[
  {
    "path": ".gitignore",
    "content": ".bundle\n.rvmrc\n*~\n*.pyc\n*.egg\n*.egg-info\n.eggs\n*.pid\n*.pid.lock\n*.gem\n*.rpm\n*.deb\ndocker/*/generated\ndocker/config.yml\n/doc/.site\n/doc/sdk/python/arvados\n/doc/sdk/python/arvados.html\n/doc/sdk/python/index.html\n/doc/sdk/python/search.js\n/doc/sdk/R/arvados\n/doc/sdk/java-v2/javadoc\n*.class\n/sdk/cli/binstubs/\n/sdk/ruby/binstubs/\n/services/api/binstubs/\n/services/login-sync/binstubs/\n/services/api/config/arvados-clients.yml\n/contrib/arvados-bootstrap/build/\n/contrib/arvados-bootstrap/dist/\n/sdk/cwl/build/\n/sdk/cwl/dist/\n/sdk/python/build/\n/sdk/python/dist/\n/services/dockercleaner/build/\n/services/dockercleaner/dist/\n/services/fuse/build/\n/services/fuse/dist/\n/tools/cluster-activity/build/\n/tools/cluster-activity/dist/\n/tools/crunchstat-summary/build/\n/tools/crunchstat-summary/dist/\n/tools/python-metapackage/build/\n/tools/python-metapackage/dist/\n/tools/user-activity/build/\n/tools/user-activity/dist/\n*#*\nvendor/\ntmp/\n.DS_Store/\n.vscode\n.Rproj.user\n*.bak\n*.log\narvados-snakeoil-ca.pem\n.vagrant\n/packages/\n.eslintcache\n"
  },
  {
    "path": ".licenseignore",
    "content": "*agpl-3.0.html\n*agpl-3.0.txt\napache-2.0.txt\nAUTHORS\n*/bootstrap.css\n*/bootstrap.js\n*bootstrap-theme.css\n*by-sa-3.0.html\n*by-sa-3.0.txt\n*COPYING\ndoc/fonts/*\ndoc/_includes/_config_default_yml.liquid\ndoc/_includes/_terraform_*_tfvars.liquid\ndoc/user/cwl/federated/*\ndoc/_includes/_federated_cwl.liquid\n*/docker_image\ndocker/jobs/apt.arvados.org*.list\ndocker/jobs/1078ECD7.key\n*/en.bootstrap.yml\n*font-awesome.css\n*.gif\n.gitignore\n*/.gitignore\n*/.gitkeep\n*/.gitstub\n*.gz\n*.gz.report\n*.ico\n*.jpg\n*.svg\n*.odg\n*.json\n*LICENSE*.html\n.licenseignore\n*LICENSE*.txt\n*.lock\n*.log\n*.map\n*.min.css\n*.min.js\n*.png\n*/proc_stat\n*/pytest.ini\n*/README\n*/robots.txt\n*/runit-docker/*\n*/sb-admin.css.scss\n*/script/rails\nsdk/cwl/tests/input/blorp.txt\nsdk/cwl/tests/tool/blub.txt\nsdk/cwl/tests/19109-upload-secondary/*\nsdk/cwl/tests/federation/data/*\nsdk/cwl/tests/fake-keep-mount/fake_collection_dir/.arvados#collection\nsdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt\nsdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt\nsdk/go/manifest/testdata/*_manifest\nsdk/java/.classpath\nsdk/java/pom.xml\nsdk/java/.project\nsdk/java/.settings/org.eclipse.jdt.core.prefs\nsdk/java/src/main/resources/log4j.properties\nsdk/pam/examples/shellinabox\nsdk/pam/pam-configs/arvados\nsdk/python/tests/data/*\nservices/api/config/unbound.template\nservices/api/config/config.default.yml\nservices/arv-web/sample-cgi-app/public/.htaccess\nservices/arv-web/sample-cgi-app/public/index.cgi\nservices/keepproxy/pkg-extras/etc/default/keepproxy\n*.tar\ntools/crunchstat-summary/tests/crunchstat_error_messages.txt\ntools/crunchstat-summary/crunchstat_summary/synchronizer.js\ntools/cluster-activity/tests/*.html\ntools/cluster-activity/tests/*.csv\ncontrib/R-sdk/DESCRIPTION\ncontrib/R-sdk/NAMESPACE\ncontrib/R-sdk/.Rbuildignore\ncontrib/R-sdk/ArvadosR.Rproj\n*.Rd\nlib/dispatchcloud/test/sshkey_*\n*.asc\ncontrib/java-sdk-v2/build.gradle\ncontrib/java-sdk-v2/settings.gradle\ncontrib/java-sdk-v2/src/test/resources/*\nsdk/cwl/tests/wf/feddemo\ngo.mod\ngo.sum\ndoc/install/*.xlsx\nsdk/cwl/tests/wf/hello.txt\nsdk/cwl/tests/wf/indir1/hello2.txt\nsdk/cwl/tests/chipseq/data/Genomes/*\nCITATION.cff\nSECURITY.md\nlib/crunchstat/testdata/*\nlib/controller/localdb/testdata/*.pub\nsdk/ruby-google-api-client/*\nservices/api/bin/rails\nservices/api/bin/rake\nservices/api/bin/setup\nservices/api/bin/yarn\nservices/api/storage.yml\nservices/api/test.rb.example\nservices/api/config/boot.rb\nservices/api/config/environment.rb\nservices/api/config/initializers/application_controller_renderer.rb\nservices/api/config/initializers/assets.rb\nservices/api/config/initializers/backtrace_silencers.rb\nservices/api/config/initializers/content_security_policy.rb\nservices/api/config/initializers/cookies_serializer.rb\nservices/api/config/initializers/filter_parameter_logging.rb\nservices/api/config/initializers/mime_types.rb\nservices/api/config/initializers/new_framework_defaults_*.rb\nservices/api/config/initializers/permissions_policy.rb\nservices/api/config/initializers/wrap_parameters.rb\nservices/api/config/locales/en.yml\nservices/api/config.ru\nservices/workbench2/*.d.ts\nservices/workbench2/*.css\nservices/workbench2/*.scss\nservices/workbench2/README.md\nservices/workbench2/public/*\nservices/workbench2/.yarnrc\nservices/workbench2/.npmrc\nservices/workbench2/src/lib/cwl-svg/*\nservices/workbench2/tools/arvados_config.yml\nservices/workbench2/cypress/fixtures/files/5mb.
bin\nservices/workbench2/cypress/fixtures/files/15mb.bin\nservices/workbench2/cypress/fixtures/files/cat.png\nservices/workbench2/cypress/fixtures/files/banner.html\nservices/workbench2/cypress/fixtures/files/tooltips.txt\nservices/workbench2/cypress/fixtures/webdav-propfind-outputs.xml\nservices/workbench2/.yarn/releases/*\nservices/workbench2/package.json\nservices/workbench2/yarn.lock\n"
  },
  {
    "path": "AUTHORS",
    "content": "# Names should be added to this file with this pattern:\n#\n# For individuals:\n#   Name <email address>\n#\n# For organizations:\n#   Organization <fnmatch pattern>\n#\n# See python fnmatch module documentation for more information.\n\nCuroverse, Inc. <*@curoverse.com>\nAdam Savitzky <adam.savitzky@gmail.com>\nColin Nolan <colin.nolan@sanger.ac.uk>\nDavid <davide.fiorentino.loregio@gmail.com>\nGuillermo Carrasco <guille.ch.88@gmail.com>\nJoshua Randall <joshua.randall@sanger.ac.uk>\nPresident and Fellows of Harvard College <*@harvard.edu>\nThomas Mooney <tmooney@genome.wustl.edu>\nChen Chen <aflyhorse@gmail.com>\nVeritas Genetics, Inc. <*@veritasgenetics.com>\nCurii Corporation <*@curii.com>\nDante Tsang <dante@dantetsang.com>\nCodex Genetics Ltd <info@codexgenetics.com>\nBruno P. Kinoshita <brunodepaulak@yahoo.com.br>\nGeorge Chlipala <gchlip2@uic.edu>\n"
  },
  {
    "path": "CITATION.cff",
    "content": "cff-version: 1.2.0\nmessage: \"If you use this software, please cite it as below.\"\nauthors:\n- name: \"The Arvados Authors\"\n- family-names: \"Amstutz\"\n  given-names: \"Peter\"\n  orcid: \"https://orcid.org/0000-0003-3566-7705\"\n- family-names: \"Bértoli\"\n  given-names: \"Javier\"\n  family-names: \"César\"\n  given-names: \"Nico\"\n- family-names: \"Clegg\"\n  given-names: \"Tom\"\n  orcid: \"https://orcid.org/0000-0001-6751-2930\"\n- family-names: \"Di Pentima\"\n  given-names: \"Lucas\"\n  orcid: \"https://orcid.org/0000-0002-2807-6854\"\n- family-names: \"Kutyła\"\n  given-names: \"Daniel\"\n- family-names: \"Li\"\n  given-names: \"Jiayong\"\n- family-names: \"Smith\"\n  given-names: \"Stephen\"\n- family-names: \"Vandewege\"\n  given-names: \"Ward\"\n  orcid: \"https://orcid.org/0000-0002-2527-6949\"\n- family-names: \"Wait Zaranek\"\n  given-names: \"Alexander\"\n  orcid: \"https://orcid.org/0000-0002-0415-9655\"\n- family-names: \"Wait Zaranek\"\n  given-names: \"Sarah\"\n  orcid: \"https://orcid.org/0000-0003-4716-9121\"\ntitle: \"Arvados\"\nabstract: \"Arvados is an open source platform for managing, processing, and sharing genomic and other large scientific and biomedical data.\"\ntype: software\nurl: \"https://github.com/arvados/arvados/\"\ndoi: 10.5281/zenodo.6382942\n"
  },
  {
    "path": "CODE_OF_CONDUCT.md",
    "content": "Arvados Code of Conduct\n=======================\n\nThe Arvados Project is dedicated to providing a harassment-free experience for\neveryone. We do not tolerate harassment of participants in any form.\n\nThis code of conduct applies to all Arvados Project spaces both online and off:\nGitter chat, Redmine issues, wiki, mailing lists, forums, video chats, and any other\nArvados spaces. Anyone who violates this code of conduct may be sanctioned or\nexpelled from these spaces at the discretion of the Arvados Team.\n\nSome Arvados Project spaces may have additional rules in place, which will be\nmade clearly available to participants. Participants are responsible for\nknowing and abiding by these rules.\n\nHarassment includes, but is not limited to:\n\n - Offensive comments related to gender, gender identity and expression, sexual\norientation, disability, mental illness, neuro(a)typicality, physical\nappearance, body size, age, race, or religion.\n - Unwelcome comments regarding a person’s lifestyle choices and practices,\nincluding those related to food, health, parenting, drugs, and employment.\n - Deliberate misgendering or use of [dead](https://www.quora.com/What-is-deadnaming/answer/Nancy-C-Walker)\nor rejected names.\n - Gratuitous or off-topic sexual images or behaviour in spaces where they’re not\nappropriate.\n - Physical contact and simulated physical contact (eg, textual descriptions like\n“\\*hug\\*” or “\\*backrub\\*”) without consent or after a request to stop.\n - Threats of violence.\n - Incitement of violence towards any individual, including encouraging a person\nto commit suicide or to engage in self-harm.\n - Deliberate intimidation.\n - Stalking or following.\n - Harassing photography or recording, including logging online activity for\nharassment purposes.\n - Sustained disruption of discussion.\n - Unwelcome sexual attention.\n - Pattern of inappropriate social contact, such as requesting/assuming\ninappropriate levels of intimacy with others\n - Continued one-on-one communication after requests to cease.\n - Deliberate “outing” of any aspect of a person’s identity without their consent\nexcept as necessary to protect vulnerable people from intentional abuse.\n - Publication of non-harassing private communication.\n\nThe Arvados Project prioritizes marginalized people’s safety over privileged\npeople’s comfort. The Arvados Leadership Team will not act on complaints regarding:\n\n - ‘Reverse’ -isms, including ‘reverse racism,’ ‘reverse sexism,’ and ‘cisphobia’\n - Reasonable communication of boundaries, such as “leave me alone,” “go away,” or\n“I’m not discussing this with you.”\n - Communicating in a [tone](http://geekfeminism.wikia.com/wiki/Tone_argument)\nyou don’t find congenial\n\nReporting\n---------\n\nIf you are being harassed by a member of the Arvados Project, notice that someone\nelse is being harassed, or have any other concerns, please contact the Arvados\nProject Team at contact@arvados.org. If person who is harassing\nyou is on the team, they will recuse themselves from handling your incident. We\nwill respond as promptly as we can.\n\nThis code of conduct applies to Arvados Project spaces, but if you are being\nharassed by a member of Arvados Project outside our spaces, we still want to\nknow about it. We will take all good-faith reports of harassment by Arvados Project\nmembers, especially the Arvados Team, seriously. This includes harassment\noutside our spaces and harassment that took place at any point in time. 
The\nabuse team reserves the right to exclude people from the Arvados Project based on\ntheir past behavior, including behavior outside Arvados Project spaces and\nbehavior towards people who are not in the Arvados Project.\n\nIn order to protect volunteers from abuse and burnout, we reserve the right to\nreject any report we believe to have been made in bad faith. Reports intended\nto silence legitimate criticism may be deleted without response.\n\nWe will respect confidentiality requests for the purpose of protecting victims\nof abuse. At our discretion, we may publicly name a person about whom we’ve\nreceived harassment complaints, or privately warn third parties about them, if\nwe believe that doing so will increase the safety of Arvados Project members or\nthe general public. We will not name harassment victims without their\naffirmative consent.\n\nConsequences\n------------\n\nParticipants asked to stop any harassing behavior are expected to comply\nimmediately.\n\nIf a participant engages in harassing behavior, the Arvados Team may\ntake any action they deem appropriate, up to and including expulsion from all\nArvados Project spaces and identification of the participant as a harasser to other\nArvados Project members or the general public.\n\nThis anti-harassment policy is based on the [example policy from the Geek\nFeminism wiki](http://geekfeminism.wikia.com/wiki/Community_anti-harassment/Policy),\ncreated by the Geek Feminism community.\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Contributing to Arvados\n\nArvados is free software, which means it is free for all to use, learn\nfrom, and improve.  We encourage contributions from the community that\nimprove Arvados for everyone.  Some examples of contributions are bug\nreports, bug fixes, new features, and scripts or documentation that help\nwith using, administering, or installing Arvados.  We also love to\nhear about Arvados success stories.\n\n## Reporting Issues\n\nArvados uses [GitHub Issues](https://github.com/arvados/arvados/issues). You can file issues against any Arvados component there. Even if you're not sure which component causes the issue, you can still file problem reports and we'll work with you to address them.\n\n## Contributing Code\n\nThe preferred method for making contributions is through GitHub pull requests. The rest of this guide helps orient you with the code and discusses requirements for all contributions, from the smallest typo fix to entire new components.\n\nIf you're interested in developing a large new feature for Arvados, please file an issue to discuss it with us first. We can give you guidance on how to best organize the work before you start it.\n\n## Setting Up Your Development Environment\n\nThe [Arvados source code is hosted on GitHub](https://github.com/arvados/arvados). Once you clone it, you'll find guides for specific topics under the `doc/development` directory. You'll probably want to [install a development environment](doc/development/Prerequisites.md) and [learn how to run tests](doc/development/RunningTests.md). There are also some component-specific guides.\n\n### Setting Up Git\n\nWe provide Git configuration and hooks to help you follow project conventions.\n\n`doc/development/git.conf` includes a block of Git configuration settings. You can set it up for your checkout by running `git config edit --local`: insert the contents of `doc/development/git.conf`, edit them following the comments, then save and exit.\n\nInstall our `prepare-commit-msg` hook:\n\n```sh\n$ install -b -m 755 doc/development/prepare-commit-msg.sh .git/hooks/prepare-commit-msg\n```\n\n## Prepare a Development Branch\n\nIf you haven't before, fork the Arvados repository using the GitHub \"Fork\" button. If you have, make sure your fork's `main` branch is up-to-date with Arvados'.\n\nThen start a new branch for your development named like `1234-your-work`. The number at the start should match the GitHub issue this request is associated with. Then briefly describe the main change your branch makes.\n\n### Coding Standards\n\nPlease familiarize yourself with our [coding standards](doc/development/CodingStandards.md) for the component(s) you're working on and follow them in your work.\n\n### Sign Off Your Commits\n\nContributions must be signed off. The sign-off is a simple line at the end of each commit message  which certifies that you wrote it or otherwise have the right to contribute it under the license listed in the file(s) modified. 
Make sure each commit message contains the following line with your real name and email (sorry, no pseudonymous or anonymous contributions):\n\n    Arvados-DCO-1.1-Signed-off-by: Alex Doe <alex.doe@example.com>\n\nWhen you add this, you certify the below (from <https://developercertificate.org>):\n\n> Developer Certificate of Origin  \n> Version 1.1\n>\n> Copyright (C) 2004, 2006 The Linux Foundation and its contributors.\n>\n> Everyone is permitted to copy and distribute verbatim copies of this\n> license document, but changing it is not allowed.\n>\n>\n> Developer's Certificate of Origin 1.1\n>\n> By making a contribution to this project, I certify that:\n>\n> (a) The contribution was created in whole or in part by me and I\n>     have the right to submit it under the open source license\n>     indicated in the file; or\n>\n> (b) The contribution is based upon previous work that, to the best\n>     of my knowledge, is covered under an appropriate open source\n>     license and I have the right under that license to submit that\n>     work with modifications, whether created in whole or in part\n>     by me, under the same open source license (unless I am\n>     permitted to submit under a different license), as indicated\n>     in the file; or\n>\n> (c) The contribution was provided directly to me by some other\n>     person who certified (a), (b) or (c) and I have not modified\n>     it.\n>\n> (d) I understand and agree that this project and the contribution\n>     are public and that a record of the contribution (including all\n>     personal information I submit with it, including my sign-off) is\n>     maintained indefinitely and may be redistributed consistent with\n>     this project or the open source license(s) involved.\n\n### Add License Headers\n\nThe comments at the top of each file must contain this copyright notice:\n\n> Copyright © The Arvados Authors. All rights reserved.\n\nThey must also contain an `SPDX-License-Identifier` to identify the license of this component.\n\nIn most cases you can copy this header from another file in the component. If you need more guidance, refer to [the COPYING file](COPYING).\n\nIf it is not technically possible to add these comments to a file (for example, because it's a binary test file), you may add its path to the `.licenseignore` file instead.\n\n### Add Your Authorship\n\nIf you are not already listed in [the AUTHORS file](AUTHORS), please add yourself in the branch, following the documented format.\n\n## Create Your Pull Request\n\nOnce you've finished pushing changes to your branch, create a pull request against `arvados:main` with the following checklist filled out:\n\n    * All agreed upon points are implemented / addressed.  
Describe changes from pre-implementation design.\n    ** _comments_\n    * Anything not implemented (discovered or discussed during work) has a follow-up story.\n    ** _comments_\n    * Code is tested and passing, both automated and manual, what manual testing was done is described.\n    ** _comments_\n    * The tested code incorporates recent main branch changes.\n    ** _confirm_\n    * New or changed UI/UX has gotten feedback from stakeholders.\n    ** _comments_\n    * Documentation has been updated.\n    ** _comments_\n    * Behaves appropriately at the intended scale (describe intended scale).\n    ** _comments_\n    * Considered backwards and forwards compatibility issues between client and server.\n    ** _comments_\n    * Follows our coding standards, including GUI style guidelines\n    ** _comments_\n\n\"Incorporates recent main branch changes\" means that the branch is either based on, or merged, the `main` branch within the last week. The more active development on a component is, the more important it is to be up-to-date with main to avoid surprising test failures post-merge.\n\nUI/UX stands for “User Interface / User Experience”. This includes new or modified GUI elements in Workbench and as well as usability elements of command line tools.\n\nStakeholders typically include the product manager and may include designers, salespeople, customers, and other end users as appropriate. In this process, the assigned developer demos the new feature, makes note of any feedback, and then based on their judgement either: implements the changes, provides a reason why the feedback cannot be acted on, or discusses how to handle the feedback with the product manager and/or assigned reviewer. This feedback is typically obtained in earlier drafts of the pull request before it is submitted for final review.\n\nA member of the core team will review the pull request. They may have questions or comments through the pull request interface. Once all issues have been resolved, your branch will be merged.\n\n## Continuous Integration\n\nContinuous integration is hosted at <https://ci.arvados.org/>. Currently, external contributors cannot trigger test runs. Trusted contributors may be given permission to do so.\n\n## Community Chat\n\nYou can chat with other members of the [Arvados community on Gitter](https://gitter.im/arvados/community). Come say hi!\n"
  },
  {
    "path": "COPYING",
    "content": "Unless indicated otherwise in the header of the file, the files in this\nrepository are distributed under one of three different licenses: AGPL-3.0,\nApache-2.0 or CC-BY-SA-3.0.\n\nIndividual files contain an SPDX tag that indicates the license for the file.\nThese are the three tags in use:\n\n    SPDX-License-Identifier: AGPL-3.0\n    SPDX-License-Identifier: Apache-2.0\n    SPDX-License-Identifier: CC-BY-SA-3.0\n\nThis enables machine processing of license information based on the SPDX\nLicense Identifiers that are available here: http://spdx.org/licenses/\n\nThe full license text for each license is appended below, and is also available\nin this directory:\n\n  AGPL-3.0:     agpl-3.0.txt\n  Apache-2.0:   apache-2.0.txt\n  CC-BY-SA-3.0: cc-by-sa-3.0.txt\n\nAs a general rule, code in the sdk/ directory is licensed Apache-2.0,\ndocumentation in the doc/ directory is licensed CC-BY-SA-3.0, and\neverything else is licensed AGPL-3.0.\n\n###############################################################################\n\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. 
For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. 
We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n\n###############################################################################\n\n                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU Affero General Public License is a free, copyleft license for\nsoftware and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nour General Public Licenses are intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.\n\n  A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.\n\n  The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  
Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.\n\n  An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU Affero General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  
A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. 
Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. 
Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  
Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  
The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Remote Network Interaction; Use with the GNU General Public License.\n\n  Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the work with which it is combined will remain governed by version\n3 of the GNU General Public License.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new versions\nwill be similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU Affero General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  
If the Program does not specify a version number of the\nGNU Affero General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a \"Source\" link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n<http://www.gnu.org/licenses/>.\n\n###############################################################################\n\nAttribution-ShareAlike 3.0 Unported\n\n    CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN \"AS-IS\" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM ITS USE.\n\nLicense\n\nTHE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE (\"CCPL\" OR \"LICENSE\"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.\n\nBY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.\n\n1. Definitions\n\n    \"Adaptation\" means a work based upon the Work, or upon the Work and other pre-existing works, such as a translation, adaptation, derivative work, arrangement of music or other alterations of a literary or artistic work, or phonogram or performance and includes cinematographic adaptations or any other form in which the Work may be recast, transformed, or adapted including in any form recognizably derived from the original, except that a work that constitutes a Collection will not be considered an Adaptation for the purpose of this License. For the avoidance of doubt, where the Work is a musical work, performance or phonogram, the synchronization of the Work in timed-relation with a moving image (\"synching\") will be considered an Adaptation for the purpose of this License.\n    \"Collection\" means a collection of literary or artistic works, such as encyclopedias and anthologies, or performances, phonograms or broadcasts, or other works or subject matter other than works listed in Section 1(f) below, which, by reason of the selection and arrangement of their contents, constitute intellectual creations, in which the Work is included in its entirety in unmodified form along with one or more other contributions, each constituting separate and independent works in themselves, which together are assembled into a collective whole. 
A work that constitutes a Collection will not be considered an Adaptation (as defined below) for the purposes of this License.\n    \"Creative Commons Compatible License\" means a license that is listed at https://creativecommons.org/compatiblelicenses that has been approved by Creative Commons as being essentially equivalent to this License, including, at a minimum, because that license: (i) contains terms that have the same purpose, meaning and effect as the License Elements of this License; and, (ii) explicitly permits the relicensing of adaptations of works made available under that license under this License or a Creative Commons jurisdiction license with the same License Elements as this License.\n    \"Distribute\" means to make available to the public the original and copies of the Work or Adaptation, as appropriate, through sale or other transfer of ownership.\n    \"License Elements\" means the following high-level license attributes as selected by Licensor and indicated in the title of this License: Attribution, ShareAlike.\n    \"Licensor\" means the individual, individuals, entity or entities that offer(s) the Work under the terms of this License.\n    \"Original Author\" means, in the case of a literary or artistic work, the individual, individuals, entity or entities who created the Work or if no individual or entity can be identified, the publisher; and in addition (i) in the case of a performance the actors, singers, musicians, dancers, and other persons who act, sing, deliver, declaim, play in, interpret or otherwise perform literary or artistic works or expressions of folklore; (ii) in the case of a phonogram the producer being the person or legal entity who first fixes the sounds of a performance or other sounds; and, (iii) in the case of broadcasts, the organization that transmits the broadcast.\n    \"Work\" means the literary and/or artistic work offered under the terms of this License including without limitation any production in the literary, scientific and artistic domain, whatever may be the mode or form of its expression including digital form, such as a book, pamphlet and other writing; a lecture, address, sermon or other work of the same nature; a dramatic or dramatico-musical work; a choreographic work or entertainment in dumb show; a musical composition with or without words; a cinematographic work to which are assimilated works expressed by a process analogous to cinematography; a work of drawing, painting, architecture, sculpture, engraving or lithography; a photographic work to which are assimilated works expressed by a process analogous to photography; a work of applied art; an illustration, map, plan, sketch or three-dimensional work relative to geography, topography, architecture or science; a performance; a broadcast; a phonogram; a compilation of data to the extent it is protected as a copyrightable work; or a work performed by a variety or circus performer to the extent it is not otherwise considered a literary or artistic work.\n    \"You\" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation.\n    \"Publicly Perform\" means to perform public recitations of the Work and to communicate to the public those public recitations, by any means or process, including by wire or wireless means or public digital performances; to make 
available to the public Works in such a way that members of the public may access these Works from a place and at a place individually chosen by them; to perform the Work to the public by any means or process and the communication to the public of the performances of the Work, including by public digital performance; to broadcast and rebroadcast the Work by any means including signs, sounds or images.\n    \"Reproduce\" means to make copies of the Work by any means including without limitation by sound or visual recordings and the right of fixation and reproducing fixations of the Work, including storage of a protected performance or phonogram in digital form or other electronic medium.\n\n2. Fair Dealing Rights. Nothing in this License is intended to reduce, limit, or restrict any uses free from copyright or rights arising from limitations or exceptions that are provided for in connection with the copyright protection under copyright law or other applicable laws.\n\n3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below:\n\n    to Reproduce the Work, to incorporate the Work into one or more Collections, and to Reproduce the Work as incorporated in the Collections;\n    to create and Reproduce Adaptations provided that any such Adaptation, including any translation in any medium, takes reasonable steps to clearly label, demarcate or otherwise identify that changes were made to the original Work. For example, a translation could be marked \"The original work was translated from English to Spanish,\" or a modification could indicate \"The original work has been modified.\";\n    to Distribute and Publicly Perform the Work including as incorporated in Collections; and,\n    to Distribute and Publicly Perform Adaptations.\n\n    For the avoidance of doubt:\n        Non-waivable Compulsory License Schemes. In those jurisdictions in which the right to collect royalties through any statutory or compulsory licensing scheme cannot be waived, the Licensor reserves the exclusive right to collect such royalties for any exercise by You of the rights granted under this License;\n        Waivable Compulsory License Schemes. In those jurisdictions in which the right to collect royalties through any statutory or compulsory licensing scheme can be waived, the Licensor waives the exclusive right to collect such royalties for any exercise by You of the rights granted under this License; and,\n        Voluntary License Schemes. The Licensor waives the right to collect royalties, whether individually or, in the event that the Licensor is a member of a collecting society that administers voluntary licensing schemes, via that society, from any exercise by You of the rights granted under this License.\n\nThe above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. Subject to Section 8(f), all rights not expressly granted by Licensor are hereby reserved.\n\n4. Restrictions. The license granted in Section 3 above is expressly made subject to and limited by the following restrictions:\n\n    You may Distribute or Publicly Perform the Work only under the terms of this License. 
You must include a copy of, or the Uniform Resource Identifier (URI) for, this License with every copy of the Work You Distribute or Publicly Perform. You may not offer or impose any terms on the Work that restrict the terms of this License or the ability of the recipient of the Work to exercise the rights granted to that recipient under the terms of the License. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties with every copy of the Work You Distribute or Publicly Perform. When You Distribute or Publicly Perform the Work, You may not impose any effective technological measures on the Work that restrict the ability of a recipient of the Work from You to exercise the rights granted to that recipient under the terms of the License. This Section 4(a) applies to the Work as incorporated in a Collection, but this does not require the Collection apart from the Work itself to be made subject to the terms of this License. If You create a Collection, upon notice from any Licensor You must, to the extent practicable, remove from the Collection any credit as required by Section 4(c), as requested. If You create an Adaptation, upon notice from any Licensor You must, to the extent practicable, remove from the Adaptation any credit as required by Section 4(c), as requested.\n    You may Distribute or Publicly Perform an Adaptation only under the terms of: (i) this License; (ii) a later version of this License with the same License Elements as this License; (iii) a Creative Commons jurisdiction license (either this or a later license version) that contains the same License Elements as this License (e.g., Attribution-ShareAlike 3.0 US); (iv) a Creative Commons Compatible License. If you license the Adaptation under one of the licenses mentioned in (iv), you must comply with the terms of that license. If you license the Adaptation under the terms of any of the licenses mentioned in (i), (ii) or (iii) (the \"Applicable License\"), you must comply with the terms of the Applicable License generally and the following provisions: (I) You must include a copy of, or the URI for, the Applicable License with every copy of each Adaptation You Distribute or Publicly Perform; (II) You may not offer or impose any terms on the Adaptation that restrict the terms of the Applicable License or the ability of the recipient of the Adaptation to exercise the rights granted to that recipient under the terms of the Applicable License; (III) You must keep intact all notices that refer to the Applicable License and to the disclaimer of warranties with every copy of the Work as included in the Adaptation You Distribute or Publicly Perform; (IV) when You Distribute or Publicly Perform the Adaptation, You may not impose any effective technological measures on the Adaptation that restrict the ability of a recipient of the Adaptation from You to exercise the rights granted to that recipient under the terms of the Applicable License. 
This Section 4(b) applies to the Adaptation as incorporated in a Collection, but this does not require the Collection apart from the Adaptation itself to be made subject to the terms of the Applicable License.\n    If You Distribute, or Publicly Perform the Work or any Adaptations or Collections, You must, unless a request has been made pursuant to Section 4(a), keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or if the Original Author and/or Licensor designate another party or parties (e.g., a sponsor institute, publishing entity, journal) for attribution (\"Attribution Parties\") in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; (ii) the title of the Work if supplied; (iii) to the extent reasonably practicable, the URI, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and (iv), consistent with Section 3(b), in the case of an Adaptation, a credit identifying the use of the Work in the Adaptation (e.g., \"French translation of the Work by Original Author,\" or \"Screenplay based on original Work by Original Author\"). The credit required by this Section 4(c) may be implemented in any reasonable manner; provided, however, that in the case of an Adaptation or Collection, at a minimum such credit will appear, if a credit for all contributing authors of the Adaptation or Collection appears, then as part of these credits and in a manner at least as prominent as the credits for the other contributing authors. For the avoidance of doubt, You may only use the credit required by this Section for the purpose of attribution in the manner set out above and, by exercising Your rights under this License, You may not implicitly or explicitly assert or imply any connection with, sponsorship or endorsement by the Original Author, Licensor and/or Attribution Parties, as appropriate, of You or Your use of the Work, without the separate, express prior written permission of the Original Author, Licensor and/or Attribution Parties.\n    Except as otherwise agreed in writing by the Licensor or as may be otherwise permitted by applicable law, if You Reproduce, Distribute or Publicly Perform the Work either by itself or as part of any Adaptations or Collections, You must not distort, mutilate, modify or take other derogatory action in relation to the Work which would be prejudicial to the Original Author's honor or reputation. Licensor agrees that in those jurisdictions (e.g. Japan), in which any exercise of the right granted in Section 3(b) of this License (the right to make Adaptations) would be deemed to be a distortion, mutilation, modification or other derogatory action prejudicial to the Original Author's honor and reputation, the Licensor will waive or not assert, as appropriate, this Section, to the fullest extent permitted by the applicable national law, to enable You to reasonably exercise Your right under Section 3(b) of this License (right to make Adaptations) but not otherwise.\n\n5. 
Representations, Warranties and Disclaimer\n\nUNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.\n\n6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.\n\n7. Termination\n\n    This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Adaptations or Collections from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License.\n    Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however, that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above.\n\n8. Miscellaneous\n\n    Each time You Distribute or Publicly Perform the Work or a Collection, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License.\n    Each time You Distribute or Publicly Perform an Adaptation, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License.\n    If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.\n    No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent.\n    This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. 
This License may not be modified without the mutual written agreement of the Licensor and You.\n    The rights granted under, and the subject matter referenced, in this License were drafted utilizing the terminology of the Berne Convention for the Protection of Literary and Artistic Works (as amended on September 28, 1979), the Rome Convention of 1961, the WIPO Copyright Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996 and the Universal Copyright Convention (as revised on July 24, 1971). These rights and subject matter take effect in the relevant jurisdiction in which the License terms are sought to be enforced according to the corresponding provisions of the implementation of those treaty provisions in the applicable national law. If the standard suite of rights granted under applicable copyright law includes additional rights not granted under this License, such additional rights are deemed to be included in the License; this License is not intended to restrict the license of any rights under applicable law.\n\n    Creative Commons Notice\n\n    Creative Commons is not a party to this License, and makes no warranty whatsoever in connection with the Work. Creative Commons will not be liable to You or any party on any legal theory for any damages whatsoever, including without limitation any general, special, incidental or consequential damages arising in connection to this license. Notwithstanding the foregoing two (2) sentences, if Creative Commons has expressly identified itself as the Licensor hereunder, it shall have all rights and obligations of Licensor.\n\n    Except for the limited purpose of indicating to the public that the Work is licensed under the CCPL, Creative Commons does not authorize the use by either party of the trademark \"Creative Commons\" or any related trademark or logo of Creative Commons without the prior written consent of Creative Commons. Any permitted use will be in compliance with Creative Commons' then-current trademark usage guidelines, as may be published on its website or otherwise made available upon request from time to time. For the avoidance of doubt, this trademark restriction does not form part of the License.\n\n    Creative Commons may be contacted at https://creativecommons.org/.\n"
  },
  {
    "path": "Makefile",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nexport WORKSPACE?=$(shell pwd)\nhelp:\n\t@echo >&2\n\t@echo >&2 \"There is no default make target here.  Did you mean 'make test'?\"\n\t@echo >&2\n\t@echo >&2 \"More info:\"\n\t@echo >&2 \"  Installing              --> http://doc.arvados.org/install\"\n\t@echo >&2 \"  Developing/contributing --> https://github.com/arvados/arvados\"\n\t@echo >&2 \"  Project home            --> https://arvados.org\"\n\t@echo >&2\n\t@false\ntest:\n\tbuild/run-tests.sh ${TEST_FLAGS}\npackages:\n\tbuild/run-build-packages-all-targets.sh ${PACKAGES_FLAGS}\ntest-packages:\n\tbuild/run-build-packages-all-targets.sh --test-packages ${PACKAGES_FLAGS}\n"
  },
  {
    "path": "README.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n[![Join the chat at https://gitter.im/arvados/community](https://badges.gitter.im/arvados/community.svg)](https://gitter.im/arvados/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) | [Installing Arvados](https://doc.arvados.org/install/index.html) | [Installing Client SDKs](https://doc.arvados.org/sdk/index.html) | [Report a bug](https://github.com/arvados/arvados/issues/new) | [Development and Contributing](CONTRIBUTING.md)\n\n<img align=\"right\" src=\"doc/images/dax.png\" height=\"240px\">\n\n[Arvados](https://arvados.org) is an open source platform for\nmanaging, processing, and sharing genomic and other large scientific\nand biomedical data.  With Arvados, bioinformaticians run and scale\ncompute-intensive workflows, developers create biomedical\napplications, and IT administrators manage large compute and storage\nresources.\n\nThe key components of Arvados are:\n\n* *Keep*: Keep is the Arvados storage system for managing and storing large\ncollections of files.  Keep combines content addressing and a\ndistributed storage architecture resulting in both high reliability\nand high throughput.  Every file stored in Keep can be accurately\nverified every time it is retrieved.  Keep supports the creation of\ncollections as a flexible way to define data sets without having to\nre-organize or needlessly copy data. Keep works on a wide range of\nunderlying filesystems and object stores.\n\n* *Crunch*: Crunch is the orchestration system for running [Common Workflow Language](https://www.commonwl.org) workflows. It is\ndesigned to maintain data provenance and workflow\nreproducibility. Crunch automatically tracks data inputs and outputs\nthrough Keep and executes workflow processes in Docker containers.  In\na cloud environment, Crunch optimizes costs by scaling compute on demand.\n\n* *Workbench*: The Workbench web application allows users to interactively access\nArvados functionality.  It is especially helpful for querying and\nbrowsing data, visualizing provenance, and tracking the progress of\nworkflows.\n\n* *Command Line tools*: The command line interface (CLI) provides convenient access to Arvados\nfunctionality in the Arvados platform from the command line.\n\n* *API and SDKs*: Arvados is designed to be integrated with existing infrastructure. All\nthe services in Arvados are accessed through a RESTful API.  
SDKs are\navailable for Python, Go, R, Perl, Ruby, and Java.\n\n# Documentation\n\nComplete documentation, including the [User Guide](https://doc.arvados.org/user/index.html), [Installation documentation](https://doc.arvados.org/install/index.html), [Administrator documentation](https://doc.arvados.org/admin/index.html), and\n[API documentation](https://doc.arvados.org/api/index.html), is available at http://doc.arvados.org/.\n\nIf you wish to build the Arvados documentation from a local git clone, see\n[doc/README.textile](doc/README.textile) for instructions.\n\n# Community\n\n[![Join the chat at https://gitter.im/arvados/community](https://badges.gitter.im/arvados/community.svg)](https://gitter.im/arvados/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)\n\nThe [Arvados community](https://gitter.im/arvados/community)\nchannel at [gitter.im](https://gitter.im) is available for live\ndiscussion and support.\n\nThe [Arvados development](https://gitter.im/arvados/development)\nchannel at [gitter.im](https://gitter.im) is used to coordinate development.\n\nThe [Arvados user mailing list](http://lists.arvados.org/mailman/listinfo/arvados)\nis used to announce new versions and other news.\n\nAll participants are expected to abide by the [Arvados Code of Conduct](CODE_OF_CONDUCT.md).\n\n# Reporting bugs\n\n[Report an issue on GitHub](https://github.com/arvados/arvados/issues/new)\n\n# Development and Contributing\n\nSee [CONTRIBUTING](CONTRIBUTING.md) for information about Arvados development and how to contribute to the Arvados project.\n\n# Licensing\n\nArvados is Free Software.  See [COPYING](COPYING) for information about the open source licenses used in Arvados.\n"
  },
  {
    "path": "SECURITY.md",
    "content": "# Arvados Project Security Policy\n\n## Supported Versions\n\nThe Arvados project will issue security fixes by making point releases\non the current stable release series (X.Y.0, X.Y.1, X.Y.2, etc).\n\nThe most recent stable release version, along with release notes and\nupgrade notes documenting security fixes, can be found at these\nlocations:\n\nhttps://arvados.org/releases/\n\nhttps://doc.arvados.org/admin/upgrading.html\n\nThe Arvados project does not support versions older than the current\nstable release except by special arrangement (contact info@curii.com).\n\nRelease announcements, including notification of security fixes, are\nsent to the Arvados announcement list:\n\nhttps://lists.arvados.org//mailman/listinfo/arvados\n\n## Reporting Security Issues\n\nIf you believe you have found a security vulnerability in any Arvados-owned repository, please report it to us through coordinated disclosure.\n\n**Please do not report security vulnerabilities through public GitHub issues, discussions, or pull requests.**\n\nInstead, please send an email to dev@curii.com.\n\nPlease include as much of the information listed below as you can to help us better understand and resolve the issue:\n\n  * The type of issue (e.g., remote code execution, SQL injection, or cross-site scripting)\n  * Full paths of source file(s) related to the manifestation of the issue\n  * The location of the affected source code (tag/branch/commit or direct URL)\n  * Any special configuration required to reproduce the issue\n  * Step-by-step instructions to reproduce the issue\n  * Proof-of-concept or exploit code (if possible)\n  * Impact of the issue, including how an attacker might exploit the issue\n\nThis information will help us triage your report more quickly.\n"
  },
  {
    "path": "agpl-3.0.txt",
    "content": "                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU Affero General Public License is a free, copyleft license for\nsoftware and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nour General Public Licenses are intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.\n\n  A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.\n\n  The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.\n\n  An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU Affero General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  
\"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. 
Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  
This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  
But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  
If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  
If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  
\"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Remote Network Interaction; Use with the GNU General Public License.\n\n  Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  
This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the work with which it is combined will remain governed by version\n3 of the GNU General Public License.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new versions\nwill be similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU Affero General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU Affero General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. 
Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a \"Source\" link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n<http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "apache-2.0.txt",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "build/README",
    "content": "Prerequisites\n=============\n\nIn order to build packages, you will need:\n\n* Ansible installed following the instructions in `tools/ansible/README.md`\n* `ansible-galaxy` and `ansible-playbook` in `$PATH` (e.g., by activating\n  your Ansible virtualenv, or having symlinks to those commands inside it)\n* Docker installed\n* permission to run Docker commands\n* the `WORKSPACE` environment variable set to the absolute path of an\n  Arvados Git work tree\n\nThe Ansible playbook `tools/ansible/install-dev-tools.yml` can install all\nof these prerequisites except the last.\n\nQuickstart\n==========\n\nBuild and test all the packages for a distribution on your architecture by\nrunning:\n\n    ./run-build-test-packages-one-target.sh --target DISTRO\n\nThis will build package build and test Docker images for the named target\ndistribution, build all packages in a build container, then test all\npackages in a test container.\n\nLimit the build to a single package by adding the `--only-build\nPACKAGE_NAME` option. This is helpful when a build is mostly in good shape\nand you're tracking down last bugs in one or two packages.\n\nGet more verbose output by adding the `--debug` option.\n\nBy default the script avoids rebuilding or retesting packages that it\ndetects have already been done in past runs. You can force the script to\nrebuild or retest package(s) with the `--force-build` and `--force-test`\noptions, respectively.\n\nRun the script with `--help` for more information about other options.\n\nScripts in this directory\n=========================\n\nrun-tests.sh                             Run unit and integration test suite.\n\nrun-build-test-packages-one-target.sh    Entry point, wraps\n                                         run-build-packages-one-target.sh to\n                                         perform package building and testing\n                                         inside Docker.\n\nrun-build-packages-one-target.sh         Build packages for one target inside Docker.\n\nrun-build-packages-all-targets.sh        Run run-build-packages-one-target.sh\n                                         for every target.\n\nrun-build-packages.sh                    Actually build packages.  Intended to run\n                                         inside Docker container with proper\n                                         build environment.\n\nrun-build-packages-python-and-ruby.sh    Build Python and Ruby packages suitable\n                                         for upload to PyPi and Rubygems.\n\nrun-library.sh                           A library of functions shared by the\n                                         various scripts in this\n                                         directory.\n\nbuild_docker_image.py                    Build a Docker image from Arvados\n                                         source components\n\nAdding a new target\n===================\n\nIn order to build packages on a new distribution, you MUST:\n\n* Define containers to build the package build and test Docker images in\n  `tools/ansible/files/development-docker-images.yml`.\n* Create `package-testing/test-packages-TARGET.sh`, ideally by making it a\n  symlink to `FORMAT-common-test-packages.sh`.\n* Update the package download code near the bottom of `test_package_presence`\n  in `run-library.sh` so it can download packages for the new distribution.\n\nOf course, any part of our package build or test infrastructure may need to\nbe updated to accommodate the process for new distributions. 
If you're\nhaving trouble building many packages, consider grepping these build\nscripts for the identifier of the closest working target to see whether you\nneed to add branches or similar hooks for your target. If you're having\ntrouble building specific packages, consider doing the same for those\npackages' `fpm-info.sh` files.\n
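\nFor example, wiring up a hypothetical Debian-based target named\n`debian99` (an illustration, not a real target) might start with a symlink,\nso the shared test script can derive the target name from its own path:\n\n    cd package-testing\n    ln -s deb-common-test-packages.sh test-packages-debian99.sh\n"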
  },
  {
    "path": "build/build_docker_image.py",
    "content": "#!/usr/bin/env python3\n# build_docker_image.py - Build a Docker image with Python source packages\n#\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n#\n# Requires you have requirements.build.txt installed\n\nimport argparse\nimport logging\nimport os\nimport re\nimport runpy\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nfrom pathlib import Path\n\nlogger = logging.getLogger('build_docker_image')\n_null_loghandler = logging.NullHandler()\nlogger.addHandler(_null_loghandler)\n\ndef _log_cmd(level, msg, *args):\n    *args, cmd = args\n    if logger.isEnabledFor(level):\n        logger.log(level, f'{msg}: %s', *args, ' '.join(shlex.quote(s) for s in cmd))\n\n\ndef _log_and_run(cmd, *, level=logging.DEBUG, check=True, **kwargs):\n    _log_cmd(level, \"running command\", cmd)\n    return subprocess.run(cmd, check=check, **kwargs)\n\n\nclass OptionError(ValueError):\n    pass\n\n\nclass DockerImage:\n    _BUILD_ARGS = {}\n    _REGISTRY = {}\n\n    @classmethod\n    def register(cls, subcls):\n        cls._REGISTRY[subcls.NAME] = subcls\n        pre_name, _, shortname = subcls.NAME.rpartition('/')\n        if pre_name == 'arvados':\n            cls._REGISTRY[shortname] = subcls\n        return subcls\n\n    @classmethod\n    def build_from_args(cls, args):\n        try:\n            subcls = cls._REGISTRY[args.docker_image]\n        except KeyError:\n            raise OptionError(f\"unrecognized Docker image {args.docker_image!r}\") from None\n        else:\n            return subcls(args)\n\n    def __init__(self, args):\n        self.extra_args = args.extra_args\n        self.workspace = args.workspace\n        if args.tag is not None:\n            self.tag = args.tag\n        elif version := (args.version or self.dev_version()):\n            self.tag = f'{self.NAME}:{version}'\n        else:\n            self.tag = None\n\n    def __enter__(self):\n        tmpname = self.NAME.replace('/', '-')\n        self.context_dir = Path(tempfile.mkdtemp(prefix=f'{tmpname}.'))\n        return self\n\n    def __exit__(self, exc_type, exc_value, exc_tb):\n        shutil.rmtree(self.context_dir, ignore_errors=True)\n        del self.context_dir\n\n    def build_docker_image(self):\n        logger.info(\"building Docker image %s\", self.tag or self.NAME)\n        cmd = ['docker', 'image', 'build']\n        cmd.extend(\n            f'--build-arg={key}={val}'\n            for key, val in self._BUILD_ARGS.items()\n        )\n        cmd.append(f'--file={self.workspace / self.DOCKERFILE_PATH}')\n        if self.tag is not None:\n            cmd.append(f'--tag={self.tag}')\n        cmd.append(str(self.context_dir))\n        return _log_and_run(cmd)\n\n    def dev_version(self):\n        return None\n\n\nclass PythonVenvImage(DockerImage):\n    DOCKERFILE_PATH = 'build/docker/python-venv.Dockerfile'\n    _EXTRAS = {}\n    _TEST_COMMAND = None\n\n    def __init__(self, args):\n        arv_vars = runpy.run_path(args.workspace / 'sdk/python/arvados_version.py')\n        self.arv_pymod = arv_vars['ARVADOS_PYTHON_MODULES'][self._PACKAGE_NAME]\n        super().__init__(args)\n\n    def dev_version(self):\n        return self.arv_pymod.get_version(self.workspace / self.arv_pymod.src_path)\n\n    def build_python_wheel(self, src_dir):\n        logger.info(\"building Python wheel at %s\", src_dir)\n        cmd = [sys.executable, '-m', 'build', '--outdir', str(self.context_dir)]\n        return _log_and_run(cmd, cwd=src_dir, 
umask=0o022)\n\n    def build_requirements(self):\n        with (self.context_dir / 'requirements.txt').open('w') as requirements_file:\n            for whl_path in self.context_dir.glob('*.whl'):\n                name, _, _ = whl_path.stem.partition('-')\n                try:\n                    name += f' [{self._EXTRAS[name]}]'\n                except KeyError:\n                    pass\n                whl_uri = Path('/usr/local/src', whl_path.name).as_uri()\n                print(name, '@', whl_uri, file=requirements_file)\n\n    def build_docker_image(self):\n        for path in self.extra_args:\n            self.build_python_wheel(path)\n        for dep in self.arv_pymod.dependencies:\n            self.build_python_wheel(self.workspace / dep.src_path)\n        self.build_python_wheel(self.workspace / self.arv_pymod.src_path)\n        self.build_requirements()\n        result = super().build_docker_image()\n        if self.tag and self._TEST_COMMAND:\n            _log_and_run(\n                ['docker', 'run', '--rm', '--tty', self.tag] + self._TEST_COMMAND,\n                stdout=subprocess.DEVNULL,\n            )\n        return result\n\n\n@DockerImage.register\nclass ClusterActivityImage(PythonVenvImage):\n    NAME = 'arvados/cluster-activity'\n    _BUILD_ARGS = {\n        'APT_PKGLIST': 'libcurl4',\n        'OLD_PKGNAME': 'python3-arvados-cluster-activity',\n    }\n    _EXTRAS = {\n        'arvados_cluster_activity': 'prometheus',\n    }\n    _PACKAGE_NAME = 'arvados-cluster-activity'\n    _TEST_COMMAND = ['arv-cluster-activity', '--version']\n\n\n@DockerImage.register\nclass JobsImage(PythonVenvImage):\n    NAME = 'arvados/jobs'\n    _BUILD_ARGS = {\n        'APT_PKGLIST': 'libcurl4 nodejs',\n        'OLD_PKGNAME': 'python3-arvados-cwl-runner',\n    }\n    _PACKAGE_NAME = 'arvados-cwl-runner'\n    _TEST_COMMAND = ['arvados-cwl-runner', '--version']\n\n\nclass Environments:\n    @staticmethod\n    def production(args):\n        if args.version is None:\n            raise OptionError(\n                \"$ARVADOS_BUILDING_VERSION must be set to build production images\"\n            )\n\n    @staticmethod\n    def development(args):\n        return\n\n    _ARG_MAP = {\n        'dev': development,\n        'devel': development,\n        'development': development,\n        'prod': production,\n        'production': production,\n    }\n\n    @classmethod\n    def parse_argument(cls, s):\n        try:\n            return cls._ARG_MAP[s.lower()]\n        except KeyError:\n            raise ValueError(f\"unrecognized environment {s!r}\")\n\n\nclass UploadActions:\n    @staticmethod\n    def to_arvados(tag):\n        logger.info(\"uploading Docker image %s to Arvados\", tag)\n        name, _, version = tag.rpartition(':')\n        if name:\n            cmd = ['arv-keepdocker', name, version]\n        else:\n            cmd = ['arv-keepdocker', tag]\n        return _log_and_run(cmd)\n\n    @staticmethod\n    def to_docker_hub(tag):\n        logger.info(\"uploading Docker image %s to Docker Hub\", tag)\n        cmd = ['docker', 'push', tag]\n        for tries_left in range(4, -1, -1):\n            try:\n                docker_push = _log_and_run(cmd)\n            except subprocess.CalledProcessError:\n                if tries_left == 0:\n                    raise\n            else:\n                break\n        return docker_push\n\n    _ARG_MAP = {\n        'arv-keepdocker': to_arvados,\n        'arvados': to_arvados,\n        'docker': to_docker_hub,\n        'docker_hub': 
to_docker_hub,\n        'dockerhub': to_docker_hub,\n        'keepdocker': to_arvados,\n    }\n\n    @classmethod\n    def parse_argument(cls, s):\n        try:\n            return cls._ARG_MAP[s.lower()]\n        except KeyError:\n            raise ValueError(f\"unrecognized upload method {s!r}\")\n\n\nclass ArgumentParser(argparse.ArgumentParser):\n    def __init__(self):\n        super().__init__(\n            prog='build_docker_image.py',\n            usage='%(prog)s [options ...] IMAGE_NAME [source directory ...]',\n        )\n        # We put environment variables for the tool in the args so the rest\n        # of the program has a single place to access parameters.\n        env_workspace = os.environ.get('WORKSPACE')\n        if version := os.environ.get('ARVADOS_BUILDING_VERSION'):\n            version = re.sub(r'~(dev[0-9])', r'.\\1', version, 1)\n            version = re.sub(r'~(a|b|rc)([0-9])', r'\\1\\2', version, 1)\n        self.set_defaults(\n            version=version,\n            workspace=Path(env_workspace) if env_workspace else None,\n        )\n\n        self.add_argument(\n            '--environment',\n            type=Environments.parse_argument,\n            default=Environments.production,\n            help=\"\"\"One of `development` or `production`.\nYour build settings will use defaults and be validated based on this setting.\nDefault is `production` because it's the strictest.\n\"\"\")\n\n        self.add_argument(\n            '--loglevel',\n            type=self._parse_loglevel,\n            default=logging.WARNING,\n            help=\"\"\"Log level to use, like `debug`, `info`, `warning`, or `error`\n\"\"\")\n\n        self.add_argument(\n            '--tag', '-t',\n            help=\"\"\"Tag for the built Docker image.\nDefault is generated from the image name and build version.\n\"\"\")\n\n        self.add_argument(\n            '--upload-to',\n            type=UploadActions.parse_argument,\n            help=\"\"\"After successfully building the Docker image, upload it to\nthis destination. Choices are `arvados` or `docker_hub`. 
Both require\ncredentials in place to work.\n\"\"\")\n\n        self.add_argument(\n            'docker_image',\n            metavar='IMAGE_NAME',\n            choices=sorted(DockerImage._REGISTRY),\n            help=\"\"\"Docker image to build.\nSupported images are: %(choices)s.\n\"\"\")\n\n        self.add_argument(\n            'extra_args',\n            metavar='SOURCE_DIR',\n            type=Path,\n            nargs=argparse.ZERO_OR_MORE,\n            default=[],\n            help=\"\"\"Before building the Docker image, the tool will build a\nPython wheel from each source directory and add it to the Docker build context.\nYou can use this during testing to install specific development versions of\ndependencies.\n\"\"\")\n\n    def _parse_loglevel(self, s):\n        try:\n            return logging.getLevelNamesMapping()[s.upper()]\n        except KeyError:\n            raise ValueError(f\"unrecognized logging level {s!r}\")\n\n\ndef main(args):\n    if not isinstance(args, argparse.Namespace):\n        args = ArgumentParser().parse_args(args)\n    if args.workspace is None:\n        raise OptionError(\"$WORKSPACE must be set to the Arvados source directory\")\n    args.environment(args)\n    docker_image = DockerImage.build_from_args(args)\n    if args.upload_to and not docker_image.tag:\n        raise OptionError(\"cannot upload a Docker image without a tag\")\n    with docker_image:\n        docker_image.build_docker_image()\n    if args.upload_to:\n        args.upload_to(docker_image.tag)\n    return os.EX_OK\n\n\nif __name__ == '__main__':\n    argparser = ArgumentParser()\n    _args = argparser.parse_args()\n    logging.basicConfig(\n        format=f'{logger.name}: %(levelname)s: %(message)s',\n        level=_args.loglevel,\n    )\n    try:\n        returncode = main(_args)\n    except OptionError as err:\n        # ArgumentParser.error() prints the message and exits with status 2;\n        # the assignment below is only a safety net in case that changes.\n        argparser.error(err.args[0])\n        returncode = 2\n    except subprocess.CalledProcessError as err:\n        _log_cmd(\n            logging.ERROR,\n            \"command failed with exit code %s\",\n            err.returncode,\n            err.cmd,\n        )\n        returncode = err.returncode\n    sys.exit(returncode)\n
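\n# A minimal example invocation (a sketch; the workspace path and option\n# values below are illustrative assumptions, not project defaults):\n#\n#   WORKSPACE=$HOME/arvados ./build_docker_image.py \\\n#       --environment=development --loglevel=info jobs\n"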
  },
  {
    "path": "build/check-copyright-notices",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -e\n\nfix=false\nwhile [[ \"${@}\" != \"\" ]]\ndo\n    arg=${1}\n    shift\n    case ${arg} in\n        --help)\n            cat <<EOF\nUsage: $0 [--fix] [-- git-ls-args...]\n\nOptions:\n\n--fix   Insert missing copyright notices where possible.\n\nGit arguments:\n\nArguments after \"--\" are passed to \\`git ls-files\\`; this can be used to\nrestrict the set of files to check.\n\nEOF\n            exit 2\n            ;;\n        --fix)\n            fix=true\n            ;;\n        --)\n            break\n            ;;\n        *)\n            echo >&2 \"Unrecognized argument '${arg}'. Try $0 --help\"\n            exit 2\n            ;;\n    esac\ndone\n\nfixer() {\n    want=\"${want}\" perl -pi~ - \"${1}\" <<'EOF'\nBEGIN { undef $/ }\ns{^((\\#\\!.*?\\n|\\n*---\\n.*?\\n\\.\\.\\.\\n|<\\?xml.*?>\\n)\\n?)?}{${2}$ENV{want}\\n\\n}ms\nEOF\n}\n\nIFS=$'\\n' read -a ignores -r -d $'\\000' <.licenseignore || true\nresult=0\n\ncoproc git ls-files -z ${@} </dev/null\nwhile read -rd $'\\000' fnm\ndo\n    grepAfter=2\n    grepBefore=0\n    cs=\n    cc=\n    ce=\n    fixer=\n    if [[ ! -f ${fnm} ]] || [[ -L ${fnm} ]] || [[ ! -s ${fnm} ]]\n    then\n        continue\n    fi\n\n    ignore=\n    for pattern in \"${ignores[@]}\"\n    do\n        if [[ ${fnm} == ${pattern} ]]\n        then\n            ignore=1\n        fi\n    done\n    if [[ ${ignore} = 1 ]]; then continue; fi\n\n    case ${fnm} in\n        Makefile | */Makefile \\\n            | *.dockerfile | */Dockerfile.* | */Dockerfile | *.dockerignore \\\n            | */MANIFEST.in | */fuse.conf | */gitolite.rc \\\n            | *.pl | *.pm | *.PL \\\n            | *.rb | *.rb.example | *.rake | *.ru \\\n            | *.gemspec | */Gemfile | */Rakefile \\\n            | services/login-sync/bin/* \\\n            | sdk/cli/bin/* \\\n            | *.py \\\n            | sdk/python/bin/arv-* \\\n            | sdk/cwl/bin/* \\\n            | services/fuse/bin/* \\\n            | tools/crunchstat-summary/bin/* \\\n            | crunch_scripts/* \\\n            | *.yaml | *.yml | *.yml.example | *.cwl \\\n            | *.sh | *.service \\\n            | */run | */run-service | */restart-dns-server \\\n            | */nginx.conf \\\n            | build/build.list | *.R)\n            fixer=fixer\n            cc=\"#\"\n            ;;\n        *.md)\n            fixer=fixer\n            cc=\"[//]: #\"\n            ;;\n        *.rst)\n            fixer=fixer\n            cc=\"..\"\n            ;;\n        *.erb)\n            fixer=fixer\n            cs=\"<%# \"\n            cc=\"\"\n            ce=\" %>\"\n            ;;\n        *.liquid)\n            fixer=fixer\n            cs=$'{% comment %}\\n'\n            cc=\"\"\n            ce=$'\\n{% endcomment %}'\n            grepAfter=3\n            grepBefore=1\n            ;;\n        *.textile)\n            fixer=fixer\n            cs=\"###. 
\"\n            cc=\"....\"\n            ce=\n            ;;\n        *.css)\n            fixer=fixer\n            cs=\"/* \"\n            cc=\"\"\n            ce=\" */\"\n            ;;\n        *.coffee)\n            fixer=fixer\n            cs=\"### \"\n            cc=\"\"\n            ce=\" ###\"\n            ;;\n        *.go | *.scss | *.java | *.js)\n            fixer=fixer\n            cc=\"//\"\n            ;;\n        *.sql)\n            fixer=fixer\n            cc=\"--\"\n            ;;\n        *.html | *.svg)\n            fixer=fixer\n            cs=\"<!-- \"\n            cc=\"\"\n            ce=\" -->\"\n            ;;\n        *)\n            cc=\"#\"\n            hashbang=$(head -n1 ${fnm})\n            if [[ ${hashbang} = \"#!/bin/sh\" ]] ||  [[ ${hashbang} = \"#!/bin/bash\" ]]\n            then\n                fixer=fixer\n            fi\n            ;;\n    esac\n    wantGPL=\"${cs:-${cc}${cc:+ }}Copyright (C) The Arvados Authors. All rights reserved.\n${cc}\n${cc}${cc:+ }SPDX-License-Identifier: AGPL-3.0${ce}\"\n    wantApache=\"${cs:-${cc}${cc:+ }}Copyright (C) The Arvados Authors. All rights reserved.\n${cc}\n${cc}${cc:+ }SPDX-License-Identifier: Apache-2.0${ce}\"\n    wantBYSA=\"${cs:-${cc}${cc:+ }}Copyright (C) The Arvados Authors. All rights reserved.\n${cc}\n${cc}${cc:+ }SPDX-License-Identifier: CC-BY-SA-3.0${ce}\"\n    wantBYSAmd=\"[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\"\n    found=$(head -n20 \"$fnm\" | egrep -A${grepAfter} -B${grepBefore} 'Copyright.*All rights reserved.' || true)\n    case ${fnm} in\n        Makefile | build/* | lib/* | tools/* | apps/* | services/* | sdk/cli/bin/crunch-job)\n            want=${wantGPL}\n            ;;\n        crunch_scripts/* | docker/* | sdk/*)\n            want=${wantApache}\n            ;;\n        doc/*)\n            want=${wantBYSA}\n            ;;\n        README.md)\n            want=${wantBYSAmd}\n            ;;\n        *)\n            want=\n            ;;\n    esac\n    case \"$found\" in\n        \"$wantGPL\")\n            ;;\n        \"$wantApache\")\n            ;;\n        \"$wantBYSA\")\n            ;;\n        \"$wantBYSAmd\")\n            ;;\n        \"\")\n            if [[ -z ${found} ]] && [[ -n ${want} ]] && [[ $fix = true ]] && [[ $fixer != \"\" ]]\n            then\n                ${fixer} ${fnm}\n            else\n                echo \"missing copyright notice: $fnm\"\n                result=1\n            fi\n            ;;\n        *)\n            echo \"nonstandard copyright notice: $fnm '${found}'\"\n            result=1\n            ;;\n    esac\ndone <&${COPROC[0]}\nexit $result\n"
  },
  {
    "path": "build/create-plot-data-from-log.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nbuild=$1\nfile=$2\noutputdir=$3\n\nusage() {\n    echo \"./$0 build_number file_to_parse output_dir\"\n    echo \"this script will use the build output to generate *csv and *txt\"\n    echo \"for jenkins plugin plot https://github.com/jenkinsci/plot-plugin/\"\n}\n\nif [ $# -ne 3 ]\nthen\n    usage\n    exit 1\nfi\n\nif [ ! -e $file ]\nthen\n    usage\n    echo \"$file doesn't exist! exiting\"\n    exit 2\nfi\nif [ ! -w $outputdir ]\nthen\n    usage\n    echo \"$outputdir isn't writeable! exiting\"\n    exit 3\nfi\n\n#------------------------------\n## MAXLINE is the amount of lines that will read after the pattern\n## is match (the logfile could be hundred thousands lines long).\n## 1000 should be safe enough to capture all the output of the individual test\nMAXLINES=1000\n\n## TODO: check $build and $file make sense\n\nfor test in \\\n test_Create_and_show_large_collection_with_manifest_text_of_20000000 \\\n test_Create,_show,_and_update_description_for_large_collection_with_manifest_text_of_100000 \\\n test_Create_one_large_collection_of_20000000_and_one_small_collection_of_10000_and_combine_them\ndo\n cleaned_test=$(echo $test | tr -d \",.:;/\")\n (zgrep -i -E -A$MAXLINES \"^[A-Za-z0-9]+Test: $test\" $file && echo \"----\") | tail -n +1 | tail --lines=+3|grep -B$MAXLINES -E \"^-*$\" -m1 > $outputdir/$cleaned_test-$build.txt\n result=$?\n if [ $result -eq 0 ]\n then\n   echo processing  $outputdir/$cleaned_test-$build.txt creating  $outputdir/$cleaned_test.csv\n   echo $(grep ^Completed $outputdir/$cleaned_test-$build.txt | perl -n -e '/^Completed (.*) in [0-9]+ms.*$/;print \"\".++$line.\"-$1,\";' | perl -p -e 's/,$//g'|tr \" \" \"_\" ) >  $outputdir/$cleaned_test.csv\n   echo $(grep ^Completed $outputdir/$cleaned_test-$build.txt | perl -n -e '/^Completed.*in ([0-9]+)ms.*$/;print \"$1,\";' | perl -p -e 's/,$//g' ) >>  $outputdir/$cleaned_test.csv\n else\n   echo \"$test was't found on $file\"\n   cleaned_test=$(echo $test | tr -d \",.:;/\")\n   >  $outputdir/$cleaned_test.csv\n fi\ndone\n"
  },
  {
    "path": "build/docker/python-venv.Dockerfile",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Build this with `build/build_docker_image.py`\n\nFROM debian:bookworm-slim\n\nRUN apt-get update -q \\\n && DEBIAN_FRONTEND=noninteractive apt-get install -qy python3-venv \\\n && python3 -m venv /opt/arvados-py\n\n# The build script sets up our build context with all the Python source\n# packages to install.\nCOPY . /usr/local/src/\n\nRUN /opt/arvados-py/bin/pip install -qq --no-cache-dir --no-input \\\n    -r /usr/local/src/requirements.txt\n\n### Stage 2\nFROM debian:bookworm-slim\nMAINTAINER Arvados Package Maintainers <packaging@arvados.org>\nARG APT_PKGLIST\nARG OLD_PKGNAME=python3-arvados-python-client\n\nRUN apt-get update -q \\\n && DEBIAN_FRONTEND=noninteractive apt-get install -qy python3 $APT_PKGLIST\n\n# The symlinks provide path compatibility with old package-based images.\nRUN adduser --disabled-password --gecos 'Crunch execution user' crunch \\\n && install --directory --owner=crunch --group=crunch --mode=0700 \\\n    /keep /tmp/crunch-src /tmp/crunch-job \\\n && ln -s /opt/arvados-py \"/usr/lib/$OLD_PKGNAME\"\n\nUSER crunch\nENV PATH=/opt/arvados-py/bin:/usr/local/bin:/usr/bin:/bin\n\nCOPY --from=0 /opt/arvados-py/ /opt/arvados-py/\n"
  },
  {
    "path": "build/go-python-package-scripts/postinst",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -e\n\n# Detect rpm-based systems: the exit code of the following command is zero\n# on rpm-based systems\nif /usr/bin/rpm -q -f /usr/bin/rpm >/dev/null 2>&1; then\n    # Red Hat (\"%{...}\" is interpolated at package build time)\n    pkg=\"%{name}\"\n    pkgtype=rpm\n    prefix=\"${RPM_INSTALL_PREFIX}\"\nelse\n    # Debian\n    script=\"$(basename \"${0}\")\"\n    pkg=\"${script%.postinst}\"\n    pkgtype=deb\n    prefix=/usr\nfi\n\ncase \"${pkgtype}-${1}\" in\n    deb-configure | rpm-1)\n        dest_dir=\"/lib/systemd/system\"\n        if ! [ -d \"${dest_dir}\" ]; then\n            exit 0\n        fi\n\n        # Find the unit file we need to install.\n        unit_file=\"${pkg}.service\"\n        for dir in \\\n            \"${prefix}/share/doc/${pkg}\" \\\n            \"${dest_dir}\"; do\n            if [ -e \"${dir}/${unit_file}\" ]; then\n                src_dir=\"${dir}\"\n                break\n            fi\n        done\n        if [ -z \"${src_dir}\" ]; then\n            echo >&2 \"WARNING: postinst script did not find ${unit_file} anywhere.\"\n            exit 0\n        fi\n\n        # Install/update the unit file if necessary.\n        if [ \"${src_dir}\" != \"${dest_dir}\" ]; then\n            cp \"${src_dir}/${unit_file}\" \"${dest_dir}/\" || exit 0\n        fi\n\n        # Enable service, and make sure systemd re-reads the unit\n        # file, in case we changed it.\n        if [ -e /run/systemd/system ]; then\n            systemctl daemon-reload || true\n            eval \"$(systemctl -p UnitFileState show \"${pkg}\")\"\n            case \"${UnitFileState}\" in\n                disabled)\n                    # Failing to enable or start the service is not a\n                    # package error, so don't let errors here\n                    # propagate up.\n                    systemctl enable \"${pkg}\" || true\n                    systemctl start \"${pkg}\" || true\n                    ;;\n                enabled)\n                    systemctl reload-or-try-restart \"${pkg}\" || true\n                    ;;\n            esac\n        fi\n        ;;\nesac\n"
  },
  {
    "path": "build/go-python-package-scripts/prerm",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -e\n\n# Detect rpm-based systems: the exit code of the following command is zero\n# on rpm-based systems\nif /usr/bin/rpm -q -f /usr/bin/rpm >/dev/null 2>&1; then\n    # Red Hat (\"%{...}\" is interpolated at package build time)\n    pkg=\"%{name}\"\n    pkgtype=rpm\n    prefix=\"${RPM_INSTALL_PREFIX}\"\nelse\n    # Debian\n    script=\"$(basename \"${0}\")\"\n    pkg=\"${script%.prerm}\"\n    pkgtype=deb\n    prefix=/usr\nfi\n\ncase \"${pkgtype}-${1}\" in\n    deb-remove | rpm-0)\n        if [ -e /run/systemd/system ]; then\n            systemctl stop \"${pkg}\" || true\n            systemctl disable \"${pkg}\" || true\n        fi\n        if [ -e \"${prefix}/share/doc/${pkg}/${pkg}.service\" ]; then\n            # Unit files from Python packages get installed by\n            # postinst so we have to remove them explicitly here.\n            rm \"/lib/systemd/system/${pkg}/${pkg}.service\" || true\n        fi\n        ;;\nesac\n"
  },
  {
    "path": "build/package-testing/common-test-packages.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -eu\n\nFAIL=0\n\necho\n\nwhile read so && [ -n \"$so\" ]; do\n    if ldd \"$so\" | grep \"not found\" ; then\n        echo \"^^^ Missing while scanning $so ^^^\"\n        FAIL=1\n    fi\ndone <<EOF\n$(find -name '*.so')\nEOF\n\nif test -x \"/jenkins/package-testing/test-package-$1.sh\" ; then\n    if ! \"/jenkins/package-testing/test-package-$1.sh\" ; then\n       FAIL=1\n    fi\nfi\n\nif test $FAIL = 0 ; then\n   echo \"Package $1 passed\"\nfi\n\nexit $FAIL\n"
  },
  {
    "path": "build/package-testing/deb-common-test-packages.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\nset -eu\n\n# Set up\nDEBUG=${ARVADOS_DEBUG:-0}\nSTDOUT_IF_DEBUG=/dev/null\nSTDERR_IF_DEBUG=/dev/null\nDASHQQ_UNLESS_DEBUG=-qq\nif [[ \"$DEBUG\" != \"0\" ]]; then\n  STDOUT_IF_DEBUG=/dev/stdout\n  STDERR_IF_DEBUG=/dev/stderr\n  DASHQQ_UNLESS_DEBUG=\nfi\n\n# Multiple .deb based distros symlink to this script, so extract the target\n# from the invocation path.\ntarget=$(echo $0 | sed 's/.*test-packages-\\([^.]*\\)\\.sh.*/\\1/')\n\nexport ARV_PACKAGES_DIR=\"/arvados/packages/$target\"\n\ndpkg-query --show > \"$ARV_PACKAGES_DIR/$1.before\"\n\napt-get $DASHQQ_UNLESS_DEBUG --allow-insecure-repositories update\n\napt-get $DASHQQ_UNLESS_DEBUG -y --allow-unauthenticated install \"$1\" >\"$STDOUT_IF_DEBUG\" 2>\"$STDERR_IF_DEBUG\" ||\n    install_status=$?\n\ndpkg-query --show > \"$ARV_PACKAGES_DIR/$1.after\"\n\ndiff \"$ARV_PACKAGES_DIR/$1.before\" \"$ARV_PACKAGES_DIR/$1.after\" > \"$ARV_PACKAGES_DIR/$1.diff\" || true\n\nmkdir -p /tmp/opts\ncd /tmp/opts\n\nexport ARV_PACKAGES_DIR=\"/arvados/packages/$target\"\n\nif [[ -f $(ls -t \"$ARV_PACKAGES_DIR/$1\"_*.deb 2>/dev/null | head -n1) ]] ; then\n    debpkg=$(ls -t \"$ARV_PACKAGES_DIR/$1\"_*.deb | head -n1)\nelse\n    debpkg=$(ls -t \"$ARV_PACKAGES_DIR/processed/$1\"_*.deb | head -n1)\nfi\n\ndpkg-deb -x $debpkg .\n\nif [[ \"$DEBUG\" != \"0\" ]]; then\n  find -type f -name '*.so' | while read so; do\n      printf \"\\n== Package dependencies for %s ==\\n\" \"$so\"\n      # dpkg is not fully aware of merged-/usr systems: ldd may list a library\n      # under /lib where dpkg thinks it's under /usr/lib, or vice versa.\n      # awk constructs globs that we pass to `dpkg --search` to be flexible\n      # about which version we find. This could potentially return multiple\n      # results, but doing better probably requires restructuring this whole\n      # code to find and report the best match across multiple dpkg queries.\n      ldd \"$so\" \\\n          | awk 'BEGIN { ORS=\"\\0\" } ($3 ~ /^\\//) {print \"*\" $3}' \\\n          | sort --unique --zero-terminated \\\n          | xargs -0 --no-run-if-empty dpkg --search \\\n          | cut -d: -f1 \\\n          | sort --unique\n  done\nfi\n\ncase \"${install_status:-0}-$1\" in\n    0-* | 100-arvados-api-server )\n        exec /jenkins/package-testing/common-test-packages.sh \"$1\"\n        ;;\n    *)\n        exit \"$install_status\"\n        ;;\nesac\n"
  },
  {
    "path": "build/package-testing/rpm-common-test-packages.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -eu\n\n# Set up\nDEBUG=${ARVADOS_DEBUG:-0}\nSTDOUT_IF_DEBUG=/dev/null\nSTDERR_IF_DEBUG=/dev/null\nif [[ \"$DEBUG\" != \"0\" ]]; then\n  STDOUT_IF_DEBUG=/dev/stdout\n  STDERR_IF_DEBUG=/dev/stderr\nfi\n\ntarget=\"$(basename \"$0\" .sh)\"\ntarget=\"${target##*-}\"\n\nmicrodnf --assumeyes clean all\ntouch /var/lib/rpm/*\n\nexport ARV_PACKAGES_DIR=\"/arvados/packages/$target\"\n\nrpm -qa | sort > \"$ARV_PACKAGES_DIR/$1.before\"\nmicrodnf --assumeyes install \"$1\" || install_status=\"$?\"\nrpm -qa | sort > \"$ARV_PACKAGES_DIR/$1.after\"\ndiff \"$ARV_PACKAGES_DIR/$1\".{before,after} >\"$ARV_PACKAGES_DIR/$1.diff\" || true\n\nmkdir -p /tmp/opts\ncd /tmp/opts\n\nrpm2cpio $(ls -t \"$ARV_PACKAGES_DIR/$1\"-*.rpm | head -n1) | cpio -idm 2>/dev/null\n\nif [[ \"$DEBUG\" != \"0\" ]]; then\n  find -name '*.so' | while read so; do\n      echo -e \"\\n== Packages dependencies for $so ==\"\n      ldd \"$so\" \\\n          | awk '($3 ~ /^\\//){print $3}' | sort -u | xargs rpm -qf | sort -u\n  done\nfi\n\ncase \"${install_status:-0}-$1\" in\n    0-* )\n        # Install other packages alongside to test for build id conflicts.\n        # This can be removed after we have test-provision-rocky8, #21426.\n        microdnf --assumeyes install arvados-client arvados-server python3-arvados-python-client\n        ;;\n    1-arvados-api-server )\n        ;;\n    *)\n        exit \"$install_status\"\n        ;;\nesac\n\nexec /jenkins/package-testing/common-test-packages.sh \"$1\"\n"
  },
  {
    "path": "build/package-testing/test-package-arvados-api-server.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -e\n\nPACKAGE_NAME=arvados-api-server\nAPI_GEMS_LS=\"$(mktemp --tmpdir api-gems-XXXXXX.list)\"\ntrap 'rm -f \"$API_GEMS_LS\"' EXIT INT TERM QUIT\n\ncd \"/var/www/${PACKAGE_NAME%-server}\"\n\ncat_dropins() {\n    cat /lib/systemd/system/arvados-railsapi.service.d/*.conf\n}\n\ncheck_gem_dirs() {\n    local when=\"$1\"; shift\n    env -C shared/vendor_bundle/ruby ls -1 >\"$API_GEMS_LS\"\n    local ls_count=\"$(wc -l <\"$API_GEMS_LS\")\"\n    if [ \"$ls_count\" = 1 ]; then\n        return 0\n    fi\n    echo \"Package $PACKAGE_NAME FAILED: $ls_count gem directories created after $when:\" >&2\n    case \"${ARVADOS_DEBUG:-0}\" in\n        0) cat \"$API_GEMS_LS\" >&2 ;;\n        *) env -C shared/vendor_bundle/ruby find -maxdepth 3 -type d -ls >&2 ;;\n    esac\n    return 11\n}\n\nexpect_grep() {\n    local expect_exit=\"$1\"; shift\n    local actual_exit=0\n    grep \"$@\" >/dev/null || actual_exit=$?\n    if [ \"$actual_exit\" -eq \"$expect_exit\" ]; then\n        return 0\n    fi\n    echo \"Package $PACKAGE_NAME FAILED: \\`grep\" \"$@\" \"\\` returned exit code $actual_exit\" >&2\n    case \"$actual_exit\" in\n        0) return 1 ;;\n        *) return \"$actual_exit\" ;;\n    esac\n}\n\n# We intentionally don't hardcode a Bundler path here because other parts of our\n# infrastructure expect `bundle` to be available system-wide after installation.\n# After that infrastructure is fixed, this test can invoke `bundle` the same\n# way the postinst script does.\nenv -C current bundle list >\"$ARV_PACKAGES_DIR/$PACKAGE_NAME.gems\"\ncheck_gem_dirs \"initial install\"\n\ncase \"$TARGET\" in\n    debian*|ubuntu*)\n        cat_dropins | expect_grep 0 -x SupplementaryGroups=www-data\n        ;;\n    rocky*)\n        cat_dropins | expect_grep 1 \"^SupplementaryGroups=\"\n        microdnf --assumeyes install nginx\n        microdnf --assumeyes reinstall \"$PACKAGE_NAME\" || test $? -eq 1\n        check_gem_dirs \"package reinstall\"\n        cat_dropins | expect_grep 0 -x SupplementaryGroups=nginx\n        ;;\n    *)\n        echo \"$0: WARNING: Unknown target '$TARGET'.\" >&2\n        ;;\nesac\n"
  },
  {
    "path": "build/package-testing/test-package-arvados-client.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -e\n\narvados-client -version >/dev/null\n"
  },
  {
    "path": "build/package-testing/test-package-arvados-docker-cleaner.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -e\n\narvados-docker-cleaner -h >/dev/null\n"
  },
  {
    "path": "build/package-testing/test-package-python3-arvados-cwl-runner.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -e\n\narvados-cwl-runner --version >/dev/null\n"
  },
  {
    "path": "build/package-testing/test-package-python3-arvados-python-client.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\narv-put --version >/dev/null || exit\n\n. /usr/lib/python3-arvados-python-client/bin/activate\npython <<EOF\nimport arvados\nprint(\"Successfully imported arvados\")\nEOF\n"
  },
  {
    "path": "build/package-testing/test-package-python3-crunchstat-summary.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -e\n\ncrunchstat-summary -h >/dev/null\n"
  },
  {
    "path": "build/package-testing/test-package-python3-python-arvados-fuse.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -e\n\narv-mount --version\n"
  },
  {
    "path": "build/pypkg_info.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\"\"\"pypkg_info.py - Introspect installed Python packages\n\nThis tool can read metadata about any Python package installed in the current\nenvironment and report it out in various formats. We use this mainly to pass\ninformation through when building distribution packages.\n\"\"\"\n\nimport argparse\nimport enum\nimport importlib.metadata\nimport os\nimport sys\n\nfrom pathlib import PurePath\n\nclass RawFormat:\n    def format_metadata(self, key, value):\n        return value\n\n    def format_path(self, path):\n        return str(path)\n\n\nclass FPMFormat(RawFormat):\n    PYTHON_METADATA_MAP = {\n        'summary': 'description',\n    }\n\n    def format_metadata(self, key, value):\n        key = key.lower()\n        key = self.PYTHON_METADATA_MAP.get(key, key)\n        return f'--{key}={value}'\n\n\nclass Formats(enum.Enum):\n    RAW = RawFormat\n    FPM = FPMFormat\n\n    @classmethod\n    def from_arg(cls, arg):\n        try:\n            return cls[arg.upper()]\n        except KeyError:\n            raise ValueError(f\"unknown format {arg!r}\") from None\n\n\ndef report_binfiles(args):\n    bin_names = [\n        PurePath('bin', path.name)\n        for pkg_name in args.package_names\n        for path in importlib.metadata.distribution(pkg_name).files\n        if path.parts[-3:-1] == ('..', 'bin')\n    ]\n    fmt = args.format.value().format_path\n    return (fmt(path) for path in bin_names)\n\ndef report_metadata(args):\n    dist = importlib.metadata.distribution(args.package_name)\n    fmt = args.format.value().format_metadata\n    for key in args.metadata_key:\n        yield fmt(key, dist.metadata.get(key, ''))\n\ndef unescape_str(arg):\n    arg = arg.replace('\\'', '\\\\\\'')\n    return eval(f\"'''{arg}'''\", {})\n\ndef parse_arguments(arglist=None):\n    parser = argparse.ArgumentParser()\n    parser.set_defaults(action=None)\n    format_names = ', '.join(fmt.name.lower() for fmt in Formats)\n    parser.add_argument(\n        '--format', '-f',\n        choices=list(Formats),\n        default=Formats.RAW,\n        type=Formats.from_arg,\n        help=f\"Output format. Choices are: {format_names}\",\n    )\n    parser.add_argument(\n        '--delimiter', '-d',\n        default='\\n',\n        type=unescape_str,\n        help=\"Line ending. Python backslash escapes are supported. Default newline.\",\n    )\n    subparsers = parser.add_subparsers()\n\n    binfiles = subparsers.add_parser('binfiles')\n    binfiles.set_defaults(action=report_binfiles)\n    binfiles.add_argument(\n        'package_names',\n        nargs=argparse.ONE_OR_MORE,\n    )\n\n    metadata = subparsers.add_parser('metadata')\n    metadata.set_defaults(action=report_metadata)\n    metadata.add_argument(\n        'package_name',\n    )\n    metadata.add_argument(\n        'metadata_key',\n        nargs=argparse.ONE_OR_MORE,\n    )\n\n    args = parser.parse_args()\n    if args.action is None:\n        parser.error(\"subcommand is required\")\n    return args\n\ndef main(arglist=None):\n    args = parse_arguments(arglist)\n    try:\n        for line in args.action(args):\n            print(line, end=args.delimiter)\n    except importlib.metadata.PackageNotFoundError as error:\n        print(f\"error: package not found: {error.args[0]}\", file=sys.stderr)\n        return os.EX_NOTFOUND\n    else:\n        return os.EX_OK\n\nif __name__ == '__main__':\n    exit(main())\n"
  },
  {
    "path": "build/rails-package-scripts/README.md",
    "content": "[//]: # Copyright (C) The Arvados Authors. All rights reserved.\n[//]: #\n[//]: # SPDX-License-Identifier: AGPL-3.0\n\nWhen run-build-packages.sh builds a Rails package, it generates the package's pre/post-inst/rm scripts by concatenating `arvados-api-server.sh` to define common variables, then the actual step script. Especially when this infrastructure was shared with the old Rails Workbench, this seemed like the least worst option to share code between these files and packages.  More advanced code generation would've been too much trouble to integrate into our build process at this time.  Trying to inject portions of files into other files seemed error-prone and likely to introduce bugs to the end result.\n"
  },
  {
    "path": "build/rails-package-scripts/arvados-api-server.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# This file declares variables common to all scripts for one Rails package.\n\nPACKAGE_NAME=arvados-api-server\nINSTALL_PATH=/var/www/arvados-api\nCONFIG_PATH=/etc/arvados/api\nDOC_URL=\"http://doc.arvados.org/install/install-api-server.html#configure\"\nRELEASE_PATH=$INSTALL_PATH/current\nRELEASE_CONFIG_PATH=$RELEASE_PATH/config\nSHARED_PATH=$INSTALL_PATH/shared\n"
  },
  {
    "path": "build/rails-package-scripts/postinst.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# This code runs after package variable definitions.\n\nset -e\n\nfor DISTRO_FAMILY in $(. /etc/os-release && echo \"${ID:-} ${ID_LIKE:-}\"); do\n    case \"$DISTRO_FAMILY\" in\n        debian)\n            RESETUP_CMD=\"dpkg-reconfigure $PACKAGE_NAME\"\n            break ;;\n        rhel)\n            RESETUP_CMD=\"dnf reinstall $PACKAGE_NAME\"\n            break ;;\n    esac\ndone\nif [ -z \"$RESETUP_CMD\" ]; then\n   echo \"$PACKAGE_NAME postinst skipped: don't recognize the distribution from /etc/os-release\" >&2\n   exit 0\nfi\n# This will be set to a command path after we install the version we need.\nBUNDLE=\n\n# systemd_ctl is just \"systemctl if we booted with systemd, otherwise a noop.\"\n# This makes the package installable in Docker containers, albeit without any\n# service deployment.\nif [ -d /run/systemd/system ]; then\n    systemd_ctl() { systemctl \"$@\"; }\nelse\n    systemd_ctl() { true; }\nfi\n\nsystemd_quote() {\n    if [ $# -ne 1 ]; then\n        echo \"error: systemd_quote requires exactly one argument\" >&2\n        return 2\n    fi\n    # See systemd.syntax(7) - Use double quotes with backslash escapes\n    echo \"$1\" | sed -re 's/[\\\\\"]/\\\\\\0/g; s/^/\"/; s/$/\"/'\n}\n\nrun_and_report() {\n    # Usage: run_and_report ACTION_MSG CMD\n    # This is the usual wrapper that prints ACTION_MSG, runs CMD, then writes\n    # a message about whether CMD succeeded or failed.  Returns the exit code\n    # of CMD.\n    local action_message=\"$1\"; shift\n    local retcode=0\n    echo -n \"$action_message...\"\n    if \"$@\"; then\n        echo \" done.\"\n    else\n        retcode=$?\n        echo \" failed.\"\n    fi\n    return $retcode\n}\n\nreport_not_ready() {\n    local exitcode=\"$1\"; shift\n    local reason=\"$1\"; shift\n    local doc_url=\"${1:-}\"\n    case \"$doc_url\" in\n        http://* | https://* ) ;;\n        /*) doc_url=\"https://doc.arvados.org${doc_url}\" ;;\n        \\#*) doc_url=\"https://doc.arvados.org/install/install-api-server.html${doc_url}\" ;;\n        *) doc_url=\"https://doc.arvados.org/install/${doc_url}\" ;;\n    esac\n    cat >&2 <<EOF\nNOTE: The $PACKAGE_NAME package was not configured completely because\n$reason.\nPlease refer to the documentation for next steps:\n  <$doc_url>\n\nAfter you do that, resume $PACKAGE_NAME setup by running:\n  $RESETUP_CMD\nEOF\n    exit \"${exitcode:-20}\"\n}\n\nsetup_confdirs() {\n    local confdir confgrp\n    case \"$WWW_OWNER\" in\n        \"\") confgrp=root ;;\n        *) confgrp=\"$WWW_OWNER\" ;;\n    esac\n    for confdir in \"$@\"; do\n        if [ ! 
-d \"$confdir\" ]; then\n            install -d -g \"$confgrp\" -m 0750 \"$confdir\"\n        fi\n    done\n}\n\nsetup_conffile() {\n    # Usage: setup_conffile CONFFILE_PATH [SOURCE_PATH]\n    # Both paths are relative to RELEASE_CONFIG_PATH.\n    # This function will try to safely ensure that a symbolic link for\n    # the configuration file points from RELEASE_CONFIG_PATH to CONFIG_PATH.\n    # If SOURCE_PATH is given, this function will try to install that file as\n    # the configuration file in CONFIG_PATH, and return 1 if the file in\n    # CONFIG_PATH is unmodified from the source.\n    local conffile_relpath=\"$1\"; shift\n    local conffile_source=\"$1\"\n    local release_conffile=\"$RELEASE_CONFIG_PATH/$conffile_relpath\"\n    local etc_conffile=\"$CONFIG_PATH/$(basename \"$conffile_relpath\")\"\n\n    # Note that -h can return true and -e will return false simultaneously\n    # when the target is a dangling symlink.  We're okay with that outcome,\n    # so check -h first.\n    if [ ! -h \"$release_conffile\" ]; then\n        if [ ! -e \"$release_conffile\" ]; then\n            ln -s \"$etc_conffile\" \"$release_conffile\"\n        # If there's a config file in /var/www identical to the one in /etc,\n        # overwrite it with a symlink after porting its permissions.\n        elif cmp --quiet \"$release_conffile\" \"$etc_conffile\"; then\n            local ownership=\"$(stat -c \"%u:%g\" \"$release_conffile\")\"\n            local owning_group=\"${ownership#*:}\"\n            if [ 0 != \"$owning_group\" ]; then\n                chgrp \"$owning_group\" \"$CONFIG_PATH\" /etc/arvados\n            fi\n            chown \"$ownership\" \"$etc_conffile\"\n            chmod --reference=\"$release_conffile\" \"$etc_conffile\"\n            ln --force -s \"$etc_conffile\" \"$release_conffile\"\n        fi\n    fi\n\n    if [ -n \"$conffile_source\" ]; then\n        if [ ! -e \"$etc_conffile\" ]; then\n            install -g \"$WWW_OWNER\" -m 0640 \\\n                    \"$RELEASE_CONFIG_PATH/$conffile_source\" \"$etc_conffile\"\n            return 1\n        # Even if $etc_conffile already existed, it might be unmodified from\n        # the source.  
This is especially likely when a user installs, updates\n        # database.yml, then reconfigures before they update application.yml.\n        # Use cmp to be sure whether $etc_conffile is modified.\n        elif cmp --quiet \"$RELEASE_CONFIG_PATH/$conffile_source\" \"$etc_conffile\"; then\n            return 1\n        fi\n    fi\n}\n\nprepare_database() {\n  # Prevent PostgreSQL from trying to page output\n  unset PAGER\n  DB_MIGRATE_STATUS=`\"$BUNDLE\" exec bin/rake db:migrate:status 2>&1 || true`\n  if echo \"$DB_MIGRATE_STATUS\" | grep -qF 'Schema migrations table does not exist yet.'; then\n      # The database exists, but the migrations table doesn't.\n      run_and_report \"Setting up database\" \"$BUNDLE\" exec bin/rake db:schema:load db:seed\n  elif echo \"$DB_MIGRATE_STATUS\" | grep -q '^database: '; then\n      run_and_report \"Running db:migrate\" \"$BUNDLE\" exec bin/rake db:migrate db:seed\n  elif echo \"$DB_MIGRATE_STATUS\" | grep -q 'database .* does not exist'; then\n      run_and_report \"Running db:setup\" \"$BUNDLE\" exec bin/rake db:setup\n  else\n      # We don't have enough configuration to even check the database.\n      return 1\n  fi\n}\n\ncase \"$DISTRO_FAMILY\" in\n    debian) WWW_OWNER=www-data ;;\n    rhel) WWW_OWNER=\"$(id --group --name nginx || true)\" ;;\nesac\n\n# Before we do anything else, make sure some directories and files are in place\nif [ ! -e $SHARED_PATH/log ]; then mkdir -p $SHARED_PATH/log; fi\nif [ ! -e $RELEASE_PATH/tmp ]; then mkdir -p $RELEASE_PATH/tmp; fi\nif [ ! -e $RELEASE_PATH/log ]; then ln -s $SHARED_PATH/log $RELEASE_PATH/log; fi\nif [ ! -e $SHARED_PATH/log/production.log ]; then touch $SHARED_PATH/log/production.log; fi\n\ncd \"$RELEASE_PATH\"\nexport RAILS_ENV=production\n# Bundler behaves inconsistently when gems are available system-wide.\n# Avoid those bugs by starting with a GEM_HOME that *only* contains Bundler.\nexport GEM_HOME=\"$SHARED_PATH/bundler\"\nexport GEM_PATH=\"$GEM_HOME\"\n# We still need to set directory switches because RHEL configures `gem` with\n# built-in options that override the environment variables.\nrun_and_report \"Installing bundler\" gem install \\\n               --bindir \"$GEM_HOME/bin\" \\\n               --install-dir \"$GEM_HOME\" \\\n               --version \"~> 2.5.0\" \\\n               bundler\nBUNDLE=\"$GEM_HOME/bin/bundle\"\nrun_and_report \"Running bundle install\" \"$BUNDLE\" install --prefer-local --quiet\nrun_and_report \"Verifying bundle is complete\" \"$BUNDLE\" exec true\n# Some of our infrastructure expects `bundler` to be available system-wide\n# after installing arvados-api-server. Ensure that's the case.\n# TODO: Make the other infrastructure stop doing that, then delete this code.\nfor bcmd in bundle bundler; do\n    if ! command -v \"$bcmd\" >/dev/null 2>&1; then\n        cat >\"/usr/local/bin/$bcmd\" <<EOF\n#!/bin/sh\nGEM_HOME=\"$GEM_HOME\"\nGEM_PATH=\"$GEM_PATH\"\nexport GEM_HOME GEM_PATH\nexec \"\\$GEM_HOME/bin/$bcmd\" \"\\$@\"\nEOF\n        chmod a+rx \"/usr/local/bin/$bcmd\"\n    fi\ndone\n\npassenger=\"$(\"$BUNDLE\" exec gem contents passenger | grep -E '/(bin|exe)/passenger$' | tail -n1)\"\nif ! 
[ -x \"$passenger\" ]; then\n    echo \"Error: failed to find \\`passenger\\` command after installing bundle\" >&2\n    exit 12\nfi\n\"$BUNDLE\" exec \"$passenger-config\" build-native-support\n# `passenger-config install-standalone-runtime` downloads an agent, but at\n# least with Passenger 6.0.23 (late 2024), that version tends to segfault.\n# Compiling our own is safer.\n\"$BUNDLE\" exec \"$passenger-config\" compile-agent --auto --optimize\n\"$BUNDLE\" exec \"$passenger-config\" install-standalone-runtime --auto --brief\n\necho -n \"Creating symlinks to configuration in $CONFIG_PATH ...\"\nsetup_confdirs /etc/arvados \"$CONFIG_PATH\"\nsetup_conffile environments/production.rb environments/production.rb.example \\\n    || true\n# Rails 5.2 does not tolerate dangling symlinks in the initializers\n# directory, and this one can still be there, left over from a previous\n# version of the API server package.\nrm -f $RELEASE_PATH/config/initializers/omniauth.rb\necho \"... done.\"\n\necho -n \"Extending systemd unit configuration ...\"\nif [ -z \"$WWW_OWNER\" ]; then\n    systemd_group=\"%N\"\nelse\n    systemd_group=\"$(systemd_quote \"$WWW_OWNER\")\"\nfi\ninstall -d /lib/systemd/system/arvados-railsapi.service.d\n# The 20 prefix is chosen so most user overrides should come after, which\n# is what most admins will expect, but there's still space to put drop-ins\n# earlier.\ncat >/lib/systemd/system/arvados-railsapi.service.d/20-postinst.conf <<EOF\n[Service]\nEnvironment=GEM_HOME=$(systemd_quote \"$GEM_HOME\")\nEnvironment=GEM_PATH=$(systemd_quote \"$GEM_PATH\")\nExecStartPre=+/bin/chgrp $systemd_group log tmp\nExecStartPre=+-/bin/chgrp $systemd_group \\${PASSENGER_LOG_FILE}\nExecStart=\nExecStart=$(systemd_quote \"$BUNDLE\") exec $(systemd_quote \"$passenger\") start --daemonize --pid-file %t/%N/passenger.pid\nExecStop=\nExecStop=$(systemd_quote \"$BUNDLE\") exec $(systemd_quote \"$passenger\") stop --pid-file %t/%N/passenger.pid\nExecReload=\nExecReload=$(systemd_quote \"$BUNDLE\") exec $(systemd_quote \"$passenger-config\") reopen-logs\n${WWW_OWNER:+SupplementaryGroups=$WWW_OWNER}\nEOF\nsystemd_ctl daemon-reload\necho \"... done.\"\n\n# warn about config errors (deprecated/removed keys from\n# previous version, etc)\nif ! run_and_report \"Checking configuration for completeness\" \"$BUNDLE\" exec bin/rake config:check; then\n    report_not_ready 21 \"you must add required configuration settings to /etc/arvados/config.yml\" \"#update-config\"\nelif ! prepare_database; then\n    report_not_ready 22 \"database setup could not be completed\"\nelse\n    systemd_ctl try-restart arvados-railsapi.service\nfi\n"
  },
  {
    "path": "build/rails-package-scripts/postrm.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# This code runs after package variable definitions.\n\nset -e\n\npurge () {\n  rm -rf $SHARED_PATH/vendor_bundle\n  rm -rf $SHARED_PATH/log\n  rm -rf $CONFIG_PATH\n  rmdir $SHARED_PATH || true\n  rmdir $INSTALL_PATH || true\n}\n\nif [ \"$1\" = 'purge' ]; then\n  # This is a debian-based system and purge was requested\n  purge\nelif [ \"$1\" = \"0\" ]; then\n  # This is an rpm-based system, no guarantees are made, always purge\n  # Apparently yum doesn't actually remember what it installed.\n  # Clean those files up here, then purge.\n  rm -rf $RELEASE_PATH\n  purge\nfi\n"
  },
  {
    "path": "build/rails-package-scripts/prerm.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# This code runs after package variable definitions.\n\nremove () {\n  rm -f $RELEASE_PATH/config/database.yml\n  rm -f $RELEASE_PATH/config/environments/production.rb\n  rm -f $RELEASE_PATH/config/application.yml\n  # Old API server configuration file.\n  rm -rf $RELEASE_PATH/public/assets/\n  rm -rf $RELEASE_PATH/tmp\n  rm -rf $RELEASE_PATH/.bundle\n  rm -rf $RELEASE_PATH/log\n  rm -rf /lib/systemd/system/arvados-railsapi.service.d\n}\n\nif [ \"$1\" = 'remove' ]; then\n  # This is a debian-based system and removal was requested\n  remove\nelif [ \"$1\" = \"0\" ]; then\n  # This is an rpm-based system and zero versions will remain after erasure\n  remove\nfi\n"
  },
  {
    "path": "build/requirements.build-packages.txt",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n#\n# Python requirements for run-build-packages\n\n-r requirements.build.txt\n\npiprepo\n"
  },
  {
    "path": "build/requirements.build.txt",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n#\n# Common requirements to build and install Arvados Python components\n\nbuild\n\n# 20.3 introduced the modern dependency resolver which we rely on.\npip >= 20.3\n\n# Technically this shouldn't be required: the build process should get the\n# build requirements listed in pyproject.toml. But it's nice to have it\n# cached.\nsetuptools ~= 80.9\n\nwheel\n"
  },
  {
    "path": "build/requirements.tests.txt",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n#\n# Python requirements for run-tests\n\n### Requirements for run-tests.sh itself\n-r requirements.build.txt\n\n# Required by sdk/python/tests/run_test_server.py\n# which is run directly by run-tests.sh\nPyYAML\n\n# yq is used by run-tests.sh directly and controller tests\nyq ~= 3.4\n\n### Requirements for Python tests generally\n# Required by older, unittest-style Python tests\n# Prefer using pytest.mark.parametrize in new tests\nparameterized\n\n# Our chosen Python testing tool\npytest\n\n### Requirements for individual tests\n# Run by CWL integration tests\ncwltest >= 2.5.20241122133319, < 3.0\n\n# Required to build Python SDK documentation\npdoc ~= 16.0\n\n# Used by controller and keep-web tests\ns3cmd\n"
  },
  {
    "path": "build/run-build-packages-all-targets.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nread -rd \"\\000\" helpmessage <<EOF\n$(basename $0): Orchestrate run-build-packages.sh for every target\n\nSyntax:\n        WORKSPACE=/path/to/arvados $(basename $0) [options]\n\nOptions:\n\n--command\n    Build command to execute (default: use built-in Docker image command)\n--test-packages\n    Run package install tests\n--debug\n    Output debug information (default: false)\n--build-version <string>\n    Version to build (default:\n    \\$ARVADOS_BUILDING_VERSION-\\$ARVADOS_BUILDING_ITERATION or\n    0.1.timestamp.commithash)\n\nWORKSPACE=path         Path to the Arvados source tree to build packages from\n\nEOF\n\nif ! [[ -n \"$WORKSPACE\" ]]; then\n  echo >&2 \"$helpmessage\"\n  echo >&2\n  echo >&2 \"Error: WORKSPACE environment variable not set\"\n  echo >&2\n  exit 1\nfi\n\nif ! [[ -d \"$WORKSPACE\" ]]; then\n  echo >&2 \"$helpmessage\"\n  echo >&2\n  echo >&2 \"Error: $WORKSPACE is not a directory\"\n  echo >&2\n  exit 1\nfi\n\nset -e\n\nPARSEDOPTS=$(getopt --name \"$0\" --longoptions \\\n    help,test-packages,debug,command:,only-test:,build-version: \\\n    -- \"\" \"$@\")\nif [ $? -ne 0 ]; then\n    exit 1\nfi\n\nCOMMAND=\nDEBUG=\nTEST_PACKAGES=\nONLY_TEST=\n\neval set -- \"$PARSEDOPTS\"\nwhile [ $# -gt 0 ]; do\n    case \"$1\" in\n        --help)\n            echo >&2 \"$helpmessage\"\n            echo >&2\n            exit 1\n            ;;\n        --debug)\n            DEBUG=\"--debug\"\n            ;;\n        --command)\n            COMMAND=\"$2\"; shift\n            ;;\n        --test-packages)\n            TEST_PACKAGES=\"--test-packages\"\n            ;;\n        --only-test)\n            ONLY_TEST=\"$1 $2\"; shift\n            ;;\n        --build-version)\n            ARVADOS_BUILDING_VERSION=\"$2\"; shift\n            ;;\n        --)\n            if [ $# -gt 1 ]; then\n                echo >&2 \"$0: unrecognized argument '$2'. Try: $0 --help\"\n                exit 1\n            fi\n            ;;\n    esac\n    shift\ndone\n\ncd $(dirname $0)\n\nFINAL_EXITCODE=0\n\nfor pkgtest_path in package-testing/test-packages-*.sh; do\n    target=\"$(basename \"${pkgtest_path##*-}\" .sh)\"\n    if ./run-build-packages-one-target.sh --target \"$target\" --command \"$COMMAND\" --build-version \"$ARVADOS_BUILDING_VERSION\" $DEBUG $TEST_PACKAGES $ONLY_TEST ; then\n        true\n    else\n        FINAL_EXITCODE=$?\n        echo\n        echo \"Build packages failed for $(basename $(dirname \"$dockerfile_path\"))\"\n        echo\n    fi\ndone\n\nif test $FINAL_EXITCODE != 0 ; then\n    echo\n    echo \"Build packages failed with code $FINAL_EXITCODE\" >&2\n    echo\nfi\n\nexit $FINAL_EXITCODE\n"
  },
  {
    "path": "build/run-build-packages-one-target.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nread -rd \"\\000\" helpmessage <<EOF\n$(basename $0): Orchestrate run-build-packages.sh for one target\n\nSyntax:\n        WORKSPACE=/path/to/arvados $(basename $0) --target <target> [options]\n\n--target <target>\n    Distribution to build packages for\n--command\n    Build command to execute (default: use built-in Docker image command)\n--test-packages\n    Run package install test script \"test-packages-[target].sh\"\n--debug\n    Output debug information (default: false)\n--only-build <package>\n    Build only a specific package\n--only-test <package>\n    Test only a specific package\n--force-build\n    Build even if the package exists upstream or if it has already been\n    built locally\n--force-test\n    Test even if there is no new untested package\n--build-version <string>\n    Version to build (default:\n    \\$ARVADOS_BUILDING_VERSION-\\$ARVADOS_BUILDING_ITERATION or\n    0.1.timestamp.commithash)\n--skip-docker-build\n    Don't try to build Docker images\n\nWORKSPACE=path         Path to the Arvados source tree to build packages from\n\nEOF\n\nset -e\n\nif ! [[ -n \"$WORKSPACE\" ]]; then\n  echo >&2 \"$helpmessage\"\n  echo >&2\n  echo >&2 \"Error: WORKSPACE environment variable not set\"\n  echo >&2\n  exit 1\nfi\n\nif ! [[ -d \"$WORKSPACE\" ]]; then\n  echo >&2 \"$helpmessage\"\n  echo >&2\n  echo >&2 \"Error: $WORKSPACE is not a directory\"\n  echo >&2\n  exit 1\nfi\n\nPARSEDOPTS=$(getopt --name \"$0\" --longoptions \\\n    help,debug,test-packages,target:,command:,only-test:,force-test,only-build:,force-build,arch:,build-version:,skip-docker-build \\\n    -- \"\" \"$@\")\nif [ $? -ne 0 ]; then\n    exit 1\nfi\n\nFORCE_BUILD=0\nCOMMAND=run-build-packages.sh\nDEBUG=\nTARGET=\n\neval set -- \"$PARSEDOPTS\"\nwhile [ $# -gt 0 ]; do\n    case \"$1\" in\n        --help)\n            echo >&2 \"$helpmessage\"\n            echo >&2\n            exit 1\n            ;;\n        --target)\n            TARGET=\"$2\"; shift\n            ;;\n        --only-test)\n            test_packages=1\n            testing_one_package=1\n            packages=\"$2\"; shift\n            ;;\n        --force-test)\n            FORCE_TEST=true\n            ;;\n        --force-build)\n            FORCE_BUILD=1\n            ;;\n        --only-build)\n            ONLY_BUILD=\"$2\"; shift\n            ;;\n        --arch)\n            case \"$2\" in\n                amd64) ;;\n                *)\n                    printf \"FATAL: --arch '%s' is not supported\" \"$2\" >&2\n                    exit 2\n                    ;;\n            esac\n            ARCH=\"$2\"; shift\n            ;;\n        --debug)\n            DEBUG=\" --debug\"\n            ARVADOS_DEBUG=\"1\"\n            ;;\n        --command)\n            COMMAND=\"$2\"; shift\n            ;;\n        --test-packages)\n            test_packages=1\n            ;;\n        --build-version)\n            if [[ -z \"$2\" ]]; then\n                :\n            elif ! [[ \"$2\" =~ (.*)-(.*) ]]; then\n                echo >&2 \"FATAL: --build-version '$2' does not include an iteration. Try '${2}-1'?\"\n                exit 1\n            elif ! 
[[ \"$2\" =~ ^[0-9]+\\.[0-9]+\\.[0-9]+(\\.[0-9]+|)(~rc[0-9]+|~dev[0-9]+|)-[0-9]+$ ]]; then\n                echo >&2 \"FATAL: --build-version '$2' is invalid, must match pattern ^[0-9]+\\.[0-9]+\\.[0-9]+(\\.[0-9]+|)(~rc[0-9]+|~dev[0-9]+|)-[0-9]+$\"\n                exit 1\n            else\n                [[ \"$2\" =~ (.*)-(.*) ]]\n                ARVADOS_BUILDING_VERSION=\"${BASH_REMATCH[1]}\"\n                ARVADOS_BUILDING_ITERATION=\"${BASH_REMATCH[2]}\"\n            fi\n            shift\n            ;;\n        --skip-docker-build)\n            SKIP_DOCKER_BUILD=1\n\t    ;;\n        --)\n            if [ $# -gt 1 ]; then\n                echo >&2 \"$0: unrecognized argument '$2'. Try: $0 --help\"\n                exit 1\n            fi\n            ;;\n    esac\n    shift\ndone\n\nset -e\norig_umask=\"$(umask)\"\n\nif [[ -z \"$TARGET\" ]]; then\n    echo \"FATAL: --target must be specified\" >&2\n    exit 2\nelif [[ ! -e \"$WORKSPACE/build/package-testing/test-packages-$TARGET.sh\" ]]; then\n    echo \"FATAL: unknown build target '$TARGET'\" >&2\n    exit 2\nfi\n\nif [[ -n \"$ARVADOS_BUILDING_VERSION\" ]]; then\n    echo \"build version='$ARVADOS_BUILDING_VERSION', package iteration='$ARVADOS_BUILDING_ITERATION'\"\nfi\n\nif [[ -n \"$test_packages\" ]]; then\n  # Packages are built world-readable, so package indexes should be too,\n  # especially because since 2022 apt uses an unprivileged user `_apt` to\n  # retrieve everything.  Ensure it has permissions to read the packages\n  # when mounted as a volume inside the Docker container.\n  chmod a+rx \"$WORKSPACE\" \"$WORKSPACE/packages\" \"$WORKSPACE/packages/$TARGET\"\n  umask 022\n  if [[ -n \"$(find $WORKSPACE/packages/$TARGET -name '*.rpm')\" ]] ; then\n    CREATEREPO=\"$(command -v createrepo createrepo_c | tail -n1)\"\n    if [[ -z \"$CREATEREPO\" ]]; then\n      echo >&2\n      echo >&2 \"Error: please install createrepo. E.g. sudo apt install createrepo-c\"\n      echo >&2\n      exit 1\n    fi\n    \"$CREATEREPO\" $WORKSPACE/packages/$TARGET\n  fi\n\n  if [[ -n \"$(find $WORKSPACE/packages/$TARGET -name '*.deb')\" ]] ; then\n    set +e\n    /usr/bin/which dpkg-scanpackages >/dev/null\n    if [[ \"$?\" != \"0\" ]]; then\n      echo >&2\n      echo >&2 \"Error: please install dpkg-dev. E.g. sudo apt-get install dpkg-dev\"\n      echo >&2\n      exit 1\n    fi\n    /usr/bin/which apt-ftparchive >/dev/null\n    if [[ \"$?\" != \"0\" ]]; then\n      echo >&2\n      echo >&2 \"Error: please install apt-utils. E.g. sudo apt-get install apt-utils\"\n      echo >&2\n      exit 1\n    fi\n    set -e\n    (cd $WORKSPACE/packages/$TARGET\n      dpkg-scanpackages --multiversion .  2> >(grep -v 'warning' 1>&2) | tee Packages | gzip -c > Packages.gz\n      apt-ftparchive -o APT::FTPArchive::Release::Origin=Arvados release . 
> Release\n    )\n  fi\n\n  COMMAND=\"/jenkins/package-testing/test-packages-$TARGET.sh\"\n  IMAGE=\"arvados/package-test:$TARGET\"\n  umask \"$orig_umask\"\nelse\n  IMAGE=\"arvados/build:$TARGET\"\n  COMMAND=\"bash /jenkins/$COMMAND --target $TARGET$DEBUG\"\nfi\n\nJENKINS_DIR=$(dirname \"$(readlink -e \"$0\")\")\n\nif [[ \"$SKIP_DOCKER_BUILD\" != 1 ]] ; then\n  env -C \"$WORKSPACE/tools/ansible\" ansible-galaxy install -r requirements.yml\n  declare -a ansible_opts=()\n  if [[ -n \"$test_packages\" ]]; then\n      ansible_opts+=(\n          --extra-vars=arvados_build_playbook=setup-package-tests.yml\n          --limit=\"arvados_pkgtest_$TARGET\"\n      )\n  else\n      ansible_opts+=(\n          --extra-vars=arvados_build_playbook=install-dev-tools.yml\n          --limit=\"arvados_pkgbuild_$TARGET\"\n      )\n  fi\n  env -C \"$WORKSPACE/tools/ansible\" ansible-playbook \\\n      --inventory=files/development-docker-images.yml \\\n      \"${ansible_opts[@]}\" build-docker-image.yml\n  unset ansible_opts\nfi\n\nif test -z \"$packages\" ; then\n    packages=\"arvados-api-server\n        arvados-client\n        arvados-controller\n        arvados-dispatch-cloud\n        arvados-dispatch-lsf\n        arvados-docker-cleaner\n        arvados-health\n        arvados-server\n        arvados-src\n        arvados-sync-groups\n        arvados-sync-users\n        arvados-workbench2\n        arvados-ws\n        crunch-dispatch-local\n        crunch-dispatch-slurm\n        crunch-run\n        keep-balance\n        keep-block-check\n        keep-exercise\n        keep-rsync\n        keep-web\n        keepproxy\n        keepstore\n        libpam-arvados-go\n        python3-arvados-cwl-runner\n        python3-arvados-fuse\n        python3-arvados-python-client\n        python3-arvados-user-activity\n        python3-arvados-cluster-activity\n        python3-crunchstat-summary\"\nfi\n\nFINAL_EXITCODE=0\n\npackage_fails=\"\"\n\nmkdir -p \"$WORKSPACE/services/api/vendor/cache-$TARGET\"\n\ndocker_volume_args=(\n    --mount \"type=bind,src=$JENKINS_DIR,dst=/jenkins\"\n    --mount \"type=bind,src=$WORKSPACE,dst=/arvados\"\n    --tmpfs /arvados/services/api/.bundle:rw,noexec,nosuid,size=1m\n    --tmpfs /arvados/services/api/vendor:rw,exec,nosuid,size=1g\n    --mount \"type=bind,src=$WORKSPACE/services/api/vendor/cache-$TARGET,dst=/arvados/services/api/vendor/cache\"\n)\n\nif [[ -n \"$test_packages\" ]]; then\n    for p in $packages ; do\n        if [[ -n \"$ONLY_BUILD\" ]] && [[ \"$p\" != \"$ONLY_BUILD\" ]]; then\n            continue\n        fi\n        if [[ -e \"${WORKSPACE}/packages/.last_test_${TARGET}\" ]] && [[ -z \"$FORCE_TEST\" ]]; then\n          MATCH=`find ${WORKSPACE}/packages/ -newer ${WORKSPACE}/packages/.last_test_${TARGET} -regex .*${TARGET}/$p.*`\n          if [[ \"$MATCH\" == \"\" ]]; then\n            # No new package has been built that needs testing\n            echo \"Skipping $p test because no new package was built since the last test.\"\n            continue\n          fi\n        fi\n        # If we're testing all packages, we should not error out on packages that don't exist.\n        # If we are testing one specific package only (i.e. 
--only-test was given), we should\n        # error out if that package does not exist.\n        if [[ -z \"$testing_one_package\" ]]; then\n          MATCH=`find ${WORKSPACE}/packages/ -regextype posix-extended -regex \".*${TARGET}/$p.*(deb|rpm)\"`\n          if [[ \"$MATCH\" == \"\" ]]; then\n            # No package file exists for this package, so there is nothing to test.\n            echo \"Skipping $p test because no package file is available to test.\"\n            continue\n          fi\n        fi\n        echo\n        echo \"START: $p test on $IMAGE\" >&2\n        if docker run \\\n            --rm \\\n            \"${docker_volume_args[@]}\" \\\n            --env ARVADOS_DEBUG=$ARVADOS_DEBUG \\\n            --env \"TARGET=$TARGET\" \\\n            --env \"WORKSPACE=/arvados\" \\\n            \"$IMAGE\" $COMMAND $p\n        then\n            echo \"OK: $p test on $IMAGE succeeded\" >&2\n        else\n            FINAL_EXITCODE=$?\n            package_fails=\"$package_fails $p\"\n            echo \"ERROR: $p test on $IMAGE failed with exit status $FINAL_EXITCODE\" >&2\n        fi\n    done\n\n    if [[ \"$FINAL_EXITCODE\" == \"0\" ]]; then\n      touch ${WORKSPACE}/packages/.last_test_${TARGET}\n    fi\nelse\n    echo\n    echo \"START: build packages on $IMAGE\" >&2\n    # Move existing packages and other files into the processed/ subdirectory\n    if [[ ! -e \"${WORKSPACE}/packages/${TARGET}/processed\" ]]; then\n      mkdir -p \"${WORKSPACE}/packages/${TARGET}/processed\"\n    fi\n    set +e\n    mv -f ${WORKSPACE}/packages/${TARGET}/* ${WORKSPACE}/packages/${TARGET}/processed/ 2>/dev/null\n    set -e\n    # give bundle (almost) all the cores. See also the MAKE env var that is passed into the\n    # docker run command below.\n    # Cf. https://build.betterup.com/one-weird-trick-that-will-speed-up-your-bundle-install/\n    tmpfile=$(mktemp /tmp/run-build-packages-one-target.XXXXXX)\n    cores=$(let a=$(grep -c processor /proc/cpuinfo )-1; echo $a)\n    printf -- \"---\\nBUNDLE_JOBS: \\\"$cores\\\"\" > $tmpfile\n    # Build packages.\n    if docker run \\\n        --rm \\\n        \"${docker_volume_args[@]}\" \\\n        --mount \"type=bind,src=$tmpfile,dst=/root/.bundle/config\" \\\n        --env ARVADOS_BUILDING_VERSION=\"$ARVADOS_BUILDING_VERSION\" \\\n        --env ARVADOS_BUILDING_ITERATION=\"$ARVADOS_BUILDING_ITERATION\" \\\n        --env ARVADOS_DEBUG=$ARVADOS_DEBUG \\\n        --env \"ONLY_BUILD=$ONLY_BUILD\" \\\n        --env \"FORCE_BUILD=$FORCE_BUILD\" \\\n        --env \"ARCH=$ARCH\" \\\n        --env \"MAKE=make --jobs $cores\" \\\n        \"$IMAGE\" $COMMAND\n    then\n        echo\n        echo \"OK: build packages on $IMAGE succeeded\" >&2\n    else\n        FINAL_EXITCODE=$?\n        echo \"ERROR: build packages on $IMAGE failed with exit status $FINAL_EXITCODE\" >&2\n    fi\n    # Clean up the bundle config file\n    rm -f $tmpfile\nfi\n\nif test -n \"$package_fails\" ; then\n    echo \"Failed package tests:$package_fails\" >&2\nfi\n\nexit $FINAL_EXITCODE\n"
  },
  {
    "path": "build/run-build-packages-python-and-ruby.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nCOLUMNS=80\n\n. `dirname \"$(readlink -f \"$0\")\"`/run-library.sh\n\nread -rd \"\\000\" helpmessage <<EOF\n$(basename $0): Build Arvados Python packages and Ruby gems\n\nSyntax:\n        WORKSPACE=/path/to/arvados $(basename $0) [options]\n\nOptions:\n\n--debug\n    Output debug information (default: false)\n--upload\n    If the build and test steps are successful, upload the python\n    packages to pypi and the gems to rubygems (default: false)\n--ruby <true|false>\n    Build ruby gems (default: true)\n--python <true|false>\n    Build python packages (default: true)\n\nWORKSPACE=path         Path to the Arvados source tree to build packages from\n\nEOF\n\nexit_cleanly() {\n    trap - INT\n    report_outcomes\n    exit ${#failures[@]}\n}\n\ngem_wrapper() {\n  local gem_name=\"$1\"; shift\n  local gem_directory=\"$1\"; shift\n\n  title \"Start $gem_name gem build\"\n  timer_reset\n\n  cd \"$gem_directory\"\n  handle_ruby_gem $gem_name\n\n  checkexit $? \"$gem_name gem build\"\n  title \"End of $gem_name gem build (`timer`)\"\n}\n\npython_wrapper() {\n  local package_name=\"$1\"; shift\n  local package_directory=\"$1\"; shift\n\n  title \"Start $package_name python package build\"\n  timer_reset\n\n  python3 -m build \"$package_directory\"\n\n  checkexit $? \"$package_name python package build\"\n  title \"End of $package_name python package build (`timer`)\"\n}\n\nTARGET=\nUPLOAD=0\nRUBY=1\nPYTHON=1\nDEBUG=${ARVADOS_DEBUG:-0}\n\nPARSEDOPTS=$(getopt --name \"$0\" --longoptions \\\n    help,debug,ruby:,python:,upload,target: \\\n    -- \"\" \"$@\")\nif [ $? -ne 0 ]; then\n    exit 1\nfi\n\neval set -- \"$PARSEDOPTS\"\nwhile [ $# -gt 0 ]; do\n    case \"$1\" in\n        --help)\n            echo >&2 \"$helpmessage\"\n            echo >&2\n            exit 1\n            ;;\n        --target)\n            TARGET=\"$2\"; shift\n            ;;\n        --ruby)\n            RUBY=\"$2\"; shift\n            if [ \"$RUBY\" != \"true\" ] && [ \"$RUBY\" != \"1\" ]; then\n              RUBY=0\n            else\n              RUBY=1\n            fi\n            ;;\n        --python)\n            PYTHON=\"$2\"; shift\n            if [ \"$PYTHON\" != \"true\" ] && [ \"$PYTHON\" != \"1\" ]; then\n              PYTHON=0\n            else\n              PYTHON=1\n            fi\n            ;;\n        --upload)\n            UPLOAD=1\n            ;;\n        --debug)\n            DEBUG=1\n            ;;\n        --)\n            if [ $# -gt 1 ]; then\n                echo >&2 \"$0: unrecognized argument '$2'. Try: $0 --help\"\n                exit 1\n            fi\n            ;;\n    esac\n    shift\ndone\n\nif ! [[ -n \"$WORKSPACE\" ]]; then\n  echo >&2 \"$helpmessage\"\n  echo >&2\n  echo >&2 \"Error: WORKSPACE environment variable not set\"\n  echo >&2\n  exit 1\nfi\n\nSTDOUT_IF_DEBUG=/dev/null\nSTDERR_IF_DEBUG=/dev/null\nDASHQ_UNLESS_DEBUG=-q\nif [[ \"$DEBUG\" != 0 ]]; then\n    STDOUT_IF_DEBUG=/dev/stdout\n    STDERR_IF_DEBUG=/dev/stderr\n    DASHQ_UNLESS_DEBUG=\nfi\n\nRUN_BUILD_PACKAGES_PATH=\"`dirname \\\"$0\\\"`\"\nRUN_BUILD_PACKAGES_PATH=\"`( cd \\\"$RUN_BUILD_PACKAGES_PATH\\\" && pwd )`\"  # absolutized and normalized\nif [ -z \"$RUN_BUILD_PACKAGES_PATH\" ] ; then\n  # error; for some reason, the path is not accessible\n  # to the script (e.g. 
permissions re-evaled after suid)\n  exit 1  # fail\nfi\n\ndebug_echo \"$0 is running from $RUN_BUILD_PACKAGES_PATH\"\ndebug_echo \"Workspace is $WORKSPACE\"\n\nif [ $RUBY -eq 0 ] && [ $PYTHON -eq 0 ]; then\n  echo \"Nothing to do!\"\n  exit 0\nfi\n\n# Make all files world-readable -- jenkins runs with umask 027, and has checked\n# out our git tree here\nchmod o+r \"$WORKSPACE\" -R\n\n# More cleanup - make sure all executables that we'll package are 755\ncd \"$WORKSPACE\"\nfind -type d -name 'bin' |xargs -I {} find {} -type f |xargs -I {} chmod 755 {}\n\n# Now fix our umask to something better suited to building and publishing\n# gems and packages\numask 0022\n\ndebug_echo \"umask is\" `umask`\n\nGEM_BUILD_FAILURES=0\nif [ $RUBY -eq 1 ]; then\n  debug_echo \"Building Ruby gems\"\n  gem_wrapper arvados \"$WORKSPACE/sdk/ruby\"\n  gem_wrapper arvados-cli \"$WORKSPACE/sdk/cli\"\n  gem_wrapper arvados-login-sync \"$WORKSPACE/services/login-sync\"\n  if [ ${#failures[@]} -ne 0 ]; then\n    GEM_BUILD_FAILURES=${#failures[@]}\n  fi\nfi\n\nPYTHON_BUILD_FAILURES=0\nif [ $PYTHON -eq 1 ]; then\n  debug_echo \"Building Python packages\"\n  python_wrapper arvados-python-client \"$WORKSPACE/sdk/python\"\n  python_wrapper arvados-cwl-runner \"$WORKSPACE/sdk/cwl\"\n  python_wrapper arvados_fuse \"$WORKSPACE/services/fuse\"\n  python_wrapper crunchstat_summary \"$WORKSPACE/tools/crunchstat-summary\"\n  python_wrapper arvados-user-activity \"$WORKSPACE/tools/user-activity\"\n  python_wrapper arvados-cluster-activity \"$WORKSPACE/tools/cluster-activity\"\n\n  if [ $((${#failures[@]} - $GEM_BUILD_FAILURES)) -ne 0 ]; then\n    PYTHON_BUILD_FAILURES=$((${#failures[@]} - $GEM_BUILD_FAILURES))\n  fi\nfi\n\nif [ $UPLOAD -ne 0 ]; then\n    if get_ci_scripts\n    then\n        checkexit $? \"get CI scripts\"\n    else\n        checkexit $? \"get CI scripts\"\n        UPLOAD=0\n    fi\nfi\n\nif [ $UPLOAD -ne 0 ]; then\n  echo \"Uploading\"\n\n  if [ \"$DEBUG\" != 0 ]; then\n    EXTRA_UPLOAD_FLAGS=\" --verbose\"\n  else\n    EXTRA_UPLOAD_FLAGS=\"\"\n  fi\n\n  if [ ! -e \"$WORKSPACE/packages\" ]; then\n    mkdir -p \"$WORKSPACE/packages\"\n  fi\n\n  if [ $PYTHON -eq 1 ]; then\n    title \"Start upload python packages\"\n    timer_reset\n\n    if [ $PYTHON_BUILD_FAILURES -eq 0 ]; then\n      \"$CI_DIR/run_upload_packages.py\" $EXTRA_UPLOAD_FLAGS --workspace $WORKSPACE python\n    else\n      echo \"Skipping python packages upload, there were errors building the packages\"\n    fi\n    checkexit $? \"upload python packages\"\n    title \"End of upload python packages (`timer`)\"\n  fi\n\n  if [ $RUBY -eq 1 ]; then\n    title \"Start upload ruby gems\"\n    timer_reset\n\n    if [ $GEM_BUILD_FAILURES -eq 0 ]; then\n      \"$CI_DIR/run_upload_packages.py\" $EXTRA_UPLOAD_FLAGS --workspace $WORKSPACE gems\n    else\n      echo \"Skipping ruby gem upload, there were errors building the packages\"\n    fi\n    checkexit $? \"upload ruby gems\"\n    title \"End of upload ruby gems (`timer`)\"\n  fi\nfi\n\nexit_cleanly\n"
  },
  {
    "path": "build/run-build-packages.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n. \"$(dirname \"$(readlink -f \"$0\")\")\"/run-library.sh || exit 1\n\nread -rd \"\\000\" helpmessage <<EOF\n$(basename \"$0\"): Build Arvados packages\n\nSyntax:\n        WORKSPACE=/path/to/arvados $(basename \"$0\") --target <target> [options]\n\nOptions:\n\n--build-bundle-packages  (default: false)\n    Build api server package with vendor/bundle included\n--debug\n    Output debug information (default: false)\n--target <target>\n    Distribution to build packages for\n--only-build <package>\n    Build only a specific package (or ONLY_BUILD from environment)\n--force-build\n    Build even if the package exists upstream or if it has already been\n    built locally\n--command\n    Build command to execute (defaults to the run command defined in the\n    Docker image)\n\nWORKSPACE=path         Path to the Arvados source tree to build packages from\n\nEOF\n\n# Begin of user configuration\n\n# set to --no-cache-dir to disable pip caching\nCACHE_FLAG=\n\nMAINTAINER=\"Arvados Package Maintainers <packaging@arvados.org>\"\nVENDOR=\"The Arvados Project\"\n\n# End of user configuration\n\nDEBUG=${ARVADOS_DEBUG:-0}\nFORCE_BUILD=${FORCE_BUILD:-0}\nEXITCODE=0\nCOMMAND=\nTARGET=\n\nPARSEDOPTS=$(getopt --name \"$0\" --longoptions \\\n    help,build-bundle-packages,debug,target:,only-build:,arch:,force-build \\\n    -- \"\" \"$@\")\nif [ $? -ne 0 ]; then\n    exit 1\nfi\n\neval set -- \"$PARSEDOPTS\"\nwhile [ $# -gt 0 ]; do\n    case \"$1\" in\n        --help)\n            echo >&2 \"$helpmessage\"\n            echo >&2\n            exit 1\n            ;;\n        --target)\n            TARGET=\"$2\"; shift\n            ;;\n        --only-build)\n            ONLY_BUILD=\"$2\"; shift\n            ;;\n        --force-build)\n            FORCE_BUILD=1\n            ;;\n        --arch)\n            case \"$2\" in\n                amd64) ;;\n                *)\n                    printf \"FATAL: --arch '%s' is not supported\" \"$2\" >&2\n                    exit 2\n                    ;;\n            esac\n            ARCH=\"$2\"; shift\n            ;;\n        --debug)\n            DEBUG=1\n            ;;\n        --command)\n            COMMAND=\"$2\"; shift\n            ;;\n        --)\n            if [ $# -gt 1 ]; then\n                echo >&2 \"$0: unrecognized argument '$2'. Try: $0 --help\"\n                exit 1\n            fi\n            ;;\n    esac\n    shift\ndone\n\nif [[ -z \"$TARGET\" ]]; then\n    echo \"FATAL: --target must be specified\" >&2\n    exit 2\nelif [[ ! -e \"$WORKSPACE/build/package-testing/test-packages-$TARGET.sh\" ]]; then\n    echo \"FATAL: unknown build target '$TARGET'\" >&2\n    exit 2\nfi\n\nif [[ \"$COMMAND\" != \"\" ]]; then\n  COMMAND=\"bash /jenkins/$COMMAND --target $TARGET\"\nfi\n\nSTDOUT_IF_DEBUG=/dev/null\nSTDERR_IF_DEBUG=/dev/null\nDASHQ_UNLESS_DEBUG=-q\nif [[ \"$DEBUG\" != 0 ]]; then\n    STDOUT_IF_DEBUG=/dev/stdout\n    STDERR_IF_DEBUG=/dev/stderr\n    DASHQ_UNLESS_DEBUG=\nfi\n\n# The next section defines a bunch of constants used to build distro packages\n# for our Python tools. Because those packages include C extensions, they need\n# to depend on and refer to a specific minor version of Python 3. 
The logic\n# below should Just Work for most cases, but you can override variables for a\n# specific distro if you need to do something weird.\n# * PYTHON3_VERSION: The major+minor version of Python we build against\n#   (e.g., \"3.11\")\n# * PYTHON3_EXECUTABLE: The command to run that version of Python,\n#   either a full path or something in $PATH (e.g., \"python3.11\")\n# * PYTHON3_PACKAGE: The name of the distro package that provides\n#   $PYTHON3_EXECUTABLE. Our Python packages will all depend on this.\n# * PYTHON3_PKG_PREFIX: The prefix used in the names of all of our Python\n#   packages. This should match distro convention.\nPYTHON3_PKG_PREFIX=python3\ncase \"$TARGET\" in\n    rocky9)\n        FORMAT=rpm\n        PYTHON3_VERSION=3.11\n        ;;\n    centos*|rocky*)\n        FORMAT=rpm\n        ;;\n    debian*|ubuntu*)\n        FORMAT=deb\n        ;;\n    *)\n        echo -e \"$0: Unknown target '$TARGET'.\\n\" >&2\n        exit 1\n        ;;\nesac\n: \"${PYTHON3_VERSION:=$(\"${PYTHON3_EXECUTABLE:-python3}\" -c 'import sys; print(\"{v.major}.{v.minor}\".format(v=sys.version_info))')}\"\n: \"${PYTHON3_EXECUTABLE:=python$PYTHON3_VERSION}\"\ncase \"$FORMAT\" in\n    deb)\n        : \"${PYTHON3_PACKAGE:=python$PYTHON3_VERSION}\"\n        ;;\n    rpm)\n        : \"${PYTHON3_PACKAGE:=$(rpm -qf \"$(command -v \"$PYTHON3_EXECUTABLE\")\" --queryformat '%{NAME}\\n')}\"\n        ;;\nesac\n\nif [[ -z \"$WORKSPACE\" ]]; then\n  echo >&2 \"$helpmessage\"\n  echo >&2\n  echo >&2 \"Error: WORKSPACE environment variable not set\"\n  echo >&2\n  exit 1\nfi\n\n# Test for fpm\nfpm --version >/dev/null 2>&1\n\nif [[ $? -ne 0 ]]; then\n  echo >&2 \"$helpmessage\"\n  echo >&2\n  echo >&2 \"Error: fpm not found\"\n  echo >&2\n  exit 1\nfi\n\nRUN_BUILD_PACKAGES_PATH=\"$(dirname \"$0\")\"\nRUN_BUILD_PACKAGES_PATH=\"$(cd \"$RUN_BUILD_PACKAGES_PATH\" && pwd)\"  # absolutized and normalized\nif [ -z \"$RUN_BUILD_PACKAGES_PATH\" ] ; then\n  # error; for some reason, the path is not accessible\n  # to the script (e.g. permissions re-evaled after suid)\n  exit 1  # fail\nfi\n\ndebug_echo \"$0 is running from $RUN_BUILD_PACKAGES_PATH\"\ndebug_echo \"Workspace is $WORKSPACE\"\n\n# Make all files world-readable -- jenkins runs with umask 027, and has checked\n# out our git tree here\nchmod o+r \"$WORKSPACE\" -R\n\n# More cleanup - make sure all executables that we'll package are 755\ncd \"$WORKSPACE\" || exit 1\nfind . -type d -name 'bin' -print0 |xargs -0 -I {} find {} -type f -print0 |xargs -0 -I {} chmod 755 {}\n\n# Now fix our umask to something better suited to building and publishing\n# gems and packages\numask 0022\n\ndebug_echo \"umask is\" \"$(umask)\"\n\nif [[ ! 
-d \"$WORKSPACE/packages/$TARGET\" ]]; then\n  mkdir -p \"$WORKSPACE/packages/$TARGET\"\n  chown --reference=\"$WORKSPACE\" \"$WORKSPACE/packages/$TARGET\"\nfi\n\n# Required due to CVE-2022-24765\ngit config --global --add safe.directory /arvados\n\n# Ruby gems\ndebug_echo -e \"\\nRuby gems\\n\"\n\nFPM_GEM_PREFIX=$(gem environment gemdir)\n\ncd \"$WORKSPACE/sdk/ruby\" || exit 1\nhandle_ruby_gem arvados\n\ncd \"$WORKSPACE/sdk/cli\" || exit 1\nhandle_ruby_gem arvados-cli\n\ncd \"$WORKSPACE/services/login-sync\" || exit 1\nhandle_ruby_gem arvados-login-sync\n\n# arvados-src\nhandle_arvados_src\n\n# Go packages\ndebug_echo -e \"\\nGo packages\\n\"\n\n# Go binaries\nexport GOPATH=~/go\npackage_go_binary cmd/arvados-client arvados-client \"$FORMAT\" \"$ARCH\" \\\n    \"Arvados command line tool (beta)\"\npackage_go_binary cmd/arvados-server arvados-server \"$FORMAT\" \"$ARCH\" \\\n    \"Arvados server daemons\"\npackage_go_binary cmd/arvados-server arvados-controller \"$FORMAT\" \"$ARCH\" \\\n    \"Arvados cluster controller daemon\"\npackage_go_binary cmd/arvados-server arvados-dispatch-cloud \"$FORMAT\" \"$ARCH\" \\\n    \"Arvados cluster cloud dispatch\"\npackage_go_binary cmd/arvados-server arvados-dispatch-lsf \"$FORMAT\" \"$ARCH\" \\\n    \"Dispatch Arvados containers to an LSF cluster\"\npackage_go_binary services/crunch-dispatch-local crunch-dispatch-local \"$FORMAT\" \"$ARCH\" \\\n    \"Dispatch Crunch containers on the local system\"\npackage_go_binary cmd/arvados-server crunch-dispatch-slurm \"$FORMAT\" \"$ARCH\" \\\n    \"Dispatch Crunch containers to a SLURM cluster\"\npackage_go_binary cmd/arvados-server crunch-run \"$FORMAT\" \"$ARCH\" \\\n    \"Supervise a single Crunch container\"\npackage_go_binary cmd/arvados-server arvados-health \"$FORMAT\" \"$ARCH\" \\\n    \"Check health of all Arvados cluster services\"\npackage_go_binary cmd/arvados-server keep-balance \"$FORMAT\" \"$ARCH\" \\\n    \"Rebalance and garbage-collect data blocks stored in Arvados Keep\"\npackage_go_binary cmd/arvados-server keepproxy \"$FORMAT\" \"$ARCH\" \\\n    \"Make a Keep cluster accessible to clients that are not on the LAN\"\npackage_go_binary cmd/arvados-server keepstore \"$FORMAT\" \"$ARCH\" \\\n    \"Keep storage daemon, accessible to clients on the LAN\"\npackage_go_binary cmd/arvados-server keep-web \"$FORMAT\" \"$ARCH\" \\\n    \"Static web hosting service for user data stored in Arvados Keep\"\npackage_go_binary cmd/arvados-server arvados-ws \"$FORMAT\" \"$ARCH\" \\\n    \"Arvados Websocket server\"\npackage_go_binary tools/sync-groups arvados-sync-groups \"$FORMAT\" \"$ARCH\" \\\n    \"Synchronize remote groups into Arvados from an external source\"\npackage_go_binary tools/sync-users arvados-sync-users \"$FORMAT\" \"$ARCH\" \\\n    \"Synchronize remote users into Arvados from an external source\"\npackage_go_binary tools/keep-block-check keep-block-check \"$FORMAT\" \"$ARCH\" \\\n    \"Verify that all data from one set of Keep servers to another was copied\"\npackage_go_binary tools/keep-rsync keep-rsync \"$FORMAT\" \"$ARCH\" \\\n    \"Copy all data from one set of Keep servers to another\"\npackage_go_binary tools/keep-exercise keep-exercise \"$FORMAT\" \"$ARCH\" \\\n    \"Performance testing tool for Arvados Keep\"\npackage_go_so lib/pam pam_arvados.so libpam-arvados-go \"$FORMAT\" \"$ARCH\" \\\n    \"Arvados PAM authentication module\"\n\n# Python packages\ndebug_echo -e \"\\nPython packages\\n\"\n\n# Before a Python package can be built, its dependencies must already be built.\n# This 
list is ordered accordingly.\nsetup_build_virtualenv\nfpm_build_virtualenv \"arvados-python-client\" \"sdk/python\" \"$FORMAT\" \"$ARCH\"\nfpm_build_virtualenv \"crunchstat-summary\" \"tools/crunchstat-summary\" \"$FORMAT\" \"$ARCH\"\nfpm_build_virtualenv \"arvados-cwl-runner\" \"sdk/cwl\" \"$FORMAT\" \"$ARCH\"\nfpm_build_virtualenv \"arvados-docker-cleaner\" \"services/dockercleaner\" \"$FORMAT\" \"$ARCH\"\nfpm_build_virtualenv \"arvados-fuse\" \"services/fuse\" \"$FORMAT\" \"$ARCH\"\nfpm_build_virtualenv \"arvados-user-activity\" \"tools/user-activity\" \"$FORMAT\" \"$ARCH\"\nfpm_build_virtualenv \"arvados-cluster-activity\" \"tools/cluster-activity\" \"$FORMAT\" \"$ARCH\"\n\n# Workbench2\npackage_workbench2\n\n# Rails packages\ndebug_echo -e \"\\nRails packages\\n\"\n\n# The rails api server package\nhandle_api_server \"$ARCH\"\n\n# clean up temporary GOPATH\nrm -rf \"$GOPATH\"\n\nexit $EXITCODE\n"
  },
  {
    "path": "build/run-build-test-packages-one-target.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nread -rd \"\\000\" helpmessage <<EOF\n$(basename $0): Build, test and (optionally) upload packages for one target\n\nSyntax:\n        WORKSPACE=/path/to/arvados $(basename $0) --target <target> [options]\n\n--target <target>\n    Distribution to build packages for\n--only-build <package>\n    Build only a specific package (or ONLY_BUILD from environment)\n--arch <arch>\n    Build a specific architecture (or ARCH from environment, defaults to native architecture)\n--force-build\n    Build even if the package exists upstream or if it has already been\n    built locally\n--force-test\n    Test even if there is no new untested package\n--upload\n    If the build and test steps are successful, upload the packages\n    to a remote apt repository (default: false)\n--debug\n    Output debug information (default: false)\n--rc\n    Optional Parameter to build Release Candidate\n--build-version <version>\n    Version to build (default:\n    \\$ARVADOS_BUILDING_VERSION-\\$ARVADOS_BUILDING_ITERATION or\n    0.1.timestamp.commithash)\n--skip-docker-build\n    Don't try to build Docker images\n\nWORKSPACE=path         Path to the Arvados source tree to build packages from\n\nEOF\n\nif ! [[ -n \"$WORKSPACE\" ]]; then\n  echo >&2 \"$helpmessage\"\n  echo >&2\n  echo >&2 \"Error: WORKSPACE environment variable not set\"\n  echo >&2\n  exit 1\nfi\n\nif ! [[ -d \"$WORKSPACE\" ]]; then\n  echo >&2 \"$helpmessage\"\n  echo >&2\n  echo >&2 \"Error: $WORKSPACE is not a directory\"\n  echo >&2\n  exit 1\nfi\n\nPARSEDOPTS=$(getopt --name \"$0\" --longoptions \\\n    help,debug,upload,rc,target:,force-test,only-build:,force-build,arch:,build-version:,skip-docker-build \\\n    -- \"\" \"$@\")\nif [ $? -ne 0 ]; then\n    exit 1\nfi\n\nUPLOAD=0\nUPLOAD_REPO=dev\nDEBUG=\nTARGET=\n\ndeclare -a build_args=()\n\neval set -- \"$PARSEDOPTS\"\nwhile [ $# -gt 0 ]; do\n    case \"$1\" in\n        --help)\n            echo >&2 \"$helpmessage\"\n            echo >&2\n            exit 1\n            ;;\n        --target)\n            TARGET=\"$2\"; shift\n            ;;\n        --force-test)\n            FORCE_TEST=1\n            ;;\n        --force-build)\n            FORCE_BUILD=1\n            ;;\n        --only-build)\n            ONLY_BUILD=\"$2\"; shift\n            ;;\n        --arch)\n            ARCH=\"$2\"; shift\n            ;;\n        --debug)\n            DEBUG=\" --debug\"\n            ;;\n        --upload)\n            UPLOAD=1\n            ;;\n        --rc)\n            UPLOAD_REPO=testing\n            ;;\n        --build-version)\n            build_args+=(\"$1\" \"$2\")\n            shift\n            ;;\n        --skip-docker-build)\n            SKIP_DOCKER_BUILD=1\n\t    ;;\n        --)\n            if [ $# -gt 1 ]; then\n                echo >&2 \"$0: unrecognized argument '$2'. Try: $0 --help\"\n                exit 1\n            fi\n            ;;\n    esac\n    shift\ndone\n\nif [[ -z \"$TARGET\" ]]; then\n    echo \"FATAL: --target must be specified\" >&2\n    exit 2\nelif [[ ! 
-e \"$WORKSPACE/build/package-testing/test-packages-$TARGET.sh\" ]]; then\n    echo \"FATAL: unknown build target '$TARGET'\" >&2\n    exit 2\nfi\n\nbuild_args+=(--target \"$TARGET\")\n\nif [[ -n \"$ONLY_BUILD\" ]]; then\n  build_args+=(--only-build \"$ONLY_BUILD\")\nfi\n\nif [[ -n \"$FORCE_BUILD\" ]]; then\n  build_args+=(--force-build)\nfi\n\nif [[ -n \"$FORCE_TEST\" ]]; then\n  build_args+=(--force-test)\nfi\n\nif [[ \"$SKIP_DOCKER_BUILD\" = 1 ]]; then\n  build_args+=(--skip-docker-build)\nfi\n\nif [[ -n \"$ARCH\" ]]; then\n  build_args+=(--arch \"$ARCH\")\nfi\n\nexit_cleanly() {\n    trap - INT\n    report_outcomes\n    exit ${#failures}\n}\n\nCOLUMNS=80\n. $WORKSPACE/build/run-library.sh\n\ntitle \"Start build packages\"\ntimer_reset\n\n$WORKSPACE/build/run-build-packages-one-target.sh \"${build_args[@]}\"$DEBUG\n\ncheckexit $? \"build packages\"\ntitle \"End of build packages (`timer`)\"\n\ntitle \"Start test packages\"\ntimer_reset\n\nif [ ${#failures[@]} -eq 0 ]; then\n  $WORKSPACE/build/run-build-packages-one-target.sh \"${build_args[@]}\" --test-packages$DEBUG\nelse\n  echo \"Skipping package upload, there were errors building the packages\"\nfi\n\ncheckexit $? \"test packages\"\ntitle \"End of test packages (`timer`)\"\n\nif [[ \"$UPLOAD\" != 0 ]]; then\n  title \"Start upload packages\"\n  timer_reset\n\n  get_ci_scripts\n  checkexit $? \"get CI scripts\"\n\n  if [ ${#failures[@]} -eq 0 ]; then\n    \"$CI_DIR/run_upload_packages.py\" \\\n        --repo=\"$UPLOAD_REPO\" \\\n        -H jenkinsapt@apt.arvados.org \\\n        --workspace=\"$WORKSPACE\" \\\n        \"$TARGET\"\n    checkexit $? \"upload packages\"\n  else\n    echo \"Skipping package upload, there were errors building and/or testing the packages\"\n  fi\n  title \"End of upload packages (`timer`)\"\nfi\n\nexit_cleanly\n"
  },
  {
    "path": "build/run-library.sh",
    "content": "#!/bin/bash -xe\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# A library of functions shared by the various scripts in this directory.\n\n# This is the timestamp about when we merged changed to include licenses\n# with Arvados packages.  We use it as a heuristic to add revisions for\n# older packages.\nLICENSE_PACKAGE_TS=20151208015500\nRAILS_PACKAGE_ITERATION=\"${ARVADOS_BUILDING_ITERATION:-1}\"\n\ndeclare -A LICENSE_FILE_NAME_MAP=(\n    [agpl-3.0.txt]=\"GNU Affero General Public License version 3.0\"\n    [LICENSE-2.0.txt]=\"Apache 2.0\"\n)\n\ndebug_echo () {\n    echo \"$@\" >\"$STDOUT_IF_DEBUG\"\n}\n\nfind_python_program() {\n    prog=\"$1\"\n    shift\n    for prog in \"$@\"; do\n        if \"$prog\" --version >/dev/null 2>&1; then\n            echo \"$prog\"\n            return 0\n        fi\n    done\n    cat >&2 <<EOF\n$helpmessage\n\nError: $prog (from Python setuptools module) not found\n\nEOF\n    exit 1\n}\n\n# get_ci_scripts sets $CI_DIR to the path of a directory with CI scripts.\n# If it is not already set, it uses the following to get them by creating a\n# temporary Git worktree:\n#  * CI_SRC: a Git checkout of the scripts (default $WORKSPACE)\n#  * CI_REF: the reference used to create that (default `remotes/arvados-ci/ci-build`)\n#  * CI_PATH: the path of CI scripts under the worktree (default `/jenkins`)\n# The defaults are all suitable for jobs running under ci.arvados.org, but\n# you can set them in the environment to customize the behavior, or just set\n# $CI_DIR to a path that already has the scripts ready.\nget_ci_scripts() {\n    if [ -n \"${CI_DIR:-}\" ]; then\n        return\n    fi\n    local clone_dir=\"$(mktemp --directory --tmpdir=\"${WORKSPACE_TMP:-}\")\" &&\n        git -C \"${CI_SRC:-$WORKSPACE}\" worktree add \"$clone_dir\" \"${CI_REF:-remotes/arvados-ci/ci-build}\" ||\n            return\n    CI_DIR=\"$clone_dir${CI_PATH:-/jenkins}\"\n}\n\nformat_last_commit_here() {\n    local format=\"$1\"; shift\n    local dir=\"${1:-.}\"; shift\n    TZ=UTC git log -n1 --first-parent \"--format=format:$format\" \"$dir\"\n}\n\nversion_from_git() {\n    # Output the version being built, or if we're building a\n    # dev/prerelease, output a version number based on the git log for\n    # the given $subdir.\n    local subdir=\"$1\"; shift\n    if [[ -n \"$ARVADOS_BUILDING_VERSION\" ]]; then\n        echo \"$ARVADOS_BUILDING_VERSION\"\n        return\n    fi\n\n    local git_ts git_hash\n    declare $(format_last_commit_here \"git_ts=%ct git_hash=%h\" \"$subdir\")\n    ARVADOS_BUILDING_VERSION=\"$($WORKSPACE/build/version-at-commit.sh $git_hash)\"\n    echo \"$ARVADOS_BUILDING_VERSION\"\n}\n\nnohash_version_from_git() {\n    local subdir=\"$1\"; shift\n    if [[ -n \"$ARVADOS_BUILDING_VERSION\" ]]; then\n        echo \"$ARVADOS_BUILDING_VERSION\"\n        return\n    fi\n    version_from_git $subdir | cut -d. 
\nformat_last_commit_here() {\n    local format=\"$1\"; shift\n    local dir=\"${1:-.}\"; shift\n    TZ=UTC git log -n1 --first-parent \"--format=format:$format\" \"$dir\"\n}\n\nversion_from_git() {\n    # Output the version being built, or if we're building a\n    # dev/prerelease, output a version number based on the git log for\n    # the given $subdir.\n    local subdir=\"$1\"; shift\n    if [[ -n \"$ARVADOS_BUILDING_VERSION\" ]]; then\n        echo \"$ARVADOS_BUILDING_VERSION\"\n        return\n    fi\n\n    local git_ts git_hash\n    declare $(format_last_commit_here \"git_ts=%ct git_hash=%h\" \"$subdir\")\n    ARVADOS_BUILDING_VERSION=\"$($WORKSPACE/build/version-at-commit.sh $git_hash)\"\n    echo \"$ARVADOS_BUILDING_VERSION\"\n}\n\nnohash_version_from_git() {\n    local subdir=\"$1\"; shift\n    if [[ -n \"$ARVADOS_BUILDING_VERSION\" ]]; then\n        echo \"$ARVADOS_BUILDING_VERSION\"\n        return\n    fi\n    version_from_git $subdir | cut -d. -f1-4\n}\n\ntimestamp_from_git() {\n    local subdir=\"$1\"; shift\n    format_last_commit_here \"%ct\" \"$subdir\"\n}\n\n# Usage: get_native_arch\nget_native_arch() {\n  # Only amd64 and aarch64 are supported at the moment\n  local native_arch=\"\"\n  case \"$HOSTTYPE\" in\n    x86_64)\n      native_arch=\"amd64\"\n      ;;\n    aarch64)\n      native_arch=\"arm64\"\n      ;;\n    *)\n      echo \"Error: architecture not supported\"\n      exit 1\n      ;;\n  esac\n  echo $native_arch\n}\n\nhandle_ruby_gem() {\n    local gem_name=\"$1\"; shift\n    local gem_version=\"$(nohash_version_from_git)\"\n    local gem_src_dir=\"$(pwd)\"\n\n    if [[ -n \"$ONLY_BUILD\" ]] && [[ \"$gem_name\" != \"$ONLY_BUILD\" ]] ; then\n        return 0\n    fi\n\n    if ! [[ -e \"${gem_name}-${gem_version}.gem\" ]]; then\n        find -maxdepth 1 -name \"${gem_name}-*.gem\" -delete\n\n        # -q appears to be broken in gem version 2.2.2\n        gem build \"$gem_name.gemspec\" $DASHQ_UNLESS_DEBUG >\"$STDOUT_IF_DEBUG\" 2>\"$STDERR_IF_DEBUG\"\n    fi\n}\n\n# Usage: package_workbench2\npackage_workbench2() {\n    local pkgname=arvados-workbench2\n    local src=services/workbench2\n    local dst=/var/www/arvados-workbench2/workbench2\n    local description=\"Arvados Workbench 2\"\n    if [[ -n \"$ONLY_BUILD\" ]] && [[ \"$pkgname\" != \"$ONLY_BUILD\" ]] ; then\n        return 0\n    fi\n    cd \"$WORKSPACE/$src\"\n    local version=\"$(version_from_git)\"\n    rm -rf ./build\n    NODE_ENV=production yarn install\n    VERSION=\"$version\" BUILD_NUMBER=\"$(default_iteration \"$pkgname\" \"$version\" yarn)\" GIT_COMMIT=\"$(git rev-parse HEAD | head -c9)\" yarn build\n    cd \"$WORKSPACE/packages/$TARGET\"\n    fpm_build \"${WORKSPACE}/$src\" \"${WORKSPACE}/$src/build/=$dst\" \"$pkgname\" dir \"$version\" \\\n              --license=\"GNU Affero General Public License, version 3.0\" \\\n              --description=\"${description}\" \\\n              --config-files=\"/etc/arvados/$pkgname/workbench2.example.json\" \\\n              \"$WORKSPACE/services/workbench2/etc/arvados/workbench2/workbench2.example.json=/etc/arvados/$pkgname/workbench2.example.json\"\n}\n\ncalculate_go_package_version() {\n  # $__returnvar has the nameref attribute set, which means it is a reference\n  # to another variable that is passed in as the first argument to this function.\n  # see https://www.gnu.org/software/bash/manual/html_node/Shell-Parameters.html\n
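  #\n  # A minimal usage sketch (hypothetical caller): pass the name of the\n  # variable that should receive the version, then any extra dirs whose git\n  # history should be checked:\n  #   local pkg_version\n  #   calculate_go_package_version pkg_version services/keep-web\n  #   echo \"will package keep-web $pkg_version\"\n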
  local -n __returnvar=\"$1\"; shift\n  local oldpwd=\"$PWD\"\n\n  cd \"$WORKSPACE\"\n  go mod download\n\n  # Update the version number and build a new package if the vendor\n  # bundle has changed, or the command imports anything from the\n  # Arvados SDK and the SDK has changed.\n  declare -a checkdirs=(go.mod go.sum)\n  while [ -n \"$1\" ]; do\n      checkdirs+=(\"$1\")\n      shift\n  done\n  # Even our rails packages (version calculation happens here!) depend on a go component (arvados-server).\n  # Everything depends on the build directory.\n  checkdirs+=(sdk/go lib build)\n  local timestamp=0\n  for dir in ${checkdirs[@]}; do\n      cd \"$WORKSPACE\"\n      ts=\"$(timestamp_from_git \"$dir\")\"\n      if [[ \"$ts\" -gt \"$timestamp\" ]]; then\n          version=$(version_from_git \"$dir\")\n          timestamp=\"$ts\"\n      fi\n  done\n  cd \"$oldpwd\"\n  __returnvar=\"$version\"\n}\n\n# Usage: package_go_binary services/foo arvados-foo [deb|rpm] [amd64] \"Compute foo to arbitrary precision\" [apache-2.0.txt]\npackage_go_binary() {\n  local src_path=\"$1\"; shift\n  local prog=\"$1\"; shift\n  local package_format=\"$1\"; shift\n  local target_arch=\"$1\"; shift\n  local description=\"$1\"; shift\n  local license_file=\"${1:-agpl-3.0.txt}\"; shift\n\n  if [[ -n \"$ONLY_BUILD\" ]] && [[ \"$prog\" != \"$ONLY_BUILD\" ]]; then\n      debug_echo -e \"Skipping build of $prog package.\"\n      return 0\n  fi\n\n  native_arch=$(get_native_arch)\n\n  if [[ \"$native_arch\" != \"amd64\" ]] && [[ -n \"$target_arch\" ]] && [[ \"$native_arch\" != \"$target_arch\" ]]; then\n    echo \"Error: no cross compilation support for Go on $native_arch, cannot build $prog for $target_arch\"\n    return 1\n  fi\n\n  case \"$package_format-$TARGET\" in\n    # Red Hat-based distributions do not support native cross compilation at\n    # all (they use a qemu-based solution we haven't implemented yet).\n    rpm-*)\n      cross_compilation=0\n      if [[ \"$native_arch\" == \"amd64\" ]] && [[ -n \"$target_arch\" ]] && [[ \"$native_arch\" != \"$target_arch\" ]]; then\n        echo \"Error: no cross compilation support for Go on $native_arch for $TARGET, cannot build $prog for $target_arch\"\n        return 1\n      fi\n      ;;\n    *)\n      cross_compilation=1\n      ;;\n  esac\n\n  if [[ -n \"$target_arch\" ]]; then\n    archs=($target_arch)\n  else\n    # No target architecture specified, default to native target.\n    archs=($native_arch)\n  fi\n\n  for ta in ${archs[@]}; do\n    package_go_binary_worker \"$src_path\" \"$prog\" \"$package_format\" \"$description\" \"$native_arch\" \"$ta\" \"$license_file\"\n    retval=$?\n    if [[ $retval -ne 0 ]]; then\n      return $retval\n    fi\n  done\n}\n\n# Usage: package_go_binary_worker services/foo arvados-foo deb \"Compute foo to arbitrary precision\" [amd64] [amd64] [apache-2.0.txt]\npackage_go_binary_worker() {\n    local src_path=\"$1\"; shift\n    local prog=\"$1\"; shift\n    local package_format=\"$1\"; shift\n    local description=\"$1\"; shift\n    local native_arch=\"${1:-amd64}\"; shift\n    local target_arch=\"${1:-amd64}\"; shift\n    local license_file=\"${1:-agpl-3.0.txt}\"; shift\n\n    debug_echo \"package_go_binary $src_path as $prog (native arch: $native_arch, target arch: $target_arch)\"\n    local basename=\"${src_path##*/}\"\n    calculate_go_package_version go_package_version $src_path\n\n    cd $WORKSPACE/packages/$TARGET\n    test_package_presence \"$prog\" \"$go_package_version\" \"go\" \"\" \"$target_arch\"\n    if [[ $? 
-ne 0 ]]; then\n      return 0\n    fi\n\n    echo \"Building $package_format ($target_arch) package for $prog from $src_path\"\n    GOARCH=${target_arch} go install -ldflags \"-X git.arvados.org/arvados.git/lib/cmd.version=${go_package_version} -X main.version=${go_package_version}\" \"git.arvados.org/arvados.git/$src_path\"\n\n    local -a switches=()\n\n    binpath=$GOPATH/bin/${basename}\n    if [[ \"${target_arch}\" != \"${native_arch}\" ]]; then\n      switches+=(\"-a${target_arch}\")\n      binpath=\"$GOPATH/bin/linux_${target_arch}/${basename}\"\n    fi\n\n    case \"$package_format\" in\n        # As of April 2024 we package identical Go binaries under different\n        # packages and names. This upsets the build id database, so don't\n        # register ourselves there.\n        rpm) switches+=(--rpm-rpmbuild-define=\"_build_id_links none\") ;;\n    esac\n\n    systemd_unit=\"$WORKSPACE/${src_path}/${prog}.service\"\n    if [[ -e \"${systemd_unit}\" ]]; then\n        switches+=(\n            --after-install \"${WORKSPACE}/build/go-python-package-scripts/postinst\"\n            --before-remove \"${WORKSPACE}/build/go-python-package-scripts/prerm\"\n            \"${systemd_unit}=/lib/systemd/system/${prog}.service\")\n    fi\n    switches+=(\"$WORKSPACE/${license_file}=/usr/share/doc/$prog/${license_file}\")\n\n    fpm_build \"${WORKSPACE}/${src_path}\" \"$binpath=/usr/bin/${prog}\" \"${prog}\" dir \"${go_package_version}\" \"--url=https://arvados.org\" \"--license=GNU Affero General Public License, version 3.0\" \"--description=${description}\" \"${switches[@]}\"\n}\n\n# Usage: package_go_so lib/foo arvados_foo.so arvados-foo deb amd64 \"Arvados foo library\"\npackage_go_so() {\n    local src_path=\"$1\"; shift\n    local sofile=\"$1\"; shift\n    local pkg=\"$1\"; shift\n    local package_format=\"$1\"; shift\n    local target_arch=\"$1\"; shift # supported: amd64\n    local description=\"$1\"; shift\n\n    if [[ -n \"$ONLY_BUILD\" ]] && [[ \"$pkg\" != \"$ONLY_BUILD\" ]]; then\n      debug_echo -e \"Skipping build of $pkg package.\"\n      return 0\n    fi\n\n    debug_echo \"package_go_so $src_path as $pkg\"\n\n    calculate_go_package_version go_package_version $src_path\n    cd $WORKSPACE/packages/$TARGET\n    test_package_presence $pkg $go_package_version go || return 1\n    cd $WORKSPACE/$src_path\n    go build -buildmode=c-shared -o ${GOPATH}/bin/${sofile}\n    cd $WORKSPACE/packages/$TARGET\n    local -a fpmargs=(\n        \"--url=https://arvados.org\"\n        \"--license=Apache License, Version 2.0\"\n        \"--description=${description}\"\n        \"$WORKSPACE/apache-2.0.txt=/usr/share/doc/$pkg/apache-2.0.txt\"\n    )\n    if [[ -e \"$WORKSPACE/$src_path/pam-configs-arvados\" ]]; then\n        fpmargs+=(\"$WORKSPACE/$src_path/pam-configs-arvados=/usr/share/doc/$pkg/pam-configs-arvados-go\")\n    fi\n    if [[ -e \"$WORKSPACE/$src_path/README\" ]]; then\n        fpmargs+=(\"$WORKSPACE/$src_path/README=/usr/share/doc/$pkg/README\")\n    fi\n    fpm_build \"${WORKSPACE}/${src_path}\" \"$GOPATH/bin/${sofile}=/usr/lib/${sofile}\" \"${pkg}\" dir \"${go_package_version}\" \"${fpmargs[@]}\"\n}\n\ndefault_iteration() {\n    if [[ -n \"$ARVADOS_BUILDING_VERSION\" ]]; then\n        echo \"$ARVADOS_BUILDING_ITERATION\"\n        return\n    fi\n    local package_name=\"$1\"; shift\n    local package_version=\"$1\"; shift\n    local package_type=\"$1\"; shift\n    local iteration=1\n    if [[ $package_version =~ ^0\\.1\\.([0-9]{14})(\\.|$) ]] && \\\n           [[ ${BASH_REMATCH[1]} -le 
$LICENSE_PACKAGE_TS ]]; then\n        iteration=2\n    fi\n    echo $iteration\n}\n\n_build_rails_package_scripts() {\n    local pkgname=\"$1\"; shift\n    local destdir=\"$1\"; shift\n    local srcdir=\"$RUN_BUILD_PACKAGES_PATH/rails-package-scripts\"\n    for scriptname in postinst prerm postrm; do\n        cat \"$srcdir/$pkgname.sh\" \"$srcdir/$scriptname.sh\" \\\n            >\"$destdir/$scriptname\" || return $?\n    done\n}\n\nrails_package_version() {\n    local pkgname=\"$1\"; shift\n    local srcdir=\"$1\"; shift\n    if [[ -n \"$ARVADOS_BUILDING_VERSION\" ]]; then\n        echo \"$ARVADOS_BUILDING_VERSION\"\n        return\n    fi\n    local version=\"$(version_from_git)\"\n    if [ $pkgname = \"arvados-api-server\" ] ; then\n        calculate_go_package_version version cmd/arvados-server \"$srcdir\"\n    fi\n    echo $version\n}\n\ntest_rails_package_presence() {\n  local pkgname=\"$1\"; shift\n  local srcdir=\"$1\"; shift\n\n  if [[ -n \"$ONLY_BUILD\" ]] && [[ \"$pkgname\" != \"$ONLY_BUILD\" ]] ; then\n    return 1\n  fi\n\n  tmppwd=`pwd`\n\n  cd $srcdir\n\n  local version=\"$(rails_package_version \"$pkgname\" \"$srcdir\")\"\n\n  cd $tmppwd\n\n  test_package_presence $pkgname $version rails \"$RAILS_PACKAGE_ITERATION\"\n}\n\nget_complete_package_name() {\n  # if the errexit flag is set, unset it until this function returns\n  # otherwise, the shift calls below will abort the program if optional arguments are not supplied\n  if [ -o errexit ]; then\n    set +e\n    trap 'set -e' RETURN\n  fi\n  # $__returnvar has the nameref attribute set, which means it is a reference\n  # to another variable that is passed in as the first argument to this function.\n  # see https://www.gnu.org/software/bash/manual/html_node/Shell-Parameters.html\n  local -n __returnvar=\"$1\"; shift\n  local pkgname=\"$1\"; shift\n  local version=\"$1\"; shift\n  local pkgtype=\"$1\"; shift\n  local iteration=\"$1\"; shift\n  local arch=\"$1\"; shift\n  if [[ \"$iteration\" == \"\" ]]; then\n      iteration=\"$(default_iteration \"$pkgname\" \"$version\" \"$pkgtype\")\"\n  fi\n\n  if [[ \"$arch\" == \"\" ]]; then\n    native_arch=$(get_native_arch)\n    rpm_native_arch=\"x86_64\"\n    rpm_architecture=\"$rpm_native_arch\"\n    deb_architecture=\"$native_arch\"\n\n    if [[ \"$pkgtype\" =~ ^(src)$ ]]; then\n      rpm_architecture=\"noarch\"\n      deb_architecture=\"all\"\n    fi\n  else\n    rpm_architecture=$arch\n    deb_architecture=$arch\n  fi\n\n  local complete_pkgname=\"${pkgname}_$version${iteration:+-$iteration}_$deb_architecture.deb\"\n  if [[ \"$FORMAT\" == \"rpm\" ]]; then\n      # rpm packages get iteration 1 if we don't supply one\n      iteration=${iteration:-1}\n      complete_pkgname=\"$pkgname-$version-${iteration}.$rpm_architecture.rpm\"\n  fi\n  __returnvar=${complete_pkgname}\n}\n\n# Test if the package already exists, if not return 0, if it does return 1\ntest_package_presence() {\n    local pkgname=\"$1\"; shift\n    local version=\"$1\"; shift\n    local pkgtype=\"$1\"; shift\n    local iteration=\"$1\"; shift\n    local arch=\"$1\"; shift\n    if [[ -n \"$ONLY_BUILD\" ]] && [[ \"$pkgname\" != \"$ONLY_BUILD\" ]] ; then\n        return 1\n    fi\n\n    local full_pkgname\n    get_complete_package_name full_pkgname \"$pkgname\" \"$version\" \"$pkgtype\" \"$iteration\" \"$arch\"\n\n    # See if we can skip building the package, only if it already exists in the\n    # processed/ directory. 
If so, move it back to the packages directory to make\n    # sure it gets picked up by the test and/or upload steps.\n    # Get the list of packages from the repos\n\n    local pkg_url\n    if [[ \"$FORCE_BUILD\" == \"1\" ]]; then\n      echo \"Package $full_pkgname build forced with --force-build, building\"\n      return 0\n    elif [[ \"$FORMAT\" == \"deb\" ]]; then\n      local codename\n      case \"$TARGET\" in\n          debian12) codename=bookworm ;;\n          ubuntu2204) codename=jammy ;;\n          ubuntu2404) codename=noble ;;\n          *)\n              echo \"FIXME: Don't know deb URL path for $TARGET, building\"\n              return 0\n              ;;\n      esac\n      local repo_subdir\n      if [ ${pkgname:0:3} = \"lib\" ]; then\n        repo_subdir=${pkgname:0:4}\n      else\n        repo_subdir=${pkgname:0:1}\n      fi\n      pkg_url=\"http://apt.arvados.org/$codename/pool/main/$repo_subdir/$pkgname/$full_pkgname\"\n    else\n      local rpm_root\n      case \"$TARGET\" in\n        rocky8 | rocky9 | rocky10 ) rpm_root=\"RHEL/${TARGET#rocky}/dev\" ;;\n        *)\n          echo \"FIXME: Don't know RPM URL path for $TARGET, building\"\n          return 0\n          ;;\n      esac\n      pkg_url=\"https://rpm.arvados.org/$rpm_root/$arch/$full_pkgname\"\n    fi\n\n    if curl -fs -o \"$WORKSPACE/packages/$TARGET/$full_pkgname\" \"$pkg_url\"; then\n      echo \"Package $full_pkgname exists upstream, not rebuilding, downloading instead!\"\n      return 1\n    elif [[ -f \"$WORKSPACE/packages/$TARGET/processed/$full_pkgname\" ]]; then\n      echo \"Package $full_pkgname exists, not rebuilding!\"\n      return 1\n    else\n      echo \"Package $full_pkgname not found, building\"\n      return 0\n    fi\n}\n\nhandle_rails_package() {\n    local pkgname=\"$1\"; shift\n\n    if [[ -n \"$ONLY_BUILD\" ]] && [[ \"$pkgname\" != \"$ONLY_BUILD\" ]] ; then\n        return 0\n    fi\n    local srcdir=\"$1\"; shift\n    cd \"$srcdir\"\n    local license_path=\"$1\"; shift\n    local version=\"$(rails_package_version \"$pkgname\" \"$srcdir\")\"\n    echo \"$version\" >package-build.version\n    local scripts_dir=\"$(mktemp --tmpdir -d \"$pkgname-XXXXXXXX.scripts\")\" && \\\n    (\n        set -e\n        _build_rails_package_scripts \"$pkgname\" \"$scripts_dir\"\n        cd \"$srcdir\"\n        mkdir -p tmp\n        git rev-parse HEAD >git-commit.version\n        # Prevent `bundle cache` from seeing system-wide gems and skipping\n        # their download. This depends on the Bundler install set up\n        # in the arvados_ruby Ansible role. See there for more background.\n        export GEM_HOME=/opt/arvados-bundler\n        export GEM_PATH=\"$GEM_HOME\"\n        # Please make sure you read `bundle help config` carefully before you\n        # modify any of these settings. 
Some of their names are not intuitive.\n        #\n        # `bundle cache` caches from Git and paths, not just rubygems.org.\n        bundle config set cache_all true\n        # `bundle cache` caches for all platforms listed in `Gemfile.lock`.\n        bundle config set cache_all_platforms true\n        # Avoid loading system-wide gems (although this seems to not work 100%).\n        bundle config set disable_shared_gems true\n        # `bundle cache` only downloads gems, doesn't install them.\n        # Our Rails postinst script does the install step.\n        bundle config set no_install true\n        # Do not install gem sets unnecessary for production.\n        bundle config set without development:test\n\n        bundle cache\n        # Configuration after this point is for the installed package but only\n        # makes sense to set *after* running `bundle cache`.\n        #\n        # Install with deployment settings.\n        bundle config set deployment true\n        # Install gems to a dedicated path that is only used by RailsAPI\n        # (but shared across versions for efficiency).\n        bundle config set path /var/www/arvados-api/shared/vendor_bundle\n    )\n    if [[ 0 != \"$?\" ]] || ! cd \"$WORKSPACE/packages/$TARGET\"; then\n        echo \"ERROR: $pkgname package prep failed\" >&2\n        rm -rf \"$scripts_dir\"\n        EXITCODE=1\n        return 1\n    fi\n    local railsdir=\"/var/www/${pkgname%-server}/current\"\n    local -a pos_args=(\"$srcdir/=$railsdir\" \"$pkgname\" dir \"$version\")\n    local -a switches=(--after-install \"$scripts_dir/postinst\"\n                       --before-remove \"$scripts_dir/prerm\"\n                       --after-remove \"$scripts_dir/postrm\")\n    if [[ -z \"$ARVADOS_BUILDING_VERSION\" ]]; then\n        switches+=(--iteration $RAILS_PACKAGE_ITERATION)\n    fi\n    # For some reason fpm excludes need to not start with /.\n    local exclude_root=\"${railsdir#/}\"\n    for exclude in tmp log coverage Capfile\\* \\\n                       config/deploy\\* \\\n                       config/application.yml \\\n                       config/database.yml \\\n                       \\*.service; do\n        switches+=(-x \"$exclude_root/$exclude\")\n    done\n    fpm_build \"${srcdir}\" \"${pos_args[@]}\" \"${switches[@]}\" \\\n              -x \"$exclude_root/vendor/cache-*\" \\\n              -x \"$exclude_root/vendor/bundle\" \"$@\" \\\n              \"$license_path=$railsdir/$(basename \"$license_path\")\" \\\n              \"$srcdir/arvados-railsapi.service=/lib/systemd/system/arvados-railsapi.service\"\n    rm -rf \"$scripts_dir\"\n}\n\n# Usage: handle_api_server [amd64]\nhandle_api_server () {\n  local target_arch=\"${1:-amd64}\"; shift\n\n  if [[ -n \"$ONLY_BUILD\" ]] && [[ \"$ONLY_BUILD\" != \"arvados-api-server\" ]] ; then\n    debug_echo -e \"Skipping build of arvados-api-server package.\"\n    return 0\n  fi\n\n  native_arch=$(get_native_arch)\n  if [[ \"$target_arch\" != \"$native_arch\" ]]; then\n    echo \"Error: no cross compilation support for Rails yet, can not build arvados-api-server for $ARCH\"\n    echo\n    exit 1\n  fi\n\n  # Build the API server package\n  test_rails_package_presence arvados-api-server \"$WORKSPACE/services/api\"\n  if [[ \"$?\" == \"0\" ]]; then\n    calculate_go_package_version arvados_server_version cmd/arvados-server\n    arvados_server_iteration=$(default_iteration \"arvados-server\" \"$arvados_server_version\" \"go\")\n    handle_rails_package arvados-api-server \"$WORKSPACE/services/api\" 
\\\n        \"$WORKSPACE/agpl-3.0.txt\" --url=\"https://arvados.org\" \\\n        --description=\"Arvados API server - Arvados is a free and open source platform for big data science.\" \\\n        --license=\"GNU Affero General Public License, version 3.0\" --depends \"arvados-server = ${arvados_server_version}-${arvados_server_iteration}\"\n  fi\n}\n\n# Usage: handle_arvados_src\nhandle_arvados_src () {\n  if [[ -n \"$ONLY_BUILD\" ]] && [[ \"$ONLY_BUILD\" != \"arvados-src\" ]] ; then\n    debug_echo -e \"Skipping build of arvados-src package.\"\n    return 0\n  fi\n  # arvados-src\n  (\n      cd \"$WORKSPACE\"\n      COMMIT_HASH=$(format_last_commit_here \"%H\")\n      arvados_src_version=\"$(version_from_git)\"\n\n      cd $WORKSPACE/packages/$TARGET\n      test_package_presence arvados-src \"$arvados_src_version\" src \"\"\n\n      if [[ \"$?\" == \"0\" ]]; then\n        cd \"$WORKSPACE\"\n        SRC_BUILD_DIR=$(mktemp -d)\n        # mktemp creates the directory with 0700 permissions by default\n        chmod 755 $SRC_BUILD_DIR\n        git clone $DASHQ_UNLESS_DEBUG \"$WORKSPACE/.git\" \"$SRC_BUILD_DIR\"\n        cd \"$SRC_BUILD_DIR\"\n\n        # go into detached-head state\n        git checkout $DASHQ_UNLESS_DEBUG \"$COMMIT_HASH\"\n        echo \"$COMMIT_HASH\" >git-commit.version\n\n        cd $WORKSPACE/packages/$TARGET\n        fpm_build \"$WORKSPACE\" $SRC_BUILD_DIR/=/usr/local/arvados/src arvados-src 'dir' \"$arvados_src_version\" \"--exclude=usr/local/arvados/src/.git\" \"--url=https://arvados.org\" \"--license=GNU Affero General Public License, version 3.0\" \"--description=The Arvados source code\" \"--architecture=all\"\n\n        rm -rf \"$SRC_BUILD_DIR\"\n      fi\n  )\n}\n\nsetup_build_virtualenv() {\n    PYTHON_BUILDROOT=\"$(mktemp --directory --tmpdir pybuild.XXXXXXXX)\"\n    \"$PYTHON3_EXECUTABLE\" -m venv \"$PYTHON_BUILDROOT/venv\"\n    \"$PYTHON_BUILDROOT/venv/bin/pip\" install -r \"$WORKSPACE/build/requirements.build-packages.txt\"\n    mkdir \"$PYTHON_BUILDROOT/wheelhouse\"\n}\n\n# Build python packages with a virtualenv built-in\n# Usage: fpm_build_virtualenv arvados-python-client sdk/python [deb|rpm] [amd64]\nfpm_build_virtualenv () {\n  local pkg=$1; shift\n  local pkg_dir=$1; shift\n  local package_format=\"$1\"; shift\n  local target_arch=\"${1:-amd64}\"; shift\n\n  fpm_build_virtualenv_worker \"$pkg\" \"$pkg_dir\" \"$package_format\" amd64 amd64\n}\n\n# Build python packages with a virtualenv built-in\n# Usage: fpm_build_virtualenv_worker arvados-python-client sdk/python python3 [deb|rpm] [amd64] [amd64]\nfpm_build_virtualenv_worker () {\n  PKG=$1; shift\n  PKG_DIR=$1; shift\n  local package_format=\"$1\"; shift\n  local native_arch=\"${1:-amd64}\"; shift\n  local target_arch=${1:-amd64}; shift\n\n  # Set up\n  STDOUT_IF_DEBUG=/dev/null\n  STDERR_IF_DEBUG=/dev/null\n  DASHQ_UNLESS_DEBUG=-q\n  if [[ \"$DEBUG\" != \"0\" ]]; then\n      STDOUT_IF_DEBUG=/dev/stdout\n      STDERR_IF_DEBUG=/dev/stderr\n      DASHQ_UNLESS_DEBUG=\n  fi\n  if [[ \"$ARVADOS_BUILDING_ITERATION\" == \"\" ]]; then\n    ARVADOS_BUILDING_ITERATION=1\n  fi\n\n  PACKAGE=\"$PKG_DIR\"\n  PACKAGE_PREFIX=$PYTHON3_PKG_PREFIX\n  if [[ \"$PKG\" != \"arvados-docker-cleaner\" ]]; then\n    PYTHON_PKG=$PACKAGE_PREFIX-$PKG\n  else\n    # Exception to our package naming convention\n    PYTHON_PKG=$PKG\n  fi\n\n  # We must always add a wheel to our repository, even if we're not building\n  # this distro package, because it might be a dependency for a later\n  # package we do build.\n  if [[ \"$PKG_DIR\" 
=~ ^.=[0-9]+\\. ]]; then\n      # Not source to build, but a version to download.\n      # The rest of the function expects a filesystem path, so set one afterwards.\n      \"$PYTHON_BUILDROOT/venv/bin/pip\" download --dest=\"$PYTHON_BUILDROOT/wheelhouse\" \"$PKG$PKG_DIR\" \\\n          && PKG_DIR=\"$PYTHON_BUILDROOT/nonexistent\"\n  else\n      # Make PKG_DIR absolute.\n      PKG_DIR=\"$(env -C \"$WORKSPACE\" readlink -e \"$PKG_DIR\")\"\n      \"$PYTHON_BUILDROOT/venv/bin/python\" -m build --outdir=\"$PYTHON_BUILDROOT/wheelhouse\" \"$PKG_DIR\"\n  fi\n  if [[ $? -ne 0 ]]; then\n    printf \"Error, unable to download/build wheel for %s @ %s\\n\" \"$PKG\" \"$PKG_DIR\"\n    exit 1\n  fi\n\n  if [[ -n \"$ONLY_BUILD\" ]] && [[ \"$PYTHON_PKG\" != \"$ONLY_BUILD\" ]] && [[ \"$PKG\" != \"$ONLY_BUILD\" ]]; then\n    return 0\n  elif ! \"$PYTHON_BUILDROOT/venv/bin/piprepo\" build \"$PYTHON_BUILDROOT/wheelhouse\"; then\n    printf \"Error, unable to update local wheel repository\\n\"\n    exit 1\n  fi\n\n  local venv_dir=\"/usr/lib/$PYTHON_PKG\"\n  echo \"Creating virtualenv...\"\n  if ! \"$PYTHON3_EXECUTABLE\" -m venv \"$venv_dir\"; then\n    printf \"Error, unable to run\\n  %s -m venv %s\\n\" \"$PYTHON3_EXECUTABLE\" \"$venv_dir\"\n    exit 1\n  # We must have the dependency resolver introduced in late 2020 for the rest\n  # of our install process to work.\n  # <https://blog.python.org/2020/11/pip-20-3-release-new-resolver.html>\n  elif ! \"$venv_dir/bin/pip\" install \"pip>=20.3\"; then\n    printf \"Error, unable to run\\n  %s/bin/pip install 'pip>=20.3'\\n\" \"$venv_dir\"\n    exit 1\n  fi\n\n  local pip_wheel=\"$(ls --sort=time --reverse \"$PYTHON_BUILDROOT/wheelhouse/$(echo \"$PKG\" | sed s/-/_/g)-\"*.whl | tail -n1)\"\n  if [[ -z \"$pip_wheel\" ]]; then\n    printf \"Error, unable to find built wheel for $PKG\\n\"\n    exit 1\n  elif ! \"$venv_dir/bin/pip\" install $DASHQ_UNLESS_DEBUG $CACHE_FLAG --extra-index-url=\"file://$PYTHON_BUILDROOT/wheelhouse/simple\" \"$pip_wheel\"; then\n    printf \"Error, unable to run\n  %s/bin/pip install $DASHQ_UNLESS_DEBUG $CACHE_FLAG --extra-index-url=file://%s %s\n\" \"$venv_dir\" \"$PYTHON_BUILDROOT/wheelhouse/simple\" \"$pip_wheel\"\n    exit 1\n  fi\n\n  # Determine the package version from the wheel\n  PYTHON_VERSION=\"$(\"$venv_dir/bin/python\" \"$WORKSPACE/build/pypkg_info.py\" metadata \"$PKG\" Version)\"\n  UNFILTERED_PYTHON_VERSION=\"$(echo \"$PYTHON_VERSION\" | sed 's/\\.dev/~dev/; s/\\([0-9]\\)rc/\\1~rc/')\"\n\n  # See if we actually need to build this package; does it exist already?\n  # We can't do this earlier than here, because we need PYTHON_VERSION.\n  if ! 
test_package_presence \"$PYTHON_PKG\" \"$UNFILTERED_PYTHON_VERSION\" python3 \"$ARVADOS_BUILDING_ITERATION\" \"$target_arch\"; then\n    return 0\n  fi\n  echo \"Building $package_format ($target_arch) package for $PKG from $PKG_DIR\"\n\n  local lic_key=\"$(\"$venv_dir/bin/python3\" \"$WORKSPACE/build/pypkg_info.py\" metadata \"$PKG\" License-File)\"\n  local lic_desc=\"${LICENSE_FILE_NAME_MAP[$lic_key]}\"\n  if [[ -z \"$lic_desc\" ]]; then\n      echo \"Error, unable to determine license metadata for $PKG\" >&2\n      exit 1\n  fi\n  # Using `env -C` sets the directory where the package is built.\n  # Using `fpm --chdir` sets the root directory for source arguments.\n  declare -a COMMAND_ARR=(\n      env -C \"$PYTHON_BUILDROOT\" fpm\n      --chdir=\"$venv_dir\"\n      --name=\"$PYTHON_PKG\"\n      --version=\"$UNFILTERED_PYTHON_VERSION\"\n      --input-type=dir\n      --output-type=\"$package_format\"\n      --depends=\"$PYTHON3_PACKAGE\"\n      --iteration=\"$ARVADOS_BUILDING_ITERATION\"\n      --replaces=\"python-$PKG\"\n      --url=\"https://arvados.org\"\n      --license=\"$lic_desc\"\n  )\n  # Append fpm flags corresponding to Python package metadata.\n  readarray -d \"\" -O \"${#COMMAND_ARR[@]}\" -t COMMAND_ARR < \\\n            <(\"$venv_dir/bin/python3\" \"$WORKSPACE/build/pypkg_info.py\" \\\n                                      --delimiter=\\\\0 --format=fpm \\\n                                      metadata \"$PKG\" Summary)\n\n  if [[ -n \"$target_arch\" ]] && [[ \"$target_arch\" != \"amd64\" ]]; then\n    COMMAND_ARR+=(\"-a$target_arch\")\n  fi\n\n  if [[ \"$MAINTAINER\" != \"\" ]]; then\n    COMMAND_ARR+=('--maintainer' \"$MAINTAINER\")\n  fi\n\n  if [[ \"$VENDOR\" != \"\" ]]; then\n    COMMAND_ARR+=('--vendor' \"$VENDOR\")\n  fi\n\n  if [[ \"$DEBUG\" != \"0\" ]]; then\n    COMMAND_ARR+=('--verbose' '--log' 'info')\n  fi\n\n  systemd_unit=\"$PKG_DIR/$PKG.service\"\n  if [[ -e \"${systemd_unit}\" ]]; then\n    COMMAND_ARR+=('--after-install' \"${WORKSPACE}/build/go-python-package-scripts/postinst\")\n    COMMAND_ARR+=('--before-remove' \"${WORKSPACE}/build/go-python-package-scripts/prerm\")\n  fi\n\n  case \"$package_format\" in\n      deb)\n          COMMAND_ARR+=(\n              # Avoid warning\n              --deb-no-default-config-files\n          ) ;;\n      rpm)\n          COMMAND_ARR+=(\n              # Conflict with older packages we used to publish\n              --conflicts \"rh-python36-python-$PKG\"\n              # Do not generate /usr/lib/.build-id links on RH8+\n              # (otherwise our packages conflict with platform-python)\n              --rpm-rpmbuild-define \"_build_id_links none\"\n          ) ;;\n  esac\n\n  # Append --depends X and other arguments specified by fpm-info.sh in\n  # the package source dir. These are added last so they can override\n  # the arguments added by this script.\n  declare -a fpm_args=()\n  declare -a fpm_depends=()\n\n  fpminfo=\"$PKG_DIR/fpm-info.sh\"\n  if [[ -e \"$fpminfo\" ]]; then\n    echo \"Loading fpm overrides from $fpminfo\"\n    if ! 
source \"$fpminfo\"; then\n      echo \"Error, unable to source $WORKSPACE/$PKG_DIR/fpm-info.sh for $PKG\"\n      exit 1\n    fi\n  fi\n\n  for i in \"${fpm_depends[@]}\"; do\n    COMMAND_ARR+=('--depends' \"$i\")\n  done\n\n  # make sure the systemd service file ends up in the right place\n  # used by arvados-docker-cleaner\n  if [[ -e \"${systemd_unit}\" ]]; then\n    COMMAND_ARR+=(\"share/doc/$PKG/$PKG.service=/lib/systemd/system/$PKG.service\")\n  fi\n\n  COMMAND_ARR+=(\"${fpm_args[@]}\")\n\n  while read -d \"\" binpath; do\n      COMMAND_ARR+=(\"$binpath=/usr/$binpath\")\n  done < <(\"$venv_dir/bin/python3\" \"$WORKSPACE/build/pypkg_info.py\" --delimiter=\\\\0 binfiles \"$PKG\")\n\n  # the python3-arvados-cwl-runner package comes with cwltool, expose that version\n  if [[ \"$PKG\" == arvados-cwl-runner ]]; then\n    COMMAND_ARR+=(\"bin/cwltool=/usr/bin/cwltool\")\n  fi\n\n  COMMAND_ARR+=(\".=$venv_dir\")\n\n  debug_echo -e \"\\n${COMMAND_ARR[@]}\\n\"\n\n  FPM_RESULTS=$(\"${COMMAND_ARR[@]}\")\n  FPM_EXIT_CODE=$?\n\n  # if something went wrong and debug is off, print out the fpm command that errored\n  if ! fpm_verify $FPM_EXIT_CODE $FPM_RESULTS && [[ \"$STDOUT_IF_DEBUG\" == \"/dev/null\" ]]; then\n    echo \"fpm returned an error executing the command:\"\n    echo\n    echo -e \"\\n${COMMAND_ARR[@]}\\n\"\n  else\n    ls \"$PYTHON_BUILDROOT\"/*.\"$package_format\"\n    mv \"$PYTHON_BUILDROOT\"/*.\"$package_format\" \"$WORKSPACE/packages/$TARGET/\"\n  fi\n  echo\n}\n\n# Build packages for everything\nfpm_build() {\n  # Source dir where fpm-info.sh (if any) will be found.\n  SRC_DIR=$1\n  shift\n  # The package source.  Depending on the source type, this can be a\n  # path, or the name of the package in an upstream repository (e.g.,\n  # pip).\n  PACKAGE=$1\n  shift\n  # The name of the package to build.\n  PACKAGE_NAME=$1\n  shift\n  # The type of source package.  Passed to fpm -s.  Default \"dir\".\n  PACKAGE_TYPE=${1:-dir}\n  shift\n  # Optional: the package version number.  Passed to fpm -v.\n  VERSION=$1\n  shift\n\n  if [[ -n \"$ONLY_BUILD\" ]] && [[ \"$PACKAGE_NAME\" != \"$ONLY_BUILD\" ]] && [[ \"$PACKAGE\" != \"$ONLY_BUILD\" ]] ; then\n      return 0\n  fi\n\n  local default_iteration_value=\"$(default_iteration \"$PACKAGE\" \"$VERSION\" \"$PACKAGE_TYPE\")\"\n\n  declare -a COMMAND_ARR=(\"fpm\" \"-s\" \"$PACKAGE_TYPE\" \"-t\" \"$FORMAT\")\n  if [ python = \"$PACKAGE_TYPE\" ] && [ deb = \"$FORMAT\" ]; then\n      # Dependencies are built from Python package metadata.  Since that\n      # will never refer to Debian package iterations, it doesn't make sense\n      # to enforce those in the .deb dependencies.\n      COMMAND_ARR+=(--deb-ignore-iteration-in-dependencies)\n  fi\n\n  if [[ \"$DEBUG\" != \"0\" ]]; then\n    COMMAND_ARR+=('--verbose' '--log' 'info')\n  fi\n\n  if [[ -n \"$PACKAGE_NAME\" ]]; then\n    COMMAND_ARR+=('-n' \"$PACKAGE_NAME\")\n  fi\n\n  if [[ \"$MAINTAINER\" != \"\" ]]; then\n    COMMAND_ARR+=('--maintainer' \"$MAINTAINER\")\n  fi\n\n  if [[ \"$VENDOR\" != \"\" ]]; then\n    COMMAND_ARR+=('--vendor' \"$VENDOR\")\n  fi\n\n  if [[ \"$VERSION\" != \"\" ]]; then\n    COMMAND_ARR+=('-v' \"$VERSION\")\n  fi\n  if [[ -n \"$default_iteration_value\" ]]; then\n      # We can always add an --iteration here.  If another one is specified in $@,\n      # that will take precedence, as desired.\n      COMMAND_ARR+=(--iteration \"$default_iteration_value\")\n  fi\n\n  # Append --depends X and other arguments specified by fpm-info.sh in\n  # the package source dir. 
  declare -a fpm_args=()\n  declare -a build_depends=()\n  declare -a fpm_depends=()\n  declare -a fpm_conflicts=()\n  declare -a fpm_exclude=()\n  if [[ ! -d \"$SRC_DIR\" ]]; then\n      echo >&2 \"BUG: looking in wrong dir for fpm-info.sh: $SRC_DIR\"\n      exit 1\n  fi\n  fpminfo=\"${SRC_DIR}/fpm-info.sh\"\n  if [[ -e \"$fpminfo\" ]]; then\n      debug_echo \"Loading fpm overrides from $fpminfo\"\n      source \"$fpminfo\"\n  fi\n  for pkg in \"${build_depends[@]}\"; do\n      if [[ $TARGET =~ debian|ubuntu ]]; then\n          pkg_deb=$(ls \"$WORKSPACE/packages/$TARGET/${pkg}_\"*.deb | sort -rg | awk 'NR==1')\n          if [[ -e $pkg_deb ]]; then\n              echo \"Installing build_dep $pkg from $pkg_deb\"\n              dpkg -i \"$pkg_deb\"\n          else\n              echo \"Attempting to install build_dep $pkg using apt-get\"\n              apt-get install -y \"$pkg\"\n          fi\n          apt-get -y -f install\n      else\n          pkg_rpm=$(ls \"$WORKSPACE/packages/$TARGET/$pkg\"-[0-9]*.rpm | sort -rg | awk 'NR==1')\n          if [[ -e $pkg_rpm ]]; then\n              echo \"Installing build_dep $pkg from $pkg_rpm\"\n              rpm -i \"$pkg_rpm\"\n          else\n              echo \"Attempting to install build_dep $pkg\"\n              rpm -i \"$pkg\"\n          fi\n      fi\n  done\n  for i in \"${fpm_depends[@]}\"; do\n    COMMAND_ARR+=('--depends' \"$i\")\n  done\n  for i in \"${fpm_conflicts[@]}\"; do\n    COMMAND_ARR+=('--conflicts' \"$i\")\n  done\n  for i in \"${fpm_exclude[@]}\"; do\n    COMMAND_ARR+=('--exclude' \"$i\")\n  done\n\n  COMMAND_ARR+=(\"${fpm_args[@]}\")\n\n  # Append remaining function arguments directly to fpm's command line.\n  for i; do\n    COMMAND_ARR+=(\"$i\")\n  done\n\n  COMMAND_ARR+=(\"$PACKAGE\")\n\n  debug_echo -e \"\\n${COMMAND_ARR[@]}\\n\"\n\n  FPM_RESULTS=$(\"${COMMAND_ARR[@]}\")\n  FPM_EXIT_CODE=$?\n  echo \"fpm: exit code $FPM_EXIT_CODE\" >>$STDOUT_IF_DEBUG\n  echo \"$FPM_RESULTS\" >>$STDOUT_IF_DEBUG\n\n  fpm_verify $FPM_EXIT_CODE $FPM_RESULTS\n\n  # if something went wrong and debug is off, print out the fpm command that errored\n  if [[ 0 -ne $? 
]] && [[ \"$STDOUT_IF_DEBUG\" == \"/dev/null\" ]]; then\n    echo -e \"\\n${COMMAND_ARR[@]}\\n\"\n  fi\n}\n\n# verify build results\nfpm_verify () {\n  FPM_EXIT_CODE=$1\n  shift\n  FPM_RESULTS=$@\n\n  FPM_PACKAGE_NAME=''\n  if [[ $FPM_RESULTS =~ ([A-Za-z0-9_\\.~-]*\\.)(deb|rpm) ]]; then\n    FPM_PACKAGE_NAME=${BASH_REMATCH[1]}${BASH_REMATCH[2]}\n  fi\n\n  if [[ \"$FPM_PACKAGE_NAME\" == \"\" ]]; then\n    EXITCODE=1\n    echo\n    echo \"Error: $PACKAGE: Unable to figure out package name from fpm results:\"\n    echo\n    echo $FPM_RESULTS\n    echo\n    return 1\n  elif [[ \"$FPM_RESULTS\" =~ \"File already exists\" ]]; then\n    echo \"Package $FPM_PACKAGE_NAME exists, not rebuilding\"\n    return 0\n  elif [[ 0 -ne \"$FPM_EXIT_CODE\" ]]; then\n    EXITCODE=1\n    echo \"Error building package for $1:\\n $FPM_RESULTS\"\n    return 1\n  fi\n}\n\ninstall_package() {\n  PACKAGES=$@\n  if [[ \"$FORMAT\" == \"deb\" ]]; then\n    $SUDO apt-get install $PACKAGES --yes\n  elif [[ \"$FORMAT\" == \"rpm\" ]]; then\n    $SUDO yum -q -y install $PACKAGES\n  fi\n}\n\ntitle() {\n    printf '%s %s\\n' \"=======\" \"$1\"\n}\n\ncheckexit() {\n    if [[ \"$1\" != \"0\" ]]; then\n        title \"$2 -- FAILED\"\n        failures+=(\"$2 (`timer`)\")\n    else\n        successes+=(\"$2 (`timer`)\")\n    fi\n}\n\ntimer_reset() {\n    t0=$SECONDS\n}\n\ntimer() {\n    if [[ -n \"$t0\" ]]; then\n        echo -n \"$(($SECONDS - $t0))s\"\n    fi\n}\n\nreport_outcomes() {\n    for x in \"${successes[@]}\"\n    do\n        echo \"Pass: $x\"\n    done\n\n    if [[ ${#failures[@]} == 0 ]]\n    then\n        if [[ ${#successes[@]} != 0 ]]; then\n           echo \"All test suites passed.\"\n        fi\n    else\n        echo \"Failures (${#failures[@]}):\"\n        for x in \"${failures[@]}\"\n        do\n            echo \"Fail: $x\"\n        done\n    fi\n}\n"
  },
  {
    "path": "build/run-tests.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nCOLUMNS=80\n. `dirname \"$(readlink -f \"$0\")\"`/run-library.sh\n\nread -rd \"\\000\" helpmessage <<EOF\n$(basename $0): Install and test Arvados components.\n\nExit non-zero if any tests fail.\n\nSyntax:\n        $0 [options]\n\nOptions:\n\n--skip FOO     Do not test the FOO component.\n--skip sanity  Skip initial dev environment sanity checks.\n--skip install Do not run any install steps. Just run tests.\n               You should provide GOPATH, GEMHOME, and VENVDIR options\n               from a previous invocation if you use this option.\n--only FOO     Do not test anything except the FOO component. If given\n               more than once, all specified test suites are run.\n--temp DIR     Install components and dependencies under DIR instead of\n               making a new temporary directory. Implies --leave-temp.\n--leave-temp   Do not remove GOPATH, virtualenv, and other temp dirs at exit.\n               Instead, show the path to give as --temp to reuse them in\n               subsequent invocations.\n--repeat N     Repeat each install/test step until it succeeds N times.\n--retry        Prompt to retry if an install or test suite fails.\n--only-install Run specific install step. If given more than once,\n               all but the last are ignored.\n--short        Skip (or scale down) some slow tests.\n--interactive  Set up, then prompt for test/install steps to perform.\nservices/api_test=\"TEST=test/functional/arvados/v1/collections_controller_test.rb\"\n               Restrict apiserver tests to the given file\nsdk/python_test=\"tests/test_api.py::ArvadosApiTest\"\n               Restrict Python SDK tests to the given class\nlib/dispatchcloud_test=\"-check.vv\"\n               Show all log messages, even when tests pass (also works\n               with services/keepstore_test etc.)\nARVADOS_DEBUG=1\n               Print more debug messages\nARVADOS_...=...\n               Set other ARVADOS_* env vars (note ARVADOS_* vars are\n               removed from the environment by this script when it\n               starts, so the usual way of passing them will not work)\n\nAssuming \"--skip install\" is not given, all components are installed\ninto \\$GOPATH, \\$VENDIR, and \\$GEMHOME before running any tests. Many\ntest suites depend on other components being installed, and installing\neverything tends to be quicker than debugging dependencies.\n\nEnvironment variables:\n\nWORKSPACE=path Arvados source tree to test.\nCONFIGSRC=path Dir with config.yml file containing PostgreSQL section\n               for use by tests.  
\n# First make sure to remove any ARVADOS_ variables from the calling\n# environment that could interfere with the tests.\nunset $(env | cut -d= -f1 | grep \\^ARVADOS_)\n\n# Reset other variables that could affect our tests' behavior by\n# accident.\nGITDIR=\nGOPATH=\nVENV3DIR=\nPYTHONPATH=\nGEMHOME=\nR_LIBS=\nexport LANG=en_US.UTF-8\n# Many install steps will fail if we're not in a Git checkout, so this is a\n# safe default.\nif [[ -d \"${WORKSPACE:=$(git -C \"$(dirname \"$0\")\" rev-parse --show-toplevel)}\" ]]; then\n    export WORKSPACE\nelse\n    unset WORKSPACE\nfi\n# googleapiclient raises a FutureWarning if you use a recent version with the\n# last supported version of Python. That interferes with tests that check CLI\n# tool stderr. Filter it out, filling in default warnings if needed.\n# <https://docs.python.org/3.10/library/warnings.html#default-warning-filter>\nexport PYTHONWARNINGS=\"ignore::FutureWarning:google.api_core._python_version_support,${PYTHONWARNINGS:-\\\ndefault::DeprecationWarning:__main__\\\n,ignore::DeprecationWarning\\\n,ignore::PendingDeprecationWarning\\\n,ignore::ImportWarning\\\n,ignore::ResourceWarning\\\n}\"\n\n# setup_ruby_environment will set this to the path of the `bundle` executable\n# it installs. This stub will cause commands to fail if they try to run before\n# that.\nBUNDLE=false\n\nshort=\nonly_install=\ntemp=\ntemp_preserve=\n\nignore_sigint=\n\nclear_temp() {\n    if [[ -z \"$temp\" ]]; then\n        # we did not even get as far as making a temp dir\n        :\n    elif [[ -z \"$temp_preserve\" ]]; then\n        # Go creates readonly dirs in the module cache, which cause\n        # \"rm -rf\" to fail unless we chmod first.\n        chmod -R u+w \"$temp\"\n        rm -rf \"$temp\"\n    else\n        echo \"Leaving behind temp dirs in $temp\"\n    fi\n}\n\nfatal() {\n    clear_temp\n    echo >&2 \"Fatal: $* (encountered in ${FUNCNAME[1]} at ${BASH_SOURCE[1]} line ${BASH_LINENO[0]})\"\n    exit 1\n}\n\nexit_cleanly() {\n    trap - INT\n    stop_services\n    rotate_logfile \"$WORKSPACE/services/api/log/\" \"test.log\"\n    report_outcomes\n    clear_temp\n    exit ${#failures[@]}\n}\n\nsanity_checks() {\n    [[ -n \"${skip[sanity]}\" ]] && return 0\n    ( [[ -n \"$WORKSPACE\" ]] && [[ -d \"$WORKSPACE/services\" ]] ) \\\n        || fatal \"WORKSPACE environment variable not set to a source directory (see: $0 --help)\"\n    [[ -z \"$CONFIGSRC\" ]] || [[ -s \"$CONFIGSRC/config.yml\" ]] \\\n        || fatal \"CONFIGSRC is $CONFIGSRC but '$CONFIGSRC/config.yml' is empty or not found (see: $0 --help)\"\n    echo Checking dependencies:\n    echo \"locale: ${LANG}\"\n    [[ \"$(locale charmap)\" = \"UTF-8\" ]] \\\n        || fatal \"Locale '${LANG}' is broken/missing. Try: echo ${LANG} | sudo tee -a /etc/locale.gen && sudo locale-gen\"\n    echo -n 'ruby: '\n    ruby -v \\\n        || fatal \"No ruby. Install >=2.7 from package or source\"\n    echo -n 'go: '\n    go version \\\n        || fatal \"No go binary. See http://golang.org/doc/install\"\n    [[ $(go version) =~ go1.([0-9]+) ]] && [[ ${BASH_REMATCH[1]} -ge 12 ]] \\\n        || fatal \"Go >= 1.12 required. 
See http://golang.org/doc/install\"\n    echo -n 'gcc: '\n    gcc --version | egrep ^gcc \\\n        || fatal \"No gcc. Try: apt-get install build-essential\"\n    echo -n 'fuse.h: '\n    find /usr/include -path '*fuse/fuse.h' | egrep --max-count=1 . \\\n        || fatal \"No fuse/fuse.h. Try: apt-get install libfuse-dev\"\n    echo -n 'virtualenv: '\n    python3 -m venv --help | grep -q '^usage: venv ' \\\n        && echo \"venv module found\" \\\n        || fatal \"No virtualenv. Try: apt-get install python3-venv\"\n    which netstat \\\n        || fatal \"No netstat. Try: apt-get install net-tools\"\n    echo -n 'nginx: '\n    PATH=\"$PATH:/sbin:/usr/sbin:/usr/local/sbin\" nginx -v \\\n        || fatal \"No nginx. Try: apt-get install nginx\"\n    echo -n 'npm: '\n    npm --version \\\n        || fatal \"No npm. Try: wget -O- https://nodejs.org/dist/v14.21.3/node-v14.21.3-linux-x64.tar.xz | sudo tar -C /usr/local -xJf - && sudo ln -s ../node-v14.21.3-linux-x64/bin/{node,npm} /usr/local/bin/\"\n    echo -n 'cadaver: '\n    cadaver --version | grep -w cadaver \\\n          || fatal \"No cadaver. Try: apt-get install cadaver\"\n    echo -n \"jq: \"\n    jq --version ||\n        fatal \"No jq. Try: apt-get install jq\"\n    echo -n 'libcurl curl.h: '\n    find /usr/include -path '*/curl/curl.h' | egrep --max-count=1 . \\\n        || fatal \"No libcurl curl.h. Try: apt-get install libcurl4-gnutls-dev\"\n    echo -n 'libpq libpq-fe.h: '\n    find /usr/include -path '*/postgresql/libpq-fe.h' | egrep --max-count=1 . \\\n        || fatal \"No libpq libpq-fe.h. Try: apt-get install libpq-dev\"\n    echo -n 'libpam pam_appl.h: '\n    find /usr/include -path '*/security/pam_appl.h' | egrep --max-count=1 . \\\n        || fatal \"No libpam pam_appl.h. Try: apt-get install libpam0g-dev\"\n    echo -n 'postgresql: '\n    psql --version || fatal \"No postgresql. Try: apt-get install postgresql postgresql-client-common\"\n    echo -n 'xvfb: '\n    which Xvfb || fatal \"No xvfb. Try: apt-get install xvfb\"\n    echo -n 'singularity: '\n    singularity --version || fatal \"No singularity.\"\n    echo -n 'docker client: '\n    docker --version || echo \"WARNING: No docker client.\"\n    echo -n 'docker server: '\n    docker info --format='{{.ServerVersion}}' || echo \"WARNING: No docker server.\"\n\n    if [[ \"$NEED_SDK_R\" = true ]]; then\n      # R SDK stuff\n      echo -n 'R: '\n      which Rscript || fatal \"No Rscript. Try: apt-get install r-base\"\n      echo -n 'testthat: '\n      Rscript -e \"library('testthat')\" || fatal \"No testthat. Try: apt-get install r-cran-testthat\"\n      # needed for roxygen2, needed for devtools, needed for R sdk\n      pkg-config --exists libxml-2.0 || fatal \"No libxml2. Try: apt-get install libxml2-dev\"\n    fi\n    echo 'procs with /dev/fuse open:'\n    find /proc/*/fd -lname /dev/fuse 2>/dev/null | cut -d/ -f3 | xargs --no-run-if-empty ps -lywww\n    echo 'grep fuse /proc/self/mountinfo:'\n    grep fuse /proc/self/mountinfo\n}\n\nrotate_logfile() {\n  # i.e.  rotate_logfile \"$WORKSPACE/services/api/log/\" \"test.log\"\n  # $BUILD_NUMBER is set by Jenkins if this script is being called as part of a Jenkins run\n  if [[ -f \"$1/$2\" ]]; then\n    THEDATE=`date +%Y%m%d%H%M%S`\n    mv \"$1/$2\" \"$1/$THEDATE-$BUILD_NUMBER-$2\"\n    gzip \"$1/$THEDATE-$BUILD_NUMBER-$2\"\n  fi\n}\n\ncheckpidfile() {\n    svc=\"$1\"\n    pid=\"$(cat \"$WORKSPACE/tmp/${svc}.pid\")\"\n    if [[ -z \"$pid\" ]] || ! 
kill -0 \"$pid\"; then\n        tail $WORKSPACE/tmp/${1}*.log\n        echo \"${svc} pid ${pid} not running\"\n        return 1\n    fi\n    echo \"${svc} pid ${pid} ok\"\n}\n\ncheckhealth() {\n    svc=\"$1\"\n    base=\"$(yq -r \"(.Clusters.zzzzz.Services.$svc.InternalURLs | keys)[0]\" \"$ARVADOS_CONFIG\")\"\n    url=\"$base/_health/ping\"\n    if ! curl -Ss -H \"Authorization: Bearer e687950a23c3a9bceec28c6223a06c79\" \"${url}\" | tee -a /dev/stderr | grep '\"OK\"'; then\n        echo \"${url} failed\"\n        return 1\n    fi\n}\n\ncheckdiscoverydoc() {\n    dd=\"https://${1}/discovery/v1/apis/arvados/v1/rest\"\n    if ! (set -o pipefail; curl -fsk \"$dd\" | grep -q ^{ ); then\n        echo >&2 \"ERROR: could not retrieve discovery doc from RailsAPI at $dd\"\n        tail -v $WORKSPACE/tmp/railsapi.log\n        return 1\n    fi\n    echo \"${dd} ok\"\n}\n\nstart_services() {\n    if [[ -n \"$ARVADOS_TEST_API_HOST\" ]]; then\n        return 0\n    fi\n    echo 'Starting API, controller, keepproxy, keep-web, ws, and nginx ssl proxy...'\n    if [[ ! -d \"$WORKSPACE/services/api/log\" ]]; then\n        mkdir -p \"$WORKSPACE/services/api/log\"\n    fi\n    # Remove empty api.pid file if it exists\n    if [[ -f \"$WORKSPACE/tmp/api.pid\" && ! -s \"$WORKSPACE/tmp/api.pid\" ]]; then\n        rm -f \"$WORKSPACE/tmp/api.pid\"\n    fi\n    all_services_stopped=\n    fail=1\n\n    cd \"$WORKSPACE\" \\\n        && eval $(python3 sdk/python/tests/run_test_server.py start --auth admin) \\\n        && export ARVADOS_TEST_API_HOST=\"$ARVADOS_API_HOST\" \\\n        && export ARVADOS_TEST_API_INSTALLED=\"$$\" \\\n        && checkpidfile api \\\n        && checkdiscoverydoc $ARVADOS_API_HOST \\\n        && eval $(python3 sdk/python/tests/run_test_server.py start_nginx) \\\n        && checkpidfile nginx \\\n        && python3 sdk/python/tests/run_test_server.py start_controller \\\n        && checkpidfile controller \\\n        && checkhealth Controller \\\n        && checkdiscoverydoc $ARVADOS_API_HOST \\\n        && python3 sdk/python/tests/run_test_server.py start_keep_proxy \\\n        && checkpidfile keepproxy \\\n        && python3 sdk/python/tests/run_test_server.py start_keep-web \\\n        && checkpidfile keep-web \\\n        && checkhealth WebDAV \\\n        && python3 sdk/python/tests/run_test_server.py start_ws \\\n        && checkpidfile ws \\\n        && export ARVADOS_TEST_PROXY_SERVICES=1 \\\n        && (env | egrep ^ARVADOS) \\\n        && fail=0\n    if [[ $fail != 0 ]]; then\n        unset ARVADOS_TEST_API_HOST\n    fi\n    return $fail\n}\n\nstop_services() {\n    if [[ -n \"$all_services_stopped\" ]]; then\n        return\n    fi\n    unset ARVADOS_TEST_API_HOST ARVADOS_TEST_PROXY_SERVICES\n    cd \"$WORKSPACE\" \\\n        && python3 sdk/python/tests/run_test_server.py stop_nginx \\\n        && python3 sdk/python/tests/run_test_server.py stop_ws \\\n        && python3 sdk/python/tests/run_test_server.py stop_keep-web \\\n        && python3 sdk/python/tests/run_test_server.py stop_keep_proxy \\\n        && python3 sdk/python/tests/run_test_server.py stop_controller \\\n        && python3 sdk/python/tests/run_test_server.py stop \\\n        && all_services_stopped=1\n    unset ARVADOS_CONFIG\n}\n\ninterrupt() {\n    if [[ -n \"$ignore_sigint\" ]]; then\n        echo >&2 \"ignored SIGINT\"\n        return\n    fi\n    failures+=(\"($(basename $0) interrupted)\")\n    exit_cleanly\n}\ntrap interrupt INT\n\nsetup_ruby_environment() {\n    # When our \"bundle install\"s need to install new 
gems to\n    # satisfy dependencies, we want them to go where \"gem install\n    # --user-install\" would put them. (However, if the caller has\n    # already set GEM_HOME, we assume that's where dependencies\n    # should be installed, and we should leave it alone.)\n\n    if [ -z \"$GEM_HOME\" ]; then\n        user_gempath=\"$(gem env gempath)\"\n        export GEM_HOME=\"${user_gempath%%:*}\"\n    fi\n    PATH=\"$(gem env gemdir)/bin:$PATH\"\n\n    # When we build and install our own gems, we install them in our\n    # $GEMHOME tmpdir, and we want them to be at the front of GEM_PATH and\n    # PATH so integration tests prefer them over other versions that\n    # happen to be installed in $user_gempath, system dirs, etc.\n\n    tmpdir_gem_home=\"$(env - PATH=\"$PATH\" HOME=\"$GEMHOME\" gem env gempath | cut -f1 -d:)\"\n    PATH=\"$tmpdir_gem_home/bin:$PATH\"\n    export GEM_PATH=\"$tmpdir_gem_home:$(gem env gempath)\"\n\n    echo \"Will install dependencies to $(gem env gemdir)\"\n    echo \"Will install bundler and arvados gems to $tmpdir_gem_home\"\n    echo \"Gem search path is GEM_PATH=$GEM_PATH\"\n    gem install --user --no-document --conservative --version '~> 2.5.0' bundler \\\n        || fatal 'install bundler'\n    BUNDLE=\"$(gem contents --version '~> 2.5.0' bundler | grep -E '/(bin|exe)/bundle$' | tail -n1)\"\n    if [[ ! -x \"$BUNDLE\" ]]; then\n        BUNDLE=false\n        fatal \"could not find 'bundle' executable after installation\"\n    fi\n}\n\nwith_test_gemset() {\n    GEM_HOME=\"$tmpdir_gem_home\" GEM_PATH=\"$tmpdir_gem_home\" \"$@\"\n}\n\nsetup_virtualenv() {\n    if [[ -z \"${VENV3DIR:-}\" ]]; then\n        fatal \"setup_virtualenv called before \\$VENV3DIR was set\"\n    elif ! [[ -e \"$VENV3DIR/bin/activate\" ]]; then\n        python3 -m venv \"$VENV3DIR\" || fatal \"virtualenv creation failed\"\n        # Configure pip options we always want to use.\n        \"$VENV3DIR/bin/pip\" config --quiet --site set global.disable-pip-version-check true\n        \"$VENV3DIR/bin/pip\" config --quiet --site set global.no-input true\n        \"$VENV3DIR/bin/pip\" config --quiet --site set global.no-python-version-warning true\n        \"$VENV3DIR/bin/pip\" config --quiet --site set install.progress-bar off\n        # If we didn't have a virtualenv before, we couldn't have started any\n        # services. Set the flag used by stop_services to indicate that.\n        all_services_stopped=1\n    fi\n    . \"$VENV3DIR/bin/activate\" || fatal \"virtualenv activation failed\"\n    # We must have these in place *before* we install the PySDK below.\n    pip install -r \"$WORKSPACE/build/requirements.tests.txt\" ||\n        fatal \"failed to install Python requirements in virtualenv\"\n    # run-tests.sh uses run_test_server.py from the Python SDK.\n    do_install_once sdk/python pip || fatal \"failed to install PySDK in virtualenv\"\n}\n\ninitialize() {\n    sanity_checks\n\n    echo \"WORKSPACE=$WORKSPACE\"\n    cd \"$WORKSPACE\"\n\n    if [[ -z \"$temp\" ]]; then\n        temp=\"$(mktemp -d)\"\n    fi\n\n    # Set up temporary install dirs (unless existing dirs were supplied)\n    for tmpdir in VENV3DIR GOPATH GEMHOME R_LIBS\n    do\n        if [[ -z \"${!tmpdir}\" ]]; then\n            eval \"$tmpdir\"=\"$temp/$tmpdir\"\n        fi\n        if ! 
[[ -d \"${!tmpdir}\" ]]; then\n            mkdir \"${!tmpdir}\" || fatal \"can't create ${!tmpdir} (does $temp exist?)\"\n        fi\n    done\n\n    rm -vf \"${WORKSPACE}/tmp/*.log\"\n\n    export R_LIBS\n\n    export GOPATH\n    # Make sure our compiled binaries under test override anything\n    # else that might be in the environment.\n    export PATH=$GOPATH/bin:$PATH\n\n    # Jenkins config requires that glob tmp/*.log match something. Ensure\n    # that happens even if we don't end up running services that set up\n    # logging.\n    mkdir -p \"${WORKSPACE}/tmp/\" || fatal \"could not mkdir ${WORKSPACE}/tmp\"\n    touch \"${WORKSPACE}/tmp/controller.log\" || fatal \"could not touch ${WORKSPACE}/tmp/controller.log\"\n\n    unset http_proxy https_proxy no_proxy\n\n    setup_ruby_environment\n    setup_virtualenv\n\n    echo \"PATH is $PATH\"\n}\n\ninstall_env() {\n    go mod download || fatal \"Go deps failed\"\n    which goimports >/dev/null || go install golang.org/x/tools/cmd/goimports@latest || fatal \"Go setup failed\"\n}\n\nretry() {\n    remain=\"${repeat}\"\n    while :\n    do\n        if ${@}; then\n            if [[ \"$remain\" -gt 1 ]]; then\n                remain=$((${remain}-1))\n                title \"(repeating ${remain} more times)\"\n            else\n                break\n            fi\n        elif [[ \"$retry\" == 1 ]]; then\n            read -p 'Try again? [Y/n] ' x\n            if [[ \"$x\" != \"y\" ]] && [[ \"$x\" != \"\" ]]\n            then\n                break\n            fi\n        else\n            break\n        fi\n    done\n}\n\ndo_test() {\n    case \"${1}\" in\n        services/workbench2_units | services/workbench2_integration)\n            suite=services/workbench2\n            ;;\n        *)\n            suite=\"${1}\"\n            ;;\n    esac\n    if [[ -n \"${skip[$suite]}\" || \\\n              -n \"${skip[$1]}\" || \\\n              (${#only[@]} -ne 0 && ${only[$suite]} -eq 0 && ${only[$1]} -eq 0) ]]; then\n        return 0\n    fi\n    case \"${1}\" in\n        services/api)\n            stop_services\n            check_arvados_config \"$1\"\n            ;;\n        gofmt \\\n            | arvados_version.py \\\n            | doc \\\n            | lib/boot \\\n            | lib/cli \\\n            | lib/cloud/azure \\\n            | lib/cloud/cloudtest \\\n            | lib/cloud/ec2 \\\n            | lib/cmd \\\n            | lib/dispatchcloud/sshexecutor \\\n            | lib/dispatchcloud/worker \\\n            | lib/install \\\n            | services/workbench2_integration \\\n            | services/workbench2_units \\\n            )\n            check_arvados_config \"$1\"\n            # don't care whether services are running\n            ;;\n        *)\n            check_arvados_config \"$1\"\n            if ! 
start_services; then\n                checkexit 1 \"$1 tests\"\n                title \"test $1 -- failed to start services\"\n                return 1\n            fi\n            ;;\n    esac\n    retry do_test_once ${@}\n}\n\ngo_ldflags() {\n    version=${ARVADOS_VERSION:-$(git log -n1 --format=%H)-dev}\n    echo \"-X git.arvados.org/arvados.git/lib/cmd.version=${version} -X main.version=${version} -s -w\"\n}\n\ndo_test_once() {\n    unset result\n\n    if [[ \"$2\" == pip && -n \"$interactive\" ]]; then\n        # We test out of the virtualenv to test with full build artifacts.\n        # We do this by setting --import-mode=append in pytest.ini.\n        # Install the developer's latest changes to the virtualenv.\n        # We need to do this before we start the test header+timer.\n        do_install_once \"$1\" \"$2\" || return\n    fi\n\n    local -a targs=()\n    case \"$1\" in\n        sdk/cwl )\n            # The CWL conformance/integration tests each take ~30\n            # minutes. Before July 2025 they were outside the standard test\n            # suite, so we deselect them by default for consistency.\n            targs+=(-m \"not integration\")\n\n            # The CWL conformance/integration tests expect keep\n            # servers and crunch-dispatch-local.\n            if ! ( env -C \"$WORKSPACE\" python3 sdk/python/tests/run_test_server.py start_keep \\\n                      && env -C \"$WORKSPACE\" python3 sdk/python/tests/run_test_server.py start_dispatch); then\n                checkexit 1 \"$1 tests\"\n                return 1\n            fi\n            ;;\n    esac\n    # Append the user's arguments to targs, respecting quoted strings.\n    eval \"targs+=(${testargs[$1]})\"\n\n    title \"test $1\"\n    timer_reset\n\n    result=\n    if [[ \"$2\" == \"go\" ]]\n    then\n        covername=\"coverage-$(echo \"$1\" | sed -e 's/\\//_/g')\"\n        coverflags=(\"-covermode=count\" \"-coverprofile=$WORKSPACE/tmp/.$covername.tmp\")\n        if ! compgen -G \"$WORKSPACE/$1/*_test.go\" >/dev/null; then\n            # Go 1.25, when invoked by Go 1.24 via \"toolchain go1.25\"\n            # directive, fails with 'go: no such tool \"covdata\"' when\n            # using $coverflags in a directory that has no tests.  See\n            # https://github.com/golang/go/issues/75031\n            #\n            # Workaround: skip coverflags when 'go test' is a no-op\n            # anyway.\n            coverflags=()\n        fi\n        testflags=()\n        # We do \"go install\" here to catch compilation errors\n        # before trying \"go test\". 
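(For main packages, \"go install\" also refreshes the\n        # version-stamped binary in $GOPATH/bin, which initialize()\n        # prepends to PATH so the binaries under test take precedence\n        # over anything else installed on the system.) 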
Otherwise, coverage-reporting\n        # mode makes Go show the wrong line numbers when reporting\n        # compilation errors.\n        go install -ldflags \"$(go_ldflags)\" \"$WORKSPACE/$1\" && \\\n            cd \"$WORKSPACE/$1\" && \\\n            if [[ \"${#targs[@]}\" -gt 0 ]]\n        then\n            # \"go test -check.vv giturl\" doesn't work, but this\n            # does:\n            go test ${short:+-short} ${testflags[@]} \"${targs[@]}\"\n        else\n            # The above form gets verbose even when testargs is\n            # empty, so use this form in such cases:\n            go test ${short:+-short} ${testflags[@]} ${coverflags[@]} \"git.arvados.org/arvados.git/$1\"\n        fi\n        result=${result:-$?}\n        if [[ -f \"$WORKSPACE/tmp/.$covername.tmp\" ]]\n        then\n            go tool cover -html=\"$WORKSPACE/tmp/.$covername.tmp\" -o \"$WORKSPACE/tmp/$covername.html\"\n            rm \"$WORKSPACE/tmp/.$covername.tmp\"\n        fi\n        [[ $result = 0 ]] && gofmt -e -d *.go\n    elif [[ \"$2\" == \"pip\" ]]\n    then\n        tries=0\n        while :\n        do\n            tries=$((${tries}+1))\n            env -C \"$WORKSPACE/$1\" pytest \"${targs[@]}\"\n            result=$?\n            # pytest uses exit code 2 to mean \"test collection failed.\"\n            # See discussion in FUSE's IntegrationTest and MountTestBase.\n            if [[ ${tries} -lt 3 && ${result} -eq 2 ]]\n            then\n                printf '\\n*****\\n%s tests exited with code 2 -- retrying\\n*****\\n\\n' \"$1\"\n                continue\n            else\n                break\n            fi\n        done\n    elif [[ \"$2\" != \"\" ]]\n    then\n        \"test_$2\"\n    else\n        \"test_$1\"\n    fi\n    result=${result:-$?}\n    checkexit $result \"$1 tests\"\n    title \"test $1 -- `timer`\"\n    if [[ \"$1\" == \"sdk/cwl\" ]]; then\n        env -C \"$WORKSPACE\" python3 sdk/python/tests/run_test_server.py stop_keep\n        env -C \"$WORKSPACE\" python3 sdk/python/tests/run_test_server.py stop_dispatch\n        # Also reset test fixtures that were modified by the dispatcher\n        env -C \"$WORKSPACE\" python3 sdk/python/tests/run_test_server.py database_reset\n    fi\n    return $result\n}\n\ncheck_arvados_config() {\n    if [[ \"$1\" = \"env\" ]] ; then\n        return\n    fi\n    if [[ -z \"$ARVADOS_CONFIG\" ]] ; then\n        cd \"$WORKSPACE\"\n        eval $(python3 sdk/python/tests/run_test_server.py setup_config)\n    fi\n    # Set all PostgreSQL connection variables, and write a .pgpass, to connect\n    # to the test database, so test scripts can write `psql` commands with no\n    # additional configuration.\n    export PGPASSFILE=\"$WORKSPACE/tmp/.pgpass\"\n    export PGDATABASE=\"$(yq -r .Clusters.zzzzz.PostgreSQL.Connection.dbname \"$ARVADOS_CONFIG\")\"\n    export PGHOST=\"$(yq -r .Clusters.zzzzz.PostgreSQL.Connection.host \"$ARVADOS_CONFIG\")\"\n    export PGPORT=\"$(yq -r .Clusters.zzzzz.PostgreSQL.Connection.port \"$ARVADOS_CONFIG\")\"\n    export PGUSER=\"$(yq -r .Clusters.zzzzz.PostgreSQL.Connection.user \"$ARVADOS_CONFIG\")\"\n    local pgpassword=\"$(yq -r .Clusters.zzzzz.PostgreSQL.Connection.password \"$ARVADOS_CONFIG\")\"\n    echo \"$PGHOST:$PGPORT:$PGDATABASE:$PGUSER:$pgpassword\" >\"$PGPASSFILE\"\n    chmod 0600 \"$PGPASSFILE\"\n}\n\ndo_install() {\n    if [[ -n ${skip[\"install_$1\"]} || -n \"${skip[install]}\" || ( -n \"${only_install}\" && \"${only_install}\" != \"${1}\" && \"${only_install}\" != \"${2}\" ) ]]; then\n        return 0\n    
fi\n    check_arvados_config \"$1\"\n    retry do_install_once ${@}\n}\n\ndo_install_once() {\n    title \"install $1\"\n    timer_reset\n\n    result=\n    if [[ \"$2\" == \"go\" ]]\n    then\n        go install -ldflags \"$(go_ldflags)\" \"$WORKSPACE/$1\"\n    elif [[ \"$2\" == \"pip\" ]]\n    then\n        pip install \"$WORKSPACE/$1\"\n    elif [[ \"$2\" != \"\" ]]\n    then\n        \"install_$2\"\n    else\n        \"install_$1\"\n    fi\n    result=${result:-$?}\n    checkexit $result \"$1 install\"\n    title \"install $1 -- `timer`\"\n    return $result\n}\n\nbundle_install_trylocal() {\n    (\n        set -e\n        echo \"(Running bundle install --local. 'could not find package' messages are OK.)\"\n        if ! \"$BUNDLE\" install --local --no-deployment; then\n            echo \"(Running bundle install again, without --local.)\"\n            \"$BUNDLE\" install --no-deployment\n        fi\n        \"$BUNDLE\" package\n    )\n}\n\ninstall_doc() {\n    cd \"$WORKSPACE/doc\" \\\n        && bundle_install_trylocal \\\n        && rm -rf .site\n}\n\ninstall_gem() {\n    gemname=$1\n    srcpath=$2\n    cd \"$WORKSPACE/$srcpath\" \\\n        && bundle_install_trylocal \\\n        && gem build \"$gemname.gemspec\" \\\n        && with_test_gemset gem install --no-document $(ls -t \"$gemname\"-*.gem|head -n1)\n}\n\ninstall_sdk/ruby() {\n    install_gem arvados sdk/ruby\n}\n\ninstall_sdk/ruby-google-api-client() {\n    install_gem arvados-google-api-client sdk/ruby-google-api-client\n}\n\ninstall_contrib/R-sdk() {\n  if [[ \"$NEED_SDK_R\" = true ]]; then\n    env -C \"$WORKSPACE/contrib/R-sdk\" Rscript --vanilla install_deps.R\n  fi\n}\n\ninstall_sdk/cli() {\n    install_gem arvados-cli sdk/cli\n}\n\ninstall_services/login-sync() {\n    install_gem arvados-login-sync services/login-sync\n}\n\ninstall_services/api() {\n    stop_services\n    check_arvados_config \"services/api\"\n    cd \"$WORKSPACE/services/api\" \\\n        && RAILS_ENV=test bundle_install_trylocal \\\n            || return 1\n\n    rm -f config/environments/test.rb\n    cp config/environments/test.rb.example config/environments/test.rb\n\n    # Clear out any lingering postgresql connections to the test\n    # database, so that we can drop it. This assumes the current user\n    # is a postgresql superuser.\n    psql -c \"SELECT pg_terminate_backend (pg_stat_activity.pid::int) FROM pg_stat_activity WHERE pg_stat_activity.datname = '$PGDATABASE';\" 2>/dev/null\n\n    mkdir -p \"$WORKSPACE/services/api/tmp/pids\"\n\n    cert=\"$WORKSPACE/services/api/tmp/self-signed\"\n    if [[ ! 
-e \"$cert.pem\" || \"$(date -r \"$cert.pem\" +%s)\" -lt 1512659226 ]]; then\n        (\n            dir=\"$WORKSPACE/services/api/tmp\"\n            set -e\n            openssl req -newkey rsa:2048 -nodes -subj '/C=US/ST=State/L=City/CN=localhost' -out \"$cert.csr\" -keyout \"$cert.key\" </dev/null\n            openssl x509 -req -in \"$cert.csr\" -signkey \"$cert.key\" -out \"$cert.pem\" -days 3650 -extfile <(printf 'subjectAltName=DNS:localhost,DNS:::1,DNS:0.0.0.0,DNS:127.0.0.1,IP:::1,IP:0.0.0.0,IP:127.0.0.1')\n        ) || return 1\n    fi\n\n    (\n        set -ex\n        cd \"$WORKSPACE/services/api\"\n        export RAILS_ENV=test\n        if bin/rails db:environment:set ; then\n            bin/rake db:drop\n        fi\n        bin/rake db:setup\n        bin/rake db:fixtures:load\n    ) || return 1\n}\n\ninstall_services/workbench2() {\n    cd \"$WORKSPACE/services/workbench2\" \\\n        && make yarn-install ARVADOS_DIRECTORY=\"${WORKSPACE}\"\n}\n\ndo_bundle() {\n    timer_reset\n    (\n        set -x\n        env -C \"$WORKSPACE/services/api\" RAILS_ENV=test \\\n            \"$BUNDLE\" ${@}\n    )\n    checkexit \"$?\" \"services/api bundle ${@}\"\n}\n\ndo_migrate() {\n    timer_reset\n    local task=\"db:migrate\"\n    case \"$1\" in\n        \"\")\n            ;;\n        rollback)\n            task=\"db:rollback\"\n            shift\n            ;;\n        *)\n            task=\"db:migrate:$1\"\n            shift\n            ;;\n    esac\n    check_arvados_config services/api\n    (\n        set -x\n        env -C \"$WORKSPACE/services/api\" RAILS_ENV=test \\\n            \"$BUNDLE\" exec rake $task ${@}\n    )\n    checkexit \"$?\" \"services/api $task\"\n}\n\nmigrate_down_services/api() {\n    echo \"running db:migrate:down\"\n    env -C \"$WORKSPACE/services/api\" RAILS_ENV=test \\\n        \"$BUNDLE\" exec rake db:migrate:down ${testargs[services/api]}\n    checkexit \"$?\" \"services/api db:migrate:down\"\n}\n\ntest_doc() {\n    local arvados_api_host=pirca.arvadosapi.com && \\\n        env -C \"$WORKSPACE/doc\" \\\n        \"$BUNDLE\" exec rake linkchecker \\\n        arvados_api_host=\"$arvados_api_host\" \\\n        arvados_workbench_host=\"https://workbench.$arvados_api_host\" \\\n        baseurl=\"file://$WORKSPACE/doc/.site/\" \\\n        ${testargs[doc]}\n}\n\ntest_gofmt() {\n    cd \"$WORKSPACE\" || return 1\n    dirs=$(ls -d */ | egrep -v 'vendor|tmp')\n    [[ -z \"$(gofmt -e -d $dirs | tee -a /dev/stderr)\" ]] || return 1\n    go vet -composites=false ./...\n}\n\ntest_arvados_version.py() {\n    local orig_fn=\"\"\n    local fail_count=0\n    while read -d \"\" fn; do\n        if [[ -z \"$orig_fn\" ]]; then\n            orig_fn=\"$fn\"\n        elif ! 
cmp \"$orig_fn\" \"$fn\"; then\n            fail_count=$(( $fail_count + 1 ))\n            printf \"FAIL: %s and %s are not identical\\n\" \"$orig_fn\" \"$fn\"\n        fi\n    done < <(git -C \"$WORKSPACE\" ls-files -z | grep -z '/arvados_version\\.py$')\n    case \"$orig_fn\" in\n        \"\") return 66 ;;  # EX_NOINPUT\n        *) return \"$fail_count\" ;;\n    esac\n}\n\ntest_services/api() {\n    rm -f \"$WORKSPACE/services/api/git-commit.version\"\n    cd \"$WORKSPACE/services/api\" \\\n        && eval env RAILS_ENV=test ${short:+RAILS_TEST_SHORT=1} \"$BUNDLE\" exec rake test TESTOPTS=\\'-v -d\\' ${testargs[services/api]}\n}\n\ntest_sdk/ruby() {\n    cd \"$WORKSPACE/sdk/ruby\" \\\n        && \"$BUNDLE\" exec rake test TESTOPTS=-v ${testargs[sdk/ruby]}\n}\n\ntest_sdk/ruby-google-api-client() {\n    echo \"*** note \\`test sdk/ruby-google-api-client\\` does not actually run any tests, see https://dev.arvados.org/issues/20993 ***\"\n    true\n}\n\ntest_contrib/R-sdk() {\n  if [[ \"$NEED_SDK_R\" = true ]]; then\n    env -C \"$WORKSPACE/contrib/R-sdk\" make test\n  fi\n}\n\ntest_sdk/cli() {\n    cd \"$WORKSPACE/sdk/cli\" \\\n        && mkdir -p /tmp/keep \\\n        && KEEP_LOCAL_STORE=/tmp/keep \"$BUNDLE\" exec rake test TESTOPTS=-v ${testargs[sdk/cli]}\n}\n\ntest_contrib/java-sdk-v2() {\n    env -C \"$WORKSPACE/contrib/java-sdk-v2\" gradle test ${testargs[contrib/java-sdk-v2]}\n}\n\ntest_services/login-sync() {\n    cd \"$WORKSPACE/services/login-sync\" \\\n        && \"$BUNDLE\" exec rake test TESTOPTS=-v ${testargs[services/login-sync]}\n}\n\ntest_services/workbench2_units() {\n    cd \"$WORKSPACE/services/workbench2\" && make unit-tests ARVADOS_DIRECTORY=\"${WORKSPACE}\" WORKSPACE=\"$(pwd)\" ${testargs[services/workbench2]}\n}\n\ntest_services/workbench2_integration() {\n    INTERACTIVE=\n    FAIL_FAST_ENABLED=false\n    if [[ -n ${interactive} ]] && [[ -n ${DISPLAY} ]]; then\n\tINTERACTIVE=-i\n\tFAIL_FAST_ENABLED=true\n    fi\n    cd \"$WORKSPACE/services/workbench2\" && make integration-tests ARVADOS_DIRECTORY=\"${WORKSPACE}\" \\\n\t\t\t\t\t\tWORKSPACE=\"$(pwd)\" \\\n\t\t\t\t\t\tINTERACTIVE=$INTERACTIVE \\\n\t\t\t\t\t\tCYPRESS_FAIL_FAST_ENABLED=$FAIL_FAST_ENABLED \\\n\t\t\t\t\t\t${testargs[services/workbench2]}\n}\n\ninstall_deps() {\n    # Install parts needed by test suites\n    do_install env\n    # Many other components rely on PySDK's run_test_server.py, which relies on\n    # the SDK itself, so install that first.\n    do_install sdk/python pip\n    # lib/controller integration tests depend on arv-mount to run containers.\n    do_install services/fuse pip\n    # sdk/cwl depends on crunch-dispatch-local and crunchstat-summary.\n    do_install services/crunch-dispatch-local go\n    do_install tools/crunchstat-summary pip\n    do_install cmd/arvados-server go\n    do_install sdk/ruby-google-api-client\n    do_install sdk/ruby\n    do_install sdk/cli\n    do_install services/api\n    do_install services/keepproxy go\n    do_install services/keep-web go\n}\n\ninstall_all() {\n    do_install env\n    do_install doc\n    do_install sdk/ruby-google-api-client\n    do_install sdk/ruby\n    do_install contrib/R-sdk\n    do_install sdk/cli\n    do_install services/login-sync\n    local pkg_dir\n    if [[ -z ${skip[python3]} ]]; then\n        for pkg_dir in \"${pythonstuff[@]}\"\n        do\n            do_install \"$pkg_dir\" pip\n        done\n    fi\n    for pkg_dir in \"${gostuff[@]}\"\n    do\n        do_install \"$pkg_dir\" go\n    done\n    do_install services/api\n    do_install 
services/workbench2\n}\n\ntest_all() {\n    stop_services\n    do_test services/api\n    do_test gofmt\n    do_test arvados_version.py\n    do_test doc\n    do_test sdk/ruby-google-api-client\n    do_test sdk/ruby\n    do_test contrib/R-sdk\n    do_test sdk/cli\n    do_test services/login-sync\n    do_test contrib/java-sdk-v2\n    local pkg_dir\n    if [[ -z ${skip[python3]} ]]; then\n        for pkg_dir in \"${pythonstuff[@]}\"\n        do\n            do_test \"$pkg_dir\" pip\n        done\n    fi\n    for pkg_dir in \"${gostuff[@]}\"\n    do\n        do_test \"$pkg_dir\" go\n    done\n    do_test services/workbench2_units\n    do_test services/workbench2_integration\n}\n\ntest_go() {\n    do_test gofmt\n    for g in \"${gostuff[@]}\"\n    do\n        do_test \"$g\" go\n    done\n}\n\nhelp_interactive() {\n    echo \"== Interactive commands:\"\n    echo \"TARGET                   (short for 'test DIR')\"\n    echo \"test TARGET\"\n    echo \"10 test TARGET           (run test 10 times)\"\n    echo \"test TARGET -check.vv    (pass arguments to test)\"\n    echo \"install TARGET\"\n    echo \"install env              (go/python libs)\"\n    echo \"install deps             (go/python libs + arvados components needed for integration tests)\"\n    echo \"bundle ...               (run arbitrary bundler command)\"\n    echo \"migrate                  (run outstanding migrations)\"\n    echo \"migrate rollback         (revert most recent migration)\"\n    echo \"migrate <dir> VERSION=n  (revert and/or run a single migration; <dir> is up|down|redo)\"\n    echo \"reset                    (...services used by integration tests)\"\n    echo \"exit\"\n    echo \"== Test targets:\"\n    printf \"%s\\n\" \"${!testfuncargs[@]}\" | sort | column\n}\n\ndeclare -a failures\ndeclare -A skip\ndeclare -A only\ndeclare -A testargs\n\ndeclare -a pythonstuff\npythonstuff=(\n    # The ordering of sdk/python, tools/crunchstat-summary, and\n    # sdk/cwl here is significant. 
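(sdk/cwl\n    # depends on the other two, so installing dependencies first\n    # presumably lets pip resolve the locally built packages instead\n    # of fetching releases from PyPI.) 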
See\n    # https://dev.arvados.org/issues/19744#note-26\n    sdk/python\n    tools/crunchstat-summary\n    sdk/cwl\n    services/dockercleaner\n    services/fuse\n    tools/cluster-activity\n)\n\ndeclare -a gostuff\nif [[ -n \"$WORKSPACE\" ]]; then\n    readarray -d \"\" -t gostuff < <(\n        git -C \"$WORKSPACE\" ls-files -z |\n            grep -z '\\.go$' |\n            xargs -0r dirname -z |\n            sort -zu\n    )\nfi\n\ndeclare -A testfuncargs=()\nfor testfuncname in $(declare -F | awk '\n($3 ~ /^test_/ && $3 !~ /_package_presence$/) {\n  print substr($3, 6);\n}\n'); do\n    testfuncargs[$testfuncname]=\"$testfuncname\"\ndone\nfor g in \"${gostuff[@]}\"; do\n    testfuncargs[$g]=\"$g go\"\ndone\nfor p in \"${pythonstuff[@]}\"; do\n    testfuncargs[$p]=\"$p pip\"\ndone\n\nwhile [[ -n \"$1\" ]]\ndo\n    arg=\"$1\"; shift\n    case \"$arg\" in\n        --help)\n            exec 1>&2\n            echo \"$helpmessage\"\n            if [[ ${#gostuff[@]} -gt 0 ]]; then\n                printf \"\\nAvailable targets:\\n\\n\"\n                printf \"%s\\n\" \"${!testfuncargs[@]}\" | sort | column\n            fi\n            exit 1\n            ;;\n        --skip)\n            skip[\"${1%:py3}\"]=1; shift\n            ;;\n        --only)\n            only[\"${1%:py3}\"]=1; skip[\"${1%:py3}\"]=\"\"; shift\n            ;;\n        --short)\n            short=1\n            ;;\n        --interactive)\n            interactive=1\n            ;;\n        --skip-install)\n            skip[install]=1\n            ;;\n        --only-install)\n            only_install=\"$1\"; shift\n            ;;\n        --temp)\n            temp=\"$1\"; shift\n            temp_preserve=1\n            ;;\n        --leave-temp)\n            temp_preserve=1\n            ;;\n        --repeat)\n            repeat=$((${1}+0)); shift\n            ;;\n        --retry)\n            retry=1\n            ;;\n        *_test=*)\n            suite=\"${arg%%_test=*}\"\n            args=\"${arg#*=}\"\n            testargs[\"${suite%:py3}\"]=\"$args\"\n            ;;\n        ARVADOS_*=*)\n            eval export $(echo $arg | cut -d= -f1)=\\\"$(echo $arg | cut -d= -f2-)\\\"\n            ;;\n        *)\n            echo >&2 \"$0: Unrecognized option: '$arg'. Try: $0 --help\"\n            exit 1\n            ;;\n    esac\ndone\n\n# R SDK installation is very slow (~360s in a clean environment) and is only\n# required when testing the R SDK or building the documentation. Skip that\n# step if it is not needed.\nNEED_SDK_R=true\n\nif [[ ${#only[@]} -ne 0 ]] &&\n   [[ -z \"${only['contrib/R-sdk']}\" && -z \"${only['doc']}\" ]]; then\n  NEED_SDK_R=false\nfi\n\nif [[ ${skip[\"contrib/R-sdk\"]} == 1 && ${skip[\"doc\"]} == 1 ]]; then\n  NEED_SDK_R=false\nfi\n\nif [[ $NEED_SDK_R == false ]]; then\n  echo \"R SDK not needed; it will not be installed.\"\nfi\n\ninitialize\nif [[ -z ${interactive} ]]; then\n    install_all\n    test_all\nelse\n    skip=()\n    only=()\n    only_install=\"\"\n    stop_services\n    setnextcmd() {\n        if [[ \"$TERM\" = dumb ]]; then\n            # assume emacs, or something, is offering a history buffer\n            # and pre-populating the command will only cause trouble\n            nextcmd=\n        elif [[ ! -e \"$GOPATH/bin/arvados-server\" ]]; then\n            nextcmd=\"install deps\"\n        else\n            nextcmd=\"\"\n        fi\n    }\n    echo\n    help_interactive\n    setnextcmd\n    HISTFILE=\"$WORKSPACE/tmp/.history\"\n    history -r\n    ignore_sigint=1\n    while read -p 'What next? 
' -e -i \"$nextcmd\" nextcmd; do\n        history -s \"$nextcmd\"\n        history -w\n        count=1\n        if [[ \"${nextcmd}\" =~ ^[0-9] ]]; then\n          read count nextcmd <<<\"${nextcmd}\"\n        fi\n        read verb target opts <<<\"${nextcmd}\"\n        target=\"${target%/}\"\n        target=\"${target/\\/:/:}\"\n        # Remove old Python version suffix for backwards compatibility\n        target=\"${target%:py3}\"\n        case \"${verb}\" in\n            \"exit\" | \"quit\")\n                exit_cleanly\n                ;;\n            \"reset\")\n                stop_services\n                ;;\n            \"migrate\")\n                do_migrate ${target} ${opts}\n                ;;\n            \"bundle\")\n                do_bundle ${target} ${opts}\n                ;;\n            \"test\" | \"install\")\n                case \"$target\" in\n                    \"\")\n                        help_interactive\n                        ;;\n                    all | deps)\n                        ${verb}_${target}\n                        ;;\n                    *)\n                        testargs[\"$target\"]=\"${opts}\"\n                        while [ $count -gt 0 ]; do\n                          do_$verb ${testfuncargs[${target}]}\n                          let \"count=count-1\"\n                        done\n                        ;;\n                esac\n                ;;\n            \"\" | \"help\" | *)\n                help_interactive\n                ;;\n        esac\n        if [[ ${#successes[@]} -gt 0 || ${#failures[@]} -gt 0 ]]; then\n            report_outcomes\n            successes=()\n            failures=()\n        fi\n        cd \"$WORKSPACE\"\n        setnextcmd\n    done\n    echo\nfi\nexit_cleanly\n"
  },
  {
    "path": "build/version-at-commit.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -e -o pipefail\ncommit=\"$1\"\ndevsuffix=\"~dev\"\n\n# automatically assign *development* version\n#\n# handles the following cases:\n#\n# *  commit is on main or a development branch, the nearest tag is older\n#    than commit where this branch joins main.\n#    -> take greatest version tag in repo X.Y.Z and assign X.(Y+1).0\n#\n# *  commit is on a release branch, the nearest tag is newer\n#    than the commit where this branch joins main.\n#    -> take nearest tag X.Y.Z and assign X.Y.(Z+1)\n\n# X.Y.Z releases where Z=0 are called major\n# releases and X.Y.Z releases where Z>1 are called point releases.\n#\n# The development process distinction is that X.Y.0 releases are\n# branched from main and then subsequent X.Y.Z releases cherry-pick\n# individual features from main onto the \"X.Y-staging\" branch.\n#\n# In semantic versioning terminology an \"X.Y.0\" release which only\n# increments Y is called a \"minor\" release but typically these\n# releases have significant changes that calling them \"minor\" in\n# communications with users feels misleading.\n#\n# Incrementing X is reserved for times when a release has significant\n# backwards-incompatible changes, which we don't do very often and try\n# to avoid.\n#\n# In order to assign a useful development version, we need to\n# determine if we're on the main branch (or a development branch off\n# main) or on a release branch.  We do this by looking at the point\n# where the current commit history branched from main.\n#\n# If the tag for a new X+1 version appears on a release branch and not\n# directly in the history of main, the merge-base between main and the\n# release should be tagged as \"development-X.Y.Z\" so that\n# version-at-commit understands what version to assign to subsequent\n# commits on main.  It is also helpful to assign development-X.Y.Z\n# tags to make git-describe provide better version strings.\n\n# 1. get the nearest tag with 'git describe'\n# 2. get the merge base between this commit and main\n# 3. if the tag is an ancestor of the merge base,\n#    (tag is older than merge base) increment minor version\n#    else, tag is newer than merge base, so increment point version\n\nnearest_tag=$(git describe --abbrev=0 \"$commit\")\n# We must use a remote branch here because Jenkins CI checkouts usually only\n# have the current work branch ref (and not even that if we're working by\n# commit hash). As of June 2025 everything uses origin, so,\nmerge_base=$(git merge-base origin/main \"$commit\")\n\nif git merge-base --is-ancestor \"$nearest_tag\" \"$merge_base\" ; then\n    # the nearest tag appears before the merge base with main (the\n    # branch point), so assume this is a tag for the previous major\n    # release (or a tag with the \"development-\" prefix indicating the\n    # point where a major release branched off).  
Subsequent\n    # development versions are given the anticipated version for the\n    # next major release.\n    #\n    # x.(y+1).0~devTIMESTAMP, where x.y.z is the newest version that does not contain $merge_base\n    # grep reads the list of tags (-f) that contain $merge_base and filters them out (-v)\n    # this prevents a newer tag from retroactively changing the versions of everything before it\n    v=$(git tag |\n            grep -vFf <(git tag --contains \"$merge_base\") |\n            sed -e 's/^development-//' |\n            sort --version-sort |\n            awk '\nBEGIN { FS=\".\"; OFS=\".\"; }\nEND { print $1, $2+1, 0; }\n')\nelse\n    # the nearest tag comes after the merge base with main (the branch\n    # point).  Assume this is a point release branch,\n    # following a major release.\n    #\n    # x.y.(z+1)~devTIMESTAMP, where x.y.z is the latest released ancestor of $commit\n    v=$(awk '\nBEGIN { FS=\".\"; OFS=\".\"; }\n{ print $1, $2, $3+1; exit; }\n' <<EOF\n${nearest_tag#development-}\nEOF\n        )\nfi\n\nisodate=$(TZ=UTC git log -n1 --format=%cd --date=iso \"$commit\")\nts=$(TZ=UTC date --date=\"$isodate\" \"+%Y%m%d%H%M%S\")\necho \"${v}${devsuffix}${ts}\"\n"
  },
  {
    "path": "cc-by-sa-3.0.txt",
    "content": "\nAttribution-ShareAlike 3.0 Unported\n\n    CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN \"AS-IS\" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM ITS USE.\n\nLicense\n\nTHE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE (\"CCPL\" OR \"LICENSE\"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.\n\nBY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.\n\n1. Definitions\n\n    \"Adaptation\" means a work based upon the Work, or upon the Work and other pre-existing works, such as a translation, adaptation, derivative work, arrangement of music or other alterations of a literary or artistic work, or phonogram or performance and includes cinematographic adaptations or any other form in which the Work may be recast, transformed, or adapted including in any form recognizably derived from the original, except that a work that constitutes a Collection will not be considered an Adaptation for the purpose of this License. For the avoidance of doubt, where the Work is a musical work, performance or phonogram, the synchronization of the Work in timed-relation with a moving image (\"synching\") will be considered an Adaptation for the purpose of this License.\n    \"Collection\" means a collection of literary or artistic works, such as encyclopedias and anthologies, or performances, phonograms or broadcasts, or other works or subject matter other than works listed in Section 1(f) below, which, by reason of the selection and arrangement of their contents, constitute intellectual creations, in which the Work is included in its entirety in unmodified form along with one or more other contributions, each constituting separate and independent works in themselves, which together are assembled into a collective whole. 
A work that constitutes a Collection will not be considered an Adaptation (as defined below) for the purposes of this License.\n    \"Creative Commons Compatible License\" means a license that is listed at https://creativecommons.org/compatiblelicenses that has been approved by Creative Commons as being essentially equivalent to this License, including, at a minimum, because that license: (i) contains terms that have the same purpose, meaning and effect as the License Elements of this License; and, (ii) explicitly permits the relicensing of adaptations of works made available under that license under this License or a Creative Commons jurisdiction license with the same License Elements as this License.\n    \"Distribute\" means to make available to the public the original and copies of the Work or Adaptation, as appropriate, through sale or other transfer of ownership.\n    \"License Elements\" means the following high-level license attributes as selected by Licensor and indicated in the title of this License: Attribution, ShareAlike.\n    \"Licensor\" means the individual, individuals, entity or entities that offer(s) the Work under the terms of this License.\n    \"Original Author\" means, in the case of a literary or artistic work, the individual, individuals, entity or entities who created the Work or if no individual or entity can be identified, the publisher; and in addition (i) in the case of a performance the actors, singers, musicians, dancers, and other persons who act, sing, deliver, declaim, play in, interpret or otherwise perform literary or artistic works or expressions of folklore; (ii) in the case of a phonogram the producer being the person or legal entity who first fixes the sounds of a performance or other sounds; and, (iii) in the case of broadcasts, the organization that transmits the broadcast.\n    \"Work\" means the literary and/or artistic work offered under the terms of this License including without limitation any production in the literary, scientific and artistic domain, whatever may be the mode or form of its expression including digital form, such as a book, pamphlet and other writing; a lecture, address, sermon or other work of the same nature; a dramatic or dramatico-musical work; a choreographic work or entertainment in dumb show; a musical composition with or without words; a cinematographic work to which are assimilated works expressed by a process analogous to cinematography; a work of drawing, painting, architecture, sculpture, engraving or lithography; a photographic work to which are assimilated works expressed by a process analogous to photography; a work of applied art; an illustration, map, plan, sketch or three-dimensional work relative to geography, topography, architecture or science; a performance; a broadcast; a phonogram; a compilation of data to the extent it is protected as a copyrightable work; or a work performed by a variety or circus performer to the extent it is not otherwise considered a literary or artistic work.\n    \"You\" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation.\n    \"Publicly Perform\" means to perform public recitations of the Work and to communicate to the public those public recitations, by any means or process, including by wire or wireless means or public digital performances; to make 
available to the public Works in such a way that members of the public may access these Works from a place and at a place individually chosen by them; to perform the Work to the public by any means or process and the communication to the public of the performances of the Work, including by public digital performance; to broadcast and rebroadcast the Work by any means including signs, sounds or images.\n    \"Reproduce\" means to make copies of the Work by any means including without limitation by sound or visual recordings and the right of fixation and reproducing fixations of the Work, including storage of a protected performance or phonogram in digital form or other electronic medium.\n\n2. Fair Dealing Rights. Nothing in this License is intended to reduce, limit, or restrict any uses free from copyright or rights arising from limitations or exceptions that are provided for in connection with the copyright protection under copyright law or other applicable laws.\n\n3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below:\n\n    to Reproduce the Work, to incorporate the Work into one or more Collections, and to Reproduce the Work as incorporated in the Collections;\n    to create and Reproduce Adaptations provided that any such Adaptation, including any translation in any medium, takes reasonable steps to clearly label, demarcate or otherwise identify that changes were made to the original Work. For example, a translation could be marked \"The original work was translated from English to Spanish,\" or a modification could indicate \"The original work has been modified.\";\n    to Distribute and Publicly Perform the Work including as incorporated in Collections; and,\n    to Distribute and Publicly Perform Adaptations.\n\n    For the avoidance of doubt:\n        Non-waivable Compulsory License Schemes. In those jurisdictions in which the right to collect royalties through any statutory or compulsory licensing scheme cannot be waived, the Licensor reserves the exclusive right to collect such royalties for any exercise by You of the rights granted under this License;\n        Waivable Compulsory License Schemes. In those jurisdictions in which the right to collect royalties through any statutory or compulsory licensing scheme can be waived, the Licensor waives the exclusive right to collect such royalties for any exercise by You of the rights granted under this License; and,\n        Voluntary License Schemes. The Licensor waives the right to collect royalties, whether individually or, in the event that the Licensor is a member of a collecting society that administers voluntary licensing schemes, via that society, from any exercise by You of the rights granted under this License.\n\nThe above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. Subject to Section 8(f), all rights not expressly granted by Licensor are hereby reserved.\n\n4. Restrictions. The license granted in Section 3 above is expressly made subject to and limited by the following restrictions:\n\n    You may Distribute or Publicly Perform the Work only under the terms of this License. 
You must include a copy of, or the Uniform Resource Identifier (URI) for, this License with every copy of the Work You Distribute or Publicly Perform. You may not offer or impose any terms on the Work that restrict the terms of this License or the ability of the recipient of the Work to exercise the rights granted to that recipient under the terms of the License. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties with every copy of the Work You Distribute or Publicly Perform. When You Distribute or Publicly Perform the Work, You may not impose any effective technological measures on the Work that restrict the ability of a recipient of the Work from You to exercise the rights granted to that recipient under the terms of the License. This Section 4(a) applies to the Work as incorporated in a Collection, but this does not require the Collection apart from the Work itself to be made subject to the terms of this License. If You create a Collection, upon notice from any Licensor You must, to the extent practicable, remove from the Collection any credit as required by Section 4(c), as requested. If You create an Adaptation, upon notice from any Licensor You must, to the extent practicable, remove from the Adaptation any credit as required by Section 4(c), as requested.\n    You may Distribute or Publicly Perform an Adaptation only under the terms of: (i) this License; (ii) a later version of this License with the same License Elements as this License; (iii) a Creative Commons jurisdiction license (either this or a later license version) that contains the same License Elements as this License (e.g., Attribution-ShareAlike 3.0 US)); (iv) a Creative Commons Compatible License. If you license the Adaptation under one of the licenses mentioned in (iv), you must comply with the terms of that license. If you license the Adaptation under the terms of any of the licenses mentioned in (i), (ii) or (iii) (the \"Applicable License\"), you must comply with the terms of the Applicable License generally and the following provisions: (I) You must include a copy of, or the URI for, the Applicable License with every copy of each Adaptation You Distribute or Publicly Perform; (II) You may not offer or impose any terms on the Adaptation that restrict the terms of the Applicable License or the ability of the recipient of the Adaptation to exercise the rights granted to that recipient under the terms of the Applicable License; (III) You must keep intact all notices that refer to the Applicable License and to the disclaimer of warranties with every copy of the Work as included in the Adaptation You Distribute or Publicly Perform; (IV) when You Distribute or Publicly Perform the Adaptation, You may not impose any effective technological measures on the Adaptation that restrict the ability of a recipient of the Adaptation from You to exercise the rights granted to that recipient under the terms of the Applicable License. 
This Section 4(b) applies to the Adaptation as incorporated in a Collection, but this does not require the Collection apart from the Adaptation itself to be made subject to the terms of the Applicable License.\n    If You Distribute, or Publicly Perform the Work or any Adaptations or Collections, You must, unless a request has been made pursuant to Section 4(a), keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or if the Original Author and/or Licensor designate another party or parties (e.g., a sponsor institute, publishing entity, journal) for attribution (\"Attribution Parties\") in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; (ii) the title of the Work if supplied; (iii) to the extent reasonably practicable, the URI, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and (iv) , consistent with Ssection 3(b), in the case of an Adaptation, a credit identifying the use of the Work in the Adaptation (e.g., \"French translation of the Work by Original Author,\" or \"Screenplay based on original Work by Original Author\"). The credit required by this Section 4(c) may be implemented in any reasonable manner; provided, however, that in the case of a Adaptation or Collection, at a minimum such credit will appear, if a credit for all contributing authors of the Adaptation or Collection appears, then as part of these credits and in a manner at least as prominent as the credits for the other contributing authors. For the avoidance of doubt, You may only use the credit required by this Section for the purpose of attribution in the manner set out above and, by exercising Your rights under this License, You may not implicitly or explicitly assert or imply any connection with, sponsorship or endorsement by the Original Author, Licensor and/or Attribution Parties, as appropriate, of You or Your use of the Work, without the separate, express prior written permission of the Original Author, Licensor and/or Attribution Parties.\n    Except as otherwise agreed in writing by the Licensor or as may be otherwise permitted by applicable law, if You Reproduce, Distribute or Publicly Perform the Work either by itself or as part of any Adaptations or Collections, You must not distort, mutilate, modify or take other derogatory action in relation to the Work which would be prejudicial to the Original Author's honor or reputation. Licensor agrees that in those jurisdictions (e.g. Japan), in which any exercise of the right granted in Section 3(b) of this License (the right to make Adaptations) would be deemed to be a distortion, mutilation, modification or other derogatory action prejudicial to the Original Author's honor and reputation, the Licensor will waive or not assert, as appropriate, this Section, to the fullest extent permitted by the applicable national law, to enable You to reasonably exercise Your right under Section 3(b) of this License (right to make Adaptations) but not otherwise.\n\n5. 
Representations, Warranties and Disclaimer\n\nUNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.\n\n6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.\n\n7. Termination\n\n    This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Adaptations or Collections from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License.\n    Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above.\n\n8. Miscellaneous\n\n    Each time You Distribute or Publicly Perform the Work or a Collection, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License.\n    Each time You Distribute or Publicly Perform an Adaptation, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License.\n    If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.\n    No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent.\n    This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. 
This License may not be modified without the mutual written agreement of the Licensor and You.\n    The rights granted under, and the subject matter referenced, in this License were drafted utilizing the terminology of the Berne Convention for the Protection of Literary and Artistic Works (as amended on September 28, 1979), the Rome Convention of 1961, the WIPO Copyright Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996 and the Universal Copyright Convention (as revised on July 24, 1971). These rights and subject matter take effect in the relevant jurisdiction in which the License terms are sought to be enforced according to the corresponding provisions of the implementation of those treaty provisions in the applicable national law. If the standard suite of rights granted under applicable copyright law includes additional rights not granted under this License, such additional rights are deemed to be included in the License; this License is not intended to restrict the license of any rights under applicable law.\n\n    Creative Commons Notice\n\n    Creative Commons is not a party to this License, and makes no warranty whatsoever in connection with the Work. Creative Commons will not be liable to You or any party on any legal theory for any damages whatsoever, including without limitation any general, special, incidental or consequential damages arising in connection to this license. Notwithstanding the foregoing two (2) sentences, if Creative Commons has expressly identified itself as the Licensor hereunder, it shall have all rights and obligations of Licensor.\n\n    Except for the limited purpose of indicating to the public that the Work is licensed under the CCPL, Creative Commons does not authorize the use by either party of the trademark \"Creative Commons\" or any related trademark or logo of Creative Commons without the prior written consent of Creative Commons. Any permitted use will be in compliance with Creative Commons' then-current trademark usage guidelines, as may be published on its website or otherwise made available upon request from time to time. For the avoidance of doubt, this trademark restriction does not form part of the License.\n\n    Creative Commons may be contacted at https://creativecommons.org/.\n"
  },
  {
    "path": "cmd/arvados-server/arvados-controller.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[Unit]\nDescription=Arvados controller\nDocumentation=https://doc.arvados.org/\nAfter=network.target\nAssertPathExists=/etc/arvados/config.yml\nStartLimitIntervalSec=0\n\n[Service]\nType=notify\nEnvironmentFile=-/etc/arvados/environment\nExecStart=/usr/bin/arvados-controller $EXTRA_OPTS\nRestart=always\nRestartSec=1\nRestartPreventExitStatus=2\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "cmd/arvados-server/arvados-dispatch-cloud.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[Unit]\nDescription=arvados-dispatch-cloud\nDocumentation=https://doc.arvados.org/\nAfter=network.target\nAssertPathExists=/etc/arvados/config.yml\nStartLimitIntervalSec=0\n\n[Service]\nType=notify\nEnvironmentFile=-/etc/arvados/environment\nExecStart=/usr/bin/arvados-dispatch-cloud $EXTRA_OPTS\nRestart=always\nRestartSec=1\nRestartPreventExitStatus=2\n\n# Before Arvados 3.1, arvados-dispatch-cloud did not try to read the\n# configuration at ~/.aws. Now it can, but we disable that functionality\n# here to avoid disrupting existing clusters which might have credentials\n# for another service there. Administrators who do want to read the\n# configuration at ~/.aws can unset these environment variables in an\n# override.\nEnvironment=AWS_CONFIG_FILE=/dev/null\nEnvironment=AWS_SHARED_CREDENTIALS_FILE=/dev/null\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "cmd/arvados-server/arvados-dispatch-lsf.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[Unit]\nDescription=arvados-dispatch-lsf\nDocumentation=https://doc.arvados.org/\nAfter=network.target\nAssertPathExists=/etc/arvados/config.yml\nStartLimitIntervalSec=0\n\n[Service]\nType=notify\nEnvironmentFile=-/etc/arvados/environment\nExecStart=/usr/bin/arvados-dispatch-lsf $EXTRA_OPTS\nRestart=always\nRestartSec=1\nRestartPreventExitStatus=2\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "cmd/arvados-server/arvados-health.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[Unit]\nDescription=Arvados healthcheck server\nDocumentation=https://doc.arvados.org/\nAfter=network.target\nAssertPathExists=/etc/arvados/config.yml\nStartLimitIntervalSec=0\n\n[Service]\nType=notify\nEnvironmentFile=-/etc/arvados/environment\nExecStart=/usr/bin/arvados-health $EXTRA_OPTS\nRestart=always\nRestartSec=1\nRestartPreventExitStatus=2\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "cmd/arvados-server/arvados-ws.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[Unit]\nDescription=Arvados websocket server\nDocumentation=https://doc.arvados.org/\nAfter=network.target\nAssertPathExists=/etc/arvados/config.yml\nStartLimitIntervalSec=0\n\n[Service]\nType=notify\nExecStart=/usr/bin/arvados-ws $EXTRA_OPTS\nRestart=always\nRestartSec=1\nRestartPreventExitStatus=2\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "cmd/arvados-server/cmd.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage main\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/lib/boot\"\n\t\"git.arvados.org/arvados.git/lib/cloud/cloudtest\"\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/controller\"\n\t\"git.arvados.org/arvados.git/lib/crunchrun\"\n\t\"git.arvados.org/arvados.git/lib/crunchstat\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud\"\n\t\"git.arvados.org/arvados.git/lib/lsf\"\n\t\"git.arvados.org/arvados.git/lib/recovercollection\"\n\t\"git.arvados.org/arvados.git/lib/service\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/health\"\n\tdispatchslurm \"git.arvados.org/arvados.git/services/crunch-dispatch-slurm\"\n\tkeepbalance \"git.arvados.org/arvados.git/services/keep-balance\"\n\tkeepweb \"git.arvados.org/arvados.git/services/keep-web\"\n\t\"git.arvados.org/arvados.git/services/keepproxy\"\n\t\"git.arvados.org/arvados.git/services/keepstore\"\n\t\"git.arvados.org/arvados.git/services/ws\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar (\n\thandler = cmd.Multi(map[string]cmd.Handler{\n\t\t\"version\":   cmd.Version,\n\t\t\"-version\":  cmd.Version,\n\t\t\"--version\": cmd.Version,\n\n\t\t\"boot\":               boot.Command,\n\t\t\"check\":              health.CheckCommand,\n\t\t\"cloudtest\":          cloudtest.Command,\n\t\t\"config-check\":       config.CheckCommand,\n\t\t\"config-defaults\":    config.DumpDefaultsCommand,\n\t\t\"config-dump\":        config.DumpCommand,\n\t\t\"controller\":         controller.Command,\n\t\t\"crunch-run\":         crunchrun.Command,\n\t\t\"crunchstat\":         crunchstat.Command,\n\t\t\"dispatch-cloud\":     dispatchcloud.Command,\n\t\t\"dispatch-lsf\":       lsf.DispatchCommand,\n\t\t\"dispatch-slurm\":     dispatchslurm.Command,\n\t\t\"health\":             healthCommand,\n\t\t\"instance\":           dispatchcloud.InstanceCommand,\n\t\t\"keep-balance\":       keepbalance.Command,\n\t\t\"keep-web\":           keepweb.Command,\n\t\t\"keepproxy\":          keepproxy.Command,\n\t\t\"keepstore\":          keepstore.Command,\n\t\t\"recover-collection\": recovercollection.Command,\n\t\t\"workbench2\":         wb2command{},\n\t\t\"ws\":                 ws.Command,\n\t})\n)\n\nfunc main() {\n\tos.Exit(handler.RunCommand(os.Args[0], os.Args[1:], os.Stdin, os.Stdout, os.Stderr))\n}\n\ntype wb2command struct{}\n\nfunc (wb2command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tif len(args) != 3 {\n\t\tfmt.Fprintf(stderr, \"usage: %s api-host listen-addr app-dir\\n\", prog)\n\t\treturn 1\n\t}\n\tconfigJSON, err := json.Marshal(map[string]string{\"API_HOST\": args[0]})\n\tif err != nil {\n\t\tfmt.Fprintf(stderr, \"json.Marshal: %s\\n\", err)\n\t\treturn 1\n\t}\n\tservefs := http.FileServer(http.Dir(args[2]))\n\tmux := http.NewServeMux()\n\tmux.Handle(\"/\", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tfor _, ent := range strings.Split(req.URL.Path, \"/\") {\n\t\t\tif ent == \"..\" {\n\t\t\t\thttp.Error(w, \"invalid URL path\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tfnm := filepath.Join(args[2], filepath.FromSlash(path.Clean(\"/\"+req.URL.Path)))\n\t\tif _, err := os.Stat(fnm); 
os.IsNotExist(err) {\n\t\t\treq.URL.Path = \"/\"\n\t\t}\n\t\tservefs.ServeHTTP(w, req)\n\t}))\n\tmux.HandleFunc(\"/config.json\", func(w http.ResponseWriter, _ *http.Request) {\n\t\tw.Write(configJSON)\n\t})\n\tmux.HandleFunc(\"/_health/ping\", func(w http.ResponseWriter, _ *http.Request) {\n\t\tio.WriteString(w, `{\"health\":\"OK\"}`)\n\t})\n\terr = http.ListenAndServe(args[1], mux)\n\tif err != nil {\n\t\tfmt.Fprintln(stderr, err.Error())\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nvar healthCommand cmd.Handler = service.Command(arvados.ServiceNameHealth, func(ctx context.Context, cluster *arvados.Cluster, _ string, reg *prometheus.Registry) service.Handler {\n\tmClockSkew := prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"health\",\n\t\tName:      \"clock_skew_seconds\",\n\t\tHelp:      \"Clock skew observed in most recent health check\",\n\t})\n\treg.MustRegister(mClockSkew)\n\treturn &health.Aggregator{\n\t\tCluster:         cluster,\n\t\tMetricClockSkew: mClockSkew,\n\t}\n})\n"
  },
  {
    "path": "cmd/arvados-server/crunch-dispatch-slurm.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[Unit]\nDescription=Arvados Crunch Dispatcher for SLURM\nDocumentation=https://doc.arvados.org/\nAfter=network.target\nAssertPathExists=/etc/arvados/config.yml\nStartLimitIntervalSec=0\n\n[Service]\nType=notify\nEnvironmentFile=-/etc/arvados/environment\nExecStart=/usr/bin/crunch-dispatch-slurm $EXTRA_OPTS\nRestart=always\nRestartSec=1\nRestartPreventExitStatus=2\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "cmd/arvados-server/keep-balance.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[Unit]\nDescription=Arvados Keep Balance\nDocumentation=https://doc.arvados.org/\nAfter=network.target\nAssertPathExists=/etc/arvados/config.yml\nStartLimitIntervalSec=0\n\n[Service]\nType=notify\nEnvironmentFile=-/etc/arvados/environment\n# GOGC=50 reduces the Go runtime's default 100% garbage collection\n# threshold to 50%.  This reduces keep-balance's peak memory usage and\n# makes it run a bit slower.  See\n# https://dev.arvados.org/issues/23235#note-16.\nEnvironment=GOGC=50\nExecStart=/usr/bin/keep-balance $EXTRA_OPTS\nRestart=always\nRestartSec=10s\nNice=19\nRestartPreventExitStatus=2\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "cmd/arvados-server/keep-web.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[Unit]\nDescription=Arvados Keep WebDAV and S3 gateway\nDocumentation=https://doc.arvados.org/\nAfter=network.target\nAssertPathExists=/etc/arvados/config.yml\nStartLimitIntervalSec=0\n\n[Service]\nType=notify\nEnvironmentFile=-/etc/arvados/environment\nExecStart=/usr/bin/keep-web $EXTRA_OPTS\nRestart=always\nRestartSec=1\nRestartPreventExitStatus=2\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "cmd/arvados-server/keepproxy.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[Unit]\nDescription=Arvados Keep Proxy\nDocumentation=https://doc.arvados.org/\nAfter=network.target\nAssertPathExists=/etc/arvados/config.yml\nStartLimitIntervalSec=0\n\n[Service]\nType=notify\nEnvironmentFile=-/etc/arvados/environment\nExecStart=/usr/bin/keepproxy $EXTRA_OPTS\nRestart=always\nRestartSec=1\nRestartPreventExitStatus=2\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "cmd/arvados-server/keepstore.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[Unit]\nDescription=Arvados Keep Storage Daemon\nDocumentation=https://doc.arvados.org/\nAfter=network.target\nAssertPathExists=/etc/arvados/config.yml\nStartLimitIntervalSec=0\n\n[Service]\n# trigger Go garbage collection when the ratio of freshly allocated data to live data\n# remaining after the previous collection reaches 10% rather than the default 100%, so\n# that Keepstore's memory use is tightly coupled to the number of buffers it is\n# configured to use.\nEnvironment=GOGC=10\nType=notify\nExecStart=/usr/bin/keepstore $EXTRA_OPTS\nRestart=always\nRestartSec=1\nRestartPreventExitStatus=2\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "contrib/R-sdk/.Rbuildignore",
    "content": "^.*\\.Rproj$\n^\\.Rproj\\.user$\n^docs$\n^pkgdown$\n"
  },
  {
    "path": "contrib/R-sdk/.gitignore",
    "content": "ArvadosR_*.tar.*\nman/\nR/Arvados.R\n"
  },
  {
    "path": "contrib/R-sdk/ArvadosR.Rproj",
    "content": "Version: 1.0\n\nRestoreWorkspace: Default\nSaveWorkspace: Default\nAlwaysSaveHistory: Default\n\nEnableCodeIndexing: Yes\nUseSpacesForTab: Yes\nNumSpacesForTab: 4\nEncoding: UTF-8\n\nRnwWeave: Sweave\nLaTeX: pdfLaTeX\n\nAutoAppendNewline: Yes\nStripTrailingWhitespace: Yes\n\nBuildType: Package\nPackageUseDevtools: Yes\nPackageInstallArgs: --no-multiarch --with-keep.source\n"
  },
  {
    "path": "contrib/R-sdk/DESCRIPTION",
    "content": "Package: ArvadosR\nType: Package\nTitle: Arvados R SDK\nVersion: 3.2.1\nAuthors@R: c(person(\"Fuad\", \"Muhic\", role = c(\"aut\", \"ctr\"), email = \"fmuhic@capeannenterprises.com\"),\n             person(\"Peter\", \"Amstutz\", role = c(\"cre\"), email = \"peter.amstutz@curii.com\"),\n             person(\"Piotr\", \"Nowosielski\", role = c(\"aut\"), email = \"piotr.nowosielski@contractors.roche.com\"),\n             person(\"Aneta\", \"Stanczyk\", role = c(\"aut\"), email = \"aneta.stanczyk@contractors.roche.com\"),\n             person(\"Brett\", \"Smith\", role = c(\"aut\"), email = \"brett.smith@curii.com\"))\nDescription: This is the Arvados R SDK\nURL: http://doc.arvados.org\nLicense: Apache-2.0\nEncoding: UTF-8\nLazyData: true\nRoxygen: list(markdown = TRUE)\nRoxygenNote: 7.2.3\nImports:\n    R6,\n    httr,\n    stringr,\n    jsonlite,\n    curl,\n    XML\nSuggests: testthat\n"
  },
  {
    "path": "contrib/R-sdk/Makefile",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# NOTE: `R CMD check` (and by extension, the Arvados test suite)\n# will carp at you if you use GNU extensions.\n# <https://cran.r-project.org/doc/manuals/R-exts.html#Writing-portable-packages>\n\nALL=R/Arvados.R man\nSDK_VERSION!=awk '($$1 == \"Version:\"){v=$$2} END {print v}' DESCRIPTION\n\nall: $(ALL)\n\n.PHONY: api\napi: R/Arvados.R\nR/Arvados.R: arvados-v1-discovery.json generateApi.R\n\tRscript --vanilla generateApi.R\n\n# Used by arvados/doc/Rakefile.\n# Check whether we can load libraries necessary to build the package.\n.PHONY: can_run\ncan_run:\n\tRscript --vanilla -e \"library(jsonlite); library(roxygen2);\"\n\n.PHONY: clean\nclean:\n\trm -rf $(ALL) \"ArvadosR_$(SDK_VERSION).tar.gz\"\n\n.PHONY: install\ninstall:\n\tR CMD INSTALL .\n\nman: R/Arvados.R R/*.R\n\tRscript --vanilla -e \"library(roxygen2); roxygen2::roxygenize(clean=TRUE)\"\n\n.PHONY: package\npackage: \"ArvadosR_$(SDK_VERSION).tar.gz\"\n\"ArvadosR_$(SDK_VERSION).tar.gz\": $(ALL) [A-Z]* *.R tests/*.R tests/testthat/*.R tests/testthat/fakes/*.R\n\tR CMD build .\n\n.PHONY: test\ntest: $(ALL)\n\tRscript --vanilla run_test.R\n"
  },
  {
    "path": "contrib/R-sdk/NAMESPACE",
    "content": "# Generated by roxygen2: do not edit by hand\n\nS3method(print,ArvadosFile)\nS3method(print,Collection)\nS3method(print,Subcollection)\nexport(Arvados)\nexport(ArvadosFile)\nexport(Collection)\nexport(Subcollection)\nexport(listAll)\n"
  },
  {
    "path": "contrib/R-sdk/R/ArvadosFile.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n#' R6 Class Representing a ArvadosFile\n#'\n#' @description\n#' ArvadosFile class represents a file inside Arvados collection.\n\n#' @export\nArvadosFile <- R6::R6Class(\n\n    \"ArvadosFile\",\n\n    public = list(\n\n        #' @description\n        #' Initialize new enviroment.\n        #' @param name Name of the new enviroment.\n        #' @return A new `ArvadosFile` object.\n        #' @examples\n        #' \\dontrun{\n        #' myFile   <- ArvadosFile$new(\"myFile\")\n        #' }\n        initialize = function(name)\n        {\n            if(name == \"\")\n                stop(\"Invalid name.\")\n\n            private$name <- name\n        },\n\n        #' @description\n        #' Returns name of the file.\n        #' @examples\n        #' \\dontrun{\n        #' arvadosFile$getName()\n        #' }\n        getName = function() private$name,\n\n        #' @description\n        #' Returns collections file content as character vector.\n        #' @param fullPath Checking if TRUE.\n        #' @examples\n        #' \\dontrun{\n        #' arvadosFile$getFileListing()\n        #' }\n        getFileListing = function(fullpath = TRUE)\n        {\n            self$getName()\n        },\n\n        #' @description\n        #' Returns collections content size in bytes.\n        #' @examples\n        #' \\dontrun{\n        #' arvadosFile$getSizeInBytes()\n        #' }\n        getSizeInBytes = function()\n        {\n            if(is.null(private$collection))\n                return(0)\n\n            REST <- private$collection$getRESTService()\n\n            fileSize <- REST$getResourceSize(self$getRelativePath(),\n                                             private$collection$uuid)\n            fileSize\n        },\n\n        get = function(fileLikeObjectName)\n        {\n            return(NULL)\n        },\n\n        getFirst = function()\n        {\n            return(NULL)\n        },\n\n        #' @description\n        #' Returns collection UUID.\n        getCollection = function() private$collection,\n\n        #' @description\n        #' Sets new collection.\n        setCollection = function(collection, setRecursively = TRUE)\n        {\n            private$collection <- collection\n        },\n\n        #' @description\n        #' Returns file path relative to the root.\n        getRelativePath = function()\n        {\n            relativePath <- c(private$name)\n            parent <- private$parent\n\n            while(!is.null(parent))\n            {\n                relativePath <- c(parent$getName(), relativePath)\n                parent <- parent$getParent()\n            }\n\n            relativePath <- relativePath[relativePath != \"\"]\n            paste0(relativePath, collapse = \"/\")\n        },\n\n        #' @description\n        #' Returns project UUID.\n        getParent = function() private$parent,\n\n        #' @description\n        #' Sets project collection.\n        setParent = function(newParent) private$parent <- newParent,\n\n        #' @description\n        #' Read file content.\n        #' @param contentType Type of content. 
Possible is \"text\", \"raw\".\n        #' @param offset Describes the location of a piece of data compared to another location\n        #' @param length Length of content\n        #' @examples\n        #' \\dontrun{\n        #' collection <- Collection$new(arv, collectionUUID)\n        #' arvadosFile <- collection$get(fileName)\n        #' fileContent <- arvadosFile$read(\"text\")\n        #' }\n        read = function(contentType = \"raw\", offset = 0, length = 0)\n        {\n            if(is.null(private$collection))\n                stop(\"ArvadosFile doesn't belong to any collection.\")\n\n            if(offset < 0 || length < 0)\n                stop(\"Offset and length must be positive values.\")\n\n            REST <- private$collection$getRESTService()\n\n            fileContent <- REST$read(self$getRelativePath(),\n                                     private$collection$uuid,\n                                     contentType, offset, length)\n            fileContent\n        },\n\n        #' @description\n        #' Get connection opened in \"read\" or \"write\" mode.\n        #' @param rw Type of connection.\n        #' @examples\n        #' \\dontrun{\n        #' collection <- Collection$new(arv, collectionUUID)\n        #' arvadosFile <- collection$get(fileName)\n        #' arvConnection <- arvadosFile$connection(\"w\")\n        #' }\n        connection = function(rw)\n        {\n            if (rw == \"r\" || rw == \"rb\")\n            {\n                REST <- private$collection$getRESTService()\n                return(REST$getConnection(self$getRelativePath(),\n                                          private$collection$uuid,\n                                          rw))\n            }\n            else if (rw == \"w\")\n            {\n                private$buffer <- textConnection(NULL, \"w\")\n\n                return(private$buffer)\n            }\n        },\n\n        #' @description\n        #' Write connections content to a file or override current content of the file.\n        #' @examples\n        #' \\dontrun{\n        #' collection <- Collection$new(arv, collectionUUID)\n        #' arvadosFile <- collection$get(fileName)\n        #' myFile$write(\"This is new file content\")\n        #' arvadosFile$flush()\n        #' }\n        flush = function()\n        {\n            v <- textConnectionValue(private$buffer)\n            close(private$buffer)\n            self$write(paste(v, collapse='\\n'))\n        },\n\n        #' @description\n        #' Write to file or override current content of the file.\n        #' @param content File to write.\n        #' @param contentType Type of content. 
Possible is \"text\", \"raw\".\n        #' @examples\n        #' \\dontrun{\n        #' collection <- Collection$new(arv, collectionUUID)\n        #' arvadosFile <- collection$get(fileName)\n        #' myFile$write(\"This is new file content\")\n        #' }\n        write = function(content, contentType = \"text/html\")\n        {\n            if(is.null(private$collection))\n                stop(\"ArvadosFile doesn't belong to any collection.\")\n\n            REST <- private$collection$getRESTService()\n\n            writeResult <- REST$write(self$getRelativePath(),\n                                      private$collection$uuid,\n                                      content, contentType)\n            writeResult\n        },\n\n        #' @description\n        #' Moves file to a new location inside collection.\n        #' @param destination Path to new folder.\n        #' @examples\n        #' \\dontrun{\n        #' arvadosFile$move(newPath)\n        #' }\n        move = function(destination)\n        {\n            if(is.null(private$collection))\n                stop(\"ArvadosFile doesn't belong to any collection.\")\n\n            destination <- trimFromEnd(destination, \"/\")\n            nameAndPath <- splitToPathAndName(destination)\n\n            newParent <- private$collection$get(nameAndPath$path)\n\n            if(is.null(newParent))\n                stop(\"Unable to get destination subcollection.\")\n\n            childWithSameName <- newParent$get(nameAndPath$name)\n\n            if(!is.null(childWithSameName))\n                stop(\"Destination already contains content with same name.\")\n\n            REST <- private$collection$getRESTService()\n            REST$move(self$getRelativePath(),\n                      paste0(newParent$getRelativePath(), \"/\", nameAndPath$name),\n                      private$collection$uuid)\n\n            private$dettachFromCurrentParent()\n            private$attachToNewParent(self, newParent)\n\n            private$parent <- newParent\n            private$name <- nameAndPath$name\n\n            self\n        },\n\n        #' @description\n        #' Copies file to a new location inside collection.\n        #' @param destination Path to new folder.\n        #' @examples\n        #' \\dontrun{\n        #' arvadosFile$copy(\"NewName.format\")\n        #' }\n        copy = function(destination)\n        {\n            if(is.null(private$collection))\n                stop(\"ArvadosFile doesn't belong to any collection.\")\n\n            destination <- trimFromEnd(destination, \"/\")\n            nameAndPath <- splitToPathAndName(destination)\n\n            newParent <- private$collection$get(nameAndPath$path)\n\n            if(is.null(newParent))\n                stop(\"Unable to get destination subcollection.\")\n\n            childWithSameName <- newParent$get(nameAndPath$name)\n\n            if(!is.null(childWithSameName))\n                stop(\"Destination already contains content with same name.\")\n\n            REST <- private$collection$getRESTService()\n            REST$copy(self$getRelativePath(),\n                      paste0(newParent$getRelativePath(), \"/\", nameAndPath$name),\n                      private$collection$uuid)\n\n            newFile <- self$duplicate(nameAndPath$name)\n            newFile$setCollection(self$getCollection())\n            private$attachToNewParent(newFile, newParent)\n            newFile$setParent(newParent)\n\n            newFile\n        },\n\n        #' @description\n        #' Duplicate file and 
 gives it a new name.\n        #' @param newName New name for the duplicated file.\n        duplicate = function(newName = NULL)\n        {\n            name <- if(!is.null(newName)) newName else private$name\n            newFile <- ArvadosFile$new(name)\n            newFile\n        }\n    ),\n\n    private = list(\n\n        name       = NULL,\n        size       = NULL,\n        parent     = NULL,\n        collection = NULL,\n        buffer     = NULL,\n\n        attachToNewParent = function(content, newParent)\n        {\n            # We temporarily set the parent's collection to NULL. This ensures\n            # that the add method doesn't post this file to REST.\n            # We also need to set the content's collection to NULL because the\n            # add method throws an exception if we try to add content that\n            # already belongs to a collection.\n\n            parentsCollection <- newParent$getCollection()\n            #parent$.__enclos_env__$private$children <- c(parent$.__enclos_env__$private$children, self)\n            #private$parent <- parent\n            content$setCollection(NULL, setRecursively = FALSE)\n            newParent$setCollection(NULL, setRecursively = FALSE)\n            newParent$add(content)\n            content$setCollection(parentsCollection, setRecursively = FALSE)\n            newParent$setCollection(parentsCollection, setRecursively = FALSE)\n        },\n\n        dettachFromCurrentParent = function()\n        {\n            # We temporarily set the parent's collection to NULL. This ensures\n            # that the remove method doesn't remove this file from REST.\n\n            #private$parent$.__enclos_env__$private$removeChild(private$name)\n            #private$parent <- NULL\n            parent <- private$parent\n            parentsCollection <- parent$getCollection()\n            parent$setCollection(NULL, setRecursively = FALSE)\n            parent$remove(private$name)\n            parent$setCollection(parentsCollection, setRecursively = FALSE)\n        }\n    ),\n\n    cloneable = FALSE\n)\n\n#' print.ArvadosFile\n#'\n#' Custom print function for ArvadosFile class\n#'\n#' @param x Instance of ArvadosFile class\n#' @param ... Optional arguments.\n#' @export\nprint.ArvadosFile = function(x, ...)\n{\n    collection   <- NULL\n    relativePath <- x$getRelativePath()\n\n    if(!is.null(x$getCollection()))\n    {\n        collection <- x$getCollection()$uuid\n        relativePath <- paste0(\"/\", relativePath)\n    }\n\n    cat(paste0(\"Type:          \", \"\\\"\", \"ArvadosFile\", \"\\\"\"), sep = \"\\n\")\n    cat(paste0(\"Name:          \", \"\\\"\", x$getName(),   \"\\\"\"), sep = \"\\n\")\n    cat(paste0(\"Relative path: \", \"\\\"\", relativePath,  \"\\\"\"), sep = \"\\n\")\n    cat(paste0(\"Collection:    \", \"\\\"\", collection,    \"\\\"\"), sep = \"\\n\")\n}\n"
  },
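  {
    "path": "contrib/R-sdk/examples/buffered-write.R",
    "content": "# Illustrative sketch, not part of the packaged SDK: demonstrates the\n# buffered-write workflow documented in ArvadosFile above. connection(\"w\")\n# returns a textConnection buffer, and flush() collects the buffered lines\n# and sends them to the collection via write(). Assumes `arv` is an Arvados\n# client object and that the collection UUID and file name exist on your\n# cluster.\n\nlibrary(ArvadosR)\n\ncollection <- Collection$new(arv, collectionUUID)\narvadosFile <- collection$get(\"myFile.txt\")\n\n# Open a write-mode buffer and write a few lines into it.\narvConnection <- arvadosFile$connection(\"w\")\nwriteLines(c(\"first line\", \"second line\"), arvConnection)\n\n# flush() closes the buffer and uploads its content, replacing the file.\narvadosFile$flush()\n\n# Read the content back as text to confirm the round trip.\nfileContent <- arvadosFile$read(\"text\")\n"
  },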
  {
    "path": "contrib/R-sdk/R/ArvadosR.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\r\n#\r\n# SPDX-License-Identifier: Apache-2.0\r\n\r\n#' @title ArvadosR\r\n#'\r\n#' @description\r\n#'\r\n#' Arvados is an open source platform for managing, processing, and sharing genomic and other large scientific and biomedical data. With Arvados, bioinformaticians run and scale compute-intensive workflows, developers create biomedical applications, and IT administrators manage large compute and storage resources.\r\n#'\r\n#' @author \\itemize{\r\n#' \\item Lucas Di Pentima\r\n#' \\item Ward Vandewege\r\n#' \\item Fuad Muhic\r\n#' \\item Peter Amstutz\r\n#' \\item Aneta Stanczyk\r\n#' \\item Piotr Nowosielski\r\n#' \\item Brett Smith}\r\n#'\r\n#' @seealso \\itemize{\r\n#' \\item https://arvados.org\r\n#' \\item https://doc.arvados.org/sdk/R/index.html\r\n#' \\item https://github.com/arvados/arvados/tree/main/contrib/R-sdk}\r\n#'\r\n#' @name ArvadosR\r\nNULL\r\n"
  },
  {
    "path": "contrib/R-sdk/R/Collection.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n#' R6 Class Representing Arvados Collection\n#'\n#' @description\n#' Collection class provides interface for working with Arvados collections,\n#' for exaplme actions like creating, updating, moving or removing are possible.\n#'\n#' @seealso\n#' https://github.com/arvados/arvados/tree/main/contrib/R-sdk\n#'\n#' @export\n\nCollection <- R6::R6Class(\n\n    \"Collection\",\n\n    public = list(\n\n        #' @field uuid Autentic for Collection UUID.\n        uuid = NULL,\n\n        #' @description\n        #' Initialize new enviroment.\n        #' @param api Arvados enviroment.\n        #' @param uuid The UUID Autentic for Collection UUID.\n        #' @return A new `Collection` object.\n        #' @examples\n        #' \\dontrun{\n        #' collection <- Collection$new(arv, CollectionUUID)\n        #' }\n        initialize = function(api, uuid)\n        {\n            private$REST <- api$getRESTService()\n            self$uuid <- uuid\n        },\n\n        #' @description\n        #' Adds ArvadosFile or Subcollection specified by content to the collection. Used only with ArvadosFile or Subcollection.\n        #' @param content Content to be added.\n        #' @param relativePath Path to add content.\n        add = function(content, relativePath = \"\")\n        {\n            if(is.null(private$tree))\n                private$generateCollectionTreeStructure()\n\n            if(relativePath == \"\"  ||\n               relativePath == \".\" ||\n               relativePath == \"./\")\n            {\n                subcollection <- private$tree$getTree()\n            }\n            else\n            {\n                relativePath <- trimFromEnd(relativePath, \"/\")\n                subcollection <- self$get(relativePath)\n            }\n\n            if(is.null(subcollection))\n                stop(paste(\"Subcollection\", relativePath, \"doesn't exist.\"))\n\n            if(\"ArvadosFile\"   %in% class(content) ||\n               \"Subcollection\" %in% class(content))\n            {\n                if(!is.null(content$getCollection()))\n                    stop(\"Content already belongs to a collection.\")\n\n                if(content$getName() == \"\")\n                    stop(\"Content has invalid name.\")\n\n                subcollection$add(content)\n                content\n            }\n            else\n            {\n                stop(paste0(\"Expected AravodsFile or Subcollection object, got \",\n                            paste0(\"(\", paste0(class(content), collapse = \", \"), \")\"),\n                            \".\"))\n            }\n        },\n\n        #' @description\n        #' Read file content.\n        #' @param file Name of the file.\n        #' @param col Collection from which the file is read.\n        #' @param sep  Separator used in reading tsv, csv file format.\n        #' @param istable Used in reading txt file to check if the file is table or not.\n        #' @param fileclass Used in reading fasta file to set file class.\n        #' @param Ncol Used in reading binary file to set numbers of columns in data.frame.\n        #' @param Nrow Used in reading binary file to set numbers of rows in data.frame size.\n        #' @examples\n        #' \\dontrun{\n        #' collection <- Collection$new(arv, collectionUUID)\n        #' readFile <- collection$readArvFile(arvadosFile, istable = 'yes')                    # table\n        #' readFile <- 
 collection$readArvFile(arvadosFile, istable = 'no')                     # text\n        #' readFile <- collection$readArvFile(arvadosFile)                                     # xlsx, csv, tsv, rds, rdata\n        #' readFile <- collection$readArvFile(arvadosFile, fileclass = 'fasta')                # fasta\n        #' readFile <- collection$readArvFile(arvadosFile, Ncol = 4, Nrow = 32)                # binary, only numbers\n        #' readFile <- collection$readArvFile(arvadosFile, Ncol = 5, Nrow = 150, istable = \"factor\") # binary with factor or text\n        #' }\n        readArvFile = function(file, con, sep = ',', istable = NULL, fileclass = \"SeqFastadna\", Ncol = NULL, Nrow = NULL, wantedFunction = NULL)\n        {\n            arvFile <- self$get(file)\n            FileName <- arvFile$getName()\n            FileName <- tolower(FileName)\n            FileFormat <- gsub(\".*\\\\.\", \"\", FileName)\n\n            # set environment\n            ARVADOS_API_TOKEN <- Sys.getenv(\"ARVADOS_API_TOKEN\")\n            ARVADOS_API_HOST <- Sys.getenv(\"ARVADOS_API_HOST\")\n            my_collection <- self$uuid\n            key <- gsub(\"/\", \"_\", ARVADOS_API_TOKEN)\n\n            Sys.setenv(\n                \"AWS_ACCESS_KEY_ID\" = key,\n                \"AWS_SECRET_ACCESS_KEY\" = key,\n                \"AWS_DEFAULT_REGION\" = \"collections\",\n                \"AWS_S3_ENDPOINT\" = gsub(\"api[.]\", \"\", ARVADOS_API_HOST))\n\n            if (FileFormat == \"txt\") {\n                if (is.null(istable)){\n                    stop(\"You need to specify whether it is a text or table file\")\n                } else if (istable == 'no') {\n                    fileContent <- arvFile$read(\"text\") # read the whole file as text\n                    fileContent <- gsub(\"[\\r\\n]\", \" \", fileContent)\n                } else if (istable == 'yes') {\n                    arvConnection <- arvFile$connection(\"r\") # open a connection so other read functions can be used\n                    fileContent <- read.table(arvConnection)\n                }\n            }\n            else if (FileFormat == \"xlsx\") {\n                fileContent <- aws.s3::s3read_using(FUN = openxlsx::read.xlsx, object = file, bucket = my_collection)\n            }\n            else if (FileFormat == \"csv\" || FileFormat == \"tsv\") {\n                arvConnection <- arvFile$connection(\"r\")\n                if (FileFormat == \"tsv\"){\n                    mytable <- read.table(arvConnection, sep = '\\t')\n                } else if (FileFormat == \"csv\" & sep == '\\t') {\n                    mytable <- read.table(arvConnection, sep = '\\t')\n                } else if (FileFormat == \"csv\") {\n                    mytable <- read.table(arvConnection, sep = ',')\n                } else {\n                    stop('File format not supported, use arvadosFile$connection() and customise it')\n                }\n            }\n            else if (FileFormat == \"fasta\") {\n                fastafile <- aws.s3::s3read_using(FUN = seqinr::read.fasta, as.string = TRUE, object = file, bucket = my_collection)\n            }\n            else if (FileFormat == \"dat\" || FileFormat == \"bin\") {\n                fileContent <- gzcon(arvFile$connection(\"rb\"))\n\n                # function to process binary data\n                read_bin.file <- function(fileContent) {\n                    # read binfile\n                    column.names <- readBin(fileContent, character(), n = Ncol)\n                    bindata <-
 readBin(fileContent, numeric(), Nrow*Ncol+Ncol)\n                    # check\n                    res <- which(bindata < 0.0000001)\n                    if (is.list(res)) {\n                        bindata <- bindata[-res]\n                    } else {\n                        bindata <- bindata\n                    }\n                    # make a dataframe\n                    data <- data.frame(matrix(data = NA, nrow = Nrow, ncol = Ncol))\n                    for (i in 1:Ncol) {\n                        data[,i] <- bindata[(1+Nrow*(i-1)):(Nrow*i)]\n                    }\n                    colnames(data) = column.names\n\n                    len <- which(is.na(data[,Ncol])) # error if something went wrong\n                    if (length(len) == 0) {\n                        data\n                    } else {\n                        stop(\"There is a factor or text in the table; customise the function by supplying more arguments\")\n                    }\n                }\n                if (is.null(Nrow) | is.null(Ncol)){\n                    stop(\"You need to specify the numbers of columns and rows\")\n                }\n                if (is.null(istable)) {\n                    fileContent <- read_bin.file(fileContent) # call the helper\n                } else if (istable == \"factor\") { # if there is a table with column names\n                    fileContent <- read_bin.file(fileContent)\n                }\n            }\n            else if (FileFormat == \"rds\" || FileFormat == \"rdata\") {\n                arvConnection <- arvFile$connection(\"rb\")\n                mytable <- readRDS(gzcon(arvConnection))\n            }\n            else {\n                stop('File format not supported, use arvadosFile$connection() and customise it')\n            }\n        },\n\n        #' @description\n        #' Write file content.\n        #' @param name Name of the file.\n        #' @param file File to be saved.\n        #' @param collectionUUID UUID of the collection to write to.\n        #' @param fileFormat Format of the file: \"txt\", \"csv\", \"tsv\", \"fasta\", \"xlsx\", or \"dat\"/\"bin\".\n        #' @param istable Used in writing txt file to indicate whether the file is a table ('yes') or plain text ('no').\n        #' @param seqName Sequence name(s), used when writing fasta files.\n        #' @examples\n        #' \dontrun{\n        #' collection <- Collection$new(arv, collectionUUID)\n        #' writeFile <- collection$writeFile(name = \"myoutput.csv\", file = file, fileFormat = \"csv\", istable = NULL, collectionUUID = collectionUUID)             # csv\n        #' writeFile <- collection$writeFile(name = \"myoutput.tsv\", file = file, fileFormat = \"tsv\", istable = NULL, collectionUUID = collectionUUID)             # tsv\n        #' writeFile <- collection$writeFile(name = \"myoutput.fasta\", file = file, fileFormat = \"fasta\", istable = NULL, collectionUUID = collectionUUID)         # fasta\n        #' writeFile <- collection$writeFile(name = \"myoutputtable.txt\", file = file, fileFormat = \"txt\", istable = \"yes\", collectionUUID = collectionUUID)       # txt table\n        #' writeFile <- collection$writeFile(name = \"myoutputtext.txt\", file = file, fileFormat = \"txt\", istable = \"no\", collectionUUID = collectionUUID)         # txt text\n        #' writeFile <- collection$writeFile(name = \"myoutputbinary.dat\", file = file, fileFormat = \"dat\", collectionUUID = collectionUUID)                       # binary\n        #' writeFile <- collection$writeFile(name = \"myoutputxlsx.xlsx\", file = file, fileFormat = \"xlsx\", collectionUUID = collectionUUID)                       # xlsx\n        #' }\n        writeFile = function(name, file, collectionUUID, fileFormat, istable = NULL, seqName = NULL)\n        {\n            # set environment\n
            ARVADOS_API_TOKEN <- Sys.getenv(\"ARVADOS_API_TOKEN\")\n            ARVADOS_API_HOST <- Sys.getenv(\"ARVADOS_API_HOST\")\n            my_collection <- self$uuid\n            key <- gsub(\"/\", \"_\", ARVADOS_API_TOKEN)\n\n            Sys.setenv(\n                \"AWS_ACCESS_KEY_ID\" = key,\n                \"AWS_SECRET_ACCESS_KEY\" = key,\n                \"AWS_DEFAULT_REGION\" = \"collections\",\n                \"AWS_S3_ENDPOINT\" = gsub(\"api[.]\", \"\", ARVADOS_API_HOST))\n\n            # save file\n            if (fileFormat == \"txt\") {\n                if (istable == \"yes\") {\n                    aws.s3::s3write_using(file, FUN = write.table, object = name, bucket = collectionUUID)\n                } else if (istable == \"no\") {\n                    aws.s3::s3write_using(file, FUN = writeChar, object = name, bucket = collectionUUID)\n                } else {\n                    stop(\"Specify parameter istable\")\n                }\n            } else if (fileFormat == \"csv\") {\n                aws.s3::s3write_using(file, FUN = write.csv, object = name, bucket = collectionUUID)\n            } else if (fileFormat == \"tsv\") {\n                aws.s3::s3write_using(file, FUN = write.table, row.names = FALSE, sep = \"\\t\", object = name, bucket = collectionUUID)\n            } else if (fileFormat == \"fasta\") {\n                aws.s3::s3write_using(file, FUN = seqinr::write.fasta, name = seqName, object = name, bucket = collectionUUID)\n            } else if (fileFormat == \"xlsx\") {\n                aws.s3::s3write_using(file, FUN = openxlsx::write.xlsx, object = name, bucket = collectionUUID)\n            } else if (fileFormat == \"dat\" || fileFormat == \"bin\") {\n                aws.s3::s3write_using(file, FUN = writeBin, object = name, bucket = collectionUUID)\n            } else {\n                stop('File format not supported, use arvadosFile$connection() and customise it')\n            }\n        },\n\n        #' @description\n        #' Creates one or more ArvadosFiles and adds them to the collection at the specified path.\n        #' @param files Content to be created.\n        #' @examples\n        #' \dontrun{\n        #' newFiles <- collection$create(c(\"main.R\", \"myFolder/myFile.txt\"))\n        #' }\n        create = function(files)\n        {\n            if(is.null(private$tree))\n                private$generateCollectionTreeStructure()\n\n            if(is.character(files))\n            {\n                sapply(files, function(file)\n                {\n                    childWithSameName <- self$get(file)\n                    if(!is.null(childWithSameName))\n                        stop(\"Destination already contains file with same name.\")\n\n                    newTreeBranch <- private$tree$createBranch(file)\n                    private$tree$addBranch(private$tree$getTree(), newTreeBranch)\n\n                    private$REST$create(file, self$uuid)\n                    newTreeBranch$setCollection(self)\n                    newTreeBranch\n                })\n            }\n            else\n            {\n                stop(paste0(\"Expected character vector, got \",\n                            paste0(\"(\", paste0(class(files), collapse = \", \"), \")\"),\n                            \".\"))\n            }\n        },\n\n        #' @description\n        #' Remove one or more files from the
 collection.\n        #' @param paths Content to be removed.\n        #' @examples\n        #' \dontrun{\n        #' collection$remove(\"fileName.format\")\n        #' }\n        remove = function(paths)\n        {\n            if(is.null(private$tree))\n                private$generateCollectionTreeStructure()\n\n            if(is.character(paths))\n            {\n                sapply(paths, function(filePath)\n                {\n                    filePath <- trimFromEnd(filePath, \"/\")\n                    file <- self$get(filePath)\n\n                    if(is.null(file))\n                        stop(paste(\"File\", filePath, \"doesn't exist.\"))\n\n                    parent <- file$getParent()\n\n                    if(is.null(parent))\n                        stop(\"You can't delete root folder.\")\n\n                    parent$remove(file$getName())\n                })\n\n                \"Content removed\"\n            }\n            else\n            {\n                stop(paste0(\"Expected character vector, got \",\n                            paste0(\"(\", paste0(class(paths), collapse = \", \"), \")\"),\n                            \".\"))\n            }\n        },\n\n        #' @description\n        #' Moves ArvadosFile or Subcollection to another location in the collection.\n        #' @param content Content to be moved.\n        #' @param destination Path to move content.\n        #' @examples\n        #' \dontrun{\n        #' collection$move(\"fileName.format\", path)\n        #' }\n        move = function(content, destination)\n        {\n            if(is.null(private$tree))\n                private$generateCollectionTreeStructure()\n\n            content <- trimFromEnd(content, \"/\")\n\n            elementToMove <- self$get(content)\n\n            if(is.null(elementToMove))\n                stop(\"Content you want to move doesn't exist in the collection.\")\n\n            elementToMove$move(destination)\n        },\n\n        #' @description\n        #' Copies ArvadosFile or Subcollection to another location in the collection.\n        #' @param content Content to be copied.\n        #' @param destination Path to copy content to.\n        #' @examples\n        #' \dontrun{\n        #' copied <- collection$copy(\"oldName.format\", \"newName.format\")\n        #' }\n        copy = function(content, destination)\n        {\n            if(is.null(private$tree))\n                private$generateCollectionTreeStructure()\n\n            content <- trimFromEnd(content, \"/\")\n\n            elementToCopy <- self$get(content)\n\n            if(is.null(elementToCopy))\n                stop(\"Content you want to copy doesn't exist in the collection.\")\n\n            elementToCopy$copy(destination)\n        },\n\n        #' @description\n        #' Refreshes the environment.\n        #' @examples\n        #' \dontrun{\n        #' collection$refresh()\n        #' }\n        refresh = function()\n        {\n            if(!is.null(private$tree))\n            {\n                private$tree$getTree()$setCollection(NULL, setRecursively = TRUE)\n                private$tree <- NULL\n            }\n        },\n\n        #' @description\n        #' Returns the collection's file listing as a character vector.\n        #' @examples\n        #' \dontrun{\n        #' list <- collection$getFileListing()\n        #' }\n        getFileListing = function()\n        {\n            if(is.null(private$tree))\n                private$generateCollectionTreeStructure()\n\n            content <-
 private$REST$getCollectionContent(self$uuid)\n            content[order(tolower(content))]\n        },\n\n        #' @description\n        #' If relativePath is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.\n        #' @param relativePath Path from which content is taken.\n        #' @examples\n        #' \dontrun{\n        #' arvadosFile <- collection$get(fileName)\n        #' }\n        get = function(relativePath)\n        {\n            if(is.null(private$tree))\n                private$generateCollectionTreeStructure()\n\n            private$tree$getElement(relativePath)\n        },\n\n        getRESTService = function() private$REST,\n        setRESTService = function(newRESTService) private$REST <- newRESTService\n    ),\n    private = list(\n\n        REST        = NULL,\n        # Tree structure of the collection content.\n        tree        = NULL,\n        fileContent = NULL,\n\n        generateCollectionTreeStructure = function(relativePath = NULL)\n        {\n            if(is.null(self$uuid))\n                stop(\"Collection uuid is not defined.\")\n\n            if(is.null(private$REST))\n                stop(\"REST service is not defined.\")\n\n            private$fileContent <- private$REST$getCollectionContent(self$uuid, relativePath)\n            private$tree <- CollectionTree$new(private$fileContent, self)\n        }\n    ),\n\n    cloneable = FALSE\n)\n\n#' print.Collection\n#'\n#' Custom print function for Collection class\n#'\n#' @param x Instance of Collection class\n#' @param ... Optional arguments.\n#' @export\nprint.Collection = function(x, ...)\n{\n    cat(paste0(\"Type: \", \"\\\"\", \"Arvados Collection\", \"\\\"\"), sep = \"\\n\")\n    cat(paste0(\"uuid: \", \"\\\"\", x$uuid,               \"\\\"\"), sep = \"\\n\")\n}\n"
  },
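  {
    "path": "contrib/R-sdk/examples/collection-basics.R",
    "content": "# Illustrative sketch, not part of the packaged SDK: end-to-end use of the\n# Collection class documented above. Assumes `arv` is an Arvados client\n# object created from the generated Arvados class, and that collectionUUID\n# names an existing, writable collection on your cluster.\n\nlibrary(ArvadosR)\n\ncollection <- Collection$new(arv, collectionUUID)\n\n# List every file in the collection, sorted case-insensitively.\nfiles <- collection$getFileListing()\n\n# Fetch a file and read it as text.\narvadosFile <- collection$get(\"myFolder/myFile.txt\")\nfileContent <- arvadosFile$read(\"text\")\n\n# Create an empty file, write to it, then copy and remove the original.\ncollection$create(\"notes.txt\")\nnewFile <- collection$get(\"notes.txt\")\nnewFile$write(\"This is new file content\")\ncollection$copy(\"notes.txt\", \"notes-backup.txt\")\ncollection$remove(\"notes.txt\")\n"
  },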
  {
    "path": "contrib/R-sdk/R/CollectionTree.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nCollectionTree <- R6::R6Class(\n    \"CollectionTree\",\n    public = list(\n\n        pathsList = NULL,\n\n        initialize = function(fileContent, collection)\n        {\n            self$pathsList <- fileContent\n            treeBranches <- sapply(fileContent, function(filePath) self$createBranch(filePath))\n            root <- Subcollection$new(\"\")\n            sapply(treeBranches, function(branch) self$addBranch(root, branch))\n            root$setCollection(collection)\n            private$tree <- root\n        },\n\n        createBranch = function(filePath)\n        {\n            splitPath <- unlist(strsplit(filePath, \"/\", fixed = TRUE))\n            branch <- NULL\n            lastElementIndex <- length(splitPath)\n\n            for(elementIndex in lastElementIndex:1)\n            {\n                if(elementIndex == lastElementIndex)\n                {\n                    branch <- ArvadosFile$new(splitPath[[elementIndex]])\n                }\n                else\n                {\n                    newFolder <- Subcollection$new(splitPath[[elementIndex]])\n                    newFolder$add(branch)\n                    branch <- newFolder\n                }\n            }\n\n            branch\n        },\n\n        addBranch = function(container, node)\n        {\n            child <- container$get(node$getName())\n\n            if(is.null(child))\n            {\n                # Make sure we are don't make any REST call while adding child\n                collection <- container$getCollection()\n                container$setCollection(NULL, setRecursively = FALSE)\n                container$add(node)\n                container$setCollection(collection, setRecursively = FALSE)\n            }\n            else\n            {\n                # Note: REST always returns folder name alone before other folder\n                # content, so in first iteration we don't know if it's a file\n                # or folder since its just a name, so we assume it's a file.\n                # If we encounter that same name again we know\n                # it's a folder so we need to replace ArvadosFile with Subcollection.\n                if(\"ArvadosFile\" %in% class(child))\n                    child = private$replaceFileWithSubcollection(child)\n\n                self$addBranch(child, node$getFirst())\n            }\n        },\n\n        getElement = function(relativePath)\n        {\n            relativePath <- trimFromStart(relativePath, \"./\")\n            relativePath <- trimFromEnd(relativePath, \"/\")\n\n            if(endsWith(relativePath, \"/\"))\n                relativePath <- substr(relativePath, 0, nchar(relativePath) - 1)\n\n            splitPath <- unlist(strsplit(relativePath, \"/\", fixed = TRUE))\n            returnElement <- private$tree\n\n            for(pathFragment in splitPath)\n            {\n                returnElement <- returnElement$get(pathFragment)\n\n                if(is.null(returnElement))\n                    return(NULL)\n            }\n\n            returnElement\n        },\n\n        getTree = function() private$tree\n    ),\n\n    private = list(\n\n        tree = NULL,\n\n        replaceFileWithSubcollection = function(arvadosFile)\n        {\n            subcollection <- Subcollection$new(arvadosFile$getName())\n            fileParent <- arvadosFile$getParent()\n            
fileParent$remove(arvadosFile$getName())\n            fileParent$add(subcollection)\n\n            arvadosFile$setParent(NULL)\n\n            subcollection\n        }\n    )\n)\n"
  },
  {
    "path": "contrib/R-sdk/R/HttpParser.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nHttpParser <- R6::R6Class(\n\n    \"HttrParser\",\n\n    public = list(\n\n        validContentTypes = NULL,\n\n        initialize = function()\n        {\n            self$validContentTypes <- c(\"text\", \"raw\")\n        },\n\n        parseJSONResponse = function(serverResponse)\n        {\n            parsed_response <- httr::content(serverResponse,\n                                             as = \"parsed\",\n                                             type = \"application/json\")\n        },\n\n        parseResponse = function(serverResponse, outputType)\n        {\n            parsed_response <- httr::content(serverResponse, as = outputType)\n        },\n\n        getFileNamesFromResponse = function(response, uri)\n        {\n            text <- rawToChar(response$content)\n            doc <- XML::xmlParse(text, asText=TRUE)\n            base <- paste(\"/\", strsplit(uri, \"/\")[[1]][4], \"/\", sep=\"\")\n            result <- unlist(\n                XML::xpathApply(doc, \"//D:response/D:href\", function(node) {\n                    sub(base, \"\", URLdecode(XML::xmlValue(node)), fixed=TRUE)\n                })\n            )\n            result[result != \"\"]\n        },\n\n        getFileSizesFromResponse = function(response, uri)\n        {\n            text <- rawToChar(response$content)\n            doc <- XML::xmlParse(text, asText=TRUE)\n\n            base <- paste(paste(\"/\", strsplit(uri, \"/\")[[1]][-1:-3], sep=\"\", collapse=\"\"), \"/\", sep=\"\")\n            result <- XML::xpathApply(doc, \"//D:response/D:propstat/D:prop/D:getcontentlength\", function(node) {\n              XML::xmlValue(node)\n            })\n\n            unlist(result)\n        }\n    )\n)\n"
  },
  {
    "path": "contrib/R-sdk/R/HttpRequest.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nHttpRequest <- R6::R6Class(\n\n    \"HttrRequest\",\n\n    public = list(\n\n        validContentTypes = NULL,\n        validVerbs = NULL,\n\n        initialize = function()\n        {\n            self$validContentTypes <- c(\"text\", \"raw\")\n            self$validVerbs <- c(\"GET\", \"POST\", \"PUT\", \"DELETE\", \"PROPFIND\", \"MOVE\", \"COPY\")\n        },\n\n        exec = function(verb, url, headers = NULL, body = NULL, queryParams = NULL,\n                        retryTimes = 0)\n        {\n            if(!(verb %in% self$validVerbs))\n                stop(\"Http verb is not valid.\")\n\n            urlQuery <- self$createQuery(queryParams)\n            url      <- paste0(url, urlQuery)\n\n            config <- httr::add_headers(unlist(headers))\n            if(toString(Sys.getenv(\"ARVADOS_API_HOST_INSECURE\") == \"TRUE\"))\n               config$options = list(ssl_verifypeer = 0L)\n\n            response <- httr::RETRY(verb, url = url, body = body,\n                                    config = config, times = retryTimes + 1)\n        },\n\n        createQuery = function(queryParams)\n        {\n            queryParams <- Filter(Negate(is.null), queryParams)\n\n            query <- sapply(queryParams, function(param)\n            {\n                if(is.list(param) || length(param) > 1)\n                    param <- RListToPythonList(param, \",\")\n\n                URLencode(as.character(param), reserved = T, repeated = T)\n\n            }, USE.NAMES = TRUE)\n\n            if(length(query) > 0)\n            {\n                query <- paste0(names(query), \"=\", query, collapse = \"&\")\n\n                return(paste0(\"?\", query))\n            }\n\n            return(\"\")\n        },\n\n        getConnection = function(url, headers, openMode)\n        {\n            h <- curl::new_handle()\n            curl::handle_setheaders(h, .list = headers)\n\n            if(toString(Sys.getenv(\"ARVADOS_API_HOST_INSECURE\") == \"TRUE\"))\n               curl::handle_setopt(h, ssl_verifypeer = 0L)\n\n            conn <- curl::curl(url = url, open = openMode, handle = h)\n        }\n    ),\n\n    cloneable = FALSE\n)\n"
  },
  {
    "path": "contrib/R-sdk/R/RESTService.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nRESTService <- R6::R6Class(\n\n    \"RESTService\",\n\n    public = list(\n\n        token      = NULL,\n        http       = NULL,\n        httpParser = NULL,\n        numRetries = NULL,\n\n        initialize = function(token, rawHost,\n                              http, httpParser,\n                              numRetries     = 0,\n                              webDavHostName = NULL)\n        {\n            self$token      <- token\n            self$http       <- http\n            self$httpParser <- httpParser\n            self$numRetries <- numRetries\n\n            private$rawHostName    <- rawHost\n            private$webDavHostName <- webDavHostName\n        },\n\n        setNumConnRetries = function(newNumOfRetries)\n        {\n            self$numRetries <- newNumOfRetries\n        },\n\n        getWebDavHostName = function()\n        {\n            if(is.null(private$webDavHostName))\n            {\n                publicConfigURL <- paste0(\"https://\", private$rawHostName,\n                                               \"/arvados/v1/config\")\n\n                serverResponse <- self$http$exec(\"GET\", publicConfigURL, retryTimes = self$numRetries)\n\n                configDocument <- self$httpParser$parseJSONResponse(serverResponse)\n                private$webDavHostName <- configDocument$Services$WebDAVDownload$ExternalURL\n\n                if(is.null(private$webDavHostName))\n                    stop(\"Unable to find WebDAV server.\")\n            }\n\n            private$webDavHostName\n        },\n\n        create = function(files, uuid)\n        {\n            sapply(files, function(filePath)\n            {\n                private$createNewFile(filePath, uuid, \"text/html\")\n            })\n        },\n\n        delete = function(relativePath, uuid)\n        {\n            fileURL <- paste0(self$getWebDavHostName(), \"c=\",\n                              uuid, \"/\", relativePath);\n            headers <- list(Authorization = paste(\"Bearer\", self$token))\n\n            serverResponse <- self$http$exec(\"DELETE\", fileURL, headers,\n                                             retryTimes = self$numRetries)\n\n            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)\n                stop(paste(\"Server code:\", serverResponse$status_code))\n\n            serverResponse\n        },\n\n        move = function(from, to, uuid)\n        {\n            collectionURL <- paste0(self$getWebDavHostName(), \"c=\", uuid, \"/\")\n            fromURL <- paste0(collectionURL, from)\n            toURL <- paste0(collectionURL, trimFromStart(to, \"/\"))\n\n            headers <- list(\"Authorization\" = paste(\"Bearer\", self$token),\n                            \"Destination\" = toURL)\n\n            serverResponse <- self$http$exec(\"MOVE\", fromURL, headers,\n                                             retryTimes = self$numRetries)\n\n            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)\n                stop(paste(\"Server code:\", serverResponse$status_code))\n\n            serverResponse\n        },\n\n        copy = function(from, to, uuid)\n        {\n            collectionURL <- paste0(self$getWebDavHostName(), \"c=\", uuid, \"/\")\n            fromURL <- paste0(collectionURL, from)\n            toURL <- paste0(collectionURL, trimFromStart(to, \"/\"))\n\n            headers <- 
list(\"Authorization\" = paste(\"Bearer\", self$token),\n                            \"Destination\" = toURL)\n\n            serverResponse <- self$http$exec(\"COPY\", fromURL, headers,\n                                             retryTimes = self$numRetries)\n\n            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)\n                stop(paste(\"Server code:\", serverResponse$status_code))\n\n            serverResponse\n        },\n\n       getCollectionContent = function(uuid, relativePath = NULL)\n\n        {\n            collectionURL <- URLencode(paste0(self$getWebDavHostName(),\n                                             \"c=\", uuid, \"/\", relativePath))\n\n            headers <- list(\"Authorization\" = paste(\"Bearer\", self$token))\n\n            response <- self$http$exec(\"PROPFIND\", collectionURL, headers,\n                                       retryTimes = self$numRetries)\n\n            if(all(response == \"\"))\n                stop(\"Response is empty, request may be misconfigured\")\n\n            if(response$status_code < 200 || response$status_code >= 300)\n                stop(paste(\"Server code:\", response$status_code))\n\n            self$httpParser$getFileNamesFromResponse(response, collectionURL)\n        },\n\n        getResourceSize = function(relativePath, uuid)\n        {\n            collectionURL <- URLencode(paste0(self$getWebDavHostName(),\n                                              \"c=\", uuid))\n\n            subcollectionURL <- paste0(collectionURL, \"/\", relativePath);\n\n            headers <- list(\"Authorization\" = paste(\"Bearer\", self$token))\n\n            response <- self$http$exec(\"PROPFIND\", subcollectionURL, headers,\n                                       retryTimes = self$numRetries)\n\n            if(all(response == \"\"))\n                stop(\"Response is empty, request may be misconfigured\")\n\n            if(response$status_code < 200 || response$status_code >= 300)\n                stop(paste(\"Server code:\", response$status_code))\n\n            sizes <- self$httpParser$getFileSizesFromResponse(response,\n                                                              collectionURL)\n            as.numeric(sizes)\n        },\n\n        read = function(relativePath, uuid, contentType = \"raw\", offset = 0, length = 0)\n        {\n            fileURL <- paste0(self$getWebDavHostName(),\n                             \"c=\", uuid, \"/\", relativePath);\n\n            range <- paste0(\"bytes=\", offset, \"-\")\n\n            if(length > 0)\n                range = paste0(range, offset + length - 1)\n\n            if(offset == 0 && length == 0)\n            {\n                headers <- list(Authorization = paste(\"Bearer\", self$token))\n            }\n            else\n            {\n                headers <- list(Authorization = paste(\"Bearer\", self$token),\n                                Range = range)\n            }\n\n            if(!(contentType %in% self$httpParser$validContentTypes))\n                stop(\"Invalid contentType. 
Please use text or raw.\")\n\n            serverResponse <- self$http$exec(\"GET\", fileURL, headers,\n                                             retryTimes = self$numRetries)\n\n            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)\n                stop(paste(\"Server code:\", serverResponse$status_code))\n\n            self$httpParser$parseResponse(serverResponse, contentType)\n        },\n\n        write = function(relativePath, uuid, content, contentType)\n        {\n            fileURL <- paste0(self$getWebDavHostName(),\n                             \"c=\", uuid, \"/\", relativePath);\n            headers <- list(Authorization = paste(\"Bearer\", self$token),\n                            \"Content-Type\" = contentType)\n            body <- content\n\n            serverResponse <- self$http$exec(\"PUT\", fileURL, headers, body,\n                                             retryTimes = self$numRetries)\n\n            if(serverResponse$status_code < 200 || serverResponse$status_code >= 300)\n                stop(paste(\"Server code:\", serverResponse$status_code))\n\n            self$httpParser$parseResponse(serverResponse, \"text\")\n        },\n\n        getConnection = function(relativePath, uuid, openMode)\n        {\n            fileURL <- paste0(self$getWebDavHostName(),\n                              \"c=\", uuid, \"/\", relativePath);\n            headers <- list(Authorization = paste(\"Bearer\", self$token))\n\n            conn <- self$http$getConnection(fileURL, headers, openMode)\n        }\n    ),\n\n    private = list(\n\n        webDavHostName = NULL,\n        rawHostName    = NULL,\n\n        createNewFile = function(relativePath, uuid, contentType)\n        {\n            fileURL <- paste0(self$getWebDavHostName(), \"c=\",\n                              uuid, \"/\", relativePath)\n            headers <- list(Authorization = paste(\"Bearer\", self$token),\n                            \"Content-Type\" = contentType)\n            body <- NULL\n\n            serverResponse <- self$http$exec(\"PUT\", fileURL, headers, body,\n                                             retryTimes = self$numRetries)\n\n            if (serverResponse$status_code < 200){ # to wyrzuca błędy\n                stop(paste(\"Server code:\", serverResponse$status_code))}\n            else if (serverResponse$status_code >= 300 & serverResponse$status_code < 422) {\n                stop(paste(\"Server code:\", serverResponse$status_code))}\n            else if (serverResponse$status_code == 422 ) {\n                stop(paste(\"Project of that name already exists. If you want to change it use project_update() instead\"))}\n\n            paste(\"File created:\", relativePath)\n        }\n    ),\n\n    cloneable = FALSE\n)\n"
  },
  {
    "path": "contrib/R-sdk/R/Subcollection.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n#' R6 Class Representing a Subcollection\n#'\n#' @description\n#' Subcollection class represents a folder inside Arvados collection.\n#' It is essentially a composite of arvadosFiles and other subcollections.\n\n#' @export\nSubcollection <- R6::R6Class(\n\n    \"Subcollection\",\n\n    public = list(\n\n        #' @description\n        #' Initialize new enviroment.\n        #' @param name Name of the new enviroment.\n        #' @return A new `Subcollection` object.\n        initialize = function(name)\n        {\n            private$name <- name\n        },\n\n        #' @description\n        #' Returns name of the file.\n        getName = function() private$name,\n\n        #' @description\n        #' Returns Subcollection's path relative to the root.\n        getRelativePath = function()\n        {\n            relativePath <- c(private$name)\n            parent <- private$parent\n\n            while(!is.null(parent))\n            {\n                relativePath <- c(parent$getName(), relativePath)\n                parent <- parent$getParent()\n            }\n\n            relativePath <- relativePath[relativePath != \"\"]\n            paste0(relativePath, collapse = \"/\")\n        },\n\n        #' @description\n        #' Adds ArvadosFile or Subcollection specified by content to the Subcollection.\n        #' @param content Content to be added.\n        add = function(content)\n        {\n            if(\"ArvadosFile\"   %in% class(content) ||\n               \"Subcollection\" %in% class(content))\n            {\n                if(!is.null(content$getCollection()))\n                    stop(\"Content already belongs to a collection.\")\n\n                if(content$getName() == \"\")\n                    stop(\"Content has invalid name.\")\n\n                childWithSameName <- self$get(content$getName())\n\n                if(!is.null(childWithSameName))\n                    stop(paste(\"Subcollection already contains ArvadosFile\",\n                               \"or Subcollection with same name.\"))\n\n                if(!is.null(private$collection))\n                {\n                    if(self$getRelativePath() != \"\")\n                        contentPath <- paste0(self$getRelativePath(),\n                                              \"/\", content$getFileListing())\n                    else\n                        contentPath <- content$getFileListing()\n\n                    REST <- private$collection$getRESTService()\n                    REST$create(contentPath, private$collection$uuid)\n                    content$setCollection(private$collection)\n                }\n\n                private$children <- c(private$children, content)\n                content$setParent(self)\n\n                \"Content added successfully.\"\n            }\n            else\n            {\n                stop(paste0(\"Expected AravodsFile or Subcollection object, got \",\n                            paste0(\"(\", paste0(class(content), collapse = \", \"), \")\"),\n                            \".\"))\n            }\n        },\n\n        #' @description\n        #' Removes ArvadosFile or Subcollection specified by name from the Subcollection.\n        #' @param name Name of the file to be removed.\n        remove = function(name)\n        {\n            if(is.character(name))\n            {\n                child <- self$get(name)\n\n                if(is.null(child))\n  
                  stop(paste(\"Subcollection doesn't contains ArvadosFile\",\n                               \"or Subcollection with specified name.\"))\n\n                if(!is.null(private$collection))\n                {\n                    REST <- private$collection$getRESTService()\n                    REST$delete(child$getRelativePath(), private$collection$uuid)\n\n                    child$setCollection(NULL)\n                }\n\n                private$removeChild(name)\n                child$setParent(NULL)\n\n                \"Content removed\"\n            }\n            else\n            {\n                stop(paste0(\"Expected character, got \",\n                            paste0(\"(\", paste0(class(name), collapse = \", \"), \")\"),\n                            \".\"))\n            }\n        },\n\n        #' @description\n        #' Returns Subcollections file content as character vector.\n        #' @param fullPath Checking if the path to file exists.\n        getFileListing = function(fullPath = TRUE)\n        {\n            content <- private$getContentAsCharVector(fullPath)\n            content[order(tolower(content))]\n        },\n\n        #' @description\n        #' Returns subcollections content size in bytes.\n        getSizeInBytes = function()\n        {\n            if(is.null(private$collection))\n                return(0)\n\n            REST <- private$collection$getRESTService()\n\n            fileSizes <- REST$getResourceSize(paste0(self$getRelativePath(), \"/\"),\n                                              private$collection$uuid)\n            return(sum(fileSizes))\n        },\n\n        #' @description\n        #' Moves Subcollection to a new location inside collection.\n        #' @param destination Path to move the file.\n        move = function(destination)\n        {\n            if(is.null(private$collection))\n                stop(\"Subcollection doesn't belong to any collection.\")\n\n            destination <- trimFromEnd(destination, \"/\")\n            nameAndPath <- splitToPathAndName(destination)\n\n            newParent <- private$collection$get(nameAndPath$path)\n\n            if(is.null(newParent))\n                stop(\"Unable to get destination subcollection.\")\n\n            childWithSameName <- newParent$get(nameAndPath$name)\n\n            if(!is.null(childWithSameName))\n                stop(\"Destination already contains content with same name.\")\n\n            REST <- private$collection$getRESTService()\n            REST$move(self$getRelativePath(),\n                      paste0(newParent$getRelativePath(), \"/\", nameAndPath$name),\n                      private$collection$uuid)\n\n            private$dettachFromCurrentParent()\n            private$attachToNewParent(self, newParent)\n\n            private$parent <- newParent\n            private$name <- nameAndPath$name\n\n            self\n        },\n\n        #' @description\n        #' Copies Subcollection to a new location inside collection.\n        #' @param destination Path to copy the file.\n        copy = function(destination)\n        {\n            if(is.null(private$collection))\n                stop(\"Subcollection doesn't belong to any collection.\")\n\n            destination <- trimFromEnd(destination, \"/\")\n            nameAndPath <- splitToPathAndName(destination)\n\n            newParent <- private$collection$get(nameAndPath$path)\n\n            if(is.null(newParent) || !(\"Subcollection\" %in% class(newParent)))\n                stop(\"Unable to get 
destination subcollection.\")\n\n            childWithSameName <- newParent$get(nameAndPath$name)\n\n            if(!is.null(childWithSameName))\n                stop(\"Destination already contains content with same name.\")\n\n            REST <- private$collection$getRESTService()\n            REST$copy(self$getRelativePath(),\n                      paste0(newParent$getRelativePath(), \"/\", nameAndPath$name),\n                      private$collection$uuid)\n\n            newContent <- self$duplicate(nameAndPath$name)\n            newContent$setCollection(self$getCollection(), setRecursively = TRUE)\n            newContent$setParent(newParent)\n            private$attachToNewParent(newContent, newParent)\n\n            newContent\n        },\n\n        #' @description\n        #' Duplicate Subcollection and gives it a new name.\n        #' @param newName New name for duplicated file.\n        duplicate = function(newName = NULL)\n        {\n            name <- if(!is.null(newName)) newName else private$name\n            root <- Subcollection$new(name)\n            for(child in private$children)\n                root$add(child$duplicate())\n\n            root\n        },\n\n        #' @description\n        #' If name is valid, returns ArvadosFile or Subcollection specified by relativePath, else returns NULL.\n        #' @param name Name of the file.\n        get = function(name)\n        {\n            for(child in private$children)\n            {\n                if(child$getName() == name)\n                    return(child)\n            }\n\n            return(NULL)\n        },\n\n        #' @description\n        #' Returns files in Subcollection.\n        getFirst = function()\n        {\n            if(length(private$children) == 0)\n                return(NULL)\n\n            private$children[[1]]\n        },\n\n        #' @description\n        #' Sets Collection by its UUID.\n        setCollection = function(collection, setRecursively = TRUE)\n        {\n            private$collection = collection\n\n            if(setRecursively)\n            {\n                for(child in private$children)\n                    child$setCollection(collection)\n            }\n        },\n\n        #' @description\n        #' Returns Collection of Subcollection.\n        getCollection = function() private$collection,\n\n        #' @description\n        #' Returns Collection UUID.\n        getParent = function() private$parent,\n\n        #' @description\n        #' Sets new Collection.\n        setParent = function(newParent) private$parent <- newParent\n    ),\n\n    private = list(\n\n        name       = NULL,\n        children   = NULL,\n        parent     = NULL,\n        collection = NULL,\n\n        removeChild = function(name)\n        {\n            numberOfChildren = length(private$children)\n            if(numberOfChildren > 0)\n            {\n                for(childIndex in 1:numberOfChildren)\n                {\n                    if(private$children[[childIndex]]$getName() == name)\n                    {\n                        private$children = private$children[-childIndex]\n                        return()\n                    }\n                }\n            }\n        },\n\n        attachToNewParent = function(content, newParent)\n        {\n            # We temporary set parents collection to NULL. 
This will ensure that\n            # add method doesn't post this subcollection to REST.\n            # We also need to set content's collection to NULL because\n            # add method throws exception if we try to add content that already\n            # belongs to a collection.\n            parentsCollection <- newParent$getCollection()\n            content$setCollection(NULL, setRecursively = FALSE)\n            newParent$setCollection(NULL, setRecursively = FALSE)\n            newParent$add(content)\n            content$setCollection(parentsCollection, setRecursively = FALSE)\n            newParent$setCollection(parentsCollection, setRecursively = FALSE)\n        },\n\n        dettachFromCurrentParent = function()\n        {\n            # We temporary set parents collection to NULL. This will ensure that\n            # remove method doesn't remove this subcollection from REST.\n            parent <- private$parent\n            parentsCollection <- parent$getCollection()\n            parent$setCollection(NULL, setRecursively = FALSE)\n            parent$remove(private$name)\n            parent$setCollection(parentsCollection, setRecursively = FALSE)\n        },\n\n        getContentAsCharVector = function(fullPath = TRUE)\n        {\n            content <- NULL\n\n            if(fullPath)\n            {\n                for(child in private$children)\n                    content <- c(content, child$getFileListing())\n\n                if(private$name != \"\")\n                    content <- unlist(paste0(private$name, \"/\", content))\n            }\n            else\n            {\n                for(child in private$children)\n                    content <- c(content, child$getName())\n            }\n\n            content\n        }\n    ),\n\n    cloneable = FALSE\n)\n\n#' print.Subcollection\n#'\n#' Custom print function for Subcollection class\n#'\n#' @param x Instance of Subcollection class\n#' @param ... Optional arguments.\n#' @export\nprint.Subcollection = function(x, ...)\n{\n    collection   <- NULL\n    relativePath <- x$getRelativePath()\n\n    if(!is.null(x$getCollection()))\n    {\n        collection <- x$getCollection()$uuid\n\n        if(!x$getName() == \"\")\n            relativePath <- paste0(\"/\", relativePath)\n    }\n\n    cat(paste0(\"Type:          \", \"\\\"\", \"Arvados Subcollection\", \"\\\"\"), sep = \"\\n\")\n    cat(paste0(\"Name:          \", \"\\\"\", x$getName(),             \"\\\"\"), sep = \"\\n\")\n    cat(paste0(\"Relative path: \", \"\\\"\", relativePath,            \"\\\"\"), sep = \"\\n\")\n    cat(paste0(\"Collection:    \", \"\\\"\", collection,              \"\\\"\"), sep = \"\\n\")\n}\n"
  },
  {
    "path": "contrib/R-sdk/R/util.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n#' listAll\n#'\n#' List all resources even if the number of items is greater than maximum API limit.\n#'\n#' @param fn Arvados method used to retrieve items from REST service.\n#' @param ... Optional arguments which will be pased to fn .\n#' @examples\n#' \\dontrun{\n#' arv <- Arvados$new(\"your Arvados token\", \"example.arvadosapi.com\")\n#' cl <- listAll(arv$collections.list, filters = list(list(\"name\", \"like\", \"test%\"))\n#' }\n#' @export \nlistAll <- function(fn, ...)\n{\n    offset <- 0\n    itemsAvailable <- .Machine$integer.max\n    items <- c()\n\n    while(length(items) < itemsAvailable)\n    {\n        serverResponse <- fn(offset = offset, ...)\n\n        if(!is.null(serverResponse$errors))\n            stop(serverResponse$errors)\n\n        items          <- c(items, serverResponse$items)\n        offset         <- length(items)\n        itemsAvailable <- serverResponse$items_available\n    }\n\n    items\n}\n\n\n#NOTE: Package private functions\n\ntrimFromStart <- function(sample, trimCharacters)\n{\n    if(startsWith(sample, trimCharacters))\n        sample <- substr(sample, nchar(trimCharacters) + 1, nchar(sample))\n\n    sample\n}\n\ntrimFromEnd <- function(sample, trimCharacters)\n{\n    if(endsWith(sample, trimCharacters))\n        sample <- substr(sample, 0, nchar(sample) - nchar(trimCharacters))\n\n    sample\n}\n\nRListToPythonList <- function(RList, separator = \", \")\n{\n    pythonArrayContent <- sapply(RList, function(elementInList)\n    {\n        if((is.vector(elementInList) || is.list(elementInList)) &&\n            length(elementInList) > 1)\n        {\n            return(RListToPythonList(elementInList, separator))\n        }\n        else\n        {\n            return(paste0(\"\\\"\", elementInList, \"\\\"\"))\n        }\n    })\n\n    pythonArray <- paste0(\"[\", paste0(pythonArrayContent, collapse = separator), \"]\")\n    pythonArray\n}\n\nappendToStartIfNotExist <- function(sample, characters)\n{\n    if(!startsWith(sample, characters))\n        sample <- paste0(characters, sample)\n\n    sample\n}\n\nsplitToPathAndName = function(path)\n{\n    path <- appendToStartIfNotExist(path, \"/\")\n    components <- unlist(stringr::str_split(path, \"/\"))\n    nameAndPath <- list()\n    nameAndPath$name <- components[length(components)]\n    nameAndPath$path <- trimFromStart(paste0(components[-length(components)], collapse = \"/\"),\n                                      \"/\")\n    nameAndPath\n}\n"
  },
  {
    "path": "contrib/R-sdk/R/zzz.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n.onLoad <- function(libName, pkgName)\n{\n    minAllowedRVersion <- \"3.3.0\"\n    currentRVersion <- getRversion()\n\n    if(currentRVersion < minAllowedRVersion)\n        print(paste0(\"Minimum R version required to run \", pkgName, \" is \",\n                     minAllowedRVersion, \". Your current version is \",\n                     toString(currentRVersion), \". Please update R and try again.\"))\n}\n"
  },
  {
    "path": "contrib/R-sdk/README.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# R SDK for Arvados\n\nThis SDK focuses on providing support for accessing Arvados projects, collections, and the files within collections. The API is not final and feedback is solicited from users on ways in which it could be improved.\n\n## Key Topics\n* Installation\n* Usage\n  * Initializing API\n  * Working with collections\n  * Manipulating collection content\n  * Working with Arvados projects\n  * Help\n* Building the ArvadosR package\n\n## Installation\n\nMinimum R version required to run ArvadosR is 3.3.0.\n\n```r\ninstall.packages(\"ArvadosR\", repos=c(\"https://r.arvados.org\", getOption(\"repos\")[\"CRAN\"]), dependencies=TRUE)\nlibrary('ArvadosR')\n```\n\n> **Note**\n> On Linux, you may have to install supporting packages.\n>\n> On Red Hat, AlmaLinux, and Rocky Linux, this is:\n> ```\n> yum install libxml2-devel openssl-devel curl-devel\n> ```\n>\n> On Debian and Ubuntu, this is:\n> ```\n> apt-get install build-essential libxml2-dev libssl-dev libcurl4-gnutls-dev\n> ```\n\n\n## Usage\n\n### Initializing API\n\n```r\n# use environment variables ARVADOS_API_TOKEN and ARVADOS_API_HOST\narv <- Arvados$new()\n\n# provide them explicitly\narv <- Arvados$new(\"your Arvados token\", \"example.arvadosapi.com\")\n```\n\nOptionally, add `numRetries` parameter to specify number of times to retry failed service requests. Default is 0.\n\n```r\narv <- Arvados$new(\"your Arvados token\", \"example.arvadosapi.com\", numRetries = 3)\n```\n\nThis parameter can be set at any time using `setNumRetries`\n\n```r\narv$setNumRetries(5)\n```\n\n### Working with Arvados projects\n\n##### Create project:\n\n```r\nnewProject <- arv$project_create(name = \"project name\", description = \"project description\", owner_uuid = \"project UUID\", properties = NULL, ensureUniqueName = \"false\")\n```\n\n##### Update project:\n\n```r\nupdatedProject <- arv$project_update(name = \"new project name\", properties = newProperties, uuid = \"projectUUID\")\n```\n\n##### Delete a project:\n\n```r\ndeletedProject <- arv$project_delete(\"uuid\")\n```\n\n#### Find a project:\n\n##### Get a project:\n\n```r\nproject <- arv$project_get(\"uuid\")\n```\n\n##### List projects:\n\n```r\nlist subprojects of a project\nprojects <- arv$project_list(list(list(\"owner_uuid\", \"=\", \"aaaaa-j7d0g-ccccccccccccccc\")))\n\nlist projects which have names beginning with Example\nexamples <- arv$project_list(list(list(\"name\",\"like\",\"Example%\")))\n```\n\n##### List all projects even if the number of items is greater than maximum API limit:\n\n```r\nprojects <- listAll(arv$project_list, list(list(\"name\",\"like\",\"Example%\")))\n```\n\n### Working with collections\n\n#### Create a new collection:\n\n```r\nnewCollection <- arv$collections_create(name = \"collectionTitle\", description = \"collectionDescription\", ownerUUID = \"collectionOwner\", properties = Properties)\n```\n\n#### Update a collection’s metadata:\n\n```r\ncollection <- arv$collections_update(name = \"newCollectionTitle\", description = \"newCollectionDescription\", ownerUUID = \"collectionOwner\", properties = NULL, uuid =  \"collectionUUID\")\n```\n\n#### Delete a collection:\n\n```r\ndeletedCollection <- arv$collections_delete(\"uuid\")\n```\n\n#### Find a collection:\n\n#### Get a collection:\n\n```r\ncollection <- arv$collections_get(\"uuid\")\n```\n\nBe aware that the result from `collections_get` is not a 
Collection class. The object returned from this method lets you access collection fields like “name” and “description”. The Collection class lets you access the files in the collection for reading and writing, and is described in the next section.\n\n#### List collections:\n\n```r\n# offset of 0 and default limit of 100\ncollectionList <- arv$collections_list(list(list(\"name\", \"like\", \"Test%\")))\n\ncollectionList <- arv$collections_list(list(list(\"name\", \"like\", \"Test%\")), limit = 10, offset = 2)\n\n# count of total number of items (may be more than returned due to paging)\ncollectionList$items_available\n\n# items which match the filter criteria\ncollectionList$items\n```\n\n#### List all collections even if the number of items is greater than maximum API limit:\n\n```r\ncollectionList <- listAll(arv$collections_list, list(list(\"name\", \"like\", \"Test%\")))\n```\n\n### Manipulating collection content\n\n#### Initialize a collection object:\n\n```r\ncollection <- Collection$new(arv, \"uuid\")\n```\n\n#### Get list of files:\n\n```r\nfiles <- collection$getFileListing()\n```\n\n#### Get ArvadosFile or Subcollection from internal tree-like structure:\n\n```r\narvadosFile <- collection$get(\"location/to/my/file.cpp\")\n# or\narvadosSubcollection <- collection$get(\"location/to/my/directory/\")\n```\n\n#### Read a table:\n\n```r\narvadosFile   <- collection$get(\"myinput.txt\")\narvConnection <- arvadosFile$connection(\"r\")\nmytable       <- read.table(arvConnection)\n```\n\n#### Write a table:\n\n```r\narvadosFile   <- collection$create(\"myoutput.txt\")[[1]]\narvConnection <- arvadosFile$connection(\"w\")\nwrite.table(mytable, arvConnection)\narvadosFile$flush()\n```\n\n#### Read a table from a tab delimited file:\n\n```r\narvadosFile   <- collection$get(\"myinput.txt\")\narvConnection <- arvadosFile$connection(\"r\")\nmytable       <- read.delim(arvConnection)\n```\n\n#### Read a gzip compressed R object:\n\n```r\nobj <- readRDS(gzcon(coll$get(\"abc.RDS\")$connection(\"rb\")))\n```\n\n#### Write to existing file (overwrites current content of the file):\n\n```r\narvadosFile <- collection$get(\"location/to/my/file.cpp\")\narvadosFile$write(\"This is new file content\")\n```\n\n#### Read whole file or just a portion of it:\n\n```r\nfileContent <- arvadosFile$read()\nfileContent <- arvadosFile$read(\"text\")\nfileContent <- arvadosFile$read(\"raw\", offset = 1024, length = 512)\n```\n\n#### Read various file types:\n\nChooses file type based on file name extension.  
#### Delete a file from a collection:\n\n```r\ncollection$remove(\"location/to/my/file.cpp\")\n```\n\nYou can remove both Subcollections and ArvadosFiles. If a subcollection contains other files or folders, they will be removed recursively.\n\n> **Note**\n> You can also remove multiple files at once:\n> ```r\n> collection$remove(c(\"path/to/my/file.cpp\", \"path/to/other/file.cpp\"))\n> ```\n\n#### Delete a file or folder from a Subcollection:\n\n```r\nsubcollection <- collection$get(\"mySubcollection/\")\nsubcollection$remove(\"fileInsideSubcollection.exe\")\nsubcollection$remove(\"folderInsideSubcollection/\")\n```\n\n#### Move or rename a file or folder within a collection (moving between collections is currently not supported):\n\n##### Directly from collection\n\n```r\ncollection$move(\"folder/file.cpp\", \"file.cpp\")\n```\n\n##### Or from file\n\n```r\nfile <- collection$get(\"location/to/my/file.cpp\")\nfile$move(\"newDestination/file.cpp\")\n```\n\n##### Or from subcollection\n\n```r\nsubcollection <- collection$get(\"location/to/folder\")\nsubcollection$move(\"newDestination/folder\")\n```\n\n> **Note**\n> Make sure to include the new file name in the destination. In the second example, `file$move(\"newDestination/\")` will not work.\n\n#### Copy a file or folder within a collection (copying between collections is currently not supported):\n\n##### Directly from collection\n\n```r\ncollection$copy(\"folder/file.cpp\", \"file.cpp\")\n```\n\n##### Or from file\n\n```r\nfile <- collection$get(\"location/to/my/file.cpp\")\nfile$copy(\"destination/file.cpp\")\n```\n\n##### Or from subcollection\n\n```r\nsubcollection <- collection$get(\"location/to/folder\")\nsubcollection$copy(\"destination/folder\")\n```\n\n\n### Help\n\n#### View the help page of an Arvados class by putting `?` before the class name:\n\n```r\n?Arvados\n?Collection\n?Subcollection\n?ArvadosFile\n```\n\n#### View the help page of any method defined in an Arvados class by putting `?` before the method name:\n\n```r\n?collections_update\n?workflows_get\n```\n\n## Building the ArvadosR package\n\n```\nmake package\n```\n\nThis will create a tarball of the ArvadosR package in the current directory.\n\n
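The tarball can then be installed locally. A minimal sketch (the exact file name depends on the package version):\n\n```r\n# version number is illustrative; use the tarball produced by make package\ninstall.packages(\"ArvadosR_2.0.0.tar.gz\", repos = NULL, type = \"source\")\n```\n\n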
## Documentation\n\nComplete documentation, including the [User Guide](https://doc.arvados.org/user/index.html), [Installation documentation](https://doc.arvados.org/install/index.html), [Administrator documentation](https://doc.arvados.org/admin/index.html) and\n[API documentation](https://doc.arvados.org/api/index.html), is available at https://doc.arvados.org/.\n\n## Community\n\nVisit [Arvados Community and Getting Help](https://doc.arvados.org/user/getting_started/community.html).\n\n## Reporting bugs\n\n[Report an issue on GitHub](https://github.com/arvados/arvados/issues/new).\n\n## Licensing\n\nArvados is Free Software. See [Arvados Free Software Licenses](https://doc.arvados.org/user/copying/copying.html) for information about the open source licenses used in Arvados.\n"
  },
  {
    "path": "contrib/R-sdk/arvados-v1-discovery.json",
    "content": "{\n  \"auth\": {\n    \"oauth2\": {\n      \"scopes\": {\n        \"https://api.arvados.org/auth/arvados\": {\n          \"description\": \"View and manage objects\"\n        },\n        \"https://api.arvados.org/auth/arvados.readonly\": {\n          \"description\": \"View objects\"\n        }\n      }\n    }\n  },\n  \"basePath\": \"/arvados/v1/\",\n  \"batchPath\": \"batch\",\n  \"description\": \"The API to interact with Arvados.\",\n  \"discoveryVersion\": \"v1\",\n  \"documentationLink\": \"http://doc.arvados.org/api/index.html\",\n  \"id\": \"arvados:v1\",\n  \"kind\": \"discovery#restDescription\",\n  \"name\": \"arvados\",\n  \"parameters\": {\n    \"alt\": {\n      \"type\": \"string\",\n      \"description\": \"Data format for the response.\",\n      \"default\": \"json\",\n      \"enum\": [\n        \"json\"\n      ],\n      \"enumDescriptions\": [\n        \"Responses with Content-Type of application/json\"\n      ],\n      \"location\": \"query\"\n    },\n    \"fields\": {\n      \"type\": \"string\",\n      \"description\": \"Selector specifying which fields to include in a partial response.\",\n      \"location\": \"query\"\n    },\n    \"key\": {\n      \"type\": \"string\",\n      \"description\": \"API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.\",\n      \"location\": \"query\"\n    },\n    \"oauth_token\": {\n      \"type\": \"string\",\n      \"description\": \"OAuth 2.0 token for the current user.\",\n      \"location\": \"query\"\n    }\n  },\n  \"protocol\": \"rest\",\n  \"resources\": {\n    \"api_client_authorizations\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.api_client_authorizations.get\",\n          \"path\": \"api_client_authorizations/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a ApiClientAuthorization record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the ApiClientAuthorization to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.api_client_authorizations.list\",\n          \"path\": \"api_client_authorizations\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a ApiClientAuthorizationList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n       
       \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_avaliable`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorizationList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.api_client_authorizations.create\",\n          \"path\": \"api_client_authorizations\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new ApiClientAuthorization.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"api_client_authorization\": {\n                \"$ref\": \"ApiClientAuthorization\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.api_client_authorizations.update\",\n          \"path\": \"api_client_authorizations/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing ApiClientAuthorization.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the ApiClientAuthorization to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n        
    \"required\": true,\n            \"properties\": {\n              \"api_client_authorization\": {\n                \"$ref\": \"ApiClientAuthorization\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.api_client_authorizations.delete\",\n          \"path\": \"api_client_authorizations/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing ApiClientAuthorization.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the ApiClientAuthorization to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"create_system_auth\": {\n          \"id\": \"arvados.api_client_authorizations.create_system_auth\",\n          \"path\": \"api_client_authorizations/create_system_auth\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a token for the system (\\\"root\\\") user.\",\n          \"parameters\": {\n            \"scopes\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"default\": \"[\\\"all\\\"]\",\n              \"description\": \"An array of strings defining the scope of resources this token will be allowed to access. Refer to the [scopes reference][] for details.\\n\\n[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"current\": {\n          \"id\": \"arvados.api_client_authorizations.current\",\n          \"path\": \"api_client_authorizations/current\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Return all metadata for the token used to authorize this request.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"authorized_keys\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.authorized_keys.get\",\n          \"path\": \"authorized_keys/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a AuthorizedKey record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the AuthorizedKey to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n         
   \"$ref\": \"AuthorizedKey\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.authorized_keys.list\",\n          \"path\": \"authorized_keys\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a AuthorizedKeyList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. 
Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_avaliable`\\n    field. This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"AuthorizedKeyList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.authorized_keys.create\",\n          \"path\": \"authorized_keys\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new AuthorizedKey.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"authorized_key\": {\n                \"$ref\": \"AuthorizedKey\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"AuthorizedKey\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.authorized_keys.update\",\n          \"path\": \"authorized_keys/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing AuthorizedKey.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the AuthorizedKey to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              
\"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"authorized_key\": {\n                \"$ref\": \"AuthorizedKey\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"AuthorizedKey\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.authorized_keys.delete\",\n          \"path\": \"authorized_keys/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing AuthorizedKey.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the AuthorizedKey to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"AuthorizedKey\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"collections\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.collections.get\",\n          \"path\": \"collections/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Collection record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Show collection even if its `is_trashed` attribute is true.\",\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.collections.list\",\n          \"path\": \"collections\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a CollectionList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys 
of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_avaliable`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Include collections whose `is_trashed` attribute is true.\",\n              \"location\": \"query\"\n            },\n            \"include_old_versions\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Include past collection versions.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"CollectionList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.collections.create\",\n          \"path\": \"collections\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Collection.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"replace_files\": {\n              \"type\": \"object\",\n              \"description\": \"Add, delete, and replace files and directories with new content\\nand/or content from other collections. 
Refer to the\\n[replace_files reference][] for details.\\n\\n[replace_files reference]: https://doc.arvados.org/api/methods/collections.html#replace_files\\n\\n\",\n              \"required\": false,\n              \"location\": \"query\",\n              \"properties\": {},\n              \"additionalProperties\": {\n                \"type\": \"string\"\n              }\n            },\n            \"replace_segments\": {\n              \"type\": \"object\",\n              \"description\": \"Replace existing block segments in the collection with new segments.\\nRefer to the [replace_segments reference][] for details.\\n\\n[replace_segments reference]: https://doc.arvados.org/api/methods/collections.html#replace_segments\\n\\n\",\n              \"required\": false,\n              \"location\": \"query\",\n              \"properties\": {},\n              \"additionalProperties\": {\n                \"type\": \"string\"\n              }\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"collection\": {\n                \"$ref\": \"Collection\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.collections.update\",\n          \"path\": \"collections/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Collection.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"replace_files\": {\n              \"type\": \"object\",\n              \"description\": \"Add, delete, and replace files and directories with new content\\nand/or content from other collections. 
Refer to the\\n[replace_files reference][] for details.\\n\\n[replace_files reference]: https://doc.arvados.org/api/methods/collections.html#replace_files\\n\\n\",\n              \"required\": false,\n              \"location\": \"query\",\n              \"properties\": {},\n              \"additionalProperties\": {\n                \"type\": \"string\"\n              }\n            },\n            \"replace_segments\": {\n              \"type\": \"object\",\n              \"description\": \"Replace existing block segments in the collection with new segments.\\nRefer to the [replace_segments reference][] for details.\\n\\n[replace_segments reference]: https://doc.arvados.org/api/methods/collections.html#replace_segments\\n\\n\",\n              \"required\": false,\n              \"location\": \"query\",\n              \"properties\": {},\n              \"additionalProperties\": {\n                \"type\": \"string\"\n              }\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"collection\": {\n                \"$ref\": \"Collection\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.collections.delete\",\n          \"path\": \"collections/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Collection.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"provenance\": {\n          \"id\": \"arvados.collections.provenance\",\n          \"path\": \"collections/{uuid}/provenance\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Detail the provenance of a given collection.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"used_by\": {\n          \"id\": \"arvados.collections.used_by\",\n          \"path\": \"collections/{uuid}/used_by\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Detail where a given collection has been used.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"trash\": {\n          \"id\": \"arvados.collections.trash\",\n          \"path\": 
\"collections/{uuid}/trash\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Trash a collection.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"untrash\": {\n          \"id\": \"arvados.collections.untrash\",\n          \"path\": \"collections/{uuid}/untrash\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Untrash a collection.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"computed_permissions\": {\n      \"methods\": {\n        \"list\": {\n          \"id\": \"arvados.computed_permissions.list\",\n          \"path\": \"computed_permissions\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a ComputedPermissionList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n       
     },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched these search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ComputedPermissionList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        }\n      }\n    },\n    \"containers\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.containers.get\",\n          \"path\": \"containers/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Container record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.containers.list\",\n          \"path\": \"containers\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a ContainerList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and 
preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched these search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ContainerList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.containers.create\",\n          \"path\": \"containers\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Container.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"container\": {\n                \"$ref\": \"Container\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.containers.update\",\n          \"path\": \"containers/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Container.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"container\": {\n                \"$ref\": \"Container\"\n              }\n       
     }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.containers.delete\",\n          \"path\": \"containers/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Container.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"auth\": {\n          \"id\": \"arvados.containers.auth\",\n          \"path\": \"containers/{uuid}/auth\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get the API client authorization token associated with this container.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"lock\": {\n          \"id\": \"arvados.containers.lock\",\n          \"path\": \"containers/{uuid}/lock\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Lock a container (for a dispatcher to begin running it).\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"unlock\": {\n          \"id\": \"arvados.containers.unlock\",\n          \"path\": \"containers/{uuid}/unlock\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Unlock a container (for a dispatcher to stop running it).\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update_priority\": {\n          \"id\": \"arvados.containers.update_priority\",\n          \"path\": \"containers/{uuid}/update_priority\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Recalculate and return the priority of a given container.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n         
   \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"secret_mounts\": {\n          \"id\": \"arvados.containers.secret_mounts\",\n          \"path\": \"containers/{uuid}/secret_mounts\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Return secret mount information for the container associated with the API token authorizing this request.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"current\": {\n          \"id\": \"arvados.containers.current\",\n          \"path\": \"containers/current\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Return the container record associated with the API token authorizing this request.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"container_requests\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.container_requests.get\",\n          \"path\": \"container_requests/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a ContainerRequest record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the ContainerRequest to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Show container request even if its owner project is trashed.\",\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"ContainerRequest\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.container_requests.list\",\n          \"path\": \"container_requests\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a ContainerRequestList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n     
         \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched these search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Include container requests whose owner project is trashed.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ContainerRequestList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.container_requests.create\",\n          \"path\": \"container_requests\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new ContainerRequest.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"container_request\": {\n                \"$ref\": \"ContainerRequest\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ContainerRequest\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.container_requests.update\",\n          \"path\": \"container_requests/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing ContainerRequest.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the ContainerRequest to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n       
       \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"container_request\": {\n                \"$ref\": \"ContainerRequest\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ContainerRequest\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.container_requests.delete\",\n          \"path\": \"container_requests/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing ContainerRequest.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the ContainerRequest to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ContainerRequest\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"container_status\": {\n          \"id\": \"arvados.container_requests.container_status\",\n          \"path\": \"container_requests/{uuid}/container_status\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Return scheduling details for a container request.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"required\": true,\n              \"description\": \"The UUID of the container request to query.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ContainerRequest\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"credentials\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.credentials.get\",\n          \"path\": \"credentials/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Credential record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Credential to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Credential\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.credentials.list\",\n          \"path\": \"credentials\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a CredentialList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to 
limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched these search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"CredentialList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.credentials.create\",\n          \"path\": \"credentials\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Credential.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"credential\": {\n                \"$ref\": \"Credential\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Credential\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.credentials.update\",\n          \"path\": \"credentials/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Credential.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Credential to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"credential\": {\n                \"$ref\": \"Credential\"\n           
   }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Credential\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.credentials.delete\",\n          \"path\": \"credentials/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Credential.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Credential to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Credential\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"secret\": {\n          \"id\": \"arvados.credentials.secret\",\n          \"path\": \"credentials/{uuid}/secret\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Fetch the secret part of the credential (can only be invoked by running containers).\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Credential to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Credential\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"groups\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.groups.get\",\n          \"path\": \"groups/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Group record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Group to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Return group/project even if its `is_trashed` attribute is true.\",\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.groups.list\",\n          \"path\": \"groups\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a GroupList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: 
https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched these search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Include items whose `is_trashed` attribute is true.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"GroupList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.groups.create\",\n          \"path\": \"groups\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Group.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"async\": {\n              \"required\": false,\n              \"type\": \"boolean\",\n              \"location\": \"query\",\n              \"default\": \"false\",\n              \"description\": \"If true, cluster permission will not be updated immediately, but instead at the next configured update interval.\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"group\": {\n                \"$ref\": \"Group\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.groups.update\",\n          \"path\": \"groups/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Group.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              
\"description\": \"The UUID of the Group to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"async\": {\n              \"required\": false,\n              \"type\": \"boolean\",\n              \"location\": \"query\",\n              \"default\": \"false\",\n              \"description\": \"If true, cluster permission will not be updated immediately, but instead at the next configured update interval.\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"group\": {\n                \"$ref\": \"Group\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.groups.delete\",\n          \"path\": \"groups/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Group.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Group to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"contents\": {\n          \"id\": \"arvados.groups.contents\",\n          \"path\": \"groups/contents\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List objects that belong to a group.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": 
\"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched these search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Include items whose `is_trashed` attribute is true.\",\n              \"location\": \"query\"\n            },\n            \"uuid\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"\",\n              \"description\": \"If given, limit the listing to objects owned by the\\nuser or group with this UUID.\",\n              \"location\": \"query\"\n            },\n            \"recursive\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, include contents from child groups recursively.\",\n              \"location\": \"query\"\n            },\n            \"include\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of referenced objects to include in the `included` field of the response. 
Supported values in the array are:\\n\\n  * `\\\"container_uuid\\\"`\\n  * `\\\"owner_uuid\\\"`\\n  * `\\\"collection_uuid\\\"`\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"include_old_versions\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, include past versions of collections in the listing.\",\n              \"location\": \"query\"\n            },\n            \"exclude_home_project\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, exclude contents of the user's home project from the listing.\\nCalling this method with this flag set is how clients enumerate objects shared\\nwith the current user.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"shared\": {\n          \"id\": \"arvados.groups.shared\",\n          \"path\": \"groups/shared\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List groups that the current user can access via permission links.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The 
maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Include items whose `is_trashed` attribute is true.\",\n              \"location\": \"query\"\n            },\n            \"include\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"A string naming referenced objects to include in the `included` field of the response. 
Supported values are:\\n\\n  * `\\\"owner_uuid\\\"`\\n\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"trash\": {\n          \"id\": \"arvados.groups.trash\",\n          \"path\": \"groups/{uuid}/trash\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Trash a group.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Group to trash.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"untrash\": {\n          \"id\": \"arvados.groups.untrash\",\n          \"path\": \"groups/{uuid}/untrash\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Untrash a group.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Group to untrash.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"keep_services\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.keep_services.get\",\n          \"path\": \"keep_services/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a KeepService record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the KeepService to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"KeepService\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.keep_services.list\",\n          \"path\": \"keep_services\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a KeepServiceList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by 
their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"KeepServiceList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.keep_services.create\",\n          \"path\": \"keep_services\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new KeepService.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"keep_service\": {\n                \"$ref\": \"KeepService\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"KeepService\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.keep_services.update\",\n          \"path\": \"keep_services/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing KeepService.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the KeepService to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"keep_service\": {\n                \"$ref\": 
\"KeepService\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"KeepService\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.keep_services.delete\",\n          \"path\": \"keep_services/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing KeepService.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the KeepService to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"KeepService\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"accessible\": {\n          \"id\": \"arvados.keep_services.accessible\",\n          \"path\": \"keep_services/accessible\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List Keep services that the current client can access.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"KeepService\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"links\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.links.get\",\n          \"path\": \"links/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Link record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Link to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Link\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.links.list\",\n          \"path\": \"links\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a LinkList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": 
\"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_avaliable`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"LinkList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.links.create\",\n          \"path\": \"links\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Link.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"link\": {\n                \"$ref\": \"Link\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Link\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.links.update\",\n          \"path\": \"links/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Link.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Link to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"link\": {\n                \"$ref\": \"Link\"\n              }\n            }\n          },\n          \"response\": {\n            
\"$ref\": \"Link\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.links.delete\",\n          \"path\": \"links/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Link.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Link to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Link\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"get_permissions\": {\n          \"id\": \"arvados.links.get_permissions\",\n          \"path\": \"permissions/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List permissions granted on an Arvados object.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Link to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Link\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"logs\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.logs.get\",\n          \"path\": \"logs/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Log record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Log to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Log\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.logs.list\",\n          \"path\": \"logs\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a LogList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible 
and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"LogList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.logs.create\",\n          \"path\": \"logs\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Log.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"log\": {\n                \"$ref\": \"Log\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Log\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.logs.update\",\n          \"path\": \"logs/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Log.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Log to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"log\": {\n                \"$ref\": \"Log\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": 
\"Log\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.logs.delete\",\n          \"path\": \"logs/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Log.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Log to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Log\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"users\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.users.get\",\n          \"path\": \"users/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a User record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the User to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.users.list\",\n          \"path\": \"users\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a UserList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n     
         \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_avaliable`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"UserList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.users.create\",\n          \"path\": \"users\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new User.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"user\": {\n                \"$ref\": \"User\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.users.update\",\n          \"path\": \"users/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing User.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the User to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not try to update the user on any other clusters in the 
federation,\\nonly the cluster that received the request.\\nYou must be an administrator to use this flag.\",\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"user\": {\n                \"$ref\": \"User\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.users.delete\",\n          \"path\": \"users/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing User.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the User to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"current\": {\n          \"id\": \"arvados.users.current\",\n          \"path\": \"users/current\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Return the user record associated with the API token authorizing this request.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"system\": {\n          \"id\": \"arvados.users.system\",\n          \"path\": \"users/system\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Return this cluster's system (\\\"root\\\") user record.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"activate\": {\n          \"id\": \"arvados.users.activate\",\n          \"path\": \"users/{uuid}/activate\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Set the `is_active` flag on a user record.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the User to activate.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"setup\": {\n          \"id\": \"arvados.users.setup\",\n          \"path\": \"users/setup\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Convenience method to \\\"fully\\\" set up a user record with a virtual machine login and notification email.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"UUID of an existing user record to set up.\",\n              \"location\": \"query\"\n            },\n            \"user\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"Attributes of a new user record to set up.\",\n              \"location\": \"query\"\n            
},\n            \"repo_name\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"This parameter is obsolete and ignored.\",\n              \"location\": \"query\"\n            },\n            \"vm_uuid\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"If given, setup creates a login link to allow this user to access the Arvados virtual machine with this UUID.\",\n              \"location\": \"query\"\n            },\n            \"send_notification_email\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, send an email to the user notifying them they can now access this Arvados cluster.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"unsetup\": {\n          \"id\": \"arvados.users.unsetup\",\n          \"path\": \"users/{uuid}/unsetup\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Unset a user's active flag and delete associated records.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the User to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"merge\": {\n          \"id\": \"arvados.users.merge\",\n          \"path\": \"users/merge\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Transfer ownership of one user's data to another.\",\n          \"parameters\": {\n            \"new_owner_uuid\": {\n              \"type\": \"string\",\n              \"required\": true,\n              \"description\": \"UUID of the user or group that will take ownership of data owned by the old user.\",\n              \"location\": \"query\"\n            },\n            \"new_user_token\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"Valid API token for the user receiving ownership. If you use this option, it takes ownership of data owned by the user making the request.\",\n              \"location\": \"query\"\n            },\n            \"redirect_to_new_user\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, authorization attempts for the old user will be redirected to the new user.\",\n              \"location\": \"query\"\n            },\n            \"old_user_uuid\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"UUID of the user whose ownership is being transferred to `new_owner_uuid`. You must be an admin to use this option.\",\n              \"location\": \"query\"\n            },\n            \"new_user_uuid\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"UUID of the user receiving ownership. 
You must be an admin to use this option.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"user_agreements\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.user_agreements.get\",\n          \"path\": \"user_agreements/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a UserAgreement record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the UserAgreement to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"UserAgreement\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.user_agreements.list\",\n          \"path\": \"user_agreements\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a UserAgreementList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will 
only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"UserAgreementList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.user_agreements.create\",\n          \"path\": \"user_agreements\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new UserAgreement.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be 
created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"user_agreement\": {\n                \"$ref\": \"UserAgreement\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"UserAgreement\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.user_agreements.update\",\n          \"path\": \"user_agreements/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing UserAgreement.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the UserAgreement to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"user_agreement\": {\n                \"$ref\": \"UserAgreement\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"UserAgreement\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.user_agreements.delete\",\n          \"path\": \"user_agreements/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing UserAgreement.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the UserAgreement to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"UserAgreement\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"signatures\": {\n          \"id\": \"arvados.user_agreements.signatures\",\n          \"path\": \"user_agreements/signatures\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List all user agreement signature links from a user.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"UserAgreement\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"sign\": {\n          \"id\": \"arvados.user_agreements.sign\",\n          \"path\": \"user_agreements/sign\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a signature link from the current user for a given user agreement.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"UserAgreement\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"virtual_machines\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.virtual_machines.get\",\n          \"path\": \"virtual_machines/{uuid}\",\n          
\"httpMethod\": \"GET\",\n          \"description\": \"Get a VirtualMachine record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the VirtualMachine to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"VirtualMachine\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.virtual_machines.list\",\n          \"path\": \"virtual_machines\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a VirtualMachineList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": 
\"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_avaliable`\\n    field. This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"VirtualMachineList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.virtual_machines.create\",\n          \"path\": \"virtual_machines\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new VirtualMachine.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"virtual_machine\": {\n                \"$ref\": \"VirtualMachine\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"VirtualMachine\"\n          },\n          \"scopes\": [\n            
\"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.virtual_machines.update\",\n          \"path\": \"virtual_machines/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing VirtualMachine.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the VirtualMachine to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"virtual_machine\": {\n                \"$ref\": \"VirtualMachine\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"VirtualMachine\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.virtual_machines.delete\",\n          \"path\": \"virtual_machines/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing VirtualMachine.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the VirtualMachine to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"VirtualMachine\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"logins\": {\n          \"id\": \"arvados.virtual_machines.logins\",\n          \"path\": \"virtual_machines/{uuid}/logins\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List login permission links for a given virtual machine.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the VirtualMachine to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"VirtualMachine\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"get_all_logins\": {\n          \"id\": \"arvados.virtual_machines.get_all_logins\",\n          \"path\": \"virtual_machines/get_all_logins\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List login permission links for all virtual machines.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"VirtualMachine\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"workflows\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.workflows.get\",\n          \"path\": \"workflows/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Workflow record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID 
of the Workflow to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Workflow\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.workflows.list\",\n          \"path\": \"workflows\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a WorkflowList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that 
result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched these search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"WorkflowList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.workflows.create\",\n          \"path\": \"workflows\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Workflow.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"workflow\": {\n                \"$ref\": \"Workflow\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Workflow\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.workflows.update\",\n          \"path\": \"workflows/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Workflow.\",\n          \"parameters\": {\n      
      \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Workflow to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"workflow\": {\n                \"$ref\": \"Workflow\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Workflow\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.workflows.delete\",\n          \"path\": \"workflows/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Workflow.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Workflow to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Workflow\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"configs\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.configs.get\",\n          \"path\": \"config\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get this cluster's public configuration settings.\",\n          \"parameters\": {},\n          \"parameterOrder\": [],\n          \"response\": {},\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        }\n      }\n    },\n    \"vocabularies\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.vocabularies.get\",\n          \"path\": \"vocabulary\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get this cluster's configured vocabulary definition.\\n\\nRefer to [metadata vocabulary documentation][] for details.\\n\\n[metadata vocabulary documentation]: https://doc.arvados.org/admin/metadata-vocabulary.html\\n\\n\",\n          \"parameters\": {},\n          \"parameterOrder\": [],\n          \"response\": {},\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        }\n      }\n    },\n    \"sys\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.sys.trash_sweep\",\n          \"path\": \"sys/trash_sweep\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Run scheduled data trash and sweep operations across this cluster's Keep services.\",\n          \"parameters\": {},\n          \"parameterOrder\": [],\n          \"response\": {},\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        }\n      }\n    }\n  },\n  \"revision\": \"20250402\",\n  \"schemas\": {\n    \"ApiClientAuthorizationList\": {\n      \"id\": \"ApiClientAuthorizationList\",\n      \"description\": \"A 
list of ApiClientAuthorization objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#apiClientAuthorizationList.\",\n          \"default\": \"arvados#apiClientAuthorizationList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching ApiClientAuthorization objects.\",\n          \"items\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          }\n        }\n      }\n    },\n    \"ApiClientAuthorization\": {\n      \"id\": \"ApiClientAuthorization\",\n      \"description\": \"Arvados API client authorization token\\n\\nThis resource represents an API token a user may use to authenticate an\\nArvados API request.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"gj3su\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"api_token\": {\n          \"description\": \"The secret token that can be used to authorize Arvados API requests.\",\n          \"type\": \"string\"\n        },\n        \"created_by_ip_address\": {\n          \"description\": \"The IP address of the client that created this token.\",\n          \"type\": \"string\"\n        },\n        \"last_used_by_ip_address\": {\n          \"description\": \"The IP address of the client that last used this token.\",\n          \"type\": \"string\"\n        },\n        \"last_used_at\": {\n          \"description\": \"The last time this token was used to authorize a request. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"expires_at\": {\n          \"description\": \"The time after which this token is no longer valid for authorization. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this API client authorization was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"scopes\": {\n          \"description\": \"An array of strings identifying HTTP methods and API paths this token is\\nauthorized to use. Refer to the [scopes reference][] for details.\\n\\n[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes\\n\\n\",\n          \"type\": \"Array\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This API client authorization's Arvados UUID, like `zzzzz-gj3su-12345abcde67890`.\"\n        }\n      }\n    },\n    \"AuthorizedKeyList\": {\n      \"id\": \"AuthorizedKeyList\",\n      \"description\": \"A list of AuthorizedKey objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. 
Always arvados#authorizedKeyList.\",\n          \"default\": \"arvados#authorizedKeyList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching AuthorizedKey objects.\",\n          \"items\": {\n            \"$ref\": \"AuthorizedKey\"\n          }\n        }\n      }\n    },\n    \"AuthorizedKey\": {\n      \"id\": \"AuthorizedKey\",\n      \"description\": \"Arvados authorized public key\\n\\nThis resource represents a public key a user may use to authenticate themselves\\nto services on the cluster. Its primary use today is to store SSH keys for\\nvirtual machines (\\\"shell nodes\\\"). It may be extended to store other keys in\\nthe future.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"fngyi\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This authorized key's Arvados UUID, like `zzzzz-fngyi-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this authorized key.\",\n          \"type\": \"string\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this authorized key.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this authorized key was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"name\": {\n          \"description\": \"The name of this authorized key assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"key_type\": {\n          \"description\": \"A string identifying what type of service uses this key. Supported values are:\\n\\n  * `\\\"SSH\\\"`\\n\\n\",\n          \"type\": \"string\"\n        },\n        \"authorized_user_uuid\": {\n          \"description\": \"The UUID of the Arvados user that is authorized by this key.\",\n          \"type\": \"string\"\n        },\n        \"public_key\": {\n          \"description\": \"The full public key, in the format referenced by `key_type`.\",\n          \"type\": \"text\"\n        },\n        \"expires_at\": {\n          \"description\": \"The time after which this key is no longer valid for authorization. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this authorized key was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        }\n      }\n    },\n    \"CollectionList\": {\n      \"id\": \"CollectionList\",\n      \"description\": \"A list of Collection objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. 
Always arvados#collectionList.\",\n          \"default\": \"arvados#collectionList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Collection objects.\",\n          \"items\": {\n            \"$ref\": \"Collection\"\n          }\n        }\n      }\n    },\n    \"Collection\": {\n      \"id\": \"Collection\",\n      \"description\": \"Arvados data collection\\n\\nA collection describes how a set of files is stored in data blocks in Keep,\\nalong with associated metadata.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"4zz18\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this collection.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this collection was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this collection.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this collection was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"portable_data_hash\": {\n          \"description\": \"The portable data hash of this collection. This string provides a unique\\nand stable reference to these contents.\",\n          \"type\": \"string\"\n        },\n        \"replication_desired\": {\n          \"description\": \"The number of copies that should be made for data in this collection.\",\n          \"type\": \"integer\"\n        },\n        \"replication_confirmed_at\": {\n          \"description\": \"The last time the cluster confirmed that it met `replication_confirmed`\\nfor this collection. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"replication_confirmed\": {\n          \"description\": \"The number of copies of data in this collection that the cluster has confirmed\\nexist in storage.\",\n          \"type\": \"integer\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This collection's Arvados UUID, like `zzzzz-4zz18-12345abcde67890`.\"\n        },\n        \"manifest_text\": {\n          \"description\": \"The manifest text that describes how files are constructed from data blocks\\nin this collection. 
Refer to the [manifest format][] reference for details.\\n\\n[manifest format]: https://doc.arvados.org/architecture/manifest-format.html\\n\\n\",\n          \"type\": \"text\"\n        },\n        \"name\": {\n          \"description\": \"The name of this collection assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"description\": {\n          \"description\": \"A longer HTML description of this collection assigned by a user.\\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.\",\n          \"type\": \"string\"\n        },\n        \"properties\": {\n          \"description\": \"A hash of arbitrary metadata for this collection.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"delete_at\": {\n          \"description\": \"The time this collection will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"trash_at\": {\n          \"description\": \"The time this collection will be trashed. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"is_trashed\": {\n          \"description\": \"A boolean flag to indicate whether or not this collection is trashed.\",\n          \"type\": \"boolean\"\n        },\n        \"storage_classes_desired\": {\n          \"description\": \"An array of strings identifying the storage class(es) that should be used\\nfor data in this collection. Storage classes are configured by the cluster administrator.\",\n          \"type\": \"Array\"\n        },\n        \"storage_classes_confirmed\": {\n          \"description\": \"An array of strings identifying the storage class(es) the cluster has\\nconfirmed have a copy of this collection's data.\",\n          \"type\": \"Array\"\n        },\n        \"storage_classes_confirmed_at\": {\n          \"description\": \"The last time the cluster confirmed that data was stored on the storage\\nclass(es) in `storage_classes_confirmed`. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"current_version_uuid\": {\n          \"description\": \"The UUID of the current version of this collection.\",\n          \"type\": \"string\"\n        },\n        \"version\": {\n          \"description\": \"An integer that counts which version of a collection this record\\nrepresents. Refer to [collection versioning][] for details. 
This attribute is\\nread-only.\\n\\n[collection versioning]: https://doc.arvados.org/user/topics/collection-versioning.html\\n\\n\",\n          \"type\": \"integer\"\n        },\n        \"preserve_version\": {\n          \"description\": \"A boolean flag to indicate whether this specific version of this collection\\nshould be persisted in cluster storage.\",\n          \"type\": \"boolean\"\n        },\n        \"file_count\": {\n          \"description\": \"The number of files represented in this collection's `manifest_text`.\\nThis attribute is read-only.\",\n          \"type\": \"integer\"\n        },\n        \"file_size_total\": {\n          \"description\": \"The total size in bytes of files represented in this collection's `manifest_text`.\\nThis attribute is read-only.\",\n          \"type\": \"integer\"\n        }\n      }\n    },\n    \"ComputedPermissionList\": {\n      \"id\": \"ComputedPermissionList\",\n      \"description\": \"A list of ComputedPermission objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#computedPermissionList.\",\n          \"default\": \"arvados#computedPermissionList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching ComputedPermission objects.\",\n          \"items\": {\n            \"$ref\": \"ComputedPermission\"\n          }\n        }\n      }\n    },\n    \"ComputedPermission\": {\n      \"id\": \"ComputedPermission\",\n      \"description\": \"Arvados computed permission\\n\\nComputed permissions do not correspond directly to any Arvados resource, but\\nprovide a simple way to query the entire graph of permissions granted to\\nusers and groups.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"user_uuid\": {\n          \"description\": \"The UUID of the Arvados user who has this permission.\",\n          \"type\": \"string\"\n        },\n        \"target_uuid\": {\n          \"description\": \"The UUID of the Arvados object the user has access to.\",\n          \"type\": \"string\"\n        },\n        \"perm_level\": {\n          \"description\": \"A string representing the user's level of access to the target object.\\nPossible values are:\\n\\n  * `\\\"can_read\\\"`\\n  * `\\\"can_write\\\"`\\n  * `\\\"can_manage\\\"`\\n\\n\",\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"ContainerList\": {\n      \"id\": \"ContainerList\",\n      \"description\": \"A list of Container objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#containerList.\",\n          \"default\": \"arvados#containerList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Container objects.\",\n          \"items\": {\n            \"$ref\": \"Container\"\n          }\n        }\n      }\n    },\n    \"Container\": {\n      \"id\": \"Container\",\n      \"description\": \"Arvados container record\\n\\nA container represents compute work that has been or should be dispatched,\\nalong with its results. 
A container can satisfy one or more container requests.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"dz642\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This container's Arvados UUID, like `zzzzz-dz642-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this container.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this container was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this container was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this container.\",\n          \"type\": \"string\"\n        },\n        \"state\": {\n          \"description\": \"A string representing the container's current execution status. Possible\\nvalues are:\\n\\n  * `\\\"Queued\\\"` --- This container has not been dispatched yet.\\n  * `\\\"Locked\\\"` --- A dispatcher has claimed this container in preparation to run it.\\n  * `\\\"Running\\\"` --- A dispatcher is running this container.\\n  * `\\\"Cancelled\\\"` --- Container execution has been cancelled by user request.\\n  * `\\\"Complete\\\"` --- A dispatcher ran this container to completion and recorded the results.\\n\\n\",\n          \"type\": \"string\"\n        },\n        \"started_at\": {\n          \"description\": \"The time this container started running. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"finished_at\": {\n          \"description\": \"The time this container finished running. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"log\": {\n          \"description\": \"The portable data hash of the Arvados collection that contains this\\ncontainer's logs.\",\n          \"type\": \"string\"\n        },\n        \"environment\": {\n          \"description\": \"A hash of string keys and values that defines the environment variables\\nfor the dispatcher to set when it executes this container.\",\n          \"type\": \"Hash\"\n        },\n        \"cwd\": {\n          \"description\": \"A string that defines the working directory that the dispatcher should\\nuse when it executes the command inside this container.\",\n          \"type\": \"string\"\n        },\n        \"command\": {\n          \"description\": \"An array of strings that defines the command that the dispatcher should\\nexecute inside this container.\",\n          \"type\": \"Array\"\n        },\n        \"output_path\": {\n          \"description\": \"A string that defines the file or directory path where the command\\nwrites output that should be saved from this container.\",\n          \"type\": \"string\"\n        },\n        \"mounts\": {\n          \"description\": \"A hash where each key names a directory inside this container, and its\\nvalue is an object that defines the mount source for that directory. 
Refer\\nto the [mount types reference][] for details.\\n\\n[mount types reference]: https://doc.arvados.org/api/methods/containers.html#mount_types\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"runtime_constraints\": {\n          \"description\": \"A hash that identifies compute resources this container requires to run\\nsuccessfully. See the [runtime constraints reference][] for details.\\n\\n[runtime constraints reference]: https://doc.arvados.org/api/methods/containers.html#runtime_constraints\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"output\": {\n          \"description\": \"The portable data hash of the Arvados collection that contains this\\ncontainer's output file(s).\",\n          \"type\": \"string\"\n        },\n        \"container_image\": {\n          \"description\": \"The portable data hash of the Arvados collection that contains the image\\nto use for this container.\",\n          \"type\": \"string\"\n        },\n        \"progress\": {\n          \"description\": \"A float between 0.0 and 1.0 (inclusive) that represents the container's\\nexecution progress. This attribute is not implemented yet.\",\n          \"type\": \"float\"\n        },\n        \"priority\": {\n          \"description\": \"An integer between 0 and 1000 (inclusive) that represents this container's\\nscheduling priority. 0 represents a request to be cancelled. Higher\\nvalues represent higher priority. Refer to the [priority reference][] for details.\\n\\n[priority reference]: https://doc.arvados.org/api/methods/container_requests.html#priority\\n\\n\",\n          \"type\": \"integer\"\n        },\n        \"exit_code\": {\n          \"description\": \"An integer that records the Unix exit code of the `command` from a\\nfinished container.\",\n          \"type\": \"integer\"\n        },\n        \"auth_uuid\": {\n          \"description\": \"The UUID of the Arvados API client authorization token that a dispatcher\\nshould use to set up this container. 
This token is automatically created by\\nArvados and this attribute automatically assigned unless a container is\\ncreated with `runtime_token`.\",\n          \"type\": \"string\"\n        },\n        \"locked_by_uuid\": {\n          \"description\": \"The UUID of the Arvados API client authorization token that successfully\\nlocked this container in preparation to execute it.\",\n          \"type\": \"string\"\n        },\n        \"scheduling_parameters\": {\n          \"description\": \"A hash of scheduling parameters that should be passed to the underlying\\ndispatcher when this container is run.\\nSee the [scheduling parameters reference][] for details.\\n\\n[scheduling parameters reference]: https://doc.arvados.org/api/methods/containers.html#scheduling_parameters\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"runtime_status\": {\n          \"description\": \"A hash with status updates from a running container.\\nRefer to the [runtime status reference][] for details.\\n\\n[runtime status reference]: https://doc.arvados.org/api/methods/containers.html#runtime_status\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"runtime_user_uuid\": {\n          \"description\": \"The UUID of the Arvados user associated with the API client authorization\\ntoken used to run this container.\",\n          \"type\": \"text\"\n        },\n        \"runtime_auth_scopes\": {\n          \"description\": \"The `scopes` from the API client authorization token used to run this container.\",\n          \"type\": \"Array\"\n        },\n        \"lock_count\": {\n          \"description\": \"The number of times this container has been locked by a dispatcher. This\\nmay be greater than 1 if a dispatcher locks a container but then execution is\\ninterrupted for any reason.\",\n          \"type\": \"integer\"\n        },\n        \"gateway_address\": {\n          \"description\": \"A string with the address of the Arvados gateway server, in `HOST:PORT`\\nformat. This is for internal use only.\",\n          \"type\": \"string\"\n        },\n        \"interactive_session_started\": {\n          \"description\": \"This flag is set true if any user starts an interactive shell inside the\\nrunning container.\",\n          \"type\": \"boolean\"\n        },\n        \"output_storage_classes\": {\n          \"description\": \"An array of strings identifying the storage class(es) that should be set\\non the output collection of this container. Storage classes are configured by\\nthe cluster administrator.\",\n          \"type\": \"Array\"\n        },\n        \"output_properties\": {\n          \"description\": \"A hash of arbitrary metadata to set on the output collection of this container.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"cost\": {\n          \"description\": \"A float with the estimated cost of the cloud instance used to run this\\ncontainer. The value is `0` if cost estimation is not available on this cluster.\",\n          \"type\": \"float\"\n        },\n        \"subrequests_cost\": {\n          \"description\": \"A float with the estimated cost of all cloud instances used to run this\\ncontainer and all its subrequests. 
The value is `0` if cost estimation is not\\navailable on this cluster.\",\n          \"type\": \"float\"\n        },\n        \"output_glob\": {\n          \"description\": \"An array of strings of shell-style glob patterns that define which file(s)\\nand subdirectory(ies) under the `output_path` directory should be recorded in\\nthe container's final output. Refer to the [glob patterns reference][] for details.\\n\\n[glob patterns reference]: https://doc.arvados.org/api/methods/containers.html#glob_patterns\\n\\n\",\n          \"type\": \"Array\"\n        },\n        \"service\": {\n          \"description\": \"A boolean flag. If set, it informs the system that this is a long-running container\\nthat functions as a system service or web app, rather than a once-through batch operation.\",\n          \"type\": \"boolean\"\n        },\n        \"published_ports\": {\n          \"description\": \"A hash where keys are numeric TCP ports on the container which expose HTTP services.  Arvados\\nwill proxy HTTP requests to these ports.  Values are hashes with the following keys:\\n\\n  * `\\\"access\\\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.\\n  * `\\\"label\\\"` --- A human readable label describing the service, for display in Workbench.\\n  * `\\\"initial_path\\\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.\",\n          \"type\": \"jsonb\"\n        }\n      }\n    },\n    \"ContainerRequestList\": {\n      \"id\": \"ContainerRequestList\",\n      \"description\": \"A list of ContainerRequest objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#containerRequestList.\",\n          \"default\": \"arvados#containerRequestList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching ContainerRequest objects.\",\n          \"items\": {\n            \"$ref\": \"ContainerRequest\"\n          }\n        }\n      }\n    },\n    \"ContainerRequest\": {\n      \"id\": \"ContainerRequest\",\n      \"description\": \"Arvados container request\\n\\nA container request represents a user's request that Arvados do some compute\\nwork, along with full details about what work should be done. Arvados will\\nattempt to fulfill the request by mapping it to a matching container record,\\nrunning the work on demand if necessary.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"xvhdp\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This container request's Arvados UUID, like `zzzzz-xvhdp-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this container request.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this container request was created. 
The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this container request was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this container request.\",\n          \"type\": \"string\"\n        },\n        \"name\": {\n          \"description\": \"The name of this container request assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"description\": {\n          \"description\": \"A longer HTML description of this container request assigned by a user.\\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.\",\n          \"type\": \"text\"\n        },\n        \"properties\": {\n          \"description\": \"A hash of arbitrary metadata for this container request.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"state\": {\n          \"description\": \"A string indicating where this container request is in its lifecycle.\\nPossible values are:\\n\\n  * `\\\"Uncommitted\\\"` --- The container request has not been finalized and can still be edited.\\n  * `\\\"Committed\\\"` --- The container request is ready to be fulfilled.\\n  * `\\\"Final\\\"` --- The container request has been fulfilled or cancelled.\\n\\n\",\n          \"type\": \"string\"\n        },\n        \"requesting_container_uuid\": {\n          \"description\": \"The UUID of the container that created this container request, if any.\",\n          \"type\": \"string\"\n        },\n        \"container_uuid\": {\n          \"description\": \"The UUID of the container that fulfills this container request, if any.\",\n          \"type\": \"string\"\n        },\n        \"container_count_max\": {\n          \"description\": \"An integer that defines the maximum number of times Arvados should attempt\\nto dispatch a container to fulfill this container request.\",\n          \"type\": \"integer\"\n        },\n        \"mounts\": {\n          \"description\": \"A hash where each key names a directory inside this container, and its\\nvalue is an object that defines the mount source for that directory. Refer\\nto the [mount types reference][] for details.\\n\\n[mount types reference]: https://doc.arvados.org/api/methods/containers.html#mount_types\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"runtime_constraints\": {\n          \"description\": \"A hash that identifies compute resources this container requires to run\\nsuccessfully. 
See the [runtime constraints reference][] for details.\\n\\n[runtime constraints reference]: https://doc.arvados.org/api/methods/containers.html#runtime_constraints\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"container_image\": {\n          \"description\": \"The portable data hash of the Arvados collection that contains the image\\nto use for this container.\",\n          \"type\": \"string\"\n        },\n        \"environment\": {\n          \"description\": \"A hash of string keys and values that defines the environment variables\\nfor the dispatcher to set when it executes this container.\",\n          \"type\": \"Hash\"\n        },\n        \"cwd\": {\n          \"description\": \"A string that defines the working directory that the dispatcher should\\nuse when it executes the command inside this container.\",\n          \"type\": \"string\"\n        },\n        \"command\": {\n          \"description\": \"An array of strings that defines the command that the dispatcher should\\nexecute inside this container.\",\n          \"type\": \"Array\"\n        },\n        \"output_path\": {\n          \"description\": \"A string that defines the file or directory path where the command\\nwrites output that should be saved from this container.\",\n          \"type\": \"string\"\n        },\n        \"priority\": {\n          \"description\": \"An integer between 0 and 1000 (inclusive) that represents this container request's\\nscheduling priority. 0 represents a request to be cancelled. Higher\\nvalues represent higher priority. Refer to the [priority reference][] for details.\\n\\n[priority reference]: https://doc.arvados.org/api/methods/container_requests.html#priority\\n\\n\",\n          \"type\": \"integer\"\n        },\n        \"expires_at\": {\n          \"description\": \"The time after which this container request will no longer be fulfilled. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"filters\": {\n          \"description\": \"Filters that limit which existing containers are eligible to satisfy this\\ncontainer request. This attribute is not implemented yet and should be null.\",\n          \"type\": \"text\"\n        },\n        \"container_count\": {\n          \"description\": \"An integer that records how many times Arvados has attempted to dispatch\\na container to fulfill this container request.\",\n          \"type\": \"integer\"\n        },\n        \"use_existing\": {\n          \"description\": \"A boolean flag. If set, Arvados may choose to satisfy this container\\nrequest with an eligible container that already exists. 
Otherwise, Arvados will\\nsatisfy this container request with a new container, which will usually result\\nin the container running again.\",\n          \"type\": \"boolean\"\n        },\n        \"scheduling_parameters\": {\n          \"description\": \"A hash of scheduling parameters that should be passed to the underlying\\ndispatcher when this container is run.\\nSee the [scheduling parameters reference][] for details.\\n\\n[scheduling parameters reference]: https://doc.arvados.org/api/methods/containers.html#scheduling_parameters\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"output_uuid\": {\n          \"description\": \"The UUID of the Arvados collection that contains output for all the\\ncontainer(s) that were dispatched to fulfill this container request.\",\n          \"type\": \"string\"\n        },\n        \"log_uuid\": {\n          \"description\": \"The UUID of the Arvados collection that contains logs for all the\\ncontainer(s) that were dispatched to fulfill this container request.\",\n          \"type\": \"string\"\n        },\n        \"output_name\": {\n          \"description\": \"The name to set on the output collection of this container request.\",\n          \"type\": \"string\"\n        },\n        \"output_ttl\": {\n          \"description\": \"An integer in seconds. If greater than zero, when an output collection is\\ncreated for this container request, its `trash_at` and `delete_at` attributes will be set this\\nfar in the future.\",\n          \"type\": \"integer\"\n        },\n        \"output_storage_classes\": {\n          \"description\": \"An array of strings identifying the storage class(es) that should be set\\non the output collection of this container request. Storage classes are configured by\\nthe cluster administrator.\",\n          \"type\": \"Array\"\n        },\n        \"output_properties\": {\n          \"description\": \"A hash of arbitrary metadata to set on the output collection of this container request.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"cumulative_cost\": {\n          \"description\": \"A float with the estimated cost of all cloud instances used to run\\ncontainer(s) to fulfill this container request and their subrequests.\\nThe value is `0` if cost estimation is not available on this cluster.\",\n          \"type\": \"float\"\n        },\n        \"output_glob\": {\n          \"description\": \"An array of strings of shell-style glob patterns that define which file(s)\\nand subdirectory(ies) under the `output_path` directory should be recorded in\\nthe container's final output. Refer to the [glob patterns reference][] for details.\\n\\n[glob patterns reference]: https://doc.arvados.org/api/methods/containers.html#glob_patterns\\n\\n\",\n          \"type\": \"Array\"\n        },\n        \"service\": {\n          \"description\": \"A boolean flag. If set, it informs the system that this request is for a long-running container\\nthat functions as a system service or web app, rather than a once-through batch operation.\",\n          \"type\": \"boolean\"\n        },\n        \"published_ports\": {\n          \"description\": \"A hash where keys are numeric TCP ports on the container which expose HTTP services.  Arvados\\nwill proxy HTTP requests to these ports.  
Values are hashes with the following keys:\\n\\n  * `\\\"access\\\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.\\n  * `\\\"label\\\"` --- A human readable label describing the service, for display in Workbench.\\n  * `\\\"initial_path\\\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.\",\n          \"type\": \"Hash\"\n        }\n      }\n    },\n    \"CredentialList\": {\n      \"id\": \"CredentialList\",\n      \"description\": \"A list of Credential objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#credentialList.\",\n          \"default\": \"arvados#credentialList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Credential objects.\",\n          \"items\": {\n            \"$ref\": \"Credential\"\n          }\n        }\n      }\n    },\n    \"Credential\": {\n      \"id\": \"Credential\",\n      \"description\": \"Arvados credential.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"oss07\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This credential's Arvados UUID, like `zzzzz-oss07-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this credential.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this credential was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this credential was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this credential.\",\n          \"type\": \"string\"\n        },\n        \"name\": {\n          \"description\": \"The name of this credential assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"description\": {\n          \"description\": \"A longer HTML description of this credential assigned by a user.\\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.\",\n          \"type\": \"text\"\n        },\n        \"credential_class\": {\n          \"description\": \"The type of credential being stored.\",\n          \"type\": \"string\"\n        },\n        \"scopes\": {\n          \"description\": \"The resources the credential applies to or should be used with.\",\n          \"type\": \"Array\"\n        },\n        \"external_id\": {\n          \"description\": \"The non-secret external identifier associated with a credential, e.g. 
a username.\",\n          \"type\": \"string\"\n        },\n        \"expires_at\": {\n          \"description\": \"Date after which the credential_secret field is no longer valid. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        }\n      }\n    },\n    \"GroupList\": {\n      \"id\": \"GroupList\",\n      \"description\": \"A list of Group objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#groupList.\",\n          \"default\": \"arvados#groupList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Group objects.\",\n          \"items\": {\n            \"$ref\": \"Group\"\n          }\n        }\n      }\n    },\n    \"Group\": {\n      \"id\": \"Group\",\n      \"description\": \"Arvados group\\n\\nGroups provide a way to organize users or data together, depending on their\\n`group_class`.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"j7d0g\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This group's Arvados UUID, like `zzzzz-j7d0g-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this group.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this group was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this group.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this group was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"name\": {\n          \"description\": \"The name of this group assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"description\": {\n          \"description\": \"A longer HTML description of this group assigned by a user.\\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.\",\n          \"type\": \"string\"\n        },\n        \"group_class\": {\n          \"description\": \"A string representing which type of group this is. One of:\\n\\n  * `\\\"filter\\\"` --- A virtual project whose contents are selected dynamically by filters.\\n  * `\\\"project\\\"` --- An Arvados project that can contain collections,\\n    container records, workflows, and subprojects.\\n  * `\\\"role\\\"` --- A group of users that can be granted permissions in Arvados.\\n\\n\",\n          \"type\": \"string\"\n        },\n        \"trash_at\": {\n          \"description\": \"The time this group will be trashed. 
The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"is_trashed\": {\n          \"description\": \"A boolean flag to indicate whether or not this group is trashed.\",\n          \"type\": \"boolean\"\n        },\n        \"delete_at\": {\n          \"description\": \"The time this group will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"properties\": {\n          \"description\": \"A hash of arbitrary metadata for this group.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"frozen_by_uuid\": {\n          \"description\": \"The UUID of the user that has frozen this group, if any. Frozen projects\\ncannot have their contents or metadata changed, even by admins.\",\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"KeepServiceList\": {\n      \"id\": \"KeepServiceList\",\n      \"description\": \"A list of KeepService objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#keepServiceList.\",\n          \"default\": \"arvados#keepServiceList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching KeepService objects.\",\n          \"items\": {\n            \"$ref\": \"KeepService\"\n          }\n        }\n      }\n    },\n    \"KeepService\": {\n      \"id\": \"KeepService\",\n      \"description\": \"Arvados Keep service\\n\\nThis resource stores information about a single Keep service in this Arvados\\ncluster that clients can contact to retrieve and store data.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"bi6l4\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This Keep service's Arvados UUID, like `zzzzz-bi6l4-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this Keep service.\",\n          \"type\": \"string\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this Keep service.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this Keep service was last updated. 
The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"service_host\": {\n          \"description\": \"The DNS hostname of this Keep service.\",\n          \"type\": \"string\"\n        },\n        \"service_port\": {\n          \"description\": \"The TCP port where this Keep service listens.\",\n          \"type\": \"integer\"\n        },\n        \"service_ssl_flag\": {\n          \"description\": \"A boolean flag that indicates whether or not this Keep service uses TLS/SSL.\",\n          \"type\": \"boolean\"\n        },\n        \"service_type\": {\n          \"description\": \"A string that describes which type of Keep service this is. One of:\\n\\n  * `\\\"disk\\\"` --- A service that stores blocks on a local filesystem.\\n  * `\\\"blob\\\"` --- A service that stores blocks in a cloud object store.\\n  * `\\\"proxy\\\"` --- A keepproxy service.\\n\\n\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this Keep service was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"read_only\": {\n          \"description\": \"A boolean flag. If set, this Keep service does not accept requests to write data\\nblocks; it only serves blocks it already has.\",\n          \"type\": \"boolean\"\n        }\n      }\n    },\n    \"LinkList\": {\n      \"id\": \"LinkList\",\n      \"description\": \"A list of Link objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#linkList.\",\n          \"default\": \"arvados#linkList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Link objects.\",\n          \"items\": {\n            \"$ref\": \"Link\"\n          }\n        }\n      }\n    },\n    \"Link\": {\n      \"id\": \"Link\",\n      \"description\": \"Arvados object link\\n\\nA link provides a way to define relationships between Arvados objects,\\ndepending on their `link_class`.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"o0j2j\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This link's Arvados UUID, like `zzzzz-o0j2j-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this link.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this link was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this link.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this link was last updated. 
The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"tail_uuid\": {\n          \"description\": \"The UUID of the Arvados object that is the originator or actor in this\\nrelationship. May be null.\",\n          \"type\": \"string\"\n        },\n        \"link_class\": {\n          \"description\": \"A string that defines which kind of link this is. One of:\\n\\n  * `\\\"permission\\\"` --- This link grants a permission to the user or group\\n    referenced by `tail_uuid` to the object referenced by `head_uuid`. The\\n    access level is set by `name`.\\n  * `\\\"star\\\"` --- This link represents a \\\"favorite.\\\" The user referenced\\n    by `tail_uuid` wants quick access to the object referenced by `head_uuid`.\\n  * `\\\"tag\\\"` --- This link represents an unstructured metadata tag. The object\\n    referenced by `head_uuid` has the tag defined by `name`.\\n\\n\",\n          \"type\": \"string\"\n        },\n        \"name\": {\n          \"description\": \"The primary value of this link. For `\\\"permission\\\"` links, this is one of\\n`\\\"can_read\\\"`, `\\\"can_write\\\"`, or `\\\"can_manage\\\"`.\",\n          \"type\": \"string\"\n        },\n        \"head_uuid\": {\n          \"description\": \"The UUID of the Arvados object that is the target of this relationship.\",\n          \"type\": \"string\"\n        },\n        \"properties\": {\n          \"description\": \"A hash of arbitrary metadata for this link.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        }\n      }\n    },\n    \"LogList\": {\n      \"id\": \"LogList\",\n      \"description\": \"A list of Log objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#logList.\",\n          \"default\": \"arvados#logList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Log objects.\",\n          \"items\": {\n            \"$ref\": \"Log\"\n          }\n        }\n      }\n    },\n    \"Log\": {\n      \"id\": \"Log\",\n      \"description\": \"Arvados log record\\n\\nThis resource represents a single log record about an event in this Arvados\\ncluster. Some individual Arvados services create log records. Users can also\\ncreate custom logs.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"57u5n\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"id\": {\n          \"description\": \"The serial number of this log. 
You can use this in filters to query logs\\nthat were created before/after another.\",\n          \"type\": \"integer\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This log's Arvados UUID, like `zzzzz-57u5n-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this log.\",\n          \"type\": \"string\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this log.\",\n          \"type\": \"string\"\n        },\n        \"object_uuid\": {\n          \"description\": \"The UUID of the Arvados object that this log pertains to, such as a user\\nor container.\",\n          \"type\": \"string\"\n        },\n        \"event_at\": {\n          \"description\": \"The time the event being logged occurred. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"event_type\": {\n          \"description\": \"An arbitrary short string that classifies what type of log this is.\",\n          \"type\": \"string\"\n        },\n        \"summary\": {\n          \"description\": \"A text string that describes the logged event. This is the primary\\nattribute for simple logs.\",\n          \"type\": \"text\"\n        },\n        \"properties\": {\n          \"description\": \"A hash of arbitrary metadata for this log.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this log was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this log was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"object_owner_uuid\": {\n          \"description\": \"The `owner_uuid` of the object referenced by `object_uuid` at the time\\nthis log was created.\",\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"UserList\": {\n      \"id\": \"UserList\",\n      \"description\": \"A list of User objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. 
Always arvados#userList.\",\n          \"default\": \"arvados#userList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching User objects.\",\n          \"items\": {\n            \"$ref\": \"User\"\n          }\n        }\n      }\n    },\n    \"User\": {\n      \"id\": \"User\",\n      \"description\": \"Arvados user\\n\\nA user represents a single individual or role who may be authorized to access\\nthis Arvados cluster.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"tpzed\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This user's Arvados UUID, like `zzzzz-tpzed-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this user.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this user was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this user.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this user was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"email\": {\n          \"description\": \"This user's email address.\",\n          \"type\": \"string\"\n        },\n        \"first_name\": {\n          \"description\": \"This user's first name.\",\n          \"type\": \"string\"\n        },\n        \"last_name\": {\n          \"description\": \"This user's last name.\",\n          \"type\": \"string\"\n        },\n        \"identity_url\": {\n          \"description\": \"A URL that represents this user with the cluster's identity provider.\",\n          \"type\": \"string\"\n        },\n        \"is_admin\": {\n          \"description\": \"A boolean flag. If set, this user is an administrator of the Arvados\\ncluster, and automatically passes most permissions checks.\",\n          \"type\": \"boolean\"\n        },\n        \"prefs\": {\n          \"description\": \"A hash that stores cluster-wide user preferences.\",\n          \"type\": \"Hash\"\n        },\n        \"is_active\": {\n          \"description\": \"A boolean flag. If unset, this user is not permitted to make any Arvados\\nAPI requests.\",\n          \"type\": \"boolean\"\n        },\n        \"username\": {\n          \"description\": \"This user's Unix username on virtual machines.\",\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"UserAgreementList\": {\n      \"id\": \"UserAgreementList\",\n      \"description\": \"A list of UserAgreement objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. 
Always arvados#userAgreementList.\",\n          \"default\": \"arvados#userAgreementList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching UserAgreement objects.\",\n          \"items\": {\n            \"$ref\": \"UserAgreement\"\n          }\n        }\n      }\n    },\n    \"UserAgreement\": {\n      \"id\": \"UserAgreement\",\n      \"description\": \"Arvados user agreement\\n\\nA user agreement is a collection with terms that users must agree to before\\nthey can use this Arvados cluster.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"gv0sa\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this user agreement.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this user agreement was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this user agreement.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this user agreement was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"portable_data_hash\": {\n          \"description\": \"The portable data hash of this user agreement. This string provides a unique\\nand stable reference to these contents.\",\n          \"type\": \"string\"\n        },\n        \"replication_desired\": {\n          \"description\": \"The number of copies that should be made for data in this user agreement.\",\n          \"type\": \"integer\"\n        },\n        \"replication_confirmed_at\": {\n          \"description\": \"The last time the cluster confirmed that it met `replication_confirmed`\\nfor this user agreement. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"replication_confirmed\": {\n          \"description\": \"The number of copies of data in this user agreement that the cluster has confirmed\\nexist in storage.\",\n          \"type\": \"integer\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This user agreement's Arvados UUID, like `zzzzz-gv0sa-12345abcde67890`.\"\n        },\n        \"manifest_text\": {\n          \"description\": \"The manifest text that describes how files are constructed from data blocks\\nin this user agreement. 
Refer to the [manifest format][] reference for details.\\n\\n[manifest format]: https://doc.arvados.org/architecture/manifest-format.html\\n\\n\",\n          \"type\": \"text\"\n        },\n        \"name\": {\n          \"description\": \"The name of this user agreement assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"description\": {\n          \"description\": \"A longer HTML description of this user agreement assigned by a user.\\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.\",\n          \"type\": \"string\"\n        },\n        \"properties\": {\n          \"description\": \"A hash of arbitrary metadata for this user agreement.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"delete_at\": {\n          \"description\": \"The time this user agreement will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"trash_at\": {\n          \"description\": \"The time this user agreement will be trashed. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"is_trashed\": {\n          \"description\": \"A boolean flag to indicate whether or not this user agreement is trashed.\",\n          \"type\": \"boolean\"\n        },\n        \"storage_classes_desired\": {\n          \"description\": \"An array of strings identifying the storage class(es) that should be used\\nfor data in this user agreement. Storage classes are configured by the cluster administrator.\",\n          \"type\": \"Array\"\n        },\n        \"storage_classes_confirmed\": {\n          \"description\": \"An array of strings identifying the storage class(es) the cluster has\\nconfirmed have a copy of this user agreement's data.\",\n          \"type\": \"Array\"\n        },\n        \"storage_classes_confirmed_at\": {\n          \"description\": \"The last time the cluster confirmed that data was stored on the storage\\nclass(es) in `storage_classes_confirmed`. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"current_version_uuid\": {\n          \"description\": \"The UUID of the current version of this user agreement.\",\n          \"type\": \"string\"\n        },\n        \"version\": {\n          \"description\": \"An integer that counts which version of a user agreement this record\\nrepresents. Refer to [collection versioning][] for details. 
This attribute is\\nread-only.\\n\\n[collection versioning]: https://doc.arvados.org/user/topics/collection-versioning.html\\n\\n\",\n          \"type\": \"integer\"\n        },\n        \"preserve_version\": {\n          \"description\": \"A boolean flag to indicate whether this specific version of this user agreement\\nshould be persisted in cluster storage.\",\n          \"type\": \"boolean\"\n        },\n        \"file_count\": {\n          \"description\": \"The number of files represented in this user agreement's `manifest_text`.\\nThis attribute is read-only.\",\n          \"type\": \"integer\"\n        },\n        \"file_size_total\": {\n          \"description\": \"The total size in bytes of files represented in this user agreement's `manifest_text`.\\nThis attribute is read-only.\",\n          \"type\": \"integer\"\n        }\n      }\n    },\n    \"VirtualMachineList\": {\n      \"id\": \"VirtualMachineList\",\n      \"description\": \"A list of VirtualMachine objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#virtualMachineList.\",\n          \"default\": \"arvados#virtualMachineList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching VirtualMachine objects.\",\n          \"items\": {\n            \"$ref\": \"VirtualMachine\"\n          }\n        }\n      }\n    },\n    \"VirtualMachine\": {\n      \"id\": \"VirtualMachine\",\n      \"description\": \"Arvados virtual machine (\\\"shell node\\\")\\n\\nThis resource stores information about a virtual machine or \\\"shell node\\\"\\nhosted on this Arvados cluster where users can log in and use preconfigured\\nArvados client tools.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"2x53u\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This virtual machine's Arvados UUID, like `zzzzz-2x53u-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this virtual machine.\",\n          \"type\": \"string\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this virtual machine.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this virtual machine was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"hostname\": {\n          \"description\": \"The DNS hostname where users should access this virtual machine.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this virtual machine was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        }\n      }\n    },\n    \"WorkflowList\": {\n      \"id\": \"WorkflowList\",\n      \"description\": \"A list of Workflow objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. 
Always arvados#workflowList.\",\n          \"default\": \"arvados#workflowList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Workflow objects.\",\n          \"items\": {\n            \"$ref\": \"Workflow\"\n          }\n        }\n      }\n    },\n    \"Workflow\": {\n      \"id\": \"Workflow\",\n      \"description\": \"Arvados workflow\\n\\nA workflow contains workflow definition source code that Arvados can execute\\nalong with associated metadata for users.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"7fd4e\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This workflow's Arvados UUID, like `zzzzz-7fd4e-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this workflow.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this workflow was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this workflow was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this workflow.\",\n          \"type\": \"string\"\n        },\n        \"name\": {\n          \"description\": \"The name of this workflow assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"description\": {\n          \"description\": \"A longer HTML description of this workflow assigned by a user.\\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.\",\n          \"type\": \"text\"\n        },\n        \"definition\": {\n          \"description\": \"A string with the CWL source of this workflow.\",\n          \"type\": \"text\"\n        },\n        \"collection_uuid\": {\n          \"description\": \"The collection this workflow is linked to, containing the definition of the workflow.\",\n          \"type\": \"string\"\n        }\n      }\n    }\n  },\n  \"servicePath\": \"arvados/v1/\",\n  \"title\": \"Arvados API\",\n  \"version\": \"v1\"\n}"
  },
  {
    "path": "contrib/R-sdk/generateApi.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlibrary(jsonlite)\n\ngetAPIDocument <- function(loc)\n{\n    if (length(grep(\"^[a-z]+://\", loc)) > 0) {\n        library(httr)\n        serverResponse <- httr::RETRY(\"GET\", url = loc)\n        httr::content(serverResponse, as = \"parsed\", type = \"application/json\")\n    } else {\n        jsonlite::read_json(loc)\n    }\n}\n\n#' generateAPI\n#'\n#' Autogenerate classes to interact with Arvados from the Arvados discovery document.\n#'\n#' @export\ngenerateAPI <- function(discoveryDocument)\n{\n    methodResources <- discoveryDocument$resources\n    resourceNames   <- names(methodResources)\n\n    classDoc <- genAPIClassDoc(methodResources, resourceNames)\n    arvadosAPIHeader <- genAPIClassHeader()\n    arvadosClassMethods <- genClassContent(methodResources, resourceNames)\n    arvadosProjectMethods <- genProjectMethods(methodResources)\n    arvadosAPIFooter <- genAPIClassFooter()\n\n    arvadosClass <- c(classDoc,\n                      arvadosAPIHeader,\n                      arvadosClassMethods,\n                      arvadosProjectMethods,\n                      arvadosAPIFooter)\n\n    fileConn <- file(\"./R/Arvados.R\", \"w\")\n    writeLines(c(\n    \"# Copyright (C) The Arvados Authors. All rights reserved.\",\n    \"#\",\n    \"# SPDX-License-Identifier: Apache-2.0\",\n    \"\",\n    \"#' Arvados\",\n    \"#'\",\n    \"#' This class implements a full REST client to the Arvados API.\",\n    \"#'\"), fileConn)\n    writeLines(unlist(arvadosClass), fileConn)\n    close(fileConn)\n    NULL\n}\n\ngenAPIClassHeader <- function()\n{\n    c(\"#' @export\",\n      \"Arvados <- R6::R6Class(\",\n      \"\",\n      \"\\t\\\"Arvados\\\",\",\n      \"\",\n      \"\\tpublic = list(\",\n      \"\",\n      \"\\t\\t#' @description Create a new Arvados API client.\",\n      \"\\t\\t#' @param authToken Authentification token. If not specified ARVADOS_API_TOKEN environment variable will be used.\",\n      \"\\t\\t#' @param hostName Host name. 
If not specified, the ARVADOS_API_HOST environment variable will be used.\",\n      \"\\t\\t#' @param numRetries Number of times to retry failed service requests.\",\n      \"\\t\\t#' @return A new `Arvados` object.\",\n      \"\\t\\tinitialize = function(authToken = NULL, hostName = NULL, numRetries = 0)\",\n      \"\\t\\t{\",\n      \"\\t\\t\\tif(!is.null(hostName))\",\n      \"\\t\\t\\t\\tSys.setenv(ARVADOS_API_HOST = hostName)\",\n      \"\",\n      \"\\t\\t\\tif(!is.null(authToken))\",\n      \"\\t\\t\\t\\tSys.setenv(ARVADOS_API_TOKEN = authToken)\",\n      \"\",\n      \"\\t\\t\\thostName <- Sys.getenv(\\\"ARVADOS_API_HOST\\\")\",\n      \"\\t\\t\\ttoken    <- Sys.getenv(\\\"ARVADOS_API_TOKEN\\\")\",\n      \"\",\n      \"\\t\\t\\tif(hostName == \\\"\\\" | token == \\\"\\\")\",\n      \"\\t\\t\\t\\tstop(paste(\\\"Please provide host name and authentication token\\\",\",\n      \"\\t\\t\\t\\t\\t\\t   \\\"or set ARVADOS_API_HOST and ARVADOS_API_TOKEN\\\",\",\n      \"\\t\\t\\t\\t\\t\\t   \\\"environment variables.\\\"))\",\n      \"\",\n      \"\\t\\t\\tprivate$token <- token\",\n      \"\\t\\t\\tprivate$host  <- paste0(\\\"https://\\\", hostName, \\\"/arvados/v1/\\\")\",\n      \"\\t\\t\\tprivate$numRetries <- numRetries\",\n      \"\\t\\t\\tprivate$REST <- RESTService$new(token, hostName,\",\n      \"\\t\\t\\t                                HttpRequest$new(), HttpParser$new(),\",\n      \"\\t\\t\\t                                numRetries)\",\n      \"\",\n      \"\\t\\t},\\n\")\n}\n\ngenProjectMethods <- function(methodResources)\n{\n    toCallArg <- function(arg) {\n        callArg <- strsplit(arg, \" *=\")[[1]][1]\n        paste(callArg, callArg, sep=\" = \")\n    }\n    toCallArgs <- function(argList) {\n        paste0(Map(toCallArg, argList), collapse=\", \")\n    }\n    groupsMethods <- methodResources[[\"groups\"]][[\"methods\"]]\n    getArgs <- getMethodArguments(groupsMethods[[\"get\"]])\n    createArgs <- getMethodArguments(groupsMethods[[\"create\"]])\n    updateArgs <- getMethodArguments(groupsMethods[[\"update\"]])\n    listArgs <- getMethodArguments(groupsMethods[[\"list\"]])\n    deleteArgs <- getMethodArguments(groupsMethods[[\"delete\"]])\n\n    c(\"\\t\\t#' @description An alias for `groups_get`.\",\n      getMethodParams(groupsMethods[[\"get\"]]),\n      \"\\t\\t#' @return A Group object.\",\n      getMethodSignature(\"project_get\", getArgs),\n      \"\\t\\t{\",\n      paste(\"\\t\\t\\tself$groups_get(\", toCallArgs(getArgs), \")\", sep=\"\"),\n      \"\\t\\t},\",\n      \"\",\n      \"\\t\\t#' @description A wrapper for `groups_create` that sets `group_class=\\\"project\\\"`.\",\n      getMethodParams(groupsMethods[[\"create\"]]),\n      \"\\t\\t#' @return A Group object.\",\n      getMethodSignature(\"project_create\", createArgs),\n      \"\\t\\t{\",\n      \"\\t\\t\\tgroup <- c(\\\"group_class\\\" = \\\"project\\\", group)\",\n      paste(\"\\t\\t\\tself$groups_create(\", toCallArgs(createArgs), \")\", sep=\"\"),\n      \"\\t\\t},\",\n      \"\",\n      \"\\t\\t#' @description A wrapper for `groups_update` that sets `group_class=\\\"project\\\"`.\",\n      getMethodParams(groupsMethods[[\"update\"]]),\n      \"\\t\\t#' @return A Group object.\",\n      getMethodSignature(\"project_update\", updateArgs),\n      \"\\t\\t{\",\n      \"\\t\\t\\tgroup <- c(\\\"group_class\\\" = \\\"project\\\", group)\",\n      paste(\"\\t\\t\\tself$groups_update(\", toCallArgs(updateArgs), \")\", sep=\"\"),\n      \"\\t\\t},\",\n      \"\",\n      \"\\t\\t#' 
@description A wrapper for `groups_list` that adds a filter for `group_class=\\\"project\\\"`.\",\n      getMethodParams(groupsMethods[[\"list\"]]),\n      \"\\t\\t#' @return A GroupList object.\",\n      getMethodSignature(\"project_list\", listArgs),\n      \"\\t\\t{\",\n      \"\\t\\t\\tfilters[[length(filters) + 1]] <- list(\\\"group_class\\\", \\\"=\\\", \\\"project\\\")\",\n      paste(\"\\t\\t\\tself$groups_list(\", toCallArgs(listArgs), \")\", sep=\"\"),\n      \"\\t\\t},\",\n      \"\",\n      \"\\t\\t#' @description An alias for `groups_delete`.\",\n      getMethodParams(groupsMethods[[\"delete\"]]),\n      \"\\t\\t#' @return A Group object.\",\n      getMethodSignature(\"project_delete\", deleteArgs),\n      \"\\t\\t{\",\n      paste(\"\\t\\t\\tself$groups_delete(\", toCallArgs(deleteArgs), \")\", sep=\"\"),\n      \"\\t\\t},\",\n      \"\",\n      \"\\t\\t#' @description Test whether or not a project exists.\",\n      getMethodParams(groupsMethods[[\"get\"]]),\n      getMethodSignature(\"project_exist\", getArgs),\n      \"\\t\\t{\",\n      paste(\"\\t\\t\\tresult <- try(self$groups_get(\", toCallArgs(getArgs), \"))\", sep=\"\"),\n      \"\\t\\t\\tif(inherits(result, \\\"try-error\\\"))\",\n      \"\\t\\t\\t\\texists <- FALSE\",\n      \"\\t\\t\\telse\",\n      \"\\t\\t\\t\\texists <- result['group_class'] == \\\"project\\\"\",\n      \"\\t\\t\\tcat(format(exists))\",\n      \"\\t\\t},\",\n      \"\",\n      \"\\t\\t#' @description A convenience wrapper for `project_update` to set project metadata properties.\",\n      \"\\t\\t#' @param listProperties List of new properties.\",\n      \"\\t\\t#' @param uuid UUID of the project to update.\",\n      \"\\t\\t#' @return A Group object.\",\n      \"\\t\\tproject_properties_set = function(listProperties, uuid)\",\n      \"\\t\\t{\",\n      \"\\t\\t\\tself$project_update(list(\\\"properties\\\" = listProperties), uuid)\",\n      \"\\t\\t},\",\n      \"\",\n      \"\\t\\t#' @description Get a project and update it with additional properties.\",\n      \"\\t\\t#' @param properties List of new properties.\",\n      \"\\t\\t#' @param uuid UUID of the project to update.\",\n      \"\\t\\t#' @return A Group object.\",\n      \"\\t\\tproject_properties_append = function(properties, uuid)\",\n      \"\\t\\t{\",\n      \"\\t\\t\\tproj <- private$get_project_by_list(uuid, list('uuid', 'properties'))\",\n      \"\\t\\t\\tnewListOfProperties <- c(proj$properties, properties)\",\n      \"\\t\\t\\tuniqueProperties <- unique(unlist(newListOfProperties))\",\n      \"\\t\\t\\tnewProperties <- suppressWarnings(newListOfProperties[which(newListOfProperties == uniqueProperties)])\",\n      \"\\t\\t\\tself$project_properties_set(newProperties, proj$uuid)\",\n      \"\\t\\t},\",\n      \"\",\n      \"\\t\\t#' @description Get properties of a project.\",\n      \"\\t\\t#' @param uuid The UUID of the project to query.\",\n      \"\\t\\tproject_properties_get = function(uuid)\",\n      \"\\t\\t{\",\n      \"\\t\\t\\tprivate$get_project_by_list(uuid, list('uuid', 'properties'))$properties\",\n      \"\\t\\t},\",\n      \"\",\n      \"\\t\\t#' @description Delete one property from a project by name.\",\n      \"\\t\\t#' @param oneProp Name of the property to delete.\",\n      \"\\t\\t#' @param uuid The UUID of the project to update.\",\n      \"\\t\\t#' @return A Group object.\",\n      \"\\t\\tproject_properties_delete = function(oneProp, uuid)\",\n      \"\\t\\t{\",\n      \"\\t\\t\\tprojProp <- self$project_properties_get(uuid)\",\n      
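# NOTE: assigning NULL to a named list element removes it from the list, so\n      # the generated method drops the property before writing the rest back.\n      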
\"\\t\\t\\tprojProp[[oneProp]] <- NULL\",\n      \"\\t\\t\\tself$project_properties_set(projProp, uuid)\",\n      \"\\t\\t},\",\n      \"\",\n      \"\\t\\t#' @description Convenience wrapper of `links_list` to create a permission link.\",\n      \"\\t\\t#' @param type The type of permission: one of `'can_read'`, `'can_write'`, or `'can_manage'`.\",\n      \"\\t\\t#' @param uuid The UUID of the object to grant permission to.\",\n      \"\\t\\t#' @param user The UUID of the user or group who receives this permission.\",\n      \"\\t\\t#' @return A Link object if one was updated, else NULL.\",\n      \"\\t\\tproject_permission_give = function(type, uuid, user)\",\n      \"\\t\\t{\",\n      \"\\t\\t\\tlink <- list(\",\n      \"\\t\\t\\t\\t'link_class' = 'permission',\",\n      \"\\t\\t\\t\\t'name' = type,\",\n      \"\\t\\t\\t\\t'head_uuid' = uuid,\",\n      \"\\t\\t\\t\\t'tail_uuid' = user)\",\n      \"\\t\\t\\tself$links_create(link)\",\n      \"\\t\\t},\",\n      \"\",\n      \"\\t\\t#' @description Find an existing permission link and update its level.\",\n      \"\\t\\t#' @param typeOld The type of permission to find: one of `'can_read'`, `'can_write'`, or `'can_manage'`.\",\n      \"\\t\\t#' @param typeNew The type of permission to set: one of `'can_read'`, `'can_write'`, or `'can_manage'`.\",\n      \"\\t\\t#' @param uuid The UUID of the object to grant permission to.\",\n      \"\\t\\t#' @param user The UUID of the user or group who receives this permission.\",\n      \"\\t\\t#' @return A Link object if one was updated, else NULL.\",\n      \"\\t\\tproject_permission_update = function(typeOld, typeNew, uuid, user)\",\n      \"\\t\\t{\",\n      \"\\t\\t\\tlinks <- self$links_list(filters = list(\",\n      \"\\t\\t\\t\\t\\tlist('link_class', '=', 'permission'),\",\n      \"\\t\\t\\t\\t\\tlist('name', '=', typeOld),\",\n      \"\\t\\t\\t\\t\\tlist('head_uuid', '=', uuid),\",\n      \"\\t\\t\\t\\t\\tlist('tail_uuid', '=', user)\",\n      \"\\t\\t\\t\\t), select=list('uuid'), count = 'none')$items\",\n      \"\\t\\t\\tif (length(links) == 0) {\",\n      \"\\t\\t\\t\\tcat(format('No permission granted'))\",\n      \"\\t\\t\\t} else {\",\n      \"\\t\\t\\t\\tself$links_update(list('name' = typeNew), links[[1]]$uuid)\",\n      \"\\t\\t\\t}\",\n      \"\\t\\t},\",\n      \"\",\n      \"\\t\\t#' @description Delete an existing permission link.\",\n      \"\\t\\t#' @param type The type of permission to delete: one of `'can_read'`, `'can_write'`, or `'can_manage'`.\",\n      \"\\t\\t#' @param uuid The UUID of the object to grant permission to.\",\n      \"\\t\\t#' @param user The UUID of the user or group who receives this permission.\",\n      \"\\t\\t#' @return A Link object if one was deleted, else NULL.\",\n      \"\\t\\tproject_permission_delete = function(type, uuid, user)\",\n      \"\\t\\t{\",\n      \"\\t\\t\\tlinks <- self$links_list(filters = list(\",\n      \"\\t\\t\\t\\t\\tlist('link_class', '=', 'permission'),\",\n      \"\\t\\t\\t\\t\\tlist('name', '=', type),\",\n      \"\\t\\t\\t\\t\\tlist('head_uuid', '=', uuid),\",\n      \"\\t\\t\\t\\t\\tlist('tail_uuid', '=', user)\",\n      \"\\t\\t\\t\\t), select=list('uuid'), count = 'none')$items\",\n      \"\\t\\t\\tif (length(links) == 0) {\",\n      \"\\t\\t\\t\\tcat(format('No permission granted'))\",\n      \"\\t\\t\\t} else {\",\n      \"\\t\\t\\t\\tself$links_delete(links[[1]]$uuid)\",\n      \"\\t\\t\\t}\",\n      \"\\t\\t},\",\n      \"\",\n      \"\\t\\t#' @description Check for an existing permission link.\",\n      \"\\t\\t#' 
@param type The type of permission to check: one of `'can_read'`, `'can_write'`, `'can_manage'`, or `NULL` (the default).\",\n      \"\\t\\t#' @param uuid The UUID of the object to check permission on.\",\n      \"\\t\\t#' @param user The UUID of the user or group to check permission for.\",\n      \"\\t\\t#' @return If `type` is `NULL`, the list of matching permission links.\",\n      \"\\t\\t#' Otherwise, prints and invisibly returns the level of the found permission link.\",\n      \"\\t\\tproject_permission_check = function(uuid, user, type = NULL)\",\n      \"\\t\\t{\",\n      \"\\t\\t\\tfilters <- list(\",\n      \"\\t\\t\\t\\tlist('link_class', '=', 'permission'),\",\n      \"\\t\\t\\t\\tlist('head_uuid', '=', uuid),\",\n      \"\\t\\t\\t\\tlist('tail_uuid', '=', user))\",\n      \"\\t\\t\\tif (!is.null(type)) {\",\n      \"\\t\\t\\t\\tfilters <- c(filters, list(list('name', '=', type)))\",\n      \"\\t\\t\\t}\",\n      \"\\t\\t\\tlinks <- self$links_list(filters = filters, count='none')$items\",\n      \"\\t\\t\\tif (is.null(type)) {\",\n      \"\\t\\t\\t\\tlinks\",\n      \"\\t\\t\\t} else {\",\n      \"\\t\\t\\t\\tprint(links[[1]]$name)\",\n      \"\\t\\t\\t}\",\n      \"\\t\\t},\",\n      \"\")\n}\n\ngenClassContent <- function(methodResources, resourceNames)\n{\n    arvadosMethods <- Map(function(resource, resourceName)\n    {\n        methodNames <- names(resource$methods)\n\n        functions <- Map(function(methodMetaData, methodName)\n        {\n            #NOTE: Index, show and destroy are aliases for the preferred names\n            # \"list\", \"get\" and \"delete\". Until they are removed from the discovery\n            # document we will filter them here.\n            if(methodName %in% c(\"index\", \"show\", \"destroy\"))\n               return(NULL)\n\n            methodName <- paste0(resourceName, \"_\", methodName)\n            unlist(c(\n                   getMethodDoc(methodName, methodMetaData),\n                   createMethod(methodName, methodMetaData)\n            ))\n\n        }, resource$methods, methodNames)\n\n        unlist(unname(functions))\n\n    }, methodResources, resourceNames)\n\n    arvadosMethods\n}\n\ngenAPIClassFooter <- function()\n{\n    c(\"\\t\\t#' @description Return the host name of this client's Arvados API server.\",\n      \"\\t\\t#' @return Hostname string.\",\n      \"\\t\\tgetHostName = function() private$host,\",\n      \"\",\n      \"\\t\\t#' @description Return the Arvados API token used by this client.\",\n      \"\\t\\t#' @return API token string.\",\n      \"\\t\\tgetToken = function() private$token,\",\n      \"\",\n      \"\\t\\t#' @description Set the RESTService object used by this client.\",\n      \"\\t\\tsetRESTService = function(newREST) private$REST <- newREST,\",\n      \"\",\n      \"\\t\\t#' @description Return the RESTService object used by this client.\",\n      \"\\t\\t#' @return RESTService object.\",\n      \"\\t\\tgetRESTService = function() private$REST\",\n      \"\\t),\",\n      \"\",\n      \"\\tprivate = list(\",\n      \"\\t\\ttoken = NULL,\",\n      \"\\t\\thost = NULL,\",\n      \"\\t\\tREST = NULL,\",\n      \"\\t\\tnumRetries = NULL,\",\n      \"\\t\\tget_project_by_list = function(uuid, select = NULL)\",\n      \"\\t\\t{\",\n      \"\\t\\t\\tself$groups_list(\",\n      \"\\t\\t\\t\\tfilters = list(list('uuid', '=', uuid), list('group_class', '=', 'project')),\",\n      \"\\t\\t\\t\\tselect = select,\",\n      \"\\t\\t\\t\\tcount = 'none'\",\n      \"\\t\\t\\t)$items[[1]]\",\n      \"\\t\\t}\",\n     
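# get_project_by_list is a private helper used by the generated\n     # project_properties_* methods; select limits which fields are returned.\n     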
 \"\\t),\",\n      \"\",\n      \"\\tcloneable = FALSE\",\n      \")\")\n}\n\ncreateMethod <- function(name, methodMetaData)\n{\n    args      <- getMethodArguments(methodMetaData)\n    signature <- getMethodSignature(name, args)\n    body      <- getMethodBody(methodMetaData)\n\n    c(signature,\n      \"\\t\\t{\",\n          body,\n      \"\\t\\t},\\n\")\n}\n\nnormalizeParamName <- function(name)\n{\n    # Downcase the first letter\n    name <- sub(\"^(\\\\w)\", \"\\\\L\\\\1\", name, perl=TRUE)\n    # Convert snake_case to camelCase\n    gsub(\"_(uuid\\\\b|id\\\\b|\\\\w)\", \"\\\\U\\\\1\", name, perl=TRUE)\n}\n\ngetMethodArguments <- function(methodMetaData)\n{\n    request <- methodMetaData$request\n    requestArgs <- NULL\n\n    if(!is.null(request))\n    {\n        resourceName <- normalizeParamName(request$properties[[1]][[1]])\n\n        if(request$required)\n            requestArgs <- resourceName\n        else\n            requestArgs <- paste(resourceName, \"=\", \"NULL\")\n    }\n\n    argNames <- names(methodMetaData$parameters)\n\n    args <- sapply(argNames, function(argName)\n    {\n        arg <- methodMetaData$parameters[[argName]]\n        argName <- normalizeParamName(argName)\n\n        if(!arg$required)\n        {\n            return(paste(argName, \"=\", \"NULL\"))\n        }\n\n        argName\n    })\n\n    c(requestArgs, args)\n}\n\ngetMethodSignature <- function(methodName, args)\n{\n    collapsedArgs <- paste0(args, collapse = \", \")\n    lineLengthLimit <- 40\n\n    if(nchar(collapsedArgs) > lineLengthLimit)\n    {\n        return(paste0(\"\\t\\t\",\n                      formatArgs(paste(methodName, \"= function(\"),\n                                 \"\\t\", args, \")\", lineLengthLimit)))\n    }\n    else\n    {\n        return(paste0(\"\\t\\t\", methodName, \" = function(\", collapsedArgs, \")\"))\n    }\n}\n\ngetMethodBody <- function(methodMetaData)\n{\n    url              <- getRequestURL(methodMetaData)\n    headers          <- getRequestHeaders()\n    requestQueryList <- getRequestQueryList(methodMetaData)\n    requestBody      <- getRequestBody(methodMetaData)\n    request          <- getRequest(methodMetaData)\n    response         <- getResponse(methodMetaData)\n    errorCheck       <- getErrorCheckingCode(methodMetaData)\n    returnStatement  <- getReturnObject()\n\n    body <- c(url,\n              headers,\n              requestQueryList, \"\",\n              requestBody, \"\",\n              request, response, \"\",\n              errorCheck, \"\",\n              returnStatement)\n\n    paste0(\"\\t\\t\\t\", body)\n}\n\ngetRequestURL <- function(methodMetaData)\n{\n    endPoint <- methodMetaData$path\n    endPoint <- stringr::str_replace_all(endPoint, \"\\\\{\", \"${\")\n    url <- c(paste0(\"endPoint <- stringr::str_interp(\\\"\", endPoint, \"\\\")\"),\n             paste0(\"url <- paste0(private$host, endPoint)\"))\n    url\n}\n\ngetRequestHeaders <- function()\n{\n    c(\"headers <- list(Authorization = paste(\\\"Bearer\\\", private$token), \",\n      \"                \\\"Content-Type\\\" = \\\"application/json\\\")\")\n}\n\ngetRequestQueryList <- function(methodMetaData)\n{\n    queryArgs <- names(Filter(function(arg) arg$location == \"query\",\n                        methodMetaData$parameters))\n\n    if(length(queryArgs) == 0)\n        return(\"queryArgs <- NULL\")\n\n    queryArgs <- sapply(queryArgs, function(arg) {\n        arg <- normalizeParamName(arg)\n        paste(arg, \"=\", arg)\n    })\n    collapsedArgs <- paste0(queryArgs, 
collapse = \", \")\n\n    lineLengthLimit <- 40\n\n    if(nchar(collapsedArgs) > lineLengthLimit)\n        return(formatArgs(\"queryArgs <- list(\", \"\\t\\t\\t\\t  \", queryArgs, \")\",\n                          lineLengthLimit))\n    else\n        return(paste0(\"queryArgs <- list(\", collapsedArgs, \")\"))\n}\n\ngetRequestBody <- function(methodMetaData)\n{\n    request <- methodMetaData$request\n\n    if(is.null(request) || !request$required)\n        return(\"body <- NULL\")\n\n    resourceName <- normalizeParamName(request$properties[[1]][[1]])\n\n    requestParameterName <- names(request$properties)[1]\n\n    c(paste0(\"if(length(\", resourceName, \") > 0)\"),\n      paste0(\"\\tbody <- jsonlite::toJSON(list(\", resourceName, \" = \", resourceName, \"), \"),\n             \"\\t                         auto_unbox = TRUE)\",\n      \"else\",\n      \"\\tbody <- NULL\")\n}\n\ngetRequest <- function(methodMetaData)\n{\n    method <- methodMetaData$httpMethod\n    c(paste0(\"response <- private$REST$http$exec(\\\"\", method, \"\\\", url, headers, body,\"),\n      \"                                   queryArgs, private$numRetries)\")\n}\n\ngetResponse <- function(methodMetaData)\n{\n    \"resource <- private$REST$httpParser$parseJSONResponse(response)\"\n}\n\ngetErrorCheckingCode <- function(methodMetaData)\n{\n    if (\"ensure_unique_name\" %in% names(methodMetaData$parameters)) {\n        body <- c(\"\\tif (identical(sub('Entity:.*', '', resource$errors), '//railsapi.internal/arvados/v1/collections: 422 Unprocessable ')) {\",\n                  \"\\t\\tresource <- cat(format('An object with the given name already exists with this owner. If you want to update it use the update method instead'))\",\n                  \"\\t} else {\",\n                  \"\\t\\tstop(resource$errors)\",\n                  \"\\t}\")\n    } else {\n        body <- \"\\tstop(resource$errors)\"\n    }\n    c(\"if(!is.null(resource$errors)) {\", body, \"}\")\n}\n\ngetReturnObject <- function()\n{\n    \"resource\"\n}\n\ngenAPIClassDoc <- function(methodResources, resourceNames)\n{\n    c(\"#' @examples\",\n      \"#' \\\\dontrun{\",\n      \"#' arv <- Arvados$new(\\\"your Arvados token\\\", \\\"example.arvadosapi.com\\\")\",\n      \"#'\",\n      \"#' collection <- arv$collections.get(\\\"uuid\\\")\",\n      \"#'\",\n      \"#' collectionList <- arv$collections.list(list(list(\\\"name\\\", \\\"like\\\", \\\"Test%\\\")))\",\n      \"#' collectionList <- listAll(arv$collections.list, list(list(\\\"name\\\", \\\"like\\\", \\\"Test%\\\")))\",\n      \"#'\",\n      \"#' deletedCollection <- arv$collections.delete(\\\"uuid\\\")\",\n      \"#'\",\n      \"#' updatedCollection <- arv$collections.update(list(name = \\\"New name\\\", description = \\\"New description\\\"),\",\n      \"#'                                             \\\"uuid\\\")\",\n      \"#'\",\n      \"#' createdCollection <- arv$collections.create(list(name = \\\"Example\\\",\",\n      \"#'                                                  description = \\\"This is a test collection\\\"))\",\n      \"#' }\",\n      \"\")\n}\n\ngetAPIClassMethodList <- function(methodResources, resourceNames)\n{\n    methodList <- unlist(unname(Map(function(resource, resourceName)\n    {\n        methodNames <- names(resource$methods)\n        paste0(resourceName,\n               \".\",\n               methodNames[!(methodNames %in% c(\"index\", \"show\", \"destroy\"))])\n\n    }, methodResources, resourceNames)))\n\n    hardcodedMethods <- c(\"projects.create\", 
\"projects.get\",\n                          \"projects.list\", \"projects.update\", \"projects.delete\")\n    paste0(\"#' \\t\\\\item{}{\\\\code{\\\\link{\", sort(c(methodList, hardcodedMethods)), \"}}}\")\n}\n\ngetMethodDoc <- function(methodName, methodMetaData)\n{\n    description <- paste(\"\\t\\t#' @description\", gsub(\"\\n\", \"\\n\\t\\t#' \", methodMetaData$description))\n    params      <- getMethodParams(methodMetaData)\n    returnValue <- paste(\"\\t\\t#' @return\", methodMetaData$response[[\"$ref\"]], \"object.\")\n\n    c(description, params, returnValue)\n}\n\ngetMethodParams <- function(methodMetaData)\n{\n    request <- methodMetaData$request\n    requestDoc <- NULL\n\n    if(!is.null(request))\n    {\n        requestDoc <- unname(unlist(sapply(request$properties, function(prop)\n                             {\n                                 className <- sapply(prop, function(ref) ref)\n                                 objectName <- normalizeParamName(className)\n                                 paste(\"\\t\\t#' @param\", objectName, className, \"object.\")\n                             })))\n    }\n\n    argNames <- names(methodMetaData$parameters)\n\n    argsDoc <- unname(unlist(sapply(argNames, function(argName)\n    {\n        arg <- methodMetaData$parameters[[argName]]\n        paste(\"\\t\\t#' @param\",\n              normalizeParamName(argName),\n              gsub(\"\\n\", \"\\n\\t\\t#' \", arg$description)\n        )\n    })))\n\n    c(requestDoc, argsDoc)\n}\n\n#NOTE: Utility functions:\n\n# This function is used to split very long lines of code into smaller chunks.\n# This is usually the case when we pass a lot of named argumets to a function.\nformatArgs <- function(prependAtStart, prependToEachSplit,\n                       args, appendAtEnd, lineLength)\n{\n    if(length(args) > 1)\n    {\n        args[1:(length(args) - 1)] <- paste0(args[1:(length(args) - 1)], \",\")\n    }\n\n    args[1] <- paste0(prependAtStart, args[1])\n    args[length(args)] <- paste0(args[length(args)], appendAtEnd)\n\n    argsLength <- length(args)\n    argLines <- list()\n    index <- 1\n\n    while(index <= argsLength)\n    {\n        line <- args[index]\n        index <- index + 1\n\n        while(nchar(line) < lineLength && index <= argsLength)\n        {\n            line <- paste(line, args[index])\n            index <- index + 1\n        }\n\n        argLines <- c(argLines, line)\n    }\n\n    argLines <- unlist(argLines)\n    argLinesLen <- length(argLines)\n\n    if(argLinesLen > 1)\n        argLines[2:argLinesLen] <- paste0(prependToEachSplit, argLines[2:argLinesLen])\n\n    argLines\n}\n\nargs <- commandArgs(TRUE)\nif (length(args) == 0) {\n   loc <- \"arvados-v1-discovery.json\"\n} else {\n   loc <- args[[1]]\n}\ndiscoveryDocument <- getAPIDocument(loc)\ngenerateAPI(discoveryDocument)\n"
  },
  {
    "path": "contrib/R-sdk/install_deps.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\noptions(repos=structure(c(CRAN=\"https://cloud.r-project.org/\")))\nif (!requireNamespace(\"devtools\")) {\n  install.packages(\"devtools\")\n}\nif (!requireNamespace(\"roxygen2\")) {\n  install.packages(\"roxygen2\")\n}\nif (!requireNamespace(\"knitr\")) {\n  install.packages(\"knitr\")\n}\nif (!requireNamespace(\"markdown\")) {\n  install.packages(\"markdown\")\n}\nif (!requireNamespace(\"XML\")) {\n  install.packages(\"XML\")\n}\n\ndevtools::install_dev_deps()\n"
  },
  {
    "path": "contrib/R-sdk/run_test.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndevtools::check()\n\nresults <- devtools::test()\nany_error <- any(as.data.frame(results)$error)\nif (any_error) {\n  q(\"no\", 1)\n} else {\n  q(\"no\", 0)\n}\n"
  },
  {
    "path": "contrib/R-sdk/tests/testthat/fakes/FakeArvados.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nFakeArvados <- R6::R6Class(\n\n    \"FakeArvados\",\n\n    public = list(\n\n        token      = NULL,\n        host       = NULL,\n        webdavHost = NULL,\n        http       = NULL,\n        httpParser = NULL,\n        REST       = NULL,\n\n        initialize = function(token      = NULL,\n                              host       = NULL,\n                              webdavHost = NULL,\n                              http       = NULL,\n                              httpParser = NULL)\n        {\n            self$token      <- token\n            self$host       <- host\n            self$webdavHost <- webdavHost\n            self$http       <- http\n            self$httpParser <- httpParser\n        },\n\n        getToken    = function() self$token,\n        getHostName = function() self$host,\n        getHttpClient = function() self$http,\n        getHttpParser = function() self$httpParser,\n        getWebDavHostName = function() self$webdavHost\n    ),\n\n    cloneable = FALSE\n)\n"
  },
  {
    "path": "contrib/R-sdk/tests/testthat/fakes/FakeHttpParser.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nFakeHttpParser <- R6::R6Class(\n\n    \"FakeHttrParser\",\n\n    public = list(\n\n        validContentTypes = NULL,\n        parserCallCount = NULL,\n\n        initialize = function()\n        {\n            self$parserCallCount <- 0\n            self$validContentTypes <- c(\"text\", \"raw\")\n        },\n\n        parseJSONResponse = function(serverResponse)\n        {\n            self$parserCallCount <- self$parserCallCount + 1\n\n            if(!is.null(serverResponse$content))\n                return(serverResponse$content)\n\n            serverResponse\n        },\n\n        parseResponse = function(serverResponse, outputType)\n        {\n            self$parserCallCount <- self$parserCallCount + 1\n\n            if(!is.null(serverResponse$content))\n                return(serverResponse$content)\n\n            serverResponse\n        },\n\n        getFileNamesFromResponse = function(serverResponse, uri)\n        {\n            self$parserCallCount <- self$parserCallCount + 1\n\n            if(!is.null(serverResponse$content))\n                return(serverResponse$content)\n\n            serverResponse\n        },\n\n        getFileSizesFromResponse = function(serverResponse, uri)\n        {\n            self$parserCallCount <- self$parserCallCount + 1\n\n            if(!is.null(serverResponse$content))\n                return(serverResponse$content)\n\n            serverResponse\n        }\n    )\n)\n"
  },
  {
    "path": "contrib/R-sdk/tests/testthat/fakes/FakeHttpRequest.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nFakeHttpRequest <- R6::R6Class(\n\n    \"FakeHttpRequest\",\n\n    public = list(\n\n        serverMaxElementsPerRequest = NULL,\n\n        content                                 = NULL,\n        expectedURL                             = NULL,\n        URLIsProperlyConfigured                 = NULL,\n        expectedQueryFilters                    = NULL,\n        queryFiltersAreCorrect                  = NULL,\n        requestHeaderContainsAuthorizationField = NULL,\n        requestHeaderContainsDestinationField   = NULL,\n        requestHeaderContainsRangeField         = NULL,\n        requestHeaderContainsContentTypeField   = NULL,\n        JSONEncodedBodyIsProvided               = NULL,\n        requestBodyIsProvided                   = NULL,\n\n        numberOfGETRequests        = NULL,\n        numberOfDELETERequests     = NULL,\n        numberOfPUTRequests        = NULL,\n        numberOfPOSTRequests       = NULL,\n        numberOfMOVERequests       = NULL,\n        numberOfCOPYRequests       = NULL,\n        numberOfgetConnectionCalls = NULL,\n\n        initialize = function(expectedURL      = NULL,\n                              serverResponse   = NULL,\n                              expectedFilters  = NULL)\n        {\n            if(is.null(serverResponse))\n            {\n                self$content <- list()\n                self$content$status_code <- 200\n            }\n            else\n                self$content <- serverResponse\n\n            self$expectedURL                             <- expectedURL\n            self$URLIsProperlyConfigured                 <- FALSE\n            self$expectedQueryFilters                    <- expectedFilters\n            self$queryFiltersAreCorrect                  <- FALSE\n            self$requestHeaderContainsAuthorizationField <- FALSE\n            self$requestHeaderContainsDestinationField   <- FALSE\n            self$requestHeaderContainsRangeField         <- FALSE\n            self$requestHeaderContainsContentTypeField   <- FALSE\n            self$JSONEncodedBodyIsProvided               <- FALSE\n            self$requestBodyIsProvided                   <- FALSE\n\n            self$numberOfGETRequests    <- 0\n            self$numberOfDELETERequests <- 0\n            self$numberOfPUTRequests    <- 0\n            self$numberOfPOSTRequests   <- 0\n            self$numberOfMOVERequests   <- 0\n            self$numberOfCOPYRequests   <- 0\n\n            self$numberOfgetConnectionCalls <- 0\n\n            self$serverMaxElementsPerRequest <- 5\n        },\n\n        exec = function(verb, url, headers = NULL, body = NULL, query = NULL,\n                        limit = NULL, offset = NULL, retryTimes = 0)\n        {\n            private$validateURL(url)\n            private$validateHeaders(headers)\n            private$validateFilters(queryFilters)\n            private$validateBody(body)\n\n            if(verb == \"GET\")\n                self$numberOfGETRequests <- self$numberOfGETRequests + 1\n            else if(verb == \"POST\")\n                self$numberOfPOSTRequests <- self$numberOfPOSTRequests + 1\n            else if(verb == \"PUT\")\n                self$numberOfPUTRequests <- self$numberOfPUTRequests + 1\n            else if(verb == \"DELETE\")\n                self$numberOfDELETERequests <- self$numberOfDELETERequests + 1\n            else if(verb == \"MOVE\")\n                self$numberOfMOVERequests 
<- self$numberOfMOVERequests + 1\n            else if(verb == \"COPY\")\n                self$numberOfCOPYRequests <- self$numberOfCOPYRequests + 1\n            else if(verb == \"PROPFIND\")\n            {\n                return(self$content)\n            }\n\n            if(!is.null(self$content$items_available))\n                return(private$getElements(offset, limit))\n            else\n                return(self$content)\n        },\n\n        getConnection = function(url, headers, openMode)\n        {\n            self$numberOfgetConnectionCalls <- self$numberOfgetConnectionCalls + 1\n            c(url, headers, openMode)\n        }\n    ),\n\n    private = list(\n\n        validateURL = function(url)\n        {\n            if(!is.null(self$expectedURL) && url == self$expectedURL)\n                self$URLIsProperlyConfigured <- TRUE\n        },\n\n        validateHeaders = function(headers)\n        {\n            if(!is.null(headers$Authorization))\n                self$requestHeaderContainsAuthorizationField <- TRUE\n\n            if(!is.null(headers$Destination))\n                self$requestHeaderContainsDestinationField <- TRUE\n\n            if(!is.null(headers$Range))\n                self$requestHeaderContainsRangeField <- TRUE\n\n            if(!is.null(headers[[\"Content-Type\"]]))\n                self$requestHeaderContainsContentTypeField <- TRUE\n        },\n\n        validateBody = function(body)\n        {\n            if(!is.null(body))\n            {\n                self$requestBodyIsProvided <- TRUE\n\n                if(class(body) == \"json\")\n                    self$JSONEncodedBodyIsProvided <- TRUE\n            }\n        },\n\n        validateFilters = function(filters)\n        {\n            if(!is.null(self$expectedQueryFilters) &&\n               !is.null(filters) &&\n               all.equal(unname(filters), self$expectedQueryFilters))\n            {\n                self$queryFiltersAreCorrect <- TRUE\n            }\n        },\n\n        getElements = function(offset, limit)\n        {\n            start <- 1\n            elementCount <- self$serverMaxElementsPerRequest\n\n            if(!is.null(offset))\n            {\n                if(offset > self$content$items_available)\n                    stop(\"Invalid offset\")\n\n                start <- offset + 1\n            }\n\n            if(!is.null(limit))\n                if(limit < self$serverMaxElementsPerRequest)\n                    elementCount <- limit - 1\n\n\n            serverResponse <- list()\n            serverResponse$items_available <- self$content$items_available\n            serverResponse$items <- self$content$items[start:(start + elementCount - 1)]\n\n            if(start + elementCount > self$content$items_available)\n            {\n                elementCount = self$content$items_available - start\n                serverResponse$items <- self$content$items[start:(start + elementCount)]\n            }\n\n            serverResponse\n        }\n    ),\n\n    cloneable = FALSE\n)\n"
  },
  {
    "path": "contrib/R-sdk/tests/testthat/fakes/FakeRESTService.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nFakeRESTService <- R6::R6Class(\n\n    \"FakeRESTService\",\n\n    public = list(\n\n        getResourceCallCount    = NULL,\n        createResourceCallCount = NULL,\n        listResourcesCallCount  = NULL,\n        deleteResourceCallCount = NULL,\n        updateResourceCallCount = NULL,\n        fetchAllItemsCallCount  = NULL,\n\n        createCallCount               = NULL,\n        deleteCallCount               = NULL,\n        moveCallCount                 = NULL,\n        copyCallCount                 = NULL,\n        getCollectionContentCallCount = NULL,\n        getResourceSizeCallCount      = NULL,\n        readCallCount                 = NULL,\n        writeCallCount                = NULL,\n        getConnectionCallCount        = NULL,\n        writeBuffer                   = NULL,\n        filtersAreConfiguredCorrectly = NULL,\n        bodyIsConfiguredCorrectly     = NULL,\n        expectedFilterContent         = NULL,\n\n        collectionContent = NULL,\n        returnContent     = NULL,\n\n        initialize = function(collectionContent = NULL, returnContent = NULL,\n                              expectedFilterContent = NULL)\n        {\n            self$getResourceCallCount    <- 0\n            self$createResourceCallCount <- 0\n            self$listResourcesCallCount  <- 0\n            self$deleteResourceCallCount <- 0\n            self$updateResourceCallCount <- 0\n            self$fetchAllItemsCallCount  <- 0\n\n            self$createCallCount               <- 0\n            self$deleteCallCount               <- 0\n            self$moveCallCount                 <- 0\n            self$copyCallCount                 <- 0\n            self$getCollectionContentCallCount <- 0\n            self$getResourceSizeCallCount      <- 0\n            self$readCallCount                 <- 0\n            self$writeCallCount                <- 0\n            self$getConnectionCallCount        <- 0\n            self$filtersAreConfiguredCorrectly <- FALSE\n            self$bodyIsConfiguredCorrectly     <- FALSE\n\n            self$collectionContent     <- collectionContent\n            self$returnContent         <- returnContent\n            self$expectedFilterContent <- expectedFilterContent\n        },\n\n        getWebDavHostName = function()\n        {\n        },\n\n        getResource = function(resource, uuid)\n        {\n            self$getResourceCallCount <- self$getResourceCallCount + 1\n            self$returnContent\n        },\n\n        listResources = function(resource, filters = NULL, limit = 100, offset = 0)\n        {\n            self$listResourcesCallCount <- self$listResourcesCallCount + 1\n\n            if(!is.null(self$expectedFilterContent) && !is.null(filters))\n               if(all.equal(filters, self$expectedFilterContent))\n                    self$filtersAreConfiguredCorrectly <- TRUE\n\n            self$returnContent\n        },\n\n        fetchAllItems = function(resourceURL, filters)\n        {\n            self$fetchAllItemsCallCount <- self$fetchAllItemsCallCount + 1\n\n            if(!is.null(self$expectedFilterContent) && !is.null(filters))\n               if(all.equal(filters, self$expectedFilterContent))\n                    self$filtersAreConfiguredCorrectly <- TRUE\n\n            self$returnContent\n        },\n\n        deleteResource = function(resource, uuid)\n        {\n            self$deleteResourceCallCount <- 
self$deleteResourceCallCount + 1\n            self$returnContent\n        },\n\n        updateResource = function(resource, uuid, newContent)\n        {\n            self$updateResourceCallCount <- self$updateResourceCallCount + 1\n\n            if(!is.null(self$returnContent) && !is.null(newContent))\n               if(all.equal(newContent, self$returnContent))\n                    self$bodyIsConfiguredCorrectly <- TRUE\n\n            self$returnContent\n        },\n\n        createResource = function(resource, content)\n        {\n            self$createResourceCallCount <- self$createResourceCallCount + 1\n\n            if(!is.null(self$returnContent) && !is.null(content))\n               if(all.equal(content, self$returnContent))\n                    self$bodyIsConfiguredCorrectly <- TRUE\n\n            self$returnContent\n        },\n\n        create = function(files, uuid)\n        {\n            self$createCallCount <- self$createCallCount + 1\n            self$returnContent\n        },\n\n        delete = function(relativePath, uuid)\n        {\n            self$deleteCallCount <- self$deleteCallCount + 1\n            self$returnContent\n        },\n\n        move = function(from, to, uuid)\n        {\n            self$moveCallCount <- self$moveCallCount + 1\n            self$returnContent\n        },\n\n        copy = function(from, to, uuid)\n        {\n            self$copyCallCount <- self$copyCallCount + 1\n            self$returnContent\n        },\n\n        getCollectionContent = function(uuid, relativePath = NULL)\n        {\n            self$getCollectionContentCallCount <- self$getCollectionContentCallCount + 1\n            if (!is.null(relativePath)) {\n                self$collectionContent[startsWith(self$collectionContent, relativePath)]\n            } else {\n                self$collectionContent\n            }\n        },\n\n        getResourceSize = function(uuid, relativePathToResource)\n        {\n            self$getResourceSizeCallCount <- self$getResourceSizeCallCount + 1\n            self$returnContent\n        },\n\n        read = function(relativePath, uuid, contentType = \"text\", offset = 0, length = 0)\n        {\n            self$readCallCount <- self$readCallCount + 1\n            self$returnContent\n        },\n\n        write = function(relativePath, uuid, content, contentType)\n        {\n            self$writeBuffer <- content\n            self$writeCallCount <- self$writeCallCount + 1\n            self$returnContent\n        },\n\n        getConnection = function(uuid, relativePath, openMode)\n        {\n            self$getConnectionCallCount <- self$getConnectionCallCount + 1\n            self$returnContent\n        }\n    ),\n\n    cloneable = FALSE\n)\n"
  },
  {
    "path": "contrib/R-sdk/tests/testthat/test-ArvadosFile.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nsource(\"fakes/FakeRESTService.R\")\n\ncontext(\"ArvadosFile\")\n\ntest_that(\"constructor raises error if  file name is empty string\", {\n\n    expect_that(ArvadosFile$new(\"\"), throws_error(\"Invalid name.\"))\n})\n\ntest_that(\"getFileListing always returns file name\", {\n\n    dog <- ArvadosFile$new(\"dog\")\n\n    expect_that(dog$getFileListing(), equals(\"dog\"))\n})\n\ntest_that(\"get always returns NULL\", {\n\n    dog <- ArvadosFile$new(\"dog\")\n\n    responseIsNull <- is.null(dog$get(\"something\"))\n    expect_true(responseIsNull)\n})\n\ntest_that(\"getFirst always returns NULL\", {\n\n    dog <- ArvadosFile$new(\"dog\")\n\n    responseIsNull <- is.null(dog$getFirst())\n    expect_true(responseIsNull)\n})\n\ntest_that(paste(\"getSizeInBytes returns zero if arvadosFile\",\n                \"is not part of a collection\"), {\n\n    dog <- ArvadosFile$new(\"dog\")\n\n    expect_that(dog$getSizeInBytes(), equals(0))\n})\n\ntest_that(paste(\"getSizeInBytes delegates size calculation\",\n                \"to REST service class\"), {\n\n    collectionContent <- c(\"animal\", \"animal/fish\")\n    returnSize <- 100\n    fakeREST <- FakeRESTService$new(collectionContent, returnSize)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    fish <- collection$get(\"animal/fish\")\n\n    resourceSize <- fish$getSizeInBytes()\n\n    expect_that(resourceSize, equals(100))\n})\n\ntest_that(\"getRelativePath returns path relative to the tree root\", {\n\n    animal <- Subcollection$new(\"animal\")\n    fish <- Subcollection$new(\"fish\")\n    shark <- ArvadosFile$new(\"shark\")\n\n    animal$add(fish)\n    fish$add(shark)\n\n    expect_that(shark$getRelativePath(), equals(\"animal/fish/shark\"))\n})\n\ntest_that(\"read raises exception if file doesn't belong to a collection\", {\n\n    dog <- ArvadosFile$new(\"dog\")\n\n    expect_that(dog$read(),\n                throws_error(\"ArvadosFile doesn't belong to any collection.\"))\n})\n\ntest_that(\"read raises exception offset or length is negative number\", {\n\n    collectionContent <- c(\"animal\", \"animal/fish\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    fish <- collection$get(\"animal/fish\")\n\n    expect_that(fish$read(contentType = \"text\", offset = -1),\n                throws_error(\"Offset and length must be positive values.\"))\n    expect_that(fish$read(contentType = \"text\", length = -1),\n                throws_error(\"Offset and length must be positive values.\"))\n    expect_that(fish$read(contentType = \"text\", offset = -1, length = -1),\n                throws_error(\"Offset and length must be positive values.\"))\n})\n\ntest_that(\"read delegates reading operation to REST service class\", {\n\n    collectionContent <- c(\"animal\", \"animal/fish\")\n    readContent <- \"my file\"\n    fakeREST <- FakeRESTService$new(collectionContent, readContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    fish <- collection$get(\"animal/fish\")\n\n    fileContent <- fish$read(\"text\")\n\n    expect_that(fileContent, equals(\"my file\"))\n    
expect_that(fakeREST$readCallCount, equals(1))\n})\n\ntest_that(paste(\"connection delegates connection creation to RESTService class\",\n                \"which returns curl connection opened in read mode when\",\n                \"'r' or 'rb' is passed as argument\"), {\n\n    collectionContent <- c(\"animal\", \"animal/fish\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    fish <- collection$get(\"animal/fish\")\n\n    connection <- fish$connection(\"r\")\n\n    expect_that(fakeREST$getConnectionCallCount, equals(1))\n})\n\ntest_that(paste(\"connection returns textConnection opened\",\n                \"in write mode when 'w' is passed as argument\"), {\n\n    collectionContent <- c(\"animal\", \"animal/fish\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    fish <- collection$get(\"animal/fish\")\n\n    connection <- fish$connection(\"w\")\n\n    writeLines(\"file\", connection)\n    writeLines(\"content\", connection)\n\n    writeResult <- textConnectionValue(connection)\n\n    expect_that(writeResult[1], equals(\"file\"))\n    expect_that(writeResult[2], equals(\"content\"))\n})\n\ntest_that(\"flush sends data stored in a connection to a REST server\", {\n\n    collectionContent <- c(\"animal\", \"animal/fish\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    fish <- collection$get(\"animal/fish\")\n\n    connection <- fish$connection(\"w\")\n\n    writeLines(\"file content\", connection)\n\n    fish$flush()\n\n    expect_that(fakeREST$writeBuffer, equals(\"file content\"))\n})\n\ntest_that(\"write raises exception if file doesn't belong to a collection\", {\n\n    dog <- ArvadosFile$new(\"dog\")\n\n    expect_that(dog$write(),\n                throws_error(\"ArvadosFile doesn't belong to any collection.\"))\n})\n\ntest_that(\"write delegates writing operation to REST service class\", {\n\n\n    collectionContent <- c(\"animal\", \"animal/fish\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    fish <- collection$get(\"animal/fish\")\n\n    fileContent <- fish$write(\"new file content\")\n\n    expect_that(fakeREST$writeBuffer, equals(\"new file content\"))\n})\n\ntest_that(paste(\"move raises exception if arvados file\",\n                \"doesn't belong to any collection\"), {\n\n    animal <- ArvadosFile$new(\"animal\")\n\n    expect_that(animal$move(\"new/location\"),\n                throws_error(\"ArvadosFile doesn't belong to any collection.\"))\n})\n\ntest_that(paste(\"move raises exception if newLocationInCollection\",\n                \"parameter is invalid\"), {\n\n    collectionContent <- c(\"animal\",\n                           \"animal/fish\",\n                           \"animal/dog\",\n                           \"animal/fish/shark\",\n                           \"ball\")\n\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n\n    collection <- Collection$new(api, 
\"myUUID\")\n    dog <- collection$get(\"animal/dog\")\n\n    expect_that(dog$move(\"objects/dog\"),\n                throws_error(\"Unable to get destination subcollection.\"))\n})\n\ntest_that(\"move raises exception if new location contains content with the same name\", {\n\n\n    collectionContent <- c(\"animal\",\n                           \"animal/fish\",\n                           \"animal/dog\",\n                           \"animal/fish/shark\",\n                           \"dog\")\n\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    dog <- collection$get(\"animal/dog\")\n\n    expect_that(dog$move(\"dog\"),\n                throws_error(\"Destination already contains content with same name.\"))\n\n})\n\ntest_that(\"move moves arvados file inside collection tree\", {\n\n    collectionContent <- c(\"animal\",\n                           \"animal/fish\",\n                           \"animal/dog\",\n                           \"animal/fish/shark\",\n                           \"ball\")\n\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    dog <- collection$get(\"animal/dog\")\n\n    dog$move(\"dog\")\n    dogIsNullOnOldLocation <- is.null(collection$get(\"animal/dog\"))\n    dogExistsOnNewLocation <- !is.null(collection$get(\"dog\"))\n\n    expect_true(dogIsNullOnOldLocation)\n    expect_true(dogExistsOnNewLocation)\n})\n\ntest_that(paste(\"copy raises exception if arvados file\",\n                \"doesn't belong to any collection\"), {\n\n    animal <- ArvadosFile$new(\"animal\")\n\n    expect_that(animal$copy(\"new/location\"),\n                throws_error(\"ArvadosFile doesn't belong to any collection.\"))\n})\n\ntest_that(paste(\"copy raises exception if location parameter is invalid\"), {\n\n    collectionContent <- c(\"animal\",\n                           \"animal/fish\",\n                           \"animal/dog\",\n                           \"animal/fish/shark\",\n                           \"ball\")\n\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n\n    collection <- Collection$new(api, \"myUUID\")\n    dog <- collection$get(\"animal/dog\")\n\n    expect_that(dog$copy(\"objects/dog\"),\n                throws_error(\"Unable to get destination subcollection.\"))\n})\n\ntest_that(\"copy raises exception if new location contains content with the same name\", {\n\n\n    collectionContent <- c(\"animal\",\n                           \"animal/fish\",\n                           \"animal/dog\",\n                           \"animal/fish/shark\",\n                           \"dog\")\n\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    dog <- collection$get(\"animal/dog\")\n\n    expect_that(dog$copy(\"dog\"),\n                throws_error(\"Destination already contains content with same name.\"))\n\n})\n\ntest_that(\"copy copies arvados file inside collection tree\", {\n\n    collectionContent <- c(\"animal\",\n                           \"animal/fish\",\n                           \"animal/dog\",\n                           
\"animal/fish/shark\",\n                           \"ball\")\n\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    dog <- collection$get(\"animal/dog\")\n\n    dog$copy(\"dog\")\n    dogExistsOnOldLocation <- !is.null(collection$get(\"animal/dog\"))\n    dogExistsOnNewLocation <- !is.null(collection$get(\"dog\"))\n\n    expect_true(dogExistsOnOldLocation)\n    expect_true(dogExistsOnNewLocation)\n})\n\ntest_that(\"duplicate performs deep cloning of Arvados file\", {\n    arvFile <- ArvadosFile$new(\"foo\")\n    newFile1 <- arvFile$duplicate()\n    newFile2 <- arvFile$duplicate(\"bar\")\n\n    expect_that(newFile1$getFileListing(), equals(arvFile$getFileListing()))\n    expect_that(newFile2$getFileListing(), equals(c(\"bar\")))\n})\n"
  },
  {
    "path": "contrib/R-sdk/tests/testthat/test-Collection.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nsource(\"fakes/FakeRESTService.R\")\n\ncontext(\"Collection\")\n\ntest_that(paste(\"constructor creates file tree from text content\",\n                \"retreived form REST service\"), {\n\n    collectionContent <- c(\"animal\", \"animal/fish\", \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    root <- collection$get(\"\")\n\n    expect_that(fakeREST$getCollectionContentCallCount, equals(1))\n    expect_that(root$getName(), equals(\"\"))\n})\n\ntest_that(paste(\"add raises exception if passed argumet is not\",\n                \"ArvadosFile or Subcollection\"), {\n\n    collectionContent <- c(\"animal\", \"animal/fish\", \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    newNumber <- 10\n\n    expect_that(collection$add(newNumber),\n    throws_error(paste(\"Expected AravodsFile or Subcollection\",\n                       \"object, got (numeric).\"), fixed = TRUE))\n})\n\ntest_that(\"add raises exception if relative path is not valid\", {\n\n    collectionContent <- c(\"animal\", \"animal/fish\", \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    newPen <- ArvadosFile$new(\"pen\")\n\n    expect_that(collection$add(newPen, \"objects\"),\n                throws_error(\"Subcollection objects doesn't exist.\",\n                              fixed = TRUE))\n})\n\ntest_that(\"add raises exception if content name is empty string\", {\n\n    collectionContent <- c(\"animal\", \"animal/fish\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    rootFolder <- Subcollection$new(\"\")\n\n    expect_that(collection$add(rootFolder),\n                throws_error(\"Content has invalid name.\", fixed = TRUE))\n})\n\ntest_that(paste(\"add adds ArvadosFile or Subcollection\",\n                \"to local tree structure and remote REST service\"), {\n\n    collectionContent <- c(\"animal\", \"animal/fish\", \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    newDog <- ArvadosFile$new(\"dog\")\n    collection$add(newDog, \"animal\")\n\n    dog <- collection$get(\"animal/dog\")\n    dogExistsInCollection <- !is.null(dog) && dog$getName() == \"dog\"\n\n    expect_true(dogExistsInCollection)\n    expect_that(fakeREST$createCallCount, equals(1))\n})\n\ntest_that(\"create raises exception if passed argumet is not character vector\", {\n\n    collectionContent <- c(\"animal\", \"animal/fish\", \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    expect_that(collection$create(10),\n                throws_error(\"Expected character vector, got (numeric).\",\n   
                          fixed = TRUE))\n})\n\ntest_that(paste(\"create adds files specified by fileNames\",\n                \"to local tree structure and remote REST service\"), {\n\n    fakeREST <- FakeRESTService$new()\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    collection$create(c(\"animal/dog\", \"animal/cat\"))\n\n    dog <- collection$get(\"animal/dog\")\n    cat <- collection$get(\"animal/cat\")\n    dogExistsInCollection <- !is.null(dog) && dog$getName() == \"dog\"\n    catExistsInCollection <- !is.null(cat) && cat$getName() == \"cat\"\n\n    expect_true(dogExistsInCollection)\n    expect_true(catExistsInCollection)\n    expect_that(fakeREST$createCallCount, equals(2))\n})\n\ntest_that(\"remove raises exception if passed argument is not character vector\", {\n\n    collectionContent <- c(\"animal\", \"animal/fish\", \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    expect_that(collection$remove(10),\n                throws_error(\"Expected character vector, got (numeric).\",\n                             fixed = TRUE))\n})\n\ntest_that(\"remove raises exception if user tries to remove root folder\", {\n\n    collectionContent <- c(\"animal\", \"animal/fish\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    expect_that(collection$remove(\"\"),\n                throws_error(\"You can't delete root folder.\", fixed = TRUE))\n})\n\ntest_that(paste(\"remove removes files specified by paths\",\n                \"from local tree structure and from remote REST service\"), {\n\n    collectionContent <- c(\"animal\", \"animal/fish\", \"animal/dog\", \"animal/cat\", \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    collection$remove(c(\"animal/dog\", \"animal/cat\"))\n\n    dog <- collection$get(\"animal/dog\")\n    cat <- collection$get(\"animal/cat\")\n    dogExistsInCollection <- !is.null(dog) && dog$getName() == \"dog\"\n    catExistsInCollection <- !is.null(cat) && cat$getName() == \"cat\"\n\n    expect_false(dogExistsInCollection)\n    expect_false(catExistsInCollection)\n    expect_that(fakeREST$deleteCallCount, equals(2))\n})\n\ntest_that(paste(\"move moves content to a new location inside file tree\",\n                \"and on REST service\"), {\n\n    collectionContent <- c(\"animal\", \"animal/dog\", \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    collection$move(\"animal/dog\", \"dog\")\n\n    dogIsNullOnOldLocation <- is.null(collection$get(\"animal/dog\"))\n    dogExistsOnNewLocation <- !is.null(collection$get(\"dog\"))\n\n    expect_true(dogIsNullOnOldLocation)\n    expect_true(dogExistsOnNewLocation)\n    expect_that(fakeREST$moveCallCount, equals(1))\n})\n\ntest_that(\"move raises exception if new location is not valid\", {\n\n    collectionContent <- c(\"animal\", \"animal/fish\", \"ball\")\n    fakeREST <- 
FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    expect_that(collection$move(\"fish\", \"object\"),\n                throws_error(\"Content you want to move doesn't exist in the collection.\",\n                             fixed = TRUE))\n})\n\ntest_that(\"getFileListing returns sorted collection content received from REST service\", {\n\n    collectionContent <- c(\"animal\", \"animal/fish\", \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    contentMatchExpected <- all(collection$getFileListing() ==\n                                c(\"animal\", \"animal/fish\", \"ball\"))\n\n    expect_true(contentMatchExpected)\n    #2 calls because Collection$new calls getFileListing once\n    expect_that(fakeREST$getCollectionContentCallCount, equals(2))\n\n})\n\ntest_that(\"get returns arvados file or subcollection from internal tree structure\", {\n\n    collectionContent <- c(\"animal\", \"animal/fish\", \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    fish <- collection$get(\"animal/fish\")\n    fishIsNotNull <- !is.null(fish)\n\n    expect_true(fishIsNotNull)\n    expect_that(fish$getName(), equals(\"fish\"))\n\n    ball <- collection$get(\"ball\")\n    ballIsNotNull <- !is.null(ball)\n\n    expect_true(ballIsNotNull)\n    expect_that(ball$getName(), equals(\"ball\"))\n})\n\ntest_that(paste(\"copy copies content to a new location inside file tree\",\n                \"and on REST service\"), {\n\n    collectionContent <- c(\"animal\", \"animal/dog\", \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    collection$copy(\"animal/dog\", \"dog\")\n\n    dogExistsOnOldLocation <- !is.null(collection$get(\"animal/dog\"))\n    dogExistsOnNewLocation <- !is.null(collection$get(\"dog\"))\n\n    expect_true(dogExistsOnOldLocation)\n    expect_true(dogExistsOnNewLocation)\n    expect_that(fakeREST$copyCallCount, equals(1))\n})\n\ntest_that(\"copy raises exception if new location is not valid\", {\n\n    collectionContent <- c(\"animal\", \"animal/fish\", \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n\n    expect_that(collection$copy(\"fish\", \"object\"),\n                throws_error(\"Content you want to copy doesn't exist in the collection.\",\n                             fixed = TRUE))\n})\n\ntest_that(\"refresh invalidates current tree structure\", {\n\n    collectionContent <- c(\"animal\", \"animal/fish\", \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"aaaaa-j7d0g-ccccccccccccccc\")\n\n    # Before refresh\n    fish <- collection$get(\"animal/fish\")\n    expect_that(fish$getName(), equals(\"fish\"))\n    expect_that(fish$getCollection()$uuid, 
equals(\"aaaaa-j7d0g-ccccccccccccccc\"))\n\n    collection$refresh()\n\n    # After refresh\n    expect_that(fish$getName(), equals(\"fish\"))\n    expect_true(is.null(fish$getCollection()))\n})\n"
  },
  {
    "path": "contrib/R-sdk/tests/testthat/test-CollectionTree.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncontext(\"CollectionTree\")\n\ntest_that(\"constructor creates file tree from character array properly\", {\n\n    collection <- \"myCollection\"\n    characterArray <- c(\"animal\",\n                        \"animal/dog\",\n                        \"boat\")\n\n    collectionTree <- CollectionTree$new(characterArray, collection)\n\n    root   <- collectionTree$getTree()\n    animal <- collectionTree$getElement(\"animal\")\n    dog    <- collectionTree$getElement(\"animal/dog\")\n    boat   <- collectionTree$getElement(\"boat\")\n\n    rootHasNoParent             <- is.null(root$getParent())\n    rootIsOfTypeSubcollection   <- \"Subcollection\" %in% class(root)\n    animalIsOfTypeSubcollection <- \"Subcollection\" %in% class(animal)\n    dogIsOfTypeArvadosFile      <- \"ArvadosFile\" %in% class(dog)\n    boatIsOfTypeArvadosFile     <- \"ArvadosFile\" %in% class(boat)\n    animalsParentIsRoot         <- animal$getParent()$getName() == root$getName()\n    animalContainsDog           <- animal$getFirst()$getName() == dog$getName()\n    dogsParentIsAnimal          <- dog$getParent()$getName() == animal$getName()\n    boatsParentIsRoot           <- boat$getParent()$getName() == root$getName()\n\n    allElementsBelongToSameCollection <- root$getCollection()   == \"myCollection\" &&\n                                         animal$getCollection() == \"myCollection\" &&\n                                         dog$getCollection()    == \"myCollection\" &&\n                                         boat$getCollection()   == \"myCollection\"\n\n    expect_that(root$getName(), equals(\"\"))\n    expect_true(rootIsOfTypeSubcollection)\n    expect_true(rootHasNoParent)\n    expect_true(animalIsOfTypeSubcollection)\n    expect_true(animalsParentIsRoot)\n    expect_true(animalContainsDog)\n    expect_true(dogIsOfTypeArvadosFile)\n    expect_true(dogsParentIsAnimal)\n    expect_true(boatIsOfTypeArvadosFile)\n    expect_true(boatsParentIsRoot)\n    expect_true(allElementsBelongToSameCollection)\n})\n\ntest_that(\"getElement returns element from tree if element exists on specified path\", {\n\n    collection <- \"myCollection\"\n    characterArray <- c(\"animal\",\n                        \"animal/dog\",\n                        \"boat\")\n\n    collectionTree <- CollectionTree$new(characterArray, collection)\n\n    dog <- collectionTree$getElement(\"animal/dog\")\n\n    expect_that(dog$getName(), equals(\"dog\"))\n})\n\ntest_that(\"getElement returns NULL from tree if element doesn't exists on specified path\", {\n\n    collection <- \"myCollection\"\n    characterArray <- c(\"animal\",\n                        \"animal/dog\",\n                        \"boat\")\n\n    collectionTree <- CollectionTree$new(characterArray, collection)\n\n    fish <- collectionTree$getElement(\"animal/fish\")\n    fishIsNULL <- is.null(fish)\n\n    expect_true(fishIsNULL)\n})\n\ntest_that(\"getElement trims ./ from start of relativePath\", {\n\n    collection <- \"myCollection\"\n    characterArray <- c(\"animal\",\n                        \"animal/dog\",\n                        \"boat\")\n\n    collectionTree <- CollectionTree$new(characterArray, collection)\n\n    dog <- collectionTree$getElement(\"animal/dog\")\n    dogWithDotSlash <- collectionTree$getElement(\"./animal/dog\")\n\n    expect_that(dogWithDotSlash$getName(), equals(dog$getName()))\n})\n\ntest_that(\"getElement trims / from end of 
relativePath\", {\n\n    collection <- \"myCollection\"\n    characterArray <- c(\"animal\",\n                        \"animal/dog\",\n                        \"boat\")\n\n    collectionTree <- CollectionTree$new(characterArray, collection)\n\n    animal <- collectionTree$getElement(\"animal\")\n    animalWithSlash <- collectionTree$getElement(\"animal/\")\n\n    expect_that(animalWithSlash$getName(), equals(animal$getName()))\n})\n"
  },
  {
    "path": "contrib/R-sdk/tests/testthat/test-HttpParser.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncontext(\"Http Parser\")\n\n\ntest_that(\"parseJSONResponse generates and returns JSON object from server response\", {\n\n    JSONContent <- \"{\\\"bar\\\":{\\\"foo\\\":[10]}}\"\n    serverResponse <- list()\n    serverResponse$content <- charToRaw(JSONContent)\n    serverResponse$headers[[\"Content-Type\"]] <- \"application/json; charset=utf-8\"\n    class(serverResponse) <- c(\"response\")\n\n    parser <- HttpParser$new()\n\n    result <- parser$parseJSONResponse(serverResponse)\n    barExists <- !is.null(result$bar)\n\n    expect_true(barExists)\n    expect_that(unlist(result$bar$foo), equals(10))\n})\n\ntest_that(paste(\"parseResponse generates and returns character vector\",\n                \"from server response if outputType is text\"), {\n\n    content <- \"random text\"\n    serverResponse <- list()\n    serverResponse$content <- charToRaw(content)\n    serverResponse$headers[[\"Content-Type\"]] <- \"text/plain; charset=utf-8\"\n    class(serverResponse) <- c(\"response\")\n\n    parser <- HttpParser$new()\n    parsedResponse <- parser$parseResponse(serverResponse, \"text\")\n\n    expect_that(parsedResponse, equals(\"random text\"))\n})\n\n\nwebDAVResponseSample =\n    paste0(\"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?><D:multistatus xmlns:\",\n           \"D=\\\"DAV:\\\"><D:response><D:href>/c=aaaaa-bbbbb-ccccccccccccccc/</D\",\n           \":href><D:propstat><D:prop><D:resourcetype><D:collection xmlns:D=\",\n           \"\\\"DAV:\\\"/></D:resourcetype><D:getlastmodified>Fri, 11 Jan 2018 1\",\n           \"1:11:11 GMT</D:getlastmodified><D:displayname></D:displayname><D\",\n           \":supportedlock><D:lockentry xmlns:D=\\\"DAV:\\\"><D:lockscope><D:exc\",\n           \"lusive/></D:lockscope><D:locktype><D:write/></D:locktype></D:loc\",\n           \"kentry></D:supportedlock></D:prop><D:status>HTTP/1.1 200 OK</D:s\",\n           \"tatus></D:propstat></D:response><D:response><D:href>/c=aaaaa-bbb\",\n           \"bb-ccccccccccccccc/myFile.exe</D:href><D:propstat><D:prop><D:r\",\n           \"esourcetype></D:resourcetype><D:getlastmodified>Fri, 12 Jan 2018\",\n           \" 22:22:22 GMT</D:getlastmodified><D:getcontenttype>text/x-c++src\",\n           \"; charset=utf-8</D:getcontenttype><D:displayname>myFile.exe</D\",\n           \":displayname><D:getcontentlength>25</D:getcontentlength><D:getet\",\n           \"ag>\\\"123b12dd1234567890\\\"</D:getetag><D:supportedlock><D:lockent\",\n           \"ry xmlns:D=\\\"DAV:\\\"><D:lockscope><D:exclusive/></D:lockscope><D:\",\n           \"locktype><D:write/></D:locktype></D:lockentry></D:supportedlock>\",\n           \"</D:prop><D:status>HTTP/1.1 200 OK</D:status></D:propstat></D:re\",\n           \"sponse></D:multistatus>\")\n\n\n\ntest_that(paste(\"getFileNamesFromResponse returns file names belonging to specific\",\n                \"collection parsed from webDAV server response\"), {\n\n    serverResponse <- list()\n    serverResponse$content <- charToRaw(webDAVResponseSample)\n    serverResponse$headers[[\"Content-Type\"]] <- \"text/xml; charset=utf-8\"\n    class(serverResponse) <- c(\"response\")\n    url <- URLencode(\"https://webdav/c=aaaaa-bbbbb-ccccccccccccccc\")\n\n    parser <- HttpParser$new()\n    result <- parser$getFileNamesFromResponse(serverResponse, url)\n    expectedResult <- \"myFile.exe\"\n    resultMatchExpected <- all.equal(result, expectedResult)\n\n    
expect_true(resultMatchExpected)\n})\n\ntest_that(paste(\"getFileSizesFromResponse returns file sizes\",\n                \"parsed from webDAV server response\"), {\n\n    serverResponse <- list()\n    serverResponse$content <- charToRaw(webDAVResponseSample)\n    serverResponse$headers[[\"Content-Type\"]] <- \"text/xml; charset=utf-8\"\n    class(serverResponse) <- c(\"response\")\n    url <- URLencode(\"https://webdav/c=aaaaa-bbbbb-ccccccccccccccc\")\n\n    parser <- HttpParser$new()\n    expectedResult <- \"25\"\n    result <- parser$getFileSizesFromResponse(serverResponse, url)\n    resultMatchExpected <- result == expectedResult\n\n    expect_true(resultMatchExpected)\n})\n"
  },
  {
    "path": "contrib/R-sdk/tests/testthat/test-HttpRequest.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncontext(\"Http Request\")\n\n\ntest_that(\"execute raises exception if http verb is not valid\", {\n\n    http <- HttpRequest$new()\n    expect_that(http$exec(\"FAKE VERB\", \"url\"),\n               throws_error(\"Http verb is not valid.\"))\n})\n\ntest_that(\"createQuery generates and encodes query portion of http\", {\n\n    http <- HttpRequest$new()\n    queryParams <- list()\n    queryParams$filters <- list(list(\"color\", \"=\", \"red\"))\n    queryParams$limit <- 20\n    queryParams$offset <- 50\n    expect_that(http$createQuery(queryParams),\n                equals(paste0(\"?filters=%5B%5B%22color%22%2C%22%3D%22%2C%22red\",\n                              \"%22%5D%5D&limit=20&offset=50\")))\n})\n\ntest_that(\"createQuery generates and empty string when queryParams is an empty list\", {\n\n    http <- HttpRequest$new()\n    expect_that(http$createQuery(list()), equals(\"\"))\n})\n\ntest_that(\"exec calls httr functions correctly\", {\n    httrNamespace <- getNamespace(\"httr\")\n\n    # Monkeypatch httr functions and assert that they are called later\n    add_headersCalled <- FALSE\n    unlockBinding(\"add_headers\", httrNamespace)\n    newAddHeaders <- function(h)\n    {\n        add_headersCalled <<- TRUE\n        list()\n    }\n    httrNamespace$add_headers <- newAddHeaders\n    lockBinding(\"add_headers\", httrNamespace)\n\n    expectedConfig <- list()\n    retryCalled <- FALSE\n    unlockBinding(\"RETRY\", httrNamespace)\n    newRETRY <- function(verb, url, body, config, times)\n    {\n        retryCalled <<- TRUE\n        expectedConfig <<- config\n    }\n    httrNamespace$RETRY <- newRETRY\n    lockBinding(\"RETRY\", httrNamespace)\n\n    Sys.setenv(\"ARVADOS_API_HOST_INSECURE\" = TRUE)\n    http <- HttpRequest$new()\n    http$exec(\"GET\", \"url\")\n\n    expect_true(add_headersCalled)\n    expect_true(retryCalled)\n    expect_that(expectedConfig$options, equals(list(ssl_verifypeer = 0L)))\n})\n\ntest_that(\"getConnection calls curl functions correctly\", {\n    curlNamespace <- getNamespace(\"curl\")\n\n    # Monkeypatch curl functions and assert that they are called later\n    curlCalled <- FALSE\n    unlockBinding(\"curl\", curlNamespace)\n    newCurl <- function(url, open, handle) curlCalled <<- TRUE\n    curlNamespace$curl <- newCurl\n    lockBinding(\"curl\", curlNamespace)\n\n    new_handleCalled <- FALSE\n    unlockBinding(\"new_handle\", curlNamespace)\n    newHandleFun <- function()\n    {\n        new_handleCalled <<- TRUE\n        list()\n    }\n    curlNamespace$new_handle <- newHandleFun\n    lockBinding(\"new_handle\", curlNamespace)\n\n    handle_setheadersCalled <- FALSE\n    unlockBinding(\"handle_setheaders\", curlNamespace)\n    newHandleSetHeaders <- function(h, .list) handle_setheadersCalled <<- TRUE\n    curlNamespace$handle_setheaders <- newHandleSetHeaders\n    lockBinding(\"handle_setheaders\", curlNamespace)\n\n    handle_setoptCalled <- FALSE\n    unlockBinding(\"handle_setopt\", curlNamespace)\n    newHandleSetOpt <- function(h, ssl_verifypeer) handle_setoptCalled <<- TRUE\n    curlNamespace$handle_setopt <- newHandleSetOpt\n    lockBinding(\"handle_setopt\", curlNamespace)\n\n\n    Sys.setenv(\"ARVADOS_API_HOST_INSECURE\" = TRUE)\n    http <- HttpRequest$new()\n    http$getConnection(\"location\", list(), \"r\")\n\n    expect_true(new_handleCalled)\n    expect_true(handle_setheadersCalled)\n    expect_true(handle_setoptCalled)\n  
  expect_true(curlCalled)\n})\n"
  },
  {
    "path": "contrib/R-sdk/tests/testthat/test-RESTService.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nsource(\"fakes/FakeArvados.R\")\nsource(\"fakes/FakeHttpRequest.R\")\nsource(\"fakes/FakeHttpParser.R\")\n\ncontext(\"REST service\")\n\ntest_that(\"getWebDavHostName calls REST service properly\", {\n\n    expectedURL <- \"https://host/arvados/v1/config\"\n    serverResponse <- list(Services = list(WebDAVDownload = list(ExternalURL = \"https://myWebDavServer.com\")))\n    httpRequest <- FakeHttpRequest$new(expectedURL, serverResponse)\n\n    REST <- RESTService$new(\"token\", \"host\",\n                            httpRequest, FakeHttpParser$new())\n\n    REST$getWebDavHostName()\n\n    expect_true(httpRequest$URLIsProperlyConfigured)\n    expect_false(httpRequest$requestHeaderContainsAuthorizationField)\n    expect_that(httpRequest$numberOfGETRequests, equals(1))\n})\n\ntest_that(\"getWebDavHostName returns webDAV host name properly\", {\n\n    serverResponse <- list(Services = list(WebDAVDownload = list(ExternalURL = \"https://myWebDavServer.com\")))\n    httpRequest <- FakeHttpRequest$new(expectedURL = NULL, serverResponse)\n\n    REST <- RESTService$new(\"token\", \"host\",\n                            httpRequest, FakeHttpParser$new())\n\n    expect_that(\"https://myWebDavServer.com\", equals(REST$getWebDavHostName()))\n})\n\ntest_that(\"create calls REST service properly\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    expectedURL <- \"https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file\"\n    fakeHttp <- FakeHttpRequest$new(expectedURL)\n    fakeHttpParser <- FakeHttpParser$new()\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, fakeHttpParser,\n                            0, \"https://webDavHost/\")\n\n    REST$create(\"file\", uuid)\n\n    expect_true(fakeHttp$URLIsProperlyConfigured)\n    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)\n    expect_that(fakeHttp$numberOfPUTRequests, equals(1))\n})\n\ntest_that(\"create raises exception if server response code is not between 200 and 300\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    response <- list()\n    response$status_code <- 404\n    fakeHttp <- FakeHttpRequest$new(serverResponse = response)\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, HttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    expect_that(REST$create(\"file\", uuid),\n                throws_error(\"Server code: 404\"))\n})\n\ntest_that(\"delete calls REST service properly\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    expectedURL <- \"https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file\"\n    fakeHttp <- FakeHttpRequest$new(expectedURL)\n    fakeHttpParser <- FakeHttpParser$new()\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, fakeHttpParser,\n                            0, \"https://webDavHost/\")\n\n    REST$delete(\"file\", uuid)\n\n    expect_true(fakeHttp$URLIsProperlyConfigured)\n    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)\n    expect_that(fakeHttp$numberOfDELETERequests, equals(1))\n})\n\ntest_that(\"delete raises exception if server response code is not between 200 and 300\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    response <- list()\n    response$status_code <- 404\n    fakeHttp <- FakeHttpRequest$new(serverResponse = response)\n\n    REST <- RESTService$new(\"token\", 
\"https://host/\",\n                            fakeHttp, HttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    expect_that(REST$delete(\"file\", uuid),\n                throws_error(\"Server code: 404\"))\n})\n\ntest_that(\"move calls REST service properly\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    expectedURL <- \"https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file\"\n    fakeHttp <- FakeHttpRequest$new(expectedURL)\n    fakeHttpParser <- FakeHttpParser$new()\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, fakeHttpParser,\n                            0, \"https://webDavHost/\")\n\n    REST$move(\"file\", \"newDestination/file\", uuid)\n\n    expect_true(fakeHttp$URLIsProperlyConfigured)\n    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)\n    expect_true(fakeHttp$requestHeaderContainsDestinationField)\n    expect_that(fakeHttp$numberOfMOVERequests, equals(1))\n})\n\ntest_that(\"move raises exception if server response code is not between 200 and 300\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    response <- list()\n    response$status_code <- 404\n    fakeHttp <- FakeHttpRequest$new(serverResponse = response)\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, HttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    expect_that(REST$move(\"file\", \"newDestination/file\", uuid),\n                throws_error(\"Server code: 404\"))\n})\n\ntest_that(\"copy calls REST service properly\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    expectedURL <- \"https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file\"\n    fakeHttp <- FakeHttpRequest$new(expectedURL)\n    fakeHttpParser <- FakeHttpParser$new()\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, fakeHttpParser,\n                            0, \"https://webDavHost/\")\n\n    REST$copy(\"file\", \"newDestination/file\", uuid)\n\n    expect_true(fakeHttp$URLIsProperlyConfigured)\n    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)\n    expect_true(fakeHttp$requestHeaderContainsDestinationField)\n    expect_that(fakeHttp$numberOfCOPYRequests, equals(1))\n})\n\ntest_that(\"copy raises exception if server response code is not between 200 and 300\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    response <- list()\n    response$status_code <- 404\n    fakeHttp <- FakeHttpRequest$new(serverResponse = response)\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, HttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    expect_that(REST$copy(\"file\", \"newDestination/file\", uuid),\n                throws_error(\"Server code: 404\"))\n})\n\ntest_that(\"getCollectionContent retreives correct content from WebDAV server\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    expectedURL <- \"https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc\"\n    returnContent <- list()\n    returnContent$status_code <- 200\n    returnContent$content <- c(\"animal\", \"animal/dog\", \"ball\")\n\n    fakeHttp <- FakeHttpRequest$new(expectedURL, returnContent)\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, FakeHttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    returnResult <- REST$getCollectionContent(uuid)\n    returnedContentMatchExpected <- 
all.equal(returnResult,\n                                              c(\"animal\", \"animal/dog\", \"ball\"))\n\n    expect_true(returnedContentMatchExpected)\n    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)\n})\n\ntest_that(\"getCollectionContent raises exception if server returns empty response\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    response <- \"\"\n    fakeHttp <- FakeHttpRequest$new(serverResponse = response)\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, FakeHttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    expect_that(REST$getCollectionContent(uuid),\n                throws_error(\"Response is empty, request may be misconfigured\"))\n})\n\ntest_that(\"getCollectionContent parses server response\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    fakeHttpParser <- FakeHttpParser$new()\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            FakeHttpRequest$new(), fakeHttpParser,\n                            0, \"https://webDavHost/\")\n\n    REST$getCollectionContent(uuid)\n\n    expect_that(fakeHttpParser$parserCallCount, equals(1))\n})\n\ntest_that(paste(\"getCollectionContent raises exception if server\",\n                \"response code is not between 200 and 300\"), {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    response <- list()\n    response$status_code <- 404\n    fakeHttp <- FakeHttpRequest$new(serverResponse = response)\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, HttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    expect_that(REST$getCollectionContent(uuid),\n                throws_error(\"Server code: 404\"))\n})\n\n\ntest_that(\"getResourceSize calls REST service properly\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    expectedURL <- \"https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file\"\n    response <- list()\n    response$status_code <- 200\n    response$content <- c(6, 2, 931, 12003)\n    fakeHttp <- FakeHttpRequest$new(expectedURL, response)\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, FakeHttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    returnResult <- REST$getResourceSize(\"file\", uuid)\n    returnedContentMatchExpected <- all.equal(returnResult,\n                                              c(6, 2, 931, 12003))\n\n    expect_true(fakeHttp$URLIsProperlyConfigured)\n    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)\n    expect_true(returnedContentMatchExpected)\n})\n\ntest_that(\"getResourceSize raises exception if server returns empty response\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    response <- \"\"\n    fakeHttp <- FakeHttpRequest$new(serverResponse = response)\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            
fakeHttp, FakeHttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    expect_that(REST$getResourceSize(\"file\", uuid),\n                throws_error(\"Response is empty, request may be misconfigured\"))\n})\n\ntest_that(paste(\"getResourceSize raises exception if server\",\n                \"response code is not between 200 and 300\"), {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    response <- list()\n    response$status_code <- 404\n    fakeHttp <- FakeHttpRequest$new(serverResponse = response)\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, HttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    expect_that(REST$getResourceSize(\"file\", uuid),\n                throws_error(\"Server code: 404\"))\n})\n\ntest_that(\"getResourceSize parses server response\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    fakeHttpParser <- FakeHttpParser$new()\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            FakeHttpRequest$new(), fakeHttpParser,\n                            0, \"https://webDavHost/\")\n\n    REST$getResourceSize(\"file\", uuid)\n\n    expect_that(fakeHttpParser$parserCallCount, equals(1))\n})\n\ntest_that(\"read calls REST service properly\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    expectedURL <- \"https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file\"\n    serverResponse <- list()\n    serverResponse$status_code <- 200\n    serverResponse$content <- \"file content\"\n\n    fakeHttp <- FakeHttpRequest$new(expectedURL, serverResponse)\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, FakeHttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    returnResult <- REST$read(\"file\", uuid, \"text\", 1024, 512)\n\n    expect_true(fakeHttp$URLIsProperlyConfigured)\n    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)\n    expect_true(fakeHttp$requestHeaderContainsRangeField)\n    expect_that(returnResult, equals(\"file content\"))\n})\n\ntest_that(\"read raises exception if server response code is not between 200 and 300\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    response <- list()\n    response$status_code <- 404\n    fakeHttp <- FakeHttpRequest$new(serverResponse = response)\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, HttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    expect_that(REST$read(\"file\", uuid),\n                throws_error(\"Server code: 404\"))\n})\n\ntest_that(\"read raises exception if contentType is not valid\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    fakeHttp <- FakeHttpRequest$new()\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, HttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    expect_that(REST$read(\"file\", uuid, \"some invalid content type\"),\n                throws_error(\"Invalid contentType. 
Please use text or raw.\"))\n})\n\ntest_that(\"read parses server response\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    fakeHttpParser <- FakeHttpParser$new()\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            FakeHttpRequest$new(), fakeHttpParser,\n                            0, \"https://webDavHost/\")\n\n    REST$read(\"file\", uuid, \"text\", 1024, 512)\n\n    expect_that(fakeHttpParser$parserCallCount, equals(1))\n})\n\ntest_that(\"write calls REST service properly\", {\n\n    fileContent <- \"new file content\"\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    expectedURL <- \"https://webDavHost/c=aaaaa-j7d0g-ccccccccccccccc/file\"\n    fakeHttp <- FakeHttpRequest$new(expectedURL)\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, FakeHttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    REST$write(\"file\", uuid, fileContent, \"text/html\")\n\n    expect_true(fakeHttp$URLIsProperlyConfigured)\n    expect_true(fakeHttp$requestBodyIsProvided)\n    expect_true(fakeHttp$requestHeaderContainsAuthorizationField)\n    expect_true(fakeHttp$requestHeaderContainsContentTypeField)\n})\n\ntest_that(\"write raises exception if server response code is not between 200 and 300\", {\n\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    fileContent <- \"new file content\"\n    response <- list()\n    response$status_code <- 404\n    fakeHttp <- FakeHttpRequest$new(serverResponse = response)\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, HttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    expect_that(REST$write(\"file\", uuid, fileContent, \"text/html\"),\n                throws_error(\"Server code: 404\"))\n})\n\ntest_that(\"getConnection calls REST service properly\", {\n    uuid <- \"aaaaa-j7d0g-ccccccccccccccc\"\n    fakeHttp <- FakeHttpRequest$new()\n\n    REST <- RESTService$new(\"token\", \"https://host/\",\n                            fakeHttp, FakeHttpParser$new(),\n                            0, \"https://webDavHost/\")\n\n    REST$getConnection(\"file\", uuid, \"r\")\n\n    expect_that(fakeHttp$numberOfgetConnectionCalls, equals(1))\n})\n"
  },
  {
    "path": "contrib/R-sdk/tests/testthat/test-Subcollection.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nsource(\"fakes/FakeRESTService.R\")\n\ncontext(\"Subcollection\")\n\ntest_that(\"getRelativePath returns path relative to the tree root\", {\n\n    animal <- Subcollection$new(\"animal\")\n\n    fish <- Subcollection$new(\"fish\")\n    animal$add(fish)\n\n    expect_that(animal$getRelativePath(), equals(\"animal\"))\n    expect_that(fish$getRelativePath(), equals(\"animal/fish\"))\n})\n\ntest_that(paste(\"getFileListing by default returns sorted path of all files\",\n                \"relative to the current subcollection\"), {\n\n    animal   <- Subcollection$new(\"animal\")\n    fish     <- Subcollection$new(\"fish\")\n    shark    <- ArvadosFile$new(\"shark\")\n    blueFish <- ArvadosFile$new(\"blueFish\")\n\n    animal$add(fish)\n    fish$add(shark)\n    fish$add(blueFish)\n\n    result <- animal$getFileListing()\n\n    #expect sorted array\n    expectedResult <- c(\"animal/fish/blueFish\", \"animal/fish/shark\")\n\n    resultsMatch <- length(expectedResult) == length(result) &&\n                    all(expectedResult == result)\n\n    expect_true(resultsMatch)\n})\n\ntest_that(paste(\"getFileListing returns sorted names of all direct children\",\n                \"if fullPath is set to FALSE\"), {\n\n    animal <- Subcollection$new(\"animal\")\n    fish   <- Subcollection$new(\"fish\")\n    shark  <- ArvadosFile$new(\"shark\")\n    dog    <- ArvadosFile$new(\"dog\")\n\n    animal$add(fish)\n    animal$add(dog)\n    fish$add(shark)\n\n    result <- animal$getFileListing(fullPath = FALSE)\n    expectedResult <- c(\"dog\", \"fish\")\n\n    resultsMatch <- length(expectedResult) == length(result) &&\n                    all(expectedResult == result)\n\n    expect_true(resultsMatch)\n})\n\ntest_that(\"add adds content to inside collection tree\", {\n\n    animal <- Subcollection$new(\"animal\")\n    fish   <- Subcollection$new(\"fish\")\n    dog    <- ArvadosFile$new(\"dog\")\n\n    animal$add(fish)\n    animal$add(dog)\n\n    animalContainsFish <- animal$get(\"fish\")$getName() == fish$getName()\n    animalContainsDog  <- animal$get(\"dog\")$getName()  == dog$getName()\n\n    expect_true(animalContainsFish)\n    expect_true(animalContainsDog)\n})\n\ntest_that(\"add raises exception if content name is empty string\", {\n\n    animal     <- Subcollection$new(\"animal\")\n    rootFolder <- Subcollection$new(\"\")\n\n    expect_that(animal$add(rootFolder),\n                throws_error(\"Content has invalid name.\", fixed = TRUE))\n})\n\ntest_that(paste(\"add raises exception if ArvadosFile/Subcollection\",\n                \"with same name already exists in the subcollection\"), {\n\n    animal     <- Subcollection$new(\"animal\")\n    fish       <- Subcollection$new(\"fish\")\n    secondFish <- Subcollection$new(\"fish\")\n    thirdFish  <- ArvadosFile$new(\"fish\")\n\n    animal$add(fish)\n\n    expect_that(animal$add(secondFish),\n                throws_error(paste(\"Subcollection already contains ArvadosFile or\",\n                                   \"Subcollection with same name.\"), fixed = TRUE))\n    expect_that(animal$add(thirdFish),\n                throws_error(paste(\"Subcollection already contains ArvadosFile or\",\n                                   \"Subcollection with same name.\"), fixed = TRUE))\n})\n\ntest_that(paste(\"add raises exception if passed argument is\",\n                \"not ArvadosFile or Subcollection\"), {\n\n    animal <- 
Subcollection$new(\"animal\")\n    number <- 10\n\n    expect_that(animal$add(number),\n                throws_error(paste(\"Expected AravodsFile or Subcollection object,\",\n                                   \"got (numeric).\"), fixed = TRUE))\n})\n\ntest_that(paste(\"add post content to a REST service\",\n                \"if subcollection belongs to a collection\"), {\n\n    collectionContent <- c(\"animal\", \"animal/fish\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n\n    collection <- Collection$new(api, \"myUUID\")\n    animal <- collection$get(\"animal\")\n    dog <- ArvadosFile$new(\"dog\")\n\n    animal$add(dog)\n\n    expect_that(fakeREST$createCallCount, equals(1))\n})\n\ntest_that(\"remove removes content from subcollection\", {\n\n    animal <- Subcollection$new(\"animal\")\n    fish   <- Subcollection$new(\"fish\")\n\n    animal$add(fish)\n    animal$remove(\"fish\")\n\n    returnValueAfterRemovalIsNull <- is.null(animal$get(\"fish\"))\n\n    expect_true(returnValueAfterRemovalIsNull)\n})\n\ntest_that(paste(\"remove raises exception\",\n                \"if content to remove doesn't exist in the subcollection\"), {\n\n    animal <- Subcollection$new(\"animal\")\n\n    expect_that(animal$remove(\"fish\"),\n                throws_error(paste(\"Subcollection doesn't contains ArvadosFile\",\n                                   \"or Subcollection with specified name.\")))\n})\n\ntest_that(\"remove raises exception if passed argument is not character vector\", {\n\n    animal <- Subcollection$new(\"animal\")\n    number <- 10\n\n    expect_that(animal$remove(number),\n                throws_error(paste(\"Expected character,\",\n                                   \"got (numeric).\"), fixed = TRUE))\n})\n\ntest_that(paste(\"remove removes content from REST service\",\n                \"if subcollection belongs to a collection\"), {\n\n    collectionContent <- c(\"animal\", \"animal/fish\", \"animal/dog\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    animal <- collection$get(\"animal\")\n\n    animal$remove(\"fish\")\n\n    expect_that(fakeREST$deleteCallCount, equals(1))\n})\n\ntest_that(paste(\"get returns ArvadosFile or Subcollection\",\n                \"if file or folder with given name exists\"), {\n\n    animal <- Subcollection$new(\"animal\")\n    fish   <- Subcollection$new(\"fish\")\n    dog    <- ArvadosFile$new(\"dog\")\n\n    animal$add(fish)\n    animal$add(dog)\n\n    returnedFish <- animal$get(\"fish\")\n    returnedDog  <- animal$get(\"dog\")\n\n    returnedFishIsSubcollection <- \"Subcollection\" %in% class(returnedFish)\n    returnedDogIsArvadosFile    <- \"ArvadosFile\"   %in% class(returnedDog)\n\n    expect_true(returnedFishIsSubcollection)\n    expect_that(returnedFish$getName(), equals(\"fish\"))\n\n    expect_true(returnedDogIsArvadosFile)\n    expect_that(returnedDog$getName(), equals(\"dog\"))\n})\n\ntest_that(paste(\"get returns NULL if file or folder\",\n                \"with given name doesn't exists\"), {\n\n    animal <- Subcollection$new(\"animal\")\n    fish   <- Subcollection$new(\"fish\")\n\n    animal$add(fish)\n\n    returnedDogIsNull <- is.null(animal$get(\"dog\"))\n\n    expect_true(returnedDogIsNull)\n})\n\ntest_that(\"getFirst returns first child in the subcollection\", {\n\n    animal 
<- Subcollection$new(\"animal\")\n    fish   <- Subcollection$new(\"fish\")\n\n    animal$add(fish)\n\n    expect_that(animal$getFirst()$getName(), equals(\"fish\"))\n})\n\ntest_that(\"getFirst returns NULL if subcollection contains no children\", {\n\n    animal <- Subcollection$new(\"animal\")\n\n    returnedElementIsNull <- is.null(animal$getFirst())\n\n    expect_true(returnedElementIsNull)\n})\n\ntest_that(paste(\"setCollection by default sets collection\",\n                \"filed of subcollection and all its children\"), {\n\n    animal <- Subcollection$new(\"animal\")\n    fish   <- Subcollection$new(\"fish\")\n    animal$add(fish)\n\n    animal$setCollection(\"myCollection\")\n\n    expect_that(animal$getCollection(), equals(\"myCollection\"))\n    expect_that(fish$getCollection(), equals(\"myCollection\"))\n})\n\ntest_that(paste(\"setCollection sets collection filed of subcollection only\",\n                \"if parameter setRecursively is set to FALSE\"), {\n\n    animal <- Subcollection$new(\"animal\")\n    fish   <- Subcollection$new(\"fish\")\n    animal$add(fish)\n\n    animal$setCollection(\"myCollection\", setRecursively = FALSE)\n    fishCollectionIsNull <- is.null(fish$getCollection())\n\n    expect_that(animal$getCollection(), equals(\"myCollection\"))\n    expect_true(fishCollectionIsNull)\n})\n\ntest_that(paste(\"move raises exception if subcollection\",\n                \"doesn't belong to any collection\"), {\n\n    animal <- Subcollection$new(\"animal\")\n\n    expect_that(animal$move(\"new/location\"),\n                throws_error(\"Subcollection doesn't belong to any collection\"))\n})\n\ntest_that(\"move raises exception if new location contains content with the same name\", {\n\n    collectionContent <- c(\"animal\",\n                           \"animal/fish\",\n                           \"animal/dog\",\n                           \"animal/fish/shark\",\n                           \"fish\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    fish <- collection$get(\"animal/fish\")\n\n    expect_that(fish$move(\"fish\"),\n                throws_error(\"Destination already contains content with same name.\"))\n\n})\n\ntest_that(paste(\"move raises exception if newLocationInCollection\",\n                \"parameter is invalid\"), {\n\n    collectionContent <- c(\"animal\",\n                           \"animal/fish\",\n                           \"animal/dog\",\n                           \"animal/fish/shark\",\n                           \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n\n    collection <- Collection$new(api, \"myUUID\")\n    fish <- collection$get(\"animal/fish\")\n\n    expect_that(fish$move(\"objects/dog\"),\n                throws_error(\"Unable to get destination subcollection.\"))\n})\n\ntest_that(\"move moves subcollection inside collection tree\", {\n\n    collectionContent <- c(\"animal\",\n                           \"animal/fish\",\n                           \"animal/dog\",\n                           \"animal/fish/shark\",\n                           \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    fish <- 
collection$get(\"animal/fish\")\n\n    fish$move(\"fish\")\n    fishIsNullOnOldLocation <- is.null(collection$get(\"animal/fish\"))\n    fishExistsOnNewLocation <- !is.null(collection$get(\"fish\"))\n\n    expect_true(fishIsNullOnOldLocation)\n    expect_true(fishExistsOnNewLocation)\n})\n\ntest_that(paste(\"getSizeInBytes returns zero if subcollection\",\n                \"is not part of a collection\"), {\n\n    animal <- Subcollection$new(\"animal\")\n\n    expect_that(animal$getSizeInBytes(), equals(0))\n})\n\ntest_that(paste(\"getSizeInBytes delegates size calculation\",\n                \"to REST service class\"), {\n\n    collectionContent <- c(\"animal\", \"animal/fish\")\n    returnSize <- 100\n    fakeREST <- FakeRESTService$new(collectionContent, returnSize)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    animal <- collection$get(\"animal\")\n\n    resourceSize <- animal$getSizeInBytes()\n\n    expect_that(resourceSize, equals(100))\n})\n\n#########################\ntest_that(paste(\"copy raises exception if subcollection\",\n                \"doesn't belong to any collection\"), {\n\n    animal <- Subcollection$new(\"animal\")\n\n    expect_that(animal$copy(\"new/location\"),\n                throws_error(\"Subcollection doesn't belong to any collection.\"))\n})\n\ntest_that(\"copy raises exception if new location contains content with the same name\", {\n\n    collectionContent <- c(\"animal\",\n                           \"animal/fish\",\n                           \"animal/dog\",\n                           \"animal/fish/shark\",\n                           \"fish\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    fish <- collection$get(\"animal/fish\")\n\n    expect_that(fish$copy(\"fish\"),\n                throws_error(\"Destination already contains content with same name.\"))\n\n})\n\ntest_that(paste(\"copy raises exception if location parameter is invalid\"), {\n\n    collectionContent <- c(\"animal\",\n                           \"animal/fish\",\n                           \"animal/dog\",\n                           \"animal/fish/shark\",\n                           \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n\n    collection <- Collection$new(api, \"myUUID\")\n    fish <- collection$get(\"animal/fish\")\n\n    expect_that(fish$copy(\"objects/dog\"),\n                throws_error(\"Unable to get destination subcollection.\"))\n})\n\ntest_that(\"copy copies subcollection inside collection tree\", {\n\n    collectionContent <- c(\"animal\",\n                           \"animal/fish\",\n                           \"animal/dog\",\n                           \"animal/fish/shark\",\n                           \"ball\")\n    fakeREST <- FakeRESTService$new(collectionContent)\n\n    api <- Arvados$new(\"myToken\", \"myHostName\")\n    api$setRESTService(fakeREST)\n    collection <- Collection$new(api, \"myUUID\")\n    fish <- collection$get(\"animal/fish\")\n\n    fish$copy(\"fish\")\n    fishExistsOnOldLocation <- !is.null(collection$get(\"animal/fish\"))\n    fishExistsOnNewLocation <- !is.null(collection$get(\"fish\"))\n\n    expect_true(fishExistsOnOldLocation)\n    
expect_true(fishExistsOnNewLocation)\n})\n\ntest_that(\"duplicate performs deep cloning of Subcollection\", {\n    foo <- ArvadosFile$new(\"foo\")\n    bar <- ArvadosFile$new(\"bar\")\n    sub <- Subcollection$new(\"qux\")\n    sub$add(foo)\n    sub$add(bar)\n\n    newSub1 <- sub$duplicate()\n    newSub2 <- sub$duplicate(\"quux\")\n\n    expect_that(newSub1$getFileListing(), equals(sub$getFileListing()))\n    expect_that(sort(newSub2$getFileListing()), equals(c(\"quux/bar\", \"quux/foo\")))\n})\n"
  },
  {
    "path": "contrib/R-sdk/tests/testthat/test-util.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncontext(\"Utility function\")\n\ntest_that(\"listAll always returns all resource items from server\", {\n\n    serverResponseLimit <- 3\n    itemsAvailable <- 8\n    items <- list(\"collection1\", \"collection2\", \"collection3\", \"collection4\",\n                  \"collection5\", \"collection6\", \"collection7\", \"collection8\")\n\n    testFunction <- function(offset, ...)\n    {\n        response <- list()\n        response$items_available <- itemsAvailable\n\n        maxIndex <- offset + serverResponseLimit\n        lastElementIndex <- if(maxIndex < itemsAvailable) maxIndex else itemsAvailable\n\n        response$items <- items[(offset + 1):lastElementIndex]\n        response\n    }\n\n    result <- listAll(testFunction)\n\n    expect_that(length(result), equals(8))\n})\n\ntest_that(\"trimFromStart trims string correctly if string starts with trimCharacters\", {\n\n    sample <- \"./something/random\"\n    trimCharacters <- \"./something/\"\n\n    result <- trimFromStart(sample, trimCharacters)\n\n    expect_that(result, equals(\"random\"))\n})\n\ntest_that(\"trimFromStart returns original string if string doesn't starts with trimCharacters\", {\n\n    sample <- \"./something/random\"\n    trimCharacters <- \"./nothing/\"\n\n    result <- trimFromStart(sample, trimCharacters)\n\n    expect_that(result, equals(\"./something/random\"))\n})\n\ntest_that(\"trimFromEnd trims string correctly if string ends with trimCharacters\", {\n\n    sample <- \"./something/random\"\n    trimCharacters <- \"/random\"\n\n    result <- trimFromEnd(sample, trimCharacters)\n\n    expect_that(result, equals(\"./something\"))\n})\n\ntest_that(\"trimFromEnd returns original string if string doesn't end with trimCharacters\", {\n\n    sample <- \"./something/random\"\n    trimCharacters <- \"specific\"\n\n    result <- trimFromStart(sample, trimCharacters)\n\n    expect_that(result, equals(\"./something/random\"))\n})\n\ntest_that(\"RListToPythonList converts nested R list to char representation of Python list\", {\n\n    sample <- list(\"insert\", list(\"random\", list(\"text\")), list(\"here\"))\n\n    result              <- RListToPythonList(sample)\n    resultWithSeparator <- RListToPythonList(sample, separator = \",+\")\n\n    expect_that(result, equals(\"[\\\"insert\\\", [\\\"random\\\", \\\"text\\\"], \\\"here\\\"]\"))\n    expect_that(resultWithSeparator,\n                equals(\"[\\\"insert\\\",+[\\\"random\\\",+\\\"text\\\"],+\\\"here\\\"]\"))\n})\n\ntest_that(\"appendToStartIfNotExist appends characters to beginning of a string\", {\n\n    sample <- \"New Year\"\n    charactersToAppend <- \"Happy \"\n\n    result <- appendToStartIfNotExist(sample, charactersToAppend)\n\n    expect_that(result, equals(\"Happy New Year\"))\n})\n\ntest_that(paste(\"appendToStartIfNotExist returns original string if string\",\n                \"doesn't start with specified characters\"), {\n\n    sample <- \"Happy New Year\"\n    charactersToAppend <- \"Happy\"\n\n    result <- appendToStartIfNotExist(sample, charactersToAppend)\n\n    expect_that(result, equals(\"Happy New Year\"))\n})\n\ntest_that(paste(\"splitToPathAndName splits relative path to file/folder\",\n                \"name and rest of the path\"), {\n\n    relativePath <- \"path/to/my/file.exe\"\n\n    result <- splitToPathAndName( relativePath)\n\n    expect_that(result$name, equals(\"file.exe\"))\n    expect_that(result$path, 
equals(\"path/to/my\"))\n})\n"
  },
  {
    "path": "contrib/R-sdk/tests/testthat.R",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nlibrary(testthat)\nlibrary(ArvadosR)\n\ntest_check(\"ArvadosR\")\n"
  },
  {
    "path": "contrib/README.md",
    "content": "## Arvados Client Contributions\n\n<!--\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: Apache-2.0\n-->\n\nThis directory contains client libraries and tools that can be used with an Arvados cluster. Core components of Arvados are regularly tested together to ensure they work in concert. These tools receive less frequent testing before release. The Arvados team is happy to receive bug reports and contributions to help improve them. However, bugs in these components will never be considered release-critical.\n\n* `arvados-bootstrap`: Scripts to initialize an Arvados cluster with data\n\n* `arvbash`: Arvados utility functions for the bash shell\n\n* `java-sdk-v2`: Java client SDK for Arvados\n\n* `R-sdk`: R client SDK for Arvados\n"
  },
  {
    "path": "contrib/arvados-bootstrap/LICENSE-2.0.txt",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "contrib/arvados-bootstrap/README.md",
    "content": "# Arvados Bootstrap Tools\n\n<!--\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: Apache-2.0\n-->\n\n## Introduction\n\nThis package provides scripts to initialize an Arvados cluster with data, built on top of the Python SDK. From inside this directory, you can install it by running:\n\n      pipx install .\n\nor if you're managing your own virtualenvs and have one activated:\n\n      pip install .\n\n## arv-export\n\n`arv-export` saves records from a running Arvados cluster to the directory where you run it. It finds Arvados credentials the same way arv-copy does, by reading `~/.config/arvados/ZZZZZ.conf`, where `ZZZZZ` is a five-alphanumeric cluster ID.\n\n`cd` to a directory where you want to save data and run:\n\n      arv-export [other options] OBJECT_UUID\n\nThis will subdirectories inside the current directory with data from Arvados. You can load this data with `arv-import` as described in the next section.\n\n## arv-import\n\n`arv-import` creates records on an Arvados cluster from records previously saved by `arv-export`. It finds Arvados credentials the same way arv-copy does, by reading `~/.config/arvados/ZZZZZ.conf`, where `ZZZZZ` is a five-alphanumeric cluster ID.\n\n`cd` to a directory where you previously saved data with `arv-export` and run:\n\n      arv-import [--project-uuid=UUID] [--no-block-copy] [other options] OBJECT_UUID\n\n`OBJECT_UUID` should match a UUID you exported with `arv-export`.\n\n### Using --no-block-copy\n\nIf you have administrator access to the destination cluster, then you have the option to write Keep blocks directly to the underlying storage and skip the normal upload using the `--no-block-copy` option. This is normally faster than uploading the blocks via HTTP, but you are entirely responsible for the separate data transfer. For example, if you use a standard filesystem-backed Keep volume, you might run:\n\n      rsync -r arv-export-data/keep/ root@keep.xurid.example:/var/lib/arvados/keep-data/\n\nThe exact process will vary by Keep volume and system configuration. Documenting all the possibilities is outside the scope of this document.\n\n## arv-seed\n\n### Synopsis\n\narv-seed is a script to bulk create Arvados objects from JSON files.\n\n        arv-seed [options] DIRECTORY [directory ...]\n\n### Configuration\n\nBy default, when running as root, this tool will read the cluster\nconfiguration file `$ARVADOS_CONFIG` (default `/etc/arvados/config.yml`),\nsearch for exactly one cluster configuration with a `Controller` endpoint and\n`SystemRootToken` configured, and use that.\n\nWhen running as a non-root user, this tool will search for user credentials\nthe same way as other Arvados command-line tools.\n\nYou can control how to load credentials using the `--client-from` option.\n\n### Input\n\nEach directory will be scanned for files named `NAME.TYPE.json`. `NAME` is any\nname you like. `TYPE` is the name of an Arvados API resource type, like `group`,\n`collection`, or `container_request`. `TYPE` can be spelled with any\npunctuation, use CamelCase or not, and be singular or plural.\n\nInput can be further controlled with \"base\" JSON that sets attributes for all\nobjects as well as additional parameters for the Arvados create method. 
Refer\nto the `--help` output for `--base-object` and `--parameters` for details.\n\n### Output\n\nWhen finished, the tool writes JSON output like this to stdout:\n\n      {\n        \"created\": {\"/path1\": {… Arvados object…}, …},\n        \"failed\": {\"/path2\": \"error message\", …}\n      }\n\nFor both `created` and `failed`, each key is the absolute path of a JSON file\nthat the tool read. For `created`, each value is the object that Arvados\nreturned after creation. For `failed`, each value is an error message that\ndescribes why no object could be created.\n\n### Logging\n\nThe tool always logs to syslog. It also logs to stderr if `$TERM` is set.\nControl what gets logged with the `--loglevel` option.\n\n### Exit codes\n\narv-seed uses the following exit codes:\n\n* 0: Created all objects successfully (at least one object was created)\n* 1: Early internal error\n* 2: Incorrect command line arguments\n* 11: Created no objects successfully (at least one was attempted)\n* 12: Mixed results: some objects were created, others failed\n* 66: Did not find any JSON input files (`EX_NOINPUT`)\n* 70: Internal error (`EX_SOFTWARE`)\n* 78: Could not initialize from configuration (`EX_CONFIG`)\n\n### Example\n\nRead JSON files from `~/arv-seed` and create all of the corresponding objects in the given project:\n\n      arv-seed --base='{\"owner_uuid\":\"zzzzz-j7d0g-12345abcde67890\"}' ~/arv-seed\n\n### systemd service example\n\n      [Unit]\n      After=arvados-railsapi.service arvados-controller.service network-online.target\n\n      [Service]\n      Type=oneshot\n      StandardOutput=file:%t/%N.json\n      ExecStart=/opt/arvados-bootstrap/bin/arv-seed /usr/local/share/arv-seed\n\n## arv-federation-migrate\n\n### Introduction\n\nWhen using multiple Arvados clusters that are not yet joined in a federation, a user has to create a separate account on each cluster.  Unfortunately, because each account represents a separate \"identity\", permissions granted to a user on one cluster do not transfer to another cluster, even if the accounts are associated with the same user.\n\nTo address this, Arvados supports \"federated user accounts\".  A federated user account is associated with a specific \"home\" cluster, and can be used to access other clusters in the federation that trust the home cluster.  When a user arrives at another cluster's Workbench, they select and log in to their home cluster, and then are returned to the starting cluster logged in with the federated user account.\n\nWhen setting up federation capabilities on existing clusters, some users might already have accounts on multiple clusters.  In order to have a single federated identity, users should be assigned a \"home\" cluster, and accounts associated with that user on the other (non-home) clusters should be migrated to the new federated user account.  The `arv-federation-migrate` tool assists with this.\n\nThis tool is designed to help an administrator who has access to all clusters in a federation to migrate users who have multiple accounts to a single federated account.\n\nAs part of migrating a user, any data or permissions associated with old user accounts will be reassigned to the federated account.\n\n### Step 1: Get a user report\n\n#### With a LoginCluster\n\nUse this procedure when the federation has a centralized user database, as specified by `LoginCluster` in the config file.\n\nSet the `ARVADOS_API_HOST` and `ARVADOS_API_TOKEN` environment variables to the credentials of an admin user on the cluster named by `LoginCluster`.\n
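\nFor example (the host name and token here are placeholders, in the same format as the `tokens.csv` sample shown later in this document):\n\n    $ export ARVADOS_API_HOST=x3982.arvadosapi.com\n    $ export ARVADOS_API_TOKEN=v2/x3982-gj3su-sb6meh2jf145s7x/98d40d70d8862e33d7398213435d1a71a96cf870\n\n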
It will automatically determine the other clusters that are listed in the federation.\n\nNext, run `arv-federation-migrate` with the `--report` flag:\n\n    $ arv-federation-migrate --report users.csv\n    Getting user list from x6b1s\n    Getting user list from x3982\n    Wrote users.csv\n\n#### Without a LoginCluster\n\nThe first step is to create `tokens.csv` and list each cluster and API token to access the cluster.  API tokens must be trusted tokens with administrator access.  This is a simple comma separated value file and can be created in a text editor.  Example:\n\n    x3982.arvadosapi.com,v2/x3982-gj3su-sb6meh2jf145s7x/98d40d70d8862e33d7398213435d1a71a96cf870\n    x6b1s.arvadosapi.com,v2/x6b1s-gj3su-dxc87btfv5kg91z/5575d980d3ff6231bb0c692281c42a7541c59417\n\nNext, run `arv-federation-migrate` with the `--tokens` and `--report` flags:\n\n    $ arv-federation-migrate --tokens tokens.csv --report users.csv\n    Reading tokens.csv\n    Getting user list from x6b1s\n    Getting user list from x3982\n    Wrote users.csv\n\n### Step 2: Update the user report\n\nThis will produce a report of users across all clusters listed in `tokens.csv`, sorted by email address.  This file can be loaded into a text editor or spreadsheet program for ease of viewing and editing.\n\n    email,username,user uuid,primary cluster/user\n    person_a@example.com,person_a,x6b1s-tpzed-hb5n7doogwhk6cf,x6b1s\n    person_b@example.com,person_b,x3982-tpzed-1vl3k7knf7qihbe,\n    person_b@example.com,person_b,x6b1s-tpzed-w4nhkx2rmrhlr54,\n\nThe fourth column describes that user's home cluster.  If a user only has one account (identified by email address), the column will be filled in and there is nothing to do.  If the column is blank, that means there is more than one Arvados account associated with the user.  Edit the file and provide the desired home cluster for each user as necessary (note: if there is a LoginCluster, all users will be migrated to the LoginCluster).  It is also possible to change the desired username for a user.  In this example, `person_b@example.com` is assigned the home cluster `x3982`.\n\n    email,username,user uuid,primary cluster/user\n    person_a@example.com,person_a,x6b1s-tpzed-hb5n7doogwhk6cf,x6b1s\n    person_b@example.com,person_b,x3982-tpzed-1vl3k7knf7qihbe,x3982\n    person_b@example.com,person_b,x6b1s-tpzed-w4nhkx2rmrhlr54,x3982\n\n### Step 3: Migrate users\n\nTo avoid disruption, advise users to log out and avoid running workflows while performing the migration.\n\nAfter updating `users.csv`, you can preview the migration using the `--dry-run` option (add `--tokens tokens.csv` if not using LoginCluster).  This will print out what actions the migration will take (as if it were happening) and report possible problems, but not make any actual changes on any cluster:\n\n    $ arv-federation-migrate --dry-run users.csv\n    (person_b@example.com) Migrating x6b1s-tpzed-w4nhkx2rmrhlr54 to x3982-tpzed-1vl3k7knf7qihbe\n\nExecute the migration using the `--migrate` option (add `--tokens tokens.csv` if not using LoginCluster):\n\n    $ arv-federation-migrate --migrate users.csv\n    (person_b@example.com) Migrating x6b1s-tpzed-w4nhkx2rmrhlr54 to x3982-tpzed-1vl3k7knf7qihbe\n\nAfter migration, users should select their home cluster when logging into Arvados Workbench.  If a user attempts to log into a migrated user account, they will be redirected to log in with their home cluster.\n"
  },
  {
    "path": "contrib/arvados-bootstrap/pyproject.toml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n[build-system]\nrequires = [\"setuptools ~= 80.9\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\nname = \"arvados-bootstrap\"\nversion = \"3.2.1\"\ndependencies = [\n  \"arvados-python-client == 3.2.1\",\n]\ndescription = \"Tools to bootstrap an Arvados cluster\"\nauthors = [\n  {name = \"Arvados\", email = \"info@arvados.org\"},\n]\nclassifiers = [\n  \"Development Status :: 4 - Beta\",\n  \"Environment :: Console\",\n  \"Intended Audience :: Science/Research\",\n  \"Operating System :: POSIX\",\n  \"Programming Language :: Python :: 3\",\n  \"Programming Language :: Python :: 3.10\",\n  \"Programming Language :: Python :: 3.11\",\n  \"Programming Language :: Python :: 3.12\",\n  \"Programming Language :: Python :: 3.13\",\n]\nlicense = \"Apache-2.0\"\nlicense-files = [\n  \"LICENSE-2.0.txt\",\n]\nreadme = \"README.md\"\nrequires-python = \"~= 3.10\"\n\n[project.scripts]\narv-export = \"arv_bootstrap.export_import:export_main\"\narv-federation-migrate = \"arv_bootstrap.federation_migrate:main\"\narv-import = \"arv_bootstrap.export_import:import_main\"\narv-seed = \"arv_bootstrap.seed:main\"\n\n[project.urls]\nHomepage = \"https://arvados.org\"\nDocumentation = \"https://doc.arvados.org\"\nRepository = \"https://github.com/arvados/arvados\"\nIssues = \"https://github.com/arvados/arvados/issues\"\nChangelog = \"https://arvados.org/releases/\"\n\n[tool.setuptools.packages.find]\nwhere = [\"src\"]\n"
  },
  {
    "path": "contrib/arvados-bootstrap/src/arv_bootstrap/__init__.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n"
  },
  {
    "path": "contrib/arvados-bootstrap/src/arv_bootstrap/export_import.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport importlib.metadata\nimport logging\nimport os\nimport re\nimport sys\n\nimport arvados.commands.arv_copy as arv_copy\nimport arvados.commands._util as arv_cmd\nimport arvados.util as arv_util\n\nfrom . import stubapi\n\nlogger = logging.getLogger('arvados.arv-export-import')\n\nclass ArgumentParser(argparse.ArgumentParser):\n    @classmethod\n    def _base_options(cls, cmdname=sys.argv[0]):\n        opts = argparse.ArgumentParser(add_help=False)\n        opts.add_argument(\n            '--version',\n            action='version',\n            version=f'{cmdname} {importlib.metadata.version(\"arvados-bootstrap\")}',\n            help='Print version and exit.',\n        )\n        opts.add_argument(\n            '--verbose', '-v',\n            dest='verbose',\n            action='store_true',\n            help='Verbose output.',\n        )\n        return opts\n\n    @classmethod\n    def _common_options(cls, verb):\n        opts = cls._base_options(f'arv-{verb}')\n        opts.add_argument(\n            '--force', '-f',\n            action='store_true',\n            help=f\"\"\"{verb.capitalize()} even if the object has already been {verb}ed.\n\"\"\")\n        opts.add_argument(\n            '--recursive',\n            action='store_true',\n            help=f\"\"\"Recursively {verb} any dependencies for this object\nand subprojects. (default)\n\"\"\")\n        opts.add_argument(\n            '--no-recursive',\n            dest='recursive',\n            action='store_false',\n            help=f\"\"\"Do not {verb} any dependencies or subprojects.\n\"\"\")\n\n        opts.add_argument(\n            '--block-copy',\n            dest='keep_block_copy',\n            action='store_true',\n            help=f\"\"\"Copy Keep blocks when {verb}ing collections. 
(default)\n\"\"\")\n        opts.add_argument(\n            '--no-block-copy',\n            dest='keep_block_copy',\n            action='store_false',\n            help=f\"\"\"Do not copy Keep blocks when {verb}ing collections.\nMust have administrator privileges to import collections.\n\"\"\")\n        opts.add_argument(\n            'object_uuid',\n            help=f\"\"\"The UUID of the collection or project to {verb}.\n\"\"\")\n        return opts\n\n    @classmethod\n    def _import_options(cls):\n        opts = cls._common_options('import')\n        opts.add_argument(\n            '--project-uuid',\n            help=\"\"\"The UUID of the project at the destination to which the\ncollection or project should be imported.\n\"\"\")\n        opts.add_argument(\n            '--storage-classes',\n            type=arv_cmd.UniqueSplit(),\n            help=\"\"\"Comma-separated list of storage classes to be used when\nsaving data to the destination Arvados instance.\n\"\"\")\n        opts.add_argument(\n            '--replication',\n            type=arv_cmd.RangedValue(int, range(1, sys.maxsize)),\n            metavar='N',\n            help=\"\"\"\nNumber of replicas per storage class for the copied collections at the destination.\nIf not provided (or if provided with invalid value),\nuse the destination's default replication-level setting (if found),\nor the fallback value 2.\n\"\"\")\n        return opts\n\n    def _set_common_defaults(self):\n        self.set_defaults(\n            # Common defaults should use the \"safer\" value.\n            export_all_fields=False,\n            force=False,\n            keep_block_copy=True,\n            prefer_cached_downloads=False,\n            project_uuid=None,\n            progress=None,\n            recursive=True,\n            varying_url_params=\"\",\n        )\n\n    @classmethod\n    def export_parser(cls):\n        parser = cls(\n            description=\"Export Arvados objects to a local filesystem\",\n            parents=[cls._common_options('export'), arv_cmd.retry_opt],\n        )\n        parser._set_common_defaults()\n        parser.set_defaults(\n            export_all_fields=True,\n            progress=True,\n            replication=1,\n            storage_classes=[],\n        )\n        return parser\n\n    @classmethod\n    def import_parser(cls):\n        parser = cls(\n            description=\"Import Arvados objects from a local filesystem\",\n            parents=[cls._import_options(), arv_cmd.retry_opt],\n        )\n        parser._set_common_defaults()\n        return parser\n\n\ndef setup_logging(name, args):\n    global logger\n    arvlogger = logging.getLogger('arvados')\n    logger = arvlogger.getChild(name)\n    if args.verbose:\n        arvlogger.setLevel(logging.DEBUG)\n    else:\n        arvlogger.setLevel(logging.INFO)\n        arvlogger.getChild('keep').setLevel(logging.WARNING)\n\n\ndef transfer(src_arv, dst_arv, args, verb):\n    if re.match(arv_util.collection_uuid_pattern, args.object_uuid):\n        result = arv_copy.copy_collection(args.object_uuid, src_arv, dst_arv, args)\n    elif re.match(arv_util.group_uuid_pattern, args.object_uuid):\n        result = arv_copy.copy_project(args.object_uuid, src_arv, dst_arv, args.project_uuid, args)\n    else:\n        logger.error(\"Unsupported object type for %s: %s\", verb, args.object_uuid)\n        return os.EX_DATAERR\n    if error := result.get('partial_error'):\n        logger.error(\n            \"Error copying %s: %s\",\n            args.object_uuid,\n           
 result if logger.isEnabledFor(logging.DEBUG) else error,\n        )\n        return os.EX_IOERR\n    return os.EX_OK\n\n\ndef export_main(arglist=None):\n    args = ArgumentParser.export_parser().parse_args(arglist)\n    setup_logging('arv-export', args)\n    src_arv = arv_copy.api_for_instance(args.object_uuid[:5], args.retries)\n    dst_arv = stubapi.StubArvadosAPI.for_cwd()\n    return transfer(src_arv, dst_arv, args, 'export')\n\n\ndef import_main(arglist=None):\n    args = ArgumentParser.import_parser().parse_args(arglist)\n    setup_logging('arv-import', args)\n    src_arv = stubapi.StubArvadosAPI.for_cwd()\n    try:\n        dst_id = args.project_uuid[:5]\n    except TypeError:\n        dst_id = ''\n    dst_arv = arv_copy.api_for_instance(dst_id, args.retries)\n    if args.project_uuid is None:\n        args.project_uuid = dst_arv.users().current().execute()['uuid']\n    if args.replication is None:\n        try:\n            args.replication = int(dst_arv.config()[\"Collections\"][\"DefaultReplication\"])\n        except (KeyError, TypeError, ValueError):\n            args.replication = 2\n    return transfer(src_arv, dst_arv, args, 'import')\n"
  },
  {
    "path": "contrib/arvados-bootstrap/src/arv_bootstrap/federation_migrate.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Migration tool for merging user accounts belonging to the same user\n# but on separate clusters to use a single user account managed by a\n# specific cluster.\n\nimport argparse\nimport csv\nimport hashlib\nimport hmac\nimport importlib.metadata\nimport os\nimport re\nimport sys\nimport urllib.parse\n\nimport arvados\nimport arvados.commands._util as arv_cmd\nimport arvados.util\nimport arvados.errors\n\nEMAIL=0\nUSERNAME=1\nUUID=2\nHOMECLUSTER=3\n\ndef connect_clusters(args):\n    clusters = {}\n    errors = []\n    loginCluster = None\n    if args.tokens:\n        print(\"Reading %s\" % args.tokens)\n        with open(args.tokens, \"rt\") as f:\n            for r in csv.reader(f):\n                if len(r) != 2:\n                    continue\n                host = r[0]\n                token = r[1]\n                print(\"Contacting %s\" % (host))\n                arv = arvados.api(host=host, token=token, cache=False, num_retries=args.retries)\n                clusters[arv._rootDesc[\"uuidPrefix\"]] = arv\n    else:\n        arv = arvados.api(cache=False, num_retries=args.retries)\n        rh = arv._rootDesc[\"remoteHosts\"]\n        tok = arv.api_client_authorizations().current().execute()\n        token = \"v2/%s/%s\" % (tok[\"uuid\"], tok[\"api_token\"])\n\n        for k,v in rh.items():\n            arv = arvados.api(host=v, token=token, cache=False, insecure=os.environ.get(\"ARVADOS_API_HOST_INSECURE\"))\n            clusters[k] = arv\n\n    for _, arv in clusters.items():\n        config = arv.configs().get().execute()\n        if config[\"Login\"][\"LoginCluster\"] != \"\" and loginCluster is None:\n            loginCluster = config[\"Login\"][\"LoginCluster\"]\n\n    print(\"Checking that the federation is well connected\")\n    for arv in clusters.values():\n        config = arv.configs().get().execute()\n        if loginCluster and config[\"Login\"][\"LoginCluster\"] != loginCluster and config[\"ClusterID\"] != loginCluster:\n            errors.append(\"Inconsistent login cluster configuration, expected '%s' on %s but was '%s'\" % (loginCluster, config[\"ClusterID\"], config[\"Login\"][\"LoginCluster\"]))\n            continue\n\n        if arv._rootDesc[\"revision\"] < \"20200331\":\n            errors.append(\"Arvados API server revision on cluster '%s' is too old, must be updated to at least Arvados 2.0.2 before running migration.\" % config[\"ClusterID\"])\n            continue\n\n        try:\n            cur = arv.users().current().execute()\n        except arvados.errors.ApiError as e:\n            errors.append(\"checking token for %s   %s\" % (arv._rootDesc[\"rootUrl\"], e))\n            continue\n\n        if not cur[\"is_admin\"]:\n            errors.append(\"User %s is not admin on %s\" % (cur[\"uuid\"], arv._rootDesc[\"uuidPrefix\"]))\n            continue\n\n        for r in clusters:\n            if r != arv._rootDesc[\"uuidPrefix\"] and r not in arv._rootDesc[\"remoteHosts\"]:\n                errors.append(\"%s is missing from remoteHosts of %s\" % (r, arv._rootDesc[\"uuidPrefix\"]))\n        for r in arv._rootDesc[\"remoteHosts\"]:\n            if r != \"*\" and r not in clusters:\n                print(\"WARNING: %s is federated with %s but %s is missing from the tokens file or the token is invalid\" % (arv._rootDesc[\"uuidPrefix\"], r, r))\n\n    return clusters, errors, loginCluster\n\n\ndef 
fetch_users(clusters, loginCluster):\n    rows = []\n    by_email = {}\n    by_username = {}\n\n    users = [\n        user\n        for prefix, arv in clusters.items()\n        for user in arvados.util.keyset_list_all(arv.users().list, bypass_federation=True)\n        if user['uuid'].startswith(prefix)\n    ]\n\n    # The users list is sorted by email.  Go through it and collect accounts\n    # with the same email; when we see a different email (or reach the end),\n    # call add_accum_rows() to generate the report rows with the \"home\n    # cluster\" set, and also fill in the by_email table.\n\n    users.sort(key=lambda u: (u[\"email\"], u[\"username\"] or \"\", u[\"uuid\"]))\n\n    accum = []\n    lastemail = None\n\n    def add_accum_rows():\n        # All accounts in accum share one email address.  If they also share a\n        # single UUID, the user has only one account and its cluster is the\n        # home cluster; otherwise leave the home cluster blank for the admin\n        # to fill in.\n        uuids = set(a[\"uuid\"] for a in accum)\n        homeuuid = ((len(uuids) == 1) and uuids.pop()) or \"\"\n        for a in accum:\n            r = (a[\"email\"], a[\"username\"], a[\"uuid\"], loginCluster or homeuuid[0:5])\n            by_email.setdefault(a[\"email\"], {})\n            by_email[a[\"email\"]][a[\"uuid\"]] = r\n            homeuuid_and_username = \"%s::%s\" % (r[HOMECLUSTER], a[\"username\"])\n            if homeuuid_and_username not in by_username:\n                by_username[homeuuid_and_username] = a[\"email\"]\n            elif by_username[homeuuid_and_username] != a[\"email\"]:\n                print(\"ERROR: the username '%s' is listed for both '%s' and '%s' on cluster '%s'\" % (r[USERNAME], r[EMAIL], by_username[homeuuid_and_username], r[HOMECLUSTER]))\n                exit(1)\n            rows.append(r)\n\n    for u in users:\n        if u[\"uuid\"].endswith(\"-anonymouspublic\") or u[\"uuid\"].endswith(\"-000000000000000\"):\n            continue\n        if lastemail is None:\n            lastemail = u[\"email\"]\n        if u[\"email\"] == lastemail:\n            accum.append(u)\n        else:\n            add_accum_rows()\n            lastemail = u[\"email\"]\n            accum = [u]\n\n    add_accum_rows()\n\n    return rows, by_email, by_username\n\n\ndef read_migrations(args, by_email, by_username):\n    rows = []\n    with open(args.migrate or args.dry_run, \"rt\") as f:\n        for r in csv.reader(f):\n            if r[EMAIL] == \"email\":\n                continue\n            by_email.setdefault(r[EMAIL], {})\n            by_email[r[EMAIL]][r[UUID]] = r\n\n            homeuuid_and_username = \"%s::%s\" % (r[HOMECLUSTER], r[USERNAME])\n            if homeuuid_and_username not in by_username:\n                by_username[homeuuid_and_username] = r[EMAIL]\n            elif by_username[homeuuid_and_username] != r[EMAIL]:\n                print(\"ERROR: the username '%s' is listed for both '%s' and '%s' on cluster '%s'\" % (r[USERNAME], r[EMAIL], by_username[homeuuid_and_username], r[HOMECLUSTER]))\n                exit(1)\n\n            rows.append(r)\n    return rows\n\ndef update_username(args, email, user_uuid, username, migratecluster, migratearv):\n    print(\"(%s) Updating username of %s to '%s' on %s\" % (email, user_uuid, username, migratecluster))\n    if args.dry_run:\n        return\n    try:\n        conflicts = migratearv.users().list(filters=[[\"username\", \"=\", username]], bypass_federation=True).execute()\n        if conflicts[\"items\"]:\n            # There's already a user with the username, move the old user out of the way\n            migratearv.users().update(uuid=conflicts[\"items\"][0][\"uuid\"],\n                                 
       bypass_federation=True,\n                                        body={\"user\": {\"username\": username+\"migrate\"}}).execute()\n        migratearv.users().update(uuid=user_uuid,\n                                    bypass_federation=True,\n                                    body={\"user\": {\"username\": username}}).execute()\n    except arvados.errors.ApiError as e:\n        print(\"(%s) Error updating username of %s to '%s' on %s: %s\" % (email, user_uuid, username, migratecluster, e))\n\n\ndef choose_new_user(args, by_email, email, userhome, username, old_user_uuid, clusters):\n    candidates = []\n    conflict = False\n    for b in by_email[email].values():\n        if b[2].startswith(userhome):\n            candidates.append(b)\n        if b[1] != username and b[3] == userhome:\n            print(\"(%s) Cannot migrate %s, conflicting usernames %s and %s\" % (email, old_user_uuid, b[1], username))\n            conflict = True\n            break\n    if conflict:\n        return None\n    if len(candidates) == 0:\n        if len(userhome) == 5 and userhome not in clusters:\n            print(\"(%s) Cannot migrate %s, unknown home cluster %s (typo?)\" % (email, old_user_uuid, userhome))\n            return None\n        print(\"(%s) No user listed with same email to migrate %s to %s, will create new user with username '%s'\" % (email, old_user_uuid, userhome, username))\n        if not args.dry_run:\n            oldhomecluster = old_user_uuid[0:5]\n            oldhomearv = clusters[oldhomecluster]\n            newhomecluster = userhome[0:5]\n            homearv = clusters[userhome]\n            user = None\n            try:\n                olduser = oldhomearv.users().get(uuid=old_user_uuid).execute()\n                conflicts = homearv.users().list(filters=[[\"username\", \"=\", username]],\n                                                 bypass_federation=True).execute()\n                if conflicts[\"items\"]:\n                    homearv.users().update(\n                        uuid=conflicts[\"items\"][0][\"uuid\"],\n                        bypass_federation=True,\n                        body={\"user\": {\"username\": username+\"migrate\"}}).execute()\n                user = homearv.users().create(\n                    body={\"user\": {\n                        \"email\": email,\n                        \"first_name\": olduser[\"first_name\"],\n                        \"last_name\": olduser[\"last_name\"],\n                        \"username\": username,\n                        \"is_active\": olduser[\"is_active\"]}}).execute()\n            except arvados.errors.ApiError as e:\n                print(\"(%s) Could not create user: %s\" % (email, str(e)))\n                return None\n\n            tup = (email, username, user[\"uuid\"], userhome)\n        else:\n            # dry run\n            tup = (email, username, \"%s-tpzed-xfakexfakexfake\" % (userhome[0:5]), userhome)\n        by_email[email][tup[2]] = tup\n        candidates.append(tup)\n    if len(candidates) > 1:\n        print(\"(%s) Multiple users listed to migrate %s to %s, use full uuid\" % (email, old_user_uuid, userhome))\n        return None\n    return candidates[0][2]\n\n\ndef activate_remote_user(args, email, homearv, migratearv, old_user_uuid, new_user_uuid):\n    # create a token for the new user and salt it for the\n    # migration cluster, then use it to access the migration\n    # cluster as the new user once before merging to ensure\n    # the new user is known on that cluster.\n    
migratecluster = migratearv._rootDesc[\"uuidPrefix\"]\n    try:\n        if not args.dry_run:\n            newtok = homearv.api_client_authorizations().create(body={\n                \"api_client_authorization\": {'owner_uuid': new_user_uuid}}).execute()\n        else:\n            newtok = {\"uuid\": \"dry-run\", \"api_token\": \"12345\"}\n    except arvados.errors.ApiError as e:\n        print(\"(%s) Could not create API token for %s: %s\" % (email, new_user_uuid, e))\n        return None\n\n    try:\n        findolduser = migratearv.users().list(filters=[[\"uuid\", \"=\", old_user_uuid]], bypass_federation=True).execute()\n        if len(findolduser[\"items\"]) == 0:\n            return False\n        if len(findolduser[\"items\"]) == 1:\n            olduser = findolduser[\"items\"][0]\n        else:\n            print(\"(%s) Unexpected result\" % (email))\n            return None\n    except arvados.errors.ApiError as e:\n        print(\"(%s) Could not retrieve user %s from %s, user may have already been migrated: %s\" % (email, old_user_uuid, migratecluster, e))\n        return None\n\n    # The salted token has the form v2/<token uuid>/<hex digest>, where the\n    # digest is the HMAC-SHA1 of the remote cluster ID keyed with the\n    # original token secret.\n    salted = 'v2/' + newtok[\"uuid\"] + '/' + hmac.new(newtok[\"api_token\"].encode(),\n                                                     msg=migratecluster.encode(),\n                                                     digestmod=hashlib.sha1).hexdigest()\n    try:\n        ru = urllib.parse.urlparse(migratearv._rootDesc[\"rootUrl\"])\n        if not args.dry_run:\n            newuser = arvados.api(host=ru.netloc, token=salted,\n                                  insecure=os.environ.get(\"ARVADOS_API_HOST_INSECURE\")).users().current().execute()\n        else:\n            newuser = {\"is_active\": True, \"username\": email.split('@')[0], \"is_admin\": False}\n    except arvados.errors.ApiError as e:\n        print(\"(%s) Error getting user info for %s from %s: %s\" % (email, new_user_uuid, migratecluster, e))\n        return None\n\n    if not newuser[\"is_active\"] and olduser[\"is_active\"]:\n        print(\"(%s) Activating user %s on %s\" % (email, new_user_uuid, migratecluster))\n        try:\n            if not args.dry_run:\n                migratearv.users().update(uuid=new_user_uuid, bypass_federation=True,\n                                          body={\"is_active\": True}).execute()\n        except arvados.errors.ApiError as e:\n            print(\"(%s) Could not activate user %s on %s: %s\" % (email, new_user_uuid, migratecluster, e))\n            return None\n\n    if olduser[\"is_admin\"] and not newuser[\"is_admin\"]:\n        print(\"(%s) Not migrating %s because user is admin but target user %s is not admin on %s. Please ensure the user admin status is the same on both clusters. 
Note that a federated admin account has admin privileges on the entire federation.\" % (email, old_user_uuid, new_user_uuid, migratecluster))\n        return None\n\n    return newuser\n\ndef migrate_user(args, migratearv, email, new_user_uuid, old_user_uuid):\n    if args.dry_run:\n        return\n    try:\n        new_owner_uuid = new_user_uuid\n        if args.data_into_subproject:\n            grp = migratearv.groups().create(body={\n                \"owner_uuid\": new_user_uuid,\n                \"name\": \"Migrated from %s (%s)\" % (email, old_user_uuid),\n                \"group_class\": \"project\"\n            }, ensure_unique_name=True).execute()\n            new_owner_uuid = grp[\"uuid\"]\n        migratearv.users().merge(old_user_uuid=old_user_uuid,\n                                    new_user_uuid=new_user_uuid,\n                                    new_owner_uuid=new_owner_uuid,\n                                    redirect_to_new_user=True).execute()\n    except arvados.errors.ApiError as e:\n        name_collision = re.search(r'Key \\(owner_uuid, name\\)=\\((.*?), (.*?)\\) already exists\\.\\n.*UPDATE \"(.*?)\"', e._get_reason())\n        if name_collision:\n            target_owner, rsc_name, rsc_type = name_collision.groups()\n            print(\"(%s) Cannot migrate to %s because both origin and target users have a %s named '%s'. Please rename the conflicting items or use --data-into-subproject to migrate all users' data into a special subproject.\" % (email, target_owner, rsc_type[:-1], rsc_name))\n        else:\n            print(\"(%s) Skipping user migration because of error: %s\" % (email, e))\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description='Migrate users to federated identity, see https://doc.arvados.org/admin/merge-remote-account.html',\n        parents=[arv_cmd.retry_opt],\n    )\n    parser.add_argument(\n        '--version',\n        action='version',\n        version=f\"{sys.argv[0]} {importlib.metadata.version('arvados-bootstrap')}\",\n        help='Print version and exit.')\n    parser.add_argument('--tokens', type=str, metavar='FILE', required=False, help=\"Read tokens from FILE. Not needed when using LoginCluster.\")\n    parser.add_argument('--data-into-subproject', action=\"store_true\", help=\"Migrate user's data into a separate subproject. 
This can be used to avoid name collisions between items in the two accounts.\")\n    group = parser.add_mutually_exclusive_group(required=True)\n    group.add_argument('--report', type=str, metavar='FILE', help=\"Generate report .csv file listing users by email address and their associated Arvados accounts.\")\n    group.add_argument('--migrate', type=str, metavar='FILE', help=\"Consume report .csv and migrate users to designated Arvados accounts.\")\n    group.add_argument('--dry-run', type=str, metavar='FILE', help=\"Consume report .csv and report how users would be migrated to designated Arvados accounts.\")\n    group.add_argument('--check', action=\"store_true\", help=\"Check that tokens are usable and the federation is well connected.\")\n    args = parser.parse_args()\n\n    clusters, errors, loginCluster = connect_clusters(args)\n\n    if errors:\n        for e in errors:\n            print(\"ERROR: \"+str(e))\n        exit(1)\n\n    if args.check:\n        print(\"Tokens file passed checks\")\n        exit(0)\n\n    rows, by_email, by_username = fetch_users(clusters, loginCluster)\n\n    if args.report:\n        with open(args.report, \"wt\") as f:\n            out = csv.writer(f)\n            out.writerow((\"email\", \"username\", \"user uuid\", \"home cluster\"))\n            for r in rows:\n                out.writerow(r)\n        print(\"Wrote %s\" % args.report)\n        return\n\n    if args.migrate or args.dry_run:\n        if args.dry_run:\n            print(\"Performing dry run\")\n\n        rows = read_migrations(args, by_email, by_username)\n\n        for r in rows:\n            email = r[EMAIL]\n            username = r[USERNAME]\n            old_user_uuid = r[UUID]\n            userhome = r[HOMECLUSTER]\n\n            if userhome == \"\":\n                print(\"(%s) Skipping %s, no home cluster specified\" % (email, old_user_uuid))\n                continue\n            if old_user_uuid.startswith(userhome):\n                migratecluster = old_user_uuid[0:5]\n                migratearv = clusters[migratecluster]\n                if migratearv.users().get(uuid=old_user_uuid).execute()[\"username\"] != username:\n                    update_username(args, email, old_user_uuid, username, migratecluster, migratearv)\n                continue\n\n            new_user_uuid = choose_new_user(args, by_email, email, userhome, username, old_user_uuid, clusters)\n            if new_user_uuid is None:\n                continue\n\n            remote_users = {}\n            got_error = False\n            for migratecluster in clusters:\n                # cluster where the migration is happening\n                migratearv = clusters[migratecluster]\n\n                # the user's new home cluster\n                newhomecluster = userhome[0:5]\n                homearv = clusters[newhomecluster]\n\n                newuser = activate_remote_user(args, email, homearv, migratearv, old_user_uuid, new_user_uuid)\n                if newuser is None:\n                    got_error = True\n                remote_users[migratecluster] = newuser\n\n            if not got_error:\n                for migratecluster in clusters:\n                    migratearv = clusters[migratecluster]\n                    newuser = remote_users[migratecluster]\n                    if newuser is False:\n                        continue\n\n                    print(\"(%s) Migrating %s to %s on %s\" % (email, old_user_uuid, new_user_uuid, migratecluster))\n\n                    migrate_user(args, migratearv, email, new_user_uuid, old_user_uuid)\n\n                    if newuser['username'] != username:\n                        update_username(args, email, new_user_uuid, username, migratecluster, migratearv)\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "contrib/arvados-bootstrap/src/arv_bootstrap/seed.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport contextlib\nimport dataclasses\nimport json\nimport logging\nimport logging.handlers\nimport os\nimport pathlib\nimport re\nimport sys\nimport urllib.parse\n\nfrom collections import abc\n\nimport arvados\nimport arvados.commands._util as cmd_util\nimport yaml\n\nlogger = logging.getLogger('arvados.commands.seed')\n_root_logger = logging.getLogger()\n\ndef is_mapping(arg):\n    return isinstance(arg, abc.Mapping)\n\n\n@dataclasses.dataclass\nclass ExceptHook:\n    logger: logging.Logger\n    exit_code: int = os.EX_SOFTWARE\n\n    def __call__(self, exc_type, exc_value, exc_tb):\n        self.logger.critical(\n            \"internal %s: %s\", exc_type.__name__, exc_value,\n            exc_info=self.logger.isEnabledFor(logging.DEBUG),\n        )\n        raise SystemExit(self.exit_code)\n\n    @contextlib.contextmanager\n    def using_exit_code(self, exit_code):\n        orig_code = self.exit_code\n        self.exit_code = exit_code\n        # We intentionally *don't* want to use `finally` here, because we don't\n        # want to restore the original code for an unhandled exception.\n        yield self\n        self.exit_code = orig_code\n\n\nclass Path(pathlib.Path):\n    if sys.version_info < (3, 12):\n        # pathlib.Path could not be subclassed directly before Python 3.12;\n        # forcing the flavour makes the subclass work on older interpreters.\n        _flavour = pathlib._posix_flavour\n\n    def __format__(self, format_spec=''):\n        if format_spec.endswith('`'):\n            return f\"`{super().__format__(format_spec[:-1])}`\"\n        else:\n            return super().__format__(format_spec)\n\n\n@dataclasses.dataclass\nclass ArvadosResources:\n    _resources: abc.Mapping\n\n    @classmethod\n    def from_client(cls, arv_client):\n        return cls(arv_client._resourceDesc['resources'])\n\n    @staticmethod\n    def _sep_caps(match):\n        return '_'.join(match.group(0).lower())\n\n    def singular_name(self, name):\n        if name == 'sys':\n            return name\n        elif name.endswith('ies'):\n            return f'{name[:-3]}y'\n        else:\n            return name.removesuffix('s')\n\n    def canonical_name(self, name):\n        s = re.sub(r'\\W', '_', name)\n        s = re.sub(r'[a-z][A-Z]', self._sep_caps, s)\n        s = s.lower()\n        if s in self._resources:\n            return s\n        elif s.endswith('y'):\n            s = f'{s[:-1]}ies'\n        else:\n            s += 's'\n        if s in self._resources:\n            return s\n        raise ValueError(f\"no resource found for {name!r}\")\n\n    def parameters(self, resource_name, method_name):\n        return self._resources[resource_name]['methods'][method_name]['parameters']\n\n\n@dataclasses.dataclass\nclass DirectoryLoader:\n    arv_client: arvados.api.ThreadSafeAPIClient\n    base: abc.Mapping | None\n    params: abc.Mapping | None\n    resources: ArvadosResources\n\n    OBJECT_BASE_PATH = Path('arvados_seed_object.json')\n    PARAMETERS_PATH = Path('arvados_seed_parameters.json')\n\n    @classmethod\n    def from_args(cls, args):\n        arv_client = arvados.api.api(**args.api_kwargs)\n        return cls(\n            arv_client,\n            args.object_base,\n            args.parameters,\n            ArvadosResources.from_client(arv_client),\n        )\n\n    def _load_defaults(self, instance_defaults, path):\n        if instance_defaults is not None:\n            return instance_defaults\n        try:\n            with path.open('rb') as json_file:\n                defaults = json.load(json_file)\n        except FileNotFoundError:\n            defaults = {}\n        if not is_mapping(defaults):\n            raise ValueError(f\"{path:`} does not contain a JSON object\")\n        return defaults\n\n    def _create_from(self, json_path, base, base_params):\n        prefix, _, rname = json_path.stem.rpartition('.')\n        if not prefix:\n            raise ValueError(f\"{json_path:`} does not have an object type in the name\")\n        rname = self.resources.canonical_name(rname)\n\n        kwargs = dict(base_params)\n        if 'ensure_unique_name' in self.resources.parameters(rname, 'create'):\n            kwargs.setdefault('ensure_unique_name', True)\n\n        with json_path.open('rb') as json_file:\n            json_body = json.load(json_file)\n        kwargs['body'] = {self.resources.singular_name(rname): base | json_body}\n\n        resource = getattr(self.arv_client, rname)\n        return resource().create(**kwargs).execute()\n\n    def build_from(self, dir_path):\n        base = self._load_defaults(self.base, dir_path / self.OBJECT_BASE_PATH)\n        base_params = self._load_defaults(self.params, dir_path / self.PARAMETERS_PATH)\n        created = {}\n        failed = {}\n        for path in sorted(dir_path.glob('*.json')):\n            # Skip the defaults files; they configure this run and do not\n            # define objects themselves.\n            if path.name in (self.OBJECT_BASE_PATH.name, self.PARAMETERS_PATH.name):\n                continue\n            path_key = str(path.absolute())\n            try:\n                result = self._create_from(path, base, base_params)\n            except Exception as err:\n                logger.warning(\n                    \"failed to load %s: %s\", path, err,\n                    exc_info=logger.isEnabledFor(logging.DEBUG),\n                )\n                failed[path_key] = str(err)\n            else:\n                created[path_key] = result\n        return (created, failed)\n\n\nclass ConfigLoader:\n    DEFAULT_CONFIG_PATH = Path('/etc/arvados/config.yml')\n    DISCOVERY_SERVICE_PATH = 'discovery/v1/apis/{api}/{apiVersion}/rest'\n\n    @classmethod\n    def _load_yaml(cls, path):\n        try:\n            with open(path, 'rb') as yaml_file:\n                result = yaml.safe_load(yaml_file)\n        except OSError as err:\n            raise ValueError(f\"error reading {path:`}: {err}\") from None\n        if not is_mapping(result):\n            raise ValueError(f\"{path:`} is not a YAML object\")\n        return result\n\n    @classmethod\n    def _cluster_config_path(cls):\n        return Path(os.environ.get('ARVADOS_CONFIG', cls.DEFAULT_CONFIG_PATH))\n\n    @classmethod\n    def _from_one_cluster(cls, config):\n        try:\n            controller_url = config['Services']['Controller']['ExternalURL']\n            token = config['SystemRootToken']\n        except (KeyError, TypeError) as err:\n            raise ValueError(f\"error loading cluster configuration: {err}\") from None\n        try:\n            insecure = config['TLS']['Insecure']\n        except (KeyError, TypeError):\n            insecure = False\n        return {\n            'version': 'v1',\n            'discoveryServiceUrl': urllib.parse.urljoin(controller_url, cls.DISCOVERY_SERVICE_PATH),\n            'token': token,\n            'insecure': insecure,\n        }\n\n    @classmethod\n    def from_cluster(cls, arg):\n        path = cls._cluster_config_path()\n        whole_config = cls._load_yaml(path)\n        try:\n            configs = whole_config['Clusters'].items()\n        except (AttributeError, KeyError, TypeError) as err:\n            raise ValueError(f\"error loading clusters configuration: {err}\") from None\n        kwargs = None\n        kwargs_id = None\n        for cluster_id, config in configs:\n            try:\n                new_kwargs = cls._from_one_cluster(config)\n            except ValueError:\n                continue\n            if kwargs is None:\n                kwargs = new_kwargs\n                kwargs_id = cluster_id\n            else:\n                raise ValueError(\n                    f\"{path:`} has configuration for both {kwargs_id} and {cluster_id} - \"\n                    \"specify a cluster ID\",\n                )\n        if kwargs is None:\n            raise ValueError(f\"no usable cluster configuration found in {path:`}\")\n        else:\n            return kwargs\n\n    @classmethod\n    def from_cluster_id(cls, arg):\n        path = cls._cluster_config_path()\n        whole_config = cls._load_yaml(path)\n        try:\n            config = whole_config['Clusters'][arg]\n        except (AttributeError, KeyError, TypeError) as err:\n            raise ValueError(f\"error loading {arg} configuration from {path:`}: {err}\") from None\n        return cls._from_one_cluster(config)\n\n    @classmethod\n    def from_env(cls, arg):\n        arvados.config.initialize('')\n        return cls.from_user(arg)\n\n    @classmethod\n    def from_user(cls, arg):\n        return arvados.api.api_kwargs_from_config('v1')\n\n    @classmethod\n    def parse_arg(cls, arg):\n        try:\n            constructor = getattr(cls, f'from_{arg}')\n        except AttributeError:\n            # Cluster IDs are exactly five alphanumeric characters.\n            if re.fullmatch(r'[a-z0-9]{5}', arg):\n                constructor = cls.from_cluster_id\n            else:\n                raise ValueError(f\"invalid configuration source {arg!r}\") from None\n        return constructor(arg)\n\n    @classmethod\n    def default_config(cls):\n        if os.geteuid() == 0:\n            return cls.from_cluster(None)\n        else:\n            return cls.from_user(None)\n\n\ndef parse_loglevel(arg):\n    # logging.getLevelNamesMapping() is only available on Python 3.11+;\n    # getLevelName() also covers the supported Python 3.10 baseline.\n    level = logging.getLevelName(arg.upper())\n    if isinstance(level, int):\n        return level\n    raise ValueError(f\"invalid log level {arg!r}\")\n\n\ndef validate_mapping(arg):\n    if is_mapping(arg):\n        return arg\n    else:\n        raise ValueError(\"value is not a JSON object\")\n\n\ndef parse_arguments(arglist=None):\n    parser = argparse.ArgumentParser(\n        prog=\"arv-seed\",\n        description=\"Create multiple Arvados objects from a directory of JSON files\",\n    )\n    parser.add_argument(\n        '--client-from',\n        metavar='SOURCE',\n        type=ConfigLoader.parse_arg,\n        dest='api_kwargs',\n        help=\"\"\"\nWhere to find the Arvados API server and token. Specify one of a cluster ID,\n`cluster`, `env`, or `user`. The first two options load the cluster configuration\nfile from `$ARVADOS_CONFIG` or `/etc/arvados/config.yml`.\n\"\"\")\n    parser.add_argument(\n        '--loglevel',\n        metavar='LEVEL',\n        type=parse_loglevel,\n        default=logging.INFO,\n        help=\"\"\"\nThe name of a log level like `debug`, `info`, `warning`, or `error`.\n\"\"\")\n    parser.add_argument(\n        '--object-base', '--base',\n        metavar='BASE_JSON',\n        type=cmd_util.JSONArgument(validate_mapping, \"JSON object\"),\n        help=\"\"\"\nJSON object or path to set common attributes for all created objects.\nIf not set, will try to read `arvados_seed_object.json` in each directory.\n\"\"\")\n    parser.add_argument(\n        '--parameters', '--params',\n        metavar='PARAMS_JSON',\n        type=cmd_util.JSONArgument(validate_mapping, \"JSON object\"),\n        help=\"\"\"\nJSON object or path to set parameters when creating objects.\nIf not set, will try to read `arvados_seed_parameters.json` in each directory.\n\"\"\")\n    parser.add_argument(\n        'dir_paths',\n        metavar='DIRECTORY',\n        type=Path,\n        nargs=argparse.ONE_OR_MORE,\n        help=\"\"\"\nDirectory to read object JSON files from. Object files must be named\n`<name>.<type>.json`, where `type` is an Arvados API resource type.\n\"\"\")\n    args = parser.parse_args(arglist)\n    if args.api_kwargs is None:\n        args.api_kwargs = ConfigLoader.default_config()\n    return args\n\n\ndef add_log_handlers(logger, stderr=sys.stderr):\n    syslog = logging.handlers.SysLogHandler('/dev/log')\n    syslog.setFormatter(logging.Formatter('[%(name)s] %(message)s'))\n    logger.addHandler(syslog)\n    if os.environ.get('TERM'):\n        stream = logging.StreamHandler(stderr)\n        stream.setFormatter(logging.Formatter(\n            '[%(asctime)s] arv-seed: %(levelname)s: %(message)s',\n            '%Y-%m-%d %H:%M:%S',\n        ))\n        logger.addHandler(stream)\n\n\ndef main(\n        arglist=None,\n        *,\n        stdout=sys.stdout,\n        stderr=sys.stderr,\n        is_main=Path(sys.argv[0]).stem == 'arv-seed',\n):\n    if is_main:\n        add_log_handlers(_root_logger)\n        sys.excepthook = ExceptHook(logger)\n        arvados.logger.removeHandler(arvados.logging.log_handler)\n    args = parse_arguments(arglist)\n    if is_main:\n        _root_logger.setLevel(args.loglevel)\n        setup_ctx = sys.excepthook.using_exit_code(os.EX_CONFIG)\n    else:\n        logger.setLevel(args.loglevel)\n        setup_ctx = contextlib.nullcontext()\n    with setup_ctx:\n        loader = DirectoryLoader.from_args(args)\n\n    created = {}\n    failed = {}\n    for dir_path in args.dir_paths:\n        try:\n            dir_created, dir_failed = loader.build_from(dir_path)\n        except Exception as err:\n            logger.warning(\n                \"failed to load directory %s: %s\", dir_path, err,\n                exc_info=logger.isEnabledFor(logging.DEBUG),\n            )\n            # Keep keys and values JSON-serializable, matching build_from().\n            failed[str(dir_path.absolute())] = str(err)\n        else:\n            created.update(dir_created)\n            failed.update(dir_failed)\n    json.dump({'created': created, 'failed': failed}, stdout)\n    print(file=stdout)\n\n    if created and failed:\n        return 12\n    elif failed:\n        return 11\n    elif created:\n        return os.EX_OK\n    else:\n        return os.EX_NOINPUT\n\n\n
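# Mirror the guard in federation_migrate.py so this module can also be run\n# directly; main() returns a numeric status suitable for sys.exit().\nif __name__ == \"__main__\":\n    sys.exit(main())\n"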
  },
  {
    "path": "contrib/arvados-bootstrap/src/arv_bootstrap/stubapi.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport functools\nimport hashlib\nimport json\nimport logging\nimport operator\nimport os\n\nimport arvados\nimport arvados.util\n\n_FILTER_OPS = {\n    '=': operator.eq,\n    '>=': operator.ge,\n    '>': operator.gt,\n    '<=': operator.le,\n    '<': operator.lt,\n    '!=': operator.ne,\n    '<>': operator.ne,\n}\n\nclass DeferExecution:\n    def __init__(self, fn):\n        self._fn = fn\n\n    def execute(self, *, num_retries=None):\n        return self._fn()\n\n\ndef defer_execution(f):\n    @functools.wraps(f)\n    def wrapper(*args, **kwds):\n        return DeferExecution(functools.partial(f, *args, **kwds))\n    return wrapper\n\n\nclass StubKeepClient:\n    def __init__(self, basedir):\n        self._basedir = basedir\n\n    def get(self, locator):\n        blockdir = os.path.join(self._basedir, locator[0:3])\n        filepath = os.path.join(blockdir, arvados.KeepLocator(locator).md5sum)\n        with open(filepath, \"rb\") as fr:\n            return fr.read()\n\n    def put(self, data, copies=2, num_retries=None, request_id=None, classes=None):\n        md5 = hashlib.md5(data).hexdigest()\n        locator = '%s+%d' % (md5, len(data))\n\n        blockdir = os.path.join(self._basedir, locator[0:3])\n        os.makedirs(blockdir, exist_ok=True)\n        filepath = os.path.join(blockdir, md5)\n\n        # Write to a temp file and rename so readers never see partial blocks.\n        with open(filepath + '.tmp', 'wb') as f:\n            f.write(data)\n        os.rename(filepath + '.tmp', filepath)\n        return locator\n\n\ndef match_filter(fl, obj):\n    key, op_key, val = fl\n    try:\n        op_func = _FILTER_OPS[op_key]\n    except KeyError:\n        raise NotImplementedError(f\"unsupported filter operator {op_key}\") from None\n    else:\n        return op_func(obj[key], val)\n\n\nclass StubArvadosResources:\n    def __init__(self, basedir, resource_type):\n        self._basedir = basedir\n        self._resource_type = resource_type\n        self._logger = logging.getLogger(f'arvados.stubapi.{resource_type}')\n\n    @defer_execution\n    def get(self, *, uuid=\"\"):\n        with open(os.path.join(self._basedir, uuid), \"rt\") as fr:\n            return json.load(fr)\n\n    @defer_execution\n    def create(self, *, body=None, ensure_unique_name=None):\n        if self._resource_type in body:\n            body = body[self._resource_type]\n        with open(os.path.join(self._basedir, body[\"uuid\"]), \"wt\") as fw:\n            json.dump(body, fw, indent=2)\n\n        return body\n\n    @defer_execution\n    def update(self, *, uuid=\"\", body=None):\n        if self._resource_type in body:\n            body = body[self._resource_type]\n        with open(os.path.join(self._basedir, uuid), \"rt\") as fr:\n            obj = json.load(fr)\n\n        for k,v in body.items():\n            obj[k] = v\n\n        with open(os.path.join(self._basedir, uuid), \"wt\") as fw:\n            json.dump(obj, fw, indent=2)\n\n        return obj\n\n    @defer_execution\n    def list(self, *, filters=None, limit=None, count=None, order=None):\n        items = []\n        for dirent in os.scandir(self._basedir):\n            if not arvados.util.uuid_pattern.match(dirent.name) or not dirent.is_file():\n                continue\n\n            with open(os.path.join(self._basedir, dirent.name), \"rt\") as fr:\n                obj = json.load(fr)\n\n            if all(match_filter(f, obj) for f in (filters or [])):\n                items.append(obj)\n\n        if order:\n            if len(order) == 1:\n                k1, r1 = order[0].split(' ')\n                keycomp = lambda x: x[k1]\n            elif len(order) == 2:\n                k1, r1 = order[0].split(' ')\n                k2, r2 = order[1].split(' ')\n                if r1 != r2:\n                    raise NotImplementedError(\"Can't have secondary sort column in opposite direction\")\n                keycomp = lambda x: (x[k1], x[k2])\n            else:\n                raise NotImplementedError(\"sorting by more than two columns is not supported\")\n\n            items.sort(key=keycomp, reverse=(r1=='desc'))\n\n        if limit is not None:\n            items = items[0:limit]\n\n        return {\n            \"items\": items,\n            \"items_available\": len(items)\n        }\n\n\nclass StubArvadosAPI:\n    def __init__(self, basedir):\n        self._basedir = basedir\n\n        os.makedirs(os.path.join(self._basedir, \"keep\"), exist_ok=True)\n        os.makedirs(os.path.join(self._basedir, \"arvados/v1/collections\"), exist_ok=True)\n        os.makedirs(os.path.join(self._basedir, \"arvados/v1/links\"), exist_ok=True)\n        os.makedirs(os.path.join(self._basedir, \"arvados/v1/groups\"), exist_ok=True)\n        os.makedirs(os.path.join(self._basedir, \"arvados/v1/workflows\"), exist_ok=True)\n\n        self.keep = StubKeepClient(os.path.join(self._basedir, \"keep\"))\n\n    @classmethod\n    def for_cwd(cls):\n        return cls(os.getcwd())\n\n    def collections(self):\n        return StubArvadosResources(os.path.join(self._basedir, \"arvados/v1/collections\"), \"collection\")\n\n    def links(self):\n        return StubArvadosResources(os.path.join(self._basedir, \"arvados/v1/links\"), \"link\")\n\n    def groups(self):\n        return StubArvadosResources(os.path.join(self._basedir, \"arvados/v1/groups\"), \"group\")\n\n    def workflows(self):\n        return StubArvadosResources(os.path.join(self._basedir, \"arvados/v1/workflows\"), \"workflow\")\n"
  },
  {
    "path": "contrib/arvbash/arvbash.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# bash functions for managing Arvados tokens and other conveniences.\n\nread -rd \"\\000\" helpmessage <<EOF\n$(basename $0): bash functions for managing Arvados tokens and other shortcuts.\n\nSyntax:\n        . $0            # activate for current shell\n        $0 --install    # install into .bashrc\n\narvswitch <name>\n  Set ARVADOS_API_HOST and ARVADOS_API_TOKEN in the current environment based on\n  $HOME/.config/arvados/<name>.conf\n  With no arguments, print current API host and available Arvados configurations.\n\narvsave <name>\n  Save current values of ARVADOS_API_HOST and ARVADOS_API_TOKEN in the current environment to\n  $HOME/.config/arvados/<name>.conf\n\narvrm <name>\n  Delete $HOME/.config/arvados/<name>.conf\n\narvopen <uuid>\n  Open an Arvados uuid in web browser (https://arvadosapi.com)\n\narvissue <issue number>\n  Open an Arvados ticket in web browser (https://dev.arvados.org)\n\nEOF\n\nif [[ \"$1\" = \"--install\" ]] ; then\n    this=$(readlink -f $0)\n    if ! grep \". $this\" ~/.bashrc >/dev/null ; then\n        echo \". $this\" >> ~/.bashrc\n        echo \"Installed into ~/.bashrc\"\n    else\n        echo \"Already installed in ~/.bashrc\"\n    fi\nelif ! [[ $0 =~ bash$ ]] ; then\n    echo \"$helpmessage\"\nfi\n\nHISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'\n\narvswitch() {\n    if [[ -n \"$1\" ]] ; then\n        if [[ -f $HOME/.config/arvados/$1.conf ]] ; then\n            unset ARVADOS_API_HOST_INSECURE\n            for a in $(cat $HOME/.config/arvados/$1.conf) ; do export $a ; done\n            echo \"Switched to $1\"\n        else\n            echo \"$1 unknown\"\n        fi\n    else\n        echo \"Switch Arvados environment conf\"\n        echo \"Current host: ${ARVADOS_API_HOST}\"\n        echo \"Usage: arvswitch <name>\"\n        echo \"Available confs:\" $((cd $HOME/.config/arvados && ls --indicator-style=none *.conf) | rev | cut -c6- | rev)\n    fi\n}\n\narvsave() {\n    if [[ -n \"$1\" ]] ; then\n        touch $HOME/.config/arvados/$1.conf\n        chmod 0600 $HOME/.config/arvados/$1.conf\n        env | grep ARVADOS_ > $HOME/.config/arvados/$1.conf\n    else\n        echo \"Save current Arvados environment variables to conf file\"\n        echo \"Usage: arvsave <name>\"\n    fi\n}\n\narvrm() {\n    if [[ -n \"$1\" ]] ; then\n        if [[ -f $HOME/.config/arvados/$1.conf ]] ; then\n            rm $HOME/.config/arvados/$1.conf\n        else\n            echo \"$1 unknown\"\n        fi\n    else\n        echo \"Delete Arvados environment conf\"\n        echo \"Usage: arvrm <name>\"\n    fi\n}\n\narvopen() {\n    if [[ -n \"$1\" ]] ; then\n        xdg-open https://arvadosapi.com/$1\n    else\n        echo \"Open Arvados uuid in browser\"\n        echo \"Usage: arvopen <uuid>\"\n    fi\n}\n\narvissue() {\n    if [[ -n \"$1\" ]] ; then\n        xdg-open https://dev.arvados.org/issues/$1\n    else\n        echo \"Open Arvados issue in browser\"\n        echo \"Usage: arvissue <issue number>\"\n    fi\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/.gitignore",
    "content": "/.gradle/\n/bin/\n/build/\n.project\n.classpath\n/.settings/\n.DS_Store\n/.idea/\n/out/\n"
  },
  {
    "path": "contrib/java-sdk-v2/.licenseignore",
    "content": ".licenseignore\nagpl-3.0.txt\napache-2.0.txt\nCOPYING"
  },
  {
    "path": "contrib/java-sdk-v2/COPYING",
    "content": "Unless indicated otherwise in the header of the file, the files in this\nrepository are dual-licensed AGPL-3.0 and Apache-2.0\n\nIndividual files contain an SPDX tag that indicates the license for the file.\ndual-licensed files use the following tag:\n\n    SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n\nThis enables machine processing of license information based on the SPDX\nLicense Identifiers that are available here: http://spdx.org/licenses/\n\nThe full license text for each license is available in this directory:\n\n  AGPL-3.0:     agpl-3.0.txt\n  Apache-2.0:   apache-2.0.txt\n"
  },
  {
    "path": "contrib/java-sdk-v2/README.md",
    "content": "```\nCopyright (C) The Arvados Authors. All rights reserved.\n \nSPDX-License-Identifier: CC-BY-SA-3.0\n```\n\n# Arvados Java SDK\n\n##### About\nThe Arvados Java Client provides access to Arvados servers and uses two APIs:\n* lower level [Keep Server API](https://doc.arvados.org/api/index.html)\n* higher level [Keep-Web API](https://godoc.org/github.com/arvados/arvados/services/keep-web) (when needed)\n\n##### Required Java version\nThis SDK requires Java 8+\n\n##### Logging\n\nSLF4J is used for logging. A concrete logging framework and its configuration must be provided by the client.\n\n##### Configuration\n\n[TypeSafe Configuration](https://github.com/lightbend/config) is used for configuring this library.\n\nPlease have a look at java/resources/reference.conf for the default values provided with this library.\n\n* **keepweb-host** - change to the host of your Keep-Web installation\n* **keepweb-port** - change to the port of your Keep-Web installation\n* **host** - change to the host of your Arvados installation\n* **port** - change to the port of your Arvados installation\n* **token** - authenticates a registered user; provide a\n  [token obtained from Arvados Workbench](https://doc.arvados.org/user/reference/api-tokens.html)\n* **protocol** - don't change unless really needed\n* **host-insecure** - insecure communication with Arvados (ignores SSL certificate verification);\n  don't set to *true* unless really needed\n* **split-size** - size of chunk files in megabytes\n* **temp-dir** - temporary chunk files storage\n* **copies** - number of duplicates of each chunk file per Keep server\n* **retries** - number of attempts to resend a chunk file after a send failure\n  (*NOTE*: this parameter is not used at the moment but was left for future improvements)\n\nTo override the default settings, create an application.conf file in your application.\nExample: src/test/resources/application.conf.\n\nAlternatively, the ExternalConfigProvider class can be used to pass configuration via code.\nExternalConfigProvider comes with a builder, and all of the above values must be provided in order for it to work properly.\n\nArvadosFacade has two constructors: one without arguments that uses values from reference.conf, and a second one\ntaking an ExternalConfigProvider as an argument.\n\n##### API clients\n\nAll API clients inherit from BaseStandardApiClient. This class contains implementations of all\ncommon methods as described in http://doc.arvados.org/api/methods.html.\n\nParameters provided to common or specific methods are a String UUID or fields wrapped in Java objects. For example:\n\n```java\nString uuid = \"ardev-4zz18-rxcql7qwyakg1r1\";\n\nCollection actual = client.get(uuid);\n```\n\n```java\nListArgument listArgument = ListArgument.builder()\n        .filters(Arrays.asList(\n                Filter.of(\"owner_uuid\", Operator.LIKE, \"ardev%\"),\n                Filter.of(\"name\", Operator.LIKE, \"Super%\"),\n                Filter.of(\"portable_data_hash\", Operator.IN, Lists.newArrayList(\"54f6d9f59065d3c009d4306660989379+65\")\n            )))\n        .build();\n\nCollectionList actual = client.list(listArgument);\n```\n\nNon-standard API clients must inherit from BaseApiClient.\nFor example: KeepServerApiClient communicates directly with Keep servers using exclusively non-common methods.\n\n##### Business logic\n\nMore advanced API data handling could be implemented as *Facade* classes.\nIn the current version, the functionalities provided by the SDK are handled by *ArvadosFacade*.\nThey include:\n* **downloading a single file from a collection** - using Keep-Web\n* **downloading a whole collection** - using Keep-Web or the Keep Server API\n* **listing file info from a certain collection** - information is returned as a list of *FileTokens* providing file details\n* **uploading a single file** - to either a new or an existing collection\n* **uploading a list of files** - to either a new or an existing collection\n* **creating an empty collection**\n* **getting current user info**\n* **listing the current user's collections**\n* **creating a new project**\n* **deleting a certain collection**\n\n##### Note regarding Keep-Web\n\nThe current version requires both Keep-Web and the standard Keep Server API to be configured in order to use the Keep-Web functionalities.\n\n##### Integration tests\n\nTo run the integration tests, all fields within the following configuration file must be provided:\n```java\nsrc/test/resources/integration-test-appliation.conf \n```\nThe parameter **integration-tests.project-uuid** should contain the UUID of a project available to the user\nwhose token was provided within the configuration file.\n\nIntegration tests require a connection to a real Arvados server.\n\n##### Note regarding file naming\n\nWhen uploading via this SDK, all uploaded files within a single collection must have different names.\nThis also applies to uploading files to an already existing collection.\nRenaming files with duplicate names is not implemented in the current version.\n\n##### Building with Gradle\n\nThe Arvados Java SDK is built with `gradle`. Common development build tasks are:\n\n* `clean`\n* `test`\n* `jar` (build the jar files, including documentation)\n* `install`\n"
  },
  {
    "path": "contrib/java-sdk-v2/agpl-3.0.txt",
    "content": "                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU Affero General Public License is a free, copyleft license for\nsoftware and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nour General Public Licenses are intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.\n\n  A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.\n\n  The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.\n\n  An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU Affero General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  
\"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. 
Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  
This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  
But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  
If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  
If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  
\"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Remote Network Interaction; Use with the GNU General Public License.\n\n  Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  
This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the work with which it is combined will remain governed by version\n3 of the GNU General Public License.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new versions\nwill be similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU Affero General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU Affero General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. 
Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a \"Source\" link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n<http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "contrib/java-sdk-v2/apache-2.0.txt",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "contrib/java-sdk-v2/build.gradle",
    "content": "apply plugin: 'java-library'\napply plugin: 'eclipse'\napply plugin: 'idea'\napply plugin: 'maven'\napply plugin: 'signing'\n\n\nrepositories {\n    mavenCentral()\n}\n\ndependencies {\n    api 'com.squareup.okhttp3:okhttp:3.9.1'\n    api 'com.fasterxml.jackson.core:jackson-databind:2.9.2'\n    api 'com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.9.2'\n    api 'com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.9.2'\n    api 'commons-codec:commons-codec:1.11'\n    api 'commons-io:commons-io:2.6'\n    api 'com.google.guava:guava:23.4-jre'\n    api 'org.slf4j:slf4j-api:1.7.25'\n    api 'com.typesafe:config:1.3.2'\n    \n    testImplementation 'junit:junit:4.12'\n    testImplementation 'org.mockito:mockito-core:5.17.0'\n    testImplementation 'org.assertj:assertj-core:3.8.0'\n    testImplementation 'com.squareup.okhttp3:mockwebserver:3.9.1'\n}\n\ntest {\n    useJUnit {\n        excludeCategories 'org.arvados.client.junit.categories.IntegrationTests'\n    }\n\n\ttestLogging {\n\t    events \"passed\", \"skipped\", \"failed\"\n\t    afterSuite { desc, result ->\n\t        if (!desc.parent) { // will match the outermost suite\n\t            println \"\\n---- Test results ----\"\n\t            println \"${result.resultType} (${result.testCount} tests, ${result.successfulTestCount} successes, ${result.failedTestCount} failures, ${result.skippedTestCount} skipped)\"\n\t            println \"\"\n\t        }\n\t    }\n\t}\n}\n\ntask integrationTest(type: Test) {\n    useJUnit {\n        includeCategories 'org.arvados.client.junit.categories.IntegrationTests'\n    }\n}\n\ntask javadocJar(type: Jar) {\n    classifier = 'javadoc'\n    from javadoc\n}\n\ntask sourcesJar(type: Jar) {\n    classifier = 'sources'\n    from sourceSets.main.allSource\n}\n\nartifacts {\n    archives javadocJar, sourcesJar\n}\n\nsigning {\n    sign configurations.archives\n}\n\nuploadArchives {\n  repositories {\n    mavenDeployer {\n      beforeDeployment { MavenDeployment deployment -> signing.signPom(deployment) }\n\n      repository(url: \"https://ossrh-staging-api.central.sonatype.com/service/local/staging/deploy/maven2\") {\n        authentication(userName: ossrhUsername, password: ossrhPassword)\n      }\n\n      snapshotRepository(url: \"https://ossrh-staging-api.central.sonatype.com/content/repositories/snapshots\") {\n        authentication(userName: ossrhUsername, password: ossrhPassword)\n      }\n\n      pom.project {\n        name 'Arvados Java SDK'\n        packaging 'jar'\n        groupId 'org.arvados'\n        description 'Arvados Java SDK'\n        url 'https://github.com/arvados/arvados'\n               \n       scm {\n         url 'scm:git@https://github.com/arvados/arvados.git'\n         connection 'scm:git@https://github.com/arvados/arvados.git'\n         developerConnection 'scm:git@https://github.com/arvados/arvados.git'\n       }\n\n        licenses {\n          license {\n            name 'The Apache License, Version 2.0'\n            url 'http://www.apache.org/licenses/LICENSE-2.0.txt'\n          }\n        }\n\n        developers {\n          developer {\n            id 'veritasgenetics'\n            name 'Veritas Genetics'\n            email 'ops@veritasgenetics.com'\n          }\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/gradle.properties",
    "content": "/*\n Copyright (C) The Arvados Authors. All rights reserved.\n\n SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n*/\n\nossrhUsername = ''\nossrhPassword = ''\n"
  },
  {
    "path": "contrib/java-sdk-v2/settings.gradle",
    "content": "rootProject.name = 'arvados-java-sdk'\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/BaseApiClient.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport org.arvados.client.exception.ArvadosApiException;\nimport org.arvados.client.api.client.factory.OkHttpClientFactory;\nimport org.arvados.client.api.model.ApiError;\nimport org.arvados.client.config.ConfigProvider;\nimport okhttp3.OkHttpClient;\nimport okhttp3.Request;\nimport okhttp3.Response;\nimport okhttp3.ResponseBody;\nimport org.slf4j.Logger;\n\nimport java.io.IOException;\nimport java.io.UnsupportedEncodingException;\nimport java.net.URLDecoder;\nimport java.nio.charset.StandardCharsets;\nimport java.util.Objects;\nimport java.util.concurrent.TimeUnit;\n\nabstract class BaseApiClient {\n\n    static final ObjectMapper MAPPER = new ObjectMapper().findAndRegisterModules();\n\n    final OkHttpClient client;\n    final ConfigProvider config;\n    private final Logger log = org.slf4j.LoggerFactory.getLogger(BaseApiClient.class);\n\n    BaseApiClient(ConfigProvider config) {\n        this.config = config;\n        this.client = OkHttpClientFactory.INSTANCE.create(config.isApiHostInsecure())\n\t    .newBuilder()\n\t    .connectTimeout(config.getConnectTimeout(), TimeUnit.MILLISECONDS)\n\t    .readTimeout(config.getReadTimeout(), TimeUnit.MILLISECONDS)\n\t    .writeTimeout(config.getWriteTimeout(), TimeUnit.MILLISECONDS)\n\t    .build();\n    }\n\n    Request.Builder getRequestBuilder() {\n        return new Request.Builder()\n                .addHeader(\"authorization\", String.format(\"Bearer %s\", config.getApiToken()))\n                .addHeader(\"cache-control\", \"no-cache\");\n    }\n\n    String newCall(Request request) {\n        return (String) getResponseBody(request, body -> body.string().trim());\n    }\n\n    byte[] newFileCall(Request request) {\n        return (byte[]) getResponseBody(request, ResponseBody::bytes);\n    }\n\n    private Object getResponseBody(Request request, Command command) {\n        try {\n            log.debug(URLDecoder.decode(request.toString(), StandardCharsets.UTF_8.name()));\n        } catch (UnsupportedEncodingException e) {\n            throw new ArvadosApiException(e);\n        }\n\n        try (Response response = client.newCall(request).execute()) {\n            ResponseBody responseBody = response.body();\n\n            if (!response.isSuccessful()) {\n                String errorBody = Objects.requireNonNull(responseBody).string();\n                if (errorBody == null || errorBody.length() == 0) {\n                    throw new ArvadosApiException(String.format(\"Error code %s with message: %s\", response.code(), response.message()));\n                }\n                ApiError apiError = MAPPER.readValue(errorBody, ApiError.class);\n                throw new ArvadosApiException(String.format(\"Error code %s with messages: %s\", response.code(), apiError.getErrors()));\n            }\n            return command.readResponseBody(responseBody);\n        } catch (IOException e) {\n            throw new ArvadosApiException(e);\n        }\n    }\n\n    private interface Command {\n        Object readResponseBody(ResponseBody body) throws IOException;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/BaseStandardApiClient.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport com.fasterxml.jackson.core.JsonProcessingException;\nimport com.fasterxml.jackson.core.type.TypeReference;\nimport com.fasterxml.jackson.databind.ObjectWriter;\nimport okhttp3.MediaType;\nimport okhttp3.HttpUrl;\nimport okhttp3.HttpUrl.Builder;\nimport okhttp3.Request;\nimport okhttp3.RequestBody;\nimport org.arvados.client.exception.ArvadosApiException;\nimport org.arvados.client.api.model.Item;\nimport org.arvados.client.api.model.ItemList;\nimport org.arvados.client.api.model.argument.ListArgument;\nimport org.arvados.client.config.ConfigProvider;\nimport org.slf4j.Logger;\n\nimport java.io.IOException;\nimport java.util.Map;\n\npublic abstract class BaseStandardApiClient<T extends Item, L extends ItemList> extends BaseApiClient {\n\n    protected static final MediaType JSON = MediaType.parse(com.google.common.net.MediaType.JSON_UTF_8.toString());\n    private final Logger log = org.slf4j.LoggerFactory.getLogger(BaseStandardApiClient.class);\n\n    BaseStandardApiClient(ConfigProvider config) {\n        super(config);\n    }\n\n    public L list(ListArgument listArguments) {\n        log.debug(\"Get list of {}\", getType().getSimpleName());\n        Builder urlBuilder = getUrlBuilder();\n        addQueryParameters(urlBuilder, listArguments);\n        HttpUrl url = urlBuilder.build();\n        Request request = getRequestBuilder().url(url).build();\n        return callForList(request);\n    }\n    \n    public L list() {\n        return list(ListArgument.builder().build());\n    }\n\n    public T get(String uuid) {\n        log.debug(\"Get {} by UUID {}\", getType().getSimpleName(), uuid);\n        HttpUrl url = getUrlBuilder().addPathSegment(uuid).build();\n        Request request = getRequestBuilder().get().url(url).build();\n        return callForType(request);\n    }\n\n    public T create(T type) {\n        log.debug(\"Create {}\", getType().getSimpleName());\n        String json = mapToJson(type);\n        RequestBody body = RequestBody.create(JSON, json);\n        Request request = getRequestBuilder().post(body).build();\n        return callForType(request);\n    }\n\n    public T delete(String uuid) {\n        log.debug(\"Delete {} by UUID {}\", getType().getSimpleName(), uuid);\n        HttpUrl url = getUrlBuilder().addPathSegment(uuid).build();\n        Request request = getRequestBuilder().delete().url(url).build();\n        return callForType(request);\n    }\n\n    public T update(T type) {\n        String uuid = type.getUuid();\n        log.debug(\"Update {} by UUID {}\", getType().getSimpleName(), uuid);\n        String json = mapToJson(type);\n        RequestBody body = RequestBody.create(JSON, json);\n        HttpUrl url = getUrlBuilder().addPathSegment(uuid).build();\n        Request request = getRequestBuilder().put(body).url(url).build();\n        return callForType(request);\n    }\n\n    @Override\n    Request.Builder getRequestBuilder() {\n        return super.getRequestBuilder().url(getUrlBuilder().build());\n    }\n\n    HttpUrl.Builder getUrlBuilder() {\n        return new HttpUrl.Builder()\n                .scheme(config.getApiProtocol())\n                .host(config.getApiHost())\n                .port(config.getApiPort())\n                .addPathSegment(\"arvados\")\n                .addPathSegment(\"v1\")\n                .addPathSegment(getResource());\n    }\n\n 
   <TL> TL call(Request request, Class<TL> cls) {\n        String bodyAsString = newCall(request);\n        try {\n            return mapToObject(bodyAsString, cls);\n        } catch (IOException e) {\n            throw new ArvadosApiException(\"A problem occurred while parsing JSON data\", e);\n        }\n    }\n\n    private <TL> TL mapToObject(String content, Class<TL> cls) throws IOException {\n        return MAPPER.readValue(content, cls);\n    }\n\n    protected  <TL> String mapToJson(TL type) {\n        ObjectWriter writer = MAPPER.writer().withDefaultPrettyPrinter();\n        try {\n            return writer.writeValueAsString(type);\n        } catch (JsonProcessingException e) {\n            log.error(e.getMessage());\n            return null;\n        }\n    }\n\n    T callForType(Request request) {\n        return call(request, getType());\n    }\n\n    L callForList(Request request) {\n        return call(request, getListType());\n    }\n\n    abstract String getResource();\n\n    abstract Class<T> getType();\n\n    abstract Class<L> getListType();\n    \n    Request getNoArgumentMethodRequest(String method) {\n        HttpUrl url = getUrlBuilder().addPathSegment(method).build();\n        return getRequestBuilder().get().url(url).build();\n    }\n    \n    RequestBody getJsonRequestBody(Object object) {\n        return RequestBody.create(JSON, mapToJson(object));\n    }\n    \n    void addQueryParameters(Builder urlBuilder, Object object) {\n        Map<String, Object> queryMap = MAPPER.convertValue(object, new TypeReference<Map<String, Object>>() {});\n        queryMap.keySet().forEach(key -> {\n            Object type = queryMap.get(key);\n            if (!(type instanceof String)) {\n                type = mapToJson(type);\n            }\n            urlBuilder.addQueryParameter(key, (String) type);\n        });\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/CollectionsApiClient.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport org.arvados.client.api.model.Collection;\nimport org.arvados.client.api.model.CollectionList;\nimport org.arvados.client.api.model.CollectionReplaceFiles;\nimport org.arvados.client.config.ConfigProvider;\nimport org.slf4j.Logger;\n\nimport okhttp3.HttpUrl;\nimport okhttp3.Request;\nimport okhttp3.RequestBody;\n\npublic class CollectionsApiClient extends BaseStandardApiClient<Collection, CollectionList> {\n\n    private static final String RESOURCE = \"collections\";\n\n    private final Logger log = org.slf4j.LoggerFactory.getLogger(CollectionsApiClient.class);\n\n    public CollectionsApiClient(ConfigProvider config) {\n        super(config);\n    }\n    \n    @Override\n    public Collection create(Collection type) {\n        Collection newCollection = super.create(type);\n        log.debug(String.format(\"New collection '%s' with UUID %s has been created\", newCollection.getName(), newCollection.getUuid()));\n        return newCollection;\n    }\n\n    public Collection update(String collectionUUID, CollectionReplaceFiles replaceFilesRequest) {\n        String json = mapToJson(replaceFilesRequest);\n        RequestBody body = RequestBody.create(JSON, json);\n        HttpUrl url = getUrlBuilder().addPathSegment(collectionUUID).build();\n        Request request = getRequestBuilder().put(body).url(url).build();\n        return callForType(request);\n    }\n\n    @Override\n    String getResource() {\n        return RESOURCE;\n    }\n\n    @Override\n    Class<Collection> getType() {\n        return Collection.class;\n    }\n\n    @Override\n    Class<CollectionList> getListType() {\n        return CollectionList.class;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/ConfigApiClient.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport okhttp3.OkHttpClient;\nimport okhttp3.Request;\nimport okhttp3.Response;\nimport org.arvados.client.api.client.factory.OkHttpClientFactory;\nimport org.arvados.client.api.model.ArvadosConfig;\nimport org.arvados.client.exception.ArvadosApiException;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport java.io.IOException;\nimport java.util.concurrent.TimeUnit;\n\npublic class ConfigApiClient {\n\n    private static final Logger log = LoggerFactory.getLogger(ConfigApiClient.class);\n    private static final ObjectMapper MAPPER = new ObjectMapper().findAndRegisterModules();\n    private static final String CONFIG_ENDPOINT = \"/arvados/v1/config\";\n\n    private final OkHttpClient client;\n    private final String baseUrl;\n\n    public ConfigApiClient(String protocol, String host, int port, boolean insecure) {\n        this.baseUrl = String.format(\"%s://%s:%d\", protocol, host, port);\n        this.client = OkHttpClientFactory.INSTANCE.create(insecure)\n                .newBuilder()\n                .connectTimeout(10, TimeUnit.SECONDS)\n                .readTimeout(10, TimeUnit.SECONDS)\n                .build();\n    }\n\n    public ArvadosConfig fetchConfig() throws ArvadosApiException {\n        String url = baseUrl + CONFIG_ENDPOINT;\n        Request request = new Request.Builder()\n                .url(url)\n                .get()\n                .build();\n\n        try (Response response = client.newCall(request).execute()) {\n            if (!response.isSuccessful()) {\n                String errorMessage = String.format(\"Failed to fetch config from %s. Status: %d\",\n                        url, response.code());\n                log.error(errorMessage);\n                throw new ArvadosApiException(errorMessage);\n            }\n\n            String responseBody = response.body() != null ? response.body().string() : \"\";\n            return MAPPER.readValue(responseBody, ArvadosConfig.class);\n\n        } catch (IOException e) {\n            String errorMessage = String.format(\"Error fetching config from %s: %s\",\n                    url, e.getMessage());\n            log.error(errorMessage, e);\n            throw new ArvadosApiException(errorMessage, e);\n        }\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/CountingFileRequestBody.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport okio.BufferedSink;\nimport okio.Okio;\nimport okio.Source;\n\nimport java.io.File;\n\n/**\n * Based on:\n * {@link} https://gist.github.com/eduardb/dd2dc530afd37108e1ac\n */\npublic class CountingFileRequestBody extends CountingRequestBody<File> {\n\n    CountingFileRequestBody(final File file, final ProgressListener listener) {\n        super(file, listener);\n    }\n\n    @Override\n    public long contentLength() {\n        return requestBodyData.length();\n    }\n\n    @Override\n    public void writeTo(BufferedSink sink) {\n        try (Source source = Okio.source(requestBodyData)) {\n            long total = 0;\n            long read;\n\n            while ((read = source.read(sink.buffer(), SEGMENT_SIZE)) != -1) {\n                total += read;\n                sink.flush();\n                listener.updateProgress(total);\n\n            }\n        } catch (RuntimeException rethrown) {\n            throw rethrown;\n        } catch (Exception ignored) {\n            //ignore\n        }\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/CountingRequestBody.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport okhttp3.MediaType;\nimport okhttp3.RequestBody;\nimport org.slf4j.Logger;\n\nabstract class CountingRequestBody<T> extends RequestBody {\n\n    protected static final int SEGMENT_SIZE = 2048; // okio.Segment.SIZE\n    protected static final MediaType CONTENT_BINARY = MediaType.parse(com.google.common.net.MediaType.OCTET_STREAM.toString());\n\n    protected final ProgressListener listener;\n\n    protected final T requestBodyData;\n\n    CountingRequestBody(T file, final ProgressListener listener) {\n        this.requestBodyData = file;\n        this.listener = listener;\n    }\n\n    @Override\n    public MediaType contentType() {\n        return CONTENT_BINARY;\n    }\n\n    static class TransferData {\n\n        private final Logger log = org.slf4j.LoggerFactory.getLogger(TransferData.class);\n        private int progressValue;\n        private long totalSize;\n\n        TransferData(long totalSize) {\n            this.progressValue = 0;\n            this.totalSize = totalSize;\n        }\n\n        void updateTransferProgress(long transferred) {\n            float progress = (transferred / (float) totalSize) * 100;\n            if (progressValue != (int) progress) {\n                progressValue = (int) progress;\n                log.debug(\"{} / {} / {}%\", transferred, totalSize, progressValue);\n            }\n        }\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/CountingStreamRequestBody.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport okio.BufferedSink;\nimport okio.Okio;\nimport okio.Source;\n\nimport java.io.File;\nimport java.io.IOException;\nimport java.io.InputStream;\n\npublic class CountingStreamRequestBody extends CountingRequestBody<InputStream> {\n\n    CountingStreamRequestBody(final InputStream inputStream, final ProgressListener listener) {\n        super(inputStream, listener);\n    }\n\n    @Override\n    public long contentLength() throws IOException {\n        return requestBodyData.available();\n    }\n\n    @Override\n    public void writeTo(BufferedSink sink) {\n        try (Source source = Okio.source(requestBodyData)) {\n            long total = 0;\n            long read;\n\n            while ((read = source.read(sink.buffer(), SEGMENT_SIZE)) != -1) {\n                total += read;\n                sink.flush();\n                listener.updateProgress(total);\n\n            }\n        } catch (RuntimeException rethrown) {\n            throw rethrown;\n        } catch (Exception ignored) {\n            //ignore\n        }\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/GroupsApiClient.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport okhttp3.HttpUrl;\nimport okhttp3.HttpUrl.Builder;\nimport okhttp3.Request;\nimport okhttp3.RequestBody;\nimport org.arvados.client.api.model.Group;\nimport org.arvados.client.api.model.GroupList;\nimport org.arvados.client.api.model.argument.ContentsGroup;\nimport org.arvados.client.api.model.argument.ListArgument;\nimport org.arvados.client.api.model.argument.UntrashGroup;\nimport org.arvados.client.config.ConfigProvider;\nimport org.slf4j.Logger;\n\npublic class GroupsApiClient extends BaseStandardApiClient<Group, GroupList> {\n\n    private static final String RESOURCE = \"groups\";\n    private final Logger log = org.slf4j.LoggerFactory.getLogger(GroupsApiClient.class);\n\n    public GroupsApiClient(ConfigProvider config) {\n        super(config);\n    }\n\n    public GroupList contents(ContentsGroup contentsGroup) {\n        log.debug(\"Get {} contents\", getType().getSimpleName());\n        Builder urlBuilder = getUrlBuilder().addPathSegment(\"contents\");\n        addQueryParameters(urlBuilder, contentsGroup);\n        HttpUrl url = urlBuilder.build();\n        Request request = getRequestBuilder().url(url).build();\n        return callForList(request);\n    }\n\n    public GroupList contents(ListArgument listArguments) {\n        this.log.debug(\"Get {} contents\", this.getType().getSimpleName());\n        HttpUrl.Builder urlBuilder = this.getUrlBuilder().addPathSegment(\"contents\");\n        this.addQueryParameters(urlBuilder, listArguments);\n        HttpUrl url = urlBuilder.build();\n        Request request = this.getRequestBuilder().url(url).build();\n        return callForList(request);\n    }\n\n    public Group untrash(UntrashGroup untrashGroup) {\n        log.debug(\"Untrash {} by UUID {}\", getType().getSimpleName(), untrashGroup.getUuid());\n        HttpUrl url = getUrlBuilder().addPathSegment(untrashGroup.getUuid()).addPathSegment(\"untrash\").build();\n        RequestBody requestBody = getJsonRequestBody(untrashGroup);\n        Request request = getRequestBuilder().post(requestBody).url(url).build();\n        return callForType(request);\n    }\n\n    @Override\n    String getResource() {\n        return RESOURCE;\n    }\n\n    @Override\n    Class<Group> getType() {\n        return Group.class;\n    }\n\n    @Override\n    Class<GroupList> getListType() {\n        return GroupList.class;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/KeepWebApiClient.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport okhttp3.HttpUrl;\nimport okhttp3.Request;\nimport okhttp3.RequestBody;\nimport okhttp3.Response;\nimport okhttp3.ResponseBody;\n\nimport org.arvados.client.config.ConfigProvider;\n\nimport java.io.File;\nimport java.io.IOException;\nimport java.io.InputStream;\n\npublic class KeepWebApiClient extends BaseApiClient {\n\n    public KeepWebApiClient(ConfigProvider config) {\n        super(config);\n    }\n\n    public byte[] download(String collectionUuid, String filePathName) {\n        Request request = getRequestBuilder()\n                .url(getUrlBuilder(collectionUuid,filePathName).build())\n                .get()\n                .build();\n\n        return newFileCall(request);\n    }\n\n    public InputStream get(String collectionUuid, String filePathName, long start, Long end) throws IOException {\n        Request.Builder builder = this.getRequestBuilder();\n        String rangeValue = \"bytes=\" + start + \"-\";\n        if (end != null) {\n            rangeValue += end;\n        }\n        builder.addHeader(\"Range\", rangeValue);\n        Request request = builder.url(this.getUrlBuilder(collectionUuid, filePathName).build()).get().build();\n        Response response = client.newCall(request).execute();\n        if (!response.isSuccessful()) {\n            response.close();\n            throw new IOException(\"Failed to download file: \" + response);\n        }\n        ResponseBody body = response.body();\n        if (body == null) {\n            response.close();\n            throw new IOException(\"Response body is null for request: \" + request);\n        }\n        return body.byteStream();\n    }\n\n    public String delete(String collectionUuid, String filePathName) {\n        Request request = getRequestBuilder()\n                .url(getUrlBuilder(collectionUuid, filePathName).build())\n                .delete()\n                .build();\n\n        return newCall(request);\n    }\n\n    public String upload(String collectionUuid, File file, ProgressListener progressListener) {\n        RequestBody requestBody = new CountingFileRequestBody(file, progressListener);\n\n        Request request = getRequestBuilder()\n                .url(getUrlBuilder(collectionUuid, file.getName()).build())\n                .put(requestBody)\n                .build();\n        return newCall(request);\n    }\n\n    public String upload(String collectionUuid, InputStream inputStream, String fileName, ProgressListener progressListener) {\n        RequestBody requestBody = new CountingStreamRequestBody(inputStream, progressListener);\n\n        Request request = getRequestBuilder()\n                .url(getUrlBuilder(collectionUuid, fileName).build())\n                .put(requestBody)\n                .build();\n        return newCall(request);\n    }\n\n    private HttpUrl.Builder getUrlBuilder(String collectionUuid, String filePathName) {\n        return new HttpUrl.Builder()\n                .scheme(config.getApiProtocol())\n                .host(config.getKeepWebHost())\n                .port(config.getKeepWebPort())\n                .addPathSegment(\"c=\" + collectionUuid)\n                .addPathSegment(filePathName);\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/LinksApiClient.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport org.arvados.client.api.model.Link;\nimport org.arvados.client.api.model.LinkList;\nimport org.arvados.client.config.ConfigProvider;\n\npublic class LinksApiClient extends BaseStandardApiClient<Link, LinkList> {\n\n    private static final String RESOURCE = \"links\";\n\n    public LinksApiClient(ConfigProvider config) {\n        super(config);\n    }\n\n    @Override\n    String getResource() {\n        return RESOURCE;\n    }\n\n    @Override\n    Class<Link> getType() {\n        return Link.class;\n    }\n\n    @Override\n    Class<LinkList> getListType() {\n        return LinkList.class;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/ProgressListener.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\n@FunctionalInterface\npublic interface ProgressListener {\n\n    void updateProgress(long uploadedBytes);\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/UsersApiClient.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport okhttp3.Request;\nimport org.arvados.client.api.model.User;\nimport org.arvados.client.api.model.UserList;\nimport org.arvados.client.config.ConfigProvider;\nimport org.slf4j.Logger;\n\npublic class UsersApiClient extends BaseStandardApiClient<User, UserList> {\n\n    private static final String RESOURCE = \"users\";\n    private final Logger log = org.slf4j.LoggerFactory.getLogger(UsersApiClient.class);\n\n    public UsersApiClient(ConfigProvider config) {\n        super(config);\n    }\n\n    public User current() {\n        log.debug(\"Get current {}\", getType().getSimpleName());\n        Request request = getNoArgumentMethodRequest(\"current\");\n        return callForType(request);\n    }\n\n    public User system() {\n        log.debug(\"Get system {}\", getType().getSimpleName());\n        Request request = getNoArgumentMethodRequest(\"system\");\n        return callForType(request);\n    }\n\n    @Override\n    String getResource() {\n        return RESOURCE;\n    }\n\n    @Override\n    Class<User> getType() {\n        return User.class;\n    }\n\n    @Override\n    Class<UserList> getListType() {\n        return UserList.class;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/client/factory/OkHttpClientFactory.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client.factory;\n\nimport com.google.common.base.Suppliers;\nimport okhttp3.OkHttpClient;\nimport org.arvados.client.exception.ArvadosClientException;\nimport org.slf4j.Logger;\n\nimport javax.net.ssl.SSLContext;\nimport javax.net.ssl.SSLSocketFactory;\nimport javax.net.ssl.TrustManager;\nimport javax.net.ssl.X509TrustManager;\nimport java.security.KeyManagementException;\nimport java.security.NoSuchAlgorithmException;\nimport java.security.SecureRandom;\nimport java.security.cert.X509Certificate;\nimport java.util.function.Supplier;\n\n/**\n * {@link OkHttpClient} instance factory that builds and configures client instances sharing\n * the common resource pool: this is the recommended approach to optimize resource usage.\n */\npublic final class OkHttpClientFactory {\n    public static final OkHttpClientFactory INSTANCE = new OkHttpClientFactory();\n    private final Logger log = org.slf4j.LoggerFactory.getLogger(OkHttpClientFactory.class);\n    private final OkHttpClient clientSecure = new OkHttpClient();\n    private final Supplier<OkHttpClient> clientUnsecure =\n            Suppliers.memoize(this::getDefaultClientAcceptingAllCertificates);\n\n    private OkHttpClientFactory() { /* singleton */}\n\n    public OkHttpClient create(boolean apiHostInsecure) {\n        return apiHostInsecure ? getDefaultUnsecureClient() : getDefaultClient();\n    }\n\n    /**\n     * @return default secure {@link OkHttpClient} with shared resource pool.\n     */\n    public OkHttpClient getDefaultClient() {\n        return clientSecure;\n    }\n\n    /**\n     * @return default {@link OkHttpClient} with shared resource pool\n     * that will accept all SSL certificates by default.\n     */\n    public OkHttpClient getDefaultUnsecureClient() {\n        return clientUnsecure.get();\n    }\n\n    /**\n     * @return default {@link OkHttpClient.Builder} with shared resource pool.\n     */\n    public OkHttpClient.Builder getDefaultClientBuilder() {\n        return clientSecure.newBuilder();\n    }\n\n    /**\n     * @return default {@link OkHttpClient.Builder} with shared resource pool\n     * that is preconfigured to accept all SSL certificates.\n     */\n    public OkHttpClient.Builder getDefaultUnsecureClientBuilder() {\n        return clientUnsecure.get().newBuilder();\n    }\n\n    private OkHttpClient getDefaultClientAcceptingAllCertificates() {\n        log.warn(\"Creating unsafe OkHttpClient. 
All SSL certificates will be accepted.\");\n        try {\n            // Create a trust manager that does not validate certificate chains\n            final TrustManager[] trustAllCerts = {createX509TrustManager()};\n\n            // Install the all-trusting trust manager\n            SSLContext sslContext = SSLContext.getInstance(\"SSL\");\n            sslContext.init(null, trustAllCerts, new SecureRandom());\n            // Create an ssl socket factory with our all-trusting manager\n            final SSLSocketFactory sslSocketFactory = sslContext.getSocketFactory();\n\n            // Create the OkHttpClient.Builder with shared resource pool\n            final OkHttpClient.Builder builder = clientSecure.newBuilder();\n            builder.sslSocketFactory(sslSocketFactory, (X509TrustManager) trustAllCerts[0]);\n            builder.hostnameVerifier((hostname, session) -> true);\n            return builder.build();\n        } catch (NoSuchAlgorithmException | KeyManagementException e) {\n            throw new ArvadosClientException(\"Error establishing SSL context\", e);\n        }\n    }\n\n    private static X509TrustManager createX509TrustManager() {\n        return new X509TrustManager() {\n\n            @Override\n            public void checkClientTrusted(X509Certificate[] chain, String authType) {\n            }\n\n            @Override\n            public void checkServerTrusted(X509Certificate[] chain, String authType) {\n            }\n\n            @Override\n            public X509Certificate[] getAcceptedIssuers() {\n                return new X509Certificate[]{};\n            }\n        };\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/ApiError.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\nimport java.util.List;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({ \"errors\", \"error_token\" })\npublic class ApiError {\n\n    @JsonProperty(\"errors\")\n    private List<String> errors;\n    @JsonProperty(\"error_token\")\n    private String errorToken;\n\n    public List<String> getErrors() {\n        return this.errors;\n    }\n\n    public String getErrorToken() {\n        return this.errorToken;\n    }\n\n    public void setErrors(List<String> errors) {\n        this.errors = errors;\n    }\n\n    public void setErrorToken(String errorToken) {\n        this.errorToken = errorToken;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/ArvadosConfig.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonProperty;\n\n@JsonIgnoreProperties(ignoreUnknown = true)\npublic class ArvadosConfig {\n\n    @JsonProperty(\"Services\")\n    private Services services;\n\n    public Services getServices() {\n        return services;\n    }\n\n    public void setServices(Services services) {\n        this.services = services;\n    }\n\n    @JsonIgnoreProperties(ignoreUnknown = true)\n    public static class Services {\n\n        @JsonProperty(\"WebDAVDownload\")\n        private WebDAVDownload webDAVDownload;\n\n        public WebDAVDownload getWebDAVDownload() {\n            return webDAVDownload;\n        }\n\n        public void setWebDAVDownload(WebDAVDownload webDAVDownload) {\n            this.webDAVDownload = webDAVDownload;\n        }\n    }\n\n    @JsonIgnoreProperties(ignoreUnknown = true)\n    public static class WebDAVDownload {\n\n        @JsonProperty(\"ExternalURL\")\n        private String externalURL;\n\n        public String getExternalURL() {\n            return externalURL;\n        }\n\n        public void setExternalURL(String externalURL) {\n            this.externalURL = externalURL;\n        }\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/Collection.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\nimport java.time.LocalDateTime;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({ \"portable_data_hash\", \"replication_desired\", \"replication_confirmed_at\", \"replication_confirmed\", \"manifest_text\", \n    \"name\", \"description\", \"properties\", \"delete_at\", \"trash_at\", \"is_trashed\" })\npublic class Collection extends Item {\n\n    @JsonProperty(\"portable_data_hash\")\n    private String portableDataHash;\n    @JsonProperty(\"replication_desired\")\n    private Integer replicationDesired;\n    @JsonProperty(\"replication_confirmed_at\")\n    private LocalDateTime replicationConfirmedAt;\n    @JsonProperty(\"replication_confirmed\")\n    private Integer replicationConfirmed;\n    @JsonProperty(\"manifest_text\")\n    private String manifestText;\n    @JsonProperty(\"name\")\n    private String name;\n    @JsonProperty(\"description\")\n    private String description;\n    @JsonProperty(\"properties\")\n    private Object properties;\n    @JsonProperty(\"delete_at\")\n    private LocalDateTime deleteAt;\n    @JsonProperty(\"trash_at\")\n    private LocalDateTime trashAt;\n    @JsonProperty(\"is_trashed\")\n    private Boolean trashed;\n\n    public String getPortableDataHash() {\n        return this.portableDataHash;\n    }\n\n    public Integer getReplicationDesired() {\n        return this.replicationDesired;\n    }\n\n    public LocalDateTime getReplicationConfirmedAt() {\n        return this.replicationConfirmedAt;\n    }\n\n    public Integer getReplicationConfirmed() {\n        return this.replicationConfirmed;\n    }\n\n    public String getManifestText() {\n        return this.manifestText;\n    }\n\n    public String getName() {\n        return this.name;\n    }\n\n    public String getDescription() {\n        return this.description;\n    }\n\n    public Object getProperties() {\n        return this.properties;\n    }\n\n    public LocalDateTime getDeleteAt() {\n        return this.deleteAt;\n    }\n\n    public LocalDateTime getTrashAt() {\n        return this.trashAt;\n    }\n\n    public Boolean getTrashed() {\n        return this.trashed;\n    }\n\n    public void setPortableDataHash(String portableDataHash) {\n        this.portableDataHash = portableDataHash;\n    }\n\n    public void setReplicationDesired(Integer replicationDesired) {\n        this.replicationDesired = replicationDesired;\n    }\n\n    public void setReplicationConfirmedAt(LocalDateTime replicationConfirmedAt) {\n        this.replicationConfirmedAt = replicationConfirmedAt;\n    }\n\n    public void setReplicationConfirmed(Integer replicationConfirmed) {\n        this.replicationConfirmed = replicationConfirmed;\n    }\n\n    public void setManifestText(String manifestText) {\n        this.manifestText = manifestText;\n    }\n\n    public void setName(String name) {\n        this.name = name;\n    }\n\n    public void setDescription(String description) {\n        this.description = description;\n    }\n\n    public void setProperties(Object properties) {\n        this.properties = properties;\n    }\n\n    public 
void setDeleteAt(LocalDateTime deleteAt) {\n        this.deleteAt = deleteAt;\n    }\n\n    public void setTrashAt(LocalDateTime trashAt) {\n        this.trashAt = trashAt;\n    }\n\n    public void setTrashed(Boolean trashed) {\n        this.trashed = trashed;\n    }\n\n    public String toString() {\n        return \"Collection(portableDataHash=\" + this.getPortableDataHash() + \", replicationDesired=\" + this.getReplicationDesired() + \", replicationConfirmedAt=\" + this.getReplicationConfirmedAt() + \", replicationConfirmed=\" + this.getReplicationConfirmed() + \", manifestText=\" + this.getManifestText() + \", name=\" + this.getName() + \", description=\" + this.getDescription() + \", properties=\" + this.getProperties() + \", deleteAt=\" + this.getDeleteAt() + \", trashAt=\" + this.getTrashAt() + \", trashed=\" + this.getTrashed() + \")\";\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/CollectionList.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\nimport java.util.List;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({ \"items\" })\npublic class CollectionList extends ItemList {\n\n    @JsonProperty(\"items\")\n    private List<Collection> items;\n\n    public List<Collection> getItems() {\n        return this.items;\n    }\n\n    public void setItems(List<Collection> items) {\n        this.items = items;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/CollectionReplaceFiles.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonProperty;\n\nimport java.util.HashMap;\nimport java.util.Map;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\npublic class CollectionReplaceFiles {\n\n    @JsonProperty(\"collection\")\n    private CollectionOptions collectionOptions;\n\n    @JsonProperty(\"replace_files\")\n    private Map<String, String> replaceFiles;\n\n    public CollectionReplaceFiles() {\n        this.collectionOptions = new CollectionOptions();\n        this.replaceFiles = new HashMap<>();\n    }\n\n    public void addFileReplacement(String targetPath, String sourcePath) {\n        this.replaceFiles.put(targetPath, sourcePath);\n    }\n\n    @JsonInclude(JsonInclude.Include.NON_NULL)\n    @JsonIgnoreProperties(ignoreUnknown = true)\n    public static class CollectionOptions {\n        @JsonProperty(\"preserve_version\")\n        private boolean preserveVersion;\n\n        public CollectionOptions() {\n            this.preserveVersion = true;\n        }\n\n        public boolean isPreserveVersion() {\n            return preserveVersion;\n        }\n\n        public void setPreserveVersion(boolean preserveVersion) {\n            this.preserveVersion = preserveVersion;\n        }\n    }\n\n    public CollectionOptions getCollectionOptions() {\n        return collectionOptions;\n    }\n\n    public void setCollectionOptions(CollectionOptions collectionOptions) {\n        this.collectionOptions = collectionOptions;\n    }\n\n    public Map<String, String> getReplaceFiles() {\n        return replaceFiles;\n    }\n\n    public void setReplaceFiles(Map<String, String> replaceFiles) {\n        this.replaceFiles = replaceFiles;\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/Group.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\nimport java.time.LocalDateTime;\nimport java.util.List;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({ \"command\", \"container_count\", \"container_count_max\", \"container_image\", \"container_uuid\", \"cwd\", \"environment\", \"expires_at\", \n    \"filters\", \"log_uuid\", \"mounts\", \"output_name\", \"output_path\", \"output_uuid\", \"output_ttl\", \"priority\", \"properties\", \"requesting_container_uuid\", \n    \"runtime_constraints\", \"scheduling_parameters\", \"state\", \"use_existing\" })\npublic class Group extends Item {\n\n    @JsonProperty(\"name\")\n    private String name;\n    @JsonProperty(\"group_class\")\n    private String groupClass;\n    @JsonProperty(\"description\")\n    private String description;\n    @JsonProperty(value = \"writable_by\", access = JsonProperty.Access.WRITE_ONLY)\n    private List<String> writableBy;\n    @JsonProperty(\"delete_at\")\n    private LocalDateTime deleteAt;\n    @JsonProperty(\"trash_at\")\n    private LocalDateTime trashAt;\n    @JsonProperty(\"is_trashed\")\n    private Boolean isTrashed;\n    @JsonProperty(\"command\")\n    private List<String> command;\n    @JsonProperty(\"container_count\")\n    private Integer containerCount;\n    @JsonProperty(\"container_count_max\")\n    private Integer containerCountMax;\n    @JsonProperty(\"container_image\")\n    private String containerImage;\n    @JsonProperty(\"container_uuid\")\n    private String containerUuid;\n    @JsonProperty(\"cwd\")\n    private String cwd;\n    @JsonProperty(\"environment\")\n    private Object environment;\n    @JsonProperty(\"expires_at\")\n    private LocalDateTime expiresAt;\n    @JsonProperty(\"filters\")\n    private List<String> filters;\n    @JsonProperty(\"log_uuid\")\n    private String logUuid;\n    @JsonProperty(\"mounts\")\n    private Object mounts;\n    @JsonProperty(\"output_name\")\n    private String outputName;\n    @JsonProperty(\"output_path\")\n    private String outputPath;\n    @JsonProperty(\"output_uuid\")\n    private String outputUuid;\n    @JsonProperty(\"output_ttl\")\n    private Integer outputTtl;\n    @JsonProperty(\"priority\")\n    private Integer priority;\n    @JsonProperty(\"properties\")\n    private Object properties;\n    @JsonProperty(\"requesting_container_uuid\")\n    private String requestingContainerUuid;\n    @JsonProperty(\"runtime_constraints\")\n    private RuntimeConstraints runtimeConstraints;\n    @JsonProperty(\"scheduling_parameters\")\n    private Object schedulingParameters;\n    @JsonProperty(\"state\")\n    private String state;\n    @JsonProperty(\"use_existing\")\n    private Boolean useExisting;\n\n    public String getName() {\n        return this.name;\n    }\n\n    public String getGroupClass() {\n        return this.groupClass;\n    }\n\n    public String getDescription() {\n        return this.description;\n    }\n\n    public List<String> getWritableBy() {\n        return this.writableBy;\n    }\n\n    public LocalDateTime getDeleteAt() {\n        return this.deleteAt;\n    }\n\n    public LocalDateTime 
getTrashAt() {\n        return this.trashAt;\n    }\n\n    public Boolean getIsTrashed() {\n        return this.isTrashed;\n    }\n\n    public List<String> getCommand() {\n        return this.command;\n    }\n\n    public Integer getContainerCount() {\n        return this.containerCount;\n    }\n\n    public Integer getContainerCountMax() {\n        return this.containerCountMax;\n    }\n\n    public String getContainerImage() {\n        return this.containerImage;\n    }\n\n    public String getContainerUuid() {\n        return this.containerUuid;\n    }\n\n    public String getCwd() {\n        return this.cwd;\n    }\n\n    public Object getEnvironment() {\n        return this.environment;\n    }\n\n    public LocalDateTime getExpiresAt() {\n        return this.expiresAt;\n    }\n\n    public List<String> getFilters() {\n        return this.filters;\n    }\n\n    public String getLogUuid() {\n        return this.logUuid;\n    }\n\n    public Object getMounts() {\n        return this.mounts;\n    }\n\n    public String getOutputName() {\n        return this.outputName;\n    }\n\n    public String getOutputPath() {\n        return this.outputPath;\n    }\n\n    public String getOutputUuid() {\n        return this.outputUuid;\n    }\n\n    public Integer getOutputTtl() {\n        return this.outputTtl;\n    }\n\n    public Integer getPriority() {\n        return this.priority;\n    }\n\n    public Object getProperties() {\n        return this.properties;\n    }\n\n    public String getRequestingContainerUuid() {\n        return this.requestingContainerUuid;\n    }\n\n    public RuntimeConstraints getRuntimeConstraints() {\n        return this.runtimeConstraints;\n    }\n\n    public Object getSchedulingParameters() {\n        return this.schedulingParameters;\n    }\n\n    public String getState() {\n        return this.state;\n    }\n\n    public Boolean getUseExisting() {\n        return this.useExisting;\n    }\n\n    public void setName(String name) {\n        this.name = name;\n    }\n\n    public void setGroupClass(String groupClass) {\n        this.groupClass = groupClass;\n    }\n\n    public void setDescription(String description) {\n        this.description = description;\n    }\n\n    public void setWritableBy(List<String> writableBy) {\n        this.writableBy = writableBy;\n    }\n\n    public void setDeleteAt(LocalDateTime deleteAt) {\n        this.deleteAt = deleteAt;\n    }\n\n    public void setTrashAt(LocalDateTime trashAt) {\n        this.trashAt = trashAt;\n    }\n\n    public void setIsTrashed(Boolean isTrashed) {\n        this.isTrashed = isTrashed;\n    }\n\n    public void setCommand(List<String> command) {\n        this.command = command;\n    }\n\n    public void setContainerCount(Integer containerCount) {\n        this.containerCount = containerCount;\n    }\n\n    public void setContainerCountMax(Integer containerCountMax) {\n        this.containerCountMax = containerCountMax;\n    }\n\n    public void setContainerImage(String containerImage) {\n        this.containerImage = containerImage;\n    }\n\n    public void setContainerUuid(String containerUuid) {\n        this.containerUuid = containerUuid;\n    }\n\n    public void setCwd(String cwd) {\n        this.cwd = cwd;\n    }\n\n    public void setEnvironment(Object environment) {\n        this.environment = environment;\n    }\n\n    public void setExpiresAt(LocalDateTime expiresAt) {\n        this.expiresAt = expiresAt;\n    }\n\n    public void setFilters(List<String> filters) {\n        this.filters = 
filters;\n    }\n\n    public void setLogUuid(String logUuid) {\n        this.logUuid = logUuid;\n    }\n\n    public void setMounts(Object mounts) {\n        this.mounts = mounts;\n    }\n\n    public void setOutputName(String outputName) {\n        this.outputName = outputName;\n    }\n\n    public void setOutputPath(String outputPath) {\n        this.outputPath = outputPath;\n    }\n\n    public void setOutputUuid(String outputUuid) {\n        this.outputUuid = outputUuid;\n    }\n\n    public void setOutputTtl(Integer outputTtl) {\n        this.outputTtl = outputTtl;\n    }\n\n    public void setPriority(Integer priority) {\n        this.priority = priority;\n    }\n\n    public void setProperties(Object properties) {\n        this.properties = properties;\n    }\n\n    public void setRequestingContainerUuid(String requestingContainerUuid) {\n        this.requestingContainerUuid = requestingContainerUuid;\n    }\n\n    public void setRuntimeConstraints(RuntimeConstraints runtimeConstraints) {\n        this.runtimeConstraints = runtimeConstraints;\n    }\n\n    public void setSchedulingParameters(Object schedulingParameters) {\n        this.schedulingParameters = schedulingParameters;\n    }\n\n    public void setState(String state) {\n        this.state = state;\n    }\n\n    public void setUseExisting(Boolean useExisting) {\n        this.useExisting = useExisting;\n    }\n\n    public String toString() {\n        return \"Group(name=\" + this.getName() + \", groupClass=\" + this.getGroupClass() + \", description=\" + this.getDescription() + \", writableBy=\" + this.getWritableBy() + \", deleteAt=\" + this.getDeleteAt() + \", trashAt=\" + this.getTrashAt() + \", isTrashed=\" + this.getIsTrashed() + \", command=\" + this.getCommand() + \", containerCount=\" + this.getContainerCount() + \", containerCountMax=\" + this.getContainerCountMax() + \", containerImage=\" + this.getContainerImage() + \", containerUuid=\" + this.getContainerUuid() + \", cwd=\" + this.getCwd() + \", environment=\" + this.getEnvironment() + \", expiresAt=\" + this.getExpiresAt() + \", filters=\" + this.getFilters() + \", logUuid=\" + this.getLogUuid() + \", mounts=\" + this.getMounts() + \", outputName=\" + this.getOutputName() + \", outputPath=\" + this.getOutputPath() + \", outputUuid=\" + this.getOutputUuid() + \", outputTtl=\" + this.getOutputTtl() + \", priority=\" + this.getPriority() + \", properties=\" + this.getProperties() + \", requestingContainerUuid=\" + this.getRequestingContainerUuid() + \", runtimeConstraints=\" + this.getRuntimeConstraints() + \", schedulingParameters=\" + this.getSchedulingParameters() + \", state=\" + this.getState() + \", useExisting=\" + this.getUseExisting() + \")\";\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/GroupList.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\nimport java.util.List;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({ \"items\" })\npublic class GroupList extends ItemList {\n\n    @JsonProperty(\"items\")\n    private List<Group> items;\n\n    public List<Group> getItems() {\n        return this.items;\n    }\n\n    public void setItems(List<Group> items) {\n        this.items = items;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/Item.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\nimport java.time.LocalDateTime;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({ \"kind\", \"etag\", \"uuid\", \"owner_uuid\", \"created_at\", \"modified_by_client_uuid\",\n        \"modified_by_user_uuid\", \"modified_at\", \"updated_at\" })\npublic abstract class Item {\n\n    @JsonProperty(\"kind\")\n    private String kind;\n    @JsonProperty(\"etag\")\n    private String etag;\n    @JsonProperty(\"uuid\")\n    private String uuid;\n    @JsonProperty(\"owner_uuid\")\n    private String ownerUuid;\n    @JsonProperty(\"created_at\")\n    private LocalDateTime createdAt;\n    @JsonProperty(\"modified_by_client_uuid\")\n    private String modifiedByClientUuid;\n    @JsonProperty(\"modified_by_user_uuid\")\n    private String modifiedByUserUuid;\n    @JsonProperty(\"modified_at\")\n    private LocalDateTime modifiedAt;\n    @JsonProperty(\"updated_at\")\n    private LocalDateTime updatedAt;\n\n    public String getKind() {\n        return this.kind;\n    }\n\n    public String getEtag() {\n        return this.etag;\n    }\n\n    public String getUuid() {\n        return this.uuid;\n    }\n\n    public String getOwnerUuid() {\n        return this.ownerUuid;\n    }\n\n    public LocalDateTime getCreatedAt() {\n        return this.createdAt;\n    }\n\n    public String getModifiedByClientUuid() {\n        return this.modifiedByClientUuid;\n    }\n\n    public String getModifiedByUserUuid() {\n        return this.modifiedByUserUuid;\n    }\n\n    public LocalDateTime getModifiedAt() {\n        return this.modifiedAt;\n    }\n\n    public LocalDateTime getUpdatedAt() {\n        return this.updatedAt;\n    }\n\n    public void setKind(String kind) {\n        this.kind = kind;\n    }\n\n    public void setEtag(String etag) {\n        this.etag = etag;\n    }\n\n    public void setUuid(String uuid) {\n        this.uuid = uuid;\n    }\n\n    public void setOwnerUuid(String ownerUuid) {\n        this.ownerUuid = ownerUuid;\n    }\n\n    public void setCreatedAt(LocalDateTime createdAt) {\n        this.createdAt = createdAt;\n    }\n\n    public void setModifiedByClientUuid(String modifiedByClientUuid) {\n        this.modifiedByClientUuid = modifiedByClientUuid;\n    }\n\n    public void setModifiedByUserUuid(String modifiedByUserUuid) {\n        this.modifiedByUserUuid = modifiedByUserUuid;\n    }\n\n    public void setModifiedAt(LocalDateTime modifiedAt) {\n        this.modifiedAt = modifiedAt;\n    }\n\n    public void setUpdatedAt(LocalDateTime updatedAt) {\n        this.updatedAt = updatedAt;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/ItemList.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({ \"kind\", \"etag\", \"offset\", \"limit\", \"items_available\" })\npublic class ItemList {\n\n    @JsonProperty(\"kind\")\n    private String kind;\n    @JsonProperty(\"etag\")\n    private String etag;\n    @JsonProperty(\"offset\")\n    private Object offset;\n    @JsonProperty(\"limit\")\n    private Object limit;\n    @JsonProperty(\"items_available\")\n    private Integer itemsAvailable;\n\n    public String getKind() {\n        return this.kind;\n    }\n\n    public String getEtag() {\n        return this.etag;\n    }\n\n    public Object getOffset() {\n        return this.offset;\n    }\n\n    public Object getLimit() {\n        return this.limit;\n    }\n\n    public Integer getItemsAvailable() {\n        return this.itemsAvailable;\n    }\n\n    public void setKind(String kind) {\n        this.kind = kind;\n    }\n\n    public void setEtag(String etag) {\n        this.etag = etag;\n    }\n\n    public void setOffset(Object offset) {\n        this.offset = offset;\n    }\n\n    public void setLimit(Object limit) {\n        this.limit = limit;\n    }\n\n    public void setItemsAvailable(Integer itemsAvailable) {\n        this.itemsAvailable = itemsAvailable;\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/KeepService.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.*;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({ \"service_host\", \"service_port\", \"service_ssl_flag\", \"service_type\", \"read_only\" })\npublic class KeepService extends Item {\n\n    @JsonProperty(\"service_host\")\n    private String serviceHost;\n    @JsonProperty(\"service_port\")\n    private Integer servicePort;\n    @JsonProperty(\"service_ssl_flag\")\n    private Boolean serviceSslFlag;\n    @JsonProperty(\"service_type\")\n    private String serviceType;\n    @JsonProperty(\"read_only\")\n    private Boolean readOnly;\n    @JsonIgnore\n    private String serviceRoot;\n\n    public String getServiceHost() {\n        return this.serviceHost;\n    }\n\n    public Integer getServicePort() {\n        return this.servicePort;\n    }\n\n    public Boolean getServiceSslFlag() {\n        return this.serviceSslFlag;\n    }\n\n    public String getServiceType() {\n        return this.serviceType;\n    }\n\n    public Boolean getReadOnly() {\n        return this.readOnly;\n    }\n\n    public String getServiceRoot() {\n        return this.serviceRoot;\n    }\n\n    public void setServiceHost(String serviceHost) {\n        this.serviceHost = serviceHost;\n    }\n\n    public void setServicePort(Integer servicePort) {\n        this.servicePort = servicePort;\n    }\n\n    public void setServiceSslFlag(Boolean serviceSslFlag) {\n        this.serviceSslFlag = serviceSslFlag;\n    }\n\n    public void setServiceType(String serviceType) {\n        this.serviceType = serviceType;\n    }\n\n    public void setReadOnly(Boolean readOnly) {\n        this.readOnly = readOnly;\n    }\n\n    public void setServiceRoot(String serviceRoot) {\n        this.serviceRoot = serviceRoot;\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/KeepServiceList.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\nimport java.util.List;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({ \"items\" })\npublic class KeepServiceList extends ItemList {\n\n    @JsonProperty(\"items\")\n    private List<KeepService> items;\n\n    public List<KeepService> getItems() {\n        return this.items;\n    }\n\n    public void setItems(List<KeepService> items) {\n        this.items = items;\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/Link.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({\"name\", \"head_kind\", \"head_uuid\", \"link_class\"})\npublic class Link extends Item {\n\n    @JsonProperty(\"name\")\n    private String name;\n    @JsonProperty(value = \"head_kind\", access = JsonProperty.Access.WRITE_ONLY)\n    private String headKind;\n    @JsonProperty(\"head_uuid\")\n    private String headUuid;\n    @JsonProperty(\"tail_uuid\")\n    private String tailUuid;\n    @JsonProperty(value = \"tail_kind\", access = JsonProperty.Access.WRITE_ONLY)\n    private String tailKind;\n    @JsonProperty(\"link_class\")\n    private String linkClass;\n\n    public String getName() {\n        return name;\n    }\n\n    public String getHeadKind() {\n        return headKind;\n    }\n\n    public String getHeadUuid() {\n        return headUuid;\n    }\n\n    public String getTailUuid() {\n        return tailUuid;\n    }\n\n    public String getTailKind() {\n        return tailKind;\n    }\n\n    public String getLinkClass() {\n        return linkClass;\n    }\n\n    public void setName(String name) {\n        this.name = name;\n    }\n\n    public void setHeadKind(String headKind) {\n        this.headKind = headKind;\n    }\n\n    public void setHeadUuid(String headUuid) {\n        this.headUuid = headUuid;\n    }\n\n    public void setTailUuid(String tailUuid) {\n        this.tailUuid = tailUuid;\n    }\n\n    public void setTailKind(String tailKind) {\n        this.tailKind = tailKind;\n    }\n\n    public void setLinkClass(String linkClass) {\n        this.linkClass = linkClass;\n    }\n\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/LinkList.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\nimport java.util.List;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({ \"items\" })\npublic class LinkList extends ItemList {\n\n    @JsonProperty(\"items\")\n    private List<Link> items;\n\n    public List<Link> getItems() {\n        return this.items;\n    }\n\n    public void setItems(List<Link> items) {\n        this.items = items;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/RuntimeConstraints.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({ \"API\", \"vcpus\", \"ram\", \"keep_cache_ram\" })\npublic class RuntimeConstraints {\n\n    @JsonProperty(\"API\")\n    private Boolean api;\n    @JsonProperty(\"vcpus\")\n    private Integer vcpus;\n    @JsonProperty(\"ram\")\n    private Long ram;\n    @JsonProperty(\"keep_cache_ram\")\n    private Long keepCacheRam;\n\n    public Boolean getApi() {\n        return this.api;\n    }\n\n    public Integer getVcpus() {\n        return this.vcpus;\n    }\n\n    public Long getRam() {\n        return this.ram;\n    }\n\n    public Long getKeepCacheRam() {\n        return this.keepCacheRam;\n    }\n\n    public void setApi(Boolean api) {\n        this.api = api;\n    }\n\n    public void setVcpus(Integer vcpus) {\n        this.vcpus = vcpus;\n    }\n\n    public void setRam(Long ram) {\n        this.ram = ram;\n    }\n\n    public void setKeepCacheRam(Long keepCacheRam) {\n        this.keepCacheRam = keepCacheRam;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/User.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\nimport java.util.List;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({ \"email\", \"username\", \"full_name\", \"first_name\", \"last_name\", \"identity_url\", \"is_active\", \"is_admin\", \"is_invited\", \n    \"prefs\", \"writable_by\" })\npublic class User extends Item {\n\n    @JsonProperty(\"email\")\n    private String email;\n    @JsonProperty(\"username\")\n    private String username;\n    @JsonProperty(\"full_name\")\n    private String fullName;\n    @JsonProperty(\"first_name\")\n    private String firstName;\n    @JsonProperty(\"last_name\")\n    private String lastName;\n    @JsonProperty(\"identity_url\")\n    private String identityUrl;\n    @JsonProperty(\"is_active\")\n    private Boolean isActive;\n    @JsonProperty(\"is_admin\")\n    private Boolean isAdmin;\n    @JsonProperty(\"is_invited\")\n    private Boolean isInvited;\n    @JsonProperty(\"prefs\")\n    private Object prefs;\n    @JsonProperty(\"writable_by\")\n    private List<String> writableBy;\n\n    public String getEmail() {\n        return this.email;\n    }\n\n    public String getUsername() {\n        return this.username;\n    }\n\n    public String getFullName() {\n        return this.fullName;\n    }\n\n    public String getFirstName() {\n        return this.firstName;\n    }\n\n    public String getLastName() {\n        return this.lastName;\n    }\n\n    public String getIdentityUrl() {\n        return this.identityUrl;\n    }\n\n    public Boolean getIsActive() {\n        return this.isActive;\n    }\n\n    public Boolean getIsAdmin() {\n        return this.isAdmin;\n    }\n\n    public Boolean getIsInvited() {\n        return this.isInvited;\n    }\n\n    public Object getPrefs() {\n        return this.prefs;\n    }\n\n    public List<String> getWritableBy() {\n        return this.writableBy;\n    }\n\n    public void setEmail(String email) {\n        this.email = email;\n    }\n\n    public void setUsername(String username) {\n        this.username = username;\n    }\n\n    public void setFullName(String fullName) {\n        this.fullName = fullName;\n    }\n\n    public void setFirstName(String firstName) {\n        this.firstName = firstName;\n    }\n\n    public void setLastName(String lastName) {\n        this.lastName = lastName;\n    }\n\n    public void setIdentityUrl(String identityUrl) {\n        this.identityUrl = identityUrl;\n    }\n\n    public void setIsActive(Boolean isActive) {\n        this.isActive = isActive;\n    }\n\n    public void setIsAdmin(Boolean isAdmin) {\n        this.isAdmin = isAdmin;\n    }\n\n    public void setIsInvited(Boolean isInvited) {\n        this.isInvited = isInvited;\n    }\n\n    public void setPrefs(Object prefs) {\n        this.prefs = prefs;\n    }\n\n    public void setWritableBy(List<String> writableBy) {\n        this.writableBy = writableBy;\n    }\n\n    public String toString() {\n        return \"User(email=\" + this.getEmail() + \", username=\" + this.getUsername() + \", fullName=\" + this.getFullName() + \", firstName=\" + this.getFirstName() + \", lastName=\" + 
this.getLastName() + \", identityUrl=\" + this.getIdentityUrl() + \", isActive=\" + this.getIsActive() + \", isAdmin=\" + this.getIsAdmin() + \", isInvited=\" + this.getIsInvited() + \", prefs=\" + this.getPrefs() + \", writableBy=\" + this.getWritableBy() + \")\";\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/UserList.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model;\n\nimport com.fasterxml.jackson.annotation.JsonIgnoreProperties;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\nimport java.util.List;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonIgnoreProperties(ignoreUnknown = true)\n@JsonPropertyOrder({ \"items\" })\npublic class UserList extends ItemList {\n\n    @JsonProperty(\"items\")\n    private List<User> items;\n\n    public List<User> getItems() {\n        return this.items;\n    }\n\n    public void setItems(List<User> items) {\n        this.items = items;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/argument/Argument.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model.argument;\n\nimport com.fasterxml.jackson.annotation.JsonIgnore;\n\npublic abstract class Argument {\n\n    @JsonIgnore\n    private String uuid;\n\n    public String getUuid() {\n        return this.uuid;\n    }\n\n    public void setUuid(String uuid) {\n        this.uuid = uuid;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/argument/ContentsGroup.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model.argument;\n\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\nimport java.util.List;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonPropertyOrder({ \"limit\", \"order\", \"filters\", \"recursive\" })\npublic class ContentsGroup extends Argument {\n\n    @JsonProperty(\"limit\")\n    private Integer limit;\n\n    @JsonProperty(\"order\")\n    private String order;\n\n    @JsonProperty(\"filters\")\n    private List<String> filters;\n\n    @JsonProperty(\"recursive\")\n    private Boolean recursive;\n\n    public Integer getLimit() {\n        return this.limit;\n    }\n\n    public String getOrder() {\n        return this.order;\n    }\n\n    public List<String> getFilters() {\n        return this.filters;\n    }\n\n    public Boolean getRecursive() {\n        return this.recursive;\n    }\n\n    public void setLimit(Integer limit) {\n        this.limit = limit;\n    }\n\n    public void setOrder(String order) {\n        this.order = order;\n    }\n\n    public void setFilters(List<String> filters) {\n        this.filters = filters;\n    }\n\n    public void setRecursive(Boolean recursive) {\n        this.recursive = recursive;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/argument/Filter.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model.argument;\n\nimport com.fasterxml.jackson.annotation.JsonFormat;\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\n@JsonFormat(shape = JsonFormat.Shape.ARRAY)\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonPropertyOrder({ \"attribute\", \"operator\", \"operand\" })\npublic class Filter {\n\n    @JsonProperty(\"attribute\")\n    private String attribute;\n\n    @JsonProperty(\"operator\")\n    private Operator operator;\n\n    @JsonProperty(\"operand\")\n    private Object operand;\n\n    private Filter(String attribute, Operator operator, Object operand) {\n        this.attribute = attribute;\n        this.operator = operator;\n        this.operand = operand;\n    }\n\n    public static Filter of(String attribute, Operator operator, Object operand) {\n        return new Filter(attribute, operator, operand);\n    }\n\n    public String getAttribute() {\n        return this.attribute;\n    }\n\n    public Operator getOperator() {\n        return this.operator;\n    }\n\n    public Object getOperand() {\n        return this.operand;\n    }\n\n    public boolean equals(Object o) {\n        if (o == this) return true;\n        if (!(o instanceof Filter)) return false;\n        final Filter other = (Filter) o;\n        final Object this$attribute = this.getAttribute();\n        final Object other$attribute = other.getAttribute();\n        if (this$attribute == null ? other$attribute != null : !this$attribute.equals(other$attribute)) return false;\n        final Object this$operator = this.getOperator();\n        final Object other$operator = other.getOperator();\n        if (this$operator == null ? other$operator != null : !this$operator.equals(other$operator)) return false;\n        final Object this$operand = this.getOperand();\n        final Object other$operand = other.getOperand();\n        if (this$operand == null ? other$operand != null : !this$operand.equals(other$operand)) return false;\n        return true;\n    }\n\n    public int hashCode() {\n        final int PRIME = 59;\n        int result = 1;\n        final Object $attribute = this.getAttribute();\n        result = result * PRIME + ($attribute == null ? 43 : $attribute.hashCode());\n        final Object $operator = this.getOperator();\n        result = result * PRIME + ($operator == null ? 43 : $operator.hashCode());\n        final Object $operand = this.getOperand();\n        result = result * PRIME + ($operand == null ? 
43 : $operand.hashCode());\n        return result;\n    }\n\n    public String toString() {\n        return \"Filter(attribute=\" + this.getAttribute() + \", operator=\" + this.getOperator() + \", operand=\" + this.getOperand() + \")\";\n    }\n\n    public enum Operator {\n\n        @JsonProperty(\"<\")\n        LESS,\n\n        @JsonProperty(\"<=\")\n        LESS_EQUALS,\n\n        @JsonProperty(\">=\")\n        MORE_EQUALS,\n\n        @JsonProperty(\">\")\n        MORE,\n\n        @JsonProperty(\"like\")\n        LIKE,\n\n        @JsonProperty(\"ilike\")\n        ILIKE,\n\n        @JsonProperty(\"=\")\n        EQUALS,\n\n        @JsonProperty(\"!=\")\n        NOT_EQUALS,\n\n        @JsonProperty(\"in\")\n        IN,\n\n        @JsonProperty(\"not in\")\n        NOT_IN,\n\n        @JsonProperty(\"is_a\")\n        IS_A,\n\n        @JsonProperty(\"exists\")\n        EXISTS\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/argument/ListArgument.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model.argument;\n\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\nimport java.util.List;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonPropertyOrder({ \"limit\", \"offset\", \"filters\", \"order\", \"select\", \"distinct\", \"count\", \"exclude_home_project\", \"include_old_versions\", \"include_trash\" })\npublic class ListArgument extends Argument {\n\n    @JsonProperty(\"limit\")\n    private Integer limit;\n\n    @JsonProperty(\"offset\")\n    private Integer offset;\n    \n    @JsonProperty(\"filters\")\n    private List<Filter> filters;\n\n    @JsonProperty(\"order\")\n    private List<String> order;\n\n    @JsonProperty(\"select\")\n    private List<String> select;\n\n    @JsonProperty(\"distinct\")\n    private Boolean distinct;\n\n    @JsonProperty(\"count\")\n    private Count count;\n\n    @JsonProperty(\"exclude_home_project\")\n    private Boolean excludeHomeProject;\n\n    @JsonProperty(\"include_old_versions\")\n    private Boolean includeOldVersions;\n\n    @JsonProperty(\"include_trash\")\n    private Boolean includeTrash;\n\n    ListArgument(\n            Integer limit, Integer offset, List<Filter> filters, List<String> order, List<String> select,\n            Boolean distinct, Count count, Boolean excludeHomeProject, Boolean includeOldVersions,\n            Boolean includeTrash\n    ) {\n        this.limit = limit;\n        this.offset = offset;\n        this.filters = filters;\n        this.order = order;\n        this.select = select;\n        this.distinct = distinct;\n        this.count = count;\n        this.excludeHomeProject = excludeHomeProject;\n        this.includeOldVersions = includeOldVersions;\n        this.includeTrash = includeTrash;\n    }\n\n    public static ListArgumentBuilder builder() {\n        return new ListArgumentBuilder();\n    }\n\n    public enum Count {\n        \n        @JsonProperty(\"exact\")\n        EXACT,\n        \n        @JsonProperty(\"none\")\n        NONE\n    }\n\n    public static class ListArgumentBuilder {\n        private Integer limit;\n        private Integer offset;\n        private List<Filter> filters;\n        private List<String> order;\n        private List<String> select;\n        private Boolean distinct;\n        private Count count;\n        private Boolean excludeHomeProject;\n        private Boolean includeOldVersions;\n        private Boolean includeTrash;\n\n        ListArgumentBuilder() {\n        }\n\n        public ListArgumentBuilder limit(Integer limit) {\n            this.limit = limit;\n            return this;\n        }\n\n        public ListArgumentBuilder offset(Integer offset) {\n            this.offset = offset;\n            return this;\n        }\n\n        public ListArgumentBuilder filters(List<Filter> filters) {\n            this.filters = filters;\n            return this;\n        }\n\n        public ListArgumentBuilder order(List<String> order) {\n            this.order = order;\n            return this;\n        }\n\n        public ListArgumentBuilder select(List<String> select) {\n            this.select = select;\n            return this;\n        }\n\n        public ListArgumentBuilder distinct(Boolean distinct) {\n            this.distinct = distinct;\n            return this;\n  
      }\n\n        public ListArgumentBuilder count(Count count) {\n            this.count = count;\n            return this;\n        }\n\n        public ListArgument.ListArgumentBuilder excludeHomeProject(Boolean excludeHomeProject) {\n            this.excludeHomeProject = excludeHomeProject;\n            return this;\n        }\n\n        public ListArgument.ListArgumentBuilder includeOldVersions(Boolean includeOldVersions) {\n            this.includeOldVersions = includeOldVersions;\n            return this;\n        }\n\n        public ListArgument.ListArgumentBuilder includeTrash(Boolean includeTrash) {\n            this.includeTrash = includeTrash;\n            return this;\n        }\n\n        public ListArgument build() {\n            return new ListArgument(limit, offset, filters, order, select, distinct, count, excludeHomeProject, includeOldVersions, includeTrash);\n        }\n\n        public String toString() {\n            return \"ListArgument.ListArgumentBuilder(limit=\" + this.limit +\n                    \", offset=\" + this.offset + \", filters=\" + this.filters +\n                    \", order=\" + this.order + \", select=\" + this.select +\n                    \", distinct=\" + this.distinct + \", count=\" + this.count +\n                    \", excludeHomeProject=\" + this.excludeHomeProject +\n                    \", includeOldVersions=\" + this.includeOldVersions +\n                    \", includeTrash=\" + this.includeTrash +\n                    \")\";\n        }\n    }\n}\n"
  },
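  {
    "path": "contrib/java-sdk-v2/examples/ListArgumentExample.java",
    "content": "/*\n * Illustrative usage sketch (hypothetical example file, not part of the SDK sources).\n * Shows how Filter and ListArgument from this package combine into a list request.\n * Attribute names and operand values below are placeholders.\n */\n\nimport org.arvados.client.api.model.argument.Filter;\nimport org.arvados.client.api.model.argument.ListArgument;\n\nimport java.util.Arrays;\nimport java.util.Collections;\n\npublic class ListArgumentExample {\n\n    public static void main(String[] args) {\n        // name ILIKE 'demo%' AND group_class IN ('project', 'sub-project')\n        ListArgument listArgument = ListArgument.builder()\n                .filters(Arrays.asList(\n                        Filter.of(\"name\", Filter.Operator.ILIKE, \"demo%\"),\n                        Filter.of(\"group_class\", Filter.Operator.IN,\n                                Arrays.asList(\"project\", \"sub-project\"))))\n                .order(Collections.singletonList(\"created_at desc\"))\n                .limit(50)\n                .count(ListArgument.Count.NONE) // skip the exact item count\n                .build();\n\n        // The resulting object would typically be passed to an API client,\n        // e.g. groupsApiClient.list(listArgument).\n        System.out.println(\"Prepared list request: \" + listArgument);\n    }\n}\n"
  },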
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/api/model/argument/UntrashGroup.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.model.argument;\n\nimport com.fasterxml.jackson.annotation.JsonInclude;\nimport com.fasterxml.jackson.annotation.JsonProperty;\nimport com.fasterxml.jackson.annotation.JsonPropertyOrder;\n\n@JsonInclude(JsonInclude.Include.NON_NULL)\n@JsonPropertyOrder({ \"ensure_unique_name\" })\npublic class UntrashGroup extends Argument {\n\n    @JsonProperty(\"ensure_unique_name\")\n    private Boolean ensureUniqueName;\n\n    public Boolean getEnsureUniqueName() {\n        return this.ensureUniqueName;\n    }\n\n    public void setEnsureUniqueName(Boolean ensureUniqueName) {\n        this.ensureUniqueName = ensureUniqueName;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/common/Characters.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.common;\n\npublic final class Characters {\n\n    private Characters() {}\n\n    public static final String SPACE = \"\\\\040\";\n    public static final String NEW_LINE = \"\\n\";\n    public static final String SLASH = \"/\";\n    public static final String DOT = \".\";\n    public static final String COLON = \":\";\n    public static final String PERCENT = \"%\";\n    public static final String QUOTE = \"\\\"\";\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/common/Headers.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.common;\n\npublic final class Headers {\n\n    private Headers() {}\n    \n    public static final String X_KEEP_DESIRED_REPLICAS = \"X-Keep-Desired-Replicas\";\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/common/Patterns.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.common;\n\npublic final class Patterns {\n\n    public static final String HINT_PATTERN = \"^[A-Z][A-Za-z0-9@_-]+$\";\n    public static final String FILE_TOKEN_PATTERN = \"(\\\\d+:\\\\d+:\\\\S+)\";\n    public static final String LOCATOR_PATTERN = \"([0-9a-f]{32})\\\\+([0-9]+)(\\\\+[A-Z][-A-Za-z0-9@_]*)*\";\n    public static final String GROUP_UUID_PATTERN = \"[a-z0-9]{5}-j7d0g-[a-z0-9]{15}\";\n    public static final String USER_UUID_PATTERN = \"[a-z0-9]{5}-tpzed-[a-z0-9]{15}\";\n\n    private Patterns() {}\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/config/ConfigProvider.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.config;\n\nimport java.io.File;\n\npublic interface ConfigProvider {\n\n    //API\n    boolean isApiHostInsecure();\n\n    String getKeepWebHost();\n\n    int getKeepWebPort();\n\n    String getApiHost();\n\n    int getApiPort();\n\n    String getApiToken();\n\n    String getApiProtocol();\n\n    int getConnectTimeout();\n\n    int getReadTimeout();\n\n    int getWriteTimeout();\n\n    //FILE UPLOAD\n    int getFileSplitSize();\n\n    File getFileSplitDirectory();\n\n    int getNumberOfCopies();\n\n    int getNumberOfRetries();\n\n\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/config/ExternalConfigProvider.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.config;\n\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport java.io.File;\n\npublic class ExternalConfigProvider implements ConfigProvider {\n\n    private static final Logger log = LoggerFactory.getLogger(ExternalConfigProvider.class);\n    private static final int DEFAULT_CONNECTION_TIMEOUT = 60000;\n    private static final int DEFAULT_READ_TIMEOUT = 60000;\n    private static final int DEFAULT_WRITE_TIMEOUT = 60000;\n\n    private final boolean apiHostInsecure;\n    private final String keepWebHost;\n    private final int keepWebPort;\n    private final String apiHost;\n    private final int apiPort;\n    private final String apiToken;\n    private final String apiProtocol;\n    private final int fileSplitSize;\n    private final File fileSplitDirectory;\n    private final int numberOfCopies;\n    private final int numberOfRetries;\n    private final int connectTimeout;\n    private final int readTimeout;\n    private final int writeTimeout;\n\n    ExternalConfigProvider(boolean apiHostInsecure, String keepWebHost, int keepWebPort, String apiHost, int apiPort,\n                           String apiToken, String apiProtocol, int fileSplitSize, File fileSplitDirectory,\n                           int numberOfCopies, int numberOfRetries) {\n        this.apiHostInsecure = apiHostInsecure;\n        this.keepWebHost = keepWebHost;\n        this.keepWebPort = keepWebPort;\n        this.apiHost = apiHost;\n        this.apiPort = apiPort;\n        this.apiToken = apiToken;\n        this.apiProtocol = apiProtocol;\n        this.fileSplitSize = fileSplitSize;\n        this.fileSplitDirectory = fileSplitDirectory;\n        this.numberOfCopies = numberOfCopies;\n        this.numberOfRetries = numberOfRetries;\n        this.connectTimeout = DEFAULT_CONNECTION_TIMEOUT;\n        this.readTimeout = DEFAULT_READ_TIMEOUT;\n        this.writeTimeout = DEFAULT_WRITE_TIMEOUT;\n    }\n\n    ExternalConfigProvider(boolean apiHostInsecure, String keepWebHost, int keepWebPort, String apiHost, int apiPort,\n                           String apiToken, String apiProtocol, int fileSplitSize, File fileSplitDirectory,\n                           int numberOfCopies, int numberOfRetries,\n                           int connectTimeout, int readTimeout, int writeTimeout) {\n        this.apiHostInsecure = apiHostInsecure;\n        this.keepWebHost = keepWebHost;\n        this.keepWebPort = keepWebPort;\n        this.apiHost = apiHost;\n        this.apiPort = apiPort;\n        this.apiToken = apiToken;\n        this.apiProtocol = apiProtocol;\n        this.fileSplitSize = fileSplitSize;\n        this.fileSplitDirectory = fileSplitDirectory;\n        this.numberOfCopies = numberOfCopies;\n        this.numberOfRetries = numberOfRetries;\n        this.connectTimeout = connectTimeout;\n        this.readTimeout = readTimeout;\n        this.writeTimeout = writeTimeout;\n    }\n\n    public static ExternalConfigProviderBuilder builder() {\n        return new ExternalConfigProviderBuilder();\n    }\n\n    @Override\n    public String toString() {\n        return \"ExternalConfigProvider{\" +\n               \"apiHostInsecure=\" + apiHostInsecure +\n               \", keepWebHost='\" + keepWebHost + '\\'' +\n               \", keepWebPort=\" + keepWebPort +\n               \", apiHost='\" + apiHost + '\\'' +\n               \", apiPort=\" + 
apiPort +\n               \", apiToken='\" + apiToken + '\\'' +\n               \", apiProtocol='\" + apiProtocol + '\\'' +\n               \", fileSplitSize=\" + fileSplitSize +\n               \", fileSplitDirectory=\" + fileSplitDirectory +\n               \", numberOfCopies=\" + numberOfCopies +\n               \", numberOfRetries=\" + numberOfRetries +\n               '}';\n    }\n\n    public boolean isApiHostInsecure() {\n        return this.apiHostInsecure;\n    }\n\n    public String getKeepWebHost() {\n        return this.keepWebHost;\n    }\n\n    public int getKeepWebPort() {\n        return this.keepWebPort;\n    }\n\n    public String getApiHost() {\n        return this.apiHost;\n    }\n\n    public int getApiPort() {\n        return this.apiPort;\n    }\n\n    public String getApiToken() {\n        return this.apiToken;\n    }\n\n    public String getApiProtocol() {\n        return this.apiProtocol;\n    }\n\n    public int getFileSplitSize() {\n        return this.fileSplitSize;\n    }\n\n    public File getFileSplitDirectory() {\n        return this.fileSplitDirectory;\n    }\n\n    public int getNumberOfCopies() {\n        return this.numberOfCopies;\n    }\n\n    public int getNumberOfRetries() {\n        return this.numberOfRetries;\n    }\n\n    public int getConnectTimeout() {\n        return this.connectTimeout;\n    }\n\n    public int getReadTimeout() {\n        return this.readTimeout;\n    }\n\n    public int getWriteTimeout() {\n        return this.writeTimeout;\n    }\n\n    public static class ExternalConfigProviderBuilder {\n        private boolean apiHostInsecure;\n        private String keepWebHost;\n        private int keepWebPort;\n        private String apiHost;\n        private int apiPort;\n        private String apiToken;\n        private String apiProtocol;\n        private int fileSplitSize;\n        private File fileSplitDirectory;\n        private int numberOfCopies;\n        private int numberOfRetries;\n        private int connectTimeout = DEFAULT_CONNECTION_TIMEOUT;\n        private int readTimeout = DEFAULT_READ_TIMEOUT;\n        private int writeTimeout = DEFAULT_WRITE_TIMEOUT;\n        private boolean autoFetchWebDAV = true;\n\n        ExternalConfigProviderBuilder() {\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder apiHostInsecure(boolean apiHostInsecure) {\n            this.apiHostInsecure = apiHostInsecure;\n            return this;\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder keepWebHost(String keepWebHost) {\n            this.keepWebHost = keepWebHost;\n            return this;\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder keepWebPort(int keepWebPort) {\n            this.keepWebPort = keepWebPort;\n            return this;\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder apiHost(String apiHost) {\n            this.apiHost = apiHost;\n            return this;\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder apiPort(int apiPort) {\n            this.apiPort = apiPort;\n            return this;\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder apiToken(String apiToken) {\n            this.apiToken = apiToken;\n            return this;\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder apiProtocol(String apiProtocol) {\n            this.apiProtocol = apiProtocol;\n            return this;\n        }\n\n        
public ExternalConfigProvider.ExternalConfigProviderBuilder fileSplitSize(int fileSplitSize) {\n            this.fileSplitSize = fileSplitSize;\n            return this;\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder fileSplitDirectory(File fileSplitDirectory) {\n            this.fileSplitDirectory = fileSplitDirectory;\n            return this;\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder numberOfCopies(int numberOfCopies) {\n            this.numberOfCopies = numberOfCopies;\n            return this;\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder numberOfRetries(int numberOfRetries) {\n            this.numberOfRetries = numberOfRetries;\n            return this;\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder connectTimeout(int connectTimeout) {\n            this.connectTimeout = connectTimeout;\n            return this;\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder readTimeout(int readTimeout) {\n            this.readTimeout = readTimeout;\n            return this;\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder writeTimeout(int writeTimeout) {\n            this.writeTimeout = writeTimeout;\n            return this;\n        }\n\n        public ExternalConfigProvider.ExternalConfigProviderBuilder autoFetchWebDAV(boolean autoFetchWebDAV) {\n            this.autoFetchWebDAV = autoFetchWebDAV;\n            return this;\n        }\n\n        public ExternalConfigProvider build() {\n            if (shouldAutoFetchWebDAV()) {\n                autoFetchWebDAVConfiguration();\n            }\n\n            validateWebDAVConfiguration();\n\n            return new ExternalConfigProvider(\n                    apiHostInsecure, keepWebHost, keepWebPort, apiHost,\n                    apiPort, apiToken, apiProtocol, fileSplitSize, fileSplitDirectory,\n                    numberOfCopies, numberOfRetries, connectTimeout, readTimeout, writeTimeout\n            );\n        }\n\n        private boolean shouldAutoFetchWebDAV() {\n            return autoFetchWebDAV &&\n                   (keepWebHost == null || keepWebHost.isEmpty());\n        }\n\n        private void autoFetchWebDAVConfiguration() {\n            WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(\n                    apiProtocol, apiHost, apiPort, apiHostInsecure\n            );\n\n            WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();\n\n            if (config != null) {\n                keepWebHost = config.getHost();\n                keepWebPort = config.getPort();\n            }\n        }\n\n        private void validateWebDAVConfiguration() {\n            if (keepWebHost == null || keepWebHost.isEmpty()) {\n                log.warn(\"WebDAV host is not configured. File operations may not work properly. Consider providing keepWebHost/keepWebPort or ensuring the Arvados API config endpoint is accessible.\");\n            }\n        }\n\n    }\n}\n"
  },
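  {
    "path": "contrib/java-sdk-v2/examples/ExternalConfigProviderExample.java",
    "content": "/*\n * Illustrative usage sketch (hypothetical example file, not part of the SDK sources).\n * Shows how ExternalConfigProvider.builder() assembles a ConfigProvider in code.\n * Host names, token and numeric values are placeholders.\n */\n\nimport org.arvados.client.config.ConfigProvider;\nimport org.arvados.client.config.ExternalConfigProvider;\n\nimport java.io.File;\n\npublic class ExternalConfigProviderExample {\n\n    public static void main(String[] args) {\n        ConfigProvider config = ExternalConfigProvider.builder()\n                .apiProtocol(\"https\")\n                .apiHost(\"arvados.example.com\") // placeholder cluster\n                .apiPort(443)\n                .apiToken(System.getenv(\"ARVADOS_API_TOKEN\"))\n                .apiHostInsecure(false)\n                // If keepWebHost/keepWebPort are omitted, build() tries to auto-fetch\n                // the WebDAV endpoint from the cluster config; disable that behavior\n                // with .autoFetchWebDAV(false).\n                .keepWebHost(\"keep-web.example.com\") // placeholder\n                .keepWebPort(443)\n                .fileSplitSize(64) // placeholder chunk size\n                .fileSplitDirectory(new File(System.getProperty(\"java.io.tmpdir\")))\n                .numberOfCopies(2)\n                .numberOfRetries(3)\n                .connectTimeout(60000) // milliseconds\n                .build();\n\n        System.out.println(config);\n    }\n}\n"
  },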
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/config/FileConfigProvider.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.config;\n\nimport com.typesafe.config.Config;\nimport com.typesafe.config.ConfigFactory;\n\nimport java.io.File;\n\npublic class FileConfigProvider implements ConfigProvider {\n\n    private static final String DEFAULT_PATH = \"arvados\";\n    private final Config config;\n\n    public FileConfigProvider() {\n        config = ConfigFactory.load().getConfig(DEFAULT_PATH);\n    }\n\n    public FileConfigProvider(final String configFile) {\n        config = (configFile != null) ?\n                ConfigFactory.load(configFile).getConfig(DEFAULT_PATH) : ConfigFactory.load().getConfig(DEFAULT_PATH);\n    }\n\n    public Config getConfig() {\n        return config;\n    }\n\n    private File getFile(String path) {\n        return new File(config.getString(path));\n    }\n\n    private int getInt(String path) {\n        return config.getInt(path);\n    }\n\n    private boolean getBoolean(String path) {\n        return config.getBoolean(path);\n    }\n\n    private String getString(String path) {\n        return config.getString(path);\n    }\n\n    @Override\n    public boolean isApiHostInsecure() {\n        return this.getBoolean(\"api.host-insecure\");\n    }\n\n    @Override\n    public String getKeepWebHost() {\n        return this.getString(\"api.keepweb-host\");\n    }\n\n    @Override\n    public int getKeepWebPort() {\n        return this.getInt(\"api.keepweb-port\");\n    }\n\n    @Override\n    public String getApiHost() {\n        return this.getString(\"api.host\");\n    }\n\n    @Override\n    public int getApiPort() {\n        return this.getInt(\"api.port\");\n    }\n\n    @Override\n    public String getApiToken() {\n        return this.getString(\"api.token\");\n    }\n\n    @Override\n    public String getApiProtocol() {\n        return this.getString(\"api.protocol\");\n    }\n\n    @Override\n    public int getFileSplitSize() {\n        return this.getInt(\"split-size\");\n    }\n\n    @Override\n    public File getFileSplitDirectory() {\n        return this.getFile(\"temp-dir\");\n    }\n\n    @Override\n    public int getNumberOfCopies() {\n        return this.getInt(\"copies\");\n    }\n\n    @Override\n    public int getNumberOfRetries() {\n        return this.getInt(\"retries\");\n    }\n\n    public String getIntegrationTestProjectUuid() {\n        return this.getString(\"integration-tests.project-uuid\");\n    }\n\n    @Override\n    public int getConnectTimeout() {\n        return this.getInt(\"connectTimeout\");\n    }\n\n    @Override\n    public int getReadTimeout() {\n        return this.getInt(\"readTimeout\");\n    }\n\n    @Override\n    public int getWriteTimeout() {\n        return this.getInt(\"writeTimeout\");\n    }\n}\n"
  },
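  {
    "path": "contrib/java-sdk-v2/examples/FileConfigProviderExample.java",
    "content": "/*\n * Illustrative usage sketch (hypothetical example file, not part of the SDK sources).\n * FileConfigProvider reads Typesafe Config (HOCON) from the classpath, keyed under\n * \"arvados\". A minimal application.conf matching the getters in FileConfigProvider\n * could look like this (all values are placeholders):\n *\n *   arvados {\n *     api {\n *       protocol = \"https\"\n *       host = \"arvados.example.com\"\n *       port = 443\n *       token = \"...\"\n *       host-insecure = false\n *       keepweb-host = \"keep-web.example.com\"\n *       keepweb-port = 443\n *     }\n *     split-size = 64\n *     temp-dir = \"/tmp\"\n *     copies = 2\n *     retries = 3\n *     connectTimeout = 60000\n *     readTimeout = 60000\n *     writeTimeout = 60000\n *   }\n */\n\nimport org.arvados.client.config.FileConfigProvider;\n\npublic class FileConfigProviderExample {\n\n    public static void main(String[] args) {\n        // No argument: loads application.conf from the classpath.\n        // Alternatively: new FileConfigProvider(\"my-config.conf\").\n        FileConfigProvider config = new FileConfigProvider();\n        System.out.println(config.getApiHost() + \":\" + config.getApiPort());\n    }\n}\n"
  },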
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/config/WebDAVConfigFetcher.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.config;\n\nimport org.arvados.client.api.client.ConfigApiClient;\nimport org.arvados.client.api.model.ArvadosConfig;\nimport org.arvados.client.exception.ArvadosApiException;\nimport org.slf4j.Logger;\nimport org.slf4j.LoggerFactory;\n\nimport java.net.MalformedURLException;\nimport java.net.URL;\n\npublic class WebDAVConfigFetcher {\n    \n    private static final Logger log = LoggerFactory.getLogger(WebDAVConfigFetcher.class);\n    private static final int DEFAULT_HTTPS_PORT = 443;\n    private static final int DEFAULT_HTTP_PORT = 80;\n    \n    private final String apiProtocol;\n    private final String apiHost;\n    private final int apiPort;\n    private final boolean apiHostInsecure;\n    \n    public WebDAVConfigFetcher(String apiProtocol, String apiHost, int apiPort, boolean apiHostInsecure) {\n        this.apiProtocol = apiProtocol != null ? apiProtocol : \"https\";\n        this.apiHost = apiHost;\n        this.apiPort = apiPort > 0 ? apiPort : (this.apiProtocol.equals(\"https\") ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT);\n        this.apiHostInsecure = apiHostInsecure;\n    }\n    \n    public WebDAVConfig fetch() {\n        if (!isConfigured()) {\n            log.debug(\"API host not configured, skipping WebDAV auto-fetch\");\n            return null;\n        }\n        \n        try {\n            log.info(\"Attempting to auto-fetch WebDAV configuration from Arvados API\");\n            \n            ArvadosConfig config = fetchArvadosConfig();\n            String webDavUrl = extractWebDAVUrl(config);\n            \n            if (webDavUrl == null) {\n                log.debug(\"No WebDAV URL found in Arvados config\");\n                return null;\n            }\n            \n            return parseWebDAVUrl(webDavUrl);\n            \n        } catch (ArvadosApiException e) {\n            log.warn(\"Failed to auto-fetch WebDAV configuration: {}. \" +\n                \"You may need to configure keepWebHost and keepWebPort manually.\", \n                e.getMessage());\n        } catch (Exception e) {\n            log.warn(\"Unexpected error while auto-fetching WebDAV configuration: {}. 
\" +\n                \"You may need to configure keepWebHost and keepWebPort manually.\", \n                e.getMessage());\n        }\n        \n        return null;\n    }\n    \n    private boolean isConfigured() {\n        return apiHost != null && !apiHost.isEmpty();\n    }\n    \n    private ArvadosConfig fetchArvadosConfig() throws ArvadosApiException {\n        ConfigApiClient configClient = new ConfigApiClient(\n            apiProtocol, apiHost, apiPort, apiHostInsecure\n        );\n        return configClient.fetchConfig();\n    }\n    \n    private String extractWebDAVUrl(ArvadosConfig config) {\n        if (config == null || config.getServices() == null) {\n            return null;\n        }\n        \n        ArvadosConfig.WebDAVDownload webDav = config.getServices().getWebDAVDownload();\n        if (webDav == null) {\n            return null;\n        }\n        \n        return webDav.getExternalURL();\n    }\n    \n    private WebDAVConfig parseWebDAVUrl(String webDavUrl) {\n        if (webDavUrl == null || webDavUrl.isEmpty()) {\n            return null;\n        }\n        \n        try {\n            URL url = new URL(webDavUrl);\n            String host = url.getHost();\n            int port = url.getPort();\n            \n            // Use default port based on protocol if not specified\n            if (port == -1) {\n                port = \"https\".equals(url.getProtocol()) ? DEFAULT_HTTPS_PORT : DEFAULT_HTTP_PORT;\n            }\n            \n            log.info(\"Successfully auto-configured WebDAV: host={}, port={}\", host, port);\n            return new WebDAVConfig(host, port);\n            \n        } catch (MalformedURLException e) {\n            log.warn(\"Failed to parse WebDAV URL '{}': {}\", webDavUrl, e.getMessage());\n            return null;\n        }\n    }\n    \n    public static class WebDAVConfig {\n        private final String host;\n        private final int port;\n        \n        public WebDAVConfig(String host, int port) {\n            this.host = host;\n            this.port = port;\n        }\n        \n        public String getHost() {\n            return host;\n        }\n        \n        public int getPort() {\n            return port;\n        }\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/exception/ArvadosApiException.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.exception;\n\npublic class ArvadosApiException extends ArvadosClientException {\n\n    private static final long serialVersionUID = 1L;\n\n    public ArvadosApiException(String message) {\n        super(message);\n    }\n    \n    public ArvadosApiException(String message, Throwable cause) {\n        super(message, cause);\n    }\n    \n    public ArvadosApiException(Throwable cause) {\n        super(cause);\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/exception/ArvadosClientException.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.exception;\n\n/**\n * Parent exception for all exceptions in library.\n * More specific exceptions like ArvadosApiException extend this class.\n */\npublic class ArvadosClientException extends RuntimeException {\n\n    public ArvadosClientException(String message) {\n        super(message);\n    }\n\n    public ArvadosClientException(String message, Throwable cause) {\n        super(message, cause);\n    }\n\n    public ArvadosClientException(Throwable cause) {\n        super(cause);\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/facade/ArvadosFacade.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.facade;\n\nimport com.google.common.collect.Lists;\nimport org.arvados.client.api.client.CollectionsApiClient;\nimport org.arvados.client.api.client.GroupsApiClient;\nimport org.arvados.client.api.client.KeepWebApiClient;\nimport org.arvados.client.api.client.UsersApiClient;\nimport org.arvados.client.api.model.*;\nimport org.arvados.client.api.model.argument.Filter;\nimport org.arvados.client.api.model.argument.ListArgument;\nimport org.arvados.client.config.FileConfigProvider;\nimport org.arvados.client.config.ConfigProvider;\nimport org.arvados.client.logic.collection.FileToken;\nimport org.arvados.client.logic.collection.ManifestDecoder;\nimport org.arvados.client.logic.keep.FileDownloader;\nimport org.arvados.client.logic.keep.FileUploader;\nimport org.slf4j.Logger;\n\nimport java.io.File;\nimport java.util.Arrays;\nimport java.util.Collections;\nimport java.util.List;\nimport java.util.Map;\n\npublic class ArvadosFacade {\n\n    private final ConfigProvider config;\n    private final Logger log = org.slf4j.LoggerFactory.getLogger(ArvadosFacade.class);\n    private CollectionsApiClient collectionsApiClient;\n    private GroupsApiClient groupsApiClient;\n    private UsersApiClient usersApiClient;\n    private FileDownloader fileDownloader;\n    private FileUploader fileUploader;\n    private static final String PROJECT = \"project\";\n    private static final String SUBPROJECT = \"sub-project\";\n\n    public ArvadosFacade(ConfigProvider config) {\n        this.config = config;\n        setFacadeFields();\n    }\n\n    public ArvadosFacade() {\n        this.config = new FileConfigProvider();\n        setFacadeFields();\n    }\n\n    private void setFacadeFields() {\n        collectionsApiClient = new CollectionsApiClient(config);\n        groupsApiClient = new GroupsApiClient(config);\n        usersApiClient = new UsersApiClient(config);\n        ManifestDecoder manifestDecoder = new ManifestDecoder();\n        KeepWebApiClient keepWebApiClient = new KeepWebApiClient(config);\n        fileDownloader = new FileDownloader(manifestDecoder, collectionsApiClient, keepWebApiClient);\n        fileUploader = new FileUploader(keepWebApiClient, collectionsApiClient, config);\n    }\n\n    /**\n     * This method downloads single file from collection using Arvados Keep-Web.\n     * File is saved on a drive in specified location and returned.\n     *\n     * @param filePathName         path to the file in collection. If requested file is stored\n     *                             directly in collection (not within its subdirectory) this\n     *                             would be just the name of file (ex. 'file.txt').\n     *                             Otherwise full file path must be passed (ex. 
'folder/file.txt')\n     * @param collectionUuid       uuid of collection containing requested file\n     * @param pathToDownloadFolder path to location in which file should be saved.\n     *                             Passed location must be a directory in which file of\n     *                             that name does not already exist.\n     * @return downloaded file\n     */\n    public File downloadFile(String filePathName, String collectionUuid, String pathToDownloadFolder) {\n        return fileDownloader.downloadSingleFileUsingKeepWeb(filePathName, collectionUuid, pathToDownloadFolder);\n    }\n\n    /**\n     * This method downloads all files from collection.\n     * Directory named by collection uuid is created in specified location,\n     * files are saved on a drive in this directory and list with downloaded\n     * files is returned.\n     *\n     * @param collectionUuid       uuid of collection from which files are downloaded\n     * @param pathToDownloadFolder path to location in which files should be saved.\n     *                             New folder named by collection uuid, containing\n     *                             downloaded files, is created in this location.\n     *                             Passed location must be a directory in which folder\n     *                             of that name does not already exist.\n     * @param usingKeepWeb         if set to true files will be downloaded using Keep Web.\n     *                             If set to false files will be downloaded using Keep Server API.\n     * @return list containing downloaded files\n     */\n    public List<File> downloadCollectionFiles(String collectionUuid, String pathToDownloadFolder, boolean usingKeepWeb) {\n        if (usingKeepWeb)\n            return fileDownloader.downloadFilesFromCollectionUsingKeepWeb(collectionUuid, pathToDownloadFolder);\n        return fileDownloader.downloadFilesFromCollection(collectionUuid, pathToDownloadFolder);\n    }\n\n    /**\n     * Lists all FileTokens (objects containing information about files) for\n     * specified collection.\n     * Information in each FileToken includes file path, name, size and position\n     * in data stream\n     *\n     * @param collectionUuid uuid of collection for which FileTokens are listed\n     * @return list containing FileTokens for each file in specified collection\n     */\n    public List<FileToken> listFileInfoFromCollection(String collectionUuid) {\n        return fileDownloader.listFileInfoFromCollection(collectionUuid);\n    }\n\n    /**\n     * Creates and uploads new collection containing passed files.\n     * Created collection has a default name and is uploaded to user's 'Home' project.\n     *\n     * @see ArvadosFacade#upload(List, String, String)\n     * @param files    list of files to be uploaded within new collection\n     * @return collection object mapped from JSON that is returned from server after successful upload\n     */\n    public Collection upload(List<File> files) {\n        return upload(files, null, null);\n    }\n\n    /**\n     * Creates and uploads new collection containing a single file.\n     * Created collection has a default name and is uploaded to user's 'Home' project.\n     *\n     * @see ArvadosFacade#upload(List, String, String)\n     * @param file file to be uploaded\n     * @return collection object mapped from JSON that is returned from server after successful upload\n     */\n    public Collection upload(File file) {\n        return 
upload(Collections.singletonList(file), null, null);\n    }\n\n    /**\n     * Uploads new collection with specified name and containing selected files\n     * to an existing project.\n     *\n     * @param sourceFiles    list of files to be uploaded within new collection\n     * @param collectionName name for the newly created collection.\n     *                       Collection with that name cannot be already created\n     *                       in specified project. If null is passed\n     *                       then collection name is set to default, containing\n     *                       phrase 'New Collection' and a timestamp.\n     * @param projectUuid    uuid of the project in which created collection is to be included.\n     *                       If null is passed then collection is uploaded to user's 'Home' project.\n     * @return collection object mapped from JSON that is returned from server after successful upload\n     */\n    public Collection upload(List<File> sourceFiles, String collectionName, String projectUuid) {\n        return fileUploader.upload(sourceFiles, collectionName, projectUuid);\n    }\n\n    /**\n     * Uploads a file to a specified collection.\n     *\n     * @see ArvadosFacade#uploadToExistingCollection(List, String)\n     * @param file           file to be uploaded to existing collection. Filenames must be unique\n     *                       in comparison with files already existing within collection.\n     * @param collectionUUID UUID of collection to which files should be uploaded\n     * @return collection object mapped from JSON that is returned from server after successful upload\n     */\n    public Collection uploadToExistingCollection(File file, String collectionUUID) {\n        return fileUploader.uploadToExistingCollection(Collections.singletonList(file), collectionUUID);\n    }\n\n    /**\n     * Uploads multiple files to an existing collection.\n     *\n     * @param files          list of files to be uploaded to existing collection.\n     *                       File names must be unique - both within passed list and\n     *                       in comparison with files already existing within collection.\n     * @param collectionUUID UUID of collection to which files should be uploaded\n     * @return collection object mapped from JSON that is returned from server after successful upload\n     */\n    public Collection uploadToExistingCollection(List<File> files, String collectionUUID) {\n        return fileUploader.uploadToExistingCollection(files, collectionUUID);\n    }\n\n    /**\n     * Creates and uploads new empty collection to specified project.\n     *\n     * @param collectionName name for the newly created collection.\n     *                       Collection with that name cannot be already created\n     *                       in specified project.\n     * @param projectUuid    uuid of project that will contain uploaded empty collection.\n     *                       To select home project pass current user's uuid from getCurrentUser()\n     * @return collection object mapped from JSON that is returned from server after successful upload\n     * @see ArvadosFacade#getCurrentUser()\n     */\n    public Collection createEmptyCollection(String collectionName, String projectUuid) {\n        Collection collection = new Collection();\n        collection.setOwnerUuid(projectUuid);\n        collection.setName(collectionName);\n        return collectionsApiClient.create(collection);\n    }\n\n    /**\n     * Uploads multiple 
files into an existing collection by copying existing content\n     * (referenced by portable data hash) rather than re-uploading file data.\n     *\n     * @param collectionUUID UUID of the collection to which the files are to be copied\n     * @param files          map of files to be copied into the existing collection.\n     *                       Each entry maps a target filename in the collection to a\n     *                       source filename prefixed with its portable data hash\n     * @return collection object mapped from JSON that is returned from the server after a successful copy\n     */\n    public Collection updateWithReplaceFiles(String collectionUUID, Map<String, String> files) {\n        CollectionReplaceFiles replaceFilesRequest = new CollectionReplaceFiles();\n        replaceFilesRequest.getReplaceFiles().putAll(files);\n        return collectionsApiClient.update(collectionUUID, replaceFilesRequest);\n    }\n\n    /**\n     * Returns current user information based on the API token provided via configuration.\n     *\n     * @return user object mapped from JSON that is returned from the server for the provided API token.\n     * It contains information about the user to whom this token is assigned.\n     */\n    public User getCurrentUser() {\n        return usersApiClient.current();\n    }\n\n    /**\n     * Gets the uuid of the current user based on the API token provided in configuration and uses it to list all\n     * projects that this user owns in Arvados.\n     *\n     * @return GroupList containing all groups that the current user owns.\n     * @see ArvadosFacade#getCurrentUser()\n     */\n    public GroupList showGroupsOwnedByCurrentUser() {\n        ListArgument listArgument = ListArgument.builder()\n                .filters(Arrays.asList(\n                        Filter.of(\"owner_uuid\", Filter.Operator.LIKE, getCurrentUser().getUuid()),\n                        Filter.of(\"group_class\", Filter.Operator.IN, Lists.newArrayList(PROJECT, SUBPROJECT)\n                        )))\n                .build();\n        GroupList groupList = groupsApiClient.list(listArgument);\n        log.debug(\"Groups owned by user:\");\n        groupList.getItems().forEach(m -> log.debug(m.getUuid() + \" -- \" + m.getName()));\n\n        return groupList;\n    }\n\n    /**\n     * Gets the uuid of the current user based on the API token provided in configuration and uses it to list all\n     * projects that this user has read access to in Arvados.\n     *\n     * @return GroupList containing all groups that the current user has read access to.\n     */\n    public GroupList showGroupsAccessibleByCurrentUser() {\n        ListArgument listArgument = ListArgument.builder()\n                .filters(Collections.singletonList(\n                        Filter.of(\"group_class\", Filter.Operator.IN, Lists.newArrayList(PROJECT, SUBPROJECT)\n                        )))\n                .build();\n        GroupList groupList = groupsApiClient.list(listArgument);\n        log.debug(\"Groups accessible by user:\");\n        groupList.getItems().forEach(m -> log.debug(m.getUuid() + \" -- \" + m.getName()));\n\n        return groupList;\n    }\n\n    /**\n     * Filters collections from the selected project and returns a list of those whose names contain the passed String.\n     * The \"LIKE\" operator is used, so passing just part of a collection's name is sufficient to match it.\n     * Returned collections are ordered by date of creation (oldest first).\n     *\n     * @param collectionName collections containing this param in their name will be returned.\n     *                       Passing a wildcard is possible - for example passing \"a%\" searches for\n     *                       all collections starting with \"a\".\n     * @param projectUuid    uuid of the project to be searched for collections with the given name. To search the home\n     *                       project, provide the user uuid (from getCurrentUser())\n     * @return CollectionList object containing all collections matching the specified name criteria\n     * @see ArvadosFacade#getCurrentUser()\n     */\n    public CollectionList getCollectionsFromProjectByName(String collectionName, String projectUuid) {\n        ListArgument listArgument = ListArgument.builder()\n                .filters(Arrays.asList(\n                        Filter.of(\"owner_uuid\", Filter.Operator.LIKE, projectUuid),\n                        Filter.of(\"name\", Filter.Operator.LIKE, collectionName)\n                ))\n                .order(Collections.singletonList(\"created_at\"))\n                .build();\n\n        return collectionsApiClient.list(listArgument);\n    }\n\n    /**\n     * Gets project details by uuid.\n     *\n     * @param projectUuid uuid of the project\n     * @return Group object containing information about the project\n     */\n    public Group getProjectByUuid(String projectUuid) {\n        Group project = groupsApiClient.get(projectUuid);\n        log.debug(\"Retrieved \" + project.getName() + \" with UUID: \" + project.getUuid());\n        return project;\n    }\n\n    /**\n     * Creates a new project that will be a subproject of \"home\" for the current user.\n     *\n     * @param projectName name for the newly created project\n     * @return Group object containing information about the created project\n     * (mapped from JSON returned from server after creating the project)\n     */\n    public Group createNewProject(String projectName) {\n        Group project = new Group();\n        project.setName(projectName);\n        project.setGroupClass(PROJECT);\n        Group createdProject = groupsApiClient.create(project);\n        log.debug(\"Project \" + createdProject.getName() + \" created with UUID: \" + createdProject.getUuid());\n        return createdProject;\n    }\n\n    /**\n     * Creates a new project owned by the specified owner. Passing a user uuid creates\n     * the project in that user's \"home\"; passing a group uuid creates a subproject.\n     *\n     * @param ownerUuid uuid of the owner of the new project\n     * @param projectName name for the newly created subproject\n     * @return Group object containing information about the created project\n     * (mapped from JSON returned from server after creating the project)\n     */\n    public Group createNewSubProject(String ownerUuid, String projectName) {\n        Group project = new Group();\n        project.setName(projectName);\n        project.setGroupClass(PROJECT);\n        project.setOwnerUuid(ownerUuid);\n        Group createdProject = groupsApiClient.create(project);\n        log.debug(\"Project \" + createdProject.getName() + \" created with UUID: \" + createdProject.getUuid());\n        return createdProject;\n    }\n\n    /**\n     * Deletes the collection with the specified uuid.\n     *\n     * @param collectionUuid uuid of the collection to be deleted. The user whose token is provided in configuration\n     *                       must be authorized to delete such a collection.\n     * @return collection object with the deleted collection (mapped from JSON returned from server after deleting the collection)\n     */\n    public Collection deleteCollection(String collectionUuid) {\n        Collection deletedCollection = collectionsApiClient.delete(collectionUuid);\n        log.debug(\"Collection: \" + collectionUuid + \" deleted.\");\n        return deletedCollection;\n    }\n}\n"
  },
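  {
    "path": "contrib/java-sdk-v2/examples/ArvadosFacadeExample.java",
    "content": "/*\n * Illustrative usage sketch (hypothetical example file, not part of the SDK sources).\n * Walks through a typical facade round trip: look up the current user, upload a\n * file into a new collection, then download it back via Keep-Web. The cluster\n * address, token and file names are placeholders.\n */\n\nimport org.arvados.client.api.model.Collection;\nimport org.arvados.client.config.ExternalConfigProvider;\nimport org.arvados.client.facade.ArvadosFacade;\n\nimport java.io.File;\nimport java.util.Collections;\n\npublic class ArvadosFacadeExample {\n\n    public static void main(String[] args) {\n        // new ArvadosFacade() with no arguments reads the file-based config instead.\n        ArvadosFacade facade = new ArvadosFacade(ExternalConfigProvider.builder()\n                .apiProtocol(\"https\")\n                .apiHost(\"arvados.example.com\") // placeholder cluster\n                .apiPort(443)\n                .apiToken(System.getenv(\"ARVADOS_API_TOKEN\"))\n                .build());\n\n        // The current user's uuid doubles as the uuid of the \"home\" project.\n        String homeProjectUuid = facade.getCurrentUser().getUuid();\n\n        // Upload one file into a new, named collection in the home project.\n        Collection created = facade.upload(\n                Collections.singletonList(new File(\"data.txt\")),\n                \"facade demo collection\", homeProjectUuid);\n\n        // Download the same file back using Keep-Web; getUuid() is assumed to be\n        // inherited from the Item base model.\n        File downloaded = facade.downloadFile(\"data.txt\", created.getUuid(), \"/tmp\");\n        System.out.println(\"Downloaded to \" + downloaded.getAbsolutePath());\n    }\n}\n"
  },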
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/collection/CollectionFactory.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.collection;\n\nimport org.arvados.client.api.client.GroupsApiClient;\nimport org.arvados.client.api.client.UsersApiClient;\nimport org.arvados.client.exception.ArvadosApiException;\nimport org.arvados.client.api.model.Collection;\nimport org.arvados.client.common.Patterns;\nimport org.arvados.client.config.FileConfigProvider;\nimport org.arvados.client.config.ConfigProvider;\nimport org.arvados.client.exception.ArvadosClientException;\n\nimport java.time.LocalDateTime;\nimport java.time.format.DateTimeFormatter;\nimport java.util.Optional;\n\npublic class CollectionFactory {\n\n    private ConfigProvider config;\n    private UsersApiClient usersApiClient;\n    private GroupsApiClient groupsApiClient;\n\n    private final String name;\n    private final String projectUuid;\n\n    private CollectionFactory(ConfigProvider config, String name, String projectUuid) {\n        this.name = name;\n        this.projectUuid = projectUuid;\n        this.config = config;\n        setApiClients();\n    }\n\n    public static CollectionFactoryBuilder builder() {\n        return new CollectionFactoryBuilder();\n    }\n\n    private void setApiClients() {\n        if(this.config == null) this.config = new FileConfigProvider();\n\n        this.usersApiClient = new UsersApiClient(config);\n        this.groupsApiClient = new GroupsApiClient(config);\n    }\n\n    public Collection create() {\n        Collection newCollection = new Collection();\n        newCollection.setName(getNameOrDefault(name));\n        newCollection.setOwnerUuid(getDesiredProjectUuid(projectUuid));\n\n        return newCollection;\n    }\n\n    private String getNameOrDefault(String name) {\n        return Optional.ofNullable(name).orElseGet(() -> {\n            LocalDateTime dateTime = LocalDateTime.now();\n            DateTimeFormatter formatter = DateTimeFormatter.ofPattern(\"Y-MM-dd HH:mm:ss.SSS\");\n            return String.format(\"New Collection (%s)\", dateTime.format(formatter));\n        });\n    }\n\n    public String getDesiredProjectUuid(String projectUuid) {\n        try {\n            if (projectUuid == null || projectUuid.length() == 0){\n                return usersApiClient.current().getUuid();\n            } else if (projectUuid.matches(Patterns.USER_UUID_PATTERN)) {\n                return usersApiClient.get(projectUuid).getUuid();\n            } else if (projectUuid.matches(Patterns.GROUP_UUID_PATTERN)) {\n                return groupsApiClient.get(projectUuid).getUuid();\n            }\n        } catch (ArvadosApiException e) {\n            throw new ArvadosClientException(String.format(\"An error occurred while getting project by UUID %s\", projectUuid));\n        }\n        throw new ArvadosClientException(String.format(\"No project with %s UUID found\", projectUuid));\n    }\n\n    public static class CollectionFactoryBuilder {\n        private ConfigProvider config;\n        private String name;\n        private String projectUuid;\n\n        CollectionFactoryBuilder() {\n        }\n\n        public CollectionFactoryBuilder config(ConfigProvider config) {\n            this.config = config;\n            return this;\n        }\n\n        public CollectionFactoryBuilder name(String name) {\n            this.name = name;\n            return this;\n        }\n\n        public CollectionFactoryBuilder projectUuid(String projectUuid) {\n   
         this.projectUuid = projectUuid;\n            return this;\n        }\n\n        public CollectionFactory build() {\n            return new CollectionFactory(config, name, projectUuid);\n        }\n\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/collection/FileToken.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.collection;\n\nimport com.google.common.base.Strings;\nimport org.arvados.client.common.Characters;\n\npublic class FileToken {\n\n    private long filePosition;\n    private long fileSize;\n    private String fileName;\n    private String path;\n\n    public FileToken(String fileTokenInfo) {\n        splitFileTokenInfo(fileTokenInfo);\n    }\n\n    public FileToken(String fileTokenInfo, String path) {\n        splitFileTokenInfo(fileTokenInfo);\n        this.path = path;\n    }\n\n    private void splitFileTokenInfo(String fileTokenInfo) {\n        String[] tokenPieces = fileTokenInfo.split(\":\");\n        this.filePosition = Long.parseLong(tokenPieces[0]);\n        this.fileSize = Long.parseLong(tokenPieces[1]);\n        this.fileName = tokenPieces[2].replace(Characters.SPACE, \" \");\n    }\n\n    @Override\n    public String toString() {\n        return filePosition + \":\" + fileSize + \":\" + fileName;\n    }\n\n    public String getFullPath() {\n        return Strings.isNullOrEmpty(path) ? fileName : path + fileName;\n    }\n\n    public long getFilePosition() {\n        return this.filePosition;\n    }\n\n    public long getFileSize() {\n        return this.fileSize;\n    }\n\n    public String getFileName() {\n        return this.fileName;\n    }\n\n    public String getPath() {\n        return this.path;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/collection/ManifestDecoder.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.collection;\n\nimport org.arvados.client.common.Characters;\nimport org.arvados.client.exception.ArvadosClientException;\nimport org.arvados.client.logic.keep.KeepLocator;\n\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.LinkedList;\nimport java.util.List;\nimport java.util.Objects;\n\nimport static java.util.stream.Collectors.toList;\nimport static org.arvados.client.common.Patterns.FILE_TOKEN_PATTERN;\nimport static org.arvados.client.common.Patterns.LOCATOR_PATTERN;\n\npublic class ManifestDecoder {\n\n    public List<ManifestStream> decode(String manifestText) {\n\n        if (manifestText == null || manifestText.isEmpty()) {\n            throw new ArvadosClientException(\"Manifest text cannot be empty.\");\n        }\n\n        List<String> manifestStreams = new ArrayList<>(Arrays.asList(manifestText.split(\"\\\\n\")));\n        if (!manifestStreams.get(0).startsWith(\". \")) {\n            throw new ArvadosClientException(\"Invalid first path component (expecting \\\".\\\")\");\n        }\n\n        return manifestStreams.stream()\n                .map(this::decodeSingleManifestStream)\n                .collect(toList());\n    }\n\n    private ManifestStream decodeSingleManifestStream(String manifestStream) {\n        Objects.requireNonNull(manifestStream, \"Manifest stream cannot be empty.\");\n\n        LinkedList<String> manifestPieces = new LinkedList<>(Arrays.asList(manifestStream.split(\"\\\\s+\")));\n        String streamName = manifestPieces.poll();\n        String path = \".\".equals(streamName) ? \"\" : streamName.substring(2).concat(Characters.SLASH);\n\n        List<KeepLocator> keepLocators = manifestPieces\n                .stream()\n                .filter(p -> p.matches(LOCATOR_PATTERN))\n                .map(this::getKeepLocator)\n                .collect(toList());\n\n\n        List<FileToken> fileTokens = manifestPieces.stream()\n                .skip(keepLocators.size())\n                .filter(p -> p.matches(FILE_TOKEN_PATTERN))\n                .map(p -> new FileToken(p, path))\n                .collect(toList());\n\n        return new ManifestStream(streamName, keepLocators, fileTokens);\n\n    }\n\n    private KeepLocator getKeepLocator(String locatorString ) {\n        try {\n            return new KeepLocator(locatorString);\n        } catch (Exception e) {\n            throw new RuntimeException(e);\n        }\n    }\n\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/collection/ManifestFactory.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.collection;\n\nimport com.google.common.collect.ImmutableList;\nimport org.arvados.client.common.Characters;\n\nimport java.io.File;\nimport java.util.Collection;\nimport java.util.List;\nimport java.util.stream.Collectors;\n\npublic class ManifestFactory {\n\n    private Collection<File> files;\n    private List<String> locators;\n\n    ManifestFactory(Collection<File> files, List<String> locators) {\n        this.files = files;\n        this.locators = locators;\n    }\n\n    public static ManifestFactoryBuilder builder() {\n        return new ManifestFactoryBuilder();\n    }\n\n    public String create() {\n        ImmutableList.Builder<String> builder = new ImmutableList.Builder<String>()\n                .add(Characters.DOT)\n                .addAll(locators);\n        long filePosition = 0;\n        for (File file : files) {\n            builder.add(String.format(\"%d:%d:%s\", filePosition, file.length(), file.getName().replace(\" \", Characters.SPACE)));\n            filePosition += file.length();\n        }\n        String manifest = builder.build().stream().collect(Collectors.joining(\" \")).concat(Characters.NEW_LINE);\n        return manifest;\n    }\n\n    public static class ManifestFactoryBuilder {\n        private Collection<File> files;\n        private List<String> locators;\n\n        ManifestFactoryBuilder() {\n        }\n\n        public ManifestFactory.ManifestFactoryBuilder files(Collection<File> files) {\n            this.files = files;\n            return this;\n        }\n\n        public ManifestFactory.ManifestFactoryBuilder locators(List<String> locators) {\n            this.locators = locators;\n            return this;\n        }\n\n        public ManifestFactory build() {\n            return new ManifestFactory(files, locators);\n        }\n\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/collection/ManifestStream.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.collection;\n\nimport org.arvados.client.logic.keep.KeepLocator;\n\nimport java.util.List;\nimport java.util.stream.Collectors;\nimport java.util.stream.Stream;\n\npublic class ManifestStream {\n\n    private String streamName;\n    private List<KeepLocator> keepLocators;\n    private List<FileToken> fileTokens;\n\n    public ManifestStream(String streamName, List<KeepLocator> keepLocators, List<FileToken> fileTokens) {\n        this.streamName = streamName;\n        this.keepLocators = keepLocators;\n        this.fileTokens = fileTokens;\n    }\n\n    @Override\n    public String toString() {\n        return streamName + \" \" + Stream.concat(keepLocators.stream().map(KeepLocator::toString), fileTokens.stream().map(FileToken::toString))\n                .collect(Collectors.joining(\" \"));\n    }\n\n    public String getStreamName() {\n        return this.streamName;\n    }\n\n    public List<KeepLocator> getKeepLocators() {\n        return this.keepLocators;\n    }\n\n    public List<FileToken> getFileTokens() {\n        return this.fileTokens;\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/keep/FileDownloader.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.keep;\n\nimport com.google.common.collect.Lists;\nimport org.arvados.client.api.client.CollectionsApiClient;\nimport org.arvados.client.api.client.KeepWebApiClient;\nimport org.arvados.client.api.model.Collection;\nimport org.arvados.client.common.Characters;\nimport org.arvados.client.exception.ArvadosClientException;\nimport org.arvados.client.logic.collection.FileToken;\nimport org.arvados.client.logic.collection.ManifestDecoder;\nimport org.arvados.client.logic.collection.ManifestStream;\nimport org.arvados.client.logic.keep.exception.DownloadFolderAlreadyExistsException;\nimport org.arvados.client.logic.keep.exception.FileAlreadyExistsException;\nimport org.slf4j.Logger;\n\nimport java.io.File;\nimport java.io.FileOutputStream;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.io.RandomAccessFile;\nimport java.nio.file.Files;\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.concurrent.CompletableFuture;\nimport java.util.stream.Collectors;\nimport java.util.stream.Stream;\n\npublic class FileDownloader {\n\n    private final ManifestDecoder manifestDecoder;\n    private final CollectionsApiClient collectionsApiClient;\n    private final KeepWebApiClient keepWebApiClient;\n    private final Logger log = org.slf4j.LoggerFactory.getLogger(FileDownloader.class);\n\n    public FileDownloader(ManifestDecoder manifestDecoder, CollectionsApiClient collectionsApiClient, KeepWebApiClient keepWebApiClient) {\n        this.manifestDecoder = manifestDecoder;\n        this.collectionsApiClient = collectionsApiClient;\n        this.keepWebApiClient = keepWebApiClient;\n    }\n\n    public List<FileToken> listFileInfoFromCollection(String collectionUuid) {\n        Collection requestedCollection = collectionsApiClient.get(collectionUuid);\n        String manifestText = requestedCollection.getManifestText();\n\n        // decode manifest text and get list of all FileTokens for this collection\n        return manifestDecoder.decode(manifestText)\n                .stream()\n                .flatMap(p -> p.getFileTokens().stream())\n                .collect(Collectors.toList());\n    }\n\n    public File downloadSingleFileUsingKeepWeb(String filePathName, String collectionUuid, String pathToDownloadFolder) {\n        FileToken fileToken = getFileTokenFromCollection(filePathName, collectionUuid);\n        if (fileToken == null) {\n            throw new ArvadosClientException(String.format(\"%s not found in Collection with UUID %s\", filePathName, collectionUuid));\n        }\n\n        File downloadedFile = checkIfFileExistsInTargetLocation(fileToken, pathToDownloadFolder);\n        try (FileOutputStream fos = new FileOutputStream(downloadedFile)) {\n            fos.write(keepWebApiClient.download(collectionUuid, filePathName));\n        } catch (IOException e) {\n            throw new ArvadosClientException(String.format(\"Unable to write down file %s\", fileToken.getFileName()), e);\n        }\n        return downloadedFile;\n    }\n\n    public File downloadFileWithResume(String collectionUuid, String fileName, String pathToDownloadFolder, long start, Long end) throws IOException {\n        if (end != null && end < start) {\n            throw new IllegalArgumentException(\"End index must be greater than or equal to the start index\");\n        }\n\n        File destinationFile = new 
File(pathToDownloadFolder, fileName);\n\n        if (!destinationFile.exists()) {\n            boolean isCreated = destinationFile.createNewFile();\n            if (!isCreated) {\n                throw new IOException(\"Failed to create new file: \" + destinationFile.getAbsolutePath());\n            }\n        }\n\n        try (RandomAccessFile outputFile = new RandomAccessFile(destinationFile, \"rw\");\n             InputStream inputStream = keepWebApiClient.get(collectionUuid, fileName, start, end)) {\n            outputFile.seek(start);\n\n            long remaining = (end == null) ? Long.MAX_VALUE : end - start + 1;\n            byte[] buffer = new byte[4096];\n            int bytesRead;\n            while ((bytesRead = inputStream.read(buffer)) != -1 && remaining > 0) {\n                int bytesToWrite = (int) Math.min(bytesRead, remaining);\n                outputFile.write(buffer, 0, bytesToWrite);\n                remaining -= bytesToWrite;\n            }\n        }\n\n        return destinationFile;\n    }\n\n    public List<File> downloadFilesFromCollectionUsingKeepWeb(String collectionUuid, String pathToDownloadFolder) {\n        String collectionTargetDir = setTargetDirectory(collectionUuid, pathToDownloadFolder).getAbsolutePath();\n        List<FileToken> fileTokens = listFileInfoFromCollection(collectionUuid);\n\n        List<CompletableFuture<File>> futures = Lists.newArrayList();\n        for (FileToken fileToken : fileTokens) {\n            futures.add(CompletableFuture.supplyAsync(() -> this.downloadOneFileFromCollectionUsingKeepWeb(fileToken, collectionUuid, collectionTargetDir)));\n        }\n\n        @SuppressWarnings(\"unchecked\")\n        CompletableFuture<File>[] array = futures.toArray(new CompletableFuture[0]);\n        return Stream.of(array)\n                .map(CompletableFuture::join).collect(Collectors.toList());\n    }\n\n    private FileToken getFileTokenFromCollection(String filePathName, String collectionUuid) {\n        return listFileInfoFromCollection(collectionUuid)\n                .stream()\n                .filter(p -> (p.getFullPath()).equals(filePathName))\n                .findFirst()\n                .orElse(null);\n    }\n\n    private File checkIfFileExistsInTargetLocation(FileToken fileToken, String pathToDownloadFolder) {\n        String fileName = fileToken.getFileName();\n\n        File downloadFile = new File(pathToDownloadFolder + Characters.SLASH + fileName);\n        if (downloadFile.exists()) {\n            throw new FileAlreadyExistsException(String.format(\"File %s exists in location %s\", fileName, pathToDownloadFolder));\n        } else {\n            return downloadFile;\n        }\n    }\n\n    private File downloadOneFileFromCollectionUsingKeepWeb(FileToken fileToken, String collectionUuid, String pathToDownloadFolder) {\n        String filePathName = fileToken.getPath() + fileToken.getFileName();\n        File downloadedFile = new File(pathToDownloadFolder + Characters.SLASH + filePathName);\n        downloadedFile.getParentFile().mkdirs();\n\n        try (FileOutputStream fos = new FileOutputStream(downloadedFile)) {\n            fos.write(keepWebApiClient.download(collectionUuid, filePathName));\n        } catch (IOException e) {\n            throw new RuntimeException(e);\n        }\n        return downloadedFile;\n    }\n\n    public List<File> downloadFilesFromCollection(String collectionUuid, String pathToDownloadFolder) {\n\n        // download requested collection and extract manifest text\n        Collection 
requestedCollection = collectionsApiClient.get(collectionUuid);\n        String manifestText = requestedCollection.getManifestText();\n\n        // if directory with this collectionUUID does not exist - create one\n        // if exists - abort (throw exception)\n        File collectionTargetDir = setTargetDirectory(collectionUuid, pathToDownloadFolder);\n\n        // decode manifest text and create list of ManifestStream objects containing KeepLocators and FileTokens\n        List<ManifestStream> manifestStreams = manifestDecoder.decode(manifestText);\n\n        //list of all downloaded files that will be returned by this method\n        List<File> downloadedFilesFromCollection = new ArrayList<>();\n\n        // download files for each manifest stream\n        for (ManifestStream manifestStream : manifestStreams)\n            downloadedFilesFromCollection.addAll(downloadFilesFromSingleManifestStream(collectionUuid, manifestStream, collectionTargetDir));\n\n        log.debug(String.format(\"Total of: %d files downloaded\", downloadedFilesFromCollection.size()));\n        return downloadedFilesFromCollection;\n    }\n\n    private File setTargetDirectory(String collectionUUID, String pathToDownloadFolder) {\n        //local directory to save downloaded files\n        File collectionTargetDir = new File(pathToDownloadFolder + Characters.SLASH + collectionUUID);\n        if (collectionTargetDir.exists()) {\n            throw new DownloadFolderAlreadyExistsException(String.format(\"Directory for collection UUID %s already exists\", collectionUUID));\n        } else {\n            collectionTargetDir.mkdirs();\n        }\n        return collectionTargetDir;\n    }\n\n    private List<File> downloadFilesFromSingleManifestStream(String collectionUuid, ManifestStream manifestStream, File collectionTargetDir){\n        List<File> downloadedFiles = new ArrayList<>();\n\n        for (FileToken fileToken : manifestStream.getFileTokens()) {\n            File downloadedFile = new File(collectionTargetDir.getAbsolutePath() + Characters.SLASH + fileToken.getFullPath()); //create file\n            downloadedFile.getParentFile().mkdirs();\n\n            try  {\n                byte[] download = keepWebApiClient.download(collectionUuid, fileToken.getFileName());\n                Files.write(downloadedFile.toPath(), download);\n            } catch (IOException | ArvadosClientException e) {\n                throw new ArvadosClientException(String.format(\"Unable to write down file %s\", fileToken.getFileName()), e);\n            }\n\n            downloadedFiles.add(downloadedFile);\n            log.debug(String.format(\"File %d / %d downloaded from manifest stream\",\n                    manifestStream.getFileTokens().indexOf(fileToken) + 1,\n                    manifestStream.getFileTokens().size()));\n        }\n        return downloadedFiles;\n    }\n\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/keep/FileUploader.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.keep;\n\nimport org.arvados.client.api.client.CollectionsApiClient;\nimport org.arvados.client.api.client.KeepWebApiClient;\nimport org.arvados.client.api.model.Collection;\nimport org.arvados.client.config.ConfigProvider;\nimport org.arvados.client.logic.collection.CollectionFactory;\nimport org.slf4j.Logger;\n\nimport java.io.File;\nimport java.util.List;\n\npublic class FileUploader {\n\n    private final KeepWebApiClient keepWebApiClient;\n    private final CollectionsApiClient collectionsApiClient;\n    private final ConfigProvider config;\n    private final Logger log = org.slf4j.LoggerFactory.getLogger(FileUploader.class);\n\n    public FileUploader(KeepWebApiClient keepWebApiClient, CollectionsApiClient collectionsApiClient, ConfigProvider config) {\n        this.keepWebApiClient = keepWebApiClient;\n        this.collectionsApiClient = collectionsApiClient;\n        this.config = config;\n    }\n\n    public Collection upload(List<File> sourceFiles, String collectionName, String projectUuid) {\n        Collection newCollection = CollectionFactory.builder()\n                .config(config)\n                .name(collectionName)\n                .projectUuid(projectUuid)\n                .build()\n                .create();\n\n        newCollection = collectionsApiClient.create(newCollection);\n        String newCollectionId = newCollection.getUuid();\n\n        sourceFiles.forEach(file -> uploadFile(newCollectionId, file));\n\n        return collectionsApiClient.get(newCollection.getUuid());\n    }\n\n    private void uploadFile(String collectionUuid, File file) {\n        keepWebApiClient.upload(collectionUuid, file, (progress) -> log.info(\"Uploaded {} bytes for file: {}\", progress, file.getName()));\n    }\n\n    public Collection uploadToExistingCollection(List<File> files, String collectionUuid) {\n        files.forEach(file -> uploadFile(collectionUuid, file));\n\n        return collectionsApiClient.get(collectionUuid);\n    }\n\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/keep/KeepLocator.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.keep;\n\nimport org.arvados.client.exception.ArvadosClientException;\n\nimport java.time.Instant;\nimport java.time.LocalDateTime;\nimport java.time.ZoneOffset;\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.LinkedList;\nimport java.util.List;\nimport java.util.Objects;\nimport java.util.stream.Collectors;\nimport java.util.stream.Stream;\n\nimport static org.arvados.client.common.Patterns.HINT_PATTERN;\n\npublic class KeepLocator {\n\n    private final List<String> hints = new ArrayList<>();\n    private String permSig;\n    private LocalDateTime permExpiry;\n    private final String md5sum;\n    private final Integer size;\n\n    public KeepLocator(String locatorString) {\n        LinkedList<String> pieces = new LinkedList<>(Arrays.asList(locatorString.split(\"\\\\+\")));\n\n        md5sum = pieces.poll();\n        size = Integer.valueOf(Objects.requireNonNull(pieces.poll()));\n\n        for (String hint : pieces) {\n            if (!hint.matches(HINT_PATTERN)) {\n                throw new ArvadosClientException(String.format(\"invalid hint format: %s\", hint));\n            } else if (hint.startsWith(\"A\")) {\n                parsePermissionHint(hint);\n            } else {\n                hints.add(hint);\n            }\n        }\n    }\n\n    public List<String> getHints() {\n        return hints;\n    }\n\n    public String getMd5sum() {\n        return md5sum;\n    }\n\n    @Override\n    public String toString() {\n        return Stream.concat(Stream.of(md5sum, size.toString(), permissionHint()), hints.stream())\n                .filter(Objects::nonNull)\n                .collect(Collectors.joining(\"+\"));\n    }\n\n    public String stripped() {\n        return size != null ? String.format(\"%s+%d\", md5sum, size) : md5sum;\n    }\n\n    public String permissionHint() {\n        if (permSig == null || permExpiry == null) {\n            return null;\n        }\n\n        long timestamp = permExpiry.toEpochSecond(ZoneOffset.UTC);\n        String signTimestamp = Long.toHexString(timestamp);\n        return String.format(\"A%s@%s\", permSig, signTimestamp);\n    }\n\n    private void parsePermissionHint(String hint) {\n        String[] hintSplit = hint.substring(1).split(\"@\", 2);\n        permSig = hintSplit[0];\n\n        int permExpiryDecimal = Integer.parseInt(hintSplit[1], 16);\n        permExpiry = LocalDateTime.ofInstant(Instant.ofEpochSecond(permExpiryDecimal), ZoneOffset.UTC);\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/keep/exception/DownloadFolderAlreadyExistsException.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.keep.exception;\n\nimport org.arvados.client.exception.ArvadosClientException;\n\n/**\n * Exception indicating that directory with given name was already created in specified location.\n *\n * <p> This exception will be thrown during an attempt to download all files from certain\n * collection to a location that already contains folder named by this collection's UUID.</p>\n */\npublic class DownloadFolderAlreadyExistsException extends ArvadosClientException {\n\n    public DownloadFolderAlreadyExistsException(String message) {\n        super(message);\n    }\n\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/logic/keep/exception/FileAlreadyExistsException.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.keep.exception;\n\nimport org.arvados.client.exception.ArvadosClientException;\n\n/**\n * Signals that an attempt to download a file with given name has failed for a specified\n * download location.\n *\n * <p> This exception will be thrown during an attempt to download single file to a location\n * that already contains file with given name</p>\n */\npublic class FileAlreadyExistsException extends ArvadosClientException {\n\n    public FileAlreadyExistsException(String message) { super(message); }\n\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/utils/FileMerge.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.utils;\n\nimport java.io.BufferedOutputStream;\nimport java.io.File;\nimport java.io.FileOutputStream;\nimport java.io.IOException;\nimport java.nio.file.Files;\nimport java.util.Collection;\n\npublic class FileMerge {\n\n    public static void merge(Collection<File> files, File targetFile) throws IOException {\n        try (FileOutputStream fos = new FileOutputStream(targetFile); BufferedOutputStream mergingStream = new BufferedOutputStream(fos)) {\n            for (File file : files) {\n                Files.copy(file.toPath(), mergingStream);\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/java/org/arvados/client/utils/FileSplit.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.utils;\n\nimport org.apache.commons.io.FileUtils;\n\nimport java.io.*;\nimport java.util.ArrayList;\nimport java.util.List;\n\n/**\n * Based on:\n * {@link} https://stackoverflow.com/questions/10864317/how-to-break-a-file-into-pieces-using-java\n */\npublic class FileSplit {\n\n    public static List<File> split(File f, File dir, int splitSize) throws IOException {\n        int partCounter = 1;\n\n        long sizeOfFiles = splitSize * FileUtils.ONE_MB;\n        byte[] buffer = new byte[(int) sizeOfFiles];\n\n        List<File> files = new ArrayList<>();\n        String fileName = f.getName();\n\n        try (FileInputStream fis = new FileInputStream(f); BufferedInputStream bis = new BufferedInputStream(fis)) {\n            int bytesAmount = 0;\n            while ((bytesAmount = bis.read(buffer)) > 0) {\n                String filePartName = String.format(\"%s.%03d\", fileName, partCounter++);\n                File newFile = new File(dir, filePartName);\n                try (FileOutputStream out = new FileOutputStream(newFile)) {\n                    out.write(buffer, 0, bytesAmount);\n                }\n                files.add(newFile);\n            }\n        }\n        return files;\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/main/resources/reference.conf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n#\n# Arvados client default configuration\n#\n# Remarks:\n# * While providing data remove apostrophes (\"\") from each line\n# * See Arvados documentation for information how to obtain a token:\n#   https://doc.arvados.org/user/reference/api-tokens.html\n#\n\narvados {\n    api {\n    \tkeepweb-host = localhost\n    \tkeepweb-port = 8000\n    \thost = localhost\n    \tport = 8000\n    \ttoken = \"\"\n    \tprotocol = https\n    \thost-insecure = false\n    }\n    split-size = 64\n    temp-dir = /tmp/file-split\n    copies = 2\n    retries = 0\n    connectTimeout = 60000\n    readTimeout = 60000\n    writeTimeout = 60000\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/BaseStandardApiClientTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport okhttp3.HttpUrl;\nimport org.arvados.client.api.model.Item;\nimport org.arvados.client.api.model.ItemList;\nimport org.arvados.client.test.utils.ArvadosClientUnitTest;\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\nimport org.mockito.Spy;\nimport org.mockito.junit.MockitoJUnitRunner;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\n@RunWith(MockitoJUnitRunner.class)\npublic class BaseStandardApiClientTest extends ArvadosClientUnitTest {\n\n    @Spy\n    private BaseStandardApiClient<?, ?> client = new BaseStandardApiClient<Item, ItemList>(CONFIG) {\n        @Override\n        String getResource() {\n            return \"resource\";\n        }\n\n        @Override\n        Class<Item> getType() {\n            return null;\n        }\n\n        @Override\n        Class<ItemList> getListType() {\n            return null;\n        }\n    };\n\n    @Test\n    public void urlBuilderBuildsExpectedUrlFormat() {\n        // when\n        HttpUrl.Builder actual = client.getUrlBuilder();\n\n        // then\n        assertThat(actual.build().toString()).isEqualTo(\"http://localhost:9000/arvados/v1/resource\");\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/CollectionsApiClientTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport java.io.IOException;\nimport java.util.HashMap;\nimport java.util.Map;\n\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport com.fasterxml.jackson.databind.SerializationFeature;\nimport okhttp3.mockwebserver.RecordedRequest;\nimport org.arvados.client.api.model.Collection;\nimport org.arvados.client.api.model.CollectionList;\nimport org.arvados.client.api.model.CollectionReplaceFiles;\nimport org.arvados.client.test.utils.RequestMethod;\nimport org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;\nimport org.junit.Before;\nimport org.junit.Test;\n\nimport static org.arvados.client.test.utils.ApiClientTestUtils.*;\nimport static org.assertj.core.api.Assertions.assertThat;\nimport static org.junit.Assert.assertEquals;\n\npublic class CollectionsApiClientTest extends ArvadosClientMockedWebServerTest {\n\n    private static final String RESOURCE = \"collections\";\n    private static final String TEST_COLLECTION_NAME = \"Super Collection\";\n    private static final String TEST_COLLECTION_UUID = \"test-collection-uuid\";\n    private ObjectMapper objectMapper;\n    private CollectionsApiClient client;\n\n    @Before\n    public void setUp() {\n        objectMapper = new ObjectMapper();\n        objectMapper.configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);\n        client = new CollectionsApiClient(CONFIG);\n    }\n\n    @Test\n    public void listCollections() throws Exception {\n\n        // given\n        server.enqueue(getResponse(\"collections-list\"));\n\n        // when\n        CollectionList actual = client.list();\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE);\n        assertRequestMethod(request, RequestMethod.GET);\n        assertThat(actual.getItemsAvailable()).isEqualTo(41);\n    }\n\n    @Test\n    public void getCollection() throws Exception {\n\n        // given\n        server.enqueue(getResponse(\"collections-get\"));\n\n        String uuid = \"112ci-4zz18-p51w7z3fpopo6sm\";\n\n        // when\n        Collection actual = client.get(uuid);\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE + \"/\" + uuid);\n        assertRequestMethod(request, RequestMethod.GET);\n        assertThat(actual.getUuid()).isEqualTo(uuid);\n        assertThat(actual.getPortableDataHash()).isEqualTo(\"6c4106229b08fe25f48b3a7a8289dd46+143\");\n    }\n\n    @Test\n    public void createCollection() throws Exception {\n\n        // given\n        server.enqueue(getResponse(\"collections-create-simple\"));\n\n        String name = TEST_COLLECTION_NAME;\n        \n        Collection collection = new Collection();\n        collection.setName(name);\n\n        // when\n        Collection actual = client.create(collection);\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE);\n        assertRequestMethod(request, RequestMethod.POST);\n        assertThat(actual.getName()).isEqualTo(name);\n        assertThat(actual.getPortableDataHash()).isEqualTo(\"d41d8cd98f00b204e9800998ecf8427e+0\");\n        
assertThat(actual.getManifestText()).isEmpty();\n    }\n\n    @Test\n    public void createCollectionWithManifest() throws Exception {\n\n        // given\n        server.enqueue(getResponse(\"collections-create-manifest\"));\n\n        String name = TEST_COLLECTION_NAME;\n        String manifestText = \". 7df44272090cee6c0732382bba415ee9+70+Aa5ece4560e3329315165b36c239b8ab79c888f8a@5a1d5708 0:70:README.md\\n\";\n        \n        Collection collection = new Collection();\n        collection.setName(name);\n        collection.setManifestText(manifestText);\n\n        // when\n        Collection actual = client.create(collection);\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE);\n        assertRequestMethod(request, RequestMethod.POST);\n        assertThat(actual.getName()).isEqualTo(name);\n        assertThat(actual.getPortableDataHash()).isEqualTo(\"d41d8cd98f00b204e9800998ecf8427e+0\");\n        assertThat(actual.getManifestText()).isEqualTo(manifestText);\n    }\n\n    @Test\n    public void testUpdateWithReplaceFiles() throws IOException, InterruptedException {\n        // given\n        server.enqueue(getResponse(\"collections-create-manifest\"));\n\n        Map<String, String> files = new HashMap<>();\n        files.put(\"targetPath1\", \"sourcePath1\");\n        files.put(\"targetPath2\", \"sourcePath2\");\n\n        CollectionReplaceFiles replaceFilesRequest = new CollectionReplaceFiles();\n        replaceFilesRequest.setReplaceFiles(files);\n\n        // when\n        Collection actual = client.update(TEST_COLLECTION_UUID, replaceFilesRequest);\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, \"collections/test-collection-uuid\");\n        assertRequestMethod(request, RequestMethod.PUT);\n        assertThat(actual.getPortableDataHash()).isEqualTo(\"d41d8cd98f00b204e9800998ecf8427e+0\");\n\n        String actualRequestBody = request.getBody().readUtf8();\n        Map<String, Object> actualRequestMap = objectMapper.readValue(actualRequestBody, Map.class);\n\n        Map<String, Object> expectedRequestMap = new HashMap<>();\n        Map<String, Object> collectionOptionsMap = new HashMap<>();\n        collectionOptionsMap.put(\"preserve_version\", true);\n\n        Map<String, String> replaceFilesMap = new HashMap<>();\n        replaceFilesMap.put(\"targetPath1\", \"sourcePath1\");\n        replaceFilesMap.put(\"targetPath2\", \"sourcePath2\");\n\n        expectedRequestMap.put(\"collection\", collectionOptionsMap);\n        expectedRequestMap.put(\"replace_files\", replaceFilesMap);\n\n        String expectedJson = objectMapper.writeValueAsString(expectedRequestMap);\n        String actualJson = objectMapper.writeValueAsString(actualRequestMap);\n        assertEquals(expectedJson, actualJson);\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/GroupsApiClientTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport com.google.common.collect.Lists;\nimport okhttp3.mockwebserver.RecordedRequest;\nimport org.arvados.client.api.model.Group;\nimport org.arvados.client.api.model.GroupList;\nimport org.arvados.client.api.model.argument.Filter;\nimport org.arvados.client.api.model.argument.ListArgument;\nimport org.arvados.client.test.utils.RequestMethod;\nimport org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;\nimport org.junit.Test;\n\nimport java.util.Arrays;\n\nimport static org.arvados.client.test.utils.ApiClientTestUtils.*;\nimport static org.junit.Assert.assertEquals;\n\npublic class GroupsApiClientTest extends ArvadosClientMockedWebServerTest {\n    private static final String RESOURCE = \"groups\";\n    private GroupsApiClient client = new GroupsApiClient(CONFIG);\n\n    @Test\n    public void listGroups() throws Exception {\n\n        // given\n        server.enqueue(getResponse(\"groups-list\"));\n\n        // when\n        GroupList actual = client.list();\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE);\n        assertRequestMethod(request, RequestMethod.GET);\n        assertEquals(20, actual.getItems().size());\n    }\n\n    @Test\n    public void listProjectsByOwner() throws Exception {\n\n        // given\n        server.enqueue(getResponse(\"groups-list\"));\n        String ownerUuid = \"ardev-tpzed-n3kzq4fvoks3uw4\";\n        String filterSubPath = \"?filters=[%20[%20%22owner_uuid%22,%20%22like%22,%20%22ardev-tpzed-n3kzq4fvoks3uw4%22%20],%20\" +\n                \"[%20%22group_class%22,%20%22in%22,%20[%20%22project%22,%20%22sub-project%22%20]%20]%20]\";\n\n        // when\n        ListArgument listArgument = ListArgument.builder()\n                .filters(Arrays.asList(\n                        Filter.of(\"owner_uuid\", Filter.Operator.LIKE, ownerUuid),\n                        Filter.of(\"group_class\", Filter.Operator.IN, Lists.newArrayList(\"project\", \"sub-project\")\n                        )))\n                .build();\n        GroupList actual = client.list(listArgument);\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE + filterSubPath);\n        assertRequestMethod(request, RequestMethod.GET);\n        assertEquals(20, actual.getItems().size());\n    }\n\n    @Test\n    public void getGroup() throws Exception {\n\n        // given\n        server.enqueue(getResponse(\"groups-get\"));\n\n        String uuid = \"ardev-j7d0g-bmg3pfqtx3ivczp\";\n\n        // when\n        Group actual = client.get(uuid);\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE + \"/\" + uuid);\n        assertRequestMethod(request, RequestMethod.GET);\n        assertEquals(uuid, actual.getUuid());\n        assertEquals(\"3hw0vk4mbl0ofvia5k6x4dwrx\", actual.getEtag());\n        assertEquals(\"ardev-tpzed-n3kzq4fvoks3uw4\", actual.getOwnerUuid());\n        assertEquals(\"TestGroup1\", actual.getName());\n        assertEquals(\"project\", actual.getGroupClass());\n\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/KeepWebApiClientTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;\nimport org.junit.Test;\n\nimport java.io.ByteArrayOutputStream;\nimport java.io.File;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.nio.file.Files;\n\nimport okhttp3.mockwebserver.MockResponse;\nimport okio.Buffer;\n\nimport static org.arvados.client.test.utils.ApiClientTestUtils.getResponse;\nimport static org.assertj.core.api.Assertions.assertThat;\nimport static org.junit.Assert.assertArrayEquals;\nimport static org.junit.Assert.assertNotNull;\n\npublic class KeepWebApiClientTest extends ArvadosClientMockedWebServerTest {\n\n    private final KeepWebApiClient client = new KeepWebApiClient(CONFIG);\n\n    @Test\n    public void uploadFile() throws Exception {\n        // given\n        String collectionUuid = \"112ci-4zz18-p51w7z3fpopo6sm\";\n        File file = Files.createTempFile(\"keep-upload-test\", \"txt\").toFile();\n        Files.write(file.toPath(), \"test data\".getBytes());\n\n        server.enqueue(getResponse(\"keep-client-upload-response\"));\n\n        // when\n        String uploadResponse = client.upload(collectionUuid, file, uploadedBytes -> System.out.printf(\"Uploaded bytes: %s/%s%n\", uploadedBytes, file.length()));\n\n        // then\n        assertThat(uploadResponse).isEqualTo(\"Created\");\n    }\n\n    @Test\n    public void downloadPartialIsPerformedSuccessfully() throws Exception {\n        // given\n        String collectionUuid = \"some-collection-uuid\";\n        String filePathName = \"sample-file-path\";\n        long start = 1024;\n        Long end = null;\n\n        byte[] expectedData = \"test data\".getBytes();\n\n        try (Buffer buffer = new Buffer().write(expectedData)) {\n            server.enqueue(new MockResponse().setBody(buffer));\n\n            // when\n            InputStream inputStream = client.get(collectionUuid, filePathName, start, end);\n            byte[] actualData = inputStreamToByteArray(inputStream);\n\n            // then\n            assertNotNull(actualData);\n            assertArrayEquals(expectedData, actualData);\n        }\n    }\n\n    private byte[] inputStreamToByteArray(InputStream inputStream) throws IOException {\n        ByteArrayOutputStream buffer = new ByteArrayOutputStream();\n        int nRead;\n        byte[] data = new byte[1024];\n        while ((nRead = inputStream.read(data, 0, data.length)) != -1) {\n            buffer.write(data, 0, nRead);\n        }\n        buffer.flush();\n        return buffer.toByteArray();\n    }\n\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/LinkApiClientTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport okhttp3.mockwebserver.RecordedRequest;\nimport org.arvados.client.api.model.Link;\nimport org.arvados.client.api.model.LinkList;\nimport org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;\nimport org.arvados.client.test.utils.RequestMethod;\nimport org.junit.Test;\n\nimport static org.arvados.client.test.utils.ApiClientTestUtils.assertAuthorizationHeader;\nimport static org.arvados.client.test.utils.ApiClientTestUtils.assertRequestMethod;\nimport static org.arvados.client.test.utils.ApiClientTestUtils.assertRequestPath;\nimport static org.arvados.client.test.utils.ApiClientTestUtils.getResponse;\nimport static org.assertj.core.api.Assertions.assertThat;\nimport static org.junit.Assert.assertEquals;\n\npublic class LinkApiClientTest extends ArvadosClientMockedWebServerTest {\n\n    private static final String RESOURCE = \"links\";\n\n    private final LinksApiClient client = new LinksApiClient(CONFIG);\n\n    @Test\n    public void listLinks() throws Exception {\n        // given\n        server.enqueue(getResponse(\"links-list\"));\n\n        // when\n        LinkList actual = client.list();\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE);\n        assertRequestMethod(request, RequestMethod.GET);\n        assertThat(actual.getItemsAvailable()).isEqualTo(2);\n    }\n\n    @Test\n    public void getLink() throws Exception {\n        // given\n        server.enqueue(getResponse(\"links-get\"));\n\n        String uuid = \"arkau-o0j2j-huxuaxbi46s1yml\";\n\n        // when\n        Link actual = client.get(uuid);\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE + \"/\" + uuid);\n        assertRequestMethod(request, RequestMethod.GET);\n        assertEquals(actual.getUuid(), uuid);\n        assertEquals(actual.getName(), \"can_read\");\n        assertEquals(actual.getHeadKind(), \"arvados#group\");\n        assertEquals(actual.getHeadUuid(), \"arkau-j7d0g-fcedae2076pw56h\");\n        assertEquals(actual.getTailUuid(), \"ardev-tpzed-n3kzq4fvoks3uw4\");\n        assertEquals(actual.getTailKind(), \"arvados#user\");\n        assertEquals(actual.getLinkClass(), \"permission\");\n    }\n\n    @Test\n    public void createLink() throws Exception {\n        // given\n        server.enqueue(getResponse(\"links-create\"));\n\n        String name = \"Star Link\";\n\n        Link collection = new Link();\n        collection.setName(name);\n\n        // when\n        Link actual = client.create(collection);\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE);\n        assertRequestMethod(request, RequestMethod.POST);\n        assertThat(actual.getName()).isEqualTo(name);\n        assertEquals(actual.getName(), name);\n        assertEquals(actual.getUuid(), \"arkau-o0j2j-huxuaxbi46s1yml\");\n        assertEquals(actual.getHeadKind(), \"arvados#group\");\n        assertEquals(actual.getHeadUuid(), \"arkau-j7d0g-fcedae2076pw56h\");\n        assertEquals(actual.getTailUuid(), \"ardev-tpzed-n3kzq4fvoks3uw4\");\n        assertEquals(actual.getTailKind(), 
\"arvados#user\");\n        assertEquals(actual.getLinkClass(), \"star\");\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/UsersApiClientTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client;\n\nimport okhttp3.mockwebserver.RecordedRequest;\nimport org.arvados.client.api.model.User;\nimport org.arvados.client.api.model.UserList;\nimport org.arvados.client.test.utils.RequestMethod;\nimport org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;\nimport org.junit.Test;\n\nimport static org.arvados.client.common.Characters.SLASH;\nimport static org.arvados.client.test.utils.ApiClientTestUtils.*;\nimport static org.assertj.core.api.Assertions.assertThat;\n\npublic class UsersApiClientTest extends ArvadosClientMockedWebServerTest {\n\n    private static final String RESOURCE = \"users\";\n    private static final String USER_UUID = \"ardev-tpzed-q6dvn7sby55up1b\";\n\n    private UsersApiClient client = new UsersApiClient(CONFIG);\n\n    @Test\n    public void listUsers() throws Exception {\n\n        // given\n        server.enqueue(getResponse(\"users-list\"));\n\n        // when\n        UserList actual = client.list();\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE);\n        assertRequestMethod(request, RequestMethod.GET);\n        assertThat(actual.getItemsAvailable()).isEqualTo(13);\n    }\n\n    @Test\n    public void getUser() throws Exception {\n\n        // given\n        server.enqueue(getResponse(\"users-get\"));\n\n        // when\n        User actual = client.get(USER_UUID);\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE + SLASH + USER_UUID);\n        assertRequestMethod(request, RequestMethod.GET);\n        assertThat(actual.getUuid()).isEqualTo(USER_UUID);\n    }\n\n    @Test\n    public void getCurrentUser() throws Exception {\n\n        // given\n        server.enqueue(getResponse(\"users-get\"));\n\n        // when\n        User actual = client.current();\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE + SLASH + \"current\");\n        assertRequestMethod(request, RequestMethod.GET);\n        assertThat(actual.getUuid()).isEqualTo(USER_UUID);\n    }\n\n    @Test\n    public void getSystemUser() throws Exception {\n\n        // given\n        server.enqueue(getResponse(\"users-system\"));\n\n        // when\n        User actual = client.system();\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE + SLASH + \"system\");\n        assertRequestMethod(request, RequestMethod.GET);\n        assertThat(actual.getUuid()).isEqualTo(\"ardev-tpzed-000000000000000\");\n    }\n\n    @Test\n    public void createUser() throws Exception {\n\n        // given\n        server.enqueue(getResponse(\"users-create\"));\n\n        String firstName = \"John\";\n        String lastName = \"Wayne\";\n        String fullName = String.format(\"%s %s\", firstName, lastName);\n        String username = String.format(\"%s%s\", firstName, lastName).toLowerCase();\n\n        User user = new User();\n        user.setFirstName(firstName);\n        user.setLastName(lastName);\n        user.setFullName(fullName);\n        
user.setUsername(username);\n\n        // when\n        User actual = client.create(user);\n\n        // then\n        RecordedRequest request = server.takeRequest();\n        assertAuthorizationHeader(request);\n        assertRequestPath(request, RESOURCE);\n        assertRequestMethod(request, RequestMethod.POST);\n        assertThat(actual.getFirstName()).isEqualTo(firstName);\n        assertThat(actual.getLastName()).isEqualTo(lastName);\n        assertThat(actual.getFullName()).isEqualTo(fullName);\n        assertThat(actual.getUsername()).isEqualTo(username);\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/api/client/factory/OkHttpClientFactoryTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.api.client.factory;\n\nimport okhttp3.OkHttpClient;\nimport okhttp3.Request;\nimport okhttp3.Response;\nimport okhttp3.mockwebserver.MockResponse;\nimport org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;\nimport org.junit.Assert;\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\nimport org.mockito.junit.MockitoJUnitRunner;\n\nimport javax.net.ssl.KeyManagerFactory;\nimport javax.net.ssl.SSLContext;\nimport javax.net.ssl.SSLSocketFactory;\nimport javax.net.ssl.TrustManagerFactory;\nimport java.io.FileInputStream;\nimport java.security.KeyStore;\n\n\n@RunWith(MockitoJUnitRunner.class)\npublic class OkHttpClientFactoryTest extends ArvadosClientMockedWebServerTest {\n\n    @Test(expected = javax.net.ssl.SSLHandshakeException.class)\n    public void secureOkHttpClientIsCreated() throws Exception {\n\n        // given\n        OkHttpClientFactory factory = OkHttpClientFactory.INSTANCE;\n        // * configure HTTPS server\n        SSLSocketFactory sf = getSSLSocketFactoryWithSelfSignedCertificate();\n        server.useHttps(sf, false);\n        server.enqueue(new MockResponse().setBody(\"OK\"));\n        // * prepare client HTTP request\n        Request request = new Request.Builder()\n                .url(\"https://localhost:9000/\")\n                .build();\n\n        // when - then (SSL certificate is verified)\n        OkHttpClient actual = factory.create(false);\n        Response response = actual.newCall(request).execute();\n    }\n\n    @Test\n    public void insecureOkHttpClientIsCreated() throws Exception {\n        // given\n        OkHttpClientFactory factory = OkHttpClientFactory.INSTANCE;\n        // * configure HTTPS server\n        SSLSocketFactory sf = getSSLSocketFactoryWithSelfSignedCertificate();\n        server.useHttps(sf, false);\n        server.enqueue(new MockResponse().setBody(\"OK\"));\n        // * prepare client HTTP request\n        Request request = new Request.Builder()\n                .url(\"https://localhost:9000/\")\n                .build();\n\n        // when (SSL certificate is not verified)\n        OkHttpClient actual = factory.create(true);\n        Response response = actual.newCall(request).execute();\n\n        // then\n        Assert.assertEquals(response.body().string(),\"OK\");\n    }\n\n\n    /*\n        This ugly boilerplate is needed to enable self signed certificate.\n\n        It requires selfsigned.keystore.jks file. 
It was generated with:\n        keytool -genkey -v -keystore mystore.keystore.jks -alias alias_name -keyalg RSA -keysize 2048 -validity 10000\n     */\n    public SSLSocketFactory getSSLSocketFactoryWithSelfSignedCertificate() throws Exception {\n\n        FileInputStream stream = new FileInputStream(\"src/test/resources/selfsigned.keystore.jks\");\n        char[] serverKeyStorePassword = \"123456\".toCharArray();\n        KeyStore serverKeyStore = KeyStore.getInstance(KeyStore.getDefaultType());\n        serverKeyStore.load(stream, serverKeyStorePassword);\n\n        String kmfAlgorithm = KeyManagerFactory.getDefaultAlgorithm();\n        KeyManagerFactory kmf = KeyManagerFactory.getInstance(kmfAlgorithm);\n        kmf.init(serverKeyStore, serverKeyStorePassword);\n\n        TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(kmfAlgorithm);\n        trustManagerFactory.init(serverKeyStore);\n\n        SSLContext sslContext = SSLContext.getInstance(\"SSL\");\n        sslContext.init(kmf.getKeyManagers(), trustManagerFactory.getTrustManagers(), null);\n        return sslContext.getSocketFactory();\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/config/ExternalConfigProviderTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.config;\n\nimport okhttp3.mockwebserver.MockResponse;\nimport okhttp3.mockwebserver.MockWebServer;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\nimport java.io.IOException;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\npublic class ExternalConfigProviderTest {\n\n    private MockWebServer mockServer;\n\n    @Before\n    public void setUp() throws IOException {\n        mockServer = new MockWebServer();\n        mockServer.start();\n    }\n\n    @After\n    public void tearDown() throws IOException {\n        mockServer.shutdown();\n    }\n\n    @Test\n    public void testAutoFetchWebDAVConfiguration() {\n        // Given\n        String configResponse = \"{\\n\" +\n                                \"  \\\"Services\\\": {\\n\" +\n                                \"    \\\"WebDAVDownload\\\": {\\n\" +\n                                \"      \\\"ExternalURL\\\": \\\"https://download.example.com:9000/\\\"\\n\" +\n                                \"    }\\n\" +\n                                \"  }\\n\" +\n                                \"}\";\n\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(200)\n                .setBody(configResponse)\n                .addHeader(\"Content-Type\", \"application/json\"));\n\n        // When\n        ExternalConfigProvider provider = ExternalConfigProvider.builder()\n                .apiHost(mockServer.getHostName())\n                .apiPort(mockServer.getPort())\n                .apiProtocol(\"http\")\n                .apiToken(\"test-token\")\n                .build();\n\n        // Then\n        assertThat(provider.getKeepWebHost()).isEqualTo(\"download.example.com\");\n        assertThat(provider.getKeepWebPort()).isEqualTo(9000);\n    }\n\n    @Test\n    public void testAutoFetchWebDAVConfigurationWithDefaultHttpsPort() {\n        // Given\n        String configResponse = \"{\\n\" +\n                                \"  \\\"Services\\\": {\\n\" +\n                                \"    \\\"WebDAVDownload\\\": {\\n\" +\n                                \"      \\\"ExternalURL\\\": \\\"https://download.example.com/\\\"\\n\" +\n                                \"    }\\n\" +\n                                \"  }\\n\" +\n                                \"}\";\n\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(200)\n                .setBody(configResponse)\n                .addHeader(\"Content-Type\", \"application/json\"));\n\n        // When\n        ExternalConfigProvider provider = ExternalConfigProvider.builder()\n                .apiHost(mockServer.getHostName())\n                .apiPort(mockServer.getPort())\n                .apiProtocol(\"http\")\n                .apiToken(\"test-token\")\n                .build();\n\n        // Then\n        assertThat(provider.getKeepWebHost()).isEqualTo(\"download.example.com\");\n        assertThat(provider.getKeepWebPort()).isEqualTo(443);\n    }\n\n    @Test\n    public void testAutoFetchWebDAVConfigurationWithDefaultHttpPort() {\n        // Given\n        String configResponse = \"{\\n\" +\n                                \"  \\\"Services\\\": {\\n\" +\n                                \"    \\\"WebDAVDownload\\\": {\\n\" +\n                                \"      \\\"ExternalURL\\\": \\\"http://download.example.com/\\\"\\n\" +\n  
                              \"    }\\n\" +\n                                \"  }\\n\" +\n                                \"}\";\n\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(200)\n                .setBody(configResponse)\n                .addHeader(\"Content-Type\", \"application/json\"));\n\n        // When\n        ExternalConfigProvider provider = ExternalConfigProvider.builder()\n                .apiHost(mockServer.getHostName())\n                .apiPort(mockServer.getPort())\n                .apiProtocol(\"http\")\n                .apiToken(\"test-token\")\n                .build();\n\n        // Then\n        assertThat(provider.getKeepWebHost()).isEqualTo(\"download.example.com\");\n        assertThat(provider.getKeepWebPort()).isEqualTo(80);\n    }\n\n    @Test\n    public void testManualConfigurationTakesPrecedence() {\n        // Given - server returns config but we provide manual values\n        String configResponse = \"{\\n\" +\n                                \"  \\\"Services\\\": {\\n\" +\n                                \"    \\\"WebDAVDownload\\\": {\\n\" +\n                                \"      \\\"ExternalURL\\\": \\\"https://auto.example.com/\\\"\\n\" +\n                                \"    }\\n\" +\n                                \"  }\\n\" +\n                                \"}\";\n\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(200)\n                .setBody(configResponse)\n                .addHeader(\"Content-Type\", \"application/json\"));\n\n        // When - manual configuration is provided\n        ExternalConfigProvider provider = ExternalConfigProvider.builder()\n                .apiHost(mockServer.getHostName())\n                .apiPort(mockServer.getPort())\n                .apiProtocol(\"http\")\n                .apiToken(\"test-token\")\n                .keepWebHost(\"manual.example.com\")\n                .keepWebPort(8080)\n                .build();\n\n        // Then - manual values should be used\n        assertThat(provider.getKeepWebHost()).isEqualTo(\"manual.example.com\");\n        assertThat(provider.getKeepWebPort()).isEqualTo(8080);\n    }\n\n    @Test\n    public void testAutoFetchDisabled() {\n        // When - auto-fetch is explicitly disabled\n        ExternalConfigProvider provider = ExternalConfigProvider.builder()\n                .apiHost(\"api.example.com\")\n                .apiPort(443)\n                .apiProtocol(\"https\")\n                .apiToken(\"test-token\")\n                .autoFetchWebDAV(false)\n                .build();\n\n        // Then - keepWeb values should be null/0\n        assertThat(provider.getKeepWebHost()).isNull();\n        assertThat(provider.getKeepWebPort()).isEqualTo(0);\n    }\n\n    @Test\n    public void testHandlesApiError() {\n        // Given - server returns error\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(500)\n                .setBody(\"Internal Server Error\"));\n\n        // When\n        ExternalConfigProvider provider = ExternalConfigProvider.builder()\n                .apiHost(mockServer.getHostName())\n                .apiPort(mockServer.getPort())\n                .apiProtocol(\"http\")\n                .apiToken(\"test-token\")\n                .build();\n\n        // Then - should handle gracefully, keepWeb values should be null/0\n        assertThat(provider.getKeepWebHost()).isNull();\n        assertThat(provider.getKeepWebPort()).isEqualTo(0);\n    }\n\n    @Test\n 
   public void testHandlesMalformedResponse() {\n        // Given - server returns malformed JSON\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(200)\n                .setBody(\"{ invalid json ]\")\n                .addHeader(\"Content-Type\", \"application/json\"));\n\n        // When\n        ExternalConfigProvider provider = ExternalConfigProvider.builder()\n                .apiHost(mockServer.getHostName())\n                .apiPort(mockServer.getPort())\n                .apiProtocol(\"http\")\n                .apiToken(\"test-token\")\n                .build();\n\n        // Then - should handle gracefully\n        assertThat(provider.getKeepWebHost()).isNull();\n        assertThat(provider.getKeepWebPort()).isEqualTo(0);\n    }\n\n    @Test\n    public void testHandlesMissingWebDAVInResponse() {\n        // Given - server returns config without WebDAV section\n        String configResponse = \"{\\n\" +\n                                \"  \\\"Services\\\": {\\n\" +\n                                \"  }\\n\" +\n                                \"}\";\n\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(200)\n                .setBody(configResponse)\n                .addHeader(\"Content-Type\", \"application/json\"));\n\n        // When\n        ExternalConfigProvider provider = ExternalConfigProvider.builder()\n                .apiHost(mockServer.getHostName())\n                .apiPort(mockServer.getPort())\n                .apiProtocol(\"http\")\n                .apiToken(\"test-token\")\n                .build();\n\n        // Then - should handle gracefully\n        assertThat(provider.getKeepWebHost()).isNull();\n        assertThat(provider.getKeepWebPort()).isEqualTo(0);\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/config/WebDAVConfigFetcherTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.config;\n\nimport okhttp3.mockwebserver.MockResponse;\nimport okhttp3.mockwebserver.MockWebServer;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\nimport java.io.IOException;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\npublic class WebDAVConfigFetcherTest {\n\n    private MockWebServer mockServer;\n\n    @Before\n    public void setUp() throws IOException {\n        mockServer = new MockWebServer();\n        mockServer.start();\n    }\n\n    @After\n    public void tearDown() throws IOException {\n        mockServer.shutdown();\n    }\n\n    @Test\n    public void testFetchWithValidConfig() {\n        // Given\n        String configResponse = \"{\\n\" +\n                                \"  \\\"Services\\\": {\\n\" +\n                                \"    \\\"WebDAVDownload\\\": {\\n\" +\n                                \"      \\\"ExternalURL\\\": \\\"https://download.example.com:9000/\\\"\\n\" +\n                                \"    }\\n\" +\n                                \"  }\\n\" +\n                                \"}\";\n\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(200)\n                .setBody(configResponse)\n                .addHeader(\"Content-Type\", \"application/json\"));\n\n        // When\n        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(\n                \"http\", mockServer.getHostName(), mockServer.getPort(), false\n        );\n        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();\n\n        // Then\n        assertThat(config).isNotNull();\n        assertThat(config.getHost()).isEqualTo(\"download.example.com\");\n        assertThat(config.getPort()).isEqualTo(9000);\n    }\n\n    @Test\n    public void testFetchWithDefaultHttpsPort() {\n        // Given\n        String configResponse = \"{\\n\" +\n                                \"  \\\"Services\\\": {\\n\" +\n                                \"    \\\"WebDAVDownload\\\": {\\n\" +\n                                \"      \\\"ExternalURL\\\": \\\"https://download.example.com/\\\"\\n\" +\n                                \"    }\\n\" +\n                                \"  }\\n\" +\n                                \"}\";\n\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(200)\n                .setBody(configResponse)\n                .addHeader(\"Content-Type\", \"application/json\"));\n\n        // When\n        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(\n                \"http\", mockServer.getHostName(), mockServer.getPort(), false\n        );\n        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();\n\n        // Then\n        assertThat(config).isNotNull();\n        assertThat(config.getHost()).isEqualTo(\"download.example.com\");\n        assertThat(config.getPort()).isEqualTo(443);\n    }\n\n    @Test\n    public void testFetchWithDefaultHttpPort() {\n        // Given\n        String configResponse = \"{\\n\" +\n                                \"  \\\"Services\\\": {\\n\" +\n                                \"    \\\"WebDAVDownload\\\": {\\n\" +\n                                \"      \\\"ExternalURL\\\": \\\"http://download.example.com/\\\"\\n\" +\n                                \"    }\\n\" +\n                                \"  }\\n\" +\n                                
\"}\";\n\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(200)\n                .setBody(configResponse)\n                .addHeader(\"Content-Type\", \"application/json\"));\n\n        // When\n        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(\n                \"http\", mockServer.getHostName(), mockServer.getPort(), false\n        );\n        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();\n\n        // Then\n        assertThat(config).isNotNull();\n        assertThat(config.getHost()).isEqualTo(\"download.example.com\");\n        assertThat(config.getPort()).isEqualTo(80);\n    }\n\n    @Test\n    public void testFetchWithApiError() {\n        // Given\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(500)\n                .setBody(\"Internal Server Error\"));\n\n        // When\n        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(\n                \"http\", mockServer.getHostName(), mockServer.getPort(), false\n        );\n        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();\n\n        // Then\n        assertThat(config).isNull();\n    }\n\n    @Test\n    public void testFetchWithMalformedJson() {\n        // Given\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(200)\n                .setBody(\"{ invalid json ]\")\n                .addHeader(\"Content-Type\", \"application/json\"));\n\n        // When\n        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(\n                \"http\", mockServer.getHostName(), mockServer.getPort(), false\n        );\n        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();\n\n        // Then\n        assertThat(config).isNull();\n    }\n\n    @Test\n    public void testFetchWithMissingWebDAVSection() {\n        // Given\n        String configResponse = \"{\\n\" +\n                                \"  \\\"Services\\\": {\\n\" +\n                                \"  }\\n\" +\n                                \"}\";\n\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(200)\n                .setBody(configResponse)\n                .addHeader(\"Content-Type\", \"application/json\"));\n\n        // When\n        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(\n                \"http\", mockServer.getHostName(), mockServer.getPort(), false\n        );\n        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();\n\n        // Then\n        assertThat(config).isNull();\n    }\n\n    @Test\n    public void testFetchWithNullApiHost() {\n        // When\n        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(\n                \"https\", null, 443, false\n        );\n        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();\n\n        // Then\n        assertThat(config).isNull();\n    }\n\n    @Test\n    public void testFetchWithEmptyApiHost() {\n        // When\n        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(\n                \"https\", \"\", 443, false\n        );\n        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();\n\n        // Then\n        assertThat(config).isNull();\n    }\n\n    @Test\n    public void testFetchWithInvalidWebDAVUrl() {\n        // Given\n        String configResponse = \"{\\n\" +\n                                \"  \\\"Services\\\": {\\n\" +\n                                \"    \\\"WebDAVDownload\\\": {\\n\" +\n                                \"      \\\"ExternalURL\\\": 
\\\"not-a-valid-url\\\"\\n\" +\n                                \"    }\\n\" +\n                                \"  }\\n\" +\n                                \"}\";\n\n        mockServer.enqueue(new MockResponse()\n                .setResponseCode(200)\n                .setBody(configResponse)\n                .addHeader(\"Content-Type\", \"application/json\"));\n\n        // When\n        WebDAVConfigFetcher fetcher = new WebDAVConfigFetcher(\n                \"http\", mockServer.getHostName(), mockServer.getPort(), false\n        );\n        WebDAVConfigFetcher.WebDAVConfig config = fetcher.fetch();\n\n        // Then\n        assertThat(config).isNull();\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeIntegrationTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.facade;\n\nimport org.apache.commons.io.FileUtils;\nimport org.arvados.client.api.model.Collection;\nimport org.arvados.client.common.Characters;\nimport org.arvados.client.config.ExternalConfigProvider;\nimport org.arvados.client.junit.categories.IntegrationTests;\nimport org.arvados.client.logic.collection.FileToken;\nimport org.arvados.client.test.utils.ArvadosClientIntegrationTest;\nimport org.arvados.client.test.utils.FileTestUtils;\nimport org.junit.After;\nimport org.junit.Assert;\nimport org.junit.Before;\nimport org.junit.Test;\nimport org.junit.experimental.categories.Category;\n\nimport java.io.File;\nimport java.util.Collections;\nimport java.util.List;\nimport java.util.UUID;\n\nimport static org.arvados.client.test.utils.FileTestUtils.FILE_DOWNLOAD_TEST_DIR;\nimport static org.arvados.client.test.utils.FileTestUtils.FILE_SPLIT_TEST_DIR;\nimport static org.arvados.client.test.utils.FileTestUtils.TEST_FILE;\nimport static org.assertj.core.api.Assertions.assertThat;\nimport static org.junit.Assert.assertEquals;\nimport static org.junit.Assert.assertTrue;\n\n@Category(IntegrationTests.class)\npublic class ArvadosFacadeIntegrationTest extends ArvadosClientIntegrationTest {\n\n\n    private static final String COLLECTION_NAME = \"Test collection \" + UUID.randomUUID().toString();\n    private String collectionUuid;\n\n    @Before\n    public void setUp() throws Exception {\n        FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR);\n        FileTestUtils.createDirectory(FILE_DOWNLOAD_TEST_DIR);\n    }\n\n    @Test\n    public void uploadOfFileIsPerformedSuccessfully() throws Exception {\n        // given\n        File file = FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_FOURTH_GB / 200);\n\n        // when\n        Collection actual = FACADE.upload(Collections.singletonList(file), COLLECTION_NAME, PROJECT_UUID);\n        collectionUuid = actual.getUuid();\n\n        // then\n        assertThat(actual.getName()).contains(\"Test collection\");\n        assertThat(actual.getManifestText()).contains(file.length() + Characters.COLON + file.getName());\n    }\n\n    @Test\n    public void uploadOfFilesIsPerformedSuccessfully() throws Exception {\n        // given\n        List<File> files = FileTestUtils.generatePredefinedFiles();\n        files.addAll(FileTestUtils.generatePredefinedFiles());\n\n        // when\n        Collection actual = FACADE.upload(files, COLLECTION_NAME, PROJECT_UUID);\n        collectionUuid = actual.getUuid();\n\n        // then\n        assertThat(actual.getName()).contains(\"Test collection\");\n        files.forEach(f -> assertThat(actual.getManifestText()).contains(f.length() + Characters.COLON + f.getName().replace(\" \", Characters.SPACE)));\n    }\n\n    @Test\n    public void uploadToExistingCollectionIsPerformedSuccessfully() throws Exception {\n        // given\n        File file = FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_EIGTH_GB / 500);\n        Collection existing = createTestCollection();\n\n        // when\n        Collection actual = FACADE.uploadToExistingCollection(Collections.singletonList(file), collectionUuid);\n\n        // then\n        assertEquals(collectionUuid, actual.getUuid());\n        assertThat(actual.getManifestText()).contains(file.length() + Characters.COLON + file.getName());\n    }\n\n    @Test\n    public void 
 uploadWithExternalConfigProviderWorksProperly() throws Exception {\n        //given\n        ArvadosFacade facade = new ArvadosFacade(buildExternalConfig());\n        File file = FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_FOURTH_GB / 200);\n\n        //when\n        Collection actual = facade.upload(Collections.singletonList(file), COLLECTION_NAME, PROJECT_UUID);\n        collectionUuid = actual.getUuid();\n\n        //then\n        assertThat(actual.getName()).contains(\"Test collection\");\n        assertThat(actual.getManifestText()).contains(file.length() + Characters.COLON + file.getName());\n    }\n\n    @Test\n    public void creationOfEmptyCollectionPerformedSuccessfully() {\n        // given\n        String collectionName = \"Empty collection \" + UUID.randomUUID().toString();\n\n        // when\n        Collection actual = FACADE.createEmptyCollection(collectionName, PROJECT_UUID);\n        collectionUuid = actual.getUuid();\n\n        // then\n        assertEquals(collectionName, actual.getName());\n        assertEquals(PROJECT_UUID, actual.getOwnerUuid());\n    }\n\n    @Test\n    public void fileTokensAreListedFromCollection() throws Exception {\n        //given\n        List<File> files = uploadTestFiles();\n\n        //when\n        List<FileToken> actual = FACADE.listFileInfoFromCollection(collectionUuid);\n\n        //then\n        assertEquals(files.size(), actual.size());\n        for (int i = 0; i < files.size(); i++) {\n            assertEquals(files.get(i).length(), actual.get(i).getFileSize());\n        }\n    }\n\n    @Test\n    public void downloadOfFilesPerformedSuccessfully() throws Exception {\n        //given\n        List<File> files = uploadTestFiles();\n        File destination = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionUuid);\n\n        //when\n        List<File> actual = FACADE.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, false);\n\n        //then\n        assertEquals(files.size(), actual.size());\n        assertTrue(destination.exists());\n        assertThat(actual).allMatch(File::exists);\n        for (int i = 0; i < files.size(); i++) {\n            assertEquals(files.get(i).length(), actual.get(i).length());\n        }\n    }\n\n    @Test\n    public void downloadOfFilesPerformedSuccessfullyUsingKeepWeb() throws Exception {\n        //given\n        List<File> files = uploadTestFiles();\n        File destination = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionUuid);\n\n        //when\n        List<File> actual = FACADE.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, true);\n\n        //then\n        assertEquals(files.size(), actual.size());\n        assertTrue(destination.exists());\n        assertThat(actual).allMatch(File::exists);\n        for (int i = 0; i < files.size(); i++) {\n            assertEquals(files.get(i).length(), actual.get(i).length());\n        }\n    }\n\n    @Test\n    public void singleFileIsDownloadedSuccessfullyUsingKeepWeb() throws Exception {\n        //given\n        File file = uploadSingleTestFile(false);\n\n        //when\n        File actual = FACADE.downloadFile(file.getName(), collectionUuid, FILE_DOWNLOAD_TEST_DIR);\n\n        //then\n        assertThat(actual).exists();\n        assertThat(actual.length()).isEqualTo(file.length());\n    }\n\n    @Test\n    public void downloadOfOneFileSplitToMultipleLocatorsPerformedSuccessfully() throws Exception {\n        //given\n        File file = uploadSingleTestFile(true);\n\n
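        //when\n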
        List<File> actual = FACADE.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, false);\n\n        //then\n        Assert.assertEquals(1, actual.size());\n        assertThat(actual.get(0).length()).isEqualTo(file.length());\n    }\n\n    @Test\n    public void downloadWithExternalConfigProviderWorksProperly() throws Exception {\n        //given\n        ArvadosFacade facade = new ArvadosFacade(buildExternalConfig());\n        List<File> files = uploadTestFiles();\n\n        //when\n        List<File> actual = facade.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, false);\n\n        //then\n        assertEquals(files.size(), actual.size());\n        assertThat(actual).allMatch(File::exists);\n        for (int i = 0; i < files.size(); i++) {\n            assertEquals(files.get(i).length(), actual.get(i).length());\n        }\n    }\n\n    private ExternalConfigProvider buildExternalConfig() {\n        return ExternalConfigProvider\n                .builder()\n                .apiHostInsecure(CONFIG.isApiHostInsecure())\n                .keepWebHost(CONFIG.getKeepWebHost())\n                .keepWebPort(CONFIG.getKeepWebPort())\n                .apiHost(CONFIG.getApiHost())\n                .apiPort(CONFIG.getApiPort())\n                .apiToken(CONFIG.getApiToken())\n                .apiProtocol(CONFIG.getApiProtocol())\n                .fileSplitSize(CONFIG.getFileSplitSize())\n                .fileSplitDirectory(CONFIG.getFileSplitDirectory())\n                .numberOfCopies(CONFIG.getNumberOfCopies())\n                .numberOfRetries(CONFIG.getNumberOfRetries())\n                .connectTimeout(CONFIG.getConnectTimeout())\n                .readTimeout(CONFIG.getReadTimeout())\n                .writeTimeout(CONFIG.getWriteTimeout())\n                .build();\n    }\n\n    private Collection createTestCollection() {\n        Collection collection = FACADE.createEmptyCollection(COLLECTION_NAME, PROJECT_UUID);\n        collectionUuid = collection.getUuid();\n        return collection;\n    }\n\n    private List<File> uploadTestFiles() throws Exception {\n        createTestCollection();\n        List<File> files = FileTestUtils.generatePredefinedFiles();\n        FACADE.uploadToExistingCollection(files, collectionUuid);\n        return files;\n    }\n\n    private File uploadSingleTestFile(boolean bigFile) throws Exception {\n        createTestCollection();\n        Long fileSize = bigFile ? FileUtils.ONE_MB * 70 : FileTestUtils.ONE_EIGTH_GB / 100;\n        File file = FileTestUtils.generateFile(TEST_FILE, fileSize);\n        FACADE.uploadToExistingCollection(Collections.singletonList(file), collectionUuid);\n        return file;\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR);\n        FileTestUtils.cleanDirectory(FILE_DOWNLOAD_TEST_DIR);\n\n        if (collectionUuid != null) {\n            FACADE.deleteCollection(collectionUuid);\n        }\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/facade/ArvadosFacadeTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.facade;\n\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport com.fasterxml.jackson.databind.ObjectWriter;\nimport okhttp3.mockwebserver.MockResponse;\nimport okio.Buffer;\nimport org.apache.commons.io.FileUtils;\nimport org.arvados.client.api.model.Collection;\nimport org.arvados.client.api.model.KeepService;\nimport org.arvados.client.api.model.KeepServiceList;\nimport org.arvados.client.common.Characters;\nimport org.arvados.client.test.utils.ArvadosClientMockedWebServerTest;\nimport org.arvados.client.test.utils.FileTestUtils;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\nimport org.junit.Ignore;\n\nimport java.io.File;\nimport java.nio.charset.Charset;\nimport java.nio.file.Files;\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\nimport java.util.stream.Collectors;\n\nimport static org.arvados.client.test.utils.ApiClientTestUtils.getResponse;\nimport static org.arvados.client.test.utils.FileTestUtils.*;\nimport static org.assertj.core.api.Assertions.assertThat;\nimport static org.junit.Assert.assertEquals;\nimport static org.junit.Assert.assertTrue;\n\npublic class ArvadosFacadeTest extends ArvadosClientMockedWebServerTest {\n\n    ArvadosFacade facade = new ArvadosFacade(CONFIG);\n\n    @Before\n    public void setUp() throws Exception {\n        FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR);\n        FileTestUtils.createDirectory(FILE_DOWNLOAD_TEST_DIR);\n    }\n\n    @Test\n    @Ignore(\"Failing test #15041\")\n    public void uploadIsPerformedSuccessfullyUsingDiskOnlyKeepServices() throws Exception {\n\n        // given\n        String keepServicesAccessible = setMockedServerPortToKeepServices(\"keep-services-accessible-disk-only\");\n        server.enqueue(new MockResponse().setBody(keepServicesAccessible));\n\n        String blockLocator = \"7df44272090cee6c0732382bba415ee9\";\n        String signedBlockLocator = blockLocator + \"+70+A189a93acda6e1fba18a9dffd42b6591cbd36d55d@5a1c17b6\";\n        for (int i = 0; i < 8; i++) {\n            server.enqueue(new MockResponse().setBody(signedBlockLocator));\n        }\n        server.enqueue(getResponse(\"users-get\"));\n        server.enqueue(getResponse(\"collections-create-manifest\"));\n\n        FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_FOURTH_GB);\n\n        // when\n        Collection actual = facade.upload(Arrays.asList(new File(TEST_FILE)), \"Super Collection\", null);\n\n        // then\n        assertThat(actual.getName()).contains(\"Super Collection\");\n    }\n\n    @Test\n    public void uploadIsPerformedSuccessfully() throws Exception {\n\n        // given\n        // First response: get current user (called by CollectionFactory when projectUuid is null)\n        server.enqueue(getResponse(\"users-get\"));\n\n        // Second response: create collection\n        server.enqueue(getResponse(\"collections-create-manifest\"));\n\n        // Third response: upload file to KeepWeb (it returns empty response)\n        server.enqueue(new MockResponse().setBody(\"\"));\n\n        // Fourth response: get the updated collection\n        server.enqueue(getResponse(\"collections-create-manifest\"));\n\n        FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_FOURTH_GB);\n\n        // when\n        Collection actual = facade.upload(Arrays.asList(new File(TEST_FILE)), 
\"Super Collection\", null);\n\n        // then\n        assertThat(actual.getName()).contains(\"Super Collection\");\n    }\n\n    @Test\n    public void downloadOfWholeCollectionIsPerformedSuccessfully() throws Exception {\n\n        //given\n        String collectionUuid = \"ardev-4zz18-jk5vo4uo9u5vj52\";\n        server.enqueue(getResponse(\"collections-download-file\"));\n\n        // Mock KeepWeb API responses for each file\n        List<File> files = generatePredefinedFiles();\n        for (File f : files) {\n            server.enqueue(new MockResponse().setBody(new Buffer().write(Files.readAllBytes(f.toPath()))));\n        }\n\n        //when\n        List<File> downloadedFiles = facade.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, false);\n\n        //then\n        File collectionDestination = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionUuid);\n        assertEquals(3, downloadedFiles.size());\n        assertTrue(collectionDestination.exists());\n        assertThat(downloadedFiles).allMatch(File::exists);\n        assertEquals(files.stream().map(File::getName).collect(Collectors.toList()), downloadedFiles.stream().map(File::getName).collect(Collectors.toList()));\n        assertEquals(files.stream().map(File::length).collect(Collectors.toList()), downloadedFiles.stream().map(File::length).collect(Collectors.toList()));\n    }\n\n    @Test\n    public void downloadOfWholeCollectionUsingKeepWebPerformedSuccessfully() throws Exception {\n\n        //given\n        String collectionUuid = \"ardev-4zz18-jk5vo4uo9u5vj52\";\n        server.enqueue(getResponse(\"collections-download-file\"));\n\n        List<File> files = generatePredefinedFiles();\n        for (File f : files) {\n            server.enqueue(new MockResponse().setBody(new Buffer().write(FileUtils.readFileToByteArray(f))));\n        }\n\n        //when\n        List<File> downloadedFiles = facade.downloadCollectionFiles(collectionUuid, FILE_DOWNLOAD_TEST_DIR, true);\n\n        //then\n        assertEquals(3, downloadedFiles.size());\n        assertThat(downloadedFiles).allMatch(File::exists);\n        assertEquals(files.stream().map(File::getName).collect(Collectors.toList()), downloadedFiles.stream().map(File::getName).collect(Collectors.toList()));\n        assertTrue(downloadedFiles.stream().map(File::length).collect(Collectors.toList()).containsAll(files.stream().map(File::length).collect(Collectors.toList())));\n    }\n\n    @Test\n    public void downloadOfSingleFilePerformedSuccessfully() throws Exception {\n\n        //given\n        String collectionUuid = \"ardev-4zz18-jk5vo4uo9u5vj52\";\n        server.enqueue(getResponse(\"collections-download-file\"));\n\n        File file = generatePredefinedFiles().get(0);\n        byte[] fileData = FileUtils.readFileToByteArray(file);\n        server.enqueue(new MockResponse().setBody(new Buffer().write(fileData)));\n\n        //when\n        File downloadedFile = facade.downloadFile(file.getName(), collectionUuid, FILE_DOWNLOAD_TEST_DIR);\n\n        //then\n        assertTrue(downloadedFile.exists());\n        assertEquals(file.getName(), downloadedFile.getName());\n        assertEquals(file.length(), downloadedFile.length());\n    }\n\n    private String setMockedServerPortToKeepServices(String jsonPath) throws Exception {\n\n        ObjectMapper mapper = new ObjectMapper().findAndRegisterModules();\n        String filePath = String.format(\"src/test/resources/org/arvados/client/api/client/%s.json\", jsonPath);\n        File jsonFile = 
new File(filePath);\n        String json = FileUtils.readFileToString(jsonFile, Charset.defaultCharset());\n        KeepServiceList keepServiceList = mapper.readValue(json, KeepServiceList.class);\n        List<KeepService> items = keepServiceList.getItems();\n        for (KeepService keepService : items) {\n            keepService.setServicePort(server.getPort());\n        }\n        ObjectWriter writer = mapper.writer().withDefaultPrettyPrinter();\n        return writer.writeValueAsString(keepServiceList);\n    }\n\n    // Joins two byte[] arrays into a single byte[] array\n    private byte[] addAll(byte[] array1, byte[] array2) {\n        byte[] joinedArray = new byte[array1.length + array2.length];\n        System.arraycopy(array1, 0, joinedArray, 0, array1.length);\n        System.arraycopy(array2, 0, joinedArray, array1.length, array2.length);\n        return joinedArray;\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR);\n        FileTestUtils.cleanDirectory(FILE_DOWNLOAD_TEST_DIR);\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/junit/categories/IntegrationTests.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.junit.categories;\n\npublic interface IntegrationTests {}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/logic/collection/FileTokenTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.collection;\n\nimport org.arvados.client.common.Characters;\nimport org.junit.Assert;\nimport org.junit.Test;\n\npublic class FileTokenTest {\n\n    public static final String FILE_TOKEN_INFO = \"0:1024:test-file1\";\n    public static final int FILE_POSITION = 0;\n    public static final long FILE_LENGTH = 1024L;\n    public static final String FILE_NAME = \"test-file1\";\n    public static final String FILE_PATH = \"c\" + Characters.SLASH;\n\n    private static FileToken fileToken = new FileToken(FILE_TOKEN_INFO);\n    private static FileToken fileTokenWithPath = new FileToken(FILE_TOKEN_INFO, FILE_PATH);\n\n    @Test\n    public void tokenInfoIsDividedCorrectly(){\n        Assert.assertEquals(FILE_NAME, fileToken.getFileName());\n        Assert.assertEquals(FILE_POSITION, fileToken.getFilePosition());\n        Assert.assertEquals(FILE_LENGTH, fileToken.getFileSize());\n    }\n\n    @Test\n    public void toStringReturnsOriginalFileTokenInfo(){\n        Assert.assertEquals(FILE_TOKEN_INFO, fileToken.toString());\n    }\n\n    @Test\n    public void fullPathIsReturnedProperly(){\n        Assert.assertEquals(FILE_NAME, fileToken.getFullPath());\n        Assert.assertEquals(FILE_PATH + FILE_NAME, fileTokenWithPath.getFullPath());\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/logic/collection/ManifestDecoderTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.collection;\n\nimport org.arvados.client.exception.ArvadosClientException;\nimport org.junit.Assert;\nimport org.junit.Test;\n\nimport java.util.List;\n\nimport static junit.framework.TestCase.fail;\n\npublic class ManifestDecoderTest {\n\n    private ManifestDecoder manifestDecoder = new ManifestDecoder();\n\n    private static final String ONE_LINE_MANIFEST_TEXT = \". \" +\n            \"eff999f3b5158331eb44a9a93e3b36e1+67108864+Aad3839bea88bce22cbfe71cf4943de7dab3ea52a@5826180f \" +\n            \"db141bfd11f7da60dce9e5ee85a988b8+34038725+Ae8f48913fed782cbe463e0499ab37697ee06a2f8@5826180f \" +\n            \"0:101147589:rna.SRR948778.bam\" +\n            \"\\\\n\";\n\n    private static final String MULTIPLE_LINES_MANIFEST_TEXT  = \". \" +\n            \"930625b054ce894ac40596c3f5a0d947+33 \" +\n            \"0:0:a 0:0:b 0:33:output.txt\\n\" +\n            \"./c d41d8cd98f00b204e9800998ecf8427e+0 0:0:d\";\n\n    private static final String MANIFEST_TEXT_WITH_INVALID_FIRST_PATH_COMPONENT = \"a\" + ONE_LINE_MANIFEST_TEXT;\n\n\n    @Test\n    public void allLocatorsAndFileTokensAreExtractedFromSimpleManifest() {\n\n        List<ManifestStream> actual = manifestDecoder.decode(ONE_LINE_MANIFEST_TEXT);\n\n        // one manifest stream\n        Assert.assertEquals(1, actual.size());\n\n        ManifestStream manifest = actual.get(0);\n        // two locators\n        Assert.assertEquals(2, manifest.getKeepLocators().size());\n        // one file token\n        Assert.assertEquals(1, manifest.getFileTokens().size());\n\n    }\n\n    @Test\n    public void allLocatorsAndFileTokensAreExtractedFromComplexManifest() {\n\n        List<ManifestStream> actual = manifestDecoder.decode(MULTIPLE_LINES_MANIFEST_TEXT);\n\n        // two manifest streams\n        Assert.assertEquals(2, actual.size());\n\n        // first stream - 1 locator and 3 file tokens\n        ManifestStream firstManifestStream = actual.get(0);\n        Assert.assertEquals(1, firstManifestStream.getKeepLocators().size());\n        Assert.assertEquals(3, firstManifestStream.getFileTokens().size());\n\n        // second stream - 1 locator and 1 file token\n        ManifestStream secondManifestStream = actual.get(1);\n        Assert.assertEquals(1, secondManifestStream.getKeepLocators().size());\n        Assert.assertEquals(1, secondManifestStream.getFileTokens().size());\n    }\n\n    @Test\n    public void manifestTextWithInvalidStreamNameThrowsException() {\n\n        try {\n            List<ManifestStream> actual = manifestDecoder.decode(MANIFEST_TEXT_WITH_INVALID_FIRST_PATH_COMPONENT);\n            fail();\n        } catch (ArvadosClientException e) {\n            Assert.assertEquals(\"Invalid first path component (expecting \\\".\\\")\", e.getMessage());\n        }\n\n    }\n\n    @Test\n    public void emptyManifestTextThrowsException() {\n        String emptyManifestText = null;\n\n        try {\n            List<ManifestStream> actual = manifestDecoder.decode(emptyManifestText);\n            fail();\n        } catch (ArvadosClientException e) {\n            Assert.assertEquals(\"Manifest text cannot be empty.\", e.getMessage());\n        }\n\n        emptyManifestText = \"\";\n        try {\n            List<ManifestStream> actual = manifestDecoder.decode(emptyManifestText);\n            fail();\n        } catch (ArvadosClientException e) {\n          
  Assert.assertEquals(\"Manifest text cannot be empty.\", e.getMessage());\n        }\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/logic/collection/ManifestFactoryTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.collection;\n\nimport org.arvados.client.test.utils.FileTestUtils;\nimport org.assertj.core.util.Lists;\nimport org.junit.Test;\nimport org.junit.Ignore;\n\nimport java.io.File;\nimport java.util.List;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\npublic class ManifestFactoryTest {\n\n    @Test\n    @Ignore(\"Failing test #15041\")\n    public void manifestIsCreatedAsExpected() throws Exception {\n\n        // given\n        List<File> files = FileTestUtils.generatePredefinedFiles();\n        List<String> locators = Lists.newArrayList(\"a\", \"b\", \"c\");\n        ManifestFactory factory = ManifestFactory.builder()\n                .files(files)\n                .locators(locators)\n                .build();\n\n        // when\n        String actual = factory.create();\n\n        // then\n        assertThat(actual).isEqualTo(\". a b c 0:1024:test-file1 1024:20480:test-file2 21504:1048576:test-file\\\\0403\\n\");\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/logic/collection/ManifestStreamTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.collection;\n\n\nimport org.junit.Assert;\nimport org.junit.Test;\n\nimport java.util.List;\n\npublic class ManifestStreamTest {\n\n    private ManifestDecoder manifestDecoder = new ManifestDecoder();\n\n    @Test\n    public void toStringReturnsProperlyConnectedManifestStream() throws Exception{\n        String encodedManifest = \". eff999f3b5158331eb44a9a93e3b36e1+67108864 db141bfd11f7da60dce9e5ee85a988b8+34038725 0:101147589:rna.SRR948778.bam\\\\n\\\"\";\n        List<ManifestStream> manifestStreams = manifestDecoder.decode(encodedManifest);\n        Assert.assertEquals(encodedManifest, manifestStreams.get(0).toString());\n\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/logic/keep/FileDownloaderTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.keep;\n\nimport com.fasterxml.jackson.databind.ObjectMapper;\nimport org.arvados.client.api.client.CollectionsApiClient;\nimport org.arvados.client.api.client.KeepWebApiClient;\nimport org.arvados.client.api.model.Collection;\nimport org.arvados.client.common.Characters;\nimport org.arvados.client.logic.collection.FileToken;\nimport org.arvados.client.logic.collection.ManifestDecoder;\nimport org.arvados.client.logic.collection.ManifestStream;\nimport org.arvados.client.test.utils.FileTestUtils;\nimport org.apache.commons.io.FileUtils;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\nimport org.junit.runner.RunWith;\nimport org.mockito.InjectMocks;\nimport org.mockito.Mock;\nimport org.mockito.junit.MockitoJUnitRunner;\n\nimport java.io.ByteArrayInputStream;\nimport java.io.File;\nimport java.io.IOException;\nimport java.io.InputStream;\nimport java.nio.charset.StandardCharsets;\nimport java.nio.file.Files;\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.List;\n\nimport static org.arvados.client.test.utils.FileTestUtils.*;\nimport static org.assertj.core.api.Assertions.assertThat;\nimport static org.junit.Assert.assertArrayEquals;\nimport static org.junit.Assert.assertEquals;\nimport static org.junit.Assert.assertNotNull;\nimport static org.junit.Assert.assertTrue;\nimport static org.mockito.Mockito.when;\n\n@RunWith(MockitoJUnitRunner.class)\npublic class FileDownloaderTest {\n\n    static final ObjectMapper MAPPER = new ObjectMapper().findAndRegisterModules();\n    private Collection collectionToDownload;\n    private ManifestStream manifestStream;\n\n    @Mock\n    private CollectionsApiClient collectionsApiClient;\n    @Mock\n    private KeepWebApiClient keepWebApiClient;\n    @Mock\n    private ManifestDecoder manifestDecoder;\n    @InjectMocks\n    private FileDownloader fileDownloader;\n\n    @Before\n    public void setUp() throws Exception {\n        FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR);\n        FileTestUtils.createDirectory(FILE_DOWNLOAD_TEST_DIR);\n\n        collectionToDownload = prepareCollection();\n        manifestStream = prepareManifestStream();\n    }\n\n    @Test\n    public void downloadingAllFilesFromCollectionWorksProperly() throws Exception {\n        // given\n        List<File> files = generatePredefinedFiles();\n\n        //having\n        when(collectionsApiClient.get(collectionToDownload.getUuid())).thenReturn(collectionToDownload);\n        when(manifestDecoder.decode(collectionToDownload.getManifestText())).thenReturn(Arrays.asList(manifestStream));\n\n        // Mock download responses for all three files based on the file tokens\n        when(keepWebApiClient.download(collectionToDownload.getUuid(), \"test-file1\")).thenReturn(FileUtils.readFileToByteArray(files.get(0)));\n        when(keepWebApiClient.download(collectionToDownload.getUuid(), \"test-file2\")).thenReturn(FileUtils.readFileToByteArray(files.get(1)));\n        when(keepWebApiClient.download(collectionToDownload.getUuid(), \"test-file 3\")).thenReturn(FileUtils.readFileToByteArray(files.get(2)));\n\n        //when\n        List<File> downloadedFiles = fileDownloader.downloadFilesFromCollection(collectionToDownload.getUuid(), FILE_DOWNLOAD_TEST_DIR);\n\n        //then\n        assertEquals(3, downloadedFiles.size()); // 3 files downloaded\n\n        File 
collectionDir = new File(FILE_DOWNLOAD_TEST_DIR + Characters.SLASH + collectionToDownload.getUuid());\n        assertTrue(collectionDir.exists()); // collection directory created\n\n        // 3 files correctly saved\n        assertThat(downloadedFiles).allMatch(File::exists);\n\n        // Verify file contents match\n        File downloaded1 = new File(collectionDir + Characters.SLASH + \"test-file1\");\n        File downloaded2 = new File(collectionDir + Characters.SLASH + \"test-file2\");\n        File downloaded3 = new File(collectionDir + Characters.SLASH + \"test-file 3\");\n\n        assertArrayEquals(FileUtils.readFileToByteArray(downloaded1), FileUtils.readFileToByteArray(files.get(0)));\n        assertArrayEquals(FileUtils.readFileToByteArray(downloaded2), FileUtils.readFileToByteArray(files.get(1)));\n        assertArrayEquals(FileUtils.readFileToByteArray(downloaded3), FileUtils.readFileToByteArray(files.get(2)));\n    }\n\n    @Test\n    public void downloadingSingleFileFromKeepWebWorksCorrectly() throws Exception{\n        //given\n        File file = generatePredefinedFiles().get(0);\n\n        //having\n        when(collectionsApiClient.get(collectionToDownload.getUuid())).thenReturn(collectionToDownload);\n        when(manifestDecoder.decode(collectionToDownload.getManifestText())).thenReturn(Arrays.asList(manifestStream));\n        when(keepWebApiClient.download(collectionToDownload.getUuid(), file.getName())).thenReturn(FileUtils.readFileToByteArray(file));\n\n        //when\n        File downloadedFile = fileDownloader.downloadSingleFileUsingKeepWeb(file.getName(), collectionToDownload.getUuid(), FILE_DOWNLOAD_TEST_DIR);\n\n        //then\n        assertTrue(downloadedFile.exists());\n        assertEquals(file.getName(), downloadedFile.getName());\n        assertArrayEquals(FileUtils.readFileToByteArray(downloadedFile), FileUtils.readFileToByteArray(file));\n    }\n\n    @Test\n    public void testDownloadFileWithResume() throws Exception {\n        //given\n        String collectionUuid = \"some-collection-uuid\";\n        String expectedDataString = \"testData\";\n        String fileName = \"sample-file-name\";\n        long start = 0;\n        Long end = null;\n\n        InputStream inputStream = new ByteArrayInputStream(expectedDataString.getBytes());\n\n        when(keepWebApiClient.get(collectionUuid, fileName, start, end)).thenReturn(inputStream);\n\n        //when\n        File downloadedFile = fileDownloader.downloadFileWithResume(collectionUuid, fileName, FILE_DOWNLOAD_TEST_DIR, start, end);\n\n        //then\n        assertNotNull(downloadedFile);\n        assertTrue(downloadedFile.exists());\n        String actualDataString = Files.readString(downloadedFile.toPath());\n        assertEquals(\"The content of the file does not match the expected data.\", expectedDataString, actualDataString);\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR);\n        FileTestUtils.cleanDirectory(FILE_DOWNLOAD_TEST_DIR);\n    }\n\n    private Collection prepareCollection() throws IOException {\n        // collection that will be returned by mocked collectionsApiClient\n        String filePath = \"src/test/resources/org/arvados/client/api/client/collections-download-file.json\";\n        File jsonFile = new File(filePath);\n        return MAPPER.readValue(jsonFile, Collection.class);\n    }\n\n    private ManifestStream prepareManifestStream() throws Exception {\n        // manifestStream that will be 
returned by mocked manifestDecoder\n        List<FileToken> fileTokens = new ArrayList<>();\n        fileTokens.add(new FileToken(\"0:1024:test-file1\"));\n        fileTokens.add(new FileToken(\"1024:20480:test-file2\"));\n        fileTokens.add(new FileToken(\"21504:1048576:test-file\\\\0403\"));\n\n        KeepLocator keepLocator = new KeepLocator(\"163679d58edaadc28db769011728a72c+1070080+A3acf8c1fe582c265d2077702e4a7d74fcc03aba8@5aa4fdeb\");\n        return new ManifestStream(\".\", Arrays.asList(keepLocator), fileTokens);\n    }\n\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/logic/keep/KeepLocatorTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.logic.keep;\n\nimport org.junit.Test;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\npublic class KeepLocatorTest {\n\n    private KeepLocator locator;\n\n    @Test\n    public void md5sumIsExtracted() throws Exception {\n\n        // given\n        locator = new KeepLocator(\"7df44272090cee6c0732382bba415ee9+70\");\n\n        // when\n        String actual = locator.getMd5sum();\n\n        // then\n        assertThat(actual).isEqualTo(\"7df44272090cee6c0732382bba415ee9\");\n    }\n\n    @Test\n    public void locatorIsStrippedWithMd5sumAndSize() throws Exception {\n\n        // given\n        locator = new KeepLocator(\"7df44272090cee6c0732382bba415ee9+70\");\n\n        // when\n        String actual = locator.stripped();\n\n        // then\n        assertThat(actual).isEqualTo(\"7df44272090cee6c0732382bba415ee9+70\");\n    }\n\n\n    @Test\n    public void locatorToStringProperlyShowing() throws Exception {\n\n        // given\n        locator = new KeepLocator(\"7df44272090cee6c0732382bba415ee9+70+Ae8f48913fed782cbe463e0499ab37697ee06a2f8@5826180f\");\n\n        // when\n        String actual = locator.toString();\n\n        // then\n        assertThat(actual).isEqualTo(\"7df44272090cee6c0732382bba415ee9+70+Ae8f48913fed782cbe463e0499ab37697ee06a2f8@5826180f\");\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/test/utils/ApiClientTestUtils.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.test.utils;\n\nimport org.arvados.client.config.FileConfigProvider;\nimport okhttp3.mockwebserver.MockResponse;\nimport okhttp3.mockwebserver.RecordedRequest;\nimport org.apache.commons.io.FileUtils;\n\nimport java.io.File;\nimport java.io.IOException;\nimport java.nio.charset.Charset;\n\nimport static org.assertj.core.api.Assertions.assertThat;\n\npublic final class ApiClientTestUtils {\n\n    static final String BASE_URL = \"/arvados/v1/\";\n\n    private ApiClientTestUtils() {}\n\n    public static MockResponse getResponse(String filename) throws IOException {\n        String filePath = String.format(\"src/test/resources/org/arvados/client/api/client/%s.json\", filename);\n        File jsonFile = new File(filePath);\n        String json = FileUtils.readFileToString(jsonFile, Charset.defaultCharset());\n        return new MockResponse().setBody(json);\n    }\n\n    public static void assertAuthorizationHeader(RecordedRequest request) {\n        assertThat(request.getHeader(\"authorization\")).isEqualTo(\"Bearer \" + new FileConfigProvider().getApiToken());\n    }\n\n    public static void assertRequestPath(RecordedRequest request, String subPath) {\n        assertThat(request.getPath()).isEqualTo(BASE_URL + subPath);\n    }\n\n    public static void assertRequestMethod(RecordedRequest request, RequestMethod requestMethod) {\n        assertThat(request.getMethod()).isEqualTo(requestMethod.name());\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientIntegrationTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.test.utils;\n\nimport org.arvados.client.config.FileConfigProvider;\nimport org.arvados.client.facade.ArvadosFacade;\nimport org.junit.BeforeClass;\n\nimport static org.junit.Assert.assertTrue;\n\npublic class ArvadosClientIntegrationTest {\n\n    protected static final FileConfigProvider CONFIG = new FileConfigProvider(\"integration-tests-application.conf\");\n    protected static final ArvadosFacade FACADE = new ArvadosFacade(CONFIG);\n    protected static final String PROJECT_UUID = CONFIG.getIntegrationTestProjectUuid();\n\n    @BeforeClass\n    public static void validateConfiguration(){\n        String msg = \" info must be provided in configuration\";\n        CONFIG.getConfig().entrySet()\n                .forEach(e -> assertTrue(\"Parameter \" + e.getKey() + msg, !e.getValue().render().equals(\"\\\"\\\"\")));\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientMockedWebServerTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.test.utils;\n\nimport okhttp3.mockwebserver.MockWebServer;\nimport org.junit.After;\nimport org.junit.Before;\n\npublic class ArvadosClientMockedWebServerTest extends ArvadosClientUnitTest {\n    private static final int PORT = CONFIG.getApiPort();\n    protected MockWebServer server = new MockWebServer();\n\n    @Before\n    public void setUpServer() throws Exception {\n        server.start(PORT);\n    }\n    \n    @After\n    public void tearDownServer() throws Exception {\n        server.shutdown();\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/test/utils/ArvadosClientUnitTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.test.utils;\n\nimport org.arvados.client.config.FileConfigProvider;\nimport org.junit.BeforeClass;\n\nimport static org.junit.Assert.assertTrue;\n\npublic class ArvadosClientUnitTest {\n\n    protected static final FileConfigProvider CONFIG = new FileConfigProvider(\"application.conf\");\n\n    @BeforeClass\n    public static void validateConfiguration(){\n        String msg = \" info must be provided in configuration\";\n        CONFIG.getConfig().entrySet().forEach(e -> assertTrue(\"Parameter \" + e.getKey() + msg, !e.getValue().render().equals(\"\\\"\\\"\")));\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/test/utils/FileTestUtils.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.test.utils;\n\nimport org.apache.commons.io.FileUtils;\nimport org.assertj.core.util.Lists;\n\nimport java.io.File;\nimport java.io.IOException;\nimport java.io.RandomAccessFile;\nimport java.util.List;\n\npublic class FileTestUtils {\n\n    public static final String FILE_SPLIT_TEST_DIR = \"/tmp/file-split\";\n    public static final String FILE_DOWNLOAD_TEST_DIR = \"/tmp/arvados-downloaded\";\n    public static final String TEST_FILE = FILE_SPLIT_TEST_DIR + \"/test-file\";\n    public static long ONE_FOURTH_GB = FileUtils.ONE_GB / 4;\n    public static long ONE_EIGTH_GB = FileUtils.ONE_GB / 8;\n    public static long HALF_GB = FileUtils.ONE_GB / 2;\n    public static int FILE_SPLIT_SIZE = 64;\n\n    public static void createDirectory(String path) throws Exception {\n        new File(path).mkdirs();\n    }\n\n    public static void cleanDirectory(String directory) throws Exception {\n        FileUtils.cleanDirectory(new File(directory));\n    }\n    \n    public static File generateFile(String path, long length) throws IOException {\n        RandomAccessFile testFile = new RandomAccessFile(path, \"rwd\");\n        testFile.setLength(length);\n        testFile.close();\n        return new File(path);\n    }\n    \n    public static List<File> generatePredefinedFiles() throws IOException {\n        return Lists.newArrayList(\n                generateFile(TEST_FILE + 1, FileUtils.ONE_KB),\n                generateFile(TEST_FILE + 2, FileUtils.ONE_KB * 20),\n                generateFile(TEST_FILE + \" \" + 3, FileUtils.ONE_MB)\n            );\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/test/utils/RequestMethod.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.test.utils;\n\npublic enum RequestMethod {\n    \n    GET, POST, PUT, DELETE\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/utils/FileMergeTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.utils;\n\nimport org.arvados.client.test.utils.FileTestUtils;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\nimport java.io.File;\nimport java.util.List;\n\nimport static org.arvados.client.test.utils.FileTestUtils.*;\nimport static org.assertj.core.api.Assertions.assertThat;\n\npublic class FileMergeTest {\n\n    @Before\n    public void setUp() throws Exception {\n        FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR);\n    }\n\n    @Test\n    public void fileChunksAreMergedIntoOneFile() throws Exception {\n\n        // given\n        FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_EIGTH_GB);\n\n        List<File> files = FileSplit.split(new File(TEST_FILE), new File(FILE_SPLIT_TEST_DIR), FILE_SPLIT_SIZE);\n        File targetFile = new File(TEST_FILE);\n\n        // when\n        FileMerge.merge(files, targetFile);\n\n        // then\n        assertThat(targetFile.length()).isEqualTo(FileTestUtils.ONE_EIGTH_GB);\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR);\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/java/org/arvados/client/utils/FileSplitTest.java",
    "content": "/*\n * Copyright (C) The Arvados Authors. All rights reserved.\n *\n * SPDX-License-Identifier: AGPL-3.0 OR Apache-2.0\n *\n */\n\npackage org.arvados.client.utils;\n\nimport org.arvados.client.test.utils.FileTestUtils;\nimport org.junit.After;\nimport org.junit.Before;\nimport org.junit.Test;\n\nimport java.io.File;\nimport java.util.List;\n\nimport static org.arvados.client.test.utils.FileTestUtils.*;\nimport static org.assertj.core.api.Assertions.assertThat;\n\npublic class FileSplitTest {\n\n    @Before\n    public void setUp() throws Exception {\n        FileTestUtils.createDirectory(FILE_SPLIT_TEST_DIR);\n    }\n\n    @Test\n    public void fileIsDividedIntoSmallerChunks() throws Exception {\n\n        // given\n        int expectedSize = 2;\n        int expectedFileSizeInBytes = 67108864;\n        FileTestUtils.generateFile(TEST_FILE, FileTestUtils.ONE_EIGTH_GB);\n\n        // when\n        List<File> actual = FileSplit.split(new File(TEST_FILE), new File(FILE_SPLIT_TEST_DIR), FILE_SPLIT_SIZE);\n\n        // then\n        assertThat(actual).hasSize(expectedSize);\n        assertThat(actual).allMatch(a -> a.length() == expectedFileSizeInBytes);\n    }\n\n    @After\n    public void tearDown() throws Exception {\n        FileTestUtils.cleanDirectory(FILE_SPLIT_TEST_DIR);\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/application.conf",
    "content": "# configuration for unit tests\n\narvados {\n    api {\n        port = 9000\n        keepweb-port = 9000\n        token = 1m69yw9m2wanubzyfkb1e9icplqhtr2r969bu9rnzqbqhb7cnb\n        protocol = \"http\"\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/integration-tests-application.conf",
    "content": "# Configuration for integration tests\n#\n# Remarks:\n# * For example see integration-tests-application.conf.example\n# * While providing data remove apostrophes (\"\") from each line\n# * See Arvados documentation for information how to obtain a token:\n#   https://doc.arvados.org/user/reference/api-tokens.html\n#\n\narvados {\n    api {\n        keepweb-host = \"\"\n        keepweb-port = 443\n        host = \"\"\n        port = 443\n        token = \"\"\n        protocol = https\n        host-insecure = false\n    }\n    integration-tests {\n        project-uuid = \"\"\n    }\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/integration-tests-application.conf.example",
    "content": "# example configuration for integration tests\n\narvados {\n    api {\n        keepweb-host = collections.ardev.mycompany.com\n        keepweb-port = 443\n        host = api.ardev.mycompany.com\n        port = 443\n        token = mytoken\n        protocol = https\n        host-insecure = false\n    }\n    integration-tests {\n        project-uuid = ardev-j7d0g-aa123f81q6y7skk\n    }\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker",
    "content": "mock-maker-inline"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/collections-create-manifest.json",
    "content": "{\n    \"kind\": \"arvados#collection\",\n    \"etag\": \"bqoujj7oybdx0jybwvtsebj7y\",\n    \"uuid\": \"112ci-4zz18-12tncxzptzbec1p\",\n    \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n    \"created_at\": \"2017-11-21T13:38:56.521853000Z\",\n    \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n    \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n    \"modified_at\": \"2017-11-21T13:38:56.521853000Z\",\n    \"name\": \"Super Collection\",\n    \"description\": null,\n    \"properties\": {},\n    \"portable_data_hash\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n    \"manifest_text\": \". 7df44272090cee6c0732382bba415ee9+70+Aa5ece4560e3329315165b36c239b8ab79c888f8a@5a1d5708 0:70:README.md\\n\",\n    \"replication_desired\": null,\n    \"replication_confirmed\": null,\n    \"replication_confirmed_at\": null,\n    \"delete_at\": null,\n    \"trash_at\": null,\n    \"is_trashed\": false\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/collections-create-simple.json",
    "content": "{\n    \"kind\": \"arvados#collection\",\n    \"etag\": \"bqoujj7oybdx0jybwvtsebj7y\",\n    \"uuid\": \"112ci-4zz18-12tncxzptzbec1p\",\n    \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n    \"created_at\": \"2017-11-21T13:38:56.521853000Z\",\n    \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n    \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n    \"modified_at\": \"2017-11-21T13:38:56.521853000Z\",\n    \"name\": \"Super Collection\",\n    \"description\": null,\n    \"properties\": {},\n    \"portable_data_hash\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n    \"manifest_text\": \"\",\n    \"replication_desired\": null,\n    \"replication_confirmed\": null,\n    \"replication_confirmed_at\": null,\n    \"delete_at\": null,\n    \"trash_at\": null,\n    \"is_trashed\": false\n}\n"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/collections-download-file.json",
    "content": "{\n  \"kind\": \"arvados#collection\",\n  \"etag\": \"2vm76dxmzr23u9774iguuxsrg\",\n  \"uuid\": \"ardev-4zz18-jk5vo4uo9u5vj52\",\n  \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n  \"created_at\": \"2018-02-19T11:00:00.852389000Z\",\n  \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n  \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n  \"modified_at\": \"2018-02-19T11:00:00.852389000Z\",\n  \"name\": \"New Collection (2018-02-19 12:00:00.273)\",\n  \"description\": null,\n  \"properties\": {},\n  \"portable_data_hash\": \"49581091dfad651945c12b08d4735d88+112\",\n  \"manifest_text\": \". 163679d58edaadc28db769011728a72c+1070080+A3acf8c1fe582c265d2077702e4a7d74fcc03aba8@5aa4fdeb 0:1024:test-file1 1024:20480:test-file2 21504:1048576:test-file\\\\0403\\n\",\n  \"replication_desired\": null,\n  \"replication_confirmed\": null,\n  \"replication_confirmed_at\": null,\n  \"delete_at\": null,\n  \"trash_at\": null,\n  \"is_trashed\": false\n}"
  },
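The manifest_text above ends in test-file\0403: Keep manifests encode whitespace and other special characters in file names as a backslash followed by three octal digits, so \040 is a space and the third file is named "test-file 3". A minimal decoding sketch (illustrative; not the SDK's manifest parser):

```java
// Minimal sketch: decode the octal escapes (e.g. "\040" for space) that Keep
// manifest_text uses in file names; illustrative, not the SDK's parser.
public class ManifestNameDecoder {
    static String decode(String escaped) {
        StringBuilder out = new StringBuilder();
        for (int i = 0; i < escaped.length(); i++) {
            char c = escaped.charAt(i);
            if (c == '\\' && i + 3 < escaped.length()) {
                // A backslash introduces exactly three octal digits.
                out.append((char) Integer.parseInt(escaped.substring(i + 1, i + 4), 8));
                i += 3;
            } else {
                out.append(c);
            }
        }
        return out.toString();
    }

    public static void main(String[] args) {
        System.out.println(decode("test-file\\0403")); // prints "test-file 3"
    }
}
```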
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/collections-get.json",
    "content": "{\n    \"kind\": \"arvados#collection\",\n    \"etag\": \"52tk5yg024cwhkkcidu3zcmj2\",\n    \"uuid\": \"112ci-4zz18-p51w7z3fpopo6sm\",\n    \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n    \"created_at\": \"2017-11-15T10:36:03.554356000Z\",\n    \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n    \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n    \"modified_at\": \"2017-11-15T10:36:03.554356000Z\",\n    \"name\": \"Collection With Manifest #2\",\n    \"description\": null,\n    \"properties\": {},\n    \"portable_data_hash\": \"6c4106229b08fe25f48b3a7a8289dd46+143\",\n    \"manifest_text\": \". 66c9daa69630e092e9ce554b7aae8a20+524288+A4a15ffea58f259e09f68d3f7eea29942750a79d0@5a269ff6 435f38dd384b06c248feabee0cabca52+524288+A8a99e8148bd368c49901526098901bb7d7890c3b@5a269ff6 dc5b6c104aab35fff6d70a4dadc28d37+391727+Ab0662d549c422c983fccaad02b4ade7b48a8255b@5a269ff6 0:1440303:lombok.jar\\n\",\n    \"replication_desired\": null,\n    \"replication_confirmed\": null,\n    \"replication_confirmed_at\": null,\n    \"delete_at\": null,\n    \"trash_at\": null,\n    \"is_trashed\": false\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/collections-list.json",
    "content": "{\n    \"kind\": \"arvados#collectionList\",\n    \"etag\": \"\",\n    \"offset\": 0,\n    \"limit\": 100,\n    \"items\": [\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"8xyiwnih5b5vzmj5sa33348a7\",\n            \"uuid\": \"112ci-4zz18-x6xfmvz0chnkzgv\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-15T13:06:36.934337000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-15T13:06:36.934337000Z\",\n            \"name\": \"Collection With Manifest #3\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"6c4106229b08fe25f48b3a7a8289dd46+143\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"8cmhep8aixe4p42pxjoct5502\",\n            \"uuid\": \"112ci-4zz18-p51w7z3fpopo6sm\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-15T10:36:03.554356000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-15T10:36:03.554356000Z\",\n            \"name\": \"Collection With Manifest #2\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"6c4106229b08fe25f48b3a7a8289dd46+143\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"de2ol2dyvsba3mn46al760cyg\",\n            \"uuid\": \"112ci-4zz18-xb6gf2yraln7cwa\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-15T09:32:44.146172000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-15T09:32:44.146172000Z\",\n            \"name\": \"New collection\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"dby68gd0vatvi090cu0axvtq3\",\n            \"uuid\": \"112ci-4zz18-r5jfktpn3a9o0ap\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-14T13:00:35.431046000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-14T13:00:35.431046000Z\",\n            
\"name\": \"Collection With Manifest #1\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"3c59518bf8e1100d420488d822682b4a+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"2b34uzau862w862a2rv36agv6\",\n            \"uuid\": \"112ci-4zz18-nqxk8xjn6mtskzt\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-14T12:59:34.767068000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-14T12:59:34.767068000Z\",\n            \"name\": \"Empty Collection #2\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"60aywazztwfspnasltufcjxpa\",\n            \"uuid\": \"112ci-4zz18-rs9bcf5qnyfjrkm\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-14T12:52:33.124452000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-14T12:52:33.124452000Z\",\n            \"name\": \"Empty Collection #1\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"1jward6snif3tsjzftxh8hvwh\",\n            \"uuid\": \"112ci-4zz18-af656lee4kv7q2m\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-14T12:09:05.319319000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-14T12:09:05.319319000Z\",\n            \"name\": \"create example\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"zs2n4zliu6nb5yk3rw6h5ugw\",\n            \"uuid\": \"112ci-4zz18-y2zqix7k9an7nro\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            
\"created_at\": \"2017-11-13T16:59:02.299257000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-13T16:59:02.299257000Z\",\n            \"name\": \"Saved at 2017-11-13 16:59:01 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"eijhemzgy44ofmu0dtrowl604\",\n            \"uuid\": \"112ci-4zz18-wq77jfi62u5i4rv\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-13T16:58:10.637548000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-13T16:58:10.637548000Z\",\n            \"name\": \"Saved at 2017-11-13 16:58:07 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"1oq7ye0gfbf3ih6y864w3n683\",\n            \"uuid\": \"112ci-4zz18-unaeckkjgeg7ui0\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-10T09:43:07.583862000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-10T09:43:07.583862000Z\",\n            \"name\": \"Saved at 2017-11-10 09:43:03 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"4qmqlro878yx8q7ikhilo8qwn\",\n            \"uuid\": \"112ci-4zz18-5y6atonkxq55lms\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T12:46:15.245770000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T12:46:15.245770000Z\",\n            \"name\": \"Saved at 2017-11-09 12:46:13 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            
\"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"91v698hngoz241c38bbmh0ogc\",\n            \"uuid\": \"112ci-4zz18-b3fjqd01pxjvseo\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T11:54:07.259998000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T11:54:07.259998000Z\",\n            \"name\": \"Saved at 2017-11-09 11:54:04 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"215t842ckrrgjpxrxr4j0gsui\",\n            \"uuid\": \"112ci-4zz18-cwfxl8h41q18n65\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T11:49:38.276888000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T11:49:38.276888000Z\",\n            \"name\": \"Saved at 2017-11-09 11:49:35 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"90z6i3oqv197osng3wvjjir3t\",\n            \"uuid\": \"112ci-4zz18-uv4xu08739tn1vy\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T11:43:05.917513000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T11:43:05.917513000Z\",\n            \"name\": \"Saved at 2017-11-09 11:43:05 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"5lcf6wvc3wypwobswdz22wen\",\n            \"uuid\": \"112ci-4zz18-pzisn8c5mefzczv\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T11:40:38.804718000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n          
  \"modified_at\": \"2017-11-09T11:40:38.804718000Z\",\n            \"name\": \"Saved at 2017-11-09 11:40:36 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"98s08xew49avui1gy3mzit8je\",\n            \"uuid\": \"112ci-4zz18-mj24uwtnqqrno27\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T11:40:25.189869000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T11:40:25.189869000Z\",\n            \"name\": \"Saved at 2017-11-09 11:40:24 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"a09wnvl4i51xqx7u9yf4qbi94\",\n            \"uuid\": \"112ci-4zz18-oco162516upgqng\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T11:39:04.148785000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T11:39:04.148785000Z\",\n            \"name\": \"Saved at 2017-11-09 11:39:03 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"4ee2xudbc5rkr597drgu9tg10\",\n            \"uuid\": \"112ci-4zz18-tlze7dgczsdwkep\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T11:37:59.478975000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T11:37:59.478975000Z\",\n            \"name\": \"Saved at 2017-11-09 11:37:58 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            
\"etag\": \"5aa3evnbceo3brnps2e1sq8ts\",\n            \"uuid\": \"112ci-4zz18-nq0kxi9d7w64la1\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T11:32:23.329259000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T11:32:23.329259000Z\",\n            \"name\": \"Saved at 2017-11-09 11:32:22 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"97vicgogv8bovmk4s2jymsdq\",\n            \"uuid\": \"112ci-4zz18-fks9mewtw155pvx\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T11:30:17.589462000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T11:30:17.589462000Z\",\n            \"name\": \"Saved at 2017-11-09 11:30:17 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"btktwjclv063s1rd6duvk51v3\",\n            \"uuid\": \"112ci-4zz18-kp356e0q2wdl2df\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T11:29:26.820481000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T11:29:26.820481000Z\",\n            \"name\": \"Saved at 2017-11-09 11:29:25 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"bob83na42pufqli1a5buxryvm\",\n            \"uuid\": \"112ci-4zz18-0ey8ob38xf7surq\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T11:08:53.781498000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T11:08:53.781498000Z\",\n            \"name\": \"Saved at 2017-11-09 11:08:52 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            
\"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"7pl1x327eeutqtsjppdj284g8\",\n            \"uuid\": \"112ci-4zz18-wu2n0fv3cewna1n\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T11:08:33.423284000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T11:08:33.423284000Z\",\n            \"name\": \"Saved at 2017-11-09 11:08:33 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"2wg1wn2o18ubrgbhbqwwsslhf\",\n            \"uuid\": \"112ci-4zz18-hyybo6yuvkx4hrm\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T10:44:53.096798000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T10:44:53.096798000Z\",\n            \"name\": \"Saved at 2017-11-09 10:44:51 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"8jk0at4e69cwjyjamvm4wz2oj\",\n            \"uuid\": \"112ci-4zz18-h3gjq7gzd4syanw\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T10:41:31.278281000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T10:41:31.278281000Z\",\n            \"name\": \"Saved at 2017-11-09 10:41:30 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"be57zhzufz2hp1tbdwidoro5j\",\n            \"uuid\": \"112ci-4zz18-jinwyyaeigjs1yg\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": 
\"2017-11-09T10:41:07.083017000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T10:41:07.083017000Z\",\n            \"name\": \"Saved at 2017-11-09 10:41:06 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"29lj2roie4cygo5ffgrduflly\",\n            \"uuid\": \"112ci-4zz18-etf8aghyxlfxvo1\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T10:40:31.710865000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T10:40:31.710865000Z\",\n            \"name\": \"Saved at 2017-11-09 10:40:31 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"6div78e1nhusii4x1xkp3rg2v\",\n            \"uuid\": \"112ci-4zz18-jtbn4edpkkhbm9b\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T10:39:36.999602000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T10:39:36.999602000Z\",\n            \"name\": \"Saved at 2017-11-09 10:39:36 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"12wlbsxlmy3sze4v2m0ua7ake\",\n            \"uuid\": \"112ci-4zz18-whdleimp34hiqp6\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T10:19:52.879907000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T10:19:52.879907000Z\",\n            \"name\": \"Saved at 2017-11-09 10:19:52 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            
\"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"9bv1bw9afb3w84gu55uzcgd6h\",\n            \"uuid\": \"112ci-4zz18-kj8dz72zpo5kbtm\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T10:16:31.558621000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T10:16:31.558621000Z\",\n            \"name\": \"Saved at 2017-11-09 10:16:30 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"5ba3fc508718fabfa20d24390fe31856+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"683d77tvlhe97etk9bk2bx8ds\",\n            \"uuid\": \"112ci-4zz18-tr306nau9hrr437\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T09:59:44.978811000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T09:59:44.978811000Z\",\n            \"name\": \"Saved at 2017-11-09 09:59:44 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"67cbebb9f739b6b06ca056d21115cf43+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"1m34v9jbna2v7gv7auio54i8w\",\n            \"uuid\": \"112ci-4zz18-oxuk69569mxztp0\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T09:59:30.774888000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T09:59:30.774888000Z\",\n            \"name\": \"Saved at 2017-11-09 09:59:30 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"67cbebb9f739b6b06ca056d21115cf43+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"7l2a9fhqmxg7ghn7osx0s19v4\",\n            \"uuid\": \"112ci-4zz18-wf8sl6xbyfwjyer\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T09:58:21.496088000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n         
   \"modified_at\": \"2017-11-09T09:58:21.496088000Z\",\n            \"name\": \"Saved at 2017-11-09 09:58:20 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"67cbebb9f739b6b06ca056d21115cf43+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"33dw426fhs2vlb50b6301ukn0\",\n            \"uuid\": \"112ci-4zz18-drpia2es1hp9ydi\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T09:56:08.506505000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T09:56:08.506505000Z\",\n            \"name\": \"Saved at 2017-11-09 09:56:08 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"67cbebb9f739b6b06ca056d21115cf43+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"2437tnhn2gmti52lpm8nfq9ct\",\n            \"uuid\": \"112ci-4zz18-5b4px2i2dwyidfi\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T09:54:06.651026000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T09:54:06.651026000Z\",\n            \"name\": \"Saved at 2017-11-09 09:54:06 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"67cbebb9f739b6b06ca056d21115cf43+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"7e0k48zu93o57zudxjp1yrgjq\",\n            \"uuid\": \"112ci-4zz18-94oslnwnxe1f9wp\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T09:40:04.240297000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T09:40:04.240297000Z\",\n            \"name\": \"Saved at 2017-11-09 09:39:58 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"67cbebb9f739b6b06ca056d21115cf43+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            
\"etag\": \"cuirr803f54e89reakuq50oaq\",\n            \"uuid\": \"112ci-4zz18-2fk0d5d4jjc1fmq\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T09:36:14.952671000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T09:36:14.952671000Z\",\n            \"name\": \"Saved at 2017-11-09 09:36:08 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"67cbebb9f739b6b06ca056d21115cf43+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"3bi5xd8ezxrazk5266cwzn4s4\",\n            \"uuid\": \"112ci-4zz18-xp9pu81xyc5h422\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T09:35:29.552746000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T09:35:29.552746000Z\",\n            \"name\": \"Saved at 2017-11-09 09:35:29 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"67cbebb9f739b6b06ca056d21115cf43+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"59uaoxy6uh82i6lrvr3ht8gz1\",\n            \"uuid\": \"112ci-4zz18-znb4lo0if2as58c\",\n            \"owner_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"created_at\": \"2017-11-09T09:31:08.109971000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-09T09:31:08.109971000Z\",\n            \"name\": \"Saved at 2017-11-09 09:31:06 UTC by VirtualBox\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"67cbebb9f739b6b06ca056d21115cf43+53\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"dksrh8jznxoaidl29i1vv5904\",\n            \"uuid\": \"112ci-4zz18-6pvl5ea5u932qzi\",\n            \"owner_uuid\": \"112ci-j7d0g-tw71k7mxii6fqgx\",\n            \"created_at\": \"2017-11-08T12:48:32.238698000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-f4633qdjs6w8zcy\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-nd84czdo4iea1mz\",\n            \"modified_at\": \"2017-11-08T12:50:23.946608000Z\",\n            \"name\": \"New collection\",\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": 
\"18c037c51c3f74be53ea2b115afd0c5f+69\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        },\n        {\n            \"kind\": \"arvados#collection\",\n            \"etag\": \"1w1rhhd6oql4ceb7h9t16sf0q\",\n            \"uuid\": \"112ci-4zz18-wq5pyrxfv1t9isu\",\n            \"owner_uuid\": \"112ci-j7d0g-anonymouspublic\",\n            \"created_at\": \"2017-11-03T10:03:20.364737000Z\",\n            \"modified_by_client_uuid\": null,\n            \"modified_by_user_uuid\": \"112ci-tpzed-000000000000000\",\n            \"modified_at\": \"2017-11-03T10:03:20.364737000Z\",\n            \"name\": null,\n            \"description\": null,\n            \"properties\": {},\n            \"portable_data_hash\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n            \"replication_desired\": null,\n            \"replication_confirmed\": null,\n            \"replication_confirmed_at\": null,\n            \"delete_at\": null,\n            \"trash_at\": null,\n            \"is_trashed\": false\n        }\n    ],\n    \"items_available\": 41\n}"
  },
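The list fixture follows the standard Arvados list envelope: items carries one page of results, offset and limit describe the requested window, and items_available reports the total match count (41 here, so this single page under the 100-item limit is complete). A small sketch of the paging arithmetic a client can derive from those fields (illustrative names, not SDK API):

```java
// Paging sketch over the Arvados list envelope; illustrative, not SDK API.
public class PageMath {
    static boolean hasMorePages(int offset, int itemsOnPage, int itemsAvailable) {
        // More pages remain while the position after this page is still
        // below the server-reported total match count.
        return offset + itemsOnPage < itemsAvailable;
    }

    public static void main(String[] args) {
        System.out.println(hasMorePages(0, 41, 41));   // false: the fixture's one page holds everything
        System.out.println(hasMorePages(0, 100, 250)); // true: fetch the next page with offset=100
    }
}
```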
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/groups-get.json",
    "content": "{\n  \"kind\": \"arvados#group\",\n  \"etag\": \"3hw0vk4mbl0ofvia5k6x4dwrx\",\n  \"uuid\": \"ardev-j7d0g-bmg3pfqtx3ivczp\",\n  \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n  \"created_at\": \"2018-03-29T11:09:05.984597000Z\",\n  \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n  \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n  \"modified_at\": \"2018-03-29T11:09:05.984597000Z\",\n  \"name\": \"TestGroup1\",\n  \"group_class\": \"project\",\n  \"description\": null,\n  \"writable_by\": [\n    \"ardev-tpzed-n3kzq4fvoks3uw4\",\n    \"ardev-tpzed-n3kzq4fvoks3uw4\"\n  ],\n  \"delete_at\": null,\n  \"trash_at\": null,\n  \"is_trashed\": false\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/groups-list.json",
    "content": "{\n  \"kind\": \"arvados#groupList\",\n  \"etag\": \"\",\n  \"offset\": 0,\n  \"limit\": 100,\n  \"items\": [\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"68vubv3iw7663763bozxebmyf\",\n      \"uuid\": \"ardev-j7d0g-ylx7wnu1moge2di\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-18T09:09:21.126649000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-18T09:09:21.126649000Z\",\n      \"name\": \"TestProject1\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"68q7r8r37u9hckr2zsynvton3\",\n      \"uuid\": \"ardev-j7d0g-mnzhga726itrbrq\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-17T12:11:24.389594000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-17T12:11:24.389594000Z\",\n      \"name\": \"TestProject2\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"ef4vzx5gyudkrg9zml0zdv6qu\",\n      \"uuid\": \"ardev-j7d0g-0w9m1sz46ljtdnm\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-17T12:08:39.066802000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-17T12:08:39.066802000Z\",\n      \"name\": \"TestProject3\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"6h6h4ta6yyf9058delxk8fnqs\",\n      \"uuid\": \"ardev-j7d0g-r20iem5ou6h5wao\",\n      \"owner_uuid\": \"ardev-j7d0g-j7drd8yikkp6evd\",\n      \"created_at\": \"2018-04-17T12:03:39.647244000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-17T12:03:39.647244000Z\",\n      \"name\": \"TestProject4\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-j7d0g-j7drd8yikkp6evd\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"6se2y8f9o7uu06pbopgq56xds\",\n      \"uuid\": \"ardev-j7d0g-j7drd8yikkp6evd\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-17T11:58:31.339515000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": 
\"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-17T11:58:31.339515000Z\",\n      \"name\": \"TestProject5\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"2si26vaig3vig9266pqkqh2gy\",\n      \"uuid\": \"ardev-j7d0g-kh1g7i5va870xt0\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-17T10:56:54.391676000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-17T10:56:54.391676000Z\",\n      \"name\": \"TestProject6\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"edgnz6q0vt2u3o13ujtfohb75\",\n      \"uuid\": \"ardev-j7d0g-sclkdyuwm4h2m78\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-17T10:27:15.914517000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-17T10:27:15.914517000Z\",\n      \"name\": \"TestProject7\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"39ig9ttgec6lbe096uetn2cb9\",\n      \"uuid\": \"ardev-j7d0g-593khc577zuyyhe\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-17T10:27:03.858203000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-17T10:27:03.858203000Z\",\n      \"name\": \"TestProject8\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"1dpr8v6tx6pta0fozq93eyeou\",\n      \"uuid\": \"ardev-j7d0g-iotds0tm559dbz7\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-17T10:26:25.180623000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-17T10:26:25.180623000Z\",\n      \"name\": \"TestProject9\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": 
\"dizbavs2opfe1wpx6thocfki0\",\n      \"uuid\": \"ardev-j7d0g-gbqay74778tonb8\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-17T10:26:06.435961000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-17T10:26:06.435961000Z\",\n      \"name\": \"TestProject10\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"6xue8m3lx9qpptfvdf13val5t\",\n      \"uuid\": \"ardev-j7d0g-fmq1t0jlznehbdm\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-17T10:25:55.546399000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-17T10:25:55.546399000Z\",\n      \"name\": \"TestProject11\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"2gqix9e4m023usi9exhrsjx6z\",\n      \"uuid\": \"ardev-j7d0g-vxju56ch64u51gq\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-16T14:09:49.700566000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-16T14:09:49.700566000Z\",\n      \"name\": \"TestProject12\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"73n8x82814o6ihld0kltf468d\",\n      \"uuid\": \"ardev-j7d0g-g8m4w0d22gv6fbj\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-11T15:02:35.016850000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-11T15:02:35.016850000Z\",\n      \"name\": \"TestProject13\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"91f7uwq7pj3d3ez1u4smjg3ch\",\n      \"uuid\": \"ardev-j7d0g-lstqed4y78khaqm\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-06T15:29:27.754408000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-06T15:29:27.754408000Z\",\n      \"name\": \"TestProject14\",\n      \"group_class\": \"project\",\n      \"description\": 
null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"7dbxhvbcfaogwnvo8k4mtqthk\",\n      \"uuid\": \"ardev-j7d0g-0jbezvnq8i07l7p\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-04-05T09:32:46.946417000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-04-05T09:32:46.946417000Z\",\n      \"name\": \"TestProject15\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"dhfu203rckzdzvx832wm7jv59\",\n      \"uuid\": \"ardev-j7d0g-72dxer22g6iltqz\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-03-29T11:27:02.482218000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-03-29T13:17:00.045606000Z\",\n      \"name\": \"TestProject16\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"7l9oxbdf4e1m9ddnujokf7czz\",\n      \"uuid\": \"ardev-j7d0g-nebzwquxtq1v3o5\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-03-29T11:11:26.235411000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-03-29T11:11:26.235411000Z\",\n      \"name\": \"TestProject17\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"83862x2o4453mja2rvypjl5gv\",\n      \"uuid\": \"ardev-j7d0g-5589c8dmxevecqh\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-03-29T11:10:58.496482000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-03-29T11:10:58.496482000Z\",\n      \"name\": \"TestProject18\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"3hw0vk4mbl0ofvia5k6x4dwrx\",\n      \"uuid\": \"ardev-j7d0g-bmg3pfqtx3ivczp\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-03-29T11:09:05.984597000Z\",\n      
\"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-03-29T11:09:05.984597000Z\",\n      \"name\": \"TestProject19\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    },\n    {\n      \"kind\": \"arvados#group\",\n      \"etag\": \"6p9xbxpttj782mpqs537gfvc6\",\n      \"uuid\": \"ardev-j7d0g-mfitz2oa4rpycou\",\n      \"owner_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"created_at\": \"2018-03-29T11:00:19.809612000Z\",\n      \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n      \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n      \"modified_at\": \"2018-03-29T11:00:19.809612000Z\",\n      \"name\": \"TestProject20\",\n      \"group_class\": \"project\",\n      \"description\": null,\n      \"writable_by\": [\n        \"ardev-tpzed-n3kzq4fvoks3uw4\",\n        \"ardev-tpzed-n3kzq4fvoks3uw4\"\n      ],\n      \"delete_at\": null,\n      \"trash_at\": null,\n      \"is_trashed\": false\n    }\n  ],\n  \"items_available\": 20\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-client-test-file.txt",
    "content": "Sample text file to test keep client."
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-client-upload-response.json",
    "content": "Created"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible-disk-only.json",
    "content": "{\n    \"kind\": \"arvados#keepServiceList\",\n    \"etag\": \"\",\n    \"offset\": null,\n    \"limit\": null,\n    \"items\": [\n        {\n            \"kind\": \"arvados#keepService\",\n            \"etag\": \"bjzh7og2d9z949lbd38vnnslt\",\n            \"uuid\": \"112ci-bi6l4-hv02fg8sbti8ykk\",\n            \"owner_uuid\": \"112ci-tpzed-000000000000000\",\n            \"created_at\": \"2017-11-03T10:04:48.314229000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-xxy0ipzwti8gnmt\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-000000000000000\",\n            \"modified_at\": \"2017-11-03T10:04:48.314229000Z\",\n            \"service_host\": \"localhost\",\n            \"service_port\": 9000,\n            \"service_ssl_flag\": false,\n            \"service_type\": \"disk\",\n            \"read_only\": false\n        },\n        {\n            \"kind\": \"arvados#keepService\",\n            \"etag\": \"7m64l69kko4bytpsykf8cay7t\",\n            \"uuid\": \"112ci-bi6l4-f0r03wrqymotwql\",\n            \"owner_uuid\": \"112ci-tpzed-000000000000000\",\n            \"created_at\": \"2017-11-03T10:04:48.351577000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-xxy0ipzwti8gnmt\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-000000000000000\",\n            \"modified_at\": \"2017-11-03T10:04:48.351577000Z\",\n            \"service_host\": \"localhost\",\n            \"service_port\": 9001,\n            \"service_ssl_flag\": false,\n            \"service_type\": \"disk\",\n            \"read_only\": false\n        }\n    ],\n    \"items_available\": 2\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-services-accessible.json",
    "content": "{\n    \"kind\": \"arvados#keepServiceList\",\n    \"etag\": \"\",\n    \"offset\": null,\n    \"limit\": null,\n    \"items\": [\n        {\n            \"kind\": \"arvados#keepService\",\n            \"etag\": \"bjzh7og2d9z949lbd38vnnslt\",\n            \"uuid\": \"112ci-bi6l4-hv02fg8sbti8ykk\",\n            \"owner_uuid\": \"112ci-tpzed-000000000000000\",\n            \"created_at\": \"2017-11-03T10:04:48.314229000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-xxy0ipzwti8gnmt\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-000000000000000\",\n            \"modified_at\": \"2017-11-03T10:04:48.314229000Z\",\n            \"service_host\": \"localhost\",\n            \"service_port\": 9000,\n            \"service_ssl_flag\": false,\n            \"service_type\": \"disk\",\n            \"read_only\": false\n        },\n        {\n            \"kind\": \"arvados#keepService\",\n            \"etag\": \"7m64l69kko4bytpsykf8cay7t\",\n            \"uuid\": \"112ci-bi6l4-f0r03wrqymotwql\",\n            \"owner_uuid\": \"112ci-tpzed-000000000000000\",\n            \"created_at\": \"2017-11-03T10:04:48.351577000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-xxy0ipzwti8gnmt\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-000000000000000\",\n            \"modified_at\": \"2017-11-03T10:04:48.351577000Z\",\n            \"service_host\": \"localhost\",\n            \"service_port\": 9000,\n            \"service_ssl_flag\": false,\n            \"service_type\": \"gpfs\",\n            \"read_only\": false\n        }\n    ],\n    \"items_available\": 2\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-services-get.json",
    "content": "{\n    \"kind\": \"arvados#keepService\",\n    \"etag\": \"bjzh7og2d9z949lbd38vnnslt\",\n    \"uuid\": \"112ci-bi6l4-hv02fg8sbti8ykk\",\n    \"owner_uuid\": \"112ci-tpzed-000000000000000\",\n    \"created_at\": \"2017-11-03T10:04:48.314229000Z\",\n    \"modified_by_client_uuid\": \"112ci-ozdt8-xxy0ipzwti8gnmt\",\n    \"modified_by_user_uuid\": \"112ci-tpzed-000000000000000\",\n    \"modified_at\": \"2017-11-03T10:04:48.314229000Z\",\n    \"service_host\": \"10.0.2.15\",\n    \"service_port\": 9000,\n    \"service_ssl_flag\": false,\n    \"service_type\": \"disk\",\n    \"read_only\": false\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-services-list.json",
    "content": "{\n    \"kind\": \"arvados#keepServiceList\",\n    \"etag\": \"\",\n    \"offset\": 0,\n    \"limit\": 100,\n    \"items\": [\n        {\n            \"kind\": \"arvados#keepService\",\n            \"etag\": \"7m64l69kko4bytpsykf8cay7t\",\n            \"uuid\": \"112ci-bi6l4-f0r03wrqymotwql\",\n            \"owner_uuid\": \"112ci-tpzed-000000000000000\",\n            \"created_at\": \"2017-11-03T10:04:48.351577000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-xxy0ipzwti8gnmt\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-000000000000000\",\n            \"modified_at\": \"2017-11-03T10:04:48.351577000Z\",\n            \"service_host\": \"10.0.2.15\",\n            \"service_port\": 9000,\n            \"service_ssl_flag\": false,\n            \"service_type\": \"disk\",\n            \"read_only\": false\n        },\n        {\n            \"kind\": \"arvados#keepService\",\n            \"etag\": \"bjzh7og2d9z949lbd38vnnslt\",\n            \"uuid\": \"112ci-bi6l4-hv02fg8sbti8ykk\",\n            \"owner_uuid\": \"112ci-tpzed-000000000000000\",\n            \"created_at\": \"2017-11-03T10:04:48.314229000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-xxy0ipzwti8gnmt\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-000000000000000\",\n            \"modified_at\": \"2017-11-03T10:04:48.314229000Z\",\n            \"service_host\": \"10.0.2.15\",\n            \"service_port\": 9001,\n            \"service_ssl_flag\": false,\n            \"service_type\": \"disk\",\n            \"read_only\": false\n        },\n        {\n            \"kind\": \"arvados#keepService\",\n            \"etag\": \"4be61qkpt6nzdfff4vj9nkpmj\",\n            \"uuid\": \"112ci-bi6l4-ko27cfbsf2ssx2m\",\n            \"owner_uuid\": \"112ci-tpzed-000000000000000\",\n            \"created_at\": \"2017-11-03T10:04:36.355045000Z\",\n            \"modified_by_client_uuid\": \"112ci-ozdt8-xxy0ipzwti8gnmt\",\n            \"modified_by_user_uuid\": \"112ci-tpzed-000000000000000\",\n            \"modified_at\": \"2017-11-03T10:04:36.355045000Z\",\n            \"service_host\": \"10.0.2.15\",\n            \"service_port\": 9002,\n            \"service_ssl_flag\": false,\n            \"service_type\": \"proxy\",\n            \"read_only\": false\n        }\n    ],\n    \"items_available\": 3\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/keep-services-not-accessible.json",
    "content": "{\n    \"kind\": \"arvados#keepServiceList\",\n    \"etag\": \"\",\n    \"offset\": null,\n    \"limit\": null,\n    \"items\": [],\n    \"items_available\": 0\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/links-create.json",
    "content": "{\n  \"kind\": \"arvados#link\",\n  \"etag\": \"zw1rlnbig0kpm9btw8us3pn9\",\n  \"uuid\": \"arkau-o0j2j-huxuaxbi46s1yml\",\n  \"owner_uuid\": \"arkau-tpzed-000000000000000\",\n  \"created_at\": \"2021-11-30T08:45:04.373354745Z\",\n  \"modified_by_client_uuid\": null,\n  \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n  \"modified_at\": \"2021-11-30T08:45:04.374489000Z\",\n  \"tail_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n  \"link_class\": \"star\",\n  \"name\": \"Star Link\",\n  \"head_uuid\": \"arkau-j7d0g-fcedae2076pw56h\",\n  \"head_kind\": \"arvados#group\",\n  \"tail_kind\": \"arvados#user\",\n  \"properties\": {}\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/links-get.json",
    "content": "{\n  \"kind\": \"arvados#link\",\n  \"etag\": \"zw1rlnbig0kpm9btw8us3pn9\",\n  \"uuid\": \"arkau-o0j2j-huxuaxbi46s1yml\",\n  \"owner_uuid\": \"arkau-tpzed-000000000000000\",\n  \"created_at\": \"2021-11-30T08:45:04.373354745Z\",\n  \"modified_by_client_uuid\": null,\n  \"modified_by_user_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n  \"modified_at\": \"2021-11-30T08:45:04.374489000Z\",\n  \"tail_uuid\": \"ardev-tpzed-n3kzq4fvoks3uw4\",\n  \"link_class\": \"permission\",\n  \"name\": \"can_read\",\n  \"head_uuid\": \"arkau-j7d0g-fcedae2076pw56h\",\n  \"head_kind\": \"arvados#group\",\n  \"tail_kind\": \"arvados#user\",\n  \"properties\": {}\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/links-list.json",
    "content": "{\n  \"kind\": \"arvados#linkList\",\n  \"etag\": \"\",\n  \"offset\": 0,\n  \"limit\": 100,\n  \"items\": [\n    {\n      \"kind\": \"arvados#link\",\n      \"etag\": \"dkhtr9tvp9zfy0d90xjn7w1t7\",\n      \"uuid\": \"arkau-o0j2j-x2b4rdadxs2fizn\",\n      \"owner_uuid\": \"arkau-j7d0g-publicfavorites\",\n      \"created_at\": \"2021-10-27T12:00:06.607794000Z\",\n      \"modified_by_client_uuid\": null,\n      \"modified_by_user_uuid\": \"arlog-tpzed-fyiau9qwo7ytntu\",\n      \"modified_at\": \"2021-10-27T12:00:06.609840000Z\",\n      \"tail_uuid\": \"arkau-j7d0g-publicfavorites\",\n      \"link_class\": \"star\",\n      \"name\": \"pRED Data Commons Service - Open access\",\n      \"head_uuid\": \"arkau-j7d0g-sfhw8b1uson0hwh\",\n      \"head_kind\": \"arvados#group\",\n      \"tail_kind\": \"arvados#group\",\n      \"properties\": {}\n    },\n    {\n      \"kind\": \"arvados#link\",\n      \"etag\": \"9nt0c2xn5oz1jzjzawlycmehz\",\n      \"uuid\": \"arkau-o0j2j-r5am4lz9gnu488k\",\n      \"owner_uuid\": \"arkau-j7d0g-publicfavorites\",\n      \"created_at\": \"2021-06-23T14:58:06.189520000Z\",\n      \"modified_by_client_uuid\": null,\n      \"modified_by_user_uuid\": \"arlog-tpzed-xzjyeljl6co7vlz\",\n      \"modified_at\": \"2021-06-23T14:58:06.196208000Z\",\n      \"tail_uuid\": \"arkau-j7d0g-publicfavorites\",\n      \"link_class\": \"star\",\n      \"name\": \"Open Targets Genetics\",\n      \"head_uuid\": \"arkau-j7d0g-pj5wysmpy5wn8yo\",\n      \"head_kind\": \"arvados#group\",\n      \"tail_kind\": \"arvados#group\",\n      \"properties\": {}\n    }\n  ],\n  \"items_available\": 2\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/users-create.json",
    "content": "{\n    \"kind\": \"arvados#user\",\n    \"etag\": \"b21emst9eu9u1wdpqcz6la583\",\n    \"uuid\": \"ardev-tpzed-q6dvn7sby55up1b\",\n    \"owner_uuid\": \"ardev-tpzed-000000000000000\",\n    \"created_at\": \"2017-10-30T19:42:43.324740000Z\",\n    \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n    \"modified_by_user_uuid\": \"ardev-tpzed-o3km4ug9jhs189j\",\n    \"modified_at\": \"2017-10-31T09:01:03.985749000Z\",\n    \"email\": \"example@email.com\",\n    \"username\": \"johnwayne\",\n    \"full_name\": \"John Wayne\",\n    \"first_name\": \"John\",\n    \"last_name\": \"Wayne\",\n    \"identity_url\": \"ardev-tpzed-r09t5ztf5qd3rlj\",\n    \"is_active\": true,\n    \"is_admin\": null,\n    \"is_invited\": true,\n    \"prefs\": {},\n    \"writable_by\": [\n        \"ardev-tpzed-000000000000000\",\n        \"ardev-tpzed-q6dvn7sby55up1b\",\n        \"ardev-j7d0g-000000000000000\"\n    ]\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/users-get.json",
    "content": "{\n    \"kind\": \"arvados#user\",\n    \"etag\": \"b21emst9eu9u1wdpqcz6la583\",\n    \"uuid\": \"ardev-tpzed-q6dvn7sby55up1b\",\n    \"owner_uuid\": \"ardev-tpzed-000000000000000\",\n    \"created_at\": \"2017-10-30T19:42:43.324740000Z\",\n    \"modified_by_client_uuid\": \"ardev-ozdt8-97tzh5x96spqkay\",\n    \"modified_by_user_uuid\": \"ardev-tpzed-o3km4ug9jhs189j\",\n    \"modified_at\": \"2017-10-31T09:01:03.985749000Z\",\n    \"email\": \"example@email.com\",\n    \"username\": \"johnwayne\",\n    \"full_name\": \"John Wayne\",\n    \"first_name\": \"John\",\n    \"last_name\": \"Wayne\",\n    \"identity_url\": \"ardev-tpzed-r09t5ztf5qd3rlj\",\n    \"is_active\": true,\n    \"is_admin\": null,\n    \"is_invited\": true,\n    \"prefs\": {},\n    \"writable_by\": [\n        \"ardev-tpzed-000000000000000\",\n        \"ardev-tpzed-q6dvn7sby55up1b\",\n        \"ardev-j7d0g-000000000000000\"\n    ]\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/users-list.json",
    "content": "{\n    \"kind\": \"arvados#userList\",\n    \"etag\": \"\",\n    \"offset\": 0,\n    \"limit\": 100,\n    \"items\": [\n        {\n            \"kind\": \"arvados#user\",\n            \"uuid\": \"ardev-tpzed-12389ux30402est\",\n            \"email\": \"test.user@email.com\",\n            \"first_name\": \"Test\",\n            \"last_name\": \"User\",\n            \"is_active\": true\n        },\n        {\n            \"kind\": \"arvados#user\",\n            \"uuid\": \"ardev-tpzed-123vn7sby55up1b\",\n            \"email\": \"test.user1@email.com\",\n            \"first_name\": \"Test1\",\n            \"last_name\": \"User1\",\n            \"is_active\": true\n        },\n        {\n            \"kind\": \"arvados#user\",\n            \"uuid\": \"ardev-tpzed-123g70lq1m3c6fz\",\n            \"email\": \"test.user2@email.com\",\n            \"first_name\": \"Test2\",\n            \"last_name\": \"User2\",\n            \"is_active\": true\n        },\n        {\n            \"kind\": \"arvados#user\",\n            \"uuid\": \"ardev-tpzed-1233zsoudkgq92e\",\n            \"email\": \"test.user3@email.com\",\n            \"first_name\": \"Test3\",\n            \"last_name\": \"User3\",\n            \"is_active\": true\n        },\n        {\n            \"kind\": \"arvados#user\",\n            \"uuid\": \"ardev-tpzed-1234xjvs0clppd3\",\n            \"email\": \"test.user4@email.com\",\n            \"first_name\": \"Test4\",\n            \"last_name\": \"User4\",\n            \"is_active\": true\n        },\n        {\n            \"kind\": \"arvados#user\",\n            \"uuid\": \"ardev-tpzed-123bpggscmn6z8m\",\n            \"email\": \"test.user5@email.com\",\n            \"first_name\": \"Test5\",\n            \"last_name\": \"User5\",\n            \"is_active\": true\n        },\n        {\n            \"kind\": \"arvados#user\",\n            \"uuid\": \"ardev-tpzed-1231uysivaz6ipi\",\n            \"email\": \"test.user6@email.com\",\n            \"first_name\": \"Test6\",\n            \"last_name\": \"User6\",\n            \"is_active\": true\n        },\n        {\n            \"kind\": \"arvados#user\",\n            \"uuid\": \"ardev-tpzed-123b0a1wu0q6cm4\",\n            \"email\": \"test.user7@email.com\",\n            \"first_name\": \"Test7\",\n            \"last_name\": \"User7\",\n            \"is_active\": true\n        },\n        {\n            \"kind\": \"arvados#user\",\n            \"uuid\": \"ardev-tpzed-123bz6n6si24t6v\",\n            \"email\": \"test.user8@email.com\",\n            \"first_name\": \"Test8\",\n            \"last_name\": \"User8\",\n            \"is_active\": true\n        },\n        {\n            \"kind\": \"arvados#user\",\n            \"uuid\": \"ardev-tpzed-123lxhzifligheu\",\n            \"email\": \"test.user9@email.com\",\n            \"first_name\": \"Test9\",\n            \"last_name\": \"User9\",\n            \"is_active\": true\n        },\n        {\n            \"kind\": \"arvados#user\",\n            \"uuid\": \"ardev-tpzed-123gaz31qbopewh\",\n            \"email\": \"test.user10@email.com\",\n            \"first_name\": \"Test10\",\n            \"last_name\": \"User10\",\n            \"is_active\": true\n        },\n        {\n            \"kind\": \"arvados#user\",\n            \"uuid\": \"ardev-tpzed-123dmcf65z973uo\",\n            \"email\": \"test.user11@email.com\",\n            \"first_name\": \"Test11\",\n            \"last_name\": \"User11\",\n            \"is_active\": true\n        },\n        {\n            
\"kind\": \"arvados#user\",\n            \"uuid\": \"ardev-tpzed-1239y3lj7ybpyg8\",\n            \"email\": \"test.user12@email.com\",\n            \"first_name\": \"Test12\",\n            \"last_name\": \"User12\",\n            \"is_active\": true\n        }\n\n    ],\n    \"items_available\": 13\n}"
  },
  {
    "path": "contrib/java-sdk-v2/src/test/resources/org/arvados/client/api/client/users-system.json",
    "content": "{\n    \"kind\": \"arvados#user\",\n    \"etag\": \"2ehmra38iwfuexvz1cjno5xua\",\n    \"uuid\": \"ardev-tpzed-000000000000000\",\n    \"owner_uuid\": \"ardev-tpzed-000000000000000\",\n    \"created_at\": \"2016-10-19T07:48:04.838534000Z\",\n    \"modified_by_client_uuid\": null,\n    \"modified_by_user_uuid\": \"ardev-tpzed-000000000000000\",\n    \"modified_at\": \"2016-10-19T07:48:04.833164000Z\",\n    \"email\": \"root\",\n    \"username\": null,\n    \"full_name\": \"root\",\n    \"first_name\": \"root\",\n    \"last_name\": \"\",\n    \"identity_url\": null,\n    \"is_active\": true,\n    \"is_admin\": true,\n    \"is_invited\": true,\n    \"prefs\": {},\n    \"writable_by\": [\n        \"ardev-tpzed-000000000000000\"\n    ]\n}"
  },
  {
    "path": "doc/Gemfile",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nsource 'https://rubygems.org'\n\ngem 'zenweb'\ngem 'liquid', '~>4.0.0'\ngem 'RedCloth'\ngem 'colorize'\n"
  },
  {
    "path": "doc/README.textile",
    "content": "###. Copyright (C) The Arvados Authors. All rights reserved.\n....\n.... SPDX-License-Identifier: CC-BY-SA-3.0\n\nh1. Arvados documentation\n\nThis is the source code for \"doc.arvados.org\":http://doc.arvados.org.\n\nHere's how to build the HTML pages locally so you can preview your updates before you commit and push.\n\nAdditional information is available on the \"'Documentation' page on the Arvados wiki\":https://dev.arvados.org/projects/arvados/wiki/Documentation.\n\nh2. Install dependencies\n\nTo build the core Arvados documentation:\n\n<pre>\narvados/doc$ sudo apt-get install build-essential libcurl4-openssl-dev libgnutls28-dev libssl-dev\narvados/doc$ bundle install\n</pre>\n\nSDK reference documentation has additional, optional build requirements.\n\nh3. Java SDK documentation\n\n<pre>\n$ sudo apt install gradle\n</pre>\n\nh3. Python SDK documentation\n\n<pre>\narvados/doc$ sudo apt install python3-venv\narvados/doc$ python3 -m venv .venv\narvados/doc$ .venv/bin/pip install pdoc setuptools\n</pre>\n\nThen you must activate the virtualenv (e.g., run @. .venv/bin/activate@) before you run the @bundle exec rake@ commands below.\n\nh3. R SDK documentation\n\n<pre>\n$ sudo apt install r-cran-devtools r-cran-roxygen2 r-cran-knitr r-cran-markdown r-cran-xml\n</pre>\n\nh2. Generate HTML pages\n\n<pre>\narvados/doc$ bundle exec rake\n</pre>\n\nAlternately, to make the documentation browsable on the local filesystem:\n\n<pre>\narvados/doc$ bundle exec rake generate baseurl=$PWD/.site\n</pre>\n\nh3. Selecting SDK documentation to build\n\nBy default, the build process will try to detect what SDK documentation it can build, build all that, and skip the rest. You can specify exactly what you want to build using the @sdks@ environment variable. This is a list of comma- or space-separated SDKs you wanted to build documentation for. Valid values are @java@, @python@, @r@, @all@, or @none@. @all@ is a shortcut for listing all the valid SDKs. @none@ means do not build documentation for any SDK. For example, to build documentation for the Java and Python SDKs, but skip R:\n\n<pre>\narvados/doc$ bundle exec rake generate baseurl=$PWD/.site sdks=java,python\n</pre>\n\nSpecifying @sdks@ skips the build detection logic. If the Rakefile cannot build the requested SDK documentation, the build will fail.\n\nFor backwards compatibility, if you do not specify @sdks@, but the @NO_SDK@ environment variable is set, or the @no-sdk@ file exists, the build will run as if you set @sdks=none@.\n\nh2. Run linkchecker\n\nIf you have \"Linkchecker\":http://wummel.github.io/linkchecker/ installed on\nyour system, you can run it against the documentation:\n\n<pre>\narvados/doc$ bundle exec rake linkchecker baseurl=file://$PWD/.site\n</pre>\n\nPlease note that this will regenerate your $PWD/.site directory.\n\nh2. Preview HTML pages\n\n<pre>\narvados/doc$ bundle exec rake run\n[2014-03-10 09:03:41] INFO  WEBrick 1.3.1\n[2014-03-10 09:03:41] INFO  ruby 2.1.1 (2014-02-24) [x86_64-linux]\n[2014-03-10 09:03:41] INFO  WEBrick::HTTPServer#start: pid=8926 port=8000\n</pre>\n\nPreview the rendered pages at \"http://localhost:8000\":http://localhost:8000.\n\nh2. Publish HTML pages inside Workbench\n\n(or some other web site)\n\nYou can set @baseurl@ (the URL prefix for all internal links), @arvados_cluster_uuid@, @arvados_api_host@ and @arvados_workbench_host@ without changing @_config.yml@:\n\n<pre>\narvados/doc$ bundle exec rake generate baseurl=/doc arvados_api_host=xyzzy.arvadosapi.com\n</pre>\n\nh2. 
Delete generated files\n\n<pre>\narvados/doc$ bundle exec rake realclean\n</pre>\n"
  },
  {
    "path": "doc/Rakefile",
    "content": "#!/usr/bin/env rake\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n# As a convenience to the documentation writer, you can touch a file\n# called 'no-sdk' in the 'doc' directory and it will suppress\n# generating the documentation for the SDKs, which (the R docs\n# especially) take a fair bit of time and slow down the edit-preview\n# cycle.\n#\n# To generate and view the documentation locally, run this command\n#\n#   rake && sensible-browser .site/index.html\n#\n# Or alternatively:\n#\n#   baseurl=http://localhost:8000 rake && rake run\n#\n# and then visit http://localhost:8000 in a browser.\n\nrequire \"uri\"\n\nrequire \"rubygems\"\nrequire \"colorize\"\n\ndef can_run?(*command, **options)\n  options = {\n    :in => :close,\n    :out => [File::NULL, \"w\"],\n  }.merge(options)\n  system(*command, **options)\nend\n\nclass JavaSDK\n  def self.build_path\n    \"contrib/java-sdk-v2\"\n  end\n\n  def self.can_build?\n    can_run?(\"gradle\", \"--version\")\n  end\n\n  def self.doc_path\n    \"sdk/java-v2\"\n  end\nend\n\nclass PythonSDK\n  def self.build_path\n    \"sdk/python/arvados\"\n  end\n\n  def self.can_build?\n    can_run?(\"./pysdk_pdoc.py\", \"--version\")\n  end\n\n  def self.doc_path\n    \"sdk/python/arvados\"\n  end\nend\n\nclass RSDK\n  def self.build_path\n    \"contrib/R-sdk\"\n  end\n\n  def self.can_build?\n    can_run?(\"make\", \"can_run\", chdir: File.join(\"..\", self.build_path))\n  end\n\n  def self.doc_path\n    \"sdk/R\"\n  end\nend\n\n$build_sdks = begin\n  no_sdk_env = ENV.fetch(\"NO_SDK\", \"\")\n  sdks_env = ENV.fetch(\"sdks\", \"\")\n  all_sdks = Hash[[JavaSDK, PythonSDK, RSDK].map { |c| [c.name, c] }]\n\n  if no_sdk_env != \"\" and sdks_env != \"\"\n    fail \"both NO_SDK and sdks defined in environment\"\n  elsif sdks_env != \"\"\n    # Nothing to do\n  elsif no_sdk_env != \"\" or File.exist?(\"no-sdk\")\n    sdks_env = \"none\"\n  end\n\n  if sdks_env == \"\"\n    all_sdks.each_pair.filter_map do |name, sdk|\n      if sdk.can_build?\n        sdk\n      else\n        puts \"Warning: cannot build #{name.gsub(/SDK$/, ' SDK')} documentation, skipping\".colorize(:light_red)\n      end\n    end\n  else\n    wanted_sdks = []\n    sdks_env.split(/\\s*[,\\s]\\s*/).each do |key|\n      key = \"#{key.capitalize}SDK\"\n      if key == \"AllSDK\"\n        wanted_sdks = all_sdks.values\n      elsif key == \"NoneSDK\"\n        wanted_sdks.clear\n      elsif sdk = all_sdks[key]\n        wanted_sdks << sdk\n      else\n        fail \"cannot build documentation for unknown #{key}\"\n      end\n    end\n    wanted_sdks\n  end\nend\n\nmodule Zenweb\n  class Site\n    @binary_files = %w[png jpg gif eot svg ttf woff2? ico pdf m4a t?gz xlsx]\n  end\nend\n\ntask :generate => [ :realclean, 'sdk/python/arvados.html', 'sdk/R/arvados/index.html', 'sdk/java-v2/javadoc/index.html' ] do\n  vars = ['baseurl', 'arvados_cluster_uuid', 'arvados_api_host', 'arvados_workbench_host']\n  if ! ENV.key?('baseurl') || ENV['baseurl'] == \"\"\n    if !ENV.key?('WORKSPACE') || ENV['WORKSPACE'] == \"\"\n      puts \"The `baseurl` variable was not specified and the `WORKSPACE` environment variable is not set. 
Defaulting `baseurl` to file://#{pwd}/.site\"\n      ENV['baseurl'] = \"file://#{pwd}/.site/\"\n    else\n      puts \"The `baseurl` variable was not specified, defaulting to a value derived from the `WORKSPACE` environment variable\"\n      ENV['baseurl'] = \"file://#{ENV['WORKSPACE']}/doc/.site/\"\n    end\n  end\n  vars.each do |v|\n    if ENV[v]\n      website.config.h[v] = ENV[v]\n    end\n  end\nend\n\nfile [\"install/new_cluster_checklist_Azure.xlsx\", \"install/new_cluster_checklist_AWS.xlsx\"] do |t|\n  cp(t, t)\nend\n\nfile \"sdk/python/arvados.html\" do |t|\n  next unless $build_sdks.include?(PythonSDK)\n  raise unless system(\"pip\", \"install\", \"../sdk/python\",\n                      out: :err)\n  raise unless system(\"python3\", \"pysdk_pdoc.py\",\n                      out: :err)\nend\n\nfile \"sdk/R/arvados/index.html\" do |t|\n  next unless $build_sdks.include?(RSDK)\n  Dir.mkdir(\"sdk/R\")\n  Dir.mkdir(\"sdk/R/arvados\")\n  cp('css/R.css', 'sdk/R/arvados')\n  raise unless system(\"make\", \"man\", chdir: \"../contrib/R-sdk\", out: :err)\n  docnames = Dir.glob(\"../contrib/R-sdk/man/*.Rd\").map { |rd| File.basename(rd, \".Rd\") }.sort\n  docnames.each do |basename|\n    raise unless system(\n                   \"R\", \"CMD\", \"Rdconv\", \"--type=html\", \"man/#{basename}.Rd\",\n                   chdir: \"../contrib/R-sdk\",\n                   out: [\"sdk/R/arvados/#{basename}.html\", \"w\"],\n                 )\n  end\n\n  File.open(\"sdk/R/index.html.md\", \"w\") do |fn|\n    fn.write(<<-EOF\n---\nlayout: default\nnavsection: sdk\nnavmenu: R\ntitle: \"R SDK Overview\"\n...\n\nEOF\n            )\n    File.open(\"../contrib/R-sdk/README.md\", \"r\") do |rd|\n      fn.write(rd.read)\n    end\n  end\n\n  File.open(\"sdk/R/arvados/index.html.textile.liquid\", \"w\") do |fn|\n    fn.write(<<-EOF\n---\nlayout: default\nnavsection: sdk\nnavmenu: R\ntitle: \"R Reference\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nEOF\n            )\n    docnames.each do |basename|\n      fn.printf(\"* \\\"%s\\\":%s.html\\n\", basename, basename)\n    end\n  end\nend\n\nfile \"sdk/java-v2/javadoc/index.html\" do |t|\n  next unless $build_sdks.include?(JavaSDK)\n  tgt = Dir.pwd\n  docfiles = []\n  Dir.chdir(\"../contrib/java-sdk-v2\") do\n    STDERR.puts `gradle javadoc 2>&1`\n    raise if $? != 0\n    puts `sed -i \"s/@import.*dejavu.css.*//g\" build/docs/javadoc/stylesheet.css`\n    raise if $? != 0\n  end\n  cp_r(\"../contrib/java-sdk-v2/build/docs/javadoc\", \"sdk/java-v2\")\n  raise if $? 
!= 0\nend\n\ntask :linkchecker => [ :generate ] do\n  # we need --check-extern to check relative links, weird but true\n  opts = [\n    \"--check-extern\",\n    \"--ignore-url=!^file://\",\n  ]\n  ([JavaSDK, PythonSDK, RSDK] - $build_sdks).map(&:doc_path).each do |sdk_path|\n    sdk_url = URI.join(ENV[\"baseurl\"], sdk_path)\n    url_re = Regexp.escape(sdk_url.to_s)\n    opts << \"--ignore-url=^#{url_re}[./]\"\n  end\n  result = system(\n    \"linkchecker\", *opts, \"index.html\",\n    chdir: \".site\",\n  )\n  if result.nil?\n    fail \"could not run linkchecker command (is it installed?)\"\n  elsif !result\n    fail \"linkchecker exited #{$?.exitstatus}\"\n  end\nend\n\ntask :import_vscode_training do\n  Dir.chdir(\"user\") do\n  rm_rf \"arvados-vscode-cwl-training\"\n  `git clone https://github.com/arvados/arvados-vscode-cwl-training`\n  githash = `git --git-dir arvados-vscode-cwl-training/.git log -n1 --format=%H HEAD`\n  File.open(\"cwl/arvados-vscode-training.html.md.liquid\", \"w\") do |fn|\n    File.open(\"arvados-vscode-cwl-training/README.md\", \"r\") do |rd|\n      fn.write(<<-EOF\n---\nlayout: default\nnavsection: userguide\ntitle: \"Developing CWL Workflows with VSCode\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n\nImported from https://github.com/arvados/arvados-vscode-cwl-training\ngit hash: #{githash}\n{% endcomment %}\n\nEOF\n              )\n               fn.write(rd.read())\n    end\n  end\n  rm_rf \"arvados-vscode-cwl-training\"\n  end\nend\n\ntask :clean do\n  rm_rf \"sdk/python/arvados\"\n  rm_f \"sdk/python/arvados.html\"\n  rm_f \"sdk/python/index.html\"\n  rm_rf \"sdk/R\"\n  rm_rf \"sdk/java-v2/javadoc\"\nend\n\nrequire \"zenweb/tasks\"\nload \"zenweb-textile.rb\"\nload \"zenweb-liquid.rb\"\nload \"zenweb-fix-body.rb\"\n\ntask :extra_wirings do\n  $website.pages[\"sdk/python/python.html.textile.liquid\"].depends_on(\"sdk/python/arvados.html\")\nend\n"
  },
  {
    "path": "doc/_config.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n# baseurl is the location of the generated site from the browser's\n# perspective (e.g., http://doc.arvados.org or\n# file:///tmp/arvados/doc/.site).  You can also set these on the\n# command line: $ rake generate baseurl=/example\n# arvados_api_host=example.com\n\nbaseurl:\ncurrent_version:\nall_versions:\nlatest_version:\narvados_api_host: localhost\narvados_cluster_uuid: local\narvados_workbench_host: http://localhost\ngoogle_analytics: \"G-EFLSBXJ5SQ\"\nmatomo_analytics_url: \"https://piwik.arvados.org\"\nmatomo_analytics_siteid: \"3\"\n\nexclude: [\"Rakefile\", \"tmp\", \"vendor\"]\n\nnavbar:\n  userguide:\n    - Welcome:\n      - user/index.html.textile.liquid\n      - user/getting_started/community.html.textile.liquid\n    - Walkthough:\n      - user/getting_started/workbench.html.textile.liquid\n      - user/tutorials/wgs-tutorial.html.textile.liquid\n    - Working at the Command Line:\n      - user/getting_started/setup-cli.html.textile.liquid\n      - user/reference/api-tokens.html.textile.liquid\n      - user/getting_started/check-environment.html.textile.liquid\n    - Working with data sets:\n      - user/tutorials/tutorial-projects.html.textile.liquid\n      - user/tutorials/tutorial-keep.html.textile.liquid\n      - user/tutorials/tutorial-keep-get.html.textile.liquid\n      - user/tutorials/tutorial-keep-collection-lifecycle.html.textile.liquid\n      - user/topics/arv-copy.html.textile.liquid\n      - user/tutorials/tutorial-keep-mount-gnu-linux.html.textile.liquid\n      - user/tutorials/tutorial-keep-mount-os-x.html.textile.liquid\n      - user/tutorials/tutorial-keep-mount-windows.html.textile.liquid\n      - user/topics/collection-versioning.html.textile.liquid\n      - user/topics/storage-classes.html.textile.liquid\n    - Data Analysis with Workflows:\n      - user/tutorials/tutorial-workflow-workbench.html.textile.liquid\n      - user/cwl/cwl-runner.html.textile.liquid\n      - user/cwl/crunchstat-summary.html.textile.liquid\n      - user/debugging/container-shell-access.html.textile.liquid\n      - user/topics/external-inputs.html.textile.liquid\n      - user/topics/service-containers.html.textile.liquid\n      - user/cwl/costanalyzer.html.textile.liquid\n      - user/cwl/federated-workflows.html.textile.liquid\n      - user/cwl/cwl-run-options.html.textile.liquid\n    - Common Workflow Language:\n      - user/cwl/rnaseq-cwl-training.html.textile.liquid\n      - user/cwl/arvados-vscode-training.html.md.liquid\n      - user/topics/arv-docker.html.textile.liquid\n      - user/cwl/cwl-style.html.textile.liquid\n      - user/tutorials/writing-cwl-workflow.html.textile.liquid\n      - user/cwl/cwl-extensions.html.textile.liquid\n      - user/cwl/cwl-versions.html.textile.liquid\n    - Access an Arvados virtual machine:\n      - user/getting_started/vm-login-with-webshell.html.textile.liquid\n      - user/getting_started/ssh-access-unix.html.textile.liquid\n      - user/getting_started/ssh-access-windows.html.textile.liquid\n    - Reference:\n      - user/topics/workbench-migration.html.textile.liquid\n      - user/topics/link-accounts.html.textile.liquid\n      - user/reference/cookbook.html.textile.liquid\n    - Arvados License:\n      - user/copying/copying.html.textile.liquid\n      - user/copying/agpl-3.0.html\n      - user/copying/LICENSE-2.0.html\n      - user/copying/by-sa-3.0.html\n  sdk:\n    - Overview:\n      - 
sdk/index.html.textile.liquid\n    - Python:\n      - sdk/python/sdk-python.html.textile.liquid\n      - sdk/python/api-client.html.textile.liquid\n      - sdk/python/cookbook.html.textile.liquid\n      - sdk/python/python.html.textile.liquid\n      - sdk/python/arvados-cwl-runner.html.textile.liquid\n      - sdk/python/events.html.textile.liquid\n    - Command line tools (CLI SDK):\n      - sdk/cli/install.html.textile.liquid\n      - sdk/cli/index.html.textile.liquid\n      - sdk/cli/reference.html.textile.liquid\n      - sdk/cli/subcommands.html.textile.liquid\n    - FUSE Driver:\n      - sdk/fuse/install.html.textile.liquid\n      - sdk/fuse/options.html.textile.liquid\n    - Go:\n      - sdk/go/index.html.textile.liquid\n      - sdk/go/example.html.textile.liquid\n    - Java:\n      - sdk/java-v2/index.html.textile.liquid\n      - sdk/java-v2/example.html.textile.liquid\n      - sdk/java-v2/javadoc.html.textile.liquid\n    - R:\n      - sdk/R/index.html.md\n      - sdk/R/arvados/index.html.textile.liquid\n    - Ruby:\n      - sdk/ruby/index.html.textile.liquid\n      - sdk/ruby/example.html.textile.liquid\n  api:\n    - Concepts:\n      - api/index.html.textile.liquid\n      - api/tokens.html.textile.liquid\n      - api/requests.html.textile.liquid\n      - api/methods.html.textile.liquid\n      - api/resources.html.textile.liquid\n    - Permission and authentication:\n      - api/methods/users.html.textile.liquid\n      - api/methods/groups.html.textile.liquid\n      - api/methods/api_client_authorizations.html.textile.liquid\n      - api/methods/links.html.textile.liquid\n      - api/methods/computed_permissions.html.textile.liquid\n      - api/methods/authorized_keys.html.textile.liquid\n      - api/methods/credentials.html.textile.liquid\n      - api/methods/user_agreements.html.textile.liquid\n      - api/methods/virtual_machines.html.textile.liquid\n    - Data management:\n      - api/keep-webdav.html.textile.liquid\n      - api/keep-s3.html.textile.liquid\n      - api/keep-web-urls.html.textile.liquid\n      - api/projects.html.textile.liquid\n      - api/properties.html.textile.liquid\n      - api/methods/collections.html.textile.liquid\n      - api/methods/logs.html.textile.liquid\n      - api/methods/keep_services.html.textile.liquid\n    - Container engine:\n      - api/methods/container_requests.html.textile.liquid\n      - api/methods/containers.html.textile.liquid\n      - api/methods/workflows.html.textile.liquid\n      - api/dispatch.html.textile.liquid\n  architecture:\n    - Topics:\n      - architecture/index.html.textile.liquid\n    - Storage in Keep:\n      - architecture/storage.html.textile.liquid\n      - architecture/keep-components-overview.html.textile.liquid\n      - architecture/keep-clients.html.textile.liquid\n      - architecture/keep-data-lifecycle.html.textile.liquid\n      - architecture/manifest-format.html.textile.liquid\n    - Computation with Crunch:\n      - api/execution.html.textile.liquid\n      - architecture/dispatchcloud.html.textile.liquid\n      - architecture/hpc.html.textile.liquid\n      - architecture/singularity.html.textile.liquid\n    - Other:\n      - api/permission-model.html.textile.liquid\n      - architecture/federation.html.textile.liquid\n  admin:\n    - Topics:\n      - admin/index.html.textile.liquid\n    - Users and Groups:\n      - admin/user-management.html.textile.liquid\n      - admin/user-management-cli.html.textile.liquid\n      - admin/group-management.html.textile.liquid\n      - 
admin/reassign-ownership.html.textile.liquid\n      - admin/link-accounts.html.textile.liquid\n      - admin/federation.html.textile.liquid\n      - admin/migrating-providers.html.textile.liquid\n      - user/topics/arvados-sync-external-sources.html.textile.liquid\n      - admin/scoped-tokens.html.textile.liquid\n      - admin/token-expiration-policy.html.textile.liquid\n    - Monitoring:\n      - admin/logging.html.textile.liquid\n      - admin/metrics.html.textile.liquid\n      - admin/health-checks.html.textile.liquid\n      - admin/inspect.html.textile.liquid\n      - admin/diagnostics.html.textile.liquid\n      - admin/management-token.html.textile.liquid\n      - admin/user-activity.html.textile.liquid\n      - admin/memory-cpu-profiling.html.textile.liquid\n    - Data Management:\n      - admin/collection-versioning.html.textile.liquid\n      - admin/collection-managed-properties.html.textile.liquid\n      - admin/restricting-upload-download.html.textile.liquid\n      - admin/keep-balance.html.textile.liquid\n      - admin/controlling-container-reuse.html.textile.liquid\n      - admin/logs-table-management.html.textile.liquid\n      - admin/metadata-vocabulary.html.textile.liquid\n      - admin/storage-classes.html.textile.liquid\n      - admin/keep-recovering-data.html.textile.liquid\n      - admin/keep-measuring-deduplication.html.textile.liquid\n      - admin/keep-faster-gc-s3.html.textile.liquid\n    - Cloud:\n      - admin/spot-instances.html.textile.liquid\n      - admin/cloudtest.html.textile.liquid\n      - admin/dispatch.html.textile.liquid\n  installguide:\n    - Overview:\n      - install/index.html.textile.liquid\n    - Arvados Installer:\n      - install/install-single-host.html.textile.liquid\n      - install/install-multi-host.html.textile.liquid\n    - Manual installation:\n      - install/install-manual-prerequisites.html.textile.liquid\n      - install/packages.html.textile.liquid\n    - Configuration:\n      - install/config.html.textile.liquid\n      - admin/config-urls.html.textile.liquid\n      - admin/config.html.textile.liquid\n    - Maintenance and upgrading:\n      - admin/upgrading.html.textile.liquid\n      - admin/maintenance-and-upgrading.html.textile.liquid\n    - Core:\n      - install/install-api-server.html.textile.liquid\n      - install/diagnostics.html.textile.liquid\n    - Keep:\n      - install/install-keepstore.html.textile.liquid\n      - install/configure-fs-storage.html.textile.liquid\n      - install/configure-s3-object-storage.html.textile.liquid\n      - install/configure-azure-blob-storage.html.textile.liquid\n      - install/install-keepproxy.html.textile.liquid\n      - install/install-keep-web.html.textile.liquid\n      - install/install-keep-balance.html.textile.liquid\n    - User interface:\n      - install/setup-login.html.textile.liquid\n      - install/install-ws.html.textile.liquid\n      - install/install-workbench2-app.html.textile.liquid\n      - install/workbench.html.textile.liquid\n    - Additional services:\n      - install/install-shell-server.html.textile.liquid\n      - install/install-webshell.html.textile.liquid\n    - Containers API (cloud):\n      - install/crunch2-cloud/install-compute-node.html.textile.liquid\n      - install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid\n    - Compute nodes (Slurm or LSF):\n      - install/crunch2/install-compute-node-docker.html.textile.liquid\n      - install/crunch2/install-compute-node-singularity.html.textile.liquid\n    - Containers API (Slurm):\n      - 
install/crunch2-slurm/install-dispatch.html.textile.liquid\n      - install/crunch2-slurm/configure-slurm.html.textile.liquid\n      - install/crunch2-slurm/install-test.html.textile.liquid\n    - Containers API (LSF):\n      - install/crunch2-lsf/install-dispatch.html.textile.liquid\n    - Additional configuration:\n      - install/container-shell-access.html.textile.liquid\n    - External dependencies:\n      - install/install-postgresql.html.textile.liquid\n      - install/ruby.html.textile.liquid\n      - install/nginx.html.textile.liquid\n      - install/install-docker.html.textile.liquid\n"
  },
  {
    "path": "doc/_includes/_admin_list_collections_without_property_py.liquid",
    "content": "#!/usr/bin/env python3\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\nimport arvados\nimport arvados.util as util\n\nfilters = [['properties.responsible_person_uuid', 'exists', False]]\ncols = util.keyset_list_all(arvados.api().collections().list, filters=filters, select=['uuid', 'name'], order='uuid')\n\nprint('Found {} collections:'.format(len(cols)))\nfor c in cols:\n    print('{}, \"{}\"'.format(c['uuid'], c['name']))\n"
  },
  {
    "path": "doc/_includes/_admin_set_property_to_collections_under_project_py.liquid",
    "content": "#!/usr/bin/env python3\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\nimport arvados\nimport arvados.util as util\n\ndef get_subproject_uuids(api, root_uuid):\n    uuids = []\n    groups = util.keyset_list_all(api.groups().list, filters=[['owner_uuid', '=', '{}'.format(root_uuid)]], select=['uuid'], order='uuid')\n    for g in groups:\n        uuids += ([g['uuid']] + get_subproject_uuids(api, g['uuid']))\n    return uuids\n\ndef get_cols(api, filters):\n    cols = util.keyset_list_all(api.collections().list, filters=filters, select=['uuid', 'properties'], order='uuid')\n    return cols\n\n# Search for collections on project hierarchy rooted at root_uuid\nroot_uuid = 'zzzzz-j7d0g-ppppppppppppppp'\n# Set the property to the UUID below\nresponsible_uuid = 'zzzzz-tpzed-xxxxxxxxxxxxxxx'\n\napi = arvados.api()\nfor p_uuid in [root_uuid] + get_subproject_uuids(api, root_uuid):\n    f = [['properties.responsible_person_uuid', 'exists', False],\n         ['owner_uuid', '=', p_uuid]]\n    cols = get_cols(api, f)\n    print('Found {} collections owned by {}'.format(len(cols), p_uuid))\n    for c in cols:\n        print(' - Updating collection {}'.format(c['uuid']))\n        props = c['properties']\n        props['responsible_person_uuid'] = responsible_uuid\n        api.collections().update(uuid=c['uuid'], body={'properties': props}).execute()\n"
  },
  {
    "path": "doc/_includes/_admin_update_collection_property_py.liquid",
    "content": "#!/usr/bin/env python3\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\nimport arvados\nimport arvados.util as util\n\nold_uuid = 'zzzzz-tpzed-xxxxxxxxxxxxxxx'\nnew_uuid = 'zzzzz-tpzed-yyyyyyyyyyyyyyy'\n\napi = arvados.api()\nfilters = [['properties.responsible_person_uuid', '=', '{}'.format(old_uuid)]]\ncols = util.keyset_list_all(api.collections().list, filters=filters, select=['uuid', 'properties'], order='uuid')\n\nprint('Found {} collections'.format(len(cols)))\nfor c in cols:\n    print('Updating collection {}'.format(c['uuid']))\n    props = c['properties']\n    props['responsible_person_uuid'] = new_uuid\n    api.collections().update(uuid=c['uuid'], body={'properties': props}).execute()\n"
  },
  {
    "path": "doc/_includes/_assign_volume_uuid.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nNote that each volume has a UUID, like @zzzzz-nyw5e-0123456789abcde@. You assign these manually: replace @zzzzz@ with your Cluster ID, and replace @0123456789abcde@ with an arbitrary unique string of 15 alphanumerics. Once assigned, UUIDs should not be changed.\n\nEssential configuration values are highlighted in <span class=\"userinput\">red</span>.  Remaining parameters are provided for documentation, with their default values."
  },
  {
    "path": "doc/_includes/_branchname.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% if site.current_version and site.current_version != 'main' %}\n{% assign branchname = site.current_version | slice: 1, 3 | append: '-release' %}\n{% else %}\n{% assign branchname = 'main' %}\n{% endif %}\n"
  },
  {
    "path": "doc/_includes/_container_glob_patterns.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2. Glob patterns\n\nEach pattern in the @output_glob@ array can include the following special terms:\n\ntable(table table-bordered table-condensed).\n|@*@|matches any sequence of non-@/@ characters|\n|@?@|matches any single non-@/@ character|\n|@[abcde]@ or @[a-e]@|matches any non-@/@ character in @abcde@|\n|@[^abcde]@ or @[^a-e]@ or\n@[!abcde]@ or @[!a-e]@|matches any non-@/@ character other than @abcde@|\n|@/**/@|matches zero or more levels of subdirectories|\n|@**/@|at the beginning of a pattern, matches zero or more directories|\n|@/**@|at the end of a pattern, matches any file in any subdirectory|\n\nExample patterns:\n\ntable(table table-bordered table-condensed).\n|@*.txt@|matches files with extension @.txt@ at the top level|\n|@foo/**@|matches the entire tree rooted at @foo@ in the top level|\n|@**/fo[og]@|matches all files named @foo@ or @fog@ anywhere in the tree|\n|@foo/**/*.txt@|matches all files with extension @.txt@ anywhere in the tree rooted at @foo@ in the top level|\n"
  },
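These patterns are used in the @output_glob@ attribute of a container request. As a minimal sketch (the container request UUID below is hypothetical), the table's example patterns translate into an @output_glob@ array set via the Python SDK:

<pre>
#!/usr/bin/env python3
# Sketch only: the container request UUID is hypothetical.
import arvados

api = arvados.api()
api.container_requests().update(
    uuid='zzzzz-xvhdp-xxxxxxxxxxxxxxx',
    body={'output_glob': [
        '*.txt',      # files with extension .txt at the top level
        'foo/**',     # the entire tree rooted at foo
        '**/fo[og]',  # all files named foo or fog anywhere in the tree
    ]},
).execute()
</pre>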
  {
    "path": "doc/_includes/_container_published_ports.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2. Published ports\n\nContainers can expose web services.  These can be interactive web applications such as Jupyter notebooks or AI chats, or HTTP-based APIs.  Arvados acts as a reverse proxy, forwarding HTTP requests to the container and relaying responses back to the client.  The external URL will be one of the following, depending on how Arvados is configured (see ContainerWebServices in the \"default config file\":{{site.baseurl}}/admin/config.html):\n\n* @https://<uuid>-<port>.containers.zzzzz.example.com/@ where @<uuid>@ is the container UUID and @<port>@ is the port where the container process is listening\n* @https://containers.zzzzz.example.com:<port>/@ where @<port>@ is a dynamically assigned port\n\nTo accept requests, the container should listen on 0.0.0.0 (listening on localhost or 127.0.0.1 will _not_ work) and handle plain text HTTP/1.1 requests.\n\nThe @published_ports@ attribute of the container request record advertises which ports on the container should be available to external clients.\n\nThe value @published_ports@ is a hash.  Each key in the hash is a port number that the container is listening on.  Each entry in the hash has three keys described here:\n\ntable(table table-bordered table-condensed).\n|_. Key|_. Type|_. Description|\n|access|string|One of \"public\" or \"private\".  If \"private\", the client connecting to the container must provide an Arvados API for the user who submitted the container request(s) corresponding to the container.  The token is provided as a query parameter @?arvados_api_token=...@.  Arvados will consume the query parameter and respond with a redirect and a cookie used to authenticate subsequent requests.  If \"public\", no authentication is required.|\n|label|string|A string that will be displayed to the user on Workbench describing the service.  Cannot be empty.|\n|initial_path|string|The relative path that should be included when constructing the URL that will be presented in Workbench and in the @initial_url@ field described below.  May include any or none of path, fragment and query parameter parts of the URL, or be blank.  Leading slash is optional.|\n\nThe @published_ports@ attribute of the container record is a copy of the corresponding container request attribute, with the following entries added for each exposed port when the container enters @Running@ state:\n\ntable(table table-bordered table-condensed).\n|_. Key|_. Type|_. Description|_. 
Examples|\n|base_url|string|The external URL where the service is reachable.|@https://zzzzz-dz642-abcdefghijklmno-80.containers.zzzzz.example.com/@\n@https://containers.zzzzz.example.com:2000/@|\n|initial_url|string|The external URL with @initial_path@ applied.|@https://zzzzz-dz642-abcdefghijklmno-80.containers.zzzzz.example.com/index.html?start=true@\n@https://containers.zzzzz.example.com:2000/index.html?start=true@|\n|external_port|integer|The dynamically assigned external port if applicable, otherwise null.|@2000@\n@null@|\n\nExample @published_ports@ attribute for a container request:\n\n<pre>\n{\n  \"published_ports\": {\n    \"80\": {\n      \"access\": \"private\",\n      \"label\": \"Jupyter notebook instance\",\n      \"initial_path\": \"?path=example.ipynb\"\n    }\n  }\n}\n</pre>\n\nExample @published_ports@ attribute for a running container:\n\n<pre>\n{\n  \"published_ports\": {\n    \"80\": {\n      \"access\": \"private\",\n      \"label\": \"Jupyter notebook instance\",\n      \"initial_path\": \"?path=example.ipynb\",\n      \"external_port\": 2025,\n      \"base_url\": \"https://containers.zzzzz.example.com:2025/\",\n      \"initial_url\": \"https://containers.zzzzz.example.com:2025/?path=example.ipynb\"\n    }\n  }\n}\n</pre>\n\nh3. Accessing unpublished ports\n\nIf the @Services.ContainerWebServices.ExternalURL@ config entry is a wildcard, it is possible to connect to _any_ port in a running container, whether or not it is listed in @published_ports@, by providing the container request or container UUID and the listening port number as @<uuid>-<port>@ in place of the @*@ wildcard in the URL, _i.e._, @https://<uuid>-<port>.containers.zzzzz.example.com/@.\n\nUnpublished ports are not displayed in Workbench and have a default access level of \"private\".\n"
  },
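As a minimal sketch of the attributes described above, a container request publishing port 80 as a private service could be created with the Python SDK. The image, command, and sizes are illustrative, and the image is assumed to be already available on the cluster:

<pre>
#!/usr/bin/env python3
# Sketch only: illustrative image, command, and mount sizes.
import arvados

api = arvados.api()
cr = api.container_requests().create(body={
    'name': 'jupyter-demo',
    'state': 'Committed',
    'priority': 1,
    'container_image': 'jupyter/base-notebook',  # assumed already uploaded
    # Listen on 0.0.0.0 so the Arvados reverse proxy can reach the service.
    'command': ['jupyter', 'notebook', '--ip=0.0.0.0', '--port=80'],
    'output_path': '/var/spool/cwl',
    'mounts': {'/var/spool/cwl': {'kind': 'tmp', 'capacity': 1 << 30}},
    'runtime_constraints': {'vcpus': 1, 'ram': 2 << 30, 'API': True},
    'published_ports': {
        '80': {
            'access': 'private',
            'label': 'Jupyter notebook instance',
            'initial_path': '?path=example.ipynb',
        },
    },
}).execute()
print(cr['uuid'])
</pre>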
  {
    "path": "doc/_includes/_container_runtime_constraints.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2. Runtime constraints\n\nRuntime constraints restrict the container's access to compute resources and the outside world (in addition to its explicitly stated inputs and output).\n\ntable(table table-bordered table-condensed).\n|_. Key|_. Type|_. Description|_. Notes|\n|ram|integer|Number of ram bytes to be used to run this process.|Optional. However, a ContainerRequest that is in \"Committed\" state must provide this.|\n|vcpus|integer|Number of cores to be used to run this process.|Optional. However, a ContainerRequest that is in \"Committed\" state must provide this.|\n|keep_cache_disk|integer|When the container process accesses data from Keep via the filesystem, that data will be cached on disk, up to this amount in bytes.|Optional. If your cluster is configured to use a disk cache by default, the default size will match your @ram@ constraint, bounded between 2GiB and 32GiB.|\n|keep_cache_ram|integer|When the container process accesses data from Keep via the filesystem, that data will be cached in memory, up to this amount in bytes.|Optional. If your cluster is configured to use a RAM cache by default, the administrator sets a default cache size.|\n|API|boolean|When set, ARVADOS_API_HOST and ARVADOS_API_TOKEN will be set, and container will have networking enabled to access the Arvados API server.|Optional.|\n|gpu|object|Request GPU support, see below|Optional.|\n|cuda|object|Old way to request CUDA GPU support, included for backwards compatability only.  Use the 'gpu' field instead.|Deprecated.|\n\nh3. GPU support\n\ntable(table table-bordered table-condensed).\n|_. Key|_. Type|_. Description|_. Notes|\n|stack|string|One of 'cuda' or 'rocm' to request Nvidia or AMD GPU support.||\n|device_count|int|Number of GPUs to request.|Count greater than 0 enables GPU support.|\n|driver_version|string|Minimum driver version, in \"X.Y\" format.|Required when device_count > 0|\n|hardware_target|array of strings|For CUDA: a single item with minimum CUDA hardware capability, in \"X.Y\" format, or multiple items listing CUDA specific hardware capability versions, one of which must be an exact match on the compute node the container is scheduled on.\nFor ROCm: A list of one or more hardware targets (e.g. gfx1100) corresponding to the GPU architectures supported by the container.  To be scheduled, at least one item in this list must match the @HardwareTarget@ of one of the cluster's @InstanceTypes@.|Required when device_count > 0|\n|vram|int|Amount of VRAM to request, in bytes.||\n\nh3. CUDA support (deprecated)\n\nNote.  This API is deprecated.  Use the 'gpu' API instead.\n\ntable(table table-bordered table-condensed).\n|_. Key|_. Type|_. Description|_. Notes|\n|device_count|int|Number of GPUs to request.|Count greater than 0 enables CUDA GPU support.|\n|driver_version|string|Minimum CUDA driver version, in \"X.Y\" format.|Required when device_count > 0|\n|hardware_capability|string|Minimum CUDA hardware capability, in \"X.Y\" format.|Required when device_count > 0|\n"
  },
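Taken together, a @runtime_constraints@ value exercising the fields above might look like the following sketch (all values are illustrative):

<pre>
#!/usr/bin/env python3
# Illustrative values only; see the tables above for field semantics.
runtime_constraints = {
    'vcpus': 2,
    'ram': 4 << 30,              # 4 GiB of RAM
    'keep_cache_disk': 8 << 30,  # 8 GiB on-disk Keep cache
    'API': True,                 # expose ARVADOS_API_HOST/ARVADOS_API_TOKEN
    'gpu': {
        'stack': 'cuda',
        'device_count': 1,
        'driver_version': '12.0',    # minimum driver version, "X.Y"
        'hardware_target': ['8.6'],  # minimum CUDA hardware capability
        'vram': 16 << 30,            # 16 GiB of VRAM
    },
}
</pre>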
  {
    "path": "doc/_includes/_container_scheduling_parameters.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2. Scheduling parameters\n\nParameters to be passed to the container scheduler (e.g., Slurm) when running a container.\n\ntable(table table-bordered table-condensed).\n|_. Key|_. Type|_. Description|_. Notes|\n|partitions|array of strings|The names of one or more compute partitions that may run this container. If not provided, the system will choose where to run the container.|Optional.|\n|preemptible|boolean|If true, the dispatcher should use a preemptible cloud node instance (eg: AWS Spot Instance) to run this container.  Whether a preemptible instance is actually used \"depends on cluster configuration.\":{{site.baseurl}}/admin/spot-instances.html|Optional. Default is false.|\n|max_run_time|integer|Maximum running time (in seconds) that this container will be allowed to run before being cancelled.|Optional. Default is 0 (no limit).|\n"
  },
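A sketch of a @scheduling_parameters@ value using these keys (the partition names are hypothetical):

<pre>
#!/usr/bin/env python3
# Illustrative values only; see the table above for field semantics.
scheduling_parameters = {
    'partitions': ['gpu', 'highmem'],  # hypothetical partition names
    'preemptible': True,               # prefer a spot/preemptible instance
    'max_run_time': 3600,              # cancel if still running after 1 hour
}
</pre>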
  {
    "path": "doc/_includes/_contrib_component.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'notebox_begin' %}\n{{ component_name|default: 'This component' }} is an Arvados client contribution. It is supported by the Arvados development team and we are happy to receive contributions for it, but it receives less testing than core components and bug reports may get lower priority.\n{% include 'notebox_end' %}\n"
  },
  {
    "path": "doc/_includes/_download_installer.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'branchname' %}\n\nThis is a package-based installation method, however the installation script is currently distributed in source form via @git@. We recommend checking out the git tree on your local workstation, not directly on the target(s) where you want to install and run Arvados.\n\n<notextile>\n<pre><code class=\"userinput\">git clone https://github.com/arvados/arvados.git\ncd arvados\ngit checkout {{ branchname }}\ncd tools/salt-install\n</code></pre>\n</notextile>\n\nThe @install.sh@ and @provision.sh@ scripts will help you deploy Arvados by preparing your environment to be able to run the installer, then running it. The actual installer is located in the \"arvados-formula git repository\":https://github.com/arvados/arvados-formula/tree/refs/heads/{{ branchname }} and will be cloned during the running of the @provision.sh@ script.  The installer is built using \"Saltstack\":https://saltproject.io/ and @provision.sh@ performs the install using masterless mode.\n\nh2(#copy_config). Initialize the installer\n\nReplace \"xarv1\" with the cluster id you selected earlier.\n\nThis creates a git repository in @~/setup-arvados-xarv1@.  The @installer.sh@ will record all the configuration changes you make, as well as using @git push@ to synchronize configuration edits if you have multiple nodes.\n\nImportant!  Once you have initialized the installer directory, all further commands must be run with @~/setup-arvados-${CLUSTER}@ as the current working directory.\n\nh3. Using Terraform (AWS specific)\n\nIf you are going to use Terraform to set up the infrastructure on AWS, you first need to install the \"Terraform CLI\":https://developer.hashicorp.com/terraform/tutorials/aws-get-started/install-cli and the \"AWS CLI\":https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html tool.  Then you can initialize the installer.\n\n<notextile>\n<pre><code class=\"userinput\">CLUSTER=xarv1\n./installer.sh initialize ~/setup-arvados-${CLUSTER} {{local_params_src}} {{config_examples_src}} {{terraform_src}}\ncd ~/setup-arvados-${CLUSTER}\n</code></pre>\n</notextile>\n\nh3. Without Terraform\n\n<notextile>\n<pre><code class=\"userinput\">CLUSTER=xarv1\n./installer.sh initialize ~/setup-arvados-${CLUSTER} {{local_params_src}} {{config_examples_src}}\ncd ~/setup-arvados-${CLUSTER}\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/_includes/_example_sdk_go.liquid",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n// \n// SPDX-License-Identifier: CC-BY-SA-3.0\n\npackage main\n\n\n// *******************\n// Import the modules.\n//\n// Our examples don't use keepclient, but they do use fmt and log to\n// display output.\n\nimport (\n\t\"fmt\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"log\"\n)\n\nfunc main() {\n\n\n\t// ********************************\n\t// Set up an API client user agent.\n\t//\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error setting up arvados client %s\", err.Error())\n\t}\n\n\n\t// *****************************************\n\t// Print the full name of the current user.\n\t//\n\n\ttype user struct {\n\t\t// Remember to start each field name with a capital letter,\n\t\t// otherwise it won't get populated by the arvados client because\n\t\t// the field will be invisible to it.\n\t\tUuid     string `json:\"uuid\"`\n\t\tFullName string `json:\"full_name\"`\n\t}\n\n\tvar u user\n\terr = arv.Call(\"GET\", \"users\", \"\", \"current\", nil, &u)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"error querying current user\", err.Error())\n\t}\n\n\tlog.Printf(\"Logged in as %s (uuid %s)\", u.FullName, u.Uuid)\n\n\n\t// ********************************************************\n\t// Print all fields from the first five collections returned.\n\t//\n\t// Note that some fields, are not returned by default and have to be\n\t// requested. See below for an example.\n\n\tvar results map[string]interface{}\n\n\tparams := arvadosclient.Dict{\"limit\": 5}\n\n\terr = arv.List(\"collections\", params, &results)\n\tif err != nil {\n\t\tlog.Fatalf(\"error querying collections\", err.Error())\n\t}\n\n\tprintArvadosResults(results)\n\n\n\t// *********************************************************\n\t// Print some fields from the first two collections returned.\n\t//\n\t// We also print manifest_test, which has to be explicitly requested.\n\t//\n\n\tcollection_fields_wanted := []string{\"manifest_text\", \"owner_uuid\", \"uuid\"}\n\tparams = arvadosclient.Dict{\"limit\": 2, \"select\": collection_fields_wanted}\n\n\terr = arv.List(\"collections\", params, &results)\n\tif err != nil {\n\t\tlog.Fatalf(\"error querying collections\", err.Error())\n\t}\n\n\tprintArvadosResults(results)\n}\n\n\n// A helper method which will print out a result map returned by\n// arvadosclient.\nfunc printArvadosResults(results map[string]interface{}) {\n\tfor key, value := range results {\n\t\t// \"items\", if it exists, holds a map.\n\t\t// So we print it prettily below.\n\t\tif key != \"items\" {\n\t\t\tfmt.Println(key, \":\", value)\n\t\t}\n\t}\n\n\tif value, ok := results[\"items\"]; ok {\n\t\titems := value.([]interface{})\n\t\tfor index, item := range items {\n\t\t\tfmt.Println(\"===========  \", index, \"  ===========\")\n\t\t\titem_map := item.(map[string]interface{})\n\t\t\tif len(item_map) == 0 {\n\t\t\t\tfmt.Println(\"item\", index, \": empty map\")\n\t\t\t} else {\n\t\t\t\tfor k, v := range item_map {\n\t\t\t\t\tfmt.Println(index, k, \":\", v)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "doc/_includes/_google_analytics.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n<script>\n  window['ga-disable-{{ site.google_analytics }}'] =\n    window.doNotTrack === '1' ||\n    navigator.doNotTrack === '1' ||\n    navigator.doNotTrack === 'yes' ||\n    navigator.msDoNotTrack === '1';\n  window.dataLayer = window.dataLayer || [];\n  function gtag() {\n    window.dataLayer.push(arguments);\n  }\n  gtag('js', new Date());\n\n  gtag('config', '{{ site.google_analytics }}');\n</script>\n<script defer src=\"https://www.googletagmanager.com/gtag/js?id={{ site.google_analytics }}\"></script>"
  },
  {
    "path": "doc/_includes/_hpc_max_gateway_tunnels.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh3(#MaxGatewayTunnels). API.MaxGatewayTunnels\n\nEach Arvados container that runs on your HPC cluster will bring up a long-lived connection to the Arvados controller and keep it open for the entire duration of the container. This connection is used to access real-time container logs from Workbench, and to enable the \"container shell\":{{site.baseurl}}/install/container-shell-access.html feature.\n\nSet the @MaxGatewayTunnels@ config entry high enough to accommodate the maximum number of containers you expect to run concurrently on your HPC cluster, plus incoming container shell sessions.\n\n<notextile>\n<pre>    API:\n      MaxGatewayTunnels: 2000</pre>\n</notextile>\n\nAlso, configure Nginx (and any other HTTP proxies or load balancers running between the HPC and Arvados controller) to allow the expected number of connections, i.e., @MaxConcurrentRequests + MaxQueuedRequests + MaxGatewayTunnels@.\n"
  },
  {
    "path": "doc/_includes/_html_tags.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe following HTML tags are permitted: *a*, *b*, *blockquote*, *br*, *code*, *del*, *dd*, *dl*, *dt*, *em*, *h1*, *h2*, *h3*, *h4*, *h5*, *h6*, *hr*, *i*, *img*, *kbd*, *li*, *ol*, *p*, *pre*, *s*, *del*, *section*, *span*, *strong*, *sub*, *sup*, and *ul*.\n\nThe following HTML attributes are permitted: *src*, *width*, *height*, *href*, *alt*, *title*, and *style*.\n\nAll styling must be made in-line with the style attribute. Disallowed tags and attributes will not render."
  },
  {
    "path": "doc/_includes/_install_ansible.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{{ header_level|default: 'h3' }}(#install-ansible-pipx). Option 1. Install Ansible with pipx\n\nThe pipx tool is packaged in many of our supported distributions. You can install it on Debian/Ubuntu by running:\n\n<notextile>\n<pre># <code class=\"userinput\">apt install pipx\n</code></pre>\n</notextile>\n\nOr install it on Red Hat/AlmaLinux/Rocky Linux by running:\n\n<notextile>\n<pre># <code class=\"userinput\">dnf install pipx</code></pre>\n</code></pre>\n</notextile>\n\n{% include 'notebox_begin' %}\nIf the pipx package is not found, it is not available for your distribution. Instead \"install Ansible with virtualenv and pip\":#install-ansible-venv.\n{% include 'notebox_end' %}\n\nAfter pipx is installed, install Ansible by running:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arvados/tools/ansible/install-ansible.sh</span>\n  installed package ansible-core 2.15.13, installed using Python 3.11.2\n  These apps are now globally available\n    - ansible\n    - ansible-config\n    - ansible-connection\n    - ansible-console\n    - ansible-doc\n    - ansible-galaxy\n    - ansible-inventory\n    - ansible-playbook\n    - ansible-pull\n    - ansible-test\n    - ansible-vault\ndone! ✨ 🌟 ✨\n\n[…]\n\nAnsible successfully installed!\n</code></pre>\n</notextile>\n\nIf this script reports the final success message, skip the next section.\n\n{{ header_level|default: 'h3' }}(#install-ansible-venv). Option 2. Install Ansible in a virtualenv\n\nThis method works on all of our supported distributions, but requires you to configure a lot of paths manually. Install Python and virtualenv on Debian/Ubuntu by running:\n\n<notextile>\n<pre># <code class=\"userinput\">apt install python3-venv\n</code></pre>\n</notextile>\n\nOr install it on Red Hat/AlmaLinux/Rocky Linux by running:\n\n<notextile>\n<pre># <code class=\"userinput\">dnf install python3\n</code></pre>\n</notextile>\n\nNext, set up a virtualenv. If you want to install this somewhere other than @~/arvados-ansible@, you may change that path each time it appears.\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arvados/tools/ansible/install-ansible.sh <strong>~/arvados-ansible</strong></span>\nCollecting ansible-core~=2.15.13\n[…]\n\nAnsible successfully installed!\n</code></pre>\n</notextile>\n\nFinally, add all the Ansible tools to your executable path. If you keep personal executables somewhere other than @~/.local/bin@, you may change that path.\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">ln -st <strong>~/.local/bin ~/arvados-ansible</strong>/bin/ansible*</span>\n</code></pre>\n</notextile>\n\nAlternatively, you may reconfigure your shell to add <notextile><code><strong>$HOME/arvados-ansible</strong>/bin</code></notextile> to the end of your @$PATH@ variable.\n"
  },
  {
    "path": "doc/_includes/_install_ca_cert.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh3. Web Browser\n\nInstalling the root certificate into your web browser will prevent security errors when accessing Arvados services with your web browser.\n\nh4. Chrome\n\n# Go to \"Settings &rarr; Privacy and Security &rarr; Security &rarr; Manage Certificates\" or enter @chrome://settings/certificates@ in the URL bar.\n# *Click on the \"Authorities\" tab*  (it is not selected by default)\n# Click on the \"Import\" button\n# Choose @{{ca_cert_name}}@\n# Tick the checkbox next to \"Trust this certificate for identifying websites\"\n# Hit OK\n# The certificate should appear in the list of Authorities under \"Arvados\"\n\nh4. Firefox\n\n# Go to \"Preferences &rarr; Privacy &amp; Security\" or enter @about:preferences#privacy@ in the URL bar\n# Scroll down to the *Certificates* section\n# Click on the button \"View Certificates...\".\n# Make sure the \"Authorities\" tab is selected\n# Press the \"Import...\" button.\n# Choose @{{ca_cert_name}}@\n# Tick the checkbox next to \"Trust this CA to identify websites\"\n# Hit OK\n# The certificate should appear in the list of Authorities under \"Arvados\"\n\nh4. Other browsers (Safari, etc)\n\nThe process will be similar to that of Chrome and Firefox, but the exact user interface will be different.  If you can't figure it out, try searching for \"how do I install a custom certificate authority in (my browser)\".\n\nh3. Installation on Linux OS certificate storage\n\nTo access your Arvados instance using command line clients (such as @arv-get@ and @arv-put@) without security errors, install the certificate into the OS certificate storage.\n\nh4. Debian/Ubuntu\n\n*Important* the certificate file added to @ca-certificates@ must have the extension @.crt@ or it won't be recognized.\n\n<notextile>\n<pre><code>cp {{ca_cert_name}} /usr/local/share/ca-certificates/arvados-snakeoil-ca.crt\n/usr/sbin/update-ca-certificates\n</code></pre>\n</notextile>\n\nh4. Red Hat, AlmaLinux, and Rocky Linux\n\n<notextile>\n<pre><code>cp {{ca_cert_name}} /etc/pki/ca-trust/source/anchors/\n/usr/bin/update-ca-trust\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/_includes/_install_compute_docker.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2(#cgroups). Configure Linux cgroups accounting\n\nLinux can report what compute resources are used by processes in a specific cgroup or Docker container.  Crunch can use these reports to share that information with users running compute work.  This can help workflow authors debug and optimize their workflows.\n\nTo enable cgroups accounting, you must boot Linux with the command line parameters @cgroup_enable=memory swapaccount=1@.\n\nCurrently Arvados is not compatible with the new cgroups accounting, also known as cgroups v2. Currently, all supported GNU/Linux distributions don't use cgroups v2 as default\nIf you are using a distribution in the compute nodes that ships with cgroups v2 enabled, make sure to disable it by booting Linux with the command line parameters @systemd.unified_cgroup_hierarchy=0@.\n\nAfter making changes, reboot the system to make these changes effective.\n\nh3. Red Hat, AlmaLinux, and Rocky Linux\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">sudo grubby --update-kernel=ALL --args='cgroup_enable=memory swapaccount=1 systemd.unified_cgroup_hierarchy=0'</span>\n</code></pre>\n</notextile>\n\nh3. Debian and Ubuntu\n\nOpen the file @/etc/default/grub@ in an editor.  Find where the string @GRUB_CMDLINE_LINUX@ is set.  Add @cgroup_enable=memory swapaccount=1 systemd.unified_cgroup_hierarchy=0@ to that string.  Save the file and exit the editor.  Then run:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">sudo update-grub</span>\n</code></pre>\n</notextile>\n\nh2(#install_docker). Install Docker\n\nCompute nodes must have Docker installed to run containers.  This requires a relatively recent version of Linux (at least upstream version 3.10, or a distribution version with the appropriate patches backported).  Follow the \"Docker Engine installation documentation\":https://docs.docker.com/install/ for your distribution.\n\nMake sure Docker is enabled to start on boot:\n\n<notextile>\n<pre><code># <span class=\"userinput\">systemctl enable --now docker</span>\n</code></pre>\n</notextile>\n\nh2(#configure_docker_daemon). Configure the Docker daemon\n\nDepending on your anticipated workload or cluster configuration, you may need to tweak Docker options.\n\nFor information about how to set configuration options for the Docker daemon, see https://docs.docker.com/config/daemon/systemd/\n\nh3. Changing ulimits\n\nDocker containers inherit ulimits from the Docker daemon.  However, the ulimits for a single Unix daemon may not accommodate a long-running Crunch job.  You may want to increase default limits for compute containers by passing @--default-ulimit@ options to the Docker daemon.  For example, to allow containers to open 10,000 files, set @--default-ulimit nofile=10000:10000@.\n\nh2. Troubleshooting\n\nh3. Workflows fail with @ValidationException: Not found: '/var/lib/cwl/workflow.json#main'@\n\nA possible configuration error is having Docker installed as a @snap@ package rather than a @deb@ package.  This is a problem because @snap@ packages are partially containerized and may have a different view of the filesystem than @crunch-run@.  This will produce confusing problems, for example, directory bind mounts sent to Docker that are empty (instead of containing the intended files) and resulting in unexpected \"file not found\" errors.\n\nTo check for this situation, run @snap list@ and look for @docker@.  
If found, run @snap remove docker@ and follow the instructions above to \"install Docker Engine\":#install_docker .\n"
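h2. Example: setting default ulimits persistently\n\nAs a sketch of the @--default-ulimit@ option discussed in \"Changing ulimits\" above, an equivalent @/etc/docker/daemon.json@ entry might look like this (the 10,000 file limit is illustrative; consult the Docker daemon documentation for your version):\n\n<notextile>\n<pre><code>{\n  \"default-ulimits\": {\n    \"nofile\": {\n      \"Name\": \"nofile\",\n      \"Hard\": 10000,\n      \"Soft\": 10000\n    }\n  }\n}\n</code></pre>\n</notextile>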
  },
  {
    "path": "doc/_includes/_install_compute_fuse.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2(#fuse). Update fuse.conf\n\nFUSE must be configured with the @user_allow_other@ option enabled for Crunch to set up Keep mounts that are readable by containers.  Install this file as @/etc/fuse.conf@:\n\n<notextile>\n<pre>\n# Allow non-root users to specify the 'allow_other' or 'allow_root'\n# mount options.\nuser_allow_other\n</pre>\n</notextile>\n"
  },
  {
    "path": "doc/_includes/_install_cuda.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2(#cuda). Install NVIDA CUDA Toolkit (optional)\n\nIf you want to use NVIDIA GPUs, \"install the CUDA toolkit\":https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html and the \"NVIDIA Container Toolkit\":https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html.\n"
  },
  {
    "path": "doc/_includes/_install_debian_key.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n<notextile>\n<pre><code># <span class=\"userinput\">install -d /etc/apt/keyrings</span>\n# <span class=\"userinput\">curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg</span>\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/_includes/_install_docker_cleaner.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2(#docker-cleaner). Update docker-cleaner.json\n\nThe @arvados-docker-cleaner@ program removes least recently used Docker images as needed to keep disk usage below a configured limit.\n\nCreate a file @/etc/arvados/docker-cleaner/docker-cleaner.json@ in an editor, with the following contents.\n\n<notextile>\n<pre><code>{\n    \"Quota\": \"<span class=\"userinput\">10G</span>\",\n    \"RemoveStoppedContainers\": \"always\"\n}\n</code></pre>\n</notextile>\n\n*Choosing a quota:* Most deployments will want a quota that's at least 10G.  From there, a larger quota can help reduce compute overhead by preventing reloading the same Docker image repeatedly, but will leave less space for other files on the same storage (usually Docker volumes).  Make sure the quota is less than the total space available for Docker images.\n\n{% include 'notebox_begin' %}\nThis also removes all containers as soon as they exit, as if they were run with @docker run --rm@. If you need to debug or inspect containers after they stop, temporarily stop arvados-docker-cleaner or configure it with @\"RemoveStoppedContainers\":\"never\"@.\n{% include 'notebox_end' %}\n"
  },
  {
    "path": "doc/_includes/_install_packages.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n\npackages_to_install should be a list\nfallback on arvados_component if not defined\n{% endcomment %}\n\n{% if package_to_install == nil %}\n  {% assign packages_to_install = arvados_component | split: \" \" %}\n{% endif %}\n\nh2(#install-packages). Install {{packages_to_install | join: \" and \" }}\n\nh3. Red Hat, AlmaLinux, and Rocky Linux\n\n<notextile>\n<pre><code># <span class=\"userinput\">dnf install {{packages_to_install | join: \" \"}}</span>\n</code></pre>\n</notextile>\n\nh3. Debian and Ubuntu\n\n<notextile>\n<pre><code># <span class=\"userinput\">apt install {{packages_to_install  | join \" \"}}</span>\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/_includes/_install_postgres_database.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n<ol class=>\n<li>Start a shell for the postgres user:\n<notextile><pre># <span class=\"userinput\">su postgres</span></pre></notextile>\n</li>\n<li>Generate a new database password:\n<notextile><pre>postgres$ <span class=\"userinput\"><span class=\"userinput\">tr -dc 0-9a-zA-Z &lt;/dev/urandom | head -c25; echo</span>\nyourgeneratedpassword\n</pre></notextile> Record this.  You'll need it when you set up the Rails server later.\n</li>\n<li>Create a database user with the password you generated:\n  <notextile><pre><code>postgres$ <span class=\"userinput\">createuser --encrypted --no-createrole --no-superuser --pwprompt {{service_role}}</span>\n  Enter password for new role: <span class=\"userinput\">yourgeneratedpassword</span>\n  Enter it again: <span class=\"userinput\">yourgeneratedpassword</span></code></pre></notextile>\n</li>\n<li>Create a database owned by the new user:\n  <notextile><pre><code>postgres$ <span class=\"userinput\">createdb {{service_database}} -T template0 -E UTF8 -O {{service_role}}</span></code></pre></notextile>\n</li>\n{% if use_contrib %}\n<li>Enable the pg_trgm extension\n  <notextile><pre>postgres$ <span class=\"userinput\">psql {{service_database}} -c \"CREATE EXTENSION IF NOT EXISTS pg_trgm\"</span></pre></notextile>\n</li>\n{% endif %}\n<li>Exit the postgres user shell:\n  <notextile><pre>postgres$ <span class=\"userinput\">exit</span></pre></notextile>\n</li>\n</ol>\n"
  },
  {
    "path": "doc/_includes/_install_rails_command.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% comment %}\nThis template recognizes four variables:\n* railshost: The hostname included in the prompt, to let the user know where to run the command.  If this is the empty string, no hostname will be displayed.  Default \"apiserver\".\n* railsdir: The directory included in the prompt, to let the user know where to run the command.  Default \"/var/www/arvados-api/current\".\n* railscmd: The full command to run.  Default \"bin/rails console\".\n* railsout: The expected output of the command, if any.\n{% endcomment %} Change *@webserver-user@* to the user that runs your web server process.  If you install Phusion Passenger as we recommend, this is *@www-data@* on Debian-based systems, and *@nginx@* on Red Hat-based systems.\n\n{% unless railshost %}\n  {% assign railshost = \"apiserver\" %}\n{% endunless %}\n\n{% unless (railshost == \"\") or (railshost contains \":\") %}\n  {% capture railshost %}{{railshost}}:{% endcapture %}\n{% endunless %}\n\n{% unless railsdir %}\n  {% assign railsdir = \"/var/www/arvados-api/current\" %}\n{% endunless %}\n\n{% unless railscmd %}\n  {% assign railscmd = \"bin/rails console\" %}\n{% endunless %}\n\n<notextile>\n<pre><code>{{railshost}}~$ <span class=\"userinput\">cd {{railsdir}}</span>\n{{railshost}}{{railsdir}}$ <span class=\"userinput\">sudo -u <b>webserver-user</b> RAILS_ENV=production {{railscmd}}</span>\n{% if railsout %}{{railsout}}\n{% endif %}</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/_includes/_install_ruby_and_bundler.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nRuby 2.7 or newer is required.\n\nh2. Red Hat, AlmaLinux, and Rocky Linux\n\nVersion 8 of these distributions provides Ruby 2.7. You can install it by running:\n\n<notextile>\n<pre><code># <span class=\"userinput\">dnf module enable ruby:2.7</span>\n# <span class=\"userinput\">dnf install --enablerepo=devel ruby ruby-devel</span></code></pre>\n</notextile>\n\nh2. Debian and Ubuntu\n\nAll supported versions of Debian and Ubuntu include a version of Ruby you can use with Arvados.\n\n<notextile>\n<pre><code># <span class=\"userinput\">apt --no-install-recommends install ruby ruby-dev</span></code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/_includes/_matomo_analytics.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n<!-- Matomo analytics (used to be called \"Piwik\") -->\n<script type=\"text/javascript\">\n  var _paq = _paq || [];\n  _paq.push([\"trackPageView\"]);\n  _paq.push([\"enableLinkTracking\"]);\n  _paq.push([\"setTrackerUrl\", \"{{ site.matomo_analytics_url }}/piwik.php\"]);\n  _paq.push([\"setSiteId\", \"{{ site.matomo_analytics_siteid }}\"]);\n</script>\n<script defer src=\"{{ site.matomo_analytics_url }}/piwik.js\"></script>\n<!-- End Matomo code -->\n"
  },
  {
    "path": "doc/_includes/_metadata_vocabulary_example.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}{\n    \"strict_tags\": false,\n    \"tags\": {\n        \"IDTAGANIMALS\": {\n            \"strict\": false,\n            \"labels\": [{\"label\": \"Animal\" }, {\"label\": \"Creature\"}, {\"label\": \"Species\"}],\n            \"values\": {\n                \"IDVALANIMALS1\": { \"labels\": [{\"label\": \"Human\"}, {\"label\": \"Homo sapiens\"}] },\n                \"IDVALANIMALS2\": { \"labels\": [{\"label\": \"Dog\"}, {\"label\": \"Canis lupus familiaris\"}] },\n                \"IDVALANIMALS3\": { \"labels\": [{\"label\": \"Elephant\"}, {\"label\": \"Loxodonta\"}] },\n                \"IDVALANIMALS4\": { \"labels\": [{\"label\": \"Eagle\"}, {\"label\": \"Haliaeetus leucocephalus\"}] }\n            }\n        },\n        \"IDTAGCOMMENT\": {\n            \"labels\": [{\"label\": \"Comment\"}, {\"label\": \"Suggestion\"}]\n        },\n        \"IDTAGIMPORTANCES\": {\n            \"strict\": true,\n            \"labels\": [{\"label\": \"Importance\"}, {\"label\": \"Priority\"}],\n            \"values\": {\n                \"IDVALIMPORTANCES1\": { \"labels\": [{\"label\": \"Critical\"}, {\"label\": \"Urgent\"}, {\"label\": \"High\"}] },\n                \"IDVALIMPORTANCES2\": { \"labels\": [{\"label\": \"Normal\"}, {\"label\": \"Moderate\"}] },\n                \"IDVALIMPORTANCES3\": { \"labels\": [{\"label\": \"Low\"}] }\n            }\n        }\n    }\n}"
  },
  {
    "path": "doc/_includes/_mount_types.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2. Mount types\n\nThe \"mounts\" hash is the primary mechanism for adding data to the container at runtime (beyond what is already in the container image).\n\nEach value of the \"mounts\" hash is itself a hash, whose \"kind\" key determines the handler used to attach data to the container.\n\ntable(table table-bordered table-condensed).\n|_. Mount type|_. Kind|_. Description|_. Examples|\n|Arvados data collection|@collection@|@\"portable_data_hash\"@ _or_ @\"uuid\"@ _may_ be provided. If not provided, a new collection will be created. This is useful when @\"writable\":true@ and the container's @output_path@ is (or is a subdirectory of) this mount target.\n@\"writable\"@ may be provided with a @true@ or @false@ to indicate the path must (or must not) be writable. If not specified, the system can choose.\n@\"path\"@ may be provided, and defaults to @\"/\"@.\nAt container startup, the target path will have the same directory structure as the given path within the collection. Even if the files/directories are writable in the container, modifications will _not_ be saved back to the original collections when the container ends.|<pre><code>{\n \"kind\":\"collection\",\n \"uuid\":\"...\",\n \"path\":\"/foo.txt\"\n}\n{\n \"kind\":\"collection\",\n \"uuid\":\"...\"\n}</code></pre>|\n|Temporary directory|@tmp@|@\"capacity\"@: capacity (in bytes) of the storage device.\n@\"device_type\"@ (optional, default \"network\"): one of @{\"ram\", \"ssd\", \"disk\", \"network\"}@ indicating the acceptable level of performance. (*note: not yet implemented as of v1.5*)\nAt container startup, the target path will be empty. When the container finishes, the content will be discarded. This will be backed by a storage mechanism no slower than the specified type.|<pre><code>{\n \"kind\":\"tmp\",\n \"capacity\":100000000000\n}\n{\n \"kind\":\"tmp\",\n \"capacity\":1000000000,\n \"device_type\":\"ram\"\n}</code></pre>|\n|Keep|@keep@|Expose all readable collections via arv-mount.\nRequires suitable runtime constraints.|<pre><code>{\n \"kind\":\"keep\"\n}</code></pre>|\n|Mounted file or directory|@file@|@\"path\"@: absolute path (inside the container) of a file or directory that is (or is inside) another mount target.\nCan be used for \"stdin\" and \"stdout\" targets.|<pre><code>{\n \"kind\":\"file\",\n \"path\":\"/mounted_tmp/a.out\"\n}</code></pre>|\n|JSON document|@json@|A JSON-encoded string, array, or object.|<pre><code>{\n \"kind\":\"json\",\n \"content\":{\"foo\":\"bar\"}\n}</code></pre>|\n|Plain text|@text@|A plain text string.|<pre><code>{\n \"kind\":\"text\",\n \"content\":\"foo bar\\n\"\n}</code></pre>|\n\nh2(#pre-populate-output). Pre-populate output using Mount points\n\nWhen a container's output_path is a tmp mount backed by local disk, this output directory can be pre-populated with content from existing collections. This content can be specified by mounting collections at mount points that are subdirectories of output_path. Certain restrictions apply:\n\n1. Only mount points of kind @collection@ are supported.\n\n2. Mount points underneath output_path which have @\"writable\":true@ are copied into output_path during container initialization and may be updated, renamed, or deleted by the running container.  The original collection is not modified.  On container completion, files remaining in the output are saved to the output collection.   
The mount at output_path must be big enough to accommodate copies of the inner writable mounts.\n\n3. If any such mount points are configured as @\"exclude_from_output\":true@, they will be excluded from the output.\n\nIf any process in the container tries to modify, remove, or rename these mount points or anything underneath them, the operation will fail; the container output and the underlying collections used to pre-populate it are unaffected.\n\nh3. Example mount point configurations\n\nAll of the examples below are based on this collection:\n<pre><code>\nportable_data_hash cdfbe2e823222d26483d52e5089d553c+175\n\nmanifest_text: ./alice 03032680d3fa0561ef4f85071140861e+13+A04e9d06459cda00aa997565bd78001061cf5bffb@58ab593d 0:13:hello.txt\\n./bob d820b9df970e1b498e7723c50b107e1b+11+A42d162a60210479d1cfaf9fbb98d494ac6322ae6@58ab593d 0:11:hello.txt\\n./carol cf72b172ff969250ae14a893a6745440+13+A476a2fd39e14e9c03af3076bd17e3612c075ff66@58ab593d 0:13:hello.txt\\n\n</code></pre>\n\ntable(table table-bordered table-condensed).\n|{width:40%}. *Mount point*|{width:30%}. *Description*|{width:30%}. *Resulting collection manifest text*|\n|<pre><code>\"mounts\": {\n  \"/tmp/foo\": {\n    \"kind\": \"collection\",\n    \"portable_data_hash\": \"cdfbe2...+175\"\n  }\n},\n\"output_path\": \"/tmp\"\n</code></pre>|No path is specified, so the entire collection is mounted.|./*foo/*alice 030326... 0:13:hello.txt\\n\n./*foo/*bob d820b9... 0:11:hello.txt\\n\n./*foo/*carol cf72b1... 0:13:hello.txt\\n\n*Note:* Here the \".\" in streams is replaced with *foo*.|\n|<pre><code>\"mounts\": {\n  \"/tmp/foo/bar\": {\n    \"kind\": \"collection\",\n    \"portable_data_hash\": \"cdfbe2...+175\",\n    \"path\": \"alice\"\n  }\n},\n\"output_path\": \"/tmp\"\n</code></pre>|Specified path refers to the subdirectory *alice* in the collection.|./*foo/bar* 030326... 0:13:hello.txt\\n\n*Note:* Only the manifest text segment for the subdirectory *alice* is included, after replacing the subdirectory *alice* with *foo/bar*.|\n|<pre><code>\"mounts\": {\n  \"/tmp/foo/bar\": {\n    \"kind\": \"collection\",\n    \"portable_data_hash\": \"cdfbe2...+175\",\n    \"path\": \"alice/hello.txt\"\n  }\n},\n\"output_path\": \"/tmp\"\n</code></pre>|Specified path refers to the file *hello.txt* in the *alice* subdirectory.|./*foo* 030326... 0:13:*bar*\\n\n*Note:* Here the subdirectory *alice* is replaced with *foo* and the filename *hello.txt* from this subdirectory is replaced with *bar*.|\n\nh2(#symlinks-in-output). Symlinks in output\n\nWhen a container's output_path is a tmp mount backed by local disk, this output directory can contain symlinks to other files in the output directory, or to collection mount points.  If a symlink leads to a collection mount, the collection is efficiently copied into the output collection.  Symlinks leading to files or directories are expanded and created as regular files in the output collection.  Further, whether symlinks are relative or absolute, every symlink target (even targets that are symlinks themselves) must point to a path in either the output directory or a collection mount.\n"
  },
  {
    "path": "doc/_includes/_multi_host_install_custom_certificates.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nYou will need certificates for each DNS name and DNS wildcard previously listed in the \"DNS hostnames for each service\":#DNS .\n\nTo simplify certificate management, we recommend creating a single certificate for all of the hostnames, or creating a wildcard certificate that covers all possible hostnames (with the following patterns in subjectAltName):\n\n<pre>\nxarv1.example.com\n*.xarv1.example.com\n*.collections.xarv1.example.com\n*.containers.xarv1.example.com\n</pre>\n\n(Replacing @xarv1.example.com@ with your own @${DOMAIN}@)\n\nCopy your certificates to the directory specified with the variable @CUSTOM_CERTS_DIR@ in the remote directory where you copied the @provision.sh@ script. The provision script will find the certificates there.\n\nThe script expects cert/key files with these basenames (matching the role except for <i>keepweb</i>, which is split in both <i>download / collections</i>):\n\n# @balancer@         -- Optional on multi-node installations\n# @collections@      -- Part of keepweb, must be a wildcard for @*.collections.${DOMAIN}@\n# @controller@       -- Must be valid for @${DOMAIN}@ and @*.containers.${DOMAIN}@\n# @download@         -- Part of keepweb\n# @grafana@          -- Service available by default on multi-node installations\n# @keepproxy@        -- Corresponds to default domain @keep.${DOMAIN}@\n# @prometheus@       -- Service available by default on multi-node installations\n# @webshell@\n# @websocket@        -- Corresponds to default domain @ws.${DOMAIN}@\n# @workbench@\n# @workbench2@\n\nFor example, for the @keepproxy@ service the script will expect to find this certificate:\n\n<notextile>\n<pre><code>${CUSTOM_CERTS_DIR}/keepproxy.crt\n${CUSTOM_CERTS_DIR}/keepproxy.key\n</code></pre>\n</notextile>\n\nMake sure that all the FQDNs that you will use for the public-facing applications (API/controller, Workbench, Keepproxy/Keepweb) are reachable.\n\nNote: because the installer currently looks for a different certificate file for each service, if you use a single certificate, we recommend creating a symlink for each certificate and key file to the primary certificate and key, e.g.\n\n<notextile>\n<pre><code class=\"userinput\">ln -s xarv1.crt ${CUSTOM_CERTS_DIR}/controller.crt\nln -s xarv1.key ${CUSTOM_CERTS_DIR}/controller.key\nln -s xarv1.crt ${CUSTOM_CERTS_DIR}/keepproxy.crt\nln -s xarv1.key ${CUSTOM_CERTS_DIR}/keepproxy.key\n...\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/_includes/_navbar_left.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% assign highlighturl = \"\" %}\n{% for section in site.navbar[page.navsection] %}\n  {% for entry in section %}\n    {% comment %}\n    Want to highlight the current page on the left nav.\n    But some pages have been renamed with a symlink from the old page to the new one.\n    Then the URL won't match.\n    So if the URL doesn't match, as a fallback look for a page with a matching title.\n    {% endcomment %}\n\n    {% for item in entry[1] %}\n      {% if site.pages[item].url == page.url %}\n        {% assign highlighturl = site.pages[item].url %}\n      {% endif %}\n    {% endfor %}\n\n    {% if highlighturl == \"\" %}\n      {% for item in entry[1] %}\n        {% if site.pages[item].title == page.title %}\n          {% assign highlighturl = site.pages[item].url %}\n        {% endif %}\n      {% endfor %}\n    {% endif %}\n  {% endfor %}\n{% endfor %}\n\n<div class=\"col-sm-3\">\n  <div class=\"well\">\n    <ol class=\"nav nav-list\">\n      {% for section in site.navbar[page.navsection] %}\n      {% for entry in section %}\n      <li><span class=\"nav-header\">{{ entry[0] }}</span>\n\t<ol class=\"nav nav-list\">\n          {% for item in entry[1] %}\n            {% assign p = site.pages[item] %}\n            <li {% if p.url == highlighturl %} class=\"active activesubnav\" {% elsif p.title == page.subnavsection %} class=\"activesubnav\" {% endif %}>\n              <a href=\"{{ site.baseurl }}{{ p.url }}\">{{ p.title }}</a></li>\n          {% endfor %}\n        </ol>\n        {% endfor %}\n        {% endfor %}\n    </ol>\n  </div>\n</div>\n"
  },
  {
    "path": "doc/_includes/_navbar_top.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n<div class=\"navbar navbar-default navbar-fixed-top\">\n  <div class=\"container-fluid\">\n    <div class=\"navbar-header\">\n      <button type=\"button\" class=\"navbar-toggle\" data-toggle=\"collapse\" data-target=\"#bs-navbar-collapse\">\n        <span class=\"sr-only\">Toggle navigation</span>\n        <span class=\"icon-bar\"></span>\n        <span class=\"icon-bar\"></span>\n        <span class=\"icon-bar\"></span>\n      </button>\n      <a class=\"navbar-brand\" href=\"{{ site.baseurl }}/\">Arvados<sup>&trade;</sup> Docs</a>\n    </div>\n    <div class=\"collapse navbar-collapse\" id=\"bs-navbar-collapse\">\n      <ul class=\"nav navbar-nav\">\n        <!--<li {% if page.navsection == 'start' %} class=\"active\" {% endif %}><a href=\"{{ site.baseurl }}/start/index.html\">Getting&nbsp;Started</a></li>-->\n        <li {% if page.navsection == 'userguide' %} class=\"active\" {% endif %}><a href=\"{{ site.baseurl }}/user/index.html\">User&nbsp;Guide</a></li>\n        <li {% if page.navsection == 'sdk' %} class=\"active\" {% endif %}><a href=\"{{ site.baseurl }}/sdk/index.html\">SDKs</a></li>\n        <li {% if page.navsection == 'architecture' %} class=\"active\" {% endif %}><a href=\"{{ site.baseurl }}/architecture/index.html\">Architecture</a></li>\n        <li {% if page.navsection == 'api' %} class=\"active\" {% endif %}><a href=\"{{ site.baseurl }}/api/index.html\">API</a></li>\n        <li {% if page.navsection == 'admin' %} class=\"active\" {% endif %}><a href=\"{{ site.baseurl }}/admin/index.html\">Admin</a></li>\n        <li {% if page.navsection == 'installguide' %} class=\"active\" {% endif %}><a href=\"{{ site.baseurl }}/install/index.html\">Install</a></li>\n        <li><a href=\"#\" class=\"dropdown-toggle\" role=\"button\" id=\"versionMenuLink\" data-toggle=\"dropdown\" aria-haspopup=\"true\" aria-expanded=\"false\">{{ site.current_version }}{% if site.all_versions != [] %}&nbsp;&#9662;{% endif %}</a>\n          {% if site.all_versions != [] %}\n            <div class=\"dropdown-menu\" aria-labelledby=\"versionMenuLink\">\n            {% for version in site.all_versions %}\n              &nbsp;<a href=\"/{{ version }}{{ page.url }}\" class=\"dropdown-item\">{{ version }}</a><br/>\n            {% endfor %}\n            </div>\n          {% endif %}\n        </li>\n        <li><a href=\"https://arvados.org\" style=\"padding-left: 2em\">arvados.org&nbsp;&raquo;</a></li>\n      </ul>\n\n      <div class=\"pull-right\" style=\"padding-top: 6px; padding-right: 25px\">\n        <form method=\"get\" action=\"https://www.google.com/search\">\n          <div class=\"input-group\" style=\"width: 220px\">\n            <input type=\"text\" class=\"form-control\" name=\"q\" placeholder=\"search\">\n            <div class=\"input-group-addon\">\n              <button class=\"glyphicon glyphicon-search\" style=\"border: 0px\" type=\"submit\"></button>\n            </div>\n            <input type=\"hidden\" name=\"sitesearch\" value=\"doc.arvados.org\"/>\n          </div>\n        </form>\n      </div>\n    </div>\n\n{% comment %}\nThe div with id \"old-version-warning\" is automatically enabled by the Apache\nconfig under certain conditions, as described at\nhttps://dev.arvados.org/projects/ops/wiki/Docarvadosorg.  
Changing the class\nshould be safe, but please double check the matching and substitution that is\ndone in the Apache config to be sure before you modify this div.\n{% endcomment %}\n   <div class=\"alert alert-block alert-info\" style=\"display: none;\" id=\"old-version-warning\">\n     WARNING - you are viewing the documentation for an old version of Arvados. For the latest version, click <a href=\"/\">here</a>.\n   </div>\n   <div class=\"alert alert-block alert-info\" style=\"display: none;\" id=\"annotate-notify\">\n     <div style=\"margin-top: -26px; font-size: 12pt\">Hey!  You can use the annotation sidebar from <a href=\"https://hypothes.is\">hypothes.is</a> to make public comments and private notes\n       <span style=\"font-size: 32pt\">&rarr;</span></div>\n      <button type=\"button\" class=\"close\" onclick=\"dismissAnnotateNotify()\">Got it</button>\n   </div>\n\n   <script>\n     function dismissAnnotateNotify() {\n\t window.localStorage.setItem(\"dismiss-annotate-notify\", \"true\");\n         $('#annotate-notify').attr('style', \"display: none;\");\n     }\n     if (window.localStorage.getItem(\"dismiss-annotate-notify\") === \"true\") {\n\t dismissAnnotateNotify();\n     } else {\n         $('#annotate-notify').attr('style', \"display: inline-block;\");\n     }\n   </script>\n\n  </div>\n</div>\n"
  },
  {
    "path": "doc/_includes/_notebox_begin.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n<div class=\"alert alert-block alert-info\">\n  <button type=\"button\" class=\"close\" data-dismiss=\"alert\">&times;</button>\n  <h4>Note:</h4>\n"
  },
  {
    "path": "doc/_includes/_notebox_begin_warning.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n<div class=\"alert alert-block alert-warning\">\n  <h4>Note:</h4>\n"
  },
  {
    "path": "doc/_includes/_notebox_end.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n</div>\n"
  },
  {
    "path": "doc/_includes/_restart_api.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2(#restart-api). Restart the API server and controller\n\n*Make sure the cluster config file is up to date on the API server host* then restart the API server and controller processes to ensure the configuration changes are visible to the whole cluster.\n\n<notextile>\n<pre><code># <span class=\"userinput\">systemctl restart nginx arvados-controller</span>\n# <span class=\"userinput\">arvados-server check</span>\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/_includes/_setup_debian_repo.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n\npackages_to_install may be a space-separated string\n{% endcomment %}\n\nSet up the Arvados package repository\n{%- if packages_to_install == nil %}\n{%- elsif packages_to_install contains \" \" %} and install the packages\n{%- else %} and install @{{ packages_to_install }}@\n{%- endif %} by running these commands:\n\n<notextile>\n<pre><code># <span class=\"userinput\">install -d /etc/apt/keyrings</span>\n# <span class=\"userinput\">curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg</span>\n# <span class=\"userinput\">declare $(grep \"^VERSION_CODENAME=\" /etc/os-release || echo VERSION_CODENAME=MISSING)</span>\n# <span class=\"userinput\">tee /etc/apt/sources.list.d/arvados.sources &gt;/dev/null &lt;&lt;EOF\nTypes: deb\nURIs: https://apt.arvados.org/$VERSION_CODENAME\nSuites: $VERSION_CODENAME\nComponents: main\nSigned-by: /etc/apt/keyrings/arvados.asc\nEOF</span>\n# <span class=\"userinput\">apt update</span>\n{%- if packages_to_install != nil %}\n# <span class=\"userinput\">apt install {{ packages_to_install }}</span>\n{% endif -%}\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/_includes/_setup_redhat_repo.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n\nmodules_to_enable and packages_to_install may be space-separated strings\n{% endcomment %}\n\n{%- if modules_to_enable != nil %}\n{% include 'notebox_begin_warning' %}\n\nArvados tools require newer language runtimes than the default versions included with these distributions. These instructions will **upgrade language runtimes for the entire system**. Check that won't interfere with any existing software before you proceed.\n\n{% include 'notebox_end' %}\n{% endif -%}\n\nSet up the Arvados package repository\n{%- if packages_to_install == nil %}\n{%- elsif packages_to_install contains \" \" %} and install the packages\n{%- else %} and install @{{ packages_to_install }}@\n{%- endif %} by running these commands:\n\n<notextile>\n<pre><code># <span class=\"userinput\">tee /etc/yum.repos.d/arvados.repo &gt;/dev/null &lt;&lt;'EOF'\n[arvados]\nname=Arvados\nbaseurl=https://rpm.arvados.org/RHEL/$releasever/os/$basearch/\ngpgcheck=1\ngpgkey=https://rpm.arvados.org/RHEL/$releasever/RPM-GPG-KEY-arvados\nEOF</span>\n{%- if modules_to_enable != nil %}\n# <span class=\"userinput\">dnf module enable {{ modules_to_enable }}</span>\n{% endif -%}\n{%- if packages_to_install != nil -%}\n# <span class=\"userinput\">dnf install {{ packages_to_install }}</span>\n{% endif -%}\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/_includes/_singularity_mksquashfs_configuration.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{{ mksquashfs_header|default: \"h2\" }}(#singularity_mksquashfs_configuration). Singularity mksquashfs configuration\n\n{% if show_docker_warning != nil %}\n{% include 'notebox_begin_warning' %}\nThis section is only relevant when using Singularity. Skip this section when using Docker.\n{% include 'notebox_end' %}\n{% endif %}\n\nDocker images are converted on the fly by @mksquashfs@, which can consume a considerable amount of RAM. The RAM usage of mksquashfs can be restricted in @/etc/singularity/singularity.conf@ with a line like @mksquashfs mem = 256M@. The amount of memory made available for mksquashfs should be configured lower than the smallest amount of memory requested by a container on the cluster to avoid the conversion being killed for using too much memory. The default memory allocation in CWL is 256M, so that is also a good choice for the @mksquashfs mem@ setting.\n"
  },
  {
    "path": "doc/_includes/_ssh_addkey.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n\nYou may now proceed to \"adding your key to the Arvados Workbench.\":#workbench\n\nh1(#workbench). Adding your key to Arvados Workbench\n\nIn the Workbench top navigation menu, click on the dropdown menu icon <i class=\"fa fa-lg fa-user\"></i> to access the Account Management menu. Then, click on the menu item *Ssh keys* to go to the *SSH keys* page. Click on the <span class=\"btn btn-primary\">+ ADD NEW SSH KEY</span> button in the upper-right on that page. You will see a popup as shown in this screenshot:\n\n!{width: 100%;}{{ site.baseurl }}/images/ssh-adding-public-key.png!\n\nPaste your _public_ key into the text area labeled *Public Key*, and click on the <span class=\"btn btn-sm btn-primary\">ADD NEW SSH KEY</span> button in lower-right. You are now ready to \"log into an Arvados VM\":#login.\n\nh1(#login). Using SSH to log into an Arvados VM\n\nTo see a list of virtual machines that you have access to, click on the dropdown menu icon <i class=\"fa fa-lg fa-user\"></i> in the upper right corner of the top navigation menu to access the Account Management menu. Then, click on the menu item *Virtual Machines*.\n\nYou will then see a page that lists the virtual machines you can access. The *Host name* column lists the name of each available VM.  The *Login name* column lists your login name on that VM.  The *Command line* column provides a sample @ssh@ command.\n\n!{width: 100%;}{{ site.baseurl }}/images/vm-access-with-webshell.png!\n\nAt the bottom of the page there may be additional instructions for connecting your specific Arvados instance.  If so, follow your site-specific instructions.  If there are no site-specific instructions, you can probably connect directly with @ssh@.\n\nThe following are generic instructions.  In these examples, the login name will be *_you_* and the host domain will be *_ClusterID.example.com_*.  Replace these with your login name and hostname as appropriate.\n"
  },
  {
    "path": "doc/_includes/_ssh_intro.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n\nArvados requires a public SSH key in order to securely log in to an Arvados VM instance. The three sections below help you get started:\n\n# \"Getting your SSH key\":#gettingkey\n# \"Adding your key to Arvados Workbench\":#workbench\n# \"Using SSH to log into an Arvados VM instance\":#login\n\n"
  },
  {
    "path": "doc/_includes/_ssl_config_multi.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2(#certificates). Choose the SSL/TLS configuration (SSL_MODE)\n\nArvados requires a valid TLS certificate to work correctly. This installer supports these options:\n\n# @lets-encrypt@: \"automatically obtain and install an SSL certificates for your hostnames\":#lets-encrypt\n# @bring-your-own@: \"supply your own certificates in the @certs@ directory\":#bring-your-own\n\nh3(#lets-encrypt). Using a Let's Encrypt certificate\n\nIn the default configuration, this installer gets a valid certificate via Let's Encrypt. If you have the <b>CLUSTER.DOMAIN</b> domain in a route53 zone, you can set <b>USE_LETSENCRYPT_ROUTE53</b> to <b>YES</b> and supply appropriate credentials so that Let's Encrypt can use dns-01 validation to get the appropriate certificates.\n\n<notextile>\n<pre><code>SSL_MODE=\"lets-encrypt\"\nUSE_LETSENCRYPT_ROUTE53=\"yes\"\nLE_AWS_REGION=\"us-east-1\"\nLE_AWS_ACCESS_KEY_ID=\"AKIABCDEFGHIJKLMNOPQ\"\nLE_AWS_SECRET_ACCESS_KEY=\"thisistherandomstringthatisyoursecretkey\"\n</code></pre>\n</notextile>\n\nPlease note that when using AWS, EC2 instances can have a default hostname that ends with <i>amazonaws.com</i>. Let's Encrypt has a blacklist of domain names for which it will not issue certificates, and that blacklist includes the <i>amazonaws.com</i> domain, which means the default hostname can not be used to get a certificate from Let's Encrypt.\n\nh3(#bring-your-own). Bring your own certificates\n\nTo supply your own certificates, change the configuration like this:\n\n<notextile>\n<pre><code>SSL_MODE=\"bring-your-own\"\n</code></pre>\n</notextile>\n\n{% include 'multi_host_install_custom_certificates' %}\n\nAll certificate files will be used by nginx. You may need to include intermediate certificates in your certificate files. See \"the nginx documentation\":http://nginx.org/en/docs/http/configuring_https_servers.html#chains for more details.\n\nh4(#secure-tls-keys). Securing your TLS certificate keys (AWS specific) (optional)\n\nWhen using @SSL_MODE=bring-your-own@, you can keep your TLS certificate keys encrypted on the server nodes. 
This reduces the risk of certificate leaks from node disk volume snapshots or backups.\n\nThis feature is currently implemented in AWS by providing the certificate keys’ password via Amazon’s \"Secrets Manager\":https://aws.amazon.com/es/secrets-manager/ service, and installing appropriate services on the nodes that provide this password to nginx via a file that only lives in system RAM.\n\nIf you use the installer's Terraform code, the secret and related permission cloud resources are created automatically, and you can customize the secret's name by editing @terraform/services/terraform.tfvars@ and setting its suffix in @ssl_password_secret_name_suffix@.\n\nIn @local.params@ you need to set @SSL_KEY_ENCRYPTED@ to @yes@ and change the default values for @SSL_KEY_AWS_SECRET_NAME@ and @SSL_KEY_AWS_REGION@ if necessary.\n\nThen, if your certificate key file is not yet encrypted, you can generate an encrypted version of it by running the @openssl@ command as follows:\n\n<notextile>\n<pre><code>openssl rsa -aes256 -in your.key -out your.encrypted.key\n</code></pre>\n</notextile>\n(this will ask you to type the encryption password)\n\nCopy this encrypted key file, instead of the plain key file, to the @${CUSTOM_CERTS_DIR}@ directory.\n\nTo allow the appropriate nodes to decrypt the key file, you should set the password in Amazon Secrets Manager. There are a couple of ways to do this:\n\n# Through the AWS web interface, which may be the easiest; just make sure to store it as \"plain text\" instead of JSON.\n# By using the AWS CLI tools, for example:\n<notextile>\n<pre><code>aws secretsmanager put-secret-value --secret-id pkey-pwd --secret-string \"p455w0rd\" --region us-east-1\n</code></pre>\n</notextile>Here @pkey-pwd@ should match what's set in @SSL_KEY_AWS_SECRET_NAME@, and @us-east-1@ what's set in @SSL_KEY_AWS_REGION@.\n\nNote that the AWS secret should be set before running @installer.sh deploy@ to avoid failures when trying to start the @nginx@ servers.\n\nIf you ever need to change the encryption password on a running cluster, you should first change the secret's value on AWS, and only then copy the newly encrypted key file to @${CUSTOM_CERTS_DIR}@ and re-run the deploy command."
  },
  {
    "path": "doc/_includes/_ssl_config_single.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2(#certificates). Choose the SSL configuration (SSL_MODE)\n\nArvados requires an SSL certificate to work correctly. This installer supports these options:\n\n* @self-signed@: let the installer create a self-signed certificate\n* @lets-encrypt@: automatically obtain and install an SSL certificate for your hostname\n* @bring-your-own@: supply your own certificate in the `certs` directory\n\nh3(#self-signed). Using a self-signed certificate\n\nIn the default configuration, this installer uses self-signed certificate(s):\n\n<notextile>\n<pre><code>SSL_MODE=\"self-signed\"\n</code></pre>\n</notextile>\n\nThis works everywhere and does not require that you have a domain name.  However, after installation, users will need to \"install the self-signed root certificate in the browser.\":#ca_root_certificate\"\n\nh3(#lets-encrypt). Using a Let's Encrypt certificate\n\nTo automatically get a valid certificate via Let's Encrypt, change the configuration like this:\n\n<notextile>\n<pre><code>SSL_MODE=\"lets-encrypt\"\n</code></pre>\n</notextile>\n\nThis requires that you have a \"real\" hostname that you control.  The hostname for your Arvados cluster must be defined in @HOSTNAME_EXT@ and resolve to the public IP address of your Arvados instance, so that Let's Encrypt can validate the domainname ownership and issue the certificate.\n\nWhen using AWS, EC2 instances can have a default hostname that ends with <i>amazonaws.com</i>. Let's Encrypt has a blacklist of domain names for which it will not issue certificates, and that blacklist includes the <i>amazonaws.com</i> domain, which means the default hostname can not be used to get a certificate from Let's Encrypt.\n\nh3(#bring-your-own). Bring your own certificate\n\nTo supply your own certificate, change the configuration like this:\n\n<notextile>\n<pre><code>SSL_MODE=\"bring-your-own\"\n</code></pre>\n</notextile>\n\nCopy your certificate files to the directory specified with the variable @CUSTOM_CERTS_DIR@. The provision script will find it there. The certificate and its key need to be copied to a file named after @HOSTNAME_EXT@. For example, if @HOSTNAME_EXT@ is defined as @my-arvados.example.net@, the script will look for\n\n<notextile>\n<pre><code>${CUSTOM_CERTS_DIR}/my-arvados.example.net.crt\n${CUSTOM_CERTS_DIR}/my-arvados.example.net.key\n</code></pre>\n</notextile>\n\nAll certificate files will be used by nginx. You may need to include intermediate certificates in your certificate file. See \"the nginx documentation\":http://nginx.org/en/docs/http/configuring_https_servers.html#chains for more details.\n"
  },
  {
    "path": "doc/_includes/_start_service.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2(#start-service). Start the service\n\n<notextile>\n<pre><code># <span class=\"userinput\">systemctl enable --now {{arvados_component}}</span>\n# <span class=\"userinput\">systemctl status {{arvados_component}}</span>\n[...]\n</code></pre>\n</notextile>\n\nIf @systemctl status@ indicates it is not running, use @journalctl@ to check logs for errors:\n\n<notextile>\n<pre><code># <span class=\"userinput\">journalctl --since -5min -u {{ arvados_component | split: ' ' | join: ' -u ' }}</span>\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/_includes/_supportedlinux.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\ntable(table table-bordered table-condensed).\n|_. *Supported Linux Distributions*|\n|AlmaLinux 10 (since 10.0)|\n|AlmaLinux 9 (since 9.2)|\n|AlmaLinux 8 (since 8.8)|\n|Debian 12 (\"bookworm\")|\n|Red Hat Enterprise Linux 10 (since 10.0)|\n|Red Hat Enterprise Linux 9 (since 9.2)|\n|Red Hat Enterprise Linux 8 (since 8.8)|\n|Rocky Linux 10 (since 10.0)|\n|Rocky Linux 9 (since 9.2)|\n|Rocky Linux 8 (since 8.8)|\n|Ubuntu 24.04 (\"noble\")|\n|Ubuntu 22.04 (\"jammy\")|\n"
  },
  {
    "path": "doc/_includes/_tutorial_expectations.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'notebox_begin' %}\nThis tutorial assumes that you have access to \"Arvados command line tools\":{{ site.baseurl }}/user/getting_started/setup-cli.html, configured your \"API token\":{{site.baseurl}}/user/reference/api-tokens.html, and confirmed a \"working environment\":{{site.baseurl}}/user/getting_started/check-environment.html.\n{% include 'notebox_end' %}\n"
  },
  {
    "path": "doc/_includes/_tutorial_expectations_workstation.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'notebox_begin' %}\nThis tutorial assumes that you have installed the Arvados \"Command line SDK\":{{site.baseurl}}/sdk/cli/install.html and \"Python SDK\":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation and have a \"working environment.\":{{site.baseurl}}/user/getting_started/check-environment.html\n{% include 'notebox_end' %}\n"
  },
  {
    "path": "doc/_includes/_tutorial_hello_cwl.liquid",
    "content": "#!/usr/bin/env cwl-runner\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\ninputs: []\noutputs: []\narguments: [\"echo\", \"hello world!\"]\n"
  },
  {
    "path": "doc/_includes/_webring.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% assign n = 0 %}\n{% assign prev = \"\" %}\n{% assign nx = 0 %}\n{% for section in site.navbar[page.navsection] %}\n  {% for entry in section %}\n    {% for item in entry[1] %}        \n      {% assign p = site.pages[item] %}\n      {% if nx == 1 %}\n        <hr>\n        {% if prev != \"\" %}\n          <a href=\"{{ site.baseurl }}{{ prev.url }}\" class=\"pull-left\">Previous: {{ prev.title }}</a>\n        {% endif %}\n        <a href=\"{{ site.baseurl }}{{ p.url }}\" class=\"pull-right\">Next: {{ p.title }}</a>\n        {% assign nx = 0 %}\n        {% assign n = 1 %}\n      {% endif %}\n      {% if p.url == page.url %}\n        {% assign nx = 1 %}\n      {% else %}\n        {% assign prev = p %}\n      {% endif %}\n    {% endfor %}\n  {% endfor %}\n{% endfor %}\n{% if n == 0 && prev != \"\" %}\n  <hr>\n  <a href=\"{{ site.baseurl }}{{ prev.url }}\" class=\"pull-left\">Previous: {{ prev.title }}</a>\n  {% assign n = 1 %}\n{% endif %}"
  },
  {
    "path": "doc/_includes/_what_is_cwl.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe \"Common Workflow Language (CWL)\":http://commonwl.org is a multi-vendor open standard for describing analysis tools and workflows that are portable across a variety of platforms.  CWL is the primary way to develop and run workflows for Arvados.  Arvados supports versions \"v1.0\":http://commonwl.org/v1.0, \"v1.1\":http://commonwl.org/v1.1, and \"v1.2\":http://commonwl.org/v1.2 of the CWL standard.\n"
  },
  {
    "path": "doc/_layouts/default.html.liquid",
    "content": "{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n<!DOCTYPE html>\n<html>\n  <head>\n    <meta charset=\"utf-8\">\n    <title>{% unless page.title == \"Arvados | Documentation\" %} Arvados {% if page.navmenu %}| {{ page.navmenu }} {% endif %} | {% endunless %}{{ page.title }}</title>\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">\n    <meta name=\"description\" content=\"Arvados documentation site\">\n    <meta name=\"author\" content=\"Arvados authors\">\n    {% if site.current_version != site.latest_version %}\n    <meta name=\"robots\" content=\"noindex\">\n    {% endif %}\n    <link rel=\"icon\" href=\"{{ site.baseurl }}/images/favicon.ico\" type=\"image/x-icon\">\n    <link rel=\"shortcut icon\" href=\"{{ site.baseurl }}/images/favicon.ico\" type=\"image/x-icon\">\n    <link href=\"{{ site.baseurl }}/css/bootstrap.css\" rel=\"stylesheet\">\n    <link href=\"{{ site.baseurl }}/css/nav-list.css\" rel=\"stylesheet\">\n    <link href=\"{{ site.baseurl }}/css/badges.css\" rel=\"stylesheet\">\n    <link href=\"{{ site.baseurl }}/css/code.css\" rel=\"stylesheet\">\n    <link href=\"{{ site.baseurl }}/css/font-awesome.css\" rel=\"stylesheet\">\n    <link href=\"{{ site.baseurl }}/css/carousel-override.css\" rel=\"stylesheet\">\n    <link href=\"{{ site.baseurl }}/css/button-override.css\" rel=\"stylesheet\">\n    <link href=\"{{ site.baseurl }}/css/images.css\" rel=\"stylesheet\">\n    <link href=\"{{ site.baseurl }}/css/layout.css\" rel=\"stylesheet\">\n    <script src=\"{{ site.baseurl }}/js/jquery.min.js\"></script>\n    <script src=\"{{ site.baseurl }}/js/bootstrap.min.js\"></script>\n    <script src=\"https://hypothes.is/embed.js\" async></script>\n\n    {% include 'matomo_analytics' %}\n    {% include 'google_analytics' %}\n\n  </head>\n  <body class=\"nopad\">\n    {% include 'navbar_top' %}\n\n    {% if page.navsection == 'top' or page.no_nav_left %}\n    {{ content }}\n    {% else %}\n\n    <div class=\"container-fluid\" style=\"padding-right: 30px\">\n\n      <div class=\"row\">\n        {% include 'navbar_left' %}\n        <div class=\"col-sm-9\">\n          <h1>{{ page.title }}</h1>\n          {{ content }}\n          {% include 'webring' %}\n        </div>\n      </div>\n\n      <div style=\"height: 2em\"></div>\n\n    </div>\n    {% endif %}\n\n{% if page.no_nav_left %}\n{% else %}\n<p style=\"text-align: center\"><small>\nThe content of this documentation is licensed under the\n<a href=\"{{ site.baseurl }}/user/copying/by-sa-3.0.html\">Creative\n  Commons Attribution-Share Alike 3.0 United States</a> licence.<br>\nCode samples in this documentation are licensed under the\n<a href=\"{{ site.baseurl }}/user/copying/LICENSE-2.0.html\">Apache License, Version 2.0.</a></small>\n</p>\n{% endif %}\n\n  </body>\n</html>\n"
  },
  {
    "path": "doc/admin/cloudtest.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Testing cloud configuration\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe @arvados-server@ package includes a @cloudtest@ tool that checks compatibility between your Arvados configuration, your cloud driver, your cloud provider's API, your cloud provider's VM instances, and the worker image you use with the \"cloud dispatcher\":../install/crunch2-cloud/install-dispatch-cloud.html.\n\n@arvados-server cloudtest@ performs the following steps:\n# Create a new instance\n# Wait for it to finish booting\n# Run a shell command on the new instance (optional)\n# Pause while you log in to the new instance and do other tests yourself (optional)\n# Shut down the instance\n\nThis is an easy way to expose problems like these:\n* Configured cloud credentials don't work\n* Configured image types don't work\n* Configured driver is not compatible with your cloud API/region\n* Newly created instances are not usable due to a network problem or misconfiguration\n* Newly created instances do not accept the configured SSH private key\n* Selected machine image does not boot properly\n* Selected machine image is incompatible with some instance types\n* Driver has bugs\n\nh2. Typical uses\n\nBefore bringing up the @arvados-dispatch-cloud@ service for the first time, we recommend running @cloudtest@ to check your configuration:\n\n<notextile><pre>\n$ <span class=\"userinput\">arvados-server cloudtest -command \"crunch-run --list\"</span>\n</pre></notextile>\n\nBefore updating your configuration to use a new VM image, we recommend running @cloudtest@ with the new image:\n\n<notextile><pre>\n$ <span class=\"userinput\">arvados-server cloudtest -image-id <b>new_image_id</b> -command \"crunch-run --list\"</span>\n</pre></notextile>\n\nAfter adding an instance type to your configuration, we recommend running @cloudtest@ with the new instance type:\n\n<notextile><pre>\n$ <span class=\"userinput\">arvados-server cloudtest -instance-type <b>new_instance_type_name</b></span>\n</pre></notextile>\n\nFor a full list of options, use the @-help@ flag:\n\n<notextile><pre>\n$ <span class=\"userinput\">arvados-server cloudtest -help</span>\nUsage:\n  -command string\n        Run an interactive shell command on the test instance when it boots\n  -config file\n        Site configuration file (default \"/etc/arvados/config.yml\")\n  -destroy-existing\n        Destroy any existing instances tagged with our InstanceSetID, instead of erroring out\n  -image-id string\n        Image ID to use when creating the test instance (if empty, use cluster config)\n  -instance-set-id value\n        InstanceSetID tag value to use on the test instance (default \"cloudtest-user@hostname.example\")\n  -instance-type string\n        Instance type to create (if empty, use cheapest type in config)\n  -pause-before-destroy\n        Prompt and wait before destroying the test instance\n</pre></notextile>\n"
  },
  {
    "path": "doc/admin/collection-managed-properties.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Configuring collection's managed properties\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nCollection's managed properties allow a cluster administrator to enable some special behaviors regarding properties at creation & update times.\nThis page describes how to enable and configure these behaviors on the API server.\n\nh3. API Server configuration\n\nThe @Collections.ManagedProperties@ setting from the @config.yml@ file is used for enabling any of the following behaviors:\n\nh4. Pre-assigned property key & value\n\nFor every newly created collection, assign a predefined key/value pair if it isn't already passed at creation time:\n\n<pre>\nCollections:\n  ManagedProperties:\n    foo: {Value: bar}\n</pre>\n\nh4. Original owner UUID\n\nThis behavior will assign to a property key the UUID of the user who owns the collection's contaning project.\n\n<pre>\nCollections:\n  ManagedProperties:\n    responsible_person_uuid: {Function: original_owner}\n</pre>\n\nh4. Protected properties\n\nIf there's a need to prevent a non-admin user from modifying a specific property, even by its owner, the @Protected@ attribute can be set to @true@, like so:\n\n<pre>\nCollections:\n  ManagedProperties:\n    sample_id: {Protected: true}\n</pre>\n\nThis configuration won't assign a @sample_id@ property on collection creation, but if the user adds it to any collection, its value is protected from that point on.\n\nAnother use case would be to protect properties that were automatically assigned by the system:\n\n<pre>\nCollections:\n  ManagedProperties:\n    responsible_person_uuid: {Function: original_owner, Protected: true}\n</pre>\n\nIf missing, the @Protected@ attribute it’s assumed as being @false@ by default.\n\nh3. Supporting example scripts\n\nWhen enabling this feature, there may be pre-existing collections that won't have the managed properties just configured. The following script examples may be helpful to sync these older collections.\n\nFor the following examples we assume that the @responsible_person_uuid@ property is set as @{Function: original_owner, Protected: true}@.\n\nh4. List uuid/names of collections without @responsible_person_uuid@ property\n\nThe collection's managed properties feature assigns the configured properties to newly created collections. This means that previously existing collections won't get the default properties and if needed, they should be assigned manually.\n\nThe following example script outputs a listing of collection UUIDs and names of those collections that don't include the @responsible_person_uuid@ property.\n\n{% codeblock as python %}\n{% include 'admin_list_collections_without_property_py' %}\n{% endcodeblock %}\n\nh4. Update the @responsible_person_uuid@ property from nil to X in the project hierarchy rooted at P\n\nWhen enabling @responsible_person_uuid@, new collections will get this property's value set to the user who owns the root project where the collection is placed, but older collections won't have the property set. The following example script allows an administrator to set the @responsible_person_uuid@ property to collections below a certaing project hierarchy.\n\n{% codeblock as python %}\n{% include 'admin_set_property_to_collections_under_project_py' %}\n{% endcodeblock %}\n\nh4. 
Update the @responsible_person_uuid@ property from X to Y on all collections\n\nThis example can be useful to change responsibility from one user to another.\n\nPlease note that the following code should run with admin privileges, assuming that the managed property is @Protected@.\n\n{% codeblock as python %}\n{% include 'admin_update_collection_property_py' %}\n{% endcodeblock %}\n\n"
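For one-off fixes, an admin can also set a managed property on a single collection from the command line. The following is a sketch using the @arv@ CLI with placeholder UUIDs; because the property is @Protected@ in these examples, it must be run with an admin token. Note that @update@ replaces the whole @properties@ hash, so include any pre-existing properties you want to keep:\n\n<pre>\narv collection update --uuid zzzzz-4zz18-xxxxxxxxxxxxxxx --collection '{\n  \"properties\": {\"responsible_person_uuid\": \"zzzzz-tpzed-xxxxxxxxxxxxxxx\"}}'\n</pre>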
  },
  {
    "path": "doc/admin/collection-versioning.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Configuring collection versioning\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis page describes how to enable and configure the collection versioning feature on the API server.\n\nh3. Configuration\n\nThere are 2 configuration settings in the @Collections@ section of @config.yml@ that control this feature.\n\n<pre>\n    Collections:\n      # If true, enable collection versioning.\n      # When a collection's preserve_version field is true or the current version\n      # is older than the amount of seconds defined on PreserveVersionIfIdle,\n      # a snapshot of the collection's previous state is created and linked to\n      # the current collection.\n      CollectionVersioning: true\n\n      # This setting control the auto-save aspect of collection versioning, and can be set to:\n      #   0s = auto-create a new version on every update.\n      #  -1s = never auto-create new versions.\n      # > 0s = auto-create a new version when older than the specified number of seconds.\n      PreserveVersionIfIdle: 10s\n</pre>\n\nNote that if you set @CollectionVersioning@ to @false@ after being enabled, old versions will still be accessible, but further changes will not be versioned.\n\nh3. Using collection versioning\n\n\"Discussed in the user guide\":{{site.baseurl}}/user/topics/collection-versioning.html\n"
  },
  {
    "path": "doc/admin/config-urls.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: InternalURLs and ExternalURL\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe Arvados configuration is stored at @/etc/arvados/config.yml@. See the \"Configuration reference\":config.html for more detail.\n\nThe @Services@ section lists a number of Arvados services, each with an @InternalURLs@ and/or @ExternalURL@ configuration key. This document explains the precise meaning of these configuration keys, and how they are used by the Arvados services.\n\nThe @ExternalURL@ is the address where the service should be reachable by clients, both from inside and from outside the Arvados cluster. Some services do not expose an Arvados API, only Prometheus metrics. In that case, @ExternalURL@ is not used.\n\nThe keys under @InternalURLs@ are the URLs through which Arvados system components can connect to one another, including the reverse proxy (e.g. Nginx) that fronts Arvados services. The exception is the @Keepstore@ service, where clients on the local network connect directly to @Keepstore.InternalURLs@ (while clients from outside networks connect to @Keepproxy.ExternalURL@). If a service is not fronted by a reverse proxy, e.g. when its endpoint only exposes Prometheus metrics, the intention is that metrics are collected directly from the endpoints defined in @InternalURLs@.\n\nEach entry in the @InternalURLs@ section may also indicate a @ListenURL@ to determine the protocol, address/interface, and port where the service process will listen, in case the desired listening address differs from the @InternalURLs@ key itself -- for example, when passing internal traffic through a reverse proxy.\n\nIf the Arvados service lives behind a reverse proxy (e.g. Nginx), configuring the reverse proxy and the @InternalURLs@ and @ExternalURL@ values must be done in concert.\n\nh2. Overview\n\n<div class=\"offset1\">\ntable(table table-bordered table-condensed).\n|_.Service     |_.ExternalURL required? |_.InternalURLs required?|_.InternalURLs must be reachable from other cluster nodes?|_.Note|\n|railsapi       |no                     |yes|no ^1^|InternalURLs only used by Controller|\n|controller     |yes                    |yes|yes ^2,4^|InternalURLs used by reverse proxy and container shell connections|\n|arvados-dispatch-cloud|no              |yes|no ^3^|InternalURLs only used to expose Prometheus metrics|\n|arvados-dispatch-lsf|no                |yes|no ^3^|InternalURLs only used to expose Prometheus metrics|\n|container web services|yes             |no |no    |controller's InternalURLs are used by reverse proxy (e.g. Nginx)|\n|git-ssh        |yes                    |no |no    ||\n|keepproxy      |yes                    |yes|no ^2^|InternalURLs only used by reverse proxy (e.g. Nginx)|\n|keepstore      |no                     |yes|yes   |All clients connect to InternalURLs|\n|keep-balance   |no                     |yes|no ^3^|InternalURLs only used to expose Prometheus metrics|\n|keep-web       |yes                    |yes|yes ^5^|InternalURLs used by reverse proxy and container log API|\n|websocket      |yes                    |yes|no ^2^|InternalURLs only used by reverse proxy (e.g. Nginx)|\n|workbench2     |yes                    |no|no     ||\n</div>\n\n^1^ If @Controller@ runs on a different host than @RailsAPI@, the @InternalURLs@ will need to be reachable from the host that runs @Controller@.\n^2^ If the reverse proxy (e.g. 
Nginx) does not run on the same host as the Arvados service it fronts, the @InternalURLs@ will need to be reachable from the host that runs the reverse proxy.\n^3^ If the Prometheus metrics are not collected from the same machine that runs the service, the @InternalURLs@ will need to be reachable from the host that collects the metrics.\n^4^ If dispatching containers to HPC (Slurm/LSF) and there are multiple @Controller@ services, they must be able to connect to one another using their InternalURLs, otherwise the \"tunnel connections\":{{site.baseurl}}/architecture/hpc.html enabling \"container shell access\":{{site.baseurl}}/install/container-shell-access.html will not work.\n^5^ All URLs in @Services.WebDAV.InternalURLs@ must be reachable by all Controller services. Alternatively, each entry in @Services.Controller.InternalURLs@ must have a corresponding entry in @Services.WebDAV.InternalURLs@ with the same hostname.\n\nWhen @InternalURLs@ do not need to be reachable from other nodes, it is most secure to use loopback addresses as @InternalURLs@, e.g. @http://127.0.0.1:9005@.\n\nIt is recommended to use a split-horizon DNS setup where the hostnames specified in @ExternalURL@ resolve to an internal IP address from inside the Arvados cluster, and a publicly routed external IP address when resolved from outside the cluster. This simplifies firewalling and provides optimally efficient traffic routing. In a cloud environment where traffic that flows via public IP addresses is charged, using split-horizon DNS can also avoid unnecessary expense.\n\nh2. Examples\n\nThe remainder of this document walks through a number of examples to provide more detail.\n\nh3. Keep-balance\n\nConsider this section for the @Keep-balance@ service:\n\n{% codeblock as yaml %}\n      Keepbalance:\n        InternalURLs:\n          \"http://ip-10-0-1-233.internal:9005/\": {}\n{% endcodeblock %}\n\n@Keep-balance@ has an API endpoint, but it is only used to expose \"Prometheus\":https://prometheus.io metrics.\n\nThere is no @ExternalURL@ key because @Keep-balance@ does not have an Arvados API; no Arvados services need to connect to @Keep-balance@.\n\nThe value for @InternalURLs@ tells the @Keep-balance@ service to start up and listen on port 9005, if it is started on a host where @ip-10-0-1-233.internal@ resolves to a local IP address. If @Keep-balance@ is started on a machine where the @ip-10-0-1-233.internal@ hostname does not resolve to a local IP address, it will refuse to start up, because it will not be able to find a local IP address to listen on.\n\nIt is also possible to use IP addresses in @InternalURLs@, for example:\n\n{% codeblock as yaml %}\n      Keepbalance:\n        InternalURLs:\n          \"http://127.0.0.1:9005/\": {}\n{% endcodeblock %}\n\nIn this example, @Keep-balance@ would start up and listen on port 9005 at the @127.0.0.1@ IP address. Prometheus would only be able to access the @Keep-balance@ metrics if it could reach that IP and port, e.g. if it runs on the same machine.\n\nFinally, it is also possible to listen on all interfaces, for example:\n\n{% codeblock as yaml %}\n      Keepbalance:\n        InternalURLs:\n          \"http://0.0.0.0:9005/\": {}\n{% endcodeblock %}\n\nIn this case, @Keep-balance@ will listen on port 9005 on all IP addresses local to the machine.\n\nh3. 
Keepstore\n\nConsider this section for the @Keepstore@ service:\n\n{% codeblock as yaml %}\n      Keepstore:\n        InternalURLs:\n          \"http://keep0.ClusterID.example.com:25107\": {}\n          \"http://keep1.ClusterID.example.com:25107\": {}\n{% endcodeblock %}\n\nThere is no @ExternalURL@ key because @Keepstore@ is only accessed from inside the Arvados cluster. For access from outside, all traffic goes via @Keepproxy@.\n\nWhen @Keepstore@ is installed on the host where @keep0.ClusterID.example.com@ resolves to a local IP address, it will listen on port 25107 on that IP address. Likewise on the @keep1.ClusterID.example.com@ host. On all other systems, @Keepstore@ will refuse to start.\n\nh3. Keepproxy\n\nConsider this section for the @Keepproxy@ service:\n\n{% codeblock as yaml %}\n      Keepproxy:\n        ExternalURL: https://keep.ClusterID.example.com\n        InternalURLs:\n          \"http://localhost:25107\": {}\n{% endcodeblock %}\n\nThe @ExternalURL@ advertised is @https://keep.ClusterID.example.com@. The @Keepproxy@ service will start up on @localhost@ port 25107, however. This is possible because we also configure Nginx to terminate SSL and sit in front of the @Keepproxy@ service:\n\n<notextile><pre><code>upstream keepproxy {\n  server                127.0.0.1:<span class=\"userinput\">25107</span>;\n}\n\nserver {\n  listen                  443 ssl;\n  server_name             <span class=\"userinput\">keep.ClusterID.example.com</span>;\n\n  proxy_connect_timeout   90s;\n  proxy_read_timeout      300s;\n  proxy_set_header        X-Real-IP $remote_addr;\n  proxy_http_version      1.1;\n  proxy_request_buffering off;\n  proxy_max_temp_file_size 0;\n\n  ssl_certificate     <span class=\"userinput\">/YOUR/PATH/TO/cert.pem</span>;\n  ssl_certificate_key <span class=\"userinput\">/YOUR/PATH/TO/cert.key</span>;\n\n  # Clients need to be able to upload blocks of data up to 64MiB in size.\n  client_max_body_size    64m;\n\n  location / {\n    proxy_pass            http://keepproxy;\n  }\n}\n</code></pre></notextile>\n\nIf a client connects to the @Keepproxy@ service, it will talk to Nginx which will reverse proxy the traffic to the @Keepproxy@ service.\n\nh3. API server\n\nConsider this section for the @RailsAPI@ service:\n\n{% codeblock as yaml %}\n      RailsAPI:\n        InternalURLs:\n          \"http://localhost:8004\": {}\n{% endcodeblock %}\n\nThere is no @ExternalURL@ defined because the @RailsAPI@ is not directly accessible and does not need to advertise a URL: all traffic to it flows via @Controller@, which is the only client that talks to it.\n\nThe @RailsAPI@ service is also a Rails application, and its listening host and port are set in the @arvados-railsapi.service@ unit definition:\n\n<notextile>\n<pre><code># <span class=\"userinput\">systemctl cat arvados-railsapi.service</span>\n[...]\n[Service]\nEnvironment=<strong>PASSENGER_ADDRESS=localhost</strong>\nEnvironment=<strong>PASSENGER_PORT=8004</strong>\n[...]\n</code></pre></notextile>\n\nSo then, why is there a need to specify @InternalURLs@ for the @RailsAPI@ service? It is there because this is how the @Controller@ service locates the @RailsAPI@ service it should talk to. Since this connection is internal to the Arvados cluster, @Controller@ uses @InternalURLs@ to find the @RailsAPI@ endpoint.\n\nh3. 
Controller\n\nConsider this section for the @Controller@ service:\n\n{% codeblock as yaml %}\n  Controller:\n    InternalURLs:\n      \"https://ctrl-0.internal\":\n        ListenURL: \"http://localhost:8003\"\n    ExternalURL: \"https://ClusterID.example.com\"\n{% endcodeblock %}\n\nThe @ExternalURL@ advertised to clients is @https://ClusterID.example.com@. The @arvados-controller@ process will listen on @localhost@ port 8003. Other Arvados service processes in the cluster can connect to this specific controller instance, using the URL @https://ctrl-0.internal@. Container web service traffic at @https://*.containers.ClusterID.example.com@ is also handled by the same @arvados-controller@ process. Nginx is configured to sit in front of the @Controller@ service and terminate TLS:\n\n<notextile><pre><code>\n# This is the port where nginx expects to contact arvados-controller.\nupstream controller {\n  server     localhost:8003  fail_timeout=10s;\n}\n\nserver {\n  # This configures the public https port that clients will actually connect to,\n  # the request is reverse proxied to the upstream 'controller'\n\n  listen       443 ssl;\n  server_name  ClusterID.example.com\n               ctrl-0.internal\n               *.containers.ClusterID.example.com;\n\n  ssl_certificate     /YOUR/PATH/TO/cert.pem;\n  ssl_certificate_key /YOUR/PATH/TO/cert.key;\n\n  # Refer to the comment about this setting in the passenger (arvados\n  # api server) section of your Nginx configuration.\n  client_max_body_size 128m;\n\n  location / {\n    proxy_pass               http://controller;\n    proxy_redirect           off;\n    proxy_connect_timeout    90s;\n    proxy_read_timeout       300s;\n    proxy_max_temp_file_size 0;\n    proxy_request_buffering  off;\n    proxy_buffering          off;\n    proxy_http_version       1.1;\n\n    proxy_set_header      Host              $http_host;\n    proxy_set_header      Upgrade           $http_upgrade;\n    proxy_set_header      Connection        \"upgrade\";\n    proxy_set_header      X-External-Client $external_client;\n    proxy_set_header      X-Forwarded-For   $proxy_add_x_forwarded_for;\n    proxy_set_header      X-Forwarded-Proto https;\n    proxy_set_header      X-Real-IP         $remote_addr;\n  }\n}\n</code></pre></notextile>\n\nIf the host part of @ListenURL@ is ambiguous, in the sense that more than one system host is able to listen on that address (e.g., @localhost@), configure each host's startup scripts to set the environment variable @ARVADOS_SERVICE_INTERNAL_URL@ to the @InternalURLs@ key that will reach that host. In the example above, this would be @ARVADOS_SERVICE_INTERNAL_URL=https://ctrl-0.internal@.\n\nIf the cluster has just a single node running all of the Arvados server processes, configuration can be simplified:\n\n{% codeblock as yaml %}\n  Controller:\n    InternalURLs:\n      \"http://localhost:8003\": {}\n    ExternalURL: \"https://ClusterID.example.com\"\n{% endcodeblock %}\n"
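After editing @InternalURLs@ or @ListenURL@ and restarting a service, one quick way to confirm the process is actually listening where the reverse proxy expects it (a common source of 502 errors) is to list the listening sockets on that host, e.g. with @ss@ from iproute2:\n\n<notextile><pre><code># <span class=\"userinput\">ss -tlnp | grep :8003</span>\n</code></pre></notextile>\n\nThe output should show the @arvados-controller@ process bound to the address given in @ListenURL@ (here @localhost:8003@).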
  },
  {
    "path": "doc/admin/config.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Configuration reference\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe Arvados configuration is stored at @/etc/arvados/config.yml@\n\n{% codeblock as yaml %}\n{% include 'config_default_yml' %}\n{% endcodeblock %}\n"
  },
  {
    "path": "doc/admin/controlling-container-reuse.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Preventing container reuse\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nSometimes a container exited successfully but produced bad output, and re-running the workflow will cause it to re-use the bad container instead of running a new container.  One way to deal with this is to re-run the entire workflow with reuse disabled.  Another way is for the workflow author to tweak the input data or workflow so that on re-run it produces a distinct container request.  However, for large or complex workflows both these options may be impractical.\n\nTo prevent an individual container from being reused in later workflows, an admin can manually change the state of the bad container record from @Complete@ to @Cancelled@.  The following @arv@ command demonstrates how change a container state to @Cancelled@, where @xxxxx-xxxxx-xxxxxxxxxxxxxxx@ is the @UUID@ of the container:\n\n<pre>arv container update -u xxxxx-xxxxx-xxxxxxxxxxxxxxx -c '{\"state\":\"Cancelled\"}'</pre>\n"
  },
  {
    "path": "doc/admin/diagnostics.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Diagnostics\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe @arvados-client diagnostics@ command exercises basic cluster functionality, and identifies some common installation and configuration problems. Especially after upgrading or reconfiguring Arvados or server/network infrastructure, it can be the quickest way to identify problems.\n\nh2. Using system privileges\n\nOn a server node, it is easiest to run the diagnostics command with system privileges. The word @sudo@ here instructs the @arvados-client@ command to load @Controller.ExternalURL@ and @SystemRootToken@ from @/etc/arvados/config.yml@ and use those credentials to run tests with system privileges.\n\nWhen run this way, diagnostics will also include \"health checks\":health-checks.html.\n\n<notextile><pre>\n# <span class=\"userinput\">arvados-client sudo diagnostics</span>\n</pre></notextile>\n\nh2. Using regular user privileges\n\nOn any node (server node, shell node, or a workstation outside the system network), you can also run diagnostics by setting the usual @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables. Typically this is done with a regular user account.\n\n<notextile><pre>\n$ <span class=\"userinput\">export ARVADOS_API_HOST=zzzzz.arvadosapi.com</span>\n$ <span class=\"userinput\">export ARVADOS_API_TOKEN=xxxxxxxxxx</span>\n$ <span class=\"userinput\">arvados-client diagnostics</span>\n</pre></notextile>\n\nh2. Internal/external client detection\n\nThe diagnostics output indicates whether its client connection is categorized by the server as internal or external. If you run diagnostics automatically with cron or a monitoring tool, you can use the @-internal-client@ or @-external-client@ flag to specify how you _expect_ the client to be categorized, and the test will fail otherwise. Example:\n\n<notextile><pre>\n# <span class=\"userinput\">arvados-client sudo diagnostics -internal-client</span>\n[...]\n\n--- cut here --- error summary ---\n\nERROR     60: checking internal/external client detection (11 ms): expecting internal=true external=false, but found internal=false external=true\n</pre></notextile>\n\nh2(#container-options). Container-running options\n\nBy default, the @diagnostics@ command builds a custom Docker image containing a copy of its own binary, and uses that image to run diagnostic checks from inside an Arvados container. This can help detect problems like lack of network connectivity between containers and Arvados cluster services.\n\nThe default approach works well if the client host (i.e., the host where you invoke @arvados-client diagnostics@) meets certain conditions:\n* Docker is installed and working (so the diagnostics command can run @docker build@ and @docker save@).\n* Its hardware and kernel are similar to the cluster's compute instances (so the @arvados-client@ binary and the custom-built Docker image are compatible with the compute instances).\n* Network bandwidth supports uploading the Docker image (about 100 megabytes) in less than a minute.\n\nThe following options provide flexibility in case the default approach is not suitable.\n* @-priority=0@ skips the container-running part of the diagnostics suite.\n* @-docker-image=\"hello-world\"@ uses a tiny \"hello world\" image that is already embedded in the @arvados-client@ binary. 
This works even if the client host does not have any docker tools installed, and it minimizes the data transferred during the diagnostics suite. It provides less test coverage than the default option, but it will at least check that it is possible to run a container on the cluster.\n* @-docker-image=X@ (where @X@ is a Docker image name or a portable data hash) uses a Docker image that has already been uploaded to your Arvados cluster using @arv keep docker@. In this case the diagnostics tool will run a container with the command @echo {timestamp}@.\n* @-docker-image-from=NAME@ builds a custom Docker image on the fly as described above, but using the specified image as a base instead of the default @debian:stable-slim@ image. Note that the build recipe runs commands like @apt-get install [...] libfuse2 ca-certificates@ so only Debian-based base images are supported. For more flexibility, use one of the above @-docker-image=...@ options.\n* @-timeout=2m@ extends the time limit for each HTTP request made by the diagnostics suite, including the process of uploading a custom-built Docker image, to 2 minutes (the default HTTP request timeout is 10 seconds, and the default upload time limit is either the HTTP timeout or 1 minute, whichever is longer).\n\nh2. Example output\n\n<notextile><pre>\n# <span class=\"userinput\">arvados-client sudo diagnostics</span>\nINFO       5: running health check (same as `arvados-server check`)\nINFO      10: getting discovery document from https://zzzzz.arvadosapi.com/discovery/v1/apis/arvados/v1/rest\nINFO      20: getting exported config from https://zzzzz.arvadosapi.com/arvados/v1/config\nINFO      30: getting current user record\nINFO      40: connecting to service endpoint https://keep.zzzzz.arvadosapi.com/\nINFO      41: connecting to service endpoint https://*.collections.zzzzz.arvadosapi.com/\nINFO      42: connecting to service endpoint https://download.zzzzz.arvadosapi.com/\nINFO      43: connecting to service endpoint wss://ws.zzzzz.arvadosapi.com/websocket\nINFO      44: connecting to service endpoint https://workbench.zzzzz.arvadosapi.com/\nINFO      45: connecting to service endpoint https://workbench2.zzzzz.arvadosapi.com/\nINFO      50: checking CORS headers at https://zzzzz.arvadosapi.com/\nINFO      51: checking CORS headers at https://keep.zzzzz.arvadosapi.com/d41d8cd98f00b204e9800998ecf8427e+0\nINFO      52: checking CORS headers at https://download.zzzzz.arvadosapi.com/\nINFO      60: checking internal/external client detection\nINFO      61: reading+writing via keep service at https://keep.zzzzz.arvadosapi.com:443/\nINFO      80: finding/creating \"scratch area for diagnostics\" project\nINFO      90: creating temporary collection\nINFO     100: uploading file via webdav\nINFO     110: checking WebDAV ExternalURL wildcard (https://*.collections.zzzzz.arvadosapi.com/)\nINFO     120: downloading from webdav (https://d41d8cd98f00b204e9800998ecf8427e-0.collections.zzzzz.arvadosapi.com/foo)\nINFO     121: downloading from webdav (https://d41d8cd98f00b204e9800998ecf8427e-0.collections.zzzzz.arvadosapi.com/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)\nINFO     122: downloading from webdav (https://download.zzzzz.arvadosapi.com/c=d41d8cd98f00b204e9800998ecf8427e+0/_/foo)\nINFO     123: downloading from webdav (https://download.zzzzz.arvadosapi.com/c=d41d8cd98f00b204e9800998ecf8427e+0/_/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)\nINFO     124: downloading from webdav 
(https://a15a27cbc1c7d2d4a0d9e02529aaec7e-128.collections.zzzzz.arvadosapi.com/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)\nINFO     125: downloading from webdav (https://download.zzzzz.arvadosapi.com/c=zzzzz-4zz18-twitqma8mbvwydy/_/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)\nINFO     130: getting list of virtual machines\nINFO     150: connecting to webshell service\nINFO     160: running a container\nINFO      ... container request submitted, waiting up to 10m for container to run\nINFO    9990: deleting temporary collection\n</pre></notextile>\n"
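If you run diagnostics periodically as suggested above, a cron entry along these lines may be convenient. The schedule, log path, and flag choices here are illustrative, not requirements; @-priority=0@ keeps the check lightweight by skipping the container-running step:\n\n<notextile><pre>\n# /etc/cron.d/arvados-diagnostics\n*/15 * * * * root arvados-client sudo diagnostics -internal-client -priority=0 >>/var/log/arvados-diagnostics.log 2>&1\n</pre></notextile>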
  },
  {
    "path": "doc/admin/dispatch.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Cloud dispatcher\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe @arvados-server@ program provides subcommands for accessing the \"cloud dispatcher's management API\":{{site.baseurl}}/api/dispatch.html interactively.\n\nh2. List instances\n\n@arvados-server instance list@\n\nDisplay a list of instances managed by the dispatcher.\n\nA placeholder @-@ in the @instance@ column indicates that an instance has been requested but the cloud provider has not yet returned a response with an instance ID.\n\nA placeholder @-@ in the @address@ column indicates that an instance has been requested but has not yet been assigned an IP address by the cloud provider.\n\nA placeholder @-@ in the @running-containers@ column indicates that the instance is not running any containers.\n\nUse the @-header@ flag to display column names.\n\n<notextile><pre><code># <span class=\"userinput\">arvados-server instance list -header</span>\ninstance\taddress\tstate\tidle-behavior\tconfig-type\tprovider-type\tprice\trunning-containers\ni-03d59cfcfacf307ff\t10.253.254.184\trunning\trun\tc5large\tc5.large\t0.085000\ttordo-dz642-r6fz90awybvywr6\ni-0df614e93e4170ae7\t10.253.254.157\tbooting\trun\tt3small\tt3.small\t0.020800\t-\n</code></pre></notextile>\n\nh2. Drain instances\n\n<pre># arvados-server instance drain <instance-id> [instance-id ...]</pre>\n\nSet the indicated instances' idle behavior to @drain@.  Containers currently running will be allowed to continue, but when each instance becomes idle, it will be shut down.\n\nh2. Hold instances\n\n<pre># arvados-server instance hold <instance-id> [instance-id ...]</pre>\n\nSet the indicated instances' idle behavior to @hold@.  The instances will not be shut down automatically.  Containers currently running will be allowed to continue, but no new containers will be scheduled.\n\nh2. Run instances\n\n<pre># arvados-server instance run <instance-id> [instance-id ...]</pre>\n\nSet the indicated instances' idle behavior to @run@ (the normal behavior). When the instances become idle, they will be eligible to run new containers. They will be shut down automatically when the configured idle threshold is reached.\n\nh2. Kill instances\n\n<pre># arvados-server instance kill [-reason \"...\"] <instance-id> [instance-id ...]</pre>\n\nShut down the indicated instances immediately, abandoning/failing any containers they are currently running.\n\nThe provided reason string will appear in the dispatcher’s log.\n"
  },
  {
    "path": "doc/admin/federation.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Configuring federation\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis page describes how to enable and configure federation capabilities between clusters.\n\nAn overview on how this feature works is discussed in the \"architecture section\":{{site.baseurl}}/architecture/federation.html\n\nh2. Configuration\n\nTo enable a cluster to communicate with other clusters, some settings need to be added to the @config.yml@ file.  Federated clusters are identified by listing the cluster-to-hostname mapping in the @RemoteClusters@ section.\n\nHere is an example of the settings that should be added to the @/etc/arvados/config.yml@ file:\n\n<pre>\nClusters:\n  clsr1:\n    RemoteClusters:\n      clsr2:\n        Host: api.cluster2.example\n        Proxy: true\n\tActivateUsers: true\n      clsr3:\n        Host: api.cluster3.example\n        Proxy: true\n\tActivateUsers: false\n</pre>\n\nSimilar settings should be added to @clsr2@ & @clsr3@ hosts, so that all clusters in the federation can talk to each other.\n\nThe @ActivateUsers@ setting indicates whether users from a given cluster are automatically activated or they require manual activation.  User activation is covered in more detail in the \"user activation section\":{{site.baseurl}}/admin/user-management.html.  In the current example, users from @clsr2@ would be automatically activated but users from @clsr3@ would require an admin to activate the account.\n\nNote: The @Proxy:@ variable is intended for future use, and should always be set to @true@.\n\nh2(#LoginCluster). User management\n\nA federation of clusters can be configured to use a separate user database per cluster, or delegate a central cluster to manage the database.\n\nh3. Peer federation\n\nIf clusters belong to separate organizations, each cluster will have its own user database for the members of that organization.  Through federation, a user from one organization can be granted access to the cluster of another organization.  The admin of the second cluster can control access on a individual basis by choosing to activate or deactivate accounts from other organizations.\n\nh3. Centralized (LoginCluster) federation\n\nIf all clusters belong to the same organization, and users in that organization should have access to all the clusters, user management can be simplified by setting the @LoginCluster@ which manages the user database used by all other clusters in the federation.  To do this, choose one cluster in the federation which will be the 'login cluster'.  Set the @Login.LoginCluster@ configuration value on all clusters in the federation to the cluster id of the login cluster.  After setting @LoginCluster@, restart arvados-api-server and arvados-controller.\n\n<pre>\nClusters:\n  clsr2:\n    Login:\n      LoginCluster: clsr1\n</pre>\n\nThe @LoginCluster@ configuration redirects all user logins to the LoginCluster, and the LoginCluster will issue API tokens which will be accepted by the federation.  Users are activated or deactivated across the entire federation based on their status on the login cluster.\n\nNote: tokens issued by the login cluster need to be periodically re-validated when used on other clusters in the federation.  The period between revalidation attempts is configured with @Login.RemoteTokenRefresh@.  The default is 5 minutes.  
A longer period reduces overhead from validating tokens, but means it may take longer for other clusters to notice when a token has been revoked or a user has changed status (being activated/deactivated, admin flag changed).\n\nTo migrate users of existing clusters with separate user databases to use a single LoginCluster, a script @arv-federation-migrate@ is available in @contrib/arvados-bootstrap@.\n\nh2. Groups\n\nIn order for a user to see (and be able to share with) other users, the admin needs to create a \"can_read\" permission link from the user to either the \"All users\" group, or another group that grants visibility to a subset of users.\n\nIn a peer federation, this means that for a user that has joined a second cluster, that user needs to be added to the \"All users\" group on the second cluster as well, to be able to share with other users.\n\nIn a LoginCluster federation, the visibility of users to one another is set by the LoginCluster.  It is not necessary to add users to \"All users\" on the other clusters.\n\nh3. Trusted clients\n\nWhen a cluster is configured to use a LoginCluster, the login flow goes to the LoginCluster to log in and issue a token, then returns the user to the starting workbench.  In this case, you want to configure the LoginCluster to \"trust\" the workbench instances associated with the other clusters.\n\n<pre>\nClusters:\n  clsr1:\n    Login:\n      TrustedClients:\n        \"https://workbench.cluster2.example\": {}\n        \"https://workbench2.cluster2.example\": {}\n        \"https://workbench.cluster3.example\": {}\n        \"https://workbench2.cluster3.example\": {}\n</pre>\n\nh2. Testing\n\nFollowing the above example, let's suppose @clsr1@ is our \"home cluster\", that is to say, we use our @clsr1@ user account as our federated identity and both @clsr2@ and @clsr3@ remote clusters are set up to allow users from @clsr1@ and to auto-activate them. The first thing to do would be to log into a remote workbench using the local user token. This can be done following these steps:\n\n1. Log into the local workbench and get the user token\n2. Visit the remote workbench specifying the local user token by URL: @https://workbench.cluster2.example?api_token=token_from_clsr1@\n3. You should now be logged into @clsr2@ with your account from @clsr1@\n\nTo further test the federation setup, you can create a collection on @clsr2@, uploading some files and copying its UUID. Next, logged into a shell node on your home cluster, you should be able to get that collection by running:\n\n<pre>\nuser@clsr1:~$ arv collection get --uuid clsr2-4zz18-xxxxxxxxxxxxxxx\n</pre>\n\nThe returned collection metadata should show the local user's uuid on the @owner_uuid@ field. This tests that the @arvados-controller@ service is proxying requests correctly.\n\nOne last test may be performed to confirm that the @keepstore@ services also recognize remote cluster prefixes and proxy the requests. You can ask for the previously created collection using any of the usual tools, for example:\n\n<pre>\nuser@clsr1:~$ arv-get clsr2-4zz18-xxxxxxxxxxxxxxx/uploaded_file .\n</pre>\n"
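As a concrete example of the Groups setup described above, granting a user visibility of (and the ability to share with) other users in a peer federation is a single permission link from the user to the \"All users\" group. This is a sketch with the @arv@ CLI; the UUIDs are placeholders, and on a real cluster the \"All users\" group UUID can be found with @arv group list --filters '[[\"name\", \"=\", \"All users\"]]'@:\n\n<pre>\narv link create --link '{\n  \"link_class\": \"permission\",\n  \"name\": \"can_read\",\n  \"tail_uuid\": \"clsr2-tpzed-xxxxxxxxxxxxxxx\",\n  \"head_uuid\": \"clsr2-j7d0g-xxxxxxxxxxxxxxx\"}'\n</pre>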
  },
  {
    "path": "doc/admin/group-management.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Role group management at the CLI\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis page describes how to manage groups at the command line.  You should be familiar with the \"permission system\":{{site.baseurl}}/api/permission-model.html .\n\nh2. Create a role group\n\nUser groups are entries in the \"groups\" table with @\"group_class\": \"role\"@.\n\n<pre>\narv group create --group '{\"name\": \"My new group\", \"group_class\": \"role\"}'\n</pre>\n\nh2(#add). Add a user to a role group\n\nThere are two separate permissions associated with group membership.  The first link grants the user @can_manage@ permission to manage things that the group can manage.  The second link grants permission for other users of the group to see that this user is part of the group.\n\n<pre>\narv link create --link '{\n  \"link_class\": \"permission\",\n  \"name\": \"can_manage\",\n  \"tail_uuid\": \"the_user_uuid\",\n  \"head_uuid\": \"the_group_uuid\"}'\n\narv link create --link '{\n  \"link_class\": \"permission\",\n  \"name\": \"can_read\",\n  \"tail_uuid\": \"the_group_uuid\",\n  \"head_uuid\": \"the_user_uuid\"}'\n</pre>\n\nA user can also be given read-only access to a group.  In that case, the first link should be created with @can_read@ instead of @can_manage@.\n\nh2. List role groups\n\n<pre>\narv group list --filters '[[\"group_class\", \"=\", \"role\"]]'\n</pre>\n\nh2. List members of a role group\n\nUse the command \"jq\":https://stedolan.github.io/jq/ to extract the tail_uuid of each permission link which has the user uuid.\n\n<pre>\narv link list --filters '[[\"link_class\", \"=\", \"permission\"],\n  [\"head_uuid\", \"=\", \"the_group_uuid\"]]' | jq .items[].tail_uuid\n</pre>\n\nh2(#share-project). Share a project with a role group\n\nMembers of the role group will have access to the project based on their level of access to the role group.\n\n<pre>\narv link create --link '{\n  \"link_class\": \"permission\",\n  \"name\": \"can_manage\",\n  \"tail_uuid\": \"the_group_uuid\",\n  \"head_uuid\": \"the_project_uuid\"}'\n</pre>\n\nA project can also be shared read-only.  In that case, the link @name@ should be @can_read@ instead of @can_manage@.\n\nh2. List things shared with the group\n\nUse the command \"jq\":https://stedolan.github.io/jq/ to extract the head_uuid of each permission link which has the object uuid.\n\n<pre>\narv link list --filters '[[\"link_class\", \"=\", \"permission\"],\n  [\"tail_uuid\", \"=\", \"the_group_uuid\"]]' | jq .items[].head_uuid\n</pre>\n\nh2(#stop-sharing-project). Stop sharing a project with a group\n\nThis will remove access for members of the group.\n\nThe first step is to find the permission link objects.  The second step is to delete them.\n\n<pre>\narv --format=uuid link list --filters '[[\"link_class\", \"=\", \"permission\"],\n  [\"tail_uuid\", \"=\", \"the_group_uuid\"], [\"head_uuid\", \"=\", \"the_project_uuid\"]]'\n\narv link delete --uuid each_link_uuid\n</pre>\n\nh2. Remove user from a role group\n\nThe first step is to find the permission link objects.  The second step is to delete them.\n\n<pre>\narv --format=uuid link list --filters '[[\"link_class\", \"=\", \"permission\"],\n  [\"tail_uuid\", \"in\", [\"the_user_uuid\", \"the_group_uuid\"]],\n  [\"head_uuid\", \"in\", [\"the_user_uuid\", \"the_group_uuid\"]]'\n\narv link delete --uuid each_link_uuid\n</pre>\n"
  },
  {
    "path": "doc/admin/health-checks.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Health checks\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nHealth check endpoints are found at @/_health/ping@ on many Arvados services.  The purpose of the health check is to offer a simple method of determining if a service can be reached and allow the service to self-report any problems, suitable for integrating into operational alert systems.\n\nTo access health check endpoints, services must be configured with a \"management token\":management-token.html .\n\nHealth check endpoints return a JSON object with the field @health@.  This has a value of either @OK@ or @ERROR@.  On error, it may also include a  field @error@ with additional information.  Examples:\n\n<pre>\n{\n  \"health\": \"OK\"\n}\n</pre>\n\n<pre>\n{\n  \"health\": \"ERROR\"\n  \"error\": \"Inverted polarity in the warp core\"\n}\n</pre>\n\nh2. Health check aggregator\n\nThe service @arvados-health@ performs health checks on all configured services and returns a single value of @OK@ or @ERROR@ for the entire cluster.  It exposes the endpoint @/_health/all@ .\n\nThe healthcheck aggregator uses the @Services@ section of the cluster-wide @config.yml@ configuration file.\n\nh2. Health check command\n\nThe @arvados-server check@ command is another way to perform the same health checks as the health check aggregator service. It does not depend on the aggregator service.\n\nIf all checks pass, it writes @health check OK@ to stderr (unless the @-quiet@ flag is used) and exits 0. Otherwise, it writes error messages to stderr and exits with error status.\n\n@arvados-server check -yaml@ outputs a YAML document on stdout with additional details about each service endpoint that was checked.\n\n{% codeblock as yaml %}\nChecks:\n  \"arvados-api-server+http://localhost:8004/_health/ping\":\n    ClockTime: \"2024-12-13T14:38:25Z\"\n    ConfigSourceSHA256: 5a2b21ce0aeeeebcaf623329871b4628772446d4684ab0f89da4a2cbc7b3f17c\n    ConfigSourceTimestamp: \"2024-12-12T11:14:06.487848-05:00\"\n    HTTPStatusCode: 200\n    Health: OK\n    Response:\n      health: OK\n    ResponseTime: 0.051136\n    Server: nginx/1.26.1 + Phusion Passenger(R) 6.0.23\n    Version: 3.0.0\n  \"arvados-controller+http://localhost:8003/_health/ping\":\n    ClockTime: \"2024-12-13T14:38:25Z\"\n    ConfigSourceSHA256: 5a2b21ce0aeeeebcaf623329871b4628772446d4684ab0f89da4a2cbc7b3f17c\n    ConfigSourceTimestamp: \"2024-12-12T11:14:06.487848-05:00\"\n    HTTPStatusCode: 200\n    Health: OK\n    Response:\n      health: OK\n    ResponseTime: 0.014869\n    Server: \"\"\n    Version: 3.0.0 (go1.21.10)\n# ...\n{% endcodeblock %}\n"
  },
  {
    "path": "doc/admin/index.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: \"Arvados admin overview\"\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis section describes how to administer an Arvados cluster.  Cluster admins should already be familiar with the \"Arvados architecture.\":{{site.baseurl}}/architecture/index.html  For instructions on installing and configuring an Arvados cluster, see the \"install guide.\":{{site.baseurl}}/install/index.html\n"
  },
  {
    "path": "doc/admin/inspect.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Inspecting active requests\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nMost Arvados services publish a snapshot of HTTP requests currently being serviced at @/_inspect/requests@. This can be useful for troubleshooting slow requests and understanding high server load conditions.\n\nTo access snapshots, services must be configured with a \"management token\":management-token.html. When accessing this endpoint, prefix the management token with @\"Bearer \"@ and supply it in the @Authorization@ request header.\n\nIn an interactive setting, use the @jq@ tool to format the JSON response.\n\n<notextile><pre><code>curl -sfH \"Authorization: Bearer <span class=\"userinput\">your_management_token_goes_here</span>\" \"https://<span class=\"userinput\">0.0.0.0:25107</span>/_inspect/requests\" | jq .\n</code></pre></notextile>\n\ntable(table table-bordered table-condensed table-hover){width:40em}.\n|_. Component|_. Provides @/_inspect/requests@ endpoint|\n|arvados-api-server||\n|arvados-controller|✓|\n|arvados-dispatch-cloud|✓|\n|arvados-dispatch-lsf|✓|\n|arvados-ws|✓|\n|composer||\n|keepproxy|✓|\n|keepstore|✓|\n|keep-balance|✓|\n|keep-web|✓|\n|workbench2||\n\nh2. Report fields\n\nMost fields are self explanatory.\n\nThe @Host@ field reports the virtual host specified in the incoming HTTP request.\n\nThe @RemoteAddr@ field reports the source of the incoming TCP connection, which is typically a local address associated with the Nginx proxy service.\n\nThe @XForwardedFor@ field reports the value of the \"@X-Forwarded-For@ header\":https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/X-Forwarded-For in the request.\n\nThe @Elapsed@ field reports the number of seconds since the incoming HTTP request headers were received.\n\nh2. Example response\n\n<pre>\n[\n  {\n    \"RequestID\": \"req-1vzzj6nwrki0rd2hj08a\",\n    \"Method\": \"GET\",\n    \"Host\": \"tordo.arvadosapi.com\",\n    \"URL\": \"/arvados/v1/groups?order=name+asc&filters=[[%22owner_uuid%22,%22%3D%22,%22zzzzz-tpzed-aaaaaaaaaaaaaaa%22],[%22group_class%22,%22in%22,[%22project%22,%22filter%22]]]\",\n    \"RemoteAddr\": \"127.0.0.1:55822\",\n    \"XForwardedFor\": \"192.168.0.111, 10.0.0.123\",\n    \"Elapsed\": 0.006363228\n  },\n  {\n    \"RequestID\": \"req-1wrof2b2wlj5s1rao4u3\",\n    \"Method\": \"GET\",\n    \"Host\": \"tordo.arvadosapi.com\",\n    \"URL\": \"/arvados/v1/users/current\",\n    \"RemoteAddr\": \"127.0.0.1:55814\",\n    \"XForwardedFor\": \"192.168.0.222, 10.0.0.123\",\n    \"Elapsed\": 0.04796585\n  }\n]\n</pre>\n"
  },
  {
    "path": "doc/admin/keep-balance.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Balancing Keep servers\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis page describes how to balance keepstore servers using keep-balance. Keep-balance creates new copies of under-replicated blocks, deletes excess copies of over-replicated and unreferenced blocks, and moves blocks to better positions (e.g. after adding new keepstore servers) so clients find them faster.\n\nSee \"the Keep-balance install docs\":{{site.baseurl}}/install/install-keep-balance.html for installation instructions.\n\nh3. Data deletion\n\nThe keep-balance service determines which blocks are candidates for deletion and instructs the keepstore to move those blocks to the trash. When a block is newly written, it is protected from deletion for the duration in @BlobSigningTTL@.  During this time, it cannot be trashed or deleted.\n\nIf keep-balance instructs keepstore to trash a block which is older than @BlobSigningTTL@, and @BlobTrashLifetime@ is non-zero, the block will be moved to \"trash\".  A block which is in the trash is no longer accessible by read requests, but has not yet been permanently deleted.  Blocks which are in the trash may be recovered using the \"untrash\" API endpoint.  Blocks are permanently deleted after they have been in the trash for the duration in @BlobTrashLifetime@.\n\nKeep-balance is also responsible for balancing the distribution of blocks across keepstore servers by asking servers to pull blocks from other servers (as determined by their \"storage class\":{{site.baseurl}}/admin/storage-classes.html and \"rendezvous hashing order\":{{site.baseurl}}/architecture/keep-clients.html#rendezvous).  Pulling a block makes a copy.  If a block is overreplicated (i.e. there are excess copies) after pulling, it will be subsequently trashed and deleted on the original server, subject to @BlobTrash@ and @BlobTrashLifetime@ settings.\n\nh3. Scanning\n\nBy default, keep-balance operates periodically, i.e. do a scan/balance operation, sleep, repeat.\n\nThe @Collections.BalancePeriod@ value in @/etc/arvados/config.yml@ determines the interval between start times of successive scan/balance operations. If an operation takes longer than the @Collections.BalancePeriod@, the next operation will follow it immediately. If SIGUSR1 is received during an idle period between operations, the next operation will start immediately.\n\nKeep-balance can also be run with the @-once@ flag to do a single scan/balance operation and then exit. The exit code will be zero if the operation was successful.\n\nh3. Additional configuration\n\nFor configuring resource usage tuning and lost block reporting, please see the @Collections.BlobMissingReport@, @Collections.BalanceCollectionBatch@, @Collections.BalanceCollectionBuffers@ option in the \"default config.yml file\":{{site.baseurl}}/admin/config.html.\n\nThe @Collections.BalancePullLimit@ and @Collections.BalanceTrashLimit@ configuration entries determine the maximum number of pull and trash operations keep-balance will attempt to apply on each keepstore server. If both values are zero, keep-balance will operate in \"dry run\" mode, where all changes are computed but none are committed.\n\nh3. Limitations\n\nKeep-balance does not attempt to discover whether committed pull and trash requests ever get carried out -- only that they are accepted by the Keep services. 
If some services are full, new copies of under-replicated blocks might never be made, only repeatedly requested.\n\n
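To trigger a scan/balance operation immediately (for example, after a configuration change) rather than waiting for the next @Collections.BalancePeriod@, send SIGUSR1 to the running service as described above.  A minimal sketch, assuming keep-balance runs as a systemd service under its default unit name:\n\n<pre>\n# systemctl kill -s SIGUSR1 keep-balance\n</pre>\n"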
  },
  {
    "path": "doc/admin/keep-faster-gc-s3.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: \"Faster garbage collection in S3\"\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nWhen there is a large number of unneeded blocks stored in an S3 bucket, particularly when using @PrefixLength: 0@, the speed of garbage collection can be severely limited by AWS API rate limits and Arvados's multi-step trash/delete process.\n\nThe multi-step trash/delete process can be short-circuited by setting @BlobTrashLifetime@ to zero and enabling @UnsafeDelete@ on S3-backed volumes. However, on an actively used cluster such a configuration *can result in data loss* in the rare case where a given block is trashed and then rewritten soon afterward, and S3 processes the write and delete requests in the opposite order.\n\nThe following steps can be used to temporarily disable writes on an S3 bucket to enable faster garbage collection without data loss or service interruption. Note that garbage collection on other S3 volumes will be temporarily disabled during this procedure.\n# Create a new S3 bucket and configure it as an additional volume (this step may be skipped if the configuration already has enough writable volumes that clients will still be able to write blocks while the target volume is read-only). We recommend using @PrefixLength: 3@ for the new volume because this results in a much higher rate limit for I/O and garbage collection operations compared to the default @PrefixLength: 0@. If the target volume configuration specifies @StorageClasses@, use the same values for the new volume.\n# Shut down the @keep-balance@ service.\n# Update your configuration as follows: <notextile><pre>\n  Collections:\n    BlobTrashLifetime: 0\n    BalancePullLimit: 0\n  [...]\n  Volumes:\n    <span class=\"userinput\">target-volume-uuid</span>:\n      ReadOnly: true\n      AllowTrashWhenReadOnly: true\n      DriverParameters:\n        UnsafeDelete: true\n</pre></notextile> Note that @BlobTrashLifetime: 0@ instructs keepstore to delete unneeded blocks outright (bypassing the recoverable trash phase); however, in this mode it will normally not trash any blocks at all on an S3 volume due to the safety issue mentioned above, unless the volume is configured with @UnsafeDelete: true@.\n# Restart all @keepstore@ services with the updated configuration.\n# Start the @keep-balance@ service.\n# Objects will be deleted immediately instead of being first copied to trash on the S3 volume, which should significantly speed up cleanup of trashed objects. Monitor progress by watching @keep-balance@ logs and metrics. 
When garbage collection is complete, keep-balance logs will show an empty changeset: <notextile><pre><code>zzzzz-bi6l4-0123456789abcdef (keep0.zzzzz.arvadosapi.com:25107, disk): ChangeSet{Pulls:0, Trashes:0}</code></pre></notextile>\n# Remove the @UnsafeDelete@ configuration entry on the target volume.\n# Remove the @BlobTrashLifetime@ configuration entry (or restore it to its previous value).\n# If the target volume has @PrefixLength: 0@ and the new volume has @PrefixLength: 3@, skip the next two steps: new data will be stored on the new volume, some existing data will be moved automatically to other volumes, and some will be left on the target volume as long as it's needed.\n# If you want to resume writing new data to the target volume, revert to @ReadOnly: false@ and @AllowTrashWhenReadOnly: false@ on the target volume.\n# If you want to stop writing new data to the newly created volume, set @ReadOnly: true@ and @AllowTrashWhenReadOnly: true@ on the new volume.\n# Remove the @BalancePullLimit@ configuration entry (or restore its previous value), and restart @keep-balance@.\n# Restart all @keepstore@ services with the updated configuration.\n"
  },
  {
    "path": "doc/admin/keep-measuring-deduplication.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: \"Measuring deduplication\"\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe @arvados-client@ tool can be used to generate a deduplication report across an arbitrary number of collections. It can be installed from packages (@apt install arvados-client@ or @dnf install arvados-client@).\n\nh2(#syntax). Syntax\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arvados-client deduplication-report -h</span>\nUsage:\n  arvados-client deduplication-report [options ...] <collection-uuid> <collection-uuid> ...\n\n  arvados-client deduplication-report [options ...] <collection-pdh>,<collection_uuid> \\\n     <collection-pdh>,<collection_uuid> ...\n\n  This program analyzes the overlap in blocks used by 2 or more collections. It\n  prints a deduplication report that shows the nominal space used by the\n  collections, as well as the actual size and the amount of space that is saved\n  by Keep's deduplication.\n\n  The list of collections may be provided in two ways. A list of collection\n  uuids is sufficient. Alternatively, the PDH for each collection may also be\n  provided. This is will greatly speed up operation when the list contains\n  multiple collections with the same PDH.\n\n  Exit status will be zero if there were no errors generating the report.\n\nExample:\n\n  Use the 'arv' and 'jq' commands to get the list of the 100\n  largest collections and generate the deduplication report:\n\n  arv collection list --order 'file_size_total desc' --limit 100 | \\\n    jq -r '.items[] | [.portable_data_hash,.uuid] |@csv' | \\\n    sed -e 's/\"//g'|tr '\\n' ' ' | \\\n    xargs arvados-client deduplication-report\n\nOptions:\n  -log-level string\n      logging level (debug, info, ...) (default \"info\")\n</code>\n</pre>\n</notextile>\n\nThe usual environment variables (@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@) need to be set for the deduplication report to be be generated. To get cluster-wide results, an admin token will need to be supplied. 
Users can also run this report, but only collections their token is able to read will be included.\n\nExample output (with uuids and portable data hashes obscured) from a small Arvados cluster:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv collection list --order 'file_size_total desc' --limit 10 | jq -r '.items[] | [.portable_data_hash,.uuid] |@csv' |sed -e 's/\"//g'|tr '\\n' ' ' |xargs arvados-client deduplication-report</span>\nCollection _____-_____-_______________: pdh ________________________________+5003343; nominal size 7382073267640 (6.7 TiB); file count 2796\nCollection _____-_____-_______________: pdh ________________________________+4961919; nominal size 6989909625775 (6.4 TiB); file count 5592\nCollection _____-_____-_______________: pdh ________________________________+1903643; nominal size 2677933564052 (2.4 TiB); file count 2796\nCollection _____-_____-_______________: pdh ________________________________+1903643; nominal size 2677933564052 (2.4 TiB); file count 2796\nCollection _____-_____-_______________: pdh ________________________________+137710; nominal size 191858151583 (179 GiB); file count 201\nCollection _____-_____-_______________: pdh ________________________________+137636; nominal size 191858101962 (179 GiB); file count 200\nCollection _____-_____-_______________: pdh ________________________________+135350; nominal size 191715427388 (178 GiB); file count 201\nCollection _____-_____-_______________: pdh ________________________________+135276; nominal size 191715384167 (178 GiB); file count 200\nCollection _____-_____-_______________: pdh ________________________________+135350; nominal size 191707276684 (178 GiB); file count 201\nCollection _____-_____-_______________: pdh ________________________________+135276; nominal size 191707233463 (178 GiB); file count 200\n\nCollections:                              10\nNominal size of stored data:  20878411596766 bytes (19 TiB)\nActual size of stored data:   17053104444050 bytes (16 TiB)\nSaved by Keep deduplication:   3825307152716 bytes (3.5 TiB)\n\n</code>\n</pre>\n</notextile>\n"
  },
  {
    "path": "doc/admin/keep-recovering-data.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: \"Recovering data\"\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados has several features to prevent accidental loss or deletion of data, but accidents can happen. This page lays out the options to recover deleted or overwritten collections.\n\nFor more detail on the data lifecycle in Arvados, see the \"Data lifecycle\":{{ site.baseurl }}/architecture/keep-data-lifecycle.html page.\n\nh2(#check_the_trash). Check the trash\n\nWhen a collection is deleted, it is moved to the trash. It will remain there for the duration of @Collections.DefaultTrashLifetime@, and it can be untrashed via workbench or with the cli tools, as described in \"Recovering trashed collections\":{{ site.baseurl }}/user/tutorials/tutorial-keep-collection-lifecycle.html#trash-recovery.\n\nh2(#check_other_collections). Check for other collections with the same PDH\n\nMultiple collections may share a _portable data hash_, i.e. have the same contents. If another collection exists with the same portable data hash, recovering data is not necessary, everything is still stored in Keep. A new copy of the collection can be made to make the data available in the correct project and with the correct permissions.\n\nh2(#check_collection_versioning). Consider collection versioning\n\nArvados supports collection versioning. If it has not been \"disabled\":{{ site.baseurl }}/admin/collection-versioning.html on your cluster, the deleted collection may be recoverable from an older version. See \"Using collection versioning\":{{ site.baseurl }}/user/topics/collection-versioning.html for details.\n\nh2(#recover_collection). Recovering collections\n\nWhen all the above options fail, it may still be possible to recover a collection that has been deleted.\n\nTo recover a collection the manifest is required. Arvados has a built-in audit log, which consists of a row added to the \"logs\" table in the PostgreSQL database each time an Arvados object is created, modified, or deleted. Collection manifests are included, unless they are listed in the @AuditLogs.UnloggedAttributes@ configuration parameter. The audit log is retained for up to @AuditLogs.MaxAge@.\n\nIn some cases, it is possible to recover files that have been lost by modifying or deleting a collection.\n\nPossibility of recovery depends on many factors, including:\n* Whether the collection manifest is still available, e.g., in an audit log entry\n* Whether the data blocks are also referenced by other collections\n* Whether the data blocks have been unreferenced long enough to be marked for deletion/trash by keep-balance\n* Blob signature TTL, trash lifetime, trash check interval, and other config settings\n\nTo attempt recovery of a previous version of a deleted/modified collection, use the @arvados-server recover-collection@ command. 
It should be run on one of your server nodes where the @arvados-server@ package is installed and the @/etc/arvados/config.yml@ file is up to date.\n\nSpecify the collection you want to recover by passing either the UUID of an audit log entry, or a file containing the manifest.\n\nIf recovery is successful, the @recover-collection@ program saves the recovered data in a new collection belonging to the system user, and prints the new collection's UUID on stdout.\n\n<pre>\n# arvados-server recover-collection 9tee4-57u5n-nb5awmk1pahac2t\nINFO[2020-06-05T19:52:29.557761245Z] loaded log entry                              logged_event_time=\"2020-06-05 16:48:01.438791 +0000 UTC\" logged_event_type=update old_collection_uuid=9tee4-4zz18-1ex26g95epmgw5w src=9tee4-57u5n-nb5awmk1pahac2t\nINFO[2020-06-05T19:52:29.642145127Z] recovery succeeded                            UUID=9tee4-4zz18-5trfp4k4xxg97f1 src=9tee4-57u5n-nb5awmk1pahac2t\n9tee4-4zz18-5trfp4k4xxg97f1\nINFO[2020-06-05T19:52:29.644699436Z] exiting\n</pre>\n\nIn this example, the original data has been restored and saved in a new collection with UUID @9tee4-4zz18-5trfp4k4xxg97f1@.\n\nFor more options, run @arvados-server recover-collection -help@.\n\nh2(#untrashing_lost_blocks). Untrashing lost blocks\n\nIn some cases it is possible to recover data blocks that were trashed erroneously by @keep-balance@ (e.g. due to an install/config error).\n\nIf you suspect blocks have been trashed erroneously, you should immediately:\n\n* On all keepstore servers: set @BlobTrashCheckInterval@ to a long time like 2400h\n* On all keepstore servers: restart keepstore\n* Stop the keep-balance service\n\nWhen you think you have corrected the underlying problem, you should:\n\n* Set @Collections.BlobMissingReport@ to a suitable value (perhaps \"/tmp/keep-balance-lost-blocks.txt\").\n* Start @keep-balance@\n* After @keep-balance@ completes its first sweep, inspect /tmp/keep-balance-lost-blocks.txt. If it's not empty, you can request all keepstores to untrash any blocks that are still recoverable with a script like this:\n\n<notextile>\n<pre><code>\n#!/bin/bash\nset -e\n\n# see Client.AuthToken in /etc/arvados/keep-balance/keep-balance.yml\ntoken=xxxxxxx-your-system-auth-token-xxxxxxx\n\n# all keep server hostnames\nhosts=(keep0 keep1 keep2 keep3 keep4 keep5)\n\n# ask every keepstore to untrash each block listed in the report\nwhile read hash pdhs; do\n  echo \"${hash}\"\n  for h in \"${hosts[@]}\"; do\n    if curl -fgs -H \"Authorization: Bearer $token\" -X PUT \"http://${h}:25107/untrash/$hash\"; then\n      echo \"${hash} ok ${h}\"\n    fi\n  done\ndone < /tmp/keep-balance-lost-blocks.txt\n</code>\n</pre>\n</notextile>\n\nAny blocks which were successfully untrashed can be removed from the list of blocks and collections which need to be recovered.\n\nh2(#regenerating_lost_blocks). Regenerating lost blocks\n\nFor blocks which were trashed long enough ago that they've been deleted, it may be possible to regenerate them by rerunning the workflows which generated them. To do this, the process is:\n\n* Delete the affected collections so that job reuse doesn't attempt to reuse them (it's likely that if one block is missing, they all are, so they're unlikely to contain any useful data)\n* Resubmit any container requests for which you want the output collections regenerated\n\nThe Arvados repository contains a tool that can be used to generate a report to help with this task at \"arvados/tools/keep-xref/keep-xref.py\":https://github.com/arvados/arvados/blob/main/tools/keep-xref/keep-xref.py\n"
  },
  {
    "path": "doc/admin/link-accounts.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: \"Link user accounts\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nIf a user needs to log in to Arvados with a upstream account or provider, they may end up with two Arvados user accounts.  If the user still has the ability to log in with the old account, they can use the \"self-serve account linking\":{{site.baseurl}}/user/topics/link-accounts.html feature of workbench.  However, if the user does not have the ability to log in with both upstream accounts, the admin can also link the accounts using the command line.\n\nbq. NOTE: self-serve account linking is currently not supported on LoginCluster federations and needs to be performed manually by the site admin.\n\nh3. Step 1: Determine user uuids\n\nUser uuids can be determined by browsing workbench or using @arv user list@ at the command line.\n\nAccount linking works by recording in the database that a log in to the \"old\" account should redirected and treated as a login to the \"new\" account.\n\nThe \"old\" account is the Arvados account that will be redirected.\n\nThe \"new\" account is the user that the \"old\" account is redirected to.  As part of account linking any Arvados records owned by the \"old\" account is also transferred to the \"new\" account.\n\nCounter-intuitively, if you do not want the account uuid of the user to change, the \"new\" account should be the pre-existing account, and the \"old\" account should be the redundant second account that was more recently created.  This means \"old\" and \"new\" are opposite from their expected chronological meaning.  In this case, the use of \"old\" and \"new\" reflect the direction of transfer of ownership -- the login was associated with the \"old\" user account, but will be associated with the \"new\" user account.\n\nIn the example below, @zzzzz-tpzed-3kz0nwtjehhl0u4@ is the \"old\" account (the pre-existing account we want to keep) and @zzzzz-tpzed-fr97h9t4m5jffxs@ is the \"new\" account (the redundant account we want to merge into the existing account).\n\nh3. Step 2: Create a project\n\nCreate a project owned by the \"new\" account that will hold any data owned by the \"old\" account.\n\n<pre>\n$ arv --format=uuid group create --group '{\"group_class\": \"project\", \"name\": \"Data from old user\", \"owner_uuid\": \"zzzzz-tpzed-fr97h9t4m5jffxs\"}'\nzzzzz-j7d0g-mczqiguhil13083\n</pre>\n\nh3. Step 3: Merge \"old\" user to \"new\" user\n\nThe @user merge@ method redirects login and reassigns data from the \"old\" account to the \"new\" account.\n\n<pre>\n$ arv user merge  --redirect-to-new-user \\\n  --old-user-uuid=zzzzz-tpzed-3kz0nwtjehhl0u4 \\\n  --new-user-uuid=zzzzz-tpzed-fr97h9t4m5jffxs \\\n  --new-owner-uuid=zzzzz-j7d0g-mczqiguhil13083 \\\n</pre>\n\nNote that authorization credentials (API tokens, ssh keys) are also transferred to the \"new\" account, so credentials used to access the \"old\" account work with the \"new\" account.\n"
  },
  {
    "path": "doc/admin/logging.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Logging\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nMost Arvados services write JSON-format structured logs to stderr, which can be parsed by any operational tools that support JSON.\n\nh2. Request ids\n\nUsing a distributed system with several services working together sometimes makes it difficult to find the root cause of errors, as one single client request usually means several different requests to more than one service.\n\nTo deal with this difficulty, Arvados creates a request ID that gets carried over different services as the requests take place. This ID has a specific format and it's comprised of the prefix \"@req-@\" followed by 20 random alphanumeric characters:\n\n<pre>req-frdyrcgdh4rau1ajiq5q</pre>\n\nThis ID gets propagated via an HTTP @X-Request-Id@ header, and gets logged on every service.\n\nh3. API Server error reporting and logging\n\nIn addition to providing the request ID on every HTTP response, the API Server adds it to every error message so that all clients show enough information to the user to be able to track a particular issue. As an example, let's suppose that we get the following error when trying to create a collection using the CLI tools:\n\n<pre>\n$ arv collection create --collection '{}'\nError: #<RuntimeError: Whoops, something bad happened> (req-ku5ct9ehw0y71f1c5p79)\n</pre>\n\nThe API Server logs every request in JSON format on the @production.log@ (usually under @/var/www/arvados-api/current/log/@ when installing from packages) file, so we can retrieve more information about this by using @grep@ and @jq@ tools:\n\n<pre>\n# grep req-ku5ct9ehw0y71f1c5p79 /var/www/arvados-api/current/log/production.log | jq .\n{\n  \"method\": \"POST\",\n  \"path\": \"/arvados/v1/collections\",\n  \"format\": \"json\",\n  \"controller\": \"Arvados::V1::CollectionsController\",\n  \"action\": \"create\",\n  \"status\": 422,\n  \"duration\": 1.52,\n  \"view\": 0.25,\n  \"db\": 0,\n  \"request_id\": \"req-ku5ct9ehw0y71f1c5p79\",\n  \"client_ipaddr\": \"127.0.0.1\",\n  \"client_auth\": \"zzzzz-gj3su-jllemyj9v3s5emu\",\n  \"exception\": \"#<RuntimeError: Whoops, something bad happened>\",\n  \"exception_backtrace\": \"/var/www/arvados-api/current/app/controllers/arvados/v1/collections_controller.rb:43:in `create'\\n/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/action_controller/metal/basic_implicit_render.rb:4:in `send_action'\\n ...[snipped]\",\n  \"params\": {\n    \"collection\": \"{}\",\n    \"_profile\": \"true\",\n    \"cluster_id\": \"\",\n    \"collection_given\": \"true\",\n    \"ensure_unique_name\": \"false\",\n    \"help\": \"false\"\n  },\n  \"@timestamp\": \"2019-07-15T16:40:41.726634182Z\",\n  \"@version\": \"1\",\n  \"message\": \"[422] POST /arvados/v1/collections (Arvados::V1::CollectionsController#create)\"\n}\n</pre>\n\nWhen logging a request that produced an error, the API Server adds @exception@ and @exception_backtrace@ keys to the JSON log. 
The latter includes the complete error stack trace as a single string, which can be displayed in a more readable form like so:\n\n<pre>\n# grep req-ku5ct9ehw0y71f1c5p79 /var/www/arvados-api/current/log/production.log | jq -r .exception_backtrace\n/var/www/arvados-api/current/app/controllers/arvados/v1/collections_controller.rb:43:in `create'\n/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/action_controller/metal/basic_implicit_render.rb:4:in `send_action'\n/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/abstract_controller/base.rb:188:in `process_action'\n/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/action_controller/metal/rendering.rb:30:in `process_action'\n/var/lib/gems/ruby/2.3.0/gems/actionpack-5.0.7.2/lib/abstract_controller/callbacks.rb:20:in `block in process_action'\n/var/lib/gems/ruby/2.3.0/gems/activesupport-5.0.7.2/lib/active_support/callbacks.rb:126:in `call'\n...\n</pre>\n\n
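Because the same request ID is logged by every service involved in a request, the ID can also be used to trace a request across services.  A sketch, assuming the other services run under systemd so their stderr is captured by journald (unit names may vary between installations):\n\n<pre>\n# journalctl -u arvados-controller --since today | grep req-ku5ct9ehw0y71f1c5p79\n</pre>\n"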
  },
  {
    "path": "doc/admin/logs-table-management.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: \"Logs table management\"\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis page aims to provide insight about managing the ever growing API Server's logs table.\n\nh3. Logs table purpose & behavior\n\nThis database table is accessed via \"the @logs@ endpoint.\":../api/methods/logs.html\n\nThis table currently serves several purposes:\n\n* Audit logging, permitting admins and users to look up the time and details of past changes to Arvados objects.\n* Logging other system events, specifically \"file uploads and downloads from keep-web.\":restricting-upload-download.html#audit_logs\n* The source for cache-invalidation events, published through websockets to Workbench to refresh the view.  It can also be monitored by the Python SDK \"events module.\":../sdk/python/events.html\n* Prior to Arvados 2.7, it was used a staging area for stdout/stderr text coming from users' containers, permitting users to see what their containers are doing while they are still running (i.e., before those text files are written to Keep).  Starting with Arvados 2.7, this is superseded by a more efficient mechanism, so these logs are disabled by default.  See \"2.7.0 upgrade notes\":upgrading.html#v2_7_0 for details.\n\nAs a result, this table grows indefinitely, even on sites where policy does not require an audit log; making backups, migrations, and upgrades unnecessarily slow and painful.\n\nh3. Configuration\n\nTo solve the problem mentioned above, the @AuditLogs@ section of @config.yml@ offers several options to limit the amount of log information stored on the table:\n\n<pre>\n    AuditLogs:\n      # Time to keep audit logs. (An audit log is a row added\n      # to the \"logs\" table in the PostgreSQL database each time an\n      # Arvados object is created, modified, or deleted.)\n      #\n      # Currently, websocket event notifications rely on audit logs, so\n      # this should not be set lower than 5 minutes.\n      MaxAge: 336h\n\n      # Maximum number of log rows to delete in a single SQL transaction,\n      # to prevent surprises and avoid bad database behavior\n      # (especially the first time the cleanup job runs on an existing\n      # cluster with a huge backlog) a maximum number of rows to\n      # delete in a single transaction.\n      #\n      # If MaxDeleteBatch is 0, log entries will never be\n      # deleted by Arvados. Cleanup can be done by an external process\n      # without affecting any Arvados system processes, as long as very\n      # recent (<5 minutes old) logs are not deleted.\n      #\n      # 100000 is a reasonable batch size for most sites.\n      MaxDeleteBatch: 0\n\n      # Attributes to suppress in events and audit logs.  Notably,\n      # specifying {\"manifest_text\": {}} here typically makes the database\n      # smaller and faster.\n      #\n      # Warning: Using any non-empty value here can have undesirable side\n      # effects for any client or component that relies on event logs.\n      # Use at your own risk.\n      UnloggedAttributes: {}\n</pre>\n\n\nh3. Additional consideration\n\nDepending on the local installation's audit requirements, the cluster admins should plan for an external backup procedure before enabling this feature, as this information is not replicated anywhere else.\n"
  },
  {
    "path": "doc/admin/maintenance-and-upgrading.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Maintenance and upgrading\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Commercial support\":#commercial_support\n# \"Maintaining Arvados\":#maintaining\n## \"Modification of the config.yml file\":#configuration\n## \"Distributing the configuration file\":#distribution\n## \"Restart the services affected by the change\":#restart\n# \"Upgrading Arvados\":#upgrading\n\nh2(#commercial_support). Commercial support\n\nArvados is \"100% open source software\":{{site.baseurl}}/user/copying/copying.html. Anyone can download, install, maintain and upgrade it. However, if this is not something you want to spend your time and energy doing, \"Curii Corporation\":https://curii.com provides managed Arvados installations as well as commercial support for Arvados. Please contact \"info@curii.com\":mailto:info@curii.com for more information.\n\nIf you'd prefer to do things yourself, a few starting points for maintaining and upgrading Arvados can be found below.\n\nh2(#maintaining). Maintaining Arvados\n\nAfter Arvados is installed, periodic configuration changes may be required to adapt the software to your needs. Arvados uses a unified configuration file, which is normally found at @/etc/arvados/config.yml@.\n\nMaking a configuration change to Arvados typically involves three steps:\n\n* modification of the @config.yml@ file\n* distribution of the modified file to the machines in the cluster\n* restarting of the services affected by the change\n\nh3(#configchange). Modification of the @config.yml@ file\n\nConsult the \"configuration reference\":{{site.baseurl}}/admin/config.html or another part of the documentation to identify the change to be made.\n\nPreserve a copy of your existing configuration file as a backup, and make the desired modification.\n\nRun @arvados-server config-check@ to make sure the configuration file has no errors and no warnings.\n\nh3(#distribution). Distribute the configuration file\n\nIt is very important to keep the @config.yml@ file in sync between all the Arvados system nodes, to avoid issues with services running on different versions of the configuration.\n\nWe provide \"installer.sh\":../install/install-multi-host.html#installation to distribute config changes.  You may also do your own orchestration e.g. @scp@, configuration management software, etc.\n\nh3(#restart). Restart the services affected by the change\n\nIf you know which Arvados service uses the specific configuration that was modified, restart those services. When in doubt, restart all Arvados system services.\n\nTo check for services that have not restarted since the configuration file was updated, run the @arvados-server check@ command on each system node.\n\nTo test functionality and check for common problems, run the @arvados-client sudo diagnostics@ command on a system node.\n\nh2(#upgrading). 
Upgrading Arvados\n\nUpgrading Arvados typically involves the following steps:\n\n# Consult the \"upgrade notes\":{{site.baseurl}}/admin/upgrading.html and the \"release notes\":https://arvados.org/releases/ for the release you want to upgrade to.\n# Wait for the cluster to be idle and stop Arvados services.\n# Make a backup of your database, as a precaution.\n# Update the configuration file for the new release, if necessary (see \"Maintaining Arvados\":#maintaining above).\n# Update compute nodes\n## (cloud) Rebuild and deploy the \"compute node image\":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html\n## (slurm/LSF) Upgrade the @python3-arvados-fuse@ package used on your compute nodes\n# Install new packages using @apt upgrade@ or @dnf upgrade@.\n# Wait for the package installation scripts to complete; they perform any necessary data migrations.\n# Run @arvados-server config-check@ to detect configuration errors or deprecated entries.\n# Verify that the Arvados services were restarted as part of the package upgrades.\n# Run @arvados-server check@ to detect services that did not restart properly.\n# Run @arvados-client sudo diagnostics@ to test functionality.\n"
  },
  {
    "path": "doc/admin/management-token.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Management token\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nTo enable and collect health checks and metrics, services must be configured with a \"management token\".\n\nServices must have ManagementToken configured.  This is used to authorize access monitoring endpoints.  If ManagementToken is not configured, monitoring endpoints will return the error @404 disabled@.\n\nTo access a monitoring endpoint, the requester must provide the HTTP header @Authorization: Bearer (ManagementToken)@.\n\nh2. API server and other services\n\nThe following services also support monitoring.\n\n* API server\n* controller\n* keep-balance\n* keepproxy\n* keepstore\n* keep-web\n* arvados-ws \n\nSet @ManagementToken@ in the appropriate section of @/etc/arvados/config.yml@.\n\n<notextile>\n<pre><code>Clusters:\n  <span class=\"userinput\">ClusterID</span>:\n    # Token to be included in all healthcheck requests. Disabled by default.\n    # Server expects request header of the format \"Authorization: Bearer xxx\"\n    ManagementToken: xxx\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/admin/memory-cpu-profiling.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Memory and CPU profiling\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados system services (other than the Rails API server) have an option to provide live profiling data on an HTTP endpoint.  This can be analyzed with the @go tool pprof@ program from the Go runtime to help identify memory and CPU usage issues.  The @go tool pprof@ program can either connect directly to the profiling endpoint, or read a snapshot from disk.\n\nEnable profiling by choosing a listening address and adding @-pprof <address>@ to the @EXTRA_OPTS@ environment variable in the systemd service.\n\nExample:\n\n<notextile><pre><code>$ <span class=\"userinput\">sudo systemctl edit keep-balance</span>\n### Editing /etc/systemd/system/keep-balance.service.d/override.conf\n### Anything between here and the comment below will become the new contents of the file\n<span class=\"userinput\">[Service]\nEnvironment=\"EXTRA_OPTS=-pprof <strong>127.0.0.1:3333</strong>\"</span>\n### Lines below this comment will be discarded\n[...]\n</code></pre></notextile>\n\nRestart the service.\n\n<notextile><pre><code>$ <span class=\"userinput\">sudo systemctl restart keep-balance</span>\n</code></pre></notextile>\n\nSave a snapshot of the program's active memory usage after garbage collection.\n\n<notextile><pre><code>$ <span class=\"userinput\">curl 'http://<strong>localhost:3333</strong>/debug/pprof/heap?gc=1' > <strong>/tmp/pprof.gz</strong></span>\n</code></pre></notextile>\n\nThe following analysis steps can be done on the server itself or on a different machine.\n\nTo get the @go tool pprof@ command, install the Go runtime from OS packages or from the \"Go download page\":https://go.dev/doc/install.\n\n<notextile><pre><code>$ <span class=\"userinput\">sudo apt install golang</span>\n</code></pre></notextile>\n\nRun the @go tool pprof@ command to summarize the snapshot.\n\n<notextile><pre><code>$ <span class=\"userinput\">go tool pprof -top <strong>/tmp/pprof.gz</strong></span>\nFile: keep-balance\nBuild ID: edd0405c97f4235473dba21b7c7fd52c8f755cde\nType: inuse_space\nTime: Nov 3, 2025 at 11:12am (EST)\nShowing nodes accounting for 443.71MB, 98.02% of 452.67MB total\nDropped 35 nodes (cum <= 2.26MB)\n      flat  flat%   sum%        cum   cum%\n  217.54MB 48.06% 48.06%   217.54MB 48.06%  git.arvados.org/arvados.git/services/keep-balance.(*BlockState).increaseDesired (inline)\n   85.46MB 18.88% 66.94%    85.46MB 18.88%  git.arvados.org/arvados.git/services/keep-balance.(*BlockStateMap).get (inline)\n   66.07MB 14.60% 81.53%    76.58MB 16.92%  git.arvados.org/arvados.git/sdk/go/arvados.(*Collection).SizedDigests\n   19.87MB  4.39% 85.92%    20.87MB  4.61%  github.com/lib/pq.textDecode\n      13MB  2.87% 88.79%       13MB  2.87%  git.arvados.org/arvados.git/services/keep-balance.(*BlockState).addReplica (inline)\n   10.51MB  2.32% 91.12%    10.51MB  2.32%  bytes.genSplit\n   10.25MB  2.26% 93.38%    10.25MB  2.26%  github.com/lib/pq.(*conn).recvMessage\n    7.50MB  1.66% 95.04%    53.63MB 11.85%  git.arvados.org/arvados.git/services/keep-balance.EachCollection\n       5MB  1.10% 96.14%    13.50MB  2.98%  encoding/json.Unmarshal\n    4.50MB  0.99% 97.14%     4.50MB  0.99%  encoding/json.(*scanner).pushParseState\n       4MB  0.88% 98.02%        4MB  0.88%  encoding/json.(*decodeState).literalStore\n         0     0% 98.02%    10.51MB  2.32%  bytes.Split (inline)\n         0     0% 
98.02%    31.13MB  6.88%  database/sql.(*Rows).Next\n         0     0% 98.02%    31.13MB  6.88%  database/sql.(*Rows).Next.func1\n         0     0% 98.02%    31.13MB  6.88%  database/sql.(*Rows).nextLocked\n         0     0% 98.02%    31.13MB  6.88%  database/sql.withLock\n         0     0% 98.02%        4MB  0.88%  encoding/json.(*decodeState).array\n         0     0% 98.02%        4MB  0.88%  encoding/json.(*decodeState).unmarshal\n         0     0% 98.02%        4MB  0.88%  encoding/json.(*decodeState).value\n         0     0% 98.02%     4.50MB  0.99%  encoding/json.checkValid\n         0     0% 98.02%     4.50MB  0.99%  encoding/json.stateBeginValue\n         0     0% 98.02%     2.46MB  0.54%  git.arvados.org/arvados.git/lib/service.(*command).RunCommand.ifCollectionInHost.func9\n         0     0% 98.02%     2.46MB  0.54%  git.arvados.org/arvados.git/sdk/go/httpserver.(*metrics).ServeAPI.RequireLiteralToken.func3\n...\n</code></pre></notextile>\n\nThe @go tool pprof@ command can also connect directly to the profiling endpoint.  In this mode, by default it will also save a snapshot in @$HOME/pprof/@.\n\nTo connect directly to the profiling endpoint and display a sampling of CPU usage over a 2-second interval:\n\n<notextile><pre><code>$ <span class=\"userinput\">go tool pprof -top 'http://<strong>localhost:3333</strong>/debug/pprof/profile?seconds=2'</span>\nFetching profile over HTTP from http://localhost:3333/debug/pprof/profile?seconds=2\nSaved profile in /home/username/pprof/pprof.keep-balance.samples.cpu.001.pb.gz\nFile: keep-balance\nBuild ID: edd0405c97f4235473dba21b7c7fd52c8f755cde\nType: cpu\nTime: Nov 3, 2025 at 11:12am (EST)\nDuration: 2.19s, Total samples = 2.98s (136.17%)\nShowing nodes accounting for 2.57s, 86.24% of 2.98s total\nDropped 73 nodes (cum <= 0.01s)\n      flat  flat%   sum%        cum   cum%\n     0.34s 11.41% 11.41%      0.36s 12.08%  runtime.findObject\n     0.15s  5.03% 16.44%      0.32s 10.74%  regexp.(*Regexp).doOnePass\n     0.13s  4.36% 20.81%      0.13s  4.36%  runtime.(*mspan).heapBitsSmallForAddr\n     0.12s  4.03% 24.83%      0.12s  4.03%  runtime.(*gcBits).bitp (inline)\n     0.11s  3.69% 28.52%      0.11s  3.69%  regexp/syntax.(*Inst).MatchRunePos\n     0.08s  2.68% 31.21%      0.09s  3.02%  runtime.(*mspan).writeHeapBitsSmall\n     0.07s  2.35% 33.56%      0.07s  2.35%  runtime.nextFreeFast (inline)\n     0.06s  2.01% 35.57%      0.06s  2.01%  runtime.futex\n     0.06s  2.01% 37.58%      0.06s  2.01%  runtime.memclrNoHeapPointers\n     0.05s  1.68% 39.26%      0.05s  1.68%  indexbytebody\n     0.05s  1.68% 40.94%      0.05s  1.68%  internal/runtime/syscall.Syscall6\n     0.05s  1.68% 42.62%      0.43s 14.43%  runtime.mallocgc\n     0.05s  1.68% 44.30%      0.05s  1.68%  runtime.memmove\n     0.05s  1.68% 45.97%      0.71s 23.83%  runtime.scanobject\n     0.05s  1.68% 47.65%      0.05s  1.68%  runtime.usleep\n     0.04s  1.34% 48.99%      0.26s  8.72%  runtime.mallocgcSmallScanNoHeader\n     0.04s  1.34% 50.34%      0.04s  1.34%  runtime.rand\n     0.03s  1.01% 51.34%      0.03s  1.01%  crypto/internal/fips140/aes/gcm.gcmAesDec\n     0.03s  1.01% 52.35%      0.57s 19.13%  database/sql.(*Rows).nextLocked\n     0.03s  1.01% 53.36%      0.13s  4.36%  database/sql.convertAssignRows\n     0.03s  1.01% 54.36%      0.56s 18.79%  git.arvados.org/arvados.git/sdk/go/arvados.(*Collection).SizedDigests\n     0.03s  1.01% 55.37%      0.18s  6.04%  
git.arvados.org/arvados.git/services/keep-balance.(*BlockStateMap).GetConfirmedReplication\n...\n</code></pre></notextile>\n\n@http://localhost:3333/debug/pprof/@ serves an HTML page with a list of available profiles:\n* @allocs@ -- A sampling of all past memory allocations\n* @block@ -- Stack traces that led to blocking on synchronization primitives\n* @cmdline@ -- The command line invocation of the current program\n* @goroutine@ -- Stack traces of all current goroutines. Use @debug=2@ as a query parameter to export in the same format as an unrecovered panic.\n* @heap@ -- A sampling of memory allocations of live objects. You can specify the @gc@ GET parameter to run GC before taking the heap sample.\n* @mutex@ -- Stack traces of holders of contended mutexes\n* @profile@ -- CPU profile. You can specify the duration in the @seconds@ GET parameter. After you get the profile file, use the @go tool pprof@ command to investigate the profile.\n* @symbol@ -- Maps given program counters to function names. Counters can be specified in a GET raw query or POST body, multiple counters are separated by '+'.\n* @threadcreate@ -- Stack traces that led to the creation of new OS threads\n* @trace@ -- A trace of execution of the current program. You can specify the duration in the @seconds@ GET parameter. After you get the trace file, use the @go tool trace@ command to investigate the trace.\n\n
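The @go tool pprof@ command can also serve an interactive web UI, including graph and flame-graph views, for a saved snapshot.  A minimal sketch (the listen address is arbitrary):\n\n<notextile><pre><code>$ <span class=\"userinput\">go tool pprof -http=localhost:8080 /tmp/pprof.gz</span>\n</code></pre></notextile>\n\nAdditional resources:\n* \"pprof tool documentation\":https://github.com/google/pprof/blob/main/doc/README.md\n* \"Go profiling data endpoint documentation\":https://pkg.go.dev/net/http/pprof\n"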
  },
  {
    "path": "doc/admin/metadata-vocabulary.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Metadata vocabulary\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nMany Arvados objects (like collections and projects) can store metadata as properties that in turn can be used in searches allowing a flexible way of organizing data inside the system.\n\nArvados enables the site administrator to set up a formal metadata vocabulary definition so that users can select from predefined key/value pairs of properties, offering the possibility to add different terms for the same concept on clients' UI such as workbench2.\n\nThe Controller service loads and caches the configured vocabulary file in memory at startup time, exporting it on a particular endpoint. From time to time, it'll check for updates in the local copy and refresh its cache if validation passes.\n\nh2. Configuration\n\nThe site administrator should place the JSON vocabulary file on the same host as the controller service and set up the config file as follows:\n\n<notextile>\n<pre><code>Cluster:\n  zzzzz:\n    API:\n      VocabularyPath: <span class=\"userinput\">/etc/arvados/vocabulary.json</span>\n</code></pre>\n</notextile>\n\nh2. Definition format\n\nThe JSON file describes the available keys and values and if the user is allowed to enter free text not defined by the vocabulary.\n\nKeys and values are indexed by identifiers so that the concept of a term is preserved even if vocabulary labels are changed.\n\nThe following is an example of a vocabulary definition:\n\n{% codeblock as json %}\n{% include 'metadata_vocabulary_example' %}\n{% endcodeblock %}\n\nFor clients to be able to query the vocabulary definition, a special endpoint is exposed on the @controller@ service: @/arvados/v1/vocabulary@. This endpoint doesn't require authentication and returns the vocabulary definition in JSON format.\n\nIf the @strict_tags@ flag at the root level is @true@, it will restrict the users from saving property keys other than the ones defined in the vocabulary. This restriction is enforced at the backend level to ensure consistency across different clients.\n\nInside the @tags@ member, IDs are defined (@IDTAGANIMALS@, @IDTAGCOMMENT@, @IDTAGIMPORTANCES@) and can have any format that the current application requires. Every key will declare at least a @labels@ list with zero or more label objects.\n\nThe @strict@ flag inside a tag definition operates the same as the @strict_tags@ root member, but at the individual tag level. When @strict@ is @true@, a tag’s value options are limited to those defined by the vocabulary.\n\nThe @values@ member is optional and is used to define valid key/label pairs when applicable. In the example above, @IDTAGCOMMENT@ allows open-ended text by only defining the tag's ID and labels and leaving out @values@.\n\nWhen any key or value has more than one label option, Workbench2's user interface will allow the user to select any of the options. But because only the IDs are saved in the system, when the property is displayed in the user interface, the label shown will be the first of each group defined in the vocabulary file. 
For example, the user could select the property key @Species@ and @Homo sapiens@ as its value, but the user interface will display it as @Animal: Human@ because those labels are the first in the vocabulary definition.\n\nInternally, Workbench2 uses the IDs to do property-based searches, so if the user searches by @Animal: Human@ or @Species: Homo sapiens@, both will return the same results.\n\nh2. Definition validation\n\nBecause the vocabulary definition is prone to syntax or logical errors, the @controller@ service needs to do some validation before answering requests. If the vocabulary validation fails, the service won't start.\nThe site administrator can make sure the vocabulary file is correct before even trying to start the @controller@ service by running @arvados-server config-check@. When the vocabulary definition isn't correct, the administrator will get a list of issues like the one below:\n\n<notextile>\n<pre><code># arvados-server config-check -config /etc/arvados/config.yml\nError loading vocabulary file \"/etc/arvados/vocabulary.json\" for cluster zzzzz:\nduplicate JSON key \"tags.IDTAGFRUITS.values.IDVALFRUITS1\"\ntag key \"IDTAGCOMMENT\" is configured as strict but doesn't provide values\ntag value label \"Banana\" for pair (\"IDTAGFRUITS\":\"IDVALFRUITS8\") already seen on value \"IDVALFRUITS4\"\nexit status 1\n</code></pre>\n</notextile>\n\nbq. NOTE: These validation checks are performed only on the node that hosts the vocabulary file defined in the configuration. As the same configuration file is shared between different nodes, the nodes that don't host the file won't produce spurious errors when running @arvados-server config-check@.\n\nh2. Live updates\n\nSometimes it may be necessary to modify the vocabulary definition in a running production environment.\nWhen a change is detected, the @controller@ service will automatically attempt to load the new vocabulary and check its validity before making it active.\nIf the new vocabulary has some issue, the last valid one remains active. The service will export any errors on its health endpoint so that a monitoring solution can send an alert appropriately.\nWith the above mechanisms in place, no outages should occur from making typos or other errors when updating the vocabulary file.\n\nh2. Health status\n\nTo enable the administrator to guarantee the system's metadata integrity, the @controller@ service exports a specific health endpoint for the vocabulary at @/_health/vocabulary@.\nAs a first measure, the service won't start if the vocabulary file is incorrect. Once running, if an update introduces errors, the service needs to keep running while notifying the operator that some fixing is in order.\nAn example of a vocabulary health error is included below:\n\n<notextile>\n<pre><code>$ curl --silent -H \"Authorization: Bearer xxxtokenxxx\" https://controller/_health/vocabulary | jq .\n{\n  \"error\": \"while loading vocabulary file \\\"/etc/arvados/vocabulary.json\\\": duplicate JSON key \\\"tags.IDTAGSIZES.values.IDVALSIZES3\\\"\",\n  \"health\": \"ERROR\"\n}\n</code></pre>\n</notextile>\n\nh2. Client support\n\nWorkbench2 currently takes advantage of this vocabulary definition by providing an easy-to-use interface for searching and applying metadata to different objects in the system. 
Because the definition file only resides on the @controller@ node, and Workbench2 is just a static web application run by every user's web browser, there's a mechanism in place that allows Workbench2 and any other client to request the active vocabulary.\n\nThe @controller@ service provides an unauthenticated endpoint at @/arvados/v1/vocabulary@ where it exports the contents of the vocabulary JSON file:\n\n<notextile>\n<pre><code>$ curl --silent https://controller/arvados/v1/vocabulary | jq .\n{\n  \"kind\": \"arvados#vocabulary\",\n  \"strict_tags\": false,\n  \"tags\": {\n    \"IDTAGANIMALS\": {\n      \"labels\": [\n        {\n          \"label\": \"Animal\"\n        },\n        {\n          \"label\": \"Creature\"\n        }\n      ],\n      \"strict\": false,\n...\n}\n</code></pre>\n</notextile>\n\nAlthough the vocabulary enforcement is done on the backend side, clients can use this information to provide helpful features to users, like doing ID-to-label translations, preemptive error checking, etc.\n\nh2. Properties migration\n\nAfter installing the new vocabulary definition, it may be necessary to migrate preexisting properties that were set up using literal strings. This can be a big task depending on the number of properties in the vocabulary and the number of collections and projects on the cluster.\n\nTo help with this task, we provide below an example migration script that accepts the new vocabulary definition file as an input, and uses the @ARVADOS_API_TOKEN@ and @ARVADOS_API_HOST@ environment variables to connect to the cluster, search for every collection and group that has properties with labels defined in the vocabulary file, and migrate them to the corresponding identifiers.\n\nThis script will not run if the vocabulary file has duplicated labels for different keys or for different values inside a key; this is a failsafe mechanism to avoid migration errors.\n\nPlease take into account that this script requires admin credentials. It also offers a @--dry-run@ flag that will report what changes are required without applying them, so it can be reviewed by an administrator.\n\nAlso, take into consideration that this example script does case-sensitive matching on labels.\n\n{% codeblock as python %}\n{% include 'vocabulary_migrate_py' %}\n{% endcodeblock %}\n"
  },
  {
    "path": "doc/admin/metrics.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Metrics\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nSome Arvados services publish Prometheus/OpenMetrics-compatible metrics at @/metrics@. Metrics can help you understand how components perform under load, find performance bottlenecks, and detect and diagnose problems.\n\nTo access metrics endpoints, services must be configured with a \"management token\":management-token.html. When accessing a metrics endpoint, prefix the management token with @\"Bearer \"@ and supply it in the @Authorization@ request header.\n\n<pre>curl -sfH \"Authorization: Bearer your_management_token_goes_here\" \"https://0.0.0.0:25107/metrics\"\n</pre>\n\nThe plain text export format includes \"help\" messages with a description of each reported metric.\n\nWhen configuring Prometheus, use a @bearer_token@ or @bearer_token_file@ option to authenticate requests.\n\n<pre>scrape_configs:\n  - job_name: keepstore\n    bearer_token: your_management_token_goes_here\n    static_configs:\n    - targets:\n      - \"keep0.ClusterID.example.com:25107\"\n</pre>\n\ntable(table table-bordered table-condensed table-hover).\n|_. Component|_. Metrics endpoint|\n|arvados-api-server|✓|\n|arvados-controller|✓|\n|arvados-dispatch-cloud|✓|\n|arvados-dispatch-lsf|✓|\n|arvados-ws|✓|\n|keepproxy|✓|\n|keepstore|✓|\n|keep-balance|✓|\n|keep-web|✓|\n|workbench2||\n"
  },
  {
    "path": "doc/admin/migrating-providers.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Changing upstream login providers\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nWhen a user logs in to Arvados, their email address (as returned by the authentication provider) is used as the primary key for their Arvados account.\n\nIf you reconfigure Arvados to use a different authentication provider after some users have created accounts, you should either ensure the new provider returns the same email addresses as the old one, or update your Arvados users' @email@ attributes to match the email addresses returned by the new provider.\n\nOtherwise, next time users log in, they will be given new accounts instead of logging in to their existing accounts.\n"
  },
  {
    "path": "doc/admin/reassign-ownership.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: \"Reassign user data ownership\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nIf a user leaves an organization and stops using their Arvados account, it may be desirable to reassign the data owned by that user to another user to maintain easy access.\n\nThis is currently a command line based, admin-only feature.\n\nh3. Step 1: Determine user uuids\n\nUser uuids can be determined by browsing workbench or using @arv user list@ at the command line.\n\nThe \"old user\" is the user that is leaving the organization.\n\nThe \"new user\" is the user that will gain ownership of the old user's data.  This includes collections, projects, container requests, workflows, and git repositories owned by the old user.  It also transfers any permissions granted to the old user, to the new user.\n\nIn the example below, @x1u39-tpzed-3kz0nwtjehhl0u4@ is the old user and @x1u39-tpzed-fr97h9t4m5jffxs@ is the new user.\n\nh3. Step 2: Create a project\n\nCreate a project owned by the new user that will hold the data from the old user.\n\n<pre>\n$ arv --format=uuid group create --group '{\"group_class\": \"project\", \"name\": \"Data from old user\", \"owner_uuid\": \"x1u39-tpzed-fr97h9t4m5jffxs\"}'\nx1u39-j7d0g-mczqiguhil13083\n</pre>\n\nh3. Step 3: Reassign data from the old user to the new user and project\n\nThe @user merge@ method reassigns data from the old user to the new user.\n\n<pre>\n$ arv user merge --old-user-uuid=x1u39-tpzed-3kz0nwtjehhl0u4 \\\n  --new-user-uuid=x1u39-tpzed-fr97h9t4m5jffxs \\\n  --new-owner-uuid=x1u39-j7d0g-mczqiguhil13083\n</pre>\n\nAfter reassigning data, use @unsetup@ to deactivate the old user's account.\n\n<pre>\n$ arv user unsetup --uuid=x1u39-tpzed-3kz0nwtjehhl0u4\n</pre>\n\nNote that authorization credentials (API tokens, ssh keys) are *not* transferred to the new user, as this would potentially give the old user access to the new user's account.\n"
  },
  {
    "path": "doc/admin/restricting-upload-download.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Restricting upload or download\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nFor some use cases, you may want to limit the ability of users to upload or download data from outside the cluster.  (By \"outside\" we mean from networks other than the cluster's own private network).  For example, this makes it possible to share restricted data sets with users so that they may run their own data analysis on the cluster, while preventing them from easily downloading the data set to their local workstation.\n\nThis feature exists in addition to the existing Arvados permission system.  Users can only download from collections they have @read@ access to, and can only upload to projects and collections they have @write@ access to.\n\nThere are two services involved in accessing data from outside the cluster.\n\nh2. Keepproxy Permissions\n\nPermitting @keepproxy@ makes it possible to use @arv-put@ and @arv-get@.  It works in terms of individual 64 MiB keep blocks.  It prints a log line each time a user uploads or downloads an individual block. Those logs are usually stored by @journald@ or @syslog@.\n\nThe default policy allows anyone to upload or download.\n\n<pre>\n    Collections:\n      KeepproxyPermission:\n        User:\n          Download: true\n          Upload: true\n        Admin:\n          Download: true\n          Upload: true\n</pre>\n\nh2. WebDAV and S3 API Permissions\n\nPermitting @WebDAV@ makes it possible to use WebDAV, S3 API, and upload/download with Workbench 2.  It works in terms of individual files.  It prints a log each time a user uploads or downloads a file (\"subject to throttling discussed below\":#throttling).  When @WebDAVLogEvents@ (default true) is enabled, it also adds an entry into the API server @logs@ table.\n\nWhen a user attempts to upload or download from a service without permission, they will receive a @403 Forbidden@ response.  This only applies to file content.\n\nDenying download permission does not deny access to access to XML file listings with PROPFIND, or auto-generated HTML documents containing file listings.\n\nDenying upload permission does not deny other operations that modify collections without directly accessing file content, such as MOVE and COPY.\n\nThe default policy allows anyone to upload or download.\n\n<pre>\n    Collections:\n      WebDAVPermission:\n        User:\n          Download: true\n          Upload: true\n        Admin:\n          Download: true\n          Upload: true\n      WebDAVLogEvents: true\n      WebDAVLogDownloadInterval: 30s\n</pre>\n\nWhen a user or admin creates a sharing link, a custom scoped token is embedded in that link. This effectively allows anonymous user access to the associated data via that link. These custom scoped tokens are always treated as user tokens for the purposes of restricting download access, even when created by an admin user. In other words, these custom scoped tokens, when used in a sharing link, are always subject to the value of the @WebDAVPermission/User/Download@ configuration setting.\n\nIf that custom scoped token is used with @arv-get@, its use will be subject to the value of the @KeepproxyPermission/User/Download@ configuration setting.\n\nh2. 
Shell node and container permissions\n\nBe aware that even when upload and download from outside the network are not allowed, a user who has access to a shell node or runs a container still has internal access to Keep.  (This is necessary to be able to run workflows.)  From the shell node or container, a user could send data outside the network by some other method, although this requires more intent than accidentally clicking on a link and downloading a file.  It is possible to set up a firewall to prevent shell and compute nodes from making connections to hosts outside the private network.  Exactly how to configure firewalls is out of scope for this page, as it depends on the specific network infrastructure of your cluster.\n\nh2. Choosing a policy\n\nThe distinction between WebDAV and Keepproxy is important for auditing.  WebDAV records 'upload' and 'download' events on the API server that are included in the \"User Activity Report\":user-activity.html, whereas @keepproxy@ only logs upload and download of individual blocks, which requires a reverse lookup to determine the collection(s) and file(s) a block is associated with.\n\nYou set separate permissions for @WebDAV@ and @Keepproxy@, with separate policies for regular users and admin users.\n\nThese policies apply only to access from outside the cluster, using Workbench or Arvados CLI tools.\n\nThe @WebDAVLogEvents@ option should be enabled if you intend to run the \"User Activity Report\":user-activity.html.  If you don't need audits, or you are running a site that is mostly serving public data to anonymous downloaders, you can disable it to avoid the extra API server request.\n\nh3. Audited downloads\n\nFor ease of access auditing, this policy prevents downloads using @arv-get@.  Downloads through WebDAV and S3 API are permitted, but logged.  Uploads are allowed.\n\n<pre>\n    Collections:\n      WebDAVPermission:\n        User:\n          Download: true\n          Upload: true\n        Admin:\n          Download: true\n          Upload: true\n\n      KeepproxyPermission:\n        User:\n          Download: false\n          Upload: true\n        Admin:\n          Download: false\n          Upload: true\n      WebDAVLogEvents: true\n</pre>\n\nh3. Disallow downloads by regular users\n\nThis policy prevents regular users (non-admin) from downloading data.  Uploading is allowed.  This supports the case where restricted data sets are shared with users so that they may run their own data analysis on the cluster, while preventing them from downloading the data set to their local workstation.  Be aware that users won't be able to download the results of their analysis, either, requiring an admin in the loop or some other process to release results.\n\n<pre>\n    Collections:\n      WebDAVPermission:\n        User:\n          Download: false\n          Upload: true\n        Admin:\n          Download: true\n          Upload: true\n\n      KeepproxyPermission:\n        User:\n          Download: false\n          Upload: true\n        Admin:\n          Download: true\n          Upload: true\n      WebDAVLogEvents: true\n</pre>\n\nh3. 
Disallow uploads by regular users\n\nThis policy is suitable for an installation where data is being shared with a group of users who are allowed to download the data, but not permitted to store their own data on the cluster.\n\n<pre>\n    Collections:\n      WebDAVPermission:\n        User:\n          Download: true\n          Upload: false\n        Admin:\n          Download: true\n          Upload: true\n\n      KeepproxyPermission:\n        User:\n          Download: true\n          Upload: false\n        Admin:\n          Download: true\n          Upload: true\n      WebDAVLogEvents: true\n</pre>\n\n\nh2(#audit_log). Accessing the audit log\n\nWhen @WebDAVLogEvents@ is enabled, uploads and downloads of files are logged in the Arvados audit log. These events are included in the \"User Activity Report\":user-activity.html. The audit log can also be accessed via the API, SDKs or command line. For example, to show the 100 most recent file downloads:\n\n<pre>\narv log list --filters '[[\"event_type\",\"=\",\"file_download\"]]' -o 'created_at desc' -l 100\n</pre>\n\nFor uploads, use the @file_upload@ event type.\n\nNote that this only covers upload and download activity via WebDAV, S3, and Workbench 2.\n\nThe @arv-get@ and @arv-put@ tools upload via @Keepproxy@, which does not log activity to the audit log because it operates at the block level, not the file level. @Keepproxy@ records the uuid of the user that owns the token used in the request in its system logs. Those logs are usually stored by @journald@ or @syslog@. A typical log line for such a block download looks like this:\n\n<pre>\nJul 20 15:03:38 keep.xxxx1.arvadosapi.com keepproxy[63828]: {\"level\":\"info\",\"locator\":\"abcdefghijklmnopqrstuvwxyz012345+53251584\",\"msg\":\"Block download\",\"time\":\"2021-07-20T15:03:38.458792300Z\",\"user_full_name\":\"Albert User\",\"user_uuid\":\"ce8i5-tpzed-abcdefghijklmno\"}\n</pre>\n\nIt is possible to do a reverse lookup from the locator to find all matching collections: the @manifest_text@ field of a collection lists all the block locators that are part of the collection. The @manifest_text@ field also provides the relevant filename in the collection. Because this lookup is rather involved and there is no automated tool to do it, we recommend disabling @KeepproxyPermission.User.Download@ and @KeepproxyPermission.User.Upload@ for sites where the audit log is important and @arv-get@ and @arv-put@ are not essential.\n\nh3(#throttling). WebDAV download log throttling\n\nIf a client requests partial content past the start of a file, and a request from the same client for the same file was logged within the last time interval configured by @WebDAVLogDownloadInterval@, @keep-web@ will not write a new log. This throttling applies to both printed and API server logs. The default value of 30 seconds reduces log output when clients like @aws s3 cp@ download one file in small chunks in parallel. Administrators can set this setting to @0@ to disable log throttling. This setting lets administrators choose how they want to balance full auditability against logging overhead: a shorter interval means more download requests are logged, with all the overhead that entails.\n"
  },
  {
    "path": "doc/admin/scoped-tokens.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Securing API access with scoped tokens\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nBy default, Arvados API tokens grant unlimited access to a user account, and admin account tokens have unlimited access to the whole system.  If you want to grant restricted access to a user account, you can create a \"scoped token\" which is an Arvados API token which is limited to accessing specific APIs.\n\nOne use of token scopes is to grant access to data, such as a collection, to users who do not have an Arvados accounts on your cluster.  This is done by creating scoped token that only allows getting a specific record.  An example of this is \"creating a collection sharing link.\":{{site.baseurl}}/sdk/python/cookbook.html#sharing_link\n\nAnother example is situations where admin access is required but there is risk of the token being compromised.  Setting a scope prevents the token from being used for any action other than the specific action the token is intended for.  For example, \"synchronizing user accounts on a shell node.\":{{site.baseurl}}/install/install-shell-server.html#scoped-token\n\nh2. Defining scopes\n\nA \"scope\" consists of a HTTP method and API path.  A token can have multiple scopes.  Token scopes act as a whitelist, and the API server checks the HTTP method and the API path of every request against the scopes of the request token.  Scopes are also described on the \"API Authorization\":{{site.baseurl}}/api/tokens.html#scopes page of the \"API documentation\":{{site.baseurl}}/api/index.html.\n\nThese examples use @/arvados/v1/collections@, but can be applied to any endpoint.  Consult the \"API documentation\":{{site.baseurl}}/api/index.html to determine the endpoints for specific methods.\n\nThe scope @[\"GET\", \"/arvados/v1/collections\"]@ will allow only GET or HEAD requests for the list of collections.  Any other HTTP method or path (including requests for a specific collection record, eg a request with path @/arvados/v1/collections/zzzzz-4zz18-0123456789abcde@) will return a permission error.\n\nA trailing slash in a scope is signficant.  The scope @[\"GET\", \"/arvados/v1/collections/\"]@ will allow only GET or HEAD requests *starting with* @/arvados/v1/collections/@.  A request for an individual record path @/arvados/v1/collections/zzzzz-4zz18-0123456789abcde@) is allowed but a request to list collections (@/arvados/v1/collections@) will be denied because it does not end with @/@ (API requests with a trailing @/@ will have the slash stripped before the scope is checked.)\n\nThe scope can include an object uuid.  The scope @[\"GET\", \"/arvados/v1/collections/zzzzz-4zz18-0123456789abcde\"]@ only permits requests to read the record @zzzzz-4zz18-0123456789abcde@.\n\nSince a token can have multiple scopes, use @[[\"GET\", \"/arvados/v1/collections\"], [\"GET\", \"/arvados/v1/collections/\"]]@ to allow both listing collections and fetching individual collection records.  This will reject requests to create or change collections, or access any other API method.\n\nObject create calls use the @POST@ method.  A scope of @[\"POST\", \"/arvados/v1/collections\"]@ will allow creating collections, but not reading, listing or updating them (or accessing anything else).\n\nObject update calls use the @PATCH@ method.  
A scope of @[\"PATCH\", \"/arvados/v1/collections/\"]@ will allow updating collections, but not listing or creating them.  (Note: while GET requests are denied an object can be read indirectly by using an empty PATCH which will return the unmodified object as the result).\n\nSimilarly, you can use a scope of @[\"PATCH\", \"/arvados/v1/collections/zzzzz-4zz18-0123456789abcde\"]@ to restrict updates to a single collection.\n\nThere is one special exception to the scope rules: a valid token is always allowed to issue a request to \"@GET /arvados/v1/api_client_authorizations/current@\":{{ site.baseurl }}/api/methods/api_client_authorizations.html#current regardless of its scopes. This allows clients to reliably determine whether a request failed because a token is invalid, or because the token is not permitted to perform a particular request. The API server itself needs to be able to do this to validate tokens issued by other clusters in a federation.\n\nh2. Creating a scoped token\n\nA scoped token can be created at the command line:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv api_client_authorization create --api-client-authorization '{\"scopes\": [[\"GET\", \"/arvados/v1/collections\"], [\"GET\", \"/arvados/v1/collections/\"]]}'</span>\n{\n \"kind\":\"arvados#apiClientAuthorization\",\n \"etag\":\"9yk144t0v6cvyp0342exoh2vq\",\n \"uuid\":\"zzzzz-gj3su-bizbsw0mx5pju3w\",\n \"owner_uuid\":\"zzzzz-tpzed-fr97h9t4m5jffxs\",\n \"created_at\":\"2020-03-12T20:36:12.517375422Z\",\n \"modified_by_user_uuid\":null,\n \"modified_at\":null,\n \"api_token\":\"5a74htnoqwkhtfo2upekpfbsg04hv7cy5v4nowf7dtpxer086m\",\n \"created_by_ip_address\":null,\n \"expires_at\":null,\n \"last_used_at\":null,\n \"last_used_by_ip_address\":null,\n \"scopes\":[\n  [\n   \"GET\",\n   \"/arvados/v1/collections\"\n  ],\n  [\n   \"GET\",\n   \"/arvados/v1/collections/\"\n  ]\n ]\n}\n</code></pre>\n</notextile>\n\nThe response will include @api_token@ field which is the newly issued secret token.  It can be passed directly to the API server that issued it, or can be used to construct a @v2@ token.  A @v2@ format token is required if the token will be used to access other clusters in an Arvados federation.  An Arvados @v2@ format token consists of three fields separate by slashes: the prefix @v2@, followed by the token uuid, followed by the token secret.  For example: @v2/x1u39-gj3su-bizbsw0mx5pju3w/5a74htnoqwkhtfo2upekpfbsg04hv7cy5v4nowf7dtpxer086m@.\n"
  },
  {
    "path": "doc/admin/spot-instances.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Using Preemptible instances\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis page describes how to enable preemptible instances.  Preemptible instances typically offer lower cost computation with a tradeoff of lower service guarantees.  If a compute node is preempted, Arvados will restart the computation on a new instance.\n\nCurrently Arvados supports preemptible instances using AWS and Azure spot instances.\n\nh2. Configuration\n\nFirst, configure some @InstanceTypes@ that have @Preemptible: true@. For a preemptible instance, @Price@ determines the maximum bid price; the actual price paid is dynamic and will likely be lower.\n\nTypically you want to add both preemptible and non-preemptible entries for each cloud provider VM type. To do this automatically, use @PreemptiblePriceFactor@ to enable a preemptible version of each listed type, using the given factor to set the maximum bid price relative to the non-preemptible price. Alternatively, you can configure preemptible instance types explicitly. For example, the following two configurations are equivalent:\n\n<pre>\nClusters:\n  ClusterID:\n    Containers:\n      PreemptiblePriceFactor: 0.8\n    InstanceTypes:\n      m4.large:\n        ProviderType: m4.large\n        VCPUs: 2\n        RAM: 8GiB\n        AddedScratch: 32GB\n        Price: 0.1\n</pre>\n\n<pre>\nClusters:\n  ClusterID:\n    InstanceTypes:\n      m4.large:\n        ProviderType: m4.large\n        VCPUs: 2\n        RAM: 8GiB\n        AddedScratch: 32GB\n        Price: 0.1\n      m4.large.preemptible:\n        Preemptible: true\n        ProviderType: m4.large\n        VCPUs: 2\n        RAM: 8GiB\n        AddedScratch: 32GB\n        Price: 0.08\n</pre>\n\nNext, you can choose to enable automatic use of preemptible instances:\n\n<pre>\nClusters:\n  ClusterID:\n    Containers:\n      AlwaysUsePreemptibleInstances: true\n</pre>\n\nIf @AlwaysUsePreemptibleInstances@ is \"true\", child containers (workflow steps) will always select preemptible instances, regardless of user option.\n\nIf @AlwaysUsePreemptibleInstances@ is \"false\" (the default) or unspecified, preemptible instance are \"used when requested by the user.\":{{site.baseurl}}/user/cwl/cwl-run-options.html#preemptible\n\nNote that regardless of the value of @AlwaysUsePreemptibleInstances@, the top level workflow runner container always runs in a reserved (non-preemptible) instance, to avoid situations where the workflow runner is killed requiring the entire to be restarted.\n\nNo additional configuration is required, \"arvados-dispatch-cloud\":{{site.baseurl}}/install/crunch2-cloud/install-dispatch-cloud.html will now start preemptible instances where appropriate.\n\nh3. Cost Tracking\n\nPreemptible instances prices are declared at instance request time and defined by the maximum price that the user is willing to pay per hour. By default, this price is the same amount as the on-demand version of each instance type, and this setting is the one that @arvados-dispatch-cloud@ uses for now, as it doesn't include any pricing data to the spot instance request.\n\nFor AWS, the real price that a spot instance has at any point in time is discovered at the end of each usage hour, depending on instance demand. 
For this reason, AWS provides a data feed subscription to get hourly logs, as described in \"Amazon's User Guide\":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html.\n\nh2. Preemptible instances on AWS\n\nFor general information, see \"using Amazon EC2 spot instances\":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html.\n\nh3. Permissions\n\nWhen requesting spot instances, Amazon's API may return an authorization error depending on how users and permissions are set on the account. If this is the case, check the logs for this error:\n\n<pre>\nBaseHTTPError: AuthFailure.ServiceLinkedRoleCreationNotPermitted: The provided credentials do not have permission to create the service-linked role for EC2 Spot Instances.\n</pre>\n\nThe account needs to have a service-linked role created. This can be done by logging into the AWS account, going to _IAM Management_ &rarr; _Roles_, and creating the @AWSServiceRoleForEC2Spot@ role by clicking the @Create@ button and selecting the @EC2@ service and the @EC2 - Spot Instances@ use case.\n\nh3. Interruption notices\n\nWhen running a container on a spot instance, Arvados monitors the EC2 metadata endpoint for interruption notices. When an interruption notice is received, it is reported in a log entry in the @crunch-run.txt@ file, as well as in the @warning@ and @preemptionNotice@ keys in the @runtime_status@ field of the affected container.\n\nExample excerpt from @crunch-run.txt@:\n\n<pre>\n2023-02-21T21:12:42.350719824Z Cloud provider scheduled instance stop at 2023-02-21T21:14:42Z\n</pre>\n\nExample @runtime_status@:\n\n<pre>\n{\n  \"warning\": \"preemption notice\",\n  \"warningDetail\": \"Cloud provider scheduled instance stop at 2023-02-21T21:14:42Z\",\n  \"preemptionNotice\": \"Cloud provider scheduled instance stop at 2023-02-21T21:14:42Z\"\n}\n</pre>\n\nh2. Preemptible instances on Azure\n\nFor general information, see \"Use Spot VMs in Azure\":https://docs.microsoft.com/en-us/azure/virtual-machines/spot-vms.\n\nWhen starting preemptible instances on Azure, Arvados configures the eviction policy to 'delete', with max price set to '-1'. This has the effect that preemptible VMs will not be evicted for pricing reasons. The price paid for the instance will be the current spot price for the VM type, up to a maximum of the price for a standard, non-spot VM of that type.\n\nPlease note that Azure provides no SLA for preemptible instances. Even in this configuration, preemptible instances can still be evicted for capacity reasons. If that happens and a container is aborted, Arvados will try to restart it, subject to the usual retry rules.\n\nSpot pricing is not available on 'B-series' VMs; those should not be defined in the configuration file with the _Preemptible_ flag set to true. Spot instances have a separate quota pool; make sure you have sufficient quota available.\n"
  },
  {
    "path": "doc/admin/storage-classes.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Configuring storage classes\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nStorage classes (alternately known as \"storage tiers\") allow you to control which volumes should be used to store particular collection data blocks.  This can be used to implement data storage policies such as moving data to archival storage.\n\nIn the default Arvados configuration, with no storage classes specified in the configuration file, all volumes belong to a single implicit storage class called \"default\". Apart from that, names of storage classes are internal to the cluster and decided by the administrator.  Other than the implicit \"default\" class, Arvados currently does not define any standard storage class names.\n\nTo use multiple storage classes, update the @StorageClasses@ and @Volumes@ sections of your configuration file.\n* Every storage class you use (including \"default\") must be defined in the @StorageClasses@ section.\n* The @StorageClasses@ section must use @Default: true@ to indicate at least one default storage class. When a client/user does not specify storage classes when creating a new collection, the default storage classes are used implicitly.\n* If some storage classes are faster or cheaper to access than others, assign a higher @Priority@ to the faster ones. When reading data, volumes with high priority storage classes are searched first.\n\nExample:\n\n<pre>\n    StorageClasses:\n\n      default:\n        # When reading a block that is stored on multiple volumes,\n        # prefer a volume with this class.\n        Priority: 20\n\n        # When a client does not specify a storage class when saving a\n        # new collection, use this one.\n        Default: true\n\n      archival:\n        Priority: 10\n\n    Volumes:\n\n      ClusterID-nyw5e-000000000000000:\n        # This volume is in the \"default\" storage class.\n        StorageClasses:\n          default: true\n\n      ClusterID-nyw5e-000000000000001:\n        # This volume is in the \"archival\" storage class.\n        StorageClasses:\n          archival: true\n</pre>\n\nRefer to the \"configuration reference\":{{site.baseurl}}/admin/config.html for more details.\n\nh3. Using storage classes\n\n\"Discussed in the user guide\":{{site.baseurl}}/user/topics/storage-classes.html\n\nh3. Storage management notes\n\nWhen uploading data, if a data block cannot be uploaded to all desired storage classes, it will result in a fatal error.  Data blocks will not be uploaded to volumes that do not have the desired storage class.\n\nIf you change the storage classes for a collection, the data is not moved immediately.  The \"keep-balance\":{{site.baseurl}}/install/install-keep-balance.html service is responsible for deciding which blocks should be placed on which keepstore volumes.  As part of the rebalancing behavior, it will determine where a block should go in order to satisfy the desired storage classes, and issue pull requests to copy the block from its original volume to the desired volume.  
The block will subsequently be moved to trash on the original volume.\n\nIf a block is assigned to multiple storage classes, the block will be stored on @desired_replication@ number of volumes for each storage class, even if that results in overreplication.\n\nIf a collection has a desired storage class which is not available in any keepstore volume, the collection's blocks will remain in place, and an error will appear in the @keep-balance@ logs.
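\n\nThe desired storage classes of an existing collection can be changed via the API; @keep-balance@ will then relocate the data as described above.  This is a minimal sketch using the Python SDK (the collection UUID is a placeholder):\n\n<notextile>\n<pre><code class=\"python\">import arvados\n\napi = arvados.api('v1')\n\n# Ask Arvados to keep this collection's blocks on \"archival\"\n# volumes; keep-balance moves the data in the background.\napi.collections().update(\n    uuid='zzzzz-4zz18-0123456789abcde',\n    body={'collection': {'storage_classes_desired': ['archival']}},\n).execute()\n</code></pre>\n</notextile>\n"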
  },
  {
    "path": "doc/admin/token-expiration-policy.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: Automatic logout and token expiration\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nWhen a user logs in to Workbench, they receive a newly created token (a long string of random characters) which grants access to the Arvados API on behalf of that user.  In the default configuration, this token does not expire until the user explicitly logs out.\n\nSecurity policies, such as those required to comply with regulations such as HIPAA and GxP, may include policies for \"automatic logoff\".  In order to limit the window of risk associated with unauthorized access of the desktop of an Arvados user, or a token being leaked, Arvados offers options for automatic logout from the web app, and to configure access tokens to expire by default.\n\nThe @Workbench.IdleTimeout@, @Login.TokenLifetime@, and @API.MaxTokenLifetime@ options give the administrator ways to control automatic expiration of tokens granted through the login flow.\n\nIf you are looking for information on how to expire a token manually, see how to \"delete a single token\":user-management-cli.html#delete-token and \"delete all tokens belonging to a user\":user-management-cli.html#delete-all-tokens .\n\nh2. Automatic logout\n\nUse @Workbench.IdleTimeout@ to configure Workbench 2 for automatic logout after a period of idle time.  For example, this configuration would log the user out after five minutes of no keyboard or pointer activity:\n\n<pre>\nClusters:\n  zzzzz:\n    ...\n    Workbench:\n      IdleTimeout: 5m\n    ...\n</pre>\n\nWhen idle timeout is set, several behaviors and considerations apply:\n\n* The user will be automatically logged out after a period of inactivity.  When the automatic logout happens, the token associated with that session will be revoked.\n* Users should use the \"open in new tab\" functionality of Workbench 2.  This will share the same token between tabs without requiring the user to log in again.  Logging out will apply to all browser tabs that use the same token.\n* If the user closes a Workbench tab without first logging out, the browser will forget the token, but not expire the token (this is desirable if the user has several tabs open).\n* If the user closes all Workbench tabs, they will be required to log in again.\n* This only affects browser behavior.  Automatic logout should be used together automatic token expiration described below.\n\nThe default value for @Workbench.IdleTimeout@ is zero, which disables auto-logout.\n\nh2. Automatic expiration of login tokens\n\nUse @Login.TokenLifetime@ to set the lifetime for tokens issued through the login process.  This is the maximum amount of time a user can maintain a session before having to log in again.  This setting applies to both regular and admin user logins.  Here is an example configuration that would require the user to log in again after 12 hours:\n\n<pre>\nClusters:\n  zzzzz:\n    ...\n    Login:\n      TokenLifetime: 12h\n    ...\n</pre>\n\nThis is independent of @Workbench.IdleTimeout@.  Even if Workbench auto-logout is disabled, this option will ensure that the user is always required to log in again after the configured amount of time.\n\nThe default value of @Login.TokenLifetime@ is zero, meaning login tokens do not expire (unless @API.MaxTokenLifetime@ is set).\n\nh2. 
Untrusted login tokens\n\n<pre>\nClusters:\n  zzzzz:\n    ...\n    Login:\n      IssueTrustedTokens: false\n    ...\n</pre>\n\nWhen @IssueTrustedTokens@ is @false@, tokens are \"untrusted\" and cannot be used to list other tokens issued to the same user, nor to grant new tokens.  This prevents an attacker from leveraging a leaked token to acquire other tokens, but also interferes with some Workbench features that create new tokens on behalf of the user.\n\nh2. Automatic expiration of all tokens\n\nUse @API.MaxTokenLifetime@ to set the maximum lifetime for any access token created by regular (non-admin) users.  For example, this configuration would require that all tokens expire after 24 hours:\n\n<pre>\nClusters:\n  zzzzz:\n    ...\n    API:\n      MaxTokenLifetime: 24h\n    ...\n</pre>\n\nTokens created without an explicit expiration time, or with an expiration time that exceeds the maximum lifetime, will have their expiration set according to @API.MaxTokenLifetime@.\n\nSimilar to @Login.TokenLifetime@, this option ensures that the user is always required to log in again after the configured amount of time.\n\nUnlike @Login.TokenLifetime@, this applies to all API operations that manipulate tokens, regardless of whether the token was created by logging in, or by using the API.  If @Login.TokenLifetime@ is greater than @API.MaxTokenLifetime@, @MaxTokenLifetime@ takes precedence.\n\nAdmin users are permitted to create tokens with expiration times further in the future than @MaxTokenLifetime@.\n\nThe default value of @MaxTokenLifetime@ is zero, which means there is no maximum token lifetime.\n\nh2. Choosing a policy\n\n@Workbench.IdleTimeout@ only affects browser behavior.  It is strongly recommended that automatic browser logout be used together with @Login.TokenLifetime@, which is enforced on the API side.\n\n@IssueTrustedTokens: true@ (default value) is less restrictive.  Be aware that an unrestricted token can be \"refreshed\" to gain access for an indefinite period.  This means, during the window that the token is valid, the user is permitted to create a new token, which will have a new expiration further in the future (of course, once the token has expired, this is no longer possible).  Unrestricted tokens are required for some Workbench features, as well as ease of use in other contexts, such as the Arvados command line.  This option is recommended if many users will interact with the system through the command line.\n\n@IssueTrustedTokens: false@ is more restrictive.  A token obtained by logging into Workbench cannot be \"refreshed\" to gain access for an indefinite period.  However, it interferes with some Workbench features, as well as ease of use in other contexts, such as the Arvados command line.  This option is recommended only if most users will only ever interact with the system through Workbench or WebShell.  With this configuration, it is still possible to \"create a token at the command line\":user-management-cli.html#create-token using the @SystemRootToken@.\n\nIn every case, admin users may always create tokens with expiration dates far in the future.\n\nThese policies do not apply to tokens created by the API server for the purposes of authorizing a container to run, as those tokens are automatically expired when the container is finished.\n\nh2. Applying policy to existing tokens\n\nIf you have an existing Arvados installation and want to set a token lifetime policy, there may be long-lived user tokens already granted.  
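You can survey such tokens through the API as well; this is a minimal sketch using the Python SDK (an admin token is required to see tokens belonging to other users):\n\n<notextile>\n<pre><code class=\"python\">import arvados\n\napi = arvados.api('v1')\n\n# List tokens that have no expiration date set.\nresp = api.api_client_authorizations().list(\n    filters=[['expires_at', '=', None]], limit=100).execute()\nfor auth in resp['items']:\n    print(auth['uuid'], auth['owner_uuid'])\n</code></pre>\n</notextile>\n\n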
The administrator can use the following @rake@ tasks to enforce the new policy.\n\nThe @db:check_long_lived_tokens@ task will list which users have tokens with no expiration date.\n\n<notextile>\n<pre><code># <span class=\"userinput\">bin/rake db:check_long_lived_tokens</span>\nFound 6 long-lived tokens from users:\nuser2,user2@example.com,zzzzz-tpzed-5vzt5wc62k46p6r\nadmin,admin@example.com,zzzzz-tpzed-6drplgwq9nm5cox\nuser1,user1@example.com,zzzzz-tpzed-ftz2tfurbpf7xox\n</code></pre>\n</notextile>\n\nTo apply the new policy to existing tokens, use the @db:fix_long_lived_tokens@ task.\n\n<notextile>\n<pre><code># <span class=\"userinput\">bin/rake db:fix_long_lived_tokens</span>\nSetting token expiration to: 2020-08-25 03:30:50 +0000\n6 tokens updated.\n</code></pre>\n</notextile>\n\nNOTE: These rake tasks adjust the expiration of all tokens except those belonging to the system root user (@zzzzz-tpzed-000000000000000@).  If you have tokens used by automated service accounts that need to be long-lived, you can \"create tokens that don't expire using the command line\":user-management-cli.html#create-token .\n"
  },
  {
    "path": "doc/admin/upgrading.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: \"Arvados upgrade notes\"\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nFor Arvados administrators, this page will cover what you need to know and do in order to ensure a smooth upgrade of your Arvados installation.  For general release notes covering features added and bugs fixed, see \"Arvados releases\":https://arvados.org/releases.\n\nUpgrade instructions can be found at \"Maintenance and upgrading\":{{site.baseurl}}/admin/maintenance-and-upgrading.html#upgrading.\n\nh2. Upgrade notes\n\nSome versions introduce changes that require special attention when upgrading: e.g., there is a new service to install, or there is a change to the default configuration that you might need to override in order to preserve the old behavior. These notes are listed below, organized by release version. Scroll down to the version number you are upgrading to.\n\n{% comment %}\nNote to developers: Add new items at the top. Include the date, issue number, commit, and considerations/instructions for those about to upgrade.\n\nTODO: extract this information based on git commit messages and generate changelogs / release notes automatically.\n{% endcomment %}\n\n<notextile>\n<div class=\"releasenotes\">\n</notextile>\n\nh2(#main). development main\n\n\"previous: Upgrading to 3.2.1\":#v3_2_1\n\nh3. @CUDA@ configuration in @InstanceTypes@ is no longer supported\n\nArvados 3.1 introduced general \"GPU\" instance type settings and runtime constraints to replace vendor-specific \"CUDA\" settings. To support new cloud dispatch features, Arvados 3.3 removes support for the old @CUDA@ configuration sections of the cluster's configured @InstanceTypes@. Before you upgrade to Arvados 3.3, \"update your cluster's configured @InstanceTypes@ with @GPU@ sections\":#v3_1_0_instance_type_cuda. Make sure that each @GPU@ section has an accurate @VRAM@ setting. Without this, the cloud dispatcher may not consider your instance types suitable for GPU-accelerated workflows. You can update and test your current configuration before you start with the rest of your upgrade planning.\n\n@cuda@ runtime constraints in container requests are still deprecated but supported in this release.\n\nh3. Cloud dispatcher runs multiple containers per instance\n\nThe cloud dispatcher will now run multiple containers at once on an instance if it has enough RAM and VCPUs to do so _and_ the instance type is suitable for each container individually.\n\nThese conditions can arise in the following cases:\n* the configured @MaximumPriceFactor@ is large enough that an instance type with more than the exact number of VCPUs specified by a container is considered suitable; or\n* no configured instance type has exactly the number of VCPUs specified by a container, _i.e.,_ every suitable instance type has one or more spare VCPUs available to run another container alongside it.\n\nTo disable the new behavior and ensure that each container runs on a dedicated instance even in the above cases, set the new configuration entry @Containers.MaxRunningContainersPerInstance@ to @1@.\n\nh2(#v3_2_1). v3.2.1 (2026-03-02)\n\n\"previous: Upgrading to 3.2.0\":#v3_2_0\n\nh3. Configuration URLs are stricter about bracketed addresses\n\nThe new version of Go used to build this release has stricter URL parsing: _only_ an IPv6 address can appear inside brackets around the network location of the URL. 
If your cluster configuration includes URLs with brackets around DNS hostnames or IPv4 addresses, remove those brackets before upgrading.\n\nh2(#v3_2_0). v3.2.0 (2025-11-03)\n\n\"previous: Upgrading to 3.1.2\":#v3_1_2\n\nh3. Debian 11 and Ubuntu 20.04 are no longer supported\n\nArvados 3.2 no longer supports some of the older distributions supported by Arvados 3.1: Debian 11 \"bullseye\" and Ubuntu 20.04 \"focal.\" If you are running Arvados on any of these distributions, you must first upgrade to a supported distribution before you upgrade to Arvados 3.2.\n\nArvados 3.1 supports Debian 12 \"bookworm\" and Ubuntu 22.04 \"jammy.\" You can upgrade your Arvados cluster to one of those releases, then proceed to upgrade Arvados to 3.2.\n\nThe list of distributions supported by Arvados 3.2 can be found on the \"planning and prerequisites page\":{{site.baseurl}}/install/install-manual-prerequisites.html#supportedlinux of the install guide.\n\nh3. RPMs now require Red Hat/AlmaLinux/Rocky Linux 8.8 or later\n\nOur packages for Red Hat/AlmaLinux/Rocky Linux 8 now depend on appstreams in version 8.8, particularly Python 3.11. Please make sure you are running at least version 8.8 of your distribution before you upgrade to Arvados 3.2.0. If not, you should follow your distributor's instructions to upgrade from your current 8.x release to 8.8 or later before you upgrade Arvados.\n\nh3. New GPG key URL for Red Hat, AlmaLinux, and Rocky Linux\n\nAs part of adding support for the RHEL 9 family of distributions, we have started using a new signing key for packages. For these distributions, the key corresponding to your distribution is now available at a URL that includes the release version. Before you upgrade, on each system where you have the Arvados package repository installed, edit the file with that repository configuration, usually @/etc/yum.repos.d/arvados.repo@. Find the line that defines @gpgkey@:\n\n<notextile>\n<pre><code>[arvados]\n…\ngpgkey=https://rpm.arvados.org/RHEL/RPM-GPG-KEY-arvados\n</code></pre>\n</notextile>\n\nEdit this line to add @$releasever/@ after @RHEL/@, so it looks like this:\n\n<notextile>\n<pre><code>gpgkey=https://rpm.arvados.org/RHEL/<span class=\"userinput\">$releasever/</span>RPM-GPG-KEY-arvados\n</code></pre>\n</notextile>\n\nThen save and close the file. The old key URL still works, so this step is not required to upgrade Arvados itself. However, doing it now will help ensure you retain access to the Arvados repositories next time you upgrade your distribution.\n\nh3. SbatchArgumentsList (SLURM) configuration semantics have changed\n\n@Containers.SLURM.SbatchArgumentsList@ must now specify arguments that were previously added implicitly. Also, the @%@ character invokes template behavior. If your current configuration looks like this:\n\n<notextile>\n<pre><code>SbatchArgumentsList: [\"--clusters=all\"]\n</code></pre>\n</notextile>\n\nYou must update it to add the arguments that were previously added implicitly:\n\n<notextile>\n<pre><code>SbatchArgumentsList: [<span class=\"userinput\">\"--job-name=%U\", \"--mem=%M\", \"--cpus-per-task=%C\", \"--tmp=%T\", \"--constraint=instancetype=%I\", \"--partition=%P\", </span>\"--clusters=all\"]\n</code></pre>\n</notextile>\n\nIf your configuration file does not have an @SbatchArgumentsList@ entry, you do not need to add one.\n\nh3. 
@Users.SendUserSetupNotificationEmail@ is disabled by default\n\nIf you want to preserve the old default behavior of sending an email to each user when their account has been set up, update your configuration file accordingly.\n\n<notextile><pre>\nUsers:\n  SendUserSetupNotificationEmail: true\n</pre></notextile>\n\nh3. Admin container shell access is enabled by default\n\n\"Container shell access\":{{ site.baseurl }}/user/debugging/container-shell-access.html by admin users is now enabled by default to make it easier to diagnose workflow issues on new deployments.  If you prefer to leave it disabled, update your configuration file accordingly.\n\n<notextile><pre>\nContainers:\n  ShellAccess:\n    Admin: false\n</pre></notextile>\n\nContainer shell access for non-admin users is still disabled by default.\n\nh3. Configure ExternalURL, DNS, and TLS for container web services\n\nArvados now allows external clients to connect to HTTP services running in containers. To enable this feature:\n* Add a @Services.ContainerWebServices.ExternalURL@ entry to @/etc/arvados/config.yml@ with a wildcard URL, e.g., @https://*.containers.ClusterID.example.com/@\n* Add the wildcard name to the @server_name@ directive in the controller section of your Nginx configuration, e.g., @server_name ClusterID.example.com *.containers.ClusterID.example.com;@\n* Add wildcard DNS records so @*.containers.ClusterID.example.com@ names resolve to the same address(es) as your controller's external URL\n* Update the TLS certificate used by Nginx for @ClusterID.example.com@ so it also validates for @*.containers.ClusterID.example.com@\n\nh3. Loki credentials in @local.params.secrets@ are no longer needed\n\nThe Salt installer's Terraform code replaces the use of an AWS access key and secret for Loki's S3 bucket with equivalent permissions through an instance profile. Once applied, the credentials in the @local.params.secrets@ file will be invalid and can be safely removed.\n\nh3. arvbox and @arvados-server install@ are no longer supported\n\nArvados 3.2 no longer includes the arvbox Docker image and associated tooling. The @arvados-server install@ subcommand has also been removed from this release.\n\nIf you were using arvbox in demo mode, consider installing on a Debian-based virtual machine with our \"single-node Ansible installer\":{{ site.baseurl }}/install/install-single-host.html.\n\nIf you were using arvbox or @arvados-server install@ for development, we now provide an Ansible playbook to install development dependencies on a Debian-based system. Our \"Hacking Prerequisites wiki\":https://dev.arvados.org/projects/arvados/wiki/Hacking_prerequisites has instructions for how to use it.\n\nh2(#v3_1_2). v3.1.2 (2025-05-27)\n\n\"previous: Upgrading to 3.1.1\":#v3_1_1\n\nThere are no changes that require administrator attention in this release.\n\nh2(#v3_1_1). v3.1.1 (2025-04-14)\n\n\"previous: Upgrading to 3.1.0\":#v3_1_0\n\nh3. Clusters using cloud dispatch should rebuild a compute node image\n\nArvados 3.1.1 fixes a handful of bugs in installation tools, particularly for deployments on Ubuntu. If you have already successfully upgraded to 3.1.0, the only thing in this release that affects you is a bug fix in the compute node image builder for cloud deployments. If your cluster uses @arvados-dispatch-cloud@, you should \"build a new compute node image following our install guide\":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html and configure your cluster to use it. 
You do not need to upgrade any cluster services; there are no changes to them since 3.1.0.\n\nh2(#v3_1_0). v3.1.0 (2025-03-20)\n\n\"previous: Upgrading to 3.0.0\":#v3_0_0\n\nh3. Rails API server now runs standalone\n\nThe Arvados Rails API server now runs from a standalone Passenger server to simplify deployment. Before upgrading, existing deployments should remove the Rails API server from their nginx configuration, e.g., remove the entire @server@ block with @root /var/www/arvados-api/current/public@ from @/etc/nginx/conf.d/arvados-api-and-controller.conf@. If you customized this deployment at all, the \"updated install instructions\":{{ site.baseurl }}/install/install-api-server.html#railsapi-config explain how to customize the standalone Passenger server. Finally, you'll need to enable the new service by running:\n\n<notextile>\n<pre><code># <span class=\"userinput\">systemctl enable --now arvados-railsapi.service</span>\n</code></pre></notextile>\n\nh3. Rails API server needs PowerTools on Red Hat, AlmaLinux, and Rocky Linux\n\nThe Arvados Rails API server now needs to be able to build against the @libyaml@ development headers. On Red Hat, AlmaLinux, and Rocky Linux, these are provided by the @libyaml-devel@ package in the PowerTools repository. Before you upgrade, make sure you have this repository enabled on the host where you run the Rails API server by running:\n\n<notextile>\n<pre><code># <span class=\"userinput\">dnf config-manager --set-enabled powertools</span>\n</code></pre></notextile>\n\nh3. \"cuda\" runtime constraint is deprecated in favor of \"gpu\"\n\nArvados 3.1.0 adds support for containers that use AMD ROCm alongside our existing support for NVIDIA CUDA. As part of this, the @cuda@ runtime constraint has been deprecated and replaced with a more general @gpu@ constraint. The requested type of GPU is named in the @stack@ field of this object. Other fields have been carried over from @cuda@ and work the same way. Refer to the \"runtime constraints reference\":{{ site.baseurl }}/api/methods/container_requests.html#runtime_constraints for details.\n\nIf client software creates or updates a container request with a @cuda@ runtime constraint, the Arvados API server will automatically translate that to a @gpu@ constraint. This client software should still be updated to specify a @gpu@ runtime constraint, but you can safely upgrade to Arvados 3.1.0 and do these updates opportunistically.\n\nClient software that reads and reports runtime constraints (like Workbench does) must be updated to read the new @gpu@ constraint. The @cuda@ constraint will no longer appear in API responses.\n\nh3(#v3_1_0_instance_type_cuda). Generalized configuration for GPU compute nodes\n\nAs part of adding support for AMD GPUs in Arvados 3.1, the @CUDA@ section of @InstanceType@ definitions is now deprecated in favor of a new @GPU@ section that is generalized for both CUDA and ROCm.\n\nWhere previously there would be a @CUDA@ section:\n\n<pre>\n  InstanceTypes:\n     gpuInstance:\n       CUDA:\n         DriverVersion: \"11.0\"\n         HardwareCapability: \"9.0\"\n         DeviceCount: 1\n</pre>\n\nThe configuration file should now be updated to use a @GPU@ section:\n\n* Rename the section from @CUDA@ to @GPU@.\n* Rename the setting @HardwareCapability@ to @HardwareTarget@. 
The value can remain unchanged.\n* Add the setting @Stack: \"cuda\"@.\n* Add a @VRAM@ setting that defines the amount of RAM available on the GPU.\n\n<notextile>\n<pre><code>  InstanceTypes:\n     gpuInstance:\n       <span class=\"userinput\">GPU</span>:\n         <span class=\"userinput\">Stack: \"cuda\"</span>\n         DriverVersion: \"11.0\"\n         <span class=\"userinput\">HardwareTarget</span>: \"9.0\"\n         DeviceCount: 1\n         <span class=\"userinput\">VRAM: 8GiB</span>\n</code></pre>\n</notextile>\n\nTo minimize disruption, the config loader will continue to accept the deprecated @CUDA@ field and emit a warning.  Admins are advised to update the configuration file, as the legacy field will be removed in a future version.\n\nh3. BsubCUDAArguments renamed to BsubGPUArguments\n\nThe configuration item @Containers.LSF.BsubCUDAArguments@ has been renamed to @Containers.LSF.BsubGPUArguments@.  There is no change in content.  To minimize disruption, the config loader will continue to accept the deprecated @BsubCUDAArguments@ field and emit a warning.  Admins are advised to update the configuration file, as the legacy field will be removed in a future version.\n\nh2(#v3_0_0). v3.0.0 (2024-11-12)\n\n\"previous: Upgrading to 2.7.4\":#v2_7_4\n\nh3. Debian 10 and Ubuntu 18.04 are no longer supported\n\nArvados 3.0 no longer supports some of the older distributions supported by Arvados 2.7: Debian 10 \"buster\" and Ubuntu 18.04 \"bionic.\" If you are running Arvados on any of these distributions, you must first upgrade to a supported distribution before you upgrade to Arvados 3.0.\n\nArvados 2.7 supports Debian 11 \"bullseye\" and Ubuntu 20.04 \"focal.\" You can upgrade your Arvados cluster to one of those releases, then proceed to upgrade Arvados to 3.0.\n\nThe list of distributions supported by Arvados 3.0 can be found on \"Planning and prerequisites.\":{{site.baseurl}}/install/install-manual-prerequisites.html#supportedlinux\n\nh3. Red Hat 8 package dependency on package streams\n\nThe Red Hat 8 package of the Rails API server now depends on the Ruby 3.1 stream, and the various Python packages now depend on the Python 3.9 stream. Plan for these streams to be activated and installed automatically during your upgrade.\n\nh3. RVM is no longer supported\n\nSome Arvados packages, most notably the Rails API server package @arvados-api-server@, would check whether RVM is installed on the system, and invoke Ruby commands through it if so. Arvados 3.0 no longer specially supports RVM. Instead, Arvados 3.0 supports all the different versions of Ruby that are packaged in our supported distributions, mitigating the need to support separate Ruby installations. Package scripts run plain @ruby@ and @gem@ commands and expect that they come from a supported version.\n\nIf you have a custom install that requires a different version of Ruby than the one included with your distribution, you must configure your system to ensure package scripts find that version of @ruby@ before any others. For example, you might do this on Debian-based distributions by customizing apt's @DPkg::Path@ setting.\n\nh3. Keep-web requires PostgreSQL database access\n\nThe keep-web service now connects directly to the PostgreSQL database. Make sure these connections are supported by your network firewall rules, PostgreSQL connection settings, and PostgreSQL server configuration (in @pg_hba.conf@) as shown in the \"PostgreSQL install instructions\":{{site.baseurl}}/install/install-postgresql.html.\n\nh3. 
Slow migration on upgrade\n\nThis upgrade includes a database schema update to rebuild full text search indexes to remove UUID and portable data hash column data.  This will provide better search results to users and take less space on the database, but plan for the @arvados-api-server@ package upgrade to take longer than usual.\n\nh3. WebDAV service uses @/var/cache@ for file content\n\nWhen running as root, @keep-web@ now stores copies of recently accessed data blocks in @/var/cache/arvados/keep@ instead of in memory. This directory is created automatically. The default cache size is 10% of the filesystem size. Use the new @Collections.WebDAVCache.DiskCacheSize@ config to specify a different percentage or an absolute size.  If @keep-web@ is not running as root, it will store the cache in @$HOME/.cache/arvados/keep@.\n\nIf the previously supported @MaxBlockEntries@ config is present, remove it to avoid warning messages at startup.\n\nh3. Python SDK reorganization of internal classes and modules\n\nWe have reorganized the Python SDK to make it clearer which APIs are intended to be public, and make it easier to find documentation for them. As part of this work, some modules that only included internal support code have been moved, most notably @arvados.diskcache@, @arvados.http_to_keep@, and @arvados.timer@.\n\nIf you need immediate access to these modules, you can find them under @arvados._internal@, but we do not intend to support them as part of our public SDK API, so they may change or be removed entirely in future versions. If you've written client software that relies on these modules, please \"file an issue\":https://github.com/arvados/arvados/issues/new to let us know so we can figure out how best to support you.\n\nh3. Virtual environments inside distribution Python packages have moved\n\nThe distribution packages that we publish for Python packages include an entire virtualenv with all required libraries. In Arvados 3.0 these virtualenvs have moved from @/usr/share/python3/dist/PACKAGE_NAME@ to @/usr/lib/PACKAGE_NAME@ to prevent conflicts with distribution packages and better conform to filesystem standards.\n\nIf you only run the executables installed by these packages, you don't need to change anything. Those are still installed under @/usr/bin@ and will use the new location when you upgrade. If you have written your own scripts or tools that rely on these virtualenvs, you may need to update those with the new location. For example, if you have a shell script that activates the virtualenv by running:\n\n<pre><code class=\"shell\">source /usr/share/python3/dist/python3-arvados-python-client/bin/activate</code></pre>\n\nYou must update it to:\n\n<notextile>\n<pre><code class=\"shell\">source <span class=\"userinput\">/usr/lib/python3-arvados-python-client</span>/bin/activate</code></pre>\n</notextile>\n\nIf you have a Python script with this shebang line:\n\n<pre><code class=\"shell\">#!/usr/share/python3/dist/python3-arvados-python-client/bin/python</code></pre>\n\nYou must update it to:\n\n<notextile>\n<pre><code class=\"shell\">#!<span class=\"userinput\">/usr/lib/python3-arvados-python-client</span>/bin/python</code></pre>\n</notextile>\n\nh3. costanalyzer subcommand replaced by Arvados cluster activity tool\n\nThe functionality of @arvados-client costanalyzer@ has been replaced by a new @arvados-cluster-activity@ tool.  More information can be found at \"Analyzing workflow cost\":{{site.baseurl}}/user/cwl/costanalyzer.html .\n\nh3. 
@arv-migrate-docker19@ tool removed\n\nThe @arv-migrate-docker19@ tool that updates images from Docker 1.9 to be used with Docker 1.10+ (released February 2016) has been removed. In the unlikely event you still need to \"run this migration\":https://doc.arvados.org/v2.7/install/migrate-docker19.html, please do so before you upgrade to Arvados 3.0.\n\nh3. Legacy APIs and response fields have been removed\n\nThe following APIs have been removed:\n* \"api_clients\":https://doc.arvados.org/v2.7/api/methods/api_clients.html\n* \"humans\":https://doc.arvados.org/v2.7/api/methods/humans.html\n* \"jobs\":https://doc.arvados.org/v2.7/api/methods/jobs.html\n* \"job_tasks\":https://doc.arvados.org/v2.7/api/methods/job_tasks.html\n* \"nodes\":https://doc.arvados.org/v2.7/api/methods/nodes.html\n* \"pipeline_instances\":https://doc.arvados.org/v2.7/api/methods/pipeline_instances.html\n* \"pipeline_templates\":https://doc.arvados.org/v2.7/api/methods/pipeline_templates.html\n* \"repositories\":https://doc.arvados.org/v2.7/api/methods/repositories.html, and \"keep_disks\":https://doc.arvados.org/v2.7/api/methods/keep_disks.html\n* \"specimens\":https://doc.arvados.org/v2.7/api/methods/specimens.html\n* \"traits\":https://doc.arvados.org/v2.7/api/methods/traits.html\n\nThe following fields are no longer returned in API responses.\n* @api_client_id@, @user_id@, @default_owner_uuid@ (\"api_client_authorizations\":{{site.baseurl}}/api/methods/api_client_authorizations.html API)\n* @modified_by_client_uuid@ (all APIs)\n\nh3. Configuration entries have been removed or renamed\n\nThe following configuration keys have been renamed or removed.  Renamed keys will still be loaded if they appear with their old names, but you should update your @/etc/arvados/config.yml@ file to avoid warnings when services start up.\n* @API.LogCreateRequestFraction@ has been removed\n* @Containers.JobsAPI.Enable@ has been removed\n* @Mail.EmailFrom@ has been removed\n* @Mail.IssueReporterEmailFrom@ has been removed\n* @Mail.IssueReporterEmailTo@ has been removed\n* @Mail.MailchimpAPIKey@ has been removed\n* @Mail.MailchimpListID@ has been removed\n* @Mail.SendUserSetupNotificationEmail@ has moved to @Users.SendUserSetupNotificationEmail@\n* @Mail.SupportEmailAddress@ has moved to @Users.SupportEmailAddress@\n\nh3. S3 volume IAMRole configuration entry has been removed\n\nThe @Volumes.*.DriverParameters.IAMRole@ configuration entry for S3 volumes has been removed. You should remove it from your @/etc/arvados/config.yml@ file to avoid warnings when services start up. As before, if @AccessKeyID@ and @SecretAccessKey@ are blank, keepstore will retrieve IAM role credentials from instance metadata. Previously, documentation indicated that keepstore would refuse to use the IAM credentials if @IAMRole@ was specified and did not match the instance metadata, but that check has not been working for some time.\n\nh3. Legacy container logging system has been removed\n\nThe following configuration keys are no longer supported. 
Remove them from your @/etc/arvados/config.yml@ file to avoid warnings when services start up.\n* @Containers.Logging.LimitLogBytesPerJob@\n* @Containers.Logging.LogBytesPerEvent@\n* @Containers.Logging.LogPartialLineThrottlePeriod@\n* @Containers.Logging.LogSecondsBetweenEvents@\n* @Containers.Logging.LogThrottleBytes@\n* @Containers.Logging.LogThrottleLines@\n* @Containers.Logging.LogThrottlePeriod@\n* @Containers.Logging.MaxAge@\n* @Containers.Logging.SweepInterval@\n\nAny container logging content remaining in the database from the legacy system will be deleted.\n\nh2(#v2_7_4). v2.7.4 (2024-07-08)\n\n\"previous: Upgrading to 2.7.3\":#v2_7_3\n\nStarting from 2.7.4, Arvados no longer supports CentOS.  CentOS users should migrate to an Arvados-supported version of Red Hat Enterprise Linux (RHEL), Rocky Linux or AlmaLinux.\n\nThere are no other configuration changes requiring administrator attention in this release.\n\nh2(#v2_7_3). v2.7.3 (2024-05-24)\n\n\"previous: Upgrading to 2.7.2\":#v2_7_2\n\nThere are no configuration changes requiring administrator attention in this release.\n\nh2(#v2_7_2). v2.7.2 (2024-04-09)\n\n\"previous: Upgrading to 2.7.1\":#v2_7_1\n\nh3. Check MaxGatewayTunnels config\n\nIf you use the LSF or Slurm dispatcher, ensure the new @API.MaxGatewayTunnels@ config entry is high enough to support the size of your cluster. See \"LSF docs\":{{site.baseurl}}/install/crunch2-lsf/install-dispatch.html#MaxGatewayTunnels or \"Slurm docs\":{{site.baseurl}}/install/crunch2-slurm/install-dispatch.html#MaxGatewayTunnels for details.\n\nh3. New LSF dispatcher config items MaxRunTimeOverhead and MaxRunTimeDefault\n\nThe LSF dispatcher now supports the configuration parameter @Containers.LSF.MaxRunTimeDefault@, which provides a default value for @max_run_time@ for containers that do not specify a time limit (using CWL @ToolTimeLimit@).\n\nIt also supports the configuration parameter @Containers.LSF.MaxRunTimeOverhead@: when @scheduling_constraints.max_run_time@ or @MaxRunTimeDefault@ is non-zero, this adds time to account for crunch-run startup/shutdown overhead.\n\nh2(#v2_7_1). v2.7.1 (2023-12-12)\n\n\"previous: Upgrading to 2.7.0\":#v2_7_0\n\nh3. Separate configs for MaxConcurrentRequests and MaxConcurrentRailsRequests\n\nThe default configuration value @API.MaxConcurrentRequests@ (the number of concurrent requests that will be processed by a single instance of an arvados service process) is raised from 8 to 64.\n\nA new configuration key @API.MaxConcurrentRailsRequests@ (default 8) limits the number of concurrent requests processed by a RailsAPI service process.\n\nh3. Remove Workbench1 packages after upgrading the salt installer\n\nIf you installed a previous version of Arvados with the Salt installer, and you upgrade your installer to upgrade the cluster, you should uninstall the @arvados-workbench@ package from the workbench instance afterwards.\n\nh3. Remove Workbench1 packages and configuration\n\nThe Workbench1 application has been removed from the Arvados distribution. We recommend the following follow-up steps:\n* Remove the Workbench1 package from any service node where it is installed (e.g., @apt remove arvados-workbench@).\n* In your Nginx configuration, add your Workbench1 URL host (from @Services.Workbench1.ExternalURL@) to the @server_name@ directive in the Workbench2 section. 
For example: <notextile><pre>server {\n  listen 443 ssl;\n  server_name workbench.ClusterID.example.com workbench2.ClusterID.example.com;\n  ...\n}</pre></notextile>\n* In your Nginx configuration, remove the @upstream@ and @server@ sections for Workbench1.\n* Remove the @Services.Workbench1.InternalURLs@ section of your configuration file. (Do not remove @ExternalURL@.)\n* Run @arvados-server config-check@ to identify any Workbench1-specific entries in your configuration file, and remove them.\n\nh3. Check implications of Containers.MaximumPriceFactor 1.5\n\nWhen scheduling a container, Arvados now considers using instance types other than the lowest-cost type consistent with the container's resource constraints. If a larger instance is already running and idle, or the cloud provider reports that the optimal instance type is not currently available, Arvados will select a larger instance type, provided the cost does not exceed 1.5x the optimal instance type cost.\n\nThis will typically reduce overall latency for containers and reduce instance booting/shutdown overhead, but may increase costs depending on workload and instance availability. To avoid this behavior, configure @Containers.MaximumPriceFactor: 1.0@.\n\nh3. Synchronize keepstore and keep-balance upgrades\n\nThe internal communication between keepstore and keep-balance about read-only volumes has changed. After keep-balance is upgraded, old versions of keepstore will be treated as read-only. We recommend upgrading and restarting all keepstore services first, then upgrading and restarting keep-balance.\n\nh2(#v2_7_0). v2.7.0 (2023-09-21)\n\n\"previous: Upgrading to 2.6.3\":#v2_6_3\n\nh3. New system for live container logs\n\nStarting with Arvados 2.7, a new system for fetching live container logs is in place.  This system features significantly reduced database load compared to previous releases.  When Workbench or another application needs to access the logs of a process (running or completed), they should use the \"log endpoint of container_requests\":{{ site.baseurl }}/api/methods/container_requests.html which forwards requests to the running container.  This supersedes the previous system where compute processes would send all of their logs to the database, which produced significant load.\n\nThe legacy logging system is now disabled by default for all installations with the setting @Containers.Logging.LimitLogBytesForJob: 0@.  If you have an existing Arvados installation where you have customized this value and do not need the legacy container logging system, we recommend removing @LimitLogBytesForJob@ from your configuration.\n\nIf you need to re-enable the legacy logging system, set @Containers.Logging.LimitLogBytesForJob@ to a positive value (the previous default was @Containers.Logging.LimitLogBytesForJob: 67108864@).\n\nh3. Workbench 1 deprecated\n\nThe original Arvados Workbench application (referred to as \"Workbench 1\") is deprecated and will be removed in a future major version of Arvados.  Users are advised to migrate to \"Workbench 2\".  
Starting with this release, new installations of Arvados will only set up Workbench 2 and no longer include Workbench 1 by default.\n\nIt is also important to note that Workbench 1 only supports the legacy logging system, which is now disabled by default.  If you need to re-enable the legacy logging system, see above.\n\nh3. Multi-node installer's domain name configuration changes\n\nThe @domain_name@ variable at @terraform/vpc/terraform.tfvars@ and the @DOMAIN@ variable at @local.params@ have changed their meaning. In previous versions they were used in combination with @cluster_name@ and @CLUSTER@ to build the cluster's domain name (e.g.: @cluster_name@.@domain_name@). To allow the use of an arbitrary cluster domain, the cluster name prefix is no longer enforced as part of the domain, so @domain_name@ and @DOMAIN@ now need to hold the entire domain for the given cluster.\n\nFor example, if @cluster_name@ is set to @\"xarv1\"@ and @domain_name@ was previously set to @\"example.com\"@, it should now be set to @\"xarv1.example.com\"@ to keep using the same cluster domain.\n\nh3. Crunchstat log format change\n\nThe reported number of CPUs available in a container is now formatted in @crunchstat.txt@ log files and @crunchstat-summary@ text reports as a floating-point number rather than an integer (@2.00 cpus@ rather than @2 cpus@). Programs that parse these files may need to be updated accordingly.\n\nh3. arvados-login-sync configuration changes, including ignored groups\n\nIn the @Users@ section of your cluster configuration, there are now several options to control what system resources are or are not managed by @arvados-login-sync@. These options all have names that begin with @Sync@.\n\nThe defaults for all of these options match the previous behavior of @arvados-login-sync@ _except_ for @SyncIgnoredGroups@. This list names groups that @arvados-login-sync@ will never modify by adding or removing members. As a security precaution, the default list names security-sensitive system groups on Debian- and Red Hat-based distributions. If you are using Arvados to manage system group membership on shell nodes, especially @sudo@ or @wheel@, you may want to provide your own list. Set @SyncIgnoredGroups: []@ to restore the original behavior of ignoring no groups.\n\nh3. API clients can always retrieve their current token, regardless of scopes\n\nWe have introduced a small exception to the previous behavior of \"Arvados API token scopes\":{{ site.baseurl }}/admin/scoped-tokens.html in this release. A valid token is now always allowed to issue a request to \"@GET /arvados/v1/api_client_authorizations/current@\":{{ site.baseurl }}/api/methods/api_client_authorizations.html#current regardless of its scopes. This allows clients to reliably determine whether a request failed because a token is invalid, or because the token is not permitted to perform a particular request. The API server itself needs to be able to do this to validate tokens issued by other clusters in a federation.\n\nh3. 
Deprecated/legacy APIs slated for removal\n\nThe legacy APIs \"humans\":https://doc.arvados.org/v2.7/api/methods/humans.html, \"specimens\":https://doc.arvados.org/v2.7/api/methods/specimens.html, \"traits\":https://doc.arvados.org/v2.7/api/methods/traits.html, \"jobs\":https://doc.arvados.org/v2.7/api/methods/jobs.html, \"job_tasks\":https://doc.arvados.org/v2.7/api/methods/job_tasks.html, \"pipeline_instances\":https://doc.arvados.org/v2.7/api/methods/pipeline_instances.html, \"pipeline_templates\":https://doc.arvados.org/v2.7/api/methods/pipeline_templates.html, \"nodes\":https://doc.arvados.org/v2.7/api/methods/nodes.html, \"repositories\":https://doc.arvados.org/v2.7/api/methods/repositories.html, and \"keep_disks\":https://doc.arvados.org/v2.7/api/methods/keep_disks.html are deprecated and will be removed in a future major version of Arvados.\n\nIn addition, the @default_owner_uuid@, @api_client_id@, and @user_id@ fields of \"api_client_authorizations\":../api/methods/api_client_authorizations.html are deprecated and will be removed from @api_client_authorization@ responses in a future major version of Arvados.  This should not affect clients, as @default_owner_uuid@ was never implemented, and @api_client_id@ and @user_id@ returned internal ids that were not meaningful or usable with any other API call.\n\nh3. UseAWSS3v2Driver option removed\n\nThe old \"v1\" S3 driver for keepstore has been removed. The new \"v2\" implementation, which has been the default since Arvados 2.5.0, is always used. The @Volumes.*.DriverParameters.UseAWSS3v2Driver@ configuration key is no longer recognized. If your config file uses it, remove it to avoid warning messages at startup.\n\nh2(#v2_6_3). v2.6.3 (2023-06-06)\n\n\"previous: Upgrading to 2.6.2\":#v2_6_2\n\nh3. Python SDK automatically retries failed requests much more\n\nThe Python SDK has always provided functionality to retry API requests that fail due to temporary problems like network failures, by passing @num_retries=N@ to a request's @execute()@ method. In this release, API client constructor functions like @arvados.api@ also accept a @num_retries@ argument. This value is stored on the client object and used as a floor for all API requests made with this client. This allows developers to set their preferred retry strategy once, without having to pass it to each @execute()@ call.\n\nThe default value for @num_retries@ in API constructor functions is 10. This means that an API request that repeatedly encounters temporary problems may spend up to about 35 minutes retrying in the worst case. We believe this is an appropriate default for most users, where eventual success is a much greater concern than responsiveness. If you have client applications where this is undesirable, update them to pass a lower @num_retries@ value to the constructor function. You can even pass @num_retries=0@ to have the API client act as it did before, like this:\n\n{% codeblock as python %}\nimport arvados\narv_client = arvados.api('v1', num_retries=0, ...)\n{% endcodeblock %}\n\nThe first time the Python SDK fetches an Arvados API discovery document, it will ensure that @googleapiclient.http@ logs are handled so you have a way to know about early problems that are being retried. If you prefer to handle these logs your own way, just ensure that the @googleapiclient.http@ logger (or a parent logger) has a handler installed before you call any Arvados API client constructor.\n\nh2(#v2_6_2). 
v2.6.2 (2023-05-22)\n\n\"previous: Upgrading to 2.6.1\":#v2_6_1\n\nThis version introduces a new API feature which is used by Workbench 2 to improve page loading performance.  To avoid any errors using the new Workbench with an old API server, be sure to upgrade the API server before upgrading Workbench 2.\n\nh2(#v2_6_1). v2.6.1 (2023-04-17)\n\n\"previous: Upgrading to 2.6.0\":#v2_6_0\n\nh3. Performance improvement for permission row de-duplication migration\n\nThe migration which de-duplicates permission links has been optimized.  We recommend upgrading from 2.5.0 directly to 2.6.1 in order to avoid the slow permission de-duplication migration in 2.6.0.\n\nYou should still plan for the arvados-api-server package upgrade to take longer than usual due to the database schema update changing the integer id column in each table from 32-bit to 64-bit.\n\nh2(#v2_6_0). v2.6.0 (2023-04-06)\n\n\"previous: Upgrading to 2.5.0\":#v2_5_0\n\nh3. WebDAV InternalURLs must be reachable from controller nodes\n\nEnsure your internal keep-web service addresses are listed in the @Services.WebDAV.InternalURLs@ section of your configuration file, and reachable from controller processes, as noted on the \"updated install page\":{{site.baseurl}}/admin/config-urls.html.\n\nh3. Slow migration on upgrade\n\nImportant!  This upgrade includes a database schema update changing the integer id column in each table from 32-bit to 64-bit.  Because it touches every row in the table, on moderate to large sized installations *this may be very slow* (on the order of hours). Plan for the arvados-api-server package upgrade to take longer than usual.\n\nh3. Default request concurrency, new limit on log requests\n\nThe configuration value @API.MaxConcurrentRequests@ (the number of concurrent requests that will be accepted by a single instance of arvados-controller) now has a default value of 64, instead of being unlimited.\n\nThe new configuration value @API.LogCreateRequestFraction@ (default 0.50) limits requests that post live container logs to the API server, to avoid situations where log messages crowd out other more important requests.\n\nh3. New limit on concurrent workflows\n\nThe new configuration option @CloudVMs.SupervisorFraction@ (default 0.30) limits the number of concurrent workflow supervisors, to avoid situations where too many workflow runners crowd out actual workers.\n\nh3. Default limit for cloud VM instances\n\nThere is a new configuration entry @CloudVMs.MaxInstances@ (default 64) that limits the number of VMs the cloud dispatcher will run at a time. This may need to be adjusted to suit your anticipated workload.\n\nUsing the obsolete configuration entry @MaxCloudVMs@, which was previously accepted in config files but not obeyed, will now result in a deprecation warning.\n\nh3. Default frequency for running keep-balance has changed\n\nThe frequency that @keep-balance@ will run (@Collections.BalancePeriod@) has been changed from every 10 minutes to every 6 hours.\n\nh2(#v2_5_0). v2.5.0 (2022-12-22)\n\n\"previous: Upgrading to 2.4.4\":#v2_4_4\n\nh3. Dispatchers require PostgreSQL database access\n\nAll dispatchers (cloud, LSF, and Slurm) now connect directly to the PostgreSQL database. Make sure these connections are supported by your network firewall rules, PostgreSQL connection settings, and PostgreSQL server configuration (in @pg_hba.conf@) as shown in the \"PostgreSQL install instructions\":{{site.baseurl}}/install/install-postgresql.html.
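\n\nThe dispatchers use the same @PostgreSQL@ section of @/etc/arvados/config.yml@ as the other services. A minimal sketch of that section (the host name and credentials here are placeholders):\n\n<pre>\nClusters:\n  zzzzz:\n    PostgreSQL:\n      Connection:\n        # The dispatcher nodes must be able to reach this host, and\n        # pg_hba.conf must accept connections from them.\n        host: db.ClusterID.example.com\n        dbname: arvados_production\n        user: arvados\n        password: xxxxxxxx\n</pre>\n\nh3. 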
Google or OpenID Connect login restricted to trusted clients\n\nIf you use OpenID Connect or Google login, and your cluster serves as the @LoginCluster@ in a federation _or_ your users log in from a web application other than the Workbench1 and Workbench2 @ExternalURL@ addresses in your configuration file, the additional web application URLs (e.g., the other clusters' Workbench addresses) must be listed explicitly in @Login.TrustedClients@, otherwise login will fail. Previously, login would succeed with a less-privileged token.\n\nh3. New keepstore S3 driver enabled by default\n\nA more actively maintained S3 client library is now enabled by default for keepstore services. The previous driver is still available for use in case of unknown issues. To use the old driver, set @DriverParameters.UseAWSS3v2Driver@ to @false@ on the appropriate @Volumes@ config entries.\n\nh3. Old container logs are automatically deleted from PostgreSQL\n\nCached copies of log entries from containers that finished more than 1 month ago are now deleted automatically (this only affects the \"live\" logs saved in the PostgreSQL database, not log collections saved in Keep). If you have an existing cron job that runs @rake db:delete_old_container_logs@, you can remove it. See configuration options @Containers.Logging.MaxAge@ and @Containers.Logging.SweepInterval@.\n\nh3. Fixed salt installer template file to support container shell access\n\nIf you manage your cluster using the salt installer, you may want to update it to the latest version, use the appropriate @config_examples@ subdirectory, and redeploy with your custom @local.params@ file so that @arvados-controller@'s @nginx@ configuration file gets fixed.\n\nh3. Login-sync script requires configuration update on LoginCluster federations\n\nIf you have @arvados-login-sync@ running on a satellite cluster, please update the environment variable settings by removing the @LOGINCLUSTER_ARVADOS_API_*@ variables and setting @ARVADOS_API_TOKEN@ to a LoginCluster's admin token, as described on the \"updated install page\":{{site.baseurl}}/install/install-shell-server.html#arvados-login-sync.\n\nh3. Renamed keep-web metrics and WebDAV configs\n\nMetrics previously reported by keep-web (@arvados_keepweb_collectioncache_requests@, @..._hits@, @..._pdh_hits@, @..._api_calls@, @..._cached_manifests@, and @arvados_keepweb_sessions_cached_collection_bytes@) have been replaced with @arvados_keepweb_cached_session_bytes@.\n\nThe config entries @Collections.WebDAVCache.UUIDTTL@, @...MaxCollectionEntries@, and @...MaxUUIDEntries@ are no longer used, and should be removed from your config file.\n\nh2(#v2_4_4). v2.4.4 (2022-11-18)\n\n\"previous: Upgrading to 2.4.3\":#v2_4_3\n\nThis update only consists of improvements to @arvados-cwl-runner@.  There are no changes to backend services.\n\nh2(#v2_4_3). v2.4.3 (2022-09-21)\n\n\"previous: Upgrading to 2.4.2\":#v2_4_2\n\nh3. Fixed PAM authentication security vulnerability\n\nIn Arvados 2.4.2 and earlier, when using PAM authentication, if a user presented valid credentials but the account was disabled or otherwise not allowed to access the host, the credentials would still be accepted for access to Arvados.  From 2.4.3 onwards, Arvados also checks that the account is permitted to access the host before completing the PAM login process.\n\nOther authentication methods (LDAP, OpenID Connect) are not affected by this flaw.\n\nh2(#v2_4_2). v2.4.2 (2022-08-09)\n\n\"previous: Upgrading to 2.4.1\":#v2_4_1\n\nh3. 
GHSL-2022-063\n\nGitHub Security Lab (GHSL) reported a remote code execution (RCE) vulnerability in the Arvados Workbench that allows authenticated attackers to execute arbitrary code via specially crafted JSON payloads.\n\nThis vulnerability is fixed in 2.4.2 (\"#19316\":https://dev.arvados.org/issues/19316).\n\nIt is likely that this vulnerability exists in all versions of Arvados up to 2.4.1.\n\nThis vulnerability is specific to the Ruby on Rails Workbench application (\"Workbench 1\").  We do not believe any other Arvados components, including the TypeScript browser-based Workbench application (\"Workbench 2\") or API Server, are vulnerable to this attack.\n\nh3. CVE-2022-31163 and CVE-2022-32224\n\nAs a precaution, Arvados 2.4.2 includes security updates for Ruby on Rails and the TZInfo Ruby gem.  However, there are no known exploits in Arvados based on these CVEs.\n\nh3. Disable Sharing URLs UI\n\nThere is now a configuration option @Workbench.DisableSharingURLsUI@ that lets admins disable the user interface for the \"sharing link\" feature (URLs which can be sent to users to access the data in a specific collection in Arvados without an Arvados account), for organizations where sharing links violate their data sharing policy.\n\nh2(#v2_4_1). v2.4.1 (2022-06-02)\n\n\"previous: Upgrading to 2.4.0\":#v2_4_0\n\nh3. Slurm dispatcher requires configuration update\n\nIf you use the Slurm dispatcher (@crunch-dispatch-slurm@) you must add a @Services.DispatchSLURM.InternalURLs@ section to your configuration file, as shown on the \"updated install page\":{{site.baseurl}}/install/crunch2-slurm/install-dispatch.html.\n\nh3. New proxy parameters for arvados-controller\n\nWe now recommend disabling nginx proxy caching for arvados-controller, to avoid truncation of large responses.\n\nIn your Nginx configuration file (@/etc/nginx/conf.d/arvados-api-and-controller.conf@), add the following lines to the @location /@ block with @http://controller@ (see \"Update nginx configuration\":{{site.baseurl}}/install/install-api-server.html#update-nginx for an example) and reload/restart Nginx (@sudo nginx -s reload@).\n\n<pre>\n    proxy_max_temp_file_size 0;\n    proxy_request_buffering  off;\n    proxy_buffering          off;\n    proxy_http_version       1.1;\n</pre>\n\nh3. Now recommending Singularity 3.9.9\n\nThe compute image \"build script\":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html now installs Singularity 3.9.9 instead of 3.7.4. The newer version includes a bugfix that should resolve \"intermittent loopback device errors\":https://dev.arvados.org/issues/18489 when running containers.\n\nh3. Changes to @arvados-cwl-runner --create-workflow@ and @--update-workflow@\n\nWhen using @arvados-cwl-runner --create-workflow@ or @--update-workflow@, by default it will now make a copy of all collection and Docker image dependencies in the target project.  Running workflows retains the old behavior (using the dependencies wherever they are found).  This can be controlled explicitly with @--copy-deps@ and @--no-copy-deps@.\n\nh2(#v2_4_0). v2.4.0 (2022-04-08)\n\n\"previous: Upgrading to 2.3.1\":#v2_3_1\n\nh3. Default result order changed\n\nWhen requesting a list of objects without an explicit @order@ parameter, the default order has changed from @modified_at desc, uuid asc@ to @modified_at desc, uuid desc@.  This means that if two objects have identical @modified_at@ timestamps, the tiebreaker will now be based on @uuid@ in descending order where previously it would be ascending order. 
The practical effect of this should be minor; with microsecond precision it is unusual to have two records with exactly the same timestamp, and order-sensitive queries should already provide an explicit @order@ parameter.\n\nh3. Ubuntu 18.04 Arvados Python packages now depend on python-3.8\n\nUbuntu 18.04 ships with Python 3.6 as the default version of Python 3. Ubuntu also ships a version of Python 3.8, and the Arvados Python packages (@python3-arvados-cwl-runner@, @python3-arvados-fuse@, @python3-arvados-python-client@, @python3-arvados-user-activity@ and @python3-crunchstat-summary@) now depend on the @python-3.8@ system package.\n\nThis means that they are now installed under @/usr/share/python3.8@ (before, the path was @/usr/share/python3@). If you rely on the @python3@ executable from the packages (e.g. to load a virtualenv), you may need to update the path to that executable.\n\nh3. Minimum supported Ruby version is now 2.6\n\nThe minimum supported Ruby version is now 2.6.  If you are running Arvados on Debian 10 or Ubuntu 18.04, you may need to switch to using RVM or upgrade your OS.  See \"Install Ruby and Bundler\":../install/ruby.html for more information.\n\nh3. Anonymous token changes\n\nThe anonymous token configured in @Users.AnonymousUserToken@ must now be 32 characters or longer. This was already the suggestion in the documentation; now it is enforced. The @script/get_anonymous_user_token.rb@ script that was needed to register the anonymous user token in the database has been removed. Registration of the anonymous token is no longer necessary.\n\nh3. Preemptible instance support changes\n\nThe @Containers.UsePreemptibleInstances@ option has been renamed to @Containers.AlwaysUsePreemptibleInstances@ and has the same behavior when @true@ and one or more preemptible instances are configured.  However, a value of @false@ no longer disables support for preemptible instances; instead, users can now enable use of preemptible instances at the level of an individual workflow or workflow step.\n\nIn addition, there is a new configuration option @Containers.PreemptiblePriceFactor@ that will automatically add a preemptible instance type corresponding to each regular instance type.  See \"Using Preemptible instances\":spot-instances.html for details.\n\nh3. Default LSF arguments have changed\n\nIf you use LSF and your configuration specifies @Containers.LSF.BsubArgumentsList@, you should update it to include the new arguments (@\"-R\", \"select[mem>=%MMB]\", ...@, see \"configuration reference\":{{site.baseurl}}/admin/config.html). Otherwise, containers that are too big to run on any LSF host will remain in the LSF queue instead of being cancelled.\n\nh3. Support for NVIDIA CUDA GPUs\n\nArvados now supports requesting NVIDIA CUDA GPUs for cloud and LSF (Slurm is currently not supported).  To be able to request GPU nodes, some additional configuration is needed:\n\n\"Including GPU support in cloud compute node image\":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html#nvidia\n\n\"Configure cloud dispatcher for GPU support\":{{site.baseurl}}/install/crunch2-cloud/install-dispatch-cloud.html#GPUsupport\n\n\"LSF GPU configuration\":{{site.baseurl}}/install/crunch2-lsf/install-dispatch.html\n\nh3. Role groups are visible to all users by default\n\nThe permission model has changed such that all role groups are visible to all active users. This enables users to share objects with groups they don't belong to. To preserve the previous behavior, where role groups are only visible to members and admins, add @RoleGroupsVisibleToAll: false@ to the @Users@ section of your configuration file.
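\n\nFor example, in @/etc/arvados/config.yml@ (the cluster ID @zzzzz@ here is a placeholder):\n\n<pre>\nClusters:\n  zzzzz:\n    Users:\n      RoleGroupsVisibleToAll: false\n</pre>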
\n\nh3. Previously trashed role groups will be deleted\n\nDue to a bug in previous versions, the @DELETE@ operation on a role group caused the group to be flagged as trash in the database, but it continued to grant permissions regardless. After upgrading, any role groups that had been trashed this way will be deleted. This might surprise some users if they were relying on permissions that were still in effect due to this bug. Future @DELETE@ operations on a role group will immediately delete the group and revoke the associated permissions.\n\nh3. Dedicated keepstore process for each container\n\nWhen Arvados runs a container via @arvados-dispatch-cloud@, the @crunch-run@ supervisor process now brings up its own keepstore server to handle I/O for mounted collections, outputs, and logs. With the default configuration, the keepstore process allocates one 64 MiB block buffer per VCPU requested by the container. For most workloads this will increase throughput, reduce total network traffic, and make it possible to run more containers at once without provisioning additional keepstore nodes to handle the I/O load.\n* If you have containers that can effectively handle multiple I/O threads per VCPU, consider increasing the @Containers.LocalKeepBlobBuffersPerVCPU@ value.\n* If you already have a robust permanent keepstore infrastructure, you can set @Containers.LocalKeepBlobBuffersPerVCPU@ to 0 to disable this feature and preserve the previous behavior of sending container I/O traffic to your separately provisioned keepstore servers.\n* This feature is enabled only if no volumes use @AccessViaHosts@, and no volumes have underlying @Replication@ less than @Collections.DefaultReplication@. If the feature is configured but cannot be enabled due to an incompatible volume configuration, this will be noted in the @crunch-run.txt@ file in the container log.\n\nh2(#v2_3_1). v2.3.1 (2021-11-24)\n\n\"previous: Upgrading to 2.3.0\":#v2_3_0\n\nh3. Users are visible to other users by default\n\nWhen a new user is set up (either via the @AutoSetupNewUsers@ config or via the Workbench admin interface), the user immediately becomes visible to other users. To revert to the previous behavior, where the administrator must add two users to the same group using the Workbench admin interface in order for the users to see each other, change the new @Users.ActivatedUsersAreVisibleToOthers@ config to @false@.\n\nh3. Backend support for vocabulary checking\n\nIf your installation uses the vocabulary feature on Workbench2, you will need to update the cluster configuration by moving the vocabulary definition file to the node where @controller@ runs, and set the @API.VocabularyPath@ configuration parameter to the local path where the file was placed.\nThis will enable vocabulary checking cluster-wide, including on Workbench2. The @Workbench.VocabularyURL@ configuration parameter is deprecated and will be removed in a future release.\nYou can read more about how this feature works on the \"admin page\":{{site.baseurl}}/admin/metadata-vocabulary.html.
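\n\nFor example, if the vocabulary file was copied to @/etc/arvados/vocabulary.json@ on the controller node (the path here is just an example):\n\n<pre>\nClusters:\n  zzzzz:\n    API:\n      VocabularyPath: /etc/arvados/vocabulary.json\n</pre>\n\nh2(#v2_3_0). v2.3.0 (2021-10-27)\n\n\"previous: Upgrading to 2.2.0\":#v2_2_0\n\nh3. 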
Ubuntu 18.04 packages for arvados-api-server and arvados-workbench now conflict with ruby-bundler\n\nUbuntu 18.04 ships with Bundler version 1.16.1, which is no longer compatible with the Gemfiles in the Arvados packages (made with Bundler 2.2.19). The Ubuntu 18.04 packages for arvados-api-server and arvados-workbench now conflict with the ruby-bundler package to work around this issue. The post-install scripts for arvados-api-server and arvados-workbench install the proper version of Bundler as a gem.\n\nh3. Removed unused @update_uuid@ endpoint for users\n\nThe @update_uuid@ endpoint was superseded by the \"link accounts feature\":{{site.baseurl}}/admin/link-accounts.html, so it's no longer available.\n\nh3. Removed deprecated '@@' search operator\n\nThe '@@' full text search operator, previously deprecated, has been removed. To perform a string search across multiple columns, use the 'ilike' operator on 'any' column as described in the \"available list method filter section\":{{site.baseurl}}/api/methods.html#substringsearchfilter of the API documentation.\n\nh3. Storage classes must be defined explicitly\n\nIf your configuration uses the StorageClasses attribute on any Keep volumes, you must add a new @StorageClasses@ section that lists all of your storage classes. Refer to the updated documentation about \"configuring storage classes\":{{site.baseurl}}/admin/storage-classes.html for details.\n\nh3. keep-balance requires access to PostgreSQL\n\nMake sure the keep-balance process can connect to your PostgreSQL server using the settings in your config file. (In previous versions, keep-balance accessed the database through controller instead of connecting to the database server directly.)\n\nh3. crunch-dispatch-local now requires config.yml\n\nThe @crunch-dispatch-local@ dispatcher now reads the API host and token from the system-wide @/etc/arvados/config.yml@.  It will fail to start if that file is not found or not readable.\n\nh3. Multi-file docker image collections\n\nTypically a docker image collection contains a single @.tar@ file at the top level. Handling of atypical cases has changed. If a docker image collection contains files with extensions other than @.tar@, they will be ignored (previously they could cause errors). If a docker image collection contains multiple @.tar@ files, it will cause an error at runtime, \"cannot choose from multiple tar files in image collection\" (previously one of the @.tar@ files was selected). Subdirectories are ignored. The @arv keep docker@ command always creates a collection with a single @.tar@ file, and never uses subdirectories, so this change will not affect most users.\n\nh2(#v2_2_0). v2.2.0 (2021-06-03)\n\n\"previous: Upgrading to 2.1.0\":#v2_1_0\n\nh3. New spelling of S3 credential configs\n\nIf you use the S3 driver for Keep volumes and specify credentials in your configuration file (as opposed to using an IAM role), you should change the spelling of the @AccessKey@ and @SecretKey@ config keys to @AccessKeyID@ and @SecretAccessKey@. If you don't update them, the previous spellings will still be accepted, but warnings will be logged at server startup.
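\n\nFor example, a Keep volume entry with explicit credentials would now be spelled like this (the volume UUID, bucket name, and credential values here are placeholders):\n\n<pre>\nClusters:\n  zzzzz:\n    Volumes:\n      zzzzz-nyw5e-000000000000000:\n        Driver: S3\n        DriverParameters:\n          Bucket: example-keep-volume\n          # Previously spelled AccessKey and SecretKey:\n          AccessKeyID: xxxxxxxx\n          SecretAccessKey: xxxxxxxx\n</pre>\n\nh3. 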
New proxy parameters for arvados-controller\n\nIn your Nginx configuration file (@/etc/nginx/conf.d/arvados-api-and-controller.conf@), add the following lines to the @location /@ block with @http://controller@ (see \"Update nginx configuration\":{{site.baseurl}}/install/install-api-server.html#update-nginx for an example) and reload/restart Nginx (@sudo nginx -s reload@).\n\n<pre>\n    proxy_set_header      Upgrade           $http_upgrade;\n    proxy_set_header      Connection        \"upgrade\";\n</pre>\n\nh3. Changes to the collection's @preserve_version@ attribute semantics\n\nThe @preserve_version@ attribute on collections was originally designed to allow clients to persist a preexisting collection version. This forced clients to make 2 requests if the intention was to \"make this set of changes in a new version that will be kept\", so we have changed the semantics to do just that: when passing @preserve_version=true@ along with other collection updates, the current version is persisted, and the newly created one will also be persisted on the next update.\n\nh3. System token requirements\n\nSystem services now log a warning at startup if any of the system tokens (@ManagementToken@, @SystemRootToken@, and @Collections.BlobSigningKey@) are less than 32 characters, or contain characters other than a-z, A-Z, and 0-9. After upgrading, run @arvados-server config-check@ and update your configuration file if needed to resolve any warnings.\n\nThe @API.RailsSessionSecretToken@ configuration key has been removed. Delete this entry from your configuration file after upgrading.\n\nh3. Centos7 Python 3 dependency upgraded to python3\n\nNow that Python 3 is part of the base repository in CentOS 7, the Python 3 dependency for Centos7 Arvados packages was changed from SCL rh-python36 to python3.\n\nh3. ForceLegacyAPI14 option removed\n\nThe ForceLegacyAPI14 configuration option has been removed. In the unlikely event it is mentioned in your config file, remove it to avoid \"deprecated/unknown config\" warning logs.\n\nh2(#v2_1_0). v2.1.0 (2020-10-13)\n\n\"previous: Upgrading to 2.0.0\":#v2_0_0\n\nh3. LoginCluster conflicts with other Login providers\n\nA satellite cluster that delegates its user login to a central user database must only have @Login.LoginCluster@ set, or it will return an error.  This is a change in behavior; previously it would return an error if another login provider was _not_ configured, even though the provider would never be used.\n\nh3. Minimum supported Python version is now 3.5\n\nWe no longer publish Python 2 based distribution packages for our Python components. There are equivalent packages based on Python 3, but their names are slightly different. If you were using the Python 2 based packages, you can install the Python 3 based package as a drop-in replacement. 
On Debian and Ubuntu:\n\n<pre>\n    apt remove python-arvados-fuse && apt install python3-arvados-fuse\n    apt remove python-arvados-python-client && apt install python3-arvados-python-client\n    apt remove python-arvados-cwl-runner && apt install python3-arvados-cwl-runner\n    apt remove python-crunchstat-summary && apt install python3-crunchstat-summary\n    apt remove python-cwltest && apt install python3-cwltest\n</pre>\n\nOn CentOS:\n\n<pre>\n    yum remove python-arvados-fuse && yum install python3-arvados-fuse\n    yum remove python-arvados-python-client && yum install python3-arvados-python-client\n    yum remove python-arvados-cwl-runner && yum install python3-arvados-cwl-runner\n    yum remove python-crunchstat-summary && yum install python3-crunchstat-summary\n    yum remove python-cwltest && yum install python3-cwltest\n</pre>\n\nh3. Minimum supported Ruby version is now 2.5\n\nThe minimum supported Ruby version is now 2.5.  If you are running Arvados on Debian 9 or Ubuntu 16.04, you may need to switch to using RVM or upgrade your OS.  See \"Install Ruby and Bundler\":../install/ruby.html for more information.\n\nh3. Removing libpam-arvados, replaced with libpam-arvados-go\n\nThe Python-based PAM package has been replaced with a version written in Go. See \"using PAM for authentication\":{{site.baseurl}}/install/setup-login.html#pam for details.\n\nh3. Removing sso-provider\n\nThe SSO (single sign-on) component is deprecated and will not be supported in future releases. Existing configurations will continue to work in this release, but you should switch to one of the built-in authentication mechanisms as soon as possible. See \"setting up web based login\":{{site.baseurl}}/install/setup-login.html for details.\n\nAfter migrating your configuration, uninstall the @arvados-sso-provider@ package.\n\nh3. S3 signatures\n\nKeepstore now uses \"V4 signatures\":https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html by default for S3 requests. If you are using Amazon S3, no action is needed; all regions support V4 signatures. If you are using a different S3-compatible service that does not support V4 signatures, add @V2Signature: true@ to your volume driver parameters to preserve the old behavior. See \"configuring S3 object storage\":{{site.baseurl}}/install/configure-s3-object-storage.html.\n\nh3. New permission system constraints\n\nSome constraints on the permission system have been added; in particular, @role@ and @project@ group types now have distinct behavior. These constraints were already de facto imposed by the Workbench UI, so on most installations the only effect of this migration will be to reassign @role@ groups to the system user and create a @can_manage@ permission link for the previous owner.\n\n# The @group_class@ field must be either @role@ or @project@. Groups with an invalid @group_class@ are migrated to @role@.\n# A @role@ cannot own things. Anything owned by a role is migrated to a @can_manage@ link and reassigned to the system user.\n# Only @role@ and @user@ can have outgoing permission links. Permission links originating from projects are deleted by the migration.\n# A @role@ is always owned by the system_user. When a group is created, it creates a @can_manage@ link for the object that would have been assigned to @owner_uuid@.  Migration adds @can_manage@ links and reassigns roles to the system user.  This also has the effect of requiring that all @role@ groups have unique names on the system.  
If there is a name collision during migration, roles will be renamed to ensure they are unique.\n# A permission link can have the permission level (@name@) updated but not @head_uuid@, @tail_uuid@ or @link_class@.\n\nThe @arvados-sync-groups@ tool has been updated to reflect these constraints, so it is important to use the version of @arvados-sync-groups@ that matches the API server version.\n\nBefore upgrading, use the following commands to find out which groups and permissions in your database will be automatically modified or deleted during the upgrade.\n\nTo determine which groups have an invalid @group_class@ (these will be migrated to @role@ groups):\n\n<pre>\narv group list --filters '[[\"group_class\", \"not in\", [\"project\", \"role\"]]]'\n</pre>\n\nTo list all @role@ groups, which will be reassigned to the system user (unless @owner_uuid@ is already the system user):\n\n<pre>\narv group list --filters '[[\"group_class\", \"=\", \"role\"]]'\n</pre>\n\nTo list which @project@ groups have outgoing permission links (such links are now invalid and will be deleted by the migration):\n\n<pre>\nfor uuid in $(arv link list --filters '[[\"link_class\", \"=\", \"permission\"], [\"tail_uuid\", \"like\", \"%-j7d0g-%\"]]' |\n              jq -r .items[].tail_uuid | sort | uniq) ; do\n   arv group list --filters '[[\"group_class\", \"=\", \"project\"], [\"uuid\", \"=\", \"'$uuid'\"]]' | jq .items\ndone\n</pre>\n\nh4. \"Public favorites\" moved to their own project\n\nAs a side effect of the new permission system constraints, \"star\" links (indicating shortcuts in Workbench) that were previously owned by \"All users\" (which is now a \"role\" and cannot own things) will be migrated to a new system project called \"Public favorites\" which is readable by the \"Anonymous users\" role.\n\nh2(#v2_0_0). v2.0.0 (2020-02-07)\n\n\"previous: Upgrading to 1.4.1\":#v1_4_1\n\nArvados 2.0 is a major upgrade, with many changes.  Please read these upgrade notes carefully before you begin.\n\nh3. Migrating to centralized config.yml\n\nSee \"Migrating Configuration\":https://doc.arvados.org/v2.1/admin/config-migration.html for notes on migrating legacy per-component configuration files to the new centralized @/etc/arvados/config.yml@.\n\nTo ensure a smooth transition, the per-component config files continue to be read, and take precedence over the centralized configuration.  Your cluster should continue to function after the upgrade, even before you have done the full configuration migration.  However, several services (keepstore, keep-web, keepproxy) require a minimal @/etc/arvados/config.yml@ to start:\n\n<pre>\nClusters:\n  zzzzz:\n    Services:\n      Controller:\n        ExternalURL: \"https://zzzzz.example.com\"\n</pre>\n\nh3. Keep-balance configuration migration\n\n(feature \"#14714\":https://dev.arvados.org/issues/14714 ) The keep-balance service can now be configured using the centralized configuration file at @/etc/arvados/config.yml@. The following command line and configuration options have changed.\n\nYou can no longer specify types of keep services to balance via the @KeepServiceTypes@ config option in the legacy config at @/etc/arvados/keep-balance/keep-balance.yml@. If you are still using the legacy config and @KeepServiceTypes@ has a value other than \"disk\", keep-balance will produce an error.\n\nYou can no longer specify individual keep services to balance via the @config.KeepServiceList@ command line option or @KeepServiceList@ legacy config option. 
Instead, keep-balance will operate on all keepstore servers with @service_type:disk@ as reported by the @arv keep_service list@ command. If you are still using the legacy config, @KeepServiceList@ should be removed or keep-balance will produce an error.\n\nPlease see the \"config migration guide\":https://doc.arvados.org/v2.1/admin/config-migration.html and \"keep-balance install guide\":{{site.baseurl}}/install/install-keep-balance.html for more details.\n\nh3. Arv-git-httpd configuration migration\n\n(feature \"#14712\":https://dev.arvados.org/issues/14712 ) The arv-git-httpd package can now be configured using the centralized configuration file at @/etc/arvados/config.yml@. Configuration via individual command line arguments is no longer available. Please see \"arv-git-httpd's config migration guide\":https://doc.arvados.org/v2.1/admin/config-migration.html#arv-git-httpd for more details.\n\nh3. Keepstore and keep-web configuration migration\n\nkeepstore and keep-web no longer support configuration via (previously deprecated) command line configuration flags and environment variables.\n\nkeep-web now supports the legacy @keep-web.yml@ config format (used by Arvados 1.4) and the new cluster config file format. Please check \"keep-web's install guide\":{{site.baseurl}}/install/install-keep-web.html for more details.\n\nkeepstore now supports the legacy @keepstore.yml@ config format (used by Arvados 1.4) and the new cluster config file format. Please check the \"keepstore config migration notes\":https://doc.arvados.org/v2.1/admin/config-migration.html#keepstore and \"keepstore install guide\":{{site.baseurl}}/install/install-keepstore.html for more details.\n\nh3. Keepproxy configuration migration\n\n(feature \"#14715\":https://dev.arvados.org/issues/14715 ) Keepproxy can now be configured using the centralized config at @/etc/arvados/config.yml@. Configuration via individual command line arguments is no longer available and the @DisableGet@, @DisablePut@, and @PIDFile@ configuration options are no longer supported. If you are still using the legacy config and @DisableGet@ or @DisablePut@ are set to true or @PIDFile@ has a value, keepproxy will produce an error and fail to start. Please see \"keepproxy's config migration guide\":https://doc.arvados.org/v2.1/admin/config-migration.html#keepproxy for more details.\n\nh3. Delete \"keep_services\" records\n\nAfter all keepproxy and keepstore configurations have been migrated to the centralized configuration file, all keep_services records you added manually during installation should be removed. System logs from keepstore and keepproxy at startup, as well as the output of @arvados-server config-check@, will remind you to do this.\n\n<notextile><pre><code>$ export ARVADOS_API_HOST=...\n$ export ARVADOS_API_TOKEN=...\n$ arv --format=uuid keep_service list | xargs -n1 arv keep_service delete --uuid\n</code></pre></notextile>\n\nOnce these old records are removed, @arv keep_service list@ will instead return the services listed under Services/Keepstore/InternalURLs and Services/Keepproxy/ExternalURL in your centralized configuration file.\n\nh3. Enabling Postgres trigram indexes\n\nFeature \"#15106\":https://dev.arvados.org/issues/15106 improves the speed and functionality of full text search by introducing trigram indexes on text searchable database columns via a migration. 
Prior to updating, you must first install the postgresql-contrib package on your system and subsequently run the <code class=\"userinput\">CREATE EXTENSION pg_trgm</code> SQL command on the arvados_production database as a postgres superuser.\n\nThe \"postgres-contrib package\":https://www.postgresql.org/docs/10/contrib.html has been supported since PostgreSQL version 9.4. The version of the contrib package should match the version of your PostgreSQL installation. Using 9.5 as an example, the package can be installed and the extension enabled using the following:\n\n<strong>Centos 7</strong>\n<notextile>\n<pre><code>~$ <span class=\"userinput\">sudo yum install -y postgresql95-contrib</span>\n~$ <span class=\"userinput\">su - postgres -c \"psql -d 'arvados_production' -c 'CREATE EXTENSION IF NOT EXISTS pg_trgm'\"</span>\n</code></pre>\n</notextile>\n\n<strong>RHEL 7</strong>\n<notextile>\n<pre><code>~$ <span class=\"userinput\">sudo yum install -y rh-postgresql95-postgresql-contrib</span>\n~$ <span class=\"userinput\">su - postgres -c \"psql -d 'arvados_production' -c 'CREATE EXTENSION IF NOT EXISTS pg_trgm'\"</span>\n</code></pre>\n</notextile>\n\n<strong>Debian or Ubuntu</strong>\n<notextile>\n<pre><code>~$ <span class=\"userinput\">sudo apt-get install -y postgresql-contrib-9.5</span>\n~$ <span class=\"userinput\">sudo -u postgres psql -d 'arvados_production' -c 'CREATE EXTENSION IF NOT EXISTS pg_trgm'</span>\n</code></pre>\n</notextile>\n\nSubsequently, the <code class=\"userinput\">psql -d 'arvados_production' -c '\\dx'</code> command will display the installed extensions for the arvados_production database. This list should now contain @pg_trgm@.\n\nh3. New Workbench 2\n\nWorkbench 2 is now ready for regular use.  Follow the instructions to \"install workbench 2\":../install/install-workbench2-app.html\n\nh3. New property vocabulary format for Workbench2\n\n(feature \"#14151\":https://dev.arvados.org/issues/14151) Workbench2 supports a new vocabulary format which isn't compatible with the previous one; please read the \"metadata vocabulary format admin page\":{{site.baseurl}}/admin/metadata-vocabulary.html for more information.\n\nh3. Cloud installations only: node manager replaced by arvados-dispatch-cloud\n\nNode manager is deprecated and replaced by @arvados-dispatch-cloud@.  No automated config migration is available.  Follow the instructions to \"install the cloud dispatcher\":../install/crunch2-cloud/install-dispatch-cloud.html\n\n*Only one dispatch process should be running at a time.* If you are migrating a system that currently runs Node manager and @crunch-dispatch-slurm@, it is safest to remove the @crunch-dispatch-slurm@ service entirely before installing @arvados-dispatch-cloud@.\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">sudo systemctl --now disable crunch-dispatch-slurm</span>\n~$ <span class=\"userinput\">sudo apt-get remove crunch-dispatch-slurm</span>\n</code></pre>\n</notextile>\n\nh3. Jobs API is read-only\n\n(task \"#15133\":https://dev.arvados.org/issues/15133 ) The legacy 'jobs' API is now read-only.  It has been superseded since Arvados 1.1 by containers / container_requests (aka crunch v2).  Arvados installations since the end of 2017 (v1.1.0) have probably only used containers, and are unaffected by this change.\n\nSo that older Arvados sites don't lose access to legacy records, the API has been converted to read-only.  
Creating and updating jobs (and related types job_task, pipeline_template and pipeline_instance) is disabled, and much of the related business logic has been removed, along with various other code specific to the jobs API.  Specifically, the following programs associated with the jobs API have been removed: @crunch-dispatch.rb@, @crunch-job@, @crunchrunner@, @arv-run-pipeline-instance@, @arv-run@.\n\nh3. \"/\" prohibited in collection and project names\n\n(issue \"#15836\":https://dev.arvados.org/issues/15836) By default, Arvados now rejects new names containing the @/@ character when creating or renaming collections and projects. Previously, these names were permitted, but the resulting objects were invisible in the WebDAV \"home\" tree. If you prefer, you can restore the previous behavior, and optionally configure a substitution string to make the affected objects accessible via WebDAV. See @ForwardSlashNameSubstitution@ in the \"configuration reference\":config.html.\n\nh3. No longer stripping ':' from strings in serialized database columns\n\n(bug \"#15311\":https://dev.arvados.org/issues/15311 ) Strings read from serialized columns in the database with a leading ':' would have the ':' stripped after loading the record.  This behavior existed because legacy serialization stored Ruby symbols with a leading ':'.  Unfortunately this corrupted fields where the leading \":\" was intentional.  This behavior has been removed.\n\nYou can test whether any records in your database are affected by going to the API server directory and running @bundle exec rake symbols:check@.  This will report which records contain fields with a leading ':' that would previously have been stripped.  If there are records to be updated, you can update the database using @bundle exec rake symbols:stringify@.\n\nh3. Scoped tokens should use PATCH for updates\n\nThe API server accepts both PUT and PATCH for updates, but they will be normalized to PATCH by arvados-controller.  Scoped tokens should be updated accordingly.\n\nh2(#v1_4_1). v1.4.1 (2019-09-20)\n\n\"previous: Upgrading to 1.4.0\":#v1_4_0\n\nh3. Centos7 Python 3 dependency upgraded to rh-python36\n\nThe Python 3 dependency for Centos7 Arvados packages was upgraded from rh-python35 to rh-python36.\n\nh2(#v1_4_0). v1.4.0 (2019-06-05)\n\n\"previous: Upgrading to 1.3.3\":#v1_3_3\n\nh3. Populating the new file_count and file_size_total columns on the collections table\n\nAs part of story \"#14484\":https://dev.arvados.org/issues/14484, two new columns were added to the collections table in a database migration. If your installation has a large collections table, this migration may take some time. We've seen it take ~5 minutes on an installation with 250k collections, but your mileage may vary.\n\nThe new columns are initialized with a zero value. In order to populate them, it is necessary to run a script called <code class=\"userinput\">populate-file-info-columns-in-collections.rb</code> from the scripts directory of the API server. This can be done out of band, ideally directly after the API server has been upgraded to v1.4.0.\n\nh3. Stricter collection manifest validation on the API server\n\nAs a consequence of \"#14482\":https://dev.arvados.org/issues/14482, the Ruby SDK does a more rigorous collection manifest validation. 
Collections created after 2015-05 are unlikely to be invalid; however, you may check for invalid manifests using the script below.\n\nYou could set up a new rvm gemset and install the specific arvados gem for testing, like so:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">rvm gemset create rubysdk-test</span>\n~$ <span class=\"userinput\">rvm gemset use rubysdk-test</span>\n~$ <span class=\"userinput\">gem install arvados -v 1.3.1.20190301212059</span>\n</code></pre>\n</notextile>\n\nNext, you can run the following script using admin credentials; it will scan the whole collection database and report any collection that didn't pass the check:\n\n{% codeblock as ruby %}\nrequire 'arvados'\nrequire 'arvados/keep'\n\napi = Arvados.new\noffset = 0\nbatch_size = 100\ninvalid = []\n\nwhile true\n    begin\n        req = api.collection.index(\n            :select => [:uuid, :created_at, :manifest_text],\n            :include_trash => true, :include_old_versions => true,\n            :limit => batch_size, :offset => offset)\n    rescue\n        invalid.each {|c| puts \"#{c[:uuid]} (Created at #{c[:created_at]}): #{c[:error]}\" }\n        raise\n    end\n\n    req[:items].each do |col|\n        begin\n            Keep::Manifest.validate! col[:manifest_text]\n        rescue Exception => e\n            puts \"Collection #{col[:uuid]} manifest not valid\"\n            invalid << {uuid: col[:uuid], error: e, created_at: col[:created_at]}\n        end\n    end\n    puts \"Checked #{offset} / #{req[:items_available]} - Invalid: #{invalid.size}\"\n    offset += req[:limit]\n    break if offset > req[:items_available]\nend\n\nif invalid.empty?\n    puts \"No invalid collection manifests found\"\nelse\n    invalid.each {|c| puts \"#{c[:uuid]} (Created at #{c[:created_at]}): #{c[:error]}\" }\nend\n{% endcodeblock %}\n\nThe script will return a final report enumerating any invalid collection by UUID, with its creation date and error message so you can take the proper correction measures, if needed.\n\nh3. Python packaging change\n\nAs part of story \"#9945\":https://dev.arvados.org/issues/9945, the distribution packaging (deb/rpm) of our Python packages has changed. These packages now include a built-in virtualenv to reduce dependencies on system packages. We have also stopped packaging and publishing backports for all the Python dependencies of our packages, as they are no longer needed.\n\nOne practical consequence of this change is that the use of the Arvados Python SDK (aka \"import arvados\") will require a tweak if the SDK was installed from a distribution package. It now requires the loading of the virtualenv environment from our packages. The \"Install documentation for the Arvados Python SDK\":{{ site.baseurl }}/sdk/python/sdk-python.html reflects this change. This does not affect the use of the command line tools (e.g. 
arv-get, etc.).\n\nPython scripts that rely on the distribution Arvados Python SDK packages to import the Arvados SDK will need to be tweaked to load the correct Python environment.\n\nThis can be done by activating the virtualenv outside of the script:\n\n<notextile>\n<pre>~$ <code class=\"userinput\">source /usr/share/python2.7/dist/python-arvados-python-client/bin/activate</code>\n(python-arvados-python-client) ~$ <code class=\"userinput\">path-to-the-python-script</code>\n</pre>\n</notextile>\n\nOr alternatively, by updating the shebang line at the start of the script to:\n\n<notextile>\n<pre>\n#!/usr/share/python2.7/dist/python-arvados-python-client/bin/python\n</pre>\n</notextile>\n\nh3. python-arvados-cwl-runner deb/rpm package now conflicts with python-cwltool deb/rpm package\n\nAs part of story \"#9945\":https://dev.arvados.org/issues/9945, the distribution packaging (deb/rpm) of our Python packages has changed. The python-arvados-cwl-runner package now includes a version of cwltool. If present, the python-cwltool and cwltool distribution packages will need to be uninstalled before the python-arvados-cwl-runner deb or rpm package can be installed.\n\nh3. Centos7 Python 3 dependency upgraded to rh-python35\n\nAs part of story \"#9945\":https://dev.arvados.org/issues/9945, the Python 3 dependency for Centos7 Arvados packages was upgraded from SCL python33 to rh-python35.\n\nh3. Centos7 package for libpam-arvados depends on the python-pam package, which is available from EPEL\n\nAs part of story \"#9945\":https://dev.arvados.org/issues/9945, it was discovered that the Centos7 package for libpam-arvados was missing a dependency on the python-pam package, which is available from the EPEL repository. The dependency has been added to the libpam-arvados package. This means that going forward, the EPEL repository will need to be enabled to install libpam-arvados on Centos7.\n\nh3. New configuration\n\nArvados is migrating to a centralized configuration file for all components.  During the migration, legacy configuration files will continue to be loaded.  See \"Migrating Configuration\":https://doc.arvados.org/v2.1/admin/config-migration.html for details.\n\nh2(#v1_3_3). v1.3.3 (2019-05-14)\n\n\"previous: Upgrading to 1.3.0\":#v1_3_0\n\nThis release corrects a potential data loss issue, if you are running Arvados 1.3.0 or 1.3.1 we strongly recommended disabling @keep-balance@ until you can upgrade to 1.3.3 or 1.4.0. With keep-balance disabled, there is no chance of data loss.\n\nWe've put together a \"wiki page\":https://dev.arvados.org/projects/arvados/wiki/Recovering_lost_data which outlines how to recover blocks which have been put in the trash, but not yet deleted, as well as how to identify any collections which have missing blocks so that they can be regenerated. The keep-balance component has been enhanced to provide a list of missing blocks and affected collections and we've provided a \"utility script\":https://github.com/arvados/arvados/blob/main/tools/keep-xref/keep-xref.py  which can be used to identify the workflows that generated those collections and who ran those workflows, so that they can be rerun.\n\nh2(#v1_3_0). v1.3.0 (2018-12-05)\n\n\"previous: Upgrading to 1.2\":#v1_2_0\n\nThis release includes several database migrations, which will be executed automatically as part of the API server upgrade. On large Arvados installations, these migrations will take a while. 
We've seen the upgrade take 30 minutes or more on installations with a lot of collections.\n\nThe @arvados-controller@ component now requires the /etc/arvados/config.yml file to be present.\n\nSupport for the deprecated \"jobs\" API is broken in this release.  Users who rely on it should not upgrade.  This will be fixed in an upcoming 1.3.1 patch release; however, users are encouraged to migrate, as support for the \"jobs\" API will be dropped in an upcoming release.  Users who are already using the \"containers\" API are not affected.\n\nh2(#v1_2_1). v1.2.1 (2018-11-26)\n\nThere are no special upgrade notes for this release.\n\nh2(#v1_2_0). v1.2.0 (2018-09-05)\n\n\"previous: Upgrading to 1.1.2 or 1.1.3\":#v1_1_2\n\nh3. Regenerate Postgres table statistics\n\nIt is recommended to regenerate the table statistics for Postgres after upgrading to v1.2.0. If autovacuum is enabled on your installation, this script would do the trick:\n\n<pre>\n#!/bin/bash\n\nset -e\nset -u\n\ntables=`echo \"\\dt\" | psql arvados_production | grep public | awk -e '{print $3}'`\n\nfor t in $tables; do\n    echo \"echo 'analyze $t' | psql arvados_production\"\n    time echo \"analyze $t\" | psql arvados_production\ndone\n</pre>\n\nIf you also need to do the vacuum, you could adapt the script to run 'vacuum analyze' instead of 'analyze'.\n\nh3. New component: arvados-controller\n\nCommit \"db5107dca\":https://dev.arvados.org/projects/arvados/repository/revisions/db5107dca adds a new system service, arvados-controller. More detail is available in story \"#13497\":https://dev.arvados.org/issues/13497.\n\nTo add the Arvados Controller to your system please refer to the \"installation instructions\":../install/install-api-server.html after upgrading your system to 1.2.0.\n\nVerify your setup by confirming that API calls appear in the controller's logs (_e.g._, @journalctl -fu arvados-controller@) while loading a workbench page.\n\nh2(#v1_1_4). v1.1.4 (2018-04-10)\n\n\"previous: Upgrading to 1.1.3\":#v1_1_3\n\nh3. arvados-cwl-runner regressions (2018-04-05)\n\n<strong>Secondary files missing from toplevel workflow inputs</strong>\n\nThis only affects workflows that rely on implicit discovery of secondaryFiles.\n\nIf a workflow input does not declare @secondaryFiles@ corresponding to the @secondaryFiles@ of workflow steps which use the input, the workflow would inconsistently succeed or fail depending on whether the input values were specified as local files or referenced an existing collection (and whether the existing collection contained the secondary files or not).  
To ensure consistent behavior, the workflow is now required to declare in the top level workflow inputs any secondaryFiles that are expected by workflow steps.\n\nAs an example, the following workflow will fail because the @toplevel_input@ does not declare the @secondaryFiles@ that are expected by @step_input@:\n\n<pre>\nclass: Workflow\ncwlVersion: v1.0\ninputs:\n  toplevel_input: File\noutputs: []\nsteps:\n  step1:\n    in:\n      step_input: toplevel_input\n    out: []\n    run:\n      id: sub\n      class: CommandLineTool\n      inputs:\n        step_input:\n          type: File\n          secondaryFiles:\n            - .idx\n      outputs: []\n      baseCommand: echo\n</pre>\n\nWhen run, this produces an error like this:\n\n<pre>\ncwltool ERROR: [step step1] Cannot make job: Missing required secondary file 'hello.txt.idx' from file object: {\n    \"basename\": \"hello.txt\",\n    \"class\": \"File\",\n    \"location\": \"keep:ade9d0e032044bd7f58daaecc0d06bc6+51/hello.txt\",\n    \"size\": 0,\n    \"nameroot\": \"hello\",\n    \"nameext\": \".txt\",\n    \"secondaryFiles\": []\n}\n</pre>\n\nTo fix this error, add the appropriate @secondaryFiles@ section to @toplevel_input@:\n\n<notextile>\n<pre><code>class: Workflow\ncwlVersion: v1.0\ninputs:\n  <span class=\"userinput\">toplevel_input:\n    type: File\n    secondaryFiles:\n      - .idx</span>\noutputs: []\nsteps:\n  step1:\n    in:\n      step_input: toplevel_input\n    out: []\n    run:\n      id: sub\n      class: CommandLineTool\n      inputs:\n        step_input:\n          type: File\n          secondaryFiles:\n            - .idx\n      outputs: []\n      baseCommand: echo\n</code></pre>\n</notextile>\n\nThis bug has been fixed in Arvados release v1.2.0.\n\n<strong>Secondary files on default file inputs</strong>\n\n@File@ inputs that have default values and also expect @secondaryFiles@ will fail to upload the default @secondaryFiles@.  As an example, the following case will fail:\n\n<pre>\nclass: CommandLineTool\ninputs:\n  step_input:\n    type: File\n    secondaryFiles:\n      - .idx\n    default:\n      class: File\n      location: hello.txt\noutputs: []\nbaseCommand: echo\n</pre>\n\nWhen run, this produces an error like this:\n\n<pre>\n2018-05-03 10:58:47 cwltool ERROR: Unhandled error, try again with --debug for more information:\n  [Errno 2] File not found: u'hello.txt.idx'\n</pre>\n\nTo fix this, manually upload the primary and secondary files to Keep and explicitly declare @secondaryFiles@ on the default primary file:\n\n<notextile>\n<pre><code>class: CommandLineTool\ninputs:\n  step_input:\n    type: File\n    secondaryFiles:\n      - .idx\n    <span class=\"userinput\">default:\n      class: File\n      location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt\n      secondaryFiles:\n      - class: File\n        location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt.idx</span>\noutputs: []\nbaseCommand: echo\n</code></pre>\n</notextile>\n\nThis bug has been fixed in Arvados release v1.2.0.\n\nh2(#v1_1_3). v1.1.3 (2018-02-08)\n\nThere are no special upgrade notes for this release.\n\nh2(#v1_1_2). v1.1.2 (2017-12-22)\n\n\"previous: Upgrading to 1.1.0 or 1.1.1\":#v1_1_0\n\nh3. The minimum version for Postgres is now 9.4 (2017-12-08)\n\nAs part of story \"#11908\":https://dev.arvados.org/issues/11908, commit \"8f987a9271\":https://dev.arvados.org/projects/arvados/repository/revisions/8f987a9271 introduces a dependency on Postgres 9.4. 
Previously, Arvados required Postgres 9.3.\n\n* Debian 8 (pg 9.4) and Debian 9 (pg 9.6) do not require an upgrade\n* Ubuntu 16.04 (pg 9.5) does not require an upgrade\n* Ubuntu 14.04 (pg 9.3) requires upgrade to Postgres 9.4: https://www.postgresql.org/download/linux/ubuntu/\n* CentOS 7 and RHEL7 (pg 9.2) require upgrade to Postgres 9.4. It is necessary to migrate the contents of your database: https://www.postgresql.org/docs/9.0/static/migration.html\n*# Create a database backup using @pg_dump@\n*# Install the @rh-postgresql94@ backport package from either Software Collections: http://doc.arvados.org/install/install-postgresql.html or the Postgres developers: https://www.postgresql.org/download/linux/redhat/\n*# Restore from the backup using @psql@\n\nh2(#v1_1_1). v1.1.1 (2017-11-30)\n\nThere are no special upgrade notes for this release.\n\nh2(#v1_1_0). v1.1.0 (2017-10-24)\n\nh3. The minimum version for Postgres is now 9.3 (2017-09-25)\n\nAs part of story \"#12032\":https://dev.arvados.org/issues/12032, commit \"68bdf4cbb1\":https://dev.arvados.org/projects/arvados/repository/revisions/68bdf4cbb1 introduces a dependency on Postgres 9.3. Previously, Arvados required Postgres 9.1.\n\n* Debian 8 (pg 9.4) and Debian 9 (pg 9.6) do not require an upgrade\n* Ubuntu 16.04 (pg 9.5) does not require an upgrade\n* Ubuntu 14.04 (pg 9.3) is compatible; however, upgrading to Postgres 9.4 is recommended: https://www.postgresql.org/download/linux/ubuntu/\n* CentOS 7 and RHEL7 (pg 9.2) should upgrade to Postgres 9.4. It is necessary to migrate the contents of your database: https://www.postgresql.org/docs/9.0/static/migration.html\n*# Create a database backup using @pg_dump@\n*# Install the @rh-postgresql94@ backport package from either Software Collections: http://doc.arvados.org/install/install-postgresql.html or the Postgres developers: https://www.postgresql.org/download/linux/redhat/\n*# Restore from the backup using @psql@\n\nh2(#older). Older versions\n\nh3. Upgrade slower than usual (2017-06-30)\n\nAs part of story \"#11807\":https://dev.arvados.org/issues/11807, commit \"55aafbb\":https://dev.arvados.org/projects/arvados/repository/revisions/55aafbb converts old \"jobs\" database records from YAML to JSON, making the upgrade process slower than usual.\n\n* The migration can take some time if your database contains a substantial number of YAML-serialized rows (i.e., you installed Arvados before March 3, 2017 \"660a614\":https://dev.arvados.org/projects/arvados/repository/revisions/660a614 and used the jobs/pipelines APIs). Otherwise, the upgrade will be no slower than usual.\n* The conversion runs as a database migration, i.e., during the deb/rpm package upgrade process, while your API server is unavailable.\n* Expect it to take about 1 minute per 20K jobs that have ever been created/run.\n\nh3. Service discovery overhead change in keep-web (2017-06-05)\n\nAs part of story \"#9005\":https://dev.arvados.org/issues/9005, commit \"cb230b0\":https://dev.arvados.org/projects/arvados/repository/revisions/cb230b0 reduces service discovery overhead in keep-web requests.\n\n* When upgrading keep-web _or keepproxy_ to/past this version, make sure to update API server as well. Otherwise, a bad token in a request can cause keep-web to fail future requests until either keep-web restarts or API server gets upgraded.\n\nh3. 
Node manager now has an http endpoint for management (2017-04-12)\n\nAs part of story \"#11349\":https://dev.arvados.org/issues/11349, commit \"2c094e2\":https://dev.arvados.org/projects/arvados/repository/revisions/2c094e2 adds a \"management\" http server to nodemanager.\n\n* To enable it, add to your configuration file: <pre>[Manage]\n  address = 127.0.0.1\n  port = 8989</pre>\n* The server responds to @http://{address}:{port}/status.json@ with a summary of how many nodes are in each state (booting, busy, shutdown, etc.)\n\nh3. New websockets component (2017-03-23)\n\nAs part of story \"#10766\":https://dev.arvados.org/issues/10766, commit \"e8cc0d7\":https://dev.arvados.org/projects/arvados/repository/revisions/e8cc0d7 replaces puma with arvados-ws as the recommended websocket server.\n* See http://doc.arvados.org/install/install-ws.html for install/upgrade instructions.\n* Remove the old puma server after the upgrade is complete. Example, with runit: <pre>\n$ sudo sv down /etc/sv/puma\n$ sudo rm -r /etc/sv/puma\n</pre> Example, with systemd: <pre>\n$ systemctl disable puma\n$ systemctl stop puma\n</pre>\n\nh3. Change of database encoding for hashes and arrays (2017-03-06)\n\nAs part of story \"#11168\":https://dev.arvados.org/issues/11168, commit \"660a614\":https://dev.arvados.org/projects/arvados/repository/revisions/660a614 uses JSON instead of YAML to encode hashes and arrays in the database.\n\n* Aside from a slight performance improvement, this should have no externally visible effect.\n* Downgrading past this version is not supported, and is likely to cause errors. If this happens, the solution is to upgrade past this version.\n* After upgrading, make sure to restart puma and crunch-dispatch-* processes.\n\nh3. Docker image format compatibility check (2017-02-03)\n\nAs part of story \"#10969\":https://dev.arvados.org/issues/10969, commit \"74a9dec\":https://dev.arvados.org/projects/arvados/repository/revisions/74a9dec introduces a Docker image format compatibility check: the @arv keep docker@ command prevents users from inadvertently saving docker images that compute nodes won't be able to run.\n* If your compute nodes run a version of *docker older than 1.10* you must override the default by adding to your API server configuration (@/etc/arvados/api/application.yml@): <pre><code class=\"yaml\">docker_image_formats: [\"v1\"]</code></pre>\n* Refer to the comments above @docker_image_formats@ in @/var/www/arvados-api/current/config/application.default.yml@ or source:services/api/config/application.default.yml or issue \"#10969\":https://dev.arvados.org/issues/10969 for more detail.\n* *NOTE:* This does *not* include any support for migrating existing Docker images from v1 to v2 format. This will come later: for now, sites running Docker 1.9 or earlier should still *avoid upgrading Docker further than 1.9.*\n\nh3. Debian and RPM packages now have systemd unit files (2016-09-27)\n\nSeveral Debian and RPM packages -- keep-balance (\"d9eec0b\":https://dev.arvados.org/projects/arvados/repository/revisions/d9eec0b), keep-web (\"3399e63\":https://dev.arvados.org/projects/arvados/repository/revisions/3399e63), keepproxy (\"6de67b6\":https://dev.arvados.org/projects/arvados/repository/revisions/6de67b6), and arvados-git-httpd (\"9e27ddf\":https://dev.arvados.org/projects/arvados/repository/revisions/9e27ddf) -- now enable their respective components using systemd. 
These components prefer YAML configuration files over command line flags (\"3bbe1cd\":https://dev.arvados.org/projects/arvados/repository/revisions/3bbe1cd).\n\n* On Debian-based systems using systemd, services are enabled automatically when packages are installed.\n* On RedHat-based systems using systemd, unit files are installed but services must be enabled explicitly: e.g., <code>\"sudo systemctl enable keep-web; sudo systemctl start keep-web\"</code>.\n* The new systemd-supervised services will not start up successfully until configuration files are installed in /etc/arvados/: e.g., <code>\"Sep 26 18:23:55 62751f5bb946 keep-web[74]: 2016/09/26 18:23:55 open /etc/arvados/keep-web/keep-web.yml: no such file or directory\"</code>\n* To migrate from runit to systemd after installing the new packages, we recommend the following procedure:\n*# Bring down the runit service: \"sv down /etc/sv/keep-web\"\n*# Create a YAML configuration file (e.g., /etc/arvados/keep-web/keep-web.yml -- see \"keep-web -help\")\n*# Ensure the service is running correctly under systemd: \"systemctl status keep-web\" / \"journalctl -u keep-web\"\n*# Remove the runit service so it doesn't start at next boot\n* Affected services:\n** keep-balance - /etc/arvados/keep-balance/keep-balance.yml\n** keep-web - /etc/arvados/keep-web/keep-web.yml\n** keepproxy - /etc/arvados/keepproxy/keepproxy.yml\n** arvados-git-httpd - /etc/arvados/arv-git-httpd/arv-git-httpd.yml\n\nh3. Installation paths for Python modules and scripts changed (2016-05-31)\n\nCommits \"ae72b172c8\":https://dev.arvados.org/projects/arvados/repository/revisions/ae72b172c8 and \"3aae316c25\":https://dev.arvados.org/projects/arvados/repository/revisions/3aae316c25 change the filesystem location where Python modules and scripts are installed.\n\n* Previous packages installed these files to the distribution's preferred path under @/usr/local@ (or the equivalent location in a Software Collection).  Now they get installed to a path under @/usr@.  This improves compatibility with other Python packages provided by the distribution.  See \"#9242\":https://dev.arvados.org/issues/9242 for more background.\n* If you simply import Python modules from scripts, or call Python tools relying on $PATH, you don't need to make any changes.  If you have hardcoded full paths to some of these files (e.g., in symbolic links or configuration files), you will need to update those paths after this upgrade.\n\nh3. Crunchrunner package is required on compute and shell nodes (2016-04-25)\n\nCommit \"eebcb5e\":https://dev.arvados.org/projects/arvados/repository/revisions/eebcb5e requires the crunchrunner package to be installed on compute nodes and shell nodes in order to run CWL workflows.\n\n* On each Debian-based compute node and shell node, run: @sudo apt-get install crunchrunner@\n* On each Red Hat-based compute node and shell node, run: @sudo yum install crunchrunner@\n\nh3. Keep permission signature algorithm change (2016-04-21)\n\nCommit \"3c88abd\":https://dev.arvados.org/projects/arvados/repository/revisions/3c88abd changes the Keep permission signature algorithm.\n\n* All software components that generate signatures must be upgraded together. These are: keepstore, API server, keep-block-check, and keep-rsync. For example, if keepstore < 0.1.20160421183420 but API server >= 0.1.20160421183420, clients will not be able to read or write data in Keep.\n* Jobs and client operations that are in progress during the upgrade (including arv-put's \"resume cache\") will fail.\n\nh3. 
Workbench's \"Getting Started\" popup disabled by default (2015-01-05)\n\nCommit \"e1276d6e\":https://dev.arvados.org/projects/arvados/repository/revisions/e1276d6e disables Workbench's \"Getting Started\" popup by default.\n\n* If you want new users to continue seeing this popup, set @enable_getting_started_popup: true@ in Workbench's @application.yml@ configuration.\n\nh3. Crunch jobs now have access to Keep-backed writable scratch storage (2015-12-03)\n\nCommit \"5590c9ac\":https://dev.arvados.org/projects/arvados/repository/revisions/5590c9ac makes a Keep-backed writable scratch directory available in crunch jobs (see \"#7751\":https://dev.arvados.org/issues/7751)\n\n* All compute nodes must be upgraded to arvados-fuse >= 0.1.2015112518060 because crunch-job uses some new arv-mount flags (--mount-tmp, --mount-by-pdh) introduced in merge \"346a558\":https://dev.arvados.org/projects/arvados/repository/revisions/346a558\n* Jobs will fail if the API server (in particular crunch-job from the arvados-cli gem) is upgraded without upgrading arvados-fuse on compute nodes.\n\nh3. Recommended configuration change for keep-web (2015-11-11)\n\nCommit \"1e2ace5\":https://dev.arvados.org/projects/arvados/repository/revisions/1e2ace5 changes recommended config for keep-web (see \"#5824\":https://dev.arvados.org/issues/5824)\n\n* proxy/dns/ssl config should be updated to route \"https://download.ClusterID.example.com/\" requests to keep-web (alongside the existing \"collections\" routing)\n* keep-web command line adds @-attachment-only-host download.ClusterID.example.com@\n* Workbench config adds @keep_web_download_url@\n* More info on the (still beta/non-TOC-linked) \"keep-web doc page\":http://doc.arvados.org/install/install-keep-web.html\n\nh3. Stopped containers are now automatically removed on compute nodes (2015-11-04)\n\nCommit \"1d1c6de\":https://dev.arvados.org/projects/arvados/repository/revisions/1d1c6de removes stopped containers (see \"#7444\":https://dev.arvados.org/issues/7444)\n\n* arvados-docker-cleaner removes _all_ docker containers as soon as they exit, effectively making @docker run@ default to @--rm@. If you run arvados-docker-cleaner on a host that does anything other than run crunch-jobs, and you still want to be able to use @docker start@, read the \"new doc page\":http://doc.arvados.org/install/install-compute-node.html to learn how to turn this off before upgrading.\n\nh3. New keep-web service (2015-11-04)\n\nCommit \"21006cf\":https://dev.arvados.org/projects/arvados/repository/revisions/21006cf adds a new keep-web service (see \"#5824\":https://dev.arvados.org/issues/5824).\n\n* Nothing relies on keep-web yet, but early adopters can install it now by following http://doc.arvados.org/install/install-keep-web.html (it is not yet linked in the TOC).\n\n<notextile>\n</div>\n</notextile>\n"
  },
  {
    "path": "doc/admin/user-activity.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: \"User activity report\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe @arv-user-activity@ tool generates a summary report of user activity on an Arvados instance based on the audit logs (the @logs@ table).\n\nh2. Installation\n\nh2. Option 1: Install from a distribution package\n\nThis installation method is recommended to make the CLI tools available system-wide. It can coexist with the installation method described in option 2, below.\n\nFirst, configure the \"Arvados package repositories\":{{ site.baseurl }}/install/packages.html\n\n{% assign arvados_component = 'python3-arvados-user-activity' %}\n\n{% include 'install_packages' %}\n\nh2. Option 2: Install from source\n\nStep 1: Check out the arvados source code\n\nStep 2: Change directory to @arvados/tools/user-activity@\n\nStep 3: Run @pip install .@ in an appropriate installation environment, such as a @virtualenv@.\n\nNote: depends on the \"Arvados Python SDK\":{{ site.baseurl }}/sdk/python/sdk-python.html and its associated build prerequisites (e.g. @pycurl@).\n\nh2. Usage\n\nSet ARVADOS_API_HOST to the api server of the cluster for which the report should be generated. ARVADOS_API_TOKEN needs to be a \"v2 token\":../admin/scoped-tokens.html for an admin user, or the system root token. Please note that in a login cluster federation, the token needs to be issued by the login cluster, but the report should be generated against the API server of the cluster for which it is desired. In other words, ARVADOS_API_HOST would point at the satellite cluster for which the report is desired, but ARVADOS_API_TOKEN would be a token that belongs to a login cluster user, or the login cluster's system root token.\n\nRun the tool with the option @--days@ giving the number of days to report on.  
It will request activity logs from the API and generate a summary report on standard output.\n\nExample run:\n\n<pre>\n$ bin/arv-user-activity --days 14\nUser activity on pirca between 2020-11-10 16:42 and 2020-11-24 16:42\n\nPeter Amstutz <peter.amstutz@curii.com> (https://workbench.pirca.arvadosapi.com/users/jutro-tpzed-a4qnxq3pcfcgtkz)\n  organization: \"Curii\"\n  role: \"Software Developer\"\n\n  2020-11-10 16:51-05:00 to 2020-11-11 13:51-05:00 (21:00) Account activity\n  2020-11-13 13:47-05:00 to 2020-11-14 03:32-05:00 (13:45) Account activity\n  2020-11-14 04:33-05:00 to 2020-11-15 20:33-05:00 (40:00) Account activity\n  2020-11-15 21:34-05:00 to 2020-11-16 13:34-05:00 (16:00) Account activity\n  2020-11-16 16:21-05:00 to 2020-11-16 16:28-05:00 (00:07) Account activity\n  2020-11-17 15:49-05:00 to 2020-11-17 15:49-05:00 (00:00) Account activity\n  2020-11-17 15:51-05:00 Created project \"New project\" (pirca-j7d0g-7bxvkyr4khfa1a4)\n  2020-11-17 15:51-05:00 Updated project \"Test run\" (pirca-j7d0g-7bxvkyr4khfa1a4)\n  2020-11-17 15:51-05:00 Ran container \"bwa-mem.cwl container\" (pirca-xvhdp-xf2w8dkk17jkk5r)\n  2020-11-17 15:51-05:00 to 2020-11-17 15:51-05:00 (0:00) Account activity\n  2020-11-17 15:53-05:00 Ran container \"WGS processing workflow scattered over samples container\" (pirca-xvhdp-u7bm0wdy6lq4r8k)\n  2020-11-17 15:53-05:00 to 2020-11-17 15:54-05:00 (00:01) Account activity\n  2020-11-17 15:55-05:00 Created collection \"output for pirca-dz642-36ffk81c8zzopxz\" (pirca-4zz18-np35gw690ndzzk7)\n  2020-11-17 15:55-05:00 to 2020-11-17 15:55-05:00 (0:00) Account activity\n  2020-11-17 15:55-05:00 Created collection \"Output of main\" (pirca-4zz18-oiiymetwhnnhhwc)\n  2020-11-17 15:55-05:00 Tagged pirca-4zz18-oiiymetwhnnhhwc\n  2020-11-17 15:55-05:00 Updated collection \"Output of main\" (pirca-4zz18-oiiymetwhnnhhwc)\n  2020-11-17 15:55-05:00 to 2020-11-17 16:04-05:00 (00:09) Account activity\n  2020-11-17 16:04-05:00 Created collection \"Output of main\" (pirca-4zz18-f6n9n89e3dhtwvl)\n  2020-11-17 16:04-05:00 Tagged pirca-4zz18-f6n9n89e3dhtwvl\n  2020-11-17 16:04-05:00 Updated collection \"Output of main\" (pirca-4zz18-f6n9n89e3dhtwvl)\n  2020-11-17 16:04-05:00 to 2020-11-17 17:55-05:00 (01:51) Account activity\n  2020-11-17 20:09-05:00 to 2020-11-17 20:09-05:00 (00:00) Account activity\n  2020-11-17 21:35-05:00 to 2020-11-17 21:35-05:00 (00:00) Account activity\n  2020-11-18 10:09-05:00 to 2020-11-18 11:00-05:00 (00:51) Account activity\n  2020-11-18 14:37-05:00 Untagged pirca-4zz18-st8yzjan1nhxo1a\n  2020-11-18 14:37-05:00 Deleted collection \"Output of main\" (pirca-4zz18-st8yzjan1nhxo1a)\n  2020-11-18 17:44-05:00 to 2020-11-18 17:44-05:00 (00:00) Account activity\n  2020-11-19 12:18-05:00 to 2020-11-19 12:19-05:00 (00:01) Account activity\n  2020-11-19 13:57-05:00 to 2020-11-19 14:21-05:00 (00:24) Account activity\n  2020-11-20 09:48-05:00 to 2020-11-20 22:51-05:00 (13:03) Account activity\n  2020-11-20 23:52-05:00 to 2020-11-22 22:32-05:00 (46:40) Account activity\n  2020-11-22 23:37-05:00 to 2020-11-23 13:52-05:00 (14:15) Account activity\n  2020-11-23 14:53-05:00 to 2020-11-24 11:58-05:00 (21:05) Account activity\n  2020-11-24 15:06-05:00 to 2020-11-24 16:38-05:00 (01:32) Account activity\n\nMarc Rubenfield <mrubenfield@curii.com> (https://workbench.pirca.arvadosapi.com/users/jutro-tpzed-v9s9q97pgydh1yf)\n  2020-11-11 12:27-05:00 Untagged pirca-4zz18-xmq257bsla4kdco\n  2020-11-11 12:27-05:00 Deleted collection \"Output of main\" (pirca-4zz18-xmq257bsla4kdco)\n\nWard 
Vandewege <ward@curii.com> (https://workbench.pirca.arvadosapi.com/users/jutro-tpzed-9z6foyez9ydn2hl)\n  organization: \"Curii Corporation, Inc.\"\n  organization_email: \"ward@curii.com\"\n  role: \"System Administrator\"\n  website_url: \"https://curii.com\"\n\n  2020-11-19 19:30-05:00 to 2020-11-19 19:46-05:00 (00:16) Account activity\n  2020-11-20 10:51-05:00 to 2020-11-20 11:26-05:00 (00:35) Account activity\n  2020-11-24 12:01-05:00 to 2020-11-24 13:01-05:00 (01:00) Account activity\n</pre>\n
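\nIf the built-in report doesn't cover your needs, the same audit logs can be queried directly. Below is a minimal, hypothetical sketch using the \"Arvados Python SDK\":{{ site.baseurl }}/sdk/python/sdk-python.html that tallies audit log events by type over the last 14 days; adjust the filters for your own reporting:\n\n<pre>\nimport datetime\nimport arvados\nimport arvados.util\n\napi = arvados.api('v1')\nsince = (datetime.datetime.utcnow() - datetime.timedelta(days=14)).isoformat() + 'Z'\n\n# Tally audit log events by event_type over the reporting window.\ncounts = {}\nfor log in arvados.util.keyset_list_all(\n        api.logs().list,\n        filters=[['created_at', '>=', since]]):\n    counts[log['event_type']] = counts.get(log['event_type'], 0) + 1\n\nfor event_type, count in sorted(counts.items()):\n    print('%s: %d' % (event_type, count))\n</pre>\n"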
  },
  {
    "path": "doc/admin/user-management-cli.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: User management at the CLI\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nInitial setup\n\n<pre>\nARVADOS_API_HOST={{ site.arvados_api_host }}\nARVADOS_API_TOKEN=1234567890qwertyuiopasdfghjklzxcvbnm1234567890zzzz\n</pre>\n\nIn these examples, @zzzzz-tpzed-3kz0nwtjehhl0u4@ is the sample user account.  Replace with the uuid of the user you wish to manipulate.\n\nSee \"user management\":{{site.baseurl}}/admin/user-management.html for an overview of how to use these commands.\n\nh3. Setup a user\n\nThis creates a default git repository and VM login.  Enables user to self-activate using Workbench.\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv user setup --uuid zzzzz-tpzed-3kz0nwtjehhl0u4</span>\n</code></pre>\n</notextile>\n\n\nh3. Deactivate user\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv user unsetup --uuid zzzzz-tpzed-3kz0nwtjehhl0u4</span>\n</code></pre>\n</notextile>\n\n\nWhen deactivating a user, you may also want to \"reassign ownership of their data\":{{site.baseurl}}/admin/reassign-ownership.html .\n\nh3(#activate-user). Directly activate user\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv user update --uuid \"zzzzz-tpzed-3kz0nwtjehhl0u4\" --user '{\"is_active\":true}'</span>\n</code></pre>\n</notextile>\n\nNote: this bypasses user agreements checks, and does not set up the user with a default git repository or VM login.\n\nh3(#create-token). Create a token for a user\n\nAs an admin, you can create tokens for other users.\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv api_client_authorization create --api-client-authorization '{\"owner_uuid\": \"zzzzz-tpzed-fr97h9t4m5jffxs\"}'</span>\n{\n \"kind\":\"arvados#apiClientAuthorization\",\n \"etag\":\"9yk144t0v6cvyp0342exoh2vq\",\n \"uuid\":\"zzzzz-gj3su-yyyyyyyyyyyyyyy\",\n \"owner_uuid\":\"zzzzz-tpzed-fr97h9t4m5jffxs\",\n \"created_at\":\"2020-03-12T20:36:12.517375422Z\",\n \"modified_by_user_uuid\":null,\n \"modified_at\":null,\n \"api_token\":\"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\",\n \"created_by_ip_address\":null,\n \"expires_at\":null,\n \"last_used_at\":null,\n \"last_used_by_ip_address\":null,\n \"scopes\":[\"all\"]\n}\n</code></pre>\n</notextile>\n\n\nTo get the token string, combine the values of @uuid@ and @api_token@ in the form \"v2/$uuid/$api_token\".  In this example the string that goes in @ARVADOS_API_TOKEN@ would be:\n\n<pre>\nARVADOS_API_TOKEN=v2/zzzzz-gj3su-yyyyyyyyyyyyyyy/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n</pre>\n\nh3(#delete-token). Delete a single token\n\nAs a user or admin, if you need to revoke a specific, known token, for example a token that may have been leaked to an unauthorized party, you can delete it at the command line.\n\nFirst, determine the token UUID.  If it is a \"v2\" format token (starts with \"v2/\") then the token UUID is middle section between the two slashes.   
For example:\n\n<pre>\nv2/zzzzz-gj3su-yyyyyyyyyyyyyyy/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n</pre>\n\nthe UUID is \"zzzzz-gj3su-yyyyyyyyyyyyyyy\" and you can skip to the next step.\n\nIf you have a \"bare\" token (only the secret part), then, as an admin, you need to query the token to get the UUID:\n\n<pre>\n$ ARVADOS_API_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx arv --format=uuid api_client_authorization current\nzzzzz-gj3su-yyyyyyyyyyyyyyy\n</pre>\n\nNow you can delete the token:\n\n<pre>\n$ ARVADOS_API_TOKEN=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx arv api_client_authorization delete --uuid zzzzz-gj3su-yyyyyyyyyyyyyyy\n</pre>\n\nh3(#delete-all-tokens). Delete all tokens belonging to a user\n\nFirst, \"obtain a valid token for the user.\":#create-token\n\nThen, use that token to get all the user's tokens, and delete each one:\n\n<pre>\n$ ARVADOS_API_TOKEN=xxxxtoken-belonging-to-user-whose-tokens-will-be-deletedxxxxxxxx ; \\\nfor uuid in $(arv --format=uuid api_client_authorization list) ; do \\\narv api_client_authorization delete --uuid $uuid ; \\\ndone\n</pre>\n\nh2. Adding Permissions\n\nh3(#vm-login). VM login\n\nGive @$user_uuid@ permission to log in to @$vm_uuid@ as @$target_username@, and make sure that @$target_username@ is a member of the @docker@ group:\n\n<pre>\nuser_uuid=xxxxxxxchangeme\nvm_uuid=xxxxxxxchangeme\ntarget_username=xxxxxxxchangeme\n\nread -rd $'\\000' newlink <<EOF; arv link create --link \"$newlink\"\n{\n\"tail_uuid\":\"$user_uuid\",\n\"head_uuid\":\"$vm_uuid\",\n\"link_class\":\"permission\",\n\"name\":\"can_login\",\n\"properties\":{\"username\":\"$target_username\", \"groups\": [ \"docker\" ]}\n}\nEOF\n</pre>\n
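\nThe same permission link can be created with the \"Arvados Python SDK\":{{site.baseurl}}/sdk/python/sdk-python.html. A minimal sketch, using the same placeholder values as the shell example above:\n\n<pre>\nimport arvados\n\napi = arvados.api('v1')\n\n# Placeholders, as above: replace with real values.\nuser_uuid = 'xxxxxxxchangeme'\nvm_uuid = 'xxxxxxxchangeme'\ntarget_username = 'xxxxxxxchangeme'\n\n# Create the can_login permission link, adding the user to the docker group.\napi.links().create(body={'link': {\n    'tail_uuid': user_uuid,\n    'head_uuid': vm_uuid,\n    'link_class': 'permission',\n    'name': 'can_login',\n    'properties': {'username': target_username, 'groups': ['docker']},\n}}).execute()\n</pre>\n"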
  },
  {
    "path": "doc/admin/user-management.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: User management\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Authentication\":#authentication\n## \"Federated Authentication\":#federated_auth\n# \"User activation\":#user_activation\n# \"User agreements and self-activation\":#user_agreements\n# \"User profile\":#user_profile\n# \"User visibility\":#user_visibility\n# \"Pre-setup user by email address\":#pre-activated\n# \"Pre-activate federated user\":#pre-activated-fed\n# \"Auto-setup federated users from trusted clusters\":#auto_setup_federated\n# \"Activation flows\":#activation_flows\n## \"Private instance\":#activation_flow_private\n## \"Federated instance\":#federated\n## \"Open instance\":#activation_flow_open\n# \"Service Accounts\":#service_accounts\n\n{% comment %}\nTODO: Link to relevant workbench documentation when it gets written\n{% endcomment %}\n\nThis page describes how user accounts are created, set up and activated.\n\nh2(#authentication). Authentication\n\n\"Browser login and management of API tokens is described here.\":{{site.baseurl}}/api/tokens.html\n\nAfter completing the log in and authentication process, the API server receives a user record from the upstream identity provider (Google, LDAP, etc) consisting of the user's name, primary email address, alternate email addresses, and optional unique provider identifier (@identity_url@).\n\nIf a provider identifier is given, the API server searches for a matching user record.\n\nIf a provider identifier is not given, no match is found, it next searches by primary email and then alternate email address.  This enables \"provider migration\":migrating-providers.html and \"pre-activated accounts.\":#pre-activated\n\nIf no user account is found, a new user account is created with the information from the identity provider.\n\nIf a user account has been \"linked\":{{site.baseurl}}/user/topics/link-accounts.html or migrated the API server may follow internal redirects (@redirect_to_user_uuid@) to select the linked or migrated user account.\n\nh3(#federated_auth). Federated Authentication\n\nA federated user follows a slightly different flow.  The client presents a token issued by the remote cluster.  The local API server contacts the remote cluster to verify the user's identity.  This results in a user object (representing the remote user) being created on the local cluster.  If the user cannot be verified, the token will be rejected.  If the user is inactive on the remote cluster, a user record will be created, but it will also be inactive.\n\nh2(#user_activation). User activation\n\nThis section describes the different user account states.\n\n!(side){{site.baseurl}}/images/user-account-states.svg!\n\nnotextile. <div class=\"spaced-out\">\n\n# A new user record is not set up, and not active.  An inactive user cannot create or update any object, but can read Arvados objects that the user account has permission to read (such as publicly available items readable by the \"anonymous\" user).\n# Using Workbench or the \"command line\":{{site.baseurl}}/admin/user-management-cli.html, the admin invokes @setup@ on the user.  
The setup method adds the user to the \"All users\" group.\n- If \"Users.AutoSetupNewUsers\":config.html is true, this happens automatically during user creation, so in that case new users start at step (3).\n- If \"Users.AutoSetupNewUsersWithVmUUID\":config.html is set, the user is given login permission to the specified shell node.\n# User is set up, but not yet active.  The browser presents \"user agreements\":#user_agreements (if any) and then invokes the user @activate@ method on the user's behalf.\n# The user @activate@ method checks that all \"user agreements\":#user_agreements are signed.  If so, or there are no user agreements, the user is activated.\n# The user is active.  User has normal access to the system.\n# From steps (1) and (3), an admin user can directly update the @is_active@ flag.  This bypasses enforcement that user agreements are signed.\nIf the user was not yet set up (still in step (1)), this adds the user to the \"All users\" group, but bypasses creating the default git repository and assigning default VM access.\n# An existing user can have their access revoked using @unsetup@ and \"ownership reassigned\":reassign-ownership.html .\nUnsetup removes the user from the \"All users\" group and makes them inactive, preventing them from re-activating themselves.\n\"Ownership reassignment\":reassign-ownership.html moves any objects or permissions from the old user to a new user and deletes any credentials for the old user.\n\nnotextile. </div>\n\nUser management can be performed through the web using Workbench or the command line.  See \"user management at the CLI\":{{site.baseurl}}/admin/user-management-cli.html for specific examples.\n\nh2(#user_agreements). User agreements and self-activation\n\nThe @activate@ method of the users controller checks if the user account is part of the \"All Users\" group and whether the user has \"signed\" all the user agreements.\n\nUser agreements are accessed through the \"user_agreements API\":{{site.baseurl}}/api/methods/user_agreements.html .  This returns a list of collection records.\n\nThe user agreements that users are required to sign should be added to the @links@ table this way:\n\n<pre>\n$ arv link create --link '{\n  \"link_class\": \"signature\",\n  \"name\": \"require\",\n  \"tail_uuid\": \"*system user uuid*\",\n  \"head_uuid\": \"*collection uuid*\"\n}'\n</pre>\n\nThe collection should contain a single HTML file with the user agreement text.\n\nWorkbench displays the clickthrough agreements which the user can \"sign\".\n\nThe @user_agreements/sign@ endpoint creates a Link object:\n\n<pre>\n{\n  \"link_class\": \"signature\",\n  \"name\": \"click\",\n  \"tail_uuid\": \"*user uuid*\",\n  \"head_uuid\": \"*collection uuid*\"\n}\n</pre>\n\nThe @user_agreements/signatures@ endpoint returns the list of Link objects that represent signatures by the current user (created by @sign@).\n\nh2(#user_profile). User profile\n\nThe fields making up the user profile are described in @Workbench.UserProfileFormFields@ .  See \"Configuration reference\":config.html .\n\nThe user profile is checked by Workbench after checking if user agreements need to be signed.  The values entered are stored in the @properties@ field on the user object.  Unlike user agreements, the requirement to fill out the user profile is not enforced by the API server.\n\nh2(#user_visibility). User visibility\n\nInitially, a user is not part of any groups and will not be able to interact with other users on the system.  
The admin should determine who the user is permitted to interact with and use Workbench or the \"command line\":group-management.html#add to create and add the user to the appropriate group(s).\n\nh2(#pre-activated). Pre-setup user by email address\n\nYou may create a user account for a user that has not yet logged in, and identify the user by email address.\n\n1. As an admin, create a user object:\n\n<pre>\n$ arv --format=uuid user create --user '{\"email\": \"foo@example.com\", \"username\": \"foo\"}'\nclsr1-tpzed-1234567890abcdf\n$ arv user setup --uuid clsr1-tpzed-1234567890abcdf\n</pre>\n\n2. When the user logs in the first time, the email address will be recognized and the user will be associated with the existing user object.\n\nh2(#pre-activated-fed). Pre-activate federated user\n\n1. As admin, create a user object with the @uuid@ of the federated user (this is the user's uuid on their home cluster, called @clsr2@ in this example):\n\n<pre>\n$ arv user create --user '{\"uuid\": \"clsr2-tpzed-1234567890abcdf\", \"email\": \"foo@example.com\", \"username\": \"foo\", \"is_active\": true}'\n</pre>\n\n2. When the user logs in, they will be associated with the existing user object.\n\nh2(#auto_setup_federated). Auto-setup federated users from trusted clusters\n\nBy setting @ActivateUsers: true@ for each federated cluster in @RemoteClusters@, a federated user from one of the listed clusters will be automatically set up and activated on this cluster.  See the configuration example in \"Federated instance\":#federated .\n\nh2(#activation_flows). Activation flows\n\nh3(#activation_flow_private). Private instance\n\nPolicy: users must be manually set up by the admin.\n\nHere is the configuration for this policy.  This is also the default if not provided.\n\n<pre>\nUsers:\n  AutoSetupNewUsers: false\n</pre>\n\n# User is created.  Not set up.  @is_active@ is false.\n# Workbench checks @is_invited@ and finds it is false.  User gets \"inactive user\" page.\n# Admin goes to user page and clicks \"setup user\" or sets @is_active@ to true.\n# On refreshing Workbench, the user is able to self-activate after signing clickthrough agreements (if any).\n# Alternatively, directly setting @is_active@ to true also sets up the user, but skips clickthrough agreements (because the user is already active).\n\nh3(#federated). Federated instance\n\nPolicy: users from other clusters in the federation are activated; users from outside the federation must be manually approved.\n\nHere is the configuration for this policy and an example remote cluster @clsr2@.\n\n<pre>\nUsers:\n  AutoSetupNewUsers: false\nRemoteClusters:\n  clsr2:\n    ActivateUsers: true\n</pre>\n\n# Federated user arrives claiming to be from cluster 'clsr2'\n# API server authenticates user as being from cluster 'clsr2'\n# Because 'clsr2' has @ActivateUsers@, the user is set up and activated.\n# User can immediately start using Workbench.\n\nh3(#activation_flow_open). Open instance\n\nPolicy: anybody who shows up and signs the agreements is activated.\n\n<pre>\nUsers:\n  AutoSetupNewUsers: true\n</pre>\n\n\"Set up user agreements\":#user_agreements by creating \"signature\" \"require\" links as described earlier.\n\n# User is created and auto-setup.  
At this point, @is_active@ is false, but the user has been added to the \"All users\" group.\n# Workbench checks @is_invited@ and finds it is true, because the user is a member of the \"All users\" group.\n# Workbench presents the user with the list of user agreements; the user reads and clicks \"sign\" for each one.\n# Workbench tries to activate the user.\n# The user is activated.\n\nh2(#service_accounts). Service Accounts\n\nFor automation purposes, you can create service accounts that aren't tied to an external authorization system. These kinds of accounts don't differ much from standard user accounts; they just cannot be accessed through a normal login mechanism.\n\nAs an admin, you can create such accounts as described in the \"user pre-setup section above\":#pre-activated and then \"activate them by updating their @is_active@ field\":{{site.baseurl}}/admin/user-management-cli.html#activate-user.\n\nOnce a service account is created, you can \"use an admin account to set up a token\":{{site.baseurl}}/admin/user-management-cli.html#create-token for it, so that the required automations can authenticate. Note that these tokens can be given a limited lifetime using the @expires_at@ field, and also a \"limited scope\":{{site.baseurl}}/admin/scoped-tokens.html, if required by your security policies. You can read more about them at \"the API reference page\":{{site.baseurl}}/api/methods/api_client_authorizations.html.
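\n\nAs an illustration, here is a minimal sketch of the whole flow using the Python SDK (the email, username, and expiry date are placeholders; an admin token is required):\n\n<pre>\nimport arvados\n\napi = arvados.api('v1')\n\n# Create the service account user record (placeholder identity).\nsa = api.users().create(body={'user': {\n    'email': 'robot@example.com',\n    'username': 'robot',\n}}).execute()\n\n# Activate it directly by setting is_active, as described above.\napi.users().update(uuid=sa['uuid'], body={'user': {'is_active': True}}).execute()\n\n# Issue a token owned by the account, with an optional expiry date.\nauth = api.api_client_authorizations().create(body={'api_client_authorization': {\n    'owner_uuid': sa['uuid'],\n    'expires_at': '2025-01-01T00:00:00Z',\n}}).execute()\n\nprint('ARVADOS_API_TOKEN=v2/%s/%s' % (auth['uuid'], auth['api_token']))\n</pre>\n"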
  },
  {
    "path": "doc/api/dispatch.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"cloud dispatcher\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe cloud dispatcher provides several management/diagnostic APIs, intended to be used by a system administrator.\n\n{% include 'notebox_begin' %}\nThe \"@arvados-server instance@ subcommand\":{{site.baseurl}}/admin/dispatch.html provides a command-line interface for the most commonly used parts of this API.\n{% include 'notebox_end' %}\n\nThese APIs are not normally exposed to external clients. To use them, connect directly to the dispatcher's internal URL (see Services.DispatchCloud.InternalURLs in the cluster config file). All requests must include the cluster's management token (@ManagementToken@ in the cluster config file).\n\nExample:\n\n<notextile><pre><code>curl -H \"Authorization: Bearer $management_token\" http://localhost:9006/arvados/v1/dispatch/containers</code></pre></notextile>\n\nThese APIs are not available via @arv@ CLI tool.\n\nNote: the term \"instance\" here refers to a virtual machine provided by a cloud computing service. The alternate terms \"cloud VM\", \"compute node\", and \"worker node\" are sometimes used as well in config files, documentation, and log messages.\n\nh3. List containers\n\n@GET /arvados/v1/dispatch/containers@\n\nReturn a list of containers that are either ready to dispatch, or being started/monitored by the dispatcher.\n\nEach entry in the returned list of @items@ includes:\n* an @instance_type@ entry with the name and attributes of the instance type that will be used to schedule the container (chosen from the @InstanceTypes@ section of your cluster config file); and\n* a @container@ entry with selected attributes of the container itself, including @uuid@, @priority@, @runtime_constraints@, and @state@. Other fields of the container records are not loaded by the dispatcher, and will have empty/zero values here (e.g., @{...,\"created_at\":\"0001-01-01T00:00:00Z\",\"command\":[],...}@).\n* a @scheduling_status@ field with a brief explanation of the container's status in the dispatch queue, or an empty string if scheduling is not applicable, e.g., the container has already started running.\n\nExample response:\n\n<notextile><pre>{\n  \"items\": [\n    {\n      \"container\": {\n        \"uuid\": \"zzzzz-dz642-xz68ptr62m49au7\",\n        ...\n        \"priority\": 562948375092493200,\n        ...\n        \"state\": \"Locked\",\n        ...\n      },\n      \"instance_type\": {\n        \"Name\": \"Standard_E2s_v3\",\n        \"ProviderType\": \"Standard_E2s_v3\",\n        \"VCPUs\": 2,\n        \"RAM\": 17179869184,\n        \"Scratch\": 32000000000,\n        \"IncludedScratch\": 32000000000,\n        \"AddedScratch\": 0,\n        \"Price\": 0.146,\n        \"Preemptible\": false\n      },\n      \"scheduling_status\": \"Waiting for a Standard_E2s_v3 instance to boot and be ready to accept work.\"\n    },\n    ...\n  ]\n}</pre></notextile>\n\nh3. Get specified container\n\n@GET /arvados/v1/dispatch/container?container_uuid={uuid}@\n\nReturn the same information as \"list containers\" above, but for a single specified container.\n\nExample response:\n\n<notextile><pre>{\n  \"container\": {\n    ...\n  },\n  \"instance_type\": {\n    ...\n  },\n  \"scheduling_status\": \"Waiting for a Standard_E2s_v3 instance to boot and be ready to accept work.\"\n}</pre></notextile>\n\nh3. 
Terminate a container\n\n@POST /arvados/v1/dispatch/containers/kill?container_uuid={uuid}&reason={string}@\n\nMake a single attempt to terminate the indicated container on the relevant instance. (The caller can implement a delay-and-retry loop if needed.)\n\nA container terminated this way will end with state @Cancelled@ if its docker container had already started, or @Queued@ if it was terminated while setting up the runtime environment.\n\nThe provided @reason@ string will appear in the dispatcher's log, but not in the user-visible container log.\n\nIf the provided @container_uuid@ is not scheduled/running on an instance, the response status will be 404.\n\nh3. List instances\n\n@GET /arvados/v1/dispatch/instances@\n\nReturn a list of cloud instances.\n\nExample response:\n\n<notextile><pre>{\n  \"items\": [\n    {\n      \"instance\": \"/subscriptions/abcdefab-abcd-abcd-abcd-abcdefabcdef/resourceGroups/zzzzz/providers/Microsoft.Compute/virtualMachines/compute-abcdef0123456789abcdef0123456789-abcdefghijklmno\",\n      \"address\": \"10.23.45.67\",\n      \"price\": 0.073,\n      \"arvados_instance_type\": \"Standard_DS1_v2\",\n      \"provider_instance_type\": \"Standard_DS1_v2\",\n      \"last_container_uuid\": \"zzzzz-dz642-vp7scm21telkadq\",\n      \"last_busy\": \"2020-01-13T15:20:21.775019617Z\",\n      \"running_container_uuids\": [\"zzzzz-dz642-vp7scm21telkadq\"],\n      \"worker_state\": \"running\",\n      \"idle_behavior\": \"run\"\n    },\n    ...\n}</pre></notextile>\n\nThe @instance@ value is the instance's identifier, assigned by the cloud provider. It can be used with the instance APIs below.\n\nThe @last_container_uuid@ value indicates the most recently started container, if any. (It does not necessarily indicate that the container is still running.)\n\nThe @worker_state@ value indicates the instance's capability to run containers.\n* @unknown@: instance was not created by this dispatcher, and a boot probe has not yet succeeded (this state typically appears briefly after the dispatcher restarts).\n* @booting@: cloud provider says the instance exists, but a boot probe has not yet succeeded.\n* @idle@: instance is idle and ready to run a container.\n* @running@: instance is running one or more containers.\n* @shutdown@: cloud provider has been instructed to terminate the instance.\n\nThe @idle_behavior@ value determines what the dispatcher will do with the instance when it is idle; see hold/drain/run APIs below.\n\nh3. Hold an instance\n\n@POST /arvados/v1/dispatch/instances/hold?instance_id={instance}@\n\nSet the indicated instance's idle behavior to @hold@. The instance will not be shut down automatically. If any containers are currently running, they will be allowed to continue, but no new containers will be scheduled.\n\nh3. Drain an instance\n\n@POST /arvados/v1/dispatch/instances/drain?instance_id={instance}@\n\nSet the indicated instance's idle behavior to @drain@. If any containers are currently running, they will be allowed to continue, but when the instance becomes idle, it will be shut down.\n\nh3. Resume an instance\n\n@POST /arvados/v1/dispatch/instances/run?instance_id={instance}@\n\nSet the indicated instance's idle behavior to @run@ (the normal behavior). When it becomes idle, it will be eligible to run new containers. It will be shut down automatically when the configured idle threshold is reached.\n\nh3. 
Shut down an instance\n\n@POST /arvados/v1/dispatch/instances/kill?instance_id={instance}&reason={string}@\n\nTerminate the indicated instance.\n\nIf any containers are running on the instance, they will be killed too; no effort is made to wait for them to end gracefully.\n\nThe provided @reason@ string will appear in the dispatcher's log.\n
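\nFor scripted monitoring, the \"list containers\" call above can be driven from Python as well. A minimal sketch, assuming the dispatcher's internal URL and the cluster's management token shown earlier:\n\n<notextile><pre><code>import json\nimport urllib.request\n\n# Assumptions: dispatcher internal URL and ManagementToken from your cluster config.\nmanagement_token = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'\nreq = urllib.request.Request(\n    'http://localhost:9006/arvados/v1/dispatch/containers',\n    headers={'Authorization': 'Bearer ' + management_token},\n)\nwith urllib.request.urlopen(req) as resp:\n    for item in json.load(resp)['items']:\n        print(item['container']['uuid'], item['scheduling_status'])\n</code></pre></notextile>\n"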
  },
  {
    "path": "doc/api/execution.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: architecture\ntitle: Computing with Crunch\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nCrunch is the name for the Arvados system for managing computation.  It provides an abstract API to various clouds and HPC resource allocation and scheduling systems, and integrates closely with Keep storage and the Arvados permission system.\n\nh2. Container API\n\n# To submit work, create a \"container request\":{{site.baseurl}}/api/methods/container_requests.html in the @Committed@ state.\n# The system will fufill the container request by creating or reusing a \"Container object\":{{site.baseurl}}/api/methods/containers.html and assigning it to the @container_uuid@ field.  If the same request has been submitted in the past, it may reuse an existing container.  The reuse behavior can be suppressed with @use_existing: false@ in the container request.\n# The dispatcher process will notice a new container in @Queued@ state and submit a container executor to the underlying work queuing system (such as Slurm).\n# The container executes.  Upon termination the container goes into the  @Complete@ state.  If the container execution was interrupted or lost due to system failure, it will go into the @Cancelled@ state.\n# When the container associated with the container request is completed, the container request will go into the @Final@ state.\n# The @output_uuid@ field of the container request contains the uuid of output collection produced by container request.\n\n!(full-width){{site.baseurl}}/images/Crunch_dispatch.svg!\n\nh2(#RAM). Understanding RAM requests for containers\n\nThe @runtime_constraints@ section of a container specifies working RAM (@ram@) and Keep cache (@keep_cache_ram@).  If not specified, containers get a default Keep cache (@container_default_keep_cache_ram@, default 256 MiB).  The total RAM requested for a container is the sum of working RAM, Keep cache, and an additional RAM reservation configured by the admin (@ReserveExtraRAM@ in the dispatcher configuration, default zero).\n\nThe total RAM request is used to schedule containers onto compute nodes.  RAM allocation limits are enforced using kernel controls such as cgroups.  A container which requests 1 GiB RAM will only be permitted to allocate up to 1 GiB of RAM, even if scheduled on a 4 GiB node.  On HPC systems, a multi-core node may run multiple containers at a time.\n\nWhen running on the cloud, the memory request (along with CPU and disk) is used to select (and possibly boot) an instance type with adequate resources to run the container.  Instance type RAM is derated 5% from the published specification to accomodate virtual machine, kernel and system services overhead.\n\nh3. Calculate minimum instance type RAM for a container\n\n    (RAM request + Keep cache + ReserveExtraRAM) * (100/95)\n\nFor example, for a 3 GiB request, default Keep cache, and no extra RAM reserved:\n\n    (3072 + 256) * 1.0526 = 3494 MiB\n\nTo run this container, the instance type must have a published RAM size of at least 3494 MiB.\n\nh3. Calculate the maximum requestable RAM for an instance type\n\n    (Instance type RAM * (95/100)) - Keep cache - ReserveExtraRAM\n\nFor example, for a 3.75 GiB node, default Keep cache, and no extra RAM reserved:\n\n    (3840 * 0.95) - 256 = 3392 MiB\n\nTo run on this instance type, the container can request at most 3392 MiB of working RAM.\n"
  },
  {
    "path": "doc/api/index.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\ntitle: API Reference\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis reference describes the semantics of Arvados resources and how to programatically access Arvados via its REST API.  Each resource listed in this section is exposed on the Arvados API server under the @/arvados/v1/@ path prefix, for example, @https://{{ site.arvados_api_host }}/arvados/v1/collections@.\n\nh2. Discovery document\n\nThe API server publishes a machine-readable description of its endpoints and some additional site configuration values via a JSON-formatted discovery document.  This is available at @/discovery/v1/apis/arvados/v1/rest@, for example @https://{{ site.arvados_api_host }}/discovery/v1/apis/arvados/v1/rest@.  Some Arvados SDKs use the discovery document to generate language bindings.\n\nh2. Exported configuration\n\nThe Controller exposes a subset of the cluster's configuration and makes it available to clients in JSON format. This public config includes valuable information like several service's URLs, timeout settings, etc. and it is available at @/arvados/v1/config@, for example @https://{{ site.arvados_api_host }}/arvados/v1/config@. Workbench is one example of a client using this information, as it's a client-side application and doesn't have access to the cluster's config file.\n\nh2. Exported vocabulary definition\n\nWhen configured, the Controller also exports the \"metadata vocabulary definition\":{{site.baseurl}}/admin/metadata-vocabulary.html in JSON format. This functionality is useful for clients like Workbench and the Python SDK to translate between identifiers and human-readable labels when reading and writing objects on the system. This is available at @/arvados/v1/vocabulary@, for example @https://{{ site.arvados_api_host }}/arvados/v1/vocabulary@.\n"
  },
  {
    "path": "doc/api/keep-s3.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"S3 API\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe Simple Storage Service (S3) API is a de-facto standard for object storage originally developed by Amazon Web Services.  Arvados supports accessing files in Keep using the S3 API.\n\nS3 is supported by many \"cloud native\" applications, and client libraries exist in many languages for programmatic access.\n\nh3. Endpoints and Buckets\n\nTo access Arvados S3 using an S3 client library, you must tell it to use the URL of the keep-web server (this is @Services.WebDAVDownload.ExternalURL@ in the public configuration) as the custom endpoint.  The keep-web server will decide to treat it as an S3 API request based on the presence of an AWS-format Authorization header.  Requests without an Authorization header, or differently formatted Authorization, will be treated as \"WebDAV\":keep-webdav.html .\n\nThe \"bucket name\" is an Arvados collection uuid, portable data hash, or project uuid.\n\nPath-style and virtual host-style requests are supported.\n* A path-style request uses the hostname indicated by @Services.WebDAVDownload.ExternalURL@, with the bucket name in the first path segment: @https://download.example.com/zzzzz-4zz18-asdfgasdfgasdfg/@.\n* A virtual host-style request uses the hostname pattern indicated by @Services.WebDAV.ExternalURL@, with a bucket name in place of the leading @*@: @https://zzzzz-4zz18-asdfgasdfgasdfg.collections.example.com/@.\n\nIf you have wildcard DNS, TLS, and routing set up, an S3 client configured with endpoint @collections.example.com@ should work regardless of which request style it uses.\n\nh3. Supported Operations\n\nh4. ListObjects\n\nSupports the following request query parameters:\n\n* delimiter\n* marker\n* max-keys\n* prefix\n\nh4. GetObject\n\nSupports the @Range@ header.\n\nh4. PutObject\n\nCan be used to create or replace a file in a collection.\n\nAn empty PUT with a trailing slash and @Content-Type: application/x-directory@ will create a directory within a collection if Arvados configuration option @Collections.S3FolderObjects@ is true.\n\nMissing parent/intermediate directories within a collection are created automatically.\n\nCannot be used to create a collection or project.\n\nh4. DeleteObject\n\nCan be used to remove files from a collection.\n\nIf used on a directory marker, it will delete the directory only if the directory is empty.\n\nh4. HeadBucket\n\nCan be used to determine if a bucket exists and if client has read access to it.\n\nh4. HeadObject\n\nCan be used to determine if an object exists and if client has read access to it.\n\nh4. GetBucketVersioning\n\nBucket versioning is presently not supported, so this will always respond that bucket versioning is not enabled.\n\nh3. 
Accessing collection/project properties as metadata\n\nGetObject, HeadObject, and HeadBucket return Arvados object properties as S3 metadata headers, e.g., @X-Amz-Meta-Foo: bar@.\n\nIf the requested path indicates a file or directory placeholder inside a collection, or the top level of a collection, GetObject and HeadObject return the collection properties.\n\nIf the requested path indicates a directory placeholder corresponding to a project, GetObject and HeadObject return the properties of the project.\n\nHeadBucket returns the properties of the collection or project corresponding to the bucket name.\n\nNon-string property values are returned in a JSON representation, e.g., @[\"foo\",\"bar\"]@.\n\nAs in Amazon S3, property values containing non-ASCII characters are returned in BASE64-encoded form as described in RFC 2047, e.g., @=?UTF-8?b?4pu1?=@.\n\nGetBucketTagging and GetObjectTagging APIs are _not_ supported.\n\nIt is not possible to modify collection or project properties using the S3 API.\n\nh3. Authorization mechanisms\n\nKeep-web accepts AWS Signature Version 4 (AWS4-HMAC-SHA256) as well as the older V2 AWS signature.\n\nIf your client uses V4 signatures exclusively _and_ your Arvados token was issued by the same cluster you are connecting to, you can use the Arvados token's UUID part as your S3 Access Key, and its secret part as your S3 Secret Key. This is preferred, where applicable.\n\nExample using cluster @zzzzz@:\n* Arvados token: @v2/zzzzz-gj3su-yyyyyyyyyyyyyyy/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@\n* Access Key: @zzzzz-gj3su-yyyyyyyyyyyyyyy@\n* Secret Key: @xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@\n\nIn all other cases, replace every @/@ character in your Arvados token with @_@, and use the resulting string as both Access Key and Secret Key.\n\nExample using a cluster other than @zzzzz@ _or_ an S3 client that uses V2 signatures:\n* Arvados token: @v2/zzzzz-gj3su-yyyyyyyyyyyyyyy/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@\n* Access Key: @v2_zzzzz-gj3su-yyyyyyyyyyyyyyy_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@\n* Secret Key: @v2_zzzzz-gj3su-yyyyyyyyyyyyyyy_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx@\n"
  },
  {
    "path": "doc/api/keep-web-urls.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"Keep-web URL patterns\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nFiles served by @keep-web@ can be rendered directly in the browser, or @keep-web@ can instruct the browser to only download the file.\n\nWhen serving files that will render directly in the browser, it is important to properly configure the keep-web service to migitate cross-site-scripting (XSS) attacks.  A HTML page can be stored in a collection.  If an attacker causes a victim to visit that page through Workbench, the HTML will be rendered by the browser.  If all collections are served at the same domain, the browser will consider collections as coming from the same origin, which will grant access to the same browsing data (cookies and local storage).  This would enable malicious Javascript on that page to access Arvados on behalf of the victim.\n\nThis can be mitigated by having separate domains for each collection, or limiting preview to circumstances where the collection is not accessed with the user's regular full-access token.  For clusters where this risk is acceptable, this protection can also be turned off by setting the @Collections/TrustAllContent@ configuration flag to true, see the \"configuration reference\":../admin/config.html for more detail.\n\nThe following \"same origin\" URL patterns are supported for public collections and collections shared anonymously via secret links (i.e., collections which can be served by keep-web without making use of any implicit credentials like cookies). See \"Same-origin URLs\" below.\n\n<pre>\nhttp://collections.example.com/c=uuid_or_pdh/path/file.txt\nhttp://collections.example.com/c=uuid_or_pdh/t=TOKEN/path/file.txt\n</pre>\n\nThe following \"multiple origin\" URL patterns are supported for all collections:\n\n<pre>\nhttp://uuid_or_pdh--collections.example.com/path/file.txt\nhttp://uuid_or_pdh--collections.example.com/t=TOKEN/path/file.txt\n</pre>\n\nIn the \"multiple origin\" form, the string @--@ can be replaced with @.@ with identical results (assuming the downstream proxy is configured accordingly). These two are equivalent:\n\n<pre>\nhttp://uuid_or_pdh--collections.example.com/path/file.txt\nhttp://uuid_or_pdh.collections.example.com/path/file.txt\n</pre>\n\nThe first form (with @--@ instead of @.@) avoids the cost and effort of deploying a wildcard TLS certificate for @*.collections.example.com@ at sites that already have a wildcard certificate for @*.example.com@ . The second form is likely to be easier to configure, and more efficient to run, on a downstream proxy.\n\nIn all of the above forms, the @collections.example.com@ part can be anything at all: keep-web itself ignores everything after the first @.@ or @--@. (Of course, in order for clients to connect at all, DNS and any relevant proxies must be configured accordingly.)\n\nIn all of the above forms, the @uuid_or_pdh@ part can be either a collection UUID or a portable data hash with the @+@ character optionally replaced by @-@ . (When @uuid_or_pdh@ appears in the domain name, replacing @+@ with @-@ is mandatory, because @+@ is not a valid character in a domain name.)\n\nIn all of the above forms, a top level directory called @_@ is skipped. 
In cases where the @path/file.txt@ part might start with @t=@ or @c=@ or @_/@, links should be constructed with a leading @_/@ to ensure the top level directory is not interpreted as a token or collection ID.\n\nAssuming there is a collection with UUID @zzzzz-4zz18-znfnqtbbv4spc3w@ and portable data hash @1f4b0bc7583c2a7f9102c395f4ffc5e3+45@, the following URLs are interchangeable:\n\n<pre>\nhttp://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/foo/bar.txt\nhttp://zzzzz-4zz18-znfnqtbbv4spc3w.collections.example.com/_/foo/bar.txt\nhttp://zzzzz-4zz18-znfnqtbbv4spc3w--collections.example.com/_/foo/bar.txt\n</pre>\n\nThe following URLs are read-only, but will return the same content as above:\n\n<pre>\nhttp://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--foo.example.com/foo/bar.txt\nhttp://1f4b0bc7583c2a7f9102c395f4ffc5e3-45--.invalid/foo/bar.txt\nhttp://collections.example.com/by_id/1f4b0bc7583c2a7f9102c395f4ffc5e3%2B45/foo/bar.txt\nhttp://collections.example.com/by_id/zzzzz-4zz18-znfnqtbbv4spc3w/foo/bar.txt\n</pre>\n\nIf the collection is named \"MyCollection\" and located in a project called \"MyProject\" which is in the home project of a user with username is \"bob\", the following read-only URL is also available when authenticating as bob:\n\npre. http://collections.example.com/users/bob/MyProject/MyCollection/foo/bar.txt\n\nAn additional form is supported specifically to make it more convenient to maintain support for existing Workbench download links:\n\npre. http://collections.example.com/collections/download/uuid_or_pdh/TOKEN/foo/bar.txt\n\nA regular Workbench \"download\" link is also accepted, but credentials passed via cookie, header, etc. are ignored. Only public data can be served this way:\n\npre. http://collections.example.com/collections/uuid_or_pdh/foo/bar.txt\n\nh2(#same-site). Same-site requirements for requests with tokens\n\nAlthough keep-web doesn't care about the domain part of the URL, the clients do: especially when rendering inline content.\n\nWhen a client passes a token in the URL, keep-web sends a redirect response placing the token in a @Set-Cookie@ header with the @SameSite=Lax@ attribute. The browser will ignore the cookie if it's not coming from a _same-site_ request, and thus its subsequent request will fail with a @401 Unauthorized@ error.\n\nThis mainly affects Workbench's ability to show inline content, so it should be taken into account when configuring both services' URL schemes.\n\nYou can read more about the definition of a _same-site_ request at the \"RFC 6265bis-03 page\":https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-5.2\n"
  },
  {
    "path": "doc/api/keep-webdav.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"WebDAV\"\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n\"Web Distributed Authoring and Versioning (WebDAV)\":https://tools.ietf.org/html/rfc4918 is an IETF standard set of extensions to HTTP to manipulate and retrieve hierarchical web resources, similar to directories in a file system.  Arvados supports accessing files in Keep using WebDAV.\n\nMost major operating systems include built-in support for mounting WebDAV resources as network file systems, see user guide sections for \"Windows\":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-windows.html, \"macOS\":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-os-x.html, or \"Linux (GNOME)\":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-gnu-linux.html#gnome.  WebDAV is also supported by various standalone storage browser applications such as \"Cyberduck\":https://cyberduck.io/ and client libraries exist in many languages for programmatic access.\n\nKeep-web provides read/write HTTP (WebDAV) access to files stored in Keep. It serves public data to anonymous and unauthenticated clients, and serves private data to clients that supply Arvados API tokens.\n\nh3. Supported Operations\n\nSupports WebDAV HTTP methods @GET@, @PUT@, @DELETE@, @PROPFIND@, @COPY@, and @MOVE@.\n\nDoes not support @LOCK@ or @UNLOCK@.  These methods will be accepted, but are no-ops.\n\nh3. Browsing\n\nRequests can be authenticated a variety of ways as described below in \"Authentication mechanisms\":#auth .  An unauthenticated request will return a 401 Unauthorized response with a @WWW-Authenticate@ header indicating \"support for RFC 7617 Basic Authentication\":https://tools.ietf.org/html/rfc7617 .\n\nGetting a listing from keep-web starting at the root path @/@ will return two folders, @by_id@ and @users@.\n\nThe @by_id@ folder will return an empty listing.  However, a path which starts with /by_id/ followed by a collection uuid, portable data hash, or project uuid will return the listing of that object.\n\nThe @users@ folder will return a listing of the users for whom the client has permission to read the \"home\" project of that user.  Browsing an individual user will return the collections and projects directly owned by that user.  Browsing those collections and projects return listings of the files, directories, collections, and subprojects they contain, and so forth.\n\nIn addition to the @/by_id/@ path prefix, the collection or project can be specified using a path prefix of @/c=<uuid or pdh>/@ or (if the cluster is properly configured) as a virtual host.  This is described on \"Keep-web URLs\":keep-web-urls.html\n\nIt is possible for a project or a \"filter group\":methods/groups.html#filter to appear as its own descendant in the @by_id@ and @users@ tree (a filter group may match itself, its own ancestor, another filter group that matches its ancestor, etc). When this happens, the descendant appears as an empty read-only directory. For example, if filter group @f@ matches its own parent @p@:\n* @/users/example/p/f@ will show the filter group's contents (matched projects and collections).\n* @/users/example/p/f/p@ will appear as an empty directory.\n* @/by_id/uuid_of_f/p@ will show the parent project's contents, including @f@.\n* @/by_id/uuid_of_f/p/f@ will appear as an empty directory.\n\nh3(#zip). 
Downloading ZIP archives\n\nKeep-web can produce an uncompressed ZIP archive of a collection, or a subset of a collection.\n\nTo request a ZIP archive:\n* The request must include an @Accept: application/zip@ header _or_ @?accept=application/zip&disposition=attachment@ in the query.\n* The request URI must specify the root directory of a collection, e.g., @/by_id/<uuid>/@.  See \"Keep-web URLs\":keep-web-urls.html for more examples.\n\nTo download a subset of a collection, the request can specify one or more pathnames relative to the collection directory:\n* A @files@ parameter in the query of a @GET@ request, e.g., @https://<uuid>.collections.example.com/?files=file1&files=file2@,\n* A @files@ parameter in the body of a @POST@ request with a @Content-Type: application/x-www-form-urlencoded@ header, or\n* The value of a @files@ key in a JSON object in the body of a @POST@ request with a @Content-Type: application/json@ header, e.g., @{\"files\":[\"file1\",\"file2\"]}@.\n\nKeep-web returns an error if one of the specified paths does not exist in the requested collection.\n\nThe ZIP archive comment will include a download URL with the collection UUID or portable data hash, e.g., \"Downloaded from https://collections.example.com/by_id/zzzzz-4zz18-0pg114rezrbz46u/\".\n\nThe ZIP archive will also include collection metadata if the request sets an @include_collection_metadata@ parameter, e.g., @https://<uuid>.collections.example.com/?include_collection_metadata=true@. The resulting ZIP archive will also include a file named @collection.json@ containing the collection's metadata (UUID, name, description, portable data hash, properties, creation time, modification time) and information about the user who last modified it (UUID, full name, username, and email). If the collection is specified by portable data hash rather than name or UUID, @collection.json@ will contain only the portable data hash.\n\nExample @collection.json@ content:\n\n<pre>\n{\n  \"created_at\":\"2025-04-28T19:50:49.046969000Z\",\n  \"description\":\"Description of test collection\\n\",\n  \"modified_at\":\"2025-04-28T19:50:49.093166000Z\",\n  \"modified_by_user\":{\n    \"email\":\"example@example.com\",\n    \"full_name\":\"Example Name\",\n    \"username\":\"example\",\n    \"uuid\":\"zzzzz-tpzed-xurymjxw79nv3jz\"\n  },\n  \"name\":\"collection name\",\n  \"portable_data_hash\":\"6acf043b102afcf04e3be2443e7ea2ba+223\",\n  \"properties\":{\n    \"key\":\"value\"\n  },\n  \"uuid\":\"zzzzz-4zz18-0pg114rezrbz46u\"\n}\n</pre>\n\nThe request can also include a @download_filename@ parameter with a desired name for the downloaded zip file. This filename will be included in the @Content-Disposition@ response header. 
If this parameter is not provided, the filename suggested in the response header will be based on the collection name or portable data hash:\n* @{collection name}.zip@ if downloading an entire collection\n* @{collection name} - {file name}.zip@ if a single file was specified in the request\n* @{collection name} - 3 files.zip@ if a directory or multiple files were specified in the request\n* @{portable data hash}.zip@, @{portable data hash} - {file name}.zip@, etc., if the source collection was specified by portable data hash rather than name or UUID\n\nExample request:\n\n<pre>\nGET /by_id/zzzzz-4zz18-0pg114rezrbz46u\nAccept: application/zip\nContent-Type: application/json\n\n{\n  \"download_filename\": \"odd-numbered files and directories.zip\",\n  \"files\": [\n    \"file1.txt\",\n    \"file3.bin\",\n    \"dir5\"\n  ],\n  \"include_collection_metadata\": true\n}\n</pre>\n\nh3(#auth). Authentication mechanisms\n\nA token can be provided in an Authorization header as a @Bearer@ token:\n\n<pre>\nAuthorization: Bearer o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK\n</pre>\n\nA token can also be provided with \"RFC 7617 Basic Authentication\":https://tools.ietf.org/html/rfc7617 in this case, the payload is formatted as @username:token@ and encoded with base64.  The username must be non-empty, but is ignored.  In this example, the username is \"user\":\n\n<pre>\nAuthorization: Basic dXNlcjpvMDdqNHB4N1JsSks0Q3VNWXA3QzBMRFQ0Q3pSMUoxcUJFNUF2bzdlQ2NVak9UaWt4Swo=\n</pre>\n\nA base64-encoded token can be provided in a cookie named \"api_token\":\n\n<pre>\nCookie: api_token=bzA3ajRweDdSbEpLNEN1TVlwN0MwTERUNEN6UjFKMXFCRTVBdm83ZUNjVWpPVGlreEs=\n</pre>\n\nA token can be provided in an URL-encoded query string:\n\n<pre>\nGET /foo/bar.txt?api_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK\n</pre>\n\nA token can be provided in a URL-encoded path (as described in the previous section):\n\n<pre>\nGET /t=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK/_/foo/bar.txt\n</pre>\n\nA suitably encoded token can be provided in a POST body if the request has a content type of application/x-www-form-urlencoded or multipart/form-data:\n\n<pre>\nPOST /foo/bar.txt\nContent-Type: application/x-www-form-urlencoded\n[...]\napi_token=o07j4px7RlJK4CuMYp7C0LDT4CzR1J1qBE5Avo7eCcUjOTikxK\n</pre>\n\nIf a token is provided in a query string or in a POST request, the response is an HTTP 303 redirect to an equivalent GET request, with the token stripped from the query string and added to a cookie instead.\n\nh3. Indexes\n\nKeep-web returns a generic HTML index listing when a directory is requested with the GET method. It does not serve a default file like \"index.html\". Directory listings are also returned for WebDAV PROPFIND requests.\n\nh3. Range requests\n\nKeep-web supports partial resource reads using the HTTP @Range@ header as specified in \"RFC 7233\":https://tools.ietf.org/html/rfc7233 .\n\nh3. Compatibility\n\nClient-provided authorization tokens are ignored if the client does not provide a @Host@ header.\n\nIn order to use the query string or a POST form authorization mechanisms, the client must follow 303 redirects; the client must accept cookies with a 303 response and send those cookies when performing the redirect; and either the client or an intervening proxy must resolve a relative URL (\"//host/path\") if given in a response Location header.\n\nh3. 
Intranet mode\n\nNormally, Keep-web accepts requests for multiple collections using the same host name, provided the client's credentials are not being used. This provides insufficient XSS protection in an installation where the \"anonymously accessible\" data is not truly public, but merely protected by network topology.\n\nIn such cases -- for example, a site which is not reachable from the internet, where some data is world-readable from Arvados's perspective but is intended to be available only to users within the local network -- the downstream proxy should configured to return 401 for all paths beginning with \"/c=\".\n\nh3. Same-origin URLs\n\nWithout the same-origin protection outlined above, a web page stored in collection X could execute JavaScript code that uses the current viewer's credentials to download additional data from collection Y -- data which is accessible to the current viewer, but not to the author of collection X -- from the same origin (``https://collections.example.com/'') and upload it to some other site chosen by the author of collection X.\n"
  },
  {
    "path": "doc/api/methods/api_client_authorizations.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"api_client_authorizations\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/api_client_authorizations@\n\nObject type: @gj3su@\n\nExample UUID: @zzzzz-gj3su-0123456789abcde@\n\nh2. Resource\n\nThe @api_client_authorizations@ resource stores the API tokens that have been issued to permit access the API server.\n\nAn ApiClientAuthorization is *not* a generic Arvados resource.  The full list of properties that belong to an ApiClientAuthorization is:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|_. Example|\n|uuid|string|An identifier used to refer to the token without exposing the actual token.||\n|api_token|string|The actual token string that is expected in the Authorization header.||\n|created_by_ip_address|string|-||\n|last_used_by_ip_address|string|The network address of the most recent client using this token.||\n|last_used_at|datetime|Timestamp of the most recent request using this token.||\n|expires_at|datetime|Time at which the token is no longer valid.  May be set to a time in the past in order to immediately expire a token.||\n|owner_uuid|string|The user associated with the token.  All operations using this token are checked against the permissions of this user.||\n|scopes|array|A list of resources this token is allowed to access.  A scope of [\"all\"] allows all resources.  See \"API Authorization\":{{site.baseurl}}/api/tokens.html#scopes for details.||\n\nh2. Methods\n\nSee \"Common resource methods\":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nh3(#create). create\n\nCreate a new ApiClientAuthorization.\n\nRegular users may only create self-owned API tokens, but may provide a restricted \"scope\":{{site.baseurl}}/api/tokens.html#scopes .  Administrators may create API tokens corresponding to any user.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n|api_client_authorization|object||query||\n\nh3. create_system_auth\n\ncreate_system_auth api_client_authorizations\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n|scopes|array||query||\n\nh3(#current). current\n\nReturn the full record associated with the provided API token. This endpoint is often used to check the validity of a given token.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n\nh3. delete\n\nDelete an existing ApiClientAuthorization.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||\n\nh3. get\n\nGets an ApiClientAuthorization's metadata by UUID.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||\n\nh3. list\n\nList api_client_authorizations.\n\nSee \"common resource list method.\":{{site.baseurl}}/api/methods.html#index\n\nh3. 
update\n\nUpdate attributes of an existing ApiClientAuthorization.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the ApiClientAuthorization in question.|path||\n|api_client_authorization|object||query||\n"
  },
  {
    "path": "doc/api/methods/authorized_keys.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"authorized_keys\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/authorized_keys@\n\nObject type: @fngyi@\n\nExample UUID: @zzzzz-fngyi-0123456789abcde@\n\nh2. Resource\n\nThe authorized_keys resource stores SSH public keys which grant access to virtual machines or git repositories on the Arvados cluster as the user in @authorized_user_uuid@.\n\nEach AuthorizedKey has, in addition to the \"Common resource fields\":{{site.baseurl}}/api/resources.html:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|_. Example|\n|name|string|A name to help the user manage their keys.||\n|key_type|string|Public key type, currently only supports \"SSH\"||\n|authorized_user_uuid|string|The user to which this key belongs.  Authentication using this key authenticates as this user.||\n|public_key|text|The actual public key material, e.g., from @~/.ssh/id_rsa.pub@||\n|expires_at|datetime|Expiration date after which the key is no longer valid.||\n\nh2. Methods\n\nSee \"Common resource methods\":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nh3. create\n\nCreate a new AuthorizedKey.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n|authorized_key|object||query||\n\nh3. delete\n\nDelete an existing AuthorizedKey.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||\n\nh3. get\n\nGets a AuthorizedKey's metadata by UUID.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||\n\nh3. list\n\nList authorized_keys.\n\nSee \"common resource list method.\":{{site.baseurl}}/api/methods.html#index\n\nh3. update\n\nUpdate attributes of an existing AuthorizedKey.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the AuthorizedKey in question.|path||\n|authorized_key|object||query||\n"
  },
  {
    "path": "doc/api/methods/collections.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"collections\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/collections@\n\nObject type: @4zz18@\n\nExample UUID: @zzzzz-4zz18-0123456789abcde@\n\nh2. Resource\n\nCollections describe sets of files in terms of data blocks stored in Keep.  See \"Keep - Content-Addressable Storage\":{{site.baseurl}}/architecture/storage.html and \"using collection versioning\":../../user/topics/collection-versioning.html for details.\n\nEach collection has, in addition to the \"Common resource fields\":{{site.baseurl}}/api/resources.html:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|_. Example|\n|name|string|||\n|description|text|Free text description of the group.  Allows \"HTML formatting.\":{{site.baseurl}}/api/resources.html#descriptions||\n|properties|hash|User-defined metadata, may be used in queries using \"subproperty filters\":{{site.baseurl}}/api/methods.html#subpropertyfilters ||\n|portable_data_hash|string|The MD5 sum of the manifest text stripped of block hints other than the size hint.||\n|manifest_text|text|The manifest describing how to assemble blocks into files, in the \"Arvados manifest format\":{{site.baseurl}}/architecture/manifest-format.html||\n|replication_desired|number|Minimum storage replication level desired for each data block referenced by this collection. A value of @null@ signifies that the site default replication level (typically 2) is desired.|@2@|\n|replication_confirmed|number|Replication level most recently confirmed by the storage system. This field is null when a collection is first created, and is reset to null when the manifest_text changes in a way that introduces a new data block. An integer value indicates the replication level of the _least replicated_ data block in the collection.|@2@, null|\n|replication_confirmed_at|datetime|When @replication_confirmed@ was confirmed. If @replication_confirmed@ is null, this field is also null.||\n|storage_classes_desired|list|An optional list of storage class names where the blocks should be saved. If not provided, the cluster's default storage class(es) will be set.|@['archival']@|\n|storage_classes_confirmed|list|Storage classes most recently confirmed by the storage system. This field is an empty list when a collection is first created.|@'archival']@, @[]@|\n|storage_classes_confirmed_at|datetime|When @storage_classes_confirmed@ was confirmed. If @storage_classes_confirmed@ is @[]@, this field is null.||\n|trash_at|datetime|If @trash_at@ is non-null and in the past, this collection will be hidden from API calls.  May be untrashed.||\n|delete_at|datetime|If @delete_at@ is non-null and in the past, the collection may be permanently deleted.||\n|is_trashed|boolean|True if @trash_at@ is in the past, false if not.||\n|current_version_uuid|string|UUID of the collection's current version. On new collections, it'll be equal to the @uuid@ attribute.||\n|version|number|Version number, starting at 1 on new collections. This attribute is read-only.||\n|preserve_version|boolean|When set to true on a current version, it will be persisted. When passing @true@ as part of a bigger update call, both current and newly created versions are persisted.||\n|file_count|number|The total number of files in the collection. 
This attribute is read-only.||\n|file_size_total|number|The sum of the file sizes in the collection. This attribute is read-only.||\n\nh3. Conditions of creating a Collection\n\nIf a new @portable_data_hash@ is specified when creating or updating a Collection, it must match the cryptographic digest of the supplied @manifest_text@.\n\nh3. Side effects of creating a Collection\n\nReferenced blocks are protected from garbage collection in Keep.\n\nData can be shared with other users via the Arvados permission model.\n\nh3(#trashing). Trashing collections\n\nCollections can be trashed by updating the record and setting the @trash_at@ field, or with the \"delete\":#delete method.  The delete method sets @trash_at@ to \"now\".\n\nThe value of @trash_at@ can be set to a time in the future as a feature to automatically expire collections.\n\nWhen @trash_at@ is set, @delete_at@ will also be set.  Normally @delete_at = trash_at + Collections.DefaultTrashLifetime@.  When the @trash_at@ time is past but @delete_at@ is in the future, the trashed collection is invisible to most API calls unless the @include_trash@ parameter is true.  Collections in the trashed state can be \"untrashed\":#untrash so long as @delete_at@ has not past.  Collections are also trashed if they are contained in a \"trashed group\":groups.html#trashing\n\nOnce @delete_at@ is past, the collection and all of its previous versions will be deleted permanently and can no longer be untrashed.\n\nh3(#replace_files). Using \"replace_files\" to create or update a collection\n\nThe @replace_files@ option can be used with the \"create\":#create and \"update\":#update APIs to efficiently and atomically copy individual files and directory trees from other collections, copy/rename/delete items within an existing collection, and add new items to a collection.\n\n@replace_files@ keys indicate target paths in the new collection, and values specify sources that should be copied to the target paths.\n* Each target path must be an absolute canonical path beginning with @/@. It must not contain @.@ or @..@ components, consecutive @/@ characters, or a trailing @/@ after the final component.\n* Each source must be one of the following:\n** an empty string (signifying that the target path is to be deleted),\n** @<PDH>/<path>@ where @<PDH>@ is the portable data hash of a collection on the cluster and @<path>@ is a file or directory in that collection,\n** @manifest_text/<path>@ where @<path>@ is an existing file or directory in a collection supplied in the @manifest_text@ attribute in the request, or\n** @current/<path>@ where @<path>@ is an existing file or directory in the collection being updated.\n\nIn an @update@ request, sources may reference the current portable data hash of the collection being updated. However, in many cases it is more appropriate to use a @current/<path>@ source instead, to ensure the latest content is used even if the collection has been updated since the PDH was last retrieved.\n\nh4(#replace_files-delete). Delete a file\n\nDelete @foo.txt@.\n\n<notextile><pre>\n\"replace_files\": {\n  \"/foo.txt\": \"\"\n}\n</pre></notextile>\n\nh4(#replace_files-rename). Rename a file\n\nRename @foo.txt@ to @bar.txt@.\n\n<notextile><pre>\n\"replace_files\": {\n  \"/foo.txt\": \"\",\n  \"/bar.txt\": \"current/foo.txt\"\n}\n</pre></notextile>\n\nh4(#replace_files-swap). 
Swap files\n\nSwap contents of files @foo@ and @bar@.\n\n<notextile><pre>\n\"replace_files\": {\n  \"/foo\": \"current/bar\",\n  \"/bar\": \"current/foo\"\n}\n</pre></notextile>\n\nh4(#replace_files-add). Add a file\n\n<notextile><pre>\n\"replace_files\": {\n  \"/new_directory/new_file.txt\": \"manifest_text/new_file.txt\"\n},\n\"collection\": {\n  \"manifest_text\": \". acbd18db4cc2f85cedef654fccc4a4d8+3+A82740cd577ff5745925af5780de5992cbb25d937@668efec4 0:3:new_file.txt\\n\"\n}\n</pre></notextile>\n\nh4(#replace_files-replace). Replace all content with new content\n\nNote this is equivalent to omitting the @replace_files@ argument.\n\n<notextile><pre>\n\"replace_files\": {\n  \"/\": \"manifest_text/\"\n},\n\"collection\": {\n  \"manifest_text\": \"./new_directory acbd18db4cc2f85cedef654fccc4a4d8+3+A82740cd577ff5745925af5780de5992cbb25d937@668efec4 0:3:new_file.txt\\n\"\n}\n</pre></notextile>\n\nh4(#replace_files-rename-and-replace). Atomic rename and replace\n\nRename @current_file.txt@ to @old_file.txt@ and replace @current_file.txt@ with new content, all in a single atomic operation.\n\n<notextile><pre>\n\"replace_files\": {\n  \"/current_file.txt\": \"manifest_text/new_file.txt\",\n  \"/old_file.txt\": \"current/current_file.txt\"\n},\n\"collection\": {\n  \"manifest_text\": \". acbd18db4cc2f85cedef654fccc4a4d8+3+A82740cd577ff5745925af5780de5992cbb25d937@668efec4 0:3:new_file.txt\\n\"\n}\n</pre></notextile>\n\nh4(#replace_files-combine). Combine collections\n\nDelete all current content, then copy content from other collections into new subdirectories.\n\n<notextile><pre>\n\"replace_files\": {\n  \"/\": \"\",\n  \"/copy of collection 1\": \"1f4b0bc7583c2a7f9102c395f4ffc5e3+45/\",\n  \"/copy of collection 2\": \"ea10d51bcf88862dbcc36eb292017dfd+45/\"\n}\n</pre></notextile>\n\nh4(#replace_files-extract-subdirectory). Extract a subdirectory\n\nReplace all current content with a copy of a subdirectory from another collection.\n\n<notextile><pre>\n\"replace_files\": {\n  \"/\": \"1f4b0bc7583c2a7f9102c395f4ffc5e3+45/subdir\"\n}\n</pre></notextile>\n\nh4(#replace_files-usage-restrictions). Usage restrictions\n\nA target path with a non-empty source cannot be the ancestor of another target path in the same request. For example, the following request is invalid:\n\n<notextile><pre>\n\"replace_files\": {\n  \"/foo\": \"fa7aeb5140e2848d39b416daeef4ffc5+45/\",\n  \"/foo/this_will_return_an_error\": \"\"\n}\n</pre></notextile>\n\nIt is an error to supply a non-empty @manifest_text@ that is unused, i.e., the @replace_files@ argument does not contain any values beginning with @\"manifest_text/\"@. For example, the following request is invalid:\n\n<notextile><pre>\n\"replace_files\": {\n  \"/foo\": \"current/bar\"\n},\n\"collection\": {\n  \"manifest_text\": \". acbd18db4cc2f85cedef654fccc4a4d8+3+A82740cd577ff5745925af5780de5992cbb25d937@668efec4 0:3:new_file.txt\\n\"\n}\n</pre></notextile>\n\nCollections on other clusters in a federation cannot be used as sources. Each source must exist on the current cluster and be readable by the current user.\n\nSimilarly, if @manifest_text@ is provided, it must only reference data blocks that are stored on the current cluster. This API does not copy data from other clusters in a federation.\n\nh3(#replace_segments). 
Using \"replace_segments\" to repack file data\n\nThe @replace_segments@ option can be used with the \"create\":#create or \"update\":#update API to atomically apply a new file packing, typically with the goal of replacing a number of small blocks with one larger block. The repacking is specified in terms of _block segments_: a block segment is a portion of a stored block that is referenced by a file in a manifest.\n\n@replace_segments@ keys indicate existing block segments in the collection, and values specify replacement segments.\n* Each segment is specified as space-separated tokens: @\"locator offset length\"@ where @locator@ is a signed block locator and @offset@ and @length@ are decimal-encoded integers specifying a portion of the block that is referenced in the collection.\n* Each replacement block locator must be properly signed (just as if it appeared in a @manifest_text@).\n* Each existing block segment must correspond to an entire contiguous portion of a block referenced by a single file (splitting existing segments is not supported).\n* If a segment to be replaced does not match any existing block segment in the manifest, that segment _and all other @replace_segments@ entries referencing the same replacement block_ will be skipped. Other replacements will still be applied. Replacements that are skipped for this reason do not cause the request to fail. This rule ensures that when concurrent clients compute different repackings and request similar replacements such as @a,b,c,d,e → X@ and @a,b,c,d,e,f → Y@, the resulting manifest references @X@ or @Y@ but not both. Otherwise, the effect could be @a,b,c,d,e → X, f → Y@ where @Y@ is just an inefficient way to reference the same data as @f@.\n\nThe @replace_files@ and @manifest_text@ options, if present, are applied before @replace_segments@. This means @replace_segments@ can apply to blocks from @manifest_text@ and/or other collections referenced by @replace_files@.\n\nIn the following example, two files were originally saved by writing two small blocks (@c410@ and @c93e@). After concatenating the two small blocks and writing a single larger block @ca9c@, the manifest is being updated to reference the larger block.\n\n<notextile><pre>\n\"collection\": {\n  \"manifest_text\": \". c4103f122d27677c9db144cae1394a66+2+A3d02f1f3d8a622b2061ad5afe4853dbea42039e2@674dd351 693e9af84d3dfcc71e640e005bdc5e2e+3+A6528480b63d90a24b60b2ee2409040f050cc5d0c@674dd351 0:2:file1.txt 2:3:file2.txt\\n\"\n},\n\"replace_segments\": {\n  \"c4103f122d27677c9db144cae1394a66+2+A3d02f1f3d8a622b2061ad5afe4853dbea42039e2@674dd351 0 2\": \"ca9c491ac66b2c62500882e93f3719a8+5+A312fea6de5807e9e77d844450d36533a599c40f1@674dd351 0 2\",\n  \"693e9af84d3dfcc71e640e005bdc5e2e+3+A6528480b63d90a24b60b2ee2409040f050cc5d0c@674dd351 0 3\": \"ca9c491ac66b2c62500882e93f3719a8+5+A312fea6de5807e9e77d844450d36533a599c40f1@674dd351 2 3\"\n}\n</pre></notextile>\n\nResulting manifest:\n\n<notextile><pre>\n. ca9c491ac66b2c62500882e93f3719a8+5+A312fea6de5807e9e77d844450d36533a599c40f1@674dd351 0:2:file1.txt 2:3:file2.txt\n</pre></notextile>\n\nh2. Methods\n\nSee \"Common resource methods\":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nSupports federated @get@ only, which may be called with either a uuid or a portable data hash.  
When requesting a portable data hash which is not available on the home cluster, the query is forwarded to all the clusters listed in @RemoteClusters@ and returns the first successful result.\n\nh3(#create). create\n\nCreate a new Collection.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n|collection|object||query||\n|replace_files|object|Initialize files and directories with new content and/or content from other collections|query||\n|replace_segments|object|Repack the collection by substituting data blocks|query||\n\nThe new collection's content can be initialized by providing a @manifest_text@ key in the provided @collection@ object, or by \"using the @replace_files@ option\":#replace_files.\n\nAn alternative file packing can be applied atomically \"using the @replace_segments@ option\":#replace_segments.\n\nh3(#delete). delete\n\nPut a Collection in the trash.  This sets the @trash_at@ field to @now@ and @delete_at@ field to @now@ + token TTL.  A trashed collection is invisible to most API calls unless the @include_trash@ parameter is true.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path||\n\nh3. get\n\nGets a Collection's metadata by UUID or portable data hash.  When making a request by portable data hash, attributes other than @portable_data_hash@, @manifest_text@, and @trash_at@ are not returned, even when requested explicitly using the @select@ parameter.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID or portable data hash of the Collection in question.|path||\n\nh3. list\n\nList collections.\n\nSee \"common resource list method.\":{{site.baseurl}}/api/methods.html#index\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n|include_trash|boolean (default false)|Include trashed collections.|query||\n|include_old_versions|boolean (default false)|Include past versions of the collection(s) being listed, if any.|query||\n\nNote: Because adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in results by default.  If you need it, pass a @select@ parameter that includes @manifest_text@.\n\nh4. Searching Collections for names of file or directories\n\nYou can search collections for specific file or directory names (whole or part) using the following filter in a @list@ query.\n\n<pre>\nfilters: [[\"file_names\", \"ilike\", \"%sample1234.fastq%\"]]\n</pre>\n\nNote: @file_names@ is a hidden field used for indexing.  It is not returned by any API call.  
On the client, you can programmatically enumerate all the files in a collection using @arv-ls@, the Python SDK @Collection@ class, Go SDK @FileSystem@ struct, the WebDAV API, or the S3-compatible API.\n\nAs of this writing (Arvados 2.4), you can also search for directory paths, but _not_ complete file paths.\n\nIn other words, this will work (when @dir3@ is a directory):\n\n<pre>\nfilters: [[\"file_names\", \"ilike\", \"%dir1/dir2/dir3%\"]]\n</pre>\n\nHowever, this will _not_ return the desired results (where @sample1234.fastq@ is a file):\n\n<pre>\nfilters: [[\"file_names\", \"ilike\", \"%dir1/dir2/dir3/sample1234.fastq%\"]]\n</pre>\n\nAs a workaround, you can search for both the directory path and file name separately, and then filter on the client side.\n\n<pre>\nfilters: [[\"file_names\", \"ilike\", \"%dir1/dir2/dir3%\"], [\"file_names\", \"ilike\", \"%sample1234.fastq%\"]]\n</pre>\n\nh3(#update). update\n\nUpdate attributes of an existing Collection.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Collection in question.|path||\n|collection|object||query||\n|replace_files|object|Add, delete, and replace files and directories with new content and/or content from other collections|query||\n|replace_segments|object|Repack the collection by substituting data blocks|query||\n\nThe collection's existing content can be replaced entirely by providing a @manifest_text@ key in the provided @collection@ object, or updated in place by \"using the @replace_files@ option\":#replace_files.\n\nAn alternative file packing can be applied atomically \"using the @replace_segments@ option\":#replace_segments.\n\nh3(#untrash). untrash\n\nRemove a Collection from the trash.  This sets the @trash_at@ and @delete_at@ fields to @null@.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Collection to untrash.|path||\n|ensure_unique_name|boolean (default false)|Rename collection uniquely if untrashing it would fail with a unique name conflict.|query||\n\n\nh3. provenance\n\nReturns a list of objects in the database that directly or indirectly contributed to producing this collection, such as the container request that produced this collection as output.\n\nThe general algorithm is:\n\n# Visit the container request that produced this collection (via @output_uuid@ or @log_uuid@ attributes of the container request)\n# Visit the input collections to that container request (via @mounts@ and @container_image@ of the container request)\n# Iterate until there are no more objects to visit\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Collection to get provenance.|path||\n\nh3. used_by\n\nReturns a list of objects in the database this collection directly or indirectly contributed to, such as containers that takes this collection as input.\n\nThe general algorithm is:\n\n# Visit containers that take this collection as input (via @mounts@ or @container_image@ of the container)\n# Visit collections produced by those containers (via @output@ or @log@ of the container)\n# Iterate until there are no more objects to visit\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. 
Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Collection to get usage.|path||\n"
  },
  {
    "path": "doc/api/methods/computed_permissions.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"computed_permissions\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/computed_permissions@\n\nh2. Resource\n\nComputed permissions are entries from the internal cache of the highest permission level each user has on each permission target.\n\nEach entry has the following attributes:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|\n|user_uuid|string|An individual user.|\n|target_uuid|string|An object (role group, project group, collection, etc.) on which the user has implicit or explicit permission.|\n|perm_level|string|@can_read@, @can_write@, or @can_manage@|\n\nThere is only one row for a given (@user_uuid@, @target_uuid@) pair.\n\nComputed permissions cannot be created or updated directly. To change permissions, use \"groups\":groups.html and \"links\":links.html APIs as described in the \"permission model\":../permission-model.html.\n\nh2. Method\n\nh3. list\n\n@GET /arvados/v1/computed_permissions@\n\nList computed permissions.\n\nThe computed permissions API accepts the arguments described in the \"common resource list method\":{{site.baseurl}}/api/methods.html#index with the following exceptions:\n* It is an error to supply a non-zero @offset@ argument.\n* The default value for @order@ is @[\"user_uuid\", \"target_uuid\"]@.\n* The default value for @count@ is @\"none\"@ and no other values are accepted.\n"
  },
  {
    "path": "doc/api/methods/container_requests.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"container_requests\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/container_requests@\n\nObject type: @xvhdp@\n\nExample UUID: @zzzzz-xvhdp-0123456789abcde@\n\nh2. Resource\n\nA container request is a request for the Arvados cluster to perform some computational work.  See \"computing with Crunch\":{{site.baseurl}}/api/execution.html for details.\n\nEach ContainerRequest offers the following attributes, in addition to the \"Common resource fields\":{{site.baseurl}}/api/resources.html:\n\nAll attributes are optional, unless otherwise marked as required.\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|_. Notes|\n|name|string|The name of the container_request.||\n|description|string|The description of the container_request.  Allows \"HTML formatting.\":{{site.baseurl}}/api/resources.html#descriptions ||\n|properties|hash|User-defined metadata that does not affect how the container is run.  May be used in queries using \"subproperty filters\":{{site.baseurl}}/api/methods.html#subpropertyfilters||\n|state|string|The allowed states are \"Uncommitted\", \"Committed\", and \"Final\".|Once a request is Committed, the only attributes that can be modified are priority, container_uuid, and container_count_max. A request in the \"Final\" state cannot have any of its functional parts modified (i.e., only name, description, and properties fields can be modified).|\n|requesting_container_uuid|string|The uuid of the parent container that created this container_request, if any. Represents a process tree.|The priority of this container_request is inherited from the parent container, if the parent container is cancelled, this container_request will be cancelled as well.|\n|container_uuid|string|The uuid of the container that satisfies this container_request. The system may return a preexisting Container that matches the container request criteria. See \"Container reuse\":#container_reuse for more details.|Container reuse is the default behavior, but may be disabled with @use_existing: false@ to always create a new container.|\n|container_count_max|integer|Maximum number of containers to start, i.e., the maximum number of \"attempts\" to be made.||\n|mounts|hash|Objects to attach to the container's filesystem and stdin/stdout.|See \"Mount types\":#mount_types for more details.|\n|secret_mounts|hash|Objects to attach to the container's filesystem.  Only \"json\" or \"text\" mount types allowed.|Not returned in API responses. Reset to empty when state is \"Complete\" or \"Cancelled\".|\n|runtime_constraints|hash|Restrict the container's access to compute resources and the outside world.|Required when in \"Committed\" state. 
e.g.,<pre><code>{\n  \"ram\":12000000000,\n  \"vcpus\":2,\n  \"API\":true\n}</code></pre>See \"Runtime constraints\":#runtime_constraints for more details.|\n|scheduling_parameters|hash|Parameters to be passed to the container scheduler when running this container.|e.g.,<pre><code>{\n\"partitions\":[\"fastcpu\",\"vfastcpu\"]\n}</code></pre>See \"Scheduling parameters\":#scheduling_parameters for more details.|\n|container_image|string|Portable data hash of a collection containing the docker image to run the container.|Required.|\n|environment|hash|Environment variables and values that should be set in the container environment (@docker run --env@). This augments and (when conflicts exist) overrides environment variables given in the image's Dockerfile.||\n|cwd|string|Initial working directory, given as an absolute path (in the container) or a path relative to the WORKDIR given in the image's Dockerfile.|Optional. If omitted or blank, @\".\"@ is implied.|\n|command|array of strings|Command to execute in the container.|Required. e.g., @[\"echo\",\"hello\"]@|\n|output_path|string|Path to a directory or file inside the container that should be preserved as the container's output when it finishes. This path must be one of the mount targets. For best performance, point output_path to a writable collection mount.  See \"Pre-populate output using Mount points\":#pre-populate-output for details regarding optional output pre-population using mount points and \"Symlinks in output\":#symlinks-in-output for additional details.|Required.|\n|output_glob|array of strings|Glob patterns determining which files (of those present in the output directory when the container finishes) will be included in the output collection. If multiple patterns are given, files that match any pattern are included. If null or empty, all files will be included.|e.g., @[\"**/*.vcf\", \"**/*.vcf.gz\"]@\nSee \"Glob patterns\":#glob_patterns for more details.|\n|output_name|string|Desired name for the output collection. If null or empty, a name will be assigned automatically.||\n|output_ttl|integer|Desired lifetime for the output collection, in seconds. If zero, the output collection will not be deleted automatically.||\n|priority|integer|Range 0-1000.  Indicates scheduling order preference.|Clients are expected to submit a container request with zero priority in order to preview the container that will be used to satisfy it. Priority can be null if and only if state!=\"Committed\".  See \"priority\":#priority below for more details.|\n|expires_at|datetime|After this time, priority is considered to be zero.|Not yet implemented.|\n|use_existing|boolean|If possible, use an existing (non-failed) container to satisfy the request instead of creating a new one.|Default is true.|\n|log_uuid|string|Log collection containing log messages provided by the scheduler and crunch processes.|Null if the container has not yet started running.\nTo retrieve logs in real time while the container is running, use the log API (see below).|\n|output_uuid|string|Output collection created when the container finished successfully.|Null if the container has failed or not yet completed.|\n|filters|string|Additional constraints for satisfying the container_request, given in the same form as the filters parameter accepted by the container_requests.list API.|This attribute is not implemented yet. The value should always be null.|\n|runtime_token|string|A v2 token to be passed into the container itself, used to access Keep-backed mounts, etc.  
|Not returned in API responses.  Reset to null when state is \"Complete\" or \"Cancelled\".|\n|runtime_user_uuid|string|The user permission that will be granted to this container.||\n|runtime_auth_scopes|array of string|The scopes associated with the auth token used to run this container.||\n|output_storage_classes|array of strings|The storage classes that will be used for the log and output collections of this container request.|If omitted, the cluster's configured default storage classes are used.|\n|output_properties|hash|User metadata properties to set on the output collection.  The output collection will also have default properties \"type\" (\"intermediate\" or \"output\") and \"container_request\" (the UUID of the container request that produced the collection).||\n|cumulative_cost|number|Estimated cost of the cloud VMs used to satisfy the request, including retried attempts and completed subrequests, but not including reused containers.|0 if container was reused or VM price information was not available.|\n|service|boolean|Indicates that this container is a long-lived service rather than a once-through batch job.  Incompatible with @use_existing@.||\n|published_ports|hash|Web service ports that are published by this container.  See \"published ports\":#published_ports below.||\n\nh2(#lifecycle). Container request lifecycle\n\nA container request may be created in the Committed state, or created in the Uncommitted state and then moved into the Committed state.\n\nOnce a request is in the Committed state, Arvados locates a suitable existing container or schedules a new one. When the assigned container finishes, the request state changes to Final.\n\nA client may cancel a committed request early (before the assigned container finishes) by setting the request priority to zero.\n\n!{max-width:60em;}{{site.baseurl}}/api/methods/container_request_lifecycle.svg!\n{% comment %}\n# svg generated using `graphviz -Tsvg -O`\ndigraph {\n    graph [nojustify=true] [labeljust=l]\n\n    invisiblestart [label = \"\"] [color=white] [group=lifecycle];\n    node [color=black] [fillcolor=white] [style=filled] [shape=box] [nojustify=true];\n    uncommitted [label = \"container request:\\l   state=Uncommitted\\l\"] [fillcolor=lightgrey] [group=lifecycle];\n    {\n        rank=same;\n        committed [label = \"container request:\\l   state=Committed\\l   priority>0\\l\"] [group=lifecycle];\n        reused [label = \"container request:\\l   state=Final\\lcontainer:\\l   state=Complete\\l(reused existing container)\\l\"] [fillcolor=lightblue] [group=endstate];\n    }\n    invisiblestart -> uncommitted [label = \"   user creates container request\\l\"] [color=navy] [fontcolor=navy];\n    uncommitted -> committed [label = \"   user updates to\\l      state=Committed, priority>0\\l\"] [color=navy] [fontcolor=navy];\n    queued [label = \"container request:\\l   state=Committed\\l   priority>0\\lcontainer:\\l   state=Queued\\l\"] [group=lifecycle];\n    committed -> queued [label = \"   Arvados creates a new container\\l\"];\n    {\n        rank=same;\n        locked [label = \"container request:\\l   state=Committed\\l   priority>0\\lcontainer:\\l   state=Locked\\l\"] [group=lifecycle];\n        latecancelled [label = \"container request:\\l   state=Final\\lcontainer:\\l   state=Cancelled\\l\"] [fillcolor=lightblue] [group=endstate];\n    }\n    queued -> locked [label = \"   Arvados is ready to dispatch the container\\l\"];\n    {\n        rank=same;\n        running [label = \"container request:\\l   
state=Committed\\l   priority>0\\lcontainer:\\l   state=Running\\l\"] [group=lifecycle];\n        containerfailed [label = \"container request:\\l   state=Final\\lcontainer:\\l   state=Complete\\l   exit_code≠0\\l\"] [fillcolor=lightblue] [group=endstate];\n    }\n    locked -> running [label = \"   Arvados starts the container process\\l\"];\n    containerfinished [label = \"container request:\\l   state=Final\\lcontainer:\\l   state=Complete\\l   exit_code=0\\l\"] [fillcolor=lightblue] [group=lifecycle];\n\n    committed -> reused [label = \"Arvados selects an existing container\"] [constraint=false] [labeldistance=0.5];\n    queued -> latecancelled [label = \"user updates to priority=0\"] [color=navy] [fontcolor=navy];\n    locked -> latecancelled [label = \"user updates to priority=0\"] [color=navy] [fontcolor=navy] [constraint=false];\n    running -> latecancelled [label = \"user updates to priority=0\"] [color=navy] [fontcolor=navy] [constraint=false];\n    running -> containerfailed [label = \"container process fails\"];\n    running -> containerfinished [label = \"   container process succeeds\\l\"];\n\n    # layout hacks\n    reused -> latecancelled [style=invis];\n    latecancelled -> containerfailed [style=invis];\n}\n{% endcomment %}\n\nh2(#priority). Priority\n\nThe @priority@ field has a range of 0-1000.\n\nPriority 0 means no container should run on behalf of this request, and containers already running will be terminated (setting container priority to 0 is the cancel operation).\n\nPriority 1 is the lowest priority.\n\nPriority 1000 is the highest priority.\n\nThe actual order in which containers execute is determined by the underlying scheduling software (e.g. Slurm) and may be based on a combination of container priority, submission time, available resources, and other factors.\n\nIn the current implementation, the magnitude of difference in priority between two containers affects the weight of priority vs. age in determining scheduling order.  If two containers have only a small difference in priority (for example, 500 and 501) and the lower priority container has a longer queue time, the lower priority container may be scheduled before the higher priority container.  Use a greater magnitude difference (for example, 500 and 600) to give higher weight to priority over queue time.\n\nh2(#mount_types). {% include 'mount_types' %}\n\nh2(#runtime_constraints). {% include 'container_runtime_constraints' %}\n\nh2(#scheduling_parameters). {% include 'container_scheduling_parameters' %}\n\nh2(#glob_patterns). {% include 'container_glob_patterns' %}\n\nh2(#published_ports). {% include 'container_published_ports' %}\n\nh2(#container_reuse). Container reuse\n\nWhen a container request is \"Committed\", the system will try to find and reuse an existing Container with the same command, cwd, environment, output_path, container_image, mounts, secret_mounts, runtime_constraints, runtime_user_uuid, and runtime_auth_scopes as those requested.\n\n* The serialized fields environment, mounts, and runtime_constraints are normalized when searching.\n* The system will also search for containers with minor variations in the keep_cache_disk and keep_cache_ram runtime_constraints that should not affect the result. 
This searches for other common values for those constraints, so a container that used a non-default value for these constraints may not be reused by later container requests that use a different value.\n\nIn order of preference, the system will use:\n\n* The first matching container to have finished successfully (i.e., reached state \"Complete\" with an exit_code of 0) whose log and output collections are still available.\n* The oldest matching \"Running\" container with the highest progress, i.e., the container that is most likely to finish first.\n* The oldest matching \"Locked\" container with the highest priority, i.e., the container that is most likely to start first.\n* The oldest matching \"Queued\" container with the highest priority, i.e., the container that is most likely to start first.\n* A new container.\n\nh2(#cancel_container). Canceling a container request\n\nA container request may be canceled by setting its priority to 0, using an update call.\n\nWhen a container request is canceled, it will still reflect the state of the Container it is associated with via the container_uuid attribute. If that Container is being reused by any other container_requests that are still active, i.e., not yet canceled, that Container may continue to run or be scheduled to run by the system in the future. However, if no other container_requests are using that Container, then the Container will be canceled as well.\n\nh2. Methods\n\nSee \"Common resource methods\":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nSupports federated @create@, @delete@, @get@, @list@, and @update@.\n\nh3(#create). create\n\nCreate a new container request.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|container_request|object|Container request resource.|request body||\n|cluster_id|string|The federated cluster to submit the container request.|query||\n\nThe request body must include the required attributes @command@, @container_image@, @mounts@, and @output_path@. It can also include other attributes such as @environment@, @published_ports@, @runtime_constraints@, and @scheduling_parameters@.\n\nh3. delete\n\nDelete an existing container request.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the container request in question.|path||\n\nh3. get\n\nGet a container request's metadata by UUID.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the container request in question.|path||\n\nh3. list\n\nList container requests.\n\nSee \"common resource list method.\":{{site.baseurl}}/api/methods.html#index\n\nThe @filters@ argument can also filter on attributes of the container referenced by @container_uuid@. For example, @[[\"container.state\", \"=\", \"Running\"]]@ will match any container request whose container is running now.\n\nh3. update\n\nUpdate attributes of an existing container request.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. 
Example |\n{background:#ccffcc}.|uuid|string|The UUID of the container request in question.|path||\n|container_request|object|The new attributes.|query||\n\n{% include 'notebox_begin' %}\nSetting the priority of a committed container_request to 0 may cancel a running container assigned to it.\nSee \"Canceling a container request\":{{site.baseurl}}/api/methods/container_requests.html#cancel_container for further details.\n{% include 'notebox_end' %}\n\nh3(#container_status). container_status\n\nGet container status.\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |\n{background:#ccffcc}.|uuid|string|The UUID of the container request in question.|path|\n\nExample request: @GET /arvados/v1/container_requests/zzzzz-xvhdp-0123456789abcde/container_status@\n\nResponse attributes:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|_. Examples|\n|uuid|string|The UUID of the container assigned to this request.||\n|state|string|The state of the container assigned to this request (see \"container resource attributes\":containers.html).||\n|scheduling_status|string|A brief explanation of the container's status in the dispatch queue, or an empty string if scheduling is not applicable, e.g., the container is running or finished.|@waiting for cloud resources: queue position 3@\n@creating new instance@\n@preparing runtime environment@|\n\nh3(#log). log\n\nGet container log data using WebDAV methods.\n\nThis API retrieves data from the container request's log collection. It can be used at any time in the container request lifecycle.\n* Before a container has been assigned (the request is @Uncommitted@) it returns an empty directory.\n* While the container is @Queued@ or @Locked@, it returns an empty directory.\n* While the container is @Running@, @.../log/{container_uuid}/@ returns real-time logging data.\n* While the container is @Complete@ or @Cancelled@, @.../log/{container_uuid}/@ returns the final log collection.\n\nIf a request results in multiple containers being run (see @container_count_max@ above), the logs from prior attempts remain available at @.../log/{old_container_uuid}/@.\n\nCurrently, this API has a limitation that a directory listing at the top level @/arvados/v1/container_requests/{uuid}/log/@ does not reveal the per-container subdirectories. Instead, clients should look up the container request record and use the @container_uuid@ attribute to request files and directory listings under the per-container directory, as in the examples below.\n\nThis API supports the @Range@ request header, so it can be used to poll for and retrieve logs incrementally while the container is running.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|method|string|Read-only WebDAV method.|HTTP method|@GET@, @OPTIONS@, @PROPFIND@|\n{background:#ccffcc}.|uuid|string|The UUID of the container request.|path|zzzzz-xvhdp-0123456789abcde|\n{background:#ccffcc}.|path|string|Path to a file in the log collection.|path|@/zzzzz-dz642-0123456789abcde/stderr.txt@|\n\nExamples:\n* @GET /arvados/v1/container_requests/zzzzz-xvhdp-0123456789abcde/log/zzzzz-dz642-0123456789abcde/stderr.txt@\n* @PROPFIND /arvados/v1/container_requests/zzzzz-xvhdp-0123456789abcde/log/zzzzz-dz642-0123456789abcde/@\n
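\nFor example, while the container is running, new log data can be polled incrementally with any HTTP client. The following sketch uses @curl@ with the placeholder UUIDs from the examples above, and assumes @$ARVADOS_API_TOKEN@ holds a token with permission to read the container request; the @Range@ header requests only the data after byte offset 1234:\n\n<pre><code>$ curl -H \"Authorization: Bearer $ARVADOS_API_TOKEN\" -H \"Range: bytes=1234-\" https://{{ site.arvados_api_host }}/arvados/v1/container_requests/zzzzz-xvhdp-0123456789abcde/log/zzzzz-dz642-0123456789abcde/stderr.txt\n</code></pre>\n"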
  },
  {
    "path": "doc/api/methods/containers.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"containers\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/containers@\n\nObject type: @dz642@\n\nExample UUID: @zzzzz-dz642-0123456789abcde@\n\nh2. Resource\n\nA container is work order to be dispatched to an Arvados cluster to perform some computational work.  A container is created in response to a container request.  See \"computing with Crunch\":{{site.baseurl}}/api/execution.html for details.\n\nEach Container offers the following attributes, in addition to the \"Common resource fields\":{{site.baseurl}}/api/resources.html:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|_. Notes|\n|state|string|The allowed states are \"Queued\", \"Locked\", \"Running\", \"Cancelled\" and \"Complete\".|See \"Container states\":#container_states for more details.|\n|started_at|datetime|When this container started running.|Null if container has not yet started.|\n|finished_at|datetime|When this container finished.|Null if container has not yet finished.|\n|log|string|Portable data hash of a collection containing the log messages produced when executing the container.|Null if container has not yet started. The Crunch system will periodically update this field for a running container.|\n|environment|hash|Environment variables and values that should be set in the container environment (@docker run --env@). This augments and (when conflicts exist) overrides environment variables given in the image's Dockerfile.|Must be equal to a ContainerRequest's environment in order to satisfy the ContainerRequest.|\n|cwd|string|Initial working directory.|Must be equal to a ContainerRequest's cwd in order to satisfy the ContainerRequest.|\n|command|array of strings|Command to execute.| Must be equal to a ContainerRequest's command in order to satisfy the ContainerRequest.|\n|output_path|string|Path to a directory or file inside the container that should be preserved as this container's output when it finishes.|Must be equal to a ContainerRequest's output_path in order to satisfy the ContainerRequest.|\n|output_glob|array of strings|Glob patterns determining which files will be included in the output collection. See corresponding attribute in the \"container_requests resource\":container_requests.html.|Must be equal to a ContainerRequest's output_glob in order to satisfy the ContainerRequest. See \"Glob patterns\":#glob_patterns for more details.|\n|mounts|hash|Must contain the same keys as the ContainerRequest being satisfied. Each value must be within the range of values described in the ContainerRequest at the time the Container is assigned to the ContainerRequest.|See \"Mount types\":#mount_types for more details.|\n|secret_mounts|hash|Must contain the same keys as the ContainerRequest being satisfied. Each value must be within the range of values described in the ContainerRequest at the time the Container is assigned to the ContainerRequest.|Not returned in API responses. 
Reset to empty when state is \"Complete\" or \"Cancelled\".|\n|runtime_constraints|hash|Compute resources, and access to the outside world, that are / were available to the container.\nGenerally this will contain additional keys that are not present in any corresponding ContainerRequests: for example, even if no ContainerRequests specified constraints on the number of CPU cores, the number of cores actually used will be recorded here.|e.g.,\n<pre><code>{\n  \"ram\":12000000000,\n  \"vcpus\":2,\n  \"API\":true\n}</code></pre>See \"Runtime constraints\":#runtime_constraints for more details.|\n|runtime_status|hash|Information related to the container's run, including its steps. Some keys have specific meaning and are described later in this page.|e.g.,\n<pre><code>{\n  \"error\": \"This container won't be successful because at least one step has already failed.\"\n}</code></pre>See \"Runtime status\":#runtime_status for more details.|\n|scheduling_parameters|hash|Parameters to be passed to the container scheduler when running this container.|e.g.,<pre><code>{\n\"partitions\":[\"fastcpu\",\"vfastcpu\"]\n}</code></pre>See \"Scheduling parameters\":#scheduling_parameters for more details.|\n|output|string|Portable data hash of the output collection.|Null if the container is not yet finished.|\n|container_image|string|Portable data hash of a collection containing the docker image used to run the container.||\n|progress|number|A number between 0.0 and 1.0 describing the fraction of work done.||\n|priority|integer|Range 0-1000.  Indicate scheduling order preference.|Currently assigned by the system as the max() of the priorities of all associated ContainerRequests.  See \"container request priority\":container_requests.html#priority.|\n|exit_code|integer|Process exit code.|Null if container process has not exited yet.|\n|auth_uuid|string|UUID of a token to be passed into the container itself, used to access Keep-backed mounts, etc.  Automatically assigned.|Null if state∉{\"Locked\",\"Running\"} or if @runtime_token@ was provided.|\n|locked_by_uuid|string|UUID of a token, indicating which dispatch process changed state to Locked. If null, any token can be used to lock. If not null, only the indicated token can modify this container.|Null if state∉{\"Locked\",\"Running\"}|\n|runtime_token|string|A v2 token to be passed into the container itself, used to access Keep-backed mounts, etc.|Not returned in API responses.  Reset to null when state is \"Complete\" or \"Cancelled\".|\n|gateway_address|string|Address (host:port) of gateway server.|Internal use only.|\n|interactive_session_started|boolean|Indicates whether @arvados-client shell@ has been used to run commands in the container, which may have altered the container's behavior and output.||\n|output_storage_classes|array of strings|The storage classes that will be used for the log and output collections of this container||\n|output_properties|hash|User metadata properties to set on the output collection.|\n|cost|number|Estimated cost of the cloud VM used to run the container.|0 if not available.|\n|subrequests_cost|number|Total estimated cumulative cost of container requests submitted by this container.|0 if not available.|\n|service|boolean|Indicates that this container is a long-lived service rather than a once-through batch job.  Incompatible with @use_existing@||\n|published_ports|hash|Web service ports that are published by this container.  See \"published ports\":#published_ports below.||\n\nh2(#container_states). 
Container states\n\ntable(table table-bordered table-condensed).\n|_. State&nbsp;value|_. Description|_. Allowed next|\n|Queued|Waiting for a dispatcher to lock it and try to run the container.|Locked, Cancelled|\n|Locked|A dispatcher has \"taken\" the container and is allocating resources for it. The container has not started yet.|Queued, Running, Cancelled|\n|Running|Resources have been allocated and the contained process has been started (or is about to start). Crunch-run _must_ set state to Running _before_ there is any possibility that user code will run in the container.|Complete, Cancelled|\n|Complete|Container was running, and the contained process/command has exited.|Cancelled|\n|Cancelled|The container did not run long enough to produce an exit code. This includes cases where the container didn't even start, cases where the container was interrupted/killed before it exited by itself (e.g., priority changed to 0), and cases where some problem prevented the system from capturing the contained process's exit status (exit code and output).|-|\n\nSee \"Controlling container reuse\":{{site.baseurl}}/admin/controlling-container-reuse.html for details about changing state from @Complete@ to @Cancelled@\n\nh2(#mount_types). {% include 'mount_types' %}\n\nh2(#runtime_constraints). {% include 'container_runtime_constraints' %}\n\nh2(#runtime_status). Runtime status\n\nRuntime status provides container's relevant information about its progress even while it's still in Running state. This is used to avoid reusing containers that have not yet failed but will definitely do, and also for easier workflow debugging.\n\nThe following keys have well known meanings:\n\ntable(table table-bordered table-condensed).\n|_. Key|_. Type|_. Description|_. Notes|\n|error|string|The existance of this key indicates the container will definitely fail, or has already failed.|Optional.|\n|warning|string|Indicates something unusual happened or is currently happening, but isn't considered fatal.|Optional.|\n|activity|string|A message for the end user about what state the container is currently in.|Optional.|\n|errorDetail|string|Additional structured error details.|Optional.|\n|warningDetail|string|Additional structured warning details.|Optional.|\n|preemptionNotice|string|Details about any cloud provider scheduled interruption to the instance running this container.|Existence of this key indicates the container likely was (or will soon be) @Cancelled@ due to an instance interruption.|\n\nh2(#scheduling_parameters). {% include 'container_scheduling_parameters' %}\n\nh2(#glob_patterns). {% include 'container_glob_patterns' %}\n\nh2(#published_ports). {% include 'container_published_ports' %}\n\nh2. Methods\n\nSee \"Common resource methods\":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nSupports federated @get@ and @list@.\n\nh3(#create). create\n\nCreate a new Container.\n\nThis API requires admin privileges. In normal operation, it should not be used at all.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|container|object|Container resource|request body||\n\nh3. delete\n\nDelete a Container.\n\nThis API requires admin privileges. In normal operation, it should not be used at all. 
API clients like Workbench might not work correctly when a container request references a container that has been deleted.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Container in question.|path||\n\nh3. get\n\nGet a Container's metadata by UUID.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Container in question.|path||\n\nh3. list\n\nList containers.\n\nSee \"common resource list method.\":{{site.baseurl}}/api/methods.html#index\n\nh3. update\n\nUpdate attributes of an existing Container.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Container in question.|path||\n|container|object|The new attributes.|query||\n\nh3. auth\n\nGet the api_client_authorization record indicated by this container's auth_uuid, which belongs to the container's locked_by_uuid.\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Container in question.|path||\n
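\nFor example, a dispatcher process could fetch this record with a request like the following sketch (assuming the custom-method path follows the same @/arvados/v1/containers/{uuid}/auth@ pattern as the other methods on this page, and that the supplied token has the required permission):\n\n<pre><code>$ curl -H \"Authorization: Bearer $ARVADOS_API_TOKEN\" https://{{ site.arvados_api_host }}/arvados/v1/containers/zzzzz-dz642-0123456789abcde/auth\n</code></pre>\n"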
  },
  {
    "path": "doc/api/methods/credentials.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"credentials\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/credentials@\n\nObject type: @oss07@\n\nExample UUID: @zzzzz-oss07-0123456789abcde@\n\nh2. Resource\n\nStores a credential, such as a username/password or API token, for use by running containers to access an external resource on the user's behalf.\n\nEach Credential offers the following attributes, in addition to the \"Common resource fields\":{{site.baseurl}}/api/resources.html:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|\n|name|string|Name for the credential, unique by owner.|\n|description|string|(optional) Free text description of this credential.|\n|credential_class|string|The type of credential stored in this record. See below for more information.|\n|scopes|array of string|(optional) One or more specific resources this credential applies to.|\n|external_id|string|The non-secret part of the credential.|\n|secret|string|The secret part of the credential that should kept hidden where possible.|\n|expires_at|timestamp|Date at which the @secret@ field is not longer valid and can no longer be accessed (and may be scrubbed from the database).  If @expires_at@ has past, any attempts to access the @secret@ endpoint (see below) also return an error.|\n\nThe @secret@ field can be set when the record is created or updated by users with at @can_write@ permission, however the value of @secret@ is not returned in the regular @get@ or @list@ API calls, and cannot be used in queries.\n\nCredentials can be read using an Arvados token issued to a container running on behalf of a user who has @can_read@ permission to the credential, using the @secret@ API call (see below).  Calling the @secret@ API with a regular Arvados token (i.e. not associated with a running container) will return a permission denied error.\n\nThis design is intended to minimize accidental exposure of the secret material, but does not inherently protect it from users who have been given @can_read@ access, since it is necessary for code running on those user's behalf to access the secret in order to make use of it.\n\nAs of Arvados 3.2, all credentials are owned by the system user and the @name@ field must be unique on a given Arvados instance.  Credentials are shared using normal permission links.\n\nh2. Credential classes\n\nThe @credential_class@ field is used to identify what kind of credential is stored and how to interpret the other fields of the record. Some credential classes, like @aws_access_key@, are reserved and must be prefixed with @arv:@. Being reserved means that each scope in the associated @scopes@ field is checked to ensure that it is valid for that credential class.\n\nh3. aws_access_key\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Description|\n|credential_class|String \"arv:aws_access_key\"|\n|scopes|A list of S3 buckets (in the form \"s3://bucketname\") to which these credentials grant access. The special value \"s3://*\" means this credential can be used for any bucket.|\n|external_id|The value of \"aws_access_key_id\" from @~/.aws/credentials@|\n|secret|The value of \"aws_secret_access_key\" from @~/.aws/credentials@|\n\nh2. 
Methods\n\nSee \"Common resource methods\":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nh3. create\n\nCreate a new Credential.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|credential|object|Credential resource|request body||\n\nh3. delete\n\nDelete an existing Credential.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Credential in question.|path||\n\nh3. get\n\nGet a credential by UUID.  The @secret@ field is not returned in @get@ API calls.  To get the value of @secret@, use the @secret@ API call.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Credential in question.|path||\n\nh3. list\n\nList credentials.  The @secret@ field is not returned in @list@ API calls, and cannot be used in queries.  To get the value of @secret@, use the @secret@ API call.\n\nSee \"common resource list method.\":{{site.baseurl}}/api/methods.html#index\n\nh3. update\n\nUpdate attributes of an existing credential.  May be used to update the value of @secret@.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Credential in question.|path||\n|credential|object||query||\n\nh3. secret\n\nGet the value of @secret@.  Returns a JSON object in the form @{\"external_id\": \"...\", \"secret\": \"...\"}@.\n\nOnly permitted when called with a Arvados token issued to a container running on behalf of a user who has @can_read@ permission to the credential.  Calling this API with a regular Arvados token (i.e. not associated with a running container) will return a permission denied error.\n\nIf @expires_at@ has passed, this endpoint will return an error.\n\nCalls to the @secret@ API endpoint are logged as @event_type: secret_access@ in the audit log table.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Credential in question.|path||\n"
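\nFor example, code running inside a container could fetch the secret with the following sketch (assuming the custom-method path follows the usual @/arvados/v1/credentials/{uuid}/secret@ pattern, and that @$ARVADOS_API_TOKEN@ holds the container's runtime token):\n\n<pre><code>$ curl -H \"Authorization: Bearer $ARVADOS_API_TOKEN\" https://{{ site.arvados_api_host }}/arvados/v1/credentials/zzzzz-oss07-0123456789abcde/secret\n{\"external_id\": \"...\", \"secret\": \"...\"}\n</code></pre>\n"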
  },
  {
    "path": "doc/api/methods/groups.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"groups\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/groups@\n\nObject type: @j7d0g@\n\nExample UUID: @zzzzz-j7d0g-0123456789abcde@\n\nh2. Resource\n\nGroups provides a way to apply the same permissions to a set of Arvados objects.  See \"permission model\":{{site.baseurl}}/api/permission-model.html for details.\n\nEach Group has, in addition to the \"Common resource fields\":{{site.baseurl}}/api/resources.html:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|_. Example|\n|name|string|||\n|group_class|string|Type of group. @project@ and @filter@ indicate that the group should be displayed by Workbench and arv-mount as a project for organizing and naming objects. @role@ is used as part of the \"permission system\":{{site.baseurl}}/api/permission-model.html. |@\"filter\"@\n@\"project\"@\n@\"role\"@|\n|description|text|Free text description of the group.  Allows \"HTML formatting.\":{{site.baseurl}}/api/resources.html#descriptions ||\n|properties|hash|User-defined metadata, may be used in queries using \"subproperty filters\":{{site.baseurl}}/api/methods.html#subpropertyfilters ||\n|can_write|boolean|True if the current user has write permission on this group.||\n|can_manage|boolean|True if the current user has manage permission on this group.||\n|trash_at|datetime|If @trash_at@ is non-null and in the past, this group and all objects directly or indirectly owned by the group will be hidden from API calls.  May be untrashed as long as @delete_at@ is in the future.||\n|delete_at|datetime|If @delete_at@ is non-null and in the past, the group and all objects directly or indirectly owned by the group may be permanently deleted.||\n|is_trashed|datetime|True if @trash_at@ is in the past, false if not.||\n|frozen_by_uuid|string|For a frozen project, indicates the user who froze the project; null in all other cases. When a project is frozen, no further changes can be made to the project or its contents, even by admins. Attempting to add new items or modify, rename, move, trash, or delete the project or its contents, including any subprojects, will return an error.||\n\nh2. Group types and states\n\nh3(#project). Project groups\n\nGroups with @group_class: project@ are used to organize objects and subprojects through ownership.  When \"trashed or deleted\":#trashing, all items owned by the project (including subprojects, collections, or container requests) as well as permissions (permission links) granted to the project are also trashed or deleted.\n\nh3(#role). Role groups\n\nGroups with @group_class: role@ are used to grant permissions to users (or other groups) through permission links.  Role groups can confer \"can_manage\" permission but cannot directly own objects.  When \"trashed and deleted\":#trashing group membership and permission grants (expressed as permission links) are deleted as well.\n\nh3(#filter). Filter groups\n\nGroups with @group_class: filter@ groups are virtual groups; they can not own other objects, but instead their contents (as returned by the \"contents\":#contents API method) are defined by a query. Filter groups have a special @properties@ field named @filters@, which must be an array of filter conditions. 
See \"list method filters\":{{site.baseurl}}/api/methods.html#filters for details on the syntax of valid filters, but keep in mind that the attributes must include the object type (@collections@, @container_requests@, @groups@, @workflows@), separated with a dot from the field to be filtered on.\n\nFilters are applied with an implied *and* between them, but each filter only applies to the object type specified. The results are subject to the usual access controls - they are a subset of all objects the user can see. Here is an example:\n\n<pre>\n \"properties\":{\n  \"filters\":[\n   [\n    \"groups.name\",\n    \"like\",\n    \"Public%\"\n   ]\n  ]\n },\n</pre>\n\nThis @filter@ group will return all groups (projects) that have a name starting with the word @Public@ and are visible to the user issuing the query. Because groups can contain many types of object, it will also return all objects of other types that the user can see.\n\nThe 'is_a' filter operator is of particular interest to limit the @filter@ group 'content' to the desired object(s). When the 'is_a' operator is used, the attribute must be 'uuid'. The operand may be a string or an array which means objects of either type will match the filter. This example will return all groups (projects) that have a name starting with the word @Public@, as well as all collections that are in the project with uuid @zzzzz-j7d0g-0123456789abcde@.\n\n<pre>\n \"properties\":{\n  \"filters\":[\n   [\n    \"groups.name\",\n    \"like\",\n    \"Public%\"\n   ],\n   [\n    \"collections.owner_uuid\",\n    \"=\",\n    \"zzzzz-j7d0g-0123456789abcde\"\n   ],\n   [\n    \"uuid\",\n    \"is_a\",\n    [\n     \"arvados#group\",\n     \"arvados#collection\"\n    ]\n   ]\n  ]\n },\n </pre>\n\n\"Trashed or deleting\":#trashing a filter group causes the group itself to be hidden or deleted, but has no effect on the items returned in \"contents\", i.e. the database objects in \"contents\" are not hidden or deleted and may be accessed by other means.\n\nh3(#trashing). Trashing groups\n\nGroups can be trashed by updating the record and setting the @trash_at@ field, or with the \"delete\":#delete method.  The delete method sets @trash_at@ to \"now\".\n\nThe value of @trash_at@ can be set to a time in the future as a feature to automatically expire groups.\n\nWhen @trash_at@ is set, @delete_at@ will also be set.  Normally @delete_at = trash_at + Collections.DefaultTrashLifetime@ for projects and filter groups, and @delete_at = trash_at@ for role groups.  When the @trash_at@ time is past but @delete_at@ is in the future, the trashed group is invisible to most API calls unless the @include_trash@ parameter is true.  All objects directly or indirectly owned by the group (including subprojects, collections, or container requests) are considered trashed as well.  Groups in the trashed state can be \"untrashed\":#untrash so long as @delete_at@ has not past.\n\nOnce @delete_at@ is past, the group will be deleted permanently and can no longer be untrashed.  Different group types have different behavior when deleted, described above.\n\nNote: like other groups, \"role\" groups may have @trash_at@ set to date in the future, however roles groups are required to have @delete_at = trash_at@, so the trash time and delete time expire at the same time.  This means once @trash_at@ expires the role group is deleted immediately.  Role groups with @trash_at@ set can only be \"untrashed\":#untrash before they expire.\n\nh3(#frozen). 
Frozen projects\n\nA user with @manage@ permission can set the @frozen_by_uuid@ attribute of a @project@ group to their own user UUID. Once this is done, no further changes can be made to the project or its contents, including subprojects.\n\nThe @frozen_by_uuid@ attribute can be cleared by an admin user. It can also be cleared by a user with @manage@ permission, unless the @API.UnfreezeProjectRequiresAdmin@ configuration setting is active.\n\nThe optional @API.FreezeProjectRequiresDescription@ and @API.FreezeProjectRequiresProperties@ configuration settings can be used to prevent users from freezing projects that have empty @description@ and/or empty @properties@ entries.\n\nh2. Methods\n\nSee \"Common resource methods\":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nh3(#contents). contents\n\nRetrieve a list of items owned by the group or user.  Use \"recursive\" to list objects within subprojects as well.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the group or user to enumerate. If this is a user UUID, this method returns the contents of that user's home project.|path||\n|limit|integer (default 100)|Maximum number of items to return.|query||\n|order|array|Attributes to use as sort keys to determine the order resources are returned, each optionally followed by @asc@ or @desc@ to indicate ascending or descending order. Sort within a resource type by prefixing the attribute with the resource name and a period.|query|@[\"collections.modified_at desc\"]@|\n|filters|array|Conditions for filtering items.|query|@[[\"uuid\", \"is_a\", \"arvados#job\"]]@|\n|recursive|boolean (default false)|Include items owned by subprojects.|query|@true@|\n|exclude_home_project|boolean (default false)|Only return items which are visible to the user but not accessible within the user's home project.  Use this to get a list of items that are shared with the user.  Uses the logic described under the \"shared\" endpoint.|query|@true@|\n|include|array|Look up objects referenced by the indicated fields and include them in the response. Only \"owner_uuid\", \"container_uuid\" and \"collection_uuid\" are supported. If \"owner_uuid\" is given, the parent project or user will be returned. If \"container_uuid\" is given and container requests are returned in the response, the corresponding container records will also be returned.  If \"collection_uuid\" is given and workflows are returned in the response, the collection records will also be returned. These referenced objects will be returned in the \"included\" field of the response. For compatibility, a string @\"owner_uuid\"@ is accepted as equivalent to @[\"owner_uuid\"]@.|query|@\"owner_uuid\"@\n@[\"owner_uuid\",\"container_uuid\"]@|\n|include_trash|boolean (default false)|Include trashed objects.|query|@true@|\n|include_old_versions|boolean (default false)|Include past versions of the collections being listed.|query|@true@|\n|select|array|Attributes of each object to return in the response. Specify an unqualified name like @uuid@ to select that attribute on all object types, or a qualified name like @collections.name@ to select that attribute on objects of the specified type. 
By default, all available attributes are returned, except on collections, where @manifest_text@ is not returned and cannot be selected due to an implementation limitation. This limitation may be removed in the future.|query|@[\"uuid\", \"collections.name\"]@|\n\nNotes:\n\nBecause adding access tokens to manifests can be computationally expensive, the @manifest_text@ field is not included in listed collections.  If you need it, request a \"list of collections\":{{site.baseurl}}/api/methods/collections.html with the filter @[\"owner_uuid\", \"=\", GROUP_UUID]@, and @\"manifest_text\"@ listed in the select parameter.\n\nUse filters with the attribute format @<item type>.<field name>@ to filter items of a specific type. For example: @[\"container_requests.state\", \"=\", \"Final\"]@ to filter @container_requests@ where @state@ is @Final@. All other types of items owned by this group will be unaffected by this filter and will still be included.\n\nWhen called with “include=owner_uuid”, the @included@ field of the response is populated with users, projects, or other groups that own the objects returned in @items@.  This can be used to fetch an object and its parent with a single API call.\n\nWhen called with “include=container_uuid”, the @included@ field of the response is populated with the container associated with each container request in the response.\n\nh3. create\n\nCreate a new Group.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n|group|object||query||\n|async|boolean (default false)|Defer the permissions graph update by a configured number of seconds. (By default, @async_permissions_update_interval@ is 20 seconds). On success, the response is 202 (Accepted).|query|@true@|\n\nh3(#delete). delete\n\nPut a Group in the trash.  See \"Trashing groups\":#trashing for details.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path||\n\nh3. get\n\nGets a Group's metadata by UUID.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path||\n\nh3. list\n\nList groups.\n\nSee \"common resource list method.\":{{site.baseurl}}/api/methods.html#index\n\nh3. show\n\nShow a group.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string||path||\n\nh3. update\n\nUpdate attributes of an existing Group.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Group in question.|path||\n|group|object||query||\n|async|boolean (default false)|Defer the permissions graph update by a configured number of seconds. (By default, @async_permissions_update_interval@ is 20 seconds). On success, the response is 202 (Accepted).|query|@true@|\n\nh3(#untrash). untrash\n\nRemove a Group from the trash.  Only valid when @delete_at@ is in the future.  This sets the @trash_at@ and @delete_at@ fields to @null@.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. 
Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Group to untrash.|path||\n|ensure_unique_name|boolean (default false)|Rename project uniquely if untrashing it would fail with a unique name conflict.|query||\n\nh3(#shared). shared\n\nThis endpoint returns the top-level set of groups to which access is granted through a chain of one or more permission links rather than through direct ownership by the current user account.  This is useful for clients that wish to browse the list of projects the user has permission to read which are not part of the \"home\" project tree.  Similar behavior is also available with the @exclude_home_project@ option of the \"contents\" endpoint.\n\nSpecifically, the logic is:\n\n<pre>\nselect groups that are readable by current user AND\n    (the owner_uuid is a user (but not the current user) OR\n     the owner_uuid is not readable by the current user OR\n     the owner_uuid is a group but group_class is not a project)\n</pre>\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n|include|string|If provided with the value \"owner_uuid\", this will return owner objects in the @included@ field of the response.|query||\n\nNotes:\n\nWhen called with “include=owner_uuid”, the @included@ field of the response is populated with users and non-project groups that own the objects returned in @items@.\n\nIn addition to the \"include\" parameter this endpoint also supports the same parameters as the \"list method.\":{{site.baseurl}}/api/methods.html#index\n
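\nFor example, the following sketch lists shared projects together with their owner objects (using the @include@ parameter described above; @$ARVADOS_API_TOKEN@ is assumed to hold a valid token):\n\n<pre><code>$ curl -H \"Authorization: Bearer $ARVADOS_API_TOKEN\" \"https://{{ site.arvados_api_host }}/arvados/v1/groups/shared?include=owner_uuid\"\n</code></pre>\n"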
  },
  {
    "path": "doc/api/methods/keep_services.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"keep_services\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/keep_services@\n\nObject type: @bi6l4@\n\nExample UUID: @zzzzz-bi6l4-0123456789abcde@\n\nh2. Resource\n\nThe keep_services resource keep clients to discover storage servers and proxies available on the cluster for persistent storage and retrieval of keep blocks.\n\nEach KeepService has, in addition to the \"Common resource fields\":{{site.baseurl}}/api/resources.html:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|_. Example|\n|service_host|string|hostname of the server||\n|service_port|integer|TCP port of the service||\n|service_ssl_flag|boolean|if the server uses SSL||\n|service_type|string|The service type, one of \"disk\", \"blob\" (cloud object store) or \"proxy\" (keepproxy)||\n\nh2. Methods\n\nSee \"Common resource methods\":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nh3. accessible\n\nGet a list of keep services that are accessible to the requesting client.  Unlike @list@, this is context-sensitive based on the requester, for example providing the list of actual Keep servers when inside the cluster, but providing a proxy service if client contacts Arvados from outside the cluster.\n\nh3. create\n\nCreate a new KeepService.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n|keep_service|object||query||\n\nh3. delete\n\nDelete an existing KeepService.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||\n\nh3. get\n\nGets a KeepService's metadata by UUID.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||\n\nh3. list\n\nList keep_services.\n\nSee \"common resource list method.\":{{site.baseurl}}/api/methods.html#index\n\nh3. update\n\nUpdate attributes of an existing KeepService.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the KeepService in question.|path||\n|keep_service|object||query||\n"
  },
  {
    "path": "doc/api/methods/links.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"links\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/links@\n\nObject type: @o0j2j@\n\nExample UUID: @zzzzz-o0j2j-0123456789abcde@\n\nh2. Resource\n\nLinks are an extensible way to describe relationships between Arvados objects and metadata about individual objects.\n\nEach link has, in addition to the \"Common resource fields\":{{site.baseurl}}/api/resources.html:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|\n|head_uuid|string|The object being described or acted on.|\n|tail_uuid|string|The origin or actor in the description or action (may be null).|\n|link_class|string|Type of link|\n|name|string|Primary value of the link.|\n|properties|hash|Additional information, expressed as a key&rarr;value hash. Key: string. Value: string, number, array, or hash.  May be used in queries using \"subproperty filters\":{{site.baseurl}}/api/methods.html#subpropertyfilters|\n\nh2. Link classes\n\nSome classes are pre-defined by convention and have standard meanings attached to names.\n\nh3. permission\n\nThe significance of permission links is discussed in the \"permission links\":{{site.baseurl}}/api/permission-model.html#links section of the permission model documentation.\n\nh3. star\n\nA **star** link is a shortcut to a project that is displayed in the user interface (Workbench) as \"favorites\".  Users can mark their own favorites (implemented by creating or deleting **star** links).\n\nAn admin can also create **star** links owned by the \"Public favorites\" project.  These are favorites will be displayed to all users that have permission to read the project that has been favorited.\n\nThe schema for a star link is:\n\ntable(table table-bordered table-condensed).\n|_. Field|_. Value|_. Description|\n|owner_uuid|user or group uuid|Either the user that owns the favorite, or the \"Public favorites\" group.|\n|tail_uuid|user or group uuid|Should be the same as owner_uuid|\n|head_uuid|project uuid|The project being favorited|\n|link_class|string of value \"star\"|Indicates this represents a link to a user favorite|\n\nh4. Creating a public favorite\n\n@owner_uuid@ is either an individual user, or the \"Public favorites\" group.  The @head_uuid@ is the project being favorited.\n\n<pre>\n$ linkuuid=$(arv --format=uuid link create --link '{\n    \"link_class\": \"star\",\n    \"owner_uuid\": \"zzzzz-j7d0g-publicfavorites\",\n    \"tail_uuid\": \"zzzzz-j7d0g-publicfavorites\",\n    \"head_uuid\":  \"zzzzz-j7d0g-theprojectuuid\"}')\n</pre>\n\nh4. Removing a favorite\n\n<pre>\n$ arv link delete --uuid zzzzz-o0j2j-thestarlinkuuid\n</pre>\n\nh4. Listing favorites\n\nTo list all 'star' links that will be displayed for a user:\n\n<pre>\n$ arv link list --filters '[\n  [\"link_class\", \"=\", \"star\"],\n  [\"tail_uuid\", \"in\", [\"zzzzz-j7d0g-publicfavorites\", \"zzzzz-tpzed-currentuseruuid\"]]]'\n</pre>\n\nh3. tag\n\nA **tag** link describes an object using an unparsed plain text string.  Tags can be used to annotate objects that are not directly editable by the user, like collections and objects shared as read-only.\n\ntable(table table-bordered table-condensed).\n|_. tail_type&rarr;head_type|_. 
name&rarr;head_uuid {properties}|\n|&rarr;Collection           | _tag name_ &rarr; _collection uuid_|\n|&rarr;Job                  | _tag name_ &rarr; _job uuid_|\n\nh3. published_port\n\nA **published_port** link enables external access to container ports via user-defined domain names.\n\nIf the cluster is configured as follows to forward HTTP requests from external clients to container ports:\n\n<pre>\nServices:\n  ContainerWebServices:\n    ExternalURL: https://*.containers.zzzzz.example.com/\n</pre>\n\nA user can create the following link to route HTTP requests like @https://servicename.containers.zzzzz.example.com/@ to port 12345 in the container running for container request @zzzzz-xvhdp-012340123401234@:\n\n<pre>\n{\n  \"link_class\" \"published_port\",\n  \"head_uuid\": \"zzzzz-xvhdp-012340123401234\",\n  \"name\": \"servicename\",\n  \"properties\": {\n    \"port\": 12345\n  }\n}\n</pre>\n\nRefer to the \"documentation about published ports\":container_requests.html#published_ports for additional information.\n\nh2. Methods\n\nSee \"Common resource methods\":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nh3. create\n\nCreate a new Link.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n|link|object||query||\n\nWhen you create a new permission link with the same @head_uuid@ and @tail_uuid@ as an existing permission link, the API returns the existing link instead of creating a new one. If the requested permission level is higher than the existing link, the existing link is updated accordingly. Otherwise the existing link is returned unchanged.\n\nh3. delete\n\nDelete an existing Link.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path||\n\nWhen you delete a permission link, any other existing permission links that have the same @head_uuid@ and @tail_uuid@ are also deleted.\n\nh3. get\n\nGets a Link's metadata by UUID.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path||\n\nh3. list\n\nList links.\n\nSee \"common resource list method.\":{{site.baseurl}}/api/methods.html#index\n\nh3. update\n\nUpdate attributes of an existing Link.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Link in question.|path||\n|link|object||query||\n\nWhen you update a permission link such that it has the same @head_uuid@ and @tail_uuid@ as one or more existing permission links, the API deletes the other links. If the highest permission level among the deleted links was higher than the newly updated link, the updated link's permission level is increased accordingly.\n\nh3. get_permissions\n\nGet all permission links that point directly to given UUID (in the head_uuid field).  The requesting user must have @can_manage@ permission or be an admin.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the object.|path||\n"
  },
  {
    "path": "doc/api/methods/logs.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"logs\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/logs@\n\nObject type: @57u5n@\n\nExample UUID: @zzzzz-57u5n-0123456789abcde@\n\nh2. Resource\n\nEach Log has, in addition to the \"Common resource fields\":{{site.baseurl}}/api/resources.html:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|_. Example|\n|object_uuid|string|The arvados object that is the subject of the log.||\n|event_at|datetime|||\n|event_type|string|A user-defined category or type for this event.|@LOGIN@|\n|summary|text|||\n|properties|hash|||\n\nh3. Creation\n\nAny user may create Log entries for any event they find useful. User-generated Logs have no intrinsic meaning to other users or to the Arvados system itself; it is up to each user to choose appropriate log event types and summaries for their project.\n\nh3. System Logs\n\nArvados uses Logs to record creation, deletion, and updates of other Arvados resources.\n\nh2. Methods\n\nSee \"Common resource methods\":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nh3. create\n\nCreate a new log entry.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n|log|object||query||\n\nh3. delete\n\nDelete an existing log entry. This method can only be used by privileged (system administrator) users.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||\n\nh3. get\n\nRetrieve a log entry.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||\n\nh3. list\n\nList log entries.\n\nSee \"common resource list method.\":{{site.baseurl}}/api/methods.html#index\n\nh3. update\n\nUpdate attributes of an existing log entry. This method can only be used by privileged (system administrator) users.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the log entry in question.|path||\n|log|object||query||\n"
  },
  {
    "path": "doc/api/methods/user_agreements.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"user_agreements\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/user_agreements@\n\nh2. Resource\n\nThis provides an API for inactive users to sign clickthrough agreements prior to being activated.\n\nh2. Methods\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nh3. list\n\nList user agreements.  This is a list of collections which contain HTML files with the text of the clickthrough agreement(s) which can be rendered by Workbench.\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n\nh3. signatures\n\nList user agreements that have already been signed.  These are recorded as link objects of @{\"link_class\": \"signature\", \"name\": \"click\"}@.\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n\nh3. sign\n\nSign a user agreement.\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the user agreement collection.|path||\n"
  },
  {
    "path": "doc/api/methods/users.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"users\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/users@\n\nObject type: @tpzed@\n\nExample UUID: @zzzzz-tpzed-0123456789abcde@\n\nh2. Resource\n\nUsers represent individuals with access to the Arvados cluster.\n\nEach User has, in addition to the \"Common resource fields\":{{site.baseurl}}/api/resources.html:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|_. Example|\n|email|string|||\n|username|string|The username used for the user's git repositories and virtual machine logins.  Usernames must start with a letter, and contain only alphanumerics.  When a new user is created, a default username is set from their e-mail address.  Only administrators may change the username.||\n|first_name|string|||\n|last_name|string|||\n|identity_url|string|||\n|is_admin|boolean|||\n|prefs|hash|||\n|is_active|boolean|||\n\nh2. Methods\n\nSee \"Common resource methods\":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nh3. create\n\nCreate a new User.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n|user|object||query||\n\nh3(#current). current\n\nGet the user associated with the provided API token.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n\nh3. delete\n\nDelete an existing User.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string||path||\n\nh3. get\n\nGets a User's metadata by UUID.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||\n\nh3. list\n\nList users.\n\nSee \"common resource list method.\":{{site.baseurl}}/api/methods.html#index\n\nh3. system\n\nGet the user record for the \"system user.\":{{site.baseurl}}/api/permission-model.html#system\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n\nh3. update\n\nUpdate attributes of an existing User.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||\n|user|object|The new attributes.|query||\n\nh3. setup\n\nSet up a user.  Adds the user to the \"All users\" group.  Enables the user to invoke @activate@.  See \"user management\":{{site.baseurl}}/admin/user-management.html for details.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the User in question.|query||\n\nh3. activate\n\nCheck that a user has is set up and has signed all the user agreements.  
Users can invoke this for themselves.  See \"user agreements\":{{site.baseurl}}/admin/user-management.html#user_agreements for details.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the User in question.|query||\n\nh3. unsetup\n\nRemove the user from the \"All users\" group and deactivate the user.  See \"user management\":{{site.baseurl}}/admin/user-management.html for details.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the User in question.|path||\n\nh3. merge\n\nTransfer ownership of data from the \"old\" user account to the \"new\" user account.  When @redirect_to_new_user@ is @true@ this also causes logins to the \"old\" account to be redirected to the \"new\" account.  The \"old\" user account that was redirected becomes invisible in user listings.\n\nSee \"Merge user accounts\":{{site.baseurl}}/admin/link-accounts.html, \"Reassign user data ownership\":{{site.baseurl}}/admin/reassign-ownership.html, and \"Linking alternate login accounts\":{{site.baseurl}}/user/topics/link-accounts.html for examples of how this method is used.\n\nMust supply either @new_user_token@ (the currently authorized user will be the \"old\" user), or both @new_user_uuid@ and @old_user_uuid@ (the currently authorized user must be an admin).\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n|new_user_token|string|A valid token for the \"new\" user|query||\n|new_user_uuid|uuid|The uuid of the \"new\" account|query||\n|old_user_uuid|uuid|The uuid of the \"old\" account|query||\n|new_owner_uuid|uuid|The uuid of a project to which objects owned by the \"old\" user will be reassigned.|query||\n|redirect_to_new_user|boolean|If true, also redirect login and reassign authorization credentials from the \"old\" user to the \"new\" user|query||\n\nh3. authenticate\n\nCreate a new API token based on username/password credentials.  Returns an \"API client authorization\":api_client_authorizations.html object containing the API token, or an \"error object.\":../requests.html#errors\n\nValid credentials are determined by the choice of \"configured login backend.\":{{site.baseurl}}/install/setup-login.html\n\nNote: this endpoint cannot be used with login backends that use web-based third party authentication, such as Google or OpenID Connect.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|username|string|The username.|body||\n{background:#ccffcc}.|password|string|The password.|body||\n"
  },
  {
    "path": "doc/api/methods/virtual_machines.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"virtual_machines\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/virtual_machines@\n\nObject type: @2x53u@\n\nExample UUID: @zzzzz-2x53u-0123456789abcde@\n\nh2. Resource\n\nThe virtual_machines resource lists compute resources in the Arvados cluster to which a user may log in to get an interactive shell (via ssh or webshell).\n\nEach VirtualMachine has, in addition to the \"Common resource fields\":{{site.baseurl}}/api/resources.html:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|_. Example|\n|hostname|string|||\n\nh2. Methods\n\nSee \"Common resource methods\":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nh3. create\n\nCreate a new VirtualMachine.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n|virtual_machine|object||query||\n\nh3. delete\n\nDelete an existing VirtualMachine.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path||\n\nh3. get\n\nGets a VirtualMachine's metadata by UUID.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path||\n\nh3(#logins). logins\n\nGet a list of SSH keys and account names that should be able to log in to a given virtual machine.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string||path||\n\nThe response is an object with the field @items@ containing an array of objects in the following format:\n\ntable(table table-bordered table-condensed).\n|_. Key|_. Value type|_. Description|_. Example|\n|username|string|Name of the Unix login account to which the user should be able to log in|@\"jsmith\"@|\n|hostname|string|Hostname of the virtual machine|@\"shell.xyzzy.arvadosapi.com\"@|\n|public_key|string|SSH public key|@\"ssh-rsa AAAAB3NzaC1yc2E...\"@|\n|user_uuid|string|UUID of the user who should be able to log in|@\"xyzzy-tpzed-mv4d7dy7n91te11\"@|\n|virtual_machine_uuid|string|UUID of the \"virtual machine resource\":{{site.baseurl}}/api/methods/virtual_machines.html|@\"zzzzz-2x53u-kvszmclnbjuv8xc\"@|\n|authorized_key_uuid|string|UUID of the \"authorized key resource\":{{site.baseurl}}/api/methods/authorized_keys.html|@\"zzzzz-fngyi-v9p0cyfmjxbio64\"@|\n\nh3. get_all_logins\n\nGet a list of SSH keys and account names that should be able to log in for every virtual machine in the system.\n\nArguments: none.\n\nThe response has the same format as the response to the \"logins method\":#logins above.\n\nh3. list\n\nList virtual_machines.\n\nSee \"common resource list method.\":{{site.baseurl}}/api/methods.html#index\n\nh3. update\n\nUpdate attributes of an existing VirtualMachine.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. 
Example |\n{background:#ccffcc}.|uuid|string|The UUID of the VirtualMachine in question.|path||\n|virtual_machine|object||query||\n"
  },
  {
    "path": "doc/api/methods/workflows.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: API Methods\ntitle: \"workflows\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAPI endpoint base: @https://{{ site.arvados_api_host }}/arvados/v1/workflows@\n\nObject type: @7fd4e@\n\nExample UUID: @zzzzz-7fd4e-0123456789abcde@\n\nh2. Resource\n\nStores a \"Common Workflow Language\":http://commonwl.org (CWL) computational workflow that can be searched for, browsed and executed (submitted to Crunch) from the workbench.\n\nEach Workflow offers the following optional attributes, in addition to the \"Common resource fields\":{{site.baseurl}}/api/resources.html:\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|_. Example|\n|name|string|If not specified, will be set to any \"name\" from the \"definition\" attribute.||\n|description|string|If not specified, will be set to any \"description\" from the \"definition\" attribute.||\n|definition|string|A \"Common Workflow Language\" document.|Visit \"Common Workflow Language\":http://www.commonwl.org/ for details.|\n|collection_uuid|string|This attribute is always null. It is reserved for future development. {% comment until 23057 %} If non-null, a linked workflow definition stored in a Collection.  See below. {% endcomment %}||\n\n{% comment until 23057 %}\nh2. Workflows linked to Collections\n\nIf @collection_uuid@ is set, this significantly changes the behavior of the workflow record.\n\nThe linked Collection must have the following properties.  These are extracted from and must be synchronized with the workflow in @arv:workflowMain@. They are copied into the workflow collection's @properties@ for ease of processing by client tools such as Workbench.\n\ntable(table table-bordered table-condensed).\n|_. Attribute|_. Type|_. Description|\n|type|string|Value must be 'workflow'|\n|arv:workflowMain|string|The file path within the collection that is the top-level workflow that will be launched.|\n|arv:cwl_inputs|array of object|Array of \"workflow input parameters\":https://www.commonwl.org/v1.2/Workflow.html#WorkflowInputParameter in \"fully expanded form\":https://www.commonwl.org/v1.2/SchemaSalad.html#Document_preprocessing |\n|arv:cwl_outputs|array of object|Array of \"workflow output parameters\":https://www.commonwl.org/v1.2/Workflow.html#WorkflowOutputParameter in \"fully expanded form\":https://www.commonwl.org/v1.2/SchemaSalad.html#Document_preprocessing |\n|arv:cwl_requirements|array of object|Array of \"workflow process requirements\":https://www.commonwl.org/v1.2/Workflow.html#Workflow in \"fully expanded form\":https://www.commonwl.org/v1.2/SchemaSalad.html#Document_preprocessing (in particular, this must list requirements that affect initial launching of the workflow such as \"WorkflowRunnerResources\":{{site.baseurl}}/user/cwl/cwl-extensions.html ).|\n|arv:cwl_hints|array of object|Array of \"workflow process hints\":https://www.commonwl.org/v1.2/Workflow.html#Workflow in \"fully expanded form\":https://www.commonwl.org/v1.2/SchemaSalad.html#Document_preprocessing (in particular, this must list hints that affect initial launching of the workflow such as \"WorkflowRunnerResources\":{{site.baseurl}}/user/cwl/cwl-extensions.html ).|\n\nWhen @collection_uuid@ is set, the workflow record @name@, @description@, @definition@ and @owner_uuid@ are all set from the linked collection.  
The workflow record can no longer be updated directly, but changes to the linked collection will be reflected in the workflow record.  Trashing the linked collection will cause the workflow record to become trashed and eventually deleted as well.  The workflow record cannot be un-linked from a collection, only deleted and re-created.\n\nWhen a workflow is linked to a collection, the collection can be queried and fetched together with the workflow.  The @filters@ argument can filter on attributes of the collection referenced by @collection_uuid@. For example, @[[\"collection.properties.category\", \"=\", \"WGS\"]]@ will match workflow definitions linked to collections that have a \"category\" property with the value \"WGS\".  When using the \"group contents\":groups.html#contents API to fetch workflow records, in addition to the previously-described filters, you can use @include=[\"collection_uuid\"]@ to include the collection records corresponding to the @collection_uuid@ of the workflow records in the response.\n{% endcomment %}\n\nh2. Methods\n\nSee \"Common resource methods\":{{site.baseurl}}/api/methods.html for more information about @create@, @delete@, @get@, @list@, and @update@.\n\nRequired arguments are displayed in %{background:#ccffcc}green%.\n\nSupports federated @create@, @delete@, @get@, @list@, and @update@.\n\nh3. create\n\nCreate a new Workflow.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|workflow|object|Workflow resource|request body||\n\nh3. delete\n\nDelete an existing Workflow.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Workflow in question.|path||\n\nh3. get\n\nGet a Workflow's metadata by UUID.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Workflow in question.|path||\n\nh3. list\n\nList workflows.\n\nSee \"common resource list method.\":{{site.baseurl}}/api/methods.html#index\n\n{% comment until 23057 %}\nThe @filters@ argument can filter on attributes of the collection referenced by @collection_uuid@. For example, @[[\"collection.properties.category\", \"=\", \"WGS\"]]@ will match workflow definitions linked to collections that have a \"category\" property with the value \"WGS\".\n{% endcomment %}\n\nh3. update\n\nUpdate attributes of an existing Workflow.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |_. Example |\n{background:#ccffcc}.|uuid|string|The UUID of the Workflow in question.|path||\n|workflow|object||query||\n"
  },
  {
    "path": "doc/api/methods.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: Concepts\ntitle: Common resource methods\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe following methods are available for most resources.  Some resources may limit who can perform certain operations.  Consult documentation for individual resource types for details.\n\nThe methods are relative to the base URI, e.g., @/arvados/v1/resource_type@.  For arguments specifying a *Location* of @path@, the value of the argument is incorporated into the path portion of the URI.  For example, a @uuid@ of @aaaaa-bbbbb-ccccccccccccccc@ in a path position yields a URI of @/arvados/v1/resource_type/aaaaa-bbbbb-ccccccccccccccc@.\n\nArguments specifying a *Location* of \"query\" are incorporated into the query portion of the URI or request body.  For example, @/arvados/v1/resource_type?count=none@.\n\nCertain method calls on certain object types support \"federation\":{{site.baseurl}}/architecture/federation.html: the ability to operate on objects owned by different clusters.   API pages for specific object types list which federated operations are supported for that type (if any) in the \"Methods\" section.  Methods which implicitly include a cluster ID (such as @GET@ on a specific UUID, using the UUID prefix) will be directed to the appropriate cluster.  Methods that don't implicitly include the cluster ID (such as @create@) use the @cluster_id@ query parameter to specify which cluster to direct the request.\n\n* \"create\":#create\n* \"delete\":#delete\n* \"get\":#get\n* \"list\":#index\n** \"Available list method filters\":#filters\n*** \"Filtering using substring search\":#substringsearchfilter\n*** \"Filtering on subproperties\":#subpropertyfilters\n*** \"Filtering using boolean expressions\":#filterexpression\n** \"Federated listing\":#federated-list\n** \"Results of list method\":#list-results\n* \"update\":#update\n\nh2(#create). create\n\nThe @create@ method creates a new object of the specified type.  Note that:\n\n* Only the listed attributes (and \"standard metadata\":resources.html) are set\n* Unset attributes will get default values\n* The attributes of a given resource type are fixed (you cannot introduce new toplevel attributes)\n\nThis method corresponds to the HTTP request @POST /arvados/v1/resource_type@.  A successful create call returns a copy of the new object.\n\nTo create an object on a remote cluster (federated create), provide the @cluster_id@ of the target cluster.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |\n|{resource_type}|object|Name is the singular form of the resource type, e.g., for the \"collections\" resource, this argument is \"collection\"|body|\n|{cluster_id}|string|Optional, the cluster on which to create the object if not the current cluster.|query|\n|select  |array  |Attributes of the new object to return in the response (by default, all available attributes are returned).\nExample: @[\"uuid\",\"name\",\"modified_at\"]@|query|\n\nh2(#delete). delete\n\nThe @delete@ method deletes an object of the specified type.  It corresponds to the HTTP request @DELETE /arvados/v1/resource_type/uuid@.  
A successful delete call returns a copy of the deleted object.\n\nThe cluster ID portion of the @uuid@ is used to determine which cluster owns the object; a federated delete request will be routed to that cluster.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |\n{background:#ccffcc}.|uuid|string|The UUID of the object in question.|path|\n|select  |array  |Attributes of the deleted object to return in the response (by default, all available attributes are returned).\nExample: @[\"uuid\",\"name\",\"modified_at\"]@|query|\n\nh2(#get). get\n\nThe @get@ method gets a single object with the specified @uuid@.  It corresponds to the HTTP request @GET /arvados/v1/resource_type/uuid@.\n\nThe cluster ID portion of the @uuid@ is used to determine which cluster owns the object; a federated get request will be routed to that cluster.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |\n{background:#ccffcc}.|uuid|string|The UUID of the object in question.|path|\n|select  |array  |Attributes of the object to return in the response (by default, all available attributes are returned).\nExample: @[\"uuid\",\"name\",\"modified_at\"]@|query|\n\nh2(#index). list\n\nThe @list@ method requests a list of resources of that type.  It corresponds to the HTTP request @GET /arvados/v1/resource_type@.  All resources support the @list@ method unless otherwise noted.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |\n|limit   |integer|Maximum number of resources to return.  If not provided, the server will provide a default limit.  The server may also impose a maximum number of records that can be returned in a single request.|query|\n|offset  |integer|Skip the first 'offset' number of resources that would be returned under the given filter conditions.|query|\n|filters |array  |\"Conditions for selecting resources to return.\":#filters|query|\n|order   |array  |Attributes to use as sort keys to determine the order resources are returned, each optionally followed by @asc@ or @desc@ to indicate ascending or descending order.  (If not specified, it will be ascending).\nExample: @[\"head_uuid asc\",\"modified_at desc\"]@\nDefault: @[\"modified_at desc\", \"uuid asc\"]@|query|\n|select  |array  |Attributes of each object to return in the response (by default, all available attributes are returned, except collections, which do not return @manifest_text@ unless explicitly selected).\nExample: @[\"uuid\",\"name\",\"modified_at\"]@|query|\n|distinct|boolean|When returning multiple records whose selected attributes (see @select@) are equal, return them as a single response entry.\nDefault is @false@.|query|\n|count|string|@\"exact\"@ (default): Include an @items_available@ response field giving the number of distinct matching items that can be retrieved (irrespective of @limit@ and @offset@ arguments).\n@\"none\"@: Omit the @items_available@ response field. This option will produce a faster response.|query|\n\nh3(#filters). Available list method filters\n\nThe value of the @filters@ parameter is an array of conditions. The @list@ method returns only the resources that satisfy all of the given conditions. In other words, the conjunction @AND@ is implicit.\n\nEach condition is expressed as an array with three elements: @[attribute, operator, operand]@.\n\ntable(table table-bordered table-condensed).\n|_. Index|_. Element|_. Type|_. Description|_. 
Examples|\n|0|attribute|string|Name of the attribute to compare (or \"any\" to return resources with any matching attribute)|@script_version@, @head_uuid@, @any@|\n|1|operator|string|Comparison operator|@>@, @>=@, @like@, @not in@|\n|2|operand|string, array, or null|Value to compare with the resource attribute|@\"d00220fb%\"@, @\"1234\"@, @[\"foo\",\"bar\"]@, @nil@|\n\nThe following operators are available.\n\ntable(table table-bordered table-condensed).\n|_. Operator|_. Operand type|_. Description|_. Example|\n|@=@, @!=@, @<>@|string, number, timestamp, JSON-encoded array, JSON-encoded object, or null|Equality comparison|@[\"tail_uuid\",\"=\",\"xyzzy-j7d0g-fffffffffffffff\"]@\n@[\"tail_uuid\",\"!=\",null]@\n@[\"storage_classes_desired\",\"=\",\"[\\\"default\\\"]\"]@|\n|@<@, @<=@, @>=@, @>@|string, number, or timestamp|Ordering comparison|@[\"script_version\",\">\",\"123\"]@|\n|@like@, @ilike@|string|SQL pattern match.  Single character match is @_@ and wildcard is @%@. The @ilike@ operator is case-insensitive|@[\"script_version\",\"like\",\"d00220fb%\"]@|\n|@in@, @not in@|array of strings or integers|Set membership|@[\"script_version\",\"in\",[\"main\",\"d00220fb38d4b85ca8fc28a8151702a2b9d1dec5\"]]@|\n|@is_a@|string|Arvados object type|@[\"head_uuid\",\"is_a\",\"arvados#collection\"]@|\n|@exists@|string|Presence of subproperty|@[\"properties\",\"exists\",\"my_subproperty\"]@|\n|@contains@|string, array of strings|Presence of one or more keys or array elements|@[\"storage_classes_desired\", \"contains\", [\"foo\", \"bar\"]]@ (matches both @[\"foo\", \"bar\"]@ and @[\"foo\", \"bar\", \"baz\"]@)\n(note @[..., \"contains\", \"foo\"]@ is also accepted, and is equivalent to @[..., \"contains\", [\"foo\"]]@)|\n\nh4(#substringsearchfilter). Filtering using substring search\n\nResources can also be filtered by searching for a substring in attributes of type @string@, @array of strings@, @text@, and @hash@, which are indexed in the database specifically for search. To use substring search, the filter must:\n\n* Specify @any@ as the attribute\n* Use either the @like@ or @ilike@ operator\n* Have an operand of type @string@ that is wrapped in the SQL pattern match wildcard character @%@\n\nFor example, the @[\"any\", \"like\", \"%foo%\"]@ filter will return all resources that contain @foo@ in the content of at least one attribute of the previously defined types. This is the recommended way to do keyword and file name search across the entire database. Note that only exact substring matches are returned and results are unranked and returned in the order specified by the @list@ @order@ argument.\n\nh4(#subpropertyfilters). Filtering on subproperties\n\nSome record types have an additional @properties@ attribute that allows recording and filtering on additional key-value pairs.  To filter on a subproperty, the value in the @attribute@ position has the form @properties.user_property@.  You may also use JSON-LD / RDF style URIs for property keys by enclosing them in @<...>@, for example @properties.<http://example.com/user_property>@.  Alternatively, you may provide a JSON-LD \"@context\" field; however, at this time JSON-LD contexts are not interpreted by Arvados.\n\ntable(table table-bordered table-condensed).\n|_. Operator|_. Operand type|_. Description|_. 
Example|\n|@=@, @!=@|string, number or boolean|Equality comparison|@[\"properties.my_subproperty\", \"=\", \"fizzy whizy sparkle pop\"]@|\n|@<@, @<=@, @>=@, @>@|string or number|Ordering comparison|@[\"properties.my_subproperty\", \"<\", 3]@|\n|@like@, @ilike@|string|SQL pattern match.  Single character match is @_@ and wildcard is @%@. The @ilike@ operator is case-insensitive|@[\"properties.my_subproperty\", \"like\", \"d00220fb%\"]@|\n|@in@, @not in@|array of strings|Set membership|@[\"properties.my_subproperty\", \"in\", [\"fizz\", \"buzz\"]]@|\n|@exists@|boolean|Test if a subproperty is present or not (determined by operand).|@[\"properties.my_subproperty\", \"exists\", true]@|\n|@contains@|string, number|Filter where the subproperty has the given value, either by exact match or as an element of a list value.|@[\"properties.foo\", \"contains\", \"bar\"]@ will find both @{\"foo\": \"bar\"}@ and @{\"foo\": [\"bar\", \"baz\"]}@.|\n\nNote that exclusion filters @!=@ and @not in@ will return records for which the property is not defined at all.  To restrict filtering to records on which the subproperty is defined, combine with an @exists@ filter.\n\nh4(#filterexpression). Filtering using boolean expressions\n\nIn addition to the three-element array form described above, a string containing a boolean expression is also accepted. The following restrictions apply:\n* The expression must contain exactly one operator.\n* The operator must be @=@, @<@, @<=@, @>@, or @>=@.\n* There must be exactly one pair of parentheses, surrounding the entire expression.\n* Each operand must be the name of a numeric attribute like @replication_desired@ (literal values like @3@ and non-numeric attributes like @uuid@ are not accepted).\n* The expression must not contain whitespace other than an ASCII space (newline and tab characters are not accepted).\n\nExamples:\n* @(replication_desired > replication_confirmed)@\n* @(replication_desired = replication_confirmed)@\n\nBoth types of filter (boolean expressions and @[attribute, operator, operand]@ filters) can be combined in the same API call. Example:\n* @{\"filters\": [\"(replication_desired > replication_confirmed)\", [\"replication_desired\", \"<\", 2]]}@\n\nh3(#federated-list). Federated listing\n\nFederated listing forwards a request to multiple clusters and combines the results.  Currently only a very restricted form of the \"list\" method is supported.\n\nTo query multiple clusters, the list request must:\n\n* Have filters only matching @[[\"uuid\", \"in\", [...]]]@ or @[\"uuid\", \"=\", \"...\"]@\n* Specify @count=none@\n* Not specify @limit@, @offset@ or @order@\n* Not request more items than the maximum response size\n\nThis form may be used to request a specific list of objects by UUID which are owned by multiple clusters.\n\nh3(#list-results). Results of list method\n\nA successful call to list will return the following object.\n\ntable(table table-bordered table-condensed).\n|_. Attribute |_. Type |_. Description |\n|kind|string|type of objects returned|\n|offset|integer|query offset in effect|\n|limit|integer|query limit in effect|\n|items|array|actual query payload, an array of resource objects|\n|items_available|integer|total items available matching query|\n\nh2(#update). update\n\nThe @update@ method updates fields on the object with the specified @uuid@.  It corresponds to the HTTP request @PUT /arvados/v1/resource_type/uuid@.  
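\n\nAs a sketch, renaming a collection with a placeholder uuid might look like this with curl (a valid token is assumed in @$ARVADOS_API_TOKEN@):\n\n<pre>\n$ curl -X PUT --data-urlencode 'collection={\"name\":\"updated name\"}' -H \"Authorization: Bearer $ARVADOS_API_TOKEN\" https://{{ site.arvados_api_host }}/arvados/v1/collections/zzzzz-4zz18-0123456789abcde\n</pre>\n\n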
Note that only the listed attributes (and \"standard metadata\":resources.html) are updated: unset attributes retain their previous values, and the attributes of a given resource type are fixed (you cannot introduce new toplevel attributes).  Also note that updates replace the value of the attribute, so if an attribute has an object value, the entire object is replaced.  A successful update call returns the updated copy of the object.\n\nThe cluster ID portion of the @uuid@ is used to determine which cluster owns the object; a federated update request will be routed to that cluster.\n\nArguments:\n\ntable(table table-bordered table-condensed).\n|_. Argument |_. Type |_. Description |_. Location |\n{background:#ccffcc}.|uuid|string|The UUID of the resource in question.|path|\n|{resource_type}|object||query|\n|select  |array  |Attributes of the updated object to return in the response (by default, all available attributes are returned).\nExample: @[\"uuid\",\"name\",\"modified_at\"]@|query|\n"
  },
  {
    "path": "doc/api/permission-model.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: architecture\nnavmenu: Concepts\ntitle: \"Permission model\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThere are four levels of permission: *none*, *can_read*, *can_write*, and *can_manage*.\n\n* *none* is the default state when there are no other permission grants.\n** the object is not included in any list query response.\n** direct queries of the object by uuid return 404 Not Found.\n** Link objects require valid identifiers in @head_uuid@ and @tail_uuid@, so an attempt to create a Link that references an unreadable object will return an error indicating the object is not found.\n* *can_read* grants read-only access to the record.  Attempting to update or delete the record returns an error.\n** *can_read* does not allow a reader to see any permission grants on the object except the object's owner_uuid and the reader's own permissions.\n* *can_write* permits changes to the record, including changing ownership and deleting the object.\n** *can_write* cannot read, create, update or delete permission links associated with the object.\n** *can_write* also implies *can_read*.\n* *can_manage* permits the user to read, create, update and delete permission links whose @head_uuid@ is this object's @uuid@.\n** *can_manage* also implies *can_write* and *can_read*.\n\nh2. Ownership\n\nAll Arvados objects have an @owner_uuid@ field. Valid uuid types for @owner_uuid@ are \"User\" and \"Group\".  In the case of a Group, the @group_class@ must be \"project\".\n\nThe User or Group specified by @owner_uuid@ has *can_manage* permission on the object.  This permission is one way: an object that is owned does not get any special permissions on the User or Group that owns it.\n\nTo change the @owner_uuid@ field, it is necessary to have @can_write@ permission on both the current owner and the new owner.\n\nh2(#links). Permission links\n\nA permission link is a link object with:\n\n* @owner_uuid@ of the system user.\n* @link_class@ \"permission\"\n* @name@ one of *can_read*, *can_write*, *can_manage* or *can_login*\n* @head_uuid@ of some Arvados object\n* @tail_uuid@ of a User or Group.  For Group, the @group_class@ must be a \"role\".\n\nThis grants the permission in @name@ for @tail_uuid@ accessing @head_uuid@.\n\nIf a User has *can_manage* permission on some object, the user has the ability to read, create, update and delete permission links with @head_uuid@ of the managed object.  In other words, the user has the ability to modify the permission grants on the object.\n\nThe *can_login* @name@ is only meaningful on a permission link with with @tail_uuid@ a user UUID and @head_uuid@ a Virtual Machine UUID. A permission link of this type gives the user UUID permission to log into the Virtual Machine UUID. The username for the VM is specified in the @properties@ field. Group membership can be specified that way as well, optionally. See the \"VM login section on the 'User management at the CLI' page\":{{ site.baseurl }}/admin/user-management-cli.html#vm-login for an example.\n\nh3. 
Transitive permissions\n\nPermissions can be obtained indirectly through nested ownership (*can_manage*) or by following multiple permission links.\n\n* If a User X owns project A, and project A owns project B, then User X *can_manage* project B.\n* If a User X *can_read* role A, and role A *can_read* Object B, then User X *can_read* Object B.\n* Permissions are narrowed to the least powerful permission on the path.\n** If User X *can_write* role A, and role A *can_read* Object B, then User X *can_read* Object B.\n** If User X *can_read* role A, and role A *can_write* Object B, then User X *can_read* Object B.\n\nh2. Projects and Roles\n\nA \"project\" is a subtype of Group that is displayed as a \"Project\" in Workbench, and as a directory by @arv-mount@.\n* A project can own things (appear in @owner_uuid@)\n* A project can be owned by a user or another project.\n* The name of a project is unique only among projects and filters with the same @owner_uuid@.\n* Projects can be targets (@head_uuid@) of permission links, but not origins (@tail_uuid@).  Putting a project in a @tail_uuid@ field is an error.\n\nA \"filter\" is a subtype of Group that is displayed as a \"Project\" in Workbench, and as a directory by @arv-mount@. See \"the groups API documentation\":{{ site.baseurl }}/api/methods/groups.html for more information.\n* A filter group cannot own things (cannot appear in @owner_uuid@).  Putting a filter group in an @owner_uuid@ field is an error.\n* A filter group can be owned by a user or a project.\n* The name of a filter is unique only among projects and filters with the same @owner_uuid@.\n* Filters can be targets (@head_uuid@) of permission links, but not origins (@tail_uuid@).  Putting a filter in a @tail_uuid@ field is an error.\n\nA \"role\" is a subtype of Group that is treated in Workbench as a group of users who have permissions in common (typically an organizational group).\n* A role cannot own things (cannot appear in @owner_uuid@).  Putting a role in an @owner_uuid@ field is an error.\n* All roles are owned by the system user.\n* The name of a role is unique across a single Arvados cluster.\n* Roles can be both targets (@head_uuid@) and origins (@tail_uuid@) of permission links.\n* By default, all roles are visible to all active users. However, if the configuration entry @Users.RoleGroupsVisibleToAll@ is @false@, visibility is determined by normal permission rules, _i.e._, a role is only visible to users who have that role, and to admins.\n* By default, any user can create a new role. However, if the configuration entry @Users.CanCreateRoleGroups@ is @false@, only admins can create roles.\n\nh3. Access through Roles\n\nA \"role\" consists of a set of users or other roles that have that role, and a set of permissions (primarily read/write/manage access to projects) the role grants.\n\nIf there is a permission link stating that user A *can_write* role R, then we say A has role R.  This means user A has up to *can_write* access to everything the role has access to.\n\nBecause permissions are one-way, the links A *can_write* R and B *can_write* R do not imply that users A and B will be able to see each other.  
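\n\nSuch a role grant is an ordinary permission link; a hypothetical @arv@ cli invocation creating it (with placeholder uuids standing in for user A and role R) might look like:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv link create --link '{\n    \"link_class\": \"permission\",\n    \"name\": \"can_write\",\n    \"tail_uuid\": \"zzzzz-tpzed-0123456789abcde\",\n    \"head_uuid\": \"zzzzz-j7d0g-0123456789abcde\"}'</span>\n</code></pre>\n</notextile>\n\n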
For users in a role to see each other, read permission should be added going in the opposite direction: R *can_read* A and R *can_read* B.\n\nIf a user needs to be able to manipulate permissions of objects that are accessed through the role (for example, to share project P with a user outside the role), then role R must have *can_manage* permission on project P (R *can_manage* P) and the user must be granted *can_manage* permission on R (A *can_manage* R).\n\nh2. Special cases\n\nLog table objects are additionally readable based on whether the User has *can_read* permission on @object_uuid@ (a user can access log history about objects they can read).  To retain the integrity of the log, the log table denies all update or delete operations.\n\nPermission links where @tail_uuid@ is a User allow *can_read* on the link record by that user (a user can discover their own permission grants).\n\nAt least *can_read* on a Collection grants permission to read the blocks that make up the collection (API server returns signed blocks).\n\nA user can only read a container record if the user has read permission on a @container_request@ with that @container_uuid@.\n\n*can_read* and *can_write* access on a user grants access to the user record, but not to anything owned by the user.\n*can_manage* access to a user grants *can_manage* access to the user, _and everything owned by that user_ .\nIf a user A *can_read* role R, and role R *can_manage* user B, then user A *can_read* user B _and everything owned by that user_ .\n\nModifying a role group requires *can_manage* permission (by contrast, *can_write* is sufficient to modify project groups and other object types).\n\nh2(#system). System user and group\n\nA privileged user account exists for use by internal Arvados components.  This user manages system objects which should not be \"owned\" by any particular user.  The system user uuid is @{siteprefix}-tpzed-000000000000000@.\n\nh2. Anonymous user and group\n\nAn Arvados site may be configured to allow users to browse resources without requiring a login.  In this case, permissions for non-logged-in users are associated with the \"anonymous\" user.  To make objects visible to anyone (both logged-in and non-logged-in users), they can be shared with the \"anonymous\" role.  Note that objects shared with the \"anonymous\" user will only be visible to non-logged-in users!\n\nThe anonymous user uuid is @{siteprefix}-tpzed-anonymouspublic@.  The anonymous group uuid is @{siteprefix}-j7d0g-anonymouspublic@.\n\nh2. Example\n\n!(full-width){{site.baseurl}}/images/Arvados_Permissions.svg!\n"
  },
  {
    "path": "doc/api/projects.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\ntitle: \"Projects and filter groups\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados @projects@ are used to organize objects. Projects can contain @collections@, @container requests@, @workflows@, etc. Projects can also contain other projects. An object is part of a project if the @owner_uuid@ of the object is set to the uuid of the project.\n\nProjects are implemented as a subtype of the Arvados @group@ object type, with @group_class@ set to the value \"project\". More information is available in the \"groups API reference\":{{ site.baseurl }}/api/methods/groups.html.\n\nProjects can be manipulated via Workbench, the cli tools, the SDKs, and the Arvados APIs.\n\nh2. The home project\n\nEach user has a @home project@, which is implemented differently. This is a virtual project that is comprised of all objects owned by the user, in other words, all objects with the @owner_uuid@ set to the @uuid@ of the user. The home project is accessible via Workbench, which makes it easy view its contents and to move objects from and to the home project. The home project is also accessible via FUSE, WebDAV and the S3 interface.\n\nThe same thing can be done via the APIs. To put something in a user's home project via the cli or SDKs, one would set the @owner_uuid@ of the object to the user's @uuid@. This also implies that this user now has full ownership and control over that object.\n\nThe contents of the home project can be accessed with the @group contents@ API, e.g. via the cli with this command:\n<pre>arv group contents --uuid zzzzz-tpzed-123456789012345</pre>\nIn this command, `zzzzz-tpzed-123456789012345` is a @user@ uuid, which is unusual because we are using it as the argument to a @groups@ API. The @group contents@ API is normally used with a @group@ uuid.\n\nBecause the home project is a virtual project, other operations via the @groups@ API are not supported.\n\nh2(#filtergroups). Filter groups\n\nFilter groups are another type of virtual project. They are implemented as an Arvados @group@ object with @group_class@ set to the value \"filter\".\n\nFilter groups define one or more filters which are applied to all objects that the current user can see, and returned as the contents of the @group@. Filter groups are described in more detail in the \"groups API reference\":{{site.baseurl}}/api/methods/groups.html, and the rules for creating valid filters are the same as for \"list method filters\":{{site.baseurl}}/api/methods.html#filters.\n\nFilter groups are accessible (read-only) via Workbench and the Arvados FUSE mount, WebDAV and S3 interface. 
Filter groups must currently be defined via the API, SDK or cli; there is no Workbench support yet.\n\nAs an example, create a filter group with the @arv@ cli:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\"> FILTER_GROUP_UUID=`arv -s group create --group '{\n    \"group_class\":\"filter\",\n    \"name\":\"my filter group\",\n    \"properties\":{\n      \"filters\":\n        [\n          [\"collections.name\",\"ilike\",\"%test%\"],\n          [\"uuid\",\"is_a\",\"arvados#collection\"]\n        ]\n      }\n    }'`</span>\n</code>\n</pre>\n</notextile>\nThis filter group will contain all collections visible to the current user whose name matches the word @test@ (case insensitive).\n\nTo see how this works via the Keep FUSE mount, create a few matching (and non-matching) collections:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv collection create --collection '{\"name\":\"empty test collection 1\"}'</span>\n~$ <span class=\"userinput\">arv collection create --collection '{\"name\":\"another empty collection\"}'</span>\n~$ <span class=\"userinput\">arv collection create --collection '{\"name\":\"empty Test collection 2\"}'</span>\n~$ <span class=\"userinput\">mkdir -p keep</span>\n~$ <span class=\"userinput\">arv-mount keep</span>\n~$ <span class=\"userinput\">ls keep/by_id/$FILTER_GROUP_UUID/ -C1</span>\n'empty test collection 1'\n'empty Test collection 2'</code>\n</pre>\n</notextile>\n"
  },
  {
    "path": "doc/api/properties.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\ntitle: \"Metadata properties\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados allows you to attach arbitrary properties to \"collection\":methods/collections.html, \"container_request\":methods/container_requests.html, \"link\":methods/links.html and \"group\":methods/groups.html records that have a @properties@ field.  These are key-value pairs, where the value is a valid JSON type (string, number, null, boolean, array, object).\n\nSearching for records using properties is described in \"Filtering on subproperties\":methods.html#subpropertyfilters .\n\nh2. Controlling user-supplied properties\n\nArvados can be configured with a vocabulary file that lists valid properties and the range of valid values for those properties.  This is described in \"Metadata vocabulary\":{{site.baseurl}}/admin/metadata-vocabulary.html .\n\nArvados offers options to set properties automatically and/or prevent certain properties, once set, from being changed by non-admin users.  This is described in \"Configuring collection's managed properties\":{{site.baseurl}}/admin/collection-managed-properties.html .\n\nThe admin can require that certain properties must be non-empty before \"freezing a project\":methods/groups.html#frozen .\n\nh2. Reserved properties\n\nComponents that ship with Arvados may automatically set properties on objects. These usually help track provenance or provide additional link metadata. These properties usually have a key that starts with @arv:@, and can always be set even when the system is configured with a strict vocabulary.\n\ntable(table table-bordered table-condensed).\n|_. Property&nbsp;name|_. Appears&nbsp;on|_. 
Value&nbsp;type|_. Description|\n{% comment %}\nThe arv:git* container properties, and the associated Git commands, primarily come from arvados_cwl.executor.ArvCwlExecutor.get_git_info.\n{% endcomment -%}\n|arv:gitBranch|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the name of the branch checked out (the output of @git rev-parse --abbrev-ref HEAD@)|\n|arv:gitCommitter|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the name and email address of the committer of the most recent commit (the output of @git log --format='%cn <%ce>' -n1 HEAD@)|\n|arv:gitCommit|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the full checksum of the most recent commit (the output of @git log --format='%H' -n1 HEAD@)|\n|arv:gitDate|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the commit date of the most recent commit in RFC 2822 format (the output of @git log --format='%cD' -n1 HEAD@)|\n|arv:gitDescribe|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the name of the most recent tag that is reachable from the most recent commit (the output of @git describe --always --tags@)|\n|arv:gitOrigin|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the URL of the remote named @origin@, if set (the output of @git remote get-url origin@)|\n|arv:gitPath|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with the absolute path of the checkout on the filesystem|\n|arv:gitStatus|container request, collection of type=workflow|string|When @arvados-cwl-runner@ is run from a Git checkout, this property is set with a machine-readable summary of files modified in the checkout since the most recent commit (the output of @git status --untracked-files=no --porcelain@)|\n|arv:workflowMain|collection of type=workflow|string|Set on a collection containing a workflow created by @arvados-cwl-runner --create-workflow@, this is a relative reference inside the collection to the entry point of the workflow.|\n|arv:failed_container_resubmitted|container request|uuid|Set on container requests that were automatically resubmitted by the workflow runner with modified run options, such as when using the @PreemptionBehavior@ or @OutOfMemoryRetry@ CWL extensions.  Set to the uuid of the new, resubmitted container request.|\n\nThe following system properties predate the @arv:@ key prefix, but are still reserved and can always be set.\n\ntable(table table-bordered table-condensed).\n|_. Property name|_. Appears on|_. Value type|_. Description|\n|type|collection|string|Appears on collections to indicate the contents or usage. 
See \"Collection type values\":#collectiontype below for details.|\n|container_request|collection|string|The UUID of the container request that produced an output or log collection.|\n|docker-image-repo-tag|collection|string|For collections containing a Docker image, the repo/name:tag identifier|\n|container_uuid|collection|string|The UUID of the container that produced a collection (set on collections with type=log)|\n|container|collection|string|(legacy) The UUID of the container that produced a collection.  Set on intermediate collections created by arvados-cwl-runner.  Starting with Arvados 2.6.0 arvados-cwl-runner uses @container_uuid@ instead, but older versions may still set the @container@ property.|\n|cwl_input|container_request|object|On an intermediate container request, the CWL workflow-level input parameters used to generate the container request|\n|cwl_output|container_request|object|On an intermediate container request, the CWL workflow-level output parameters collected from the container request|\n|template_uuid|container_request|string|For a workflow runner container request, the workflow record that was used to launch it.|\n|workflowName|container_request|string|For a workflow runner container request, the \"name\" of the workflow record in @template_uuid@ at the time of launch (used for display only).|\n|username|link|string|For a \"can_login\":permission-model.html#links permission link, the unix username on the VM that the user will have.|\n|groups|link|array of string|For a \"can_login\":permission-model.html#links permission link, the unix groups on the VM that the user will be added to.|\n|image_timestamp|link|string|When resolving a Docker image name and multiple links are found with @link_class=docker_image_repo+tag@ and same @link_name@, the @image_timestamp@ is used to determine precedence (most recent wins).|\n|filters|group|array of array of string|Used to define \"filter groups\":projects.html#filtergroup|\n\nh3(#collectiontype). Collection \"type\" values\n\nMeaningful values of the @type@ property.  These are recognized by Workbench when filtering on types of collections from the project content listing.\n\ntable(table table-bordered table-condensed).\n|_. Type|_.Description|\n|log|The collection contains log files from a container run.|\n|output|The collection contains the output of a top-level container run (this is a container request where @requesting_container_uuid@  is null).|\n|intermediate|The collection contains the output of a child container run (this is a container request where @requesting_container_uuid@ is non-empty).|\n|workflow|A collection created by @arvados-cwl-runner --create-workflow@ containing a workflow definition.|\n"
  },
  {
    "path": "doc/api/requests.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: Concepts\ntitle: REST API syntax\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados exposes a REST API using standard HTTP requests.\n\nh3. HTTP Method\n\nUse @GET@ to request individual resources or lists of resources.\n\nUse @POST@ to create new resources.\n\nUse @PUT@ to update an existing resource.\n\nUse @DELETE@ to remove an existing resource.\n\nAs a special case, a @POST@ with the query parameter @_method=GET@ will be treated as a GET request.  This makes it possible to issue @GET@ requests where the query string exceeds the maximum request URI length, by putting the query string in the body of the request.\n\nh3. Request URI\n\nThe URI portion of the request identifies the specific resource to operate on.  For example, operations on \"collections\":{{site.baseurl}}/api/methods/collections.html use the @https://{{ site.arvados_api_host }}/arvados/v1/collections@ request URI prefix.\n\nh3. Authorization header\n\nEvery request must include an API token.  This identifies the user making the request for the purposes of access control.  In addition, tokens may be further \"restricted in scope\":{{site.baseurl}}/api/methods/api_client_authorizations.html#scope to only access certain API endpoints.\n\nAPI requests must provide the API token using the @Authorization@ header in the following format:\n\n<pre>\n$ curl -v -H \"Authorization: Bearer xxxxapitokenxxxx\" https://192.168.5.2:8000/arvados/v1/collections\n> GET /arvados/v1/collections HTTP/1.1\n> ...\n> Authorization: Bearer xxxxapitokenxxxx\n> ...\n</pre>\n\nOn a cluster configured to use an OpenID Connect provider (other than Google) as a login backend, Arvados can be configured to accept an OpenID Connect access token in place of an Arvados API token. OIDC access tokens are also accepted by a cluster that delegates login to another cluster (LoginCluster) which in turn has this feature configured. See @Login.OpenIDConnect.AcceptAccessTokenScope@ in the \"default config.yml file\":{{site.baseurl}}/admin/config.html for details.\n\n<pre>\n$ curl -v -H \"Authorization: Bearer xxxx-openid-connect-access-token-xxxx\" https://192.168.5.2:8000/arvados/v1/collections\n</pre>\n\nh3. Parameters\n\nRequest parameters may be provided in one of two ways.  They may be provided in the \"query\" section of request URI, or they may be provided in the body of the request with application/x-www-form-urlencoded encoding.  If parameters are provided in both places, their values will be merged.  Parameter names must be unique.  If a parameter appears multiple times, the behavior is undefined.\n\nStructured and nested parameter values must be provided as urlencoded JSON.\n\nh3. Result\n\nResults are returned JSON-encoded in the response body.\n\nh3(#errors). Errors\n\nIf a request cannot be fulfilled, the API will return 4xx or 5xx HTTP status code.  Be aware that the API server may return a 404 (Not Found) status for resources that exist but for which the client does not have read access.  The API will also return an error record:\n\ntable(table table-bordered table-condensed).\n|*Parameter name*|*Value*|*Description*|\n|errors|array|An array of one or more error messages|\n|error_token|string|a unique identifier used to correlate the error in the API server logs|\n\nh2. Examples\n\nh3. 
Create a new record\n\n<pre>\n$ curl -v -X POST --data-urlencode 'collection={\"name\":\"empty collection\"}' -H \"Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr\" https://192.168.5.2:8000/arvados/v1/collections | jq .\n> POST /arvados/v1/collections HTTP/1.1\n> User-Agent: curl/7.38.0\n> Host: 192.168.5.2:8000\n> Accept: */*\n> Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr\n> Content-Length: 54\n> Content-Type: application/x-www-form-urlencoded\n>\n} [data not shown]\n< HTTP/1.1 200 OK\n< Content-Type: application/json; charset=utf-8\n< Transfer-Encoding: chunked\n< Connection: keep-alive\n< Status: 200 OK\n< Access-Control-Allow-Origin: *\n< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE\n< Access-Control-Allow-Headers: Authorization\n< Access-Control-Max-Age: 86486400\n< X-UA-Compatible: IE=Edge,chrome=1\n< ETag: \"2ec9ef5151c1f7a1486ad169c33ae462\"\n< Cache-Control: max-age=0, private, must-revalidate\n< Set-Cookie: _server_session=BAh7BkkiD3Nlc3Npb25faWQGOgZFVEkiJTIwMjQ1NTE5YmEwMzU1MGZkMTBmYmY1YzllY2ZiMjFlBjsAVA%3D%3D--653bc9c20899d48ee8523e18d9a4c1cde0702577; path=/; HttpOnly\n< X-Request-Id: 56aa10bc49097f3b44d3ed946bf0e61e\n< X-Runtime: 0.049951\n< X-Powered-By: Phusion Passenger 4.0.41\n< Date: Fri, 28 Oct 2016 19:20:09 GMT\n< Server: nginx/1.4.7 + Phusion Passenger 4.0.41\n<\n{\n  \"kind\": \"arvados#collection\",\n  \"etag\": \"c5ifrv1ox2tu6alb559ymtkb7\",\n  \"uuid\": \"962eh-4zz18-m1ma0mxxfg3mbcc\",\n  \"owner_uuid\": \"962eh-tpzed-000000000000000\",\n  \"created_at\": \"2016-10-28T19:20:09.320771531Z\",\n  \"modified_by_user_uuid\": \"962eh-tpzed-000000000000000\",\n  \"modified_at\": \"2016-10-28T19:20:09.319661000Z\",\n  \"name\": \"empty collection\",\n  \"description\": null,\n  \"properties\": {},\n  \"portable_data_hash\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n  \"manifest_text\": \"\",\n  \"replication_desired\": null,\n  \"replication_confirmed\": null,\n  \"replication_confirmed_at\": null,\n  \"expires_at\": null\n}\n</pre>\n\nh3. 
Delete a record\n\n<pre>\n$ curl -X DELETE -v -H \"Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr\" https://192.168.5.2:8000/arvados/v1/collections/962eh-4zz18-m1ma0mxxfg3mbcc | jq .\n> DELETE /arvados/v1/collections/962eh-4zz18-m1ma0mxxfg3mbcc HTTP/1.1\n> User-Agent: curl/7.38.0\n> Host: 192.168.5.2:8000\n> Accept: */*\n> Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr\n>\n< HTTP/1.1 200 OK\n< Content-Type: application/json; charset=utf-8\n< Transfer-Encoding: chunked\n< Connection: keep-alive\n< Status: 200 OK\n< Access-Control-Allow-Origin: *\n< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE\n< Access-Control-Allow-Headers: Authorization\n< Access-Control-Max-Age: 86486400\n< X-UA-Compatible: IE=Edge,chrome=1\n< ETag: \"1e8f72802cf1a6d0a5c4a1ebbfcc46a9\"\n< Cache-Control: max-age=0, private, must-revalidate\n< Set-Cookie: _server_session=BAh7BkkiD3Nlc3Npb25faWQGOgZFVEkiJTc2NDYyY2M0NTNlNmU3M2Y2M2E3YmFiMWQ1MTEyZGZkBjsAVA%3D%3D--d28c7dd640bd24e2b12f01e77088072138dcf145; path=/; HttpOnly\n< X-Request-Id: e66fd3ab825bdb87301f5456161fb641\n< X-Runtime: 0.028788\n< X-Powered-By: Phusion Passenger 4.0.41\n< Date: Fri, 28 Oct 2016 19:33:31 GMT\n< Server: nginx/1.4.7 + Phusion Passenger 4.0.41\n<\n{\n  \"kind\": \"arvados#collection\",\n  \"etag\": \"c5ifrv1ox2tu6alb559ymtkb7\",\n  \"uuid\": \"962eh-4zz18-m1ma0mxxfg3mbcc\",\n  \"owner_uuid\": \"962eh-tpzed-000000000000000\",\n  \"created_at\": \"2016-10-28T19:20:09.320771000Z\",\n  \"modified_by_user_uuid\": \"962eh-tpzed-000000000000000\",\n  \"modified_at\": \"2016-10-28T19:20:09.319661000Z\",\n  \"name\": \"empty collection\",\n  \"description\": null,\n  \"properties\": {},\n  \"portable_data_hash\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n  \"manifest_text\": \"\",\n  \"replication_desired\": null,\n  \"replication_confirmed\": null,\n  \"replication_confirmed_at\": null,\n  \"expires_at\": null\n}\n</pre>\n\nh3. 
Get a specific record\n\n<pre>\n$ curl -v -H \"Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr\" https://192.168.5.2:8000/arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km | jq .\n> GET /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km HTTP/1.1\n> User-Agent: curl/7.38.0\n> Host: 192.168.5.2:8000\n> Accept: */*\n> Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr\n>\n< HTTP/1.1 200 OK\n< Content-Type: application/json; charset=utf-8\n< Transfer-Encoding: chunked\n< Connection: keep-alive\n< Status: 200 OK\n< Access-Control-Allow-Origin: *\n< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE\n< Access-Control-Allow-Headers: Authorization\n< Access-Control-Max-Age: 86486400\n< X-UA-Compatible: IE=Edge,chrome=1\n< ETag: \"fec2ddf433a352e5a2b5d356abd6d3d4\"\n< Cache-Control: max-age=0, private, must-revalidate\n< X-Request-Id: 40b447507ff202ae9a0b0b3e0ebe98da\n< X-Runtime: 0.011404\n< X-Powered-By: Phusion Passenger 4.0.41\n< Date: Fri, 28 Oct 2016 18:59:09 GMT\n< Server: nginx/1.4.7 + Phusion Passenger 4.0.41\n<\n{\n  \"kind\": \"arvados#collection\",\n  \"etag\": \"3mmn0s9e1z5s5opfofmtb9k8p\",\n  \"uuid\": \"962eh-4zz18-xi32mpz2621o8km\",\n  \"owner_uuid\": \"962eh-tpzed-000000000000000\",\n  \"created_at\": \"2016-10-27T14:47:43.792587000Z\",\n  \"modified_by_user_uuid\": \"962eh-tpzed-000000000000000\",\n  \"modified_at\": \"2016-10-27T14:47:43.792166000Z\",\n  \"name\": \"Saved at 2016-10-27 14:47:43 UTC by peter@debian\",\n  \"description\": null,\n  \"properties\": {},\n  \"portable_data_hash\": \"93a45073511646a5c3e2f4953fcf6f61+116\",\n  \"manifest_text\": \". eff999f3b5158331eb44a9a93e3b36e1+67108864+Aad3839bea88bce22cbfe71cf4943de7dab3ea52a@5826180f db141bfd11f7da60dce9e5ee85a988b8+34038725+Ae8f48913fed782cbe463e0499ab37697ee06a2f8@5826180f 0:101147589:rna.SRR948778.bam\\n\",\n  \"replication_desired\": null,\n  \"replication_confirmed\": null,\n  \"replication_confirmed_at\": null,\n  \"expires_at\": null\n}\n</pre>\n\nh3. 
List records and filter by date\n\n(Note: the returned result is truncated.)\n\n<pre>\n$ curl -v -G --data-urlencode 'filters=[[\"created_at\",\">\",\"2016-11-08T21:38:24.124834000Z\"]]' -H \"Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr\" https://192.168.5.2:8000/arvados/v1/collections | jq .\n> GET /arvados/v1/collections?filters=%5B%5B%22created_at%22%2C%22%3E%22%2C%222016-11-08T21%3A38%3A24.124834000Z%22%5D%5D HTTP/1.1\n> User-Agent: curl/7.38.0\n> Host: 192.168.5.2:8000\n> Accept: */*\n> Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr\n>\n< HTTP/1.1 200 OK\n< Content-Type: application/json; charset=utf-8\n< Transfer-Encoding: chunked\n< Connection: keep-alive\n< Status: 200 OK\n< Access-Control-Allow-Origin: *\n< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE\n< Access-Control-Allow-Headers: Authorization\n< Access-Control-Max-Age: 86486400\n< X-UA-Compatible: IE=Edge,chrome=1\n< ETag: \"76345ef24952f073acc3a0c550241d4e\"\n< Cache-Control: max-age=0, private, must-revalidate\n< X-Request-Id: d34b8ede4ffc707d8ed172dc2f47ff5e\n< X-Runtime: 0.012727\n< X-Powered-By: Phusion Passenger 4.0.41\n< Date: Fri, 28 Oct 2016 19:08:52 GMT\n< Server: nginx/1.4.7 + Phusion Passenger 4.0.41\n<\n{\n  \"kind\": \"arvados#collectionList\",\n  \"etag\": \"\",\n  \"self_link\": \"\",\n  \"offset\": 0,\n  \"limit\": 100,\n  \"items\": [\n    {\n      \"kind\": \"arvados#collection\",\n      \"etag\": \"bvgrrsg63zsenb9wnpnp0nsgl\",\n      \"uuid\": \"962eh-4zz18-ybggo9im899vv60\",\n      \"owner_uuid\": \"962eh-tpzed-000000000000000\",\n      \"created_at\": \"2016-11-08T21:47:36.937106000Z\",\n      \"modified_by_user_uuid\": \"962eh-tpzed-000000000000000\",\n      \"modified_at\": \"2016-11-08T21:47:36.936625000Z\",\n      \"name\": \"Log from cwl-runner job 962eh-8i9sb-45jww0k15fi5ldd\",\n      \"description\": null,\n      \"properties\": {},\n      \"portable_data_hash\": \"a7820b94717eff86229927565fedbd72+85\",\n      \"replication_desired\": null,\n      \"replication_confirmed\": null,\n      \"replication_confirmed_at\": null,\n      \"expires_at\": null\n    },\n   ...\n    {\n      \"kind\": \"arvados#collection\",\n      \"etag\": \"2fa07dx52lux8wa1loehwyrc5\",\n      \"uuid\": \"962eh-4zz18-37i1tfl5de5ild9\",\n      \"owner_uuid\": \"962eh-tpzed-000000000000000\",\n      \"created_at\": \"2016-11-08T21:38:46.717798000Z\",\n      \"modified_by_user_uuid\": \"962eh-tpzed-000000000000000\",\n      \"modified_at\": \"2016-11-08T21:38:46.717409000Z\",\n      \"name\": null,\n      \"description\": null,\n      \"properties\": {},\n      \"portable_data_hash\": \"9d43d4c8328640446f6e252cda584e7e+54\",\n      \"replication_desired\": null,\n      \"replication_confirmed\": null,\n      \"replication_confirmed_at\": null,\n      \"expires_at\": null\n    }\n  ],\n  \"items_available\": 99\n}\n</pre>\n\nh3. 
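List records using a POST request\n\nAs described under \"HTTP Method\" above, a @POST@ request with the query parameter @_method=GET@ is treated as a @GET@ request; this is useful when filters would make the query string exceed the maximum request URI length.  The following is an illustrative sketch using the same hypothetical token and host as the other examples on this page (response omitted).\n\n<pre>\n$ curl -v -X POST --data-urlencode 'filters=[[\"created_at\",\">\",\"2016-11-08T21:38:24.124834000Z\"]]' -H \"Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr\" 'https://192.168.5.2:8000/arvados/v1/collections?_method=GET' | jq .\n</pre>\n\nh3. 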
Update a field\n\n<pre>\n$ curl -v -X PUT --data-urlencode 'collection={\"name\":\"rna.SRR948778.bam\"}' -H \"Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr\" https://192.168.5.2:8000/arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km | jq .\n> PUT /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km HTTP/1.1\n> User-Agent: curl/7.38.0\n> Host: 192.168.5.2:8000\n> Accept: */*\n> Authorization: Bearer oz0os4nyudswvglxhdlnrgnuelxptmj7qu7dpwvyz3g9ocqtr\n> Content-Length: 53\n> Content-Type: application/x-www-form-urlencoded\n>\n} [data not shown]\n< HTTP/1.1 200 OK\n< Content-Type: application/json; charset=utf-8\n< Transfer-Encoding: chunked\n< Connection: keep-alive\n< Status: 200 OK\n< Access-Control-Allow-Origin: *\n< Access-Control-Allow-Methods: GET, HEAD, PUT, POST, DELETE\n< Access-Control-Allow-Headers: Authorization\n< Access-Control-Max-Age: 86486400\n< X-UA-Compatible: IE=Edge,chrome=1\n< ETag: \"fbb50d2847426eab793e3fcf346ca9eb\"\n< Cache-Control: max-age=0, private, must-revalidate\n< Set-Cookie: _server_session=BAh7BkkiD3Nlc3Npb25faWQGOgZFVEkiJWI3NjFjMzVjMGI5OGExYmNjZDg0ZTg5MjZhMzcwMDE1BjsAVA%3D%3D--0e005d71fad15cb366e47361c38474b7447ba155; path=/; HttpOnly\n< X-Request-Id: 76d3cb3c0995af6133b0a73a64f57354\n< X-Runtime: 0.030756\n< X-Powered-By: Phusion Passenger 4.0.41\n< Date: Fri, 28 Oct 2016 19:15:16 GMT\n< Server: nginx/1.4.7 + Phusion Passenger 4.0.41\n<\n{\n  \"kind\": \"arvados#collection\",\n  \"etag\": \"51509hhxo9qqjxqewnoz1b7og\",\n  \"uuid\": \"962eh-4zz18-xi32mpz2621o8km\",\n  \"owner_uuid\": \"962eh-tpzed-000000000000000\",\n  \"created_at\": \"2016-10-27T14:47:43.792587000Z\",\n  \"modified_by_user_uuid\": \"962eh-tpzed-000000000000000\",\n  \"modified_at\": \"2016-10-28T19:15:16.137814000Z\",\n  \"name\": \"rna.SRR948778.bam\",\n  \"description\": null,\n  \"properties\": {},\n  \"portable_data_hash\": \"93a45073511646a5c3e2f4953fcf6f61+116\",\n  \"manifest_text\": \". eff999f3b5158331eb44a9a93e3b36e1+67108864+Acca57af82cc18c5dfa47bdfd16e335fccd09dfa5@582618c4 db141bfd11f7da60dce9e5ee85a988b8+34038725+A7764f122f41f92c2d5bde1852fcdd1bea5f8bd78@582618c4 0:101147589:rna.SRR948778.bam\\n\",\n  \"replication_desired\": null,\n  \"replication_confirmed\": null,\n  \"replication_confirmed_at\": null,\n  \"expires_at\": null\n}\n</pre>\n"
  },
  {
    "path": "doc/api/resources.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\nnavmenu: Concepts\ntitle: Common resource fields\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis page describes the common attributes shared by most or all Arvados resources.\n\nh2(#resource). Resource\n\ntable(table table-bordered table-condensed).\n|_. Attribute |_. Type |_. Description |_. Example|\n|uuid|string|universally unique object identifier.  Set on @create@.|@mk2qn-4zz18-w3anr2hk2wgfpuo@|\n|owner_uuid|string|UUID of owner (must be a User or Group), set on @create@.  Controls who may access the resource. Ownership may be changed explicitly with @update@, see \"permission model\":{{site.baseurl}}/api/permission-model.html for details.|@mk2qn-tpzed-a4lcehql0dv2u25@|\n|name|string|Human-assigned name.  Not present on all object types, check individual API page.  Uniqueness constraint varys by object type.||\n|description|string|Free text description of the object.  Not present on all object types, check individual API page.  May be HTML formatted, \"see below for valid HTML tags and attributes\":#descriptions .||\n|created_at|datetime|When resource was created.  Set on @create@.|@2013-01-21T22:17:39Z@|\n|modified_at|datetime|When resource was last modified.  Set on @create@ and @update@.|@2013-01-25T22:29:32Z@|\n|modified_by_user_uuid|string|The owner of the API token used to authenticate the @create@ or @update@ request.|@mk2qn-tpzed-a4lcehql0dv2u25@|\n|kind|string|@arvados#{resource_type}@|@arvados#collection@|\n|etag|string|The ETag[1] of the resource|@1xlmizzjq7wro3dlb2dirf505@|\n\nh2. Object UUID\n\nEach object is assigned a UUID.  This has the format @aaaaa-bbbbb-ccccccccccccccc@.\n\n# The first field (@aaaaa@ in the example) is the site prefix.  This is unique to a specific Arvados installation.\n# The second field (@bbbbb@ in the example) is the object type.\n# The third field (@ccccccccccccccc@ in the example) uniquely identifies the object.\n\nh2(#descriptions). Descriptions\n\n{% include 'html_tags' %}\n\nh2. Timestamps\n\nAll Arvados timestamps follow ISO 8601 datetime format with fractional seconds (microsecond precision).  All timestamps are UTC.  Date format: @YYYY-mm-ddTHH:MM:SS.SSSSZ@ example date: @2016-11-08T21:38:24.124834000Z@.\n\nh2. ETags\n\nfn1. Each response includes an ETag, a string which changes when the resource changes.  Clients can use this to check whether a resource has changed since they last retrieved it.  If a previous ETag is provided along with a request, and the resource has not changed since, the server may return a \"not modified\" response.\n"
  },
  {
    "path": "doc/api/tokens.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: api\ntitle: API Authorization\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nAll requests to the API server must have an API token.  API tokens can be issued by going though the login flow, or created via the API.  At this time, only browser based applications can perform login from email/password.  Command line applications and services must use an API token provided via the @ARVADOS_API_TOKEN@ environment variable or configuration file.\n\nh2. Login\n\nBrowser based applications can log in using one of the two possible flows:\n\nh3. Authenticate via a third party\n\n# The web application instructs the user to click on a link to the @/login@ endpoint on the API server.  This link should include the @return_to@ parameter in the query portion of the URL.  For example @https://{{ site.arvados_api_host }}/login?return_to=XXX@ where @return_to=XXX@ is a page in the web application.\n# The @/login@ endpoint redirects the user to the configured third party authentication provider (e.g. Google or other OpenID Connect provider).\n# The user logs in to the third party provider, then they are redirected back to the API server.\n# The API server authenticates the user, issues a new API token, and redirects the browser to the URL provided in @return_to=XXX@ with the addition of @?api_token=xxxxapitokenxxxx@.\n# The web application gets the authorization token from the query and uses it to access the API server on the user's behalf.\n\nh3. Direct username/password authentication\n\n# The web application presents username and password fields.\n# When the submit button is pressed, using Javascript, the browser sends a POST request to @/arvados/v1/users/authenticate@\n** The request payload type is @application/javascript@\n** The request body is a JSON object with @username@ and @password@ fields.\n# The API server receives the username and password, authenticates them with the upstream provider (such as LDAP or PAM), and responds with the @api_client_authorization@ object for the new API token.\n# The web application receives the authorization token in the response and uses it to access the API server on the user's behalf.\n\nh3. Using an OpenID Connect access token\n\nA cluster that uses OpenID Connect as a login provider can be configured to accept OIDC access tokens as well as Arvados API tokens (this is disabled by default; see @Login.OpenIDConnect.AcceptAccessToken@ in the \"default config.yml file\":{{site.baseurl}}/admin/config.html).\n# The client obtains an access token from the OpenID Connect provider via some method outside of Arvados.\n# The client presents the access token with an Arvados API request (e.g., request header @Authorization: Bearer xxxxaccesstokenxxxx@).\n# Depending on configuration, the API server decodes the access token (which must be a signed JWT) and confirms that it includes the required scope (see @Login.OpenIDConnect.AcceptAccessTokenScope@ in the \"default config.yml file\":{{site.baseurl}}/admin/config.html).\n# The API server uses the provider's UserInfo endpoint to validate the presented token.\n# If the token is valid, it is cached in the Arvados database and accepted in subsequent API calls for the next 10 minutes.\n\nh3. Diagram\n\n!{{site.baseurl}}/images/Session_Establishment.svg!\n\nh2. User activation\n\n\"Creation and activation of new users is described here.\":{{site.baseurl}}/admin/user-management.html\n\nh2. 
Creating tokens via the API\n\nThe browser login method above issues a new token.  Using that token, it is possible to make API calls to create additional tokens.  To do so, use the @create@ method of the \"API client authorizations\":{{site.baseurl}}/api/methods/api_client_authorizations.html resource.\n\nh2(#scopes). Scopes\n\nScopes can restrict a token so it may only access certain resources.  This is in addition to normal permission checks for the user associated with the token.\n\nEach entry in scopes consists of a @request_method@ and @request_path@.  The @request_method@ is an HTTP method (one of @GET@, @POST@, @PATCH@ or @DELETE@) and @request_path@ is the request URI.  A given request is permitted if it matches a scope exactly, or if the scope ends with @/@ and the request string is a prefix of the scope.\n\nAs a special case, a scope of @[\"all\"]@ allows all resources.  This is the default if no scope is given.\n\nA valid token is always allowed to issue a request to \"@GET /arvados/v1/api_client_authorizations/current@\":{{ site.baseurl }}/api/methods/api_client_authorizations.html#current regardless of its scopes.\n\nUsing scopes is also described on the \"Securing API access with scoped tokens\":{{site.baseurl}}/admin/scoped-tokens.html page of the admin documentation.\n\nh3. Scope examples\n\nA scope of @GET /arvados/v1/collections@ permits listing collections.\n\n* Requests with different methods, such as creating a new collection using @POST /arvados/v1/collections@, will be rejected.\n* Requests to access other resources, such as @GET /arvados/v1/groups@, will be rejected (except \"@GET /arvados/v1/api_client_authorizations/current@\":{{ site.baseurl }}/api/methods/api_client_authorizations.html#current, which is always allowed).\n* Be aware that requests for specific records, such as @GET /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km@, will also be rejected.  This is because the scope @GET /arvados/v1/collections@ does not end in @/@.\n\nA scope of @GET /arvados/v1/collections/@ (with @/@ suffix) will permit access to individual collections.\n\n* The request @GET /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km@ will succeed.\n* Be aware that requests for listing @GET /arvados/v1/collections@ (no @/@ suffix) will be rejected, because it does not match the rule @GET /arvados/v1/collections/@.\n* A listing request @GET /arvados/v1/collections/@ will have the trailing @/@ suffix trimmed before the scope check; as a result, it will not match the rule @GET /arvados/v1/collections/@.\n\nTo allow both listing objects and requesting individual objects, include both in the scope: @[\"GET /arvados/v1/collections\", \"GET /arvados/v1/collections/\"]@\n\nA narrow scope such as @GET /arvados/v1/collections/962eh-4zz18-xi32mpz2621o8km@ will disallow listing objects as well as disallow requesting any object other than those listed in the scope.\n"
  },
  {
    "path": "doc/architecture/dispatchcloud.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: architecture\ntitle: Dispatching containers to cloud VMs\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe arvados-dispatch-cloud component runs Arvados user containers on generic public cloud infrastructure by automatically creating and destroying VMs (“instances”) of various sizes according to demand, preparing the instances’ runtime environments, and running containers on them.\n\nThis does not use a cloud provider’s container-execution service.\n\nh2. Overview\n\nIn this diagram, the black edges show interactions involved in starting a VM instance and running a container. The blue edges show the “container shell” communication channel.\n\n!{max-width:40em}{{site.baseurl}}/architecture/dispatchcloud.svg!\n\n{% comment %}\n# svg generated using https://dreampuf.github.io/\ndigraph {\n    subgraph cluster_cloudvm {\n        node [color=black] [fillcolor=white] [style=filled];\n        style = filled;\n        color = lightgrey;\n        label = \"cloud instance (VM)\";\n        \"SSH server\" -> \"crunch-run\" [label = \"start crunch-run\"];\n        \"crunch-run\" -> docker [label = \"create container\"];\n        \"crunch-run\" -> docker [label = \"shell\"] [color = blue] [fontcolor = blue];\n        \"crunch-run\" -> container [label = \"tcp/http\"] [color = blue] [fontcolor = blue];\n        docker -> container;\n    }\n    \"cloud provider\" [shape=box] [style=dashed];\n    dispatcher -> controller [label = \"get container queue\"];\n    dispatcher -> \"cloud provider\" [label = \"create/destroy/list VMs\"];\n    \"cloud provider\" -> \"SSH server\" [label = \"add authorized_keys\"];\n    \"crunch-run\" -> controller [label = \"update\\ngateway ip:port,\\ncontainer state,\\noutput, ...\"];\n    client -> controller [label = \"shell/tcp/http (https tunnel)\"] [color = blue] [fontcolor = blue];\n    controller -> \"crunch-run\" [label = \"shell/tcp/http (https tunnel)\"] [color = blue] [fontcolor = blue];\n    dispatcher -> \"SSH server\" [label = \"start crunch-run\"];\n}\n{% endcomment %}\n\nh2. Scheduling\n\nThe dispatcher periodically polls the \"containers API\":{{site.baseurl}}/api/methods/containers.html to get a list of containers that are ready to run. Whenever this list changes, the dispatcher runs a scheduling loop that determines the set of suitable instance types for each container, allocates the highest priority containers to instances with sufficient unused resources, requests new instances if needed, and shuts down instances that have been idle for longer than the configured idle timeout. It will run multiple containers on an instance if it has enough RAM and CPUs to do so _and_ the instance type is suitable for each container individually.\n\nThe lowest-priced instance type with enough resources to run a given container is always suitable. Other instance types that have enough resources to run the container, and whose prices are within @MaximumPriceFactor@ of that lowest-priced type, are also suitable. 
The dispatcher will select a suitable instance type other than the lowest-priced one when:\n* the lowest-priced instance that is _already running or requested,_ and has sufficient resources, is one of the suitable types (_e.g.,_ it just finished running a container that needed a higher-priced type), whereas in order to use the lowest-priced type the dispatcher would need to request a new instance, or\n* the cloud provider indicates that the lowest-priced suitable type is not available (_e.g.,_ due to a per-instance-type quota restriction).\n\nh2. Creating instances\n\nWhen creating a new instance, the dispatcher uses the cloud provider’s metadata feature to add a tag with key “InstanceSetID” and a value derived from its Arvados authentication token. This enables the dispatcher to recognize and reconnect to existing instances that belong to it, and continue monitoring existing containers, after a restart or upgrade.\n\nWhen using the Azure cloud service, the dispatcher needs to first create a new network interface, then attach it to a new instance. The network interface is also tagged with “InstanceSetID”.\n\nIf the cloud provider returns a rate-limiting error when creating a new instance, the dispatcher avoids requesting new instances for a short period, and shuts down idle nodes more aggressively (i.e., without waiting for the usual idle timeout to elapse) until a new instance is successfully created.\n\nh2. Recovering state after a restart\n\nRestarting the dispatcher does not interrupt containers that are already running. When the dispatcher starts up, it gets the cloud provider’s current list of instances that have the expected InstanceSetID tag value. It ignores instances without that tag, so it won’t interfere with other VM instances in the same cloud account. It runs the boot probe command on each instance, checks for containers that were started by a previous invocation and are still running, and resumes monitoring. Before dispatching any new containers to a previously existing instance, it ensures the crunch-run program is updated if needed.\n\nh2. Instance boot process\n\nWhen the cloud provider indicates that a new instance has been created, the dispatcher connects to the instance’s SSH service (see “instance control channel” below) and executes the configured boot probe command. If this fails, the dispatcher retries until the configured boot timeout is reached, then shuts down the instance. When the boot probe succeeds, the dispatcher copies the crunch-run program to the instance, and runs it to check for running containers before reporting the instance’s state as “idle” or “busy”. (Normally of course a freshly booted instance has no containers running, but this covers the case where the dispatcher itself has restarted and containers submitted by the previous dispatcher process are still running.)\n\nThe dispatcher and crunch-run programs are both packaged in a single executable file: when dispatcher copies crunch-run to an instance, it is really copying itself. This ensures the dispatcher is always using the version of crunch-run that it expects.\n\nh2. Boot probe command\n\nThe purpose of the boot probe command is to ensure the dispatcher does not try to schedule containers on an instance before the instance is ready, even if its SSH daemon comes up early in the boot process. The default boot probe command, @systemctl is-system-running@, is appropriate for images that use @systemd@ to manage the boot process. 
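As an illustrative sketch, the relevant settings live under @Containers.CloudVMs@ in the cluster config (hypothetical cluster id @xxxxx@; the values shown are examples):\n\n<pre>\nClusters:\n  xxxxx:\n    Containers:\n      CloudVMs:\n        # Command the dispatcher runs on each new instance until it succeeds.\n        BootProbeCommand: \"systemctl is-system-running\"\n        # Give up and shut down the instance if the probe has not succeeded\n        # within this time.\n        TimeoutBooting: 10m\n</pre>\n\n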
Another approach is to use a custom startup script in the VM image that writes a file when it finishes, and a boot probe command that checks for that file, such as @cat /var/run/boot.complete@.\n\nh2. Automatic instance shutdown\n\nNormally, the dispatcher shuts down any instance that has remained idle for 1 minute (see the @TimeoutIdle@ configuration), but there are some exceptions to this rule. If the cloud provider returns a quota error when trying to create a new instance, the dispatcher shuts down idle nodes right away, in case the idle nodes are contributing to the quota. Also, the operator can use the management API to set an instance’s idle behavior to “drain” or “hold”. “Drain” shuts down the instance as soon as it becomes idle, which can be used to recycle a suspect node without interrupting a running container. “Hold” keeps the instance alive indefinitely without scheduling additional containers on it, which can be used to investigate problems like a failed startup script.\n\nEach instance is tagged with its current idle behavior (using the tag name “IdleBehavior”), which makes it visible in the cloud provider’s console and ensures the behavior is retained if the dispatcher restarts.\n\nh2. Management API\n\nThe dispatcher provides an HTTP management interface, which provides the operator with more visibility and control for purposes of troubleshooting and monitoring. APIs are provided to return details of current VM instances and running/scheduled containers as seen by the dispatcher, immediately terminate containers and instances, and control the on-idle behavior of instances. This interface also provides Prometheus metrics. See the \"cloud dispatcher management API\":{{site.baseurl}}/api/dispatch.html documentation for details.\n\nh2. Instance control channel (SSH)\n\nThe dispatcher uses a multiplexed SSH connection to monitor instance boot progress, install the crunch-run supervisor program, start and stop containers, and detect crashed containers and failing instances. It establishes a persistent SSH connection to each cloud instance when the instance first appears, retrying/reconnecting as needed.\n\nCloud VMs typically generate a random SSH host key at boot time, making host key verification impossible. To provide some assurance the dispatcher is connecting to the intended instance, when it creates a new instance the dispatcher generates a random “instance secret”, uses the cloud provider’s bootstrap command feature to save it in @/var/run/arvados-instance-secret@ on the new instance, and executes @cat /var/run/arvados-instance-secret@ to verify the instance’s identity when first connecting to its SSH server. Each instance is also tagged with its instance secret, so it can still be verified after a dispatcher restart.\n\nh2. Container communication channel (https tunnel)\n\nThe crunch-run program runs a gateway server which facilitates the “container shell” feature without sending traffic through the dispatcher process. The gateway server accepts TLS connections from arvados-controller on a dynamic TCP port (typically in the range 32768-60999, see @sysctl net.ipv4.ip_local_port_range@). 
Crunch-run saves the selected port, along with the external IP address of the VM instance as seen by the dispatcher, in the @gateway_address@ field in the container record so arvados-controller can connect to it.\n\nOn the client host (typically a shell node or a user’s workstation) the @arvados-client shell@ command sends an https “connect” request to arvados-controller, which sends an https “connect” request to the gateway server. These tunnels convey SSH protocol traffic between the user’s SSH client and crunch-run’s built-in SSH server, which uses @docker exec@ to run commands inside the container.\n\nArvados-controller and the crunch-run gateway server authenticate each other using a self-signed certificate and a shared secret based on the cluster-wide @SystemRootToken@. If that token changes (and the dispatcher restarts to load the new token) while a container is running, the container will stop accepting container shell traffic.\n\nh2. Scaling\n\nArchitecturally, the dispatcher is _designed_ to accommodate multiple concurrent dispatcher processes on multiple hosts, each using a different authorization token, but such a configuration is not yet supported. Currently, each cluster should run a single dispatcher process. A single process can support thousands of concurrent VM instances.\n"
  },
  {
    "path": "doc/architecture/federation.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: architecture\ntitle: \"Federation\"\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados federation enables clients to transparently read, create and manipulate objects and collections across clusters in different regions or organizations.  Federation supports workfows that integrate and analyze data across multiple clusters by sending computation to where the data is, reducing the technical and legal barriers to analyzing large, sensitive data sets.\n\n_This feature is under development.  Support for federation is limited to certain types of requests.  The behaviors described here should not be interpreted as a stable API._\n\nDetailed configuration information is available on the \"federation admin section\":{{site.baseurl}}/admin/federation.html.\n\nh2(#cluster_id). Cluster identifiers\n\nClusters are identified by a five-digit alphanumeric id (numbers and lowercase letters).  There are 36 ^5^ = 60466176 possible cluster identifiers.\n\n* For automated test purposes, use \"z****\"\n* For experimental/local-only/private clusters that won't ever be visible on the public Internet, use \"x****\"\n* For long-lived clusters, we recommend reserving a cluster id.  Contact \"info@curii.com\":mailto:info@curii.com for more information.\n\nCluster identifiers are mapped API server hosts one of two ways:\n\n* Through DNS resolution, under the @arvadosapi.com@ domain.  For example, the API server for the cluster @pirca@ can be found at @pirca.arvadosapi.com@.  To register a cluster id for free under @arvadosapi.com@, contact \"info@curii.com\":mailto:info@curii.com\n* Through explicit configuration:\n\nThe @RemoteClusters@ section of @/etc/arvados/config.yml@ (for arvados-controller)\n\n<pre>\nClusters:\n  clsr1:\n    RemoteClusters:\n      clsr2:\n        Host: api.cluster2.example\n        Proxy: true\n      clsr3:\n        Host: api.cluster3.example\n        Proxy: true\n</pre>\n\nIn this example, the cluster @clsr1@ is configured to contact @api.cluster2.example@ for requests involving @clsr2@ and @api.cluster3.example@ for requests involving @clsr3@.\n\nh2(#identity). Identity\n\nThe goal is for a federated user to have a single identity across the cluster federation.  This identity is a user account on a specific \"home cluster\".  When arvados-controller contacts a remote cluster, the remote cluster verifies the user's identity (see below) and then creates a mirror of the user account with the same uuid of the user's home cluster.  On the remote cluster, permissions can then be granted to the federated user, and the federated user can create and own objects.\n\nh3. Peer federation: Authenticating remote users with salted tokens\n\nWhen making a request to the home cluster, authorization is established by looking up the API token in the @api_client_authorizations@ table to determine the user identity.  When making a request to a remote cluster, we need to provide an API token which can be used to establish the user's identity.  The remote cluster will connect back to the home cluster to determine if the token valid and the user it corresponds to.  However, we do not want to send along the same API token used for the original request.  If the remote cluster is malicious or compromised, sending along user's regular token would compromise the user account on the home cluster.  Instead, the controller sends a \"salted token\".  
The salted token is restricted to only fetching the user account and group membership.  The salted token consists of the uuid of the token in @api_client_authorizations@ and the SHA1 HMAC of the original token and the cluster id of the remote cluster.  To verify the token, the remote cluster contacts the home cluster and provides the token uuid, the hash, and its cluster id.  The home cluster uses the uuid to look up the token and re-computes the SHA1 HMAC of the original token and cluster id.  If that hash matches, then the token is valid.  To avoid having to re-validate the token on every request, it is cached for a short period.\n\nThe security properties of this scheme are:\n\n* The salted token does not grant access on the home cluster beyond what is needed to verify user identity\n* Revoking a token on the home cluster also revokes it for remote clusters (after the cache period)\n* A salted token given to a malicious/compromised cluster cannot be used to gain access to the user account on another remote cluster\n\nh3. LoginCluster federation: Centralized user database\n\nIn a LoginCluster federation, there is a central \"home\" cluster, called the LoginCluster, and one or more \"satellite\" clusters.  The satellite clusters delegate their user management to the LoginCluster.  Unlike the peer federation, satellite clusters implicitly trust the home cluster, so the \"salted token\" scheme is not used.  Users arriving at a satellite cluster are redirected to the home cluster for login, the user token is issued by the LoginCluster, and then the user is sent back to the satellite cluster.  Tokens issued by the LoginCluster are accepted by all clusters in the federation.  All requests for user records on a satellite cluster are forwarded to the LoginCluster.\n\nh2(#retrieval). Federated records\n\n!(full-width){{site.baseurl}}/images/arvados_federation.svg!\n\nh3. Retrieving and updating records\n\nIn the REST API, GET and PUT/PATCH requests are used to fetch and update records.\n\n# the client begins by making a request to the home arvados-controller to retrieve or update a specific record owned by a remote cluster\n# arvados-controller determines the 5-character cluster id from the first part of the uuid string\n# arvados-controller determines the API server host corresponding to the cluster id\n# arvados-controller creates a \"salted\" token by combining the API token used for the request and the target cluster id\n# arvados-controller contacts the remote cluster to request the desired record, providing the salted token\n# the remote cluster verifies the salted token\n# the remote cluster processes the request and returns a response\n# arvados-controller forwards the response to the client\n\nh3. Creating records\n\nIn the REST API, POST requests create new records, so there is no uuid to use for the cluster id.  In this case, to create an object on a remote cluster, the request includes the @cluster_id@ parameter.  The flow is otherwise the same as described above.\n\nh3. Collections and Keep block retrieval\n\nEach collection record has @manifest_text@, which describes how to reassemble keep blocks into files as described in the \"Manifest format\":{{site.baseurl}}/architecture/manifest-format.html.  Each block identifier in the manifest has an added signature which is used to confirm permission to read the block.  
To read a block from a keepstore server, the client must provide the block identifier, the signature, and the same API token used to retrieve the collection record.\n\nSee \"Federation signatures\":{{site.baseurl}}/architecture/manifest-format.html#federationsignatures for details on how federation affects block signatures.\n"
  },
  {
    "path": "doc/architecture/hpc.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: architecture\ntitle: Dispatching containers to HPC\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados can be configured to run containers on an HPC cluster using Slurm or LSF, as an alternative to \"dispatching to cloud VMs\":dispatchcloud.html.\n\nIn this configuration, the appropriate Arvados dispatcher service -- @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@ -- picks up each container as it appears in the Arvados queue and submits a short shell script as a batch job to the HPC job queue. The shell script executes the @crunch-run@ container supervisor which retrieves the container specification from the Arvados controller, starts an arv-mount process, runs the container using @docker exec@ or @singularity exec@, and sends updates (logs, outputs, exit code, etc.) back to the Arvados controller.\n\nh2. Container communication channel (reverse https tunnel)\n\nThe crunch-run program runs a gateway server to facilitate the “container shell” feature. However, depending on the site's network topology, the Arvados controller may not be able to connect directly to the compute node where a given crunch-run process is running.\n\nInstead, in the HPC configuration, crunch-run connects to the Arvados controller at startup and sets up a multiplexed tunnel, allowing the controller process to connect to crunch-run's gateway server without initiating a connection to the compute node, or even knowing the compute node's IP address.\n\nThis means that when a client requests a container shell connection, the traffic goes through two or three servers:\n# The client connects to a controller host C1.\n# If the multiplexed tunnel is connected to a different controller host C2, then C1 proxies the incoming request to C2, using C2's InternalURL.\n# The controller host (C1 or C2) uses the multiplexed tunnel to connect to crunch-run's container gateway.\n\nh2. Scaling\n\nThe @API.MaxConcurrentRequests@ configuration should not be set too low, or the long-lived tunnel connections can starve other clients.\n"
  },
  {
    "path": "doc/architecture/index.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: architecture\ntitle: \"Arvados components\"\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Services\":#Services\n# \"Arvados-server\":#Arvados-server\n# \"SDK\":#SDK\n# \"Tools\":#Tools\n# \"Arvados-client\":#Arvados-client\n\n!(full-width){{site.baseurl}}/images/Arvados_arch.svg!\n\nh3(#Services). Services\n\nLocated in @arvados/services@.  Many services have been incorporated into @arvados-server@, see below.\n\ntable(table table-bordered table-condensed).\n|_. Component|_. Description|\n|api|Along with Controller, the API server is the core of Arvados.  It is backed by a Postgres database and manages information such as metadata for storage, a record of submitted compute jobs, users, groups, and associated permissions.|\n|crunch-dispatch-local|Get compute requests submitted to the API server and execute them locally.|\n|dockercleaner|Daemon for cleaning up Docker containers and images.|\n|fuse|Filesystem in Userspace (FUSE) enabling users to mount Keep collections as a filesystem.|\n|login-sync|Synchronize virtual machine users with Arvados users and permissions.|\n|workbench2|Web application providing user interface to Arvados services.|\n\nh3(#Arvados-server). Arvados-server\n\nLocated in @cmd/arvados-server@.  It consists of a single @arvados-server@ binary with a number of different subcommands.  Although the binary itself is monolithic, subcommands are each a standalone service and only handle requests for that specific service, i.e. a @arvados-server controller@ process will not respond to requests intended for a @arvados-server keep-web@.\n\ntable(table table-bordered table-condensed).\n|_. Subcommand|_. Description |\n|boot|Boot an Arvados cluster from source, used by automated testing.|\n|check|Contact the a health check endpoint on services and print a report.|\n|cloudtest|Diagnostic tool which attempts to start a cloud instance using the current settings in the config file.|\n|config-check|Check that the config file is valid.|\n|config-defaults|Dump the default config options.|\n|config-dump|Dump the active config options that would be used by the other @arvados-server@ commands.|\n|controller|Controller works with the API server to make up the core of Arvados.  It intercepts requests and implements additional features such as federation.|\n|crunch-run|Dispatched by crunch-dispatch, executes a single compute run: setting up a Docker container, running it, and collecting the output.|\n|crunchstat|Run a program and collect resource usage stats using cgroups.|\n|dispatch-cloud|Get compute requests submitted to the API server and schedule them on elastic cloud compute, creating and destroying cloud based virtual machines on demand.|\n|dispatch-lsf|Get compute requests submitted to the API server and submit them to LSF HPC scheduler.|\n|dispatch-slurm|Get compute requests submitted to the API server and submit them to SLURM HPC scheduler.|\n|health|Service that aggregates the other health check results to provide a single cluster-wide health status.|\n|install|Install development dependencies to be able to build and run Arvados from source.|\n|init|Create an initial configuration file for a new cluster and perform database setup.|\n|keep-balance|Perform storage utilization reporting, optimization and garbage collection.  
Moves data blocks to their optimum location, ensures correct replication and storage class, and trashes unreferenced blocks.|\n|keep-web|Provides high-level access to files in collections as either a WebDAV or S3-compatible API endpoint.|\n|keepproxy|Provides low-level access to keepstore services (block-level data access) for clients outside the internal (private) network.|\n|keepstore|Provides access to underlying storage (filesystem or object storage such as Amazon S3 or Azure Blob) with Arvados permissions.|\n|recover-collection|Recovers deleted collections. Recovery is possible when the collection's manifest is still available and all of its data blocks are still available or recoverable.|\n|workbench2|Serve the HTML/Javascript for the single-page Workbench application.|\n|ws|Publishes API server change events over websockets.|\n\nh3(#SDK). SDK\n\nThe @arv@ command is located in @arvados/sdk/ruby@, the @arv-*@ tools are located in @arvados/sdk/python@.\n\ntable(table table-bordered table-condensed).\n|_. Component|_. Description |\n|arv|Provides command line access to the API, along with some general purpose utilities.|\n|arv-copy|Copy a collection from one cluster to another.|\n|arv-get|Get files from a collection.|\n|arv-keepdocker|Upload Docker images from local Docker daemon to Keep.|\n|arv-ls|List files in a collection.|\n|arv-put|Upload files to a collection.|\n|arv-ws|Print events from Arvados websocket event source.|\n\nh3(#Tools). Tools\n\nLocated in @arvados/tools@.\n\ntable(table table-bordered table-condensed).\n|_. Component|_. Description |\n|cluster-activity|Generate an HTML and/or CSV report of cluster activity over a time period.|\n|crunchstat-summary|Read execution metrics (cpu %, ram, network, etc) collected from a compute container and produce a report.|\n|keep-block-check|Given a list of keep block locators, check that each block exists on one of the configured keepstore servers and verify the block hash.|\n|keep-exercise|Benchmarking tool to test throughput and reliability of keepstores under various usage patterns.|\n|keep-rsync|Get lists of blocks from two clusters, copy blocks which exist on the source cluster but are missing from the destination cluster.|\n|sync-groups|Takes a CSV file with rows in the form (group, user, permission) and synchronizes membership in Arvados groups.|\n|sync-users|Takes a CSV file with rows in the form (email, first name, last name, active, admin) and synchronizes Arvados users.|\n|user-activity|Generate a text report of user activity over a time period.|\n\nh3(#Arvados-client). Arvados-client\n\nLocated in @cmd/arvados-client@.  It consists of a single @arvados-client@ binary with a number of different subcommands.\n\ntable(table table-bordered table-condensed).\n|_. Subcommand|_. Description |\n|connect-ssh|Connects stdin/stdout to a container's gateway server. It is intended to be invoked with OpenSSH client's ProxyCommand config.|\n|deduplication-report|Analyzes the overlap in blocks used by 2 or more collections. 
It prints a deduplication report that shows the nominal space used by the collections, as well as the actual size and the amount of space that is saved by Keep's deduplication.|\n|diagnostics|Perform cluster diagnostics to check that all the services are available and responding normally to requests.|\n|logs|Prints live streaming logs for a container.|\n|mount|Alternate Keep FUSE mount written in Go.|\n|shell|Connects the terminal to an interactive shell on a running container.|\n|sudo|Runs another command using API connection info and SystemRootToken from the system config file instead of the caller's environment vars.|\n"
  },
  {
    "path": "doc/architecture/keep-clients.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: architecture\ntitle: Keep clients\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nKeep clients are applications such as @arv-get@, @arv-put@ and @arv-mount@ which store and retrieve data from Keep.  In doing so, these programs interact with both the API server (which stores file metadata in the form of @collection@ objects) and individual @keepstore@ servers (which store the actual data blocks).\n\n!(full-width){{site.baseurl}}/images/Keep_reading_writing_block.svg!\n\nh2. Storing a file\n\n# The client discovers keep servers (or proxies) using the @accessible@ method on \"keep_services\":{{site.baseurl}}/api/methods/keep_services.html\n# Data is split into 64 MiB blocks and the MD5 hash is computed for each block.\n# The client uploads each block to one or more Keep servers, based on the number of desired replicas.  The priority order is determined using rendezvous hashing, described below.\n# The Keep server returns a block locator (the MD5 sum of the block) and a \"signed token\" which the client can use as proof of knowledge for the block.\n# The client constructs a @manifest@ which lists the blocks by MD5 hash and how to reassemble them into the original files.\n# The client creates a \"collection\":{{site.baseurl}}/api/methods/collections.html and provides the @manifest_text@\n# The API server accepts the collection after validating the signed tokens (proof of knowledge) for each block.\n\nh2. Fetching a file\n\n# The client requests a @collection@ object including @manifest_text@ from the APIs server\n# The server adds \"token signatures\" to the @manifest_text@ and returns it to the client.\n# The client discovers keep servers (or proxies) using the @accessible@ method on \"keep_services\":{{site.baseurl}}/api/methods/keep_services.html\n# For each data block, the client chooses the highest priority server using rendezvous hashing, described below.\n# The client sends the data block request to the keep server, along with the token signature from the API which proves to Keep servers that the client is permitted to read a given block.\n# The server provides the block data after validating the token signature for the block (if the server does not have the block, it returns a 404 and the client tries the next highest priority server)\n\nh2(#rendezvous). Rendezvous hashing\n!(full-width){{site.baseurl}}/images/Keep_rendezvous_hashing.svg!\n\nEach @keep_service@ resource has an assigned uuid.  To determine priority assignments of blocks to servers, for each keep service compute the MD5 sum of the string concatenation of the block locator (hex-coded hash part only) and service uuid, then sort this list in descending order.  Blocks are preferentially placed on servers with the highest weight.\n\n"
  },
  {
    "path": "doc/architecture/keep-components-overview.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: architecture\ntitle: Keep components overview\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nKeep has a number of components. This page describes each component and the role it plays.\n\nh3. Keep clients for data access\n\nIn order to access data in Keep, a client is needed to store data in and retrieve data from Keep. Different types of Keep clients exist:\n* a command line client like \"@arv-get@\":{{ site.baseurl }}/user/tutorials/tutorial-keep-get.html#download-using-arv or \"@arv-put@\":{{ site.baseurl }}/user/tutorials/tutorial-keep.html#upload-using-command\n* a FUSE mount provided by \"@arv-mount@\":{{ site.baseurl }}/user/tutorials/tutorial-keep-mount-gnu-linux.html\n* a WebDAV mount provided by @keep-web@\n* an S3-compatible endpoint provided by @keep-web@\n* programmatic access via the \"Arvados SDKs\":{{ site.baseurl }}/sdk/index.html\n\nIn essense, these clients all do the same thing: they translate file and directory references into requests for Keep blocks and collection manifests. How Keep clients work, and how they use rendezvous hashing, is described in greater detail in \"the next section\":{{ site.baseurl }}/architecture/keep-clients.html.\n\nFor example, when a request comes in to read a file from Keep, the client will\n* request the collection object (including its manifest) from the API server\n* look up the file in the collection manifest, and retrieve the hashes of the block(s) that contain its content\n* ask the keepstore(s) for the block hashes\n* return the contents of the file to the requestor\n\nAll of those steps are subject to access control, which applies at the level of the collection: in the example above, the API server and the keepstore daemons verify that the client has permission to read the collection, and will reject the request if it does not.\n\nh3. API server\n\nThe API server stores collection objects and all associated metadata. That includes data about where the blocks for a collection are to be stored, e.g. when \"storage classes\":{{ site.baseurl }}/admin/storage-classes.html are configured, as well as the desired and confirmed replication count for each block. It also stores the ACLs that control access to the collections. Finally, the API server provides Keep clients with time-based block signatures for access.\n\nh3. Keepstore\n\nThe @keepstore@ daemon is Keep's workhorse, the storage server that stores and retrieves data from an underlying storage system. Keepstore exposes an HTTP REST API. Keepstore only handles requests for blocks. Because blocks are content-addressed, they can be written and deleted, but there is no _update_ operation: blocks are immutable.\n\nSo what happens if the content of a file changes? When a client changes a file, it first writes any new blocks to the keepstore(s). Then, it updates the manifest for the collection the file belongs to with the references to the new blocks.\n\nA keepstore can store its blocks in object storage (S3 or an S3-compatible system, or Azure Blob Storage). It can also store blocks on a POSIX file system. A keepstore can be configured with multiple storage volumes. Each keepstore volume is configured with a replication number; e.g. 
a POSIX file system backed by a single disk would have a replication factor of 1, while an Azure 'LRS'  storage volume could be configured with a replication factor of 3 (that is how many copies LRS stores under the hood, according to the Azure documentation).\n\nBy default, Arvados uses a replication factor of 2. See the @DefaultReplication@ configuration parameter in \"the configuration reference\":https://doc.arvados.org/admin/config.html. Additionally, each collection can be configured with its own replication factor. It's worth noting that it is the responsibility of the Keep clients to make sure that all blocks are stored subject to their desired replica count, which is derived from the collections the blocks belong to. @keepstore@ itself does not provide replication; all it does is store blocks on the volumes it knows about. The @keepproxy@ and @keep-balance@ processes (see below) make sure that blocks are replicated properly.\n\nThe maximum block size for @keepstore@ is 64 MiB, and keep clients typically combine small files into larger blocks. In a typical Arvados installation, the majority of blocks stored in Keep will be 64 MiB, though some fraction will be smaller.\n\nh3. Keepproxy\n\nThe @keepproxy@ server is a gateway into your Keep storage. Unlike the Keepstore servers, which are only accessible on the local LAN, Keepproxy is suitable for clients located elsewhere on the internet. A client writing through Keepproxy only writes one copy of each block; the Keepproxy server will write additional copies of the data to the Keepstore servers, to fulfill the requested replication factor. Keepproxy also checks API token validity before processing requests.\n\nh3. Keep-web\n\nThe @keep-web@ server provides read/write access to files stored in Keep using the HTTP, WebDAV and S3 protocols. This makes it easy to access files in Keep from a browser, or mount Keep as a network folder using WebDAV support in various operating systems. It serves public data to unauthenticated clients, and serves private data to clients that supply Arvados API tokens.\n\nh3. Keep-balance\n\nKeep is a garbage-collected system. When a block is no longer referenced in any collection manifest in the system, it becomes eligible for garbage collection. When the desired replication factor for a block (derived from the default replication factor, in addition to the replication factor of any collection(s) the block belongs to) does not match reality, the number of copies stored in the available Keepstore servers needs to be adjusted.\n\nThe @keep-balance@ program takes care of these things. It runs as a service, and wakes up periodically to do a scan of the system and send instructions to the Keepstore servers. That process is described in more detail at \"Balancing Keep servers\":https://doc.arvados.org/admin/keep-balance.html.\n"
  },
  {
    "path": "doc/architecture/keep-data-lifecycle.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: architecture\ntitle: \"Data lifecycle\"\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2(#overview). Overview\n\nArvados collections consist of a \"manifest\":{{site.baseurl}}/architecture/manifest-format.html and the data blocks referenced in that manifest. Manifests are stored in the PosgreSQL database, @data blocks@ are stored by a @keepstore@.\n\nData blocks are frequently shared between collections. Each collection has its own @manifest@. Collection manifests and data blocks have a separate lifecycle, which is described in detail below.\n\nh2(#collection_lifecycle). Collection lifecycle\n\nDuring its lifetime, a collection can be in various states. These states are *persisted*, *expiring*, *trashed*  and *permanently deleted*.\n\nThe nominal state is *persisted* which means the data can be can be accessed normally and will be retained indefinitely.\n\nA collection is *expiring* when it has a *trash_at* time in the future. An expiring collection can be accessed as normal, but is scheduled to be trashed automatically at the *trash_at* time.\n\nA collection is *trashed* when it has a *trash_at* time in the past. The *is_trashed* attribute will also be \"true\". The delete operation immediately puts the collection in the trash by setting the *trash_at* time to \"now\", and *delete_at* defaults to \"now\" + @Collections.DefaultTrashLifetime@. Once trashed, the collection is no longer readable through normal data access APIs. The collection will have *delete_at* set to some time in the future. The trashed collection is recoverable until the *delete_at* time passes, at which point the collection is permanently deleted.\n\nSee \"Recovering trashed collections\":{{ site.baseurl }}/user/tutorials/tutorial-keep-collection-lifecycle.html#trash-recovery for instructions to recover trashed collections.\n\nh3(#collection_attributes). Collection lifecycle attributes\n\nAs listed above the attributes that are used to manage a collection lifecycle are *is_trashed*, *trash_at*, and *delete_at*. The table below lists the values of these attributes and how they influence the state of a collection and its accessibility.\n\ntable(table table-bordered table-condensed).\n|_. collection state|_. is_trashed|_. trash_at|_. delete_at|_. get|_. list|_. list?include_trash=true|_. can be modified|\n|persisted collection|false |null |null |yes |yes |yes |yes |\n|expiring collection|false |future |future |yes  |yes |yes |yes |\n|trashed collection|true |past |future |no |no |yes |only is_trashed, trash_at and delete_at attributes|\n|deleted collection|true|past |past |no |no |no |no |\n\nh2(#block_lifecycle). Block lifecycle\n\nDuring its lifetime, a data block can be in various states. These states are *persisted*, *unreferenced*, *trashed* and *permanently deleted*.\n\nThe nominal state is *persisted* which means the block can be can be retrieved normally from a @keepstore@ process.\n\nA block is *unreferenced* when there are no collection manifests in the PostgreSQL collections table that reference it. The block can still be retrieved normally from a @keepstore@ process, e.g. by creating a new collection with a manifest that references the hash of the block. 
Unreferenced blocks will be moved to the *trashed* state by @keep-balance@ after @BlobSigningTTL@, if @BlobTrash@ is enabled and @keep-balance@ is running and configured to send trash lists to the keepstores.\n\nA block is *trashed* when @keep-balance@ has asked a @keepstore@ to move it to its trash and @BlobTrash@ is enabled. It will stay there for a period of time, subject to the @BlobTrashLifetime@ setting.\n\nA block is *permanently deleted* on the first wakeup of its @keepstore@ trash process after the block has spent @BlobTrashLifetime@ in that keepstore's trash. The trash process wakes up at the frequency defined by @BlobTrashCheckInterval@.\n\ntable(table table-bordered table-condensed).\n|_. block state|_. duration|_. retrievable via Keep|_. can be recovered|\n|persisted block|indefinitely|yes |n/a |\n|unreferenced block|@BlobSigningTTL@ + up to @BalancePeriod@ + duration of keep-balance run|yes |n/a |\n|trashed block|@BlobTrashLifetime@ + up to @BlobTrashCheckInterval@|no |yes |\n|deleted block||no |no |\n
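\nCombining the durations in the table above, here is a rough sketch of the window in which a block that just became unreferenced is permanently deleted. The configuration parameter names mirror the ones in the table; the numeric values are illustrative only:\n\n<pre><code>from datetime import timedelta\n\n# Illustrative values only; actual values come from the cluster configuration.\nblob_signing_ttl = timedelta(weeks=2)           # BlobSigningTTL\nbalance_period = timedelta(minutes=10)          # BalancePeriod\nblob_trash_lifetime = timedelta(weeks=2)        # BlobTrashLifetime\nblob_trash_check_interval = timedelta(days=1)   # BlobTrashCheckInterval\n\ndef deletion_window(unreferenced_at):\n    # Earliest and latest permanent-deletion times for a block that became\n    # unreferenced at unreferenced_at, ignoring the keep-balance run itself.\n    earliest = unreferenced_at + blob_signing_ttl + blob_trash_lifetime\n    latest = earliest + balance_period + blob_trash_check_interval\n    return earliest, latest\n</code></pre>\n"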
  },
  {
    "path": "doc/architecture/manifest-format.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: architecture\ntitle: Manifest format\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nEach collection record has a @manifest_text@ field, which describes how to reassemble keep blocks into files. Each block identifier in the manifest has an added signature which is used to confirm permission to read the block.  To read a block from a keepstore server, the client must provide the block identifier, the signature, and the same API token used to retrieve the collection record.\n\n!(full-width){{site.baseurl}}/images/Keep_manifests.svg!\n\nh2. Manifest v1\n\nA manifest is utf-8 encoded text, consisting of zero or more newline-terminated streams.\n\n<pre>\nmanifest       ::= stream*\nstream         ::= stream-name (\" \" locator)+ (\" \" file-segment)+ \"\\n\"\nstream-name    ::= \".\" (\"/\" path-component)*\npath-component ::= <printable ASCII - (whitespace, \"/\")>+\nfile-segment   ::= position \":\" size \":\" filename\nposition       ::= [0-9]+\nsize           ::= [0-9]+\nfilename       ::= path-component (\"/\" path-component)*\n</pre>\n\nNotes:\n\n* The first token is the stream name, consisting of one or more path components, delimited by @\"/\"@.\n** The first path component is always @\".\"@.\n** No path component is empty.\n** No path component following the first one can be \".\" or \"..\".\n** The stream name never begins or ends with @\"/\"@.\n* The next N tokens are \"keep locators\":#locator\n** These describe the \"data stream\".  By logically concatenating the blocks in the order that they appear, we can refer to \"positions\" in the data stream.\n* File tokens come after the sequence of keep locators.\n** A file token has three parts, delimited by @\":\"@: position, size, filename.\n** Position and size are given in decimal\n** The position is the position in the data stream\n** The size is the count of bytes following the position in the data stream.  A file size may cross multiple blocks in the data stream.\n** Filename may contain @\"/\"@ characters, but must not start or end with @\"/\"@, and must not contain @\"//\"@.\n** Filename components (delimited by @\"/\"@) must not be @\".\"@ or @\"..\"@.\n** There may be multiple file tokens.\n\nIt is legal to have multiple file tokens in the manifest (possible across different streams) with the same combined path name @stream name + \"/\" + filename@.  This must be interpreted as a concatenation of file content, in the order that the file tokens appear in the manifest.\n\nSpaces are represented by the escape sequence @\\040@.  Spaces in stream names and filenames must be translated when reading and writing manifests.  A manifest may not contain TAB characters, nor other ASCII whitespace characters or control codes other than the spaces or newlines used as delimiters specified above.  A manifest always ends with a newline -- except the empty (zero-length) string, which is a valid manifest.\n\nh3. Normalized manifest v1\n\nA normalized manifest is a manifest that meets the following additional restrictions:\n\n* Streams are in alphanumeric order.\n* Each stream name is unique within the manifest.\n* Files within a stream are listed in alphanumeric order.\n* Blocks within a stream are ordered based on order of file tokens of the stream.  A given block is listed at most once in a stream.\n* Filename must not contain @\"/\"@ (the stream name represents the path prefix)\n\nh3. 
Estimating manifest size\n\nHere's a formula for estimating manifest size as stored in the database, assuming efficiently packed blocks.\n\n<pre>\nmanifest_size =\n   + (total data size / 64 MB) * 40\n   + sum(number of files * 20)\n   + sum(size of all directory paths)\n   + sum(size of all file names)\n</pre>\n\nHere is the size when including block signatures.  The block signatures authorize access to fetch each block from a Keep server, as <a href=\"#token_signatures\">described below</a>.  The signed manifest text is what is actually transferred to/from the API server and stored in RAM by @arv-mount@.  The effective upper limit on how large a collection manifest can be is determined by @API.MaxRequestSize@ in @config.yml@ as well as the maximum request size configuration in your reverse proxy or load balancer (e.g. @client_max_body_size@ in Nginx).\n\n<pre>\nmanifest_size =\n   + (total data size / 64 MB) * 94\n   + sum(number of files * 20)\n   + sum(size of all directory paths)\n   + sum(size of all file names)\n</pre>\n\nh3. Example manifests\n\nA manifest with four files in two directories:\n\n<pre>\n. 930625b054ce894ac40596c3f5a0d947+33 0:0:a 0:0:b 0:33:output.txt\n./c d41d8cd98f00b204e9800998ecf8427e+0 0:0:d\n</pre>\n\nThe same manifest with permission signatures on each block:\n\n<pre>\n. 930625b054ce894ac40596c3f5a0d947+33+A1f27a35dd9af37191d63ad8eb8985624451e7b79@5835c8bc 0:0:a 0:0:b 0:33:output.txt\n./c d41d8cd98f00b204e9800998ecf8427e+0+A27117dcd30c013a6e85d6d74c9a50179a1446efa@5835c8bc 0:0:d\n</pre>\n\nA manifest containing a file consisting of multiple blocks and a space in the file name:\n\n<pre>\n. c449ed86671e4a34a8b8b9430850beba+67108864 09fcfea01c3a141b89dd0dcfa1b7768e+22534144 0:89643008:Docker\\040image.tar\n</pre>\n\nh2(#locator). Keep locator format\n\nBNF notation for a valid Keep locator string (with hints).  For example: *d41d8cd98f00b204e9800998ecf8427e+0+Z+Ada39a3ee5e6b4b0d3255bfef95601890afd80709@53bed294*\n\n<pre>\nlocator          ::= sized-digest hint*\nsized-digest     ::= digest size-hint\ndigest           ::= <32 lowercase hexadecimal digits>\nsize-hint        ::= \"+\" [0-9]+\nhint             ::= \"+\" hint-type hint-content\nhint-type        ::= [A-Z]+\nhint-content     ::= [A-Za-z0-9@_-]*\nsign-hint        ::= \"+A\" <40 lowercase hexadecimal digits> \"@\" sign-timestamp\nremote-sign-hint ::= \"+R\" [A-Za-z0-9]{5} \"-\" <40 lowercase hexadecimal digits> \"@\" sign-timestamp\nsign-timestamp   ::= <8 lowercase hexadecimal digits>\n</pre>\n\nh3. Regular expression to validate locator\n\n<pre>\n/^([0-9a-f]{32})\\+([0-9]+)(\\+[A-Z][-A-Za-z0-9@_]*)*$/\n</pre>\n\nh3. Valid locators\n\ntable(table table-bordered table-condensed).\n|@d41d8cd98f00b204e9800998ecf8427e+0@|\n|@d41d8cd98f00b204e9800998ecf8427e+0+Z@|\n|<code>d41d8cd98f00b204e9800998ecf8427e+0+Z+Ada39a3ee5e6b4b0d3255bfef95601890afd80709@53bed294</code>|\n|<code>930625b054ce894ac40596c3f5a0d947+33+Rzzzzz-1f27a35dd9af37191d63ad8eb8985624451e7b79@5835c8bc</code>|\n\nh3. Invalid locators\n\ntable(table table-bordered table-condensed).\n|_. Locator|_. Why|\n|@d41d8cd98f00b204e9800998ecf8427e@|No size hint|\n|@d41d8cd98f00b204e9800998ecf8427e+Z+0@|Other hint before size hint|\n|@d41d8cd98f00b204e9800998ecf8427e+0+0@|Multiple size hints|\n|@d41d8cd98f00b204e9800998ecf8427e+0+z@|Hint does not start with uppercase letter|\n|@d41d8cd98f00b204e9800998ecf8427e+0+Zfoo*bar@|Hint contains invalid character @*@|\n
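\nThe regular expression above can be applied directly; for instance, this short sketch checks one entry from each of the two tables:\n\n<pre><code>import re\n\n# The validation regular expression quoted above.\nLOCATOR = re.compile(r'^([0-9a-f]{32})\\+([0-9]+)(\\+[A-Z][-A-Za-z0-9@_]*)*$')\n\nassert LOCATOR.match('d41d8cd98f00b204e9800998ecf8427e+0+Z')\n# Other hint before the size hint, so this one must not match:\nassert not LOCATOR.match('d41d8cd98f00b204e9800998ecf8427e+Z+0')\n</code></pre>\n\nh3(#token_signatures). 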
Token signatures\n\nA token signature (sign-hint) provides proof-of-access for a data block.  It is computed by taking a SHA1 HMAC of the blob signing token (a shared secret between the API server and keep servers), block digest, current API token, expiration timestamp, and blob signature TTL.\n\nWhen communicating with the @keepstore@ to fetch a block, or the API server to create or update a collection, the service computes the expected token signature for each block and compares it to the token signature that was presented by the client.  Keep clients receive valid block signatures when uploading a block to a keepstore server (getting back a signed token as proof of knowledge), or when retrieving, from the API server, the manifest text of a collection on which the user has read permission.\n\nSecurity of a token signature is derived from the following characteristics:\n\n# Valid signatures can only be generated by entities that know the shared secret (the \"blob signing token\").\n# A signature can only be used by an entity that also knows the API token that was used to generate it.\n# It expires after a set date (the expiration time, based on the \"blob signature time-to-live (TTL)\").\n\nh3(#federationsignatures). Federation and signatures\n\nWhen a collection record is returned through a federation request, the keep blocks listed in the manifest may not be available on the local cluster, and the keep block signatures returned by the remote cluster are not valid for the local cluster.  To solve this, @arvados-controller@ rewrites the signatures in the manifest to \"remote cluster\" signatures.\n\nA local signature comes after the block identifier and block size, and starts with @+A@:\n\n<code>930625b054ce894ac40596c3f5a0d947+33+A1f27a35dd9af37191d63ad8eb8985624451e7b79@5835c8bc</code>\n\nA remote cluster signature starts with @+R@, then the cluster id of the cluster it originated from (@zzzzz@ in this example), a dash, and then the original signature:\n\n<code>930625b054ce894ac40596c3f5a0d947+33+Rzzzzz-1f27a35dd9af37191d63ad8eb8985624451e7b79@5835c8bc</code>\n\nWhen the client provides a remote-signed block locator to keepstore, the keepstore proxies the request to the remote cluster:\n\n# keepstore determines the cluster id to contact from the first part of the @+R@ signature\n# creates a salted token using the API token and cluster id\n# contacts the \"accessible\" endpoint on the remote cluster to determine the remote cluster's keepstore or keepproxy hosts\n# converts the remote signature @+R@ back to a local signature @+A@\n# contacts the remote keepstore or keepproxy host and requests the block using the local signature\n# returns the block contents back to the client\n\nh3(#example). Example\n\nThis example uses @c1bad4b39ca5a924e481008009d94e32+210@, which is the content hash of a @collection@ that was added to Keep in \"how to upload data\":{{ site.baseurl }}/user/tutorials/tutorial-keep.html.  Get the collection manifest using @arv-get@:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv-get c1bad4b39ca5a924e481008009d94e32+210</span>\n. 
204e43b8a1185621ca55a94839582e6f+67108864+Aasignatureforthisblockaaaaaaaaaaaaaaaaaa@5f612ee6 b9677abbac956bd3e86b1deb28dfac03+67108864+Aasignatureforthisblockbbbbbbbbbbbbbbbbbb@5f612ee6 fc15aff2a762b13f521baf042140acec+67108864+Aasignatureforthisblockcccccccccccccccccc@5f612ee6 323d2a3ce20370c4ca1d3462a344f8fd+25885655+Aasignatureforthisblockdddddddddddddddddd@5f612ee6 0:227212247:var-GS000016015-ASM.tsv.bz2\n</code></pre>\n</notextile>\n\nThis collection includes a single file @var-GS000016015-ASM.tsv.bz2@, which is 227212247 bytes long. It is stored using four sequential data blocks with hashes @204e43b8a1185621ca55a94839582e6f+67108864@, @b9677abbac956bd3e86b1deb28dfac03+67108864@, @fc15aff2a762b13f521baf042140acec+67108864@, and @323d2a3ce20370c4ca1d3462a344f8fd+25885655@. Each block hash is followed by the rest of its \"locator\":#locator.\n\nUse @arv-get@ to download the first data block:\n\nnotextile. <pre><code>~$ <span class=\"userinput\">arv-get 204e43b8a1185621ca55a94839582e6f+67108864+Aasignatureforthisblockaaaaaaaaaaaaaaaaaa@5f612ee6 &gt; block1</span></code></pre>\n\nInspect the size and compute the MD5 hash of @block1@:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">ls -l block1</span>\n-rw-r--r-- 1 you group 67108864 Dec  9 20:14 block1\n~$ <span class=\"userinput\">md5sum block1</span>\n204e43b8a1185621ca55a94839582e6f  block1\n</code></pre>\n</notextile>\n\nAs expected, the md5sum of the contents of the block matches the @digest@ part of the \"locator\":#locator, and the size of the contents matches the @size-hint@.\n
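\nAfter downloading the remaining blocks the same way (into hypothetical local files @block2@ through @block4@), reassembling the original file is a matter of concatenating the blocks in manifest order and slicing out the segment described by the file token; a minimal sketch:\n\n<pre><code># block1..block4 are the hypothetical local filenames used above.\nblocks = [open(name, 'rb').read()\n          for name in ('block1', 'block2', 'block3', 'block4')]\nstream = b''.join(blocks)      # the logical data stream\n# From the file token 0:227212247:var-GS000016015-ASM.tsv.bz2:\nposition, size = 0, 227212247\ndata = stream[position:position + size]\nassert len(data) == size\n</code></pre>\n"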
  },
  {
    "path": "doc/architecture/singularity.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: architecture\ntitle: Singularity\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados can be configured to use \"Singularity\":https://sylabs.io/singularity/ instead of Docker to execute containers on cloud nodes or a Slurm/LSF cluster. Singularity may be preferable due to its simpler installation and lack of long-running daemon process and special system users/groups. For on premises Slurm/LSF clusters, see the \"Set up a compute node with Singularity\":{{ site.baseurl }}/install/crunch2/install-compute-node-singularity.html page. For cloud compute clusters, see the \"Build a cloud compute node image\":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html page.\n\nh2. Design overview\n\nWhen Arvados is configured to use Singularity as the runtime engine for Crunch, containers are executed by Singularity. The images specified in workflows and tool definitions must be Docker images uploaded via @arv-keepdocker@ or @arvados-cwl-runner@. When Singularity is the runtime engine, these images are converted to Singularity format (@.sif@) at runtime, as needed.\n\nTo avoid repeating this conversion work unnecessarily, the @.sif@ files are cached in @Keep@. This is done on a per-user basis. If it does not exist yet, a new Arvados project named @.cache@ is automatically created in the user's home project. Similarly, a subproject named @auto-generated singularity images@ will be created in the @.cache@ project. The automatically generated @.sif@ files are stored in collections in that project, with an expiration date two weeks in the future. If the cached image exists when Crunch runs a new container, the expiration date will be pushed out, so that it is always 2 weeks in the future from the most recent start of a container using the image.\n\nIt is safe to empty out or even remove the .cache project or any of its contents; if necessary the cache projects and the @.sif@ files will automatically be regenerated.\n\nh2. Notes\n\n* Programs running in Singularity containers may behave differently than when run in Docker, due to differences between Singularity and Docker. For example, the root (image) filesystem is read-only in a Singularity container. Programs that attempt to write outside a designated output or temporary directory are likely to fail.\n\n* When using Singularity as the runtime engine, the compute node needs to have a compatible Singularity executable installed, as well as the @mksquashfs@ program used to convert Docker images to Singularity's @.sif@ format. The Arvados \"compute node image build script\":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html includes these executables since Arvados 2.3.0.\n\nh2. Limitations\n\nArvados @Singularity@ support is a work in progress. These are the current limitations of the implementation:\n\n* Even when using the Singularity runtime, users' container images are expected to be saved in Docker format. Specifying a @.sif@ file as an image when submitting a container request is not yet supported.\n* Arvados' Singularity implementation does not yet limit the amount of memory available in a container. Each container will have access to all memory on the host where it runs, unless memory use is restricted by Slurm/LSF.\n* The Docker ENTRYPOINT instruction is ignored.\n* Arvados is tested with Singularity version 3.10.4. Other versions may not work.\n"
  },
  {
    "path": "doc/architecture/storage.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: architecture\ntitle: Introduction to Keep\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nKeep is a content-addressable storage system that yields high performance for I/O-bound workloads. Keep is designed to run on low-cost commodity hardware or cloud services and is tightly integrated with the rest of the Arvados system. It provides high fault tolerance and high aggregate performance to a large number of clients.\n\nh2. Design goals and core features\n\n* *Scale* - Keep installations are managing petabytes of data today. Keep scales horizontally.\n\n* *Data deduplication* - Keep automatically deduplicates data through its use of content addressing.\n\n* *Flexibility* - Keep can store data in S3, S3-compatible storage systems (e.g. Ceph) and Azure blob storage. Keep can also store data on POSIX file systems.\n\n* *Fault-Tolerance* - Errors and failure are expected. Keep has redundancy and recovery capabilities at its core.\n\n* *Optimized for Aggregate Throughput* - Like S3 and Azure blob storage, Keep is optimized for aggregate throughput. This is optimal in a scenario with many reader/writer processes.\n\n* *Complex Data Management* - Keep operates well in environments where there are many independent users accessing the same data or users who want to organize data in many different ways. Keep facilitates data sharing without expecting users either to agree with one another about directory structures or to create redundant copies of the data.\n\n* *Security* - Keep works well combined with encryption at rest and transport encryption. All data is managed through @collection@ objects, which implement a rich \"permission model\":{{site.baseurl}}/api/permission-model.html.\n\nh2. How Keep works\n\nKeep is a content-addressable file system.  This means that files are managed using special unique identifiers derived from the _contents_ of the file (specifically, the MD5 hash), rather than human-assigned file names.  This has a number of advantages:\n* Files can be stored and replicated across a cluster of servers without requiring a central name server.\n* Both the server and client systematically validate data integrity because the checksum is built into the identifier.\n* Data duplication is minimized—two files with the same contents will have in the same identifier, and will not be stored twice.\n* It avoids data race conditions, since an identifier always points to the same data.\n\nIn Keep, information is stored in @data blocks@.  Data blocks are normally between 1 byte and 64 megabytes in size.  If a file exceeds the maximum size of a single data block, the file will be split across multiple data blocks until the entire file can be stored.  These data blocks may be stored and replicated across multiple disks, servers, or clusters.  Each data block has its own identifier for the contents of that specific data block.\n\nIn order to reassemble the file, Keep stores a @collection@ manifest which lists in sequence the data blocks that make up the original file.  A @manifest@ may store the information for multiple files, including a directory structure. See \"manifest format\":{{site.baseurl}}/architecture/manifest-format.html for more information on how manifests are structured.\n"
  },
  {
    "path": "doc/css/R.css",
    "content": "body {\n    background: white;\n    color: black;\n}\n\na:link {\n    background: white;\n    color: blue;\n}\n\na:visited {\n    background: white;\n    color: rgb(50%, 0%, 50%);\n}\n\nh1 {\n    background: white;\n    color: rgb(55%, 55%, 55%);\n    font-family: monospace;\n    font-size: x-large;\n    text-align: center;\n}\n\nh2 {\n    background: white;\n    color: rgb(40%, 40%, 40%);\n    font-family: monospace;\n    font-size: large;\n    text-align: center;\n}\n\nh3 {\n    background: white;\n    color: rgb(40%, 40%, 40%);\n    font-family: monospace;\n    font-size: large;\n}\n\nh4 {\n    background: white;\n    color: rgb(40%, 40%, 40%);\n    font-family: monospace;\n    font-style: italic;\n    font-size: large;\n}\n\nh5 {\n    background: white;\n    color: rgb(40%, 40%, 40%);\n    font-family: monospace;\n}\n\nh6 {\n    background: white;\n    color: rgb(40%, 40%, 40%);\n    font-family: monospace;\n    font-style: italic;\n}\n\t\t\nimg.toplogo {\n    width: 4em;\n    vertical-align: middle;\n}\n\nimg.arrow {\n    width: 30px;\n    height: 30px;\n    border: 0;\n}\n\nspan.acronym {\n    font-size: small;\n}\n\nspan.env {\n    font-family: monospace;\n}\n\nspan.file {\n    font-family: monospace;\n}\n\nspan.option{\n    font-family: monospace;\n}\n\nspan.pkg {\n    font-weight: bold;\n}\n\nspan.samp{\n    font-family: monospace;\n}\n\ndiv.vignettes a:hover {\n    background: rgb(85%, 85%, 85%);\n}\n"
  },
  {
    "path": "doc/css/badges.css",
    "content": "/* Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0 */\n\n/* Colors\n * Contextual variations of badges\n * Bootstrap 3.0 removed contexts for badges, we re-introduce them, based on what is done for labels\n */\n\n.badge.badge-error {\n  background-color: #b94a48;\n}\n\n.badge.badge-warning {\n  background-color: #f89406;\n}\n\n.badge.badge-success {\n  background-color: #468847;\n}\n\n.badge.badge-info {\n  background-color: #3a87ad;\n}\n\n.badge.badge-inverse {\n  background-color: #333333;\n}\n\n.badge.badge-alert {\n    background: red;\n}\n"
  },
  {
    "path": "doc/css/bootstrap-theme.css",
    "content": "/*!\n * Bootstrap v3.1.0 (http://getbootstrap.com)\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n\n.btn-default,\n.btn-primary,\n.btn-success,\n.btn-info,\n.btn-warning,\n.btn-danger {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, .2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 1px rgba(0, 0, 0, .075);\n}\n.btn-default:active,\n.btn-primary:active,\n.btn-success:active,\n.btn-info:active,\n.btn-warning:active,\n.btn-danger:active,\n.btn-default.active,\n.btn-primary.active,\n.btn-success.active,\n.btn-info.active,\n.btn-warning.active,\n.btn-danger.active {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n          box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n}\n.btn-default {\n  text-shadow: 0 1px 0 #fff;\n  background-image: -webkit-linear-gradient(top, #fff 0%, #e0e0e0 100%);\n  background-image:         linear-gradient(to bottom, #fff 0%, #e0e0e0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#ffe0e0e0', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #dbdbdb;\n  border-color: #ccc;\n}\n.btn-default:hover,\n.btn-default:focus {\n  background-color: #e0e0e0;\n  background-position: 0 -15px;\n}\n.btn-default:active,\n.btn-default.active {\n  background-color: #e0e0e0;\n  border-color: #dbdbdb;\n}\n.btn-primary {\n  background-image: -webkit-linear-gradient(top, #428bca 0%, #2d6ca2 100%);\n  background-image:         linear-gradient(to bottom, #428bca 0%, #2d6ca2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff2d6ca2', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #2b669a;\n}\n.btn-primary:hover,\n.btn-primary:focus {\n  background-color: #2d6ca2;\n  background-position: 0 -15px;\n}\n.btn-primary:active,\n.btn-primary.active {\n  background-color: #2d6ca2;\n  border-color: #2b669a;\n}\n.btn-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #419641 100%);\n  background-image:         linear-gradient(to bottom, #5cb85c 0%, #419641 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff419641', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #3e8f3e;\n}\n.btn-success:hover,\n.btn-success:focus {\n  background-color: #419641;\n  background-position: 0 -15px;\n}\n.btn-success:active,\n.btn-success.active {\n  background-color: #419641;\n  border-color: #3e8f3e;\n}\n.btn-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #2aabd2 100%);\n  background-image:         linear-gradient(to bottom, #5bc0de 0%, #2aabd2 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff2aabd2', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #28a4c9;\n}\n.btn-info:hover,\n.btn-info:focus {\n  background-color: #2aabd2;\n  background-position: 0 -15px;\n}\n.btn-info:active,\n.btn-info.active {\n  background-color: #2aabd2;\n  border-color: 
#28a4c9;\n}\n.btn-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #eb9316 100%);\n  background-image:         linear-gradient(to bottom, #f0ad4e 0%, #eb9316 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffeb9316', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #e38d13;\n}\n.btn-warning:hover,\n.btn-warning:focus {\n  background-color: #eb9316;\n  background-position: 0 -15px;\n}\n.btn-warning:active,\n.btn-warning.active {\n  background-color: #eb9316;\n  border-color: #e38d13;\n}\n.btn-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c12e2a 100%);\n  background-image:         linear-gradient(to bottom, #d9534f 0%, #c12e2a 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc12e2a', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-color: #b92c28;\n}\n.btn-danger:hover,\n.btn-danger:focus {\n  background-color: #c12e2a;\n  background-position: 0 -15px;\n}\n.btn-danger:active,\n.btn-danger.active {\n  background-color: #c12e2a;\n  border-color: #b92c28;\n}\n.thumbnail,\n.img-thumbnail {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .075);\n          box-shadow: 0 1px 2px rgba(0, 0, 0, .075);\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  background-color: #e8e8e8;\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image:         linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  background-color: #357ebd;\n  background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);\n  background-image:         linear-gradient(to bottom, #428bca 0%, #357ebd 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);\n  background-repeat: repeat-x;\n}\n.navbar-default {\n  background-image: -webkit-linear-gradient(top, #fff 0%, #f8f8f8 100%);\n  background-image:         linear-gradient(to bottom, #fff 0%, #f8f8f8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffffff', endColorstr='#fff8f8f8', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .15), 0 1px 5px rgba(0, 0, 0, .075);\n}\n.navbar-default .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f3f3f3 100%);\n  background-image:         linear-gradient(to bottom, #ebebeb 0%, #f3f3f3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff3f3f3', GradientType=0);\n  background-repeat: repeat-x;\n  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 3px 9px rgba(0, 0, 0, .075);\n}\n.navbar-brand,\n.navbar-nav > li > a {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, .25);\n}\n.navbar-inverse {\n  background-image: 
-webkit-linear-gradient(top, #3c3c3c 0%, #222 100%);\n  background-image:         linear-gradient(to bottom, #3c3c3c 0%, #222 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff3c3c3c', endColorstr='#ff222222', GradientType=0);\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n  background-repeat: repeat-x;\n}\n.navbar-inverse .navbar-nav > .active > a {\n  background-image: -webkit-linear-gradient(top, #222 0%, #282828 100%);\n  background-image:         linear-gradient(to bottom, #222 0%, #282828 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff222222', endColorstr='#ff282828', GradientType=0);\n  background-repeat: repeat-x;\n  -webkit-box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25);\n          box-shadow: inset 0 3px 9px rgba(0, 0, 0, .25);\n}\n.navbar-inverse .navbar-brand,\n.navbar-inverse .navbar-nav > li > a {\n  text-shadow: 0 -1px 0 rgba(0, 0, 0, .25);\n}\n.navbar-static-top,\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  border-radius: 0;\n}\n.alert {\n  text-shadow: 0 1px 0 rgba(255, 255, 255, .2);\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);\n          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .25), 0 1px 2px rgba(0, 0, 0, .05);\n}\n.alert-success {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #c8e5bc 100%);\n  background-image:         linear-gradient(to bottom, #dff0d8 0%, #c8e5bc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffc8e5bc', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #b2dba1;\n}\n.alert-info {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #b9def0 100%);\n  background-image:         linear-gradient(to bottom, #d9edf7 0%, #b9def0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffb9def0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #9acfea;\n}\n.alert-warning {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #f8efc0 100%);\n  background-image:         linear-gradient(to bottom, #fcf8e3 0%, #f8efc0 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fff8efc0', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #f5e79e;\n}\n.alert-danger {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #e7c3c3 100%);\n  background-image:         linear-gradient(to bottom, #f2dede 0%, #e7c3c3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffe7c3c3', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dca7a7;\n}\n.progress {\n  background-image: -webkit-linear-gradient(top, #ebebeb 0%, #f5f5f5 100%);\n  background-image:         linear-gradient(to bottom, #ebebeb 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffebebeb', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar {\n  background-image: -webkit-linear-gradient(top, #428bca 0%, #3071a9 100%);\n  background-image:         linear-gradient(to bottom, #428bca 0%, #3071a9 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3071a9', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-success {\n  background-image: -webkit-linear-gradient(top, #5cb85c 0%, #449d44 100%);\n  background-image:         linear-gradient(to 
bottom, #5cb85c 0%, #449d44 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5cb85c', endColorstr='#ff449d44', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-info {\n  background-image: -webkit-linear-gradient(top, #5bc0de 0%, #31b0d5 100%);\n  background-image:         linear-gradient(to bottom, #5bc0de 0%, #31b0d5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff5bc0de', endColorstr='#ff31b0d5', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-warning {\n  background-image: -webkit-linear-gradient(top, #f0ad4e 0%, #ec971f 100%);\n  background-image:         linear-gradient(to bottom, #f0ad4e 0%, #ec971f 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff0ad4e', endColorstr='#ffec971f', GradientType=0);\n  background-repeat: repeat-x;\n}\n.progress-bar-danger {\n  background-image: -webkit-linear-gradient(top, #d9534f 0%, #c9302c 100%);\n  background-image:         linear-gradient(to bottom, #d9534f 0%, #c9302c 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9534f', endColorstr='#ffc9302c', GradientType=0);\n  background-repeat: repeat-x;\n}\n.list-group {\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .075);\n          box-shadow: 0 1px 2px rgba(0, 0, 0, .075);\n}\n.list-group-item.active,\n.list-group-item.active:hover,\n.list-group-item.active:focus {\n  text-shadow: 0 -1px 0 #3071a9;\n  background-image: -webkit-linear-gradient(top, #428bca 0%, #3278b3 100%);\n  background-image:         linear-gradient(to bottom, #428bca 0%, #3278b3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff3278b3', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #3278b3;\n}\n.panel {\n  -webkit-box-shadow: 0 1px 2px rgba(0, 0, 0, .05);\n          box-shadow: 0 1px 2px rgba(0, 0, 0, .05);\n}\n.panel-default > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f5f5f5 0%, #e8e8e8 100%);\n  background-image:         linear-gradient(to bottom, #f5f5f5 0%, #e8e8e8 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff5f5f5', endColorstr='#ffe8e8e8', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-primary > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #428bca 0%, #357ebd 100%);\n  background-image:         linear-gradient(to bottom, #428bca 0%, #357ebd 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ff428bca', endColorstr='#ff357ebd', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-success > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #dff0d8 0%, #d0e9c6 100%);\n  background-image:         linear-gradient(to bottom, #dff0d8 0%, #d0e9c6 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffdff0d8', endColorstr='#ffd0e9c6', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-info > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #d9edf7 0%, #c4e3f3 100%);\n  background-image:         linear-gradient(to bottom, #d9edf7 0%, #c4e3f3 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffd9edf7', endColorstr='#ffc4e3f3', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-warning > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #fcf8e3 0%, #faf2cc 100%);\n  background-image:         linear-gradient(to bottom, #fcf8e3 0%, #faf2cc 
100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fffcf8e3', endColorstr='#fffaf2cc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.panel-danger > .panel-heading {\n  background-image: -webkit-linear-gradient(top, #f2dede 0%, #ebcccc 100%);\n  background-image:         linear-gradient(to bottom, #f2dede 0%, #ebcccc 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#fff2dede', endColorstr='#ffebcccc', GradientType=0);\n  background-repeat: repeat-x;\n}\n.well {\n  background-image: -webkit-linear-gradient(top, #e8e8e8 0%, #f5f5f5 100%);\n  background-image:         linear-gradient(to bottom, #e8e8e8 0%, #f5f5f5 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffe8e8e8', endColorstr='#fff5f5f5', GradientType=0);\n  background-repeat: repeat-x;\n  border-color: #dcdcdc;\n  -webkit-box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);\n          box-shadow: inset 0 1px 3px rgba(0, 0, 0, .05), 0 1px 0 rgba(255, 255, 255, .1);\n}\n/*# sourceMappingURL=bootstrap-theme.css.map */\n"
  },
  {
    "path": "doc/css/bootstrap.css",
    "content": "/*!\n * Bootstrap v3.1.0 (http://getbootstrap.com)\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n\n/*! normalize.css v3.0.0 | MIT License | git.io/normalize */\nhtml {\n  font-family: sans-serif;\n  -webkit-text-size-adjust: 100%;\n      -ms-text-size-adjust: 100%;\n}\nbody {\n  margin: 0;\n}\narticle,\naside,\ndetails,\nfigcaption,\nfigure,\nfooter,\nheader,\nhgroup,\nmain,\nnav,\nsection,\nsummary {\n  display: block;\n}\naudio,\ncanvas,\nprogress,\nvideo {\n  display: inline-block;\n  vertical-align: baseline;\n}\naudio:not([controls]) {\n  display: none;\n  height: 0;\n}\n[hidden],\ntemplate {\n  display: none;\n}\na {\n  background: transparent;\n}\na:active,\na:hover {\n  outline: 0;\n}\nabbr[title] {\n  border-bottom: 1px dotted;\n}\nb,\nstrong {\n  font-weight: bold;\n}\ndfn {\n  font-style: italic;\n}\nh1 {\n  margin: .67em 0;\n  font-size: 2em;\n}\nmark {\n  color: #000;\n  background: #ff0;\n}\nsmall {\n  font-size: 80%;\n}\nsub,\nsup {\n  position: relative;\n  font-size: 75%;\n  line-height: 0;\n  vertical-align: baseline;\n}\nsup {\n  top: -.5em;\n}\nsub {\n  bottom: -.25em;\n}\nimg {\n  border: 0;\n}\nsvg:not(:root) {\n  overflow: hidden;\n}\nfigure {\n  margin: 1em 40px;\n}\nhr {\n  height: 0;\n  -moz-box-sizing: content-box;\n       box-sizing: content-box;\n}\npre {\n  overflow: auto;\n}\ncode,\nkbd,\npre,\nsamp {\n  font-family: monospace, monospace;\n  font-size: 1em;\n}\nbutton,\ninput,\noptgroup,\nselect,\ntextarea {\n  margin: 0;\n  font: inherit;\n  color: inherit;\n}\nbutton {\n  overflow: visible;\n}\nbutton,\nselect {\n  text-transform: none;\n}\nbutton,\nhtml input[type=\"button\"],\ninput[type=\"reset\"],\ninput[type=\"submit\"] {\n  -webkit-appearance: button;\n  cursor: pointer;\n}\nbutton[disabled],\nhtml input[disabled] {\n  cursor: default;\n}\nbutton::-moz-focus-inner,\ninput::-moz-focus-inner {\n  padding: 0;\n  border: 0;\n}\ninput {\n  line-height: normal;\n}\ninput[type=\"checkbox\"],\ninput[type=\"radio\"] {\n  box-sizing: border-box;\n  padding: 0;\n}\ninput[type=\"number\"]::-webkit-inner-spin-button,\ninput[type=\"number\"]::-webkit-outer-spin-button {\n  height: auto;\n}\ninput[type=\"search\"] {\n  -webkit-box-sizing: content-box;\n     -moz-box-sizing: content-box;\n          box-sizing: content-box;\n  -webkit-appearance: textfield;\n}\ninput[type=\"search\"]::-webkit-search-cancel-button,\ninput[type=\"search\"]::-webkit-search-decoration {\n  -webkit-appearance: none;\n}\nfieldset {\n  padding: .35em .625em .75em;\n  margin: 0 2px;\n  border: 1px solid #c0c0c0;\n}\nlegend {\n  padding: 0;\n  border: 0;\n}\ntextarea {\n  overflow: auto;\n}\noptgroup {\n  font-weight: bold;\n}\ntable {\n  border-spacing: 0;\n  border-collapse: collapse;\n}\ntd,\nth {\n  padding: 0;\n}\n@media print {\n  * {\n    color: #000 !important;\n    text-shadow: none !important;\n    background: transparent !important;\n    box-shadow: none !important;\n  }\n  a,\n  a:visited {\n    text-decoration: underline;\n  }\n  a[href]:after {\n    content: \" (\" attr(href) \")\";\n  }\n  abbr[title]:after {\n    content: \" (\" attr(title) \")\";\n  }\n  a[href^=\"javascript:\"]:after,\n  a[href^=\"#\"]:after {\n    content: \"\";\n  }\n  pre,\n  blockquote {\n    border: 1px solid #999;\n\n    page-break-inside: avoid;\n  }\n  thead {\n    display: table-header-group;\n  }\n  tr,\n  img {\n    page-break-inside: avoid;\n  }\n  img {\n    max-width: 100% !important;\n  }\n  p,\n  h2,\n 
 h3 {\n    orphans: 3;\n    widows: 3;\n  }\n  h2,\n  h3 {\n    page-break-after: avoid;\n  }\n  select {\n    background: #fff !important;\n  }\n  .navbar {\n    display: none;\n  }\n  .table td,\n  .table th {\n    background-color: #fff !important;\n  }\n  .btn > .caret,\n  .dropup > .btn > .caret {\n    border-top-color: #000 !important;\n  }\n  .label {\n    border: 1px solid #000;\n  }\n  .table {\n    border-collapse: collapse !important;\n  }\n  .table-bordered th,\n  .table-bordered td {\n    border: 1px solid #ddd !important;\n  }\n}\n* {\n  -webkit-box-sizing: border-box;\n     -moz-box-sizing: border-box;\n          box-sizing: border-box;\n}\n*:before,\n*:after {\n  -webkit-box-sizing: border-box;\n     -moz-box-sizing: border-box;\n          box-sizing: border-box;\n}\nhtml {\n  font-size: 62.5%;\n\n  -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\nbody {\n  font-family: \"Helvetica Neue\", Helvetica, Arial, sans-serif;\n  font-size: 14px;\n  line-height: 1.428571429;\n  color: #333;\n  background-color: #fff;\n}\ninput,\nbutton,\nselect,\ntextarea {\n  font-family: inherit;\n  font-size: inherit;\n  line-height: inherit;\n}\na {\n  color: #428bca;\n  text-decoration: none;\n}\na:hover,\na:focus {\n  color: #2a6496;\n  text-decoration: underline;\n}\na:focus {\n  outline: thin dotted;\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\nfigure {\n  margin: 0;\n}\nimg {\n  vertical-align: middle;\n}\n.img-responsive {\n  display: block;\n  max-width: 100%;\n  height: auto;\n}\n.img-rounded {\n  border-radius: 6px;\n}\n.img-thumbnail {\n  display: inline-block;\n  max-width: 100%;\n  height: auto;\n  padding: 4px;\n  line-height: 1.428571429;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: all .2s ease-in-out;\n          transition: all .2s ease-in-out;\n}\n.img-circle {\n  border-radius: 50%;\n}\nhr {\n  margin-top: 20px;\n  margin-bottom: 20px;\n  border: 0;\n  border-top: 1px solid #eee;\n}\n.sr-only {\n  position: absolute;\n  width: 1px;\n  height: 1px;\n  padding: 0;\n  margin: -1px;\n  overflow: hidden;\n  clip: rect(0, 0, 0, 0);\n  border: 0;\n}\nh1,\nh2,\nh3,\nh4,\nh5,\nh6,\n.h1,\n.h2,\n.h3,\n.h4,\n.h5,\n.h6 {\n  font-family: inherit;\n  font-weight: 500;\n  line-height: 1.1;\n  color: inherit;\n}\nh1 small,\nh2 small,\nh3 small,\nh4 small,\nh5 small,\nh6 small,\n.h1 small,\n.h2 small,\n.h3 small,\n.h4 small,\n.h5 small,\n.h6 small,\nh1 .small,\nh2 .small,\nh3 .small,\nh4 .small,\nh5 .small,\nh6 .small,\n.h1 .small,\n.h2 .small,\n.h3 .small,\n.h4 .small,\n.h5 .small,\n.h6 .small {\n  font-weight: normal;\n  line-height: 1;\n  color: #999;\n}\nh1,\n.h1,\nh2,\n.h2,\nh3,\n.h3 {\n  margin-top: 20px;\n  margin-bottom: 10px;\n}\nh1 small,\n.h1 small,\nh2 small,\n.h2 small,\nh3 small,\n.h3 small,\nh1 .small,\n.h1 .small,\nh2 .small,\n.h2 .small,\nh3 .small,\n.h3 .small {\n  font-size: 65%;\n}\nh4,\n.h4,\nh5,\n.h5,\nh6,\n.h6 {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\nh4 small,\n.h4 small,\nh5 small,\n.h5 small,\nh6 small,\n.h6 small,\nh4 .small,\n.h4 .small,\nh5 .small,\n.h5 .small,\nh6 .small,\n.h6 .small {\n  font-size: 75%;\n}\nh1,\n.h1 {\n  font-size: 36px;\n}\nh2,\n.h2 {\n  font-size: 30px;\n}\nh3,\n.h3 {\n  font-size: 24px;\n}\nh4,\n.h4 {\n  font-size: 18px;\n}\nh5,\n.h5 {\n  font-size: 14px;\n}\nh6,\n.h6 {\n  font-size: 12px;\n}\np {\n  margin: 0 0 10px;\n}\n.lead {\n  margin-bottom: 20px;\n  font-size: 16px;\n  font-weight: 200;\n  line-height: 1.4;\n}\n@media (min-width: 768px) {\n  
.lead {\n    font-size: 21px;\n  }\n}\nsmall,\n.small {\n  font-size: 85%;\n}\ncite {\n  font-style: normal;\n}\n.text-left {\n  text-align: left;\n}\n.text-right {\n  text-align: right;\n}\n.text-center {\n  text-align: center;\n}\n.text-justify {\n  text-align: justify;\n}\n.text-muted {\n  color: #999;\n}\n.text-primary {\n  color: #428bca;\n}\na.text-primary:hover {\n  color: #3071a9;\n}\n.text-success {\n  color: #3c763d;\n}\na.text-success:hover {\n  color: #2b542c;\n}\n.text-info {\n  color: #31708f;\n}\na.text-info:hover {\n  color: #245269;\n}\n.text-warning {\n  color: #8a6d3b;\n}\na.text-warning:hover {\n  color: #66512c;\n}\n.text-danger {\n  color: #a94442;\n}\na.text-danger:hover {\n  color: #843534;\n}\n.bg-primary {\n  color: #fff;\n  background-color: #428bca;\n}\na.bg-primary:hover {\n  background-color: #3071a9;\n}\n.bg-success {\n  background-color: #dff0d8;\n}\na.bg-success:hover {\n  background-color: #c1e2b3;\n}\n.bg-info {\n  background-color: #d9edf7;\n}\na.bg-info:hover {\n  background-color: #afd9ee;\n}\n.bg-warning {\n  background-color: #fcf8e3;\n}\na.bg-warning:hover {\n  background-color: #f7ecb5;\n}\n.bg-danger {\n  background-color: #f2dede;\n}\na.bg-danger:hover {\n  background-color: #e4b9b9;\n}\n.page-header {\n  padding-bottom: 9px;\n  margin: 40px 0 20px;\n  border-bottom: 1px solid #eee;\n}\nul,\nol {\n  margin-top: 0;\n  margin-bottom: 10px;\n}\nul ul,\nol ul,\nul ol,\nol ol {\n  margin-bottom: 0;\n}\n.list-unstyled {\n  padding-left: 0;\n  list-style: none;\n}\n.list-inline {\n  padding-left: 0;\n  list-style: none;\n}\n.list-inline > li {\n  display: inline-block;\n  padding-right: 5px;\n  padding-left: 5px;\n}\n.list-inline > li:first-child {\n  padding-left: 0;\n}\ndl {\n  margin-top: 0;\n  margin-bottom: 20px;\n}\ndt,\ndd {\n  line-height: 1.428571429;\n}\ndt {\n  font-weight: bold;\n}\ndd {\n  margin-left: 0;\n}\n@media (min-width: 768px) {\n  .dl-horizontal dt {\n    float: left;\n    width: 160px;\n    overflow: hidden;\n    clear: left;\n    text-align: right;\n    text-overflow: ellipsis;\n    white-space: nowrap;\n  }\n  .dl-horizontal dd {\n    margin-left: 180px;\n  }\n}\nabbr[title],\nabbr[data-original-title] {\n  cursor: help;\n  border-bottom: 1px dotted #999;\n}\n.initialism {\n  font-size: 90%;\n  text-transform: uppercase;\n}\nblockquote {\n  padding: 10px 20px;\n  margin: 0 0 20px;\n  font-size: 17.5px;\n  border-left: 5px solid #eee;\n}\nblockquote p:last-child,\nblockquote ul:last-child,\nblockquote ol:last-child {\n  margin-bottom: 0;\n}\nblockquote footer,\nblockquote small,\nblockquote .small {\n  display: block;\n  font-size: 80%;\n  line-height: 1.428571429;\n  color: #999;\n}\nblockquote footer:before,\nblockquote small:before,\nblockquote .small:before {\n  content: '\\2014 \\00A0';\n}\n.blockquote-reverse,\nblockquote.pull-right {\n  padding-right: 15px;\n  padding-left: 0;\n  text-align: right;\n  border-right: 5px solid #eee;\n  border-left: 0;\n}\n.blockquote-reverse footer:before,\nblockquote.pull-right footer:before,\n.blockquote-reverse small:before,\nblockquote.pull-right small:before,\n.blockquote-reverse .small:before,\nblockquote.pull-right .small:before {\n  content: '';\n}\n.blockquote-reverse footer:after,\nblockquote.pull-right footer:after,\n.blockquote-reverse small:after,\nblockquote.pull-right small:after,\n.blockquote-reverse .small:after,\nblockquote.pull-right .small:after {\n  content: '\\00A0 \\2014';\n}\nblockquote:before,\nblockquote:after {\n  content: \"\";\n}\naddress {\n  margin-bottom: 
20px;\n  font-style: normal;\n  line-height: 1.428571429;\n}\ncode,\nkbd,\npre,\nsamp {\n  font-family: Menlo, Monaco, Consolas, \"Courier New\", monospace;\n}\ncode {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: #c7254e;\n  white-space: nowrap;\n  background-color: #f9f2f4;\n  border-radius: 4px;\n}\nkbd {\n  padding: 2px 4px;\n  font-size: 90%;\n  color: #fff;\n  background-color: #333;\n  border-radius: 3px;\n  box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .25);\n}\npre {\n  display: block;\n  padding: 9.5px;\n  margin: 0 0 10px;\n  font-size: 13px;\n  line-height: 1.428571429;\n  color: #333;\n  word-break: break-all;\n  word-wrap: break-word;\n  background-color: #f5f5f5;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\npre code {\n  padding: 0;\n  font-size: inherit;\n  color: inherit;\n  white-space: pre-wrap;\n  background-color: transparent;\n  border-radius: 0;\n}\n.pre-scrollable {\n  max-height: 340px;\n  overflow-y: scroll;\n}\n.container {\n  padding-right: 15px;\n  padding-left: 15px;\n  margin-right: auto;\n  margin-left: auto;\n}\n@media (min-width: 768px) {\n  .container {\n    width: 750px;\n  }\n}\n@media (min-width: 992px) {\n  .container {\n    width: 970px;\n  }\n}\n@media (min-width: 1200px) {\n  .container {\n    width: 1170px;\n  }\n}\n.container-fluid {\n  padding-right: 15px;\n  padding-left: 15px;\n  margin-right: auto;\n  margin-left: auto;\n}\n.row {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n.col-xs-1, .col-sm-1, .col-md-1, .col-lg-1, .col-xs-2, .col-sm-2, .col-md-2, .col-lg-2, .col-xs-3, .col-sm-3, .col-md-3, .col-lg-3, .col-xs-4, .col-sm-4, .col-md-4, .col-lg-4, .col-xs-5, .col-sm-5, .col-md-5, .col-lg-5, .col-xs-6, .col-sm-6, .col-md-6, .col-lg-6, .col-xs-7, .col-sm-7, .col-md-7, .col-lg-7, .col-xs-8, .col-sm-8, .col-md-8, .col-lg-8, .col-xs-9, .col-sm-9, .col-md-9, .col-lg-9, .col-xs-10, .col-sm-10, .col-md-10, .col-lg-10, .col-xs-11, .col-sm-11, .col-md-11, .col-lg-11, .col-xs-12, .col-sm-12, .col-md-12, .col-lg-12 {\n  position: relative;\n  min-height: 1px;\n  padding-right: 15px;\n  padding-left: 15px;\n}\n.col-xs-1, .col-xs-2, .col-xs-3, .col-xs-4, .col-xs-5, .col-xs-6, .col-xs-7, .col-xs-8, .col-xs-9, .col-xs-10, .col-xs-11, .col-xs-12 {\n  float: left;\n}\n.col-xs-12 {\n  width: 100%;\n}\n.col-xs-11 {\n  width: 91.66666666666666%;\n}\n.col-xs-10 {\n  width: 83.33333333333334%;\n}\n.col-xs-9 {\n  width: 75%;\n}\n.col-xs-8 {\n  width: 66.66666666666666%;\n}\n.col-xs-7 {\n  width: 58.333333333333336%;\n}\n.col-xs-6 {\n  width: 50%;\n}\n.col-xs-5 {\n  width: 41.66666666666667%;\n}\n.col-xs-4 {\n  width: 33.33333333333333%;\n}\n.col-xs-3 {\n  width: 25%;\n}\n.col-xs-2 {\n  width: 16.666666666666664%;\n}\n.col-xs-1 {\n  width: 8.333333333333332%;\n}\n.col-xs-pull-12 {\n  right: 100%;\n}\n.col-xs-pull-11 {\n  right: 91.66666666666666%;\n}\n.col-xs-pull-10 {\n  right: 83.33333333333334%;\n}\n.col-xs-pull-9 {\n  right: 75%;\n}\n.col-xs-pull-8 {\n  right: 66.66666666666666%;\n}\n.col-xs-pull-7 {\n  right: 58.333333333333336%;\n}\n.col-xs-pull-6 {\n  right: 50%;\n}\n.col-xs-pull-5 {\n  right: 41.66666666666667%;\n}\n.col-xs-pull-4 {\n  right: 33.33333333333333%;\n}\n.col-xs-pull-3 {\n  right: 25%;\n}\n.col-xs-pull-2 {\n  right: 16.666666666666664%;\n}\n.col-xs-pull-1 {\n  right: 8.333333333333332%;\n}\n.col-xs-pull-0 {\n  right: 0;\n}\n.col-xs-push-12 {\n  left: 100%;\n}\n.col-xs-push-11 {\n  left: 91.66666666666666%;\n}\n.col-xs-push-10 {\n  left: 83.33333333333334%;\n}\n.col-xs-push-9 {\n  left: 75%;\n}\n.col-xs-push-8 {\n  left: 
66.66666666666666%;\n}\n.col-xs-push-7 {\n  left: 58.333333333333336%;\n}\n.col-xs-push-6 {\n  left: 50%;\n}\n.col-xs-push-5 {\n  left: 41.66666666666667%;\n}\n.col-xs-push-4 {\n  left: 33.33333333333333%;\n}\n.col-xs-push-3 {\n  left: 25%;\n}\n.col-xs-push-2 {\n  left: 16.666666666666664%;\n}\n.col-xs-push-1 {\n  left: 8.333333333333332%;\n}\n.col-xs-push-0 {\n  left: 0;\n}\n.col-xs-offset-12 {\n  margin-left: 100%;\n}\n.col-xs-offset-11 {\n  margin-left: 91.66666666666666%;\n}\n.col-xs-offset-10 {\n  margin-left: 83.33333333333334%;\n}\n.col-xs-offset-9 {\n  margin-left: 75%;\n}\n.col-xs-offset-8 {\n  margin-left: 66.66666666666666%;\n}\n.col-xs-offset-7 {\n  margin-left: 58.333333333333336%;\n}\n.col-xs-offset-6 {\n  margin-left: 50%;\n}\n.col-xs-offset-5 {\n  margin-left: 41.66666666666667%;\n}\n.col-xs-offset-4 {\n  margin-left: 33.33333333333333%;\n}\n.col-xs-offset-3 {\n  margin-left: 25%;\n}\n.col-xs-offset-2 {\n  margin-left: 16.666666666666664%;\n}\n.col-xs-offset-1 {\n  margin-left: 8.333333333333332%;\n}\n.col-xs-offset-0 {\n  margin-left: 0;\n}\n@media (min-width: 768px) {\n  .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12 {\n    float: left;\n  }\n  .col-sm-12 {\n    width: 100%;\n  }\n  .col-sm-11 {\n    width: 91.66666666666666%;\n  }\n  .col-sm-10 {\n    width: 83.33333333333334%;\n  }\n  .col-sm-9 {\n    width: 75%;\n  }\n  .col-sm-8 {\n    width: 66.66666666666666%;\n  }\n  .col-sm-7 {\n    width: 58.333333333333336%;\n  }\n  .col-sm-6 {\n    width: 50%;\n  }\n  .col-sm-5 {\n    width: 41.66666666666667%;\n  }\n  .col-sm-4 {\n    width: 33.33333333333333%;\n  }\n  .col-sm-3 {\n    width: 25%;\n  }\n  .col-sm-2 {\n    width: 16.666666666666664%;\n  }\n  .col-sm-1 {\n    width: 8.333333333333332%;\n  }\n  .col-sm-pull-12 {\n    right: 100%;\n  }\n  .col-sm-pull-11 {\n    right: 91.66666666666666%;\n  }\n  .col-sm-pull-10 {\n    right: 83.33333333333334%;\n  }\n  .col-sm-pull-9 {\n    right: 75%;\n  }\n  .col-sm-pull-8 {\n    right: 66.66666666666666%;\n  }\n  .col-sm-pull-7 {\n    right: 58.333333333333336%;\n  }\n  .col-sm-pull-6 {\n    right: 50%;\n  }\n  .col-sm-pull-5 {\n    right: 41.66666666666667%;\n  }\n  .col-sm-pull-4 {\n    right: 33.33333333333333%;\n  }\n  .col-sm-pull-3 {\n    right: 25%;\n  }\n  .col-sm-pull-2 {\n    right: 16.666666666666664%;\n  }\n  .col-sm-pull-1 {\n    right: 8.333333333333332%;\n  }\n  .col-sm-pull-0 {\n    right: 0;\n  }\n  .col-sm-push-12 {\n    left: 100%;\n  }\n  .col-sm-push-11 {\n    left: 91.66666666666666%;\n  }\n  .col-sm-push-10 {\n    left: 83.33333333333334%;\n  }\n  .col-sm-push-9 {\n    left: 75%;\n  }\n  .col-sm-push-8 {\n    left: 66.66666666666666%;\n  }\n  .col-sm-push-7 {\n    left: 58.333333333333336%;\n  }\n  .col-sm-push-6 {\n    left: 50%;\n  }\n  .col-sm-push-5 {\n    left: 41.66666666666667%;\n  }\n  .col-sm-push-4 {\n    left: 33.33333333333333%;\n  }\n  .col-sm-push-3 {\n    left: 25%;\n  }\n  .col-sm-push-2 {\n    left: 16.666666666666664%;\n  }\n  .col-sm-push-1 {\n    left: 8.333333333333332%;\n  }\n  .col-sm-push-0 {\n    left: 0;\n  }\n  .col-sm-offset-12 {\n    margin-left: 100%;\n  }\n  .col-sm-offset-11 {\n    margin-left: 91.66666666666666%;\n  }\n  .col-sm-offset-10 {\n    margin-left: 83.33333333333334%;\n  }\n  .col-sm-offset-9 {\n    margin-left: 75%;\n  }\n  .col-sm-offset-8 {\n    margin-left: 66.66666666666666%;\n  }\n  .col-sm-offset-7 {\n    margin-left: 58.333333333333336%;\n  }\n  .col-sm-offset-6 {\n    
margin-left: 50%;\n  }\n  .col-sm-offset-5 {\n    margin-left: 41.66666666666667%;\n  }\n  .col-sm-offset-4 {\n    margin-left: 33.33333333333333%;\n  }\n  .col-sm-offset-3 {\n    margin-left: 25%;\n  }\n  .col-sm-offset-2 {\n    margin-left: 16.666666666666664%;\n  }\n  .col-sm-offset-1 {\n    margin-left: 8.333333333333332%;\n  }\n  .col-sm-offset-0 {\n    margin-left: 0;\n  }\n}\n@media (min-width: 992px) {\n  .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12 {\n    float: left;\n  }\n  .col-md-12 {\n    width: 100%;\n  }\n  .col-md-11 {\n    width: 91.66666666666666%;\n  }\n  .col-md-10 {\n    width: 83.33333333333334%;\n  }\n  .col-md-9 {\n    width: 75%;\n  }\n  .col-md-8 {\n    width: 66.66666666666666%;\n  }\n  .col-md-7 {\n    width: 58.333333333333336%;\n  }\n  .col-md-6 {\n    width: 50%;\n  }\n  .col-md-5 {\n    width: 41.66666666666667%;\n  }\n  .col-md-4 {\n    width: 33.33333333333333%;\n  }\n  .col-md-3 {\n    width: 25%;\n  }\n  .col-md-2 {\n    width: 16.666666666666664%;\n  }\n  .col-md-1 {\n    width: 8.333333333333332%;\n  }\n  .col-md-pull-12 {\n    right: 100%;\n  }\n  .col-md-pull-11 {\n    right: 91.66666666666666%;\n  }\n  .col-md-pull-10 {\n    right: 83.33333333333334%;\n  }\n  .col-md-pull-9 {\n    right: 75%;\n  }\n  .col-md-pull-8 {\n    right: 66.66666666666666%;\n  }\n  .col-md-pull-7 {\n    right: 58.333333333333336%;\n  }\n  .col-md-pull-6 {\n    right: 50%;\n  }\n  .col-md-pull-5 {\n    right: 41.66666666666667%;\n  }\n  .col-md-pull-4 {\n    right: 33.33333333333333%;\n  }\n  .col-md-pull-3 {\n    right: 25%;\n  }\n  .col-md-pull-2 {\n    right: 16.666666666666664%;\n  }\n  .col-md-pull-1 {\n    right: 8.333333333333332%;\n  }\n  .col-md-pull-0 {\n    right: 0;\n  }\n  .col-md-push-12 {\n    left: 100%;\n  }\n  .col-md-push-11 {\n    left: 91.66666666666666%;\n  }\n  .col-md-push-10 {\n    left: 83.33333333333334%;\n  }\n  .col-md-push-9 {\n    left: 75%;\n  }\n  .col-md-push-8 {\n    left: 66.66666666666666%;\n  }\n  .col-md-push-7 {\n    left: 58.333333333333336%;\n  }\n  .col-md-push-6 {\n    left: 50%;\n  }\n  .col-md-push-5 {\n    left: 41.66666666666667%;\n  }\n  .col-md-push-4 {\n    left: 33.33333333333333%;\n  }\n  .col-md-push-3 {\n    left: 25%;\n  }\n  .col-md-push-2 {\n    left: 16.666666666666664%;\n  }\n  .col-md-push-1 {\n    left: 8.333333333333332%;\n  }\n  .col-md-push-0 {\n    left: 0;\n  }\n  .col-md-offset-12 {\n    margin-left: 100%;\n  }\n  .col-md-offset-11 {\n    margin-left: 91.66666666666666%;\n  }\n  .col-md-offset-10 {\n    margin-left: 83.33333333333334%;\n  }\n  .col-md-offset-9 {\n    margin-left: 75%;\n  }\n  .col-md-offset-8 {\n    margin-left: 66.66666666666666%;\n  }\n  .col-md-offset-7 {\n    margin-left: 58.333333333333336%;\n  }\n  .col-md-offset-6 {\n    margin-left: 50%;\n  }\n  .col-md-offset-5 {\n    margin-left: 41.66666666666667%;\n  }\n  .col-md-offset-4 {\n    margin-left: 33.33333333333333%;\n  }\n  .col-md-offset-3 {\n    margin-left: 25%;\n  }\n  .col-md-offset-2 {\n    margin-left: 16.666666666666664%;\n  }\n  .col-md-offset-1 {\n    margin-left: 8.333333333333332%;\n  }\n  .col-md-offset-0 {\n    margin-left: 0;\n  }\n}\n@media (min-width: 1200px) {\n  .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12 {\n    float: left;\n  }\n  .col-lg-12 {\n    width: 100%;\n  }\n  .col-lg-11 {\n    width: 91.66666666666666%;\n  }\n  .col-lg-10 {\n    
width: 83.33333333333334%;\n  }\n  .col-lg-9 {\n    width: 75%;\n  }\n  .col-lg-8 {\n    width: 66.66666666666666%;\n  }\n  .col-lg-7 {\n    width: 58.333333333333336%;\n  }\n  .col-lg-6 {\n    width: 50%;\n  }\n  .col-lg-5 {\n    width: 41.66666666666667%;\n  }\n  .col-lg-4 {\n    width: 33.33333333333333%;\n  }\n  .col-lg-3 {\n    width: 25%;\n  }\n  .col-lg-2 {\n    width: 16.666666666666664%;\n  }\n  .col-lg-1 {\n    width: 8.333333333333332%;\n  }\n  .col-lg-pull-12 {\n    right: 100%;\n  }\n  .col-lg-pull-11 {\n    right: 91.66666666666666%;\n  }\n  .col-lg-pull-10 {\n    right: 83.33333333333334%;\n  }\n  .col-lg-pull-9 {\n    right: 75%;\n  }\n  .col-lg-pull-8 {\n    right: 66.66666666666666%;\n  }\n  .col-lg-pull-7 {\n    right: 58.333333333333336%;\n  }\n  .col-lg-pull-6 {\n    right: 50%;\n  }\n  .col-lg-pull-5 {\n    right: 41.66666666666667%;\n  }\n  .col-lg-pull-4 {\n    right: 33.33333333333333%;\n  }\n  .col-lg-pull-3 {\n    right: 25%;\n  }\n  .col-lg-pull-2 {\n    right: 16.666666666666664%;\n  }\n  .col-lg-pull-1 {\n    right: 8.333333333333332%;\n  }\n  .col-lg-pull-0 {\n    right: 0;\n  }\n  .col-lg-push-12 {\n    left: 100%;\n  }\n  .col-lg-push-11 {\n    left: 91.66666666666666%;\n  }\n  .col-lg-push-10 {\n    left: 83.33333333333334%;\n  }\n  .col-lg-push-9 {\n    left: 75%;\n  }\n  .col-lg-push-8 {\n    left: 66.66666666666666%;\n  }\n  .col-lg-push-7 {\n    left: 58.333333333333336%;\n  }\n  .col-lg-push-6 {\n    left: 50%;\n  }\n  .col-lg-push-5 {\n    left: 41.66666666666667%;\n  }\n  .col-lg-push-4 {\n    left: 33.33333333333333%;\n  }\n  .col-lg-push-3 {\n    left: 25%;\n  }\n  .col-lg-push-2 {\n    left: 16.666666666666664%;\n  }\n  .col-lg-push-1 {\n    left: 8.333333333333332%;\n  }\n  .col-lg-push-0 {\n    left: 0;\n  }\n  .col-lg-offset-12 {\n    margin-left: 100%;\n  }\n  .col-lg-offset-11 {\n    margin-left: 91.66666666666666%;\n  }\n  .col-lg-offset-10 {\n    margin-left: 83.33333333333334%;\n  }\n  .col-lg-offset-9 {\n    margin-left: 75%;\n  }\n  .col-lg-offset-8 {\n    margin-left: 66.66666666666666%;\n  }\n  .col-lg-offset-7 {\n    margin-left: 58.333333333333336%;\n  }\n  .col-lg-offset-6 {\n    margin-left: 50%;\n  }\n  .col-lg-offset-5 {\n    margin-left: 41.66666666666667%;\n  }\n  .col-lg-offset-4 {\n    margin-left: 33.33333333333333%;\n  }\n  .col-lg-offset-3 {\n    margin-left: 25%;\n  }\n  .col-lg-offset-2 {\n    margin-left: 16.666666666666664%;\n  }\n  .col-lg-offset-1 {\n    margin-left: 8.333333333333332%;\n  }\n  .col-lg-offset-0 {\n    margin-left: 0;\n  }\n}\ntable {\n  max-width: 100%;\n  background-color: transparent;\n}\nth {\n  text-align: left;\n}\n.table {\n  width: 100%;\n  margin-bottom: 20px;\n}\n.table > thead > tr > th,\n.table > tbody > tr > th,\n.table > tfoot > tr > th,\n.table > thead > tr > td,\n.table > tbody > tr > td,\n.table > tfoot > tr > td {\n  padding: 8px;\n  line-height: 1.428571429;\n  vertical-align: top;\n  border-top: 1px solid #ddd;\n}\n.table > thead > tr > th {\n  vertical-align: bottom;\n  border-bottom: 2px solid #ddd;\n}\n.table > caption + thead > tr:first-child > th,\n.table > colgroup + thead > tr:first-child > th,\n.table > thead:first-child > tr:first-child > th,\n.table > caption + thead > tr:first-child > td,\n.table > colgroup + thead > tr:first-child > td,\n.table > thead:first-child > tr:first-child > td {\n  border-top: 0;\n}\n.table > tbody + tbody {\n  border-top: 2px solid #ddd;\n}\n.table .table {\n  background-color: #fff;\n}\n.table-condensed > thead > tr > th,\n.table-condensed > 
tbody > tr > th,\n.table-condensed > tfoot > tr > th,\n.table-condensed > thead > tr > td,\n.table-condensed > tbody > tr > td,\n.table-condensed > tfoot > tr > td {\n  padding: 5px;\n}\n.table-bordered {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > tbody > tr > th,\n.table-bordered > tfoot > tr > th,\n.table-bordered > thead > tr > td,\n.table-bordered > tbody > tr > td,\n.table-bordered > tfoot > tr > td {\n  border: 1px solid #ddd;\n}\n.table-bordered > thead > tr > th,\n.table-bordered > thead > tr > td {\n  border-bottom-width: 2px;\n}\n.table-striped > tbody > tr:nth-child(odd) > td,\n.table-striped > tbody > tr:nth-child(odd) > th {\n  background-color: #f9f9f9;\n}\n.table-hover > tbody > tr:hover > td,\n.table-hover > tbody > tr:hover > th {\n  background-color: #f5f5f5;\n}\ntable col[class*=\"col-\"] {\n  position: static;\n  display: table-column;\n  float: none;\n}\ntable td[class*=\"col-\"],\ntable th[class*=\"col-\"] {\n  position: static;\n  display: table-cell;\n  float: none;\n}\n.table > thead > tr > td.active,\n.table > tbody > tr > td.active,\n.table > tfoot > tr > td.active,\n.table > thead > tr > th.active,\n.table > tbody > tr > th.active,\n.table > tfoot > tr > th.active,\n.table > thead > tr.active > td,\n.table > tbody > tr.active > td,\n.table > tfoot > tr.active > td,\n.table > thead > tr.active > th,\n.table > tbody > tr.active > th,\n.table > tfoot > tr.active > th {\n  background-color: #f5f5f5;\n}\n.table-hover > tbody > tr > td.active:hover,\n.table-hover > tbody > tr > th.active:hover,\n.table-hover > tbody > tr.active:hover > td,\n.table-hover > tbody > tr.active:hover > th {\n  background-color: #e8e8e8;\n}\n.table > thead > tr > td.success,\n.table > tbody > tr > td.success,\n.table > tfoot > tr > td.success,\n.table > thead > tr > th.success,\n.table > tbody > tr > th.success,\n.table > tfoot > tr > th.success,\n.table > thead > tr.success > td,\n.table > tbody > tr.success > td,\n.table > tfoot > tr.success > td,\n.table > thead > tr.success > th,\n.table > tbody > tr.success > th,\n.table > tfoot > tr.success > th {\n  background-color: #dff0d8;\n}\n.table-hover > tbody > tr > td.success:hover,\n.table-hover > tbody > tr > th.success:hover,\n.table-hover > tbody > tr.success:hover > td,\n.table-hover > tbody > tr.success:hover > th {\n  background-color: #d0e9c6;\n}\n.table > thead > tr > td.info,\n.table > tbody > tr > td.info,\n.table > tfoot > tr > td.info,\n.table > thead > tr > th.info,\n.table > tbody > tr > th.info,\n.table > tfoot > tr > th.info,\n.table > thead > tr.info > td,\n.table > tbody > tr.info > td,\n.table > tfoot > tr.info > td,\n.table > thead > tr.info > th,\n.table > tbody > tr.info > th,\n.table > tfoot > tr.info > th {\n  background-color: #d9edf7;\n}\n.table-hover > tbody > tr > td.info:hover,\n.table-hover > tbody > tr > th.info:hover,\n.table-hover > tbody > tr.info:hover > td,\n.table-hover > tbody > tr.info:hover > th {\n  background-color: #c4e3f3;\n}\n.table > thead > tr > td.warning,\n.table > tbody > tr > td.warning,\n.table > tfoot > tr > td.warning,\n.table > thead > tr > th.warning,\n.table > tbody > tr > th.warning,\n.table > tfoot > tr > th.warning,\n.table > thead > tr.warning > td,\n.table > tbody > tr.warning > td,\n.table > tfoot > tr.warning > td,\n.table > thead > tr.warning > th,\n.table > tbody > tr.warning > th,\n.table > tfoot > tr.warning > th {\n  background-color: #fcf8e3;\n}\n.table-hover > tbody > tr > td.warning:hover,\n.table-hover > tbody > tr > 
th.warning:hover,\n.table-hover > tbody > tr.warning:hover > td,\n.table-hover > tbody > tr.warning:hover > th {\n  background-color: #faf2cc;\n}\n.table > thead > tr > td.danger,\n.table > tbody > tr > td.danger,\n.table > tfoot > tr > td.danger,\n.table > thead > tr > th.danger,\n.table > tbody > tr > th.danger,\n.table > tfoot > tr > th.danger,\n.table > thead > tr.danger > td,\n.table > tbody > tr.danger > td,\n.table > tfoot > tr.danger > td,\n.table > thead > tr.danger > th,\n.table > tbody > tr.danger > th,\n.table > tfoot > tr.danger > th {\n  background-color: #f2dede;\n}\n.table-hover > tbody > tr > td.danger:hover,\n.table-hover > tbody > tr > th.danger:hover,\n.table-hover > tbody > tr.danger:hover > td,\n.table-hover > tbody > tr.danger:hover > th {\n  background-color: #ebcccc;\n}\n@media (max-width: 767px) {\n  .table-responsive {\n    width: 100%;\n    margin-bottom: 15px;\n    overflow-x: scroll;\n    overflow-y: hidden;\n    -webkit-overflow-scrolling: touch;\n    -ms-overflow-style: -ms-autohiding-scrollbar;\n    border: 1px solid #ddd;\n  }\n  .table-responsive > .table {\n    margin-bottom: 0;\n  }\n  .table-responsive > .table > thead > tr > th,\n  .table-responsive > .table > tbody > tr > th,\n  .table-responsive > .table > tfoot > tr > th,\n  .table-responsive > .table > thead > tr > td,\n  .table-responsive > .table > tbody > tr > td,\n  .table-responsive > .table > tfoot > tr > td {\n    white-space: nowrap;\n  }\n  .table-responsive > .table-bordered {\n    border: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:first-child,\n  .table-responsive > .table-bordered > tbody > tr > th:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n  .table-responsive > .table-bordered > thead > tr > td:first-child,\n  .table-responsive > .table-bordered > tbody > tr > td:first-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n    border-left: 0;\n  }\n  .table-responsive > .table-bordered > thead > tr > th:last-child,\n  .table-responsive > .table-bordered > tbody > tr > th:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n  .table-responsive > .table-bordered > thead > tr > td:last-child,\n  .table-responsive > .table-bordered > tbody > tr > td:last-child,\n  .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n    border-right: 0;\n  }\n  .table-responsive > .table-bordered > tbody > tr:last-child > th,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > th,\n  .table-responsive > .table-bordered > tbody > tr:last-child > td,\n  .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n    border-bottom: 0;\n  }\n}\nfieldset {\n  min-width: 0;\n  padding: 0;\n  margin: 0;\n  border: 0;\n}\nlegend {\n  display: block;\n  width: 100%;\n  padding: 0;\n  margin-bottom: 20px;\n  font-size: 21px;\n  line-height: inherit;\n  color: #333;\n  border: 0;\n  border-bottom: 1px solid #e5e5e5;\n}\nlabel {\n  display: inline-block;\n  margin-bottom: 5px;\n  font-weight: bold;\n}\ninput[type=\"search\"] {\n  -webkit-box-sizing: border-box;\n     -moz-box-sizing: border-box;\n          box-sizing: border-box;\n}\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n  margin: 4px 0 0;\n  margin-top: 1px \\9;\n  /* IE8-9 */\n  line-height: normal;\n}\ninput[type=\"file\"] {\n  display: block;\n}\ninput[type=\"range\"] {\n  display: block;\n  width: 100%;\n}\nselect[multiple],\nselect[size] {\n  height: 
auto;\n}\ninput[type=\"file\"]:focus,\ninput[type=\"radio\"]:focus,\ninput[type=\"checkbox\"]:focus {\n  outline: thin dotted;\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\noutput {\n  display: block;\n  padding-top: 7px;\n  font-size: 14px;\n  line-height: 1.428571429;\n  color: #555;\n}\n.form-control {\n  display: block;\n  width: 100%;\n  height: 34px;\n  padding: 6px 12px;\n  font-size: 14px;\n  line-height: 1.428571429;\n  color: #555;\n  background-color: #fff;\n  background-image: none;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n  -webkit-transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n          transition: border-color ease-in-out .15s, box-shadow ease-in-out .15s;\n}\n.form-control:focus {\n  border-color: #66afe9;\n  outline: 0;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);\n          box-shadow: inset 0 1px 1px rgba(0,0,0,.075), 0 0 8px rgba(102, 175, 233, .6);\n}\n.form-control:-moz-placeholder {\n  color: #999;\n}\n.form-control::-moz-placeholder {\n  color: #999;\n  opacity: 1;\n}\n.form-control:-ms-input-placeholder {\n  color: #999;\n}\n.form-control::-webkit-input-placeholder {\n  color: #999;\n}\n.form-control[disabled],\n.form-control[readonly],\nfieldset[disabled] .form-control {\n  cursor: not-allowed;\n  background-color: #eee;\n  opacity: 1;\n}\ntextarea.form-control {\n  height: auto;\n}\ninput[type=\"date\"] {\n  line-height: 34px;\n}\n.form-group {\n  margin-bottom: 15px;\n}\n.radio,\n.checkbox {\n  display: block;\n  min-height: 20px;\n  padding-left: 20px;\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.radio label,\n.checkbox label {\n  display: inline;\n  font-weight: normal;\n  cursor: pointer;\n}\n.radio input[type=\"radio\"],\n.radio-inline input[type=\"radio\"],\n.checkbox input[type=\"checkbox\"],\n.checkbox-inline input[type=\"checkbox\"] {\n  float: left;\n  margin-left: -20px;\n}\n.radio + .radio,\n.checkbox + .checkbox {\n  margin-top: -5px;\n}\n.radio-inline,\n.checkbox-inline {\n  display: inline-block;\n  padding-left: 20px;\n  margin-bottom: 0;\n  font-weight: normal;\n  vertical-align: middle;\n  cursor: pointer;\n}\n.radio-inline + .radio-inline,\n.checkbox-inline + .checkbox-inline {\n  margin-top: 0;\n  margin-left: 10px;\n}\ninput[type=\"radio\"][disabled],\ninput[type=\"checkbox\"][disabled],\n.radio[disabled],\n.radio-inline[disabled],\n.checkbox[disabled],\n.checkbox-inline[disabled],\nfieldset[disabled] input[type=\"radio\"],\nfieldset[disabled] input[type=\"checkbox\"],\nfieldset[disabled] .radio,\nfieldset[disabled] .radio-inline,\nfieldset[disabled] .checkbox,\nfieldset[disabled] .checkbox-inline {\n  cursor: not-allowed;\n}\n.input-sm {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-sm {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-sm,\nselect[multiple].input-sm {\n  height: auto;\n}\n.input-lg {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.33;\n  border-radius: 6px;\n}\nselect.input-lg {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-lg,\nselect[multiple].input-lg {\n  height: auto;\n}\n.has-feedback {\n  position: relative;\n}\n.has-feedback .form-control {\n  padding-right: 42.5px;\n}\n.has-feedback .form-control-feedback {\n  position: absolute;\n  top: 25px;\n  right: 0;\n  
display: block;\n  width: 34px;\n  height: 34px;\n  line-height: 34px;\n  text-align: center;\n}\n.has-success .help-block,\n.has-success .control-label,\n.has-success .radio,\n.has-success .checkbox,\n.has-success .radio-inline,\n.has-success .checkbox-inline {\n  color: #3c763d;\n}\n.has-success .form-control {\n  border-color: #3c763d;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-success .form-control:focus {\n  border-color: #2b542c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #67b168;\n}\n.has-success .input-group-addon {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #3c763d;\n}\n.has-success .form-control-feedback {\n  color: #3c763d;\n}\n.has-warning .help-block,\n.has-warning .control-label,\n.has-warning .radio,\n.has-warning .checkbox,\n.has-warning .radio-inline,\n.has-warning .checkbox-inline {\n  color: #8a6d3b;\n}\n.has-warning .form-control {\n  border-color: #8a6d3b;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-warning .form-control:focus {\n  border-color: #66512c;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #c0a16b;\n}\n.has-warning .input-group-addon {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #8a6d3b;\n}\n.has-warning .form-control-feedback {\n  color: #8a6d3b;\n}\n.has-error .help-block,\n.has-error .control-label,\n.has-error .radio,\n.has-error .checkbox,\n.has-error .radio-inline,\n.has-error .checkbox-inline {\n  color: #a94442;\n}\n.has-error .form-control {\n  border-color: #a94442;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075);\n}\n.has-error .form-control:focus {\n  border-color: #843534;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .075), 0 0 6px #ce8483;\n}\n.has-error .input-group-addon {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #a94442;\n}\n.has-error .form-control-feedback {\n  color: #a94442;\n}\n.form-control-static {\n  margin-bottom: 0;\n}\n.help-block {\n  display: block;\n  margin-top: 5px;\n  margin-bottom: 10px;\n  color: #737373;\n}\n@media (min-width: 768px) {\n  .form-inline .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .form-inline .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio,\n  .form-inline .checkbox {\n    display: inline-block;\n    padding-left: 0;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .form-inline .radio input[type=\"radio\"],\n  .form-inline .checkbox input[type=\"checkbox\"] {\n    float: none;\n    margin-left: 0;\n  }\n  .form-inline .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n.form-horizontal .control-label,\n.form-horizontal .radio,\n.form-horizontal .checkbox,\n.form-horizontal .radio-inline,\n.form-horizontal .checkbox-inline {\n  padding-top: 7px;\n  margin-top: 0;\n  margin-bottom: 0;\n}\n.form-horizontal .radio,\n.form-horizontal .checkbox {\n  min-height: 
27px;\n}\n.form-horizontal .form-group {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n.form-horizontal .form-control-static {\n  padding-top: 7px;\n}\n@media (min-width: 768px) {\n  .form-horizontal .control-label {\n    text-align: right;\n  }\n}\n.form-horizontal .has-feedback .form-control-feedback {\n  top: 0;\n  right: 15px;\n}\n.btn {\n  display: inline-block;\n  padding: 6px 12px;\n  margin-bottom: 0;\n  font-size: 14px;\n  font-weight: normal;\n  line-height: 1.428571429;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: middle;\n  cursor: pointer;\n  -webkit-user-select: none;\n     -moz-user-select: none;\n      -ms-user-select: none;\n       -o-user-select: none;\n          user-select: none;\n  background-image: none;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.btn:focus {\n  outline: thin dotted;\n  outline: 5px auto -webkit-focus-ring-color;\n  outline-offset: -2px;\n}\n.btn:hover,\n.btn:focus {\n  color: #333;\n  text-decoration: none;\n}\n.btn:active,\n.btn.active {\n  background-image: none;\n  outline: 0;\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n          box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n}\n.btn.disabled,\n.btn[disabled],\nfieldset[disabled] .btn {\n  pointer-events: none;\n  cursor: not-allowed;\n  filter: alpha(opacity=65);\n  -webkit-box-shadow: none;\n          box-shadow: none;\n  opacity: .65;\n}\n.btn-default {\n  color: #333;\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default:hover,\n.btn-default:focus,\n.btn-default:active,\n.btn-default.active,\n.open .dropdown-toggle.btn-default {\n  color: #333;\n  background-color: #ebebeb;\n  border-color: #adadad;\n}\n.btn-default:active,\n.btn-default.active,\n.open .dropdown-toggle.btn-default {\n  background-image: none;\n}\n.btn-default.disabled,\n.btn-default[disabled],\nfieldset[disabled] .btn-default,\n.btn-default.disabled:hover,\n.btn-default[disabled]:hover,\nfieldset[disabled] .btn-default:hover,\n.btn-default.disabled:focus,\n.btn-default[disabled]:focus,\nfieldset[disabled] .btn-default:focus,\n.btn-default.disabled:active,\n.btn-default[disabled]:active,\nfieldset[disabled] .btn-default:active,\n.btn-default.disabled.active,\n.btn-default[disabled].active,\nfieldset[disabled] .btn-default.active {\n  background-color: #fff;\n  border-color: #ccc;\n}\n.btn-default .badge {\n  color: #fff;\n  background-color: #333;\n}\n.btn-primary {\n  color: #fff;\n  background-color: #428bca;\n  border-color: #357ebd;\n}\n.btn-primary:hover,\n.btn-primary:focus,\n.btn-primary:active,\n.btn-primary.active,\n.open .dropdown-toggle.btn-primary {\n  color: #fff;\n  background-color: #3276b1;\n  border-color: #285e8e;\n}\n.btn-primary:active,\n.btn-primary.active,\n.open .dropdown-toggle.btn-primary {\n  background-image: none;\n}\n.btn-primary.disabled,\n.btn-primary[disabled],\nfieldset[disabled] .btn-primary,\n.btn-primary.disabled:hover,\n.btn-primary[disabled]:hover,\nfieldset[disabled] .btn-primary:hover,\n.btn-primary.disabled:focus,\n.btn-primary[disabled]:focus,\nfieldset[disabled] .btn-primary:focus,\n.btn-primary.disabled:active,\n.btn-primary[disabled]:active,\nfieldset[disabled] .btn-primary:active,\n.btn-primary.disabled.active,\n.btn-primary[disabled].active,\nfieldset[disabled] .btn-primary.active {\n  background-color: #428bca;\n  border-color: #357ebd;\n}\n.btn-primary .badge {\n  color: #428bca;\n  background-color: #fff;\n}\n.btn-success {\n  color: #fff;\n  background-color: #5cb85c;\n  border-color: 
#4cae4c;\n}\n.btn-success:hover,\n.btn-success:focus,\n.btn-success:active,\n.btn-success.active,\n.open .dropdown-toggle.btn-success {\n  color: #fff;\n  background-color: #47a447;\n  border-color: #398439;\n}\n.btn-success:active,\n.btn-success.active,\n.open .dropdown-toggle.btn-success {\n  background-image: none;\n}\n.btn-success.disabled,\n.btn-success[disabled],\nfieldset[disabled] .btn-success,\n.btn-success.disabled:hover,\n.btn-success[disabled]:hover,\nfieldset[disabled] .btn-success:hover,\n.btn-success.disabled:focus,\n.btn-success[disabled]:focus,\nfieldset[disabled] .btn-success:focus,\n.btn-success.disabled:active,\n.btn-success[disabled]:active,\nfieldset[disabled] .btn-success:active,\n.btn-success.disabled.active,\n.btn-success[disabled].active,\nfieldset[disabled] .btn-success.active {\n  background-color: #5cb85c;\n  border-color: #4cae4c;\n}\n.btn-success .badge {\n  color: #5cb85c;\n  background-color: #fff;\n}\n.btn-info {\n  color: #fff;\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info:hover,\n.btn-info:focus,\n.btn-info:active,\n.btn-info.active,\n.open .dropdown-toggle.btn-info {\n  color: #fff;\n  background-color: #39b3d7;\n  border-color: #269abc;\n}\n.btn-info:active,\n.btn-info.active,\n.open .dropdown-toggle.btn-info {\n  background-image: none;\n}\n.btn-info.disabled,\n.btn-info[disabled],\nfieldset[disabled] .btn-info,\n.btn-info.disabled:hover,\n.btn-info[disabled]:hover,\nfieldset[disabled] .btn-info:hover,\n.btn-info.disabled:focus,\n.btn-info[disabled]:focus,\nfieldset[disabled] .btn-info:focus,\n.btn-info.disabled:active,\n.btn-info[disabled]:active,\nfieldset[disabled] .btn-info:active,\n.btn-info.disabled.active,\n.btn-info[disabled].active,\nfieldset[disabled] .btn-info.active {\n  background-color: #5bc0de;\n  border-color: #46b8da;\n}\n.btn-info .badge {\n  color: #5bc0de;\n  background-color: #fff;\n}\n.btn-warning {\n  color: #fff;\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning:hover,\n.btn-warning:focus,\n.btn-warning:active,\n.btn-warning.active,\n.open .dropdown-toggle.btn-warning {\n  color: #fff;\n  background-color: #ed9c28;\n  border-color: #d58512;\n}\n.btn-warning:active,\n.btn-warning.active,\n.open .dropdown-toggle.btn-warning {\n  background-image: none;\n}\n.btn-warning.disabled,\n.btn-warning[disabled],\nfieldset[disabled] .btn-warning,\n.btn-warning.disabled:hover,\n.btn-warning[disabled]:hover,\nfieldset[disabled] .btn-warning:hover,\n.btn-warning.disabled:focus,\n.btn-warning[disabled]:focus,\nfieldset[disabled] .btn-warning:focus,\n.btn-warning.disabled:active,\n.btn-warning[disabled]:active,\nfieldset[disabled] .btn-warning:active,\n.btn-warning.disabled.active,\n.btn-warning[disabled].active,\nfieldset[disabled] .btn-warning.active {\n  background-color: #f0ad4e;\n  border-color: #eea236;\n}\n.btn-warning .badge {\n  color: #f0ad4e;\n  background-color: #fff;\n}\n.btn-danger {\n  color: #fff;\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger:hover,\n.btn-danger:focus,\n.btn-danger:active,\n.btn-danger.active,\n.open .dropdown-toggle.btn-danger {\n  color: #fff;\n  background-color: #d2322d;\n  border-color: #ac2925;\n}\n.btn-danger:active,\n.btn-danger.active,\n.open .dropdown-toggle.btn-danger {\n  background-image: none;\n}\n.btn-danger.disabled,\n.btn-danger[disabled],\nfieldset[disabled] .btn-danger,\n.btn-danger.disabled:hover,\n.btn-danger[disabled]:hover,\nfieldset[disabled] 
.btn-danger:hover,\n.btn-danger.disabled:focus,\n.btn-danger[disabled]:focus,\nfieldset[disabled] .btn-danger:focus,\n.btn-danger.disabled:active,\n.btn-danger[disabled]:active,\nfieldset[disabled] .btn-danger:active,\n.btn-danger.disabled.active,\n.btn-danger[disabled].active,\nfieldset[disabled] .btn-danger.active {\n  background-color: #d9534f;\n  border-color: #d43f3a;\n}\n.btn-danger .badge {\n  color: #d9534f;\n  background-color: #fff;\n}\n.btn-link {\n  font-weight: normal;\n  color: #428bca;\n  cursor: pointer;\n  border-radius: 0;\n}\n.btn-link,\n.btn-link:active,\n.btn-link[disabled],\nfieldset[disabled] .btn-link {\n  background-color: transparent;\n  -webkit-box-shadow: none;\n          box-shadow: none;\n}\n.btn-link,\n.btn-link:hover,\n.btn-link:focus,\n.btn-link:active {\n  border-color: transparent;\n}\n.btn-link:hover,\n.btn-link:focus {\n  color: #2a6496;\n  text-decoration: underline;\n  background-color: transparent;\n}\n.btn-link[disabled]:hover,\nfieldset[disabled] .btn-link:hover,\n.btn-link[disabled]:focus,\nfieldset[disabled] .btn-link:focus {\n  color: #999;\n  text-decoration: none;\n}\n.btn-lg {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.33;\n  border-radius: 6px;\n}\n.btn-sm {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-xs {\n  padding: 1px 5px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-block {\n  display: block;\n  width: 100%;\n  padding-right: 0;\n  padding-left: 0;\n}\n.btn-block + .btn-block {\n  margin-top: 5px;\n}\ninput[type=\"submit\"].btn-block,\ninput[type=\"reset\"].btn-block,\ninput[type=\"button\"].btn-block {\n  width: 100%;\n}\n.fade {\n  opacity: 0;\n  -webkit-transition: opacity .15s linear;\n          transition: opacity .15s linear;\n}\n.fade.in {\n  opacity: 1;\n}\n.collapse {\n  display: none;\n}\n.collapse.in {\n  display: block;\n}\n.collapsing {\n  position: relative;\n  height: 0;\n  overflow: hidden;\n  -webkit-transition: height .35s ease;\n          transition: height .35s ease;\n}\n@font-face {\n  font-family: 'Glyphicons Halflings';\n\n  src: url('../fonts/glyphicons-halflings-regular.eot');\n  src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg');\n}\n.glyphicon {\n  position: relative;\n  top: 1px;\n  display: inline-block;\n  font-family: 'Glyphicons Halflings';\n  font-style: normal;\n  font-weight: normal;\n  line-height: 1;\n\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n.glyphicon-asterisk:before {\n  content: \"\\2a\";\n}\n.glyphicon-plus:before {\n  content: \"\\2b\";\n}\n.glyphicon-euro:before {\n  content: \"\\20ac\";\n}\n.glyphicon-minus:before {\n  content: \"\\2212\";\n}\n.glyphicon-cloud:before {\n  content: \"\\2601\";\n}\n.glyphicon-envelope:before {\n  content: \"\\2709\";\n}\n.glyphicon-pencil:before {\n  content: \"\\270f\";\n}\n.glyphicon-glass:before {\n  content: \"\\e001\";\n}\n.glyphicon-music:before {\n  content: \"\\e002\";\n}\n.glyphicon-search:before {\n  content: \"\\e003\";\n}\n.glyphicon-heart:before {\n  content: \"\\e005\";\n}\n.glyphicon-star:before {\n  content: \"\\e006\";\n}\n.glyphicon-star-empty:before {\n  content: \"\\e007\";\n}\n.glyphicon-user:before {\n  content: 
\"\\e008\";\n}\n.glyphicon-film:before {\n  content: \"\\e009\";\n}\n.glyphicon-th-large:before {\n  content: \"\\e010\";\n}\n.glyphicon-th:before {\n  content: \"\\e011\";\n}\n.glyphicon-th-list:before {\n  content: \"\\e012\";\n}\n.glyphicon-ok:before {\n  content: \"\\e013\";\n}\n.glyphicon-remove:before {\n  content: \"\\e014\";\n}\n.glyphicon-zoom-in:before {\n  content: \"\\e015\";\n}\n.glyphicon-zoom-out:before {\n  content: \"\\e016\";\n}\n.glyphicon-off:before {\n  content: \"\\e017\";\n}\n.glyphicon-signal:before {\n  content: \"\\e018\";\n}\n.glyphicon-cog:before {\n  content: \"\\e019\";\n}\n.glyphicon-trash:before {\n  content: \"\\e020\";\n}\n.glyphicon-home:before {\n  content: \"\\e021\";\n}\n.glyphicon-file:before {\n  content: \"\\e022\";\n}\n.glyphicon-time:before {\n  content: \"\\e023\";\n}\n.glyphicon-road:before {\n  content: \"\\e024\";\n}\n.glyphicon-download-alt:before {\n  content: \"\\e025\";\n}\n.glyphicon-download:before {\n  content: \"\\e026\";\n}\n.glyphicon-upload:before {\n  content: \"\\e027\";\n}\n.glyphicon-inbox:before {\n  content: \"\\e028\";\n}\n.glyphicon-play-circle:before {\n  content: \"\\e029\";\n}\n.glyphicon-repeat:before {\n  content: \"\\e030\";\n}\n.glyphicon-refresh:before {\n  content: \"\\e031\";\n}\n.glyphicon-list-alt:before {\n  content: \"\\e032\";\n}\n.glyphicon-lock:before {\n  content: \"\\e033\";\n}\n.glyphicon-flag:before {\n  content: \"\\e034\";\n}\n.glyphicon-headphones:before {\n  content: \"\\e035\";\n}\n.glyphicon-volume-off:before {\n  content: \"\\e036\";\n}\n.glyphicon-volume-down:before {\n  content: \"\\e037\";\n}\n.glyphicon-volume-up:before {\n  content: \"\\e038\";\n}\n.glyphicon-qrcode:before {\n  content: \"\\e039\";\n}\n.glyphicon-barcode:before {\n  content: \"\\e040\";\n}\n.glyphicon-tag:before {\n  content: \"\\e041\";\n}\n.glyphicon-tags:before {\n  content: \"\\e042\";\n}\n.glyphicon-book:before {\n  content: \"\\e043\";\n}\n.glyphicon-bookmark:before {\n  content: \"\\e044\";\n}\n.glyphicon-print:before {\n  content: \"\\e045\";\n}\n.glyphicon-camera:before {\n  content: \"\\e046\";\n}\n.glyphicon-font:before {\n  content: \"\\e047\";\n}\n.glyphicon-bold:before {\n  content: \"\\e048\";\n}\n.glyphicon-italic:before {\n  content: \"\\e049\";\n}\n.glyphicon-text-height:before {\n  content: \"\\e050\";\n}\n.glyphicon-text-width:before {\n  content: \"\\e051\";\n}\n.glyphicon-align-left:before {\n  content: \"\\e052\";\n}\n.glyphicon-align-center:before {\n  content: \"\\e053\";\n}\n.glyphicon-align-right:before {\n  content: \"\\e054\";\n}\n.glyphicon-align-justify:before {\n  content: \"\\e055\";\n}\n.glyphicon-list:before {\n  content: \"\\e056\";\n}\n.glyphicon-indent-left:before {\n  content: \"\\e057\";\n}\n.glyphicon-indent-right:before {\n  content: \"\\e058\";\n}\n.glyphicon-facetime-video:before {\n  content: \"\\e059\";\n}\n.glyphicon-picture:before {\n  content: \"\\e060\";\n}\n.glyphicon-map-marker:before {\n  content: \"\\e062\";\n}\n.glyphicon-adjust:before {\n  content: \"\\e063\";\n}\n.glyphicon-tint:before {\n  content: \"\\e064\";\n}\n.glyphicon-edit:before {\n  content: \"\\e065\";\n}\n.glyphicon-share:before {\n  content: \"\\e066\";\n}\n.glyphicon-check:before {\n  content: \"\\e067\";\n}\n.glyphicon-move:before {\n  content: \"\\e068\";\n}\n.glyphicon-step-backward:before {\n  content: \"\\e069\";\n}\n.glyphicon-fast-backward:before {\n  content: \"\\e070\";\n}\n.glyphicon-backward:before {\n  content: \"\\e071\";\n}\n.glyphicon-play:before {\n  content: 
\"\\e072\";\n}\n.glyphicon-pause:before {\n  content: \"\\e073\";\n}\n.glyphicon-stop:before {\n  content: \"\\e074\";\n}\n.glyphicon-forward:before {\n  content: \"\\e075\";\n}\n.glyphicon-fast-forward:before {\n  content: \"\\e076\";\n}\n.glyphicon-step-forward:before {\n  content: \"\\e077\";\n}\n.glyphicon-eject:before {\n  content: \"\\e078\";\n}\n.glyphicon-chevron-left:before {\n  content: \"\\e079\";\n}\n.glyphicon-chevron-right:before {\n  content: \"\\e080\";\n}\n.glyphicon-plus-sign:before {\n  content: \"\\e081\";\n}\n.glyphicon-minus-sign:before {\n  content: \"\\e082\";\n}\n.glyphicon-remove-sign:before {\n  content: \"\\e083\";\n}\n.glyphicon-ok-sign:before {\n  content: \"\\e084\";\n}\n.glyphicon-question-sign:before {\n  content: \"\\e085\";\n}\n.glyphicon-info-sign:before {\n  content: \"\\e086\";\n}\n.glyphicon-screenshot:before {\n  content: \"\\e087\";\n}\n.glyphicon-remove-circle:before {\n  content: \"\\e088\";\n}\n.glyphicon-ok-circle:before {\n  content: \"\\e089\";\n}\n.glyphicon-ban-circle:before {\n  content: \"\\e090\";\n}\n.glyphicon-arrow-left:before {\n  content: \"\\e091\";\n}\n.glyphicon-arrow-right:before {\n  content: \"\\e092\";\n}\n.glyphicon-arrow-up:before {\n  content: \"\\e093\";\n}\n.glyphicon-arrow-down:before {\n  content: \"\\e094\";\n}\n.glyphicon-share-alt:before {\n  content: \"\\e095\";\n}\n.glyphicon-resize-full:before {\n  content: \"\\e096\";\n}\n.glyphicon-resize-small:before {\n  content: \"\\e097\";\n}\n.glyphicon-exclamation-sign:before {\n  content: \"\\e101\";\n}\n.glyphicon-gift:before {\n  content: \"\\e102\";\n}\n.glyphicon-leaf:before {\n  content: \"\\e103\";\n}\n.glyphicon-fire:before {\n  content: \"\\e104\";\n}\n.glyphicon-eye-open:before {\n  content: \"\\e105\";\n}\n.glyphicon-eye-close:before {\n  content: \"\\e106\";\n}\n.glyphicon-warning-sign:before {\n  content: \"\\e107\";\n}\n.glyphicon-plane:before {\n  content: \"\\e108\";\n}\n.glyphicon-calendar:before {\n  content: \"\\e109\";\n}\n.glyphicon-random:before {\n  content: \"\\e110\";\n}\n.glyphicon-comment:before {\n  content: \"\\e111\";\n}\n.glyphicon-magnet:before {\n  content: \"\\e112\";\n}\n.glyphicon-chevron-up:before {\n  content: \"\\e113\";\n}\n.glyphicon-chevron-down:before {\n  content: \"\\e114\";\n}\n.glyphicon-retweet:before {\n  content: \"\\e115\";\n}\n.glyphicon-shopping-cart:before {\n  content: \"\\e116\";\n}\n.glyphicon-folder-close:before {\n  content: \"\\e117\";\n}\n.glyphicon-folder-open:before {\n  content: \"\\e118\";\n}\n.glyphicon-resize-vertical:before {\n  content: \"\\e119\";\n}\n.glyphicon-resize-horizontal:before {\n  content: \"\\e120\";\n}\n.glyphicon-hdd:before {\n  content: \"\\e121\";\n}\n.glyphicon-bullhorn:before {\n  content: \"\\e122\";\n}\n.glyphicon-bell:before {\n  content: \"\\e123\";\n}\n.glyphicon-certificate:before {\n  content: \"\\e124\";\n}\n.glyphicon-thumbs-up:before {\n  content: \"\\e125\";\n}\n.glyphicon-thumbs-down:before {\n  content: \"\\e126\";\n}\n.glyphicon-hand-right:before {\n  content: \"\\e127\";\n}\n.glyphicon-hand-left:before {\n  content: \"\\e128\";\n}\n.glyphicon-hand-up:before {\n  content: \"\\e129\";\n}\n.glyphicon-hand-down:before {\n  content: \"\\e130\";\n}\n.glyphicon-circle-arrow-right:before {\n  content: \"\\e131\";\n}\n.glyphicon-circle-arrow-left:before {\n  content: \"\\e132\";\n}\n.glyphicon-circle-arrow-up:before {\n  content: \"\\e133\";\n}\n.glyphicon-circle-arrow-down:before {\n  content: \"\\e134\";\n}\n.glyphicon-globe:before {\n  content: 
\"\\e135\";\n}\n.glyphicon-wrench:before {\n  content: \"\\e136\";\n}\n.glyphicon-tasks:before {\n  content: \"\\e137\";\n}\n.glyphicon-filter:before {\n  content: \"\\e138\";\n}\n.glyphicon-briefcase:before {\n  content: \"\\e139\";\n}\n.glyphicon-fullscreen:before {\n  content: \"\\e140\";\n}\n.glyphicon-dashboard:before {\n  content: \"\\e141\";\n}\n.glyphicon-paperclip:before {\n  content: \"\\e142\";\n}\n.glyphicon-heart-empty:before {\n  content: \"\\e143\";\n}\n.glyphicon-link:before {\n  content: \"\\e144\";\n}\n.glyphicon-phone:before {\n  content: \"\\e145\";\n}\n.glyphicon-pushpin:before {\n  content: \"\\e146\";\n}\n.glyphicon-usd:before {\n  content: \"\\e148\";\n}\n.glyphicon-gbp:before {\n  content: \"\\e149\";\n}\n.glyphicon-sort:before {\n  content: \"\\e150\";\n}\n.glyphicon-sort-by-alphabet:before {\n  content: \"\\e151\";\n}\n.glyphicon-sort-by-alphabet-alt:before {\n  content: \"\\e152\";\n}\n.glyphicon-sort-by-order:before {\n  content: \"\\e153\";\n}\n.glyphicon-sort-by-order-alt:before {\n  content: \"\\e154\";\n}\n.glyphicon-sort-by-attributes:before {\n  content: \"\\e155\";\n}\n.glyphicon-sort-by-attributes-alt:before {\n  content: \"\\e156\";\n}\n.glyphicon-unchecked:before {\n  content: \"\\e157\";\n}\n.glyphicon-expand:before {\n  content: \"\\e158\";\n}\n.glyphicon-collapse-down:before {\n  content: \"\\e159\";\n}\n.glyphicon-collapse-up:before {\n  content: \"\\e160\";\n}\n.glyphicon-log-in:before {\n  content: \"\\e161\";\n}\n.glyphicon-flash:before {\n  content: \"\\e162\";\n}\n.glyphicon-log-out:before {\n  content: \"\\e163\";\n}\n.glyphicon-new-window:before {\n  content: \"\\e164\";\n}\n.glyphicon-record:before {\n  content: \"\\e165\";\n}\n.glyphicon-save:before {\n  content: \"\\e166\";\n}\n.glyphicon-open:before {\n  content: \"\\e167\";\n}\n.glyphicon-saved:before {\n  content: \"\\e168\";\n}\n.glyphicon-import:before {\n  content: \"\\e169\";\n}\n.glyphicon-export:before {\n  content: \"\\e170\";\n}\n.glyphicon-send:before {\n  content: \"\\e171\";\n}\n.glyphicon-floppy-disk:before {\n  content: \"\\e172\";\n}\n.glyphicon-floppy-saved:before {\n  content: \"\\e173\";\n}\n.glyphicon-floppy-remove:before {\n  content: \"\\e174\";\n}\n.glyphicon-floppy-save:before {\n  content: \"\\e175\";\n}\n.glyphicon-floppy-open:before {\n  content: \"\\e176\";\n}\n.glyphicon-credit-card:before {\n  content: \"\\e177\";\n}\n.glyphicon-transfer:before {\n  content: \"\\e178\";\n}\n.glyphicon-cutlery:before {\n  content: \"\\e179\";\n}\n.glyphicon-header:before {\n  content: \"\\e180\";\n}\n.glyphicon-compressed:before {\n  content: \"\\e181\";\n}\n.glyphicon-earphone:before {\n  content: \"\\e182\";\n}\n.glyphicon-phone-alt:before {\n  content: \"\\e183\";\n}\n.glyphicon-tower:before {\n  content: \"\\e184\";\n}\n.glyphicon-stats:before {\n  content: \"\\e185\";\n}\n.glyphicon-sd-video:before {\n  content: \"\\e186\";\n}\n.glyphicon-hd-video:before {\n  content: \"\\e187\";\n}\n.glyphicon-subtitles:before {\n  content: \"\\e188\";\n}\n.glyphicon-sound-stereo:before {\n  content: \"\\e189\";\n}\n.glyphicon-sound-dolby:before {\n  content: \"\\e190\";\n}\n.glyphicon-sound-5-1:before {\n  content: \"\\e191\";\n}\n.glyphicon-sound-6-1:before {\n  content: \"\\e192\";\n}\n.glyphicon-sound-7-1:before {\n  content: \"\\e193\";\n}\n.glyphicon-copyright-mark:before {\n  content: \"\\e194\";\n}\n.glyphicon-registration-mark:before {\n  content: \"\\e195\";\n}\n.glyphicon-cloud-download:before {\n  content: \"\\e197\";\n}\n.glyphicon-cloud-upload:before {\n  content: 
\"\\e198\";\n}\n.glyphicon-tree-conifer:before {\n  content: \"\\e199\";\n}\n.glyphicon-tree-deciduous:before {\n  content: \"\\e200\";\n}\n.caret {\n  display: inline-block;\n  width: 0;\n  height: 0;\n  margin-left: 2px;\n  vertical-align: middle;\n  border-top: 4px solid;\n  border-right: 4px solid transparent;\n  border-left: 4px solid transparent;\n}\n.dropdown {\n  position: relative;\n}\n.dropdown-toggle:focus {\n  outline: 0;\n}\n.dropdown-menu {\n  position: absolute;\n  top: 100%;\n  left: 0;\n  z-index: 1000;\n  display: none;\n  float: left;\n  min-width: 160px;\n  padding: 5px 0;\n  margin: 2px 0 0;\n  font-size: 14px;\n  list-style: none;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, .15);\n  border-radius: 4px;\n  -webkit-box-shadow: 0 6px 12px rgba(0, 0, 0, .175);\n          box-shadow: 0 6px 12px rgba(0, 0, 0, .175);\n}\n.dropdown-menu.pull-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu .divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.dropdown-menu > li > a {\n  display: block;\n  padding: 3px 20px;\n  clear: both;\n  font-weight: normal;\n  line-height: 1.428571429;\n  color: #333;\n  white-space: nowrap;\n}\n.dropdown-menu > li > a:hover,\n.dropdown-menu > li > a:focus {\n  color: #262626;\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\n.dropdown-menu > .active > a,\n.dropdown-menu > .active > a:hover,\n.dropdown-menu > .active > a:focus {\n  color: #fff;\n  text-decoration: none;\n  background-color: #428bca;\n  outline: 0;\n}\n.dropdown-menu > .disabled > a,\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  color: #999;\n}\n.dropdown-menu > .disabled > a:hover,\n.dropdown-menu > .disabled > a:focus {\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n  background-image: none;\n  filter: progid:DXImageTransform.Microsoft.gradient(enabled = false);\n}\n.open > .dropdown-menu {\n  display: block;\n}\n.open > a {\n  outline: 0;\n}\n.dropdown-menu-right {\n  right: 0;\n  left: auto;\n}\n.dropdown-menu-left {\n  right: auto;\n  left: 0;\n}\n.dropdown-header {\n  display: block;\n  padding: 3px 20px;\n  font-size: 12px;\n  line-height: 1.428571429;\n  color: #999;\n}\n.dropdown-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 990;\n}\n.pull-right > .dropdown-menu {\n  right: 0;\n  left: auto;\n}\n.dropup .caret,\n.navbar-fixed-bottom .dropdown .caret {\n  content: \"\";\n  border-top: 0;\n  border-bottom: 4px solid;\n}\n.dropup .dropdown-menu,\n.navbar-fixed-bottom .dropdown .dropdown-menu {\n  top: auto;\n  bottom: 100%;\n  margin-bottom: 1px;\n}\n@media (min-width: 768px) {\n  .navbar-right .dropdown-menu {\n    right: 0;\n    left: auto;\n  }\n  .navbar-right .dropdown-menu-left {\n    right: auto;\n    left: 0;\n  }\n}\n.btn-group,\n.btn-group-vertical {\n  position: relative;\n  display: inline-block;\n  vertical-align: middle;\n}\n.btn-group > .btn,\n.btn-group-vertical > .btn {\n  position: relative;\n  float: left;\n}\n.btn-group > .btn:hover,\n.btn-group-vertical > .btn:hover,\n.btn-group > .btn:focus,\n.btn-group-vertical > .btn:focus,\n.btn-group > .btn:active,\n.btn-group-vertical > .btn:active,\n.btn-group > .btn.active,\n.btn-group-vertical > .btn.active {\n  z-index: 2;\n}\n.btn-group > .btn:focus,\n.btn-group-vertical > .btn:focus {\n  outline: none;\n}\n.btn-group .btn + .btn,\n.btn-group .btn + 
.btn-group,\n.btn-group .btn-group + .btn,\n.btn-group .btn-group + .btn-group {\n  margin-left: -1px;\n}\n.btn-toolbar {\n  margin-left: -5px;\n}\n.btn-toolbar .btn-group,\n.btn-toolbar .input-group {\n  float: left;\n}\n.btn-toolbar > .btn,\n.btn-toolbar > .btn-group,\n.btn-toolbar > .input-group {\n  margin-left: 5px;\n}\n.btn-group > .btn:not(:first-child):not(:last-child):not(.dropdown-toggle) {\n  border-radius: 0;\n}\n.btn-group > .btn:first-child {\n  margin-left: 0;\n}\n.btn-group > .btn:first-child:not(:last-child):not(.dropdown-toggle) {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn:last-child:not(:first-child),\n.btn-group > .dropdown-toggle:not(:first-child) {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group > .btn-group {\n  float: left;\n}\n.btn-group > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group > .btn-group:first-child > .btn:last-child,\n.btn-group > .btn-group:first-child > .dropdown-toggle {\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.btn-group > .btn-group:last-child > .btn:first-child {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group .dropdown-toggle:active,\n.btn-group.open .dropdown-toggle {\n  outline: 0;\n}\n.btn-group-xs > .btn {\n  padding: 1px 5px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-group-sm > .btn {\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\n.btn-group-lg > .btn {\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.33;\n  border-radius: 6px;\n}\n.btn-group > .btn + .dropdown-toggle {\n  padding-right: 8px;\n  padding-left: 8px;\n}\n.btn-group > .btn-lg + .dropdown-toggle {\n  padding-right: 12px;\n  padding-left: 12px;\n}\n.btn-group.open .dropdown-toggle {\n  -webkit-box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n          box-shadow: inset 0 3px 5px rgba(0, 0, 0, .125);\n}\n.btn-group.open .dropdown-toggle.btn-link {\n  -webkit-box-shadow: none;\n          box-shadow: none;\n}\n.btn .caret {\n  margin-left: 0;\n}\n.btn-lg .caret {\n  border-width: 5px 5px 0;\n  border-bottom-width: 0;\n}\n.dropup .btn-lg .caret {\n  border-width: 0 5px 5px;\n}\n.btn-group-vertical > .btn,\n.btn-group-vertical > .btn-group,\n.btn-group-vertical > .btn-group > .btn {\n  display: block;\n  float: none;\n  width: 100%;\n  max-width: 100%;\n}\n.btn-group-vertical > .btn-group > .btn {\n  float: none;\n}\n.btn-group-vertical > .btn + .btn,\n.btn-group-vertical > .btn + .btn-group,\n.btn-group-vertical > .btn-group + .btn,\n.btn-group-vertical > .btn-group + .btn-group {\n  margin-top: -1px;\n  margin-left: 0;\n}\n.btn-group-vertical > .btn:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn:first-child:not(:last-child) {\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn:last-child:not(:first-child) {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n  border-bottom-left-radius: 4px;\n}\n.btn-group-vertical > .btn-group:not(:first-child):not(:last-child) > .btn {\n  border-radius: 0;\n}\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .btn:last-child,\n.btn-group-vertical > .btn-group:first-child:not(:last-child) > .dropdown-toggle {\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.btn-group-vertical > .btn-group:last-child:not(:first-child) > 
.btn:first-child {\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.btn-group-justified {\n  display: table;\n  width: 100%;\n  table-layout: fixed;\n  border-collapse: separate;\n}\n.btn-group-justified > .btn,\n.btn-group-justified > .btn-group {\n  display: table-cell;\n  float: none;\n  width: 1%;\n}\n.btn-group-justified > .btn-group .btn {\n  width: 100%;\n}\n[data-toggle=\"buttons\"] > .btn > input[type=\"radio\"],\n[data-toggle=\"buttons\"] > .btn > input[type=\"checkbox\"] {\n  display: none;\n}\n.input-group {\n  position: relative;\n  display: table;\n  border-collapse: separate;\n}\n.input-group[class*=\"col-\"] {\n  float: none;\n  padding-right: 0;\n  padding-left: 0;\n}\n.input-group .form-control {\n  float: left;\n  width: 100%;\n  margin-bottom: 0;\n}\n.input-group-lg > .form-control,\n.input-group-lg > .input-group-addon,\n.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  padding: 10px 16px;\n  font-size: 18px;\n  line-height: 1.33;\n  border-radius: 6px;\n}\nselect.input-group-lg > .form-control,\nselect.input-group-lg > .input-group-addon,\nselect.input-group-lg > .input-group-btn > .btn {\n  height: 46px;\n  line-height: 46px;\n}\ntextarea.input-group-lg > .form-control,\ntextarea.input-group-lg > .input-group-addon,\ntextarea.input-group-lg > .input-group-btn > .btn,\nselect[multiple].input-group-lg > .form-control,\nselect[multiple].input-group-lg > .input-group-addon,\nselect[multiple].input-group-lg > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-sm > .form-control,\n.input-group-sm > .input-group-addon,\n.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  padding: 5px 10px;\n  font-size: 12px;\n  line-height: 1.5;\n  border-radius: 3px;\n}\nselect.input-group-sm > .form-control,\nselect.input-group-sm > .input-group-addon,\nselect.input-group-sm > .input-group-btn > .btn {\n  height: 30px;\n  line-height: 30px;\n}\ntextarea.input-group-sm > .form-control,\ntextarea.input-group-sm > .input-group-addon,\ntextarea.input-group-sm > .input-group-btn > .btn,\nselect[multiple].input-group-sm > .form-control,\nselect[multiple].input-group-sm > .input-group-addon,\nselect[multiple].input-group-sm > .input-group-btn > .btn {\n  height: auto;\n}\n.input-group-addon,\n.input-group-btn,\n.input-group .form-control {\n  display: table-cell;\n}\n.input-group-addon:not(:first-child):not(:last-child),\n.input-group-btn:not(:first-child):not(:last-child),\n.input-group .form-control:not(:first-child):not(:last-child) {\n  border-radius: 0;\n}\n.input-group-addon,\n.input-group-btn {\n  width: 1%;\n  white-space: nowrap;\n  vertical-align: middle;\n}\n.input-group-addon {\n  padding: 6px 12px;\n  font-size: 14px;\n  font-weight: normal;\n  line-height: 1;\n  color: #555;\n  text-align: center;\n  background-color: #eee;\n  border: 1px solid #ccc;\n  border-radius: 4px;\n}\n.input-group-addon.input-sm {\n  padding: 5px 10px;\n  font-size: 12px;\n  border-radius: 3px;\n}\n.input-group-addon.input-lg {\n  padding: 10px 16px;\n  font-size: 18px;\n  border-radius: 6px;\n}\n.input-group-addon input[type=\"radio\"],\n.input-group-addon input[type=\"checkbox\"] {\n  margin-top: 0;\n}\n.input-group .form-control:first-child,\n.input-group-addon:first-child,\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group > .btn,\n.input-group-btn:first-child > .dropdown-toggle,\n.input-group-btn:last-child > .btn:not(:last-child):not(.dropdown-toggle),\n.input-group-btn:last-child > .btn-group:not(:last-child) > .btn 
{\n  border-top-right-radius: 0;\n  border-bottom-right-radius: 0;\n}\n.input-group-addon:first-child {\n  border-right: 0;\n}\n.input-group .form-control:last-child,\n.input-group-addon:last-child,\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group > .btn,\n.input-group-btn:last-child > .dropdown-toggle,\n.input-group-btn:first-child > .btn:not(:first-child),\n.input-group-btn:first-child > .btn-group:not(:first-child) > .btn {\n  border-top-left-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.input-group-addon:last-child {\n  border-left: 0;\n}\n.input-group-btn {\n  position: relative;\n  font-size: 0;\n  white-space: nowrap;\n}\n.input-group-btn > .btn {\n  position: relative;\n}\n.input-group-btn > .btn + .btn {\n  margin-left: -1px;\n}\n.input-group-btn > .btn:hover,\n.input-group-btn > .btn:focus,\n.input-group-btn > .btn:active {\n  z-index: 2;\n}\n.input-group-btn:first-child > .btn,\n.input-group-btn:first-child > .btn-group {\n  margin-right: -1px;\n}\n.input-group-btn:last-child > .btn,\n.input-group-btn:last-child > .btn-group {\n  margin-left: -1px;\n}\n.nav {\n  padding-left: 0;\n  margin-bottom: 0;\n  list-style: none;\n}\n.nav > li {\n  position: relative;\n  display: block;\n}\n.nav > li > a {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n}\n.nav > li > a:hover,\n.nav > li > a:focus {\n  text-decoration: none;\n  background-color: #eee;\n}\n.nav > li.disabled > a {\n  color: #999;\n}\n.nav > li.disabled > a:hover,\n.nav > li.disabled > a:focus {\n  color: #999;\n  text-decoration: none;\n  cursor: not-allowed;\n  background-color: transparent;\n}\n.nav .open > a,\n.nav .open > a:hover,\n.nav .open > a:focus {\n  background-color: #eee;\n  border-color: #428bca;\n}\n.nav .nav-divider {\n  height: 1px;\n  margin: 9px 0;\n  overflow: hidden;\n  background-color: #e5e5e5;\n}\n.nav > li > a > img {\n  max-width: none;\n}\n.nav-tabs {\n  border-bottom: 1px solid #ddd;\n}\n.nav-tabs > li {\n  float: left;\n  margin-bottom: -1px;\n}\n.nav-tabs > li > a {\n  margin-right: 2px;\n  line-height: 1.428571429;\n  border: 1px solid transparent;\n  border-radius: 4px 4px 0 0;\n}\n.nav-tabs > li > a:hover {\n  border-color: #eee #eee #ddd;\n}\n.nav-tabs > li.active > a,\n.nav-tabs > li.active > a:hover,\n.nav-tabs > li.active > a:focus {\n  color: #555;\n  cursor: default;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-bottom-color: transparent;\n}\n.nav-tabs.nav-justified {\n  width: 100%;\n  border-bottom: 0;\n}\n.nav-tabs.nav-justified > li {\n  float: none;\n}\n.nav-tabs.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-tabs.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-tabs.nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs.nav-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs.nav-justified > .active > a,\n.nav-tabs.nav-justified > .active > a:hover,\n.nav-tabs.nav-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs.nav-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs.nav-justified > .active > a,\n  .nav-tabs.nav-justified > .active > a:hover,\n  .nav-tabs.nav-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.nav-pills > li {\n  float: left;\n}\n.nav-pills > li > a {\n  
border-radius: 4px;\n}\n.nav-pills > li + li {\n  margin-left: 2px;\n}\n.nav-pills > li.active > a,\n.nav-pills > li.active > a:hover,\n.nav-pills > li.active > a:focus {\n  color: #fff;\n  background-color: #428bca;\n}\n.nav-stacked > li {\n  float: none;\n}\n.nav-stacked > li + li {\n  margin-top: 2px;\n  margin-left: 0;\n}\n.nav-justified {\n  width: 100%;\n}\n.nav-justified > li {\n  float: none;\n}\n.nav-justified > li > a {\n  margin-bottom: 5px;\n  text-align: center;\n}\n.nav-justified > .dropdown .dropdown-menu {\n  top: auto;\n  left: auto;\n}\n@media (min-width: 768px) {\n  .nav-justified > li {\n    display: table-cell;\n    width: 1%;\n  }\n  .nav-justified > li > a {\n    margin-bottom: 0;\n  }\n}\n.nav-tabs-justified {\n  border-bottom: 0;\n}\n.nav-tabs-justified > li > a {\n  margin-right: 0;\n  border-radius: 4px;\n}\n.nav-tabs-justified > .active > a,\n.nav-tabs-justified > .active > a:hover,\n.nav-tabs-justified > .active > a:focus {\n  border: 1px solid #ddd;\n}\n@media (min-width: 768px) {\n  .nav-tabs-justified > li > a {\n    border-bottom: 1px solid #ddd;\n    border-radius: 4px 4px 0 0;\n  }\n  .nav-tabs-justified > .active > a,\n  .nav-tabs-justified > .active > a:hover,\n  .nav-tabs-justified > .active > a:focus {\n    border-bottom-color: #fff;\n  }\n}\n.tab-content > .tab-pane {\n  display: none;\n}\n.tab-content > .active {\n  display: block;\n}\n.nav-tabs .dropdown-menu {\n  margin-top: -1px;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar {\n  position: relative;\n  min-height: 50px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n}\n@media (min-width: 768px) {\n  .navbar {\n    border-radius: 4px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-header {\n    float: left;\n  }\n}\n.navbar-collapse {\n  max-height: 340px;\n  padding-right: 15px;\n  padding-left: 15px;\n  overflow-x: visible;\n  -webkit-overflow-scrolling: touch;\n  border-top: 1px solid transparent;\n  box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1);\n}\n.navbar-collapse.in {\n  overflow-y: auto;\n}\n@media (min-width: 768px) {\n  .navbar-collapse {\n    width: auto;\n    border-top: 0;\n    box-shadow: none;\n  }\n  .navbar-collapse.collapse {\n    display: block !important;\n    height: auto !important;\n    padding-bottom: 0;\n    overflow: visible !important;\n  }\n  .navbar-collapse.in {\n    overflow-y: visible;\n  }\n  .navbar-fixed-top .navbar-collapse,\n  .navbar-static-top .navbar-collapse,\n  .navbar-fixed-bottom .navbar-collapse {\n    padding-right: 0;\n    padding-left: 0;\n  }\n}\n.container > .navbar-header,\n.container-fluid > .navbar-header,\n.container > .navbar-collapse,\n.container-fluid > .navbar-collapse {\n  margin-right: -15px;\n  margin-left: -15px;\n}\n@media (min-width: 768px) {\n  .container > .navbar-header,\n  .container-fluid > .navbar-header,\n  .container > .navbar-collapse,\n  .container-fluid > .navbar-collapse {\n    margin-right: 0;\n    margin-left: 0;\n  }\n}\n.navbar-static-top {\n  z-index: 1000;\n  border-width: 0 0 1px;\n}\n@media (min-width: 768px) {\n  .navbar-static-top {\n    border-radius: 0;\n  }\n}\n.navbar-fixed-top,\n.navbar-fixed-bottom {\n  position: fixed;\n  right: 0;\n  left: 0;\n  z-index: 1030;\n}\n@media (min-width: 768px) {\n  .navbar-fixed-top,\n  .navbar-fixed-bottom {\n    border-radius: 0;\n  }\n}\n.navbar-fixed-top {\n  top: 0;\n  border-width: 0 0 1px;\n}\n.navbar-fixed-bottom {\n  bottom: 0;\n  margin-bottom: 0;\n  border-width: 1px 0 0;\n}\n.navbar-brand {\n  float: left;\n  height: 
20px;\n  padding: 15px 15px;\n  font-size: 18px;\n  line-height: 20px;\n}\n.navbar-brand:hover,\n.navbar-brand:focus {\n  text-decoration: none;\n}\n@media (min-width: 768px) {\n  .navbar > .container .navbar-brand,\n  .navbar > .container-fluid .navbar-brand {\n    margin-left: -15px;\n  }\n}\n.navbar-toggle {\n  position: relative;\n  float: right;\n  padding: 9px 10px;\n  margin-top: 8px;\n  margin-right: 15px;\n  margin-bottom: 8px;\n  background-color: transparent;\n  background-image: none;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.navbar-toggle:focus {\n  outline: none;\n}\n.navbar-toggle .icon-bar {\n  display: block;\n  width: 22px;\n  height: 2px;\n  border-radius: 1px;\n}\n.navbar-toggle .icon-bar + .icon-bar {\n  margin-top: 4px;\n}\n@media (min-width: 768px) {\n  .navbar-toggle {\n    display: none;\n  }\n}\n.navbar-nav {\n  margin: 7.5px -15px;\n}\n.navbar-nav > li > a {\n  padding-top: 10px;\n  padding-bottom: 10px;\n  line-height: 20px;\n}\n@media (max-width: 767px) {\n  .navbar-nav .open .dropdown-menu {\n    position: static;\n    float: none;\n    width: auto;\n    margin-top: 0;\n    background-color: transparent;\n    border: 0;\n    box-shadow: none;\n  }\n  .navbar-nav .open .dropdown-menu > li > a,\n  .navbar-nav .open .dropdown-menu .dropdown-header {\n    padding: 5px 15px 5px 25px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a {\n    line-height: 20px;\n  }\n  .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-nav .open .dropdown-menu > li > a:focus {\n    background-image: none;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-nav {\n    float: left;\n    margin: 0;\n  }\n  .navbar-nav > li {\n    float: left;\n  }\n  .navbar-nav > li > a {\n    padding-top: 15px;\n    padding-bottom: 15px;\n  }\n  .navbar-nav.navbar-right:last-child {\n    margin-right: -15px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-left {\n    float: left !important;\n  }\n  .navbar-right {\n    float: right !important;\n  }\n}\n.navbar-form {\n  padding: 10px 15px;\n  margin-top: 8px;\n  margin-right: -15px;\n  margin-bottom: 8px;\n  margin-left: -15px;\n  border-top: 1px solid transparent;\n  border-bottom: 1px solid transparent;\n  -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);\n          box-shadow: inset 0 1px 0 rgba(255, 255, 255, .1), 0 1px 0 rgba(255, 255, 255, .1);\n}\n@media (min-width: 768px) {\n  .navbar-form .form-group {\n    display: inline-block;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .form-control {\n    display: inline-block;\n    width: auto;\n    vertical-align: middle;\n  }\n  .navbar-form .control-label {\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio,\n  .navbar-form .checkbox {\n    display: inline-block;\n    padding-left: 0;\n    margin-top: 0;\n    margin-bottom: 0;\n    vertical-align: middle;\n  }\n  .navbar-form .radio input[type=\"radio\"],\n  .navbar-form .checkbox input[type=\"checkbox\"] {\n    float: none;\n    margin-left: 0;\n  }\n  .navbar-form .has-feedback .form-control-feedback {\n    top: 0;\n  }\n}\n@media (max-width: 767px) {\n  .navbar-form .form-group {\n    margin-bottom: 5px;\n  }\n}\n@media (min-width: 768px) {\n  .navbar-form {\n    width: auto;\n    padding-top: 0;\n    padding-bottom: 0;\n    margin-right: 0;\n    margin-left: 0;\n    border: 0;\n    -webkit-box-shadow: none;\n            box-shadow: none;\n  }\n  .navbar-form.navbar-right:last-child {\n    margin-right: -15px;\n  
}\n}\n.navbar-nav > li > .dropdown-menu {\n  margin-top: 0;\n  border-top-left-radius: 0;\n  border-top-right-radius: 0;\n}\n.navbar-fixed-bottom .navbar-nav > li > .dropdown-menu {\n  border-bottom-right-radius: 0;\n  border-bottom-left-radius: 0;\n}\n.navbar-btn {\n  margin-top: 8px;\n  margin-bottom: 8px;\n}\n.navbar-btn.btn-sm {\n  margin-top: 10px;\n  margin-bottom: 10px;\n}\n.navbar-btn.btn-xs {\n  margin-top: 14px;\n  margin-bottom: 14px;\n}\n.navbar-text {\n  margin-top: 15px;\n  margin-bottom: 15px;\n}\n@media (min-width: 768px) {\n  .navbar-text {\n    float: left;\n    margin-right: 15px;\n    margin-left: 15px;\n  }\n  .navbar-text.navbar-right:last-child {\n    margin-right: 0;\n  }\n}\n.navbar-default {\n  background-color: #f8f8f8;\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-brand {\n  color: #777;\n}\n.navbar-default .navbar-brand:hover,\n.navbar-default .navbar-brand:focus {\n  color: #5e5e5e;\n  background-color: transparent;\n}\n.navbar-default .navbar-text {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a {\n  color: #777;\n}\n.navbar-default .navbar-nav > li > a:hover,\n.navbar-default .navbar-nav > li > a:focus {\n  color: #333;\n  background-color: transparent;\n}\n.navbar-default .navbar-nav > .active > a,\n.navbar-default .navbar-nav > .active > a:hover,\n.navbar-default .navbar-nav > .active > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .disabled > a,\n.navbar-default .navbar-nav > .disabled > a:hover,\n.navbar-default .navbar-nav > .disabled > a:focus {\n  color: #ccc;\n  background-color: transparent;\n}\n.navbar-default .navbar-toggle {\n  border-color: #ddd;\n}\n.navbar-default .navbar-toggle:hover,\n.navbar-default .navbar-toggle:focus {\n  background-color: #ddd;\n}\n.navbar-default .navbar-toggle .icon-bar {\n  background-color: #888;\n}\n.navbar-default .navbar-collapse,\n.navbar-default .navbar-form {\n  border-color: #e7e7e7;\n}\n.navbar-default .navbar-nav > .open > a,\n.navbar-default .navbar-nav > .open > a:hover,\n.navbar-default .navbar-nav > .open > a:focus {\n  color: #555;\n  background-color: #e7e7e7;\n}\n@media (max-width: 767px) {\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a {\n    color: #777;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #333;\n    background-color: transparent;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #555;\n    background-color: #e7e7e7;\n  }\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-default .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #ccc;\n    background-color: transparent;\n  }\n}\n.navbar-default .navbar-link {\n  color: #777;\n}\n.navbar-default .navbar-link:hover {\n  color: #333;\n}\n.navbar-inverse {\n  background-color: #222;\n  border-color: #080808;\n}\n.navbar-inverse .navbar-brand {\n  color: #999;\n}\n.navbar-inverse .navbar-brand:hover,\n.navbar-inverse .navbar-brand:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-text {\n  color: #999;\n}\n.navbar-inverse .navbar-nav > li > a {\n  color: #999;\n}\n.navbar-inverse .navbar-nav > li > a:hover,\n.navbar-inverse 
.navbar-nav > li > a:focus {\n  color: #fff;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-nav > .active > a,\n.navbar-inverse .navbar-nav > .active > a:hover,\n.navbar-inverse .navbar-nav > .active > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n.navbar-inverse .navbar-nav > .disabled > a,\n.navbar-inverse .navbar-nav > .disabled > a:hover,\n.navbar-inverse .navbar-nav > .disabled > a:focus {\n  color: #444;\n  background-color: transparent;\n}\n.navbar-inverse .navbar-toggle {\n  border-color: #333;\n}\n.navbar-inverse .navbar-toggle:hover,\n.navbar-inverse .navbar-toggle:focus {\n  background-color: #333;\n}\n.navbar-inverse .navbar-toggle .icon-bar {\n  background-color: #fff;\n}\n.navbar-inverse .navbar-collapse,\n.navbar-inverse .navbar-form {\n  border-color: #101010;\n}\n.navbar-inverse .navbar-nav > .open > a,\n.navbar-inverse .navbar-nav > .open > a:hover,\n.navbar-inverse .navbar-nav > .open > a:focus {\n  color: #fff;\n  background-color: #080808;\n}\n@media (max-width: 767px) {\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .dropdown-header {\n    border-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu .divider {\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a {\n    color: #999;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > li > a:focus {\n    color: #fff;\n    background-color: transparent;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .active > a:focus {\n    color: #fff;\n    background-color: #080808;\n  }\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:hover,\n  .navbar-inverse .navbar-nav .open .dropdown-menu > .disabled > a:focus {\n    color: #444;\n    background-color: transparent;\n  }\n}\n.navbar-inverse .navbar-link {\n  color: #999;\n}\n.navbar-inverse .navbar-link:hover {\n  color: #fff;\n}\n.breadcrumb {\n  padding: 8px 15px;\n  margin-bottom: 20px;\n  list-style: none;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n}\n.breadcrumb > li {\n  display: inline-block;\n}\n.breadcrumb > li + li:before {\n  padding: 0 5px;\n  color: #ccc;\n  content: \"/\\00a0\";\n}\n.breadcrumb > .active {\n  color: #999;\n}\n.pagination {\n  display: inline-block;\n  padding-left: 0;\n  margin: 20px 0;\n  border-radius: 4px;\n}\n.pagination > li {\n  display: inline;\n}\n.pagination > li > a,\n.pagination > li > span {\n  position: relative;\n  float: left;\n  padding: 6px 12px;\n  margin-left: -1px;\n  line-height: 1.428571429;\n  color: #428bca;\n  text-decoration: none;\n  background-color: #fff;\n  border: 1px solid #ddd;\n}\n.pagination > li:first-child > a,\n.pagination > li:first-child > span {\n  margin-left: 0;\n  border-top-left-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.pagination > li:last-child > a,\n.pagination > li:last-child > span {\n  border-top-right-radius: 4px;\n  border-bottom-right-radius: 4px;\n}\n.pagination > li > a:hover,\n.pagination > li > span:hover,\n.pagination > li > a:focus,\n.pagination > li > span:focus {\n  color: #2a6496;\n  background-color: #eee;\n  border-color: #ddd;\n}\n.pagination > .active > a,\n.pagination > .active > span,\n.pagination > .active > a:hover,\n.pagination > .active > 
span:hover,\n.pagination > .active > a:focus,\n.pagination > .active > span:focus {\n  z-index: 2;\n  color: #fff;\n  cursor: default;\n  background-color: #428bca;\n  border-color: #428bca;\n}\n.pagination > .disabled > span,\n.pagination > .disabled > span:hover,\n.pagination > .disabled > span:focus,\n.pagination > .disabled > a,\n.pagination > .disabled > a:hover,\n.pagination > .disabled > a:focus {\n  color: #999;\n  cursor: not-allowed;\n  background-color: #fff;\n  border-color: #ddd;\n}\n.pagination-lg > li > a,\n.pagination-lg > li > span {\n  padding: 10px 16px;\n  font-size: 18px;\n}\n.pagination-lg > li:first-child > a,\n.pagination-lg > li:first-child > span {\n  border-top-left-radius: 6px;\n  border-bottom-left-radius: 6px;\n}\n.pagination-lg > li:last-child > a,\n.pagination-lg > li:last-child > span {\n  border-top-right-radius: 6px;\n  border-bottom-right-radius: 6px;\n}\n.pagination-sm > li > a,\n.pagination-sm > li > span {\n  padding: 5px 10px;\n  font-size: 12px;\n}\n.pagination-sm > li:first-child > a,\n.pagination-sm > li:first-child > span {\n  border-top-left-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.pagination-sm > li:last-child > a,\n.pagination-sm > li:last-child > span {\n  border-top-right-radius: 3px;\n  border-bottom-right-radius: 3px;\n}\n.pager {\n  padding-left: 0;\n  margin: 20px 0;\n  text-align: center;\n  list-style: none;\n}\n.pager li {\n  display: inline;\n}\n.pager li > a,\n.pager li > span {\n  display: inline-block;\n  padding: 5px 14px;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 15px;\n}\n.pager li > a:hover,\n.pager li > a:focus {\n  text-decoration: none;\n  background-color: #eee;\n}\n.pager .next > a,\n.pager .next > span {\n  float: right;\n}\n.pager .previous > a,\n.pager .previous > span {\n  float: left;\n}\n.pager .disabled > a,\n.pager .disabled > a:hover,\n.pager .disabled > a:focus,\n.pager .disabled > span {\n  color: #999;\n  cursor: not-allowed;\n  background-color: #fff;\n}\n.label {\n  display: inline;\n  padding: .2em .6em .3em;\n  font-size: 75%;\n  font-weight: bold;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  border-radius: .25em;\n}\n.label[href]:hover,\n.label[href]:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\n.label:empty {\n  display: none;\n}\n.btn .label {\n  position: relative;\n  top: -1px;\n}\n.label-default {\n  background-color: #999;\n}\n.label-default[href]:hover,\n.label-default[href]:focus {\n  background-color: #808080;\n}\n.label-primary {\n  background-color: #428bca;\n}\n.label-primary[href]:hover,\n.label-primary[href]:focus {\n  background-color: #3071a9;\n}\n.label-success {\n  background-color: #5cb85c;\n}\n.label-success[href]:hover,\n.label-success[href]:focus {\n  background-color: #449d44;\n}\n.label-info {\n  background-color: #5bc0de;\n}\n.label-info[href]:hover,\n.label-info[href]:focus {\n  background-color: #31b0d5;\n}\n.label-warning {\n  background-color: #f0ad4e;\n}\n.label-warning[href]:hover,\n.label-warning[href]:focus {\n  background-color: #ec971f;\n}\n.label-danger {\n  background-color: #d9534f;\n}\n.label-danger[href]:hover,\n.label-danger[href]:focus {\n  background-color: #c9302c;\n}\n.badge {\n  display: inline-block;\n  min-width: 10px;\n  padding: 3px 7px;\n  font-size: 12px;\n  font-weight: bold;\n  line-height: 1;\n  color: #fff;\n  text-align: center;\n  white-space: nowrap;\n  vertical-align: baseline;\n  background-color: #999;\n  
border-radius: 10px;\n}\n.badge:empty {\n  display: none;\n}\n.btn .badge {\n  position: relative;\n  top: -1px;\n}\n.btn-xs .badge {\n  top: 0;\n  padding: 1px 5px;\n}\na.badge:hover,\na.badge:focus {\n  color: #fff;\n  text-decoration: none;\n  cursor: pointer;\n}\na.list-group-item.active > .badge,\n.nav-pills > .active > a > .badge {\n  color: #428bca;\n  background-color: #fff;\n}\n.nav-pills > li > a > .badge {\n  margin-left: 3px;\n}\n.jumbotron {\n  padding: 30px;\n  margin-bottom: 30px;\n  color: inherit;\n  background-color: #eee;\n}\n.jumbotron h1,\n.jumbotron .h1 {\n  color: inherit;\n}\n.jumbotron p {\n  margin-bottom: 15px;\n  font-size: 21px;\n  font-weight: 200;\n}\n.container .jumbotron {\n  border-radius: 6px;\n}\n.jumbotron .container {\n  max-width: 100%;\n}\n@media screen and (min-width: 768px) {\n  .jumbotron {\n    padding-top: 48px;\n    padding-bottom: 48px;\n  }\n  .container .jumbotron {\n    padding-right: 60px;\n    padding-left: 60px;\n  }\n  .jumbotron h1,\n  .jumbotron .h1 {\n    font-size: 63px;\n  }\n}\n.thumbnail {\n  display: block;\n  padding: 4px;\n  margin-bottom: 20px;\n  line-height: 1.428571429;\n  background-color: #fff;\n  border: 1px solid #ddd;\n  border-radius: 4px;\n  -webkit-transition: all .2s ease-in-out;\n          transition: all .2s ease-in-out;\n}\n.thumbnail > img,\n.thumbnail a > img {\n  display: block;\n  max-width: 100%;\n  height: auto;\n  margin-right: auto;\n  margin-left: auto;\n}\na.thumbnail:hover,\na.thumbnail:focus,\na.thumbnail.active {\n  border-color: #428bca;\n}\n.thumbnail .caption {\n  padding: 9px;\n  color: #333;\n}\n.alert {\n  padding: 15px;\n  margin-bottom: 20px;\n  border: 1px solid transparent;\n  border-radius: 4px;\n}\n.alert h4 {\n  margin-top: 0;\n  color: inherit;\n}\n.alert .alert-link {\n  font-weight: bold;\n}\n.alert > p,\n.alert > ul {\n  margin-bottom: 0;\n}\n.alert > p + p {\n  margin-top: 5px;\n}\n.alert-dismissable {\n  padding-right: 35px;\n}\n.alert-dismissable .close {\n  position: relative;\n  top: -2px;\n  right: -21px;\n  color: inherit;\n}\n.alert-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.alert-success hr {\n  border-top-color: #c9e2b3;\n}\n.alert-success .alert-link {\n  color: #2b542c;\n}\n.alert-info {\n  color: #31708f;\n  background-color: #edf6fa;\n  border-color: #bce8f1;\n}\n.alert-info hr {\n  border-top-color: #a6e1ec;\n}\n.alert-info .alert-link {\n  color: #245269;\n  font-weight: bold;\n}\n.alert-info a {\n  font-weight: bold;\n}\n.alert-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.alert-warning hr {\n  border-top-color: #f7e1b5;\n}\n.alert-warning .alert-link {\n  color: #66512c;\n}\n.alert-danger {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: #ebccd1;\n}\n.alert-danger hr {\n  border-top-color: #e4b9c0;\n}\n.alert-danger .alert-link {\n  color: #843534;\n}\n@-webkit-keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n@keyframes progress-bar-stripes {\n  from {\n    background-position: 40px 0;\n  }\n  to {\n    background-position: 0 0;\n  }\n}\n.progress {\n  height: 20px;\n  margin-bottom: 20px;\n  overflow: hidden;\n  background-color: #f5f5f5;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);\n          box-shadow: inset 0 1px 2px rgba(0, 0, 0, .1);\n}\n.progress-bar {\n  float: left;\n  width: 0;\n  height: 100%;\n  font-size: 12px;\n  line-height: 
20px;\n  color: #fff;\n  text-align: center;\n  background-color: #428bca;\n  -webkit-box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);\n          box-shadow: inset 0 -1px 0 rgba(0, 0, 0, .15);\n  -webkit-transition: width .6s ease;\n          transition: width .6s ease;\n}\n.progress-striped .progress-bar {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-size: 40px 40px;\n}\n.progress.active .progress-bar {\n  -webkit-animation: progress-bar-stripes 2s linear infinite;\n          animation: progress-bar-stripes 2s linear infinite;\n}\n.progress-bar-success {\n  background-color: #5cb85c;\n}\n.progress-striped .progress-bar-success {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-info {\n  background-color: #5bc0de;\n}\n.progress-striped .progress-bar-info {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-warning {\n  background-color: #f0ad4e;\n}\n.progress-striped .progress-bar-warning {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.progress-bar-danger {\n  background-color: #d9534f;\n}\n.progress-striped .progress-bar-danger {\n  background-image: -webkit-linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n  background-image:         linear-gradient(45deg, rgba(255, 255, 255, .15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, .15) 50%, rgba(255, 255, 255, .15) 75%, transparent 75%, transparent);\n}\n.media,\n.media-body {\n  overflow: hidden;\n  zoom: 1;\n}\n.media,\n.media .media {\n  margin-top: 15px;\n}\n.media:first-child {\n  margin-top: 0;\n}\n.media-object {\n  display: block;\n}\n.media-heading {\n  margin: 0 0 5px;\n}\n.media > .pull-left {\n  margin-right: 10px;\n}\n.media > .pull-right {\n  margin-left: 10px;\n}\n.media-list {\n  padding-left: 0;\n  list-style: none;\n}\n.list-group {\n  padding-left: 0;\n  margin-bottom: 20px;\n}\n.list-group-item {\n  position: relative;\n  display: block;\n  padding: 10px 15px;\n  margin-bottom: -1px;\n  
background-color: #fff;\n  border: 1px solid #ddd;\n}\n.list-group-item:first-child {\n  border-top-left-radius: 4px;\n  border-top-right-radius: 4px;\n}\n.list-group-item:last-child {\n  margin-bottom: 0;\n  border-bottom-right-radius: 4px;\n  border-bottom-left-radius: 4px;\n}\n.list-group-item > .badge {\n  float: right;\n}\n.list-group-item > .badge + .badge {\n  margin-right: 5px;\n}\na.list-group-item {\n  color: #555;\n}\na.list-group-item .list-group-item-heading {\n  color: #333;\n}\na.list-group-item:hover,\na.list-group-item:focus {\n  text-decoration: none;\n  background-color: #f5f5f5;\n}\na.list-group-item.active,\na.list-group-item.active:hover,\na.list-group-item.active:focus {\n  z-index: 2;\n  color: #fff;\n  background-color: #428bca;\n  border-color: #428bca;\n}\na.list-group-item.active .list-group-item-heading,\na.list-group-item.active:hover .list-group-item-heading,\na.list-group-item.active:focus .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item.active .list-group-item-text,\na.list-group-item.active:hover .list-group-item-text,\na.list-group-item.active:focus .list-group-item-text {\n  color: #e1edf7;\n}\n.list-group-item-success {\n  color: #3c763d;\n  background-color: #dff0d8;\n}\na.list-group-item-success {\n  color: #3c763d;\n}\na.list-group-item-success .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-success:hover,\na.list-group-item-success:focus {\n  color: #3c763d;\n  background-color: #d0e9c6;\n}\na.list-group-item-success.active,\na.list-group-item-success.active:hover,\na.list-group-item-success.active:focus {\n  color: #fff;\n  background-color: #3c763d;\n  border-color: #3c763d;\n}\n.list-group-item-info {\n  color: #31708f;\n  background-color: #d9edf7;\n}\na.list-group-item-info {\n  color: #31708f;\n}\na.list-group-item-info .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-info:hover,\na.list-group-item-info:focus {\n  color: #31708f;\n  background-color: #c4e3f3;\n}\na.list-group-item-info.active,\na.list-group-item-info.active:hover,\na.list-group-item-info.active:focus {\n  color: #fff;\n  background-color: #31708f;\n  border-color: #31708f;\n}\n.list-group-item-warning {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n}\na.list-group-item-warning {\n  color: #8a6d3b;\n}\na.list-group-item-warning .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-warning:hover,\na.list-group-item-warning:focus {\n  color: #8a6d3b;\n  background-color: #faf2cc;\n}\na.list-group-item-warning.active,\na.list-group-item-warning.active:hover,\na.list-group-item-warning.active:focus {\n  color: #fff;\n  background-color: #8a6d3b;\n  border-color: #8a6d3b;\n}\n.list-group-item-danger {\n  color: #a94442;\n  background-color: #f2dede;\n}\na.list-group-item-danger {\n  color: #a94442;\n}\na.list-group-item-danger .list-group-item-heading {\n  color: inherit;\n}\na.list-group-item-danger:hover,\na.list-group-item-danger:focus {\n  color: #a94442;\n  background-color: #ebcccc;\n}\na.list-group-item-danger.active,\na.list-group-item-danger.active:hover,\na.list-group-item-danger.active:focus {\n  color: #fff;\n  background-color: #a94442;\n  border-color: #a94442;\n}\n.list-group-item-heading {\n  margin-top: 0;\n  margin-bottom: 5px;\n}\n.list-group-item-text {\n  margin-bottom: 0;\n  line-height: 1.3;\n}\n.panel {\n  margin-bottom: 20px;\n  background-color: #fff;\n  border: 1px solid transparent;\n  border-radius: 4px;\n  -webkit-box-shadow: 0 1px 1px rgba(0, 0, 0, .05);\n          box-shadow: 
0 1px 1px rgba(0, 0, 0, .05);\n}\n.panel-body {\n  padding: 15px;\n}\n.panel > .list-group {\n  margin-bottom: 0;\n}\n.panel > .list-group .list-group-item {\n  border-width: 1px 0;\n  border-radius: 0;\n}\n.panel > .list-group .list-group-item:first-child {\n  border-top: 0;\n}\n.panel > .list-group .list-group-item:last-child {\n  border-bottom: 0;\n}\n.panel > .list-group:first-child .list-group-item:first-child {\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel > .list-group:last-child .list-group-item:last-child {\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel-heading + .list-group .list-group-item:first-child {\n  border-top-width: 0;\n}\n.panel > .table,\n.panel > .table-responsive > .table {\n  margin-bottom: 0;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:first-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:first-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:first-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:first-child {\n  border-top-left-radius: 3px;\n}\n.panel > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child td:last-child,\n.panel > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > thead:first-child > tr:first-child th:last-child,\n.panel > .table:first-child > tbody:first-child > tr:first-child th:last-child,\n.panel > .table-responsive:first-child > .table:first-child > tbody:first-child > tr:first-child th:last-child {\n  border-top-right-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child td:first-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:first-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:first-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:first-child {\n  border-bottom-left-radius: 3px;\n}\n.panel > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child td:last-child,\n.panel > .table-responsive:last-child > .table:last-child > 
tfoot:last-child > tr:last-child td:last-child,\n.panel > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tbody:last-child > tr:last-child th:last-child,\n.panel > .table:last-child > tfoot:last-child > tr:last-child th:last-child,\n.panel > .table-responsive:last-child > .table:last-child > tfoot:last-child > tr:last-child th:last-child {\n  border-bottom-right-radius: 3px;\n}\n.panel > .panel-body + .table,\n.panel > .panel-body + .table-responsive {\n  border-top: 1px solid #ddd;\n}\n.panel > .table > tbody:first-child > tr:first-child th,\n.panel > .table > tbody:first-child > tr:first-child td {\n  border-top: 0;\n}\n.panel > .table-bordered,\n.panel > .table-responsive > .table-bordered {\n  border: 0;\n}\n.panel > .table-bordered > thead > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:first-child,\n.panel > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:first-child,\n.panel > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:first-child,\n.panel > .table-bordered > thead > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:first-child,\n.panel > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:first-child,\n.panel > .table-bordered > tfoot > tr > td:first-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:first-child {\n  border-left: 0;\n}\n.panel > .table-bordered > thead > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > th:last-child,\n.panel > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > th:last-child,\n.panel > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > th:last-child,\n.panel > .table-bordered > thead > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > thead > tr > td:last-child,\n.panel > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tbody > tr > td:last-child,\n.panel > .table-bordered > tfoot > tr > td:last-child,\n.panel > .table-responsive > .table-bordered > tfoot > tr > td:last-child {\n  border-right: 0;\n}\n.panel > .table-bordered > thead > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > th,\n.panel > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > th,\n.panel > .table-bordered > tfoot > tr:first-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:first-child > th,\n.panel > .table-bordered > thead > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > thead > tr:first-child > td,\n.panel > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:first-child > td,\n.panel > .table-bordered > tfoot > tr:first-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:first-child > td {\n  border-top: 0;\n}\n.panel > .table-bordered > thead > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > thead > tr:last-child > th,\n.panel > .table-bordered > tbody > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > th,\n.panel > 
.table-bordered > tfoot > tr:last-child > th,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > th,\n.panel > .table-bordered > thead > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > thead > tr:last-child > td,\n.panel > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tbody > tr:last-child > td,\n.panel > .table-bordered > tfoot > tr:last-child > td,\n.panel > .table-responsive > .table-bordered > tfoot > tr:last-child > td {\n  border-bottom: 0;\n}\n.panel > .table-responsive {\n  margin-bottom: 0;\n  border: 0;\n}\n.panel-heading {\n  padding: 10px 15px;\n  border-bottom: 1px solid transparent;\n  border-top-left-radius: 3px;\n  border-top-right-radius: 3px;\n}\n.panel-heading > .dropdown .dropdown-toggle {\n  color: inherit;\n}\n.panel-title {\n  margin-top: 0;\n  margin-bottom: 0;\n  font-size: 16px;\n  color: inherit;\n}\n.panel-title > a {\n  color: inherit;\n}\n.panel-footer {\n  padding: 10px 15px;\n  background-color: #f5f5f5;\n  border-top: 1px solid #ddd;\n  border-bottom-right-radius: 3px;\n  border-bottom-left-radius: 3px;\n}\n.panel-group {\n  margin-bottom: 20px;\n}\n.panel-group .panel {\n  margin-bottom: 0;\n  overflow: hidden;\n  border-radius: 4px;\n}\n.panel-group .panel + .panel {\n  margin-top: 5px;\n}\n.panel-group .panel-heading {\n  border-bottom: 0;\n}\n.panel-group .panel-heading + .panel-collapse .panel-body {\n  border-top: 1px solid #ddd;\n}\n.panel-group .panel-footer {\n  border-top: 0;\n}\n.panel-group .panel-footer + .panel-collapse .panel-body {\n  border-bottom: 1px solid #ddd;\n}\n.panel-default {\n  border-color: #ddd;\n}\n.panel-default > .panel-heading {\n  color: #333;\n  background-color: #f5f5f5;\n  border-color: #ddd;\n}\n.panel-default > .panel-heading + .panel-collapse .panel-body {\n  border-top-color: #ddd;\n}\n.panel-default > .panel-footer + .panel-collapse .panel-body {\n  border-bottom-color: #ddd;\n}\n.panel-primary {\n  border-color: #428bca;\n}\n.panel-primary > .panel-heading {\n  color: #fff;\n  background-color: #428bca;\n  border-color: #428bca;\n}\n.panel-primary > .panel-heading + .panel-collapse .panel-body {\n  border-top-color: #428bca;\n}\n.panel-primary > .panel-footer + .panel-collapse .panel-body {\n  border-bottom-color: #428bca;\n}\n.panel-success {\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading {\n  color: #3c763d;\n  background-color: #dff0d8;\n  border-color: #d6e9c6;\n}\n.panel-success > .panel-heading + .panel-collapse .panel-body {\n  border-top-color: #d6e9c6;\n}\n.panel-success > .panel-footer + .panel-collapse .panel-body {\n  border-bottom-color: #d6e9c6;\n}\n.panel-info {\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading {\n  color: #31708f;\n  background-color: #d9edf7;\n  border-color: #bce8f1;\n}\n.panel-info > .panel-heading + .panel-collapse .panel-body {\n  border-top-color: #bce8f1;\n}\n.panel-info > .panel-footer + .panel-collapse .panel-body {\n  border-bottom-color: #bce8f1;\n}\n.panel-warning {\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading {\n  color: #8a6d3b;\n  background-color: #fcf8e3;\n  border-color: #faebcc;\n}\n.panel-warning > .panel-heading + .panel-collapse .panel-body {\n  border-top-color: #faebcc;\n}\n.panel-warning > .panel-footer + .panel-collapse .panel-body {\n  border-bottom-color: #faebcc;\n}\n.panel-danger {\n  border-color: #ebccd1;\n}\n.panel-danger > .panel-heading {\n  color: #a94442;\n  background-color: #f2dede;\n  border-color: 
#ebccd1;\n}\n.panel-danger > .panel-heading + .panel-collapse .panel-body {\n  border-top-color: #ebccd1;\n}\n.panel-danger > .panel-footer + .panel-collapse .panel-body {\n  border-bottom-color: #ebccd1;\n}\n.well {\n  min-height: 20px;\n  padding: 19px;\n  margin-bottom: 20px;\n  background-color: #f5f5f5;\n  border: 1px solid #e3e3e3;\n  border-radius: 4px;\n  -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);\n          box-shadow: inset 0 1px 1px rgba(0, 0, 0, .05);\n}\n.well blockquote {\n  border-color: #ddd;\n  border-color: rgba(0, 0, 0, .15);\n}\n.well-lg {\n  padding: 24px;\n  border-radius: 6px;\n}\n.well-sm {\n  padding: 9px;\n  border-radius: 3px;\n}\n.close {\n  float: right;\n  font-size: 21px;\n  font-weight: bold;\n  line-height: 1;\n  color: #000;\n  text-shadow: 0 1px 0 #fff;\n  filter: alpha(opacity=20);\n  opacity: .2;\n}\n.close:hover,\n.close:focus {\n  color: #000;\n  text-decoration: none;\n  cursor: pointer;\n  filter: alpha(opacity=50);\n  opacity: .5;\n}\nbutton.close {\n  -webkit-appearance: none;\n  padding: 0;\n  cursor: pointer;\n  background: transparent;\n  border: 0;\n}\n.modal-open {\n  overflow: hidden;\n}\n.modal {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 1050;\n  display: none;\n  overflow: auto;\n  overflow-y: scroll;\n  -webkit-overflow-scrolling: touch;\n  outline: 0;\n}\n.modal.fade .modal-dialog {\n  -webkit-transition: -webkit-transform .3s ease-out;\n     -moz-transition:    -moz-transform .3s ease-out;\n       -o-transition:      -o-transform .3s ease-out;\n          transition:         transform .3s ease-out;\n  -webkit-transform: translate(0, -25%);\n      -ms-transform: translate(0, -25%);\n          transform: translate(0, -25%);\n}\n.modal.in .modal-dialog {\n  -webkit-transform: translate(0, 0);\n      -ms-transform: translate(0, 0);\n          transform: translate(0, 0);\n}\n.modal-dialog {\n  position: relative;\n  width: auto;\n  margin: 10px;\n}\n.modal-content {\n  position: relative;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #999;\n  border: 1px solid rgba(0, 0, 0, .2);\n  border-radius: 6px;\n  outline: none;\n  -webkit-box-shadow: 0 3px 9px rgba(0, 0, 0, .5);\n          box-shadow: 0 3px 9px rgba(0, 0, 0, .5);\n}\n.modal-backdrop {\n  position: fixed;\n  top: 0;\n  right: 0;\n  bottom: 0;\n  left: 0;\n  z-index: 1040;\n  background-color: #000;\n}\n.modal-backdrop.fade {\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.modal-backdrop.in {\n  filter: alpha(opacity=50);\n  opacity: .5;\n}\n.modal-header {\n  min-height: 16.428571429px;\n  padding: 15px;\n  border-bottom: 1px solid #e5e5e5;\n}\n.modal-header .close {\n  margin-top: -2px;\n}\n.modal-title {\n  margin: 0;\n  line-height: 1.428571429;\n}\n.modal-body {\n  position: relative;\n  padding: 20px;\n}\n.modal-footer {\n  padding: 19px 20px 20px;\n  margin-top: 15px;\n  text-align: right;\n  border-top: 1px solid #e5e5e5;\n}\n.modal-footer .btn + .btn {\n  margin-bottom: 0;\n  margin-left: 5px;\n}\n.modal-footer .btn-group .btn + .btn {\n  margin-left: -1px;\n}\n.modal-footer .btn-block + .btn-block {\n  margin-left: 0;\n}\n@media (min-width: 768px) {\n  .modal-dialog {\n    width: 600px;\n    margin: 30px auto;\n  }\n  .modal-content {\n    -webkit-box-shadow: 0 5px 15px rgba(0, 0, 0, .5);\n            box-shadow: 0 5px 15px rgba(0, 0, 0, .5);\n  }\n  .modal-sm {\n    width: 300px;\n  }\n  .modal-lg {\n    width: 900px;\n  }\n}\n.tooltip {\n  position: absolute;\n  z-index: 1030;\n  
display: block;\n  font-size: 12px;\n  line-height: 1.4;\n  visibility: visible;\n  filter: alpha(opacity=0);\n  opacity: 0;\n}\n.tooltip.in {\n  filter: alpha(opacity=90);\n  opacity: .9;\n}\n.tooltip.top {\n  padding: 5px 0;\n  margin-top: -3px;\n}\n.tooltip.right {\n  padding: 0 5px;\n  margin-left: 3px;\n}\n.tooltip.bottom {\n  padding: 5px 0;\n  margin-top: 3px;\n}\n.tooltip.left {\n  padding: 0 5px;\n  margin-left: -3px;\n}\n.tooltip-inner {\n  max-width: 200px;\n  padding: 3px 8px;\n  color: #fff;\n  text-align: center;\n  text-decoration: none;\n  background-color: #000;\n  border-radius: 4px;\n}\n.tooltip-arrow {\n  position: absolute;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.tooltip.top .tooltip-arrow {\n  bottom: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-left .tooltip-arrow {\n  bottom: 0;\n  left: 5px;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.top-right .tooltip-arrow {\n  right: 5px;\n  bottom: 0;\n  border-width: 5px 5px 0;\n  border-top-color: #000;\n}\n.tooltip.right .tooltip-arrow {\n  top: 50%;\n  left: 0;\n  margin-top: -5px;\n  border-width: 5px 5px 5px 0;\n  border-right-color: #000;\n}\n.tooltip.left .tooltip-arrow {\n  top: 50%;\n  right: 0;\n  margin-top: -5px;\n  border-width: 5px 0 5px 5px;\n  border-left-color: #000;\n}\n.tooltip.bottom .tooltip-arrow {\n  top: 0;\n  left: 50%;\n  margin-left: -5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-left .tooltip-arrow {\n  top: 0;\n  left: 5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.tooltip.bottom-right .tooltip-arrow {\n  top: 0;\n  right: 5px;\n  border-width: 0 5px 5px;\n  border-bottom-color: #000;\n}\n.popover {\n  position: absolute;\n  top: 0;\n  left: 0;\n  z-index: 1010;\n  display: none;\n  max-width: 276px;\n  padding: 1px;\n  text-align: left;\n  white-space: normal;\n  background-color: #fff;\n  background-clip: padding-box;\n  border: 1px solid #ccc;\n  border: 1px solid rgba(0, 0, 0, .2);\n  border-radius: 6px;\n  -webkit-box-shadow: 0 5px 10px rgba(0, 0, 0, .2);\n          box-shadow: 0 5px 10px rgba(0, 0, 0, .2);\n}\n.popover.top {\n  margin-top: -10px;\n}\n.popover.right {\n  margin-left: 10px;\n}\n.popover.bottom {\n  margin-top: 10px;\n}\n.popover.left {\n  margin-left: -10px;\n}\n.popover-title {\n  padding: 8px 14px;\n  margin: 0;\n  font-size: 14px;\n  font-weight: normal;\n  line-height: 18px;\n  background-color: #f7f7f7;\n  border-bottom: 1px solid #ebebeb;\n  border-radius: 5px 5px 0 0;\n}\n.popover-content {\n  padding: 9px 14px;\n}\n.popover .arrow,\n.popover .arrow:after {\n  position: absolute;\n  display: block;\n  width: 0;\n  height: 0;\n  border-color: transparent;\n  border-style: solid;\n}\n.popover .arrow {\n  border-width: 11px;\n}\n.popover .arrow:after {\n  content: \"\";\n  border-width: 10px;\n}\n.popover.top .arrow {\n  bottom: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-color: #999;\n  border-top-color: rgba(0, 0, 0, .25);\n  border-bottom-width: 0;\n}\n.popover.top .arrow:after {\n  bottom: 1px;\n  margin-left: -10px;\n  content: \" \";\n  border-top-color: #fff;\n  border-bottom-width: 0;\n}\n.popover.right .arrow {\n  top: 50%;\n  left: -11px;\n  margin-top: -11px;\n  border-right-color: #999;\n  border-right-color: rgba(0, 0, 0, .25);\n  border-left-width: 0;\n}\n.popover.right .arrow:after {\n  bottom: -10px;\n  left: 1px;\n  content: \" \";\n  
border-right-color: #fff;\n  border-left-width: 0;\n}\n.popover.bottom .arrow {\n  top: -11px;\n  left: 50%;\n  margin-left: -11px;\n  border-top-width: 0;\n  border-bottom-color: #999;\n  border-bottom-color: rgba(0, 0, 0, .25);\n}\n.popover.bottom .arrow:after {\n  top: 1px;\n  margin-left: -10px;\n  content: \" \";\n  border-top-width: 0;\n  border-bottom-color: #fff;\n}\n.popover.left .arrow {\n  top: 50%;\n  right: -11px;\n  margin-top: -11px;\n  border-right-width: 0;\n  border-left-color: #999;\n  border-left-color: rgba(0, 0, 0, .25);\n}\n.popover.left .arrow:after {\n  right: 1px;\n  bottom: -10px;\n  content: \" \";\n  border-right-width: 0;\n  border-left-color: #fff;\n}\n.carousel {\n  position: relative;\n}\n.carousel-inner {\n  position: relative;\n  width: 100%;\n  overflow: hidden;\n}\n.carousel-inner > .item {\n  position: relative;\n  display: none;\n  -webkit-transition: .6s ease-in-out left;\n          transition: .6s ease-in-out left;\n}\n.carousel-inner > .item > img,\n.carousel-inner > .item > a > img {\n  display: block;\n  max-width: 100%;\n  height: auto;\n  line-height: 1;\n}\n.carousel-inner > .active,\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  display: block;\n}\n.carousel-inner > .active {\n  left: 0;\n}\n.carousel-inner > .next,\n.carousel-inner > .prev {\n  position: absolute;\n  top: 0;\n  width: 100%;\n}\n.carousel-inner > .next {\n  left: 100%;\n}\n.carousel-inner > .prev {\n  left: -100%;\n}\n.carousel-inner > .next.left,\n.carousel-inner > .prev.right {\n  left: 0;\n}\n.carousel-inner > .active.left {\n  left: -100%;\n}\n.carousel-inner > .active.right {\n  left: 100%;\n}\n.carousel-control {\n  position: absolute;\n  top: 0;\n  bottom: 0;\n  left: 0;\n  width: 15%;\n  font-size: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, .6);\n  filter: alpha(opacity=50);\n  opacity: .5;\n}\n.carousel-control.left {\n  background-image: -webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, .5) 0%), color-stop(rgba(0, 0, 0, .0001) 100%));\n  background-image:         linear-gradient(to right, rgba(0, 0, 0, .5) 0%, rgba(0, 0, 0, .0001) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', endColorstr='#00000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control.right {\n  right: 0;\n  left: auto;\n  background-image: -webkit-linear-gradient(left, color-stop(rgba(0, 0, 0, .0001) 0%), color-stop(rgba(0, 0, 0, .5) 100%));\n  background-image:         linear-gradient(to right, rgba(0, 0, 0, .0001) 0%, rgba(0, 0, 0, .5) 100%);\n  filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);\n  background-repeat: repeat-x;\n}\n.carousel-control:hover,\n.carousel-control:focus {\n  color: #fff;\n  text-decoration: none;\n  filter: alpha(opacity=90);\n  outline: none;\n  opacity: .9;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-left,\n.carousel-control .glyphicon-chevron-right {\n  position: absolute;\n  top: 50%;\n  z-index: 5;\n  display: inline-block;\n}\n.carousel-control .icon-prev,\n.carousel-control .glyphicon-chevron-left {\n  left: 50%;\n}\n.carousel-control .icon-next,\n.carousel-control .glyphicon-chevron-right {\n  right: 50%;\n}\n.carousel-control .icon-prev,\n.carousel-control .icon-next {\n  width: 20px;\n  height: 20px;\n  margin-top: -10px;\n  margin-left: -10px;\n  font-family: serif;\n}\n.carousel-control .icon-prev:before {\n  content: 
'\\2039';\n}\n.carousel-control .icon-next:before {\n  content: '\\203a';\n}\n.carousel-indicators {\n  position: absolute;\n  bottom: 10px;\n  left: 50%;\n  z-index: 15;\n  width: 60%;\n  padding-left: 0;\n  margin-left: -30%;\n  text-align: center;\n  list-style: none;\n}\n.carousel-indicators li {\n  display: inline-block;\n  width: 10px;\n  height: 10px;\n  margin: 1px;\n  text-indent: -999px;\n  cursor: pointer;\n  background-color: #000 \\9;\n  background-color: rgba(0, 0, 0, 0);\n  border: 1px solid #fff;\n  border-radius: 10px;\n}\n.carousel-indicators .active {\n  width: 12px;\n  height: 12px;\n  margin: 0;\n  background-color: #fff;\n}\n.carousel-caption {\n  position: absolute;\n  right: 15%;\n  bottom: 20px;\n  left: 15%;\n  z-index: 10;\n  padding-top: 20px;\n  padding-bottom: 20px;\n  color: #fff;\n  text-align: center;\n  text-shadow: 0 1px 2px rgba(0, 0, 0, .6);\n}\n.carousel-caption .btn {\n  text-shadow: none;\n}\n@media screen and (min-width: 768px) {\n  .carousel-control .glyphicon-chevron-left,\n  .carousel-control .glyphicon-chevron-right,\n  .carousel-control .icon-prev,\n  .carousel-control .icon-next {\n    width: 30px;\n    height: 30px;\n    margin-top: -15px;\n    margin-left: -15px;\n    font-size: 30px;\n  }\n  .carousel-caption {\n    right: 20%;\n    left: 20%;\n    padding-bottom: 30px;\n  }\n  .carousel-indicators {\n    bottom: 20px;\n  }\n}\n.clearfix:before,\n.clearfix:after,\n.container:before,\n.container:after,\n.container-fluid:before,\n.container-fluid:after,\n.row:before,\n.row:after,\n.form-horizontal .form-group:before,\n.form-horizontal .form-group:after,\n.btn-toolbar:before,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:before,\n.btn-group-vertical > .btn-group:after,\n.nav:before,\n.nav:after,\n.navbar:before,\n.navbar:after,\n.navbar-header:before,\n.navbar-header:after,\n.navbar-collapse:before,\n.navbar-collapse:after,\n.pager:before,\n.pager:after,\n.panel-body:before,\n.panel-body:after,\n.modal-footer:before,\n.modal-footer:after {\n  display: table;\n  content: \" \";\n}\n.clearfix:after,\n.container:after,\n.container-fluid:after,\n.row:after,\n.form-horizontal .form-group:after,\n.btn-toolbar:after,\n.btn-group-vertical > .btn-group:after,\n.nav:after,\n.navbar:after,\n.navbar-header:after,\n.navbar-collapse:after,\n.pager:after,\n.panel-body:after,\n.modal-footer:after {\n  clear: both;\n}\n.center-block {\n  display: block;\n  margin-right: auto;\n  margin-left: auto;\n}\n.pull-right {\n  float: right !important;\n}\n.pull-left {\n  float: left !important;\n}\n.hide {\n  display: none !important;\n}\n.show {\n  display: block !important;\n}\n.invisible {\n  visibility: hidden;\n}\n.text-hide {\n  font: 0/0 a;\n  color: transparent;\n  text-shadow: none;\n  background-color: transparent;\n  border: 0;\n}\n.hidden {\n  display: none !important;\n  visibility: hidden !important;\n}\n.affix {\n  position: fixed;\n}\n@-ms-viewport {\n  width: device-width;\n}\n.visible-xs,\ntr.visible-xs,\nth.visible-xs,\ntd.visible-xs {\n  display: none !important;\n}\n@media (max-width: 767px) {\n  .visible-xs {\n    display: block !important;\n  }\n  table.visible-xs {\n    display: table;\n  }\n  tr.visible-xs {\n    display: table-row !important;\n  }\n  th.visible-xs,\n  td.visible-xs {\n    display: table-cell !important;\n  }\n}\n.visible-sm,\ntr.visible-sm,\nth.visible-sm,\ntd.visible-sm {\n  display: none !important;\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .visible-sm {\n    display: block !important;\n  }\n  
table.visible-sm {\n    display: table;\n  }\n  tr.visible-sm {\n    display: table-row !important;\n  }\n  th.visible-sm,\n  td.visible-sm {\n    display: table-cell !important;\n  }\n}\n.visible-md,\ntr.visible-md,\nth.visible-md,\ntd.visible-md {\n  display: none !important;\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .visible-md {\n    display: block !important;\n  }\n  table.visible-md {\n    display: table;\n  }\n  tr.visible-md {\n    display: table-row !important;\n  }\n  th.visible-md,\n  td.visible-md {\n    display: table-cell !important;\n  }\n}\n.visible-lg,\ntr.visible-lg,\nth.visible-lg,\ntd.visible-lg {\n  display: none !important;\n}\n@media (min-width: 1200px) {\n  .visible-lg {\n    display: block !important;\n  }\n  table.visible-lg {\n    display: table;\n  }\n  tr.visible-lg {\n    display: table-row !important;\n  }\n  th.visible-lg,\n  td.visible-lg {\n    display: table-cell !important;\n  }\n}\n@media (max-width: 767px) {\n  .hidden-xs,\n  tr.hidden-xs,\n  th.hidden-xs,\n  td.hidden-xs {\n    display: none !important;\n  }\n}\n@media (min-width: 768px) and (max-width: 991px) {\n  .hidden-sm,\n  tr.hidden-sm,\n  th.hidden-sm,\n  td.hidden-sm {\n    display: none !important;\n  }\n}\n@media (min-width: 992px) and (max-width: 1199px) {\n  .hidden-md,\n  tr.hidden-md,\n  th.hidden-md,\n  td.hidden-md {\n    display: none !important;\n  }\n}\n@media (min-width: 1200px) {\n  .hidden-lg,\n  tr.hidden-lg,\n  th.hidden-lg,\n  td.hidden-lg {\n    display: none !important;\n  }\n}\n.visible-print,\ntr.visible-print,\nth.visible-print,\ntd.visible-print {\n  display: none !important;\n}\n@media print {\n  .visible-print {\n    display: block !important;\n  }\n  table.visible-print {\n    display: table;\n  }\n  tr.visible-print {\n    display: table-row !important;\n  }\n  th.visible-print,\n  td.visible-print {\n    display: table-cell !important;\n  }\n}\n@media print {\n  .hidden-print,\n  tr.hidden-print,\n  th.hidden-print,\n  td.hidden-print {\n    display: none !important;\n  }\n}\n/*# sourceMappingURL=bootstrap.css.map */\n"
  },
  {
    "path": "doc/css/button-override.css",
    "content": "/* Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0 */\n\n.btn:hover,\n.btn:focus,\n.btn:active,\n.btn.active,\n.open .dropdown-toggle.btn {\n  opacity: 0.4;\n}\n"
  },
  {
    "path": "doc/css/carousel-override.css",
    "content": "/* Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0 */\n\n.carousel-control {\n  width: 5%;\n}\n\n.carousel-caption {\n  position: static;\n  background: rgba(0,0,0,0.6);\n  color: white;\n  padding-bottom: 35px;\n  padding-left: 1em;\n  padding-right: 1em;\n  padding-top: 15px;\n}\n\n.carousel {\n  overflow: hidden;\n  border-radius: 5px;\n  max-width: 900px;\n  margin: 1em;\n}\n\n.carousel-indicators {\n  bottom: 0px;\n}\n\n"
  },
  {
    "path": "doc/css/code.css",
    "content": "/* Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0 */\n\ntable.code {\n    font-family: Menlo,Monaco,Consolas,\"Courier New\",monospace;\n    display: block;\n    padding: 9.5px;\n    margin: 0px 0px 10px;\n    font-size: 13px;\n    line-height: 1.42857;\n    color: rgb(51, 51, 51);\n    word-break: break-all;\n    word-wrap: break-word;\n    background-color: rgb(245, 245, 245);\n    border: 1px solid rgb(204, 204, 204);\n    border-radius: 4px 4px 4px 4px;\n}\n\ntable.code tr td {\n    white-space: pre;\n}\n\ntable.code tr td:nth-child(2) {\n    color: #d14;\n    padding-left: .5em;\n}\n\n.userinput {\n    color: #d14;\n}\n\ntable.CodeRay {\n    margin-left: 3em;\n    width: calc(100% - 6em);\n}\n\ntd.line-numbers {\n    width: 2em;\n}\n\n.releasenotes h2 { margin-top: 1.5em; text-decoration: underline; }\n"
  },
  {
    "path": "doc/css/font-awesome.css",
    "content": "/*!\n *  Font Awesome 4.1.0 by @davegandy - http://fontawesome.io - @fontawesome\n *  License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)\n */\n/* FONT PATH\n * -------------------------- */\n@font-face {\n  font-family: 'FontAwesome';\n  src: url('../fonts/fontawesome-webfont.eot?v=4.1.0');\n  src: url('../fonts/fontawesome-webfont.eot?#iefix&v=4.1.0') format('embedded-opentype'), url('../fonts/fontawesome-webfont.woff?v=4.1.0') format('woff'), url('../fonts/fontawesome-webfont.ttf?v=4.1.0') format('truetype'), url('../fonts/fontawesome-webfont.svg?v=4.1.0#fontawesomeregular') format('svg');\n  font-weight: normal;\n  font-style: normal;\n}\n.fa {\n  display: inline-block;\n  font-family: 'FontAwesome', sans-serif;\n  font-style: normal;\n  font-weight: normal;\n  line-height: 1;\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n/* makes the font 33% larger relative to the icon container */\n.fa-lg {\n  font-size: 1.33333333em;\n  line-height: 0.75em;\n  vertical-align: -15%;\n}\n.fa-2x {\n  font-size: 2em;\n}\n.fa-3x {\n  font-size: 3em;\n}\n.fa-4x {\n  font-size: 4em;\n}\n.fa-5x {\n  font-size: 5em;\n}\n.fa-fw {\n  width: 1.28571429em;\n  text-align: center;\n}\n.fa-ul {\n  padding-left: 0;\n  margin-left: 2.14285714em;\n  list-style-type: none;\n}\n.fa-ul > li {\n  position: relative;\n}\n.fa-li {\n  position: absolute;\n  left: -2.14285714em;\n  width: 2.14285714em;\n  top: 0.14285714em;\n  text-align: center;\n}\n.fa-li.fa-lg {\n  left: -1.85714286em;\n}\n.fa-border {\n  padding: .2em .25em .15em;\n  border: solid 0.08em #eeeeee;\n  border-radius: .1em;\n}\n.pull-right {\n  float: right;\n}\n.pull-left {\n  float: left;\n}\n.fa.pull-left {\n  margin-right: .3em;\n}\n.fa.pull-right {\n  margin-left: .3em;\n}\n.fa-spin {\n  -webkit-animation: spin 2s infinite linear;\n  -moz-animation: spin 2s infinite linear;\n  -o-animation: spin 2s infinite linear;\n  animation: spin 2s infinite linear;\n}\n@-moz-keyframes spin {\n  0% {\n    -moz-transform: rotate(0deg);\n  }\n  100% {\n    -moz-transform: rotate(359deg);\n  }\n}\n@-webkit-keyframes spin {\n  0% {\n    -webkit-transform: rotate(0deg);\n  }\n  100% {\n    -webkit-transform: rotate(359deg);\n  }\n}\n@-o-keyframes spin {\n  0% {\n    -o-transform: rotate(0deg);\n  }\n  100% {\n    -o-transform: rotate(359deg);\n  }\n}\n@keyframes spin {\n  0% {\n    -webkit-transform: rotate(0deg);\n    transform: rotate(0deg);\n  }\n  100% {\n    -webkit-transform: rotate(359deg);\n    transform: rotate(359deg);\n  }\n}\n.fa-rotate-90 {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=1);\n  -webkit-transform: rotate(90deg);\n  -moz-transform: rotate(90deg);\n  -ms-transform: rotate(90deg);\n  -o-transform: rotate(90deg);\n  transform: rotate(90deg);\n}\n.fa-rotate-180 {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2);\n  -webkit-transform: rotate(180deg);\n  -moz-transform: rotate(180deg);\n  -ms-transform: rotate(180deg);\n  -o-transform: rotate(180deg);\n  transform: rotate(180deg);\n}\n.fa-rotate-270 {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3);\n  -webkit-transform: rotate(270deg);\n  -moz-transform: rotate(270deg);\n  -ms-transform: rotate(270deg);\n  -o-transform: rotate(270deg);\n  transform: rotate(270deg);\n}\n.fa-flip-horizontal {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);\n  -webkit-transform: scale(-1, 1);\n  -moz-transform: scale(-1, 1);\n  -ms-transform: scale(-1, 
1);\n  -o-transform: scale(-1, 1);\n  transform: scale(-1, 1);\n}\n.fa-flip-vertical {\n  filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);\n  -webkit-transform: scale(1, -1);\n  -moz-transform: scale(1, -1);\n  -ms-transform: scale(1, -1);\n  -o-transform: scale(1, -1);\n  transform: scale(1, -1);\n}\n.fa-stack {\n  position: relative;\n  display: inline-block;\n  width: 2em;\n  height: 2em;\n  line-height: 2em;\n  vertical-align: middle;\n}\n.fa-stack-1x,\n.fa-stack-2x {\n  position: absolute;\n  left: 0;\n  width: 100%;\n  text-align: center;\n}\n.fa-stack-1x {\n  line-height: inherit;\n}\n.fa-stack-2x {\n  font-size: 2em;\n}\n.fa-inverse {\n  color: #ffffff;\n}\n/* Font Awesome uses the Unicode Private Use Area (PUA) to ensure screen\n   readers do not read off random characters that represent icons */\n.fa-glass:before {\n  content: \"\\f000\";\n}\n.fa-music:before {\n  content: \"\\f001\";\n}\n.fa-search:before {\n  content: \"\\f002\";\n}\n.fa-envelope-o:before {\n  content: \"\\f003\";\n}\n.fa-heart:before {\n  content: \"\\f004\";\n}\n.fa-star:before {\n  content: \"\\f005\";\n}\n.fa-star-o:before {\n  content: \"\\f006\";\n}\n.fa-user:before {\n  content: \"\\f007\";\n}\n.fa-film:before {\n  content: \"\\f008\";\n}\n.fa-th-large:before {\n  content: \"\\f009\";\n}\n.fa-th:before {\n  content: \"\\f00a\";\n}\n.fa-th-list:before {\n  content: \"\\f00b\";\n}\n.fa-check:before {\n  content: \"\\f00c\";\n}\n.fa-times:before {\n  content: \"\\f00d\";\n}\n.fa-search-plus:before {\n  content: \"\\f00e\";\n}\n.fa-search-minus:before {\n  content: \"\\f010\";\n}\n.fa-power-off:before {\n  content: \"\\f011\";\n}\n.fa-signal:before {\n  content: \"\\f012\";\n}\n.fa-gear:before,\n.fa-cog:before {\n  content: \"\\f013\";\n}\n.fa-trash-o:before {\n  content: \"\\f014\";\n}\n.fa-home:before {\n  content: \"\\f015\";\n}\n.fa-file-o:before {\n  content: \"\\f016\";\n}\n.fa-clock-o:before {\n  content: \"\\f017\";\n}\n.fa-road:before {\n  content: \"\\f018\";\n}\n.fa-download:before {\n  content: \"\\f019\";\n}\n.fa-arrow-circle-o-down:before {\n  content: \"\\f01a\";\n}\n.fa-arrow-circle-o-up:before {\n  content: \"\\f01b\";\n}\n.fa-inbox:before {\n  content: \"\\f01c\";\n}\n.fa-play-circle-o:before {\n  content: \"\\f01d\";\n}\n.fa-rotate-right:before,\n.fa-repeat:before {\n  content: \"\\f01e\";\n}\n.fa-refresh:before {\n  content: \"\\f021\";\n}\n.fa-list-alt:before {\n  content: \"\\f022\";\n}\n.fa-lock:before {\n  content: \"\\f023\";\n}\n.fa-flag:before {\n  content: \"\\f024\";\n}\n.fa-headphones:before {\n  content: \"\\f025\";\n}\n.fa-volume-off:before {\n  content: \"\\f026\";\n}\n.fa-volume-down:before {\n  content: \"\\f027\";\n}\n.fa-volume-up:before {\n  content: \"\\f028\";\n}\n.fa-qrcode:before {\n  content: \"\\f029\";\n}\n.fa-barcode:before {\n  content: \"\\f02a\";\n}\n.fa-tag:before {\n  content: \"\\f02b\";\n}\n.fa-tags:before {\n  content: \"\\f02c\";\n}\n.fa-book:before {\n  content: \"\\f02d\";\n}\n.fa-bookmark:before {\n  content: \"\\f02e\";\n}\n.fa-print:before {\n  content: \"\\f02f\";\n}\n.fa-camera:before {\n  content: \"\\f030\";\n}\n.fa-font:before {\n  content: \"\\f031\";\n}\n.fa-bold:before {\n  content: \"\\f032\";\n}\n.fa-italic:before {\n  content: \"\\f033\";\n}\n.fa-text-height:before {\n  content: \"\\f034\";\n}\n.fa-text-width:before {\n  content: \"\\f035\";\n}\n.fa-align-left:before {\n  content: \"\\f036\";\n}\n.fa-align-center:before {\n  content: \"\\f037\";\n}\n.fa-align-right:before {\n  content: 
\"\\f038\";\n}\n.fa-align-justify:before {\n  content: \"\\f039\";\n}\n.fa-list:before {\n  content: \"\\f03a\";\n}\n.fa-dedent:before,\n.fa-outdent:before {\n  content: \"\\f03b\";\n}\n.fa-indent:before {\n  content: \"\\f03c\";\n}\n.fa-video-camera:before {\n  content: \"\\f03d\";\n}\n.fa-photo:before,\n.fa-image:before,\n.fa-picture-o:before {\n  content: \"\\f03e\";\n}\n.fa-pencil:before {\n  content: \"\\f040\";\n}\n.fa-map-marker:before {\n  content: \"\\f041\";\n}\n.fa-adjust:before {\n  content: \"\\f042\";\n}\n.fa-tint:before {\n  content: \"\\f043\";\n}\n.fa-edit:before,\n.fa-pencil-square-o:before {\n  content: \"\\f044\";\n}\n.fa-share-square-o:before {\n  content: \"\\f045\";\n}\n.fa-check-square-o:before {\n  content: \"\\f046\";\n}\n.fa-arrows:before {\n  content: \"\\f047\";\n}\n.fa-step-backward:before {\n  content: \"\\f048\";\n}\n.fa-fast-backward:before {\n  content: \"\\f049\";\n}\n.fa-backward:before {\n  content: \"\\f04a\";\n}\n.fa-play:before {\n  content: \"\\f04b\";\n}\n.fa-pause:before {\n  content: \"\\f04c\";\n}\n.fa-stop:before {\n  content: \"\\f04d\";\n}\n.fa-forward:before {\n  content: \"\\f04e\";\n}\n.fa-fast-forward:before {\n  content: \"\\f050\";\n}\n.fa-step-forward:before {\n  content: \"\\f051\";\n}\n.fa-eject:before {\n  content: \"\\f052\";\n}\n.fa-chevron-left:before {\n  content: \"\\f053\";\n}\n.fa-chevron-right:before {\n  content: \"\\f054\";\n}\n.fa-plus-circle:before {\n  content: \"\\f055\";\n}\n.fa-minus-circle:before {\n  content: \"\\f056\";\n}\n.fa-times-circle:before {\n  content: \"\\f057\";\n}\n.fa-check-circle:before {\n  content: \"\\f058\";\n}\n.fa-question-circle:before {\n  content: \"\\f059\";\n}\n.fa-info-circle:before {\n  content: \"\\f05a\";\n}\n.fa-crosshairs:before {\n  content: \"\\f05b\";\n}\n.fa-times-circle-o:before {\n  content: \"\\f05c\";\n}\n.fa-check-circle-o:before {\n  content: \"\\f05d\";\n}\n.fa-ban:before {\n  content: \"\\f05e\";\n}\n.fa-arrow-left:before {\n  content: \"\\f060\";\n}\n.fa-arrow-right:before {\n  content: \"\\f061\";\n}\n.fa-arrow-up:before {\n  content: \"\\f062\";\n}\n.fa-arrow-down:before {\n  content: \"\\f063\";\n}\n.fa-mail-forward:before,\n.fa-share:before {\n  content: \"\\f064\";\n}\n.fa-expand:before {\n  content: \"\\f065\";\n}\n.fa-compress:before {\n  content: \"\\f066\";\n}\n.fa-plus:before {\n  content: \"\\f067\";\n}\n.fa-minus:before {\n  content: \"\\f068\";\n}\n.fa-asterisk:before {\n  content: \"\\f069\";\n}\n.fa-exclamation-circle:before {\n  content: \"\\f06a\";\n}\n.fa-gift:before {\n  content: \"\\f06b\";\n}\n.fa-leaf:before {\n  content: \"\\f06c\";\n}\n.fa-fire:before {\n  content: \"\\f06d\";\n}\n.fa-eye:before {\n  content: \"\\f06e\";\n}\n.fa-eye-slash:before {\n  content: \"\\f070\";\n}\n.fa-warning:before,\n.fa-exclamation-triangle:before {\n  content: \"\\f071\";\n}\n.fa-plane:before {\n  content: \"\\f072\";\n}\n.fa-calendar:before {\n  content: \"\\f073\";\n}\n.fa-random:before {\n  content: \"\\f074\";\n}\n.fa-comment:before {\n  content: \"\\f075\";\n}\n.fa-magnet:before {\n  content: \"\\f076\";\n}\n.fa-chevron-up:before {\n  content: \"\\f077\";\n}\n.fa-chevron-down:before {\n  content: \"\\f078\";\n}\n.fa-retweet:before {\n  content: \"\\f079\";\n}\n.fa-shopping-cart:before {\n  content: \"\\f07a\";\n}\n.fa-folder:before {\n  content: \"\\f07b\";\n}\n.fa-folder-open:before {\n  content: \"\\f07c\";\n}\n.fa-arrows-v:before {\n  content: \"\\f07d\";\n}\n.fa-arrows-h:before {\n  content: \"\\f07e\";\n}\n.fa-bar-chart-o:before {\n  content: 
\"\\f080\";\n}\n.fa-twitter-square:before {\n  content: \"\\f081\";\n}\n.fa-facebook-square:before {\n  content: \"\\f082\";\n}\n.fa-camera-retro:before {\n  content: \"\\f083\";\n}\n.fa-key:before {\n  content: \"\\f084\";\n}\n.fa-gears:before,\n.fa-cogs:before {\n  content: \"\\f085\";\n}\n.fa-comments:before {\n  content: \"\\f086\";\n}\n.fa-thumbs-o-up:before {\n  content: \"\\f087\";\n}\n.fa-thumbs-o-down:before {\n  content: \"\\f088\";\n}\n.fa-star-half:before {\n  content: \"\\f089\";\n}\n.fa-heart-o:before {\n  content: \"\\f08a\";\n}\n.fa-sign-out:before {\n  content: \"\\f08b\";\n}\n.fa-linkedin-square:before {\n  content: \"\\f08c\";\n}\n.fa-thumb-tack:before {\n  content: \"\\f08d\";\n}\n.fa-external-link:before {\n  content: \"\\f08e\";\n}\n.fa-sign-in:before {\n  content: \"\\f090\";\n}\n.fa-trophy:before {\n  content: \"\\f091\";\n}\n.fa-github-square:before {\n  content: \"\\f092\";\n}\n.fa-upload:before {\n  content: \"\\f093\";\n}\n.fa-lemon-o:before {\n  content: \"\\f094\";\n}\n.fa-phone:before {\n  content: \"\\f095\";\n}\n.fa-square-o:before {\n  content: \"\\f096\";\n}\n.fa-bookmark-o:before {\n  content: \"\\f097\";\n}\n.fa-phone-square:before {\n  content: \"\\f098\";\n}\n.fa-twitter:before {\n  content: \"\\f099\";\n}\n.fa-facebook:before {\n  content: \"\\f09a\";\n}\n.fa-github:before {\n  content: \"\\f09b\";\n}\n.fa-unlock:before {\n  content: \"\\f09c\";\n}\n.fa-credit-card:before {\n  content: \"\\f09d\";\n}\n.fa-rss:before {\n  content: \"\\f09e\";\n}\n.fa-hdd-o:before {\n  content: \"\\f0a0\";\n}\n.fa-bullhorn:before {\n  content: \"\\f0a1\";\n}\n.fa-bell:before {\n  content: \"\\f0f3\";\n}\n.fa-certificate:before {\n  content: \"\\f0a3\";\n}\n.fa-hand-o-right:before {\n  content: \"\\f0a4\";\n}\n.fa-hand-o-left:before {\n  content: \"\\f0a5\";\n}\n.fa-hand-o-up:before {\n  content: \"\\f0a6\";\n}\n.fa-hand-o-down:before {\n  content: \"\\f0a7\";\n}\n.fa-arrow-circle-left:before {\n  content: \"\\f0a8\";\n}\n.fa-arrow-circle-right:before {\n  content: \"\\f0a9\";\n}\n.fa-arrow-circle-up:before {\n  content: \"\\f0aa\";\n}\n.fa-arrow-circle-down:before {\n  content: \"\\f0ab\";\n}\n.fa-globe:before {\n  content: \"\\f0ac\";\n}\n.fa-wrench:before {\n  content: \"\\f0ad\";\n}\n.fa-tasks:before {\n  content: \"\\f0ae\";\n}\n.fa-filter:before {\n  content: \"\\f0b0\";\n}\n.fa-briefcase:before {\n  content: \"\\f0b1\";\n}\n.fa-arrows-alt:before {\n  content: \"\\f0b2\";\n}\n.fa-group:before,\n.fa-users:before {\n  content: \"\\f0c0\";\n}\n.fa-chain:before,\n.fa-link:before {\n  content: \"\\f0c1\";\n}\n.fa-cloud:before {\n  content: \"\\f0c2\";\n}\n.fa-flask:before {\n  content: \"\\f0c3\";\n}\n.fa-cut:before,\n.fa-scissors:before {\n  content: \"\\f0c4\";\n}\n.fa-copy:before,\n.fa-files-o:before {\n  content: \"\\f0c5\";\n}\n.fa-paperclip:before {\n  content: \"\\f0c6\";\n}\n.fa-save:before,\n.fa-floppy-o:before {\n  content: \"\\f0c7\";\n}\n.fa-square:before {\n  content: \"\\f0c8\";\n}\n.fa-navicon:before,\n.fa-reorder:before,\n.fa-bars:before {\n  content: \"\\f0c9\";\n}\n.fa-list-ul:before {\n  content: \"\\f0ca\";\n}\n.fa-list-ol:before {\n  content: \"\\f0cb\";\n}\n.fa-strikethrough:before {\n  content: \"\\f0cc\";\n}\n.fa-underline:before {\n  content: \"\\f0cd\";\n}\n.fa-table:before {\n  content: \"\\f0ce\";\n}\n.fa-magic:before {\n  content: \"\\f0d0\";\n}\n.fa-truck:before {\n  content: \"\\f0d1\";\n}\n.fa-pinterest:before {\n  content: \"\\f0d2\";\n}\n.fa-pinterest-square:before {\n  content: \"\\f0d3\";\n}\n.fa-google-plus-square:before {\n  
content: \"\\f0d4\";\n}\n.fa-google-plus:before {\n  content: \"\\f0d5\";\n}\n.fa-money:before {\n  content: \"\\f0d6\";\n}\n.fa-caret-down:before {\n  content: \"\\f0d7\";\n}\n.fa-caret-up:before {\n  content: \"\\f0d8\";\n}\n.fa-caret-left:before {\n  content: \"\\f0d9\";\n}\n.fa-caret-right:before {\n  content: \"\\f0da\";\n}\n.fa-columns:before {\n  content: \"\\f0db\";\n}\n.fa-unsorted:before,\n.fa-sort:before {\n  content: \"\\f0dc\";\n}\n.fa-sort-down:before,\n.fa-sort-desc:before {\n  content: \"\\f0dd\";\n}\n.fa-sort-up:before,\n.fa-sort-asc:before {\n  content: \"\\f0de\";\n}\n.fa-envelope:before {\n  content: \"\\f0e0\";\n}\n.fa-linkedin:before {\n  content: \"\\f0e1\";\n}\n.fa-rotate-left:before,\n.fa-undo:before {\n  content: \"\\f0e2\";\n}\n.fa-legal:before,\n.fa-gavel:before {\n  content: \"\\f0e3\";\n}\n.fa-dashboard:before,\n.fa-tachometer:before {\n  content: \"\\f0e4\";\n}\n.fa-comment-o:before {\n  content: \"\\f0e5\";\n}\n.fa-comments-o:before {\n  content: \"\\f0e6\";\n}\n.fa-flash:before,\n.fa-bolt:before {\n  content: \"\\f0e7\";\n}\n.fa-sitemap:before {\n  content: \"\\f0e8\";\n}\n.fa-umbrella:before {\n  content: \"\\f0e9\";\n}\n.fa-paste:before,\n.fa-clipboard:before {\n  content: \"\\f0ea\";\n}\n.fa-lightbulb-o:before {\n  content: \"\\f0eb\";\n}\n.fa-exchange:before {\n  content: \"\\f0ec\";\n}\n.fa-cloud-download:before {\n  content: \"\\f0ed\";\n}\n.fa-cloud-upload:before {\n  content: \"\\f0ee\";\n}\n.fa-user-md:before {\n  content: \"\\f0f0\";\n}\n.fa-stethoscope:before {\n  content: \"\\f0f1\";\n}\n.fa-suitcase:before {\n  content: \"\\f0f2\";\n}\n.fa-bell-o:before {\n  content: \"\\f0a2\";\n}\n.fa-coffee:before {\n  content: \"\\f0f4\";\n}\n.fa-cutlery:before {\n  content: \"\\f0f5\";\n}\n.fa-file-text-o:before {\n  content: \"\\f0f6\";\n}\n.fa-building-o:before {\n  content: \"\\f0f7\";\n}\n.fa-hospital-o:before {\n  content: \"\\f0f8\";\n}\n.fa-ambulance:before {\n  content: \"\\f0f9\";\n}\n.fa-medkit:before {\n  content: \"\\f0fa\";\n}\n.fa-fighter-jet:before {\n  content: \"\\f0fb\";\n}\n.fa-beer:before {\n  content: \"\\f0fc\";\n}\n.fa-h-square:before {\n  content: \"\\f0fd\";\n}\n.fa-plus-square:before {\n  content: \"\\f0fe\";\n}\n.fa-angle-double-left:before {\n  content: \"\\f100\";\n}\n.fa-angle-double-right:before {\n  content: \"\\f101\";\n}\n.fa-angle-double-up:before {\n  content: \"\\f102\";\n}\n.fa-angle-double-down:before {\n  content: \"\\f103\";\n}\n.fa-angle-left:before {\n  content: \"\\f104\";\n}\n.fa-angle-right:before {\n  content: \"\\f105\";\n}\n.fa-angle-up:before {\n  content: \"\\f106\";\n}\n.fa-angle-down:before {\n  content: \"\\f107\";\n}\n.fa-desktop:before {\n  content: \"\\f108\";\n}\n.fa-laptop:before {\n  content: \"\\f109\";\n}\n.fa-tablet:before {\n  content: \"\\f10a\";\n}\n.fa-mobile-phone:before,\n.fa-mobile:before {\n  content: \"\\f10b\";\n}\n.fa-circle-o:before {\n  content: \"\\f10c\";\n}\n.fa-quote-left:before {\n  content: \"\\f10d\";\n}\n.fa-quote-right:before {\n  content: \"\\f10e\";\n}\n.fa-spinner:before {\n  content: \"\\f110\";\n}\n.fa-circle:before {\n  content: \"\\f111\";\n}\n.fa-mail-reply:before,\n.fa-reply:before {\n  content: \"\\f112\";\n}\n.fa-github-alt:before {\n  content: \"\\f113\";\n}\n.fa-folder-o:before {\n  content: \"\\f114\";\n}\n.fa-folder-open-o:before {\n  content: \"\\f115\";\n}\n.fa-smile-o:before {\n  content: \"\\f118\";\n}\n.fa-frown-o:before {\n  content: \"\\f119\";\n}\n.fa-meh-o:before {\n  content: \"\\f11a\";\n}\n.fa-gamepad:before {\n  content: 
\"\\f11b\";\n}\n.fa-keyboard-o:before {\n  content: \"\\f11c\";\n}\n.fa-flag-o:before {\n  content: \"\\f11d\";\n}\n.fa-flag-checkered:before {\n  content: \"\\f11e\";\n}\n.fa-terminal:before {\n  content: \"\\f120\";\n}\n.fa-code:before {\n  content: \"\\f121\";\n}\n.fa-mail-reply-all:before,\n.fa-reply-all:before {\n  content: \"\\f122\";\n}\n.fa-star-half-empty:before,\n.fa-star-half-full:before,\n.fa-star-half-o:before {\n  content: \"\\f123\";\n}\n.fa-location-arrow:before {\n  content: \"\\f124\";\n}\n.fa-crop:before {\n  content: \"\\f125\";\n}\n.fa-code-fork:before {\n  content: \"\\f126\";\n}\n.fa-unlink:before,\n.fa-chain-broken:before {\n  content: \"\\f127\";\n}\n.fa-question:before {\n  content: \"\\f128\";\n}\n.fa-info:before {\n  content: \"\\f129\";\n}\n.fa-exclamation:before {\n  content: \"\\f12a\";\n}\n.fa-superscript:before {\n  content: \"\\f12b\";\n}\n.fa-subscript:before {\n  content: \"\\f12c\";\n}\n.fa-eraser:before {\n  content: \"\\f12d\";\n}\n.fa-puzzle-piece:before {\n  content: \"\\f12e\";\n}\n.fa-microphone:before {\n  content: \"\\f130\";\n}\n.fa-microphone-slash:before {\n  content: \"\\f131\";\n}\n.fa-shield:before {\n  content: \"\\f132\";\n}\n.fa-calendar-o:before {\n  content: \"\\f133\";\n}\n.fa-fire-extinguisher:before {\n  content: \"\\f134\";\n}\n.fa-rocket:before {\n  content: \"\\f135\";\n}\n.fa-maxcdn:before {\n  content: \"\\f136\";\n}\n.fa-chevron-circle-left:before {\n  content: \"\\f137\";\n}\n.fa-chevron-circle-right:before {\n  content: \"\\f138\";\n}\n.fa-chevron-circle-up:before {\n  content: \"\\f139\";\n}\n.fa-chevron-circle-down:before {\n  content: \"\\f13a\";\n}\n.fa-html5:before {\n  content: \"\\f13b\";\n}\n.fa-css3:before {\n  content: \"\\f13c\";\n}\n.fa-anchor:before {\n  content: \"\\f13d\";\n}\n.fa-unlock-alt:before {\n  content: \"\\f13e\";\n}\n.fa-bullseye:before {\n  content: \"\\f140\";\n}\n.fa-ellipsis-h:before {\n  content: \"\\f141\";\n}\n.fa-ellipsis-v:before {\n  content: \"\\f142\";\n}\n.fa-rss-square:before {\n  content: \"\\f143\";\n}\n.fa-play-circle:before {\n  content: \"\\f144\";\n}\n.fa-ticket:before {\n  content: \"\\f145\";\n}\n.fa-minus-square:before {\n  content: \"\\f146\";\n}\n.fa-minus-square-o:before {\n  content: \"\\f147\";\n}\n.fa-level-up:before {\n  content: \"\\f148\";\n}\n.fa-level-down:before {\n  content: \"\\f149\";\n}\n.fa-check-square:before {\n  content: \"\\f14a\";\n}\n.fa-pencil-square:before {\n  content: \"\\f14b\";\n}\n.fa-external-link-square:before {\n  content: \"\\f14c\";\n}\n.fa-share-square:before {\n  content: \"\\f14d\";\n}\n.fa-compass:before {\n  content: \"\\f14e\";\n}\n.fa-toggle-down:before,\n.fa-caret-square-o-down:before {\n  content: \"\\f150\";\n}\n.fa-toggle-up:before,\n.fa-caret-square-o-up:before {\n  content: \"\\f151\";\n}\n.fa-toggle-right:before,\n.fa-caret-square-o-right:before {\n  content: \"\\f152\";\n}\n.fa-euro:before,\n.fa-eur:before {\n  content: \"\\f153\";\n}\n.fa-gbp:before {\n  content: \"\\f154\";\n}\n.fa-dollar:before,\n.fa-usd:before {\n  content: \"\\f155\";\n}\n.fa-rupee:before,\n.fa-inr:before {\n  content: \"\\f156\";\n}\n.fa-cny:before,\n.fa-rmb:before,\n.fa-yen:before,\n.fa-jpy:before {\n  content: \"\\f157\";\n}\n.fa-ruble:before,\n.fa-rouble:before,\n.fa-rub:before {\n  content: \"\\f158\";\n}\n.fa-won:before,\n.fa-krw:before {\n  content: \"\\f159\";\n}\n.fa-bitcoin:before,\n.fa-btc:before {\n  content: \"\\f15a\";\n}\n.fa-file:before {\n  content: \"\\f15b\";\n}\n.fa-file-text:before {\n  content: 
\"\\f15c\";\n}\n.fa-sort-alpha-asc:before {\n  content: \"\\f15d\";\n}\n.fa-sort-alpha-desc:before {\n  content: \"\\f15e\";\n}\n.fa-sort-amount-asc:before {\n  content: \"\\f160\";\n}\n.fa-sort-amount-desc:before {\n  content: \"\\f161\";\n}\n.fa-sort-numeric-asc:before {\n  content: \"\\f162\";\n}\n.fa-sort-numeric-desc:before {\n  content: \"\\f163\";\n}\n.fa-thumbs-up:before {\n  content: \"\\f164\";\n}\n.fa-thumbs-down:before {\n  content: \"\\f165\";\n}\n.fa-youtube-square:before {\n  content: \"\\f166\";\n}\n.fa-youtube:before {\n  content: \"\\f167\";\n}\n.fa-xing:before {\n  content: \"\\f168\";\n}\n.fa-xing-square:before {\n  content: \"\\f169\";\n}\n.fa-youtube-play:before {\n  content: \"\\f16a\";\n}\n.fa-dropbox:before {\n  content: \"\\f16b\";\n}\n.fa-stack-overflow:before {\n  content: \"\\f16c\";\n}\n.fa-instagram:before {\n  content: \"\\f16d\";\n}\n.fa-flickr:before {\n  content: \"\\f16e\";\n}\n.fa-adn:before {\n  content: \"\\f170\";\n}\n.fa-bitbucket:before {\n  content: \"\\f171\";\n}\n.fa-bitbucket-square:before {\n  content: \"\\f172\";\n}\n.fa-tumblr:before {\n  content: \"\\f173\";\n}\n.fa-tumblr-square:before {\n  content: \"\\f174\";\n}\n.fa-long-arrow-down:before {\n  content: \"\\f175\";\n}\n.fa-long-arrow-up:before {\n  content: \"\\f176\";\n}\n.fa-long-arrow-left:before {\n  content: \"\\f177\";\n}\n.fa-long-arrow-right:before {\n  content: \"\\f178\";\n}\n.fa-apple:before {\n  content: \"\\f179\";\n}\n.fa-windows:before {\n  content: \"\\f17a\";\n}\n.fa-android:before {\n  content: \"\\f17b\";\n}\n.fa-linux:before {\n  content: \"\\f17c\";\n}\n.fa-dribbble:before {\n  content: \"\\f17d\";\n}\n.fa-skype:before {\n  content: \"\\f17e\";\n}\n.fa-foursquare:before {\n  content: \"\\f180\";\n}\n.fa-trello:before {\n  content: \"\\f181\";\n}\n.fa-female:before {\n  content: \"\\f182\";\n}\n.fa-male:before {\n  content: \"\\f183\";\n}\n.fa-gittip:before {\n  content: \"\\f184\";\n}\n.fa-sun-o:before {\n  content: \"\\f185\";\n}\n.fa-moon-o:before {\n  content: \"\\f186\";\n}\n.fa-archive:before {\n  content: \"\\f187\";\n}\n.fa-bug:before {\n  content: \"\\f188\";\n}\n.fa-vk:before {\n  content: \"\\f189\";\n}\n.fa-weibo:before {\n  content: \"\\f18a\";\n}\n.fa-renren:before {\n  content: \"\\f18b\";\n}\n.fa-pagelines:before {\n  content: \"\\f18c\";\n}\n.fa-stack-exchange:before {\n  content: \"\\f18d\";\n}\n.fa-arrow-circle-o-right:before {\n  content: \"\\f18e\";\n}\n.fa-arrow-circle-o-left:before {\n  content: \"\\f190\";\n}\n.fa-toggle-left:before,\n.fa-caret-square-o-left:before {\n  content: \"\\f191\";\n}\n.fa-dot-circle-o:before {\n  content: \"\\f192\";\n}\n.fa-wheelchair:before {\n  content: \"\\f193\";\n}\n.fa-vimeo-square:before {\n  content: \"\\f194\";\n}\n.fa-turkish-lira:before,\n.fa-try:before {\n  content: \"\\f195\";\n}\n.fa-plus-square-o:before {\n  content: \"\\f196\";\n}\n.fa-space-shuttle:before {\n  content: \"\\f197\";\n}\n.fa-slack:before {\n  content: \"\\f198\";\n}\n.fa-envelope-square:before {\n  content: \"\\f199\";\n}\n.fa-wordpress:before {\n  content: \"\\f19a\";\n}\n.fa-openid:before {\n  content: \"\\f19b\";\n}\n.fa-institution:before,\n.fa-bank:before,\n.fa-university:before {\n  content: \"\\f19c\";\n}\n.fa-mortar-board:before,\n.fa-graduation-cap:before {\n  content: \"\\f19d\";\n}\n.fa-yahoo:before {\n  content: \"\\f19e\";\n}\n.fa-google:before {\n  content: \"\\f1a0\";\n}\n.fa-reddit:before {\n  content: \"\\f1a1\";\n}\n.fa-reddit-square:before {\n  content: \"\\f1a2\";\n}\n.fa-stumbleupon-circle:before {\n  content: 
\"\\f1a3\";\n}\n.fa-stumbleupon:before {\n  content: \"\\f1a4\";\n}\n.fa-delicious:before {\n  content: \"\\f1a5\";\n}\n.fa-digg:before {\n  content: \"\\f1a6\";\n}\n.fa-pied-piper-square:before,\n.fa-pied-piper:before {\n  content: \"\\f1a7\";\n}\n.fa-pied-piper-alt:before {\n  content: \"\\f1a8\";\n}\n.fa-drupal:before {\n  content: \"\\f1a9\";\n}\n.fa-joomla:before {\n  content: \"\\f1aa\";\n}\n.fa-language:before {\n  content: \"\\f1ab\";\n}\n.fa-fax:before {\n  content: \"\\f1ac\";\n}\n.fa-building:before {\n  content: \"\\f1ad\";\n}\n.fa-child:before {\n  content: \"\\f1ae\";\n}\n.fa-paw:before {\n  content: \"\\f1b0\";\n}\n.fa-spoon:before {\n  content: \"\\f1b1\";\n}\n.fa-cube:before {\n  content: \"\\f1b2\";\n}\n.fa-cubes:before {\n  content: \"\\f1b3\";\n}\n.fa-behance:before {\n  content: \"\\f1b4\";\n}\n.fa-behance-square:before {\n  content: \"\\f1b5\";\n}\n.fa-steam:before {\n  content: \"\\f1b6\";\n}\n.fa-steam-square:before {\n  content: \"\\f1b7\";\n}\n.fa-recycle:before {\n  content: \"\\f1b8\";\n}\n.fa-automobile:before,\n.fa-car:before {\n  content: \"\\f1b9\";\n}\n.fa-cab:before,\n.fa-taxi:before {\n  content: \"\\f1ba\";\n}\n.fa-tree:before {\n  content: \"\\f1bb\";\n}\n.fa-spotify:before {\n  content: \"\\f1bc\";\n}\n.fa-deviantart:before {\n  content: \"\\f1bd\";\n}\n.fa-soundcloud:before {\n  content: \"\\f1be\";\n}\n.fa-database:before {\n  content: \"\\f1c0\";\n}\n.fa-file-pdf-o:before {\n  content: \"\\f1c1\";\n}\n.fa-file-word-o:before {\n  content: \"\\f1c2\";\n}\n.fa-file-excel-o:before {\n  content: \"\\f1c3\";\n}\n.fa-file-powerpoint-o:before {\n  content: \"\\f1c4\";\n}\n.fa-file-photo-o:before,\n.fa-file-picture-o:before,\n.fa-file-image-o:before {\n  content: \"\\f1c5\";\n}\n.fa-file-zip-o:before,\n.fa-file-archive-o:before {\n  content: \"\\f1c6\";\n}\n.fa-file-sound-o:before,\n.fa-file-audio-o:before {\n  content: \"\\f1c7\";\n}\n.fa-file-movie-o:before,\n.fa-file-video-o:before {\n  content: \"\\f1c8\";\n}\n.fa-file-code-o:before {\n  content: \"\\f1c9\";\n}\n.fa-vine:before {\n  content: \"\\f1ca\";\n}\n.fa-codepen:before {\n  content: \"\\f1cb\";\n}\n.fa-jsfiddle:before {\n  content: \"\\f1cc\";\n}\n.fa-life-bouy:before,\n.fa-life-saver:before,\n.fa-support:before,\n.fa-life-ring:before {\n  content: \"\\f1cd\";\n}\n.fa-circle-o-notch:before {\n  content: \"\\f1ce\";\n}\n.fa-ra:before,\n.fa-rebel:before {\n  content: \"\\f1d0\";\n}\n.fa-ge:before,\n.fa-empire:before {\n  content: \"\\f1d1\";\n}\n.fa-git-square:before {\n  content: \"\\f1d2\";\n}\n.fa-git:before {\n  content: \"\\f1d3\";\n}\n.fa-hacker-news:before {\n  content: \"\\f1d4\";\n}\n.fa-tencent-weibo:before {\n  content: \"\\f1d5\";\n}\n.fa-qq:before {\n  content: \"\\f1d6\";\n}\n.fa-wechat:before,\n.fa-weixin:before {\n  content: \"\\f1d7\";\n}\n.fa-send:before,\n.fa-paper-plane:before {\n  content: \"\\f1d8\";\n}\n.fa-send-o:before,\n.fa-paper-plane-o:before {\n  content: \"\\f1d9\";\n}\n.fa-history:before {\n  content: \"\\f1da\";\n}\n.fa-circle-thin:before {\n  content: \"\\f1db\";\n}\n.fa-header:before {\n  content: \"\\f1dc\";\n}\n.fa-paragraph:before {\n  content: \"\\f1dd\";\n}\n.fa-sliders:before {\n  content: \"\\f1de\";\n}\n.fa-share-alt:before {\n  content: \"\\f1e0\";\n}\n.fa-share-alt-square:before {\n  content: \"\\f1e1\";\n}\n.fa-bomb:before {\n  content: \"\\f1e2\";\n}\n"
  },
  {
    "path": "doc/css/images.css",
    "content": "/* Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0 */\n\nimg.full-width {\n    width: 100%\n}\n\nimg.screenshot {\n    max-width: calc(100% - 2em);\n    border: 3px;\n    border-style: solid;\n    margin-left: 2em;\n    margin-bottom: 2em;\n}\n\nimg.side {\n    float: left;\n    width: 50%;\n}\n"
  },
  {
    "path": "doc/css/layout.css",
    "content": "/* Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0 */\n\nhtml {\n    height:100%;\n}\nbody {\n    padding-top: 61px;\n    height: 90%; /* If calc() is not supported */\n    height: calc(100% - 46px); /* Sets the body full height minus the padding for the menu bar */\n}\n@media (max-width: 1050px) {\n    body {\n\tpadding-top: 121px;\n    }\n    div.frontpagehero {\n\tmargin-left: -20px;\n\tmargin-right: -20px;\n\tpadding-left: 20px;\n    }\n}\n.sidebar-nav {\n    padding: 9px 0;\n}\n.section-block {\n    background: #eeeeee;\n    padding: 1em;\n    -webkit-border-radius: 12px;\n    -moz-border-radius: 12px;\n    border-radius: 12px;\n    margin: 0 2em;\n}\n.row-fluid :first-child .section-block {\n    margin-left: 0;\n}\n.row-fluid :last-child .section-block {\n    margin-right: 0;\n}\n.rarr {\n    font-size: 1.5em;\n}\n.darr {\n    font-size: 4em;\n    text-align: center;\n    margin-bottom: 1em;\n}\n:target {\n    padding-top: 61px;\n    margin-top: -61px;\n}\n\n#annotate-notify { position: fixed; right: 40px; top: 3px;  }\n\nfigure {\n    margin-bottom: 20px;\n}\n"
  },
  {
    "path": "doc/css/nav-list.css",
    "content": "/* Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0 */\n\n/* NAV LIST\n   -------- */\n\n.nav-list {\n  padding-left: 15px;\n  padding-right: 15px;\n  margin-bottom: 0;\n}\n.nav-list > li > a,\n.nav-list .nav-header {\n  margin-left:  -15px;\n  margin-right: -15px;\n  text-shadow: 0 1px 0 rgba(255,255,255,.5);\n}\n.nav-list > li > a {\n  padding: 3px 15px;\n}\n.nav-list > .active > a,\n.nav-list > .active > a:hover,\n.nav-list > .active > a:focus {\n  color: white;\n  text-shadow: 0 -1px 0 rgba(0,0,0,.2);\n  background-color: rgb(66, 139, 202);\n}\n\n.spaced-out li {\n   padding-bottom: 1em;\n}\n\n.inside-list ul {\n    list-style-position: inside;\n    padding-left: 0;\n}\n"
  },
  {
    "path": "doc/development/CodingStandards.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Coding Standards\n\nThe rules are always up for debate. However, when debate is needed, it should happen outside the source tree. In other words, if the rules are wrong, first debate the rules at sprint retrospective, then fix the rules, then follow the new rules.\n\n## Git commit messages\n\n- Prefix the summary line with the issue number this addresses.\n- Describe the delta between the old and new tree. If possible, describe the delta in **behavior** rather than the source code itself.\n- Good: “1234: Support use of spaces in filenames.”\n- Good: “1234: Fix crash when user_id is nil.”\n- Less good: “Add some controller methods.” (What do they do?)\n- Less good: “More progress on UI branch.” (What is different?)\n- Less good: “Incorporate Tom’s suggestions.” (Who cares whose suggestions — what changed?)\n\nIf further background or explanation is needed, separate it from the summary with a blank line.\n\n- Example: “Users found it confusing that the boxes had different colors even though they represented the same kinds of things.”\n\n**Every commit** (including merge commits) must have a DCO sign-off. See `CONTRIBUTING.md` for the full terms of what this means.\n\n- Example: `Arvados-DCO-1.1-Signed-off-by: Alex Doe <alex.doe@example.com>`\n\nFull examples:\n\n    commit 9c6540b9d42adc4a397a28be1ac23f357ba14ab5\n    Author: Tom Clegg <tom@curoverse.com>\n    Date:   Mon Aug 7 09:58:04 2017 -0400\n\n        12027: Recognize a new \"node failed\" error message.\n\n        \"srun: error: Cannot communicate with node 0.  Aborting job.\"\n\n        Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curoverse.com>\n\n    commit 0b4800608e6394d66deec9cecea610c5fbbd75ad\n    Merge: 6f2ce94 3a356c4\n    Author: Tom Clegg <tom@curoverse.com>\n    Date:   Thu Aug 17 13:16:36 2017 -0400\n\n        Merge branch '12081-crunch-job-retry'\n\n        refs #12080\n        refs #12081\n        refs #12108\n\n        Arvados-DCO-1.1-Signed-off-by: Tom Clegg <tom@curoverse.com>\n\n## Source code formatting\n\nThese are general baseline rules except when a language-specific guide specifies otherwise.\n\nNo TAB characters in source files [except Go](https://golang.org/cmd/gofmt/).\n\n- For Emacs, add `(setq-default indent-tabs-mode nil)` to `~/.emacs`.\n- For Vim, add `:set expandtab` to `~/.vimrc`.\n\nAvoid long (\\>100 column) lines.\n\nNo whitespace at the end of lines unless technically required (like Markdown line breaks).\n\n## What to include\n\nNo commented-out blocks of code that have been replaced or obsoleted.\n\n- It is in the git history if we want it back.\n- If its absence would confuse someone reading the new code (despite never having read the old code), explain its absence in an English comment. If the old code is really still needed to support the English explanation, then go ahead — now we know why it’s there.\n\nNo commented-out debug statements.\n\n- If the debug statements are likely to be needed in the future, use a logging facility that can be enabled at run time. 
`logger.debug \"foo\"`\n\n## Style mismatch\n\nAdopt the indentation style of surrounding lines or (when starting a new file) the nearest existing source code in this tree/language.\n\nIf you fix up existing indentation/formatting, do that in a separate commit.\n\n- If you bundle formatting changes with functional changes, it makes functional changes hard to find in the diff.\n\n## Go\n\nFollow gofmt, golint, etc., and <https://github.com/golang/go/wiki/CodeReviewComments>\n\nUse `%w` when wrapping an error with fmt.Errorf(), so errors.As() can access the wrapped error.\n\n```go\nif err != nil {\n        return fmt.Errorf(\"could not swap widgets: %w\", err)\n}\n```\n\nUse `(logrus.FieldLogger)WithError()` (instead of `Logf(\"blah: %s\", err)`) when logging an error.\n\n```go\nif err != nil {\n        logger.WithError(err).Warn(\"error swapping widgets\")\n}\n```\n
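\nA minimal sketch of why `%w` matters (the error value and `swapWidgets` here are hypothetical): callers can still match the wrapped error with errors.Is() or errors.As().\n\n```go\npackage main\n\nimport (\n        \"errors\"\n        \"fmt\"\n)\n\nvar ErrNoWidgets = errors.New(\"no widgets available\")\n\nfunc swapWidgets() error {\n        // %w keeps ErrNoWidgets reachable through the returned error.\n        return fmt.Errorf(\"could not swap widgets: %w\", ErrNoWidgets)\n}\n\nfunc main() {\n        if err := swapWidgets(); errors.Is(err, ErrNoWidgets) {\n                fmt.Println(\"handled:\", err)\n        }\n}\n```\n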
\n## Ruby\n\nFollow <https://github.com/bbatsov/ruby-style-guide>\n\n## Python\n\n### Python code\n\nFor code, follow [PEP 8](https://peps.python.org/pep-0008/).\n\nWhen you add functions, methods, or attributes that SDK users should not use, their name should start with a leading underscore. This is a common convention to signal that an interface is not intended to be public. Anything named this way will be excluded from our SDK web documentation by default.\n\nYou’re encouraged to add type annotations to functions and methods. As of May 2024 these are purely for documentation: we are not type checking any of our Python. Note that your annotations must be understood by the oldest version of Python we currently support (3.10).\n\n### Python docstrings\n\nPublic classes, methods, and functions should all have docstrings. The content of the docstring should follow [PEP 257](https://peps.python.org/pep-0257/).\n\nFormat docstrings with Markdown and follow these style rules:\n\n* Document function argument lists after the high-level description following this format for each argument:\n\n        * name: type --- Description\n\n   Use exactly three minus-hyphens to get an em dash in the web rendering. Provide a helpful type hint whenever practical. The type hint should be written in “modern” style with builtin subscripting and type union syntax, like `list[str | bytes]`.\n\n   Use fully qualified names for custom types. This way pdoc hyperlinks them.\n\n* When something is deprecated, write a `.. WARNING:: Deprecated` admonition immediately after the first line. Its text should explain that the thing is deprecated, and suggest what to use instead. For example:\n\n        def add(a, b):\n            \"\"\"Add two things.\n\n            .. WARNING:: Deprecated\n               This function is deprecated. Use the `+` operator instead.\n\n            …\n            \"\"\"\n\n   You can similarly note private methods with `.. ATTENTION:: Internal`.\n\n* Mark up all identifiers outside the type hint with backticks. When the identifier exists in the current module, use the short name. Otherwise, use the fully-qualified name. Our web documentation will automatically link these identifiers to their corresponding documentation.\n\n* Mark up links using Markdown’s footnote style. For example:\n\n        \"\"\"Python docstring following [PEP 257][pep257].\n\n        [pep257]: https://peps.python.org/pep-0257/\n        \"\"\"\n\n   This looks best in plaintext. A descriptive identifier is nice if you can keep it short, but if that’s challenging, plain ordinals are fine too.\n\n* Mark up headers (e.g., in a module docstring) using underline style. For example:\n\n        \"\"\"Generic utility module\n\n        Filesystem functions\n        --------------------\n\n        …\n\n        Regular expressions\n        -------------------\n\n        …\n        \"\"\"\n\n   This looks best in plaintext.\n\nThe goal of these style rules is to provide a readable, consistent appearance whether people read the documentation in plain text (e.g., using `pydoc`) or their browser (as rendered by `pdoc`).\n
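\nPutting several of these rules together, a docstring might look like this (a sketch; `scale_widget` and the `examples.Widget` type are hypothetical):\n\n        def scale_widget(widget, factor=1.0):\n            \"\"\"Return a copy of `widget` scaled by `factor`.\n\n            Arguments:\n\n            * widget: examples.Widget --- The widget to copy and scale.\n\n            * factor: float --- Scale multiplier applied to every dimension.\n              Default 1.0.\n            \"\"\"\n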
\n## JavaScript\n\nWe already have 4-space indents everywhere, so do that.\n\nOther than that, follow the [Airbnb Javascript coding style](https://github.com/airbnb/javascript) guide unless otherwise stated.\n\n## Workbench Design Guidelines\n\n### Font Sizes\n\n- Minimum 12pt (16px)\n- Minimum 9pt (12px) for fine print like copyright notices and footers\n\nText should be resizable up to 200% without loss of content or functionality.\n\n### Color\n\n- Text and images of text must have a color contrast ratio of at least 4.5:1. You can use [this contrast tool](https://snook.ca/technical/colour_contrast/colour.html#fg=1F7EA1,bg=FFFFFF) to check.\n- Non-text elements (icons, controls, etc.) must have a color contrast ratio of at least 3:1.\n- Avoid hard-coding colors. Use theme colors. If a new color is needed, add it to the theme.\n- Use defined grays when possible, keeping the RGB value and changing the alpha value to indicate different meanings (i.e. active icons have an opacity of 87%, inactive icons 60%, disabled icons 38%).\n\n### Icons\n\n#### General\n\n- Interaction target size of at least 44 x 44 pixels\n- Label should be on the right, icon on the left, for maximum readability\n- Use minimum 3:1 color contrast (see Color above)\n- Use appropriate, concise alt text for people using screen readers\n\n#### Menu/Navigation\n\n- No navigation should be supported only via breadcrumbs\n- If there are fewer than 5 menu options, consider visible navigation options\n- If there are more than 5 menu options, consider a combination navigation where some options are visible and some are hidden\n- Use the following menu icons consistently:\n  - Hamburger (three bars stacked vertically): Used to indicate a navigation bar/menu that toggles between being collapsed behind the button or displayed on the screen, often used for global/site-wide/whole application navigation\n  - Döner (three bars that narrow vertically): Indicates a group filtering menu\n  - Bento (3×3 grid of squares): Indicates a menu presenting a grid of options (not currently applicable to WB)\n  - Kebab (three dots stacked vertically): Indicates a smaller inline menu or an overflow/combination menu\n  - Meatballs (three dots stacked horizontally): Used to indicate a smaller inline menu. Often used to indicate action on a related item (i.e. the item next to the meatball); good for repeated use in tables or horizontal elements\n- If the component is an accordion, use a caret (‸)\n\nPreferred Icon Repositories:\n\n- <https://v5.mui.com/material-ui/material-icons/>\n- <https://materialdesignicons.com/>\n- <https://fontawesome.com/v5/search>\n\n### Buttons\n\n- Label buttons with their action to improve usability and reduce ambiguity (avoid generic button labels for actions)\n- Buttons vs Links\n  - Buttons should cause a change in the current context\n  - Links should navigate to different content or a new resource (e.g. a different page)\n- If the button has text, the color contrast between button and text should be at least 4.5:1\n- Button color and background color contrast should be at least 3:1\n\n### Arvados Specific Components\n\nUse chips for displaying tokenized values/arrays\n\n### Loading Indicators\n\n#### Page Navigation\n\n- Navigation between pages should be indicated using `progressIndicatorActions.START_WORKING` and `progressIndicatorActions.STOP_WORKING` to show the global top-of-page pulser\n- Only the initial load or refresh of the full page (e.g. triggered by the upper right refresh button) should use this indicator. Partial refreshes should use a more local indicator.\n  - Refreshes of only one section of a page should only show its own loading indicator in that section\n- Full page refreshes where the location is unchanged should avoid using the initial full-page spinner in favor of the top-of-page spinner, with updated values substituting in the UI when loaded\n\n#### User Actions\n\n- Form submissions or user actions should be indicated both by `progressIndicatorActions.START_WORKING` and by enabling the spinner on the submit button of the form (if the action takes place through a form AND if the form stays open for the duration of the action in order to show errors). If the form closes immediately, then the page spinner is the only indicator.\n- Toasts should not be used to notify the user of an in-progress action, only of completion or errors\n\n#### Lazy-loaded fields\n\n- Fields that load or update (e.g. with extra info) after the main view should wait 3-5 seconds before showing a spinner/pulser icon while loading. If the request for extra data fails, a placeholder icon should be shown with a hint (text or tooltip) indicating that the data failed to load.\n  - The delayed indicator should be implemented as a reusable component (tbd)\n- Suggested loading indicator for inline fields: <https://mhnpd.github.io/react-loader-spinner/docs/components/three-dots>\n\n### References\n\n[WCAG2.1](https://www.w3.org/WAI/WCAG21/Understanding/)\n\n[Sarah’s talk for references](https://docs.google.com/presentation/d/1HNrhvK7zVZ7jgH3ELbX7KB97SdXCZXrvov_I4Oe1l2c/edit?usp=sharing)\n"
  },
  {
    "path": "doc/development/DevelopmentProcess.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Development Process\n\nThis document is intended for core engineers who work on the `main` branch of Arvados.\n\n## Two Remotes\n\nThis document assumes you have two remotes, where `origin` refers to `git.arvados.org` and `github` refers to `github.com`:\n\n```sh\n$ git remote -v\ngithub\tgit@github.com:arvados/arvados.git (fetch)\ngithub\tgit@github.com:arvados/arvados.git (push)\norigin\tgit@git.arvados.org:arvados.git (fetch)\norigin\tgit@git.arvados.org:arvados.git (push)\n```\n\n## Fetch GitHub Pull Requests as Branches\n\nGitHub tracks pull requests under `refs/pull/`. You can configure Git to map these to your local repository when you fetch GitHub:\n\n```sh\n$ git config set --append remote.github.fetch '+refs/pull/*:refs/remotes/ghpr/*'\n```\n\nNow after you fetch, you can refer to pull request #123 as `ghpr/123/head`.\n\nIf you prefer to fetch pull requests individually, the command to do that is:\n\n```sh\n$ git fetch github \"pull/PRNUM/head:BRANCHNAME\"\n```\n\n## Review a Pull Request\n\nReviewing a pull request is about verifying that the branch follows all our [coding standards](CodingStandards.md). You should be able to verify that the ready-to-merge checklist is complete and accurate: the branch does what it says, tests pass, it follows our style, etc.\n\nIf you notice scale issues, bugs, missing documentation, etc., you can bring that up as part of the review and it should be addressed. However, the *point* of review is *not* to try to find problems. The *point* is to verify that the branch solves a problem and the code is maintainable.\n\n## Merge a Pull Request\n\nWhen a branch passes review, it should be merged to `main`. Core engineers can (and normally do) merge their own branches. Contributions from others need to be merged by a core engineer. Either way, the process is:\n\n```sh\n$ git switch main\n$ git pull --ff-only\n$ git merge --no-ff BRANCHREF\n# Make sure the commit message includes an issue ref and your DCO signoff.\n$ git push origin main\n```\n"
  },
  {
    "path": "doc/development/DistroVersions.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Distribution dependency versions\n\nFor runtime dependencies that we aim to get from the distribution, this page lists the versions included with each distribution we support.\n\nFor RHEL releases, this table shows the *latest* version available from AppStreams for **both** the earliest point release we support and the latest point release at the time of the last update.\n\n| Distribution              | Release Date  | PostgreSQL | Python  | Ruby  |\n|---------------------------|---------------|------------|---------|-------|\n| Ubuntu 22.04 “jammy”      | April 2022    | 14.17      | 3.10.12 | 3.0.2 |\n| RHEL 8.8 with AppStreams  | May 2023      | 15.6       | 3.11.5  | 3.1.4 |\n| RHEL 9.2 with AppStreams  | May 2023      | 15.6       | 3.11.5  | 3.1.4 |\n| Debian 12 “bookworm”      | June 2023     | 15.13      | 3.11.2  | 3.1.2 |\n| RHEL 8.9 with AppStreams  | November 2023 | 15.6       | 3.11.5  | 3.1.4 |\n| Ubuntu 24.04 “noble”      | April 2024    | 16.2       | 3.12.3  | 3.2.3 |\n| RHEL 9.5 with AppStreams  | November 2024 | 16.8       | 3.12.5  | 3.3.8 |\n| RHEL 10.0 with AppStreams | May 2025      | 16.8       | 3.12.9  | 3.3.8 |\n| Debian 13 “trixie”        | August 2025   | 17.5       | 3.13.3  | 3.3.8 |\n\n## For Arvados 3.1.x\n\n| Distribution             | Release Date | PostgreSQL | Python | Ruby  |\n|--------------------------|--------------|------------|--------|-------|\n| Ubuntu 20.04 “focal”     | April 2020   | 12.22      | 3.8.10 | 2.7.0 |\n| RHEL 8.4 with AppStreams | June 2021    | 13.3       | 3.9.2  | 2.7.4 |\n| Debian 11 “bullseye”     | August 2021  | 13.16      | 3.9.2  | 2.7.4 |\n"
  },
  {
    "path": "doc/development/Prerequisites.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Hacking prerequisites\n\nThis page describes how to install all the software necessary to develop Arvados and run tests.\n\n## Host options\n\nYou must have a system running a supported distribution. That system can be installed directly on hardware; running on a cloud instance; or in a virtual machine.\n\n### Supported distributions\n\nAs of March 2026/Arvados 3.2, these instructions and the entire test suite are known to work on Debian 12 \"bookworm\" and Debian 13 “trixie.”\n\nYou may try to run these instructions and tests on Ubuntu 22.04 “jammy”/24.04 “noble,” but they have not been tested and you may find some bugs throughout.\n\nThese instructions are not suitable for any Red Hat-based distribution. Our Ansible playbook will refuse to run on them.\n\n### Base configuration\n\nOn your development system, you should have a user account with full permission to use sudo.\n\nYou can run the Ansible playbook to install your development system on a different system. To do this, you must have permission to SSH into your user account from the system running Ansible (the “control node”) to the development system you’re installing (the “target node”).\n\n### Virtual machine requirements\n\nIf you run your development system in a virtual machine, it needs some permissions. Many environments will allow these operations by default, but they could be limited by your virtual machine setup.\n\n- It must be able to create and manage FUSE mounts (`/dev/fuse`)\n- It must be able to create and run Docker containers\n- It must be able to create and run Singularity containers—this requires creating and managing block loopback devices (`/dev/block-loop`)\n- It must have the `fs.inotify.max_user_watches` sysctl set to at least 524288. Our Ansible playbook will try to set this on the managed host, but if it is unable to do so, you may need to set it on the parent host instead.\n\n## Install development environment with Ansible\n\n### Clone Arvados source\n\nYou will need the Arvados source code to follow this process.\n\n```sh\n$ git clone https://github.com/arvados/arvados.git\n```\n\nIf you want to switch to a specific branch or revision like `3.2-release`, do that here.\n\n### Install Ansible\n\nInstall Ansible following the instructions in `arvados/tools/ansible/README.md`. This ensures you get the right versions of everything.\n\n### Write an Arvados database configuration\n\nMake a copy of the default test configuration:\n\n```sh\n$ cp arvados/tools/ansible/files/default-test-config.yml ~/zzzzz-config.yml\n```\n\nYou can copy the file to a different location if you like. This page will use `~/zzzzz-config.yml` as the placeholder path throughout.\n\nEdit this file with the database configuration you’d like to use. The cluster ID **must** be `zzzzz`. You can change the `user`, `password`, and `dbname` settings freely. Our Ansible playbook will configure PostgreSQL so your settings here work.\n\nThe playbook will always install the `postgresql` server package. It will **not** change any PostgreSQL configuration except to add `pg_hba.conf` entries for this user. You should only change `host` and `port` if you need to use a PostgreSQL server that is already installed and running somewhere else.\n\n### Write an Ansible inventory\n\nAn inventory file tells Ansible what host(s) to manage, how to connect to them, and what settings they use. 
Write an inventory file to `~/zzzzz-inventory.yml` like this:\n\n```yaml\narvados_test_all:\n  # This is the list of host(s) where we're installing the test environment.\n  # This example installs on the same system running Ansible.\n  # If you want to manage remote hosts, you can write your own host list:\n  # <https://docs.ansible.com/ansible/latest/getting_started/get_started_inventory.html>\n  hosts:\n    localhost:\n      ansible_connection: local\n  vars:\n    # The path to the Arvados cluster configuration you wrote in the previous section.\n    arvados_config_file: \"{{ lookup('env', 'HOME') }}/zzzzz-config.yml\"\n\n    # The primary user doing Arvados development and tests.\n    # This user will be added to the `docker` group.\n    # It defaults to the name of the user running `ansible-playbook`.\n    # If you want to configure a different user, set that here:\n    #arvados_dev_user: USERNAME\n\n    # By default, the playbook installs old versions of Python and Ruby from source.\n    # This helps you make sure you don't accidentally use too-new features during\n    # development. If you're sure you don't need that—for example, you specifically\n    # want to test a distribution's packaged version—set this flag:\n    #arvados_dev_from_pkgs: true\n```\n\n### Run the playbook\n\nThe basic command to run the playbook is:\n\n```sh\n$ cd arvados/tools/ansible\n$ ansible-playbook -K -i ~/zzzzz-inventory.yml install-dev-tools.yml\n```\n\nWhen you are prompted for the `BECOME password:`, enter the password for your user account on the development host that lets you run `sudo` commands.\n\n`ansible-playbook` has many options to control how it runs that you can add if you like. Refer to [the `ansible-playbook` documentation](https://docs.ansible.com/ansible/latest/cli/ansible-playbook.html) for more information.\n\n## Run Arvados tests\n\nAfter the playbook runs successfully, you should be able to run the Arvados tests from a source checkout on your development host. This document will walk you through setting up and running a single test suite to verify your setup. `cd` to your Arvados checkout and run:\n\n```sh\n$ mkdir -p ~/.cache/arvados-test\n$ build/run-tests.sh --temp ~/.cache/arvados-test --interactive\n```\n\nThis will install baseline prerequisites, then list commands and test targets, then prompt you with:\n\n    What next? install deps\n\nAccept that command. It will install the rest of the dependencies that are necessary for running a test cluster, then report:\n\n    All test suites passed.\n\nAt this stage, this message simply means that the \"install deps\" command has succeeded.\n\nNow we can run a test suite. The controller tests are a good first example, because they interact with a test cluster but not much else. At the `What next?` prompt, enter `test lib/controller`, and you'll see the test cluster start:\n\n    What next? 
test lib/controller\n    Starting API, controller, keepproxy, keep-web, ws, and nginx ssl proxy...\n\nYou'll see logs from individual services, then, hopefully, the controller tests starting and passing:\n\n    ======= test lib/controller\n    ok  \tgit.arvados.org/arvados.git/lib/controller\t64.679s\tcoverage: 82.0% of statements\n    ======= test lib/controller -- 68s\n    Pass: lib/controller tests (68s)\n    All test suites passed.\n\nRefer to [Running tests](RunningTests.md) for details about running specific test suites, test selection, and other features.\n\n## Troubleshooting\n\nIf the playbook succeeds but you can't get tests running, there might be a disconnect between your shell configuration and what the system expects. This section documents some places you can look.\n\n### Dependencies in `$PATH`\n\nThe playbook will install symlinks for Go, Node, Python, Ruby, Singularity, and Yarn under `/usr/local/bin`. The actual tools are installed under `/opt`. When you run Arvados tests or other development tools, you must ensure `/usr/local/bin` appears in your `$PATH` before any directories with other versions like `/usr/bin`.\n\n### Arvados `$CONFIGSRC`\n\nThe playbook writes the Arvados test cluster's database configuration at `~/.config/arvados/config.yml`, and sets up a hook `/etc/profile.d/arvados-test.sh` to set your `CONFIGSRC` environment variable to that file's base directory. If most tests fail with a database connection error, check that this variable is set:\n\n```sh\n$ echo \"${CONFIGSRC:-UNSET}\"\n/home/you/.config/arvados\n```\n\nIf that reports `UNSET`, first check if you're using a stale shell session started before the Ansible playbook run. You may need to log out of that session and start a new one.\n\nIf that doesn't work, you may add a line to set `CONFIGSRC=\"$HOME/.config/arvados\"` to your shell configuration, or set it manually when you run `run-tests.sh`:\n\n```sh\n$ CONFIGSRC=\"$HOME/.config/arvados\" build/run-tests.sh ...\n```\n"
  },
  {
    "path": "doc/development/RunningTests.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Running Tests\n\nArvados includes a script at `build/run-tests.sh` which tests (nearly) all of the components in the source tree. This is the script that [Arvados CI tests](https://ci.arvados.org) use, so running it locally is the most consistent entry point to all Arvados tests.\n\nThis document assumes you have [installed a development environment](Prerequisites.md) following that guide.\n\n## Running interactively\n\nMost developers want to run tests with `--temp` and `--interactive`:\n\n```sh\n$ mkdir -p ~/.cache/arvados-test\n$ build/run-tests.sh --temp ~/.cache/arvados-test --interactive\n```\n\nThis will display help with a list of commands and test targets. When you run with a fresh temp directory, the tool will probably prompt you to `install deps`. You should do this to install dependencies to the temp directory.\n\n### Dealing with state\n\nBefore you change `run-tests.sh` itself—including pulling changes from other developers—you should end any interactive test sessions.\n\nIf you make changes to a low-level library or SDK and want to see how it affects dependent tests, `install` your changed component, then `test` the dependents.\n\nIf you make changes to a cluster component and want to see how they affect tests, `reset` the test cluster, then `test` the components you're interested in.\n\nIf you want to clean your `--temp` directory—because you pulled a bad dependency or just want to recover some disk space—it is safe to end any interactive sessions, remove it, then `mkdir` it again.\n\n### Running individual test cases\n\n#### Golang\n\nMost Go packages use gocheck. Use gocheck command line args like `-check.f` to select tests and `-check.v` to show more output.\n\n    What next? test lib/controller/router -check.f=RouterSuite -check.v\n    ======= test lib/controller/router\n    PASS: request_test.go:135: RouterSuite.TestAttrsInBody\t0.000s\n    PASS: request_test.go:164: RouterSuite.TestBoolParam\t0.000s\n    PASS: router_test.go:55: RouterSuite.TestOptions\t0.002s\n    PASS: request_test.go:209: RouterSuite.TestStringOrArrayParam\t0.000s\n    OK: 4 passed\n    PASS\n    ok  \tgit.arvados.org/arvados.git/lib/controller/router\t0.012s\n    ======= test lib/controller/router -- 1s\n\n#### Python\n\nTests for Python components run under pytest. If what you really want to do is to focus on failing or newly-added tests, consider passing the appropriate switches to do that:\n\n      -x, --exitfirst       Exit instantly on first error or failed test\n      --lf, --last-failed   Rerun only the tests that failed at the last run (or\n                            all if none failed)\n      --ff, --failed-first  Run all tests, but run the last failures first. 
This\n                            may re-order tests and thus lead to repeated fixture\n                            setup/teardown.\n      --nf, --new-first     Run tests from new files first, then the rest of the\n                            tests sorted by file mtime\n\nIf you want to manually select tests:\n\n      FILENAME              Run tests from FILENAME, relative to the source root\n      FILENAME::CLASSNAME   Run tests from CLASSNAME\n      FILENAME::FUNCNAME, FILENAME::CLASSNAME::FUNCNAME\n                            Run only the named test function\n      -k EXPRESSION         Only run tests which match the given substring\n                            expression. An expression is a Python evaluable\n                            expression where all names are substring-matched\n                            against test names and their parent classes.\n                            Example: -k 'test_method or test_other' matches all\n                            test functions and classes whose name contains\n                            'test_method' or 'test_other', while -k 'not\n                            test_method' matches those that don't contain\n                            'test_method' in their names. -k 'not test_method\n                            and not test_other' will eliminate the matches.\n                            Additionally keywords are matched to classes and\n                            functions containing extra names in their\n                            'extra_keyword_matches' set, as well as functions\n                            which have names assigned directly to them. The\n                            matching is case-insensitive.\n      -m MARKEXPR           Only run tests matching given mark expression. For\n                            example: -m 'mark1 and not mark2'.\n\nFor even more options, refer to the [pytest command line reference](https://docs.pytest.org/en/stable/reference/reference.html#command-line-flags).\n\nExample:\n\n    What next? test sdk/python --disable-warnings --tb=no --no-showlocals tests/test_keep_client.py::KeepDiskCacheTestCase\n    ======= test sdk/python\n    […pip output…]\n    ========================================================== test session starts ==========================================================\n    platform linux -- Python 3.10.19, pytest-9.0.2, pluggy-1.6.0\n    rootdir: /home/brett/Curii/arvados/sdk/python\n    configfile: pytest.ini\n    collected 9 items\n\n    tests/test_keep_client.py F........                                                                                               [100%]\n\n    ======================================================== short test summary info ========================================================\n    FAILED tests/test_keep_client.py::KeepDiskCacheTestCase::test_disk_cache_cap - AssertionError: True is not false\n    ====================================================== 1 failed, 8 passed in 0.16s ======================================================\n    ======= sdk/python tests -- FAILED\n    ======= test sdk/python -- 2s\n    Failures (1):\n    Fail: sdk/python tests (2s)\n    What next? 
test sdk/python --disable-warnings --tb=no --no-showlocals --lf\n    ======= test sdk/python\n    […pip output…]\n    ========================================================== test session starts ==========================================================\n    platform linux -- Python 3.10.19, pytest-9.0.2, pluggy-1.6.0\n    rootdir: /home/brett/Curii/arvados/sdk/python\n    configfile: pytest.ini\n    testpaths: tests\n    collected 964 items / 963 deselected / 1 selected\n    run-last-failure: rerun previous 1 failure\n\n    tests/test_keep_client.py F                                                                                                       [100%]\n\n    ======================================================== short test summary info ========================================================\n    FAILED tests/test_keep_client.py::KeepDiskCacheTestCase::test_disk_cache_cap - AssertionError: True is not false\n    ============================================= 1 failed, 963 deselected, 1 warning in 0.43s ==============================================\n    ======= sdk/python tests -- FAILED\n    ======= test sdk/python -- 2s\n    Failures (1):\n    Fail: sdk/python tests (2s)\n\n#### RailsAPI\n\nRails parses `TESTOPTS` and passes them to the test runner:\n\n    What next? test services/api TESTOPTS=--name=/.*signed.locators.*/\n    [...]\n    # Running:\n\n    ....\n\n    Finished in 1.080084s, 3.7034 runs/s, 461.0751 assertions/s.\n\n##### Controlling Rails test order\n\nRails tests start off with a line like this:\n\n    Run options: -v -d --seed 57089\n\nThe seed value determines the order in which tests are run. To reproduce an order-dependent test failure, specify the same seed as a previous failed run:\n\n    What next? test services/api TESTOPTS=\"-v -d --seed 57089\"\n\n## Environment variables\n\nThe following variables affect test setup and execution:\n\nVariable    | Value\n------------|-----------------------------------------------------------------\n`CONFIGSRC` | A directory with an Arvados cluster `config.yml`. Tests will read `Clusters.zzzzz.PostgreSQL.Connection` from that file to determine how to connect to the test database. If not set, the tests will use default connection settings.\n`WORKSPACE` | A directory with an Arvados Git checkout. Defaults to what Git reports for `run-tests.sh` itself.\n\n`run-tests.sh` cleans the `ARVADOS_[…]` variables from the environment to help ensure consistent test execution. Sometimes you may want to set these variables nonetheless, but then you must pass them as arguments to `run-tests.sh` rather than export them to the environment directly.\n\nVariable               | Value\n-----------------------|------------------------------------------------------\n`ARVADOS_DEBUG`        | If 1, lots of components will log more information.\n`ARVADOS_TEST_PRIVESC` | A literal string. If `sudo`, various tests that need to perform privileged operations will run with `sudo` to get them. Otherwise, those tests are skipped.\n\n## Scripting run-tests\n\nIf you run `run-tests.sh` without `--interactive`, by default it runs all the tests and reports their results. This is how CI runs. Run the script with `--help` to see the options you can use to control this behavior. Common options include:\n\nOption           | Behavior\n-----------------|------------------------------------------------------------\n`--only`         | Run a single set of tests\n`--skip`         | Skip a set of tests during a full run\n`NAME_test=ARGS` | Pass arguments to a set of tests\n
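\nFor example, a non-interactive run of a single test suite, reusing the `--temp` directory from earlier, might look like this (a sketch; combine the options above as needed):\n\n```sh\n$ build/run-tests.sh --temp ~/.cache/arvados-test --only lib/controller\n```\n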
Common options include:\n\nOption           | Behavior\n-----------------|------------------------------------------------------------\n`--only`         | Run a single set of tests\n`--skip`         | Skip a set of tests during a full run\n`NAME_test=ARGS` | Pass arguments to a set of tests\n\n## Running Workbench tests in Docker\n\nIf you do not have a full development environment, Workbench tests can be run in Docker. The `services/workbench2` subfolder includes Makefile targets that preinstall the necessary dependencies in a Docker container using Ansible.\n\nWith Docker and Ansible installed (see `arvados/tools/ansible/README.md`), run this command from within the `arvados/services/workbench2` directory:\n\n    make workbench-docker-image\n\nYou can verify the Docker image was built by looking for `arvados/workbench` in the output of `docker image ls`.\n\nThen, start the interactive tests with this command:\n\n    make interactive-tests-in-docker\n\nNon-interactive (headless) tests can be run with these targets:\n\n    # Both e2e & component tests\n    make tests-in-docker\n\n    # Integration (e2e) only\n    make integration-tests-in-docker\n\n    # Unit (component) only\n    make unit-tests-in-docker\n\n### Troubleshooting\n\n#### Missing X server or `$DISPLAY`\n\nRun:\n\n    xhost +local:root\n\n#### No version of Cypress is installed / other error starting Cypress\n\nRecreate the home volume, which reinstalls Cypress and other persisted dependencies, by running:\n\n    make clean-docker-volume\n    make workbench-docker-volume\n"
  },
  {
    "path": "doc/development/UpdatingDependencies.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Updating dependencies\n\n## Go\n\n(see also: [real documentation](https://go.dev/doc/modules/managing-dependencies))\n\nUpdate a single dependency:\n\n    ~/arvados$ go get github.com/docker/docker@latest\n\nUpdate all dependencies:\n\n    ~/arvados$ go get -u -t ./...\n\nThen sync:\n\n    ~/arvados$ go mod tidy\n\nThis is a good time to review “replace” directives in `go.mod` and find better solutions to issues that are currently handled by pinning modules to old versions or unmaintained forks.\n"
  },
  {
    "path": "doc/development/git.conf",
    "content": "# Suggested Git configuration for Arvados\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Arvados standards forbid trailing whitespace.\n# Configure Git to highlight it.\n[color \"diff\"]\nwhitespace = red reverse\n\n[core]\nwhitespace = trailing-space\n\n[merge]\n# Merges to main should never be fast-forward.\n# Easiest to turn it off by default:\nff = false\n\n[user]\n# The Arvados DCO sign-off requires your real name and email.\n# Refer to CONTRIBUTING.md for full details.\n#name = Your Name\n#email = yourmail@example.local\n\n[trailer.arvados]\nkey = Arvados-DCO-1.1-Signed-off-by\nifexists = doNothing\n# If you uncomment cmd, the prepare-commit-msg hook will prepare commit\n# messages with your DCO sign-off, which attests you have permission to\n# contribute the code. Refer to CONTRIBUTING.md for full terms.\n#cmd = echo \\\"$(git config user.name) <$(git config user.email)>\\\"\n"
  },
  {
    "path": "doc/development/prepare-commit-msg.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# This Git hook adds refs to branch merges and adds the Arvados DCO sign-off\n# if you have configured it to do so.\n\nset -e\nset -u\n\nmsgfile=\"$1\"; shift\ntrailer=\"$(git interpret-trailers --trailer arvados </dev/null 2>/dev/null | grep @ || :)\"\n\nnew_msg=\"$(mktemp --tmpdir=\"$(dirname \"$msgfile\")\" commit-XXXXXX.txt)\"\ntrap 'rm -f \"$new_msg\"' EXIT INT TERM QUIT\ngawk -f - -v source=\"${1:-}\" -v trailer=\"$trailer\" -- \"$msgfile\" >\"$new_msg\" <<'EOF'\nBEGIN { $0=trailer; trailer_key=$1; }\nfunction write_trailer() {\n  if (trailer) {\n    if (last1 != trailer_key) { print \"\"; }\n    print trailer;\n    trailer=\"\";\n  }\n}\nEND { write_trailer(); }\n($0 == trailer) { trailer=\"\"; }\n((last1 == trailer_key && $1 != trailer_key) ||\n $1 == \"#\" || $1 == \"---\" || $1 == \"diff\") { write_trailer(); }\n(NR == 1 && $1 == \"Merge\" && $(NF - 1) == \"branch\") {\n  match($NF, /[[:punct:]]([0-9]+)[[:punct:]]/, bmatch);\n  sub(/^'.*\\//, \"'\", $NF);\n  print \"Merge branch\", $NF;\n  if (RSTART) {\n    printf(\"%sRefs #%s.%s\", ORS, bmatch[1], ORS);\n  }\n  last1=$1;\n  next;\n}\n{ print; last1=$1; }\nEOF\nmv -f \"$new_msg\" \"$msgfile\"\n"
  },
  {
    "path": "doc/development/release/Checklist.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Release Checklist\n\nPre-process:\n\n1.  Create an issue for the release.\n2.  Add each of the following steps (starting at step 1) as tasks with the step number in the subject.\n3.  Assign each task.\n4.  The current task goes into the “In Progress” column.\n5.  When the current task is finished, move it to resolved, and move the next task into “In Progress”.\n6.  Notify the assignee of the next task that it is ready to begin.\n\nMeta-process:\n\n1.  Periodically review that this documented process reflects our actual process and update it.\n2.  When steps are added/changed/rearranged/removed, be sure to update [`cmd/art/TASKS` in the `arvados-dev` repository](https://dev.arvados.org/projects/arvados/repository/arvados-dev/revisions/main/show/cmd/art).\n\n<table>\n  <thead>\n    <tr class=\"header\">\n      <th>Step</th>\n      <th>Who</th>\n      <th>What</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr class=\"odd\">\n      <td>0</td>\n      <td>engineering</td>\n      <td>Build new features, refine good code into great code</td>\n    </tr>\n    <tr class=\"even\">\n      <td>1</td>\n      <td>ops</td>\n      <td>\n        <a href=\"https://ci.arvados.org/view/All/job/packer-build-compute-image/\">Build a new tordo compute image</a> against the latest development packages.<br>\n        <a href=\"https://dev.arvados.org/projects/ops/wiki/Updating_clusters\">Update the tordo configuration</a> and test it with a couple of representative workflows (at least one bioinformatics workflow and one S3 download workflow).<br>\n        If everything works well, update version pins based on the versions installed in the new image. Update:\n        <ul>\n          <li><code>tools/ansible/roles/arvados_docker/files/arvados-docker.pref</code></li>\n          <li><code>tools/ansible/roles/compute_amd_rocm/defaults/main.yml</code> (update <code>arvados_compute_amd_rocm_version</code>)</li>\n          <li><code>tools/ansible/roles/compute_nvidia/files/arvados-nvidia.pref</code></li>\n        </ul>\n      </td>\n    </tr>\n    <tr class=\"odd\">\n      <td>2</td>\n      <td>engineering</td>\n      <td>Prepare release branch on the <code>arvados</code> and <code>arvados-formula</code> repositories. For major releases, this means branching a new <code>X.Y-staging</code> from main. For minor releases, this means cherry-picking features onto the existing <code>X.Y-staging</code> branch. Ensure that Redmine issues for features or bugfixes that are appearing for the first time in this version are associated with the correct release (for major releases, use <code>art redmine issues find-and-associate</code>).</td>\n    </tr>\n    <tr class=\"even\">\n      <td>3</td>\n      <td>engineering</td>\n      <td>\n        Ensure that the release staging branch passes automated tests on Jenkins.\n        <ul>\n          <li><a href=\"https://ci.arvados.org/job/developer-run-tests/\">developer-run-tests</a></li>\n          <li><a href=\"https://ci.arvados.org/job/developer-run-tests-doc-sdk-java-R/\">developer-run-tests-doc-sdk-java-R</a></li>\n          <li><a href=\"https://ci.arvados.org/job/arvados-cwl-conformance-tests/\">arvados-cwl-conformance-tests</a></li>\n        </ul>\n      </td>\n    </tr>\n    <tr class=\"odd\">\n      <td>4</td>\n      <td>engineering</td>\n      <td>Review the release branch to make sure it contains all the commits that need to be in the release. 
If new commits are added, resume checklist from step 3.</td>\n    </tr>\n    <tr class=\"even\">\n      <td>5</td>\n      <td>product mgr</td>\n      <td>Write release notes and publish them <a href=\"https://www-dev.arvados.org/releases/\">on the www-dev site</a>.</td>\n    </tr>\n    <tr class=\"odd\">\n      <td>6</td>\n      <td>everyone</td>\n      <td>Review release notes</td>\n    </tr>\n    <tr class=\"even\">\n      <td>7</td>\n      <td>product mgr</td>\n      <td>Create a Redmine release for the next patch release after the current one.</td>\n    </tr>\n    <tr class=\"odd\">\n      <td>8</td>\n      <td>release eng</td>\n      <td>Build release candidate packages with version <code>X.Y.Z~rcN-1</code> using the Jenkins job <a href=\"https://ci.arvados.org/job/build-and-publish-rc-packages/\">build-and-publish-rc-packages</a>. Add a comment on the release ticket identifying the Git commit hash used for the build, and link to your Jenkins run.</td>\n    </tr>\n    <tr class=\"even\">\n      <td>9</td>\n      <td>release eng</td>\n      <td>Publish release candidate <code>arvados/jobs</code> Docker image using <a href=\"https://ci.arvados.org/job/docker-jobs-image-release/\">docker-jobs-image-release</a></td>\n    </tr>\n    <tr class=\"odd\">\n      <td>10</td>\n      <td>ops</td>\n      <td>Test installer formula / provision scripts with RC packages. Run the <a href=\"https://ci.arvados.org/job/test-provision/\">test-provision Jenkins job</a> where <code>git_hash</code> is your <code>X.Y-staging</code> commit and <code>RELEASE</code> is <code>testing</code>.</td>\n    </tr>\n    <tr class=\"even\">\n      <td>11</td>\n      <td>ops</td>\n      <td>\n        Update pirca to use the RC packages: <a href=\"https://ci.arvados.org/job/packer-build-compute-image/\">build a new compute image</a>, <a href=\"https://dev.arvados.org/projects/ops/wiki/Updating_clusters\">update the Arvados version in Salt</a> and deploy.<br>\n        After Salt updates the cluster, check that your new version deployed successfully by running <code>arvados-server version</code> and then <code>arvados-server check</code> to verify other running services have the same version.\n      </td>\n    </tr>\n    <tr class=\"odd\">\n      <td>12</td>\n      <td>bfx</td>\n      <td>\n        Run <a href=\"https://ci.arvados.org/job/run-tests-cwl-suite/\">CWL integration tests</a> and <a href=\"https://workbench.pirca.arvadosapi.com/workflows/pirca-7fd4e-ut5n6r2ydl6o6kj\">fastq-to-gvcf pipeline</a> on pirca (<a href=\"FastqPipeline.md\">more about running fastq-to-gvcf</a>).<br>\n        After the workflow succeeds, check the versions reported at the top of the workflow logs to verify it ran your RC for crunch-run, arv-mount, and a-c-r.\n      </td>\n    </tr>\n    <tr class=\"even\">\n      <td>13</td>\n      <td>engineering</td>\n      <td>Perform final manual testing based on risk assessment, the release notes and <a href=\"ManualTests.md\">manual testing plan</a>. 
This should involve at least a \"smell check\" to confirm that key features, improvements or bug fixes intended to appear in the release are present and behave as intended.</td>\n    </tr>\n    <tr class=\"odd\">\n      <td>14</td>\n      <td>product mgr</td>\n      <td>Approve RC for release</td>\n    </tr>\n    <tr class=\"even\">\n      <td>15</td>\n      <td>release eng</td>\n      <td>Publish Ruby gems using <a href=\"https://ci.arvados.org/job/build-publish-packages-python-ruby/\">build-publish-packages-python-ruby</a> with <strong>only</strong> the <code>BUILD_RUBY</code> box checked.</td>\n    </tr>\n    <tr class=\"odd\">\n      <td>16</td>\n      <td>release eng</td>\n      <td>\n        On the <code>X.Y-staging</code> branch, update these files to refer to the release version:\n        <ul>\n          <li><code>doc/admin/upgrading.html.textile.liquid</code>, update the \"Upgrading Arvados and Release notes\" doc page with the version and date of the release</li>\n          <li><code>contrib/arvados-bootstrap/pyproject.toml</code>, update <code>project.version</code> and <code>project.dependencies</code></li>\n          <li><code>contrib/R-sdk/DESCRIPTION</code>, update <code>Version:</code></li>\n          <li><code>services/api/Gemfile</code>, update it to depend on the newly published Arvados gem and run <code>bundle install</code> to update <code>Gemfile.lock</code></li>\n          <li><code>tools/ansible/roles/arvados_apt/defaults/main.yml</code>, update <code>arvados_pin_version</code></li>\n        </ul>\n      </td>\n    </tr>\n    <tr class=\"even\">\n      <td>17</td>\n      <td>release eng</td>\n      <td>Build final release packages with version <code>X.Y.Z-1</code> using the Jenkins job <a href=\"https://ci.arvados.org/job/build-and-publish-rc-packages/\">build-and-publish-rc-packages</a>. Add a comment on the release ticket identifying the Git commit hash used for the build, and link to your Jenkins run.</td>\n    </tr>\n    <tr class=\"odd\">\n      <td>18</td>\n      <td>release eng</td>\n      <td>Publish stable release <code>arvados/jobs</code> Docker image using <a href=\"https://ci.arvados.org/job/docker-jobs-image-release/\">docker-jobs-image-release</a></td>\n    </tr>\n    <tr class=\"even\">\n      <td>19</td>\n      <td>release eng</td>\n      <td>Push packages to stable repos using <a href=\"https://ci.arvados.org/job/publish-packages-to-stable-repo/\">publish-packages-to-stable-repo</a> (<a href=\"https://dev.arvados.org/projects/ops/wiki/Promoting_Packages_to_Stable\">more info</a>)</td>\n    </tr>\n    <tr class=\"odd\">\n      <td>20</td>\n      <td>release eng</td>\n      <td>Publish Python packages using <a href=\"https://ci.arvados.org/job/build-publish-packages-python-ruby/\">build-publish-packages-python-ruby</a> with <strong>only</strong> the <code>BUILD_PYTHON</code> box checked.</td>\n    </tr>\n    <tr class=\"even\">\n      <td>21</td>\n      <td>release eng</td>\n      <td>Publish Java package using <a href=\"https://ci.arvados.org/job/build-java-sdk/\">build-java-sdk</a> and following <a href=\"JavaSDK.md\">Releasing Java SDK packages</a></td>\n    </tr>\n    <tr class=\"odd\">\n      <td>22</td>\n      <td>release eng</td>\n      <td>Publish R package using <a href=\"https://ci.arvados.org/job/build-package-r/\">build-package-r</a></td>\n    </tr>\n    <tr class=\"even\">\n      <td>23</td>\n      <td>release eng</td>\n      <td>\n        Tag the commits in each repo used to build the release in Git. 
Create an annotated tag (<code>git tag --annotate</code>) with a message like \"Release notes at https://arvados.org/release-notes/X.Y.Z/\". That makes the <a href=\"https://github.com/arvados/arvados/releases\">GitHub releases page</a> look good. See <a href=\"https://docs.github.com/en/repositories/releasing-projects-on-github/automatically-generated-release-notes\">GitHub documentation for more details about how to automate releases</a>.<br>\n        Create or fast-forward the <code>X.Y-release</code> branch to match <code>X.Y-staging</code>.<br>\n        Cherry-pick the upgrade notes commit (from step 2) onto <code>main</code>.\n      </td>\n    </tr>\n    <tr class=\"odd\">\n      <td>24</td>\n      <td>release eng</td>\n      <td>\n        Ensure the new release is published on <a href=\"https://doc.arvados.org/\">https://doc.arvados.org/</a>.<br>\n        Ensure that release notes &amp; any other materials are pointing to the correct version of the docs.<br>\n        (If anything goes wrong, see <a href=\"https://dev.arvados.org/projects/arvados-private/wiki/Docarvadosorg_deployment\">https://dev.arvados.org/projects/arvados-private/wiki/Docarvadosorg_deployment</a>)\n      </td>\n    </tr>\n    <tr class=\"even\">\n      <td>25</td>\n      <td>ops</td>\n      <td>Update pirca and jutro to the new stable release: <a href=\"https://ci.arvados.org/job/packer-build-compute-image/\">build new compute images</a>, <a href=\"https://dev.arvados.org/projects/ops/wiki/Updating_clusters\">update the Arvados version in Salt</a> and deploy.</td>\n    </tr>\n    <tr class=\"odd\">\n      <td>26</td>\n      <td>product mgr</td>\n      <td>Merge release notes (step 6) from \"develop\" branch to \"main\" branch of the <code>arvados-www</code> Git repository and check that the <a href=\"https://arvados.org/\">https://arvados.org</a> front page is updated</td>\n    </tr>\n    <tr class=\"even\">\n      <td>27</td>\n      <td>product mgr</td>\n      <td>Send out the release notes via MailChimp, tweet from the Arvados account, announce on the Discourse forum, Matrix, etc.</td>\n    </tr>\n    <tr class=\"odd\">\n      <td>28</td>\n      <td>release eng</td>\n      <td>\n        In Jenkins:\n        <ul>\n          <li>For each test from step 3, go to \"Job Config History\" and record on the release ticket the timestamp of the configuration used to test the release</li>\n          <li>Go to <a href=\"https://ci.arvados.org/manage/cloud/gce-gce2/configure\">Manage Jenkins &gt; Clouds &gt; gce2 &gt; Configure</a> and record the VM image tagged \"tests\" used for Jenkins workers to run the tests for the release (should be something like jenkins-image-arvados-tests-YYYYMMDDHHMMSS) on the release ticket</li>\n          <li>Go to <a href=\"https://ci.arvados.org/job/packer-build-jenkins-image-arvados-tests/\">packer-build-jenkins-image-arvados-tests history</a> and record on the release ticket the Jenkins job used to build the above VM image.</li>\n        </ul>\n      </td>\n    </tr>\n    <tr class=\"even\">\n      <td>29</td>\n      <td>release eng</td>\n      <td>\n        Add the release to <a href=\"https://doi.org/10.5281/zenodo.6382942\">doi:10.5281/zenodo.6382942</a><br>\n        <a href=\"Zenodo.md\">Updating Zenodo Version of Arvados after Release</a><br>\n        <a href=\"https://zenodo.org/record/6382943\">https://zenodo.org/record/6382943</a>\n      </td>\n    </tr>\n  </tbody>\n</table>\n"
  },
  {
    "path": "doc/development/release/FastqPipeline.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# More about running fastq-to-gVCF\n\nWhen we do releases, we run a test pipeline that is intended to be representative of a bioinformatics workload.\n\n1. Deploy the version of `arvados-cwl-runner` that you want to test and make sure that the corresponding `arvados/jobs` image [has been built and uploaded to Docker Hub](https://ci.arvados.org/view/Release%20Pipeline/job/docker-jobs-image-release/) or built using the `arvados/build/build-dev-docker-jobs-image.sh` script and uploaded using `arv-keepdocker`.\n2. Clone <https://github.com/arvados/arvados-tutorial/>\n3. Create an Arvados project for the test run\n4. `cd arvados-tutorial/WGS-processing`\n5. Run the following command: `arvados-cwl-runner --no-wait --disable-reuse --project-uuid <my project> cwl/wgs-processing-wf.cwl yml/wgs-processing-wf-chr19.yml`\n6. Monitor this for success. It usually takes about an hour to run.\n\nIf you are running this on `pirca`, then all the data should already be present. If you are running it from somewhere else, you may need to do some additional data copying from `pirca` to the other cluster. The input document `yml/wgs-processing-wf-chr19.yml` has the portable data hashes of the collections.\n"
  },
  {
    "path": "doc/development/release/JavaSDK.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Releasing Java SDK packages\n\nThe Java SDK is distributed on the Sonatype Central Repository. Here are the steps to release a new jar file:\n\n1.  Build and upload the package using https://ci.arvados.org/view/All/job/build-java-sdk\n2.  Go to [Sonatype Publishing Settings](https://central.sonatype.com/publishing/deployments) and log in with the appropriate credentials (gopass oss.sonatype.org/curii)\n3.  Make sure you’re on the “Deployments” tab.\n4.  Find the jar that was just uploaded by Jenkins. Click the “Publish” button and wait for the process to finish.\n\nSee [documentation about the publishing API we use](https://central.sonatype.org/publish/publish-portal-ossrh-staging-api/).\n\n## Getting the authentication token for Sonatype\n\n[Log into Sonatype](https://central.sonatype.com/usertoken) and under the account menu select “User Tokens” to review and manage tokens. Our current Jenkins token is stored in gopass as `curii-systems/websites/oss.sonatype.org/jenkins`.\n\n## gradle.properties\n\nTo upload to Sonatype, you need the token (see above) and a secret key. You must upload a GPG-signed package. All these parameters are set in `gradle.properties`, which we keep as a Jenkins secret. Note that the property values after the equals sign should not be quoted. I’m not certain if spaces are allowed around the equals sign, but currently it works with no extra spaces.\n\n    ossrhUsername=...\n    ossrhPassword=...\n    signing.keyId=...\n    signing.password=\n    signing.secretKeyRingFile=...-secret-key.gpg\n"
  },
  {
    "path": "doc/development/release/ManualTests.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Manual testing plan\n\n## Manual testing of SDKs that don’t have good coverage\n\n## Release candidate builds with ~1 ~2\n\n## Workbench2\n\nGo through this whole testing plan with both an admin and a non-admin account. Admin-only operations are indicated.\n\n### Login\n\n- Test login using username/password\n- Test login using OpenID Connect\n- Test login as federated user\n  - Log in using a remote account on a centralized federation (LoginCluster)\n  - Log in using a remote account on a peer federation\n\n### Left side navigation\n\n- Click on each top level icon (home projects, favorites/public favorite, shared, all processes, instance types, shell access, groups, trash) and confirm that the appropriate page loads with no errors\n- Check that the left side panel can be resized\n- Check that the toggle side panel button works as expected\n- Check that the +NEW button is disabled unless a project is displayed\n\n### Home projects top panel\n\n- Project name should match the logged-in user\n- Check for expected toolbar buttons\n  - Details\n  - User account\n  - API Details\n- Check that the buttons have the expected behavior\n\n### Project view\n\n#### Top panel\n\nCheck that it shows the project name.\n\nCheck that it shows the first line of the project description. Check that there is an arrow that expands to show the full description.\n\nCheck that it shows project properties. Check that there is an arrow that expands to show all the properties if they don’t fit on a single line.\n\nCheck that it renders the toolbar of project operations (listed below).\n\nCheck that each operation behaves correctly and operates on the project.\n\nCheck that if the window is narrow, the rightmost toolbar icons spill into an overflow menu.\n\nCheck that breadcrumbs include the project name and each parent project.\n\n#### Data tab\n\nShould show projects, workflows, and collections (in that order).\n\nClicking on the name rendered in blue text should navigate to the item.\n\nClicking anywhere else but the name should toggle between selected and not selected.\n\n- Unless clicking on the checkbox, clicking on the row clears any other selected items\n- When a row is selected, the toolbar moves from the top panel to the data table panel\n- Clicking on the check box to the left when a different item is selected selects both items\n  - The toolbar updates to show only the operations that can be applied to both items\n- Clicking “View details” should open the right info panel. 
Check that it shows details for the currently selected item.\n\nCheck that the toolbar operations are sorted and grouped consistently across different types of items.\n\nCheck that the toolbar operations are appropriate to the type of item selected.\n\nExpected toolbar when project is selected:\n\n- View details\n- Open in new tab\n- Copy link to clipboard\n- Open with 3rd party client\n- API details\n- —-\n- Share\n- New project\n- Edit project\n- Move to\n- Move to trash\n- —-\n- Freeze project\n- Add to favorites\n- Add to public favorites (admin only)\n\nExpected toolbar when workflow is selected:\n\n- View details\n- Open in new tab\n- Copy link to clipboard\n- API details\n- —-\n- Run workflow\n- Delete workflow\n\nExpected toolbar when collection is selected:\n\n- View details\n- Open in new tab\n- Copy link to clipboard\n- Open with 3rd party client\n- API details\n- —-\n- Share\n- Edit collection\n- Move to\n- Make a copy\n- Move to trash\n- —-\n- Add to favorites\n- Add to public favorites (admin only)\n\nCheck that all the toolbar operations work as expected.\n\nCheck that right-clicking on a row selects the row and then opens the appropriate context menu.\n\n- Check that the operations apply to the item that was clicked on.\n- Check that the operations in the right-click context menu match the toolbar.\n\nCheck that clicking on each action in the context menu works as expected.\n\nCheck that entering text into the search box refreshes the list with search results.\n\nCheck that clicking on the three bars in the upper right opens a menu to select columns.\n\nCheck that enabling/disabling data columns works. Check that all columns are filled in appropriately for each item, or blank (“-”) where no such data applies.\n\nCheck that clicking on the “Name” column sorts by name.\n\n- Check sort by “Date created”\n- Check sort by “Last modified”\n- Check sort by “Trash at”\n- Check sort by “Delete at”\n\nCheck that clicking on “Go to the next page” loads the next page of items.\n\nCheck that getting the number of items doesn’t block loading the table contents.\n\n#### Workflows tab\n\nCheck that it shows processes (workflow runs) only.\n\nCheck that it shows the number of completed, failed, queued and running processes, as well as the total, at the top of the data table.\n\nCheck that it shows the name, status, type, runtime and last modified times.\n\nCheck that entering text into the search box refreshes the list with search results.\n\nCheck that the toolbar and context menu behave as expected:\n\n- View details\n- Open in new tab\n- Outputs\n- API details\n- —-\n- Edit process\n- Copy and re-run process\n- Remove\n- —-\n- Add to favorites\n- Add to public favorites (admin only)\n\nCheck that selecting more than one item updates the toolbar to “Remove”.\n\nCheck that clicking on “Go to the next page” loads the next page of items.\n\nCheck that getting the number of items doesn’t block loading the table contents.\n\nCheck that process status is rendered correctly.\n\nCheck that filtering by process status shows only rows with the intended status.\n\nCheck that runtime is calculated/rendered correctly.\n\n### My favorites\n\nCheck that all items marked as “favorite” appear.\n\nCheck that clicking on an item shows the appropriate toolbar.\n\nCheck that clicking “Remove from favorites” on an item refreshes the favorites list and the item is no longer present.\n\n### Public favorites\n\nCheck that all items marked as “public favorite” appear.\n\nCheck that clicking on an item shows the 
appropriate toolbar.\n\nCheck that clicking “Remove from public favorites” on an item refreshes the public favorites list and the item is no longer present. (admin only)\n\n### Shared with me\n\nCheck that it shows all the items that don’t belong to the current user.\n\nCheck that selecting an item shows the appropriate toolbar.\n\nCheck that clicking on “Go to the next page” loads the next page of items.\n\nCheck that getting the number of items doesn’t block loading the table contents.\n\n### All processes\n\nCheck that it shows all processes visible to the user, regardless of owner project.\n\nCheck that selecting an item shows the appropriate toolbar.\n\nCheck that clicking on “Go to the next page” loads the next page of items.\n\nCheck that getting the number of items doesn’t block loading the table contents.\n\n### Instance types\n\nCheck that all the available instance types are listed and formatted properly.\n\n### Shell Access\n\nCheck that shell nodes are listed.\n\nCheck that the SSH command line is valid.\n\nCheck that webshell works properly.\n\n## Groups — standalone and peer federation\n\n1.  Create group\n2.  Log in as non-admin user.\n3.  Log in as a second non-admin user in a private window for testing sharing.\n4.  Check that users cannot see one another\n5.  Add user to group\n6.  Check that users can see one another\n\n## Collections\n\n1.  Create a collection & upload a file\n2.  Add a file\n3.  Rename a file\n4.  Remove a file\n5.  Download one of the files\n6.  Make a sharing link to the collection & check usage from private window\n7.  Mark collection as a favorite, check that it shows up in favorites\n8.  Rename collection\n9.  Edit description\n10. Add property\n11. Search for collection by property\n12. Search for collection by name\n13. Search for collection by filename\n14. Search for collection by keyword in description\n15. Trash collection\n16. Check that collection can be found in the trash\n17. Untrash collection\n\n## Projects\n\n1.  Create a project\n2.  Rename a project\n3.  Edit description\n4.  Create a collection inside the project\n5.  Move a collection into the project\n6.  Add read-only sharing permission to the project & check access from other user\n7.  Add read-write sharing permission to project & check access from other user\n8.  Add manage sharing permission to project & check access from other user\n9.  Mark project as favorite, check that it shows up in favorites\n10. Search for project by name\n11. Search for project by keyword in description\n12. Trash project\n13. Check that project can be found in the trash\n14. Untrash project\n\n## Workflows\n\n1.  Upload workflow with arvados-cwl-runner --create-workflow\n2.  Browse workflow\n3.  Select workflow to run\n4.  Choose input file\n5.  Watch it run\n    1.  Check logging\n    2.  Check live updates\n    3.  Check links to input & output\n6.  Check that it shows up in All Processes\n\n## Federation\n\n### Peer federation\n\n2 or more clusters are configured with a ‘RemoteClusters’ entry in config.yml.\n\n### Login cluster federation\n\n2 or more clusters are configured with a ‘RemoteClusters’ entry in config.yml. One of the clusters is the ‘login cluster’, which means the **other** clusters have a section like this in their config (clsr1 is the login cluster):\n\n    Clusters:\n      clsr2:\n        Login:\n          LoginCluster: clsr1\n\n#### Groups\n\n1.  Login cluster: create group\n2.  Satellite cluster: Log in as non-admin user.\n3.  
Satellite cluster: Log in as a second non-admin user in a private window for testing sharing.\n4.  Satellite cluster: Check that users cannot see one another\n5.  Login cluster: add both users to group\n6.  Satellite cluster: Check that users can see one another\n7.  Satellite cluster: create group\n8.  Satellite cluster: add both users to group\n9.  Satellite cluster: Check that both users can share with the group created on the satellite cluster\n\n## Misc\n\n1.  As admin, create a “public favorite” and make sure users see it.\n2.  As admin, deactivate a user. Make sure that user can’t log back in.\n3.  Add a cluster for multi-site search.\n4.  Upload SSH key & check view\n5.  Create git repo & check view\n6.  As admin, add virtual machine access & check view\n"
  },
  {
    "path": "doc/development/release/Zenodo.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Updating Zenodo Version of Arvados after Release\n\n1.  Download a `.zip` of your new Arvados release from [GitHub Releases](https://github.com/arvados/arvados/tags)\n2.  Log in to [Zenodo](https://zenodo.org/) using the credentials from `gopass \"curii-systems/zenodo.org/sysadmin+zenodo@curii.com\"`\n3.  Go to the [Arvados record](https://zenodo.org/records/15213491) and press the New Version button  \n    (Using new versions lets us use the overarching DOI for our `citations.md` and keep all the versions together on Zenodo)\n4.  In the form, update the following:\n    1.  Upload the `.zip` file for this release you downloaded earlier\n    2.  Request a new DOI for this version\n    3.  Update the Publication Date for this release\n    4.  Add any Creators who have worked on Arvados and aren’t listed\n    5.  Under Additional Description, edit the links for Release Notes and (if you’re doing a major release) Documentation\n    6.  Update the Version number for this release\n\nOnce you add a new version, you can’t change its DOI, but everything else is editable. So if you make a mistake, don’t worry :) just edit the new version to fix it.\n"
  },
  {
    "path": "doc/examples/config/zzzzz.yml",
    "content": "AutoReloadConfig: true\nClusters:\n  zzzzz:\n    ManagementToken: e687950a23c3a9bceec28c6223a06c79\n    SystemRootToken: systemusertesttoken1234567890aoeuidhtnsqjkxbmwvzpy\n    API:\n      RequestTimeout: 30s\n    TLS:\n      Insecure: true\n    Collections:\n      BlobSigningKey: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc\n      TrustAllContent: true\n      ForwardSlashNameSubstitution: /\n"
  },
  {
    "path": "doc/index.html.liquid",
    "content": "---\nlayout: default\nno_nav_left: true\nnavsection: top\ntitle: Arvados | Documentation\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n<div class=\"jumbotron\">\n  <div class=\"container\">\n    <div class=\"row\">\n      <div class=\"col-sm-6\">\n        <h1>ARVADOS</h1>\n        <p>A free and open source platform for big data science</p>\n      </div>\n      <div class=\"col-sm-6\">\n        <img src=\"images/dax-reading-book.png\" style=\"max-height: 10em\" alt=\"Dax reading a book\" />\n      </div>\n    </div>\n  </div>\n</div>\n\n<div class=\"container-fluid\">\n  <div class=\"row\">\n    <div class=\"col-sm-6\">\n      <p><strong>What is Arvados?</strong>\n      <p><a href=\"https://arvados.org/\">Arvados</a> is a platform for managing compute and storage for cloud and HPC clusters. It allows you to track your methods and datasets, share them securely, and easily re-run analyses.  It also makes it possible to run analyses across multiple clusters (HPC, cloud, or hybrid) with <a href=\"{{site.baseurl}}/user/cwl/federated-workflows.html\">Federated Multi-Cluster Workflows</a>.\n      </p>\n\n      <a name=\"Support\"></a>\n      <p><strong>Support and Community</strong></p>\n\n      <p>Interact with the Arvados community on the <a href=\"https://forum.arvados.org\">Arvados Forum</a>\n\tand the <a href=\"https://gitter.im/arvados/community\">arvados/community</a> channel at gitter.im.\n      </p>\n\n      <p>Curii Corporation provides managed Arvados installations as well as commercial support for Arvados. Please contact <a href=\"mailto:info@curii.com\">info@curii.com</a> for more information.</p>\n\n      <p><strong>Contributing</strong></p>\n      <p>Please visit the <a href=\"https://github.com/arvados/arvados/tree/main/doc/development\">developer documentation</a>. Arvados is 100% free and open source software; check out the code on <a href=\"https://github.com/arvados/arvados\">GitHub</a>.\n\n      <p>Arvados is under active development; see the <a href=\"https://github.com/arvados/arvados/pulse\">recent developer activity</a>.\n      </p>\n      <p><strong>License</strong></p>\n      <p>Most of Arvados is licensed under the <a href=\"{{ site.baseurl }}/user/copying/agpl-3.0.html\">GNU AGPL v3</a>. The SDKs are licensed under the <a href=\"{{ site.baseurl }}/user/copying/LICENSE-2.0.html\">Apache License 2.0</a> and can be incorporated into proprietary code. 
See <a href=\"{{ site.baseurl }}/user/copying/copying.html\">Arvados Free Software Licenses</a> for more information.\n      </p>\n\n    </div>\n    <div class=\"col-sm-6\" style=\"border-left: solid; border-width: 1px\">\n      <p><strong>Sections\n      </strong></p>\n      <p>\n        <a href=\"{{ site.baseurl }}/user/index.html\">User Guide</a> &mdash; How to manage data and do analysis with Arvados.\n      </p>\n      <p>\n        <a href=\"{{ site.baseurl }}/sdk/index.html\">SDK Reference</a> &mdash; Details about accessing Arvados from various programming languages.\n      </p>\n      <p>\n        <a href=\"{{ site.baseurl }}/architecture/index.html\">Arvados Architecture</a> &mdash; Details about the Arvados components and architecture.\n      </p>\n      <p>\n        <a href=\"{{ site.baseurl }}/api/index.html\">API Reference</a> &mdash; Details about the Arvados REST API.\n      </p>\n      <p>\n        <a href=\"{{ site.baseurl }}/admin/index.html\">Admin Guide</a> &mdash; Details about administering an Arvados cluster.\n      </p>\n      <p>\n        <a href=\"{{ site.baseurl }}/install/index.html\">Install Guide</a> &mdash; How to install Arvados.\n      </p>\n    </div>\n  </div>\n\n  <div class=\"row\">\n    <div class=\"col-sm-12\">\n      <br>\n      <p><em><small>\n      The content of the above documentation is licensed under the\n      <a href=\"{{ site.baseurl }}/user/copying/by-sa-3.0.html\">Creative\n        Commons Attribution-Share Alike 3.0 United States</a> license. Code samples in the above documentation are licensed under the\n      <a href=\"{{ site.baseurl }}/user/copying/LICENSE-2.0.html\">Apache License, Version 2.0.</a></small></em>\n      </p>\n    </div>\n  </div>\n</div>\n"
  },
  {
    "path": "doc/install/arvbox.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Arvados-in-a-box\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2. arvbox is unsupported\n\nThe Arvados team does not maintain or support arvbox as of Arvados 3.2.0.\n\nIf you were using arvbox in demo mode, consider installing on a Debian-based virtual machine with our \"single-node Ansible installer\":{{ site.baseurl }}/install/install-single-host.html.\n\nIf you were using arvbox for development, we now provide an Ansible playbook to install development dependencies on a Debian-based system. Our \"Hacking Prerequisites documentation\":https://github.com/arvados/arvados/blob/main/doc/development/Prerequisites.md has instructions for how to use it.\n\nInstalling systems with Ansible requires a little more initial setup, but once you've done that, it's easier to keep a system up-to-date: when you want to update a system, you simply re-run the playbook. We think this trade-off lets us provide a better experience to a wider variety of users.\n"
  },
  {
    "path": "doc/install/config.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Configuration files\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2. Arvados /etc/arvados/config.yml\n\nThe configuration file is normally found at @/etc/arvados/config.yml@ and will be referred to as just @config.yml@ in this guide.  This configuration file must be kept in sync across every service node in the cluster; shell and compute nodes do not require @config.yml@.\n\nh3. Syntax\n\nThe configuration file is in \"YAML\":https://yaml.org/ format.  This is a block syntax where indentation is significant (similar to Python).  By convention we use a two-space indent.  The first line of the file is always \"Clusters:\"; underneath it, at the first indent level, is the Cluster ID.  All the actual cluster configuration follows under the Cluster ID.  This means all configuration parameters are indented by at least two levels (four spaces).  Comments start with @#@.\n\nWe recommend a YAML-syntax plugin for your favorite text editor, such as @yaml-mode@ (Emacs) or @yaml-vim@.\n\nExample file:\n\n<pre>\nClusters:                         # Clusters block, everything else is listed under this\n  abcde:                          # Cluster ID, everything under it is configuration for this cluster\n    ExampleConfigKey: \"fghijk\"    # An example configuration key\n    ExampleConfigGroup:           # A group of keys\n      ExampleDurationConfig: 12s  # Example duration\n      ExampleSizeConfig: 99KiB    # Example with a size suffix\n</pre>\n\nEach configuration group may only appear once.  When a configuration key is within a config group, it will be written with the group name leading, for example @ExampleConfigGroup.ExampleSizeConfig@.\n\nDuration suffixes are s=seconds, m=minutes or h=hours.\n\nSize suffixes are K=10 ^3^, Ki=2 ^10^, M=10 ^6^, Mi=2 ^20^, G=10 ^9^, Gi=2 ^30^, T=10 ^12^, Ti=2 ^40^, P=10 ^15^, Pi=2 ^50^, E=10 ^18^, Ei=2 ^60^.  You can optionally follow with a \"B\" (e.g. \"MB\" or \"MiB\") for readability (it does not affect the units).\n\nh3(#empty). Create empty configuration file\n\nChange @webserver-user@ to the user that runs your web server process.  This is @www-data@ on Debian-based systems, and @nginx@ on Red Hat-based systems.\n\n<notextile>\n<pre><code># <span class=\"userinput\">export ClusterID=xxxxx</span>\n# <span class=\"userinput\">umask 027</span>\n# <span class=\"userinput\">mkdir -p /etc/arvados</span>\n# <span class=\"userinput\">cat &gt; /etc/arvados/config.yml &lt;&lt;EOF\nClusters:\n  ${ClusterID}:\nEOF</span>\n# <span class=\"userinput\">chgrp webserver-user /etc/arvados /etc/arvados/config.yml</span>\n</code></pre>\n</notextile>\n\nh2. Nginx configuration\n\nThis guide will also cover setting up \"Nginx\":https://www.nginx.com/ as a reverse proxy for Arvados services.  Nginx performs two main functions: TLS termination and virtual host routing.  The virtual host configuration for each component will go in its own file in @/etc/nginx/conf.d/@.\n\nh2. Synchronizing config file\n\nThe Arvados configuration file must be kept in sync across every service node in the cluster.  We strongly recommend using a devops configuration management tool such as \"Puppet\":https://puppet.com/open-source/ to synchronize the config file.  Alternatively, a script like the following, which securely copies the configuration file to each node, may be helpful.  
Replace the @ssh@ targets with your nodes.\n\n<notextile>\n<pre><code>#!/bin/sh\nsudo cat /etc/arvados/config.yml | ssh <span class=\"userinput\">10.0.0.2</span> sudo sh -c \"'cat > /etc/arvados/config.yml'\"\nsudo cat /etc/arvados/config.yml | ssh <span class=\"userinput\">10.0.0.3</span> sudo sh -c \"'cat > /etc/arvados/config.yml'\"\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/install/configure-azure-blob-storage.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Configure Azure Blob storage\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nKeepstore can store data in one or more Azure Storage containers.\n\nh2. Set up VMs and Storage Accounts\n\nBefore configuring individual keepstore servers, it is good to have an idea of the final layout of the keepstore servers. One key decision is the number of servers and the type of VM to run. Azure may change the bandwidth capacity of each VM type over time. After conducting some empirical saturation tests, the conclusion was that bandwidth is roughly proportional to the number of cores, with some exceptions. As a rule of thumb, it is better to invest resources in more cores than in memory or IOPS.\n\nAnother decision is how many VMs should run keepstore. For example, there could be 8 VMs with one core each, one machine with 8 cores, or anything in between. Assuming the cost of the cloud resources is the same, distributing the risk of faulty VMs across multiple machines is always a benefit. The recommendation is to start with 2 VMs, each with a minimum of 2 cores, and expand in pairs. The total number of VMs will be a function of the budget and the pipeline traffic, sized to avoid saturation during periods of high usage. The Standard D v3 family is a balanced choice, making Standard_D2_v3 the 2-core option.\n\nThere are many options for storage accounts. You can read the details in the Azure documentation at \"https://docs.microsoft.com/en-us/azure/storage/common/storage-introduction\":https://docs.microsoft.com/en-us/azure/storage/common/storage-introduction. The type of storage and access tier will be a function of the budget and the desired responsiveness. A balanced option is a general-purpose Standard storage account using Blob storage with the hot access tier.\n\nKeepstore can be configured to reflect the level of underlying redundancy the storage will have. This is called the data replication option. For example, LRS (Locally Redundant Storage) saves 3 copies of the data. The desired redundancy can be provided at the keepstore layer or at the storage account layer. The decision of where the redundancy will be handled, and the type of storage account data replication (LRS, ZRS, GRS and RA-GRS), has trade-offs. Please read more at \"https://docs.microsoft.com/en-us/azure/storage/common/storage-redundancy\":https://docs.microsoft.com/en-us/azure/storage/common/storage-redundancy and decide what is best for your needs.\n\nh2. Create a storage container\n\nUsing the Azure web portal or command line tool, create or choose a storage account with a suitable redundancy profile and availability region. 
Use the storage account keys to create a new container.\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">azure config mode arm</span>\n~$ <span class=\"userinput\">az login</span>\n~$ <span class=\"userinput\">az group create exampleGroupName eastus2</span>\n~$ <span class=\"userinput\">az storage account create --sku Standard_LRS --kind BlobStorage --encryption-services blob --access-tier Hot --https-only true --location eastus2 --resource-group exampleGroupName --name exampleStorageAccountName</span>\n~$ <span class=\"userinput\">az storage account keys list --resource-group exampleGroupName --account-name exampleStorageAccountName\n[\n  {\n    \"keyName\": \"key1\",\n    \"permissions\": \"Full\",\n    \"value\": \"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz==\"\n  },\n  {\n    \"keyName\": \"key2\",\n    \"permissions\": \"Full\",\n    \"value\": \"yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy==\"\n  }\n]</span>\n~$ <span class=\"userinput\">AZURE_STORAGE_ACCOUNT=\"exampleStorageAccountName\" \\\nAZURE_STORAGE_ACCESS_KEY=\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz==\" \\\nazure storage container create --name exampleContainerName</span>\n</code></pre>\n</notextile>\n\nNote that Keepstore services may be configured to use multiple Azure Storage accounts and multiple containers within a storage account.\n\nh2. Configure keepstore\n\nVolumes are configured in the @Volumes@ section of the cluster configuration file.\n\n{% include 'assign_volume_uuid' %}\n\n<notextile><pre><code>    Volumes:\n      <span class=\"userinput\">ClusterID</span>-nyw5e-<span class=\"userinput\">000000000000000</span>:\n        AccessViaHosts:\n          # This section determines which keepstore servers access the\n          # volume. In this example, keep0 has read/write access, and\n          # keep1 has read-only access.\n          #\n          # If the AccessViaHosts section is empty or omitted, all\n          # keepstore servers will have read/write access to the\n          # volume.\n          \"http://<span class=\"userinput\">keep0.ClusterID.example.com</span>:25107\": {}\n          \"http://<span class=\"userinput\">keep1.ClusterID.example.com</span>:25107\": {ReadOnly: true}\n\n        Driver: <span class=\"userinput\">Azure</span>\n        DriverParameters:\n          # Storage account name and secret key, used for\n          # authentication.\n          StorageAccountName: <span class=\"userinput\">exampleStorageAccountName</span>\n          StorageAccountKey: <span class=\"userinput\">zzzzzzzzzzzzzzzzzzzzzzzzzz</span>\n\n          # Storage container name.\n          ContainerName: <span class=\"userinput\">exampleContainerName</span>\n\n          # The cloud environment to use,\n          # e.g. \"core.chinacloudapi.cn\". Defaults to\n          # \"core.windows.net\" if blank or omitted.\n          StorageBaseURL: \"\"\n\n          # Time to wait for an upstream response before failing the\n          # request.\n          RequestTimeout: 10m\n\n          # Time to wait before retrying a failed \"list blobs\" Azure\n          # API call.\n          ListBlobsRetryDelay: 10s\n\n          # Maximum attempts at a \"list blobs\" Azure API call before\n          # giving up.\n          ListBlobsMaxAttempts: 12\n\n          # If non-zero, use multiple concurrent requests (each\n          # requesting MaxGetBytes bytes) when retrieving data. 
If\n          # zero or omitted, get the entire blob with one request.\n          #\n          # Normally this is zero but if you find that 4 small\n          # requests complete faster than a single large request, for\n          # example, you might set this to 16777216 (64 MiB ÷ 4).\n          MaxGetBytes: 0\n\n          # Time to wait for an unexpectedly empty blob to become\n          # non-empty. Azure's create-and-write operation is not\n          # atomic. The default value typically allows concurrent GET\n          # and PUT requests to succeed despite the race window.\n          WriteRaceInterval: 15s\n\n          # Time to wait between GET attempts while waiting for\n          # WriteRaceInterval to expire.\n          WriteRacePollTime: 1s\n\n        # How much replication is provided by the underlying storage\n        # container.  This is used to inform replication decisions at\n        # the Keep layer.\n        Replication: 3\n\n        # If true, do not accept write or trash operations, even if\n        # AccessViaHosts.*.ReadOnly is false.\n        #\n        # If false or omitted, enable write access (subject to\n        # AccessViaHosts.*.ReadOnly, where applicable).\n        ReadOnly: false\n\n        # Storage classes to associate with this volume.  See \"Storage\n        # classes\" in the \"Admin\" section of doc.arvados.org.\n        StorageClasses: null\n</code></pre></notextile>\n"
  },
  {
    "path": "doc/install/configure-fs-storage.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Configure filesystem storage\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nKeepstore can store data in local and network-attached POSIX filesystems.\n\nh2. Setting up filesystem mounts\n\nVolumes are configured in the @Volumes@ section of the cluster configuration file.  You may provide multiple volumes for a single keepstore process to manage multiple disks.  Keepstore distributes blocks among volumes in round-robin fashion.\n\n{% include 'assign_volume_uuid' %}\n\nNote that each volume entry has an @AccessViaHosts@ section indicating which Keepstore instance(s) will serve that volume.  In this example, keep0 and keep1 each have their own data disk.  The @/mnt/local-disk@ directory on keep0 is volume @ClusterID-nyw5e-000000000000000@, and the @/mnt/local-disk@ directory on keep1 is volume @ClusterID-nyw5e-000000000000001@ .\n\n<notextile>\n<pre><code>    Volumes:\n      <span class=\"userinput\">ClusterID</span>-nyw5e-<span class=\"userinput\">000000000000000</span>:\n        AccessViaHosts:\n          \"http://<span class=\"userinput\">keep0.ClusterID.example.com</span>:25107\": {}\n        Driver: <span class=\"userinput\">Directory</span>\n        DriverParameters:\n          # The directory that will be used as the backing store.\n          Root: <span class=\"userinput\">/mnt/local-disk</span>\n\n        # How much replication is performed by the underlying\n        # filesystem.  (for example, a network filesystem may provide\n        # its own replication).  This is used to inform replication\n        # decisions at the Keep layer.\n        Replication: 1\n\n        # If true, do not accept write or trash operations, only\n        # reads.\n        ReadOnly: false\n\n        # <a href=\"{{site.baseurl}}/admin/storage-classes.html\">Storage classes</a> to associate with this volume.\n        StorageClasses: null\n\n      <span class=\"userinput\">ClusterID</span>-nyw5e-<span class=\"userinput\">000000000000001</span>:\n        AccessViaHosts:\n          \"http://<span class=\"userinput\">keep1.ClusterID.example.com</span>:25107\": {}\n        Driver: <span class=\"userinput\">Directory</span>\n        DriverParameters:\n          Root: <span class=\"userinput\">/mnt/local-disk</span>\n</code></pre></notextile>\n\nIn the case of a network-attached filesystem, the @AccessViaHosts@ section can have multiple entries. If the filesystem is accessible by all keepstore servers, the AccessViaHosts section can be empty, or omitted entirely.  In this example, the underlying storage system performs replication, so specifying @Replication: 2@ means a block is considered to be stored twice for the purposes of data integrity, while only stored on a single volume from the perspective of Keep.\n\n<notextile>\n<pre><code>    Volumes:\n      <span class=\"userinput\">ClusterID</span>-nyw5e-<span class=\"userinput\">000000000000002</span>:\n        AccessViaHosts:\n          # This section determines which keepstore servers access the\n          # volume. 
In this example, keep0 has read/write access, and\n          # keep1 has read-only access.\n          #\n          # If the AccessViaHosts section is empty or omitted, all\n          # keepstore servers will have read/write access to the\n          # volume.\n          \"http://<span class=\"userinput\">keep0.ClusterID.example.com</span>:25107/\": {}\n          \"http://<span class=\"userinput\">keep1.ClusterID.example.com</span>:25107/\": {ReadOnly: true}\n        Driver: <span class=\"userinput\">Directory</span>\n        DriverParameters:\n          Root: <span class=\"userinput\">/mnt/network-attached-filesystem</span>\n        Replication: 2\n</code></pre></notextile>\n"
  },
  {
    "path": "doc/install/configure-s3-object-storage.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Configure S3 object storage\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nKeepstore can store data in object storage compatible with the S3 API, such as Amazon S3, Google Cloud Storage, Ceph RADOS, NetApp StorageGRID, and others.\n\nVolumes are configured in the @Volumes@ section of the cluster configuration file.\n\n# \"Configuration example\":#example\n# \"IAM Policy\":#IAM\n\nh2(#example). Configuration example\n\n{% include 'assign_volume_uuid' %}\n\n<notextile><pre><code>    Volumes:\n      <span class=\"userinput\">ClusterID</span>-nyw5e-<span class=\"userinput\">000000000000000</span>:\n        AccessViaHosts:\n          # This section determines which keepstore servers access the\n          # volume. In this example, keep0 has read/write access, and\n          # keep1 has read-only access.\n          #\n          # If the AccessViaHosts section is empty or omitted, all\n          # keepstore servers will have read/write access to the\n          # volume.\n          \"http://<span class=\"userinput\">keep0.ClusterID.example.com</span>:25107\": {}\n          \"http://<span class=\"userinput\">keep1.ClusterID.example.com</span>:25107\": {ReadOnly: true}\n\n        Driver: <span class=\"userinput\">S3</span>\n        DriverParameters:\n          # Bucket name.\n          Bucket: <span class=\"userinput\">example-bucket-name</span>\n\n          # Optionally, you can specify S3 access credentials here.\n          # If these are left blank, IAM role credentials will be\n          # retrieved from instance metadata (IMDSv2).\n          AccessKeyID: <span class=\"userinput\">\"\"</span>\n          SecretAccessKey: <span class=\"userinput\">\"\"</span>\n\n          # Storage provider region. If Endpoint is specified, the\n          # region determines the request signing method, and defaults\n          # to \"us-east-1\".\n          Region: <span class=\"userinput\">us-east-1</span>\n\n          # Storage provider endpoint. For Amazon S3, use \"\" or\n          # omit. For Google Cloud Storage, use\n          # \"https://storage.googleapis.com\".\n          Endpoint: \"\"\n\n          # Change to true if the region requires a LocationConstraint\n          # declaration.\n          LocationConstraint: false\n\n          # Use V2 signatures instead of the default V4. Amazon S3\n          # supports V4 signatures in all regions, but this option\n          # might be needed for other S3-compatible services.\n          V2Signature: false\n\n          # Use path-style requests instead of the default\n          # virtual-hosted-style requests.  This might be needed for\n          # S3-compatible services other than AWS.  If using AWS, see\n          # https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access\n          # for deprecation information.\n          UsePathStyle: false\n\n          # By default keepstore stores data using the MD5 checksum\n          # (32 hexadecimal characters) as the object name, e.g.,\n          # \"0123456abc...\". Setting PrefixLength to 3 changes this\n          # naming scheme to \"012/0123456abc...\". This can improve\n          # performance, depending on the S3 service being used. 
For\n          # example, PrefixLength 3 is recommended to avoid AWS\n          # limitations on the number of read/write operations per\n          # second per prefix (see\n          # https://aws.amazon.com/premiumsupport/knowledge-center/s3-request-limit-avoid-throttling/).\n          #\n          # Note that changing PrefixLength on an existing volume is\n          # not currently supported. Once you have started using a\n          # bucket as an Arvados volume, you should not change its\n          # configured PrefixLength, or configure another volume using\n          # the same bucket and a different PrefixLength.\n          PrefixLength: 0\n\n          # Requested page size for \"list bucket contents\" requests.\n          IndexPageSize: 1000\n\n          # Maximum time to wait while making the initial connection\n          # to the backend before failing the request.\n          ConnectTimeout: 1m\n\n          # Maximum time to wait for a complete response from the\n          # backend before failing the request.\n          ReadTimeout: 2m\n\n          # Maximum eventual consistency latency.\n          RaceWindow: 24h\n\n        # How much replication is provided by the underlying bucket.\n        # This is used to inform replication decisions at the Keep\n        # layer.\n        Replication: 2\n\n        # If true, do not accept write or trash operations, even if\n        # AccessViaHosts.*.ReadOnly is false.\n        #\n        # If false or omitted, enable write access (subject to\n        # AccessViaHosts.*.ReadOnly, where applicable).\n        ReadOnly: false\n\n        # Storage classes to associate with this volume.  See \"Storage\n        # classes\" in the \"Admin\" section of doc.arvados.org.\n        StorageClasses: null\n</code></pre></notextile>\n\nh2(#IAM). IAM Policy\n\nOn Amazon, VMs that will access the S3 bucket (this includes keepstore and compute nodes) will need an IAM policy granting \"permissions to read, write, list, and delete objects in the bucket\":https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create.html .  Here is an example policy:\n\n<notextile>\n<pre>\n{\n    \"Id\": \"arvados-keepstore policy\",\n    \"Statement\": [\n        {\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                  \"s3:*\"\n            ],\n            \"Resource\": [\n                \"arn:aws:s3:::xarv1-nyw5e-000000000000000-volume\",\n                \"arn:aws:s3:::xarv1-nyw5e-000000000000000-volume/*\"\n            ]\n        }\n    ]\n}\n</pre>\n</notextile>\n"
  },
  {
    "path": "doc/install/container-shell-access.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Configure container shell access\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados can be configured to permit shell access to running containers. This can be handy for debugging, but it could affect reproducability of workflows. This feature can be enabled for admin users, or for all users. By default, it is entirely disabled.\n\nThe relevant configuration section is\n\n<notextile>\n<pre><code>    Containers:\n      ShellAccess:\n        # An admin user can use \"arvados-client shell\" to start an\n        # interactive shell (with any user ID) in any running\n        # container.\n        Admin: false\n\n        # Any user can use \"arvados-client shell\" to start an\n        # interactive shell (with any user ID) in any running\n        # container that they started, provided it isn't also\n        # associated with a different user's container request.\n        #\n        # Interactive sessions make it easy to alter the container's\n        # runtime environment in ways that aren't recorded or\n        # reproducible. Consider the implications for automatic\n        # container reuse before enabling and using this feature. In\n        # particular, note that starting an interactive session does\n        # not disqualify a container from being reused by a different\n        # user/workflow in the future.\n        User: false\n</code></pre>\n</notextile>\n\nTo enable the feature a firewall change may also be required. This feature requires the opening of tcp connections from @arvados-controller@ to the range specified in the @net.ipv4.ip_local_port_range@ sysctl on compute nodes. If that range is unknown or hard to determine, it will be sufficient to allow tcp connections from @arvados-controller@ to port 1024-65535 on compute nodes, while allowing traffic that is part of existing tcp connections.\n\nAfter changing the configuration, @arvados-controller@ must be restarted for the change to take effect. When enabling, shell access will be enabled for any running containers. When disabling, access is removed immediately for any running containers, as well as any containers started subsequently. Restarting @arvados-controller@ will kill any active connections.\n\nUsage instructions for this feature are available in the \"User guide\":{{site.baseurl}}/user/debugging/container-shell-access.html.\n"
  },
  {
    "path": "doc/install/crunch2/install-compute-node-docker.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Set up a compute node with Docker\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'notebox_begin_warning' %}\nThis page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to \"Build a cloud compute node image\":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html.\n{% include 'notebox_end' %}\n\n{% include 'notebox_begin_warning' %}\nThese instructions apply when Containers.RuntimeEngine is set to @docker@, refer to \"Set up a compute node with Singularity\":install-compute-node-singularity.html when running @singularity@.\n{% include 'notebox_end' %}\n\n# \"Introduction\":#introduction\n# \"Set up Docker\":#docker\n# \"Update fuse.conf\":#fuse\n# \"Update docker-cleaner.json\":#docker-cleaner\n# \"Install'python-arvados-fuse and crunch-run and arvados-docker-cleaner\":#install-packages\n\nh2(#introduction). Introduction\n\nThis page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados on a static cluster. These steps must be performed on every compute node.\n\nh2(#docker). Set up Docker\n\nSee \"Set up Docker\":../install-docker.html\n\n{% include 'install_cuda' %}\n\n{% assign arvados_component = 'python-arvados-fuse crunch-run arvados-docker-cleaner' %}\n\n{% include 'install_compute_fuse' %}\n\n{% include 'install_docker_cleaner' %}\n\n{% include 'install_packages' %}\n\n{% assign arvados_component = 'arvados-docker-cleaner' %}\n\n{% include 'start_service' %}\n"
  },
  {
    "path": "doc/install/crunch2/install-compute-node-singularity.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Set up a compute node with Singularity\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'notebox_begin_warning' %}\nThis page describes the requirements for a compute node in a Slurm or LSF cluster that will run containers dispatched by @crunch-dispatch-slurm@ or @arvados-dispatch-lsf@. If you are installing a cloud cluster, refer to \"Build a cloud compute node image\":{{ site.baseurl }}/install/crunch2-cloud/install-compute-node.html.\n{% include 'notebox_end' %}\n\n{% include 'notebox_begin_warning' %}\nThese instructions apply when Containers.RuntimeEngine is set to @singularity@, refer to \"Set up a compute node with Docker\":install-compute-node-docker.html when running @docker@.\n{% include 'notebox_end' %}\n\n# \"Introduction\":#introduction\n# \"Install python-arvados-fuse and crunch-run and squashfs-tools\":#install-packages\n# \"Set up Singularity\":#singularity\n# \"Singularity mksquashfs configuration\":#singularity_mksquashfs_configuration\n\nh2(#introduction). Introduction\n\nPlease refer to the \"Singularity\":{{site.baseurl}}/architecture/singularity.html documentation in the Architecture section.\n\nThis page describes how to configure a compute node so that it can be used to run containers dispatched by Arvados on a static cluster. These steps must be performed on every compute node.\n\n{% assign arvados_component = 'python-arvados-fuse crunch-run squashfs-tools' %}\n\n{% include 'install_packages' %}\n\n{% include 'install_cuda' %}\n\nh2(#singularity). Set up Singularity\n\nFollow the \"Singularity installation instructions\":https://sylabs.io/guides/latest/user-guide/quick_start.html. Note that while the latest stable version is normally expected to be compatible, Arvados is currently tested with singularity 3.10.4.\n\nMake sure @singularity@ and @mksquashfs@ are working:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">singularity version</span>\nsingularity-ce version 3.10.4-dirty\n$ <span class=\"userinput\">mksquashfs -version</span>\nmksquashfs version 4.4 (2019/08/29)\n[...]\n</code></pre>\n</notextile>\n\nThen update @Containers.RuntimeEngine@ in your cluster configuration:\n\n<notextile>\n<pre><code>      # Container runtime: \"docker\" (default) or \"singularity\"\n      RuntimeEngine: singularity\n</code></pre>\n</notextile>\n\n{% include 'singularity_mksquashfs_configuration' %}\n"
  },
  {
    "path": "doc/install/crunch2-cloud/install-compute-node.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Build a cloud compute node image\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'notebox_begin_warning' %}\n@arvados-dispatch-cloud@ is only relevant for cloud installations. Skip this section if you are installing an on premises cluster that will spool jobs to Slurm or LSF.\n{% include 'notebox_end' %}\n\np(#introduction). This page describes how to build a compute node image that can be used to run containers dispatched by Arvados in the cloud.\n\n# \"Prerequisites\":#prerequisites\n## \"Check your distribution\":#check-distro\n## \"Create and configure an SSH keypair\":#sshkeypair\n## \"Get the Arvados source\":#git-clone\n## \"Install Ansible\":#install-ansible\n## \"Install Packer and the Ansible plugin\":#install-packer\n# \"Fully automated build with Packer and Ansible\":#building\n## \"Write Ansible settings for the compute node\":#ansible-variables\n## \"Set up Packer for your cloud\":#packer-variables\n### \"AWS\":#aws-variables\n### \"Azure\":#azure-variables\n## \"Run Packer\":#run-packer\n# \"Partially automated build with Ansible\":#ansible-build\n## \"Write Ansible settings for the compute node\":#ansible-variables-standalone\n## \"Write an Ansible inventory\":#ansible-inventory\n## \"Run Ansible\":#run-ansible\n# \"Manual build\":#requirements\n\n<a name=\"check-distro\"></a>\n\nh2(#prerequisites). Prerequisites\n\nh3(#sshkeypair). Create and configure an SSH keypair\n\n@arvados-dispatch-cloud@ communicates with the compute nodes via SSH. To do this securely, an SSH keypair is needed. The key type must be RSA or ED25519 to work with Amazon EC2. Generate an ED25519 keypair with no passphrase:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">ssh-keygen -t ed25519 -N '' -f ~/.ssh/id_dispatcher</span>\nGenerating public/private ed25519 key pair.\nYour identification has been saved in /home/user/.ssh/id_dispatcher.\nYour public key has been saved in /home/user/.ssh/id_dispatcher.pub.\nThe key fingerprint is:\n[...]\n</code></pre>\n</notextile>\n\nAfter you do this, the contents of the private key in @~/.ssh/id_dispatcher@ need to be stored in your \"cluster configuration file\":{{ site.baseurl }}/admin/config.html under @Containers.DispatchPrivateKey@.\n\nThe public key at @~/.ssh/id_dispatcher.pub@ will need to be authorized to access instances booted from the image. Keep this file; our Ansible playbook will read it to set this up for you.\n\nh3(#git-clone). Get the Arvados source\n\nCompute node templates are only available in the Arvados source tree. Clone a copy of the Arvados source for the version of Arvados you're using in a directory convenient for you:\n\n{% include 'branchname' %}\n<notextile>\n<pre><code>~$ <span class=\"userinput\">git clone --depth=1 --branch=<strong>{{ branchname }}</strong> https://github.com/arvados/arvados ~/<strong>arvados</strong></span>\n</code></pre>\n</notextile>\n\nh3(#install-ansible). Install Ansible\n\n{% include 'install_ansible' header_level: 'h4' %}\n\nh3(#install-packer). Install Packer and the Ansible plugin\n\nWe provide Packer templates that can automatically create a compute instance, configure it with Ansible, shut it down, and create a cloud image from the result. 
\"Install Packer following their instructions.\":https://developer.hashicorp.com/packer/docs/install After you do, install Packer's Ansible provisioner by running:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">packer plugins install github.com/hashicorp/ansible</span>\n</code></pre>\n</notextile>\n\nh2(#building). Fully automated build with Packer and Ansible\n\nAfter you have both tools installed, you can configure both with information about your Arvados cluster and cloud environment and then run a fully automated build.\n\nh3(#ansible-variables). Write Ansible settings for the compute node\n\nIn the @tools/compute-images@ directory of your Arvados source checkout, copy @host_config.example.yml@ to @host_config.yml@. Edit @host_config.yml@ with information about how your compute nodes should be set up following the instructions in the comments.\n\nh3(#packer-variables). Set up Packer for your cloud\n\nYou need to provide different configuration to Packer depending on which cloud you're deploying Arvados in.\n\nh4(#aws-variables). AWS\n\nInstall Packer's AWS builder by running:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">packer plugins install github.com/hashicorp/amazon</span>\n</code></pre>\n</notextile>\n\nIn the @tools/compute-images@ directory of your Arvados source checkout, copy @aws_config.example.json@ to @aws_config.json@. Fill in values for the configuration settings as follows:\n\n* If you already have AWS credentials configured that Packer can use to create and manage an EC2 instance, set @aws_profile@ to the name of those credentials in your configuration. Otherwise, set @aws_access_key@ and @aws_secret_key@ with information from an API token with those permissions.\n* Set @aws_region@, @vpc_id@, and @subnet_id@ with identifiers for the network where Packer should create the EC2 instance.\n* Set @aws_source_ami@ to the AMI of the base image that should be booted and used as the base for your compute node image. Set @ssh_user@ to the name of administrator account that is used on that image.\n* Set @aws_volume_gb@ to the size of of the image you want to create in GB. The default 20 should be sufficient for most installs. You may increase this if you're using a custom source AMI with more software pre-installed.\n* Set @arvados_cluster@ to the same five-alphanumeric identifier used under @Clusters@ in your Arvados cluster configuration.\n* If you installed Ansible to a nonstandard location, set @ansible_command@ to the absolute path of @ansible-playbook@. For example, if you installed Ansible in a virtualenv at @~/ansible@, set @ansible_command@ to {% raw %}<notextile><code class=\"userinput\">\"{{env `HOME`}}<strong>/ansible</strong>/bin/ansible-playbook\"</code></notextile>{% endraw %}.\n\nWhen you finish writing your configuration, \"run Packer\":#run-packer.\n\nh4(#azure-variables). Azure\n\n{% comment %}\nFIXME: Incomplete\n{% endcomment %}\n\nInstall Packer's Azure builder by running:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">packer plugins install github.com/hashicorp/azure</span>\n</code></pre>\n</notextile>\n\nIn the @tools/compute-images@ directory of your Arvados source checkout, copy @azure_config.example.json@ to @azure_config.json@. Fill in values for the configuration settings as follows:\n\n* The settings load credentials from Azure's standard environment variables. As long as you have these environment variables set in the shell before you run Packer, they will be loaded as normal. 
Alternatively, you can set them directly in the configuration file. These secrets can be generated from the Azure portal, or with the CLI using a command like:<notextile><pre><code>~$ <span class=\"userinput\">az ad sp create-for-rbac --name Packer --password ...</span>\n</code></pre></notextile>\n* Set @location@ and @resource_group@ with identifiers for where Packer should create the cloud instance.\n* Set @image_sku@ to the identifier of the base image that should be booted and used as the base for your compute node image. Set @ssh_user@ to the name of the administrator account you want to use on that image.\n* Set @ssh_private_key_file@ to the path with the private key you generated earlier for the dispatcher to use. For example, {% raw %}<notextile><code class=\"userinput\">\"{{env `HOME`}}/.ssh/<strong>id_dispatcher</strong>\"</code></notextile>{% endraw %}.\n* Set @arvados_cluster@ to the same five-alphanumeric identifier used under @Clusters@ in your Arvados cluster configuration.\n* If you installed Ansible to a nonstandard location, set @ansible_command@ to the absolute path of @ansible-playbook@. For example, if you installed Ansible in a virtualenv at @~/ansible@, set @ansible_command@ to {% raw %}<notextile><code class=\"userinput\">\"{{env `HOME`}}<strong>/ansible</strong>/bin/ansible-playbook\"</code></notextile>{% endraw %}.\n\nWhen you finish writing your configuration, \"run Packer\":#run-packer.\n\nh3(#run-packer). Run Packer\n\nIn the @tools/compute-images@ directory of your Arvados source checkout, run Packer with your configuration and the template appropriate for your cloud. For example, to build an image on AWS, run:\n\n<notextile>\n<pre><code>arvados/tools/compute-images$ <span class=\"userinput\">packer build -var-file=<strong>aws</strong>_config.json <strong>aws</strong>_template.json</span>\n</code></pre>\n</notextile>\n\nTo build an image on Azure, replace both instances of *@aws@* with *@azure@*, and run that command.\n\n{% include 'notebox_begin_warning' %}\nIf @packer build@ fails early with @ok=0@, @changed=0@, @failed=1@, and a message like this:\n\n<notextile>\n<pre><code>TASK [Gathering Facts] *********************************************************\nfatal: [default]: FAILED! =&gt; {\"msg\": \"failed to transfer file to /home/you/.ansible/tmp/ansible-local-1821271ym6nh1cw/tmp2kyfkhy4 /home/admin/.ansible/tmp/ansible-tmp-1732380360.0917368-1821275-172216075852170/AnsiballZ_setup.py:\\n\\n\"}\n\nPLAY RECAP *********************************************************************\ndefault : ok=0  changed=0  unreachable=0  failed=1  skipped=0  rescued=0  ignored=0\n</code></pre>\n</notextile>\n\nThis might mean the version of @scp@ on your computer is trying to use new protocol features that don't work with the older SSH server on the cloud image. You can work around this by running:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">export ANSIBLE_SCP_EXTRA_ARGS=\"'-O'\"</span>\n</code></pre>\n</notextile>\n\nThen rerun your full @packer build@ command from the same shell.\n{% include 'notebox_end' %}\n\nIf the build succeeds, it will report the identifier of your image at the end of the process. For example, when you build an AWS image, it will look like this:\n\n<notextile>\n<pre><code>==&gt; Builds finished. The artifacts of successful builds are:\n--&gt; amazon-ebs: AMIs were created:\nus-east-1: <strong>ami-012345abcdef56789</strong>\n</code></pre>\n</notextile>\n\nThat identifier can now be set as @CloudVMs.ImageID@ in your cluster configuration. 
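For example, using the AMI identifier reported by the build above, the relevant @config.yml@ stanza would look like this:\n\n<notextile>\n<pre><code>    Containers:\n      CloudVMs:\n        ImageID: <span class=\"userinput\">ami-012345abcdef56789</span>\n</code></pre>\n</notextile>\n\n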
You do not need to run any other compute node build process on this page; continue to \"installing the cloud dispatcher\":install-dispatch-cloud.html.\n\nh2(#ansible-build). Partially automated build with Ansible\n\nIf Arvados does not include a template for your cloud, or you do not have permission to run Packer, you can run the Ansible playbook by itself. This can set up a base Debian or Ubuntu system with all the software and configuration necessary to do Arvados compute work. After it's done, you can manually snapshot the node and create a cloud image from it.\n\nh3(#ansible-variables-standalone). Write Ansible settings for the compute node\n\nIn the @tools/compute-images@ directory of your Arvados source checkout, copy @host_config.example.yml@ to @host_config.yml@. Edit @host_config.yml@ with information about how your compute nodes should be set up following the instructions in the comments. Note that you *must set* @arvados_cluster_id@ in this file since you are not running Packer.\n\nh3(#ansible-inventory). Write an Ansible inventory\n\nThe compute node playbook runs on a host named @default@. In the @tools/compute-images@ directory of your Arvados source checkout, write a file named @inventory.ini@ with information about how to connect to this node via SSH. It should be one line like this:\n\n<notextile>\n<pre><code># Example inventory.ini for an Arvados compute node\n<span class=\"userinput\">default ansible_host=<strong>192.0.2.9</strong> ansible_user=<strong>admin</strong></span>\n</code></pre>\n</notextile>\n\n* @ansible_host@ can be the running node's hostname or IP address. You need to be able to reach this host from the system where you're running Ansible.\n* @ansible_user@ names the user account that Ansible should use for the SSH connection. It needs to have permission to use @sudo@ on the running node.\n\nYou can add other Ansible configuration options like @ansible_port@ to your inventory if needed. Refer to the \"Ansible inventory documentation\":https://docs.ansible.com/ansible/latest/inventory_guide/intro_inventory.html for details.\n\nh3(#run-ansible). Run Ansible\n\nIf you installed Ansible inside a virtualenv, activate that virtualenv now. Then, in the @tools/compute-images@ directory of your Arvados source checkout, run @ansible-playbook@ with your inventory and configuration:\n\n<notextile>\n<pre><code>arvados/tools/compute-images$ <span class=\"userinput\">ansible-playbook --ask-become-pass --inventory=inventory.ini --extra-vars=@host_config.yml ../ansible/build-compute-image.yml</span>\n</code></pre>\n</notextile>\n\nYou'll be prompted with @BECOME password:@. Enter the password for the @ansible_user@ you defined in the inventory to use sudo on the running node.\n\n{% include 'notebox_begin_warning' %}\nIf @ansible-playbook@ fails early with @ok=0@, @changed=0@, @failed=1@, and a message like this:\n\n<notextile>\n<pre><code>TASK [Gathering Facts] *********************************************************\nfatal: [default]: FAILED! 
=&gt; {\"msg\": \"failed to transfer file to /home/you/.ansible/tmp/ansible-local-1821271ym6nh1cw/tmp2kyfkhy4 /home/admin/.ansible/tmp/ansible-tmp-1732380360.0917368-1821275-172216075852170/AnsiballZ_setup.py:\\n\\n\"}\n\nPLAY RECAP *********************************************************************\ndefault : ok=0  changed=0  unreachable=0  failed=1  skipped=0  rescued=0  ignored=0\n</code></pre>\n</notextile>\n\nThis might mean the version of @scp@ on your computer is trying to use new protocol features that don't work with the older SSH server on the cloud image. You can work around this by running:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">export ANSIBLE_SCP_EXTRA_ARGS=\"'-O'\"</span>\n</code></pre>\n</notextile>\n\nThen rerun your full @ansible-playbook@ command from the same shell.\n{% include 'notebox_end' %}\n\nIf it succeeds, Ansible should report a \"PLAY RECAP\" with @failed=0@:\n\n<notextile>\n<pre><code>PLAY RECAP *********************************************************************\ndefault : ok=41  changed=37  unreachable=0  <strong>failed=0</strong>  skipped=5  rescued=0  ignored=0\n</code></pre>\n</notextile>\n\nYour node is now ready to run Arvados compute work. You can snapshot the node, create an image from it, and set that image as @CloudVMs.ImageID@ in your Arvados cluster configuration. The details of that process are cloud-specific and out of scope for this documentation. You do not need to run any other compute node build process on this page; continue to \"installing the cloud dispatcher\":install-dispatch-cloud.html.\n\nh2(#requirements). Manual build\n\nIf you cannot run Ansible, you can create a cloud instance, manually set it up to be a compute node, and then create an image from it. The details of this process depend on which distribution you use on the cloud instance and which cloud you use; all these variations are out of scope for this documentation. These are the requirements:\n\n* Except on Azure, the SSH public key you generated previously must be an authorized key for the user that Crunch is configured to use. For example, if your cluster's @CloudVMs.DriverParameters.AdminUsername@ setting is *@crunch@*, then the dispatcher's public key should be listed in <notextile><code class=\"userinput\">~<strong>crunch</strong>/.ssh/authorized_keys</code></notextile> in the image. This user must also be allowed to use sudo without a password unless the user is @root@.\n  (On Azure, the dispatcher makes additional calls to automatically set up and authorize the user, making these steps unnecessary.)\n* SSH needs to be running and reachable by @arvados-dispatch-cloud@ on the port named by @CloudVMs.SSHPort@ in your cluster's configuration file (default 22).\n* Install the @python3-arvados-fuse@ package. Enable the @user_allow_other@ option in @/etc/fuse.conf@.\n* Install either \"Docker\":https://docs.docker.com/engine/install/ or \"Singularity\":https://docs.sylabs.io/guides/3.0/user-guide/installation.html as appropriate based on the @Containers.RuntimeEngine@ setting in your cluster's configuration file. If you install Docker, you may also want to install and set up the @arvados-docker-cleaner@ package to conserve space on long-running instances, but it's not strictly required.\n* All available scratch space should be made available under @/tmp@.\n"
  },
  {
    "path": "doc/install/crunch2-cloud/install-dispatch-cloud.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Install the cloud dispatcher\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'notebox_begin_warning' %}\n@arvados-dispatch-cloud@ is only relevant for cloud installations. Skip this section if you are installing an on premises cluster that will spool jobs to Slurm or LSF.\n{% include 'notebox_end' %}\n\n# \"Introduction\":#introduction\n# \"Create compute node VM image\":#create-image\n# \"Update config.yml\":#update-config\n# \"Install arvados-dispatch-cloud\":#install-packages\n# \"Start the service\":#start-service\n# \"Restart the API server and controller\":#restart-api\n# \"Confirm working installation\":#confirm-working\n\nh2(#introduction). Introduction\n\nThe cloud dispatch service is for running containers on cloud VMs. It works with Microsoft Azure and Amazon EC2; future versions will also support Google Compute Engine.\n\nThe cloud dispatch service can run on any node that can connect to the Arvados API service, the cloud provider's API, and the SSH service on cloud VMs.  It is not resource-intensive, so you can run it on the API server node.\n\nMore detail about the internal operation of the dispatcher can be found in the \"architecture section\":{{site.baseurl}}/architecture/dispatchcloud.html.\n\nh2(#update-config). Update config.yml\n\nh3. Configure CloudVMs\n\nAdd or update the following portions of your cluster configuration file, @config.yml@. Refer to \"config.defaults.yml\":{{site.baseurl}}/admin/config.html for information about additional configuration options. The @DispatchPrivateKey@ should be the *private* key generated in \"Create a SSH keypair\":install-compute-node.html#sshkeypair .\n\n<notextile>\n<pre><code>    Services:\n      DispatchCloud:\n        InternalURLs:\n          \"http://localhost:9006\": {}\n    Containers:\n      CloudVMs:\n        # BootProbeCommand is a shell command that succeeds when an instance is ready for service\n        BootProbeCommand: \"sudo systemctl status docker\"\n\n        <b># --- driver-specific configuration goes here --- see Amazon and Azure examples below ---</b>\n\n      DispatchPrivateKey: |\n        -----BEGIN RSA PRIVATE KEY-----\n        MIIEpQIBAAKCAQEAqXoCzcOBkFQ7w4dvXf9B++1ctgZRqEbgRYL3SstuMV4oawks\n        ttUuxJycDdsPmeYcHsKo8vsEZpN6iYsX6ZZzhkO5nEayUTU8sBjmg1ZCTo4QqKXr\n        FJ+amZ7oYMDof6QEdwl6KNDfIddL+NfBCLQTVInOAaNss7GRrxLTuTV7HcRaIUUI\n        jYg0Ibg8ZZTzQxCvFXXnjseTgmOcTv7CuuGdt91OVdoq8czG/w8TwOhymEb7mQlt\n        lXuucwQvYgfoUgcnTgpJr7j+hafp75g2wlPozp8gJ6WQ2yBWcfqL2aw7m7Ll88Nd\n        [...]\n        oFyAjVoexx0RBcH6BveTfQtJKbktP1qBO4mXo2dP0cacuZEtlAqW9Eb06Pvaw/D9\n        foktmqOY8MyctzFgXBpGTxPliGjqo8OkrOyQP2g+FL7v+Km31Xs61P8=\n        -----END RSA PRIVATE KEY-----\n    InstanceTypes:\n      x1md:\n        ProviderType: x1.medium\n        VCPUs: 8\n        RAM: 64GiB\n        IncludedScratch: 64GB\n        Price: 0.62\n      x1lg:\n        ProviderType: x1.large\n        VCPUs: 16\n        RAM: 128GiB\n        IncludedScratch: 128GB\n        Price: 1.23\n</code></pre>\n</notextile>\n\nh3(#GPUsupport). NVIDIA GPU support\n\nTo specify instance types with NVIDIA GPUs, the compute image must be built with CUDA support (this means setting @arvados_compute_nvidia: true@ in @host_config.yml@ when \"building the compute image\":install-compute-node.html).  
You must include an additional @GPU@ section for each instance type that includes GPUs:\n\n<notextile>\n<pre><code>    InstanceTypes:\n      g4dn:\n        ProviderType: g4dn.xlarge\n        VCPUs: 4\n        RAM: 16GiB\n        IncludedScratch: 125GB\n        Price: 0.56\n        GPU:\n          Stack: \"cuda\"\n          DriverVersion: \"11.4\"\n          HardwareTarget: \"7.5\"\n          DeviceCount: 1\n          VRAM: 16GiB\n</code></pre>\n</notextile>\n\nThe @DriverVersion@ is the version of the CUDA toolkit installed in your compute image (in \"X.Y\" format, do not include the patchlevel).\n\nThe @HardwareTarget@ is the \"CUDA compute capability of the GPUs available for this instance type\":https://developer.nvidia.com/cuda-gpus in \"X.Y\" format.\n\nThe @DeviceCount@ is the number of GPU devices available for this instance type.\n\n@VRAM@ is the amount of VRAM available per GPU device.\n\nh3(#ROCmGPUsupport). AMD GPU support\n\nTo specify instance types with AMD GPUs, the compute image must be built with ROCm support (currently, installing ROCm automatically is not supported by the Arvados compute image Ansible playbook, but can be added manually after the fact).  You must include an additional @GPU@ section for each instance type that includes GPUs:\n\n<notextile>\n<pre><code>    InstanceTypes:\n      g4ad:\n        ProviderType: g4ad.xlarge\n        VCPUs: 4\n        RAM: 16GiB\n        IncludedScratch: 125GB\n        Price: 0.56\n        GPU:\n          Stack: \"rocm\"\n          DriverVersion: \"6.2\"\n          HardwareTarget: \"gfx1100\"\n          DeviceCount: 1\n          VRAM: 16GiB\n</code></pre>\n</notextile>\n\n@DriverVersion@ is the version of the ROCm toolkit installed in your compute image (in \"X.Y\" format, do not include the patchlevel).\n\n@HardwareTarget@ (e.g. gfx1100) corresponds to the GPU architecture of the device.  Use @rocminfo@ to determine your hardware target.  See also \"Accelerator and GPU hardware specifications\":https://rocm.docs.amd.com/en/latest/reference/gpu-arch-specs.html (use the column \"LLVM target name\") and \"LLVM AMDGPU backend documentation\":https://llvm.org/docs/AMDGPUUsage.html .\n\n@DeviceCount@ is the number of GPU devices available for this instance type.\n\n@VRAM@ is the amount of VRAM available per GPU device.\n\nh3(#aws-ebs-autoscaler). EBS Autoscale configuration\n\nSee \"Autoscaling compute node scratch space\":install-compute-node.html#aws-ebs-autoscaler for details about compute image configuration.\n\nThe @InstanceTypes@ list should be modified so that all @AddedScratch@ lines are removed, and the @IncludedScratch@ value should be set to 5 TB. This way, the scratch space requirements will be met by all of the defined instance types. 
For example:\n\n<notextile><pre><code>    InstanceTypes:\n      c5large:\n        ProviderType: c5.large\n        VCPUs: 2\n        RAM: 4GiB\n        IncludedScratch: 5TB\n        Price: 0.085\n      m5large:\n        ProviderType: m5.large\n        VCPUs: 2\n        RAM: 8GiB\n        IncludedScratch: 5TB\n        Price: 0.096\n...\n</code></pre></notextile>\n\nYou will also need to create an IAM role in AWS with these permissions:\n\n<notextile><pre><code>{\n    \"Statement\": [\n        {\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"ec2:AttachVolume\",\n                \"ec2:DescribeVolumeStatus\",\n                \"ec2:DescribeVolumes\",\n                \"ec2:DescribeTags\",\n                \"ec2:ModifyInstanceAttribute\",\n                \"ec2:DescribeVolumeAttribute\",\n                \"ec2:CreateVolume\",\n                \"ec2:DeleteVolume\",\n                \"ec2:CreateTags\"\n            ],\n            \"Resource\": \"*\"\n        }\n    ]\n}\n</code></pre></notextile>\n\nThen set @Containers.CloudVMs.DriverParameters.IAMInstanceProfile@ to the name of the IAM role. This will make @arvados-dispatch-cloud@ pass an IAM instance profile to the compute nodes when they start up, giving them sufficient permissions to attach and grow EBS volumes.\n\nh3. AWS Credentials for Local Keepstore on Compute node\n\nWhen @Containers.LocalKeepBlobBuffersPerVCPU@ is non-zero, the compute node will spin up a local Keepstore service for direct storage access. If Keep is backed by S3, the compute node will need to be able to access the S3 bucket.\n\nIf the AWS credentials for S3 access are configured in @config.yml@ (i.e. @Volumes.DriverParameters.AccessKeyID@ and @Volumes.DriverParameters.SecretAccessKey@), these credentials will be made available to the local Keepstore on the compute node to access S3 directly and no further configuration is necessary.\n\nIf @config.yml@ does not have @Volumes.DriverParameters.AccessKeyID@ and @Volumes.DriverParameters.SecretAccessKey@ defined, Keepstore uses instance metadata to retrieve IAM role credentials. The @CloudVMs.DriverParameters.IAMInstanceProfile@ parameter must be configured with the name of a profile whose IAM role has permission to access the S3 bucket(s). With this setup, @arvados-dispatch-cloud@ will attach the IAM role to the compute node as it is created. The instance profile name is \"often identical to the name of the IAM role\":https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#ec2-instance-profile.\n\n*If you are also using the EBS Autoscale feature, the role in @IAMInstanceProfile@ must have both EC2 and S3 permissions.*\n\nh3. Minimal configuration example for Amazon EC2\n\nThe <span class=\"userinput\">ImageID</span> value is the compute node image that was built in \"the previous section\":install-compute-node.html#aws.\n\n<notextile>\n<pre><code>    Containers:\n      CloudVMs:\n        ImageID: <span class=\"userinput\">ami-01234567890abcdef</span>\n        Driver: ec2\n        DriverParameters:\n          # If you are not using an IAM role for authentication, specify access\n          # credentials here. 
Otherwise, omit or set AccessKeyID and\n          # SecretAccessKey to an empty value.\n          AccessKeyID: XXXXXXXXXXXXXXXXXXXX\n          SecretAccessKey: YYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYYY\n\n          SecurityGroupIDs:\n          - sg-0123abcd\n          SubnetID: subnet-0123abcd\n          Region: us-east-1\n          EBSVolumeType: gp2\n          AdminUsername: arvados\n</code></pre>\n</notextile>\n\nh3(#IAM). Example IAM policy for cloud dispatcher\n\nExample policy for the IAM role used by the cloud dispatcher:\n\n<notextile>\n<pre>\n{\n    \"Id\": \"arvados-dispatch-cloud policy\",\n    \"Statement\": [\n        {\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                  \"ec2:CreateTags\",\n                  \"ec2:Describe*\",\n                  \"ec2:CreateImage\",\n                  \"ec2:CreateKeyPair\",\n                  \"ec2:ImportKeyPair\",\n                  \"ec2:DeleteKeyPair\",\n                  \"ec2:RunInstances\",\n                  \"ec2:StopInstances\",\n                  \"ec2:TerminateInstances\",\n                  \"ec2:ModifyInstanceAttribute\",\n                  \"ec2:CreateSecurityGroup\",\n                  \"ec2:DeleteSecurityGroup\",\n                  \"iam:PassRole\"\n            ],\n            \"Resource\": \"*\"\n        }\n    ]\n}\n</pre>\n</notextile>\n\nh3. Minimal configuration example for Azure\n\nUsing managed disks:\n\nThe <span class=\"userinput\">ImageID</span> value is the compute node image that was built in \"the previous section\":install-compute-node.html#azure.\n\n<notextile>\n<pre><code>    Containers:\n      CloudVMs:\n        ImageID: <span class=\"userinput\">\"zzzzz-compute-v1597349873\"</span>\n        Driver: azure\n        # (azure) managed disks: set MaxConcurrentInstanceCreateOps to 20 to avoid timeouts, cf\n        # https://docs.microsoft.com/en-us/azure/virtual-machines/linux/capture-image\n        MaxConcurrentInstanceCreateOps: 20\n        DriverParameters:\n          # Credentials.\n          SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n          ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n          ClientSecret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n          TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n\n          # Data center where VMs will be allocated\n          Location: centralus\n\n          # The resource group where the VM and virtual NIC will be\n          # created.\n          ResourceGroup: zzzzz\n          NetworkResourceGroup: yyyyy   # only if different from ResourceGroup\n          Network: xxxxx\n          Subnet: xxxxx-subnet-private\n\n          # The resource group where the disk image is stored, only needs to\n          # be specified if it is different from ResourceGroup\n          ImageResourceGroup: aaaaa\n\n</code></pre>\n</notextile>\n\nAzure recommends using managed images. 
If you plan to start more than 20 VMs simultaneously, Azure recommends using a shared image gallery instead to avoid slowdowns and timeouts during the creation of the VMs.\n\nUsing an image from a shared image gallery:\n\n<notextile>\n<pre><code>    Containers:\n      CloudVMs:\n        ImageID: <span class=\"userinput\">\"shared_image_gallery_image_definition_name\"</span>\n        Driver: azure\n        DriverParameters:\n          # Credentials.\n          SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n          ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n          ClientSecret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n          TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n\n          # Data center where VMs will be allocated\n          Location: centralus\n\n          # The resource group where the VM and virtual NIC will be\n          # created.\n          ResourceGroup: zzzzz\n          NetworkResourceGroup: yyyyy   # only if different from ResourceGroup\n          Network: xxxxx\n          Subnet: xxxxx-subnet-private\n\n          # The resource group where the disk image is stored, only needs to\n          # be specified if it is different from ResourceGroup\n          ImageResourceGroup: aaaaa\n\n          # (azure) shared image gallery: the name of the gallery\n          SharedImageGalleryName: \"shared_image_gallery_1\"\n          # (azure) shared image gallery: the version of the image definition\n          SharedImageGalleryImageVersion: \"0.0.1\"\n\n</code></pre>\n</notextile>\n\nUsing unmanaged disks (deprecated):\n\nThe <span class=\"userinput\">ImageID</span> value is the compute node image that was built in \"the previous section\":install-compute-node.html#azure.\n\n<notextile>\n<pre><code>    Containers:\n      CloudVMs:\n        ImageID: <span class=\"userinput\">\"https://zzzzzzzz.blob.core.windows.net/system/Microsoft.Compute/Images/images/zzzzz-compute-osDisk.55555555-5555-5555-5555-555555555555.vhd\"</span>\n        Driver: azure\n        DriverParameters:\n          # Credentials.\n          SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n          ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n          ClientSecret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n          TenantID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n\n          # Data center where VMs will be allocated\n          Location: centralus\n\n          # The resource group where the VM and virtual NIC will be\n          # created.\n          ResourceGroup: zzzzz\n          NetworkResourceGroup: yyyyy   # only if different from ResourceGroup\n          Network: xxxxx\n          Subnet: xxxxx-subnet-private\n\n          # Where to store the VM VHD blobs\n          StorageAccount: example\n          BlobContainer: vhds\n\n</code></pre>\n</notextile>\n\nGet the @SubscriptionID@ and @TenantID@:\n\n<pre>\n$ az account list\n[\n  {\n    \"cloudName\": \"AzureCloud\",\n    \"id\": \"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXX\",\n    \"isDefault\": true,\n    \"name\": \"Your Subscription\",\n    \"state\": \"Enabled\",\n    \"tenantId\": \"YYYYYYYY-YYYY-YYYY-YYYYYYYY\",\n    \"user\": {\n      \"name\": \"you@example.com\",\n      \"type\": \"user\"\n    }\n  }\n]\n</pre>\n\nYou will need to create a \"service principal\" to use as a delegated authority for API access.\n\n<notextile><pre><code>$ az ad app create --display-name \"Arvados Dispatch Cloud (<span class=\"userinput\">ClusterID</span>)\" --homepage \"https://arvados.org\" --identifier-uris \"https://<span 
class=\"userinput\">ClusterID.example.com</span>\" --end-date 2299-12-31 --password <span class=\"userinput\">Your_Password</span>\n$ az ad sp create \"<span class=\"userinput\">appId</span>\"\n(appId is part of the response of the previous command)\n$ az role assignment create --assignee \"<span class=\"userinput\">objectId</span>\" --role Owner --scope /subscriptions/{subscriptionId}/\n(objectId is part of the response of the previous command)\n</code></pre></notextile>\n\nNow update your @config.yml@ file:\n\n@ClientID@ is the 'appId' value.\n\n@ClientSecret@ is what was provided as <span class=\"userinput\">Your_Password</span>.\n\nh3. Test your configuration\n\nRun the @cloudtest@ tool to verify that your configuration works. This creates a new cloud VM, confirms that it boots correctly and accepts your configured SSH private key, and shuts it down.\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arvados-server cloudtest && echo \"OK!\"</span>\n</code></pre>\n</notextile>\n\nRefer to the \"cloudtest tool documentation\":../../admin/cloudtest.html for more information.\n\n{% assign arvados_component = 'arvados-dispatch-cloud' %}\n\n{% include 'install_packages' %}\n\n{% include 'start_service' %}\n\n{% include 'restart_api' %}\n\nh2(#confirm-working). Confirm working installation\n\nOn the dispatch node, start monitoring the arvados-dispatch-cloud logs:\n\n<notextile>\n<pre><code># <span class=\"userinput\">journalctl -o cat -fu arvados-dispatch-cloud.service</span>\n</code></pre>\n</notextile>\n\nIn another terminal window, use the diagnostics tool to run a simple container.\n\n<notextile>\n<pre><code># <span class=\"userinput\">arvados-client sudo diagnostics</span>\nINFO       5: running health check (same as `arvados-server check`)\nINFO      10: getting discovery document from https://zzzzz.arvadosapi.com/discovery/v1/apis/arvados/v1/rest\n...\nINFO     160: running a container\nINFO      ... container request submitted, waiting up to 10m for container to run\n</code></pre>\n</notextile>\n\nAfter performing a number of other quick tests, this will submit a new container request and wait for it to finish.\n\nWhile the diagnostics tool is waiting, the @arvados-dispatch-cloud@ logs will show details about creating a cloud instance, waiting for it to be ready, and scheduling the new container on it.\n\nYou can also use the \"arvados-dispatch-cloud API\":{{site.baseurl}}/api/dispatch.html to get a list of queued and running jobs and cloud instances. Use your @ManagementToken@ to test the dispatcher's endpoint. 
For example, when one container is running:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">curl -sH \"Authorization: Bearer $token\" http://localhost:9006/arvados/v1/dispatch/containers</span>\n{\n  \"items\": [\n    {\n      \"container\": {\n        \"uuid\": \"zzzzz-dz642-hdp2vpu9nq14tx0\",\n        ...\n        \"state\": \"Running\",\n        \"scheduling_parameters\": {\n          \"partitions\": null,\n          \"preemptible\": false,\n          \"max_run_time\": 0\n        },\n        \"exit_code\": 0,\n        \"runtime_status\": null,\n        \"started_at\": null,\n        \"finished_at\": null\n      },\n      \"instance_type\": {\n        \"Name\": \"Standard_D2s_v3\",\n        \"ProviderType\": \"Standard_D2s_v3\",\n        \"VCPUs\": 2,\n        \"RAM\": 8589934592,\n        \"Scratch\": 16000000000,\n        \"IncludedScratch\": 16000000000,\n        \"AddedScratch\": 0,\n        \"Price\": 0.11,\n        \"Preemptible\": false\n      }\n    }\n  ]\n}\n</code></pre>\n</notextile>\n\nA similar request can be made to the @http://localhost:9006/arvados/v1/dispatch/instances@ endpoint.\n\nAfter the container finishes, you can get the container record by UUID *from a shell server* to see its results:\n\n<notextile>\n<pre><code>shell:~$ <span class=\"userinput\">arv get <b>zzzzz-dz642-hdp2vpu9nq14tx0</b></span>\n{\n ...\n \"exit_code\":0,\n \"log\":\"a01df2f7e5bc1c2ad59c60a837e90dc6+166\",\n \"output\":\"d41d8cd98f00b204e9800998ecf8427e+0\",\n \"state\":\"Complete\",\n ...\n}\n</code></pre>\n</notextile>\n\nYou can use standard Keep tools to view the container's output and logs from their corresponding fields.  For example, to see the logs from the collection referenced in the @log@ field:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv keep ls <b>a01df2f7e5bc1c2ad59c60a837e90dc6+166</b></span>\n./crunch-run.txt\n./stderr.txt\n./stdout.txt\n~$ <span class=\"userinput\">arv-get <b>a01df2f7e5bc1c2ad59c60a837e90dc6+166</b>/stdout.txt</span>\n2016-08-05T13:53:06.201011Z Hello, Crunch!\n</code></pre>\n</notextile>\n\nIf the container does not dispatch successfully, refer to the @arvados-dispatch-cloud@ logs for information about why it failed.\n"
  },
  {
    "path": "doc/install/crunch2-lsf/install-dispatch.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Install the LSF dispatcher\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'notebox_begin_warning' %}\n@arvados-dispatch-lsf@ is only relevant for on premises clusters that will spool jobs to LSF. Skip this section if you use Slurm or if you are installing a cloud cluster.\n{% include 'notebox_end' %}\n\nh2(#overview). Overview\n\nContainers can be dispatched to an LSF cluster.  The dispatcher sends work to the cluster using LSF's @bsub@ command, so it works in a variety of LSF configurations.\n\nIn order to run containers, you must choose a user that has permission to set up FUSE mounts and run Singularity/Docker containers on each compute node.  This install guide refers to this user as the @crunch@ user.  We recommend you create this user on each compute node with the same UID and GID, and add it to the @fuse@ and @docker@ system groups to grant it the necessary permissions.  However, you can run the dispatcher under any account with sufficient permissions across the cluster.\n\nSet up all of your compute nodes with \"Docker\":../crunch2/install-compute-node-singularity.html or \"Singularity\":../crunch2/install-compute-node-docker.html.\n\n*Current limitations*:\n* Arvados container priority is not propagated to LSF job priority. This can cause inefficient use of compute resources, and even deadlock if there are fewer compute nodes than concurrent Arvados workflows.\n* Combining LSF with docker may not work, depending on LSF configuration and user/group IDs (if LSF only sets up the configured user's primary group ID when executing the crunch-run process on a compute node, it may not have permission to connect to the docker daemon).\n\nh2(#update-config). Update config.yml\n\nArvados-dispatch-lsf reads the common configuration file at @/etc/arvados/config.yml@.\n\nAdd a DispatchLSF entry to the Services section, using the hostname where @arvados-dispatch-lsf@ will run, and an available port:\n\n<notextile>\n<pre>    Services:\n      DispatchLSF:\n        InternalURLs:\n          \"http://<code class=\"userinput\">hostname.zzzzz.arvadosapi.com:9007</code>\": {}</pre>\n</notextile>\n\nReview the following configuration parameters and adjust as needed.\n\n{% include 'hpc_max_gateway_tunnels' %}\n\nh3(#BsubSudoUser). Containers.LSF.BsubSudoUser\n\narvados-dispatch-lsf uses @sudo@ to execute @bsub@, for example @sudo -E -u crunch bsub [...]@. This means the @crunch@ account must exist on the hosts where LSF jobs run (\"execution hosts\"), as well as on the host where you are installing the Arvados LSF dispatcher (the \"submission host\"). To use a user account other than @crunch@, configure @BsubSudoUser@:\n\n<notextile>\n<pre>    Containers:\n      LSF:\n        <code class=\"userinput\">BsubSudoUser: <b>lsfuser</b>\n</code></pre>\n</notextile>\n\nAlternatively, you can arrange for the arvados-dispatch-lsf process to run as an unprivileged user that has a corresponding account on all compute nodes, and disable the use of @sudo@ by specifying an empty string:\n\n<notextile>\n<pre>    Containers:\n      LSF:\n        # Don't use sudo\n        <code class=\"userinput\">BsubSudoUser: <b>\"\"</b>\n</code></pre>\n</notextile>\n\n\nh3(#BsubArgumentsList). Containers.LSF.BsubArgumentsList\n\nWhen arvados-dispatch-lsf invokes @bsub@, you can add arguments to the command by specifying @BsubArgumentsList@.  
You can use this to send the jobs to specific cluster partitions or add resource requests.  Set @BsubArgumentsList@ to an array of strings.\n\nTemplate variables starting with % will be substituted as follows:\n\n* @%U@: container UUID\n* @%C@: number of VCPUs\n* @%M@: memory in MB\n* @%T@: tmp in MB\n* @%G@: number of GPU devices (@runtime_constraints.cuda.device_count@)\n* @%W@: maximum job run time in minutes, suitable for use with @-W@ or @-We@ flags (see @MaxRunTimeOverhead@ and @MaxRunTimeDefault@ below)\n\nUse %% to express a literal %. The %%J in the default will be changed to %J, which is interpreted by @bsub@ itself.\n\nFor example:\n\n<notextile>\n<pre>    Containers:\n      LSF:\n        <code class=\"userinput\">BsubArgumentsList: <b>[\"-o\", \"/tmp/crunch-run.%%J.out\", \"-e\", \"/tmp/crunch-run.%%J.err\", \"-J\", \"%U\", \"-n\", \"%C\", \"-D\", \"%MMB\", \"-R\", \"rusage[mem=%MMB:tmp=%TMB] span[hosts=1]\", \"-R\", \"select[mem>=%MMB]\", \"-R\", \"select[tmp>=%TMB]\", \"-R\", \"select[ncpus>=%C]\", \"-We\", \"%W\"]</b></code>\n</pre>\n</notextile>\n\nNote that the default value for @BsubArgumentsList@ uses the @-o@ and @-e@ arguments to write stdout/stderr data to files in @/tmp@ on the compute nodes, which is helpful for troubleshooting installation/configuration problems. Ensure you have something in place to delete old files from @/tmp@, or adjust these arguments accordingly.\n\nh3(#BsubCUDAArguments). Containers.LSF.BsubCUDAArguments\n\nIf the container requests access to GPUs (@runtime_constraints.cuda.device_count@ of the container request is greater than zero), the command line arguments in @BsubCUDAArguments@ will be added to the command line _after_ @BsubArgumentsList@.  This should consist of the additional @bsub@ flags your site requires to schedule the job on a node with GPU support.  Set @BsubCUDAArguments@ to an array of strings.  For example:\n\n<notextile>\n<pre>    Containers:\n      LSF:\n        <code class=\"userinput\">BsubCUDAArguments: <b>[\"-gpu\", \"num=%G\"]</b></code>\n</pre>\n</notextile>\n\nh3(#MaxRunTimeOverhead). Containers.LSF.MaxRunTimeOverhead\n\nExtra time to add to each container's @scheduling_parameters.max_run_time@ value when substituting for @%W@ in @BsubArgumentsList@, to account for time spent setting up the container image, copying output files, etc.\n\nh3(#MaxRunTimeDefault). Containers.LSF.MaxRunTimeDefault\n\nDefault @max_run_time@ value to use for containers that do not specify one in @scheduling_parameters.max_run_time@. If this is zero, and @BsubArgumentsList@ contains @\"-W\", \"%W\"@ or @\"-We\", \"%W\"@, those arguments will be dropped when submitting containers that do not specify @scheduling_parameters.max_run_time@.\n\nh3(#PollInterval). Containers.PollInterval\n\narvados-dispatch-lsf polls the API server periodically for new containers to run.  The @PollInterval@ option controls how often this poll happens.  Set this to a string of numbers suffixed with one of the time units @s@, @m@, or @h@.  For example:\n\n<notextile>\n<pre>    Containers:\n      <code class=\"userinput\">PollInterval: <b>10s</b>\n</code></pre>\n</notextile>\n\n\nh3(#ReserveExtraRAM). Containers.ReserveExtraRAM: Extra RAM for jobs\n\nExtra RAM to reserve (in bytes) on each LSF job submitted by Arvados, which is added to the amount specified in the container's @runtime_constraints@.  
If not provided, the default value is zero.\n\nSupports suffixes @KB@, @KiB@, @MB@, @MiB@, @GB@, @GiB@, @TB@, @TiB@, @PB@, @PiB@, @EB@, @EiB@ (where @KB@ is 10[^3^], @KiB@ is 2[^10^], @MB@ is 10[^6^], @MiB@ is 2[^20^], and so forth).\n\n<notextile>\n<pre>    Containers:\n      <code class=\"userinput\">ReserveExtraRAM: <b>256MiB</b></code>\n</pre>\n</notextile>\n\n\nh3(#CrunchRunArgumentList). Containers.CrunchRunArgumentsList: Using host networking for containers\n\nOlder Linux kernels (prior to 3.18) have bugs in network namespace handling which can lead to compute node lockups.  This is indicated by blocked kernel tasks in \"Workqueue: netns cleanup_net\".  If you are experiencing this problem, as a workaround you can disable the use of network namespaces by Docker across the cluster.  Be aware this reduces container isolation, which may be a security risk.\n\n<notextile>\n<pre>    Containers:\n      <code class=\"userinput\">CrunchRunArgumentsList:\n        - <b>\"-container-enable-networking=always\"</b>\n        - <b>\"-container-network-mode=host\"</b></code>\n</pre>\n</notextile>\n\n\nh3(#InstanceTypes). InstanceTypes: Avoid submitting jobs with unsatisfiable resource constraints\n\nLSF does not provide feedback when a submitted job's RAM, CPU, or disk space constraints cannot be satisfied by any node: the job will wait in the queue indefinitely with \"pending\" status, reported by Arvados as \"queued\".\n\nAs a workaround, you can configure @InstanceTypes@ with your LSF cluster's compute node sizes. Arvados will use these sizes to determine when a container is impossible to run, and cancel it instead of submitting an LSF job.\n\nApart from detecting non-runnable containers, the configured instance types will not have any effect on scheduling.\n\n<notextile>\n<pre>    InstanceTypes:\n      most-ram:\n        VCPUs: 8\n        RAM: 640GiB\n        IncludedScratch: 640GB\n      most-cpus:\n        VCPUs: 32\n        RAM: 256GiB\n        IncludedScratch: 640GB\n      gpu:\n        VCPUs: 8\n        RAM: 256GiB\n        IncludedScratch: 640GB\n        CUDA:\n          DriverVersion: \"11.4\"\n          HardwareCapability: \"7.5\"\n          DeviceCount: 1\n</pre>\n</notextile>\n\n\n{% assign arvados_component = 'arvados-dispatch-lsf' %}\n\n{% include 'install_packages' %}\n\n{% include 'start_service' %}\n\n{% include 'restart_api' %}\n\nh2(#confirm-working). Confirm working installation\n\nOn the dispatch node, start monitoring the arvados-dispatch-lsf logs:\n\n<notextile>\n<pre><code># <span class=\"userinput\">journalctl -o cat -fu arvados-dispatch-lsf.service</span>\n</code></pre>\n</notextile>\n\nIn another terminal window, use the diagnostics tool to run a simple container.\n\n<notextile>\n<pre><code># <span class=\"userinput\">arvados-client sudo diagnostics</span>\nINFO       5: running health check (same as `arvados-server check`)\nINFO      10: getting discovery document from https://zzzzz.arvadosapi.com/discovery/v1/apis/arvados/v1/rest\n...\nINFO     160: running a container\nINFO      ... container request submitted, waiting up to 10m for container to run\n</code></pre>\n</notextile>\n\nAfter performing a number of other quick tests, this will submit a new container request and wait for it to finish.\n\nWhile the diagnostics tool is waiting, the @arvados-dispatch-lsf@ logs will show details about submitting an LSF job to run the container.\n"
  },
  {
    "path": "doc/install/crunch2-slurm/configure-slurm.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Configure Slurm\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'notebox_begin_warning' %}\n@crunch-dispatch-slurm@ is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you use LSF or if you are installing a cloud cluster.\n{% include 'notebox_end' %}\n\nContainers can be dispatched to a Slurm cluster.  The dispatcher sends work to the cluster using Slurm's @sbatch@ command, so it works in a variety of Slurm configurations.\n\nIn order to run containers, you must run the dispatcher as a user that has permission to set up FUSE mounts and run Docker containers on each compute node.  This install guide refers to this user as the @crunch@ user.  We recommend you create this user on each compute node with the same UID and GID, and add it to the @fuse@ and @docker@ system groups to grant it the necessary permissions.  However, you can run the dispatcher under any account with sufficient permissions across the cluster.\n\nWe will assume that you have Slurm and munge running.\n\nh3. Sample Slurm configuration file\n\nHere's an example @slurm.conf@ for use with Arvados:\n\n<notextile>\n<pre><code>\nControlMachine=<span class=\"userinput\">ClusterID.example.com</class>\nSlurmctldPort=6817\nSlurmdPort=6818\nAuthType=auth/munge\nStateSaveLocation=/tmp\nSlurmdSpoolDir=/tmp/slurmd\nSwitchType=switch/none\nMpiDefault=none\nSlurmctldPidFile=/var/run/slurmctld.pid\nSlurmdPidFile=/var/run/slurmd.pid\nProctrackType=proctrack/pgid\nCacheGroups=0\nReturnToService=2\nTaskPlugin=task/affinity\n#\n# TIMERS\nSlurmctldTimeout=300\nSlurmdTimeout=300\nInactiveLimit=0\nMinJobAge=300\nKillWait=30\nWaittime=0\n#\n# SCHEDULING\nSchedulerType=sched/backfill\nSchedulerPort=7321\nSelectType=select/linear\nFastSchedule=0\n#\n# LOGGING\nSlurmctldDebug=3\n#SlurmctldLogFile=\nSlurmdDebug=3\n#SlurmdLogFile=\nJobCompType=jobcomp/none\n#JobCompLoc=\nJobAcctGatherType=jobacct_gather/none\n#\n# COMPUTE NODES\nNodeName=DEFAULT\nPartitionName=DEFAULT MaxTime=INFINITE State=UP\n\nNodeName=compute[0-255]\nPartitionName=compute Nodes=compute[0-255] Default=YES Shared=YES\n</code></pre>\n</notextile>\n\nh3. Slurm configuration essentials\n\nWhenever you change this file, you will need to update the copy _on every compute node_ as well as the controller node, and then run @sudo scontrol reconfigure@.\n\n*@ControlMachine@* should be a DNS name that resolves to the Slurm controller (dispatch/API server). This must resolve correctly on all Slurm worker nodes as well as the controller itself. In general Slurm is very sensitive about all of the nodes being able to communicate with the controller _and one another_, all using the same DNS names.\n\n*@SelectType=select/linear@* is needed on cloud-based installations that update node sizes dynamically, but it can only schedule one container at a time on each node. On a static or homogeneous cluster, use @SelectType=select/cons_res@ with @SelectTypeParameters=CR_CPU_Memory@ instead to enable node sharing.\n\n*@NodeName=compute[0-255]@* establishes that the hostnames of the worker nodes will be compute0, compute1, etc. through compute255.\n* There are several ways to compress sequences of names, like @compute[0-9,80,100-110]@. 
Each hostname in @slurm.conf@ must also resolve correctly on all Slurm worker nodes as well as the controller itself. Furthermore, the hostnames used in the configuration file must match the hostnames reported by @hostname@ or @hostname -s@ on the nodes themselves. This applies to the ControlMachine as well as the worker nodes.\n\nFor example:\n* In @slurm.conf@ on control and worker nodes: @ControlMachine=ClusterID.example.com@\n* In @slurm.conf@ on control and worker nodes: @NodeName=compute[0-255]@\n* In @/etc/resolv.conf@ on control and worker nodes: @search ClusterID.example.com@\n* On the control node: @hostname@ reports @ClusterID.example.com@\n* On worker node 123: @hostname@ reports @compute123.ClusterID.example.com@\n\nh3. Automatic hostname assignment\n\nThe API server will choose an unused hostname from the set given in @application.yml@, which defaults to @compute[0-255]@.\n\nIf it is not feasible to give your compute nodes hostnames like compute0, compute1, etc., you can accommodate other naming schemes with a bit of extra configuration.\n\nIf you want Arvados to assign names to your nodes with a different consecutive numeric series like @{worker1-0000, worker1-0001, worker1-0002}@, add an entry to @application.yml@; see @/var/www/arvados-api/current/config/application.default.yml@ for details. Example:\n* In @application.yml@: <code>assign_node_hostname: worker1-%<slot_number>04d</code>\n* In @slurm.conf@: <code>NodeName=worker1-[0000-0255]</code>\n\nIf your worker hostnames are already assigned by other means, and the full set of names is known in advance, have your worker node bootstrapping script send its current hostname, rather than expect Arvados to assign one.\n* In @application.yml@: <code>assign_node_hostname: false</code>\n* In @slurm.conf@: <code>NodeName=alice,bob,clay,darlene</code>\n\nIf your worker hostnames are already assigned by other means, but the full set of names is _not_ known in advance, you can use the @slurm.conf@ and @application.yml@ settings in the previous example, but you must also update @slurm.conf@ (both on the controller and on all worker nodes) and run @sudo scontrol reconfigure@ whenever a new node comes online.\n"
  },
  {
    "path": "doc/install/crunch2-slurm/install-dispatch.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Install the Slurm dispatcher\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'notebox_begin_warning' %}\n@crunch-dispatch-slurm@ is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you use LSF or if you are installing a cloud cluster.\n{% include 'notebox_end' %}\n\n# \"Introduction\":#introduction\n# \"Update config.yml\":#update-config\n# \"Install crunch-dispatch-slurm\":#install-packages\n# \"Start the service\":#start-service\n# \"Restart the API server and controller\":#restart-api\n\nh2(#introduction). Introduction\n\nThis assumes you already have a Slurm cluster, and have set up all of your compute nodes with \"Docker\":../crunch2/install-compute-node-docker.html or \"Singularity\":../crunch2/install-compute-node-singularity.html.  Slurm packages are available on all distributions supported by Arvados. Please see your distribution package repositories. For information on installing Slurm from source, see \"this install guide\":https://slurm.schedmd.com/quickstart_admin.html\n\nThe Arvados Slurm dispatcher can run on any node that can submit requests to both the Arvados API server and the Slurm controller (via @sbatch@).  It is not resource-intensive, so you can run it on the API server node.\n\nh2(#update-config). Update config.yml\n\nCrunch-dispatch-slurm reads the common configuration file at @/etc/arvados/config.yml@.\n\nAdd a DispatchSLURM entry to the Services section, using the hostname where @crunch-dispatch-slurm@ will run, and an available port:\n\n<notextile>\n<pre>    Services:\n      DispatchSLURM:\n        InternalURLs:\n          \"http://<code class=\"userinput\">hostname.zzzzz.arvadosapi.com:9007</code>\": {}</pre>\n</notextile>\n\nThe following configuration parameters are optional.\n\n{% include 'hpc_max_gateway_tunnels' %}\n\nh3(#PollPeriod). Containers.PollInterval\n\ncrunch-dispatch-slurm polls the API server periodically for new containers to run.  The @PollInterval@ option controls how often this poll happens.  Set this to a string of numbers suffixed with one of the time units @ns@, @us@, @ms@, @s@, @m@, or @h@.  For example:\n\n<notextile>\n<pre>    Containers:\n      <code class=\"userinput\">PollInterval: <b>3m30s</b>\n</code></pre>\n</notextile>\n\nh3(#ReserveExtraRAM). Containers.ReserveExtraRAM: Extra RAM for jobs\n\nExtra RAM to reserve (in bytes) on each Slurm job submitted by Arvados, which is added to the amount specified in the container's @runtime_constraints@.  If not provided, the default value is zero.  Helpful when using @-cgroup-parent-subsystem@, where @crunch-run@ and @arv-mount@ share the control group memory limit with the user process.  In this situation, at least 256MiB is recommended to accommodate each container's @crunch-run@ and @arv-mount@ processes.\n\nSupports suffixes @KB@, @KiB@, @MB@, @MiB@, @GB@, @GiB@, @TB@, @TiB@, @PB@, @PiB@, @EB@, @EiB@ (where @KB@ is 10[^3^], @KiB@ is 2[^10^], @MB@ is 10[^6^], @MiB@ is 2[^20^] and so forth).\n\n<notextile>\n<pre>    Containers:\n      <code class=\"userinput\">ReserveExtraRAM: <b>256MiB</b></code>\n</pre>\n</notextile>\n\nh3(#MinRetryPeriod). Containers.MinRetryPeriod: Rate-limit repeated attempts to start containers\n\nIf Slurm is unable to run a container, the dispatcher will submit it again after the next PollPeriod. If PollPeriod is very short, this can be excessive. 
If MinRetryPeriod is set, the dispatcher will avoid submitting the same container to Slurm more than once in the given time span.\n\n<notextile>\n<pre>    Containers:\n      <code class=\"userinput\">MinRetryPeriod: <b>30s</b></code>\n</pre>\n</notextile>\n\nh3(#KeepServiceURIs). Containers.Slurm.SbatchEnvironmentVariables\n\nSome Arvados installations run a local keepstore on each compute node to handle all Keep traffic.  To override Keep service discovery and access the local keep server instead of the global servers, set ARVADOS_KEEP_SERVICES in SbatchEnvironmentVariables:\n\n<notextile>\n<pre>    Containers:\n      SLURM:\n        <span class=\"userinput\">SbatchEnvironmentVariables:\n          ARVADOS_KEEP_SERVICES: \"http://127.0.0.1:25107\"</span>\n</pre>\n</notextile>\n\nh3(#PrioritySpread). Containers.Slurm.PrioritySpread\n\ncrunch-dispatch-slurm adjusts the \"nice\" values of its Slurm jobs to ensure containers are prioritized correctly relative to one another. This option tunes the adjustment mechanism.\n* If non-Arvados jobs run on your Slurm cluster, and your Arvados containers are waiting too long in the Slurm queue because their \"nice\" values are too high for them to compete with other Slurm jobs, you should use a smaller PrioritySpread value.\n* If you have an older Slurm system that limits nice values to 10000, a smaller @PrioritySpread@ can help avoid reaching that limit.\n* In other cases, a larger value is beneficial because it reduces the total number of adjustments made by executing @scontrol@.\n\nThe smallest usable value is @1@. The default value of @10@ is used if this option is zero or negative. Example:\n\n<notextile>\n<pre>    Containers:\n      SLURM:\n        <code class=\"userinput\">PrioritySpread: <b>1000</b></code></pre>\n</notextile>\n\nh3(#SbatchArguments). Containers.Slurm.SbatchArgumentsList\n\nWhen crunch-dispatch-slurm invokes @sbatch@, you can add arguments to the command by specifying @SbatchArgumentsList@.  You can use this to send the jobs to specific cluster partitions or add resource requests.  Set @SbatchArgumentsList@ to an array of strings.  For example:\n\n<notextile>\n<pre>    Containers:\n      SLURM:\n        <code class=\"userinput\">SbatchArgumentsList:\n          - <b>\"--partition=PartitionName\"</b></code>\n</pre>\n</notextile>\n\nNote: If an argument is supplied multiple times, @slurm@ uses the value of the last occurrence of the argument on the command line.  Arguments specified through Arvados are added after the arguments listed in @SbatchArgumentsList@.  This means, for example, an Arvados container that specifies @partitions@ in @scheduling_parameters@ will override an occurrence of @--partition@ in @SbatchArgumentsList@.  As a result, for container parameters that can be specified through Arvados, @SbatchArgumentsList@ can be used to specify defaults but not enforce specific policy.\n\nh3(#CrunchRunCommand-cgroups). Containers.CrunchRunArgumentsList: Dispatch to Slurm cgroups\n\nIf your Slurm cluster uses the @task/cgroup@ TaskPlugin, you can configure Crunch's Docker containers to be dispatched inside Slurm's cgroups.  This provides consistent enforcement of resource constraints.  
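For reference, a minimal @cgroup.conf@ sketch on the compute nodes might enable the relevant resource types like this (an illustration only; see the @cgroup.conf(5)@ man page for the options appropriate to your site):\n\n<notextile>\n<pre><code>ConstrainCores=yes\nConstrainRAMSpace=yes\nConstrainSwapSpace=yes\n</code></pre>\n</notextile>\n\n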
To dispatch inside Slurm's cgroups, use a crunch-dispatch-slurm configuration like the following:\n\n<notextile>\n<pre>    Containers:\n      <code class=\"userinput\">CrunchRunArgumentsList:\n        - <b>\"-cgroup-parent-subsystem=memory\"</b></code>\n</pre>\n</notextile>\n\nWhen using cgroups v1, the choice of subsystem (\"memory\" in this example) must correspond to one of the resource types enabled in Slurm's @cgroup.conf@.  The specified subsystem is singled out only to let Crunch determine the name of the cgroup provided by Slurm.  Limits for other resource types will also be respected.\n\nWhen doing this, you should also set \"ReserveExtraRAM\":#ReserveExtraRAM .\n\n{% include 'notebox_begin' %}\n\nSome versions of Docker (at least 1.9), when run under systemd, require the cgroup parent to be specified as a systemd slice.  This causes an error when specifying a cgroup parent created outside systemd, such as those created by Slurm.\n\nYou can work around this issue by disabling the Docker daemon's systemd integration.  This makes it more difficult to manage Docker services with systemd, but Crunch does not require that functionality, and it will be able to use Slurm's cgroups as container parents.  To do this, configure the Docker daemon on all compute nodes to run with the option @--exec-opt native.cgroupdriver=cgroupfs@.\n\n{% include 'notebox_end' %}\n\nh3(#CrunchRunCommand-network). Containers.CrunchRunArgumentsList: Using host networking for containers\n\nOlder Linux kernels (prior to 3.18) have bugs in network namespace handling which can lead to compute node lockups.  This is indicated by blocked kernel tasks in \"Workqueue: netns cleanup_net\".  If you are experiencing this problem, as a workaround you can disable use of network namespaces by Docker across the cluster.  Be aware this reduces container isolation, which may be a security risk.\n\n<notextile>\n<pre>    Containers:\n      <code class=\"userinput\">CrunchRunArgumentsList:\n        - <b>\"-container-enable-networking=always\"</b>\n        - <b>\"-container-network-mode=host\"</b></code>\n</pre>\n</notextile>\n\n{% assign arvados_component = 'crunch-dispatch-slurm' %}\n\n{% include 'install_packages' %}\n\n{% include 'start_service' %}\n\n{% include 'restart_api' %}\n"
  },
  {
    "path": "doc/install/crunch2-slurm/install-test.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Test Slurm dispatch\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'notebox_begin_warning' %}\n@crunch-dispatch-slurm@ is only relevant for on premises clusters that will spool jobs to Slurm. Skip this section if you use LSF or if you are installing a cloud cluster.\n{% include 'notebox_end' %}\n\nh2. Test compute node setup\n\nYou should now be able to submit Slurm jobs that run in Docker containers.  On the node where you're running the dispatcher, you can test this by running:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">sudo -u <b>crunch</b> srun -N1 docker run busybox echo OK\n</code></pre>\n</notextile>\n\nIf it works, this command should print @OK@ (it may also show some status messages from Slurm and/or Docker).  If it does not print @OK@, double-check your compute node setup, and that the @crunch@ user can submit Slurm jobs.\n\nh2. Test the dispatcher\n\nMake sure all of your compute nodes are set up with \"Docker\":../crunch2/install-compute-node-docker.html or \"Singularity\":../crunch2/install-compute-node-singularity.html.\n\nOn the dispatch node, start monitoring the crunch-dispatch-slurm logs:\n\n<notextile>\n<pre><code># <span class=\"userinput\">journalctl -o cat -fu crunch-dispatch-slurm.service</span>\n</code></pre>\n</notextile>\n\nIn another terminal window, use the diagnostics tool to run a simple container.\n\n<notextile>\n<pre><code># <span class=\"userinput\">arvados-client sudo diagnostics</span>\nINFO       5: running health check (same as `arvados-server check`)\nINFO      10: getting discovery document from https://zzzzz.arvadosapi.com/discovery/v1/apis/arvados/v1/rest\n...\nINFO     160: running a container\nINFO      ... container request submitted, waiting up to 10m for container to run\n</code></pre>\n</notextile>\n\nOnce @crunch-dispatch-slurm@ polls the API server for new containers to run, you should see it dispatch the new container.  It will log messages like:\n\n<notextile>\n<pre><code>2016/08/05 13:52:54 Monitoring container zzzzz-dz642-hdp2vpu9nq14tx0 started\n2016/08/05 13:53:04 About to submit queued container zzzzz-dz642-hdp2vpu9nq14tx0\n2016/08/05 13:53:04 sbatch succeeded: Submitted batch job 8102\n</code></pre>\n</notextile>\n\nBefore the container finishes, Slurm's @squeue@ command will show the new job in the list of queued and running jobs.  For example, you might see:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">squeue --long</span>\nFri Aug  5 13:57:50 2016\n  JOBID PARTITION     NAME     USER    STATE       TIME TIMELIMIT  NODES NODELIST(REASON)\n   8103   compute zzzzz-dz   crunch  RUNNING       1:56 UNLIMITED      1 compute0\n</code></pre>\n</notextile>\n\nThe job's name corresponds to the container's UUID.  
You can get more information about it by running, e.g., <notextile><code>scontrol show job Name=<b>UUID</b></code></notextile>.\n\nWhen the container finishes, the dispatcher will log that, with the final result:\n\n<notextile>\n<pre><code>2016/08/05 13:53:14 Container zzzzz-dz642-hdp2vpu9nq14tx0 now in state \"Complete\" with locked_by_uuid \"\"\n2016/08/05 13:53:14 Monitoring container zzzzz-dz642-hdp2vpu9nq14tx0 finished\n</code></pre>\n</notextile>\n\nAfter the container finishes, you can get the container record by UUID *from a shell server* to see its results:\n\n<notextile>\n<pre><code>shell:~$ <span class=\"userinput\">arv get <b>zzzzz-dz642-hdp2vpu9nq14tx0</b></span>\n{\n ...\n \"exit_code\":0,\n \"log\":\"a01df2f7e5bc1c2ad59c60a837e90dc6+166\",\n \"output\":\"d41d8cd98f00b204e9800998ecf8427e+0\",\n \"state\":\"Complete\",\n ...\n}\n</code></pre>\n</notextile>\n\nYou can use standard Keep tools to view the container's output and logs from their corresponding fields.  For example, to see the logs from the collection referenced in the @log@ field:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv keep ls <b>a01df2f7e5bc1c2ad59c60a837e90dc6+166</b></span>\n./crunch-run.txt\n./stderr.txt\n./stdout.txt\n~$ <span class=\"userinput\">arv-get <b>a01df2f7e5bc1c2ad59c60a837e90dc6+166</b>/stdout.txt</span>\n2016-08-05T13:53:06.201011Z Hello, Crunch!\n</code></pre>\n</notextile>\n\nIf the container does not dispatch successfully, refer to the @crunch-dispatch-slurm@ logs for information about why it failed.\n"
  },
  {
    "path": "doc/install/diagnostics.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Cluster diagnostics tool\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe @diagnostics@ subcommand of @arvados-client@ performs a variety of checks to help confirm that your Arvados installation has been properly configured.  It is extremely helpful to validate that your install is successful.\n\nDepending on where you are running the installer, you need to provide @-internal-client@ or @-external-client@.\n\n* If you are running the diagnostics from one of the Arvados machines inside the private network, you want @-internal-client@.\n* If you running the diagnostics from your workstation outside of the private network, you should use @-external-client@.\n\nHere is an example of it in action:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">ARVADOS_API_HOST=ClusterID.example.com ARVADOS_API_TOKEN=YourSystemTokenHere arvados-client diagnostics -external-client</span>\nINFO      10: getting discovery document from https://ClusterID.example.com/discovery/v1/apis/arvados/v1/rest\nINFO      20: getting exported config from https://ClusterID.example.com/arvados/v1/config\nINFO      30: getting current user record\nINFO      40: connecting to service endpoint https://keep.ClusterID.example.com/\nINFO      41: connecting to service endpoint https://*.collections.ClusterID.example.com/\nINFO      42: connecting to service endpoint https://download.ClusterID.example.com/\nINFO      43: connecting to service endpoint wss://ws.ClusterID.example.com/websocket\nINFO      44: connecting to service endpoint https://workbench.ClusterID.example.com/\nINFO      45: connecting to service endpoint https://workbench2.ClusterID.example.com/\nINFO      50: checking CORS headers at https://ClusterID.example.com/\nINFO      51: checking CORS headers at https://keep.ClusterID.example.com/d41d8cd98f00b204e9800998ecf8427e+0\nINFO      52: checking CORS headers at https://download.ClusterID.example.com/\nINFO      60: checking internal/external client detection\nINFO      61: reading+writing via keep service at https://keep.ClusterID.example.com:443/\nINFO      80: finding/creating \"scratch area for diagnostics\" project\nINFO      90: creating temporary collection\nINFO     100: uploading file via webdav\nINFO     110: checking WebDAV ExternalURL wildcard (https://*.collections.ClusterID.example.com/)\nINFO     120: downloading from webdav (https://d41d8cd98f00b204e9800998ecf8427e-0.collections.ClusterID.example.com/foo)\nINFO     121: downloading from webdav (https://d41d8cd98f00b204e9800998ecf8427e-0.collections.ClusterID.example.com/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)\nINFO     122: downloading from webdav (https://download.ClusterID.example.com/c=d41d8cd98f00b204e9800998ecf8427e+0/_/foo)\nINFO     123: downloading from webdav (https://download.ClusterID.example.com/c=d41d8cd98f00b204e9800998ecf8427e+0/_/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)\nINFO     124: downloading from webdav (https://a15a27cbc1c7d2d4a0d9e02529aaec7e-128.collections.ClusterID.example.com/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)\nINFO     125: downloading from webdav (https://download.ClusterID.example.com/c=ce8i5-4zz18-bkfvq2skqqf78xd/_/sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412.tar)\nINFO     130: getting list of virtual 
machines\nINFO     140: getting workbench1 webshell page\nINFO     150: connecting to webshell service\nINFO     160: running a container\nINFO      ... container request submitted, waiting up to 10m for container to run\nINFO    9990: deleting temporary collection\nINFO    --- no errors ---\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/install/index.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Installation options\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'notebox_begin' %}\nThis section is about installing an Arvados cluster.  If you are just looking to install Arvados client tools and libraries, \"go to the SDK section.\":{{site.baseurl}}/sdk/\n{% include 'notebox_end' %}\n\nArvados components run on supported GNU/Linux distributions. The Arvados elastic compute management layer supports Amazon Web Services (AWS) and Microsoft Azure cloud platforms as well as on-premises installs using SLURM and IBM Spectrum LSF.  The Arvados storage layer supports filesystem storage (including NFS, such as IBM GPFS), Azure blob storage, Amazon S3, and systems that offer an S3-compatible API such as Ceph Object Gateway and NetApp StorageGRID.\n\n\"Arvados is Free Software\":{{site.baseurl}}/user/copying/copying.html and self-install installations are not limited in any way.  Commercial support and development are also available from \"Curii Corporation.\":https://www.curii.com/\n\nArvados components can be installed and configured in a number of different ways.\n\n<div class=\"offset1\">\ntable(table table-bordered table-condensed).\n||_. Setup difficulty|_. Arvados Evaluation|_. Development|_. Production Data Management|_. Production Workflows|\n|\"Single-host install\":install-single-host.html|Easy|yes|limited|limited|limited|\n|\"Multi-host install\":install-multi-host.html|Moderate|yes|yes|yes|yes|\n|\"Manual installation\":install-manual-prerequisites.html|Difficult|yes|yes|yes|yes|\n|\"Cluster Operation Subscription supported by Curii\":https://curii.com|N/A ^1^|yes|yes|yes|yes|\n</div>\n\n^1^ No user installation necessary.  Curii engineers will install and configure Arvados in your own infrastructure.\n"
  },
  {
    "path": "doc/install/install-api-server.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Install API server and Controller\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Introduction\":#introduction\n# \"Install dependencies\":#dependencies\n# \"Set up database\":#database-setup\n# \"Update config.yml\":#update-config\n# \"Update nginx configuration\":#update-nginx\n# \"Install arvados-api-server and arvados-controller\":#install-packages\n# \"Confirm working installation\":#confirm-working\n\nh2(#introduction). Introduction\n\nThe Arvados core API server consists of four services: PostgreSQL, Arvados Rails API, Arvados Controller, and Nginx.\n\nHere is a simplified diagram showing the relationship between the core services.  Client requests arrive at the public-facing Nginx reverse proxy.  The request is forwarded to Arvados controller.  The controller is able handle some requests itself, the rest are forwarded to the Arvados Rails API.  The Rails API server implements the majority of business logic, communicating with the PostgreSQL database to fetch data and make transactional updates.  All services are stateless, except the PostgreSQL database.  This guide assumes all of these services will be installed on the same node, but it is possible to install these services across multiple nodes.\n\n!(full-width){{site.baseurl}}/images/proxy-chain.svg!\n\nh2(#dependencies). Install dependencies\n\n# \"Install PostgreSQL\":install-postgresql.html\n# \"Install nginx\":nginx.html\n\nh2(#database-setup). Set up database\n\n{% assign service_role = \"arvados\" %}\n{% assign service_database = \"arvados_production\" %}\n{% assign use_contrib = true %}\n{% include 'install_postgres_database' %}\n\nh2(#update-config). Update config.yml\n\nStarting from an \"empty config.yml file,\":config.html#empty add the following configuration keys.\n\nh3. Tokens\n\n<notextile>\n<pre><code>    SystemRootToken: <span class=\"userinput\">\"$system_root_token\"</span>\n    ManagementToken: <span class=\"userinput\">\"$management_token\"</span>\n    Collections:\n      BlobSigningKey: <span class=\"userinput\">\"$blob_signing_key\"</span>\n</code></pre>\n</notextile>\n\nThese secret tokens are used to authenticate messages between Arvados components.\n* @SystemRootToken@ is used by Arvados system services to authenticate as the system (root) user when communicating with the API server.\n* @ManagementToken@ is used to authenticate access to system metrics.\n* @Collections.BlobSigningKey@ is used to control access to Keep blocks.\n\nEach token should be a string of at least 50 alphanumeric characters. You can generate a suitable token with the following command:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">tr -dc 0-9a-zA-Z &lt;/dev/urandom | head -c50 ; echo</span>\n</code></pre>\n</notextile>\n\nh3. PostgreSQL.Connection\n\n<notextile>\n<pre><code>    PostgreSQL:\n      Connection:\n        host: <span class=\"userinput\">localhost</span>\n        user: <span class=\"userinput\">arvados</span>\n        password: <span class=\"userinput\">$postgres_password</span>\n        dbname: <span class=\"userinput\">arvados_production</span>\n</code></pre>\n</notextile>\n\nReplace the @$postgres_password@ placeholder with the password you generated during \"database setup\":#database-setup.\n\nh3. 
Services\n\n<notextile>\n<pre><code>    Services:\n      Controller:\n        ExternalURL: <span class=\"userinput\">\"https://ClusterID.example.com\"</span>\n        InternalURLs:\n          <span class=\"userinput\">\"http://localhost:8003\": {}</span>\n      RailsAPI:\n        # Does not have an ExternalURL\n        InternalURLs:\n          <span class=\"userinput\">\"http://localhost:8004\": {}</span>\n      ContainerWebServices:\n        # Does not have InternalURLs\n        ExternalURL: <span class=\"userinput\">\"https://*.containers.ClusterID.example.com\"</span>\n</code></pre>\n</notextile>\n\nReplace @ClusterID.example.com@ with the hostname that you previously selected for the API server.\n\nThe @Services@ section of the configuration helps Arvados components contact one another (service discovery).  Each service has one or more @InternalURLs@ and an @ExternalURL@.  The @InternalURLs@ describe where the service runs, and how the Nginx reverse proxy will connect to it.  The @ExternalURL@ is how external clients contact the service.\n\nh2(#update-nginx). Update nginx configuration\n\nUse a text editor to create a new file @/etc/nginx/conf.d/arvados-controller.conf@ with the following configuration.  Options that need attention are marked in <span class=\"userinput\">red</span>.\n\n<notextile>\n<pre><code>proxy_http_version 1.1;\n\n# When Keep clients request a list of Keep services from the API\n# server, use the origin IP address to determine if the request came\n# from the internal subnet or from an external client.  This sets the\n# $external_client variable which in turn is used to set the\n# X-External-Client header.\n#\n# The API server uses this header to choose whether to respond to an\n# \"available keep services\" request with either a list of internal keep\n# servers (0) or with the keepproxy (1).\n#\n# <span class=\"userinput\">Following the example here, update the 10.20.30.0/24 netmask</span>\n# <span class=\"userinput\">to match your private subnet.</span>\n# <span class=\"userinput\">Update 1.2.3.4 and add lines as necessary with the public IP</span>\n# <span class=\"userinput\">address of all servers that can also access the private network to</span>\n# <span class=\"userinput\">ensure they are not considered 'external'.</span>\n\ngeo $external_client {\n  default        1;\n  127.0.0.0/24   0;\n  <span class=\"userinput\">10.20.30.0/24</span>  0;\n  <span class=\"userinput\">1.2.3.4/32</span>     0;\n}\n\n# This is the port where nginx expects to contact arvados-controller.\nupstream controller {\n  server     localhost:8003  fail_timeout=10s;\n}\n\nserver {\n  # This configures the public https port that clients will actually connect to,\n  # the request is reverse proxied to the upstream 'controller'\n\n  listen       443 ssl;\n  server_name  <span class=\"userinput\">ClusterID.example.com</span>\n               *.<span class=\"userinput\">containers.ClusterID.example.com</span>;\n\n  ## If a wildcard name like <span class=\"userinput\">*.containers.ClusterID.example.com</span> is not\n  ## available, and Services.ContainerWebServices.ExternalPortMin and\n  ## ExternalPortMax are configured instead, then the \"listen\" and\n  ## \"server_name\" directives should be adjusted accordingly.  
Example:\n  #\n  # listen       443 ssl;\n  # listen       2000-2999 ssl;\n  # server_name  <span class=\"userinput\">ClusterID.example.com</span>\n  #              <span class=\"userinput\">containers.ClusterID.example.com</span>;\n  #\n  ## The number of ports in the range (1000 in this example) should be\n  ## added to the worker_connections setting in the events section of\n  ## your Nginx configuration (default 512).  If the system-supplied\n  ## RLIMIT_NOFILE value is low (some systems default to 1024), the\n  ## worker_rlimit_nofile setting in the main section should also be\n  ## increased by the same amount.\n  #\n  # events { worker_connections 1512; }\n  # worker_rlimit_nofile 2024;\n\n  ssl_certificate     <span class=\"userinput\">/YOUR/PATH/TO/cert.pem</span>;\n  ssl_certificate_key <span class=\"userinput\">/YOUR/PATH/TO/cert.key</span>;\n\n  # Refer to the comment about this setting in the passenger (arvados\n  # api server) section of your Nginx configuration.\n  client_max_body_size 128m;\n\n  location / {\n    proxy_pass               http://controller;\n    proxy_redirect           off;\n    proxy_connect_timeout    90s;\n    proxy_read_timeout       300s;\n    proxy_max_temp_file_size 0;\n    proxy_request_buffering  off;\n    proxy_buffering          off;\n    proxy_http_version       1.1;\n\n    proxy_set_header      Host              $http_host;\n    proxy_set_header      Upgrade           $http_upgrade;\n    proxy_set_header      Connection        \"upgrade\";\n    proxy_set_header      X-External-Client $external_client;\n    proxy_set_header      X-Forwarded-For   $proxy_add_x_forwarded_for;\n    proxy_set_header      X-Forwarded-Proto https;\n    proxy_set_header      X-Real-IP         $remote_addr;\n  }\n}\n</code></pre>\n</notextile>\n\nh2(#install-packages). Install arvados-api-server and arvados-controller\n\nh3. Red Hat, AlmaLinux, and Rocky Linux 8\n\n<notextile>\n<pre><code># <span class=\"userinput\">dnf install --enablerepo=powertools arvados-api-server arvados-controller</span>\n</code></pre>\n</notextile>\n\nh3. Red Hat, AlmaLinux, and Rocky Linux 9 or 10\n\n<notextile>\n<pre><code># <span class=\"userinput\">dnf install --enablerepo=devel arvados-api-server arvados-controller</span>\n</code></pre>\n</notextile>\n\nh3. Debian and Ubuntu\n\n<notextile>\n<pre><code># <span class=\"userinput\">apt install arvados-api-server arvados-controller</span>\n</code></pre>\n</notextile>\n\nh3(#railsapi-config). Configure Rails API server\n\nBy default, the Rails API server is configured to listen on @localhost:8004@, matching the example cluster configuration above. If you need to change this, edit the @arvados-railsapi.service@ definition to redefine the @PASSENGER_ADDRESS@ and @PASSENGER_PORT@ environment variables, like this:\n\n<notextile>\n<pre><code># <span class=\"userinput\">systemctl edit arvados-railsapi.service</span>\n### Editing /etc/systemd/system/arvados-railsapi.service.d/override.conf\n### Anything between here and the comment below will become the new contents of the file\n<span class=\"userinput\">[Service]\nEnvironment=PASSENGER_ADDRESS=<strong>0.0.0.0</strong>\nEnvironment=PASSENGER_PORT=<strong>8040</strong></span>\n### Lines below this comment will be discarded\n[...]\n</code></pre>\n</notextile>\n\nYou can similarly define other Passenger settings if desired. 
The \"Passenger Standalone reference\":https://www.phusionpassenger.com/library/config/standalone/reference/ documents all the available settings.\n\n{% assign arvados_component = 'arvados-railsapi arvados-controller' %}\n\n{% include 'start_service' %}\n\nh2(#confirm-working). Confirm working installation\n\nWe recommend using the \"Cluster diagnostics tool.\":diagnostics.html  The first few tests (10, 20, 30) will succeed if you have a working API server and controller.  Of course, tests for services that you have not yet installed and configured will fail.\n\nHere are some other checks you can perform manually.\n\nh3. Confirm working controller\n\n<notextile><pre><code>$ curl https://<span class=\"userinput\">ClusterID.example.com</span>/arvados/v1/config\n</code></pre></notextile>\n\nh3. Confirm working Rails API server\n\n<notextile><pre><code>$ curl https://<span class=\"userinput\">ClusterID.example.com</span>/discovery/v1/apis/arvados/v1/rest\n</code></pre></notextile>\n\nh3. Confirm that you can use the system root token to act as the system root user\n\n<notextile><pre><code>$ curl -H \"Authorization: Bearer $system_root_token\" https://<span class=\"userinput\">ClusterID.example.com</span>/arvados/v1/users/current\n</code></pre></notextile>\n\nh3. Troubleshooting\n\nIf you are getting TLS errors, make sure the @ssl_certificate@ directive in your nginx configuration has the \"full certificate chain\":http://nginx.org/en/docs/http/configuring_https_servers.html#chains.\n\nLogs can be found in @/var/www/arvados-api/current/log/production.log@ and using @journalctl -u arvados-controller@. See also the admin page on \"Logging\":{{site.baseurl}}/admin/logging.html.\n"
  },
  {
    "path": "doc/install/install-docker.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Set up Docker\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'install_compute_docker' %}\n"
  },
  {
    "path": "doc/install/install-keep-balance.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Install Keep-balance\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Introduction\":#introduction\n# \"Update config.yml\":#update-config\n# \"Install keep-balance package\":#install-packages\n# \"Start the service\":#start-service\n\nh2(#introduction). Introduction\n\nKeep-balance deletes unreferenced and overreplicated blocks from Keep servers, makes additional copies of underreplicated blocks, and moves blocks into optimal locations as needed (e.g., after adding new servers). See \"Balancing Keep servers\":{{site.baseurl}}/admin/keep-balance.html for usage details.\n\nKeep-balance can be installed anywhere with network access to Keep services, arvados-controller, and PostgreSQL. Typically it runs on the same host as keepproxy.\n\n*A cluster should have only one instance of keep-balance running at a time.*\n\n{% include 'notebox_begin' %}\n\nIf you are installing keep-balance on an existing system with valuable data, you can run keep-balance in \"dry run\" mode first and review its logs as a precaution. To do this, set the @Collections.BalancePullLimit@ and @Collections.BalanceTrashLimit@ configuration entries to zero.\n\n{% include 'notebox_end' %}\n\nh2(#update-config). Update the cluster config\n\nEdit the cluster config at @config.yml@ and set @Services.Keepbalance.InternalURLs@.  This port is only used to publish metrics.\n\n<notextile>\n<pre><code>    Services:\n      Keepbalance:\n        InternalURLs:\n          \"http://<span class=\"userinput\">keep.ClusterID.example.com</span>:9005/\": {}\n</code></pre>\n</notextile>\n\nEnsure your cluster configuration has @Collections.BlobTrash: true@ (this is the default).\n\n<notextile>\n<pre><code># arvados-server config-dump | grep BlobTrash:\n      BlobTrash: true\n</code></pre>\n</notextile>\n\nIf BlobTrash is false, unneeded blocks will be counted and logged by keep-balance, but they will not be deleted.\n\n{% assign arvados_component = 'keep-balance' %}\n\n{% include 'install_packages' %}\n\n{% include 'start_service' %}\n"
  },
  {
    "path": "doc/install/install-keep-web.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Install Keep-web server\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Introduction\":#introduction\n# \"Configure DNS\":#introduction\n# \"Configure anonymous user token\":#update-config\n# \"Update nginx configuration\":#update-nginx\n# \"Install keep-web package\":#install-packages\n# \"Start the service\":#start-service\n# \"Restart the API server and controller\":#restart-api\n# \"Confirm working installation\":#confirm-working\n\nh2(#introduction). Introduction\n\nThe Keep-web server provides read/write access to files stored in Keep using WebDAV and S3 protocols.  This makes it easy to access files in Keep from a browser, or mount Keep as a network folder using WebDAV support in various operating systems. It serves public data to unauthenticated clients, and serves private data to clients that supply Arvados API tokens. It can be installed anywhere with access to Keep services, controller, and the PostgreSQL server. It is typically installed behind a web proxy that provides TLS support. See the \"godoc page\":https://pkg.go.dev/git.arvados.org/arvados.git/services/keep-web for more detail.\n\nh2(#dns). Configure DNS\n\nIt is important to properly configure the keep-web service to so it does not open up cross-site-scripting (XSS) attacks.  A HTML file can be stored in collection.  If an attacker causes a victim to visit that HTML file through Workbench, it will be rendered by the browser.  If all collections are served at the same domain, the browser will consider collections as coming from the same origin and thus have access to the same browsing data (such as API token), enabling malicious Javascript in the HTML file to access Arvados as the victim.\n\nThere are two approaches to mitigate this.\n\n# The service can tell the browser that all files should go to download instead of in-browser preview, except in situations where an attacker is unlikely to be able to gain access to anything they didn't already have access to.\n# Each collection served by @keep-web@ is served on its own virtual host.  This allows for file with executable content to be displayed in-browser securely.  The virtual host embeds the collection uuid or portable data hash in the hostname.  For example, a collection with uuid @xxxxx-4zz18-tci4vn4fa95w0zx@ could be served as @xxxxx-4zz18-tci4vn4fa95w0zx.collections.ClusterID.example.com@ .  The portable data hash @dd755dbc8d49a67f4fe7dc843e4f10a6+54@ could be served at @dd755dbc8d49a67f4fe7dc843e4f10a6-54.collections.ClusterID.example.com@ .  This requires \"wildcard DNS record\":https://en.wikipedia.org/wiki/Wildcard_DNS_record and \"wildcard TLS certificate.\":https://en.wikipedia.org/wiki/Wildcard_certificate\n\nh3. Collections download URL\n\nDownloads links will served from the URL in @Services.WebDAVDownload.ExternalURL@ .  The collection uuid or PDH is put in the URL path.\n\nIf blank, serve links to WebDAV with @disposition=attachment@ query param.  Unlike preview links, browsers do not render attachments, so there is no risk of XSS.\n\nIf @WebDAVDownload@ is blank, and @WebDAV@ has a single origin (not wildcard, see below), then Workbench will show an error page\n\n<notextile>\n<pre><code>    Services:\n      WebDAVDownload:\n        ExternalURL: https://<span class=\"userinput\">download.ClusterID.example.com</span>\n</code></pre>\n</notextile>\n\nh3. 
Collections preview URL\n\nCollections will be served using the URL pattern in @Services.WebDAV.ExternalURL@ .  If blank, use @Services.WebDAVDownload.ExternalURL@ instead, and disable inline preview.  If both are empty, downloading collections from workbench will be impossible.  When wildcard domains are configured, credentials are still required to access non-public data.\n\nh4. In their own subdomain\n\nCollections can be served from their own subdomain:\n\n<notextile>\n<pre><code>    Services:\n      WebDAV:\n        ExternalURL: https://<span class=\"userinput\">*.collections.ClusterID.example.com/</span>\n</code></pre>\n</notextile>\n\nThis option is preferred if you plan to access Keep using third-party S3 client software, because it accommodates S3 virtual host-style requests and path-style requests without any special client configuration.\n\nh4. Under the main domain\n\nAlternately, they can go under the main domain by including @--@:\n\n<notextile>\n<pre><code>    Services:\n      WebDAV:\n        ExternalURL: https://<span class=\"userinput\">*--collections.ClusterID.example.com/</span>\n</code></pre>\n</notextile>\n\nh4. From a single domain\n\nServe preview links from a single domain, setting uuid or pdh in the path (similar to downloads).  This configuration only allows previews of public data (data accessible by the anonymous user) and collection-sharing links (where the token is already embedded in the URL); it will ignore authorization headers, so a request for non-public data may return \"404 Not Found\" even if normally valid credentials were provided.\n\n<notextile>\n<pre><code>    Services:\n      WebDAV:\n        ExternalURL: https://<span class=\"userinput\">collections.ClusterID.example.com/</span>\n</code></pre>\n</notextile>\n\nNote the trailing slash.\n\n{% include 'notebox_begin' %}\nWhether you choose to serve collections from their own subdomain or from a single domain, it's important to keep in mind that they should be served from the same _site_ as Workbench for the inline previews to work.\n\nPlease check \"keep-web's URL pattern guide\":../api/keep-web-urls.html#same-site to learn more.\n{% include 'notebox_end' %}\n\nh2. Set InternalURLs\n\n<notextile>\n<pre><code>    Services:\n      WebDAV:\n        InternalURLs:\n          http://<span class=\"userinput\">localhost:9002</span>: {}\n</code></pre>\n</notextile>\n\nh2(#update-config). Configure anonymous user token\n\nIf you intend to use Keep-web to serve public data to anonymous clients, configure it with an anonymous token.\n\nGenerate a random string (>= 32 characters long) and put it in the @config.yml@ file, in the @AnonymousUserToken@ field.\n\n<notextile>\n<pre><code>    Users:\n      AnonymousUserToken: <span class=\"userinput\">\"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz\"</span>\n</code></pre>\n</notextile>\n\nSet @Users.AnonymousUserToken: \"\"@ (empty string) or leave it out if you do not want to serve public data.\n\nh3. Update nginx configuration\n\nPut a reverse proxy with SSL support in front of keep-web.  Keep-web itself runs on port 9002 (or whatever is specified in @Services.WebDAV.InternalURL@) while the reverse proxy runs on port 443 and forwards requests to Keep-web.\n\nUse a text editor to create a new file @/etc/nginx/conf.d/keep-web.conf@ with the following configuration. 
Options that need attention are marked in <span class=\"userinput\">red</span>.\n\n<notextile><pre>\nupstream keep-web {\n  server                127.0.0.1:<span class=\"userinput\">9002</span>;\n}\n\nserver {\n  listen                443 ssl;\n  server_name           <span class=\"userinput\">download.ClusterID.example.com</span>\n                        <span class=\"userinput\">collections.ClusterID.example.com</span>\n                        <span class=\"userinput\">*.collections.ClusterID.example.com</span>\n                        <span class=\"userinput\">~.*--collections\\.ClusterID\\.example\\.com</span>;\n\n  proxy_connect_timeout 90s;\n  proxy_read_timeout    300s;\n\n  ssl                   on;\n  ssl_certificate       <span class=\"userinput\">/YOUR/PATH/TO/cert.pem</span>;\n  ssl_certificate_key   <span class=\"userinput\">/YOUR/PATH/TO/cert.key</span>;\n\n  location / {\n    proxy_pass          http://keep-web;\n    proxy_set_header    Host            $host;\n    proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;\n\n    client_max_body_size    0;\n    proxy_http_version      1.1;\n    proxy_request_buffering off;\n    proxy_max_temp_file_size 0;\n  }\n}\n</pre></notextile>\n\n{% include 'notebox_begin' %}\nIf you restrict access to your Arvados services based on network topology -- for example, your proxy server is not reachable from the public internet -- additional proxy configuration might be needed to thwart cross-site scripting attacks that would circumvent your restrictions.\n\nNormally, Keep-web accepts requests for multiple collections using the same host name, provided the client's credentials are not being used. This provides insufficient XSS protection in an installation where the \"anonymously accessible\" data is not truly public, but merely protected by network topology.\n\nIn such cases -- for example, a site which is not reachable from the internet, where some data is world-readable from Arvados's perspective but is intended to be available only to users within the local network -- the downstream proxy should be configured to return 401 for all paths beginning with \"/c=\".\n{% include 'notebox_end' %}\n\nh3. Configure filesystem cache size\n\nKeep-web stores copies of recently accessed data blocks in @/var/cache/arvados/keep@. The cache size defaults to 10% of the size of the filesystem where that directory is located (typically @/var@) and can be customized with the @DiskCacheSize@ config entry.\n\n<notextile>\n<pre><code>  Collections:\n    WebDAVCache:\n      DiskCacheSize: 20 GiB</code></pre></notextile>\n\n{% assign arvados_component = 'keep-web' %}\n\n{% include 'install_packages' %}\n\n{% include 'start_service' %}\n\n{% include 'restart_api' %}\n\nh2(#confirm-working). 
Confirm working installation\n\nWe recommend using the \"Cluster diagnostics tool.\":diagnostics.html\n\nHere are some other checks you can perform manually.\n\n<notextile>\n<pre><code>$ curl -H \"Authorization: Bearer $system_root_token\" https://<span class=\"userinput\">download.ClusterID.example.com</span>/c=59389a8f9ee9d399be35462a0f92541c-53/_/hello.txt</code></pre>\n</notextile>\n\nIf wildcard collections domains are configured:\n\n<notextile>\n<pre><code>$ curl -H \"Authorization: Bearer $system_root_token\" https://<span class=\"userinput\">59389a8f9ee9d399be35462a0f92541c-53.collections.ClusterID.example.com</span>/hello.txt</code></pre>\n</notextile>\n\nIf using a single collections preview domain:\n\n<notextile>\n<pre><code>$ curl https://<span class=\"userinput\">collections.ClusterID.example.com</span>/c=59389a8f9ee9d399be35462a0f92541c-53/t=$system_root_token/_/hello.txt</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/install/install-keepproxy.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Install Keepproxy server\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Introduction\":#introduction\n# \"Update config.yml\":#update-config\n# \"Update nginx configuration\":#update-nginx\n# \"Install keepproxy package\":#install-packages\n# \"Start the service\":#start-service\n# \"Restart the API server and controller\":#restart-api\n# \"Confirm working installation\":#confirm-working\n\nh2(#introduction). Introduction\n\nThe Keepproxy server is a gateway into your Keep storage. Unlike the Keepstore servers, which are only accessible on the local LAN, Keepproxy is suitable for clients located elsewhere on the internet. Specifically, in contrast to Keepstore:\n* A client writing through Keepproxy sends a single copy of a data block, and Keepproxy distributes copies to the appropriate Keepstore servers.\n* A client can write through Keepproxy without precomputing content hashes.\n* Keepproxy checks API token validity before processing requests. (Clients that can connect directly to Keepstore can use it as scratch space even without a valid API token.)\n\nBy convention, we use the following hostname for the Keepproxy server:\n\n<div class=\"offset1\">\ntable(table table-bordered table-condensed).\n|_. Hostname|\n|@keep.ClusterID.example.com@|\n</div>\n\nThis hostname should resolve from anywhere on the internet.\n\nh2(#update-config). Update config.yml\n\nEdit the cluster config at @config.yml@ and set @Services.Keepproxy.ExternalURL@ and @Services.Keepproxy.InternalURLs@.\n\n<notextile>\n<pre><code>    Services:\n      Keepproxy:\n        ExternalURL: <span class=\"userinput\">https://keep.ClusterID.example.com</span>\n        InternalURLs:\n          <span class=\"userinput\">\"http://localhost:25107\": {}</span>\n</span></code></pre>\n</notextile>\n\nh2(#update-nginx). Update Nginx configuration\n\nPut a reverse proxy with SSL support in front of Keepproxy. Keepproxy itself runs on the port 25107 (or whatever is specified in @Services.Keepproxy.InternalURL@) while the reverse proxy runs on port 443 and forwards requests to Keepproxy.\n\nUse a text editor to create a new file @/etc/nginx/conf.d/keepproxy.conf@ with the following configuration. Options that need attention are marked in <span class=\"userinput\">red</span>.\n\n<notextile><pre><code>upstream keepproxy {\n  server                127.0.0.1:<span class=\"userinput\">25107</span>;\n}\n\nserver {\n  listen                  443 ssl;\n  server_name             <span class=\"userinput\">keep.ClusterID.example.com</span>;\n\n  proxy_connect_timeout   90s;\n  proxy_read_timeout      300s;\n  proxy_set_header        X-Real-IP $remote_addr;\n  proxy_http_version      1.1;\n  proxy_request_buffering off;\n  proxy_max_temp_file_size 0;\n\n  ssl_certificate     <span class=\"userinput\">/YOUR/PATH/TO/cert.pem</span>;\n  ssl_certificate_key <span class=\"userinput\">/YOUR/PATH/TO/cert.key</span>;\n\n  # Clients need to be able to upload blocks of data up to 64MiB in size.\n  client_max_body_size    64m;\n\n  location / {\n    proxy_pass            http://keepproxy;\n  }\n}\n</code></pre></notextile>\n\nNote: if the Web uploader is failing to upload data and there are no logs from keepproxy, be sure to check the nginx proxy logs.  
{% assign arvados_component = 'keepproxy' %}\n\n{% include 'install_packages' %}\n\n{% include 'start_service' %}\n\n{% include 'restart_api' %}\n\nh2(#confirm-working). Confirm working installation\n\nWe recommend using the \"Cluster diagnostics tool.\":diagnostics.html  Because Keepproxy is specifically a gateway used by outside clients, for this test you should run the diagnostics from a client machine outside the Arvados private network, and provide the @-external-client@ parameter.\n\nHere are some other checks you can perform manually.\n\nLog into a host that is on a network external to your private Arvados network.  The host should be able to contact your keepproxy server (eg @keep.ClusterID.example.com@), but not your keepstore servers (eg keep[0-9].ClusterID.example.com).\n\n@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ must be set in the environment.\n\n@ARVADOS_API_HOST@ should be the hostname of the API server.\n\n@ARVADOS_API_TOKEN@ should be the system root token.\n\nInstall the \"Command line SDK\":{{site.baseurl}}/sdk/cli/install.html\n\nCheck that the keepproxy server is in the @keep_service@ \"accessible\" list:\n\n<notextile>\n<pre><code>\n$ <span class=\"userinput\">arv keep_service accessible</span>\n[...]\n</code></pre>\n</notextile>\n\nIf keepproxy does not show up in the \"accessible\" list, check that you have \"properly configured the @geo@ block for the API server\":install-api-server.html#update-nginx .\n\nInstall the \"Python SDK\":{{site.baseurl}}/sdk/python/sdk-python.html\n\nYou should now be able to use @arv-put@ to upload collections and @arv-get@ to fetch collections.  Be sure to execute this from _outside_ the cluster's private network.\n\n{% include 'arv_put_example' %}\n"
  },
  {
    "path": "doc/install/install-keepstore.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Install Keepstore servers\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Introduction\":#introduction\n# \"Update config.yml\":#update-config\n# \"Install keepstore package\":#install-packages\n# \"Restart the API server and controller\":#restart-api\n# \"Confirm working installation\":#confirm-working\n# \"Note on storage management\":#note\n\nh2. Introduction\n\nKeepstore provides access to underlying storage for reading and writing content-addressed blocks, with enforcement of Arvados permissions.  Keepstore supports a variety of cloud object storage and POSIX filesystems for its backing store.\n\nh3. Plan your storage layout\n\nIn the steps below, you will configure a number of backend storage volumes (like local filesystems and S3 buckets) and specify which keepstore servers have read-only and read-write access to which volumes.\n\nIt is possible to configure arbitrary server/volume layouts. However, in order to provide good performance and efficient use of storage resources, we strongly recommend using one of the following layouts:\n\n# Each volume is writable by exactly one server, and optionally readable by one or more other servers. The total capacity of all writable volumes is the same for each server.\n# Each volume is writable by all servers. Each volume has enough built-in redundancy to satisfy your requirements, i.e., you do not need Arvados to mirror data across multiple volumes.\n\nWe recommend starting off with two Keepstore servers.  Exact server specifications will be site and workload specific, but in general keepstore will be I/O bound and should be set up to maximize aggregate bandwidth with compute nodes.  To increase capacity (either space or throughput) it is straightforward to add additional servers, or (in cloud environments) to increase the machine size of the existing servers.\n\nBy convention, we use the following hostname pattern:\n\n<div class=\"offset1\">\ntable(table table-bordered table-condensed).\n|_. Hostname|\n|@keep0.ClusterID.example.com@|\n|@keep1.ClusterID.example.com@|\n</div>\n\nKeepstore servers should not be directly accessible from the Internet (they are accessed via \"keepproxy\":install-keepproxy.html), so the hostnames only need to resolve on the private network.\n\nh2(#update-config). Update cluster config\n\nh3. Configure storage volumes\n\nFill in the @Volumes@ section of @config.yml@ for each storage volume.  Available storage volume types include POSIX filesystems and cloud object storage.  It is possible to have different volume types in the same cluster.\n\n* To use a POSIX filesystem, including both local filesystems (ext4, xfs) and network file system such as GPFS or Lustre, follow the setup instructions on \"Filesystem storage\":configure-fs-storage.html\n* If you are using S3-compatible object storage (including Amazon S3, Google Cloud Storage, and Ceph RADOS), follow the setup instructions on \"S3 Object Storage\":configure-s3-object-storage.html\n* If you are using Azure Blob Storage, follow the setup instructions on \"Azure Blob Storage\":configure-azure-blob-storage.html\n\nThere are a number of general configuration parameters for Keepstore. They are described in the \"configuration reference\":{{site.baseurl}}/admin/config.html. 
h3. List services\n\nAdd each keepstore server to the @Services.Keepstore@ section of @/etc/arvados/config.yml@ .\n\n<notextile>\n<pre><code>    Services:\n      Keepstore:\n        # No ExternalURL because they are only accessed from within the internal subnet.\n        InternalURLs:\n          \"http://<span class=\"userinput\">keep0.ClusterID.example.com</span>:25107\": {}\n          \"http://<span class=\"userinput\">keep1.ClusterID.example.com</span>:25107\": {}\n          # and so forth\n</code></pre>\n</notextile>\n\n{% assign arvados_component = 'keepstore' %}\n\n{% include 'install_packages' %}\n\n{% include 'start_service' %}\n\n{% include 'restart_api' %}\n\nh2(#confirm-working). Confirm working installation\n\nWe recommend using the \"Cluster diagnostics tool.\":diagnostics.html\n\nHere are some other checks you can perform manually.\n\nLog into a host that is on your private Arvados network.  The host should be able to contact your keepstore servers (eg keep[0-9].ClusterID.example.com).\n\n@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ must be set in the environment.\n\n@ARVADOS_API_HOST@ should be the hostname of the API server.\n\n@ARVADOS_API_TOKEN@ should be the system root token.\n\nInstall the \"Command line SDK\":{{site.baseurl}}/sdk/cli/install.html\n\nCheck that the keepstore server is in the @keep_service@ \"accessible\" list:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv keep_service accessible</span>\n[...]\n</code></pre>\n</notextile>\n\nIf keepstore does not show up in the \"accessible\" list, and you are accessing it from within the private network, check that you have \"properly configured the @geo@ block for the API server\":install-api-server.html#update-nginx .\n\nNext, install the \"Python SDK\":{{site.baseurl}}/sdk/python/sdk-python.html\n\nYou should now be able to use @arv-put@ to upload collections and @arv-get@ to fetch collections.  Be sure to execute this from _inside_ the cluster's private network.  You will be able to access keep from _outside_ the private network after setting up \"keepproxy\":install-keepproxy.html .\n\n{% include 'arv_put_example' %}\n\nh2(#note). Note on storage management\n\nOn its own, a keepstore server never deletes data. Instead, the keep-balance service determines which blocks are candidates for deletion and instructs the keepstore to move those blocks to the trash. Please see the \"Balancing Keep servers\":{{site.baseurl}}/admin/keep-balance.html page for more details.\n"
  },
  {
    "path": "doc/install/install-manual-prerequisites.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Planning and prerequisites\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nBefore attempting installation, you should begin by reviewing supported platforms, choosing backends for identity, storage, and scheduling, and deciding how you will distribute Arvados services onto machines.  You should also choose an Arvados Cluster ID, choose your hostnames, and acquire TLS certificates.  It may be helpful to make notes as you go along using one of these worksheets:  \"New cluster checklist for AWS\":new_cluster_checklist_AWS.xlsx - \"New cluster checklist for Azure\":new_cluster_checklist_Azure.xlsx - \"New cluster checklist for on premises Slurm\":new_cluster_checklist_slurm.xlsx\n\nThe installation guide describes how to set up a basic standalone Arvados instance.  Additional configuration for features including \"federation,\":{{site.baseurl}}/admin/federation.html \"collection versioning,\":{{site.baseurl}}/admin/collection-versioning.html \"managed properties,\":{{site.baseurl}}/admin/collection-managed-properties.html and \"storage classes\":{{site.baseurl}}/admin/collection-managed-properties.html are described in the \"Admin guide.\":{{site.baseurl}}/admin/\n\nThe Arvados storage subsystem is called \"keep\".  The compute subsystem is called \"crunch\".\n\n# \"Supported GNU/Linux distributions\":#supportedlinux\n# \"Choosing which components to install\":#components\n# \"Identity provider\":#identity\n# \"Storage backend (Keep)\":#storage\n# \"Container compute scheduler (Crunch)\":#scheduler\n# \"Hardware or virtual machines\":#machines\n# \"Arvados Cluster ID\":#clusterid\n# \"DNS and TLS\":#dnstls\n\n\nh2(#supportedlinux). Supported GNU/Linux distributions\n\n{% include 'supportedlinux' %}\n\nh2(#components). Choosing which components to install\n\nArvados consists of many components, some of which may be omitted (at the cost of reduced functionality).  It may also be helpful to review the \"Arvados Architecture\":{{site.baseurl}}/architecture/ to understand how these components interact.\n\ntable(table table-bordered table-condensed).\n|\\3=. *Core*|\n|\"PostgreSQL database\":install-postgresql.html |Stores data for the API server.|Required.|\n|\"API server + Controller\":install-api-server.html |Core Arvados logic for managing users, groups, collections, containers, and enforcing permissions.|Required.|\n|\\3=. *Keep (storage)*|\n|\"Keepstore\":install-keepstore.html |Stores content-addressed blocks in a variety of backends (local filesystem, cloud object storage).|Required.|\n|\"Keepproxy\":install-keepproxy.html |Gateway service to access keep servers from external networks.|Required to be able to use arv-put, arv-get, or arv-mount outside the private Arvados network.|\n|\"Keep-web\":install-keep-web.html |Gateway service providing read/write HTTP and WebDAV support on top of Keep.|Required to access files from Workbench.|\n|\"Keep-balance\":install-keep-balance.html |Storage cluster maintenance daemon responsible for moving blocks to their optimal server location, adjusting block replication levels, and trashing unreferenced blocks.|Required to free deleted data from underlying storage, and to ensure proper replication and block distribution (including support for storage classes).|\n|\\3=. 
*User interface*|\n|\"Workbench2\":install-workbench2-app.html |Primary graphical user interface for working with file collections and running containers.|Optional.  Depends on API server, keep-web, websockets server.|\n|\\3=. *Additional services*|\n|\"Websockets server\":install-ws.html |Event distribution server.|Required to view streaming container logs in Workbench.|\n|\"Shell server\":install-shell-server.html |Grant Arvados users access to Unix shell accounts on dedicated shell nodes.|Optional.|\n|\\3=. *Crunch (running containers)*|\n|\"arvados-dispatch-cloud\":crunch2-cloud/install-dispatch-cloud.html |Run analysis workflows on cloud by allocating and freeing cloud VM instances on demand.|Optional|\n|\"crunch-dispatch-slurm\":crunch2-slurm/install-dispatch.html |Run analysis workflows distributed across a Slurm cluster.|Optional|\n|\"crunch-dispatch-lsf\":crunch2-lsf/install-dispatch.html |Run analysis workflows distributed across an LSF cluster.|Optional|\n\nh2(#identity). Identity provider\n\nChoose which backend you will use to authenticate users.\n\n* Google login to authenticate users with a Google account.\n* OpenID Connect (OIDC) if you have a Single Sign-On (SSO) service that supports the OpenID Connect standard.\n* LDAP login to authenticate users by username/password using the LDAP protocol, supported by many services such as OpenLDAP and Active Directory.\n* PAM login to authenticate users by username/password according to the PAM configuration on the controller node.\n\nh2(#postgresql). PostgreSQL\n\nArvados works well with a standalone PostgreSQL installation. When deploying on AWS, Aurora RDS also works but Aurora Serverless is not recommended.\n\nh2(#storage). Storage backend\n\nChoose which backend you will use for storing and retrieving content-addressed Keep blocks.\n\n* Filesystem storage, such as ext4 or xfs, or network filesystems such as GPFS or Lustre\n* Amazon S3, or other object storage that supports the S3 API, including Google Cloud Storage and Ceph.\n* Azure Blob Storage\n\nYou should also determine the desired replication factor for your data.  A replication factor of 1 means only a single copy of a given data block is kept.  With a conventional file system backend and a replication factor of 1, a hard drive failure is likely to lose data.  For this reason the default replication factor is 2 (two copies are kept).\n\nA backend may have its own replication factor (such as the durability guarantees of cloud buckets) and Arvados will take this into account when writing a new data block.\n\nh2(#scheduler). Container compute scheduler\n\nChoose which backend you will use to schedule computation.\n\n* On AWS EC2 and Azure, you probably want to use @arvados-dispatch-cloud@ to manage the full lifecycle of cloud compute nodes: starting up nodes sized to the container request, executing containers on those nodes, and shutting nodes down when no longer needed.\n* For on-premises HPC clusters using \"Slurm\":https://slurm.schedmd.com/ use @crunch-dispatch-slurm@ to execute containers with Slurm job submissions.\n* For on-premises HPC clusters using \"LSF\":https://www.ibm.com/products/hpc-workload-management/ use @crunch-dispatch-lsf@ to execute containers with LSF job submissions.\n* For single node demos, use @crunch-dispatch-local@ to execute containers directly.\n\nh2(#machines). Hardware (or virtual machines)\n\nChoose how to allocate Arvados services to machines.  
We recommend that each machine start with a clean installation of a supported GNU/Linux distribution.\n\nFor a production installation, this is a reasonable starting point:\n\n<div class=\"offset1\">\ntable(table table-bordered table-condensed).\n|_. Function|_. Number of nodes|_. Recommended specs|\n|PostgreSQL database, Arvados API server, Arvados controller, Websockets, Container dispatcher|1|16+ GiB RAM, 4+ cores, fast disk for database|\n|Workbench, Keepproxy, Keep-web, Keep-balance|1|8 GiB RAM, 2+ cores|\n|Keepstore servers ^1^|2+|4 GiB RAM|\n|Compute worker nodes ^1^|0+ |Depends on workload; scaled dynamically in the cloud|\n|User shell nodes ^2^|0+|Depends on workload|\n</div>\n\n^1^ Should be scaled up as needed\n^2^ Refers to shell nodes managed by Arvados that provide ssh access for users to interact with Arvados at the command line.  Optional.\n\n{% include 'notebox_begin' %}\nFor a small demo installation, it is possible to run all the Arvados services on a single node.  Special considerations for single-node installs will be noted in boxes like this.\n{% include 'notebox_end' %}\n\nh2(#clusterid). Arvados Cluster ID\n\nEach Arvados installation is identified by a cluster identifier, which is a unique 5-character lowercase alphanumeric string. There are 36^5^ = 60466176 possible cluster identifiers.\n\n* For automated test purposes, use “z****”\n* For experimental/local-only/private clusters that won’t ever be visible on the public Internet, use “x****”\n* For long-lived clusters, we recommend reserving a cluster ID.  Contact \"info@curii.com\":mailto:info@curii.com for more information.\n\nHere is one way to make a random 5-character string:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">tr -dc 0-9a-z &lt;/dev/urandom | head -c5; echo</span>\n</code></pre>\n</notextile>\n\nYou may also use a different method to pick the cluster identifier. The cluster identifier will be part of the hostname of the services in your Arvados cluster. The rest of this documentation will refer to it as your @ClusterID@.  Whenever @ClusterID@ appears in a configuration example, replace it with your five-character cluster identifier.\n\nh2(#dnstls). DNS entries and TLS certificates\n\nThe following services are normally public-facing and require DNS entries and corresponding TLS certificates.  Get certificates from your preferred TLS certificate provider.  We recommend using \"Let's Encrypt\":https://letsencrypt.org/.  You can run several services on the same node, but each distinct DNS name requires a valid, matching TLS certificate.\n\nThis guide uses the following DNS name conventions.  A later part of this guide will describe how to set up Nginx virtual hosts.\nIt is possible to use custom DNS names for the Arvados services.\n\n<div class=\"offset1\">\ntable(table table-bordered table-condensed).\n|_. Function|_. 
DNS name|\n|Arvados API|@ClusterID.example.com@|\n|Arvados Webshell|webshell.@ClusterID.example.com@|\n|Arvados Websockets endpoint|ws.@ClusterID.example.com@|\n|Arvados Workbench|workbench.@ClusterID.example.com@|\n|Arvados Workbench 2|workbench2.@ClusterID.example.com@|\n|Arvados Keepproxy server|keep.@ClusterID.example.com@|\n|Arvados Keep-web server|download.@ClusterID.example.com@\n_and_\n*.collections.@ClusterID.example.com@ _or_\n*<notextile>--</notextile>collections.@ClusterID.example.com@ _or_\ncollections.@ClusterID.example.com@ (see the \"keep-web install docs\":install-keep-web.html)|\n|Container web services|*.containers.@ClusterID.example.com@ _or_\n*<notextile>--</notextile>containers.@ClusterID.example.com@|\n</div>\n\nSetting up Arvados is easiest when wildcard TLS and wildcard DNS are available. It is also possible to set up Arvados without wildcard TLS and DNS, but some functionality will be unavailable:\n* A wildcard for @keep-web@ (e.g., *.collections.@ClusterID.example.com@) is needed to allow users to view Arvados-hosted data in their browsers. More information on this tradeoff, which is caused by the CORS rules applied by modern browsers, is available in the \"keep-web URL pattern guide\":../api/keep-web-urls.html.\n* A wildcard for @controller@ (e.g., *.containers.@ClusterID.example.com@) is needed to allow users to connect to Arvados-hosted services in their browsers.\n\nThe table below lists the required TLS certificates and DNS names in each scenario.\n\n<div class=\"offset1\">\ntable(table table-bordered table-condensed).\n||_. Wildcard TLS and DNS available|_. Wildcard TLS available|_. Other|\n|TLS|@ClusterID.example.com@\n*.@ClusterID.example.com@\n*.collections.@ClusterID.example.com@\n*.containers.@ClusterID.example.com@|*.@ClusterID.example.com@\n@ClusterID.example.com@|@ClusterID.example.com@\ngit.@ClusterID.example.com@\nwebshell.@ClusterID.example.com@\nws.@ClusterID.example.com@\nworkbench.@ClusterID.example.com@\nworkbench2.@ClusterID.example.com@\nkeep.@ClusterID.example.com@\ndownload.@ClusterID.example.com@\ncollections.@ClusterID.example.com@|\n|DNS|@ClusterID.example.com@\ngit.@ClusterID.example.com@\nwebshell.@ClusterID.example.com@\nws.@ClusterID.example.com@\nworkbench.@ClusterID.example.com@\nworkbench2.@ClusterID.example.com@\nkeep.@ClusterID.example.com@\ndownload.@ClusterID.example.com@\n*.collections.@ClusterID.example.com@\n*.containers.@ClusterID.example.com@|@ClusterID.example.com@\ngit.@ClusterID.example.com@\nwebshell.@ClusterID.example.com@\nws.@ClusterID.example.com@\nworkbench.@ClusterID.example.com@\nworkbench2.@ClusterID.example.com@\nkeep.@ClusterID.example.com@\ndownload.@ClusterID.example.com@\ncollections.@ClusterID.example.com@|@ClusterID.example.com@\ngit.@ClusterID.example.com@\nwebshell.@ClusterID.example.com@\nws.@ClusterID.example.com@\nworkbench.@ClusterID.example.com@\nworkbench2.@ClusterID.example.com@\nkeep.@ClusterID.example.com@\ndownload.@ClusterID.example.com@\ncollections.@ClusterID.example.com@|\n</div>\n\n{% include 'notebox_begin' %}\nIt is also possible to create your own certificate authority, issue server certificates, and install a custom root certificate in the browser.  This is out of scope for this guide.\n{% include 'notebox_end' %}\n"
  },
  {
    "path": "doc/install/install-multi-host.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Multi-Host Arvados\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Introduction\":#introduction\n# \"Prerequisites and planning\":#prerequisites\n# \"Download the installer\":#download\n# \"Initialize the installer\":#copy_config\n# \"Set up your infrastructure\":#setup-infra\n## \"Create AWS infrastructure with Terraform\":#terraform\n## \"Create required infrastructure manually\":#inframanual\n# \"Edit local.params* files\":#localparams\n# \"Configure Keep storage\":#keep\n# \"Choose the SSL configuration\":#certificates\n## \"Using Let's Encrypt certificates\":#lets-encrypt\n## \"Bring your own certificates\":#bring-your-own\n### \"Securing your TLS certificate keys\":#secure-tls-keys\n# \"Create a compute image\":#create_a_compute_image\n# \"Begin installation\":#installation\n# \"Further customization of the installation\":#further_customization\n# \"Confirm the cluster is working\":#test-install\n## \"Debugging issues\":#debugging\n## \"Iterating on config changes\":#iterating\n## \"Common problems and solutions\":#common-problems\n# \"Initial user and login\":#initial_user\n# \"Monitoring and Metrics\":#monitoring\n# \"Load balancing controllers\":#load_balancing\n# \"After the installation\":#post_install\n\nh2(#introduction). Introduction\n\nThis multi-host installer is the recommended way to set up a production Arvados cluster.  These instructions include specific details for installing on Amazon Web Services (AWS), which are marked as \"AWS specific\".  However, with additional customization the installer can be used as a template for deployment on other cloud providers or HPC systems.\n\nh2(#prerequisites). Prerequisites and planning\n\nh3. Cluster ID and base domain\n\nChoose a 5-character cluster identifier that will represent the cluster.  Here are \"guidelines on choosing a cluster identifier\":../architecture/federation.html#cluster_id .  Only lowercase letters and digits 0-9 are allowed.  Examples will use @xarv1@ or @${CLUSTER}@; substitute the cluster ID you have selected.\n\nDetermine the base domain for the cluster.  This will be referred to as @${DOMAIN}@.\n\nFor example, if DOMAIN is @xarv1.example.com@, then @controller.${DOMAIN}@ means @controller.xarv1.example.com@.\n\nh3(#DNS). DNS hostnames for each service\n\nYou will need a DNS entry for each service.  When using the \"Terraform script\":#terraform to set up your infrastructure, these domains will be created automatically using AWS Route 53.\n\nIn the default configuration these are:\n\n# @controller.${DOMAIN}@\n# @ws.${DOMAIN}@\n# @keep0.${DOMAIN}@\n# @keep1.${DOMAIN}@\n# @keep.${DOMAIN}@\n# @download.${DOMAIN}@\n# @*.collections.${DOMAIN}@  -- a wildcard DNS resolving to the @keepweb@ service\n# @*.containers.${DOMAIN}@  -- a wildcard DNS resolving to the @controller@ service\n# @workbench.${DOMAIN}@\n# @workbench2.${DOMAIN}@\n# @webshell.${DOMAIN}@\n# @shell.${DOMAIN}@\n# @prometheus.${DOMAIN}@\n# @grafana.${DOMAIN}@\n\nFor more information, see \"DNS entries and TLS certificates\":install-manual-prerequisites.html#dnstls.\n\n
h2(#download). Download the installer\n\n{% assign local_params_src = 'multiple_hosts' %}\n{% assign config_examples_src = 'multi_host/aws' %}\n{% assign terraform_src = 'terraform/aws' %}\n{% include 'download_installer' %}\n\nh2(#setup-infra). Set up your infrastructure\n\n## \"Create AWS infrastructure with Terraform\":#terraform\n## \"Create required infrastructure manually\":#inframanual\n\nh3(#terraform). Create AWS infrastructure with Terraform (AWS specific)\n\nWe provide a set of Terraform code files that you can run to create the necessary infrastructure on Amazon Web Services.\n\nThese files are located in the @terraform@ installer directory and are divided into three sections:\n\n# The @terraform/vpc/@ subdirectory controls the network-related infrastructure of your cluster, including firewall rules and split-horizon DNS resolution.\n# The @terraform/data-storage/@ subdirectory controls the stateful part of your cluster. It currently only sets up the S3 bucket for holding the Keep blocks; in the future it will also manage the database service.\n# The @terraform/services/@ subdirectory controls the hosts that will run the different services on your cluster, and makes sure that they have the required software for the installer to do its job.\n\nh4. Software requirements & considerations\n\n{% include 'notebox_begin' %}\nThe Terraform state files (which hold crucial information about your cloud infrastructure) will be saved inside each subdirectory, under the @terraform.tfstate@ name.  These will be committed to the git repository used to coordinate deployment.  It is very important to keep this git repository secure; only the sysadmins who will be responsible for maintaining your Arvados cluster should have access to it.\n{% include 'notebox_end' %}\n\nh4. Terraform code configuration\n\nEach section described above contains a @terraform.tfvars@ file with some configuration values that you should set before applying each configuration. You should at least set the AWS region, cluster prefix and domain name in @terraform/vpc/terraform.tfvars@:\n\n<pre><code>{% include 'terraform_vpc_tfvars' %}</code></pre>\n\nIf you don't set the main configuration variables in the @vpc/terraform.tfvars@ file, you will be asked to re-enter these parameters every time you run Terraform.\n\nThe @data-storage/terraform.tfvars@ and @services/terraform.tfvars@ files let you configure additional details, including the SSH public key for deployment, instance & volume sizes, etc. All these configurations are provided with sensible defaults:\n\n<pre><code>{% include 'terraform_datastorage_tfvars' %}</code></pre>\n\n<pre><code>{% include 'terraform_services_tfvars' %}</code></pre>\n\nh4. Set credentials\n\nYou will need an AWS access key and secret key to create the infrastructure.\n\n<pre><code class=\"userinput\">export AWS_ACCESS_KEY_ID=\"anaccesskey\"\nexport AWS_SECRET_ACCESS_KEY=\"asecretkey\"</code></pre>\n\nh4. Create the infrastructure\n\nBuild the infrastructure by running @./installer.sh terraform@.  The last stage will output the information needed to set up the cluster's domain and continue with the installer.  For example:\n\n<pre><code class=\"userinput\">./installer.sh terraform\n...\nApply complete! 
Resources: 16 added, 0 changed, 0 destroyed.\n\nOutputs:\n\narvados_sg_id = \"sg-02f999a99973999d7\"\narvados_subnet_id = \"subnet-01234567abc\"\ncluster_int_cidr = \"10.1.0.0/16\"\ncluster_name = \"xarv1\"\ncompute_subnet_id = \"subnet-abcdef12345\"\ndeploy_user = \"admin\"\ndomain_name = \"xarv1.example.com\"\nletsencrypt_iam_access_key_id = \"AKAA43MAAAWAKAADAASD\"\nprivate_ip = {\n  \"controller\" = \"10.1.1.1\"\n  \"keep0\" = \"10.1.1.3\"\n  \"keep1\" = \"10.1.1.4\"\n  \"keepproxy\" = \"10.1.1.2\"\n  \"shell\" = \"10.1.1.7\"\n  \"workbench\" = \"10.1.1.5\"\n}\npublic_ip = {\n  \"controller\" = \"18.235.116.23\"\n  \"keep0\" = \"34.202.85.86\"\n  \"keep1\" = \"38.22.123.98\"\n  \"keepproxy\" = \"34.231.9.201\"\n  \"shell\" = \"44.208.155.240\"\n  \"workbench\" = \"52.204.134.136\"\n}\nregion_name = \"us-east-1\"\nroute53_dns_ns = tolist([\n  \"ns-1119.awsdns-11.org\",\n  \"ns-1812.awsdns-34.co.uk\",\n  \"ns-437.awsdns-54.com\",\n  \"ns-809.awsdns-37.net\",\n])\nssl_password_secret_name = \"xarv1-arvados-ssl-privkey-password\"\nvpc_id = \"vpc-0999994998399923a\"\nletsencrypt_iam_secret_access_key = \"XXXXXSECRETACCESSKEYXXXX\"\ndatabase_password = <not set>\n</code></pre>\n\nh4. Additional DNS configuration\n\nOnce Terraform has completed, the infrastructure for your Arvados cluster is up and running.  One last piece of DNS configuration is required.\n\nThe domain names for your cluster (e.g. @controller.xarv1.example.com@) are managed via \"Route 53\":https://aws.amazon.com/route53/ and the TLS certificates will be issued using \"Let's Encrypt\":https://letsencrypt.org/ .\n\nYou need to configure the parent domain to delegate to the newly created zone.  For example, you need to configure \"example.com\" to delegate the subdomain \"xarv1.example.com\" to the nameservers for the Arvados hostname records created by Terraform.  You do this by creating an @NS@ record on the parent domain that refers to the name servers listed in the Terraform output parameter @route53_dns_ns@.\n\nIf your parent domain is also controlled by Route 53, the process will be like this:\n\n# Log in to the AWS Console and navigate to the service page for *Route 53*\n# Go to the list of *Hosted zones* and click on the zone for the parent domain\n# Click on *Create record*\n# For *Record name* put the cluster ID\n# For *Record type* choose @NS - Name servers for a hosted zone@\n# For *Value* add the values from Terraform output parameter @route53_dns_ns@, one hostname per line, with punctuation (quotes and commas) removed.\n# Click *Create records*\n\nIf the parent domain is controlled by some other service, follow the guide for the appropriate service.\n\nh4. Other important output parameters\n\nThe certificates will be requested from Let's Encrypt when you run the installer.\n\n* @cluster_int_cidr@ will be used to set @CLUSTER_INT_CIDR@\n\n* You'll also need @compute_subnet_id@ and @arvados_sg_id@ to set @COMPUTE_SUBNET@ and @COMPUTE_SG@ in @local.params@ and when you \"create a compute image\":#create_a_compute_image.\n\nYou can now proceed to \"edit local.params* files\":#localparams.\n\nh3(#inframanual). Create required infrastructure manually\n\nIf you will be setting up infrastructure without using the provided Terraform script, here are the recommendations you will need to consider.\n\nh4. 
Virtual Private Cloud (AWS specific)\n\nWe recommend setting Arvados up in its own \"Virtual Private Cloud (VPC)\":https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html\n\nWhen you do so, you need to configure a couple of additional things:\n\n# \"Create a subnet for the compute nodes\":https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html\n# You should set up a \"security group which allows SSH access (port 22)\":https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html\n# Make sure to add a \"VPC S3 endpoint\":https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints-s3.html\n\nh4(#keep-bucket). S3 Bucket (AWS specific)\n\nWe recommend \"creating an S3 bucket\":https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html for data storage named @${CLUSTER}-nyw5e-000000000000000-volume@.  We recommend creating an IAM role called @${CLUSTER}-keepstore-00-iam-role@ with a \"policy that can read, write, list and delete objects in the bucket\":configure-s3-object-storage.html#IAM .  With the example cluster id @xarv1@ the bucket would be called @xarv1-nyw5e-000000000000000-volume@ and the role would be called @xarv1-keepstore-00-iam-role@.\n\nThese names are recommended because they are the default names used in the configuration template.  If you use different names, you will need to edit the configuration template later.\n\nh4(#hosts). Required hosts\n\nYou will need to allocate several hosts (physical or virtual machines) for the fixed infrastructure of the Arvados cluster.  These machines should have at least 2 cores and 8 GiB of RAM, running a supported Linux distribution.\n\n{% include 'supportedlinux' %}\n\nAllocate the following hosts as appropriate for your site.  On AWS you may choose to do it manually with the AWS console, or with a DevOps tool such as CloudFormation or Terraform.  With the exception of \"keep0\" and \"keep1\", all of these hosts should have external (public) IP addresses if you intend for them to be accessible outside of the private network or VPC.\n\nThe installer will set up the Arvados services on your machines.  Here is the default assignment of services to machines:\n\n# API node\n## postgresql server\n## arvados api server\n## arvados controller  (recommended hostname @controller.${DOMAIN}@ and @*.containers.${DOMAIN}@)\n# KEEPSTORE nodes (at least 1 if using S3 as a Keep backend, else 2)\n## arvados keepstore   (recommended hostname @keep0.${DOMAIN}@ and @keep1.${DOMAIN}@)\n# WORKBENCH node\n## arvados legacy workbench URLs   (recommended hostname @workbench.${DOMAIN}@)\n## arvados workbench2              (recommended hostname @workbench2.${DOMAIN}@)\n## arvados webshell                (recommended hostname @webshell.${DOMAIN}@)\n## arvados websocket               (recommended hostname @ws.${DOMAIN}@)\n## arvados cloud dispatcher\n## arvados keepbalance\n## arvados keepproxy   (recommended hostname @keep.${DOMAIN}@)\n## arvados keepweb     (recommended hostname @download.${DOMAIN}@ and @*.collections.${DOMAIN}@)\n# SHELL node  (optional)\n## arvados shell       (recommended hostname @shell.${DOMAIN}@)\n\nWhen using the database installed by Arvados (and not an \"external database\":#ext-database), the database is stored under @/var/lib/postgresql@.  Arvados logs are also kept in @/var/log@ and @/var/www/arvados-api/shared/log@.  Accordingly, you should ensure that the disk partition containing @/var@ has adequate storage for your planned usage.  We suggest starting with 50GiB of free space on the database host.\n\n
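A quick way to confirm that the @/var@ partition on a host has enough room is to check it directly, for example:\n\n<pre><code class=\"userinput\">df -h /var</code></pre>\n\n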
h4. Additional prerequisites when preparing machines to run the installer\n\n# From the account where you are performing the install, passwordless @ssh@ to each machine\nThis means the client's public key should be added to @~/.ssh/authorized_keys@ on each node.\n# Passwordless @sudo@ access on the account on each machine you will @ssh@ in to\nThis usually means adding the account to the @sudo@ group and having a rule like this in @/etc/sudoers.d/arvados_passwordless@ that allows members of group @sudo@ to execute any command without entering a password.\n<pre>%sudo ALL=(ALL:ALL) NOPASSWD:ALL</pre>\n# @git@ installed on each machine\n# Port 443 reachable by clients\n\n(AWS specific) The machine that runs the arvados cloud dispatcher will need an \"IAM role that allows it to manage EC2 instances.\":{{site.baseurl}}/install/crunch2-cloud/install-dispatch-cloud.html#IAM\n\nIf your infrastructure differs from the setup proposed above (i.e., different hostnames), you can still use the installer, but \"additional customization may be necessary\":#further_customization .\n\nh2(#localparams). Edit @local.params*@ files\n\nThe cluster configuration parameters are included in two files: @local.params@ and @local.params.secrets@. These files can be found wherever you choose to initialize the installation files (e.g., @~/setup-arvados-xarv1@ in these examples).\n\nThe @local.params.secrets@ file is intended to store security-sensitive data such as passwords, private keys, tokens, etc. Depending on the security requirements of the cluster deployment, you may wish to store this file in a secrets store like AWS Secrets Manager or Jenkins credentials.\n\nh3. Parameters from @local.params@:\n\n# Set @CLUSTER@ to the 5-character cluster identifier. (e.g. \"xarv1\")\n# Set @DOMAIN@ to the base DNS domain of the environment. (e.g. \"xarv1.example.com\")\n# Set the @*_INT_IP@ variables with the internal (private) IP addresses of each host. Since services share hosts, some of these addresses will be the same.  See \"note about /etc/hosts\":#etchosts\n# Edit @CLUSTER_INT_CIDR@; this should be the CIDR of the private network that Arvados is running on, e.g. the VPC.  If you used Terraform, this is emitted as @cluster_int_cidr@.\n_CIDR stands for \"Classless Inter-Domain Routing\" and describes which portion of the IP address refers to the network.  For example 192.168.3.0/24 means that the first 24 bits are the network (192.168.3) and the last 8 bits are a specific host on that network._\n_AWS specific: go to the AWS console and into the VPC service; the table view of the VPCs includes a column (IPv4 CIDR) that gives the CIDR for each VPC._\n# Set @INITIAL_USER_EMAIL@ to your email address, as you will be the first admin user of the system.\n\nh3. Parameters from @local.params.secrets@:\n\n# Set each @KEY@ / @TOKEN@ / @PASSWORD@ to a random string.  You can use @installer.sh generate-tokens@:\n<pre><code class=\"userinput\">./installer.sh generate-tokens\nBLOB_SIGNING_KEY=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\nMANAGEMENT_TOKEN=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\nSYSTEM_ROOT_TOKEN=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\nANONYMOUS_USER_TOKEN=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\nDATABASE_PASSWORD=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n</code></pre>\n# Set @DATABASE_PASSWORD@ to a random string (unless you \"already have a database\":#ext-database , in which case you should set it to that database's password)\n   Important! 
If this contains any non-alphanumeric characters, in particular ampersand ('&'), it is necessary to add backslash quoting.\n   For example, if the password is @Lq&MZ<V']d?j@,\n   then with the special characters backslash-quoted it should appear like this in @local.params@:\n<pre><code>DATABASE_PASSWORD=\"Lq\\&MZ\\<V\\'\\]d\\?j\"</code></pre>\n# Set @LE_AWS_*@ credentials to allow Let's Encrypt to authenticate through Route 53\n# Set @DISPATCHER_SSH_PRIVKEY@ to an SSH private key that @arvados-dispatch-cloud@ will use to connect to the compute nodes:\n<pre><code>DISPATCHER_SSH_PRIVKEY=\"-----BEGIN OPENSSH PRIVATE KEY-----\nb3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn\n...\ns4VY40kNxs6MsAAAAPbHVjYXNAaW5zdGFsbGVyAQIDBA==\n-----END OPENSSH PRIVATE KEY-----\"\n</code></pre>You can create one by following the steps described on the \"building a compute node documentation\":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html#sshkeypair page.\n\nh3(#etchosts). Note on @/etc/hosts@\n\nBecause Arvados services are typically accessed by external clients, they are likely to have both a public IP address and an internal IP address.\n\nOn cloud providers such as AWS, sending internal traffic to a service's public IP address can incur egress costs and throttling.  Thus it is very important for internal traffic to stay on the internal network.  The installer implements this by updating @/etc/hosts@ on each node to associate each service's hostname with the internal IP address, so that when Arvados services communicate with one another, they always use the internal network address.  This is NOT a substitute for DNS; you still need to set up DNS names for all of the services that have public IP addresses (it does, however, avoid a complex \"split-horizon\" DNS configuration).\n\nIt is important to be aware of this because if you mistype the IP address for any of the @*_INT_IP@ variables, hosts may unexpectedly be unable to communicate with one another.  If this happens, check and edit as necessary the file @/etc/hosts@ on the host that is failing to make an outgoing connection.\n\nh2(#keep). Configure Keep storage\n\nThe @multi_host/aws@ template uses S3 for storage.  Arvados also supports \"filesystem storage\":configure-fs-storage.html and \"Azure blob storage\":configure-azure-blob-storage.html .  Keep storage configuration can be found in the @arvados.cluster.Volumes@ section of @local_config_dir/pillars/arvados.sls@.\n\nh3. Object storage in S3 (AWS specific)\n\nIf you \"followed the recommended naming scheme\":#keep-bucket for both the bucket and role (or used the provided Terraform script), you're done.\n\nIf you did not follow the recommended naming scheme for either the bucket or role, you'll need to update these parameters in @local.params@:\n\n# Set @KEEP_AWS_S3_BUCKET@ to the name of the \"keepstore bucket you created earlier\":#keep-bucket\n# Set @KEEP_AWS_IAM_ROLE@ to the \"keepstore role you created earlier\":#keep-bucket\n\nYou can also configure a specific AWS Region for the S3 bucket by setting @KEEP_AWS_REGION@.\n\n{% include 'ssl_config_multi' %}\n\nh2(#authentication). Configure your authentication provider (optional, recommended)\n\nBy default, the installer will use the \"Test\" provider, which is a list of usernames and cleartext passwords stored in the Arvados config file.  *This is a low-security configuration and you are strongly advised to configure one of the other \"supported authentication methods\":setup-login.html* .\n\n
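For reference, the Test provider's user list ends up in the @Login.Test@ section of the generated Arvados configuration.  A rough sketch of the shape of that section is below; the username, email, and password shown are placeholders, and the actual values for the initial user come from the @INITIAL_USER*@ settings in the @local.params*@ files:\n\n<pre><code>    Login:\n      Test:\n        Enable: true\n        Users:\n          exampleuser:\n            Email: exampleuser@example.com\n            Password: examplepassword\n</code></pre>\n\n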
h2(#ext-database). Using an external database (optional)\n\nThe standard behavior of the installer is to install and configure PostgreSQL for use by Arvados.  You can optionally configure it to use a separately managed database instead.\n\nArvados requires a database that is compatible with PostgreSQL 9.5 or later.  For example, Arvados is known to work with Amazon Aurora (note: even idle, Arvados services will periodically poll the database, so we strongly advise using \"provisioned\" mode).\n\n# In @local.params@, remove 'database' from the list of roles assigned to the controller node:\n<pre><code>NODES=(\n  [controller.${DOMAIN}]=controller,websocket,dispatcher,keepbalance\n  ...\n)\n</code></pre>\n# In @local.params@, set @DATABASE_INT_IP@ to an empty string and @DATABASE_EXTERNAL_SERVICE_HOST_OR_IP@ to the database endpoint (can be a hostname, does not have to be an IP address).\n<pre><code>DATABASE_INT_IP=\"\"\n...\nDATABASE_EXTERNAL_SERVICE_HOST_OR_IP=\"arvados.xxxxxxx.eu-east-1.rds.amazonaws.com\"\n</code></pre>\n# In @local.params.secrets@, set @DATABASE_PASSWORD@ to the correct value.  \"See the previous section describing correct quoting\":#localparams\n# In @local.params@ you may need to adjust the database name and user.\n\nh2(#further_customization). Further customization of the installation (optional)\n\nIf you are installing on AWS and have followed all of the naming conventions recommended in this guide, you probably don't need to do any further customization.\n\nIf you are installing on a different cloud provider or on HPC, other changes may require editing the Saltstack pillars and state files found in @local_config_dir@.  In particular, @local_config_dir/pillars/arvados.sls@ contains the template (in the @arvados.cluster@ section) used to produce the Arvados configuration file that is distributed to all the nodes.  Consult the \"Configuration reference\":config.html for a comprehensive list of configuration keys.\n\nAny extra Salt \"state\" files you add under @local_config_dir/states@ will be added to the Salt run and applied to the hosts.\n\nh2(#create_a_compute_image). Configure compute nodes\n\n{% include 'branchname' %}\n\nIf you will use fixed compute nodes with an HPC scheduler such as Slurm or LSF, you will need to \"Set up your compute nodes with Docker\":{{site.baseurl}}/install/crunch2/install-compute-node-docker.html or \"Set up your compute nodes with Singularity\":{{site.baseurl}}/install/crunch2/install-compute-node-singularity.html.\n\nOn cloud installations, containers are dispatched in Docker daemons running in the _compute instances_, which need some additional setup.\n\nh3. Build the compute image\n\nFollow \"the instructions to build a cloud compute node image\":{{site.baseurl}}/install/crunch2-cloud/install-compute-node.html using the compute image builder script found in @arvados/tools/compute-images@ in your Arvados clone from \"step 3\":#download.\n\nh3. 
Configure the compute image\n\nOnce the image has been created, open @local.params@ and edit as follows (AWS specific settings are described here; you will need to make custom changes for other cloud providers):\n\n# Set @COMPUTE_AMI@ to the AMI produced by Packer\n# Set @COMPUTE_AWS_REGION@ to the appropriate AWS region\n# Set @COMPUTE_USER@ to the admin user account on the image\n# Set the @COMPUTE_SG@ list to the VPC security group which you set up to allow SSH connections to these nodes\n# Set @COMPUTE_SUBNET@ to the SubnetId of your VPC\n# Update @arvados.cluster.InstanceTypes@ in @local_config_dir/pillars/arvados.sls@ as necessary.  The example instance types are for AWS; other cloud providers will of course have different instance types with different names and specifications.\n(AWS specific) If m5/c5 node types are not available, replace them with m4/c4. You'll need to double check the values for Price and IncludedScratch/AddedScratch for each type that is changed.\n\nh2(#installation). Begin installation\n\nAt this point, you are ready to run the installer script in deploy mode, which will perform the full Arvados installation.\n\nRun this in the @~/setup-arvados-xarv1@ directory:\n\n<pre><code class=\"userinput\">./installer.sh deploy</code></pre>\n\nThis will install and configure Arvados on all the nodes.  It will take a while and produce a lot of logging.  If it runs into an error, it will stop.\n\nh2(#test-install). Confirm the cluster is working\n\nWhen everything has finished, you can run the diagnostics.  There are a couple of ways to do this, listed below.\n\nh3. Running diagnostics from the same system as the installer\n\nRunning the diagnostics requires @arvados-client@ and @docker@ to be installed.  If this is not possible, you can run the diagnostics on your Arvados shell node as explained in the next section.\n\nDepending on where you are running the installer, you need to provide @-internal-client@ or @-external-client@. If you are running the installer from a host connected to the Arvados private network, use @-internal-client@. Otherwise, use @-external-client@.\n\n<pre><code class=\"userinput\">./installer.sh diagnostics (-internal-client|-external-client)</code></pre>\n\nh3. Running diagnostics from a cluster node\n\nYou can run the diagnostics from the cluster's shell node. This has the advantage that you don't need to manage any software on your local system, but it might not be possible if your Arvados cluster doesn't include a shell node.\n\n<pre><code class=\"userinput\">./installer.sh diagnostics-internal</code></pre>\n\nh3(#debugging). Debugging issues\n\nThe installer records log files for each deployment.\n\nMost service logs go to @/var/log/syslog@.\n\nThe logs for the Rails API server can be found in @/var/www/arvados-api/current/log/production.log@ on the appropriate instance(s).\n\nWorkbench 2 is a client-side Javascript application.  If you are having trouble loading Workbench 2, check the browser's developer console (this can be found in \"Tools &rarr; Developer Tools\").\n\n
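For the server-side services, it is often quickest to follow the logs on the affected node while you retry the failing operation.  A minimal sketch (the unit name is just an example; substitute the service you are debugging):\n\n<pre><code class=\"userinput\">sudo tail -f /var/log/syslog\n# or follow a single systemd unit, for example:\nsudo journalctl -f -u arvados-controller</code></pre>\n\n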
h3(#iterating). Iterating on config changes\n\nYou can iterate on the config and maintain the cluster by making changes to @local.params@ and @local_config_dir@ and running @installer.sh deploy@ again.\n\nIf you are debugging a configuration issue on a specific node, you can speed up the cycle a bit by deploying just one node:\n\n<pre><code class=\"userinput\">./installer.sh deploy keep0.xarv1.example.com</code></pre>\n\nHowever, once you have a final configuration, you should run a full deploy to ensure that the configuration has been synchronized on all the nodes.\n\nh3(#common-problems). Common problems and solutions\n\nh4. PG::UndefinedTable: ERROR:  relation \\\"api_clients\\\" does not exist\n\nThe arvados-api-server package sets up the database as a post-install script.  If the database host or password wasn't set correctly (or quoted correctly) at the time that package is installed, it won't be able to set up the database.\n\nThis will manifest as an error like this:\n\n<pre>\n#<ActiveRecord::StatementInvalid: PG::UndefinedTable: ERROR:  relation \\\"api_clients\\\" does not exist\n</pre>\n\nIf this happens, you need to\n\n1. Correct the database information\n2. Run @./installer.sh deploy xarv1.example.com@ to update the configuration on the API/controller node\n3. Log in to the API/controller server node, then run this command to re-run the post-install script, which will set up the database:\n<pre><code class=\"userinput\">dpkg-reconfigure arvados-api-server</code></pre>\n4. Re-run @./installer.sh deploy@ again to synchronize everything, and so that the install steps that need to contact the API server are run successfully.\n\nh4. Missing ENA support (AWS specific)\n\nIf the AMI wasn't built with ENA (extended networking) support and the instance type requires it, the instance will fail to start.  You'll see an error in syslog on the node that runs @arvados-dispatch-cloud@.  The solution is to build a new AMI with @--aws-ena-support true@.\n\nh2(#initial_user). Initial user and login\n\nAt this point you should be able to log into the Arvados cluster. The initial URL will be\n\n@https://workbench.${DOMAIN}@\n\nIf you did *not* \"configure a different authentication provider\":#authentication you will be using the \"Test\" provider, and the provision script creates an initial user for testing purposes. This user is configured as administrator of the newly created cluster.  It uses the values of @INITIAL_USER@ and @INITIAL_USER_PASSWORD@ from the @local.params*@ file.\n\nIf you *did* configure a different authentication provider, the first user to log in will automatically be given Arvados admin privileges.\n\nh2(#monitoring). Monitoring and Metrics\n\nYou can monitor the health and performance of the system using the admin dashboard:\n\n@https://grafana.${DOMAIN}@\n\nTo log in, use username \"admin\" and @${INITIAL_USER_PASSWORD}@ from @local.params.secrets@.\n\nOnce logged in, you will want to add the dashboards to the front page.\n\n# On the left icon bar, click on \"Browse\"\n# You should see a folder called \"Arvados Cluster\", click to open it\n## If you don't see anything, make sure the check box next to \"Starred\" is not selected\n# You should see three dashboards \"Arvados cluster overview\", \"Node exporter\" and \"Postgres exporter\"\n# Visit each dashboard, at the top of the page click on the star next to the title to \"Mark as favorite\"\n# They should now be linked on the front page.\n\n
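Besides the dashboards, you can confirm that metrics are being collected by querying a service's metrics endpoint directly with the management token.  A sketch, using the example domain from this guide; it assumes you have exported the @MANAGEMENT_TOKEN@ value from @local.params.secrets@ in your shell:\n\n<pre><code class=\"userinput\">curl -s -H \"Authorization: Bearer $MANAGEMENT_TOKEN\" https://controller.xarv1.example.com/metrics | head</code></pre>\n\n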
h2(#load_balancing). Load balancing controllers (optional)\n\nIn order to handle high loads and perform rolling upgrades, the controller service can be scaled out to a number of hosts, and the installer makes this fairly simple.\n\nFirst, you should take care of the infrastructure deployment: if you use our Terraform code, you will need to set up the @terraform.tfvars@ in @terraform/vpc/@ so that in addition to the node named @controller@ (the load-balancer), a number of @controllerN@ nodes (backends) are defined as needed, and added to the @internal_service_hosts@ list.\n\nWe suggest that the backend nodes just hold the controller service and nothing else, so they can be easily created or destroyed as needed without disrupting other services.\n\nThe following is an example @terraform/vpc/terraform.tfvars@ file that describes a cluster with a load-balancer, 2 backend nodes, a separate database node, a shell node, a keepstore node and a workbench node that will also hold other miscellaneous services:\n\n<pre><code>region_name = \"us-east-1\"\ncluster_name = \"xarv1\"\ndomain_name = \"xarv1.example.com\"\n# Include controller nodes in this list so instances are assigned to the\n# private subnet. Only the balancer node should be connecting to them.\ninternal_service_hosts = [ \"keep0\", \"shell\", \"database\", \"controller1\", \"controller2\" ]\n\n# Assign private IPs for the controller nodes. These will be used to create\n# internal DNS resolutions that will get used by the balancer and database nodes.\nprivate_ip = {\n  controller = \"10.1.1.11\"\n  workbench = \"10.1.1.15\"\n  database = \"10.1.2.12\"\n  controller1 = \"10.1.2.21\"\n  controller2 = \"10.1.2.22\"\n  shell = \"10.1.2.17\"\n  keep0 = \"10.1.2.13\"\n}</code></pre>\n\nOnce the infrastructure is deployed, you'll then need to define which node will be using the @balancer@ role and which will be the @controller@ nodes in @local.params@, as shown in this partial example:\n\n<pre><code>NODES=(\n  [controller.${DOMAIN}]=balancer\n  [controller1.${DOMAIN}]=controller\n  [controller2.${DOMAIN}]=controller\n  [database.${DOMAIN}]=database\n  ...\n)\n</code></pre>\n\nNote that we also assign the @database@ role to its own node instead of just leaving it in a shared controller node.\n\nEach time you run @installer.sh deploy@, the system will automatically do rolling upgrades. This means it will make changes to one controller node at a time, after removing it from the balancer so that there's no downtime.\n\nh2(#post_install). After the installation\n\nAs part of the operation of @installer.sh@, it automatically creates a @git@ repository with your configuration templates.  You should retain this repository but *be aware that it contains sensitive information* (passwords and tokens used by the Arvados services as well as cloud credentials if you used Terraform to create the infrastructure).\n\nAs described in \"Iterating on config changes\":#iterating you may use @installer.sh deploy@ to re-run the Salt to deploy configuration changes and upgrades.  However, be aware that the configuration templates created for you by @installer.sh@ are a snapshot and are not automatically kept up to date.\n\nWhen deploying upgrades, consult the \"Arvados upgrade notes\":{{site.baseurl}}/admin/upgrading.html to see if changes need to be made to the configuration file template in @local_config_dir/pillars/arvados.sls@.  
To specify the version to upgrade to, set the @VERSION@ parameter in @local.params@.\n\nSee also \"Maintenance and upgrading\":{{site.baseurl}}/admin/maintenance-and-upgrading.html for more information.\n"
  },
  {
    "path": "doc/install/install-postgresql.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Install PostgreSQL 9.4+\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados requires at least version *9.4* of PostgreSQL. We recommend using version 10 or newer.\n\n* \"AWS\":#aws\n* \"Red Hat, AlmaLinux, and Rocky Linux\":#rh8\n* \"Debian or Ubuntu\":#debian\n\nh3(#aws). AWS\n\nWhen deploying on AWS, Arvados can use an Aurora RDS PostgreSQL database. Aurora Serverless is not recommended.\n\nh3(#rh8). Red Hat, AlmaLinux, and Rocky Linux\n\n{% comment %}\nThe default version on RH8 is PostgreSQL 10. You can install up to PostgreSQL 13.\n{% endcomment %}\n\n# Install PostgreSQL\n  <notextile><pre># <span class=\"userinput\">dnf install postgresql-server postgresql-contrib</span></pre></notextile>\n# Initialize the database\n  <notextile><pre># <span class=\"userinput\">postgresql-setup initdb</span></pre></notextile>\n# Configure the database to accept password connections from localhost\n  <notextile><pre><code># <span class=\"userinput\">sed -ri -e 's/^(host +all +all +(127\\.0\\.0\\.1\\/32|::1\\/128) +)ident$/\\1md5/' /var/lib/pgsql/data/pg_hba.conf</span></code></pre></notextile>\n# Configure the database to accept password connections from the local network (replace @10.9.8.0/24@ with your private network mask)\n  <notextile><pre><code># <span class=\"userinput\">echo 'host all all 10.9.8.0/24 md5' | tee -a /var/lib/pgsql/data/pg_hba.conf</span></code></pre></notextile>\n# Configure the database to launch at boot and start now\n  <notextile><pre># <span class=\"userinput\">systemctl enable --now postgresql</span></pre></notextile>\n\nh3(#debian). Debian or Ubuntu\n\nAll supported versions of Debian and Ubuntu include a version of PostgreSQL you can use with Arvados.\n\n# Install PostgreSQL\n<notextile><pre># <span class=\"userinput\">apt --no-install-recommends install postgresql postgresql-contrib</span></pre></notextile>\n# Configure PostgreSQL to accept password connections from the local network (replace @10.9.8.0/24@ with your private network mask)\n<notextile><pre># <span class=\"userinput\">echo 'host all all 10.9.8.0/24 md5' | tee -a /etc/postgresql/*/main/pg_hba.conf</span></pre></notextile>\n# Configure the database to launch at boot and start now\n<notextile><pre># <span class=\"userinput\">systemctl enable --now postgresql</span></pre></notextile>\n"
  },
  {
    "path": "doc/install/install-shell-server.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Set up a shell node\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Introduction\":#introduction\n# \"Install Dependencies and SDKs\":#dependencies\n# \"Install git and curl\":#install-packages\n# \"Create record for VM\":#vm-record\n# \"Install arvados-login-sync\":#arvados-login-sync\n# \"Run arvados-login-sync periodically\":#login-sync-cron\n# \"Confirm working installation\":#confirm-working\n\nh2(#introduction). Introduction\n\nArvados support for shell nodes allows you to use Arvados permissions to grant Linux shell accounts to users.\n\nA shell node runs the @arvados-login-sync@ service to manage user accounts, and typically has Arvados utilities and SDKs pre-installed.  Users are allowed to log in and run arbitrary programs.  For optimal performance, the Arvados shell server should be on the same LAN as the Arvados cluster.\n\nBecause the Arvados @config.yml@ _contains secrets_, it should *not* be present on shell nodes.\n\nShell nodes should be separate virtual machines from the VMs running other Arvados services.  You may choose to grant root access to users so that they can customize the node, for example, installing new programs.  This has security implications depending on whether a shell node is single-user or multi-user.\n\nA single-user shell node should be set up so that it only stores Arvados access tokens that belong to that user.  In that case, that user can be safely granted root access without compromising other Arvados users.\n\nIn the multi-user shell node case, a malicious user with @root@ access could access other users' Arvados tokens.  Users should only be given @root@ access on a multi-user shell node if you would trust them to be Arvados administrators.  Be aware that with access to the @docker@ daemon, it is trivial to gain *root* access to any file on the system, so giving users @docker@ access should be considered equivalent to @root@ access.\n\nh2(#dependencies). Install Dependencies and SDKs\n\n# \"Install Ruby and Bundler\":ruby.html\n# \"Install the Python SDK\":{{site.baseurl}}/sdk/python/sdk-python.html\n# \"Install the FUSE driver\":{{site.baseurl}}/sdk/fuse/install.html\n# \"Install the CLI\":{{site.baseurl}}/sdk/cli/install.html\n# \"Install the R SDK\":{{site.baseurl}}/sdk/R/index.html (optional)\n# \"Install Docker\":install-docker.html (optional)\n\n{% assign arvados_component = 'git curl' %}\n\n{% include 'install_packages' %}\n\nh2(#vm-record). Create record for VM\n\nAs an admin, create an Arvados virtual_machine object representing this shell server. This will return a UUID.\n\n<notextile>\n<pre>\n<code>apiserver:~$ <span class=\"userinput\">arv --format=uuid virtual_machine create --virtual-machine '{\"hostname\":\"<b>shell.ClusterID.example.com</b>\"}'</span>\nzzzzz-2x53u-zzzzzzzzzzzzzzz</code>\n</pre>\n</notextile>\n\nh2(#arvados-login-sync). Install arvados-login-sync\n\nThe @arvados-login-sync@ service makes it possible for Arvados users to log in to the shell server.  It sets up login accounts, updates group membership, adds each user's SSH public keys to the @~/.ssh/authorized_keys@ file, and adds an Arvados token to @~/.config/arvados/settings.conf@ .\n\nInstall the @arvados-login-sync@ program from RubyGems.\n\n<notextile>\n<pre>\n<code>shellserver:# <span class=\"userinput\">gem install arvados-login-sync</span></code>\n</pre>\n</notextile>\n\nh2(#login-sync-cron). 
Run arvados-login-sync periodically\n\nCreate a cron job to run the @arvados-login-sync@ program every 2 minutes.  This will synchronize user accounts.\n\nIf this is a single-user shell node, then @ARVADOS_API_TOKEN@ should be a token for that user.  See \"Create a token for a user\":{{site.baseurl}}/admin/user-management-cli.html#create-token .\n\nIf this is a multi-user shell node, then @ARVADOS_API_TOKEN@ should be an administrator token such as the @SystemRootToken@.  See the discussion in the \"introduction\":#introduction about security on multi-user shell nodes.\n\nSet @ARVADOS_VIRTUAL_MACHINE_UUID@ to the UUID from \"Create record for VM\":#vm-record .\n\nh3. Standalone cluster\n\n<notextile>\n<pre>\n<code>shellserver:# <span class=\"userinput\">umask 0700; tee /etc/cron.d/arvados-login-sync &lt;&lt;EOF\nARVADOS_API_HOST=\"<strong>ClusterID.example.com</strong>\"\nARVADOS_API_TOKEN=\"<strong>xxxxxxxxxxxxxxxxx</strong>\"\nARVADOS_VIRTUAL_MACHINE_UUID=\"<strong>zzzzz-2x53u-zzzzzzzzzzzzzzz</strong>\"\n*/2 * * * * root arvados-login-sync\nEOF</span></code>\n</pre>\n</notextile>\n\nh3. Part of a LoginCluster federation\n\nIf the cluster is part of a \"federation with centralized user management\":../admin/federation.html#LoginCluster , the login sync script needs to be given an admin token from the login cluster.\n\n<notextile>\n<pre>\n<code>shellserver:# <span class=\"userinput\">umask 0700; tee /etc/cron.d/arvados-login-sync &lt;&lt;EOF\nARVADOS_API_HOST=\"<strong>ClusterID.example.com</strong>\"\nARVADOS_API_TOKEN=\"<strong>yyyloginclusteradmintokenyyyy</strong>\"\nARVADOS_VIRTUAL_MACHINE_UUID=\"<strong>zzzzz-2x53u-zzzzzzzzzzzzzzz</strong>\"\n*/2 * * * * root arvados-login-sync\nEOF</span></code>\n</pre>\n</notextile>\n\n\nh2(#confirm-working). Confirm working installation\n\nA user should be able to log in to the shell server when the following conditions are satisfied:\n\n# As an admin user, you have given the user permission to log in using the Workbench &rarr; Admin menu &rarr; \"Users\" item &rarr; \"Show\" button &rarr; \"Admin\" tab &rarr; \"Setup account\" button.\n# The cron job has run.\n\nIn order to log in via SSH, the user must also upload an SSH public key.  Alternatively, if configured, users can log in using \"Webshell\":install-webshell.html .\n\nSee also \"how to add a VM login permission link at the command line\":../admin/user-management-cli.html\n"
  },
  {
    "path": "doc/install/install-single-host.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Single host Arvados\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Limitations of the single host install\":#limitations\n# \"Prerequisites and planning\":#prerequisites\n# \"Download the installer\":#download\n# \"Install Ansible\":#install-ansible\n# \"Set up cluster configuration\":#localparams\n# \"Set up cluster inventory\":#inventory\n# \"Run the installer playbook\":#run-playbook\n# \"Test the cluster\":#test-install\n# \"Changing your configuration\":#further_customization\n# \"Upgrading your Arvados cluster\":#post_install\n\nh2(#limitations). Limitations of the single host install\n\n*NOTE: The single host installation is a good choice for evaluating Arvados, but it is not recommended for production use.*\n\nUsing the default configuration, the single host install has scaling limitations compared to a production multi-host install:\n\n* It uses the local @/var@ partition to store all user data and logs.\n* It uses the @crunch-dispatch-local@ dispatcher, which has a limit of eight concurrent jobs.\n* Because jobs and Arvados services all run on the same machine, they will compete for CPU/RAM resources.\n\nh2(#prerequisites). Prerequisites and planning\n\nh3. Cluster ID\n\nChoose a 5-character cluster identifier that will represent the cluster. Refer to \"our guidelines on choosing a cluster identifier\":../architecture/federation.html#cluster_id.  Only lowercase letters and digits 0-9 are allowed.  Our documentation uses @xurid@ throughout. You should replace this each time it appears with your chosen cluster identifier.\n\nh3. Cluster host\n\nYou will need a dedicated (virtual) machine for your Arvados server with at least 2 cores and 8 GiB of RAM (4+ cores / 16+ GiB recommended if you are running workflows) running a supported Linux distribution:\n\n{% include 'supportedlinux' %}\n\nThe single host install stores all user data and logs under @/var@. You should ensure that this partition has adequate storage for your planned usage.  We suggest starting with at least 50GiB of free space.\n\nYou must be able to connect to this host via SSH. Your account must have permission to run arbitrary commands with @sudo@.\n\nh2(#download). Download the installer\n\nThe Ansible installer is only available in the Arvados source tree. Clone a copy of the Arvados source for the version of Arvados you're using in a directory convenient for you:\n\n{% include 'branchname' %}\n<notextile>\n<pre><code>~$ <span class=\"userinput\">git clone --depth=1 --branch=<strong>{{ branchname }}</strong> https://github.com/arvados/arvados ~/<strong>arvados</strong></span>\n</code></pre>\n</notextile>\n\nh2(#install-ansible). Install Ansible\n\n{% include 'install_ansible' header_level: 'h3' %}\n\nh2(#localparams). Set up cluster configuration\n\nCopy the example cluster configuration from the Arvados source tree to a location outside it. We recommend you use your chosen cluster ID in the filename to help keep it unique. For example:\n\n<notextile>\n<pre>$ <code class=\"userinput\">cp arvados/tools/ansible/examples/simple-cluster-config.yml ~/<strong>xurid-</strong>config.yml\n</code></pre>\n</notextile>\n\nOpen the copy you created in your editor, and make changes following the instructions at the top of the file.\n\nh2(#inventory). Set up cluster inventory\n\nCopy the example cluster inventory from the Arvados source tree to a location outside it. 
We recommend you use your chosen cluster ID in the filename to help keep it unique. For example:\n\n<notextile>\n<pre>$ <code class=\"userinput\">cp arvados/tools/ansible/examples/simple-cluster-inventory.yml ~/<strong>xurid-</strong>inventory.yml\n</code></pre>\n</notextile>\n\nOpen the copy you created in your editor and make the changes noted in the comments:\n\n* Under @hosts:@, change @hostname.example@ to the hostname or address of your cluster node.\n* Change @arvados_config_file@ to the path of the cluster configuration you created in the previous step.\n* Change @arvados_cluster_id@ to your chosen cluster ID.\n\nYou may make other changes noted in the comments, but the changes listed above are required.\n\nh2(#run-playbook). Run the installer playbook\n\nWith your cluster configuration and inventory complete, you can use them to run the installer playbook:\n\n<notextile>\n<pre>$ <code class=\"userinput\">cd arvados/tools/ansible</code>\narvados/tools/ansible $ <code class=\"userinput\">ansible-playbook -Ki <strong>~/xurid-inventory.yml</strong> install-arvados-cluster.yml</code>\n</pre>\n</notextile>\n\nThis will prompt you for a @BECOME password:@. Enter your sudo password on the cluster node. Ansible will use this to perform privileged system configuration. You will see it start to log tasks like:\n\n<notextile>\n<pre>PLAY [Bootstrap nodes] *********************************************************\n\nTASK [Load Arvados configuration file] *****************************************\nok: [hostname.example -> localhost]\n\nTASK [Load Arvados cluster configuration] **************************************\nok: [hostname.example]\n\nTASK [ansible.builtin.include_role : distro_bootstrap] *************************\n\nTASK [distro_bootstrap : Get distribution IDs] *********************************\nchanged: [hostname.example]\n</pre>\n</notextile>\n\nIf all goes well, it will finish with a @PLAY RECAP@ reporting @failed=0@, which indicates all tasks were successful:\n\n<notextile>\n<pre>PLAY RECAP *********************************************************************\nhostname.example : ok=161  changed=34   unreachable=0    <strong>failed=0</strong>    skipped=23   rescued=0    ignored=0\n</pre>\n</notextile>\n\nh3(#playbook-problems). Diagnosing problems with the playbook run\n\nIf the @PLAY RECAP@ indicates that a task failed, that will typically be logged with a message like this:\n\n<notextile>\n<pre>TASK [arvados_controller : Start and enable arvados-controller.service] ********\nfatal: [hostname.example]: FAILED! => {\"changed\": false, \"msg\": \"Unable to restart service arvados-controller.service: Job for arvados-controller.service failed because the control process exited with error code.\\nSee \\\"systemctl status arvados-controller.service\\\" and \\\"journalctl -xeu arvados-controller.service\\\" for details.\\n\"}\n</pre>\n</notextile>\n\nThe @TASK@ line gives you some context for what failed. The first part (@arvados_controller@ in this example) describes generally what Arvados service it was configuring. The rest of the line describes the specific step it was taking (starting @arvados-controller.service@ in this example). This context can suggest where you might check your configuration for problems or look on the cluster node for additional information. This example problem was caused by the Controller service in the cluster configuration trying to use an already-claimed port in one of the @InternalURLs@.\n\n
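When the error message alone is not enough, re-running the playbook with increased verbosity often helps; @-v@ and @-vv@ are standard Ansible flags that print the parameters and results of each task.  For example, with the inventory name used above:\n\n<notextile>\n<pre>arvados/tools/ansible $ <code class=\"userinput\">ansible-playbook -vv -Ki <strong>~/xurid-inventory.yml</strong> install-arvados-cluster.yml</code>\n</pre>\n</notextile>\n\nh2(#test-install). Test the cluster\n\nh3. 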
Run diagnostics\n\nThe @arvados-client diagnostics@ command can check all services on a cluster to identify problems with inconsistent configuration. *On your cluster node*, install and run it like this:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">sudo apt install arvados-client</span>\n$ <span class=\"userinput\">sudo arvados-client diagnostics -internal-client</span>\nINFO       5: running health check (same as `arvados-server check`)\nINFO      10: getting discovery document from https://hostname.example:8443/discovery/v1/apis/arvados/v1/rest\nINFO      20: getting exported config from https://hostname.example:8443/arvados/v1/config\n[…]\nINFO     160: running a container\nINFO      ... container request uuid = xurid-xvhdp-12345abcde67890\nINFO      ... container request submitted, waiting up to 10m for container to run\nINFO    9990: deleting temporary collection\nINFO    --- no errors ---\n</code></pre>\n</notextile>\n\nh3. Access Workbench\n\nThe default Ansible inventory deploys Arvados with a self-signed certificate. If you deployed this way, you will have the best Workbench experience if you configure your browser to trust that certificate for Workbench and its supporting services. Follow the instructions for your specific browser below.\n\nIf you configured the inventory with a different certificate that is already trusted by your browser, you can skip these steps. You should be able to open the URL from @Services.Workbench2.ExternalURL@ from your cluster configuration in your browser.\n\nh4. Trusting self-signed certificates in Chrome\n\n{% comment %}\nLast updated for Chrome v138\n{% endcomment %}\n\n# Find the @arvados_tls.Default@ setting in your Ansible inventory.\n# If those options specify @remote: true@, copy the @cert@ path from your cluster host to the host where you're running the browser. Note you _only_ need the @cert@ file, not the @key@ file.\n# In the URL bar, enter @chrome://certificate-manager/@ and open that page.\n# Under the \"Custom\" header, open \"Installed by you.\"\n# Next to \"Trusted Certificates,\" press the \"Import\" button.\n# In the file picker dialog, open your copy of the @arvados_tls.Default.cert@ file.\n\nNow you should be able to open the URL from @Services.Workbench2.ExternalURL@ from your cluster configuration in your browser. You can skip the next section unless you also want to set up Firefox.\n\nh4. Trusting self-signed certificates in Firefox\n\n{% comment %}\nLast updated for Firefox 140\n{% endcomment %}\n\n# Open the \"Edit\" menu and select \"Settings.\"\n# Find and press the \"View Certificates…\" button to open the Certificate Manager.\n# Open the \"Servers\" tab.\n# Press the \"Add Exception…\" button.\n# Enter the @ExternalURL@ in your cluster configuration for the @Workbench2@ service.\n# Press the \"Get Certificate\" button.\n# Press the \"Confirm Security Exception\" button.\n# Repeat the process from step 4 with your configured URLs for the @Controller@, @Keepproxy@, @WebDAV@, and @WebDAVDownload@ services.\n\nNow you should be able to open the URL from @Services.Workbench2.ExternalURL@ from your cluster configuration in your browser.\n\nh2(#further_customization). Changing your configuration\n\nIn the future, if you want to make changes to your Arvados cluster or Ansible inventory configuration, simply edit those files and \"run the playbook again\":#run-playbook. The playbook will deploy your changes to all the component services.\n\n
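For example, after editing your cluster configuration file, the same playbook invocation used for the initial install applies the update:\n\n<notextile>\n<pre>arvados/tools/ansible $ <code class=\"userinput\">ansible-playbook -Ki <strong>~/xurid-inventory.yml</strong> install-arvados-cluster.yml</code>\n</pre>\n</notextile>\n\nh2(#post_install). 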
Upgrading your Arvados cluster\n\nWhen a new version of Arvados is released, the general process to upgrade the cluster is:\n\n# In your Arvados checkout directory, @git fetch@ and then @git switch@ to the branch or tag that corresponds to the release you want to use.\n# Consult the \"Arvados upgrade notes\":{{site.baseurl}}/admin/upgrading.html to see if you need or want to make changes to your cluster configuration file.\n# \"Run the playbook again\":#run-playbook with your cluster inventory.\n\nSee also \"Maintenance and upgrading\":{{site.baseurl}}/admin/maintenance-and-upgrading.html for more information.\n"
  },
  {
    "path": "doc/install/install-webshell.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Configure webshell\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Introduction\":#introduction\n# \"Prerequisites\":#prerequisites\n# \"Update config.yml\":#configure\n# \"Update nginx configuration\":#update-nginx\n# \"Install packages\":#install-packages\n# \"Configure shellinabox\":#config-shellinabox\n# \"Configure pam\":#config-pam\n# \"Confirm working installation\":#confirm-working\n\nh2(#introduction). Introduction\n\nArvados supports @webshell@, which allows ssh access to shell nodes via the browser. This functionality is integrated in @Workbench@.\n\n@Webshell@ is provided by the @shellinabox@ package which runs on each shell node for which webshell is enabled. For authentication, a supported @pam library@ that allows authentication against Arvados is also required. One Nginx (or similar web server) virtualhost is also needed to expose all the @shellinabox@ instances via https.\n\nh2(#prerequisites). Prerequisites\n\n# \"Install Workbench 2\":{{site.baseurl}}/install/install-workbench2-app.html\n# \"Set up a shell node\":{{site.baseurl}}/install/install-shell-server.html\n\nh2(#configure). Update config.yml\n\nEdit the cluster config at @config.yml@ and set @Services.WebShell.ExternalURL@.  Replace @zzzzz@ with your cluster id. Workbench will use this information to activate its support for webshell.\n\n<notextile>\n<pre><code>    Services:\n      WebShell:\n        InternalURLs: {}\n        ExternalURL: <span class=\"userinput\">https://webshell.ClusterID.example.com/</span>\n</span></code></pre>\n</notextile>\n\nh2(#update-nginx). Update Nginx configuration\n\nThe arvados-webshell service will be accessible from anywhere on the internet, so we recommend using SSL for transport encryption. This Nginx virtualhost could live on your Workbench server, or any other server that is reachable by your Workbench users and can access the @shell-in-a-box@ service on the shell node(s) on port 4200.\n\nUse a text editor to create a new file @/etc/nginx/conf.d/arvados-webshell.conf@ with the following configuration.  
Options that need attention are marked in <span class=\"userinput\">red</span>.\n\n<notextile><pre>\nupstream arvados-webshell {\n  server                <span class=\"userinput\">shell.ClusterID.example.com</span>:<span class=\"userinput\">4200</span>;\n}\n\nserver {\n  listen                443 ssl;\n  server_name           webshell.<span class=\"userinput\">ClusterID.example.com</span>;\n\n  proxy_connect_timeout 90s;\n  proxy_read_timeout    300s;\n\n  ssl                   on;\n  ssl_certificate       <span class=\"userinput\">/YOUR/PATH/TO/cert.pem</span>;\n  ssl_certificate_key   <span class=\"userinput\">/YOUR/PATH/TO/cert.key</span>;\n\n  location /<span class=\"userinput\">shell.ClusterID</span> {\n    if ($request_method = 'OPTIONS') {\n       add_header 'Access-Control-Allow-Origin' '*';\n       add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';\n       add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';\n       add_header 'Access-Control-Max-Age' 1728000;\n       add_header 'Content-Type' 'text/plain charset=UTF-8';\n       add_header 'Content-Length' 0;\n       return 204;\n    }\n    if ($request_method = 'POST') {\n       add_header 'Access-Control-Allow-Origin' '*';\n       add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';\n       add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';\n    }\n    if ($request_method = 'GET') {\n       add_header 'Access-Control-Allow-Origin' '*';\n       add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';\n       add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';\n    }\n\n    proxy_ssl_session_reuse off;\n    proxy_read_timeout  90;\n    proxy_set_header    X-Forwarded-Proto https;\n    proxy_set_header    Host $http_host;\n    proxy_set_header    X-Real-IP $remote_addr;\n    proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;\n    proxy_pass          http://arvados-webshell;\n  }\n}\n</pre></notextile>\n\nNote that the location line in the nginx config matches your shell node hostname *without domain*, because that is how the shell node was defined in the \"Set up a shell node\":{{site.baseurl}}/install/install-shell-server.html#vm-record instructions. It makes for a more user-friendly experience in Workbench.\n\nFor additional shell nodes with @shell-in-a-box@, add @location@ and @upstream@ sections as needed.\n\n{% assign arvados_component = 'shellinabox libpam-arvados-go' %}\n\n{% include 'install_packages' %}\n\nh2(#config-shellinabox). Configure shellinabox\n\nh3. Red Hat, AlmaLinux, and Rocky Linux\n\nEdit @/etc/sysconfig/shellinaboxd@:\n\n<notextile><pre>\n# TCP port that shellinaboxd's webserver listens on\nPORT=4200\n\n# SSL is disabled because it is terminated in Nginx. Adjust as needed.\nOPTS=\"--disable-ssl --no-beep --service=/<span class=\"userinput\">shell.ClusterID.example.com</span>:AUTH:HOME:SHELL\"\n</pre></notextile>\n\n<notextile>\n<pre>\n<code># <span class=\"userinput\">systemctl enable shellinabox</span></code>\n<code># <span class=\"userinput\">systemctl start shellinabox</span></code>\n</pre>\n</notextile>\n\n
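After starting the service, you can optionally confirm that @shellinabox@ is listening on the configured port.  This is a plain HTTP check from the shell node itself, not an Arvados-specific command:\n\n<notextile>\n<pre>\n<code>shellserver:# <span class=\"userinput\">curl -s -o /dev/null -w '%{http_code}\\n' http://localhost:4200/</span></code>\n</pre>\n</notextile>\n\nh3. 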
Debian and Ubuntu\n\nEdit @/etc/default/shellinabox@:\n\n<notextile><pre>\n# TCP port that shellinaboxd's webserver listens on\nSHELLINABOX_PORT=4200\n\n# SSL is disabled because it is terminated in Nginx. Adjust as needed.\nSHELLINABOX_ARGS=\"--disable-ssl --no-beep --service=/<span class=\"userinput\">shell.ClusterID.example.com</span>:AUTH:HOME:SHELL\"\n</pre></notextile>\n\n<notextile>\n<pre>\n<code># <span class=\"userinput\">systemctl enable shellinabox</span></code>\n<code># <span class=\"userinput\">systemctl start shellinabox</span></code>\n</pre>\n</notextile>\n\nh2(#config-pam). Configure pam\n\nUse a text editor to create a new file @/etc/pam.d/shellinabox@ with the following configuration.  Options that need attention are marked in <span class=\"userinput\">red</span>.\n\n<notextile><pre>\n# This example is a stock Debian \"login\" file with pam_arvados\n# replacing pam_unix. It can be installed as /etc/pam.d/shellinabox .\n\nauth       optional   pam_faildelay.so  delay=3000000\nauth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so\nauth       requisite  pam_nologin.so\nsession [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close\nsession       required   pam_env.so readenv=1\nsession       required   pam_env.so readenv=1 envfile=/etc/default/locale\n\n# The first argument is the address of the API server.  The second\n# argument is this shell node's hostname.  The hostname must match the\n# \"hostname\" field of the virtual_machine record.\nauth [success=1 default=ignore] /usr/lib/pam_arvados.so <span class=\"userinput\">ClusterID.example.com</span> <span class=\"userinput\">shell.ClusterID.example.com</span>\n\nauth    requisite            pam_deny.so\nauth    required            pam_permit.so\n\nauth       optional   pam_group.so\nsession    required   pam_limits.so\nsession    optional   pam_lastlog.so\nsession    optional   pam_motd.so  motd=/run/motd.dynamic\nsession    optional   pam_motd.so\nsession    optional   pam_mail.so standard\n\n@include common-account\n@include common-session\n@include common-password\n\nsession [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open\n</pre></notextile>\n\nh2(#confirm-working). Confirm working installation\n\nWe recommend using the \"Cluster diagnostics tool.\":diagnostics.html\n\nHere are some other checks you can perform manually.\n\nA user should now be able to log in to the shell server, using webshell via Workbench. Please refer to \"Accessing an Arvados VM with Webshell\":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html\n"
  },
  {
    "path": "doc/install/install-workbench2-app.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Install Workbench 2\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Update config.yml\":#update-config\n# \"Update Nginx configuration\":#update-nginx\n# \"Install arvados-workbench2\":#install-packages\n# \"Restart the API server and controller\":#restart-api\n# \"Confirm working installation\":#confirm-working\n# \"Trusted client setting\":#trusted_client\n\nWorkbench2 is the web-based user interface for Arvados.\n\n{% include 'notebox_begin' %}\nWorkbench2 is the replacement for Arvados Workbench. Workbench2 is suitable for day-to-day use, but does not yet implement every feature of the traditional Workbench.\n{% include 'notebox_end' %}\n\nh2(#configure). Update config.yml\n\nEdit @config.yml@ to set the keys below.  The full set of configuration options are in the \"Workbench section of config.yml\":{{site.baseurl}}/admin/config.html\n\n<notextile>\n<pre><code>    Services:\n      Workbench2:\n        ExternalURL: <span class=\"userinput\">\"https://workbench2.ClusterID.example.com\"</span>\n</code></pre>\n</notextile>\n\nh2(#update-nginx). Update Nginx configuration\n\nWorkbench2 does not require its own database. It is a set of html, javascript and css files that are served as static files from Nginx.\n\nUse a text editor to create a new file @/etc/nginx/conf.d/arvados-workbench2.conf@ with the following configuration.  Options that need attention are marked in <span class=\"userinput\">red</span>.\n\n<notextile>\n<pre><code>server {\n    listen       80;\n    server_name  workbench2.<span class=\"userinput\">ClusterID.example.com</span>;\n    return 301   https://workbench2.<span class=\"userinput\">ClusterID.example.com</span>$request_uri;\n}\n\nserver {\n  listen       443 ssl;\n  server_name  workbench2.<span class=\"userinput\">ClusterID.example.com</span>;\n\n  ssl_certificate     <span class=\"userinput\">/YOUR/PATH/TO/cert.pem</span>;\n  ssl_certificate_key <span class=\"userinput\">/YOUR/PATH/TO/cert.key</span>;\n\n  index  index.html;\n\n  # <span class=\"userinput\">Workbench2 uses a call to /config.json to bootstrap itself</span>\n  # <span class=\"userinput\">and find out where to contact the API server.</span>\n  location /config.json {\n    return 200 '{ \"API_HOST\": \"<span class=\"userinput\">ClusterID.example.com</span>\" }';\n  }\n\n  location / {\n    root      /var/www/arvados-workbench2/workbench2;\n    index     index.html;\n    try_files $uri $uri/ /index.html;\n    if (-f $document_root/maintenance.html) {\n      return 503;\n    }\n  }\n}\n</code></pre>\n</notextile>\n\nh2. Vocabulary configuration\n\nWorkbench2 will load, if available, a vocabulary definition which lists available metadata properties for groups and collections.  To learn how to configure the property vocabulary definition, please visit the \"Metadata Vocabulary Format\":{{site.baseurl}}/admin/metadata-vocabulary.html page in the Admin section.\n\n{% assign arvados_component = 'arvados-workbench2' %}\n\n{% include 'install_packages' %}\n\n{% include 'restart_api' %}\n\nh2(#confirm-working). Confirm working installation\n\nVisit @https://workbench2.ClusterID.example.com@ in a browser.  You should be able to log in using the login method you configured in the previous step.  If @Users.AutoAdminFirstUser@ is true, you will be an admin user.\n"
  },
  {
    "path": "doc/install/install-ws.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Install the websocket server\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe arvados-ws server provides event notifications to websocket clients. It can be installed anywhere with access to Postgres database and the Arvados API server, typically behind a web proxy that provides SSL support. See the \"godoc page\":http://godoc.org/github.com/arvados/arvados/services/ws for additional information.\n\n# \"Update config.yml\":#update-config\n# \"Update nginx configuration\":#update-nginx\n# \"Install arvados-ws package\":#install-packages\n# \"Start the service\":#start-service\n# \"Restart the API server and controller\":#restart-api\n# \"Confirm working installation\":#confirm-working\n\nh2(#configure). Update config.yml\n\nEdit the cluster config at @config.yml@ and set @Services.Websocket.ExternalURL@ and @Services.Websocket.InternalURLs@.  Replace @zzzzz@ with your cluster id.\n\n<notextile>\n<pre><code>    Services:\n      Websocket:\n        InternalURLs:\n\t  \"http://localhost:8005\"</span>: {}\n        ExternalURL: <span class=\"userinput\">wss://ws.ClusterID.example.com/websocket</span>\n</span></code></pre>\n</notextile>\n\nh2(#update-nginx). Update Nginx configuration\n\nThe arvados-ws service will be accessible from anywhere on the internet, so we recommend using SSL for transport encryption.\n\nUse a text editor to create a new file @/etc/nginx/conf.d/arvados-ws.conf@ with the following configuration.  Options that need attention are marked in <span class=\"userinput\">red</span>.\n\n<notextile><pre>\nupstream arvados-ws {\n  server                127.0.0.1:<span class=\"userinput\">8005</span>;\n}\n\nserver {\n  listen                443 ssl;\n  server_name           ws.<span class=\"userinput\">ClusterID.example.com</span>;\n\n  proxy_connect_timeout 90s;\n  proxy_read_timeout    300s;\n\n  ssl                   on;\n  ssl_certificate       <span class=\"userinput\">/YOUR/PATH/TO/cert.pem</span>;\n  ssl_certificate_key   <span class=\"userinput\">/YOUR/PATH/TO/cert.key</span>;\n\n  location / {\n    proxy_pass          http://arvados-ws;\n    proxy_set_header    Upgrade         $http_upgrade;\n    proxy_set_header    Connection      \"upgrade\";\n    proxy_set_header    Host            $host;\n    proxy_set_header    X-Forwarded-For $proxy_add_x_forwarded_for;\n  }\n}\n</pre></notextile>\n\n{% assign arvados_component = 'arvados-ws' %}\n\n{% include 'install_packages' %}\n\n{% include 'start_service' %}\n\n{% include 'restart_api' %}\n\nh2(#confirm). Confirm working installation\n\nWe recommend using the \"Cluster diagnostics tool.\":diagnostics.html\n\nHere are some other checks you can perform manually.\n\nConfirm the service is listening on its assigned port and responding to requests.\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">curl https://<span class=\"userinput\">ws.ClusterID.example.com</span>/websocket</span>\nnot websocket protocol\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/install/nginx.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Install Nginx\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh3. Red Hat, AlmaLinux, and Rocky Linux\n\n<notextile>\n<pre><code># <span class=\"userinput\">dnf install nginx</span></code></pre>\n</notextile>\n\nh3. Debian and Ubuntu\n\n<notextile>\n<pre><code># <span class=\"userinput\">apt --no-install-recommends install nginx</span></code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/install/packages.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Arvados package repositories\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nOn any host where you install Arvados software, you'll need to add the Arvados package repository.  They're available for several popular distributions.\n\n* \"Red Hat, AlmaLinux, and Rocky Linux\":#redhat\n* \"Debian and Ubuntu\":#debian\n\n<notextile>\n<a id=\"centos7\" style=\"display: none;\"></a>\n</notextile>\n\nh3(#redhat). Red Hat, AlmaLinux, and Rocky Linux\n\nPackages are available for the following Red Hat-based distributions:\n\n* AlmaLinux 10 (since 10.0)\n* AlmaLinux 9 (since 9.2)\n* AlmaLinux 8 (since 8.8)\n* RHEL 10 (since 10.0)\n* RHEL 9 (since 9.2)\n* RHEL 8 (since 8.8)\n* Rocky Linux 10 (since 10.0)\n* Rocky Linux 9 (since 9.2)\n* Rocky Linux 8 (since 8.8)\n\n{% include 'setup_redhat_repo' %}\n\nh3(#debian). Debian and Ubuntu\n\nPackages are available for the following Debian-based distributions:\n\n* Debian 12 (\"bookworm\")\n* Ubuntu 24.04 (\"noble\")\n* Ubuntu 22.04 (\"jammy\")\n\n{% include 'setup_debian_repo' %}\n"
  },
  {
    "path": "doc/install/ruby.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Install Ruby and Bundler\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'install_ruby_and_bundler' %}\n"
  },
  {
    "path": "doc/install/salt-multi-host.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Multi-Host Arvados\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n\"This page has moved.\":install-multi-host.html\n"
  },
  {
    "path": "doc/install/salt-single-host.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Single host Arvados\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n\"This page has moved.\":install-single-host.html\n"
  },
  {
    "path": "doc/install/setup-login.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Set up web based login\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nSelect one of the following login mechanisms for your cluster.\n\n# If all users will authenticate with Google, \"configure Google login\":#google.\n# If all users will authenticate with an OpenID Connect provider (other than Google), \"configure OpenID Connect\":#oidc.\n# If all users will authenticate with an existing LDAP service, \"configure LDAP\":#ldap.\n# If all users will authenticate using PAM as configured on your controller node, \"configure PAM\":#pam.\n\nh2(#google). Google login\n\nWith this configuration, users will sign in with their Google accounts.\n\nUse the <a href=\"https://console.developers.google.com\" target=\"_blank\">Google Developers Console</a> to create a set of client credentials.\n# Select or create a project.\n# Click *+ Enable APIs and Services*.\n#* Search for *Google People API* and click *Enable API*.\n#* Navigate back to the main \"APIs & Services\" page.\n# On the sidebar, click *OAuth consent screen*.\n#* On consent screen settings, enter your identifying details.\n#* Under *Branding* &rarr; *Authorized domains* add your domain (@example.com@).\n#* Click *Save*.\n# On the sidebar, click *Clients*, then click *+ Create client*, arriving at the *OAuth client ID* setup page.\n# Under *Application type* select *Web application*.\n# Add the JavaScript origin: @https://workbench2.ClusterID.example.com@. This should match the Web origin where you will host Workbench. Note that it can only include the schema, hostname, and port parts; the path, in particular a trailing @/@, is not allowed.\n# Add the Redirect URI: @https://ClusterID.example.com/login@. The host part of this URI should match the @ExternalURL@ of the Arvados controller service as specified in the configuration file @/etc/arvados/config.yml@, including the port if specified.\n# Copy the values of *Client ID* and *Client secret* to the @Login.Google@ section of @/etc/arvados/config.yml@.\n\n{% codeblock as yaml %}\n    Login:\n      Google:\n        Enable: true\n        ClientID: \"0000000000000-zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.apps.googleusercontent.com\"\n        ClientSecret: \"zzzzzzzzzzzzzzzzzzzzzzzz\"\n{% endcodeblock %}\n\nh2(#oidc). OpenID Connect\n\nWith this configuration, users will sign in with a third-party OpenID Connect provider such as GitHub, Auth0, Okta, or PingFederate.\n\nSimilar to the Google login section above, you will need to register your Arvados cluster with the provider as an application (relying party). When asked for a redirect URL or callback URL, use @https://ClusterID.example.com/login@ (the external URL of your controller service, plus @/login@).\n\nThe provider will supply an issuer URL, client ID, and client secret. Add these to your Arvados configuration.\n\n{% codeblock as yaml %}\n    Login:\n      OpenIDConnect:\n        Enable: true\n        Issuer: https://accounts.example.com/\n        ClientID: \"0123456789abcdef\"\n        ClientSecret: \"zzzzzzzzzzzzzzzzzzzzzzzz\"\n{% endcodeblock %}\n\nh3. Accepting OpenID bearer tokens as Arvados API tokens\n\nArvados can also be configured to accept provider-issued access tokens as Arvados API tokens by setting @Login.OpenIDConnect.AcceptAccessToken@ to @true@. 
This can be useful for integrating third-party applications.\n\n{% codeblock as yaml %}\n    Login:\n      OpenIDConnect:\n        AcceptAccessToken: true\n        AcceptAccessTokenScope: \"arvados\"\n{% endcodeblock %}\n\n# If the provider-issued tokens are JWTs, and @Login.OpenIDConnect.AcceptAccessTokenScope@ is not empty, Arvados will check that the token contains the configured scope, and reject tokens that do not have the configured scope.  This can be used to control which users or applications are permitted to access your Arvados instance.\n# Tokens are validated by presenting them to the UserInfo endpoint advertised by the OIDC provider.\n# Once validated, a token is cached and accepted without re-checking for up to 10 minutes.\n# A token that fails validation is cached and will not be re-checked for up to 5 minutes.\n# Network errors and HTTP 5xx responses from the provider's UserInfo endpoint are not cached.\n# The OIDC token cache size is currently limited to 1000 tokens.  If the number of distinct tokens used in a 5-minute period is greater than this, tokens may be checked more frequently.\n\nCheck the OpenIDConnect section in the \"default config file\":{{site.baseurl}}/admin/config.html for more details and configuration options.\n\nh2(#ldap). LDAP\n\nWith this configuration, authentication uses an external LDAP service like OpenLDAP or Active Directory.\n\nEnable LDAP authentication and provide your LDAP server's host, port, and credentials (if needed to search the directory) in @config.yml@:\n\n{% codeblock as yaml %}\n    Login:\n      LDAP:\n        Enable: true\n        URL: ldap://ldap.example.com:389\n        SearchBindUser: cn=lookupuser,dc=example,dc=com\n        SearchBindPassword: xxxxxxxx\n        SearchBase: ou=Users,dc=example,dc=com\n{% endcodeblock %}\n\nThe email address reported by LDAP will be used as the primary key for Arvados accounts. This means *users must not be able to edit their own email addresses* in the directory.\n\nAdditional configuration settings are available:\n* @StartTLS@ is enabled by default.\n* @StripDomain@ and @AppendDomain@ modify the username entered by the user before searching for it in the directory.\n* @SearchAttribute@ (default @uid@) is the LDAP attribute used when searching for usernames.\n* @SearchFilters@ accepts LDAP filter expressions to control which users can log in.\n\nCheck the LDAP section in the \"default config file\":{{site.baseurl}}/admin/config.html for more details and configuration options.\n\nh2(#pam). PAM\n\nWith this configuration, authentication is done according to the Linux PAM (\"Pluggable Authentication Modules\") configuration on your controller host.\n\nEnable PAM authentication in @config.yml@:\n\n{% codeblock as yaml %}\n    Login:\n      PAM:\n        Enable: true\n{% endcodeblock %}\n\nCheck the \"default config file\":{{site.baseurl}}/admin/config.html for more PAM configuration options.\n\nThe default PAM configuration on most Linux systems uses the local user/password database in @/etc/passwd@ and @/etc/shadow@ for all logins. In this case, in order to log in to Arvados, users must have a UNIX account and password on the controller host itself. This can be convenient for a single-user or test cluster. Configuring a user account with a shell of @/bin/false@ will allow the user to log in to Arvados without allowing shell logins on the controller host.\n\n
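For example, to create an account that can log in to Arvados but has no usable shell on the controller host, you can use standard Linux commands (shown as a sketch with a hypothetical username):\n\n<notextile>\n<pre>\n<code># <span class=\"userinput\">useradd --create-home --shell /bin/false exampleuser</span>\n# <span class=\"userinput\">passwd exampleuser</span></code>\n</pre>\n</notextile>\n\nPAM can also be configured to use other authentication systems such as NIS or Kerberos. 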
In a production environment, PAM configuration should use the service name (\"arvados\" by default) and set a separate policy for Arvados login.  In this case, Arvados users should not have shell accounts on the controller node.\n\nFor information about configuring PAM, refer to the \"PAM System Administrator's Guide\":http://www.linux-pam.org/Linux-PAM-html/Linux-PAM_SAG.html.\n"
  },
  {
    "path": "doc/install/workbench.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: installguide\ntitle: Customizing Workbench\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2. Site name\n\nUse the @Workbench.SiteName@ configuration option to set the site name rendered at the top of Workbench.\n\n{% codeblock as yaml %}\n    Workbench:\n      SiteName: Arvados Workbench\n{% endcodeblock %}\n\nh2. Welcome page\n\nUse the @Workbench.WelcomePageHTML@ configuration option to set the text that is rendered when a user arrives at the front page (and has not yet logged in).\n\n{% codeblock as yaml %}\n    Workbench:\n      WelcomePageHTML: |\n        <img src=\"/arvados-logo-big.png\" style=\"width: 20%; float: right; padding: 1em;\" />\n        <h2>Please log in.</h2>\n\n        <p>If you have never used Arvados Workbench before, logging in\n        for the first time will automatically create a new\n        account.</p>\n\n        <i>Arvados Workbench uses your information only for\n        identification, and does not retrieve any other personal\n        information.</i>\n{% endcodeblock %}\n\nh2. Inactive user page\n\nUse the @Workbench.InactivePageHTML@ configuration option to set the text that is rendered when a user logs in but is inactive.\n\n{% codeblock as yaml %}\n    Workbench:\n      InactivePageHTML: |\n        <img src=\"/arvados-logo-big.png\" style=\"width: 20%; float: right; padding: 1em;\" />\n        <h3>Hi! You're logged in, but...</h3>\n        <p>Your account is inactive.</p>\n        <p>An administrator must activate your account before you can get\n        any further.</p>\n{% endcodeblock %}\n\nh2(#banner). Message banner on login and custom tooltips\n\nSet the @Workbench.BannerUUID@ configuration option to the UUID of a collection.  *This collection should be shared with all users.*\n\n{% codeblock as yaml %}\n    Workbench:\n      BannerUUID: zzzzz-4zz18-0123456789abcde\n{% endcodeblock %}\n\nh3. Banner\n\nYou can have box pop up when users load Workbench to give information such as links to site-specific documentation or notification about anticipated downtime.\n\nThe banner appears when a user loads workbench and have not yet viewed the current banner text.  Users can also view the banner after dismissing it by selecting the *Restore Banner* option from the *Notifications* menu.\n\nThe banner text (HTML formatted) is loaded from the file @banner.html@ in the collection provided in @BannerUUID@.  The banner does _not_ need to be wrapped by *html* or *body* tags (if present, they will be removed).\n\n{% include 'html_tags' %}\n\nh3. Tooltips\n\nYou can provide a custom tooltip overlay to provide site-specific guidance for using workbench.  Users can opt-out by selecting *Disable Tooltips* from the *Notifications* menu.\n\nThe tooltips are loaded from the file @tooltips.json@ in the collection provided in @BannerUUID@.\n\nThe format of this file is a JSON object where the key is a \"CSS selector\":https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors and the value is the text of the tooltip.  
Here is an example:\n\n{% codeblock as json %}\n{\n    \"[data-cy=side-panel-button]\": \"Click here to create a new project!\",\n    \"[data-cy=project-panel] tbody tr:nth-child(1)\": \"First element in the project list\"\n}\n{% endcodeblock %}\n\nThe first example adds a tooltip displaying \"Click here to create a new project!\" to the HTML node with the attribute @data-cy=\"side-panel-button\"@.\n\nThe second example adds a tooltip displaying \"First element in the project list\" by finding the project panel element, finding the table body element within the project panel, then matching the first table row.\n\nUse the web developer tools offered by your browser to determine which identifiers are available, and construct selectors that will anchor your tooltips to the desired Workbench components.\n"
  },
  {
    "path": "doc/js/bootstrap.js",
    "content": "/*!\n * Bootstrap v3.1.0 (http://getbootstrap.com)\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n */\n\nif (typeof jQuery === 'undefined') { throw new Error('Bootstrap requires jQuery') }\n\n/* ========================================================================\n * Bootstrap: transition.js v3.1.0\n * http://getbootstrap.com/javascript/#transitions\n * ========================================================================\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // CSS TRANSITION SUPPORT (Shoutout: http://www.modernizr.com/)\n  // ============================================================\n\n  function transitionEnd() {\n    var el = document.createElement('bootstrap')\n\n    var transEndEventNames = {\n      'WebkitTransition' : 'webkitTransitionEnd',\n      'MozTransition'    : 'transitionend',\n      'OTransition'      : 'oTransitionEnd otransitionend',\n      'transition'       : 'transitionend'\n    }\n\n    for (var name in transEndEventNames) {\n      if (el.style[name] !== undefined) {\n        return { end: transEndEventNames[name] }\n      }\n    }\n\n    return false // explicit for ie8 (  ._.)\n  }\n\n  // http://blog.alexmaccaw.com/css-transitions\n  $.fn.emulateTransitionEnd = function (duration) {\n    var called = false, $el = this\n    $(this).one($.support.transition.end, function () { called = true })\n    var callback = function () { if (!called) $($el).trigger($.support.transition.end) }\n    setTimeout(callback, duration)\n    return this\n  }\n\n  $(function () {\n    $.support.transition = transitionEnd()\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: alert.js v3.1.0\n * http://getbootstrap.com/javascript/#alerts\n * ========================================================================\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // ALERT CLASS DEFINITION\n  // ======================\n\n  var dismiss = '[data-dismiss=\"alert\"]'\n  var Alert   = function (el) {\n    $(el).on('click', dismiss, this.close)\n  }\n\n  Alert.prototype.close = function (e) {\n    var $this    = $(this)\n    var selector = $this.attr('data-target')\n\n    if (!selector) {\n      selector = $this.attr('href')\n      selector = selector && selector.replace(/.*(?=#[^\\s]*$)/, '') // strip for ie7\n    }\n\n    var $parent = $(selector)\n\n    if (e) e.preventDefault()\n\n    if (!$parent.length) {\n      $parent = $this.hasClass('alert') ? 
$this : $this.parent()\n    }\n\n    $parent.trigger(e = $.Event('close.bs.alert'))\n\n    if (e.isDefaultPrevented()) return\n\n    $parent.removeClass('in')\n\n    function removeElement() {\n      $parent.trigger('closed.bs.alert').remove()\n    }\n\n    $.support.transition && $parent.hasClass('fade') ?\n      $parent\n        .one($.support.transition.end, removeElement)\n        .emulateTransitionEnd(150) :\n      removeElement()\n  }\n\n\n  // ALERT PLUGIN DEFINITION\n  // =======================\n\n  var old = $.fn.alert\n\n  $.fn.alert = function (option) {\n    return this.each(function () {\n      var $this = $(this)\n      var data  = $this.data('bs.alert')\n\n      if (!data) $this.data('bs.alert', (data = new Alert(this)))\n      if (typeof option == 'string') data[option].call($this)\n    })\n  }\n\n  $.fn.alert.Constructor = Alert\n\n\n  // ALERT NO CONFLICT\n  // =================\n\n  $.fn.alert.noConflict = function () {\n    $.fn.alert = old\n    return this\n  }\n\n\n  // ALERT DATA-API\n  // ==============\n\n  $(document).on('click.bs.alert.data-api', dismiss, Alert.prototype.close)\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: button.js v3.1.0\n * http://getbootstrap.com/javascript/#buttons\n * ========================================================================\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // BUTTON PUBLIC CLASS DEFINITION\n  // ==============================\n\n  var Button = function (element, options) {\n    this.$element  = $(element)\n    this.options   = $.extend({}, Button.DEFAULTS, options)\n    this.isLoading = false\n  }\n\n  Button.DEFAULTS = {\n    loadingText: 'loading...'\n  }\n\n  Button.prototype.setState = function (state) {\n    var d    = 'disabled'\n    var $el  = this.$element\n    var val  = $el.is('input') ? 
'val' : 'html'\n    var data = $el.data()\n\n    state = state + 'Text'\n\n    if (!data.resetText) $el.data('resetText', $el[val]())\n\n    $el[val](data[state] || this.options[state])\n\n    // push to event loop to allow forms to submit\n    setTimeout($.proxy(function () {\n      if (state == 'loadingText') {\n        this.isLoading = true\n        $el.addClass(d).attr(d, d)\n      } else if (this.isLoading) {\n        this.isLoading = false\n        $el.removeClass(d).removeAttr(d)\n      }\n    }, this), 0)\n  }\n\n  Button.prototype.toggle = function () {\n    var changed = true\n    var $parent = this.$element.closest('[data-toggle=\"buttons\"]')\n\n    if ($parent.length) {\n      var $input = this.$element.find('input')\n      if ($input.prop('type') == 'radio') {\n        if ($input.prop('checked') && this.$element.hasClass('active')) changed = false\n        else $parent.find('.active').removeClass('active')\n      }\n      if (changed) $input.prop('checked', !this.$element.hasClass('active')).trigger('change')\n    }\n\n    if (changed) this.$element.toggleClass('active')\n  }\n\n\n  // BUTTON PLUGIN DEFINITION\n  // ========================\n\n  var old = $.fn.button\n\n  $.fn.button = function (option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.button')\n      var options = typeof option == 'object' && option\n\n      if (!data) $this.data('bs.button', (data = new Button(this, options)))\n\n      if (option == 'toggle') data.toggle()\n      else if (option) data.setState(option)\n    })\n  }\n\n  $.fn.button.Constructor = Button\n\n\n  // BUTTON NO CONFLICT\n  // ==================\n\n  $.fn.button.noConflict = function () {\n    $.fn.button = old\n    return this\n  }\n\n\n  // BUTTON DATA-API\n  // ===============\n\n  $(document).on('click.bs.button.data-api', '[data-toggle^=button]', function (e) {\n    var $btn = $(e.target)\n    if (!$btn.hasClass('btn')) $btn = $btn.closest('.btn')\n    $btn.button('toggle')\n    e.preventDefault()\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: carousel.js v3.1.0\n * http://getbootstrap.com/javascript/#carousel\n * ========================================================================\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // CAROUSEL CLASS DEFINITION\n  // =========================\n\n  var Carousel = function (element, options) {\n    this.$element    = $(element)\n    this.$indicators = this.$element.find('.carousel-indicators')\n    this.options     = options\n    this.paused      =\n    this.sliding     =\n    this.interval    =\n    this.$active     =\n    this.$items      = null\n\n    this.options.pause == 'hover' && this.$element\n      .on('mouseenter', $.proxy(this.pause, this))\n      .on('mouseleave', $.proxy(this.cycle, this))\n  }\n\n  Carousel.DEFAULTS = {\n    interval: 5000,\n    pause: 'hover',\n    wrap: true\n  }\n\n  Carousel.prototype.cycle =  function (e) {\n    e || (this.paused = false)\n\n    this.interval && clearInterval(this.interval)\n\n    this.options.interval\n      && !this.paused\n      && (this.interval = setInterval($.proxy(this.next, this), this.options.interval))\n\n    return this\n  }\n\n  Carousel.prototype.getActiveIndex = function () {\n    this.$active = 
this.$element.find('.item.active')\n    this.$items  = this.$active.parent().children()\n\n    return this.$items.index(this.$active)\n  }\n\n  Carousel.prototype.to = function (pos) {\n    var that        = this\n    var activeIndex = this.getActiveIndex()\n\n    if (pos > (this.$items.length - 1) || pos < 0) return\n\n    if (this.sliding)       return this.$element.one('slid.bs.carousel', function () { that.to(pos) })\n    if (activeIndex == pos) return this.pause().cycle()\n\n    return this.slide(pos > activeIndex ? 'next' : 'prev', $(this.$items[pos]))\n  }\n\n  Carousel.prototype.pause = function (e) {\n    e || (this.paused = true)\n\n    if (this.$element.find('.next, .prev').length && $.support.transition) {\n      this.$element.trigger($.support.transition.end)\n      this.cycle(true)\n    }\n\n    this.interval = clearInterval(this.interval)\n\n    return this\n  }\n\n  Carousel.prototype.next = function () {\n    if (this.sliding) return\n    return this.slide('next')\n  }\n\n  Carousel.prototype.prev = function () {\n    if (this.sliding) return\n    return this.slide('prev')\n  }\n\n  Carousel.prototype.slide = function (type, next) {\n    var $active   = this.$element.find('.item.active')\n    var $next     = next || $active[type]()\n    var isCycling = this.interval\n    var direction = type == 'next' ? 'left' : 'right'\n    var fallback  = type == 'next' ? 'first' : 'last'\n    var that      = this\n\n    if (!$next.length) {\n      if (!this.options.wrap) return\n      $next = this.$element.find('.item')[fallback]()\n    }\n\n    if ($next.hasClass('active')) return this.sliding = false\n\n    var e = $.Event('slide.bs.carousel', { relatedTarget: $next[0], direction: direction })\n    this.$element.trigger(e)\n    if (e.isDefaultPrevented()) return\n\n    this.sliding = true\n\n    isCycling && this.pause()\n\n    if (this.$indicators.length) {\n      this.$indicators.find('.active').removeClass('active')\n      this.$element.one('slid.bs.carousel', function () {\n        var $nextIndicator = $(that.$indicators.children()[that.getActiveIndex()])\n        $nextIndicator && $nextIndicator.addClass('active')\n      })\n    }\n\n    if ($.support.transition && this.$element.hasClass('slide')) {\n      $next.addClass(type)\n      $next[0].offsetWidth // force reflow\n      $active.addClass(direction)\n      $next.addClass(direction)\n      $active\n        .one($.support.transition.end, function () {\n          $next.removeClass([type, direction].join(' ')).addClass('active')\n          $active.removeClass(['active', direction].join(' '))\n          that.sliding = false\n          setTimeout(function () { that.$element.trigger('slid.bs.carousel') }, 0)\n        })\n        .emulateTransitionEnd($active.css('transition-duration').slice(0, -1) * 1000)\n    } else {\n      $active.removeClass('active')\n      $next.addClass('active')\n      this.sliding = false\n      this.$element.trigger('slid.bs.carousel')\n    }\n\n    isCycling && this.cycle()\n\n    return this\n  }\n\n\n  // CAROUSEL PLUGIN DEFINITION\n  // ==========================\n\n  var old = $.fn.carousel\n\n  $.fn.carousel = function (option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.carousel')\n      var options = $.extend({}, Carousel.DEFAULTS, $this.data(), typeof option == 'object' && option)\n      var action  = typeof option == 'string' ? 
option : options.slide\n\n      if (!data) $this.data('bs.carousel', (data = new Carousel(this, options)))\n      if (typeof option == 'number') data.to(option)\n      else if (action) data[action]()\n      else if (options.interval) data.pause().cycle()\n    })\n  }\n\n  $.fn.carousel.Constructor = Carousel\n\n\n  // CAROUSEL NO CONFLICT\n  // ====================\n\n  $.fn.carousel.noConflict = function () {\n    $.fn.carousel = old\n    return this\n  }\n\n\n  // CAROUSEL DATA-API\n  // =================\n\n  $(document).on('click.bs.carousel.data-api', '[data-slide], [data-slide-to]', function (e) {\n    var $this   = $(this), href\n    var $target = $($this.attr('data-target') || (href = $this.attr('href')) && href.replace(/.*(?=#[^\\s]+$)/, '')) //strip for ie7\n    var options = $.extend({}, $target.data(), $this.data())\n    var slideIndex = $this.attr('data-slide-to')\n    if (slideIndex) options.interval = false\n\n    $target.carousel(options)\n\n    if (slideIndex = $this.attr('data-slide-to')) {\n      $target.data('bs.carousel').to(slideIndex)\n    }\n\n    e.preventDefault()\n  })\n\n  $(window).on('load', function () {\n    $('[data-ride=\"carousel\"]').each(function () {\n      var $carousel = $(this)\n      $carousel.carousel($carousel.data())\n    })\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: collapse.js v3.1.0\n * http://getbootstrap.com/javascript/#collapse\n * ========================================================================\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // COLLAPSE PUBLIC CLASS DEFINITION\n  // ================================\n\n  var Collapse = function (element, options) {\n    this.$element      = $(element)\n    this.options       = $.extend({}, Collapse.DEFAULTS, options)\n    this.transitioning = null\n\n    if (this.options.parent) this.$parent = $(this.options.parent)\n    if (this.options.toggle) this.toggle()\n  }\n\n  Collapse.DEFAULTS = {\n    toggle: true\n  }\n\n  Collapse.prototype.dimension = function () {\n    var hasWidth = this.$element.hasClass('width')\n    return hasWidth ? 
'width' : 'height'\n  }\n\n  Collapse.prototype.show = function () {\n    if (this.transitioning || this.$element.hasClass('in')) return\n\n    var startEvent = $.Event('show.bs.collapse')\n    this.$element.trigger(startEvent)\n    if (startEvent.isDefaultPrevented()) return\n\n    var actives = this.$parent && this.$parent.find('> .panel > .in')\n\n    if (actives && actives.length) {\n      var hasData = actives.data('bs.collapse')\n      if (hasData && hasData.transitioning) return\n      actives.collapse('hide')\n      hasData || actives.data('bs.collapse', null)\n    }\n\n    var dimension = this.dimension()\n\n    this.$element\n      .removeClass('collapse')\n      .addClass('collapsing')\n      [dimension](0)\n\n    this.transitioning = 1\n\n    var complete = function () {\n      this.$element\n        .removeClass('collapsing')\n        .addClass('collapse in')\n        [dimension]('auto')\n      this.transitioning = 0\n      this.$element.trigger('shown.bs.collapse')\n    }\n\n    if (!$.support.transition) return complete.call(this)\n\n    var scrollSize = $.camelCase(['scroll', dimension].join('-'))\n\n    this.$element\n      .one($.support.transition.end, $.proxy(complete, this))\n      .emulateTransitionEnd(350)\n      [dimension](this.$element[0][scrollSize])\n  }\n\n  Collapse.prototype.hide = function () {\n    if (this.transitioning || !this.$element.hasClass('in')) return\n\n    var startEvent = $.Event('hide.bs.collapse')\n    this.$element.trigger(startEvent)\n    if (startEvent.isDefaultPrevented()) return\n\n    var dimension = this.dimension()\n\n    this.$element\n      [dimension](this.$element[dimension]())\n      [0].offsetHeight\n\n    this.$element\n      .addClass('collapsing')\n      .removeClass('collapse')\n      .removeClass('in')\n\n    this.transitioning = 1\n\n    var complete = function () {\n      this.transitioning = 0\n      this.$element\n        .trigger('hidden.bs.collapse')\n        .removeClass('collapsing')\n        .addClass('collapse')\n    }\n\n    if (!$.support.transition) return complete.call(this)\n\n    this.$element\n      [dimension](0)\n      .one($.support.transition.end, $.proxy(complete, this))\n      .emulateTransitionEnd(350)\n  }\n\n  Collapse.prototype.toggle = function () {\n    this[this.$element.hasClass('in') ? 
'hide' : 'show']()\n  }\n\n\n  // COLLAPSE PLUGIN DEFINITION\n  // ==========================\n\n  var old = $.fn.collapse\n\n  $.fn.collapse = function (option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.collapse')\n      var options = $.extend({}, Collapse.DEFAULTS, $this.data(), typeof option == 'object' && option)\n\n      if (!data && options.toggle && option == 'show') option = !option\n      if (!data) $this.data('bs.collapse', (data = new Collapse(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  $.fn.collapse.Constructor = Collapse\n\n\n  // COLLAPSE NO CONFLICT\n  // ====================\n\n  $.fn.collapse.noConflict = function () {\n    $.fn.collapse = old\n    return this\n  }\n\n\n  // COLLAPSE DATA-API\n  // =================\n\n  $(document).on('click.bs.collapse.data-api', '[data-toggle=collapse]', function (e) {\n    var $this   = $(this), href\n    var target  = $this.attr('data-target')\n        || e.preventDefault()\n        || (href = $this.attr('href')) && href.replace(/.*(?=#[^\\s]+$)/, '') //strip for ie7\n    var $target = $(target)\n    var data    = $target.data('bs.collapse')\n    var option  = data ? 'toggle' : $this.data()\n    var parent  = $this.attr('data-parent')\n    var $parent = parent && $(parent)\n\n    if (!data || !data.transitioning) {\n      if ($parent) $parent.find('[data-toggle=collapse][data-parent=\"' + parent + '\"]').not($this).addClass('collapsed')\n      $this[$target.hasClass('in') ? 'addClass' : 'removeClass']('collapsed')\n    }\n\n    $target.collapse(option)\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: dropdown.js v3.1.0\n * http://getbootstrap.com/javascript/#dropdowns\n * ========================================================================\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // DROPDOWN CLASS DEFINITION\n  // =========================\n\n  var backdrop = '.dropdown-backdrop'\n  var toggle   = '[data-toggle=dropdown]'\n  var Dropdown = function (element) {\n    $(element).on('click.bs.dropdown', this.toggle)\n  }\n\n  Dropdown.prototype.toggle = function (e) {\n    var $this = $(this)\n\n    if ($this.is('.disabled, :disabled')) return\n\n    var $parent  = getParent($this)\n    var isActive = $parent.hasClass('open')\n\n    clearMenus()\n\n    if (!isActive) {\n      if ('ontouchstart' in document.documentElement && !$parent.closest('.navbar-nav').length) {\n        // if mobile we use a backdrop because click events don't delegate\n        $('<div class=\"dropdown-backdrop\"/>').insertAfter($(this)).on('click', clearMenus)\n      }\n\n      var relatedTarget = { relatedTarget: this }\n      $parent.trigger(e = $.Event('show.bs.dropdown', relatedTarget))\n\n      if (e.isDefaultPrevented()) return\n\n      $parent\n        .toggleClass('open')\n        .trigger('shown.bs.dropdown', relatedTarget)\n\n      $this.focus()\n    }\n\n    return false\n  }\n\n  Dropdown.prototype.keydown = function (e) {\n    if (!/(38|40|27)/.test(e.keyCode)) return\n\n    var $this = $(this)\n\n    e.preventDefault()\n    e.stopPropagation()\n\n    if ($this.is('.disabled, :disabled')) return\n\n    var $parent  = getParent($this)\n    var isActive = $parent.hasClass('open')\n\n    if 
(!isActive || (isActive && e.keyCode == 27)) {\n      if (e.which == 27) $parent.find(toggle).focus()\n      return $this.click()\n    }\n\n    var desc = ' li:not(.divider):visible a'\n    var $items = $parent.find('[role=menu]' + desc + ', [role=listbox]' + desc)\n\n    if (!$items.length) return\n\n    var index = $items.index($items.filter(':focus'))\n\n    if (e.keyCode == 38 && index > 0)                 index--                        // up\n    if (e.keyCode == 40 && index < $items.length - 1) index++                        // down\n    if (!~index)                                      index = 0\n\n    $items.eq(index).focus()\n  }\n\n  function clearMenus(e) {\n    $(backdrop).remove()\n    $(toggle).each(function () {\n      var $parent = getParent($(this))\n      var relatedTarget = { relatedTarget: this }\n      if (!$parent.hasClass('open')) return\n      $parent.trigger(e = $.Event('hide.bs.dropdown', relatedTarget))\n      if (e.isDefaultPrevented()) return\n      $parent.removeClass('open').trigger('hidden.bs.dropdown', relatedTarget)\n    })\n  }\n\n  function getParent($this) {\n    var selector = $this.attr('data-target')\n\n    if (!selector) {\n      selector = $this.attr('href')\n      selector = selector && /#[A-Za-z]/.test(selector) && selector.replace(/.*(?=#[^\\s]*$)/, '') //strip for ie7\n    }\n\n    var $parent = selector && $(selector)\n\n    return $parent && $parent.length ? $parent : $this.parent()\n  }\n\n\n  // DROPDOWN PLUGIN DEFINITION\n  // ==========================\n\n  var old = $.fn.dropdown\n\n  $.fn.dropdown = function (option) {\n    return this.each(function () {\n      var $this = $(this)\n      var data  = $this.data('bs.dropdown')\n\n      if (!data) $this.data('bs.dropdown', (data = new Dropdown(this)))\n      if (typeof option == 'string') data[option].call($this)\n    })\n  }\n\n  $.fn.dropdown.Constructor = Dropdown\n\n\n  // DROPDOWN NO CONFLICT\n  // ====================\n\n  $.fn.dropdown.noConflict = function () {\n    $.fn.dropdown = old\n    return this\n  }\n\n\n  // APPLY TO STANDARD DROPDOWN ELEMENTS\n  // ===================================\n\n  $(document)\n    .on('click.bs.dropdown.data-api', clearMenus)\n    .on('click.bs.dropdown.data-api', '.dropdown form', function (e) { e.stopPropagation() })\n    .on('click.bs.dropdown.data-api', toggle, Dropdown.prototype.toggle)\n    .on('keydown.bs.dropdown.data-api', toggle + ', [role=menu], [role=listbox]', Dropdown.prototype.keydown)\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: modal.js v3.1.0\n * http://getbootstrap.com/javascript/#modals\n * ========================================================================\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // MODAL CLASS DEFINITION\n  // ======================\n\n  var Modal = function (element, options) {\n    this.options   = options\n    this.$element  = $(element)\n    this.$backdrop =\n    this.isShown   = null\n\n    if (this.options.remote) {\n      this.$element\n        .find('.modal-content')\n        .load(this.options.remote, $.proxy(function () {\n          this.$element.trigger('loaded.bs.modal')\n        }, this))\n    }\n  }\n\n  Modal.DEFAULTS = {\n    backdrop: true,\n    keyboard: true,\n    show: true\n  }\n\n  Modal.prototype.toggle = function 
(_relatedTarget) {\n    return this[!this.isShown ? 'show' : 'hide'](_relatedTarget)\n  }\n\n  Modal.prototype.show = function (_relatedTarget) {\n    var that = this\n    var e    = $.Event('show.bs.modal', { relatedTarget: _relatedTarget })\n\n    this.$element.trigger(e)\n\n    if (this.isShown || e.isDefaultPrevented()) return\n\n    this.isShown = true\n\n    this.escape()\n\n    this.$element.on('click.dismiss.bs.modal', '[data-dismiss=\"modal\"]', $.proxy(this.hide, this))\n\n    this.backdrop(function () {\n      var transition = $.support.transition && that.$element.hasClass('fade')\n\n      if (!that.$element.parent().length) {\n        that.$element.appendTo(document.body) // don't move modals dom position\n      }\n\n      that.$element\n        .show()\n        .scrollTop(0)\n\n      if (transition) {\n        that.$element[0].offsetWidth // force reflow\n      }\n\n      that.$element\n        .addClass('in')\n        .attr('aria-hidden', false)\n\n      that.enforceFocus()\n\n      var e = $.Event('shown.bs.modal', { relatedTarget: _relatedTarget })\n\n      transition ?\n        that.$element.find('.modal-dialog') // wait for modal to slide in\n          .one($.support.transition.end, function () {\n            that.$element.focus().trigger(e)\n          })\n          .emulateTransitionEnd(300) :\n        that.$element.focus().trigger(e)\n    })\n  }\n\n  Modal.prototype.hide = function (e) {\n    if (e) e.preventDefault()\n\n    e = $.Event('hide.bs.modal')\n\n    this.$element.trigger(e)\n\n    if (!this.isShown || e.isDefaultPrevented()) return\n\n    this.isShown = false\n\n    this.escape()\n\n    $(document).off('focusin.bs.modal')\n\n    this.$element\n      .removeClass('in')\n      .attr('aria-hidden', true)\n      .off('click.dismiss.bs.modal')\n\n    $.support.transition && this.$element.hasClass('fade') ?\n      this.$element\n        .one($.support.transition.end, $.proxy(this.hideModal, this))\n        .emulateTransitionEnd(300) :\n      this.hideModal()\n  }\n\n  Modal.prototype.enforceFocus = function () {\n    $(document)\n      .off('focusin.bs.modal') // guard against infinite focus loop\n      .on('focusin.bs.modal', $.proxy(function (e) {\n        if (this.$element[0] !== e.target && !this.$element.has(e.target).length) {\n          this.$element.focus()\n        }\n      }, this))\n  }\n\n  Modal.prototype.escape = function () {\n    if (this.isShown && this.options.keyboard) {\n      this.$element.on('keyup.dismiss.bs.modal', $.proxy(function (e) {\n        e.which == 27 && this.hide()\n      }, this))\n    } else if (!this.isShown) {\n      this.$element.off('keyup.dismiss.bs.modal')\n    }\n  }\n\n  Modal.prototype.hideModal = function () {\n    var that = this\n    this.$element.hide()\n    this.backdrop(function () {\n      that.removeBackdrop()\n      that.$element.trigger('hidden.bs.modal')\n    })\n  }\n\n  Modal.prototype.removeBackdrop = function () {\n    this.$backdrop && this.$backdrop.remove()\n    this.$backdrop = null\n  }\n\n  Modal.prototype.backdrop = function (callback) {\n    var animate = this.$element.hasClass('fade') ? 'fade' : ''\n\n    if (this.isShown && this.options.backdrop) {\n      var doAnimate = $.support.transition && animate\n\n      this.$backdrop = $('<div class=\"modal-backdrop ' + animate + '\" />')\n        .appendTo(document.body)\n\n      this.$element.on('click.dismiss.bs.modal', $.proxy(function (e) {\n        if (e.target !== e.currentTarget) return\n        this.options.backdrop == 'static'\n          ? 
this.$element[0].focus.call(this.$element[0])\n          : this.hide.call(this)\n      }, this))\n\n      if (doAnimate) this.$backdrop[0].offsetWidth // force reflow\n\n      this.$backdrop.addClass('in')\n\n      if (!callback) return\n\n      doAnimate ?\n        this.$backdrop\n          .one($.support.transition.end, callback)\n          .emulateTransitionEnd(150) :\n        callback()\n\n    } else if (!this.isShown && this.$backdrop) {\n      this.$backdrop.removeClass('in')\n\n      $.support.transition && this.$element.hasClass('fade') ?\n        this.$backdrop\n          .one($.support.transition.end, callback)\n          .emulateTransitionEnd(150) :\n        callback()\n\n    } else if (callback) {\n      callback()\n    }\n  }\n\n\n  // MODAL PLUGIN DEFINITION\n  // =======================\n\n  var old = $.fn.modal\n\n  $.fn.modal = function (option, _relatedTarget) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.modal')\n      var options = $.extend({}, Modal.DEFAULTS, $this.data(), typeof option == 'object' && option)\n\n      if (!data) $this.data('bs.modal', (data = new Modal(this, options)))\n      if (typeof option == 'string') data[option](_relatedTarget)\n      else if (options.show) data.show(_relatedTarget)\n    })\n  }\n\n  $.fn.modal.Constructor = Modal\n\n\n  // MODAL NO CONFLICT\n  // =================\n\n  $.fn.modal.noConflict = function () {\n    $.fn.modal = old\n    return this\n  }\n\n\n  // MODAL DATA-API\n  // ==============\n\n  $(document).on('click.bs.modal.data-api', '[data-toggle=\"modal\"]', function (e) {\n    var $this   = $(this)\n    var href    = $this.attr('href')\n    var $target = $($this.attr('data-target') || (href && href.replace(/.*(?=#[^\\s]+$)/, ''))) //strip for ie7\n    var option  = $target.data('bs.modal') ? 
'toggle' : $.extend({ remote: !/#/.test(href) && href }, $target.data(), $this.data())\n\n    if ($this.is('a')) e.preventDefault()\n\n    $target\n      .modal(option, this)\n      .one('hide', function () {\n        $this.is(':visible') && $this.focus()\n      })\n  })\n\n  $(document)\n    .on('show.bs.modal', '.modal', function () { $(document.body).addClass('modal-open') })\n    .on('hidden.bs.modal', '.modal', function () { $(document.body).removeClass('modal-open') })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: tooltip.js v3.1.0\n * http://getbootstrap.com/javascript/#tooltip\n * Inspired by the original jQuery.tipsy by Jason Frame\n * ========================================================================\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // TOOLTIP PUBLIC CLASS DEFINITION\n  // ===============================\n\n  var Tooltip = function (element, options) {\n    this.type       =\n    this.options    =\n    this.enabled    =\n    this.timeout    =\n    this.hoverState =\n    this.$element   = null\n\n    this.init('tooltip', element, options)\n  }\n\n  Tooltip.DEFAULTS = {\n    animation: true,\n    placement: 'top',\n    selector: false,\n    template: '<div class=\"tooltip\"><div class=\"tooltip-arrow\"></div><div class=\"tooltip-inner\"></div></div>',\n    trigger: 'hover focus',\n    title: '',\n    delay: 0,\n    html: false,\n    container: false\n  }\n\n  Tooltip.prototype.init = function (type, element, options) {\n    this.enabled  = true\n    this.type     = type\n    this.$element = $(element)\n    this.options  = this.getOptions(options)\n\n    var triggers = this.options.trigger.split(' ')\n\n    for (var i = triggers.length; i--;) {\n      var trigger = triggers[i]\n\n      if (trigger == 'click') {\n        this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this))\n      } else if (trigger != 'manual') {\n        var eventIn  = trigger == 'hover' ? 'mouseenter' : 'focusin'\n        var eventOut = trigger == 'hover' ? 'mouseleave' : 'focusout'\n\n        this.$element.on(eventIn  + '.' + this.type, this.options.selector, $.proxy(this.enter, this))\n        this.$element.on(eventOut + '.' + this.type, this.options.selector, $.proxy(this.leave, this))\n      }\n    }\n\n    this.options.selector ?\n      (this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) :\n      this.fixTitle()\n  }\n\n  Tooltip.prototype.getDefaults = function () {\n    return Tooltip.DEFAULTS\n  }\n\n  Tooltip.prototype.getOptions = function (options) {\n    options = $.extend({}, this.getDefaults(), this.$element.data(), options)\n\n    if (options.delay && typeof options.delay == 'number') {\n      options.delay = {\n        show: options.delay,\n        hide: options.delay\n      }\n    }\n\n    return options\n  }\n\n  Tooltip.prototype.getDelegateOptions = function () {\n    var options  = {}\n    var defaults = this.getDefaults()\n\n    this._options && $.each(this._options, function (key, value) {\n      if (defaults[key] != value) options[key] = value\n    })\n\n    return options\n  }\n\n  Tooltip.prototype.enter = function (obj) {\n    var self = obj instanceof this.constructor ?\n      obj : $(obj.currentTarget)[this.type](this.getDelegateOptions()).data('bs.' 
+ this.type)\n\n    clearTimeout(self.timeout)\n\n    self.hoverState = 'in'\n\n    if (!self.options.delay || !self.options.delay.show) return self.show()\n\n    self.timeout = setTimeout(function () {\n      if (self.hoverState == 'in') self.show()\n    }, self.options.delay.show)\n  }\n\n  Tooltip.prototype.leave = function (obj) {\n    var self = obj instanceof this.constructor ?\n      obj : $(obj.currentTarget)[this.type](this.getDelegateOptions()).data('bs.' + this.type)\n\n    clearTimeout(self.timeout)\n\n    self.hoverState = 'out'\n\n    if (!self.options.delay || !self.options.delay.hide) return self.hide()\n\n    self.timeout = setTimeout(function () {\n      if (self.hoverState == 'out') self.hide()\n    }, self.options.delay.hide)\n  }\n\n  Tooltip.prototype.show = function () {\n    var e = $.Event('show.bs.' + this.type)\n\n    if (this.hasContent() && this.enabled) {\n      this.$element.trigger(e)\n\n      if (e.isDefaultPrevented()) return\n      var that = this;\n\n      var $tip = this.tip()\n\n      this.setContent()\n\n      if (this.options.animation) $tip.addClass('fade')\n\n      var placement = typeof this.options.placement == 'function' ?\n        this.options.placement.call(this, $tip[0], this.$element[0]) :\n        this.options.placement\n\n      var autoToken = /\\s?auto?\\s?/i\n      var autoPlace = autoToken.test(placement)\n      if (autoPlace) placement = placement.replace(autoToken, '') || 'top'\n\n      $tip\n        .detach()\n        .css({ top: 0, left: 0, display: 'block' })\n        .addClass(placement)\n\n      this.options.container ? $tip.appendTo(this.options.container) : $tip.insertAfter(this.$element)\n\n      var pos          = this.getPosition()\n      var actualWidth  = $tip[0].offsetWidth\n      var actualHeight = $tip[0].offsetHeight\n\n      if (autoPlace) {\n        var $parent = this.$element.parent()\n\n        var orgPlacement = placement\n        var docScroll    = document.documentElement.scrollTop || document.body.scrollTop\n        var parentWidth  = this.options.container == 'body' ? window.innerWidth  : $parent.outerWidth()\n        var parentHeight = this.options.container == 'body' ? window.innerHeight : $parent.outerHeight()\n        var parentLeft   = this.options.container == 'body' ? 0 : $parent.offset().left\n\n        placement = placement == 'bottom' && pos.top   + pos.height  + actualHeight - docScroll > parentHeight  ? 'top'    :\n                    placement == 'top'    && pos.top   - docScroll   - actualHeight < 0                         ? 'bottom' :\n                    placement == 'right'  && pos.right + actualWidth > parentWidth                              ? 'left'   :\n                    placement == 'left'   && pos.left  - actualWidth < parentLeft                               ? 'right'  :\n                    placement\n\n        $tip\n          .removeClass(orgPlacement)\n          .addClass(placement)\n      }\n\n      var calculatedOffset = this.getCalculatedOffset(placement, pos, actualWidth, actualHeight)\n\n      this.applyPlacement(calculatedOffset, placement)\n      this.hoverState = null\n\n      var complete = function() {\n        that.$element.trigger('shown.bs.' 
+ that.type)\n      }\n\n      $.support.transition && this.$tip.hasClass('fade') ?\n        $tip\n          .one($.support.transition.end, complete)\n          .emulateTransitionEnd(150) :\n        complete()\n    }\n  }\n\n  Tooltip.prototype.applyPlacement = function (offset, placement) {\n    var replace\n    var $tip   = this.tip()\n    var width  = $tip[0].offsetWidth\n    var height = $tip[0].offsetHeight\n\n    // manually read margins because getBoundingClientRect includes difference\n    var marginTop = parseInt($tip.css('margin-top'), 10)\n    var marginLeft = parseInt($tip.css('margin-left'), 10)\n\n    // we must check for NaN for ie 8/9\n    if (isNaN(marginTop))  marginTop  = 0\n    if (isNaN(marginLeft)) marginLeft = 0\n\n    offset.top  = offset.top  + marginTop\n    offset.left = offset.left + marginLeft\n\n    // $.fn.offset doesn't round pixel values\n    // so we use setOffset directly with our own function B-0\n    $.offset.setOffset($tip[0], $.extend({\n      using: function (props) {\n        $tip.css({\n          top: Math.round(props.top),\n          left: Math.round(props.left)\n        })\n      }\n    }, offset), 0)\n\n    $tip.addClass('in')\n\n    // check to see if placing tip in new offset caused the tip to resize itself\n    var actualWidth  = $tip[0].offsetWidth\n    var actualHeight = $tip[0].offsetHeight\n\n    if (placement == 'top' && actualHeight != height) {\n      replace = true\n      offset.top = offset.top + height - actualHeight\n    }\n\n    if (/bottom|top/.test(placement)) {\n      var delta = 0\n\n      if (offset.left < 0) {\n        delta       = offset.left * -2\n        offset.left = 0\n\n        $tip.offset(offset)\n\n        actualWidth  = $tip[0].offsetWidth\n        actualHeight = $tip[0].offsetHeight\n      }\n\n      this.replaceArrow(delta - width + actualWidth, actualWidth, 'left')\n    } else {\n      this.replaceArrow(actualHeight - height, actualHeight, 'top')\n    }\n\n    if (replace) $tip.offset(offset)\n  }\n\n  Tooltip.prototype.replaceArrow = function (delta, dimension, position) {\n    this.arrow().css(position, delta ? (50 * (1 - delta / dimension) + '%') : '')\n  }\n\n  Tooltip.prototype.setContent = function () {\n    var $tip  = this.tip()\n    var title = this.getTitle()\n\n    $tip.find('.tooltip-inner')[this.options.html ? 'html' : 'text'](title)\n    $tip.removeClass('fade in top bottom left right')\n  }\n\n  Tooltip.prototype.hide = function () {\n    var that = this\n    var $tip = this.tip()\n    var e    = $.Event('hide.bs.' + this.type)\n\n    function complete() {\n      if (that.hoverState != 'in') $tip.detach()\n      that.$element.trigger('hidden.bs.' + that.type)\n    }\n\n    this.$element.trigger(e)\n\n    if (e.isDefaultPrevented()) return\n\n    $tip.removeClass('in')\n\n    $.support.transition && this.$tip.hasClass('fade') ?\n      $tip\n        .one($.support.transition.end, complete)\n        .emulateTransitionEnd(150) :\n      complete()\n\n    this.hoverState = null\n\n    return this\n  }\n\n  Tooltip.prototype.fixTitle = function () {\n    var $e = this.$element\n    if ($e.attr('title') || typeof($e.attr('data-original-title')) != 'string') {\n      $e.attr('data-original-title', $e.attr('title') || '').attr('title', '')\n    }\n  }\n\n  Tooltip.prototype.hasContent = function () {\n    return this.getTitle()\n  }\n\n  Tooltip.prototype.getPosition = function () {\n    var el = this.$element[0]\n    return $.extend({}, (typeof el.getBoundingClientRect == 'function') ? 
el.getBoundingClientRect() : {\n      width: el.offsetWidth,\n      height: el.offsetHeight\n    }, this.$element.offset())\n  }\n\n  Tooltip.prototype.getCalculatedOffset = function (placement, pos, actualWidth, actualHeight) {\n    return placement == 'bottom' ? { top: pos.top + pos.height,   left: pos.left + pos.width / 2 - actualWidth / 2  } :\n           placement == 'top'    ? { top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2  } :\n           placement == 'left'   ? { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth } :\n        /* placement == 'right' */ { top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width   }\n  }\n\n  Tooltip.prototype.getTitle = function () {\n    var title\n    var $e = this.$element\n    var o  = this.options\n\n    title = $e.attr('data-original-title')\n      || (typeof o.title == 'function' ? o.title.call($e[0]) :  o.title)\n\n    return title\n  }\n\n  Tooltip.prototype.tip = function () {\n    return this.$tip = this.$tip || $(this.options.template)\n  }\n\n  Tooltip.prototype.arrow = function () {\n    return this.$arrow = this.$arrow || this.tip().find('.tooltip-arrow')\n  }\n\n  Tooltip.prototype.validate = function () {\n    if (!this.$element[0].parentNode) {\n      this.hide()\n      this.$element = null\n      this.options  = null\n    }\n  }\n\n  Tooltip.prototype.enable = function () {\n    this.enabled = true\n  }\n\n  Tooltip.prototype.disable = function () {\n    this.enabled = false\n  }\n\n  Tooltip.prototype.toggleEnabled = function () {\n    this.enabled = !this.enabled\n  }\n\n  Tooltip.prototype.toggle = function (e) {\n    var self = e ? $(e.currentTarget)[this.type](this.getDelegateOptions()).data('bs.' + this.type) : this\n    self.tip().hasClass('in') ? self.leave(self) : self.enter(self)\n  }\n\n  Tooltip.prototype.destroy = function () {\n    clearTimeout(this.timeout)\n    this.hide().$element.off('.' + this.type).removeData('bs.' 
+ this.type)\n  }\n\n\n  // TOOLTIP PLUGIN DEFINITION\n  // =========================\n\n  var old = $.fn.tooltip\n\n  $.fn.tooltip = function (option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.tooltip')\n      var options = typeof option == 'object' && option\n\n      if (!data && option == 'destroy') return\n      if (!data) $this.data('bs.tooltip', (data = new Tooltip(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  $.fn.tooltip.Constructor = Tooltip\n\n\n  // TOOLTIP NO CONFLICT\n  // ===================\n\n  $.fn.tooltip.noConflict = function () {\n    $.fn.tooltip = old\n    return this\n  }\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: popover.js v3.1.0\n * http://getbootstrap.com/javascript/#popovers\n * ========================================================================\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // POPOVER PUBLIC CLASS DEFINITION\n  // ===============================\n\n  var Popover = function (element, options) {\n    this.init('popover', element, options)\n  }\n\n  if (!$.fn.tooltip) throw new Error('Popover requires tooltip.js')\n\n  Popover.DEFAULTS = $.extend({}, $.fn.tooltip.Constructor.DEFAULTS, {\n    placement: 'right',\n    trigger: 'click',\n    content: '',\n    template: '<div class=\"popover\"><div class=\"arrow\"></div><h3 class=\"popover-title\"></h3><div class=\"popover-content\"></div></div>'\n  })\n\n\n  // NOTE: POPOVER EXTENDS tooltip.js\n  // ================================\n\n  Popover.prototype = $.extend({}, $.fn.tooltip.Constructor.prototype)\n\n  Popover.prototype.constructor = Popover\n\n  Popover.prototype.getDefaults = function () {\n    return Popover.DEFAULTS\n  }\n\n  Popover.prototype.setContent = function () {\n    var $tip    = this.tip()\n    var title   = this.getTitle()\n    var content = this.getContent()\n\n    $tip.find('.popover-title')[this.options.html ? 'html' : 'text'](title)\n    $tip.find('.popover-content')[ // we use append for html objects to maintain js events\n      this.options.html ? (typeof content == 'string' ? 
'html' : 'append') : 'text'\n    ](content)\n\n    $tip.removeClass('fade top bottom left right in')\n\n    // IE8 doesn't accept hiding via the `:empty` pseudo selector, we have to do\n    // this manually by checking the contents.\n    if (!$tip.find('.popover-title').html()) $tip.find('.popover-title').hide()\n  }\n\n  Popover.prototype.hasContent = function () {\n    return this.getTitle() || this.getContent()\n  }\n\n  Popover.prototype.getContent = function () {\n    var $e = this.$element\n    var o  = this.options\n\n    return $e.attr('data-content')\n      || (typeof o.content == 'function' ?\n            o.content.call($e[0]) :\n            o.content)\n  }\n\n  Popover.prototype.arrow = function () {\n    return this.$arrow = this.$arrow || this.tip().find('.arrow')\n  }\n\n  Popover.prototype.tip = function () {\n    if (!this.$tip) this.$tip = $(this.options.template)\n    return this.$tip\n  }\n\n\n  // POPOVER PLUGIN DEFINITION\n  // =========================\n\n  var old = $.fn.popover\n\n  $.fn.popover = function (option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.popover')\n      var options = typeof option == 'object' && option\n\n      if (!data && option == 'destroy') return\n      if (!data) $this.data('bs.popover', (data = new Popover(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  $.fn.popover.Constructor = Popover\n\n\n  // POPOVER NO CONFLICT\n  // ===================\n\n  $.fn.popover.noConflict = function () {\n    $.fn.popover = old\n    return this\n  }\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: scrollspy.js v3.1.0\n * http://getbootstrap.com/javascript/#scrollspy\n * ========================================================================\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // SCROLLSPY CLASS DEFINITION\n  // ==========================\n\n  function ScrollSpy(element, options) {\n    var href\n    var process  = $.proxy(this.process, this)\n\n    this.$element       = $(element).is('body') ? $(window) : $(element)\n    this.$body          = $('body')\n    this.$scrollElement = this.$element.on('scroll.bs.scroll-spy.data-api', process)\n    this.options        = $.extend({}, ScrollSpy.DEFAULTS, options)\n    this.selector       = (this.options.target\n      || ((href = $(element).attr('href')) && href.replace(/.*(?=#[^\\s]+$)/, '')) //strip for ie7\n      || '') + ' .nav li > a'\n    this.offsets        = $([])\n    this.targets        = $([])\n    this.activeTarget   = null\n\n    this.refresh()\n    this.process()\n  }\n\n  ScrollSpy.DEFAULTS = {\n    offset: 10\n  }\n\n  ScrollSpy.prototype.refresh = function () {\n    var offsetMethod = this.$element[0] == window ? 
'offset' : 'position'\n\n    this.offsets = $([])\n    this.targets = $([])\n\n    var self     = this\n    var $targets = this.$body\n      .find(this.selector)\n      .map(function () {\n        var $el   = $(this)\n        var href  = $el.data('target') || $el.attr('href')\n        var $href = /^#./.test(href) && $(href)\n\n        return ($href\n          && $href.length\n          && $href.is(':visible')\n          && [[ $href[offsetMethod]().top + (!$.isWindow(self.$scrollElement.get(0)) && self.$scrollElement.scrollTop()), href ]]) || null\n      })\n      .sort(function (a, b) { return a[0] - b[0] })\n      .each(function () {\n        self.offsets.push(this[0])\n        self.targets.push(this[1])\n      })\n  }\n\n  ScrollSpy.prototype.process = function () {\n    var scrollTop    = this.$scrollElement.scrollTop() + this.options.offset\n    var scrollHeight = this.$scrollElement[0].scrollHeight || this.$body[0].scrollHeight\n    var maxScroll    = scrollHeight - this.$scrollElement.height()\n    var offsets      = this.offsets\n    var targets      = this.targets\n    var activeTarget = this.activeTarget\n    var i\n\n    if (scrollTop >= maxScroll) {\n      return activeTarget != (i = targets.last()[0]) && this.activate(i)\n    }\n\n    if (activeTarget && scrollTop <= offsets[0]) {\n      return activeTarget != (i = targets[0]) && this.activate(i)\n    }\n\n    for (i = offsets.length; i--;) {\n      activeTarget != targets[i]\n        && scrollTop >= offsets[i]\n        && (!offsets[i + 1] || scrollTop <= offsets[i + 1])\n        && this.activate( targets[i] )\n    }\n  }\n\n  ScrollSpy.prototype.activate = function (target) {\n    this.activeTarget = target\n\n    $(this.selector)\n      .parentsUntil(this.options.target, '.active')\n      .removeClass('active')\n\n    var selector = this.selector +\n        '[data-target=\"' + target + '\"],' +\n        this.selector + '[href=\"' + target + '\"]'\n\n    var active = $(selector)\n      .parents('li')\n      .addClass('active')\n\n    if (active.parent('.dropdown-menu').length) {\n      active = active\n        .closest('li.dropdown')\n        .addClass('active')\n    }\n\n    active.trigger('activate.bs.scrollspy')\n  }\n\n\n  // SCROLLSPY PLUGIN DEFINITION\n  // ===========================\n\n  var old = $.fn.scrollspy\n\n  $.fn.scrollspy = function (option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.scrollspy')\n      var options = typeof option == 'object' && option\n\n      if (!data) $this.data('bs.scrollspy', (data = new ScrollSpy(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  $.fn.scrollspy.Constructor = ScrollSpy\n\n\n  // SCROLLSPY NO CONFLICT\n  // =====================\n\n  $.fn.scrollspy.noConflict = function () {\n    $.fn.scrollspy = old\n    return this\n  }\n\n\n  // SCROLLSPY DATA-API\n  // ==================\n\n  $(window).on('load', function () {\n    $('[data-spy=\"scroll\"]').each(function () {\n      var $spy = $(this)\n      $spy.scrollspy($spy.data())\n    })\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: tab.js v3.1.0\n * http://getbootstrap.com/javascript/#tabs\n * ========================================================================\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== 
*/\n\n\n+function ($) {\n  'use strict';\n\n  // TAB CLASS DEFINITION\n  // ====================\n\n  var Tab = function (element) {\n    this.element = $(element)\n  }\n\n  Tab.prototype.show = function () {\n    var $this    = this.element\n    var $ul      = $this.closest('ul:not(.dropdown-menu)')\n    var selector = $this.data('target')\n\n    if (!selector) {\n      selector = $this.attr('href')\n      selector = selector && selector.replace(/.*(?=#[^\\s]*$)/, '') //strip for ie7\n    }\n\n    if ($this.parent('li').hasClass('active')) return\n\n    var previous = $ul.find('.active:last a')[0]\n    var e        = $.Event('show.bs.tab', {\n      relatedTarget: previous\n    })\n\n    $this.trigger(e)\n\n    if (e.isDefaultPrevented()) return\n\n    var $target = $(selector)\n\n    this.activate($this.parent('li'), $ul)\n    this.activate($target, $target.parent(), function () {\n      $this.trigger({\n        type: 'shown.bs.tab',\n        relatedTarget: previous\n      })\n    })\n  }\n\n  Tab.prototype.activate = function (element, container, callback) {\n    var $active    = container.find('> .active')\n    var transition = callback\n      && $.support.transition\n      && $active.hasClass('fade')\n\n    function next() {\n      $active\n        .removeClass('active')\n        .find('> .dropdown-menu > .active')\n        .removeClass('active')\n\n      element.addClass('active')\n\n      if (transition) {\n        element[0].offsetWidth // reflow for transition\n        element.addClass('in')\n      } else {\n        element.removeClass('fade')\n      }\n\n      if (element.parent('.dropdown-menu')) {\n        element.closest('li.dropdown').addClass('active')\n      }\n\n      callback && callback()\n    }\n\n    transition ?\n      $active\n        .one($.support.transition.end, next)\n        .emulateTransitionEnd(150) :\n      next()\n\n    $active.removeClass('in')\n  }\n\n\n  // TAB PLUGIN DEFINITION\n  // =====================\n\n  var old = $.fn.tab\n\n  $.fn.tab = function ( option ) {\n    return this.each(function () {\n      var $this = $(this)\n      var data  = $this.data('bs.tab')\n\n      if (!data) $this.data('bs.tab', (data = new Tab(this)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  $.fn.tab.Constructor = Tab\n\n\n  // TAB NO CONFLICT\n  // ===============\n\n  $.fn.tab.noConflict = function () {\n    $.fn.tab = old\n    return this\n  }\n\n\n  // TAB DATA-API\n  // ============\n\n  $(document).on('click.bs.tab.data-api', '[data-toggle=\"tab\"], [data-toggle=\"pill\"]', function (e) {\n    e.preventDefault()\n    $(this).tab('show')\n  })\n\n}(jQuery);\n\n/* ========================================================================\n * Bootstrap: affix.js v3.1.0\n * http://getbootstrap.com/javascript/#affix\n * ========================================================================\n * Copyright 2011-2014 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * ======================================================================== */\n\n\n+function ($) {\n  'use strict';\n\n  // AFFIX CLASS DEFINITION\n  // ======================\n\n  var Affix = function (element, options) {\n    this.options = $.extend({}, Affix.DEFAULTS, options)\n    this.$window = $(window)\n      .on('scroll.bs.affix.data-api', $.proxy(this.checkPosition, this))\n      .on('click.bs.affix.data-api',  $.proxy(this.checkPositionWithEventLoop, this))\n\n    this.$element     = $(element)\n    this.affixed      =\n    
this.unpin        =\n    this.pinnedOffset = null\n\n    this.checkPosition()\n  }\n\n  Affix.RESET = 'affix affix-top affix-bottom'\n\n  Affix.DEFAULTS = {\n    offset: 0\n  }\n\n  Affix.prototype.getPinnedOffset = function () {\n    if (this.pinnedOffset) return this.pinnedOffset\n    this.$element.removeClass(Affix.RESET).addClass('affix')\n    var scrollTop = this.$window.scrollTop()\n    var position  = this.$element.offset()\n    return (this.pinnedOffset = position.top - scrollTop)\n  }\n\n  Affix.prototype.checkPositionWithEventLoop = function () {\n    setTimeout($.proxy(this.checkPosition, this), 1)\n  }\n\n  Affix.prototype.checkPosition = function () {\n    if (!this.$element.is(':visible')) return\n\n    var scrollHeight = $(document).height()\n    var scrollTop    = this.$window.scrollTop()\n    var position     = this.$element.offset()\n    var offset       = this.options.offset\n    var offsetTop    = offset.top\n    var offsetBottom = offset.bottom\n\n    if (this.affixed == 'top') position.top += scrollTop\n\n    if (typeof offset != 'object')         offsetBottom = offsetTop = offset\n    if (typeof offsetTop == 'function')    offsetTop    = offset.top(this.$element)\n    if (typeof offsetBottom == 'function') offsetBottom = offset.bottom(this.$element)\n\n    var affix = this.unpin   != null && (scrollTop + this.unpin <= position.top) ? false :\n                offsetBottom != null && (position.top + this.$element.height() >= scrollHeight - offsetBottom) ? 'bottom' :\n                offsetTop    != null && (scrollTop <= offsetTop) ? 'top' : false\n\n    if (this.affixed === affix) return\n    if (this.unpin) this.$element.css('top', '')\n\n    var affixType = 'affix' + (affix ? '-' + affix : '')\n    var e         = $.Event(affixType + '.bs.affix')\n\n    this.$element.trigger(e)\n\n    if (e.isDefaultPrevented()) return\n\n    this.affixed = affix\n    this.unpin = affix == 'bottom' ? this.getPinnedOffset() : null\n\n    this.$element\n      .removeClass(Affix.RESET)\n      .addClass(affixType)\n      .trigger($.Event(affixType.replace('affix', 'affixed')))\n\n    if (affix == 'bottom') {\n      this.$element.offset({ top: scrollHeight - offsetBottom - this.$element.height() })\n    }\n  }\n\n\n  // AFFIX PLUGIN DEFINITION\n  // =======================\n\n  var old = $.fn.affix\n\n  $.fn.affix = function (option) {\n    return this.each(function () {\n      var $this   = $(this)\n      var data    = $this.data('bs.affix')\n      var options = typeof option == 'object' && option\n\n      if (!data) $this.data('bs.affix', (data = new Affix(this, options)))\n      if (typeof option == 'string') data[option]()\n    })\n  }\n\n  $.fn.affix.Constructor = Affix\n\n\n  // AFFIX NO CONFLICT\n  // =================\n\n  $.fn.affix.noConflict = function () {\n    $.fn.affix = old\n    return this\n  }\n\n\n  // AFFIX DATA-API\n  // ==============\n\n  $(window).on('load', function () {\n    $('[data-spy=\"affix\"]').each(function () {\n      var $spy = $(this)\n      var data = $spy.data()\n\n      data.offset = data.offset || {}\n\n      if (data.offsetBottom) data.offset.bottom = data.offsetBottom\n      if (data.offsetTop)    data.offset.top    = data.offsetTop\n\n      $spy.affix(data)\n    })\n  })\n\n}(jQuery);\n"
  },
  {
    "path": "doc/pysdk_pdoc.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\"\"\"pysdk_pdoc.py - Run pdoc with extra rendering options\n\nThis script is a wrapper around the standard `pdoc` tool that enables the\n`admonitions` and `smarty-pants` extras for nicer rendering.\n\nIf run without arguments, it uses arguments to build the Arvados Python SDK\ndocumentation.\n\"\"\"\n\nimport collections\nimport functools\nimport os\nimport sys\n\nfrom pathlib import Path\n\ntry:\n    import pdoc.__main__\n    import pdoc.render_helpers\nexcept ImportError as err:\n    if __name__ == '__main__':\n        _imp_err = err\n    else:\n        raise\nelse:\n    _imp_err = None\n\ntry:\n    import arvados\nexcept ImportError:\n    DEFAULT_ARGLIST = []\nelse:\n    DEFAULT_ARGLIST = [\n        '--output-directory=sdk/python',\n        str(Path(arvados.__file__).parent),\n        # Because the module is prviate, pdoc does not build documentation for any\n        # of it. The exclusion below additionally prevents pdoc from hyperlinking\n        # references under arvados._internal that appear in method signatures, etc.\n        '!arvados._internal',\n    ]\n\nMD_EXTENSIONS = {\n    'admonitions': None,\n    'smarty-pants': None,\n}\n\ndef main(arglist=None):\n    if _imp_err is not None:\n        print(\"error: failed to import pdoc:\", _imp_err, file=sys.stderr)\n        return os.EX_SOFTWARE\n\n    # Configure pdoc to use extras we want.\n    pdoc.render_helpers.markdown_extensions = collections.ChainMap(\n        pdoc.render_helpers.markdown_extensions,\n        MD_EXTENSIONS,\n    )\n    pdoc.__main__.cli(arglist)\n    return os.EX_OK\n\nif __name__ == '__main__':\n    sys.exit(main(sys.argv[1:] or DEFAULT_ARGLIST))\n"
  },
  {
    "path": "doc/sdk/cli/index.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Command line tools (CLI SDK)\ntitle: \"Overview\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe @arv@ CLI tool provide provides a convenient interface to manipulate API resources. Additionally, it provides access to a number of subcommands.\n\nh3. Syntax\n\nThe @arv@ command takes the following arguments:\n\n<pre>\nArvados command line client\nUsage: arv [--flags] subcommand|resource [method] [--parameters]\n\nAvailable flags:\n  -n, --dry-run       Don't actually do anything\n  -v, --verbose       Print some things on stderr\n  -f, --format=<s>    Set the output format. Must be one of json (default),\n                      yaml or uuid. (Default: json)\n  -s, --short         Return only UUIDs (equivalent to --format=uuid)\n\nUse 'arv subcommand|resource --help' to get more information about a particular\ncommand or resource.\n\nAvailable subcommands: copy, create, edit, keep, run, tag, ws\n\nAvailable resources: api_client_authorization, api_client,\nauthorized_key, collection, container, container_request,\nuser_agreement, group, keep_service, link, log, user, virtual_machine,\nworkflow\n\nAdditional options:\n  -e, --version       Print version and exit\n  -h, --help          Show this message\n</pre>\n\nh4. Flags: @--format@\n\n- @--format=json@ := Output response as JSON. This is the default format.\n\n- @--format=yaml@ := Output response as YAML\n\n- @--format=uuid@ := Output only the UUIDs of object(s) in the API response, one per line.\n\n\n\nh3. Resources\n\nSee the \"arv reference\":{{site.baseurl}}/sdk/cli/reference.html page.\n\nh3. Subcommands\n\nSee the \"arv subcommands\":{{site.baseurl}}/sdk/cli/subcommands.html page.\n"
  },
  {
    "path": "doc/sdk/cli/install.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Command line tools (CLI SDK)\ntitle: \"Installation\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados CLI tools are written in Ruby and Python.  To use the @arv@ command, you can either install the @arvados-cli@ gem via RubyGems or build and install the package from source.  The @arv@ command also relies on other Arvados tools.  To get those, install the @arvados-python-client@ and @arvados-cwl-runner@ packages, either from PyPI or source.\n\nh2. Prerequisites\n\n# \"Install Ruby\":../../install/ruby.html\n# \"Install the Python SDK\":../python/sdk-python.html\n\nThe SDK uses @curl@ which depends on the @libcurl@ C library.  To build the module you may have to install additional packages.  On supported versions of Debian and Ubuntu, run:\n\n<notextile>\n<pre><code>\n# <span class=\"userinput\">apt install build-essential libcurl4-openssl-dev\n</code></pre>\n</notextile>\n\nh2. Install from RubyGems\n\n<notextile>\n<pre>\n# <code class=\"userinput\">gem install arvados-cli</code>\n</pre>\n</notextile>\n"
  },
  {
    "path": "doc/sdk/cli/reference.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Command line tools (CLI SDK)\ntitle: \"arv reference\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n_In order to use the @arv@ command, make sure that you have a \"working environment.\":{{site.baseurl}}/user/getting_started/check-environment.html_\n\nh3. Usage\n\nSee the \"CLI overview\":{{site.baseurl}}/sdk/cli/index.html page.\n\nh3. Resource types and methods\n\nGet list of resource types\n@arv --help@\n\nGet list of resource methods for the \"user\" resource type\n@arv user --help@\n\n\nh3. Basic examples\n\nGet record for current user\n@arv user current@\n\nGet entire record for some specific user\n@arv user get --uuid 6dnxa-tpzed-iimd25zhzh84gbk@\n\nUpdate user record\n@arv user update --uuid 6dnxa-tpzed-iimd25zhzh84gbk --user '{\"first_name\":\"Bob\"}'@\n\nGet list of groups\n@arv group list@\n\nDelete a group\n@arv group delete --uuid 6dnxa-j7d0g-iw7i6n43d37jtog@\n\nCreate an empty collection\n@arv collection create --collection '{\"name\": \"test collection\"}'@\n\nh3. Common commands\n\nMost @arv@ resources accept the following commands:\n\n* @get@\n* @list@\n* @create@\n* @update@\n* @delete@\n\n\nh4. @list@\n\nArguments accepted by the @list@ subcommand include:\n\n<pre>\n  -l, --limit=<i>        Maximum number of items to return. (Default: 100)\n  -o, --offset=<i>       Number of items to skip before first returned record. (Default: 0)\n  -f, --filters=<s>      Conditions for filtering items.\n  -r, --order=<s>        Order in which to return matching items.\n  -s, --select=<s>       Select which fields to return.\n  -d, --distinct         Return each distinct object.\n  -c, --count=<s>        Type of count to return in items_available ('none' or 'exact'). (Default: exact)\n</pre>\n\nThe @--filters@ option takes a string describing a JSON list of filters on which the returned resources should be returned. Each filter is a three-element list of _[field, operator, value]_, where the _operator_ may be one of @=@, @<@, @<=@, @>@, @>=@, @!=@, @like@, or @ilike@.\n\nExample:\n\n@arv collection list --filters '[[\"name\", \"=\", \"PGP VAR inputs\"], [\"created_at\", \">=\", \"2014-10-01\"]]'@\n\nwill return a list of all collections visible to the current user which are named \"PGP VAR inputs\" and were created on or after October 1, 2014. See the \"Common resource methods\":{{site.baseurl}}/api/methods.html#index page for more details on using @list@ and @--filters@.\n"
  },
  {
    "path": "doc/sdk/cli/subcommands.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Command line tools (CLI SDK)\ntitle: \"arv subcommands\"\n\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n_In order to use the @arv@ command, make sure that you have a \"working environment.\":{{site.baseurl}}/user/getting_started/check-environment.html_\n\nh3(#arv-create). arv create\n\n@arv create@ can be used to create Arvados objects from the command line. Arv create opens up the editor of your choice (set the EDITOR environment variable) and allows you to type or paste a json or yaml description. When saved the object will be created on the API server, if it passes validation.\n\n<notextile>\n<pre>\n$ <code class=\"userinput\">arv create --help</code>\nOptions:\n  --project-uuid, -p &lt;s&gt;:   Project uuid in which to create the object\n              --help, -h:   Show this message\n</pre>\n</notextile>\n\nh3(#arv-get). arv get\n\n@arv get@ can be used to get a textual representation of Arvados objects from the command line. The output can be limited to a subset of the object's fields. This command can be used with only the knowledge of an object's UUID.\n\n<notextile>\n<pre>\n$ <code class=\"userinput\">arv get --help</code>\nUsage: arv [--format json|yaml] get [uuid] [fields...]\n\nFetch the specified Arvados object, select the specified fields,\nand print a text representation.\n</pre>\n</notextile>\n\nh3(#arv-edit). arv edit\n\n@arv edit@ can be used to edit Arvados objects from the command line. Arv edit opens up the editor of your choice (set the EDITOR environment variable) with the json or yaml description of the object. Saving the file will update the Arvados object on the API server, if it passes validation.\n\n<notextile>\n<pre>\n$ <code class=\"userinput\">arv edit --help</code>\nArvados command line client\nUsage: arv edit [uuid] [fields...]\n\nFetch the specified Arvados object, select the specified fields,\nopen an interactive text editor on a text representation (json or\nyaml, use --format) and then update the object.  Will use 'nano'\nby default, customize with the EDITOR or VISUAL environment variable.\n</pre>\n</notextile>\n\nh3(#arv-copy). arv copy\n\n@arv copy@ can be used to copy a pipeline instance, template or collection from one Arvados instance to another. It takes care of copying the object and all its dependencies.\n\n<notextile>\n<pre>\n$ <code class=\"userinput\">arv copy --help</code>\nusage: arv-copy [-h] [--version] [-v] [--progress] [--no-progress] [-f]\n                [--src SOURCE_ARVADOS] [--dst DESTINATION_ARVADOS]\n                [--recursive] [--no-recursive] [--project-uuid PROJECT_UUID]\n                [--replication N] [--storage-classes STORAGE_CLASSES]\n                [--varying-url-params VARYING_URL_PARAMS]\n                [--prefer-cached-downloads] [--retries RETRIES]\n                object_uuid\n\nCopy a workflow, collection or project from one Arvados instance to another.\nOn success, the uuid of the copied object is printed to stdout.\n\npositional arguments:\n  object_uuid           The UUID of the object to be copied.\n\noptional arguments:\n  -h, --help            show this help message and exit\n  --version             Print version and exit.\n  -v, --verbose         Verbose output.\n  --progress            Report progress on copying collections. 
(default)\n  --no-progress         Do not report progress on copying collections.\n  -f, --force           Perform copy even if the object appears to exist at\n                        the remote destination.\n  --src SOURCE_ARVADOS  Client configuration location for the source Arvados\n                        cluster. May be either a configuration file path, or a\n                        plain identifier like `foo` to search for a\n                        configuration file `foo.conf` under a systemd or XDG\n                        configuration directory. If not provided, will search\n                        for a configuration file named after the cluster ID of\n                        the source object UUID.\n  --dst DESTINATION_ARVADOS\n                        Client configuration location for the destination\n                        Arvados cluster. May be either a configuration file\n                        path, or a plain identifier like `foo` to search for a\n                        configuration file `foo.conf` under a systemd or XDG\n                        configuration directory. If not provided, will use the\n                        default client configuration from the environment or\n                        `settings.conf`.\n  --recursive           Recursively copy any dependencies for this object, and\n                        subprojects. (default)\n  --no-recursive        Do not copy any dependencies or subprojects.\n  --project-uuid PROJECT_UUID\n                        The UUID of the project at the destination to which\n                        the collection or workflow should be copied.\n  --replication N\n                        Number of replicas per storage class for the copied\n                        collections at the destination. If not provided (or if\n                        provided with invalid value), use the destination's\n                        default replication-level setting (if found), or the\n                        fallback value 2.\n  --storage-classes STORAGE_CLASSES\n                        Comma separated list of storage classes to be used\n                        when saving data to the destination Arvados instance.\n  --varying-url-params VARYING_URL_PARAMS\n                        A comma separated list of URL query parameters that\n                        should be ignored when storing HTTP URLs in Keep.\n  --prefer-cached-downloads\n                        If a HTTP URL is found in Keep, skip upstream URL\n                        freshness check (will not notice if the upstream has\n                        changed, but also not error if upstream is\n                        unavailable).\n  --retries RETRIES     Maximum number of times to retry server requests that\n                        encounter temporary failures (e.g., server down).\n                        Default 10.\n</pre>\n</notextile>\n\nh3(#arv-tag). arv tag\n\n@arv tag@ is used to tag Arvados objects.\n\n<notextile>\n<pre>\n$ <code class=\"userinput\">arv tag --help</code>\n\nUsage:\narv tag add tag1 [tag2 ...] --object object_uuid1 [object_uuid2...]\narv tag remove tag1 [tag2 ...] 
--object object_uuid1 [object_uuid2...]\narv tag remove --all\n\n  --dry-run, -n:   Don't actually do anything\n  --verbose, -v:   Print some things on stderr\n     --uuid, -u:   Return the UUIDs of the objects in the response, one per\n                   line (default)\n     --json, -j:   Return the entire response received from the API server, as\n                   a JSON object\n    --human, -h:   Return the response received from the API server, as a JSON\n                   object with whitespace added for human consumption\n   --pretty, -p:   Synonym of --human\n     --yaml, -y:   Return the response received from the API server, in YAML\n                   format\n     --help, -e:   Show this message\n</pre>\n</notextile>\n\n\nh3(#arv-ws). arv ws\n\nThis is a frontend to @arv-ws@.\n\n@arv ws@ provides access to the websockets event stream.\n\n<notextile>\n<pre>\n$ <code class=\"userinput\">arv ws --help</code>\nusage: arv-ws [-h] [-u UUID] [-f FILTERS]\n              [--poll-interval POLL_INTERVAL | --no-poll]\n              [-p PIPELINE | -j JOB]\n\noptional arguments:\n  -h, --help            show this help message and exit\n  -u UUID, --uuid UUID  Filter events on object_uuid\n  -f FILTERS, --filters FILTERS\n                        Arvados query filter to apply to log events (JSON\n                        encoded)\n  --poll-interval POLL_INTERVAL\n                        If websockets is not available, specify the polling\n                        interval, default is every 15 seconds\n  --no-poll             Do not poll if websockets are not available, just fail\n  -p PIPELINE, --pipeline PIPELINE\n                        Supply pipeline uuid, print log output from pipeline\n                        and its jobs\n  -j JOB, --job JOB     Supply job uuid, print log output from jobs\n</pre>\n</notextile>\n\nh3(#arv-keep). arv keep\n\n@arv keep@ commands for accessing the Keep storage service.\n\n<notextile>\n<pre>\n$ <code class=\"userinput\">arv keep --help</code>\nUsage: arv keep [method] [--parameters]\nUse 'arv keep [method] --help' to get more information about specific methods.\n\nAvailable methods: ls, get, put, docker\n</pre>\n</notextile>\n\nh3(#arv-keep-ls). arv keep ls\n\nThis is a frontend to @arv-ls@.\n\n<notextile>\n<pre>\n$ <code class=\"userinput\">arv keep ls --help</code>\nusage: arv-ls [-h] [--retries RETRIES] [-s] locator\n\nList contents of a manifest\n\npositional arguments:\n  locator            Collection UUID or locator\n\noptional arguments:\n  -h, --help         show this help message and exit\n  --retries RETRIES  Maximum number of times to retry server requests that\n                     encounter temporary failures (e.g., server down). Default\n                     3.\n  -s                 List file sizes, in KiB.\n</pre>\n</notextile>\n\nh3(#arv-keep-get). 
arv keep get\n\nThis is a frontend to @arv-get@.\n\n<notextile>\n<pre>\n$ <code class=\"userinput\">arv keep get --help</code>\nusage: arv-get [-h] [--retries RETRIES] [--version]\n               [--progress | --no-progress | --batch-progress]\n               [--hash HASH | --md5sum] [-n] [-r]\n               [-f | -v | --skip-existing | --strip-manifest] [--threads N]\n               locator [destination]\n\nCopy data from Keep to a local file or pipe.\n\npositional arguments:\n  locator            Collection locator, optionally with a file path or\n                     prefix.\n  destination        Local file or directory where the data is to be written.\n                     Default: stdout.\n\noptional arguments:\n  -h, --help         show this help message and exit\n  --retries RETRIES  Maximum number of times to retry server requests that\n                     encounter temporary failures (e.g., server down).\n                     Default 3.\n  --version          Print version and exit.\n  --progress         Display human-readable progress on stderr (bytes and, if\n                     possible, percentage of total data size). This is the\n                     default behavior when it is not expected to interfere\n                     with the output: specifically, stderr is a tty _and_\n                     either stdout is not a tty, or output is being written\n                     to named files rather than stdout.\n  --no-progress      Do not display human-readable progress on stderr.\n  --batch-progress   Display machine-readable progress on stderr (bytes and,\n                     if known, total data size).\n  --hash HASH        Display the hash of each file as it is read from Keep,\n                     using the given hash algorithm. Supported algorithms\n                     include md5, sha1, sha224, sha256, sha384, and sha512.\n  --md5sum           Display the MD5 hash of each file as it is read from\n                     Keep.\n  -n                 Do not write any data -- just read from Keep, and report\n                     md5sums if requested.\n  -r                 Retrieve all files in the specified collection/prefix.\n                     This is the default behavior if the \"locator\" argument\n                     ends with a forward slash.\n  -f                 Overwrite existing files while writing. The default\n                     behavior is to refuse to write *anything* if any of the\n                     output files already exist. As a special case, -f is not\n                     needed to write to stdout.\n  -v                 Once for verbose mode, twice for debug mode.\n  --skip-existing    Skip files that already exist. The default behavior is\n                     to refuse to write *anything* if any files exist that\n                     would have to be overwritten. This option causes even\n                     devices, sockets, and fifos to be skipped.\n  --strip-manifest   When getting a collection manifest, strip its access\n                     tokens before writing it.\n  --threads N        Set the number of download threads to be used. Take into\n                     account that using lots of threads will increase the RAM\n                     requirements. Default is to use 4 threads. On high\n                     latency installations, using a greater number will\n                     improve overall throughput.\n</pre>\n</notextile>\n\nh3(#arv-keep-put). 
arv keep put\n\nThis is a frontend to @arv-put@.\n\n<notextile>\n<pre>\n$ <code class=\"userinput\">arv keep put --help</code>\nusage: arv-put [-h] [--max-manifest-depth N | --normalize]\n               [--as-stream | --stream | --as-manifest | --in-manifest | --manifest | --as-raw | --raw]\n               [--use-filename FILENAME] [--filename FILENAME]\n               [--portable-data-hash] [--replication N]\n               [--project-uuid UUID] [--name NAME]\n               [--progress | --no-progress | --batch-progress]\n               [--resume | --no-resume] [--retries RETRIES]\n               [path [path ...]]\n\nCopy data from the local filesystem to Keep.\n\npositional arguments:\n  path                  Local file or directory. Default: read from standard\n                        input.\n\noptional arguments:\n  -h, --help            show this help message and exit\n  --max-manifest-depth N\n                        Maximum depth of directory tree to represent in the\n                        manifest structure. A directory structure deeper than\n                        this will be represented as a single stream in the\n                        manifest. If N=0, the manifest will contain a single\n                        stream. Default: -1 (unlimited), i.e., exactly one\n                        manifest stream per filesystem directory that contains\n                        files.\n  --normalize           Normalize the manifest by re-ordering files and\n                        streams after writing data.\n  --as-stream           Synonym for --stream.\n  --stream              Store the file content and display the resulting\n                        manifest on stdout. Do not write the manifest to Keep\n                        or save a Collection object in Arvados.\n  --as-manifest         Synonym for --manifest.\n  --in-manifest         Synonym for --manifest.\n  --manifest            Store the file data and resulting manifest in Keep,\n                        save a Collection object in Arvados, and display the\n                        manifest locator (Collection uuid) on stdout. This is\n                        the default behavior.\n  --as-raw              Synonym for --raw.\n  --raw                 Store the file content and display the data block\n                        locators on stdout, separated by commas, with a\n                        trailing newline. Do not store a manifest.\n  --use-filename FILENAME\n                        Synonym for --filename.\n  --filename FILENAME   Use the given filename in the manifest, instead of the\n                        name of the local file. This is useful when \"-\" or\n                        \"/dev/stdin\" is given as an input file. It can be used\n                        only if there is exactly one path given and it is not\n                        a directory. Implies --manifest.\n  --portable-data-hash  Print the portable data hash instead of the Arvados\n                        UUID for the collection created by the upload.\n  --replication N       Set the replication level for the new collection: how\n                        many different physical storage devices (e.g., disks)\n                        should have a copy of each data block. 
Default is to\n                        use the server-provided default (if any) or 2.\n  --project-uuid UUID   Store the collection in the specified project, instead\n                        of your Home project.\n  --name NAME           Save the collection with the specified name.\n  --progress            Display human-readable progress on stderr (bytes and,\n                        if possible, percentage of total data size). This is\n                        the default behavior when stderr is a tty.\n  --no-progress         Do not display human-readable progress on stderr, even\n                        if stderr is a tty.\n  --batch-progress      Display machine-readable progress on stderr (bytes\n                        and, if known, total data size).\n  --resume              Continue interrupted uploads from cached state\n                        (default).\n  --no-resume           Do not continue interrupted uploads from cached state.\n  --retries RETRIES     Maximum number of times to retry server requests that\n                        encounter temporary failures (e.g., server down).\n                        Default 3.\n</pre>\n</notextile>\n"
  },
  {
    "path": "doc/sdk/fuse/install.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: FUSE Driver\ntitle: Installing the FUSE Driver\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe Arvados FUSE driver is a Python utility that allows you to browse Arvados projects and collections in a filesystem, so you can access that data using existing Unix tools.\n\nh2. Installation\n\nIf you are logged in to a managed Arvados VM, the @arv-mount@ utility should already be installed.\n\nTo use the FUSE driver elsewhere, you can install from a distribution package or pip.\n\nh2. Option 1: Install from distribution packages\n\nFirst, \"add the appropriate package repository for your distribution\":{{ site.baseurl }}/install/packages.html.\n\n{% assign arvados_component = 'python3-arvados-fuse' %}\n\n{% include 'install_packages' %}\n\nh2. Option 2: Install with pip\n\nRun @pip install arvados_fuse@ in an appropriate installation environment, such as a virtualenv.\n\nNote: The FUSE driver depends on the @libcurl@ and @libfuse@ C libraries.  To install the module you may need to install development headers from your distribution.  On Debian-based distributions you can install them by running:\n\n<notextile>\n<pre><code># <span class=\"userinput\">apt install build-essential python3-dev libcurl4-openssl-dev libfuse-dev libssl-dev</span>\n</code></pre>\n</notextile>\n\nh2. Usage\n\nFor an introduction of how to mount and navigate data, refer to the \"Access Keep as a GNU/Linux filesystem\":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-gnu-linux.html tutorial.\n"
  },
  {
    "path": "doc/sdk/fuse/options.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: FUSE Driver\ntitle: arv-mount options\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis page documents all available @arv-mount@ options with some usage examples.\n\n# \"Mount contents\":#contents\n# \"Mount custom layout and filtering\":#layout\n## \"@--filters@ usage and limitations\":#filters\n# \"Mount access and permissions\":#access\n# \"Mount lifecycle management\":#lifecycle\n# \"Mount logging and statistics\":#logging\n# \"Mount local cache setup\":#cache\n# \"Mount interactions with Arvados and Linux\":#plumbing\n# \"Examples\":#examples\n## \"Using @--exec@\":#exec\n## \"Running arv-mount as a systemd service\":#systemd\n\nh2(#contents). Mount contents\n\ntable(table table-bordered table-condensed).\n|_. Option(s)|_. Description|\n|@--all@|Mount a subdirectory for each mode: @home@, @shared@, @by_id@, and @by_tag@ (default if no @--mount-*@ options are given)|\n|@--custom@|Mount a subdirectory for each mode specified by a @--mount-*@ option (default if any @--mount-*@ options are given; see \"Mount custom layout and filtering\":#layout section)|\n|@--collection UUID_OR_PDH@|Mount the specified collection|\n|@--home@|Mount your home project|\n|@--project UUID@|Mount the specified project|\n|@--shared@|Mount a subdirectory for each project shared with you|\n|@--by-id@|Mount a magic directory where collections and projects are accessible through subdirectories named after their UUID or portable data hash|\n|@--by-pdh@|Mount a magic directory where collections are accessible through subdirectories named after their portable data hash|\n|@--by-tag@|Mount a subdirectory for each tag attached to a collection or project|\n\nh2(#layout). Mount custom layout and filtering\n\ntable(table table-bordered table-condensed).\n|_. Option(s)|_. Description|\n|@--filters FILTERS@|Filters to apply to all project, shared, and tag directory contents. Pass filters as either a JSON string or a path to a JSON file. The JSON object should be a list of filters in \"Arvados API list filter syntax\":{{ site.baseurl }}/api/methods.html#filters. See the \"example filters\":#filters.|\n|@--mount-home PATH@|Make your home project available under the mount at @PATH@|\n|@--mount-shared PATH@|Make projects shared with you available under the mount at @PATH@|\n|@--mount-tmp PATH@|Make a new temporary writable collection available under the mount at @PATH@. This collection is deleted when the mount is unmounted.|\n|@--mount-by-id PATH@|Make a magic directory available under the mount at @PATH@ where collections and projects are accessible through subdirectories named after their UUID or portable data hash|\n|@--mount-by-pdh PATH@|Make a magic directory available under the mount at @PATH@ where collections are accessible through subdirectories named after portable data hash|\n|@--mount-by-tag PATH@|Make a subdirectory for each tag attached to a collection or project available under the mount at @PATH@|\n\nh3(#filters). @--filters@ usage and limitations\n\nYour argument to @--filters@ should be a JSON list of filters in \"Arvados API list filter syntax\":{{ site.baseurl }}/api/methods.html#filters. 
If your filter checks any field besides @uuid@, you should prefix it with the @<resource type>.@ Taken together, here's an example that mounts your home directory excluding filter groups, workflow intermediate output collections, and workflow log collections:\n\n<notextile>\n<pre><code>$ arv-mount --home <span class=\"userinput\">--filters '[[\"groups.group_class\", \"!=\", \"filter\"], [\"collections.properties.type\", \"not in\", [\"intermediate\", \"log\"]]]'</span> ...\n</code></pre>\n</notextile>\n\nBecause filters can be awkward to write on the command line, you can also write them in a file, and pass that file path to the @--filters@ option. This example does the same filtering:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">cat &gt;~/arv-mount-filters.json &lt;&lt;EOF\n[\n  [\n    \"groups.group_class\",\n    \"!=\",\n    \"filter\"\n  ],\n  [\n    \"collections.properties.type\",\n    \"not in\",\n    [\n      \"intermediate\",\n      \"log\"\n    ]\n  ]\n]\nEOF</span>\n$ arv-mount --home <span class=\"userinput\">--filters ~/arv-mount-filters.json</span> ...\n</code></pre>\n</notextile>\n\nThe current implementation of @--filters@ has a few limitations. These may be lifted in a future release:\n\n* You can always access any project or collection by UUID or portable data hash under a magic directory. If you access a project this way, your filters _will_ apply to the project contents.\n* Tag directory listings are generated by querying tags alone. Only filters that apply to @links@ will affect these listings.\n\nh2(#access). Mount access and permissions\n\ntable(table table-bordered table-condensed).\n|_. Option(s)|_. Description|\n|@--allow-other@|Let other users on this system read mounted data (default false)|\n|@--read-only@|Mounted data cannot be modified from the mount (default)|\n|@--read-write@|Mounted data can be modified from the mount|\n\nh2(#lifecycle). Mount lifecycle management\n\ntable(table table-bordered table-condensed).\n|_. Option(s)|_. Description|\n|@--exec ...@|Mount data, run the specified command, then unmount and exit. @--exec@ reads all remaining options as the command to run, so it must be the last option you specify. Either end your command arguments (and other options) with a @--@ argument, or specify @--exec@ after your mount point.|\n|@--foreground@|Run mount process in the foreground instead of daemonizing (default false)|\n|@--subtype SUBTYPE@|Set mounted filesystem type to @fuse.SUBTYPE@ (default is just @fuse@)|\n|@--replace@|If a FUSE mount is already mounted at the given directory, unmount it before mounting the requested data. If @--subtype@ is specified, unmount only if the mount has that subtype. WARNING: This command can affect any kind of FUSE mount, not just arv-mount.|\n|@--unmount@|If a FUSE mount is already mounted at the given directory, unmount it and exit. If @--subtype@ is specified, unmount only if the mount has that subtype. WARNING: This command can affect any kind of FUSE mount, not just arv-mount.|\n|@--unmount-all@|Unmount all FUSE mounts at or below the given directory, then exit. If @--subtype@ is specified, unmount only if the mount has that subtype. WARNING: This command can affect any kind of FUSE mount, not just arv-mount.|\n|@--unmount-timeout SECONDS@|The number of seconds to wait for a clean unmount after an @--exec@ command has exited (default 2.0). After this time, the mount will be forcefully unmounted.|\n\nh2(#logging). Mount logging and statistics\n\ntable(table table-bordered table-condensed).\n|_. 
Option(s)|_. Description|\n|@--crunchstat-interval SECONDS@|Write stats to stderr every N seconds (default disabled)|\n|@--debug@|Log debug information|\n|@--logfile LOGFILE@|Write debug logs and errors to the specified file (default stderr)|\n\nh2(#cache). Mount local cache setup\n\ntable(table table-bordered table-condensed).\n|_. Option(s)|_. Description|\n|@--disk-cache@|Cache data on the local filesystem (default)|\n|@--ram-cache@|Cache data in memory|\n|@--disk-cache-dir DIRECTORY@|Set custom filesystem cache location|\n|@--directory-cache BYTES@|Size of directory data cache in bytes (default 128 MiB)|\n|@--file-cache BYTES@|Size of file data cache in bytes (default 8 GiB for filesystem cache, 256 MiB for memory cache)|\n\nh2(#plumbing). Mount interactions with Arvados and Linux\n\ntable(table table-bordered table-condensed).\n|_. Option(s)|_. Description|\n|@--disable-event-listening@|Don't subscribe to events on the API server to update mount contents|\n|@--encoding ENCODING@|Filesystem character encoding (default 'utf-8'; specify a name from the \"Python codec registry\":https://docs.python.org/3/library/codecs.html#standard-encodings)|\n|@--retries RETRIES@|Maximum number of times to retry server requests that encounter temporary failures (e.g., server down). Default 10.|\n|@--storage-classes CLASSES@|Comma-separated list of storage classes to request for new collections|\n\nh2(#examples). Examples\n\nh3(#exec). Using @--exec@\n\nThere are a couple of details that are important to understand when you use @--exec@:\n\n* @--exec@ reads all remaining options as the command to run, so it must be the last option you specify. Either end your command arguments (and other options) with a @--@ argument, or specify @--exec@ after your mount point.\n* The command you specify runs from the same directory that you started @arv-mount@ from. To access data inside the mount, you will generally need to pass the path to the mount as an argument.\n\nFor example, this generates a recursive listing of all the projects and collections under your home project:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv-mount --home --exec find -type d ArvadosHome -- ArvadosHome</span>\n</code></pre>\n</notextile>\n\nThe first @ArvadosHome@ is a path argument to @find@. The second is the mount point argument to @arv-mount@.\n\nh3(#systemd). Running arv-mount as a systemd service\n\nIf you want to run @arv-mount@ as a long-running service, it's easy to write a systemd service definition for it. We do not publish one because the entire definition tends to be site-specific, but you can start from this template. You must change the @ExecStart@ path. 
Comments detail other changes you might want to make.\n\n<notextile>\n<pre><code>[Unit]\nDescription=Arvados FUSE mount\nDocumentation={{ site.baseurl }}/sdk/fuse/options.html\n\n[Service]\nType=simple\n\n# arv-mount will cache data under a `keep` subdirectory of CacheDirectory.\n# If this is a system service installed under /etc/systemd/system,\n# the cache will be at /var/cache/arvados/keep.\n# The default value of `arvados` lets arv-mount share the cache with other\n# tools.\nCacheDirectory=arvados\n\n# arv-mount will get Arvados API credentials from the `settings.conf` file\n# under ConfigurationDirectory.\n# If this is a system service installed under /etc/systemd/system,\n# the configuration will be read from /etc/arvados/settings.conf.\n# The default value of `arvados` lets arv-mount read configuration from the\n# same location as other tools.\nConfigurationDirectory=arvados\n\n# This unit makes the mount available as `Arvados` under the runtime directory root.\n# If this is a system service installed under /etc/systemd/system,\n# the mount will be at /run/Arvados.\n# If this is a user service installed under ~/.config/systemd/user,\n# the mount will be at $XDG_RUNTIME_DIR/Arvados.\n# If you want to mount at another location on the filesystem, remove RuntimeDirectory\n# and replace both instances of %t/Arvados with your desired path.\nRuntimeDirectory=Arvados\n\n# The arv-mount path must be the absolute path where you installed the command.\n# If you installed from a distribution package, make this /usr/bin/arv-mount.\n# If you installed from pip, replace ... with the path to your virtualenv.\n# You can add options to select what gets mounted, access permissions,\n# cache size, log level, etc.\nExecStart=<span class=\"userinput\">...</span>/bin/arv-mount --foreground %t/Arvados\nExecStop=/usr/bin/fusermount -u %t/Arvados\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/sdk/go/example.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Go\ntitle: Examples\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nSee \"Arvados GoDoc\":https://godoc.org/git.arvados.org/arvados.git/sdk/go for detailed documentation.\n\nIn these examples, the site prefix is @aaaaa@.\n\nh2.  Initialize SDK\n\n{% codeblock as go %}\nimport (\n  \"git.arvados.org/arvados.git/sdk/go/arvados\"\n  \"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n}\n\nfunc main() {\n  arv, err := arvadosclient.MakeArvadosClient()\n  if err != nil {\n    log.Fatalf(\"Error setting up arvados client %s\", err.Error())\n  }\n}\n{% endcodeblock %}\n\nh2. create\n\n{% codeblock as go %}\n  var collection arvados.Collection\n  err := api.Create(\"collections\", Dict{\"collection\": Dict{\"name\": \"create example\"}}, &collection)\n{% endcodeblock %}\n\nh2. delete\n\n{% codeblock as go %}\n  var collection arvados.Collection\n  err := api.Delete(\"collections\", \"aaaaa-4zz18-ccccccccccccccc\", Dict{}, &collection)\n{% endcodeblock %}\n\nh2. get\n\n{% codeblock as go %}\n  var collection arvados.Collection\n  err := api.Get(\"collections\", \"aaaaa-4zz18-ccccccccccccccc\", Dict{}, &collection)\n{% endcodeblock %}\n\nh2. list\n\n{% codeblock as go %}\n  var collection arvados.Collection\n  err := api.List(\"collections\", Dict{}, &collection)\n{% endcodeblock %}\n\nh2. update\n\n{% codeblock as go %}\n  var collection arvados.Collection\n  err := api.Update(\"collections\", \"aaaaa-4zz18-ccccccccccccccc\", Dict{\"collection\": Dict{\"name\": \"update example\"}}, &collection)\n{% endcodeblock %}\n\nh2. Get current user\n\n{% codeblock as go %}\n  var user arvados.User\n  err := api.Get(\"users\", \"current\", Dict{}, &user)\n{% endcodeblock %}\n\nh2. Example program\n\nYou can save this source as a .go file and run it:\n\n<notextile>{% code example_sdk_go as go %}</notextile>\n\nA few more usage examples can be found in the \"services/keepproxy\":https://github.com/arvados/arvados/tree/main/services/keepproxy and \"sdk/go/keepclient\":https://github.com/arvados/arvados/tree/main/sdk/go/keepclient directories in the arvados source tree.\n"
  },
  {
    "path": "doc/sdk/go/index.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Go\ntitle: \"Installation\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe Go (\"Golang\":http://golang.org) SDK provides a generic set of wrappers so you can make API calls easily.\n\nSee \"Arvados GoDoc\":https://godoc.org/git.arvados.org/arvados.git/sdk/go for detailed documentation.\n\nh3. Installation\n\nUse @go get git.arvados.org/arvados.git/sdk/go/arvadosclient@.  The go tools will fetch the relevant code and dependencies for you.\n\n{% codeblock as go %}\nimport (\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n)\n{% endcodeblock %}\n\nIf you need pre-release client code, you can use the latest version from the repo by following \"these instructions.\":https://dev.arvados.org/projects/arvados/wiki/Go#Using-Go-with-Arvados\n"
  },
  {
    "path": "doc/sdk/index.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\ntitle: \"SDK Reference\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis section documents client tools and language bindings for the \"Arvados API\":{{site.baseurl}}/api/index.html and Keep that are available for various programming languages. The most mature, popular packages are:\n\n* \"Python SDK\":{{site.baseurl}}/sdk/python/sdk-python.html (also includes essential command line tools such as @arv-put@ and @arv-get@)\n* \"Command line SDK\":{{site.baseurl}}/sdk/cli/install.html (includes the @arv@ tool)\n\nMany Arvados Workbench pages provide examples of using the Python SDK and command line tools to access a given resource. Open \"API details\" from the action menu and open the tab with the example you're interested in.\n\nWe provide API bindings for several other languages, but these SDKs may be missing some features or documentation:\n\n* \"Go SDK\":{{site.baseurl}}/sdk/go/index.html\n* \"Java SDK\":{{site.baseurl}}/sdk/java-v2/index.html\n* \"R SDK\":{{site.baseurl}}/sdk/R/index.html\n* \"Ruby SDK\":{{site.baseurl}}/sdk/ruby/index.html\n\nConsult the \"Arvados API\":{{site.baseurl}}/api/index.html section for detailed documentation about Arvados API calls available on each resource.\n"
  },
  {
    "path": "doc/sdk/java-v2/example.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Java\ntitle: Examples\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nIn these examples, the site prefix is @aaaaa@.\n\nh2.  Initialize SDK\n\n{% codeblock as java %}\npackage org.arvados.example;\n\nimport java.util.List;\nimport org.arvados.client.config.ConfigProvider;\nimport org.arvados.client.config.ExternalConfigProvider;\nimport org.arvados.client.api.model.CollectionList;\nimport org.arvados.client.api.model.Collection;\nimport org.arvados.client.api.client.CollectionsApiClient;\n\npublic class CollectionExample {\n    public static void main(String[] argv) {\n\tConfigProvider conf = ExternalConfigProvider.builder().\n\t    apiProtocol(\"https\").\n\t    apiHost(\"zzzzz.arvadosapi.com\").\n\t    apiPort(443).\n\t    apiToken(\"...\").\n\t    build();\n\tCollectionsApiClient collectionsApi = new CollectionsApiClient(conf);\n\t/* ... */\n    }\n}\n{% endcodeblock %}\n\nh2. list\n\n{% codeblock as java %}\n\tCollectionList cl = collectionsApi.list();\n\tList<Collection> items = cl.getItems();\n\tfor (int i = 0; i < items.size(); i++) {\n\t    System.out.println(items.get(i));\n\t}\n{% endcodeblock %}\n"
  },
  {
    "path": "doc/sdk/java-v2/index.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Java\ntitle: \"Installation\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe Arvados Java SDK v2 provides a high level API for working with Arvados resources.\n\n{% include 'contrib_component' component_name: \"The Java SDK v2\" %}\n\nh2. Using the SDK\n\nThe SDK is packaged as a JAR named @arvados-java-<version>.jar@, which is published to Maven Central and can be included using Maven, Gradle, or by hand.\n\nHere is an example @build.gradle@ file that uses the Arvados java sdk:\n\n<pre>\napply plugin: 'application'\napply plugin: 'java-library'\napply plugin: 'maven'\n\nrepositories {\n    mavenCentral()\n}\n\napplication {\n    mainClassName = \"org.arvados.example.CollectionExample\"\n}\n\ndependencies {\n    api 'org.arvados:arvados-java-sdk:0.1.1'\n}\n</pre>\n\nSee \"Java SDK Examples\":example.html to get started using the SDK.\n\nh3. Logging\n\nThe SDK uses the SLF4J facade library for logging. A concrete logging \"binding\":https://www.slf4j.org/manual.html#swapping (and configuration, if required) must be provided by a client. For small applications, you can use the Simple implementation by adding slf4j-simple-1.8.0-beta4.jar to your classpath.\n\nh3. Configuration\n\n\"TypeSafe Configuration\":https://github.com/lightbend/config is used for configuring this library.\n\nPlease review src/main/resources/reference.conf for default values provided with this library.\n\n* **keepweb-host** - host of your Keep-Web server (default: localhost)\n* **keepweb-port** - port of your Keep-Web server (default: 8000)\n* **host** - host of your Arvados API server\n* **port** - port of your Arvados API server\n* **token** - Arvados token to authenticate registered user, one must provide \"token obtained from Arvados Workbench\":https://doc.arvados.org/user/reference/api-tokens.html\n* **protocol** - don't change to unless really needed (default: https)\n* **host-insecure** - ignores SSL certificate verification if true (default: false Don't change to *true* unless really needed)\n* **split-size** - size of chunk files in megabytes (default: 64)\n* **temp-dir** - temporary chunk files storage\n* **copies** - amount of chunk files duplicates per Keep server\n* **retries** - UNIMPLEMENTED\n\nIn order to override default settings one can create an application.conf file in an application.  Example: src/test/resources/application.conf.\n\nAlternatively @ExternalConfigProvider@ class can be used to pass configuration via code.  @ExternalConfigProvider@ comes with a builder and all of the above values must be provided in order for it to work properly.\n\n@ArvadosFacade@ has two constructors, one without arguments that uses values from application.conf and second one taking @ExternalConfigProvider@ as an argument.\n\nh3. API clients\n\nAll API clients inherit from @BaseStandardApiClient@. This class contains implementation of all common methods as described in \"Arvados Common Resource Methods\":http://doc.arvados.org/api/methods.html.\n\nParameters provided to common or specific methods are String UUID or fields wrapped in Java objects. 
For example:\n\n{% codeblock as java %}\nString uuid = \"ardev-4zz18-rxcql7qwyakg1r1\";\n\nCollection actual = client.get(uuid);\n{% endcodeblock %}\n\n{% codeblock as java %}\nListArgument listArgument = ListArgument.builder()\n        .filters(Arrays.asList(\n                Filter.of(\"owner_uuid\", Operator.LIKE, \"ardev%\"),\n                Filter.of(\"name\", Operator.LIKE, \"Super%\"),\n                Filter.of(\"portable_data_hash\", Operator.IN, Lists.newArrayList(\"54f6d9f59065d3c009d4306660989379+65\")\n            )))\n        .build();\n\nCollectionList actual = client.list(listArgument);\n{% endcodeblock %}\n\nNon-standard API clients must inherit from @BaseApiClient@. For example, @KeepServerApiClient@ communicates directly with Keep servers using exclusively non-common methods.\n\nh3. Business logic\n\nMore advanced API data handling could be implemented as *Facade* classes. In the current version, the functionalities provided by the SDK are handled by @ArvadosFacade@. They include:\n\n* **downloading a single file from a collection** - using Keep-Web\n* **downloading a whole collection** - using Keep-Web or the Keep Server API\n* **listing file info from a certain collection** - information is returned as a list of *FileTokens* providing file details\n* **uploading a single file** - to either a new or existing collection\n* **uploading a list of files** - to either a new or existing collection\n* **creating an empty collection**\n* **getting current user info**\n* **listing the current user's collections**\n* **creating a new project**\n* **deleting a certain collection**\n\nh3. Note regarding Keep-Web\n\nThe Java SDK requires Keep Web (which is part of the standard configuration) as well as the API server and Keep server(s).\n\nh3. Integration tests\n\nIn order to run the integration tests, all fields within the following configuration file must be provided: @src/test/resources/integration-test-appliation.conf@\n\nThe parameter @integration-tests.project-uuid@ should contain the UUID of a project available to the user whose token was provided in the configuration file.\n\nIntegration tests require a connection to a real Arvados server.\n\nh3. Note regarding file naming\n\nWith the current implementation of the Java SDK, all uploaded files within a single collection must have different names. This also applies to uploading files to an already existing collection. Renaming files with duplicate names is not currently implemented.\n\nh3. Javadoc\n\nSee \"Javadoc\":javadoc.html\n\nh2. Building the Arvados SDK\n\nDependencies:\n* JDK for Java 8 or later \"https://www.oracle.com/technetwork/java/javase/downloads/index.html\":https://www.oracle.com/technetwork/java/javase/downloads/index.html\n* Gradle \"https://gradle.org/install/\":https://gradle.org/install/\n\n<notextile>\n<pre>\n$ <code class=\"userinput\">git clone https://github.com/arvados/arvados.git</code>\n$ <code class=\"userinput\">cd arvados/contrib/java-sdk-v2</code>\n$ <code class=\"userinput\">gradle test</code>\n$ <code class=\"userinput\">gradle jar -Pversion=0.1.1</code>\n</pre>\nThis will build the SDK, run all unit tests, and generate an Arvados Java SDK JAR file at build/libs/arvados-java-0.1.1.jar\n</notextile>\n"
  },
  {
    "path": "doc/sdk/java-v2/javadoc.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Java\ntitle: \"Javadoc Reference\"\n\nno_nav_left: true\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nnotextile. <iframe src=\"javadoc/index.html\" style=\"width:100%; height:100%; border:none\" />\n"
  },
  {
    "path": "doc/sdk/python/api-client.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Python\ntitle: Arvados API Client\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n{% comment %}\nA note about scope for future authors: This page is meant to be a general guide to using the API client. It is intentionally limited to using the general resource methods as examples, because those are widely available and should be sufficient to give the reader a general understanding of how the API client works. In my opinion we should not cover resource-specific API methods here, and instead prefer to cover them in the cookbook or reference documentation, which have a more appropriate scope.  --Brett 2022-12-06\n{% endcomment %}\n\nThe Arvados Python SDK provides a complete client interface to the \"Arvados API\":{{site.baseurl}}/api/index.html. You can use this client interface directly to send requests to your Arvados API server, and many of the higher-level interfaces in the Python SDK accept a client object in their constructor for their use. Any Arvados software you write in Python will likely use these client objects.\n\nThis document explains how to instantiate the client object, and how its methods map to the full \"Arvados API\":{{site.baseurl}}/api/index.html. Refer to the API documentation for full details about all available resources and methods. The rest of the Python SDK documentation after this covers the higher-level interfaces it provides.\n\nh2. Initializing the API client\n\nIn the simplest case, you can import the @arvados@ module and call its @api@ method with an API version number:\n\n{% codeblock as python %}\nimport arvados\narv_client = arvados.api('v1')\n{% endcodeblock %}\n\nWhen called this way, the SDK gets Arvados API credentials from the first source it finds in this list:\n\n# The environment variables @ARVADOS_API_HOST@, @ARVADOS_API_TOKEN@, and @ARVADOS_API_HOST_INSECURE@.\n# The @settings.conf@ file under the directories listed in systemd's @CONFIGURATION_DIRECTORY@ environment variable.\n# The @arvados/settings.conf@ file under the directory in the @XDG_CONFIG_HOME@ environment variable. This defaults to @~/.config/arvados/settings.conf@ if @XDG_CONFIG_HOME@ is not set.\n# The @arvados/settings.conf@ file under the directories in the @XDG_CONFIG_DIRS@ environment variable.\n\nYou can alternatively pass these settings as arguments:\n\n{% codeblock as python %}\nimport arvados\narv_client = arvados.api(\n    'v1',\n    host='api.arvados.example.com',\n    token='ExampleToken',\n    insecure=False,\n)\n{% endcodeblock %}\n\nEither way, you can now use the @arv_client@ object to send requests to the Arvados API server you specified, using the configured token. The client object queries the API server for its supported API version and methods, so this client object will always support the same API the server does, even when there is a version mismatch between it and the Python SDK.\n\nh2. Resources, methods, and requests\n\nThe API client has a method that corresponds to each \"type of resource supported by the Arvados API server\":{{site.baseurl}}/api/ (listed in the documentation sidebar). You call these methods without any arguments. They return a resource object you use to call a method on that resource type.\n\nEach resource object has a method that corresponds to each API method supported by that resource type. You call these methods with the keyword arguments and values documented in the API reference. 
They return an API request object.\n\nEach API request object has an @execute()@ method. If it succeeds, it returns the kind of object documented in the API reference for that method. Usually that's a dictionary with details about the object you requested. If there's a problem, it raises an exception.\n\nPutting it all together, basic API requests usually look like:\n\n{% codeblock as python %}\narv_object = arv_client.resource_type().api_method(\n    argument=...,\n    other_argument=...,\n).execute()\n{% endcodeblock %}\n\nLater sections detail how to call \"common resource methods in the API\":{{site.baseurl}}/api/methods.html with more concrete examples. Additional methods may be available on specific resource types.\n\nh3. Retrying failed requests\n\nIf you execute an API request and it fails because of a temporary error like a network problem, the SDK waits with randomized exponential back-off, then retries the request. You can specify the maximum number of retries by passing a @num_retries@ integer to either @arvados.api@ or the @execute()@ method; the SDK will use whichever number is greater. The default number of retries is 10, which means that an API request could take up to about 35 minutes if the temporary problem persists that long. To disable automatic retries, just pass @num_retries=0@ to @arvados.api@:\n\n{% codeblock as python %}\nimport arvados\narv_client = arvados.api('v1', num_retries=0, ...)\n{% endcodeblock %}\n\nh2. get method\n\nTo fetch a single Arvados object, call the @get@ method of the resource type. You must pass a @uuid@ argument string that identifies the object to fetch. The method returns a dictionary with the object's fields.\n\n{% codeblock as python %}\n# Get a workflow and output its Common Workflow Language definition\nworkflow = arv_client.workflows().get(uuid='zzzzz-7fd4e-12345abcde67890').execute()\nprint(workflow['definition'])\n{% endcodeblock %}\n\nYou can pass a @select@ argument that's a list of field names to return in the included object. Doing this avoids the overhead of de/serializing and transmitting data that you won't use. Skipping a large field over a series of requests can yield a noticeable performance improvement.\n\n{% codeblock as python %}\n# Get a workflow and output its name and description.\n# Don't load the workflow definition, which might be large and which\n# we're not going to use.\nworkflow = arv_client.workflows().get(\n    uuid='zzzzz-7fd4e-12345abcde67890',\n    select=['name', 'description'],\n).execute()\nprint(f\"## {workflow['name']} ##\\n\\n{workflow['description']}\")\n\n# ERROR: This raises a KeyError because we didn't load this field in\n# the `select` argument.\nworkflow['created_at']\n{% endcodeblock %}\n\nh2. list method\n\nTo fetch multiple Arvados objects of the same type, call the @list@ method for that resource type. The list method takes a number of arguments. Refer to the \"list method API reference\":{{site.baseurl}}/api/methods.html#index for details about them. The method returns a dictionary also documented at the bottom of that section. The most interesting field is @'items'@, which is a list of dictionaries where each one corresponds to an Arvados object that matched your search. 
To work with a single page of results:\n\n{% codeblock as python %}\n# Output the exit codes of the 10 most recently run containers.\ncontainer_list = arv_client.containers().list(\n    limit=10,\n    order=['finished_at desc'],\n).execute()\nfor container in container_list['items']:\n    print(f\"{container['uuid']}: {container['exit_code']}\")\n{% endcodeblock %}\n\nIf you need to retrieve all of the results for a query, you may need to call the @list@ method multiple times: the default @limit@ is 100 items, and the API server will never return more than 1000. The SDK function @arvados.util.keyset_list_all@ can help orchestrate this for you. Call it with the @list@ method you want to query (don't call it yourself!) and the same keyword arguments you would pass to that method. You can control ordering by passing an @order_key@ string that names the field to use, and an @ascending@ bool that indicates whether that key should be sorted in ascending or descending order. The function returns an iterator of dictionaries, where each dictionary corresponds to an Arvados object that matched your query.\n\n{% codeblock as python %}\n# Output all the portable data hashes in a project.\nproject_data = set()\nfor collection in arvados.util.keyset_list_all(\n    # Note we pass the `list` method without calling it\n    arv_client.collections().list,\n    # The UUID of the project we're searching\n    filters=[['owner_uuid', '=', 'zzzzz-j7d0g-12345abcde67890']],\n):\n    project_data.add(collection['portable_data_hash'])\nprint('\\n'.join(project_data))\n{% endcodeblock %}\n\nWhen you list many objects, the following can help improve performance:\n\n* Call the list method with @count='none'@ to avoid the overhead of counting all results with each request.\n* Call the list method with a @select@ argument to only request the data you need. This cuts out some overhead from de/serializing and transferring data you won't use.\n\nh2. create method\n\nTo create a new Arvados object, call the @create@ method for that resource type. You must pass a @body@ dictionary with a single item. Its key is the resource type you're creating as a string, and its value is a dictionary of data fields for that resource. The method returns a dictionary with the new object's fields.\n\nIf the resource type has a @name@ field, you may pass an @ensure_unique_name@ boolean argument. If true, the method will automatically update the name of the new object to make it unique if necessary.\n\n{% codeblock as python %}\n# Create a new project and output its UUID.\nproject = arv_client.groups().create(\n    body={\n        'group': {\n            'name': 'Python SDK Test Project',\n            'group_class': 'project',\n        },\n    },\n    ensure_unique_name=True,\n).execute()\nprint(project['uuid'])\n{% endcodeblock %}\n\nh2. update method\n\nTo modify an existing Arvados object, call the @update@ method for that resource type. You must pass a @uuid@ string argument that identifies the object to update, and a @body@ dictionary with a single item. Its key is the resource type you're updating as a string, and its value is a dictionary of data fields to update on the resource. The method returns a dictionary with the updated object's fields.\n\nIf the resource type has a @name@ field, you may pass an @ensure_unique_name@ boolean argument. 
If true, the method will automatically update the name of the object to make it unique if necessary.\n\n{% codeblock as python %}\n# Update the name of a container request,\n# finalize it to submit it to Crunch for processing,\n# and output its priority.\nsubmitted_container_request = arv_client.container_requests().update(\n    uuid='zzzzz-xvhdp-12345abcde67890',\n    body={\n        'container_request': {\n            'name': 'Container Request Committed by Python SDK',\n            'state': 'Committed',\n        },\n    },\n    ensure_unique_name=True,\n).execute()\nprint(submitted_container_request['priority'])\n{% endcodeblock %}\n\nh2. delete method\n\nTo delete an existing Arvados object, call the @delete@ method for that resource type. You must pass a @uuid@ string argument that identifies the object to delete. The method returns a dictionary with the deleted object's fields.\n\n{% codeblock as python %}\n# Delete a collection and output its name\ndeleted_collection = arv_client.collections().delete(\n    uuid='zzzzz-4zz18-12345abcde67890',\n).execute()\nprint(deleted_collection['name'])\n{% endcodeblock %}\n\nFor resource types that support being trashed, you can untrash the object by calling the resource type's @untrash@ method with a @uuid@ argument identifying the object to restore. 
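For example, a minimal sketch that restores the collection deleted above, assuming it is still in the trash:\n\n{% codeblock as python %}\n# Untrash a collection and output its name\nrestored_collection = arv_client.collections().untrash(\n    uuid='zzzzz-4zz18-12345abcde67890',\n).execute()\nprint(restored_collection['name'])\n{% endcodeblock %}\n"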
  },
  {
    "path": "doc/sdk/python/arvados-cwl-runner.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Python\ntitle: Arvados CWL Runner\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe Arvados CWL Runner is a Python tool that allows you to register and submit workflows to Arvados. You can oversee a running workflow on your local system, or let that run inside an Arvados container. This tool requires the Python SDK installed in order to access Arvados services.\n\nh2. Installation\n\nIf you are logged in to a managed Arvados VM, the @arvados-cwl-runner@ utility should already be installed.\n\nTo use the CWL Runner elsewhere, you can install it from a distribution package or PyPI.\n\nh2. Option 1: Install from distribution packages\n\nFirst, \"add the appropriate package repository for your distribution\":{{ site.baseurl }}/install/packages.html.\n\n{% assign arvados_component = 'python3-arvados-cwl-runner' %}\n\n{% include 'install_packages' %}\n\nh2. Option 2: Install with pip\n\nRun @pip install arvados-cwl-runner@ in an appropriate installation environment, such as a virtualenv.\n\nNote:\n\nThe CWL Runner uses @pycurl@ which depends on the @libcurl@ C library.  To build the module you may have to first install additional packages.  On Debian-based distributions you can install them by running:\n\n<notextile>\n<pre><code># <span class=\"userinput\">apt install git build-essential python3-dev libcurl4-openssl-dev libssl-dev</span>\n</code></pre>\n</notextile>\n\nh2. Check Docker access\n\nIn order to pull and upload Docker images, @arvados-cwl-runner@ requires access to Docker.  You do not need Docker if the Docker images you intend to use are already available in Arvados.\n\nYou can determine if you have access to Docker by running @docker version@:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">docker version</span>\nClient:\n Version:      1.9.1\n API version:  1.21\n Go version:   go1.4.2\n Git commit:   a34a1d5\n Built:        Fri Nov 20 12:59:02 UTC 2015\n OS/Arch:      linux/amd64\n\nServer:\n Version:      1.9.1\n API version:  1.21\n Go version:   go1.4.2\n Git commit:   a34a1d5\n Built:        Fri Nov 20 12:59:02 UTC 2015\n OS/Arch:      linux/amd64\n</code></pre>\n</notextile>\n\nIf this returns an error, contact the sysadmin of your cluster for assistance.\n\nh2. Usage\n\nPlease refer to the \"Starting a Workflow at the Command Line\":{{site.baseurl}}/user/cwl/cwl-runner.html tutorial for more information.\n"
  },
  {
    "path": "doc/sdk/python/cookbook.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Python\ntitle: Code cookbook\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Introduction\":#introduction\n# \"Working with the current user\":#working-with-current-user\n## \"Fetch the current user\":#fetch-current-user\n## \"List objects shared with the current user\":#list-shared-objects\n# \"Working with projects\":#working-with-projects\n## \"Create a project\":#create-a-project\n## \"List the contents of a project\":#list-project-contents\n# \"Working with permissions\":#working-with-permissions\n## \"Grant permission to an object\":#grant-permission\n## \"Modify permission on an object\":#modify-permission\n## \"Revoke permission from an object\":#revoke-permission\n# \"Working with properties\":#working-with-properties\n## \"Update the properties of an object\":#update-properties\n## \"Translate between vocabulary identifiers and labels\":#translating-between-vocabulary-identifiers-and-labels\n## \"Query the vocabulary definition\":#querying-the-vocabulary-definition\n# \"Working with collections\":#working-with-collections\n## \"Load and update an existing collection\":#load-collection\n## \"Create and save a new collection\":#create-collection\n## \"Read a file from a collection\":#read-a-file-from-a-collection\n## \"Download a file from a collection\":#download-a-file-from-a-collection\n## \"Write a file to a collection\":#write-a-file-into-a-new-collection\n## \"Upload a file to a collection\":#upload-a-file-into-a-new-collection\n## \"Delete a file from a collection\":#delete-a-file-from-an-existing-collection\n## \"Delete a directory from a collection recursively\":#delete-a-directory-from-a-collection\n## \"Walk over all files in a collection\":#walk-collection\n## \"Copy a file between collections\":#copy-files-from-a-collection-to-another-collection\n## \"Combine two or more collections\":#combine-two-or-more-collections\n## \"Create a collection sharing link\":#sharing-link\n# \"Working with containers and workflow runs\":#working-with-containers\n## \"Get input of a container\":#get-input-of-a-container\n## \"Get input of a CWL workflow run\":#get-input-of-a-cwl-workflow\n## \"Get output of a container\":#get-output-of-a-container\n## \"Get output of a CWL workflow run\":#get-output-of-a-cwl-workflow\n## \"Get logs of a container or CWL workflow run\":#get-log-of-a-child-request\n## \"Get status of a container or CWL workflow run\":#get-state-of-a-cwl-workflow\n## \"List child requests of a container or CWL workflow run\":#list-failed-child-requests\n## \"List child requests of a container request\":#list-child-requests-of-container-request\n# \"Working with the container request queue\":#working-with-container-request-queue\n## \"List completed container requests\":#list-completed-container-requests\n## \"Cancel a container request\":#cancel-a-container-request\n## \"Cancel multiple pending container requests\":#cancel-all-container-requests\n\nh2(#introduction). Introduction\n\nThis page provides example code to perform various high-level tasks using Arvados' Python SDK. This page assumes you've already read the \"API client documentation\":{{ site.baseurl }}/sdk/python/api-client.html and understand the basics of using the Python SDK client. 
You don't have to have the details of every API method memorized, but you should at least be comfortable with the pattern of calling a resource type, API method, and @execute()@, as well as the dictionaries these methods return.\n\nThe code examples assume you've built the @arv_client@ object by doing something like:\n\n{% codeblock as python %}\nimport arvados\narv_client = arvados.api('v1', ...)\n{% endcodeblock %}\n\nThese examples work no matter how you call @arvados.api()@, or if you use another constructor from \"@arvados.api@ module\":{{ site.baseurl }}/sdk/python/arvados/api.html. Just understand that @arv_client@ represents your client object, no matter how you built it.\n\nWhenever you see the Ellipsis object @...@ in these examples, that means you may need or want to fill something in. That might be list items, function arguments, or your own code. Comments will provide additional guidance.\n\nWhenever you see the example UUID @zzzzz-zzzzz-12345abcde67890@, you should substitute a UUID from your own data.\n\nh2(#working-with-current-user). Working with the current user\n\nh3(#fetch-current-user). Fetch the current user\n\nThe API provides a \"dedicated users method named @current@\":{{ site.baseurl }}/api/methods/users.html#current. It returns the user object that is authenticated by your current API token. Use this method to get the current user's UUID to use in other API calls, or include user details like name in your output.\n\n{% codeblock as python %}\ncurrent_user = arv_client.users().current().execute()\n{% endcodeblock %}\n\nh3(#list-shared-objects). List objects shared with the current user\n\nThe API provides a \"dedicated groups method named @shared@\":{{ site.baseurl }}/api/methods/groups.html#shared to do this. Call it like you would any other list method. This example illustrates some popular arguments. Check the API reference for full details of all possible arguments.\n\n{% codeblock as python %}\nfor item in arvados.util.keyset_list_all(\n    # Pass the method keyset_list_all will call to retrieve items.\n    # Do not call it yourself.\n    arv_client.groups().shared,\n    # Pass filters to limit what objects are returned.\n    # This example returns only subprojects.\n    filters=[\n        ['uuid', 'is_a', 'arvados#group'],\n        ['group_class', '=', 'project'],\n    ],\n    # Pass order_key and ascending to control how the contents are sorted.\n    # This example lists projects in ascending creation time (the default).\n    order_key='created_at',\n    ascending=True,\n):\n    ...  # Work on item as desired\n{% endcodeblock %}\n\nh2(#working-with-projects). Working with projects\n\nh3(#create-a-project). Create a project\n\nA project is represented in the Arvados API as a group with its @group_class@ field set to @\"project\"@.\n\n{% codeblock as python %}\nnew_project = arv_client.groups().create(\n    body={\n        'group': {\n            'group_class': 'project',\n            'name': 'Python SDK Test Project',\n            # owner_uuid can be the UUID for an Arvados user or group.\n            # Specify the UUID of an existing project to make a subproject.\n            # If not specified, the current user is the default owner.\n            'owner_uuid': 'zzzzz-j7d0g-12345abcde67890',\n        },\n    },\n    ensure_unique_name=True,\n).execute()\n{% endcodeblock %}\n\nh3(#list-project-contents). List the contents of a project\n\nThe API provides a \"dedicated groups method named @contents@\":{{ site.baseurl }}/api/methods/groups.html#contents to do this. 
Call it like you would any other list method. This example illustrates some popular arguments. Check the API reference for full details of all possible arguments.\n\n{% codeblock as python %}\ncurrent_user = arv_client.users().current().execute()\nfor item in arvados.util.keyset_list_all(\n    # Pass the method keyset_list_all will call to retrieve items.\n    # Do not call it yourself.\n    arv_client.groups().contents,\n    # The UUID of the project whose contents we're listing.\n    # Pass a user UUID to list their home project.\n    # This example lists the current user's home project.\n    uuid=current_user['uuid'],\n    # Pass filters to limit what objects are returned.\n    # This example returns only subprojects.\n    filters=[\n        ['uuid', 'is_a', 'arvados#group'],\n        ['group_class', '=', 'project'],\n    ],\n    # Pass recursive=True to include results from subprojects in the listing.\n    recursive=False,\n    # Pass include_trash=True to include objects in the listing whose\n    # trashed_at time is passed.\n    include_trash=False,\n):\n    ...  # Work on item as desired\n{% endcodeblock %}\n\nh2(#working-with-permissions). Working with permissions\n\nIn brief, a permission is represented in Arvados as a link object with the following values:\n\n* @link_class@ is @\"permission\"@.\n* @name@ is one of @\"can_read\"@, @\"can_write\"@, @\"can_manage\"@, or @\"can_login\"@.\n* @tail_uuid@ identifies the user or role group that receives the permission.\n* @head_uuid@ identifies the Arvados object this permission grants access to.\n\nFor details, refer to the \"Permissions model documentation\":{{ site.baseurl }}/api/permission-model.html. Managing permissions is just a matter of ensuring the desired links exist using the standard @create@, @update@, and @delete@ methods.\n\nh3(#grant-permission). Grant permission to an object\n\nCreate a link with values as documented above.\n\n{% codeblock as python %}\npermission = arv_client.links().create(\n    body={\n        'link': {\n            'link_class': 'permission',\n            # Adjust name for the level of permission you want to grant\n            'name': 'can_read',\n            # tail_uuid must identify a user or role group\n            'tail_uuid': 'zzzzz-tpzed-12345abcde67890',\n            # head_uuid can identify any Arvados object\n            'head_uuid': 'zzzzz-4zz18-12345abcde67890',\n        },\n    },\n).execute()\n{% endcodeblock %}\n\nh3(#modify-permission). Modify permission on an object\n\nTo modify an existing permission—for example, to change its access level—find the existing link object for the permission, then update it with the new values you want. This example shows changing all read-write permissions on a specific collection to read-only. 
{% codeblock as python %}\nimport arvados.util\nfor permission in arvados.util.keyset_list_all(\n    # Pass the method keyset_list_all will call to retrieve items.\n    # Do not call it yourself.\n    arv_client.links().list,\n    filters=[\n        # You should use this filter for all permission searches,\n        # to exclude other kinds of links.\n        ['link_class', '=', 'permission'],\n        # Add other filters as desired.\n        ['name', '=', 'can_write'],\n        ['head_uuid', '=', 'zzzzz-4zz18-12345abcde67890'],\n        ...,\n    ],\n):\n    arv_client.links().update(\n        uuid=permission['uuid'],\n        body={\n            'link': {\n                'name': 'can_read',\n            },\n        },\n    ).execute()\n{% endcodeblock %}\n\nh3(#revoke-permission). Revoke permission from an object\n\nTo revoke an existing permission, find the existing link object for the permission, then delete it. This example shows revoking one user's permission to log into any virtual machines. Adjust the filters appropriately to find the permission(s) you want to revoke.\n\n{% codeblock as python %}\nimport arvados.util\nfor permission in arvados.util.keyset_list_all(\n    # Pass the method keyset_list_all will call to retrieve items.\n    # Do not call it yourself.\n    arv_client.links().list,\n    filters=[\n        # You should use this filter for all permission searches,\n        # to exclude other kinds of links.\n        ['link_class', '=', 'permission'],\n        # Add other filters as desired.\n        ['name', '=', 'can_login'],\n        ['tail_uuid', '=', 'zzzzz-tpzed-12345abcde67890'],\n        ...,\n    ],\n):\n    arv_client.links().delete(\n        uuid=permission['uuid'],\n    ).execute()\n{% endcodeblock %}\n\nh2(#working-with-properties). Working with properties\n\nContainer requests, collections, groups, and links can have metadata properties set through their @properties@ field. For details, refer to the \"Metadata properties API reference\":{{ site.baseurl }}/api/properties.html.\n\nAn Arvados cluster can be configured to use a metadata vocabulary. If this is set up, the vocabulary defines standard identifiers for specific properties and their values. These identifiers can also have more human-friendly aliases. The cluster can also be configured to use the vocabulary strictly, so clients may _only_ set properties that are defined in the vocabulary. For more information about configuring a metadata vocabulary, refer to the \"Metadata vocabulary administration documentation\":{{ site.baseurl }}/admin/metadata-vocabulary.html.\n\nh3(#update-properties). Update the properties of an object\n\nTo set an object's properties to a new value, just call the resource's @update@ method with a new @properties@ field in the body. If you want to make changes to the current set of properties, @get@ the object, build a new dictionary based on its @properties@ field, then call the resource's @update@ method with your new dictionary as the @properties@. Below is an example for a container request.\n\n{% codeblock as python %}\ncontainer_request = arv_client.container_requests().get(\n    uuid='zzzzz-xvhdp-12345abcde67890',\n).execute()\nnew_properties = dict(container_request['properties'])\n
...  # Make your desired changes to new_properties\ncontainer_request = arv_client.container_requests().update(\n    uuid=container_request['uuid'],\n    body={\n        'container_request': {\n            'properties': new_properties,\n        },\n    },\n).execute()\n{% endcodeblock %}\n\nh3(#translating-between-vocabulary-identifiers-and-labels). Translate between vocabulary identifiers and labels\n\nClient software might need to present properties to the user in a human-readable form or take input from the user without requiring them to remember identifiers. The \"@Vocabulary.convert_to_labels@\":{{ site.baseurl }}/sdk/python/arvados/vocabulary.html#arvados.vocabulary.Vocabulary.convert_to_labels and \"@Vocabulary.convert_to_identifiers@\":{{ site.baseurl }}/sdk/python/arvados/vocabulary.html#arvados.vocabulary.Vocabulary.convert_to_identifiers methods help with these tasks, respectively.\n\n{% codeblock as python %}\nimport arvados.vocabulary\nvocabulary = arvados.vocabulary.load_vocabulary(arv_client)\n\n# The argument should be a mapping of vocabulary keys and values using any\n# defined aliases, like this:\n#   {'Creature': 'Human', 'Priority': 'Normal'}\n# The return value will be an analogous mapping where all the aliases have\n# been translated to identifiers, like this:\n#   {'IDTAGANIMALS': 'IDVALANIMALS2', 'IDTAGIMPORTANCES': 'IDVALIMPORTANCES1'}\nproperties_by_identifier = vocabulary.convert_to_identifiers({...})\n\n# You can use this to set metadata properties on objects that support them.\nproject = arv_client.groups().update(\n    uuid='zzzzz-j7d0g-12345abcde67890',\n    body={\n        'group': {\n            'properties': properties_by_identifier,\n        },\n    },\n).execute()\n\n# You can report properties to the user by their preferred name.\nprint(f\"{project['name']} ({project['group_class']} {project['uuid']}) updated with properties:\")\nfor key, value in vocabulary.convert_to_labels(project['properties']).items():\n    print(f\"↳ {key}: {value}\")\n{% endcodeblock %}\n\nh3(#querying-the-vocabulary-definition). Query the vocabulary definition\n\nThe @arvados.vocabulary@ module provides facilities to interact with the \"active metadata vocabulary\":{{ site.baseurl }}/admin/metadata-vocabulary.html in the system. The \"@Vocabulary@ class\":{{ site.baseurl }}/sdk/python/arvados/vocabulary.html#arvados.vocabulary.Vocabulary provides a mapping-like view of a cluster's configured vocabulary.\n\n{% codeblock as python %}\nimport arvados.vocabulary\nvocabulary = arvados.vocabulary.load_vocabulary(arv_client)\n\n# You can use the vocabulary object to access specific keys and values by\n# case-insensitive mapping, like this:\n#   vocabulary_value = vocabulary[key_alias][value_alias]\n# You can also access the `key_aliases` and `value_aliases` mapping\n# attributes directly to view the entire vocabulary. The example below\n# writes a plaintext table of the vocabulary.\nfor vocabulary_key in set(vocabulary.key_aliases.values()):\n    print(\n        vocabulary_key.identifier,\n        vocabulary_key.preferred_label,\n        ', '.join(vocabulary_key.aliases[1:]),\n        sep='\\t',\n    )\n    for vocabulary_value in set(vocabulary_key.value_aliases.values()):\n        print(\n            f'↳ {vocabulary_value.identifier}',\n            vocabulary_value.preferred_label,\n            ', '.join(vocabulary_value.aliases[1:]),\n            sep='\\t',\n        )\n{% endcodeblock %}\n\nh2(#working-with-collections). 
Working with collections\n\nThe \"@arvados.collection.Collection@ class\":{{ site.baseurl }}/sdk/python/arvados/collection.html#arvados.collection.Collection provides a high-level interface to read, create, and update collections. It orchestrates multiple requests to API and Keep so you don't have to worry about the low-level details of keeping everything in sync. It uses threads to make multiple requests to Keep in parallel.\n\nThis page only shows you how to perform common tasks using the @Collection@ class. To see all the supported constructor arguments and methods, refer to \"the @Collection@ class documentation\":{{ site.baseurl }}/sdk/python/arvados/collection.html#arvados.collection.Collection.\n\nh3(#load-collection). Load and update an existing collection\n\nConstruct the @Collection@ class with the UUID of a collection you want to read. You can pass additional constructor arguments as needed.\n\n{% codeblock as python %}\nimport arvados.collection\ncollection = arvados.collection.Collection('zzzzz-4zz18-12345abcde67890', ...)\n{% endcodeblock %}\n\nIf you make changes to the collection and want to update the existing collection, call the \"@Collection.save@ method\":{{ site.baseurl }}/sdk/python/arvados/collection.html#arvados.collection.Collection.save:\n\n{% codeblock as python %}\ncollection.save()\n{% endcodeblock %}\n\nIf you would rather save your changes as a new collection object, call the \"@Collection.save_new@ method\":{{ site.baseurl }}/sdk/python/arvados/collection.html#arvados.collection.Collection.save_new. This example illustrates some popular arguments. Check the API reference for full details of all possible arguments.\n\n{% codeblock as python %}\ncollection.save_new(\n    name='Collection updated by Python SDK',\n    # owner_uuid can be the UUID for an Arvados user or group.\n    # Specify the UUID of a project to add this collection to it.\n    owner_uuid='zzzzz-j7d0g-12345abcde67890',\n)\n{% endcodeblock %}\n\nh3(#create-collection). Create and save a new collection\n\nConstruct the @Collection@ class without an existing collection UUID or manifest text. You can pass additional constructor arguments as needed.\n\n{% codeblock as python %}\nimport arvados.collection\nnew_collection = arvados.collection.Collection(...)\n{% endcodeblock %}\n\nUsually you'll upload or copy files to the new collection. Once you're done with that and ready to save your changes, call the \"@Collection.save_new@ method\":{{ site.baseurl }}/sdk/python/arvados/collection.html#arvados.collection.Collection.save_new. This example illustrates some popular arguments. Check the API reference for full details of all possible arguments.\n\n{% codeblock as python %}\nnew_collection.save_new(\n    name='Collection created by Python SDK',\n    # owner_uuid can be the UUID for an Arvados user or group.\n    # Specify the UUID of a project to add this collection to it.\n    owner_uuid='zzzzz-j7d0g-12345abcde67890',\n)\n{% endcodeblock %}\n\nh3(#read-a-file-from-a-collection). Read a file from a collection\n\nOnce you have a @Collection@ object, the \"@Collection.open@ method\":{{ site.baseurl }}/sdk/python/arvados/collection.html#arvados.collection.RichCollectionBase.open lets you open files from a collection the same way you would open files from disk using Python's built-in @open@ function. It returns a file-like object that you can use in many of the same ways you would use any other file object. 
This example prints all non-empty lines from @ExampleFile@ in your collection:\n\n{% codeblock as python %}\nimport arvados.collection\ncollection = arvados.collection.Collection(...)\nwith collection.open('ExampleFile') as my_file:\n    # Read from my_file as desired.\n    # This example prints all non-empty lines from the file to stdout.\n    for line in my_file:\n        if not line.isspace():\n            print(line, end='')\n{% endcodeblock %}\n\nh3(#download-a-file-from-a-collection). Download a file from a collection\n\nOnce you have a @Collection@ object, the \"@Collection.open@ method\":{{ site.baseurl }}/sdk/python/arvados/collection.html#arvados.collection.RichCollectionBase.open lets you open files from a collection the same way you would open files from disk using Python's built-in @open@ function. You pass a second mode argument like @'rb'@ to open the file in binary mode. It returns a file-like object that you can use in many of the same ways you would use any other file object. You can pass it as a source to Python's standard \"@shutil.copyfileobj@ function\":https://docs.python.org/3/library/shutil.html#shutil.copyfileobj to download it. This code downloads @ExampleFile@ from your collection and saves it to the current working directory as @ExampleDownload@:\n\n{% codeblock as python %}\nimport arvados.collection\nimport shutil\ncollection = arvados.collection.Collection(...)\nwith (\n  collection.open('ExampleFile', 'rb') as src_file,\n  open('ExampleDownload', 'wb') as dst_file,\n):\n    shutil.copyfileobj(src_file, dst_file)\n{% endcodeblock %}\n\nh3(#write-a-file-into-a-new-collection). Write a file to a collection\n\nOnce you have a @Collection@ object, the \"@Collection.open@ method\":{{ site.baseurl }}/sdk/python/arvados/collection.html#arvados.collection.RichCollectionBase.open lets you open files from a collection the same way you would open files from disk using Python's built-in @open@ function. Pass a second mode argument like @'w'@, @'a'@, or @'wb'@ to write a file in the collection. It returns a file-like object that you can use in many of the same ways you would use any other file object. This example writes @Hello, Arvados!@ to a file named @ExampleHello@ in your collection:\n\n{% codeblock as python %}\nimport arvados.collection\ncollection = arvados.collection.Collection(...)\nwith collection.open('ExampleHello', 'w') as my_file:\n    # Write to my_file as desired.\n    # This example writes \"Hello, Arvados!\" to the file.\n    print(\"Hello, Arvados!\", file=my_file)\ncollection.save_new(...)  # or collection.save() to update an existing collection\n{% endcodeblock %}\n\nh3(#upload-a-file-into-a-new-collection). Upload a file to a collection\n\nOnce you have a @Collection@ object, the \"@Collection.open@ method\":{{ site.baseurl }}/sdk/python/arvados/collection.html#arvados.collection.RichCollectionBase.open lets you open files from a collection the same way you would open files from disk using Python's built-in @open@ function. Pass a second mode argument like @'w'@, @'a'@, or @'wb'@ to write a file in the collection. It returns a file-like object that you can use in many of the same ways you would use any other file object. You can pass it as a destination to Python's standard \"@shutil.copyfileobj@ function\":https://docs.python.org/3/library/shutil.html#shutil.copyfileobj to upload data from a source file. This example reads @ExampleFile@ from the current working directory and uploads it into your collection as @ExampleUpload@:\n\n
{% codeblock as python %}\nimport arvados.collection\nimport shutil\ncollection = arvados.collection.Collection(...)\nwith (\n  open('ExampleFile', 'rb') as src_file,\n  collection.open('ExampleUpload', 'wb') as dst_file,\n):\n    shutil.copyfileobj(src_file, dst_file)\ncollection.save_new(...)  # or collection.save() to update an existing collection\n{% endcodeblock %}\n\nh3(#delete-a-file-from-an-existing-collection). Delete a file from a collection\n\nOnce you have a @Collection@ object, call the \"@Collection.remove@ method\":{{ site.baseurl }}/sdk/python/arvados/collection.html#arvados.collection.Collection.remove with a file path to remove that file or directory from the collection.\n\n{% codeblock as python %}\nimport arvados.collection\ncollection = arvados.collection.Collection(...)\ncollection.remove('ExamplePath')\ncollection.save_new(...)  # or collection.save() to update an existing collection\n{% endcodeblock %}\n\nh3(#delete-a-directory-from-a-collection). Delete a directory from a collection recursively\n\nOnce you have a @Collection@ object, call the \"@Collection.remove@ method\":{{ site.baseurl }}/sdk/python/arvados/collection.html#arvados.collection.Collection.remove with a directory path and @recursive=True@ to delete everything under that directory from the collection.\n\n{% codeblock as python %}\nimport arvados.collection\ncollection = arvados.collection.Collection(...)\ncollection.remove('ExampleDirectoryPath', recursive=True)\ncollection.save_new(...)  # or collection.save() to update an existing collection\n{% endcodeblock %}\n\nh3(#walk-collection). Walk over all files in a collection\n\nOnce you have a @Collection@ object, you can iterate over it to retrieve the names of all files and streams in it. Streams are like subdirectories: you can open them using the \"@Collection.find@ method\":{{ site.baseurl }}/sdk/python/arvados/collection.html#arvados.collection.RichCollectionBase.find, and work with the files in them just like you would in the original collection. This example shows how to combine these techniques to iterate all files in a collection, including its streams.\n\n{% codeblock as python %}\nimport arvados.collection\nimport collections\nimport pathlib\nroot_collection = arvados.collection.Collection(...)\n# Start work from the base stream.\nstream_queue = collections.deque([pathlib.PurePosixPath('.')])\nwhile stream_queue:\n    stream_path = stream_queue.popleft()\n    collection = root_collection.find(str(stream_path))\n    for item_name in collection:\n        try:\n            my_file = collection.open(item_name)\n        except IsADirectoryError:\n            # item_name refers to a stream. Queue it to walk later.\n            stream_queue.append(stream_path / item_name)\n            continue\n        with my_file:\n            ...  # Work with my_file as desired\n{% endcodeblock %}\n\nh3(#copy-files-from-a-collection-to-another-collection). Copy a file between collections\n\nOnce you have one or more @Collection@ objects, call the \"@Collection.copy@ method\":{{ site.baseurl }}/sdk/python/arvados/collection.html#arvados.collection.RichCollectionBase.copy on the destination collection to copy files to it. 
This method doesn't re-upload data, so it's very efficient.\n\n{% codeblock as python %}\nimport arvados.collection\nsrc_collection = arvados.collection.Collection(...)\ndst_collection = arvados.collection.Collection(...)\ndst_collection.copy(\n    # The path of the source file or directory to copy\n    'ExamplePath',\n    # The path where the source file or directory will be copied.\n    # Pass an empty string like this to copy it to the same path.\n    '',\n    # The collection where the source file or directory comes from.\n    # If not specified, the default is the current collection (so you'll\n    # make multiple copies of the same data in the same collection).\n    source_collection=src_collection,\n    # Pass overwrite=True to force the method to overwrite any data\n    # that already exists at the given path in the current collection.\n    overwrite=False,\n)\ndst_collection.save_new(...)  # or dst_collection.save() to update an existing collection\n{% endcodeblock %}\n\nh3(#combine-two-or-more-collections). Combine two or more collections\n\nYou can concatenate manifest texts from multiple collections to create a single collection that contains all the data from the source collections. Note that if multiple source collections have data at the same path, the merged collection will have a single file at that path with concatenated data from the source collections.\n\n{% codeblock as python %}\nimport arvados.collection\n\n# Retrieve all of the source collection manifest texts\nsrc_collection_uuid_list = [\n    'zzzzz-4zz18-111111111111111',\n    'zzzzz-4zz18-222222222222222',\n    ...,\n]\nmanifest_texts = [\n    arvados.collection.Collection(uuid).manifest_text()\n    for uuid in src_collection_uuid_list\n]\n\n# Initialize a new collection object from the concatenated manifest text\nnew_collection = arvados.collection.Collection(''.join(manifest_texts), ...)\n\n# Record the new collection in Arvados\nnew_collection.save_new(\n    name='Collection merged by Python SDK',\n    owner_uuid='zzzzz-j7d0g-12345abcde67890',\n)\n{% endcodeblock %}\n\nh3(#sharing-link). Create a collection sharing link\n\nYou can create a sharing link for a collection by creating a new API token that is only allowed to read that collection; then constructing a link to your Keep web server that includes the collection UUID and the new token.\n\n{% codeblock as python %}\nimport urllib.parse\n\n# The UUID of the collection you want to share\ncollection_uuid = 'zzzzz-4zz18-12345abcde67890'\n\nsharing_token_scopes = [\n    'GET /arvados/v1/keep_services/accessible',\n    f'GET /arvados/v1/collections/{collection_uuid}',\n    f'GET /arvados/v1/collections/{collection_uuid}/',\n]\nsharing_token = arv_client.api_client_authorizations().create(\n    body={\n        'api_client_authorization': {\n            'scopes': sharing_token_scopes,\n        },\n    },\n).execute()\nplain_token = sharing_token['api_token']\ntoken_parts = plain_token.split('/')\nif token_parts[0] == 'v2':\n    plain_token = token_parts[2]\n\nsharing_url_parts = (\n    # The scheme your Keep web server uses. Change this to 'http' if necessary.\n    'https',\n    # The hostname, and optionally port, your Keep web server uses\n    'collections.zzzzz.example.com',\n    # You shouldn't need to change any other items\n    f'/c={collection_uuid}/t={plain_token}/_/',\n    None,\n    None,\n)\nsharing_url = urllib.parse.urlunsplit(sharing_url_parts)\nprint(sharing_url)\n{% endcodeblock %}\n\nh2(#working-with-containers). 
Working with containers\n\nIf you haven't already, start by reading the \"Computing with Crunch\":{{ site.baseurl }}/api/execution.html guide. It provides a high-level overview of how users submit work to Arvados as container requests; how Arvados dispatches that work to containers; and how Arvados records the association and results back on the original container request record.\n\nIf you have experience running CWL workflows from Workbench 2, those runs go through this same API. When you start a workflow run, Workbench 2 creates a small container request to run a \"CWL runner\" tool with the specific inputs you gave it. Once Crunch dispatches a container for that request, the CWL runner creates additional container requests to run each step of the workflow, and oversees the process until the workflow runs to completion. The UUID of the CWL runner's container is recorded in the @container_uuid@ field of the container request you submitted.\n\nThe UUID of the CWL runner container is recorded in the @requesting_container_uuid@ field of each container request it creates. You can list container requests with a filter on this field to inspect each step of the workflow individually, as shown below.\n\nThe next few examples show how to perform a task with a container request generally, and then provide a more specific example of working with a CWL runner container.\n\nh3(#get-input-of-a-container). Get input of a container\n\nA container request's most varied inputs are recorded in the @mounts@ field, which can include data from Keep, specific collections, Git checkouts, and static files. You might also be interested in the @environment@, @command@, @container_image@, and @secret_mounts@ fields. Refer to the \"container requests API documentation\":{{ site.baseurl }}/api/methods/container_requests.html for details.\n\n{% codeblock as python %}\ncontainer_request = arv_client.container_requests().get(\n    uuid='zzzzz-xvhdp-12345abcde67890',\n).execute()\n# From here, you can process any of the container request's input fields.\n# Below is an example of listing all the mounts.\nimport pprint\nfor mount_name, mount_source in container_request['mounts'].items():\n    mount_summary = []\n    # These are the fields that define different types of mounts.\n    # Try to collect them all. Just skip any that aren't set.\n    for key in ['kind', 'uuid', 'portable_data_hash', 'commit', 'path']:\n        try:\n            mount_summary.append(mount_source[key])\n        except KeyError:\n            pass\n    print(f\"{mount_name}: {' '.join(mount_summary)}\")\n    if mount_source.get('kind') == 'json':\n        pprint.pprint(mount_source.get('content'))\n{% endcodeblock %}\n\nh3(#get-input-of-a-cwl-workflow). Get input of a CWL workflow run\n\nWhen you run a CWL workflow, the CWL inputs are stored in the container request's @mounts@ field as a JSON mount named @/var/lib/cwl/cwl.input.json@.\n\n{% codeblock as python %}\ncontainer_request = arv_client.container_requests().get(\n    uuid='zzzzz-xvhdp-12345abcde67890',\n).execute()\ncwl_input = container_request['mounts']['/var/lib/cwl/cwl.input.json']['content']\n...  # Work with the cwl_input dictionary\n{% endcodeblock %}\n\nh3(#get-output-of-a-container). Get output of a container\n\nA container's output files are saved in a collection. The UUID of that collection is recorded in the @output_uuid@ of the container request, which you can load as you like.\n\n
{% codeblock as python %}\nimport arvados.collection\ncontainer_request = arv_client.container_requests().get(\n    uuid='zzzzz-xvhdp-12345abcde67890',\n).execute()\ncontainer_output = arvados.collection.Collection(\n    container_request.get('output_uuid'),\n)\n...  # Work with the container_output collection object\n{% endcodeblock %}\n\nh3(#get-output-of-a-cwl-workflow). Get output of a CWL workflow run\n\nWhen you run a CWL workflow, the container request's output collection includes a file named @cwl.output.json@ that provides additional information about other files in the output.\n\n{% codeblock as python %}\nimport arvados.collection\nimport json\ncwl_container_request = arv_client.container_requests().get(\n    uuid='zzzzz-xvhdp-12345abcde67890',\n).execute()\ncwl_output_collection = arvados.collection.Collection(\n    cwl_container_request['output_uuid'],\n)\nwith cwl_output_collection.open('cwl.output.json') as cwl_output_file:\n    cwl_output = json.load(cwl_output_file)\n...  # Work with the cwl_output dictionary\n{% endcodeblock %}\n\nh3(#get-log-of-a-child-request). Get logs of a container or CWL workflow run\n\nA container's log files are saved in a collection. The UUID of that collection is recorded in the @log_uuid@ of the container request, which you can load as you like.\n\n{% codeblock as python %}\nimport arvados.collection\ncontainer_request = arv_client.container_requests().get(\n    uuid='zzzzz-xvhdp-12345abcde67890',\n).execute()\nlog_collection = arvados.collection.Collection(\n    container_request['log_uuid'],\n)\n# From here, you can process the container's log collection any way you like.\n# Below is an example that writes the container's stderr to this process' stderr.\nimport shutil\nimport sys\nwith log_collection.open('stderr.txt') as container_stderr:\n    shutil.copyfileobj(container_stderr, sys.stderr)\n{% endcodeblock %}\n\nh3(#get-state-of-a-cwl-workflow). Get status of a container or CWL workflow run\n\nWorkbench shows users a single status badge for container requests. This status is synthesized from different fields on the container request and associated container. This code shows how to do analogous reporting using the Python SDK.\n\n{% codeblock as python %}\ncontainer_request = arv_client.container_requests().get(\n    uuid='zzzzz-xvhdp-12345abcde67890',\n).execute()\nif container_request['container_uuid'] is None:\n    status = container_request['state']\nelse:\n    container = arv_client.containers().get(\n        uuid=container_request['container_uuid'],\n    ).execute()\n    container_state = container['state']\n    if container_state == 'Queued' or container_state == 'Locked':\n        status = \"On hold\" if container['priority'] == 0 else \"Queued\"\n    elif container_state == 'Running':\n        if container['runtime_status'].get('error'):\n            status = \"Failing\"\n        elif container['runtime_status'].get('warning'):\n            status = \"Warning\"\n        else:\n            status = container_state\n    elif container_state == 'Cancelled':\n        status = container_state\n    elif container_state == 'Complete':\n        status = \"Completed\" if container['exit_code'] == 0 else \"Failed\"\n...  # Report status as desired\n{% endcodeblock %}\n\nh3(#list-failed-child-requests). List child requests of a container or CWL workflow run\n\n
When a running container creates a container request to do additional work, the UUID of the source container is recorded in the @requesting_container_uuid@ field of the new container request. You can list container requests with this filter to find requests created by a specific container.\n\n{% codeblock as python %}\nimport arvados.util\nfor child_container_request in arvados.util.keyset_list_all(\n    # Pass the method keyset_list_all will call to retrieve items.\n    # Do not call it yourself.\n    arv_client.container_requests().list,\n    filters=[\n        # Note this is a container UUID, *not* a container request UUID\n        ['requesting_container_uuid', '=', 'zzzzz-dz642-12345abcde67890'],\n        # You may add other filters for your listing.\n        # For example, you could filter by 'name' to find specific kinds\n        # of steps of a CWL workflow.\n        ...,\n    ],\n):\n    ...  # Work with each child container request\n{% endcodeblock %}\n\nh3(#list-child-requests-of-container-request). List child requests of a container request\n\nWhen a running container creates a container request to do additional work, the UUID of the source container is recorded in the @requesting_container_uuid@ field of the new container request. If all you have is the UUID of a container request, you can get that request, then list container requests with a filter where @requesting_container_uuid@ matches the @container_uuid@ of your request to find all its children.\n\n{% codeblock as python %}\nimport arvados.util\nparent_container_request = arv_client.container_requests().get(\n    uuid='zzzzz-xvhdp-12345abcde67890',\n).execute()\nparent_container_uuid = parent_container_request['container_uuid']\nif parent_container_uuid is None:\n    # No container has run for this request yet, so there cannot be child requests.\n    child_container_requests = ()\nelse:\n    child_container_requests = arvados.util.keyset_list_all(\n        # Pass the method keyset_list_all will call to retrieve items.\n        # Do not call it yourself.\n        arv_client.container_requests().list,\n        filters=[\n            ['requesting_container_uuid', '=', parent_container_uuid],\n            # You may add other filters for your listing.\n            # For example, you could filter by 'name' to find specific kinds\n            # of steps of a CWL workflow.\n            ...,\n        ],\n    )\nfor child_container_request in child_container_requests:\n    ...  # Work with each child container request\n{% endcodeblock %}\n\nWith each child container request, you could repeat any of the recipes listed earlier in this section: examine its status, inputs, outputs, logs, and so on.\n\nh2(#working-with-container-request-queue). Working with the container request queue\n\nh3(#list-completed-container-requests). List completed container requests\n\nCompleted container requests have their @state@ field set to @\"Final\"@. 
You can list container requests with this filter to find completed requests.\n\n{% codeblock as python %}\nimport arvados.util\nimport datetime\ntime_filter = datetime.datetime.utcnow()\ntime_filter -= datetime.timedelta(days=7)\n\nfor container_request in arvados.util.keyset_list_all(\n    # Pass the method keyset_list_all will call to retrieve items.\n    # Do not call it yourself.\n    arv_client.container_requests().list,\n    filters=[\n        # This is the filter you need to find completed container requests.\n        ['state', '=', 'Final'],\n        # There could be many completed container requests, so you should\n        # provide additional filters. This example limits the listing to\n        # container requests from the past week.\n        ['created_at', '>=', f'{time_filter.isoformat()}Z'],\n        ...,\n    ],\n):\n    # Work with each container_request as desired.\n    # This example provides a basic status table with the container request\n    # UUID, time the request was created, and time the container finished\n    # (both in UTC).\n    print(\n        container_request['uuid'],\n        container_request['created_at'],\n        container_request['modified_at'],\n    )\n{% endcodeblock %}\n\nh3(#cancel-a-container-request). Cancel a container request\n\nTo cancel a container request, update it to set its @priority@ field to 0. See the \"containers API reference\":{{ site.baseurl }}/api/methods/containers.html for details.\n\n{% codeblock as python %}\ncancelled_container_request = arv_client.container_requests().update(\n    uuid='zzzzz-xvhdp-12345abcde67890',\n    body={\n        'container_request': {\n            'priority': 0,\n        },\n    },\n).execute()\n{% endcodeblock %}\n\nh3(#cancel-all-container-requests). Cancel multiple pending container requests\n\nIf you want to cancel multiple pending container requests, you can list container requests with the @state@ field set to @\"Committed\"@, a @priority@ greater than zero, and any other filters you like. Then update each container request to set its @priority@ field to 0. See the \"containers API reference\":{{ site.baseurl }}/api/methods/containers.html for details.\n\n{% codeblock as python %}\nimport arvados.util\nfor container_request in arvados.util.keyset_list_all(\n    # Pass the method keyset_list_all will call to retrieve items.\n    # Do not call it yourself.\n    arv_client.container_requests().list,\n    filters=[\n        # These are the filters you need to find cancellable container requests.\n        ['state', '=', 'Committed'],\n        ['priority', '>', 0],\n        # You can add other filters as desired.\n        # For example, you might filter on `requesting_container_uuid` to\n        # cancel only steps of one specific workflow.\n        ...,\n    ],\n):\n    cancelled_container_request = arv_client.container_requests().update(\n        uuid=container_request['uuid'],\n        body={\n            'container_request': {\n                'priority': 0,\n            },\n        },\n    ).execute()\n{% endcodeblock %}\n"
  },
  {
    "path": "doc/sdk/python/events.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Python\ntitle: Subscribing to database events\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados applications can subscribe to a live event stream from the database.  Events are described in the \"Log resource.\":{{site.baseurl}}/api/methods/logs.html\n\n{% codeblock as python %}\n#!/usr/bin/env python3\n\nimport arvados\nimport arvados.events\n\n# 'ev' is a dict containing the log table record describing the change.\ndef on_message(ev):\n    if ev.get(\"event_type\") == \"create\" and ev.get(\"object_kind\") == \"arvados#collection\":\n        print \"A new collection was created: %s\" % ev[\"object_uuid\"]\n\napi = arvados.api(\"v1\")\nws = arvados.events.subscribe(api, [], on_message)\nws.run_forever()\n{% endcodeblock %}\n"
  },
  {
    "path": "doc/sdk/python/python.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Python\ntitle: \"PyDoc Reference\"\n\nno_nav_left: true\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nnotextile. <iframe src=\"arvados.html\" style=\"width:100%; height:100%; border:none\" />\n"
  },
  {
    "path": "doc/sdk/python/sdk-python.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Python\ntitle: \"Installation\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe Python SDK provides access from Python to the Arvados API and Keep, along with a number of command line tools for using and administering Arvados and Keep.\n\nh2. Installation\n\nIf you are logged in to an Arvados VM, the Python SDK should be installed.\n\nTo use the Python SDK elsewhere, you can install it \"from an Arvados distribution package\":#package-install or \"from PyPI using pip\":#pip-install.\n\n{% include 'notebox_begin_warning' %}\nAs of Arvados 3.0, the Python SDK requires Python 3.8+.\n{% include 'notebox_end' %}\n\nh2(#package-install). Install from a distribution package\n\nThis installation method is recommended to make the CLI tools available system-wide. It can coexist with the pip installation method described below.\n\nFirst, configure the \"Arvados package repositories\":../../install/packages.html\n\n{% assign arvados_component = 'python3-arvados-python-client' %}\n\n{% include 'install_packages' %}\n\nThe package includes a virtualenv, which means the correct Python environment needs to be loaded before the Arvados SDK can be imported. You can test the installation by doing that, then creating a client object. Ensure your \"@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ credentials are set up correctly\":{{site.baseurl}}/user/reference/api-tokens.html. Then you should be able to run the following without any errors:\n\n<notextile>\n<pre>~$ <code class=\"userinput\">source /usr/lib/python3-arvados-python-client/bin/activate</code>\n(python-arvados-python-client) ~$ <code class=\"userinput\">python</code>\nPython 3.7.3 (default, Jul 25 2020, 13:03:44)\n[GCC 8.3.0] on linux\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> <code class=\"userinput\">import arvados</code>\n>>> <code class=\"userinput\">arvados.api('v1')</code>\n&lt;apiclient.discovery.Resource object at 0x233bb50&gt;\n</pre>\n</notextile>\n\nAlternatively, you can run the Python executable inside the @virtualenv@ directly:\n\n<notextile>\n<pre>~$ <code class=\"userinput\">/usr/lib/python3-arvados-python-client/bin/python</code>\nPython 3.7.3 (default, Jul 25 2020, 13:03:44)\n[GCC 8.3.0] on linux\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> <code class=\"userinput\">import arvados</code>\n>>> <code class=\"userinput\">arvados.api('v1')</code>\n&lt;apiclient.discovery.Resource object at 0x233bb50&gt;\n</pre>\n</notextile>\n\nAfter you have successfully tested your installation, proceed to the the \"API client overview\":api-client.html and \"cookbook\":cookbook.html to learn how to use the SDK.\n\nh2(#pip-install). Install from PyPI with pip\n\nThis installation method is recommended to use the SDK in your own Python programs. If installed into a @virtualenv@, it can coexist with the system-wide installation method from a distribution package.\n\nNote the Python SDK uses @pycurl@ which depends on the @libcurl@ C library.  To build the module you may have to first install additional packages.  
<notextile>\n<pre><code># <span class=\"userinput\">apt install git build-essential python3-dev libcurl4-openssl-dev libssl-dev</span>\n</code></pre>\n</notextile>\n\nRun @python3 -m pip install arvados-python-client@ in an appropriate installation environment, such as a @virtualenv@.\n\n{% include 'notebox_begin_warning' %}\nIf your version of @pip@ is 1.4 or newer, the @pip install@ command might give an error: \"Could not find a version that satisfies the requirement arvados-python-client\". If this happens, try @python3 -m pip install --pre arvados-python-client@.\n{% include 'notebox_end' %}\n\nYou can test the installation by creating a client object. Ensure your \"@ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ credentials are set up correctly\":{{site.baseurl}}/user/reference/api-tokens.html. Then you should be able to run the following without any errors:\n\n<notextile>\n<pre>~$ <code class=\"userinput\">python3</code>\nPython 3.7.3 (default, Jul 25 2020, 13:03:44)\n[GCC 8.3.0] on linux\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n>>> <code class=\"userinput\">import arvados</code>\n>>> <code class=\"userinput\">arvados.api('v1')</code>\n&lt;apiclient.discovery.Resource object at 0x233bb50&gt;\n</pre>\n</notextile>\n\nAfter you have successfully tested your installation, proceed to the \"API client overview\":api-client.html and \"cookbook\":cookbook.html to learn how to use the SDK.\n"
  },
  {
    "path": "doc/sdk/ruby/example.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Python\ntitle: Examples\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2.  Initialize SDK\n\nImport the module and set up an API client user agent:\n\n{% codeblock as ruby %}\nrequire 'arvados'\narv = Arvados.new(apiVersion: 'v1')\n{% endcodeblock %}\n\nThe SDK retrieves the list of API methods from the server at run time. Therefore, the set of available methods is determined by the server version rather than the SDK version.\n\nh2. create\n\nCreate an object:\n\n{% codeblock as ruby %}\nnew_link = arv.link.create(link: {link_class: 'test', name: 'test'})\n{% endcodeblock %}\n\nh2. delete\n\nDelete an object:\n\n{% codeblock as ruby %}\narv.link.delete(uuid: new_link[:uuid])\n{% endcodeblock %}\n\nh2. get\n\nRetrieve an object by ID:\n\n{% codeblock as ruby %}\nsome_user = arv.user.get(uuid: current_user_uuid)\n{% endcodeblock %}\n\nh2. list\n\nGet a list of objects:\n\n{% codeblock as ruby %}\nrepos = arv.repository.list\nfirst_repo = repos[:items][0]\nputs \"UUID of first repo returned is #{first_repo[:uuid]}\"</code>\n{% endcodeblock %}\n\nUUID of first repo returned is zzzzz-s0uqq-b1bnybpx3u5temz\n\nh2. update\n\nUpdate an object:\n\n{% codeblock as ruby %}\nupdated_link = arv.link.update(uuid: new_link[:uuid],\n                               link: {properties: {foo: 'bar'}})\n{% endcodeblock %}\n\nh2. Get current user\n\nGet the User object for the current user:\n\n{% codeblock as ruby %}\ncurrent_user = arv.user.current\n{% endcodeblock %}\n\nGet the UUID of an object that was retrieved using the SDK:\n\n{% codeblock as ruby %}\ncurrent_user_uuid = current_user[:uuid]\n{% endcodeblock %}\n"
  },
  {
    "path": "doc/sdk/ruby/index.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: sdk\nnavmenu: Ruby\ntitle: \"Installation\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe Ruby SDK provides a generic set of wrappers so you can make API calls easily.\n\nh2. Installation\n\nIf you are logged in to an Arvados VM, the Ruby SDK should be installed.\n\nTo use it elsewhere, you can either install the @arvados@ gem via RubyGems or build and install the package using the arvados source tree.\n\nh3. Prerequisites\n\n# \"Install Ruby\":../../install/ruby.html\n\nThe SDK uses @curl@ which depends on the @libcurl@ C library.  To build the module you may have to install additional packages.  On supported versions of Debian and Ubuntu, run:\n\n<notextile>\n<pre><code>\n# <span class=\"userinput\">apt install build-essential libcurl4-openssl-dev\n</code></pre>\n</notextile>\n\nh3. Install with RubyGems\n\n<notextile>\n<pre>\n# <code class=\"userinput\">gem install arvados</code>\n</pre>\n</notextile>\n\nh3. Test installation\n\nIf the SDK is installed, @ruby -r arvados -e 'puts \"OK!\"'@ should produce no errors.\n\nIf your @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables are set up correctly (see \"api-tokens\":{{site.baseurl}}/user/reference/api-tokens.html for details), the following test script should work:\n\n<notextile>\n<pre>$ <code class=\"userinput\">ruby -r arvados &lt;&lt;'EOF'\narv = Arvados.new api_version: 'v1'\nmy_full_name = arv.user.current[:full_name]\nputs \"arvados.v1.users.current.full_name = '#{my_full_name}'\"\nEOF</code>\narvados.v1.users.current.full_name = 'Your Name'\n</pre>\n</notextile>\n"
  },
  {
    "path": "doc/user/copying/LICENSE-2.0.html",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Apache License\"\n...\n\n<div id=\"content\" class=\"grid_16\"><div class=\"section-content\"></br>Version 2.0, January 2004<br></br>\n<a href=\"http://www.apache.org/licenses/\">http://www.apache.org/licenses/</a> </p>\n<p>TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION</p>\n<p><strong><a name=\"definitions\">1. Definitions</a></strong>.</p>\n<p>\"License\" shall mean the terms and conditions for use, reproduction, and\ndistribution as defined by Sections 1 through 9 of this document.</p>\n<p>\"Licensor\" shall mean the copyright owner or entity authorized by the\ncopyright owner that is granting the License.</p>\n<p>\"Legal Entity\" shall mean the union of the acting entity and all other\nentities that control, are controlled by, or are under common control with\nthat entity. For the purposes of this definition, \"control\" means (i) the\npower, direct or indirect, to cause the direction or management of such\nentity, whether by contract or otherwise, or (ii) ownership of fifty\npercent (50%) or more of the outstanding shares, or (iii) beneficial\nownership of such entity.</p>\n<p>\"You\" (or \"Your\") shall mean an individual or Legal Entity exercising\npermissions granted by this License.</p>\n<p>\"Source\" form shall mean the preferred form for making modifications,\nincluding but not limited to software source code, documentation source,\nand configuration files.</p>\n<p>\"Object\" form shall mean any form resulting from mechanical transformation\nor translation of a Source form, including but not limited to compiled\nobject code, generated documentation, and conversions to other media types.</p>\n<p>\"Work\" shall mean the work of authorship, whether in Source or Object form,\nmade available under the License, as indicated by a copyright notice that\nis included in or attached to the work (an example is provided in the\nAppendix below).</p>\n<p>\"Derivative Works\" shall mean any work, whether in Source or Object form,\nthat is based on (or derived from) the Work and for which the editorial\nrevisions, annotations, elaborations, or other modifications represent, as\na whole, an original work of authorship. For the purposes of this License,\nDerivative Works shall not include works that remain separable from, or\nmerely link (or bind by name) to the interfaces of, the Work and Derivative\nWorks thereof.</p>\n<p>\"Contribution\" shall mean any work of authorship, including the original\nversion of the Work and any modifications or additions to that Work or\nDerivative Works thereof, that is intentionally submitted to Licensor for\ninclusion in the Work by the copyright owner or by an individual or Legal\nEntity authorized to submit on behalf of the copyright owner. 
For the\npurposes of this definition, \"submitted\" means any form of electronic,\nverbal, or written communication sent to the Licensor or its\nrepresentatives, including but not limited to communication on electronic\nmailing lists, source code control systems, and issue tracking systems that\nare managed by, or on behalf of, the Licensor for the purpose of discussing\nand improving the Work, but excluding communication that is conspicuously\nmarked or otherwise designated in writing by the copyright owner as \"Not a\nContribution.\"</p>\n<p>\"Contributor\" shall mean Licensor and any individual or Legal Entity on\nbehalf of whom a Contribution has been received by Licensor and\nsubsequently incorporated within the Work.</p>\n<p><strong><a name=\"copyright\">2. Grant of Copyright License</a></strong>. Subject to the\nterms and conditions of this License, each Contributor hereby grants to You\na perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable\ncopyright license to reproduce, prepare Derivative Works of, publicly\ndisplay, publicly perform, sublicense, and distribute the Work and such\nDerivative Works in Source or Object form.</p>\n<p><strong><a name=\"patent\">3. Grant of Patent License</a></strong>. Subject to the terms\nand conditions of this License, each Contributor hereby grants to You a\nperpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n(except as stated in this section) patent license to make, have made, use,\noffer to sell, sell, import, and otherwise transfer the Work, where such\nlicense applies only to those patent claims licensable by such Contributor\nthat are necessarily infringed by their Contribution(s) alone or by\ncombination of their Contribution(s) with the Work to which such\nContribution(s) was submitted. If You institute patent litigation against\nany entity (including a cross-claim or counterclaim in a lawsuit) alleging\nthat the Work or a Contribution incorporated within the Work constitutes\ndirect or contributory patent infringement, then any patent licenses\ngranted to You under this License for that Work shall terminate as of the\ndate such litigation is filed.</p>\n<p><strong><a name=\"redistribution\">4. Redistribution</a></strong>. 
You may reproduce and\ndistribute copies of the Work or Derivative Works thereof in any medium,\nwith or without modifications, and in Source or Object form, provided that\nYou meet the following conditions:</p>\n<ol style=\"list-style: lower-latin;\">\n<li>You must give any other recipients of the Work or Derivative Works a\ncopy of this License; and</li>\n\n<li>You must cause any modified files to carry prominent notices stating\nthat You changed the files; and</li>\n\n<li>You must retain, in the Source form of any Derivative Works that You\ndistribute, all copyright, patent, trademark, and attribution notices from\nthe Source form of the Work, excluding those notices that do not pertain to\nany part of the Derivative Works; and</li>\n\n<li>If the Work includes a \"NOTICE\" text file as part of its distribution,\nthen any Derivative Works that You distribute must include a readable copy\nof the attribution notices contained within such NOTICE file, excluding\nthose notices that do not pertain to any part of the Derivative Works, in\nat least one of the following places: within a NOTICE text file distributed\nas part of the Derivative Works; within the Source form or documentation,\nif provided along with the Derivative Works; or, within a display generated\nby the Derivative Works, if and wherever such third-party notices normally\nappear. The contents of the NOTICE file are for informational purposes only\nand do not modify the License. You may add Your own attribution notices\nwithin Derivative Works that You distribute, alongside or as an addendum to\nthe NOTICE text from the Work, provided that such additional attribution\nnotices cannot be construed as modifying the License.\n<br/>\n<br/>\nYou may add Your own copyright statement to Your modifications and may\nprovide additional or different license terms and conditions for use,\nreproduction, or distribution of Your modifications, or for any such\nDerivative Works as a whole, provided Your use, reproduction, and\ndistribution of the Work otherwise complies with the conditions stated in\nthis License.\n</li>\n\n</ol>\n\n<p><strong><a name=\"contributions\">5. Submission of Contributions</a></strong>. Unless You\nexplicitly state otherwise, any Contribution intentionally submitted for\ninclusion in the Work by You to the Licensor shall be under the terms and\nconditions of this License, without any additional terms or conditions.\nNotwithstanding the above, nothing herein shall supersede or modify the\nterms of any separate license agreement you may have executed with Licensor\nregarding such Contributions.</p>\n<p><strong><a name=\"trademarks\">6. Trademarks</a></strong>. This License does not grant\npermission to use the trade names, trademarks, service marks, or product\nnames of the Licensor, except as required for reasonable and customary use\nin describing the origin of the Work and reproducing the content of the\nNOTICE file.</p>\n<p><strong><a name=\"no-warranty\">7. Disclaimer of Warranty</a></strong>. Unless required by\napplicable law or agreed to in writing, Licensor provides the Work (and\neach Contributor provides its Contributions) on an \"AS IS\" BASIS, WITHOUT\nWARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including,\nwithout limitation, any warranties or conditions of TITLE,\nNON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You\nare solely responsible for determining the appropriateness of using or\nredistributing the Work and assume any risks associated with Your exercise\nof permissions under this License.</p>\n<p><strong><a name=\"no-liability\">8. Limitation of Liability</a></strong>. In no event and\nunder no legal theory, whether in tort (including negligence), contract, or\notherwise, unless required by applicable law (such as deliberate and\ngrossly negligent acts) or agreed to in writing, shall any Contributor be\nliable to You for damages, including any direct, indirect, special,\nincidental, or consequential damages of any character arising as a result\nof this License or out of the use or inability to use the Work (including\nbut not limited to damages for loss of goodwill, work stoppage, computer\nfailure or malfunction, or any and all other commercial damages or losses),\neven if such Contributor has been advised of the possibility of such\ndamages.</p>\n<p><strong><a name=\"additional\">9. Accepting Warranty or Additional Liability</a></strong>.\nWhile redistributing the Work or Derivative Works thereof, You may choose\nto offer, and charge a fee for, acceptance of support, warranty, indemnity,\nor other liability obligations and/or rights consistent with this License.\nHowever, in accepting such obligations, You may act only on Your own behalf\nand on Your sole responsibility, not on behalf of any other Contributor,\nand only if You agree to indemnify, defend, and hold each Contributor\nharmless for any liability incurred by, or claims asserted against, such\nContributor by reason of your accepting any such warranty or additional\nliability.</p>\n<p>END OF TERMS AND CONDITIONS</p>\n<h1 id=\"apply\">APPENDIX: How to apply the Apache License to your work</h1>\n<p>To apply the Apache License to your work, attach the following boilerplate\nnotice, with the fields enclosed by brackets \"[]\" replaced with your own\nidentifying information. (Don't include the brackets!) The text should be\nenclosed in the appropriate comment syntax for the file format. We also\nrecommend that a file or class name and description of purpose be included\non the same \"printed page\" as the copyright notice for easier\nidentification within third-party archives.</p>\n<div class=\"codehilite\"><pre>Copyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the &quot;License&quot;);\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an &quot;AS IS&quot; BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n</pre></div></div></div>\n"
  },
  {
    "path": "doc/user/copying/agpl-3.0.html",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"GNU Affero General Public License\"\n...\n\n<p style=\"text-align: center;\">Version 3, 19 November 2007</p>\n\n<p>Copyright &copy; 2007 Free Software Foundation,\nInc. &lt;<a href=\"http://www.fsf.org/\">http://fsf.org/</a>&gt;\n <br />\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.</p>\n\n<h3><a name=\"preamble\"></a>Preamble</h3>\n\n<p>The GNU Affero General Public License is a free, copyleft license\nfor software and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.</p>\n\n<p>The licenses for most software and other practical works are\ndesigned to take away your freedom to share and change the works.  By\ncontrast, our General Public Licenses are intended to guarantee your\nfreedom to share and change all versions of a program--to make sure it\nremains free software for all its users.</p>\n\n<p>When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.</p>\n\n<p>Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.</p>\n\n<p>A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.</p>\n\n<p>The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.</p>\n\n<p>An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.</p>\n\n<p>The precise terms and conditions for copying, distribution and\nmodification follow.</p>\n\n<h3><a name=\"terms\"></a>TERMS AND CONDITIONS</h3>\n\n<h4><a name=\"section0\"></a>0. Definitions.</h4>\n\n<p>&quot;This License&quot; refers to version 3 of the GNU Affero General Public\nLicense.</p>\n\n<p>&quot;Copyright&quot; also means copyright-like laws that apply to other kinds\nof works, such as semiconductor masks.</p>\n\n<p>&quot;The Program&quot; refers to any copyrightable work licensed under this\nLicense.  
Each licensee is addressed as &quot;you&quot;.  &quot;Licensees&quot; and\n&quot;recipients&quot; may be individuals or organizations.</p>\n\n<p>To &quot;modify&quot; a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a &quot;modified version&quot; of the\nearlier work or a work &quot;based on&quot; the earlier work.</p>\n\n<p>A &quot;covered work&quot; means either the unmodified Program or a work based\non the Program.</p>\n\n<p>To &quot;propagate&quot; a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.</p>\n\n<p>To &quot;convey&quot; a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.</p>\n\n<p>An interactive user interface displays &quot;Appropriate Legal Notices&quot;\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.</p>\n\n<h4><a name=\"section1\"></a>1. Source Code.</h4>\n\n<p>The &quot;source code&quot; for a work means the preferred form of the work\nfor making modifications to it.  &quot;Object code&quot; means any non-source\nform of a work.</p>\n\n<p>A &quot;Standard Interface&quot; means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.</p>\n\n<p>The &quot;System Libraries&quot; of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n&quot;Major Component&quot;, in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.</p>\n\n<p>The &quot;Corresponding Source&quot; for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.</p>\n\n<p>The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.</p>\n\n<p>The Corresponding Source for a work in source code form is that\nsame work.</p>\n\n<h4><a name=\"section2\"></a>2. Basic Permissions.</h4>\n\n<p>All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.</p>\n\n<p>You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.</p>\n\n<p>Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.</p>\n\n<h4><a name=\"section3\"></a>3. Protecting Users' Legal Rights From Anti-Circumvention Law.</h4>\n\n<p>No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.</p>\n\n<p>When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.</p>\n\n<h4><a name=\"section4\"></a>4. 
Conveying Verbatim Copies.</h4>\n\n<p>You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.</p>\n\n<p>You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.</p>\n\n<h4><a name=\"section5\"></a>5. Conveying Modified Source Versions.</h4>\n\n<p>You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:</p>\n\n<ul>\n\n<li>a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.</li>\n\n<li>b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    &quot;keep intact all notices&quot;.</li>\n\n<li>c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.</li>\n\n<li>d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.</li>\n\n</ul>\n\n<p>A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n&quot;aggregate&quot; if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.</p>\n\n<h4><a name=\"section6\"></a>6. 
Conveying Non-Source Forms.</h4>\n\n<p>You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:</p>\n\n<ul>\n\n<li>a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.</li>\n\n<li>b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.</li>\n\n<li>c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.</li>\n\n<li>d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.</li>\n\n<li>e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.</li>\n\n</ul>\n\n<p>A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.</p>\n\n<p>A &quot;User Product&quot; is either (1) a &quot;consumer product&quot;, which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, &quot;normally used&quot; refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.</p>\n\n<p>&quot;Installation Information&quot; for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.</p>\n\n<p>If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).</p>\n\n<p>The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.</p>\n\n<p>Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.</p>\n\n<h4><a name=\"section7\"></a>7. Additional Terms.</h4>\n\n<p>&quot;Additional permissions&quot; are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.</p>\n\n<p>When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.</p>\n\n<p>Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:</p>\n\n<ul>\n\n<li>a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or</li>\n\n<li>b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or</li>\n\n<li>c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or</li>\n\n<li>d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or</li>\n\n<li>e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or</li>\n\n<li>f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.</li>\n\n</ul>\n\n<p>All other non-permissive additional terms are considered &quot;further\nrestrictions&quot; within the meaning of section 10.  If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further restriction,\nyou may remove that term.  If a license document contains a further\nrestriction but permits relicensing or conveying under this License, you\nmay add to a covered work material governed by the terms of that license\ndocument, provided that the further restriction does not survive such\nrelicensing or conveying.</p>\n\n<p>If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.</p>\n\n<p>Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.</p>\n\n<h4><a name=\"section8\"></a>8. Termination.</h4>\n\n<p>You may not propagate or modify a covered work except as expressly\nprovided under this License.  
Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).</p>\n\n<p>However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.</p>\n\n<p>Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.</p>\n\n<p>Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.</p>\n\n<h4><a name=\"section9\"></a>9. Acceptance Not Required for Having Copies.</h4>\n\n<p>You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.</p>\n\n<h4><a name=\"section10\"></a>10. Automatic Licensing of Downstream Recipients.</h4>\n\n<p>Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.</p>\n\n<p>An &quot;entity transaction&quot; is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.</p>\n\n<p>You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.</p>\n\n<h4><a name=\"section11\"></a>11. 
Patents.</h4>\n\n<p>A &quot;contributor&quot; is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's &quot;contributor version&quot;.</p>\n\n<p>A contributor's &quot;essential patent claims&quot; are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, &quot;control&quot; includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.</p>\n\n<p>Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.</p>\n\n<p>In the following three paragraphs, a &quot;patent license&quot; is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To &quot;grant&quot; such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.</p>\n\n<p>If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  &quot;Knowingly relying&quot; means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.</p>\n\n<p>If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.</p>\n\n<p>A patent license is &quot;discriminatory&quot; if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.</p>\n\n<p>Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.</p>\n\n<h4><a name=\"section12\"></a>12. No Surrender of Others' Freedom.</h4>\n\n<p>If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.</p>\n\n<h4><a name=\"section13\"></a>13. Remote Network Interaction; Use with the GNU General Public License.</h4>\n\n<p>Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.</p>\n\n<p>Notwithstanding any other provision of this License, you have permission\nto link or combine any covered work with a work licensed under version 3\nof the GNU General Public License into a single combined work, and to\nconvey the resulting work.  The terms of this License will continue to\napply to the part which is the covered work, but the work with which it is\ncombined will remain governed by version 3 of the GNU General Public\nLicense.</p>\n\n<h4><a name=\"section14\"></a>14. Revised Versions of this License.</h4>\n\n<p>The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new\nversions will be similar in spirit to the present version, but may differ\nin detail to address new problems or concerns.</p>\n\n<p>Each version is given a distinguishing version number.  
If the\nProgram specifies that a certain numbered version of the GNU Affero\nGeneral Public License &quot;or any later version&quot; applies to it, you have\nthe option of following the terms and conditions either of that\nnumbered version or of any later version published by the Free\nSoftware Foundation.  If the Program does not specify a version number\nof the GNU Affero General Public License, you may choose any version\never published by the Free Software Foundation.</p>\n\n<p>If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that\nproxy's public statement of acceptance of a version permanently\nauthorizes you to choose that version for the Program.</p>\n\n<p>Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.</p>\n\n<h4><a name=\"section15\"></a>15. Disclaimer of Warranty.</h4>\n\n<p>THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM &quot;AS IS&quot; WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.</p>\n\n<h4><a name=\"section16\"></a>16. Limitation of Liability.</h4>\n\n<p>IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.</p>\n\n<h4><a name=\"section17\"></a>17. Interpretation of Sections 15 and 16.</h4>\n\n<p>If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.</p>\n\n<p>END OF TERMS AND CONDITIONS</p>\n\n<h3><a name=\"howto\"></a>How to Apply These Terms to Your New Programs</h3>\n\n<p>If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.</p>\n\n<p>To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe &quot;copyright&quot; line and a pointer to where the full notice is found.</p>\n\n<pre>    &lt;one line to give the program's name and a brief idea of what it does.&gt;\n    Copyright (C) &lt;year&gt;  &lt;name of author&gt;\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as\n    published by the Free Software Foundation, either version 3 of the\n    License, or (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see &lt;http://www.gnu.org/licenses/&gt;.\n</pre>\n\n<p>Also add information on how to contact you by electronic and paper mail.</p>\n\n<p>If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a &quot;Source&quot; link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.</p>\n\n<p>You should also get your employer (if you work as a programmer) or school,\nif any, to sign a &quot;copyright disclaimer&quot; for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n&lt;<a href=\"http://www.gnu.org/licenses/\">http://www.gnu.org/licenses/</a>&gt;.</p>\n\n\n"
  },
  {
    "path": "doc/user/copying/by-sa-3.0.html",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Creative Commons\"\n...\n\n<div id=\"deed\" class=\"green\">\n    <div id=\"deed-head\">\n\n      <div id=\"deed-license\">\n        <h2>Attribution-ShareAlike 3.0 United States</h2>\n      </div>\n    </div>\n\n        <h3><em>License</em></h3>\n\n        <p>THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS\n        OF THIS CREATIVE COMMONS PUBLIC LICENSE (\"CCPL\" OR\n        \"LICENSE\"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER\n        APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS\n        AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS\n        PROHIBITED.</p>\n\n        <p>BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU\n        ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE.\n        TO THE EXTENT THIS LICENSE MAY BE CONSIDERED TO BE A\n        CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE\n        IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND\n        CONDITIONS.</p>\n\n        <p><strong>1. Definitions</strong></p>\n\n        <ol type=\"a\">\n          <li><strong>\"Collective Work\"</strong> means a work, such\n          as a periodical issue, anthology or encyclopedia, in\n          which the Work in its entirety in unmodified form, along\n          with one or more other contributions, constituting\n          separate and independent works in themselves, are\n          assembled into a collective whole. A work that\n          constitutes a Collective Work will not be considered a\n          Derivative Work (as defined below) for the purposes of\n          this License.</li>\n\n          <li><strong>\"Creative Commons Compatible\n          License\"</strong> means a license that is listed at\n          http://creativecommons.org/compatiblelicenses that has\n          been approved by Creative Commons as being essentially\n          equivalent to this License, including, at a minimum,\n          because that license: (i) contains terms that have the\n          same purpose, meaning and effect as the License Elements\n          of this License; and, (ii) explicitly permits the\n          relicensing of derivatives of works made available under\n          that license under this License or either a Creative\n          Commons unported license or a Creative Commons\n          jurisdiction license with the same License Elements as\n          this License.</li>\n\n          <li><strong>\"Derivative Work\"</strong> means a work based\n          upon the Work or upon the Work and other pre-existing\n          works, such as a translation, musical arrangement,\n          dramatization, fictionalization, motion picture version,\n          sound recording, art reproduction, abridgment,\n          condensation, or any other form in which the Work may be\n          recast, transformed, or adapted, except that a work that\n          constitutes a Collective Work will not be considered a\n          Derivative Work for the purpose of this License. 
For the\n          avoidance of doubt, where the Work is a musical\n          composition or sound recording, the synchronization of\n          the Work in timed-relation with a moving image\n          (\"synching\") will be considered a Derivative Work for the\n          purpose of this License.</li>\n\n          <li><strong>\"License Elements\"</strong> means the\n          following high-level license attributes as selected by\n          Licensor and indicated in the title of this License:\n          Attribution, ShareAlike.</li>\n\n          <li><strong>\"Licensor\"</strong> means the individual,\n          individuals, entity or entities that offers the Work\n          under the terms of this License.</li>\n\n          <li><strong>\"Original Author\"</strong> means the\n          individual, individuals, entity or entities who created\n          the Work.</li>\n\n          <li><strong>\"Work\"</strong> means the copyrightable work\n          of authorship offered under the terms of this\n          License.</li>\n\n          <li><strong>\"You\"</strong> means an individual or entity\n          exercising rights under this License who has not\n          previously violated the terms of this License with\n          respect to the Work, or who has received express\n          permission from the Licensor to exercise rights under\n          this License despite a previous violation.</li>\n        </ol>\n\n        <p><strong>2. Fair Use Rights.</strong> Nothing in this\n        license is intended to reduce, limit, or restrict any\n        rights arising from fair use, first sale or other\n        limitations on the exclusive rights of the copyright owner\n        under copyright law or other applicable laws.</p>\n\n        <p><strong>3. License Grant.</strong> Subject to the terms\n        and conditions of this License, Licensor hereby grants You\n        a worldwide, royalty-free, non-exclusive, perpetual (for\n        the duration of the applicable copyright) license to\n        exercise the rights in the Work as stated below:</p>\n\n        <ol type=\"a\">\n          <li>to reproduce the Work, to incorporate the Work into\n          one or more Collective Works, and to reproduce the Work\n          as incorporated in the Collective Works;</li>\n\n          <li>to create and reproduce Derivative Works provided\n          that any such Derivative Work, including any translation\n          in any medium, takes reasonable steps to clearly label,\n          demarcate or otherwise identify that changes were made to\n          the original Work. For example, a translation could be\n          marked \"The original work was translated from English to\n          Spanish,\" or a modification could indicate \"The original\n          work has been modified.\";</li>\n\n          <li>to distribute copies or phonorecords of, display\n          publicly, perform publicly, and perform publicly by means\n          of a digital audio transmission the Work including as\n          incorporated in Collective Works;</li>\n\n          <li>to distribute copies or phonorecords of, display\n          publicly, perform publicly, and perform publicly by means\n          of a digital audio transmission Derivative Works.</li>\n\n          <li>\n            <p>For the avoidance of doubt, where the Work is a\n            musical composition:</p>\n\n            <ol type=\"i\">\n              <li><strong>Performance Royalties Under Blanket\n              Licenses</strong>. 
Licensor waives the exclusive\n              right to collect, whether individually or, in the\n              event that Licensor is a member of a performance\n              rights society (e.g. ASCAP, BMI, SESAC), via that\n              society, royalties for the public performance or\n              public digital performance (e.g. webcast) of the\n              Work.</li>\n\n              <li><strong>Mechanical Rights and Statutory\n              Royalties</strong>. Licensor waives the exclusive\n              right to collect, whether individually or via a music\n              rights agency or designated agent (e.g. Harry Fox\n              Agency), royalties for any phonorecord You create\n              from the Work (\"cover version\") and distribute,\n              subject to the compulsory license created by 17 USC\n              Section 115 of the US Copyright Act (or the\n              equivalent in other jurisdictions).</li>\n            </ol>\n          </li>\n\n          <li><strong>Webcasting Rights and Statutory\n          Royalties</strong>. For the avoidance of doubt, where the\n          Work is a sound recording, Licensor waives the exclusive\n          right to collect, whether individually or via a\n          performance-rights society (e.g. SoundExchange),\n          royalties for the public digital performance (e.g.\n          webcast) of the Work, subject to the compulsory license\n          created by 17 USC Section 114 of the US Copyright Act (or\n          the equivalent in other jurisdictions).</li>\n        </ol>\n\n        <p>The above rights may be exercised in all media and\n        formats whether now known or hereafter devised. The above\n        rights include the right to make such modifications as are\n        technically necessary to exercise the rights in other media\n        and formats. All rights not expressly granted by Licensor\n        are hereby reserved.</p>\n\n        <p><strong>4. Restrictions.</strong> The license granted in\n        Section 3 above is expressly made subject to and limited by\n        the following restrictions:</p>\n\n        <ol type=\"a\">\n          <li>You may distribute, publicly display, publicly\n          perform, or publicly digitally perform the Work only\n          under the terms of this License, and You must include a\n          copy of, or the Uniform Resource Identifier for, this\n          License with every copy or phonorecord of the Work You\n          distribute, publicly display, publicly perform, or\n          publicly digitally perform. You may not offer or impose\n          any terms on the Work that restrict the terms of this\n          License or the ability of a recipient of the Work to\n          exercise of the rights granted to that recipient under\n          the terms of the License. You may not sublicense the\n          Work. You must keep intact all notices that refer to this\n          License and to the disclaimer of warranties. When You\n          distribute, publicly display, publicly perform, or\n          publicly digitally perform the Work, You may not impose\n          any technological measures on the Work that restrict the\n          ability of a recipient of the Work from You to exercise\n          of the rights granted to that recipient under the terms\n          of the License. 
This Section 4(a) applies to the Work as\n          incorporated in a Collective Work, but this does not\n          require the Collective Work apart from the Work itself to\n          be made subject to the terms of this License. If You\n          create a Collective Work, upon notice from any Licensor\n          You must, to the extent practicable, remove from the\n          Collective Work any credit as required by Section 4(c),\n          as requested. If You create a Derivative Work, upon\n          notice from any Licensor You must, to the extent\n          practicable, remove from the Derivative Work any credit\n          as required by Section 4(c), as requested.</li>\n\n          <li>You may distribute, publicly display, publicly\n          perform, or publicly digitally perform a Derivative Work\n          only under: (i) the terms of this License; (ii) a later\n          version of this License with the same License Elements as\n          this License; (iii) either the Creative Commons\n          (Unported) license or a Creative Commons jurisdiction\n          license (either this or a later license version) that\n          contains the same License Elements as this License (e.g.\n          Attribution-ShareAlike 3.0 (Unported)); (iv) a Creative\n          Commons Compatible License. If you license the Derivative\n          Work under one of the licenses mentioned in (iv), you\n          must comply with the terms of that license. If you\n          license the Derivative Work under the terms of any of the\n          licenses mentioned in (i), (ii) or (iii) (the \"Applicable\n          License\"), you must comply with the terms of the\n          Applicable License generally and with the following\n          provisions: (I) You must include a copy of, or the\n          Uniform Resource Identifier for, the Applicable License\n          with every copy or phonorecord of each Derivative Work\n          You distribute, publicly display, publicly perform, or\n          publicly digitally perform; (II) You may not offer or\n          impose any terms on the Derivative Works that restrict\n          the terms of the Applicable License or the ability of a\n          recipient of the Work to exercise the rights granted to\n          that recipient under the terms of the Applicable License;\n          (III) You must keep intact all notices that refer to the\n          Applicable License and to the disclaimer of warranties;\n          and, (IV) when You distribute, publicly display, publicly\n          perform, or publicly digitally perform the Work, You may\n          not impose any technological measures on the Derivative\n          Work that restrict the ability of a recipient of the\n          Derivative Work from You to exercise the rights granted\n          to that recipient under the terms of the Applicable\n          License. 
This Section 4(b) applies to the Derivative Work\n          as incorporated in a Collective Work, but this does not\n          require the Collective Work apart from the Derivative\n          Work itself to be made subject to the terms of the\n          Applicable License.</li>\n\n          <li>If You distribute, publicly display, publicly\n          perform, or publicly digitally perform the Work (as\n          defined in Section 1 above) or any Derivative Works (as\n          defined in Section 1 above) or Collective Works (as\n          defined in Section 1 above), You must, unless a request\n          has been made pursuant to Section 4(a), keep intact all\n          copyright notices for the Work and provide, reasonable to\n          the medium or means You are utilizing: (i) the name of\n          the Original Author (or pseudonym, if applicable) if\n          supplied, and/or (ii) if the Original Author and/or\n          Licensor designate another party or parties (e.g. a\n          sponsor institute, publishing entity, journal) for\n          attribution (\"Attribution Parties\") in Licensor's\n          copyright notice, terms of service or by other reasonable\n          means, the name of such party or parties; the title of\n          the Work if supplied; to the extent reasonably\n          practicable, the Uniform Resource Identifier, if any,\n          that Licensor specifies to be associated with the Work,\n          unless such URI does not refer to the copyright notice or\n          licensing information for the Work; and, consistent with\n          Section 3(b) in the case of a Derivative Work, a credit\n          identifying the use of the Work in the Derivative Work\n          (e.g., \"French translation of the Work by Original\n          Author,\" or \"Screenplay based on original Work by\n          Original Author\"). The credit required by this Section\n          4(c) may be implemented in any reasonable manner;\n          provided, however, that in the case of a Derivative Work\n          or Collective Work, at a minimum such credit will appear,\n          if a credit for all contributing authors of the\n          Derivative Work or Collective Work appears, then as part\n          of these credits and in a manner at least as prominent as\n          the credits for the other contributing authors. For the\n          avoidance of doubt, You may only use the credit required\n          by this Section for the purpose of attribution in the\n          manner set out above and, by exercising Your rights under\n          this License, You may not implicitly or explicitly assert\n          or imply any connection with, sponsorship or endorsement\n          by the Original Author, Licensor and/or Attribution\n          Parties, as appropriate, of You or Your use of the Work,\n          without the separate, express prior written permission of\n          the Original Author, Licensor and/or Attribution\n          Parties.</li>\n        </ol>\n\n        <p><strong>5. Representations, Warranties and\n        Disclaimer</strong></p>\n\n        <p>UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN\n        WRITING, LICENSOR OFFERS THE WORK AS-IS AND ONLY TO THE\n        EXTENT OF ANY RIGHTS HELD IN THE LICENSED WORK BY THE\n        LICENSOR. 
THE LICENSOR MAKES NO REPRESENTATIONS OR\n        WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS,\n        IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT\n        LIMITATION, WARRANTIES OF TITLE, MARKETABILITY,\n        MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE,\n        NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS,\n        ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR\n        NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE\n        EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT\n        APPLY TO YOU.</p>\n\n        <p><strong>6. Limitation on Liability.</strong> EXCEPT TO\n        THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL\n        LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY\n        SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY\n        DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK,\n        EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF\n        SUCH DAMAGES.</p>\n\n        <p><strong>7. Termination</strong></p>\n\n        <ol type=\"a\">\n          <li>This License and the rights granted hereunder will\n          terminate automatically upon any breach by You of the\n          terms of this License. Individuals or entities who have\n          received Derivative Works or Collective Works from You\n          under this License, however, will not have their licenses\n          terminated provided such individuals or entities remain\n          in full compliance with those licenses. Sections 1, 2, 5,\n          6, 7, and 8 will survive any termination of this\n          License.</li>\n\n          <li>Subject to the above terms and conditions, the\n          license granted here is perpetual (for the duration of\n          the applicable copyright in the Work). Notwithstanding\n          the above, Licensor reserves the right to release the\n          Work under different license terms or to stop\n          distributing the Work at any time; provided, however that\n          any such election will not serve to withdraw this License\n          (or any other license that has been, or is required to\n          be, granted under the terms of this License), and this\n          License will continue in full force and effect unless\n          terminated as stated above.</li>\n        </ol>\n\n        <p><strong>8. 
Miscellaneous</strong></p>\n\n        <ol type=\"a\">\n          <li>Each time You distribute or publicly digitally\n          perform the Work (as defined in Section 1 above) or a\n          Collective Work (as defined in Section 1 above), the\n          Licensor offers to the recipient a license to the Work on\n          the same terms and conditions as the license granted to\n          You under this License.</li>\n\n          <li>Each time You distribute or publicly digitally\n          perform a Derivative Work, Licensor offers to the\n          recipient a license to the original Work on the same\n          terms and conditions as the license granted to You under\n          this License.</li>\n\n          <li>If any provision of this License is invalid or\n          unenforceable under applicable law, it shall not affect\n          the validity or enforceability of the remainder of the\n          terms of this License, and without further action by the\n          parties to this agreement, such provision shall be\n          reformed to the minimum extent necessary to make such\n          provision valid and enforceable.</li>\n\n          <li>No term or provision of this License shall be deemed\n          waived and no breach consented to unless such waiver or\n          consent shall be in writing and signed by the party to be\n          charged with such waiver or consent.</li>\n\n          <li>This License constitutes the entire agreement between\n          the parties with respect to the Work licensed here. There\n          are no understandings, agreements or representations with\n          respect to the Work not specified here. Licensor shall\n          not be bound by any additional provisions that may appear\n          in any communication from You. This License may not be\n          modified without the mutual written agreement of the\n          Licensor and You.</li>\n        </ol>\n        <!-- BREAKOUT FOR CC NOTICE.  NOT A PART OF THE LICENSE -->\n\n        <blockquote>\n          <h3>Creative Commons Notice</h3>\n\n          <p>Creative Commons is not a party to this License, and\n          makes no warranty whatsoever in connection with the Work.\n          Creative Commons will not be liable to You or any party\n          on any legal theory for any damages whatsoever, including\n          without limitation any general, special, incidental or\n          consequential damages arising in connection to this\n          license. Notwithstanding the foregoing two (2) sentences,\n          if Creative Commons has expressly identified itself as\n          the Licensor hereunder, it shall have all rights and\n          obligations of Licensor.</p>\n\n          <p>Except for the limited purpose of indicating to the\n          public that the Work is licensed under the CCPL, Creative\n          Commons does not authorize the use by either party of the\n          trademark \"Creative Commons\" or any related trademark or\n          logo of Creative Commons without the prior written\n          consent of Creative Commons. 
Any permitted use will be in\n          compliance with Creative Commons' then-current trademark\n          usage guidelines, as may be published on its website or\n          otherwise made available upon request from time to time.\n          For the avoidance of doubt, this trademark restriction\n          does not form part of this License.</p>\n\n          <p>Creative Commons may be contacted at <a href=\n          \"http://creativecommons.org/\">http://creativecommons.org/</a>.</p>\n        </blockquote>\n      </div>\n    </div>\n\n  </div>\n"
  },
  {
    "path": "doc/user/copying/copying.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Arvados Free Software Licenses\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nEvery source file has @SPDX-License-Identifier@ header that identifies the exact copyright license that applies to that file.\n\nIn general, Arvados server-side components and system administration tools contained in the @services/@, @lib/@, @apps/@, @cmd/@, and @tools/@ directories, including the API Server, Workbench, Keep, and Crunch, are licenced under the \"GNU Affero General Public License version 3\":agpl-3.0.html.\n\nThe Arvados client tools and Software Development Kits contained in the @sdk/@ directory and code samples in the Arvados documentation are licensed under the \"Apache License, Version 2.0\":LICENSE-2.0.html.\n\nThe Arvados Documentation located in the @doc/@ directory is licensed under the \"Creative Commons Attribution-Share Alike 3.0 United States\":by-sa-3.0.html.\n"
  },
  {
    "path": "doc/user/cwl/arvados-vscode-training.html.md.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Developing CWL Workflows with VSCode\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n\nImported from https://github.com/arvados/arvados-vscode-cwl-training\ngit hash: f39d44c1bdb2f82ec8f22ade874ca70544531289\n\n{% endcomment %}\n\nThese lessons give step by step instructions for using Visual Studio\nCode (abbreviated \"vscode\") to develop CWL workflows on Arvados.\n\n1. Set up SSH\n1. Install vscode and necessary extensions, then use vscode to connect to an Arvados shell node for development\n1. Register a workflow, run it on workbench, and view the log\n1. Upload input, run a workflow on it, and view the output\n1. Register a workflow with default inputs\n1. Run a workflow without registering it\n\n## 1. SSH Setup\n\n1. (Windows only) Install Git for Windows [https://git-scm.com/download/win](https://git-scm.com/download/win)\n   1. Choose \"64-bit Git for Windows Setup\".  It does not require admin privileges to install.\n   1. Hit \"Next\" a bunch of times to accept the defaults\n   1. The most important things is that \"install git bash\" and \"install OpenSSH\" are enabled (this is the default).\n   1. At the end of the installation, you can launch tick a box to git bash directly.\n   1. Open \"Git Bash\" (installed in the \"Git\" folder of the start menu)\n1. (All operating systems) Starting from bash shell (on MacOS or Linux you will open \"Terminal\")\n   1. Shell: Run `ssh-keygen`\n      1. Hit enter to save to a default location\n      1. You can choose to protect the key with a password, or just hit enter for no password.\n   1. Shell: Look for a message like `Your public key has been saved\n      in /c/Users/MyUsername/.ssh/id_rsa.pub` (Windows git bash\n      example, on MacOS or Linux this will probably start with `/Users` or `/home`)\n      1. Shell: Run `cat /c/Users/MyUsername/.ssh/id_rsa.pub`\n   1. Shell: Use the pointer to highlight and copy the lines starting\n      with `ssh-rsa …` up to the next blank line.  Right click and\n      select \"Copy\"\n1. Open Arvados workbench 2.  If necessary, go to the user menu and\n   select \"Go to Workbench 2\"\n   1. Workbench: Go to `SSH keys` in the user menu\n   1. Workbench:Click `+Add new ssh key`\n   1. Workbench: Paste the key into `Public key` and enter something for `name`\n   1. Workbench: Go to `Virtual Machines` in the user menu\n   1. Workbench: Highlight and copy the value in in the `Command line` column.\n1. At the git bash command line\n   1. Shell: paste the `ssh shell…` command line you got from workbench.\n   1. Shell: type \"yes\" if it asks `Are you sure you want to continue connecting`.\n   1. Note: it can take up to two minutes for the SSH key to be copied to\n      the shell node.  If you get \"Permission denied\" the first time, wait 60\n      seconds and try again.\n   1. Shell: You should now be logged into the Arvados shell node.\n   1. Shell: Log out by typing `exit`\n\n## 2. VSCode setup\n\n1. Install [Visual Studio Code](https://code.visualstudio.com/) and start it up\n1. Vscode: On the left sidebar, select `Extensions` ![](images/Extensions.png)\n   1. In `Search Extensions in Marketplace` enter \"remote development\".\n   1. Choose and install the \"Remote Development\" extension pack from Microsoft\n   1. In `Search Extensions in Marketplace` enter \"remote-ssh\".\n   1. Choose and install the \"Remote - SSH\" extension pack from Microsoft\n1. 
Vscode: On the left sidebar, choose `Remote Explorer` ![](images/RemoteExplorer.png)\n   1. At the top of the Remote Explorer panel choose `SSH targets` ![](images/SSHTargets.png)\n   1. Click `Add New` ![](images/AddNew.png)\n   1. Enter the `ssh shell…` command line you used in the previous section, step 1.4.1\n      1. If it asks you `Select SSH configuration file to update` choose the first one in the list.\n   1. Right click the newly added ssh target in the list and select `Connect to Host in Current Window`\n   1. If it asks `Select platform of the remote host` select `Linux`.\n1. Vscode: On the left sidebar, go back to `Extensions` ![](images/Extensions.png)\n   1. Search for \"benten\", then look for `CWL (Rabix/Benten)` and click `Install`\n   1. On the information page for `CWL (Rabix/Benten)`\n      1. If you see a warning `Install the extension on 'SSH: ...' to enable` then click the button `Install in SSH: ...`\n   1. You should now see a message `Extension is enabled on 'SSH: ...' and disabled locally.`\n1. Vscode: On the left sidebar, choose `Explorer` ![](images/Explorer.png)\n   1. Select `Clone Repository` and enter [https://github.com/arvados/arvados-vscode-cwl-training](https://github.com/arvados/arvados-vscode-cwl-training), then click `Open`\n   1. If asked `Would you like to open the cloned repository?` choose `Open`\n1. Go to Arvados Workbench\n   1. Workbench: In the user menu, select `Current token`\n   1. Workbench: Click on `Copy to Clipboard`.\n   1. Workbench: You should see a notification `Token copied to clipboard`.\n   1. Go to Vscode\n   1. Vscode: Click on the `Terminal` menu\n   1. Vscode: Click `Run Task…`\n   1. Vscode: Select `Set Arvados Host`\n   1. Vscode: Paste the value of API Host from the Workbench `Get API Token` dialog (found in the User menu) at the prompt\n   1. Vscode: Next, run task `Set Arvados Token`\n   1. Vscode: Paste the value of API Token from the Workbench `Get API Token` dialog\n   1. Vscode: These will create files called `API_HOST` and `API_TOKEN`\n\n## 3. Register & run a workflow\n\n1. Vscode: Click on the `lesson1/main.cwl` file\n   1. Click on the `Terminal` menu\n   1. Click `Run Task…`\n   1. Select `Register or update CWL workflow on Arvados Workbench`\n   1. This will create a file called `WORKFLOW_UUID`\n1. Workbench: Go to `Home Projects`, click `+NEW` and select `New project`\n   1. Enter a name for the project like \"Lesson 1\"\n   1. You should arrive at the panel for the new project\n1. Workbench: With `Lesson 1` selected\n   1. Click on `+NEW` and select `Run a process`\n   1. Select `CWL training lesson 1` from the list and click `Next`\n   1. Enter a name for this run like `First training run`\n   1. Enter a message (under `#main/message`) like \"Hello world\"\n   1. Click `Run process`\n   1. This should take you to a panel showing the workflow run status\n1. Workbench: workflow run status panel\n   1. Wait for the badge in the upper right to say `Completed`\n   1. In the subprocesses tab, click on the `echo` workflow step\n   1. This will take you to the status panel for the `echo` step\n   1. Click the `Logs` tab\n   1. This will take you to the log viewer panel\n   1. In the `Event Type` dropdown (which should show `Main logs` by default) choose `stdout`\n   1. You should see your message\n\n## 4. Working with input and output files\n\n1. Vscode: Click on the `lesson2/main.cwl` file\n   1. Click on the `Terminal` menu\n   1. Click `Run Task…`\n   1. 
Select `Register or update CWL workflow on Arvados Workbench`\n1. Go to your desktop\n   1. Right click on the desktop, select `New > Text Document`\n   1. Name the file `message` (with the default Windows settings it will be saved as `message.txt`)\n   1. Enter a message like \"Hello earth\" and save\n1. Workbench: Go to `+NEW` and select `New project`\n   1. Enter a name for the project like \"Lesson 2\"\n   1. You should arrive at the panel for the new project\n1. Workbench: With `Lesson 2` project selected\n   1. Click on `+NEW` and select `New collection`\n   1. For Collection Name enter \"my message\"\n   1. Drag and drop `message.txt` into the browser\n   1. Click `Create a collection`\n   1. The file should be uploaded and then you will be on the collection page\n1. Workbench: Select the `Lesson 2` project\n   1. Click on `+NEW` and select `Run a process`\n   1. Select `CWL training lesson 2` from the list and click `Next`\n   1. Enter a name for this run like \"Second training run\"\n   1. Click on `#main/message`\n   1. A selection dialog box will appear\n   1. Navigate to the collection you created in step (4.4.4) and choose `message.txt`\n   1. Click `Run process`\n   1. This should take you to a panel showing the workflow run status\n1. Workbench: workflow run status panel\n   1. Wait for the process to complete\n   1. Click on the dot menu\n   1. Choose `Outputs`\n   1. Right click on `reverse.txt`\n   1. Click on `Open in new tab`\n   1. The results should be visible in a new browser tab.\n\n## 5. Register a workflow with default inputs\n\nThe default value for the `message` parameter will be taken from the `lesson3/defaults.yaml` file.\n\n1. Vscode: Click on the `lesson3/main.cwl` file\n   1. Click on the `Terminal` menu\n   1. Click `Run Task…`\n   1. Select `Register or update CWL workflow on Arvados Workbench`\n1. Workbench: Go to `Home Projects` then click `+NEW` and select `New project`\n   1. Enter a name for the project like \"Lesson 3\"\n   1. You should arrive at the panel for the new project\n1. Workbench: With `Lesson 3` selected\n   1. Click on `+NEW` and select `Run a process`\n   1. Select `CWL training lesson 3` from the list and click `Next`\n   1. Enter a name for this run like \"Third training run\"\n   1. The `#main/message` parameter will be pre-filled with your default value.  You can choose to change it or use the default.\n   1. Click `Run process`\n   1. This should take you to the status page for this workflow\n   1. The greeting will appear in the `Log` of the `echo` task, which\n      can be found the same way as described earlier in section 3.\n\n## 6. Run a workflow without registering it\n\nThe `message` parameter will be taken from the file `lesson4/main-input.yaml` (a sketch of this file appears at the end of this section).  This is useful during development.\n\n1. Workbench: Go to `Home Projects` then click `+NEW` and select `New project`\n   1. Enter a name for the project like \"Lesson 4\"\n   1. You should arrive at the panel for the new project\n   1. Click on `Additional info` in the upper right to expand the `info` panel\n   1. Under `Project UUID` click the `Copy to clipboard` button\n1. Vscode: Select the file `lesson4/main.cwl`\n   1. Click on the `Terminal` menu\n   1. Click `Run Task…`\n   1. Select `Set Arvados project UUID`\n   1. Paste the project UUID from workbench at the prompt\n1. Vscode: Select the file `lesson4/main.cwl`\n   1. Click on the `Terminal` menu\n   1. Click `Run Task…`\n   1. Select `Run CWL workflow on Arvados`\n1. Vscode: In the bottom panel select the `Terminal` tab\n   1. 
In the upper right corner of the Terminal tab select `Task - Run CWL Workflow` from the drop-down\n   1. Look for logging text like `submitted container_request zzzzz-xvhdp-0123456789abcde`\n   1. Highlight and copy the workflow identifier (this is the string containing `-xvhdp-` in the middle)\n   1. The results of this run will appear in the terminal when the run completes.\n1. Workbench: Paste the workflow identifier into the search box\n   1. This will take you to the status page for this workflow\n
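\nAs noted in the introduction to this section, `lesson4/main-input.yaml` is a plain CWL input object: each key is an input parameter name from `lesson4/main.cwl`.  A minimal sketch (the value is a placeholder):\n\n```yaml\nmessage: Hello world\n```\n\n## Notes\n\nIf you need to change something about the environment of the user on\nthe remote host (for example, the user has been added to a new unix\ngroup) you need to restart the vscode server that runs on the remote\nhost.  Do this in vscode:\n\nctrl+shift+p: `Remote-SSH: Kill VS Code Server on Host`\n\nThis is because the vscode server remains running on the remote host\neven after you disconnect, so exiting/restarting vscode on the desktop\nhas no effect.\n"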
  },
  {
    "path": "doc/user/cwl/bwa-mem/bwa-mem-input-local.yml",
    "content": "#!/usr/bin/env cwltool\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\ncwl:tool: bwa-mem.cwl\nreference:\n  class: File\n  location: 19.fasta.bwt\nread_p1:\n  class: File\n  location: HWI-ST1027_129_D0THKACXX.1_1.fastq\nread_p2:\n  class: File\n  location: HWI-ST1027_129_D0THKACXX.1_2.fastq\ngroup_id: arvados_tutorial\nsample_id: HWI-ST1027_129\nPL: illumina\n"
  },
  {
    "path": "doc/user/cwl/bwa-mem/bwa-mem-input-mixed.yml",
    "content": "#!/usr/bin/env cwl-runner\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n# Example input file providing both content addresses and UUIDs.  The\n# collections identified by 'collectionUUID' will be checked that the\n# current content of the collection record matches the content address\n# in the 'location' field.\n\n$namespaces:\n  arv: 'http://arvados.org/cwl#'\n\ncwl:tool: bwa-mem.cwl\nreference:\n  class: File\n  location: keep:2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt\n  arv:collectionUUID: jutro-4zz18-tv416l321i4r01e\nread_p1:\n  class: File\n  location: keep:ae480c5099b81e17267b7445e35b4bc7+180/HWI-ST1027_129_D0THKACXX.1_1.fastq\n  arv:collectionUUID: jutro-4zz18-8k5hsvee0izv2g3\nread_p2:\n  class: File\n  location: keep:ae480c5099b81e17267b7445e35b4bc7+180/HWI-ST1027_129_D0THKACXX.1_2.fastq\n  arv:collectionUUID: jutro-4zz18-8k5hsvee0izv2g3\ngroup_id: arvados_tutorial\nsample_id: HWI-ST1027_129\nPL: illumina\n"
  },
  {
    "path": "doc/user/cwl/bwa-mem/bwa-mem-input-uuids.yml",
    "content": "#!/usr/bin/env cwl-runner\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n# Example input file using UUIDs to reference input collections. These\n# will be resolved to content addresses before running the workflow.\n\ncwl:tool: bwa-mem.cwl\nreference:\n  class: File\n  location: keep:jutro-4zz18-tv416l321i4r01e/19.fasta.bwt\nread_p1:\n  class: File\n  location: keep:jutro-4zz18-8k5hsvee0izv2g3/HWI-ST1027_129_D0THKACXX.1_1.fastq\nread_p2:\n  class: File\n  location: keep:jutro-4zz18-8k5hsvee0izv2g3/HWI-ST1027_129_D0THKACXX.1_2.fastq\ngroup_id: arvados_tutorial\nsample_id: HWI-ST1027_129\nPL: illumina\n"
  },
  {
    "path": "doc/user/cwl/bwa-mem/bwa-mem-input.yml",
    "content": "#!/usr/bin/env cwl-runner\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\ncwl:tool: bwa-mem.cwl\nreference:\n  class: File\n  location: keep:2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt\nread_p1:\n  class: File\n  location: keep:ae480c5099b81e17267b7445e35b4bc7+180/HWI-ST1027_129_D0THKACXX.1_1.fastq\nread_p2:\n  class: File\n  location: keep:ae480c5099b81e17267b7445e35b4bc7+180/HWI-ST1027_129_D0THKACXX.1_2.fastq\ngroup_id: arvados_tutorial\nsample_id: HWI-ST1027_129\nPL: illumina\n"
  },
  {
    "path": "doc/user/cwl/bwa-mem/bwa-mem-template.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nreference:\n  class: File\n  location: keep:2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt\nPL: illumina\n"
  },
  {
    "path": "doc/user/cwl/bwa-mem/bwa-mem.cwl",
    "content": "#!/usr/bin/env cwl-runner\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\n\nhints:\n  DockerRequirement:\n    dockerPull: quay.io/biocontainers/bwa:0.7.17--ha92aebf_3\n\nbaseCommand: [bwa, mem]\n\narguments:\n  - {prefix: \"-t\", valueFrom: $(runtime.cores)}\n  - {prefix: \"-R\", valueFrom: '@RG\\\\\\tID:$(inputs.group_id)\\\\\\tPL:$(inputs.PL)\\\\\\tSM:$(inputs.sample_id)'}\n\ninputs:\n  reference:\n    type: File\n    inputBinding:\n      position: 1\n      valueFrom: $(self.dirname)/$(self.nameroot)\n    secondaryFiles:\n      - ^.ann\n      - ^.amb\n      - ^.pac\n      - ^.sa\n    doc: The index files produced by `bwa index`\n  read_p1:\n    type: File\n    inputBinding:\n      position: 2\n    doc: The reads, in fastq format.\n  read_p2:\n    type: File?\n    inputBinding:\n      position: 3\n    doc:  For mate paired reads, the second file (optional).\n  group_id: string\n  sample_id: string\n  PL: string\n\nstdout: $(inputs.read_p1.nameroot).sam\n\noutputs:\n  aligned_sam:\n    type: stdout\n"
  },
  {
    "path": "doc/user/cwl/costanalyzer.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Analyzing workflow cost (cloud only)\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2(#introduction). Introduction\n\nThis tool reports on the data and workflows in an Arvados cluster to help administrators understand growth and costs. It reports what it has access to: any Arvados user can run it to get a report of their own workflows and others they can see. An Arvados administrator can run a report on all data and workflows in the cluster. If you provide credentials for a Prometheus server in your Arvados cluster, the report includes additional information about compute use.\n\n{% include 'notebox_begin' %}\n\nCost information is generally only available when Arvados runs in a cloud environment and @arvados-dispatch-cloud@ is used to dispatch containers. The per node-hour price for each defined InstanceType must be supplied in \"config.yml\":{{site.baseurl}}/admin/config.html.\n\n{% include 'notebox_end' %}\n\n{% include 'tutorial_expectations' %}\n\nh2(#workbench-workflow). Running as a workflow from Workbench\n\n{% include 'branchname' %}\n\nWe provide a CWL workflow to generate this report. It's available as a \"single file in the Arvados source\":https://github.com/arvados/arvados/blob/{{ branchname \n}}/tools/cluster-activity/cluster-activity.cwl. You can register the workflow on your cluster by running:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arvados-cwl-runner [--project-uuid=UUID] --create-workflow cluster-activity.cwl</span>\n</code></pre>\n</notextile>\n\nThen you can launch the workflow from Workbench. All inputs have documented formats and values.\n\nh2(#cli-workflow). Running as a workflow from the command line\n\nAlternatively, you can run the workflow directly with @arvados-cwl-runner@. Write an input file following this YAML template:\n\n{% codeblock as yaml %}\n# Report start date as a `YYYY-MM-DD` string\nreporting_start: \"YYYY-MM-DD\"\n\n# Report end date as a `YYYY-MM-DD` string. Default today.\n#reporting_end: \"YYYY-MM-DD\"\n\n# The base URL of your Arvados cluster's Prometheus server, like\n# `https://prometheus.arvados.example/`\n#prometheus_host: \"\"\n\n# Prometheus API token\n#prometheus_apikey: \"\"\n\n# Prometheus API username\n#prometheus_user: \"\"\n\n# Prometheus API password\n#prometheus_password: \"\"\n\n# A string with a Python regular expression.\n# Workflows whose name match the expression will be excluded from the report.\n#exclude: \"\"\n\n# A boolean. If true, individual workflow steps will be reported alongside\n# their parent workflows.\ninclude_workflow_steps: false\n{% endcodeblock %}\n\nThen run \"the workflow\":https://github.com/arvados/arvados/blob/{{ branchname }}/tools/cluster-activity/cluster-activity.cwl like this:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arvados-cwl-runner [--project-uuid=UUID] [options ...] cluster-activity.cwl <strong>YOUR-INPUTS.yml</strong></span>\n</code></pre>\n</notextile>\n\nh2(#running-cli). Running as a command line tool\n\nYou can install a command line tool to generate reports on your own system. Install it with \"pipx\":https://pipx.pypa.io/stable/ like:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">pipx install \"arvados-cluster-activity[prometheus]\"</span>\n</code></pre>\n</notextile>\n\nIf you don't have a Prometheus server or don't want Prometheus support, remove @[prometheus]@ from the command line. 
Advanced users can install the tool into their own virtualenv or elsewhere.\n\nThe command line tool provides options to control the report generation. These correspond to the workflow inputs. Run the tool with @--help@ for the full list:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv-cluster-activity --help</span>\n</code></pre>\n</notextile>\n\nThe tool gets Prometheus credentials from the @PROMETHEUS_HOST@, @PROMETHEUS_APIKEY@, @PROMETHEUS_USER@, and @PROMETHEUS_PASSWORD@ environment variables. The values follow the format of the workflow inputs above. You can write these environment variables in a dedicated file and load that with the tool's @--prometheus-auth@ option.\n
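\nFor example, a credentials file for @--prometheus-auth@ might look like the following sketch (the hostname, username, and password are placeholders):\n\n<notextile>\n<pre><code>PROMETHEUS_HOST=https://prometheus.arvados.example/\nPROMETHEUS_USER=admin\nPROMETHEUS_PASSWORD=examplepassword\n</code></pre>\n</notextile>\n\nThen pass that file when generating a report:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv-cluster-activity --prometheus-auth prometheus.env [options ...]</span>\n</code></pre>\n</notextile>\n"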
  },
  {
    "path": "doc/user/cwl/crunchstat-summary.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Analyzing workflow performance\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'tutorial_expectations' %}\n\n*Note:* Starting from Arvados 2.7.2, these reports are generated automatically by @arvados-cwl-runner@ and can be found as @usage_report.html@ in a container request's log collection.\n\nThe @crunchstat-summary@ tool can be used to analyze workflow and container performance. It can be installed from packages (@apt install python3-crunchstat-summary@ or @dnf install python3-crunchstat-summary@), or in a Python virtualenv (@pip install crunchstat_summary@). @crunchstat-summary@ analyzes the crunchstat lines from the logs of a container or workflow and generates a report in text or html format.\n\nh2(#syntax). Syntax\n\nThe @crunchstat-summary@ tool has a number of command line arguments:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">crunchstat-summary -h</span>\nusage: crunchstat-summary [-h]\n                          [--job UUID | --container UUID | --pipeline-instance UUID | --log-file LOG_FILE]\n                          [--skip-child-jobs] [--format {html,text}]\n                          [--threads THREADS] [--verbose]\n\nSummarize resource usage of an Arvados Crunch job\n\noptional arguments:\n  -h, --help            show this help message and exit\n  --job UUID, --container-request UUID\n                        Look up the specified job or container request and\n                        read its log data from Keep (or from the Arvados event\n                        log, if the job is still running)\n  --container UUID      [Deprecated] Look up the specified container find its\n                        container request and read its log data from Keep (or\n                        from the Arvados event log, if the job is still\n                        running)\n  --pipeline-instance UUID\n                        [Deprecated] Summarize each component of the given\n                        pipeline instance (historical pre-1.4)\n  --log-file LOG_FILE   Read log data from a regular file\n  --skip-child-jobs     Do not include stats from child jobs/containers\n  --format {html,text}  Report format\n  --threads THREADS     Maximum worker threads to run\n  --verbose, -v         Log more information (once for progress, twice for\n                        debug)\n</code></pre>\n</notextile>\n\nWhen @crunchstat-summary@ is given a container or container request uuid for a toplevel workflow runner container, it will generate a report for the whole workflow. If the workflow is big, it can take a long time to generate the report.\n\nh2(#examples). Examples\n\n@crunchstat-summary@ prints to stdout. 
The html report, in particular, should be redirected to a file and then loaded in a browser.\n\nThe html report can be generated as follows:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">crunchstat-summary --container-request pirca-xvhdp-rs0ef250emtmbj8 --format html > report.html</span>\n</code></pre>\n</notextile>\n\nWhen loaded in a browser:\n\n!(full-width)images/crunchstat-summary-html.png!\n\n<br>\n\nUsing @--format text@ will print detailed usage and summary:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">crunchstat-summary --container-request pirca-xvhdp-rs0ef250emtmbj8 --format text</span>\ncategory\tmetric\ttask_max\ttask_max_rate\tjob_total\nblkio:0:0\tread\t63067755822\t53687091.20\t63067755822\nblkio:0:0\twrite\t64484253320\t16376234.80\t64484253320\ncpu\tcpus\t16\t-\t-\ncpu\tsys\t2147.29\t0.60\t2147.29\ncpu\tuser\t549046.22\t15.99\t549046.22\ncpu\tuser+sys\t551193.51\t16.00\t551193.51\nfuseop:create\tcount\t1\t0.10\t1\nfuseop:create\ttime\t0.01\t0.00\t0.01\nfuseop:destroy\tcount\t0\t0\t0\nfuseop:destroy\ttime\t0\t0\t0.00\nfuseop:flush\tcount\t12\t0.70\t12\nfuseop:flush\ttime\t0.00\t0.00\t0.00\nfuseop:forget\tcount\t0\t0\t0\nfuseop:forget\ttime\t0\t0\t0.00\nfuseop:getattr\tcount\t40\t2.70\t40\nfuseop:getattr\ttime\t0.00\t0.00\t0.00\nfuseop:lookup\tcount\t36\t2.90\t36\nfuseop:lookup\ttime\t0.67\t0.07\t0.67\nfuseop:mkdir\tcount\t0\t0\t0\nfuseop:mkdir\ttime\t0\t0\t0.00\nfuseop:on_event\tcount\t0\t0\t0\nfuseop:on_event\ttime\t0\t0\t0.00\nfuseop:open\tcount\t9\t0.30\t9\nfuseop:open\ttime\t0.00\t0.00\t0.00\nfuseop:opendir\tcount\t0\t0\t0\nfuseop:opendir\ttime\t0\t0\t0.00\nfuseop:read\tcount\t481185\t409.60\t481185\nfuseop:read\ttime\t370.11\t2.14\t370.11\nfuseop:readdir\tcount\t0\t0\t0\nfuseop:readdir\ttime\t0\t0\t0.00\nfuseop:release\tcount\t7\t0.30\t7\nfuseop:release\ttime\t0.00\t0.00\t0.00\nfuseop:rename\tcount\t0\t0\t0\nfuseop:rename\ttime\t0\t0\t0.00\nfuseop:rmdir\tcount\t0\t0\t0\nfuseop:rmdir\ttime\t0\t0\t0.00\nfuseop:setattr\tcount\t0\t0\t0\nfuseop:setattr\ttime\t0\t0\t0.00\nfuseop:statfs\tcount\t0\t0\t0\nfuseop:statfs\ttime\t0\t0\t0.00\nfuseop:unlink\tcount\t0\t0\t0\nfuseop:unlink\ttime\t0\t0\t0.00\nfuseop:write\tcount\t5414406\t1123.00\t5414406\nfuseop:write\ttime\t475.04\t0.11\t475.04\nfuseops\tread\t481185\t409.60\t481185\nfuseops\twrite\t5414406\t1123.00\t5414406\nkeepcache\thit\t961402\t819.20\t961402\nkeepcache\tmiss\t946\t0.90\t946\nkeepcalls\tget\t962348\t820.00\t962348\nkeepcalls\tput\t961\t0.30\t961\nmem\tcache\t22748987392\t-\t-\nmem\tpgmajfault\t0\t-\t0\nmem\trss\t27185491968\t-\t-\nnet:docker0\trx\t0\t-\t0\nnet:docker0\ttx\t0\t-\t0\nnet:docker0\ttx+rx\t0\t-\t0\nnet:ens5\trx\t1100398604\t-\t1100398604\nnet:ens5\ttx\t1445464\t-\t1445464\nnet:ens5\ttx+rx\t1101844068\t-\t1101844068\nnet:keep0\trx\t63086467386\t53687091.20\t63086467386\nnet:keep0\ttx\t64482237590\t20131128.60\t64482237590\nnet:keep0\ttx+rx\t127568704976\t53687091.20\t127568704976\nstatfs\tavailable\t398721179648\t-\t398721179648\nstatfs\ttotal\t400289181696\t-\t400289181696\nstatfs\tused\t1568198656\t0\t1568002048\ntime\telapsed\t34820\t-\t34820\n# Elapsed time: 9h 40m 20s\n# Assigned instance type: m5.4xlarge\n# Instance hourly price: $0.768\n# Max CPU usage in a single interval: 1599.52%\n# Overall CPU usage: 1582.98%\n# Requested CPU cores: 16\n# Instance VCPUs: 16\n# Max memory used: 25926.11MB\n# Requested RAM: 50000.00MB\n# Maximum RAM request for this instance type: 61736.70MB\n# Max network traffic: 127.57GB\n# Max network speed in a single interval: 53.69MB/s\n# Keep cache 
miss rate: 0.10%\n# Keep cache utilization: 99.97%\n# Temp disk utilization: 0.39%\n</code></pre>\n</notextile>\n
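\nIf you have already saved log data to a regular file, you can analyze it directly with @--log-file@ (a sketch; the file name is a placeholder):\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">crunchstat-summary --log-file container.log --format html > report.html</span>\n</code></pre>\n</notextile>\n"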
  },
  {
    "path": "doc/user/cwl/cwl-extensions.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Arvados CWL Extensions\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados provides several extensions to CWL for workflow optimization, site-specific configuration, and to enable access the Arvados API.\n\nTo use Arvados CWL extensions, add the following @$namespaces@ section at the top of your CWL file:\n\n{% codeblock as yaml %}\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\n  cwltool: \"http://commonwl.org/cwltool#\"\n{% endcodeblock %}\n\nFor portability, most Arvados extensions should go into the @hints@ section of your CWL file.  This makes it possible for your workflows to run other CWL runners that do not recognize Arvados hints.  The difference between @hints@ and @requirements@ is that @hints@ are optional features that can be ignored by other runners and still produce the same output, whereas @requirements@ will fail the workflow if they cannot be fulfilled.  For example, @arv:IntermediateOutput@ should go in @hints@ as it will have no effect on non-Arvados platforms, however if your workflow explicitly accesses the Arvados API and will fail without it, you should put @arv:APIRequirement@ in @requirements@.\n\n* \"RunInSingleContainer\":#RunInSingleContainer\n* \"SeparateRunner\":#SeparateRunner\n* \"RuntimeConstraints\":#RuntimeConstraints\n* \"PartitionRequirement\":#PartitionRequirement\n* \"APIRequirement\":#APIRequirement\n* \"IntermediateOutput\":#IntermediateOutput\n* \"Secrets\":#Secrets\n* \"WorkflowRunnerResources\":#WorkflowRunnerResources\n* \"ClusterTarget\":#ClusterTarget\n* \"OutputStorageClass\":#OutputStorageClass\n* \"ProcessProperties\":#ProcessProperties\n* \"OutputCollectionProperties\":#OutputCollectionProperties\n* \"CUDARequirement\":#CUDARequirement\n* \"ROCmRequirement\":#ROCmRequirement\n* \"UsePreemptible\":#UsePreemptible\n* \"PreemptionBehavior\":#PreemptionBehavior\n* \"OutOfMemoryRetry\":#OutOfMemoryRetry\n* \"PublishPorts\":#PublishPorts\n\n{% codeblock as yaml %}\nhints:\n  arv:RunInSingleContainer: {}\n\n  arv:SeparateRunner:\n    runnerProcessName: $(inputs.sample_id)\n\n  arv:RuntimeConstraints:\n    keep_cache: 123456\n    outputDirType: keep_output_dir\n\n  arv:PartitionRequirement:\n    partition: dev_partition\n\n  arv:APIRequirement: {}\n\n  arv:IntermediateOutput:\n    outputTTL: 3600\n\n  cwltool:Secrets:\n    secrets: [input1, input2]\n\n  arv:WorkflowRunnerResources:\n    ramMin: 2048\n    coresMin: 2\n    keep_cache: 512\n\n  arv:ClusterTarget:\n    cluster_id: clsr1\n    project_uuid: clsr1-j7d0g-qxc4jcji7n4lafx\n\n  arv:OutputStorageClass:\n    intermediateStorageClass: fast_storage\n    finalStorageClass: robust_storage\n\n  arv:ProcessProperties:\n    processProperties:\n      property1: value1\n      property2: $(inputs.value2)\n\n  arv:OutputCollectionProperties:\n    outputProperties:\n      property1: value1\n      property2: $(inputs.value2)\n\n  cwltool:CUDARequirement:\n    cudaVersionMin: \"11.0\"\n    cudaComputeCapability: \"9.0\"\n    cudaDeviceCountMin: 1\n    cudaDeviceCountMax: 1\n    cudaVram: 8000\n\n  arv:ROCmRequirement:\n    rocmDriverVersion: \"6.2\"\n    rocmTarget: [\"gfx1100\", \"gfx1103\"]\n    rocmDeviceCountMin: 1\n    rocmDeviceCountMax: 1\n    rocmVram: 8000\n\n  arv:UsePreemptible:\n    usePreemptible: true\n\n  arv:PreemptionBehavior:\n    resubmitNonPreemptible: true\n\n  arv:OutOfMemoryRetry:\n    memoryRetryMultiplier: 2\n    
memoryErrorRegex: \"custom memory error\"\n\n  arv:PublishPorts:\n    publishPorts:\n      \"80\":\n        initialPath: /welcome\n        label: Web Service\n        serviceAccess: public\n{% endcodeblock %}\n\nh2(#RunInSingleContainer). arv:RunInSingleContainer\n\nApply this to a workflow step that runs a subworkflow.  Indicates that all the steps of the subworkflow should run together in a single container and not be scheduled separately.  If you have a sequence of short-running steps (less than 1-2 minutes each) this enables you to avoid scheduling and data transfer overhead by running all the steps together at once.  To use this feature, @cwltool@ must be installed in the container image.\n\nh2(#SeparateRunner). arv:SeparateRunner\n\nApply this to a workflow step that runs a subworkflow.  Indicates that Arvados should launch a new workflow runner to manage that specific subworkflow instance.  If used on a scatter step, each scatter item is launched separately.  Using this option has three benefits:\n\n* Better organization in the \"Subprocesses\" table of the main workflow, including the ability to provide a custom name for the step\n* When re-running a batch that has run before, an entire subworkflow may be reused as a unit, which is faster than determining reuse for each step.\n* Significantly faster submit rate compared to invoking @arvados-cwl-runner@ to launch individual workflow instances separately.\n\nThe disadvantage of this option is that because it does launch an additional workflow runner, that workflow runner consumes more compute resources compared to having all the steps managed by a single runner.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|runnerProcessName|optional string|Name to assign to the subworkflow process.  May be an expression with an input context of the post-scatter workflow step invocation.|\n\nh2(#RuntimeConstraints). arv:RuntimeConstraints\n\nSet Arvados-specific runtime hints.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|keep_cache|int|Size of file data buffer for Keep mount in MiB. Default is 256 MiB. Increase this to reduce cache thrashing in situations such as accessing multiple large (64+ MiB) files at the same time, or performing random access on a large file.|\n|outputDirType|enum|Preferred backing store for output staging.  If not specified, the system may choose which one to use.  One of *local_output_dir* or *keep_output_dir*|\n\n*local_output_dir*: Use regular file system local to the compute node. There must be sufficient local scratch space to store entire output; specify this with @outdirMin@ of @ResourceRequirement@.  Files are batch uploaded to Keep when the process completes.  Most compatible, but upload step can be time consuming for very large files.\n\n*keep_output_dir*: Use writable Keep mount.  Files are streamed to Keep as they are written.  Does not consume local scratch space, but does consume RAM for output buffers (up to 192 MiB per file simultaneously open for writing.)  Best suited to processes which produce sequential output of large files (non-sequential writes may produced fragmented file manifests).  Supports regular files and directories, does not support special files such as symlinks, hard links, named pipes, named sockets, or device nodes.|\n\nh2(#PartitionRequirement). arv:PartitionRequirement\n\nSelect preferred compute partitions on which to run jobs.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. 
Description |\n|partition|string or array of strings|Name of a compute partition, or a list of acceptable partition names, on which the job should preferentially run.|\n\nh2(#APIRequirement). arv:APIRequirement\n\nFor CWL v1.1 scripts, if a step requires network access but not specifically access to the Arvados API server, prefer the standard feature \"NetworkAccess\":https://www.commonwl.org/v1.1/CommandLineTool.html#NetworkAccess .  In the future, these may be differentiated by whether ARVADOS_API_HOST and ARVADOS_API_TOKEN are injected into the container or not.\n\nIndicates that the process wants access to the Arvados API.  It will be granted network access and have @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ set in the environment.  Tools which rely on the Arvados API being present should put @arv:APIRequirement@ in the @requirements@ section of the tool (rather than @hints@) to indicate that it is not portable to non-Arvados CWL runners.\n\nUse @arv:APIRequirement@ in @hints@ to enable general (non-Arvados-specific) network access for a tool.\n\nh2(#IntermediateOutput). arv:IntermediateOutput\n\nSpecify desired handling of intermediate output collections.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|outputTTL|int|If the value is greater than zero, intermediate output collections are considered temporary and will be automatically trashed @outputTTL@ seconds after creation.  A value of zero means intermediate output should be retained indefinitely (this is the default behavior).\nNote: arvados-cwl-runner currently does not take workflow dependencies into account when setting the TTL on an intermediate output collection. If the TTL is too short, it is possible for a collection to be trashed before downstream steps that consume it are started.  The recommended minimum value for TTL is the expected duration of the entire workflow.|\n\nh2(#Secrets). cwltool:Secrets\n\nIndicate that one or more input parameters are \"secret\".  Must be applied at the top level Workflow.  Secret parameters are not stored in Keep, are hidden from logs and API responses, and are wiped from the database after the workflow completes.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|secrets|array<string>|Input parameters which are considered \"secret\".  Must be strings.|\n\nh2(#WorkflowRunnerResources). arv:WorkflowRunnerResources\n\nSpecify resource requirements for the workflow runner process (arvados-cwl-runner) that manages a workflow run.  Must be applied to the top level workflow.  Will also be set implicitly when using @--submit-runner-ram@ on the command line along with @--create-workflow@ or @--update-workflow@.  Use this to adjust the runner's allocation if the workflow runner is getting \"out of memory\" exceptions or being killed by the out-of-memory (OOM) killer.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|ramMin|int|RAM, in mebibytes, to reserve for the arvados-cwl-runner process. Default 1 GiB|\n|coresMin|int|Number of cores to reserve for the arvados-cwl-runner process. Default 1 core.|\n|keep_cache|int|Size of collection metadata cache for the workflow runner, in MiB.  Default 256 MiB.  Will be added on to the RAM request when determining node size to request.|\n\nh2(#ClusterTarget). arv:ClusterTarget\n\nSpecify which Arvados cluster should execute a container or subworkflow, and the parent project for the container request.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. 
Description |\n|cluster_id|string|The five-character alphanumeric cluster id (uuid prefix) where a container or subworkflow will execute.  May be an expression.|\n|project_uuid|string|The uuid of the project which will own the container request and the output of the container.  May be an expression.|\n\nh2(#OutputStorageClass). arv:OutputStorageClass\n\nSpecify the \"storage class\":{{site.baseurl}}/user/topics/storage-classes.html to use for intermediate and final outputs.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|intermediateStorageClass|string or array of strings|The storage class for output of intermediate steps.  For example, faster \"hot\" storage.|\n|finalStorageClass|string or array of strings|The storage class for the final output.|\n\nh2(#ProcessProperties). arv:ProcessProperties\n\nSpecify extra \"properties\":{{site.baseurl}}/api/methods.html#subpropertyfilters that will be set on container requests created by the workflow.  May be set on a Workflow or a CommandLineTool.  Setting custom properties on a container request simplifies queries to find the workflow run later on.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|processProperties|key-value map, or list of objects with the fields {propertyName, propertyValue}|The properties that will be set on the container request.  May include expressions that reference @$(inputs)@ of the current workflow or tool.|\n\nh2(#OutputCollectionProperties). arv:OutputCollectionProperties\n\nSpecify custom \"properties\":{{site.baseurl}}/api/methods.html#subpropertyfilters that will be set on the output collection of the workflow step.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|outputProperties|key-value map, or list of objects with the fields {propertyName, propertyValue}|The properties that will be set on the output collection.  May include expressions that reference @$(inputs)@ of the current workflow or tool.|\n\nh2(#CUDARequirement). cwltool:CUDARequirement\n\nRequest support for Nvidia CUDA GPU acceleration in the container.  Assumes that the CUDA runtime (SDK) is installed in the container, and the host will inject the CUDA driver libraries into the container (at a version equal to or later than the version requested).\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|cudaVersionMin|string|Required.  The CUDA SDK version corresponding to the minimum driver version supported by the container (generally, the SDK version 'X.Y' the application was compiled against).|\n|cudaComputeCapability|string|Required.  The minimum CUDA hardware capability (in 'X.Y' format) required by the application's PTX or C++ GPU code (will be JIT compiled for the available hardware).|\n|cudaDeviceCountMin|integer|Minimum number of GPU devices to allocate on a single node. Required.|\n|cudaDeviceCountMax|integer|Maximum number of GPU devices to allocate on a single node. Optional.  If not specified, same as @cudaDeviceCountMin@.|\n|cudaVram|integer|Requested amount of VRAM per device, in mebibytes (2**20)|\n\nh2(#ROCmRequirement). arv:ROCmRequirement\n\nRequest support for AMD ROCm GPU acceleration in the container.  Assumes that the ROCm runtime (SDK) is installed in the container, and the host will inject the AMD devices (@/dev/kfd@ and @/dev/dri/renderD*@) into the container.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|rocmDriverVersion|string|Required.  
The ROCm SDK version corresponding to the minimum driver version supported by the container (generally, the SDK version 'X.Y' the application was compiled against).|\n|rocmTarget|array of string|Required.  A list of one or more hardware targets (e.g. gfx1100) corresponding to the GPU architectures supported by the container.  Use @rocminfo@ to determine what hardware targets you have.  See also \"Accelerator and GPU hardware specifications\":https://rocm.docs.amd.com/en/latest/reference/gpu-arch-specs.html (use the column \"LLVM target name\") and \"LLVM AMDGPU backend documentation\":https://llvm.org/docs/AMDGPUUsage.html .|\n|rocmDeviceCountMin|integer|Minimum number of GPU devices to allocate on a single node. Required.|\n|rocmDeviceCountMax|integer|Maximum number of GPU devices to allocate on a single node. Optional.  If not specified, same as @rocmDeviceCountMin@.|\n|rocmVram|integer|Requested amount of VRAM per device, in mebibytes (2**20)|\n\nh2(#UsePreemptible). arv:UsePreemptible\n\nSpecify whether a workflow step should request preemptible (e.g. AWS Spot market) instances.  Such instances are generally cheaper, but can be taken back by the cloud provider at any time (preempted), causing the step to fail.  When this happens, Arvados will automatically re-try the step, up to the configuration value of @Containers.MaxRetryAttempts@ (default 3) times.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|usePreemptible|boolean|Required, true to opt in to using preemptible instances, false to opt out.|\n\nh2(#PreemptionBehavior). arv:PreemptionBehavior\n\nThis option determines the behavior when @arvados-cwl-runner@ detects that a workflow step was cancelled because the preemptible (spot market) instance it was running on was reclaimed by the cloud provider.  If true, then instead of the retry behavior described above under 'UsePreemptible', the workflow step is re-submitted after its first failure with preemption disabled, so it will be scheduled to run on non-preemptible (on-demand) instances.\n\nWhen preemptible instances are reclaimed, this is a signal that the cloud provider has restricted capacity for low-priority preemptible instances.  As a result, the default behavior of rescheduling or launching on another preemptible instance has a higher risk of being preempted a second or third time, spending more time and money but making no progress.  This option provides an alternate fallback behavior: the step is attempted on a preemptible instance the first time (saving money), but re-run as non-preemptible if the first attempt was preempted (ensuring continued progress).\n\nThis behavior applies to each step individually.  If a step is preempted, then successfully re-run as non-preemptible, it does not affect the behavior of the next step, which will first be launched as preemptible, and so forth.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|resubmitNonPreemptible|boolean|Required.  If true, then when a workflow step is cancelled because the instance was preempted, re-submit the step with preemption disabled.|\n\nh2(#OutOfMemoryRetry). arv:OutOfMemoryRetry\n\nSpecify that when a workflow step appears to have failed because it did not request enough RAM, it should be re-submitted with more RAM.  Out of memory conditions are detected either by the container being unexpectedly killed (exit code 137) or by matching a pattern in the container's output (see @memoryErrorRegex@).  
Retrying will increase the base RAM request by the value of @memoryRetryMultiplier@.  For example, if the original RAM request was 10 GiB and the multiplier is 1.5, then it will re-submit with 15 GiB.\n\nContainers are only re-submitted once.  If the container fails a second time after increasing RAM, the workflow step will fail.\n\nAlso note that expressions that use @$(runtime.ram)@ (such as dynamic command line parameters) are not reevaluated when the container is resubmitted.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|memoryRetryMultiplier|float|Optional, default value is 2.  The retry will multiply the base memory request by this factor to get the retry memory request.|\n|memoryErrorRegex|string|Optional, a custom regex that, if found in the stdout, stderr or crunch-run logging of a program, will trigger a retry with greater RAM.  If not provided, the default pattern matches \"out of memory\" (with or without spaces), \"memory error\" (with or without spaces), \"bad_alloc\" and \"container using over 90% of memory\".|\n\nh2(#PublishPorts). arv:PublishPorts\n\nThis extension submits the container request to Arvados as a service with the specified ports exposed. Refer to the \"container request published ports documentation\":{{ site.baseurl }}/api/methods/container_requests.html#published_ports for background. You will usually want to apply this as a hint on specific tools, or a requirement if the step only makes sense as a service container.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|publishPorts|key-value&nbsp;map|Each key is a string that identifies a port 1–65535 (inclusive) where the container runs a service. Each value follows the schema below.|\n\ntable(table table-bordered table-condensed).\n|_. Value Field |_. Type |_. Description |\n|initialPath|string|Optional. Clients like Workbench will use this path for the user's initial connection to the service.|\n|label|string|A brief descriptive name for this service. Clients like Workbench display this label in the connection interface.|\n|serviceAccess|enum|One of \"public\" or \"private\". If \"private\", Arvados will require clients to provide an @arvados_api_token@ parameter from a user who submitted the workflow to connect to this service port.|\n\nh2. arv:dockerCollectionPDH\n\nThis is an optional extension field appearing on the standard @DockerRequirement@.  It specifies the portable data hash of the Arvados collection containing the Docker image.  If present, it takes precedence over @dockerPull@ or @dockerImageId@.\n\n<pre>\nrequirements:\n  DockerRequirement:\n    dockerPull: \"debian:10\"\n    arv:dockerCollectionPDH: \"feaf1fc916103d7cdab6489e1f8c3a2b+174\"\n</pre>\n\nh1. Deprecated extensions\n\nThe following extensions are deprecated because equivalent features are part of the CWL v1.1 standard.\n\n{% codeblock as yaml %}\nhints:\n  cwltool:LoadListingRequirement:\n    loadListing: shallow_listing\n  arv:ReuseRequirement:\n    enableReuse: false\n  cwltool:TimeLimit:\n    timelimit: 14400\n{% endcodeblock %}\n\n
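For comparison, here is a sketch of the equivalent standard CWL v1.1 features, using the class and field names from the specifications linked in each subsection below:\n\n{% codeblock as yaml %}\nrequirements:\n  LoadListingRequirement:\n    loadListing: shallow_listing\n  WorkReuse:\n    enableReuse: false\n  ToolTimeLimit:\n    timelimit: 14400\n{% endcodeblock %}\n\nh2. 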
cwltool:LoadListingRequirement\n\nFor CWL v1.1 scripts, this is deprecated in favor of \"loadListing\":https://www.commonwl.org/v1.1/CommandLineTool.html#CommandInputParameter or \"LoadListingRequirement\":https://www.commonwl.org/v1.1/CommandLineTool.html#LoadListingRequirement\n\nIn CWL v1.0 documents, the default behavior for Directory objects is to recursively expand the @listing@ for access by parameter references and expressions.  For directory trees containing many files, this can be expensive in both time and memory usage.  Use @cwltool:LoadListingRequirement@ to change the behavior for expansion of directory listings in the workflow runner.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|loadListing|string|One of @no_listing@, @shallow_listing@, or @deep_listing@|\n\n*no_listing*: Do not expand directory listing at all.  The @listing@ field on the Directory object will be undefined.\n\n*shallow_listing*: Only expand the first level of directory listing.  The @listing@ field on the toplevel Directory object will contain the directory contents, however @listing@ will not be defined on subdirectories.\n\n*deep_listing*: Recursively expand all levels of directory listing.  The @listing@ field will be provided on the toplevel object and all subdirectories.\n\nh2. arv:ReuseRequirement\n\nFor CWL v1.1 scripts, this is deprecated in favor of \"WorkReuse\":https://www.commonwl.org/v1.1/CommandLineTool.html#WorkReuse .\n\nEnable/disable work reuse for current process.  Default true (work reuse enabled).\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|enableReuse|boolean|Enable/disable work reuse for current process.  Default true (work reuse enabled).|\n\nNote that the @enableReuse@ field of the standardized @WorkReuse@ requirement can also take an expression as its value.\n\nh2. cwltool:TimeLimit\n\nFor CWL v1.1 scripts, this is deprecated in favor of \"ToolTimeLimit\":https://www.commonwl.org/v1.1/CommandLineTool.html#ToolTimeLimit\n\nSet an upper limit on the execution time of a CommandLineTool or ExpressionTool.  A tool execution which exceeds the time limit may be preemptively terminated and considered failed.  May also be used by batch systems to make scheduling decisions.\n\ntable(table table-bordered table-condensed).\n|_. Field |_. Type |_. Description |\n|timelimit|int|Execution time limit in seconds. If set to zero, no limit is enforced.|\n"
  },
  {
    "path": "doc/user/cwl/cwl-run-options.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"arvados-cwl-runner options\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"*Command line options*\":#options\n# \"*Specify workflow and output names*\":#names\n# \"*Submit a workflow without waiting for the result*\":#nowait\n# \"*Control a workflow locally*\":#local\n# \"*Automatically delete intermediate outputs*\":#delete\n# \"*Run workflow on a remote federated cluster*\":#federation\n\nh3(#options). Command line options\n\nThe following command line options are available for @arvados-cwl-runner@:\n\ntable(table table-bordered table-condensed).\n|_. Option |_. Description |\n|==--basedir== BASEDIR|     Base directory used to resolve relative references in the input, default to directory of input object file or current directory (if inputs piped/provided on command line).|\n|==--eval-timeout EVAL_TIMEOUT==|Time to wait for a Javascript expression to evaluate before giving an error, default 20s.|\n|==--print-dot==|           Print workflow visualization in graphviz format and exit|\n|==--version==|             Print version and exit|\n|==--validate==|            Validate CWL document only.|\n|==--verbose==|             Default logging|\n|==--quiet==|               Only print warnings and errors.|\n|==--debug==|               Print even more logging|\n|==--metrics==|             Print timing metrics|\n|==--tool-help==|           Print command line help for tool|\n|==--enable-reuse==|        Enable container reuse (default)|\n|==--disable-reuse==|       Disable container reuse|\n|==--project-uuid UUID==|   Project that will own the workflow containers, if not provided, will go to home project.|\n|==--output-name OUTPUT_NAME==|Name to use for collection that stores the final output.|\n|==--output-tags OUTPUT_TAGS==|Tags for the final output collection separated by commas, e.g., =='--output-tags tag0,tag1,tag2'==.|\n|==--ignore-docker-for-reuse==|Ignore Docker image version when deciding whether to reuse past containers.|\n|==--submit==|              Submit workflow to run on Arvados.|\n|==--local==|               Run workflow on local host (submits containers to Arvados).|\n|==--create-template==|     (Deprecated) synonym for --create-workflow.|\n|==--create-workflow==|     Register an Arvados workflow that can be run from Workbench|\n|==--update-workflow== UUID|Update an existing Arvados workflow with the given UUID.|\n|==--wait==|                After submitting workflow runner, wait for completion.|\n|==--no-wait==|             Submit workflow runner and exit.|\n|==--log-timestamps==|      Prefix logging lines with timestamp|\n|==--no-log-timestamps==|   No timestamp on logging lines|\n|==--compute-checksum==|    Compute checksum of contents while collecting outputs|\n|==--submit-runner-ram== SUBMIT_RUNNER_RAM|RAM (in MiB) required for the workflow runner job (default 1024)|\n|==--submit-runner-image== SUBMIT_RUNNER_IMAGE|Docker image for workflow runner job|\n|==--always-submit-runner==|When invoked with --submit --wait, always submit a runner to manage the workflow, even when only running a single CommandLineTool|\n|==--match-submitter-images==|Where Arvados has more than one Docker image of the same name, use image from the Docker instance on the submitting node.|\n|==--submit-request-uuid== UUID|Update and commit to supplied container request instead of creating a new one.|\n|==--submit-runner-cluster== 
CLUSTER_ID|Submit workflow runner to a remote cluster|\n|==--collection-cache-size== COLLECTION_CACHE_SIZE|Collection cache size (in MiB, default 256).|\n|==--name== NAME|Name to use for the workflow execution instance.|\n|==--on-error== {stop,continue}|Desired workflow behavior when a step fails.  One of 'stop' (do not submit any more steps) or 'continue' (may submit other steps that are not downstream from the error). Default is 'continue'.|\n|==--enable-dev==|Enable loading and running development versions of the CWL standards.|\n|==--storage-classes== STORAGE_CLASSES|Specify comma separated list of storage classes to be used when saving final workflow output to Keep.|\n|==--intermediate-storage-classes== INTERMEDIATE_STORAGE_CLASSES|Specify comma separated list of storage classes to be used when saving intermediate workflow output to Keep.|\n|==--intermediate-output-ttl== N|If N > 0, intermediate output collections will be trashed N seconds after creation. Default is 0 (don't trash).|\n|==--priority== PRIORITY|Workflow priority (range 1..1000, higher has precedence over lower)|\n|==--thread-count== THREAD_COUNT|Number of threads to use for job submit and output collection.|\n|==--http-timeout== HTTP_TIMEOUT|API request timeout in seconds. Default is 300 seconds (5 minutes).|\n|==--defer-downloads==|When submitting a workflow, defer downloading HTTP or S3 URLs until launch of the workflow runner container instead of downloading to Keep before submit.|\n|==--enable-aws-credential-capture==|When submitting a workflow that requires AWS credentials, capture them from the local environment for use by the workflow runner container.|\n|==--disable-aws-credential-capture==|Do not capture AWS credentials from the local environment, must use credentials registered with Arvados.|\n|==--s3-public-bucket==|Downloads are from a public bucket, so no AWS credentials are required.|\n|==--use-credential== SELECTED_CREDENTIAL|Name or uuid of a credential registered with Arvados that will be used to fetch external resources.|\n|==--varying-url-params== VARYING_URL_PARAMS|A comma separated list of URL query parameters that should be ignored when storing HTTP URLs in Keep.|\n|==--prefer-cached-downloads==|If an HTTP URL is found in Keep, skip upstream URL freshness check (will not notice if the upstream has changed, but also not error if upstream is unavailable).|\n|==--enable-preemptible==|Use preemptible instances. Control individual steps with arv:UsePreemptible hint.|\n|==--disable-preemptible==|Don't use preemptible instances.|\n|==--enable-resubmit-non-preemptible==|If a workflow step fails due to the instance it is running on being preempted, re-submit the container with the `preemptible` flag disabled. Control individual steps with arv:PreemptionBehavior hint.|\n|==--disable-resubmit-non-preemptible==|Don't resubmit when a preemptible instance is reclaimed.|\n|==--copy-deps==|         Copy dependencies into the destination project.|\n|==--no-copy-deps==|      Leave dependencies where they are.|\n|==--skip-schemas==|      Skip loading of schemas|\n|==--trash-intermediate==|Immediately trash intermediate outputs on workflow success.|\n|==--no-trash-intermediate==|Do not trash intermediate outputs (default).|\n|==--enable-usage-report==|Create usage_report.html with a summary of each step's resource usage.|\n|==--disable-usage-report==|Disable usage report.|\n\n
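For example, to check a workflow document for errors without submitting anything, use @--validate@:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arvados-cwl-runner --validate bwa-mem.cwl</span>\n</code></pre>\n</notextile>\n\nh3(#names). 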
Specify workflow and output names\n\nUse the @--name@ and @--output-name@ options to specify the name of the workflow and name of the output collection.\n\n<notextile>\n<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arvados-cwl-runner --name \"Example bwa run\" --output-name \"Example bwa output\" bwa-mem.cwl bwa-mem-input.yml</span>\narvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624\n2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Upload local files: \"bwa-mem.cwl\"\n2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Uploaded to zzzzz-4zz18-h7ljh5u76760ww2\n2016-06-30 14:56:40 arvados.cwl-runner[27002] INFO: Submitted job zzzzz-8i9sb-fm2n3b1w0l6bskg\n2016-06-30 14:56:41 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (zzzzz-8i9sb-fm2n3b1w0l6bskg) is Running\n2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (zzzzz-8i9sb-fm2n3b1w0l6bskg) is Complete\n2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Overall process status is success\n{\n    \"aligned_sam\": {\n        \"path\": \"keep:54325254b226664960de07b3b9482349+154/HWI-ST1027_129_D0THKACXX.1_1.sam\",\n        \"checksum\": \"sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a\",\n        \"class\": \"File\",\n        \"size\": 30738986\n    }\n}\n</code></pre>\n</notextile>\n\nh3(#nowait). Submit a workflow without waiting for the result\n\nTo submit a workflow and exit immediately, use the @--no-wait@ option.  This will submit the workflow to Arvados, print out the UUID of the job that was submitted to standard output, and exit.\n\n<notextile>\n<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arvados-cwl-runner --no-wait bwa-mem.cwl bwa-mem-input.yml</span>\narvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624\n2016-06-30 15:07:52 arvados.arv-run[12480] INFO: Upload local files: \"bwa-mem.cwl\"\n2016-06-30 15:07:52 arvados.arv-run[12480] INFO: Uploaded to zzzzz-4zz18-eqnfwrow8aysa9q\n2016-06-30 15:07:52 arvados.cwl-runner[12480] INFO: Submitted job zzzzz-8i9sb-fm2n3b1w0l6bskg\nzzzzz-8i9sb-fm2n3b1w0l6bskg\n</code></pre>\n</notextile>\n\nh3(#local). Control a workflow locally\n\nTo run a workflow with local control, use @--local@.  This means that the host where you run @arvados-cwl-runner@ will be responsible for submitting containers; however, the containers themselves will still run on the Arvados cluster.  
With @--local@, if you interrupt @arvados-cwl-runner@ or log out, the workflow will be terminated.\n\n<notextile>\n<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arvados-cwl-runner --local bwa-mem.cwl bwa-mem-input.yml</span>\narvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624\n2016-07-01 10:05:19 arvados.cwl-runner[16290] INFO: Pipeline instance zzzzz-d1hrv-92wcu6ldtio74r4\n2016-07-01 10:05:28 arvados.cwl-runner[16290] INFO: Job bwa-mem.cwl (zzzzz-8i9sb-2nzzfbuf9zjrj4g) is Queued\n2016-07-01 10:05:29 arvados.cwl-runner[16290] INFO: Job bwa-mem.cwl (zzzzz-8i9sb-2nzzfbuf9zjrj4g) is Running\n2016-07-01 10:05:45 arvados.cwl-runner[16290] INFO: Job bwa-mem.cwl (zzzzz-8i9sb-2nzzfbuf9zjrj4g) is Complete\n2016-07-01 10:05:46 arvados.cwl-runner[16290] INFO: Overall process status is success\n{\n    \"aligned_sam\": {\n        \"size\": 30738986,\n        \"path\": \"keep:15f56bad0aaa7364819bf14ca2a27c63+88/HWI-ST1027_129_D0THKACXX.1_1.sam\",\n        \"checksum\": \"sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a\",\n        \"class\": \"File\"\n    }\n}\n</code></pre>\n</notextile>\n\nh3(#delete). Automatically delete intermediate outputs\n\nUse the @--intermediate-output-ttl@ and @--trash-intermediate@ options to specify how long intermediate outputs should be kept (in seconds) and whether to trash them immediately upon successful workflow completion.\n\nTemporary collections will be trashed @intermediate-output-ttl@ seconds after creation.  A value of zero (default) means intermediate output should be retained indefinitely.\n\nNote: arvados-cwl-runner currently does not take workflow dependencies into account when setting the TTL on an intermediate output collection. If the TTL is too short, it is possible for a collection to be trashed before downstream steps that consume it are started.  The recommended minimum value for TTL is the expected duration of the entire workflow.\n\nUsing @--trash-intermediate@ without @--intermediate-output-ttl@ means that intermediate files will be trashed on successful completion, but will remain on workflow failure.\n\nUsing @--intermediate-output-ttl@ without @--trash-intermediate@ means that intermediate files will be trashed only after the TTL expires (regardless of workflow success or failure).\n\nh3(#federation). Run workflow on a remote federated cluster\n\nBy default, the workflow runner will run on the local (home) cluster.  Using @--submit-runner-cluster@ you can specify that the runner should be submitted to a remote federated cluster.  When doing this, @--project-uuid@ should specify a project on that cluster.  Steps making up the workflow will be submitted to the remote federated cluster by default, but the behavior of @arv:ClusterTarget@ is unchanged.  Note: when using this option, any resources that need to be uploaded in order to run the workflow (such as files or Docker images) will be uploaded to the local (home) cluster, and streamed to the federated cluster on demand.\n\nh3(#preemptible). Using preemptible (spot) instances\n\nPreemptible instances typically offer lower cost computation with a tradeoff of lower service guarantees.  
h3(#federation). Run workflow on a remote federated cluster\n\nBy default, the workflow runner will run on the local (home) cluster.  Using @--submit-runner-cluster@ you can specify that the runner should be submitted to a remote federated cluster.  When doing this, @--project-uuid@ should specify a project on that cluster.  Steps making up the workflow will be submitted to the remote federated cluster by default, but the behavior of @arv:ClusterTarget@ is unchanged.  Note: when using this option, any resources that need to be uploaded in order to run the workflow (such as files or Docker images) will be uploaded to the local (home) cluster, and streamed to the federated cluster on demand.\n\nh3(#preemptible). Using preemptible (spot) instances\n\nPreemptible instances typically offer lower-cost computation with a tradeoff of lower service guarantees.  If a compute node is preempted, Arvados will restart the computation on a new instance.\n\nIf the sitewide configuration @Containers.AlwaysUsePreemptibleInstances@ is true, workflow steps will always select preemptible instances, regardless of user options.\n\nIf @Containers.AlwaysUsePreemptibleInstances@ is false, you can request preemptible instances for a specific run with the @arvados-cwl-runner --enable-preemptible@ option.\n\nWithin the workflow, you can control whether individual steps should be preemptible with the \"arv:UsePreemptible\":cwl-extensions.html#UsePreemptible hint.\n\nIf a workflow requests preemptible instances with \"arv:UsePreemptible\":cwl-extensions.html#UsePreemptible , but you _do not_ want to use preemptible instances, you can override it for a specific run with the @arvados-cwl-runner --disable-preemptible@ option.\n\nh3(#gpu). Use GPU instances\n\nSee \"cwltool:CUDARequirement\":cwl-extensions.html#CUDARequirement (for Nvidia) and \"arv:ROCmRequirement\":cwl-extensions.html#ROCmRequirement (for AMD).\n"
  },
  {
    "path": "doc/user/cwl/cwl-runner.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Starting a workflow at the command line\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'what_is_cwl' %}\n\n{% include 'tutorial_expectations' %}\n\nThis tutorial will demonstrate how to submit a workflow at the command line using @arvados-cwl-runner@.\n\n# \"Get the tutorial files\":#get-files\n# \"Submitting a workflow to an Arvados cluster\":#submitting\n# \"Registering a workflow to use in Workbench\":#registering\n# \"Make a workflow file directly executable\":#executable\n\nh2(#get-files). Get the tutorial files\n\nThe tutorial files are located in the documentation section of the Arvados source repository, which can be \"found on GitHub\":https://github.com/arvados/arvados/tree/main/doc/user/cwl/bwa-mem.\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">git clone https://github.com/arvados/arvados</span>\n~$ <span class=\"userinput\">cd arvados/doc/user/cwl/bwa-mem</span>\n</code></pre>\n</notextile>\n\nThe tutorial data is hosted on \"https://playground.arvados.org\":https://playground.arvados.org (also referred to by the identifier *pirca*).  If you are using a different Arvados instance, you may need to copy the data to your own instance.  One way to do this is with \"arv-copy\":{{site.baseurl}}/user/topics/arv-copy.html (this requires signing up for a free playground.arvados.org account).\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv-copy --src pirca --dst settings 2463fa9efeb75e099685528b3b9071e0+438</span>\n~$ <span class=\"userinput\">arv-copy --src pirca --dst settings ae480c5099b81e17267b7445e35b4bc7+180</span>\n</code></pre>\n</notextile>\n\nIf you do not wish to create an account on \"https://playground.arvados.org\":https://playground.arvados.org, you may download the files anonymously and upload them to your local Arvados instance:\n\n\"https://collections.pirca.arvadosapi.com/c=2463fa9efeb75e099685528b3b9071e0+438/\":https://collections.pirca.arvadosapi.com/c=2463fa9efeb75e099685528b3b9071e0+438/\n\n\"https://collections.pirca.arvadosapi.com/c=ae480c5099b81e17267b7445e35b4bc7+180/\":https://collections.pirca.arvadosapi.com/c=ae480c5099b81e17267b7445e35b4bc7+180/\n\nh2(#submitting). Submitting a workflow to an Arvados cluster\n\nh3. Submit a workflow and wait for results\n\nUse @arvados-cwl-runner@ to submit CWL workflows to Arvados.  After submitting the job, it will wait for the workflow to complete and print out the final result to standard output.\n\n*Note:* Once submitted, the workflow runs entirely on Arvados, so even if you log out, the workflow will continue to run.  
However, if you interrupt @arvados-cwl-runner@ with control-C, it will cancel the workflow.\n\n<notextile>\n<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arvados-cwl-runner bwa-mem.cwl bwa-mem-input.yml</span>\narvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624\n2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Upload local files: \"bwa-mem.cwl\"\n2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Uploaded to zzzzz-4zz18-h7ljh5u76760ww2\n2016-06-30 14:56:40 arvados.cwl-runner[27002] INFO: Submitted job zzzzz-8i9sb-fm2n3b1w0l6bskg\n2016-06-30 14:56:41 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (zzzzz-8i9sb-fm2n3b1w0l6bskg) is Running\n2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (zzzzz-8i9sb-fm2n3b1w0l6bskg) is Complete\n2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Overall process status is success\n{\n    \"aligned_sam\": {\n        \"location\": \"keep:54325254b226664960de07b3b9482349+154/HWI-ST1027_129_D0THKACXX.1_1.sam\",\n        \"checksum\": \"sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a\",\n        \"class\": \"File\",\n        \"size\": 30738986\n    }\n}\n</code></pre>\n</notextile>\n\nh3. Referencing files\n\nWhen running a workflow on an Arvados cluster, the input files must be stored in Keep.  There are several ways this can happen.\n\nA URI reference to Keep uses the @keep:@ scheme followed by either the portable data hash or UUID of the collection and then the location of the file inside the collection.  For example, @keep:2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt@ or @keep:zzzzz-4zz18-zzzzzzzzzzzzzzz/19.fasta.bwt@.\n\nIf you reference a file in \"arv-mount\":{{site.baseurl}}/user/tutorials/tutorial-keep-mount-gnu-linux.html, such as @/home/example/keep/by_id/2463fa9efeb75e099685528b3b9071e0+438/19.fasta.bwt@, then @arvados-cwl-runner@ will automatically determine the appropriate Keep URI reference.\n\nIf you reference a local file which is not in @arv-mount@, then @arvados-cwl-runner@ will upload the file to Keep and use the Keep URI reference from the upload.\n\nYou can also execute CWL files that have been uploaded to Keep:\n\n<notextile>\n<pre><code>\n~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arv-put --portable-data-hash --name \"bwa-mem.cwl\" bwa-mem.cwl</span>\n2020-08-20 13:40:02 arvados.arv_put[12976] INFO: Collection saved as 'bwa-mem.cwl'\nf141fc27e7cfa7f7b6d208df5e0ee01b+59\n~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arvados-cwl-runner keep:f141fc27e7cfa7f7b6d208df5e0ee01b+59/bwa-mem.cwl bwa-mem-input.yml</span>\narvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624\n2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Uploaded to zzzzz-4zz18-h7ljh5u76760ww2\n2016-06-30 14:56:40 arvados.cwl-runner[27002] INFO: Submitted job zzzzz-8i9sb-fm2n3b1w0l6bskg\n2016-06-30 14:56:41 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (zzzzz-8i9sb-fm2n3b1w0l6bskg) is Running\n2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (zzzzz-8i9sb-fm2n3b1w0l6bskg) is Complete\n2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Overall process status is success\n{\n    \"aligned_sam\": {\n        \"location\": \"keep:54325254b226664960de07b3b9482349+154/HWI-ST1027_129_D0THKACXX.1_1.sam\",\n        \"checksum\": \"sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a\",\n        \"class\": \"File\",\n        \"size\": 30738986\n    }\n}\n</code></pre>\n</notextile>\n\nNote: uploading a workflow file to Keep is _not_ the same as registering the workflow for use in Workbench.  See \"Registering a workflow to use in Workbench\":#registering below.\n\nh3. Work reuse\n\nWorkflows submitted with @arvados-cwl-runner@ will take advantage of Arvados job reuse.  If you submit a workflow which is identical to one that has run before, it will skip re-execution and return the result of the previous run.  This also applies to individual workflow steps.  For example, in a two-step workflow where the first step has run before, it will reuse results for the first step and only execute the new second step.  You can disable this behavior with @--disable-reuse@.\n\n
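For example, to force every step to run fresh, ignoring any previous results (a sketch using the option described above):\n\n<notextile>\n<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arvados-cwl-runner --disable-reuse bwa-mem.cwl bwa-mem-input.yml</span>\n</code></pre>\n</notextile>\n\n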
h3(#docker). Docker images\n\nDocker images referenced by the workflow must be uploaded to Arvados.  This requires @docker@ to be installed and usable by the user running @arvados-cwl-runner@.  If the image is not present in the local Docker instance, @arvados-cwl-runner@ will first attempt to pull the image using @docker pull@, then upload it.\n\nIf there is already a Docker image in Arvados with the same name, it will use the existing image.  In this case, the submitter will not use Docker.\n\nThe @--match-submitter-images@ option will check the id of the image in the local Docker instance and compare it to the id of the image already in Arvados with the same name and tag.  If they are different, it will choose the image matching the local image id, which will be uploaded if necessary.  This is helpful for development: if you locally rebuild the image with the 'latest' tag, @--match-submitter-images@ will ensure that the newer version is used.\n\nh3(#dependencies). Dependencies\n\nDependencies include collections and Docker images referenced by the workflow.  Dependencies are automatically uploaded by @arvados-cwl-runner@ if they are not present or need to be updated.  When running a workflow, dependencies that already exist somewhere on the Arvados instance (from a previous upload) will not be uploaded or copied, regardless of the project they are located in.  Sometimes this creates problems when sharing a workflow run with others.  In this case, use @--copy-deps@ to indicate that you want all dependencies to be copied into the destination project (specified by @--project-uuid@).\n\n
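For example (a sketch; the project UUID is illustrative):\n\n<notextile>\n<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arvados-cwl-runner --project-uuid zzzzz-j7d0g-p32bi47ogkjke11 --copy-deps bwa-mem.cwl bwa-mem-input.yml</span>\n</code></pre>\n</notextile>\n\n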
h3. Command line options\n\nSee \"arvados-cwl-runner options\":{{site.baseurl}}/user/cwl/cwl-run-options.html\n\nh2(#registering). Registering a workflow to use in Workbench\n\nUse @--create-workflow@ to register a CWL workflow with Arvados.  Use @--project-uuid@ to upload the workflow to a specific project, along with its dependencies.  You can share the workflow with other Arvados users by sharing that project.  You can run the workflow from Workbench by clicking <span class=\"btn btn-sm btn-primary\">+ NEW</span> → <i class=\"fa fa-fw fa-gear\"></i> *Run a process* in the menu, or at the command line by UUID.\n\n<notextile>\n<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arvados-cwl-runner --project-uuid zzzzz-j7d0g-p32bi47ogkjke11 --create-workflow bwa-mem.cwl</span>\narvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624\n2016-07-01 12:21:01 arvados.arv-run[15796] INFO: Upload local files: \"bwa-mem.cwl\"\n2016-07-01 12:21:01 arvados.arv-run[15796] INFO: Uploaded to zzzzz-4zz18-7e0hedrmkuyoei3\n2016-07-01 12:21:01 arvados.cwl-runner[15796] INFO: Created template zzzzz-p5p6p-rjleou1dwr167v5\nzzzzz-p5p6p-rjleou1dwr167v5\n</code></pre>\n</notextile>\n\nYou can provide a partial input file to set default values for the workflow input parameters.  You can also use the @--name@ option to set the name of the workflow:\n\n<notextile>\n<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arvados-cwl-runner --name \"My workflow with defaults\" --project-uuid zzzzz-j7d0g-p32bi47ogkjke11 --create-workflow bwa-mem.cwl bwa-mem-template.yml</span>\narvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624\n2016-07-01 14:09:50 arvados.arv-run[3730] INFO: Upload local files: \"bwa-mem.cwl\"\n2016-07-01 14:09:50 arvados.arv-run[3730] INFO: Uploaded to zzzzz-4zz18-0f91qkovk4ml18o\n2016-07-01 14:09:50 arvados.cwl-runner[3730] INFO: Created template zzzzz-p5p6p-0deqe6nuuyqns2i\nzzzzz-p5p6p-zuniv58hn8d0qd8\n</code></pre>\n</notextile>\n\nUse @--update-workflow <uuid>@ to update an existing workflow.\n\n<notextile>\n<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arvados-cwl-runner --update-workflow zzzzz-p5p6p-zuniv58hn8d0qd8 bwa-mem.cwl</span>\narvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624\n2016-07-01 12:21:01 arvados.arv-run[15796] INFO: Upload local files: \"bwa-mem.cwl\"\n2016-07-01 12:21:01 arvados.arv-run[15796] INFO: Uploaded to zzzzz-4zz18-7e0hedrmkuyoei3\n2016-07-01 12:21:01 arvados.cwl-runner[15796] INFO: Created template zzzzz-p5p6p-zuniv58hn8d0qd8\nzzzzz-p5p6p-zuniv58hn8d0qd8\n</code></pre>\n</notextile>\n\nWhen using @--create-workflow@ or @--update-workflow@, the @--copy-deps@ and @--match-submitter-images@ options are enabled by default.\n\nh3. 
Running registered workflows at the command line\n\nYou can run a registered workflow at the command line by its UUID:\n\n<notextile>\n<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arvados-cwl-runner pirca-7fd4e-3nqbw08vtjl8ybz --help</span>\nINFO /home/peter/work/scripts/venv3/bin/arvados-cwl-runner 2.1.0.dev20200814195416, arvados-python-client 2.1.0.dev20200814195416, cwltool 3.0.20200807132242\nINFO Resolved 'pirca-7fd4e-3nqbw08vtjl8ybz' to 'arvwf:pirca-7fd4e-3nqbw08vtjl8ybz#main'\nusage: pirca-7fd4e-3nqbw08vtjl8ybz [-h] [--PL PL] [--group_id GROUP_ID]\n                                   [--read_p1 READ_P1] [--read_p2 READ_P2]\n                                   [--reference REFERENCE]\n                                   [--sample_id SAMPLE_ID]\n                                   [job_order]\n\npositional arguments:\n  job_order             Job input json file\n\noptional arguments:\n  -h, --help            show this help message and exit\n  --PL PL\n  --group_id GROUP_ID\n  --read_p1 READ_P1     The reads, in fastq format.\n  --read_p2 READ_P2     For mate paired reads, the second file (optional).\n  --reference REFERENCE\n                        The index files produced by `bwa index`\n  --sample_id SAMPLE_ID\n</code></pre>\n</notextile>\n\nh2(#executable). Make a workflow file directly executable\n\nYou can make a workflow file directly executable (@cwl-runner@ should be an alias to @arvados-cwl-runner@) by adding the following line to the top of the file:\n\n<notextile>\n<pre><code>#!/usr/bin/env cwl-runner\n</code></pre>\n</notextile>\n\n<notextile>\n<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">./bwa-mem.cwl bwa-mem-input.yml</span>\narvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624\n2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Upload local files: \"bwa-mem.cwl\"\n2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Uploaded to zzzzz-4zz18-h7ljh5u76760ww2\n2016-06-30 14:56:40 arvados.cwl-runner[27002] INFO: Submitted job zzzzz-8i9sb-fm2n3b1w0l6bskg\n2016-06-30 14:56:41 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (zzzzz-8i9sb-fm2n3b1w0l6bskg) is Running\n2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (zzzzz-8i9sb-fm2n3b1w0l6bskg) is Complete\n2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Overall process status is success\n{\n    \"aligned_sam\": {\n        \"path\": \"keep:54325254b226664960de07b3b9482349+154/HWI-ST1027_129_D0THKACXX.1_1.sam\",\n        \"checksum\": \"sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a\",\n        \"class\": \"File\",\n        \"size\": 30738986\n    }\n}\n</code></pre>\n</notextile>\n\nYou can even make an input file directly executable the same way with the following two lines at the top:\n\n<notextile>\n<pre><code>#!/usr/bin/env cwl-runner\ncwl:tool: <span class=\"userinput\">bwa-mem.cwl</span>\n</code></pre>\n</notextile>\n\n<notextile>\n<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">./bwa-mem-input.yml</span>\narvados-cwl-runner 1.0.20160628195002, arvados-python-client 0.1.20160616015107, cwltool 1.0.20160629140624\n2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Upload local files: \"bwa-mem.cwl\"\n2016-06-30 14:56:36 arvados.arv-run[27002] INFO: Uploaded to zzzzz-4zz18-h7ljh5u76760ww2\n2016-06-30 14:56:40 arvados.cwl-runner[27002] INFO: Submitted job zzzzz-8i9sb-fm2n3b1w0l6bskg\n2016-06-30 14:56:41 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (zzzzz-8i9sb-fm2n3b1w0l6bskg) is 
Running\n2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Job bwa-mem.cwl (zzzzz-8i9sb-fm2n3b1w0l6bskg) is Complete\n2016-06-30 14:57:12 arvados.cwl-runner[27002] INFO: Overall process status is success\n{\n    \"aligned_sam\": {\n        \"path\": \"keep:54325254b226664960de07b3b9482349+154/HWI-ST1027_129_D0THKACXX.1_1.sam\",\n        \"checksum\": \"sha1$0dc46a3126d0b5d4ce213b5f0e86e2d05a54755a\",\n        \"class\": \"File\",\n        \"size\": 30738986\n    }\n}\n</code></pre>\n</notextile>\n\nh2(#setup). Setting up arvados-cwl-runner\n\nSee \"Arvados CWL Runner\":{{site.baseurl}}/sdk/python/arvados-cwl-runner.html\n"
  },
  {
    "path": "doc/user/cwl/cwl-style.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Guidelines for Writing High-Performance Portable Workflows\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n*Performance*\n# \"Does your application support NVIDIA GPU acceleration?\":#nvidiagpu\n# \"Trying to reduce costs?\":#preemptible\n# \"You have a sequence of short-running steps\":#RunInSingleContainer\n# \"Avoid declaring @InlineJavascriptRequirement@ or @ShellCommandRequirement@\":#avoidExcessRequirements\n# \"Prefer text substitution to Javascript\":#preferTextSubst\n# \"Use @ExpressionTool@ to efficiently rearrange input files\":#expressionTool\n# \"Limit RAM requests to what you really need\":#limitRAM\n# \"Avoid scattering by step by step\":#avoidScatterByStep\n\n*Portability*\n# \"Always provide @DockerRequirement@\":#DockerRequirement\n# \"Build a reusable library of components\":#reusecode\n# \"Supply scripts as input parameters\":#scriptsasinput\n# \"Getting the temporary and output directories\":#tempdirs\n# \"Specifying @ResourceRequirement@\":#ResourceRequirement\n\nh2(#performance). Performance\n\nTo get the best perfomance from your workflows, be aware of the following Arvados features, behaviors, and best practices.\n\nh3(#nvidiagpu). Does your application support NVIDIA GPU acceleration?\n\nUse \"cwltool:CUDARequirement\":cwl-extensions.html#CUDARequirement to request nodes with GPUs.\n\nh3(#preemptible). Trying to reduce costs?\n\nTry \"using preemptible (spot) instances\":cwl-run-options.html#preemptible .\n\nh3(#RunInSingleContainer). You have a sequence of short-running steps\n\nIf you have a sequence of short-running steps (less than 1-2 minutes each), use the Arvados extension \"arv:RunInSingleContainer\":cwl-extensions.html#RunInSingleContainer to avoid scheduling and data transfer overhead by running all the steps together in the same container on the same node.  To use this feature, @cwltool@ must be installed in the container image.  Example:\n\n{% codeblock as yaml %}\nclass: Workflow\ncwlVersion: v1.0\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\ninputs:\n  file: File\noutputs: []\nrequirements:\n  SubworkflowFeatureRequirement: {}\nsteps:\n  subworkflow-with-short-steps:\n    in:\n      file: file\n    out: [out]\n    # This hint indicates that the subworkflow should be bundled and\n    # run in a single container, instead of the normal behavior, which\n    # is to run each step in a separate container.  This greatly\n    # reduces overhead if you have a series of short jobs, without\n    # requiring any changes the CWL definition of the sub workflow.\n    hints:\n      - class: arv:RunInSingleContainer\n    run: subworkflow-with-short-steps.cwl\n{% endcodeblock %}\n\nh3(#avoidExcessRequirements). Avoid declaring @InlineJavascriptRequirement@ or @ShellCommandRequirement@\n\nAvoid declaring @InlineJavascriptRequirement@ or @ShellCommandRequirement@ unless you specifically need them.  Don't include them \"just in case\" because they change the default behavior and may add extra overhead.\n\nh3(#preferTextSubst). Prefer text substitution to Javascript\n\nWhen combining a parameter value with a string, such as adding a filename extension, write @$(inputs.file.basename).ext@ instead of @$(inputs.file.basename + 'ext')@.  
h3(#expressionTool). Use @ExpressionTool@ to efficiently rearrange input files\n\nUse @ExpressionTool@ to efficiently rearrange input files between steps of a Workflow.  For example, the following expression accepts a directory containing files paired by @_R1_@ and @_R2_@ and produces an array of Directories containing each pair.\n\n{% codeblock as yaml %}\nclass: ExpressionTool\ncwlVersion: v1.0\ninputs:\n  inputdir: Directory\noutputs:\n  out: Directory[]\nrequirements:\n  InlineJavascriptRequirement: {}\nexpression: |\n  ${\n    var samples = {};\n    for (var i = 0; i < inputs.inputdir.listing.length; i++) {\n      var file = inputs.inputdir.listing[i];\n      var groups = file.basename.match(/^(.+)(_R[12]_)(.+)$/);\n      if (groups) {\n        if (!samples[groups[1]]) {\n          samples[groups[1]] = [];\n        }\n        samples[groups[1]].push(file);\n      }\n    }\n    var dirs = [];\n    for (var key in samples) {\n      dirs.push({\"class\": \"Directory\",\n                 \"basename\": key,\n                 \"listing\": samples[key]});\n    }\n    return {\"out\": dirs};\n  }\n{% endcodeblock %}\n\nh3(#limitRAM). Limit RAM requests to what you really need\n\nAvailable compute node types vary over time and across different cloud providers, so it is important to limit the RAM requirement to what the program actually needs.  However, if you need to target a specific compute node type, see this discussion on \"calculating RAM request and choosing instance type for containers.\":{{site.baseurl}}/api/execution.html#RAM\n\nh3(#avoidScatterByStep). Avoid scattering step by step\n\nInstead of a scatter step that feeds into another scatter step, prefer to scatter over a subworkflow.\n\nWith the following pattern, @step1@ has to wait for all samples to complete before @step2@ can start computing on any samples.  This means a single long-running sample can prevent the rest of the workflow from moving on:\n\n{% codeblock as yaml %}\ncwlVersion: v1.0\nclass: Workflow\nrequirements:\n  ScatterFeatureRequirement: {}\ninputs:\n  inp: File[]\noutputs:\n  out:\n    type: File[]\n    outputSource: step3/out\nsteps:\n  step1:\n    in: {inp: inp}\n    scatter: inp\n    out: [out]\n    run: tool1.cwl\n  step2:\n    in: {inp: step1/out}\n    scatter: inp\n    out: [out]\n    run: tool2.cwl\n  step3:\n    in: {inp: step2/out}\n    scatter: inp\n    out: [out]\n    run: tool3.cwl\n{% endcodeblock %}\n\nInstead, scatter over a subworkflow.  In this pattern, a sample can proceed to @step2@ as soon as @step1@ is done, independently of any other samples.\nExample: (note, the subworkflow can also be put in a separate file)\n\n{% codeblock as yaml %}\ncwlVersion: v1.0\nclass: Workflow\nrequirements:\n  ScatterFeatureRequirement: {}\n  SubworkflowFeatureRequirement: {}\ninputs:\n  inp: File[]\noutputs:\n  out:\n    type: File[]\n    outputSource: step1/out\nsteps:\n  step1:\n    in: {inp: inp}\n    scatter: inp\n    out: [out]\n    run:\n      class: Workflow\n      inputs:\n        inp: File\n      outputs:\n        out:\n          type: File\n          outputSource: step3/out\n      steps:\n        step1:\n          in: {inp: inp}\n          out: [out]\n          run: tool1.cwl\n        step2:\n          in: {inp: step1/out}\n          out: [out]\n          run: tool2.cwl\n        step3:\n          in: {inp: step2/out}\n          out: [out]\n          run: tool3.cwl\n{% endcodeblock %}\n\n\nh2. Portability\n\nTo write workflows that are easy to modify and portable across CWL runners (in the event you need to share your workflow with others), there are several best practices to follow:\n\nh3(#DockerRequirement). Always provide @DockerRequirement@\n\nWorkflows should always provide @DockerRequirement@ in the @hints@ or @requirements@ section.\n\nh3(#reusecode). Build a reusable library of components\n\nShare tool wrappers and subworkflows between projects.  Make use of and contribute to \"community maintained workflows and tools\":https://github.com/common-workflow-library and tool registries such as \"Dockstore\":http://dockstore.org .\n\nh3(#scriptsasinput). Supply scripts as input parameters\n\nCommandLineTools wrapping custom scripts should represent the script as an input parameter with the script file as a default value.  Use @secondaryFiles@ for scripts that consist of multiple files.  For example:\n\n{% codeblock as yaml %}\ncwlVersion: v1.0\nclass: CommandLineTool\nbaseCommand: python\ninputs:\n  script:\n    type: File\n    inputBinding: {position: 1}\n    default:\n      class: File\n      location: bclfastq.py\n      secondaryFiles:\n        - class: File\n          location: helper1.py\n        - class: File\n          location: helper2.py\n  inputfile:\n    type: File\n    inputBinding: {position: 2}\noutputs:\n  out:\n    type: File\n    outputBinding:\n      glob: \"*.fastq\"\n{% endcodeblock %}\n\nh3(#tempdirs). Getting the temporary and output directories\n\nYou can get the designated temporary directory using @$(runtime.tmpdir)@ in your CWL file, or from the @$TMPDIR@ environment variable in your script.\n\nSimilarly, you can get the designated output directory using @$(runtime.outdir)@, or from the @HOME@ environment variable in your script.\n\n
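A minimal sketch illustrating both forms (the command is illustrative, not from a real pipeline):\n\n{% codeblock as yaml %}\ncwlVersion: v1.0\nclass: CommandLineTool\nbaseCommand: [sh, -c]\ninputs: []\noutputs: []\narguments:\n  # $(runtime.tmpdir) and $(runtime.outdir) are substituted by the CWL\n  # runner before the command runs; inside the container the same\n  # locations are available to the script as $TMPDIR and $HOME.\n  - echo \"scratch space is $(runtime.tmpdir), outputs go in $(runtime.outdir)\"\n{% endcodeblock %}\n\n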
h3(#ResourceRequirement). Specifying @ResourceRequirement@\n\nAvoid specifying resources in the @requirements@ section of a @CommandLineTool@; put them in the @hints@ section instead.  This enables you to override the tool resource hint with a workflow step level requirement:\n\n{% codeblock as yaml %}\ncwlVersion: v1.0\nclass: Workflow\ninputs:\n  inp: File\nsteps:\n  step1:\n    in: {inp: inp}\n    out: [out]\n    run: tool1.cwl\n  step2:\n    in: {inp: step1/out}\n    out: [out]\n    run: tool2.cwl\n    requirements:\n      ResourceRequirement:\n        ramMin: 2000\n        coresMin: 2\n        tmpdirMin: 90000\n{% endcodeblock %}\n\n<a name=\"httpimport\"></a>\n<a name=\"s3import\"></a>\n\nh2. Data import\n\nInformation about running workflows with HTTP/S or S3 URL inputs now has its own page in the user guide: \"Using external data sources in workflows\":{{ site.baseurl }}/user/topics/external-inputs.html\n"
  },
  {
    "path": "doc/user/cwl/cwl-versions.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: CWL version support\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados supports CWL v1.0, v1.1 and v1.2.\n\nh2(#v12). Upgrading your workflows to CWL v1.2\n\nIf you are starting from a CWL v1.0 document, see \"Upgrading your workflows to CWL v1.1\":#v11 below.\n\nIf you are starting from a CWL v1.1 document, you should be able to trivially change @cwlVersion: v1.1@ to @cwlVersion: v1.2@ to be able to take advantage of the new features of v1.2, such as conditional workflow steps.\n\nh2(#v11). Upgrading your workflows to CWL v1.1\n\nCWL v1.1 introduces several features to the standard that were previously available as Arvados extensions.  CWL v1.1 syntax is backwards compatible with v1.0, so you can just change @cwlVersion: v1.0@ to @cwlVersion: v1.1@ and update your script to using the standard features.  On Arvados, there is only one behavior change between CWL v1.0 and v1.1 to be aware of: for performance reasons, Directory listings are no longer loaded by default.  To control loading Directory listings, use \"loadListing\":https://www.commonwl.org/v1.1/CommandLineTool.html#CommandInputParameter or \"LoadListingRequirement\":https://www.commonwl.org/v1.1/CommandLineTool.html#LoadListingRequirement (the extension @cwltool:LoadListingRequirement@ is deprecated.)\n\nIf a step requires network access, use \"NetworkAccess\":https://www.commonwl.org/v1.1/CommandLineTool.html#NetworkAccess instead of the Arvados-specific \"arv:APIRequirement\":cwl-extensions.html#APIRequirement .\n\nTo prevent misbehaving steps from running forever and wasting resources, you can fail the step if it exceeds a certain running time with \"ToolTimeLimit\":https://www.commonwl.org/v1.1/CommandLineTool.html#ToolTimeLimit instead of the deprecated @cwltool:TimeLimit@ .\n\nTo control if an individual step can be reused, use \"WorkReuse\":https://www.commonwl.org/v1.1/CommandLineTool.html#WorkReuse instead of the deprecated @arv:ReuseRequirement@.\n"
  },
  {
    "path": "doc/user/cwl/federated/FileOnCluster.yml",
    "content": "name: FileOnCluster\ntype: record\nfields:\n  file: File\n  cluster: string"
  },
  {
    "path": "doc/user/cwl/federated/colors_to_select.txt",
    "content": "green\nblue\n"
  },
  {
    "path": "doc/user/cwl/federated/extract.cwl",
    "content": "cwlVersion: v1.0\nclass: CommandLineTool\nrequirements:\n  SchemaDefRequirement:\n    types:\n      - $import: FileOnCluster.yml\ninputs:\n  select_column: string\n  select_values: File\n  dataset: 'FileOnCluster.yml#FileOnCluster'\n  extract_py:\n    type: File\n    default:\n      class: File\n      location: extract.py\noutputs:\n  out:\n    type: File\n    outputBinding:\n      glob: extracted.csv\n\narguments: [python, $(inputs.extract_py), $(inputs.select_column), $(inputs.select_values), $(inputs.dataset.file), $(inputs.dataset.cluster)]\n"
  },
  {
    "path": "doc/user/cwl/federated/extract.py",
    "content": "import csv\nimport sys\n\nselect_column = sys.argv[1]\nselect_values = sys.argv[2]\ndataset = sys.argv[3]\ncluster = sys.argv[4]\n\nsv = open(select_values, \"rt\")\nselectvals = [s.strip() for s in sv]\n\nprint(\"selectvals\", selectvals)\n\nds = csv.reader(open(dataset, \"rt\"))\nheader = next(ds)\nprint(\"header is\", header)\ncolumnindex = None\nfor i,v in enumerate(header):\n    if v == select_column:\n        columnindex = i\nif columnindex is None:\n    raise Exception(\"Column %s not found\" % select_column)\n\nprint(\"column index\", columnindex)\n\nex = csv.writer(open(\"extracted.csv\", \"wt\"))\nex.writerow([\"cluster\"]+list(header))\n\nfor row in ds:\n    if row[columnindex] in selectvals:\n        ex.writerow([cluster]+list(row))\n"
  },
  {
    "path": "doc/user/cwl/federated/feddemo.cwl",
    "content": "# Demonstrate Arvados federation features.  This example searches a\n# list of CSV files that are hosted on different Arvados clusters.\n# For each file, send a task to the remote cluster which will scan\n# file and extracts the rows where the column \"select_column\" has one\n# of the values appearing in the \"select_values\" file.  The home\n# cluster then runs a task which pulls the results from the remote\n# clusters and merges the results to produce a final report.\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  # When using Arvados extensions to CWL, must declare the 'arv' namespace\n  arv: \"http://arvados.org/cwl#\"\n\nrequirements:\n  InlineJavascriptRequirement: {}\n  ScatterFeatureRequirement: {}\n  StepInputExpressionRequirement: {}\n\n  DockerRequirement:\n    # Replace this with your own Docker container\n    dockerPull: arvados/jobs\n\n  # Define a record type so we can conveniently associate the input\n  # file and the cluster where the task should run.\n  SchemaDefRequirement:\n    types:\n      - $import: FileOnCluster.yml\n\ninputs:\n  select_column: string\n  select_values: File\n\n  datasets:\n    type:\n      type: array\n      items: FileOnCluster.yml#FileOnCluster\n\n  intermediate_projects: string[]\n\noutputs:\n  # Will produce an output file with the results of the distributed\n  # analysis jobs merged together.\n  joined:\n    type: File\n    outputSource: gather-results/out\n\nsteps:\n  distributed-analysis:\n    in:\n      select_column: select_column\n      select_values: select_values\n      dataset: datasets\n      intermediate_projects: intermediate_projects\n\n    # Scatter over shards, this means creating a parallel job for each\n    # element in the \"shards\" array.  Expressions are evaluated for\n    # each element.\n    scatter: [dataset, intermediate_projects]\n    scatterMethod: dotproduct\n\n    # Specify the cluster target for this task.  This means each\n    # separate scatter task will execute on the cluster that was\n    # specified in the \"cluster\" field.\n    #\n    # Arvados handles streaming data between clusters, for example,\n    # the Docker image containing the code for a particular tool will\n    # be fetched on demand, as long as it is available somewhere in\n    # the federation.\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.dataset.cluster)\n        project_uuid: $(inputs.intermediate_projects)\n\n    out: [out]\n    run: extract.cwl\n\n  # Collect the results of the distributed step and join them into a\n  # single output file.  Arvados handles streaming inputs,\n  # intermediate results, and outputs between clusters on demand.\n  gather-results:\n    in:\n      dataset: distributed-analysis/out\n    out: [out]\n    run: merge.cwl\n"
  },
  {
    "path": "doc/user/cwl/federated/items1.csv",
    "content": "color,item\nblue,ball\nyellow,ball\nred,ball\ngreen,book\npurple,book\nred,book\nyellow,flower\npurple,flower\nred,bicycle\nred,ball\ngreen,picture\nyellow,ball\npurple,flower\nyellow,ball\ngreen,bicycle\norange,book\ngreen,book\norange,picture\nblue,book\norange,car\nyellow,flower\npurple,ball\nblue,book\norange,book\norange,book\nyellow,book\norange,car\nyellow,car\n"
  },
  {
    "path": "doc/user/cwl/federated/items2.csv",
    "content": "color,item\ngreen,bicycle\nred,flower\nblue,bicycle\nyellow,flower\ngreen,ball\nred,book\nred,bicycle\nyellow,ball\nblue,picture\ngreen,book\norange,flower\nblue,ball\norange,car\ngreen,book\nyellow,car\norange,picture\norange,car\nyellow,flower\ngreen,ball\norange,car\npurple,book\ngreen,ball\nred,flower\nblue,car\norange,flower\nblue,book\nblue,bicycle\nred,picture\norange,flower\norange,book\nblue,flower\norange,book\n"
  },
  {
    "path": "doc/user/cwl/federated/items3.csv",
    "content": "color,item\npurple,book\ngreen,book\nred,bicycle\nyellow,book\norange,book\ngreen,car\ngreen,car\nblue,ball\nyellow,bicycle\norange,book\ngreen,bicycle\nblue,flower\nred,bicycle\npurple,bicycle\ngreen,bicycle\norange,ball\nyellow,car\norange,ball\nred,ball\nred,car\ngreen,picture\ngreen,flower\nblue,picture\ngreen,car\nyellow,flower\npurple,flower\ngreen,ball\nyellow,bicycle\norange,bicycle\norange,flower\nyellow,picture\npurple,flower\ngreen,picture\norange,car\norange,picture\nyellow,car\nyellow,picture\npurple,picture\npurple,picture\npurple,flower\n"
  },
  {
    "path": "doc/user/cwl/federated/merge.cwl",
    "content": "cwlVersion: v1.0\nclass: CommandLineTool\nrequirements:\n  SchemaDefRequirement:\n    types:\n      - $import: FileOnCluster.yml\ninputs:\n  dataset:\n    type: File[]\n    inputBinding:\n      position: 1\n  merge_py:\n    type: File\n    default:\n      class: File\n      location: merge.py\noutputs:\n  out:\n    type: File\n    outputBinding:\n      glob: merged.csv\n\narguments: [python, $(inputs.merge_py)]\n"
  },
  {
    "path": "doc/user/cwl/federated/merge.py",
    "content": "import sys\nimport csv\n\nmerged = open(\"merged.csv\", \"wt\")\n\nwroteheader = False\nfor s in sys.argv[1:]:\n    f = open(s, \"rt\")\n    header = next(f)\n    if not wroteheader:\n        merged.write(header)\n        wroteheader = True\n    for l in f:\n        merged.write(l)\n    f.close()\n"
  },
  {
    "path": "doc/user/cwl/federated/shards.yml",
    "content": "select_column: color\nselect_values:\n  class: File\n  location: colors_to_select.txt\n\ndatasets:\n  - cluster: clsr1\n    file:\n      class: File\n      location: keep:0dcf9310e5bf0c07270416d3a0cd6a43+56/items1.csv\n\n  - cluster: clsr2\n    file:\n      class: File\n      location: keep:12707d325a3f4687674b858bd32beae9+56/items2.csv\n\n  - cluster: clsr3\n    file:\n      class: File\n      location: keep:dbff6bb7fc43176527af5eb9dec28871+56/items3.csv\n\nintermediate_projects:\n  - clsr1-j7d0g-qxc4jcji7n4lafx\n  - clsr2-j7d0g-e7r20egb8hlgn53\n  - clsr3-j7d0g-vrl00zoku9spnen\n"
  },
  {
    "path": "doc/user/cwl/federated-workflows.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Federated Multi-Cluster Workflows\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nTo support running analysis on geographically dispersed data (avoiding expensive data transfers by sending the computation to the data), and \"hybrid cloud\" configurations where an on-premise cluster can expand its capabilities by delegating work to a cloud-hosted cluster, Arvados supports federated workflows.  In a federated workflow, different steps of a workflow may execute on different clusters.  Arvados manages data transfer and delegation of credentials, so that all that is required is adding \"arv:ClusterTarget\":cwl-extensions.html#ClusterTarget hints to your existing workflow.\n\n!(full-width)federated-workflow.svg!\n\nFor more information, visit the \"architecture\":{{site.baseurl}}/architecture/federation.html and \"admin\":{{site.baseurl}}/admin/federation.html sections about Arvados federation.\n\nh2. Get the example files\n\nThe tutorial files are located in the \"documentation section of the Arvados source repository:\":https://github.com/arvados/arvados/tree/main/doc/user/cwl/federated or \"see below\":#fed-example\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">git clone https://github.com/arvados/arvados</span>\n~$ <span class=\"userinput\">cd arvados/doc/user/cwl/federated</span>\n</code></pre>\n</notextile>\n\nh2. Run example\n\n{% include 'notebox_begin' %}\n\nAt this time, remote steps of a workflow on Workbench are not displayed.  As a workaround, you can find the UUIDs of the remote steps in the live logs of the workflow runner (the \"Logs\" tab).  You may visit the remote cluster's workbench and enter the UUID into the search box to view the details of the remote step.  This will be fixed in a future version of workbench.\n\n{% include 'notebox_end' %}\n\nRun it like any other workflow:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arvados-cwl-runner feddemo.cwl shards.cwl</span>\n</code></pre>\n</notextile>\n\nYou can also \"run a workflow on a remote federated cluster\":cwl-run-options.html#federation .\n\nh2(#fed-example). Federated scatter/gather example\n\nIn this following example, an analysis task is executed on three different clusters with different data, then the results are combined to produce the final output.\n\n{% codeblock as yaml %}\n{% include 'federated_cwl' %}\n{% endcodeblock %}\n\nExample input document:\n\n{% codeblock as yaml %}\n{% include 'shards_yml' %}\n{% endcodeblock %}\n"
  },
  {
    "path": "doc/user/cwl/rnaseq-cwl-training.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Getting Started with CWL\"\n\nno_nav_left: true\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nnotextile. <iframe src=\"https://doc.arvados.org/rnaseq-cwl-training\" style=\"width:100%; height:100%; border:none\" />\n"
  },
  {
    "path": "doc/user/debugging/container-shell-access.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Debugging workflows - shell access\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'tutorial_expectations' %}\n\n{% include 'notebox_begin' %}\n\nTo use this feature, your Arvados installation must be configured to allow container shell access. See \"the install guide\":{{site.baseurl}}/install/container-shell-access.html for more information.\n\n{% include 'notebox_end' %}\n\nThe @arvados-client@ program can be used to connect to a container in a running workflow. It can be installed from packages (@apt install arvados-client@ or @dnf install arvados-client@). The @arvados-client shell@ command provides an ssh connection into a running container.\n\nh2(#syntax). Syntax\n\nThe @arvados-client shell@ tool has the following syntax:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arvados-client shell -h</span>\narvados-client shell: open an interactive shell on a running container.\n\nUsage: arvados-client shell [options] [username@]container-uuid [ssh-options] [remote-command [args...]]\n\nOptions:\n  -detach-keys string\n      set detach key sequence, as in docker-attach(1) (default \"ctrl-],ctrl-]\")\n\n</code></pre>\n</notextile>\n\nThe @arvados-client shell@ command calls the ssh binary on your system to make the connection. Everything after _[username@]container-uuid_ is passed through to your OpenSSH client.\n\nh2(#Examples). Examples\n\nConnect to a running container, using the container request UUID:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arvados-client shell ce8i5-xvhdp-e6wnujfslyyqn4b</span>\nroot@0f13dcd755fa:~#\n</code></pre>\n</notextile>\n\nThe container UUID also works:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arvados-client shell ce8i5-dz642-h1cl0sa62d4i430</span>\nroot@0f13dcd755fa:~#\n</code></pre>\n</notextile>\n\nSSH port forwarding is supported:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arvados-client shell ce8i5-dz642-h1cl0sa62d4i430 -L8888:localhost:80</span>\nroot@0f13dcd755fa:~# nc -l -p 80\n</code></pre>\n</notextile>\n\nAnd then, connecting to port 8888 locally:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">echo hello | nc localhost 8888</span>\n</code></pre>\n</notextile>\n\nWhich appears on the other end:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arvados-client shell ce8i5-dz642-h1cl0sa62d4i430 -L8888:localhost:80</span>\nroot@0f13dcd755fa:~# nc -l -p 80\nhello\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/user/getting_started/check-environment.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Checking your environment\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nFirst, log into an Arvados VM instance (instructions for \"Webshell\":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or \"Unix\":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or \"Windows\":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login) or install the Arvados \"Command line SDK\":{{site.baseurl}}/sdk/cli/install.html and \"Python SDK\":{{site.baseurl}}/sdk/python/sdk-python.html on your workstation.\n\nCheck that you are able to access the Arvados API server using @arv user current@.  If it is able to access the API server, it will print out information about your account:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv user current</span>\n{\n \"kind\":\"arvados#user\",\n \"etag\":\"8u0xwb9f3otb2xx9hto4wyo03\",\n \"uuid\":\"zzzzz-tpzed-92d3kxnimy3d4e8\",\n \"owner_uuid\":\"zzzzz-tpzed-000000000000000\",\n \"created_at\":\"2013-12-02T17:05:47Z\",\n \"modified_by_user_uuid\":\"zzzzz-tpzed-23iddeohxta2r59\",\n \"modified_at\":\"2013-12-02T17:07:08Z\",\n \"updated_at\":\"2013-12-05T19:51:08Z\",\n \"email\":\"you@example.com\",\n \"full_name\":\"Example User\",\n \"first_name\":\"Example\",\n \"last_name\":\"User\",\n \"identity_url\":\"\",\n \"is_active\": true,\n \"is_admin\": false,\n \"prefs\":{}\n}\n</code></pre>\n</notextile>\n\nHowever, if you receive the following message:\n\nbc. ARVADOS_API_HOST and ARVADOS_API_TOKEN need to be defined as environment variables\n\nfollow the instructions for \"getting an API token,\":{{site.baseurl}}/user/reference/api-tokens.html and try @arv user current@ again.\n"
  },
  {
    "path": "doc/user/getting_started/community.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Arvados Community and Getting Help\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2. On the web\n\nThe Arvados Free Sofware project page is located at \"https://arvados.org\":https://arvados.org.  The \"Arvados GitHub project\":https://github.com/arvados/arvados is a collaborative site for improving Arvados.\n\nh2. Chat\n\nThe \"arvados/community\":https://gitter.im/arvados/community channel at \"gitter.im\":https://gitter.im is available for live discussion and support.\n\nh2. Bug tracking\n\nIf you think you have found a bug, or would like to make a feature request, check the \"Arvados issue tracker\":https://github.com/arvados/arvados/issues to see if has already been reported or \"add a new issue.\":https://github.com/arvados/arvados/issues/new\n"
  },
  {
    "path": "doc/user/getting_started/setup-cli.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Getting started at the command line\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nMany operations in Arvados can be performed using either the web Workbench or through command line tools.  Some operations can only be done using the command line.\n\nTo use the command line tools, you can either log into an Arvados virtual machine where those tools are pre-installed, or install the Arvados tools on your own system.\n\nh2. Option 1: Using an Arvados virtual machine\n\nThis is the command line interface we recommend for most day-to-day work, because the tools are all preinstalled and preconfigured for you. You can log in to any virtual machine where you have permission by using:\n\n* \"the Webshell client\":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html accessible through Arvados Workbench\n* \"Unix SSH clients\":{{site.baseurl}}/user/getting_started/ssh-access-unix.html\n* \"Windows SSH clients\":{{site.baseurl}}/user/getting_started/ssh-access-windows.html\n\nh2. Option 2: Installing Arvados tools on your own system\n\nThis option gives you more flexibility in your work, but takes more time to set up.\n\nh3. Install client tools on Red Hat, AlmaLinux, and Rocky Linux\n\n{% assign packages_to_install = \"arvados-client python3-arvados-python-client python3-arvados-cwl-runner python3-arvados-fuse python3-crunchstat-summary\" %}\n{% include 'setup_redhat_repo' %}\n\nProceed to build and install the Arvados CLI tools:\n\n<notextile>\n<pre><code># <span class=\"userinput\">dnf module enable ruby:3.1</span>\n# <span class=\"userinput\">dnf install ruby ruby-devel gcc-c++ make redhat-rpm-config glibc-devel glibc-headers curl-devel openssl-devel zlib-devel</span>\n# <span class=\"userinput\">gem install arvados-cli</span>\n</code></pre>\n</notextile>\n\nh3. Install client tools on Debian and Ubuntu\n\n{% include 'setup_debian_repo' %}\n\nProceed to build and install the Arvados CLI tools:\n\n<notextile>\n<pre><code># <span class=\"userinput\">apt install ruby ruby-dev gcc g++ make libc-dev libcurl4-openssl-dev zlib1g-dev</span>\n# <span class=\"userinput\">gem install arvados-cli</span>\n</code></pre>\n</notextile>\n\nh3. Proceed to configuration\n\nOnce you have the command line tools installed, proceed to \"getting an API token\":{{site.baseurl}}/user/reference/api-tokens.html.\n"
  },
  {
    "path": "doc/user/getting_started/ssh-access-unix.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Accessing an Arvados VM with SSH - Unix Environments\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis document is for accessing an Arvados VM using SSH keys in Unix-like environments (Linux, macOS, Cygwin, Windows Subsystem for Linux). If you would like to access VM through your browser, please visit the \"Accessing an Arvados VM with Webshell\":vm-login-with-webshell.html page. If you are using a Windows environment, please visit the \"Accessing an Arvados VM with SSH - Windows Environments\":ssh-access-windows.html page.\n\n{% include 'ssh_intro' %}\n\nh1(#gettingkey). Getting your SSH key\n\nh3(#unix). Generate a key using ssh-keygen\n\nStart by opening a terminal window.  Check if you have an existing public key:\n\nnotextile. <pre><code>$ <span class=\"userinput\">ls ~/.ssh/id_rsa.pub</span></code></pre>\n\nIf the file @id_rsa.pub@ exists, then you may use your existing key.  Copy the contents of @~/.ssh/id_rsa.pub@ onto the clipboard (this is your public key).  You can skip the rest of this section and proceed by \"adding your key to the Arvados Workbench.\":#workbench\n\nIf there is no file @~/.ssh/id_rsa.pub@, you must generate a new key.  Use @ssh-keygen@ to do this:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">ssh-keygen -t rsa -C \"you@example.com\"</span>\nGenerating public/private rsa key pair.\nEnter file in which to save the key (/home/example/.ssh/id_rsa):\nEnter passphrase (empty for no passphrase):\nEnter same passphrase again:\n</code></pre>\n</notextile>\n\n* @-t@ specifies the key type (must be \"rsa\")\n* @-C@ specifies a comment (to remember which account the key is associated with)\n\nWe strongly recommend that you protect your key with a passphrase.  This means that when the key is used, you will be required to enter the passphrase.  However, unlike logging into remote system using a password, the passphrase is never sent over the network; it is only used to decrypt your private key locally.\n\nDisplay the contents of @~/.ssh/id_rsa.pub@ (this is your public key) using @cat@, and then copy it onto the clipboard. The content of the public key may look similar to the following example:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">cat ~/.ssh/id_rsa.pub</span>\nssh-rsa AAAAB3NzaC1ycEDoNotUseExampleKeyDoNotUseExampleKeyDoNotUseExampleKeyDoNotUse9lmzkpBq983bQradKGT3LuKda9QOGe8MatI6wzSrJLSGhHm3hk6D8OWWUG4SneuCtKIk2bH0pgBj1G29+uzDIez90WzfCTZKbz4RcVQmPkowSSUAQDwb0ffwvRDhCgcJ1loT1wQAJzqJmljQ7xEYaCOIMqnfYE0lX7B3MSvCV6Ie2rWL33YecLp48LVtqiCOZU4XRyO8RSDFRFLVW+mjkLirwtDHZCRtORScaIEN0jw51p+T+9X5iA9QH/Mn+xlgk7fCgH+JtpBj808N/Qds2Gpff+Kb6ulUrVVfMK6L you@example.com\n</code></pre>\n</notextile>\n\n* The above is a specimen that cannot be used as a valid public key.\n\nNow you can set up @ssh-agent@ (next) or proceed with \"adding your key to the Arvados Workbench.\":#workbench\n\nh3. Set up ssh-agent (optional)\n\nIf you find you are entering your passphrase frequently, you can use @ssh-agent@ to manage your credentials.  Use @ssh-add -l@ to test if you already have ssh-agent running:\n\nnotextile. <pre><code>$ <span class=\"userinput\">ssh-add -l</span></code></pre>\n\nIf you get the error \"_Could not open a connection to your authentication agent_\", you will need to run @ssh-agent@ with the following command:\n\nnotextile. 
<pre><code>$ <span class=\"userinput\">eval \"$(ssh-agent -s)\"</span></code></pre>\n\n@ssh-agent -s@ runs an agent process in the background to hold your SSH credentials, and it prints out the values of environment variables @SSH_AUTH_SOCK@ and @SSH_AGENT_PID@.  By applying the shell builtin @eval@ to this output, as we show here using the shell command-substitution syntax, we set those variables in the current shell environment. In this way, subsequent invocations of @ssh@ in this shell session will be able to access the agent process for the credentials without asking you each time.\n\nAfter running @ssh-agent@, or if @ssh-add -l@ prints \"_The agent has no identities_\", add your private key to the SSH agent using the following command.  The passphrase to decrypt the key is the same one used to protect the key when it was created with @ssh-keygen@:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">ssh-add</span>\nEnter passphrase for /home/example/.ssh/id_rsa:\nIdentity added: /home/example/.ssh/id_rsa (/home/example/.ssh/id_rsa)\n</code></pre>\n</notextile>\n\nWhen everything is set up, @ssh-add -l@ should yield output that looks like this:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">ssh-add -l</span>\n2048 eb:fa:15:f2:44:26:95:58:37:37:f4:aa:ff:ee:c2:85 you@example.com (RSA)\n</code></pre>\n</notextile>\n\n{% include 'ssh_addkey' %}\n\nh3. Connecting directly\n\nIf the VM is available on the public Internet (or you are on the same private network as the VM), you can connect directly with @ssh@.  You can copy-and-paste the text from the *Command line* column (see the screenshot above) directly into a shell session.\n\nUse the following example command to connect as the user \"_you_\" to the VM instance at the hostname \"_shell.ClusterID.example.com_\".  Replace *<code>you@shell.ClusterID.example.com</code>* at the end of the following command with your actual *login* and *hostname* from Workbench.\n\nnotextile. <pre><code>$ <span class=\"userinput\">ssh <b>you@shell.ClusterID.example.com</b></span></code></pre>\n\nh3. Connecting through switchyard\n\nSome Arvados installations use \"switchyard\" to isolate shell VMs from the public Internet.  In such cases, you cannot log in directly to virtual machines over the public Internet.  Instead, you log into a \"switchyard\" server and then tell the switchyard which virtual machine you want to connect to.\n\nUse the following example command to connect to the _shell_ VM instance as _you_.  Replace *<code>you@shell</code>* at the end of the following command with your *login* and *hostname* from Workbench:\n\nnotextile. <pre><code>$ <span class=\"userinput\">ssh -o \"ProxyCommand ssh -p2222 turnout@switchyard.ClusterID.example.com -x -a <b>shell</b>\" -x <b>you@shell</b></span></code></pre>\n\nThis command does several things at once.\n\n* @-o \"ProxyCommand ...\"@ configures SSH to run the specified command to create a proxy and route your connection through it.\n* @-p2222@ specifies that the switchyard is running on non-standard port 2222.\n* <code>turnout@switchyard.{{ site.arvados_api_host }}</code> specifies the user (@turnout@) and hostname (@switchyard.{{ site.arvados_api_host }}@) of the switchyard server that will proxy our connection to the VM.\n* @-x@ tells SSH not to forward your X session to the switchyard.\n* @-a@ tells SSH not to forward your ssh-agent credentials to the switchyard.\n* *@shell@* is the host name of the VM that we want to connect to.  
In summary, the string inside the quotation marks is sent to the switchyard server, as if it were an SSH command, and the switchyard server connects to the VM on our behalf.\n* After the @ProxyCommand@ section, we repeat @-x@ to disable X session forwarding to the virtual machine.\n* Finally, *<code>you@shell</code>* specifies your login name and repeats the hostname of the VM.  The username can be found in the *logins* column in the VMs Workbench page, discussed in the previous section.\n\nYou should now be able to log into the Arvados VM and \"check your environment.\":check-environment.html\n\nh4. Configuration (recommended)\n\nThe command line above is cumbersome, but you can configure SSH to remember many of these settings.  Add this text to the file @.ssh/config@ in your home directory (create a new file if @.ssh/config@ doesn't exist):\n\n<notextile>\n<pre><code class=\"userinput\">Host *.{{ site.arvados_cluster_uuid }}\n  TCPKeepAlive yes\n  ServerAliveInterval 60\n  ProxyCommand ssh -p2222 turnout@switchyard.{{ site.arvados_api_host }} -x -a $SSH_PROXY_FLAGS %h\n  User <b>you</b>\n</code></pre>\n</notextile>\n\nThis will recognize any host ending in \".{{ site.arvados_cluster_uuid }}\" and automatically apply the proxy, user and forwarding settings from the configuration file, allowing you to log in with a much simpler command:\n\nnotextile. <pre><code>$ <span class=\"userinput\">ssh <b>shell</b>.{{ site.arvados_cluster_uuid }}</span></code></pre>\n\nYou should now be able to log into the Arvados VM and \"check your environment.\":check-environment.html\n"
  },
  {
    "path": "doc/user/getting_started/ssh-access-windows.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Accessing an Arvados VM with SSH - Windows Environments\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis document is for accessing an Arvados VM using SSH keys in Windows environments using PuTTY.  If you would like to use to access VM through your browser, please visit the \"Accessing an Arvados VM with Webshell\":vm-login-with-webshell.html page.  If you are using a Unix-like environment (Linux, macOS, Cygwin, or Windows Subsystem for Linux), please visit the \"Accessing an Arvados VM with SSH - Unix Environments\":ssh-access-unix.html page.\n\n{% include 'ssh_intro' %}\n\nh1(#gettingkey). Getting your SSH key\n\n(Note: If you are using the SSH client that comes with \"Cygwin\":http://cygwin.com or Windows Subsystem for Linux (WSL) please use instructions found in the \"Accessing an Arvados VM with SSH - Unix Environments\":ssh-access-unix.html page.)\n\nWe will be using PuTTY to connect to Arvados. \"PuTTY\":http://www.chiark.greenend.org.uk/~sgtatham/putty/ is a free (MIT-licensed) Win32 Telnet and SSH client. PuTTY includes all the tools a Windows user needs to create private keys and make SSH connections to your virtual machines in the Arvados Cloud.\n\nYou can \"download PuTTY from its Web site\":http://www.chiark.greenend.org.uk/~sgtatham/putty/.  Note that you should download the installer or .zip file with all of the PuTTY tools (PuTTYtel is not required).\n\nIf you downloaded the zip file, extract it to the location you wish to install the PuTTY applications. This document assumes that you installed PuTTY in the default directory under @C:\\Program Files\\@ or @C:\\Program Files (x86)\\@ (if you are using a 64 bit operating system).\n\nh3. Step 1 - Adding PuTTY to the PATH\n\n# After downloading PuTTY and installing it, you should have a PuTTY folder in @C:\\Program Files\\@ or @C:\\Program Files (x86)\\@ (if you are using a 64 bit operating system).\n# Open the Control Panel.\n# Select _Advanced System Settings_, and choose _Environment Variables_.\nIf you are using newer systems like Windows 10, you may use the following to open _Advanced System Settings_. Open Control Panel. Click on _System and Security_. Click on _System_. Click on _Advanced system settings_ and choose _Environment Variables..._\n# Under system variables, find and edit @PATH@.\n# If you installed PuTTY in @C:\\Program Files\\PuTTY\\@, add the following to the end of PATH:\n<code>;C:\\Program Files\\PuTTY</code>\nIf you installed PuTTY in @C:\\Program Files (x86)\\PuTTY\\@, add the following to the end of PATH:\n<code>;C:\\Program Files (x86)\\PuTTY</code>\n# Click through the OKs to close all the dialogs you’ve opened.\n\nh3. Step 2 - Creating a Public Key\n\n# Start PuTTYgen from the Start Menu or the folder where it was installed.\n# At the bottom of the window, make sure the ‘Number of bits in a generated key’ field is set to 4096.\n# Click Generate and follow the instructions to generate a key.\n# Click the _Save public key_ button.\n# Click the _Save private key_ button (we recommend using a strong passphrase).\n# Select the text of the Public Key and copy it to the clipboard.\n\nh3. 
Step 3 - Set up Pageant\n\nPageant is a PuTTY utility that manages your private keys so that it is not necessary to enter your private key passphrase every time you make a new SSH connection.\n\n# Start Pageant from the Start Menu or the folder where it was installed.\n# Pageant will now be running in the system tray. Click the Pageant icon to configure.\n# Choose _Add Key_ and add the private key which you created in the previous step.\n\n{% include 'ssh_addkey' %}\n\nh3. Initial configuration\n\nh4. Connecting directly\n\n# Open PuTTY from the Start Menu.\n# On the Session screen set the Host Name (or IP address) to “shell.ClusterID.example.com”, which is the hostname listed in the _Virtual Machines_ page.\n# On the Session screen set the Port to “22”.\n# On the Connection %(rarr)&rarr;% Data screen set the Auto-login username to the username listed in the *Login name* column on the Arvados Workbench _Virtual Machines_ page.\n# Return to the Session screen. In the Saved Sessions box, enter a name for this configuration and click Save.\n\nh4. Connecting through switchyard\n\n# Open PuTTY from the Start Menu.\n# On the Session screen set the Host Name (or IP address) to “shell”, which is the hostname listed in the _Virtual Machines_ page.\n# On the Session screen set the Port to “22”.\n# On the Connection %(rarr)&rarr;% Data screen set the Auto-login username to the username listed in the *Login name* column on the Arvados Workbench _Virtual Machines_ page.\n# On the Connection %(rarr)&rarr;% Proxy screen set the Proxy Type to “Local”.\n# On the Connection %(rarr)&rarr;% Proxy screen in the “Telnet command, or local proxy command” box enter:\n<code>plink -P 2222 turnout@switchyard.{{ site.arvados_api_host }} %host</code>\nMake sure there is no newline at the end of the text entry.\n# Return to the Session screen. In the Saved Sessions box, enter a name for this configuration and click Save.\n\n_Note: We recommend you do not delete the “Default” Saved Session._\n\nh3. Connecting to the VM\n\n# Open PuTTY from the Start Menu.\n# Click on the Saved Session name you created in the previous section.\n# Click Load to load those saved session settings.\n# Click Open to open the SSH window at the command prompt. You will now be logged into your virtual machine.\n\n_Note_: If you see a hung PuTTY terminal window with no further action: open a new _Command Prompt_ window using the Windows -> Start menu and type <code>plink -P 2222 turnout@switchyard.{{ site.arvados_api_host }} shell</code> in it. Please make sure to replace *shell* with the hostname listed in the _Virtual Machines_ page. Hit enter and type _y_ when prompted to cache the session state. Go back and start the PuTTY session using the Start Menu.\n\nYou should now be able to log into the Arvados VM and \"check your environment.\":check-environment.html\n"
  },
  {
    "path": "doc/user/getting_started/vm-login-with-webshell.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Accessing an Arvados VM with Webshell\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis document describes how to access an Arvados VM with Webshell from Workbench.\n\nh2(#webshell). Access VM using webshell\n\nWebshell gives you access to an arvados virtual machine from your browser with no additional setup.\n\n{% include 'notebox_begin' %}\nSome Arvados clusters may not have webshell set up.  If you do not see a \"Log in\" button or \"web shell\" column, you will have to follow the \"Unix\":ssh-access-unix.html or \"Windows\":ssh-access-windows.html @ssh@ instructions.\n{% include 'notebox_end' %}\n\nIn the Arvados Workbench, click on the dropdown menu icon <i class=\"fa fa-lg fa-user\"></i> in the upper right corner of the top navigation menu to access the _Account Management_ menu, and click on the menu item *Shell access* to see the list of virtual machines you can access.  If you do not have access to any virtual machines,  please click on <span class=\"btn btn-sm btn-primary\">SEND REQUEST FOR SHELL ACCESS</span> (if present) or contact your system administrator.  For the Arvados Playground, this is \"info@curii.com\":mailto:info@curii.com .\n\nEach row in the Shell access panel lists the hostname of the VM, along with a <span class=\"btn btn-sm btn-default\" style=\"background-color: #e0e0e0\">Log in as [your name]</span> button under the column \"*Web shell*\". Clicking on this button will open up a webshell terminal for you in a new browser tab and log you in.\n\n!{width: 100%;}{{ site.baseurl }}/images/vm-access-with-webshell.png!\n\nYou are now ready to work in your Arvados VM.\n"
  },
  {
    "path": "doc/user/getting_started/workbench.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Accessing Arvados Workbench\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'notebox_begin' %}\nThis guide covers modern Arvados Workbench web application, which may be referred to as \"Workbench 2\" to distinguish it from the previous Arvados Workbench web application (\"Workbench 1\").  Documentation for the classic Workbench can be found in \"older versions of the user guide\":https://doc.arvados.org/v2.6/user/getting_started/workbench.html .  See also \"Workbench 2 migration\":{{site.baseurl}}/user/topics/workbench-migration.html for more information.\n{% include 'notebox_end' %}\n\nYou can access the Arvados Workbench used in this guide using this link:\n\n<a href=\"{{site.arvados_workbench_host}}/\" target=\"_blank\">{{site.arvados_workbench_host}}</a>\n\nIf you are using a different Arvados instance replace @{{ site.arvados_workbench_host }}@ with your private instance in all of the examples in this guide.\n\nh2. Playground\n\nCurii operates a public demonstration instance of Arvados called the Arvados Playground, which can be found at <a href=\"https://playground.arvados.org\" target=\"_blank\">https://playground.arvados.org</a> .  Some examples in this guide involve getting data from the Playground instance.\n\nh2. Logging in\n\nYou will be asked to log in.  Arvados uses only your name and email address for identification, and will never access any personal information.  If you are accessing Arvados for the first time, the Workbench may indicate your account status is *New / inactive*.  If this is the case, contact the administrator of the Arvados instance to request activation of your account.\n\nOnce your account is active, logging in to the Workbench will present you with an overview of your Home Projects.  You are now ready to \"upload data\":{{ site.baseurl }}/user/tutorials/tutorial-keep.html or \"run your first workflow.\":{{ site.baseurl }}/user/tutorials/tutorial-workflow-workbench.html\n\n!{width: 100%;}{{ site.baseurl }}/images/workbench-first-page.png!\n"
  },
  {
    "path": "doc/user/index.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Welcome to Arvados&trade;!\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados is an \"open source\":copying/copying.html platform for managing, processing, and sharing genomic and other large scientific and biomedical data.  With Arvados, bioinformaticians run and scale compute-intensive workflows, developers create biomedical applications, and IT administrators manage large compute and storage resources.\n\nThis guide provides a reference for using Arvados to solve scientific big data problems.\n\nThe examples in this guide use the Arvados instance located at <a href=\"{{site.arvados_workbench_host}}/\" target=\"_blank\">{{site.arvados_workbench_host}}</a>.  If you are using a different Arvados instance replace @{{ site.arvados_workbench_host }}@ with your private instance in all of the examples in this guide.\n\nh2. Typographic conventions\n\nThis manual uses the following typographic conventions:\n\n<notextile>\n<ul>\n<li>Code blocks which are set aside from the text indicate user input to the system.  Commands that should be entered into a Unix shell are indicated by the directory where you should  enter the command ('~' indicates your home directory) followed by '$', followed by the highlighted <span class=\"userinput\">command to enter</span> (do not enter the '$'), and possibly followed by example command output in black.  For example, the following block indicates that you should type <code>ls foo.*</code> while in your home directory and the expected output will be \"foo.input\" and \"foo.output\".\n<pre><code>~$ <span class=\"userinput\">ls foo.*</span>\nfoo.input foo.output\n</code></pre>\n</li>\n\n<li>Code blocks inline with text emphasize specific <code>programs</code>, <code>files</code>, or <code>options</code> that are being discussed.</li>\n<li>Bold text emphasizes <b>specific items</b> to review on Arvados Workbench pages.</li>\n<li>A sequence of steps separated by right arrows (<span class=\"rarr\">&rarr;</span>) indicate a path the user should follow through the Arvados Workbench.  The steps indicate a menu, hyperlink, column name, field name, or other label on the page that guide the user where to look or click.\n</li>\n</ul>\n</notextile>\n"
  },
  {
    "path": "doc/user/reference/api-tokens.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Getting an API token\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe Arvados API token is a secret key that enables the Arvados command line tools to authenticate themselves.\n\nAccess the Arvados Workbench using this link: \"{{site.arvados_workbench_host}}/\":{{site.arvados_workbench_host}}/  (Replace the hostname portion with the hostname of your local Arvados instance if necessary.)\n\nOpen a shell on the system where you want to use the Arvados client. This may be your local workstation, or an Arvados virtual machine accessed with \"Webshell\":{{site.baseurl}}/user/getting_started/vm-login-with-webshell.html or SSH (instructions for \"Unix\":{{site.baseurl}}/user/getting_started/ssh-access-unix.html#login or \"Windows\":{{site.baseurl}}/user/getting_started/ssh-access-windows.html#login).\n\nIn the Arvados Workbench, click on the dropdown menu icon <span class=\"fa fa-lg fa-user\"></span> in the upper right corner of the top navigation menu to access the _Account Management_ menu. Then, in the pop-up menu, click on the menu item *Get API token*. This will open a dialog box that lists your current token and the instructions for setting up your environment.\n\nh2. Setting environment variables\n\nIn the dialog box opened after clicking on the *Get API token* menu item, there is a sequence of commands you may copy and paste directly into the shell.  It will look something as the following.\n\nbc. HISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'\nexport ARVADOS_API_TOKEN=2jv9346o396exampledonotuseexampledonotuseexes7j1ld\nexport ARVADOS_API_HOST={{ site.arvados_api_host }}\nunset ARVADOS_API_HOST_INSECURE\n\n* The @export@ command puts a local shell variable into the environment that will be inherited by child processes such as the @arv@ client.\n\nh2. settings.conf\n\nArvados tools will also look for the authentication information in @~/.config/arvados/settings.conf@. If you have already put the variables into the environment following the instructions above, you can use these commands to create an Arvados configuration file:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">cat >~/.config/arvados/settings.conf &lt;&lt;EOF\nARVADOS_API_HOST=$ARVADOS_API_HOST\nARVADOS_API_TOKEN=$ARVADOS_API_TOKEN\nEOF\n</span></code></pre>\n</notextile>\n\n{% include 'notebox_begin' %}\nThis will overwrite the file @~/.config/arvados/settings.conf@.\n\nArvados tools written in Python (most notably the @arv keep@ commands, @arv copy@, and @arv-mount@) search for configuration files following the XDG Base Directory Specification. This is uncommon, but if you have customized the @XDG_CONFIG_HOME@ environment variable, you may need to add @$HOME/.config@ to the @XDG_CONFIG_DIRS@ envirnoment variable to have all the tools find the same configuration.\n{% include 'notebox_end' %}\n\nh2. .bashrc\n\nAlternately, you may add the definitions of @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ to the @~/.bashrc@ file on the system where you intend to use the Arvados client.  
If you have already put the variables into the environment following the instructions above, you can use the commands below to append them to your @~/.bashrc@, which tells Bash to export them as environment variables in newly-started interactive shell sessions:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">echo \"export ARVADOS_API_HOST=$ARVADOS_API_HOST\" >> ~/.bashrc</span>\n$ <span class=\"userinput\">echo \"export ARVADOS_API_TOKEN=$ARVADOS_API_TOKEN\" >> ~/.bashrc</span>\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/user/reference/cookbook.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Arvados SDK Examples\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nCode snippets for perform various tasks with the API are \"documented in the SDK section\":{{site.baseurl}}/sdk/python/cookbook.html .\n"
  },
  {
    "path": "doc/user/topics/arv-copy.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Using arv-copy\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis tutorial describes how to copy Arvados objects from one cluster to another by using @arv-copy@.\n\n{% include 'tutorial_expectations' %}\n\nh2. arv-copy\n\n@arv-copy@ allows users to copy collections, workflow definitions and projects from one cluster to another.  You can also use @arv-copy@ to import resources from HTTP URLs into Keep.\n\nFor projects, @arv-copy@ will copy all the collections workflow definitions owned by the project, and recursively copy subprojects.\n\nFor workflow definitions, @arv-copy@ will recursively go through the workflow and copy all associated dependencies (input collections and Docker images).\n\nFor example, let's copy from the <a href=\"https://playground.arvados.org/\">Arvados Playground</a>, also known as *pirca*, to *dstcl*. The names *pirca* and *dstcl* are interchangable with any cluster ID. You can find the cluster ID from the prefix of the UUID of the object you want to copy. For example, in <notextile><code><strong>zzzzz</strong>-4zz18-tci4vn4fa95w0zx</code></notextile>, the cluster name is *zzzzz*.\n\nIn order to communicate with both clusters, you must create custom configuration files for each cluster.  The \"Getting an API token\":{{site.baseurl}}/user/reference/api-tokens.html page describes how to get a token and create a configuration file.  However, instead of creating the default @~/.config/arvados/settings.conf@ you need two configuration files, one for each cluster, with filenames in the format of <notextile><code><strong>ClusterID</strong>.conf</code></notextile>. For this example, follow these steps:\n\n{% include 'notebox_begin' %}\n@arv-copy@ searches for configuration files following the XDG Base Directory Specification. This is uncommon, but if you have customized the @XDG_CONFIG_HOME@ environment variable, save both configuration files under @$XDG_CONFIG_HOME/arvados/@ instead of the default @~/.config/arvados/@ shown below.\n{% include 'notebox_end' %}\n\n# Open the \"Arvados Playground Workbench\":https://playground.arvados.org.\n# On the system where you'll run @arv-copy@, start a new file named @~/.config/arvados/pirca.conf@ in your editor.\n# In Workbench, open the user menu in the upper right, and select \"Get API token.\"\n# In the Workbench \"Get API Token\" dialog, under the \"API Host\" header, copy the value to your clipboard using the button.\n# In your editor, write the text @ARVADOS_API_HOST=@, then paste the \"API Host\" value you copied in the previous step, and start a new line.\n# In the Workbench \"Get API Token\" dialog, under the \"API Token\" header, copy the value to your clipboard using the button.\n# In your editor, write the text @ARVADOS_API_TOKEN=@, then paste the \"API Token\" value you copied in the previous step, and start a new line.\n# Review your work. In your editor, @pirca.conf@ should look like this, with a different value for @ARVADOS_API_TOKEN@:\n  <pre><code>ARVADOS_API_HOST=pirca.arvadosapi.com\nARVADOS_API_TOKEN=v2/jutro-gj3su-12345abcde67890/abcdefghijklmnopqrstuvwxyz1234567890\n</code></pre> If it looks right, save and close the file.\n# Open Workbench for your destination cluster *dstcl*.\n# On the system where you'll run @arv-copy@, start a new file named <notextile><code>~/.config/arvados/<b>dstcl</b>.conf</code></notextile> in your editor. 
Replace *@dstcl@* in the filename with the actual cluster ID of your destination cluster.\n# Repeat steps 3-8 to create a settings file with credentials for *dstcl*.\n\nh3. How to copy a collection\n\nFirst, determine the UUID or portable data hash of the collection you want to copy from the source cluster. The UUID can be copied with the \"Copy UUID\" toolbar button, found on the collection details panel, or from the URL bar (the part after @collections/...@).\n\nNow copy the collection from *pirca* to *dstcl*. We will use the UUID @pirca-4zz18-xa0i7qjide8td5d@ as an example. You can find this collection on <a href=\"https://playground.arvados.org/collections/pirca-4zz18-xa0i7qjide8td5d\">playground.arvados.org</a>.  Because the UUID starts with @pirca@, @arv-copy@ will infer that the source cluster is @pirca@.\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv-copy --dst dstcl pirca-4zz18-xa0i7qjide8td5d</span>\npirca-4zz18-xa0i7qjide8td5d: 6.1M / 6.1M 100.0%\narvados.arv-copy[1234] INFO: Success: created copy with uuid dstcl-4zz18-xxxxxxxxxxxxxxx\n</code></pre>\n</notextile>\n\nYou can also copy by content address.  In this case, the content address does not include a specific cluster id, and you need to include the source cluster with @--src@.\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv-copy --src pirca --dst dstcl 2463fa9efeb75e099685528b3b9071e0+438</span>\n2463fa9efeb75e099685528b3b9071e0+438: 6.1M / 6.1M 100.0%\narvados.arv-copy[1234] INFO: Success: created copy with uuid dstcl-4zz18-xxxxxxxxxxxxxxx\n</code></pre>\n</notextile>\n\nThe output of @arv-copy@ displays the UUID of the collection generated in the destination cluster. By default, the output is placed in your home project in the destination cluster. If you want to place your collection in an existing project, you can specify the project you want it to be in using the @--project-uuid@ option followed by the project UUID.\n\nFor example, this will copy the collection to project @dstcl-j7d0g-a894213ukjhal12@ in the destination cluster.  Since the project UUID starts with @dstcl@, @arv-copy@ will infer that the destination cluster is @dstcl@.\n\n<notextile> <pre><code>~$ <span class=\"userinput\">arv-copy --project-uuid dstcl-j7d0g-a894213ukjhal12 pirca-4zz18-xa0i7qjide8td5d</span>\n</code></pre>\n</notextile>\n\nAdditionally, if you need to specify the storage classes in which to save the copied data on the destination cluster, you can do so by using the @--storage-classes LIST@ argument, where @LIST@ is a comma-separated list of storage class names.\n\nh3. How to copy a workflow\n\nCopying workflows requires @arvados-cwl-runner@ to be available in your @$PATH@.\n\nWe will use the UUID @jutro-7fd4e-mkmmq53m1ze6apx@ as an example workflow.\n\nArv-copy will infer the source cluster is @jutro@ from the object UUID, and the destination cluster is @pirca@ from @--project-uuid@.\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv-copy --project-uuid pirca-j7d0g-ecak8knpefz8ere jutro-7fd4e-mkmmq53m1ze6apx</span>\nae480c5099b81e17267b7445e35b4bc7+180: 23M / 23M 100.0%\n2463fa9efeb75e099685528b3b9071e0+438: 156M / 156M 100.0%\njutro-4zz18-vvvqlops0a0kpdl: 94M / 94M 100.0%\n2020-08-19 17:04:13 arvados.arv-copy[4789] INFO:\n2020-08-19 17:04:13 arvados.arv-copy[4789] INFO: Success: created copy with uuid pirca-7fd4e-s0tw9rfbkpo2fmx\n</code></pre>\n</notextile>\n\nThe name, description, and workflow definition from the original workflow will be used for the destination copy. In addition, any *collections* and *Docker images* referenced in the source workflow definition will also be copied to the destination.\n\nIf you would like to copy the object without dependencies, you can use the @--no-recursive@ flag.\n\n
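For example, this would copy only the workflow definition record from the example above, skipping its dependencies:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv-copy --no-recursive --project-uuid pirca-j7d0g-ecak8knpefz8ere jutro-7fd4e-mkmmq53m1ze6apx</span>\n</code></pre>\n</notextile>\n\n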
h3. How to copy a project\n\nWe will use the UUID @jutro-j7d0g-xj19djofle3aryq@ as an example project.\n\nArv-copy will infer the source cluster is @jutro@ from the source project UUID, and the destination cluster is @pirca@ from @--project-uuid@.\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv-copy --project-uuid pirca-j7d0g-lr8sq3tx3ovn68k jutro-j7d0g-xj19djofle3aryq</span>\n2021-09-08 21:29:32 arvados.arv-copy[6377] INFO:\n2021-09-08 21:29:32 arvados.arv-copy[6377] INFO: Success: created copy with uuid pirca-j7d0g-ig9gvu5piznducp\n</code></pre>\n</notextile>\n\nThe name and description of the original project will be used for the destination copy.  If a project already exists with the same name, collections and workflow definitions will be copied into the project with the same name.\n\nIf you would like to copy the project but not its subprojects, you can use the @--no-recursive@ flag.\n\nh3. Importing HTTP resources to Keep\n\nYou can also use @arv-copy@ to copy the contents of an HTTP URL into Keep.  When you do this, Arvados keeps track of the original URL the resource came from.  This allows you to refer to the resource by its original URL in workflow inputs, but actually read from the local copy in Keep.\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv-copy --project-uuid tordo-j7d0g-lr8sq3tx3ovn68k https://example.com/index.html</span>\ntordo-4zz18-dhpb6y9km2byb94\n2023-10-06 10:15:36 arvados.arv-copy[374147] INFO: Success: created copy with uuid tordo-4zz18-dhpb6y9km2byb94\n</code></pre>\n</notextile>\n\nIn addition, when importing from HTTP URLs, you may provide a different cluster than the destination in @--src@. This tells @arv-copy@ to search the other cluster for a collection associated with that URL, and if found, copy the collection from that cluster instead of downloading from the original URL.\n\nThe following @arv-copy@ command line options affect the behavior of HTTP import.\n\ntable(table table-bordered table-condensed).\n|_. Option |_. Description |\n|==--varying-url-params== VARYING_URL_PARAMS|A comma-separated list of URL query parameters that should be ignored when storing HTTP URLs in Keep.|\n|==--prefer-cached-downloads==|If an HTTP URL is found in Keep, skip upstream URL freshness check (will not notice if the upstream has changed, but also not error if upstream is unavailable).|\n"
  },
  {
    "path": "doc/user/topics/arv-docker.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Working with container images\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis page describes how to set up the runtime environment (e.g., the programs, libraries, and other dependencies needed to run a job) that a workflow step will be run in using \"Docker\":https://www.docker.com/ or \"Singularity\":https://sylabs.io/singularity/.  Docker and Singularity are tools for building and running containers that isolate applications from other applications running on the same node.  For detailed information, see the \"Docker User Guide\":https://docs.docker.com/userguide/ and the \"Introduction to Singularity\":https://sylabs.io/guides/3.5/user-guide/introduction.html.\n\nNote that Arvados always works with Docker images, even when it is configured to use Singularity to run containers. There are some differences between the two runtimes that can affect your containers. See the \"Singularity architecture\":{{site.baseurl}}/architecture/singularity.html page for details.\n\nThis page describes:\n\n# \"Create a custom image using a Dockerfile\":#create\n# \"Uploading an image to Arvados\":#upload\n# \"Sources of pre-built bioinformatics Docker images\":#sources\n\n{% include 'tutorial_expectations_workstation' %}\n\nYou also need to ensure that \"Docker is installed,\":https://docs.docker.com/installation/ the Docker daemon is running, and you have permission to access Docker.  You can test this by running @docker version@.  If you receive a permission denied error, your user account may need to be added to the @docker@ group.  If you have root access, you can add yourself to the @docker@ group using @$ sudo addgroup $USER docker@ then log out and log back in again; otherwise consult your local sysadmin.\n\nh2(#create). Create a custom image using a Dockerfile\n\nThis example shows how to create a Docker image and add the R package.\n\nFirst, create new directory called @docker-example@, in that directory create a file called @Dockerfile@.\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">mkdir docker-example-r-base</span>\n$ <span class=\"userinput\">cd docker-example-r-base</span>\n</code></pre>\n</notextile>\n\n<notextile>\n<pre><code>FROM ubuntu:bionic\nRUN apt-get update && apt-get -yq --no-install-recommends install r-base-core\n</code></pre>\n</notextile>\n\nThe \"RUN\" command is executed inside the container and can be any shell command line.  You are not limited to installing Debian packages.  You may compile programs or libraries from source and install them, edit systemwide configuration files, use other package managers such as @pip@ or @gem@, and perform any other customization necessary to run your program.\n\nYou can also visit the \"Docker tutorial\":https://docs.docker.com/get-started/part2/ for more information and examples.\n\nYou should add your Dockerfiles to the same source control repository as the Workflows that use them.\n\nh3. Create a new image\n\nWe're now ready to create a new Docker image.  Use @docker build@ to create a new image from the Dockerfile.\n\n<notextile>\n<pre><code>docker-example-r-base$ <span class=\"userinput\">docker build -t docker-example-r-base .</span>\n</code></pre>\n</notextile>\n\nh3. 
Verify image\n\nNow we can verify that \"R\" is installed:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">docker run -ti docker-example-r-base</span>\nroot@57ec8f8b2663:/# R\n\nR version 3.4.4 (2018-03-15) -- \"Someone to Lean On\"\nCopyright (C) 2018 The R Foundation for Statistical Computing\nPlatform: x86_64-pc-linux-gnu (64-bit)\n</code></pre>\n</notextile>\n\nh2(#upload). Upload your image\n\nFinally, we are ready to upload the new Docker image to Arvados.  Use @arv-keepdocker@ with the image repository name to upload the image.  Without arguments, @arv-keepdocker@ will print out the list of Docker images in Arvados that are available to you.\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv-keepdocker docker-example-r-base</span>\n2020-06-29 13:48:19 arvados.arv_put[769] INFO: Creating new cache file at /home/peter/.cache/arvados/arv-put/39ddb51ebf6c5fcb3d713b5969466967\n206M / 206M 100.0% 2020-06-29 13:48:21 arvados.arv_put[769] INFO:\n\n2020-06-29 13:48:21 arvados.arv_put[769] INFO: Collection saved as 'Docker image docker-example-r-base:latest sha256:edd10'\nzzzzz-4zz18-0tayximqcyb6uf8\n\n$ <span class=\"userinput\">arv-keepdocker images</span>\nREPOSITORY                      TAG         IMAGE ID      COLLECTION                     CREATED\ndocker-example-r-base           latest      sha256:edd10  zzzzz-4zz18-0tayximqcyb6uf8    Mon Jun 29 17:46:16 2020\n</code></pre>\n</notextile>\n\nYou are now able to specify the runtime environment for your program using @DockerRequirement@ in your workflow:\n\n<pre>\nhints:\n  DockerRequirement:\n    dockerPull: docker-example-r-base\n</pre>\n\nh3. Uploading Docker images to a shared project\n\nDocker images are subject to normal Arvados permissions.  If you wish to share your Docker image with others, you should use @arv-keepdocker@ with the @--project-uuid@ option to add the image to a shared project and ensure that metadata is set correctly.\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv-keepdocker docker-example-r-base --project-uuid zzzzz-j7d0g-xxxxxxxxxxxxxxx</span>\n</code></pre>\n</notextile>\n\nh2(#sources). Sources of pre-built images\n\nIn addition to creating your own containers, there are a number of resources where you can find bioinformatics tools already wrapped in container images:\n\n\"BioContainers\":https://biocontainers.pro/\n\n\"Dockstore\":https://dockstore.org/\n\n\"Docker Hub\":https://hub.docker.com/\n"
  },
  {
    "path": "doc/user/topics/arvados-sync-external-sources.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: admin\ntitle: \"Synchronizing from external sources\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThe @arvados-sync-users@ and @arvados-sync-groups@ tools allow to manage Arvados users & groups from external sources.\n\nThese tools are designed to be run periodically reading a file created by a remote auth system (ie: LDAP) dump script, applying what's included on the file as the source of truth.\n\nbq. NOTE: Both tools need to perform several administrative tasks on Arvados, so must be run using a superuser token via @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ environment variables or @~/.config/arvados/settings.conf@ file.\n\nh1. Using arvados-sync-users\n\nThis tool reads a CSV (comma-separated values) file having information about user accounts and their expected state on Arvados.\n\nEvery line on the file should have 5 fields:\n\n# A user identifier: it could be an email address (default) or a username.\n# The user's first name.\n# The user's last name.\n# The intended user's active state.\n# The intended user's admin state: will always be read as @false@ when @active=false@.\n\nThe last 2 fields should be represented as @true@/@false@, @yes@/@no@, or @1@/@0@ values.\n\nh2. Options\n\nThe following command line options are supported:\n\ntable(table table-bordered table-condensed).\n|_. Option |_. Description |\n|==--help==|This list of options|\n|==--case-insensitive==|Uses case-insensitive username matching|\n|==--deactivate-unlisted==|Deactivate users that aren't listed on the input file. (Current & system users won't be affected)|\n|==--user-id==|Identifier to use in looking up user. One of 'email' or 'username' (Default: 'email')|\n|==--verbose==|Log informational messages|\n|==--version==|Print version and exit|\n\nThe tool will create users when needed, and update those existing records to match the desired state described by the fields on the CSV file.\nSystem users like the root and anonymous are unaffected by this tool.\nIn the case of a @LoginCluster@ federation, this tool should be run on the cluster that manages the user accounts, and will fail otherwise.\n\nh2. Example\n\nTo sync users using the username to identify every account, reading from some @external_users.csv@ file and deactivating existing users that aren't included in it, the command should be called as follows:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arvados-sync-users --deactivate-unlisted --user-id username /path/to/external_users.csv </span>\n</code></pre>\n</notextile>\n\nh1. Using arvados-sync-groups\n\nThis tool reads a CSV (comma-separated values) file having information about external groups and their members. When running it for the first time, it'll create a special group named 'Externally synchronized groups' meant to be the parent of all the remote groups.\n\nEvery line on the file should have 3 values: a group name, a local user identifier and a permission level, meaning that the named user is a member of the group with the provided permission. The tool will create the group if it doesn't exist, and add the user to it. If any group member is not present on the input file, it will be removed from the group.\n\nUsers can be identified by their email address or username: the tool will check if every user exist on the system, and report back when not found. 
Permission level can be one of the following: @can_read@, @can_write@ or @can_manage@, giving the group member read, read/write, or managing privileges on the group. For backwards compatibility purposes, if any record omits the third (permission) field, it will default to @can_write@ permission. You can read more about permissions in the \"group management admin guide\":{{ site.baseurl }}/admin/group-management.html.\n\nWhen using @arvados-sync-groups@, consider setting @Users.CanCreateRoleGroups: false@ in your \"cluster configuration\":{{site.baseurl}}/admin/config.html to prevent users from creating additional groups.\n\nh2. Options\n\nThe following command line options are supported:\n\ntable(table table-bordered table-condensed).\n|_. Option |_. Description |\n|==--help==|This list of options|\n|==--case-insensitive==|Uses case-insensitive username matching|\n|==--parent-group-uuid==|UUID of group to own all the externally synchronized groups|\n|==--user-id==|Identifier to use in looking up user. One of 'email' or 'username' (Default: 'email')|\n|==--verbose==|Log informational messages (Default: False)|\n|==--version==|Print version and exit|\n\nh2. Examples\n\nTo sync groups using the username to identify every account, reading from some @external_groups.csv@ file, the command should be called as follows:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arvados-sync-groups --user-id username /path/to/external_groups.csv </span>\n</code></pre>\n</notextile>\n\nIf you want to use a specific preexisting group as the parent of all the remote groups, you can do it this way:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arvados-sync-groups --parent-group-uuid &lt;preexisting group UUID&gt; --user-id username /path/to/external_groups.csv </span>\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/user/topics/collection-versioning.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Using collection versioning\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nWhen collection versioning is enabled, updating certain collection attributes (@name@, @description@, @properties@, @manifest_text@) will save a copy of the collection state, previous to the update. This copy (a new collection record) will have its own @uuid@, and a @current_version_uuid@ attribute pointing to the current version's @uuid@.\n\nEvery collection has a @version@ attribute that indicates its version number, starting from 1 on new collections and incrementing by 1 with every versionable update. All collections point to their most current version via the @current_version_uuid@ attribute, being @uuid@ and @current_version_uuid@ equal on those collection records that are the current version of themselves. Note that the \"current version\" collection record doesn't change its @uuid@, \"past versions\" are saved as new records every time it's needed, pointing to the current collection record.\n\nA version will be saved when one of the following conditions is true:\n\nOne is by \"configuring (system-wide) the collection's idle time\":{{site.baseurl}}/admin/collection-versioning.html. This idle time is checked against the @modified_at@ attribute so that the version is saved when one or more of the previously enumerated attributes get updated and the @modified_at@ is at least at the configured idle time in the past. This way, a frequently updated collection won't create lots of version records that may not be useful.\n\nThe other way to trigger a version save, is by setting @preserve_version@ to @true@ on the current version collection record: this ensures that the current state will be preserved as a version the next time it gets updated. This includes either creating a new collection or updating a preexisting one. In the case of using @preserve_version = true@ on a collection's create call, the new record state will be preserved as a snapshot on the next update.\n\nh3. Collection's past versions behavior & limitations\n\nPast version collection records are read-only, if you need to make changes to one of them, the suggested approach is to copy it into a new collection before updating.\n\nSome attributes are automatically synced when they change on the current version: @owner_uuid@, @delete_at@, @trash_at@, @is_trashed@, @replication_desired@ and @storage_classes_desired@. This way, old versions follow the current one on several configurations. In the special case that a current version's @uuid@ gets updated, their past versions get also updated to point to the newer UUID. When a collection is deleted, any past versions are deleted along with it.\n\nPermissions on past versions are the same as their current version, the system does not allow attaching permission links to old versions. If you need to give special access to someone to a particular old version, the correct procedure is by copying it as a new collection.\n\nh3. Example: Accessing past versions of a collection\n\nTo request a particular collection with all its versions you should request a list filtering the current version's UUID and passing the @include_old_versions@ query parameter. 
For example, using the @arv@ command line client:\n\n<pre>\n$ arv collection index --filters '[[\"current_version_uuid\", \"=\", \"o967z-4zz18-ynmlhyjbg1arnr2\"]]' --include-old-versions\n{\n \"items\":[\n  {\n   \"uuid\":\"o967z-4zz18-i3ucessyo6xxadt\",\n   \"created_at\":\"2018-10-05T14:43:38.916885000Z\",\n   \"modified_at\":\"2018-10-05T14:44:31.098019000Z\",\n   \"version\":1,\n   \"current_version_uuid\":\"o967z-4zz18-ynmlhyjbg1arnr2\"\n  },\n  {\n   \"uuid\":\"o967z-4zz18-ynmlhyjbg1arnr2\",\n   \"created_at\":\"2018-10-05T14:43:38.916885000Z\",\n   \"modified_at\":\"2018-10-05T14:44:31.078643000Z\",\n   \"version\":2,\n   \"current_version_uuid\":\"o967z-4zz18-ynmlhyjbg1arnr2\"\n  }\n ],\n \"items_available\":2\n}\n</pre>\n\nTo access a specific collection version using filters:\n\n<pre>\n$ arv collection index --filters '[[\"current_version_uuid\", \"=\", \"o967z-4zz18-ynmlhyjbg1arnr2\"], [\"version\", \"=\", 1]]' --include-old-versions\n{\n \"items\":[\n  {\n   \"uuid\":\"o967z-4zz18-i3ucessyo6xxadt\",\n   \"created_at\":\"2018-10-05T14:43:38.916885000Z\",\n   \"modified_at\":\"2018-10-05T14:44:31.098019000Z\",\n   \"version\":1,\n   \"current_version_uuid\":\"o967z-4zz18-ynmlhyjbg1arnr2\"\n  }\n ],\n \"items_available\":1\n}\n</pre>\n\nYou can also access it directly via a GET request using its UUID:\n\n<pre>\n$ arv collection get --uuid o967z-4zz18-i3ucessyo6xxadt\n{\n \"uuid\":\"o967z-4zz18-i3ucessyo6xxadt\",\n \"created_at\":\"2018-10-05T14:43:38.916885000Z\",\n \"modified_at\":\"2018-10-05T14:44:31.098019000Z\",\n \"version\":1,\n \"current_version_uuid\":\"o967z-4zz18-ynmlhyjbg1arnr2\"\n}\n</pre>\n\nh3. Example: Ensuring a version is preserved\n\nAs stated before, regardless of the collection's auto-save idle time cluster configuration, the user has the ability to request that a particular collection state should be preserved.\n\nWhen working on a collection, if there's a need to preserve the current state as a new version, the @preserve_version@ attribute should be set to @true@. This will trigger a new version creation on the next update, keeping this \"version 2\" state as a snapshot.\n\n<pre>\n$ arv collection update --uuid o967z-4zz18-ynmlhyjbg1arnr2 -c '{\"preserve_version\":true}'\n{\n \"uuid\":\"o967z-4zz18-ynmlhyjbg1arnr2\",\n \"created_at\":\"2018-10-05T14:43:38.916885000Z\",\n \"modified_at\":\"2018-10-05T15:12:57.986454000Z\",\n \"version\":2,\n \"current_version_uuid\":\"o967z-4zz18-ynmlhyjbg1arnr2\",\n \"preserve_version\":true\n}\n</pre>\n\nOnce the @preserve_version@ attribute is set to @true@, it cannot be changed to @false@ and it will only be reset when a versionable update on the collection triggers a version save.\n"
  },
  {
    "path": "doc/user/topics/external-inputs.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Using external data sources in workflows\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Introduction\":#introduction\n# \"Limitations\":#limitations\n# \"Accessing S3 data in public buckets\":#public-buckets\n# \"Accessing S3 data with an access key\":#s3-access-key\n## \"Adding an access key in Arvados Workbench\":#add-access-key-workbench\n## \"Adding an access key via the Arvados API\":#add-access-key-api\n## \"Running a workflow using a stored access key\":#use-s3-access-key\n## \"Troubleshooting download errors using stored access keys\":#troubleshooting-access-keys\n### \"Multiple credentials found\":#multiple-credentials-found\n### \"boto3 did not find any local AWS credentials to use to download from S3\":#aws-credentials-not-found\n## \"Running a workflow using a local access key\":#local-access-key\n# \"Controlling when Arvados downloads external data\":#download-options\n## \"Download data from the Arvados compute node\":#defer-downloads\n## \"Prioritize cached data collections\":#prefer-cached-downloads\n## \"Ignore varying URL parameters when caching collections\":#varying-url-params\n\nh2(#introduction). Introduction\n\nMany useful references and data sets are available on the web and in S3. To help you work with this data, Arvados lets you specify workflow inputs with URL paths like:\n\n{% codeblock as yaml %}\nexample_web_input:\n  class: File\n  path: \"https://HOST_NAME/FILE_PATH\"\nexample_s3_input:\n  class: File\n  path: \"s3://BUCKET_NAME/FILE_PATH\"\n{% endcodeblock %}\n\nWhen Arvados starts this workflow, before it starts any workflow steps, it will automatically download each input URL to an Arvados collection. This ensures you retain a complete record of the analysis you ran. Arvados stores details about the data source as collection metadata and can avoid re-downloading inputs it has downloaded before.\n\nh2(#limitations). Limitations\n\nExternal inputs have some limitations you should be aware of before you start. These limitations may be lifted in a future release of Arvados.\n\nExternal input URLs can only refer to a single file. You cannot specify an entire S3 bucket or subdirectory as an input. You must list each file you want to work with as a separate input.\n\nArvados only knows how to work with one S3 access key at a time. If you need to work with data sets that require different credentials, first transfer them to Keep, then analyze them from there.\n\nh2(#public-buckets). Accessing S3 data in public buckets\n\nIf your inputs refer to public S3 buckets and don't require an access key, run @arvados-cwl-runner@ with the @--s3-public-bucket@ option. For example:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arvados-cwl-runner <strong>--s3-public-bucket</strong> [… other options…] --submit <strong>WORKFLOW.cwl PUBLIC-S3-FILES.yml</strong>\n</code></pre>\n</notextile>\n\nh2(#s3-access-key). Accessing S3 data with an access key\n\nIf you want to access data in an S3 bucket that requires an access key, you can register the access key with Arvados. Workflows will be able to find and use the correct access key automatically.\n\nh3(#add-access-key-workbench). 
Adding an S3 access key in Arvados Workbench\n\nIn the left-hand navigation, open \"External Credentials.\" In the upper right, use the blue ＋&nbsp;New&nbsp;External&nbsp;Credential button to add an S3 access key.\n\n!{max-width: 100%;}{{ site.baseurl }}/images/workbench-external-credentials-list.png(Screenshot of the Arvados Workbench External Credentials listing.)!\n\nFill out the New External Credential dialog as follows for S3 credentials:\n\n* Give the credential a useful name, and optionally a description.\n* The Credential Class must be *exactly* @arv:aws_access_key@.\n* The External ID is the access key ID (the alphanumeric string that usually starts with \"AKIA\").\n* The Secret is the secret access key (the random string).\n* Set the \"Expires at\" date to a date no later than the expiry of the underlying access key. Arvados will automatically stop using access keys after they have expired.\n* The applicable scopes identify which S3 bucket(s) this access key should be used for. Enter each scope in the format <notextile><code>s3://<strong>BUCKET_NAME</strong></code></notextile> for each bucket the access key can access. Each scope is listed under the input as you add it and can be removed if you enter a scope incorrectly. You can enter the special value @s3://*@ if you want this credential to be used for any S3 inputs.\n\nFor illustration, the Example Credential being filled out below can be used to access the @arvados-example@ and @arvados-doc@ S3 buckets.\n\n!{max-width: 100%; box-shadow: 0 0 6px black;}{{ site.baseurl }}/images/workbench-external-credentials-add.png(Screenshot of the Arvados Workbench New External Credential dialog with fields filled in with sample values.)!\n\nAfter you create the credential, you can control who is allowed to use it by sharing it with other users and groups with at least Read access. In the left-hand navigation, open \"External Credentials.\" Find your credential in the listing and right-click it. Select \"Share\" from the context menu. Use the dialog to add and remove permissions.\n\nOnce you have finished setting up access keys, you can \"run a workflow with S3 inputs\":#use-s3-access-key.\n\nh3(#add-access-key-api). Adding an S3 access key via the Arvados API\n\nS3 access keys are stored in Arvados as \"credentials\":{{ site.baseurl }}/api/methods/credentials.html. Below is a body you could use with either the command-line tool @arv credential create --credential=…@ or an SDK like:\n\n{% codeblock as python %}\narv_client.credentials().create(\n    body={'credential': ...}\n).execute()\n{% endcodeblock %}\n\n* Give the credential a useful @name@ and optionally a @description@.\n* The @credential_class@ must be *exactly* @\"arv:aws_access_key\"@.\n* The @external_id@ is the access key ID (the alphanumeric string that usually starts with \"AKIA\").\n* The @secret@ is the secret access key (the random string).\n* Set the @expires_at@ timestamp to a date no later than the expiry of the underlying access key. Arvados will automatically stop using access keys after they have expired.\n* The @scopes@ identify which S3 bucket(s) this access key should be used for. Enter each scope in the format <notextile><code>\"s3://<strong>BUCKET_NAME</strong>\"</code></notextile> for each bucket the access key can access. The example credential below can be used to access the @arvados-example@ and @arvados-doc@ S3 buckets. 
You can enter the special value @s3://*@ if you want this credential to be used for any S3 inputs.\n\n<notextile>\n<pre><code class=\"json\">{\n  \"name\": \"Example Credential\",\n  \"description\": \"&lt;p&gt;This is an example credential for the Arvados documentation.&lt;/p&gt;\",\n  \"credential_class\": \"arv:aws_access_key\",\n  \"external_id\": \"AKIAS3ABCDEFGHIJKLMN\",\n  \"secret\": \"ZYXWVUTSRQPONMLKJIHGFEDCBA\",\n  \"expires_at\": \"2038-01-19T03:14:07Z\",\n  \"scopes\": [\n    \"s3://arvados-example\",\n    \"s3://arvados-doc\"\n  ]\n}\n</code></pre>\n</notextile>\n\nAfter you create the credential, you can control who is allowed to use it by creating permission links to other users and groups. For more information, refer to the \"Working with permissions section of the Python SDK code cookbook\":{{ site.baseurl }}/sdk/python/cookbook.html#working-with-permissions and \"links API reference\":{{ site.baseurl }}/api/methods/links.html.\n\nh3(#use-s3-access-key). Running a workflow using a stored access key\n\nAfter you register an access key for an S3 bucket, you can use an S3 URL for that bucket in the place of any workflow file input. When Arvados starts this workflow, before it starts any workflow steps, it will automatically find the right credentials to download the file from the bucket. For example, you can submit the workflow from the command line by running:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arvados-cwl-runner [… other options…] --defer-downloads --submit <strong>WORKFLOW.cwl PRIVATE-S3-FILES.yml</strong>\n</code></pre>\n</notextile>\n\nNote that you *must* \"use @--defer-downloads@ in this case\":#defer-downloads.\n\nh3(#troubleshooting-access-keys). Troubleshooting download errors using stored access keys\n\nh4(#multiple-credentials-found). \"Multiple credentials found\"\n\nIf you submit a workflow to Arvados and it reports this error:\n\n<notextile>\n<pre>WARNING Download error: Multiple AWS access keys with scope 's3://BUCKET_NAME' found in Arvados. Run `arvados-cwl-runner` with the `--use-credential` option to provide the UUID of the credential to use.\n</pre>\n</notextile>\n\nYou can run @arvados-cwl-runner@ with the @--use-credential@ option to specify the UUID of the credential to use:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arvados-cwl-runner <strong>--use-credential=zzzzz-oss07-abcde12345fghij</strong> [… other options…] --defer-downloads --submit <strong>WORKFLOW.cwl PRIVATE-S3-FILES.yml</strong>\n</code></pre>\n</notextile>\n\nh4(#aws-credentials-not-found). \"boto3 did not find any local AWS credentials to use to download from S3\"\n\nIf you submit a workflow to Arvados and it fails with logs like this:\n\n<notextile>\n<pre>WARNING Download error: boto3 did not find any local AWS credentials to use to download from S3. If you want to use credentials registered with Arvados, use --defer-downloads. If the bucket is public, use --s3-public-bucket.\nERROR Workflow error, try again with --debug for more information:\nCan't handle 's3://example-bucket/example-file'\nContainer exited with status code 1\n</pre>\n</notextile>\n\nThis means Arvados did not find an access key to use for this bucket. Double-check:\n\n* Is the access key registered as a credential in Arvados?\n* Does the user running the workflow have permission to use that credential? Can they see it in Workbench or get it from the API?\n* Does the credential have @s3://example-bucket@ or @s3://*@ in its list of scopes? 
Check both the credential scope and the workflow input to make sure the bucket name matches and doesn't have any typos.\n* Is the external credential expired?\n\nh3(#local-access-key). Running a workflow using a local access key\n\nIf you are running @arvados-cwl-runner@ on a system that already has credentials to access your S3 input files, you can run it with the @--enable-aws-credential-capture@ option to have Arvados download inputs with the same credentials that the @aws@ tool would use. This can be useful to run one-off workflows where you don't plan to reuse an access key. For example:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arvados-cwl-runner <strong>--enable-aws-credential-capture</strong> [… other options…] --submit <strong>WORKFLOW.cwl PRIVATE-S3-FILES.yml</strong>\n</code></pre>\n</notextile>\n\nh2(#download-options). Controlling when Arvados downloads external data\n\nWhen Arvados downloads external input data, the default behavior is designed to prioritize the predictability and reproducibility of your workflows. Several options are available to customize this behavior.\n\nh3(#defer-downloads). Download data from the Arvados compute node\n\nBy default, @arvados-cwl-runner@ downloads input data from the system where you launch it. This aims to let you know about any problems with the input sources as soon as possible. However, the system where you run @arvados-cwl-runner@ may not be the best suited to download very large input files. If you prefer to download input files from the Arvados compute node that runs your workflow, run @arvados-cwl-runner@ with the @--defer-downloads@ option.\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arvados-cwl-runner <strong>--defer-downloads</strong> [… other options…] --submit <strong>WORKFLOW.cwl EXTERNAL-INPUTS.yml</strong>\n</code></pre>\n</notextile>\n\nh3(#prefer-cached-downloads). Prioritize cached data collections\n\nBy default, @arvados-cwl-runner@ checks the headers of every input URL to determine whether the external data has been updated and needs to be re-downloaded. These checks take a little time and will fail if the external data is no longer accessible. You can run @arvados-cwl-runner@ with the @--prefer-cached-downloads@ option to skip these checks and use any available collection caches. This will let you run the workflow even if the external data is no longer accessible, but that means the workflow may not be reproducible on Arvados clusters that don't have the collection cache.\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arvados-cwl-runner <strong>--prefer-cached-downloads</strong> [… other options…] --submit <strong>WORKFLOW.cwl EXTERNAL-INPUTS.yml</strong>\n</code></pre>\n</notextile>\n\nh3(#varying-url-params). Ignore varying URL parameters when caching collections\n\nBy default, @arvados-cwl-runner@ expects that every unique URL may refer to a unique resource and downloads each to a new collection cache. Some HTTP/S URLs include time-sensitive signatures or tokens in their query parameters that refer to the same underlying resource. You can identify those parameters to Arvados by running @arvados-cwl-runner@ with the @--varying-url-params@ option. This option takes a comma-separated list of parameter name(s). Arvados will ignore the values of these parameters in the URL when determining whether or not a resource has already been downloaded so you can avoid redundant downloads. 
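A common case is pre-signed URLs, where query parameters such as @X-Amz-Signature@ and @X-Amz-Expires@ change with every request even though the underlying file does not. 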
For example:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arvados-cwl-runner <strong>--varying-url-params=\"NAME1,NAME2,…\"</strong> [… other options…] --submit <strong>WORKFLOW.cwl WEB-FILES.yml</strong>\n</code></pre>\n</notextile>\n"
  },
  {
    "path": "doc/user/topics/link-accounts.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Linking alternate login accounts\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nThis page describes how to link additional login accounts to the same Arvados account.  This can be used to migrate login accounts, for example, from one Google account to another.  It can also be used to migrate login providers, for example from LDAP to Google.  In order to do this, you must be able to log into both the \"old\" and \"new\" accounts.\n\nbq. NOTE: If you need to link your accounts on an Arvados cluster federation where user management is centralized, this feature may not be available. If that's the case, the federation admin can do the linking manually.\n\nh2. Link accounts\n\nFollow this process to link the \"new\" login to the \"old\" login.\n\n# Log in using the \"old\" account\n# Under the users menu, choose *Link account*\n# On the link accounts page, press the button *Add another login to this account*\n# Follow login instructions from the login provider (eg Google)\n# You will be returned to the *Link accounts* confirmation page.\n# Press the *Link account* button to confirm.\n# After the accounts are linked, you will be returned to the dashboard.\n# Both the \"old\" and \"new\" logins will now log in to the same Arvados account.\n\nh2. Link accounts (alternate flow)\n\nYou can also link accounts starting with logging into the \"new\" account first.\n\n# Log in using the \"new\" account\n# Under the users menu, choose *Link account* (if the user is inactive, there will be a link on the inactive user page)\n# On the link accounts page, press the button *Use this login to access another account*\n# Follow login instructions from the login provider (eg Google)\n# You will be returned to the *Link accounts* confirmation page.\n# Press the *Link account* button to confirm.\n# After the accounts are linked, you will be returned to the dashboard.\n# Both the \"old\" and \"new\" logins will now log in to the same Arvados account.\n"
  },
  {
    "path": "doc/user/topics/service-containers.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Running web services in Arvados containers\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n# \"Introduction to service containers\":#introduction\n# \"Limitations\":#limitations\n# \"Example service container: nginx web server\":#eg-intro\n# \"Running a service container\":#eg-running\n## \"Running with CWL\":#eg-cwl\n## \"Running with an Arvados API client\":#eg-api\n# \"Connecting to a service container\":#eg-connecting\n## \"Connecting through Workbench\":#eg-workbench\n## \"Getting a connection URL from the Arvados API\":#eg-url\n# \"Stopping a service container\":#eg-stopping\n# \"Further reading\":#further-reading\n\nh2(#introduction). Introduction to service containers\n\nArvados 3.2.0 introduces service containers. If your container runs a web service, you can have Arvados expose that service at a dynamically assigned hostname+port combination. After you run a workflow in Arvados, you can run interactive data analysis and visualization tools as service containers to explore the results.\n\nServices can have different access levels:\n\n* If you expose a public service, Arvados will let anyone connect to it.\n* If you expose a private service, Arvados will require the initial connection to provide an Arvados API token from a user who requested this container. Arvados will refuse any other connections.\n\nh2(#limitations). Limitations\n\nUnfortunately, not every interactive container can be run as an Arvados service container. Here are some limitations you should be aware of before you start. These limitations may be lifted in a future release of Arvados.\n\nService containers require the Arvados administrator to configure the hostname(s) and port(s) to use for service containers. If you're not sure whether your Arvados cluster supports service containers, check with your administrator.\n\nArvados can only expose services that use plain HTTP in the container. Other protocols are currently not supported. For example, Jupyter notebooks require websockets to retrieve the results of running code. If you try to run a Jupyter notebook as an Arvados service container, you'll be able to connect and start a notebook, but you won't be able to run any code inside of it.\n\nIf you run a service container as part of a larger workflow, the workflow supervisor will wait for the service container to finish before it considers the workflow finished. This is okay if the service reports progress or provides debugging for a main process that takes a limited time, but you probably don't want this if your service container waits for user input to quit or runs indefinitely. To run dedicated interactive services, consider starting them separately from your main workflow, and launching them with @arvados-cwl-runner --local@ to avoid the overhead of a separate supervisor container.\n\nh2(#eg-intro). Example service container: nginx web server\n\nThis page demonstrates different ways to launch and access a service container using the \"nginx web server\":https://hub.docker.com/_/nginx as an example. This page assumes you are already familiar with basic \"data(Arvados organizing data tutorial)\":{{ site.baseurl }}/user/tutorials/tutorial-projects.html and \"workflow(Arvados workflow running tutorial)\":{{ site.baseurl }}/user/tutorials/tutorial-workflow-workbench.html management in Arvados.\n\nh2(#eg-running). 
Running a service container\n\nUsers who want to start service containers on demand can do so using CWL. If you're writing automation for Arvados, you can also submit service container requests directly through the Arvados API.\n\nh3(#eg-cwl). Running with CWL\n\nThis CWL runs nginx, exposes port 80 through Arvados, and takes one input with the directory of files to serve. Save this content as @nginx.cwl@:\n\n{% codeblock as yaml %}\n#!/usr/bin/env cwl-runner\n# nginx.cwl\ncwlVersion: v1.2\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\n  cwltool: \"http://commonwl.org/cwltool#\"\n\nclass: CommandLineTool\ninputs:\n  siteFiles:\n    type: Directory\nrequirements:\n  DockerRequirement:\n    dockerPull: library/nginx:1.29\n  InitialWorkDirRequirement:\n    listing:\n      - entry: \"$(inputs.siteFiles)\"\n        entryname: /usr/share/nginx/html\nhints:\n  ResourceRequirement:\n    # You may adjust these values as desired.\n    coresMin: 2\n    coresMax: 8\n    ramMin: 128\n    ramMax: 1024\n  arv:PublishPorts:\n    publishPorts:\n      \"80\":\n        serviceAccess: public\n        label: Web Server\nbaseCommand: nginx\narguments:\n  - \"-g\"\n  - \"daemon off;\"\noutputs: {}\n{% endcodeblock %}\n\nNext, you'll need a directory on your system that contains some HTML and supporting content you want to serve from the container. You can use any web content you like. If you don't have anything handy, create a new directory and save this web page as @index.html@ inside it:\n\n{% codeblock as html %}\n<!-- Example index.html -->\n<!doctype html>\n<html lang=en>\n  <head>\n    <meta charset=utf-8>\n    <title>Test Page</title>\n  </head>\n  <body>\n    <p>Hello from inside an Arvados service workflow!</p>\n  </body>\n</html>\n{% endcodeblock %}\n\nWrite an input file @nginx-in.yml@ that points @siteFiles@ to the directory with your HTML content:\n\n<notextile>\n<pre><code># nginx-in.yml\nsiteFiles:\n  class: Directory\n  path: \"<span class=\"userinput\">/home/you/arvados-nginx-site</span>\"\n</code></pre>\n</notextile>\n\nLaunch this CWL with your input using @arvados-cwl-runner@:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arvados-cwl-runner --local nginx.cwl nginx-in.yml</span>\n</code></pre>\n</notextile>\n\nThe tool will upload all the dependencies to Arvados, then report:\n\n<notextile>\n<pre>INFO [container nginx.cwl] zzzzz-xvhdp-abcde12345fghij state is Committed\n</pre>\n</notextile>\n\nAfter Arvados launches the container, you'll be able to \"connect to your web server\":#eg-connecting.\n\nh3(#eg-api). Running with an Arvados API client\n\nIf you are comfortable writing your own Arvados tools, you can submit service container requests directly to the Arvados API. Before you start, you must make sure the container image and data you want to use is already in Arvados.\n\nFor this example, save the official nginx Docker image to Arvados using @arv-keepdocker@, then get the portable data hash of that container:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv-keepdocker library/nginx 1.29</span>\n[…]\nzzzzz-4zz18-wyfcjuvi7ankgp2\n$ <span class=\"userinput\">arv collection get --select='[\"portable_data_hash\"]' --uuid=<strong>zzzzz-4zz18-wyfcjuvi7ankgp2</strong></span>\n{\n \"etag\":\"\",\n \"kind\":\"arvados#collection\",\n \"portable_data_hash\":\"<strong>23a275c1761b645edb84355a91702cee+219</strong>\"\n}\n</code></pre>\n</notextile>\n\nNext, create a collection that contains some HTML and supporting content you want to serve from the container. 
You can use any web content you like. If you don't have anything handy, create a new directory and save this web page as @index.html@ inside it:\n\n{% codeblock as html %}\n<!-- Example index.html -->\n<!doctype html>\n<html lang=en>\n  <head>\n    <meta charset=utf-8>\n    <title>Test Page</title>\n  </head>\n  <body>\n    <p>Hello from inside an Arvados service container!</p>\n  </body>\n</html>\n{% endcodeblock %}\n\nCreate a collection from your content directory:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv-put <strong>/home/you/arvados-nginx-site</strong></span>\n[…]\n<strong>zzzzz-4zz18-5jh66sx5f2xo6kd</strong>\n</code></pre>\n</notextile>\n\nNow you are ready to submit your container request. Below is a body you could use with either the command-line tool @arv container_request create --container-request=…@ or an SDK like:\n\n{% codeblock as python %}\narv_client.container_requests().create(\n    body={'container_request': ...}\n).execute()\n{% endcodeblock %}\n\n* The value of @container_image@ is the portable data hash of the nginx Docker image you uploaded.\n* In the @mounts@ value for @/usr/share/nginx/html@, the value of @uuid@ is the UUID of your site content collection. You could alternatively specify a @portable_data_hash@.\n* The runtime constraint @API@ must be set @true@ for the container to be accessible over the network. You can adjust the other runtime constraints as you like.\n* @command@, @cwd@, and @output_path@ are based on the Docker image we're using.\n* From @state@ on, you can modify these fields as desired, or add others like @owner_uuid@.\n\n<notextile>\n<pre><code class=\"json\">{\n  \"container_image\": \"<strong>23a275c1761b645edb84355a91702cee+219</strong>\",\n  \"service\": true,\n  \"use_existing\": false,\n  \"published_ports\": {\n    \"80\": {\n      \"access\": \"public\",\n      \"label\": \"Web Server\",\n      \"initial_path\": \"\"\n    }\n  },\n  \"mounts\": {\n    \"/usr/share/nginx/html\": {\n      \"kind\": \"collection\",\n      \"uuid\": \"<strong>zzzzz-4zz18-5jh66sx5f2xo6kd</strong>\"\n    },\n    \"/run/nginx.out\": {\n      \"kind\": \"collection\",\n      \"writable\": true\n    }\n  },\n  \"runtime_constraints\": {\n    <strong>\"API\": true</strong>,\n    \"ram\": 209715200,\n    \"vcpus\": 2\n  },\n\n  \"command\": [\n    \"nginx\",\n    \"-g\",\n    \"daemon off;\"\n  ],\n  \"cwd\": \".\",\n  \"output_path\": \"/run/nginx.out\",\n\n  \"state\": \"Committed\",\n  \"name\": \"nginx server\",\n  \"priority\": 500\n}\n</code></pre>\n</notextile>\n\nh2(#eg-connecting). Connecting to a service container\n\nAfter Arvados starts a service container, the container record includes the URL users should use to access the service(s). You can open the service URL through Workbench or retrieve it from the Arvados API.\n\nh3(#eg-workbench). Connecting through Workbench\n\nWhen you view the process page for a running service container, a blue button appears next to the process name to connect to that service. If the container runs multiple services, you'll be able to select the one you want to connect to from a pulldown.\n\n!{max-width: 100%;}{{ site.baseurl }}/images/workbench-running-service-container.png(Screenshot from the top of an Arvados Workbench process page showing a running nginx service container with a \"Connect to web server\" button.)!\n\nh3(#eg-url). Getting a connection URL from the Arvados API\n\nThis section will illustrate how to get this information from Arvados API records using the CLI tools. 
You can follow this same process to make analogous API calls with any SDK. After you submit your container request, get its corresponding @container_uuid@:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv container_request get --select='[\"container_uuid\",\"state\"]' --uuid=<strong>zzzzz-xvhdp-abcde12345fghij</strong></span>\n{\n \"container_uuid\":\"<strong>zzzzz-dz642-y87pdppv4afp4da</strong>\",\n \"etag\":\"\",\n \"kind\":\"arvados#containerRequest\",\n \"state\":\"Committed\"\n}\n</code></pre>\n</notextile>\n\nIf @state@ is @Committed@ but @container_uuid@ is @null@, then your request has not been dispatched yet. Wait a little bit and try again. Once you have a @container_uuid@, request the @published_ports@ field of that container record:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv container get --select='[\"published_ports\"]' --uuid=<strong>zzzzz-dz642-y87pdppv4afp4da</strong></span>\n{\n \"published_ports\":{\n  \"80\":{\n   \"label\":\"Web Server\",\n   \"access\":\"public\",\n   \"base_url\":\"https://zzzzz.arvados.example:8900/\",\n   <strong>\"initial_url\":\"https://zzzzz.arvados.example:8900/\"</strong>,\n   \"initial_path\":\"\",\n   \"external_port\":8900\n  }\n }\n}\n</code></pre>\n</notextile>\n\nFor each service, the @initial_url@ provides the URL where you can connect to the corresponding service over HTTP. If @access@ is @private@, you should add an @arvados_api_token@ query parameter for a user who requested this container. For example, if the container's published port above had @\"access\":\"private\"@, the full URL to connect to it would look like:\n\n<notextile>\n<pre>https://zzzzz.arvados.example:8900/<span class=\"userinput\">?arvados_api_token=v2/zzzzz-gj3su-y2tncmjag9gajm8/1234567890</span></pre>\n</notextile>\n\nh2(#eg-stopping). Stopping a service container\n\nCancel a service container just like any other running container. You can use the red ⏹&nbsp;Cancel button that appears next to the process name in Workbench or the ⏹&nbsp;Cancel button in the action toolbar:\n\n!{max-width: 100%;}{{ site.baseurl }}/images/workbench-running-service-container.png(Screenshot from the top of an Arvados Workbench process page showing a running nginx service container with a \"Cancel\" button.)!\n\nOr if you're using the Arvados API directly, update your container request to set its @priority@ to 0:\n\n<notextile>\n<pre><code>$ <span class=\"userinput\">arv container_request update  --container-request='{\"priority\":0}' --uuid=<strong>zzzzz-xvhdp-abcde12345fghij</strong></span>\n</code></pre>\n</notextile>\n\nh2(#further-reading). Further reading\n\n* The \"Arvados container requests API reference\":{{ site.baseurl }}/api/methods/container_requests.html, especially the \"published ports section\":{{ site.baseurl }}/api/methods/container_requests.html#published_ports\n* The \"arv:PublishPorts CWL extension reference\":{{ site.baseurl }}/user/cwl/cwl-extensions.html#PublishPorts\n* If you're an Arvados administrator who wants to enable service containers, refer to the settings under @Services.ContainerWebServices@ in the \"configuration reference\":{{ site.baseurl }}/admin/config.html\n"
  },
  {
    "path": "doc/user/topics/storage-classes.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: Using storage classes\n...\n\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nStorage classes (sometimes called as \"storage tiers\") allow you to control which back-end storage volumes should be used to store the data blocks of a particular collection.  This can be used to implement data storage policies such as assigning data collections to \"fast\", \"robust\" or \"archival\" storage.\n\nNames of storage classes are internal to the cluster and decided by the administrator.  Aside from \"default\", Arvados currently does not define any standard storage class names.  Consult your cluster administrator for guidance on what storage classes are available to use on your specific Arvados instance.\n\nNote that when changing the storage class of an existing collection, it does not take effect immediately, the blocks are asynchronously copied to the new storage class and removed from the old one.  The collection field \"storage_classes_confirmed\" is updated to reflect when data blocks have been successfully copied.\n\nh3. arv-put\n\nYou may specify one or more desired storage classes for a collection uploaded using @arv-put@:\n\n<pre>\n$ arv-put --storage-classes=hot,archival myfile.txt\n</pre>\n\nh3. arv-mount\n\nYou can ask @arv-mount@ to use specific storage classes when creating new collections:\n\n<pre>\n$ arv-mount --storage-classes=transient --mount-tmp=scratch keep\n</pre>\n\nh3. arvados-cwl-runner\n\nYou may specify the desired storage class for the intermediate and final output collections produced by @arvados-cwl-runner@ on the command line or using the \"arv:OutputStorageClass hint\":{{site.baseurl}}/user/cwl/cwl-extensions.html#OutputStorageClass .\n\n<pre>\n$ arvados-cwl-runner --intermediate-storage-classes=hot_storage --storage-classes=robust_storage myworkflow.cwl myinput.yml\n</pre>\n\nh3. arv command line\n\nYou may set the storage class on an existing collection by setting the \"storage_classes_desired\" field of a Collection.  For example, at the command line:\n\n<pre>\n$ arv collection update --uuid zzzzz-4zz18-dhhm0ay8k8cqkvg --collection '{\"storage_classes_desired\": [\"archival\"]}'\n</pre>\n\nBy setting \"storage_classes_desired\" to \"archival\", the blocks that make up the collection will be preferentially moved to keepstore volumes which are configured with the \"archival\" storage class.\n\nh3. Storage class notes\n\nCollection blocks will be in the cluster's configured default storage class(es) if not otherwise specified.\n\nAny user with write access to a collection may set any storage class on that collection.\n"
  },
  {
    "path": "doc/user/topics/workbench-migration.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Workbench 2 migration\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nBeginning in version 2.7, Arvados now defaults to a new web application, referred to as \"Workbench 2\".  This is a major step in the migration from the classic web application, referred to as \"Workbench 1\".  Workbench 1 should be considered deprecated and suppport for the Workbench 1 application will be dropped in a future Arvados release.\n\n!{width: 90%}{{ site.baseurl }}/images/wb2-example.png!\n\nWorkbench 2 is the new Workbench web application that replaces Workbench 1. Workbench 2 is being built based on user feedback, and has feature parity with Workbench 1.  Workbench 2 has a modern look and feel and offers many advanced features and performance enhancements over the previous Workbench application.\n\nSome major improvements of Workbench 2 include:\n\nh2. General\n\n* More responsive, only loads data needed for display\n* More familiar user interface, modeled on the file explorer of MacOS and Windows.\n* Advanced search capabilities\n\nh2. Project browsing\n\n* Expanded informational columns\n* Expanded filtering options\n* Right side informational panel providing details about selected item without navigating away from the project\n* Support for adding and querying user-supplied metadata properties on Projects\n\nh2. Collection browsing\n\n* Able to browse collections with millions of files\n* Support for adding and querying user-supplied metadata properties on Collections\n* Support for viewing past versions of a collection\n\nh2. User and Group management\n\n* Able to create user groups through the GUI\n* Able to add/view/remove members of user groups, and what permissions are shared with the group\n* Able to add/view/remove permissions shared with individual users\n"
  },
  {
    "path": "doc/user/tutorials/tutorial-keep-collection-lifecycle.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Trashing and untrashing data\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nCollections have a sophisticated data lifecycle, which is documented in the architecture guide at \"Collection lifecycle\":{{ site.baseurl }}/architecture/keep-data-lifecycle.html#collection_lifecycle.\n\nArvados supports trashing (deletion) of collections. For a period of time after a collection is trashed, it can be \"untrashed\". After that period, the collection is permanently deleted, though there may still be ways to recover the data, see \"Recovering data\":{{ site.baseurl }}/admin/keep-recovering-data.html in the admin guide for more details.\n\n# \"*Trashing (deleting) collections*\":#delete-collection\n# \"*Recovering trashed collections*\":#trash-recovery\n\n{% include 'tutorial_expectations' %}\n\nh2(#delete-collection). Trashing (deleting) collections\n\nA collection can be trashed using workbench or the arv command line tool.\n\nh3. Trashing a collection using Workbench\n\nTo trash a collection using Workbench, open the ︙ action menu for the collection, and select *Move to trash*. You can do this from the collection page directly, or from the project listing that contains the collection.\n\nh3. Trashing a collection using arv command line tool\n\n<pre>\narv collection delete --uuid=zzzzz-4zz18-xxxxxxxxxxxxxxx\n</pre>\n\nh2(#trash-recovery). Recovering trashed collections\n\nA collection can be untrashed / recovered using workbench or the arv command line tool.\n\nh3. Untrashing a collection using Workbench\n\nTo untrash a collection using Workbench, open the *Trash* page from the left navigation menu. For each collection in this listing, you can press the *Restore* button on the far right to untrash it. You can also open a collection to review its contents. From that collection page, you can open the ︙ action menu and select *Restore* to untrash the collection.\n\n!{width: 80%}{{ site.baseurl }}/images/trash-buttons.png!\n\nh3. Untrashing a collection using arv command line tool\n\nYou can list the trashed collections using the list command.\n\n<pre>\narv collection list --include-trash=true --filters '[[\"is_trashed\", \"=\", \"true\"]]'\n</pre>\n\nYou can then untrash a particular collection using arv using it's uuid.\n\n<pre>\narv collection untrash --uuid=zzzzz-4zz18-xxxxxxxxxxxxxxx\n</pre>\n\nThe architecture section has a more detailed description of the \"data lifecycle\":{{ site.baseurl }}/architecture/keep-data-lifecycle.html  in Keep.\n"
  },
  {
    "path": "doc/user/tutorials/tutorial-keep-get.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Downloading data\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados Data collections can be downloaded using either the arv commands or using Workbench.\n\n# \"*Download using Workbench*\":#download-using-workbench\n# \"*Creating a special download URL for a collection*\":#download-shared-collection\n# \"*Download using command line tools*\":#download-using-arv\n\nh2(#download-using-workbench). Download using Workbench\n\nYou can also download Arvados data collections using the Workbench.\n\nWhen you visit a project in Workbench (for instance, the <i class=\"fa fa-fw fa-folder\"></i> *Home Projects* or any projects under it), the collections will show up on the Data tab, with \"_Data collection_\" in the *Type* column.\n\nClicking on a collection will bring you to its details page. There, the Files tab contains a file manager where you can navigate to or search for files, select them for actions, and download them.\n\nTo download a file, simply click on the file, or bring up the context menu using right-click or the triple-dot button on its row, and then select the menu item *Download*.\n\nh2(#download-shared-collection). Creating a special download URL for a collection\n\nTo share a collection with users that do not have an account on your Arvados cluster, locate the collection and then select *Share* in the the right-click menu. There, select the *SHARING URLS* tab.\n\nYou can then generate a new sharing URL using the <span class=\"btn btn-sm btn-primary\">CREATE SHARING URL</span> button, with the option to set an expiration time for the URL. You can then copy the URL to the clipboard for sharing with others. To revoke (that is, delete) a sharing URL, click on the cross icon beside it.\n\n<figure>!{width: 80%}{{ site.baseurl }}/images/sharing-collection-url.png!<figcaption>_The_ *SHARING URLS* _tab in the_ *Sharing settings* _dialog box, showing the created URL with an expiration time_</figcaption></figure>\n\nAny user with the sharing URL can download this collection by simply accessing this URL using browser. It will present a downloadable version of the collection as shown below.\n\n!{display: block;margin-left: 25px;margin-right: auto;border:1px solid lightgray;}{{ site.baseurl }}/images/download-shared-collection.png!\n\nWhen a collection is being shared by URL, in the *WITH USERS/GROUS* tab of *Sharing settings*, the following message will appear if *General access* is Private: _Although there aren't specific permissions set, this is publicly accessible via Sharing URL(s)._\n\n* *Note:* Sharing by URL is specific to collections. Projects or individual files cannot be shared in this way.\n\nh2(#download-using-arv). 
Download using command line tools\n\n{% include 'tutorial_expectations' %}\n\nYou can download Arvados data collections using the command line tools @arv-ls@ and @arv-get@.\n\nUse @arv-ls@ to view the contents of a collection:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv-ls ae480c5099b81e17267b7445e35b4bc7+180</span>\n./HWI-ST1027_129_D0THKACXX.1_1.fastq\n./HWI-ST1027_129_D0THKACXX.1_2.fastq\n</code></pre>\n</notextile>\n\nUse <code>-s</code> to print file sizes, in kilobytes, rounded up:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv-ls -s ae480c5099b81e17267b7445e35b4bc7+180</span>\n     12258 ./HWI-ST1027_129_D0THKACXX.1_1.fastq\n     12258 ./HWI-ST1027_129_D0THKACXX.1_2.fastq\n</code></pre>\n</notextile>\n\nUse @arv-get@ to download the contents of a collection and place it in the directory specified in the second argument (in this example, @.@ for the current directory):\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv-get ae480c5099b81e17267b7445e35b4bc7+180/ .</span>\n23 MiB / 23 MiB 100.0%\n~$ <span class=\"userinput\">ls</span>\nHWI-ST1027_129_D0THKACXX.1_1.fastq  HWI-ST1027_129_D0THKACXX.1_2.fastq\n</code></pre>\n</notextile>\n\nYou can also download individual files:\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv-get ae480c5099b81e17267b7445e35b4bc7+180/HWI-ST1027_129_D0THKACXX.1_1.fastq .</span>\n11 MiB / 11 MiB 100.0%\n</code></pre>\n</notextile>\n\nh3. Federated downloads\n\nIf your cluster is \"configured to be part of a federation\":{{site.baseurl}}/admin/federation.html you can also download collections hosted on other clusters (with appropriate permissions).\n\nIf you request a collection by portable data hash, it will first search the home cluster, then search federated clusters.\n\nYou may also request a collection by UUID.  In this case, it will contact the cluster named in the UUID prefix (in this example, @zzzzz@).\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">arv-get zzzzz-4zz18-fw6dnjxtkvzdewt/ .</span>\n</code></pre>\n</notextile>\n
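\nh3. Scripting downloads with the Python SDK\n\nYou can script the same kind of download with the Arvados Python SDK. Below is a minimal sketch; it assumes @ARVADOS_API_HOST@ and @ARVADOS_API_TOKEN@ are set in your environment, the collection UUID is a placeholder, and the file name is taken from the example collection above:\n\n{% codeblock as python %}\nimport arvados.collection\n\n# Load the collection by UUID (a portable data hash also works).\ncollection = arvados.collection.Collection('zzzzz-4zz18-xxxxxxxxxxxxxxx')\n\n# Stream one file from Keep to the local directory in 1 MiB chunks.\nwith collection.open('HWI-ST1027_129_D0THKACXX.1_1.fastq', 'rb') as src:\n    with open('HWI-ST1027_129_D0THKACXX.1_1.fastq', 'wb') as dst:\n        for chunk in iter(lambda: src.read(1 << 20), b''):\n            dst.write(chunk)\n{% endcodeblock %}\n"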
  },
  {
    "path": "doc/user/tutorials/tutorial-keep-mount-gnu-linux.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Access Keep as a GNU/Linux filesystem\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nGNU/Linux users can use @arv-mount@ or Gnome to mount Keep as a file system in order to access Arvados collections using traditional filesystem tools.\n\n{% include 'tutorial_expectations' %}\n\n# \"*Mounting at the command line with arv-mount*\":#arv-mount\n# \"*Mounting in Gnome File manager*\":#gnome\n\nh2(#arv-mount). Arv-mount\n\n@arv-mount@ provides a file system view of Arvados Keep using File System in Userspace (FUSE).  You can browse, open and read Keep entries as if they are regular files, and existing tools can access files in Keep.  Data is streamed on demand.  It is not necessary to download an entire file or collection to start processing.\n\nThe default mode permits browsing any collection in Arvados as a subdirectory under the mount directory.  To avoid having to fetch a potentially large list of all collections, collection directories only come into existence when explicitly accessed by UUID or portable data hash. For instance, a collection may be found by its content hash in the @keep/by_id@ directory.\n\n<notextile>\n<pre><code>~$ <span class=\"userinput\">mkdir -p keep</span>\n~$ <span class=\"userinput\">arv-mount keep</span>\n~$ <span class=\"userinput\">cd keep/by_id/c1bad4b39ca5a924e481008009d94e32+210</span>\n~/keep/by_id/c1bad4b39ca5a924e481008009d94e32+210$ <span class=\"userinput\">ls</span>\nvar-GS000016015-ASM.tsv.bz2\n~/keep/by_id/c1bad4b39ca5a924e481008009d94e32+210$ <span class=\"userinput\">md5sum var-GS000016015-ASM.tsv.bz2</span>\n44b8ae3fde7a8a88d2f7ebd237625b4f  var-GS000016015-ASM.tsv.bz2\n~/keep/by_id/c1bad4b39ca5a924e481008009d94e32+210$ <span class=\"userinput\">cd ../..</span>\n~$ <span class=\"userinput\">fusermount -u keep</span>\n</code></pre>\n</notextile>\n\nThe last line unmounts Keep.  Subdirectories will no longer be accessible.\n\nIn the top level directory of each collection, arv-mount provides a special file called @.arvados#collection@ that contains a JSON-formatted API record for the collection. This can be used to determine the collection's @portable_data_hash@, @uuid@, etc. This file does not show up in @ls@ or @ls -a@.\n\nh3. Modifying files and directories in Keep\n\nBy default, all files in the Keep mount are read only.  However, @arv-mount --read-write@ enables you to perform the following operations using normal Unix command line tools (@touch@, @mv@, @rm@, @mkdir@, @rmdir@) and your own programs using standard POSIX file system APIs:\n\n* Create, update, rename and delete individual files within collections\n* Create and delete subdirectories inside collections\n* Move files and directories within and between collections\n* Create and delete collections within a project (using @mkdir@ and @rmdir@ in a project directory)\n\nNot supported:\n\n* Symlinks, hard links\n* Changing permissions\n* Extended attributes\n* Moving a subdirectory of a collection into a project, or moving a collection from a project into another collection\n\nIf multiple clients (separate instances of arv-mount or other arvados applications) modify the same file in the same collection within a short time interval, this may result in a conflict.  
In this case, the most recent commit wins, and the \"loser\" will be renamed to a conflict file in the form @name~YYYYMMDD-HHMMSS~conflict~@.\n\nPlease note this feature is in beta testing.  In particular, the conflict mechanism is itself currently subject to race conditions with potential for data loss when a collection is being modified simultaneously by multiple clients.  This issue will be resolved in future development.\n\nh2(#gnome). Mounting in Gnome File manager\n\nAs an alternative to @arv-mount@, you can also access Keep over WebDAV through the Gnome File manager.\n\n# Open \"Files\"\n# On the left sidebar, click on \"Other Locations\"\n# At the bottom of the window, enter @davs://collections.ClusterID.example.com/@.  When prompted for credentials, enter username \"arvados\" and a valid Arvados token in the @Password@ field.\n"
  },
  {
    "path": "doc/user/tutorials/tutorial-keep-mount-os-x.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Access Keep from macOS Finder\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nUsers of macOS can browse Keep read-only via WebDAV. Specific collections can also be accessed read-write via WebDAV.\n\nh3. Browsing Keep in Finder (read-only)\n\nIn Finder, use \"Connect to Server...\" under the \"Go\" menu and enter @https://collections.ClusterID.example.com/@ in popup dialog. When prompted for credentials, enter username \"arvados\" and paste a valid Arvados token for the @Password@ field.\n\nThis mount is read-only. Write support for the @/users/@ directory is planned for a future release.\n\nh3. Accessing a specific collection in Keep (read-write)\n\nIn Finder, use \"Connect to Server...\" under the \"Go\" menu and enter @https://collections.ClusterID.example.com/c=your-collection-uuid@ in popup dialog. When prompted for credentials, put a valid Arvados token in the @Password@ field and anything in the Name field (it will be ignored by Arvados).\n\nThis collection is now accessible read/write.\n"
  },
  {
    "path": "doc/user/tutorials/tutorial-keep-mount-windows.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Access Keep from Windows File Explorer\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nWindows users can browse Keep read-only via WebDAV. Specific collections can also be accessed read-write via WebDAV.\n\nh3. Browsing Keep in File Explorer (read-only)\n\nUse the 'Map network drive' functionality, and enter @https://collections.ClusterID.example.com/@ in the Folder field. When prompted for credentials, you can fill in an arbitrary string for @Username@, it is ignored by Arvados. Windows will not accept an empty @Username@. Put a valid Arvados token in the @Password@ field.\n\nThis mount is read-only. Write support for the @/users/@ directory is planned for a future release.\n\nh3. Accessing a specific collection in Keep (read-write)\n\nUse the 'Map network drive' functionality, and enter @https://collections.ClusterID.example.com/c=your-collection-uuid@ in the Folder field. When prompted for credentials, you can fill in an arbitrary string for @Username@, it is ignored by Arvados. Windows will not accept an empty @Username@. Put a valid token in the @Password@ field.\n\nThis collection is now accessible read/write.\n"
  },
  {
    "path": "doc/user/tutorials/tutorial-keep.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Uploading data\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nArvados Data collections can be uploaded using either Workbench or the @arv-put@ command line tool.\n\n# \"*Upload using Workbench*\":#upload-using-workbench\n# \"*Upload using command line tool*\":#upload-using-command\n\nh2(#upload-using-workbench). Upload using Workbench\n\nTo upload using Workbench, first identify the project to upload the files into. This is done by browsing your projects in the navigation menu on the left, or to search for the project using the search field on the top.\n\nHaving navigated to the project, click on the <span class=\"btn btn-sm btn-primary\">+ NEW</span> button in the top-left corner. In the pop-up menu, select the item *New collection*.\n\n<figure> !{width: 80%;}{{ site.baseurl }}/images/add-new-collection-wb2.png! <figcaption> _Creating a new collection in the project \"WGS Processing Tutorial\"_ </figcaption></figure>\n\nIn the dialog box that follows, you will be prompted to create a new collection in your chosen project. Here, the *Collection Name* field is required. After entering the name for this new collection (and optionally other fields), you have the choice to create it with new file updates -- by drag-and-drop into the *Files* area or with the traditional file-upload dialog opened by your browser.\n\n<figure>!{width: 100%;}{{ site.baseurl }}/images/new-collection-modal-wb2.png!<figcaption>_Providing the new collection with a name (required). Optionally, you can upload files in this step._</figcaption></figure>\n\nYou can then click on the <span class=\"btn btn-sm btn-primary\">CREATE A COLLECTION</span> button and proceed to the newly-created collection's page. If you don't upload any data when creating the collection, the new collection will be empty, and you can upload files into it later.\n\n<figure>!{width: 100%;}{{ site.baseurl }}/images/newly-created-collection-empty-wb2.png!<figcaption>_The newly-created collection without any files yet._</figcaption></figure>\n\nIn the <span class=\"btn btn-sm btn-primary\">FILES</span> tab, there is a button labeled <span class=\"btn btn-sm btn-primary\">UPLOAD DATA</span>. Click on it, and you will be prompted to upload files by drag-and-drop or the file-selection dialog opened by your browser.\n\nThe files you choose to upload will then be displayed, and you can review them before clicking on the <span class=\"btn btn-sm btn-primary\">UPLOAD DATA</span> button to initiate the actual file transfer.\n\n<figure>!{width: 100%;}{{ site.baseurl }}/images/upload-data-prompt-with-files-wb2.png!<figcaption>_Selecting the files to upload_</figcaption></figure>\n\nOnce the file upload completes, you will be notified by a message, and the files will appear under the <span class=\"btn btn-sm btn-primary\">FILES</span> tab shortly.\n\n<figure>!{width: 100%;}{{ site.baseurl }}/images/upload-data-progress-wb2.png!<figcaption>_Upload status being displayed, with the uploaded files in the files tab_</figcaption></figure>\n\n*Note:* If you leave the collection page during the upload, the upload process will be aborted and you will need to upload the files again.\n\n*Note:* You can also use the <span class=\"btn btn-sm btn-primary\">UPLOAD DATA</span> button to add additional files to an existing collection.\n\nnotextile. <div class=\"spaced-out\">\n\n\nh2(#upload-using-command). 
h3. Locate your collection in Workbench\n\nVisit the Workbench and go to your <i class=\"fa fa-fw fa-folder\"></i> *Home Projects*.  Your newly uploaded collection should appear in the \"Data\" tab.  The collection name printed by @arv-put@ will appear under the *Name* column, and its *Type* will be \"_Data collection_\".\n\nClicking on the collection's name will lead you to its Workbench page, where you can see the collection's contents and download individual files.\n\nTo move the collection to a different project, locate the collection and right-click on it. This will bring up a context menu with *Move to*. Click on this item, and you will see a dialog box where you can select the target project to move this collection to, by search or navigation. This menu is also available in the toolbar after clicking the checkbox next to the collection name in the \"Data\" tab.\n\n<figure>!{width: 80%;}{{ site.baseurl }}/images/workbench-move-wb2.png!<figcaption> _Context menu with the_ *Move to* _item_ </figcaption></figure>\n\nnotextile. </div>\n"
  },
  {
    "path": "doc/user/tutorials/tutorial-projects.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Organizing data\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nh2. Projects and Collections\n\nIn Arvados, files are organized into \"collections\", and collections are organized by \"project\".\n\nOnly collections can contain files.  A collection is a distinct database record identified by a universal unique id (UUID).  Arvados maintains a history of changes to the collection.  Every collection version has an immutable identifier called a \"portable data hash\" which is computed from the file content of the collection.  This can be used to refer to the immutable file content independently of the collection UUID.  If two collections have the same portable data hash, they have the same file content.\n\nProjects contain collections, workflows and workflow runs, and other projects (subprojects).  Both collections and projects can have user-provided metadata.\n\nProjects are the main unit of organization and sharing.  See \"Sharing collections\":#sharing-projects for information about sharing projects and collections with other users.\n\nh2(#creating-projects). Creating a project\n\nWhen you have navigated to any existing project, clicking on <span class=\"btn btn-sm btn-primary\">+ NEW</span> <span class=\"rarr\">&rarr;</span> <i class=\"fa fa-fw fa-folder\"></i> *New project* will prompt you to create a new subproject under the current project.\n\nIf you're at the top-level <i class=\"fa fa-fw fa-folder\"></i> *Home Projects*, a new top-level project will be created.\n\nAlternatively, you can right-click on the link to an existing project to bring up a context menu, and select *New project*.\n\nh2(#sharing-projects). Sharing projects\n\nProjects can be shared with other users on the Arvados cluster.  First, locate the collection or project using any available means (for instance, by manually navigating in the Workbench, or using the Search bar), then right-click on its link in a listing. You will find the menu item *Share*, which opens the dialog box *Sharing settings*.\n\nTo share with other Arvados users, select the *WITH USERS/GROUPS* tab in the *Sharing settings* dialog box. Under *Add people and groups*, in the input field you can search for the user or group names. Select one you will be sharing with, choose the *Authorization* level (Read/Write/Manage) in the drop-down menu, and click on the plus sign (+) on the right. This can be repeated for other users or groups, each with their own *Authorization* level. The selected ones will appear under *People with access*. You can revisit the *Sharing settings* dialog box to modify the users or their access levels at a later time.\n\nThe *General access* drop-down menu controls the default sharing setting, with the following choices:\n\n* *Private*: This is the initial state when no users or groups have been selected for sharing. At any time, by setting *General access* to private, the current sharing setting will be cleared, and any users or groups formerly with access will lose that access.\n* *Public*: This means the list of *People with access* will include _Anonymous users_, even if they are not users of the current cluster. 
You can further set their access level using the *Authorization* drop-down menu.\n* *All users*: This means sharing with other users who are logged in on the current cluster.\n* *Shared*: When you choose to share with specific people or groups, *General access* will be set to *Shared*. From this state, you can further specify the default sharing settings for *Public* and *All users*.\n\nh2(#descriptions). Descriptions and metadata\n\nYou can add descriptions to projects, collections, workflows and workflow runs when you create them, or later using the \"Edit\" dialog.  Descriptions are included when performing full-text search on records.\n\nDescriptions are formatted using HTML.  Workbench provides a rich text editor for editing HTML descriptions.\n\n{% include 'html_tags' %}\n\nYou can also add key:value metadata to projects, collections, and workflow runs when you create them, or later by using the \"Edit\" dialog.  Properties can be queried in the advanced search.  For more information, see \"metadata properties\":{{site.baseurl}}/api/properties.html .
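\n\nFor example, if you have added a property named @category@ to some collections, a filter query like the following would list the matching collections from the command line (the property name and value here are placeholders):\n\n<pre>\narv collection list --filters '[[\"properties.category\", \"=\", \"WGS\"]]'\n</pre>\n"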
  },
  {
    "path": "doc/user/tutorials/tutorial-workflow-workbench.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Running a workflow using Workbench\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\nA \"workflow\" (sometimes called a \"pipeline\" in other systems) is a sequence of steps that apply various programs or tools to transform input data to output data.  Workflows are the principal means of performing computation with Arvados.  This tutorial demonstrates how to run a single-stage workflow to take a small data set of paired-end reads from a sample \"exome\":https://en.wikipedia.org/wiki/Exome in \"FASTQ\":https://en.wikipedia.org/wiki/FASTQ_format format and align them to \"Chromosome 19\":https://en.wikipedia.org/wiki/Chromosome_19_%28human%29 using the \"bwa mem\":http://bio-bwa.sourceforge.net/ tool, producing a \"Sequence Alignment/Map (SAM)\":https://samtools.github.io/ file.  This tutorial will introduce the following Arvados features:\n\n<div>\n* How to create a new process from an existing workflow.\n* How to browse and select input data for the workflow and submit the process to run on the Arvados cluster.\n* How to access your process results.\n</div>\n\nh3. Steps\n\nnotextile. <div class=\"spaced-out\">\n\n# Click on the <span class=\"btn btn-sm btn-primary\">+ NEW</span> button in the top-left.\n# In the pop-up menu, select *<i class=\"fa fa-fw fa-gear\"></i> Run a workflow*.  This will open the _Run Process_ panel in the Workbench.\n# In the search field under *Choose a workflow*, type in _bwa-mem.cwl_.\n# Select *bwa-mem.cwl* in the search results, and click the <span class=\"btn btn-sm btn-primary\" >NEXT</span> button.  This will create a new process in one of your Home Projects and will open it. To specify the project for the workflow run, click on the input line below \"*Project where the workflow will run*\", and in the pop-up dialog box, choose a project under your Home Projects.\n# You can now supply the inputs for the process. Please note that all required inputs are populated with default values and you can change them if you prefer.\n# For example, let's see how to set read pair *read_p1* and *read_p2* for this workflow. Click on the input line under the *read_p1* header.  This will open a dialog box titled *Choose a file*.\n# Enter the search terms _user guide resources_ into the *Search for a Project* field on the left.  You will see one or more collections in the search results appearing below and, among them, the one with the exact title *<i class=\"fa fa-fw fa-folder\"></i> User guide resources*. Your goal is to locate the file _HWI-ST1027_129_D0THKACXX.1_1.fastq_.\n# You may either locate the file manually, by clicking on the triangles ▶ to the left of each item to expand them (projects and the collections under it) until you find the file, or by filtering the search results using the *Filter Collections list in Projects* field, for example, with a term like \"_HWI-ST1027_\".\n# Either way, you will find the file <i class=\"fa fa-fw fa-file\"></i> *HWI-ST1027_129_D0THKACXX.1_1.fastq* in the search results. Click on it, and then the <span class=\"btn btn-sm btn-primary\">OK</span> button in the bottom-right.\n# Repeat the steps 7--9 to set the value for *read_p2*, except selecting the file ending in \"_2\"\n# Scroll to the bottom of the \"Inputs\" panel and click on the <span class=\"btn btn-sm btn-primary\" >RUN WORKFLOW</span> button.  
The page updates to show you that the process has been queued to run on the Arvados cluster.\n# Once the process starts running, you can track the progress by watching the log messages from the component(s) (scroll down to the *Logs* panel).  This page refreshes automatically, and you can also click on the <span class=\"btn btn-sm btn-primary\">REFRESH</span> button on the top of the page. You will see a <span class=\"label label-success\">Completed</span> label when the process completes successfully.\n# The output of the workflow can be found by following the link \"Output from bwa-mem.cwl\" under the heading *Output collection* in the main or <span class=\"btn btn-sm btn-primary\">DETAILS</span> panel, or in the <span class=\"btn btn-sm btn-primary\">OUTPUTS</span> panel further down. Click on the *Output from bwa-mem.cwl* link to see the detailed results from the workflow run.  This will lead you to a page that lists the metadata of the outputs, and you'll see the output SAM file there, in the <span class=\"btn btn-sm btn-primary\">FILES</span> panel.\n# To download your results, simply click on the SAM file name.\n\nnotextile. </div>\n"
  },
  {
    "path": "doc/user/tutorials/wgs-tutorial.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"Processing Whole Genome Sequences\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n<div style=\"max-width: 600px; margin-left: 30px\">\n\nh2. 1. A Brief Introduction to Arvados\n\nArvados is an open source platform for managing, processing, and sharing genomic and other large scientific and biomedical data.   Arvados helps bioinformaticians run and scale compute-intensive workflows.  By running their workflows in Arvados, they can scale their calculations dynamically in the cloud, track methods and datasets, and easily re-run workflow steps or whole workflows when necessary. This tutorial walkthrough shows examples of running a “real-world” workflow and how to navigate and use the Arvados working environment.\n\nWhen you log into your account on the Arvados playground (\"https://playground.arvados.org\":https://playground.arvados.org), you see the Arvados Workbench which is the web application that allows users to interactively access Arvados functionality.  For this tutorial, we will largely focus on using the Arvados Workbench since that is an easy way to get started using Arvados.  You can also access Arvados via your command line and/or using the available REST API and SDKs.   If you are interested, this tutorial walkthrough will have an optional component that will cover using the command line.\n\nBy using the Arvados Workbench or using the command line, you can submit your workflows to run on your Arvados cluster.  An Arvados cluster can be hosted in the cloud as well as on premise and on hybrid clusters. The Arvados playground cluster is currently hosted in the cloud.\n\nYou can also use the workbench or command line to access data in the Arvados storage system called Keep which is designed for managing and storing large collections of files on your Arvados cluster. The running of workflows is managed by Crunch. Crunch is designed to maintain data provenance and workflow reproducibility. Crunch automatically tracks data inputs and outputs through Keep and executes workflow processes in Docker containers. In a cloud environment, Crunch optimizes costs by scaling compute on demand.\n\n_Ways to Learn More About Arvados_\n* To learn more in general about Arvados, please visit the Arvados website here: \"https://arvados.org/\":https://arvados.org/\n* For a deeper dive into Arvados, the Arvados documentation can be found here: \"https://doc.arvados.org/\":https://doc.arvados.org/\n* For help on Arvados, visit the Gitter channel here: \"https://gitter.im/arvados/community\":https://gitter.im/arvados/community\n\n\nh2. 2. A Brief Introduction to the Whole Genome Sequencing (WGS) Processing Tutorial\n\nThe workflow used in this tutorial walkthrough serves as a “real-world” workflow example that takes in WGS data (paired FASTQs) and returns GVCFs and accompanying variant reports.  In this walkthrough, we will be processing approximately 10 public genomes made available by the Personal Genome Project.  
This set of data is from the PGP-UK (\"https://www.personalgenomes.org.uk/\":https://www.personalgenomes.org.uk/).\n\nThe overall steps in the workflow include:\n* Check of FASTQ quality using FastQC (\"https://www.bioinformatics.babraham.ac.uk/projects/fastqc/\":https://www.bioinformatics.babraham.ac.uk/projects/fastqc/)\n* Local alignment using BWA-MEM (\"http://bio-bwa.sourceforge.net/bwa.shtml\":http://bio-bwa.sourceforge.net/bwa.shtml)\n* Variant calling in parallel using GATK Haplotype Caller (\"https://gatk.broadinstitute.org/hc/en-us\":https://gatk.broadinstitute.org/hc/en-us)\n* Generation of an HTML report comparing variants against the ClinVar archive (\"https://www.ncbi.nlm.nih.gov/clinvar/\":https://www.ncbi.nlm.nih.gov/clinvar/)\n\nThe workflow is written in \"Common Workflow Language\":https://commonwl.org (CWL), the primary way to develop and run workflows for Arvados.\n\nBelow are diagrams of the main workflow, which runs the processing across multiple sets of FASTQs, and the main subworkflow (run multiple times in parallel by the main workflow), which processes a single set of FASTQs.  This main subworkflow also calls additional subworkflows, including subworkflows that perform variant calling using GATK in parallel by regions and generate the ClinVar HTML variant report.  These CWL diagrams (generated using \"CWL viewer\":https://view.commonwl.org) will give you a basic idea of the flow, inputs/outputs and workflow steps involved in the tutorial example.  However, if you aren’t used to looking at CWL workflow diagrams and/or aren’t particularly interested in this level of detail, do not worry.  You will not need to know these particulars to run the workflow.\n\n<figure> !{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image2.png!\n<figcaption> _*Figure 1*:  Main CWL Workflow for WGS Processing Tutorial.  This runs the same WGS subworkflow over multiple pairs of FASTQ files._ </figcaption> </figure>\n\n<figure> !{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image3.png!\n<figcaption> _*Figure 2*:  Main subworkflow for the WGS Processing Tutorial.  This subworkflow does alignment, deduplication, variant calling and reporting._ </figcaption> </figure>\n\n_Ways to Learn More About CWL_\n\n* The CWL website has lots of good content including the CWL User Guide: \"https://www.commonwl.org/\":https://www.commonwl.org/\n* Commonly Asked Questions and Answers can be found in the Discourse Group, here: \"https://cwl.discourse.group/\":https://cwl.discourse.group/\n* For help on CWL, visit the Gitter channel here: \"https://gitter.im/common-workflow-language/common-workflow-language\":https://gitter.im/common-workflow-language/common-workflow-language\n* Repository of CWL CommandLineTool descriptions for common tools in bioinformatics:\n\"https://github.com/common-workflow-library/bio-cwl-tools/\":https://github.com/common-workflow-library/bio-cwl-tools/\n\n\nh2. 3. Setting Up to Run the WGS Processing Workflow\n\nLet’s get a little familiar with the Arvados Workbench while also setting up to run the WGS processing tutorial workflow.  Logging into Workbench will present you with the front page. This gives a summary of your projects in your Arvados instance (i.e. the Arvados Playground) as well as a left-hand navigation bar, top search bar, and help, profile settings, and notifications on the top right.  The front page will only give you information about projects and activities that you have permissions to view and/or access.  
Other users' private or restricted projects and activities will not be visible by design.\n\nh3. 3a. Setting up a New Project\n\nProjects in Arvados help you organize and track your work, and can contain data, workflow code, details about workflow runs, and results.  Let’s begin by setting up a new project for the work you will be doing in this walkthrough.\n\nTo create a new project, go to the Projects dropdown menu and select the \"+NEW\" button, then select “New project”.\n\n<figure> !{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image4.png!\n<figcaption> _*Figure 3*:  Adding a new project using Arvados Workbench: select the \"+NEW\" button in the upper left-hand corner and click \"New project\"._ </figcaption> </figure>\n\nLet’s name your project “WGS Processing Tutorial”. You can also add a description of your project by typing in the **Description - optional** field. The universally unique identifier (UUID) of the project can be found in the URL, or by clicking the info button on the upper right-hand corner.\n\n<figure> !{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image6.png!\n<figcaption> _*Figure 4*:  Renaming the new project using Arvados Workbench: enter the name in the \"Project Name\" box._ </figcaption> </figure>\n\n<figure> !{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image7.png!\n<figcaption> _*Figure 5*: The UUID of the project can be found in the overview tab, under \"UUID\" and copied using the copy to clipboard option, highlighted in yellow in this image for emphasis._ </figcaption> </figure>\n\nIf you choose to use another name for your project, just keep that in mind when the project name is referenced in the walkthrough later on.\n\nh3. 3b. Working with Collections\n\nCollections in Arvados help organize and manage your data. You can upload your existing data into a collection or reuse data from one or more existing collections. Collections allow you to reorganize your files without duplicating or physically moving the data, making them very efficient to use even when working with terabytes of data.  Each collection has a universally unique identifier (collection UUID).  This is a constant for this collection, even if you add or remove files, or rename the collection.  You use this if you want to identify the most recent version of your collection to use in your workflows.\n\nArvados uses a content-addressable filesystem (i.e. Keep) where the addresses of files are derived from their contents.  A major benefit of this is that Arvados can then verify that when a dataset is retrieved, it is the dataset you requested, and can track the exact datasets that were used for each of your previous calculations.  This is what allows you to be certain that you are always working with the data that you think you are using.  You use the portable data hash of a collection when you want to guarantee that you use the same version as input to your workflow.\n\n<figure> !{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image1.png!\n<figcaption> _*Figure 6*:  A collection in Arvados as viewed via the Arvados Workbench. 
\n<figure> !{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image1.png!\n<figcaption> _*Figure 6*:  A collection in Arvados as viewed via the Arvados Workbench. You will find a panel that contains: the name of the collection (editable, if you have write access, by clicking the \"Edit collection\" button), a description of the collection (also editable in the same way), the collection UUID, the portable data hash, content size, and some other information like version number._ </figcaption> </figure>\n\nLet’s start working with collections by copying the existing collection that stores the FASTQ data being processed into our new “WGS Processing Tutorial” project.\n\nFirst, you must find the collection you are interested in copying over to your project.  There are several ways to search for a collection: by collection name, by UUID or by portable data hash.  In this case, let’s search for our collection by name.  The collection is called “PGP UK FASTQs”; search for it by name in the “Search” box.  It will come up in the search results, and you can navigate to it by clicking on the name.  You would do the same if you wanted to search by UUID or portable data hash.\n\nNow that you have found the collection of FASTQs you want to copy to your project, you can simply click the \"Make a copy\" button in the toolbar and select your new project to copy the collection there.  You can rename your collection whatever you wish, or use the default name on copy and add whatever description you would like.\n\nWe want to do the same thing for the other inputs to our WGS workflow. Similar to the “PGP UK FASTQs (ten genomes)” collection, there is a collection of inputs entitled “WGS Processing reference data”, and that collection can be copied over in a similar fashion.\n\nNow that we are a bit more familiar with the Arvados Workbench, projects, and collections, let’s move on to running a workflow.\n\nh2. 4. Running the WGS Processing Workflow\n\nIn this section, we will be discussing three ways to run the tutorial workflow using Arvados.  We will start with the easiest way and then progress to the more involved ways to run a workflow via the command line, which will allow you more control over your inputs, workflow parameters and setup.  Feel free to end your walkthrough after the first way, or to pick and choose the ways that appeal the most to you, fit your experience and/or preferred way of working.\n\nh3. 4a. Interactively Running a Workflow Using Workbench\n\nWorkflows can be registered in Arvados. Registration allows you to share a workflow with other Arvados users, and lets them run the workflow by clicking the \"+NEW\" button and selecting \"Run a workflow\" on the Workbench Dashboard, or on the command line by specifying the workflow UUID.  Default values can be specified for workflow inputs.\n\nWe have already registered the WGS workflow and set default input values for this part of the walkthrough.\n
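\nFor reference, registering a workflow is itself done with @arvados-cwl-runner@.  You will not need to do this for the tutorial, since the workflow is already registered, but a sketch of what registration looks like (using the workflow file from the tutorial repository introduced in Section 4d) is:\n\n<pre><code>$ arvados-cwl-runner --create-workflow ./cwl/wgs-processing-wf.cwl\n</code></pre>\n\nThis registers the workflow and prints the UUID of the new workflow record.\n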
\nLet’s find the registered WGS Processing Workflow and run it interactively in our newly created project.\n\n# To find the registered workflow, in the left-hand navigation bar, select \"Public Favorites\". That listing will include the \"WGS Processing Workflow\" project. Open that project, and it will include the workflow \"WGS processing workflow scattered over samples\". Open that workflow.\n# Once you have found the registered workflow, you can run it in your project by using the \"Run Workflow\" button in the toolbar and selecting your project (\"WGS Processing Tutorial\") that you set up in Section 3a. Click OK to continue.\n<figure> !{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image8.png!\n<figcaption> _*Figure 7*: This is the dialog that pops up when you press \"Run Workflow\". The project that you should select is highlighted in yellow._ </figcaption> </figure>\n# Default inputs to the registered workflow will be automatically filled in.  These inputs will work as-is, but can be edited.  You can verify that they match the portable data hashes of the collections you copied over to your new project.\n# Now, you can submit your workflow by selecting the \"Run Workflow\" button.\n\nCongratulations! You have now submitted your workflow to run. You can move to Section 5 to learn how to check the state of your submitted workflow and Section 6 to learn how to examine the results of and logs from your workflow.\n\nLet’s now say that instead of running a registered workflow, you want to run a workflow using the command line.  This is a completely optional step in the walkthrough.  To do this, you specify cwl files to define the workflow you want to run, and yml files to specify the inputs to the workflow.  In this walkthrough we will give two options, (4b) and (4c), for running the workflow on the command line.  Option 4b uses a virtual machine provided by Arvados, made accessible via a browser, that requires no additional setup. Option 4c allows you to submit from your personal machine, but you must install the necessary packages and edit configurations to allow you to submit to the Arvados cluster.  Please choose whichever works best for you.\n\nh3. 4b. Optional: Setting up to Run a Workflow Using Command Line and an Arvados Virtual Machine\n\nArvados provides a virtual machine which has all the necessary client-side libraries installed to submit to your Arvados cluster using the command line.  Webshell gives you access to an Arvados Virtual Machine (VM) from your browser with no additional setup.  You can access webshell through the Arvados Workbench.  It is the easiest way to try out submitting a workflow to Arvados via the command line.\n\nNew users of the Playground are automatically given access to a shell account.\n\n_Note_: the shell accounts are created on an interval and it may take up to two minutes from your initial log in before the shell account is created.\n\nYou can follow the instructions here to access the machine using the browser (also known as using webshell):\n* \"Accessing an Arvados VM with Webshell\":{{ site.baseurl }}/user/getting_started/vm-login-with-webshell.html\n\nArvados also allows you to ssh into the shell machine and other hosted VMs instead of using the webshell capabilities. However, this tutorial does not cover that option in depth.  If you would like to explore it on your own, you can follow the instructions in the documentation here:\n* \"Accessing an Arvados VM with SSH - Unix Environments\":{{ site.baseurl }}/user/getting_started/ssh-access-unix.html\n* \"Accessing an Arvados VM with SSH - Windows Environments\":{{ site.baseurl }}/user/getting_started/ssh-access-windows.html\n\nOnce you can use webshell, you can proceed to section *“4d. Running a Workflow Using the Command Line”*.\n\nh3. 4c. Optional: Setting up to Run a Workflow Using Command Line and Your Computer\n\nInstead of using a virtual machine provided by Arvados, you can install the necessary libraries and configure your computer to be able to submit to your Arvados cluster directly.  This is more of an advanced option and is for users who are comfortable installing software and libraries and configuring them on their machines.\n\nTo be able to submit workflows to the Arvados cluster, you will need to install the Python SDK on your machine.  Additional features can be made available by installing additional libraries, but this is the bare minimum you need to install to do this walkthrough tutorial.  You can follow the instructions in the Arvados documentation to install the Python SDK and set the appropriate configurations to access the Arvados Playground.\n\n* \"Installing the Arvados CWL Runner\":{{ site.baseurl }}/sdk/python/arvados-cwl-runner.html\n* \"Setting Configurations to Access the Arvados Playground\":{{ site.baseurl }}/user/reference/api-tokens.html\n
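\nAs a rough sketch of what that setup looks like (check the linked instructions for the details on your platform; the host and token values here are placeholders you would replace with your own from Workbench):\n\n<pre><code>$ pip install arvados-cwl-runner\n$ export ARVADOS_API_HOST=xxxxx.arvadosapi.com   # your cluster's API host\n$ export ARVADOS_API_TOKEN=xxxxxxxxxxxxxxxxxxxx  # your personal API token from Workbench\n</code></pre>\n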
\nOnce you have your machine set up to submit to the Arvados Playground Cluster, you can proceed to section *“4d. Running a Workflow Using the Command Line”*.\n\nh3. 4d. Optional: Running a Workflow Using the Command Line\n\nNow that we have access to a machine that can submit to the Arvados Playground, let’s download the relevant files containing the workflow description and inputs.\n\nFirst, we will:\n* Clone the tutorial repository from GitHub (\"https://github.com/arvados/arvados-tutorial\":https://github.com/arvados/arvados-tutorial)\n* Change directories into the WGS tutorial folder\n\n<pre><code>$ git clone https://github.com/arvados/arvados-tutorial.git\n$ cd arvados-tutorial/WGS-processing\n</code></pre>\n\nRecall that CWL is a way to describe command line tools and connect them together to create workflows.  YML files can be used to specify input values into these individual command line tools or overarching workflows.\n\nThe tutorial directories are as follows:\n* @cwl@ - contains CWL descriptions of workflows and command line tools for the tutorial\n* @yml@ - contains YML files for inputs for the main workflow or to test subworkflows and command line tools\n* @src@ - contains any source code necessary for the tutorial\n* @docker@ - contains Dockerfiles necessary to re-create any needed Docker images used in the tutorial\n\nBefore we run the WGS processing workflow, we want to adjust the inputs to match those in your new project.  The workflow that we want to submit is described by the file @cwl/wgs-processing-wf.cwl@ and the inputs are given by the file @yml/wgs-processing-wf.yml@.  Note: while all the cwl files are needed to describe the full workflow, only the single yml file with the workflow inputs is needed to run the workflow. The additional yml files (in the helper folder) are provided for testing purposes, or in case you want to test or run an underlying subworkflow or command line tool by itself.\n\nSeveral of the inputs in the yml file point to the original portable data hashes of the collections that you made copies of in your new project.  These still work because, even though we made copies of the collections into our new project, we haven’t changed the underlying contents.\n
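\nTo make that concrete, an input entry in a CWL yml file that references data in Keep looks roughly like the following (the input name, hash and size here are made up for illustration):\n\n<pre><code>fastq_directory:\n  class: Directory\n  location: keep:1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d+1234\n</code></pre>\n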
\nEditing this yml file is, in general, how you would alter the inputs for a given workflow: swap in the portable data hash or UUID of the collection you want to use.\n\nThe command to submit to the Arvados Playground Cluster is @arvados-cwl-runner@.\nTo submit the WGS processing workflow, you need to run the following command, replacing YOUR_PROJECT_UUID with the UUID of the new project you created for this tutorial.\n\n<pre><code>$ arvados-cwl-runner --no-wait --project-uuid YOUR_PROJECT_UUID ./cwl/wgs-processing-wf.cwl ./yml/wgs-processing-wf.yml\n</code></pre>\n\nThe @--no-wait@ option will submit the workflow to Arvados, print out the UUID of the job that was submitted to standard output, and exit instead of waiting until the job is finished to return the command prompt.\n\nThe @--project-uuid@ option specifies the project you want the workflow to run in; this means the output and log collections, as well as the workflow process itself, will be saved in that project.\n\nIf the workflow submitted successfully, you should see the following at the end of the output to the screen:\n\n<pre><code>INFO Final process status is success\n</code></pre>\n\nNow, you are ready to check the state of your submitted workflow.\n\nh2. 5.  Checking the State of a Submitted Workflow\n\nOnce you have submitted your workflow, you can examine its state interactively using the Arvados Workbench.  If you aren’t already viewing your workflow process on the workbench, you can navigate there via your project. You will want to go back to your new project, using the projects pulldown menu (the list of projects on the left) or searching for the project name.  Note: You can mark a project as a favorite (if/when you have multiple projects) to make it easier to find on the pulldown menu by right-clicking on the project name on the project pulldown menu and selecting \"Add to favorites\".\n\nThe process you will be looking for will be titled “WGS processing workflow scattered over samples” (whether you submitted it via the command line or Workbench).\n\nOnce you have found your workflow, you can see the state of the overall workflow, and of each underlying step, next to its label.\n\nCommon states you will see are as follows:\n\n* \"Queued\" - Workflow or step is waiting to run\n* \"Running\" or \"Active\" - Workflow is currently running\n* \"Complete\" - Workflow or step has successfully completed\n* \"Failing\" - Workflow is running but has steps that have failed\n* \"Failed\" - Workflow or step did not complete successfully\n* \"Cancelled\" - Workflow or step was either manually cancelled or was cancelled by Arvados due to a system error\n\nSince Arvados Crunch reuses steps and workflows if possible, this workflow should run relatively quickly because it has been run before and you have access to those previously run steps.  You may notice an initial period where the top level job shows the option of cancelling, while the other steps are filled in with already finished steps.\n
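\nIf you submitted from the command line, you can also poll the state of your submission without leaving the shell. A sketch using the @arv@ CLI (the UUID is a placeholder for the one printed when you submitted):\n\n<pre><code>$ arv container_request get --uuid zzzzz-xvhdp-xxxxxxxxxxxxxxx\n</code></pre>\n\nThis prints the container request record as JSON; the @state@ and @container_uuid@ fields are the most useful ones for tracking progress.\n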
\nh2. 6.  Examining a Finished Workflow\n\nOnce your workflow has finished, you can see how long it took the workflow to run, see scaling information, and examine the logs and outputs.  Outputs will only be available for steps that have successfully completed.  Outputs will be saved for every step in the workflow, and for the workflow itself.  Outputs are saved in collections.  You can access each collection by clicking on the link corresponding to the output.\n\n<figure> !{width: 100%}{{ site.baseurl }}/images/wgs-tutorial/image5.png!\n<figcaption> _*Figure 8*:  A completed workflow process in Arvados as viewed via the Arvados Workbench. You can click on the outputs link (highlighted in yellow) to view the outputs. Outputs of a workflow are stored in a collection._ </figcaption> </figure>\n\nIf we click on the outputs of the workflow, we will see the output collection. It contains the GVCF, tabix index file, and HTML ClinVar report for each analyzed sample (e.g., set of FASTQs). You can open a report in the browser by selecting it from the listing. You can also download a file to your local machine by right-clicking a file and selecting \"Download\" from the context menu, or from the action menu available from the far right of each listing.\n\nLogs for the main process can be found back on the workflow process page by selecting the \"LOGS\" tab. You can view the logs directly through that panel, or in the upper right-hand corner select the \"Outputs\" button.\n\nThere are several logs available, so here is a basic summary of what some of the more commonly used logs contain.  Let's first define a few terms that will help us understand what the logs are tracking.\n\nAs you may recall, Arvados Crunch manages the running of workflows. A _container request_ is an order sent to Arvados Crunch to perform some computational work. Crunch fulfills a request by either choosing a worker node to execute a container, or finding an identical/equivalent container that has already run. You can use _container request_ or _container_ to distinguish between a work order that is submitted to be run and a work order that is actually running or has been run. So our container request in this case is just the submitted workflow we sent to the Arvados cluster.\n\nA _node_ is a compute resource where Arvados can schedule work.  In our case, since the Arvados Playground is running on a cloud, our nodes are virtual machines.  @arvados-cwl-runner@ (acr) executes CWL workflows by submitting the individual parts to Arvados as containers, and @crunch-run@ is an internal component that runs on nodes and executes containers.\n\n* @stderr.txt@\n** Captures everything written to standard error by the programs run by the executing container\n* @node-info.txt@ and @node.json@\n** Contains information about the nodes that executed this container. For the Arvados Playground, this gives information about the virtual machine instance that ran the container.\n@node.json@ gives a high-level overview of the instance, such as name, price, and RAM, while @node-info.txt@ gives more detailed information about the virtual machine (e.g., the CPU of each processor)\n* @crunch-run.txt@ and @crunchstat.txt@\n** @crunch-run.txt@ has info about how the container's execution environment was set up (e.g., time spent loading the docker image) and timing/results of copying output data to Keep (if applicable)\n** @crunchstat.txt@ has info about resource consumption (RAM, cpu, disk, network) by the container while it was running.\n* @usage_report.html@ can be viewed directly in the browser by clicking on it.  It provides a summary and chart of the resource consumption derived from the raw data in @crunchstat.txt@.  (Available starting with @arvados-cwl-runner@ 2.7.2.)\n* @container.json@\n** Describes the container (unit of work to be done); contains the CWL code, runtime constraints (RAM, vcpus), and other details\n* @arv-mount.txt@\n** Contains information about the Arvados Keep mount used on the node executing the container\n* @hoststat.txt@\n** Contains information about resource consumption (RAM, cpu, disk, network) on the node while the container was running.\nThis is different from @crunchstat.txt@ because it includes resource consumption of Arvados components that run on the node outside the container, such as crunch-run and other processes related to the Keep file system.\n\nThe highest-level logs track the container that ran the @arvados-cwl-runner@ process, which you can think of as the “workflow runner”. It tracks which parts of the CWL workflow need to be run when, which have been run already, in what order they need to run, which can run simultaneously, and so forth, and then creates the necessary container requests.  Each step has its own logs for the container that ran that CWL step, including a log of the standard error produced by the code run in that step.  Those logs can be found by expanding the steps and clicking on the link to the log collection.\n
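\nAs an aside, log collections are ordinary Arvados collections, so you can also fetch logs from the command line. A sketch (the portable data hash is a placeholder for the one shown on the log collection’s page):\n\n<pre><code>$ arv-ls 1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d+1234              # list the log files in the collection\n$ arv-get 1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d+1234/stderr.txt - # print one log file to stdout\n</code></pre>\n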
\nLet’s take a peek at a few of these logs to get you more familiar with them.  First, we can look at the @stderr.txt@ of the highest level process.  Again, recall this is the log of the “workflow runner”, the @arvados-cwl-runner@ process.  You can click on the log to download it to your local machine, and when you look at the contents you should see something like the following...\n\n<pre><code>2020-06-22T20:30:04.737703197Z INFO /usr/bin/arvados-cwl-runner 2.0.3, arvados-python-client 2.0.3, cwltool 1.0.20190831161204\n2020-06-22T20:30:04.743250012Z INFO Resolved '/var/lib/cwl/workflow.json#main' to 'file:///var/lib/cwl/workflow.json#main'\n2020-06-22T20:30:20.749884298Z INFO Using empty collection d41d8cd98f00b204e9800998ecf8427e+0\n[removing some log contents here for brevity]\n2020-06-22T20:30:35.629783939Z INFO Running inside container su92l-dz642-uaqhoebfh91zsfd\n2020-06-22T20:30:35.741778080Z INFO [workflow WGS processing workflow] start\n2020-06-22T20:30:35.741778080Z INFO [workflow WGS processing workflow] starting step getfastq\n2020-06-22T20:30:35.741778080Z INFO [step getfastq] start\n2020-06-22T20:30:36.085839313Z INFO [step getfastq] completed success\n2020-06-22T20:30:36.212789670Z INFO [workflow WGS processing workflow] starting step bwamem-gatk-report\n2020-06-22T20:30:36.213545871Z INFO [step bwamem-gatk-report] start\n2020-06-22T20:30:36.234224197Z INFO [workflow bwamem-gatk-report] start\n2020-06-22T20:30:36.234892498Z INFO [workflow bwamem-gatk-report] starting step fastqc\n2020-06-22T20:30:36.235154798Z INFO [step fastqc] start\n2020-06-22T20:30:36.237328201Z INFO Using empty collection d41d8cd98f00b204e9800998ecf8427e+0\n</code></pre>\n\nYou can see the output of all the work that arvados-cwl-runner does in managing the execution of the CWL workflow and all the underlying steps and subworkflows.\n\nNow, let’s explore the logs for a subprocess in the workflow. Start by navigating back to the workflow process page. The logs can be found by selecting the appropriate subprocess under the \"Subprocesses\" tab, and getting the logs in the same way as mentioned above.  Let’s look at the log for the subprocess that does the alignment.  That subprocess is named bwamem-samtools-view.  
We can see there are 10 of them because we are aligning 10 genomes.  Let’s look at *bwamem-samtools-view_2*.\n\nClick on the subprocess to open it, and then go down to the \"Logs\" section to access the logs.  You may notice there are two sets of seemingly identical logs: one listed under a directory named for a container, and one up in the main directory.  This is done in case your subprocess had to be automatically re-run due to any issues, and it gives the logs of each re-run. The logs in the main directory are the logs for the successful run. In most cases a re-run does not happen; you will see just one directory, and those logs will match the logs in the main directory.  Let’s open the logs labeled node-info.txt and stderr.txt.\n\n@node-info.txt@ gives us detailed information about the virtual machine this step was run on.  The tail end of the log should look like the following:\n\n<pre><code>Memory Information\nMemTotal:       64465820 kB\nMemFree:        61617620 kB\nMemAvailable:   62590172 kB\nBuffers:           15872 kB\nCached:          1493300 kB\nSwapCached:            0 kB\nActive:          1070868 kB\nInactive:        1314248 kB\nActive(anon):     873716 kB\nInactive(anon):     8444 kB\nActive(file):     197152 kB\nInactive(file):  1305804 kB\nUnevictable:           0 kB\nMlocked:               0 kB\nSwapTotal:             0 kB\nSwapFree:              0 kB\nDirty:               952 kB\nWriteback:             0 kB\nAnonPages:        874968 kB\nMapped:           115352 kB\nShmem:              8604 kB\nSlab:             251844 kB\nSReclaimable:     106580 kB\nSUnreclaim:       145264 kB\nKernelStack:        5584 kB\nPageTables:         3832 kB\nNFS_Unstable:          0 kB\nBounce:                0 kB\nWritebackTmp:          0 kB\nCommitLimit:    32232908 kB\nCommitted_AS:    2076668 kB\nVmallocTotal:   34359738367 kB\nVmallocUsed:           0 kB\nVmallocChunk:          0 kB\nPercpu:             5120 kB\nAnonHugePages:    743424 kB\nShmemHugePages:        0 kB\nShmemPmdMapped:        0 kB\nHugePages_Total:       0\nHugePages_Free:        0\nHugePages_Rsvd:        0\nHugePages_Surp:        0\nHugepagesize:       2048 kB\nHugetlb:               0 kB\nDirectMap4k:      155620 kB\nDirectMap2M:     6703104 kB\nDirectMap1G:    58720256 kB\n\nDisk Space\nFilesystem      1M-blocks  Used Available Use% Mounted on\n/dev/nvme1n1p1       7874  1678      5778  23% /\n/dev/mapper/tmp    381746  1496    380251   1% /tmp\n\nDisk INodes\nFilesystem         Inodes IUsed     IFree IUse% Mounted on\n/dev/nvme1n1p1     516096 42253    473843    9% /\n/dev/mapper/tmp 195549184 44418 195504766    1% /tmp\n</code></pre>\n\nWe can see all the details of the virtual machine used for this step, including that it has 16 cores and 64 GiB of RAM.\n\n@stderr.txt@ gives us everything written to standard error by the programs run in this step.  This step ran successfully, so we don’t need to use this log to debug our step currently. 
We are just taking a look for practice.\n\nThe tail end of our log should be similar to the following:\n\n<pre><code>2020-08-04T04:37:19.674225566Z [main] CMD: /bwa-0.7.17/bwa mem -M -t 16 -R @RG\\tID:sample\\tSM:sample\\tLB:sample\\tPL:ILLUMINA\\tPU:sample1 -c 250 /keep/18657d75efb4afd31a14bb204d073239+13611/GRCh38_no_alt_plus_hs38d1_analysis_set.fna /keep/a146a06222f9a66b7d141e078fc67660+376237/ERR2122554_1.fastq.gz /keep/a146a06222f9a66b7d141e078fc67660+376237/ERR2122554_2.fastq.gz\n2020-08-04T04:37:19.674225566Z [main] Real time: 35859.344 sec; CPU: 553120.701 sec\n</code></pre>\n\nThis shows the command used to invoke bwa-mem, along with the scaling achieved by running bwa-mem multi-threaded across 16 cores: 553120.701 CPU seconds over 35859.344 seconds of real time is a 15.4x speedup.\n\nYou can also view outputs for the subprocess just like you do for the main workflow process. Back on the subprocess page for *bwamem-samtools-view_2*, the Outputs pane shows the output files of this specific subprocess. In this case, it is a single BAM file. This way, if your workflow succeeds but produces a surprising result, you can download and review the intermediate outputs to investigate further.\n\nWe hope that, now that you have a bit more familiarity with the logs, you can continue to use them to debug and optimize your own workflows as you move forward with using Arvados in your own work.\n\nh2. 7.  Conclusion\n\nThank you for working through this walkthrough tutorial.  Hopefully it has helped you get a feel for working with Arvados. This tutorial covered only the basic capabilities of Arvados; there are many more to explore.  Please see the links featured at the end of Section 1 for ways to learn more about Arvados or get help while you are working with Arvados.\n\nIf you would like help setting up your own production instance of Arvados, please contact us at \"info@curii.com.\":mailto:info@curii.com\n\n</div>\n"
  },
  {
    "path": "doc/user/tutorials/writing-cwl-workflow.html.textile.liquid",
    "content": "---\nlayout: default\nnavsection: userguide\ntitle: \"CWL Resources\"\n...\n{% comment %}\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: CC-BY-SA-3.0\n{% endcomment %}\n\n{% include 'what_is_cwl' %}\n\n{% include 'tutorial_expectations' %}\n\nh2. Developing workflows\n\nFor an introduction and and detailed documentation about writing CWL, see the \"CWL User Guide\":https://www.commonwl.org/user_guide and the \"CWL Specification\":http://commonwl.org/v1.2 .\n\nSee \"Writing Portable High-Performance Workflows\":{{site.baseurl}}/user/cwl/cwl-style.html and \"Arvados CWL Extensions\":{{site.baseurl}}/user/cwl/cwl-extensions.html for additional information about using CWL on Arvados.\n\nSee \"Repositories of CWL Tools and Workflows\":https://www.commonwl.org/#Repositories_of_CWL_Tools_and_Workflows for links to repositories of existing tools for reuse.\n\nSee \"Software for working with CWL\":https://www.commonwl.org/#Software_for_working_with_CWL for links to software tools to help create CWL documents.\n\nh2. Using cwltool\n\nWhen developing a workflow, it is often helpful to run it on the local host to avoid the overhead of submitting to the cluster.  To execute a workflow only on the local host (without submitting jobs to an Arvados cluster) you can use the @cwltool@ command.  Note that when using @cwltool@ you must have the input data accessible on the local file system using either @arv-mount@ or @arv-get@ to fetch the data from Keep.\n\n<notextile>\n<pre><code>~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arv-get 2463fa9efeb75e099685528b3b9071e0+438/ .</span>\n156 MiB / 156 MiB 100.0%\n~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">arv-get ae480c5099b81e17267b7445e35b4bc7+180/ .</span>\n23 MiB / 23 MiB 100.0%\n~/arvados/doc/user/cwl/bwa-mem$ <span class=\"userinput\">cwltool bwa-mem-input.yml bwa-mem-input-local.yml</span>\ncwltool 1.0.20160629140624\n[job bwa-mem.cwl] /home/example/arvados/doc/user/cwl/bwa-mem$ docker \\\n    run \\\n    -i \\\n    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.ann:/var/lib/cwl/job979368791_bwa-mem/19.fasta.ann:ro \\\n    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.fastq:/var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.fastq:ro \\\n    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.sa:/var/lib/cwl/job979368791_bwa-mem/19.fasta.sa:ro \\\n    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.amb:/var/lib/cwl/job979368791_bwa-mem/19.fasta.amb:ro \\\n    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.pac:/var/lib/cwl/job979368791_bwa-mem/19.fasta.pac:ro \\\n    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/HWI-ST1027_129_D0THKACXX.1_2.fastq:/var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_2.fastq:ro \\\n    --volume=/home/example/arvados/doc/user/cwl/bwa-mem/19.fasta.bwt:/var/lib/cwl/job979368791_bwa-mem/19.fasta.bwt:ro \\\n    --volume=/home/example/arvados/doc/user/cwl/bwa-mem:/var/spool/cwl:rw \\\n    --volume=/tmp/tmpgzyou9:/tmp:rw \\\n    --workdir=/var/spool/cwl \\\n    --read-only=true \\\n    --log-driver=none \\\n    --user=1001 \\\n    --rm \\\n    --env=TMPDIR=/tmp \\\n    --env=HOME=/var/spool/cwl \\\n    biodckr/bwa \\\n    bwa \\\n    mem \\\n    -t \\\n    1 \\\n    -R \\\n    '@RG\tID:arvados_tutorial\tPL:illumina\tSM:HWI-ST1027_129' \\\n    /var/lib/cwl/job979368791_bwa-mem/19.fasta \\\n    
/var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.fastq \\\n    /var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_2.fastq > /home/example/arvados/doc/user/cwl/bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.sam\n[M::bwa_idx_load_from_disk] read 0 ALT contigs\n[M::process] read 100000 sequences (10000000 bp)...\n[M::mem_pestat] # candidate unique pairs for (FF, FR, RF, RR): (0, 4745, 1, 0)\n[M::mem_pestat] skip orientation FF as there are not enough pairs\n[M::mem_pestat] analyzing insert size distribution for orientation FR...\n[M::mem_pestat] (25, 50, 75) percentile: (154, 181, 214)\n[M::mem_pestat] low and high boundaries for computing mean and std.dev: (34, 334)\n[M::mem_pestat] mean and std.dev: (185.63, 44.88)\n[M::mem_pestat] low and high boundaries for proper pairs: (1, 394)\n[M::mem_pestat] skip orientation RF as there are not enough pairs\n[M::mem_pestat] skip orientation RR as there are not enough pairs\n[M::mem_process_seqs] Processed 100000 reads in 9.848 CPU sec, 9.864 real sec\n[main] Version: 0.7.12-r1039\n[main] CMD: bwa mem -t 1 -R @RG\tID:arvados_tutorial\tPL:illumina\tSM:HWI-ST1027_129 /var/lib/cwl/job979368791_bwa-mem/19.fasta /var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.fastq /var/lib/cwl/job979368791_bwa-mem/HWI-ST1027_129_D0THKACXX.1_2.fastq\n[main] Real time: 10.061 sec; CPU: 10.032 sec\nFinal process status is success\n{\n    \"aligned_sam\": {\n        \"size\": 30738959,\n        \"path\": \"/home/example/arvados/doc/user/cwl/bwa-mem/HWI-ST1027_129_D0THKACXX.1_1.sam\",\n        \"checksum\": \"sha1$0c668cca45fef02397bb5302880526d300ee4dac\",\n        \"class\": \"File\"\n    }\n}\n</code></pre>\n</notextile>\n\nIf you get the error @JavascriptException: Long-running script killed after 20 seconds.@ this may be due to the Dockerized Node.js engine taking too long to start.  You may address this by installing Node.js locally (run @apt-get install nodejs@ on Debian or Ubuntu) or by specifying a longer timeout with the @--eval-timeout@ option.  For example, run the workflow with @cwltool --eval-timeout=40@ for a 40-second timeout.\n"
  },
  {
    "path": "doc/zenweb-fix-body.rb",
    "content": "require 'zenweb'\n\nmodule ZenwebTextile\n  VERSION = '0.0.1'\nend\n\nmodule Zenweb\n  class Page\n    alias_method :old_body, :body\n    def body\n      # Don't try to parse binary files as text\n      if /\\.(?:#{Site.binary_files.join(\"|\")})$/ =~ path\n        @body ||= File.binread path\n      else\n        @body ||= begin\n                    _, body = Zenweb::Config.split path\n                    body.strip\n                  end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "doc/zenweb-liquid.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nrequire 'zenweb'\nrequire 'liquid'\n\nmodule ZenwebLiquid\n  VERSION = '0.0.1'\nend\n\nmodule Zenweb\n\n  class Page\n\n    def render_liquid page, content\n      liquid self.body, content, page, binding\n    end\n\n    ##\n    # Render a page's liquid and return the intermediate result\n    def liquid template, content, page, binding = TOPLEVEL_BINDING\n      Liquid::Template.file_system = Liquid::LocalFileSystem.new(File.join(File.dirname(Rake.application().rakefile), \"_includes\"))\n      unless defined? @liquid_template\n        @liquid_template = Liquid::Template.parse(template)\n      end\n\n      vars = {}\n      vars[\"content\"] = content\n\n      vars[\"site\"] = site.config.h.clone\n      pages = {}\n      site.pages.each do |f, p|\n        pages[f] = p.config.h.clone\n        pages[f][\"url\"] = p.url\n      end\n      vars[\"site\"][\"pages\"] = pages\n\n      vars[\"page\"] = page.config.h.clone\n      vars[\"page\"][\"url\"] = page.url\n\n      @liquid_template.render(vars)\n    end\n  end\n\n  class LiquidCode < Liquid::Include\n    Syntax = /(#{Liquid::QuotedFragment}+)(\\s+(?:as)\\s+(#{Liquid::QuotedFragment}+))?/o\n\n    def initialize(tag_name, markup, tokens)\n      Liquid::Tag.instance_method(:initialize).bind(self).call(tag_name, markup, tokens)\n\n      if markup =~ Syntax\n        @template_name_expr = $1\n        @language = $3\n        @attributes    = {}\n      else\n        raise SyntaxError.new(\"Error in tag 'code' - Valid syntax: include '[code_file]' as '[language]'\")\n      end\n    end\n\n    def render(context)\n      require 'coderay'\n\n      partial = load_cached_partial(@template_name_expr, context)\n      html = ''\n\n      # be explicit about errors\n      context.exception_renderer = lambda do |exc|\n        exc.is_a?(Liquid::InternalError) ? \"Liquid error: #{exc.cause.message}\" : exc\n      end\n\n      context.stack do\n        html = CodeRay.scan(partial.root.nodelist.join, @language).div\n      end\n\n      html\n    end\n\n    Liquid::Template.register_tag('code', LiquidCode)\n  end\n\n  class LiquidCodeBlock < Liquid::Block\n    Syntax = /((?:as)\\s+(#{Liquid::QuotedFragment}+))?/o\n\n    def initialize(tag_name, markup, tokens)\n      Liquid::Tag.instance_method(:initialize).bind(self).call(tag_name, markup, tokens)\n\n      if markup =~ Syntax\n        @language = $2\n        @attributes = {}\n      else\n        raise SyntaxError.new(\"Error in tag 'code' - Valid syntax: codeblock as '[language]'\")\n      end\n    end\n\n    def render(context)\n      require 'coderay'\n\n      partial = super\n      html = ''\n\n      if partial[0] == '\\n'\n        partial = partial[1..-1]\n      end\n\n      # be explicit about errors\n      context.exception_renderer = lambda do |exc|\n        exc.is_a?(Liquid::InternalError) ? \"Liquid error: #{exc.cause.message}\" : exc\n      end\n\n      context.stack do\n        html = CodeRay.scan(partial, @language).div\n      end\n\n      \"<notextile>#{html}</notextile>\"\n    end\n\n    Liquid::Template.register_tag('codeblock', LiquidCodeBlock)\n  end\nend\n"
  },
  {
    "path": "doc/zenweb-textile.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nrequire 'zenweb'\n\nmodule ZenwebTextile\n  VERSION = '0.0.1'\nend\n\nmodule Zenweb\n  class Page\n    \n    ##\n    # Render a page's textile and return the resulting html\n    def render_textile page, content\n      require 'RedCloth'\n      RedCloth.new(content ? content : self.body).to_html\n    end\n  end\nend\n"
  },
  {
    "path": "go.mod",
    "content": "module git.arvados.org/arvados.git\n\ngo 1.25.0\n\ntoolchain go1.25.6\n\nrequire (\n\tdario.cat/mergo v1.0.0\n\tgithub.com/AdRoll/goamz v0.0.0-20170825154802-2731d20f46f4\n\tgithub.com/Azure/azure-sdk-for-go v68.0.0+incompatible\n\tgithub.com/Azure/go-autorest/autorest v0.11.29\n\tgithub.com/Azure/go-autorest/autorest/azure/auth v0.5.12\n\tgithub.com/Azure/go-autorest/autorest/to v0.4.0\n\tgithub.com/arvados/cgofuse v1.2.0\n\tgithub.com/aws/aws-sdk-go v1.44.256\n\tgithub.com/aws/aws-sdk-go-v2 v1.41.5\n\tgithub.com/aws/aws-sdk-go-v2/config v1.27.16\n\tgithub.com/aws/aws-sdk-go-v2/credentials v1.17.16\n\tgithub.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.21\n\tgithub.com/aws/aws-sdk-go-v2/service/ec2 v1.161.4\n\tgithub.com/aws/aws-sdk-go-v2/service/s3 v1.97.3\n\tgithub.com/aws/smithy-go v1.24.2\n\tgithub.com/bmatcuk/doublestar/v4 v4.6.1\n\tgithub.com/coreos/go-oidc/v3 v3.10.0\n\tgithub.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf\n\tgithub.com/creack/pty v1.1.21\n\tgithub.com/docker/docker v26.1.5+incompatible\n\tgithub.com/dustin/go-humanize v1.0.1\n\tgithub.com/fsnotify/fsnotify v1.7.0\n\tgithub.com/ghodss/yaml v1.0.0\n\tgithub.com/go-ldap/ldap v3.0.3+incompatible\n\tgithub.com/gogo/protobuf v1.3.2\n\tgithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510\n\tgithub.com/gorilla/mux v1.8.0\n\tgithub.com/gotd/contrib v0.20.0\n\tgithub.com/hashicorp/go-retryablehttp v0.7.7\n\tgithub.com/hashicorp/golang-lru v1.0.2\n\tgithub.com/hashicorp/yamux v0.1.1\n\tgithub.com/jmcvetta/randutil v0.0.0-20150817122601-2bb1b664bcff\n\tgithub.com/jmoiron/sqlx v1.4.0\n\tgithub.com/johannesboyne/gofakes3 v0.0.0-20240513200200-99de01ee122d\n\tgithub.com/julienschmidt/httprouter v1.3.0\n\tgithub.com/lib/pq v1.10.9\n\tgithub.com/msteinert/pam v1.2.0\n\tgithub.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58\n\tgithub.com/prometheus/client_golang v1.20.5\n\tgithub.com/prometheus/client_model v0.6.1\n\tgithub.com/prometheus/common v0.55.0\n\tgithub.com/sirupsen/logrus v1.9.3\n\tgolang.org/x/crypto v0.49.0\n\tgolang.org/x/mod v0.33.0\n\tgolang.org/x/net v0.52.0\n\tgolang.org/x/oauth2 v0.35.0\n\tgolang.org/x/sys v0.42.0\n\tgoogle.golang.org/api v0.181.0\n\tgopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c\n\tgopkg.in/go-jose/go-jose.v2 v2.6.3\n\trsc.io/getopt v0.0.0-20170811000552-20be20937449\n)\n\nrequire (\n\tcloud.google.com/go/auth v0.4.2 // indirect\n\tcloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect\n\tcloud.google.com/go/compute/metadata v0.9.0 // indirect\n\tgithub.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect\n\tgithub.com/Azure/go-autorest v14.2.0+incompatible // indirect\n\tgithub.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect\n\tgithub.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect\n\tgithub.com/Azure/go-autorest/autorest/date v0.3.0 // indirect\n\tgithub.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect\n\tgithub.com/Azure/go-autorest/logger v0.2.1 // indirect\n\tgithub.com/Azure/go-autorest/tracing v0.6.0 // indirect\n\tgithub.com/Microsoft/go-winio v0.6.2 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22 // 
indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/sso v1.20.9 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/sts v1.28.10 // indirect\n\tgithub.com/beorn7/perks v1.0.1 // indirect\n\tgithub.com/cespare/xxhash/v2 v2.3.0 // indirect\n\tgithub.com/containerd/log v0.1.0 // indirect\n\tgithub.com/dimchansky/utfbom v1.1.1 // indirect\n\tgithub.com/distribution/reference v0.6.0 // indirect\n\tgithub.com/dnaeon/go-vcr v1.2.0 // indirect\n\tgithub.com/docker/go-connections v0.5.0 // indirect\n\tgithub.com/docker/go-units v0.5.0 // indirect\n\tgithub.com/felixge/httpsnoop v1.0.4 // indirect\n\tgithub.com/go-jose/go-jose/v4 v4.1.4 // indirect\n\tgithub.com/go-logr/logr v1.4.3 // indirect\n\tgithub.com/go-logr/stdr v1.2.2 // indirect\n\tgithub.com/gofrs/uuid v4.4.0+incompatible // indirect\n\tgithub.com/golang-jwt/jwt/v4 v4.5.2 // indirect\n\tgithub.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect\n\tgithub.com/golang/protobuf v1.5.4 // indirect\n\tgithub.com/google/s2a-go v0.1.7 // indirect\n\tgithub.com/google/uuid v1.6.0 // indirect\n\tgithub.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect\n\tgithub.com/googleapis/gax-go/v2 v2.12.4 // indirect\n\tgithub.com/hashicorp/go-cleanhttp v0.5.2 // indirect\n\tgithub.com/jmespath/go-jmespath v0.4.0 // indirect\n\tgithub.com/klauspost/compress v1.17.9 // indirect\n\tgithub.com/kr/pretty v0.3.1 // indirect\n\tgithub.com/kr/text v0.2.0 // indirect\n\tgithub.com/kylelemons/godebug v1.1.0 // indirect\n\tgithub.com/mitchellh/go-homedir v1.1.0 // indirect\n\tgithub.com/moby/docker-image-spec v1.3.1 // indirect\n\tgithub.com/moby/term v0.5.0 // indirect\n\tgithub.com/morikuni/aec v1.0.0 // indirect\n\tgithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect\n\tgithub.com/opencontainers/go-digest v1.0.0 // indirect\n\tgithub.com/opencontainers/image-spec v1.1.0 // indirect\n\tgithub.com/pkg/errors v0.9.1 // indirect\n\tgithub.com/prometheus/procfs v0.15.1 // indirect\n\tgithub.com/rogpeppe/go-internal v1.14.1 // indirect\n\tgithub.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect\n\tgithub.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 // indirect\n\tgo.opencensus.io v0.24.0 // indirect\n\tgo.opentelemetry.io/auto/sdk v1.2.1 // indirect\n\tgo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect\n\tgo.opentelemetry.io/otel v1.43.0 // indirect\n\tgo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 // indirect\n\tgo.opentelemetry.io/otel/metric v1.43.0 // indirect\n\tgo.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect\n\tgo.opentelemetry.io/otel/trace v1.43.0 // indirect\n\tgolang.org/x/text v0.35.0 // indirect\n\tgolang.org/x/tools v0.42.0 // indirect\n\tgoogle.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect\n\tgoogle.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect\n\tgoogle.golang.org/grpc v1.80.0 // indirect\n\tgoogle.golang.org/protobuf v1.36.11 // indirect\n\tgopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect\n\tgopkg.in/yaml.v2 v2.4.0 // 
indirect\n\tgotest.tools/v3 v3.0.3 // indirect\n)\n\nreplace github.com/AdRoll/goamz => github.com/arvados/goamz v0.0.0-20190905141525-1bba09f407ef\n\nreplace gopkg.in/yaml.v2 => github.com/arvados/yaml v0.0.0-20210427145106-92a1cab0904b\n\n// Avoid v1.8.1, see https://dev.arvados.org/issues/21705#note-16\nreplace github.com/gorilla/mux => github.com/gorilla/mux v1.8.0\n"
  },
  {
    "path": "go.sum",
    "content": "cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg=\ncloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc=\ncloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=\ncloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=\ncloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=\ncloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=\ndario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=\ndario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=\nfilippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=\nfilippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=\ngithub.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=\ngithub.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=\ngithub.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=\ngithub.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=\ngithub.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=\ngithub.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=\ngithub.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=\ngithub.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=\ngithub.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=\ngithub.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=\ngithub.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk=\ngithub.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=\ngithub.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=\ngithub.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc=\ngithub.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0=\ngithub.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=\ngithub.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=\ngithub.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=\ngithub.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=\ngithub.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=\ngithub.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=\ngithub.com/Azure/go-autorest/autorest/to 
v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=\ngithub.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=\ngithub.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=\ngithub.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=\ngithub.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=\ngithub.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=\ngithub.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=\ngithub.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=\ngithub.com/arvados/cgofuse v1.2.0 h1:sWgVxyvSFjH965Uc7ReScn/cBl9Jemc9SeUNlEmjRH4=\ngithub.com/arvados/cgofuse v1.2.0/go.mod h1:79WFV98hrkRHK9XPhh2IGGOwpFSjocsWubgxAs2KhRc=\ngithub.com/arvados/goamz v0.0.0-20190905141525-1bba09f407ef h1:cl7DIRbiAYNqaVxg3CZY8qfZoBOKrj06H/x9SPGaxas=\ngithub.com/arvados/goamz v0.0.0-20190905141525-1bba09f407ef/go.mod h1:rCtgyMmBGEbjTm37fCuBYbNL0IhztiALzo3OB9HyiOM=\ngithub.com/arvados/yaml v0.0.0-20210427145106-92a1cab0904b h1:hK0t0aJTTXI64lpXln2A1SripqOym+GVNTnwsLes39Y=\ngithub.com/arvados/yaml v0.0.0-20210427145106-92a1cab0904b/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=\ngithub.com/aws/aws-sdk-go v1.44.256 h1:O8VH+bJqgLDguqkH/xQBFz5o/YheeZqgcOYIgsTVWY4=\ngithub.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=\ngithub.com/aws/aws-sdk-go-v2 v1.41.5 h1:dj5kopbwUsVUVFgO4Fi5BIT3t4WyqIDjGKCangnV/yY=\ngithub.com/aws/aws-sdk-go-v2 v1.41.5/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o=\ngithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8 h1:eBMB84YGghSocM7PsjmmPffTa+1FBUeNvGvFou6V/4o=\ngithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.8/go.mod h1:lyw7GFp3qENLh7kwzf7iMzAxDn+NzjXEAGjKS2UOKqI=\ngithub.com/aws/aws-sdk-go-v2/config v1.27.16 h1:knpCuH7laFVGYTNd99Ns5t+8PuRjDn4HnnZK48csipM=\ngithub.com/aws/aws-sdk-go-v2/config v1.27.16/go.mod h1:vutqgRhDUktwSge3hrC3nkuirzkJ4E/mLj5GvI0BQas=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.17.16 h1:7d2QxY83uYl0l58ceyiSpxg9bSbStqBC6BeEeHEchwo=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.17.16/go.mod h1:Ae6li/6Yc6eMzysRL2BXlPYvnrLLBg3D11/AmOjw50k=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3 h1:dQLK4TjtnlRGb0czOht2CevZ5l6RSyRWAnKeGd7VAFE=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.3/go.mod h1:TL79f2P6+8Q7dTsILpiVST+AL9lkF6PPGI167Ny0Cjw=\ngithub.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.21 h1:1v8Ii0MRVGYB/sdhkbxrtolCA7Tp+lGh+5OJTs5vmZ8=\ngithub.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.21/go.mod h1:cxdd1rc8yxCjKz28hi30XN1jDXr2DxZvD44vLxTz/bg=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 h1:Rgg6wvjjtX8bNHcvi9OnXWwcE0a2vGpbwmtICOsvcf4=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21/go.mod h1:A/kJFst/nm//cyqonihbdpQZwiUhhzpqTsdbhDdRF9c=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 h1:PEgGVtPoB6NTpPrBgqSE5hE/o47Ij9qk/SEZFbUOe9A=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21/go.mod h1:p+hz+PRAYlY3zcpJhPwXlLC4C+kqn70WIHwnzAfs6ps=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 
h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=\ngithub.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22 h1:rWyie/PxDRIdhNf4DzRk0lvjVOqFJuNnO8WwaIRVxzQ=\ngithub.com/aws/aws-sdk-go-v2/internal/v4a v1.4.22/go.mod h1:zd/JsJ4P7oGfUhXn1VyLqaRZwPmZwg44Jf2dS84Dm3Y=\ngithub.com/aws/aws-sdk-go-v2/service/ec2 v1.161.4 h1:JBcPadBAnSwqUZQ1o2XOkTXy7GBcidpupkXZf02parw=\ngithub.com/aws/aws-sdk-go-v2/service/ec2 v1.161.4/go.mod h1:iJ2sQeUTkjNp3nL7kE/Bav0xXYhtiRCRP5ZXk4jFhCQ=\ngithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY=\ngithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI=\ngithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13 h1:JRaIgADQS/U6uXDqlPiefP32yXTda7Kqfx+LgspooZM=\ngithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.13/go.mod h1:CEuVn5WqOMilYl+tbccq8+N2ieCy0gVn3OtRb0vBNNM=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 h1:c31//R3xgIJMSC8S6hEVq+38DcvUlgFY0FM6mSI5oto=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21/go.mod h1:r6+pf23ouCB718FUxaqzZdbpYFyDtehyZcmP5KL9FkA=\ngithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21 h1:ZlvrNcHSFFWURB8avufQq9gFsheUgjVD9536obIknfM=\ngithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.21/go.mod h1:cv3TNhVrssKR0O/xxLJVRfd2oazSnZnkUeTf6ctUwfQ=\ngithub.com/aws/aws-sdk-go-v2/service/s3 v1.97.3 h1:HwxWTbTrIHm5qY+CAEur0s/figc3qwvLWsNkF4RPToo=\ngithub.com/aws/aws-sdk-go-v2/service/s3 v1.97.3/go.mod h1:uoA43SdFwacedBfSgfFSjjCvYe8aYBS7EnU5GZ/YKMM=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.20.9 h1:aD7AGQhvPuAxlSUfo0CWU7s6FpkbyykMhGYMvlqTjVs=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.20.9/go.mod h1:c1qtZUWtygI6ZdvKppzCSXsDOq5I4luJPZ0Ud3juFCA=\ngithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3 h1:Pav5q3cA260Zqez42T9UhIlsd9QeypszRPwC9LdSSsQ=\ngithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3/go.mod h1:9lmoVDVLz/yUZwLaQ676TK02fhCu4+PgRSmMaKR1ozk=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.28.10 h1:69tpbPED7jKPyzMcrwSvhWcJ9bPnZsZs18NT40JwM0g=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.28.10/go.mod h1:0Aqn1MnEuitqfsCNyKsdKLhDUOr4txD/g19EfiUqgws=\ngithub.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng=\ngithub.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc=\ngithub.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I=\ngithub.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=\ngithub.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=\ngithub.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=\ngithub.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=\ngithub.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod 
h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=\ngithub.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=\ngithub.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=\ngithub.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=\ngithub.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=\ngithub.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=\ngithub.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=\ngithub.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=\ngithub.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=\ngithub.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=\ngithub.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=\ngithub.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=\ngithub.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=\ngithub.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=\ngithub.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=\ngithub.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g=\ngithub.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=\ngithub.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=\ngithub.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=\ngithub.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=\ngithub.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=\ngithub.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=\ngithub.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=\ngithub.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=\ngithub.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=\ngithub.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=\ngithub.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=\ngithub.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=\ngithub.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=\ngithub.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=\ngithub.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=\ngithub.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=\ngithub.com/go-jose/go-jose/v4 v4.1.4 h1:moDMcTHmvE6Groj34emNPLs/qtYXRVcd6S7NHbHz3kA=\ngithub.com/go-jose/go-jose/v4 v4.1.4/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=\ngithub.com/go-ldap/ldap v3.0.3+incompatible h1:HTeSZO8hWMS1Rgb2Ziku6b8a7qRIZZMHjsvuZyatzwk=\ngithub.com/go-ldap/ldap v3.0.3+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=\ngithub.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=\ngithub.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=\ngithub.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=\ngithub.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=\ngithub.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=\ngithub.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=\ngithub.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=\ngithub.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=\ngithub.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=\ngithub.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=\ngithub.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=\ngithub.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=\ngithub.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=\ngithub.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=\ngithub.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=\ngithub.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=\ngithub.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=\ngithub.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=\ngithub.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=\ngithub.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=\ngithub.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=\ngithub.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=\ngithub.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=\ngithub.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=\ngithub.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=\ngithub.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=\ngithub.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=\ngithub.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=\ngithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=\ngithub.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=\ngithub.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=\ngithub.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=\ngithub.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=\ngithub.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=\ngithub.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=\ngithub.com/gotd/contrib v0.20.0 h1:1Wc4+HMQiIKYQuGHVwVksIx152HFTP6B5n88dDe0ZYw=\ngithub.com/gotd/contrib v0.20.0/go.mod h1:P6o8W4niqhDPHLA0U+SA/L7l3BQHYLULpeHfRSePn9o=\ngithub.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs=\ngithub.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c=\ngithub.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=\ngithub.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=\ngithub.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=\ngithub.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=\ngithub.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=\ngithub.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=\ngithub.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=\ngithub.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=\ngithub.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=\ngithub.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=\ngithub.com/jmcvetta/randutil v0.0.0-20150817122601-2bb1b664bcff h1:6NvhExg4omUC9NfA+l4Oq3ibNNeJUdiAF3iBVB0PlDk=\ngithub.com/jmcvetta/randutil v0.0.0-20150817122601-2bb1b664bcff/go.mod h1:ddfPX8Z28YMjiqoaJhNBzWHapTHXejnB5cDCUWDwriw=\ngithub.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=\ngithub.com/jmespath/go-jmespath v0.4.0/go.mod 
h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=\ngithub.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=\ngithub.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=\ngithub.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=\ngithub.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=\ngithub.com/johannesboyne/gofakes3 v0.0.0-20240513200200-99de01ee122d h1:9dIJ/sx3yapvuq3kvTSVQ6UVS2HxfOB4MCwWiH8JcvQ=\ngithub.com/johannesboyne/gofakes3 v0.0.0-20240513200200-99de01ee122d/go.mod h1:AxgWC4DDX54O2WDoQO1Ceabtn6IbktjU/7bigor+66g=\ngithub.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=\ngithub.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=\ngithub.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=\ngithub.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=\ngithub.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=\ngithub.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=\ngithub.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=\ngithub.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=\ngithub.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=\ngithub.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=\ngithub.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=\ngithub.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=\ngithub.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=\ngithub.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=\ngithub.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=\ngithub.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=\ngithub.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=\ngithub.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=\ngithub.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=\ngithub.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=\ngithub.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=\ngithub.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=\ngithub.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=\ngithub.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=\ngithub.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=\ngithub.com/msteinert/pam v1.2.0 
h1:mYfjlvN2KYs2Pb9G6nb/1f/nPfAttT/Jee5Sq9r3bGE=\ngithub.com/msteinert/pam v1.2.0/go.mod h1:d2n0DCUK8rGecChV3JzvmsDjOY4R7AYbsNxAT+ftQl0=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=\ngithub.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=\ngithub.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=\ngithub.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=\ngithub.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=\ngithub.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=\ngithub.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=\ngithub.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=\ngithub.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=\ngithub.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=\ngithub.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=\ngithub.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=\ngithub.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=\ngithub.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=\ngithub.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=\ngithub.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=\ngithub.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=\ngithub.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=\ngithub.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=\ngithub.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=\ngithub.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 h1:WnNuhiq+FOY3jNj6JXFT+eLN3CQ/oPIsDPRanvwsmbI=\ngithub.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500/go.mod h1:+njLrG5wSeoG4Ds61rFgEzKvenR2UHbjMoDHsczxly0=\ngithub.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=\ngithub.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=\ngithub.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=\ngithub.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=\ngithub.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=\ngithub.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=\ngithub.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=\ngithub.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=\ngithub.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=\ngithub.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=\ngithub.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=\ngo.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=\ngo.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=\ngo.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=\ngo.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=\ngo.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=\ngo.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I=\ngo.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0/go.mod h1:Vl1/iaggsuRlrHf/hfPJPvVag77kKyvrLeD10kpMl+A=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 h1:3iZJKlCZufyRzPzlQhUIWVmfltrXuGyfjREgGP3UUjc=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0/go.mod h1:/G+nUPfhq2e+qiXMGxMwumDrP5jtzU+mWN7/sjT2rak=\ngo.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM=\ngo.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY=\ngo.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg=\ngo.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg=\ngo.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw=\ngo.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A=\ngo.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A=\ngo.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0=\ngo.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g=\ngo.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk=\ngolang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=\ngolang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=\ngolang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=\ngolang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=\ngolang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=\ngolang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=\ngolang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=\ngolang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=\ngolang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=\ngolang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=\ngolang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=\ngolang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=\ngolang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=\ngolang.org/x/net 
v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ=\ngolang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=\ngolang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=\ngolang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=\ngolang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=\ngolang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\ngolang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\ngolang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=\ngolang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=\ngolang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU=\ngolang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A=\ngolang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=\ngolang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=\ngolang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=\ngolang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=\ngolang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=\ngolang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=\ngolang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=\ngolang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=\ngolang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=\ngolang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=\ngolang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=\ngolang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=\ngolang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=\ngonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=\ngoogle.golang.org/api v0.181.0 h1:rPdjwnWgiPPOJx3IcSAQ2III5aX5tCer6wMpa/xmZi4=\ngoogle.golang.org/api v0.181.0/go.mod h1:MnQ+M0CFsfUwA5beZ+g/vCBCPXvtmZwRz2qzZk8ih1k=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 h1:m8qni9SQFH0tJc1X0vmnpw/0t+AImlSvp30sEupozUg=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=\ngoogle.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=\ngoogle.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=\ngoogle.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM=\ngoogle.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4=\ngoogle.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=\ngoogle.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=\ngoogle.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=\ngoogle.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=\ngoogle.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=\ngoogle.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=\ngoogle.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=\ngoogle.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=\ngopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=\ngopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=\ngopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs=\ngopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI=\ngopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngotest.tools/v3 v3.0.3 
h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=\ngotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=\nhonnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nrsc.io/getopt v0.0.0-20170811000552-20be20937449 h1:UukjJOsjQH0DIuyyrcod6CXHS6cdaMMuJmrt+SN1j4A=\nrsc.io/getopt v0.0.0-20170811000552-20be20937449/go.mod h1:dhCdeqAxkyt5u3/sKRkUXuHaMXUu1Pt13GTQAM2xnig=\n"
  },
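Each dependency pinned in the go.sum above carries two lines: an "h1:" digest computed over the module's extracted file tree, and a "/go.mod" digest computed over its go.mod file alone. As a hedged sketch (not part of this repo), the "h1:" form can be recomputed with the x/mod dirhash package; the unpacked-module path below is an invented assumption.

package main

import (
	"fmt"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// Hypothetical location of an unpacked module copy. The prefix
	// argument must be "modulepath@version" for the digest to match
	// the corresponding go.sum entry.
	h, err := dirhash.HashDir(
		"/tmp/unpacked/github.com/pkg/errors@v0.9.1", // assumed path
		"github.com/pkg/errors@v0.9.1",
		dirhash.Hash1, // the "h1:" algorithm: SHA-256 over a sorted file manifest
	)
	if err != nil {
		panic(err)
	}
	// For a pristine copy this reproduces the h1: line in go.sum above.
	fmt.Println(h)
}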
  {
    "path": "lib/boot/cert.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage boot\n\nimport (\n\t\"context\"\n\t\"crypto/rsa\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"encoding/pem\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org/x/crypto/acme\"\n\t\"golang.org/x/crypto/acme/autocert\"\n)\n\nconst stagingDirectoryURL = \"https://acme-staging-v02.api.letsencrypt.org/directory\"\n\nvar errInvalidHost = errors.New(\"unrecognized target host in incoming TLS request\")\n\ntype createCertificates struct{}\n\nfunc (createCertificates) String() string {\n\treturn \"certificates\"\n}\n\nfunc (createCertificates) Run(ctx context.Context, fail func(error), super *Supervisor) error {\n\tif super.cluster.TLS.ACME.Server != \"\" {\n\t\treturn bootAutoCert(ctx, fail, super)\n\t} else if super.cluster.TLS.Key == \"\" && super.cluster.TLS.Certificate == \"\" {\n\t\treturn createSelfSignedCert(ctx, fail, super)\n\t} else {\n\t\treturn nil\n\t}\n}\n\n// bootAutoCert uses Let's Encrypt to get certificates for all the\n// domains appearing in ExternalURLs, writes them to files where Nginx\n// can load them, and updates super.cluster.TLS fields (Key and\n// Certificiate) to point to those files.\n//\n// It also runs a background task to keep the files up to date.\n//\n// After bootAutoCert returns, other service components will get the\n// certificates they need by reading these files or by using a\n// read-only autocert cache.\n//\n// Currently this only works when port 80 of every ExternalURL domain\n// is routed to this host, i.e., on a single-node cluster. Wildcard\n// domains [for WebDAV] are not supported.\nfunc bootAutoCert(ctx context.Context, fail func(error), super *Supervisor) error {\n\thosts := map[string]bool{}\n\tfor _, svc := range super.cluster.Services.Map() {\n\t\tu := url.URL(svc.ExternalURL)\n\t\tif u.Scheme == \"https\" || u.Scheme == \"wss\" {\n\t\t\thosts[strings.ToLower(u.Hostname())] = true\n\t\t}\n\t}\n\tmgr := &autocert.Manager{\n\t\tCache:  autocert.DirCache(super.tempdir + \"/autocert\"),\n\t\tPrompt: autocert.AcceptTOS,\n\t\tHostPolicy: func(ctx context.Context, host string) error {\n\t\t\tif hosts[strings.ToLower(host)] {\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\treturn errInvalidHost\n\t\t\t}\n\t\t},\n\t}\n\tif srv := super.cluster.TLS.ACME.Server; srv == \"LE\" {\n\t\t// Leaving mgr.Client == nil means use Let's Encrypt\n\t\t// production environment\n\t} else if srv == \"LE-staging\" {\n\t\tmgr.Client = &acme.Client{DirectoryURL: stagingDirectoryURL}\n\t} else if strings.HasPrefix(srv, \"https://\") {\n\t\tmgr.Client = &acme.Client{DirectoryURL: srv}\n\t} else {\n\t\treturn fmt.Errorf(\"autocert setup: invalid directory URL in TLS.ACME.Server: %q\", srv)\n\t}\n\tgo func() {\n\t\terr := http.ListenAndServe(\":80\", mgr.HTTPHandler(nil))\n\t\tfail(fmt.Errorf(\"autocert http-01 challenge handler stopped: %w\", err))\n\t}()\n\tu := url.URL(super.cluster.Services.Controller.ExternalURL)\n\textHost := u.Hostname()\n\tupdate := func() error {\n\t\tfor h := range hosts {\n\t\t\tcert, err := mgr.GetCertificate(&tls.ClientHelloInfo{ServerName: h})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif h == extHost {\n\t\t\t\terr = writeCert(super.tempdir, \"server.key\", \"server.crt\", cert)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\terr := update()\n\tif err 
!= nil {\n\t\treturn err\n\t}\n\tgo func() {\n\t\tfor range time.NewTicker(time.Hour).C {\n\t\t\terr := update()\n\t\t\tif err != nil {\n\t\t\t\tsuper.logger.WithError(err).Error(\"error getting certificate from autocert\")\n\t\t\t}\n\t\t}\n\t}()\n\tsuper.cluster.TLS.Key = \"file://\" + super.tempdir + \"/server.key\"\n\tsuper.cluster.TLS.Certificate = \"file://\" + super.tempdir + \"/server.crt\"\n\treturn nil\n}\n\n// Save cert chain and key in a format Nginx can read.\nfunc writeCert(outdir, keyfile, certfile string, cert *tls.Certificate) error {\n\tkeytmp, err := os.CreateTemp(outdir, keyfile+\".tmp.*\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer keytmp.Close()\n\tdefer os.Remove(keytmp.Name())\n\n\tcerttmp, err := os.CreateTemp(outdir, certfile+\".tmp.*\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer certtmp.Close()\n\tdefer os.Remove(certtmp.Name())\n\n\tswitch privkey := cert.PrivateKey.(type) {\n\tcase *rsa.PrivateKey:\n\t\terr = pem.Encode(keytmp, &pem.Block{\n\t\t\tType:  \"RSA PRIVATE KEY\",\n\t\t\tBytes: x509.MarshalPKCS1PrivateKey(privkey),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\tbuf, err := x509.MarshalPKCS8PrivateKey(privkey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = pem.Encode(keytmp, &pem.Block{\n\t\t\tType:  \"PRIVATE KEY\",\n\t\t\tBytes: buf,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr = keytmp.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, cert := range cert.Certificate {\n\t\terr = pem.Encode(certtmp, &pem.Block{\n\t\t\tType:  \"CERTIFICATE\",\n\t\t\tBytes: cert,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr = certtmp.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = os.Rename(keytmp.Name(), filepath.Join(outdir, keyfile))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.Rename(certtmp.Name(), filepath.Join(outdir, certfile))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// Create a root CA key and use it to make a new server\n// certificate+key pair.\n//\n// In future we'll make one root CA key per host instead of one per\n// cluster, so it only needs to be imported to a browser once for\n// ongoing dev/test usage.\nfunc createSelfSignedCert(ctx context.Context, fail func(error), super *Supervisor) error {\n\tsan := \"DNS:localhost,DNS:localhost.localdomain\"\n\tif net.ParseIP(super.ListenHost) != nil {\n\t\tsan += fmt.Sprintf(\",IP:%s\", super.ListenHost)\n\t} else {\n\t\tsan += fmt.Sprintf(\",DNS:%s\", super.ListenHost)\n\t}\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"hostname: %w\", err)\n\t}\n\tif hostname != super.ListenHost {\n\t\tsan += \",DNS:\" + hostname\n\t}\n\n\t// Generate root key\n\terr = super.RunProgram(ctx, super.tempdir, runOptions{}, \"openssl\", \"genrsa\", \"-out\", \"rootCA.key\", \"4096\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Generate a self-signed root certificate\n\terr = super.RunProgram(ctx, super.tempdir, runOptions{}, \"openssl\", \"req\", \"-x509\", \"-new\", \"-nodes\", \"-key\", \"rootCA.key\", \"-sha256\", \"-days\", \"3650\", \"-out\", \"rootCA.crt\", \"-subj\", \"/C=US/ST=MA/O=Example Org/CN=localhost\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Generate server key\n\terr = super.RunProgram(ctx, super.tempdir, runOptions{}, \"openssl\", \"genrsa\", \"-out\", \"server.key\", \"2048\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Build config file for signing request\n\tdefaultconf, err := ioutil.ReadFile(\"/etc/ssl/openssl.cnf\")\n\tif err != nil 
{\n\t\treturn err\n\t}\n\tconf := append(defaultconf, []byte(fmt.Sprintf(\"\\n[SAN]\\nsubjectAltName=%s\\n\", san))...)\n\terr = ioutil.WriteFile(filepath.Join(super.tempdir, \"server.cfg\"), conf, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Generate signing request\n\terr = super.RunProgram(ctx, super.tempdir, runOptions{}, \"openssl\", \"req\", \"-new\", \"-sha256\", \"-key\", \"server.key\", \"-subj\", \"/C=US/ST=MA/O=Example Org/CN=localhost\", \"-reqexts\", \"SAN\", \"-config\", \"server.cfg\", \"-out\", \"server.csr\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Sign certificate\n\terr = super.RunProgram(ctx, super.tempdir, runOptions{}, \"openssl\", \"x509\", \"-req\", \"-in\", \"server.csr\", \"-CA\", \"rootCA.crt\", \"-CAkey\", \"rootCA.key\", \"-CAcreateserial\", \"-out\", \"server.crt\", \"-extfile\", \"server.cfg\", \"-extensions\", \"SAN\", \"-days\", \"3650\", \"-sha256\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tsuper.cluster.TLS.Key = \"file://\" + super.tempdir + \"/server.key\"\n\tsuper.cluster.TLS.Certificate = \"file://\" + super.tempdir + \"/server.crt\"\n\treturn nil\n}\n"
  },
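A minimal standalone sketch (not repo code) of the PEM-writing pattern used by writeCert in cert.go above: RSA keys are serialized as legacy PKCS#1 "RSA PRIVATE KEY" blocks, other key types fall back to PKCS#8 "PRIVATE KEY" blocks, and each DER certificate in the chain becomes its own "CERTIFICATE" block. The throwaway generated key is illustrative only.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"os"
)

func main() {
	// Throwaway key, standing in for cert.PrivateKey in writeCert.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// RSA branch: PKCS#1 encoding, which nginx loads directly.
	pem.Encode(os.Stdout, &pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	// A non-RSA key would instead go through x509.MarshalPKCS8PrivateKey
	// and a generic "PRIVATE KEY" block, as in writeCert's default case.
}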
  {
    "path": "lib/boot/cmd.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage boot\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/coreos/go-systemd/daemon\"\n)\n\nvar Command cmd.Handler = bootCommand{}\n\ntype supervisedTask interface {\n\t// Execute the task. Run should return nil when the task is\n\t// done enough to satisfy a dependency relationship (e.g., the\n\t// service is running and ready). If the task starts a\n\t// goroutine that fails after Run returns (e.g., the service\n\t// shuts down), it should call fail().\n\tRun(ctx context.Context, fail func(error), super *Supervisor) error\n\tString() string\n}\n\nvar errNeedConfigReload = errors.New(\"config changed, restart needed\")\nvar errParseFlags = errors.New(\"error parsing command line arguments\")\n\ntype bootCommand struct{}\n\nfunc (bcmd bootCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tlogger := ctxlog.New(stderr, \"json\", \"info\")\n\tctx := ctxlog.Context(context.Background(), logger)\n\tfor {\n\t\terr := bcmd.run(ctx, prog, args, stdin, stdout, stderr)\n\t\tif err == errNeedConfigReload {\n\t\t\tcontinue\n\t\t} else if err == errParseFlags {\n\t\t\treturn 2\n\t\t} else if err != nil {\n\t\t\tlogger.WithError(err).Info(\"exiting\")\n\t\t\treturn 1\n\t\t} else {\n\t\t\treturn 0\n\t\t}\n\t}\n}\n\nfunc (bcmd bootCommand) run(ctx context.Context, prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) error {\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tsuper := &Supervisor{\n\t\tStdin:  stdin,\n\t\tStderr: stderr,\n\t\tlogger: ctxlog.FromContext(ctx),\n\t}\n\n\tflags := flag.NewFlagSet(prog, flag.ContinueOnError)\n\tversionFlag := flags.Bool(\"version\", false, \"Write version information to stdout and exit 0\")\n\tflags.StringVar(&super.ConfigPath, \"config\", \"/etc/arvados/config.yml\", \"arvados config file `path`\")\n\tflags.StringVar(&super.SourcePath, \"source\", \".\", \"arvados source tree `directory`\")\n\tflags.StringVar(&super.ClusterType, \"type\", \"test\", \"cluster `type`: development or test\")\n\tflags.StringVar(&super.ListenHost, \"listen-host\", \"127.0.0.1\", \"host name or interface address for internal services whose InternalURLs are not configured\")\n\tflags.StringVar(&super.ControllerAddr, \"controller-address\", \":0\", \"desired controller address, `host:port` or `:port`\")\n\tflags.BoolVar(&super.NoWorkbench1, \"no-workbench1\", true, \"do not run workbench1\")\n\tflags.BoolVar(&super.NoWorkbench2, \"no-workbench2\", false, \"do not run workbench2\")\n\tflags.BoolVar(&super.OwnTemporaryDatabase, \"own-temporary-database\", false, \"bring up a postgres server and create a temporary database\")\n\ttimeout := flags.Duration(\"timeout\", 0, \"maximum time to wait for cluster to be ready\")\n\tshutdown := flags.Bool(\"shutdown\", false, \"shut down when the cluster becomes ready\")\n\tif ok, code := cmd.ParseFlags(flags, prog, args, \"\", stderr); !ok {\n\t\tif code == 0 {\n\t\t\treturn nil\n\t\t} else {\n\t\t\treturn errParseFlags\n\t\t}\n\t} else if *versionFlag {\n\t\tcmd.Version.RunCommand(prog, args, stdin, stdout, stderr)\n\t\treturn nil\n\t} else if super.ClusterType != \"development\" && super.ClusterType != \"test\" {\n\t\treturn fmt.Errorf(\"cluster type must be 'development' or 
'test'\")\n\t\t// There are checks for `ClusterType == \"production\"` in the code. This\n\t\t// was a planned feature that was never fully developed, and we have\n\t\t// since broken some of the assumptions the original code relied on.\n\t\t// They generally indicate places where the code should use system paths\n\t\t// rather than temporary ones, so the branches remain in place to save\n\t\t// that knowledge, but now they all error out until we have new\n\t\t// implementations.\n\t}\n\n\tsuper.Start(ctx)\n\tdefer super.Stop()\n\n\tvar timer *time.Timer\n\tif *timeout > 0 {\n\t\ttimer = time.AfterFunc(*timeout, super.Stop)\n\t}\n\n\tok := super.WaitReady()\n\tif timer != nil && !timer.Stop() {\n\t\treturn errors.New(\"boot timed out\")\n\t} else if !ok {\n\t\tsuper.logger.Error(\"boot failed\")\n\t} else {\n\t\t// Write each cluster's controller URL, id, and URL\n\t\t// host:port to stdout.  Nothing else goes to stdout,\n\t\t// so this allows a calling script to determine when\n\t\t// the cluster is ready to use, and the controller's\n\t\t// host:port (which may have been dynamically assigned\n\t\t// depending on config/options).\n\t\t//\n\t\t// Sort output by cluster ID for convenience.\n\t\tvar ids []string\n\t\tfor id := range super.Clusters() {\n\t\t\tids = append(ids, id)\n\t\t}\n\t\tsort.Strings(ids)\n\t\tfor _, id := range ids {\n\t\t\tcc := super.Cluster(id)\n\t\t\t// Providing both scheme://host:port and\n\t\t\t// host:port is redundant, but convenient.\n\t\t\tfmt.Fprintln(stdout, cc.Services.Controller.ExternalURL, id, cc.Services.Controller.ExternalURL.Host)\n\t\t}\n\t\t// Write \".\\n\" to mark the end of the list of\n\t\t// controllers, in case the caller doesn't already\n\t\t// know how many clusters are coming up.\n\t\tfmt.Fprintln(stdout, \".\")\n\t\tif *shutdown {\n\t\t\tsuper.Stop()\n\t\t\t// Wait for children to exit. Don't report the\n\t\t\t// ensuing \"context cancelled\" error, though:\n\t\t\t// return nil to indicate successful startup.\n\t\t\t_ = super.Wait()\n\t\t\tfmt.Fprintln(stderr, \"PASS - all services booted successfully\")\n\t\t\treturn nil\n\t\t}\n\t}\n\tif _, err := daemon.SdNotify(false, \"READY=1\"); err != nil {\n\t\tsuper.logger.WithError(err).Errorf(\"error notifying init daemon\")\n\t}\n\t// Wait for signal/crash + orderly shutdown\n\treturn super.Wait()\n}\n"
  },
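Per the comments in cmd.go above, the boot command's stdout protocol is: one "controller-URL cluster-ID host:port" line per cluster, sorted by cluster ID, then a lone "." marking the end of the list. A hedged sketch of a consumer of that protocol (reading from stdin for illustration; not part of the repo):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	scanner := bufio.NewScanner(os.Stdin)
	for scanner.Scan() {
		line := scanner.Text()
		if line == "." {
			break // end of the controller list
		}
		if fields := strings.Fields(line); len(fields) == 3 {
			fmt.Printf("cluster %s: controller %s (%s)\n", fields[1], fields[0], fields[2])
		}
	}
}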
  {
    "path": "lib/boot/example.sh",
    "content": "#!/bin/bash\n\n# Example of using `arvados-server boot` in a script. Bring up a test\n# cluster, wait for it to come up, fetch something from its discovery\n# doc, and shut down.\n\nset -e -o pipefail\n\ncleanup() {\n    set -x\n    kill ${boot_PID} ${consume_stdout_PID}\n    wait ${boot_PID} ${consume_stdout_PID} || true\n    echo >&2 \"done\"\n}\n\ncoproc boot (arvados-server boot -type test -config doc/examples/config/zzzzz.yml -own-temporary-database -timeout 20m)\ntrap cleanup ERR EXIT\n\nread controllerURL <&\"${boot[0]}\"\n\n# Copy coproc's stdout to stderr, to ensure `arvados-server boot`\n# doesn't get blocked trying to write stdout.\nexec 7<&\"${boot[0]}\"; coproc consume_stdout (cat <&7 >&2)\n\nkeepwebURL=$(curl --silent --fail --insecure \"${controllerURL}/discovery/v1/apis/arvados/v1/rest\" | jq -r .keepWebServiceUrl)\necho >&2 \"controller is at $controllerURL\"\necho >&2 \"keep-web is at $keepwebURL\"\n"
  },
  {
    "path": "lib/boot/helpers.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage boot\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\t\"gopkg.in/check.v1\"\n)\n\n// ClientsWithToken returns Context, Arvados.Client and keepclient structs\n// initialized to connect to the cluster with the supplied Arvados token.\nfunc (super *Supervisor) ClientsWithToken(clusterID, token string) (context.Context, *arvados.Client, *keepclient.KeepClient) {\n\tcl := super.cluster\n\tif super.children != nil {\n\t\tcl = super.children[clusterID].cluster\n\t} else if clusterID != cl.ClusterID {\n\t\tpanic(\"bad clusterID \" + clusterID)\n\t}\n\tctx := auth.NewContext(super.ctx, auth.NewCredentials(token))\n\tac, err := arvados.NewClientFromConfig(cl)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tac.AuthToken = token\n\tarv, err := arvadosclient.New(ac)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tkc := keepclient.New(arv)\n\treturn ctx, ac, kc\n}\n\n// UserClients logs in as a user called \"example\", get the user's API token,\n// initialize clients with the API token, set up the user and\n// optionally activate the user.  Return client structs for\n// communicating with the cluster on behalf of the 'example' user.\nfunc (super *Supervisor) UserClients(clusterID string, rootctx context.Context, c *check.C, conn *rpc.Conn, authEmail string, activate bool) (context.Context, *arvados.Client, *keepclient.KeepClient, arvados.User) {\n\tlogin, err := conn.UserSessionCreate(rootctx, rpc.UserSessionCreateOptions{\n\t\tReturnTo: \",https://controller.api.client.invalid\",\n\t\tAuthInfo: rpc.UserSessionAuthInfo{\n\t\t\tEmail:     authEmail,\n\t\t\tFirstName: \"Example\",\n\t\t\tLastName:  \"User\",\n\t\t\tUsername:  \"example\",\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\tredirURL, err := url.Parse(login.RedirectLocation)\n\tc.Assert(err, check.IsNil)\n\tuserToken := redirURL.Query().Get(\"api_token\")\n\tc.Logf(\"user token: %q\", userToken)\n\tctx, ac, kc := super.ClientsWithToken(clusterID, userToken)\n\tuser, err := conn.UserGetCurrent(ctx, arvados.GetOptions{})\n\tc.Assert(err, check.IsNil)\n\t_, err = conn.UserSetup(rootctx, arvados.UserSetupOptions{UUID: user.UUID})\n\tc.Assert(err, check.IsNil)\n\tif activate {\n\t\t_, err = conn.UserActivate(rootctx, arvados.UserActivateOptions{UUID: user.UUID})\n\t\tc.Assert(err, check.IsNil)\n\t\tuser, err = conn.UserGetCurrent(ctx, arvados.GetOptions{})\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Logf(\"user UUID: %q\", user.UUID)\n\t\tif !user.IsActive {\n\t\t\tc.Fatalf(\"failed to activate user -- %#v\", user)\n\t\t}\n\t}\n\treturn ctx, ac, kc, user\n}\n\n// RootClients returns Context, arvados.Client and keepclient structs initialized\n// to communicate with the cluster as the system root user.\nfunc (super *Supervisor) RootClients(clusterID string) (context.Context, *arvados.Client, *keepclient.KeepClient) {\n\treturn super.ClientsWithToken(clusterID, super.Cluster(clusterID).SystemRootToken)\n}\n\n// AnonymousClients returns Context, arvados.Client and keepclient structs initialized\n// to communicate with the cluster as the anonymous user.\nfunc (super *Supervisor) AnonymousClients(clusterID string) (context.Context, *arvados.Client, *keepclient.KeepClient) {\n\treturn 
super.ClientsWithToken(clusterID, super.Cluster(clusterID).Users.AnonymousUserToken)\n}\n\n// Conn gets rpc connection struct initialized to communicate with the\n// specified cluster.\nfunc (super *Supervisor) Conn(clusterID string) *rpc.Conn {\n\tcontrollerURL := url.URL(super.Cluster(clusterID).Services.Controller.ExternalURL)\n\treturn rpc.NewConn(clusterID, &controllerURL, true, rpc.PassthroughTokenProvider)\n}\n"
  },
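A hedged sketch of how a test might chain the helpers above; the cluster ID "zzzzz", the email address, and the exerciseCluster function itself are invented for illustration and are not part of the package.

package boot_test

import (
	"git.arvados.org/arvados.git/lib/boot"
	"gopkg.in/check.v1"
)

// exerciseCluster logs in as the "example" user on an assumed test
// cluster "zzzzz", activating the account, using the Supervisor
// helpers defined in helpers.go.
func exerciseCluster(c *check.C, super *boot.Supervisor) {
	rootctx, _, _ := super.RootClients("zzzzz")
	conn := super.Conn("zzzzz")
	userctx, ac, kc, user := super.UserClients("zzzzz", rootctx, c, conn, "example@invalid", true)
	c.Logf("activated user %s (API host %s)", user.UUID, ac.APIHost)
	_, _ = userctx, kc // ready to make requests on the user's behalf
}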
  {
    "path": "lib/boot/nginx.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage boot\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// Run an Nginx process that proxies the supervisor's configured\n// ExternalURLs to the appropriate InternalURLs.\ntype runNginx struct{}\n\nfunc (runNginx) String() string {\n\treturn \"nginx\"\n}\n\nfunc (runNginx) Run(ctx context.Context, fail func(error), super *Supervisor) error {\n\terr := super.wait(ctx, createCertificates{})\n\tif err != nil {\n\t\treturn err\n\t}\n\textListenHost := \"0.0.0.0\"\n\tif super.ClusterType == \"test\" {\n\t\t// Our dynamic port number assignment strategy (choose\n\t\t// an available port, write it in a config file, and\n\t\t// have another process/goroutine bind to it) is prone\n\t\t// to races when used by concurrent supervisors. In\n\t\t// test mode we don't accept remote connections, so we\n\t\t// can avoid collisions by using the per-cluster\n\t\t// loopback address instead of 0.0.0.0.\n\t\textListenHost = super.ListenHost\n\t}\n\tvars := map[string]string{\n\t\t\"LISTENHOST\":       extListenHost,\n\t\t\"UPSTREAMHOST\":     super.ListenHost,\n\t\t\"INTERNALSUBNETS\":  internalSubnets(super.logger),\n\t\t\"SSLCERT\":          filepath.Join(super.tempdir, \"server.crt\"),\n\t\t\"SSLKEY\":           filepath.Join(super.tempdir, \"server.key\"),\n\t\t\"ACCESSLOG\":        filepath.Join(super.tempdir, \"nginx_access.log\"),\n\t\t\"ERRORLOG\":         filepath.Join(super.tempdir, \"nginx_error.log\"),\n\t\t\"TMPDIR\":           super.wwwtempdir,\n\t\t\"ARVADOS_API_HOST\": super.cluster.Services.Controller.ExternalURL.Host,\n\t}\n\tu := url.URL(super.cluster.Services.Controller.ExternalURL)\n\tctrlHost := u.Hostname()\n\tif strings.HasPrefix(super.cluster.TLS.Certificate, \"file:/\") && strings.HasPrefix(super.cluster.TLS.Key, \"file:/\") {\n\t\tvars[\"SSLCERT\"] = filepath.Clean(super.cluster.TLS.Certificate[5:])\n\t\tvars[\"SSLKEY\"] = filepath.Clean(super.cluster.TLS.Key[5:])\n\t} else if f, err := os.Open(\"/var/lib/acme/live/\" + ctrlHost + \"/privkey\"); err == nil {\n\t\tf.Close()\n\t\tvars[\"SSLCERT\"] = \"/var/lib/acme/live/\" + ctrlHost + \"/cert\"\n\t\tvars[\"SSLKEY\"] = \"/var/lib/acme/live/\" + ctrlHost + \"/privkey\"\n\t}\n\tfor _, cmpt := range []struct {\n\t\tvarname string\n\t\tsvc     arvados.Service\n\t}{\n\t\t{\"CONTROLLER\", super.cluster.Services.Controller},\n\t\t{\"KEEPWEB\", super.cluster.Services.WebDAV},\n\t\t{\"KEEPWEBDL\", super.cluster.Services.WebDAVDownload},\n\t\t{\"KEEPPROXY\", super.cluster.Services.Keepproxy},\n\t\t{\"HEALTH\", super.cluster.Services.Health},\n\t\t{\"WORKBENCH1\", super.cluster.Services.Workbench1},\n\t\t{\"WORKBENCH2\", super.cluster.Services.Workbench2},\n\t\t{\"WS\", super.cluster.Services.Websocket},\n\t} {\n\t\tvar host, port string\n\t\tif len(cmpt.svc.InternalURLs) == 0 {\n\t\t\t// We won't run this service, but we need an\n\t\t\t// upstream port to write in our templated\n\t\t\t// nginx config. 
Choose a port that will\n\t\t\t// return 502 Bad Gateway.\n\t\t\tport = \"9\"\n\t\t} else if host, port, err = internalPort(cmpt.svc); err != nil {\n\t\t\treturn fmt.Errorf(\"%s internal port: %w (%v)\", cmpt.varname, err, cmpt.svc)\n\t\t} else if ok, err := addrIsLocal(net.JoinHostPort(host, port)); !ok || err != nil {\n\t\t\treturn fmt.Errorf(\"%s addrIsLocal() failed for host %q port %q: %v\", cmpt.varname, host, port, err)\n\t\t}\n\t\tvars[cmpt.varname+\"PORT\"] = port\n\n\t\tport, err = externalPort(cmpt.svc)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%s external port: %w (%v)\", cmpt.varname, err, cmpt.svc)\n\t\t}\n\t\tlistenAddr := net.JoinHostPort(super.ListenHost, port)\n\t\tif ok, err := addrIsLocal(listenAddr); !ok || err != nil {\n\t\t\treturn fmt.Errorf(\"%s addrIsLocal(%q) failed: %w\", cmpt.varname, listenAddr, err)\n\t\t}\n\t\tvars[cmpt.varname+\"SSLPORT\"] = port\n\t}\n\tif portmin, portmax := super.cluster.Services.ContainerWebServices.ExternalPortMin,\n\t\tsuper.cluster.Services.ContainerWebServices.ExternalPortMax; 0 < portmin && portmin <= portmax {\n\t\tvars[\"CONTROLLERLISTENEXTRA\"] = fmt.Sprintf(\"listen %s:%d-%d ssl;\", extListenHost, portmin, portmax)\n\t}\n\tvar conftemplate string\n\tif super.ClusterType == \"production\" {\n\t\t// FIXME: This used to return paths set up by `arvados-server install`,\n\t\t// which is no longer a thing.\n\t\treturn fmt.Errorf(\"production cluster type not implemented\")\n\t} else {\n\t\tconftemplate = filepath.Join(super.SourcePath, \"sdk\", \"python\", \"tests\", \"nginx.conf\")\n\t}\n\ttmpl, err := ioutil.ReadFile(conftemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\tconf := regexp.MustCompile(`{{.*?}}`).ReplaceAllStringFunc(string(tmpl), func(src string) string {\n\t\tif len(src) < 4 {\n\t\t\treturn src\n\t\t}\n\t\treturn vars[src[2:len(src)-2]]\n\t})\n\tconffile := filepath.Join(super.tempdir, \"nginx.conf\")\n\terr = ioutil.WriteFile(conffile, []byte(conf), 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\tnginx := \"nginx\"\n\tif _, err := exec.LookPath(nginx); err != nil {\n\t\tfor _, dir := range []string{\"/sbin\", \"/usr/sbin\", \"/usr/local/sbin\"} {\n\t\t\tif _, err = os.Stat(dir + \"/nginx\"); err == nil {\n\t\t\t\tnginx = dir + \"/nginx\"\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tconfigs := \"error_log stderr warn; \"\n\tconfigs += \"pid \" + filepath.Join(super.wwwtempdir, \"nginx.pid\") + \"; \"\n\tconfigs += \"user www-data; \"\n\n\tsuper.waitShutdown.Add(1)\n\tgo func() {\n\t\tdefer super.waitShutdown.Done()\n\t\tfail(super.RunProgram(ctx, \".\", runOptions{}, nginx, \"-g\", configs, \"-c\", conffile))\n\t}()\n\t// Choose one of the ports where Nginx should listen, and wait\n\t// here until we can connect. 
If ExternalURL is https://foo\n\t// (with no port) then we connect to \"foo:https\"\n\ttesturl := url.URL(super.cluster.Services.Controller.ExternalURL)\n\tif testurl.Port() == \"\" {\n\t\ttesturl.Host = net.JoinHostPort(testurl.Host, testurl.Scheme)\n\t}\n\treturn waitForConnect(ctx, testurl.Host)\n}\n\n// Return 0 or more local subnets as \"geo\" fragments for Nginx config,\n// e.g., \"1.2.3.0/24 0; 10.1.0.0/16 0;\".\nfunc internalSubnets(logger logrus.FieldLogger) string {\n\tiproutes, err := exec.Command(\"ip\", \"route\").CombinedOutput()\n\tif err != nil {\n\t\tlogger.Warnf(\"treating all clients as external because `ip route` failed: %s (%q)\", err, iproutes)\n\t\treturn \"\"\n\t}\n\tsubnets := \"\"\n\tfor _, line := range bytes.Split(iproutes, []byte(\"\\n\")) {\n\t\tfields := strings.Fields(string(line))\n\t\tif len(fields) > 2 && fields[1] == \"dev\" {\n\t\t\t// lan example:\n\t\t\t// 192.168.86.0/24 dev ens3 proto kernel scope link src 192.168.86.196\n\t\t\t// gcp example (private subnet):\n\t\t\t// 10.47.0.0/24 dev eth0 proto kernel scope link src 10.47.0.5\n\t\t\t// gcp example (no private subnet):\n\t\t\t// 10.128.0.1 dev ens4 scope link\n\t\t\tsubnets += fields[0] + \" 0; \"\n\t\t}\n\t}\n\treturn subnets\n}\n"
  },
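The config templating in runNginx above is plain regexp substitution over {{NAME}} tokens. A standalone sketch of the same transform (the sample template and vars map are invented):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	vars := map[string]string{
		"LISTENHOST":        "127.0.0.1",
		"CONTROLLERSSLPORT": "8443",
	}
	tmpl := "listen {{LISTENHOST}}:{{CONTROLLERSSLPORT}} ssl;"
	conf := regexp.MustCompile(`{{.*?}}`).ReplaceAllStringFunc(tmpl, func(src string) string {
		if len(src) < 4 {
			return src // can't happen with this pattern; defensive guard as in nginx.go
		}
		return vars[src[2:len(src)-2]] // strip braces, look up; unknown names become ""
	})
	fmt.Println(conf) // listen 127.0.0.1:8443 ssl;
}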
  {
    "path": "lib/boot/passenger.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage boot\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// Don't trust \"passenger-config\" (or \"bundle install\") to handle\n// concurrent installs.\nvar passengerInstallMutex sync.Mutex\n\nvar railsEnv = []string{\n\t\"ARVADOS_RAILS_LOG_TO_STDOUT=1\",\n\t\"ARVADOS_CONFIG_NOLEGACY=1\", // don't load database.yml from source tree\n}\n\n// Install a Rails application's dependencies, including phusion\n// passenger.\ntype installPassenger struct {\n\tsrc       string // path to app in source tree\n\tvarlibdir string // path to app in OS package: \"railsapi\" or \"workbench1\"\n\tdepends   []supervisedTask\n}\n\nfunc (runner installPassenger) String() string {\n\treturn \"installPassenger:\" + runner.src\n}\n\nfunc (runner installPassenger) Run(ctx context.Context, fail func(error), super *Supervisor) error {\n\tif super.ClusterType == \"production\" {\n\t\t// FIXME: This used to return paths set up by `arvados-server install`,\n\t\t// which is no longer a thing.\n\t\treturn fmt.Errorf(\"production cluster type not implemented\")\n\t}\n\terr := super.wait(ctx, runner.depends...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpassengerInstallMutex.Lock()\n\tdefer passengerInstallMutex.Unlock()\n\n\tappdir := runner.src\n\tif super.ClusterType == \"test\" {\n\t\t// In the multi-cluster test setup, if we run multiple\n\t\t// Rails instances directly from the source tree, they\n\t\t// step on one another's files in {source}/tmp, log,\n\t\t// etc. So instead we copy the source directory into a\n\t\t// temp dir and run the Rails app from there.\n\t\tappdir = filepath.Join(super.tempdir, runner.varlibdir)\n\t\terr = super.RunProgram(ctx, super.tempdir, runOptions{}, \"mkdir\", \"-p\", appdir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = super.RunProgram(ctx, filepath.Join(super.SourcePath, runner.src), runOptions{}, \"rsync\",\n\t\t\t\"-a\", \"--no-owner\", \"--no-group\", \"--delete-after\", \"--delete-excluded\",\n\t\t\t\"--exclude\", \"/coverage\",\n\t\t\t\"--exclude\", \"/log\",\n\t\t\t\"--exclude\", \"/node_modules\",\n\t\t\t\"--exclude\", \"/tmp\",\n\t\t\t\"--exclude\", \"/public/assets\",\n\t\t\t\"--exclude\", \"/vendor\",\n\t\t\t\"--exclude\", \"/config/environments\",\n\t\t\t\"./\",\n\t\t\tappdir+\"/\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar buf bytes.Buffer\n\terr = super.RunProgram(ctx, appdir, runOptions{output: &buf}, \"gem\", \"list\", \"--details\", \"bundler\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = super.RunProgram(ctx, appdir, runOptions{}, \"gem\", \"install\", \"--user\", \"--conservative\", \"--no-document\", \"--version\", \"~> 2.5.0\", \"bundler\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = super.RunProgram(ctx, appdir, runOptions{}, \"bundle\", \"config\", \"--set\", \"local\", \"path\", filepath.Join(os.Getenv(\"HOME\"), \".gem\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = super.RunProgram(ctx, appdir, runOptions{}, \"bundle\", \"install\", \"--jobs\", fmt.Sprintf(\"%d\", runtime.NumCPU()))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = super.RunProgram(ctx, appdir, runOptions{}, \"bundle\", \"exec\", \"passenger-config\", \"build-native-support\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = super.RunProgram(ctx, appdir, runOptions{}, \"bundle\", \"exec\", 
\"passenger-config\", \"install-standalone-runtime\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = super.RunProgram(ctx, appdir, runOptions{}, \"bundle\", \"exec\", \"passenger-config\", \"validate-install\")\n\tif err != nil && !strings.Contains(err.Error(), \"exit status 2\") {\n\t\t// Exit code 2 indicates there were warnings (like\n\t\t// \"other passenger installations have been detected\",\n\t\t// which we can't expect to avoid) but no errors.\n\t\t// Other non-zero exit codes (1, 9) indicate errors.\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype runPassenger struct {\n\tsrc       string // path to app in source tree\n\tvarlibdir string // path to app in OS package: \"railsapi\" or \"workbench1\"\n\tsvc       arvados.Service\n\tdepends   []supervisedTask\n}\n\nfunc (runner runPassenger) String() string {\n\treturn \"runPassenger:\" + runner.src\n}\n\nfunc (runner runPassenger) Run(ctx context.Context, fail func(error), super *Supervisor) error {\n\terr := super.wait(ctx, runner.depends...)\n\tif err != nil {\n\t\treturn err\n\t}\n\thost, port, err := internalPort(runner.svc)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"bug: no internalPort for %q: %v (%#v)\", runner, err, runner.svc)\n\t}\n\tvar appdir string\n\tswitch super.ClusterType {\n\tcase \"production\":\n\t\t// FIXME: This used to return paths set up by `arvados-server install`,\n\t\t// which is no longer a thing.\n\t\treturn fmt.Errorf(\"production cluster type not implemented\")\n\tcase \"test\":\n\t\tappdir = filepath.Join(super.tempdir, runner.varlibdir)\n\tdefault:\n\t\tappdir = runner.src\n\t}\n\tloglevel := \"4\"\n\tif lvl, ok := map[string]string{\n\t\t\"debug\":   \"5\",\n\t\t\"info\":    \"4\",\n\t\t\"warn\":    \"2\",\n\t\t\"warning\": \"2\",\n\t\t\"error\":   \"1\",\n\t\t\"fatal\":   \"0\",\n\t\t\"panic\":   \"0\",\n\t}[super.cluster.SystemLogs.LogLevel]; ok {\n\t\tloglevel = lvl\n\t}\n\tsuper.waitShutdown.Add(1)\n\tgo func() {\n\t\tdefer super.waitShutdown.Done()\n\t\tcmdline := []string{\n\t\t\t\"bundle\", \"exec\",\n\t\t\t\"passenger\", \"start\",\n\t\t\t\"--address\", host,\n\t\t\t\"--port\", port,\n\t\t\t\"--log-level\", loglevel,\n\t\t\t\"--no-friendly-error-pages\",\n\t\t\t\"--disable-anonymous-telemetry\",\n\t\t\t\"--disable-security-update-check\",\n\t\t\t\"--no-compile-runtime\",\n\t\t\t\"--no-install-runtime\",\n\t\t\t\"--pid-file\", filepath.Join(super.wwwtempdir, \"passenger.\"+strings.Replace(appdir, \"/\", \"_\", -1)+\".pid\"),\n\t\t}\n\t\topts := runOptions{\n\t\t\tenv: append([]string{\n\t\t\t\t\"TMPDIR=\" + super.wwwtempdir,\n\t\t\t}, railsEnv...),\n\t\t}\n\t\tif super.ClusterType == \"production\" {\n\t\t\topts.user = \"www-data\"\n\t\t\topts.env = append(opts.env, \"HOME=/var/www\")\n\t\t} else {\n\t\t\t// This would be desirable when changing uid\n\t\t\t// too, but it fails because /dev/stderr is a\n\t\t\t// symlink to a pty owned by root: \"nginx:\n\t\t\t// [emerg] open() \"/dev/stderr\" failed (13:\n\t\t\t// Permission denied)\"\n\t\t\tcmdline = append(cmdline, \"--log-file\", \"/dev/stderr\")\n\t\t}\n\t\terr = super.RunProgram(ctx, appdir, opts, cmdline[0], cmdline[1:]...)\n\t\tfail(err)\n\t}()\n\treturn nil\n}\n"
  },
  {
    "path": "lib/boot/postgresql.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage boot\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"database/sql\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/user\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/lib/pq\"\n)\n\n// Run a postgresql server in a private data directory. Set up a db\n// user, database, and TCP listener that match the supervisor's\n// configured database connection info.\ntype runPostgreSQL struct{}\n\nfunc (runPostgreSQL) String() string {\n\treturn \"postgresql\"\n}\n\nfunc (runPostgreSQL) Run(ctx context.Context, fail func(error), super *Supervisor) error {\n\terr := super.wait(ctx, createCertificates{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif super.ClusterType == \"production\" {\n\t\treturn nil\n\t}\n\n\tpostgresUser, err := user.Current()\n\tiamroot := postgresUser.Uid == \"0\"\n\tif err != nil {\n\t\treturn fmt.Errorf(\"user.Current(): %w\", err)\n\t} else if iamroot {\n\t\tpostgresUser, err = user.Lookup(\"postgres\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"user.Lookup(\\\"postgres\\\"): %s\", err)\n\t\t}\n\t}\n\n\tbuf := bytes.NewBuffer(nil)\n\terr = super.RunProgram(ctx, super.tempdir, runOptions{output: buf}, \"pg_config\", \"--bindir\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tbindir := strings.TrimSpace(buf.String())\n\n\tdatadir := filepath.Join(super.tempdir, \"pgdata\")\n\terr = os.Mkdir(datadir, 0700)\n\tif err != nil {\n\t\treturn err\n\t}\n\tprog, args := filepath.Join(bindir, \"initdb\"), []string{\"-D\", datadir, \"-E\", \"utf8\"}\n\topts := runOptions{}\n\topts.env = append(opts.env,\n\t\t\"PGHOST=\"+super.cluster.PostgreSQL.Connection[\"host\"],\n\t\t\"PGPORT=\"+super.cluster.PostgreSQL.Connection[\"port\"],\n\t\t\"PGUSER=\"+postgresUser.Username,\n\t\t\"PGDATABASE=\",\n\t\t\"PGPASSFILE=\",\n\t)\n\tif iamroot {\n\t\tpostgresUID, err := strconv.Atoi(postgresUser.Uid)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"user.Lookup(\\\"postgres\\\"): non-numeric uid?: %q\", postgresUser.Uid)\n\t\t}\n\t\tpostgresGid, err := strconv.Atoi(postgresUser.Gid)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"user.Lookup(\\\"postgres\\\"): non-numeric gid?: %q\", postgresUser.Gid)\n\t\t}\n\t\terr = os.Chown(super.tempdir, 0, postgresGid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = os.Chmod(super.tempdir, 0710)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = os.Chown(datadir, postgresUID, 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\topts.user = \"postgres\"\n\t}\n\terr = super.RunProgram(ctx, super.tempdir, opts, prog, args...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = super.RunProgram(ctx, super.tempdir, runOptions{}, \"cp\", \"server.crt\", \"server.key\", datadir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif iamroot {\n\t\terr = super.RunProgram(ctx, super.tempdir, runOptions{}, \"chown\", \"postgres\", datadir+\"/server.crt\", datadir+\"/server.key\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsuper.waitShutdown.Add(1)\n\tgo func() {\n\t\tdefer super.waitShutdown.Done()\n\t\tprog, args := filepath.Join(bindir, \"postgres\"), []string{\n\t\t\t\"-l\",          // enable ssl\n\t\t\t\"-D\", datadir, // data dir\n\t\t\t\"-k\", datadir, // socket dir\n\t\t\t\"-h\", super.cluster.PostgreSQL.Connection[\"host\"],\n\t\t\t\"-p\", super.cluster.PostgreSQL.Connection[\"port\"],\n\t\t}\n\t\tfail(super.RunProgram(ctx, super.tempdir, opts, prog, 
args...))\n\t}()\n\n\tfor {\n\t\tif ctx.Err() != nil {\n\t\t\treturn ctx.Err()\n\t\t}\n\t\terr := super.RunProgram(ctx, super.tempdir, opts, \"pg_isready\", \"--timeout=10\")\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Second / 2)\n\t}\n\tpgconn := arvados.PostgreSQLConnection{\n\t\t\"host\":   datadir,\n\t\t\"port\":   super.cluster.PostgreSQL.Connection[\"port\"],\n\t\t\"user\":   postgresUser.Username,\n\t\t\"dbname\": \"postgres\",\n\t}\n\tdb, err := sql.Open(\"postgres\", pgconn.String())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"db open failed: %s\", err)\n\t}\n\tdefer db.Close()\n\tconn, err := db.Conn(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"db conn failed: %s\", err)\n\t}\n\tdefer conn.Close()\n\t_, err = conn.ExecContext(ctx, `CREATE USER `+pq.QuoteIdentifier(super.cluster.PostgreSQL.Connection[\"user\"])+` WITH SUPERUSER ENCRYPTED PASSWORD `+pq.QuoteLiteral(super.cluster.PostgreSQL.Connection[\"password\"]))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"createuser failed: %s\", err)\n\t}\n\t_, err = conn.ExecContext(ctx, `CREATE DATABASE `+pq.QuoteIdentifier(super.cluster.PostgreSQL.Connection[\"dbname\"])+` WITH TEMPLATE template0 ENCODING 'utf8'`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"createdb failed: %s\", err)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "lib/boot/rails_db.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage boot\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/dblock\"\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype railsDatabase struct{}\n\nfunc (runner railsDatabase) String() string {\n\treturn \"railsDatabase\"\n}\n\n// Run checks for and applies any pending Rails database migrations.\n//\n// If running a dev/test environment, and the database is empty, it\n// initializes the database.\nfunc (runner railsDatabase) Run(ctx context.Context, fail func(error), super *Supervisor) error {\n\terr := super.wait(ctx, runPostgreSQL{}, installPassenger{src: \"services/api\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// determine path to installed rails app or source tree\n\tvar appdir string\n\tif super.ClusterType == \"production\" {\n\t\t// FIXME: This used to return paths set up by `arvados-server install`,\n\t\t// which is no longer a thing.\n\t\treturn fmt.Errorf(\"production cluster type not implemented\")\n\t} else {\n\t\tappdir = filepath.Join(super.SourcePath, \"services/api\")\n\t}\n\n\t// Check for pending migrations before running rake.\n\t//\n\t// In principle, we could use \"rake db:migrate:status\" or skip\n\t// this check entirely and let \"rake db:migrate\" be a no-op\n\t// most of the time.  However, in the most common case when\n\t// there are no new migrations, that would add ~2s to startup\n\t// time / downtime during service restart.\n\n\ttodo, err := migrationList(appdir, super.logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// read schema_migrations table (list of migrations already\n\t// applied) and remove those entries from todo\n\tdbconnector := ctrlctx.DBConnector{PostgreSQL: super.cluster.PostgreSQL}\n\tdefer dbconnector.Close()\n\tdb, err := dbconnector.GetDB(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\trows, err := db.QueryContext(ctx, `SELECT version FROM schema_migrations`)\n\tif err != nil {\n\t\tif super.ClusterType == \"production\" {\n\t\t\treturn err\n\t\t}\n\t\tsuper.logger.WithError(err).Info(\"schema_migrations query failed, trying db:setup\")\n\t\treturn super.RunProgram(ctx, \"services/api\", runOptions{env: railsEnv}, \"bundle\", \"exec\", \"rake\", \"db:setup\")\n\t}\n\tfor rows.Next() {\n\t\tvar v string\n\t\terr = rows.Scan(&v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdelete(todo, v)\n\t}\n\terr = rows.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// if nothing remains in todo, all available migrations are\n\t// done, so return without running any [relatively slow]\n\t// ruby/rake commands\n\tif len(todo) == 0 {\n\t\treturn nil\n\t}\n\n\tsuper.logger.Infof(\"%d migrations pending\", len(todo))\n\tif !dblock.RailsMigrations.Lock(ctx, dbconnector.GetDB) {\n\t\treturn context.Canceled\n\t}\n\tdefer dblock.RailsMigrations.Unlock()\n\treturn super.RunProgram(ctx, appdir, runOptions{env: railsEnv}, \"bundle\", \"exec\", \"rake\", \"db:migrate\")\n}\n\nfunc migrationList(dir string, log logrus.FieldLogger) (map[string]bool, error) {\n\ttodo := map[string]bool{}\n\n\t// list versions in db/migrate/{version}_{name}.rb\n\terr := fs.WalkDir(os.DirFS(dir), \"db/migrate\", func(path string, d fs.DirEntry, err error) error {\n\t\tif d.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tfnm := d.Name()\n\t\tif strings.HasSuffix(fnm, \"~\") {\n\t\t\treturn nil\n\t\t}\n\t\tif 
!strings.HasSuffix(fnm, \".rb\") {\n\t\t\tlog.Warnf(\"unexpected file in db/migrate dir: %s\", fnm)\n\t\t\treturn nil\n\t\t}\n\t\tfor i, c := range fnm {\n\t\t\tif i > 0 && c == '_' {\n\t\t\t\ttodo[fnm[:i]] = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif c < '0' || c > '9' {\n\t\t\t\t// non-numeric character before the\n\t\t\t\t// first '_' means this is not a\n\t\t\t\t// migration\n\t\t\t\tlog.Warnf(\"unexpected file in db/migrate dir: %s\", fnm)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn todo, nil\n}\n"
  },
  {
    "path": "lib/boot/rails_db_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage boot\n\nimport (\n\t\"bytes\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"gopkg.in/check.v1\"\n)\n\ntype railsDBSuite struct{}\n\nvar _ = check.Suite(&railsDBSuite{})\n\n// Check services/api/db/migrate/*.rb match schema_migrations\nfunc (s *railsDBSuite) TestMigrationList(c *check.C) {\n\tvar logbuf bytes.Buffer\n\tlog := ctxlog.New(&logbuf, \"text\", \"info\")\n\ttodo, err := migrationList(\"../../services/api\", log)\n\tc.Check(err, check.IsNil)\n\tc.Check(todo[\"20220804133317\"], check.Equals, true)\n\tc.Check(logbuf.String(), check.Equals, \"\")\n\n\tcfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()\n\tc.Assert(err, check.IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\tdb := arvadostest.DB(c, cluster)\n\trows, err := db.Query(`SELECT version FROM schema_migrations`)\n\tfor rows.Next() {\n\t\tvar v string\n\t\terr = rows.Scan(&v)\n\t\tc.Assert(err, check.IsNil)\n\t\tif !todo[v] {\n\t\t\tc.Errorf(\"version is in schema_migrations but not services/api/db/migrate/: %q\", v)\n\t\t}\n\t\tdelete(todo, v)\n\t}\n\terr = rows.Close()\n\tc.Assert(err, check.IsNil)\n\n\t// In the test suite, the database should be fully migrated.\n\t// So, if there's anything left in todo here, there is\n\t// something wrong with our \"db/migrate/*.rb ==\n\t// schema_migrations\" reasoning.\n\tc.Check(todo, check.HasLen, 0)\n}\n"
  },
  {
    "path": "lib/boot/service.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage boot\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"path/filepath\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// Run a service using the arvados-server binary.\n//\n// In future this will bring up the service in the current process,\n// but for now (at least until the subcommand handlers get a shutdown\n// mechanism) it starts a child process using the arvados-server\n// binary, which the supervisor is assumed to have installed in\n// {super.tempdir}/bin/.\ntype runServiceCommand struct {\n\tname    string           // arvados-server subcommand, e.g., \"controller\"\n\tsvc     arvados.Service  // cluster.Services.* entry with the desired InternalURLs\n\tdepends []supervisedTask // wait for these tasks before starting\n}\n\nfunc (runner runServiceCommand) String() string {\n\treturn runner.name\n}\n\nfunc (runner runServiceCommand) Run(ctx context.Context, fail func(error), super *Supervisor) error {\n\tbinfile := filepath.Join(super.bindir, \"arvados-server\")\n\terr := super.RunProgram(ctx, super.bindir, runOptions{}, binfile, \"-version\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tsuper.wait(ctx, createCertificates{})\n\tsuper.wait(ctx, runner.depends...)\n\tfor u := range runner.svc.InternalURLs {\n\t\tu := u\n\t\tif islocal, err := addrIsLocal(u.Host); err != nil {\n\t\t\treturn err\n\t\t} else if !islocal {\n\t\t\tcontinue\n\t\t}\n\t\tsuper.waitShutdown.Add(1)\n\t\tgo func() {\n\t\t\tdefer super.waitShutdown.Done()\n\t\t\tfail(super.RunProgram(ctx, super.tempdir, runOptions{\n\t\t\t\tenv: []string{\n\t\t\t\t\t\"ARVADOS_SERVICE_INTERNAL_URL=\" + u.String(),\n\t\t\t\t\t// Child process should not\n\t\t\t\t\t// try to tell systemd that we\n\t\t\t\t\t// are ready.\n\t\t\t\t\t\"NOTIFY_SOCKET=\",\n\t\t\t\t},\n\t\t\t}, binfile, runner.name, \"-config\", super.configfile))\n\t\t}()\n\t}\n\treturn nil\n}\n\n// Run a Go service that isn't bundled in arvados-server.\ntype runGoProgram struct {\n\tsrc     string           // source dir, e.g., \"services/keepproxy\"\n\tsvc     arvados.Service  // cluster.Services.* entry with the desired InternalURLs\n\tdepends []supervisedTask // wait for these tasks before starting\n}\n\nfunc (runner runGoProgram) String() string {\n\t_, basename := filepath.Split(runner.src)\n\treturn basename\n}\n\nfunc (runner runGoProgram) Run(ctx context.Context, fail func(error), super *Supervisor) error {\n\tif len(runner.svc.InternalURLs) == 0 {\n\t\treturn errors.New(\"bug: runGoProgram needs non-empty svc.InternalURLs\")\n\t}\n\n\tbinfile, err := super.installGoProgram(ctx, runner.src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ctx.Err() != nil {\n\t\treturn ctx.Err()\n\t}\n\n\terr = super.RunProgram(ctx, super.tempdir, runOptions{}, binfile, \"-version\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsuper.wait(ctx, createCertificates{})\n\tsuper.wait(ctx, runner.depends...)\n\tfor u := range runner.svc.InternalURLs {\n\t\tu := u\n\t\tif islocal, err := addrIsLocal(u.Host); err != nil {\n\t\t\treturn err\n\t\t} else if !islocal {\n\t\t\tcontinue\n\t\t}\n\t\tsuper.waitShutdown.Add(1)\n\t\tgo func() {\n\t\t\tdefer super.waitShutdown.Done()\n\t\t\tfail(super.RunProgram(ctx, super.tempdir, runOptions{env: []string{\"ARVADOS_SERVICE_INTERNAL_URL=\" + u.String()}}, binfile))\n\t\t}()\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "lib/boot/supervisor.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage boot\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/rand\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"os/signal\"\n\t\"os/user\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/service\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/health\"\n\t\"github.com/fsnotify/fsnotify\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype Supervisor struct {\n\t// Config file location like \"/etc/arvados/config.yml\", or \"-\"\n\t// to read from Stdin (see below).\n\tConfigPath string\n\t// Literal config file (useful for test suites). If non-empty,\n\t// this is used instead of ConfigPath.\n\tConfigYAML string\n\t// Path to arvados source tree. Only used for dev/test\n\t// clusters.\n\tSourcePath string\n\t// Version number to build into binaries. Only used for\n\t// dev/test clusters.\n\tSourceVersion string\n\t// \"development\" or \"test\".\n\tClusterType string\n\t// Listening address for external services, and internal\n\t// services whose InternalURLs are not explicitly configured.\n\t// If blank, listen on the configured controller ExternalURL\n\t// host; if that is also blank, listen on all addresses\n\t// (0.0.0.0).\n\tListenHost string\n\t// Default host:port for controller ExternalURL if not\n\t// explicitly configured in config file. If blank, use a\n\t// random port on ListenHost.\n\tControllerAddr string\n\n\tNoWorkbench1         bool\n\tNoWorkbench2         bool\n\tOwnTemporaryDatabase bool\n\tStdin                io.Reader\n\tStderr               io.Writer\n\n\tlogger   logrus.FieldLogger\n\tcluster  *arvados.Cluster       // nil if this is a multi-cluster supervisor\n\tchildren map[string]*Supervisor // nil if this is a single-cluster supervisor\n\n\tctx           context.Context\n\tcancel        context.CancelFunc\n\tdone          chan struct{}      // closed when child procs/services have shut down\n\terr           error              // error that caused shutdown (valid when done is closed)\n\thealthChecker *health.Aggregator // nil if this is a multi-cluster supervisor, or still booting\n\ttasksReady    map[string]chan bool\n\twaitShutdown  sync.WaitGroup\n\n\tbindir     string\n\ttempdir    string // in production mode, this is accessible only to root\n\twwwtempdir string // in production mode, this is accessible only to www-data\n\tconfigfile string\n\tenviron    []string // for child processes\n}\n\nfunc (super *Supervisor) Clusters() map[string]*arvados.Cluster {\n\tm := map[string]*arvados.Cluster{}\n\tif super.cluster != nil {\n\t\tm[super.cluster.ClusterID] = super.cluster\n\t}\n\tfor id, super2 := range super.children {\n\t\tm[id] = super2.Cluster(\"\")\n\t}\n\treturn m\n}\n\nfunc (super *Supervisor) Cluster(id string) *arvados.Cluster {\n\tif super.children != nil {\n\t\treturn super.children[id].Cluster(id)\n\t} else {\n\t\treturn super.cluster\n\t}\n}\n\nfunc (super *Supervisor) Start(ctx context.Context) {\n\tsuper.logger = ctxlog.FromContext(ctx)\n\tsuper.ctx, super.cancel = context.WithCancel(ctx)\n\tsuper.done = make(chan struct{})\n\n\tsigch := make(chan os.Signal, 1)\n\tsignal.Notify(sigch, syscall.SIGINT, syscall.SIGTERM, 
syscall.SIGHUP)\n\tgo func() {\n\t\tdefer signal.Stop(sigch)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase sig := <-sigch:\n\t\t\t\tsuper.logger.WithField(\"signal\", sig).Info(\"caught signal\")\n\t\t\t\tif super.err == nil {\n\t\t\t\t\tif sig == syscall.SIGHUP {\n\t\t\t\t\t\tsuper.err = errNeedConfigReload\n\t\t\t\t\t} else {\n\t\t\t\t\t\tsuper.err = fmt.Errorf(\"caught signal %s\", sig)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tsuper.cancel()\n\t\t\t}\n\t\t}\n\t}()\n\n\tloaderStdin := super.Stdin\n\tif super.ConfigYAML != \"\" {\n\t\tloaderStdin = bytes.NewBufferString(super.ConfigYAML)\n\t}\n\tloader := config.NewLoader(loaderStdin, super.logger)\n\tloader.SkipLegacy = true\n\tloader.SkipAPICalls = true\n\tloader.Path = super.ConfigPath\n\tif super.ConfigYAML != \"\" {\n\t\tloader.Path = \"-\"\n\t}\n\tcfg, err := loader.Load()\n\tif err != nil {\n\t\tsuper.err = err\n\t\tclose(super.done)\n\t\tsuper.cancel()\n\t\treturn\n\t}\n\n\tif super.ConfigPath != \"\" && super.ConfigPath != \"-\" && cfg.AutoReloadConfig {\n\t\tgo watchConfig(super.ctx, super.logger, super.ConfigPath, copyConfig(cfg), func() {\n\t\t\tif super.err == nil {\n\t\t\t\tsuper.err = errNeedConfigReload\n\t\t\t}\n\t\t\tsuper.cancel()\n\t\t})\n\t}\n\n\tif len(cfg.Clusters) > 1 {\n\t\tsuper.startFederation(cfg)\n\t\tgo func() {\n\t\t\tdefer super.cancel()\n\t\t\tdefer close(super.done)\n\t\t\tfor _, super2 := range super.children {\n\t\t\t\terr := super2.Wait()\n\t\t\t\tif super.err == nil {\n\t\t\t\t\tsuper.err = err\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t} else {\n\t\tgo func() {\n\t\t\tdefer super.cancel()\n\t\t\tdefer close(super.done)\n\t\t\tsuper.cluster, super.err = cfg.GetCluster(\"\")\n\t\t\tif super.err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr := super.runCluster()\n\t\t\tif err != nil {\n\t\t\t\tsuper.logger.WithError(err).Info(\"supervisor shut down\")\n\t\t\t\tif super.err == nil {\n\t\t\t\t\tsuper.err = err\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n\n// Wait returns when all child processes and goroutines have exited.\nfunc (super *Supervisor) Wait() error {\n\t<-super.done\n\treturn super.err\n}\n\n// startFederation starts a child Supervisor for each cluster in the\n// given config. 
Each is a copy of the original/parent with the\n// original config reduced to a single cluster.\nfunc (super *Supervisor) startFederation(cfg *arvados.Config) {\n\tsuper.children = map[string]*Supervisor{}\n\tfor id, cc := range cfg.Clusters {\n\t\tyaml, err := json.Marshal(arvados.Config{Clusters: map[string]arvados.Cluster{id: cc}})\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"json.Marshal partial config: %s\", err))\n\t\t}\n\t\tsuper2 := &Supervisor{\n\t\t\tConfigPath:           \"-\",\n\t\t\tConfigYAML:           string(yaml),\n\t\t\tSourcePath:           super.SourcePath,\n\t\t\tSourceVersion:        super.SourceVersion,\n\t\t\tClusterType:          super.ClusterType,\n\t\t\tListenHost:           super.ListenHost,\n\t\t\tControllerAddr:       super.ControllerAddr,\n\t\t\tNoWorkbench1:         super.NoWorkbench1,\n\t\t\tNoWorkbench2:         super.NoWorkbench2,\n\t\t\tOwnTemporaryDatabase: super.OwnTemporaryDatabase,\n\t\t\tStdin:                super.Stdin,\n\t\t\tStderr:               super.Stderr,\n\t\t}\n\t\tif super2.ClusterType == \"test\" {\n\t\t\tsuper2.Stderr = &service.LogPrefixer{\n\t\t\t\tWriter: super.Stderr,\n\t\t\t\tPrefix: []byte(\"[\" + id + \"] \"),\n\t\t\t}\n\t\t}\n\t\tsuper2.Start(super.ctx)\n\t\tsuper.children[id] = super2\n\t}\n}\n\nfunc (super *Supervisor) runCluster() error {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif super.ClusterType == \"test\" && super.SourcePath == \"\" {\n\t\t// When invoked by test suite, default to current\n\t\t// source tree\n\t\tbuf, err := exec.Command(\"git\", \"rev-parse\", \"--show-toplevel\").CombinedOutput()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"git rev-parse: %w\", err)\n\t\t}\n\t\tsuper.SourcePath = strings.TrimSuffix(string(buf), \"\\n\")\n\t} else if !strings.HasPrefix(super.SourcePath, \"/\") {\n\t\tsuper.SourcePath = filepath.Join(cwd, super.SourcePath)\n\t}\n\tsuper.SourcePath, err = filepath.EvalSymlinks(super.SourcePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif super.ListenHost == \"\" {\n\t\tu := url.URL(super.cluster.Services.Controller.ExternalURL)\n\t\tsuper.ListenHost = u.Hostname()\n\t\tif super.ListenHost == \"\" {\n\t\t\tsuper.ListenHost = \"0.0.0.0\"\n\t\t}\n\t}\n\n\tif super.ClusterType == \"production\" {\n\t\t// FIXME: This used to return paths set up by `arvados-server install`,\n\t\t// which is no longer a thing.\n\t\treturn fmt.Errorf(\"production cluster type not implemented\")\n\t} else {\n\t\tsuper.tempdir, err = ioutil.TempDir(\"\", \"arvados-server-boot-\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer os.RemoveAll(super.tempdir)\n\t\tsuper.wwwtempdir = super.tempdir\n\t\tsuper.bindir = filepath.Join(super.tempdir, \"bin\")\n\t\tif err := os.Mkdir(super.bindir, 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Fill in any missing config keys, and write the resulting\n\t// config in the temp dir for child services to use.\n\terr = super.autofillConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tconffile, err := os.OpenFile(filepath.Join(super.wwwtempdir, \"config.yml\"), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conffile.Close()\n\terr = json.NewEncoder(conffile).Encode(arvados.Config{\n\t\tClusters: map[string]arvados.Cluster{\n\t\t\tsuper.cluster.ClusterID: *super.cluster}})\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = conffile.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsuper.configfile = conffile.Name()\n\n\tsuper.environ = 
os.Environ()\n\tsuper.cleanEnv([]string{\"ARVADOS_\"})\n\tif super.ClusterType == \"test\" {\n\t\tsuper.setEnv(\"ARVADOS_TEST_PRIVESC\", os.Getenv(\"ARVADOS_TEST_PRIVESC\"))\n\t}\n\tsuper.setEnv(\"ARVADOS_CONFIG\", super.configfile)\n\tsuper.setEnv(\"RAILS_ENV\", super.ClusterType)\n\tsuper.setEnv(\"TMPDIR\", super.tempdir)\n\tif super.ClusterType != \"production\" {\n\t\tsuper.prependEnv(\"PATH\", super.tempdir+\"/bin:\")\n\t}\n\tsuper.setEnv(\"ARVADOS_SERVER_ADDRESS\", super.ListenHost)\n\tif super.ClusterType == \"test\" {\n\t\tsuper.setEnv(\"ARVADOS_USE_KEEP_ACCESSIBLE_API\", \"true\")\n\t}\n\n\t// Now that we have the config, replace the bootstrap logger\n\t// with a new one according to the logging config.\n\tloglevel := super.cluster.SystemLogs.LogLevel\n\tif s := os.Getenv(\"ARVADOS_DEBUG\"); s != \"\" && s != \"0\" {\n\t\tloglevel = \"debug\"\n\t}\n\tsuper.logger = ctxlog.New(super.Stderr, super.cluster.SystemLogs.Format, loglevel).WithFields(logrus.Fields{\n\t\t\"PID\": os.Getpid(),\n\t})\n\n\tif super.SourceVersion == \"\" && super.ClusterType == \"production\" {\n\t\t// don't need SourceVersion\n\t} else if super.SourceVersion == \"\" {\n\t\t// Find current source tree version.\n\t\tvar buf bytes.Buffer\n\t\terr = super.RunProgram(super.ctx, super.SourcePath, runOptions{output: &buf}, \"git\", \"diff\", \"--shortstat\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdirty := buf.Len() > 0\n\t\tbuf.Reset()\n\t\terr = super.RunProgram(super.ctx, super.SourcePath, runOptions{output: &buf}, \"git\", \"log\", \"-n1\", \"--format=%H\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsuper.SourceVersion = strings.TrimSpace(buf.String())\n\t\tif dirty {\n\t\t\tsuper.SourceVersion += \"+uncommitted\"\n\t\t}\n\t} else {\n\t\treturn errors.New(\"specifying a version to run is not yet supported\")\n\t}\n\n\t_, err = super.installGoProgram(super.ctx, \"cmd/arvados-server\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = super.setupRubyEnv()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttasks := []supervisedTask{\n\t\tcreateCertificates{},\n\t\trunPostgreSQL{},\n\t\trunNginx{},\n\t\trailsDatabase{},\n\t\trunServiceCommand{name: \"controller\", svc: super.cluster.Services.Controller, depends: []supervisedTask{railsDatabase{}}},\n\t\trunServiceCommand{name: \"health\", svc: super.cluster.Services.Health},\n\t\trunServiceCommand{name: \"keepproxy\", svc: super.cluster.Services.Keepproxy, depends: []supervisedTask{runPassenger{src: \"services/api\"}}},\n\t\trunServiceCommand{name: \"keepstore\", svc: super.cluster.Services.Keepstore},\n\t\trunServiceCommand{name: \"keep-web\", svc: super.cluster.Services.WebDAV},\n\t\trunServiceCommand{name: \"ws\", svc: super.cluster.Services.Websocket, depends: []supervisedTask{railsDatabase{}}},\n\t\tinstallPassenger{src: \"services/api\", varlibdir: \"railsapi\"},\n\t\trunPassenger{src: \"services/api\", varlibdir: \"railsapi\", svc: super.cluster.Services.RailsAPI, depends: []supervisedTask{\n\t\t\tcreateCertificates{},\n\t\t\tinstallPassenger{src: \"services/api\", varlibdir: \"railsapi\"},\n\t\t\trailsDatabase{},\n\t\t}},\n\t}\n\tif !super.NoWorkbench1 {\n\t\treturn errors.New(\"workbench1 is no longer supported\")\n\t}\n\tif !super.NoWorkbench2 {\n\t\ttasks = append(tasks,\n\t\t\trunWorkbench2{svc: super.cluster.Services.Workbench2},\n\t\t)\n\t}\n\tif super.ClusterType != \"test\" {\n\t\ttasks = append(tasks,\n\t\t\trunServiceCommand{name: \"keep-balance\", svc: super.cluster.Services.Keepbalance},\n\t\t)\n\t}\n\tif 
super.cluster.Containers.CloudVMs.Enable {\n\t\ttasks = append(tasks,\n\t\t\trunServiceCommand{name: \"dispatch-cloud\", svc: super.cluster.Services.DispatchCloud},\n\t\t)\n\t}\n\tsuper.tasksReady = map[string]chan bool{}\n\tfor _, task := range tasks {\n\t\tsuper.tasksReady[task.String()] = make(chan bool)\n\t}\n\tfor _, task := range tasks {\n\t\ttask := task\n\t\tfail := func(err error) {\n\t\t\tif super.ctx.Err() != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsuper.cancel()\n\t\t\tsuper.logger.WithField(\"task\", task.String()).WithError(err).Error(\"task failed\")\n\t\t}\n\t\tgo func() {\n\t\t\tsuper.logger.WithField(\"task\", task.String()).Info(\"starting\")\n\t\t\terr := task.Run(super.ctx, fail, super)\n\t\t\tif err != nil {\n\t\t\t\tfail(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tclose(super.tasksReady[task.String()])\n\t\t}()\n\t}\n\terr = super.wait(super.ctx, tasks...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsuper.logger.Info(\"all startup tasks are complete; starting health checks\")\n\tsuper.healthChecker = &health.Aggregator{Cluster: super.cluster}\n\t<-super.ctx.Done()\n\tsuper.logger.Info(\"shutting down\")\n\tsuper.waitShutdown.Wait()\n\treturn super.ctx.Err()\n}\n\nfunc (super *Supervisor) wait(ctx context.Context, tasks ...supervisedTask) error {\n\tticker := time.NewTicker(15 * time.Second)\n\tdefer ticker.Stop()\n\tfor _, task := range tasks {\n\t\tch, ok := super.tasksReady[task.String()]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"no such task: %s\", task)\n\t\t}\n\t\tsuper.logger.WithField(\"task\", task.String()).Info(\"waiting\")\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ch:\n\t\t\t\tsuper.logger.WithField(\"task\", task.String()).Info(\"ready\")\n\t\t\tcase <-ctx.Done():\n\t\t\t\tsuper.logger.WithField(\"task\", task.String()).Info(\"task was never ready\")\n\t\t\t\treturn ctx.Err()\n\t\t\tcase <-ticker.C:\n\t\t\t\tsuper.logger.WithField(\"task\", task.String()).Info(\"still waiting...\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\n// Stop shuts down all child processes and goroutines, and returns\n// when all of them have exited.\nfunc (super *Supervisor) Stop() {\n\tsuper.cancel()\n\t<-super.done\n}\n\n// WaitReady waits for the cluster(s) to be ready to handle requests,\n// then returns true. 
If startup fails, it returns false.\nfunc (super *Supervisor) WaitReady() bool {\n\tif super.children != nil {\n\t\tfor id, super2 := range super.children {\n\t\t\tsuper.logger.Infof(\"waiting for %s to be ready\", id)\n\t\t\tif !super2.WaitReady() {\n\t\t\t\tsuper.logger.Infof(\"%s startup failed\", id)\n\t\t\t\tsuper.Stop()\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tsuper.logger.Infof(\"%s is ready\", id)\n\t\t}\n\t\tsuper.logger.Info(\"all clusters are ready\")\n\t\treturn true\n\t}\n\tticker := time.NewTicker(time.Second)\n\tdefer ticker.Stop()\n\tfor waiting := \"all\"; waiting != \"\"; {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\tcase <-super.ctx.Done():\n\t\t\tsuper.Stop()\n\t\t\treturn false\n\t\t}\n\t\tif super.healthChecker == nil {\n\t\t\t// not set up yet\n\t\t\tcontinue\n\t\t}\n\t\tresp := super.healthChecker.ClusterHealth()\n\t\t// The overall health check (resp.Health==\"OK\") might\n\t\t// never pass due to missing components (like\n\t\t// arvados-dispatch-cloud in a test cluster), so\n\t\t// instead we wait for all configured components to\n\t\t// pass.\n\t\twaiting = \"\"\n\t\tfor target, check := range resp.Checks {\n\t\t\tif check.Health != \"OK\" {\n\t\t\t\twaiting += \" \" + target\n\t\t\t}\n\t\t}\n\t\tif waiting != \"\" {\n\t\t\tsuper.logger.WithField(\"targets\", waiting[1:]).Info(\"waiting\")\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (super *Supervisor) prependEnv(key, prepend string) {\n\tfor i, s := range super.environ {\n\t\tif strings.HasPrefix(s, key+\"=\") {\n\t\t\tsuper.environ[i] = key + \"=\" + prepend + s[len(key)+1:]\n\t\t\treturn\n\t\t}\n\t}\n\tsuper.environ = append(super.environ, key+\"=\"+prepend)\n}\n\nfunc (super *Supervisor) cleanEnv(prefixes []string) {\n\tvar cleaned []string\n\tfor _, s := range super.environ {\n\t\tdrop := false\n\t\tfor _, p := range prefixes {\n\t\t\tif strings.HasPrefix(s, p) {\n\t\t\t\tdrop = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !drop {\n\t\t\tcleaned = append(cleaned, s)\n\t\t}\n\t}\n\tsuper.environ = cleaned\n}\n\nfunc (super *Supervisor) setEnv(key, val string) {\n\tfor i, s := range super.environ {\n\t\tif strings.HasPrefix(s, key+\"=\") {\n\t\t\tsuper.environ[i] = key + \"=\" + val\n\t\t\treturn\n\t\t}\n\t}\n\tsuper.environ = append(super.environ, key+\"=\"+val)\n}\n\n// Remove all but the first occurrence of each env var.\nfunc dedupEnv(in []string) []string {\n\tsaw := map[string]bool{}\n\tvar out []string\n\tfor _, kv := range in {\n\t\tif split := strings.Index(kv, \"=\"); split < 1 {\n\t\t\tpanic(\"invalid environment var: \" + kv)\n\t\t} else if saw[kv[:split]] {\n\t\t\tcontinue\n\t\t} else {\n\t\t\tsaw[kv[:split]] = true\n\t\t\tout = append(out, kv)\n\t\t}\n\t}\n\treturn out\n}\n\nfunc (super *Supervisor) installGoProgram(ctx context.Context, srcpath string) (string, error) {\n\t_, basename := filepath.Split(srcpath)\n\tbinfile := filepath.Join(super.bindir, basename)\n\tif super.ClusterType == \"production\" {\n\t\treturn binfile, nil\n\t}\n\terr := super.RunProgram(ctx, filepath.Join(super.SourcePath, srcpath), runOptions{env: []string{\"GOBIN=\" + super.bindir}}, \"go\", \"install\", \"-ldflags\", \"-X git.arvados.org/arvados.git/lib/cmd.version=\"+super.SourceVersion+\" -X main.version=\"+super.SourceVersion)\n\treturn binfile, err\n}\n\nfunc (super *Supervisor) setupRubyEnv() error {\n\tsuper.cleanEnv([]string{\n\t\t\"GEM_HOME=\",\n\t\t\"GEM_PATH=\",\n\t})\n\tcmd := exec.Command(\"gem\", \"env\", \"gempath\")\n\tif super.ClusterType == \"production\" {\n\t\tcmd.Args = append([]string{\"sudo\", \"-u\", 
\"www-data\", \"-E\", \"HOME=/var/www\"}, cmd.Args...)\n\t\tpath, err := exec.LookPath(\"sudo\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"LookPath(\\\"sudo\\\"): %w\", err)\n\t\t}\n\t\tcmd.Path = path\n\t}\n\tcmd.Stderr = super.Stderr\n\tcmd.Env = super.environ\n\tbuf, err := cmd.Output() // /var/www/.local/share/gem/ruby/3.1.0/bin:...\n\tif err != nil || len(buf) == 0 {\n\t\treturn fmt.Errorf(\"gem env gempath: %w\", err)\n\t}\n\tgempath := string(bytes.Split(buf, []byte{':'})[0])\n\tsuper.prependEnv(\"PATH\", gempath+\"/bin:\")\n\tsuper.setEnv(\"GEM_HOME\", gempath)\n\tsuper.setEnv(\"GEM_PATH\", gempath)\n\t// Passenger install doesn't work unless $HOME is ~user\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsuper.setEnv(\"HOME\", u.HomeDir)\n\treturn nil\n}\n\nfunc (super *Supervisor) lookPath(prog string) string {\n\tfor _, val := range super.environ {\n\t\tif strings.HasPrefix(val, \"PATH=\") {\n\t\t\tfor _, dir := range filepath.SplitList(val[5:]) {\n\t\t\t\tpath := filepath.Join(dir, prog)\n\t\t\t\tif fi, err := os.Stat(path); err == nil && fi.Mode()&0111 != 0 {\n\t\t\t\t\treturn path\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn prog\n}\n\ntype runOptions struct {\n\toutput io.Writer // attach stdout\n\tenv    []string  // add/replace environment variables\n\tuser   string    // run as specified user\n\tstdin  io.Reader\n}\n\n// RunProgram runs prog with args, using dir as working directory. If ctx is\n// cancelled while the child is running, RunProgram terminates the child, waits\n// for it to exit, then returns.\n//\n// Child's environment will have our env vars, plus any given in env.\n//\n// Child's stdout will be written to output if non-nil, otherwise the\n// boot command's stderr.\nfunc (super *Supervisor) RunProgram(ctx context.Context, dir string, opts runOptions, prog string, args ...string) error {\n\tcmdline := fmt.Sprintf(\"%s\", append([]string{prog}, args...))\n\tsuper.logger.WithField(\"command\", cmdline).WithField(\"dir\", dir).Info(\"executing\")\n\n\tlogprefix := prog\n\t{\n\t\tinnerargs := args\n\t\tif logprefix == \"sudo\" {\n\t\t\tfor i := 0; i < len(args); i++ {\n\t\t\t\tif args[i] == \"-u\" {\n\t\t\t\t\ti++\n\t\t\t\t} else if args[i] == \"-E\" || strings.Contains(args[i], \"=\") {\n\t\t\t\t} else {\n\t\t\t\t\tlogprefix = args[i]\n\t\t\t\t\tinnerargs = args[i+1:]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tlogprefix = strings.TrimPrefix(logprefix, super.tempdir+\"/bin/\")\n\t\tif logprefix == \"bundle\" && len(innerargs) > 2 && innerargs[0] == \"exec\" {\n\t\t\t_, dirbase := filepath.Split(dir)\n\t\t\tlogprefix = innerargs[1] + \"@\" + dirbase\n\t\t} else if logprefix == \"arvados-server\" && len(args) > 1 {\n\t\t\tlogprefix = args[0]\n\t\t}\n\t\tif !strings.HasPrefix(dir, \"/\") {\n\t\t\tlogprefix = dir + \": \" + logprefix\n\t\t}\n\t}\n\n\tcmd := exec.Command(super.lookPath(prog), args...)\n\tcmd.Stdin = opts.stdin\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogwriter := &service.LogPrefixer{Writer: super.Stderr, Prefix: []byte(\"[\" + logprefix + \"] \")}\n\tvar copiers sync.WaitGroup\n\tcopiers.Add(1)\n\tgo func() {\n\t\tio.Copy(logwriter, stderr)\n\t\tcopiers.Done()\n\t}()\n\tcopiers.Add(1)\n\tgo func() {\n\t\tif opts.output == nil {\n\t\t\tio.Copy(logwriter, stdout)\n\t\t} else {\n\t\t\tio.Copy(opts.output, stdout)\n\t\t}\n\t\tcopiers.Done()\n\t}()\n\n\tif strings.HasPrefix(dir, \"/\") {\n\t\tcmd.Dir = dir\n\t} 
else {\n\t\tcmd.Dir = filepath.Join(super.SourcePath, dir)\n\t}\n\tenv := append([]string(nil), opts.env...)\n\tenv = append(env, super.environ...)\n\tcmd.Env = dedupEnv(env)\n\n\tif opts.user != \"\" {\n\t\t// Note: We use this approach instead of \"sudo\"\n\t\t// because in certain circumstances (we are pid 1 in a\n\t\t// docker container, and our passenger child process\n\t\t// changes to pgid 1) the intermediate sudo process\n\t\t// notices we have the same pgid as our child and\n\t\t// refuses to propagate signals from us to our child,\n\t\t// so we can't signal/shutdown our passenger/rails\n\t\t// apps. \"chpst\" or \"setuidgid\" would work, but these\n\t\t// few lines avoid depending on runit/daemontools.\n\t\tu, err := user.Lookup(opts.user)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"user.Lookup(%q): %w\", opts.user, err)\n\t\t}\n\t\tuid, _ := strconv.Atoi(u.Uid)\n\t\tgid, _ := strconv.Atoi(u.Gid)\n\t\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\t\tCredential: &syscall.Credential{\n\t\t\t\tUid: uint32(uid),\n\t\t\t\tGid: uint32(gid),\n\t\t\t},\n\t\t}\n\t}\n\n\texited := false\n\tdefer func() { exited = true }()\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tlog := ctxlog.FromContext(ctx).WithFields(logrus.Fields{\"dir\": dir, \"cmdline\": cmdline})\n\t\tfor !exited {\n\t\t\tif cmd.Process == nil {\n\t\t\t\tlog.Debug(\"waiting for child process to start\")\n\t\t\t\ttime.Sleep(time.Second / 2)\n\t\t\t} else {\n\t\t\t\tlog.WithField(\"PID\", cmd.Process.Pid).Debug(\"sending SIGTERM\")\n\t\t\t\tcmd.Process.Signal(syscall.SIGTERM)\n\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t\tif !exited {\n\t\t\t\t\tstdout.Close()\n\t\t\t\t\tstderr.Close()\n\t\t\t\t\tlog.WithField(\"PID\", cmd.Process.Pid).Warn(\"still waiting for child process to exit 5s after SIGTERM\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcopiers.Wait()\n\terr = cmd.Wait()\n\tif ctx.Err() != nil {\n\t\t// Return \"context canceled\", instead of the \"killed\"\n\t\t// error that was probably caused by the context being\n\t\t// canceled.\n\t\treturn ctx.Err()\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"%s: error: %v\", cmdline, err)\n\t}\n\treturn nil\n}\n\nfunc (super *Supervisor) autofillConfig() error {\n\tusedPort := map[string]bool{}\n\tnextPort := func(host string) (string, error) {\n\t\tfor {\n\t\t\tport, err := AvailablePort(host)\n\t\t\tif err != nil {\n\t\t\t\tport, err = AvailablePort(super.ListenHost)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tif usedPort[port] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tusedPort[port] = true\n\t\t\treturn port, nil\n\t\t}\n\t}\n\tif super.cluster.Services.Controller.ExternalURL.Host == \"\" {\n\t\th, p, err := net.SplitHostPort(super.ControllerAddr)\n\t\tif err != nil && super.ControllerAddr != \"\" {\n\t\t\treturn fmt.Errorf(\"SplitHostPort(ControllerAddr %q): %w\", super.ControllerAddr, err)\n\t\t}\n\t\tif h == \"\" {\n\t\t\th = super.ListenHost\n\t\t}\n\t\tif p == \"0\" || p == \"\" {\n\t\t\tp, err = nextPort(h)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tsuper.cluster.Services.Controller.ExternalURL = arvados.URL{Scheme: \"https\", Host: net.JoinHostPort(h, p), Path: \"/\"}\n\t}\n\tu := url.URL(super.cluster.Services.Controller.ExternalURL)\n\tdefaultExtHost := u.Hostname()\n\tfor _, svc := range 
[]*arvados.Service{\n\t\t&super.cluster.Services.Controller,\n\t\t&super.cluster.Services.DispatchCloud,\n\t\t&super.cluster.Services.Health,\n\t\t&super.cluster.Services.Keepproxy,\n\t\t&super.cluster.Services.Keepstore,\n\t\t&super.cluster.Services.RailsAPI,\n\t\t&super.cluster.Services.WebDAV,\n\t\t&super.cluster.Services.WebDAVDownload,\n\t\t&super.cluster.Services.Websocket,\n\t\t&super.cluster.Services.Workbench1,\n\t\t&super.cluster.Services.Workbench2,\n\t} {\n\t\tif svc.ExternalURL.Host == \"\" {\n\t\t\tport, err := nextPort(defaultExtHost)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thost := net.JoinHostPort(defaultExtHost, port)\n\t\t\tif svc == &super.cluster.Services.Controller ||\n\t\t\t\tsvc == &super.cluster.Services.Health ||\n\t\t\t\tsvc == &super.cluster.Services.Keepproxy ||\n\t\t\t\tsvc == &super.cluster.Services.WebDAV ||\n\t\t\t\tsvc == &super.cluster.Services.WebDAVDownload ||\n\t\t\t\tsvc == &super.cluster.Services.Workbench1 ||\n\t\t\t\tsvc == &super.cluster.Services.Workbench2 {\n\t\t\t\tsvc.ExternalURL = arvados.URL{Scheme: \"https\", Host: host, Path: \"/\"}\n\t\t\t} else if svc == &super.cluster.Services.Websocket {\n\t\t\t\tsvc.ExternalURL = arvados.URL{Scheme: \"wss\", Host: host, Path: \"/websocket\"}\n\t\t\t}\n\t\t}\n\t\tif super.NoWorkbench1 && svc == &super.cluster.Services.Workbench1 ||\n\t\t\tsuper.NoWorkbench2 && svc == &super.cluster.Services.Workbench2 ||\n\t\t\t!super.cluster.Containers.CloudVMs.Enable && svc == &super.cluster.Services.DispatchCloud {\n\t\t\t// When Workbench is disabled, it gets an\n\t\t\t// ExternalURL (so we have a valid listening\n\t\t\t// port to write in our Nginx config) but no\n\t\t\t// InternalURLs (so health checker doesn't\n\t\t\t// complain).\n\t\t\tcontinue\n\t\t}\n\t\tif len(svc.InternalURLs) == 0 {\n\t\t\tport, err := nextPort(super.ListenHost)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\thost := net.JoinHostPort(super.ListenHost, port)\n\t\t\tsvc.InternalURLs = map[arvados.URL]arvados.ServiceInstance{\n\t\t\t\t{Scheme: \"http\", Host: host, Path: \"/\"}: {},\n\t\t\t}\n\t\t}\n\t}\n\tif super.ClusterType != \"production\" {\n\t\tif super.cluster.SystemRootToken == \"\" {\n\t\t\tsuper.cluster.SystemRootToken = randomHexString(64)\n\t\t}\n\t\tif super.cluster.ManagementToken == \"\" {\n\t\t\tsuper.cluster.ManagementToken = randomHexString(64)\n\t\t}\n\t\tif super.cluster.Collections.BlobSigningKey == \"\" {\n\t\t\tsuper.cluster.Collections.BlobSigningKey = randomHexString(64)\n\t\t}\n\t\tif super.cluster.Users.AnonymousUserToken == \"\" {\n\t\t\tsuper.cluster.Users.AnonymousUserToken = randomHexString(64)\n\t\t}\n\t\tif super.cluster.Containers.DispatchPrivateKey == \"\" {\n\t\t\tbuf, err := ioutil.ReadFile(filepath.Join(super.SourcePath, \"lib\", \"dispatchcloud\", \"test\", \"sshkey_dispatch\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsuper.cluster.Containers.DispatchPrivateKey = string(buf)\n\t\t}\n\t\tsuper.cluster.TLS.Insecure = true\n\t}\n\tif super.ClusterType == \"test\" {\n\t\t// Add a second keepstore process.\n\t\tport, err := nextPort(super.ListenHost)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thost := net.JoinHostPort(super.ListenHost, port)\n\t\tsuper.cluster.Services.Keepstore.InternalURLs[arvados.URL{Scheme: \"http\", Host: host, Path: \"/\"}] = arvados.ServiceInstance{}\n\n\t\t// Create a directory-backed volume for each keepstore\n\t\t// process.\n\t\tsuper.cluster.Volumes = map[string]arvados.Volume{}\n\t\tfor url := range 
super.cluster.Services.Keepstore.InternalURLs {\n\t\t\tvolnum := len(super.cluster.Volumes)\n\t\t\tdatadir := fmt.Sprintf(\"%s/keep%d.data\", super.tempdir, volnum)\n\t\t\tif _, err = os.Stat(datadir + \"/.\"); err == nil {\n\t\t\t} else if !os.IsNotExist(err) {\n\t\t\t\treturn err\n\t\t\t} else if err = os.Mkdir(datadir, 0755); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsuper.cluster.Volumes[fmt.Sprintf(super.cluster.ClusterID+\"-nyw5e-%015d\", volnum)] = arvados.Volume{\n\t\t\t\tDriver:           \"Directory\",\n\t\t\t\tDriverParameters: json.RawMessage(fmt.Sprintf(`{\"Root\":%q}`, datadir)),\n\t\t\t\tAccessViaHosts: map[arvados.URL]arvados.VolumeAccess{\n\t\t\t\t\turl: {},\n\t\t\t\t},\n\t\t\t\tStorageClasses: map[string]bool{\n\t\t\t\t\t\"default\": true,\n\t\t\t\t\t\"foo\":     true,\n\t\t\t\t\t\"bar\":     true,\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t\tsuper.cluster.StorageClasses = map[string]arvados.StorageClassConfig{\n\t\t\t\"default\": {Default: true},\n\t\t\t\"foo\":     {},\n\t\t\t\"bar\":     {},\n\t\t}\n\t}\n\tif super.OwnTemporaryDatabase {\n\t\tport, err := nextPort(\"localhost\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsuper.cluster.PostgreSQL.Connection = arvados.PostgreSQLConnection{\n\t\t\t\"client_encoding\": \"utf8\",\n\t\t\t\"host\":            \"localhost\",\n\t\t\t\"port\":            port,\n\t\t\t\"dbname\":          \"arvados_test\",\n\t\t\t\"user\":            \"arvados\",\n\t\t\t\"password\":        \"insecure_arvados_test\",\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc addrIsLocal(addr string) (bool, error) {\n\tif h, _, err := net.SplitHostPort(addr); err != nil {\n\t\treturn false, err\n\t} else {\n\t\taddr = net.JoinHostPort(h, \"0\")\n\t}\n\tlistener, err := net.Listen(\"tcp\", addr)\n\tif err == nil {\n\t\tlistener.Close()\n\t\treturn true, nil\n\t} else if strings.Contains(err.Error(), \"cannot assign requested address\") {\n\t\treturn false, nil\n\t} else {\n\t\treturn false, err\n\t}\n}\n\nfunc randomHexString(chars int) string {\n\tb := make([]byte, chars/2)\n\t_, err := rand.Read(b)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn fmt.Sprintf(\"%x\", b)\n}\n\nfunc internalPort(svc arvados.Service) (host, port string, err error) {\n\tif len(svc.InternalURLs) > 1 {\n\t\treturn \"\", \"\", errors.New(\"internalPort() doesn't work with multiple InternalURLs\")\n\t}\n\tfor u := range svc.InternalURLs {\n\t\tu := url.URL(u)\n\t\thost, port = u.Hostname(), u.Port()\n\t\tswitch {\n\t\tcase port != \"\":\n\t\tcase u.Scheme == \"https\", u.Scheme == \"ws\":\n\t\t\tport = \"443\"\n\t\tdefault:\n\t\t\tport = \"80\"\n\t\t}\n\t\treturn\n\t}\n\treturn \"\", \"\", fmt.Errorf(\"service has no InternalURLs\")\n}\n\nfunc externalPort(svc arvados.Service) (string, error) {\n\tu := url.URL(svc.ExternalURL)\n\tif p := u.Port(); p != \"\" {\n\t\treturn p, nil\n\t} else if u.Scheme == \"https\" || u.Scheme == \"wss\" {\n\t\treturn \"443\", nil\n\t} else {\n\t\treturn \"80\", nil\n\t}\n}\n\n// Return a TCP port that is not in use on the given local interface\n// address.  
The host argument may be an IP address, a hostname, or\n// empty.\n//\n// AvailablePort(\"\") returns a TCP port that is not in use on any\n// local interface.\nfunc AvailablePort(host string) (string, error) {\n\tln, err := net.Listen(\"tcp\", net.JoinHostPort(host, \"0\"))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer ln.Close()\n\t_, port, err := net.SplitHostPort(ln.Addr().String())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn port, nil\n}\n\n// Try to connect to addr until it works, then return nil. Give up if\n// ctx cancels.\nfunc waitForConnect(ctx context.Context, addr string) error {\n\tctxlog.FromContext(ctx).WithField(\"addr\", addr).Info(\"waitForConnect\")\n\tdialer := net.Dialer{Timeout: time.Second}\n\tfor ctx.Err() == nil {\n\t\tconn, err := dialer.DialContext(ctx, \"tcp\", addr)\n\t\tif err != nil {\n\t\t\ttime.Sleep(time.Second / 10)\n\t\t\tcontinue\n\t\t}\n\t\tconn.Close()\n\t\treturn nil\n\t}\n\treturn ctx.Err()\n}\n\nfunc copyConfig(cfg *arvados.Config) *arvados.Config {\n\tpr, pw := io.Pipe()\n\tgo func() {\n\t\terr := json.NewEncoder(pw).Encode(cfg)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tpw.Close()\n\t}()\n\tcfg2 := new(arvados.Config)\n\terr := json.NewDecoder(pr).Decode(cfg2)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn cfg2\n}\n\nfunc watchConfig(ctx context.Context, logger logrus.FieldLogger, cfgPath string, prevcfg *arvados.Config, fn func()) {\n\twatcher, err := fsnotify.NewWatcher()\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"fsnotify setup failed\")\n\t\treturn\n\t}\n\tdefer watcher.Close()\n\n\terr = watcher.Add(cfgPath)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"fsnotify watcher failed\")\n\t\treturn\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase err, ok := <-watcher.Errors:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlogger.WithError(err).Warn(\"fsnotify watcher reported error\")\n\t\tcase _, ok := <-watcher.Events:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor len(watcher.Events) > 0 {\n\t\t\t\t<-watcher.Events\n\t\t\t}\n\t\t\tloader := config.NewLoader(&bytes.Buffer{}, &logrus.Logger{Out: ioutil.Discard})\n\t\t\tloader.Path = cfgPath\n\t\t\tloader.SkipAPICalls = true\n\t\t\tcfg, err := loader.Load()\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Warn(\"error reloading config file after change detected; ignoring new config for now\")\n\t\t\t} else if reflect.DeepEqual(cfg, prevcfg) {\n\t\t\t\tlogger.Debug(\"config file changed but is still DeepEqual to the existing config\")\n\t\t\t} else {\n\t\t\t\tlogger.Debug(\"config changed, notifying supervisor\")\n\t\t\t\tfn()\n\t\t\t\tprevcfg = cfg\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/boot/supervisor_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage boot\n\nimport (\n\t\"net\"\n\t\"testing\"\n\n\t\"golang.org/x/net/nettest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\ntype supervisorSuite struct{}\n\nvar _ = check.Suite(&supervisorSuite{})\n\nfunc (s *supervisorSuite) TestAddrIsLocal(c *check.C) {\n\tis, err := addrIsLocal(\"0.0.0.0:0\")\n\tc.Check(err, check.IsNil)\n\tc.Check(is, check.Equals, true)\n\n\tis, err = addrIsLocal(\"127.0.0.1:9\")\n\tc.Check(err, check.IsNil)\n\tc.Check(is, check.Equals, true)\n\n\tis, err = addrIsLocal(\"127.0.0.127:32767\")\n\tc.Check(err, check.IsNil)\n\tc.Check(is, check.Equals, true)\n\n\tif nettest.SupportsIPv6() {\n\t\tis, err = addrIsLocal(\"[::1]:32767\")\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(is, check.Equals, true)\n\t}\n\n\tis, err = addrIsLocal(\"8.8.8.8:32767\")\n\tc.Check(err, check.IsNil)\n\tc.Check(is, check.Equals, false)\n\n\tis, err = addrIsLocal(\"example.com:32767\")\n\tc.Check(err, check.IsNil)\n\tc.Check(is, check.Equals, false)\n\n\tis, err = addrIsLocal(\"1.2.3.4.5:32767\")\n\tc.Check(err, check.NotNil)\n\n\tln, err := net.Listen(\"tcp\", \":\")\n\tc.Assert(err, check.IsNil)\n\tdefer ln.Close()\n\tis, err = addrIsLocal(ln.Addr().String())\n\tc.Check(err, check.IsNil)\n\tc.Check(is, check.Equals, true)\n\n}\n"
  },
  {
    "path": "lib/boot/workbench2.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage boot\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"io/ioutil\"\n\t\"os\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\ntype runWorkbench2 struct {\n\tsvc arvados.Service\n}\n\nfunc (runner runWorkbench2) String() string {\n\treturn \"runWorkbench2\"\n}\n\nfunc (runner runWorkbench2) Run(ctx context.Context, fail func(error), super *Supervisor) error {\n\t_, port, err := internalPort(runner.svc)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"bug: no internalPort for %q: %v (%#v)\", runner, err, runner.svc)\n\t}\n\tsuper.waitShutdown.Add(1)\n\tgo func() {\n\t\tdefer super.waitShutdown.Done()\n\t\tif super.ClusterType == \"production\" {\n\t\t\t// FIXME: This used to return paths set up by\n\t\t\t// `arvados-server install`, which is no longer a thing.\n\t\t\tfail(errors.New(\"production cluster type not implemented\"))\n\t\t} else {\n\t\t\t// super.SourcePath might be readonly, so for\n\t\t\t// dev/test mode we make a copy in a writable\n\t\t\t// dir.\n\t\t\tlivedir := super.wwwtempdir + \"/workbench2\"\n\t\t\tif err := super.RunProgram(ctx, super.SourcePath+\"/services/workbench2\", runOptions{}, \"rsync\", \"-a\", \"--delete-after\", super.SourcePath+\"/services/workbench2/\", livedir); err != nil {\n\t\t\t\tfail(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err = os.Mkdir(livedir+\"/public/_health\", 0777); err != nil && !errors.Is(err, fs.ErrExist) {\n\t\t\t\tfail(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err = ioutil.WriteFile(livedir+\"/public/_health/ping\", []byte(`{\"health\":\"OK\"}`), 0666); err != nil {\n\t\t\t\tfail(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tstdinr, stdinw := io.Pipe()\n\t\t\tdefer stdinw.Close()\n\t\t\tgo func() {\n\t\t\t\t<-ctx.Done()\n\t\t\t\tstdinw.Close()\n\t\t\t}()\n\t\t\terr = super.RunProgram(ctx, livedir, runOptions{\n\t\t\t\tenv: []string{\n\t\t\t\t\t\"CI=true\",\n\t\t\t\t\t\"HTTPS=false\",\n\t\t\t\t\t\"PORT=\" + port,\n\t\t\t\t\t\"REACT_APP_ARVADOS_API_HOST=\" + super.cluster.Services.Controller.ExternalURL.Host,\n\t\t\t\t},\n\t\t\t\t// If we don't connect stdin, \"yarn start\" just exits.\n\t\t\t\tstdin: stdinr,\n\t\t\t}, \"yarn\", \"start\")\n\t\t\tfail(errors.New(\"`yarn start` exited\"))\n\t\t}\n\t\tfail(err)\n\t}()\n\treturn nil\n}\n"
  },
  {
    "path": "lib/cli/external.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cli\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os/exec\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n)\n\nvar (\n\tCreate = rubyArvCmd{\"create\"}\n\tEdit   = rubyArvCmd{\"edit\"}\n\n\tCopy = externalCmd{\"arv-copy\"}\n\tTag  = externalCmd{\"arv-tag\"}\n\tWs   = externalCmd{\"arv-ws\"}\n\n\tKeep = cmd.Multi(map[string]cmd.Handler{\n\t\t\"get\":       externalCmd{\"arv-get\"},\n\t\t\"put\":       externalCmd{\"arv-put\"},\n\t\t\"ls\":        externalCmd{\"arv-ls\"},\n\t\t\"normalize\": externalCmd{\"arv-normalize\"},\n\t\t\"docker\":    externalCmd{\"arv-keepdocker\"},\n\t})\n\t// user, group, container, specimen, etc.\n\tAPICall = apiCallCmd{}\n)\n\n// When using the ruby \"arv\" command, flags must come before the\n// subcommand: \"arv --format=yaml get foo\" works, but \"arv get\n// --format=yaml foo\" does not work.\nfunc legacyFlagsToFront(subcommand string, argsin []string) (argsout []string) {\n\tflags, _ := LegacyFlagSet()\n\tflags.SetOutput(ioutil.Discard)\n\tflags.Parse(argsin)\n\tnarg := flags.NArg()\n\targsout = append(argsout, argsin[:len(argsin)-narg]...)\n\targsout = append(argsout, subcommand)\n\targsout = append(argsout, argsin[len(argsin)-narg:]...)\n\treturn\n}\n\ntype apiCallCmd struct{}\n\nfunc (cmd apiCallCmd) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tsplit := strings.Split(prog, \" \")\n\tif len(split) < 2 {\n\t\tfmt.Fprintf(stderr, \"internal error: no api model in %q\\n\", prog)\n\t\treturn 2\n\t}\n\tmodel := split[len(split)-1]\n\treturn rubyArvCmd{model}.RunCommand(prog, args, stdin, stdout, stderr)\n}\n\ntype rubyArvCmd struct {\n\tsubcommand string\n}\n\nfunc (rc rubyArvCmd) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\twrapprog := \"arv\"\n\treturn externalCmd{wrapprog}.RunCommand(wrapprog, legacyFlagsToFront(rc.subcommand, args), stdin, stdout, stderr)\n}\n\ntype externalCmd struct {\n\tprog string\n}\n\nfunc (ec externalCmd) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tcmd := exec.Command(ec.prog, args...)\n\tcmd.Stdin = stdin\n\tcmd.Stdout = stdout\n\tcmd.Stderr = stderr\n\terr := cmd.Run()\n\tswitch err := err.(type) {\n\tcase nil:\n\t\treturn 0\n\tcase *exec.ExitError:\n\t\tstatus := err.Sys().(syscall.WaitStatus)\n\t\tif status.Exited() {\n\t\t\treturn status.ExitStatus()\n\t\t}\n\t\tfmt.Fprintf(stderr, \"%s failed: %s\\n\", ec.prog, err)\n\t\treturn 1\n\tcase *exec.Error:\n\t\tfmt.Fprintln(stderr, err)\n\t\tif ec.prog == \"arv\" {\n\t\t\tfmt.Fprint(stderr, rubyInstallHints)\n\t\t} else if strings.HasPrefix(ec.prog, \"arv-\") {\n\t\t\tfmt.Fprint(stderr, pythonInstallHints)\n\t\t}\n\t\treturn 1\n\tdefault:\n\t\tfmt.Fprintf(stderr, \"error running %s: %s\\n\", ec.prog, err)\n\t\treturn 1\n\t}\n}\n\nvar (\n\trubyInstallHints = `\nNote: This subcommand uses the arvados-cli Ruby gem. If that is not\ninstalled, try \"gem install arvados-cli\", or see\nhttps://doc.arvados.org/install for more details.\n\n`\n\tpythonInstallHints = `\nNote: This subcommand uses the \"arvados\" Python module. If that is\nnot installed, try:\n* \"pip install arvados\" (either as root or in a virtualenv), or\n* \"sudo apt-get install python3-arvados-python-client\", or\n* see https://doc.arvados.org/install for more details.\n\n`\n)\n"
  },
  {
    "path": "lib/cli/flags.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cli\n\nimport (\n\t\"flag\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"rsc.io/getopt\"\n)\n\ntype LegacyFlagValues struct {\n\tFormat  string\n\tDryRun  bool\n\tShort   bool\n\tVerbose bool\n}\n\nfunc LegacyFlagSet() (cmd.FlagSet, *LegacyFlagValues) {\n\tvalues := &LegacyFlagValues{Format: \"json\"}\n\tflags := getopt.NewFlagSet(\"\", flag.ContinueOnError)\n\tflags.BoolVar(&values.DryRun, \"dry-run\", false, \"Don't actually do anything\")\n\tflags.Alias(\"n\", \"dry-run\")\n\tflags.StringVar(&values.Format, \"format\", values.Format, \"Output format: json, yaml, or uuid\")\n\tflags.Alias(\"f\", \"format\")\n\tflags.BoolVar(&values.Short, \"short\", false, \"Return only UUIDs (equivalent to --format=uuid)\")\n\tflags.Alias(\"s\", \"short\")\n\tflags.BoolVar(&values.Verbose, \"verbose\", false, \"Print more debug/progress messages on stderr\")\n\tflags.Alias(\"v\", \"verbose\")\n\treturn flags, values\n}\n"
  },
  {
    "path": "lib/cli/get.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cli\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/ghodss/yaml\"\n)\n\nvar Get cmd.Handler = getCmd{}\n\ntype getCmd struct{}\n\nfunc (getCmd) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(stderr, \"%s\\n\", err)\n\t\t}\n\t}()\n\n\tflags, opts := LegacyFlagSet()\n\tflags.SetOutput(stderr)\n\terr = flags.Parse(args)\n\tif err != nil {\n\t\treturn cmd.EXIT_INVALIDARGUMENT\n\t}\n\tif len(flags.Args()) != 1 {\n\t\tfmt.Fprintf(stderr, \"usage of %s:\\n\", prog)\n\t\tflags.PrintDefaults()\n\t\treturn cmd.EXIT_INVALIDARGUMENT\n\t}\n\tif opts.Short {\n\t\topts.Format = \"uuid\"\n\t}\n\n\tid := flags.Args()[0]\n\tclient := arvados.NewClientFromEnv()\n\tpath, err := client.PathForUUID(\"get\", id)\n\tif err != nil {\n\t\treturn 1\n\t}\n\n\tvar obj map[string]interface{}\n\terr = client.RequestAndDecode(&obj, \"GET\", path, nil, nil)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"GET %s: %s\", path, err)\n\t\treturn 1\n\t}\n\tif opts.Format == \"yaml\" {\n\t\tvar buf []byte\n\t\tbuf, err = yaml.Marshal(obj)\n\t\tif err == nil {\n\t\t\t_, err = stdout.Write(buf)\n\t\t}\n\t} else if opts.Format == \"uuid\" {\n\t\tfmt.Fprintln(stdout, obj[\"uuid\"])\n\t} else {\n\t\tenc := json.NewEncoder(stdout)\n\t\tenc.SetIndent(\"\", \"  \")\n\t\terr = enc.Encode(obj)\n\t}\n\tif err != nil {\n\t\terr = fmt.Errorf(\"encoding: %s\", err)\n\t\treturn 1\n\t}\n\treturn 0\n}\n"
  },
  {
    "path": "lib/cli/get_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cli\n\nimport (\n\t\"bytes\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&GetSuite{})\n\ntype GetSuite struct{}\n\nfunc (s *GetSuite) TestGetCollectionJSON(c *check.C) {\n\tstdout := bytes.NewBuffer(nil)\n\tstderr := bytes.NewBuffer(nil)\n\texited := Get.RunCommand(\"arvados-client get\", []string{arvadostest.FooCollection}, bytes.NewReader(nil), stdout, stderr)\n\tc.Check(stdout.String(), check.Matches, `(?ms){.*\"uuid\": \"`+arvadostest.FooCollection+`\".*}\\n`)\n\tc.Check(stdout.String(), check.Matches, `(?ms){.*\"portable_data_hash\": \"`+regexp.QuoteMeta(arvadostest.FooCollectionPDH)+`\".*}\\n`)\n\tc.Check(stderr.String(), check.Equals, \"\")\n\tc.Check(exited, check.Equals, 0)\n}\n"
  },
  {
    "path": "lib/cloud/azure/azure.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage azure\n\nimport (\n\t\"context\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute\"\n\t\"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-06-01/network\"\n\tstorageacct \"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2018-02-01/storage\"\n\t\"github.com/Azure/azure-sdk-for-go/storage\"\n\t\"github.com/Azure/go-autorest/autorest\"\n\t\"github.com/Azure/go-autorest/autorest/azure\"\n\t\"github.com/Azure/go-autorest/autorest/azure/auth\"\n\t\"github.com/Azure/go-autorest/autorest/to\"\n\t\"github.com/jmcvetta/randutil\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\n// Driver is the azure implementation of the cloud.Driver interface.\nvar Driver = cloud.DriverFunc(newAzureInstanceSet)\n\ntype azureInstanceSetConfig struct {\n\tSubscriptionID                 string\n\tClientID                       string\n\tClientSecret                   string\n\tTenantID                       string\n\tCloudEnvironment               string\n\tResourceGroup                  string\n\tImageResourceGroup             string\n\tLocation                       string\n\tNetwork                        string\n\tNetworkResourceGroup           string\n\tSubnet                         string\n\tStorageAccount                 string\n\tBlobContainer                  string\n\tSharedImageGalleryName         string\n\tSharedImageGalleryImageVersion string\n\tDeleteDanglingResourcesAfter   arvados.Duration\n\tAdminUsername                  string\n}\n\ntype containerWrapper interface {\n\tGetBlobReference(name string) *storage.Blob\n\tListBlobs(params storage.ListBlobsParameters) (storage.BlobListResponse, error)\n}\n\ntype virtualMachinesClientWrapper interface {\n\tcreateOrUpdate(ctx context.Context,\n\t\tresourceGroupName string,\n\t\tVMName string,\n\t\tparameters compute.VirtualMachine) (result compute.VirtualMachine, err error)\n\tdelete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error)\n\tlistComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error)\n}\n\ntype virtualMachinesClientImpl struct {\n\tinner compute.VirtualMachinesClient\n}\n\nfunc (cl *virtualMachinesClientImpl) createOrUpdate(ctx context.Context,\n\tresourceGroupName string,\n\tVMName string,\n\tparameters compute.VirtualMachine) (result compute.VirtualMachine, err error) {\n\n\tfuture, err := cl.inner.CreateOrUpdate(ctx, resourceGroupName, VMName, parameters)\n\tif err != nil {\n\t\treturn compute.VirtualMachine{}, wrapAzureError(err)\n\t}\n\tfuture.WaitForCompletionRef(ctx, cl.inner.Client)\n\tr, err := future.Result(cl.inner)\n\treturn r, wrapAzureError(err)\n}\n\nfunc (cl *virtualMachinesClientImpl) delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {\n\tfuture, err := cl.inner.Delete(ctx, resourceGroupName, VMName)\n\tif err != nil {\n\t\treturn nil, wrapAzureError(err)\n\t}\n\terr = future.WaitForCompletionRef(ctx, cl.inner.Client)\n\treturn future.Response(), wrapAzureError(err)\n}\n\nfunc 
(cl *virtualMachinesClientImpl) listComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error) {\n\tr, err := cl.inner.ListComplete(ctx, resourceGroupName)\n\treturn r, wrapAzureError(err)\n}\n\ntype interfacesClientWrapper interface {\n\tcreateOrUpdate(ctx context.Context,\n\t\tresourceGroupName string,\n\t\tnetworkInterfaceName string,\n\t\tparameters network.Interface) (result network.Interface, err error)\n\tdelete(ctx context.Context, resourceGroupName string, networkInterfaceName string) (result *http.Response, err error)\n\tlistComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error)\n}\n\ntype interfacesClientImpl struct {\n\tinner network.InterfacesClient\n}\n\nfunc (cl *interfacesClientImpl) delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {\n\tfuture, err := cl.inner.Delete(ctx, resourceGroupName, VMName)\n\tif err != nil {\n\t\treturn nil, wrapAzureError(err)\n\t}\n\terr = future.WaitForCompletionRef(ctx, cl.inner.Client)\n\treturn future.Response(), wrapAzureError(err)\n}\n\nfunc (cl *interfacesClientImpl) createOrUpdate(ctx context.Context,\n\tresourceGroupName string,\n\tnetworkInterfaceName string,\n\tparameters network.Interface) (result network.Interface, err error) {\n\n\tfuture, err := cl.inner.CreateOrUpdate(ctx, resourceGroupName, networkInterfaceName, parameters)\n\tif err != nil {\n\t\treturn network.Interface{}, wrapAzureError(err)\n\t}\n\tfuture.WaitForCompletionRef(ctx, cl.inner.Client)\n\tr, err := future.Result(cl.inner)\n\treturn r, wrapAzureError(err)\n}\n\nfunc (cl *interfacesClientImpl) listComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error) {\n\tr, err := cl.inner.ListComplete(ctx, resourceGroupName)\n\treturn r, wrapAzureError(err)\n}\n\ntype disksClientWrapper interface {\n\tlistByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.DiskListPage, err error)\n\tdelete(ctx context.Context, resourceGroupName string, diskName string) (result compute.DisksDeleteFuture, err error)\n}\n\ntype disksClientImpl struct {\n\tinner compute.DisksClient\n}\n\nfunc (cl *disksClientImpl) listByResourceGroup(ctx context.Context, resourceGroupName string) (result compute.DiskListPage, err error) {\n\tr, err := cl.inner.ListByResourceGroup(ctx, resourceGroupName)\n\treturn r, wrapAzureError(err)\n}\n\nfunc (cl *disksClientImpl) delete(ctx context.Context, resourceGroupName string, diskName string) (result compute.DisksDeleteFuture, err error) {\n\tr, err := cl.inner.Delete(ctx, resourceGroupName, diskName)\n\treturn r, wrapAzureError(err)\n}\n\nvar quotaRe = regexp.MustCompile(`(?i:exceed|quota|limit)`)\n\ntype azureRateLimitError struct {\n\tazure.RequestError\n\tfirstRetry time.Time\n}\n\nfunc (ar *azureRateLimitError) EarliestRetry() time.Time {\n\treturn ar.firstRetry\n}\n\ntype azureQuotaError struct {\n\tazure.RequestError\n}\n\nfunc (ar *azureQuotaError) IsQuotaError() bool {\n\treturn true\n}\n\nfunc wrapAzureError(err error) error {\n\tde, ok := err.(autorest.DetailedError)\n\tif !ok {\n\t\treturn err\n\t}\n\trq, ok := de.Original.(*azure.RequestError)\n\tif !ok {\n\t\treturn err\n\t}\n\tif rq.Response == nil {\n\t\treturn err\n\t}\n\tif rq.Response.StatusCode == 429 || len(rq.Response.Header[\"Retry-After\"]) >= 1 {\n\t\t// API throttling\n\t\tra := rq.Response.Header[\"Retry-After\"][0]\n\t\tearliestRetry, parseErr 
:= http.ParseTime(ra)\n\t\tif parseErr != nil {\n\t\t\t// Could not parse as a timestamp, must be number of seconds\n\t\t\tdur, parseErr := strconv.ParseInt(ra, 10, 64)\n\t\t\tif parseErr == nil {\n\t\t\t\tearliestRetry = time.Now().Add(time.Duration(dur) * time.Second)\n\t\t\t} else {\n\t\t\t\t// Couldn't make sense of retry-after,\n\t\t\t\t// so set retry to 20 seconds\n\t\t\t\tearliestRetry = time.Now().Add(20 * time.Second)\n\t\t\t}\n\t\t}\n\t\treturn &azureRateLimitError{*rq, earliestRetry}\n\t}\n\tif rq.ServiceError == nil {\n\t\treturn err\n\t}\n\tif quotaRe.FindString(rq.ServiceError.Code) != \"\" || quotaRe.FindString(rq.ServiceError.Message) != \"\" {\n\t\treturn &azureQuotaError{*rq}\n\t}\n\treturn err\n}\n\ntype azureInstanceSet struct {\n\tazconfig           azureInstanceSetConfig\n\tvmClient           virtualMachinesClientWrapper\n\tnetClient          interfacesClientWrapper\n\tdisksClient        disksClientWrapper\n\timageResourceGroup string\n\tblobcont           containerWrapper\n\tazureEnv           azure.Environment\n\tinterfaces         map[string]network.Interface\n\tdispatcherID       string\n\tnamePrefix         string\n\tctx                context.Context\n\tstopFunc           context.CancelFunc\n\tstopWg             sync.WaitGroup\n\tdeleteNIC          chan string\n\tdeleteBlob         chan storage.Blob\n\tdeleteDisk         chan compute.Disk\n\tlogger             logrus.FieldLogger\n}\n\nfunc newAzureInstanceSet(config json.RawMessage, dispatcherID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (prv cloud.InstanceSet, err error) {\n\tazcfg := azureInstanceSetConfig{}\n\terr = json.Unmarshal(config, &azcfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taz := azureInstanceSet{logger: logger}\n\taz.ctx, az.stopFunc = context.WithCancel(context.Background())\n\terr = az.setup(azcfg, string(dispatcherID))\n\tif err != nil {\n\t\taz.stopFunc()\n\t\treturn nil, err\n\t}\n\treturn &az, nil\n}\n\nfunc (az *azureInstanceSet) setup(azcfg azureInstanceSetConfig, dispatcherID string) (err error) {\n\taz.azconfig = azcfg\n\tvmClient := compute.NewVirtualMachinesClient(az.azconfig.SubscriptionID)\n\tnetClient := network.NewInterfacesClient(az.azconfig.SubscriptionID)\n\tdisksClient := compute.NewDisksClient(az.azconfig.SubscriptionID)\n\tstorageAcctClient := storageacct.NewAccountsClient(az.azconfig.SubscriptionID)\n\n\taz.azureEnv, err = azure.EnvironmentFromName(az.azconfig.CloudEnvironment)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tauthorizer, err := auth.ClientCredentialsConfig{\n\t\tClientID:     az.azconfig.ClientID,\n\t\tClientSecret: az.azconfig.ClientSecret,\n\t\tTenantID:     az.azconfig.TenantID,\n\t\tResource:     az.azureEnv.ResourceManagerEndpoint,\n\t\tAADEndpoint:  az.azureEnv.ActiveDirectoryEndpoint,\n\t}.Authorizer()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvmClient.Authorizer = authorizer\n\tnetClient.Authorizer = authorizer\n\tdisksClient.Authorizer = authorizer\n\tstorageAcctClient.Authorizer = authorizer\n\n\taz.vmClient = &virtualMachinesClientImpl{vmClient}\n\taz.netClient = &interfacesClientImpl{netClient}\n\taz.disksClient = &disksClientImpl{disksClient}\n\n\taz.imageResourceGroup = az.azconfig.ImageResourceGroup\n\tif az.imageResourceGroup == \"\" {\n\t\taz.imageResourceGroup = az.azconfig.ResourceGroup\n\t}\n\n\tvar client storage.Client\n\tif az.azconfig.StorageAccount != \"\" && az.azconfig.BlobContainer != \"\" {\n\t\tresult, err := storageAcctClient.ListKeys(az.ctx, 
az.azconfig.ResourceGroup, az.azconfig.StorageAccount)\n\t\tif err != nil {\n\t\t\taz.logger.WithError(err).Warn(\"Couldn't get account keys\")\n\t\t\treturn err\n\t\t}\n\n\t\tkey1 := *(*result.Keys)[0].Value\n\t\tclient, err = storage.NewBasicClientOnSovereignCloud(az.azconfig.StorageAccount, key1, az.azureEnv)\n\t\tif err != nil {\n\t\t\taz.logger.WithError(err).Warn(\"Couldn't make client\")\n\t\t\treturn err\n\t\t}\n\n\t\tblobsvc := client.GetBlobService()\n\t\taz.blobcont = blobsvc.GetContainerReference(az.azconfig.BlobContainer)\n\t} else if az.azconfig.StorageAccount != \"\" || az.azconfig.BlobContainer != \"\" {\n\t\taz.logger.Error(\"Invalid configuration: StorageAccount and BlobContainer must both be empty or both be set\")\n\t}\n\n\taz.dispatcherID = dispatcherID\n\taz.namePrefix = fmt.Sprintf(\"compute-%s-\", az.dispatcherID)\n\n\taz.stopWg.Add(1)\n\tgo func() {\n\t\tdefer az.stopWg.Done()\n\n\t\ttk := time.NewTicker(5 * time.Minute)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-az.ctx.Done():\n\t\t\t\ttk.Stop()\n\t\t\t\treturn\n\t\t\tcase <-tk.C:\n\t\t\t\tif az.blobcont != nil {\n\t\t\t\t\taz.manageBlobs()\n\t\t\t\t}\n\t\t\t\taz.manageDisks()\n\t\t\t}\n\t\t}\n\t}()\n\n\taz.deleteNIC = make(chan string)\n\taz.deleteBlob = make(chan storage.Blob)\n\taz.deleteDisk = make(chan compute.Disk)\n\n\tfor i := 0; i < 4; i++ {\n\t\tgo func() {\n\t\t\tfor nicname := range az.deleteNIC {\n\t\t\t\t_, delerr := az.netClient.delete(context.Background(), az.azconfig.ResourceGroup, nicname)\n\t\t\t\tif delerr != nil {\n\t\t\t\t\taz.logger.WithError(delerr).Warnf(\"Error deleting %v\", nicname)\n\t\t\t\t} else {\n\t\t\t\t\taz.logger.Printf(\"Deleted NIC %v\", nicname)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tgo func() {\n\t\t\tfor blob := range az.deleteBlob {\n\t\t\t\terr := blob.Delete(nil)\n\t\t\t\tif err != nil {\n\t\t\t\t\taz.logger.WithError(err).Warnf(\"Error deleting %v\", blob.Name)\n\t\t\t\t} else {\n\t\t\t\t\taz.logger.Printf(\"Deleted blob %v\", blob.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\tgo func() {\n\t\t\tfor disk := range az.deleteDisk {\n\t\t\t\t_, err := az.disksClient.delete(az.ctx, az.imageResourceGroup, *disk.Name)\n\t\t\t\tif err != nil {\n\t\t\t\t\taz.logger.WithError(err).Warnf(\"Error deleting disk %+v\", *disk.Name)\n\t\t\t\t} else {\n\t\t\t\t\taz.logger.Printf(\"Deleted disk %v\", *disk.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\treturn nil\n}\n\nfunc (az *azureInstanceSet) cleanupNic(nic network.Interface) {\n\t_, delerr := az.netClient.delete(context.Background(), az.azconfig.ResourceGroup, *nic.Name)\n\tif delerr != nil {\n\t\taz.logger.WithError(delerr).Warnf(\"Error cleaning up NIC after failed create\")\n\t}\n}\n\nfunc (az *azureInstanceSet) Create(\n\tinstanceType arvados.InstanceType,\n\timageID cloud.ImageID,\n\tnewTags cloud.InstanceTags,\n\tinitCommand cloud.InitCommand,\n\tpublicKey ssh.PublicKey) (cloud.Instance, error) {\n\n\taz.stopWg.Add(1)\n\tdefer az.stopWg.Done()\n\n\tif instanceType.AddedScratch > 0 {\n\t\treturn nil, fmt.Errorf(\"cannot create instance type %q: driver does not implement non-zero AddedScratch (%d)\", instanceType.Name, instanceType.AddedScratch)\n\t}\n\n\tname, err := randutil.String(15, \"abcdefghijklmnopqrstuvwxyz0123456789\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tname = az.namePrefix + name\n\n\ttags := map[string]*string{}\n\tfor k, v := range newTags {\n\t\ttags[k] = to.StringPtr(v)\n\t}\n\ttags[\"created-at\"] = to.StringPtr(time.Now().Format(time.RFC3339Nano))\n\n\tnetworkResourceGroup := 
az.azconfig.NetworkResourceGroup\n\tif networkResourceGroup == \"\" {\n\t\tnetworkResourceGroup = az.azconfig.ResourceGroup\n\t}\n\n\tnicParameters := network.Interface{\n\t\tLocation: &az.azconfig.Location,\n\t\tTags:     tags,\n\t\tInterfacePropertiesFormat: &network.InterfacePropertiesFormat{\n\t\t\tIPConfigurations: &[]network.InterfaceIPConfiguration{\n\t\t\t\t{\n\t\t\t\t\tName: to.StringPtr(\"ip1\"),\n\t\t\t\t\tInterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{\n\t\t\t\t\t\tSubnet: &network.Subnet{\n\t\t\t\t\t\t\tID: to.StringPtr(fmt.Sprintf(\"/subscriptions/%s/resourceGroups/%s/providers\"+\n\t\t\t\t\t\t\t\t\"/Microsoft.Network/virtualnetworks/%s/subnets/%s\",\n\t\t\t\t\t\t\t\taz.azconfig.SubscriptionID,\n\t\t\t\t\t\t\t\tnetworkResourceGroup,\n\t\t\t\t\t\t\t\taz.azconfig.Network,\n\t\t\t\t\t\t\t\taz.azconfig.Subnet)),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPrivateIPAllocationMethod: network.Dynamic,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tnic, err := az.netClient.createOrUpdate(az.ctx, az.azconfig.ResourceGroup, name+\"-nic\", nicParameters)\n\tif err != nil {\n\t\treturn nil, wrapAzureError(err)\n\t}\n\n\tvar blobname string\n\tcustomData := base64.StdEncoding.EncodeToString([]byte(\"#!/bin/sh\\n\" + initCommand + \"\\n\"))\n\tvar storageProfile *compute.StorageProfile\n\n\tre := regexp.MustCompile(`^http(s?)://`)\n\tif re.MatchString(string(imageID)) {\n\t\tif az.blobcont == nil {\n\t\t\taz.cleanupNic(nic)\n\t\t\treturn nil, wrapAzureError(errors.New(\"Invalid configuration: can't configure unmanaged image URL without StorageAccount and BlobContainer\"))\n\t\t}\n\t\tblobname = fmt.Sprintf(\"%s-os.vhd\", name)\n\t\tinstanceVhd := fmt.Sprintf(\"https://%s.blob.%s/%s/%s\",\n\t\t\taz.azconfig.StorageAccount,\n\t\t\taz.azureEnv.StorageEndpointSuffix,\n\t\t\taz.azconfig.BlobContainer,\n\t\t\tblobname)\n\t\taz.logger.Warn(\"using deprecated unmanaged image, see https://doc.arvados.org/ to migrate to managed disks\")\n\t\tstorageProfile = &compute.StorageProfile{\n\t\t\tOsDisk: &compute.OSDisk{\n\t\t\t\tOsType:       compute.Linux,\n\t\t\t\tName:         to.StringPtr(name + \"-os\"),\n\t\t\t\tCreateOption: compute.DiskCreateOptionTypesFromImage,\n\t\t\t\tImage: &compute.VirtualHardDisk{\n\t\t\t\t\tURI: to.StringPtr(string(imageID)),\n\t\t\t\t},\n\t\t\t\tVhd: &compute.VirtualHardDisk{\n\t\t\t\t\tURI: &instanceVhd,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t} else {\n\t\tid := to.StringPtr(\"/subscriptions/\" + az.azconfig.SubscriptionID + \"/resourceGroups/\" + az.imageResourceGroup + \"/providers/Microsoft.Compute/images/\" + string(imageID))\n\t\tif az.azconfig.SharedImageGalleryName != \"\" && az.azconfig.SharedImageGalleryImageVersion != \"\" {\n\t\t\tid = to.StringPtr(\"/subscriptions/\" + az.azconfig.SubscriptionID + \"/resourceGroups/\" + az.imageResourceGroup + \"/providers/Microsoft.Compute/galleries/\" + az.azconfig.SharedImageGalleryName + \"/images/\" + string(imageID) + \"/versions/\" + az.azconfig.SharedImageGalleryImageVersion)\n\t\t} else if az.azconfig.SharedImageGalleryName != \"\" || az.azconfig.SharedImageGalleryImageVersion != \"\" {\n\t\t\taz.cleanupNic(nic)\n\t\t\treturn nil, wrapAzureError(errors.New(\"Invalid configuration: SharedImageGalleryName and SharedImageGalleryImageVersion must both be set or both be empty\"))\n\t\t}\n\t\tstorageProfile = &compute.StorageProfile{\n\t\t\tImageReference: &compute.ImageReference{\n\t\t\t\tID: id,\n\t\t\t},\n\t\t\tOsDisk: &compute.OSDisk{\n\t\t\t\tOsType:       compute.Linux,\n\t\t\t\tName:      
   to.StringPtr(name + \"-os\"),\n\t\t\t\tCreateOption: compute.DiskCreateOptionTypesFromImage,\n\t\t\t},\n\t\t}\n\t}\n\n\tvmParameters := compute.VirtualMachine{\n\t\tLocation: &az.azconfig.Location,\n\t\tTags:     tags,\n\t\tVirtualMachineProperties: &compute.VirtualMachineProperties{\n\t\t\tHardwareProfile: &compute.HardwareProfile{\n\t\t\t\tVMSize: compute.VirtualMachineSizeTypes(instanceType.ProviderType),\n\t\t\t},\n\t\t\tStorageProfile: storageProfile,\n\t\t\tNetworkProfile: &compute.NetworkProfile{\n\t\t\t\tNetworkInterfaces: &[]compute.NetworkInterfaceReference{\n\t\t\t\t\t{\n\t\t\t\t\t\tID: nic.ID,\n\t\t\t\t\t\tNetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{\n\t\t\t\t\t\t\tPrimary: to.BoolPtr(true),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOsProfile: &compute.OSProfile{\n\t\t\t\tComputerName:  &name,\n\t\t\t\tAdminUsername: to.StringPtr(az.azconfig.AdminUsername),\n\t\t\t\tLinuxConfiguration: &compute.LinuxConfiguration{\n\t\t\t\t\tDisablePasswordAuthentication: to.BoolPtr(true),\n\t\t\t\t},\n\t\t\t\tCustomData: &customData,\n\t\t\t},\n\t\t},\n\t}\n\n\tif publicKey != nil {\n\t\tvmParameters.VirtualMachineProperties.OsProfile.LinuxConfiguration.SSH = &compute.SSHConfiguration{\n\t\t\tPublicKeys: &[]compute.SSHPublicKey{\n\t\t\t\t{\n\t\t\t\t\tPath:    to.StringPtr(\"/home/\" + az.azconfig.AdminUsername + \"/.ssh/authorized_keys\"),\n\t\t\t\t\tKeyData: to.StringPtr(string(ssh.MarshalAuthorizedKey(publicKey))),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tif instanceType.Preemptible {\n\t\t// Setting maxPrice to -1 is the equivalent of paying spot price, up to the\n\t\t// normal price. This means the node will not be pre-empted for price\n\t\t// reasons. It may still be pre-empted for capacity reasons though. And\n\t\t// Azure offers *no* SLA on spot instances.\n\t\tvar maxPrice float64 = -1\n\t\tvmParameters.VirtualMachineProperties.Priority = compute.Spot\n\t\tvmParameters.VirtualMachineProperties.EvictionPolicy = compute.Delete\n\t\tvmParameters.VirtualMachineProperties.BillingProfile = &compute.BillingProfile{MaxPrice: &maxPrice}\n\t}\n\n\tvm, err := az.vmClient.createOrUpdate(az.ctx, az.azconfig.ResourceGroup, name, vmParameters)\n\tif err != nil {\n\t\t// Do some cleanup. Otherwise, an unbounded number of new unused nics and\n\t\t// blobs can pile up during times when VMs can't be created and the\n\t\t// dispatcher keeps retrying, because the garbage collection in manageBlobs\n\t\t// and manageNics is only triggered periodically. 
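(manageBlobs and manageDisks\n\t\t// run on a five-minute ticker started in setup(); manageNics runs\n\t\t// only when Instances() is called.)\n\t\t// 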
This is most important\n\t\t// for nics, because those are subject to a quota.\n\t\taz.cleanupNic(nic)\n\n\t\tif blobname != \"\" {\n\t\t\t_, delerr := az.blobcont.GetBlobReference(blobname).DeleteIfExists(nil)\n\t\t\tif delerr != nil {\n\t\t\t\taz.logger.WithError(delerr).Warnf(\"Error cleaning up vhd blob after failed create\")\n\t\t\t}\n\t\t}\n\n\t\t// Leave cleaning up of managed disks to the garbage collection in manageDisks()\n\n\t\treturn nil, wrapAzureError(err)\n\t}\n\n\treturn &azureInstance{\n\t\tprovider: az,\n\t\tnic:      nic,\n\t\tvm:       vm,\n\t}, nil\n}\n\nfunc (az *azureInstanceSet) Instances(cloud.InstanceTags) ([]cloud.Instance, error) {\n\taz.stopWg.Add(1)\n\tdefer az.stopWg.Done()\n\n\tinterfaces, err := az.manageNics()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult, err := az.vmClient.listComplete(az.ctx, az.azconfig.ResourceGroup)\n\tif err != nil {\n\t\treturn nil, wrapAzureError(err)\n\t}\n\n\tvar instances []cloud.Instance\n\tfor ; result.NotDone(); err = result.Next() {\n\t\tif err != nil {\n\t\t\treturn nil, wrapAzureError(err)\n\t\t}\n\t\tinstances = append(instances, &azureInstance{\n\t\t\tprovider: az,\n\t\t\tvm:       result.Value(),\n\t\t\tnic:      interfaces[*(*result.Value().NetworkProfile.NetworkInterfaces)[0].ID],\n\t\t})\n\t}\n\treturn instances, nil\n}\n\n// manageNics returns a list of Azure network interface resources.\n// It also garbage collects NICs which have \"namePrefix\", are not\n// associated with a virtual machine, and have a \"created-at\" time\n// more than DeleteDanglingResourcesAfter in the past (the delay\n// prevents deleting newly created NICs in a race).\nfunc (az *azureInstanceSet) manageNics() (map[string]network.Interface, error) {\n\taz.stopWg.Add(1)\n\tdefer az.stopWg.Done()\n\n\tresult, err := az.netClient.listComplete(az.ctx, az.azconfig.ResourceGroup)\n\tif err != nil {\n\t\treturn nil, wrapAzureError(err)\n\t}\n\n\tinterfaces := make(map[string]network.Interface)\n\n\ttimestamp := time.Now()\n\tfor ; result.NotDone(); err = result.Next() {\n\t\tif err != nil {\n\t\t\taz.logger.WithError(err).Warnf(\"Error listing nics\")\n\t\t\treturn interfaces, nil\n\t\t}\n\t\tif strings.HasPrefix(*result.Value().Name, az.namePrefix) {\n\t\t\tif result.Value().VirtualMachine != nil {\n\t\t\t\tinterfaces[*result.Value().ID] = result.Value()\n\t\t\t} else {\n\t\t\t\tif result.Value().Tags[\"created-at\"] != nil {\n\t\t\t\t\tcreatedAt, err := time.Parse(time.RFC3339Nano, *result.Value().Tags[\"created-at\"])\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tif timestamp.Sub(createdAt) > az.azconfig.DeleteDanglingResourcesAfter.Duration() {\n\t\t\t\t\t\t\taz.logger.Printf(\"Will delete %v because it is older than %s\", *result.Value().Name, az.azconfig.DeleteDanglingResourcesAfter)\n\t\t\t\t\t\t\taz.deleteNIC <- *result.Value().Name\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn interfaces, nil\n}\n\n// manageBlobs garbage collects blobs (VM disk images) in the\n// configured storage account container.  
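These are the \"-os.vhd\" page\n// blobs created by Create() when an unmanaged image URL is used.\n// 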
It will delete blobs which\n// have \"namePrefix\", are \"available\" (which means they are not\n// leased to a VM) and haven't been modified for\n// DeleteDanglingResourcesAfter seconds.\nfunc (az *azureInstanceSet) manageBlobs() {\n\n\tpage := storage.ListBlobsParameters{Prefix: az.namePrefix}\n\ttimestamp := time.Now()\n\n\tfor {\n\t\tresponse, err := az.blobcont.ListBlobs(page)\n\t\tif err != nil {\n\t\t\taz.logger.WithError(err).Warn(\"Error listing blobs\")\n\t\t\treturn\n\t\t}\n\t\tfor _, b := range response.Blobs {\n\t\t\tage := timestamp.Sub(time.Time(b.Properties.LastModified))\n\t\t\tif b.Properties.BlobType == storage.BlobTypePage &&\n\t\t\t\tb.Properties.LeaseState == \"available\" &&\n\t\t\t\tb.Properties.LeaseStatus == \"unlocked\" &&\n\t\t\t\tage.Seconds() > az.azconfig.DeleteDanglingResourcesAfter.Duration().Seconds() {\n\n\t\t\t\taz.logger.Printf(\"Blob %v is unlocked and not modified for %v seconds, will delete\", b.Name, age.Seconds())\n\t\t\t\taz.deleteBlob <- b\n\t\t\t}\n\t\t}\n\t\tif response.NextMarker != \"\" {\n\t\t\tpage.Marker = response.NextMarker\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\n// manageDisks garbage collects managed compute disks (VM disk images) in the\n// configured resource group.  It will delete disks which have \"namePrefix\",\n// are \"unattached\" (which means they are not leased to a VM) and were created\n// more than DeleteDanglingResourcesAfter seconds ago.  (Azure provides no\n// modification timestamp on managed disks, there is only a creation timestamp)\nfunc (az *azureInstanceSet) manageDisks() {\n\n\tre := regexp.MustCompile(`^` + regexp.QuoteMeta(az.namePrefix) + `.*-os$`)\n\tthreshold := time.Now().Add(-az.azconfig.DeleteDanglingResourcesAfter.Duration())\n\n\tresponse, err := az.disksClient.listByResourceGroup(az.ctx, az.imageResourceGroup)\n\tif err != nil {\n\t\taz.logger.WithError(err).Warn(\"Error listing disks\")\n\t\treturn\n\t}\n\n\tfor ; response.NotDone(); err = response.Next() {\n\t\tif err != nil {\n\t\t\taz.logger.WithError(err).Warn(\"Error getting next page of disks\")\n\t\t\treturn\n\t\t}\n\t\tfor _, d := range response.Values() {\n\t\t\tif d.DiskProperties.DiskState == compute.Unattached &&\n\t\t\t\td.Name != nil && re.MatchString(*d.Name) &&\n\t\t\t\td.DiskProperties.TimeCreated.ToTime().Before(threshold) {\n\n\t\t\t\taz.logger.Printf(\"Disk %v is unlocked and was created at %+v, will delete\", *d.Name, d.DiskProperties.TimeCreated.ToTime())\n\t\t\t\taz.deleteDisk <- d\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (az *azureInstanceSet) InstanceQuotaGroup(arvados.InstanceType) cloud.InstanceQuotaGroup {\n\t// All instance types share one quota.\n\treturn \"\"\n}\n\nfunc (az *azureInstanceSet) Stop() {\n\taz.stopFunc()\n\taz.stopWg.Wait()\n\tclose(az.deleteNIC)\n\tclose(az.deleteBlob)\n\tclose(az.deleteDisk)\n}\n\ntype azureInstance struct {\n\tprovider *azureInstanceSet\n\tnic      network.Interface\n\tvm       compute.VirtualMachine\n}\n\nfunc (ai *azureInstance) ID() cloud.InstanceID {\n\treturn cloud.InstanceID(*ai.vm.ID)\n}\n\nfunc (ai *azureInstance) String() string {\n\treturn *ai.vm.Name\n}\n\nfunc (ai *azureInstance) ProviderType() string {\n\treturn string(ai.vm.VirtualMachineProperties.HardwareProfile.VMSize)\n}\n\nfunc (ai *azureInstance) SetTags(newTags cloud.InstanceTags) error {\n\tai.provider.stopWg.Add(1)\n\tdefer ai.provider.stopWg.Done()\n\n\ttags := map[string]*string{}\n\tfor k, v := range ai.vm.Tags {\n\t\ttags[k] = v\n\t}\n\tfor k, v := range newTags {\n\t\ttags[k] = 
to.StringPtr(v)\n\t}\n\n\tvmParameters := compute.VirtualMachine{\n\t\tLocation: &ai.provider.azconfig.Location,\n\t\tTags:     tags,\n\t}\n\tvm, err := ai.provider.vmClient.createOrUpdate(ai.provider.ctx, ai.provider.azconfig.ResourceGroup, *ai.vm.Name, vmParameters)\n\tif err != nil {\n\t\treturn wrapAzureError(err)\n\t}\n\tai.vm = vm\n\n\treturn nil\n}\n\nfunc (ai *azureInstance) Tags() cloud.InstanceTags {\n\ttags := cloud.InstanceTags{}\n\tfor k, v := range ai.vm.Tags {\n\t\ttags[k] = *v\n\t}\n\treturn tags\n}\n\nfunc (ai *azureInstance) Destroy() error {\n\tai.provider.stopWg.Add(1)\n\tdefer ai.provider.stopWg.Done()\n\n\t_, err := ai.provider.vmClient.delete(ai.provider.ctx, ai.provider.azconfig.ResourceGroup, *ai.vm.Name)\n\treturn wrapAzureError(err)\n}\n\nfunc (ai *azureInstance) Address() string {\n\tif iprops := ai.nic.InterfacePropertiesFormat; iprops == nil {\n\t\treturn \"\"\n\t} else if ipconfs := iprops.IPConfigurations; ipconfs == nil || len(*ipconfs) == 0 {\n\t\treturn \"\"\n\t} else if ipconfprops := (*ipconfs)[0].InterfaceIPConfigurationPropertiesFormat; ipconfprops == nil {\n\t\treturn \"\"\n\t} else if addr := ipconfprops.PrivateIPAddress; addr == nil {\n\t\treturn \"\"\n\t} else {\n\t\treturn *addr\n\t}\n}\n\nfunc (ai *azureInstance) PriceHistory(arvados.InstanceType) []cloud.InstancePrice {\n\treturn nil\n}\n\nfunc (ai *azureInstance) RemoteUser() string {\n\treturn ai.provider.azconfig.AdminUsername\n}\n\nfunc (ai *azureInstance) VerifyHostKey(ssh.PublicKey, *ssh.Client) error {\n\treturn cloud.ErrNotImplemented\n}\n"
  },
  {
    "path": "lib/cloud/azure/azure_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n//\n//\n// How to manually run individual tests against the real cloud:\n//\n// $ go test -v git.arvados.org/arvados.git/lib/cloud/azure -live-azure-cfg azconfig.yml -check.f=TestCreate\n//\n// Tests should be run individually and in the order they are listed in the file:\n//\n// Example azconfig.yml:\n//\n// ImageIDForTestSuite: \"https://example.blob.core.windows.net/system/Microsoft.Compute/Images/images/zzzzz-compute-osDisk.XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX.vhd\"\n// DriverParameters:\n// \t SubscriptionID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n// \t ClientID: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n// \t Location: centralus\n// \t CloudEnvironment: AzurePublicCloud\n// \t ClientSecret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n// \t TenantId: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\n// \t ResourceGroup: zzzzz\n// \t Network: zzzzz\n// \t Subnet: zzzzz-subnet-private\n// \t StorageAccount: example\n// \t BlobContainer: vhds\n// \t DeleteDanglingResourcesAfter: 20s\n//\t AdminUsername: crunch\n\npackage azure\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/test\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/config\"\n\t\"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute\"\n\t\"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-06-01/network\"\n\t\"github.com/Azure/azure-sdk-for-go/storage\"\n\t\"github.com/Azure/go-autorest/autorest\"\n\t\"github.com/Azure/go-autorest/autorest/azure\"\n\t\"github.com/Azure/go-autorest/autorest/to\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/ssh\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\ntype AzureInstanceSetSuite struct{}\n\nvar _ = check.Suite(&AzureInstanceSetSuite{})\n\nconst testNamePrefix = \"compute-test123-\"\n\ntype VirtualMachinesClientStub struct {\n\tvmParameters compute.VirtualMachine\n}\n\nfunc (stub *VirtualMachinesClientStub) createOrUpdate(ctx context.Context,\n\tresourceGroupName string,\n\tVMName string,\n\tparameters compute.VirtualMachine) (result compute.VirtualMachine, err error) {\n\tparameters.ID = &VMName\n\tparameters.Name = &VMName\n\tstub.vmParameters = parameters\n\treturn parameters, nil\n}\n\nfunc (*VirtualMachinesClientStub) delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {\n\treturn nil, nil\n}\n\nfunc (*VirtualMachinesClientStub) listComplete(ctx context.Context, resourceGroupName string) (result compute.VirtualMachineListResultIterator, err error) {\n\treturn compute.VirtualMachineListResultIterator{}, nil\n}\n\ntype InterfacesClientStub struct{}\n\nfunc (*InterfacesClientStub) createOrUpdate(ctx context.Context,\n\tresourceGroupName string,\n\tnicName string,\n\tparameters network.Interface) (result network.Interface, err error) {\n\tparameters.ID = to.StringPtr(nicName)\n\t(*parameters.IPConfigurations)[0].PrivateIPAddress = to.StringPtr(\"192.168.5.5\")\n\treturn parameters, nil\n}\n\nfunc (*InterfacesClientStub) delete(ctx context.Context, resourceGroupName string, VMName string) (result *http.Response, err error) {\n\treturn nil, nil\n}\n\nfunc 
(*InterfacesClientStub) listComplete(ctx context.Context, resourceGroupName string) (result network.InterfaceListResultIterator, err error) {\n\treturn network.InterfaceListResultIterator{}, nil\n}\n\ntype BlobContainerStub struct{}\n\nfunc (*BlobContainerStub) GetBlobReference(name string) *storage.Blob {\n\treturn nil\n}\n\nfunc (*BlobContainerStub) ListBlobs(params storage.ListBlobsParameters) (storage.BlobListResponse, error) {\n\treturn storage.BlobListResponse{}, nil\n}\n\ntype testConfig struct {\n\tImageIDForTestSuite string\n\tDriverParameters    json.RawMessage\n}\n\nvar live = flag.String(\"live-azure-cfg\", \"\", \"Test with real azure API, provide config file\")\n\nfunc GetInstanceSet() (*azureInstanceSet, cloud.ImageID, arvados.Cluster, error) {\n\tcluster := arvados.Cluster{\n\t\tInstanceTypes: arvados.InstanceTypeMap(map[string]arvados.InstanceType{\n\t\t\t\"tiny\": {\n\t\t\t\tName:         \"tiny\",\n\t\t\t\tProviderType: \"Standard_D1_v2\",\n\t\t\t\tVCPUs:        1,\n\t\t\t\tRAM:          4000000000,\n\t\t\t\tScratch:      10000000000,\n\t\t\t\tPrice:        .02,\n\t\t\t\tPreemptible:  false,\n\t\t\t},\n\t\t\t\"tinyp\": {\n\t\t\t\tName:         \"tiny\",\n\t\t\t\tProviderType: \"Standard_D1_v2\",\n\t\t\t\tVCPUs:        1,\n\t\t\t\tRAM:          4000000000,\n\t\t\t\tScratch:      10000000000,\n\t\t\t\tPrice:        .002,\n\t\t\t\tPreemptible:  true,\n\t\t\t},\n\t\t})}\n\tif *live != \"\" {\n\t\tvar exampleCfg testConfig\n\t\terr := config.LoadFile(&exampleCfg, *live)\n\t\tif err != nil {\n\t\t\treturn nil, cloud.ImageID(\"\"), cluster, err\n\t\t}\n\n\t\tap, err := newAzureInstanceSet(exampleCfg.DriverParameters, \"test123\", nil, logrus.StandardLogger(), nil)\n\t\treturn ap.(*azureInstanceSet), cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, err\n\t}\n\tap := azureInstanceSet{\n\t\tazconfig: azureInstanceSetConfig{\n\t\t\tBlobContainer: \"vhds\",\n\t\t},\n\t\tdispatcherID: \"test123\",\n\t\tnamePrefix:   testNamePrefix,\n\t\tlogger:       logrus.StandardLogger(),\n\t\tdeleteNIC:    make(chan string),\n\t\tdeleteBlob:   make(chan storage.Blob),\n\t\tdeleteDisk:   make(chan compute.Disk),\n\t}\n\tap.ctx, ap.stopFunc = context.WithCancel(context.Background())\n\tap.vmClient = &VirtualMachinesClientStub{}\n\tap.netClient = &InterfacesClientStub{}\n\tap.blobcont = &BlobContainerStub{}\n\treturn &ap, cloud.ImageID(\"blob\"), cluster, nil\n}\n\nfunc (*AzureInstanceSetSuite) TestCreate(c *check.C) {\n\tap, img, cluster, err := GetInstanceSet()\n\tif err != nil {\n\t\tc.Fatal(\"Error making provider\", err)\n\t}\n\n\tpk, _ := test.LoadTestKey(c, \"../../dispatchcloud/test/sshkey_dispatch\")\n\tc.Assert(err, check.IsNil)\n\n\tinst, err := ap.Create(cluster.InstanceTypes[\"tiny\"],\n\t\timg, map[string]string{\n\t\t\t\"TestTagName\": \"test tag value\",\n\t\t}, \"umask 0600; echo -n test-file-data >/var/run/test-file\", pk)\n\n\tc.Assert(err, check.IsNil)\n\n\ttags := inst.Tags()\n\tc.Check(tags[\"TestTagName\"], check.Equals, \"test tag value\")\n\tc.Logf(\"inst.String()=%v Address()=%v Tags()=%v\", inst.String(), inst.Address(), tags)\n\tif *live == \"\" {\n\t\tc.Check(ap.vmClient.(*VirtualMachinesClientStub).vmParameters.VirtualMachineProperties.OsProfile.LinuxConfiguration.SSH, check.NotNil)\n\t}\n\n\tinstPreemptable, err := ap.Create(cluster.InstanceTypes[\"tinyp\"],\n\t\timg, map[string]string{\n\t\t\t\"TestTagName\": \"test tag value\",\n\t\t}, \"umask 0600; echo -n test-file-data >/var/run/test-file\", nil)\n\n\tc.Assert(err, check.IsNil)\n\n\ttags = 
instPreemptable.Tags()\n\tc.Check(tags[\"TestTagName\"], check.Equals, \"test tag value\")\n\tc.Logf(\"instPreemptable.String()=%v Address()=%v Tags()=%v\", instPreemptable.String(), instPreemptable.Address(), tags)\n\tif *live == \"\" {\n\t\t// Should not have set SSH option, because publickey\n\t\t// arg was nil\n\t\tc.Check(ap.vmClient.(*VirtualMachinesClientStub).vmParameters.VirtualMachineProperties.OsProfile.LinuxConfiguration.SSH, check.IsNil)\n\t}\n}\n\nfunc (*AzureInstanceSetSuite) TestListInstances(c *check.C) {\n\tap, _, _, err := GetInstanceSet()\n\tif err != nil {\n\t\tc.Fatal(\"Error making provider\", err)\n\t}\n\n\tl, err := ap.Instances(nil)\n\n\tc.Assert(err, check.IsNil)\n\n\tfor _, i := range l {\n\t\ttg := i.Tags()\n\t\tlog.Printf(\"%v %v %v\", i.String(), i.Address(), tg)\n\t}\n}\n\nfunc (*AzureInstanceSetSuite) TestManageNics(c *check.C) {\n\tap, _, _, err := GetInstanceSet()\n\tif err != nil {\n\t\tc.Fatal(\"Error making provider\", err)\n\t}\n\n\tap.manageNics()\n\tap.Stop()\n}\n\nfunc (*AzureInstanceSetSuite) TestManageBlobs(c *check.C) {\n\tap, _, _, err := GetInstanceSet()\n\tif err != nil {\n\t\tc.Fatal(\"Error making provider\", err)\n\t}\n\n\tap.manageBlobs()\n\tap.Stop()\n}\n\nfunc (*AzureInstanceSetSuite) TestDestroyInstances(c *check.C) {\n\tap, _, _, err := GetInstanceSet()\n\tif err != nil {\n\t\tc.Fatal(\"Error making provider\", err)\n\t}\n\n\tl, err := ap.Instances(nil)\n\tc.Assert(err, check.IsNil)\n\n\tfor _, i := range filterInstances(c, l) {\n\t\tc.Check(i.Destroy(), check.IsNil)\n\t}\n}\n\nfunc (*AzureInstanceSetSuite) TestDeleteFake(c *check.C) {\n\tap, _, _, err := GetInstanceSet()\n\tif err != nil {\n\t\tc.Fatal(\"Error making provider\", err)\n\t}\n\n\t_, err = ap.netClient.delete(context.Background(), \"fakefakefake\", \"fakefakefake\")\n\n\tde, ok := err.(autorest.DetailedError)\n\tif ok {\n\t\trq := de.Original.(*azure.RequestError)\n\n\t\tlog.Printf(\"%v %q %q\", rq.Response.StatusCode, rq.ServiceError.Code, rq.ServiceError.Message)\n\t}\n}\n\nfunc (*AzureInstanceSetSuite) TestWrapError(c *check.C) {\n\tretryError := autorest.DetailedError{\n\t\tOriginal: &azure.RequestError{\n\t\t\tDetailedError: autorest.DetailedError{\n\t\t\t\tResponse: &http.Response{\n\t\t\t\t\tStatusCode: 429,\n\t\t\t\t\tHeader:     map[string][]string{\"Retry-After\": {\"123\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t\tServiceError: &azure.ServiceError{},\n\t\t},\n\t}\n\twrapped := wrapAzureError(retryError)\n\t_, ok := wrapped.(cloud.RateLimitError)\n\tc.Check(ok, check.Equals, true)\n\n\tquotaError := autorest.DetailedError{\n\t\tOriginal: &azure.RequestError{\n\t\t\tDetailedError: autorest.DetailedError{\n\t\t\t\tResponse: &http.Response{\n\t\t\t\t\tStatusCode: 503,\n\t\t\t\t},\n\t\t\t},\n\t\t\tServiceError: &azure.ServiceError{\n\t\t\t\tMessage: \"No more quota\",\n\t\t\t},\n\t\t},\n\t}\n\twrapped = wrapAzureError(quotaError)\n\t_, ok = wrapped.(cloud.QuotaError)\n\tc.Check(ok, check.Equals, true)\n}\n\nfunc (*AzureInstanceSetSuite) TestSetTags(c *check.C) {\n\tap, _, _, err := GetInstanceSet()\n\tif err != nil {\n\t\tc.Fatal(\"Error making provider\", err)\n\t}\n\n\tl, err := ap.Instances(nil)\n\tc.Assert(err, check.IsNil)\n\tl = filterInstances(c, l)\n\tif len(l) > 0 {\n\t\terr = l[0].SetTags(map[string]string{\"foo\": \"bar\"})\n\t\tif err != nil {\n\t\t\tc.Fatal(\"Error setting tags\", err)\n\t\t}\n\t}\n\n\tl, err = ap.Instances(nil)\n\tc.Assert(err, check.IsNil)\n\tl = filterInstances(c, l)\n\n\tif len(l) > 0 {\n\t\ttg := l[0].Tags()\n\t\tlog.Printf(\"tags are %v\", 
tg)\n\t}\n}\n\nfunc (*AzureInstanceSetSuite) TestSSH(c *check.C) {\n\tap, _, _, err := GetInstanceSet()\n\tif err != nil {\n\t\tc.Fatal(\"Error making provider\", err)\n\t}\n\tl, err := ap.Instances(nil)\n\tc.Assert(err, check.IsNil)\n\tl = filterInstances(c, l)\n\n\tif len(l) > 0 {\n\t\tsshclient, err := SetupSSHClient(c, l[0])\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer sshclient.Conn.Close()\n\n\t\tsess, err := sshclient.NewSession()\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer sess.Close()\n\t\t_, err = sess.Output(\"find /var/run/test-file -maxdepth 0 -user root -perm 0600\")\n\t\tc.Assert(err, check.IsNil)\n\n\t\tsess, err = sshclient.NewSession()\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer sess.Close()\n\t\tout, err := sess.Output(\"sudo cat /var/run/test-file\")\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(string(out), check.Equals, \"test-file-data\")\n\t}\n}\n\nfunc SetupSSHClient(c *check.C, inst cloud.Instance) (*ssh.Client, error) {\n\taddr := inst.Address() + \":2222\"\n\tif addr == \"\" {\n\t\treturn nil, errors.New(\"instance has no address\")\n\t}\n\n\tf, err := os.Open(\"azconfig_sshkey\")\n\tc.Assert(err, check.IsNil)\n\n\tkeybytes, err := ioutil.ReadAll(f)\n\tc.Assert(err, check.IsNil)\n\n\tpriv, err := ssh.ParsePrivateKey(keybytes)\n\tc.Assert(err, check.IsNil)\n\n\tvar receivedKey ssh.PublicKey\n\tclient, err := ssh.Dial(\"tcp\", addr, &ssh.ClientConfig{\n\t\tUser: \"crunch\",\n\t\tAuth: []ssh.AuthMethod{\n\t\t\tssh.PublicKeys(priv),\n\t\t},\n\t\tHostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {\n\t\t\treceivedKey = key\n\t\t\treturn nil\n\t\t},\n\t\tTimeout: time.Minute,\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else if receivedKey == nil {\n\t\treturn nil, errors.New(\"BUG: key was never provided to HostKeyCallback\")\n\t}\n\n\terr = inst.VerifyHostKey(receivedKey, client)\n\tc.Assert(err, check.IsNil)\n\n\treturn client, nil\n}\n\nfunc filterInstances(c *check.C, instances []cloud.Instance) []cloud.Instance {\n\tvar r []cloud.Instance\n\tfor _, i := range instances {\n\t\tif !strings.HasPrefix(i.String(), testNamePrefix) {\n\t\t\tc.Logf(\"ignoring instance %s\", i)\n\t\t\tcontinue\n\t\t}\n\t\tr = append(r, i)\n\t}\n\treturn r\n}\n"
  },
  {
    "path": "lib/cloud/cloudtest/cmd.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage cloudtest\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n)\n\nvar Command command\n\ntype command struct{}\n\nfunc (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(stderr, \"%s\\n\", err)\n\t\t}\n\t}()\n\n\tflags := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tflags.SetOutput(stderr)\n\tconfigFile := flags.String(\"config\", arvados.DefaultConfigFile, \"Site configuration `file`\")\n\tinstanceSetID := flags.String(\"instance-set-id\", \"zzzzz-zzzzz-zzzzzzcloudtest\", \"InstanceSetID tag `value` to use on the test instance\")\n\timageID := flags.String(\"image-id\", \"\", \"Image ID to use when creating the test instance (if empty, use cluster config)\")\n\tinstanceType := flags.String(\"instance-type\", \"\", \"Instance type to create (if empty, use cheapest type in config)\")\n\tdestroyExisting := flags.Bool(\"destroy-existing\", false, \"Destroy any existing instances tagged with our InstanceSetID, instead of erroring out\")\n\tshellCommand := flags.String(\"command\", \"\", \"Run an interactive shell command on the test instance when it boots\")\n\tpauseBeforeDestroy := flags.Bool(\"pause-before-destroy\", false, \"Prompt and wait before destroying the test instance\")\n\tif ok, code := cmd.ParseFlags(flags, prog, args, \"\", stderr); !ok {\n\t\treturn code\n\t}\n\tlogger := ctxlog.New(stderr, \"text\", \"info\")\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"fatal\")\n\t\t\t// suppress output from the other error-printing func\n\t\t\terr = nil\n\t\t}\n\t\tlogger.Info(\"exiting\")\n\t}()\n\n\tloader := config.NewLoader(stdin, logger)\n\tloader.Path = *configFile\n\tcfg, err := loader.Load()\n\tif err != nil {\n\t\treturn 1\n\t}\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\treturn 1\n\t}\n\tkey, err := config.LoadSSHKey(cluster.Containers.DispatchPrivateKey)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error loading Containers.DispatchPrivateKey: %s\", err)\n\t\treturn 1\n\t}\n\tdriver, ok := dispatchcloud.Drivers[cluster.Containers.CloudVMs.Driver]\n\tif !ok {\n\t\terr = fmt.Errorf(\"unsupported cloud driver %q\", cluster.Containers.CloudVMs.Driver)\n\t\treturn 1\n\t}\n\tif *imageID == \"\" {\n\t\t*imageID = cluster.Containers.CloudVMs.ImageID\n\t}\n\tit, err := chooseInstanceType(cluster, *instanceType)\n\tif err != nil {\n\t\treturn 1\n\t}\n\ttags := cloud.SharedResourceTags(cluster.Containers.CloudVMs.ResourceTags)\n\ttagKeyPrefix := cluster.Containers.CloudVMs.TagKeyPrefix\n\ttags[tagKeyPrefix+\"CloudTestPID\"] = fmt.Sprintf(\"%d\", os.Getpid())\n\tif !(&tester{\n\t\tLogger:              logger,\n\t\tTags:                tags,\n\t\tTagKeyPrefix:        tagKeyPrefix,\n\t\tSetID:               cloud.InstanceSetID(*instanceSetID),\n\t\tDestroyExisting:     *destroyExisting,\n\t\tProbeInterval:       cluster.Containers.CloudVMs.ProbeInterval.Duration(),\n\t\tSyncInterval:        cluster.Containers.CloudVMs.SyncInterval.Duration(),\n\t\tTimeoutBooting:      
cluster.Containers.CloudVMs.TimeoutBooting.Duration(),\n\t\tDriver:              driver,\n\t\tDriverParameters:    cluster.Containers.CloudVMs.DriverParameters,\n\t\tImageID:             cloud.ImageID(*imageID),\n\t\tInstanceType:        it,\n\t\tSSHKey:              key,\n\t\tSSHPort:             cluster.Containers.CloudVMs.SSHPort,\n\t\tDeployPublicKey:     cluster.Containers.CloudVMs.DeployPublicKey,\n\t\tBootProbeCommand:    cluster.Containers.CloudVMs.BootProbeCommand,\n\t\tInstanceInitCommand: cloud.InitCommand(cluster.Containers.CloudVMs.InstanceInitCommand),\n\t\tShellCommand:        *shellCommand,\n\t\tPauseBeforeDestroy: func() {\n\t\t\tif *pauseBeforeDestroy {\n\t\t\t\tlogger.Info(\"waiting for operator to press Enter\")\n\t\t\t\tfmt.Fprint(stderr, \"Press Enter to continue: \")\n\t\t\t\tbufio.NewReader(stdin).ReadString('\\n')\n\t\t\t}\n\t\t},\n\t}).Run() {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\n// Return the named instance type, or the cheapest type if name==\"\".\nfunc chooseInstanceType(cluster *arvados.Cluster, name string) (arvados.InstanceType, error) {\n\tif len(cluster.InstanceTypes) == 0 {\n\t\treturn arvados.InstanceType{}, errors.New(\"no instance types are configured\")\n\t} else if name == \"\" {\n\t\tfirst := true\n\t\tvar best arvados.InstanceType\n\t\tfor _, it := range cluster.InstanceTypes {\n\t\t\tif first || best.Price > it.Price {\n\t\t\t\tbest = it\n\t\t\t\tfirst = false\n\t\t\t}\n\t\t}\n\t\treturn best, nil\n\t} else if it, ok := cluster.InstanceTypes[name]; !ok {\n\t\treturn it, fmt.Errorf(\"requested instance type %q is not configured\", name)\n\t} else {\n\t\treturn it, nil\n\t}\n}\n"
  },
  {
    "path": "lib/cloud/cloudtest/tester.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage cloudtest\n\nimport (\n\t\"crypto/rand\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/sshexecutor\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/worker\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\nvar (\n\terrTestInstanceNotFound = errors.New(\"test instance missing from cloud provider's list\")\n)\n\n// A tester does a sequence of operations to test a cloud driver and\n// configuration. Run() should be called only once, after assigning\n// suitable values to public fields.\ntype tester struct {\n\tLogger              logrus.FieldLogger\n\tTags                cloud.SharedResourceTags\n\tTagKeyPrefix        string\n\tSetID               cloud.InstanceSetID\n\tDestroyExisting     bool\n\tProbeInterval       time.Duration\n\tSyncInterval        time.Duration\n\tTimeoutBooting      time.Duration\n\tDriver              cloud.Driver\n\tDriverParameters    json.RawMessage\n\tInstanceType        arvados.InstanceType\n\tImageID             cloud.ImageID\n\tSSHKey              ssh.Signer\n\tSSHPort             string\n\tDeployPublicKey     bool\n\tBootProbeCommand    string\n\tInstanceInitCommand cloud.InitCommand\n\tShellCommand        string\n\tPauseBeforeDestroy  func()\n\n\tis              cloud.InstanceSet\n\ttestInstance    *worker.TagVerifier\n\tsecret          string\n\texecutor        *sshexecutor.Executor\n\tshowedLoginInfo bool\n\n\tfailed bool\n}\n\n// Run the test suite once for each applicable permutation of\n// DriverParameters.  
Return true if everything worked.\n//\n// Currently this means run once for each configured SubnetID.\nfunc (t *tester) Run() bool {\n\tvar dp map[string]interface{}\n\tif len(t.DriverParameters) > 0 {\n\t\terr := json.Unmarshal(t.DriverParameters, &dp)\n\t\tif err != nil {\n\t\t\tt.Logger.WithError(err).Error(\"error decoding configured CloudVMs.DriverParameters\")\n\t\t\treturn false\n\t\t}\n\t}\n\tsubnets, ok := dp[\"SubnetID\"].([]interface{})\n\tif !ok || len(subnets) <= 1 {\n\t\t// Easy, only one SubnetID to test.\n\t\treturn t.runWithDriverParameters(t.DriverParameters)\n\t}\n\n\tdeferredError := false\n\tfor i, subnet := range subnets {\n\t\tsubnet, ok := subnet.(string)\n\t\tif !ok {\n\t\t\tt.Logger.Errorf(\"CloudVMs.DriverParameters.SubnetID[%d] is invalid -- must be a string\", i)\n\t\t\tdeferredError = true\n\t\t\tcontinue\n\t\t}\n\t\tdp[\"SubnetID\"] = subnet\n\t\tt.Logger.Infof(\"running tests using SubnetID[%d] %q\", i, subnet)\n\t\tdpjson, err := json.Marshal(dp)\n\t\tif err != nil {\n\t\t\tt.Logger.WithError(err).Error(\"error encoding driver parameters\")\n\t\t\tdeferredError = true\n\t\t\tcontinue\n\t\t}\n\t\tok = t.runWithDriverParameters(dpjson)\n\t\tif !ok {\n\t\t\tt.Logger.Infof(\"failed tests using SubnetID[%d] %q\", i, subnet)\n\t\t\tdeferredError = true\n\t\t}\n\t}\n\treturn !deferredError\n}\n\n// Run the test suite as specified, clean up as needed, and return\n// true (everything is OK) or false (something went wrong).\nfunc (t *tester) runWithDriverParameters(driverParameters json.RawMessage) bool {\n\t// This flag gets set when we encounter a non-fatal error, so\n\t// we can continue doing more tests but remember to return\n\t// false (failure) at the end.\n\tdeferredError := false\n\n\tvar err error\n\tt.is, err = t.Driver.InstanceSet(driverParameters, t.SetID, t.Tags, t.Logger, nil)\n\tif err != nil {\n\t\tt.Logger.WithError(err).Info(\"error initializing driver\")\n\t\treturn false\n\t}\n\n\tfor {\n\t\t// Don't send the driver any filters when getting the\n\t\t// initial instance list. This way we can log an\n\t\t// instance count (N=...)  
that includes all instances\n\t\t// in this service account, even if they don't have\n\t\t// the same InstanceSetID.\n\t\tinsts, err := t.getInstances(nil)\n\t\tif err != nil {\n\t\t\tt.Logger.WithError(err).Info(\"error getting list of instances\")\n\t\t\treturn false\n\t\t}\n\n\t\tfoundExisting := false\n\t\tfor _, i := range insts {\n\t\t\tif i.Tags()[t.TagKeyPrefix+\"InstanceSetID\"] != string(t.SetID) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlgr := t.Logger.WithFields(logrus.Fields{\n\t\t\t\t\"Instance\":      i.ID(),\n\t\t\t\t\"InstanceSetID\": t.SetID,\n\t\t\t})\n\t\t\tfoundExisting = true\n\t\t\tif t.DestroyExisting {\n\t\t\t\tlgr.Info(\"destroying existing instance with our InstanceSetID\")\n\t\t\t\tt0 := time.Now()\n\t\t\t\terr := i.Destroy()\n\t\t\t\tlgr := lgr.WithField(\"Duration\", time.Since(t0))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlgr.WithError(err).Error(\"error destroying existing instance\")\n\t\t\t\t} else {\n\t\t\t\t\tlgr.Info(\"Destroy() call succeeded\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlgr.Error(\"found existing instance with our InstanceSetID\")\n\t\t\t}\n\t\t}\n\t\tif !foundExisting {\n\t\t\tbreak\n\t\t} else if t.DestroyExisting {\n\t\t\tt.sleepSyncInterval()\n\t\t} else {\n\t\t\tt.Logger.Error(\"cannot continue with existing instances -- clean up manually, use -destroy-existing=true, or choose a different -instance-set-id\")\n\t\t\treturn false\n\t\t}\n\t}\n\n\tt.secret = randomHex(40)\n\n\ttags := cloud.InstanceTags{}\n\tfor k, v := range t.Tags {\n\t\ttags[k] = v\n\t}\n\ttags[t.TagKeyPrefix+\"InstanceSetID\"] = string(t.SetID)\n\ttags[t.TagKeyPrefix+\"InstanceSecret\"] = t.secret\n\n\tdefer t.destroyTestInstance()\n\n\tbootDeadline := time.Now().Add(t.TimeoutBooting)\n\tinitCommand := worker.TagVerifier{Instance: nil, Secret: t.secret, ReportVerified: nil}.InitCommand() + \"\\n\" + t.InstanceInitCommand\n\n\tinstallPublicKey := t.SSHKey.PublicKey()\n\tif !t.DeployPublicKey {\n\t\tinstallPublicKey = nil\n\t}\n\n\tt.Logger.WithFields(logrus.Fields{\n\t\t\"InstanceType\":         t.InstanceType.Name,\n\t\t\"ProviderInstanceType\": t.InstanceType.ProviderType,\n\t\t\"ImageID\":              t.ImageID,\n\t\t\"Tags\":                 tags,\n\t\t\"InitCommand\":          initCommand,\n\t\t\"DeployPublicKey\":      installPublicKey != nil,\n\t}).Info(\"creating instance\")\n\tt0 := time.Now()\n\tinst, err := t.is.Create(t.InstanceType, t.ImageID, tags, initCommand, installPublicKey)\n\tlgrC := t.Logger.WithField(\"Duration\", time.Since(t0))\n\tif err != nil {\n\t\t// Create() might have failed due to a bug or network\n\t\t// error even though the creation was successful, so\n\t\t// it's safer to wait a bit for an instance to appear.\n\t\tdeferredError = true\n\t\tlgrC.WithError(err).Error(\"error creating test instance\")\n\t\tt.Logger.WithField(\"Deadline\", bootDeadline).Info(\"waiting for instance to appear anyway, in case the Create response was incorrect\")\n\t\tfor err = t.refreshTestInstance(); err != nil; err = t.refreshTestInstance() {\n\t\t\tif time.Now().After(bootDeadline) {\n\t\t\t\tt.Logger.Error(\"timed out\")\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tt.sleepSyncInterval()\n\t\t}\n\t\tt.Logger.WithField(\"Instance\", t.testInstance.ID()).Info(\"new instance appeared\")\n\t\tt.showLoginInfo()\n\t} else {\n\t\t// Create() succeeded. 
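Wrap it in a\n\t\t// worker.TagVerifier carrying the same secret that went into the\n\t\t// instance's init command.\n\t\t// 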
Make sure the new instance\n\t\t// appears right away in the Instances() list.\n\t\tlgrC.WithField(\"Instance\", inst.ID()).Info(\"created instance\")\n\t\tt.testInstance = &worker.TagVerifier{Instance: inst, Secret: t.secret, ReportVerified: nil}\n\t\tt.showLoginInfo()\n\t\terr = t.refreshTestInstance()\n\t\tif err == errTestInstanceNotFound {\n\t\t\tt.Logger.WithError(err).Error(\"cloud/driver Create succeeded, but instance is not in list\")\n\t\t\tdeferredError = true\n\t\t} else if err != nil {\n\t\t\tt.Logger.WithError(err).Error(\"error getting list of instances\")\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif !t.checkTags() {\n\t\t// checkTags() already logged the errors\n\t\tdeferredError = true\n\t}\n\n\tif !t.waitForBoot(bootDeadline) {\n\t\tdeferredError = true\n\t}\n\n\tif t.ShellCommand != \"\" {\n\t\terr = t.runShellCommand(t.ShellCommand)\n\t\tif err != nil {\n\t\t\tt.Logger.WithError(err).Error(\"shell command failed\")\n\t\t\tdeferredError = true\n\t\t}\n\t}\n\n\tif fn := t.PauseBeforeDestroy; fn != nil {\n\t\tt.showLoginInfo()\n\t\tfn()\n\t}\n\n\treturn !deferredError\n}\n\n// If the test instance has an address, log an \"ssh user@host\" command\n// line that the operator can paste into another terminal, and set\n// t.showedLoginInfo.\n//\n// If the test instance doesn't have an address yet, do nothing.\nfunc (t *tester) showLoginInfo() {\n\tt.updateExecutor()\n\thost, port := t.executor.TargetHostPort()\n\tif host == \"\" {\n\t\treturn\n\t}\n\tuser := t.testInstance.RemoteUser()\n\tt.Logger.WithField(\"Command\", fmt.Sprintf(\"ssh -p%s %s@%s\", port, user, host)).Info(\"showing login information\")\n\tt.showedLoginInfo = true\n}\n\n// Get the latest instance list from the driver. If our test instance\n// is found, assign it to t.testInstance.\nfunc (t *tester) refreshTestInstance() error {\n\tinsts, err := t.getInstances(cloud.InstanceTags{t.TagKeyPrefix + \"InstanceSetID\": string(t.SetID)})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, i := range insts {\n\t\tif t.testInstance == nil {\n\t\t\t// Filter by InstanceSetID tag value\n\t\t\tif i.Tags()[t.TagKeyPrefix+\"InstanceSetID\"] != string(t.SetID) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\t// Filter by instance ID\n\t\t\tif i.ID() != t.testInstance.ID() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tt.Logger.WithFields(logrus.Fields{\n\t\t\t\"Instance\": i.ID(),\n\t\t\t\"Address\":  i.Address(),\n\t\t}).Info(\"found our instance in returned list\")\n\t\tt.testInstance = &worker.TagVerifier{Instance: i, Secret: t.secret, ReportVerified: nil}\n\t\tif !t.showedLoginInfo {\n\t\t\tt.showLoginInfo()\n\t\t}\n\t\treturn nil\n\t}\n\treturn errTestInstanceNotFound\n}\n\n// Get the list of instances, passing the given tags to the cloud\n// driver to filter results.\n//\n// Return only the instances that have our InstanceSetID tag.\nfunc (t *tester) getInstances(tags cloud.InstanceTags) ([]cloud.Instance, error) {\n\tvar ret []cloud.Instance\n\tt.Logger.WithField(\"FilterTags\", tags).Info(\"getting instance list\")\n\tt0 := time.Now()\n\tinsts, err := t.is.Instances(tags)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.Logger.WithFields(logrus.Fields{\n\t\t\"Duration\": time.Since(t0),\n\t\t\"N\":        len(insts),\n\t}).Info(\"got instance list\")\n\tfor _, i := range insts {\n\t\tif i.Tags()[t.TagKeyPrefix+\"InstanceSetID\"] == string(t.SetID) {\n\t\t\tret = append(ret, i)\n\t\t}\n\t}\n\treturn ret, nil\n}\n\n// Check that t.testInstance has every tag in t.Tags. 
If not, log an\n// error and return false.\nfunc (t *tester) checkTags() bool {\n\tok := true\n\tfor k, v := range t.Tags {\n\t\tif got := t.testInstance.Tags()[k]; got != v {\n\t\t\tok = false\n\t\t\tt.Logger.WithFields(logrus.Fields{\n\t\t\t\t\"Key\":           k,\n\t\t\t\t\"ExpectedValue\": v,\n\t\t\t\t\"GotValue\":      got,\n\t\t\t}).Error(\"tag is missing from test instance\")\n\t\t}\n\t}\n\tif ok {\n\t\tt.Logger.Info(\"all expected tags are present\")\n\t}\n\treturn ok\n}\n\n// Run t.BootProbeCommand on t.testInstance until it succeeds or the\n// deadline arrives.\nfunc (t *tester) waitForBoot(deadline time.Time) bool {\n\tfor time.Now().Before(deadline) {\n\t\terr := t.runShellCommand(t.BootProbeCommand)\n\t\tif err == nil {\n\t\t\treturn true\n\t\t}\n\t\tt.sleepProbeInterval()\n\t\tt.refreshTestInstance()\n\t}\n\tt.Logger.Error(\"timed out\")\n\treturn false\n}\n\n// Create t.executor and/or update its target to t.testInstance's\n// current address.\nfunc (t *tester) updateExecutor() {\n\tif t.executor == nil {\n\t\tt.executor = sshexecutor.New(t.testInstance)\n\t\tt.executor.SetTargetPort(t.SSHPort)\n\t\tt.executor.SetSigners(t.SSHKey)\n\t} else {\n\t\tt.executor.SetTarget(t.testInstance)\n\t}\n}\n\nfunc (t *tester) runShellCommand(cmd string) error {\n\tt.updateExecutor()\n\tt.Logger.WithFields(logrus.Fields{\n\t\t\"Command\": cmd,\n\t}).Info(\"executing remote command\")\n\tt0 := time.Now()\n\tstdout, stderr, err := t.executor.Execute(nil, cmd, nil)\n\tlgr := t.Logger.WithFields(logrus.Fields{\n\t\t\"Duration\": time.Since(t0),\n\t\t\"Command\":  cmd,\n\t\t\"stdout\":   string(stdout),\n\t\t\"stderr\":   string(stderr),\n\t})\n\tif err != nil {\n\t\tlgr.WithError(err).Info(\"remote command failed\")\n\t} else {\n\t\tlgr.Info(\"remote command succeeded\")\n\t}\n\treturn err\n}\n\n// currently, this tries forever until it can return true (success).\nfunc (t *tester) destroyTestInstance() bool {\n\tif t.testInstance == nil {\n\t\treturn true\n\t}\n\tfor {\n\t\tlgr := t.Logger.WithField(\"Instance\", t.testInstance.ID())\n\t\tlgr.Info(\"destroying instance\")\n\t\tt0 := time.Now()\n\n\t\terr := t.testInstance.Destroy()\n\t\tlgrDur := lgr.WithField(\"Duration\", time.Since(t0))\n\t\tif err != nil {\n\t\t\tlgrDur.WithError(err).Error(\"error destroying instance\")\n\t\t} else {\n\t\t\tlgrDur.Info(\"destroyed instance\")\n\t\t}\n\n\t\terr = t.refreshTestInstance()\n\t\tif err == errTestInstanceNotFound {\n\t\t\tlgr.Info(\"instance no longer appears in list\")\n\t\t\tt.testInstance = nil\n\t\t\treturn true\n\t\t} else if err == nil {\n\t\t\tlgr.Info(\"instance still exists after calling Destroy\")\n\t\t\tt.sleepSyncInterval()\n\t\t\tcontinue\n\t\t} else {\n\t\t\tt.Logger.WithError(err).Error(\"error getting list of instances\")\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc (t *tester) sleepSyncInterval() {\n\tt.Logger.WithField(\"Duration\", t.SyncInterval).Info(\"waiting SyncInterval\")\n\ttime.Sleep(t.SyncInterval)\n}\n\nfunc (t *tester) sleepProbeInterval() {\n\tt.Logger.WithField(\"Duration\", t.ProbeInterval).Info(\"waiting ProbeInterval\")\n\ttime.Sleep(t.ProbeInterval)\n}\n\n// Return a random string of n hexadecimal digits (n*4 random bits). n\n// must be even.\nfunc randomHex(n int) string {\n\tbuf := make([]byte, n/2)\n\t_, err := rand.Read(buf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn fmt.Sprintf(\"%x\", buf)\n}\n"
  },
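Note: tester.Run() above fans out over CloudVMs.DriverParameters by decoding the raw JSON into a generic map, overwriting "SubnetID" with one subnet at a time, and re-encoding the result for each per-subnet run. Below is a minimal, self-contained sketch of that round-trip technique; the parameter values and subnet IDs are illustrative only, not part of the tester API.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical driver parameters with a multi-valued SubnetID.
	raw := json.RawMessage(`{"Region":"us-east-1","SubnetID":["subnet-a","subnet-b"]}`)

	var dp map[string]interface{}
	if err := json.Unmarshal(raw, &dp); err != nil {
		panic(err)
	}
	subnets, ok := dp["SubnetID"].([]interface{})
	if !ok || len(subnets) <= 1 {
		fmt.Println("zero or one SubnetID: run once with the original parameters")
		return
	}
	for i, subnet := range subnets {
		// Replace the slice with a single subnet ID, as Run() does
		// before each call to runWithDriverParameters().
		dp["SubnetID"] = subnet
		perRun, err := json.Marshal(dp)
		if err != nil {
			panic(err)
		}
		fmt.Printf("run %d would use DriverParameters %s\n", i, perRun)
	}
}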
  {
    "path": "lib/cloud/cloudtest/tester_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage cloudtest\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/test\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"golang.org/x/crypto/ssh\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&TesterSuite{})\n\ntype TesterSuite struct {\n\tstubDriver *test.StubDriver\n\tcluster    *arvados.Cluster\n\ttester     *tester\n\tlog        bytes.Buffer\n}\n\nfunc (s *TesterSuite) SetUpTest(c *check.C) {\n\tpubkey, privkey := test.LoadTestKey(c, \"../../dispatchcloud/test/sshkey_dispatch\")\n\t_, privhostkey := test.LoadTestKey(c, \"../../dispatchcloud/test/sshkey_vm\")\n\ts.stubDriver = &test.StubDriver{\n\t\tHostKey:                   privhostkey,\n\t\tAuthorizedKeys:            []ssh.PublicKey{pubkey},\n\t\tErrorRateDestroy:          0.1,\n\t\tMinTimeBetweenCreateCalls: time.Millisecond,\n\t}\n\ttagKeyPrefix := \"tagprefix:\"\n\ts.cluster = &arvados.Cluster{\n\t\tManagementToken: \"test-management-token\",\n\t\tContainers: arvados.ContainersConfig{\n\t\t\tCloudVMs: arvados.CloudVMsConfig{\n\t\t\t\tSyncInterval:   arvados.Duration(10 * time.Millisecond),\n\t\t\t\tTimeoutBooting: arvados.Duration(150 * time.Millisecond),\n\t\t\t\tTimeoutProbe:   arvados.Duration(15 * time.Millisecond),\n\t\t\t\tProbeInterval:  arvados.Duration(5 * time.Millisecond),\n\t\t\t\tResourceTags:   map[string]string{\"testtag\": \"test value\"},\n\t\t\t},\n\t\t},\n\t\tInstanceTypes: arvados.InstanceTypeMap{\n\t\t\ttest.InstanceType(1).Name: test.InstanceType(1),\n\t\t\ttest.InstanceType(2).Name: test.InstanceType(2),\n\t\t\ttest.InstanceType(3).Name: test.InstanceType(3),\n\t\t},\n\t}\n\ts.tester = &tester{\n\t\tLogger:           ctxlog.New(&s.log, \"text\", \"info\"),\n\t\tTags:             cloud.SharedResourceTags{\"testtagkey\": \"testtagvalue\"},\n\t\tTagKeyPrefix:     tagKeyPrefix,\n\t\tSetID:            cloud.InstanceSetID(\"test-instance-set-id\"),\n\t\tProbeInterval:    5 * time.Millisecond,\n\t\tSyncInterval:     10 * time.Millisecond,\n\t\tTimeoutBooting:   150 * time.Millisecond,\n\t\tDriver:           s.stubDriver,\n\t\tDriverParameters: nil,\n\t\tInstanceType:     test.InstanceType(2),\n\t\tImageID:          \"test-image-id\",\n\t\tSSHKey:           privkey,\n\t\tBootProbeCommand: \"crunch-run --list\",\n\t\tShellCommand:     \"true\",\n\t}\n}\n\nfunc (s *TesterSuite) TestSuccess(c *check.C) {\n\ts.tester.Logger = ctxlog.TestLogger(c)\n\tok := s.tester.Run()\n\tc.Check(ok, check.Equals, true)\n}\n\nfunc (s *TesterSuite) TestBootFail(c *check.C) {\n\ts.tester.BootProbeCommand = \"falsey\"\n\tok := s.tester.Run()\n\tc.Check(ok, check.Equals, false)\n\tc.Check(s.log.String(), check.Matches, `(?ms).*\\\\\"falsey\\\\\": command not found.*`)\n}\n\nfunc (s *TesterSuite) TestShellCommandFail(c *check.C) {\n\ts.tester.ShellCommand = \"falsey\"\n\tok := s.tester.Run()\n\tc.Check(ok, check.Equals, false)\n\tc.Check(s.log.String(), check.Matches, `(?ms).*\\\\\"falsey\\\\\": command not found.*`)\n}\n"
  },
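Note: TestBootFail and TestShellCommandFail above assert on captured log output: the suite's logger writes to a bytes.Buffer, and the checks apply a multi-line `(?ms)` regular expression to the whole buffer. A standalone sketch of that pattern, using plain logrus in place of ctxlog.New(&s.log, "text", "info") to stay self-contained:

package main

import (
	"bytes"
	"fmt"
	"regexp"

	"github.com/sirupsen/logrus"
)

func main() {
	var buf bytes.Buffer
	logger := logrus.New()
	logger.SetOutput(&buf) // capture everything the logger writes

	// Simulate the kind of message the tester logs when a remote
	// command fails.
	logger.WithField("Command", "falsey").Info(`sh: 1: "falsey": command not found`)

	// (?m) makes ^/$ match per line and (?s) lets . span newlines, so
	// one pattern covers a multi-line log stream. logrus's text
	// formatter backslash-escapes the quotes inside the msg field,
	// hence the \\" in the pattern, matching the tests above.
	pat := regexp.MustCompile(`(?ms).*\\"falsey\\": command not found.*`)
	fmt.Println("matched:", pat.MatchString(buf.String()))
}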
  {
    "path": "lib/cloud/ec2/ec2.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ec2\n\nimport (\n\t\"context\"\n\t\"crypto/md5\"\n\t\"crypto/rsa\"\n\t\"crypto/sha1\"\n\t\"crypto/x509\"\n\t\"encoding/base64\"\n\t\"encoding/hex\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\t\"unicode\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/aws/retry\"\n\tconfig \"github.com/aws/aws-sdk-go-v2/config\"\n\t\"github.com/aws/aws-sdk-go-v2/credentials\"\n\t\"github.com/aws/aws-sdk-go-v2/service/ec2\"\n\t\"github.com/aws/aws-sdk-go-v2/service/ec2/types\"\n\t\"github.com/aws/smithy-go\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\n// Driver is the ec2 implementation of the cloud.Driver interface.\nvar Driver = cloud.DriverFunc(newEC2InstanceSet)\n\nconst (\n\tthrottleDelayMin = time.Second\n\tthrottleDelayMax = time.Minute\n)\n\ntype ec2InstanceSetConfig struct {\n\tAccessKeyID             string\n\tSecretAccessKey         string\n\tRegion                  string\n\tSecurityGroupIDs        arvados.StringSet\n\tSubnetID                sliceOrSingleString\n\tAdminUsername           string\n\tEBSVolumeType           types.VolumeType\n\tEBSPrice                float64\n\tIAMInstanceProfile      string\n\tSpotPriceUpdateInterval arvados.Duration\n\tInstanceTypeQuotaGroups map[string]string\n}\n\ntype sliceOrSingleString []string\n\n// UnmarshalJSON unmarshals an array of strings, and also accepts \"\"\n// as [], and \"foo\" as [\"foo\"].\nfunc (ss *sliceOrSingleString) UnmarshalJSON(data []byte) error {\n\tif len(data) == 0 {\n\t\t*ss = nil\n\t} else if data[0] == '[' {\n\t\tvar slice []string\n\t\terr := json.Unmarshal(data, &slice)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(slice) == 0 {\n\t\t\t*ss = nil\n\t\t} else {\n\t\t\t*ss = slice\n\t\t}\n\t} else {\n\t\tvar str string\n\t\terr := json.Unmarshal(data, &str)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif str == \"\" {\n\t\t\t*ss = nil\n\t\t} else {\n\t\t\t*ss = []string{str}\n\t\t}\n\t}\n\treturn nil\n}\n\ntype ec2Interface interface {\n\tDescribeKeyPairs(context.Context, *ec2.DescribeKeyPairsInput, ...func(*ec2.Options)) (*ec2.DescribeKeyPairsOutput, error)\n\tImportKeyPair(context.Context, *ec2.ImportKeyPairInput, ...func(*ec2.Options)) (*ec2.ImportKeyPairOutput, error)\n\tRunInstances(context.Context, *ec2.RunInstancesInput, ...func(*ec2.Options)) (*ec2.RunInstancesOutput, error)\n\tDescribeInstances(context.Context, *ec2.DescribeInstancesInput, ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error)\n\tDescribeInstanceStatus(context.Context, *ec2.DescribeInstanceStatusInput, ...func(*ec2.Options)) (*ec2.DescribeInstanceStatusOutput, error)\n\tDescribeSpotPriceHistory(context.Context, *ec2.DescribeSpotPriceHistoryInput, ...func(*ec2.Options)) (*ec2.DescribeSpotPriceHistoryOutput, error)\n\tCreateTags(context.Context, *ec2.CreateTagsInput, ...func(*ec2.Options)) (*ec2.CreateTagsOutput, error)\n\tTerminateInstances(context.Context, *ec2.TerminateInstancesInput, ...func(*ec2.Options)) (*ec2.TerminateInstancesOutput, error)\n}\n\ntype ec2InstanceSet struct {\n\tec2config              ec2InstanceSetConfig\n\tcurrentSubnetIDIndex   int32\n\tinstanceSetID          
cloud.InstanceSetID\n\tlogger                 logrus.FieldLogger\n\tclient                 ec2Interface\n\tkeysMtx                sync.Mutex\n\tkeys                   map[string]string\n\tthrottleDelayCreate    atomic.Value\n\tthrottleDelayInstances atomic.Value\n\n\tprices        map[priceKey][]cloud.InstancePrice\n\tpricesLock    sync.Mutex\n\tpricesUpdated map[priceKey]time.Time\n\n\tmInstances      *prometheus.GaugeVec\n\tmInstanceStarts *prometheus.CounterVec\n}\n\nfunc newEC2InstanceSet(confRaw json.RawMessage, instanceSetID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (prv cloud.InstanceSet, err error) {\n\tinstanceSet := &ec2InstanceSet{\n\t\tinstanceSetID: instanceSetID,\n\t\tlogger:        logger,\n\t}\n\terr = json.Unmarshal(confRaw, &instanceSet.ec2config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tawsConfig, err := config.LoadDefaultConfig(context.Background(),\n\t\tconfig.WithRetryer(func() aws.Retryer { return aws.NopRetryer{} }),\n\t\tconfig.WithRegion(instanceSet.ec2config.Region),\n\t\tconfig.WithCredentialsCacheOptions(func(o *aws.CredentialsCacheOptions) {\n\t\t\to.ExpiryWindow = 5 * time.Minute\n\t\t}),\n\t\tfunc(o *config.LoadOptions) error {\n\t\t\tif instanceSet.ec2config.AccessKeyID == \"\" && instanceSet.ec2config.SecretAccessKey == \"\" {\n\t\t\t\t// Use default SDK behavior (IAM role\n\t\t\t\t// via IMDSv2)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\to.Credentials = credentials.StaticCredentialsProvider{\n\t\t\t\tValue: aws.Credentials{\n\t\t\t\t\tAccessKeyID:     instanceSet.ec2config.AccessKeyID,\n\t\t\t\t\tSecretAccessKey: instanceSet.ec2config.SecretAccessKey,\n\t\t\t\t\tSource:          \"Arvados configuration\",\n\t\t\t\t},\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinstanceSet.client = ec2.NewFromConfig(awsConfig)\n\tinstanceSet.keys = make(map[string]string)\n\tif instanceSet.ec2config.EBSVolumeType == \"\" {\n\t\tinstanceSet.ec2config.EBSVolumeType = \"gp2\"\n\t}\n\n\t// Set up metrics\n\tinstanceSet.mInstances = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"ec2_instances\",\n\t\tHelp:      \"Number of instances running\",\n\t}, []string{\"subnet_id\"})\n\tinstanceSet.mInstanceStarts = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"ec2_instance_starts_total\",\n\t\tHelp:      \"Number of attempts to start a new instance\",\n\t}, []string{\"subnet_id\", \"success\"})\n\t// Initialize all of the series we'll be reporting.  Otherwise\n\t// the {subnet=A, success=0} series doesn't appear in metrics\n\t// at all until there's a failure in subnet A.\n\tfor _, subnet := range instanceSet.ec2config.SubnetID {\n\t\tinstanceSet.mInstanceStarts.WithLabelValues(subnet, \"0\").Add(0)\n\t\tinstanceSet.mInstanceStarts.WithLabelValues(subnet, \"1\").Add(0)\n\t}\n\tif len(instanceSet.ec2config.SubnetID) == 0 {\n\t\tinstanceSet.mInstanceStarts.WithLabelValues(\"\", \"0\").Add(0)\n\t\tinstanceSet.mInstanceStarts.WithLabelValues(\"\", \"1\").Add(0)\n\t}\n\tif reg != nil {\n\t\treg.MustRegister(instanceSet.mInstances)\n\t\treg.MustRegister(instanceSet.mInstanceStarts)\n\t}\n\n\treturn instanceSet, nil\n}\n\n// Calculate the public key fingerprints that AWS might use for a\n// given key.  
For an rsa key, return the AWS MD5 and SHA-1\n// fingerprints in that order, like\n// {\"02:d8:ca:c4:67:58:7b:46:64:50:41:59:3d:90:33:40\",\n// \"da:39:a3:ee:5e:6b:4b:0d:32:55:bf:ef:95:60:18:90:af:d8:07:09\"}.\n// For an ed25519 key, return the SHA-256 fingerprint with and without\n// padding, like\n// {\"SHA256:jgxbPn8JspgUBbZo3nRPWJ5e2h4v6FbiwlTe49NsNKE=\",\n// \"SHA256:jgxbPn8JspgUBbZo3nRPWJ5e2h4v6FbiwlTe49NsNKE\"}.\n//\n// \"When Amazon EC2 calculates a fingerprint, Amazon EC2 might append\n// padding to the fingerprint with = characters.\"\n//\n// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/verify-keys.html\nfunc awsKeyFingerprints(pk ssh.PublicKey) ([]string, error) {\n\tif pk.Type() != \"ssh-rsa\" {\n\t\t// sha256 is always 256 bits, so the padded base64\n\t\t// encoding will always be the unpadded encoding (as\n\t\t// returned by ssh.FingerprintSHA256) plus a final\n\t\t// \"=\".\n\t\thash2 := ssh.FingerprintSHA256(pk)\n\t\thash1 := hash2 + \"=\"\n\t\treturn []string{hash1, hash2}, nil\n\t}\n\t// AWS key fingerprints don't use the usual key fingerprint\n\t// you get from ssh-keygen or ssh.FingerprintLegacyMD5()\n\t// (you can get that from md5.Sum(pk.Marshal())\n\t//\n\t// AWS uses the md5 or sha1 of the PKIX DER encoding of the\n\t// public key, so calculate those fingerprints here.\n\tvar rsaPub struct {\n\t\tName string\n\t\tE    *big.Int\n\t\tN    *big.Int\n\t}\n\tif err := ssh.Unmarshal(pk.Marshal(), &rsaPub); err != nil {\n\t\treturn nil, fmt.Errorf(\"Unmarshal failed to parse public key: %w\", err)\n\t}\n\trsaPk := rsa.PublicKey{\n\t\tE: int(rsaPub.E.Int64()),\n\t\tN: rsaPub.N,\n\t}\n\tpkix, _ := x509.MarshalPKIXPublicKey(&rsaPk)\n\tsum1 := md5.Sum(pkix)\n\tsum2 := sha1.Sum(pkix)\n\treturn []string{\n\t\thexFingerprint(sum1[:]),\n\t\thexFingerprint(sum2[:]),\n\t}, nil\n}\n\n// Return hex-fingerprint representation of sum, like \"12:34:56:...\".\nfunc hexFingerprint(sum []byte) string {\n\thexarray := make([]string, len(sum))\n\tfor i, c := range sum {\n\t\thexarray[i] = hex.EncodeToString([]byte{c})\n\t}\n\treturn strings.Join(hexarray, \":\")\n}\n\nfunc (instanceSet *ec2InstanceSet) Create(\n\tinstanceType arvados.InstanceType,\n\timageID cloud.ImageID,\n\tnewTags cloud.InstanceTags,\n\tinitCommand cloud.InitCommand,\n\tpublicKey ssh.PublicKey) (cloud.Instance, error) {\n\n\tec2tags := []types.Tag{}\n\tfor k, v := range newTags {\n\t\tec2tags = append(ec2tags, types.Tag{\n\t\t\tKey:   aws.String(k),\n\t\t\tValue: aws.String(v),\n\t\t})\n\t}\n\n\tvar groups []string\n\tfor sg := range instanceSet.ec2config.SecurityGroupIDs {\n\t\tgroups = append(groups, sg)\n\t}\n\n\trii := ec2.RunInstancesInput{\n\t\tImageId:      aws.String(string(imageID)),\n\t\tInstanceType: types.InstanceType(instanceType.ProviderType),\n\t\tMaxCount:     aws.Int32(1),\n\t\tMinCount:     aws.Int32(1),\n\n\t\tNetworkInterfaces: []types.InstanceNetworkInterfaceSpecification{{\n\t\t\tAssociatePublicIpAddress: aws.Bool(false),\n\t\t\tDeleteOnTermination:      aws.Bool(true),\n\t\t\tDeviceIndex:              aws.Int32(0),\n\t\t\tGroups:                   groups,\n\t\t}},\n\t\tDisableApiTermination:             aws.Bool(false),\n\t\tInstanceInitiatedShutdownBehavior: types.ShutdownBehaviorTerminate,\n\t\tTagSpecifications: []types.TagSpecification{\n\t\t\t{\n\t\t\t\tResourceType: types.ResourceTypeInstance,\n\t\t\t\tTags:         ec2tags,\n\t\t\t}},\n\t\tMetadataOptions: &types.InstanceMetadataOptionsRequest{\n\t\t\t// Require IMDSv2, as described at\n\t\t\t// 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-IMDS-new-instances.html\n\t\t\tHttpEndpoint: types.InstanceMetadataEndpointStateEnabled,\n\t\t\tHttpTokens:   types.HttpTokensStateRequired,\n\t\t},\n\t\tUserData: aws.String(base64.StdEncoding.EncodeToString([]byte(\"#!/bin/sh\\n\" + initCommand + \"\\n\"))),\n\t}\n\n\tif publicKey != nil {\n\t\tkeyname, err := instanceSet.getKeyName(publicKey)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trii.KeyName = &keyname\n\t}\n\n\tif instanceType.AddedScratch > 0 {\n\t\trii.BlockDeviceMappings = []types.BlockDeviceMapping{{\n\t\t\tDeviceName: aws.String(\"/dev/xvdt\"),\n\t\t\tEbs: &types.EbsBlockDevice{\n\t\t\t\tDeleteOnTermination: aws.Bool(true),\n\t\t\t\tVolumeSize:          aws.Int32(int32((int64(instanceType.AddedScratch) + (1<<30 - 1)) >> 30)),\n\t\t\t\tVolumeType:          instanceSet.ec2config.EBSVolumeType,\n\t\t\t}}}\n\t}\n\n\tif instanceType.Preemptible {\n\t\trii.InstanceMarketOptions = &types.InstanceMarketOptionsRequest{\n\t\t\tMarketType: types.MarketTypeSpot,\n\t\t\tSpotOptions: &types.SpotMarketOptions{\n\t\t\t\tInstanceInterruptionBehavior: types.InstanceInterruptionBehaviorTerminate,\n\t\t\t\tMaxPrice:                     aws.String(fmt.Sprintf(\"%v\", instanceType.Price)),\n\t\t\t}}\n\t}\n\n\tif instanceSet.ec2config.IAMInstanceProfile != \"\" {\n\t\trii.IamInstanceProfile = &types.IamInstanceProfileSpecification{\n\t\t\tName: aws.String(instanceSet.ec2config.IAMInstanceProfile),\n\t\t}\n\t}\n\n\tvar rsv *ec2.RunInstancesOutput\n\tvar errToReturn error\n\tvar returningCapacityError bool\n\tsubnets := instanceSet.ec2config.SubnetID\n\tcurrentSubnetIDIndex := int(atomic.LoadInt32(&instanceSet.currentSubnetIDIndex))\n\tfor tryOffset := 0; ; tryOffset++ {\n\t\ttryIndex := 0\n\t\ttrySubnet := \"\"\n\t\tif len(subnets) > 0 {\n\t\t\ttryIndex = (currentSubnetIDIndex + tryOffset) % len(subnets)\n\t\t\ttrySubnet = subnets[tryIndex]\n\t\t\trii.NetworkInterfaces[0].SubnetId = aws.String(trySubnet)\n\t\t}\n\t\tvar err error\n\t\trsv, err = instanceSet.client.RunInstances(context.Background(), &rii)\n\t\tinstanceSet.mInstanceStarts.WithLabelValues(trySubnet, boolLabelValue[err == nil]).Add(1)\n\t\tif instcap, groupcap := isErrorCapacity(err); !returningCapacityError || instcap || groupcap {\n\t\t\t// We want to return the last capacity error,\n\t\t\t// if any; otherwise the last non-capacity\n\t\t\t// error.\n\t\t\terrToReturn = err\n\t\t\treturningCapacityError = instcap || groupcap\n\t\t}\n\t\tif isErrorSubnetSpecific(err) &&\n\t\t\ttryOffset < len(subnets)-1 {\n\t\t\tinstanceSet.logger.WithError(err).WithField(\"SubnetID\", subnets[tryIndex]).\n\t\t\t\tWarn(\"RunInstances failed, trying next subnet\")\n\t\t\tcontinue\n\t\t}\n\t\t// Succeeded, or exhausted all subnets, or got a\n\t\t// non-subnet-related error.\n\t\t//\n\t\t// We intentionally update currentSubnetIDIndex even\n\t\t// in the non-retryable-failure case here to avoid a\n\t\t// situation where successive calls to Create() keep\n\t\t// returning errors for the same subnet (perhaps\n\t\t// \"subnet full\") and never reveal the errors for the\n\t\t// other configured subnets (perhaps \"subnet ID\n\t\t// invalid\").\n\t\tatomic.StoreInt32(&instanceSet.currentSubnetIDIndex, int32(tryIndex))\n\t\tbreak\n\t}\n\tif rsv == nil || len(rsv.Instances) == 0 {\n\t\treturn nil, wrapError(errToReturn, &instanceSet.throttleDelayCreate)\n\t}\n\treturn &ec2Instance{\n\t\tprovider: instanceSet,\n\t\tinstance: rsv.Instances[0],\n\t}, nil\n}\n\nfunc (instanceSet 
*ec2InstanceSet) getKeyName(publicKey ssh.PublicKey) (string, error) {\n\tinstanceSet.keysMtx.Lock()\n\tdefer instanceSet.keysMtx.Unlock()\n\tfingerprints, err := awsKeyFingerprints(publicKey)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Could not make key fingerprint: %w\", err)\n\t}\n\tif keyname, ok := instanceSet.keys[fingerprints[0]]; ok {\n\t\treturn keyname, nil\n\t}\n\tkeyout, err := instanceSet.client.DescribeKeyPairs(context.Background(), &ec2.DescribeKeyPairsInput{\n\t\tFilters: []types.Filter{{\n\t\t\tName:   aws.String(\"fingerprint\"),\n\t\t\tValues: fingerprints,\n\t\t}},\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Could not search for keypair: %w\", err)\n\t}\n\tif len(keyout.KeyPairs) > 0 {\n\t\treturn *(keyout.KeyPairs[0].KeyName), nil\n\t}\n\tkeyname := \"arvados-dispatch-keypair-\" + fingerprints[0]\n\t_, err = instanceSet.client.ImportKeyPair(context.Background(), &ec2.ImportKeyPairInput{\n\t\tKeyName:           &keyname,\n\t\tPublicKeyMaterial: ssh.MarshalAuthorizedKey(publicKey),\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Could not import keypair: %w\", err)\n\t}\n\tinstanceSet.keys[fingerprints[0]] = keyname\n\treturn keyname, nil\n}\n\nfunc (instanceSet *ec2InstanceSet) Instances(tags cloud.InstanceTags) (instances []cloud.Instance, err error) {\n\tvar filters []types.Filter\n\tfor k, v := range tags {\n\t\tfilters = append(filters, types.Filter{\n\t\t\tName:   aws.String(\"tag:\" + k),\n\t\t\tValues: []string{v},\n\t\t})\n\t}\n\tneedAZs := false\n\tdii := &ec2.DescribeInstancesInput{Filters: filters}\n\tfor {\n\t\tdio, err := instanceSet.client.DescribeInstances(context.Background(), dii)\n\t\terr = wrapError(err, &instanceSet.throttleDelayInstances)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, rsv := range dio.Reservations {\n\t\t\tfor _, inst := range rsv.Instances {\n\t\t\t\tswitch inst.State.Name {\n\t\t\t\tcase types.InstanceStateNameShuttingDown:\n\t\t\t\tcase types.InstanceStateNameTerminated:\n\t\t\t\tdefault:\n\t\t\t\t\tinstances = append(instances, &ec2Instance{\n\t\t\t\t\t\tprovider: instanceSet,\n\t\t\t\t\t\tinstance: inst,\n\t\t\t\t\t})\n\t\t\t\t\tif inst.InstanceLifecycle == types.InstanceLifecycleTypeSpot {\n\t\t\t\t\t\tneedAZs = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif dio.NextToken == nil || *dio.NextToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tdii.NextToken = dio.NextToken\n\t}\n\tif needAZs && instanceSet.ec2config.SpotPriceUpdateInterval > 0 {\n\t\taz := map[string]string{}\n\t\tdisi := &ec2.DescribeInstanceStatusInput{IncludeAllInstances: aws.Bool(true)}\n\t\tfor {\n\t\t\tpage, err := instanceSet.client.DescribeInstanceStatus(context.Background(), disi)\n\t\t\tif err != nil {\n\t\t\t\tinstanceSet.logger.WithError(err).Warn(\"error getting instance statuses\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfor _, ent := range page.InstanceStatuses {\n\t\t\t\taz[*ent.InstanceId] = *ent.AvailabilityZone\n\t\t\t}\n\t\t\tif page.NextToken == nil || *page.NextToken == \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdisi.NextToken = page.NextToken\n\t\t}\n\t\tfor _, inst := range instances {\n\t\t\tinst := inst.(*ec2Instance)\n\t\t\tinst.availabilityZone = az[*inst.instance.InstanceId]\n\t\t}\n\t\tinstanceSet.updateSpotPrices(instances)\n\t}\n\n\t// Count instances in each subnet, and report in metrics.\n\tsubnetInstances := map[string]int{\"\": 0}\n\tfor _, subnet := range instanceSet.ec2config.SubnetID {\n\t\tsubnetInstances[subnet] = 0\n\t}\n\tfor _, inst := range instances {\n\t\tsubnet := 
inst.(*ec2Instance).instance.SubnetId\n\t\tif subnet != nil {\n\t\t\tsubnetInstances[*subnet]++\n\t\t} else {\n\t\t\tsubnetInstances[\"\"]++\n\t\t}\n\t}\n\tfor subnet, count := range subnetInstances {\n\t\tinstanceSet.mInstances.WithLabelValues(subnet).Set(float64(count))\n\t}\n\n\treturn instances, err\n}\n\ntype priceKey struct {\n\tinstanceType     string\n\tspot             bool\n\tavailabilityZone string\n}\n\n// Refresh recent spot instance pricing data for the given instances,\n// unless we already have recent pricing data for all relevant types.\nfunc (instanceSet *ec2InstanceSet) updateSpotPrices(instances []cloud.Instance) {\n\tif len(instances) == 0 {\n\t\treturn\n\t}\n\n\tinstanceSet.pricesLock.Lock()\n\tdefer instanceSet.pricesLock.Unlock()\n\tif instanceSet.prices == nil {\n\t\tinstanceSet.prices = map[priceKey][]cloud.InstancePrice{}\n\t\tinstanceSet.pricesUpdated = map[priceKey]time.Time{}\n\t}\n\n\tupdateTime := time.Now()\n\tstaleTime := updateTime.Add(-instanceSet.ec2config.SpotPriceUpdateInterval.Duration())\n\tneedUpdate := false\n\tallTypes := map[types.InstanceType]bool{}\n\n\tfor _, inst := range instances {\n\t\tec2inst := inst.(*ec2Instance).instance\n\t\tif ec2inst.InstanceLifecycle == types.InstanceLifecycleTypeSpot {\n\t\t\tpk := priceKey{\n\t\t\t\tinstanceType:     string(ec2inst.InstanceType),\n\t\t\t\tspot:             true,\n\t\t\t\tavailabilityZone: inst.(*ec2Instance).availabilityZone,\n\t\t\t}\n\t\t\tif instanceSet.pricesUpdated[pk].Before(staleTime) {\n\t\t\t\tneedUpdate = true\n\t\t\t}\n\t\t\tallTypes[ec2inst.InstanceType] = true\n\t\t}\n\t}\n\tif !needUpdate {\n\t\treturn\n\t}\n\tvar typeFilterValues []string\n\tfor instanceType := range allTypes {\n\t\ttypeFilterValues = append(typeFilterValues, string(instanceType))\n\t}\n\t// Get 3x update interval worth of pricing data. 
(Ideally the\n\t// AWS API would tell us \"we have shown you all of the price\n\t// changes up to time T\", but it doesn't, so we'll just ask\n\t// for 3 intervals worth of data on each update, de-duplicate\n\t// the data points, and not worry too much about occasionally\n\t// missing some data points when our lookups fail twice in a\n\t// row.\n\tdsphi := &ec2.DescribeSpotPriceHistoryInput{\n\t\tStartTime: aws.Time(updateTime.Add(-3 * instanceSet.ec2config.SpotPriceUpdateInterval.Duration())),\n\t\tFilters: []types.Filter{\n\t\t\ttypes.Filter{Name: aws.String(\"instance-type\"), Values: typeFilterValues},\n\t\t\ttypes.Filter{Name: aws.String(\"product-description\"), Values: []string{\"Linux/UNIX\"}},\n\t\t},\n\t}\n\tfor {\n\t\tpage, err := instanceSet.client.DescribeSpotPriceHistory(context.Background(), dsphi)\n\t\tif err != nil {\n\t\t\tinstanceSet.logger.WithError(err).Warn(\"error retrieving spot instance prices\")\n\t\t\tbreak\n\t\t}\n\t\tfor _, ent := range page.SpotPriceHistory {\n\t\t\tif ent.InstanceType == \"\" || ent.SpotPrice == nil || ent.Timestamp == nil {\n\t\t\t\t// bogus record?\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprice, err := strconv.ParseFloat(*ent.SpotPrice, 64)\n\t\t\tif err != nil {\n\t\t\t\t// bogus record?\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tpk := priceKey{\n\t\t\t\tinstanceType:     string(ent.InstanceType),\n\t\t\t\tspot:             true,\n\t\t\t\tavailabilityZone: *ent.AvailabilityZone,\n\t\t\t}\n\t\t\tinstanceSet.prices[pk] = append(instanceSet.prices[pk], cloud.InstancePrice{\n\t\t\t\tStartTime: *ent.Timestamp,\n\t\t\t\tPrice:     price,\n\t\t\t})\n\t\t\tinstanceSet.pricesUpdated[pk] = updateTime\n\t\t}\n\t\tif page.NextToken == nil || *page.NextToken == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tdsphi.NextToken = page.NextToken\n\t}\n\n\texpiredTime := updateTime.Add(-64 * instanceSet.ec2config.SpotPriceUpdateInterval.Duration())\n\tfor pk, last := range instanceSet.pricesUpdated {\n\t\tif last.Before(expiredTime) {\n\t\t\tdelete(instanceSet.pricesUpdated, pk)\n\t\t\tdelete(instanceSet.prices, pk)\n\t\t}\n\t}\n\tfor pk, prices := range instanceSet.prices {\n\t\tinstanceSet.prices[pk] = cloud.NormalizePriceHistory(prices)\n\t}\n}\n\nfunc (instanceSet *ec2InstanceSet) Stop() {\n}\n\nfunc (instanceSet *ec2InstanceSet) InstanceQuotaGroup(it arvados.InstanceType) cloud.InstanceQuotaGroup {\n\t// https://docs.aws.amazon.com/ec2/latest/instancetypes/ec2-instance-quotas.html\n\t// 2024-09-09\n\tvar quotaGroup string\n\tpt := strings.ToLower(it.ProviderType)\n\tfor i, c := range pt {\n\t\tif !unicode.IsLower(c) && quotaGroup == \"\" {\n\t\t\t// Fall back to the alphabetic prefix of\n\t\t\t// ProviderType.\n\t\t\tquotaGroup = pt[:i]\n\t\t}\n\t\tif conf := instanceSet.ec2config.InstanceTypeQuotaGroups[pt[:i]]; conf != \"\" && quotaGroup != \"\" {\n\t\t\t// Prefer the longest prefix of ProviderType\n\t\t\t// that is listed explicitly in config.\n\t\t\t//\n\t\t\t// (But don't look up a too-short prefix --\n\t\t\t// for an instance type like \"trn1.234\", use\n\t\t\t// the config for \"trn\" or \"trn1\" but not\n\t\t\t// \"t\".)\n\t\t\tquotaGroup = conf\n\t\t}\n\t}\n\tif it.Preemptible {\n\t\t// Spot instance quotas are separate from demand\n\t\t// quotas.\n\t\tquotaGroup += \"-spot\"\n\t}\n\treturn cloud.InstanceQuotaGroup(quotaGroup)\n}\n\ntype ec2Instance struct {\n\tprovider         *ec2InstanceSet\n\tinstance         types.Instance\n\tavailabilityZone string // sometimes available for spot instances\n}\n\nfunc (inst *ec2Instance) ID() cloud.InstanceID {\n\treturn 
cloud.InstanceID(*inst.instance.InstanceId)\n}\n\nfunc (inst *ec2Instance) String() string {\n\treturn *inst.instance.InstanceId\n}\n\nfunc (inst *ec2Instance) ProviderType() string {\n\treturn string(inst.instance.InstanceType)\n}\n\nfunc (inst *ec2Instance) SetTags(newTags cloud.InstanceTags) error {\n\tvar ec2tags []types.Tag\n\tfor k, v := range newTags {\n\t\tec2tags = append(ec2tags, types.Tag{\n\t\t\tKey:   aws.String(k),\n\t\t\tValue: aws.String(v),\n\t\t})\n\t}\n\n\t_, err := inst.provider.client.CreateTags(context.Background(), &ec2.CreateTagsInput{\n\t\tResources: []string{*inst.instance.InstanceId},\n\t\tTags:      ec2tags,\n\t})\n\n\treturn err\n}\n\nfunc (inst *ec2Instance) Tags() cloud.InstanceTags {\n\ttags := make(map[string]string)\n\n\tfor _, t := range inst.instance.Tags {\n\t\ttags[*t.Key] = *t.Value\n\t}\n\n\treturn tags\n}\n\nfunc (inst *ec2Instance) Destroy() error {\n\t_, err := inst.provider.client.TerminateInstances(context.Background(), &ec2.TerminateInstancesInput{\n\t\tInstanceIds: []string{*inst.instance.InstanceId},\n\t})\n\treturn err\n}\n\nfunc (inst *ec2Instance) Address() string {\n\tif inst.instance.PrivateIpAddress != nil {\n\t\treturn *inst.instance.PrivateIpAddress\n\t}\n\treturn \"\"\n}\n\nfunc (inst *ec2Instance) RemoteUser() string {\n\treturn inst.provider.ec2config.AdminUsername\n}\n\nfunc (inst *ec2Instance) VerifyHostKey(ssh.PublicKey, *ssh.Client) error {\n\treturn cloud.ErrNotImplemented\n}\n\n// PriceHistory returns the price history for this specific instance.\n//\n// AWS documentation is elusive about whether the hourly cost of a\n// given spot instance changes as the current spot price changes for\n// the corresponding instance type and availability zone. Our\n// implementation assumes the answer is yes, based on the following\n// hints.\n//\n// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html\n// says: \"After your Spot Instance is running, if the Spot price rises\n// above your maximum price, Amazon EC2 interrupts your Spot\n// Instance.\" (This doesn't address what happens when the spot price\n// rises *without* exceeding your maximum price.)\n//\n// https://docs.aws.amazon.com/whitepapers/latest/cost-optimization-leveraging-ec2-spot-instances/how-spot-instances-work.html\n// says: \"You pay the Spot price that's in effect, billed to the\n// nearest second.\" (But it's not explicitly stated whether \"the price\n// in effect\" changes over time for a given instance.)\n//\n// The same page also says, in a discussion about the effect of\n// specifying a maximum price: \"Note that you never pay more than the\n// Spot price that is in effect when your Spot Instance is running.\"\n// (The use of the phrase \"is running\", as opposed to \"was launched\",\n// hints that pricing is dynamic.)\nfunc (inst *ec2Instance) PriceHistory(instType arvados.InstanceType) []cloud.InstancePrice {\n\tinst.provider.pricesLock.Lock()\n\tdefer inst.provider.pricesLock.Unlock()\n\t// Note updateSpotPrices currently populates\n\t// inst.provider.prices only for spot instances, so if\n\t// spot==false here, we will return no data.\n\tpk := priceKey{\n\t\tinstanceType:     string(inst.instance.InstanceType),\n\t\tspot:             inst.instance.InstanceLifecycle == types.InstanceLifecycleTypeSpot,\n\t\tavailabilityZone: inst.availabilityZone,\n\t}\n\tvar prices []cloud.InstancePrice\n\tfor _, price := range inst.provider.prices[pk] {\n\t\t// ceil(added scratch space in GiB)\n\t\tgib := (instType.AddedScratch + 1<<30 - 1) >> 30\n\t\tmonthly := 
inst.provider.ec2config.EBSPrice * float64(gib)\n\t\thourly := monthly / 30 / 24\n\t\tprice.Price += hourly\n\t\tprices = append(prices, price)\n\t}\n\treturn prices\n}\n\ntype rateLimitError struct {\n\terror\n\tearliestRetry time.Time\n}\n\nfunc (err rateLimitError) EarliestRetry() time.Time {\n\treturn err.earliestRetry\n}\n\ntype capacityError struct {\n\terror\n\tisInstanceQuotaGroupSpecific bool\n\tisInstanceTypeSpecific       bool\n}\n\nfunc (er *capacityError) IsCapacityError() bool {\n\treturn true\n}\n\nfunc (er *capacityError) IsInstanceQuotaGroupSpecific() bool {\n\treturn er.isInstanceQuotaGroupSpecific\n}\n\nfunc (er *capacityError) IsInstanceTypeSpecific() bool {\n\treturn er.isInstanceTypeSpecific\n}\n\nvar isCodeQuota = map[string]bool{\n\t\"InstanceLimitExceeded\":             true,\n\t\"InsufficientAddressCapacity\":       true,\n\t\"InsufficientFreeAddressesInSubnet\": true,\n\t\"InsufficientVolumeCapacity\":        true,\n\t\"MaxSpotInstanceCountExceeded\":      true,\n}\n\n// isErrorQuota returns whether the error indicates we have reached\n// some usage quota/limit -- i.e., immediately retrying with an equal\n// or larger instance type will probably not work.\n//\n// Returns false if error is nil.\nfunc isErrorQuota(err error) bool {\n\tvar aerr smithy.APIError\n\tif errors.As(err, &aerr) {\n\t\tif _, ok := isCodeQuota[aerr.ErrorCode()]; ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nvar reSubnetSpecificInvalidParameterMessage = regexp.MustCompile(`(?ms).*( subnet |sufficient free [Ii]pv[46] addresses).*`)\n\n// isErrorSubnetSpecific returns true if the problem encountered by\n// RunInstances might be avoided by trying a different subnet.\nfunc isErrorSubnetSpecific(err error) bool {\n\tvar aerr smithy.APIError\n\tif !errors.As(err, &aerr) {\n\t\treturn false\n\t}\n\tcode := aerr.ErrorCode()\n\treturn strings.Contains(code, \"Subnet\") ||\n\t\tcode == \"InsufficientInstanceCapacity\" ||\n\t\tcode == \"InsufficientVolumeCapacity\" ||\n\t\tcode == \"Unsupported\" ||\n\t\t// See TestIsErrorSubnetSpecific for examples of why\n\t\t// we look for substrings in code/message instead of\n\t\t// only using specific codes here.\n\t\t(strings.Contains(code, \"InvalidParameter\") &&\n\t\t\treSubnetSpecificInvalidParameterMessage.MatchString(aerr.ErrorMessage()))\n}\n\n// isErrorCapacity determines whether the given error indicates lack\n// of capacity -- either temporary or permanent -- to run a specific\n// instance type (i.e., retrying with any other instance type might\n// succeed) or an instance quota group (i.e., retrying with an\n// instance type in a different instance quota group might succeed).\nfunc isErrorCapacity(err error) (instcap bool, groupcap bool) {\n\tvar aerr smithy.APIError\n\tif !errors.As(err, &aerr) {\n\t\treturn false, false\n\t}\n\tcode := aerr.ErrorCode()\n\tif code == \"VcpuLimitExceeded\" {\n\t\treturn false, true\n\t}\n\tif code == \"InsufficientInstanceCapacity\" ||\n\t\t(code == \"Unsupported\" && strings.Contains(aerr.ErrorMessage(), \"requested instance type\")) {\n\t\treturn true, false\n\t}\n\treturn false, false\n}\n\ntype ec2QuotaError struct {\n\terror\n}\n\nfunc (er *ec2QuotaError) IsQuotaError() bool {\n\treturn true\n}\n\nfunc isThrottleError(err error) bool {\n\tvar aerr smithy.APIError\n\tif !errors.As(err, &aerr) {\n\t\treturn false\n\t}\n\t_, is := retry.DefaultThrottleErrorCodes[aerr.ErrorCode()]\n\treturn is\n}\n\nfunc wrapError(err error, throttleValue *atomic.Value) error {\n\tif isThrottleError(err) {\n\t\t// Back off 
exponentially until an upstream call\n\t\t// either succeeds or returns a non-throttle error.\n\t\td, _ := throttleValue.Load().(time.Duration)\n\t\td = d*3/2 + time.Second\n\t\tif d < throttleDelayMin {\n\t\t\td = throttleDelayMin\n\t\t} else if d > throttleDelayMax {\n\t\t\td = throttleDelayMax\n\t\t}\n\t\tthrottleValue.Store(d)\n\t\treturn rateLimitError{error: err, earliestRetry: time.Now().Add(d)}\n\t} else if isErrorQuota(err) {\n\t\treturn &ec2QuotaError{error: err}\n\t} else if instcap, groupcap := isErrorCapacity(err); instcap || groupcap {\n\t\treturn &capacityError{\n\t\t\terror:                        err,\n\t\t\tisInstanceTypeSpecific:       !groupcap,\n\t\t\tisInstanceQuotaGroupSpecific: groupcap,\n\t\t}\n\t} else if err != nil {\n\t\tthrottleValue.Store(time.Duration(0))\n\t\treturn err\n\t}\n\tthrottleValue.Store(time.Duration(0))\n\treturn nil\n}\n\nvar boolLabelValue = map[bool]string{false: \"0\", true: \"1\"}\n"
  },
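Note: wrapError() above implements a shared exponential backoff for EC2 throttling: each throttled response grows the stored delay by half plus one second, clamps it to [throttleDelayMin, throttleDelayMax], and any non-throttled response resets it to zero. A minimal sketch of just that delay arithmetic, assuming the same atomic.Value-of-time.Duration storage as the throttleDelayCreate/throttleDelayInstances fields:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

const (
	throttleDelayMin = time.Second
	throttleDelayMax = time.Minute
)

// nextDelay is a hypothetical helper reproducing wrapError's backoff
// update: d = d*3/2 + 1s, clamped to [throttleDelayMin, throttleDelayMax].
func nextDelay(v *atomic.Value) time.Duration {
	d, _ := v.Load().(time.Duration) // zero on first use
	d = d*3/2 + time.Second
	if d < throttleDelayMin {
		d = throttleDelayMin
	} else if d > throttleDelayMax {
		d = throttleDelayMax
	}
	v.Store(d)
	return d
}

func main() {
	var v atomic.Value
	for i := 1; i <= 12; i++ {
		// Prints 1s, 2.5s, 4.75s, ... then stays clamped at 1m0s.
		fmt.Printf("throttled call %d: earliest retry in %s\n", i, nextDelay(&v))
	}
	v.Store(time.Duration(0)) // a successful call resets the backoff
}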
  {
    "path": "lib/cloud/ec2/ec2_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n//\n//\n// How to manually run individual tests against the real cloud:\n//\n// $ go test -v git.arvados.org/arvados.git/lib/cloud/ec2 -live-ec2-cfg ec2config.yml -check.f=TestCreate\n//\n// Tests should be run individually and in the order they are listed in the file:\n//\n// Example ec2config.yml:\n//\n// ImageIDForTestSuite: ami-xxxxxxxxxxxxxxxxx\n// DriverParameters:\n//       AccessKeyID: XXXXXXXXXXXXXX\n//       SecretAccessKey: xxxxxxxxxxxxxxxxxxxx\n//       Region: us-east-1\n//       SecurityGroupIDs: [sg-xxxxxxxx]\n//       SubnetID: subnet-xxxxxxxx\n//       AdminUsername: crunch\n\npackage ec2\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"os/exec\"\n\t\"regexp\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\tlibconfig \"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/test\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/ec2\"\n\t\"github.com/aws/aws-sdk-go-v2/service/ec2/types\"\n\t\"github.com/aws/smithy-go\"\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar live = flag.String(\"live-ec2-cfg\", \"\", \"Test with real EC2 API, provide config file\")\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\ntype sliceOrStringSuite struct{}\n\nvar _ = check.Suite(&sliceOrStringSuite{})\n\nfunc (s *sliceOrStringSuite) TestUnmarshal(c *check.C) {\n\tvar conf ec2InstanceSetConfig\n\tfor _, trial := range []struct {\n\t\tinput  string\n\t\toutput sliceOrSingleString\n\t}{\n\t\t{``, nil},\n\t\t{`\"\"`, nil},\n\t\t{`[]`, nil},\n\t\t{`\"foo\"`, sliceOrSingleString{\"foo\"}},\n\t\t{`[\"foo\"]`, sliceOrSingleString{\"foo\"}},\n\t\t{`[foo]`, sliceOrSingleString{\"foo\"}},\n\t\t{`[\"foo\", \"bar\"]`, sliceOrSingleString{\"foo\", \"bar\"}},\n\t\t{`[foo-bar, baz]`, sliceOrSingleString{\"foo-bar\", \"baz\"}},\n\t} {\n\t\tc.Logf(\"trial: %+v\", trial)\n\t\terr := yaml.Unmarshal([]byte(\"SubnetID: \"+trial.input+\"\\n\"), &conf)\n\t\tif !c.Check(err, check.IsNil) {\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(conf.SubnetID, check.DeepEquals, trial.output)\n\t}\n}\n\ntype EC2InstanceSetSuite struct{}\n\nvar _ = check.Suite(&EC2InstanceSetSuite{})\n\ntype testConfig struct {\n\tImageIDForTestSuite string\n\tDriverParameters    json.RawMessage\n}\n\ntype ec2stub struct {\n\tc                     *check.C\n\treftime               time.Time\n\timportKeyPairCalls    []*ec2.ImportKeyPairInput\n\tdescribeKeyPairsCalls []*ec2.DescribeKeyPairsInput\n\trunInstancesCalls     []*ec2.RunInstancesInput\n\t// {subnetID => error}: RunInstances returns error if subnetID\n\t// matches.\n\tsubnetErrorOnRunInstances map[string]error\n}\n\nfunc (e *ec2stub) ImportKeyPair(ctx context.Context, input *ec2.ImportKeyPairInput, _ ...func(*ec2.Options)) (*ec2.ImportKeyPairOutput, error) {\n\te.importKeyPairCalls = append(e.importKeyPairCalls, input)\n\treturn nil, nil\n}\n\nfunc (e *ec2stub) DescribeKeyPairs(ctx context.Context, input *ec2.DescribeKeyPairsInput, _ ...func(*ec2.Options)) (*ec2.DescribeKeyPairsOutput, error) 
{\n\te.describeKeyPairsCalls = append(e.describeKeyPairsCalls, input)\n\treturn &ec2.DescribeKeyPairsOutput{}, nil\n}\n\nfunc (e *ec2stub) RunInstances(ctx context.Context, input *ec2.RunInstancesInput, _ ...func(*ec2.Options)) (*ec2.RunInstancesOutput, error) {\n\te.runInstancesCalls = append(e.runInstancesCalls, input)\n\tif len(input.NetworkInterfaces) > 0 && input.NetworkInterfaces[0].SubnetId != nil {\n\t\terr := e.subnetErrorOnRunInstances[*input.NetworkInterfaces[0].SubnetId]\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn &ec2.RunInstancesOutput{Instances: []types.Instance{{\n\t\tInstanceId:   aws.String(\"i-123\"),\n\t\tInstanceType: types.InstanceTypeT2Micro,\n\t\tTags:         input.TagSpecifications[0].Tags,\n\t}}}, nil\n}\n\nfunc (e *ec2stub) DescribeInstances(ctx context.Context, input *ec2.DescribeInstancesInput, _ ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) {\n\treturn &ec2.DescribeInstancesOutput{\n\t\tReservations: []types.Reservation{{\n\t\t\tInstances: []types.Instance{{\n\t\t\t\tInstanceId:        aws.String(\"i-123\"),\n\t\t\t\tInstanceLifecycle: types.InstanceLifecycleTypeSpot,\n\t\t\t\tInstanceType:      types.InstanceTypeT2Micro,\n\t\t\t\tPrivateIpAddress:  aws.String(\"10.1.2.3\"),\n\t\t\t\tState:             &types.InstanceState{Name: types.InstanceStateNameRunning, Code: aws.Int32(16)},\n\t\t\t}, {\n\t\t\t\tInstanceId:        aws.String(\"i-124\"),\n\t\t\t\tInstanceLifecycle: types.InstanceLifecycleTypeSpot,\n\t\t\t\tInstanceType:      types.InstanceTypeT2Micro,\n\t\t\t\tPrivateIpAddress:  aws.String(\"10.1.2.4\"),\n\t\t\t\tState:             &types.InstanceState{Name: types.InstanceStateNameRunning, Code: aws.Int32(16)},\n\t\t\t}},\n\t\t}},\n\t}, nil\n}\n\nfunc (e *ec2stub) DescribeInstanceStatus(ctx context.Context, input *ec2.DescribeInstanceStatusInput, _ ...func(*ec2.Options)) (*ec2.DescribeInstanceStatusOutput, error) {\n\treturn &ec2.DescribeInstanceStatusOutput{\n\t\tInstanceStatuses: []types.InstanceStatus{{\n\t\t\tInstanceId:       aws.String(\"i-123\"),\n\t\t\tAvailabilityZone: aws.String(\"aa-east-1a\"),\n\t\t}, {\n\t\t\tInstanceId:       aws.String(\"i-124\"),\n\t\t\tAvailabilityZone: aws.String(\"aa-east-1a\"),\n\t\t}},\n\t}, nil\n}\n\nfunc (e *ec2stub) DescribeSpotPriceHistory(ctx context.Context, input *ec2.DescribeSpotPriceHistoryInput, _ ...func(*ec2.Options)) (*ec2.DescribeSpotPriceHistoryOutput, error) {\n\tif input.NextToken == nil || *input.NextToken == \"\" {\n\t\treturn &ec2.DescribeSpotPriceHistoryOutput{\n\t\t\tSpotPriceHistory: []types.SpotPrice{\n\t\t\t\ttypes.SpotPrice{\n\t\t\t\t\tInstanceType:     types.InstanceTypeT2Micro,\n\t\t\t\t\tAvailabilityZone: aws.String(\"aa-east-1a\"),\n\t\t\t\t\tSpotPrice:        aws.String(\"0.005\"),\n\t\t\t\t\tTimestamp:        aws.Time(e.reftime.Add(-9 * time.Minute)),\n\t\t\t\t},\n\t\t\t\ttypes.SpotPrice{\n\t\t\t\t\tInstanceType:     types.InstanceTypeT2Micro,\n\t\t\t\t\tAvailabilityZone: aws.String(\"aa-east-1a\"),\n\t\t\t\t\tSpotPrice:        aws.String(\"0.015\"),\n\t\t\t\t\tTimestamp:        aws.Time(e.reftime.Add(-5 * time.Minute)),\n\t\t\t\t},\n\t\t\t},\n\t\t\tNextToken: aws.String(\"stubnexttoken\"),\n\t\t}, nil\n\t} else {\n\t\treturn &ec2.DescribeSpotPriceHistoryOutput{\n\t\t\tSpotPriceHistory: []types.SpotPrice{\n\t\t\t\ttypes.SpotPrice{\n\t\t\t\t\tInstanceType:     types.InstanceTypeT2Micro,\n\t\t\t\t\tAvailabilityZone: aws.String(\"aa-east-1a\"),\n\t\t\t\t\tSpotPrice:        aws.String(\"0.01\"),\n\t\t\t\t\tTimestamp:        aws.Time(e.reftime.Add(-2 * 
time.Minute)),\n\t\t\t\t},\n\t\t\t},\n\t\t\tNextToken: aws.String(\"\"), // see bug #22400\n\t\t}, nil\n\t}\n}\n\nfunc (e *ec2stub) CreateTags(ctx context.Context, input *ec2.CreateTagsInput, _ ...func(*ec2.Options)) (*ec2.CreateTagsOutput, error) {\n\treturn nil, nil\n}\n\nfunc (e *ec2stub) TerminateInstances(ctx context.Context, input *ec2.TerminateInstancesInput, _ ...func(*ec2.Options)) (*ec2.TerminateInstancesOutput, error) {\n\treturn nil, nil\n}\n\ntype ec2stubError = smithy.GenericAPIError\n\n// Ensure ec2stubError satisfies the smithy.APIError interface\nvar _ = smithy.APIError(&ec2stubError{})\n\nfunc GetInstanceSet(c *check.C, conf string) (*ec2InstanceSet, cloud.ImageID, arvados.Cluster, *prometheus.Registry) {\n\treg := prometheus.NewRegistry()\n\tcluster := arvados.Cluster{\n\t\tInstanceTypes: arvados.InstanceTypeMap(map[string]arvados.InstanceType{\n\t\t\t\"tiny\": {\n\t\t\t\tName:         \"tiny\",\n\t\t\t\tProviderType: \"t2.micro\",\n\t\t\t\tVCPUs:        1,\n\t\t\t\tRAM:          4000000000,\n\t\t\t\tScratch:      10000000000,\n\t\t\t\tPrice:        .02,\n\t\t\t\tPreemptible:  false,\n\t\t\t},\n\t\t\t\"tiny-with-extra-scratch\": {\n\t\t\t\tName:         \"tiny-with-extra-scratch\",\n\t\t\t\tProviderType: \"t2.micro\",\n\t\t\t\tVCPUs:        1,\n\t\t\t\tRAM:          4000000000,\n\t\t\t\tPrice:        .02,\n\t\t\t\tPreemptible:  false,\n\t\t\t\tAddedScratch: 20000000000,\n\t\t\t},\n\t\t\t\"tiny-preemptible\": {\n\t\t\t\tName:         \"tiny-preemptible\",\n\t\t\t\tProviderType: \"t2.micro\",\n\t\t\t\tVCPUs:        1,\n\t\t\t\tRAM:          4000000000,\n\t\t\t\tScratch:      10000000000,\n\t\t\t\tPrice:        .02,\n\t\t\t\tPreemptible:  true,\n\t\t\t},\n\t\t})}\n\tif *live != \"\" {\n\t\tvar exampleCfg testConfig\n\t\terr := config.LoadFile(&exampleCfg, *live)\n\t\tc.Assert(err, check.IsNil)\n\n\t\tis, err := newEC2InstanceSet(exampleCfg.DriverParameters, \"test123\", nil, logrus.StandardLogger(), reg)\n\t\tc.Assert(err, check.IsNil)\n\t\treturn is.(*ec2InstanceSet), cloud.ImageID(exampleCfg.ImageIDForTestSuite), cluster, reg\n\t} else {\n\t\tis, err := newEC2InstanceSet(json.RawMessage(conf), \"test123\", nil, ctxlog.TestLogger(c), reg)\n\t\tc.Assert(err, check.IsNil)\n\t\tis.(*ec2InstanceSet).client = &ec2stub{c: c, reftime: time.Now().UTC()}\n\t\treturn is.(*ec2InstanceSet), cloud.ImageID(\"blob\"), cluster, reg\n\t}\n}\n\nfunc (*EC2InstanceSetSuite) TestCreate(c *check.C) {\n\tap, img, cluster, _ := GetInstanceSet(c, \"{}\")\n\tpk, _ := test.LoadTestKey(c, \"../../dispatchcloud/test/sshkey_dispatch\")\n\n\tinst, err := ap.Create(cluster.InstanceTypes[\"tiny\"],\n\t\timg, map[string]string{\n\t\t\t\"TestTagName\": \"test tag value\",\n\t\t}, \"umask 0600; echo -n test-file-data >/var/run/test-file\", pk)\n\tc.Assert(err, check.IsNil)\n\n\ttags := inst.Tags()\n\tc.Check(tags[\"TestTagName\"], check.Equals, \"test tag value\")\n\tc.Logf(\"inst.String()=%v Address()=%v Tags()=%v\", inst.String(), inst.Address(), tags)\n\n\tif *live == \"\" {\n\t\tc.Check(ap.client.(*ec2stub).describeKeyPairsCalls, check.HasLen, 1)\n\t\tc.Check(ap.client.(*ec2stub).importKeyPairCalls, check.HasLen, 1)\n\n\t\truncalls := ap.client.(*ec2stub).runInstancesCalls\n\t\tif c.Check(runcalls, check.HasLen, 1) {\n\t\t\tc.Check(runcalls[0].MetadataOptions.HttpEndpoint, check.DeepEquals, types.InstanceMetadataEndpointStateEnabled)\n\t\t\tc.Check(runcalls[0].MetadataOptions.HttpTokens, check.DeepEquals, types.HttpTokensStateRequired)\n\t\t}\n\t}\n}\n\nfunc (*EC2InstanceSetSuite) 
TestCreateWithExtraScratch(c *check.C) {\n\tap, img, cluster, _ := GetInstanceSet(c, \"{}\")\n\tinst, err := ap.Create(cluster.InstanceTypes[\"tiny-with-extra-scratch\"],\n\t\timg, map[string]string{\n\t\t\t\"TestTagName\": \"test tag value\",\n\t\t}, \"umask 0600; echo -n test-file-data >/var/run/test-file\", nil)\n\n\tc.Assert(err, check.IsNil)\n\n\ttags := inst.Tags()\n\tc.Check(tags[\"TestTagName\"], check.Equals, \"test tag value\")\n\tc.Logf(\"inst.String()=%v Address()=%v Tags()=%v\", inst.String(), inst.Address(), tags)\n\n\tif *live == \"\" {\n\t\t// Should not have called key pair APIs, because\n\t\t// publickey arg was nil\n\t\tc.Check(ap.client.(*ec2stub).describeKeyPairsCalls, check.HasLen, 0)\n\t\tc.Check(ap.client.(*ec2stub).importKeyPairCalls, check.HasLen, 0)\n\t}\n}\n\nfunc (*EC2InstanceSetSuite) TestCreatePreemptible(c *check.C) {\n\tap, img, cluster, _ := GetInstanceSet(c, \"{}\")\n\tpk, _ := test.LoadTestKey(c, \"../../dispatchcloud/test/sshkey_dispatch\")\n\n\tinst, err := ap.Create(cluster.InstanceTypes[\"tiny-preemptible\"],\n\t\timg, map[string]string{\n\t\t\t\"TestTagName\": \"test tag value\",\n\t\t}, \"umask 0600; echo -n test-file-data >/var/run/test-file\", pk)\n\n\tc.Assert(err, check.IsNil)\n\n\ttags := inst.Tags()\n\tc.Check(tags[\"TestTagName\"], check.Equals, \"test tag value\")\n\tc.Logf(\"inst.String()=%v Address()=%v Tags()=%v\", inst.String(), inst.Address(), tags)\n\n}\n\nfunc (*EC2InstanceSetSuite) TestCreateFailoverSecondSubnet(c *check.C) {\n\tif *live != \"\" {\n\t\tc.Skip(\"not applicable in live mode\")\n\t\treturn\n\t}\n\n\tap, img, cluster, reg := GetInstanceSet(c, `{\"SubnetID\":[\"subnet-full\",\"subnet-good\"]}`)\n\tap.client.(*ec2stub).subnetErrorOnRunInstances = map[string]error{\n\t\t\"subnet-full\": &ec2stubError{\n\t\t\tCode:    \"InsufficientFreeAddressesInSubnet\",\n\t\t\tMessage: \"subnet is full\",\n\t\t},\n\t}\n\tinst, err := ap.Create(cluster.InstanceTypes[\"tiny\"], img, nil, \"\", nil)\n\tc.Check(err, check.IsNil)\n\tc.Check(inst, check.NotNil)\n\tc.Check(ap.client.(*ec2stub).runInstancesCalls, check.HasLen, 2)\n\tmetrics := arvadostest.GatherMetricsAsString(reg)\n\tc.Check(metrics, check.Matches, `(?ms).*`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-full\",success=\"0\"} 1\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-full\",success=\"1\"} 0\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-good\",success=\"0\"} 0\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-good\",success=\"1\"} 1\\n`+\n\t\t`.*`)\n\n\t// Next RunInstances call should try the working subnet first\n\tinst, err = ap.Create(cluster.InstanceTypes[\"tiny\"], img, nil, \"\", nil)\n\tc.Check(err, check.IsNil)\n\tc.Check(inst, check.NotNil)\n\tc.Check(ap.client.(*ec2stub).runInstancesCalls, check.HasLen, 3)\n\tmetrics = arvadostest.GatherMetricsAsString(reg)\n\tc.Check(metrics, check.Matches, `(?ms).*`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-full\",success=\"0\"} 1\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-full\",success=\"1\"} 0\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-good\",success=\"0\"} 0\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-good\",success=\"1\"} 2\\n`+\n\t\t`.*`)\n}\n\nfunc (*EC2InstanceSetSuite) TestIsErrorSubnetSpecific(c *check.C) {\n\tc.Check(isErrorSubnetSpecific(nil), check.Equals, 
false)\n\tc.Check(isErrorSubnetSpecific(errors.New(\"misc error\")), check.Equals, false)\n\n\tc.Check(isErrorSubnetSpecific(&ec2stubError{\n\t\tCode: \"InsufficientInstanceCapacity\",\n\t}), check.Equals, true)\n\n\tc.Check(isErrorSubnetSpecific(&ec2stubError{\n\t\tCode: \"InsufficientVolumeCapacity\",\n\t}), check.Equals, true)\n\n\tc.Check(isErrorSubnetSpecific(&ec2stubError{\n\t\tCode:    \"InsufficientFreeAddressesInSubnet\",\n\t\tMessage: \"Not enough free addresses in subnet subnet-abcdefg\\n\\tstatus code: 400, request id: abcdef01-2345-6789-abcd-ef0123456789\",\n\t}), check.Equals, true)\n\n\t// #21603: (Sometimes?) EC2 returns code InvalidParameterValue\n\t// even though the code \"InsufficientFreeAddressesInSubnet\"\n\t// seems like it must be meant for exactly this error.\n\tc.Check(isErrorSubnetSpecific(&ec2stubError{\n\t\tCode:    \"InvalidParameterValue\",\n\t\tMessage: \"Not enough free addresses in subnet subnet-abcdefg\\n\\tstatus code: 400, request id: abcdef01-2345-6789-abcd-ef0123456789\",\n\t}), check.Equals, true)\n\n\t// Similarly, AWS docs\n\t// (https://repost.aws/knowledge-center/vpc-insufficient-ip-errors)\n\t// suggest the following code/message combinations also exist.\n\tc.Check(isErrorSubnetSpecific(&ec2stubError{\n\t\tCode:    \"Client.InvalidParameterValue\",\n\t\tMessage: \"There aren't sufficient free Ipv4 addresses or prefixes\",\n\t}), check.Equals, true)\n\tc.Check(isErrorSubnetSpecific(&ec2stubError{\n\t\tCode:    \"InvalidParameterValue\",\n\t\tMessage: \"There aren't sufficient free Ipv4 addresses or prefixes\",\n\t}), check.Equals, true)\n\t// Meanwhile, other AWS docs\n\t// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html)\n\t// suggest Client.InvalidParameterValue is not a real code but\n\t// ClientInvalidParameterValue is.\n\tc.Check(isErrorSubnetSpecific(&ec2stubError{\n\t\tCode:    \"ClientInvalidParameterValue\",\n\t\tMessage: \"There aren't sufficient free Ipv4 addresses or prefixes\",\n\t}), check.Equals, true)\n\n\tc.Check(isErrorSubnetSpecific(&ec2stubError{\n\t\tCode:    \"InvalidParameterValue\",\n\t\tMessage: \"Some other invalid parameter error\",\n\t}), check.Equals, false)\n}\n\nfunc (*EC2InstanceSetSuite) TestCreateAllSubnetsFailing(c *check.C) {\n\tif *live != \"\" {\n\t\tc.Skip(\"not applicable in live mode\")\n\t\treturn\n\t}\n\n\tap, img, cluster, reg := GetInstanceSet(c, `{\"SubnetID\":[\"subnet-full\",\"subnet-broken\"]}`)\n\tap.client.(*ec2stub).subnetErrorOnRunInstances = map[string]error{\n\t\t\"subnet-full\": &ec2stubError{\n\t\t\tCode:    \"InsufficientFreeAddressesInSubnet\",\n\t\t\tMessage: \"subnet is full\",\n\t\t},\n\t\t\"subnet-broken\": &ec2stubError{\n\t\t\tCode:    \"InvalidSubnetId.NotFound\",\n\t\t\tMessage: \"bogus subnet id\",\n\t\t},\n\t}\n\t_, err := ap.Create(cluster.InstanceTypes[\"tiny\"], img, nil, \"\", nil)\n\tc.Check(err, check.NotNil)\n\tc.Check(err, check.ErrorMatches, `.*InvalidSubnetId\\.NotFound.*`)\n\tc.Check(ap.client.(*ec2stub).runInstancesCalls, check.HasLen, 2)\n\tmetrics := arvadostest.GatherMetricsAsString(reg)\n\tc.Check(metrics, check.Matches, `(?ms).*`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-broken\",success=\"0\"} 1\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-broken\",success=\"1\"} 0\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-full\",success=\"0\"} 1\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-full\",success=\"1\"} 
0\\n`+\n\t\t`.*`)\n\n\t_, err = ap.Create(cluster.InstanceTypes[\"tiny\"], img, nil, \"\", nil)\n\tc.Check(err, check.NotNil)\n\tc.Check(err, check.ErrorMatches, `.*InsufficientFreeAddressesInSubnet.*`)\n\tc.Check(ap.client.(*ec2stub).runInstancesCalls, check.HasLen, 4)\n\tmetrics = arvadostest.GatherMetricsAsString(reg)\n\tc.Check(metrics, check.Matches, `(?ms).*`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-broken\",success=\"0\"} 2\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-broken\",success=\"1\"} 0\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-full\",success=\"0\"} 2\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-full\",success=\"1\"} 0\\n`+\n\t\t`.*`)\n}\n\nfunc (*EC2InstanceSetSuite) TestCreateOneSubnetFailingCapacity(c *check.C) {\n\tif *live != \"\" {\n\t\tc.Skip(\"not applicable in live mode\")\n\t\treturn\n\t}\n\tap, img, cluster, reg := GetInstanceSet(c, `{\"SubnetID\":[\"subnet-full\",\"subnet-broken\"]}`)\n\tap.client.(*ec2stub).subnetErrorOnRunInstances = map[string]error{\n\t\t\"subnet-full\": &ec2stubError{\n\t\t\tCode:    \"InsufficientFreeAddressesInSubnet\",\n\t\t\tMessage: \"subnet is full\",\n\t\t},\n\t\t\"subnet-broken\": &ec2stubError{\n\t\t\tCode:    \"InsufficientInstanceCapacity\",\n\t\t\tMessage: \"insufficient capacity\",\n\t\t},\n\t}\n\tfor i := 0; i < 3; i++ {\n\t\t_, err := ap.Create(cluster.InstanceTypes[\"tiny\"], img, nil, \"\", nil)\n\t\tc.Check(err, check.NotNil)\n\t\tc.Check(err, check.ErrorMatches, `.*InsufficientInstanceCapacity.*`)\n\t}\n\tc.Check(ap.client.(*ec2stub).runInstancesCalls, check.HasLen, 6)\n\tmetrics := arvadostest.GatherMetricsAsString(reg)\n\tc.Check(metrics, check.Matches, `(?ms).*`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-broken\",success=\"0\"} 3\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-broken\",success=\"1\"} 0\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-full\",success=\"0\"} 3\\n`+\n\t\t`arvados_dispatchcloud_ec2_instance_starts_total{subnet_id=\"subnet-full\",success=\"1\"} 0\\n`+\n\t\t`.*`)\n}\n\nfunc (*EC2InstanceSetSuite) TestTagInstances(c *check.C) {\n\tap, _, _, _ := GetInstanceSet(c, \"{}\")\n\tl, err := ap.Instances(nil)\n\tc.Assert(err, check.IsNil)\n\n\tfor _, i := range l {\n\t\ttg := i.Tags()\n\t\ttg[\"TestTag2\"] = \"123 test tag 2\"\n\t\tc.Check(i.SetTags(tg), check.IsNil)\n\t}\n}\n\nfunc (*EC2InstanceSetSuite) TestListInstances(c *check.C) {\n\tap, _, _, reg := GetInstanceSet(c, \"{}\")\n\tl, err := ap.Instances(nil)\n\tc.Assert(err, check.IsNil)\n\n\tfor _, i := range l {\n\t\ttg := i.Tags()\n\t\tc.Logf(\"%v %v %v\", i.String(), i.Address(), tg)\n\t}\n\n\tmetrics := arvadostest.GatherMetricsAsString(reg)\n\tc.Check(metrics, check.Matches, `(?ms).*`+\n\t\t`arvados_dispatchcloud_ec2_instances{subnet_id=\"[^\"]*\"} \\d+\\n`+\n\t\t`.*`)\n}\n\nfunc (*EC2InstanceSetSuite) TestDestroyInstances(c *check.C) {\n\tap, _, _, _ := GetInstanceSet(c, \"{}\")\n\tl, err := ap.Instances(nil)\n\tc.Assert(err, check.IsNil)\n\n\tfor _, i := range l {\n\t\tc.Check(i.Destroy(), check.IsNil)\n\t}\n}\n\nfunc (*EC2InstanceSetSuite) TestInstancePriceHistory(c *check.C) {\n\tap, img, cluster, _ := GetInstanceSet(c, \"{}\")\n\tpk, _ := test.LoadTestKey(c, \"../../dispatchcloud/test/sshkey_dispatch\")\n\ttags := cloud.InstanceTags{\"arvados-ec2-driver\": \"test\"}\n\n\tdefer func() {\n\t\tinstances, err := 
ap.Instances(tags)\n\t\tc.Assert(err, check.IsNil)\n\t\tfor _, inst := range instances {\n\t\t\tc.Logf(\"cleanup: destroy instance %s\", inst)\n\t\t\tc.Check(inst.Destroy(), check.IsNil)\n\t\t}\n\t}()\n\n\tap.ec2config.SpotPriceUpdateInterval = arvados.Duration(time.Hour)\n\tap.ec2config.EBSPrice = 0.1 // $/GiB/month\n\tinst1, err := ap.Create(cluster.InstanceTypes[\"tiny-preemptible\"], img, tags, \"true\", pk)\n\tc.Assert(err, check.IsNil)\n\tdefer inst1.Destroy()\n\tinst2, err := ap.Create(cluster.InstanceTypes[\"tiny-preemptible\"], img, tags, \"true\", pk)\n\tc.Assert(err, check.IsNil)\n\tdefer inst2.Destroy()\n\n\t// in live mode, we need to wait for the instances to reach\n\t// running state before we can discover their availability\n\t// zones and look up the appropriate prices.\n\tvar instances []cloud.Instance\n\tfor deadline := time.Now().Add(5 * time.Minute); ; {\n\t\tif deadline.Before(time.Now()) {\n\t\t\tc.Fatal(\"timed out\")\n\t\t}\n\t\tinstances, err = ap.Instances(tags)\n\t\trunning := 0\n\t\tfor _, inst := range instances {\n\t\t\tec2i := inst.(*ec2Instance).instance\n\t\t\tif ec2i.InstanceLifecycle == types.InstanceLifecycleTypeSpot && *ec2i.State.Code&16 != 0 {\n\t\t\t\trunning++\n\t\t\t}\n\t\t}\n\t\tif running >= 2 {\n\t\t\tc.Logf(\"instances are running, and identifiable as spot instances\")\n\t\t\tbreak\n\t\t}\n\t\tc.Logf(\"waiting for instances to reach running state so their availability zone becomes visible...\")\n\t\ttime.Sleep(10 * time.Second)\n\t}\n\n\tfor _, inst := range instances {\n\t\thist := inst.PriceHistory(arvados.InstanceType{})\n\t\tc.Logf(\"%s price history: %v\", inst.ID(), hist)\n\t\tc.Check(len(hist) > 0, check.Equals, true)\n\n\t\thistWithScratch := inst.PriceHistory(arvados.InstanceType{AddedScratch: 640 << 30})\n\t\tc.Logf(\"%s price history with 640 GiB scratch: %v\", inst.ID(), histWithScratch)\n\n\t\tfor i, ip := range hist {\n\t\t\tc.Check(ip.Price, check.Not(check.Equals), 0.0)\n\t\t\tif i > 0 {\n\t\t\t\tc.Check(ip.StartTime.Before(hist[i-1].StartTime), check.Equals, true)\n\t\t\t}\n\t\t\tc.Check(ip.Price < histWithScratch[i].Price, check.Equals, true)\n\t\t}\n\t}\n}\n\nfunc (*EC2InstanceSetSuite) TestWrapError(c *check.C) {\n\tretryError := &ec2stubError{Code: \"Throttling\"}\n\twrapped := wrapError(retryError, &atomic.Value{})\n\t_, ok := wrapped.(cloud.RateLimitError)\n\tc.Check(ok, check.Equals, true)\n\n\tquotaError := &ec2stubError{Code: \"InstanceLimitExceeded\"}\n\twrapped = wrapError(quotaError, nil)\n\t_, ok = wrapped.(cloud.QuotaError)\n\tc.Check(ok, check.Equals, true)\n\n\tfor _, trial := range []struct {\n\t\tcode               string\n\t\tmsg                string\n\t\ttypeSpecific       bool\n\t\tquotaGroupSpecific bool\n\t}{\n\t\t{\n\t\t\tcode:               \"InsufficientInstanceCapacity\",\n\t\t\tmsg:                \"\",\n\t\t\ttypeSpecific:       true,\n\t\t\tquotaGroupSpecific: false,\n\t\t},\n\t\t{\n\t\t\tcode:               \"Unsupported\",\n\t\t\tmsg:                \"Your requested instance type (t3.micro) is not supported in your requested Availability Zone (us-east-1e). 
Please retry your request by not specifying an Availability Zone or choosing us-east-1a, us-east-1b, us-east-1c, us-east-1d, us-east-1f.\",\n\t\t\ttypeSpecific:       true,\n\t\t\tquotaGroupSpecific: false,\n\t\t},\n\t\t{\n\t\t\tcode:               \"VcpuLimitExceeded\",\n\t\t\tmsg:                \"You have requested more vCPU capacity than your current vCPU limit of 64 allows for the instance bucket that the specified instance type belongs to. Please visit http://aws.amazon.com/contact-us/ec2-request to request an adjustment to this limit.\",\n\t\t\ttypeSpecific:       false,\n\t\t\tquotaGroupSpecific: true,\n\t\t},\n\t} {\n\t\tcapacityError := &ec2stubError{Code: trial.code, Message: trial.msg}\n\t\twrapped = wrapError(capacityError, nil)\n\t\tcaperr, ok := wrapped.(cloud.CapacityError)\n\t\tc.Check(ok, check.Equals, true)\n\t\tc.Check(caperr.IsCapacityError(), check.Equals, true)\n\t\tc.Check(caperr.IsInstanceTypeSpecific(), check.Equals, trial.typeSpecific)\n\t\tc.Check(caperr.IsInstanceQuotaGroupSpecific(), check.Equals, trial.quotaGroupSpecific)\n\t}\n}\n\nfunc (*EC2InstanceSetSuite) TestInstanceQuotaGroup(c *check.C) {\n\tap, _, _, _ := GetInstanceSet(c, `{\n  \"InstanceTypeQuotaGroups\": {\n    \"a\": \"standard\",\n    \"m\": \"standard\",\n    \"t\": \"standard\",\n    \"p5\": \"p5\"\n  }\n}`)\n\n\tfor _, trial := range []struct {\n\t\tptype      string\n\t\tspot       bool\n\t\tquotaGroup cloud.InstanceQuotaGroup\n\t}{\n\t\t{ptype: \"g1.large\", quotaGroup: \"g\"},\n\t\t{ptype: \"x1.large\", quotaGroup: \"x\"},\n\t\t{ptype: \"inf1.2xlarge\", quotaGroup: \"inf\"},\n\t\t{ptype: \"a1.small\", quotaGroup: \"standard\"},\n\t\t{ptype: \"m1.xlarge\", quotaGroup: \"standard\"},\n\t\t{ptype: \"m1.xlarge\", spot: true, quotaGroup: \"standard-spot\"},\n\t\t{ptype: \"p4.xlarge\", spot: true, quotaGroup: \"p-spot\"},\n\t\t{ptype: \"p5.xlarge\", spot: true, quotaGroup: \"p5-spot\"},\n\t\t{ptype: \"t3.2xlarge\", quotaGroup: \"standard\"},\n\t\t{ptype: \"trn1.2xlarge\", quotaGroup: \"trn\"},\n\t\t{ptype: \"trn1.2xlarge\", spot: true, quotaGroup: \"trn-spot\"},\n\t\t{ptype: \"imaginary9.5xlarge\", quotaGroup: \"imaginary\"},\n\t\t{ptype: \"\", quotaGroup: \"\"},\n\t} {\n\t\tc.Check(ap.InstanceQuotaGroup(arvados.InstanceType{\n\t\t\tProviderType: trial.ptype,\n\t\t\tPreemptible:  trial.spot,\n\t\t}), check.Equals, trial.quotaGroup)\n\t}\n}\n\nfunc (*EC2InstanceSetSuite) TestAWSKeyFingerprints(c *check.C) {\n\tfor _, keytype := range []string{\"rsa\", \"ed25519\"} {\n\t\ttmpdir := c.MkDir()\n\t\tbuf, err := exec.Command(\"ssh-keygen\", \"-f\", tmpdir+\"/key\", \"-N\", \"\", \"-t\", keytype).CombinedOutput()\n\t\tc.Assert(err, check.IsNil, check.Commentf(\"ssh-keygen: %s\", buf))\n\t\tvar expectfps []string\n\t\tswitch keytype {\n\t\tcase \"rsa\":\n\t\t\tfor _, hash := range []string{\"md5\", \"sha1\"} {\n\t\t\t\tcmd := exec.Command(\"bash\", \"-c\", \"set -e -o pipefail; ssh-keygen -ef key -m PEM | openssl rsa -RSAPublicKey_in -outform DER | openssl \"+hash+\" -c\")\n\t\t\t\tcmd.Dir = tmpdir\n\t\t\t\tbuf, err := cmd.CombinedOutput()\n\t\t\t\tc.Assert(err, check.IsNil, check.Commentf(\"bash: %s\", buf))\n\t\t\t\texpectfps = append(expectfps, string(regexp.MustCompile(`[0-9a-f:]{20,}`).Find(buf)))\n\t\t\t}\n\t\tcase \"ed25519\":\n\t\t\tbuf, err := exec.Command(\"ssh-keygen\", \"-l\", \"-f\", tmpdir+\"/key\").CombinedOutput()\n\t\t\tc.Assert(err, check.IsNil, check.Commentf(\"ssh-keygen: %s\", buf))\n\t\t\tsum := string(regexp.MustCompile(`SHA256:\\S+`).Find(buf))\n\t\t\texpectfps = []string{sum + 
\"=\", sum}\n\t\tdefault:\n\t\t\tc.Error(\"don't know how to test fingerprint for key type \" + keytype)\n\t\t\tcontinue\n\t\t}\n\t\tpk, err := libconfig.LoadSSHKey(\"file://\" + tmpdir + \"/key\")\n\t\tc.Assert(err, check.IsNil)\n\t\tfingerprints, err := awsKeyFingerprints(pk.PublicKey())\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(fingerprints, check.DeepEquals, expectfps)\n\t}\n}\n"
  },
  {
    "path": "lib/cloud/interfaces.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage cloud\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"io\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\n// A RateLimitError should be returned by an InstanceSet when the\n// cloud service indicates it is rejecting all API calls for some time\n// interval.\ntype RateLimitError interface {\n\t// Time before which the caller should expect requests to\n\t// fail.\n\tEarliestRetry() time.Time\n\terror\n}\n\n// A QuotaError should be returned by an InstanceSet when the cloud\n// service indicates the account cannot create more VMs than already\n// exist.\ntype QuotaError interface {\n\t// If true, don't create more instances until some existing\n\t// instances are destroyed. If false, don't handle the error\n\t// as a quota error.\n\tIsQuotaError() bool\n\terror\n}\n\n// A CapacityError should be returned by an InstanceSet's Create\n// method when the cloud service indicates it has insufficient\n// capacity to create new instances -- i.e., we shouldn't retry right\n// away.\ntype CapacityError interface {\n\t// If true, wait before trying to create more instances.\n\tIsCapacityError() bool\n\t// If true, the condition is specific to the requested\n\t// instance type.  Wait before trying to create more instances\n\t// of that same type.\n\tIsInstanceTypeSpecific() bool\n\t// If true, the condition affects all instance types in the\n\t// same instance family.  This implies\n\t// IsInstanceTypeSpecific() returns false.\n\tIsInstanceQuotaGroupSpecific() bool\n\terror\n}\n\ntype SharedResourceTags map[string]string\ntype InstanceSetID string\ntype InstanceTags map[string]string\ntype InstanceID string\ntype InstanceQuotaGroup string\ntype ImageID string\n\n// An Executor executes commands on an ExecutorTarget.\ntype Executor interface {\n\t// Update the set of private keys used to authenticate to\n\t// targets.\n\tSetSigners(...ssh.Signer)\n\n\t// Set the target used for subsequent command executions.\n\tSetTarget(ExecutorTarget)\n\n\t// Return the current target.\n\tTarget() ExecutorTarget\n\n\t// Execute a shell command and return the resulting stdout and\n\t// stderr. stdin can be nil.\n\tExecute(cmd string, stdin io.Reader) (stdout, stderr []byte, err error)\n}\n\nvar ErrNotImplemented = errors.New(\"not implemented\")\n\n// An ExecutorTarget is a remote command execution service.\ntype ExecutorTarget interface {\n\t// SSH server hostname or IP address with optional :port, or\n\t// empty string if unknown while instance is booting.\n\tAddress() string\n\n\t// Remote username to send during SSH authentication.\n\tRemoteUser() string\n\n\t// Return nil if the given public key matches the instance's\n\t// SSH server key. If the provided Dialer is not nil,\n\t// VerifyHostKey can use it to make outgoing network\n\t// connections from the instance -- e.g., to use the cloud's\n\t// \"this instance's metadata\" API.\n\t//\n\t// Return ErrNotImplemented if no verification mechanism is\n\t// available.\n\tVerifyHostKey(ssh.PublicKey, *ssh.Client) error\n}\n\n// Instance is implemented by the provider-specific instance types.\ntype Instance interface {\n\tExecutorTarget\n\n\t// ID returns the provider's instance ID. 
It must be stable\n\t// for the life of the instance.\n\tID() InstanceID\n\n\t// String typically returns the cloud-provided instance ID.\n\tString() string\n\n\t// Cloud provider's \"instance type\" ID. Matches a ProviderType\n\t// in the cluster's InstanceTypes configuration.\n\tProviderType() string\n\n\t// Get current tags\n\tTags() InstanceTags\n\n\t// Replace tags with the given tags\n\tSetTags(InstanceTags) error\n\n\t// Get recent price history, if available. The InstanceType is\n\t// supplied as an argument so the driver implementation can\n\t// account for AddedScratch cost without requesting the volume\n\t// attachment information from the provider's API.\n\tPriceHistory(arvados.InstanceType) []InstancePrice\n\n\t// Shut down the node\n\tDestroy() error\n}\n\n// An InstanceSet manages a set of VM instances created by an elastic\n// cloud provider like AWS, GCE, or Azure.\n//\n// All public methods of an InstanceSet, and all public methods of the\n// instances it returns, are goroutine safe.\ntype InstanceSet interface {\n\t// Create a new instance with the given type, image, and\n\t// initial set of tags. If supported by the driver, add the\n\t// provided public key to /root/.ssh/authorized_keys.\n\t//\n\t// The given InitCommand should be executed on the newly\n\t// created instance. This is optional for a driver whose\n\t// instances' VerifyHostKey() method never returns\n\t// ErrNotImplemented. InitCommand will be under 1 KiB.\n\t//\n\t// The returned error should implement RateLimitError and\n\t// QuotaError where applicable.\n\tCreate(arvados.InstanceType, ImageID, InstanceTags, InitCommand, ssh.PublicKey) (Instance, error)\n\n\t// Return all instances, including ones that are booting or\n\t// shutting down. Optionally, filter out nodes that don't have\n\t// all of the given InstanceTags (the caller will ignore these\n\t// anyway).\n\t//\n\t// An instance returned by successive calls to Instances() may\n\t// -- but does not need to -- be represented by the same\n\t// Instance object each time. Thus, the caller is responsible\n\t// for de-duplicating the returned instances by comparing the\n\t// InstanceIDs returned by the instances' ID() methods.\n\tInstances(InstanceTags) ([]Instance, error)\n\n\t// Return the instance quota group of the given instance type.\n\t// See (CapacityError)IsInstanceQuotaGroupSpecific().\n\t//\n\t// This should return different values for preemptible and\n\t// non-preemptible instance types, since they surely have\n\t// separate quotas.\n\tInstanceQuotaGroup(arvados.InstanceType) InstanceQuotaGroup\n\n\t// Stop any background tasks and release other resources.\n\tStop()\n}\n\ntype InstancePrice struct {\n\tStartTime time.Time\n\tPrice     float64\n}\n\ntype InitCommand string\n\n// A Driver returns an InstanceSet that uses the given InstanceSetID\n// and driver-dependent configuration parameters.\n//\n// If the driver creates cloud resources that aren't attached to a\n// single VM instance (like SSH key pairs on AWS) and support tagging,\n// they should be tagged with the provided SharedResourceTags.\n//\n// The supplied id will be of the form \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\"\n// where each z can be any alphanum. The returned InstanceSet must use\n// this id to tag long-lived cloud resources that it creates, and must\n// assume control of any existing resources that are tagged with the\n// same id. Tagging can be accomplished by including the ID in\n// resource names, using the cloud provider's tagging feature, or any\n// other mechanism. 
The tags must be visible to another instance of\n// the same driver running on a different host.\n//\n// The returned InstanceSet must not modify or delete cloud resources\n// unless they are tagged with the given InstanceSetID or the caller\n// (dispatcher) calls Destroy() on them. It may log a summary of\n// untagged resources once at startup, though. Thus, two identically\n// configured InstanceSets running on different hosts with different\n// ids should log about the existence of each other's resources at\n// startup, but will not interfere with each other.\n//\n// The dispatcher always passes the InstanceSetID as a tag when\n// calling Create() and Instances(), so the driver does not need to\n// tag/filter VMs by InstanceSetID itself.\n//\n// Example:\n//\n//\ttype exampleInstanceSet struct {\n//\t\townID     string\n//\t\tAccessKey string\n//\t}\n//\n//\ttype exampleDriver struct {}\n//\n//\tfunc (*exampleDriver) InstanceSet(config json.RawMessage, id cloud.InstanceSetID, tags cloud.SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (cloud.InstanceSet, error) {\n//\t\tvar is exampleInstanceSet\n//\t\tif err := json.Unmarshal(config, &is); err != nil {\n//\t\t\treturn nil, err\n//\t\t}\n//\t\tis.ownID = id\n//\t\treturn &is, nil\n//\t}\ntype Driver interface {\n\tInstanceSet(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (InstanceSet, error)\n}\n\n// DriverFunc makes a Driver using the provided function as its\n// InstanceSet method. This is similar to http.HandlerFunc.\nfunc DriverFunc(fn func(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (InstanceSet, error)) Driver {\n\treturn driverFunc(fn)\n}\n\ntype driverFunc func(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (InstanceSet, error)\n\nfunc (df driverFunc) InstanceSet(config json.RawMessage, id InstanceSetID, tags SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (InstanceSet, error) {\n\treturn df(config, id, tags, logger, reg)\n}\n"
  },
  {
    "path": "lib/cloud/loopback/loopback.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage loopback\n\nimport (\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"os/user\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/test\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\n// Driver is the loopback implementation of the cloud.Driver interface.\nvar Driver = cloud.DriverFunc(newInstanceSet)\n\nvar (\n\terrUnimplemented = errors.New(\"function not implemented by loopback driver\")\n\terrQuota         = quotaError(\"loopback driver is always at quota\")\n)\n\ntype quotaError string\n\nfunc (e quotaError) IsQuotaError() bool { return true }\nfunc (e quotaError) Error() string      { return string(e) }\n\ntype instanceSet struct {\n\tinstanceSetID cloud.InstanceSetID\n\tlogger        logrus.FieldLogger\n\tinstances     []*instance\n\tmtx           sync.Mutex\n}\n\nfunc newInstanceSet(config json.RawMessage, instanceSetID cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (cloud.InstanceSet, error) {\n\tis := &instanceSet{\n\t\tinstanceSetID: instanceSetID,\n\t\tlogger:        logger,\n\t}\n\treturn is, nil\n}\n\nfunc (is *instanceSet) Create(it arvados.InstanceType, _ cloud.ImageID, tags cloud.InstanceTags, _ cloud.InitCommand, pubkey ssh.PublicKey) (cloud.Instance, error) {\n\tis.mtx.Lock()\n\tdefer is.mtx.Unlock()\n\tif len(is.instances) > 0 {\n\t\treturn nil, errQuota\n\t}\n\t// A crunch-run process running in a previous instance may\n\t// have marked the node as broken. 
In the loopback scenario a\n\t// destroy+create cycle doesn't fix whatever was broken -- but\n\t// nothing else will either, so the best we can do is remove\n\t// the \"broken\" flag and try again.\n\tif err := os.Remove(\"/var/lock/crunch-run-broken\"); err == nil {\n\t\tis.logger.Info(\"removed /var/lock/crunch-run-broken\")\n\t} else if !errors.Is(err, os.ErrNotExist) {\n\t\treturn nil, err\n\t}\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thostRSAKey, err := rsa.GenerateKey(rand.Reader, 1024)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thostKey, err := ssh.NewSignerFromKey(hostRSAKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thostPubKey, err := ssh.NewPublicKey(hostRSAKey.Public())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinst := &instance{\n\t\tis:           is,\n\t\tinstanceType: it,\n\t\tadminUser:    u.Username,\n\t\ttags:         tags,\n\t\thostPubKey:   hostPubKey,\n\t\tsshService: test.SSHService{\n\t\t\tHostKey:        hostKey,\n\t\t\tAuthorizedUser: u.Username,\n\t\t\tAuthorizedKeys: []ssh.PublicKey{pubkey},\n\t\t},\n\t}\n\tinst.sshService.Exec = inst.sshExecFunc\n\tgo inst.sshService.Start()\n\tis.instances = []*instance{inst}\n\treturn inst, nil\n}\n\nfunc (is *instanceSet) Instances(cloud.InstanceTags) ([]cloud.Instance, error) {\n\tis.mtx.Lock()\n\tdefer is.mtx.Unlock()\n\tvar ret []cloud.Instance\n\tfor _, inst := range is.instances {\n\t\tret = append(ret, inst)\n\t}\n\treturn ret, nil\n}\n\nfunc (is *instanceSet) InstanceQuotaGroup(arvados.InstanceType) cloud.InstanceQuotaGroup {\n\treturn \"\"\n}\n\nfunc (is *instanceSet) Stop() {\n\tis.mtx.Lock()\n\tdefer is.mtx.Unlock()\n\tfor _, inst := range is.instances {\n\t\tinst.sshService.Close()\n\t}\n}\n\ntype instance struct {\n\tis           *instanceSet\n\tinstanceType arvados.InstanceType\n\tadminUser    string\n\ttags         cloud.InstanceTags\n\thostPubKey   ssh.PublicKey\n\tsshService   test.SSHService\n}\n\nfunc (i *instance) ID() cloud.InstanceID                                    { return cloud.InstanceID(i.instanceType.ProviderType) }\nfunc (i *instance) String() string                                          { return i.instanceType.ProviderType }\nfunc (i *instance) ProviderType() string                                    { return i.instanceType.ProviderType }\nfunc (i *instance) Address() string                                         { return i.sshService.Address() }\nfunc (i *instance) PriceHistory(arvados.InstanceType) []cloud.InstancePrice { return nil }\nfunc (i *instance) RemoteUser() string                                      { return i.adminUser }\nfunc (i *instance) Tags() cloud.InstanceTags                                { return i.tags }\nfunc (i *instance) SetTags(tags cloud.InstanceTags) error {\n\ti.tags = tags\n\treturn nil\n}\nfunc (i *instance) Destroy() error {\n\ti.is.mtx.Lock()\n\tdefer i.is.mtx.Unlock()\n\ti.is.instances = i.is.instances[:0]\n\treturn nil\n}\nfunc (i *instance) VerifyHostKey(pubkey ssh.PublicKey, _ *ssh.Client) error {\n\tif !bytes.Equal(pubkey.Marshal(), i.hostPubKey.Marshal()) {\n\t\treturn errors.New(\"host key mismatch\")\n\t}\n\treturn nil\n}\nfunc (i *instance) sshExecFunc(env map[string]string, command string, stdin io.Reader, stdout, stderr io.Writer) uint32 {\n\tcmd := exec.Command(\"sh\", \"-c\", strings.TrimPrefix(command, \"sudo \"))\n\tcmd.Stdin = stdin\n\tcmd.Stdout = stdout\n\tcmd.Stderr = stderr\n\tfor k, v := range env {\n\t\tcmd.Env = append(cmd.Env, k+\"=\"+v)\n\t}\n\t// Prevent child process from using our 
tty.\n\tcmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}\n\terr := cmd.Run()\n\tif err == nil {\n\t\treturn 0\n\t} else if err, ok := err.(*exec.ExitError); !ok {\n\t\treturn 1\n\t} else if code := err.ExitCode(); code < 0 {\n\t\treturn 1\n\t} else {\n\t\treturn uint32(code)\n\t}\n}\n
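\n// Compile-time checks making the contract explicit: the loopback\n// types are expected to satisfy the cloud.InstanceSet and\n// cloud.Instance interfaces defined in lib/cloud/interfaces.go.\nvar (\n\t_ cloud.InstanceSet = (*instanceSet)(nil)\n\t_ cloud.Instance    = (*instance)(nil)\n)\n"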
  },
  {
    "path": "lib/cloud/loopback/loopback_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage loopback\n\nimport (\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"encoding/json\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/sshexecutor\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"golang.org/x/crypto/ssh\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\ntype suite struct{}\n\nvar _ = check.Suite(&suite{})\n\nfunc (*suite) TestCreateListExecDestroy(c *check.C) {\n\tlogger := ctxlog.TestLogger(c)\n\tis, err := Driver.InstanceSet(json.RawMessage(\"{}\"), \"testInstanceSetID\", cloud.SharedResourceTags{\"sharedTag\": \"sharedTagValue\"}, logger, nil)\n\tc.Assert(err, check.IsNil)\n\n\tclientRSAKey, err := rsa.GenerateKey(rand.Reader, 1024)\n\tc.Assert(err, check.IsNil)\n\tclientSSHKey, err := ssh.NewSignerFromKey(clientRSAKey)\n\tc.Assert(err, check.IsNil)\n\tclientSSHPubKey, err := ssh.NewPublicKey(clientRSAKey.Public())\n\tc.Assert(err, check.IsNil)\n\n\tit := arvados.InstanceType{\n\t\tName:         \"localhost\",\n\t\tProviderType: \"localhost\",\n\t\tRAM:          1002003004,\n\t\tVCPUs:        5,\n\t}\n\n\t// First call to Create should succeed, and the returned\n\t// instance's SSH target address should be available in << 1s.\n\tinst, err := is.Create(it, \"testImageID\", cloud.InstanceTags{\"instanceTag\": \"instanceTagValue\"}, \"testInitCommand\", clientSSHPubKey)\n\tc.Assert(err, check.IsNil)\n\tfor deadline := time.Now().Add(time.Second); inst.Address() == \"\"; time.Sleep(time.Second / 100) {\n\t\tif deadline.Before(time.Now()) {\n\t\t\tc.Fatal(\"timed out\")\n\t\t}\n\t}\n\n\t// Another call to Create should fail with a quota error.\n\tinst2, err := is.Create(it, \"testImageID\", cloud.InstanceTags{\"instanceTag\": \"instanceTagValue\"}, \"testInitCommand\", clientSSHPubKey)\n\tc.Check(inst2, check.IsNil)\n\tqerr, ok := err.(cloud.QuotaError)\n\tif c.Check(ok, check.Equals, true, check.Commentf(\"expect cloud.QuotaError, got %#v\", err)) {\n\t\tc.Check(qerr.IsQuotaError(), check.Equals, true)\n\t}\n\n\t// Instance list should now have one entry, for the new\n\t// instance.\n\tlist, err := is.Instances(nil)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(list, check.HasLen, 1)\n\tinst = list[0]\n\tc.Check(inst.String(), check.Equals, \"localhost\")\n\n\t// Instance's SSH server should execute shell commands.\n\texr := sshexecutor.New(inst)\n\texr.SetSigners(clientSSHKey)\n\n\tstdout, stderr, err := exr.Execute(nil, \"echo ok\", nil)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(stdout), check.Equals, \"ok\\n\")\n\tc.Check(string(stderr), check.Equals, \"\")\n\n\t// SSH server should propagate stderr and non-zero exit\n\t// status.\n\tstdout, stderr, err = exr.Execute(nil, \"echo fail && echo -n fail2 >&2 && false\", nil)\n\tc.Check(err, check.FitsTypeOf, &ssh.ExitError{})\n\tc.Check(string(stdout), check.Equals, \"fail\\n\")\n\tc.Check(string(stderr), check.Equals, \"fail2\")\n\n\t// SSH server should strip \"sudo\" from the front of the\n\t// command.\n\twithoutsudo, _, err := exr.Execute(nil, \"whoami\", nil)\n\tc.Check(err, check.IsNil)\n\twithsudo, _, err := exr.Execute(nil, \"sudo whoami\", nil)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(withsudo), check.Equals, string(withoutsudo))\n\n\t// SSH server should reject keys other than the one whose\n\t// public key we passed 
to Create.\n\tbadRSAKey, err := rsa.GenerateKey(rand.Reader, 1024)\n\tc.Assert(err, check.IsNil)\n\tbadSSHKey, err := ssh.NewSignerFromKey(badRSAKey)\n\tc.Assert(err, check.IsNil)\n\t// Create a new executor here, otherwise Execute would reuse\n\t// the existing connection instead of authenticating with\n\t// badRSAKey.\n\texr = sshexecutor.New(inst)\n\texr.SetSigners(badSSHKey)\n\tstdout, stderr, err = exr.Execute(nil, \"true\", nil)\n\tc.Check(err, check.ErrorMatches, `.*unable to authenticate.*`)\n\n\t// Destroying the instance causes it to disappear from the\n\t// list, and allows us to create one more.\n\terr = inst.Destroy()\n\tc.Check(err, check.IsNil)\n\tlist, err = is.Instances(nil)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(list, check.HasLen, 0)\n\t_, err = is.Create(it, \"testImageID\", cloud.InstanceTags{\"instanceTag\": \"instanceTagValue\"}, \"testInitCommand\", clientSSHPubKey)\n\tc.Check(err, check.IsNil)\n\t_, err = is.Create(it, \"testImageID\", cloud.InstanceTags{\"instanceTag\": \"instanceTagValue\"}, \"testInitCommand\", clientSSHPubKey)\n\tc.Check(err, check.NotNil)\n\tlist, err = is.Instances(nil)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(list, check.HasLen, 1)\n}\n"
  },
  {
    "path": "lib/cloud/price.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage cloud\n\nimport (\n\t\"sort\"\n)\n\n// NormalizePriceHistory de-duplicates and sorts instance prices, most\n// recent first.\nfunc NormalizePriceHistory(prices []InstancePrice) []InstancePrice {\n\t// copy provided slice instead of modifying it in place\n\tprices = append([]InstancePrice(nil), prices...)\n\t// sort by timestamp, newest first\n\tsort.Slice(prices, func(i, j int) bool {\n\t\treturn prices[i].StartTime.After(prices[j].StartTime)\n\t})\n\t// remove duplicate data points, keeping the oldest\n\tfor i := 0; i < len(prices)-1; i++ {\n\t\tif prices[i].StartTime == prices[i+1].StartTime || prices[i].Price == prices[i+1].Price {\n\t\t\tprices = append(prices[:i], prices[i+1:]...)\n\t\t\ti--\n\t\t}\n\t}\n\treturn prices\n}\n"
  },
  {
    "path": "lib/cloud/price_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage cloud\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t. \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) { TestingT(t) }\n\ntype cloudSuite struct{}\n\nvar _ = Suite(&cloudSuite{})\n\nfunc (s *cloudSuite) TestNormalizePriceHistory(c *C) {\n\tt0, err := time.Parse(time.RFC3339, \"2023-01-01T01:00:00Z\")\n\tc.Assert(err, IsNil)\n\th := []InstancePrice{\n\t\t{t0.Add(1 * time.Minute), 1.0},\n\t\t{t0.Add(4 * time.Minute), 1.2}, // drop: unchanged price\n\t\t{t0.Add(5 * time.Minute), 1.1},\n\t\t{t0.Add(3 * time.Minute), 1.2},\n\t\t{t0.Add(5 * time.Minute), 1.1}, // drop: duplicate\n\t\t{t0.Add(2 * time.Minute), 1.0}, // drop: out of order, unchanged price\n\t}\n\tc.Check(NormalizePriceHistory(h), DeepEquals, []InstancePrice{h[2], h[3], h[0]})\n}\n"
  },
  {
    "path": "lib/cmd/cmd.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Package cmd helps define reusable functions that can be exposed as\n// [subcommands of] command line programs.\npackage cmd\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"runtime/debug\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst EXIT_INVALIDARGUMENT = 2\n\ntype Handler interface {\n\tRunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int\n}\n\ntype HandlerFunc func(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int\n\nfunc (f HandlerFunc) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\treturn f(prog, args, stdin, stdout, stderr)\n}\n\n// Version is a Handler that prints the package version (set at build\n// time using -ldflags) and Go runtime version to stdout, and returns\n// 0.\nvar Version versionCommand\n\nvar (\n\t// These default version/commit strings should be set at build\n\t// time: `go install -buildvcs=false -ldflags \"-X\n\t// git.arvados.org/arvados.git/lib/cmd.version=1.2.3\"`\n\tversion = \"dev\"\n\tcommit  = \"0000000000000000000000000000000000000000\"\n)\n\ntype versionCommand struct{}\n\nfunc (versionCommand) String() string {\n\treturn fmt.Sprintf(\"%s (%s)\", version, runtime.Version())\n}\n\nfunc (versionCommand) Commit() string {\n\tif bi, ok := debug.ReadBuildInfo(); ok {\n\t\tfor _, bs := range bi.Settings {\n\t\t\tif bs.Key == \"vcs.revision\" {\n\t\t\t\treturn bs.Value\n\t\t\t}\n\t\t}\n\t}\n\treturn commit\n}\n\nfunc (versionCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tprog = regexp.MustCompile(` -*version$`).ReplaceAllLiteralString(prog, \"\")\n\tfmt.Fprintf(stdout, \"%s %s (%s)\\n\", prog, version, runtime.Version())\n\treturn 0\n}\n\n// Multi is a Handler that looks up its first argument in a map (after\n// stripping any \"arvados-\" or \"crunch-\" prefix), and invokes the\n// resulting Handler with the remaining args.\n//\n// Example:\n//\n//\tos.Exit(Multi(map[string]Handler{\n//\t        \"foobar\": HandlerFunc(func(prog string, args []string) int {\n//\t                fmt.Println(args[0])\n//\t                return 2\n//\t        }),\n//\t})(\"/usr/bin/multi\", []string{\"foobar\", \"baz\"}, os.Stdin, os.Stdout, os.Stderr))\n//\n// ...prints \"baz\" and exits 2.\ntype Multi map[string]Handler\n\nfunc (m Multi) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\t_, basename := filepath.Split(prog)\n\tif i := strings.Index(basename, \"~\"); i >= 0 {\n\t\t// drop \"~anything\" suffix (arvados-dispatch-cloud's\n\t\t// DeployRunnerBinary feature relies on this)\n\t\tbasename = basename[:i]\n\t}\n\tcmd, ok := m[basename]\n\tif !ok {\n\t\t// \"controller\" command exists, and binary is named \"arvados-controller\"\n\t\tcmd, ok = m[strings.TrimPrefix(basename, \"arvados-\")]\n\t}\n\tif !ok {\n\t\t// \"dispatch-slurm\" command exists, and binary is named \"crunch-dispatch-slurm\"\n\t\tcmd, ok = m[strings.TrimPrefix(basename, \"crunch-\")]\n\t}\n\tif ok {\n\t\treturn cmd.RunCommand(prog, args, stdin, stdout, stderr)\n\t} else if len(args) < 1 {\n\t\tfmt.Fprintf(stderr, \"usage: %s command [args]\\n\", prog)\n\t\tm.Usage(stderr)\n\t\treturn EXIT_INVALIDARGUMENT\n\t} else if cmd, ok = m[args[0]]; ok {\n\t\treturn cmd.RunCommand(prog+\" \"+args[0], 
args[1:], stdin, stdout, stderr)\n\t} else {\n\t\tfmt.Fprintf(stderr, \"%s: unrecognized command %q\\n\", prog, args[0])\n\t\tm.Usage(stderr)\n\t\treturn EXIT_INVALIDARGUMENT\n\t}\n}\n\nfunc (m Multi) Usage(stderr io.Writer) {\n\tfmt.Fprintf(stderr, \"\\nAvailable commands:\\n\")\n\tm.listSubcommands(stderr, \"\")\n}\n\nfunc (m Multi) listSubcommands(out io.Writer, prefix string) {\n\tvar subcommands []string\n\tfor sc := range m {\n\t\tif strings.HasPrefix(sc, \"-\") {\n\t\t\t// Some subcommands have alternate versions\n\t\t\t// like \"--version\" for compatibility. Don't\n\t\t\t// clutter the subcommand summary with those.\n\t\t\tcontinue\n\t\t}\n\t\tsubcommands = append(subcommands, sc)\n\t}\n\tsort.Strings(subcommands)\n\tfor _, sc := range subcommands {\n\t\tswitch cmd := m[sc].(type) {\n\t\tcase Multi:\n\t\t\tcmd.listSubcommands(out, prefix+sc+\" \")\n\t\tdefault:\n\t\t\tfmt.Fprintf(out, \"    %s%s\\n\", prefix, sc)\n\t\t}\n\t}\n}\n\ntype FlagSet interface {\n\tInit(string, flag.ErrorHandling)\n\tArgs() []string\n\tNArg() int\n\tParse([]string) error\n\tSetOutput(io.Writer)\n\tPrintDefaults()\n}\n\n// SubcommandToFront silently parses args using flagset, and returns a\n// copy of args with the first non-flag argument moved to the\n// front. If parsing fails or consumes all of args, args is returned\n// unchanged.\n//\n// SubcommandToFront invokes methods on flagset that have side\n// effects, including Parse. In typical usage, flagset will not be used\n// for anything else after being passed to SubcommandToFront.\nfunc SubcommandToFront(args []string, flagset FlagSet) []string {\n\tflagset.Init(\"\", flag.ContinueOnError)\n\tflagset.SetOutput(ioutil.Discard)\n\tif err := flagset.Parse(args); err != nil || flagset.NArg() == 0 {\n\t\t// No subcommand found.\n\t\treturn args\n\t}\n\t// Move subcommand to the front.\n\tflagargs := len(args) - flagset.NArg()\n\tnewargs := make([]string, len(args))\n\tnewargs[0] = args[flagargs]\n\tcopy(newargs[1:flagargs+1], args[:flagargs])\n\tcopy(newargs[flagargs+1:], args[flagargs+1:])\n\treturn newargs\n}\n\ntype NoPrefixFormatter struct{}\n\nfunc (NoPrefixFormatter) Format(entry *logrus.Entry) ([]byte, error) {\n\treturn []byte(entry.Message + \"\\n\"), nil\n}\n"
  },
  {
    "path": "lib/cmd/cmd_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cmd\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"git.arvados.org/arvados.git/lib/cmdtest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&CmdSuite{})\n\ntype CmdSuite struct{}\n\nvar testCmd = Multi(map[string]Handler{\n\t\"echo\": HandlerFunc(func(prog string, args []string, stdin io.Reader, stdout io.Writer, stderr io.Writer) int {\n\t\tfmt.Fprintln(stdout, strings.Join(args, \" \"))\n\t\treturn 0\n\t}),\n})\n\nfunc (s *CmdSuite) TestHello(c *check.C) {\n\tdefer cmdtest.LeakCheck(c)()\n\tstdout := bytes.NewBuffer(nil)\n\tstderr := bytes.NewBuffer(nil)\n\texited := testCmd.RunCommand(\"prog\", []string{\"echo\", \"hello\", \"world\"}, bytes.NewReader(nil), stdout, stderr)\n\tc.Check(exited, check.Equals, 0)\n\tc.Check(stdout.String(), check.Equals, \"hello world\\n\")\n\tc.Check(stderr.String(), check.Equals, \"\")\n}\n\nfunc (s *CmdSuite) TestHelloViaProg(c *check.C) {\n\tdefer cmdtest.LeakCheck(c)()\n\tstdout := bytes.NewBuffer(nil)\n\tstderr := bytes.NewBuffer(nil)\n\texited := testCmd.RunCommand(\"/usr/local/bin/echo\", []string{\"hello\", \"world\"}, bytes.NewReader(nil), stdout, stderr)\n\tc.Check(exited, check.Equals, 0)\n\tc.Check(stdout.String(), check.Equals, \"hello world\\n\")\n\tc.Check(stderr.String(), check.Equals, \"\")\n}\n\nfunc (s *CmdSuite) TestUsage(c *check.C) {\n\tdefer cmdtest.LeakCheck(c)()\n\tstdout := bytes.NewBuffer(nil)\n\tstderr := bytes.NewBuffer(nil)\n\texited := testCmd.RunCommand(\"prog\", []string{\"nosuchcommand\", \"hi\"}, bytes.NewReader(nil), stdout, stderr)\n\tc.Check(exited, check.Equals, 2)\n\tc.Check(stdout.String(), check.Equals, \"\")\n\tc.Check(stderr.String(), check.Matches, `(?ms)^prog: unrecognized command \"nosuchcommand\"\\n.*echo.*\\n`)\n}\n\nfunc (s *CmdSuite) TestSubcommandToFront(c *check.C) {\n\tdefer cmdtest.LeakCheck(c)()\n\tflags := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tflags.String(\"format\", \"json\", \"\")\n\tflags.Bool(\"n\", false, \"\")\n\targs := SubcommandToFront([]string{\"--format=yaml\", \"-n\", \"-format\", \"beep\", \"echo\", \"hi\"}, flags)\n\tc.Check(args, check.DeepEquals, []string{\"echo\", \"--format=yaml\", \"-n\", \"-format\", \"beep\", \"hi\"})\n}\n"
  },
  {
    "path": "lib/cmd/parseflags.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage cmd\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n)\n\n// Hack to enable checking whether a given FlagSet's Usage method is\n// the (private) default one.\nvar defaultFlagSet = flag.NewFlagSet(\"none\", flag.ContinueOnError)\n\n// ParseFlags calls f.Parse(args) and prints appropriate error/help\n// messages to stderr.\n//\n// The positional argument is \"\" if no positional arguments are\n// accepted, otherwise a string to print with the usage message,\n// \"Usage: {prog} [options] {positional}\".\n//\n// The first return value, ok, is true if the program should continue\n// running normally, or false if it should exit now.\n//\n// If ok is false, the second return value is an appropriate exit\n// code: 0 if \"-help\" was given, 2 if there was a usage error.\nfunc ParseFlags(f FlagSet, prog string, args []string, positional string, stderr io.Writer) (ok bool, exitCode int) {\n\tf.Init(prog, flag.ContinueOnError)\n\tf.SetOutput(io.Discard)\n\terr := f.Parse(args)\n\tswitch err {\n\tcase nil:\n\t\tif f.NArg() > 0 && positional == \"\" {\n\t\t\tfmt.Fprintf(stderr, \"unrecognized command line arguments: %v (try -help)\\n\", f.Args())\n\t\t\treturn false, EXIT_INVALIDARGUMENT\n\t\t}\n\t\treturn true, 0\n\tcase flag.ErrHelp:\n\t\t// Use our own default usage func, not the one\n\t\t// provided by the flag pkg, if the caller hasn't set\n\t\t// one. (We use reflect to determine whether f.Usage\n\t\t// is the private defaultUsage func that\n\t\t// flag.NewFlagSet uses.)\n\t\tif f, ok := f.(*flag.FlagSet); ok && f.Usage != nil && reflect.ValueOf(f.Usage).String() != reflect.ValueOf(defaultFlagSet.Usage).String() {\n\t\t\tf.SetOutput(stderr)\n\t\t\tf.Usage()\n\t\t} else {\n\t\t\tfmt.Fprintf(stderr, \"Usage: %s [options] %s\\n\", prog, positional)\n\t\t\tf.SetOutput(stderr)\n\t\t\tf.PrintDefaults()\n\t\t}\n\t\treturn false, 0\n\tdefault:\n\t\tfmt.Fprintf(stderr, \"error parsing command line arguments: %s (try -help)\\n\", err)\n\t\treturn false, EXIT_INVALIDARGUMENT\n\t}\n}\n"
  },
  {
    "path": "lib/cmdtest/leakcheck.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Package cmdtest provides tools for testing command line tools.\npackage cmdtest\n\nimport (\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// LeakCheck tests for output being leaked to os.Stdout and os.Stderr\n// that should be sent elsewhere (e.g., the stdout and stderr streams\n// passed to a cmd.RunFunc).\n//\n// It redirects os.Stdout and os.Stderr to a tempfile, and returns a\n// func, which the caller is expected to defer, that restores os.* and\n// checks that the tempfile is empty.\n//\n// Example:\n//\n//\tfunc (s *Suite) TestSomething(c *check.C) {\n//\t\tdefer cmdtest.LeakCheck(c)()\n//\t\t// ... do things that shouldn't print to os.Stderr or os.Stdout\n//\t}\nfunc LeakCheck(c *check.C) func() {\n\ttmpfiles := map[string]*os.File{\"stdout\": nil, \"stderr\": nil}\n\tfor i := range tmpfiles {\n\t\tvar err error\n\t\ttmpfiles[i], err = ioutil.TempFile(\"\", \"\")\n\t\tc.Assert(err, check.IsNil)\n\t\terr = os.Remove(tmpfiles[i].Name())\n\t\tc.Assert(err, check.IsNil)\n\t}\n\n\tstdout, stderr := os.Stdout, os.Stderr\n\tos.Stdout, os.Stderr = tmpfiles[\"stdout\"], tmpfiles[\"stderr\"]\n\treturn func() {\n\t\tos.Stdout, os.Stderr = stdout, stderr\n\n\t\tfor i, tmpfile := range tmpfiles {\n\t\t\tc.Logf(\"checking %s\", i)\n\t\t\t_, err := tmpfile.Seek(0, io.SeekStart)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tleaked, err := ioutil.ReadAll(tmpfile)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Check(string(leaked), check.Equals, \"\")\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/config/cmd.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nvar DumpCommand dumpCommand\n\ntype dumpCommand struct{}\n\nfunc (dumpCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(stderr, \"%s\\n\", err)\n\t\t}\n\t}()\n\n\tloader := &Loader{\n\t\tStdin:  stdin,\n\t\tLogger: ctxlog.New(stderr, \"text\", \"info\"),\n\t}\n\n\tflags := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tloader.SetupFlags(flags)\n\n\tif ok, code := cmd.ParseFlags(flags, prog, args, \"\", stderr); !ok {\n\t\treturn code\n\t}\n\tcfg, err := loader.Load()\n\tif err != nil {\n\t\treturn 1\n\t}\n\tout, err := yaml.Marshal(cfg)\n\tif err != nil {\n\t\treturn 1\n\t}\n\t_, err = stdout.Write(out)\n\tif err != nil {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nvar CheckCommand checkCommand\n\ntype checkCommand struct{}\n\nfunc (checkCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tvar err error\n\tvar logbuf = &bytes.Buffer{}\n\tdefer func() {\n\t\tio.Copy(stderr, logbuf)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(stderr, \"%s\\n\", err)\n\t\t}\n\t}()\n\n\tlogger := logrus.New()\n\tlogger.Out = logbuf\n\tloader := &Loader{\n\t\tStdin:  stdin,\n\t\tLogger: logger,\n\t}\n\n\tflags := flag.NewFlagSet(prog, flag.ContinueOnError)\n\tloader.SetupFlags(flags)\n\tstrict := flags.Bool(\"strict\", true, \"Strict validation of configuration file (warnings result in non-zero exit code)\")\n\tif ok, code := cmd.ParseFlags(flags, prog, args, \"\", stderr); !ok {\n\t\treturn code\n\t}\n\n\t// Load the config twice -- once without loading deprecated\n\t// keys/files, once with -- and then compare the two resulting\n\t// configs. 
This reveals whether the deprecated keys/files\n\t// have any effect on the final configuration.\n\t//\n\t// If they do, show the operator how to update their config\n\t// such that the deprecated keys/files are superfluous and can\n\t// be deleted.\n\tloader.SkipDeprecated = true\n\tloader.SkipLegacy = true\n\twithoutDepr, err := loader.Load()\n\tif err != nil {\n\t\treturn 1\n\t}\n\t// Reset() to avoid printing the same warnings twice when they\n\t// are logged by both without-legacy and with-legacy loads.\n\tlogbuf.Reset()\n\tloader.SkipDeprecated = false\n\tloader.SkipLegacy = false\n\twithDepr, err := loader.Load()\n\tif err != nil {\n\t\treturn 1\n\t}\n\n\t// Check for configured vocabulary validity.\n\tfor id, cc := range withDepr.Clusters {\n\t\tif cc.API.VocabularyPath == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tvd, err := os.ReadFile(cc.API.VocabularyPath)\n\t\tif err != nil {\n\t\t\tif errors.Is(err, os.ErrNotExist) {\n\t\t\t\t// If the vocabulary path doesn't exist, it might mean that\n\t\t\t\t// the current node isn't the controller; so it's not an\n\t\t\t\t// error.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlogger.Errorf(\"Error reading vocabulary file %q for cluster %s: %s\\n\", cc.API.VocabularyPath, id, err)\n\t\t\tcontinue\n\t\t}\n\t\tmk := make([]string, 0, len(cc.Collections.ManagedProperties))\n\t\tfor k := range cc.Collections.ManagedProperties {\n\t\t\tmk = append(mk, k)\n\t\t}\n\t\t_, err = arvados.NewVocabulary(vd, mk)\n\t\tif err != nil {\n\t\t\tlogger.Errorf(\"Error loading vocabulary file %q for cluster %s:\\n%s\\n\", cc.API.VocabularyPath, id, err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tcmd := exec.Command(\"diff\", \"-u\", \"--label\", \"without-deprecated-configs\", \"--label\", \"relying-on-deprecated-configs\", \"/dev/fd/3\", \"/dev/fd/4\")\n\tfor _, obj := range []interface{}{withoutDepr, withDepr} {\n\t\ty, _ := yaml.Marshal(obj)\n\t\tpr, pw, err := os.Pipe()\n\t\tif err != nil {\n\t\t\treturn 1\n\t\t}\n\t\tdefer pr.Close()\n\t\tgo func() {\n\t\t\tio.Copy(pw, bytes.NewBuffer(y))\n\t\t\tpw.Close()\n\t\t}()\n\t\tcmd.ExtraFiles = append(cmd.ExtraFiles, pr)\n\t}\n\tdiff, err := cmd.CombinedOutput()\n\tif bytes.HasPrefix(diff, []byte(\"--- \")) {\n\t\tfmt.Fprintln(stdout, \"Your configuration is relying on deprecated entries. Suggest making the following changes.\")\n\t\tstdout.Write(diff)\n\t\terr = nil\n\t\tif *strict {\n\t\t\treturn 1\n\t\t}\n\t} else if len(diff) > 0 {\n\t\tfmt.Fprintf(stderr, \"Unexpected diff output:\\n%s\", diff)\n\t\tif *strict {\n\t\t\treturn 1\n\t\t}\n\t} else if err != nil {\n\t\treturn 1\n\t}\n\tif logbuf.Len() > 0 {\n\t\tif *strict {\n\t\t\treturn 1\n\t\t}\n\t}\n\treturn 0\n}\n\nvar DumpDefaultsCommand defaultsCommand\n\ntype defaultsCommand struct{}\n\nfunc (defaultsCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\t_, err := stdout.Write(DefaultYAML)\n\tif err != nil {\n\t\tfmt.Fprintln(stderr, err)\n\t\treturn 1\n\t}\n\treturn 0\n}\n"
  },
  {
    "path": "lib/config/cmd_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&CommandSuite{})\n\nvar (\n\t// Commands must satisfy cmd.Handler interface\n\t_ cmd.Handler = dumpCommand{}\n\t_ cmd.Handler = checkCommand{}\n)\n\ntype CommandSuite struct{}\n\nfunc (s *CommandSuite) SetUpSuite(c *check.C) {\n\tos.Unsetenv(\"ARVADOS_API_HOST\")\n\tos.Unsetenv(\"ARVADOS_API_HOST_INSECURE\")\n\tos.Unsetenv(\"ARVADOS_API_TOKEN\")\n}\n\nfunc (s *CommandSuite) TestDump_BadArg(c *check.C) {\n\tvar stderr bytes.Buffer\n\tcode := DumpCommand.RunCommand(\"arvados config-dump\", []string{\"-badarg\"}, bytes.NewBuffer(nil), bytes.NewBuffer(nil), &stderr)\n\tc.Check(code, check.Equals, cmd.EXIT_INVALIDARGUMENT)\n\tc.Check(stderr.String(), check.Equals, \"error parsing command line arguments: flag provided but not defined: -badarg (try -help)\\n\")\n}\n\nfunc (s *CommandSuite) TestDump_EmptyInput(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tcode := DumpCommand.RunCommand(\"arvados config-dump\", []string{\"-config\", \"-\"}, &bytes.Buffer{}, &stdout, &stderr)\n\tc.Check(code, check.Equals, 1)\n\tc.Check(stderr.String(), check.Matches, `config does not define any clusters\\n`)\n}\n\nfunc (s *CommandSuite) TestCheck_NoWarnings(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tin := `\nClusters:\n z1234:\n  ManagementToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  SystemRootToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  API:\n    MaxItemsPerResponse: 1234\n    VocabularyPath: /this/path/does/not/exist\n  Collections:\n    BlobSigningKey: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  PostgreSQL:\n    Connection:\n      sslmode: require\n  Services:\n    RailsAPI:\n      InternalURLs:\n        \"http://0.0.0.0:8000\": {}\n  Workbench:\n    UserProfileFormFields:\n      color:\n        Type: select\n        Options:\n          fuchsia: {}\n`\n\tcode := CheckCommand.RunCommand(\"arvados config-check\", []string{\"-config\", \"-\"}, bytes.NewBufferString(in), &stdout, &stderr)\n\tc.Check(code, check.Equals, 0)\n\tc.Check(stdout.String(), check.Equals, \"\")\n\tc.Check(stderr.String(), check.Equals, \"\")\n}\n\nfunc (s *CommandSuite) TestCheck_VocabularyErrors(c *check.C) {\n\ttmpFile, err := ioutil.TempFile(\"\", \"\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(tmpFile.Name())\n\t_, err = tmpFile.WriteString(`\n{\n \"tags\": {\n  \"IDfoo\": {\n   \"labels\": [\n    {\"label\": \"foo\"}\n   ]\n  },\n  \"IDfoo\": {\n   \"labels\": [\n    {\"label\": \"baz\"}\n   ]\n  }\n }\n}`)\n\tc.Assert(err, check.IsNil)\n\ttmpFile.Close()\n\tvocPath := tmpFile.Name()\n\tvar stdout, stderr bytes.Buffer\n\tin := `\nClusters:\n z1234:\n  ManagementToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  SystemRootToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  API:\n    MaxItemsPerResponse: 1234\n    VocabularyPath: ` + vocPath + `\n  Collections:\n    BlobSigningKey: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  PostgreSQL:\n    Connection:\n      sslmode: require\n  Services:\n    RailsAPI:\n      InternalURLs:\n        \"http://0.0.0.0:8000\": {}\n  Workbench:\n    UserProfileFormFields:\n      color:\n        Type: select\n        Options:\n          fuchsia: {}\n`\n\tcode := CheckCommand.RunCommand(\"arvados config-check\", []string{\"-config\", \"-\"}, bytes.NewBufferString(in), &stdout, &stderr)\n\tc.Check(code, check.Equals, 
1)\n\tc.Check(stderr.String(), check.Matches, `(?ms).*Error loading vocabulary file.*for cluster.*duplicate JSON key.*tags.IDfoo.*`)\n}\n\nfunc (s *CommandSuite) TestCheck_DeprecatedKeys(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tin := `\nClusters:\n z1234:\n  RequestLimits:\n    MaxItemsPerResponse: 1234\n`\n\tcode := CheckCommand.RunCommand(\"arvados config-check\", []string{\"-config\", \"-\"}, bytes.NewBufferString(in), &stdout, &stderr)\n\tc.Check(code, check.Equals, 1)\n\tc.Check(stdout.String(), check.Matches, `(?ms).*\\n\\- +.*MaxItemsPerResponse: 1000\\n\\+ +MaxItemsPerResponse: 1234\\n.*`)\n}\n\nfunc (s *CommandSuite) TestCheck_OldKeepstoreConfigFile(c *check.C) {\n\tf, err := ioutil.TempFile(\"\", \"\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(f.Name())\n\n\tio.WriteString(f, \"Listen: :12345\\nDebug: true\\n\")\n\n\tvar stdout, stderr bytes.Buffer\n\tin := `\nClusters:\n z1234:\n  SystemLogs:\n    LogLevel: info\n`\n\tcode := CheckCommand.RunCommand(\"arvados config-check\", []string{\"-config\", \"-\", \"-legacy-keepstore-config\", f.Name()}, bytes.NewBufferString(in), &stdout, &stderr)\n\tc.Check(code, check.Equals, 1)\n\tc.Check(stdout.String(), check.Matches, `(?ms).*\\n\\- +.*LogLevel: info\\n\\+ +LogLevel: debug\\n.*`)\n\tc.Check(stderr.String(), check.Matches, `(?ms).*you should remove the legacy keepstore config file.*\\n`)\n}\n\nfunc (s *CommandSuite) TestCheck_UnknownKey(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tin := `\nClusters:\n z1234:\n  Bogus1: foo\n  BogusSection:\n    Bogus2: foo\n  API:\n    Bogus3:\n     Bogus4: true\n  PostgreSQL:\n    ConnectionPool:\n      {Bogus5: true}\n`\n\tcode := CheckCommand.RunCommand(\"arvados config-check\", []string{\"-config\", \"-\"}, bytes.NewBufferString(in), &stdout, &stderr)\n\tc.Log(stderr.String())\n\tc.Check(code, check.Equals, 1)\n\tc.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.Bogus1\"\\n.*`)\n\tc.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.BogusSection\"\\n.*`)\n\tc.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.API.Bogus3\"\\n.*`)\n\tc.Check(stderr.String(), check.Matches, `(?ms).*unexpected object in config entry: Clusters.z1234.PostgreSQL.ConnectionPool\"\\n.*`)\n}\n\nfunc (s *CommandSuite) TestCheck_DuplicateWarnings(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tin := `\nClusters:\n z1234: {}\n`\n\tcode := CheckCommand.RunCommand(\"arvados config-check\", []string{\"-config\", \"-\"}, bytes.NewBufferString(in), &stdout, &stderr)\n\tc.Check(code, check.Equals, 1)\n\tc.Check(stderr.String(), check.Matches, `(?ms).*SystemRootToken.*`)\n\tc.Check(stderr.String(), check.Not(check.Matches), `(?ms).*SystemRootToken.*SystemRootToken.*`)\n}\n\nfunc (s *CommandSuite) TestDump_Formatting(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tin := `\nClusters:\n z1234:\n  Containers:\n   CloudVMs:\n    TimeoutBooting: 600s\n  Services:\n   Controller:\n    InternalURLs:\n     http://localhost:12345: {}\n`\n\tcode := DumpCommand.RunCommand(\"arvados config-dump\", []string{\"-config\", \"-\"}, bytes.NewBufferString(in), &stdout, &stderr)\n\tc.Check(code, check.Equals, 0)\n\tc.Check(stdout.String(), check.Matches, `(?ms).*TimeoutBooting: 10m\\n.*`)\n\tc.Check(stdout.String(), check.Matches, `(?ms).*http://localhost:12345/:\\n +ListenURL: \"\"\\n.*`)\n}\n\nfunc (s *CommandSuite) TestDump_UnknownKey(c *check.C) {\n\tvar stdout, stderr 
bytes.Buffer\n\tin := `\nClusters:\n z1234:\n  UnknownKey: foobar\n  ManagementToken: secret\n`\n\tcode := DumpCommand.RunCommand(\"arvados config-dump\", []string{\"-config\", \"-\"}, bytes.NewBufferString(in), &stdout, &stderr)\n\tc.Check(code, check.Equals, 0)\n\tc.Check(stderr.String(), check.Matches, `(?ms).*deprecated or unknown config entry: Clusters.z1234.UnknownKey.*`)\n\tc.Check(stdout.String(), check.Matches, `(?ms)(.*\\n)?Clusters:\\n  z1234:\\n.*`)\n\tc.Check(stdout.String(), check.Matches, `(?ms).*\\n *ManagementToken: secret\\n.*`)\n\tc.Check(stdout.String(), check.Not(check.Matches), `(?ms).*UnknownKey.*`)\n}\n\nfunc (s *CommandSuite) TestDump_KeyOrder(c *check.C) {\n\tin := `\nClusters:\n z1234:\n  Login:\n   Test:\n    Users:\n     a: {}\n     d: {}\n     c: {}\n     b: {}\n     e: {}\n`\n\tfor trial := 0; trial < 20; trial++ {\n\t\tvar stdout, stderr bytes.Buffer\n\t\tcode := DumpCommand.RunCommand(\"arvados config-dump\", []string{\"-config\", \"-\"}, bytes.NewBufferString(in), &stdout, &stderr)\n\t\tc.Assert(code, check.Equals, 0)\n\t\tif !c.Check(stdout.String(), check.Matches, `(?ms).*a:.*b:.*c:.*d:.*e:.*`) {\n\t\t\tc.Logf(\"config-dump did not use lexical key order on trial %d\", trial)\n\t\t\tc.Log(\"stdout:\\n\", stdout.String())\n\t\t\tc.Log(\"stderr:\\n\", stderr.String())\n\t\t\tc.FailNow()\n\t\t}\n\t}\n}\n\nfunc (s *CommandSuite) TestCheck_KeyOrder(c *check.C) {\n\tin := `\nClusters:\n z1234:\n  ManagementToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  SystemRootToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  Collections:\n   BlobSigningKey: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  InstanceTypes:\n   a32a: {}\n   a48a: {}\n   a4a: {}\n`\n\tfor trial := 0; trial < 20; trial++ {\n\t\tvar stdout, stderr bytes.Buffer\n\t\tcode := CheckCommand.RunCommand(\"arvados config-check\", []string{\"-config=-\", \"-strict=true\"}, bytes.NewBufferString(in), &stdout, &stderr)\n\t\tif !c.Check(code, check.Equals, 0) || stdout.String() != \"\" || stderr.String() != \"\" {\n\t\t\tc.Logf(\"config-check returned error or non-empty output on trial %d\", trial)\n\t\t\tc.Log(\"stdout:\\n\", stdout.String())\n\t\t\tc.Log(\"stderr:\\n\", stderr.String())\n\t\t\tc.FailNow()\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/config/config.default.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# This file provides documentation and default values for all Arvados\n# configuration entries.\n#\n# It is NOT intended to be copied and used as a starting point for a\n# site configuration file.  If you do that, the \"SAMPLE\" entries will\n# be misinterpreted as real configuration entries, and future upgrades\n# will leave your configuration file with stale defaults and\n# documentation.  Instead, you should create /etc/arvados/config.yml\n# with only the entries you want to override, and refer to the latest\n# version of this file for documentation.\n#\n# To show the entire configuration, computed from the current defaults\n# and your local site configuration, run `arvados-server config-dump`.\n\nClusters:\n  xxxxx:\n    # Token used internally by Arvados components to authenticate to\n    # one another. Use a string of at least 50 random alphanumerics.\n    SystemRootToken: \"\"\n\n    # Token to be included in all healthcheck requests. Disabled by default.\n    # Server expects request header of the format \"Authorization: Bearer xxx\"\n    ManagementToken: \"\"\n\n    Services:\n\n      # Each of the service sections below specifies InternalURLs\n      # (each with optional ListenURL) and ExternalURL.\n      #\n      # InternalURLs specify how other Arvados service processes will\n      # connect to the service. Typically these use internal hostnames\n      # and high port numbers. Example:\n      #\n      # InternalURLs:\n      #   \"http://host1.internal.example:12345\": {}\n      #   \"http://host2.internal.example:12345\": {}\n      #\n      # ListenURL specifies the address and port the service process's\n      # HTTP server should listen on, if different from the\n      # InternalURL itself. Example, using an intermediate TLS proxy:\n      #\n      # InternalURLs:\n      #   \"https://host1.internal.example\":\n      #     ListenURL: \"http://10.0.0.7:12345\"\n      #\n      # When there are multiple InternalURLs configured, the service\n      # process will try listening on each InternalURLs (using\n      # ListenURL if provided) until one works. If you use a ListenURL\n      # like \"0.0.0.0\" which can be bound on any machine, use an\n      # environment variable\n      # ARVADOS_SERVICE_INTERNAL_URL=http://host1.internal.example to\n      # control which entry to use.\n      #\n      # ExternalURL specifies how applications/clients will connect to\n      # the service, regardless of whether they are inside or outside\n      # the cluster. 
Example:\n      #\n      # ExternalURL: \"https://keep.zzzzz.example.com/\"\n      #\n      # To avoid routing internal traffic through external networks,\n      # use split-horizon DNS for ExternalURL host names: inside the\n      # cluster's private network \"host.zzzzz.example.com\" resolves to\n      # the host's private IP address, while outside the cluster\n      # \"host.zzzzz.example.com\" resolves to the host's public IP\n      # address (or its external gateway or load balancer).\n\n      RailsAPI:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        ExternalURL: \"\"\n      Controller:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        ExternalURL: \"\"\n      Websocket:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        ExternalURL: \"\"\n      Keepbalance:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        ExternalURL: \"\"\n      DispatchCloud:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        ExternalURL: \"\"\n      DispatchLSF:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        ExternalURL: \"\"\n      DispatchSLURM:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        ExternalURL: \"\"\n      Keepproxy:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        ExternalURL: \"\"\n      WebDAV:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        # Base URL for Workbench inline preview.  If blank, use\n        # WebDAVDownload instead, and disable inline preview.\n        # If both are empty, downloading collections from Workbench\n        # will be impossible.\n        #\n        # It is important to properly configure the download service\n        # to mitigate cross-site-scripting (XSS) attacks.  An HTML page\n        # can be stored in a collection.  If an attacker causes a victim\n        # to visit that page through Workbench, it will be rendered by\n        # the browser.  If all collections are served at the same\n        # domain, the browser will consider collections as coming from\n        # the same origin and having access to the same browsing data,\n        # enabling malicious JavaScript on that page to access Arvados\n        # on behalf of the victim.\n        #\n        # This is mitigated by having separate domains for each\n        # collection, or by limiting preview to circumstances where the\n        # collection is not accessed with the user's regular\n        # full-access token.\n        #\n        # Serve preview links using uuid or pdh in subdomain\n        # (requires wildcard DNS and TLS certificate)\n        #   https://*.collections.uuid_prefix.arvadosapi.com\n        #\n        # Serve preview links using uuid or pdh in main domain\n        # (requires wildcard DNS and TLS certificate)\n        #   https://*--collections.uuid_prefix.arvadosapi.com\n        #\n        # Serve preview links by setting uuid or pdh in the path.\n        # This configuration only allows previews of public data or\n        # collection-sharing links, because these use the anonymous\n        # user token or the token is already embedded in the URL.\n        # Other data must be handled as downloads via WebDAVDownload:\n        #   https://collections.uuid_prefix.arvadosapi.com\n        #\n        ExternalURL: \"\"\n\n      WebDAVDownload:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        # Base URL for download links. If blank, serve links to WebDAV\n        # with disposition=attachment query param.  
Unlike preview links,\n        # browsers do not render attachments, so there is no risk of XSS.\n        #\n        # If WebDAVDownload is blank, and WebDAV uses a\n        # single-origin form, then Workbench will show an error page\n        #\n        # Serve download links by setting uuid or pdh in the path:\n        #   https://download.uuid_prefix.arvadosapi.com\n        #\n        ExternalURL: \"\"\n\n      Keepstore:\n        InternalURLs:\n          SAMPLE:\n            ListenURL: \"\"\n            # Rendezvous is normally empty/omitted. When changing the\n            # URL of a Keepstore service, Rendezvous should be set to\n            # the old URL (with trailing slash omitted) to preserve\n            # rendezvous ordering.\n            Rendezvous: \"\"\n        ExternalURL: \"\"\n      Composer:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        ExternalURL: \"\"\n      WebShell:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        # ShellInABox service endpoint URL for a given VM.  If empty, do not\n        # offer web shell logins.\n        #\n        # E.g., using a path-based proxy server to forward connections to shell hosts:\n        # https://webshell.uuid_prefix.arvadosapi.com\n        #\n        # E.g., using a name-based proxy server to forward connections to shell hosts:\n        # https://*.webshell.uuid_prefix.arvadosapi.com\n        ExternalURL: \"\"\n      Workbench1:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        ExternalURL: \"\"\n      Workbench2:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        ExternalURL: \"\"\n      Health:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        ExternalURL: \"\"\n      ContainerWebServices:\n        InternalURLs: {SAMPLE: {ListenURL: \"\"}}\n        # URL used to make HTTP requests that are proxied to\n        # containers (which may host web apps or APIs).  Requires\n        # wildcard DNS and TLS certificate.\n        #\n        # From a subdomain:\n        #   https://*.containers.uuid_prefix.arvadosapi.com\n        #\n        # From the main domain:\n        #   https://*--containers.uuid_prefix.arvadosapi.com\n        ExternalURL: \"\"\n        # If ExternalPortMin and ExternalPortMax are non-zero, and\n        # ExternalURL is not a wildcard, container services listed in\n        # published_ports can be reached via dynamically assigned\n        # ports in the range [ExternalPortMin, ExternalPortMax].  
For\n        # example, if ExternalURL is https://example.com/ and\n        # ExternalPortMin is 8000, Arvados will dynamically assign\n        # https://example.com:8000/, https://example.com:8001/, etc.,\n        # as proxy addresses for services in running containers.\n        ExternalPortMin: 0\n        ExternalPortMax: 0\n\n    PostgreSQL:\n      # max concurrent connections per arvados server daemon\n      ConnectionPool: 32\n      Connection:\n        # All parameters here are passed to the PG client library in a connection string;\n        # see https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS\n        host: \"\"\n        port: \"\"\n        user: \"\"\n        password: \"\"\n        dbname: \"\"\n        SAMPLE: \"\"\n    API:\n      # Limit for how long a client token created by regular users can be valid;\n      # also used as a default expiration policy when no expiration date is\n      # specified.\n      # Default value zero means token expirations don't get clamped and no\n      # default expiration is set.\n      MaxTokenLifetime: 0s\n\n      # Maximum size (in bytes) allowed for a single API request.  This\n      # limit is published in the discovery document for use by clients.\n      # Note: You must separately configure the upstream web server or\n      # proxy to actually enforce the desired maximum request size on the\n      # server side.\n      MaxRequestSize: 134217728\n\n      # Limit the number of bytes read from the database during an index\n      # request (by retrieving and returning fewer rows than would\n      # normally be returned in a single response).\n      # Note 1: This setting never reduces the number of returned rows to\n      # zero, no matter how big the first data row is.\n      # Note 2: Only columns that *can* grow large count against this limit.\n      # Small fixed-width columns like UUIDs and datetimes never do.\n      MaxIndexDatabaseRead: 134217728\n\n      # Maximum number of items to return when responding to APIs that\n      # can return partial result sets using limit and offset parameters\n      # (e.g., *.index, groups.contents). If a request specifies a \"limit\"\n      # parameter higher than this value, this value is used instead.\n      MaxItemsPerResponse: 1000\n\n      # Maximum number of requests to process concurrently in a single\n      # service process, or 0 for no limit.\n      #\n      # Note this applies to all Arvados services (controller, webdav,\n      # websockets, etc.). Concurrency in the controller service is\n      # also effectively limited by MaxConcurrentRailsRequests (see\n      # below) because most controller requests proxy through to the\n      # RailsAPI service.\n      #\n      # HTTP proxies and load balancers downstream of arvados services\n      # should be configured to allow at least {MaxConcurrentRequests +\n      # MaxQueuedRequests + MaxGatewayTunnels} concurrent requests.\n      MaxConcurrentRequests: 64\n\n      # Maximum number of requests to process concurrently in a single\n      # RailsAPI service process, or 0 for no limit.\n      MaxConcurrentRailsRequests: 16\n\n      # Maximum number of incoming requests to hold in a priority\n      # queue waiting for one of the MaxConcurrentRequests slots to be\n      # free. 
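(For example, with the default values\n      # shown in this file, MaxConcurrentRequests: 64, MaxQueuedRequests:\n      # 128, and MaxGatewayTunnels: 1000, a downstream proxy should allow\n      # at least 64 + 128 + 1000 = 1192 concurrent requests.) 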
When the queue is longer than this, respond 503 to the\n      # lowest priority request.\n      #\n      # If MaxQueuedRequests is 0, respond 503 immediately to\n      # additional requests while at the MaxConcurrentRequests limit.\n      MaxQueuedRequests: 128\n\n      # Maximum time a \"lock container\" request is allowed to wait in\n      # the incoming request queue before returning 503.\n      MaxQueueTimeForLockRequests: 2s\n\n      # Maximum number of active gateway tunnel connections. One slot\n      # is consumed by each \"container shell\" connection. If using an\n      # HPC dispatcher (LSF or Slurm), one slot is consumed by each\n      # running container.  These do not count toward\n      # MaxConcurrentRequests.\n      MaxGatewayTunnels: 1000\n\n      # Maximum number of 64MiB memory buffers per Keepstore server process, or\n      # 0 for no limit. When this limit is reached, up to\n      # (MaxConcurrentRequests - MaxKeepBlobBuffers) HTTP requests requiring\n      # buffers (like GET and PUT) will wait for buffer space to be released.\n      # Any HTTP requests beyond MaxConcurrentRequests will receive an\n      # immediate 503 response.\n      #\n      # MaxKeepBlobBuffers should be set such that (MaxKeepBlobBuffers * 64MiB\n      # * 1.1) fits comfortably in memory. On a host dedicated to running\n      # Keepstore, divide total memory by 88MiB to suggest a suitable value.\n      # For example, if grep MemTotal /proc/meminfo reports MemTotal: 7125440\n      # kB, compute 7125440 / (88 * 1024) = 79 and set MaxKeepBlobBuffers: 79\n      MaxKeepBlobBuffers: 128\n\n      # API methods to disable. Disabled methods are not listed in the\n      # discovery document, and respond 404 to all requests.\n      # Example: {\"jobs.create\":{}, \"pipeline_instances.create\": {}}\n      DisabledAPIs: {}\n\n      # Interval between asynchronous permission view updates. Any\n      # permission-updating API called with the 'async' parameter schedules an\n      # update on the permission view in the future, if not already scheduled.\n      AsyncPermissionsUpdateInterval: 20s\n\n      # Maximum number of concurrent outgoing requests to make while\n      # serving a single incoming multi-cluster (federated) request.\n      MaxRequestAmplification: 4\n\n      # Maximum wall clock time to spend handling an incoming request.\n      RequestTimeout: 5m\n\n      # Websocket will send a periodic empty event after 'SendTimeout'\n      # if there is no other activity, to maintain the connection and\n      # detect dropped connections.\n      SendTimeout: 60s\n\n      WebsocketClientEventQueue: 64\n      WebsocketServerEventQueue: 4\n\n      # Timeout on requests to internal Keep services.\n      KeepServiceRequestTimeout: 15s\n\n      # Vocabulary file path, local to the node running the controller.\n      # This JSON file should contain the description of what's allowed\n      # as objects' metadata. Its format is described at:\n      # https://doc.arvados.org/admin/metadata-vocabulary.html\n      VocabularyPath: \"\"\n\n      # If true, a project must have a non-empty description field in\n      # order to be frozen.\n      FreezeProjectRequiresDescription: false\n\n      # Project properties that must have non-empty values in order to\n      # freeze a project. Example: \"property_name\": {}\n      FreezeProjectRequiresProperties:\n        SAMPLE: {}\n\n      # If true, only an admin user can un-freeze a project. 
If false,\n      # any user with \"manage\" permission can un-freeze.\n      UnfreezeProjectRequiresAdmin: false\n\n      # (Experimental) Use row-level locking on update API calls.\n      LockBeforeUpdate: false\n\n    Users:\n      # Config parameters to automatically set up new users.  If enabled,\n      # these users will be able to self-activate.  Enable this if you want\n      # to run an open instance where anyone can create an account and use\n      # the system without requiring manual approval.\n      #\n      # The params AutoSetupNewUsersWith* are meaningful only when AutoSetupNewUsers is turned on.\n      # AutoSetupUsernameBlacklist is a list of usernames to be blacklisted for auto setup.\n      AutoSetupNewUsers: false\n      AutoSetupNewUsersWithVmUUID: \"\"\n      AutoSetupUsernameBlacklist:\n        arvados: {}\n        git: {}\n        gitolite: {}\n        gitolite-admin: {}\n        root: {}\n        syslog: {}\n        SAMPLE: {}\n\n      # When NewUsersAreActive is set to true, new users will be active\n      # immediately.  This skips the \"self-activate\" step which enforces\n      # user agreements.  Should only be enabled for development.\n      NewUsersAreActive: false\n\n      # Newly activated users (whether set up by an admin or via\n      # AutoSetupNewUsers) immediately become visible to other active\n      # users.\n      #\n      # On a multi-tenant cluster, where the intent is for users to be\n      # invisible to one another unless they have been added to the\n      # same group(s) via the Workbench admin interface, change this to\n      # false.\n      ActivatedUsersAreVisibleToOthers: true\n\n      # If a user creates an account with this email address, they\n      # will be automatically set to admin.\n      AutoAdminUserWithEmail: \"\"\n\n      # If AutoAdminFirstUser is set to true, the first user to log in when no\n      # other admin users exist will automatically become an admin user.\n      AutoAdminFirstUser: false\n\n      # Support email address to display in Workbench.\n      SupportEmailAddress: \"arvados@example.com\"\n\n      # Outgoing email configuration:\n      #\n      # In order to send mail, Arvados expects a default SMTP server\n      # on localhost:25.  It cannot require authentication on\n      # connections from localhost.  
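(For example, a stock\n      # Postfix or similar MTA listening on localhost:25 is one common\n      # way to provide this; any local MTA with an unauthenticated\n      # localhost listener will do.) 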
That server should be configured\n      # to relay mail to a \"real\" SMTP server that is able to send\n      # email on behalf of your domain.\n\n      # Recipient for notification email sent out when a user sets a\n      # profile on their account.\n      UserProfileNotificationAddress: \"\"\n\n      # When sending a NewUser, NewInactiveUser, or UserProfile\n      # notification, this is the 'From' address to use\n      AdminNotifierEmailFrom: arvados@example.com\n\n      # Prefix for email subjects for NewUser and NewInactiveUser emails\n      EmailSubjectPrefix: \"[ARVADOS] \"\n\n      # When sending a welcome email to the user, the 'From' address to use\n      UserNotifierEmailFrom: arvados@example.com\n\n      # The welcome email sent to new users will be blind copied to\n      # these addresses.\n      UserNotifierEmailBcc:\n        SAMPLE: {}\n\n      # Recipients for notification email sent out when a user account\n      # is created and already set up to be able to log in\n      NewUserNotificationRecipients:\n        SAMPLE: {}\n\n      # Recipients for notification email sent out when a user account\n      # has been created but the user cannot log in until they are\n      # set up by an admin.\n      NewInactiveUserNotificationRecipients:\n        SAMPLE: {}\n\n      # Set AnonymousUserToken to enable anonymous user access. Populate this\n      # field with a random string at least 50 characters long.\n      AnonymousUserToken: \"\"\n\n      # The login provider for a user may supply a primary email\n      # address and one or more alternate email addresses.  If a new\n      # user has an alternate email address with the domain given\n      # here, use the username from the alternate email to generate\n      # the user's Arvados username. Otherwise, the username from\n      # the user's primary email address is used for the Arvados username.\n      # Currently implemented for OpenID Connect only.\n      PreferDomainForUsername: \"\"\n\n      # Send an email to each user when their account has been set up\n      # (meaning they are able to log in).\n      SendUserSetupNotificationEmail: false\n\n      # Ruby ERB template used for the email sent out to users when\n      # they have been set up.\n      UserSetupMailText: |\n        <% if not @user.full_name.empty? -%>\n        <%= @user.full_name %>,\n        <% else -%>\n        Hi there,\n        <% end -%>\n\n        Your Arvados account has been set up.  You can log in at\n\n        <%= Rails.configuration.Services.Workbench1.ExternalURL %>\n\n        Thanks,\n        Your Arvados administrator.\n\n      # If RoleGroupsVisibleToAll is true, all role groups are visible\n      # to all active users.\n      #\n      # If false, users must be granted permission to role groups in\n      # order to see them. This is more appropriate for a multi-tenant\n      # cluster.\n      RoleGroupsVisibleToAll: true\n\n      # If CanCreateRoleGroups is true, regular (non-admin) users can\n      # create new role groups.\n      #\n      # If false, only admins can create new role groups.\n      CanCreateRoleGroups: true\n\n      # During each period, a log entry with event_type=\"activity\"\n      # will be recorded for each user who is active during that\n      # period. 
The object_uuid attribute will indicate the user's\n      # UUID.\n      #\n      # Multiple log entries for the same user may be generated during\n      # a period if there are multiple controller processes or a\n      # controller process is restarted.\n      #\n      # Use 0 to disable activity logging.\n      ActivityLoggingPeriod: 24h\n\n      # The SyncUser* options control what system resources are managed by\n      # arvados-login-sync on shell nodes. They correspond to:\n      # * SyncUserAccounts: The user's Unix account on the shell node\n      # * SyncUserGroups: The group memberships of that account\n      # * SyncUserSSHKeys: Whether to authorize the user's Arvados SSH keys\n      # * SyncUserAPITokens: Whether to set up the user's Arvados API token\n      # All default to true.\n      SyncUserAccounts: true\n      SyncUserGroups: true\n      SyncUserSSHKeys: true\n      SyncUserAPITokens: true\n\n      # If SyncUserGroups=true, then arvados-login-sync will ensure that all\n      # managed accounts are members of the Unix groups listed in\n      # SyncRequiredGroups, in addition to any groups listed in their Arvados\n      # login permission. The default list includes the \"fuse\" group so\n      # users can use arv-mount. You can require no groups by specifying an\n      # empty list (i.e., `SyncRequiredGroups: []`).\n      SyncRequiredGroups:\n        - fuse\n\n      # SyncIgnoredGroups is a list of group names. arvados-login-sync will\n      # never modify these groups. If user login permissions list any groups\n      # in SyncIgnoredGroups, they will be ignored. If a user's Unix account\n      # belongs to any of these groups, arvados-login-sync will not remove\n      # the account from that group. The default is a set of particularly\n      # security-sensitive groups across Debian- and Red Hat-based\n      # distributions.\n      SyncIgnoredGroups:\n        - adm\n        - disk\n        - kmem\n        - mem\n        - root\n        - shadow\n        - staff\n        - sudo\n        - sys\n        - utempter\n        - utmp\n        - wheel\n\n    AuditLogs:\n      # Time to keep audit logs. (An audit log is a row added\n      # to the \"logs\" table in the PostgreSQL database each time an\n      # Arvados object is created, modified, or deleted.)\n      #\n      # Currently, websocket event notifications rely on audit logs, so\n      # this should not be set lower than 300s (5 minutes).\n      MaxAge: 336h\n\n      # Maximum number of log rows to delete in a single SQL transaction.\n      #\n      # If MaxDeleteBatch is 0, log entries will never be\n      # deleted by Arvados. Cleanup can be done by an external process\n      # without affecting any Arvados system processes, as long as very\n      # recent (<5 minutes old) logs are not deleted.\n      #\n      # 100000 is a reasonable batch size for most sites.\n      MaxDeleteBatch: 0\n\n      # Attributes to suppress in events and audit logs.  
Notably,\n      # specifying {\"manifest_text\": {}} here typically makes the database\n      # smaller and faster.\n      #\n      # Warning: Using any non-empty value here can have undesirable side\n      # effects for any client or component that relies on event logs.\n      # Use at your own risk.\n      UnloggedAttributes: {}\n\n    SystemLogs:\n\n      # Logging threshold: panic, fatal, error, warn, info, debug, or\n      # trace\n      LogLevel: info\n\n      # Logging format: json or text\n      Format: json\n\n      # Maximum characters of (JSON-encoded) query parameters to include\n      # in each request log entry. When params exceed this size, they will\n      # be JSON-encoded, truncated to this size, and logged as\n      # params_truncated.\n      MaxRequestLogParamsSize: 2000\n\n      # In all services except RailsAPI, periodically check whether\n      # the incoming HTTP request queue is nearly full (see\n      # MaxConcurrentRequests) and, if so, write a snapshot of the\n      # request queue to {service}-requests.json in the specified\n      # directory.\n      #\n      # Leave blank to disable.\n      RequestQueueDumpDirectory: \"\"\n\n    Collections:\n\n      # Enable access controls for data stored in Keep. This should\n      # always be set to true on a production cluster.\n      BlobSigning: true\n\n      # BlobSigningKey is a string of alphanumeric characters used to\n      # generate permission signatures for Keep locators. It must be\n      # identical to the permission key given to Keep. IMPORTANT: This\n      # is a site secret. It should be at least 50 characters.\n      #\n      # Modifying BlobSigningKey will invalidate all existing\n      # signatures, which can cause programs to fail (e.g., arv-put,\n      # arv-get, and Crunch jobs).  To avoid errors, rotate keys only\n      # when no such processes are running.\n      BlobSigningKey: \"\"\n\n      # Enable garbage collection of unreferenced blobs in Keep.\n      BlobTrash: true\n\n      # Time to leave unreferenced blobs in \"trashed\" state before\n      # deleting them, or 0 to skip the \"trashed\" state entirely and\n      # delete unreferenced blobs.\n      #\n      # If you use any Amazon S3 buckets as storage volumes, this\n      # must be at least 24h to avoid occasional data loss.\n      BlobTrashLifetime: 336h\n\n      # How often to check for (and delete) trashed blocks whose\n      # BlobTrashLifetime has expired.\n      BlobTrashCheckInterval: 24h\n\n      # Maximum number of concurrent \"trash blob\" and \"delete trashed\n      # blob\" operations conducted by a single keepstore process. Each\n      # of these can be set to 0 to disable the respective operation.\n      #\n      # If BlobTrashLifetime is zero, \"trash\" and \"delete trash\"\n      # happen at once, so only the lower of these two values is used.\n      BlobTrashConcurrency: 4\n      BlobDeleteConcurrency: 4\n\n      # Maximum number of concurrent \"create additional replica of\n      # existing blob\" operations conducted by a single keepstore\n      # process.\n      BlobReplicateConcurrency: 4\n\n      # Default replication level for collections. This is used when a\n      # collection's replication_desired attribute is nil.\n      DefaultReplication: 2\n\n      # BlobSigningTTL determines the minimum lifetime of transient\n      # data, i.e., blocks that are not referenced by\n      # collections. 
Unreferenced blocks exist for two reasons:\n      #\n      # 1) A data block must be written to a disk/cloud backend device\n      # before a collection can be created/updated with a reference to\n      # it.\n      #\n      # 2) Deleting or updating a collection can remove the last\n      # remaining reference to a data block.\n      #\n      # If BlobSigningTTL is too short, long-running\n      # processes/containers will fail when they take too long (a)\n      # between writing blocks and writing collections that reference\n      # them, or (b) between reading collections and reading the\n      # referenced blocks.\n      #\n      # If BlobSigningTTL is too long, data will still be stored long\n      # after the referring collections are deleted, and you will\n      # needlessly fill up disks or waste money on cloud storage.\n      #\n      # Modifying BlobSigningTTL invalidates existing signatures; see\n      # BlobSigningKey note above.\n      #\n      # The default is 2 weeks.\n      BlobSigningTTL: 336h\n\n      # When running keep-balance, this is the destination filename for\n      # the list of lost block hashes if there are any, one per line.\n      # Updated automatically during each successful run.\n      BlobMissingReport: \"\"\n\n      # keep-balance operates periodically: it does a\n      # scan/balance operation, sleeps, and repeats.\n      #\n      # BalancePeriod determines the interval between start times of\n      # successive scan/balance operations. If a scan/balance operation\n      # takes longer than BalancePeriod, the next one will follow it\n      # immediately.\n      #\n      # If SIGUSR1 is received during an idle period between operations,\n      # the next operation will start immediately.\n      BalancePeriod: 6h\n\n      # Limits the number of collections retrieved by keep-balance per\n      # API transaction. If this is zero, page size is\n      # determined by the API server's own page size limits (see\n      # API.MaxItemsPerResponse and API.MaxIndexDatabaseRead).\n      BalanceCollectionBatch: 0\n\n      # The size of keep-balance's internal queue of\n      # collections. Higher values may improve throughput by allowing\n      # keep-balance to fetch collections from the database while the\n      # current collections are still being processed, at the expense of\n      # using more memory.  If this is zero or omitted, pages are\n      # processed serially.\n      BalanceCollectionBuffers: 4\n\n      # Maximum time for a rebalancing run. This ensures keep-balance\n      # eventually gives up and retries if, for example, a network\n      # error causes a hung connection that is never closed by the\n      # OS. It should be long enough that it doesn't interrupt a\n      # long-running balancing operation.\n      BalanceTimeout: 6h\n\n      # Maximum number of replication_confirmed /\n      # storage_classes_confirmed updates to write to the database\n      # after a rebalancing run. When many updates are needed, this\n      # spreads them over a few runs rather than applying them all at\n      # once.\n      BalanceUpdateLimit: 100000\n\n      # Maximum number of \"pull block from other server\" and \"trash\n      # block\" requests to send to each keepstore server at a\n      # time. Smaller values use less memory in keepstore and\n      # keep-balance. Larger values allow more progress per\n      # keep-balance iteration. 
A zero value computes all of the\n      # needed changes but does not apply any.\n      BalancePullLimit: 100000\n      BalanceTrashLimit: 100000\n\n      # Default lifetime for ephemeral collections: 2 weeks. This must not\n      # be less than BlobSigningTTL.\n      DefaultTrashLifetime: 336h\n\n      # Interval between trash sweeps. During a trash sweep,\n      # collections are marked as trash if their trash_at time has\n      # arrived, and deleted if their delete_at time has arrived.\n      TrashSweepInterval: 60s\n\n      # If true, enable collection versioning.\n      # When a collection's preserve_version field is true or the current version\n      # is older than the number of seconds defined by PreserveVersionIfIdle,\n      # a snapshot of the collection's previous state is created and linked to\n      # the current collection.\n      CollectionVersioning: true\n\n      #   0s = auto-create a new version on every update.\n      #  -1s = never auto-create new versions.\n      # > 0s = auto-create a new version when older than the specified number of seconds.\n      PreserveVersionIfIdle: 10s\n\n      # If non-empty, allow project and collection names to contain\n      # the \"/\" character (slash/stroke/solidus), and replace \"/\" with\n      # the given string in the filesystem hierarchy presented by\n      # WebDAV. Example values are \"%2f\" and \"{slash}\". Names that\n      # contain the substitution string itself may result in confusing\n      # behavior, so a value like \"_\" is not recommended.\n      #\n      # If the default empty value is used, the server will reject\n      # requests to create or rename a collection when the new name\n      # contains \"/\".\n      #\n      # If the value \"/\" is used, project and collection names\n      # containing \"/\" will be allowed, but they will not be\n      # accessible via WebDAV.\n      #\n      # Use of this feature is not recommended if it can be avoided.\n      ForwardSlashNameSubstitution: \"\"\n\n      # Include \"folder objects\" in S3 ListObjects responses.\n      S3FolderObjects: true\n\n      # Managed collection properties. 
At creation time, if the client didn't\n      # provide the listed keys, they will be automatically populated according to\n      # one of the following behaviors:\n      #\n      # * UUID of the user who owns the containing project.\n      #   responsible_person_uuid: {Function: original_owner, Protected: true}\n      #\n      # * Default concrete value.\n      #   foo_bar: {Value: baz, Protected: false}\n      #\n      # If Protected is true, only an admin user can modify its value.\n      ManagedProperties:\n        SAMPLE: {Function: original_owner, Protected: true}\n\n      # In \"trust all content\" mode, Workbench will redirect download\n      # requests to the WebDAV preview link, even in cases where\n      # WebDAV would have to expose XSS vulnerabilities in order to\n      # handle the redirect (see discussion on Services.WebDAV).\n      #\n      # This setting has no effect in the recommended configuration, where the\n      # WebDAV service is configured to have a separate domain for every\n      # collection and XSS protection is provided by browsers' same-origin\n      # policy.\n      #\n      # The default setting (false) is appropriate for a multi-user site.\n      TrustAllContent: false\n\n      # Cache parameters for WebDAV content serving:\n      WebDAVCache:\n        # Time to cache manifests, permission checks, and sessions.\n        TTL: 300s\n\n        # Maximum amount of data cached in /var/cache/arvados/keep.\n        # Can be given as a percentage of filesystem size (\"10%\") or a\n        # number of bytes (\"10 GiB\")\n        DiskCacheSize: 10%\n\n        # Approximate memory limit (in bytes) for session cache.\n        #\n        # Note this applies to the in-memory representation of\n        # projects and collections -- metadata, block locators,\n        # filenames, etc. -- not the file data itself (see\n        # DiskCacheSize).\n        MaxCollectionBytes: 100 MB\n\n        # Maximum number of persistent sessions.\n        MaxSessions: 100\n\n      # Selectively set permissions for regular users and admins to\n      # download or upload data files using the upload/download\n      # features for Workbench, WebDAV and S3 API support.\n      WebDAVPermission:\n        User:\n          Download: true\n          Upload: true\n        Admin:\n          Download: true\n          Upload: true\n\n      # Selectively set permissions for regular users and admins to be\n      # able to download or upload blocks using arv-put and\n      # arv-get from outside the cluster.\n      KeepproxyPermission:\n        User:\n          Download: true\n          Upload: true\n        Admin:\n          Download: true\n          Upload: true\n\n      # Post upload / download events to the API server logs table, so\n      # that they can be included in the arv-user-activity report.\n      # You can disable this if you find that it is creating excess\n      # load on the API server and you don't need it.\n      WebDAVLogEvents: true\n\n      # If a client requests partial content past the start of a file,\n      # and a request from the same client for the same file was logged\n      # within the past WebDAVLogDownloadInterval, do not write a new log.\n      # This throttling applies to both printed and API server logs.\n      # This reduces log output when clients like `aws s3 cp` download\n      # one file in small chunks in parallel.\n      # Set this to 0 to disable throttling and log all requests.\n      WebDAVLogDownloadInterval: 30s\n\n      # Per-connection output buffer for WebDAV downloads. 
May improve\n        # throughput for large files, particularly when storage volumes\n        # have high latency.\n        #\n        # Size may be specified as a number of bytes (\"0\") or with units\n        # (\"128KiB\", \"1 MB\").\n        WebDAVOutputBuffer: 0\n\n    Login:\n      # One of the following mechanisms (Google, PAM, LDAP, or\n      # LoginCluster) should be enabled; see\n      # https://doc.arvados.org/install/setup-login.html\n\n      Google:\n        # Authenticate with Google.\n        Enable: false\n\n        # Use the Google Cloud console to enable the People API (APIs\n        # and Services > Enable APIs and services > Google People API\n        # > Enable), generate a Client ID and secret (APIs and\n        # Services > Credentials > Create credentials > OAuth client\n        # ID > Web application) and add your controller's /login URL\n        # (e.g., \"https://zzzzz.example.com/login\") as an authorized\n        # redirect URL.\n        ClientID: \"\"\n        ClientSecret: \"\"\n\n        # Allow users to log in to existing accounts using any verified\n        # email address listed by their Google account. If true, the\n        # Google People API must be enabled in order for Google login to\n        # work. If false, only the primary email address will be used.\n        AlternateEmailAddresses: true\n\n        # Send additional parameters with authentication requests. See\n        # https://developers.google.com/identity/protocols/oauth2/openid-connect#authenticationuriparameters\n        # for a list of supported parameters.\n        AuthenticationRequestParameters:\n          # Show the \"choose which Google account\" page, even if the\n          # client is currently logged in to exactly one Google\n          # account.\n          prompt: select_account\n\n          SAMPLE: \"\"\n\n      OpenIDConnect:\n        # Authenticate with an OpenID Connect provider.\n        Enable: false\n\n        # Issuer URL, e.g., \"https://login.example.com\".\n        #\n        # This must be exactly equal to the URL returned by the issuer\n        # itself in its config response (\"issuer\" key). If the\n        # configured value is \"https://example\" and the provider\n        # returns \"https://example:443\" or \"https://example/\" then\n        # login will fail, even though those URLs are equivalent (RFC\n        # 3986).\n        Issuer: \"\"\n\n        # Your client ID and client secret (supplied by the provider).\n        ClientID: \"\"\n        ClientSecret: \"\"\n\n        # OpenID claim field containing the user's email\n        # address. Normally \"email\"; see\n        # https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims\n        EmailClaim: \"email\"\n\n        # OpenID claim field containing the email verification\n        # flag. Normally \"email_verified\".  To accept every returned\n        # email address without checking a \"verified\" field at all,\n        # use an empty string \"\".\n        EmailVerifiedClaim: \"email_verified\"\n\n        # OpenID claim field containing the user's preferred\n        # username. If empty, use the mailbox part of the user's email\n        # address.\n        UsernameClaim: \"\"\n\n        # Send additional parameters with authentication requests,\n        # like {display: page, prompt: consent}. 
See\n        # https://openid.net/specs/openid-connect-core-1_0.html#AuthRequest\n        # and refer to your provider's documentation for supported\n        # parameters.\n        AuthenticationRequestParameters:\n          SAMPLE: \"\"\n\n        # Accept an OIDC access token as an API token if the OIDC\n        # provider's UserInfo endpoint accepts it.\n        #\n        # AcceptAccessTokenScope should also be used when enabling\n        # this feature.\n        AcceptAccessToken: false\n\n        # Before accepting an OIDC access token as an API token, first\n        # check that it is a JWT whose \"scope\" value includes this\n        # value. Example: \"https://zzzzz.example.com/\" (your Arvados\n        # API endpoint).\n        #\n        # If this value is empty and AcceptAccessToken is true, all\n        # access tokens will be accepted regardless of scope,\n        # including non-JWT tokens. This is not recommended.\n        AcceptAccessTokenScope: \"\"\n\n      PAM:\n        # Use PAM to authenticate users.\n        Enable: false\n\n        # PAM service name. PAM will apply the policy in the\n        # corresponding config file (e.g., /etc/pam.d/arvados) or, if\n        # there is none, the default \"other\" config.\n        Service: arvados\n\n        # Domain name (e.g., \"example.com\") to use to construct the\n        # user's email address if PAM authentication returns a\n        # username with no \"@\". If empty, use the PAM username as the\n        # user's email address, whether or not it contains \"@\".\n        #\n        # Note that the email address is used as the primary key for\n        # user records when logging in. Therefore, if you change\n        # PAMDefaultEmailDomain after the initial installation, you\n        # should also update existing user records to reflect the new\n        # domain. Otherwise, next time those users log in, they will\n        # be given new accounts instead of accessing their existing\n        # accounts.\n        DefaultEmailDomain: \"\"\n\n      LDAP:\n        # Use an LDAP service to authenticate users.\n        Enable: false\n\n        # Server URL, like \"ldap://ldapserver.example.com:389\" or\n        # \"ldaps://ldapserver.example.com:636\".\n        URL: \"ldap://ldap:389\"\n\n        # Use StartTLS upon connecting to the server.\n        StartTLS: true\n\n        # Skip TLS certificate name verification.\n        InsecureTLS: false\n\n        # Minimum TLS version to negotiate when connecting to the server\n        # (ldaps://... or StartTLS). It may be necessary to set this\n        # to \"1.1\" for compatibility with older LDAP servers that fail\n        # with 'LDAP Result Code 200 \"Network Error\": TLS handshake\n        # failed (tls: server selected unsupported protocol version\n        # 301)'.\n        #\n        # If blank, use the recommended minimum version (1.2).\n        MinTLSVersion: \"\"\n\n        # Strip the @domain part if a user supplies an email-style\n        # username with this domain. If \"*\", strip any user-provided\n        # domain. If \"\", never strip the domain part. Example:\n        # \"example.com\"\n        StripDomain: \"\"\n\n        # If, after applying StripDomain, the username contains no \"@\"\n        # character, append this domain to form an email-style\n        # username. 
Example: \"example.com\"\n        AppendDomain: \"\"\n\n        # The LDAP attribute to filter on when looking up a username\n        # (after applying StripDomain and AppendDomain).\n        SearchAttribute: uid\n\n        # Bind with this username (DN or UPN) and password when\n        # looking up the user record.\n        #\n        # Example user: \"cn=admin,dc=example,dc=com\"\n        SearchBindUser: \"\"\n        SearchBindPassword: \"\"\n\n        # Directory base for username lookup. Example:\n        # \"ou=Users,dc=example,dc=com\"\n        SearchBase: \"\"\n\n        # Additional filters to apply when looking up users' LDAP\n        # entries. This can be used to restrict access to a subset of\n        # LDAP users, or to disambiguate users from other directory\n        # entries that have the SearchAttribute present.\n        #\n        # Special characters in assertion values must be escaped (see\n        # RFC 4515).\n        #\n        # Example: \"(objectClass=person)\"\n        SearchFilters: \"\"\n\n        # LDAP attribute to use as the user's email address.\n        #\n        # Important: This must not be an attribute whose value can be\n        # edited in the directory by the users themselves. Otherwise,\n        # users can take over other users' Arvados accounts trivially\n        # (email address is the primary key for Arvados accounts.)\n        EmailAttribute: mail\n\n        # LDAP attribute to use as the preferred Arvados username. If\n        # no value is found (or this config is empty) the username\n        # originally supplied by the user will be used.\n        UsernameAttribute: uid\n\n      Test:\n        # Authenticate users listed here in the config file. This\n        # feature is intended to be used in test environments, and\n        # should not be used in production.\n        Enable: false\n        Users:\n          SAMPLE:\n            Email: alice@example.com\n            Password: xyzzy\n\n      # The cluster ID to delegate the user database.  When set,\n      # logins on this cluster will be redirected to the login cluster\n      # (login cluster must appear in RemoteClusters with Proxy: true)\n      LoginCluster: \"\"\n\n      # How long a cached token belonging to a remote cluster will\n      # remain valid before it needs to be revalidated.\n      RemoteTokenRefresh: 5m\n\n      # How long a client token created from a login flow will be valid without\n      # asking the user to re-login. Example values: 60m, 8h.\n      # Default value zero means tokens don't have expiration.\n      TokenLifetime: 0s\n\n      # If true (default), tokens are allowed to create new tokens and\n      # view existing tokens belonging to the same user.\n      # If false, tokens are not allowed to view or create other\n      # tokens. New tokens can only be created by going through login\n      # again.\n      IssueTrustedTokens: true\n\n      # Origins (scheme://host[:port]) of clients trusted to receive\n      # new tokens via login process.  The ExternalURLs of the local\n      # Workbench1 and Workbench2 are trusted implicitly and do not\n      # need to be listed here.  
If this is a LoginCluster, you\n      # probably want to include the other Workbench instances in the\n      # federation in this list.\n      #\n      # A wildcard like \"https://*.example\" will match client URLs\n      # like \"https://a.example\" and \"https://a.b.c.example\".\n      #\n      # Example:\n      #\n      # TrustedClients:\n      #   \"https://workbench.other-cluster.example\": {}\n      #   \"https://workbench2.other-cluster.example\": {}\n      TrustedClients:\n        SAMPLE: {}\n\n      # Treat any origin whose host part is \"localhost\" or a private\n      # IP address (e.g., http://10.0.0.123:3000/) as if it were\n      # listed in TrustedClients.\n      #\n      # Intended only for test/development use. Not appropriate for\n      # production use.\n      TrustPrivateNetworks: false\n\n    TLS:\n      # Use \"file:///var/lib/acme/live/example.com/cert\" and\n      # \".../privkey\" to load externally managed certificates.\n      Certificate: \"\"\n      Key: \"\"\n\n      # Accept invalid certificates when connecting to servers. Never\n      # use this in production.\n      Insecure: false\n\n      ACME:\n        # Obtain certificates automatically for ExternalURL domains\n        # using an ACME server and http-01 validation.\n        #\n        # To use Let's Encrypt, specify \"LE\".  To use the Let's\n        # Encrypt staging environment, specify \"LE-staging\".  To use a\n        # different ACME server, specify the full directory URL\n        # (\"https://...\").\n        #\n        # Note: this feature is not yet implemented in released\n        # versions, only in the alpha/prerelease arvados-server-easy\n        # package.\n        #\n        # Implies agreement with the server's terms of service.\n        Server: \"\"\n\n    Containers:\n      # List of supported Docker Registry image formats that compute nodes\n      # are able to use. `arv keep docker` will error out if a user tries\n      # to store an image with an unsupported format. Use an empty array\n      # to skip the compatibility check (and display a warning message to\n      # that effect).\n      #\n      # Example for sites running docker < 1.10: {\"v1\": {}}\n      # Example for sites running docker >= 1.10: {\"v2\": {}}\n      # Example for disabling check: {}\n      SupportedDockerImageFormats:\n        \"v2\": {}\n        SAMPLE: {}\n\n      # Include details about job reuse decisions in the server log. This\n      # causes additional database queries to run, so it should not be\n      # enabled unless you expect to examine the resulting logs for\n      # troubleshooting purposes.\n      LogReuseDecisions: false\n\n      # Default value for keep_cache_ram of a container's\n      # runtime_constraints.  
Note: this gets added to the RAM request\n      # used to allocate a VM or submit an HPC job.\n      #\n      # If this is zero, container requests that don't specify RAM or\n      # disk cache size will use a disk cache, sized to the\n      # container's RAM requirement (but with minimum 2 GiB and\n      # maximum 32 GiB).\n      #\n      # Note: If you change this value, containers that used the previous\n      # default value will only be reused by container requests that\n      # explicitly specify the previous value in their keep_cache_ram\n      # runtime constraint.\n      DefaultKeepCacheRAM: 0\n\n      # Number of times a container can be unlocked before being\n      # automatically cancelled.\n      MaxDispatchAttempts: 10\n\n      # Default value for container_count_max for container requests.  This is the\n      # number of times Arvados will create a new container to satisfy a container\n      # request.  If a container is cancelled, Arvados will create a new container if\n      # container_count < container_count_max on any container request associated\n      # with the cancelled container.\n      MaxRetryAttempts: 3\n\n      # Schedule all child containers on preemptible instances (e.g. AWS\n      # Spot Instances) even if not requested by the submitter.\n      #\n      # If false, containers are scheduled on preemptible instances\n      # only when requested by the submitter.\n      #\n      # This flag is ignored if no preemptible instance types are\n      # configured, and has no effect on top-level containers.\n      AlwaysUsePreemptibleInstances: false\n\n      # Automatically add a preemptible variant for every\n      # non-preemptible entry in InstanceTypes below. The maximum bid\n      # price for the preemptible variant will be the non-preemptible\n      # price multiplied by PreemptiblePriceFactor. 
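(For example, with a\n      # hypothetical PreemptiblePriceFactor of 1.2, an instance type\n      # priced at $2.00/h would gain a preemptible variant with a\n      # maximum bid price of $2.40/h.) 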
If 0, preemptible\n      # variants are not added automatically.\n      #\n      # A price factor of 1.0 is a reasonable starting point.\n      PreemptiblePriceFactor: 0\n\n      # When the lowest-priced instance type for a given container is\n      # not available, try other instance types, up to the indicated\n      # maximum price factor.\n      #\n      # For example, with MaximumPriceFactor 1.5, if the\n      # lowest-cost instance type A suitable for a given container\n      # costs $2/h, Arvados may run the container on any instance type\n      # B costing $3/h or less when instance type A is not available\n      # or an idle instance of type B is already running.\n      MaximumPriceFactor: 1.5\n\n      # PEM encoded SSH key (RSA, DSA, ECDSA, or ED25519) used by the\n      # cloud dispatcher for executing containers on worker VMs.\n      # Begins with \"-----BEGIN RSA PRIVATE KEY-----\\n\"\n      # and ends with \"\\n-----END RSA PRIVATE KEY-----\\n\".\n      #\n      # Use \"file:///absolute/path/to/key\" to load the key from a\n      # separate file instead of embedding it in the configuration\n      # file.\n      #\n      # Amazon EC2 only supports RSA and ED25519 keys.\n      DispatchPrivateKey: \"\"\n\n      # Maximum time to wait for workers to come up before abandoning\n      # stale locks from a previous dispatch process.\n      StaleLockTimeout: 1m\n\n      # The crunch-run command used to start a container on a worker node.\n      #\n      # When dispatching to cloud VMs, this is used only if\n      # DeployRunnerBinary in the CloudVMs section is set to the empty\n      # string.\n      CrunchRunCommand: \"crunch-run\"\n\n      # Extra arguments to add to crunch-run invocation\n      # Example: [\"--cgroup-parent-subsystem=memory\"]\n      CrunchRunArgumentsList: []\n\n      # Extra RAM to reserve on the node, in addition to\n      # the amount specified in the container's RuntimeConstraints\n      ReserveExtraRAM: 550MiB\n\n      # Minimum time between two attempts to run the same container\n      MinRetryPeriod: 0s\n\n      # Container runtime: \"docker\" (default) or \"singularity\"\n      RuntimeEngine: docker\n\n      # When running a container, run a dedicated keepstore process,\n      # using the specified number of 64 MiB memory buffers per\n      # allocated CPU core (VCPUs in the container's runtime\n      # constraints). The dedicated keepstore handles I/O for\n      # collections mounted in the container, as well as saving\n      # container logs.\n      #\n      # A zero value disables this feature.\n      #\n      # In order for this feature to be activated, no volume may use\n      # AccessViaHosts, and no writable volume may have Replication\n      # lower than Collections.DefaultReplication. If these\n      # requirements are not satisfied, the feature is disabled\n      # automatically regardless of the value given here.\n      #\n      # When an HPC dispatcher is in use (see SLURM and LSF sections),\n      # this feature depends on the operator to ensure an up-to-date\n      # cluster configuration file (/etc/arvados/config.yml) is\n      # available on all compute nodes. If it is missing or not\n      # readable by the crunch-run user, the feature will be disabled\n      # automatically. 
To read it from a different location, add a\n      # \"-config=/path/to/config.yml\" argument to\n      # CrunchRunArgumentsList above.\n      #\n      # When the cloud dispatcher is in use (see CloudVMs section) and\n      # this configuration is enabled, the entire cluster\n      # configuration file, including the system root token, is copied\n      # to the worker node and held in memory for the duration of the\n      # container.\n      LocalKeepBlobBuffersPerVCPU: 1\n\n      # When running a dedicated keepstore process for a container\n      # (see LocalKeepBlobBuffersPerVCPU), write keepstore log\n      # messages to keepstore.txt in the container's log collection.\n      #\n      # These log messages can reveal some volume configuration\n      # details, error messages from the cloud storage provider, etc.,\n      # which are not otherwise visible to users.\n      #\n      # Accepted values:\n      # * \"none\" -- no keepstore.txt file\n      # * \"all\" -- all logs, including request and response lines\n      # * \"errors\" -- all logs except \"response\" logs with 2xx\n      #   response codes and \"request\" logs\n      LocalKeepLogsToContainerLog: none\n\n      Logging:\n        # Container logs are written to Keep and saved in a\n        # collection, which is updated periodically while the\n        # container runs.  This value sets the interval between\n        # collection updates.\n        LogUpdatePeriod: 30m\n\n        # The log collection is also updated when the specified amount of\n        # log data (given in bytes) is produced in less than one update\n        # period.\n        LogUpdateSize: 32MiB\n\n      ShellAccess:\n        # An admin user can use \"arvados-client shell\" to start an\n        # interactive shell (with any user ID) in any running\n        # container.\n        Admin: true\n\n        # Any user can use \"arvados-client shell\" to start an\n        # interactive shell (with any user ID) in any running\n        # container that they started, provided it isn't also\n        # associated with a different user's container request.\n        #\n        # Interactive sessions make it easy to alter the container's\n        # runtime environment in ways that aren't recorded or\n        # reproducible. Consider the implications for automatic\n        # container reuse before enabling and using this feature. 
In\n        # particular, note that starting an interactive session does\n        # not disqualify a container from being reused by a different\n        # user/workflow in the future.\n        User: false\n\n      SLURM:\n        PrioritySpread: 0\n\n        # Arguments to sbatch when submitting Arvados containers as\n        # SLURM jobs.\n        #\n        # Template variables starting with % will be substituted as follows:\n        #\n        # %U uuid\n        # %C number of VCPUs\n        # %M memory in MiB\n        # %T tmp in MiB\n        # %G number of GPU devices (runtime_constraints.gpu.device_count)\n        # %W maximum run time in minutes\n        # %P comma separated partitions (scheduling_parameters.partitions)\n        # %I optimal instance type (if instance types are configured)\n        # %% a single % character\n        #\n        # Any argument containing %P or %W will be omitted if the\n        # corresponding parameter is empty, e.g., with the default\n        # configuration, \"--partition=%P\" will be omitted for\n        # containers that have no scheduling_parameters.partitions.\n        #\n        # Arvados prepends some additional non-configurable sbatch\n        # arguments, including \"--no-requeue\" and \"--nice=...\".\n        SbatchArgumentsList: [\"--job-name=%U\", \"--mem=%M\", \"--cpus-per-task=%C\", \"--tmp=%T\", \"--partition=%P\"]\n\n        # Additional arguments to sbatch when submitting containers\n        # that have runtime_constraints.gpu.device_count > 0\n        SbatchGPUArgumentsList: [\"--gpus=%G\"]\n\n        SbatchEnvironmentVariables:\n          SAMPLE: \"\"\n\n      LSF:\n        # Arguments to bsub when submitting Arvados containers as LSF jobs.\n        #\n        # Template variables starting with % will be substituted as follows:\n        #\n        # %U uuid\n        # %C number of VCPUs\n        # %M memory in MiB\n        # %T tmp in MiB\n        # %G number of GPU devices (runtime_constraints.gpu.device_count)\n        # %W maximum run time in minutes (see MaxRunTimeOverhead and\n        #    MaxRunTimeDefault below)\n        #\n        # Use %% to express a literal %. For example, the %%J in the\n        # default argument list will be changed to %J, which is\n        # interpreted by bsub itself.\n        #\n        # Note that the default arguments cause LSF to write two files\n        # in /tmp on the compute node each time an Arvados container\n        # runs. 
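(With the default arguments below, those\n        # files are /tmp/crunch-run.<LSF job ID>.out and .err; a cron job\n        # or systemd-tmpfiles rule that purges old matching files is one\n        # possible cleanup approach.) 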
Ensure you have something in place to delete old files\n        # from /tmp, or adjust the \"-o\" and \"-e\" arguments accordingly.\n        #\n        # If [\"-We\", \"%W\"] or [\"-W\", \"%W\"] appear in this argument\n        # list, and MaxRunTimeDefault is not set (see below), both of\n        # those arguments will be dropped from the argument list when\n        # running a container that has no max_run_time value.\n        BsubArgumentsList: [\"-o\", \"/tmp/crunch-run.%%J.out\", \"-e\", \"/tmp/crunch-run.%%J.err\", \"-J\", \"%U\", \"-n\", \"%C\", \"-D\", \"%MMB\", \"-R\", \"rusage[mem=%MMB:tmp=%TMB] span[hosts=1]\", \"-R\", \"select[mem>=%MMB]\", \"-R\", \"select[tmp>=%TMB]\", \"-R\", \"select[ncpus>=%C]\", \"-We\", \"%W\"]\n\n        # Arguments that will be appended to the bsub command line\n        # when submitting Arvados containers as LSF jobs with\n        # runtime_constraints.gpu.device_count > 0\n        BsubGPUArguments: [\"-gpu\", \"num=%G\"]\n\n        # Use sudo to switch to this user account when submitting LSF\n        # jobs.\n        #\n        # This account must exist on the hosts where LSF jobs run\n        # (\"execution hosts\"), as well as on the host where the\n        # Arvados LSF dispatcher runs (\"submission host\").\n        BsubSudoUser: \"crunch\"\n\n        # When passing the scheduling_parameters.max_run_time value\n        # to LSF via \"%W\", add this much time to account for\n        # crunch-run startup/shutdown overhead.\n        MaxRunTimeOverhead: 5m\n\n        # If non-zero, MaxRunTimeDefault is used as the default value\n        # for max_run_time for containers that do not specify a time\n        # limit.  MaxRunTimeOverhead will be added to this.\n        #\n        # Example:\n        # MaxRunTimeDefault: 2h\n        MaxRunTimeDefault: 0\n\n      CloudVMs:\n        # Enable the cloud scheduler.\n        Enable: false\n\n        # Name/number of port where workers' SSH services listen.\n        SSHPort: \"22\"\n\n        # Interval between queue polls.\n        PollInterval: 10s\n\n        # Shell command to execute on each worker to determine whether\n        # the worker is booted and ready to run containers. 
It should\n        # exit zero if the worker is ready.\n        BootProbeCommand: \"systemctl is-system-running\"\n\n        # Minimum interval between consecutive probes to a single\n        # worker.\n        ProbeInterval: 10s\n\n        # Maximum probes per second, across all workers in a pool.\n        MaxProbesPerSecond: 10\n\n        # Time before repeating SIGTERM when killing a container.\n        TimeoutSignal: 5s\n\n        # Time to give up on a process (most likely arv-mount) that\n        # still holds a container lockfile after its main supervisor\n        # process has exited, and declare the instance broken.\n        TimeoutStaleRunLock: 5s\n\n        # Time to give up on SIGTERM and write off the worker.\n        TimeoutTERM: 2m\n\n        # Maximum create/destroy-instance operations per second (0 =\n        # unlimited).\n        MaxCloudOpsPerSecond: 10\n\n        # Maximum concurrent instance creation operations (0 = unlimited).\n        #\n        # MaxConcurrentInstanceCreateOps limits the number of instance creation\n        # requests that can be in flight at any one time, whereas\n        # MaxCloudOpsPerSecond limits the number of create/destroy operations\n        # that can be started per second.\n        #\n        # Because the API for instance creation on Azure is synchronous, it is\n        # recommended to increase MaxConcurrentInstanceCreateOps when running\n        # on Azure. When using managed images, a value of 20 would be\n        # appropriate. When using Azure Shared Image Galleries, it could be set\n        # higher. For more information, see\n        # https://docs.microsoft.com/en-us/azure/virtual-machines/linux/capture-image\n        #\n        # MaxConcurrentInstanceCreateOps can be increased for other cloud\n        # providers too, if desired.\n        MaxConcurrentInstanceCreateOps: 1\n\n        # The maximum number of instances to run at a time, or 0 for\n        # unlimited.\n        #\n        # If more instances than this are already running and busy\n        # when the dispatcher starts up, the running containers will\n        # be allowed to finish before the excess instances are shut\n        # down.\n        MaxInstances: 64\n\n        # Maximum number of containers to run at a time on a single\n        # instance when it has enough RAM, CPU, and scratch space to\n        # do so, or 0 for unlimited.\n        MaxRunningContainersPerInstance: 0\n\n        # The minimum number of instances expected to be runnable\n        # without reaching a provider-imposed quota.\n        #\n        # This is used as the initial value for the dispatcher's\n        # dynamic instance limit, which increases (up to MaxInstances)\n        # as containers start up successfully and decreases in\n        # response to high API load and cloud quota errors.\n        #\n        # Setting this to 0 means the dynamic instance limit will\n        # start at MaxInstances.\n        #\n        # You may want to set this (to a value less than\n        # MaxInstances) when there is significant\n        # variability or uncertainty in the actual cloud resources\n        # available.  Upon reaching InitialQuotaEstimate, the\n        # dispatcher will switch to a more conservative behavior with\n        # slower instance start to avoid over-shooting cloud resource\n        # limits.\n        InitialQuotaEstimate: 0\n\n        # Maximum fraction of available instance capacity allowed to\n        # run \"supervisor\" containers at any given time. 
A supervisor\n        # is a container whose purpose is mainly to submit and manage\n        # other containers, such as the arvados-cwl-runner\n        # workflow runner.\n        #\n        # If there is a hard limit on the number of concurrent\n        # containers that the cluster can run, it is important to\n        # avoid crowding out the containers doing useful work with\n        # containers that just create more work.\n        #\n        # For example, with the default MaxInstances of 64, the dispatcher will\n        # schedule at most floor(64*0.50) = 32 concurrent workflow\n        # runners, ensuring 32 slots are available for work.\n        SupervisorFraction: 0.50\n\n        # Interval between cloud provider syncs/updates (\"list all\n        # instances\").\n        SyncInterval: 1m\n\n        # Time to leave an idle worker running (in case new containers\n        # appear in the queue that it can run) before shutting it\n        # down.\n        TimeoutIdle: 1m\n\n        # Time to wait for a new worker to boot (i.e., pass\n        # BootProbeCommand) before giving up and shutting it down.\n        TimeoutBooting: 10m\n\n        # Maximum time a worker can stay alive with no successful\n        # probes before being automatically shut down.\n        TimeoutProbe: 10m\n\n        # Time after shutting down a worker to retry the\n        # shutdown/destroy operation.\n        TimeoutShutdown: 10s\n\n        # Worker VM image ID.\n        # (aws) AMI identifier\n        # (azure) managed disks: the name of the managed disk image\n        # (azure) shared image gallery: the name of the image definition. Also\n        # see the SharedImageGalleryName and SharedImageGalleryImageVersion fields.\n        # (azure) unmanaged disks (deprecated): the complete URI of the VHD, e.g.\n        # https://xxxxx.blob.core.windows.net/system/Microsoft.Compute/Images/images/xxxxx.vhd\n        ImageID: \"\"\n\n        # Shell script to run on new instances using the cloud\n        # provider's UserData (EC2) or CustomData (Azure) feature.\n        #\n        # It is not necessary to include a #!/bin/sh line.\n        InstanceInitCommand: \"\"\n\n        # An executable file (located on the dispatcher host) to be\n        # copied to cloud instances at runtime and used as the\n        # \"crunch-run\" container runner/supervisor. The default value\n        # is the dispatcher program itself.\n        #\n        # Use an empty string to disable this step: nothing will be\n        # copied, and cloud instances are assumed to have a suitable\n        # version of crunch-run installed; see CrunchRunCommand above.\n        DeployRunnerBinary: \"/proc/self/exe\"\n\n        # Directory to store the crunch-run binary on cloud instances\n        # (see DeployRunnerBinary above).  The \"mkdir -p\" command will\n        # be used to create the directory and its parents if needed.\n        DeployRunnerDirectory: /tmp/arvados-crunch-run\n\n        # Install the Dispatcher's SSH public key (derived from\n        # DispatchPrivateKey) when creating new cloud\n        # instances. Change this to false if you are using a different\n        # mechanism to pre-install the public key on new instances.\n        DeployPublicKey: true\n\n        # Tags to add on all resources (VMs, NICs, disks) created by\n        # the container dispatcher. 
(Arvados's own tags --\n        # InstanceType, IdleBehavior, and InstanceSecret -- will also\n        # be added.)\n        ResourceTags:\n          SAMPLE: \"tag value\"\n\n        # Prefix for predefined tags used by Arvados (InstanceSetID,\n        # InstanceType, InstanceSecret, IdleBehavior). With the\n        # default value \"Arvados\", tags are \"ArvadosInstanceSetID\",\n        # \"ArvadosInstanceSecret\", etc.\n        #\n        # This should only be changed while no cloud resources are in\n        # use and the cloud dispatcher is not running. Otherwise,\n        # VMs/resources that were added using the old tag prefix will\n        # need to be detected and cleaned up manually.\n        TagKeyPrefix: Arvados\n\n        # Cloud driver: \"azure\" (Microsoft Azure), \"ec2\" (Amazon AWS),\n        # or \"loopback\" (run containers on dispatch host for testing\n        # purposes).\n        Driver: ec2\n\n        # Cloud-specific driver parameters.\n        DriverParameters:\n\n          # (ec2) Credentials. Omit or leave blank if using IAM role.\n          AccessKeyID: \"\"\n          SecretAccessKey: \"\"\n\n          # (ec2) Instance configuration.\n\n          # (ec2) Region, like \"us-east-1\".\n          Region: \"\"\n\n          # (ec2) Security group IDs. Omit or use {} to use the\n          # default security group.\n          SecurityGroupIDs:\n            \"SAMPLE\": {}\n\n          # (ec2) One or more subnet IDs. Omit or leave empty to let\n          # AWS choose a default subnet from your default VPC. If\n          # multiple subnets are configured here (enclosed in brackets\n          # like [subnet-abc123, subnet-def456]) the cloud dispatcher\n          # will detect subnet-related errors and retry using a\n          # different subnet. Most sites specify one subnet.\n          SubnetID: \"\"\n\n          EBSVolumeType: gp2\n\n          # (ec2) name of the IAMInstanceProfile for instances started by\n          # the cloud dispatcher. Leave blank when not needed.\n          IAMInstanceProfile: \"\"\n\n          # (ec2) how often to look up spot instance pricing data\n          # (only while running spot instances) for the purpose of\n          # calculating container cost estimates. A value of 0\n          # disables spot price lookups entirely.\n          SpotPriceUpdateInterval: 24h\n\n          # (ec2) per-GiB-month cost of EBS volumes. Matches\n          # EBSVolumeType. Used to account for AddedScratch when\n          # calculating container cost estimates. Note that\n          # https://aws.amazon.com/ebs/pricing/ defines GB to mean\n          # GiB, so an advertised price $0.10/GB indicates a real\n          # price of $0.10/GiB and can be entered here as 0.10.\n          EBSPrice: 0.10\n\n          # (ec2) Mapping of alphabetic instance type prefix to\n          # instance quota group. Any prefix not listed here will be\n          # treated as a distinct instance quota group. 
For example,\n          # \"trn1.2xlarge\" will implicitly belong to instance quota\n          # group \"trn\".\n          #\n          # Knowing that multiple instance types belong to the same\n          # quota group enables the dispatcher to minimize futile\n          # attempts to create new instances when a quota has been\n          # reached.\n          #\n          # All keys must be lowercase.\n          InstanceTypeQuotaGroups:\n            a: standard\n            c: standard\n            d: standard\n            h: standard\n            i: standard\n            m: standard\n            r: standard\n            t: standard\n            z: standard\n            vt: g\n            p5: p5\n\n          # (azure) Credentials.\n          SubscriptionID: \"\"\n          ClientID: \"\"\n          ClientSecret: \"\"\n          TenantID: \"\"\n\n          # (azure) Instance configuration.\n          CloudEnvironment: AzurePublicCloud\n          Location: centralus\n\n          # (azure) The resource group where the VM and virtual NIC will be\n          # created.\n          ResourceGroup: \"\"\n\n          # (azure) The resource group of the Network to use for the virtual\n          # NIC (if different from ResourceGroup)\n          NetworkResourceGroup: \"\"\n          Network: \"\"\n          Subnet: \"\"\n\n          # (azure) managed disks: The resource group where the managed disk\n          # image can be found (if different from ResourceGroup).\n          ImageResourceGroup: \"\"\n\n          # (azure) shared image gallery: the name of the gallery\n          SharedImageGalleryName: \"\"\n          # (azure) shared image gallery: the version of the image definition\n          SharedImageGalleryImageVersion: \"\"\n\n          # (azure) unmanaged disks (deprecated): Where to store the VM VHD blobs\n          StorageAccount: \"\"\n          BlobContainer: \"\"\n\n          # (azure) How long to wait before deleting VHD and NIC\n          # objects that are no longer being used.\n          DeleteDanglingResourcesAfter: 20s\n\n          # Account (that already exists in the VM image) that will be\n          # set up with an ssh authorized key to allow the compute\n          # dispatcher to connect.\n          AdminUsername: crunch\n\n    InstanceTypes:\n\n      # Use the instance type name as the key (in place of \"SAMPLE\" in\n      # this sample entry).\n      SAMPLE:\n        # Cloud provider's instance type. Defaults to the configured type name.\n        ProviderType: \"\"\n        VCPUs: 1\n        RAM: 128MiB\n        IncludedScratch: 16GB\n        AddedScratch: 0\n\n        # Hourly price ($), used to select node types for containers,\n        # and to calculate estimated container costs. For spot\n        # instances on EC2, this is also used as the maximum price\n        # when launching spot instances, while the estimated container\n        # cost is computed based on the current spot price according\n        # to AWS. 
On Azure, and for on-demand instances on EC2, the price\n        # given here is used to compute container cost estimates.\n        Price: 0.1\n        Preemptible: false\n\n        # Include this section if the instance type includes GPU support.\n        GPU:\n          # The software stack, currently \"cuda\" or \"rocm\"\n          Stack: \"cuda\"\n\n          # The version of the driver installed on this instance, in\n          # X.Y format\n          DriverVersion: \"11.0\"\n\n          # The architecture or capabilities of the GPU hardware\n          #\n          #   For 'cuda', this is the Compute Capability in X.Y\n          #   format.\n          #\n          #   For 'rocm', this is the LLVM target (e.g. gfx1100) for\n          #   the GPU hardware.\n          HardwareTarget: \"9.0\"\n\n          # The number of GPUs on this instance\n          DeviceCount: 1\n\n          # The amount of VRAM per GPU\n          VRAM: 8MiB\n\n    StorageClasses:\n\n      # If you use multiple storage classes, specify them here, using\n      # the storage class name as the key (in place of \"SAMPLE\" in\n      # this sample entry).\n      #\n      # Further info/examples:\n      # https://doc.arvados.org/admin/storage-classes.html\n      SAMPLE:\n\n        # Priority determines the order volumes should be searched\n        # when reading data, in cases where a keepstore server has\n        # access to multiple volumes with different storage classes.\n        Priority: 0\n\n        # Default determines which storage class(es) should be used\n        # when a user/client writes data or saves a new collection\n        # without specifying storage classes.\n        #\n        # If any StorageClasses are configured, at least one of them\n        # must have Default: true.\n        Default: true\n\n    Volumes:\n      SAMPLE:\n        # AccessViaHosts specifies which keepstore processes can read\n        # and write data on the volume.\n        #\n        # For a local filesystem, AccessViaHosts has one entry,\n        # indicating which server the filesystem is located on.\n        #\n        # For a network-attached backend accessible by all keepstore\n        # servers, like a cloud storage bucket or an NFS mount,\n        # AccessViaHosts can be empty/omitted.\n        #\n        # Further info/examples:\n        # https://doc.arvados.org/install/configure-fs-storage.html\n        # https://doc.arvados.org/install/configure-s3-object-storage.html\n        # https://doc.arvados.org/install/configure-azure-blob-storage.html\n        AccessViaHosts:\n          SAMPLE:\n            ReadOnly: false\n          \"http://host1.example:25107\": {}\n        ReadOnly: false\n        # AllowTrashWhenReadOnly enables unused and overreplicated\n        # blocks to be trashed/deleted even when ReadOnly is\n        # true. 
Normally, this is false and ReadOnly prevents all\n        # trash/delete operations as well as writes.\n        AllowTrashWhenReadOnly: false\n        Replication: 1\n        StorageClasses:\n          # If you have configured storage classes (see StorageClasses\n          # section above), add an entry here for each storage class\n          # satisfied by this volume.\n          SAMPLE: true\n        Driver: S3\n        DriverParameters:\n          # for s3 driver -- see\n          # https://doc.arvados.org/install/configure-s3-object-storage.html\n          AccessKeyID: aaaaa\n          SecretAccessKey: aaaaa\n          Endpoint: \"\"\n          Region: us-east-1\n          Bucket: aaaaa\n          LocationConstraint: false\n          V2Signature: false\n          UsePathStyle: false\n          IndexPageSize: 1000\n          ConnectTimeout: 1m\n          ReadTimeout: 10m\n          RaceWindow: 24h\n          PrefixLength: 0\n\n          # For S3 driver, potentially unsafe tuning parameter,\n          # intentionally excluded from main documentation.\n          #\n          # Enable deletion (garbage collection) even when the\n          # configured BlobTrashLifetime is zero.  WARNING: eventual\n          # consistency may result in race conditions that can cause\n          # data loss.  Do not enable this unless you understand and\n          # accept the risk.\n          UnsafeDelete: false\n\n          # for azure driver -- see\n          # https://doc.arvados.org/install/configure-azure-blob-storage.html\n          StorageAccountName: aaaaa\n          StorageAccountKey: aaaaa\n          StorageBaseURL: core.windows.net\n          ContainerName: aaaaa\n          RequestTimeout: 30s\n          ListBlobsRetryDelay: 10s\n          ListBlobsMaxAttempts: 10\n          MaxGetBytes: 0\n          WriteRaceInterval: 15s\n          WriteRacePollTime: 1s\n\n          # for local directory driver -- see\n          # https://doc.arvados.org/install/configure-fs-storage.html\n          Root: /var/lib/arvados/keep-data\n\n          # For local directory driver, potentially confusing tuning\n          # parameter, intentionally excluded from main documentation.\n          #\n          # When true, read and write operations (for whole 64MiB\n          # blocks) on an individual volume will be queued and issued\n          # serially.  When false, read and write operations will be\n          # issued concurrently.\n          #\n          # May improve throughput if you have physical spinning disks\n          # and experience contention when there are multiple requests\n          # to the same volume.\n          #\n          # Otherwise, when using SSDs, RAID, or a shared network filesystem, you\n          # should leave this alone.\n          Serialize: false\n\n    RemoteClusters:\n      \"*\":\n        Host: \"\"\n        Proxy: false\n        Scheme: https\n        Insecure: false\n        ActivateUsers: false\n      SAMPLE:\n        # API endpoint host or host:port; default is {id}.arvadosapi.com\n        Host: sample.arvadosapi.com\n\n        # Perform a proxy request when a local client requests an\n        # object belonging to this remote.\n        Proxy: false\n\n        # Default \"https\". Can be set to \"http\" for testing.\n        Scheme: https\n\n        # Disable TLS verify. 
Can be set to true for testing.\n        Insecure: false\n\n        # When users present tokens issued by this remote cluster, and\n        # their accounts are active on the remote cluster, activate\n        # them on this cluster too.\n        ActivateUsers: false\n\n    Workbench:\n      # Workbench1 configs\n      Theme: default\n      ActivationContactLink: mailto:info@arvados.org\n      ArvadosDocsite: https://doc.arvados.org\n      ArvadosPublicDataDocURL: https://playground.arvados.org/projects/public\n      ShowUserAgreementInline: false\n\n      # Set this configuration to true to avoid providing an easy way for users\n      # to share data with unauthenticated users; this may be necessary on\n      # installations where strict data access controls are needed.\n      DisableSharingURLsUI: false\n\n      # Below is a sample setting of the user_profile_form_fields config parameter.\n      # This configuration parameter should be set to either false (to disable) or\n      # to a map as shown below.\n      # Configure the map of input fields to be displayed on the profile page\n      # using the attribute \"key\" for each of the input fields.\n      # This sample shows configuration with one required and one optional form field.\n      # For each of these input fields:\n      #   You can specify \"Type\" as \"text\" or \"select\".\n      #   List the \"Options\" to be displayed for each \"select\" menu.\n      #   Set \"Required\" as \"true\" for any of these fields to make them required.\n      # If any of the required fields are missing in the user's profile, the user will be\n      # redirected to the profile page before they can access any Workbench features.\n      UserProfileFormFields:\n        SAMPLE:\n          Type: select\n          FormFieldTitle: Best color\n          FormFieldDescription: your favorite color\n          Required: false\n          Position: 1\n          Options:\n            red: {}\n            blue: {}\n            green: {}\n            SAMPLE: {}\n\n        # exampleTextValue:  # key that will be set in properties\n        #   Type: text  #\n        #   FormFieldTitle: \"\"\n        #   FormFieldDescription: \"\"\n        #   Required: true\n        #   Position: 1\n        # exampleOptionsValue:\n        #   Type: select\n        #   FormFieldTitle: \"\"\n        #   FormFieldDescription: \"\"\n        #   Required: true\n        #   Position: 1\n        #   Options:\n        #     red: {}\n        #     blue: {}\n        #     yellow: {}\n\n      # Use \"UserProfileFormMessage\" to configure the message you want\n      # to display on the profile page.\n      UserProfileFormMessage: 'Welcome to Arvados. All <span style=\"color:red\">required fields</span> must be completed before you can proceed.'\n\n      SiteName: Arvados Workbench\n\n      # Workbench2 configs\n      FileViewersConfigURL: \"\"\n\n      # Idle time after which the user's session will be automatically closed.\n      # This feature is disabled when set to zero.\n      IdleTimeout: 0s\n\n      # UUID of a collection.  This collection should be shared with\n      # all users.  
Workbench will look for a file \"banner.html\" in\n      # this collection and display its contents (should be\n      # HTML-formatted text) when users first log in to Workbench.\n      BannerUUID: \"\"\n\n      # Workbench welcome screen: this is HTML text that will be\n      # incorporated directly onto the page.\n      WelcomePageHTML: |\n        <img src=\"/arvados-logo-big.png\" style=\"width: 20%; float: right; padding: 1em;\" />\n        <h2>Please log in.</h2>\n\n        <p>If you have never used Arvados Workbench before, logging in\n        for the first time will automatically create a new\n        account.</p>\n\n        <i>Arvados Workbench uses your information only for\n        identification, and does not retrieve any other personal\n        information.</i>\n\n      # Workbench screen displayed to inactive users.  This is HTML\n      # text that will be incorporated directly onto the page.\n      InactivePageHTML: |\n        <img src=\"/arvados-logo-big.png\" style=\"width: 20%; float: right; padding: 1em;\" />\n        <h3>Hi! You're logged in, but...</h3>\n        <p>Your account is inactive.</p>\n        <p>An administrator must activate your account before you can get\n        any further.</p>\n\n      # Connecting to Arvados shell VMs tends to be site-specific.\n      # Put any special instructions here. This is HTML text that will\n      # be incorporated directly onto the Workbench page.\n      SSHHelpPageHTML: |\n        <a href=\"https://doc.arvados.org/user/getting_started/ssh-access-unix.html\">Accessing an Arvados VM with SSH</a> (generic instructions).\n        Site configurations vary.  Contact your local cluster administrator if you have difficulty accessing an Arvados shell node.\n\n      # Sample text if you are using a \"switchyard\" ssh proxy.\n      # Replace \"zzzzz\" with your Cluster ID.\n      #SSHHelpPageHTML: |\n      # <p>Add a section like this to your SSH configuration file ( <i>~/.ssh/config</i>):</p>\n      # <pre>Host *.zzzzz\n      #  TCPKeepAlive yes\n      #  ServerAliveInterval 60\n      #  ProxyCommand ssh -p2222 turnout@switchyard.zzzzz.arvadosapi.com -x -a $SSH_PROXY_FLAGS %h\n      # </pre>\n\n      # If you are using a switchyard ssh proxy, shell node hostnames\n      # may require a special hostname suffix.  In the sample ssh\n      # configuration above, this would be \".zzzzz\".\n      # This is added to the hostname in the \"command line\" column of\n      # the Workbench \"shell VMs\" page.\n      #\n      # If your shell nodes are directly accessible by users without a\n      # proxy and have fully qualified host names, you should leave\n      # this blank.\n      SSHHelpHostSuffix: \"\"\n\n# (Experimental) Restart services automatically when config file\n# changes are detected. Only supported by `arvados-server boot` in\n# dev/test mode.\nAutoReloadConfig: false\n"
  },
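  {
    "path": "doc/examples/cluster-config-snippet.yml",
    "content": "# Illustrative sketch only -- not part of the Arvados source tree.\n# A minimal, hypothetical overlay showing how the InstanceTypes,\n# StorageClasses, and Volumes sections documented in the default\n# config above might be filled in for a small EC2 cluster.  The\n# cluster ID \"xxxxx\", instance type name, volume UUID, and bucket\n# name are all made up for illustration.\nClusters:\n  xxxxx:\n    InstanceTypes:\n      m5large:\n        ProviderType: m5.large\n        VCPUs: 2\n        RAM: 8GiB\n        IncludedScratch: 16GB\n        Price: 0.096\n    StorageClasses:\n      default:\n        Priority: 10\n        Default: true\n    Volumes:\n      xxxxx-nyw5e-000000000000000:\n        Driver: S3\n        Replication: 2\n        StorageClasses:\n          default: true\n        DriverParameters:\n          Region: us-east-1\n          Bucket: example-keep-bucket\n"
  },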
  {
    "path": "lib/config/default.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t. \"gopkg.in/check.v1\"\n)\n\n// DefaultCluster returns the configuration for the given cluster ID\n// with all built-in default values filled in.\nfunc DefaultCluster(c *C, clusterID string) (arvados.Cluster, error) {\n\tlogger := ctxlog.New(os.Stderr, \"text\", \"info\")\n\t// Load a minimal config for the requested cluster ID so the\n\t// loader fills in all of the defaults.\n\tconfdata := []byte(\"Clusters: {\" + clusterID + \": {}}\")\n\tloader := NewLoader(bytes.NewBuffer(confdata), logger)\n\tloader.SkipLegacy = true\n\tloader.Path = \"-\"\n\tcfg, err := loader.Load()\n\tif err != nil {\n\t\treturn arvados.Cluster{}, err\n\t}\n\treturn cfg.Clusters[clusterID], nil\n}\n"
  },
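  {
    "path": "lib/config/example_defaults_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Illustrative sketch only -- not part of the Arvados source tree.\n// Shows one way the DefaultCluster helper in default.go might be used\n// from a gocheck test to inspect built-in defaults.  The suite and\n// test names are hypothetical, and the package's existing gocheck\n// wiring (TestingT) is assumed.\n\npackage config\n\nimport (\n\t. \"gopkg.in/check.v1\"\n)\n\nvar _ = Suite(&exampleDefaultsSuite{})\n\ntype exampleDefaultsSuite struct{}\n\nfunc (s *exampleDefaultsSuite) TestBuiltinDefaults(c *C) {\n\t// Load a config consisting only of defaults for cluster \"zzzzz\".\n\tcluster, err := DefaultCluster(c, \"zzzzz\")\n\tc.Assert(err, IsNil)\n\t// Values documented in the default config above should be filled\n\t// in, e.g., Containers.CloudVMs.MaxInstances defaults to 64.\n\tc.Check(cluster.Containers.CloudVMs.MaxInstances, Equals, 64)\n}\n"
  },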
  {
    "path": "lib/config/deprecated.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage config\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/ghodss/yaml\"\n)\n\ntype deprRequestLimits struct {\n\tMaxItemsPerResponse            *int\n\tMultiClusterRequestConcurrency *int\n}\n\ntype deprCluster struct {\n\tRequestLimits deprRequestLimits\n\tNodeProfiles  map[string]nodeProfile\n\tLogin         struct {\n\t\tGoogleClientID                *string\n\t\tGoogleClientSecret            *string\n\t\tGoogleAlternateEmailAddresses *bool\n\t\tProviderAppID                 *string\n\t\tProviderAppSecret             *string\n\t}\n\tMail struct {\n\t\tSendUserSetupNotificationEmail *bool\n\t\tSupportEmailAddress            *string\n\t}\n\tContainers struct {\n\t\tLSF struct {\n\t\t\tBsubCUDAArguments *[]string\n\t\t}\n\t}\n}\n\ntype deprecatedConfig struct {\n\tClusters map[string]deprCluster\n}\n\ntype nodeProfile struct {\n\tController    systemServiceInstance `json:\"arvados-controller\"`\n\tHealth        systemServiceInstance `json:\"arvados-health\"`\n\tKeepbalance   systemServiceInstance `json:\"keep-balance\"`\n\tKeepproxy     systemServiceInstance `json:\"keepproxy\"`\n\tKeepstore     systemServiceInstance `json:\"keepstore\"`\n\tKeepweb       systemServiceInstance `json:\"keep-web\"`\n\tDispatchCloud systemServiceInstance `json:\"arvados-dispatch-cloud\"`\n\tRailsAPI      systemServiceInstance `json:\"arvados-api-server\"`\n\tWebsocket     systemServiceInstance `json:\"arvados-ws\"`\n\tWorkbench1    systemServiceInstance `json:\"arvados-workbench\"`\n}\n\ntype systemServiceInstance struct {\n\tListen   string\n\tTLS      bool\n\tInsecure bool\n}\n\nfunc (ldr *Loader) applyDeprecatedConfig(cfg *arvados.Config) error {\n\tvar dc deprecatedConfig\n\terr := yaml.Unmarshal(ldr.configdata, &dc)\n\tif err != nil {\n\t\treturn err\n\t}\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor id, dcluster := range dc.Clusters {\n\t\tcluster, ok := cfg.Clusters[id]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"can't load legacy config %q that is not present in current config\", id)\n\t\t}\n\t\tfor name, np := range dcluster.NodeProfiles {\n\t\t\tif name == \"*\" || name == os.Getenv(\"ARVADOS_NODE_PROFILE\") || name == hostname {\n\t\t\t\tname = \"localhost\"\n\t\t\t} else if ldr.Logger != nil {\n\t\t\t\tldr.Logger.Warnf(\"overriding Clusters.%s.Services using Clusters.%s.NodeProfiles.%s (guessing %q is a hostname)\", id, id, name, name)\n\t\t\t}\n\t\t\tapplyDeprecatedNodeProfile(name, np.RailsAPI, &cluster.Services.RailsAPI)\n\t\t\tapplyDeprecatedNodeProfile(name, np.Controller, &cluster.Services.Controller)\n\t\t\tapplyDeprecatedNodeProfile(name, np.DispatchCloud, &cluster.Services.DispatchCloud)\n\t\t}\n\t\tif dst, n := &cluster.API.MaxItemsPerResponse, dcluster.RequestLimits.MaxItemsPerResponse; n != nil && *n != *dst {\n\t\t\t*dst = *n\n\t\t}\n\t\tif dst, n := &cluster.API.MaxRequestAmplification, dcluster.RequestLimits.MultiClusterRequestConcurrency; n != nil && *n != *dst {\n\t\t\t*dst = *n\n\t\t}\n\t\tif dst, addr := &cluster.Users.SupportEmailAddress, dcluster.Mail.SupportEmailAddress; addr != nil {\n\t\t\t*dst = *addr\n\t\t\tldr.Logger.Warnf(\"using your old config key Mail.SupportEmailAddress -- but you should rename it to Users.SupportEmailAddress\")\n\t\t}\n\t\tif dst, b := 
&cluster.Users.SendUserSetupNotificationEmail, dcluster.Mail.SendUserSetupNotificationEmail; b != nil {\n\t\t\t*dst = *b\n\t\t\tldr.Logger.Warnf(\"using your old config key Mail.SendUserSetupNotificationEmail -- but you should rename it to Users.SendUserSetupNotificationEmail\")\n\t\t}\n\t\tif dst, n := &cluster.Containers.LSF.BsubGPUArguments, dcluster.Containers.LSF.BsubCUDAArguments; n != nil {\n\t\t\t*dst = *n\n\t\t\tldr.Logger.Warnf(\"using your old config key Containers.LSF.BsubCUDAArguments -- but you should rename it to Containers.LSF.BsubGPUArguments\")\n\t\t}\n\n\t\t// Google* moved to Google.*\n\t\tif dst, n := &cluster.Login.Google.ClientID, dcluster.Login.GoogleClientID; n != nil && *n != *dst {\n\t\t\t*dst = *n\n\t\t\tif *n != \"\" {\n\t\t\t\t// In old config, non-empty ClientID meant enable\n\t\t\t\tcluster.Login.Google.Enable = true\n\t\t\t}\n\t\t}\n\t\tif dst, n := &cluster.Login.Google.ClientSecret, dcluster.Login.GoogleClientSecret; n != nil && *n != *dst {\n\t\t\t*dst = *n\n\t\t}\n\t\tif dst, n := &cluster.Login.Google.AlternateEmailAddresses, dcluster.Login.GoogleAlternateEmailAddresses; n != nil && *n != *dst {\n\t\t\t*dst = *n\n\t\t}\n\t\tcfg.Clusters[id] = cluster\n\t}\n\treturn nil\n}\n\nfunc (ldr *Loader) applyDeprecatedVolumeDriverParameters(cfg *arvados.Config) error {\n\tfor clusterID, cluster := range cfg.Clusters {\n\t\tfor volID, vol := range cluster.Volumes {\n\t\t\tif vol.Driver == \"S3\" {\n\t\t\t\tvar params struct {\n\t\t\t\t\tAccessKey       string `json:\",omitempty\"`\n\t\t\t\t\tSecretKey       string `json:\",omitempty\"`\n\t\t\t\t\tAccessKeyID     string\n\t\t\t\t\tSecretAccessKey string\n\t\t\t\t}\n\t\t\t\terr := json.Unmarshal(vol.DriverParameters, &params)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"error loading %s.Volumes.%s.DriverParameters: %w\", clusterID, volID, err)\n\t\t\t\t}\n\t\t\t\tif params.AccessKey != \"\" || params.SecretKey != \"\" {\n\t\t\t\t\tif params.AccessKeyID != \"\" || params.SecretAccessKey != \"\" {\n\t\t\t\t\t\treturn fmt.Errorf(\"cannot use old keys (AccessKey/SecretKey) and new keys (AccessKeyID/SecretAccessKey) at the same time in %s.Volumes.%s.DriverParameters -- you must remove the old config keys\", clusterID, volID)\n\t\t\t\t\t}\n\t\t\t\t\tvar allparams map[string]interface{}\n\t\t\t\t\terr = json.Unmarshal(vol.DriverParameters, &allparams)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"error loading %s.Volumes.%s.DriverParameters: %w\", clusterID, volID, err)\n\t\t\t\t\t}\n\t\t\t\t\tfor k := range allparams {\n\t\t\t\t\t\tif lk := strings.ToLower(k); lk == \"accesskey\" || lk == \"secretkey\" {\n\t\t\t\t\t\t\tdelete(allparams, k)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tldr.Logger.Warnf(\"using your old config keys %s.Volumes.%s.DriverParameters.AccessKey/SecretKey -- but you should rename them to AccessKeyID/SecretAccessKey\", clusterID, volID)\n\t\t\t\t\tallparams[\"AccessKeyID\"] = params.AccessKey\n\t\t\t\t\tallparams[\"SecretAccessKey\"] = params.SecretKey\n\t\t\t\t\tvol.DriverParameters, err = json.Marshal(allparams)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tcluster.Volumes[volID] = vol\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc applyDeprecatedNodeProfile(hostname string, ssi systemServiceInstance, svc *arvados.Service) {\n\tscheme := \"https\"\n\tif !ssi.TLS {\n\t\tscheme = \"http\"\n\t}\n\tif svc.InternalURLs == nil {\n\t\tsvc.InternalURLs = map[arvados.URL]arvados.ServiceInstance{}\n\t}\n\thost := ssi.Listen\n\tif host == \"\" 
{\n\t\treturn\n\t}\n\tif strings.HasPrefix(host, \":\") {\n\t\thost = hostname + host\n\t}\n\tsvc.InternalURLs[arvados.URL{Scheme: scheme, Host: host, Path: \"/\"}] = arvados.ServiceInstance{}\n}\n\nfunc (ldr *Loader) loadOldConfigHelper(component, path string, target interface{}) error {\n\tif path == \"\" {\n\t\treturn nil\n\t}\n\tbuf, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tldr.Logger.Warnf(\"you should remove the legacy %v config file (%s) after migrating all config keys to the cluster configuration file (%s)\", component, path, ldr.Path)\n\n\terr = yaml.Unmarshal(buf, target)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s: %s\", path, err)\n\t}\n\treturn nil\n}\n\ntype oldCrunchDispatchSlurmConfig struct {\n\tClient *arvados.Client\n\n\tSbatchArguments *[]string\n\tPollPeriod      *arvados.Duration\n\tPrioritySpread  *int64\n\n\t// crunch-run command to invoke. The container UUID will be\n\t// appended. If nil, []string{\"crunch-run\"} will be used.\n\t//\n\t// Example: []string{\"crunch-run\", \"--cgroup-parent-subsystem=memory\"}\n\tCrunchRunCommand *[]string\n\n\t// Extra RAM to reserve (in Bytes) for a SLURM job, in addition\n\t// to the amount specified in the container's RuntimeConstraints\n\tReserveExtraRAM *int64\n\n\t// Minimum time between two attempts to run the same container\n\tMinRetryPeriod *arvados.Duration\n\n\t// Batch size for container queries\n\tBatchSize *int64\n}\n\nconst defaultCrunchDispatchSlurmConfigPath = \"/etc/arvados/crunch-dispatch-slurm/crunch-dispatch-slurm.yml\"\n\nfunc loadOldClientConfig(cluster *arvados.Cluster, client *arvados.Client) {\n\tif client == nil {\n\t\treturn\n\t}\n\tif client.APIHost != \"\" {\n\t\tcluster.Services.Controller.ExternalURL.Host = client.APIHost\n\t\tcluster.Services.Controller.ExternalURL.Path = \"/\"\n\t}\n\tif client.Scheme != \"\" {\n\t\tcluster.Services.Controller.ExternalURL.Scheme = client.Scheme\n\t} else {\n\t\tcluster.Services.Controller.ExternalURL.Scheme = \"https\"\n\t}\n\tif client.AuthToken != \"\" {\n\t\tcluster.SystemRootToken = client.AuthToken\n\t}\n\tcluster.TLS.Insecure = client.Insecure\n\tks := \"\"\n\tfor i, u := range client.KeepServiceURIs {\n\t\tif i > 0 {\n\t\t\tks += \" \"\n\t\t}\n\t\tks += u\n\t}\n\tcluster.Containers.SLURM.SbatchEnvironmentVariables = map[string]string{\"ARVADOS_KEEP_SERVICES\": ks}\n}\n\n// update config using values from a crunch-dispatch-slurm config file.\nfunc (ldr *Loader) loadOldCrunchDispatchSlurmConfig(cfg *arvados.Config) error {\n\tif ldr.CrunchDispatchSlurmPath == \"\" {\n\t\treturn nil\n\t}\n\tvar oc oldCrunchDispatchSlurmConfig\n\terr := ldr.loadOldConfigHelper(\"crunch-dispatch-slurm\", ldr.CrunchDispatchSlurmPath, &oc)\n\tif os.IsNotExist(err) && (ldr.CrunchDispatchSlurmPath == defaultCrunchDispatchSlurmConfigPath) {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tloadOldClientConfig(cluster, oc.Client)\n\n\tif oc.SbatchArguments != nil {\n\t\tcluster.Containers.SLURM.SbatchArgumentsList = *oc.SbatchArguments\n\t}\n\tif oc.PollPeriod != nil {\n\t\tcluster.Containers.CloudVMs.PollInterval = *oc.PollPeriod\n\t}\n\tif oc.PrioritySpread != nil {\n\t\tcluster.Containers.SLURM.PrioritySpread = *oc.PrioritySpread\n\t}\n\tif oc.CrunchRunCommand != nil {\n\t\tif len(*oc.CrunchRunCommand) >= 1 {\n\t\t\tcluster.Containers.CrunchRunCommand = (*oc.CrunchRunCommand)[0]\n\t\t}\n\t\tif len(*oc.CrunchRunCommand) >= 2 
{\n\t\t\tcluster.Containers.CrunchRunArgumentsList = (*oc.CrunchRunCommand)[1:]\n\t\t}\n\t}\n\tif oc.ReserveExtraRAM != nil {\n\t\tcluster.Containers.ReserveExtraRAM = arvados.ByteSize(*oc.ReserveExtraRAM)\n\t}\n\tif oc.MinRetryPeriod != nil {\n\t\tcluster.Containers.MinRetryPeriod = *oc.MinRetryPeriod\n\t}\n\tif oc.BatchSize != nil {\n\t\tcluster.API.MaxItemsPerResponse = int(*oc.BatchSize)\n\t}\n\n\tcfg.Clusters[cluster.ClusterID] = *cluster\n\treturn nil\n}\n\ntype oldWsConfig struct {\n\tClient       *arvados.Client\n\tPostgres     *arvados.PostgreSQLConnection\n\tPostgresPool *int\n\tListen       *string\n\tLogLevel     *string\n\tLogFormat    *string\n\n\tPingTimeout      *arvados.Duration\n\tClientEventQueue *int\n\tServerEventQueue *int\n\n\tManagementToken *string\n}\n\nconst defaultWebsocketConfigPath = \"/etc/arvados/ws/ws.yml\"\n\n// update config using values from an arvados-ws config file.\nfunc (ldr *Loader) loadOldWebsocketConfig(cfg *arvados.Config) error {\n\tif ldr.WebsocketPath == \"\" {\n\t\treturn nil\n\t}\n\tvar oc oldWsConfig\n\terr := ldr.loadOldConfigHelper(\"arvados-ws\", ldr.WebsocketPath, &oc)\n\tif os.IsNotExist(err) && ldr.WebsocketPath == defaultWebsocketConfigPath {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tloadOldClientConfig(cluster, oc.Client)\n\n\tif oc.Postgres != nil {\n\t\tcluster.PostgreSQL.Connection = *oc.Postgres\n\t}\n\tif oc.PostgresPool != nil {\n\t\tcluster.PostgreSQL.ConnectionPool = *oc.PostgresPool\n\t}\n\tif oc.Listen != nil {\n\t\tcluster.Services.Websocket.InternalURLs[arvados.URL{Host: *oc.Listen, Path: \"/\"}] = arvados.ServiceInstance{}\n\t}\n\tif oc.LogLevel != nil {\n\t\tcluster.SystemLogs.LogLevel = *oc.LogLevel\n\t}\n\tif oc.LogFormat != nil {\n\t\tcluster.SystemLogs.Format = *oc.LogFormat\n\t}\n\tif oc.PingTimeout != nil {\n\t\tcluster.API.SendTimeout = *oc.PingTimeout\n\t}\n\tif oc.ClientEventQueue != nil {\n\t\tcluster.API.WebsocketClientEventQueue = *oc.ClientEventQueue\n\t}\n\tif oc.ServerEventQueue != nil {\n\t\tcluster.API.WebsocketServerEventQueue = *oc.ServerEventQueue\n\t}\n\tif oc.ManagementToken != nil {\n\t\tcluster.ManagementToken = *oc.ManagementToken\n\t}\n\n\tcfg.Clusters[cluster.ClusterID] = *cluster\n\treturn nil\n}\n\ntype oldKeepProxyConfig struct {\n\tClient          *arvados.Client\n\tListen          *string\n\tDisableGet      *bool\n\tDisablePut      *bool\n\tDefaultReplicas *int\n\tTimeout         *arvados.Duration\n\tPIDFile         *string\n\tDebug           *bool\n\tManagementToken *string\n}\n\nconst defaultKeepproxyConfigPath = \"/etc/arvados/keepproxy/keepproxy.yml\"\n\nfunc (ldr *Loader) loadOldKeepproxyConfig(cfg *arvados.Config) error {\n\tif ldr.KeepproxyPath == \"\" {\n\t\treturn nil\n\t}\n\tvar oc oldKeepProxyConfig\n\terr := ldr.loadOldConfigHelper(\"keepproxy\", ldr.KeepproxyPath, &oc)\n\tif os.IsNotExist(err) && ldr.KeepproxyPath == defaultKeepproxyConfigPath {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tloadOldClientConfig(cluster, oc.Client)\n\n\tif oc.Listen != nil {\n\t\tcluster.Services.Keepproxy.InternalURLs[arvados.URL{Host: *oc.Listen, Path: \"/\"}] = arvados.ServiceInstance{}\n\t}\n\tif oc.DefaultReplicas != nil {\n\t\tcluster.Collections.DefaultReplication = *oc.DefaultReplicas\n\t}\n\tif oc.Timeout != nil {\n\t\tcluster.API.KeepServiceRequestTimeout = 
*oc.Timeout\n\t}\n\tif oc.Debug != nil {\n\t\tif *oc.Debug && cluster.SystemLogs.LogLevel != \"debug\" {\n\t\t\tcluster.SystemLogs.LogLevel = \"debug\"\n\t\t} else if !*oc.Debug && cluster.SystemLogs.LogLevel != \"info\" {\n\t\t\tcluster.SystemLogs.LogLevel = \"info\"\n\t\t}\n\t}\n\tif oc.ManagementToken != nil {\n\t\tcluster.ManagementToken = *oc.ManagementToken\n\t}\n\n\t// The following legacy options are no longer supported. If they are set to\n\t// true or PIDFile has a value, error out and notify the user.\n\tunsupportedEntry := func(cfgEntry string) error {\n\t\treturn fmt.Errorf(\"the keepproxy %s configuration option is no longer supported, please remove it from your configuration file\", cfgEntry)\n\t}\n\tif oc.DisableGet != nil && *oc.DisableGet {\n\t\treturn unsupportedEntry(\"DisableGet\")\n\t}\n\tif oc.DisablePut != nil && *oc.DisablePut {\n\t\treturn unsupportedEntry(\"DisablePut\")\n\t}\n\tif oc.PIDFile != nil && *oc.PIDFile != \"\" {\n\t\treturn unsupportedEntry(\"PIDFile\")\n\t}\n\n\tcfg.Clusters[cluster.ClusterID] = *cluster\n\treturn nil\n}\n\nconst defaultKeepWebConfigPath = \"/etc/arvados/keep-web/keep-web.yml\"\n\ntype oldKeepWebConfig struct {\n\tClient *arvados.Client\n\n\tListen *string\n\n\tAnonymousTokens    *[]string\n\tAttachmentOnlyHost *string\n\tTrustAllContent    *bool\n\n\tCache struct {\n\t\tTTL                  *arvados.Duration\n\t\tUUIDTTL              *arvados.Duration\n\t\tMaxCollectionEntries *int\n\t\tMaxCollectionBytes   *int64\n\t\tMaxUUIDEntries       *int\n\t}\n\n\t// Hack to support old command line flag, which is a bool\n\t// meaning \"get actual token from environment\".\n\tdeprecatedAllowAnonymous *bool\n\n\t// Authorization token to be included in all health check requests.\n\tManagementToken *string\n}\n\nfunc (ldr *Loader) loadOldKeepWebConfig(cfg *arvados.Config) error {\n\tif ldr.KeepWebPath == \"\" {\n\t\treturn nil\n\t}\n\tvar oc oldKeepWebConfig\n\terr := ldr.loadOldConfigHelper(\"keep-web\", ldr.KeepWebPath, &oc)\n\tif os.IsNotExist(err) && ldr.KeepWebPath == defaultKeepWebConfigPath {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tloadOldClientConfig(cluster, oc.Client)\n\n\tif oc.Listen != nil {\n\t\tcluster.Services.WebDAV.InternalURLs[arvados.URL{Host: *oc.Listen, Path: \"/\"}] = arvados.ServiceInstance{}\n\t\tcluster.Services.WebDAVDownload.InternalURLs[arvados.URL{Host: *oc.Listen, Path: \"/\"}] = arvados.ServiceInstance{}\n\t}\n\tif oc.AttachmentOnlyHost != nil {\n\t\tcluster.Services.WebDAVDownload.ExternalURL = arvados.URL{Host: *oc.AttachmentOnlyHost, Path: \"/\"}\n\t}\n\tif oc.ManagementToken != nil {\n\t\tcluster.ManagementToken = *oc.ManagementToken\n\t}\n\tif oc.TrustAllContent != nil {\n\t\tcluster.Collections.TrustAllContent = *oc.TrustAllContent\n\t}\n\tif oc.Cache.TTL != nil {\n\t\tcluster.Collections.WebDAVCache.TTL = *oc.Cache.TTL\n\t}\n\tif oc.Cache.MaxCollectionBytes != nil {\n\t\tcluster.Collections.WebDAVCache.MaxCollectionBytes = arvados.ByteSize(*oc.Cache.MaxCollectionBytes)\n\t}\n\tif oc.AnonymousTokens != nil {\n\t\tif len(*oc.AnonymousTokens) > 0 {\n\t\t\tcluster.Users.AnonymousUserToken = (*oc.AnonymousTokens)[0]\n\t\t\tif len(*oc.AnonymousTokens) > 1 {\n\t\t\t\tldr.Logger.Warn(\"More than one anonymous token configured; using only the first and discarding the rest.\")\n\t\t\t}\n\t\t}\n\t}\n\n\tcfg.Clusters[cluster.ClusterID] = *cluster\n\treturn nil\n}\n\nconst defaultKeepBalanceConfigPath = 
\"/etc/arvados/keep-balance/keep-balance.yml\"\n\ntype oldKeepBalanceConfig struct {\n\tClient              *arvados.Client\n\tListen              *string\n\tKeepServiceTypes    *[]string\n\tKeepServiceList     *arvados.KeepServiceList\n\tRunPeriod           *arvados.Duration\n\tCollectionBatchSize *int\n\tCollectionBuffers   *int\n\tRequestTimeout      *arvados.Duration\n\tLostBlocksFile      *string\n\tManagementToken     *string\n}\n\nfunc (ldr *Loader) loadOldKeepBalanceConfig(cfg *arvados.Config) error {\n\tif ldr.KeepBalancePath == \"\" {\n\t\treturn nil\n\t}\n\tvar oc oldKeepBalanceConfig\n\terr := ldr.loadOldConfigHelper(\"keep-balance\", ldr.KeepBalancePath, &oc)\n\tif os.IsNotExist(err) && ldr.KeepBalancePath == defaultKeepBalanceConfigPath {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tloadOldClientConfig(cluster, oc.Client)\n\n\tif oc.Listen != nil {\n\t\tcluster.Services.Keepbalance.InternalURLs[arvados.URL{Host: *oc.Listen}] = arvados.ServiceInstance{}\n\t}\n\tif oc.ManagementToken != nil {\n\t\tcluster.ManagementToken = *oc.ManagementToken\n\t}\n\tif oc.RunPeriod != nil {\n\t\tcluster.Collections.BalancePeriod = *oc.RunPeriod\n\t}\n\tif oc.LostBlocksFile != nil {\n\t\tcluster.Collections.BlobMissingReport = *oc.LostBlocksFile\n\t}\n\tif oc.CollectionBatchSize != nil {\n\t\tcluster.Collections.BalanceCollectionBatch = *oc.CollectionBatchSize\n\t}\n\tif oc.CollectionBuffers != nil {\n\t\tcluster.Collections.BalanceCollectionBuffers = *oc.CollectionBuffers\n\t}\n\tif oc.RequestTimeout != nil {\n\t\tcluster.API.KeepServiceRequestTimeout = *oc.RequestTimeout\n\t}\n\n\tmsg := \"The %s configuration option is no longer supported. Please remove it from your configuration file. 
See the keep-balance upgrade notes at https://doc.arvados.org/admin/upgrading.html for more details.\"\n\n\t// If the keep service type provided is \"disk\", silently ignore it, since\n\t// this is what ends up being done anyway.\n\tif oc.KeepServiceTypes != nil {\n\t\tnumTypes := len(*oc.KeepServiceTypes)\n\t\tif numTypes != 0 && !(numTypes == 1 && (*oc.KeepServiceTypes)[0] == \"disk\") {\n\t\t\treturn fmt.Errorf(msg, \"KeepServiceTypes\")\n\t\t}\n\t}\n\n\tif oc.KeepServiceList != nil {\n\t\treturn fmt.Errorf(msg, \"KeepServiceList\")\n\t}\n\n\tcfg.Clusters[cluster.ClusterID] = *cluster\n\treturn nil\n}\n\nfunc (ldr *Loader) loadOldEnvironmentVariables(cfg *arvados.Config) error {\n\tif os.Getenv(\"ARVADOS_API_TOKEN\") == \"\" && os.Getenv(\"ARVADOS_API_HOST\") == \"\" {\n\t\treturn nil\n\t}\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif tok := os.Getenv(\"ARVADOS_API_TOKEN\"); tok != \"\" && cluster.SystemRootToken == \"\" {\n\t\tldr.Logger.Warn(\"SystemRootToken missing from cluster config, falling back to ARVADOS_API_TOKEN environment variable\")\n\t\tcluster.SystemRootToken = tok\n\t}\n\tif apihost := os.Getenv(\"ARVADOS_API_HOST\"); apihost != \"\" && cluster.Services.Controller.ExternalURL.Host == \"\" {\n\t\tldr.Logger.Warn(\"Services.Controller.ExternalURL missing from cluster config, falling back to ARVADOS_API_HOST(_INSECURE) environment variables\")\n\t\tu, err := url.Parse(\"https://\" + apihost)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot parse ARVADOS_API_HOST: %s\", err)\n\t\t}\n\t\tcluster.Services.Controller.ExternalURL = arvados.URL(*u)\n\t\tif i := os.Getenv(\"ARVADOS_API_HOST_INSECURE\"); i != \"\" && i != \"0\" {\n\t\t\tcluster.TLS.Insecure = true\n\t\t}\n\t}\n\tcfg.Clusters[cluster.ClusterID] = *cluster\n\treturn nil\n}\n"
  },
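  {
    "path": "lib/config/example_legacy_migration_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Illustrative sketch only -- not part of the Arvados source tree.\n// Demonstrates how the legacy loaders in deprecated.go merge an\n// old-style crunch-dispatch-slurm config file into the cluster\n// config, assuming Load() runs them when SkipLegacy is left false.\n// The file path, suite, and test names are hypothetical.\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t. \"gopkg.in/check.v1\"\n)\n\nvar _ = Suite(&exampleLegacySuite{})\n\ntype exampleLegacySuite struct{}\n\nfunc (s *exampleLegacySuite) TestSlurmConfigMigration(c *C) {\n\t// Write an old-style config with keys that\n\t// loadOldCrunchDispatchSlurmConfig knows how to migrate.\n\tlegacy, err := os.CreateTemp(\"\", \"crunch-dispatch-slurm-*.yml\")\n\tc.Assert(err, IsNil)\n\tdefer os.Remove(legacy.Name())\n\t_, err = legacy.WriteString(\"PrioritySpread: 1000\\nBatchSize: 250\\n\")\n\tc.Assert(err, IsNil)\n\tc.Assert(legacy.Close(), IsNil)\n\n\tlogger := ctxlog.New(os.Stderr, \"text\", \"info\")\n\tldr := NewLoader(bytes.NewBufferString(\"Clusters: {zzzzz: {}}\"), logger)\n\tldr.Path = \"-\"\n\tldr.CrunchDispatchSlurmPath = legacy.Name()\n\tcfg, err := ldr.Load()\n\tc.Assert(err, IsNil)\n\n\t// The legacy keys should land in their new locations.\n\tcluster := cfg.Clusters[\"zzzzz\"]\n\tc.Check(cluster.Containers.SLURM.PrioritySpread, Equals, int64(1000))\n\tc.Check(cluster.API.MaxItemsPerResponse, Equals, 250)\n}\n"
  },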
  {
    "path": "lib/config/deprecated_keepstore.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage config\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"math/big\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst defaultKeepstoreConfigPath = \"/etc/arvados/keepstore/keepstore.yml\"\n\ntype oldKeepstoreConfig struct {\n\tDebug  *bool\n\tListen *string\n\n\tLogFormat *string\n\n\tPIDFile *string\n\n\tMaxBuffers  *int\n\tMaxRequests *int\n\n\tBlobSignatureTTL    *arvados.Duration\n\tBlobSigningKeyFile  *string\n\tRequireSignatures   *bool\n\tSystemAuthTokenFile *string\n\tEnableDelete        *bool\n\tTrashLifetime       *arvados.Duration\n\tTrashCheckInterval  *arvados.Duration\n\tPullWorkers         *int\n\tTrashWorkers        *int\n\tEmptyTrashWorkers   *int\n\tTLSCertificateFile  *string\n\tTLSKeyFile          *string\n\n\tVolumes *oldKeepstoreVolumeList\n\n\tManagementToken *string\n\n\tDiscoverVolumesFromMountsFile string // not a real legacy config -- just useful for tests\n}\n\ntype oldKeepstoreVolumeList []oldKeepstoreVolume\n\ntype oldKeepstoreVolume struct {\n\tarvados.Volume\n\tType string `json:\",omitempty\"`\n\n\t// Azure driver configs\n\tStorageAccountName    string           `json:\",omitempty\"`\n\tStorageAccountKeyFile string           `json:\",omitempty\"`\n\tStorageBaseURL        string           `json:\",omitempty\"`\n\tContainerName         string           `json:\",omitempty\"`\n\tAzureReplication      int              `json:\",omitempty\"`\n\tRequestTimeout        arvados.Duration `json:\",omitempty\"`\n\tListBlobsRetryDelay   arvados.Duration `json:\",omitempty\"`\n\tListBlobsMaxAttempts  int              `json:\",omitempty\"`\n\n\t// S3 driver configs\n\tAccessKeyFile      string           `json:\",omitempty\"`\n\tSecretKeyFile      string           `json:\",omitempty\"`\n\tEndpoint           string           `json:\",omitempty\"`\n\tRegion             string           `json:\",omitempty\"`\n\tBucket             string           `json:\",omitempty\"`\n\tLocationConstraint bool             `json:\",omitempty\"`\n\tIndexPageSize      int              `json:\",omitempty\"`\n\tS3Replication      int              `json:\",omitempty\"`\n\tConnectTimeout     arvados.Duration `json:\",omitempty\"`\n\tReadTimeout        arvados.Duration `json:\",omitempty\"`\n\tRaceWindow         arvados.Duration `json:\",omitempty\"`\n\tUnsafeDelete       bool             `json:\",omitempty\"`\n\n\t// Directory driver configs\n\tRoot                 string\n\tDirectoryReplication int\n\tSerialize            bool\n\n\t// Common configs\n\tReadOnly       bool     `json:\",omitempty\"`\n\tStorageClasses []string `json:\",omitempty\"`\n}\n\n// update config using values from an old-style keepstore config file.\nfunc (ldr *Loader) loadOldKeepstoreConfig(cfg *arvados.Config) error {\n\tif ldr.KeepstorePath == \"\" {\n\t\treturn nil\n\t}\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting hostname: %s\", err)\n\t}\n\n\tvar oc oldKeepstoreConfig\n\terr = ldr.loadOldConfigHelper(\"keepstore\", ldr.KeepstorePath, &oc)\n\tif os.IsNotExist(err) && ldr.KeepstorePath == defaultKeepstoreConfigPath {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmyURL := 
arvados.URL{Scheme: \"http\", Path: \"/\"}\n\tif oc.TLSCertificateFile != nil && oc.TLSKeyFile != nil {\n\t\tmyURL.Scheme = \"https\"\n\t}\n\n\tif v := oc.Debug; v == nil {\n\t} else if *v && cluster.SystemLogs.LogLevel != \"debug\" {\n\t\tcluster.SystemLogs.LogLevel = \"debug\"\n\t} else if !*v && cluster.SystemLogs.LogLevel != \"info\" {\n\t\tcluster.SystemLogs.LogLevel = \"info\"\n\t}\n\n\tif v := oc.TLSCertificateFile; v != nil {\n\t\tcluster.TLS.Certificate = \"file://\" + *v\n\t}\n\tif v := oc.TLSKeyFile; v != nil {\n\t\tcluster.TLS.Key = \"file://\" + *v\n\t}\n\tif v := oc.Listen; v != nil {\n\t\tif _, ok := cluster.Services.Keepstore.InternalURLs[arvados.URL{Scheme: myURL.Scheme, Host: *v, Path: \"/\"}]; ok {\n\t\t\t// already listed\n\t\t\tmyURL.Host = *v\n\t\t} else if len(*v) > 1 && (*v)[0] == ':' {\n\t\t\tmyURL.Host = net.JoinHostPort(hostname, (*v)[1:])\n\t\t\tcluster.Services.Keepstore.InternalURLs[myURL] = arvados.ServiceInstance{}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"unable to migrate Listen value %q -- you must update Services.Keepstore.InternalURLs manually, and comment out the Listen entry in your legacy keepstore config file\", *v)\n\t\t}\n\t} else {\n\t\tfor url := range cluster.Services.Keepstore.InternalURLs {\n\t\t\tif host, _, _ := net.SplitHostPort(url.Host); host == hostname {\n\t\t\t\tmyURL = url\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif myURL.Host == \"\" {\n\t\t\treturn fmt.Errorf(\"unable to migrate legacy keepstore config: no 'Listen' key, and hostname %q does not match an entry in Services.Keepstore.InternalURLs\", hostname)\n\t\t}\n\t}\n\n\tif v := oc.LogFormat; v != nil {\n\t\tcluster.SystemLogs.Format = *v\n\t}\n\tif v := oc.MaxBuffers; v != nil {\n\t\tcluster.API.MaxKeepBlobBuffers = *v\n\t}\n\tif v := oc.MaxRequests; v != nil {\n\t\tcluster.API.MaxConcurrentRequests = *v\n\t}\n\tif v := oc.BlobSignatureTTL; v != nil {\n\t\tcluster.Collections.BlobSigningTTL = *v\n\t}\n\tif v := oc.BlobSigningKeyFile; v != nil {\n\t\tbuf, err := ioutil.ReadFile(*v)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading BlobSigningKeyFile: %s\", err)\n\t\t}\n\t\tif key := strings.TrimSpace(string(buf)); key != cluster.Collections.BlobSigningKey {\n\t\t\tcluster.Collections.BlobSigningKey = key\n\t\t}\n\t}\n\tif v := oc.RequireSignatures; v != nil {\n\t\tcluster.Collections.BlobSigning = *v\n\t}\n\tif v := oc.SystemAuthTokenFile; v != nil {\n\t\tf, err := os.Open(*v)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error opening SystemAuthTokenFile: %s\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tbuf, err := ioutil.ReadAll(f)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading SystemAuthTokenFile: %s\", err)\n\t\t}\n\t\tif key := strings.TrimSpace(string(buf)); key != cluster.SystemRootToken {\n\t\t\tcluster.SystemRootToken = key\n\t\t}\n\t}\n\tif v := oc.EnableDelete; v != nil {\n\t\tcluster.Collections.BlobTrash = *v\n\t}\n\tif v := oc.TrashLifetime; v != nil {\n\t\tcluster.Collections.BlobTrashLifetime = *v\n\t}\n\tif v := oc.TrashCheckInterval; v != nil {\n\t\tcluster.Collections.BlobTrashCheckInterval = *v\n\t}\n\tif v := oc.TrashWorkers; v != nil {\n\t\tcluster.Collections.BlobTrashConcurrency = *v\n\t}\n\tif v := oc.EmptyTrashWorkers; v != nil {\n\t\tcluster.Collections.BlobDeleteConcurrency = *v\n\t}\n\tif v := oc.PullWorkers; v != nil {\n\t\tcluster.Collections.BlobReplicateConcurrency = *v\n\t}\n\tif oc.Volumes == nil || len(*oc.Volumes) == 0 {\n\t\tldr.Logger.Warn(\"no volumes in legacy config; discovering local directory volumes\")\n\t\terr := 
ldr.discoverLocalVolumes(cluster, oc.DiscoverVolumesFromMountsFile, myURL)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error discovering local directory volumes: %s\", err)\n\t\t}\n\t} else {\n\t\terr := ldr.migrateOldKeepstoreVolumes(cluster, oc, myURL)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := ldr.checkPendingKeepstoreMigrations(cluster); err != nil {\n\t\treturn err\n\t}\n\n\tcfg.Clusters[cluster.ClusterID] = *cluster\n\treturn nil\n}\n\n// Merge Volumes section of old keepstore config into cluster config.\nfunc (ldr *Loader) migrateOldKeepstoreVolumes(cluster *arvados.Cluster, oc oldKeepstoreConfig, myURL arvados.URL) error {\n\tfor i, oldvol := range *oc.Volumes {\n\t\tvar accessViaHosts map[arvados.URL]arvados.VolumeAccess\n\t\toldUUID, found := ldr.alreadyMigrated(oldvol, cluster.Volumes, myURL)\n\t\tif found {\n\t\t\taccessViaHosts = cluster.Volumes[oldUUID].AccessViaHosts\n\t\t\twriters := false\n\t\t\tfor _, va := range accessViaHosts {\n\t\t\t\tif !va.ReadOnly {\n\t\t\t\t\twriters = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif writers || len(accessViaHosts) == 0 {\n\t\t\t\tldr.Logger.Infof(\"ignoring volume #%d's parameters in legacy keepstore config: using matching entry in cluster config instead\", i)\n\t\t\t\tif len(accessViaHosts) > 0 {\n\t\t\t\t\tcluster.Volumes[oldUUID].AccessViaHosts[myURL] = arvados.VolumeAccess{ReadOnly: oldvol.ReadOnly}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tvar newvol arvados.Volume\n\t\tif found {\n\t\t\tldr.Logger.Infof(\"ignoring volume #%d's parameters in legacy keepstore config: using matching entry in cluster config instead\", i)\n\t\t\tnewvol = cluster.Volumes[oldUUID]\n\t\t\t// Remove the old entry. It will be added back\n\t\t\t// below, possibly with a new UUID.\n\t\t\tdelete(cluster.Volumes, oldUUID)\n\t\t} else {\n\t\t\tv, err := ldr.translateOldKeepstoreVolume(oldvol)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnewvol = v\n\t\t}\n\t\tif accessViaHosts == nil {\n\t\t\taccessViaHosts = make(map[arvados.URL]arvados.VolumeAccess, 1)\n\t\t}\n\t\taccessViaHosts[myURL] = arvados.VolumeAccess{ReadOnly: oldvol.ReadOnly}\n\t\tnewvol.AccessViaHosts = accessViaHosts\n\n\t\tvolUUID := oldUUID\n\t\tif oldvol.ReadOnly {\n\t\t} else if oc.Listen == nil {\n\t\t\tldr.Logger.Warn(\"cannot find optimal volume UUID because Listen address is not given in legacy keepstore config\")\n\t\t} else if uuid, _, err := findKeepServicesItem(cluster, *oc.Listen); err != nil {\n\t\t\tldr.Logger.WithError(err).Warn(\"cannot find optimal volume UUID: failed to find a matching keep_service listing for this legacy keepstore config\")\n\t\t} else if len(uuid) != 27 {\n\t\t\tldr.Logger.WithField(\"UUID\", uuid).Warn(\"cannot find optimal volume UUID: keep_service UUID does not have expected format\")\n\t\t} else {\n\t\t\trendezvousUUID := cluster.ClusterID + \"-nyw5e-\" + uuid[12:]\n\t\t\tif _, ok := cluster.Volumes[rendezvousUUID]; ok {\n\t\t\t\tldr.Logger.Warn(\"suggesting a random volume UUID because the volume ID matching our keep_service UUID is already in use\")\n\t\t\t} else {\n\t\t\t\tvolUUID = rendezvousUUID\n\t\t\t}\n\t\t\tsi := cluster.Services.Keepstore.InternalURLs[myURL]\n\t\t\tsi.Rendezvous = uuid[12:]\n\t\t\tcluster.Services.Keepstore.InternalURLs[myURL] = si\n\t\t}\n\t\tif volUUID == \"\" {\n\t\t\tvolUUID = newUUID(cluster.ClusterID, \"nyw5e\")\n\t\t\tldr.Logger.WithField(\"UUID\", volUUID).Infof(\"suggesting a random volume UUID for volume #%d in legacy config\", i)\n\t\t}\n\t\tcluster.Volumes[volUUID] = 
newvol\n\t}\n\treturn nil\n}\n\nfunc (ldr *Loader) translateOldKeepstoreVolume(oldvol oldKeepstoreVolume) (arvados.Volume, error) {\n\tvar newvol arvados.Volume\n\tvar params interface{}\n\tswitch oldvol.Type {\n\tcase \"S3\":\n\t\taccesskeydata, err := ioutil.ReadFile(oldvol.AccessKeyFile)\n\t\tif err != nil && oldvol.AccessKeyFile != \"\" {\n\t\t\treturn newvol, fmt.Errorf(\"error reading AccessKeyFile: %s\", err)\n\t\t}\n\t\tsecretkeydata, err := ioutil.ReadFile(oldvol.SecretKeyFile)\n\t\tif err != nil && oldvol.SecretKeyFile != \"\" {\n\t\t\treturn newvol, fmt.Errorf(\"error reading SecretKeyFile: %s\", err)\n\t\t}\n\t\tnewvol = arvados.Volume{\n\t\t\tDriver:         \"S3\",\n\t\t\tReadOnly:       oldvol.ReadOnly,\n\t\t\tReplication:    oldvol.S3Replication,\n\t\t\tStorageClasses: array2boolmap(oldvol.StorageClasses),\n\t\t}\n\t\tparams = arvados.S3VolumeDriverParameters{\n\t\t\tAccessKeyID:        string(bytes.TrimSpace(accesskeydata)),\n\t\t\tSecretAccessKey:    string(bytes.TrimSpace(secretkeydata)),\n\t\t\tEndpoint:           oldvol.Endpoint,\n\t\t\tRegion:             oldvol.Region,\n\t\t\tBucket:             oldvol.Bucket,\n\t\t\tLocationConstraint: oldvol.LocationConstraint,\n\t\t\tIndexPageSize:      oldvol.IndexPageSize,\n\t\t\tConnectTimeout:     oldvol.ConnectTimeout,\n\t\t\tReadTimeout:        oldvol.ReadTimeout,\n\t\t\tRaceWindow:         oldvol.RaceWindow,\n\t\t\tUnsafeDelete:       oldvol.UnsafeDelete,\n\t\t}\n\tcase \"Azure\":\n\t\tkeydata, err := ioutil.ReadFile(oldvol.StorageAccountKeyFile)\n\t\tif err != nil && oldvol.StorageAccountKeyFile != \"\" {\n\t\t\treturn newvol, fmt.Errorf(\"error reading StorageAccountKeyFile: %s\", err)\n\t\t}\n\t\tnewvol = arvados.Volume{\n\t\t\tDriver:         \"Azure\",\n\t\t\tReadOnly:       oldvol.ReadOnly,\n\t\t\tReplication:    oldvol.AzureReplication,\n\t\t\tStorageClasses: array2boolmap(oldvol.StorageClasses),\n\t\t}\n\t\tparams = arvados.AzureVolumeDriverParameters{\n\t\t\tStorageAccountName:   oldvol.StorageAccountName,\n\t\t\tStorageAccountKey:    string(bytes.TrimSpace(keydata)),\n\t\t\tStorageBaseURL:       oldvol.StorageBaseURL,\n\t\t\tContainerName:        oldvol.ContainerName,\n\t\t\tRequestTimeout:       oldvol.RequestTimeout,\n\t\t\tListBlobsRetryDelay:  oldvol.ListBlobsRetryDelay,\n\t\t\tListBlobsMaxAttempts: oldvol.ListBlobsMaxAttempts,\n\t\t}\n\tcase \"Directory\":\n\t\tnewvol = arvados.Volume{\n\t\t\tDriver:         \"Directory\",\n\t\t\tReadOnly:       oldvol.ReadOnly,\n\t\t\tReplication:    oldvol.DirectoryReplication,\n\t\t\tStorageClasses: array2boolmap(oldvol.StorageClasses),\n\t\t}\n\t\tparams = arvados.DirectoryVolumeDriverParameters{\n\t\t\tRoot:      oldvol.Root,\n\t\t\tSerialize: oldvol.Serialize,\n\t\t}\n\tdefault:\n\t\treturn newvol, fmt.Errorf(\"unsupported volume type %q\", oldvol.Type)\n\t}\n\tdp, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn newvol, err\n\t}\n\tnewvol.DriverParameters = json.RawMessage(dp)\n\tif newvol.Replication < 1 {\n\t\tnewvol.Replication = 1\n\t}\n\treturn newvol, nil\n}\n\nfunc (ldr *Loader) alreadyMigrated(oldvol oldKeepstoreVolume, newvols map[string]arvados.Volume, myURL arvados.URL) (string, bool) {\n\tfor uuid, newvol := range newvols {\n\t\tif oldvol.Type != newvol.Driver {\n\t\t\tcontinue\n\t\t}\n\t\tswitch oldvol.Type {\n\t\tcase \"S3\":\n\t\t\tvar params arvados.S3VolumeDriverParameters\n\t\t\tif err := json.Unmarshal(newvol.DriverParameters, &params); err == nil &&\n\t\t\t\toldvol.Endpoint == params.Endpoint &&\n\t\t\t\toldvol.Region == params.Region 
&&\n\t\t\t\toldvol.Bucket == params.Bucket &&\n\t\t\t\toldvol.LocationConstraint == params.LocationConstraint {\n\t\t\t\treturn uuid, true\n\t\t\t}\n\t\tcase \"Azure\":\n\t\t\tvar params arvados.AzureVolumeDriverParameters\n\t\t\tif err := json.Unmarshal(newvol.DriverParameters, &params); err == nil &&\n\t\t\t\toldvol.StorageAccountName == params.StorageAccountName &&\n\t\t\t\toldvol.StorageBaseURL == params.StorageBaseURL &&\n\t\t\t\toldvol.ContainerName == params.ContainerName {\n\t\t\t\treturn uuid, true\n\t\t\t}\n\t\tcase \"Directory\":\n\t\t\tvar params arvados.DirectoryVolumeDriverParameters\n\t\t\tif err := json.Unmarshal(newvol.DriverParameters, &params); err == nil &&\n\t\t\t\toldvol.Root == params.Root {\n\t\t\t\tif _, ok := newvol.AccessViaHosts[myURL]; ok || len(newvol.AccessViaHosts) == 0 {\n\t\t\t\t\treturn uuid, true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", false\n}\n\nfunc (ldr *Loader) discoverLocalVolumes(cluster *arvados.Cluster, mountsFile string, myURL arvados.URL) error {\n\tif mountsFile == \"\" {\n\t\tmountsFile = \"/proc/mounts\"\n\t}\n\tf, err := os.Open(mountsFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error opening %s: %s\", mountsFile, err)\n\t}\n\tdefer f.Close()\n\tscanner := bufio.NewScanner(f)\n\tfor scanner.Scan() {\n\t\targs := strings.Fields(scanner.Text())\n\t\tdev, mount := args[0], args[1]\n\t\tif mount == \"/\" {\n\t\t\tcontinue\n\t\t}\n\t\tif dev != \"tmpfs\" && !strings.HasPrefix(dev, \"/dev/\") {\n\t\t\tcontinue\n\t\t}\n\t\tkeepdir := mount + \"/keep\"\n\t\tif st, err := os.Stat(keepdir); err != nil || !st.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tro := false\n\t\tfor _, fsopt := range strings.Split(args[3], \",\") {\n\t\t\tif fsopt == \"ro\" {\n\t\t\t\tro = true\n\t\t\t}\n\t\t}\n\n\t\tuuid := newUUID(cluster.ClusterID, \"nyw5e\")\n\t\tldr.Logger.WithFields(logrus.Fields{\n\t\t\t\"UUID\":                       uuid,\n\t\t\t\"Driver\":                     \"Directory\",\n\t\t\t\"DriverParameters.Root\":      keepdir,\n\t\t\t\"DriverParameters.Serialize\": false,\n\t\t\t\"ReadOnly\":                   ro,\n\t\t\t\"Replication\":                1,\n\t\t}).Warn(\"adding local directory volume\")\n\n\t\tp, err := json.Marshal(arvados.DirectoryVolumeDriverParameters{\n\t\t\tRoot:      keepdir,\n\t\t\tSerialize: false,\n\t\t})\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tcluster.Volumes[uuid] = arvados.Volume{\n\t\t\tDriver:           \"Directory\",\n\t\t\tDriverParameters: p,\n\t\t\tReadOnly:         ro,\n\t\t\tReplication:      1,\n\t\t\tAccessViaHosts: map[arvados.URL]arvados.VolumeAccess{\n\t\t\t\tmyURL: {ReadOnly: ro},\n\t\t\t},\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn fmt.Errorf(\"reading %s: %s\", mountsFile, err)\n\t}\n\treturn nil\n}\n\nfunc array2boolmap(keys []string) map[string]bool {\n\tm := map[string]bool{}\n\tfor _, k := range keys {\n\t\tm[k] = true\n\t}\n\treturn m\n}\n\nfunc newUUID(clusterID, infix string) string {\n\trandint, err := rand.Int(rand.Reader, big.NewInt(0).Exp(big.NewInt(36), big.NewInt(15), big.NewInt(0)))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\trandstr := randint.Text(36)\n\tfor len(randstr) < 15 {\n\t\trandstr = \"0\" + randstr\n\t}\n\treturn fmt.Sprintf(\"%s-%s-%s\", clusterID, infix, randstr)\n}\n\n// Return the UUID and URL for the controller's keep_services listing\n// corresponding to this host/process.\nfunc findKeepServicesItem(cluster *arvados.Cluster, listen string) (uuid string, url arvados.URL, err error) {\n\tclient, err := 
arvados.NewClientFromConfig(cluster)\n\tif err != nil {\n\t\treturn\n\t}\n\tclient.AuthToken = cluster.SystemRootToken\n\tvar svcList arvados.KeepServiceList\n\terr = client.RequestAndDecode(&svcList, \"GET\", \"arvados/v1/keep_services\", nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error getting hostname: %s\", err)\n\t\treturn\n\t}\n\tvar tried []string\n\tfor _, ks := range svcList.Items {\n\t\tif ks.ServiceType == \"proxy\" {\n\t\t\tcontinue\n\t\t} else if keepServiceIsMe(ks, hostname, listen) {\n\t\t\treturn ks.UUID, keepServiceURL(ks), nil\n\t\t} else {\n\t\t\ttried = append(tried, fmt.Sprintf(\"%s:%d\", ks.ServiceHost, ks.ServicePort))\n\t\t}\n\t}\n\terr = fmt.Errorf(\"listen address %q does not match any of the non-proxy keep_services entries %q\", listen, tried)\n\treturn\n}\n\nfunc keepServiceURL(ks arvados.KeepService) arvados.URL {\n\turl := arvados.URL{\n\t\tScheme: \"http\",\n\t\tHost:   net.JoinHostPort(ks.ServiceHost, strconv.Itoa(ks.ServicePort)),\n\t\tPath:   \"/\",\n\t}\n\tif ks.ServiceSSLFlag {\n\t\turl.Scheme = \"https\"\n\t}\n\treturn url\n}\n\nvar localhostOrAllInterfaces = map[string]bool{\n\t\"localhost\": true,\n\t\"127.0.0.1\": true,\n\t\"::1\":       true,\n\t\"::\":        true,\n\t\"\":          true,\n}\n\n// Return true if the given KeepService entry matches the given\n// hostname and (keepstore config file) listen address.\n//\n// If the KeepService host is some variant of \"localhost\", we assume\n// this is a testing or single-node environment, ignore the given\n// hostname, and return true if the port numbers match.\n//\n// The hostname isn't assumed to be a FQDN: a hostname \"foo.bar\" will\n// match a KeepService host \"foo.bar\", but also \"foo.bar.example\",\n// \"foo.bar.example.org\", etc.\nfunc keepServiceIsMe(ks arvados.KeepService, hostname string, listen string) bool {\n\t// Extract the port name/number from listen, and resolve it to\n\t// a port number to compare with ks.ServicePort.\n\t_, listenport, err := net.SplitHostPort(listen)\n\tif err != nil && strings.HasPrefix(listen, \":\") {\n\t\tlistenport = listen[1:]\n\t}\n\tif lp, err := net.LookupPort(\"tcp\", listenport); err != nil {\n\t\treturn false\n\t} else if !(lp == ks.ServicePort ||\n\t\t(lp == 0 && ks.ServicePort == 80)) {\n\t\treturn false\n\t}\n\n\tkshost := strings.ToLower(ks.ServiceHost)\n\treturn localhostOrAllInterfaces[kshost] || strings.HasPrefix(kshost+\".\", strings.ToLower(hostname)+\".\")\n}\n\n// Warn about pending keepstore migration tasks that haven't already\n// been warned about in loadOldKeepstoreConfig() -- i.e., unmigrated\n// keepstore hosts other than the present host, and obsolete content\n// in the keep_services table.\nfunc (ldr *Loader) checkPendingKeepstoreMigrations(cluster *arvados.Cluster) error {\n\tif cluster.Services.Controller.ExternalURL.String() == \"\" {\n\t\tldr.Logger.Debug(\"Services.Controller.ExternalURL not configured -- skipping check for pending keepstore config migrations\")\n\t\treturn nil\n\t}\n\tif ldr.SkipAPICalls {\n\t\tldr.Logger.Debug(\"(Loader).SkipAPICalls == true -- skipping check for pending keepstore config migrations\")\n\t\treturn nil\n\t}\n\tclient, err := arvados.NewClientFromConfig(cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclient.AuthToken = cluster.SystemRootToken\n\tvar svcList arvados.KeepServiceList\n\terr = client.RequestAndDecode(&svcList, \"GET\", \"arvados/v1/keep_services\", nil, nil)\n\tif err != nil 
{\n\t\tldr.Logger.WithError(err).Warn(\"error retrieving keep_services list -- skipping check for pending keepstore config migrations\")\n\t\treturn nil\n\t}\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting hostname: %s\", err)\n\t}\n\tsawTimes := map[time.Time]bool{}\n\tfor _, ks := range svcList.Items {\n\t\tsawTimes[ks.CreatedAt] = true\n\t\tsawTimes[ks.ModifiedAt] = true\n\t}\n\tif len(sawTimes) <= 1 {\n\t\t// If all timestamps in the arvados/v1/keep_services\n\t\t// response are identical, it's a clear sign the\n\t\t// response was generated on the fly from the cluster\n\t\t// config, rather than real database records. In that\n\t\t// case (as well as the case where none are listed at\n\t\t// all) it's pointless to look for entries that\n\t\t// haven't yet been migrated to the config file.\n\t\treturn nil\n\t}\n\tneedDBRows := false\n\tfor _, ks := range svcList.Items {\n\t\tif ks.ServiceType == \"proxy\" {\n\t\t\tif len(cluster.Services.Keepproxy.InternalURLs) == 0 {\n\t\t\t\tneedDBRows = true\n\t\t\t\tldr.Logger.Warn(\"you should migrate your keepproxy configuration to the cluster configuration file\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tkshost := strings.ToLower(ks.ServiceHost)\n\t\tif localhostOrAllInterfaces[kshost] || strings.HasPrefix(kshost+\".\", strings.ToLower(hostname)+\".\") {\n\t\t\t// it would be confusing to recommend\n\t\t\t// migrating *this* host's legacy keepstore\n\t\t\t// config immediately after explaining that\n\t\t\t// very migration process in more detail.\n\t\t\tcontinue\n\t\t}\n\t\tksurl := keepServiceURL(ks)\n\t\tif _, ok := cluster.Services.Keepstore.InternalURLs[ksurl]; ok {\n\t\t\t// already added to InternalURLs\n\t\t\tcontinue\n\t\t}\n\t\tldr.Logger.Warnf(\"you should migrate the legacy keepstore configuration file on host %s\", ks.ServiceHost)\n\t}\n\tif !needDBRows {\n\t\tldr.Logger.Warn(\"you should delete all of your manually added keep_services listings using `arv --format=uuid keep_service list | xargs -n1 arv keep_service delete --uuid` -- when those are deleted, the services listed in your cluster configuration will be used instead\")\n\t}\n\treturn nil\n}\n\n// Warn about keepstore servers that have no volumes.\nfunc (ldr *Loader) checkEmptyKeepstores(cluster arvados.Cluster) error {\nservers:\n\tfor url := range cluster.Services.Keepstore.InternalURLs {\n\t\tfor _, vol := range cluster.Volumes {\n\t\t\tif len(vol.AccessViaHosts) == 0 {\n\t\t\t\t// accessible by all servers\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif _, ok := vol.AccessViaHosts[url]; ok {\n\t\t\t\tcontinue servers\n\t\t\t}\n\t\t}\n\t\tldr.Logger.Warnf(\"keepstore configured at %s does not have access to any volumes\", url)\n\t}\n\treturn nil\n}\n\n// Warn about AccessViaHosts entries that don't correspond to any of\n// the listed keepstore services.\nfunc (ldr *Loader) checkUnlistedKeepstores(cluster arvados.Cluster) error {\n\tfor uuid, vol := range cluster.Volumes {\n\t\tif uuid == \"SAMPLE\" {\n\t\t\tcontinue\n\t\t}\n\t\tfor url := range vol.AccessViaHosts {\n\t\t\tif _, ok := cluster.Services.Keepstore.InternalURLs[url]; !ok {\n\t\t\t\tldr.Logger.Warnf(\"Volumes.%s.AccessViaHosts refers to nonexistent keepstore server %s\", uuid, url)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "lib/config/deprecated_keepstore_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\ntype KeepstoreMigrationSuite struct {\n\thostname string // blank = use test system's hostname\n\tksByPort map[int]arvados.KeepService\n}\n\nvar _ = check.Suite(&KeepstoreMigrationSuite{})\n\nfunc (s *KeepstoreMigrationSuite) SetUpSuite(c *check.C) {\n\tos.Setenv(\"ARVADOS_API_HOST\", os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\tos.Setenv(\"ARVADOS_API_HOST_INSECURE\", \"1\")\n\tos.Setenv(\"ARVADOS_API_TOKEN\", arvadostest.AdminToken)\n\n\t// We don't need the keepstore servers, but we do need\n\t// keep_services listings that point to localhost, rather than\n\t// the apiserver fixtures that point to fictional hosts\n\t// keep*.zzzzz.arvadosapi.com.\n\n\tclient := arvados.NewClientFromEnv()\n\n\t// Delete existing non-proxy listings.\n\tvar svcList arvados.KeepServiceList\n\terr := client.RequestAndDecode(&svcList, \"GET\", \"arvados/v1/keep_services\", nil, nil)\n\tc.Assert(err, check.IsNil)\n\tfor _, ks := range svcList.Items {\n\t\tif ks.ServiceType != \"proxy\" {\n\t\t\terr = client.RequestAndDecode(new(struct{}), \"DELETE\", \"arvados/v1/keep_services/\"+ks.UUID, nil, nil)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t}\n\t}\n\t// Add new fake listings.\n\ts.ksByPort = map[int]arvados.KeepService{}\n\tfor _, port := range []int{25107, 25108} {\n\t\tvar ks arvados.KeepService\n\t\terr = client.RequestAndDecode(&ks, \"POST\", \"arvados/v1/keep_services\", nil, map[string]interface{}{\n\t\t\t\"keep_service\": map[string]interface{}{\n\t\t\t\t\"service_type\": \"disk\",\n\t\t\t\t\"service_host\": \"localhost\",\n\t\t\t\t\"service_port\": port,\n\t\t\t},\n\t\t})\n\t\tc.Assert(err, check.IsNil)\n\t\ts.ksByPort[port] = ks\n\t}\n}\n\nfunc (s *KeepstoreMigrationSuite) checkEquivalentWithKeepstoreConfig(c *check.C, keepstoreconfig, clusterconfig, expectedconfig string) {\n\tkeepstorefile, err := ioutil.TempFile(\"\", \"\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(keepstorefile.Name())\n\t_, err = io.WriteString(keepstorefile, keepstoreconfig)\n\tc.Assert(err, check.IsNil)\n\terr = keepstorefile.Close()\n\tc.Assert(err, check.IsNil)\n\n\tgotldr := testLoader(c, clusterconfig, nil)\n\tgotldr.KeepstorePath = keepstorefile.Name()\n\texpectedldr := testLoader(c, expectedconfig, nil)\n\tcheckEquivalentLoaders(c, gotldr, expectedldr)\n}\n\nfunc (s *KeepstoreMigrationSuite) TestDeprecatedKeepstoreConfig(c *check.C) {\n\tkeyfile, err := ioutil.TempFile(\"\", \"\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(keyfile.Name())\n\tio.WriteString(keyfile, \"blobsigningkey\\n\")\n\n\thostname, err := os.Hostname()\n\tc.Assert(err, check.IsNil)\n\n\ts.checkEquivalentWithKeepstoreConfig(c, `\nListen: \":25107\"\nDebug: true\nLogFormat: text\nMaxBuffers: 1234\nMaxRequests: 2345\nBlobSignatureTTL: 123m\nBlobSigningKeyFile: `+keyfile.Name()+`\nVolumes:\n- Type: Directory\n  Root: /tmp\n`, `\nClusters:\n  z1111:\n    SystemRootToken: `+arvadostest.AdminToken+`\n    TLS: {Insecure: true}\n    Services:\n      Controller:\n        ExternalURL: \"https://`+os.Getenv(\"ARVADOS_API_HOST\")+`/\"\n`, `\nClusters:\n  z1111:\n    SystemRootToken: `+arvadostest.AdminToken+`\n    TLS: 
{Insecure: true}\n    Services:\n      Keepstore:\n        InternalURLs:\n          \"http://`+hostname+`:25107\": {Rendezvous: `+s.ksByPort[25107].UUID[12:]+`}\n      Controller:\n        ExternalURL: \"https://`+os.Getenv(\"ARVADOS_API_HOST\")+`/\"\n    SystemLogs:\n      Format: text\n      LogLevel: debug\n    API:\n      MaxKeepBlobBuffers: 1234\n      MaxConcurrentRequests: 2345\n    Collections:\n      BlobSigningTTL: 123m\n      BlobSigningKey: blobsigningkey\n    Volumes:\n      z1111-nyw5e-`+s.ksByPort[25107].UUID[12:]+`:\n        AccessViaHosts:\n          \"http://`+hostname+`:25107\":\n            ReadOnly: false\n        Driver: Directory\n        DriverParameters:\n          Root: /tmp\n          Serialize: false\n        ReadOnly: false\n        Replication: 1\n        StorageClasses: {}\n`)\n}\n\nfunc (s *KeepstoreMigrationSuite) TestDiscoverLocalVolumes(c *check.C) {\n\ttmpd, err := ioutil.TempDir(\"\", \"\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.RemoveAll(tmpd)\n\terr = os.Mkdir(tmpd+\"/keep\", 0777)\n\tc.Assert(err, check.IsNil)\n\n\ttmpf, err := ioutil.TempFile(\"\", \"\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(tmpf.Name())\n\n\t// read/write\n\t_, err = fmt.Fprintf(tmpf, \"/dev/xvdb %s ext4 rw,noexec 0 0\\n\", tmpd)\n\tc.Assert(err, check.IsNil)\n\n\ts.testDeprecatedVolume(c, \"DiscoverVolumesFromMountsFile: \"+tmpf.Name(), arvados.Volume{\n\t\tDriver:      \"Directory\",\n\t\tReadOnly:    false,\n\t\tReplication: 1,\n\t}, &arvados.DirectoryVolumeDriverParameters{\n\t\tRoot:      tmpd + \"/keep\",\n\t\tSerialize: false,\n\t}, &arvados.DirectoryVolumeDriverParameters{})\n\n\t// read-only\n\ttmpf.Seek(0, os.SEEK_SET)\n\ttmpf.Truncate(0)\n\t_, err = fmt.Fprintf(tmpf, \"/dev/xvdb %s ext4 ro,noexec 0 0\\n\", tmpd)\n\tc.Assert(err, check.IsNil)\n\n\ts.testDeprecatedVolume(c, \"DiscoverVolumesFromMountsFile: \"+tmpf.Name(), arvados.Volume{\n\t\tDriver:      \"Directory\",\n\t\tReadOnly:    true,\n\t\tReplication: 1,\n\t}, &arvados.DirectoryVolumeDriverParameters{\n\t\tRoot:      tmpd + \"/keep\",\n\t\tSerialize: false,\n\t}, &arvados.DirectoryVolumeDriverParameters{})\n}\n\nfunc (s *KeepstoreMigrationSuite) TestDeprecatedVolumes(c *check.C) {\n\taccesskeyfile, err := ioutil.TempFile(\"\", \"\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(accesskeyfile.Name())\n\tio.WriteString(accesskeyfile, \"accesskeydata\\n\")\n\n\tsecretkeyfile, err := ioutil.TempFile(\"\", \"\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(secretkeyfile.Name())\n\tio.WriteString(secretkeyfile, \"secretkeydata\\n\")\n\n\t// s3, empty/default\n\ts.testDeprecatedVolume(c, `\nVolumes:\n- Type: S3\n`, arvados.Volume{\n\t\tDriver:      \"S3\",\n\t\tReplication: 1,\n\t}, &arvados.S3VolumeDriverParameters{}, &arvados.S3VolumeDriverParameters{})\n\n\t// s3, fully configured\n\ts.testDeprecatedVolume(c, `\nVolumes:\n- Type: S3\n  AccessKeyFile: `+accesskeyfile.Name()+`\n  SecretKeyFile: `+secretkeyfile.Name()+`\n  Endpoint: https://storage.googleapis.com\n  Region: us-east-1z\n  Bucket: testbucket\n  LocationConstraint: true\n  IndexPageSize: 1234\n  S3Replication: 4\n  ConnectTimeout: 3m\n  ReadTimeout: 4m\n  RaceWindow: 5m\n  UnsafeDelete: true\n`, arvados.Volume{\n\t\tDriver:      \"S3\",\n\t\tReplication: 4,\n\t}, &arvados.S3VolumeDriverParameters{\n\t\tAccessKeyID:        \"accesskeydata\",\n\t\tSecretAccessKey:    \"secretkeydata\",\n\t\tEndpoint:           \"https://storage.googleapis.com\",\n\t\tRegion:             \"us-east-1z\",\n\t\tBucket:             
\"testbucket\",\n\t\tLocationConstraint: true,\n\t\tIndexPageSize:      1234,\n\t\tConnectTimeout:     arvados.Duration(time.Minute * 3),\n\t\tReadTimeout:        arvados.Duration(time.Minute * 4),\n\t\tRaceWindow:         arvados.Duration(time.Minute * 5),\n\t\tUnsafeDelete:       true,\n\t}, &arvados.S3VolumeDriverParameters{})\n\n\t// azure, empty/default\n\ts.testDeprecatedVolume(c, `\nVolumes:\n- Type: Azure\n`, arvados.Volume{\n\t\tDriver:      \"Azure\",\n\t\tReplication: 1,\n\t}, &arvados.AzureVolumeDriverParameters{}, &arvados.AzureVolumeDriverParameters{})\n\n\t// azure, fully configured\n\ts.testDeprecatedVolume(c, `\nVolumes:\n- Type: Azure\n  ReadOnly: true\n  StorageAccountName: storageacctname\n  StorageAccountKeyFile: `+secretkeyfile.Name()+`\n  StorageBaseURL: https://example.example/\n  ContainerName: testctr\n  LocationConstraint: true\n  AzureReplication: 4\n  RequestTimeout: 3m\n  ListBlobsRetryDelay: 4m\n  ListBlobsMaxAttempts: 5\n`, arvados.Volume{\n\t\tDriver:      \"Azure\",\n\t\tReadOnly:    true,\n\t\tReplication: 4,\n\t}, &arvados.AzureVolumeDriverParameters{\n\t\tStorageAccountName:   \"storageacctname\",\n\t\tStorageAccountKey:    \"secretkeydata\",\n\t\tStorageBaseURL:       \"https://example.example/\",\n\t\tContainerName:        \"testctr\",\n\t\tRequestTimeout:       arvados.Duration(time.Minute * 3),\n\t\tListBlobsRetryDelay:  arvados.Duration(time.Minute * 4),\n\t\tListBlobsMaxAttempts: 5,\n\t}, &arvados.AzureVolumeDriverParameters{})\n\n\t// directory, empty/default\n\ts.testDeprecatedVolume(c, `\nVolumes:\n- Type: Directory\n  Root: /tmp/xyzzy\n`, arvados.Volume{\n\t\tDriver:      \"Directory\",\n\t\tReplication: 1,\n\t}, &arvados.DirectoryVolumeDriverParameters{\n\t\tRoot: \"/tmp/xyzzy\",\n\t}, &arvados.DirectoryVolumeDriverParameters{})\n\n\t// directory, fully configured\n\ts.testDeprecatedVolume(c, `\nVolumes:\n- Type: Directory\n  ReadOnly: true\n  Root: /tmp/xyzzy\n  DirectoryReplication: 4\n  Serialize: true\n`, arvados.Volume{\n\t\tDriver:      \"Directory\",\n\t\tReadOnly:    true,\n\t\tReplication: 4,\n\t}, &arvados.DirectoryVolumeDriverParameters{\n\t\tRoot:      \"/tmp/xyzzy\",\n\t\tSerialize: true,\n\t}, &arvados.DirectoryVolumeDriverParameters{})\n}\n\nfunc (s *KeepstoreMigrationSuite) testDeprecatedVolume(c *check.C, oldconfigdata string, expectvol arvados.Volume, expectparams interface{}, paramsdst interface{}) {\n\thostname := s.hostname\n\tif hostname == \"\" {\n\t\th, err := os.Hostname()\n\t\tc.Assert(err, check.IsNil)\n\t\thostname = h\n\t}\n\n\toldconfig, err := ioutil.TempFile(\"\", \"\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(oldconfig.Name())\n\tio.WriteString(oldconfig, \"Listen: :12345\\n\"+oldconfigdata)\n\tif !strings.Contains(oldconfigdata, \"DiscoverVolumesFromMountsFile\") {\n\t\t// Prevent tests from looking at the real /proc/mounts on the test host.\n\t\tio.WriteString(oldconfig, \"\\nDiscoverVolumesFromMountsFile: /dev/null\\n\")\n\t}\n\n\tldr := testLoader(c, \"Clusters: {z1111: {}}\", nil)\n\tldr.KeepstorePath = oldconfig.Name()\n\tcfg, err := ldr.Load()\n\tc.Assert(err, check.IsNil)\n\tcc := cfg.Clusters[\"z1111\"]\n\tc.Check(cc.Volumes, check.HasLen, 1)\n\tfor uuid, v := range cc.Volumes {\n\t\tc.Check(uuid, check.HasLen, 27)\n\t\tc.Check(v.Driver, check.Equals, expectvol.Driver)\n\t\tc.Check(v.Replication, check.Equals, expectvol.Replication)\n\n\t\tavh, ok := v.AccessViaHosts[arvados.URL{Scheme: \"http\", Host: hostname + \":12345\", Path: \"/\"}]\n\t\tc.Check(ok, check.Equals, 
true)\n\t\tc.Check(avh.ReadOnly, check.Equals, expectvol.ReadOnly)\n\n\t\terr := json.Unmarshal(v.DriverParameters, paramsdst)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(paramsdst, check.DeepEquals, expectparams)\n\t}\n}\n\n// How we handle a volume from a legacy keepstore config file depends\n// on whether it's writable, whether a volume using the same cloud\n// backend already exists in the cluster config, and (if so) whether\n// it already has an AccessViaHosts entry for this host.\n//\n// In all cases, we should end up with an AccessViaHosts entry for\n// this host, to indicate that the current host's volumes have been\n// migrated.\n\n// Same backend already referenced in cluster config, this host\n// already listed in AccessViaHosts --> no change, except possibly\n// updating the ReadOnly flag on the AccessViaHosts entry.\nfunc (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_AlreadyMigrated(c *check.C) {\n\tbefore, after := s.loadWithKeepstoreConfig(c, `\nListen: :12345\nVolumes:\n- Type: S3\n  Endpoint: https://storage.googleapis.com\n  Region: us-east-1z\n  Bucket: alreadymigrated\n  S3Replication: 3\n`)\n\tcheckEqualYAML(c, after, before)\n}\n\n// Writable volume, same cloud backend already referenced in cluster\n// config --> change UUID to match this keepstore's UUID.\nfunc (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_UpdateUUID(c *check.C) {\n\tport, expectUUID := s.getTestKeepstorePortAndMatchingVolumeUUID(c)\n\n\tbefore, after := s.loadWithKeepstoreConfig(c, `\nListen: :`+strconv.Itoa(port)+`\nVolumes:\n- Type: S3\n  Endpoint: https://storage.googleapis.com\n  Region: us-east-1z\n  Bucket: readonlyonother\n  S3Replication: 3\n`)\n\tc.Check(after, check.HasLen, len(before))\n\tnewuuids := s.findAddedVolumes(c, before, after, 1)\n\tnewvol := after[newuuids[0]]\n\n\tvar params arvados.S3VolumeDriverParameters\n\tjson.Unmarshal(newvol.DriverParameters, &params)\n\tc.Check(params.Bucket, check.Equals, \"readonlyonother\")\n\tc.Check(newuuids[0], check.Equals, expectUUID)\n}\n\n// Writable volume, same cloud backend not yet referenced --> add a\n// new volume, with UUID to match this keepstore's UUID.\nfunc (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_AddCloudVolume(c *check.C) {\n\tport, expectUUID := s.getTestKeepstorePortAndMatchingVolumeUUID(c)\n\n\tbefore, after := s.loadWithKeepstoreConfig(c, `\nListen: :`+strconv.Itoa(port)+`\nVolumes:\n- Type: S3\n  Endpoint: https://storage.googleapis.com\n  Region: us-east-1z\n  Bucket: bucket-to-migrate\n  S3Replication: 3\n`)\n\tnewuuids := s.findAddedVolumes(c, before, after, 1)\n\tnewvol := after[newuuids[0]]\n\n\tvar params arvados.S3VolumeDriverParameters\n\tjson.Unmarshal(newvol.DriverParameters, &params)\n\tc.Check(params.Bucket, check.Equals, \"bucket-to-migrate\")\n\tc.Check(newvol.Replication, check.Equals, 3)\n\n\tc.Check(newuuids[0], check.Equals, expectUUID)\n}\n\n// Writable volume, same filesystem backend already referenced in\n// cluster config, but this host isn't in AccessViaHosts --> add a new\n// volume, with UUID to match this keepstore's UUID (filesystem-backed\n// volumes are assumed to be different on different hosts, even if\n// paths are the same).\nfunc (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_AddLocalVolume(c *check.C) {\n\tbefore, after := s.loadWithKeepstoreConfig(c, `\nListen: :12345\nVolumes:\n- Type: Directory\n  Root: /data/sdd\n  DirectoryReplication: 2\n`)\n\tnewuuids := s.findAddedVolumes(c, before, after, 1)\n\tnewvol := 
after[newuuids[0]]\n\n\tvar params arvados.DirectoryVolumeDriverParameters\n\tjson.Unmarshal(newvol.DriverParameters, &params)\n\tc.Check(params.Root, check.Equals, \"/data/sdd\")\n\tc.Check(newvol.Replication, check.Equals, 2)\n}\n\n// Writable volume, same filesystem backend already referenced in\n// cluster config, and this host is already listed in AccessViaHosts\n// --> already migrated, don't change anything.\nfunc (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_LocalVolumeAlreadyMigrated(c *check.C) {\n\tbefore, after := s.loadWithKeepstoreConfig(c, `\nListen: :12345\nVolumes:\n- Type: Directory\n  Root: /data/sde\n  DirectoryReplication: 2\n`)\n\tcheckEqualYAML(c, after, before)\n}\n\n// Multiple writable cloud-backed volumes --> one of them will get a\n// UUID matching this keepstore's UUID.\nfunc (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_AddMultipleCloudVolumes(c *check.C) {\n\tport, expectUUID := s.getTestKeepstorePortAndMatchingVolumeUUID(c)\n\n\tbefore, after := s.loadWithKeepstoreConfig(c, `\nListen: :`+strconv.Itoa(port)+`\nVolumes:\n- Type: S3\n  Endpoint: https://storage.googleapis.com\n  Region: us-east-1z\n  Bucket: first-bucket-to-migrate\n  S3Replication: 3\n- Type: S3\n  Endpoint: https://storage.googleapis.com\n  Region: us-east-1z\n  Bucket: second-bucket-to-migrate\n  S3Replication: 3\n`)\n\tnewuuids := s.findAddedVolumes(c, before, after, 2)\n\t// Sort by bucket name (so \"first\" comes before \"second\")\n\tparams := map[string]arvados.S3VolumeDriverParameters{}\n\tfor _, uuid := range newuuids {\n\t\tvar p arvados.S3VolumeDriverParameters\n\t\tjson.Unmarshal(after[uuid].DriverParameters, &p)\n\t\tparams[uuid] = p\n\t}\n\tsort.Slice(newuuids, func(i, j int) bool { return params[newuuids[i]].Bucket < params[newuuids[j]].Bucket })\n\tnewvol0, newvol1 := after[newuuids[0]], after[newuuids[1]]\n\tparams0, params1 := params[newuuids[0]], params[newuuids[1]]\n\n\tc.Check(params0.Bucket, check.Equals, \"first-bucket-to-migrate\")\n\tc.Check(newvol0.Replication, check.Equals, 3)\n\n\tc.Check(params1.Bucket, check.Equals, \"second-bucket-to-migrate\")\n\tc.Check(newvol1.Replication, check.Equals, 3)\n\n\t// Don't care which one gets the special UUID\n\tif newuuids[0] != expectUUID {\n\t\tc.Check(newuuids[1], check.Equals, expectUUID)\n\t}\n}\n\n// Non-writable volume, same cloud backend already referenced in\n// cluster config --> add this host to AccessViaHosts with\n// ReadOnly==true\nfunc (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_UpdateWithReadOnly(c *check.C) {\n\tport, _ := s.getTestKeepstorePortAndMatchingVolumeUUID(c)\n\tbefore, after := s.loadWithKeepstoreConfig(c, `\nListen: :`+strconv.Itoa(port)+`\nVolumes:\n- Type: S3\n  Endpoint: https://storage.googleapis.com\n  Region: us-east-1z\n  Bucket: readonlyonother\n  S3Replication: 3\n  ReadOnly: true\n`)\n\thostname, err := os.Hostname()\n\tc.Assert(err, check.IsNil)\n\turl := arvados.URL{\n\t\tScheme: \"http\",\n\t\tHost:   fmt.Sprintf(\"%s:%d\", hostname, port),\n\t\tPath:   \"/\",\n\t}\n\t_, ok := before[\"zzzzz-nyw5e-readonlyonother\"].AccessViaHosts[url]\n\tc.Check(ok, check.Equals, false)\n\t_, ok = after[\"zzzzz-nyw5e-readonlyonother\"].AccessViaHosts[url]\n\tc.Check(ok, check.Equals, true)\n}\n\n// Writable volume, same cloud backend already writable by another\n// keepstore server --> add this host to AccessViaHosts with\n// ReadOnly==true\nfunc (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_UpdateAlreadyWritable(c *check.C) {\n\tport, _ := 
s.getTestKeepstorePortAndMatchingVolumeUUID(c)\n\tbefore, after := s.loadWithKeepstoreConfig(c, `\nListen: :`+strconv.Itoa(port)+`\nVolumes:\n- Type: S3\n  Endpoint: https://storage.googleapis.com\n  Region: us-east-1z\n  Bucket: writableonother\n  S3Replication: 3\n  ReadOnly: false\n`)\n\thostname, err := os.Hostname()\n\tc.Assert(err, check.IsNil)\n\turl := arvados.URL{\n\t\tScheme: \"http\",\n\t\tHost:   fmt.Sprintf(\"%s:%d\", hostname, port),\n\t\tPath:   \"/\",\n\t}\n\t_, ok := before[\"zzzzz-nyw5e-writableonother\"].AccessViaHosts[url]\n\tc.Check(ok, check.Equals, false)\n\t_, ok = after[\"zzzzz-nyw5e-writableonother\"].AccessViaHosts[url]\n\tc.Check(ok, check.Equals, true)\n}\n\n// Non-writable volume, same cloud backend not already referenced in\n// cluster config --> assign a new random volume UUID.\nfunc (s *KeepstoreMigrationSuite) TestIncrementalVolumeMigration_AddReadOnly(c *check.C) {\n\tport, _ := s.getTestKeepstorePortAndMatchingVolumeUUID(c)\n\tbefore, after := s.loadWithKeepstoreConfig(c, `\nListen: :`+strconv.Itoa(port)+`\nVolumes:\n- Type: S3\n  Endpoint: https://storage.googleapis.com\n  Region: us-east-1z\n  Bucket: differentbucket\n  S3Replication: 3\n  ReadOnly: true\n`)\n\tnewuuids := s.findAddedVolumes(c, before, after, 1)\n\tnewvol := after[newuuids[0]]\n\n\tvar params arvados.S3VolumeDriverParameters\n\tjson.Unmarshal(newvol.DriverParameters, &params)\n\tc.Check(params.Bucket, check.Equals, \"differentbucket\")\n\n\thostname, err := os.Hostname()\n\tc.Assert(err, check.IsNil)\n\t_, ok := newvol.AccessViaHosts[arvados.URL{Scheme: \"http\", Host: fmt.Sprintf(\"%s:%d\", hostname, port), Path: \"/\"}]\n\tc.Check(ok, check.Equals, true)\n}\n\n// Ensure logs mention unmigrated servers.\nfunc (s *KeepstoreMigrationSuite) TestPendingKeepstoreMigrations(c *check.C) {\n\tclient := arvados.NewClientFromEnv()\n\tfor _, host := range []string{\"keep0\", \"keep1\"} {\n\t\terr := client.RequestAndDecode(new(struct{}), \"POST\", \"arvados/v1/keep_services\", nil, map[string]interface{}{\n\t\t\t\"keep_service\": map[string]interface{}{\n\t\t\t\t\"service_type\": \"disk\",\n\t\t\t\t\"service_host\": host + \".zzzzz.example.com\",\n\t\t\t\t\"service_port\": 25107,\n\t\t\t},\n\t\t})\n\t\tc.Assert(err, check.IsNil)\n\t}\n\n\tport, _ := s.getTestKeepstorePortAndMatchingVolumeUUID(c)\n\tlogs := s.logsWithKeepstoreConfig(c, `\nListen: :`+strconv.Itoa(port)+`\nVolumes:\n- Type: S3\n  Endpoint: https://storage.googleapis.com\n  Bucket: foo\n`)\n\tc.Check(logs, check.Matches, `(?ms).*you should remove the legacy keepstore config file.*`)\n\tc.Check(logs, check.Matches, `(?ms).*you should migrate the legacy keepstore configuration file on host keep1.zzzzz.example.com.*`)\n\tc.Check(logs, check.Not(check.Matches), `(?ms).*should migrate.*keep0.zzzzz.example.com.*`)\n\tc.Check(logs, check.Matches, `(?ms).*keepstore configured at http://keep2.zzzzz.example.com:25107/ does not have access to any volumes.*`)\n\tc.Check(logs, check.Matches, `(?ms).*Volumes.zzzzz-nyw5e-possconfigerror.AccessViaHosts refers to nonexistent keepstore server http://keep00.zzzzz.example.com:25107.*`)\n}\n\nconst clusterConfigForKeepstoreMigrationTest = `\nClusters:\n  zzzzz:\n    SystemRootToken: ` + arvadostest.AdminToken + `\n    Services:\n      Keepstore:\n        InternalURLs:\n          \"http://{{.hostname}}:12345\": {}\n          \"http://keep0.zzzzz.example.com:25107\": {}\n          \"http://keep2.zzzzz.example.com:25107\": {}\n      Controller:\n        ExternalURL: \"https://{{.controller}}\"\n    TLS:\n      Insecure: 
true\n    Volumes:\n\n      zzzzz-nyw5e-alreadymigrated:\n        AccessViaHosts:\n          \"http://{{.hostname}}:12345\": {}\n        Driver: S3\n        DriverParameters:\n          Endpoint: https://storage.googleapis.com\n          Region: us-east-1z\n          Bucket: alreadymigrated\n        Replication: 3\n\n      zzzzz-nyw5e-readonlyonother:\n        AccessViaHosts:\n          \"http://keep0.zzzzz.example.com:25107\": {ReadOnly: true}\n        Driver: S3\n        DriverParameters:\n          Endpoint: https://storage.googleapis.com\n          Region: us-east-1z\n          Bucket: readonlyonother\n        Replication: 3\n\n      zzzzz-nyw5e-writableonother:\n        AccessViaHosts:\n          \"http://keep0.zzzzz.example.com:25107\": {}\n        Driver: S3\n        DriverParameters:\n          Endpoint: https://storage.googleapis.com\n          Region: us-east-1z\n          Bucket: writableonother\n        Replication: 3\n\n      zzzzz-nyw5e-localfilesystem:\n        AccessViaHosts:\n          \"http://keep0.zzzzz.example.com:25107\": {}\n        Driver: Directory\n        DriverParameters:\n          Root: /data/sdd\n        Replication: 1\n\n      zzzzz-nyw5e-localismigrated:\n        AccessViaHosts:\n          \"http://{{.hostname}}:12345\": {}\n        Driver: Directory\n        DriverParameters:\n          Root: /data/sde\n        Replication: 1\n\n      zzzzz-nyw5e-possconfigerror:\n        AccessViaHosts:\n          \"http://keep00.zzzzz.example.com:25107\": {}\n        Driver: Directory\n        DriverParameters:\n          Root: /data/sdf\n        Replication: 1\n`\n\n// Determine the effect of combining the given legacy keepstore config\n// YAML (just the \"Volumes\" entries of an old keepstore config file)\n// with the example clusterConfigForKeepstoreMigrationTest config.\n//\n// Return two Volumes configs -- one without loading keepstoreYAML\n// (\"before\") and one with (\"after\") -- for the caller to compare.\nfunc (s *KeepstoreMigrationSuite) loadWithKeepstoreConfig(c *check.C, keepstoreYAML string) (before, after map[string]arvados.Volume) {\n\tldr := testLoader(c, s.clusterConfigYAML(c), nil)\n\tcBefore, err := ldr.Load()\n\tc.Assert(err, check.IsNil)\n\n\tkeepstoreconfig, err := ioutil.TempFile(\"\", \"\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(keepstoreconfig.Name())\n\tio.WriteString(keepstoreconfig, keepstoreYAML)\n\n\tldr = testLoader(c, s.clusterConfigYAML(c), nil)\n\tldr.KeepstorePath = keepstoreconfig.Name()\n\tcAfter, err := ldr.Load()\n\tc.Assert(err, check.IsNil)\n\n\treturn cBefore.Clusters[\"zzzzz\"].Volumes, cAfter.Clusters[\"zzzzz\"].Volumes\n}\n\n// Return the log messages emitted when loading keepstoreYAML along\n// with clusterConfigForKeepstoreMigrationTest.\nfunc (s *KeepstoreMigrationSuite) logsWithKeepstoreConfig(c *check.C, keepstoreYAML string) string {\n\tvar logs bytes.Buffer\n\n\tkeepstoreconfig, err := ioutil.TempFile(\"\", \"\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(keepstoreconfig.Name())\n\tio.WriteString(keepstoreconfig, keepstoreYAML)\n\n\tldr := testLoader(c, s.clusterConfigYAML(c), &logs)\n\tldr.KeepstorePath = keepstoreconfig.Name()\n\t_, err = ldr.Load()\n\tc.Assert(err, check.IsNil)\n\n\treturn logs.String()\n}\n\nfunc (s *KeepstoreMigrationSuite) clusterConfigYAML(c *check.C) string {\n\thostname, err := os.Hostname()\n\tc.Assert(err, check.IsNil)\n\n\ttmpl := template.Must(template.New(\"config\").Parse(clusterConfigForKeepstoreMigrationTest))\n\n\tvar clusterconfigdata bytes.Buffer\n\terr = 
tmpl.Execute(&clusterconfigdata, map[string]interface{}{\n\t\t\"hostname\":   hostname,\n\t\t\"controller\": os.Getenv(\"ARVADOS_API_HOST\"),\n\t})\n\tc.Assert(err, check.IsNil)\n\n\treturn clusterconfigdata.String()\n}\n\n// Return the uuids of volumes that appear in \"after\" but not\n// \"before\".\n//\n// Assert the returned slice has at least minAdded entries.\nfunc (s *KeepstoreMigrationSuite) findAddedVolumes(c *check.C, before, after map[string]arvados.Volume, minAdded int) (uuids []string) {\n\tfor uuid := range after {\n\t\tif _, ok := before[uuid]; !ok {\n\t\t\tuuids = append(uuids, uuid)\n\t\t}\n\t}\n\tif len(uuids) < minAdded {\n\t\tc.Assert(uuids, check.HasLen, minAdded)\n\t}\n\treturn\n}\n\nfunc (s *KeepstoreMigrationSuite) getTestKeepstorePortAndMatchingVolumeUUID(c *check.C) (int, string) {\n\tfor port, ks := range s.ksByPort {\n\t\tc.Assert(ks.UUID, check.HasLen, 27)\n\t\treturn port, \"zzzzz-nyw5e-\" + ks.UUID[12:]\n\t}\n\tc.Fatal(\"s.ksByPort is empty\")\n\treturn 0, \"\"\n}\n\nfunc (s *KeepstoreMigrationSuite) TestKeepServiceIsMe(c *check.C) {\n\tfor i, trial := range []struct {\n\t\tmatch       bool\n\t\thostname    string\n\t\tlisten      string\n\t\tserviceHost string\n\t\tservicePort int\n\t}{\n\t\t{true, \"keep0\", \"keep0\", \"keep0\", 80},\n\t\t{true, \"keep0\", \"[::1]:http\", \"keep0\", 80},\n\t\t{true, \"keep0\", \"[::]:http\", \"keep0\", 80},\n\t\t{true, \"keep0\", \"keep0:25107\", \"keep0\", 25107},\n\t\t{true, \"keep0\", \":25107\", \"keep0\", 25107},\n\t\t{true, \"keep0.domain\", \":25107\", \"keep0.domain.example\", 25107},\n\t\t{true, \"keep0.domain.example\", \":25107\", \"keep0.domain.example\", 25107},\n\t\t{true, \"keep0\", \":25107\", \"keep0.domain.example\", 25107},\n\t\t{true, \"keep0\", \":25107\", \"Keep0.domain.example\", 25107},\n\t\t{true, \"keep0\", \":http\", \"keep0.domain.example\", 80},\n\t\t{true, \"keep0\", \":25107\", \"localhost\", 25107},\n\t\t{true, \"keep0\", \":25107\", \"::1\", 25107},\n\t\t{false, \"keep0\", \":25107\", \"keep0\", 1111},              // different port\n\t\t{false, \"keep0\", \":25107\", \"localhost\", 1111},          // different port\n\t\t{false, \"keep0\", \":http\", \"keep0.domain.example\", 443}, // different port\n\t\t{false, \"keep0\", \":bogussss\", \"keep0\", 25107},          // unresolvable port\n\t\t{false, \"keep0\", \":25107\", \"keep1\", 25107},             // different hostname\n\t\t{false, \"keep1\", \":25107\", \"keep10\", 25107},            // different hostname (prefix, but not on a \".\" boundary)\n\t} {\n\t\tc.Check(keepServiceIsMe(arvados.KeepService{ServiceHost: trial.serviceHost, ServicePort: trial.servicePort}, trial.hostname, trial.listen), check.Equals, trial.match, check.Commentf(\"trial #%d: %#v\", i, trial))\n\t}\n}\n"
  },
  {
    "path": "lib/config/deprecated_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage config\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Configured at: sdk/python/tests/run_test_server.py\nconst TestServerManagementToken = \"e687950a23c3a9bceec28c6223a06c79\"\n\nfunc testLoadLegacyConfig(content []byte, mungeFlag string, c *check.C) (*arvados.Cluster, error) {\n\ttmpfile, err := ioutil.TempFile(\"\", \"example\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer os.Remove(tmpfile.Name())\n\n\tif _, err := tmpfile.Write(content); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := tmpfile.Close(); err != nil {\n\t\treturn nil, err\n\t}\n\tflags := flag.NewFlagSet(\"test\", flag.ExitOnError)\n\tldr := testLoader(c, \"Clusters: {zzzzz: {}}\", nil)\n\tldr.SetupFlags(flags)\n\targs := ldr.MungeLegacyConfigArgs(ldr.Logger, []string{\"-config\", tmpfile.Name()}, mungeFlag)\n\terr = flags.Parse(args)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(flags.NArg(), check.Equals, 0)\n\tcfg, err := ldr.Load()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn cluster, nil\n}\n\nfunc (s *LoadSuite) TestOldEmailConfiguration(c *check.C) {\n\tlogs := checkEquivalent(c, `\nClusters:\n z1111:\n  Mail:\n    SendUserSetupNotificationEmail: false\n    SupportEmailAddress: \"support@example.invalid\"\n`, `\nClusters:\n z1111:\n  Users:\n    SendUserSetupNotificationEmail: false\n    SupportEmailAddress: \"support@example.invalid\"\n`)\n\tc.Check(logs, check.Matches, `(?ms).*deprecated or unknown config entry: .*Mail\\.SendUserSetupNotificationEmail.*`)\n\tc.Check(logs, check.Matches, `(?ms).*deprecated or unknown config entry: .*Mail\\.SupportEmailAddress.*`)\n\tc.Check(logs, check.Matches, `(?ms).*using your old config key Mail\\.SendUserSetupNotificationEmail -- but you should rename it to Users\\.SendUserSetupNotificationEmail.*`)\n\tc.Check(logs, check.Matches, `(?ms).*using your old config key Mail\\.SupportEmailAddress -- but you should rename it to Users\\.SupportEmailAddress.*`)\n}\n\nfunc (s *LoadSuite) TestLegacyVolumeDriverParameters(c *check.C) {\n\tlogs := checkEquivalent(c, `\nClusters:\n z1111:\n  Volumes:\n   z1111-nyw5e-aaaaaaaaaaaaaaa:\n    Driver: S3\n    DriverParameters:\n     AccessKey: exampleaccesskey\n     SecretKey: examplesecretkey\n     Region: foobar\n     ReadTimeout: 1200s\n`, `\nClusters:\n z1111:\n  Volumes:\n   z1111-nyw5e-aaaaaaaaaaaaaaa:\n    Driver: S3\n    DriverParameters:\n     AccessKeyID: exampleaccesskey\n     SecretAccessKey: examplesecretkey\n     Region: foobar\n     ReadTimeout: 1200s\n`)\n\tc.Check(logs, check.Matches, `(?ms).*deprecated or unknown config entry: .*AccessKey.*`)\n\tc.Check(logs, check.Matches, `(?ms).*deprecated or unknown config entry: .*SecretKey.*`)\n\tc.Check(logs, check.Matches, `(?ms).*using your old config keys z1111\\.Volumes\\.z1111-nyw5e-aaaaaaaaaaaaaaa\\.DriverParameters\\.AccessKey/SecretKey -- but you should rename them to AccessKeyID/SecretAccessKey.*`)\n\n\t_, err := testLoader(c, `\nClusters:\n z1111:\n  Volumes:\n   z1111-nyw5e-aaaaaaaaaaaaaaa:\n    Driver: S3\n    DriverParameters:\n     AccessKey: exampleaccesskey\n     SecretKey: examplesecretkey\n     AccessKeyID: exampleaccesskey\n`, nil).Load()\n\tc.Check(err, check.ErrorMatches, `(?ms).*cannot use 
.*SecretKey.*and.*SecretAccessKey.*in z1111.Volumes.z1111-nyw5e-aaaaaaaaaaaaaaa.DriverParameters.*`)\n}\n\nfunc (s *LoadSuite) TestDeprecatedNodeProfilesToServices(c *check.C) {\n\thostname, err := os.Hostname()\n\tc.Assert(err, check.IsNil)\n\tcheckEquivalent(c, `\nClusters:\n z1111:\n  NodeProfiles:\n   \"*\":\n    arvados-controller:\n     listen: \":9004\"\n   `+hostname+`:\n    arvados-api-server:\n     listen: \":8000\"\n   dispatch-host:\n    arvados-dispatch-cloud:\n     listen: \":9006\"\n`, `\nClusters:\n z1111:\n  Services:\n   RailsAPI:\n    InternalURLs:\n     \"http://localhost:8000\": {}\n   Controller:\n    InternalURLs:\n     \"http://localhost:9004\": {}\n   DispatchCloud:\n    InternalURLs:\n     \"http://dispatch-host:9006\": {}\n  NodeProfiles:\n   \"*\":\n    arvados-controller:\n     listen: \":9004\"\n   `+hostname+`:\n    arvados-api-server:\n     listen: \":8000\"\n   dispatch-host:\n    arvados-dispatch-cloud:\n     listen: \":9006\"\n`)\n}\n\nfunc (s *LoadSuite) TestDeprecatedLoginBackend(c *check.C) {\n\tcheckEquivalent(c, `\nClusters:\n z1111:\n  Login:\n   GoogleClientID: aaaa\n   GoogleClientSecret: bbbb\n   GoogleAlternateEmailAddresses: true\n`, `\nClusters:\n z1111:\n  Login:\n   Google:\n    Enable: true\n    ClientID: aaaa\n    ClientSecret: bbbb\n    AlternateEmailAddresses: true\n`)\n\tcheckEquivalent(c, `\nClusters:\n z1111:\n  Login:\n   ProviderAppID: aaaa\n   ProviderAppSecret: bbbb\n`, `\nClusters:\n z1111:\n  Login:\n   SSO:\n    Enable: true\n    ProviderAppID: aaaa\n    ProviderAppSecret: bbbb\n`)\n}\n\nfunc (s *LoadSuite) TestLegacyKeepWebConfig(c *check.C) {\n\tcontent := []byte(`\n{\n\t\"Client\": {\n\t\t\"Scheme\": \"\",\n\t\t\"APIHost\": \"example.com\",\n\t\t\"AuthToken\": \"abcdefg\",\n\t},\n\t\"Listen\": \":80\",\n\t\"AnonymousTokens\": [\n\t\t\"anonusertoken\"\n\t],\n\t\"AttachmentOnlyHost\": \"download.example.com\",\n\t\"TrustAllContent\": true,\n\t\"Cache\": {\n\t\t\"TTL\": \"1m\",\n\t\t\"UUIDTTL\": \"1s\",\n\t\t\"MaxCollectionEntries\": 42,\n\t\t\"MaxCollectionBytes\": 1234567890,\n\t\t\"MaxUUIDEntries\": 100\n\t},\n\t\"ManagementToken\": \"xyzzy\"\n}\n`)\n\tcluster, err := testLoadLegacyConfig(content, \"-legacy-keepweb-config\", c)\n\tc.Assert(err, check.IsNil)\n\n\tc.Check(cluster.Services.Controller.ExternalURL, check.Equals, arvados.URL{Scheme: \"https\", Host: \"example.com\", Path: \"/\"})\n\tc.Check(cluster.SystemRootToken, check.Equals, \"abcdefg\")\n\n\tc.Check(cluster.Collections.WebDAVCache.TTL, check.Equals, arvados.Duration(60*time.Second))\n\tc.Check(cluster.Collections.WebDAVCache.MaxCollectionBytes, check.Equals, arvados.ByteSize(1234567890))\n\n\tc.Check(cluster.Services.WebDAVDownload.ExternalURL, check.Equals, arvados.URL{Host: \"download.example.com\", Path: \"/\"})\n\tc.Check(cluster.Services.WebDAVDownload.InternalURLs[arvados.URL{Host: \":80\"}], check.NotNil)\n\tc.Check(cluster.Services.WebDAV.InternalURLs[arvados.URL{Host: \":80\"}], check.NotNil)\n\n\tc.Check(cluster.Collections.TrustAllContent, check.Equals, true)\n\tc.Check(cluster.Users.AnonymousUserToken, check.Equals, \"anonusertoken\")\n\tc.Check(cluster.ManagementToken, check.Equals, \"xyzzy\")\n}\n\n// Tests fix for https://dev.arvados.org/issues/15642\nfunc (s *LoadSuite) TestLegacyKeepWebConfigDoesntDisableMissingItems(c *check.C) {\n\tcontent := []byte(`\n{\n\t\"Client\": {\n\t\t\"Scheme\": \"\",\n\t\t\"APIHost\": \"example.com\",\n\t\t\"AuthToken\": \"abcdefg\",\n\t}\n}\n`)\n\tcluster, err := testLoadLegacyConfig(content, 
\"-legacy-keepweb-config\", c)\n\tc.Assert(err, check.IsNil)\n\t// The resulting ManagementToken should be the one set up on the test server.\n\tc.Check(cluster.ManagementToken, check.Equals, TestServerManagementToken)\n}\n\nfunc (s *LoadSuite) TestLegacyKeepproxyConfig(c *check.C) {\n\tf := \"-legacy-keepproxy-config\"\n\tcontent := []byte(fmtKeepproxyConfig(\"\", true))\n\tcluster, err := testLoadLegacyConfig(content, f, c)\n\n\tc.Assert(err, check.IsNil)\n\tc.Assert(cluster, check.NotNil)\n\tc.Check(cluster.Services.Controller.ExternalURL, check.Equals, arvados.URL{Scheme: \"https\", Host: \"example.com\", Path: \"/\"})\n\tc.Check(cluster.SystemRootToken, check.Equals, \"abcdefg\")\n\tc.Check(cluster.ManagementToken, check.Equals, \"xyzzy\")\n\tc.Check(cluster.Services.Keepproxy.InternalURLs[arvados.URL{Host: \":80\"}], check.Equals, arvados.ServiceInstance{})\n\tc.Check(cluster.Collections.DefaultReplication, check.Equals, 0)\n\tc.Check(cluster.API.KeepServiceRequestTimeout.String(), check.Equals, \"15s\")\n\tc.Check(cluster.SystemLogs.LogLevel, check.Equals, \"debug\")\n\n\tcontent = []byte(fmtKeepproxyConfig(\"\", false))\n\tcluster, err = testLoadLegacyConfig(content, f, c)\n\tc.Check(err, check.IsNil)\n\tc.Check(cluster.SystemLogs.LogLevel, check.Equals, \"info\")\n\n\tcontent = []byte(fmtKeepproxyConfig(`\"DisableGet\": true,`, true))\n\t_, err = testLoadLegacyConfig(content, f, c)\n\tc.Check(err, check.NotNil)\n\n\tcontent = []byte(fmtKeepproxyConfig(`\"DisablePut\": true,`, true))\n\t_, err = testLoadLegacyConfig(content, f, c)\n\tc.Check(err, check.NotNil)\n\n\tcontent = []byte(fmtKeepproxyConfig(`\"PIDFile\": \"test\",`, true))\n\t_, err = testLoadLegacyConfig(content, f, c)\n\tc.Check(err, check.NotNil)\n\n\tcontent = []byte(fmtKeepproxyConfig(`\"DisableGet\": false, \"DisablePut\": false, \"PIDFile\": \"\",`, true))\n\t_, err = testLoadLegacyConfig(content, f, c)\n\tc.Check(err, check.IsNil)\n}\n\nfunc fmtKeepproxyConfig(param string, debugLog bool) string {\n\treturn fmt.Sprintf(`\n{\n\t\"Client\": {\n\t\t\"Scheme\": \"\",\n\t\t\"APIHost\": \"example.com\",\n\t\t\"AuthToken\": \"abcdefg\",\n\t\t\"Insecure\": false\n\t},\n\t\"Listen\": \":80\",\n\t\"DefaultReplicas\": 0,\n\t\"Timeout\": \"15s\",\n\t\"Debug\": %t,\n\t%s\n\t\"ManagementToken\": \"xyzzy\"\n}\n`, debugLog, param)\n}\n\nfunc (s *LoadSuite) TestLegacyKeepBalanceConfig(c *check.C) {\n\tf := \"-legacy-keepbalance-config\"\n\tcontent := []byte(fmtKeepBalanceConfig(\"\"))\n\tcluster, err := testLoadLegacyConfig(content, f, c)\n\n\tc.Assert(err, check.IsNil)\n\tc.Assert(cluster, check.NotNil)\n\tc.Check(cluster.ManagementToken, check.Equals, \"xyzzy\")\n\tc.Check(cluster.Services.Keepbalance.InternalURLs[arvados.URL{Host: \":80\"}], check.Equals, arvados.ServiceInstance{})\n\tc.Check(cluster.Collections.BalanceCollectionBuffers, check.Equals, 1000)\n\tc.Check(cluster.Collections.BalanceCollectionBatch, check.Equals, 100000)\n\tc.Check(cluster.Collections.BalancePeriod.String(), check.Equals, \"10m\")\n\tc.Check(cluster.Collections.BlobMissingReport, check.Equals, \"testfile\")\n\tc.Check(cluster.API.KeepServiceRequestTimeout.String(), check.Equals, \"30m\")\n\n\tcontent = []byte(fmtKeepBalanceConfig(`\"KeepServiceTypes\":[\"disk\"],`))\n\t_, err = testLoadLegacyConfig(content, f, c)\n\tc.Check(err, check.IsNil)\n\n\tcontent = []byte(fmtKeepBalanceConfig(`\"KeepServiceTypes\":[],`))\n\t_, err = testLoadLegacyConfig(content, f, c)\n\tc.Check(err, check.IsNil)\n\n\tcontent = 
[]byte(fmtKeepBalanceConfig(`\"KeepServiceTypes\":[\"proxy\"],`))\n\t_, err = testLoadLegacyConfig(content, f, c)\n\tc.Check(err, check.NotNil)\n\n\tcontent = []byte(fmtKeepBalanceConfig(`\"KeepServiceTypes\":[\"disk\", \"proxy\"],`))\n\t_, err = testLoadLegacyConfig(content, f, c)\n\tc.Check(err, check.NotNil)\n\n\tcontent = []byte(fmtKeepBalanceConfig(`\"KeepServiceList\":{},`))\n\t_, err = testLoadLegacyConfig(content, f, c)\n\tc.Check(err, check.NotNil)\n}\n\nfunc fmtKeepBalanceConfig(param string) string {\n\treturn fmt.Sprintf(`\n{\n\t\"Client\": {\n\t\t\"Scheme\": \"\",\n\t\t\"APIHost\": \"example.com\",\n\t\t\"AuthToken\": \"abcdefg\",\n\t\t\"Insecure\": false\n\t},\n\t\"Listen\": \":80\",\n\t%s\n\t\"RunPeriod\": \"10m\",\n\t\"CollectionBatchSize\": 100000,\n\t\"CollectionBuffers\": 1000,\n\t\"RequestTimeout\": \"30m\",\n\t\"ManagementToken\": \"xyzzy\",\n\t\"LostBlocksFile\": \"testfile\"\n}\n`, param)\n}\n\nfunc (s *LoadSuite) TestDeprecatedCUDA(c *check.C) {\n\tcheckEquivalent(c, `\nClusters:\n z1111:\n  Containers:\n    LSF:\n      BsubCUDAArguments: [\"-gpu\"]\n`, `\nClusters:\n z1111:\n  Containers:\n    LSF:\n      BsubGPUArguments: [\"-gpu\"]\n`)\n}\n"
  },
  {
    "path": "lib/config/export.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage config\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// ExportJSON writes a JSON object with the safe (non-secret) portions\n// of the cluster config to w.\nfunc ExportJSON(w io.Writer, cluster *arvados.Cluster) error {\n\tbuf, err := json.Marshal(cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar m map[string]interface{}\n\terr = json.Unmarshal(buf, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// ClusterID is not marshalled by default (see `json:\"-\"`).\n\t// Add it back here so it is included in the exported config.\n\tm[\"ClusterID\"] = cluster.ClusterID\n\terr = redactUnsafe(m, \"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.NewEncoder(w).Encode(m)\n}\n\n// whitelist classifies configs as safe/unsafe to reveal through the API\n// endpoint. Note that endpoint does not require authentication.\n//\n// Every config entry must either be listed explicitly here along with\n// all of its parent keys (e.g., \"API\" + \"API.RequestTimeout\"), or\n// have an ancestor listed as false (e.g.,\n// \"PostgreSQL.Connection.password\" has an ancestor\n// \"PostgreSQL.Connection\" with a false value). Otherwise, it is a bug\n// which should be caught by tests.\n//\n// Example: API.RequestTimeout is safe because whitelist[\"API\"] == and\n// whitelist[\"API.RequestTimeout\"] == true.\n//\n// Example: PostgreSQL.Connection.password is not safe because\n// whitelist[\"PostgreSQL.Connection\"] == false.\n//\n// Example: PostgreSQL.BadKey would cause an error because\n// whitelist[\"PostgreSQL\"] isn't false, and neither\n// whitelist[\"PostgreSQL.BadKey\"] nor whitelist[\"PostgreSQL.*\"]\n// exists.\nvar whitelist = map[string]bool{\n\t// | sort -t'\"' -k2,2\n\t\"API\":                                                 true,\n\t\"API.AsyncPermissionsUpdateInterval\":                  false,\n\t\"API.DisabledAPIs\":                                    false,\n\t\"API.FreezeProjectRequiresDescription\":                true,\n\t\"API.FreezeProjectRequiresProperties\":                 true,\n\t\"API.FreezeProjectRequiresProperties.*\":               true,\n\t\"API.KeepServiceRequestTimeout\":                       false,\n\t\"API.LockBeforeUpdate\":                                false,\n\t\"API.MaxConcurrentRailsRequests\":                      false,\n\t\"API.MaxConcurrentRequests\":                           false,\n\t\"API.MaxGatewayTunnels\":                               false,\n\t\"API.MaxIndexDatabaseRead\":                            false,\n\t\"API.MaxItemsPerResponse\":                             true,\n\t\"API.MaxKeepBlobBuffers\":                              false,\n\t\"API.MaxQueuedRequests\":                               false,\n\t\"API.MaxQueueTimeForLockRequests\":                     false,\n\t\"API.MaxRequestAmplification\":                         false,\n\t\"API.MaxRequestSize\":                                  true,\n\t\"API.MaxTokenLifetime\":                                false,\n\t\"API.RequestTimeout\":                                  true,\n\t\"API.SendTimeout\":                                     true,\n\t\"API.UnfreezeProjectRequiresAdmin\":                    true,\n\t\"API.VocabularyPath\":                                  false,\n\t\"API.WebsocketClientEventQueue\":                       
false,\n\t\"API.WebsocketServerEventQueue\":                       false,\n\t\"AuditLogs\":                                           false,\n\t\"AuditLogs.MaxAge\":                                    false,\n\t\"AuditLogs.MaxDeleteBatch\":                            false,\n\t\"AuditLogs.UnloggedAttributes\":                        false,\n\t\"ClusterID\":                                           true,\n\t\"Collections\":                                         true,\n\t\"Collections.BalanceCollectionBatch\":                  false,\n\t\"Collections.BalanceCollectionBuffers\":                false,\n\t\"Collections.BalancePeriod\":                           false,\n\t\"Collections.BalancePullLimit\":                        false,\n\t\"Collections.BalanceTimeout\":                          false,\n\t\"Collections.BalanceTrashLimit\":                       false,\n\t\"Collections.BalanceUpdateLimit\":                      false,\n\t\"Collections.BlobDeleteConcurrency\":                   false,\n\t\"Collections.BlobMissingReport\":                       false,\n\t\"Collections.BlobReplicateConcurrency\":                false,\n\t\"Collections.BlobSigning\":                             true,\n\t\"Collections.BlobSigningKey\":                          false,\n\t\"Collections.BlobSigningTTL\":                          true,\n\t\"Collections.BlobTrash\":                               false,\n\t\"Collections.BlobTrashCheckInterval\":                  false,\n\t\"Collections.BlobTrashConcurrency\":                    false,\n\t\"Collections.BlobTrashLifetime\":                       false,\n\t\"Collections.CollectionVersioning\":                    true,\n\t\"Collections.DefaultReplication\":                      true,\n\t\"Collections.DefaultTrashLifetime\":                    true,\n\t\"Collections.ForwardSlashNameSubstitution\":            true,\n\t\"Collections.KeepproxyPermission\":                     false,\n\t\"Collections.ManagedProperties\":                       true,\n\t\"Collections.ManagedProperties.*\":                     true,\n\t\"Collections.ManagedProperties.*.*\":                   true,\n\t\"Collections.PreserveVersionIfIdle\":                   true,\n\t\"Collections.S3FolderObjects\":                         true,\n\t\"Collections.TrashSweepInterval\":                      false,\n\t\"Collections.TrustAllContent\":                         true,\n\t\"Collections.WebDAVCache\":                             false,\n\t\"Collections.WebDAVLogEvents\":                         false,\n\t\"Collections.WebDAVLogDownloadInterval\":               false,\n\t\"Collections.WebDAVOutputBuffer\":                      false,\n\t\"Collections.WebDAVPermission\":                        false,\n\t\"Containers\":                                          true,\n\t\"Containers.AlwaysUsePreemptibleInstances\":            true,\n\t\"Containers.CloudVMs\":                                 false,\n\t\"Containers.CrunchRunArgumentsList\":                   false,\n\t\"Containers.CrunchRunCommand\":                         false,\n\t\"Containers.DefaultKeepCacheRAM\":                      true,\n\t\"Containers.DispatchPrivateKey\":                       false,\n\t\"Containers.LocalKeepBlobBuffersPerVCPU\":              false,\n\t\"Containers.LocalKeepLogsToContainerLog\":              false,\n\t\"Containers.Logging\":                                  false,\n\t\"Containers.LogReuseDecisions\":                        false,\n\t\"Containers.LSF\":                                      
false,\n\t\"Containers.MaxDispatchAttempts\":                      false,\n\t\"Containers.MaximumPriceFactor\":                       true,\n\t\"Containers.MaxRetryAttempts\":                         true,\n\t\"Containers.MinRetryPeriod\":                           true,\n\t\"Containers.MaxRunningContainersPerInstance\":          true,\n\t\"Containers.PreemptiblePriceFactor\":                   false,\n\t\"Containers.ReserveExtraRAM\":                          true,\n\t\"Containers.RuntimeEngine\":                            true,\n\t\"Containers.ShellAccess\":                              true,\n\t\"Containers.ShellAccess.Admin\":                        true,\n\t\"Containers.ShellAccess.User\":                         true,\n\t\"Containers.SLURM\":                                    false,\n\t\"Containers.StaleLockTimeout\":                         false,\n\t\"Containers.SupportedDockerImageFormats\":              true,\n\t\"Containers.SupportedDockerImageFormats.*\":            true,\n\t\"InstanceTypes\":                                       true,\n\t\"InstanceTypes.*\":                                     true,\n\t\"InstanceTypes.*.*\":                                   true,\n\t\"InstanceTypes.*.*.*\":                                 true,\n\t\"Login\":                                               true,\n\t\"Login.Google\":                                        true,\n\t\"Login.Google.AlternateEmailAddresses\":                false,\n\t\"Login.Google.AuthenticationRequestParameters\":        false,\n\t\"Login.Google.ClientID\":                               false,\n\t\"Login.Google.ClientSecret\":                           false,\n\t\"Login.Google.Enable\":                                 true,\n\t\"Login.IssueTrustedTokens\":                            false,\n\t\"Login.LDAP\":                                          true,\n\t\"Login.LDAP.AppendDomain\":                             false,\n\t\"Login.LDAP.EmailAttribute\":                           false,\n\t\"Login.LDAP.Enable\":                                   true,\n\t\"Login.LDAP.InsecureTLS\":                              false,\n\t\"Login.LDAP.MinTLSVersion\":                            false,\n\t\"Login.LDAP.SearchAttribute\":                          false,\n\t\"Login.LDAP.SearchBase\":                               false,\n\t\"Login.LDAP.SearchBindPassword\":                       false,\n\t\"Login.LDAP.SearchBindUser\":                           false,\n\t\"Login.LDAP.SearchFilters\":                            false,\n\t\"Login.LDAP.StartTLS\":                                 false,\n\t\"Login.LDAP.StripDomain\":                              false,\n\t\"Login.LDAP.URL\":                                      false,\n\t\"Login.LDAP.UsernameAttribute\":                        false,\n\t\"Login.LoginCluster\":                                  true,\n\t\"Login.OpenIDConnect\":                                 true,\n\t\"Login.OpenIDConnect.AcceptAccessToken\":               false,\n\t\"Login.OpenIDConnect.AcceptAccessTokenScope\":          false,\n\t\"Login.OpenIDConnect.AuthenticationRequestParameters\": false,\n\t\"Login.OpenIDConnect.ClientID\":                        false,\n\t\"Login.OpenIDConnect.ClientSecret\":                    false,\n\t\"Login.OpenIDConnect.EmailClaim\":                      false,\n\t\"Login.OpenIDConnect.EmailVerifiedClaim\":              false,\n\t\"Login.OpenIDConnect.Enable\":                          true,\n\t\"Login.OpenIDConnect.Issuer\":                          
false,\n\t\"Login.OpenIDConnect.UsernameClaim\":                   false,\n\t\"Login.PAM\":                                           true,\n\t\"Login.PAM.DefaultEmailDomain\":                        false,\n\t\"Login.PAM.Enable\":                                    true,\n\t\"Login.PAM.Service\":                                   false,\n\t\"Login.RemoteTokenRefresh\":                            true,\n\t\"Login.Test\":                                          true,\n\t\"Login.Test.Enable\":                                   true,\n\t\"Login.Test.Users\":                                    false,\n\t\"Login.TokenLifetime\":                                 false,\n\t\"Login.TrustedClients\":                                false,\n\t\"Login.TrustPrivateNetworks\":                          false,\n\t\"ManagementToken\":                                     false,\n\t\"PostgreSQL\":                                          false,\n\t\"RemoteClusters\":                                      true,\n\t\"RemoteClusters.*\":                                    true,\n\t\"RemoteClusters.*.ActivateUsers\":                      true,\n\t\"RemoteClusters.*.Host\":                               true,\n\t\"RemoteClusters.*.Insecure\":                           true,\n\t\"RemoteClusters.*.Proxy\":                              true,\n\t\"RemoteClusters.*.Scheme\":                             true,\n\t\"Services\":                                            true,\n\t\"Services.*\":                                          true,\n\t\"Services.*.ExternalPortMax\":                          false,\n\t\"Services.*.ExternalPortMin\":                          false,\n\t\"Services.*.ExternalURL\":                              true,\n\t\"Services.*.InternalURLs\":                             false,\n\t\"StorageClasses\":                                      true,\n\t\"StorageClasses.*\":                                    true,\n\t\"StorageClasses.*.Default\":                            true,\n\t\"StorageClasses.*.Priority\":                           true,\n\t\"SystemLogs\":                                          false,\n\t\"SystemRootToken\":                                     false,\n\t\"TLS\":                                                 false,\n\t\"TLS.Certificate\":                                     false,\n\t\"TLS.Insecure\":                                        true,\n\t\"TLS.Key\":                                             false,\n\t\"Users\":                                               true,\n\t\"Users.ActivatedUsersAreVisibleToOthers\":              false,\n\t\"Users.ActivityLoggingPeriod\":                         false,\n\t\"Users.AdminNotifierEmailFrom\":                        false,\n\t\"Users.AnonymousUserToken\":                            true,\n\t\"Users.AutoAdminFirstUser\":                            false,\n\t\"Users.AutoAdminUserWithEmail\":                        false,\n\t\"Users.AutoSetupNewUsers\":                             false,\n\t\"Users.AutoSetupNewUsersWithVmUUID\":                   false,\n\t\"Users.AutoSetupUsernameBlacklist\":                    false,\n\t\"Users.CanCreateRoleGroups\":                           true,\n\t\"Users.EmailSubjectPrefix\":                            false,\n\t\"Users.NewInactiveUserNotificationRecipients\":         false,\n\t\"Users.NewUserNotificationRecipients\":                 false,\n\t\"Users.NewUsersAreActive\":                             false,\n\t\"Users.PreferDomainForUsername\":                       
false,\n\t\"Users.RoleGroupsVisibleToAll\":                        false,\n\t\"Users.SendUserSetupNotificationEmail\":                false,\n\t\"Users.SupportEmailAddress\":                           true,\n\t\"Users.SyncIgnoredGroups\":                             true,\n\t\"Users.SyncRequiredGroups\":                            true,\n\t\"Users.SyncUserAccounts\":                              true,\n\t\"Users.SyncUserAPITokens\":                             true,\n\t\"Users.SyncUserGroups\":                                true,\n\t\"Users.SyncUserSSHKeys\":                               true,\n\t\"Users.UserNotifierEmailBcc\":                          false,\n\t\"Users.UserNotifierEmailFrom\":                         false,\n\t\"Users.UserProfileNotificationAddress\":                false,\n\t\"Users.UserSetupMailText\":                             false,\n\t\"Volumes\":                                             true,\n\t\"Volumes.*\":                                           true,\n\t\"Volumes.*.*\":                                         false,\n\t\"Volumes.*.AccessViaHosts\":                            true,\n\t\"Volumes.*.AccessViaHosts.*\":                          true,\n\t\"Volumes.*.AccessViaHosts.*.ReadOnly\":                 true,\n\t\"Volumes.*.ReadOnly\":                                  true,\n\t\"Volumes.*.Replication\":                               true,\n\t\"Volumes.*.StorageClasses\":                            true,\n\t\"Volumes.*.StorageClasses.*\":                          true,\n\t\"Workbench\":                                           true,\n\t\"Workbench.ActivationContactLink\":                     false,\n\t\"Workbench.APIClientConnectTimeout\":                   true,\n\t\"Workbench.APIClientReceiveTimeout\":                   true,\n\t\"Workbench.APIResponseCompression\":                    true,\n\t\"Workbench.ApplicationMimetypesWithViewIcon\":          true,\n\t\"Workbench.ApplicationMimetypesWithViewIcon.*\":        true,\n\t\"Workbench.ArvadosDocsite\":                            true,\n\t\"Workbench.ArvadosPublicDataDocURL\":                   true,\n\t\"Workbench.BannerUUID\":                                true,\n\t\"Workbench.DefaultOpenIdPrefix\":                       false,\n\t\"Workbench.DisableSharingURLsUI\":                      true,\n\t\"Workbench.EnableGettingStartedPopup\":                 true,\n\t\"Workbench.EnablePublicProjectsPage\":                  true,\n\t\"Workbench.FileViewersConfigURL\":                      true,\n\t\"Workbench.IdleTimeout\":                               true,\n\t\"Workbench.InactivePageHTML\":                          true,\n\t\"Workbench.LogViewerMaxBytes\":                         true,\n\t\"Workbench.MultiSiteSearch\":                           true,\n\t\"Workbench.ProfilingEnabled\":                          true,\n\t\"Workbench.Repositories\":                              false,\n\t\"Workbench.RepositoryCache\":                           false,\n\t\"Workbench.RunningJobLogRecordsToFetch\":               true,\n\t\"Workbench.ShowRecentCollectionsOnDashboard\":          true,\n\t\"Workbench.ShowUserAgreementInline\":                   true,\n\t\"Workbench.ShowUserNotifications\":                     true,\n\t\"Workbench.SiteName\":                                  true,\n\t\"Workbench.SSHHelpHostSuffix\":                         true,\n\t\"Workbench.SSHHelpPageHTML\":                           true,\n\t\"Workbench.Theme\":                                     
true,\n\t\"Workbench.UserProfileFormFields\":                     true,\n\t\"Workbench.UserProfileFormFields.*\":                   true,\n\t\"Workbench.UserProfileFormFields.*.*\":                 true,\n\t\"Workbench.UserProfileFormFields.*.*.*\":               true,\n\t\"Workbench.UserProfileFormMessage\":                    true,\n\t\"Workbench.WelcomePageHTML\":                           true,\n}\n\nfunc redactUnsafe(m map[string]interface{}, mPrefix, lookupPrefix string) error {\n\tvar errs []string\n\tfor k, v := range m {\n\t\tlookupKey := k\n\t\tsafe, ok := whitelist[lookupPrefix+k]\n\t\tif !ok {\n\t\t\tlookupKey = \"*\"\n\t\t\tsafe, ok = whitelist[lookupPrefix+\"*\"]\n\t\t}\n\t\tif !ok {\n\t\t\terrs = append(errs, fmt.Sprintf(\"config bug: key %q not in whitelist map\", lookupPrefix+k))\n\t\t\tcontinue\n\t\t}\n\t\tif !safe {\n\t\t\tdelete(m, k)\n\t\t\tcontinue\n\t\t}\n\t\tif v, ok := v.(map[string]interface{}); ok {\n\t\t\terr := redactUnsafe(v, mPrefix+k+\".\", lookupPrefix+lookupKey+\".\")\n\t\t\tif err != nil {\n\t\t\t\terrs = append(errs, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\tif len(errs) > 0 {\n\t\treturn errors.New(strings.Join(errs, \"\\n\"))\n\t}\n\treturn nil\n}\n"
  },
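  {
    "path": "lib/config/export_usage_sketch.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Illustrative usage sketch added alongside export.go for\n// documentation purposes: it is not part of the upstream source\n// tree, and the file name and function name are hypothetical. It\n// shows how a caller might use ExportJSON to emit the redacted\n// (non-secret) portion of a cluster config; the cluster value would\n// typically come from Loader.Load() followed by Config.GetCluster().\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\nfunc exportJSONSketch(cluster *arvados.Cluster) {\n\tvar buf bytes.Buffer\n\tif err := ExportJSON(&buf, cluster); err != nil {\n\t\t// An error here usually means a newly added config key\n\t\t// is missing from the whitelist map in export.go.\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\t// buf now holds a JSON object containing only whitelisted\n\t// keys; secrets such as ManagementToken have been redacted.\n\tfmt.Println(buf.String())\n}\n"
  },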
  {
    "path": "lib/config/export_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"regexp\"\n\t\"strings\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&ExportSuite{})\n\ntype ExportSuite struct{}\n\nfunc (s *ExportSuite) TestExport(c *check.C) {\n\tconfdata := strings.Replace(string(DefaultYAML), \"SAMPLE\", \"12345\", -1)\n\tcfg, err := testLoader(c, confdata, nil).Load()\n\tc.Assert(err, check.IsNil)\n\tcluster, err := cfg.GetCluster(\"xxxxx\")\n\tc.Assert(err, check.IsNil)\n\tcluster.ManagementToken = \"abcdefg\"\n\n\tvar exported bytes.Buffer\n\terr = ExportJSON(&exported, cluster)\n\tc.Check(err, check.IsNil)\n\tif err != nil {\n\t\tc.Logf(\"If all the new keys are safe, add these to whitelist in export.go:\")\n\t\tfor _, k := range regexp.MustCompile(`\"[^\"]*\"`).FindAllString(err.Error(), -1) {\n\t\t\tc.Logf(\"\\t%q: true,\", strings.Replace(k, `\"`, \"\", -1))\n\t\t}\n\t}\n\tvar exportedStr = exported.String()\n\tc.Check(exportedStr, check.Matches, `(?ms).*ClusterID\":\"xxxxx.*`)\n\tc.Check(exportedStr, check.Not(check.Matches), `(?ms).*abcdefg.*`)\n}\n"
  },
  {
    "path": "lib/config/load.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"crypto/sha256\"\n\t_ \"embed\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"dario.cat/mergo\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/ssh\"\n\t\"golang.org/x/sys/unix\"\n)\n\n//go:embed config.default.yml\nvar DefaultYAML []byte\n\nvar ErrNoClustersDefined = errors.New(\"config does not define any clusters\")\n\ntype Loader struct {\n\tStdin          io.Reader\n\tLogger         logrus.FieldLogger\n\tSkipDeprecated bool // Don't load deprecated config keys\n\tSkipLegacy     bool // Don't load legacy config files\n\tSkipAPICalls   bool // Don't do checks that call RailsAPI/controller\n\n\tPath                    string\n\tKeepstorePath           string\n\tKeepWebPath             string\n\tCrunchDispatchSlurmPath string\n\tWebsocketPath           string\n\tKeepproxyPath           string\n\tKeepBalancePath         string\n\n\tconfigdata []byte\n\t// UTC time for configdata: either the modtime of the file we\n\t// read configdata from, or the time when we read configdata\n\t// from a pipe.\n\tsourceTimestamp time.Time\n\t// UTC time when configdata was read.\n\tloadTimestamp time.Time\n}\n\n// NewLoader returns a new Loader with Stdin and Logger set to the\n// given values, and all config paths set to their default values.\nfunc NewLoader(stdin io.Reader, logger logrus.FieldLogger) *Loader {\n\tldr := &Loader{Stdin: stdin, Logger: logger}\n\t// Calling SetupFlags on a throwaway FlagSet has the side\n\t// effect of assigning default values to the configurable\n\t// fields.\n\tldr.SetupFlags(flag.NewFlagSet(\"\", flag.ContinueOnError))\n\treturn ldr\n}\n\n// SetupFlags configures a flagset so arguments like -config X can be\n// used to change the loader's Path fields.\n//\n//\tldr := NewLoader(os.Stdin, logrus.New())\n//\tflagset := flag.NewFlagSet(\"\", flag.ContinueOnError)\n//\tldr.SetupFlags(flagset)\n//\t// ldr.Path == \"/etc/arvados/config.yml\"\n//\tflagset.Parse([]string{\"-config\", \"/tmp/c.yaml\"})\n//\t// ldr.Path == \"/tmp/c.yaml\"\nfunc (ldr *Loader) SetupFlags(flagset *flag.FlagSet) {\n\tflagset.StringVar(&ldr.Path, \"config\", arvados.DefaultConfigFile, \"Site configuration `file` (default may be overridden by setting an ARVADOS_CONFIG environment variable)\")\n\tif !ldr.SkipLegacy {\n\t\tflagset.StringVar(&ldr.KeepstorePath, \"legacy-keepstore-config\", defaultKeepstoreConfigPath, \"Legacy keepstore configuration `file`\")\n\t\tflagset.StringVar(&ldr.KeepWebPath, \"legacy-keepweb-config\", defaultKeepWebConfigPath, \"Legacy keep-web configuration `file`\")\n\t\tflagset.StringVar(&ldr.CrunchDispatchSlurmPath, \"legacy-crunch-dispatch-slurm-config\", defaultCrunchDispatchSlurmConfigPath, \"Legacy crunch-dispatch-slurm configuration `file`\")\n\t\tflagset.StringVar(&ldr.WebsocketPath, \"legacy-ws-config\", defaultWebsocketConfigPath, \"Legacy arvados-ws configuration `file`\")\n\t\tflagset.StringVar(&ldr.KeepproxyPath, \"legacy-keepproxy-config\", defaultKeepproxyConfigPath, \"Legacy keepproxy configuration `file`\")\n\t\tflagset.StringVar(&ldr.KeepBalancePath, \"legacy-keepbalance-config\", defaultKeepBalanceConfigPath, \"Legacy keep-balance 
configuration `file`\")\n\t\tflagset.BoolVar(&ldr.SkipLegacy, \"skip-legacy\", false, \"Don't load legacy config files\")\n\t}\n}\n\n// MungeLegacyConfigArgs checks args for a -config flag whose argument\n// is a regular file (or a symlink to one), but doesn't have a\n// top-level \"Clusters\" key and therefore isn't a valid cluster\n// configuration file. If it finds such a flag, it replaces -config\n// with legacyConfigArg (e.g., \"-legacy-keepstore-config\").\n//\n// This is used by programs that still need to accept \"-config\" as a\n// way to specify a per-component config file until their config has\n// been migrated.\n//\n// If any errors are encountered while reading or parsing a config\n// file, the given args are not munged. We presume the same errors\n// will be encountered again and reported later on when trying to load\n// cluster configuration from the same file, regardless of which\n// struct we end up using.\nfunc (ldr *Loader) MungeLegacyConfigArgs(lgr logrus.FieldLogger, args []string, legacyConfigArg string) []string {\n\tmunged := append([]string(nil), args...)\n\tfor i := 0; i < len(args); i++ {\n\t\tif !strings.HasPrefix(args[i], \"-\") || strings.SplitN(strings.TrimPrefix(args[i], \"-\"), \"=\", 2)[0] != \"config\" {\n\t\t\tcontinue\n\t\t}\n\t\tvar operand string\n\t\tif strings.Contains(args[i], \"=\") {\n\t\t\toperand = strings.SplitN(args[i], \"=\", 2)[1]\n\t\t} else if i+1 < len(args) && !strings.HasPrefix(args[i+1], \"-\") {\n\t\t\ti++\n\t\t\toperand = args[i]\n\t\t} else {\n\t\t\tcontinue\n\t\t}\n\t\tif fi, err := os.Stat(operand); err != nil || !fi.Mode().IsRegular() {\n\t\t\tcontinue\n\t\t}\n\t\tf, err := os.Open(operand)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdefer f.Close()\n\t\tbuf, err := ioutil.ReadAll(f)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tvar cfg arvados.Config\n\t\terr = yaml.Unmarshal(buf, &cfg)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif len(cfg.Clusters) == 0 {\n\t\t\tlgr.Warnf(\"%s is not a cluster config file -- interpreting %s as %s (please migrate your config!)\", operand, \"-config\", legacyConfigArg)\n\t\t\tif operand == args[i] {\n\t\t\t\tmunged[i-1] = legacyConfigArg\n\t\t\t} else {\n\t\t\t\tmunged[i] = legacyConfigArg + \"=\" + operand\n\t\t\t}\n\t\t}\n\t}\n\n\t// Disable legacy config loading for components other than the\n\t// one that was specified\n\tif legacyConfigArg != \"-legacy-keepstore-config\" {\n\t\tldr.KeepstorePath = \"\"\n\t}\n\tif legacyConfigArg != \"-legacy-crunch-dispatch-slurm-config\" {\n\t\tldr.CrunchDispatchSlurmPath = \"\"\n\t}\n\tif legacyConfigArg != \"-legacy-ws-config\" {\n\t\tldr.WebsocketPath = \"\"\n\t}\n\tif legacyConfigArg != \"-legacy-keepweb-config\" {\n\t\tldr.KeepWebPath = \"\"\n\t}\n\tif legacyConfigArg != \"-legacy-keepproxy-config\" {\n\t\tldr.KeepproxyPath = \"\"\n\t}\n\tif legacyConfigArg != \"-legacy-keepbalance-config\" {\n\t\tldr.KeepBalancePath = \"\"\n\t}\n\n\treturn munged\n}\n\nfunc (ldr *Loader) loadBytes(path string) (buf []byte, sourceTime, loadTime time.Time, err error) {\n\tloadTime = time.Now().UTC()\n\tif path == \"-\" {\n\t\tbuf, err = ioutil.ReadAll(ldr.Stdin)\n\t\tsourceTime = loadTime\n\t\treturn\n\t}\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn\n\t}\n\tsourceTime = fi.ModTime().UTC()\n\tbuf, err = ioutil.ReadAll(f)\n\treturn\n}\n\nfunc (ldr *Loader) Load() (*arvados.Config, error) {\n\tif ldr.configdata == nil {\n\t\tbuf, sourceTime, loadTime, err := 
ldr.loadBytes(ldr.Path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tldr.configdata = buf\n\t\tldr.sourceTimestamp = sourceTime\n\t\tldr.loadTimestamp = loadTime\n\t}\n\n\t// FIXME: We should reject YAML if the same key is used twice\n\t// in a map/object, like {foo: bar, foo: baz}. Maybe we'll get\n\t// this fixed free when we upgrade ghodss/yaml to a version\n\t// that uses go-yaml v3.\n\n\t// Load the config into a dummy map to get the cluster ID\n\t// keys, discarding the values; then set up defaults for each\n\t// cluster ID; then load the real config on top of the\n\t// defaults.\n\tvar dummy struct {\n\t\tClusters map[string]struct{}\n\t}\n\terr := yaml.Unmarshal(ldr.configdata, &dummy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(dummy.Clusters) == 0 {\n\t\treturn nil, ErrNoClustersDefined\n\t}\n\n\t// We can't merge deep structs here; instead, we unmarshal the\n\t// default & loaded config files into generic maps, merge\n\t// those, and then json-encode+decode the result into the\n\t// config struct type.\n\tvar merged map[string]interface{}\n\tfor id := range dummy.Clusters {\n\t\tvar src map[string]interface{}\n\t\terr = yaml.Unmarshal(bytes.Replace(DefaultYAML, []byte(\" xxxxx:\"), []byte(\" \"+id+\":\"), -1), &src)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"loading defaults for %s: %s\", id, err)\n\t\t}\n\t\terr = mergo.Merge(&merged, src, mergo.WithOverride)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"merging defaults for %s: %s\", id, err)\n\t\t}\n\t}\n\tvar src map[string]interface{}\n\terr = yaml.Unmarshal(ldr.configdata, &src)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"loading config data: %s\", err)\n\t}\n\tldr.logExtraKeys(merged, src, \"\")\n\tremoveSampleKeys(merged)\n\t// We merge the loaded config into the default, overriding any existing keys.\n\t// Make sure we do not override a default with a key that has a 'null' value.\n\tremoveNullKeys(src)\n\terr = mergo.Merge(&merged, src, mergo.WithOverride)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"merging config data: %s\", err)\n\t}\n\n\t// map[string]interface{} => json => arvados.Config\n\tvar cfg arvados.Config\n\tvar errEnc error\n\tpr, pw := io.Pipe()\n\tgo func() {\n\t\terrEnc = json.NewEncoder(pw).Encode(merged)\n\t\tpw.Close()\n\t}()\n\terr = json.NewDecoder(pr).Decode(&cfg)\n\tif errEnc != nil {\n\t\terr = errEnc\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"transcoding config data: %s\", err)\n\t}\n\n\tvar loadFuncs []func(*arvados.Config) error\n\tif !ldr.SkipDeprecated {\n\t\tloadFuncs = append(loadFuncs,\n\t\t\tldr.applyDeprecatedConfig,\n\t\t\tldr.applyDeprecatedVolumeDriverParameters,\n\t\t)\n\t}\n\tif !ldr.SkipLegacy {\n\t\t// legacy file is required when either:\n\t\t// * a non-default location was specified\n\t\t// * no primary config was loaded, and this is the\n\t\t// legacy config file for the current component\n\t\tloadFuncs = append(loadFuncs,\n\t\t\tldr.loadOldEnvironmentVariables,\n\t\t\tldr.loadOldKeepstoreConfig,\n\t\t\tldr.loadOldKeepWebConfig,\n\t\t\tldr.loadOldCrunchDispatchSlurmConfig,\n\t\t\tldr.loadOldWebsocketConfig,\n\t\t\tldr.loadOldKeepproxyConfig,\n\t\t\tldr.loadOldKeepBalanceConfig,\n\t\t)\n\t}\n\tloadFuncs = append(loadFuncs,\n\t\tldr.setImplicitStorageClasses,\n\t\tldr.setLoopbackInstanceType,\n\t)\n\tfor _, f := range loadFuncs {\n\t\terr = f(&cfg)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Preprocess/automate some configs\n\tfor id, cc := range cfg.Clusters 
{\n\t\tldr.autofillPreemptible(\"Clusters.\"+id, &cc)\n\n\t\tif strings.Count(cc.Users.AnonymousUserToken, \"/\") == 3 {\n\t\t\t// V2 token, strip it to just a secret\n\t\t\ttmp := strings.Split(cc.Users.AnonymousUserToken, \"/\")\n\t\t\tcc.Users.AnonymousUserToken = tmp[2]\n\t\t}\n\n\t\tcfg.Clusters[id] = cc\n\t}\n\n\t// Check for known mistakes\n\tfor id, cc := range cfg.Clusters {\n\t\tfor remote := range cc.RemoteClusters {\n\t\t\tif remote == \"*\" || remote == \"SAMPLE\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = ldr.checkClusterID(fmt.Sprintf(\"Clusters.%s.RemoteClusters.%s\", id, remote), remote, true)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tfor _, err = range []error{\n\t\t\tldr.checkClusterID(fmt.Sprintf(\"Clusters.%s\", id), id, false),\n\t\t\tldr.checkClusterID(fmt.Sprintf(\"Clusters.%s.Login.LoginCluster\", id), cc.Login.LoginCluster, true),\n\t\t\tldr.checkToken(fmt.Sprintf(\"Clusters.%s.ManagementToken\", id), cc.ManagementToken, true, false),\n\t\t\tldr.checkToken(fmt.Sprintf(\"Clusters.%s.SystemRootToken\", id), cc.SystemRootToken, true, false),\n\t\t\tldr.checkToken(fmt.Sprintf(\"Clusters.%s.Users.AnonymousUserToken\", id), cc.Users.AnonymousUserToken, false, true),\n\t\t\tldr.checkToken(fmt.Sprintf(\"Clusters.%s.Collections.BlobSigningKey\", id), cc.Collections.BlobSigningKey, true, false),\n\t\t\tcheckKeyConflict(fmt.Sprintf(\"Clusters.%s.PostgreSQL.Connection\", id), cc.PostgreSQL.Connection),\n\t\t\tldr.checkEnum(\"Containers.LocalKeepLogsToContainerLog\", cc.Containers.LocalKeepLogsToContainerLog, \"none\", \"all\", \"errors\"),\n\t\t\tldr.checkEmptyKeepstores(cc),\n\t\t\tldr.checkUnlistedKeepstores(cc),\n\t\t\tldr.checkLocalKeepBlobBuffers(cc),\n\t\t\tldr.checkStorageClasses(cc),\n\t\t\tldr.checkGPUVersions(cc),\n\t\t\t// TODO: check non-empty Rendezvous on\n\t\t\t// services other than Keepstore\n\t\t} {\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\tcfg.SourceTimestamp = ldr.sourceTimestamp\n\tcfg.SourceSHA256 = fmt.Sprintf(\"%x\", sha256.Sum256(ldr.configdata))\n\treturn &cfg, nil\n}\n\nvar acceptableClusterIDRe = regexp.MustCompile(`^[a-z0-9]{5}$`)\n\nfunc (ldr *Loader) checkClusterID(label, clusterID string, emptyStringOk bool) error {\n\tif emptyStringOk && clusterID == \"\" {\n\t\treturn nil\n\t} else if !acceptableClusterIDRe.MatchString(clusterID) {\n\t\treturn fmt.Errorf(\"%s: cluster ID should be 5 lowercase alphanumeric characters\", label)\n\t}\n\treturn nil\n}\n\nvar acceptableTokenRe = regexp.MustCompile(`^[a-zA-Z0-9]+$`)\nvar acceptableTokenLength = 32\n\nfunc (ldr *Loader) checkToken(label, token string, mandatory bool, acceptV2 bool) error {\n\tif len(token) == 0 {\n\t\tif !mandatory {\n\t\t\t// when a token is not mandatory, the acceptable length and content is only checked if its length is non-zero\n\t\t\treturn nil\n\t\t} else {\n\t\t\tif ldr.Logger != nil {\n\t\t\t\tldr.Logger.Warnf(\"%s: secret token is not set (use %d+ random characters from a-z, A-Z, 0-9)\", label, acceptableTokenLength)\n\t\t\t}\n\t\t}\n\t} else if !acceptableTokenRe.MatchString(token) {\n\t\tif !acceptV2 {\n\t\t\treturn fmt.Errorf(\"%s: unacceptable characters in token (only a-z, A-Z, 0-9 are acceptable)\", label)\n\t\t}\n\t\t// Test for a proper V2 token\n\t\ttmp := strings.SplitN(token, \"/\", 3)\n\t\tif len(tmp) != 3 {\n\t\t\treturn fmt.Errorf(\"%s: unacceptable characters in token (only a-z, A-Z, 0-9 are acceptable)\", label)\n\t\t}\n\t\tif !strings.HasPrefix(token, \"v2/\") {\n\t\t\treturn fmt.Errorf(\"%s: 
unacceptable characters in token (only a-z, A-Z, 0-9 are acceptable)\", label)\n\t\t}\n\t\tif !acceptableTokenRe.MatchString(tmp[2]) {\n\t\t\treturn fmt.Errorf(\"%s: unacceptable characters in V2 token secret (only a-z, A-Z, 0-9 are acceptable)\", label)\n\t\t}\n\t\tif len(tmp[2]) < acceptableTokenLength {\n\t\t\tldr.Logger.Warnf(\"%s: secret is too short (should be at least %d characters)\", label, acceptableTokenLength)\n\t\t}\n\t} else if len(token) < acceptableTokenLength {\n\t\tif ldr.Logger != nil {\n\t\t\tldr.Logger.Warnf(\"%s: token is too short (should be at least %d characters)\", label, acceptableTokenLength)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (ldr *Loader) checkEnum(label, value string, accepted ...string) error {\n\tfor _, s := range accepted {\n\t\tif s == value {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"%s: unacceptable value %q: must be one of %q\", label, value, accepted)\n}\n\nfunc (ldr *Loader) setLoopbackInstanceType(cfg *arvados.Config) error {\n\tfor id, cc := range cfg.Clusters {\n\t\tif !cc.Containers.CloudVMs.Enable || cc.Containers.CloudVMs.Driver != \"loopback\" {\n\t\t\tcontinue\n\t\t}\n\t\tif len(cc.InstanceTypes) == 1 {\n\t\t\tcontinue\n\t\t}\n\t\tif len(cc.InstanceTypes) > 1 {\n\t\t\treturn fmt.Errorf(\"Clusters.%s.InstanceTypes: cannot use multiple InstanceTypes with loopback driver\", id)\n\t\t}\n\t\t// No InstanceTypes configured. Fill in implicit\n\t\t// default.\n\t\thostram, err := getHostRAM()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tscratch, err := getFilesystemSize(os.TempDir())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcc.InstanceTypes = arvados.InstanceTypeMap{\"localhost\": {\n\t\t\tName:            \"localhost\",\n\t\t\tProviderType:    \"localhost\",\n\t\t\tVCPUs:           runtime.NumCPU(),\n\t\t\tRAM:             hostram,\n\t\t\tScratch:         scratch,\n\t\t\tIncludedScratch: scratch,\n\t\t\tPrice:           1.0,\n\t\t}}\n\t\tcfg.Clusters[id] = cc\n\t}\n\treturn nil\n}\n\nfunc getFilesystemSize(path string) (arvados.ByteSize, error) {\n\tvar stat unix.Statfs_t\n\terr := unix.Statfs(path, &stat)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn arvados.ByteSize(stat.Blocks * uint64(stat.Bsize)), nil\n}\n\nvar reMemTotal = regexp.MustCompile(`(^|\\n)MemTotal: *(\\d+) kB\\n`)\n\nfunc getHostRAM() (arvados.ByteSize, error) {\n\tbuf, err := os.ReadFile(\"/proc/meminfo\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tm := reMemTotal.FindSubmatch(buf)\n\tif m == nil {\n\t\treturn 0, errors.New(\"error parsing /proc/meminfo: no MemTotal\")\n\t}\n\tkb, err := strconv.ParseInt(string(m[2]), 10, 64)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"error parsing /proc/meminfo: %q: %w\", m[2], err)\n\t}\n\treturn arvados.ByteSize(kb) * 1024, nil\n}\n\nfunc (ldr *Loader) setImplicitStorageClasses(cfg *arvados.Config) error {\ncluster:\n\tfor id, cc := range cfg.Clusters {\n\t\tif len(cc.StorageClasses) > 0 {\n\t\t\tcontinue cluster\n\t\t}\n\t\tfor _, vol := range cc.Volumes {\n\t\t\tif len(vol.StorageClasses) > 0 {\n\t\t\t\tcontinue cluster\n\t\t\t}\n\t\t}\n\t\t// No explicit StorageClasses config info at all; fill\n\t\t// in implicit defaults.\n\t\tfor id, vol := range cc.Volumes {\n\t\t\tvol.StorageClasses = map[string]bool{\"default\": true}\n\t\t\tcc.Volumes[id] = vol\n\t\t}\n\t\tcc.StorageClasses = map[string]arvados.StorageClassConfig{\"default\": {Default: true}}\n\t\tcfg.Clusters[id] = cc\n\t}\n\treturn nil\n}\n\nfunc (ldr *Loader) checkLocalKeepBlobBuffers(cc arvados.Cluster) error {\n\tkbb := 
cc.Containers.LocalKeepBlobBuffersPerVCPU\n\tif kbb == 0 {\n\t\treturn nil\n\t}\n\tfor uuid, vol := range cc.Volumes {\n\t\tif len(vol.AccessViaHosts) > 0 {\n\t\t\tldr.Logger.Warnf(\"LocalKeepBlobBuffersPerVCPU is %d but will not be used because at least one volume (%s) uses AccessViaHosts -- suggest changing to 0\", kbb, uuid)\n\t\t\treturn nil\n\t\t}\n\t\tif !vol.ReadOnly && vol.Replication < cc.Collections.DefaultReplication {\n\t\t\tldr.Logger.Warnf(\"LocalKeepBlobBuffersPerVCPU is %d but will not be used because at least one volume (%s) has lower replication than DefaultReplication (%d < %d) -- suggest changing to 0\", kbb, uuid, vol.Replication, cc.Collections.DefaultReplication)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (ldr *Loader) checkStorageClasses(cc arvados.Cluster) error {\n\tclassOnVolume := map[string]bool{}\n\tfor volid, vol := range cc.Volumes {\n\t\tif len(vol.StorageClasses) == 0 {\n\t\t\treturn fmt.Errorf(\"%s: volume has no StorageClasses listed\", volid)\n\t\t}\n\t\tfor classid := range vol.StorageClasses {\n\t\t\tif _, ok := cc.StorageClasses[classid]; !ok {\n\t\t\t\treturn fmt.Errorf(\"%s: volume refers to storage class %q that is not defined in StorageClasses\", volid, classid)\n\t\t\t}\n\t\t\tclassOnVolume[classid] = true\n\t\t}\n\t}\n\thaveDefault := false\n\tfor classid, sc := range cc.StorageClasses {\n\t\tif !classOnVolume[classid] && len(cc.Volumes) > 0 {\n\t\t\tldr.Logger.Warnf(\"there are no volumes providing storage class %q\", classid)\n\t\t}\n\t\tif sc.Default {\n\t\t\thaveDefault = true\n\t\t}\n\t}\n\tif !haveDefault {\n\t\treturn fmt.Errorf(\"there is no default storage class (at least one entry in StorageClasses must have Default: true)\")\n\t}\n\treturn nil\n}\n\nfunc (ldr *Loader) checkGPUVersions(cc arvados.Cluster) error {\n\tfor _, it := range cc.InstanceTypes {\n\t\tif it.GPU.DeviceCount == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err := strconv.ParseFloat(it.GPU.DriverVersion, 64)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"InstanceType %q has invalid GPU.DriverVersion %q, expected format X.Y (%v)\", it.Name, it.GPU.DriverVersion, err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc checkKeyConflict(label string, m map[string]string) error {\n\tsaw := map[string]bool{}\n\tfor k := range m {\n\t\tk = strings.ToLower(k)\n\t\tif saw[k] {\n\t\t\treturn fmt.Errorf(\"%s: multiple entries for %q (fix by using same capitalization as default/example file)\", label, k)\n\t\t}\n\t\tsaw[k] = true\n\t}\n\treturn nil\n}\n\nfunc removeNullKeys(m map[string]interface{}) {\n\tfor k, v := range m {\n\t\tif v == nil {\n\t\t\tdelete(m, k)\n\t\t}\n\t\tif v, _ := v.(map[string]interface{}); v != nil {\n\t\t\tremoveNullKeys(v)\n\t\t}\n\t}\n}\n\nfunc removeSampleKeys(m map[string]interface{}) {\n\tdelete(m, \"SAMPLE\")\n\tfor _, v := range m {\n\t\tif v, _ := v.(map[string]interface{}); v != nil {\n\t\t\tremoveSampleKeys(v)\n\t\t}\n\t}\n}\n\nfunc (ldr *Loader) logExtraKeys(expected, supplied map[string]interface{}, prefix string) {\n\tif ldr.Logger == nil {\n\t\treturn\n\t}\n\tfor k, vsupp := range supplied {\n\t\tif k == \"SAMPLE\" {\n\t\t\t// entry will be dropped in removeSampleKeys anyway\n\t\t\tcontinue\n\t\t}\n\t\tvexp, ok := expected[k]\n\t\tif expected[\"SAMPLE\"] != nil {\n\t\t\t// use the SAMPLE entry's keys as the\n\t\t\t// \"expected\" map when checking vsupp\n\t\t\t// recursively.\n\t\t\tvexp = expected[\"SAMPLE\"]\n\t\t} else if !ok {\n\t\t\t// check for a case-insensitive match\n\t\t\thint := \"\"\n\t\t\tfor ek := range expected {\n\t\t\t\tif 
strings.EqualFold(k, ek) {\n\t\t\t\t\thint = \" (perhaps you meant \" + ek + \"?)\"\n\t\t\t\t\t// If we don't delete this, it\n\t\t\t\t\t// will end up getting merged,\n\t\t\t\t\t// unpredictably\n\t\t\t\t\t// merging/overriding the\n\t\t\t\t\t// default.\n\t\t\t\t\tdelete(supplied, k)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tldr.Logger.Warnf(\"deprecated or unknown config entry: %s%s%s\", prefix, k, hint)\n\t\t\tcontinue\n\t\t}\n\t\tif vsupp, ok := vsupp.(map[string]interface{}); !ok {\n\t\t\t// if vsupp is a map but vexp isn't map, this\n\t\t\t// will be caught elsewhere; see TestBadType.\n\t\t\tcontinue\n\t\t} else if vexp, ok := vexp.(map[string]interface{}); !ok {\n\t\t\tldr.Logger.Warnf(\"unexpected object in config entry: %s%s\", prefix, k)\n\t\t} else {\n\t\t\tldr.logExtraKeys(vexp, vsupp, prefix+k+\".\")\n\t\t}\n\t}\n}\n\nfunc (ldr *Loader) autofillPreemptible(label string, cc *arvados.Cluster) {\n\tif factor := cc.Containers.PreemptiblePriceFactor; factor > 0 {\n\t\tfor name, it := range cc.InstanceTypes {\n\t\t\tif !it.Preemptible {\n\t\t\t\tit.Preemptible = true\n\t\t\t\tit.Price = it.Price * factor\n\t\t\t\tit.Name = name + \".preemptible\"\n\t\t\t\tif it2, exists := cc.InstanceTypes[it.Name]; exists && it2 != it {\n\t\t\t\t\tldr.Logger.Warnf(\"%s.InstanceTypes[%s]: already exists, so not automatically adding a preemptible variant of %s\", label, it.Name, name)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcc.InstanceTypes[it.Name] = it\n\t\t\t}\n\t\t}\n\t}\n\n}\n\n// RegisterMetrics registers metrics showing the timestamp and content\n// hash of the currently loaded config.\n//\n// Must not be called more than once for a given registry. Must not be\n// called before Load(). Metrics are not updated by subsequent calls\n// to Load().\nfunc (ldr *Loader) RegisterMetrics(reg *prometheus.Registry) {\n\thash := fmt.Sprintf(\"%x\", sha256.Sum256(ldr.configdata))\n\tvec := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"config\",\n\t\tName:      \"source_timestamp_seconds\",\n\t\tHelp:      \"Timestamp of config file when it was loaded.\",\n\t}, []string{\"sha256\"})\n\tvec.WithLabelValues(hash).Set(float64(ldr.sourceTimestamp.UnixNano()) / 1e9)\n\treg.MustRegister(vec)\n\n\tvec = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"config\",\n\t\tName:      \"load_timestamp_seconds\",\n\t\tHelp:      \"Time when config file was loaded.\",\n\t}, []string{\"sha256\"})\n\tvec.WithLabelValues(hash).Set(float64(ldr.loadTimestamp.UnixNano()) / 1e9)\n\treg.MustRegister(vec)\n}\n\n// Load an SSH private key from the given confvalue, which is either\n// the literal key or an absolute path to a file containing the key.\nfunc LoadSSHKey(confvalue string) (ssh.Signer, error) {\n\tif fnm := strings.TrimPrefix(confvalue, \"file://\"); fnm != confvalue && strings.HasPrefix(fnm, \"/\") {\n\t\tkeydata, err := os.ReadFile(fnm)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn ssh.ParsePrivateKey(keydata)\n\t} else {\n\t\treturn ssh.ParsePrivateKey([]byte(confvalue))\n\t}\n}\n"
  },
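  {
    "path": "lib/config/load_usage_sketch.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Illustrative usage sketch added alongside load.go for\n// documentation purposes: it is not part of the upstream source\n// tree, and the file name and function names are hypothetical. It\n// walks through the Load() flow described in load.go -- read YAML\n// from Stdin (Path \"-\"), merge it over the embedded\n// config.default.yml, then look up one cluster -- plus the\n// MungeLegacyConfigArgs and LoadSSHKey entry points.\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc loaderSketch() {\n\tconfdata := \"Clusters: {zzzzz: {}}\"\n\tldr := NewLoader(bytes.NewBufferString(confdata), logrus.New())\n\tldr.Path = \"-\" // \"-\" means: read config from ldr.Stdin\n\tcfg, err := ldr.Load()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tcc, err := cfg.GetCluster(\"zzzzz\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\t// Defaults from config.default.yml have been merged in.\n\tfmt.Println(cc.ClusterID, cc.API.MaxItemsPerResponse)\n\n\t// After a successful Load(), RegisterMetrics can export the\n\t// config source timestamp/hash (at most once per registry).\n\tldr.RegisterMetrics(prometheus.NewRegistry())\n}\n\nfunc mungeSketch(ldr *Loader) {\n\t// Sketch of the call a component's main() might make (see the\n\t// MungeLegacyConfigArgs doc comment): if the -config operand\n\t// turns out to be a legacy (non-cluster) config file, the flag\n\t// is rewritten to the given legacy flag name.\n\targs := ldr.MungeLegacyConfigArgs(logrus.New(), os.Args[1:], \"-legacy-keepstore-config\")\n\tfmt.Println(args)\n}\n\nfunc sshKeySketch() {\n\t// LoadSSHKey accepts either literal key material or a\n\t// \"file://\" reference to an absolute path (the path below is\n\t// hypothetical).\n\tif _, err := LoadSSHKey(\"file:///etc/arvados/dispatch-key\"); err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n"
  },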
  {
    "path": "lib/config/load_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"os/exec\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/sys/unix\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&LoadSuite{})\n\nvar emptyConfigYAML = `Clusters: {\"z1111\": {}}`\n\n// Return a new Loader that reads cluster config from configdata\n// (instead of the usual default /etc/arvados/config.yml), and logs to\n// logdst or (if that's nil) c.Log.\nfunc testLoader(c *check.C, configdata string, logdst io.Writer) *Loader {\n\tlogger := ctxlog.TestLogger(c)\n\tif logdst != nil {\n\t\tlgr := logrus.New()\n\t\tlgr.Out = logdst\n\t\tlogger = lgr\n\t}\n\tldr := NewLoader(bytes.NewBufferString(configdata), logger)\n\tldr.Path = \"-\"\n\treturn ldr\n}\n\ntype LoadSuite struct{}\n\nfunc (s *LoadSuite) SetUpSuite(c *check.C) {\n\tos.Unsetenv(\"ARVADOS_API_HOST\")\n\tos.Unsetenv(\"ARVADOS_API_HOST_INSECURE\")\n\tos.Unsetenv(\"ARVADOS_API_TOKEN\")\n}\n\nfunc (s *LoadSuite) TestEmpty(c *check.C) {\n\tcfg, err := testLoader(c, \"\", nil).Load()\n\tc.Check(cfg, check.IsNil)\n\tc.Assert(err, check.ErrorMatches, `config does not define any clusters`)\n}\n\nfunc (s *LoadSuite) TestNoConfigs(c *check.C) {\n\tcfg, err := testLoader(c, emptyConfigYAML, nil).Load()\n\tc.Assert(err, check.IsNil)\n\tc.Assert(cfg.Clusters, check.HasLen, 1)\n\tcc, err := cfg.GetCluster(\"z1111\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(cc.ClusterID, check.Equals, \"z1111\")\n\tc.Check(cc.API.MaxRequestAmplification, check.Equals, 4)\n\tc.Check(cc.API.MaxItemsPerResponse, check.Equals, 1000)\n}\n\nfunc (s *LoadSuite) TestNullKeyDoesNotOverrideDefault(c *check.C) {\n\tldr := testLoader(c, `{\"Clusters\":{\"z1111\":{\"API\":}}}`, nil)\n\tldr.SkipDeprecated = true\n\tcfg, err := ldr.Load()\n\tc.Assert(err, check.IsNil)\n\tc1, err := cfg.GetCluster(\"z1111\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(c1.ClusterID, check.Equals, \"z1111\")\n\tc.Check(c1.API.MaxRequestAmplification, check.Equals, 4)\n\tc.Check(c1.API.MaxItemsPerResponse, check.Equals, 1000)\n}\n\nfunc (s *LoadSuite) TestMungeLegacyConfigArgs(c *check.C) {\n\tf, err := ioutil.TempFile(\"\", \"\")\n\tc.Check(err, check.IsNil)\n\tdefer os.Remove(f.Name())\n\tio.WriteString(f, \"Debug: true\\n\")\n\toldfile := f.Name()\n\n\tf, err = ioutil.TempFile(\"\", \"\")\n\tc.Check(err, check.IsNil)\n\tdefer os.Remove(f.Name())\n\tio.WriteString(f, emptyConfigYAML)\n\tnewfile := f.Name()\n\n\tfor _, trial := range []struct {\n\t\targsIn  []string\n\t\targsOut []string\n\t}{\n\t\t{\n\t\t\t[]string{\"-config\", oldfile},\n\t\t\t[]string{\"-old-config\", oldfile},\n\t\t},\n\t\t{\n\t\t\t[]string{\"-config=\" + oldfile},\n\t\t\t[]string{\"-old-config=\" + oldfile},\n\t\t},\n\t\t{\n\t\t\t[]string{\"-config\", newfile},\n\t\t\t[]string{\"-config\", newfile},\n\t\t},\n\t\t{\n\t\t\t[]string{\"-config=\" + newfile},\n\t\t\t[]string{\"-config=\" + newfile},\n\t\t},\n\t\t{\n\t\t\t[]string{\"-foo\", oldfile},\n\t\t\t[]string{\"-foo\", 
oldfile},\n\t\t},\n\t\t{\n\t\t\t[]string{\"-foo=\" + oldfile},\n\t\t\t[]string{\"-foo=\" + oldfile},\n\t\t},\n\t\t{\n\t\t\t[]string{\"-foo\", \"-config=\" + oldfile},\n\t\t\t[]string{\"-foo\", \"-old-config=\" + oldfile},\n\t\t},\n\t\t{\n\t\t\t[]string{\"-foo\", \"bar\", \"-config=\" + oldfile},\n\t\t\t[]string{\"-foo\", \"bar\", \"-old-config=\" + oldfile},\n\t\t},\n\t\t{\n\t\t\t[]string{\"-foo=bar\", \"baz\", \"-config=\" + oldfile},\n\t\t\t[]string{\"-foo=bar\", \"baz\", \"-old-config=\" + oldfile},\n\t\t},\n\t\t{\n\t\t\t[]string{\"-config=/dev/null\"},\n\t\t\t[]string{\"-config=/dev/null\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"-config=-\"},\n\t\t\t[]string{\"-config=-\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"-config=\"},\n\t\t\t[]string{\"-config=\"},\n\t\t},\n\t\t{\n\t\t\t[]string{\"-foo=bar\", \"baz\", \"-config\"},\n\t\t\t[]string{\"-foo=bar\", \"baz\", \"-config\"},\n\t\t},\n\t\t{\n\t\t\t[]string{},\n\t\t\tnil,\n\t\t},\n\t} {\n\t\tvar logbuf bytes.Buffer\n\t\tlogger := logrus.New()\n\t\tlogger.Out = &logbuf\n\n\t\tvar ldr Loader\n\t\targs := ldr.MungeLegacyConfigArgs(logger, trial.argsIn, \"-old-config\")\n\t\tc.Check(args, check.DeepEquals, trial.argsOut)\n\t\tif fmt.Sprintf(\"%v\", trial.argsIn) != fmt.Sprintf(\"%v\", trial.argsOut) {\n\t\t\tc.Check(logbuf.String(), check.Matches, `.*`+oldfile+` is not a cluster config file -- interpreting -config as -old-config.*\\n`)\n\t\t}\n\t}\n}\n\nfunc (s *LoadSuite) TestSampleKeys(c *check.C) {\n\tfor _, yaml := range []string{\n\t\t`{\"Clusters\":{\"z1111\":{}}}`,\n\t\t`{\"Clusters\":{\"z1111\":{\"InstanceTypes\":{\"Foo\":{\"RAM\": \"12345M\"}}}}}`,\n\t} {\n\t\tcfg, err := testLoader(c, yaml, nil).Load()\n\t\tc.Assert(err, check.IsNil)\n\t\tcc, err := cfg.GetCluster(\"z1111\")\n\t\tc.Assert(err, check.IsNil)\n\t\t_, hasSample := cc.InstanceTypes[\"SAMPLE\"]\n\t\tc.Check(hasSample, check.Equals, false)\n\t\tif strings.Contains(yaml, \"Foo\") {\n\t\t\tc.Check(cc.InstanceTypes[\"Foo\"].RAM, check.Equals, arvados.ByteSize(12345000000))\n\t\t\tc.Check(cc.InstanceTypes[\"Foo\"].Price, check.Equals, 0.0)\n\t\t}\n\t}\n}\n\nfunc (s *LoadSuite) TestMultipleClusters(c *check.C) {\n\tldr := testLoader(c, `{\"Clusters\":{\"z1111\":{},\"z2222\":{}}}`, nil)\n\tldr.SkipDeprecated = true\n\tcfg, err := ldr.Load()\n\tc.Assert(err, check.IsNil)\n\tc1, err := cfg.GetCluster(\"z1111\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(c1.ClusterID, check.Equals, \"z1111\")\n\tc2, err := cfg.GetCluster(\"z2222\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(c2.ClusterID, check.Equals, \"z2222\")\n}\n\nfunc (s *LoadSuite) TestDeprecatedOrUnknownWarning(c *check.C) {\n\tvar logbuf bytes.Buffer\n\t_, err := testLoader(c, `\nClusters:\n  zzzzz:\n    ManagementToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n    SystemRootToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n    Collections:\n     BlobSigningKey: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n    PostgreSQL: {}\n    BadKey1: {}\n    Containers:\n      RunTimeEngine: abc\n    RemoteClusters:\n      z2222:\n        Host: z2222.arvadosapi.com\n        Proxy: true\n        BadKey2: badValue\n    Services:\n      KeepStore:\n        InternalURLs:\n          \"http://host.example:12345\": {}\n      Keepstore:\n        InternalURLs:\n          \"http://host.example:12345\":\n            RendezVous: x\n    ServiceS:\n      Keepstore:\n        InternalURLs:\n          \"http://host.example:12345\": {}\n    Volumes:\n      zzzzz-nyw5e-aaaaaaaaaaaaaaa: {Replication: 2}\n`, &logbuf).Load()\n\tc.Assert(err, check.IsNil)\n\tc.Log(logbuf.String())\n\tlogs := 
strings.Split(strings.TrimSuffix(logbuf.String(), \"\\n\"), \"\\n\")\n\tfor _, log := range logs {\n\t\tc.Check(log, check.Matches, `.*deprecated or unknown config entry:.*(RunTimeEngine.*RuntimeEngine|BadKey1|BadKey2|KeepStore|ServiceS|RendezVous).*`)\n\t}\n\tc.Check(logs, check.HasLen, 6)\n}\n\nfunc (s *LoadSuite) checkSAMPLEKeys(c *check.C, path string, x interface{}) {\n\tv := reflect.Indirect(reflect.ValueOf(x))\n\tswitch v.Kind() {\n\tcase reflect.Map:\n\t\tvar stringKeys, sampleKey bool\n\t\titer := v.MapRange()\n\t\tfor iter.Next() {\n\t\t\tk := iter.Key()\n\t\t\tif k.Kind() == reflect.String {\n\t\t\t\tstringKeys = true\n\t\t\t\tif k.String() == \"SAMPLE\" || k.String() == \"xxxxx\" {\n\t\t\t\t\tsampleKey = true\n\t\t\t\t\ts.checkSAMPLEKeys(c, path+\".\"+k.String(), iter.Value().Interface())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif stringKeys && !sampleKey {\n\t\t\tc.Errorf(\"%s is a map with string keys (type %T) but config.default.yml has no SAMPLE key\", path, x)\n\t\t}\n\t\treturn\n\tcase reflect.Struct:\n\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\tval := v.Field(i)\n\t\t\tif val.CanInterface() {\n\t\t\t\ts.checkSAMPLEKeys(c, path+\".\"+v.Type().Field(i).Name, val.Interface())\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *LoadSuite) TestDefaultConfigHasAllSAMPLEKeys(c *check.C) {\n\tvar logbuf bytes.Buffer\n\tloader := testLoader(c, string(DefaultYAML), &logbuf)\n\tcfg, err := loader.Load()\n\tc.Assert(err, check.IsNil)\n\ts.checkSAMPLEKeys(c, \"\", cfg)\n}\n\nfunc (s *LoadSuite) TestNoUnrecognizedKeysInDefaultConfig(c *check.C) {\n\tvar logbuf bytes.Buffer\n\tvar supplied map[string]interface{}\n\tyaml.Unmarshal(DefaultYAML, &supplied)\n\n\tloader := testLoader(c, string(DefaultYAML), &logbuf)\n\tcfg, err := loader.Load()\n\tc.Assert(err, check.IsNil)\n\tvar loaded map[string]interface{}\n\tbuf, err := yaml.Marshal(cfg)\n\tc.Assert(err, check.IsNil)\n\terr = yaml.Unmarshal(buf, &loaded)\n\tc.Assert(err, check.IsNil)\n\n\tc.Check(logbuf.String(), check.Matches, `(?ms).*SystemRootToken: secret token is not set.*`)\n\tc.Check(logbuf.String(), check.Matches, `(?ms).*ManagementToken: secret token is not set.*`)\n\tc.Check(logbuf.String(), check.Matches, `(?ms).*Collections.BlobSigningKey: secret token is not set.*`)\n\tlogbuf.Reset()\n\tloader.logExtraKeys(loaded, supplied, \"\")\n\tc.Check(logbuf.String(), check.Equals, \"\")\n}\n\nfunc (s *LoadSuite) TestNoWarningsForDumpedConfig(c *check.C) {\n\tvar logbuf bytes.Buffer\n\tcfg, err := testLoader(c, `\nClusters:\n zzzzz:\n  ManagementToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  SystemRootToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  Collections:\n   BlobSigningKey: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  InstanceTypes:\n   abc:\n    IncludedScratch: 123456\n`, &logbuf).Load()\n\tc.Assert(err, check.IsNil)\n\tyaml, err := yaml.Marshal(cfg)\n\tc.Assert(err, check.IsNil)\n\t// Well, *nearly* no warnings. 
SourceTimestamp and\n\t// SourceSHA256 are included in a config-dump, but not\n\t// expected in a real config file.\n\tyaml = regexp.MustCompile(`(^|\\n)(Source(Timestamp|SHA256): .*?\\n)+`).ReplaceAll(yaml, []byte(\"$1\"))\n\tcfgDumped, err := testLoader(c, string(yaml), &logbuf).Load()\n\tc.Assert(err, check.IsNil)\n\t// SourceTimestamp and SourceSHA256 aren't expected to be\n\t// preserved through dump+load\n\tcfgDumped.SourceTimestamp = cfg.SourceTimestamp\n\tcfgDumped.SourceSHA256 = cfg.SourceSHA256\n\tc.Check(cfg, check.DeepEquals, cfgDumped)\n\tc.Check(logbuf.String(), check.Equals, \"\")\n}\n\nfunc (s *LoadSuite) TestUnacceptableTokens(c *check.C) {\n\tfor _, trial := range []struct {\n\t\tshort      bool\n\t\tconfigPath string\n\t\texample    string\n\t}{\n\t\t{false, \"SystemRootToken\", \"SystemRootToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_b_c\"},\n\t\t{false, \"ManagementToken\", \"ManagementToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa b c\"},\n\t\t{false, \"ManagementToken\", \"ManagementToken: \\\"$aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabc\\\"\"},\n\t\t{false, \"Collections.BlobSigningKey\", \"Collections: {BlobSigningKey: \\\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa⛵\\\"}\"},\n\t\t{true, \"SystemRootToken\", \"SystemRootToken: a_b_c\"},\n\t\t{true, \"ManagementToken\", \"ManagementToken: a b c\"},\n\t\t{true, \"ManagementToken\", \"ManagementToken: \\\"$abc\\\"\"},\n\t\t{true, \"Collections.BlobSigningKey\", \"Collections: {BlobSigningKey: \\\"⛵\\\"}\"},\n\t} {\n\t\tc.Logf(\"trying bogus config: %s\", trial.example)\n\t\t_, err := testLoader(c, \"Clusters:\\n zzzzz:\\n  \"+trial.example, nil).Load()\n\t\tc.Check(err, check.ErrorMatches, `Clusters.zzzzz.`+trial.configPath+`: unacceptable characters in token.*`)\n\t}\n}\n\nfunc (s *LoadSuite) TestPostgreSQLKeyConflict(c *check.C) {\n\t_, err := testLoader(c, `\nClusters:\n zzzzz:\n  PostgreSQL:\n   Connection:\n     DBName: dbname\n     Host: host\n`, nil).Load()\n\tc.Check(err, check.ErrorMatches, `Clusters.zzzzz.PostgreSQL.Connection: multiple entries for \"(dbname|host)\".*`)\n}\n\nfunc (s *LoadSuite) TestBadClusterIDs(c *check.C) {\n\tfor _, data := range []string{`\nClusters:\n 123456:\n  ManagementToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  SystemRootToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  Collections:\n   BlobSigningKey: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n`, `\nClusters:\n 12345:\n  ManagementToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  SystemRootToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  Collections:\n   BlobSigningKey: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  RemoteClusters:\n   Zzzzz:\n    Host: Zzzzz.arvadosapi.com\n    Proxy: true\n`, `\nClusters:\n abcde:\n  ManagementToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  SystemRootToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  Collections:\n   BlobSigningKey: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  Login:\n   LoginCluster: zz-zz\n`,\n\t} {\n\t\tc.Log(data)\n\t\tv, err := testLoader(c, data, nil).Load()\n\t\tif v != nil {\n\t\t\tc.Logf(\"%#v\", v.Clusters)\n\t\t}\n\t\tc.Check(err, check.ErrorMatches, `.*cluster ID should be 5 lowercase alphanumeric characters.*`)\n\t}\n}\n\nfunc (s *LoadSuite) TestBadType(c *check.C) {\n\tfor _, data := range []string{`\nClusters:\n zzzzz:\n  PostgreSQL: true\n`, `\nClusters:\n zzzzz:\n  PostgreSQL:\n   ConnectionPool: true\n`, `\nClusters:\n zzzzz:\n  PostgreSQL:\n   ConnectionPool: \"foo\"\n`, `\nClusters:\n zzzzz:\n  PostgreSQL:\n   ConnectionPool: []\n`, `\nClusters:\n zzzzz:\n  PostgreSQL:\n   ConnectionPool: [] # {foo: bar} isn't caught here; we rely on 
config-check\n`,\n\t} {\n\t\tc.Log(data)\n\t\tv, err := testLoader(c, data, nil).Load()\n\t\tif v != nil {\n\t\t\tc.Logf(\"%#v\", v.Clusters[\"zzzzz\"].PostgreSQL.ConnectionPool)\n\t\t}\n\t\tc.Check(err, check.ErrorMatches, `.*cannot unmarshal .*PostgreSQL.*`)\n\t}\n}\n\nfunc (s *LoadSuite) TestMovedKeys(c *check.C) {\n\tcheckEquivalent(c, `# config has old keys only\nClusters:\n zzzzz:\n  RequestLimits:\n   MultiClusterRequestConcurrency: 3\n   MaxItemsPerResponse: 999\n`, `\nClusters:\n zzzzz:\n  API:\n   MaxRequestAmplification: 3\n   MaxItemsPerResponse: 999\n`)\n\tcheckEquivalent(c, `# config has both old and new keys; old values win\nClusters:\n zzzzz:\n  RequestLimits:\n   MultiClusterRequestConcurrency: 0\n   MaxItemsPerResponse: 555\n  API:\n   MaxRequestAmplification: 3\n   MaxItemsPerResponse: 999\n`, `\nClusters:\n zzzzz:\n  API:\n   MaxRequestAmplification: 0\n   MaxItemsPerResponse: 555\n`)\n}\n\nfunc checkEquivalent(c *check.C, goty, expectedy string) string {\n\tvar logbuf bytes.Buffer\n\tgotldr := testLoader(c, goty, &logbuf)\n\texpectedldr := testLoader(c, expectedy, nil)\n\tcheckEquivalentLoaders(c, gotldr, expectedldr)\n\treturn logbuf.String()\n}\n\nfunc checkEqualYAML(c *check.C, got, expected interface{}) {\n\texpectedyaml, err := yaml.Marshal(expected)\n\tc.Assert(err, check.IsNil)\n\tgotyaml, err := yaml.Marshal(got)\n\tc.Assert(err, check.IsNil)\n\tif !bytes.Equal(gotyaml, expectedyaml) {\n\t\tcmd := exec.Command(\"diff\", \"-u\", \"--label\", \"expected\", \"--label\", \"got\", \"/dev/fd/3\", \"/dev/fd/4\")\n\t\tfor _, y := range [][]byte{expectedyaml, gotyaml} {\n\t\t\tpr, pw, err := os.Pipe()\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tdefer pr.Close()\n\t\t\tgo func(data []byte) {\n\t\t\t\tpw.Write(data)\n\t\t\t\tpw.Close()\n\t\t\t}(y)\n\t\t\tcmd.ExtraFiles = append(cmd.ExtraFiles, pr)\n\t\t}\n\t\tdiff, err := cmd.CombinedOutput()\n\t\t// diff should report differences and exit non-zero.\n\t\tc.Check(err, check.NotNil)\n\t\tc.Log(string(diff))\n\t\tc.Error(\"got != expected; see diff (-expected +got) above\")\n\t}\n}\n\nfunc checkEquivalentLoaders(c *check.C, gotldr, expectedldr *Loader) {\n\tgot, err := gotldr.Load()\n\tc.Assert(err, check.IsNil)\n\texpected, err := expectedldr.Load()\n\tc.Assert(err, check.IsNil)\n\t// The inputs generally aren't even files, so SourceTimestamp\n\t// can't be expected to match.\n\tgot.SourceTimestamp = expected.SourceTimestamp\n\t// Obviously the content isn't identical -- otherwise we\n\t// wouldn't need to check that it's equivalent.\n\tgot.SourceSHA256 = expected.SourceSHA256\n\tcheckEqualYAML(c, got, expected)\n}\n\nfunc checkListKeys(path string, x interface{}) (err error) {\n\tv := reflect.Indirect(reflect.ValueOf(x))\n\tswitch v.Kind() {\n\tcase reflect.Map:\n\t\titer := v.MapRange()\n\t\tfor iter.Next() {\n\t\t\tk := iter.Key()\n\t\t\tif k.Kind() == reflect.String {\n\t\t\t\tif err = checkListKeys(path+\".\"+k.String(), iter.Value().Interface()); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn\n\n\tcase reflect.Struct:\n\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\tval := v.Field(i)\n\t\t\tstructField := v.Type().Field(i)\n\t\t\tfieldname := structField.Name\n\t\t\tendsWithList := strings.HasSuffix(fieldname, \"List\")\n\t\t\tisAnArray := structField.Type.Kind() == reflect.Slice\n\t\t\tif endsWithList != isAnArray {\n\t\t\t\tif endsWithList {\n\t\t\t\t\terr = fmt.Errorf(\"%s.%s ends with 'List' but field is not an array (type %v)\", path, fieldname, 
val.Kind())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif isAnArray && structField.Type.Elem().Kind() != reflect.Uint8 {\n\t\t\t\t\terr = fmt.Errorf(\"%s.%s is an array but field name does not end in 'List' (slice of %v)\", path, fieldname, structField.Type.Elem().Kind())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tif val.CanInterface() {\n\t\t\t\t// Descend into nested fields and propagate any error.\n\t\t\t\tif err = checkListKeys(path+\".\"+fieldname, val.Interface()); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (s *LoadSuite) TestListKeys(c *check.C) {\n\tv1 := struct {\n\t\tEndInList []string\n\t}{[]string{\"a\", \"b\"}}\n\tvar m1 = make(map[string]interface{})\n\tm1[\"c\"] = &v1\n\tif err := checkListKeys(\"\", m1); err != nil {\n\t\tc.Error(err)\n\t}\n\n\tv2 := struct {\n\t\tDoesNot []string\n\t}{[]string{\"a\", \"b\"}}\n\tvar m2 = make(map[string]interface{})\n\tm2[\"c\"] = &v2\n\tif err := checkListKeys(\"\", m2); err == nil {\n\t\tc.Errorf(\"Should have produced an error\")\n\t}\n\n\tv3 := struct {\n\t\tEndInList string\n\t}{\"a\"}\n\tvar m3 = make(map[string]interface{})\n\tm3[\"c\"] = &v3\n\tif err := checkListKeys(\"\", m3); err == nil {\n\t\tc.Errorf(\"Should have produced an error\")\n\t}\n\n\tloader := testLoader(c, string(DefaultYAML), nil)\n\tcfg, err := loader.Load()\n\tc.Assert(err, check.IsNil)\n\tif err := checkListKeys(\"\", cfg); err != nil {\n\t\tc.Error(err)\n\t}\n}\n\nfunc (s *LoadSuite) TestLoopbackInstanceTypes(c *check.C) {\n\tldr := testLoader(c, `\nClusters:\n z1111:\n  Containers:\n   CloudVMs:\n    Enable: true\n    Driver: loopback\n  InstanceTypes:\n   a: {}\n   b: {}\n`, nil)\n\tcfg, err := ldr.Load()\n\tc.Check(err, check.ErrorMatches, `Clusters\\.z1111\\.InstanceTypes: cannot use multiple InstanceTypes with loopback driver`)\n\n\tldr = testLoader(c, `\nClusters:\n z1111:\n  Containers:\n   CloudVMs:\n    Enable: true\n    Driver: loopback\n`, nil)\n\tcfg, err = ldr.Load()\n\tc.Assert(err, check.IsNil)\n\tcc, err := cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(cc.InstanceTypes, check.HasLen, 1)\n\tc.Check(cc.InstanceTypes[\"localhost\"].VCPUs, check.Equals, runtime.NumCPU())\n\n\tldr = testLoader(c, `\nClusters:\n z1111:\n  Containers:\n   CloudVMs:\n    Enable: true\n    Driver: loopback\n  InstanceTypes:\n   a:\n    VCPUs: 9\n`, nil)\n\tcfg, err = ldr.Load()\n\tc.Assert(err, check.IsNil)\n\tcc, err = cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(cc.InstanceTypes, check.HasLen, 1)\n\tc.Check(cc.InstanceTypes[\"a\"].VCPUs, check.Equals, 9)\n}\n\nfunc (s *LoadSuite) TestWarnUnusedLocalKeep(c *check.C) {\n\tvar logbuf bytes.Buffer\n\t_, err := testLoader(c, `\nClusters:\n z1111:\n  Volumes:\n   z:\n    Replication: 1\n`, &logbuf).Load()\n\tc.Assert(err, check.IsNil)\n\tc.Check(logbuf.String(), check.Matches, `(?ms).*LocalKeepBlobBuffersPerVCPU is 1 but will not be used because at least one volume \\(z\\) has lower replication than DefaultReplication \\(1 < 2\\) -- suggest changing to 0.*`)\n\n\tlogbuf.Reset()\n\t_, err = testLoader(c, `\nClusters:\n z1111:\n  Volumes:\n   z:\n    AccessViaHosts:\n     \"http://0.0.0.0:12345\": {}\n`, &logbuf).Load()\n\tc.Assert(err, check.IsNil)\n\tc.Check(logbuf.String(), check.Matches, `(?ms).*LocalKeepBlobBuffersPerVCPU is 1 but will not be used because at least one volume \\(z\\) uses AccessViaHosts -- suggest changing to 0.*`)\n}\n\nfunc (s *LoadSuite) TestImplicitStorageClasses(c *check.C) {\n\t// If StorageClasses and Volumes.*.StorageClasses are all\n\t// empty, there is a default storage class named \"default\".\n\tldr := testLoader(c, 
`{\"Clusters\":{\"z1111\":{}}}`, nil)\n\tcfg, err := ldr.Load()\n\tc.Assert(err, check.IsNil)\n\tcc, err := cfg.GetCluster(\"z1111\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(cc.StorageClasses, check.HasLen, 1)\n\tc.Check(cc.StorageClasses[\"default\"].Default, check.Equals, true)\n\tc.Check(cc.StorageClasses[\"default\"].Priority, check.Equals, 0)\n\n\t// The implicit \"default\" storage class is used by all\n\t// volumes.\n\tldr = testLoader(c, `\nClusters:\n z1111:\n  Volumes:\n   z: {}`, nil)\n\tcfg, err = ldr.Load()\n\tc.Assert(err, check.IsNil)\n\tcc, err = cfg.GetCluster(\"z1111\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(cc.StorageClasses, check.HasLen, 1)\n\tc.Check(cc.StorageClasses[\"default\"].Default, check.Equals, true)\n\tc.Check(cc.StorageClasses[\"default\"].Priority, check.Equals, 0)\n\tc.Check(cc.Volumes[\"z\"].StorageClasses[\"default\"], check.Equals, true)\n\n\t// The \"default\" storage class isn't implicit if any classes\n\t// are configured explicitly.\n\tldr = testLoader(c, `\nClusters:\n z1111:\n  StorageClasses:\n   local:\n    Default: true\n    Priority: 111\n  Volumes:\n   z:\n    StorageClasses:\n     local: true`, nil)\n\tcfg, err = ldr.Load()\n\tc.Assert(err, check.IsNil)\n\tcc, err = cfg.GetCluster(\"z1111\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(cc.StorageClasses, check.HasLen, 1)\n\tc.Check(cc.StorageClasses[\"local\"].Default, check.Equals, true)\n\tc.Check(cc.StorageClasses[\"local\"].Priority, check.Equals, 111)\n\n\t// It is an error for a volume to refer to a storage class\n\t// that isn't listed in StorageClasses.\n\tldr = testLoader(c, `\nClusters:\n z1111:\n  StorageClasses:\n   local:\n    Default: true\n    Priority: 111\n  Volumes:\n   z:\n    StorageClasses:\n     nx: true`, nil)\n\t_, err = ldr.Load()\n\tc.Assert(err, check.ErrorMatches, `z: volume refers to storage class \"nx\" that is not defined.*`)\n\n\t// It is an error for a volume to refer to a storage class\n\t// that isn't listed in StorageClasses ... even if it's\n\t// \"default\", which would exist implicitly if it weren't\n\t// referenced explicitly by a volume.\n\tldr = testLoader(c, `\nClusters:\n z1111:\n  Volumes:\n   z:\n    StorageClasses:\n     default: true`, nil)\n\t_, err = ldr.Load()\n\tc.Assert(err, check.ErrorMatches, `z: volume refers to storage class \"default\" that is not defined.*`)\n\n\t// If the \"default\" storage class is configured explicitly, it\n\t// is not used implicitly by any volumes, even if it's the\n\t// only storage class.\n\tvar logbuf bytes.Buffer\n\tldr = testLoader(c, `\nClusters:\n z1111:\n  StorageClasses:\n   default:\n    Default: true\n    Priority: 111\n  Volumes:\n   z: {}`, &logbuf)\n\t_, err = ldr.Load()\n\tc.Assert(err, check.ErrorMatches, `z: volume has no StorageClasses listed`)\n\n\t// If StorageClasses are configured explicitly, there must be\n\t// at least one with Default: true. 
(Calling one \"default\" is\n\t// not sufficient.)\n\tldr = testLoader(c, `\nClusters:\n z1111:\n  StorageClasses:\n   default:\n    Priority: 111\n  Volumes:\n   z:\n    StorageClasses:\n     default: true`, nil)\n\t_, err = ldr.Load()\n\tc.Assert(err, check.ErrorMatches, `there is no default storage class.*`)\n}\n\nfunc (s *LoadSuite) TestPreemptiblePriceFactor(c *check.C) {\n\tyaml := `\nClusters:\n z1111:\n  InstanceTypes:\n   Type1:\n    RAM: 12345M\n    VCPUs: 8\n    Price: 1.23\n z2222:\n  Containers:\n   PreemptiblePriceFactor: 0.5\n  InstanceTypes:\n   Type1:\n    RAM: 12345M\n    VCPUs: 8\n    Price: 1.23\n z3333:\n  Containers:\n   PreemptiblePriceFactor: 0.5\n  InstanceTypes:\n   Type1:\n    RAM: 12345M\n    VCPUs: 8\n    Price: 1.23\n   Type1.preemptible: # higher price than the auto-added variant would use -- should generate warning\n    ProviderType: Type1\n    RAM: 12345M\n    VCPUs: 8\n    Price: 1.23\n    Preemptible: true\n   Type2:\n    RAM: 23456M\n    VCPUs: 16\n    Price: 2.46\n   Type2.preemptible: # identical to the auto-added variant -- so no warning\n    ProviderType: Type2\n    RAM: 23456M\n    VCPUs: 16\n    Price: 1.23\n    Preemptible: true\n`\n\tvar logbuf bytes.Buffer\n\tcfg, err := testLoader(c, yaml, &logbuf).Load()\n\tc.Assert(err, check.IsNil)\n\tcc, err := cfg.GetCluster(\"z1111\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(cc.InstanceTypes[\"Type1\"].Price, check.Equals, 1.23)\n\tc.Check(cc.InstanceTypes, check.HasLen, 1)\n\n\tcc, err = cfg.GetCluster(\"z2222\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(cc.InstanceTypes[\"Type1\"].Preemptible, check.Equals, false)\n\tc.Check(cc.InstanceTypes[\"Type1\"].Price, check.Equals, 1.23)\n\tc.Check(cc.InstanceTypes[\"Type1.preemptible\"].Preemptible, check.Equals, true)\n\tc.Check(cc.InstanceTypes[\"Type1.preemptible\"].Price, check.Equals, 1.23/2)\n\tc.Check(cc.InstanceTypes[\"Type1.preemptible\"].ProviderType, check.Equals, \"Type1\")\n\tc.Check(cc.InstanceTypes, check.HasLen, 2)\n\n\tcc, err = cfg.GetCluster(\"z3333\")\n\tc.Assert(err, check.IsNil)\n\t// Don't overwrite the explicitly configured preemptible variant\n\tc.Check(cc.InstanceTypes[\"Type1.preemptible\"].Price, check.Equals, 1.23)\n\tc.Check(cc.InstanceTypes, check.HasLen, 4)\n\tc.Check(logbuf.String(), check.Matches, `(?ms).*Clusters\\.z3333\\.InstanceTypes\\[Type1\\.preemptible\\]: already exists, so not automatically adding a preemptible variant of Type1.*`)\n\tc.Check(logbuf.String(), check.Not(check.Matches), `(?ms).*Type2\\.preemptible.*`)\n\tc.Check(logbuf.String(), check.Not(check.Matches), `(?ms).*(z1111|z2222)[^\\n]*InstanceTypes.*`)\n}\n\nfunc (s *LoadSuite) TestSourceTimestamp(c *check.C) {\n\tconftime, err := time.Parse(time.RFC3339, \"2022-03-04T05:06:07-08:00\")\n\tc.Assert(err, check.IsNil)\n\tconfdata := `Clusters: {zzzzz: {}}`\n\tconffile := c.MkDir() + \"/config.yml\"\n\tioutil.WriteFile(conffile, []byte(confdata), 0777)\n\ttv := unix.NsecToTimeval(conftime.UnixNano())\n\tunix.Lutimes(conffile, []unix.Timeval{tv, tv})\n\tfor _, trial := range []struct {\n\t\tconfigarg  string\n\t\texpectTime time.Time\n\t}{\n\t\t{\"-\", time.Now()},\n\t\t{conffile, conftime},\n\t} {\n\t\tc.Logf(\"trial: %+v\", trial)\n\t\tldr := NewLoader(strings.NewReader(confdata), ctxlog.TestLogger(c))\n\t\tldr.Path = trial.configarg\n\t\tcfg, err := ldr.Load()\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(cfg.SourceTimestamp, check.Equals, cfg.SourceTimestamp.UTC())\n\t\tc.Check(cfg.SourceTimestamp, check.Equals, 
ldr.sourceTimestamp)\n\t\tc.Check(int(cfg.SourceTimestamp.Sub(trial.expectTime).Seconds()), check.Equals, 0)\n\t\tc.Check(int(ldr.loadTimestamp.Sub(time.Now()).Seconds()), check.Equals, 0)\n\n\t\treg := prometheus.NewRegistry()\n\t\tldr.RegisterMetrics(reg)\n\t\tmetrics := arvadostest.GatherMetricsAsString(reg)\n\t\tc.Check(metrics, check.Matches, `# HELP .*\n# TYPE .*\narvados_config_load_timestamp_seconds{sha256=\"83aea5d82eb1d53372cd65c936c60acc1c6ef946e61977bbca7cfea709d201a8\"} \\Q`+fmt.Sprintf(\"%g\", float64(ldr.loadTimestamp.UnixNano())/1e9)+`\\E\n# HELP .*\n# TYPE .*\narvados_config_source_timestamp_seconds{sha256=\"83aea5d82eb1d53372cd65c936c60acc1c6ef946e61977bbca7cfea709d201a8\"} \\Q`+fmt.Sprintf(\"%g\", float64(cfg.SourceTimestamp.UnixNano())/1e9)+`\\E\n`)\n\t}\n}\n\nfunc (s *LoadSuite) TestGetHostRAM(c *check.C) {\n\thostram, err := getHostRAM()\n\tc.Check(err, check.IsNil)\n\tc.Logf(\"getHostRAM() == %v\", hostram)\n}\n\nfunc (s *LoadSuite) TestGetFilesystemSize(c *check.C) {\n\tpath := c.MkDir()\n\tsize, err := getFilesystemSize(path)\n\tc.Check(err, check.IsNil)\n\tc.Logf(\"getFilesystemSize(%q) == %v\", path, size)\n}\n\nfunc (s *LoadSuite) TestLoadSSHKey(c *check.C) {\n\tcwd, err := os.Getwd()\n\tc.Assert(err, check.IsNil)\n\t_, err = LoadSSHKey(\"file://\" + cwd + \"/../dispatchcloud/test/sshkey_dispatch\")\n\tc.Check(err, check.IsNil)\n}\n\nfunc (s *LoadSuite) TestLoadSSHKeyTypes(c *check.C) {\n\tfor _, format := range []string{\"PEM\", \"RFC4716\", \"PKCS8\"} {\n\t\tfor _, keytype := range []string{\"ecdsa\", \"ed25519\", \"rsa\"} {\n\t\t\tc.Logf(\"=== keytype %s\", keytype)\n\t\t\ttmpdir := c.MkDir()\n\t\t\tbuf, err := exec.Command(\"ssh-keygen\", \"-N\", \"\", \"-t\", keytype, \"-m\", format, \"-f\", tmpdir+\"/key\").CombinedOutput()\n\t\t\tif !c.Check(err, check.IsNil, check.Commentf(\"(keytype %s, format %s) %s\", keytype, format, buf)) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t_, err = LoadSSHKey(\"file://\" + tmpdir + \"/key\")\n\t\t\tc.Check(err, check.IsNil, check.Commentf(\"LoadSSHKey failed on keytype %s in format %s\", keytype, format))\n\t\t}\n\t}\n}\n"
  },
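  {
    "path": "lib/config/diff_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Illustrative sketch (hypothetical file and function, not in the\n// upstream tree): a standalone version of the /dev/fd technique that\n// checkEqualYAML in load_test.go uses to diff two in-memory byte\n// slices with the external \"diff\" tool. Each *os.File appended to\n// cmd.ExtraFiles becomes file descriptor 3+i in the child process, so\n// the two pipes appear as /dev/fd/3 and /dev/fd/4.\npackage config\n\nimport (\n\t\"os\"\n\t\"os/exec\"\n)\n\nfunc exampleDiffBytes(expected, got []byte) ([]byte, error) {\n\tcmd := exec.Command(\"diff\", \"-u\", \"--label\", \"expected\", \"--label\", \"got\", \"/dev/fd/3\", \"/dev/fd/4\")\n\tfor _, data := range [][]byte{expected, got} {\n\t\tpr, pw, err := os.Pipe()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer pr.Close()\n\t\t// Feed each input through the write end of its pipe; the\n\t\t// child reads the other end via /dev/fd.\n\t\tgo func(data []byte, pw *os.File) {\n\t\t\tpw.Write(data)\n\t\t\tpw.Close()\n\t\t}(data, pw)\n\t\tcmd.ExtraFiles = append(cmd.ExtraFiles, pr)\n\t}\n\t// diff exits non-zero when the inputs differ, so callers should\n\t// inspect the output rather than treating the error as fatal.\n\treturn cmd.CombinedOutput()\n}\n"
  },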
  {
    "path": "lib/config/uptodate_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage config\n\nimport (\n\t\"bytes\"\n\t\"io/ioutil\"\n\t\"testing\"\n)\n\nfunc TestUpToDate(t *testing.T) {\n\tsrc := \"config.default.yml\"\n\tsrcdata, err := ioutil.ReadFile(src)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(srcdata, DefaultYAML) {\n\t\tt.Fatalf(\"content of %s differs from DefaultYAML -- you need to run 'go generate' and commit\", src)\n\t}\n}\n"
  },
  {
    "path": "lib/controller/api/routable.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Package api provides types used by controller/server-component\n// packages.\npackage api\n\nimport \"context\"\n\n// A RoutableFunc calls an API method (sometimes via a wrapped\n// RoutableFunc) that has real argument types.\n//\n// (It is used by ctrlctx to manage database transactions, so moving\n// it to the router package would cause a circular dependency\n// router->arvadostest->ctrlctx->router.)\ntype RoutableFunc func(ctx context.Context, opts interface{}) (interface{}, error)\n\ntype RoutableFuncWrapper func(RoutableFunc) RoutableFunc\n\n// ComposeWrappers (w1, w2, w3, ...) returns a RoutableFuncWrapper that\n// composes w1, w2, w3, ... such that w1 is the outermost wrapper.\nfunc ComposeWrappers(wraps ...RoutableFuncWrapper) RoutableFuncWrapper {\n\treturn func(f RoutableFunc) RoutableFunc {\n\t\tfor i := len(wraps) - 1; i >= 0; i-- {\n\t\t\tf = wraps[i](f)\n\t\t}\n\t\treturn f\n\t}\n}\n"
  },
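  {
    "path": "lib/controller/api/composewrappers_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Illustrative sketch (hypothetical file and names, not in the\n// upstream tree): demonstrates the composition order documented on\n// ComposeWrappers -- the first wrapper passed in ends up outermost.\npackage api\n\nimport \"context\"\n\nfunc exampleComposeWrappers(logger func(string)) RoutableFunc {\n\t// tag returns a wrapper that reports its label before calling the\n\t// function it wraps.\n\ttag := func(label string) RoutableFuncWrapper {\n\t\treturn func(f RoutableFunc) RoutableFunc {\n\t\t\treturn func(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\tlogger(label)\n\t\t\t\treturn f(ctx, opts)\n\t\t\t}\n\t\t}\n\t}\n\tinner := func(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\tlogger(\"inner\")\n\t\treturn nil, nil\n\t}\n\t// Equivalent to tag(\"w1\")(tag(\"w2\")(inner)): each call reports\n\t// \"w1\", \"w2\", \"inner\" in that order.\n\treturn ComposeWrappers(tag(\"w1\"), tag(\"w2\"))(inner)\n}\n"
  },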
  {
    "path": "lib/controller/auth_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage controller\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/sirupsen/logrus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nvar _ = check.Suite(&AuthSuite{})\n\ntype AuthSuite struct {\n\tlog logrus.FieldLogger\n\t// testServer and testHandler are the controller being tested,\n\t// \"zhome\".\n\ttestServer  *httpserver.Server\n\ttestHandler *Handler\n\t// remoteServer (\"zzzzz\") forwards requests to the Rails API\n\t// provided by the integration test environment.\n\tremoteServer *httpserver.Server\n\t// remoteMock (\"zmock\") appends each incoming request to\n\t// remoteMockRequests, and returns 200 with an empty JSON\n\t// object.\n\tremoteMock         *httpserver.Server\n\tremoteMockRequests []http.Request\n\n\tfakeProvider *arvadostest.OIDCProvider\n}\n\nfunc (s *AuthSuite) SetUpTest(c *check.C) {\n\ts.log = ctxlog.TestLogger(c)\n\n\ts.remoteServer = newServerFromIntegrationTestEnv(c)\n\tc.Assert(s.remoteServer.Start(), check.IsNil)\n\n\ts.remoteMock = newServerFromIntegrationTestEnv(c)\n\ts.remoteMock.Server.Handler = http.HandlerFunc(http.NotFound)\n\tc.Assert(s.remoteMock.Start(), check.IsNil)\n\n\ts.fakeProvider = arvadostest.NewOIDCProvider(c)\n\ts.fakeProvider.AuthEmail = \"active-user@arvados.local\"\n\ts.fakeProvider.AuthEmailVerified = true\n\ts.fakeProvider.AuthName = \"Fake User Name\"\n\ts.fakeProvider.ValidCode = fmt.Sprintf(\"abcdefgh-%d\", time.Now().Unix())\n\ts.fakeProvider.PeopleAPIResponse = map[string]interface{}{}\n\ts.fakeProvider.ValidClientID = \"test%client$id\"\n\ts.fakeProvider.ValidClientSecret = \"test#client/secret\"\n\n\tcluster := &arvados.Cluster{\n\t\tClusterID:       \"zhome\",\n\t\tPostgreSQL:      integrationTestCluster().PostgreSQL,\n\t\tSystemRootToken: arvadostest.SystemRootToken,\n\t}\n\tcluster.TLS.Insecure = true\n\tcluster.API.MaxItemsPerResponse = 1000\n\tcluster.API.MaxRequestAmplification = 4\n\tcluster.API.RequestTimeout = arvados.Duration(5 * time.Minute)\n\tarvadostest.SetServiceURL(&cluster.Services.RailsAPI, \"https://\"+os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\tarvadostest.SetServiceURL(&cluster.Services.Controller, \"http://localhost/\")\n\n\tcluster.RemoteClusters = map[string]arvados.RemoteCluster{\n\t\t\"zzzzz\": {\n\t\t\tHost:   s.remoteServer.Addr,\n\t\t\tProxy:  true,\n\t\t\tScheme: \"http\",\n\t\t},\n\t\t\"zmock\": {\n\t\t\tHost:   s.remoteMock.Addr,\n\t\t\tProxy:  true,\n\t\t\tScheme: \"http\",\n\t\t},\n\t\t\"*\": {\n\t\t\tScheme: \"https\",\n\t\t},\n\t}\n\tcluster.Login.OpenIDConnect.Enable = true\n\tcluster.Login.OpenIDConnect.Issuer = s.fakeProvider.Issuer.URL\n\tcluster.Login.OpenIDConnect.ClientID = s.fakeProvider.ValidClientID\n\tcluster.Login.OpenIDConnect.ClientSecret = s.fakeProvider.ValidClientSecret\n\tcluster.Login.OpenIDConnect.EmailClaim = \"email\"\n\tcluster.Login.OpenIDConnect.EmailVerifiedClaim = \"email_verified\"\n\tcluster.Login.OpenIDConnect.AcceptAccessToken = true\n\tcluster.Login.OpenIDConnect.AcceptAccessTokenScope = \"\"\n\n\ts.testHandler = &Handler{Cluster: cluster, BackgroundContext: ctxlog.Context(context.Background(), s.log)}\n\ts.testServer = 
newServerFromIntegrationTestEnv(c)\n\ts.testServer.Server.BaseContext = func(net.Listener) context.Context {\n\t\treturn ctxlog.Context(context.Background(), s.log)\n\t}\n\ts.testServer.Server.Handler = httpserver.AddRequestIDs(httpserver.LogRequests(s.testHandler))\n\tc.Assert(s.testServer.Start(), check.IsNil)\n}\n\nfunc (s *AuthSuite) TestLocalOIDCAccessToken(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/users/current\", nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+s.fakeProvider.ValidAccessToken())\n\trr := httptest.NewRecorder()\n\ts.testServer.Server.Handler.ServeHTTP(rr, req)\n\tresp := rr.Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tvar u arvados.User\n\tc.Check(json.NewDecoder(resp.Body).Decode(&u), check.IsNil)\n\tc.Check(u.UUID, check.Equals, arvadostest.ActiveUserUUID)\n\tc.Check(u.OwnerUUID, check.Equals, \"zzzzz-tpzed-000000000000000\")\n\n\t// Request again to exercise cache.\n\treq = httptest.NewRequest(\"GET\", \"/arvados/v1/users/current\", nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+s.fakeProvider.ValidAccessToken())\n\trr = httptest.NewRecorder()\n\ts.testServer.Server.Handler.ServeHTTP(rr, req)\n\tresp = rr.Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n}\n"
  },
  {
    "path": "lib/controller/cmd.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage controller\n\nimport (\n\t\"context\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/service\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\n// Command starts a controller service. See cmd/arvados-server/cmd.go\nvar Command cmd.Handler = service.Command(arvados.ServiceNameController, newHandler)\n\nfunc newHandler(ctx context.Context, cluster *arvados.Cluster, _ string, _ *prometheus.Registry) service.Handler {\n\treturn &Handler{Cluster: cluster, BackgroundContext: ctx}\n}\n"
  },
  {
    "path": "lib/controller/dblock/dblock.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dblock\n\nimport (\n\t\"context\"\n\t\"database/sql\"\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/jmoiron/sqlx\"\n)\n\nvar (\n\tTrashSweep         = &DBLocker{key: 10001}\n\tContainerLogSweep  = &DBLocker{key: 10002}\n\tKeepBalanceService = &DBLocker{key: 10003} // keep-balance service in periodic-sweep loop\n\tKeepBalanceActive  = &DBLocker{key: 10004} // keep-balance sweep in progress (either -once=true or service loop)\n\tDispatch           = &DBLocker{key: 10005} // any dispatcher running\n\tRailsMigrations    = &DBLocker{key: 10006}\n\tretryDelay         = 5 * time.Second\n)\n\n// DBLocker uses pg_advisory_lock to maintain a cluster-wide lock for\n// a long-running task like \"do X every N seconds\".\ntype DBLocker struct {\n\tkey   int\n\tmtx   sync.Mutex\n\tctx   context.Context\n\tgetdb func(context.Context) (*sqlx.DB, error)\n\tconn  *sql.Conn // != nil if advisory lock has been acquired\n}\n\n// Lock acquires the advisory lock, waiting/reconnecting if needed.\n//\n// Returns false if ctx is canceled before the lock is acquired.\nfunc (dbl *DBLocker) Lock(ctx context.Context, getdb func(context.Context) (*sqlx.DB, error)) bool {\n\tlogger := ctxlog.FromContext(ctx).WithField(\"ID\", dbl.key)\n\tvar lastHeldBy string\n\tfor ; ; time.Sleep(retryDelay) {\n\t\tdbl.mtx.Lock()\n\t\tif dbl.conn != nil {\n\t\t\t// Another goroutine is already locked/waiting\n\t\t\t// on this lock. Wait for them to release.\n\t\t\tdbl.mtx.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\tif ctx.Err() != nil {\n\t\t\tdbl.mtx.Unlock()\n\t\t\treturn false\n\t\t}\n\t\tdb, err := getdb(ctx)\n\t\tif err == context.Canceled {\n\t\t\tdbl.mtx.Unlock()\n\t\t\treturn false\n\t\t} else if err != nil {\n\t\t\tlogger.WithError(err).Info(\"error getting database pool\")\n\t\t\tdbl.mtx.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\tconn, err := db.Conn(ctx)\n\t\tif err == context.Canceled {\n\t\t\tdbl.mtx.Unlock()\n\t\t\treturn false\n\t\t} else if err != nil {\n\t\t\tlogger.WithError(err).Info(\"error getting database connection\")\n\t\t\tdbl.mtx.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\tvar locked bool\n\t\terr = conn.QueryRowContext(ctx, `SELECT pg_try_advisory_lock($1)`, dbl.key).Scan(&locked)\n\t\tif err == context.Canceled {\n\t\t\treturn false\n\t\t} else if err != nil {\n\t\t\tlogger.WithError(err).Info(\"error getting pg_try_advisory_lock\")\n\t\t\tconn.Close()\n\t\t\tdbl.mtx.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\tif !locked {\n\t\t\tvar host string\n\t\t\tvar port int\n\t\t\terr = conn.QueryRowContext(ctx, `SELECT client_addr, client_port FROM pg_stat_activity WHERE pid IN\n\t\t\t\t(SELECT pid FROM pg_locks\n\t\t\t\t WHERE locktype = $1 AND objid = $2)`, \"advisory\", dbl.key).Scan(&host, &port)\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Info(\"error getting other client info\")\n\t\t\t} else {\n\t\t\t\theldBy := net.JoinHostPort(host, fmt.Sprintf(\"%d\", port))\n\t\t\t\tif lastHeldBy != heldBy {\n\t\t\t\t\tlogger.WithField(\"DBClient\", heldBy).Info(\"waiting for other process to release lock\")\n\t\t\t\t\tlastHeldBy = heldBy\n\t\t\t\t}\n\t\t\t}\n\t\t\tconn.Close()\n\t\t\tdbl.mtx.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\tlogger.Debug(\"acquired pg_advisory_lock\")\n\t\tdbl.ctx, dbl.getdb, dbl.conn = ctx, getdb, conn\n\t\tdbl.mtx.Unlock()\n\t\treturn true\n\t}\n}\n\n// Check confirms that the lock is still active (i.e., the session is\n// 
still alive), and re-acquires if needed. Panics if Lock is not\n// acquired first.\n//\n// Returns false if the context passed to Lock() is canceled before\n// the lock is confirmed or reacquired.\nfunc (dbl *DBLocker) Check() bool {\n\tdbl.mtx.Lock()\n\terr := dbl.conn.PingContext(dbl.ctx)\n\tif err == context.Canceled {\n\t\tdbl.mtx.Unlock()\n\t\treturn false\n\t} else if err == nil {\n\t\tctxlog.FromContext(dbl.ctx).WithField(\"ID\", dbl.key).Debug(\"connection still alive\")\n\t\tdbl.mtx.Unlock()\n\t\treturn true\n\t}\n\tctxlog.FromContext(dbl.ctx).WithError(err).Info(\"database connection ping failed\")\n\tdbl.conn.Close()\n\tctx, getdb := dbl.ctx, dbl.getdb\n\tdbl.ctx, dbl.getdb, dbl.conn = nil, nil, nil\n\tdbl.mtx.Unlock()\n\treturn dbl.Lock(ctx, getdb)\n}\n\nfunc (dbl *DBLocker) Unlock() {\n\tdbl.mtx.Lock()\n\tdefer dbl.mtx.Unlock()\n\tif dbl.conn != nil {\n\t\t_, err := dbl.conn.ExecContext(context.Background(), `SELECT pg_advisory_unlock($1)`, dbl.key)\n\t\tif err != nil {\n\t\t\tctxlog.FromContext(dbl.ctx).WithError(err).WithField(\"ID\", dbl.key).Info(\"error releasing pg_advisory_lock\")\n\t\t} else {\n\t\t\tctxlog.FromContext(dbl.ctx).WithField(\"ID\", dbl.key).Debug(\"released pg_advisory_lock\")\n\t\t}\n\t\tdbl.conn.Close()\n\t}\n\t// Ensure we don't interfere with garbage collection by\n\t// retaining references to objects/closures provided by the\n\t// caller for us to use while locked.\n\tdbl.ctx, dbl.getdb, dbl.conn = nil, nil, nil\n}\n"
  },
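  {
    "path": "lib/controller/dblock/example_sketch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Illustrative sketch (hypothetical file and function, not in the\n// upstream tree): the intended Lock/Check/Unlock pattern for a\n// periodic cluster-wide task such as trash sweeping. Real callers\n// pass a getdb func backed by the controller's database pool.\npackage dblock\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com/jmoiron/sqlx\"\n)\n\nfunc examplePeriodicSweep(ctx context.Context, getdb func(context.Context) (*sqlx.DB, error), sweep func()) {\n\t// Lock blocks until the cluster-wide advisory lock is acquired,\n\t// returning false only if ctx is canceled first.\n\tif !TrashSweep.Lock(ctx, getdb) {\n\t\treturn\n\t}\n\tdefer TrashSweep.Unlock()\n\tfor {\n\t\t// Check re-verifies the lock (reconnecting and re-acquiring\n\t\t// if the session died) so two processes never sweep at once.\n\t\tif !TrashSweep.Check() {\n\t\t\treturn\n\t\t}\n\t\tsweep()\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase <-time.After(time.Minute):\n\t\t}\n\t}\n}\n"
  },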
  {
    "path": "lib/controller/dblock/dblock_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dblock\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/jmoiron/sqlx\"\n\t\"github.com/sirupsen/logrus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&suite{})\n\ntype suite struct {\n\tcluster *arvados.Cluster\n\tdb      *sqlx.DB\n\tgetdb   func(context.Context) (*sqlx.DB, error)\n}\n\nvar testLocker = &DBLocker{key: 999}\n\nfunc (s *suite) SetUpSuite(c *check.C) {\n\tcfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()\n\tc.Assert(err, check.IsNil)\n\ts.cluster, err = cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\ts.db = arvadostest.DB(c, s.cluster)\n\ts.getdb = func(context.Context) (*sqlx.DB, error) { return s.db, nil }\n}\n\nfunc (s *suite) TestLock(c *check.C) {\n\tretryDelay = 10 * time.Millisecond\n\n\tvar logbuf bytes.Buffer\n\tlogger := ctxlog.New(&logbuf, \"text\", \"debug\")\n\tlogger.Level = logrus.DebugLevel\n\tctx := ctxlog.Context(context.Background(), logger)\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\ttestLocker.Lock(ctx, s.getdb)\n\ttestLocker.Check()\n\n\tlock2 := make(chan bool)\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\ttestLocker2 := &DBLocker{key: 999}\n\t\ttestLocker2.Lock(ctx, s.getdb)\n\t\tclose(lock2)\n\t\ttestLocker2.Check()\n\t\ttestLocker2.Unlock()\n\t}()\n\n\t// Second lock should wait for first to Unlock\n\tselect {\n\tcase <-time.After(time.Second / 10):\n\t\tc.Check(logbuf.String(), check.Matches, `(?ms).*level=info.*DBClient=\"[^\"]+:\\d+\".*ID=999.*`)\n\tcase <-lock2:\n\t\tc.Log(\"double-lock\")\n\t\tc.Fail()\n\t}\n\n\ttestLocker.Check()\n\ttestLocker.Unlock()\n\n\t// Now the second lock should succeed within retryDelay\n\tselect {\n\tcase <-time.After(retryDelay * 2):\n\t\tc.Log(\"timed out\")\n\t\tc.Fail()\n\tcase <-lock2:\n\t}\n\tc.Logf(\"%s\", logbuf.String())\n}\n"
  },
  {
    "path": "lib/controller/fed_generic.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage controller\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"sync\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\ntype federatedRequestDelegate func(\n\th *genericFederatedRequestHandler,\n\teffectiveMethod string,\n\tclusterID *string,\n\tuuid string,\n\tremainder string,\n\tw http.ResponseWriter,\n\treq *http.Request) bool\n\ntype genericFederatedRequestHandler struct {\n\tnext      http.Handler\n\thandler   *Handler\n\tmatcher   *regexp.Regexp\n\tdelegates []federatedRequestDelegate\n}\n\nfunc (h *genericFederatedRequestHandler) remoteQueryUUIDs(w http.ResponseWriter,\n\treq *http.Request,\n\tclusterID string, uuids []string) (rp []map[string]interface{}, kind string, err error) {\n\n\tfound := make(map[string]bool)\n\tprevLenUuids := len(uuids) + 1\n\t// Loop while\n\t// (1) there are more uuids to query\n\t// (2) we're making progress - on each iteration the set of\n\t// uuids we are expecting for must shrink.\n\tfor len(uuids) > 0 && len(uuids) < prevLenUuids {\n\t\tvar remoteReq http.Request\n\t\tremoteReq.Header = req.Header\n\t\tremoteReq.Method = \"POST\"\n\t\tremoteReq.URL = &url.URL{Path: req.URL.Path}\n\t\tremoteParams := make(url.Values)\n\t\tremoteParams.Set(\"_method\", \"GET\")\n\t\tremoteParams.Set(\"count\", \"none\")\n\t\tif req.Form.Get(\"select\") != \"\" {\n\t\t\tremoteParams.Set(\"select\", req.Form.Get(\"select\"))\n\t\t}\n\t\tcontent, err := json.Marshal(uuids)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\tremoteParams[\"filters\"] = []string{fmt.Sprintf(`[[\"uuid\", \"in\", %s]]`, content)}\n\t\tenc := remoteParams.Encode()\n\t\tremoteReq.Body = ioutil.NopCloser(bytes.NewBufferString(enc))\n\n\t\trc := multiClusterQueryResponseCollector{clusterID: clusterID}\n\n\t\tvar resp *http.Response\n\t\tif clusterID == h.handler.Cluster.ClusterID {\n\t\t\tresp, err = h.handler.localClusterRequest(&remoteReq)\n\t\t} else {\n\t\t\tresp, err = h.handler.remoteClusterRequest(clusterID, &remoteReq)\n\t\t}\n\t\trc.collectResponse(resp, err)\n\n\t\tif rc.error != nil {\n\t\t\treturn nil, \"\", rc.error\n\t\t}\n\n\t\tkind = rc.kind\n\n\t\tif len(rc.responses) == 0 {\n\t\t\t// We got zero responses, no point in doing\n\t\t\t// another query.\n\t\t\treturn rp, kind, nil\n\t\t}\n\n\t\trp = append(rp, rc.responses...)\n\n\t\t// Go through the responses and determine what was\n\t\t// returned.  
If there are remaining items, loop\n\t\t// around and do another request with just the\n\t\t// stragglers.\n\t\tfor _, i := range rc.responses {\n\t\t\tuuid, ok := i[\"uuid\"].(string)\n\t\t\tif ok {\n\t\t\t\tfound[uuid] = true\n\t\t\t}\n\t\t}\n\n\t\tl := []string{}\n\t\tfor _, u := range uuids {\n\t\t\tif !found[u] {\n\t\t\t\tl = append(l, u)\n\t\t\t}\n\t\t}\n\t\tprevLenUuids = len(uuids)\n\t\tuuids = l\n\t}\n\n\treturn rp, kind, nil\n}\n\nfunc (h *genericFederatedRequestHandler) handleMultiClusterQuery(w http.ResponseWriter,\n\treq *http.Request, clusterID *string) bool {\n\n\tvar filters [][]interface{}\n\terr := json.Unmarshal([]byte(req.Form.Get(\"filters\")), &filters)\n\tif err != nil {\n\t\thttpserver.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn true\n\t}\n\n\t// Split the list of uuids by prefix\n\tqueryClusters := make(map[string][]string)\n\texpectCount := 0\n\tfor _, filter := range filters {\n\t\tif len(filter) != 3 {\n\t\t\treturn false\n\t\t}\n\n\t\tif lhs, ok := filter[0].(string); !ok || lhs != \"uuid\" {\n\t\t\treturn false\n\t\t}\n\n\t\top, ok := filter[1].(string)\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tif op == \"in\" {\n\t\t\tif rhs, ok := filter[2].([]interface{}); ok {\n\t\t\t\tfor _, i := range rhs {\n\t\t\t\t\tif u, ok := i.(string); ok && len(u) == 27 {\n\t\t\t\t\t\t*clusterID = u[0:5]\n\t\t\t\t\t\tqueryClusters[u[0:5]] = append(queryClusters[u[0:5]], u)\n\t\t\t\t\t\texpectCount++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else if op == \"=\" {\n\t\t\tif u, ok := filter[2].(string); ok && len(u) == 27 {\n\t\t\t\t*clusterID = u[0:5]\n\t\t\t\tqueryClusters[u[0:5]] = append(queryClusters[u[0:5]], u)\n\t\t\t\texpectCount++\n\t\t\t}\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\n\t}\n\n\tif len(queryClusters) <= 1 {\n\t\t// Query does not search for uuids across multiple\n\t\t// clusters.\n\t\treturn false\n\t}\n\n\t// Validations\n\tcount := req.Form.Get(\"count\")\n\tif count != \"\" && count != `none` && count != `\"none\"` {\n\t\thttpserver.Error(w, \"Federated multi-object query must have 'count=none'\", http.StatusBadRequest)\n\t\treturn true\n\t}\n\tif req.Form.Get(\"limit\") != \"\" || req.Form.Get(\"offset\") != \"\" || req.Form.Get(\"order\") != \"\" {\n\t\thttpserver.Error(w, \"Federated multi-object may not provide 'limit', 'offset' or 'order'.\", http.StatusBadRequest)\n\t\treturn true\n\t}\n\tif max := h.handler.Cluster.API.MaxItemsPerResponse; expectCount > max {\n\t\thttpserver.Error(w, fmt.Sprintf(\"Federated multi-object request for %v objects which is more than max page size %v.\",\n\t\t\texpectCount, max), http.StatusBadRequest)\n\t\treturn true\n\t}\n\tif req.Form.Get(\"select\") != \"\" {\n\t\tfoundUUID := false\n\t\tvar selects []string\n\t\terr := json.Unmarshal([]byte(req.Form.Get(\"select\")), &selects)\n\t\tif err != nil {\n\t\t\thttpserver.Error(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn true\n\t\t}\n\n\t\tfor _, r := range selects {\n\t\t\tif r == \"uuid\" {\n\t\t\t\tfoundUUID = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !foundUUID {\n\t\t\thttpserver.Error(w, \"Federated multi-object request must include 'uuid' in 'select'\", http.StatusBadRequest)\n\t\t\treturn true\n\t\t}\n\t}\n\n\t// Perform concurrent requests to each cluster\n\n\tacquire, release := semaphore(h.handler.Cluster.API.MaxRequestAmplification)\n\twg := sync.WaitGroup{}\n\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\tmtx := sync.Mutex{}\n\terrors := []error{}\n\tvar completeResponses []map[string]interface{}\n\tvar 
kind string\n\n\tfor k, v := range queryClusters {\n\t\tif len(v) == 0 {\n\t\t\t// Nothing to query\n\t\t\tcontinue\n\t\t}\n\t\tacquire()\n\t\twg.Add(1)\n\t\tgo func(k string, v []string) {\n\t\t\tdefer release()\n\t\t\tdefer wg.Done()\n\t\t\trp, kn, err := h.remoteQueryUUIDs(w, req, k, v)\n\t\t\tmtx.Lock()\n\t\t\tdefer mtx.Unlock()\n\t\t\tif err == nil {\n\t\t\t\tcompleteResponses = append(completeResponses, rp...)\n\t\t\t\tkind = kn\n\t\t\t} else {\n\t\t\t\terrors = append(errors, err)\n\t\t\t}\n\t\t}(k, v)\n\t}\n\twg.Wait()\n\n\tif len(errors) > 0 {\n\t\tvar strerr []string\n\t\tfor _, e := range errors {\n\t\t\tstrerr = append(strerr, e.Error())\n\t\t}\n\t\thttpserver.Errors(w, strerr, http.StatusBadGateway)\n\t\treturn true\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\titemList := make(map[string]interface{})\n\titemList[\"items\"] = completeResponses\n\titemList[\"kind\"] = kind\n\tjson.NewEncoder(w).Encode(itemList)\n\n\treturn true\n}\n\nfunc (h *genericFederatedRequestHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tm := h.matcher.FindStringSubmatch(req.URL.Path)\n\tclusterID := \"\"\n\n\tif len(m) > 0 && m[2] != \"\" {\n\t\tclusterID = m[2]\n\t}\n\n\t// Get form parameters from URL and form body (if POST).\n\tif err := loadParamsFromForm(req); err != nil {\n\t\thttpserver.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// Check if the parameters have an explicit cluster_id\n\tif req.Form.Get(\"cluster_id\") != \"\" {\n\t\tclusterID = req.Form.Get(\"cluster_id\")\n\t}\n\n\t// Handle the POST-as-GET special case (workaround for large\n\t// GET requests that potentially exceed maximum URL length,\n\t// like multi-object queries where the filter has 100s of\n\t// items)\n\teffectiveMethod := req.Method\n\tif req.Method == \"POST\" && req.Form.Get(\"_method\") != \"\" {\n\t\teffectiveMethod = req.Form.Get(\"_method\")\n\t}\n\n\tif effectiveMethod == \"GET\" &&\n\t\tclusterID == \"\" &&\n\t\treq.Form.Get(\"filters\") != \"\" &&\n\t\th.handleMultiClusterQuery(w, req, &clusterID) {\n\t\treturn\n\t}\n\n\tvar uuid string\n\tif len(m[1]) > 0 {\n\t\t// trim leading slash\n\t\tuuid = m[1][1:]\n\t}\n\tfor _, d := range h.delegates {\n\t\tif d(h, effectiveMethod, &clusterID, uuid, m[3], w, req) {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif clusterID == \"\" || clusterID == h.handler.Cluster.ClusterID {\n\t\th.next.ServeHTTP(w, req)\n\t} else {\n\t\tresp, err := h.handler.remoteClusterRequest(clusterID, req)\n\t\th.handler.proxy.ForwardResponse(w, resp, err)\n\t}\n}\n\ntype multiClusterQueryResponseCollector struct {\n\tresponses []map[string]interface{}\n\terror     error\n\tkind      string\n\tclusterID string\n}\n\nfunc (c *multiClusterQueryResponseCollector) collectResponse(resp *http.Response,\n\trequestError error) (newResponse *http.Response, err error) {\n\tif requestError != nil {\n\t\tc.error = requestError\n\t\treturn nil, nil\n\t}\n\n\tdefer resp.Body.Close()\n\tvar loadInto struct {\n\t\tKind   string                   `json:\"kind\"`\n\t\tItems  []map[string]interface{} `json:\"items\"`\n\t\tErrors []string                 `json:\"errors\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&loadInto)\n\n\tif err != nil {\n\t\tc.error = fmt.Errorf(\"error fetching from %v (%v): %v\", c.clusterID, resp.Status, err)\n\t\treturn nil, nil\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tc.error = fmt.Errorf(\"error fetching from %v (%v): %v\", c.clusterID, resp.Status, loadInto.Errors)\n\t\treturn nil, 
nil\n\t}\n\n\tc.responses = loadInto.Items\n\tc.kind = loadInto.Kind\n\n\treturn nil, nil\n}\n"
  },
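  {
    "path": "lib/controller/semaphore_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Illustrative sketch (hypothetical file and function, not in the\n// upstream tree): a counting semaphore built from a buffered channel,\n// with the same acquire/release shape that handleMultiClusterQuery in\n// fed_generic.go gets from this package's semaphore() helper (defined\n// elsewhere) to cap fan-out at API.MaxRequestAmplification.\npackage controller\n\nfunc exampleSemaphore(max int) (acquire, release func()) {\n\tif max < 1 {\n\t\t// Treat a non-positive limit as unlimited.\n\t\treturn func() {}, func() {}\n\t}\n\tch := make(chan struct{}, max)\n\t// acquire blocks while max callers already hold a slot; release\n\t// frees one slot.\n\treturn func() { ch <- struct{}{} }, func() { <-ch }\n}\n"
  },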
  {
    "path": "lib/controller/federation/collection_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage federation\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&collectionSuite{})\n\ntype collectionSuite struct {\n\tFederationSuite\n}\n\nfunc (s *collectionSuite) TestMultipleBackendFailureStatus(c *check.C) {\n\tnxPDH := \"a4f995dd0c08216f37cb1bdec990f0cd+1234\"\n\ts.cluster.ClusterID = \"local\"\n\tfor _, trial := range []struct {\n\t\tlabel        string\n\t\ttoken        string\n\t\tlocalStatus  int\n\t\tremoteStatus map[string]int\n\t\texpectStatus int\n\t}{\n\t\t{\n\t\t\t\"all backends return 404 => 404\",\n\t\t\tarvadostest.SystemRootToken,\n\t\t\thttp.StatusNotFound,\n\t\t\tmap[string]int{\n\t\t\t\t\"aaaaa\": http.StatusNotFound,\n\t\t\t\t\"bbbbb\": http.StatusNotFound,\n\t\t\t},\n\t\t\thttp.StatusNotFound,\n\t\t},\n\t\t{\n\t\t\t\"all backends return 401 => 401 (e.g., bad token)\",\n\t\t\tarvadostest.SystemRootToken,\n\t\t\thttp.StatusUnauthorized,\n\t\t\tmap[string]int{\n\t\t\t\t\"aaaaa\": http.StatusUnauthorized,\n\t\t\t\t\"bbbbb\": http.StatusUnauthorized,\n\t\t\t},\n\t\t\thttp.StatusUnauthorized,\n\t\t},\n\t\t{\n\t\t\t\"local 404, remotes 403 => 422 (mix of non-retryable errors)\",\n\t\t\tarvadostest.SystemRootToken,\n\t\t\thttp.StatusNotFound,\n\t\t\tmap[string]int{\n\t\t\t\t\"aaaaa\": http.StatusForbidden,\n\t\t\t\t\"bbbbb\": http.StatusForbidden,\n\t\t\t},\n\t\t\thttp.StatusUnprocessableEntity,\n\t\t},\n\t\t{\n\t\t\t\"local 404, remotes 401/403/404 => 422 (mix of non-retryable errors)\",\n\t\t\tarvadostest.SystemRootToken,\n\t\t\thttp.StatusNotFound,\n\t\t\tmap[string]int{\n\t\t\t\t\"aaaaa\": http.StatusUnauthorized,\n\t\t\t\t\"bbbbb\": http.StatusForbidden,\n\t\t\t\t\"ccccc\": http.StatusNotFound,\n\t\t\t},\n\t\t\thttp.StatusUnprocessableEntity,\n\t\t},\n\t\t{\n\t\t\t\"local 404, remotes 401/403/500 => 502 (at least one remote is retryable)\",\n\t\t\tarvadostest.SystemRootToken,\n\t\t\thttp.StatusNotFound,\n\t\t\tmap[string]int{\n\t\t\t\t\"aaaaa\": http.StatusUnauthorized,\n\t\t\t\t\"bbbbb\": http.StatusForbidden,\n\t\t\t\t\"ccccc\": http.StatusInternalServerError,\n\t\t\t},\n\t\t\thttp.StatusBadGateway,\n\t\t},\n\t} {\n\t\tc.Logf(\"trial: %v\", trial)\n\t\ts.fed = New(s.ctx, s.cluster, nil, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)\n\t\ts.fed.local = &arvadostest.APIStub{Error: httpserver.ErrorWithStatus(fmt.Errorf(\"stub error %d\", trial.localStatus), trial.localStatus)}\n\t\tfor id, status := range trial.remoteStatus {\n\t\t\ts.addDirectRemote(c, id, &arvadostest.APIStub{Error: httpserver.ErrorWithStatus(fmt.Errorf(\"stub error %d\", status), status)})\n\t\t}\n\n\t\tctx := context.Background()\n\t\tctx = ctxlog.Context(ctx, ctxlog.TestLogger(c))\n\t\tif trial.token != \"\" {\n\t\t\tctx = auth.NewContext(ctx, &auth.Credentials{Tokens: []string{trial.token}})\n\t\t}\n\n\t\t_, err := s.fed.CollectionGet(s.ctx, arvados.GetOptions{UUID: nxPDH})\n\t\tc.Check(err.(httpserver.HTTPStatusError).HTTPStatus(), check.Equals, trial.expectStatus)\n\t}\n}\n"
  },
  {
    "path": "lib/controller/federation/conn.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage federation\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/controller/localdb\"\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/health\"\n\t\"github.com/jmoiron/sqlx\"\n)\n\ntype Conn struct {\n\tbgCtx   context.Context\n\tcluster *arvados.Cluster\n\tlocal   backend\n\tremotes map[string]backend\n}\n\nfunc New(bgCtx context.Context, cluster *arvados.Cluster, healthFuncs *map[string]health.Func, getdb func(context.Context) (*sqlx.DB, error)) *Conn {\n\tlocal := localdb.NewConn(bgCtx, cluster, getdb)\n\tremotes := map[string]backend{}\n\tfor id, remote := range cluster.RemoteClusters {\n\t\tif !remote.Proxy || id == cluster.ClusterID {\n\t\t\tcontinue\n\t\t}\n\t\tconn := rpc.NewConn(id, &url.URL{Scheme: remote.Scheme, Host: remote.Host}, remote.Insecure, saltedTokenProvider(cluster, local, id))\n\t\t// Older versions of controller rely on the Via header\n\t\t// to detect loops.\n\t\tconn.SendHeader = http.Header{\"Via\": {\"HTTP/1.1 arvados-controller\"}}\n\t\tremotes[id] = conn\n\t}\n\n\tif healthFuncs != nil {\n\t\thf := map[string]health.Func{\"vocabulary\": local.LastVocabularyError}\n\t\t*healthFuncs = hf\n\t}\n\n\treturn &Conn{\n\t\tbgCtx:   bgCtx,\n\t\tcluster: cluster,\n\t\tlocal:   local,\n\t\tremotes: remotes,\n\t}\n}\n\n// Return a new rpc.TokenProvider that takes the client-provided\n// tokens from an incoming request context, determines whether they\n// should (and can) be salted for the given remoteID, and returns the\n// resulting tokens.\nfunc saltedTokenProvider(cluster *arvados.Cluster, local backend, remoteID string) rpc.TokenProvider {\n\treturn func(ctx context.Context) ([]string, error) {\n\t\tvar tokens []string\n\t\tincoming, ok := auth.FromContext(ctx)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"no token provided\")\n\t\t}\n\t\tfor _, token := range incoming.Tokens {\n\t\t\tif strings.HasPrefix(token, \"v2/\"+cluster.ClusterID+\"-\") &&\n\t\t\t\t!strings.HasPrefix(token, \"v2/\"+cluster.ClusterID+\"-gj3su-anonymouspublic/\") &&\n\t\t\t\tremoteID == cluster.Login.LoginCluster {\n\t\t\t\t// If we did this, the login cluster would call back to us and then\n\t\t\t\t// reject our response because the user UUID prefix (i.e., the\n\t\t\t\t// LoginCluster prefix) won't match the token UUID prefix (i.e., our\n\t\t\t\t// prefix). 
The anonymous token is OK to forward, because (unlike other\n\t\t\t\t// local tokens for real users) the validation callback will return the\n\t\t\t\t// locally issued anonymous user ID instead of a login-cluster user ID.\n\t\t\t\t// That anonymous user ID gets mapped to the local anonymous user\n\t\t\t\t// automatically on the login cluster.\n\t\t\t\treturn nil, httpErrorf(http.StatusUnauthorized, \"cannot use a locally issued token to forward a request to our login cluster (%s)\", remoteID)\n\t\t\t}\n\t\t\tsalted, err := auth.SaltToken(token, remoteID)\n\t\t\tswitch err {\n\t\t\tcase nil:\n\t\t\t\ttokens = append(tokens, salted)\n\t\t\tcase auth.ErrSalted:\n\t\t\t\ttokens = append(tokens, token)\n\t\t\tcase auth.ErrTokenFormat:\n\t\t\t\t// pass through unmodified (assume it's an OIDC access token)\n\t\t\t\ttokens = append(tokens, token)\n\t\t\tcase auth.ErrObsoleteToken:\n\t\t\t\tctx := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{token}})\n\t\t\t\taca, err := local.APIClientAuthorizationCurrent(ctx, arvados.GetOptions{})\n\t\t\t\tif errStatus(err) == http.StatusUnauthorized {\n\t\t\t\t\t// pass through unmodified\n\t\t\t\t\ttokens = append(tokens, token)\n\t\t\t\t\tcontinue\n\t\t\t\t} else if err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tif strings.HasPrefix(aca.UUID, remoteID) {\n\t\t\t\t\t// We have it cached here, but\n\t\t\t\t\t// the token belongs to the\n\t\t\t\t\t// remote target itself, so\n\t\t\t\t\t// pass it through unmodified.\n\t\t\t\t\ttokens = append(tokens, token)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tsalted, err := auth.SaltToken(aca.TokenV2(), remoteID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\ttokens = append(tokens, salted)\n\t\t\tdefault:\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treturn tokens, nil\n\t}\n}\n\n// Return suitable backend for a query about the given cluster ID\n// (\"aaaaa\") or object UUID (\"aaaaa-dz642-abcdefghijklmno\").\nfunc (conn *Conn) chooseBackend(id string) backend {\n\tif len(id) == 27 {\n\t\tid = id[:5]\n\t} else if len(id) != 5 {\n\t\t// PDH or bogus ID\n\t\treturn conn.local\n\t}\n\tif id == conn.cluster.ClusterID {\n\t\treturn conn.local\n\t} else if be, ok := conn.remotes[id]; ok {\n\t\treturn be\n\t} else {\n\t\t// TODO: return an \"always error\" backend?\n\t\treturn conn.local\n\t}\n}\n\nfunc (conn *Conn) localOrLoginCluster() backend {\n\tif conn.cluster.Login.LoginCluster != \"\" {\n\t\treturn conn.chooseBackend(conn.cluster.Login.LoginCluster)\n\t}\n\treturn conn.local\n}\n\n// Call fn with the local backend; then, if fn returned 404, call fn\n// on the available remote backends (possibly concurrently) until one\n// succeeds.\n//\n// The second argument to fn is the cluster ID of the remote backend,\n// or \"\" for the local backend.\n//\n// A non-nil error means all backends failed.\nfunc (conn *Conn) tryLocalThenRemotes(ctx context.Context, forwardedFor string, fn func(context.Context, string, backend) error) error {\n\tif err := fn(ctx, \"\", conn.local); err == nil || errStatus(err) != http.StatusNotFound || forwardedFor != \"\" {\n\t\t// Note: forwardedFor != \"\" means this request came\n\t\t// from a remote cluster, so we don't take a second\n\t\t// hop. 
This avoids cycles, redundant calls to a\n\t\t// mutually reachable remote, and use of double-salted\n\t\t// tokens.\n\t\treturn err\n\t}\n\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\terrchan := make(chan error, len(conn.remotes))\n\tfor remoteID, be := range conn.remotes {\n\t\tremoteID, be := remoteID, be\n\t\tgo func() {\n\t\t\terrchan <- fn(ctx, remoteID, be)\n\t\t}()\n\t}\n\treturncode := http.StatusNotFound\n\tvar errs []error\n\tfor i := 0; i < cap(errchan); i++ {\n\t\terr := <-errchan\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\terrs = append(errs, err)\n\t\tif code := errStatus(err); code >= 500 || code == http.StatusTooManyRequests {\n\t\t\t// If any of the remotes have a retryable\n\t\t\t// error (and none succeed) we'll return 502.\n\t\t\treturncode = http.StatusBadGateway\n\t\t} else if code != http.StatusNotFound && returncode != http.StatusBadGateway {\n\t\t\t// If some of the remotes have non-retryable\n\t\t\t// non-404 errors (and none succeed or have\n\t\t\t// retryable errors) we'll return 422.\n\t\t\treturncode = http.StatusUnprocessableEntity\n\t\t}\n\t}\n\tif returncode == http.StatusNotFound {\n\t\treturn notFoundError{}\n\t}\n\treturn httpErrorf(returncode, \"errors: %v\", errs)\n}\n\nfunc (conn *Conn) CollectionCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Collection, error) {\n\treturn conn.chooseBackend(options.ClusterID).CollectionCreate(ctx, options)\n}\n\nfunc (conn *Conn) CollectionUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Collection, error) {\n\treturn conn.chooseBackend(options.UUID).CollectionUpdate(ctx, options)\n}\n\nfunc rewriteManifest(mt, remoteID string) string {\n\treturn regexp.MustCompile(` [0-9a-f]{32}\\+[^ ]*`).ReplaceAllStringFunc(mt, func(tok string) string {\n\t\treturn strings.Replace(tok, \"+A\", \"+R\"+remoteID+\"-\", -1)\n\t})\n}\n\nfunc (conn *Conn) ConfigGet(ctx context.Context) (json.RawMessage, error) {\n\tvar buf bytes.Buffer\n\terr := config.ExportJSON(&buf, conn.cluster)\n\treturn json.RawMessage(buf.Bytes()), err\n}\n\nfunc (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {\n\treturn conn.local.VocabularyGet(ctx)\n}\n\nfunc (conn *Conn) DiscoveryDocument(ctx context.Context) (arvados.DiscoveryDocument, error) {\n\treturn conn.local.DiscoveryDocument(ctx)\n}\n\nfunc (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {\n\tif id := conn.cluster.Login.LoginCluster; id != \"\" && id != conn.cluster.ClusterID {\n\t\t// defer entire login procedure to designated cluster\n\t\tremote, ok := conn.remotes[id]\n\t\tif !ok {\n\t\t\treturn arvados.LoginResponse{}, fmt.Errorf(\"configuration problem: designated login cluster %q is not defined\", id)\n\t\t}\n\t\tbaseURL := remote.BaseURL()\n\t\ttarget, err := baseURL.Parse(arvados.EndpointLogin.Path)\n\t\tif err != nil {\n\t\t\treturn arvados.LoginResponse{}, fmt.Errorf(\"internal error getting redirect target: %s\", err)\n\t\t}\n\t\tparams := url.Values{\n\t\t\t\"return_to\": []string{options.ReturnTo},\n\t\t}\n\t\tif options.Remote != \"\" {\n\t\t\tparams.Set(\"remote\", options.Remote)\n\t\t}\n\t\ttarget.RawQuery = params.Encode()\n\t\treturn arvados.LoginResponse{\n\t\t\tRedirectLocation: target.String(),\n\t\t}, nil\n\t}\n\treturn conn.local.Login(ctx, options)\n}\n\nvar v2TokenRegexp = regexp.MustCompile(`^v2/[a-z0-9]{5}-gj3su-[a-z0-9]{15}/`)\n\nfunc (conn *Conn) Logout(ctx context.Context, options arvados.LogoutOptions) 
(arvados.LogoutResponse, error) {\n\t// If the token was issued by another cluster, we want to issue a logout\n\t// request to the issuing instance to invalidate the token federation-wide.\n\t// If this federation has a login cluster, that's always considered the\n\t// issuing cluster.\n\t// Otherwise, if this is a v2 token, use the UUID to find the issuing\n\t// cluster.\n\t// Note that remoteBE may still be conn.local even *after* one of these\n\t// conditions is true.\n\tvar remoteBE backend = conn.local\n\tif conn.cluster.Login.LoginCluster != \"\" {\n\t\tremoteBE = conn.chooseBackend(conn.cluster.Login.LoginCluster)\n\t} else {\n\t\treqauth, ok := auth.FromContext(ctx)\n\t\tif ok && len(reqauth.Tokens) > 0 && v2TokenRegexp.MatchString(reqauth.Tokens[0]) {\n\t\t\tremoteBE = conn.chooseBackend(reqauth.Tokens[0][3:8])\n\t\t}\n\t}\n\n\t// We always want to invalidate the token locally. Start that process.\n\tvar localResponse arvados.LogoutResponse\n\tvar localErr error\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\tgo func() {\n\t\tlocalResponse, localErr = conn.local.Logout(ctx, options)\n\t\twg.Done()\n\t}()\n\n\t// If the token was issued by another cluster, log out there too.\n\tif remoteBE != conn.local {\n\t\tresponse, err := remoteBE.Logout(ctx, options)\n\t\t// If the issuing cluster returns a redirect or error, that's more\n\t\t// important to return to the user than anything that happens locally.\n\t\tif response.RedirectLocation != \"\" || err != nil {\n\t\t\treturn response, err\n\t\t}\n\t}\n\n\t// Either the local cluster is the issuing cluster, or the issuing cluster's\n\t// response was uninteresting.\n\twg.Wait()\n\treturn localResponse, localErr\n}\n\nfunc (conn *Conn) AuthorizedKeyCreate(ctx context.Context, options arvados.CreateOptions) (arvados.AuthorizedKey, error) {\n\treturn conn.chooseBackend(options.ClusterID).AuthorizedKeyCreate(ctx, options)\n}\n\nfunc (conn *Conn) AuthorizedKeyUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.AuthorizedKey, error) {\n\treturn conn.chooseBackend(options.UUID).AuthorizedKeyUpdate(ctx, options)\n}\n\nfunc (conn *Conn) AuthorizedKeyGet(ctx context.Context, options arvados.GetOptions) (arvados.AuthorizedKey, error) {\n\treturn conn.chooseBackend(options.UUID).AuthorizedKeyGet(ctx, options)\n}\n\nfunc (conn *Conn) AuthorizedKeyList(ctx context.Context, options arvados.ListOptions) (arvados.AuthorizedKeyList, error) {\n\treturn conn.generated_AuthorizedKeyList(ctx, options)\n}\n\nfunc (conn *Conn) AuthorizedKeyDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.AuthorizedKey, error) {\n\treturn conn.chooseBackend(options.UUID).AuthorizedKeyDelete(ctx, options)\n}\n\nfunc (conn *Conn) CollectionGet(ctx context.Context, options arvados.GetOptions) (arvados.Collection, error) {\n\tif len(options.UUID) == 27 {\n\t\t// UUID is really a UUID\n\t\tc, err := conn.chooseBackend(options.UUID).CollectionGet(ctx, options)\n\t\tif err == nil && options.UUID[:5] != conn.cluster.ClusterID {\n\t\t\tc.ManifestText = rewriteManifest(c.ManifestText, options.UUID[:5])\n\t\t}\n\t\treturn c, err\n\t}\n\tif len(options.UUID) < 34 || options.UUID[32] != '+' {\n\t\treturn arvados.Collection{}, httpErrorf(http.StatusNotFound, \"invalid UUID or PDH %q\", options.UUID)\n\t}\n\t// UUID is a PDH\n\tfirst := make(chan arvados.Collection, 1)\n\terr := conn.tryLocalThenRemotes(ctx, options.ForwardedFor, func(ctx context.Context, remoteID string, be backend) error {\n\t\tremoteOpts := options\n\t\tremoteOpts.ForwardedFor = 
conn.cluster.ClusterID + \"-\" + options.ForwardedFor\n\t\tc, err := be.CollectionGet(ctx, remoteOpts)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thaveManifest := true\n\t\tif options.Select != nil {\n\t\t\thaveManifest = false\n\t\t\tfor _, s := range options.Select {\n\t\t\t\tif s == \"manifest_text\" {\n\t\t\t\t\thaveManifest = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif haveManifest {\n\t\t\tpdh := arvados.PortableDataHash(c.ManifestText)\n\t\t\t// options.UUID is either hash+size or\n\t\t\t// hash+size+hints; only hash+size need to\n\t\t\t// match the computed PDH.\n\t\t\tif pdh != options.UUID && !strings.HasPrefix(options.UUID, pdh+\"+\") {\n\t\t\t\terr = httpErrorf(http.StatusBadGateway, \"bad portable data hash %q received from remote %q (expected %q)\", pdh, remoteID, options.UUID)\n\t\t\t\tctxlog.FromContext(ctx).Warn(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif remoteID != \"\" {\n\t\t\tc.ManifestText = rewriteManifest(c.ManifestText, remoteID)\n\t\t}\n\t\tselect {\n\t\tcase first <- c:\n\t\t\treturn nil\n\t\tdefault:\n\t\t\t// lost race, return value doesn't matter\n\t\t\treturn nil\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn arvados.Collection{}, err\n\t}\n\treturn <-first, nil\n}\n\nfunc (conn *Conn) CollectionList(ctx context.Context, options arvados.ListOptions) (arvados.CollectionList, error) {\n\treturn conn.generated_CollectionList(ctx, options)\n}\n\nfunc (conn *Conn) CollectionProvenance(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {\n\treturn conn.chooseBackend(options.UUID).CollectionProvenance(ctx, options)\n}\n\nfunc (conn *Conn) CollectionUsedBy(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {\n\treturn conn.chooseBackend(options.UUID).CollectionUsedBy(ctx, options)\n}\n\nfunc (conn *Conn) CollectionDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {\n\treturn conn.chooseBackend(options.UUID).CollectionDelete(ctx, options)\n}\n\nfunc (conn *Conn) CollectionTrash(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {\n\treturn conn.chooseBackend(options.UUID).CollectionTrash(ctx, options)\n}\n\nfunc (conn *Conn) CollectionUntrash(ctx context.Context, options arvados.UntrashOptions) (arvados.Collection, error) {\n\treturn conn.chooseBackend(options.UUID).CollectionUntrash(ctx, options)\n}\n\nfunc (conn *Conn) ComputedPermissionList(ctx context.Context, options arvados.ListOptions) (arvados.ComputedPermissionList, error) {\n\treturn conn.local.ComputedPermissionList(ctx, options)\n}\n\nfunc (conn *Conn) ContainerList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerList, error) {\n\treturn conn.generated_ContainerList(ctx, options)\n}\n\nfunc (conn *Conn) ContainerCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Container, error) {\n\treturn conn.chooseBackend(options.ClusterID).ContainerCreate(ctx, options)\n}\n\nfunc (conn *Conn) ContainerUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {\n\treturn conn.chooseBackend(options.UUID).ContainerUpdate(ctx, options)\n}\n\nfunc (conn *Conn) ContainerPriorityUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {\n\treturn conn.chooseBackend(options.UUID).ContainerPriorityUpdate(ctx, options)\n}\n\nfunc (conn *Conn) ContainerGet(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {\n\treturn 
conn.chooseBackend(options.UUID).ContainerGet(ctx, options)\n}\n\nfunc (conn *Conn) ContainerDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Container, error) {\n\treturn conn.chooseBackend(options.UUID).ContainerDelete(ctx, options)\n}\n\nfunc (conn *Conn) ContainerLock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {\n\treturn conn.chooseBackend(options.UUID).ContainerLock(ctx, options)\n}\n\nfunc (conn *Conn) ContainerUnlock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {\n\treturn conn.chooseBackend(options.UUID).ContainerUnlock(ctx, options)\n}\n\nfunc (conn *Conn) ContainerHTTPProxy(ctx context.Context, options arvados.ContainerHTTPProxyOptions) (http.Handler, error) {\n\tif len(options.Target) >= 29 && options.Target[27] == '-' && arvadosclient.UUIDMatch(options.Target[:27]) {\n\t\treturn conn.chooseBackend(options.Target[:27]).ContainerHTTPProxy(ctx, options)\n\t} else {\n\t\treturn conn.local.ContainerHTTPProxy(ctx, options)\n\t}\n}\n\nfunc (conn *Conn) ContainerSSH(ctx context.Context, options arvados.ContainerSSHOptions) (arvados.ConnectionResponse, error) {\n\treturn conn.chooseBackend(options.UUID).ContainerSSH(ctx, options)\n}\n\nfunc (conn *Conn) ContainerGatewayTunnel(ctx context.Context, options arvados.ContainerGatewayTunnelOptions) (arvados.ConnectionResponse, error) {\n\treturn conn.chooseBackend(options.UUID).ContainerGatewayTunnel(ctx, options)\n}\n\nfunc (conn *Conn) ContainerRequestList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerRequestList, error) {\n\treturn conn.generated_ContainerRequestList(ctx, options)\n}\n\nfunc (conn *Conn) ContainerRequestCreate(ctx context.Context, options arvados.CreateOptions) (arvados.ContainerRequest, error) {\n\tbe := conn.chooseBackend(options.ClusterID)\n\tif be == conn.local {\n\t\treturn be.ContainerRequestCreate(ctx, options)\n\t}\n\tif _, ok := options.Attrs[\"runtime_token\"]; !ok {\n\t\t// If runtime_token is not set, create a new token\n\t\taca, err := conn.local.APIClientAuthorizationCurrent(ctx, arvados.GetOptions{})\n\t\tif err != nil {\n\t\t\t// This should probably be StatusUnauthorized\n\t\t\t// (need to update test in\n\t\t\t// lib/controller/federation_test.go):\n\t\t\t// When RoR is out of the picture this should be:\n\t\t\t// return arvados.ContainerRequest{}, httpErrorf(http.StatusUnauthorized, \"%w\", err)\n\t\t\treturn arvados.ContainerRequest{}, httpErrorf(http.StatusForbidden, \"%s\", \"invalid API token\")\n\t\t}\n\t\tuser, err := conn.local.UserGetCurrent(ctx, arvados.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn arvados.ContainerRequest{}, err\n\t\t}\n\t\tif len(aca.Scopes) == 0 || aca.Scopes[0] != \"all\" {\n\t\t\treturn arvados.ContainerRequest{}, httpErrorf(http.StatusForbidden, \"token scope is not [all]\")\n\t\t}\n\t\tif strings.HasPrefix(aca.UUID, conn.cluster.ClusterID) {\n\t\t\t// Local user, submitting to a remote cluster.\n\t\t\t// Create a new time-limited token.\n\t\t\tlocal, ok := conn.local.(*localdb.Conn)\n\t\t\tif !ok {\n\t\t\t\treturn arvados.ContainerRequest{}, httpErrorf(http.StatusInternalServerError, \"bug: local backend is a %T, not a *localdb.Conn\", conn.local)\n\t\t\t}\n\t\t\taca, err = local.CreateAPIClientAuthorization(ctx, conn.cluster.SystemRootToken, rpc.UserSessionAuthInfo{UserUUID: user.UUID,\n\t\t\t\tExpiresAt: time.Now().UTC().Add(conn.cluster.Collections.BlobSigningTTL.Duration())})\n\t\t\tif err != nil {\n\t\t\t\treturn arvados.ContainerRequest{}, 
err\n\t\t\t}\n\t\t\toptions.Attrs[\"runtime_token\"] = aca.TokenV2()\n\t\t} else {\n\t\t\t// Remote user. Container request will use the\n\t\t\t// current token, minus the trailing portion\n\t\t\t// (optional container uuid).\n\t\t\toptions.Attrs[\"runtime_token\"] = aca.TokenV2()\n\t\t}\n\t}\n\treturn be.ContainerRequestCreate(ctx, options)\n}\n\nfunc (conn *Conn) ContainerRequestUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.ContainerRequest, error) {\n\treturn conn.chooseBackend(options.UUID).ContainerRequestUpdate(ctx, options)\n}\n\nfunc (conn *Conn) ContainerRequestGet(ctx context.Context, options arvados.GetOptions) (arvados.ContainerRequest, error) {\n\treturn conn.chooseBackend(options.UUID).ContainerRequestGet(ctx, options)\n}\n\nfunc (conn *Conn) ContainerRequestDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.ContainerRequest, error) {\n\treturn conn.chooseBackend(options.UUID).ContainerRequestDelete(ctx, options)\n}\n\nfunc (conn *Conn) ContainerRequestContainerStatus(ctx context.Context, options arvados.GetOptions) (arvados.ContainerStatus, error) {\n\treturn conn.chooseBackend(options.UUID).ContainerRequestContainerStatus(ctx, options)\n}\n\nfunc (conn *Conn) ContainerRequestLog(ctx context.Context, options arvados.ContainerLogOptions) (http.Handler, error) {\n\treturn conn.chooseBackend(options.UUID).ContainerRequestLog(ctx, options)\n}\n\nfunc (conn *Conn) GroupCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Group, error) {\n\treturn conn.chooseBackend(options.ClusterID).GroupCreate(ctx, options)\n}\n\nfunc (conn *Conn) GroupUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Group, error) {\n\treturn conn.chooseBackend(options.UUID).GroupUpdate(ctx, options)\n}\n\nfunc (conn *Conn) GroupGet(ctx context.Context, options arvados.GetOptions) (arvados.Group, error) {\n\treturn conn.chooseBackend(options.UUID).GroupGet(ctx, options)\n}\n\nfunc (conn *Conn) GroupList(ctx context.Context, options arvados.ListOptions) (arvados.GroupList, error) {\n\treturn conn.generated_GroupList(ctx, options)\n}\n\nvar userUuidRe = regexp.MustCompile(`^[0-9a-z]{5}-tpzed-[0-9a-z]{15}$`)\n\nfunc (conn *Conn) GroupContents(ctx context.Context, options arvados.GroupContentsOptions) (arvados.ObjectList, error) {\n\tif options.ClusterID != \"\" {\n\t\t// explicitly selected cluster\n\t\treturn conn.chooseBackend(options.ClusterID).GroupContents(ctx, options)\n\t} else if userUuidRe.MatchString(options.UUID) {\n\t\t// user, get the things they own on the local cluster\n\t\treturn conn.local.GroupContents(ctx, options)\n\t} else {\n\t\t// a group, potentially want to make federated request\n\t\treturn conn.chooseBackend(options.UUID).GroupContents(ctx, options)\n\t}\n}\n\nfunc (conn *Conn) GroupShared(ctx context.Context, options arvados.ListOptions) (arvados.GroupList, error) {\n\treturn conn.chooseBackend(options.ClusterID).GroupShared(ctx, options)\n}\n\nfunc (conn *Conn) GroupDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Group, error) {\n\treturn conn.chooseBackend(options.UUID).GroupDelete(ctx, options)\n}\n\nfunc (conn *Conn) GroupTrash(ctx context.Context, options arvados.DeleteOptions) (arvados.Group, error) {\n\treturn conn.chooseBackend(options.UUID).GroupTrash(ctx, options)\n}\n\nfunc (conn *Conn) GroupUntrash(ctx context.Context, options arvados.UntrashOptions) (arvados.Group, error) {\n\treturn conn.chooseBackend(options.UUID).GroupUntrash(ctx, options)\n}\n\nfunc (conn *Conn) LinkCreate(ctx 
context.Context, options arvados.CreateOptions) (arvados.Link, error) {\n\treturn conn.chooseBackend(options.ClusterID).LinkCreate(ctx, options)\n}\n\nfunc (conn *Conn) LinkUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Link, error) {\n\treturn conn.chooseBackend(options.UUID).LinkUpdate(ctx, options)\n}\n\nfunc (conn *Conn) LinkGet(ctx context.Context, options arvados.GetOptions) (arvados.Link, error) {\n\treturn conn.chooseBackend(options.UUID).LinkGet(ctx, options)\n}\n\nfunc (conn *Conn) LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {\n\treturn conn.generated_LinkList(ctx, options)\n}\n\nfunc (conn *Conn) LinkDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Link, error) {\n\treturn conn.chooseBackend(options.UUID).LinkDelete(ctx, options)\n}\n\nfunc (conn *Conn) LogCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Log, error) {\n\treturn conn.chooseBackend(options.ClusterID).LogCreate(ctx, options)\n}\n\nfunc (conn *Conn) LogUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Log, error) {\n\treturn conn.chooseBackend(options.UUID).LogUpdate(ctx, options)\n}\n\nfunc (conn *Conn) LogGet(ctx context.Context, options arvados.GetOptions) (arvados.Log, error) {\n\treturn conn.chooseBackend(options.UUID).LogGet(ctx, options)\n}\n\nfunc (conn *Conn) LogList(ctx context.Context, options arvados.ListOptions) (arvados.LogList, error) {\n\treturn conn.generated_LogList(ctx, options)\n}\n\nfunc (conn *Conn) LogDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Log, error) {\n\treturn conn.chooseBackend(options.UUID).LogDelete(ctx, options)\n}\n\nfunc (conn *Conn) SysTrashSweep(ctx context.Context, options struct{}) (struct{}, error) {\n\treturn conn.local.SysTrashSweep(ctx, options)\n}\n\nvar userAttrsCachedFromLoginCluster = map[string]bool{\n\t\"created_at\":  true,\n\t\"email\":       true,\n\t\"first_name\":  true,\n\t\"is_active\":   true,\n\t\"is_admin\":    true,\n\t\"is_invited\":  true,\n\t\"last_name\":   true,\n\t\"modified_at\": true,\n\t\"prefs\":       true,\n\t\"username\":    true,\n\t\"kind\":        true,\n\n\t\"etag\":                    false,\n\t\"full_name\":               false,\n\t\"identity_url\":            false,\n\t\"modified_by_client_uuid\": false,\n\t\"modified_by_user_uuid\":   false,\n\t\"owner_uuid\":              false,\n\t\"uuid\":                    false,\n\t\"writable_by\":             false,\n\t\"can_write\":               false,\n\t\"can_manage\":              false,\n}\n\nfunc (conn *Conn) batchUpdateUsers(ctx context.Context,\n\toptions arvados.ListOptions,\n\titems []arvados.User,\n\tincludeAdminAndInvited bool) (err error) {\n\n\tid := conn.cluster.Login.LoginCluster\n\tlogger := ctxlog.FromContext(ctx)\n\tbatchOpts := arvados.UserBatchUpdateOptions{Updates: map[string]map[string]interface{}{}}\n\tfor _, user := range items {\n\t\tif !strings.HasPrefix(user.UUID, id) {\n\t\t\tcontinue\n\t\t}\n\t\tlogger.Debugf(\"cache user info for uuid %q\", user.UUID)\n\n\t\t// If the remote cluster has null timestamps\n\t\t// (e.g., test server with incomplete\n\t\t// fixtures) use dummy timestamps (instead of\n\t\t// the zero time, which causes a Rails API\n\t\t// error \"year too big to marshal: 1 UTC\").\n\t\tif user.ModifiedAt.IsZero() {\n\t\t\tuser.ModifiedAt = time.Now()\n\t\t}\n\t\tif user.CreatedAt.IsZero() {\n\t\t\tuser.CreatedAt = time.Now()\n\t\t}\n\n\t\tvar allFields map[string]interface{}\n\t\tbuf, err := 
json.Marshal(user)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error encoding user record from remote response: %s\", err)\n\t\t}\n\t\terr = json.Unmarshal(buf, &allFields)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error transcoding user record from remote response: %s\", err)\n\t\t}\n\t\tupdates := allFields\n\t\tif len(options.Select) > 0 {\n\t\t\tupdates = map[string]interface{}{}\n\t\t\tfor _, k := range options.Select {\n\t\t\t\tif v, ok := allFields[k]; ok && userAttrsCachedFromLoginCluster[k] {\n\t\t\t\t\tupdates[k] = v\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor k := range updates {\n\t\t\t\tif !userAttrsCachedFromLoginCluster[k] {\n\t\t\t\t\tdelete(updates, k)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif !includeAdminAndInvited {\n\t\t\t// make sure we don't send these fields.\n\t\t\tdelete(updates, \"is_admin\")\n\t\t\tdelete(updates, \"is_invited\")\n\t\t}\n\t\tbatchOpts.Updates[user.UUID] = updates\n\t}\n\tif len(batchOpts.Updates) > 0 {\n\t\tctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})\n\t\t_, err = conn.local.UserBatchUpdate(ctxRoot, batchOpts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error updating local user records: %s\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (conn *Conn) includeAdminAndInvitedInBatchUpdate(ctx context.Context, be backend, updateUserUUID string) (bool, error) {\n\t// API versions prior to 20231117 would only include the\n\t// is_invited and is_admin fields if the current user is an\n\t// admin, or is requesting their own user record.  If those\n\t// fields aren't actually valid then we don't want to\n\t// send them in the batch update.\n\tdd, err := be.DiscoveryDocument(ctx)\n\tif err != nil {\n\t\t// couldn't get discovery document\n\t\treturn false, err\n\t}\n\tif dd.Revision >= \"20231117\" {\n\t\t// newer version, fields are valid.\n\t\treturn true, nil\n\t}\n\tselfuser, err := be.UserGetCurrent(ctx, arvados.GetOptions{})\n\tif err != nil {\n\t\t// couldn't get our user record\n\t\treturn false, err\n\t}\n\tif selfuser.IsAdmin || selfuser.UUID == updateUserUUID {\n\t\t// we are an admin, or the current user is the same as\n\t\t// the user that we are updating.\n\t\treturn true, nil\n\t}\n\t// Better safe than sorry.\n\treturn false, nil\n}\n\nfunc (conn *Conn) UserList(ctx context.Context, options arvados.ListOptions) (arvados.UserList, error) {\n\tif id := conn.cluster.Login.LoginCluster; id != \"\" && id != conn.cluster.ClusterID && !options.BypassFederation {\n\t\tbe := conn.chooseBackend(id)\n\t\tresp, err := be.UserList(ctx, options)\n\t\tif err != nil {\n\t\t\treturn resp, err\n\t\t}\n\t\tincludeAdminAndInvited, err := conn.includeAdminAndInvitedInBatchUpdate(ctx, be, \"\")\n\t\tif err != nil {\n\t\t\treturn arvados.UserList{}, err\n\t\t}\n\t\terr = conn.batchUpdateUsers(ctx, options, resp.Items, includeAdminAndInvited)\n\t\tif err != nil {\n\t\t\treturn arvados.UserList{}, err\n\t\t}\n\t\treturn resp, nil\n\t}\n\treturn conn.generated_UserList(ctx, options)\n}\n\nfunc (conn *Conn) UserCreate(ctx context.Context, options arvados.CreateOptions) (arvados.User, error) {\n\treturn conn.chooseBackend(options.ClusterID).UserCreate(ctx, options)\n}\n\nfunc (conn *Conn) UserUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.User, error) {\n\tif options.BypassFederation {\n\t\treturn conn.local.UserUpdate(ctx, options)\n\t}\n\tbe := conn.chooseBackend(options.UUID)\n\tresp, err := be.UserUpdate(ctx, options)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tif 
!strings.HasPrefix(options.UUID, conn.cluster.ClusterID) {\n\t\tincludeAdminAndInvited, err := conn.includeAdminAndInvitedInBatchUpdate(ctx, be, options.UUID)\n\t\tif err != nil {\n\t\t\treturn arvados.User{}, err\n\t\t}\n\t\t// Copy the updated user record to the local cluster\n\t\terr = conn.batchUpdateUsers(ctx, arvados.ListOptions{}, []arvados.User{resp}, includeAdminAndInvited)\n\t\tif err != nil {\n\t\t\treturn arvados.User{}, err\n\t\t}\n\t}\n\treturn resp, err\n}\n\nfunc (conn *Conn) UserMerge(ctx context.Context, options arvados.UserMergeOptions) (arvados.User, error) {\n\treturn conn.local.UserMerge(ctx, options)\n}\n\nfunc (conn *Conn) UserActivate(ctx context.Context, options arvados.UserActivateOptions) (arvados.User, error) {\n\treturn conn.localOrLoginCluster().UserActivate(ctx, options)\n}\n\nfunc (conn *Conn) UserSetup(ctx context.Context, options arvados.UserSetupOptions) (map[string]interface{}, error) {\n\tupstream := conn.localOrLoginCluster()\n\tif upstream != conn.local {\n\t\t// When LoginCluster is in effect, and we're setting\n\t\t// up a remote user, and we want to give that user\n\t\t// access to a local VM, we can't include the VM in\n\t\t// the setup call, because the remote cluster won't\n\t\t// recognize it.\n\n\t\t// Similarly, if we want to create a git repo,\n\t\t// it should be created on the local cluster,\n\t\t// not the remote one.\n\n\t\tupstreamOptions := options\n\t\tupstreamOptions.VMUUID = \"\"\n\t\tupstreamOptions.RepoName = \"\"\n\n\t\tret, err := upstream.UserSetup(ctx, upstreamOptions)\n\t\tif err != nil {\n\t\t\treturn ret, err\n\t\t}\n\t}\n\n\treturn conn.local.UserSetup(ctx, options)\n}\n\nfunc (conn *Conn) UserUnsetup(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {\n\treturn conn.localOrLoginCluster().UserUnsetup(ctx, options)\n}\n\nfunc (conn *Conn) UserGet(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {\n\tbe := conn.chooseBackend(options.UUID)\n\tresp, err := be.UserGet(ctx, options)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tif options.UUID != resp.UUID {\n\t\treturn arvados.User{}, httpErrorf(http.StatusBadGateway, \"Had requested %v but response was for %v\", options.UUID, resp.UUID)\n\t}\n\tif options.UUID[:5] != conn.cluster.ClusterID {\n\t\tincludeAdminAndInvited, err := conn.includeAdminAndInvitedInBatchUpdate(ctx, be, options.UUID)\n\t\tif err != nil {\n\t\t\treturn arvados.User{}, err\n\t\t}\n\t\terr = conn.batchUpdateUsers(ctx, arvados.ListOptions{Select: options.Select}, []arvados.User{resp}, includeAdminAndInvited)\n\t\tif err != nil {\n\t\t\treturn arvados.User{}, err\n\t\t}\n\t}\n\treturn resp, nil\n}\n\nfunc (conn *Conn) UserGetCurrent(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {\n\treturn conn.local.UserGetCurrent(ctx, options)\n}\n\nfunc (conn *Conn) UserGetSystem(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {\n\treturn conn.chooseBackend(options.UUID).UserGetSystem(ctx, options)\n}\n\nfunc (conn *Conn) UserDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.User, error) {\n\treturn conn.chooseBackend(options.UUID).UserDelete(ctx, options)\n}\n\nfunc (conn *Conn) UserBatchUpdate(ctx context.Context, options arvados.UserBatchUpdateOptions) (arvados.UserList, error) {\n\treturn conn.local.UserBatchUpdate(ctx, options)\n}\n\nfunc (conn *Conn) UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {\n\treturn 
conn.local.UserAuthenticate(ctx, options)\n}\n\nfunc (conn *Conn) APIClientAuthorizationCurrent(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {\n\treturn conn.chooseBackend(options.UUID).APIClientAuthorizationCurrent(ctx, options)\n}\n\nfunc (conn *Conn) APIClientAuthorizationCreate(ctx context.Context, options arvados.CreateOptions) (arvados.APIClientAuthorization, error) {\n\tif conn.cluster.Login.LoginCluster != \"\" {\n\t\treturn conn.chooseBackend(conn.cluster.Login.LoginCluster).APIClientAuthorizationCreate(ctx, options)\n\t}\n\townerUUID, ok := options.Attrs[\"owner_uuid\"].(string)\n\tif ok && ownerUUID != \"\" {\n\t\treturn conn.chooseBackend(ownerUUID).APIClientAuthorizationCreate(ctx, options)\n\t}\n\treturn conn.local.APIClientAuthorizationCreate(ctx, options)\n}\n\nfunc (conn *Conn) APIClientAuthorizationUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.APIClientAuthorization, error) {\n\tif options.BypassFederation {\n\t\treturn conn.local.APIClientAuthorizationUpdate(ctx, options)\n\t}\n\treturn conn.chooseBackend(options.UUID).APIClientAuthorizationUpdate(ctx, options)\n}\n\nfunc (conn *Conn) APIClientAuthorizationDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.APIClientAuthorization, error) {\n\treturn conn.chooseBackend(options.UUID).APIClientAuthorizationDelete(ctx, options)\n}\n\nfunc (conn *Conn) APIClientAuthorizationList(ctx context.Context, options arvados.ListOptions) (arvados.APIClientAuthorizationList, error) {\n\tif id := conn.cluster.Login.LoginCluster; id != \"\" && id != conn.cluster.ClusterID && !options.BypassFederation {\n\t\treturn conn.chooseBackend(conn.cluster.Login.LoginCluster).APIClientAuthorizationList(ctx, options)\n\t}\n\treturn conn.generated_APIClientAuthorizationList(ctx, options)\n}\n\nfunc (conn *Conn) APIClientAuthorizationGet(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {\n\treturn conn.chooseBackend(options.UUID).APIClientAuthorizationGet(ctx, options)\n}\n\ntype backend interface {\n\tarvados.API\n\tBaseURL() url.URL\n}\n\ntype notFoundError struct{}\n\nfunc (notFoundError) HTTPStatus() int { return http.StatusNotFound }\nfunc (notFoundError) Error() string   { return \"not found\" }\n\nfunc errStatus(err error) int {\n\tif httpErr, ok := err.(interface{ HTTPStatus() int }); ok {\n\t\treturn httpErr.HTTPStatus()\n\t}\n\treturn http.StatusInternalServerError\n}\n"
  },
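// Illustrative standalone sketch (not a file in this repo): the
// integrity check that conn.CollectionGet above applies to collections
// fetched from remote clusters. The requested UUID may be "hash+size"
// or "hash+size+hints", so only the "hash+size" prefix of the
// recomputed portable data hash has to match. verifyPDH is a
// hypothetical helper name; arvados.PortableDataHash is the SDK
// function used in conn.go.
package main

import (
	"fmt"
	"strings"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func verifyPDH(requestedUUID, manifestText string) error {
	pdh := arvados.PortableDataHash(manifestText)
	if pdh != requestedUUID && !strings.HasPrefix(requestedUUID, pdh+"+") {
		return fmt.Errorf("bad portable data hash %q (expected %q)", pdh, requestedUUID)
	}
	return nil
}

func main() {
	// A manifest always matches the PDH computed from itself.
	fmt.Println(verifyPDH(arvados.PortableDataHash(""), "")) // <nil>
}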
  {
    "path": "lib/controller/federation/federation_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage federation\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\t\"os\"\n\t\"testing\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/controller/router\"\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/jmoiron/sqlx\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\n// FederationSuite does some generic setup/teardown. Don't add Test*\n// methods to FederationSuite itself.\ntype FederationSuite struct {\n\tintegrationTestCluster *arvados.Cluster\n\tcluster                *arvados.Cluster\n\tctx                    context.Context\n\ttx                     *sqlx.Tx\n\tfed                    *Conn\n}\n\nfunc (s *FederationSuite) SetUpSuite(c *check.C) {\n\tcfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()\n\tc.Assert(err, check.IsNil)\n\ts.integrationTestCluster, err = cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n}\n\nfunc (s *FederationSuite) SetUpTest(c *check.C) {\n\ts.cluster = &arvados.Cluster{\n\t\tClusterID:       \"aaaaa\",\n\t\tSystemRootToken: arvadostest.SystemRootToken,\n\t\tRemoteClusters: map[string]arvados.RemoteCluster{\n\t\t\t\"aaaaa\": {\n\t\t\t\tHost: os.Getenv(\"ARVADOS_API_HOST\"),\n\t\t\t},\n\t\t},\n\t\tPostgreSQL: s.integrationTestCluster.PostgreSQL,\n\t}\n\tarvadostest.SetServiceURL(&s.cluster.Services.RailsAPI, \"https://\"+os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\ts.cluster.TLS.Insecure = true\n\ts.cluster.API.MaxItemsPerResponse = 3\n\n\ttx, err := arvadostest.DB(c, s.cluster).Beginx()\n\tc.Assert(err, check.IsNil)\n\ts.tx = tx\n\n\tctx := context.Background()\n\tctx = ctxlog.Context(ctx, ctxlog.TestLogger(c))\n\tctx = auth.NewContext(ctx, &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})\n\tctx = ctrlctx.NewWithTransaction(ctx, s.tx)\n\ts.ctx = ctx\n\n\ts.fed = New(ctx, s.cluster, nil, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)\n}\n\nfunc (s *FederationSuite) TearDownTest(c *check.C) {\n\ts.tx.Rollback()\n}\n\nfunc (s *FederationSuite) addDirectRemote(c *check.C, id string, backend backend) {\n\ts.cluster.RemoteClusters[id] = arvados.RemoteCluster{\n\t\tHost: \"in-process.local\",\n\t}\n\ts.fed.remotes[id] = backend\n}\n\nfunc (s *FederationSuite) addHTTPRemote(c *check.C, id string, backend backend) {\n\tsrv := httpserver.Server{Addr: \":\"}\n\tsrv.Handler = router.New(backend, router.Config{})\n\tc.Check(srv.Start(), check.IsNil)\n\ts.cluster.RemoteClusters[id] = arvados.RemoteCluster{\n\t\tScheme: \"http\",\n\t\tHost:   srv.Addr,\n\t\tProxy:  true,\n\t}\n\ts.fed.remotes[id] = rpc.NewConn(id, &url.URL{Scheme: \"http\", Host: srv.Addr}, true, saltedTokenProvider(s.cluster, s.fed.local, id))\n}\n"
  },
  {
    "path": "lib/controller/federation/generate.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n//go:build ignore\n// +build ignore\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"os/exec\"\n\t\"regexp\"\n)\n\nfunc main() {\n\tcheckOnly := false\n\tif len(os.Args) == 2 && os.Args[1] == \"-check\" {\n\t\tcheckOnly = true\n\t} else if len(os.Args) != 1 {\n\t\tpanic(\"usage: go run generate.go [-check]\")\n\t}\n\n\tin, err := os.Open(\"list.go\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tbuf, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\torig := regexp.MustCompile(`(?ms)\\nfunc [^\\n]*generated_CollectionList\\(.*?\\n}\\n`).Find(buf)\n\tif len(orig) == 0 {\n\t\tpanic(\"can't find CollectionList func\")\n\t}\n\n\toutfile, err := os.OpenFile(\"generated.go~\", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0777)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tgofmt := exec.Command(\"goimports\")\n\tgofmt.Stdout = outfile\n\tgofmt.Stderr = os.Stderr\n\tout, err := gofmt.StdinPipe()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tgo func() {\n\t\tdefer out.Close()\n\t\tout.Write(regexp.MustCompile(`(?ms)^.*package .*?import.*?\\n\\)\\n`).Find(buf))\n\t\tio.WriteString(out, \"//\\n// -- this file is auto-generated -- do not edit -- edit list.go and run \\\"go generate\\\" instead --\\n//\\n\\n\")\n\t\tfor _, t := range []string{\"AuthorizedKey\", \"Container\", \"ContainerRequest\", \"Group\", \"User\", \"Link\", \"Log\", \"APIClientAuthorization\"} {\n\t\t\t_, err := out.Write(bytes.ReplaceAll(orig, []byte(\"Collection\"), []byte(t)))\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t}()\n\terr = gofmt.Run()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = outfile.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif checkOnly {\n\t\tdiff := exec.Command(\"diff\", \"-u\", \"/dev/fd/3\", \"/dev/fd/4\")\n\t\tfor _, fnm := range []string{\"generated.go\", \"generated.go~\"} {\n\t\t\tf, err := os.Open(fnm)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer f.Close()\n\t\t\tdiff.ExtraFiles = append(diff.ExtraFiles, f)\n\t\t}\n\t\tdiff.Stdout = os.Stdout\n\t\tdiff.Stderr = os.Stderr\n\t\terr = diff.Run()\n\t\tif err != nil {\n\t\t\tos.Exit(1)\n\t\t}\n\t} else {\n\t\terr = os.Rename(\"generated.go~\", \"generated.go\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n"
  },
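// Illustrative standalone sketch (not a file in this repo) of the
// substitution step generate.go performs: the generated_CollectionList
// function is located with the same regexp used above, then emitted
// once per record type with "Collection" replaced. The inline src
// value is a stand-in for the real list.go contents; the goimports and
// -check diff steps are omitted.
package main

import (
	"bytes"
	"fmt"
	"regexp"
)

func main() {
	src := []byte("\nfunc (conn *Conn) generated_CollectionList(x int) int {\n\treturn x\n}\n")
	orig := regexp.MustCompile(`(?ms)\nfunc [^\n]*generated_CollectionList\(.*?\n}\n`).Find(src)
	for _, t := range []string{"Group", "User"} {
		// Emit one copy of the template per target type.
		fmt.Print(string(bytes.ReplaceAll(orig, []byte("Collection"), []byte(t))))
	}
}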
  {
    "path": "lib/controller/federation/generated.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage federation\n\nimport (\n\t\"context\"\n\t\"sort\"\n\t\"sync\"\n\t\"sync/atomic\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n//\n// -- this file is auto-generated -- do not edit -- edit list.go and run \"go generate\" instead --\n//\n\nfunc (conn *Conn) generated_AuthorizedKeyList(ctx context.Context, options arvados.ListOptions) (arvados.AuthorizedKeyList, error) {\n\tvar mtx sync.Mutex\n\tvar merged arvados.AuthorizedKeyList\n\tvar needSort atomic.Value\n\tneedSort.Store(false)\n\terr := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {\n\t\toptions.ForwardedFor = conn.cluster.ClusterID + \"-\" + options.ForwardedFor\n\t\tcl, err := backend.AuthorizedKeyList(ctx, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmtx.Lock()\n\t\tdefer mtx.Unlock()\n\t\tif len(merged.Items) == 0 {\n\t\t\tmerged = cl\n\t\t} else if len(cl.Items) > 0 {\n\t\t\tmerged.Items = append(merged.Items, cl.Items...)\n\t\t\tneedSort.Store(true)\n\t\t}\n\t\tuuids := make([]string, 0, len(cl.Items))\n\t\tfor _, item := range cl.Items {\n\t\t\tuuids = append(uuids, item.UUID)\n\t\t}\n\t\treturn uuids, nil\n\t})\n\tif needSort.Load().(bool) {\n\t\t// Apply the default/implied order, \"modified_at desc\"\n\t\tsort.Slice(merged.Items, func(i, j int) bool {\n\t\t\tmi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt\n\t\t\treturn mj.Before(mi)\n\t\t})\n\t}\n\tif merged.Items == nil {\n\t\t// Return empty results as [], not null\n\t\t// (https://github.com/golang/go/issues/27589 might be\n\t\t// a better solution in the future)\n\t\tmerged.Items = []arvados.AuthorizedKey{}\n\t}\n\treturn merged, err\n}\n\nfunc (conn *Conn) generated_ContainerList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerList, error) {\n\tvar mtx sync.Mutex\n\tvar merged arvados.ContainerList\n\tvar needSort atomic.Value\n\tneedSort.Store(false)\n\terr := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {\n\t\toptions.ForwardedFor = conn.cluster.ClusterID + \"-\" + options.ForwardedFor\n\t\tcl, err := backend.ContainerList(ctx, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmtx.Lock()\n\t\tdefer mtx.Unlock()\n\t\tif len(merged.Items) == 0 {\n\t\t\tmerged = cl\n\t\t} else if len(cl.Items) > 0 {\n\t\t\tmerged.Items = append(merged.Items, cl.Items...)\n\t\t\tneedSort.Store(true)\n\t\t}\n\t\tuuids := make([]string, 0, len(cl.Items))\n\t\tfor _, item := range cl.Items {\n\t\t\tuuids = append(uuids, item.UUID)\n\t\t}\n\t\treturn uuids, nil\n\t})\n\tif needSort.Load().(bool) {\n\t\t// Apply the default/implied order, \"modified_at desc\"\n\t\tsort.Slice(merged.Items, func(i, j int) bool {\n\t\t\tmi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt\n\t\t\treturn mj.Before(mi)\n\t\t})\n\t}\n\tif merged.Items == nil {\n\t\t// Return empty results as [], not null\n\t\t// (https://github.com/golang/go/issues/27589 might be\n\t\t// a better solution in the future)\n\t\tmerged.Items = []arvados.Container{}\n\t}\n\treturn merged, err\n}\n\nfunc (conn *Conn) generated_ContainerRequestList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerRequestList, error) {\n\tvar mtx sync.Mutex\n\tvar merged arvados.ContainerRequestList\n\tvar needSort 
atomic.Value\n\tneedSort.Store(false)\n\terr := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {\n\t\toptions.ForwardedFor = conn.cluster.ClusterID + \"-\" + options.ForwardedFor\n\t\tcl, err := backend.ContainerRequestList(ctx, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmtx.Lock()\n\t\tdefer mtx.Unlock()\n\t\tif len(merged.Items) == 0 {\n\t\t\tmerged = cl\n\t\t} else if len(cl.Items) > 0 {\n\t\t\tmerged.Items = append(merged.Items, cl.Items...)\n\t\t\tneedSort.Store(true)\n\t\t}\n\t\tuuids := make([]string, 0, len(cl.Items))\n\t\tfor _, item := range cl.Items {\n\t\t\tuuids = append(uuids, item.UUID)\n\t\t}\n\t\treturn uuids, nil\n\t})\n\tif needSort.Load().(bool) {\n\t\t// Apply the default/implied order, \"modified_at desc\"\n\t\tsort.Slice(merged.Items, func(i, j int) bool {\n\t\t\tmi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt\n\t\t\treturn mj.Before(mi)\n\t\t})\n\t}\n\tif merged.Items == nil {\n\t\t// Return empty results as [], not null\n\t\t// (https://github.com/golang/go/issues/27589 might be\n\t\t// a better solution in the future)\n\t\tmerged.Items = []arvados.ContainerRequest{}\n\t}\n\treturn merged, err\n}\n\nfunc (conn *Conn) generated_GroupList(ctx context.Context, options arvados.ListOptions) (arvados.GroupList, error) {\n\tvar mtx sync.Mutex\n\tvar merged arvados.GroupList\n\tvar needSort atomic.Value\n\tneedSort.Store(false)\n\terr := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {\n\t\toptions.ForwardedFor = conn.cluster.ClusterID + \"-\" + options.ForwardedFor\n\t\tcl, err := backend.GroupList(ctx, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmtx.Lock()\n\t\tdefer mtx.Unlock()\n\t\tif len(merged.Items) == 0 {\n\t\t\tmerged = cl\n\t\t} else if len(cl.Items) > 0 {\n\t\t\tmerged.Items = append(merged.Items, cl.Items...)\n\t\t\tneedSort.Store(true)\n\t\t}\n\t\tuuids := make([]string, 0, len(cl.Items))\n\t\tfor _, item := range cl.Items {\n\t\t\tuuids = append(uuids, item.UUID)\n\t\t}\n\t\treturn uuids, nil\n\t})\n\tif needSort.Load().(bool) {\n\t\t// Apply the default/implied order, \"modified_at desc\"\n\t\tsort.Slice(merged.Items, func(i, j int) bool {\n\t\t\tmi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt\n\t\t\treturn mj.Before(mi)\n\t\t})\n\t}\n\tif merged.Items == nil {\n\t\t// Return empty results as [], not null\n\t\t// (https://github.com/golang/go/issues/27589 might be\n\t\t// a better solution in the future)\n\t\tmerged.Items = []arvados.Group{}\n\t}\n\treturn merged, err\n}\n\nfunc (conn *Conn) generated_UserList(ctx context.Context, options arvados.ListOptions) (arvados.UserList, error) {\n\tvar mtx sync.Mutex\n\tvar merged arvados.UserList\n\tvar needSort atomic.Value\n\tneedSort.Store(false)\n\terr := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {\n\t\toptions.ForwardedFor = conn.cluster.ClusterID + \"-\" + options.ForwardedFor\n\t\tcl, err := backend.UserList(ctx, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmtx.Lock()\n\t\tdefer mtx.Unlock()\n\t\tif len(merged.Items) == 0 {\n\t\t\tmerged = cl\n\t\t} else if len(cl.Items) > 0 {\n\t\t\tmerged.Items = append(merged.Items, cl.Items...)\n\t\t\tneedSort.Store(true)\n\t\t}\n\t\tuuids := make([]string, 0, len(cl.Items))\n\t\tfor _, item := 
range cl.Items {\n\t\t\tuuids = append(uuids, item.UUID)\n\t\t}\n\t\treturn uuids, nil\n\t})\n\tif needSort.Load().(bool) {\n\t\t// Apply the default/implied order, \"modified_at desc\"\n\t\tsort.Slice(merged.Items, func(i, j int) bool {\n\t\t\tmi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt\n\t\t\treturn mj.Before(mi)\n\t\t})\n\t}\n\tif merged.Items == nil {\n\t\t// Return empty results as [], not null\n\t\t// (https://github.com/golang/go/issues/27589 might be\n\t\t// a better solution in the future)\n\t\tmerged.Items = []arvados.User{}\n\t}\n\treturn merged, err\n}\n\nfunc (conn *Conn) generated_LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {\n\tvar mtx sync.Mutex\n\tvar merged arvados.LinkList\n\tvar needSort atomic.Value\n\tneedSort.Store(false)\n\terr := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {\n\t\toptions.ForwardedFor = conn.cluster.ClusterID + \"-\" + options.ForwardedFor\n\t\tcl, err := backend.LinkList(ctx, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmtx.Lock()\n\t\tdefer mtx.Unlock()\n\t\tif len(merged.Items) == 0 {\n\t\t\tmerged = cl\n\t\t} else if len(cl.Items) > 0 {\n\t\t\tmerged.Items = append(merged.Items, cl.Items...)\n\t\t\tneedSort.Store(true)\n\t\t}\n\t\tuuids := make([]string, 0, len(cl.Items))\n\t\tfor _, item := range cl.Items {\n\t\t\tuuids = append(uuids, item.UUID)\n\t\t}\n\t\treturn uuids, nil\n\t})\n\tif needSort.Load().(bool) {\n\t\t// Apply the default/implied order, \"modified_at desc\"\n\t\tsort.Slice(merged.Items, func(i, j int) bool {\n\t\t\tmi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt\n\t\t\treturn mj.Before(mi)\n\t\t})\n\t}\n\tif merged.Items == nil {\n\t\t// Return empty results as [], not null\n\t\t// (https://github.com/golang/go/issues/27589 might be\n\t\t// a better solution in the future)\n\t\tmerged.Items = []arvados.Link{}\n\t}\n\treturn merged, err\n}\n\nfunc (conn *Conn) generated_LogList(ctx context.Context, options arvados.ListOptions) (arvados.LogList, error) {\n\tvar mtx sync.Mutex\n\tvar merged arvados.LogList\n\tvar needSort atomic.Value\n\tneedSort.Store(false)\n\terr := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {\n\t\toptions.ForwardedFor = conn.cluster.ClusterID + \"-\" + options.ForwardedFor\n\t\tcl, err := backend.LogList(ctx, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmtx.Lock()\n\t\tdefer mtx.Unlock()\n\t\tif len(merged.Items) == 0 {\n\t\t\tmerged = cl\n\t\t} else if len(cl.Items) > 0 {\n\t\t\tmerged.Items = append(merged.Items, cl.Items...)\n\t\t\tneedSort.Store(true)\n\t\t}\n\t\tuuids := make([]string, 0, len(cl.Items))\n\t\tfor _, item := range cl.Items {\n\t\t\tuuids = append(uuids, item.UUID)\n\t\t}\n\t\treturn uuids, nil\n\t})\n\tif needSort.Load().(bool) {\n\t\t// Apply the default/implied order, \"modified_at desc\"\n\t\tsort.Slice(merged.Items, func(i, j int) bool {\n\t\t\tmi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt\n\t\t\treturn mj.Before(mi)\n\t\t})\n\t}\n\tif merged.Items == nil {\n\t\t// Return empty results as [], not null\n\t\t// (https://github.com/golang/go/issues/27589 might be\n\t\t// a better solution in the future)\n\t\tmerged.Items = []arvados.Log{}\n\t}\n\treturn merged, err\n}\n\nfunc (conn *Conn) generated_APIClientAuthorizationList(ctx context.Context, options 
arvados.ListOptions) (arvados.APIClientAuthorizationList, error) {\n\tvar mtx sync.Mutex\n\tvar merged arvados.APIClientAuthorizationList\n\tvar needSort atomic.Value\n\tneedSort.Store(false)\n\terr := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {\n\t\toptions.ForwardedFor = conn.cluster.ClusterID + \"-\" + options.ForwardedFor\n\t\tcl, err := backend.APIClientAuthorizationList(ctx, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmtx.Lock()\n\t\tdefer mtx.Unlock()\n\t\tif len(merged.Items) == 0 {\n\t\t\tmerged = cl\n\t\t} else if len(cl.Items) > 0 {\n\t\t\tmerged.Items = append(merged.Items, cl.Items...)\n\t\t\tneedSort.Store(true)\n\t\t}\n\t\tuuids := make([]string, 0, len(cl.Items))\n\t\tfor _, item := range cl.Items {\n\t\t\tuuids = append(uuids, item.UUID)\n\t\t}\n\t\treturn uuids, nil\n\t})\n\tif needSort.Load().(bool) {\n\t\t// Apply the default/implied order, \"modified_at desc\"\n\t\tsort.Slice(merged.Items, func(i, j int) bool {\n\t\t\tmi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt\n\t\t\treturn mj.Before(mi)\n\t\t})\n\t}\n\tif merged.Items == nil {\n\t\t// Return empty results as [], not null\n\t\t// (https://github.com/golang/go/issues/27589 might be\n\t\t// a better solution in the future)\n\t\tmerged.Items = []arvados.APIClientAuthorization{}\n\t}\n\treturn merged, err\n}\n"
  },
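// Illustrative standalone sketch (not a file in this repo) of the
// merge step shared by every generated_*List method above: pages
// returned by different clusters are concatenated, then re-sorted into
// the default/implied order, "modified_at desc". The item type is a
// hypothetical stand-in for any of the arvados record types.
package main

import (
	"fmt"
	"sort"
	"time"
)

type item struct {
	UUID       string
	ModifiedAt time.Time
}

func main() {
	merged := []item{
		{UUID: "bbbbb-4zz18-000000000000000", ModifiedAt: time.Unix(100, 0)},
		{UUID: "aaaaa-4zz18-000000000000000", ModifiedAt: time.Unix(200, 0)},
	}
	sort.Slice(merged, func(i, j int) bool {
		// mj.Before(mi) puts more recently modified items first.
		return merged[j].ModifiedAt.Before(merged[i].ModifiedAt)
	})
	fmt.Println(merged[0].UUID) // aaaaa-...: the newest item sorts first
}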
  {
    "path": "lib/controller/federation/generated_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage federation\n\nimport (\n\t\"os/exec\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&UptodateSuite{})\n\ntype UptodateSuite struct{}\n\nfunc (*UptodateSuite) TestUpToDate(c *check.C) {\n\toutput, err := exec.Command(\"go\", \"run\", \"generate.go\", \"-check\").CombinedOutput()\n\tif err != nil {\n\t\tc.Log(string(output))\n\t\tc.Error(\"generated.go is out of date -- run 'go generate' to update it\")\n\t}\n}\n"
  },
  {
    "path": "lib/controller/federation/group_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage federation\n\nimport (\n\t\"context\"\n\t\"errors\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&GroupSuite{})\n\ntype GroupSuite struct {\n\tFederationSuite\n}\n\nfunc makeConn() (*Conn, *arvadostest.APIStub, *arvadostest.APIStub) {\n\tlocalAPIstub := &arvadostest.APIStub{Error: errors.New(\"No result\")}\n\tremoteAPIstub := &arvadostest.APIStub{Error: errors.New(\"No result\")}\n\treturn &Conn{context.Background(), &arvados.Cluster{ClusterID: \"local\"}, localAPIstub, map[string]backend{\"zzzzz\": remoteAPIstub}}, localAPIstub, remoteAPIstub\n}\n\nfunc (s *UserSuite) TestGroupContents(c *check.C) {\n\tconn, localAPIstub, remoteAPIstub := makeConn()\n\tconn.GroupContents(s.ctx, arvados.GroupContentsOptions{UUID: \"local-tpzed-xurymjxw79nv3jz\"})\n\tc.Check(len(localAPIstub.Calls(nil)), check.Equals, 1)\n\tc.Check(len(remoteAPIstub.Calls(nil)), check.Equals, 0)\n\n\tconn, localAPIstub, remoteAPIstub = makeConn()\n\tconn.GroupContents(s.ctx, arvados.GroupContentsOptions{UUID: \"zzzzz-tpzed-xurymjxw79nv3jz\"})\n\tc.Check(len(localAPIstub.Calls(nil)), check.Equals, 1)\n\tc.Check(len(remoteAPIstub.Calls(nil)), check.Equals, 0)\n\n\tconn, localAPIstub, remoteAPIstub = makeConn()\n\tconn.GroupContents(s.ctx, arvados.GroupContentsOptions{UUID: \"local-j7d0g-xurymjxw79nv3jz\"})\n\tc.Check(len(localAPIstub.Calls(nil)), check.Equals, 1)\n\tc.Check(len(remoteAPIstub.Calls(nil)), check.Equals, 0)\n\n\tconn, localAPIstub, remoteAPIstub = makeConn()\n\tconn.GroupContents(s.ctx, arvados.GroupContentsOptions{UUID: \"zzzzz-j7d0g-xurymjxw79nv3jz\"})\n\tc.Check(len(localAPIstub.Calls(nil)), check.Equals, 0)\n\tc.Check(len(remoteAPIstub.Calls(nil)), check.Equals, 1)\n\n\tconn, localAPIstub, remoteAPIstub = makeConn()\n\tconn.GroupContents(s.ctx, arvados.GroupContentsOptions{UUID: \"zzzzz-tpzed-xurymjxw79nv3jz\", ClusterID: \"zzzzz\"})\n\tc.Check(len(localAPIstub.Calls(nil)), check.Equals, 0)\n\tc.Check(len(remoteAPIstub.Calls(nil)), check.Equals, 1)\n}\n"
  },
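// Illustrative standalone sketch (not a file in this repo) of the
// routing rule GroupContents applies and the test above exercises:
// user UUIDs (infix "tpzed") are always answered by the local cluster,
// while group UUIDs route by their five-character cluster-ID prefix
// unless a ClusterID is selected explicitly. routesLocally is a
// hypothetical helper; the regexp is the one from conn.go.
package main

import (
	"fmt"
	"regexp"
)

var userUuidRe = regexp.MustCompile(`^[0-9a-z]{5}-tpzed-[0-9a-z]{15}$`)

func routesLocally(localID, uuid string) bool {
	return userUuidRe.MatchString(uuid) || (len(uuid) >= 5 && uuid[:5] == localID)
}

func main() {
	fmt.Println(routesLocally("local", "zzzzz-tpzed-xurymjxw79nv3jz")) // true: user record, handled locally
	fmt.Println(routesLocally("local", "zzzzz-j7d0g-xurymjxw79nv3jz")) // false: group on remote cluster "zzzzz"
}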
  {
    "path": "lib/controller/federation/list.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage federation\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"sort\"\n\t\"sync\"\n\t\"sync/atomic\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\n//go:generate go run generate.go\n\n// CollectionList is used as a template to auto-generate List()\n// methods for other types; see generate.go.\n\nfunc (conn *Conn) generated_CollectionList(ctx context.Context, options arvados.ListOptions) (arvados.CollectionList, error) {\n\tvar mtx sync.Mutex\n\tvar merged arvados.CollectionList\n\tvar needSort atomic.Value\n\tneedSort.Store(false)\n\terr := conn.splitListRequest(ctx, options, func(ctx context.Context, _ string, backend arvados.API, options arvados.ListOptions) ([]string, error) {\n\t\toptions.ForwardedFor = conn.cluster.ClusterID + \"-\" + options.ForwardedFor\n\t\tcl, err := backend.CollectionList(ctx, options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmtx.Lock()\n\t\tdefer mtx.Unlock()\n\t\tif len(merged.Items) == 0 {\n\t\t\tmerged = cl\n\t\t} else if len(cl.Items) > 0 {\n\t\t\tmerged.Items = append(merged.Items, cl.Items...)\n\t\t\tneedSort.Store(true)\n\t\t}\n\t\tuuids := make([]string, 0, len(cl.Items))\n\t\tfor _, item := range cl.Items {\n\t\t\tuuids = append(uuids, item.UUID)\n\t\t}\n\t\treturn uuids, nil\n\t})\n\tif needSort.Load().(bool) {\n\t\t// Apply the default/implied order, \"modified_at desc\"\n\t\tsort.Slice(merged.Items, func(i, j int) bool {\n\t\t\tmi, mj := merged.Items[i].ModifiedAt, merged.Items[j].ModifiedAt\n\t\t\treturn mj.Before(mi)\n\t\t})\n\t}\n\tif merged.Items == nil {\n\t\t// Return empty results as [], not null\n\t\t// (https://github.com/golang/go/issues/27589 might be\n\t\t// a better solution in the future)\n\t\tmerged.Items = []arvados.Collection{}\n\t}\n\treturn merged, err\n}\n\n// Call fn on one or more local/remote backends if opts indicates a\n// federation-wide list query, i.e.:\n//\n//   - There is at least one filter of the form\n//     [\"uuid\",\"in\",[a,b,c,...]] or [\"uuid\",\"=\",a]\n//\n//   - One or more of the supplied UUIDs (a,b,c,...) 
has a non-local\n//     prefix.\n//\n//   - There are no other filters\n//\n// (If opts doesn't indicate a federation-wide list query, fn is just\n// called once with the local backend.)\n//\n// fn is called more than once only if the query meets the following\n// restrictions:\n//\n//   - Count==\"none\"\n//\n//   - Limit<0, or Limit is at least the number of UUIDs being\n//     requested\n//\n//   - Offset==0\n//\n//   - len(Order)==0\n//\n//   - Each filter is either \"uuid = ...\" or \"uuid in [...]\".\n//\n//   - The maximum possible response size (total number of objects\n//     that could potentially be matched by all of the specified\n//     filters) does not exceed the local cluster's response page size\n//     limit.\n//\n// If the query involves multiple backends but doesn't meet these\n// restrictions, an error is returned without calling fn.\n//\n// Thus, the caller can assume that either:\n//\n//   - splitListRequest() returns an error, or\n//\n//   - fn is called exactly once, or\n//\n//   - fn is called more than once, with options that satisfy the above\n//     restrictions.\n//\n// Each call to fn indicates a single (local or remote) backend and a\n// corresponding options argument suitable for sending to that\n// backend.\nfunc (conn *Conn) splitListRequest(ctx context.Context, opts arvados.ListOptions, fn func(context.Context, string, arvados.API, arvados.ListOptions) ([]string, error)) error {\n\n\tif opts.BypassFederation || opts.ForwardedFor != \"\" {\n\t\t// Client requested no federation.  Pass through.\n\t\t_, err := fn(ctx, conn.cluster.ClusterID, conn.local, opts)\n\t\treturn err\n\t}\n\tif opts.ClusterID != \"\" {\n\t\t// Client explicitly selected cluster\n\t\t_, err := fn(ctx, conn.cluster.ClusterID, conn.chooseBackend(opts.ClusterID), opts)\n\t\treturn err\n\t}\n\n\tcannotSplit := false\n\tvar matchAllFilters map[string]bool\n\tfor _, f := range opts.Filters {\n\t\tmatchThisFilter := map[string]bool{}\n\t\tif f.Attr != \"uuid\" {\n\t\t\tcannotSplit = true\n\t\t\tcontinue\n\t\t}\n\t\tif f.Operator == \"=\" {\n\t\t\tif uuid, ok := f.Operand.(string); ok {\n\t\t\t\tmatchThisFilter[uuid] = true\n\t\t\t} else {\n\t\t\t\treturn httpErrorf(http.StatusBadRequest, \"invalid operand type %T for filter %q\", f.Operand, f)\n\t\t\t}\n\t\t} else if f.Operator == \"in\" {\n\t\t\tif operand, ok := f.Operand.([]interface{}); ok {\n\t\t\t\t// skip any elements that aren't\n\t\t\t\t// strings (thus can't match a UUID,\n\t\t\t\t// thus can't affect the response).\n\t\t\t\tfor _, v := range operand {\n\t\t\t\t\tif uuid, ok := v.(string); ok {\n\t\t\t\t\t\tmatchThisFilter[uuid] = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if strings, ok := f.Operand.([]string); ok {\n\t\t\t\tfor _, uuid := range strings {\n\t\t\t\t\tmatchThisFilter[uuid] = true\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn httpErrorf(http.StatusBadRequest, \"invalid operand type %T in filter %q\", f.Operand, f)\n\t\t\t}\n\t\t} else {\n\t\t\tcannotSplit = true\n\t\t\tcontinue\n\t\t}\n\n\t\tif matchAllFilters == nil {\n\t\t\tmatchAllFilters = matchThisFilter\n\t\t} else {\n\t\t\t// Reduce matchAllFilters to the intersection\n\t\t\t// of matchAllFilters ∩ matchThisFilter.\n\t\t\tfor uuid := range matchAllFilters {\n\t\t\t\tif !matchThisFilter[uuid] {\n\t\t\t\t\tdelete(matchAllFilters, uuid)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif matchAllFilters == nil {\n\t\t// Not filtering by UUID at all; just query the local\n\t\t// cluster.\n\t\t_, err := fn(ctx, conn.cluster.ClusterID, conn.local, opts)\n\t\treturn err\n\t}\n\n\t// Collate UUIDs in matchAllFilters by remote cluster ID --\n\t// e.g., 
todoByRemote[\"aaaaa\"][\"aaaaa-4zz18-000000000000000\"]\n\t// will be true -- and count the total number of UUIDs we're\n\t// filtering on, so we can compare it to our max page size\n\t// limit.\n\tnUUIDs := 0\n\ttodoByRemote := map[string]map[string]bool{}\n\tfor uuid := range matchAllFilters {\n\t\tif len(uuid) != 27 {\n\t\t\t// Cannot match anything, just drop it\n\t\t} else {\n\t\t\tif todoByRemote[uuid[:5]] == nil {\n\t\t\t\ttodoByRemote[uuid[:5]] = map[string]bool{}\n\t\t\t}\n\t\t\ttodoByRemote[uuid[:5]][uuid] = true\n\t\t\tnUUIDs++\n\t\t}\n\t}\n\n\tif len(todoByRemote) == 0 {\n\t\treturn nil\n\t}\n\tif len(todoByRemote) == 1 && todoByRemote[conn.cluster.ClusterID] != nil {\n\t\t// All UUIDs are local, so proxy a single request. The\n\t\t// generic case has some limitations (see below) which\n\t\t// we don't want to impose on local requests.\n\t\t_, err := fn(ctx, conn.cluster.ClusterID, conn.local, opts)\n\t\treturn err\n\t}\n\tif cannotSplit {\n\t\treturn httpErrorf(http.StatusBadRequest, \"cannot execute federated list query: each filter must be either 'uuid = ...' or 'uuid in [...]'\")\n\t}\n\tif opts.Count != \"none\" {\n\t\treturn httpErrorf(http.StatusBadRequest, \"cannot execute federated list query unless count==\\\"none\\\"\")\n\t}\n\tif (opts.Limit >= 0 && opts.Limit < int64(nUUIDs)) || opts.Offset != 0 || len(opts.Order) > 0 {\n\t\treturn httpErrorf(http.StatusBadRequest, \"cannot execute federated list query with limit (%d) < nUUIDs (%d), offset (%d) > 0, or order (%v) parameter\", opts.Limit, nUUIDs, opts.Offset, opts.Order)\n\t}\n\tif max := conn.cluster.API.MaxItemsPerResponse; nUUIDs > max {\n\t\treturn httpErrorf(http.StatusBadRequest, \"cannot execute federated list query because number of UUIDs (%d) exceeds page size limit %d\", nUUIDs, max)\n\t}\n\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\terrs := make(chan error, len(todoByRemote))\n\tfor clusterID, todo := range todoByRemote {\n\t\tgo func(clusterID string, todo map[string]bool) {\n\t\t\t// This goroutine sends exactly one value to\n\t\t\t// errs.\n\t\t\tbatch := make([]string, 0, len(todo))\n\t\t\tfor uuid := range todo {\n\t\t\t\tbatch = append(batch, uuid)\n\t\t\t}\n\n\t\t\tvar backend arvados.API\n\t\t\tif clusterID == conn.cluster.ClusterID {\n\t\t\t\tbackend = conn.local\n\t\t\t} else if backend = conn.remotes[clusterID]; backend == nil {\n\t\t\t\terrs <- httpErrorf(http.StatusNotFound, \"cannot execute federated list query: no proxy available for cluster %q\", clusterID)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tremoteOpts := opts\n\t\t\tif remoteOpts.Select != nil {\n\t\t\t\t// We always need to select UUIDs to\n\t\t\t\t// use the response, even if our\n\t\t\t\t// caller doesn't.\n\t\t\t\tremoteOpts.Select = append([]string{\"uuid\"}, remoteOpts.Select...)\n\t\t\t}\n\t\t\tfor len(todo) > 0 {\n\t\t\t\tif len(batch) > len(todo) {\n\t\t\t\t\t// Reduce batch to just the todo's\n\t\t\t\t\tbatch = batch[:0]\n\t\t\t\t\tfor uuid := range todo {\n\t\t\t\t\t\tbatch = append(batch, uuid)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tremoteOpts.Filters = []arvados.Filter{{\"uuid\", \"in\", batch}}\n\n\t\t\t\tdone, err := fn(ctx, clusterID, backend, remoteOpts)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrs <- httpErrorf(http.StatusBadGateway, \"%s\", err.Error())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tprogress := false\n\t\t\t\tfor _, uuid := range done {\n\t\t\t\t\tif _, ok := todo[uuid]; ok {\n\t\t\t\t\t\tprogress = true\n\t\t\t\t\t\tdelete(todo, uuid)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(done) == 0 {\n\t\t\t\t\t// 
Zero items == no more\n\t\t\t\t\t// results exist, no need to\n\t\t\t\t\t// get another page.\n\t\t\t\t\tbreak\n\t\t\t\t} else if !progress {\n\t\t\t\t\terrs <- httpErrorf(http.StatusBadGateway, \"cannot make progress in federated list query: cluster %q returned %d items but none had the requested UUIDs\", clusterID, len(done))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\terrs <- nil\n\t\t}(clusterID, todo)\n\t}\n\n\t// Wait for all goroutines to return, then return the first\n\t// non-nil error, if any.\n\tvar firstErr error\n\tfor range todoByRemote {\n\t\tif err := <-errs; err != nil && firstErr == nil {\n\t\t\tfirstErr = err\n\t\t\t// Signal to any remaining fn() calls that\n\t\t\t// further effort is futile.\n\t\t\tcancel()\n\t\t}\n\t}\n\treturn firstErr\n}\n\nfunc httpErrorf(code int, format string, args ...interface{}) error {\n\treturn httpserver.ErrorWithStatus(fmt.Errorf(format, args...), code)\n}\n"
  },
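// Illustrative client-side sketch (not a file in this repo): a list
// request shaped so that splitListRequest above can fan it out across
// clusters. It satisfies the documented restrictions -- Count=="none",
// a permissive Limit, no Offset or Order, and only uuid-based filters
// -- and the "aaaaa"/"bbbbb" UUID prefixes select which clusters are
// queried. federatedList is a hypothetical wrapper, not repo code.
package sketch

import (
	"context"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func federatedList(ctx context.Context, conn arvados.API) (arvados.CollectionList, error) {
	return conn.CollectionList(ctx, arvados.ListOptions{
		Count: "none",
		Limit: -1, // or any value >= the number of UUIDs below
		Filters: []arvados.Filter{{
			Attr:     "uuid",
			Operator: "in",
			Operand:  []string{"aaaaa-4zz18-000000000000000", "bbbbb-4zz18-000000000000000"},
		}},
	})
}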
  {
    "path": "lib/controller/federation/list_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage federation\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"reflect\"\n\t\"sort\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&CollectionListSuite{})\n\ntype collectionLister struct {\n\tarvadostest.APIStub\n\tItemsToReturn []arvados.Collection\n\tMaxPageSize   int\n}\n\nfunc (cl *collectionLister) matchFilters(c arvados.Collection, filters []arvados.Filter) bool {\nnextfilter:\n\tfor _, f := range filters {\n\t\tif f.Attr == \"uuid\" && f.Operator == \"=\" {\n\t\t\ts, ok := f.Operand.(string)\n\t\t\tif ok && s == c.UUID {\n\t\t\t\tcontinue nextfilter\n\t\t\t}\n\t\t} else if f.Attr == \"uuid\" && f.Operator == \"in\" {\n\t\t\tif operand, ok := f.Operand.([]string); ok {\n\t\t\t\tfor _, s := range operand {\n\t\t\t\t\tif s == c.UUID {\n\t\t\t\t\t\tcontinue nextfilter\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if operand, ok := f.Operand.([]interface{}); ok {\n\t\t\t\tfor _, s := range operand {\n\t\t\t\t\tif s, ok := s.(string); ok && s == c.UUID {\n\t\t\t\t\t\tcontinue nextfilter\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (cl *collectionLister) CollectionList(ctx context.Context, options arvados.ListOptions) (resp arvados.CollectionList, _ error) {\n\tcl.APIStub.CollectionList(ctx, options)\n\tfor _, c := range cl.ItemsToReturn {\n\t\tif cl.MaxPageSize > 0 && len(resp.Items) >= cl.MaxPageSize {\n\t\t\tbreak\n\t\t}\n\t\tif options.Limit >= 0 && int64(len(resp.Items)) >= options.Limit {\n\t\t\tbreak\n\t\t}\n\t\tif cl.matchFilters(c, options.Filters) {\n\t\t\tif reflect.DeepEqual(options.Select, []string{\"uuid\", \"name\"}) {\n\t\t\t\tc = arvados.Collection{UUID: c.UUID, Name: c.Name}\n\t\t\t} else if reflect.DeepEqual(options.Select, []string{\"name\"}) {\n\t\t\t\tc = arvados.Collection{Name: c.Name}\n\t\t\t} else if len(options.Select) > 0 {\n\t\t\t\tpanic(fmt.Sprintf(\"not implemented: options=%#v\", options))\n\t\t\t}\n\t\t\tresp.Items = append(resp.Items, c)\n\t\t}\n\t}\n\treturn\n}\n\ntype CollectionListSuite struct {\n\tFederationSuite\n\tids      []string   // aaaaa, bbbbb, ccccc\n\tuuids    [][]string // [[aa-*, aa-*, aa-*], [bb-*, bb-*, ...], ...]\n\tbackends []*collectionLister\n}\n\nfunc (s *CollectionListSuite) SetUpTest(c *check.C) {\n\ts.FederationSuite.SetUpTest(c)\n\n\ts.ids = nil\n\ts.uuids = nil\n\ts.backends = nil\n\tfor i, id := range []string{\"aaaaa\", \"bbbbb\", \"ccccc\"} {\n\t\tcl := &collectionLister{}\n\t\ts.ids = append(s.ids, id)\n\t\ts.uuids = append(s.uuids, nil)\n\t\tfor j := 0; j < 5; j++ {\n\t\t\tuuid := fmt.Sprintf(\"%s-4zz18-%s%010d\", id, id, j)\n\t\t\ts.uuids[i] = append(s.uuids[i], uuid)\n\t\t\tcl.ItemsToReturn = append(cl.ItemsToReturn, arvados.Collection{\n\t\t\t\tUUID: uuid,\n\t\t\t})\n\t\t}\n\t\ts.backends = append(s.backends, cl)\n\t\tif i == 0 {\n\t\t\ts.fed.local = cl\n\t\t} else if i%1 == 0 {\n\t\t\t// call some backends directly via API\n\t\t\ts.addDirectRemote(c, id, cl)\n\t\t} else {\n\t\t\t// call some backends through rpc->router->API\n\t\t\t// to ensure nothing is lost in translation\n\t\t\ts.addHTTPRemote(c, id, cl)\n\t\t}\n\t}\n}\n\ntype listTrial struct {\n\tcount        string\n\tlimit        int64\n\toffset       int64\n\torder        []string\n\tfilters      []arvados.Filter\n\tselectfields []string\n\texpectUUIDs  []string\n\texpectCalls  
[]int // number of API calls to backends\n\texpectStatus int\n}\n\nfunc (s *CollectionListSuite) TestCollectionListNoUUIDFilters(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount:       \"none\",\n\t\tlimit:       1,\n\t\texpectUUIDs: []string{s.uuids[0][0]},\n\t\texpectCalls: []int{1, 0, 0},\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListOneLocal(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount:       \"none\",\n\t\tlimit:       -1,\n\t\tfilters:     []arvados.Filter{{\"uuid\", \"=\", s.uuids[0][0]}},\n\t\texpectUUIDs: []string{s.uuids[0][0]},\n\t\texpectCalls: []int{1, 0, 0},\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListOneRemote(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount:       \"none\",\n\t\tlimit:       -1,\n\t\tfilters:     []arvados.Filter{{\"uuid\", \"=\", s.uuids[1][0]}},\n\t\texpectUUIDs: []string{s.uuids[1][0]},\n\t\texpectCalls: []int{0, 1, 0},\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListOneLocalDeselectingUUID(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount:        \"none\",\n\t\tlimit:        -1,\n\t\tfilters:      []arvados.Filter{{\"uuid\", \"=\", s.uuids[0][0]}},\n\t\tselectfields: []string{\"name\"},\n\t\texpectUUIDs:  []string{\"\"}, // select=name is honored\n\t\texpectCalls:  []int{1, 0, 0},\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListOneLocalUsingInOperator(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount:       \"none\",\n\t\tlimit:       -1,\n\t\tfilters:     []arvados.Filter{{\"uuid\", \"in\", []string{s.uuids[0][0]}}},\n\t\texpectUUIDs: []string{s.uuids[0][0]},\n\t\texpectCalls: []int{1, 0, 0},\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListOneRemoteUsingInOperator(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount:       \"none\",\n\t\tlimit:       -1,\n\t\tfilters:     []arvados.Filter{{\"uuid\", \"in\", []string{s.uuids[1][1]}}},\n\t\texpectUUIDs: []string{s.uuids[1][1]},\n\t\texpectCalls: []int{0, 1, 0},\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListOneRemoteDeselectingUUID(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount:        \"none\",\n\t\tlimit:        -1,\n\t\tfilters:      []arvados.Filter{{\"uuid\", \"=\", s.uuids[1][0]}},\n\t\tselectfields: []string{\"name\"},\n\t\texpectUUIDs:  []string{s.uuids[1][0]}, // uuid is returned, despite not being selected\n\t\texpectCalls:  []int{0, 1, 0},\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListOneLocalOneRemote(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount:       \"none\",\n\t\tlimit:       -1,\n\t\tfilters:     []arvados.Filter{{\"uuid\", \"in\", []string{s.uuids[0][0], s.uuids[1][0]}}},\n\t\texpectUUIDs: []string{s.uuids[0][0], s.uuids[1][0]},\n\t\texpectCalls: []int{1, 1, 0},\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListOneLocalOneRemoteDeselectingUUID(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount:        \"none\",\n\t\tlimit:        -1,\n\t\tfilters:      []arvados.Filter{{\"uuid\", \"in\", []string{s.uuids[0][0], s.uuids[1][0]}}},\n\t\tselectfields: []string{\"name\"},\n\t\texpectUUIDs:  []string{s.uuids[0][0], s.uuids[1][0]}, // uuid is returned, despite not being selected\n\t\texpectCalls:  []int{1, 1, 0},\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListTwoRemotes(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount:       \"none\",\n\t\tlimit:       -1,\n\t\tfilters:     []arvados.Filter{{\"uuid\", \"in\", []string{s.uuids[2][0], s.uuids[1][0]}}},\n\t\texpectUUIDs: []string{s.uuids[1][0], s.uuids[2][0]},\n\t\texpectCalls: []int{0, 1, 1},\n\t})\n}\n\nfunc (s 
*CollectionListSuite) TestCollectionListSatisfyAllFilters(c *check.C) {\n\ts.cluster.API.MaxItemsPerResponse = 2\n\ts.test(c, listTrial{\n\t\tcount: \"none\",\n\t\tlimit: -1,\n\t\tfilters: []arvados.Filter{\n\t\t\t{\"uuid\", \"in\", []string{s.uuids[0][0], s.uuids[1][1], s.uuids[2][0], s.uuids[2][1], s.uuids[2][2]}},\n\t\t\t{\"uuid\", \"in\", []string{s.uuids[0][0], s.uuids[1][2], s.uuids[2][1]}},\n\t\t},\n\t\texpectUUIDs: []string{s.uuids[0][0], s.uuids[2][1]},\n\t\texpectCalls: []int{1, 0, 1},\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListEmptySet(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount:       \"none\",\n\t\tlimit:       -1,\n\t\tfilters:     []arvados.Filter{{\"uuid\", \"in\", []string{}}},\n\t\texpectUUIDs: []string{},\n\t\texpectCalls: []int{0, 0, 0},\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListUnmatchableUUID(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount: \"none\",\n\t\tlimit: -1,\n\t\tfilters: []arvados.Filter{\n\t\t\t{\"uuid\", \"in\", []string{s.uuids[0][0], \"abcdefg\"}},\n\t\t\t{\"uuid\", \"in\", []string{s.uuids[0][0], \"bbbbb-4zz18-bogus\"}},\n\t\t\t{\"uuid\", \"in\", []string{s.uuids[0][0], \"bogus-4zz18-bogus\"}},\n\t\t},\n\t\texpectUUIDs: []string{s.uuids[0][0]},\n\t\texpectCalls: []int{1, 0, 0},\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListMultiPage(c *check.C) {\n\tfor i := range s.backends {\n\t\ts.uuids[i] = s.uuids[i][:3]\n\t\ts.backends[i].ItemsToReturn = s.backends[i].ItemsToReturn[:3]\n\t}\n\ts.cluster.API.MaxItemsPerResponse = 9\n\tfor _, stub := range s.backends {\n\t\tstub.MaxPageSize = 2\n\t}\n\tallUUIDs := append(append(append([]string(nil), s.uuids[0]...), s.uuids[1]...), s.uuids[2]...)\n\ts.test(c, listTrial{\n\t\tcount:       \"none\",\n\t\tlimit:       -1,\n\t\tfilters:     []arvados.Filter{{\"uuid\", \"in\", append([]string(nil), allUUIDs...)}},\n\t\texpectUUIDs: allUUIDs,\n\t\texpectCalls: []int{2, 2, 2},\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListMultiSiteExtraFilters(c *check.C) {\n\t// not [yet] supported\n\ts.test(c, listTrial{\n\t\tcount: \"none\",\n\t\tlimit: -1,\n\t\tfilters: []arvados.Filter{\n\t\t\t{\"uuid\", \"in\", []string{s.uuids[0][0], s.uuids[1][0]}},\n\t\t\t{\"uuid\", \"is_a\", \"teapot\"},\n\t\t},\n\t\texpectCalls:  []int{0, 0, 0},\n\t\texpectStatus: http.StatusBadRequest,\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListMultiSiteWithCount(c *check.C) {\n\tfor _, count := range []string{\"\", \"exact\"} {\n\t\ts.SetUpTest(c) // Reset backends / call counters\n\t\ts.test(c, listTrial{\n\t\t\tcount: count,\n\t\t\tlimit: -1,\n\t\t\tfilters: []arvados.Filter{\n\t\t\t\t{\"uuid\", \"in\", []string{s.uuids[0][0], s.uuids[1][0]}},\n\t\t\t\t{\"uuid\", \"is_a\", \"teapot\"},\n\t\t\t},\n\t\t\texpectCalls:  []int{0, 0, 0},\n\t\t\texpectStatus: http.StatusBadRequest,\n\t\t})\n\t}\n}\n\nfunc (s *CollectionListSuite) TestCollectionListMultiSiteWithLimit(c *check.C) {\n\tfor _, limit := range []int64{0, 1, 2} {\n\t\ts.SetUpTest(c) // Reset backends / call counters\n\t\ts.test(c, listTrial{\n\t\t\tcount: \"none\",\n\t\t\tlimit: limit,\n\t\t\tfilters: []arvados.Filter{\n\t\t\t\t{\"uuid\", \"in\", []string{s.uuids[0][0], s.uuids[1][0], s.uuids[2][0]}},\n\t\t\t\t{\"uuid\", \"is_a\", \"teapot\"},\n\t\t\t},\n\t\t\texpectCalls:  []int{0, 0, 0},\n\t\t\texpectStatus: http.StatusBadRequest,\n\t\t})\n\t}\n}\n\nfunc (s *CollectionListSuite) TestCollectionListMultiSiteWithHighLimit(c *check.C) {\n\tuuids := []string{s.uuids[0][0], s.uuids[1][0], s.uuids[2][0]}\n\tfor _, limit := 
range []int64{3, 4, 1234567890} {\n\t\ts.SetUpTest(c) // Reset backends / call counters\n\t\ts.test(c, listTrial{\n\t\t\tcount: \"none\",\n\t\t\tlimit: limit,\n\t\t\tfilters: []arvados.Filter{\n\t\t\t\t{\"uuid\", \"in\", uuids},\n\t\t\t},\n\t\t\texpectUUIDs: uuids,\n\t\t\texpectCalls: []int{1, 1, 1},\n\t\t})\n\t}\n}\n\nfunc (s *CollectionListSuite) TestCollectionListMultiSiteWithOffset(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount:  \"none\",\n\t\tlimit:  -1,\n\t\toffset: 1,\n\t\tfilters: []arvados.Filter{\n\t\t\t{\"uuid\", \"in\", []string{s.uuids[0][0], s.uuids[1][0]}},\n\t\t\t{\"uuid\", \"is_a\", \"teapot\"},\n\t\t},\n\t\texpectCalls:  []int{0, 0, 0},\n\t\texpectStatus: http.StatusBadRequest,\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListMultiSiteWithOrder(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount: \"none\",\n\t\tlimit: -1,\n\t\torder: []string{\"uuid desc\"},\n\t\tfilters: []arvados.Filter{\n\t\t\t{\"uuid\", \"in\", []string{s.uuids[0][0], s.uuids[1][0]}},\n\t\t\t{\"uuid\", \"is_a\", \"teapot\"},\n\t\t},\n\t\texpectCalls:  []int{0, 0, 0},\n\t\texpectStatus: http.StatusBadRequest,\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListInvalidFilters(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount: \"none\",\n\t\tlimit: -1,\n\t\tfilters: []arvados.Filter{\n\t\t\t{\"uuid\", \"in\", \"teapot\"},\n\t\t},\n\t\texpectCalls:  []int{0, 0, 0},\n\t\texpectStatus: http.StatusBadRequest,\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListRemoteUnknown(c *check.C) {\n\ts.test(c, listTrial{\n\t\tcount: \"none\",\n\t\tlimit: -1,\n\t\tfilters: []arvados.Filter{\n\t\t\t{\"uuid\", \"in\", []string{s.uuids[0][0], \"bogus-4zz18-000001111122222\"}},\n\t\t},\n\t\texpectStatus: http.StatusNotFound,\n\t})\n}\n\nfunc (s *CollectionListSuite) TestCollectionListRemoteError(c *check.C) {\n\ts.addDirectRemote(c, \"bbbbb\", &arvadostest.APIStub{Error: fmt.Errorf(\"stub backend error\")})\n\ts.test(c, listTrial{\n\t\tcount: \"none\",\n\t\tlimit: -1,\n\t\tfilters: []arvados.Filter{\n\t\t\t{\"uuid\", \"in\", []string{s.uuids[0][0], s.uuids[1][0]}},\n\t\t},\n\t\texpectStatus: http.StatusBadGateway,\n\t})\n}\n\n// test runs CollectionList with the parameters given in trial, then\n// checks the returned items (or returned error) and the number of API\n// calls made to each backend against the trial's expectations.\nfunc (s *CollectionListSuite) test(c *check.C, trial listTrial) {\n\tresp, err := s.fed.CollectionList(s.ctx, arvados.ListOptions{\n\t\tCount:   trial.count,\n\t\tLimit:   trial.limit,\n\t\tOffset:  trial.offset,\n\t\tOrder:   trial.order,\n\t\tFilters: trial.filters,\n\t\tSelect:  trial.selectfields,\n\t})\n\tif trial.expectStatus != 0 {\n\t\tc.Assert(err, check.NotNil)\n\t\terr, ok := err.(interface{ HTTPStatus() int })\n\t\tc.Assert(ok, check.Equals, true) // err must implement interface{ HTTPStatus() int }\n\t\tc.Check(err.HTTPStatus(), check.Equals, trial.expectStatus)\n\t\tc.Logf(\"returned error is %#v\", err)\n\t\tc.Logf(\"returned error string is %q\", err)\n\t} else {\n\t\tc.Check(err, check.IsNil)\n\t\texpectItems := []arvados.Collection{}\n\t\tfor _, uuid := range trial.expectUUIDs {\n\t\t\texpectItems = append(expectItems, arvados.Collection{UUID: uuid})\n\t\t}\n\t\t// expectItems is sorted by UUID, so sort resp.Items\n\t\t// by UUID before checking DeepEquals.\n\t\tsort.Slice(resp.Items, func(i, j int) bool { return resp.Items[i].UUID < resp.Items[j].UUID })\n\t\tc.Check(resp, check.DeepEquals, arvados.CollectionList{\n\t\t\tItems: expectItems,\n\t\t})\n\t}\n\n\tfor i, stub := range s.backends {\n\t\tif i >= len(trial.expectCalls) {\n\t\t\tbreak\n\t\t}\n\t\tcalls := stub.Calls(nil)\n\t\tc.Check(calls, check.HasLen, trial.expectCalls[i])\n\t\tif len(calls) == 0 
{\n\t\t\tcontinue\n\t\t}\n\t\topts := calls[0].Options.(arvados.ListOptions)\n\t\tc.Check(opts.Limit, check.Equals, trial.limit)\n\t}\n}\n"
  },
  {
    "path": "lib/controller/federation/login_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage federation\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&LoginSuite{})\n\ntype LoginSuite struct {\n\tFederationSuite\n}\n\nfunc (s *LoginSuite) TestDeferToLoginCluster(c *check.C) {\n\ts.addHTTPRemote(c, \"zhome\", &arvadostest.APIStub{})\n\ts.cluster.Login.LoginCluster = \"zhome\"\n\n\treturnTo := \"https://app.example.com/foo?bar\"\n\tfor _, remote := range []string{\"\", \"ccccc\"} {\n\t\tresp, err := s.fed.Login(context.Background(), arvados.LoginOptions{Remote: remote, ReturnTo: returnTo})\n\t\tc.Check(err, check.IsNil)\n\t\tc.Logf(\"remote %q -- RedirectLocation %q\", remote, resp.RedirectLocation)\n\t\ttarget, err := url.Parse(resp.RedirectLocation)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(target.Host, check.Equals, s.cluster.RemoteClusters[\"zhome\"].Host)\n\t\tc.Check(target.Scheme, check.Equals, \"http\")\n\t\tc.Check(target.Query().Get(\"return_to\"), check.Equals, returnTo)\n\t\tc.Check(target.Query().Get(\"remote\"), check.Equals, remote)\n\t\t_, remotePresent := target.Query()[\"remote\"]\n\t\tc.Check(remotePresent, check.Equals, remote != \"\")\n\t}\n}\n"
  },
  {
    "path": "lib/controller/federation/logout_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage federation\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/url\"\n\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&LogoutSuite{})\nvar emptyURL = &url.URL{}\n\ntype LogoutStub struct {\n\tarvadostest.APIStub\n\tredirectLocation *url.URL\n}\n\nfunc (as *LogoutStub) CheckCalls(c *check.C, returnURL *url.URL) bool {\n\tactual := as.APIStub.Calls(as.APIStub.Logout)\n\tallOK := c.Check(actual, check.Not(check.HasLen), 0,\n\t\tcheck.Commentf(\"Logout stub never called\"))\n\texpected := returnURL.String()\n\tfor _, call := range actual {\n\t\topts, ok := call.Options.(arvados.LogoutOptions)\n\t\tallOK = c.Check(ok, check.Equals, true,\n\t\t\tcheck.Commentf(\"call options were not LogoutOptions\")) &&\n\t\t\tc.Check(opts.ReturnTo, check.Equals, expected) &&\n\t\t\tallOK\n\t}\n\treturn allOK\n}\n\nfunc (as *LogoutStub) Logout(ctx context.Context, options arvados.LogoutOptions) (arvados.LogoutResponse, error) {\n\tas.APIStub.Logout(ctx, options)\n\tloc := as.redirectLocation.String()\n\tif loc == \"\" {\n\t\tloc = options.ReturnTo\n\t}\n\treturn arvados.LogoutResponse{\n\t\tRedirectLocation: loc,\n\t}, as.Error\n}\n\ntype LogoutSuite struct {\n\tFederationSuite\n}\n\nfunc (s *LogoutSuite) badReturnURL(path string) *url.URL {\n\treturn &url.URL{\n\t\tScheme: \"https\",\n\t\tHost:   \"example.net\",\n\t\tPath:   path,\n\t}\n}\n\nfunc (s *LogoutSuite) goodReturnURL(path string) *url.URL {\n\tu, _ := url.Parse(s.cluster.Services.Workbench2.ExternalURL.String())\n\tu.Path = path\n\treturn u\n}\n\nfunc (s *LogoutSuite) setupFederation(loginCluster string) {\n\tif loginCluster == \"\" {\n\t\ts.cluster.Login.Test.Enable = true\n\t} else {\n\t\ts.cluster.Login.LoginCluster = loginCluster\n\t}\n\tdbconn := ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}\n\ts.fed = New(s.ctx, s.cluster, nil, dbconn.GetDB)\n}\n\nfunc (s *LogoutSuite) setupStub(c *check.C, id string, stubURL *url.URL, stubErr error) *LogoutStub {\n\tloc, err := url.Parse(stubURL.String())\n\tc.Check(err, check.IsNil)\n\tstub := LogoutStub{redirectLocation: loc}\n\tstub.Error = stubErr\n\tif id == s.cluster.ClusterID {\n\t\ts.fed.local = &stub\n\t} else {\n\t\ts.addDirectRemote(c, id, &stub)\n\t}\n\treturn &stub\n}\n\nfunc (s *LogoutSuite) v2Token(clusterID string) string {\n\treturn fmt.Sprintf(\"v2/%s-gj3su-12345abcde67890/abcdefghijklmnopqrstuvwxy\", clusterID)\n}\n\nfunc (s *LogoutSuite) TestLocalLogoutOK(c *check.C) {\n\ts.setupFederation(\"\")\n\tresp, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, s.cluster.Services.Workbench2.ExternalURL.String())\n}\n\nfunc (s *LogoutSuite) TestLocalLogoutRedirect(c *check.C) {\n\ts.setupFederation(\"\")\n\texpURL := s.cluster.Services.Workbench1.ExternalURL\n\topts := arvados.LogoutOptions{ReturnTo: expURL.String()}\n\tresp, err := s.fed.Logout(s.ctx, opts)\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, expURL.String())\n}\n\nfunc (s *LogoutSuite) TestLocalLogoutBadRequestError(c *check.C) {\n\ts.setupFederation(\"\")\n\treturnTo := s.badReturnURL(\"TestLocalLogoutBadRequestError\")\n\topts := arvados.LogoutOptions{ReturnTo: 
returnTo.String()}\n\t_, err := s.fed.Logout(s.ctx, opts)\n\tc.Check(err, check.NotNil)\n}\n\nfunc (s *LogoutSuite) TestRemoteLogoutRedirect(c *check.C) {\n\ts.setupFederation(\"zhome\")\n\tredirect := url.URL{Scheme: \"https\", Host: \"example.com\"}\n\tloginStub := s.setupStub(c, \"zhome\", &redirect, nil)\n\treturnTo := s.goodReturnURL(\"TestRemoteLogoutRedirect\")\n\tresp, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, redirect.String())\n\tloginStub.CheckCalls(c, returnTo)\n}\n\nfunc (s *LogoutSuite) TestRemoteLogoutError(c *check.C) {\n\ts.setupFederation(\"zhome\")\n\texpErr := errors.New(\"TestRemoteLogoutError expErr\")\n\tloginStub := s.setupStub(c, \"zhome\", emptyURL, expErr)\n\treturnTo := s.goodReturnURL(\"TestRemoteLogoutError\")\n\t_, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})\n\tc.Check(err, check.Equals, expErr)\n\tloginStub.CheckCalls(c, returnTo)\n}\n\nfunc (s *LogoutSuite) TestRemoteLogoutLocalRedirect(c *check.C) {\n\ts.setupFederation(\"zhome\")\n\tloginStub := s.setupStub(c, \"zhome\", emptyURL, nil)\n\tredirect := url.URL{Scheme: \"https\", Host: \"example.com\"}\n\tlocalStub := s.setupStub(c, \"aaaaa\", &redirect, nil)\n\tresp, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, redirect.String())\n\t// emptyURL to match the empty LogoutOptions\n\tloginStub.CheckCalls(c, emptyURL)\n\tlocalStub.CheckCalls(c, emptyURL)\n}\n\nfunc (s *LogoutSuite) TestRemoteLogoutLocalError(c *check.C) {\n\ts.setupFederation(\"zhome\")\n\texpErr := errors.New(\"TestRemoteLogoutLocalError expErr\")\n\tloginStub := s.setupStub(c, \"zhome\", emptyURL, nil)\n\tlocalStub := s.setupStub(c, \"aaaaa\", emptyURL, expErr)\n\t_, err := s.fed.Logout(s.ctx, arvados.LogoutOptions{})\n\tc.Check(err, check.Equals, expErr)\n\tloginStub.CheckCalls(c, emptyURL)\n\tlocalStub.CheckCalls(c, emptyURL)\n}\n\nfunc (s *LogoutSuite) TestV2TokenRedirect(c *check.C) {\n\ts.setupFederation(\"\")\n\tredirect := url.URL{Scheme: \"https\", Host: \"example.com\"}\n\treturnTo := s.goodReturnURL(\"TestV2TokenRedirect\")\n\tlocalErr := errors.New(\"TestV2TokenRedirect error\")\n\ttokenStub := s.setupStub(c, \"zzzzz\", &redirect, nil)\n\ts.setupStub(c, \"aaaaa\", emptyURL, localErr)\n\ttokens := []string{s.v2Token(\"zzzzz\")}\n\tctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})\n\tresp, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, redirect.String())\n\ttokenStub.CheckCalls(c, returnTo)\n}\n\nfunc (s *LogoutSuite) TestV2TokenError(c *check.C) {\n\ts.setupFederation(\"\")\n\treturnTo := s.goodReturnURL(\"TestV2TokenError\")\n\ttokenErr := errors.New(\"TestV2TokenError error\")\n\ttokenStub := s.setupStub(c, \"zzzzz\", emptyURL, tokenErr)\n\ts.setupStub(c, \"aaaaa\", emptyURL, nil)\n\ttokens := []string{s.v2Token(\"zzzzz\")}\n\tctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})\n\t_, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})\n\tc.Check(err, check.Equals, tokenErr)\n\ttokenStub.CheckCalls(c, returnTo)\n}\n\nfunc (s *LogoutSuite) TestV2TokenLocalRedirect(c *check.C) {\n\ts.setupFederation(\"\")\n\tredirect := url.URL{Scheme: \"https\", Host: \"example.com\"}\n\ttokenStub := s.setupStub(c, \"zzzzz\", emptyURL, nil)\n\tlocalStub := 
s.setupStub(c, \"aaaaa\", &redirect, nil)\n\ttokens := []string{s.v2Token(\"zzzzz\")}\n\tctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})\n\tresp, err := s.fed.Logout(ctx, arvados.LogoutOptions{})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, redirect.String())\n\ttokenStub.CheckCalls(c, emptyURL)\n\tlocalStub.CheckCalls(c, emptyURL)\n}\n\nfunc (s *LogoutSuite) TestV2TokenLocalError(c *check.C) {\n\ts.setupFederation(\"\")\n\ttokenErr := errors.New(\"TestV2TokenLocalError error\")\n\ttokenStub := s.setupStub(c, \"zzzzz\", emptyURL, nil)\n\tlocalStub := s.setupStub(c, \"aaaaa\", emptyURL, tokenErr)\n\ttokens := []string{s.v2Token(\"zzzzz\")}\n\tctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})\n\t_, err := s.fed.Logout(ctx, arvados.LogoutOptions{})\n\tc.Check(err, check.Equals, tokenErr)\n\ttokenStub.CheckCalls(c, emptyURL)\n\tlocalStub.CheckCalls(c, emptyURL)\n}\n\nfunc (s *LogoutSuite) TestV2LocalTokenRedirect(c *check.C) {\n\ts.setupFederation(\"\")\n\tredirect := url.URL{Scheme: \"https\", Host: \"example.com\"}\n\treturnTo := s.goodReturnURL(\"TestV2LocalTokenRedirect\")\n\tlocalStub := s.setupStub(c, \"aaaaa\", &redirect, nil)\n\ttokens := []string{s.v2Token(\"aaaaa\")}\n\tctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})\n\tresp, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, redirect.String())\n\tlocalStub.CheckCalls(c, returnTo)\n}\n\nfunc (s *LogoutSuite) TestV2LocalTokenError(c *check.C) {\n\ts.setupFederation(\"\")\n\treturnTo := s.goodReturnURL(\"TestV2LocalTokenError\")\n\ttokenErr := errors.New(\"TestV2LocalTokenError error\")\n\tlocalStub := s.setupStub(c, \"aaaaa\", emptyURL, tokenErr)\n\ttokens := []string{s.v2Token(\"aaaaa\")}\n\tctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: tokens})\n\t_, err := s.fed.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo.String()})\n\tc.Check(err, check.Equals, tokenErr)\n\tlocalStub.CheckCalls(c, returnTo)\n}\n"
  },
  {
    "path": "lib/controller/federation/user_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage federation\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"math\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&UserSuite{})\n\ntype UserSuite struct {\n\tFederationSuite\n}\n\nfunc (s *UserSuite) TestLoginClusterUserList(c *check.C) {\n\ts.cluster.ClusterID = \"local\"\n\ts.cluster.Login.LoginCluster = \"zzzzz\"\n\ts.fed = New(s.ctx, s.cluster, nil, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)\n\ts.addDirectRemote(c, \"zzzzz\", rpc.NewConn(\"zzzzz\", &url.URL{Scheme: \"https\", Host: os.Getenv(\"ARVADOS_API_HOST\")}, true, rpc.PassthroughTokenProvider))\n\n\tfor _, updateFail := range []bool{false, true} {\n\t\tfor _, opts := range []arvados.ListOptions{\n\t\t\t{Offset: 0, Limit: -1, Select: nil},\n\t\t\t{Offset: 0, Limit: math.MaxInt64, Select: nil},\n\t\t\t{Offset: 1, Limit: 1, Select: nil},\n\t\t\t{Offset: 0, Limit: 2, Select: []string{\"uuid\"}},\n\t\t\t{Offset: 0, Limit: 2, Select: []string{\"uuid\", \"email\"}},\n\t\t} {\n\t\t\tc.Logf(\"updateFail %v, opts %#v\", updateFail, opts)\n\t\t\tspy := arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)\n\t\t\tstub := &arvadostest.APIStub{Error: errors.New(\"local cluster failure\")}\n\t\t\tif updateFail {\n\t\t\t\ts.fed.local = stub\n\t\t\t} else {\n\t\t\t\ts.fed.local = rpc.NewConn(s.cluster.ClusterID, spy.URL, true, rpc.PassthroughTokenProvider)\n\t\t\t}\n\t\t\tuserlist, err := s.fed.UserList(s.ctx, opts)\n\t\t\tif err != nil {\n\t\t\t\tc.Logf(\"... UserList failed %q\", err)\n\t\t\t}\n\t\t\tif updateFail && err == nil {\n\t\t\t\t// All local updates fail, so the only\n\t\t\t\t// cases expected to succeed are the\n\t\t\t\t// ones with 0 results.\n\t\t\t\tc.Check(userlist.Items, check.HasLen, 0)\n\t\t\t\tc.Check(stub.Calls(nil), check.HasLen, 0)\n\t\t\t} else if updateFail {\n\t\t\t\tc.Logf(\"... err %#v\", err)\n\t\t\t\tcalls := stub.Calls(stub.UserBatchUpdate)\n\t\t\t\tif c.Check(calls, check.HasLen, 1) {\n\t\t\t\t\tc.Logf(\"... 
stub.UserUpdate called with options: %#v\", calls[0].Options)\n\t\t\t\t\tshouldUpdate := map[string]bool{\n\t\t\t\t\t\t\"uuid\":       false,\n\t\t\t\t\t\t\"email\":      true,\n\t\t\t\t\t\t\"first_name\": true,\n\t\t\t\t\t\t\"last_name\":  true,\n\t\t\t\t\t\t\"is_admin\":   true,\n\t\t\t\t\t\t\"is_active\":  true,\n\t\t\t\t\t\t\"prefs\":      true,\n\t\t\t\t\t\t// can't safely update locally\n\t\t\t\t\t\t\"owner_uuid\":   false,\n\t\t\t\t\t\t\"identity_url\": false,\n\t\t\t\t\t\t// virtual attrs\n\t\t\t\t\t\t\"full_name\":  false,\n\t\t\t\t\t\t\"is_invited\": true,\n\t\t\t\t\t}\n\t\t\t\t\tif opts.Select != nil {\n\t\t\t\t\t\t// Only the selected\n\t\t\t\t\t\t// fields (minus uuid)\n\t\t\t\t\t\t// should be updated.\n\t\t\t\t\t\tfor k := range shouldUpdate {\n\t\t\t\t\t\t\tshouldUpdate[k] = false\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor _, k := range opts.Select {\n\t\t\t\t\t\t\tif k != \"uuid\" {\n\t\t\t\t\t\t\t\tshouldUpdate[k] = true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tvar uuid string\n\t\t\t\t\tfor uuid = range calls[0].Options.(arvados.UserBatchUpdateOptions).Updates {\n\t\t\t\t\t}\n\t\t\t\t\tfor k, shouldFind := range shouldUpdate {\n\t\t\t\t\t\t_, found := calls[0].Options.(arvados.UserBatchUpdateOptions).Updates[uuid][k]\n\t\t\t\t\t\tc.Check(found, check.Equals, shouldFind, check.Commentf(\"offending attr: %s\", k))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tupdates := 0\n\t\t\t\tfor _, d := range spy.RequestDumps {\n\t\t\t\t\td := string(d)\n\t\t\t\t\tif strings.Contains(d, \"PATCH /arvados/v1/users/batch\") {\n\t\t\t\t\t\tc.Check(d, check.Matches, `(?ms).*Authorization: Bearer `+arvadostest.SystemRootToken+`.*`)\n\t\t\t\t\t\tupdates++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\tc.Check(updates, check.Equals, 1)\n\t\t\t\tc.Logf(\"... response items %#v\", userlist.Items)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *UserSuite) TestLoginClusterUserGet(c *check.C) {\n\ts.cluster.ClusterID = \"local\"\n\ts.cluster.Login.LoginCluster = \"zzzzz\"\n\ts.fed = New(s.ctx, s.cluster, nil, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)\n\ts.addDirectRemote(c, \"zzzzz\", rpc.NewConn(\"zzzzz\", &url.URL{Scheme: \"https\", Host: os.Getenv(\"ARVADOS_API_HOST\")}, true, rpc.PassthroughTokenProvider))\n\n\topts := arvados.GetOptions{UUID: \"zzzzz-tpzed-xurymjxw79nv3jz\", Select: []string{\"uuid\", \"email\"}}\n\n\tstub := &arvadostest.APIStub{Error: errors.New(\"local cluster failure\")}\n\ts.fed.local = stub\n\ts.fed.UserGet(s.ctx, opts)\n\n\tcalls := stub.Calls(stub.UserBatchUpdate)\n\tif c.Check(calls, check.HasLen, 1) {\n\t\tc.Logf(\"... 
stub.UserUpdate called with options: %#v\", calls[0].Options)\n\t\tshouldUpdate := map[string]bool{\n\t\t\t\"uuid\":       false,\n\t\t\t\"email\":      true,\n\t\t\t\"first_name\": true,\n\t\t\t\"last_name\":  true,\n\t\t\t\"is_admin\":   true,\n\t\t\t\"is_active\":  true,\n\t\t\t\"prefs\":      true,\n\t\t\t// can't safely update locally\n\t\t\t\"owner_uuid\":   false,\n\t\t\t\"identity_url\": false,\n\t\t\t// virtual attrs\n\t\t\t\"full_name\":  false,\n\t\t\t\"is_invited\": true,\n\t\t}\n\t\tif opts.Select != nil {\n\t\t\t// Only the selected\n\t\t\t// fields (minus uuid)\n\t\t\t// should be updated.\n\t\t\tfor k := range shouldUpdate {\n\t\t\t\tshouldUpdate[k] = false\n\t\t\t}\n\t\t\tfor _, k := range opts.Select {\n\t\t\t\tif k != \"uuid\" {\n\t\t\t\t\tshouldUpdate[k] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tvar uuid string\n\t\tfor uuid = range calls[0].Options.(arvados.UserBatchUpdateOptions).Updates {\n\t\t}\n\t\tfor k, shouldFind := range shouldUpdate {\n\t\t\t_, found := calls[0].Options.(arvados.UserBatchUpdateOptions).Updates[uuid][k]\n\t\t\tc.Check(found, check.Equals, shouldFind, check.Commentf(\"offending attr: %s\", k))\n\t\t}\n\t}\n\n}\n\nfunc (s *UserSuite) TestLoginClusterUserListBypassFederation(c *check.C) {\n\ts.cluster.ClusterID = \"local\"\n\ts.cluster.Login.LoginCluster = \"zzzzz\"\n\ts.fed = New(s.ctx, s.cluster, nil, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)\n\ts.addDirectRemote(c, \"zzzzz\", rpc.NewConn(\"zzzzz\", &url.URL{Scheme: \"https\", Host: os.Getenv(\"ARVADOS_API_HOST\")},\n\t\ttrue, rpc.PassthroughTokenProvider))\n\n\tspy := arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)\n\ts.fed.local = rpc.NewConn(s.cluster.ClusterID, spy.URL, true, rpc.PassthroughTokenProvider)\n\n\t_, err := s.fed.UserList(s.ctx, arvados.ListOptions{Offset: 0, Limit: math.MaxInt64, Select: nil, BypassFederation: true})\n\t// this will fail because it is not using a root token\n\tc.Check(err.(*arvados.TransactionError).StatusCode, check.Equals, 403)\n\n\t// Now use SystemRootToken\n\tctx := context.Background()\n\tctx = ctxlog.Context(ctx, ctxlog.TestLogger(c))\n\tctx = auth.NewContext(ctx, &auth.Credentials{Tokens: []string{arvadostest.SystemRootToken}})\n\n\t// Assert that it did not try to batch update users.\n\t_, err = s.fed.UserList(ctx, arvados.ListOptions{Offset: 0, Limit: math.MaxInt64, Select: nil, BypassFederation: true})\n\tfor _, d := range spy.RequestDumps {\n\t\td := string(d)\n\t\tif strings.Contains(d, \"PATCH /arvados/v1/users/batch\") {\n\t\t\tc.Fail()\n\t\t}\n\t}\n\tc.Check(err, check.IsNil)\n}\n\n// userAttrsCachedFromLoginCluster must have an entry for every field\n// in the User struct.\nfunc (s *UserSuite) TestUserAttrsUpdateWhitelist(c *check.C) {\n\tbuf, err := json.Marshal(&arvados.User{})\n\tc.Assert(err, check.IsNil)\n\tvar allFields map[string]interface{}\n\terr = json.Unmarshal(buf, &allFields)\n\tc.Assert(err, check.IsNil)\n\tfor k := range allFields {\n\t\t_, ok := userAttrsCachedFromLoginCluster[k]\n\t\tc.Check(ok, check.Equals, true, check.Commentf(\"field name %q missing from userAttrsCachedFromLoginCluster\", k))\n\t}\n}\n"
  },
  {
    "path": "lib/controller/federation.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage controller\n\nimport (\n\t\"bytes\"\n\t\"database/sql\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"mime\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/jmcvetta/randutil\"\n)\n\nvar pathPattern = `^/arvados/v1/%s(/([0-9a-z]{5})-%s-[0-9a-z]{15})?(.*)$`\nvar wfRe = regexp.MustCompile(fmt.Sprintf(pathPattern, \"workflows\", \"7fd4e\"))\nvar containersRe = regexp.MustCompile(fmt.Sprintf(pathPattern, \"containers\", \"dz642\"))\nvar containerRequestsRe = regexp.MustCompile(fmt.Sprintf(pathPattern, \"container_requests\", \"xvhdp\"))\nvar collectionsRe = regexp.MustCompile(fmt.Sprintf(pathPattern, \"collections\", \"4zz18\"))\nvar collectionsByPDHRe = regexp.MustCompile(`^/arvados/v1/collections/([0-9a-fA-F]{32}\\+[0-9]+)+$`)\nvar linksRe = regexp.MustCompile(fmt.Sprintf(pathPattern, \"links\", \"o0j2j\"))\n\nfunc (h *Handler) remoteClusterRequest(remoteID string, req *http.Request) (*http.Response, error) {\n\tremote, ok := h.Cluster.RemoteClusters[remoteID]\n\tif !ok {\n\t\treturn nil, HTTPError{fmt.Sprintf(\"no proxy available for cluster %v\", remoteID), http.StatusNotFound}\n\t}\n\tscheme := remote.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"https\"\n\t}\n\tsaltedReq, err := h.saltAuthToken(req, remoteID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\turlOut := &url.URL{\n\t\tScheme:   scheme,\n\t\tHost:     remote.Host,\n\t\tPath:     saltedReq.URL.Path,\n\t\tRawPath:  saltedReq.URL.RawPath,\n\t\tRawQuery: saltedReq.URL.RawQuery,\n\t}\n\tclient := h.secureClient\n\tif remote.Insecure {\n\t\tclient = h.insecureClient\n\t}\n\treturn h.proxy.Do(saltedReq, urlOut, client)\n}\n\n// Buffer request body, parse form parameters in request, and then\n// replace original body with the buffer so it can be re-read by\n// downstream proxy steps.\nfunc loadParamsFromForm(req *http.Request) error {\n\tvar postBody *bytes.Buffer\n\tif ct := req.Header.Get(\"Content-Type\"); ct == \"\" {\n\t\t// Assume application/octet-stream, i.e., no form to parse.\n\t} else if ct, _, err := mime.ParseMediaType(ct); err != nil {\n\t\treturn err\n\t} else if ct == \"application/x-www-form-urlencoded\" && req.Body != nil {\n\t\tvar cl int64\n\t\tif req.ContentLength > 0 {\n\t\t\tcl = req.ContentLength\n\t\t}\n\t\tpostBody = bytes.NewBuffer(make([]byte, 0, cl))\n\t\toriginalBody := req.Body\n\t\tdefer originalBody.Close()\n\t\treq.Body = ioutil.NopCloser(io.TeeReader(req.Body, postBody))\n\t}\n\n\terr := req.ParseForm()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif req.Body != nil && postBody != nil {\n\t\treq.Body = ioutil.NopCloser(postBody)\n\t}\n\treturn nil\n}\n\nfunc (h *Handler) setupProxyRemoteCluster(next http.Handler) http.Handler {\n\tmux := http.NewServeMux()\n\n\twfHandler := &genericFederatedRequestHandler{next, h, wfRe, nil}\n\tcontainersHandler := &genericFederatedRequestHandler{next, h, containersRe, nil}\n\tlinksRequestsHandler := &genericFederatedRequestHandler{next, h, linksRe, nil}\n\n\tmux.Handle(\"/arvados/v1/workflows\", wfHandler)\n\tmux.Handle(\"/arvados/v1/workflows/\", wfHandler)\n\tmux.Handle(\"/arvados/v1/containers\", containersHandler)\n\tmux.Handle(\"/arvados/v1/containers/\", containersHandler)\n\tmux.Handle(\"/arvados/v1/links\", 
linksRequestsHandler)\n\tmux.Handle(\"/arvados/v1/links/\", linksRequestsHandler)\n\tmux.Handle(\"/\", next)\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tparts := strings.Split(req.Header.Get(\"Authorization\"), \"/\")\n\t\talreadySalted := (len(parts) == 3 && parts[0] == \"Bearer v2\" && len(parts[2]) == 40)\n\n\t\tif alreadySalted ||\n\t\t\tstrings.Contains(req.Header.Get(\"Via\"), \"arvados-controller\") {\n\t\t\t// The token is already salted, or this is a\n\t\t\t// request from another instance of\n\t\t\t// arvados-controller.  In either case, we\n\t\t\t// don't want to proxy this query, so just\n\t\t\t// continue down the instance handler stack.\n\t\t\tnext.ServeHTTP(w, req)\n\t\t\treturn\n\t\t}\n\n\t\tmux.ServeHTTP(w, req)\n\t})\n}\n\ntype CurrentUser struct {\n\tAuthorization arvados.APIClientAuthorization\n\tUUID          string\n}\n\n// validateAPItoken extracts the token from the provided http request,\n// checks it against the api_client_authorizations table in the database,\n// and fills in the token scope and user UUID.  Does not handle remote\n// tokens unless they are already in the database and not expired.\n//\n// Return values are:\n//\n// nil, false, non-nil -- if there was an internal error\n//\n// nil, false, nil -- if the token is invalid\n//\n// non-nil, true, nil -- if the token is valid\nfunc (h *Handler) validateAPItoken(req *http.Request, token string) (*CurrentUser, bool, error) {\n\tuser := CurrentUser{Authorization: arvados.APIClientAuthorization{APIToken: token}}\n\tdb, err := h.dbConnector.GetDB(req.Context())\n\tif err != nil {\n\t\tctxlog.FromContext(req.Context()).WithError(err).Debugf(\"validateAPItoken(%s): database error\", token)\n\t\treturn nil, false, err\n\t}\n\n\tvar uuid string\n\tif strings.HasPrefix(token, \"v2/\") {\n\t\tsp := strings.Split(token, \"/\")\n\t\tuuid = sp[1]\n\t\ttoken = sp[2]\n\t}\n\tuser.Authorization.APIToken = token\n\tvar scopes string\n\terr = db.QueryRowContext(req.Context(), `\n\t\tSELECT api_client_authorizations.uuid, api_client_authorizations.scopes, users.uuid\n\t\tFROM api_client_authorizations\n\t\tJOIN users on api_client_authorizations.user_id=users.id\n\t\tWHERE api_token=$1\n\t\t\tAND (expires_at IS NULL OR expires_at > current_timestamp AT TIME ZONE 'UTC')\n\t\t\tAND (refreshes_at IS NULL OR refreshes_at > current_timestamp AT TIME ZONE 'UTC')\n\t\tLIMIT 1`, token).Scan(&user.Authorization.UUID, &scopes, &user.UUID)\n\tif err == sql.ErrNoRows {\n\t\tctxlog.FromContext(req.Context()).Debugf(\"validateAPItoken(%s): not found in database\", token)\n\t\treturn nil, false, nil\n\t} else if err != nil {\n\t\tctxlog.FromContext(req.Context()).WithError(err).Debugf(\"validateAPItoken(%s): database error\", token)\n\t\treturn nil, false, err\n\t}\n\tif uuid != \"\" && user.Authorization.UUID != uuid {\n\t\t// secret part matches, but UUID doesn't -- somewhat surprising\n\t\tctxlog.FromContext(req.Context()).Debugf(\"validateAPItoken(%s): secret part found, but with different UUID: %s\", token, user.Authorization.UUID)\n\t\treturn nil, false, nil\n\t}\n\terr = json.Unmarshal([]byte(scopes), &user.Authorization.Scopes)\n\tif err != nil {\n\t\tctxlog.FromContext(req.Context()).WithError(err).Debugf(\"validateAPItoken(%s): error parsing scopes from db\", token)\n\t\treturn nil, false, err\n\t}\n\tctxlog.FromContext(req.Context()).Debugf(\"validateAPItoken(%s): ok\", token)\n\treturn &user, true, nil\n}\n\nfunc (h *Handler) createAPItoken(req *http.Request, userUUID string, scopes 
[]string) (*arvados.APIClientAuthorization, error) {\n\tdb, err := h.dbConnector.GetDB(req.Context())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trd, err := randutil.String(15, \"abcdefghijklmnopqrstuvwxyz0123456789\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tuuid := fmt.Sprintf(\"%v-gj3su-%v\", h.Cluster.ClusterID, rd)\n\ttoken, err := randutil.String(50, \"abcdefghijklmnopqrstuvwxyz0123456789\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(scopes) == 0 {\n\t\tscopes = append(scopes, \"all\")\n\t}\n\tscopesjson, err := json.Marshal(scopes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = db.ExecContext(req.Context(),\n\t\t`INSERT INTO api_client_authorizations\n(uuid, api_token, expires_at, scopes,\nuser_id,\ncreated_at, updated_at)\nVALUES ($1, $2, CURRENT_TIMESTAMP AT TIME ZONE 'UTC' + INTERVAL '2 weeks', $3,\n(SELECT id FROM users WHERE users.uuid=$4 LIMIT 1),\nCURRENT_TIMESTAMP AT TIME ZONE 'UTC', CURRENT_TIMESTAMP AT TIME ZONE 'UTC')`,\n\t\tuuid, token, string(scopesjson), userUUID)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &arvados.APIClientAuthorization{\n\t\tUUID:     uuid,\n\t\tAPIToken: token,\n\t\tScopes:   scopes}, nil\n}\n\n// Extract the auth token supplied in req, and replace it with a\n// salted token for the remote cluster.\nfunc (h *Handler) saltAuthToken(req *http.Request, remote string) (updatedReq *http.Request, err error) {\n\tupdatedReq = (&http.Request{\n\t\tMethod:        req.Method,\n\t\tURL:           req.URL,\n\t\tHeader:        req.Header,\n\t\tBody:          req.Body,\n\t\tContentLength: req.ContentLength,\n\t\tHost:          req.Host,\n\t}).WithContext(req.Context())\n\n\tcreds := auth.NewCredentials()\n\tcreds.LoadTokensFromHTTPRequest(updatedReq)\n\tif len(creds.Tokens) == 0 && updatedReq.Header.Get(\"Content-Type\") == \"application/x-www-form-urlencoded\" {\n\t\t// Override ParseForm's 10MiB limit by ensuring\n\t\t// req.Body is a *http.maxBytesReader.\n\t\tupdatedReq.Body = http.MaxBytesReader(nil, updatedReq.Body, 1<<28) // 256MiB. TODO: use MaxRequestSize from discovery doc or config.\n\t\tif err := creds.LoadTokensFromHTTPRequestBody(updatedReq); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Replace req.Body with a buffer that re-encodes the\n\t\t// form without api_token, in case we end up\n\t\t// forwarding the request.\n\t\tif updatedReq.PostForm != nil {\n\t\t\tupdatedReq.PostForm.Del(\"api_token\")\n\t\t}\n\t\tupdatedReq.Body = ioutil.NopCloser(bytes.NewBufferString(updatedReq.PostForm.Encode()))\n\t}\n\tif len(creds.Tokens) == 0 {\n\t\treturn updatedReq, nil\n\t}\n\n\tctxlog.FromContext(req.Context()).Debugf(\"saltAuthToken: cluster %s token %s remote %s\", h.Cluster.ClusterID, creds.Tokens[0], remote)\n\ttoken, err := auth.SaltToken(creds.Tokens[0], remote)\n\n\tif err == auth.ErrObsoleteToken || err == auth.ErrTokenFormat {\n\t\t// If the token exists in our own database for our own\n\t\t// user, salt it for the remote. 
Otherwise, assume it\n\t\t// was issued by the remote, and pass it through\n\t\t// unmodified.\n\t\tcurrentUser, ok, err := h.validateAPItoken(req, creds.Tokens[0])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if !ok || strings.HasPrefix(currentUser.UUID, remote) {\n\t\t\t// Unknown, or cached + belongs to remote;\n\t\t\t// pass through unmodified.\n\t\t\ttoken = creds.Tokens[0]\n\t\t} else {\n\t\t\t// Found; make V2 version and salt it.\n\t\t\ttoken, err = auth.SaltToken(currentUser.Authorization.TokenV2(), remote)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\tupdatedReq.Header = http.Header{}\n\tfor k, v := range req.Header {\n\t\tif k != \"Authorization\" {\n\t\t\tupdatedReq.Header[k] = v\n\t\t}\n\t}\n\tupdatedReq.Header.Set(\"Authorization\", \"Bearer \"+token)\n\n\t// Remove api_token=... from the query string, in case we\n\t// end up forwarding the request.\n\tif values, err := url.ParseQuery(updatedReq.URL.RawQuery); err != nil {\n\t\treturn nil, err\n\t} else if _, ok := values[\"api_token\"]; ok {\n\t\tdelete(values, \"api_token\")\n\t\tupdatedReq.URL = &url.URL{\n\t\t\tScheme:   req.URL.Scheme,\n\t\t\tHost:     req.URL.Host,\n\t\t\tPath:     req.URL.Path,\n\t\t\tRawPath:  req.URL.RawPath,\n\t\t\tRawQuery: values.Encode(),\n\t\t}\n\t}\n\treturn updatedReq, nil\n}\n"
  },
  {
    "path": "lib/controller/federation_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage controller\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\t\"github.com/sirupsen/logrus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nvar _ = check.Suite(&FederationSuite{})\n\ntype FederationSuite struct {\n\tctx    context.Context\n\tcancel context.CancelFunc\n\n\tlog logrus.FieldLogger\n\t// testServer and testHandler are the controller being tested,\n\t// \"zhome\".\n\ttestServer  *httpserver.Server\n\ttestHandler *Handler\n\t// remoteServer (\"zzzzz\") forwards requests to the Rails API\n\t// provided by the integration test environment.\n\tremoteServer *httpserver.Server\n\t// remoteMock (\"zmock\") appends each incoming request to\n\t// remoteMockRequests, and returns 200 with an empty JSON\n\t// object.\n\tremoteMock         *httpserver.Server\n\tremoteMockRequests []http.Request\n}\n\nfunc (s *FederationSuite) SetUpTest(c *check.C) {\n\ts.ctx, s.cancel = context.WithCancel(context.Background())\n\ts.log = ctxlog.TestLogger(c)\n\n\ts.remoteServer = newServerFromIntegrationTestEnv(c)\n\tc.Assert(s.remoteServer.Start(), check.IsNil)\n\n\ts.remoteMock = newServerFromIntegrationTestEnv(c)\n\ts.remoteMock.Server.Handler = http.HandlerFunc(s.remoteMockHandler)\n\tc.Assert(s.remoteMock.Start(), check.IsNil)\n\n\tcluster := &arvados.Cluster{\n\t\tClusterID:  \"zhome\",\n\t\tPostgreSQL: integrationTestCluster().PostgreSQL,\n\t}\n\tcluster.TLS.Insecure = true\n\tcluster.API.MaxItemsPerResponse = 1000\n\tcluster.API.MaxRequestAmplification = 4\n\tcluster.API.RequestTimeout = arvados.Duration(5 * time.Minute)\n\tcluster.Collections.BlobSigning = true\n\tcluster.Collections.BlobSigningKey = arvadostest.BlobSigningKey\n\tcluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour * 24 * 14)\n\tarvadostest.SetServiceURL(&cluster.Services.RailsAPI, \"http://localhost:1/\")\n\tarvadostest.SetServiceURL(&cluster.Services.Controller, \"http://localhost:/\")\n\ts.testHandler = &Handler{Cluster: cluster, BackgroundContext: ctxlog.Context(s.ctx, s.log)}\n\ts.testServer = newServerFromIntegrationTestEnv(c)\n\ts.testServer.Server.BaseContext = func(net.Listener) context.Context {\n\t\treturn ctxlog.Context(context.Background(), s.log)\n\t}\n\ts.testServer.Server.Handler = httpserver.AddRequestIDs(httpserver.LogRequests(s.testHandler))\n\n\tcluster.RemoteClusters = map[string]arvados.RemoteCluster{\n\t\t\"zzzzz\": {\n\t\t\tHost:   s.remoteServer.Addr,\n\t\t\tProxy:  true,\n\t\t\tScheme: \"http\",\n\t\t},\n\t\t\"zmock\": {\n\t\t\tHost:   s.remoteMock.Addr,\n\t\t\tProxy:  true,\n\t\t\tScheme: \"http\",\n\t\t},\n\t\t\"*\": {\n\t\t\tScheme: \"https\",\n\t\t},\n\t}\n\n\tc.Assert(s.testServer.Start(), check.IsNil)\n\n\ts.remoteMockRequests = nil\n}\n\nfunc (s *FederationSuite) remoteMockHandler(w http.ResponseWriter, req *http.Request) {\n\tb := &bytes.Buffer{}\n\tio.Copy(b, req.Body)\n\treq.Body.Close()\n\treq.Body = ioutil.NopCloser(b)\n\ts.remoteMockRequests = append(s.remoteMockRequests, *req)\n\t// Repond 200 with a valid JSON object\n\tfmt.Fprint(w, 
\"{}\")\n}\n\nfunc (s *FederationSuite) TearDownTest(c *check.C) {\n\tif s.remoteServer != nil {\n\t\ts.remoteServer.Close()\n\t}\n\tif s.testServer != nil {\n\t\ts.testServer.Close()\n\t}\n\ts.cancel()\n}\n\nfunc (s *FederationSuite) testRequest(req *http.Request) *httptest.ResponseRecorder {\n\tresp := httptest.NewRecorder()\n\ts.testServer.Server.Handler.ServeHTTP(resp, req)\n\treturn resp\n}\n\nfunc (s *FederationSuite) TestLocalRequest(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/workflows/\"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, \"zzzzz-\", \"zhome-\", 1), nil)\n\tresp := s.testRequest(req).Result()\n\ts.checkHandledLocally(c, resp)\n}\n\nfunc (s *FederationSuite) checkHandledLocally(c *check.C, resp *http.Response) {\n\t// Our \"home\" controller can't handle local requests because\n\t// it doesn't have its own stub/test Rails API, so we rely on\n\t// \"connection refused\" to indicate the controller tried to\n\t// proxy the request to its local Rails API.\n\tc.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)\n\ts.checkJSONErrorMatches(c, resp, `.*connection refused`)\n}\n\nfunc (s *FederationSuite) TestNoAuth(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/workflows/\"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusUnauthorized)\n\ts.checkJSONErrorMatches(c, resp, `Not logged in.*`)\n}\n\nfunc (s *FederationSuite) TestBadAuth(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/workflows/\"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)\n\treq.Header.Set(\"Authorization\", \"Bearer aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\")\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusUnauthorized)\n\ts.checkJSONErrorMatches(c, resp, `Not logged in.*`)\n}\n\nfunc (s *FederationSuite) TestNoAccess(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/workflows/\"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.SpectatorToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusNotFound)\n\ts.checkJSONErrorMatches(c, resp, `.*not found.*`)\n}\n\nfunc (s *FederationSuite) TestGetUnknownRemote(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/workflows/\"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, \"zzzzz-\", \"zz404-\", 1), nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusNotFound)\n\ts.checkJSONErrorMatches(c, resp, `.*no proxy available for cluster zz404`)\n}\n\nfunc (s *FederationSuite) TestRemoteError(c *check.C) {\n\trc := s.testHandler.Cluster.RemoteClusters[\"zzzzz\"]\n\trc.Scheme = \"https\"\n\ts.testHandler.Cluster.RemoteClusters[\"zzzzz\"] = rc\n\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/workflows/\"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)\n\ts.checkJSONErrorMatches(c, resp, `.*HTTP response to HTTPS client`)\n}\n\nfunc (s *FederationSuite) TestGetRemoteWorkflow(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/workflows/\"+arvadostest.WorkflowWithDefinitionYAMLUUID, 
nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tvar wf arvados.Workflow\n\tc.Check(json.NewDecoder(resp.Body).Decode(&wf), check.IsNil)\n\tc.Check(wf.UUID, check.Equals, arvadostest.WorkflowWithDefinitionYAMLUUID)\n\tc.Check(wf.OwnerUUID, check.Equals, arvadostest.ActiveUserUUID)\n}\n\nfunc (s *FederationSuite) TestOptionsMethod(c *check.C) {\n\treq := httptest.NewRequest(\"OPTIONS\", \"/arvados/v1/workflows/\"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)\n\treq.Header.Set(\"Origin\", \"https://example.com\")\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(body), check.Equals, \"\")\n\tc.Check(resp.Header.Get(\"Access-Control-Allow-Origin\"), check.Equals, \"*\")\n\tfor _, hdr := range []string{\"Authorization\", \"Content-Type\"} {\n\t\tc.Check(resp.Header.Get(\"Access-Control-Allow-Headers\"), check.Matches, \".*\"+hdr+\".*\")\n\t}\n\tfor _, method := range []string{\"GET\", \"HEAD\", \"PUT\", \"POST\", \"DELETE\"} {\n\t\tc.Check(resp.Header.Get(\"Access-Control-Allow-Methods\"), check.Matches, \".*\"+method+\".*\")\n\t}\n}\n\nfunc (s *FederationSuite) TestRemoteWithTokenInQuery(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/workflows/\"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, \"zzzzz-\", \"zmock-\", 1)+\"?api_token=\"+arvadostest.ActiveToken, nil)\n\ts.testRequest(req).Result()\n\tc.Assert(s.remoteMockRequests, check.HasLen, 1)\n\tpr := s.remoteMockRequests[0]\n\t// Token is salted and moved from query to Authorization header.\n\tc.Check(pr.URL.String(), check.Not(check.Matches), `.*api_token=.*`)\n\tc.Check(pr.Header.Get(\"Authorization\"), check.Equals, \"Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/7fd31b61f39c0e82a4155592163218272cedacdc\")\n}\n\nfunc (s *FederationSuite) TestLocalTokenSalted(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\tfor _, path := range []string{\n\t\t// During the transition to the strongly typed\n\t\t// controller implementation (#14287), workflows and\n\t\t// collections test different code paths.\n\t\t\"/arvados/v1/workflows/\" + strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, \"zzzzz-\", \"zmock-\", 1),\n\t\t\"/arvados/v1/collections/\" + strings.Replace(arvadostest.UserAgreementCollection, \"zzzzz-\", \"zmock-\", 1),\n\t} {\n\t\tc.Log(\"testing path \", path)\n\t\ts.remoteMockRequests = nil\n\t\treq := httptest.NewRequest(\"GET\", path, nil)\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\t\ts.testRequest(req).Result()\n\t\tc.Assert(s.remoteMockRequests, check.HasLen, 1)\n\t\tpr := s.remoteMockRequests[0]\n\t\t// The salted token here has a \"zzzzz-\" UUID instead of a\n\t\t// \"ztest-\" UUID because ztest's local database has the\n\t\t// \"zzzzz-\" test fixtures. 
The \"secret\" part is HMAC(sha1,\n\t\t// arvadostest.ActiveToken, \"zmock\") = \"7fd3...\".\n\t\tc.Check(pr.Header.Get(\"Authorization\"), check.Equals, \"Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/7fd31b61f39c0e82a4155592163218272cedacdc\")\n\t}\n}\n\nfunc (s *FederationSuite) TestRemoteTokenNotSalted(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\t// remoteToken can be any v1 token that doesn't appear in\n\t// ztest's local db.\n\tremoteToken := \"abcdef00000000000000000000000000000000000000000000\"\n\n\tfor _, path := range []string{\n\t\t// During the transition to the strongly typed\n\t\t// controller implementation (#14287), workflows and\n\t\t// collections test different code paths.\n\t\t\"/arvados/v1/workflows/\" + strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, \"zzzzz-\", \"zmock-\", 1),\n\t\t\"/arvados/v1/collections/\" + strings.Replace(arvadostest.UserAgreementCollection, \"zzzzz-\", \"zmock-\", 1),\n\t} {\n\t\tc.Log(\"testing path \", path)\n\t\ts.remoteMockRequests = nil\n\t\treq := httptest.NewRequest(\"GET\", path, nil)\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+remoteToken)\n\t\ts.testRequest(req).Result()\n\t\tc.Assert(s.remoteMockRequests, check.HasLen, 1)\n\t\tpr := s.remoteMockRequests[0]\n\t\tc.Check(pr.Header.Get(\"Authorization\"), check.Equals, \"Bearer \"+remoteToken)\n\t}\n}\n\nfunc (s *FederationSuite) TestWorkflowCRUD(c *check.C) {\n\tvar wf arvados.Workflow\n\t{\n\t\treq := httptest.NewRequest(\"POST\", \"/arvados/v1/workflows\", strings.NewReader(url.Values{\n\t\t\t\"workflow\": {`{\"description\": \"TestCRUD\"}`},\n\t\t}.Encode()))\n\t\treq.Header.Set(\"Content-type\", \"application/x-www-form-urlencoded\")\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\t\trec := httptest.NewRecorder()\n\t\ts.remoteServer.Server.Handler.ServeHTTP(rec, req) // direct to remote -- can't proxy a create req because no uuid\n\t\tresp := rec.Result()\n\t\ts.checkResponseOK(c, resp)\n\t\tjson.NewDecoder(resp.Body).Decode(&wf)\n\n\t\tdefer func() {\n\t\t\treq := httptest.NewRequest(\"DELETE\", \"/arvados/v1/workflows/\"+wf.UUID, nil)\n\t\t\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\t\t\ts.remoteServer.Server.Handler.ServeHTTP(httptest.NewRecorder(), req)\n\t\t}()\n\t\tc.Check(wf.UUID, check.Not(check.Equals), \"\")\n\n\t\tc.Assert(wf.ModifiedAt, check.NotNil)\n\t\tc.Logf(\"wf.ModifiedAt: %v\", wf.ModifiedAt)\n\t\tc.Check(time.Since(*wf.ModifiedAt) < time.Minute, check.Equals, true)\n\t}\n\tfor _, method := range []string{\"PATCH\", \"PUT\", \"POST\"} {\n\t\tform := url.Values{\n\t\t\t\"workflow\": {`{\"description\": \"Updated with ` + method + `\"}`},\n\t\t}\n\t\tif method == \"POST\" {\n\t\t\tform[\"_method\"] = []string{\"PATCH\"}\n\t\t}\n\t\treq := httptest.NewRequest(method, \"/arvados/v1/workflows/\"+wf.UUID, strings.NewReader(form.Encode()))\n\t\treq.Header.Set(\"Content-type\", \"application/x-www-form-urlencoded\")\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\t\tresp := s.testRequest(req).Result()\n\t\ts.checkResponseOK(c, resp)\n\t\terr := json.NewDecoder(resp.Body).Decode(&wf)\n\t\tc.Check(err, check.IsNil)\n\n\t\tc.Check(wf.Description, check.Equals, \"Updated with \"+method)\n\t}\n\t{\n\t\treq := httptest.NewRequest(\"DELETE\", \"/arvados/v1/workflows/\"+wf.UUID, nil)\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\t\tresp := s.testRequest(req).Result()\n\t\ts.checkResponseOK(c, resp)\n\t\terr := 
json.NewDecoder(resp.Body).Decode(&wf)\n\t\tc.Check(err, check.IsNil)\n\t}\n\t{\n\t\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/workflows/\"+wf.UUID, nil)\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\t\tresp := s.testRequest(req).Result()\n\t\tc.Check(resp.StatusCode, check.Equals, http.StatusNotFound)\n\t}\n}\n\nfunc (s *FederationSuite) checkResponseOK(c *check.C, resp *http.Response) {\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tc.Logf(\"... response body = %q, %v\\n\", body, err)\n\t}\n}\n\nfunc (s *FederationSuite) checkJSONErrorMatches(c *check.C, resp *http.Response, re string) {\n\tvar jresp httpserver.ErrorResponse\n\terr := json.NewDecoder(resp.Body).Decode(&jresp)\n\tc.Check(err, check.IsNil)\n\tc.Assert(jresp.Errors, check.HasLen, 1)\n\tc.Check(jresp.Errors[0], check.Matches, re)\n}\n\nfunc (s *FederationSuite) localServiceHandler(c *check.C, h http.Handler) *httpserver.Server {\n\tsrv := &httpserver.Server{\n\t\tServer: http.Server{\n\t\t\tHandler: h,\n\t\t},\n\t}\n\tc.Assert(srv.Start(), check.IsNil)\n\tarvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, \"http://\"+srv.Addr)\n\treturn srv\n}\n\nfunc (s *FederationSuite) localServiceReturns404(c *check.C) *httpserver.Server {\n\treturn s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tif req.URL.Path == \"/arvados/v1/api_client_authorizations/current\" {\n\t\t\tif req.Header.Get(\"Authorization\") == \"Bearer \"+arvadostest.ActiveToken {\n\t\t\t\tjson.NewEncoder(w).Encode(arvados.APIClientAuthorization{UUID: arvadostest.ActiveTokenUUID, APIToken: arvadostest.ActiveToken, Scopes: []string{\"all\"}})\n\t\t\t} else {\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t}\n\t\t} else if req.URL.Path == \"/arvados/v1/users/current\" {\n\t\t\tif req.Header.Get(\"Authorization\") == \"Bearer \"+arvadostest.ActiveToken {\n\t\t\t\tjson.NewEncoder(w).Encode(arvados.User{UUID: arvadostest.ActiveUserUUID})\n\t\t\t} else {\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t}\n\t\t} else {\n\t\t\tw.WriteHeader(404)\n\t\t}\n\t}))\n}\n\nfunc (s *FederationSuite) TestGetLocalCollection(c *check.C) {\n\ts.testHandler.Cluster.ClusterID = \"zzzzz\"\n\tarvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, \"https://\"+os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\n\t// HTTP GET\n\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/collections/\"+arvadostest.UserAgreementCollection, nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tvar col arvados.Collection\n\tc.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)\n\tc.Check(col.UUID, check.Equals, arvadostest.UserAgreementCollection)\n\tc.Check(col.ManifestText, check.Matches,\n\t\t`\\. 
6a4ff0499484c6c79c95cd8c566bd25f\\+249025\\+A[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf\n`)\n\n\t// HTTP POST with _method=GET as a form parameter\n\n\treq = httptest.NewRequest(\"POST\", \"/arvados/v1/collections/\"+arvadostest.UserAgreementCollection, bytes.NewBufferString((url.Values{\n\t\t\"_method\": {\"GET\"},\n\t}).Encode()))\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded; charset=UTF-8\")\n\tresp = s.testRequest(req).Result()\n\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tcol = arvados.Collection{}\n\tc.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)\n\tc.Check(col.UUID, check.Equals, arvadostest.UserAgreementCollection)\n\tc.Check(col.ManifestText, check.Matches,\n\t\t`\\. 6a4ff0499484c6c79c95cd8c566bd25f\\+249025\\+A[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf\n`)\n}\n\nfunc (s *FederationSuite) TestGetRemoteCollection(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/collections/\"+arvadostest.UserAgreementCollection, nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tvar col arvados.Collection\n\tc.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)\n\tc.Check(col.UUID, check.Equals, arvadostest.UserAgreementCollection)\n\tc.Check(col.ManifestText, check.Matches,\n\t\t`\\. 6a4ff0499484c6c79c95cd8c566bd25f\\+249025\\+Rzzzzz-[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf\n`)\n}\n\nfunc (s *FederationSuite) TestGetRemoteCollectionError(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/collections/zzzzz-4zz18-fakefakefakefak\", nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusNotFound)\n}\n\nfunc (s *FederationSuite) TestSignedLocatorPattern(c *check.C) {\n\t// Confirm the regular expression identifies other groups of hints correctly\n\tc.Check(keepclient.SignedLocatorRe.FindStringSubmatch(`6a4ff0499484c6c79c95cd8c566bd25f+249025+B1+C2+A05227438989d04712ea9ca1c91b556cef01d5cc7@5ba5405b+D3+E4`),\n\t\tcheck.DeepEquals,\n\t\t[]string{\"6a4ff0499484c6c79c95cd8c566bd25f+249025+B1+C2+A05227438989d04712ea9ca1c91b556cef01d5cc7@5ba5405b+D3+E4\",\n\t\t\t\"6a4ff0499484c6c79c95cd8c566bd25f\",\n\t\t\t\"+249025\",\n\t\t\t\"+B1+C2\", \"+C2\",\n\t\t\t\"+A05227438989d04712ea9ca1c91b556cef01d5cc7@5ba5405b\",\n\t\t\t\"05227438989d04712ea9ca1c91b556cef01d5cc7\", \"5ba5405b\",\n\t\t\t\"+D3+E4\", \"+E4\"})\n}\n\nfunc (s *FederationSuite) TestGetLocalCollectionByPDH(c *check.C) {\n\tarvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, \"https://\"+os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/collections/\"+arvadostest.UserAgreementPDH, nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tvar col arvados.Collection\n\tc.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)\n\tc.Check(col.PortableDataHash, check.Equals, arvadostest.UserAgreementPDH)\n\tc.Check(col.ManifestText, check.Matches,\n\t\t`\\. 
6a4ff0499484c6c79c95cd8c566bd25f\\+249025\\+A[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf\n`)\n}\n\nfunc (s *FederationSuite) TestGetRemoteCollectionByPDH(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/collections/\"+arvadostest.UserAgreementPDH, nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\n\tvar col arvados.Collection\n\tc.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)\n\tc.Check(col.PortableDataHash, check.Equals, arvadostest.UserAgreementPDH)\n\tc.Check(col.ManifestText, check.Matches,\n\t\t`\\. 6a4ff0499484c6c79c95cd8c566bd25f\\+249025\\+Rzzzzz-[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf\n`)\n}\n\nfunc (s *FederationSuite) TestGetCollectionByPDHError(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\n\t// zmock's normal response (200 with an empty body) would\n\t// change the outcome from 404 to 502\n\tdelete(s.testHandler.Cluster.RemoteClusters, \"zmock\")\n\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/collections/99999999999999999999999999999999+99\", nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\n\tresp := s.testRequest(req).Result()\n\tdefer resp.Body.Close()\n\n\tc.Check(resp.StatusCode, check.Equals, http.StatusNotFound)\n}\n\nfunc (s *FederationSuite) TestGetCollectionByPDHErrorBadHash(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\n\t// zmock's normal response (200 with an empty body) would\n\t// change the outcome\n\tdelete(s.testHandler.Cluster.RemoteClusters, \"zmock\")\n\n\tsrv2 := &httpserver.Server{\n\t\tServer: http.Server{\n\t\t\tHandler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\t\tw.WriteHeader(200)\n\t\t\t\t// Return a collection where the hash\n\t\t\t\t// of the manifest text doesn't match\n\t\t\t\t// PDH that was requested.\n\t\t\t\tvar col arvados.Collection\n\t\t\t\tcol.PortableDataHash = \"99999999999999999999999999999999+99\"\n\t\t\t\tcol.ManifestText = `. 
6a4ff0499484c6c79c95cd8c566bd25f\\+249025 0:249025:GNU_General_Public_License,_version_3.pdf\n`\n\t\t\t\tenc := json.NewEncoder(w)\n\t\t\t\tenc.Encode(col)\n\t\t\t}),\n\t\t},\n\t}\n\n\tc.Assert(srv2.Start(), check.IsNil)\n\tdefer srv2.Close()\n\n\t// Direct zzzzz to service that returns a 200 result with a bogus manifest_text\n\ts.testHandler.Cluster.RemoteClusters[\"zzzzz\"] = arvados.RemoteCluster{\n\t\tHost:   srv2.Addr,\n\t\tProxy:  true,\n\t\tScheme: \"http\",\n\t}\n\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/collections/99999999999999999999999999999999+99\", nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\n\tresp := s.testRequest(req).Result()\n\tdefer resp.Body.Close()\n\n\tc.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)\n}\n\nfunc (s *FederationSuite) TestSaltedTokenGetCollectionByPDH(c *check.C) {\n\tarvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, \"https://\"+os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/collections/\"+arvadostest.UserAgreementPDH, nil)\n\treq.Header.Set(\"Authorization\", \"Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/282d7d172b6cfdce364c5ed12ddf7417b2d00065\")\n\tresp := s.testRequest(req).Result()\n\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tvar col arvados.Collection\n\tc.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)\n\tc.Check(col.PortableDataHash, check.Equals, arvadostest.UserAgreementPDH)\n\tc.Check(col.ManifestText, check.Matches,\n\t\t`\\. 6a4ff0499484c6c79c95cd8c566bd25f\\+249025\\+A[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf\n`)\n}\n\nfunc (s *FederationSuite) TestSaltedTokenGetCollectionByPDHError(c *check.C) {\n\tarvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, \"https://\"+os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\n\t// zmock's normal response (200 with an empty body) would\n\t// change the outcome\n\tdelete(s.testHandler.Cluster.RemoteClusters, \"zmock\")\n\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/collections/99999999999999999999999999999999+99\", nil)\n\treq.Header.Set(\"Authorization\", \"Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/282d7d172b6cfdce364c5ed12ddf7417b2d00065\")\n\tresp := s.testRequest(req).Result()\n\n\tc.Check(resp.StatusCode, check.Equals, http.StatusNotFound)\n}\n\nfunc (s *FederationSuite) TestGetRemoteContainerRequest(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/container_requests/\"+arvadostest.QueuedContainerRequestUUID, nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tvar cr arvados.ContainerRequest\n\tc.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)\n\tc.Check(cr.UUID, check.Equals, arvadostest.QueuedContainerRequestUUID)\n\tc.Check(cr.Priority, check.Equals, 1)\n}\n\nfunc (s *FederationSuite) TestUpdateRemoteContainerRequest(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\tsetPri := func(pri int) {\n\t\treq := httptest.NewRequest(\"PATCH\", \"/arvados/v1/container_requests/\"+arvadostest.QueuedContainerRequestUUID,\n\t\t\tstrings.NewReader(fmt.Sprintf(`{\"container_request\": {\"priority\": %d}}`, pri)))\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\t\treq.Header.Set(\"Content-type\", \"application/json\")\n\t\tresp := 
s.testRequest(req).Result()\n\t\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\t\tvar cr arvados.ContainerRequest\n\t\tc.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)\n\t\tc.Check(cr.UUID, check.Equals, arvadostest.QueuedContainerRequestUUID)\n\t\tc.Check(cr.Priority, check.Equals, pri)\n\t}\n\tsetPri(696)\n\tsetPri(1) // Reset fixture so side effect doesn't break other tests.\n}\n\nfunc (s *FederationSuite) TestCreateContainerRequestBadToken(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\t// pass cluster_id via query parameter, this allows arvados-controller\n\t// to avoid parsing the body\n\treq := httptest.NewRequest(\"POST\", \"/arvados/v1/container_requests?cluster_id=zzzzz\",\n\t\tstrings.NewReader(`{\"container_request\":{}}`))\n\treq.Header.Set(\"Authorization\", \"Bearer abcdefg\")\n\treq.Header.Set(\"Content-type\", \"application/json\")\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusForbidden)\n\tvar e map[string][]string\n\tc.Check(json.NewDecoder(resp.Body).Decode(&e), check.IsNil)\n\tc.Check(e[\"errors\"], check.DeepEquals, []string{\"invalid API token\"})\n}\n\nfunc (s *FederationSuite) TestCreateRemoteContainerRequest(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\t// pass cluster_id via query parameter, this allows arvados-controller\n\t// to avoid parsing the body\n\treq := httptest.NewRequest(\"POST\", \"/arvados/v1/container_requests?cluster_id=zzzzz\",\n\t\tstrings.NewReader(`{\n  \"container_request\": {\n    \"name\": \"hello world\",\n    \"state\": \"Uncommitted\",\n    \"output_path\": \"/tmp\",\n    \"container_image\": \"123\",\n    \"command\": [\"abc\"],\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\", \"capacity\": 1000000}}\n  }\n}\n`))\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\treq.Header.Set(\"Content-type\", \"application/json\")\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tvar cr arvados.ContainerRequest\n\tc.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)\n\tc.Check(cr.Name, check.Equals, \"hello world\")\n\tc.Check(strings.HasPrefix(cr.UUID, \"zzzzz-\"), check.Equals, true)\n}\n\n// getCRfromMockRequest returns a ContainerRequest with the content of the\n// request sent to the remote mock. 
This function takes into account the\n// Content-Type and acts accordingly.\nfunc (s *FederationSuite) getCRfromMockRequest(c *check.C) arvados.ContainerRequest {\n\n\t// The body can be JSON formatted or something like:\n\t//  cluster_id=zmock&container_request=%7B%22command%22%3A%5B%22abc%22%5D%2C%22container_image%22%3A%22123%22%2C%22...7D\n\t// or:\n\t//  \"{\\\"container_request\\\":{\\\"command\\\":[\\\"abc\\\"],\\\"container_image\\\":\\\"12...Uncommitted\\\"}}\"\n\n\tvar cr arvados.ContainerRequest\n\tdata, err := ioutil.ReadAll(s.remoteMockRequests[0].Body)\n\tc.Check(err, check.IsNil)\n\n\tif s.remoteMockRequests[0].Header.Get(\"Content-Type\") == \"application/json\" {\n\t\t// legacy code path sends a JSON request body\n\t\tvar answerCR struct {\n\t\t\tContainerRequest arvados.ContainerRequest `json:\"container_request\"`\n\t\t}\n\t\tc.Check(json.Unmarshal(data, &answerCR), check.IsNil)\n\t\tcr = answerCR.ContainerRequest\n\t} else if s.remoteMockRequests[0].Header.Get(\"Content-Type\") == \"application/x-www-form-urlencoded\" {\n\t\t// new code path sends a form-encoded request body with a JSON-encoded parameter value\n\t\tdecodedValue, err := url.ParseQuery(string(data))\n\t\tc.Check(err, check.IsNil)\n\t\tdecodedValueCR := decodedValue.Get(\"container_request\")\n\t\tc.Check(json.Unmarshal([]byte(decodedValueCR), &cr), check.IsNil)\n\t} else {\n\t\t// The mock request needs a Content-Type we can parse.\n\t\tc.Fail()\n\t}\n\n\treturn cr\n}\n\nfunc (s *FederationSuite) TestCreateRemoteContainerRequestCheckRuntimeToken(c *check.C) {\n\t// Send request to zmock and check that outgoing request has\n\t// runtime_token set with a new random v2 token.\n\n\tdefer s.localServiceReturns404(c).Close()\n\treq := httptest.NewRequest(\"POST\", \"/arvados/v1/container_requests?cluster_id=zmock\",\n\t\tstrings.NewReader(`{\n\t  \"container_request\": {\n\t    \"name\": \"hello world\",\n\t    \"state\": \"Uncommitted\",\n\t    \"output_path\": \"/\",\n\t    \"container_image\": \"123\",\n\t    \"command\": [\"abc\"],\n\t    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\", \"capacity\": 1000000}}\n\t  }\n\t}\n\t`))\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveTokenV2)\n\treq.Header.Set(\"Content-type\", \"application/json\")\n\n\t// We replace zhome with zzzzz values (RailsAPI, ClusterID, SystemRootToken).\n\t// The SystemRootToken is needed because we check\n\t// https://[RailsAPI]/arvados/v1/api_client_authorizations/current,\n\t// https://[RailsAPI]/arvados/v1/users/current, and\n\t// https://[RailsAPI]/auth/controller/callback.\n\tarvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, \"https://\"+os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\ts.testHandler.Cluster.ClusterID = \"zzzzz\"\n\ts.testHandler.Cluster.SystemRootToken = arvadostest.SystemRootToken\n\ts.testHandler.Cluster.API.MaxTokenLifetime = arvados.Duration(time.Hour)\n\n\tresp := s.testRequest(req).Result()\n\tc.Assert(resp.StatusCode, check.Equals, http.StatusOK)\n\n\tcr := s.getCRfromMockRequest(c)\n\n\t// Runtime token must match the zzzzz cluster\n\tc.Check(cr.RuntimeToken, check.Matches, \"v2/zzzzz-gj3su-.*\")\n\n\t// The runtime token must be different from the token we made the original request with.\n\tc.Check(cr.RuntimeToken, check.Not(check.Equals), arvadostest.ActiveTokenV2)\n\n\t// Runtime token should not have an expiration based on API.MaxTokenLifetime\n\treq2 := httptest.NewRequest(\"GET\", \"/arvados/v1/api_client_authorizations/current\", nil)\n\treq2.Header.Set(\"Authorization\", \"Bearer 
\"+cr.RuntimeToken)\n\treq2.Header.Set(\"Content-type\", \"application/json\")\n\tresp = s.testRequest(req2).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tvar aca arvados.APIClientAuthorization\n\tc.Check(json.NewDecoder(resp.Body).Decode(&aca), check.IsNil)\n\tc.Check(aca.ExpiresAt, check.NotNil) // Time.Now()+BlobSigningTTL\n\tt := aca.ExpiresAt\n\tc.Check(t.After(time.Now().Add(s.testHandler.Cluster.API.MaxTokenLifetime.Duration())), check.Equals, true)\n\tc.Check(t.Before(time.Now().Add(s.testHandler.Cluster.Collections.BlobSigningTTL.Duration())), check.Equals, true)\n}\n\nfunc (s *FederationSuite) TestCreateRemoteContainerRequestCheckSetRuntimeToken(c *check.C) {\n\t// Send request to zmock and check that outgoing request has\n\t// runtime_token set with the explicitly provided token.\n\n\tdefer s.localServiceReturns404(c).Close()\n\t// pass cluster_id via query parameter, this allows arvados-controller\n\t// to avoid parsing the body\n\treq := httptest.NewRequest(\"POST\", \"/arvados/v1/container_requests?cluster_id=zmock\",\n\t\tstrings.NewReader(`{\n\t  \"container_request\": {\n\t    \"name\": \"hello world\",\n\t    \"state\": \"Uncommitted\",\n\t    \"output_path\": \"/\",\n\t    \"container_image\": \"123\",\n\t    \"command\": [\"abc\"],\n\t    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\", \"capacity\": 1000000}},\n\t    \"runtime_token\": \"xyz\"\n\t  }\n\t}\n\t`))\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\treq.Header.Set(\"Content-type\", \"application/json\")\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\n\tcr := s.getCRfromMockRequest(c)\n\n\t// After mocking around now making sure the runtime_token we sent is still there.\n\tc.Check(cr.RuntimeToken, check.Equals, \"xyz\")\n}\n\nfunc (s *FederationSuite) TestCreateRemoteContainerRequestError(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\t// pass cluster_id via query parameter, this allows arvados-controller\n\t// to avoid parsing the body\n\treq := httptest.NewRequest(\"POST\", \"/arvados/v1/container_requests?cluster_id=zz404\",\n\t\tstrings.NewReader(`{\n  \"container_request\": {\n    \"name\": \"hello world\",\n    \"state\": \"Uncommitted\",\n    \"output_path\": \"/\",\n    \"container_image\": \"123\",\n    \"command\": [\"abc\"],\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\", \"capacity\": 1000000}}\n  }\n}\n`))\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\treq.Header.Set(\"Content-type\", \"application/json\")\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusNotFound)\n}\n\nfunc (s *FederationSuite) TestGetRemoteContainer(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/containers/\"+arvadostest.QueuedContainerUUID, nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req)\n\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\tvar cn arvados.Container\n\tc.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)\n\tc.Check(cn.UUID, check.Equals, arvadostest.QueuedContainerUUID)\n}\n\nfunc (s *FederationSuite) TestListRemoteContainer(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/containers?count=none&filters=\"+\n\t\turl.QueryEscape(fmt.Sprintf(`[[\"uuid\", \"in\", [\"%v\"]]]`, arvadostest.QueuedContainerUUID)), 
nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tvar cn arvados.ContainerList\n\tc.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)\n\tc.Assert(cn.Items, check.HasLen, 1)\n\tc.Check(cn.Items[0].UUID, check.Equals, arvadostest.QueuedContainerUUID)\n}\n\nfunc (s *FederationSuite) TestListMultiRemoteContainers(c *check.C) {\n\tdefer s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tbd, _ := ioutil.ReadAll(req.Body)\n\t\tc.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr5queuedcontnr%22%5D%5D%5D&select=%5B%22uuid%22%2C+%22command%22%5D`)\n\t\tw.WriteHeader(200)\n\t\tw.Write([]byte(`{\"kind\": \"arvados#containerList\", \"items\": [{\"uuid\": \"zhome-xvhdp-cr5queuedcontnr\", \"command\": [\"abc\"]}]}`))\n\t})).Close()\n\treq := httptest.NewRequest(\"GET\", fmt.Sprintf(\"/arvados/v1/containers?count=none&filters=%s&select=%s\",\n\t\turl.QueryEscape(fmt.Sprintf(`[[\"uuid\", \"in\", [\"%v\", \"zhome-xvhdp-cr5queuedcontnr\"]]]`,\n\t\t\tarvadostest.QueuedContainerUUID)),\n\t\turl.QueryEscape(`[\"uuid\", \"command\"]`)),\n\t\tnil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tvar cn arvados.ContainerList\n\tc.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)\n\tc.Check(cn.Items, check.HasLen, 2)\n\tmp := make(map[string]arvados.Container)\n\tfor _, cr := range cn.Items {\n\t\tmp[cr.UUID] = cr\n\t}\n\tc.Check(mp[arvadostest.QueuedContainerUUID].Command, check.DeepEquals, []string{\"echo\", \"hello\"})\n\tc.Check(mp[arvadostest.QueuedContainerUUID].ContainerImage, check.Equals, \"\")\n\tc.Check(mp[\"zhome-xvhdp-cr5queuedcontnr\"].Command, check.DeepEquals, []string{\"abc\"})\n\tc.Check(mp[\"zhome-xvhdp-cr5queuedcontnr\"].ContainerImage, check.Equals, \"\")\n}\n\nfunc (s *FederationSuite) TestListMultiRemoteContainerError(c *check.C) {\n\tdefer s.localServiceReturns404(c).Close()\n\treq := httptest.NewRequest(\"GET\", fmt.Sprintf(\"/arvados/v1/containers?count=none&filters=%s&select=%s\",\n\t\turl.QueryEscape(fmt.Sprintf(`[[\"uuid\", \"in\", [\"%v\", \"zhome-xvhdp-cr5queuedcontnr\"]]]`,\n\t\t\tarvadostest.QueuedContainerUUID)),\n\t\turl.QueryEscape(`[\"uuid\", \"command\"]`)),\n\t\tnil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)\n\ts.checkJSONErrorMatches(c, resp, `error fetching from zhome \\(404 Not Found\\): EOF`)\n}\n\nfunc (s *FederationSuite) TestListMultiRemoteContainersPaged(c *check.C) {\n\n\tcallCount := 0\n\tdefer s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tbd, _ := ioutil.ReadAll(req.Body)\n\t\tif callCount == 0 {\n\t\t\tc.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr5queuedcontnr%22%2C%22zhome-xvhdp-cr6queuedcontnr%22%5D%5D%5D`)\n\t\t\tw.WriteHeader(200)\n\t\t\tw.Write([]byte(`{\"kind\": \"arvados#containerList\", \"items\": [{\"uuid\": \"zhome-xvhdp-cr5queuedcontnr\", \"command\": [\"abc\"]}]}`))\n\t\t} else if callCount == 1 {\n\t\t\tc.Check(string(bd), check.Equals, 
`_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr6queuedcontnr%22%5D%5D%5D`)\n\t\t\tw.WriteHeader(200)\n\t\t\tw.Write([]byte(`{\"kind\": \"arvados#containerList\", \"items\": [{\"uuid\": \"zhome-xvhdp-cr6queuedcontnr\", \"command\": [\"efg\"]}]}`))\n\t\t}\n\t\tcallCount++\n\t})).Close()\n\treq := httptest.NewRequest(\"GET\", fmt.Sprintf(\"/arvados/v1/containers?count=none&filters=%s\",\n\t\turl.QueryEscape(fmt.Sprintf(`[[\"uuid\", \"in\", [\"%v\", \"zhome-xvhdp-cr5queuedcontnr\", \"zhome-xvhdp-cr6queuedcontnr\"]]]`,\n\t\t\tarvadostest.QueuedContainerUUID))),\n\t\tnil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tc.Check(callCount, check.Equals, 2)\n\tvar cn arvados.ContainerList\n\tc.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)\n\tc.Check(cn.Items, check.HasLen, 3)\n\tmp := make(map[string]arvados.Container)\n\tfor _, cr := range cn.Items {\n\t\tmp[cr.UUID] = cr\n\t}\n\tc.Check(mp[arvadostest.QueuedContainerUUID].Command, check.DeepEquals, []string{\"echo\", \"hello\"})\n\tc.Check(mp[\"zhome-xvhdp-cr5queuedcontnr\"].Command, check.DeepEquals, []string{\"abc\"})\n\tc.Check(mp[\"zhome-xvhdp-cr6queuedcontnr\"].Command, check.DeepEquals, []string{\"efg\"})\n}\n\nfunc (s *FederationSuite) TestListMultiRemoteContainersMissing(c *check.C) {\n\n\tcallCount := 0\n\tdefer s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tbd, _ := ioutil.ReadAll(req.Body)\n\t\tif callCount == 0 {\n\t\t\tc.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr5queuedcontnr%22%2C%22zhome-xvhdp-cr6queuedcontnr%22%5D%5D%5D`)\n\t\t\tw.WriteHeader(200)\n\t\t\tw.Write([]byte(`{\"kind\": \"arvados#containerList\", \"items\": [{\"uuid\": \"zhome-xvhdp-cr6queuedcontnr\", \"command\": [\"efg\"]}]}`))\n\t\t} else if callCount == 1 {\n\t\t\tc.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr5queuedcontnr%22%5D%5D%5D`)\n\t\t\tw.WriteHeader(200)\n\t\t\tw.Write([]byte(`{\"kind\": \"arvados#containerList\", \"items\": []}`))\n\t\t}\n\t\tcallCount++\n\t})).Close()\n\treq := httptest.NewRequest(\"GET\", fmt.Sprintf(\"/arvados/v1/containers?count=none&filters=%s\",\n\t\turl.QueryEscape(fmt.Sprintf(`[[\"uuid\", \"in\", [\"%v\", \"zhome-xvhdp-cr5queuedcontnr\", \"zhome-xvhdp-cr6queuedcontnr\"]]]`,\n\t\t\tarvadostest.QueuedContainerUUID))),\n\t\tnil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tc.Check(callCount, check.Equals, 2)\n\tvar cn arvados.ContainerList\n\tc.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)\n\tc.Check(cn.Items, check.HasLen, 2)\n\tmp := make(map[string]arvados.Container)\n\tfor _, cr := range cn.Items {\n\t\tmp[cr.UUID] = cr\n\t}\n\tc.Check(mp[arvadostest.QueuedContainerUUID].Command, check.DeepEquals, []string{\"echo\", \"hello\"})\n\tc.Check(mp[\"zhome-xvhdp-cr6queuedcontnr\"].Command, check.DeepEquals, []string{\"efg\"})\n}\n\nfunc (s *FederationSuite) TestListMultiRemoteContainerPageSizeError(c *check.C) {\n\ts.testHandler.Cluster.API.MaxItemsPerResponse = 1\n\treq := httptest.NewRequest(\"GET\", fmt.Sprintf(\"/arvados/v1/containers?count=none&filters=%s\",\n\t\turl.QueryEscape(fmt.Sprintf(`[[\"uuid\", \"in\", [\"%v\", 
\"zhome-xvhdp-cr5queuedcontnr\"]]]`,\n\t\t\tarvadostest.QueuedContainerUUID))),\n\t\tnil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)\n\ts.checkJSONErrorMatches(c, resp, `Federated multi-object request for 2 objects which is more than max page size 1.`)\n}\n\nfunc (s *FederationSuite) TestListMultiRemoteContainerLimitError(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", fmt.Sprintf(\"/arvados/v1/containers?count=none&filters=%s&limit=1\",\n\t\turl.QueryEscape(fmt.Sprintf(`[[\"uuid\", \"in\", [\"%v\", \"zhome-xvhdp-cr5queuedcontnr\"]]]`,\n\t\t\tarvadostest.QueuedContainerUUID))),\n\t\tnil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)\n\ts.checkJSONErrorMatches(c, resp, `Federated multi-object may not provide 'limit', 'offset' or 'order'.`)\n}\n\nfunc (s *FederationSuite) TestListMultiRemoteContainerOffsetError(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", fmt.Sprintf(\"/arvados/v1/containers?count=none&filters=%s&offset=1\",\n\t\turl.QueryEscape(fmt.Sprintf(`[[\"uuid\", \"in\", [\"%v\", \"zhome-xvhdp-cr5queuedcontnr\"]]]`,\n\t\t\tarvadostest.QueuedContainerUUID))),\n\t\tnil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)\n\ts.checkJSONErrorMatches(c, resp, `Federated multi-object may not provide 'limit', 'offset' or 'order'.`)\n}\n\nfunc (s *FederationSuite) TestListMultiRemoteContainerOrderError(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", fmt.Sprintf(\"/arvados/v1/containers?count=none&filters=%s&order=uuid\",\n\t\turl.QueryEscape(fmt.Sprintf(`[[\"uuid\", \"in\", [\"%v\", \"zhome-xvhdp-cr5queuedcontnr\"]]]`,\n\t\t\tarvadostest.QueuedContainerUUID))),\n\t\tnil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)\n\ts.checkJSONErrorMatches(c, resp, `Federated multi-object may not provide 'limit', 'offset' or 'order'.`)\n}\n\nfunc (s *FederationSuite) TestListMultiRemoteContainerSelectError(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", fmt.Sprintf(\"/arvados/v1/containers?count=none&filters=%s&select=%s\",\n\t\turl.QueryEscape(fmt.Sprintf(`[[\"uuid\", \"in\", [\"%v\", \"zhome-xvhdp-cr5queuedcontnr\"]]]`,\n\t\t\tarvadostest.QueuedContainerUUID)),\n\t\turl.QueryEscape(`[\"command\"]`)),\n\t\tnil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := s.testRequest(req).Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)\n\ts.checkJSONErrorMatches(c, resp, `Federated multi-object request must include 'uuid' in 'select'`)\n}\n"
  },
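  {
    "path": "lib/controller/federation_decoding_example_test.go",
    "content": "// Illustrative sketch, NOT part of the original source tree: the\n// federation tests above show that a proxied container_request body\n// arrives either as JSON or as form-encoded data whose\n// \"container_request\" value is itself JSON (see getCRfromMockRequest\n// in federation_test.go). This stdlib-only helper demonstrates one way\n// to handle both encodings; the helper and test names are assumptions\n// made up for this example.\n\npackage controller\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"testing\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\nfunc decodeContainerRequestParam(contentType string, body []byte) (arvados.ContainerRequest, error) {\n\tvar cr arvados.ContainerRequest\n\tswitch contentType {\n\tcase \"application/json\":\n\t\t// Legacy code path: {\"container_request\": {...}}\n\t\tvar wrapper struct {\n\t\t\tContainerRequest arvados.ContainerRequest `json:\"container_request\"`\n\t\t}\n\t\tif err := json.Unmarshal(body, &wrapper); err != nil {\n\t\t\treturn cr, err\n\t\t}\n\t\treturn wrapper.ContainerRequest, nil\n\tcase \"application/x-www-form-urlencoded\":\n\t\t// New code path: container_request=%7B...%7D\n\t\tvalues, err := url.ParseQuery(string(body))\n\t\tif err != nil {\n\t\t\treturn cr, err\n\t\t}\n\t\terr = json.Unmarshal([]byte(values.Get(\"container_request\")), &cr)\n\t\treturn cr, err\n\tdefault:\n\t\treturn cr, fmt.Errorf(\"unsupported Content-Type %q\", contentType)\n\t}\n}\n\nfunc TestDecodeContainerRequestParamSketch(t *testing.T) {\n\tjsonBody := []byte(`{\"container_request\":{\"name\":\"hello world\"}}`)\n\tformBody := []byte(\"cluster_id=zmock&container_request=\" + url.QueryEscape(`{\"name\":\"hello world\"}`))\n\tfor contentType, body := range map[string][]byte{\n\t\t\"application/json\":                  jsonBody,\n\t\t\"application/x-www-form-urlencoded\": formBody,\n\t} {\n\t\tcr, err := decodeContainerRequestParam(contentType, body)\n\t\tif err != nil || cr.Name != \"hello world\" {\n\t\t\tt.Fatalf(\"%s: cr=%+v err=%v\", contentType, cr, err)\n\t\t}\n\t}\n}\n"
  },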
  {
    "path": "lib/controller/handler.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage controller\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"mime\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/api\"\n\t\"git.arvados.org/arvados.git/lib/controller/federation\"\n\t\"git.arvados.org/arvados.git/lib/controller/localdb\"\n\t\"git.arvados.org/arvados.git/lib/controller/railsproxy\"\n\t\"git.arvados.org/arvados.git/lib/controller/router\"\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/health\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\n\t// sqlx needs lib/pq to talk to PostgreSQL\n\t_ \"github.com/lib/pq\"\n)\n\ntype Handler struct {\n\tCluster           *arvados.Cluster\n\tBackgroundContext context.Context\n\n\tsetupOnce      sync.Once\n\tfederation     *federation.Conn\n\thandlerStack   http.Handler\n\trouter         http.Handler\n\tproxy          *proxy\n\tsecureClient   *http.Client\n\tinsecureClient *http.Client\n\tdbConnector    ctrlctx.DBConnector\n\n\tcache map[string]*cacheEnt\n}\n\nfunc (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\th.setupOnce.Do(h.setup)\n\tif req.Method != \"GET\" && req.Method != \"HEAD\" {\n\t\t// http.ServeMux returns 301 with a cleaned path if\n\t\t// the incoming request has a double slash. Some\n\t\t// clients (including the Go standard library) change\n\t\t// the request method to GET when following a 301\n\t\t// redirect if the original method was not HEAD (RFC\n\t\t// 7231 6.4.2 specifically allows this in the case of\n\t\t// POST). Thus \"POST //foo\" gets misdirected to \"GET\n\t\t// /foo\". 
To avoid this, eliminate double slashes\n\t\t// before passing the request to ServeMux.\n\t\tfor strings.Contains(req.URL.Path, \"//\") {\n\t\t\treq.URL.Path = strings.Replace(req.URL.Path, \"//\", \"/\", -1)\n\t\t}\n\t}\n\tif len(req.Host) > 28 && arvadosclient.UUIDMatch(req.Host[:27]) && req.Host[27] == '-' {\n\t\t// Requests to a vhost like\n\t\t// \"{ctr-uuid}-{port}.example.com\" go straight to\n\t\t// controller-specific routing, bypassing\n\t\t// handlerStack's logic about proxying\n\t\t// non-controller-specific paths through to RailsAPI.\n\t\th.router.ServeHTTP(w, req)\n\t\treturn\n\t}\n\th.handlerStack.ServeHTTP(w, req)\n}\n\nfunc (h *Handler) CheckHealth() error {\n\th.setupOnce.Do(h.setup)\n\t_, err := h.dbConnector.GetDB(context.TODO())\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, _, err = railsproxy.FindRailsAPI(h.Cluster)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif h.Cluster.API.VocabularyPath != \"\" {\n\t\treq, err := http.NewRequest(\"GET\", \"/arvados/v1/vocabulary\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar resp httptest.ResponseRecorder\n\t\th.handlerStack.ServeHTTP(&resp, req)\n\t\tif resp.Result().StatusCode != http.StatusOK {\n\t\t\treturn fmt.Errorf(\"%d %s\", resp.Result().StatusCode, resp.Result().Status)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (h *Handler) Done() <-chan struct{} {\n\treturn nil\n}\n\nfunc neverRedirect(*http.Request, []*http.Request) error { return http.ErrUseLastResponse }\n\nfunc (h *Handler) setup() {\n\tmux := http.NewServeMux()\n\thealthFuncs := make(map[string]health.Func)\n\n\th.dbConnector = ctrlctx.DBConnector{PostgreSQL: h.Cluster.PostgreSQL}\n\tgo func() {\n\t\t<-h.BackgroundContext.Done()\n\t\th.dbConnector.Close()\n\t}()\n\toidcAuthorizer := localdb.OIDCAccessTokenAuthorizer(h.Cluster, h.dbConnector.GetDB)\n\th.federation = federation.New(h.BackgroundContext, h.Cluster, &healthFuncs, h.dbConnector.GetDB)\n\th.router = router.New(h.federation, router.Config{\n\t\tContainerWebServices: &h.Cluster.Services.ContainerWebServices,\n\t\tMaxRequestSize:       h.Cluster.API.MaxRequestSize,\n\t\tWrapCalls: api.ComposeWrappers(\n\t\t\tctrlctx.WrapCallsInTransactions(h.dbConnector.GetDB),\n\t\t\toidcAuthorizer.WrapCalls,\n\t\t\tctrlctx.WrapCallsWithAuth(h.Cluster)),\n\t})\n\n\thealthRoutes := health.Routes{\"ping\": func() error { _, err := h.dbConnector.GetDB(context.TODO()); return err }}\n\tfor name, f := range healthFuncs {\n\t\thealthRoutes[name] = f\n\t}\n\tmux.Handle(\"/_health/\", &health.Handler{\n\t\tToken:  h.Cluster.ManagementToken,\n\t\tPrefix: \"/_health/\",\n\t\tRoutes: healthRoutes,\n\t})\n\tmux.Handle(\"/arvados/v1/config\", h.router)\n\tmux.Handle(\"/arvados/v1/vocabulary\", h.router)\n\tmux.Handle(\"/\"+arvados.EndpointUserAuthenticate.Path, h.router) // must come before .../users/\n\tmux.Handle(\"/arvados/v1/collections\", h.router)\n\tmux.Handle(\"/arvados/v1/collections/\", h.router)\n\tmux.Handle(\"/arvados/v1/users\", h.router)\n\tmux.Handle(\"/arvados/v1/users/\", h.router)\n\tmux.Handle(\"/arvados/v1/connect/\", h.router)\n\tmux.Handle(\"/arvados/v1/container_requests\", h.router)\n\tmux.Handle(\"/arvados/v1/container_requests/\", h.router)\n\tmux.Handle(\"/arvados/v1/groups\", h.router)\n\tmux.Handle(\"/arvados/v1/groups/\", h.router)\n\tmux.Handle(\"/arvados/v1/links\", h.router)\n\tmux.Handle(\"/arvados/v1/links/\", h.router)\n\tmux.Handle(\"/arvados/v1/authorized_keys\", h.router)\n\tmux.Handle(\"/arvados/v1/authorized_keys/\", h.router)\n\tmux.Handle(\"/login\", 
h.router)\n\tmux.Handle(\"/logout\", h.router)\n\tmux.Handle(\"/arvados/v1/api_client_authorizations\", h.router)\n\tmux.Handle(\"/arvados/v1/api_client_authorizations/\", h.router)\n\n\ths := http.NotFoundHandler()\n\ths = prepend(hs, h.proxyRailsAPI)\n\ths = prepend(hs, h.routeContainerEndpoints(h.router))\n\ths = prepend(hs, h.routeServiceContainerPorts(h.router))\n\ths = h.setupProxyRemoteCluster(hs)\n\ths = prepend(hs, oidcAuthorizer.Middleware)\n\tmux.Handle(\"/\", hs)\n\th.handlerStack = mux\n\n\tsc := *arvados.DefaultSecureClient\n\tsc.CheckRedirect = neverRedirect\n\th.secureClient = &sc\n\n\tic := *arvados.InsecureHTTPClient\n\tic.CheckRedirect = neverRedirect\n\th.insecureClient = &ic\n\n\th.proxy = &proxy{\n\t\tName: \"arvados-controller\",\n\t}\n\th.cache = map[string]*cacheEnt{\n\t\t\"/discovery/v1/apis/arvados/v1/rest\": &cacheEnt{validate: validateDiscoveryDoc},\n\t}\n\n\tgo h.trashSweepWorker()\n\tgo h.containerLogSweepWorker()\n}\n\ntype middlewareFunc func(http.ResponseWriter, *http.Request, http.Handler)\n\nfunc prepend(next http.Handler, middleware middlewareFunc) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tmiddleware(w, req, next)\n\t})\n}\n\nfunc (h *Handler) localClusterRequest(req *http.Request) (*http.Response, error) {\n\turlOut, insecure, err := railsproxy.FindRailsAPI(h.Cluster)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\turlOut = &url.URL{\n\t\tScheme:   urlOut.Scheme,\n\t\tHost:     urlOut.Host,\n\t\tPath:     req.URL.Path,\n\t\tRawPath:  req.URL.RawPath,\n\t\tRawQuery: req.URL.RawQuery,\n\t}\n\tclient := h.secureClient\n\tif insecure {\n\t\tclient = h.insecureClient\n\t}\n\t// Clearing the Host field here causes the Go http client to\n\t// use the host part of urlOut as the Host header in the\n\t// outgoing request, instead of the Host value from the\n\t// original request we received.\n\treq.Host = \"\"\n\treturn h.proxy.Do(req, urlOut, client)\n}\n\n// Route /arvados/v1/containers/{uuid}/log*, .../ssh, and\n// .../gateway_tunnel to rtr, pass everything else to next.\n//\n// (http.ServeMux doesn't let us route these without also routing\n// everything under /containers/, which we don't want yet.)\nfunc (h *Handler) routeContainerEndpoints(rtr http.Handler) middlewareFunc {\n\treturn func(w http.ResponseWriter, req *http.Request, next http.Handler) {\n\t\ttrim := strings.TrimPrefix(req.URL.Path, \"/arvados/v1/containers/\")\n\t\tif trim != req.URL.Path && (strings.Index(trim, \"/log\") == 27 ||\n\t\t\tstrings.Index(trim, \"/ssh\") == 27 ||\n\t\t\tstrings.Index(trim, \"/gateway_tunnel\") == 27) {\n\t\t\trtr.ServeHTTP(w, req)\n\t\t} else {\n\t\t\tnext.ServeHTTP(w, req)\n\t\t}\n\t}\n}\n\n// Route ContainerWebServices requests through rtr, pass through\n// everything else.\nfunc (h *Handler) routeServiceContainerPorts(rtr http.Handler) middlewareFunc {\n\treturn func(w http.ResponseWriter, req *http.Request, next http.Handler) {\n\t\tif router.ContainerHTTPProxyTarget(&h.Cluster.Services.ContainerWebServices, req) != \"\" {\n\t\t\trtr.ServeHTTP(w, req)\n\t\t} else {\n\t\t\tnext.ServeHTTP(w, req)\n\t\t}\n\t}\n}\n\n// cacheEnt implements a basic stale-while-revalidate cache, suitable\n// for the Arvados discovery document.\ntype cacheEnt struct {\n\tvalidate     func(body []byte) error\n\tmtx          sync.Mutex\n\theader       http.Header\n\tbody         []byte\n\texpireAfter  time.Time\n\trefreshAfter time.Time\n\trefreshLock  sync.Mutex\n}\n\nconst (\n\tcacheTTL    = 5 * time.Minute\n\tcacheExpire = 24 * 
time.Hour\n)\n\nfunc (ent *cacheEnt) refresh(path string, do func(*http.Request) (*http.Response, error)) (http.Header, []byte, error) {\n\tent.refreshLock.Lock()\n\tdefer ent.refreshLock.Unlock()\n\tif header, body, needRefresh := ent.response(); !needRefresh {\n\t\t// another goroutine refreshed successfully while we\n\t\t// were waiting for refreshLock\n\t\treturn header, body, nil\n\t} else if body != nil {\n\t\t// Cache is present, but expired. We'll try to refresh\n\t\t// below. Meanwhile, other refresh() calls will queue\n\t\t// up for refreshLock -- and we don't want them to\n\t\t// turn into N upstream requests, even if upstream is\n\t\t// failing.  (If we succeed we'll update the expiry\n\t\t// time again below with the real cacheTTL -- this\n\t\t// just takes care of the error case.)\n\t\tent.mtx.Lock()\n\t\tent.refreshAfter = time.Now().Add(time.Second)\n\t\tent.mtx.Unlock()\n\t}\n\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))\n\tdefer cancel()\n\t// \"http://localhost\" is just a placeholder here -- we'll fill\n\t// in req.URL.Path below, and then do(), which is\n\t// localClusterRequest(), will replace the scheme and host\n\t// parts with the real proxy destination.\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, \"http://localhost\", nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq.URL.Path = path\n\tresp, err := do(req)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, nil, fmt.Errorf(\"HTTP status %d\", resp.StatusCode)\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"Read error: %w\", err)\n\t}\n\theader := http.Header{}\n\tfor k, v := range resp.Header {\n\t\tif !dropHeaders[k] && k != \"X-Request-Id\" {\n\t\t\theader[k] = v\n\t\t}\n\t}\n\tif ent.validate != nil {\n\t\tif err := ent.validate(body); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t} else if mediatype, _, err := mime.ParseMediaType(header.Get(\"Content-Type\")); err == nil && mediatype == \"application/json\" {\n\t\tif !json.Valid(body) {\n\t\t\treturn nil, nil, errors.New(\"invalid JSON encoding in response\")\n\t\t}\n\t}\n\tent.mtx.Lock()\n\tdefer ent.mtx.Unlock()\n\tent.header = header\n\tent.body = body\n\tent.refreshAfter = time.Now().Add(cacheTTL)\n\tent.expireAfter = time.Now().Add(cacheExpire)\n\treturn ent.header, ent.body, nil\n}\n\nfunc (ent *cacheEnt) response() (http.Header, []byte, bool) {\n\tent.mtx.Lock()\n\tdefer ent.mtx.Unlock()\n\tif ent.expireAfter.Before(time.Now()) {\n\t\tent.header, ent.body, ent.refreshAfter = nil, nil, time.Time{}\n\t}\n\treturn ent.header, ent.body, ent.refreshAfter.Before(time.Now())\n}\n\nfunc (ent *cacheEnt) ServeHTTP(ctx context.Context, w http.ResponseWriter, path string, do func(*http.Request) (*http.Response, error)) {\n\theader, body, needRefresh := ent.response()\n\tif body == nil {\n\t\t// need to fetch before we can return anything\n\t\tvar err error\n\t\theader, body, err = ent.refresh(path, do)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusBadGateway)\n\t\t\treturn\n\t\t}\n\t} else if needRefresh {\n\t\t// re-fetch in background\n\t\tgo func() {\n\t\t\t_, _, err := ent.refresh(path, do)\n\t\t\tif err != nil {\n\t\t\t\tctxlog.FromContext(ctx).WithError(err).WithField(\"path\", path).Warn(\"error refreshing cache\")\n\t\t\t}\n\t\t}()\n\t}\n\tfor k, v := range header {\n\t\tw.Header()[k] = 
v\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(body)\n}\n\nfunc (h *Handler) proxyRailsAPI(w http.ResponseWriter, req *http.Request, next http.Handler) {\n\tif ent, ok := h.cache[req.URL.Path]; ok && req.Method == http.MethodGet {\n\t\tent.ServeHTTP(req.Context(), w, req.URL.Path, h.localClusterRequest)\n\t\treturn\n\t}\n\tresp, err := h.localClusterRequest(req)\n\tn, err := h.proxy.ForwardResponse(w, resp, err)\n\tif err != nil {\n\t\thttpserver.Logger(req).WithError(err).WithField(\"bytesCopied\", n).Error(\"error copying response body\")\n\t}\n}\n\n// Use a localhost entry from Services.RailsAPI.InternalURLs if one is\n// present, otherwise choose an arbitrary entry.\nfunc findRailsAPI(cluster *arvados.Cluster) (*url.URL, bool, error) {\n\tvar best *url.URL\n\tfor target := range cluster.Services.RailsAPI.InternalURLs {\n\t\ttarget := url.URL(target)\n\t\tbest = &target\n\t\tif strings.HasPrefix(target.Host, \"localhost:\") || strings.HasPrefix(target.Host, \"127.0.0.1:\") || strings.HasPrefix(target.Host, \"[::1]:\") {\n\t\t\tbreak\n\t\t}\n\t}\n\tif best == nil {\n\t\treturn nil, false, fmt.Errorf(\"Services.RailsAPI.InternalURLs is empty\")\n\t}\n\treturn best, cluster.TLS.Insecure, nil\n}\n\nfunc validateDiscoveryDoc(body []byte) error {\n\tvar dd arvados.DiscoveryDocument\n\terr := json.Unmarshal(body, &dd)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding JSON response: %w\", err)\n\t}\n\tif dd.BasePath == \"\" {\n\t\treturn errors.New(\"error in discovery document: no value for basePath\")\n\t}\n\treturn nil\n}\n"
  },
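  {
    "path": "lib/controller/cache_example_test.go",
    "content": "// Illustrative sketch, NOT part of the original source tree: exercises\n// the stale-while-revalidate semantics of cacheEnt from handler.go.\n// Within cacheTTL the cached body is served with no further upstream\n// fetch; the upstream stub and test name below are assumptions made up\n// for this example.\n\npackage controller\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n)\n\nfunc TestCacheEntSketch(t *testing.T) {\n\tcalls := 0\n\t// Stand-in for h.localClusterRequest: counts upstream fetches and\n\t// returns a small JSON body.\n\tdo := func(req *http.Request) (*http.Response, error) {\n\t\tcalls++\n\t\trec := httptest.NewRecorder()\n\t\trec.Header().Set(\"Content-Type\", \"application/json\")\n\t\tio.WriteString(rec, `{\"ok\":true}`)\n\t\treturn rec.Result(), nil\n\t}\n\tent := &cacheEnt{}\n\tctx := context.Background()\n\n\t// The first request populates the cache; the next two are served\n\t// from it because refreshAfter (now+cacheTTL) is still in the\n\t// future.\n\tfor i := 0; i < 3; i++ {\n\t\tw := httptest.NewRecorder()\n\t\tent.ServeHTTP(ctx, w, \"/example/path\", do)\n\t\tif w.Code != http.StatusOK || w.Body.String() != `{\"ok\":true}` {\n\t\t\tt.Fatalf(\"unexpected response %d %q\", w.Code, w.Body.String())\n\t\t}\n\t}\n\tif calls != 1 {\n\t\tt.Fatalf(\"expected a single upstream fetch within cacheTTL, got %d\", calls)\n\t}\n}\n"
  },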
  {
    "path": "lib/controller/handler_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage controller\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/dblock\"\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&HandlerSuite{})\n\ntype HandlerSuite struct {\n\tcluster  *arvados.Cluster\n\thandler  *Handler\n\trailsSpy *arvadostest.Proxy\n\tlogbuf   *bytes.Buffer\n\tctx      context.Context\n\tcancel   context.CancelFunc\n}\n\nfunc (s *HandlerSuite) SetUpTest(c *check.C) {\n\ts.logbuf = &bytes.Buffer{}\n\ts.ctx, s.cancel = context.WithCancel(context.Background())\n\ts.ctx = ctxlog.Context(s.ctx, ctxlog.New(io.MultiWriter(os.Stderr, s.logbuf), \"json\", \"debug\"))\n\ts.cluster = &arvados.Cluster{\n\t\tClusterID:  \"zzzzz\",\n\t\tPostgreSQL: integrationTestCluster().PostgreSQL,\n\t}\n\ts.cluster.API.RequestTimeout = arvados.Duration(5 * time.Minute)\n\ts.cluster.TLS.Insecure = true\n\tarvadostest.SetServiceURL(&s.cluster.Services.RailsAPI, \"https://\"+os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\ts.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)\n\tarvadostest.SetServiceURL(&s.cluster.Services.RailsAPI, s.railsSpy.URL.String())\n\tarvadostest.SetServiceURL(&s.cluster.Services.Controller, \"http://localhost:/\")\n\ts.handler = newHandler(s.ctx, s.cluster, \"\", prometheus.NewRegistry()).(*Handler)\n}\n\nfunc (s *HandlerSuite) TearDownTest(c *check.C) {\n\ts.cancel()\n\n\t// Wait for dblocks to be released. 
Otherwise, a subsequent\n\t// test might time out waiting to acquire them.\n\ttimeout := time.After(10 * time.Second)\n\tfor _, locker := range []*dblock.DBLocker{dblock.TrashSweep, dblock.ContainerLogSweep} {\n\t\tok := make(chan struct{})\n\t\tgo func() {\n\t\t\tif locker.Lock(context.Background(), s.handler.dbConnector.GetDB) {\n\t\t\t\tlocker.Unlock()\n\t\t\t}\n\t\t\tclose(ok)\n\t\t}()\n\t\tselect {\n\t\tcase <-timeout:\n\t\t\tc.Log(\"timed out waiting for dblocks\")\n\t\t\tc.Fail()\n\t\tcase <-ok:\n\t\t}\n\t}\n}\n\nfunc (s *HandlerSuite) TestConfigExport(c *check.C) {\n\ts.cluster.ManagementToken = \"secret\"\n\ts.cluster.SystemRootToken = \"secret\"\n\ts.cluster.Collections.BlobSigning = true\n\ts.cluster.Collections.BlobSigningTTL = arvados.Duration(23 * time.Second)\n\tfor _, method := range []string{\"GET\", \"OPTIONS\"} {\n\t\treq := httptest.NewRequest(method, \"/arvados/v1/config\", nil)\n\t\tresp := httptest.NewRecorder()\n\t\ts.handler.ServeHTTP(resp, req)\n\t\tc.Log(resp.Body.String())\n\t\tif !c.Check(resp.Code, check.Equals, http.StatusOK) {\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(resp.Header().Get(\"Access-Control-Allow-Origin\"), check.Equals, `*`)\n\t\tc.Check(resp.Header().Get(\"Access-Control-Allow-Methods\"), check.Matches, `.*\\bGET\\b.*`)\n\t\tc.Check(resp.Header().Get(\"Access-Control-Allow-Headers\"), check.Matches, `.+`)\n\t\tif method == \"OPTIONS\" {\n\t\t\tc.Check(resp.Body.String(), check.HasLen, 0)\n\t\t\tcontinue\n\t\t}\n\t\tvar cluster arvados.Cluster\n\t\terr := json.Unmarshal(resp.Body.Bytes(), &cluster)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(cluster.ManagementToken, check.Equals, \"\")\n\t\tc.Check(cluster.SystemRootToken, check.Equals, \"\")\n\t\tc.Check(cluster.Collections.BlobSigning, check.Equals, true)\n\t\tc.Check(cluster.Collections.BlobSigningTTL, check.Equals, arvados.Duration(23*time.Second))\n\t}\n}\n\nfunc (s *HandlerSuite) TestDiscoveryDocCache(c *check.C) {\n\tcountRailsReqs := func() int {\n\t\ts.railsSpy.Wait()\n\t\tn := 0\n\t\tfor _, req := range s.railsSpy.RequestDumps {\n\t\t\tif bytes.Contains(req, []byte(\"/discovery/v1/apis/arvados/v1/rest\")) {\n\t\t\t\tn++\n\t\t\t}\n\t\t}\n\t\treturn n\n\t}\n\tgetDD := func() int {\n\t\treq := httptest.NewRequest(http.MethodGet, \"/discovery/v1/apis/arvados/v1/rest\", nil)\n\t\tresp := httptest.NewRecorder()\n\t\ts.handler.ServeHTTP(resp, req)\n\t\tif resp.Code == http.StatusOK {\n\t\t\tvar dd arvados.DiscoveryDocument\n\t\t\terr := json.Unmarshal(resp.Body.Bytes(), &dd)\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tc.Check(dd.Schemas[\"Collection\"].UUIDPrefix, check.Equals, \"4zz18\")\n\t\t}\n\t\treturn resp.Code\n\t}\n\tgetDDConcurrently := func(n int, expectCode int, checkArgs ...interface{}) *sync.WaitGroup {\n\t\tvar wg sync.WaitGroup\n\t\tfor i := 0; i < n; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tc.Check(getDD(), check.Equals, append([]interface{}{expectCode}, checkArgs...)...)\n\t\t\t}()\n\t\t}\n\t\treturn &wg\n\t}\n\tclearCache := func() {\n\t\tfor _, ent := range s.handler.cache {\n\t\t\tent.refreshLock.Lock()\n\t\t\tent.mtx.Lock()\n\t\t\tent.body, ent.header, ent.refreshAfter = nil, nil, time.Time{}\n\t\t\tent.mtx.Unlock()\n\t\t\tent.refreshLock.Unlock()\n\t\t}\n\t}\n\twaitPendingUpdates := func() {\n\t\tfor _, ent := range s.handler.cache {\n\t\t\tent.refreshLock.Lock()\n\t\t\tdefer ent.refreshLock.Unlock()\n\t\t\tent.mtx.Lock()\n\t\t\tdefer ent.mtx.Unlock()\n\t\t}\n\t}\n\trefreshNow := func() {\n\t\twaitPendingUpdates()\n\t\tfor _, ent := range 
s.handler.cache {\n\t\t\tent.refreshAfter = time.Now()\n\t\t}\n\t}\n\texpireNow := func() {\n\t\twaitPendingUpdates()\n\t\tfor _, ent := range s.handler.cache {\n\t\t\tent.expireAfter = time.Now()\n\t\t}\n\t}\n\n\t// Easy path: first req fetches, subsequent reqs use cache.\n\tc.Check(countRailsReqs(), check.Equals, 0)\n\tc.Check(getDD(), check.Equals, http.StatusOK)\n\tc.Check(countRailsReqs(), check.Equals, 1)\n\tc.Check(getDD(), check.Equals, http.StatusOK)\n\tc.Check(countRailsReqs(), check.Equals, 1)\n\tc.Check(getDD(), check.Equals, http.StatusOK)\n\tc.Check(countRailsReqs(), check.Equals, 1)\n\n\t// To guarantee we have concurrent requests, we set up\n\t// railsSpy to hold up the Handler's outgoing requests until\n\t// we send to (or close) holdReqs.\n\tholdReqs := make(chan struct{})\n\ts.railsSpy.Director = func(*http.Request) {\n\t\t<-holdReqs\n\t}\n\n\t// Race at startup: first req fetches, other concurrent reqs\n\t// wait for the initial fetch to complete, then all return.\n\tclearCache()\n\treqsBefore := countRailsReqs()\n\twg := getDDConcurrently(5, http.StatusOK, check.Commentf(\"race at startup\"))\n\tclose(holdReqs)\n\twg.Wait()\n\tc.Check(countRailsReqs(), check.Equals, reqsBefore+1)\n\n\t// Race after expiry: concurrent reqs return the cached data\n\t// but initiate a new fetch in the background.\n\trefreshNow()\n\tholdReqs = make(chan struct{})\n\twg = getDDConcurrently(5, http.StatusOK, check.Commentf(\"race after expiry\"))\n\treqsBefore = countRailsReqs()\n\tclose(holdReqs)\n\twg.Wait()\n\tfor deadline := time.Now().Add(time.Second); time.Now().Before(deadline) && countRailsReqs() < reqsBefore+1; {\n\t\ttime.Sleep(time.Second / 100)\n\t}\n\tc.Check(countRailsReqs(), check.Equals, reqsBefore+1)\n\n\t// Configure railsSpy to return an error or bad content\n\t// depending on flags.\n\tvar wantError, wantBadContent bool\n\ts.railsSpy.Director = func(req *http.Request) {\n\t\t<-holdReqs\n\t\tif wantError {\n\t\t\t// The Passenger server hosting RailsAPI will drop HTTP requests\n\t\t\t// with unrecognized names. 
Make a request with a real method that\n\t\t\t// RailsAPI doesn't implement.\n\t\t\treq.Method = \"TRACE\"\n\t\t} else if wantBadContent {\n\t\t\treq.URL.Path = \"/_health/ping\"\n\t\t\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ManagementToken)\n\t\t}\n\t}\n\n\t// Error at startup (empty cache) => caller gets error, and we\n\t// make an upstream attempt for each incoming request because\n\t// we have nothing better to return\n\tclearCache()\n\twantError, wantBadContent = true, false\n\treqsBefore = countRailsReqs()\n\tholdReqs = make(chan struct{})\n\twg = getDDConcurrently(5, http.StatusBadGateway, check.Commentf(\"error at startup\"))\n\tclose(holdReqs)\n\twg.Wait()\n\tc.Check(countRailsReqs(), check.Equals, reqsBefore+5)\n\n\t// Response status is OK but body is not a discovery document\n\twantError, wantBadContent = false, true\n\treqsBefore = countRailsReqs()\n\tc.Check(getDD(), check.Equals, http.StatusBadGateway)\n\tc.Check(countRailsReqs(), check.Equals, reqsBefore+1)\n\n\t// Error condition clears => caller gets OK, cache is warmed\n\t// up\n\twantError, wantBadContent = false, false\n\treqsBefore = countRailsReqs()\n\tgetDDConcurrently(5, http.StatusOK, check.Commentf(\"success after errors at startup\")).Wait()\n\tc.Check(countRailsReqs(), check.Equals, reqsBefore+1)\n\n\t// Error with warm cache => caller gets OK (with no attempt to\n\t// re-fetch)\n\twantError, wantBadContent = true, false\n\treqsBefore = countRailsReqs()\n\tgetDDConcurrently(5, http.StatusOK, check.Commentf(\"error with warm cache\")).Wait()\n\tc.Check(countRailsReqs(), check.Equals, reqsBefore)\n\n\tcheckBackgroundRefresh := func(reqsExpected int) {\n\t\t// There is no guarantee that a background refresh has\n\t\t// progressed far enough that we can detect it\n\t\t// directly (the first line of refresh() might not\n\t\t// have run).  
So, to avoid false positives, we just\n\t\t// need to poll until it happens.\n\t\tfor deadline := time.Now().Add(time.Second); countRailsReqs() == reqsBefore && time.Now().Before(deadline); {\n\t\t\tc.Logf(\"countRailsReqs = %d\", countRailsReqs())\n\t\t\ttime.Sleep(time.Second / 100)\n\t\t}\n\t\t// Similarly, to ensure there are no additional\n\t\t// refreshes, we just need to wait.\n\t\ttime.Sleep(time.Second / 2)\n\t\tc.Check(countRailsReqs(), check.Equals, reqsExpected)\n\t}\n\n\t// Error with stale cache => caller gets OK with stale data\n\t// while the re-fetch is attempted in the background\n\trefreshNow()\n\twantError, wantBadContent = true, false\n\treqsBefore = countRailsReqs()\n\tholdReqs = make(chan struct{})\n\tgetDDConcurrently(5, http.StatusOK, check.Commentf(\"error with stale cache\")).Wait()\n\tclose(holdReqs)\n\t// After piling up 5 requests (holdReqs having ensured the\n\t// first update took long enough for the last incoming request\n\t// to arrive) there should be only one attempt to re-fetch.\n\tcheckBackgroundRefresh(reqsBefore + 1)\n\n\trefreshNow()\n\twantError, wantBadContent = false, false\n\treqsBefore = countRailsReqs()\n\tholdReqs = make(chan struct{})\n\tgetDDConcurrently(5, http.StatusOK, check.Commentf(\"refresh cache after error condition clears\")).Wait()\n\tclose(holdReqs)\n\tcheckBackgroundRefresh(reqsBefore + 1)\n\n\t// Make sure expireAfter is getting set\n\twaitPendingUpdates()\n\texp := s.handler.cache[\"/discovery/v1/apis/arvados/v1/rest\"].expireAfter.Sub(time.Now())\n\tc.Check(exp > cacheTTL, check.Equals, true)\n\tc.Check(exp < cacheExpire, check.Equals, true)\n\n\t// After the cache *expires* it behaves as if uninitialized:\n\t// each incoming request does a new upstream request until one\n\t// succeeds.\n\t//\n\t// First check failure after expiry:\n\texpireNow()\n\twantError, wantBadContent = true, false\n\treqsBefore = countRailsReqs()\n\tholdReqs = make(chan struct{})\n\twg = getDDConcurrently(5, http.StatusBadGateway, check.Commentf(\"error after expiry\"))\n\tclose(holdReqs)\n\twg.Wait()\n\tc.Check(countRailsReqs(), check.Equals, reqsBefore+5)\n\n\t// Success after expiry:\n\twantError, wantBadContent = false, false\n\treqsBefore = countRailsReqs()\n\tholdReqs = make(chan struct{})\n\twg = getDDConcurrently(5, http.StatusOK, check.Commentf(\"success after expiry\"))\n\tclose(holdReqs)\n\twg.Wait()\n\tc.Check(countRailsReqs(), check.Equals, reqsBefore+1)\n}\n\nfunc (s *HandlerSuite) TestVocabularyExport(c *check.C) {\n\tvoc := `{\n\t\t\"strict_tags\": false,\n\t\t\"tags\": {\n\t\t\t\"IDTAGIMPORTANCE\": {\n\t\t\t\t\"strict\": false,\n\t\t\t\t\"labels\": [{\"label\": \"Importance\"}],\n\t\t\t\t\"values\": {\n\t\t\t\t\t\"HIGH\": {\n\t\t\t\t\t\t\"labels\": [{\"label\": \"High\"}]\n\t\t\t\t\t},\n\t\t\t\t\t\"LOW\": {\n\t\t\t\t\t\t\"labels\": [{\"label\": \"Low\"}]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}`\n\tf, err := os.CreateTemp(\"\", \"test-vocabulary-*.json\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(f.Name())\n\t_, err = f.WriteString(voc)\n\tc.Assert(err, check.IsNil)\n\tf.Close()\n\ts.cluster.API.VocabularyPath = f.Name()\n\tfor _, method := range []string{\"GET\", \"OPTIONS\"} {\n\t\tc.Log(c.TestName()+\" \", method)\n\t\treq := httptest.NewRequest(method, \"/arvados/v1/vocabulary\", nil)\n\t\tresp := httptest.NewRecorder()\n\t\ts.handler.ServeHTTP(resp, req)\n\t\tc.Log(resp.Body.String())\n\t\tif !c.Check(resp.Code, check.Equals, http.StatusOK) 
{\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(resp.Header().Get(\"Access-Control-Allow-Origin\"), check.Equals, `*`)\n\t\tc.Check(resp.Header().Get(\"Access-Control-Allow-Methods\"), check.Matches, `.*\\bGET\\b.*`)\n\t\tc.Check(resp.Header().Get(\"Access-Control-Allow-Headers\"), check.Matches, `.+`)\n\t\tif method == \"OPTIONS\" {\n\t\t\tc.Check(resp.Body.String(), check.HasLen, 0)\n\t\t\tcontinue\n\t\t}\n\t\tvar expectedVoc, receivedVoc *arvados.Vocabulary\n\t\terr := json.Unmarshal([]byte(voc), &expectedVoc)\n\t\tc.Check(err, check.IsNil)\n\t\terr = json.Unmarshal(resp.Body.Bytes(), &receivedVoc)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(receivedVoc, check.DeepEquals, expectedVoc)\n\t}\n}\n\nfunc (s *HandlerSuite) TestVocabularyFailedCheckStatus(c *check.C) {\n\tvoc := `{\n\t\t\"strict_tags\": false,\n\t\t\"tags\": {\n\t\t\t\"IDTAGIMPORTANCE\": {\n\t\t\t\t\"strict\": true,\n\t\t\t\t\"labels\": [{\"label\": \"Importance\"}],\n\t\t\t\t\"values\": {\n\t\t\t\t\t\"HIGH\": {\n\t\t\t\t\t\t\"labels\": [{\"label\": \"High\"}]\n\t\t\t\t\t},\n\t\t\t\t\t\"LOW\": {\n\t\t\t\t\t\t\"labels\": [{\"label\": \"Low\"}]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}`\n\tf, err := os.CreateTemp(\"\", \"test-vocabulary-*.json\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(f.Name())\n\t_, err = f.WriteString(voc)\n\tc.Assert(err, check.IsNil)\n\tf.Close()\n\ts.cluster.API.VocabularyPath = f.Name()\n\n\treq := httptest.NewRequest(\"POST\", \"/arvados/v1/collections\",\n\t\tstrings.NewReader(`{\n\t\t\t\"collection\": {\n\t\t\t\t\"properties\": {\n\t\t\t\t\t\"IDTAGIMPORTANCE\": \"Critical\"\n\t\t\t\t}\n\t\t\t}\n\t\t}`))\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\treq.Header.Set(\"Content-type\", \"application/json\")\n\n\tresp := httptest.NewRecorder()\n\ts.handler.ServeHTTP(resp, req)\n\tc.Log(resp.Body.String())\n\tc.Assert(resp.Code, check.Equals, http.StatusBadRequest)\n\tvar jresp httpserver.ErrorResponse\n\terr = json.Unmarshal(resp.Body.Bytes(), &jresp)\n\tc.Check(err, check.IsNil)\n\tc.Assert(len(jresp.Errors), check.Equals, 1)\n\tc.Check(jresp.Errors[0], check.Matches, `.*tag value.*is not valid for key.*`)\n}\n\nfunc (s *HandlerSuite) TestProxyDiscoveryDoc(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", \"/discovery/v1/apis/arvados/v1/rest\", nil)\n\tresp := httptest.NewRecorder()\n\ts.handler.ServeHTTP(resp, req)\n\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\tvar dd arvados.DiscoveryDocument\n\terr := json.Unmarshal(resp.Body.Bytes(), &dd)\n\tc.Check(err, check.IsNil)\n\tc.Check(dd.BlobSignatureTTL, check.Not(check.Equals), int64(0))\n\tc.Check(dd.BlobSignatureTTL > 0, check.Equals, true)\n\tc.Check(len(dd.Resources), check.Not(check.Equals), 0)\n\tc.Check(len(dd.Schemas), check.Not(check.Equals), 0)\n}\n\n// Handler should give up and exit early if request context is\n// cancelled due to client hangup, httpserver.HandlerWithDeadline,\n// etc.\nfunc (s *HandlerSuite) TestRequestCancel(c *check.C) {\n\tctx, cancel := context.WithCancel(context.Background())\n\treq := httptest.NewRequest(\"GET\", \"/static/login_failure\", nil).WithContext(ctx)\n\tresp := httptest.NewRecorder()\n\tcancel()\n\ts.handler.ServeHTTP(resp, req)\n\tc.Check(resp.Code, check.Equals, http.StatusBadGateway)\n\tvar jresp httpserver.ErrorResponse\n\terr := json.Unmarshal(resp.Body.Bytes(), &jresp)\n\tc.Check(err, check.IsNil)\n\tc.Assert(len(jresp.Errors), check.Equals, 1)\n\tc.Check(jresp.Errors[0], check.Matches, `.*context canceled`)\n}\n\nfunc (s *HandlerSuite) TestProxyWithoutToken(c 
*check.C) {\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/users/current\", nil)\n\tresp := httptest.NewRecorder()\n\ts.handler.ServeHTTP(resp, req)\n\tc.Check(resp.Code, check.Equals, http.StatusUnauthorized)\n\tjresp := map[string]interface{}{}\n\terr := json.Unmarshal(resp.Body.Bytes(), &jresp)\n\tc.Check(err, check.IsNil)\n\tc.Check(jresp[\"errors\"], check.FitsTypeOf, []interface{}{})\n}\n\nfunc (s *HandlerSuite) TestProxyWithToken(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/users/current\", nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := httptest.NewRecorder()\n\ts.handler.ServeHTTP(resp, req)\n\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\tvar u arvados.User\n\terr := json.Unmarshal(resp.Body.Bytes(), &u)\n\tc.Check(err, check.IsNil)\n\tc.Check(u.UUID, check.Equals, arvadostest.ActiveUserUUID)\n}\n\nfunc (s *HandlerSuite) TestProxyWithTokenInRequestBody(c *check.C) {\n\treq := httptest.NewRequest(\"POST\", \"/arvados/v1/users/current\", strings.NewReader(url.Values{\n\t\t\"_method\":   {\"GET\"},\n\t\t\"api_token\": {arvadostest.ActiveToken},\n\t}.Encode()))\n\treq.Header.Set(\"Content-type\", \"application/x-www-form-urlencoded\")\n\tresp := httptest.NewRecorder()\n\ts.handler.ServeHTTP(resp, req)\n\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\tvar u arvados.User\n\terr := json.Unmarshal(resp.Body.Bytes(), &u)\n\tc.Check(err, check.IsNil)\n\tc.Check(u.UUID, check.Equals, arvadostest.ActiveUserUUID)\n}\n\nfunc (s *HandlerSuite) TestProxyNotFound(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/xyzzy\", nil)\n\tresp := httptest.NewRecorder()\n\ts.handler.ServeHTTP(resp, req)\n\tc.Check(resp.Code, check.Equals, http.StatusNotFound)\n\tjresp := map[string]interface{}{}\n\terr := json.Unmarshal(resp.Body.Bytes(), &jresp)\n\tc.Check(err, check.IsNil)\n\tc.Check(jresp[\"errors\"], check.FitsTypeOf, []interface{}{})\n}\n\nfunc (s *HandlerSuite) TestLogoutGoogle(c *check.C) {\n\ts.cluster.Services.Workbench2.ExternalURL = arvados.URL{Scheme: \"https\", Host: \"wb2.example\", Path: \"/\"}\n\ts.cluster.Login.Google.Enable = true\n\ts.cluster.Login.Google.ClientID = \"test\"\n\treq := httptest.NewRequest(\"GET\", \"https://0.0.0.0:1/logout?return_to=https://wb2.example/\", nil)\n\tresp := httptest.NewRecorder()\n\ts.handler.ServeHTTP(resp, req)\n\tif !c.Check(resp.Code, check.Equals, http.StatusFound) {\n\t\tc.Log(resp.Body.String())\n\t}\n\tc.Check(resp.Header().Get(\"Location\"), check.Equals, \"https://wb2.example/\")\n}\n\nfunc (s *HandlerSuite) TestValidateV1APIToken(c *check.C) {\n\tc.Assert(s.handler.CheckHealth(), check.IsNil)\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/users/current\", nil)\n\tuser, ok, err := s.handler.validateAPItoken(req, arvadostest.ActiveToken)\n\tc.Assert(err, check.IsNil)\n\tc.Check(ok, check.Equals, true)\n\tc.Check(user.Authorization.UUID, check.Equals, arvadostest.ActiveTokenUUID)\n\tc.Check(user.Authorization.APIToken, check.Equals, arvadostest.ActiveToken)\n\tc.Check(user.Authorization.Scopes, check.DeepEquals, []string{\"all\"})\n\tc.Check(user.UUID, check.Equals, arvadostest.ActiveUserUUID)\n}\n\nfunc (s *HandlerSuite) TestValidateV2APIToken(c *check.C) {\n\tc.Assert(s.handler.CheckHealth(), check.IsNil)\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/users/current\", nil)\n\tuser, ok, err := s.handler.validateAPItoken(req, arvadostest.ActiveTokenV2)\n\tc.Assert(err, check.IsNil)\n\tc.Check(ok, check.Equals, 
true)\n\tc.Check(user.Authorization.UUID, check.Equals, arvadostest.ActiveTokenUUID)\n\tc.Check(user.Authorization.APIToken, check.Equals, arvadostest.ActiveToken)\n\tc.Check(user.Authorization.Scopes, check.DeepEquals, []string{\"all\"})\n\tc.Check(user.UUID, check.Equals, arvadostest.ActiveUserUUID)\n\tc.Check(user.Authorization.TokenV2(), check.Equals, arvadostest.ActiveTokenV2)\n}\n\nfunc (s *HandlerSuite) TestValidateRemoteToken(c *check.C) {\n\tsaltedToken, err := auth.SaltToken(arvadostest.ActiveTokenV2, \"abcde\")\n\tc.Assert(err, check.IsNil)\n\tfor _, trial := range []struct {\n\t\tcode  int\n\t\ttoken string\n\t}{\n\t\t{http.StatusOK, saltedToken},\n\t\t{http.StatusUnauthorized, \"bogus\"},\n\t} {\n\t\treq := httptest.NewRequest(\"GET\", \"https://0.0.0.0:1/arvados/v1/users/current?remote=abcde\", nil)\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+trial.token)\n\t\tresp := httptest.NewRecorder()\n\t\ts.handler.ServeHTTP(resp, req)\n\t\tif !c.Check(resp.Code, check.Equals, trial.code) {\n\t\t\tc.Logf(\"HTTP %d: %s\", resp.Code, resp.Body.String())\n\t\t}\n\t}\n}\n\nfunc (s *HandlerSuite) TestLogTokenUUID(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", \"https://0.0.0.0/arvados/v1/users/current\", nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveTokenV2)\n\treq = req.WithContext(s.ctx)\n\tresp := httptest.NewRecorder()\n\thttpserver.LogRequests(s.handler).ServeHTTP(resp, req)\n\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\tc.Check(s.logbuf.String(), check.Matches, `(?ms).*\"tokenUUIDs\":\\[\"`+strings.Split(arvadostest.ActiveTokenV2, \"/\")[1]+`\"\\].*`)\n}\n\nfunc (s *HandlerSuite) TestCreateAPIToken(c *check.C) {\n\tc.Assert(s.handler.CheckHealth(), check.IsNil)\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/users/current\", nil)\n\tauth, err := s.handler.createAPItoken(req, arvadostest.ActiveUserUUID, nil)\n\tc.Assert(err, check.IsNil)\n\tc.Check(auth.Scopes, check.DeepEquals, []string{\"all\"})\n\n\tuser, ok, err := s.handler.validateAPItoken(req, auth.TokenV2())\n\tc.Assert(err, check.IsNil)\n\tc.Check(ok, check.Equals, true)\n\tc.Check(user.Authorization.UUID, check.Equals, auth.UUID)\n\tc.Check(user.Authorization.APIToken, check.Equals, auth.APIToken)\n\tc.Check(user.Authorization.Scopes, check.DeepEquals, []string{\"all\"})\n\tc.Check(user.UUID, check.Equals, arvadostest.ActiveUserUUID)\n\tc.Check(user.Authorization.TokenV2(), check.Equals, auth.TokenV2())\n}\n\nfunc (s *HandlerSuite) CheckObjectType(c *check.C, url string, token string, skippedFields map[string]bool) {\n\tvar proxied, direct map[string]interface{}\n\tvar err error\n\n\t// Get collection from controller\n\treq := httptest.NewRequest(\"GET\", url, nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+token)\n\tresp := httptest.NewRecorder()\n\ts.handler.ServeHTTP(resp, req)\n\tif !c.Check(resp.Code, check.Equals, http.StatusOK,\n\t\tcheck.Commentf(\"Wasn't able to get data from the controller at %q: %q\", url, resp.Body.String())) {\n\t\treturn\n\t}\n\terr = json.Unmarshal(resp.Body.Bytes(), &proxied)\n\tc.Check(err, check.Equals, nil)\n\n\t// Get collection directly from RailsAPI\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t},\n\t}\n\tresp2, err := client.Get(s.cluster.Services.RailsAPI.ExternalURL.String() + url + \"/?api_token=\" + token)\n\tc.Check(err, check.Equals, nil)\n\tdefer resp2.Body.Close()\n\tif !c.Check(resp2.StatusCode, check.Equals, 
http.StatusOK,\n\t\tcheck.Commentf(\"Wasn't able to get data from the RailsAPI at %q\", url)) {\n\t\treturn\n\t}\n\tdb, err := ioutil.ReadAll(resp2.Body)\n\tc.Check(err, check.Equals, nil)\n\terr = json.Unmarshal(db, &direct)\n\tc.Check(err, check.Equals, nil)\n\n\t// Check that all RailsAPI provided keys exist on the controller response.\n\tfor k := range direct {\n\t\tif _, ok := skippedFields[k]; ok {\n\t\t\tcontinue\n\t\t} else if val, ok := proxied[k]; !ok {\n\t\t\tc.Errorf(\"%s's key %q missing on controller's response.\", direct[\"kind\"], k)\n\t\t} else if direct[\"kind\"] == \"arvados#collection\" && k == \"manifest_text\" {\n\t\t\t// Tokens differ from request to request\n\t\t\tc.Check(strings.Split(val.(string), \"+A\")[0], check.Equals, strings.Split(direct[k].(string), \"+A\")[0])\n\t\t} else {\n\t\t\tc.Check(val, check.DeepEquals, direct[k],\n\t\t\t\tcheck.Commentf(\"RailsAPI %s key %q's value %q differs from controller's %q.\", direct[\"kind\"], k, direct[k], val))\n\t\t}\n\t}\n\n\t// The \"href\" field has been removed. We don't particularly\n\t// care whether Rails returns it, as long as controller\n\t// doesn't.\n\t_, hasHref := proxied[\"href\"]\n\tc.Check(hasHref, check.Equals, false)\n}\n\nfunc (s *HandlerSuite) TestGetObjects(c *check.C) {\n\t// Get the 1st keep service's uuid from the running test server.\n\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/keep_services/\", nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.AdminToken)\n\tresp := httptest.NewRecorder()\n\ts.handler.ServeHTTP(resp, req)\n\tc.Assert(resp.Code, check.Equals, http.StatusOK)\n\tvar ksList arvados.KeepServiceList\n\tjson.Unmarshal(resp.Body.Bytes(), &ksList)\n\tc.Assert(len(ksList.Items), check.Not(check.Equals), 0)\n\tksUUID := ksList.Items[0].UUID\n\t// Create a new token for the test user so that we're not comparing\n\t// the ones from the fixtures.\n\treq = httptest.NewRequest(\"POST\", \"/arvados/v1/api_client_authorizations\",\n\t\tstrings.NewReader(`{\n\t\t\t\"api_client_authorization\": {\n\t\t\t\t\"owner_uuid\": \"`+arvadostest.AdminUserUUID+`\",\n\t\t\t\t\"created_by_ip_address\": \"::1\",\n\t\t\t\t\"last_used_by_ip_address\": \"::1\"\n\t\t\t}\n\t\t}`))\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.SystemRootToken)\n\treq.Header.Set(\"Content-type\", \"application/json\")\n\tresp = httptest.NewRecorder()\n\ts.handler.ServeHTTP(resp, req)\n\tc.Assert(resp.Code, check.Equals, http.StatusOK,\n\t\tcheck.Commentf(\"%s\", resp.Body.String()))\n\tvar auth arvados.APIClientAuthorization\n\tjson.Unmarshal(resp.Body.Bytes(), &auth)\n\tc.Assert(auth.UUID, check.Not(check.Equals), \"\")\n\n\ttestCases := map[string]map[string]bool{\n\t\t\"api_client_authorizations/\" + auth.UUID:                       {\"modified_by_client_uuid\": true, \"modified_by_user_uuid\": true},\n\t\t\"authorized_keys/\" + arvadostest.AdminAuthorizedKeysUUID:       nil,\n\t\t\"collections/\" + arvadostest.CollectionWithUniqueWordsUUID:     nil,\n\t\t\"containers/\" + arvadostest.RunningContainerUUID:               nil,\n\t\t\"container_requests/\" + arvadostest.QueuedContainerRequestUUID: {\"mounts\": true},\n\t\t\"groups/\" + arvadostest.AProjectUUID:                           nil,\n\t\t\"keep_services/\" + ksUUID:                                      nil,\n\t\t\"links/\" + arvadostest.ActiveUserCanReadAllUsersLinkUUID:       nil,\n\t\t\"logs/\" + arvadostest.CrunchstatForRunningContainerLogUUID:     nil,\n\t\t\"users/\" + arvadostest.ActiveUserUUID:                          
nil,\n\t\t\"virtual_machines/\" + arvadostest.TestVMUUID:                   nil,\n\t\t\"workflows/\" + arvadostest.WorkflowWithDefinitionYAMLUUID:      nil,\n\t}\n\tfor url, skippedFields := range testCases {\n\t\tc.Logf(\"Testing %q\", url)\n\t\ts.CheckObjectType(c, \"/arvados/v1/\"+url, auth.TokenV2(), skippedFields)\n\t}\n}\n\nfunc (s *HandlerSuite) TestRedactRailsAPIHostFromErrors(c *check.C) {\n\treq := httptest.NewRequest(\"GET\", \"https://0.0.0.0:1/arvados/v1/collections/zzzzz-4zz18-abcdefghijklmno\", nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp := httptest.NewRecorder()\n\ts.handler.ServeHTTP(resp, req)\n\tc.Check(resp.Code, check.Equals, http.StatusNotFound)\n\tvar jresp struct {\n\t\tErrors []string\n\t}\n\tc.Log(resp.Body.String())\n\tc.Assert(json.NewDecoder(resp.Body).Decode(&jresp), check.IsNil)\n\tc.Assert(jresp.Errors, check.HasLen, 1)\n\tc.Check(jresp.Errors[0], check.Matches, `.*//railsapi\\.internal/arvados/v1/collections/.*: 404 Not Found.*`)\n\tc.Check(jresp.Errors[0], check.Not(check.Matches), `(?ms).*127.0.0.1.*`)\n}\n\nfunc (s *HandlerSuite) TestTrashSweep(c *check.C) {\n\ts.cluster.SystemRootToken = arvadostest.SystemRootToken\n\ts.cluster.Collections.TrashSweepInterval = arvados.Duration(time.Second / 10)\n\ts.handler.CheckHealth()\n\tctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})\n\tcoll, err := s.handler.federation.CollectionCreate(ctx, arvados.CreateOptions{Attrs: map[string]interface{}{\"name\": \"test trash sweep\"}, EnsureUniqueName: true})\n\tc.Assert(err, check.IsNil)\n\tdefer s.handler.federation.CollectionDelete(ctx, arvados.DeleteOptions{UUID: coll.UUID})\n\tdb, err := s.handler.dbConnector.GetDB(s.ctx)\n\tc.Assert(err, check.IsNil)\n\t_, err = db.ExecContext(s.ctx, `update collections set trash_at = $1, delete_at = $2 where uuid = $3`, time.Now().UTC().Add(time.Second/10), time.Now().UTC().Add(time.Hour), coll.UUID)\n\tc.Assert(err, check.IsNil)\n\tdeadline := time.Now().Add(5 * time.Second)\n\tfor {\n\t\tif time.Now().After(deadline) {\n\t\t\tc.Log(\"timed out\")\n\t\t\tc.FailNow()\n\t\t}\n\t\tupdated, err := s.handler.federation.CollectionGet(ctx, arvados.GetOptions{UUID: coll.UUID, IncludeTrash: true})\n\t\tc.Assert(err, check.IsNil)\n\t\tif updated.IsTrashed {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Second / 10)\n\t}\n}\n\nfunc (s *HandlerSuite) TestContainerLogSweep(c *check.C) {\n\ts.cluster.SystemRootToken = arvadostest.SystemRootToken\n\ts.cluster.Collections.TrashSweepInterval = arvados.Duration(2 * time.Second)\n\ts.handler.CheckHealth()\n\tctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: []string{arvadostest.ActiveTokenV2}})\n\tlogentry, err := s.handler.federation.LogCreate(ctx, arvados.CreateOptions{Attrs: map[string]interface{}{\n\t\t\"object_uuid\": arvadostest.CompletedContainerUUID,\n\t\t\"event_type\":  \"stderr\",\n\t\t\"properties\": map[string]interface{}{\n\t\t\t\"text\": \"test container log sweep\\n\",\n\t\t},\n\t}})\n\tc.Assert(err, check.IsNil)\n\tdefer s.handler.federation.LogDelete(ctx, arvados.DeleteOptions{UUID: logentry.UUID})\n\tdeadline := time.Now().Add(5 * time.Second)\n\tfor {\n\t\tif time.Now().After(deadline) {\n\t\t\tc.Log(\"timed out\")\n\t\t\tc.FailNow()\n\t\t}\n\t\tlogentries, err := s.handler.federation.LogList(ctx, arvados.ListOptions{Filters: []arvados.Filter{{\"uuid\", \"=\", logentry.UUID}}, Limit: -1})\n\t\tc.Assert(err, check.IsNil)\n\t\tif len(logentries.Items) == 0 
{\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Second / 10)\n\t}\n}\n\nfunc (s *HandlerSuite) TestLogActivity(c *check.C) {\n\ts.cluster.SystemRootToken = arvadostest.SystemRootToken\n\ts.cluster.Users.ActivityLoggingPeriod = arvados.Duration(24 * time.Hour)\n\ts.handler.CheckHealth()\n\n\ttestServer := newServerFromIntegrationTestEnv(c)\n\ttestServer.Server.Handler = httpserver.AddRequestIDs(httpserver.LogRequests(s.handler))\n\tc.Assert(testServer.Start(), check.IsNil)\n\tdefer testServer.Close()\n\n\tu, _ := url.Parse(\"http://\" + testServer.Addr)\n\tclient := rpc.NewConn(s.cluster.ClusterID, u, true, rpc.PassthroughTokenProvider)\n\n\tstarttime := time.Now()\n\tfor i := 0; i < 4; i++ {\n\t\tfor _, token := range []string{\n\t\t\tarvadostest.ActiveTokenV2,\n\t\t\tarvadostest.ActiveToken,\n\t\t\tarvadostest.SpectatorToken,\n\t\t} {\n\t\t\tctx := auth.NewContext(s.ctx, &auth.Credentials{Tokens: []string{token}})\n\t\t\t_, err := client.CollectionList(ctx, arvados.ListOptions{})\n\t\t\tc.Assert(err, check.IsNil)\n\t\t}\n\t}\n\tdb, err := s.handler.dbConnector.GetDB(s.ctx)\n\tc.Assert(err, check.IsNil)\n\tfor _, userUUID := range []string{arvadostest.ActiveUserUUID, arvadostest.SpectatorUserUUID} {\n\t\tvar rows int\n\t\terr = db.QueryRowContext(s.ctx, `select count(uuid) from logs where object_uuid = $1 and event_at > $2`, userUUID, starttime.UTC()).Scan(&rows)\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(rows, check.Equals, 1, check.Commentf(\"expect 1 row for user uuid %s\", userUUID))\n\t}\n}\n"
  },
  {
    "path": "lib/controller/integration_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage controller\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"database/sql\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/boot\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&IntegrationSuite{})\n\ntype IntegrationSuite struct {\n\tsuper        *boot.Supervisor\n\toidcprovider *arvadostest.OIDCProvider\n}\n\nfunc (s *IntegrationSuite) SetUpSuite(c *check.C) {\n\tif output, err := exec.Command(\"docker\", \"info\").CombinedOutput(); err != nil {\n\t\t// See TestContainerHTTPProxy for details about why we\n\t\t// depend on docker instead of singularity.\n\t\tc.Fatalf(\"this test suite depends on docker, which does not appear to be working. `docker info` reports:\\n\\n%s\", output)\n\t}\n\ts.oidcprovider = arvadostest.NewOIDCProvider(c)\n\ts.oidcprovider.AuthEmail = \"user@example.com\"\n\ts.oidcprovider.AuthEmailVerified = true\n\ts.oidcprovider.AuthName = \"Example User\"\n\ts.oidcprovider.ValidClientID = \"clientid\"\n\ts.oidcprovider.ValidClientSecret = \"clientsecret\"\n\n\thostport := map[string]string{}\n\tfor _, id := range []string{\"z1111\", \"z2222\", \"z3333\"} {\n\t\thostport[id] = func() string {\n\t\t\t// TODO: Instead of expecting random ports on\n\t\t\t// 127.0.0.11, 22, 33 to be race-safe, try\n\t\t\t// different 127.x.y.z until finding one that\n\t\t\t// isn't in use.\n\t\t\tln, err := net.Listen(\"tcp\", \":0\")\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tln.Close()\n\t\t\t_, port, err := net.SplitHostPort(ln.Addr().String())\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\treturn \"127.0.0.\" + id[3:] + \":\" + port\n\t\t}()\n\t}\n\tyaml := \"Clusters:\\n\"\n\tfor id := range hostport {\n\t\tyaml += `\n  ` + id + `:\n    Services:\n      Controller:\n        ExternalURL: https://` + hostport[id] + `\n      ContainerWebServices:\n        ExternalURL: https://` + hostport[id] + `\n        ExternalPortMin: 2000\n        ExternalPortMax: 2099\n    TLS:\n      Insecure: true\n    SystemLogs:\n      Format: text\n    API:\n      MaxConcurrentRequests: 128\n    Containers:\n      CloudVMs:\n        Enable: true\n        Driver: loopback\n        BootProbeCommand: \"rm -f /var/lock/crunch-run-broken\"\n        ProbeInterval: 1s\n        PollInterval: 5s\n        SyncInterval: 10s\n        TimeoutIdle: 1s\n        TimeoutBooting: 2s\n      # Note RuntimeEngine: singularity would almost work, except see\n      # comment on TestContainerHTTPProxy()\n      RuntimeEngine: docker\n      CrunchRunArgumentsList: [\"--broken-node-hook\", \"true\"]\n    RemoteClusters:\n      z1111:\n        Host: ` + hostport[\"z1111\"] + `\n        Scheme: https\n        Insecure: true\n        Proxy: true\n        ActivateUsers: true\n`\n\t\tif id != \"z2222\" {\n\t\t\tyaml += `      z2222:\n        Host: ` + hostport[\"z2222\"] + `\n        Scheme: https\n        Insecure: true\n        Proxy: true\n        ActivateUsers: true\n`\n\t\t}\n\t\tif id != \"z3333\" {\n\t\t\tyaml += `      z3333:\n        Host: ` + hostport[\"z3333\"] 
+ `\n        Scheme: https\n        Insecure: true\n        Proxy: true\n        ActivateUsers: true\n`\n\t\t}\n\t\tif id == \"z1111\" {\n\t\t\tyaml += `\n    Login:\n      LoginCluster: z1111\n      OpenIDConnect:\n        Enable: true\n        Issuer: ` + s.oidcprovider.Issuer.URL + `\n        ClientID: ` + s.oidcprovider.ValidClientID + `\n        ClientSecret: ` + s.oidcprovider.ValidClientSecret + `\n        EmailClaim: email\n        EmailVerifiedClaim: email_verified\n        AcceptAccessToken: true\n        AcceptAccessTokenScope: \"\"\n`\n\t\t} else {\n\t\t\tyaml += `\n    Login:\n      LoginCluster: z1111\n`\n\t\t}\n\t}\n\ts.super = &boot.Supervisor{\n\t\tClusterType:          \"test\",\n\t\tConfigYAML:           yaml,\n\t\tStderr:               ctxlog.LogWriter(c.Log),\n\t\tNoWorkbench1:         true,\n\t\tNoWorkbench2:         true,\n\t\tOwnTemporaryDatabase: true,\n\t}\n\n\t// Give up if startup takes longer than 3m\n\ttimeout := time.AfterFunc(3*time.Minute, s.super.Stop)\n\tdefer timeout.Stop()\n\ts.super.Start(context.Background())\n\tok := s.super.WaitReady()\n\tc.Assert(ok, check.Equals, true)\n}\n\nfunc (s *IntegrationSuite) TearDownSuite(c *check.C) {\n\tif s.super != nil {\n\t\ts.super.Stop()\n\t\ts.super.Wait()\n\t}\n}\n\nfunc (s *IntegrationSuite) TestDefaultStorageClassesOnCollections(c *check.C) {\n\tconn := s.super.Conn(\"z1111\")\n\trootctx, _, _ := s.super.RootClients(\"z1111\")\n\tuserctx, _, kc, _ := s.super.UserClients(\"z1111\", rootctx, c, conn, s.oidcprovider.AuthEmail, true)\n\tc.Assert(len(kc.DefaultStorageClasses) > 0, check.Equals, true)\n\tcoll, err := conn.CollectionCreate(userctx, arvados.CreateOptions{})\n\tc.Assert(err, check.IsNil)\n\tc.Assert(coll.StorageClassesDesired, check.DeepEquals, kc.DefaultStorageClasses)\n}\n\nfunc (s *IntegrationSuite) createTestCollectionManifest(c *check.C, ac *arvados.Client, kc *keepclient.KeepClient, content string) string {\n\tfs, err := (&arvados.Collection{}).FileSystem(ac, kc)\n\tc.Assert(err, check.IsNil)\n\tf, err := fs.OpenFile(\"test.txt\", os.O_CREATE|os.O_RDWR, 0777)\n\tc.Assert(err, check.IsNil)\n\t_, err = io.WriteString(f, content)\n\tc.Assert(err, check.IsNil)\n\terr = f.Close()\n\tc.Assert(err, check.IsNil)\n\tmtxt, err := fs.MarshalManifest(\".\")\n\tc.Assert(err, check.IsNil)\n\treturn mtxt\n}\n\nfunc (s *IntegrationSuite) TestGetCollectionByPDH(c *check.C) {\n\tconn1 := s.super.Conn(\"z1111\")\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\tconn3 := s.super.Conn(\"z3333\")\n\tuserctx1, ac1, kc1, _ := s.super.UserClients(\"z1111\", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)\n\n\t// Create the collection to find its PDH (but don't save it\n\t// anywhere yet)\n\tmtxt := s.createTestCollectionManifest(c, ac1, kc1, c.TestName())\n\tpdh := arvados.PortableDataHash(mtxt)\n\n\t// Looking up the PDH before saving returns 404 if cycle\n\t// detection is working.\n\t_, err := conn1.CollectionGet(userctx1, arvados.GetOptions{UUID: pdh})\n\tc.Assert(err, check.ErrorMatches, `.*404 Not Found.*`)\n\n\t// Save the collection on cluster z1111.\n\t_, err = conn1.CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{\n\t\t\"manifest_text\": mtxt,\n\t}})\n\tc.Assert(err, check.IsNil)\n\n\t// Retrieve the collection from cluster z3333.\n\tcoll2, err := conn3.CollectionGet(userctx1, arvados.GetOptions{UUID: pdh})\n\tc.Check(err, check.IsNil)\n\tc.Check(coll2.PortableDataHash, check.Equals, pdh)\n}\n\nfunc (s *IntegrationSuite) TestFederation_Write1Read2(c *check.C) 
{\n\ts.testFederationCollectionAccess(c, \"z1111\", \"z2222\")\n}\n\nfunc (s *IntegrationSuite) TestFederation_Write2Read1(c *check.C) {\n\ts.testFederationCollectionAccess(c, \"z2222\", \"z1111\")\n}\n\nfunc (s *IntegrationSuite) TestFederation_Write2Read3(c *check.C) {\n\ts.testFederationCollectionAccess(c, \"z2222\", \"z3333\")\n}\n\nfunc (s *IntegrationSuite) testFederationCollectionAccess(c *check.C, writeCluster, readCluster string) {\n\tconn1 := s.super.Conn(\"z1111\")\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\t_, ac1, _, _ := s.super.UserClients(\"z1111\", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)\n\n\tconnW := s.super.Conn(writeCluster)\n\tuserctxW, acW, kcW := s.super.ClientsWithToken(writeCluster, ac1.AuthToken)\n\tkcW.DiskCacheSize = keepclient.DiskCacheDisabled\n\tconnR := s.super.Conn(readCluster)\n\tuserctxR, acR, kcR := s.super.ClientsWithToken(readCluster, ac1.AuthToken)\n\tkcR.DiskCacheSize = keepclient.DiskCacheDisabled\n\n\tfiledata := fmt.Sprintf(\"%s: write to %s, read from %s\", c.TestName(), writeCluster, readCluster)\n\tmtxt := s.createTestCollectionManifest(c, acW, kcW, filedata)\n\tcollW, err := connW.CollectionCreate(userctxW, arvados.CreateOptions{Attrs: map[string]interface{}{\n\t\t\"manifest_text\": mtxt,\n\t}})\n\tc.Assert(err, check.IsNil)\n\n\tcollR, err := connR.CollectionGet(userctxR, arvados.GetOptions{UUID: collW.UUID})\n\tif !c.Check(err, check.IsNil) {\n\t\treturn\n\t}\n\tfsR, err := collR.FileSystem(acR, kcR)\n\tif !c.Check(err, check.IsNil) {\n\t\treturn\n\t}\n\tbuf, err := fs.ReadFile(arvados.FS(fsR), \"test.txt\")\n\tif !c.Check(err, check.IsNil) {\n\t\treturn\n\t}\n\tc.Check(string(buf), check.Equals, filedata)\n}\n\n// Tests bug #18004\nfunc (s *IntegrationSuite) TestRemoteUserAndTokenCacheRace(c *check.C) {\n\tconn1 := s.super.Conn(\"z1111\")\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\trootctx2, _, _ := s.super.RootClients(\"z2222\")\n\tconn2 := s.super.Conn(\"z2222\")\n\tuserctx1, _, _, _ := s.super.UserClients(\"z1111\", rootctx1, c, conn1, \"user2@example.com\", true)\n\n\tvar wg1, wg2 sync.WaitGroup\n\tcreqs := 100\n\n\t// Make concurrent requests to z2222 with a local token to make sure more\n\t// than one worker is listening.\n\twg1.Add(1)\n\tfor i := 0; i < creqs; i++ {\n\t\twg2.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg2.Done()\n\t\t\twg1.Wait()\n\t\t\t_, err := conn2.UserGetCurrent(rootctx2, arvados.GetOptions{})\n\t\t\tc.Check(err, check.IsNil, check.Commentf(\"warm up phase failed\"))\n\t\t}()\n\t}\n\twg1.Done()\n\twg2.Wait()\n\n\t// Real test pass -- use a remote token different from the one used in the\n\t// warm-up phase.\n\twg1.Add(1)\n\tfor i := 0; i < creqs; i++ {\n\t\twg2.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg2.Done()\n\t\t\twg1.Wait()\n\t\t\t// Fetch the current user from cluster z2222 with the remote token.\n\t\t\t_, err := conn2.UserGetCurrent(userctx1, arvados.GetOptions{})\n\t\t\tc.Check(err, check.IsNil, check.Commentf(\"testing phase failed\"))\n\t\t}()\n\t}\n\twg1.Done()\n\twg2.Wait()\n}\n\n// After using a token issued by z1111 to call the Logout endpoint on\n// z2222, the token should be expired and rejected by both z1111 and\n// z2222.\n
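// Note: the user token has the form \"v2/z1111-.../<secret>\" (asserted just\n// below), so both clusters can tell it was issued by login cluster z1111.\nfunc (s *IntegrationSuite) TestLogoutUsingLoginCluster(c *check.C) {\n\tconn1 := s.super.Conn(\"z1111\")\n\tconn2 := s.super.Conn(\"z2222\")\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\t_, ac1, _, _ := s.super.UserClients(\"z1111\", rootctx1, c, conn1, \"user1@example.com\", true)\n\tuserctx2, ac2, _ := s.super.ClientsWithToken(\"z2222\", 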
ac1.AuthToken)\n\tc.Assert(ac2.AuthToken, check.Matches, `^v2/z1111-.*`)\n\t_, err := conn1.CollectionCreate(userctx2, arvados.CreateOptions{})\n\tc.Assert(err, check.IsNil)\n\t_, err = conn2.CollectionCreate(userctx2, arvados.CreateOptions{})\n\tc.Assert(err, check.IsNil)\n\n\t_, err = conn2.Logout(userctx2, arvados.LogoutOptions{})\n\tc.Assert(err, check.IsNil)\n\n\t_, err = conn1.CollectionCreate(userctx2, arvados.CreateOptions{})\n\tse, ok := err.(httpserver.HTTPStatusError)\n\tif c.Check(ok, check.Equals, true, check.Commentf(\"after logging out, token should have been rejected by login cluster\")) {\n\t\tc.Check(se.HTTPStatus(), check.Equals, 401)\n\t}\n\n\t_, err = conn2.CollectionCreate(userctx2, arvados.CreateOptions{})\n\tse, ok = err.(httpserver.HTTPStatusError)\n\tif c.Check(ok, check.Equals, true, check.Commentf(\"after logging out, token should have been rejected by remote cluster\")) {\n\t\tc.Check(se.HTTPStatus(), check.Equals, 401)\n\t}\n\n}\n\nfunc (s *IntegrationSuite) TestS3WithFederatedToken(c *check.C) {\n\tif _, err := exec.LookPath(\"s3cmd\"); err != nil {\n\t\tc.Skip(\"s3cmd not in PATH\")\n\t\treturn\n\t}\n\n\ttestText := \"IntegrationSuite.TestS3WithFederatedToken\"\n\n\tconn1 := s.super.Conn(\"z1111\")\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\tuserctx1, ac1, _, _ := s.super.UserClients(\"z1111\", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)\n\tconn3 := s.super.Conn(\"z3333\")\n\n\tcreateColl := func(clusterID string) arvados.Collection {\n\t\t_, ac, kc := s.super.ClientsWithToken(clusterID, ac1.AuthToken)\n\t\tvar coll arvados.Collection\n\t\tfs, err := coll.FileSystem(ac, kc)\n\t\tc.Assert(err, check.IsNil)\n\t\tf, err := fs.OpenFile(\"test.txt\", os.O_CREATE|os.O_RDWR, 0777)\n\t\tc.Assert(err, check.IsNil)\n\t\t_, err = io.WriteString(f, testText)\n\t\tc.Assert(err, check.IsNil)\n\t\terr = f.Close()\n\t\tc.Assert(err, check.IsNil)\n\t\tmtxt, err := fs.MarshalManifest(\".\")\n\t\tc.Assert(err, check.IsNil)\n\t\tcoll, err = s.super.Conn(clusterID).CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{\n\t\t\t\"manifest_text\": mtxt,\n\t\t}})\n\t\tc.Assert(err, check.IsNil)\n\t\treturn coll\n\t}\n\n\tfor _, trial := range []struct {\n\t\tclusterID string // create the collection on this cluster (then use z3333 to access it)\n\t\ttoken     string\n\t}{\n\t\t// Try the hardest test first: z3333 hasn't seen\n\t\t// z1111's token yet, and we're just passing the\n\t\t// opaque secret part, so z3333 has to guess that it\n\t\t// belongs to z1111.\n\t\t{\"z1111\", strings.Split(ac1.AuthToken, \"/\")[2]},\n\t\t{\"z3333\", strings.Split(ac1.AuthToken, \"/\")[2]},\n\t\t{\"z1111\", strings.Replace(ac1.AuthToken, \"/\", \"_\", -1)},\n\t\t{\"z3333\", strings.Replace(ac1.AuthToken, \"/\", \"_\", -1)},\n\t} {\n\t\tc.Logf(\"================ %v\", trial)\n\t\tcoll := createColl(trial.clusterID)\n\n\t\tcfgjson, err := conn3.ConfigGet(userctx1)\n\t\tc.Assert(err, check.IsNil)\n\t\tvar cluster arvados.Cluster\n\t\terr = json.Unmarshal(cfgjson, &cluster)\n\t\tc.Assert(err, check.IsNil)\n\n\t\tc.Logf(\"TokenV2 is %s\", ac1.AuthToken)\n\t\thost := cluster.Services.WebDAV.ExternalURL.Host\n\t\ts3args := []string{\n\t\t\t\"--ssl\", \"--no-check-certificate\",\n\t\t\t\"--host=\" + host, \"--host-bucket=\" + host,\n\t\t\t\"--access_key=\" + trial.token, \"--secret_key=\" + trial.token,\n\t\t}\n\t\tbuf, err := exec.Command(\"s3cmd\", append(s3args, \"ls\", \"s3://\"+coll.UUID)...).CombinedOutput()\n\t\tc.Check(err, 
check.IsNil)\n\t\tc.Check(string(buf), check.Matches, `.* `+fmt.Sprintf(\"%d\", len(testText))+` +s3://`+coll.UUID+`/test.txt\\n`)\n\n\t\tbuf, _ = exec.Command(\"s3cmd\", append(s3args, \"get\", \"s3://\"+coll.UUID+\"/test.txt\", c.MkDir()+\"/tmpfile\")...).CombinedOutput()\n\t\t// Command fails because we don't return Etag header.\n\t\tflen := strconv.Itoa(len(testText))\n\t\tc.Check(string(buf), check.Matches, `(?ms).*`+flen+` (bytes in|of `+flen+`).*`)\n\t}\n}\n\nfunc (s *IntegrationSuite) TestGetCollectionAsAnonymous(c *check.C) {\n\tconn1 := s.super.Conn(\"z1111\")\n\tconn3 := s.super.Conn(\"z3333\")\n\trootctx1, rootac1, rootkc1 := s.super.RootClients(\"z1111\")\n\tanonctx3, anonac3, _ := s.super.AnonymousClients(\"z3333\")\n\n\t// Make sure anonymous token was set\n\tc.Assert(anonac3.AuthToken, check.Not(check.Equals), \"\")\n\n\t// Create the collection to find its PDH (but don't save it\n\t// anywhere yet)\n\tvar coll1 arvados.Collection\n\tfs1, err := coll1.FileSystem(rootac1, rootkc1)\n\tc.Assert(err, check.IsNil)\n\tf, err := fs1.OpenFile(\"test.txt\", os.O_CREATE|os.O_RDWR, 0777)\n\tc.Assert(err, check.IsNil)\n\t_, err = io.WriteString(f, \"IntegrationSuite.TestGetCollectionAsAnonymous\")\n\tc.Assert(err, check.IsNil)\n\terr = f.Close()\n\tc.Assert(err, check.IsNil)\n\tmtxt, err := fs1.MarshalManifest(\".\")\n\tc.Assert(err, check.IsNil)\n\tpdh := arvados.PortableDataHash(mtxt)\n\n\t// Save the collection on cluster z1111.\n\tcoll1, err = conn1.CollectionCreate(rootctx1, arvados.CreateOptions{Attrs: map[string]interface{}{\n\t\t\"manifest_text\": mtxt,\n\t}})\n\tc.Assert(err, check.IsNil)\n\n\t// Share it with the anonymous users group.\n\tvar outLink arvados.Link\n\terr = rootac1.RequestAndDecode(&outLink, \"POST\", \"/arvados/v1/links\", nil,\n\t\tmap[string]interface{}{\"link\": map[string]interface{}{\n\t\t\t\"link_class\": \"permission\",\n\t\t\t\"name\":       \"can_read\",\n\t\t\t\"tail_uuid\":  \"z1111-j7d0g-anonymouspublic\",\n\t\t\t\"head_uuid\":  coll1.UUID,\n\t\t},\n\t\t})\n\tc.Check(err, check.IsNil)\n\n\t// Current user should be z3 anonymous user\n\toutUser, err := anonac3.CurrentUser()\n\tc.Check(err, check.IsNil)\n\tc.Check(outUser.UUID, check.Equals, \"z3333-tpzed-anonymouspublic\")\n\n\t// Get the token uuid\n\tvar outAuth arvados.APIClientAuthorization\n\terr = anonac3.RequestAndDecode(&outAuth, \"GET\", \"/arvados/v1/api_client_authorizations/current\", nil, nil)\n\tc.Check(err, check.IsNil)\n\n\t// Make a v2 token of the z3 anonymous user, and use it on z1\n\t_, anonac1, _ := s.super.ClientsWithToken(\"z1111\", outAuth.TokenV2())\n\toutUser2, err := anonac1.CurrentUser()\n\tc.Check(err, check.IsNil)\n\t// z3 anonymous user will be mapped to the z1 anonymous user\n\tc.Check(outUser2.UUID, check.Equals, \"z1111-tpzed-anonymouspublic\")\n\n\t// Retrieve the collection (which is on z1) using anonymous from cluster z3333.\n\tcoll, err := conn3.CollectionGet(anonctx3, arvados.GetOptions{UUID: coll1.UUID})\n\tc.Check(err, check.IsNil)\n\tc.Check(coll.PortableDataHash, check.Equals, pdh)\n}\n\n// z3333 should forward the locally-issued anonymous user token to its login\n// cluster z1111. 
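(Contrast:\n// a locally-issued container runtime token must not be forwarded; see\n// TestForwardRuntimeTokenToLoginCluster below.) 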
That is no problem because the login cluster controller will\n// map any anonymous user token to its local anonymous user.\n//\n// This needs to work because wb1 has a tendency to slap the local anonymous\n// user token on every request as a reader_token, which gets folded into the\n// request's token list by the controller.\n//\n// Use a z1111 user token and the anonymous token from z3333 passed in as a\n// reader_token to do a request on z3333, asking for the z1111 anonymous user\n// object. The request will be forwarded to the z1111 cluster. The presence of\n// the z3333 anonymous user token should not prohibit the request from being\n// forwarded.\nfunc (s *IntegrationSuite) TestForwardAnonymousTokenToLoginCluster(c *check.C) {\n\tconn1 := s.super.Conn(\"z1111\")\n\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\t_, anonac3, _ := s.super.AnonymousClients(\"z3333\")\n\n\t// Make a user connection to z3333 (using a z1111 user, because that's the login cluster)\n\t_, userac1, _, _ := s.super.UserClients(\"z3333\", rootctx1, c, conn1, \"user@example.com\", true)\n\n\t// Get the anonymous user token for z3333\n\tvar anon3Auth arvados.APIClientAuthorization\n\terr := anonac3.RequestAndDecode(&anon3Auth, \"GET\", \"/arvados/v1/api_client_authorizations/current\", nil, nil)\n\tc.Check(err, check.IsNil)\n\n\tvar userList arvados.UserList\n\twhere := make(map[string]string)\n\twhere[\"uuid\"] = \"z1111-tpzed-anonymouspublic\"\n\terr = userac1.RequestAndDecode(&userList, \"GET\", \"/arvados/v1/users\", nil,\n\t\tmap[string]interface{}{\n\t\t\t\"reader_tokens\": []string{anon3Auth.TokenV2()},\n\t\t\t\"where\":         where,\n\t\t},\n\t)\n\t// The local z3333 anonymous token must be allowed to be forwarded to the login cluster\n\tc.Check(err, check.IsNil)\n\n\tuserac1.AuthToken = \"v2/z1111-gj3su-asdfasdfasdfasd/this-token-does-not-validate-so-anonymous-token-will-be-used-instead\"\n\terr = userac1.RequestAndDecode(&userList, \"GET\", \"/arvados/v1/users\", nil,\n\t\tmap[string]interface{}{\n\t\t\t\"reader_tokens\": []string{anon3Auth.TokenV2()},\n\t\t\t\"where\":         where,\n\t\t},\n\t)\n\tc.Check(err, check.IsNil)\n}\n\n// Get a token from the login cluster (z1111), use it to submit a\n// container request on z2222.\nfunc (s *IntegrationSuite) TestCreateContainerRequestWithFedToken(c *check.C) {\n\tconn1 := s.super.Conn(\"z1111\")\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\t_, ac1, _, _ := s.super.UserClients(\"z1111\", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)\n\n\t// Use ac2 to get the discovery doc with a blank token, so the\n\t// SDK doesn't magically pass the z1111 token to z2222 before\n\t// we're ready to start our test.\n\t_, ac2, _ := s.super.ClientsWithToken(\"z2222\", \"\")\n\tvar dd map[string]interface{}\n\terr := ac2.RequestAndDecode(&dd, \"GET\", \"discovery/v1/apis/arvados/v1/rest\", nil, nil)\n\tc.Assert(err, check.IsNil)\n\n\tvar (\n\t\tbody bytes.Buffer\n\t\treq  *http.Request\n\t\tresp *http.Response\n\t\tu    arvados.User\n\t\tcr   arvados.ContainerRequest\n\t)\n\tjson.NewEncoder(&body).Encode(map[string]interface{}{\n\t\t\"container_request\": map[string]interface{}{\n\t\t\t\"command\":         []string{\"echo\"},\n\t\t\t\"container_image\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n\t\t\t\"cwd\":             \"/\",\n\t\t\t\"output_path\":     \"/tmp\",\n\t\t\t\"mounts\": map[string]arvados.Mount{\n\t\t\t\t\"/tmp\": arvados.Mount{\n\t\t\t\t\tKind:     \"tmp\",\n\t\t\t\t\tCapacity: 1000000,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n
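\t// We're ready now: give ac2 the z1111 user token for the real test.\n\tac2.AuthToken = 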
ac1.AuthToken\n\n\tc.Log(\"...post CR with good (but not yet cached) token\")\n\tcr = arvados.ContainerRequest{}\n\treq, err = http.NewRequest(\"POST\", \"https://\"+ac2.APIHost+\"/arvados/v1/container_requests\", bytes.NewReader(body.Bytes()))\n\tc.Assert(err, check.IsNil)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\terr = ac2.DoAndDecode(&cr, req)\n\tc.Assert(err, check.IsNil)\n\tc.Logf(\"err == %#v\", err)\n\n\tc.Log(\"...get user with good token\")\n\tu = arvados.User{}\n\treq, err = http.NewRequest(\"GET\", \"https://\"+ac2.APIHost+\"/arvados/v1/users/current\", nil)\n\tc.Assert(err, check.IsNil)\n\terr = ac2.DoAndDecode(&u, req)\n\tc.Check(err, check.IsNil)\n\tc.Check(u.UUID, check.Matches, \"z1111-tpzed-.*\")\n\n\tc.Log(\"...post CR with good cached token\")\n\tcr = arvados.ContainerRequest{}\n\treq, err = http.NewRequest(\"POST\", \"https://\"+ac2.APIHost+\"/arvados/v1/container_requests\", bytes.NewReader(body.Bytes()))\n\tc.Assert(err, check.IsNil)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\terr = ac2.DoAndDecode(&cr, req)\n\tc.Check(err, check.IsNil)\n\tc.Check(cr.UUID, check.Matches, \"z2222-.*\")\n\n\tc.Log(\"...post with good cached token ('Bearer ...')\")\n\tcr = arvados.ContainerRequest{}\n\treq, err = http.NewRequest(\"POST\", \"https://\"+ac2.APIHost+\"/arvados/v1/container_requests\", bytes.NewReader(body.Bytes()))\n\tc.Assert(err, check.IsNil)\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\treq.Header.Set(\"Authorization\", \"Bearer \"+ac2.AuthToken)\n\tresp, err = arvados.InsecureHTTPClient.Do(req)\n\tc.Assert(err, check.IsNil)\n\tdefer resp.Body.Close()\n\terr = json.NewDecoder(resp.Body).Decode(&cr)\n\tc.Check(err, check.IsNil)\n\tc.Check(cr.UUID, check.Matches, \"z2222-.*\")\n}\n\nfunc (s *IntegrationSuite) TestCreateContainerRequestWithBadToken(c *check.C) {\n\tconn1 := s.super.Conn(\"z1111\")\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\t_, ac1, _, au := s.super.UserClients(\"z1111\", rootctx1, c, conn1, \"user@example.com\", true)\n\n\ttests := []struct {\n\t\tname         string\n\t\ttoken        string\n\t\texpectedCode int\n\t}{\n\t\t{\"Good token\", ac1.AuthToken, http.StatusOK},\n\t\t{\"Bogus token\", \"abcdef\", http.StatusUnauthorized},\n\t\t{\"v1-looking token\", \"badtoken00badtoken00badtoken00badtoken00b\", http.StatusUnauthorized},\n\t\t{\"v2-looking token\", \"v2/\" + au.UUID + \"/badtoken00badtoken00badtoken00badtoken00b\", http.StatusUnauthorized},\n\t}\n\n\tbody, _ := json.Marshal(map[string]interface{}{\n\t\t\"container_request\": map[string]interface{}{\n\t\t\t\"command\":         []string{\"echo\"},\n\t\t\t\"container_image\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n\t\t\t\"cwd\":             \"/\",\n\t\t\t\"output_path\":     \"/tmp\",\n\t\t\t\"mounts\": map[string]arvados.Mount{\n\t\t\t\t\"/tmp\": arvados.Mount{\n\t\t\t\t\tKind:     \"tmp\",\n\t\t\t\t\tCapacity: 1000000,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName() + \" \" + tt.name)\n\t\tac1.AuthToken = tt.token\n\t\treq, err := http.NewRequest(\"POST\", \"https://\"+ac1.APIHost+\"/arvados/v1/container_requests\", bytes.NewReader(body))\n\t\tc.Assert(err, check.IsNil)\n\t\treq.Header.Set(\"Content-Type\", \"application/json\")\n\t\tresp, err := ac1.Do(req)\n\t\tif c.Check(err, check.IsNil) {\n\t\t\tc.Assert(resp.StatusCode, check.Equals, tt.expectedCode)\n\t\t\tresp.Body.Close()\n\t\t}\n\t}\n}\n\nfunc (s *IntegrationSuite) TestRequestIDHeader(c *check.C) {\n\tconn1 := 
s.super.Conn(\"z1111\")\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\tuserctx1, ac1, _, _ := s.super.UserClients(\"z1111\", rootctx1, c, conn1, \"user@example.com\", true)\n\n\tcoll, err := conn1.CollectionCreate(userctx1, arvados.CreateOptions{})\n\tc.Check(err, check.IsNil)\n\n\ttests := []struct {\n\t\tpath            string\n\t\treqIdProvided   bool\n\t\tnotFoundRequest bool\n\t}{\n\t\t{\"/arvados/v1/collections\", false, false},\n\t\t{\"/arvados/v1/collections\", true, false},\n\t\t{\"/arvados/v1/nonexistant\", false, true},\n\t\t{\"/arvados/v1/nonexistant\", true, true},\n\t\t{\"/arvados/v1/collections/\" + coll.UUID, false, false},\n\t\t{\"/arvados/v1/collections/\" + coll.UUID, true, false},\n\t\t// new code path (lib/controller/router etc) - single-cluster request\n\t\t{\"/arvados/v1/collections/z1111-4zz18-0123456789abcde\", false, true},\n\t\t{\"/arvados/v1/collections/z1111-4zz18-0123456789abcde\", true, true},\n\t\t// new code path (lib/controller/router etc) - federated request\n\t\t{\"/arvados/v1/collections/z2222-4zz18-0123456789abcde\", false, true},\n\t\t{\"/arvados/v1/collections/z2222-4zz18-0123456789abcde\", true, true},\n\t\t// old code path (proxyRailsAPI) - single-cluster request\n\t\t{\"/arvados/v1/containers/z1111-dz642-0123456789abcde\", false, true},\n\t\t{\"/arvados/v1/containers/z1111-dz642-0123456789abcde\", true, true},\n\t\t// old code path (setupProxyRemoteCluster) - federated request\n\t\t{\"/arvados/v1/workflows/z2222-7fd4e-0123456789abcde\", false, true},\n\t\t{\"/arvados/v1/workflows/z2222-7fd4e-0123456789abcde\", true, true},\n\t}\n\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName() + \" \" + tt.path)\n\t\treq, err := http.NewRequest(\"GET\", \"https://\"+ac1.APIHost+tt.path, nil)\n\t\tc.Assert(err, check.IsNil)\n\t\tcustomReqId := \"abcdeG\"\n\t\tif !tt.reqIdProvided {\n\t\t\tc.Assert(req.Header.Get(\"X-Request-Id\"), check.Equals, \"\")\n\t\t} else {\n\t\t\treq.Header.Set(\"X-Request-Id\", customReqId)\n\t\t}\n\t\tresp, err := ac1.Do(req)\n\t\tc.Assert(err, check.IsNil)\n\t\tif tt.notFoundRequest {\n\t\t\tc.Check(resp.StatusCode, check.Equals, http.StatusNotFound)\n\t\t} else {\n\t\t\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\t\t}\n\t\trespHdr := resp.Header.Get(\"X-Request-Id\")\n\t\tif tt.reqIdProvided {\n\t\t\tc.Check(respHdr, check.Equals, customReqId)\n\t\t} else {\n\t\t\tc.Check(respHdr, check.Matches, `req-[0-9a-zA-Z]{20}`)\n\t\t}\n\t\tif tt.notFoundRequest {\n\t\t\tvar jresp httpserver.ErrorResponse\n\t\t\terr := json.NewDecoder(resp.Body).Decode(&jresp)\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tif c.Check(jresp.Errors, check.HasLen, 1) {\n\t\t\t\tc.Check(jresp.Errors[0], check.Matches, `.*\\(`+respHdr+`\\).*`)\n\t\t\t}\n\t\t}\n\t\tresp.Body.Close()\n\t}\n}\n\n// dbConn opens a direct connection to the database. Normally an integration\n// test would not have database access, but in this case we need to check\n// tokens that are secret, so there is no API response that will give them\n// back.\nfunc (s *IntegrationSuite) dbConn(c *check.C, clusterID string) (*sql.DB, *sql.Conn) {\n\tctx := context.Background()\n\tdb, err := sql.Open(\"postgres\", s.super.Cluster(clusterID).PostgreSQL.Connection.String())\n\tc.Assert(err, check.IsNil)\n\n\tconn, err := db.Conn(ctx)\n\tc.Assert(err, check.IsNil)\n\n\trows, err := conn.ExecContext(ctx, `SELECT 1`)\n\tc.Assert(err, check.IsNil)\n\tn, err := rows.RowsAffected()\n\tc.Assert(err, check.IsNil)\n\tc.Assert(n, check.Equals, int64(1))\n\treturn db, conn\n}\n\n
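// Note: \"runtime_token\" is a secret attribute -- the container_requests API\n// does not return it -- so the test below reads it back with a direct SQL\n// query through dbConn.\n//\n// TestRuntimeTokenInCR will 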
test several different tokens in the \"runtime_token\"\n// attribute and check the expected results.\nfunc (s *IntegrationSuite) TestRuntimeTokenInCR(c *check.C) {\n\tdb, dbconn := s.dbConn(c, \"z1111\")\n\tdefer db.Close()\n\tdefer dbconn.Close()\n\tconn1 := s.super.Conn(\"z1111\")\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\tuserctx1, ac1, _, au := s.super.UserClients(\"z1111\", rootctx1, c, conn1, \"user@example.com\", true)\n\n\ttests := []struct {\n\t\tname                 string\n\t\ttoken                string\n\t\texpectAToGetAValidCR bool\n\t\texpectedToken        *string\n\t}{\n\t\t{\"Good token z1111 user\", ac1.AuthToken, true, &ac1.AuthToken},\n\t\t{\"Bogus token\", \"abcdef\", false, nil},\n\t\t{\"v1-looking token\", \"badtoken00badtoken00badtoken00badtoken00b\", false, nil},\n\t\t{\"v2-looking token\", \"v2/\" + au.UUID + \"/badtoken00badtoken00badtoken00badtoken00b\", false, nil},\n\t}\n\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName() + \" \" + tt.name)\n\n\t\trq := map[string]interface{}{\n\t\t\t\"command\":         []string{\"echo\"},\n\t\t\t\"container_image\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n\t\t\t\"cwd\":             \"/\",\n\t\t\t\"output_path\":     \"/tmp\",\n\t\t\t\"mounts\": map[string]arvados.Mount{\n\t\t\t\t\"/tmp\": arvados.Mount{\n\t\t\t\t\tKind:     \"tmp\",\n\t\t\t\t\tCapacity: 1000000,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"runtime_token\": tt.token,\n\t\t}\n\t\tcr, err := conn1.ContainerRequestCreate(userctx1, arvados.CreateOptions{Attrs: rq})\n\t\tif tt.expectAToGetAValidCR {\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tc.Check(cr, check.NotNil)\n\t\t\tc.Check(cr.UUID, check.Not(check.Equals), \"\")\n\t\t}\n\n\t\tif tt.expectedToken == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tc.Logf(\"cr.UUID: %s\", cr.UUID)\n\t\trow := dbconn.QueryRowContext(rootctx1, `SELECT runtime_token from container_requests where uuid=$1`, cr.UUID)\n\t\tc.Check(row, check.NotNil)\n\t\tvar token sql.NullString\n\t\trow.Scan(&token)\n\t\tif c.Check(token.Valid, check.Equals, true) {\n\t\t\tc.Check(token.String, check.Equals, *tt.expectedToken)\n\t\t}\n\t}\n}\n\n// TestIntermediateCluster sends a container request to one cluster with\n// another cluster as the destination, and checks that the tokens are being\n// handled properly.\nfunc (s *IntegrationSuite) TestIntermediateCluster(c *check.C) {\n\tconn1 := s.super.Conn(\"z1111\")\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\tuctx1, ac1, _, _ := s.super.UserClients(\"z1111\", rootctx1, c, conn1, \"user@example.com\", true)\n\n\ttests := []struct {\n\t\tname                 string\n\t\ttoken                string\n\t\texpectedRuntimeToken string\n\t\texpectedUUIDprefix   string\n\t}{\n\t\t{\"Good token z1111 user sending a CR to z2222\", ac1.AuthToken, \"\", \"z2222-xvhdp-\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName() + \" \" + tt.name)\n\t\trq := map[string]interface{}{\n\t\t\t\"command\":         []string{\"echo\"},\n\t\t\t\"container_image\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n\t\t\t\"cwd\":             \"/\",\n\t\t\t\"output_path\":     \"/tmp\",\n\t\t\t\"mounts\": map[string]arvados.Mount{\n\t\t\t\t\"/tmp\": arvados.Mount{\n\t\t\t\t\tKind:     \"tmp\",\n\t\t\t\t\tCapacity: 1000000,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"runtime_token\": tt.token,\n\t\t}\n\t\tcr, err := conn1.ContainerRequestCreate(uctx1, arvados.CreateOptions{ClusterID: \"z2222\", Attrs: rq})\n\n\t\tc.Check(err, check.IsNil)\n
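\t\t// The new CR should live on the destination cluster, as shown by its\n\t\t// UUID prefix.\n\t\tc.Check(strings.HasPrefix(cr.UUID, tt.expectedUUIDprefix), check.Equals, 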
true)\n\t\tc.Check(cr.RuntimeToken, check.Equals, tt.expectedRuntimeToken)\n\t}\n}\n\n// Test for #17785\nfunc (s *IntegrationSuite) TestFederatedApiClientAuthHandling(c *check.C) {\n\trootctx1, rootclnt1, _ := s.super.RootClients(\"z1111\")\n\tconn1 := s.super.Conn(\"z1111\")\n\n\t// Make sure LoginCluster is properly configured\n\tfor _, cls := range []string{\"z1111\", \"z3333\"} {\n\t\tc.Check(\n\t\t\ts.super.Cluster(cls).Login.LoginCluster,\n\t\t\tcheck.Equals, \"z1111\",\n\t\t\tcheck.Commentf(\"incorrect LoginCluster config on cluster %q\", cls))\n\t}\n\t// Get user's UUID & attempt to create a token for it on the remote cluster\n\t_, _, _, user := s.super.UserClients(\"z1111\", rootctx1, c, conn1,\n\t\t\"user@example.com\", true)\n\t_, rootclnt3, _ := s.super.ClientsWithToken(\"z3333\", rootclnt1.AuthToken)\n\tvar resp arvados.APIClientAuthorization\n\terr := rootclnt3.RequestAndDecode(\n\t\t&resp, \"POST\", \"arvados/v1/api_client_authorizations\", nil,\n\t\tmap[string]interface{}{\n\t\t\t\"api_client_authorization\": map[string]string{\n\t\t\t\t\"owner_uuid\": user.UUID,\n\t\t\t},\n\t\t},\n\t)\n\tc.Assert(err, check.IsNil)\n\tnewTok := resp.TokenV2()\n\tc.Assert(newTok, check.Not(check.Equals), \"\")\n\n\t// Confirm the token is from z1111\n\tc.Assert(strings.HasPrefix(newTok, \"v2/z1111-gj3su-\"), check.Equals, true)\n\n\t// Confirm the token works and is from the correct user\n\t_, rootclnt3bis, _ := s.super.ClientsWithToken(\"z3333\", newTok)\n\tvar curUser arvados.User\n\terr = rootclnt3bis.RequestAndDecode(\n\t\t&curUser, \"GET\", \"arvados/v1/users/current\", nil, nil,\n\t)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(curUser.UUID, check.Equals, user.UUID)\n\n\t// Request the ApiClientAuthorization list using the new token\n\t_, userClient, _ := s.super.ClientsWithToken(\"z3333\", newTok)\n\tvar acaLst arvados.APIClientAuthorizationList\n\terr = userClient.RequestAndDecode(\n\t\t&acaLst, \"GET\", \"arvados/v1/api_client_authorizations\", nil, nil,\n\t)\n\tc.Assert(err, check.IsNil)\n}\n\n// Test for bug #18076\nfunc (s *IntegrationSuite) TestStaleCachedUserRecord(c *check.C) {\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\tconn1 := s.super.Conn(\"z1111\")\n\tconn3 := s.super.Conn(\"z3333\")\n\n\t// Make sure LoginCluster is properly configured\n\tfor _, cls := range []string{\"z1111\", \"z3333\"} {\n\t\tc.Check(\n\t\t\ts.super.Cluster(cls).Login.LoginCluster,\n\t\t\tcheck.Equals, \"z1111\",\n\t\t\tcheck.Commentf(\"incorrect LoginCluster config on cluster %q\", cls))\n\t}\n\n\t// Create some users, request them on the federated cluster so they're cached.\n\tvar users []arvados.User\n\tfor userNr := 0; userNr < 2; userNr++ {\n\t\t_, _, _, user := s.super.UserClients(\"z1111\",\n\t\t\trootctx1,\n\t\t\tc,\n\t\t\tconn1,\n\t\t\tfmt.Sprintf(\"user0%d@example.com\", userNr),\n\t\t\ttrue)\n\t\tc.Assert(user.Username, check.Not(check.Equals), \"\")\n\t\tusers = append(users, user)\n\n\t\tlst, err := conn3.UserList(rootctx1, arvados.ListOptions{Limit: -1})\n\t\tc.Assert(err, check.Equals, nil)\n\t\tuserFound := false\n\t\tfor _, fedUser := range lst.Items {\n\t\t\tif fedUser.UUID == user.UUID {\n\t\t\t\tc.Assert(fedUser.Username, check.Equals, user.Username)\n\t\t\t\tuserFound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tc.Assert(userFound, check.Equals, true)\n\t}\n\n\t// Swap the usernames\n\t_, err := conn1.UserUpdate(rootctx1, arvados.UpdateOptions{\n\t\tUUID: users[0].UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"username\": \"\",\n\t\t},\n\t})\n\tc.Assert(err, 
check.Equals, nil)\n\t_, err = conn1.UserUpdate(rootctx1, arvados.UpdateOptions{\n\t\tUUID: users[1].UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"username\": users[0].Username,\n\t\t},\n\t})\n\tc.Assert(err, check.Equals, nil)\n\t_, err = conn1.UserUpdate(rootctx1, arvados.UpdateOptions{\n\t\tUUID: users[0].UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"username\": users[1].Username,\n\t\t},\n\t})\n\tc.Assert(err, check.Equals, nil)\n\n\t// Re-request the list on the federated cluster & check for updates\n\tlst, err := conn3.UserList(rootctx1, arvados.ListOptions{Limit: -1})\n\tc.Assert(err, check.Equals, nil)\n\tvar user0Found, user1Found bool\n\tfor _, user := range lst.Items {\n\t\tif user.UUID == users[0].UUID {\n\t\t\tuser0Found = true\n\t\t\tc.Assert(user.Username, check.Equals, users[1].Username)\n\t\t} else if user.UUID == users[1].UUID {\n\t\t\tuser1Found = true\n\t\t\tc.Assert(user.Username, check.Equals, users[0].Username)\n\t\t}\n\t}\n\tc.Assert(user0Found, check.Equals, true)\n\tc.Assert(user1Found, check.Equals, true)\n}\n\n// Test for bug #16263\nfunc (s *IntegrationSuite) TestListUsers(c *check.C) {\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\tconn1 := s.super.Conn(\"z1111\")\n\tconn3 := s.super.Conn(\"z3333\")\n\tuserctx1, _, _, _ := s.super.UserClients(\"z1111\", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)\n\n\t// Make sure LoginCluster is properly configured\n\tfor _, cls := range []string{\"z1111\", \"z2222\", \"z3333\"} {\n\t\tc.Check(\n\t\t\ts.super.Cluster(cls).Login.LoginCluster,\n\t\t\tcheck.Equals, \"z1111\",\n\t\t\tcheck.Commentf(\"incorrect LoginCluster config on cluster %q\", cls))\n\t}\n\t// Make sure z1111 has users with NULL usernames\n\tlst, err := conn1.UserList(rootctx1, arvados.ListOptions{\n\t\tLimit: math.MaxInt64, // check that large limit works (see #16263)\n\t})\n\tnullUsername := false\n\tc.Assert(err, check.IsNil)\n\tc.Assert(len(lst.Items), check.Not(check.Equals), 0)\n\tfor _, user := range lst.Items {\n\t\tif user.Username == \"\" {\n\t\t\tnullUsername = true\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Assert(nullUsername, check.Equals, true)\n\n\tuser1, err := conn1.UserGetCurrent(userctx1, arvados.GetOptions{})\n\tc.Assert(err, check.IsNil)\n\tc.Check(user1.IsActive, check.Equals, true)\n\n\t// Ask for the user list on z3333 using z1111's system root token\n\tlst, err = conn3.UserList(rootctx1, arvados.ListOptions{Limit: -1})\n\tc.Assert(err, check.IsNil)\n\tfound := false\n\tfor _, user := range lst.Items {\n\t\tif user.UUID == user1.UUID {\n\t\t\tc.Check(user.IsActive, check.Equals, true)\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Check(found, check.Equals, true)\n\n\t// Deactivate user acct on z1111\n\t_, err = conn1.UserUnsetup(rootctx1, arvados.GetOptions{UUID: user1.UUID})\n\tc.Assert(err, check.IsNil)\n\n\t// Get user list from z3333, check the returned z1111 user is\n\t// deactivated\n\tlst, err = conn3.UserList(rootctx1, arvados.ListOptions{Limit: -1})\n\tc.Assert(err, check.IsNil)\n\tfound = false\n\tfor _, user := range lst.Items {\n\t\tif user.UUID == user1.UUID {\n\t\t\tc.Check(user.IsActive, check.Equals, false)\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Check(found, check.Equals, true)\n\n\t// Deactivated user no longer has working token\n\tuser1, err = conn3.UserGetCurrent(userctx1, arvados.GetOptions{})\n\tc.Assert(err, check.ErrorMatches, `.*401 Unauthorized.*`)\n}\n\nfunc (s *IntegrationSuite) TestSetupUserWithVM(c *check.C) {\n\tconn1 := s.super.Conn(\"z1111\")\n\tconn3 := 
s.super.Conn(\"z3333\")\n\trootctx1, rootac1, _ := s.super.RootClients(\"z1111\")\n\n\t// Create user on LoginCluster z1111\n\t_, _, _, user := s.super.UserClients(\"z1111\", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)\n\n\t// Make a new root token (because rootClients() uses SystemRootToken)\n\tvar outAuth arvados.APIClientAuthorization\n\terr := rootac1.RequestAndDecode(&outAuth, \"POST\", \"/arvados/v1/api_client_authorizations\", nil, nil)\n\tc.Check(err, check.IsNil)\n\n\t// Make a v2 root token to communicate with z3333\n\trootctx3, rootac3, _ := s.super.ClientsWithToken(\"z3333\", outAuth.TokenV2())\n\n\t// Create VM on z3333\n\tvar outVM arvados.VirtualMachine\n\terr = rootac3.RequestAndDecode(&outVM, \"POST\", \"/arvados/v1/virtual_machines\", nil,\n\t\tmap[string]interface{}{\"virtual_machine\": map[string]interface{}{\n\t\t\t\"hostname\": \"example\",\n\t\t},\n\t\t})\n\tc.Assert(err, check.IsNil)\n\tc.Check(outVM.UUID[0:5], check.Equals, \"z3333\")\n\n\t// Make sure z3333 user list is up to date\n\t_, err = conn3.UserList(rootctx3, arvados.ListOptions{Limit: 1000})\n\tc.Check(err, check.IsNil)\n\n\t// Try to set up user on z3333 with the VM\n\t_, err = conn3.UserSetup(rootctx3, arvados.UserSetupOptions{UUID: user.UUID, VMUUID: outVM.UUID})\n\tc.Check(err, check.IsNil)\n\n\tvar outLinks arvados.LinkList\n\terr = rootac3.RequestAndDecode(&outLinks, \"GET\", \"/arvados/v1/links\", nil,\n\t\tarvados.ListOptions{\n\t\t\tLimit: 1000,\n\t\t\tFilters: []arvados.Filter{\n\t\t\t\t{\n\t\t\t\t\tAttr:     \"tail_uuid\",\n\t\t\t\t\tOperator: \"=\",\n\t\t\t\t\tOperand:  user.UUID,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tAttr:     \"head_uuid\",\n\t\t\t\t\tOperator: \"=\",\n\t\t\t\t\tOperand:  outVM.UUID,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tAttr:     \"name\",\n\t\t\t\t\tOperator: \"=\",\n\t\t\t\t\tOperand:  \"can_login\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tAttr:     \"link_class\",\n\t\t\t\t\tOperator: \"=\",\n\t\t\t\t\tOperand:  \"permission\",\n\t\t\t\t}}})\n\tc.Check(err, check.IsNil)\n\n\tc.Check(len(outLinks.Items), check.Equals, 1)\n}\n\nfunc (s *IntegrationSuite) TestOIDCAccessTokenAuth(c *check.C) {\n\tconn1 := s.super.Conn(\"z1111\")\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\ts.super.UserClients(\"z1111\", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)\n\n\taccesstoken := s.oidcprovider.ValidAccessToken()\n\n\tfor _, clusterID := range []string{\"z1111\", \"z2222\"} {\n\n\t\tvar coll arvados.Collection\n\n\t\t// Write some file data and create a collection\n\t\t{\n\t\t\tc.Logf(\"save collection to %s\", clusterID)\n\n\t\t\tconn := s.super.Conn(clusterID)\n\t\t\tctx, ac, kc := s.super.ClientsWithToken(clusterID, accesstoken)\n\n\t\t\tfs, err := coll.FileSystem(ac, kc)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tf, err := fs.OpenFile(\"test.txt\", os.O_CREATE|os.O_RDWR, 0777)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\t_, err = io.WriteString(f, \"IntegrationSuite.TestOIDCAccessTokenAuth\")\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\terr = f.Close()\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tmtxt, err := fs.MarshalManifest(\".\")\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tcoll, err = conn.CollectionCreate(ctx, arvados.CreateOptions{Attrs: map[string]interface{}{\n\t\t\t\t\"manifest_text\": mtxt,\n\t\t\t}})\n\t\t\tc.Assert(err, check.IsNil)\n\t\t}\n\n\t\t// Read the collection & file data -- both from the\n\t\t// cluster where it was created, and from the other\n\t\t// cluster.\n\t\tfor _, readClusterID := range []string{\"z1111\", \"z2222\", \"z3333\"} {\n\t\t\tc.Logf(\"retrieve %s 
from %s\", coll.UUID, readClusterID)\n\n\t\t\tconn := s.super.Conn(readClusterID)\n\t\t\tctx, ac, kc := s.super.ClientsWithToken(readClusterID, accesstoken)\n\n\t\t\tuser, err := conn.UserGetCurrent(ctx, arvados.GetOptions{})\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Check(user.FullName, check.Equals, \"Example User\")\n\t\t\treadcoll, err := conn.CollectionGet(ctx, arvados.GetOptions{UUID: coll.UUID})\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Check(readcoll.ManifestText, check.Not(check.Equals), \"\")\n\t\t\tfs, err := readcoll.FileSystem(ac, kc)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tf, err := fs.Open(\"test.txt\")\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tbuf, err := ioutil.ReadAll(f)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Check(buf, check.DeepEquals, []byte(\"IntegrationSuite.TestOIDCAccessTokenAuth\"))\n\t\t}\n\t}\n}\n\n// z3333 should not forward a locally-issued container runtime token,\n// associated with a z1111 user, to its login cluster z1111. z1111\n// would only call back to z3333 and then reject the response because\n// the user ID does not match the token prefix. See\n// dev.arvados.org/issues/18346\nfunc (s *IntegrationSuite) TestForwardRuntimeTokenToLoginCluster(c *check.C) {\n\tdb3, db3conn := s.dbConn(c, \"z3333\")\n\tdefer db3.Close()\n\tdefer db3conn.Close()\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\trootctx3, _, _ := s.super.RootClients(\"z3333\")\n\tconn1 := s.super.Conn(\"z1111\")\n\tconn3 := s.super.Conn(\"z3333\")\n\tuserctx1, _, _, _ := s.super.UserClients(\"z1111\", rootctx1, c, conn1, \"user@example.com\", true)\n\n\tuser1, err := conn1.UserGetCurrent(userctx1, arvados.GetOptions{})\n\tc.Assert(err, check.IsNil)\n\tc.Logf(\"user1 %+v\", user1)\n\n\timageColl, err := conn3.CollectionCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{\n\t\t\"manifest_text\": \". 
d41d8cd98f00b204e9800998ecf8427e+0 0:0:sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar\\n\",\n\t}})\n\tc.Assert(err, check.IsNil)\n\tc.Logf(\"imageColl %+v\", imageColl)\n\n\tcr, err := conn3.ContainerRequestCreate(userctx1, arvados.CreateOptions{Attrs: map[string]interface{}{\n\t\t\"state\":           \"Committed\",\n\t\t\"command\":         []string{\"echo\"},\n\t\t\"container_image\": imageColl.PortableDataHash,\n\t\t\"cwd\":             \"/\",\n\t\t\"output_path\":     \"/tmp\",\n\t\t\"mounts\": map[string]arvados.Mount{\n\t\t\t\"/tmp\": arvados.Mount{\n\t\t\t\tKind:     \"tmp\",\n\t\t\t\tCapacity: 1000000,\n\t\t\t},\n\t\t},\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": arvados.RuntimeConstraints{\n\t\t\tVCPUs: 1,\n\t\t\tRAM:   1000000000,\n\t\t},\n\t}})\n\tc.Assert(err, check.IsNil)\n\tc.Logf(\"container request %+v\", cr)\n\tctr, err := conn3.ContainerLock(rootctx3, arvados.GetOptions{UUID: cr.ContainerUUID})\n\tc.Assert(err, check.IsNil)\n\tc.Logf(\"container %+v\", ctr)\n\n\t// We could use conn3.ContainerAuth() here, but that API\n\t// hasn't been added to sdk/go/arvados/api.go yet.\n\trow := db3conn.QueryRowContext(context.Background(), `SELECT api_token from api_client_authorizations where uuid=$1`, ctr.AuthUUID)\n\tc.Check(row, check.NotNil)\n\tvar val sql.NullString\n\trow.Scan(&val)\n\tc.Assert(val.Valid, check.Equals, true)\n\truntimeToken := \"v2/\" + ctr.AuthUUID + \"/\" + val.String\n\tctrctx, _, _ := s.super.ClientsWithToken(\"z3333\", runtimeToken)\n\tc.Logf(\"container runtime token %+v\", runtimeToken)\n\n\t_, err = conn3.UserGet(ctrctx, arvados.GetOptions{UUID: user1.UUID})\n\tc.Assert(err, check.NotNil)\n\tc.Check(err, check.ErrorMatches, `request failed: .* 401 Unauthorized: cannot use a locally issued token to forward a request to our login cluster \\(z1111\\)`)\n\tc.Check(err, check.Not(check.ErrorMatches), `(?ms).*127\\.0\\.0\\.11.*`)\n}\n\nfunc (s *IntegrationSuite) TestRunTrivialContainer(c *check.C) {\n\toutcoll, _ := s.runContainer(c, \"z1111\", \"\", map[string]interface{}{\n\t\t\"command\":             []string{\"sh\", \"-c\", \"touch \\\"/out/hello world\\\" /out/ohai\"},\n\t\t\"container_image\":     \"busybox:uclibc\",\n\t\t\"cwd\":                 \"/tmp\",\n\t\t\"environment\":         map[string]string{},\n\t\t\"mounts\":              map[string]arvados.Mount{\"/out\": {Kind: \"tmp\", Capacity: 10000}},\n\t\t\"output_path\":         \"/out\",\n\t\t\"runtime_constraints\": arvados.RuntimeConstraints{RAM: 100000000, VCPUs: 1, KeepCacheRAM: 1 << 26},\n\t\t\"priority\":            1,\n\t\t\"state\":               arvados.ContainerRequestStateCommitted,\n\t}, 0, nil)\n\tc.Check(outcoll.ManifestText, check.Matches, `\\. 
d41d8.* 0:0:hello\\\\040world 0:0:ohai\\n`)\n\tc.Check(outcoll.PortableDataHash, check.Equals, \"8fa5dee9231a724d7cf377c5a2f4907c+65\")\n}\n\n// Note that unlike the rest of the suite, this test requires\n// {RuntimeEngine: docker} because:\n//\n//   - the singularity driver relies on ARVADOS_TEST_PRIVESC=sudo to\n//     connect to ports inside the container\n//\n//   - non-passwordless sudo does not work in crunch-run running under\n//     the loopback driver\nfunc (s *IntegrationSuite) TestContainerHTTPProxy(c *check.C) {\n\tctrport := \"12345\"\n\tvar success bool\n\tvar done atomic.Bool\n\tvar ctrLatest atomic.Value\n\tdefer func() { done.Store(true) }()\n\tgo func() {\n\t\tfor range time.NewTicker(time.Second).C {\n\t\t\tif done.Load() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tctr, ok := ctrLatest.Load().(arvados.Container)\n\t\t\tif !ok {\n\t\t\t\tc.Log(\"waiting for onStateUpdate...\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif ctr.State != arvados.ContainerStateRunning {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))\n\t\t\tdefer cancel()\n\t\t\texturl := ctr.PublishedPorts[ctrport].InitialURL + \"test-path\"\n\t\t\tc.Logf(\"poll: %s\", exturl)\n\t\t\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, exturl, nil)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tresp, err := arvados.InsecureHTTPClient.Do(req)\n\t\t\tif err != nil {\n\t\t\t\tc.Logf(\"poll: Do: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\tc.Logf(\"poll: error reading response body: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\tc.Logf(\"poll: resp.Status %q body %q\", resp.Status, body)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(body) == 0 {\n\t\t\t\tc.Log(\"poll: empty response body\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsuccess = true\n\t\t\tc.Logf(\"poll: %s, resp.Body %q\", resp.Status, body)\n\t\t\tbreak\n\t\t}\n\t}()\n\toutcoll, _ := s.runContainer(c, \"z1111\", \"\", map[string]interface{}{\n\t\t// Note \"$req\" and \"$hdr\" each have a trailing \\r here\n\t\t\"command\":             []string{\"sh\", \"-c\", `nc -l -p ` + ctrport + ` -w 30 -e sh -c 'read req; echo >&2 \"$req\"; hdr=zzz; while [ \"${#hdr}\" -gt 1 ]; do read hdr; echo >&2 \"${hdr}\"; done; printf \"HTTP/1.0 200 OK\\\\r\\\\n\\\\r\\\\n%s\\\\n\" \"$req\"' && touch /out/OK`},\n\t\t\"container_image\":     \"busybox:uclibc\",\n\t\t\"cwd\":                 \"/tmp\",\n\t\t\"environment\":         map[string]string{},\n\t\t\"mounts\":              map[string]arvados.Mount{\"/out\": {Kind: \"tmp\", Capacity: 10000}},\n\t\t\"output_path\":         \"/out\",\n\t\t\"runtime_constraints\": arvados.RuntimeConstraints{RAM: 100000000, VCPUs: 1, KeepCacheRAM: 1 << 26, API: true},\n\t\t\"priority\":            1,\n\t\t\"state\":               arvados.ContainerRequestStateCommitted,\n\t\t\"published_ports\": map[string]arvados.RequestPublishedPort{\n\t\t\tctrport: arvados.RequestPublishedPort{\n\t\t\t\tAccess: arvados.PublishedPortAccessPublic,\n\t\t\t\tLabel:  \"testport\",\n\t\t\t}},\n\t}, 0, func(ctr arvados.Container) { ctrLatest.Store(ctr) })\n\tc.Check(outcoll.ManifestText, check.Matches, `\\. 
d41d8.* 0:0:OK\\n`)\n\tc.Check(outcoll.PortableDataHash, check.Equals, \"c1c354b852802ee13ad87da784b9c28c+44\")\n\tc.Check(success, check.Equals, true)\n}\n\nfunc (s *IntegrationSuite) TestContainerInputOnDifferentCluster(c *check.C) {\n\tconn := s.super.Conn(\"z1111\")\n\trootctx, _, _ := s.super.RootClients(\"z1111\")\n\tuserctx, ac, _, _ := s.super.UserClients(\"z1111\", rootctx, c, conn, s.oidcprovider.AuthEmail, true)\n\tz1coll, err := conn.CollectionCreate(userctx, arvados.CreateOptions{Attrs: map[string]interface{}{\n\t\t\"manifest_text\": \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:ocelot\\n\",\n\t}})\n\tc.Assert(err, check.IsNil)\n\n\toutcoll, logcfs := s.runContainer(c, \"z2222\", ac.AuthToken, map[string]interface{}{\n\t\t\"command\":         []string{\"ls\", \"/in\"},\n\t\t\"container_image\": \"busybox:uclibc\",\n\t\t\"cwd\":             \"/tmp\",\n\t\t\"environment\":     map[string]string{},\n\t\t\"mounts\": map[string]arvados.Mount{\n\t\t\t\"/in\":  {Kind: \"collection\", PortableDataHash: z1coll.PortableDataHash},\n\t\t\t\"/out\": {Kind: \"tmp\", Capacity: 10000},\n\t\t},\n\t\t\"output_path\":         \"/out\",\n\t\t\"runtime_constraints\": arvados.RuntimeConstraints{RAM: 100000000, VCPUs: 1, KeepCacheRAM: 1 << 26},\n\t\t\"priority\":            1,\n\t\t\"state\":               arvados.ContainerRequestStateCommitted,\n\t\t\"container_count_max\": 1,\n\t}, -1, nil)\n\tif outcoll.UUID == \"\" {\n\t\tarvmountlog, err := fs.ReadFile(arvados.FS(logcfs), \"/arv-mount.txt\")\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(string(arvmountlog), check.Matches, `(?ms).*cannot use a locally issued token to forward a request to our login cluster \\(z1111\\).*`)\n\t\tc.Skip(\"this use case is not supported yet\")\n\t}\n\tstdout, err := fs.ReadFile(arvados.FS(logcfs), \"/stdout.txt\")\n\tc.Check(err, check.IsNil)\n\tc.Check(string(stdout), check.Equals, \"ocelot\\n\")\n}\n\nfunc (s *IntegrationSuite) runContainer(c *check.C, clusterID string, token string, ctrSpec map[string]interface{}, expectExitCode int, onStateUpdate func(arvados.Container)) (outcoll arvados.Collection, logcfs arvados.CollectionFileSystem) {\n\tconn := s.super.Conn(clusterID)\n\trootctx, _, _ := s.super.RootClients(clusterID)\n\tif token == \"\" {\n\t\t_, ac, _, _ := s.super.UserClients(clusterID, rootctx, c, conn, s.oidcprovider.AuthEmail, true)\n\t\ttoken = ac.AuthToken\n\t}\n\t_, ac, kc := s.super.ClientsWithToken(clusterID, token)\n\n\tc.Log(\"[docker load]\")\n\tout, err := exec.Command(\"docker\", \"load\", \"--input\", arvadostest.BusyboxDockerImage(c)).CombinedOutput()\n\tc.Logf(\"[docker load output] %s\", out)\n\tc.Check(err, check.IsNil)\n\n\tc.Log(\"[arv-keepdocker]\")\n\takd := exec.Command(\"arv-keepdocker\", \"--no-resume\", \"busybox:uclibc\")\n\takd.Env = append(os.Environ(), \"ARVADOS_API_HOST=\"+ac.APIHost, \"ARVADOS_API_HOST_INSECURE=1\", \"ARVADOS_API_TOKEN=\"+ac.AuthToken)\n\tout, err = akd.CombinedOutput()\n\tc.Logf(\"[arv-keepdocker output]\\n%s\", out)\n\tc.Check(err, check.IsNil)\n\n\tvar cr arvados.ContainerRequest\n\terr = ac.RequestAndDecode(&cr, \"POST\", \"/arvados/v1/container_requests\", nil, map[string]interface{}{\n\t\t\"container_request\": ctrSpec,\n\t})\n\tc.Assert(err, check.IsNil)\n\n\tshowlogs := func(collectionID string) arvados.CollectionFileSystem {\n\t\tvar logcoll arvados.Collection\n\t\terr = ac.RequestAndDecode(&logcoll, \"GET\", \"/arvados/v1/collections/\"+collectionID, nil, nil)\n\t\tc.Assert(err, check.IsNil)\n\t\tcfs, err := logcoll.FileSystem(ac, kc)\n\t\tc.Assert(err, 
check.IsNil)\n\t\tfs.WalkDir(arvados.FS(cfs), \"/\", func(path string, d fs.DirEntry, err error) error {\n\t\t\tif d.IsDir() || strings.HasPrefix(path, \"/log for container\") {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tf, err := cfs.Open(path)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tdefer f.Close()\n\t\t\tbuf, err := ioutil.ReadAll(f)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Logf(\"=== %s\\n%s\\n\", path, buf)\n\t\t\treturn nil\n\t\t})\n\t\treturn cfs\n\t}\n\n\tcheckwebdavlogs := func(cr arvados.ContainerRequest) {\n\t\treq, err := http.NewRequest(\"OPTIONS\", \"https://\"+ac.APIHost+\"/arvados/v1/container_requests/\"+cr.UUID+\"/log/\"+cr.ContainerUUID+\"/\", nil)\n\t\tc.Assert(err, check.IsNil)\n\t\treq.Header.Set(\"Origin\", \"http://example.example\")\n\t\tresp, err := ac.Do(req)\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\t\t// Check for duplicate headers -- must use Header[], not Header.Get()\n\t\tc.Check(resp.Header[\"Access-Control-Allow-Origin\"], check.DeepEquals, []string{\"*\"})\n\t}\n\n\tvar ctr arvados.Container\n\tvar lastState arvados.ContainerState\n\tvar status, lastStatus arvados.ContainerStatus\n\tvar allStatus string\n\tcheckstatus := func() {\n\t\terr := ac.RequestAndDecode(&status, \"GET\", \"/arvados/v1/container_requests/\"+cr.UUID+\"/container_status\", nil, nil)\n\t\tc.Assert(err, check.IsNil)\n\t\tif status != lastStatus {\n\t\t\tc.Logf(\"container status: %s, %s\", status.State, status.SchedulingStatus)\n\t\t\tallStatus += fmt.Sprintf(\"%s, %s\\n\", status.State, status.SchedulingStatus)\n\t\t\tlastStatus = status\n\t\t}\n\t}\n\tdeadline := time.Now().Add(time.Minute)\n\tfor cr.State != arvados.ContainerRequestStateFinal || (lastStatus.State != arvados.ContainerStateComplete && lastStatus.State != arvados.ContainerStateCancelled) {\n\t\terr = ac.RequestAndDecode(&cr, \"GET\", \"/arvados/v1/container_requests/\"+cr.UUID, nil, nil)\n\t\tc.Assert(err, check.IsNil)\n\t\tcheckstatus()\n\t\terr = ac.RequestAndDecode(&ctr, \"GET\", \"/arvados/v1/containers/\"+cr.ContainerUUID, nil, nil)\n\t\tif err != nil {\n\t\t\tc.Logf(\"error getting container state: %s\", err)\n\t\t} else if ctr.State != lastState {\n\t\t\tc.Logf(\"container state changed to %q\", ctr.State)\n\t\t\tlastState = ctr.State\n\t\t\tif onStateUpdate != nil {\n\t\t\t\tonStateUpdate(ctr)\n\t\t\t}\n\t\t} else {\n\t\t\tif time.Now().After(deadline) {\n\t\t\t\tc.Errorf(\"timed out, container request state is %q\", cr.State)\n\t\t\t\tif ctr.Log == \"\" {\n\t\t\t\t\tc.Logf(\"=== NO LOG COLLECTION saved for container\")\n\t\t\t\t} else {\n\t\t\t\t\tshowlogs(ctr.Log)\n\t\t\t\t}\n\t\t\t\tc.FailNow()\n\t\t\t}\n\t\t\ttime.Sleep(time.Second / 2)\n\t\t}\n\t}\n\tcheckstatus()\n\tc.Logf(\"cr.CumulativeCost == %f\", cr.CumulativeCost)\n\tc.Check(cr.CumulativeCost, check.Not(check.Equals), 0.0)\n\tif expectExitCode >= 0 {\n\t\tc.Check(ctr.State, check.Equals, arvados.ContainerStateComplete)\n\t\tc.Check(ctr.ExitCode, check.Equals, expectExitCode)\n\t\terr = ac.RequestAndDecode(&outcoll, \"GET\", \"/arvados/v1/collections/\"+cr.OutputUUID, nil, nil)\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(allStatus, check.Matches, `(Queued, Waiting in queue\\.\\n)?`+\n\t\t\t// Occasionally the dispatcher will\n\t\t\t// unlock/retry, and we get state/status from\n\t\t\t// database/dispatcher via separate API calls,\n\t\t\t// so we can also see \"Queued, preparing\n\t\t\t// runtime environment\".\n\t\t\t`((Queued|Locked), (Waiting .*|Container is allocated to an instance and preparing to 
run\\.)\\n)*`+\n\t\t\t`(Running, \\n)?`+\n\t\t\t`Complete, \\n`)\n\t}\n\tlogcfs = showlogs(cr.LogUUID)\n\tcheckwebdavlogs(cr)\n\treturn outcoll, logcfs\n}\n\nfunc (s *IntegrationSuite) TestCUDAContainerReuse(c *check.C) {\n\t// Check that the legacy \"CUDA\" API still works.\n\n\tconn1 := s.super.Conn(\"z1111\")\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\t_, ac1, _, _ := s.super.UserClients(\"z1111\", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)\n\n\tcrInput := map[string]interface{}{\n\t\t\"command\":         []string{\"echo\", \"hello\", \"/bin/sh\", \"-c\", \"'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'\"},\n\t\t\"cwd\":             \"/\",\n\t\t\"environment\":     map[string]interface{}{},\n\t\t\"output_path\":     \"/tmp\",\n\t\t\"output_glob\":     []string{},\n\t\t\"container_image\": \"fa3c1a9cb6783f85f2ecda037e07b8c3+167\",\n\t\t\"mounts\": map[string]arvados.Mount{\n\t\t\t\"/tmp\": arvados.Mount{\n\t\t\t\tKind:     \"tmp\",\n\t\t\t\tCapacity: 1000000,\n\t\t\t},\n\t\t},\n\t\t\"runtime_constraints\": map[string]interface{}{\n\t\t\t\"cuda\": map[string]interface{}{\n\t\t\t\t\"device_count\":        1,\n\t\t\t\t\"driver_version\":      \"11.0\",\n\t\t\t\t\"hardware_capability\": \"9.0\",\n\t\t\t},\n\t\t\t\"ram\":   12000000000,\n\t\t\t\"vcpus\": 4,\n\t\t},\n\t\t\"state\": \"Committed\",\n\t}\n\n\tvar outCR arvados.ContainerRequest\n\terr := ac1.RequestAndDecode(&outCR, \"POST\", \"/arvados/v1/container_requests\", nil,\n\t\tmap[string]interface{}{\"container_request\": crInput})\n\tc.Check(err, check.IsNil)\n\n\tc.Check(outCR.RuntimeConstraints.GPU.Stack, check.Equals, \"cuda\")\n\tc.Check(outCR.RuntimeConstraints.GPU.DriverVersion, check.Equals, \"11.0\")\n\tc.Check(outCR.RuntimeConstraints.GPU.HardwareTarget, check.DeepEquals, []string{\"9.0\"})\n\tc.Check(outCR.RuntimeConstraints.GPU.DeviceCount, check.Equals, 1)\n\tc.Check(outCR.RuntimeConstraints.GPU.VRAM, check.Equals, int64(0))\n\n\tvar outCR2 arvados.ContainerRequest\n\terr = ac1.RequestAndDecode(&outCR2, \"POST\", \"/arvados/v1/container_requests\", nil,\n\t\tmap[string]interface{}{\"container_request\": crInput})\n\tc.Check(err, check.IsNil)\n\n\tc.Check(outCR.ContainerUUID, check.Equals, outCR2.ContainerUUID)\n}\n\nfunc (s *IntegrationSuite) TestGPUContainerReuse(c *check.C) {\n\t// Test container reuse using the \"GPU\" API\n\tconn1 := s.super.Conn(\"z1111\")\n\trootctx1, _, _ := s.super.RootClients(\"z1111\")\n\t_, ac1, _, _ := s.super.UserClients(\"z1111\", rootctx1, c, conn1, s.oidcprovider.AuthEmail, true)\n\n\tcrInput := map[string]interface{}{\n\t\t\"command\":         []string{\"echo\", \"hello\", \"/bin/sh\", \"-c\", \"'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'\"},\n\t\t\"cwd\":             \"/\",\n\t\t\"environment\":     map[string]interface{}{},\n\t\t\"output_path\":     \"/tmp\",\n\t\t\"output_glob\":     []string{},\n\t\t\"container_image\": \"fa3c1a9cb6783f85f2ecda037e07b8c3+167\",\n\t\t\"mounts\": map[string]arvados.Mount{\n\t\t\t\"/tmp\": arvados.Mount{\n\t\t\t\tKind:     \"tmp\",\n\t\t\t\tCapacity: 1000000,\n\t\t\t},\n\t\t},\n\t\t\"runtime_constraints\": map[string]interface{}{\n\t\t\t\"gpu\": map[string]interface{}{\n\t\t\t\t\"stack\":           \"cuda\",\n\t\t\t\t\"device_count\":    1,\n\t\t\t\t\"driver_version\":  \"11.0\",\n\t\t\t\t\"hardware_target\": []string{\"9.0\"},\n\t\t\t\t\"vram\":            
8000000000,\n\t\t\t},\n\t\t\t\"ram\":   12000000000,\n\t\t\t\"vcpus\": 4,\n\t\t},\n\t\t\"state\": \"Committed\",\n\t}\n\n\tvar outCR arvados.ContainerRequest\n\terr := ac1.RequestAndDecode(&outCR, \"POST\", \"/arvados/v1/container_requests\", nil,\n\t\tmap[string]interface{}{\"container_request\": crInput})\n\tc.Check(err, check.IsNil)\n\n\tc.Check(outCR.RuntimeConstraints.GPU.Stack, check.Equals, \"cuda\")\n\tc.Check(outCR.RuntimeConstraints.GPU.DriverVersion, check.Equals, \"11.0\")\n\tc.Check(outCR.RuntimeConstraints.GPU.HardwareTarget, check.DeepEquals, []string{\"9.0\"})\n\tc.Check(outCR.RuntimeConstraints.GPU.DeviceCount, check.Equals, 1)\n\tc.Check(outCR.RuntimeConstraints.GPU.VRAM, check.Equals, int64(8000000000))\n\n\tvar outCR2 arvados.ContainerRequest\n\terr = ac1.RequestAndDecode(&outCR2, \"POST\", \"/arvados/v1/container_requests\", nil,\n\t\tmap[string]interface{}{\"container_request\": crInput})\n\tc.Check(err, check.IsNil)\n\n\tc.Check(outCR.ContainerUUID, check.Equals, outCR2.ContainerUUID)\n}\n"
  },
  {
    "path": "lib/controller/localdb/authorized_key.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\n// AuthorizedKeyCreate checks that the provided public key is valid,\n// then proxies to railsproxy.\nfunc (conn *Conn) AuthorizedKeyCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.AuthorizedKey, error) {\n\tif err := validateKey(opts.Attrs); err != nil {\n\t\treturn arvados.AuthorizedKey{}, httpserver.ErrorWithStatus(err, http.StatusBadRequest)\n\t}\n\treturn conn.railsProxy.AuthorizedKeyCreate(ctx, opts)\n}\n\n// AuthorizedKeyUpdate checks that the provided public key is valid,\n// then proxies to railsproxy.\nfunc (conn *Conn) AuthorizedKeyUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.AuthorizedKey, error) {\n\tif err := validateKey(opts.Attrs); err != nil {\n\t\treturn arvados.AuthorizedKey{}, httpserver.ErrorWithStatus(err, http.StatusBadRequest)\n\t}\n\treturn conn.railsProxy.AuthorizedKeyUpdate(ctx, opts)\n}\n\nfunc validateKey(attrs map[string]interface{}) error {\n\tin, _ := attrs[\"public_key\"].(string)\n\tif in == \"\" {\n\t\treturn nil\n\t}\n\tin = strings.TrimSpace(in)\n\tif strings.IndexAny(in, \"\\r\\n\") >= 0 {\n\t\treturn errors.New(\"Public key does not appear to be valid: extra data after key\")\n\t}\n\tpubkey, _, _, rest, err := ssh.ParseAuthorizedKey([]byte(in))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Public key does not appear to be valid: %w\", err)\n\t}\n\tif len(rest) > 0 {\n\t\treturn errors.New(\"Public key does not appear to be valid: extra data after key\")\n\t}\n\tif i := strings.Index(in, \" \"); i < 0 {\n\t\treturn errors.New(\"Public key does not appear to be valid: no leading type field\")\n\t} else if in[:i] != pubkey.Type() {\n\t\treturn fmt.Errorf(\"Public key does not appear to be valid: leading type field %q does not match actual key type %q\", in[:i], pubkey.Type())\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "lib/controller/localdb/authorized_key_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t_ \"embed\"\n\t\"errors\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t. \"gopkg.in/check.v1\"\n)\n\nvar _ = Suite(&authorizedKeySuite{})\n\ntype authorizedKeySuite struct {\n\tlocaldbSuite\n}\n\n//go:embed testdata/rsa.pub\nvar testPubKey string\n\nfunc (s *authorizedKeySuite) TestAuthorizedKeyCreate(c *C) {\n\tak, err := s.localdb.AuthorizedKeyCreate(s.userctx, arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"name\":     \"testkey\",\n\t\t\t\"key_type\": \"SSH\",\n\t\t}})\n\tc.Assert(err, IsNil)\n\tc.Check(ak.KeyType, Equals, \"SSH\")\n\tdefer s.localdb.AuthorizedKeyDelete(s.userctx, arvados.DeleteOptions{UUID: ak.UUID})\n\tupdated, err := s.localdb.AuthorizedKeyUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID:  ak.UUID,\n\t\tAttrs: map[string]interface{}{\"name\": \"testkeyrenamed\"}})\n\tc.Check(err, IsNil)\n\tc.Check(updated.UUID, Equals, ak.UUID)\n\tc.Check(updated.Name, Equals, \"testkeyrenamed\")\n\tc.Check(updated.ModifiedByUserUUID, Equals, arvadostest.ActiveUserUUID)\n\n\t_, err = s.localdb.AuthorizedKeyCreate(s.userctx, arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"name\":       \"testkey\",\n\t\t\t\"public_key\": \"ssh-dsa boguskey\\n\",\n\t\t}})\n\tc.Check(err, ErrorMatches, `Public key does not appear to be valid: ssh: no key found; .*`)\n\t_, err = s.localdb.AuthorizedKeyUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: ak.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"public_key\": strings.Replace(testPubKey, \"A\", \"#\", 1),\n\t\t}})\n\tc.Check(err, ErrorMatches, `Public key does not appear to be valid: ssh: no key found; .*`)\n\t_, err = s.localdb.AuthorizedKeyUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: ak.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"public_key\": testPubKey + testPubKey,\n\t\t}})\n\tc.Check(err, ErrorMatches, `Public key does not appear to be valid: extra data after key`)\n\t_, err = s.localdb.AuthorizedKeyUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: ak.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"public_key\": testPubKey + \"# extra data\\n\",\n\t\t}})\n\tc.Check(err, ErrorMatches, `Public key does not appear to be valid: extra data after key`)\n\t_, err = s.localdb.AuthorizedKeyUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: ak.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"public_key\": strings.Replace(testPubKey, \"ssh-rsa\", \"ssh-dsa\", 1),\n\t\t}})\n\tc.Check(err, ErrorMatches, `Public key does not appear to be valid: leading type field \"ssh-dsa\" does not match actual key type \"ssh-rsa\"`)\n\tvar se httpserver.HTTPStatusError\n\tif c.Check(errors.As(err, &se), Equals, true) {\n\t\tc.Check(se.HTTPStatus(), Equals, http.StatusBadRequest)\n\t}\n\n\tdirents, err := os.ReadDir(\"./testdata\")\n\tc.Assert(err, IsNil)\n\tc.Assert(dirents, Not(HasLen), 0)\n\tfor _, dirent := range dirents {\n\t\tif !strings.HasSuffix(dirent.Name(), \".pub\") {\n\t\t\tcontinue\n\t\t}\n\t\tpubkeyfile := \"./testdata/\" + dirent.Name()\n\t\tc.Logf(\"checking public key from %s\", pubkeyfile)\n\t\tpubkey, err := ioutil.ReadFile(pubkeyfile)\n\t\tif !c.Check(err, IsNil) {\n\t\t\tcontinue\n\t\t}\n\t\tupdated, err := s.localdb.AuthorizedKeyUpdate(s.userctx, 
arvados.UpdateOptions{\n\t\t\tUUID: ak.UUID,\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"public_key\": string(pubkey),\n\t\t\t}})\n\t\tc.Check(err, IsNil)\n\t\tc.Check(updated.PublicKey, Equals, string(pubkey))\n\n\t\t_, err = s.localdb.AuthorizedKeyUpdate(s.userctx, arvados.UpdateOptions{\n\t\t\tUUID: ak.UUID,\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"public_key\": strings.Replace(string(pubkey), \" \", \"-bogus \", 1),\n\t\t\t}})\n\t\tc.Check(err, ErrorMatches, `.*type field \".*\" does not match actual key type \".*\"`)\n\t}\n\n\tdeleted, err := s.localdb.AuthorizedKeyDelete(s.userctx, arvados.DeleteOptions{UUID: ak.UUID})\n\tc.Check(err, IsNil)\n\tc.Check(deleted.UUID, Equals, ak.UUID)\n}\n"
  },
  {
    "path": "lib/controller/localdb/collection.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\n// CollectionGet defers to railsProxy for everything except blob\n// signatures.\nfunc (conn *Conn) CollectionGet(ctx context.Context, opts arvados.GetOptions) (arvados.Collection, error) {\n\tconn.logActivity(ctx)\n\tif len(opts.Select) > 0 {\n\t\t// We need to know IsTrashed and TrashAt to implement\n\t\t// signing properly, even if the caller doesn't want\n\t\t// them.\n\t\topts.Select = append([]string{\"is_trashed\", \"trash_at\"}, opts.Select...)\n\t}\n\tresp, err := conn.railsProxy.CollectionGet(ctx, opts)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tconn.signCollection(ctx, &resp)\n\treturn resp, nil\n}\n\n// CollectionList defers to railsProxy for everything except blob\n// signatures.\nfunc (conn *Conn) CollectionList(ctx context.Context, opts arvados.ListOptions) (arvados.CollectionList, error) {\n\tconn.logActivity(ctx)\n\tif len(opts.Select) > 0 {\n\t\t// We need to know IsTrashed and TrashAt to implement\n\t\t// signing properly, even if the caller doesn't want\n\t\t// them.\n\t\topts.Select = append([]string{\"is_trashed\", \"trash_at\"}, opts.Select...)\n\t}\n\tresp, err := conn.railsProxy.CollectionList(ctx, opts)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tfor i := range resp.Items {\n\t\tconn.signCollection(ctx, &resp.Items[i])\n\t}\n\treturn resp, nil\n}\n\n// CollectionCreate defers to railsProxy for everything except blob\n// signatures and vocabulary checking.\nfunc (conn *Conn) CollectionCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Collection, error) {\n\tconn.logActivity(ctx)\n\terr := conn.checkProperties(ctx, opts.Attrs[\"properties\"])\n\tif err != nil {\n\t\treturn arvados.Collection{}, err\n\t}\n\tif len(opts.Select) > 0 {\n\t\t// We need to know IsTrashed and TrashAt to implement\n\t\t// signing properly, even if the caller doesn't want\n\t\t// them.\n\t\topts.Select = append([]string{\"is_trashed\", \"trash_at\"}, opts.Select...)\n\t}\n\tif opts.Attrs, err = conn.applyReplaceFilesOption(ctx, \"\", opts.Attrs, opts.ReplaceFiles); err != nil {\n\t\treturn arvados.Collection{}, err\n\t}\n\tif opts.Attrs, err = conn.applyReplaceSegmentsOption(ctx, \"\", opts.Attrs, opts.ReplaceSegments); err != nil {\n\t\treturn arvados.Collection{}, err\n\t}\n\tresp, err := conn.railsProxy.CollectionCreate(ctx, opts)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tconn.signCollection(ctx, &resp)\n\treturn resp, nil\n}\n\n// CollectionUpdate defers to railsProxy for everything except blob\n// signatures and vocabulary checking.\nfunc (conn *Conn) CollectionUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Collection, error) {\n\tconn.logActivity(ctx)\n\terr := conn.checkProperties(ctx, opts.Attrs[\"properties\"])\n\tif err != nil {\n\t\treturn arvados.Collection{}, err\n\t}\n\tif len(opts.Select) > 0 {\n\t\t// We need to know IsTrashed and TrashAt to implement\n\t\t// signing properly, even if the caller doesn't want\n\t\t// them.\n\t\topts.Select = append([]string{\"is_trashed\", \"trash_at\"}, opts.Select...)\n\t}\n\terr = conn.lockUUID(ctx, 
opts.UUID)\n\tif err != nil {\n\t\treturn arvados.Collection{}, err\n\t}\n\tif opts.Attrs, err = conn.applyReplaceFilesOption(ctx, opts.UUID, opts.Attrs, opts.ReplaceFiles); err != nil {\n\t\treturn arvados.Collection{}, err\n\t}\n\tif opts.Attrs, err = conn.applyReplaceSegmentsOption(ctx, opts.UUID, opts.Attrs, opts.ReplaceSegments); err != nil {\n\t\treturn arvados.Collection{}, err\n\t}\n\tresp, err := conn.railsProxy.CollectionUpdate(ctx, opts)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tconn.signCollection(ctx, &resp)\n\treturn resp, nil\n}\n\nfunc (conn *Conn) signCollection(ctx context.Context, coll *arvados.Collection) {\n\tif coll.IsTrashed || coll.ManifestText == \"\" || !conn.cluster.Collections.BlobSigning {\n\t\treturn\n\t}\n\tvar token string\n\tif creds, ok := auth.FromContext(ctx); ok && len(creds.Tokens) > 0 {\n\t\ttoken = creds.Tokens[0]\n\t}\n\tif token == \"\" {\n\t\treturn\n\t}\n\tttl := conn.cluster.Collections.BlobSigningTTL.Duration()\n\texp := time.Now().Add(ttl)\n\tif coll.TrashAt != nil && !coll.TrashAt.IsZero() && coll.TrashAt.Before(exp) {\n\t\texp = *coll.TrashAt\n\t}\n\tcoll.ManifestText = arvados.SignManifest(coll.ManifestText, token, exp, ttl, []byte(conn.cluster.Collections.BlobSigningKey))\n}\n\nfunc (conn *Conn) lockUUID(ctx context.Context, uuid string) error {\n\ttx, err := ctrlctx.CurrentTx(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = tx.ExecContext(ctx, `insert into uuid_locks (uuid) values ($1) on conflict (uuid) do update set n=uuid_locks.n+1`, uuid)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// If replaceFiles is non-empty, populate attrs[\"manifest_text\"] by\n// starting with the content of fromUUID (or an empty collection if\n// fromUUID is empty) and applying the specified file/directory\n// replacements.\n//\n// Return value is the (possibly modified) attrs map.\nfunc (conn *Conn) applyReplaceFilesOption(ctx context.Context, fromUUID string, attrs map[string]interface{}, replaceFiles map[string]string) (map[string]interface{}, error) {\n\tif len(replaceFiles) == 0 {\n\t\treturn attrs, nil\n\t}\n\n\tprovidedManifestText, _ := attrs[\"manifest_text\"].(string)\n\tif providedManifestText != \"\" {\n\t\tused := false\n\t\tfor _, src := range replaceFiles {\n\t\t\tif strings.HasPrefix(src, \"manifest_text/\") {\n\t\t\t\tused = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !used {\n\t\t\treturn nil, httpserver.Errorf(http.StatusBadRequest, \"invalid request: attrs['manifest_text'] was provided, but would not be used because it is not referenced by any 'replace_files' entry\")\n\t\t}\n\t}\n\n\t// Load the current collection (if any) and set up an\n\t// in-memory filesystem.\n\tvar dst arvados.Collection\n\tif _, replacingRoot := replaceFiles[\"/\"]; !replacingRoot && fromUUID != \"\" {\n\t\tsrc, err := conn.CollectionGet(ctx, arvados.GetOptions{UUID: fromUUID})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdst = src\n\t}\n\tdstfs, err := dst.FileSystem(&arvados.StubClient{}, &arvados.StubClient{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Sort replacements by source collection to avoid redundant\n\t// reloads when a source collection is used more than\n\t// once. 
Note empty sources (which mean \"delete target path\")\n\t// sort first.\n\tdstTodo := make([]string, 0, len(replaceFiles))\n\t{\n\t\tsrcid := make(map[string]string, len(replaceFiles))\n\t\tfor dst, src := range replaceFiles {\n\t\t\tdstTodo = append(dstTodo, dst)\n\t\t\tif i := strings.IndexRune(src, '/'); i > 0 {\n\t\t\t\tsrcid[dst] = src[:i]\n\t\t\t}\n\t\t}\n\t\tsort.Slice(dstTodo, func(i, j int) bool {\n\t\t\treturn srcid[dstTodo[i]] < srcid[dstTodo[j]]\n\t\t})\n\t}\n\n\t// Reject attempt to replace a node as well as its descendant\n\t// (e.g., a/ and a/b/), which is unsupported, except where the\n\t// source for a/ is empty (i.e., delete).\n\tfor _, dst := range dstTodo {\n\t\tif dst != \"/\" && (strings.HasSuffix(dst, \"/\") ||\n\t\t\tstrings.HasSuffix(dst, \"/.\") ||\n\t\t\tstrings.HasSuffix(dst, \"/..\") ||\n\t\t\tstrings.Contains(dst, \"//\") ||\n\t\t\tstrings.Contains(dst, \"/./\") ||\n\t\t\tstrings.Contains(dst, \"/../\") ||\n\t\t\t!strings.HasPrefix(dst, \"/\")) {\n\t\t\treturn nil, httpserver.Errorf(http.StatusBadRequest, \"invalid replace_files target: %q\", dst)\n\t\t}\n\t\tfor i := 0; i < len(dst)-1; i++ {\n\t\t\tif dst[i] != '/' {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\touterdst := dst[:i]\n\t\t\tif outerdst == \"\" {\n\t\t\t\touterdst = \"/\"\n\t\t\t}\n\t\t\tif outersrc := replaceFiles[outerdst]; outersrc != \"\" {\n\t\t\t\treturn nil, httpserver.Errorf(http.StatusBadRequest, \"replace_files: cannot operate on target %q inside non-empty target %q\", dst, outerdst)\n\t\t\t}\n\t\t}\n\t}\n\n\tcurrent := make(map[string]*arvados.Subtree)\n\t// Check whether any sources are \"current/...\", and if so,\n\t// populate current with the relevant snapshot.  Doing this\n\t// ahead of time, before making any modifications to dstfs\n\t// below, ensures that even instructions like {/a: current/b,\n\t// b: current/a} will be handled correctly.\n\tfor _, src := range replaceFiles {\n\t\tif strings.HasPrefix(src, \"current/\") && current[src] == nil {\n\t\t\tcurrent[src], err = arvados.Snapshot(dstfs, src[8:])\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\treturn nil, httpserver.Errorf(http.StatusBadRequest, \"replace_files: nonexistent source %q\", src)\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"%s: %w\", src, err)\n\t\t\t}\n\t\t}\n\t}\n\n\tvar srcidloaded string\n\tvar srcfs arvados.FileSystem\n\t// Apply the requested replacements.\n\tfor _, dst := range dstTodo {\n\t\tsrc := replaceFiles[dst]\n\t\tif src == \"\" {\n\t\t\tif dst == \"/\" {\n\t\t\t\t// In this case we started with a\n\t\t\t\t// blank manifest, so there can't be\n\t\t\t\t// anything to delete.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := dstfs.RemoveAll(dst)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"RemoveAll(%s): %w\", dst, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tvar snap *arvados.Subtree\n\t\tsrcspec := strings.SplitN(src, \"/\", 2)\n\t\tsrcid, srcpath := srcspec[0], \"/\"\n\t\tif len(srcspec) == 2 && srcspec[1] != \"\" {\n\t\t\tsrcpath = srcspec[1]\n\t\t}\n\t\tswitch {\n\t\tcase srcid == \"current\":\n\t\t\tsnap = current[src]\n\t\t\tif snap == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"internal error: current[%s] == nil\", src)\n\t\t\t}\n\t\tcase srcid == \"manifest_text\":\n\t\t\tif srcidloaded == srcid {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsrcfs = nil\n\t\t\tsrccoll := &arvados.Collection{ManifestText: providedManifestText}\n\t\t\tsrcfs, err = srccoll.FileSystem(&arvados.StubClient{}, &arvados.StubClient{})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tsrcidloaded = srcid\n\t\tcase 
arvadosclient.PDHMatch(srcid):\n\t\t\tif srcidloaded == srcid {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsrcfs = nil\n\t\t\tsrccoll, err := conn.CollectionGet(ctx, arvados.GetOptions{UUID: srcid})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// We use StubClient here because we don't\n\t\t\t// want srcfs to read/write any file data or\n\t\t\t// sync collection state to/from the database.\n\t\t\tsrcfs, err = srccoll.FileSystem(&arvados.StubClient{}, &arvados.StubClient{})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tsrcidloaded = srcid\n\t\tdefault:\n\t\t\treturn nil, httpserver.Errorf(http.StatusBadRequest, \"invalid source %q for replace_files[%q]: must be \\\"\\\" or \\\"SRC\\\" or \\\"SRC/path\\\" where SRC is \\\"current\\\", \\\"manifest_text\\\", or a portable data hash\", src, dst)\n\t\t}\n\t\tif snap == nil {\n\t\t\tsnap, err = arvados.Snapshot(srcfs, srcpath)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, httpserver.Errorf(http.StatusBadRequest, \"error getting snapshot of %q from %q: %w\", srcpath, srcid, err)\n\t\t\t}\n\t\t}\n\t\t// Create intermediate dirs, in case dst is\n\t\t// \"newdir1/newdir2/dst\".\n\t\tfor i := 1; i < len(dst)-1; i++ {\n\t\t\tif dst[i] == '/' {\n\t\t\t\terr = dstfs.Mkdir(dst[:i], 0777)\n\t\t\t\tif err != nil && !os.IsExist(err) {\n\t\t\t\t\treturn nil, httpserver.Errorf(http.StatusBadRequest, \"error creating parent dirs for %q: %w\", dst, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\terr = arvados.Splice(dstfs, dst, snap)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error splicing snapshot onto path %q: %w\", dst, err)\n\t\t}\n\t}\n\tmtxt, err := dstfs.MarshalManifest(\".\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif attrs == nil {\n\t\tattrs = make(map[string]interface{}, 1)\n\t}\n\tattrs[\"manifest_text\"] = mtxt\n\treturn attrs, nil\n}\n\nfunc (conn *Conn) applyReplaceSegmentsOption(ctx context.Context, fromUUID string, attrs map[string]interface{}, replaceSegments map[arvados.BlockSegment]arvados.BlockSegment) (map[string]interface{}, error) {\n\tif len(replaceSegments) == 0 {\n\t\treturn attrs, nil\n\t}\n\n\t// Load the current collection content (unless it's being\n\t// replaced by the provided manifest_text).\n\tvar dst arvados.Collection\n\tif txt, ok := attrs[\"manifest_text\"].(string); ok {\n\t\tdst.ManifestText = txt\n\t} else if fromUUID != \"\" {\n\t\tsrc, err := conn.CollectionGet(ctx, arvados.GetOptions{UUID: fromUUID})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdst = src\n\t}\n\tdstfs, err := dst.FileSystem(&arvados.StubClient{}, &arvados.StubClient{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif changed, err := dstfs.ReplaceSegments(replaceSegments); err != nil {\n\t\treturn nil, httpserver.Errorf(http.StatusBadRequest, \"replace_segments: %s\", err)\n\t} else if changed {\n\t\ttxt, err := dstfs.MarshalManifest(\".\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif attrs == nil {\n\t\t\tattrs = make(map[string]interface{})\n\t\t}\n\t\tattrs[\"manifest_text\"] = txt\n\t}\n\treturn attrs, nil\n}\n"
  },
  {
    "path": "lib/controller/localdb/collection_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"net/http\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&CollectionSuite{})\n\ntype CollectionSuite struct {\n\tlocaldbSuite\n}\n\nfunc (s *CollectionSuite) TestCollectionCreateAndUpdateWithProperties(c *check.C) {\n\ts.setUpVocabulary(c, \"\")\n\n\ttests := []struct {\n\t\tname    string\n\t\tprops   map[string]interface{}\n\t\tsuccess bool\n\t}{\n\t\t{\"Invalid prop key\", map[string]interface{}{\"Priority\": \"IDVALIMPORTANCES1\"}, false},\n\t\t{\"Invalid prop value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"high\"}, false},\n\t\t{\"Valid prop key & value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"IDVALIMPORTANCES1\"}, true},\n\t\t{\"Empty properties\", map[string]interface{}{}, true},\n\t}\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName()+\" \", tt.name)\n\n\t\t// Create with properties\n\t\tcoll, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{\n\t\t\tSelect: []string{\"uuid\", \"properties\"},\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"properties\": tt.props,\n\t\t\t}})\n\t\tif tt.success {\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(coll.Properties, check.DeepEquals, tt.props)\n\t\t} else {\n\t\t\tc.Assert(err, check.NotNil)\n\t\t}\n\n\t\t// Create, then update with properties\n\t\tcoll, err = s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{})\n\t\tc.Assert(err, check.IsNil)\n\t\tcoll, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\t\tUUID:   coll.UUID,\n\t\t\tSelect: []string{\"uuid\", \"properties\"},\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"properties\": tt.props,\n\t\t\t}})\n\t\tif tt.success {\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(coll.Properties, check.DeepEquals, tt.props)\n\t\t} else {\n\t\t\tc.Assert(err, check.NotNil)\n\t\t}\n\t}\n}\n\nfunc (s *CollectionSuite) TestSignatures(c *check.C) {\n\tresp, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: arvadostest.FooCollection})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.ManifestText, check.Matches, `(?ms).* acbd[^ ]*\\+3\\+A[0-9a-f]+@[0-9a-f]+ 0:.*`)\n\ts.checkSignatureExpiry(c, resp.ManifestText, time.Hour*24*7*2)\n\n\tresp, err = s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: arvadostest.FooCollection, Select: []string{\"manifest_text\"}})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.ManifestText, check.Matches, `(?ms).* acbd[^ ]*\\+3\\+A[0-9a-f]+@[0-9a-f]+ 0:.*`)\n\n\tlresp, err := s.localdb.CollectionList(s.userctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{\"uuid\", \"=\", arvadostest.FooCollection}}})\n\tc.Check(err, check.IsNil)\n\tif c.Check(lresp.Items, check.HasLen, 1) {\n\t\tc.Check(lresp.Items[0].UUID, check.Equals, arvadostest.FooCollection)\n\t\tc.Check(lresp.Items[0].ManifestText, check.Equals, \"\")\n\t\tc.Check(lresp.Items[0].UnsignedManifestText, check.Equals, \"\")\n\t}\n\n\tlresp, err = s.localdb.CollectionList(s.userctx, 
arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{\"uuid\", \"=\", arvadostest.FooCollection}}, Select: []string{\"manifest_text\"}})\n\tc.Check(err, check.IsNil)\n\tif c.Check(lresp.Items, check.HasLen, 1) {\n\t\tc.Check(lresp.Items[0].ManifestText, check.Matches, `(?ms).* acbd[^ ]*\\+3\\+A[0-9a-f]+@[0-9a-f]+ 0:.*`)\n\t\tc.Check(lresp.Items[0].UnsignedManifestText, check.Equals, \"\")\n\t}\n\n\tlresp, err = s.localdb.CollectionList(s.userctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{\"uuid\", \"=\", arvadostest.FooCollection}}, Select: []string{\"unsigned_manifest_text\"}})\n\tc.Check(err, check.IsNil)\n\tif c.Check(lresp.Items, check.HasLen, 1) {\n\t\tc.Check(lresp.Items[0].ManifestText, check.Equals, \"\")\n\t\tc.Check(lresp.Items[0].UnsignedManifestText, check.Matches, `(?ms).* acbd[^ ]*\\+3 0:.*`)\n\t}\n\n\t// early trash date causes lower signature TTL (even if\n\t// trash_at and is_trashed fields are unselected)\n\ttrashed, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{\n\t\tSelect: []string{\"uuid\", \"manifest_text\"},\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"manifest_text\": \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\",\n\t\t\t\"trash_at\":      time.Now().UTC().Add(time.Hour),\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\ts.checkSignatureExpiry(c, trashed.ManifestText, time.Hour)\n\tresp, err = s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: trashed.UUID})\n\tc.Assert(err, check.IsNil)\n\ts.checkSignatureExpiry(c, resp.ManifestText, time.Hour)\n\n\t// distant future trash date does not cause higher signature TTL\n\ttrashed, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: trashed.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"trash_at\": time.Now().UTC().Add(time.Hour * 24 * 365),\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\ts.checkSignatureExpiry(c, trashed.ManifestText, time.Hour*24*7*2)\n\tresp, err = s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: trashed.UUID})\n\tc.Assert(err, check.IsNil)\n\ts.checkSignatureExpiry(c, resp.ManifestText, time.Hour*24*7*2)\n\n\t// Make sure groups/contents doesn't return manifest_text with\n\t// collections (if it did, we'd need to sign it).\n\tgresp, err := s.localdb.GroupContents(s.userctx, arvados.GroupContentsOptions{\n\t\tLimit:   -1,\n\t\tFilters: []arvados.Filter{{\"uuid\", \"=\", arvadostest.FooCollection}},\n\t\tSelect:  []string{\"uuid\", \"manifest_text\"},\n\t})\n\tif err != nil {\n\t\tc.Check(err, check.ErrorMatches, `.*Invalid attribute.*manifest_text.*`)\n\t} else if c.Check(gresp.Items, check.HasLen, 1) {\n\t\tc.Check(gresp.Items[0].(map[string]interface{})[\"uuid\"], check.Equals, arvadostest.FooCollection)\n\t\tc.Check(gresp.Items[0].(map[string]interface{})[\"manifest_text\"], check.Equals, nil)\n\t}\n}\n\nfunc (s *CollectionSuite) checkSignatureExpiry(c *check.C, manifestText string, expectedTTL time.Duration) {\n\tm := regexp.MustCompile(`@([[:xdigit:]]+)`).FindStringSubmatch(manifestText)\n\tc.Assert(m, check.HasLen, 2)\n\tsigexp, err := strconv.ParseInt(m[1], 16, 64)\n\tc.Assert(err, check.IsNil)\n\texpectedExp := time.Now().Add(expectedTTL).Unix()\n\tc.Check(sigexp > expectedExp-60, check.Equals, true)\n\tc.Check(sigexp <= expectedExp, check.Equals, true)\n}\n\nfunc (s *CollectionSuite) TestSignaturesDisabled(c *check.C) {\n\ts.localdb.cluster.Collections.BlobSigning = false\n\tresp, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: arvadostest.FooCollection})\n\tc.Check(err, 
check.IsNil)\n\tc.Check(resp.ManifestText, check.Matches, `(?ms).* acbd[^ +]*\\+3 0:.*`)\n}\n\nvar _ = check.Suite(&replaceFilesSuite{})\n\ntype replaceFilesSuite struct {\n\tlocaldbSuite\n\tclient *arvados.Client\n\tac     *arvadosclient.ArvadosClient\n\tkc     *keepclient.KeepClient\n\tfoo    arvados.Collection // contains /foo.txt\n\ttmp    arvados.Collection // working collection, initially contains /foo.txt\n}\n\nfunc (s *replaceFilesSuite) SetUpSuite(c *check.C) {\n\ts.localdbSuite.SetUpSuite(c)\n\tvar err error\n\ts.client = arvados.NewClientFromEnv()\n\ts.ac, err = arvadosclient.New(s.client)\n\tc.Assert(err, check.IsNil)\n\ts.kc, err = keepclient.MakeKeepClient(s.ac)\n\tc.Assert(err, check.IsNil)\n}\n\nfunc (s *replaceFilesSuite) SetUpTest(c *check.C) {\n\ts.localdbSuite.SetUpTest(c)\n\t// Unlike most test suites, we need to COMMIT our setup --\n\t// otherwise, when our tests start additional\n\t// transactions/connections, they won't see our setup.\n\tctx, txFinish := ctrlctx.New(s.ctx, s.dbConnector.GetDB)\n\tdefer txFinish(new(error))\n\tadminctx := ctrlctx.NewWithToken(ctx, s.cluster, arvadostest.AdminToken)\n\tvar err error\n\ts.foo, err = s.localdb.railsProxy.CollectionCreate(adminctx, arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"owner_uuid\":    arvadostest.ActiveUserUUID,\n\t\t\t\"manifest_text\": \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\\n\",\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\ts.tmp, err = s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{\n\t\tReplaceFiles: map[string]string{\n\t\t\t\"/foo.txt\": s.foo.PortableDataHash + \"/foo.txt\",\n\t\t},\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"owner_uuid\": arvadostest.ActiveUserUUID,\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\ts.expectFiles(c, s.tmp, \"foo.txt\")\n}\n\nfunc (s *replaceFilesSuite) TestCollectionReplaceFiles(c *check.C) {\n\tadminctx := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AdminToken)\n\tfoobarbaz, err := s.localdb.railsProxy.CollectionCreate(adminctx, arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"owner_uuid\":    arvadostest.ActiveUserUUID,\n\t\t\t\"manifest_text\": \"./foo/bar 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz.txt\\n\",\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\twazqux, err := s.localdb.railsProxy.CollectionCreate(adminctx, arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"owner_uuid\":    arvadostest.ActiveUserUUID,\n\t\t\t\"manifest_text\": \"./waz d85b1213473c2fd7c2045020a6b9c62b+3 0:3:qux.txt\\n\",\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\n\t// Create using content from existing collections\n\tdst, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{\n\t\tReplaceFiles: map[string]string{\n\t\t\t\"/f\": s.foo.PortableDataHash + \"/foo.txt\",\n\t\t\t\"/b\": foobarbaz.PortableDataHash + \"/foo/bar\",\n\t\t\t\"/q\": wazqux.PortableDataHash + \"/\",\n\t\t\t\"/w\": wazqux.PortableDataHash + \"/waz\",\n\t\t},\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"owner_uuid\": arvadostest.ActiveUserUUID,\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\ts.expectFiles(c, dst, \"f\", \"b/baz.txt\", \"q/waz/qux.txt\", \"w/qux.txt\")\n\n\t// Delete a file and a directory\n\tdst, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: dst.UUID,\n\t\tReplaceFiles: map[string]string{\n\t\t\t\"/f\":     \"\",\n\t\t\t\"/q/waz\": \"\",\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\ts.expectFiles(c, dst, \"b/baz.txt\", \"q/\", \"w/qux.txt\")\n\n\t// Move and copy content within collection\n\tdst, err = 
s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: dst.UUID,\n\t\tReplaceFiles: map[string]string{\n\t\t\t// Note splicing content to /b/corge.txt but\n\t\t\t// removing everything else from /b\n\t\t\t\"/b\":              \"\",\n\t\t\t\"/b/corge.txt\":    dst.PortableDataHash + \"/b/baz.txt\",\n\t\t\t\"/quux/corge.txt\": dst.PortableDataHash + \"/b/baz.txt\",\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\ts.expectFiles(c, dst, \"b/corge.txt\", \"q/\", \"w/qux.txt\", \"quux/corge.txt\")\n\n\t// Remove everything except one file\n\tdst, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: dst.UUID,\n\t\tReplaceFiles: map[string]string{\n\t\t\t\"/\":            \"\",\n\t\t\t\"/b/corge.txt\": dst.PortableDataHash + \"/b/corge.txt\",\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\ts.expectFiles(c, dst, \"b/corge.txt\")\n\n\t// Copy entire collection to root\n\tdstcopy, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{\n\t\tReplaceFiles: map[string]string{\n\t\t\t\"/\": dst.PortableDataHash,\n\t\t}})\n\tc.Check(err, check.IsNil)\n\tc.Check(dstcopy.PortableDataHash, check.Equals, dst.PortableDataHash)\n\ts.expectFiles(c, dstcopy, \"b/corge.txt\")\n\n\t// Check invalid targets, sources, and combinations\n\tfor _, badrepl := range []map[string]string{\n\t\t{\n\t\t\t\"/foo/nope\": dst.PortableDataHash + \"/b\",\n\t\t\t\"/foo\":      dst.PortableDataHash + \"/b\",\n\t\t},\n\t\t{\n\t\t\t\"/foo\":      dst.PortableDataHash + \"/b\",\n\t\t\t\"/foo/nope\": \"\",\n\t\t},\n\t\t{\n\t\t\t\"/\":     dst.PortableDataHash + \"/\",\n\t\t\t\"/nope\": \"\",\n\t\t},\n\t\t{\n\t\t\t\"/\":     dst.PortableDataHash + \"/\",\n\t\t\t\"/nope\": dst.PortableDataHash + \"/b\",\n\t\t},\n\t\t{\"/bad/\": \"\"},\n\t\t{\"/./bad\": \"\"},\n\t\t{\"/b/./ad\": \"\"},\n\t\t{\"/b/../ad\": \"\"},\n\t\t{\"/b/.\": \"\"},\n\t\t{\".\": \"\"},\n\t\t{\"bad\": \"\"},\n\t\t{\"\": \"\"},\n\t\t{\"/bad\": \"/b\"},\n\t\t{\"/bad\": \"bad/b\"},\n\t\t{\"/bad\": dst.UUID + \"/b\"},\n\t} {\n\t\t_, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\t\tUUID:         dst.UUID,\n\t\t\tReplaceFiles: badrepl,\n\t\t})\n\t\tc.Logf(\"badrepl %#v\\n... got err: %s\", badrepl, err)\n\t\tc.Check(err, check.NotNil)\n\t}\n}\n\nfunc (s *replaceFilesSuite) TestMultipleRename(c *check.C) {\n\tadminctx := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AdminToken)\n\ttmp, err := s.localdb.CollectionUpdate(adminctx, arvados.UpdateOptions{\n\t\tUUID: s.tmp.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"manifest_text\": \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1 0:2:file2 0:3:file3\\n\"}})\n\tc.Assert(err, check.IsNil)\n\ttmp, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: tmp.UUID,\n\t\tReplaceFiles: map[string]string{\n\t\t\t\"/file1\":     \"current/file2\",\n\t\t\t\"/file2\":     \"current/file3\",\n\t\t\t\"/file3\":     \"current/file1\",\n\t\t\t\"/dir/file1\": \"current/file1\",\n\t\t}})\n\tc.Check(err, check.IsNil)\n\ts.expectFileSizes(c, tmp, map[string]int64{\n\t\t\"file1\":     2,\n\t\t\"file2\":     3,\n\t\t\"file3\":     1,\n\t\t\"dir/file1\": 1,\n\t})\n}\n\nfunc (s *replaceFilesSuite) TestNonexistentCurrentFile(c *check.C) {\n\tadminctx := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AdminToken)\n\ttmp, err := s.localdb.CollectionUpdate(adminctx, arvados.UpdateOptions{\n\t\tUUID: s.tmp.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"manifest_text\": \". 
acbd18db4cc2f85cedef654fccc4a4d8+3 0:1:file1 0:2:file2 0:3:file3\\n\"}})\n\tc.Assert(err, check.IsNil)\n\t_, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: tmp.UUID,\n\t\tReplaceFiles: map[string]string{\n\t\t\t\"/dst\": \"current/file404\",\n\t\t}})\n\tvar se httpserver.HTTPStatusError\n\tc.Assert(errors.As(err, &se), check.Equals, true)\n\tc.Check(se.HTTPStatus(), check.Equals, http.StatusBadRequest)\n}\n\nfunc (s *replaceFilesSuite) TestConcurrentCopyFromPDH(c *check.C) {\n\tvar wg sync.WaitGroup\n\tvar expectFiles []string\n\tfor i := 0; i < 10; i++ {\n\t\tfnm := fmt.Sprintf(\"copy%d.txt\", i)\n\t\texpectFiles = append(expectFiles, fnm)\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tctx, txFinish := ctrlctx.New(s.ctx, s.dbConnector.GetDB)\n\t\t\tdefer txFinish(new(error))\n\t\t\tuserctx := ctrlctx.NewWithToken(ctx, s.cluster, arvadostest.ActiveTokenV2)\n\t\t\t_, err := s.localdb.CollectionUpdate(userctx, arvados.UpdateOptions{\n\t\t\t\tUUID: s.tmp.UUID,\n\t\t\t\tReplaceFiles: map[string]string{\n\t\t\t\t\t\"/\" + fnm:  s.foo.PortableDataHash + \"/foo.txt\",\n\t\t\t\t\t\"/foo.txt\": \"\",\n\t\t\t\t}})\n\t\t\tc.Check(err, check.IsNil)\n\t\t}()\n\t}\n\twg.Wait()\n\t// After N concurrent/overlapping requests to add different\n\t// files by copying from another collection, we should see all\n\t// N files.\n\tfinal, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: s.tmp.UUID})\n\tc.Assert(err, check.IsNil)\n\ts.expectFiles(c, final, expectFiles...)\n}\n\nfunc (s *replaceFilesSuite) TestConcurrentCopyFromProvidedManifestText(c *check.C) {\n\tblockLocator := strings.Split(s.tmp.ManifestText, \" \")[1]\n\tvar wg sync.WaitGroup\n\texpectFileSizes := make(map[string]int64)\n\tfor i := 0; i < 10; i++ {\n\t\tfnm := fmt.Sprintf(\"upload%d.txt\", i)\n\t\texpectFileSizes[fnm] = 2\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tctx, txFinish := ctrlctx.New(s.ctx, s.dbConnector.GetDB)\n\t\t\tdefer txFinish(new(error))\n\t\t\tuserctx := ctrlctx.NewWithToken(ctx, s.cluster, arvadostest.ActiveTokenV2)\n\t\t\t_, err := s.localdb.CollectionUpdate(userctx, arvados.UpdateOptions{\n\t\t\t\tUUID: s.tmp.UUID,\n\t\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\t\"manifest_text\": \". \" + blockLocator + \" 0:2:\" + fnm + \"\\n\",\n\t\t\t\t},\n\t\t\t\tReplaceFiles: map[string]string{\n\t\t\t\t\t\"/\" + fnm:  \"manifest_text/\" + fnm,\n\t\t\t\t\t\"/foo.txt\": \"\",\n\t\t\t\t}})\n\t\t\tc.Check(err, check.IsNil)\n\t\t}()\n\t}\n\twg.Wait()\n\t// After N concurrent/overlapping requests to add different\n\t// files, we should see all N files.\n\tfinal, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: s.tmp.UUID})\n\tc.Assert(err, check.IsNil)\n\ts.expectFileSizes(c, final, expectFileSizes)\n}\n\nfunc (s *replaceFilesSuite) TestUnusedManifestText_Create(c *check.C) {\n\tblockLocator := strings.Split(s.tmp.ManifestText, \" \")[1]\n\t_, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"manifest_text\": \". 
\" + blockLocator + \" 0:3:foo\\n\",\n\t\t},\n\t\tReplaceFiles: map[string]string{\n\t\t\t\"/foo.txt\": \"\",\n\t\t}})\n\tc.Check(err, check.ErrorMatches, `.*manifest_text.*would not be used.*`)\n}\n\nfunc (s *replaceFilesSuite) TestUnusedManifestText_Update(c *check.C) {\n\tblockLocator := strings.Split(s.tmp.ManifestText, \" \")[1]\n\t_, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: s.tmp.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"manifest_text\": \". \" + blockLocator + \" 0:3:foo\\n\",\n\t\t},\n\t\tReplaceFiles: map[string]string{\n\t\t\t\"/foo.txt\": \"\",\n\t\t}})\n\tc.Check(err, check.ErrorMatches, `.*manifest_text.*would not be used.*`)\n}\n\nfunc (s *replaceFilesSuite) TestConcurrentRename(c *check.C) {\n\tvar wg sync.WaitGroup\n\tvar renamed atomic.Int32\n\tn := 10\n\terrors := make(chan error, n)\n\tvar newnameOK string\n\tfor i := 0; i < n; i++ {\n\t\tnewname := fmt.Sprintf(\"newname%d.txt\", i)\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tctx, txFinish := ctrlctx.New(s.ctx, s.dbConnector.GetDB)\n\t\t\tdefer txFinish(new(error))\n\t\t\tuserctx := ctrlctx.NewWithToken(ctx, s.cluster, arvadostest.ActiveTokenV2)\n\t\t\tupd, err := s.localdb.CollectionUpdate(userctx, arvados.UpdateOptions{\n\t\t\t\tUUID: s.tmp.UUID,\n\t\t\t\tReplaceFiles: map[string]string{\n\t\t\t\t\t\"/\" + newname: \"current/foo.txt\",\n\t\t\t\t\t\"/foo.txt\":    \"\",\n\t\t\t\t}})\n\t\t\tif err != nil {\n\t\t\t\terrors <- err\n\t\t\t} else {\n\t\t\t\trenamed.Add(1)\n\t\t\t\ts.expectFiles(c, upd, newname)\n\t\t\t\tnewnameOK = newname\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\t// N concurrent/overlapping attempts to rename foo.txt should\n\t// have succeed exactly one time, and the final collection\n\t// content should correspond to the operation that returned\n\t// success.\n\tif !c.Check(int(renamed.Load()), check.Equals, 1) {\n\t\tclose(errors)\n\t\tfor err := range errors {\n\t\t\tc.Logf(\"err: %s\", err)\n\t\t}\n\t\treturn\n\t}\n\tc.Assert(newnameOK, check.Not(check.Equals), \"\")\n\tfinal, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: s.tmp.UUID})\n\tc.Assert(err, check.IsNil)\n\ts.expectFiles(c, final, newnameOK)\n}\n\n// expectFiles checks coll's directory structure against the given\n// list of expected files and empty directories. An expected path with\n// a trailing slash indicates an empty directory.\nfunc (s *replaceFilesSuite) expectFiles(c *check.C, coll arvados.Collection, expected ...string) {\n\texpectSizes := make(map[string]int64)\n\tfor _, path := range expected {\n\t\texpectSizes[path] = -1\n\t}\n\ts.expectFileSizes(c, coll, expectSizes)\n}\n\n// expectFileSizes checks coll's directory structure against the given\n// map of path->size.  An expected path with a trailing slash\n// indicates an empty directory.  
An expected size of -1 indicates the\n// file size does not need to be checked.\nfunc (s *replaceFilesSuite) expectFileSizes(c *check.C, coll arvados.Collection, expected map[string]int64) {\n\tcfs, err := coll.FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\tfound := make(map[string]int64)\n\tnonemptydirs := map[string]bool{}\n\tfs.WalkDir(arvados.FS(cfs), \"/\", func(path string, d fs.DirEntry, err error) error {\n\t\tdir, _ := filepath.Split(path)\n\t\tnonemptydirs[dir] = true\n\t\tif d.IsDir() {\n\t\t\tif path != \"/\" {\n\t\t\t\tpath += \"/\"\n\t\t\t}\n\t\t\tif !nonemptydirs[path] {\n\t\t\t\tnonemptydirs[path] = false\n\t\t\t}\n\t\t} else {\n\t\t\tfi, err := d.Info()\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tfound[path] = fi.Size()\n\t\t}\n\t\treturn nil\n\t})\n\tfor d, nonempty := range nonemptydirs {\n\t\tif !nonempty {\n\t\t\tfound[d] = 0\n\t\t}\n\t}\n\tfor path, size := range found {\n\t\tif trimmed := strings.TrimPrefix(path, \"/\"); trimmed != path && trimmed != \"\" {\n\t\t\tfound[trimmed] = size\n\t\t\tdelete(found, path)\n\t\t\tpath = trimmed\n\t\t}\n\t\tif expected[path] == -1 {\n\t\t\t// Path is expected to exist, and -1 means we\n\t\t\t// aren't supposed to check the size.  Change\n\t\t\t// \"found size\" to -1 as well, so this entry\n\t\t\t// will pass the DeepEquals check below.\n\t\t\tfound[path] = -1\n\t\t}\n\t}\n\tc.Check(found, check.DeepEquals, expected)\n}\n\nvar _ = check.Suite(&replaceSegmentsSuite{})\n\ntype replaceSegmentsSuite struct {\n\tlocaldbSuite\n\tclient  *arvados.Client\n\tac      *arvadosclient.ArvadosClient\n\tkc      *keepclient.KeepClient\n\tlocator []string           // locator[i] is a locator of a block consisting of i null bytes.\n\ttmp     arvados.Collection // each test case starts off with file1 and file2\n}\n\nfunc (s *replaceSegmentsSuite) SetUpSuite(c *check.C) {\n\ts.localdbSuite.SetUpSuite(c)\n\tvar err error\n\ts.client = arvados.NewClientFromEnv()\n\ts.client.AuthToken = arvadostest.ActiveTokenV2\n\ts.ac, err = arvadosclient.New(s.client)\n\tc.Assert(err, check.IsNil)\n\ts.kc, err = keepclient.MakeKeepClient(s.ac)\n\tc.Assert(err, check.IsNil)\n}\n\nfunc (s *replaceSegmentsSuite) SetUpTest(c *check.C) {\n\ts.localdbSuite.SetUpTest(c)\n\tif s.locator == nil {\n\t\ts.locator = make([]string, 10)\n\t\tfor i := range s.locator {\n\t\t\tresp, err := s.kc.BlockWrite(s.userctx, arvados.BlockWriteOptions{Data: make([]byte, i)})\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\ts.locator[i] = resp.Locator\n\t\t\tc.Logf(\"locator %d %s\", i, s.locator[i])\n\t\t}\n\t}\n\tvar err error\n\ts.tmp, err = s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"manifest_text\": \". 
\" + s.locator[1] + \" \" + s.locator[2] + \" 0:1:file1 1:2:file2\\n\",\n\t\t}})\n\tc.Assert(err, check.IsNil)\n}\n\nfunc (s *replaceSegmentsSuite) checkCollectionNotModified(c *check.C) {\n\t// Confirm the collection was not modified.\n\tcoll, err := s.localdb.CollectionGet(s.userctx, arvados.GetOptions{UUID: s.tmp.UUID})\n\tc.Assert(err, check.IsNil)\n\tc.Check(stripSignatures(coll.ManifestText), check.Equals, stripSignatures(s.tmp.ManifestText))\n\tc.Check(coll.ModifiedAt, check.Equals, s.tmp.ModifiedAt)\n}\n\nfunc (s *replaceSegmentsSuite) Test2to1_Simple(c *check.C) {\n\tcoll, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: s.tmp.UUID,\n\t\tReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{\n\t\t\tarvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[3], 0, 1},\n\t\t\tarvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[3], 1, 2},\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\tc.Check(stripSignatures(coll.ManifestText), check.Equals, stripSignatures(\". \"+s.locator[3]+\" 0:1:file1 1:2:file2\\n\"))\n}\n\n// Apply replacements to provided manifest_text when creating a new\n// collection.\nfunc (s *replaceSegmentsSuite) TestCreate(c *check.C) {\n\tcoll, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"manifest_text\": \". \" + s.locator[2] + \" \" + s.locator[3] + \" 0:5:file5\\n\",\n\t\t},\n\t\tReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{\n\t\t\tarvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[5], 0, 2},\n\t\t\tarvados.BlockSegment{s.locator[3], 0, 3}: arvados.BlockSegment{s.locator[5], 2, 3},\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\tc.Check(stripSignatures(coll.ManifestText), check.Equals, stripSignatures(\". 
\"+s.locator[5]+\" 0:5:file5\\n\"))\n}\n\nfunc (s *replaceSegmentsSuite) TestSignatureCheck(c *check.C) {\n\tvar badlocator string\n\t{\n\t\tadminclient := arvados.NewClientFromEnv()\n\t\tac, err := arvadosclient.New(adminclient)\n\t\tc.Assert(err, check.IsNil)\n\t\tkc, err := keepclient.MakeKeepClient(ac)\n\t\tc.Assert(err, check.IsNil)\n\t\tresp, err := kc.BlockWrite(context.Background(), arvados.BlockWriteOptions{Data: make([]byte, 3)})\n\t\tc.Assert(err, check.IsNil)\n\t\tbadlocator = resp.Locator\n\t}\n\n\t// Replacement locator has an invalid signature (signed with a\n\t// different token) so this update should fail.\n\t_, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: s.tmp.UUID,\n\t\tReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{\n\t\t\tarvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{badlocator, 0, 1},\n\t\t\tarvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{badlocator, 1, 2},\n\t\t}})\n\tc.Assert(err, check.ErrorMatches, `.*PermissionDenied.*`)\n\tvar se httpserver.HTTPStatusError\n\tc.Assert(errors.As(err, &se), check.Equals, true)\n\tc.Check(se.HTTPStatus(), check.Equals, http.StatusForbidden)\n\n\ts.checkCollectionNotModified(c)\n}\n\nfunc (s *replaceSegmentsSuite) Test2to1_Reordered(c *check.C) {\n\tcoll, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: s.tmp.UUID,\n\t\tReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{\n\t\t\tarvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[3], 2, 1},\n\t\t\tarvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[3], 0, 2},\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\tc.Check(stripSignatures(coll.ManifestText), check.Equals, stripSignatures(\". \"+s.locator[3]+\" 2:1:file1 0:2:file2\\n\"))\n}\n\nfunc (s *replaceSegmentsSuite) Test2to1_MultipleReferences(c *check.C) {\n\tcoll, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: s.tmp.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"manifest_text\": \". \" + s.locator[1] + \" \" + s.locator[2] + \" 0:1:file1 1:2:file2\\n\" +\n\t\t\t\t\"./dir \" + s.locator[1] + \" 0:1:file3\\n\",\n\t\t}})\n\tcoll, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: s.tmp.UUID,\n\t\tReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{\n\t\t\tarvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[3], 0, 1},\n\t\t\tarvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[3], 1, 2},\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\tc.Check(stripSignatures(coll.ManifestText), check.Equals,\n\t\tstripSignatures(\". \"+s.locator[3]+\" 0:1:file1 1:2:file2\\n\"+\n\t\t\t\"./dir \"+s.locator[3]+\" 0:1:file3\\n\"))\n}\n\n// Caller is asking to repack 1,2,4->7 and 5->8, but a different\n// caller has already repacked 1,2,3->6, so we skip 1,2,4->7 but apply\n// 5->8.\nfunc (s *replaceSegmentsSuite) TestSkipUnreferenced(c *check.C) {\n\torig := \". 
\" + s.locator[6] + \" \" + s.locator[4] + \" 0:1:file1 1:2:file2 3:3:file3 6:4:file4\\n\" +\n\t\t\"./dir \" + s.locator[5] + \" 0:5:file5\\n\"\n\tcoll, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: s.tmp.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"manifest_text\": orig,\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\tcoll, err = s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: coll.UUID,\n\t\tReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{\n\t\t\tarvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[7], 0, 1},\n\t\t\tarvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[7], 1, 2},\n\t\t\tarvados.BlockSegment{s.locator[4], 0, 4}: arvados.BlockSegment{s.locator[7], 3, 4},\n\t\t\tarvados.BlockSegment{s.locator[5], 0, 5}: arvados.BlockSegment{s.locator[8], 0, 5},\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\tc.Check(stripSignatures(coll.ManifestText), check.Equals,\n\t\tstripSignatures(\". \"+s.locator[6]+\" \"+s.locator[4]+\" 0:1:file1 1:2:file2 3:3:file3 6:4:file4\\n\"+\n\t\t\t\"./dir \"+s.locator[8]+\" 0:5:file5\\n\"))\n}\n\nfunc (s *replaceSegmentsSuite) TestLengthMismatch(c *check.C) {\n\t_, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: s.tmp.UUID,\n\t\tReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{\n\t\t\tarvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[3], 0, 2},\n\t\t\tarvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[3], 0, 2},\n\t\t}})\n\tc.Check(err, check.ErrorMatches, `replace_segments: mismatched length: replacing segment length 1 with segment length 2`)\n\tvar se httpserver.HTTPStatusError\n\tc.Assert(errors.As(err, &se), check.Equals, true)\n\tc.Check(se.HTTPStatus(), check.Equals, http.StatusBadRequest)\n\ts.checkCollectionNotModified(c)\n}\n\nfunc (s *replaceSegmentsSuite) TestInvalidReplacementOffset(c *check.C) {\n\t_, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: s.tmp.UUID,\n\t\tReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{\n\t\t\tarvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[3], 0, 1},\n\t\t\tarvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[3], 3, 2},\n\t\t}})\n\tc.Check(err, check.ErrorMatches, `replace_segments: invalid replacement: offset 3 \\+ length 2 > block size 3`)\n\tvar se httpserver.HTTPStatusError\n\tc.Assert(errors.As(err, &se), check.Equals, true)\n\tc.Check(se.HTTPStatus(), check.Equals, http.StatusBadRequest)\n\ts.checkCollectionNotModified(c)\n}\n\nfunc (s *replaceSegmentsSuite) TestInvalidReplacementLength(c *check.C) {\n\t_, err := s.localdb.CollectionUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: s.tmp.UUID,\n\t\tReplaceSegments: map[arvados.BlockSegment]arvados.BlockSegment{\n\t\t\tarvados.BlockSegment{s.locator[1], 0, 1}: arvados.BlockSegment{s.locator[3], 0, 1},\n\t\t\tarvados.BlockSegment{s.locator[2], 0, 2}: arvados.BlockSegment{s.locator[3], 4, 2},\n\t\t}})\n\tc.Check(err, check.ErrorMatches, `replace_segments: invalid replacement: offset 4 \\+ length 2 > block size 3`)\n\tvar se httpserver.HTTPStatusError\n\tc.Assert(errors.As(err, &se), check.Equals, true)\n\tc.Check(se.HTTPStatus(), check.Equals, http.StatusBadRequest)\n\ts.checkCollectionNotModified(c)\n}\n\nfunc stripSignatures(manifest string) string {\n\treturn regexp.MustCompile(`\\+A[^ ]+`).ReplaceAllString(manifest, \"\")\n}\n"
  },
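  {
    "path": "lib/controller/localdb/collection_replace_segments_sketch.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport \"git.arvados.org/arvados.git/sdk/go/arvados\"\n\n// repackReplacementSketch is a minimal illustration (not called by\n// any production code) of how a repacking client builds the\n// replace_segments map exercised by Test2to1_Simple: two blocks of\n// lengths 1 and 2 have been concatenated into one new block of\n// length 3, so byte 0 of the old 1-byte block becomes byte 0 of the\n// new block, and bytes 0-1 of the old 2-byte block become bytes 1-2.\n// The locator arguments are hypothetical placeholders.\nfunc repackReplacementSketch(oldLoc1, oldLoc2, newLoc string) map[arvados.BlockSegment]arvados.BlockSegment {\n\treturn map[arvados.BlockSegment]arvados.BlockSegment{\n\t\tarvados.BlockSegment{oldLoc1, 0, 1}: arvados.BlockSegment{newLoc, 0, 1},\n\t\tarvados.BlockSegment{oldLoc2, 0, 2}: arvados.BlockSegment{newLoc, 1, 2},\n\t}\n}\n"
  },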
  {
    "path": "lib/controller/localdb/conn.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/railsproxy\"\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/hashicorp/yamux\"\n\t\"github.com/jmoiron/sqlx\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype railsProxy = rpc.Conn\n\ntype Conn struct {\n\tcluster                    *arvados.Cluster\n\t*railsProxy                // handles API methods that aren't defined on Conn itself\n\tgetdb                      func(context.Context) (*sqlx.DB, error)\n\tvocabularyCache            *arvados.Vocabulary\n\tvocabularyFileModTime      time.Time\n\tlastVocabularyRefreshCheck time.Time\n\tlastVocabularyError        error\n\tloginController\n\tgwTunnels        map[string]*yamux.Session\n\tgwTunnelsLock    sync.Mutex\n\tactiveUsers      map[string]bool\n\tactiveUsersLock  sync.Mutex\n\tactiveUsersReset time.Time\n\n\twantContainerPriorityUpdate chan struct{}\n}\n\nfunc NewConn(bgCtx context.Context, cluster *arvados.Cluster, getdb func(context.Context) (*sqlx.DB, error)) *Conn {\n\trailsProxy := railsproxy.NewConn(cluster)\n\trailsProxy.RedactHostInErrors = true\n\tconn := Conn{\n\t\tcluster:                     cluster,\n\t\trailsProxy:                  railsProxy,\n\t\tgetdb:                       getdb,\n\t\twantContainerPriorityUpdate: make(chan struct{}, 1),\n\t}\n\tconn.loginController = chooseLoginController(cluster, &conn)\n\tgo conn.runContainerPriorityUpdateThread(bgCtx)\n\treturn &conn\n}\n\nfunc (conn *Conn) checkProperties(ctx context.Context, properties interface{}) error {\n\tif properties == nil {\n\t\treturn nil\n\t}\n\tvar props map[string]interface{}\n\tswitch properties := properties.(type) {\n\tcase string:\n\t\terr := json.Unmarshal([]byte(properties), &props)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase map[string]interface{}:\n\t\tprops = properties\n\tdefault:\n\t\treturn fmt.Errorf(\"unexpected properties type %T\", properties)\n\t}\n\tvoc, err := conn.VocabularyGet(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = voc.Check(props)\n\tif err != nil {\n\t\treturn httpserver.ErrorWithStatus(err, http.StatusBadRequest)\n\t}\n\treturn nil\n}\n\nfunc (conn *Conn) maybeRefreshVocabularyCache(logger logrus.FieldLogger) error {\n\tif conn.lastVocabularyRefreshCheck.Add(time.Second).After(time.Now()) {\n\t\t// Throttle the access to disk to at most once per second.\n\t\treturn nil\n\t}\n\tconn.lastVocabularyRefreshCheck = time.Now()\n\tfi, err := os.Stat(conn.cluster.API.VocabularyPath)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"couldn't stat vocabulary file %q: %v\", conn.cluster.API.VocabularyPath, err)\n\t\tconn.lastVocabularyError = err\n\t\treturn err\n\t}\n\tif fi.ModTime().After(conn.vocabularyFileModTime) {\n\t\terr = conn.loadVocabularyFile()\n\t\tif err != nil {\n\t\t\tconn.lastVocabularyError = err\n\t\t\treturn err\n\t\t}\n\t\tconn.vocabularyFileModTime = fi.ModTime()\n\t\tconn.lastVocabularyError = nil\n\t\tlogger.Info(\"vocabulary file reloaded successfully\")\n\t}\n\treturn nil\n}\n\nfunc (conn *Conn) loadVocabularyFile() error {\n\tvf, err := os.ReadFile(conn.cluster.API.VocabularyPath)\n\tif err != nil {\n\t\treturn 
fmt.Errorf(\"while reading the vocabulary file: %v\", err)\n\t}\n\tmk := make([]string, 0, len(conn.cluster.Collections.ManagedProperties))\n\tfor k := range conn.cluster.Collections.ManagedProperties {\n\t\tmk = append(mk, k)\n\t}\n\tvoc, err := arvados.NewVocabulary(vf, mk)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"while loading vocabulary file %q: %s\", conn.cluster.API.VocabularyPath, err)\n\t}\n\tconn.vocabularyCache = voc\n\treturn nil\n}\n\n// LastVocabularyError returns the last error encountered while loading the\n// vocabulary file.\n// Implements health.Func\nfunc (conn *Conn) LastVocabularyError() error {\n\tconn.maybeRefreshVocabularyCache(ctxlog.FromContext(context.Background()))\n\treturn conn.lastVocabularyError\n}\n\n// VocabularyGet refreshes the vocabulary cache if necessary and returns it.\nfunc (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {\n\tif conn.cluster.API.VocabularyPath == \"\" {\n\t\treturn arvados.Vocabulary{\n\t\t\tTags: map[string]arvados.VocabularyTag{},\n\t\t}, nil\n\t}\n\tlogger := ctxlog.FromContext(ctx)\n\tif conn.vocabularyCache == nil {\n\t\t// Initial load of vocabulary file.\n\t\terr := conn.loadVocabularyFile()\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"error loading vocabulary file\")\n\t\t\treturn arvados.Vocabulary{}, err\n\t\t}\n\t}\n\terr := conn.maybeRefreshVocabularyCache(logger)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"error reloading vocabulary file - ignoring\")\n\t}\n\treturn *conn.vocabularyCache, nil\n}\n\n// Logout handles the logout of conn giving to the appropriate loginController\nfunc (conn *Conn) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {\n\treturn conn.loginController.Logout(ctx, opts)\n}\n\n// Login handles the login of conn giving to the appropriate loginController\nfunc (conn *Conn) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {\n\treturn conn.loginController.Login(ctx, opts)\n}\n\n// UserAuthenticate handles the User Authentication of conn giving to the appropriate loginController\nfunc (conn *Conn) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {\n\treturn conn.loginController.UserAuthenticate(ctx, opts)\n}\n\nvar privateNetworks = func() (nets []*net.IPNet) {\n\tfor _, s := range []string{\n\t\t\"127.0.0.0/8\",\n\t\t\"10.0.0.0/8\",\n\t\t\"172.16.0.0/12\",\n\t\t\"192.168.0.0/16\",\n\t\t\"169.254.0.0/16\",\n\t\t\"::1/128\",\n\t\t\"fe80::/10\",\n\t\t\"fc00::/7\",\n\t} {\n\t\t_, n, err := net.ParseCIDR(s)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"privateNetworks: %q: %s\", s, err))\n\t\t}\n\t\tnets = append(nets, n)\n\t}\n\treturn\n}()\n\nfunc httpErrorf(code int, format string, args ...interface{}) error {\n\treturn httpserver.ErrorWithStatus(fmt.Errorf(format, args...), code)\n}\n"
  },
  {
    "path": "lib/controller/localdb/container.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"database/sql\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// ContainerUpdate defers to railsProxy and then notifies the\n// container priority updater thread.\nfunc (conn *Conn) ContainerUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Container, error) {\n\tresp, err := conn.railsProxy.ContainerUpdate(ctx, opts)\n\tif err == nil {\n\t\tselect {\n\t\tcase conn.wantContainerPriorityUpdate <- struct{}{}:\n\t\tdefault:\n\t\t\t// update already pending\n\t\t}\n\t}\n\treturn resp, err\n}\n\nvar containerPriorityUpdateInterval = 5 * time.Minute\n\n// runContainerPriorityUpdateThread periodically (and immediately\n// after each container update request) corrects any inconsistent\n// container priorities caused by races.\nfunc (conn *Conn) runContainerPriorityUpdateThread(ctx context.Context) {\n\tctx = ctrlctx.NewWithToken(ctx, conn.cluster, conn.cluster.SystemRootToken)\n\tlog := ctxlog.FromContext(ctx).WithField(\"worker\", \"runContainerPriorityUpdateThread\")\n\tticker := time.NewTicker(containerPriorityUpdateInterval)\n\tfor ctx.Err() == nil {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\tcase <-conn.wantContainerPriorityUpdate:\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t\terr := conn.containerPriorityUpdate(ctx, log)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Warn(\"error updating container priorities\")\n\t\t}\n\t}\n}\n\nfunc (conn *Conn) containerPriorityUpdate(ctx context.Context, log logrus.FieldLogger) error {\n\tdb, err := conn.getdb(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getdb: %w\", err)\n\t}\n\t// Stage 1: Fix containers that have priority>0 but should\n\t// have priority=0 because there are no active\n\t// container_requests (unfinished, priority>0) associated with\n\t// them.\n\tres, err := db.ExecContext(ctx, `\n\t\tUPDATE containers\n\t\tSET priority=0\n\t\tWHERE state IN ('Queued', 'Locked', 'Running')\n\t\t AND priority>0\n\t\t AND uuid NOT IN (\n\t\t\tSELECT container_uuid\n\t\t\tFROM container_requests\n\t\t\tWHERE priority > 0\n\t\t\t AND state = 'Committed')`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"update: %w\", err)\n\t} else if rows, err := res.RowsAffected(); err != nil {\n\t\treturn fmt.Errorf(\"update: %w\", err)\n\t} else if rows > 0 {\n\t\tlog.Infof(\"found %d containers with priority>0 and no active requests, updated to priority=0\", rows)\n\t}\n\n\t// Stage 2: Fix containers that have priority=0 but should\n\t// have priority>0 because there are active container_requests\n\t// (priority>0, unfinished, and not children of cancelled\n\t// containers).\n\t//\n\t// Fixing here means calling out to RailsAPI to compute the\n\t// correct priority for the contianer and (if needed)\n\t// propagate that change to child containers.\n\n\t// In this loop we look for a single container that needs\n\t// fixing, call out to Rails to fix it, and repeat until we\n\t// don't find any more.\n\t//\n\t// We could get a batch of UUIDs that need attention by\n\t// increasing LIMIT 1, however, updating priority on one\n\t// container typically cascades to other containers, so we\n\t// would often end up repeating work.\n\tfor lastUUID := \"\"; ; {\n\t\tvar uuid string\n\t\terr := db.QueryRowxContext(ctx, `\n\t\t\tSELECT 
containers.uuid from containers\n\t\t\tJOIN container_requests\n\t\t\t ON container_requests.container_uuid = containers.uuid\n\t\t\t AND container_requests.state = 'Committed' AND container_requests.priority > 0\n\t\t\tLEFT JOIN containers parent\n\t\t\t ON parent.uuid = container_requests.requesting_container_uuid\n\t\t\tWHERE containers.state IN ('Queued', 'Locked', 'Running')\n\t\t\t AND containers.priority = 0\n\t\t\t AND (parent.uuid IS NULL OR parent.priority > 0)\n\t\t\tORDER BY containers.created_at\n\t\t\tLIMIT 1`).Scan(&uuid)\n\t\tif err == sql.ErrNoRows {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"join: %w\", err)\n\t\t}\n\t\tif uuid == lastUUID {\n\t\t\t// We don't want to keep hammering this\n\t\t\t// forever if the ContainerPriorityUpdate call\n\t\t\t// didn't achieve anything.\n\t\t\treturn fmt.Errorf(\"possible lack of progress: container %s still has priority=0 after updating\", uuid)\n\t\t}\n\t\tlastUUID = uuid\n\t\tupd, err := conn.railsProxy.ContainerPriorityUpdate(ctx, arvados.UpdateOptions{UUID: uuid, Select: []string{\"uuid\", \"priority\"}})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Debugf(\"updated container %s priority from 0 to %d\", uuid, upd.Priority)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "lib/controller/localdb/container_gateway.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"crypto/subtle\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"database/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httputil\"\n\t\"net/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/lib/service\"\n\t\"git.arvados.org/arvados.git/lib/webdavfs\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\tkeepweb \"git.arvados.org/arvados.git/services/keep-web\"\n\t\"github.com/hashicorp/yamux\"\n\t\"golang.org/x/net/webdav\"\n)\n\nvar (\n\t// forceProxyForTest enables test cases to exercise the \"proxy\n\t// to a different controller instance\" code path without\n\t// running a second controller instance.  If this is set, an\n\t// incoming request with NoForward==false is always proxied to\n\t// the configured controller instance that matches the\n\t// container gateway's tunnel endpoint, without checking\n\t// whether the tunnel is actually connected to the current\n\t// process.\n\tforceProxyForTest = false\n\n\t// forceInternalURLForTest is sent to the crunch-run gateway\n\t// when setting up a tunnel in a test suite where\n\t// service.URLFromContext() does not return anything.\n\tforceInternalURLForTest *arvados.URL\n)\n\n// ContainerRequestLog returns a WebDAV handler that reads logs from\n// the indicated container request. It works by proxying the incoming\n// HTTP request to\n//\n//   - the container gateway, if there is an associated container that\n//     is running\n//\n//   - a different controller process, if there is a running container\n//     whose gateway is accessible through a tunnel to a different\n//     controller process\n//\n//   - keep-web, if saved logs exist and there is no gateway (or the\n//     associated container is finished)\n//\n//   - an empty-collection stub, if there is no gateway and no saved\n//     log\n//\n// For an incoming request\n//\n//\tGET /arvados/v1/container_requests/{cr_uuid}/log/{c_uuid}{/c_log_path}\n//\n// The upstream request may be to {c_uuid}'s container gateway\n//\n//\tGET /arvados/v1/container_requests/{cr_uuid}/log/{c_uuid}{/c_log_path}\n//\tX-Webdav-Prefix: /arvados/v1/container_requests/{cr_uuid}/log/{c_uuid}\n//\tX-Webdav-Source: /log\n//\n// ...or the upstream request may be to keep-web (where {cr_log_uuid}\n// is the container request log collection UUID)\n//\n//\tGET /arvados/v1/container_requests/{cr_uuid}/log/{c_uuid}{/c_log_path}\n//\tHost: {cr_log_uuid}.internal\n//\tX-Webdav-Prefix: /arvados/v1/container_requests/{cr_uuid}/log\n//\tX-Arvados-Container-Uuid: {c_uuid}\n//\n// ...or the request may be handled locally using an empty-collection\n// stub.\nfunc (conn *Conn) ContainerRequestLog(ctx context.Context, opts arvados.ContainerLogOptions) (http.Handler, error) {\n\tif opts.Method == \"OPTIONS\" && opts.Header.Get(\"Access-Control-Request-Method\") != \"\" {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tif !keepweb.ServeCORSPreflight(w, opts.Header) {\n\t\t\t\t// Inconceivable.  
We already checked\n\t\t\t\t// for the only condition where\n\t\t\t\t// ServeCORSPreflight returns false.\n\t\t\t\thttpserver.Error(w, \"unhandled CORS preflight request\", http.StatusInternalServerError)\n\t\t\t}\n\t\t}), nil\n\t}\n\tcr, err := conn.railsProxy.ContainerRequestGet(ctx, arvados.GetOptions{UUID: opts.UUID, Select: []string{\"uuid\", \"container_uuid\", \"log_uuid\"}})\n\tif err != nil {\n\t\tif se := httpserver.HTTPStatusError(nil); errors.As(err, &se) && se.HTTPStatus() == http.StatusUnauthorized {\n\t\t\t// Hint to WebDAV client that we accept HTTP basic auth.\n\t\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tw.Header().Set(\"Www-Authenticate\", \"Basic realm=\\\"collections\\\"\")\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t}), nil\n\t\t}\n\t\treturn nil, err\n\t}\n\tctr, err := conn.railsProxy.ContainerGet(ctx, arvados.GetOptions{UUID: cr.ContainerUUID, Select: []string{\"uuid\", \"state\", \"gateway_address\"}})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// .../log/{ctr.UUID} is a directory where the currently\n\t// assigned container's log data [will] appear (as opposed to\n\t// previous attempts in .../log/{previous_ctr_uuid}). Requests\n\t// that are outside that directory, and requests on a\n\t// non-running container, are proxied to keep-web instead of\n\t// going through the container gateway system.\n\t//\n\t// Side note: a depth>1 directory tree listing starting at\n\t// .../{cr_uuid}/log will only include subdirectories for\n\t// finished containers, i.e., will not include a subdirectory\n\t// with log data for a current (unfinished) container UUID.\n\t// In order to access live logs, a client must look up the\n\t// container_uuid field of the container request record, and\n\t// explicitly request a path under .../{cr_uuid}/log/{c_uuid}.\n\tif ctr.GatewayAddress == \"\" ||\n\t\t(ctr.State != arvados.ContainerStateLocked && ctr.State != arvados.ContainerStateRunning) ||\n\t\t!(opts.Path == \"/\"+ctr.UUID || strings.HasPrefix(opts.Path, \"/\"+ctr.UUID+\"/\")) {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tconn.serveContainerRequestLogViaKeepWeb(opts, cr, w, r)\n\t\t}), nil\n\t}\n\tdial, arpc, err := conn.findGateway(ctx, ctr, opts.NoForward)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif arpc != nil {\n\t\topts.NoForward = true\n\t\treturn arpc.ContainerRequestLog(ctx, opts)\n\t}\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tr = r.WithContext(ctx)\n\t\tvar proxyErr error\n\t\tgatewayProxy(dial, w, http.Header{\n\t\t\t\"X-Arvados-Container-Gateway-Uuid\": {ctr.UUID},\n\t\t\t\"X-Webdav-Prefix\":                  {\"/arvados/v1/container_requests/\" + cr.UUID + \"/log/\" + ctr.UUID},\n\t\t\t\"X-Webdav-Source\":                  {\"/log\"},\n\t\t}, &proxyErr).ServeHTTP(w, r)\n\t\tif proxyErr == nil {\n\t\t\t// proxy succeeded\n\t\t\treturn\n\t\t}\n\t\t// If proxying to the container gateway fails, it\n\t\t// might be caused by a race where crunch-run exited\n\t\t// after we decided (above) the log was not final.\n\t\t// In that case we should proxy to keep-web.\n\t\tctr, err := conn.railsProxy.ContainerGet(ctx, arvados.GetOptions{\n\t\t\tUUID:   ctr.UUID,\n\t\t\tSelect: []string{\"uuid\", \"state\", \"gateway_address\", \"log\"},\n\t\t})\n\t\tif err != nil {\n\t\t\t// Lost access to the container record?\n\t\t\thttpserver.Error(w, \"error re-fetching container record: \"+err.Error(), http.StatusServiceUnavailable)\n\t\t} else if ctr.State == 
arvados.ContainerStateLocked || ctr.State == arvados.ContainerStateRunning {\n\t\t\t// No race, proxyErr was the best we can do\n\t\t\thttpserver.Error(w, \"proxy error: \"+proxyErr.Error(), http.StatusServiceUnavailable)\n\t\t} else {\n\t\t\tconn.serveContainerRequestLogViaKeepWeb(opts, cr, w, r)\n\t\t}\n\t}), nil\n}\n\n// serveContainerRequestLogViaKeepWeb handles a request for saved container\n// log content by proxying to one of the configured keep-web servers.\n//\n// It tries to choose a keep-web server that is running on this host.\nfunc (conn *Conn) serveContainerRequestLogViaKeepWeb(opts arvados.ContainerLogOptions, cr arvados.ContainerRequest, w http.ResponseWriter, r *http.Request) {\n\tif cr.LogUUID == \"\" {\n\t\t// Special case: if no log data exists yet, we serve\n\t\t// an empty collection by ourselves instead of\n\t\t// proxying to keep-web.\n\t\tconn.serveEmptyDir(\"/arvados/v1/container_requests/\"+cr.UUID+\"/log\", w, r)\n\t\treturn\n\t}\n\tmyURL, _ := service.URLFromContext(r.Context())\n\tu := url.URL(myURL)\n\tmyHostname := u.Hostname()\n\tvar webdavBase arvados.URL\n\tvar ok bool\n\tfor webdavBase = range conn.cluster.Services.WebDAV.InternalURLs {\n\t\tok = true\n\t\tu := url.URL(webdavBase)\n\t\tif h := u.Hostname(); h == \"127.0.0.1\" || h == \"0.0.0.0\" || h == \"::1\" || h == myHostname {\n\t\t\t// Prefer a keep-web service running on the\n\t\t\t// same host as us. (If we don't find one, we\n\t\t\t// pick one arbitrarily.)\n\t\t\tbreak\n\t\t}\n\t}\n\tif !ok {\n\t\thttpserver.Error(w, \"no internalURLs configured for WebDAV service\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tproxy := &httputil.ReverseProxy{\n\t\tDirector: func(r *http.Request) {\n\t\t\tr.URL.Scheme = webdavBase.Scheme\n\t\t\tr.URL.Host = webdavBase.Host\n\t\t\t// Outgoing Host header specifies the\n\t\t\t// collection ID.\n\t\t\tr.Host = cr.LogUUID + \".internal\"\n\t\t\t// We already checked permission on the\n\t\t\t// container, so we can use a root token here\n\t\t\t// instead of counting on the \"access to log\n\t\t\t// via container request and container\"\n\t\t\t// permission check, which can be racy when a\n\t\t\t// request gets retried with a new container.\n\t\t\tr.Header.Set(\"Authorization\", \"Bearer \"+conn.cluster.SystemRootToken)\n\t\t\t// We can't change r.URL.Path without\n\t\t\t// confusing WebDAV (request body and response\n\t\t\t// headers refer to the same paths) so we tell\n\t\t\t// keep-web to map the log collection onto the\n\t\t\t// containers/X/log/ namespace.\n\t\t\tr.Header.Set(\"X-Webdav-Prefix\", \"/arvados/v1/container_requests/\"+cr.UUID+\"/log\")\n\t\t\tif len(opts.Path) >= 28 && opts.Path[6:13] == \"-dz642-\" {\n\t\t\t\t// \"/arvados/v1/container_requests/{crUUID}/log/{cUUID}...\"\n\t\t\t\t// proxies to\n\t\t\t\t// \"/log for container {cUUID}...\"\n\t\t\t\tr.Header.Set(\"X-Webdav-Prefix\", \"/arvados/v1/container_requests/\"+cr.UUID+\"/log/\"+opts.Path[1:28])\n\t\t\t\tr.Header.Set(\"X-Webdav-Source\", \"/log for container \"+opts.Path[1:28]+\"/\")\n\t\t\t}\n\t\t},\n\t\tModifyResponse: func(resp *http.Response) error {\n\t\t\tpreemptivelyDeduplicateHeaders(w.Header(), resp.Header)\n\t\t\treturn nil\n\t\t},\n\t}\n\tif conn.cluster.TLS.Insecure {\n\t\tproxy.Transport = &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: conn.cluster.TLS.Insecure,\n\t\t\t},\n\t\t}\n\t}\n\tproxy.ServeHTTP(w, r)\n}\n\nfunc gatewayProxy(dial gatewayDialer, responseWriter http.ResponseWriter, setRequestHeader http.Header, proxyErr *error) 
*httputil.ReverseProxy {\n\tvar proxyReq *http.Request\n\tvar expectRespondAuth string\n\treturn &httputil.ReverseProxy{\n\t\t// Our custom Transport:\n\t\t//\n\t\t// - Uses a custom dialer to connect to the gateway\n\t\t// (either directly or through a tunnel set up through\n\t\t// ContainerGatewayTunnel)\n\t\t//\n\t\t// - Verifies the gateway's TLS certificate using\n\t\t// X-Arvados-Authorization headers.\n\t\t//\n\t\t// This involves modifying the outgoing request header\n\t\t// in DialTLSContext.  (ReverseProxy certainly doesn't\n\t\t// expect us to do this, but it works.)\n\t\tTransport: &http.Transport{\n\t\t\tDialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) {\n\t\t\t\ttlsconn, requestAuth, respondAuth, err := dial()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tproxyReq.Header.Set(\"X-Arvados-Authorization\", requestAuth)\n\t\t\t\texpectRespondAuth = respondAuth\n\t\t\t\treturn tlsconn, nil\n\t\t\t},\n\t\t\t// This transport is only used for a single\n\t\t\t// request, so http keep-alive would\n\t\t\t// accumulate open sockets without providing\n\t\t\t// any benefit.  So, disable keep-alive.\n\t\t\tDisableKeepAlives: true,\n\t\t\t// Use stdlib defaults.\n\t\t\tForceAttemptHTTP2:     http.DefaultTransport.(*http.Transport).ForceAttemptHTTP2,\n\t\t\tTLSHandshakeTimeout:   http.DefaultTransport.(*http.Transport).TLSHandshakeTimeout,\n\t\t\tExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout,\n\t\t},\n\t\tDirector: func(r *http.Request) {\n\t\t\t// Scheme/host of incoming r.URL are\n\t\t\t// irrelevant now, and may even be\n\t\t\t// missing. Host is ignored by our\n\t\t\t// DialTLSContext, but we need a generic\n\t\t\t// syntactically correct URL for net/http to\n\t\t\t// work with.\n\t\t\tr.URL.Scheme = \"https\"\n\t\t\tr.URL.Host = \"0.0.0.0:0\"\n\t\t\tfor k, v := range setRequestHeader {\n\t\t\t\tr.Header[k] = v\n\t\t\t}\n\t\t\tproxyReq = r\n\t\t},\n\t\tModifyResponse: func(resp *http.Response) error {\n\t\t\tif resp.Header.Get(\"X-Arvados-Authorization-Response\") != expectRespondAuth {\n\t\t\t\t// Note this is how we detect\n\t\t\t\t// an attacker-in-the-middle.\n\t\t\t\treturn httpserver.ErrorWithStatus(errors.New(\"bad X-Arvados-Authorization-Response header\"), http.StatusBadGateway)\n\t\t\t}\n\t\t\tresp.Header.Del(\"X-Arvados-Authorization-Response\")\n\t\t\tpreemptivelyDeduplicateHeaders(responseWriter.Header(), resp.Header)\n\t\t\treturn nil\n\t\t},\n\t\tErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) {\n\t\t\tif proxyErr != nil {\n\t\t\t\t*proxyErr = err\n\t\t\t}\n\t\t},\n\t}\n}\n\n// httputil.ReverseProxy uses (http.Header)Add() to copy headers from\n// the upstream Response to the downstream ResponseWriter. If headers\n// have already been set on the downstream ResponseWriter, Add() will\n// result in duplicate headers. For example, if we set CORS headers\n// and then use ReverseProxy with an upstream that also sets CORS\n// headers, our client will receive\n//\n//\tAccess-Control-Allow-Origin: *\n//\tAccess-Control-Allow-Origin: *\n//\n// ...which is incorrect.\n//\n// preemptivelyDeduplicateHeaders, when called from a ModifyResponse\n// hook, solves this by removing any conflicting headers from\n// ResponseWriter. This way, when ReverseProxy calls Add(), it will\n// assign the new values without causing duplicates.\n//\n// dst is the downstream ResponseWriter's Header(). 
src is the\n// upstream resp.Header.\nfunc preemptivelyDeduplicateHeaders(dst, src http.Header) {\n\tfor hdr := range src {\n\t\tdst.Del(hdr)\n\t}\n}\n\n// serveEmptyDir handles read-only webdav requests as if there was an\n// empty collection rooted at the given path. It's equivalent to\n// proxying to an empty collection in keep-web, but avoids the extra\n// hop.\nfunc (conn *Conn) serveEmptyDir(path string, w http.ResponseWriter, r *http.Request) {\n\twh := webdav.Handler{\n\t\tPrefix:     path,\n\t\tFileSystem: webdav.NewMemFS(),\n\t\tLockSystem: webdavfs.NoLockSystem,\n\t\tLogger: func(r *http.Request, err error) {\n\t\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\t\tctxlog.FromContext(r.Context()).WithError(err).Info(\"webdav error on empty collection fs\")\n\t\t\t}\n\t\t},\n\t}\n\twh.ServeHTTP(w, r)\n}\n\n// ContainerSSH returns a connection to the SSH server in the\n// appropriate crunch-run process on the worker node where the\n// specified container is running.\n//\n// If the returned error is nil, the caller is responsible for closing\n// sshconn.Conn.\nfunc (conn *Conn) ContainerSSH(ctx context.Context, opts arvados.ContainerSSHOptions) (sshconn arvados.ConnectionResponse, err error) {\n\tuser, err := conn.railsProxy.UserGetCurrent(ctx, arvados.GetOptions{})\n\tif err != nil {\n\t\treturn sshconn, err\n\t}\n\tctr, err := conn.railsProxy.ContainerGet(ctx, arvados.GetOptions{UUID: opts.UUID, Select: []string{\"uuid\", \"state\", \"gateway_address\", \"interactive_session_started\"}})\n\tif err != nil {\n\t\treturn sshconn, err\n\t}\n\tif !user.IsAdmin || !conn.cluster.Containers.ShellAccess.Admin {\n\t\tif !conn.cluster.Containers.ShellAccess.User {\n\t\t\treturn sshconn, httpserver.ErrorWithStatus(errors.New(\"shell access is disabled in config\"), http.StatusServiceUnavailable)\n\t\t}\n\t\terr = conn.checkContainerLoginPermission(ctx, user.UUID, opts.UUID)\n\t\tif err != nil {\n\t\t\treturn sshconn, err\n\t\t}\n\t}\n\n\tif ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked {\n\t\treturn sshconn, httpserver.ErrorWithStatus(fmt.Errorf(\"container is not running yet (state is %q)\", ctr.State), http.StatusServiceUnavailable)\n\t} else if ctr.State != arvados.ContainerStateRunning {\n\t\treturn sshconn, httpserver.ErrorWithStatus(fmt.Errorf(\"container has ended (state is %q)\", ctr.State), http.StatusGone)\n\t}\n\n\tdial, arpc, err := conn.findGateway(ctx, ctr, opts.NoForward)\n\tif err != nil {\n\t\treturn sshconn, err\n\t}\n\tif arpc != nil {\n\t\topts.NoForward = true\n\t\treturn arpc.ContainerSSH(ctx, opts)\n\t}\n\n\ttlsconn, requestAuth, respondAuth, err := dial()\n\tif err != nil {\n\t\treturn sshconn, err\n\t}\n\tbufr := bufio.NewReader(tlsconn)\n\tbufw := bufio.NewWriter(tlsconn)\n\n\tu := url.URL{\n\t\tScheme: \"http\",\n\t\tHost:   tlsconn.RemoteAddr().String(),\n\t\tPath:   \"/ssh\",\n\t}\n\tpostform := url.Values{\n\t\t// uuid is only needed for older crunch-run versions\n\t\t// (current version uses X-Arvados-* header below)\n\t\t\"uuid\":           {opts.UUID},\n\t\t\"detach_keys\":    {opts.DetachKeys},\n\t\t\"login_username\": {opts.LoginUsername},\n\t\t\"no_forward\":     {fmt.Sprintf(\"%v\", opts.NoForward)},\n\t}\n\tpostdata := postform.Encode()\n\tbufw.WriteString(\"POST \" + u.String() + \" HTTP/1.1\\r\\n\")\n\tbufw.WriteString(\"Host: \" + u.Host + \"\\r\\n\")\n\tbufw.WriteString(\"Upgrade: ssh\\r\\n\")\n\tbufw.WriteString(\"X-Arvados-Container-Gateway-Uuid: \" + opts.UUID + 
\"\\r\\n\")\n\tbufw.WriteString(\"X-Arvados-Authorization: \" + requestAuth + \"\\r\\n\")\n\tbufw.WriteString(\"Content-Type: application/x-www-form-urlencoded\\r\\n\")\n\tfmt.Fprintf(bufw, \"Content-Length: %d\\r\\n\", len(postdata))\n\tbufw.WriteString(\"\\r\\n\")\n\tbufw.WriteString(postdata)\n\tbufw.Flush()\n\tresp, err := http.ReadResponse(bufr, &http.Request{Method: \"POST\"})\n\tif err != nil {\n\t\ttlsconn.Close()\n\t\treturn sshconn, httpserver.ErrorWithStatus(fmt.Errorf(\"error reading http response from gateway: %w\", err), http.StatusBadGateway)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusSwitchingProtocols {\n\t\tbody, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 1000))\n\t\ttlsconn.Close()\n\t\treturn sshconn, httpserver.ErrorWithStatus(fmt.Errorf(\"unexpected status %s %q\", resp.Status, body), http.StatusBadGateway)\n\t}\n\tif strings.ToLower(resp.Header.Get(\"Upgrade\")) != \"ssh\" ||\n\t\tstrings.ToLower(resp.Header.Get(\"Connection\")) != \"upgrade\" {\n\t\ttlsconn.Close()\n\t\treturn sshconn, httpserver.ErrorWithStatus(errors.New(\"bad upgrade\"), http.StatusBadGateway)\n\t}\n\tif resp.Header.Get(\"X-Arvados-Authorization-Response\") != respondAuth {\n\t\ttlsconn.Close()\n\t\treturn sshconn, httpserver.ErrorWithStatus(errors.New(\"bad X-Arvados-Authorization-Response header\"), http.StatusBadGateway)\n\t}\n\n\tif !ctr.InteractiveSessionStarted {\n\t\tctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})\n\t\t_, err = conn.railsProxy.ContainerUpdate(ctxRoot, arvados.UpdateOptions{\n\t\t\tUUID: opts.UUID,\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"interactive_session_started\": true,\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\ttlsconn.Close()\n\t\t\treturn sshconn, httpserver.ErrorWithStatus(err, http.StatusInternalServerError)\n\t\t}\n\t}\n\n\tsshconn.Conn = tlsconn\n\tsshconn.Bufrw = &bufio.ReadWriter{Reader: bufr, Writer: bufw}\n\tsshconn.Logger = ctxlog.FromContext(ctx)\n\tsshconn.Header = http.Header{\"Upgrade\": {\"ssh\"}}\n\treturn sshconn, nil\n}\n\n// Check that userUUID is permitted to start an interactive login\n// session in ctrUUID.  
Any returned error has an HTTPStatus().\nfunc (conn *Conn) checkContainerLoginPermission(ctx context.Context, userUUID, ctrUUID string) error {\n\tctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})\n\tcrs, err := conn.railsProxy.ContainerRequestList(ctxRoot, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{\"container_uuid\", \"=\", ctrUUID}}})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, cr := range crs.Items {\n\t\tif cr.ModifiedByUserUUID != userUUID {\n\t\t\treturn httpserver.ErrorWithStatus(errors.New(\"permission denied: container is associated with requests submitted by other users\"), http.StatusForbidden)\n\t\t}\n\t}\n\tif crs.ItemsAvailable != len(crs.Items) {\n\t\treturn httpserver.ErrorWithStatus(errors.New(\"incomplete response while checking permission\"), http.StatusInternalServerError)\n\t}\n\treturn nil\n}\n\nvar errUnassignedPort = httpserver.ErrorWithStatus(errors.New(\"unassigned port\"), http.StatusGone)\n\n// ContainerHTTPProxy proxies an incoming request through to the\n// specified port on a running container, via crunch-run's container\n// gateway.\nfunc (conn *Conn) ContainerHTTPProxy(ctx context.Context, opts arvados.ContainerHTTPProxyOptions) (http.Handler, error) {\n\t// We'll use ctxRoot to do requests below that don't depend on\n\t// the supplied token.\n\tctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})\n\n\tneedTokenCookie := false\n\tif queryToken := opts.Request.URL.Query().Get(\"arvados_api_token\"); queryToken != \"\" {\n\t\t// If there's a token in the query, use it when\n\t\t// looking up the container/container request.  This\n\t\t// lets us reliably determine needClearSiteData, even\n\t\t// if the container UUID is not explicitly given in\n\t\t// the request.\n\t\tctx = auth.NewContext(ctx, &auth.Credentials{Tokens: []string{queryToken}})\n\t\t// Ensure we ultimately send a redirect response to\n\t\t// move the token from query to cookie.\n\t\tneedTokenCookie = true\n\t}\n\n\tvar targetUUID string\n\tvar targetPort int\n\tif strings.HasPrefix(opts.Target, \":\") {\n\t\t// Target \":1234\" means \"the entry in the\n\t\t// container_ports table with external_port=1234\".\n\t\textport, err := strconv.Atoi(opts.Target[1:])\n\t\tif err != nil {\n\t\t\treturn nil, httpserver.ErrorWithStatus(fmt.Errorf(\"invalid port in target: %s\", opts.Target), http.StatusBadRequest)\n\t\t}\n\t\tdb, err := conn.getdb(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, httpserver.ErrorWithStatus(fmt.Errorf(\"getdb: %w\", err), http.StatusBadGateway)\n\t\t}\n\t\terr = db.QueryRowContext(ctx, `select container_uuid, container_port\n\t\t\tfrom container_ports\n\t\t\twhere external_port = $1`, extport).Scan(&targetUUID, &targetPort)\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn nil, errUnassignedPort\n\t\t} else if err != nil {\n\t\t\treturn nil, httpserver.ErrorWithStatus(err, http.StatusBadGateway)\n\t\t}\n\t} else if len(opts.Target) > 28 && arvadosclient.UUIDMatch(opts.Target[:27]) && opts.Target[27] == '-' {\n\t\ttargetUUID = opts.Target[:27]\n\t\tfmt.Sscanf(opts.Target[28:], \"%d\", &targetPort)\n\t\tif targetPort < 1 {\n\t\t\treturn nil, httpserver.ErrorWithStatus(fmt.Errorf(\"cannot parse port number from vhost prefix %q\", opts.Target), http.StatusBadRequest)\n\t\t}\n\t} else {\n\t\tlinks, err := conn.railsProxy.LinkList(ctxRoot, arvados.ListOptions{\n\t\t\tLimit: 1,\n\t\t\tFilters: []arvados.Filter{\n\t\t\t\t{\"link_class\", \"=\", 
\"published_port\"},\n\t\t\t\t{\"name\", \"=\", opts.Target}}})\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"lookup failed: %w\", err)\n\t\t}\n\t\tif len(links.Items) == 0 {\n\t\t\treturn nil, httpserver.ErrorWithStatus(fmt.Errorf(\"container web service not found: %q\", opts.Target), http.StatusNotFound)\n\t\t}\n\t\ttargetUUID = links.Items[0].HeadUUID\n\t\tport, ok := links.Items[0].Properties[\"port\"].(float64)\n\t\ttargetPort = int(port)\n\t\tif !ok || targetPort < 1 || targetPort > 65535 {\n\t\t\treturn nil, httpserver.ErrorWithStatus(fmt.Errorf(\"invalid port in published_port link: %v\", links.Items[0].Properties[\"port\"]), http.StatusInternalServerError)\n\t\t}\n\t}\n\n\tvar ctr arvados.Container\n\n\t// A redirect might be needed for one or two reasons: (1) to\n\t// avoid letting the container web service access it via\n\t// document.location, or showing the token in the browser's\n\t// location bar (even when returning an error), and/or (2) to\n\t// clear client-side state left over from a different\n\t// container that was previously available on the same\n\t// dynamically assigned port.\n\t//\n\t// maybeRedirect() returns (nil, nil) if the given err is nil\n\t// and there is no need to redirect.  Otherwise, it returns\n\t// suitable values for the main function to return: either\n\t// (nil, err), or (h, nil) where h implements a redirect.\n\tmaybeRedirect := func(err error) (http.Handler, error) {\n\t\tneedClearSiteData := false\n\t\tfor _, cookie := range opts.Request.CookiesNamed(\"arvados_container_uuid\") {\n\t\t\tif cookie.Value != ctr.UUID && ctr.UUID != \"\" {\n\t\t\t\t// The user agent might have\n\t\t\t\t// client-side state left over from a\n\t\t\t\t// different container that was\n\t\t\t\t// previously available on this port.\n\t\t\t\tneedClearSiteData = true\n\t\t\t}\n\t\t}\n\t\tif !needClearSiteData && !needTokenCookie {\n\t\t\t// Redirect not needed\n\t\t\treturn nil, err\n\t\t}\n\t\treturn containerHTTPProxyRedirect(needClearSiteData), nil\n\t}\n\n\t// First we need to fetch the container request (or container)\n\t// record as root, so we can check whether the requested port\n\t// is marked public in published_ports.  This needs to work\n\t// even if the request did not provide a token at all.\n\tvar isPublic bool\n\tif len(targetUUID) == 27 && targetUUID[6:11] == \"xvhdp\" {\n\t\t// Look up specified container request\n\t\tctrreq, err := conn.railsProxy.ContainerRequestGet(ctxRoot, arvados.GetOptions{\n\t\t\tUUID:   targetUUID,\n\t\t\tSelect: []string{\"uuid\", \"state\", \"published_ports\", \"container_uuid\"},\n\t\t})\n\t\tif err == nil && ctrreq.PublishedPorts[strconv.Itoa(targetPort)].Access == arvados.PublishedPortAccessPublic {\n\t\t\tisPublic = true\n\t\t\ttargetUUID = ctrreq.ContainerUUID\n\t\t}\n\t} else {\n\t\t// Look up specified container\n\t\tvar err error\n\t\tctr, err = conn.railsProxy.ContainerGet(ctxRoot, arvados.GetOptions{\n\t\t\tUUID:   targetUUID,\n\t\t\tSelect: []string{\"uuid\", \"state\", \"gateway_address\", \"published_ports\"},\n\t\t})\n\t\tif err == nil && ctr.PublishedPorts[strconv.Itoa(targetPort)].Access == arvados.PublishedPortAccessPublic {\n\t\t\tisPublic = true\n\t\t}\n\t}\n\n\tif !isPublic {\n\t\t// Re-fetch the container request record, this time as\n\t\t// the authenticated user instead of root.  
This lets\n\t\t// us return 404 if the container is not readable by\n\t\t// this user, for example.\n\t\tif len(targetUUID) == 27 && targetUUID[6:11] == \"xvhdp\" {\n\t\t\tctrreq, err := conn.railsProxy.ContainerRequestGet(ctxRoot, arvados.GetOptions{\n\t\t\t\tUUID:   targetUUID,\n\t\t\t\tSelect: []string{\"uuid\", \"state\", \"published_ports\", \"container_uuid\"},\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn maybeRedirect(fmt.Errorf(\"container request lookup error: %w\", err))\n\t\t\t}\n\t\t\tif ctrreq.ContainerUUID == \"\" {\n\t\t\t\treturn maybeRedirect(httpserver.ErrorWithStatus(errors.New(\"container request does not have an assigned container\"), http.StatusBadRequest))\n\t\t\t}\n\t\t\ttargetUUID = ctrreq.ContainerUUID\n\t\t}\n\t\tvar err error\n\t\tctr, err = conn.railsProxy.ContainerGet(ctx, arvados.GetOptions{UUID: targetUUID, Select: []string{\"uuid\", \"state\", \"gateway_address\"}})\n\t\tif err != nil {\n\t\t\treturn maybeRedirect(fmt.Errorf(\"container lookup failed: %w\", err))\n\t\t}\n\t\tuser, err := conn.railsProxy.UserGetCurrent(ctx, arvados.GetOptions{})\n\t\tif err != nil {\n\t\t\treturn maybeRedirect(err)\n\t\t}\n\t\tif !user.IsAdmin {\n\t\t\t// For non-public ports, access is only granted to\n\t\t\t// admins and the user who submitted all of the\n\t\t\t// container requests that reference this container.\n\t\t\terr = conn.checkContainerLoginPermission(ctx, user.UUID, ctr.UUID)\n\t\t\tif err != nil {\n\t\t\t\treturn maybeRedirect(err)\n\t\t\t}\n\t\t}\n\t} else if ctr.UUID == \"\" {\n\t\t// isPublic, but we don't have the container record\n\t\t// yet because the request specified a container\n\t\t// request UUID.\n\t\tvar err error\n\t\tctr, err = conn.railsProxy.ContainerGet(ctxRoot, arvados.GetOptions{UUID: targetUUID, Select: []string{\"uuid\", \"state\", \"gateway_address\"}})\n\t\tif err != nil {\n\t\t\treturn maybeRedirect(fmt.Errorf(\"container lookup failed: %w\", err))\n\t\t}\n\t}\n\tdial, arpc, err := conn.findGateway(ctx, ctr, opts.NoForward)\n\tif err != nil {\n\t\treturn maybeRedirect(fmt.Errorf(\"cannot find gateway: %w\", err))\n\t}\n\tif arpc != nil {\n\t\tif h, err := maybeRedirect(nil); h != nil || err != nil {\n\t\t\treturn h, err\n\t\t}\n\t\topts.NoForward = true\n\t\treturn arpc.ContainerHTTPProxy(ctx, opts)\n\t}\n\n\t// Redirect if needed to clear site data and/or move the token\n\t// from the query to a cookie.\n\tif h, err := maybeRedirect(nil); h != nil || err != nil {\n\t\treturn h, err\n\t}\n\n\t// Remove arvados_api_token cookie to ensure the http service\n\t// in the container does not see it.\n\tcookies := opts.Request.Cookies()\n\topts.Request.Header.Del(\"Cookie\")\n\tfor _, cookie := range cookies {\n\t\tif cookie.Name != \"arvados_api_token\" {\n\t\t\topts.Request.AddCookie(cookie)\n\t\t}\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {\n\t\thttp.SetCookie(w, &http.Cookie{Name: \"arvados_container_uuid\", Value: ctr.UUID})\n\t\tvar proxyErr error\n\t\tgatewayProxy(dial, w, http.Header{\n\t\t\t\"X-Arvados-Container-Gateway-Uuid\": {targetUUID},\n\t\t\t\"X-Arvados-Container-Target-Port\":  {strconv.Itoa(targetPort)},\n\t\t}, &proxyErr).ServeHTTP(w, opts.Request)\n\t\tif proxyErr != nil {\n\t\t\thttpserver.Error(w, \"proxy error: \"+proxyErr.Error(), http.StatusBadGateway)\n\t\t}\n\t}), nil\n}\n\n// containerHTTPProxyRedirect returns a redirect handler that (1) if\n// there is a token in the query, moves it to a cookie, and (2) if\n// needClearSiteData is true, clears all other client-side 
state.\nfunc containerHTTPProxyRedirect(needClearSiteData bool) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tredir := *r.URL\n\t\tquery := redir.Query()\n\t\tneedTokenCookie := query.Get(\"arvados_api_token\")\n\t\tif needTokenCookie != \"\" {\n\t\t\tdelete(query, \"arvados_api_token\")\n\t\t\tredir.RawQuery = query.Encode()\n\t\t}\n\t\tif needTokenCookie != \"\" {\n\t\t\thttp.SetCookie(w, &http.Cookie{\n\t\t\t\tName:     \"arvados_api_token\",\n\t\t\t\tValue:    auth.EncodeTokenCookie([]byte(needTokenCookie)),\n\t\t\t\tPath:     \"/\",\n\t\t\t\tHttpOnly: true,\n\t\t\t\tSameSite: http.SameSiteLaxMode,\n\t\t\t})\n\t\t}\n\t\tif needClearSiteData {\n\t\t\tif r.Method != http.MethodHead && r.Method != http.MethodGet {\n\t\t\t\tw.WriteHeader(http.StatusGone)\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// We cannot use `Clear-Site-Data: \"cookies\"`\n\t\t\t// to clear cookies, because that applies to\n\t\t\t// all origins in the entire registered\n\t\t\t// domain.  We only want to clear cookies for\n\t\t\t// this dynamically assigned origin.\n\t\t\tfor _, cookie := range r.Cookies() {\n\t\t\t\tif cookie.Name != \"arvados_api_token\" {\n\t\t\t\t\tcookie.MaxAge = -1\n\t\t\t\t\tcookie.Expires = time.Time{}\n\t\t\t\t\tcookie.Value = \"\"\n\t\t\t\t\thttp.SetCookie(w, cookie)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Unlike the \"cookies\" directive, \"cache\" and\n\t\t\t// \"storage\" clear data for the current origin\n\t\t\t// only.\n\t\t\tw.Header().Set(\"Clear-Site-Data\", `\"cache\", \"storage\"`)\n\t\t}\n\t\tw.Header().Set(\"Location\", redir.String())\n\t\tw.WriteHeader(http.StatusSeeOther)\n\t})\n}\n\n// ContainerGatewayTunnel sets up a tunnel enabling us (controller) to\n// connect to the caller's (crunch-run's) gateway server.\nfunc (conn *Conn) ContainerGatewayTunnel(ctx context.Context, opts arvados.ContainerGatewayTunnelOptions) (resp arvados.ConnectionResponse, err error) {\n\th := hmac.New(sha256.New, []byte(conn.cluster.SystemRootToken))\n\tfmt.Fprint(h, opts.UUID)\n\tauthSecret := fmt.Sprintf(\"%x\", h.Sum(nil))\n\tif subtle.ConstantTimeCompare([]byte(authSecret), []byte(opts.AuthSecret)) != 1 {\n\t\tctxlog.FromContext(ctx).Info(\"received incorrect auth_secret\")\n\t\treturn resp, httpserver.ErrorWithStatus(errors.New(\"authentication error\"), http.StatusUnauthorized)\n\t}\n\n\tmuxconn, clientconn := net.Pipe()\n\ttunnel, err := yamux.Server(muxconn, nil)\n\tif err != nil {\n\t\tclientconn.Close()\n\t\treturn resp, httpserver.ErrorWithStatus(err, http.StatusInternalServerError)\n\t}\n\n\tconn.gwTunnelsLock.Lock()\n\tif conn.gwTunnels == nil {\n\t\tconn.gwTunnels = map[string]*yamux.Session{opts.UUID: tunnel}\n\t} else {\n\t\tconn.gwTunnels[opts.UUID] = tunnel\n\t}\n\tconn.gwTunnelsLock.Unlock()\n\n\tgo func() {\n\t\t<-tunnel.CloseChan()\n\t\tconn.gwTunnelsLock.Lock()\n\t\tif conn.gwTunnels[opts.UUID] == tunnel {\n\t\t\tdelete(conn.gwTunnels, opts.UUID)\n\t\t}\n\t\tconn.gwTunnelsLock.Unlock()\n\t}()\n\n\t// Assuming we're acting as the backend of an http server,\n\t// lib/controller/router will call resp's ServeHTTP handler,\n\t// which upgrades the incoming http connection to a raw socket\n\t// and connects it to our yamux.Server through our net.Pipe().\n\tresp.Conn = clientconn\n\tresp.Bufrw = &bufio.ReadWriter{Reader: bufio.NewReader(&bytes.Buffer{}), Writer: bufio.NewWriter(&bytes.Buffer{})}\n\tresp.Logger = ctxlog.FromContext(ctx)\n\tresp.Header = http.Header{\"Upgrade\": {\"tunnel\"}}\n\tif u, ok := service.URLFromContext(ctx); ok 
{\n\t\tresp.Header.Set(\"X-Arvados-Internal-Url\", u.String())\n\t} else if forceInternalURLForTest != nil {\n\t\tresp.Header.Set(\"X-Arvados-Internal-Url\", forceInternalURLForTest.String())\n\t}\n\treturn\n}\n\ntype gatewayDialer func() (conn net.Conn, requestAuth, respondAuth string, err error)\n\n// findGateway figures out how to connect to ctr's gateway.\n//\n// If the gateway can be contacted directly or through a tunnel on\n// this instance, the first return value is a non-nil dialer.\n//\n// If the gateway is only accessible through a tunnel through a\n// different controller process, the second return value is a non-nil\n// *rpc.Conn for that controller.\nfunc (conn *Conn) findGateway(ctx context.Context, ctr arvados.Container, noForward bool) (gatewayDialer, *rpc.Conn, error) {\n\tconn.gwTunnelsLock.Lock()\n\ttunnel := conn.gwTunnels[ctr.UUID]\n\tconn.gwTunnelsLock.Unlock()\n\n\tmyURL, _ := service.URLFromContext(ctx)\n\n\tif host, _, _ := net.SplitHostPort(ctr.GatewayAddress); host != \"\" &&\n\t\t(host != \"127.0.0.1\" || conn.cluster.Containers.CloudVMs.Driver == \"loopback\") {\n\t\t// If crunch-run provided a GatewayAddress like\n\t\t// host:port or [host]:port, and host is realistic\n\t\t// (127.0.0.1 is realistic only if we're running the\n\t\t// loopback driver), that means \"ipaddr\" is one of the\n\t\t// external interfaces where the gateway is\n\t\t// listening. In that case, it's the most\n\t\t// reliable/direct option, so we use it even if a\n\t\t// tunnel might also be available.\n\t\treturn func() (net.Conn, string, string, error) {\n\t\t\trawconn, err := (&net.Dialer{}).DialContext(ctx, \"tcp\", ctr.GatewayAddress)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", \"\", httpserver.ErrorWithStatus(err, http.StatusServiceUnavailable)\n\t\t\t}\n\t\t\treturn conn.dialGatewayTLS(ctx, ctr, rawconn)\n\t\t}, nil, nil\n\t}\n\tif tunnel != nil && !(forceProxyForTest && !noForward) {\n\t\t// If we can't connect directly, and the gateway has\n\t\t// established a yamux tunnel with us, connect through\n\t\t// the tunnel.\n\t\t//\n\t\t// ...except: forceProxyForTest means we are emulating\n\t\t// a situation where the gateway has established a\n\t\t// yamux tunnel with controller B, and the\n\t\t// ContainerSSH request arrives at controller A. If\n\t\t// noForward==false then we are acting as A, so\n\t\t// we pretend not to have a tunnel, and fall through\n\t\t// to the \"tunurl\" case below. If noForward==true\n\t\t// then the client is A and we are acting as B, so we\n\t\t// connect to our tunnel.\n\t\treturn func() (net.Conn, string, string, error) {\n\t\t\trawconn, err := tunnel.Open()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", \"\", httpserver.ErrorWithStatus(err, http.StatusServiceUnavailable)\n\t\t\t}\n\t\t\treturn conn.dialGatewayTLS(ctx, ctr, rawconn)\n\t\t}, nil, nil\n\t}\n\tif tunurl := strings.TrimPrefix(ctr.GatewayAddress, \"tunnel \"); tunurl != ctr.GatewayAddress &&\n\t\ttunurl != \"\" &&\n\t\ttunurl != myURL.String() &&\n\t\t!noForward {\n\t\t// If crunch-run provided a GatewayAddress like\n\t\t// \"tunnel https://10.0.0.10:1010/\", that means the\n\t\t// gateway has established a yamux tunnel with the\n\t\t// controller process at the indicated InternalURL\n\t\t// (which isn't us, otherwise we would have had\n\t\t// \"tunnel != nil\" above). 
We need to proxy through to\n\t\t// the other controller process in order to use the\n\t\t// tunnel.\n\t\tfor u := range conn.cluster.Services.Controller.InternalURLs {\n\t\t\tif u.String() == tunurl {\n\t\t\t\tctxlog.FromContext(ctx).Debugf(\"connecting to container gateway through other controller at %s\", u)\n\t\t\t\tu := url.URL(u)\n\t\t\t\treturn nil, rpc.NewConn(conn.cluster.ClusterID, &u, conn.cluster.TLS.Insecure, rpc.PassthroughTokenProvider), nil\n\t\t\t}\n\t\t}\n\t\tctxlog.FromContext(ctx).Warnf(\"container gateway provided a tunnel endpoint %s that is not one of Services.Controller.InternalURLs\", tunurl)\n\t\treturn nil, nil, httpserver.ErrorWithStatus(errors.New(\"container gateway is running but tunnel endpoint is invalid\"), http.StatusServiceUnavailable)\n\t}\n\tif ctr.GatewayAddress == \"\" {\n\t\treturn nil, nil, httpserver.ErrorWithStatus(errors.New(\"container is running but gateway is not available\"), http.StatusServiceUnavailable)\n\t} else {\n\t\treturn nil, nil, httpserver.ErrorWithStatus(errors.New(\"container is running but tunnel is down\"), http.StatusServiceUnavailable)\n\t}\n}\n\n// dialGatewayTLS negotiates a TLS connection to a container gateway\n// over the given raw connection.\nfunc (conn *Conn) dialGatewayTLS(ctx context.Context, ctr arvados.Container, rawconn net.Conn) (*tls.Conn, string, string, error) {\n\t// crunch-run uses a self-signed / unverifiable TLS\n\t// certificate, so we use the following scheme to ensure we're\n\t// not talking to an attacker-in-the-middle.\n\t//\n\t// 1. Compute ctrKey = HMAC-SHA256(sysRootToken,ctrUUID) --\n\t// this will be the same ctrKey that a-d-c supplied to\n\t// crunch-run in the GatewayAuthSecret env var.\n\t//\n\t// 2. Compute requestAuth = HMAC-SHA256(ctrKey,serverCert) and\n\t// send it to crunch-run as the X-Arvados-Authorization\n\t// header, proving that we know ctrKey. (Note a MITM cannot\n\t// replay the proof to a real crunch-run server, because the\n\t// real crunch-run server would have a different cert.)\n\t//\n\t// 3. Compute respondAuth = HMAC-SHA256(ctrKey,requestAuth)\n\t// and ensure the server returns it in the\n\t// X-Arvados-Authorization-Response header, proving that the\n\t// server knows ctrKey.\n\tvar requestAuth, respondAuth string\n\ttlsconn := tls.Client(rawconn, &tls.Config{\n\t\tInsecureSkipVerify: true,\n\t\tVerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {\n\t\t\tif len(rawCerts) == 0 {\n\t\t\t\treturn errors.New(\"no certificate received, cannot compute authorization header\")\n\t\t\t}\n\t\t\th := hmac.New(sha256.New, []byte(conn.cluster.SystemRootToken))\n\t\t\tfmt.Fprint(h, ctr.UUID)\n\t\t\tauthKey := fmt.Sprintf(\"%x\", h.Sum(nil))\n\t\t\th = hmac.New(sha256.New, []byte(authKey))\n\t\t\th.Write(rawCerts[0])\n\t\t\trequestAuth = fmt.Sprintf(\"%x\", h.Sum(nil))\n\t\t\th.Reset()\n\t\t\th.Write([]byte(requestAuth))\n\t\t\trespondAuth = fmt.Sprintf(\"%x\", h.Sum(nil))\n\t\t\treturn nil\n\t\t},\n\t})\n\terr := tlsconn.HandshakeContext(ctx)\n\tif err != nil {\n\t\treturn nil, \"\", \"\", httpserver.ErrorWithStatus(fmt.Errorf(\"TLS handshake failed: %w\", err), http.StatusBadGateway)\n\t}\n\tif respondAuth == \"\" {\n\t\ttlsconn.Close()\n\t\treturn nil, \"\", \"\", httpserver.ErrorWithStatus(errors.New(\"BUG: no respondAuth\"), http.StatusInternalServerError)\n\t}\n\treturn tlsconn, requestAuth, respondAuth, nil\n}\n"
  },
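  {
    "path": "lib/controller/localdb/container_gateway_auth_sketch.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"fmt\"\n)\n\n// gatewayAuthSketch is a minimal, self-contained illustration (not\n// called by any production code) of the three HMAC-SHA256 steps\n// described in dialGatewayTLS:\n//\n// 1. ctrKey = hex(HMAC-SHA256(systemRootToken, containerUUID))\n// 2. requestAuth = hex(HMAC-SHA256(ctrKey, serverCert))\n// 3. respondAuth = hex(HMAC-SHA256(ctrKey, requestAuth))\n//\n// The arguments are hypothetical placeholders; in production they\n// come from the cluster config, the container record, and the\n// gateway's TLS certificate.\nfunc gatewayAuthSketch(systemRootToken, containerUUID string, serverCert []byte) (requestAuth, respondAuth string) {\n\t// Step 1: derive the per-container key -- the same value\n\t// a-d-c supplies to crunch-run as GatewayAuthSecret.\n\th := hmac.New(sha256.New, []byte(systemRootToken))\n\tfmt.Fprint(h, containerUUID)\n\tctrKey := fmt.Sprintf(\"%x\", h.Sum(nil))\n\t// Step 2: prove knowledge of ctrKey, bound to the server's\n\t// certificate so the proof cannot be replayed to a different\n\t// server.\n\th = hmac.New(sha256.New, []byte(ctrKey))\n\th.Write(serverCert)\n\trequestAuth = fmt.Sprintf(\"%x\", h.Sum(nil))\n\t// Step 3: the response the server must return in the\n\t// X-Arvados-Authorization-Response header to prove it also\n\t// knows ctrKey.\n\th.Reset()\n\th.Write([]byte(requestAuth))\n\trespondAuth = fmt.Sprintf(\"%x\", h.Sum(nil))\n\treturn\n}\n"
  },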
  {
    "path": "lib/controller/localdb/container_gateway_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/cookiejar\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/router\"\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/lib/crunchrun\"\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\t\"golang.org/x/crypto/ssh\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&ContainerGatewaySuite{})\n\ntype ContainerGatewaySuite struct {\n\tlocaldbSuite\n\tcontainerServices []*httpserver.Server\n\treqCreateOptions  arvados.CreateOptions\n\treqUUID           string\n\tctrUUID           string\n\tsrv               *httptest.Server\n\tgw                *crunchrun.Gateway\n\tassignedExtPort   atomic.Int32\n}\n\nconst (\n\ttestDynamicPortMin = 10000\n\ttestDynamicPortMax = 20000\n)\n\nfunc (s *ContainerGatewaySuite) SetUpSuite(c *check.C) {\n\ts.localdbSuite.SetUpSuite(c)\n\n\t// Set up 10 http servers to play the role of services running\n\t// inside a container. (crunchrun.GatewayTargetStub will allow\n\t// our crunchrun.Gateway to connect to them directly on\n\t// localhost, rather than actually running them inside a\n\t// container.)\n\tfor i := 0; i < 10; i++ {\n\t\tsrv := &httpserver.Server{\n\t\t\tAddr: \":0\",\n\t\t\tServer: http.Server{\n\t\t\t\tHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t\t\tbody := fmt.Sprintf(\"handled %s %s with Host %s\", r.Method, r.URL.String(), r.Host)\n\t\t\t\t\tc.Logf(\"%s\", body)\n\t\t\t\t\tw.Write([]byte(body))\n\t\t\t\t}),\n\t\t\t},\n\t\t}\n\t\tsrv.Start()\n\t\ts.containerServices = append(s.containerServices, srv)\n\t}\n\n\t// s.containerServices[0] will be unlisted\n\t// s.containerServices[1] will be listed with access=public\n\t// s.containerServices[2,...] 
will be listed with access=private\n\tpublishedPorts := make(map[string]arvados.RequestPublishedPort)\n\tfor i, srv := range s.containerServices {\n\t\taccess := arvados.PublishedPortAccessPrivate\n\t\t_, port, _ := net.SplitHostPort(srv.Addr)\n\t\tif i == 1 {\n\t\t\taccess = arvados.PublishedPortAccessPublic\n\t\t}\n\t\tif i > 0 {\n\t\t\tpublishedPorts[port] = arvados.RequestPublishedPort{\n\t\t\t\tAccess: access,\n\t\t\t\tLabel:  \"port \" + port,\n\t\t\t}\n\t\t}\n\t}\n\n\ts.reqCreateOptions = arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"command\":             []string{\"echo\", time.Now().Format(time.RFC3339Nano)},\n\t\t\t\"container_count_max\": 1,\n\t\t\t\"container_image\":     \"arvados/apitestfixture:latest\",\n\t\t\t\"cwd\":                 \"/tmp\",\n\t\t\t\"environment\":         map[string]string{},\n\t\t\t\"output_path\":         \"/out\",\n\t\t\t\"priority\":            1,\n\t\t\t\"state\":               arvados.ContainerRequestStateCommitted,\n\t\t\t\"mounts\": map[string]interface{}{\n\t\t\t\t\"/out\": map[string]interface{}{\n\t\t\t\t\t\"kind\":     \"tmp\",\n\t\t\t\t\t\"capacity\": 1000000,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"runtime_constraints\": map[string]interface{}{\n\t\t\t\t\"vcpus\": 1,\n\t\t\t\t\"ram\":   2,\n\t\t\t},\n\t\t\t\"published_ports\": publishedPorts}}\n}\n\nfunc (s *ContainerGatewaySuite) TearDownSuite(c *check.C) {\n\tfor _, srv := range s.containerServices {\n\t\tgo srv.Close()\n\t}\n\ts.containerServices = nil\n\ts.localdbSuite.TearDownSuite(c)\n}\n\nfunc (s *ContainerGatewaySuite) SetUpTest(c *check.C) {\n\ts.localdbSuite.SetUpTest(c)\n\n\ts.localdb.cluster.Services.ContainerWebServices.ExternalURL.Host = \"*.containers.example.com\"\n\ts.localdb.cluster.Services.ContainerWebServices.ExternalPortMin = 0\n\ts.localdb.cluster.Services.ContainerWebServices.ExternalPortMax = 0\n\n\tcr, err := s.localdb.ContainerRequestCreate(s.userctx, s.reqCreateOptions)\n\tc.Assert(err, check.IsNil)\n\ts.reqUUID = cr.UUID\n\ts.ctrUUID = cr.ContainerUUID\n\n\th := hmac.New(sha256.New, []byte(s.cluster.SystemRootToken))\n\tfmt.Fprint(h, s.ctrUUID)\n\tauthKey := fmt.Sprintf(\"%x\", h.Sum(nil))\n\n\trtr := router.New(s.localdb, router.Config{\n\t\tContainerWebServices: &s.localdb.cluster.Services.ContainerWebServices,\n\t})\n\ts.srv = httptest.NewUnstartedServer(httpserver.AddRequestIDs(httpserver.LogRequests(rtr)))\n\ts.srv.StartTLS()\n\t// the test setup doesn't use lib/service so\n\t// service.URLFromContext() returns nothing -- instead, this\n\t// is how we advertise our internal URL and enable\n\t// proxy-to-other-controller mode.\n\tforceInternalURLForTest = &arvados.URL{Scheme: \"https\", Host: s.srv.Listener.Addr().String()}\n\ts.cluster.Services.Controller.InternalURLs[*forceInternalURLForTest] = arvados.ServiceInstance{}\n\tac := &arvados.Client{\n\t\tAPIHost:   s.srv.Listener.Addr().String(),\n\t\tAuthToken: arvadostest.SystemRootToken,\n\t\tInsecure:  true,\n\t}\n\ts.gw = &crunchrun.Gateway{\n\t\tContainerUUID: s.ctrUUID,\n\t\tAuthSecret:    authKey,\n\t\tAddress:       \"localhost:0\",\n\t\tLog:           ctxlog.TestLogger(c),\n\t\tTarget:        crunchrun.GatewayTargetStub{},\n\t\tArvadosClient: ac,\n\t}\n\tc.Assert(s.gw.Start(), check.IsNil)\n\n\trootctx := ctrlctx.NewWithToken(s.ctx, s.cluster, s.cluster.SystemRootToken)\n\t_, err = s.localdb.ContainerUpdate(rootctx, arvados.UpdateOptions{\n\t\tUUID: s.ctrUUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"state\": arvados.ContainerStateLocked}})\n\tc.Assert(err, check.IsNil)\n\t_, err = 
s.localdb.ContainerUpdate(rootctx, arvados.UpdateOptions{\n\t\tUUID: s.ctrUUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"state\":           arvados.ContainerStateRunning,\n\t\t\t\"gateway_address\": s.gw.Address}})\n\tc.Assert(err, check.IsNil)\n\n\ts.cluster.Containers.ShellAccess.Admin = true\n\ts.cluster.Containers.ShellAccess.User = true\n\t_, err = s.db.Exec(`update containers set interactive_session_started=$1 where uuid=$2`, false, s.ctrUUID)\n\tc.Check(err, check.IsNil)\n\n\ts.assignedExtPort.Store(testDynamicPortMin)\n}\n\nfunc (s *ContainerGatewaySuite) TearDownTest(c *check.C) {\n\tforceProxyForTest = false\n\tif s.reqUUID != \"\" {\n\t\t_, err := s.localdb.ContainerRequestDelete(s.userctx, arvados.DeleteOptions{UUID: s.reqUUID})\n\t\tc.Check(err, check.IsNil)\n\t}\n\tif s.srv != nil {\n\t\ts.srv.Close()\n\t\ts.srv = nil\n\t}\n\t_, err := s.db.Exec(`delete from container_ports where external_port >= $1 and external_port <= $2`, testDynamicPortMin, testDynamicPortMax)\n\tc.Check(err, check.IsNil)\n\ts.localdbSuite.TearDownTest(c)\n}\n\nfunc (s *ContainerGatewaySuite) TestConfig(c *check.C) {\n\tfor _, trial := range []struct {\n\t\tconfigAdmin bool\n\t\tconfigUser  bool\n\t\tsendToken   string\n\t\terrorCode   int\n\t}{\n\t\t{true, true, arvadostest.ActiveTokenV2, 0},\n\t\t{true, false, arvadostest.ActiveTokenV2, 503},\n\t\t{false, true, arvadostest.ActiveTokenV2, 0},\n\t\t{false, false, arvadostest.ActiveTokenV2, 503},\n\t\t{true, true, arvadostest.AdminToken, 0},\n\t\t{true, false, arvadostest.AdminToken, 0},\n\t\t{false, true, arvadostest.AdminToken, 403},\n\t\t{false, false, arvadostest.AdminToken, 503},\n\t} {\n\t\tc.Logf(\"trial %#v\", trial)\n\t\ts.cluster.Containers.ShellAccess.Admin = trial.configAdmin\n\t\ts.cluster.Containers.ShellAccess.User = trial.configUser\n\t\tctx := ctrlctx.NewWithToken(s.ctx, s.cluster, trial.sendToken)\n\t\tsshconn, err := s.localdb.ContainerSSH(ctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})\n\t\tif trial.errorCode == 0 {\n\t\t\tif !c.Check(err, check.IsNil) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !c.Check(sshconn.Conn, check.NotNil) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsshconn.Conn.Close()\n\t\t} else {\n\t\t\tc.Check(err, check.NotNil)\n\t\t\terr, ok := err.(interface{ HTTPStatus() int })\n\t\t\tif c.Check(ok, check.Equals, true) {\n\t\t\t\tc.Check(err.HTTPStatus(), check.Equals, trial.errorCode)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *ContainerGatewaySuite) TestDirectTCP(c *check.C) {\n\t// Set up servers on a few TCP ports\n\tvar addrs []string\n\tfor i := 0; i < 3; i++ {\n\t\tln, err := net.Listen(\"tcp\", \":0\")\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer ln.Close()\n\t\taddrs = append(addrs, ln.Addr().String())\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tconn, err := ln.Accept()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tvar gotAddr string\n\t\t\t\tfmt.Fscanf(conn, \"%s\\n\", &gotAddr)\n\t\t\t\tc.Logf(\"stub server listening at %s received string %q from remote %s\", ln.Addr().String(), gotAddr, conn.RemoteAddr())\n\t\t\t\tif gotAddr == ln.Addr().String() {\n\t\t\t\t\tfmt.Fprintf(conn, \"%s\\n\", ln.Addr().String())\n\t\t\t\t}\n\t\t\t\tconn.Close()\n\t\t\t}\n\t\t}()\n\t}\n\n\tc.Logf(\"connecting to %s\", s.gw.Address)\n\tsshconn, err := s.localdb.ContainerSSH(s.userctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})\n\tc.Assert(err, check.IsNil)\n\tc.Assert(sshconn.Conn, check.NotNil)\n\tdefer sshconn.Conn.Close()\n\tconn, chans, reqs, err := ssh.NewClientConn(sshconn.Conn, \"zzzz-dz642-abcdeabcdeabcde\", 
&ssh.ClientConfig{\n\t\tHostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error { return nil },\n\t})\n\tc.Assert(err, check.IsNil)\n\tclient := ssh.NewClient(conn, chans, reqs)\n\tfor _, expectAddr := range addrs {\n\t\t_, port, err := net.SplitHostPort(expectAddr)\n\t\tc.Assert(err, check.IsNil)\n\n\t\tc.Logf(\"trying foo:%s\", port)\n\t\t{\n\t\t\tconn, err := client.Dial(\"tcp\", \"foo:\"+port)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tconn.SetDeadline(time.Now().Add(time.Second))\n\t\t\tbuf, err := ioutil.ReadAll(conn)\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tc.Check(string(buf), check.Equals, \"\")\n\t\t}\n\n\t\tc.Logf(\"trying localhost:%s\", port)\n\t\t{\n\t\t\tconn, err := client.Dial(\"tcp\", \"localhost:\"+port)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tconn.SetDeadline(time.Now().Add(time.Second))\n\t\t\tconn.Write([]byte(expectAddr + \"\\n\"))\n\t\t\tvar gotAddr string\n\t\t\tfmt.Fscanf(conn, \"%s\\n\", &gotAddr)\n\t\t\tc.Check(gotAddr, check.Equals, expectAddr)\n\t\t}\n\t}\n}\n\n// Connect to crunch-run container gateway directly, using container\n// UUID.\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_Direct(c *check.C) {\n\ts.testContainerHTTPProxy(c, s.ctrUUID, s.vhostAndTargetForWildcard)\n}\n\n// Connect to crunch-run container gateway directly, using container\n// request UUID.\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_Direct_ContainerRequestUUID(c *check.C) {\n\ts.testContainerHTTPProxy(c, s.reqUUID, s.vhostAndTargetForWildcard)\n}\n\n// Connect through a tunnel terminated at this controller process.\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_Tunnel(c *check.C) {\n\ts.gw = s.setupGatewayWithTunnel(c)\n\ts.testContainerHTTPProxy(c, s.ctrUUID, s.vhostAndTargetForWildcard)\n}\n\n// Connect through a tunnel terminated at a different controller\n// process.\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_ProxyTunnel(c *check.C) {\n\tforceProxyForTest = true\n\ts.gw = s.setupGatewayWithTunnel(c)\n\ts.testContainerHTTPProxy(c, s.ctrUUID, s.vhostAndTargetForWildcard)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_DynamicPort(c *check.C) {\n\ts.testContainerHTTPProxy(c, s.ctrUUID, s.vhostAndTargetForDynamicPort)\n}\n\nfunc (s *ContainerGatewaySuite) testContainerHTTPProxy(c *check.C, targetUUID string, vhostAndTargetFunc func(*check.C, string, string) (string, string)) {\n\ttestMethods := []string{\"GET\", \"POST\", \"PATCH\", \"OPTIONS\", \"DELETE\"}\n\n\tvar wg sync.WaitGroup\n\tfor idx, srv := range s.containerServices {\n\t\tidx, srv := idx, srv\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tmethod := testMethods[idx%len(testMethods)]\n\t\t\t_, port, err := net.SplitHostPort(srv.Addr)\n\t\t\tc.Assert(err, check.IsNil, check.Commentf(\"%s\", srv.Addr))\n\t\t\tvhost, target := vhostAndTargetFunc(c, targetUUID, port)\n\t\t\tcomment := check.Commentf(\"srv.Addr %s => proxy vhost %s, target %s\", srv.Addr, vhost, target)\n\t\t\tc.Logf(\"%s\", comment.CheckCommentString())\n\t\t\treq, err := http.NewRequest(method, \"https://\"+vhost+\"/via-\"+s.gw.Address, nil)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\t// Token is already passed to\n\t\t\t// ContainerHTTPProxy() call in s.userctx, but\n\t\t\t// we also need to add an auth cookie to the\n\t\t\t// http request: if the request gets passed\n\t\t\t// through http (see forceProxyForTest), the\n\t\t\t// target router will start with a fresh\n\t\t\t// context and load tokens from the request.\n\t\t\treq.AddCookie(&http.Cookie{\n\t\t\t\tName:  
\"arvados_api_token\",\n\t\t\t\tValue: auth.EncodeTokenCookie([]byte(arvadostest.ActiveTokenV2)),\n\t\t\t})\n\t\t\thandler, err := s.localdb.ContainerHTTPProxy(s.userctx, arvados.ContainerHTTPProxyOptions{\n\t\t\t\tTarget:  target,\n\t\t\t\tRequest: req,\n\t\t\t})\n\t\t\tc.Assert(err, check.IsNil, comment)\n\t\t\trw := httptest.NewRecorder()\n\t\t\thandler.ServeHTTP(rw, req)\n\t\t\tresp := rw.Result()\n\t\t\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\t\t\tif cookie := getCookie(resp, \"arvados_container_uuid\"); c.Check(cookie, check.NotNil) {\n\t\t\t\tc.Check(cookie.Value, check.Equals, s.ctrUUID)\n\t\t\t}\n\t\t\tbody, err := io.ReadAll(resp.Body)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Check(string(body), check.Matches, `handled `+method+` /via-.* with Host \\Q`+vhost+`\\E`)\n\t\t}()\n\t}\n\twg.Wait()\n}\n\n// Return the virtualhost (in the http request) and opts.Target that\n// lib/controller/router.Router will pass to ContainerHTTPProxy() when\n// Services.ContainerWebServices.ExternalURL is a wildcard like\n// \"*.containers.example.com\".\nfunc (s *ContainerGatewaySuite) vhostAndTargetForWildcard(c *check.C, targetUUID, targetPort string) (string, string) {\n\treturn targetUUID + \"-\" + targetPort + \".containers.example.com\", fmt.Sprintf(\"%s-%s\", targetUUID, targetPort)\n}\n\n// Return the virtualhost (in the http request) and opts.Target that\n// lib/controller/router.Router will pass to ContainerHTTPProxy() when\n// Services.ContainerWebServices.ExternalPortMin and\n// Services.ContainerWebServices.ExternalPortMax are positive, and\n// Services.ContainerWebServices.ExternalURL is not a wildcard.\nfunc (s *ContainerGatewaySuite) vhostAndTargetForDynamicPort(c *check.C, targetUUID, targetPort string) (string, string) {\n\texthost := \"containers.example.com\"\n\ts.localdb.cluster.Services.ContainerWebServices.ExternalURL.Host = exthost\n\ts.localdb.cluster.Services.ContainerWebServices.ExternalPortMin = testDynamicPortMin\n\ts.localdb.cluster.Services.ContainerWebServices.ExternalPortMax = testDynamicPortMax\n\tassignedPort := s.assignedExtPort.Add(1)\n\t_, err := s.db.Exec(`insert into container_ports (external_port, container_uuid, container_port) values ($1, $2, $3)`,\n\t\tassignedPort, targetUUID, targetPort)\n\tc.Assert(err, check.IsNil)\n\treturn fmt.Sprintf(\"%s:%d\", exthost, assignedPort), fmt.Sprintf(\":%d\", assignedPort)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxyError_NoToken_Unlisted(c *check.C) {\n\ts.testContainerHTTPProxyError(c, 0, \"\", s.vhostAndTargetForWildcard, http.StatusUnauthorized)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxyError_NoToken_Private(c *check.C) {\n\ts.testContainerHTTPProxyError(c, 2, \"\", s.vhostAndTargetForWildcard, http.StatusUnauthorized)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxyError_InvalidToken(c *check.C) {\n\ts.testContainerHTTPProxyError(c, 0, arvadostest.ActiveTokenV2+\"bogus\", s.vhostAndTargetForWildcard, http.StatusUnauthorized)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxyError_AnonymousToken_Unlisted(c *check.C) {\n\ts.testContainerHTTPProxyError(c, 0, arvadostest.AnonymousToken, s.vhostAndTargetForWildcard, http.StatusNotFound)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxyError_AnonymousToken_Private(c *check.C) {\n\ts.testContainerHTTPProxyError(c, 2, arvadostest.AnonymousToken, s.vhostAndTargetForWildcard, http.StatusNotFound)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxyError_CRsDifferentUsers(c *check.C) 
{\n\trootctx := ctrlctx.NewWithToken(s.ctx, s.cluster, s.cluster.SystemRootToken)\n\tcr, err := s.localdb.ContainerRequestCreate(rootctx, s.reqCreateOptions)\n\tdefer s.localdb.ContainerRequestDelete(rootctx, arvados.DeleteOptions{UUID: cr.UUID})\n\tc.Assert(err, check.IsNil)\n\tc.Assert(cr.ContainerUUID, check.Equals, s.ctrUUID)\n\ts.testContainerHTTPProxyError(c, 0, arvadostest.ActiveTokenV2, s.vhostAndTargetForWildcard, http.StatusForbidden)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxyError_ContainerNotReadable(c *check.C) {\n\ts.testContainerHTTPProxyError(c, 0, arvadostest.SpectatorToken, s.vhostAndTargetForWildcard, http.StatusNotFound)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxyError_DynamicPort(c *check.C) {\n\ts.testContainerHTTPProxyError(c, 0, arvadostest.SpectatorToken, s.vhostAndTargetForDynamicPort, http.StatusNotFound)\n}\n\nfunc (s *ContainerGatewaySuite) testContainerHTTPProxyError(c *check.C, svcIdx int, token string, vhostAndTargetFunc func(*check.C, string, string) (string, string), expectCode int) {\n\t_, svcPort, err := net.SplitHostPort(s.containerServices[svcIdx].Addr)\n\tc.Assert(err, check.IsNil)\n\tctx := ctrlctx.NewWithToken(s.ctx, s.cluster, token)\n\tvhost, target := vhostAndTargetFunc(c, s.ctrUUID, svcPort)\n\treq, err := http.NewRequest(\"GET\", \"https://\"+vhost+\"/via-\"+s.gw.Address, nil)\n\tc.Assert(err, check.IsNil)\n\t_, err = s.localdb.ContainerHTTPProxy(ctx, arvados.ContainerHTTPProxyOptions{\n\t\tTarget:  target,\n\t\tRequest: req,\n\t})\n\tc.Check(err, check.NotNil)\n\tvar se httpserver.HTTPStatusError\n\tc.Assert(errors.As(err, &se), check.Equals, true)\n\tc.Check(se.HTTPStatus(), check.Equals, expectCode)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_CookieAuth(c *check.C) {\n\ts.testContainerHTTPProxyUsingCurl(c, 0, arvadostest.ActiveTokenV2, \"GET\", \"/foobar\")\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_CookieAuth_POST(c *check.C) {\n\ts.testContainerHTTPProxyUsingCurl(c, 0, arvadostest.ActiveTokenV2, \"POST\", \"/foobar\")\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_QueryAuth(c *check.C) {\n\ts.testContainerHTTPProxyUsingCurl(c, 0, \"\", \"GET\", \"/foobar?arvados_api_token=\"+arvadostest.ActiveTokenV2)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_QueryAuth_Tunnel(c *check.C) {\n\ts.gw = s.setupGatewayWithTunnel(c)\n\ts.testContainerHTTPProxyUsingCurl(c, 0, \"\", \"GET\", \"/foobar?arvados_api_token=\"+arvadostest.ActiveTokenV2)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_QueryAuth_ProxyTunnel(c *check.C) {\n\tforceProxyForTest = true\n\ts.gw = s.setupGatewayWithTunnel(c)\n\ts.testContainerHTTPProxyUsingCurl(c, 0, \"\", \"GET\", \"/foobar?arvados_api_token=\"+arvadostest.ActiveTokenV2)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_Anonymous(c *check.C) {\n\ts.testContainerHTTPProxyUsingCurl(c, 1, \"\", \"GET\", \"/foobar\")\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_Anonymous_OPTIONS(c *check.C) {\n\ts.testContainerHTTPProxyUsingCurl(c, 1, \"\", \"OPTIONS\", \"/foobar\")\n}\n\n// Check other query parameters are preserved in the\n// redirect-with-cookie.\n//\n// Note the original request has \"?baz&baz&...\" and this changes to\n// \"?baz=&baz=&...\" in the redirect location.  
We trust the target\n// service won't be sensitive to this difference.\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_QueryAuth_PreserveQuery(c *check.C) {\n\tbody := s.testContainerHTTPProxyUsingCurl(c, 0, \"\", \"GET\", \"/foobar?baz&baz&arvados_api_token=\"+arvadostest.ActiveTokenV2+\"&waz=quux\")\n\tc.Check(body, check.Matches, `handled GET /foobar\\?baz=&baz=&waz=quux with Host `+s.ctrUUID+`.*`)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_Curl_Patch(c *check.C) {\n\tbody := s.testContainerHTTPProxyUsingCurl(c, 0, arvadostest.ActiveTokenV2, \"PATCH\", \"/foobar\")\n\tc.Check(body, check.Matches, `handled PATCH /foobar with Host `+s.ctrUUID+`.*`)\n}\n\n// Note there is no particular reason this test needs to use curl.  It\n// would make sense to rewrite it to use stdlib instead, as we did\n// with other tests in commit\n// 16f957acf378cd3384d4b9c6ce844fe3cefa600b.\nfunc (s *ContainerGatewaySuite) testContainerHTTPProxyUsingCurl(c *check.C, svcIdx int, cookietoken, method, path string) string {\n\t_, svcPort, err := net.SplitHostPort(s.containerServices[svcIdx].Addr)\n\tc.Assert(err, check.IsNil)\n\n\tvhost, err := url.Parse(s.srv.URL)\n\tc.Assert(err, check.IsNil)\n\tcontrollerHost := vhost.Host\n\tvhost.Host = s.ctrUUID + \"-\" + svcPort + \".containers.example.com\"\n\ttarget, err := vhost.Parse(path)\n\tc.Assert(err, check.IsNil)\n\n\ttempdir := c.MkDir()\n\tcmd := exec.Command(\"curl\")\n\tif cookietoken != \"\" {\n\t\tcmd.Args = append(cmd.Args, \"--cookie\", \"arvados_api_token=\"+string(auth.EncodeTokenCookie([]byte(cookietoken))))\n\t} else {\n\t\tcmd.Args = append(cmd.Args, \"--cookie-jar\", filepath.Join(tempdir, \"cookies.txt\"))\n\t}\n\tif method != \"GET\" {\n\t\tcmd.Args = append(cmd.Args, \"--request\", method)\n\t}\n\tcmd.Args = append(cmd.Args, \"--silent\", \"--insecure\", \"--location\", \"--connect-to\", vhost.Hostname()+\":443:\"+controllerHost, target.String())\n\tcmd.Dir = tempdir\n\tstdout, err := cmd.StdoutPipe()\n\tc.Assert(err, check.Equals, nil)\n\tcmd.Stderr = cmd.Stdout\n\tc.Logf(\"cmd: %v\", cmd.Args)\n\tgo cmd.Start()\n\n\tvar buf bytes.Buffer\n\t_, err = io.Copy(&buf, stdout)\n\tc.Check(err, check.Equals, nil)\n\terr = cmd.Wait()\n\tc.Check(err, check.Equals, nil)\n\tc.Check(buf.String(), check.Matches, `handled `+method+` /.*`)\n\treturn buf.String()\n}\n\n// See testContainerHTTPProxy_ReusedPort_FollowRedirs().  These\n// integration tests check the redirect-with-cookie behavior when a\n// request arrives on a dynamically-assigned port and it has cookies\n// indicating that the client has previously accessed a different\n// container's web services on this same port, i.e., it is susceptible\n// to leaking cache/cookie/localstorage data from the previous\n// container's service to the current container's service.\ntype testReusedPortFollowRedirs struct {\n\tsvcIdx      int\n\tmethod      string\n\tquerytoken  string\n\tcookietoken string\n}\n\n// Reject non-GET requests.  
In principle we could 303 them, but in\n// the most obvious case (an AJAX request initiated by the previous\n// container's web application), delivering the request to the new\n// container would surely not be the intended behavior.\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_ReusedPort_FollowRedirs_RejectPOST(c *check.C) {\n\tcode, body, redirs := s.testContainerHTTPProxy_ReusedPort_FollowRedirs(c, testReusedPortFollowRedirs{\n\t\tmethod:      \"POST\",\n\t\tcookietoken: arvadostest.ActiveTokenV2,\n\t})\n\tc.Check(code, check.Equals, http.StatusGone)\n\tc.Check(body, check.Equals, \"\")\n\tc.Check(redirs, check.HasLen, 0)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_ReusedPort_FollowRedirs_WithoutToken_ClearApplicationCookie(c *check.C) {\n\tcode, body, redirs := s.testContainerHTTPProxy_ReusedPort_FollowRedirs(c, testReusedPortFollowRedirs{\n\t\tsvcIdx:      1,\n\t\tmethod:      \"GET\",\n\t\tcookietoken: arvadostest.ActiveTokenV2,\n\t})\n\tc.Check(code, check.Equals, http.StatusOK)\n\tc.Check(body, check.Matches, `handled GET /foobar with Host containers\\.example\\.com:\\d+`)\n\tc.Check(redirs, check.HasLen, 1)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_ReusedPort_FollowRedirs_WithToken_ClearApplicationCookie(c *check.C) {\n\tcode, body, redirs := s.testContainerHTTPProxy_ReusedPort_FollowRedirs(c, testReusedPortFollowRedirs{\n\t\tmethod:     \"GET\",\n\t\tquerytoken: arvadostest.ActiveTokenV2,\n\t})\n\tc.Check(code, check.Equals, http.StatusOK)\n\tc.Check(body, check.Matches, `handled GET /foobar with Host containers\\.example\\.com:\\d+`)\n\tif c.Check(redirs, check.HasLen, 1) {\n\t\tc.Check(redirs[0], check.Matches, `https://containers\\.example\\.com:\\d+/foobar`)\n\t}\n}\n\nfunc (s *ContainerGatewaySuite) testContainerHTTPProxy_ReusedPort_FollowRedirs(c *check.C, t testReusedPortFollowRedirs) (responseCode int, responseBody string, redirectsFollowed []string) {\n\t_, svcPort, err := net.SplitHostPort(s.containerServices[t.svcIdx].Addr)\n\tc.Assert(err, check.IsNil)\n\n\tsrvurl, err := url.Parse(s.srv.URL)\n\tc.Assert(err, check.IsNil)\n\tcontrollerHost := srvurl.Host\n\n\tvhost, _ := s.vhostAndTargetForDynamicPort(c, s.ctrUUID, svcPort)\n\trequrl := url.URL{\n\t\tScheme: \"https\",\n\t\tHost:   vhost,\n\t\tPath:   \"/foobar\",\n\t}\n\tif t.querytoken != \"\" {\n\t\trequrl.RawQuery = \"arvados_api_token=\" + t.querytoken\n\t}\n\n\tcookies := []*http.Cookie{\n\t\t&http.Cookie{Name: \"arvados_container_uuid\", Value: arvadostest.CompletedContainerUUID},\n\t\t&http.Cookie{Name: \"stale_cookie\", Value: \"abcdefghij\"},\n\t}\n\tif t.cookietoken != \"\" {\n\t\tcookies = append(cookies, &http.Cookie{Name: \"arvados_api_token\", Value: string(auth.EncodeTokenCookie([]byte(t.cookietoken)))})\n\t}\n\tjar, err := cookiejar.New(nil)\n\tc.Assert(err, check.IsNil)\n\n\tclient := &http.Client{\n\t\tJar: jar,\n\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\tredirectsFollowed = append(redirectsFollowed, req.URL.String())\n\t\t\treturn nil\n\t\t},\n\t\tTransport: &http.Transport{\n\t\t\tDialTLSContext: func(ctx context.Context, network, addr string) (net.Conn, error) {\n\t\t\t\treturn tls.Dial(network, controllerHost, &tls.Config{\n\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t})\n\t\t\t},\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true}}}\n\tclient.Jar.SetCookies(&url.URL{Scheme: \"https\", Host: \"containers.example.com\"}, cookies)\n\treq, err := http.NewRequest(t.method, requrl.String(), 
nil)\n\tc.Assert(err, check.IsNil)\n\tresp, err := client.Do(req)\n\tc.Assert(err, check.IsNil)\n\tresponseCode = resp.StatusCode\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tc.Assert(err, check.IsNil)\n\tresponseBody = string(body)\n\tif responseCode < 400 {\n\t\tfor _, cookie := range client.Jar.Cookies(&url.URL{Scheme: \"https\", Host: \"containers.example.com\"}) {\n\t\t\tc.Check(cookie.Name, check.Not(check.Equals), \"stale_cookie\")\n\t\t\tif cookie.Name == \"arvados_container_uuid\" {\n\t\t\t\tc.Check(cookie.Value, check.Not(check.Equals), arvadostest.CompletedContainerUUID)\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n// Unit tests for clear-cookies-and-redirect behavior when the client\n// still has active cookies (and possibly client-side cache) from a\n// different container that used to be served on the same\n// dynamically-assigned port.\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_ReusedPort_QueryToken(c *check.C) {\n\ts.testContainerHTTPProxy_ReusedPort(c, arvadostest.ActiveTokenV2, \"\")\n}\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_ReusedPort_CookieToken(c *check.C) {\n\ts.testContainerHTTPProxy_ReusedPort(c, \"\", arvadostest.ActiveTokenV2)\n}\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_ReusedPort_NoToken(c *check.C) {\n\ts.testContainerHTTPProxy_ReusedPort(c, \"\", \"\")\n}\nfunc (s *ContainerGatewaySuite) testContainerHTTPProxy_ReusedPort(c *check.C, querytoken, cookietoken string) {\n\tsrv := s.containerServices[0]\n\tmethod := \"GET\"\n\t_, port, err := net.SplitHostPort(srv.Addr)\n\tc.Assert(err, check.IsNil, check.Commentf(\"%s\", srv.Addr))\n\tvhost, target := s.vhostAndTargetForDynamicPort(c, s.ctrUUID, port)\n\n\tvar tokenCookie *http.Cookie\n\tif cookietoken != \"\" {\n\t\ttokenCookie = &http.Cookie{\n\t\t\tName:  \"arvados_api_token\",\n\t\t\tValue: string(auth.EncodeTokenCookie([]byte(cookietoken))),\n\t\t}\n\t}\n\n\tinitialURL := \"https://\" + vhost + \"/via-\" + s.gw.Address + \"/preserve-path?preserve-param=preserve-value\"\n\tif querytoken != \"\" {\n\t\tinitialURL += \"&arvados_api_token=\" + querytoken\n\t}\n\treq, err := http.NewRequest(method, initialURL, nil)\n\tc.Assert(err, check.IsNil)\n\treq.Header.Add(\"Cookie\", \"arvados_container_uuid=zzzzz-dz642-compltcontainer\")\n\treq.Header.Add(\"Cookie\", \"stale_cookie=abcdefghij\")\n\tif tokenCookie != nil {\n\t\treq.Header.Add(\"Cookie\", tokenCookie.String())\n\t}\n\thandler, err := s.localdb.ContainerHTTPProxy(s.userctx, arvados.ContainerHTTPProxyOptions{\n\t\tTarget:  target,\n\t\tRequest: req,\n\t})\n\tc.Assert(err, check.IsNil)\n\trw := httptest.NewRecorder()\n\thandler.ServeHTTP(rw, req)\n\tresp := rw.Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusSeeOther)\n\tc.Logf(\"Received Location: %s\", resp.Header.Get(\"Location\"))\n\tc.Logf(\"Received cookies: %v\", resp.Cookies())\n\tnewTokenCookie := getCookie(resp, \"arvados_api_token\")\n\tif querytoken != \"\" {\n\t\tif c.Check(newTokenCookie, check.NotNil) {\n\t\t\tc.Check(newTokenCookie.Expires.IsZero(), check.Equals, true)\n\t\t}\n\t}\n\tif newTokenCookie != nil {\n\t\ttokenCookie = newTokenCookie\n\t}\n\tif staleCookie := getCookie(resp, \"stale_cookie\"); c.Check(staleCookie, check.NotNil) {\n\t\tc.Check(staleCookie.Expires.Before(time.Now()), check.Equals, true)\n\t\tc.Check(staleCookie.Value, check.Equals, \"\")\n\t}\n\tif ctrCookie := getCookie(resp, \"arvados_container_uuid\"); c.Check(ctrCookie, check.NotNil) {\n\t\tc.Check(ctrCookie.Expires.Before(time.Now()), check.Equals, 
true)\n\t\tc.Check(ctrCookie.Value, check.Equals, \"\")\n\t}\n\tc.Check(resp.Header.Get(\"Clear-Site-Data\"), check.Equals, `\"cache\", \"storage\"`)\n\n\treq, err = http.NewRequest(method, resp.Header.Get(\"Location\"), nil)\n\tc.Assert(err, check.IsNil)\n\treq.Header.Add(\"Cookie\", tokenCookie.String())\n\thandler, err = s.localdb.ContainerHTTPProxy(s.userctx, arvados.ContainerHTTPProxyOptions{\n\t\tTarget:  target,\n\t\tRequest: req,\n\t})\n\tc.Assert(err, check.IsNil)\n\trw = httptest.NewRecorder()\n\thandler.ServeHTTP(rw, req)\n\tresp = rw.Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tif ctrCookie := getCookie(resp, \"arvados_container_uuid\"); c.Check(ctrCookie, check.NotNil) {\n\t\tc.Check(ctrCookie.Expires.IsZero(), check.Equals, true)\n\t\tc.Check(ctrCookie.Value, check.Equals, s.ctrUUID)\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(body), check.Matches, `handled GET /via-localhost:\\d+/preserve-path\\?preserve-param=preserve-value with Host containers.example.com:\\d+`)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_PublishedPortByName_ProxyTunnel(c *check.C) {\n\tforceProxyForTest = true\n\ts.gw = s.setupGatewayWithTunnel(c)\n\ts.testContainerHTTPProxy_PublishedPortByName(c)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerHTTPProxy_PublishedPortByName(c *check.C) {\n\ts.testContainerHTTPProxy_PublishedPortByName(c)\n}\n\nfunc (s *ContainerGatewaySuite) testContainerHTTPProxy_PublishedPortByName(c *check.C) {\n\tsrv := s.containerServices[1]\n\t_, port, _ := net.SplitHostPort(srv.Addr)\n\tportnum, err := strconv.Atoi(port)\n\tc.Assert(err, check.IsNil)\n\tnamelink, err := s.localdb.LinkCreate(s.userctx, arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"link_class\": \"published_port\",\n\t\t\t\"name\":       \"warthogfacedbuffoon\",\n\t\t\t\"head_uuid\":  s.reqUUID,\n\t\t\t\"properties\": map[string]interface{}{\n\t\t\t\t\"port\": portnum}}})\n\tc.Assert(err, check.IsNil)\n\tdefer s.localdb.LinkDelete(s.userctx, arvados.DeleteOptions{UUID: namelink.UUID})\n\n\tvhost := namelink.Name + \".containers.example.com\"\n\treq, err := http.NewRequest(\"METHOD\", \"https://\"+vhost+\"/path\", nil)\n\tc.Assert(err, check.IsNil)\n\t// Token is already passed to ContainerHTTPProxy() call in\n\t// s.userctx, but we also need to add an auth cookie to the\n\t// http request: if the request gets passed through http (see\n\t// forceProxyForTest), the target router will start with a\n\t// fresh context and load tokens from the request.\n\treq.AddCookie(&http.Cookie{\n\t\tName:  \"arvados_api_token\",\n\t\tValue: auth.EncodeTokenCookie([]byte(arvadostest.ActiveTokenV2)),\n\t})\n\thandler, err := s.localdb.ContainerHTTPProxy(s.userctx, arvados.ContainerHTTPProxyOptions{\n\t\tTarget:  namelink.Name,\n\t\tRequest: req,\n\t})\n\tc.Assert(err, check.IsNil)\n\trw := httptest.NewRecorder()\n\thandler.ServeHTTP(rw, req)\n\tresp := rw.Result()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tbody, err := io.ReadAll(resp.Body)\n\tc.Assert(err, check.IsNil)\n\tc.Check(string(body), check.Matches, `handled METHOD /path with Host \\Q`+vhost+`\\E`)\n}\n\nfunc (s *ContainerGatewaySuite) setupLogCollection(c *check.C) {\n\tfiles := map[string]string{\n\t\t\"stderr.txt\":   \"hello world\\n\",\n\t\t\"a/b/c/d.html\": \"<html></html>\\n\",\n\t}\n\tclient := arvados.NewClientFromEnv()\n\tac, err := arvadosclient.New(client)\n\tc.Assert(err, check.IsNil)\n\tkc, err := 
keepclient.MakeKeepClient(ac)\n\tc.Assert(err, check.IsNil)\n\tcfs, err := (&arvados.Collection{}).FileSystem(client, kc)\n\tc.Assert(err, check.IsNil)\n\tfor name, content := range files {\n\t\tfor i, ch := range name {\n\t\t\tif ch == '/' {\n\t\t\t\terr := cfs.Mkdir(\"/\"+name[:i], 0777)\n\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t}\n\t\t}\n\t\tf, err := cfs.OpenFile(\"/\"+name, os.O_CREATE|os.O_WRONLY, 0777)\n\t\tc.Assert(err, check.IsNil)\n\t\tf.Write([]byte(content))\n\t\terr = f.Close()\n\t\tc.Assert(err, check.IsNil)\n\t}\n\tcfs.Sync()\n\ts.gw.LogCollection = cfs\n}\n\nfunc (s *ContainerGatewaySuite) saveLogAndCloseGateway(c *check.C) {\n\trootctx := ctrlctx.NewWithToken(s.ctx, s.cluster, s.cluster.SystemRootToken)\n\ttxt, err := s.gw.LogCollection.MarshalManifest(\".\")\n\tc.Assert(err, check.IsNil)\n\tcoll, err := s.localdb.CollectionCreate(rootctx, arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"manifest_text\": txt,\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\t_, err = s.localdb.ContainerUpdate(rootctx, arvados.UpdateOptions{\n\t\tUUID: s.ctrUUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"state\":     arvados.ContainerStateComplete,\n\t\t\t\"exit_code\": 0,\n\t\t\t\"log\":       coll.PortableDataHash,\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\tupdatedReq, err := s.localdb.ContainerRequestGet(rootctx, arvados.GetOptions{UUID: s.reqUUID})\n\tc.Assert(err, check.IsNil)\n\tc.Logf(\"container request log UUID is %s\", updatedReq.LogUUID)\n\tcrLog, err := s.localdb.CollectionGet(rootctx, arvados.GetOptions{UUID: updatedReq.LogUUID, Select: []string{\"manifest_text\"}})\n\tc.Assert(err, check.IsNil)\n\tc.Logf(\"collection log manifest:\\n%s\", crLog.ManifestText)\n\t// Ensure localdb can't circumvent the keep-web proxy test by\n\t// getting content from the container gateway.\n\ts.gw.LogCollection = nil\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerRequestLogViaTunnel(c *check.C) {\n\tforceProxyForTest = true\n\ts.gw = s.setupGatewayWithTunnel(c)\n\ts.setupLogCollection(c)\n\n\tfor _, broken := range []bool{false, true} {\n\t\tc.Logf(\"broken=%v\", broken)\n\n\t\tif broken {\n\t\t\tdelete(s.cluster.Services.Controller.InternalURLs, *forceInternalURLForTest)\n\t\t}\n\n\t\tr, err := http.NewRequestWithContext(s.userctx, \"GET\", \"https://controller.example/arvados/v1/container_requests/\"+s.reqUUID+\"/log/\"+s.ctrUUID+\"/stderr.txt\", nil)\n\t\tc.Assert(err, check.IsNil)\n\t\tr.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveTokenV2)\n\t\thandler, err := s.localdb.ContainerRequestLog(s.userctx, arvados.ContainerLogOptions{\n\t\t\tUUID: s.reqUUID,\n\t\t\tWebDAVOptions: arvados.WebDAVOptions{\n\t\t\t\tMethod: \"GET\",\n\t\t\t\tHeader: r.Header,\n\t\t\t\tPath:   \"/\" + s.ctrUUID + \"/stderr.txt\",\n\t\t\t},\n\t\t})\n\t\tif broken {\n\t\t\tc.Check(err, check.ErrorMatches, `.*tunnel endpoint is invalid.*`)\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(err, check.IsNil)\n\t\tc.Assert(handler, check.NotNil)\n\t\trec := httptest.NewRecorder()\n\t\thandler.ServeHTTP(rec, r)\n\t\tresp := rec.Result()\n\t\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\t\tbuf, err := ioutil.ReadAll(resp.Body)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(string(buf), check.Equals, \"hello world\\n\")\n\t}\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerRequestLogViaGateway(c *check.C) {\n\ts.setupLogCollection(c)\n\ts.testContainerRequestLog(c)\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerRequestLogViaKeepWeb(c *check.C) 
{\n\ts.setupLogCollection(c)\n\ts.saveLogAndCloseGateway(c)\n\ts.testContainerRequestLog(c)\n}\n\nfunc (s *ContainerGatewaySuite) testContainerRequestLog(c *check.C) {\n\tfor _, trial := range []struct {\n\t\tmethod          string\n\t\tpath            string\n\t\theader          http.Header\n\t\tunauthenticated bool\n\t\texpectStatus    int\n\t\texpectBodyRe    string\n\t\texpectHeader    http.Header\n\t}{\n\t\t{\n\t\t\tmethod:       \"GET\",\n\t\t\tpath:         s.ctrUUID + \"/stderr.txt\",\n\t\t\texpectStatus: http.StatusOK,\n\t\t\texpectBodyRe: \"hello world\\n\",\n\t\t\texpectHeader: http.Header{\n\t\t\t\t\"Content-Type\": {\"text/plain; charset=utf-8\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmethod: \"GET\",\n\t\t\tpath:   s.ctrUUID + \"/stderr.txt\",\n\t\t\theader: http.Header{\n\t\t\t\t\"Range\": {\"bytes=-6\"},\n\t\t\t},\n\t\t\texpectStatus: http.StatusPartialContent,\n\t\t\texpectBodyRe: \"world\\n\",\n\t\t\texpectHeader: http.Header{\n\t\t\t\t\"Content-Type\":  {\"text/plain; charset=utf-8\"},\n\t\t\t\t\"Content-Range\": {\"bytes 6-11/12\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmethod:       \"OPTIONS\",\n\t\t\tpath:         s.ctrUUID + \"/stderr.txt\",\n\t\t\texpectStatus: http.StatusOK,\n\t\t\texpectBodyRe: \"\",\n\t\t\texpectHeader: http.Header{\n\t\t\t\t\"Dav\":   {\"1, 2\"},\n\t\t\t\t\"Allow\": {\"OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmethod:          \"OPTIONS\",\n\t\t\tpath:            s.ctrUUID + \"/stderr.txt\",\n\t\t\tunauthenticated: true,\n\t\t\theader: http.Header{\n\t\t\t\t\"Access-Control-Request-Method\": {\"POST\"},\n\t\t\t},\n\t\t\texpectStatus: http.StatusOK,\n\t\t\texpectBodyRe: \"\",\n\t\t\texpectHeader: http.Header{\n\t\t\t\t\"Access-Control-Allow-Headers\": {\"Authorization, Content-Type, Range, Depth, Destination, If, Lock-Token, Overwrite, Timeout, Cache-Control\"},\n\t\t\t\t\"Access-Control-Allow-Methods\": {\"COPY, DELETE, GET, LOCK, MKCOL, MOVE, OPTIONS, POST, PROPFIND, PROPPATCH, PUT, RMCOL, UNLOCK\"},\n\t\t\t\t\"Access-Control-Allow-Origin\":  {\"*\"},\n\t\t\t\t\"Access-Control-Max-Age\":       {\"86400\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmethod:       \"PROPFIND\",\n\t\t\tpath:         s.ctrUUID + \"/\",\n\t\t\texpectStatus: http.StatusMultiStatus,\n\t\t\texpectBodyRe: `.*\\Q<D:displayname>stderr.txt</D:displayname>\\E.*>\\n?`,\n\t\t\texpectHeader: http.Header{\n\t\t\t\t\"Content-Type\": {\"text/xml; charset=utf-8\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmethod:       \"PROPFIND\",\n\t\t\tpath:         s.ctrUUID,\n\t\t\texpectStatus: http.StatusMultiStatus,\n\t\t\texpectBodyRe: `.*\\Q<D:displayname>stderr.txt</D:displayname>\\E.*>\\n?`,\n\t\t\texpectHeader: http.Header{\n\t\t\t\t\"Content-Type\": {\"text/xml; charset=utf-8\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmethod:       \"PROPFIND\",\n\t\t\tpath:         s.ctrUUID + \"/a/b/c/\",\n\t\t\texpectStatus: http.StatusMultiStatus,\n\t\t\texpectBodyRe: `.*\\Q<D:displayname>d.html</D:displayname>\\E.*>\\n?`,\n\t\t\texpectHeader: http.Header{\n\t\t\t\t\"Content-Type\": {\"text/xml; charset=utf-8\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmethod:       \"GET\",\n\t\t\tpath:         s.ctrUUID + \"/a/b/c/d.html\",\n\t\t\texpectStatus: http.StatusOK,\n\t\t\texpectBodyRe: \"<html></html>\\n\",\n\t\t\texpectHeader: http.Header{\n\t\t\t\t\"Content-Type\": {\"text/html; charset=utf-8\"},\n\t\t\t},\n\t\t},\n\t} {\n\t\tc.Logf(\"trial %#v\", trial)\n\t\tctx := s.userctx\n\t\tif trial.unauthenticated {\n\t\t\tctx = auth.NewContext(context.Background(), 
auth.CredentialsFromRequest(&http.Request{URL: &url.URL{}, Header: http.Header{}}))\n\t\t}\n\t\tr, err := http.NewRequestWithContext(ctx, trial.method, \"https://controller.example/arvados/v1/container_requests/\"+s.reqUUID+\"/log/\"+trial.path, nil)\n\t\tc.Assert(err, check.IsNil)\n\t\tfor k := range trial.header {\n\t\t\tr.Header.Set(k, trial.header.Get(k))\n\t\t}\n\t\thandler, err := s.localdb.ContainerRequestLog(ctx, arvados.ContainerLogOptions{\n\t\t\tUUID: s.reqUUID,\n\t\t\tWebDAVOptions: arvados.WebDAVOptions{\n\t\t\t\tMethod: trial.method,\n\t\t\t\tHeader: r.Header,\n\t\t\t\tPath:   \"/\" + trial.path,\n\t\t\t},\n\t\t})\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Assert(handler, check.NotNil)\n\t\trec := httptest.NewRecorder()\n\t\thandler.ServeHTTP(rec, r)\n\t\tresp := rec.Result()\n\t\tc.Check(resp.StatusCode, check.Equals, trial.expectStatus)\n\t\tfor k := range trial.expectHeader {\n\t\t\tc.Check(resp.Header[k], check.DeepEquals, trial.expectHeader[k])\n\t\t}\n\t\tbuf, err := ioutil.ReadAll(resp.Body)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(string(buf), check.Matches, trial.expectBodyRe)\n\t}\n}\n\nfunc (s *ContainerGatewaySuite) TestContainerRequestLogViaCadaver(c *check.C) {\n\ts.setupLogCollection(c)\n\n\tout := s.runCadaver(c, arvadostest.ActiveToken, \"/arvados/v1/container_requests/\"+s.reqUUID+\"/log/\"+s.ctrUUID, \"ls\")\n\tc.Check(out, check.Matches, `(?ms).*stderr\\.txt\\s+12\\s.*`)\n\tc.Check(out, check.Matches, `(?ms).*a\\s+0\\s.*`)\n\n\tout = s.runCadaver(c, arvadostest.ActiveTokenV2, \"/arvados/v1/container_requests/\"+s.reqUUID+\"/log/\"+s.ctrUUID, \"get stderr.txt\")\n\tc.Check(out, check.Matches, `(?ms).*Downloading .* to \\W?stderr\\.txt\\W?: .* succeeded\\..*`)\n\n\ts.saveLogAndCloseGateway(c)\n\n\tout = s.runCadaver(c, arvadostest.ActiveTokenV2, \"/arvados/v1/container_requests/\"+s.reqUUID+\"/log/\"+s.ctrUUID, \"get stderr.txt\")\n\tc.Check(out, check.Matches, `(?ms).*Downloading .* to \\W?stderr\\.txt\\W?: .* succeeded\\..*`)\n}\n\nfunc (s *ContainerGatewaySuite) runCadaver(c *check.C, password, path, stdin string) string {\n\t// Replace s.srv with an HTTP server, otherwise cadaver will\n\t// just fail on TLS cert verification.\n\ts.srv.Close()\n\trtr := router.New(s.localdb, router.Config{})\n\ts.srv = httptest.NewUnstartedServer(httpserver.AddRequestIDs(httpserver.LogRequests(rtr)))\n\ts.srv.Start()\n\n\ttempdir, err := ioutil.TempDir(\"\", \"localdb-test-\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.RemoveAll(tempdir)\n\n\tcmd := exec.Command(\"cadaver\", s.srv.URL+path)\n\tif password != \"\" {\n\t\tcmd.Env = append(os.Environ(), \"HOME=\"+tempdir)\n\t\tf, err := os.OpenFile(filepath.Join(tempdir, \".netrc\"), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)\n\t\tc.Assert(err, check.IsNil)\n\t\t_, err = fmt.Fprintf(f, \"default login none password %s\\n\", password)\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Assert(f.Close(), check.IsNil)\n\t}\n\tcmd.Stdin = bytes.NewBufferString(stdin)\n\tcmd.Dir = tempdir\n\tstdout, err := cmd.StdoutPipe()\n\tc.Assert(err, check.Equals, nil)\n\tcmd.Stderr = cmd.Stdout\n\tc.Logf(\"cmd: %v\", cmd.Args)\n\tgo cmd.Start()\n\n\tvar buf bytes.Buffer\n\t_, err = io.Copy(&buf, stdout)\n\tc.Check(err, check.Equals, nil)\n\terr = cmd.Wait()\n\tc.Check(err, check.Equals, nil)\n\treturn buf.String()\n}\n\nfunc (s *ContainerGatewaySuite) TestConnect(c *check.C) {\n\tc.Logf(\"connecting to %s\", s.gw.Address)\n\tsshconn, err := s.localdb.ContainerSSH(s.userctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})\n\tc.Assert(err, 
check.IsNil)\n\tc.Assert(sshconn.Conn, check.NotNil)\n\tdefer sshconn.Conn.Close()\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer close(done)\n\n\t\t// Receive text banner\n\t\tbuf := make([]byte, 12)\n\t\t_, err := io.ReadFull(sshconn.Conn, buf)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(string(buf), check.Equals, \"SSH-2.0-Go\\r\\n\")\n\n\t\t// Send text banner\n\t\t_, err = sshconn.Conn.Write([]byte(\"SSH-2.0-Fake\\r\\n\"))\n\t\tc.Check(err, check.IsNil)\n\n\t\t// Receive binary\n\t\t_, err = io.ReadFull(sshconn.Conn, buf[:4])\n\t\tc.Check(err, check.IsNil)\n\n\t\t// If we can get this far into an SSH handshake...\n\t\tc.Logf(\"was able to read %x -- success, tunnel is working\", buf[:4])\n\t}()\n\tselect {\n\tcase <-done:\n\tcase <-time.After(time.Second):\n\t\tc.Fail()\n\t}\n\tctr, err := s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.ctrUUID})\n\tc.Check(err, check.IsNil)\n\tc.Check(ctr.InteractiveSessionStarted, check.Equals, true)\n}\n\nfunc (s *ContainerGatewaySuite) TestConnectFail_NoToken(c *check.C) {\n\tctx := ctrlctx.NewWithToken(s.ctx, s.cluster, \"\")\n\t_, err := s.localdb.ContainerSSH(ctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})\n\tc.Check(err, check.ErrorMatches, `.* 401 .*`)\n}\n\nfunc (s *ContainerGatewaySuite) TestConnectFail_AnonymousToken(c *check.C) {\n\tctx := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AnonymousToken)\n\t_, err := s.localdb.ContainerSSH(ctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})\n\tc.Check(err, check.ErrorMatches, `.* 404 .*`)\n}\n\nfunc (s *ContainerGatewaySuite) TestCreateTunnel(c *check.C) {\n\t// no AuthSecret\n\tconn, err := s.localdb.ContainerGatewayTunnel(s.userctx, arvados.ContainerGatewayTunnelOptions{\n\t\tUUID: s.ctrUUID,\n\t})\n\tc.Check(err, check.ErrorMatches, `authentication error`)\n\tc.Check(conn.Conn, check.IsNil)\n\n\t// bogus AuthSecret\n\tconn, err = s.localdb.ContainerGatewayTunnel(s.userctx, arvados.ContainerGatewayTunnelOptions{\n\t\tUUID:       s.ctrUUID,\n\t\tAuthSecret: \"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\",\n\t})\n\tc.Check(err, check.ErrorMatches, `authentication error`)\n\tc.Check(conn.Conn, check.IsNil)\n\n\t// good AuthSecret\n\tconn, err = s.localdb.ContainerGatewayTunnel(s.userctx, arvados.ContainerGatewayTunnelOptions{\n\t\tUUID:       s.ctrUUID,\n\t\tAuthSecret: s.gw.AuthSecret,\n\t})\n\tc.Check(err, check.IsNil)\n\tc.Check(conn.Conn, check.NotNil)\n}\n\nfunc (s *ContainerGatewaySuite) TestConnectThroughTunnelWithProxyOK(c *check.C) {\n\tforceProxyForTest = true\n\ts.testConnectThroughTunnel(c, \"\")\n}\n\nfunc (s *ContainerGatewaySuite) TestConnectThroughTunnelWithProxyError(c *check.C) {\n\tforceProxyForTest = true\n\tdelete(s.cluster.Services.Controller.InternalURLs, *forceInternalURLForTest)\n\ts.testConnectThroughTunnel(c, `.*tunnel endpoint is invalid.*`)\n}\n\nfunc (s *ContainerGatewaySuite) TestConnectThroughTunnelNoProxyOK(c *check.C) {\n\ts.testConnectThroughTunnel(c, \"\")\n}\n\nfunc (s *ContainerGatewaySuite) setupGatewayWithTunnel(c *check.C) *crunchrun.Gateway {\n\trootctx := ctrlctx.NewWithToken(s.ctx, s.cluster, s.cluster.SystemRootToken)\n\t// Until the tunnel starts up, set gateway_address to a value\n\t// that can't work. 
We want to ensure the only way we can\n\t// reach the gateway is through the tunnel.\n\ttungw := &crunchrun.Gateway{\n\t\tContainerUUID: s.ctrUUID,\n\t\tAuthSecret:    s.gw.AuthSecret,\n\t\tLog:           ctxlog.TestLogger(c),\n\t\tTarget:        crunchrun.GatewayTargetStub{},\n\t\tArvadosClient: s.gw.ArvadosClient,\n\t\tUpdateTunnelURL: func(url string) {\n\t\t\tc.Logf(\"UpdateTunnelURL(%q)\", url)\n\t\t\tgwaddr := \"tunnel \" + url\n\t\t\ts.localdb.ContainerUpdate(rootctx, arvados.UpdateOptions{\n\t\t\t\tUUID: s.ctrUUID,\n\t\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\t\"gateway_address\": gwaddr}})\n\t\t},\n\t}\n\tc.Assert(tungw.Start(), check.IsNil)\n\n\t// We didn't supply an external hostname in the Address field,\n\t// so Start() should assign a local address.\n\thost, _, err := net.SplitHostPort(tungw.Address)\n\tc.Assert(err, check.IsNil)\n\tc.Check(host, check.Equals, \"127.0.0.1\")\n\n\t_, err = s.localdb.ContainerUpdate(rootctx, arvados.UpdateOptions{\n\t\tUUID: s.ctrUUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"state\": arvados.ContainerStateRunning,\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\n\tfor deadline := time.Now().Add(5 * time.Second); time.Now().Before(deadline); time.Sleep(time.Second / 2) {\n\t\tctr, err := s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.ctrUUID})\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(ctr.InteractiveSessionStarted, check.Equals, false)\n\t\tc.Logf(\"ctr.GatewayAddress == %s\", ctr.GatewayAddress)\n\t\tif strings.HasPrefix(ctr.GatewayAddress, \"tunnel \") {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn tungw\n}\n\nfunc (s *ContainerGatewaySuite) testConnectThroughTunnel(c *check.C, expectErrorMatch string) {\n\ts.setupGatewayWithTunnel(c)\n\tc.Log(\"connecting to gateway through tunnel\")\n\tarpc := rpc.NewConn(\"\", &url.URL{Scheme: \"https\", Host: s.gw.ArvadosClient.APIHost}, true, rpc.PassthroughTokenProvider)\n\tsshconn, err := arpc.ContainerSSH(s.userctx, arvados.ContainerSSHOptions{UUID: s.ctrUUID})\n\tif expectErrorMatch != \"\" {\n\t\tc.Check(err, check.ErrorMatches, expectErrorMatch)\n\t\treturn\n\t}\n\tc.Assert(err, check.IsNil)\n\tc.Assert(sshconn.Conn, check.NotNil)\n\tdefer sshconn.Conn.Close()\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer close(done)\n\n\t\t// Receive text banner\n\t\tbuf := make([]byte, 12)\n\t\t_, err := io.ReadFull(sshconn.Conn, buf)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(string(buf), check.Equals, \"SSH-2.0-Go\\r\\n\")\n\n\t\t// Send text banner\n\t\t_, err = sshconn.Conn.Write([]byte(\"SSH-2.0-Fake\\r\\n\"))\n\t\tc.Check(err, check.IsNil)\n\n\t\t// Receive binary\n\t\t_, err = io.ReadFull(sshconn.Conn, buf[:4])\n\t\tc.Check(err, check.IsNil)\n\n\t\t// If we can get this far into an SSH handshake...\n\t\tc.Logf(\"was able to read %x -- success, tunnel is working\", buf[:4])\n\t}()\n\tselect {\n\tcase <-done:\n\tcase <-time.After(time.Second):\n\t\tc.Fail()\n\t}\n\tctr, err := s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.ctrUUID})\n\tc.Check(err, check.IsNil)\n\tc.Check(ctr.InteractiveSessionStarted, check.Equals, true)\n}\n\nfunc getCookie(resp *http.Response, name string) *http.Cookie {\n\tfor _, cookie := range resp.Cookies() {\n\t\tif cookie.Name == name {\n\t\t\treturn cookie\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "lib/controller/localdb/container_request.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/scheduler\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\n// ContainerRequestCreate defers to railsProxy for everything except\n// vocabulary checking.\nfunc (conn *Conn) ContainerRequestCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.ContainerRequest, error) {\n\tconn.logActivity(ctx)\n\terr := conn.checkProperties(ctx, opts.Attrs[\"properties\"])\n\tif err != nil {\n\t\treturn arvados.ContainerRequest{}, err\n\t}\n\tresp, err := conn.railsProxy.ContainerRequestCreate(ctx, opts)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\treturn resp, nil\n}\n\n// ContainerRequestUpdate defers to railsProxy for everything except\n// vocabulary checking.\nfunc (conn *Conn) ContainerRequestUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.ContainerRequest, error) {\n\tconn.logActivity(ctx)\n\terr := conn.checkProperties(ctx, opts.Attrs[\"properties\"])\n\tif err != nil {\n\t\treturn arvados.ContainerRequest{}, err\n\t}\n\tresp, err := conn.railsProxy.ContainerRequestUpdate(ctx, opts)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\treturn resp, nil\n}\n\nfunc (conn *Conn) ContainerRequestGet(ctx context.Context, opts arvados.GetOptions) (arvados.ContainerRequest, error) {\n\tconn.logActivity(ctx)\n\treturn conn.railsProxy.ContainerRequestGet(ctx, opts)\n}\n\nfunc (conn *Conn) ContainerRequestList(ctx context.Context, opts arvados.ListOptions) (arvados.ContainerRequestList, error) {\n\tconn.logActivity(ctx)\n\treturn conn.railsProxy.ContainerRequestList(ctx, opts)\n}\n\nfunc (conn *Conn) ContainerRequestDelete(ctx context.Context, opts arvados.DeleteOptions) (arvados.ContainerRequest, error) {\n\tconn.logActivity(ctx)\n\treturn conn.railsProxy.ContainerRequestDelete(ctx, opts)\n}\n\nfunc (conn *Conn) ContainerRequestContainerStatus(ctx context.Context, opts arvados.GetOptions) (arvados.ContainerStatus, error) {\n\tconn.logActivity(ctx)\n\tvar ret arvados.ContainerStatus\n\tcr, err := conn.railsProxy.ContainerRequestGet(ctx, arvados.GetOptions{UUID: opts.UUID, Select: []string{\"uuid\", \"container_uuid\", \"log_uuid\"}})\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tif cr.ContainerUUID == \"\" {\n\t\tret.SchedulingStatus = \"No container is assigned.\"\n\t\treturn ret, nil\n\t}\n\t// We use admin credentials to get the container record so we\n\t// don't get an error when we're in a race with auto-retry and\n\t// the container became user-unreadable since we fetched the\n\t// CR above.\n\tctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{conn.cluster.SystemRootToken}})\n\tctr, err := conn.railsProxy.ContainerGet(ctxRoot, arvados.GetOptions{UUID: cr.ContainerUUID, Select: []string{\"uuid\", \"state\", \"priority\"}})\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tret.UUID = ctr.UUID\n\tret.State = ctr.State\n\tif ctr.State != arvados.ContainerStateQueued && ctr.State != arvados.ContainerStateLocked {\n\t\t// Scheduling status is not a thing once the container\n\t\t// is in running state.\n\t\treturn ret, nil\n\t}\n\tvar lastErr error\n\tfor dispatchurl := range conn.cluster.Services.DispatchCloud.InternalURLs {\n\t\tbaseurl := url.URL(dispatchurl)\n\t\tapiurl, err := 
baseurl.Parse(\"/arvados/v1/dispatch/container?container_uuid=\" + cr.ContainerUUID)\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t\tcontinue\n\t\t}\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, apiurl.String(), nil)\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t\tcontinue\n\t\t}\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+conn.cluster.ManagementToken)\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tif err != nil {\n\t\t\tlastErr = fmt.Errorf(\"error getting status from dispatcher: %w\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif resp.StatusCode == http.StatusNotFound {\n\t\t\tcontinue\n\t\t} else if resp.StatusCode != http.StatusOK {\n\t\t\tlastErr = fmt.Errorf(\"error getting status from dispatcher: %s\", resp.Status)\n\t\t\tcontinue\n\t\t}\n\t\tvar qent scheduler.QueueEnt\n\t\terr = json.NewDecoder(resp.Body).Decode(&qent)\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t\tcontinue\n\t\t}\n\t\tret.State = qent.Container.State // Prefer dispatcher's view of state if not equal to ctr.State\n\t\tret.SchedulingStatus = qent.SchedulingStatus\n\t\treturn ret, nil\n\t}\n\tif lastErr != nil {\n\t\t// If we got a non-nil error from a dispatchcloud\n\t\t// service, and the container state suggests\n\t\t// dispatchcloud should know about it, then we return\n\t\t// an error so the client knows to retry.\n\t\treturn ret, httpserver.ErrorWithStatus(lastErr, http.StatusBadGateway)\n\t}\n\t// All running dispatchcloud services confirm they don't have\n\t// this container (the dispatcher hasn't yet noticed it\n\t// appearing in the queue) or there are no dispatchcloud\n\t// services configured. Either way, all we can say is that\n\t// it's queued.\n\tif ctr.State == arvados.ContainerStateQueued && ctr.Priority < 1 {\n\t\t// If it hasn't been picked up by a dispatcher\n\t\t// already, it won't be -- it's just on hold.\n\t\t// Scheduling status does not apply.\n\t\treturn ret, nil\n\t}\n\tret.SchedulingStatus = \"Waiting in queue.\"\n\treturn ret, nil\n}\n"
  },
  {
    "path": "lib/controller/localdb/container_request_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&ContainerRequestSuite{})\n\ntype ContainerRequestSuite struct {\n\tlocaldbSuite\n}\n\nfunc (s *ContainerRequestSuite) TestCRCreateWithProperties(c *check.C) {\n\ts.setUpVocabulary(c, \"\")\n\n\ttests := []struct {\n\t\tname    string\n\t\tprops   map[string]interface{}\n\t\tsuccess bool\n\t}{\n\t\t{\"Invalid prop key\", map[string]interface{}{\"Priority\": \"IDVALIMPORTANCES1\"}, false},\n\t\t{\"Invalid prop value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"high\"}, false},\n\t\t{\"Valid prop key & value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"IDVALIMPORTANCES1\"}, true},\n\t\t{\"Empty properties\", map[string]interface{}{}, true},\n\t}\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName()+\" \", tt.name)\n\n\t\tcnt, err := s.localdb.ContainerRequestCreate(s.userctx, arvados.CreateOptions{\n\t\t\tSelect: []string{\"uuid\", \"properties\"},\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"command\":         []string{\"echo\", \"foo\"},\n\t\t\t\t\"container_image\": \"arvados/apitestfixture:latest\",\n\t\t\t\t\"cwd\":             \"/tmp\",\n\t\t\t\t\"environment\":     map[string]string{},\n\t\t\t\t\"mounts\": map[string]interface{}{\n\t\t\t\t\t\"/out\": map[string]interface{}{\n\t\t\t\t\t\t\"kind\":     \"tmp\",\n\t\t\t\t\t\t\"capacity\": 1000000,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"output_path\": \"/out\",\n\t\t\t\t\"runtime_constraints\": map[string]interface{}{\n\t\t\t\t\t\"vcpus\": 1,\n\t\t\t\t\t\"ram\":   2,\n\t\t\t\t},\n\t\t\t\t\"properties\": tt.props,\n\t\t\t}})\n\t\tif tt.success {\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(cnt.Properties, check.DeepEquals, tt.props)\n\t\t} else {\n\t\t\tc.Assert(err, check.NotNil)\n\t\t}\n\t}\n}\n\nfunc (s *ContainerRequestSuite) TestCRUpdateWithProperties(c *check.C) {\n\ts.setUpVocabulary(c, \"\")\n\n\ttests := []struct {\n\t\tname    string\n\t\tprops   map[string]interface{}\n\t\tsuccess bool\n\t}{\n\t\t{\"Invalid prop key\", map[string]interface{}{\"Priority\": \"IDVALIMPORTANCES1\"}, false},\n\t\t{\"Invalid prop value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"high\"}, false},\n\t\t{\"Valid prop key & value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"IDVALIMPORTANCES1\"}, true},\n\t\t{\"Empty properties\", map[string]interface{}{}, true},\n\t}\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName()+\" \", tt.name)\n\t\tcnt, err := s.localdb.ContainerRequestCreate(s.userctx, arvados.CreateOptions{\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"command\":         []string{\"echo\", \"foo\"},\n\t\t\t\t\"container_image\": \"arvados/apitestfixture:latest\",\n\t\t\t\t\"cwd\":             \"/tmp\",\n\t\t\t\t\"environment\":     map[string]string{},\n\t\t\t\t\"mounts\": map[string]interface{}{\n\t\t\t\t\t\"/out\": map[string]interface{}{\n\t\t\t\t\t\t\"kind\":     \"tmp\",\n\t\t\t\t\t\t\"capacity\": 1000000,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"output_path\": \"/out\",\n\t\t\t\t\"runtime_constraints\": map[string]interface{}{\n\t\t\t\t\t\"vcpus\": 1,\n\t\t\t\t\t\"ram\":   2,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tc.Assert(err, check.IsNil)\n\t\tcnt, err = s.localdb.ContainerRequestUpdate(s.userctx, arvados.UpdateOptions{\n\t\t\tUUID:   cnt.UUID,\n\t\t\tSelect: []string{\"uuid\", \"properties\"},\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"properties\": 
tt.props,\n\t\t\t}})\n\t\tif tt.success {\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(cnt.Properties, check.DeepEquals, tt.props)\n\t\t} else {\n\t\t\tc.Assert(err, check.NotNil)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/controller/localdb/container_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"database/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t. \"gopkg.in/check.v1\"\n)\n\nvar _ = Suite(&containerSuite{})\n\ntype containerSuite struct {\n\tlocaldbSuite\n\ttopcr     arvados.ContainerRequest\n\ttopc      arvados.Container\n\tstarttime time.Time\n}\n\nfunc (s *containerSuite) crAttrs(c *C) map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"container_image\":     arvadostest.DockerImage112PDH,\n\t\t\"command\":             []string{c.TestName(), fmt.Sprintf(\"%d\", s.starttime.UnixMilli()), \"top\"},\n\t\t\"output_path\":         \"/out\",\n\t\t\"priority\":            1,\n\t\t\"state\":               \"Committed\",\n\t\t\"container_count_max\": 1,\n\t\t\"runtime_constraints\": arvados.RuntimeConstraints{\n\t\t\tRAM:   1,\n\t\t\tVCPUs: 1,\n\t\t},\n\t\t\"mounts\": map[string]arvados.Mount{\n\t\t\t\"/out\": arvados.Mount{Kind: \"tmp\", Capacity: 1000000},\n\t\t},\n\t}\n}\n\nfunc (s *containerSuite) SetUpTest(c *C) {\n\tcontainerPriorityUpdateInterval = 2 * time.Second\n\ts.localdbSuite.SetUpTest(c)\n\ts.starttime = time.Now()\n\tvar err error\n\ts.topcr, err = s.localdb.ContainerRequestCreate(s.userctx, arvados.CreateOptions{Attrs: s.crAttrs(c)})\n\tc.Assert(err, IsNil)\n\ts.topc, err = s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.topcr.ContainerUUID})\n\tc.Assert(err, IsNil)\n\tc.Assert(int(s.topc.Priority), Not(Equals), 0)\n\tc.Logf(\"topcr %s topc %s\", s.topcr.UUID, s.topc.UUID)\n}\n\nfunc (s *containerSuite) TearDownTest(c *C) {\n\tcontainerPriorityUpdateInterval = 5 * time.Minute\n\ts.localdbSuite.TearDownTest(c)\n}\n\nfunc (s *containerSuite) syncUpdatePriority(c *C) {\n\t// Sending 1x to the \"update now\" channel starts an update;\n\t// sending again fills the channel while the first update is\n\t// running; sending a third time blocks until the worker\n\t// receives the 2nd send, i.e., guarantees that the first\n\t// update has finished.\n\ts.localdb.wantContainerPriorityUpdate <- struct{}{}\n\ts.localdb.wantContainerPriorityUpdate <- struct{}{}\n\ts.localdb.wantContainerPriorityUpdate <- struct{}{}\n}\n\nfunc (s *containerSuite) TestUpdatePriorityShouldBeNonZero(c *C) {\n\t_, err := s.db.Exec(\"update containers set priority=0 where uuid=$1\", s.topc.UUID)\n\tc.Assert(err, IsNil)\n\ttopc, err := s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.topc.UUID})\n\tc.Assert(err, IsNil)\n\tc.Assert(int(topc.Priority), Equals, 0)\n\ts.syncUpdatePriority(c)\n\ttopc, err = s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.topc.UUID})\n\tc.Assert(err, IsNil)\n\tc.Check(int(topc.Priority), Not(Equals), 0)\n}\n\nfunc (s *containerSuite) TestUpdatePriorityShouldBeZero(c *C) {\n\t_, err := s.db.Exec(\"update container_requests set priority=0 where uuid=$1\", s.topcr.UUID)\n\tc.Assert(err, IsNil)\n\ttopc, err := s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.topc.UUID})\n\tc.Assert(err, IsNil)\n\tc.Assert(int(topc.Priority), Not(Equals), 0)\n\ts.syncUpdatePriority(c)\n\ttopc, err = s.localdb.ContainerGet(s.userctx, arvados.GetOptions{UUID: s.topc.UUID})\n\tc.Assert(err, IsNil)\n\tc.Check(int(topc.Priority), Equals, 0)\n}\n\nfunc (s *containerSuite) 
TestUpdatePriorityMultiLevelWorkflow(c *C) {\n\ttestCtx, testCancel := context.WithDeadline(s.ctx, time.Now().Add(30*time.Second))\n\tdefer testCancel()\n\tadminCtx := ctrlctx.NewWithToken(testCtx, s.cluster, s.cluster.SystemRootToken)\n\n\tchildCR := func(parent arvados.ContainerRequest, arg string) arvados.ContainerRequest {\n\t\tattrs := s.crAttrs(c)\n\t\tattrs[\"command\"] = []string{c.TestName(), fmt.Sprintf(\"%d\", s.starttime.UnixMilli()), arg}\n\t\tcr, err := s.localdb.ContainerRequestCreate(s.userctx, arvados.CreateOptions{Attrs: attrs})\n\t\tc.Assert(err, IsNil)\n\t\t_, err = s.db.Exec(\"update container_requests set requesting_container_uuid=$1 where uuid=$2\", parent.ContainerUUID, cr.UUID)\n\t\tc.Assert(err, IsNil)\n\t\t_, err = s.localdb.ContainerUpdate(adminCtx, arvados.UpdateOptions{\n\t\t\tUUID:  cr.ContainerUUID,\n\t\t\tAttrs: map[string]interface{}{\"state\": \"Locked\"},\n\t\t})\n\t\tc.Assert(err, IsNil)\n\t\t_, err = s.localdb.ContainerUpdate(adminCtx, arvados.UpdateOptions{\n\t\t\tUUID:  cr.ContainerUUID,\n\t\t\tAttrs: map[string]interface{}{\"state\": \"Running\"},\n\t\t})\n\t\tc.Assert(err, IsNil)\n\t\treturn cr\n\t}\n\t// Build a tree of container requests and containers (3 levels\n\t// deep below s.topcr)\n\tallcrs := []arvados.ContainerRequest{s.topcr}\n\tfor i := 0; i < 2; i++ {\n\t\tcri := childCR(s.topcr, fmt.Sprintf(\"i %d\", i))\n\t\tallcrs = append(allcrs, cri)\n\t\tfor j := 0; j < 3; j++ {\n\t\t\tcrj := childCR(cri, fmt.Sprintf(\"i %d j %d\", i, j))\n\t\t\tallcrs = append(allcrs, crj)\n\t\t\tfor k := 0; k < 4; k++ {\n\t\t\t\tcrk := childCR(crj, fmt.Sprintf(\"i %d j %d k %d\", i, j, k))\n\t\t\t\tallcrs = append(allcrs, crk)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Set priority=0 on a parent+child, plus some other randomly\n\t// selected containers in the tree\n\t//\n\t// First entries of needfix are allcrs[1] (which is \"i 0\") and\n\t// allcrs[2] (\"i 0 j 0\") -- we want to make sure to get at\n\t// least one parent/child pair -- and the rest were chosen\n\t// randomly.\n\tneedfix := []int{1, 2, 23, 12, 20, 14, 13, 15, 7, 17, 6, 22, 21, 11, 1, 17, 18}\n\tfor n, i := range needfix {\n\t\tres, err := s.db.Exec(\"update containers set priority=0 where uuid=$1\", allcrs[i].ContainerUUID)\n\t\tc.Assert(err, IsNil)\n\t\tupdated, err := res.RowsAffected()\n\t\tc.Assert(err, IsNil)\n\t\tif n == 0 {\n\t\t\tc.Assert(int(updated), Equals, 1)\n\t\t}\n\t}\n\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\n\tchaosCtx, chaosCancel := context.WithCancel(adminCtx)\n\tdefer chaosCancel()\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\t// Flood the API with ContainerUpdate calls for the\n\t\t// same containers that need to have their priority\n\t\t// fixed\n\t\tfor chaosCtx.Err() == nil {\n\t\t\tn := rand.Intn(len(needfix))\n\t\t\t_, err := s.localdb.ContainerUpdate(chaosCtx, arvados.UpdateOptions{\n\t\t\t\tUUID: allcrs[needfix[n]].ContainerUUID,\n\t\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\t\"runtime_status\": map[string]string{\n\t\t\t\t\t\t\"info\": time.Now().Format(time.RFC3339Nano),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t\tif !errors.Is(err, context.Canceled) {\n\t\t\t\tc.Check(err, IsNil)\n\t\t\t}\n\t\t}\n\t}()\n\t// Find and fix the containers with wrong priority\n\ts.syncUpdatePriority(c)\n\t// Ensure they all got fixed\n\tfor _, cr := range allcrs {\n\t\tvar priority int\n\t\terr := s.db.QueryRow(\"select priority from containers where uuid=$1\", cr.ContainerUUID).Scan(&priority)\n\t\tc.Assert(err, IsNil)\n\t\tc.Check(priority, Not(Equals), 
0)\n\t}\n\tchaosCancel()\n\n\t// Flood railsapi with priority updates. This can cause\n\t// database deadlock: one call acquires row locks in the order\n\t// {i0j0, i0, i0j1}, while another call acquires row locks in\n\t// the order {i0j1, i0, i0j0}.\n\tdeadlockCtx, deadlockCancel := context.WithDeadline(adminCtx, time.Now().Add(30*time.Second))\n\tdefer deadlockCancel()\n\tfor _, cr := range allcrs {\n\t\tif strings.Contains(cr.Command[2], \" j \") && !strings.Contains(cr.Command[2], \" k \") {\n\t\t\tcr := cr\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tfor _, p := range []int{1, 2, 3, 4} {\n\t\t\t\t\tvar err error\n\t\t\t\t\tfor {\n\t\t\t\t\t\t_, err = s.localdb.ContainerRequestUpdate(deadlockCtx, arvados.UpdateOptions{\n\t\t\t\t\t\t\tUUID: cr.UUID,\n\t\t\t\t\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\t\t\t\t\"priority\": p,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t})\n\t\t\t\t\t\tc.Check(err, IsNil)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n\twg.Wait()\n\n\t// Simulate cascading cancellation of the entire tree. For\n\t// this we need a goroutine to notice and cancel containers\n\t// with state=Running and priority=0, and cancel them\n\t// (this is normally done by a dispatcher).\n\tdispCtx, dispCancel := context.WithCancel(adminCtx)\n\tdefer dispCancel()\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor dispCtx.Err() == nil {\n\t\t\tneedcancel, err := s.localdb.ContainerList(dispCtx, arvados.ListOptions{\n\t\t\t\tLimit:   10,\n\t\t\t\tFilters: []arvados.Filter{{\"state\", \"=\", \"Running\"}, {\"priority\", \"=\", 0}},\n\t\t\t})\n\t\t\tif errors.Is(err, context.Canceled) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tc.Assert(err, IsNil)\n\t\t\tfor _, ctr := range needcancel.Items {\n\t\t\t\t_, err := s.localdb.ContainerUpdate(dispCtx, arvados.UpdateOptions{\n\t\t\t\t\tUUID: ctr.UUID,\n\t\t\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\t\t\"state\": \"Cancelled\",\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tif errors.Is(err, context.Canceled) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tc.Assert(err, IsNil)\n\t\t\t}\n\t\t\ttime.Sleep(time.Second / 10)\n\t\t}\n\t}()\n\n\t_, err := s.localdb.ContainerRequestUpdate(s.userctx, arvados.UpdateOptions{\n\t\tUUID: s.topcr.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"priority\": 0,\n\t\t},\n\t})\n\tc.Assert(err, IsNil)\n\n\tc.Logf(\"waiting for all %d containers to have priority=0 after cancelling top level CR\", len(allcrs))\n\tfor {\n\t\ttime.Sleep(time.Second / 2)\n\t\tif testCtx.Err() != nil {\n\t\t\tfor i, cr := range allcrs {\n\t\t\t\tvar ctr arvados.Container\n\t\t\t\tvar command string\n\t\t\t\terr = s.db.QueryRowContext(s.ctx, `select cr.priority, cr.state, cr.container_uuid, c.state, c.priority, cr.command\n\t\t\t\t\tfrom container_requests cr\n\t\t\t\t\tleft join containers c on cr.container_uuid = c.uuid\n\t\t\t\t\twhere cr.uuid=$1`, cr.UUID).Scan(&cr.Priority, &cr.State, &ctr.UUID, &ctr.State, &ctr.Priority, &command)\n\t\t\t\tc.Check(err, IsNil)\n\t\t\t\tc.Logf(\"allcrs[%d] cr.pri %d %s c.pri %d %s cr.uuid %s c.uuid %s cmd %s\", i, cr.Priority, cr.State, ctr.Priority, ctr.State, cr.UUID, ctr.UUID, command)\n\t\t\t}\n\t\t\tc.Fatal(\"timed out\")\n\t\t}\n\t\tdone := true\n\t\tfor _, cr := range allcrs {\n\t\t\tvar priority int\n\t\t\tvar crstate, command, ctrUUID string\n\t\t\tvar parent sql.NullString\n\t\t\terr := s.db.QueryRowContext(s.ctx, `select state, priority, container_uuid, requesting_container_uuid, command\n\t\t\t\tfrom container_requests where uuid=$1`, cr.UUID).Scan(&crstate, &priority, 
&ctrUUID, &parent, &command)\n\t\t\tif errors.Is(err, context.Canceled) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tc.Assert(err, IsNil)\n\t\t\tif crstate == \"Committed\" && priority > 0 {\n\t\t\t\tc.Logf(\"container request %s (%s; parent=%s) still has state %s priority %d\", cr.UUID, command, parent.String, crstate, priority)\n\t\t\t\tdone = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t\terr = s.db.QueryRowContext(s.ctx, \"select priority, command from containers where uuid=$1\", cr.ContainerUUID).Scan(&priority, &command)\n\t\t\tif errors.Is(err, context.Canceled) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tc.Assert(err, IsNil)\n\t\t\tif priority > 0 {\n\t\t\t\tc.Logf(\"container %s (%s) still has priority %d\", cr.ContainerUUID, command, priority)\n\t\t\t\tdone = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif done {\n\t\t\tc.Logf(\"success -- all %d containers have priority=0\", len(allcrs))\n\t\t\tbreak\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/controller/localdb/docker_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\ntype tcpProxy struct {\n\tnet.Listener\n}\n\n// newTCPProxy sets up a TCP proxy that forwards all connections to the\n// given host and port. This allows the caller to run a docker container that\n// can connect to cluster service on the test host's loopback interface.\n//\n// listenAddr is the IP address of the interface to listen on. Pass an empty\n// string to listen on all interfaces.\n//\n// Caller is responsible for calling Close() on the returned tcpProxy.\nfunc newTCPProxy(c *check.C, listenAddr, host, port string) *tcpProxy {\n\ttarget := net.JoinHostPort(host, port)\n\tln, err := net.Listen(\"tcp\", net.JoinHostPort(listenAddr, \"\"))\n\tc.Assert(err, check.IsNil)\n\tgo func() {\n\t\tfor {\n\t\t\tdownstream, err := ln.Accept()\n\t\t\tif err != nil && strings.Contains(err.Error(), \"use of closed network connection\") {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tgo func() {\n\t\t\t\tc.Logf(\"tcpProxy accepted connection from %s\", downstream.RemoteAddr().String())\n\t\t\t\tdefer downstream.Close()\n\t\t\t\tupstream, err := net.Dial(\"tcp\", target)\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.Logf(\"net.Dial(%q): %s\", target, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tdefer upstream.Close()\n\t\t\t\tgo io.Copy(downstream, upstream)\n\t\t\t\tio.Copy(upstream, downstream)\n\t\t\t}()\n\t\t}\n\t}()\n\tc.Logf(\"tcpProxy listening at %s\", ln.Addr().String())\n\treturn &tcpProxy{Listener: ln}\n}\n\nfunc (proxy *tcpProxy) Port() string {\n\t_, port, _ := net.SplitHostPort(proxy.Addr().String())\n\treturn port\n}\n\n// newPgProxy sets up a tcpProxy for the cluster's PostgreSQL database.\nfunc newPgProxy(c *check.C, cluster *arvados.Cluster, listenAddr string) *tcpProxy {\n\thost := cluster.PostgreSQL.Connection[\"host\"]\n\tif host == \"\" {\n\t\thost = \"localhost\"\n\t}\n\tport := cluster.PostgreSQL.Connection[\"port\"]\n\tif port == \"\" {\n\t\tport = \"5432\"\n\t}\n\treturn newTCPProxy(c, listenAddr, host, port)\n}\n\n// newInternalProxy sets up a tcpProxy for an InternalURL of the given service.\nfunc newInternalProxy(c *check.C, service arvados.Service, listenAddr string) *tcpProxy {\n\tfor intURL, _ := range service.InternalURLs {\n\t\thost, port, err := net.SplitHostPort(intURL.Host)\n\t\tif err == nil && port != \"\" {\n\t\t\treturn newTCPProxy(c, listenAddr, host, port)\n\t\t}\n\t}\n\tc.Fatal(\"no valid InternalURLs found for service\")\n\treturn nil\n}\n"
  },
  {
    "path": "lib/controller/localdb/group.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// GroupCreate defers to railsProxy for everything except vocabulary\n// checking.\nfunc (conn *Conn) GroupCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Group, error) {\n\tconn.logActivity(ctx)\n\terr := conn.checkProperties(ctx, opts.Attrs[\"properties\"])\n\tif err != nil {\n\t\treturn arvados.Group{}, err\n\t}\n\tresp, err := conn.railsProxy.GroupCreate(ctx, opts)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\treturn resp, nil\n}\n\nfunc (conn *Conn) GroupGet(ctx context.Context, opts arvados.GetOptions) (arvados.Group, error) {\n\tconn.logActivity(ctx)\n\treturn conn.railsProxy.GroupGet(ctx, opts)\n}\n\n// GroupUpdate defers to railsProxy for everything except vocabulary\n// checking.\nfunc (conn *Conn) GroupUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Group, error) {\n\tconn.logActivity(ctx)\n\terr := conn.checkProperties(ctx, opts.Attrs[\"properties\"])\n\tif err != nil {\n\t\treturn arvados.Group{}, err\n\t}\n\tresp, err := conn.railsProxy.GroupUpdate(ctx, opts)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\treturn resp, nil\n}\n\nfunc (conn *Conn) GroupList(ctx context.Context, opts arvados.ListOptions) (arvados.GroupList, error) {\n\tconn.logActivity(ctx)\n\treturn conn.railsProxy.GroupList(ctx, opts)\n}\n\nfunc (conn *Conn) GroupDelete(ctx context.Context, opts arvados.DeleteOptions) (arvados.Group, error) {\n\tconn.logActivity(ctx)\n\treturn conn.railsProxy.GroupDelete(ctx, opts)\n}\n\nfunc (conn *Conn) GroupContents(ctx context.Context, options arvados.GroupContentsOptions) (arvados.ObjectList, error) {\n\tconn.logActivity(ctx)\n\n\t// The requested UUID can be a user (virtual home project), which we just pass on to\n\t// the API server.\n\tif strings.Index(options.UUID, \"-j7d0g-\") != 5 {\n\t\treturn conn.railsProxy.GroupContents(ctx, options)\n\t}\n\n\tvar resp arvados.ObjectList\n\n\t// Get the group object\n\trespGroup, err := conn.GroupGet(ctx, arvados.GetOptions{UUID: options.UUID})\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\t// If the group has groupClass 'filter', apply the filters before getting the contents.\n\tif respGroup.GroupClass == \"filter\" {\n\t\tif filters, ok := respGroup.Properties[\"filters\"].([]interface{}); ok {\n\t\t\tfor _, f := range filters {\n\t\t\t\t// f is supposed to be a []string\n\t\t\t\ttmp, ok2 := f.([]interface{})\n\t\t\t\tif !ok2 || len(tmp) < 3 {\n\t\t\t\t\treturn resp, fmt.Errorf(\"filter unparsable: %T, %+v, original field: %T, %+v\\n\", tmp, tmp, f, f)\n\t\t\t\t}\n\t\t\t\tvar filter arvados.Filter\n\t\t\t\tif attr, ok2 := tmp[0].(string); ok2 {\n\t\t\t\t\tfilter.Attr = attr\n\t\t\t\t} else {\n\t\t\t\t\treturn resp, fmt.Errorf(\"filter unparsable: attribute must be string: %T, %+v, filter: %T, %+v\\n\", tmp[0], tmp[0], f, f)\n\t\t\t\t}\n\t\t\t\tif operator, ok2 := tmp[1].(string); ok2 {\n\t\t\t\t\tfilter.Operator = operator\n\t\t\t\t} else {\n\t\t\t\t\treturn resp, fmt.Errorf(\"filter unparsable: operator must be string: %T, %+v, filter: %T, %+v\\n\", tmp[1], tmp[1], f, f)\n\t\t\t\t}\n\t\t\t\tfilter.Operand = tmp[2]\n\t\t\t\toptions.Filters = append(options.Filters, filter)\n\t\t\t}\n\t\t} else {\n\t\t\treturn resp, fmt.Errorf(\"filter unparsable: not an array\\n\")\n\t\t}\n\t\t// Use the generic /groups/contents endpoint for filter 
groups\n\t\toptions.UUID = \"\"\n\t}\n\n\treturn conn.railsProxy.GroupContents(ctx, options)\n}\n"
  },
  {
    "path": "lib/controller/localdb/group_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&GroupSuite{})\n\ntype GroupSuite struct {\n\tlocaldbSuite\n}\n\nfunc (s *GroupSuite) TestGroupCreateWithProperties(c *check.C) {\n\ts.setUpVocabulary(c, \"\")\n\n\ttests := []struct {\n\t\tname    string\n\t\tprops   map[string]interface{}\n\t\tsuccess bool\n\t}{\n\t\t{\"Invalid prop key\", map[string]interface{}{\"Priority\": \"IDVALIMPORTANCES1\"}, false},\n\t\t{\"Invalid prop value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"high\"}, false},\n\t\t{\"Valid prop key & value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"IDVALIMPORTANCES1\"}, true},\n\t\t{\"Empty properties\", map[string]interface{}{}, true},\n\t}\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName()+\" \", tt.name)\n\n\t\tgrp, err := s.localdb.GroupCreate(s.userctx, arvados.CreateOptions{\n\t\t\tSelect: []string{\"uuid\", \"properties\"},\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"group_class\": \"project\",\n\t\t\t\t\"properties\":  tt.props,\n\t\t\t}})\n\t\tif tt.success {\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(grp.Properties, check.DeepEquals, tt.props)\n\t\t} else {\n\t\t\tc.Assert(err, check.NotNil)\n\t\t}\n\t}\n}\n\nfunc (s *GroupSuite) TestGroupUpdateWithProperties(c *check.C) {\n\ts.setUpVocabulary(c, \"\")\n\n\ttests := []struct {\n\t\tname    string\n\t\tprops   map[string]interface{}\n\t\tsuccess bool\n\t}{\n\t\t{\"Invalid prop key\", map[string]interface{}{\"Priority\": \"IDVALIMPORTANCES1\"}, false},\n\t\t{\"Invalid prop value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"high\"}, false},\n\t\t{\"Valid prop key & value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"IDVALIMPORTANCES1\"}, true},\n\t\t{\"Empty properties\", map[string]interface{}{}, true},\n\t}\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName()+\" \", tt.name)\n\t\tgrp, err := s.localdb.GroupCreate(s.userctx, arvados.CreateOptions{\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"group_class\": \"project\",\n\t\t\t},\n\t\t})\n\t\tc.Assert(err, check.IsNil)\n\t\tgrp, err = s.localdb.GroupUpdate(s.userctx, arvados.UpdateOptions{\n\t\t\tUUID:   grp.UUID,\n\t\t\tSelect: []string{\"uuid\", \"properties\"},\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"properties\": tt.props,\n\t\t\t}})\n\t\tif tt.success {\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(grp.Properties, check.DeepEquals, tt.props)\n\t\t} else {\n\t\t\tc.Assert(err, check.NotNil)\n\t\t}\n\t}\n}\n\nfunc (s *GroupSuite) TestCanWriteCanManageResponses(c *check.C) {\n\tctxUser1 := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.ActiveTokenV2)\n\tctxUser2 := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.SpectatorToken)\n\tctxAdmin := ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.AdminToken)\n\tproject, err := s.localdb.GroupCreate(ctxUser1, arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"group_class\": \"project\",\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\tc.Check(project.CanWrite, check.Equals, true)\n\tc.Check(project.CanManage, check.Equals, true)\n\n\tsubproject, err := s.localdb.GroupCreate(ctxUser1, arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"owner_uuid\":  project.UUID,\n\t\t\t\"group_class\": 
\"project\",\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\tc.Check(subproject.CanWrite, check.Equals, true)\n\tc.Check(subproject.CanManage, check.Equals, true)\n\n\tprojlist, err := s.localdb.GroupList(ctxUser1, arvados.ListOptions{\n\t\tLimit:   -1,\n\t\tFilters: []arvados.Filter{{\"uuid\", \"in\", []string{project.UUID, subproject.UUID}}},\n\t})\n\tc.Assert(err, check.IsNil)\n\tc.Assert(projlist.Items, check.HasLen, 2)\n\tfor _, p := range projlist.Items {\n\t\tc.Check(p.CanWrite, check.Equals, true)\n\t\tc.Check(p.CanManage, check.Equals, true)\n\t}\n\n\t// Give 2nd user permission to read\n\tpermlink, err := s.localdb.LinkCreate(ctxAdmin, arvados.CreateOptions{\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"link_class\": \"permission\",\n\t\t\t\"name\":       \"can_read\",\n\t\t\t\"tail_uuid\":  arvadostest.SpectatorUserUUID,\n\t\t\t\"head_uuid\":  project.UUID,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\n\t// As 2nd user: can read, cannot manage, cannot write\n\tproject2, err := s.localdb.GroupGet(ctxUser2, arvados.GetOptions{UUID: project.UUID})\n\tc.Assert(err, check.IsNil)\n\tc.Check(project2.CanWrite, check.Equals, false)\n\tc.Check(project2.CanManage, check.Equals, false)\n\n\t_, err = s.localdb.LinkUpdate(ctxAdmin, arvados.UpdateOptions{\n\t\tUUID: permlink.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"name\": \"can_write\",\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\n\t// As 2nd user: cannot manage, can write\n\tproject2, err = s.localdb.GroupGet(ctxUser2, arvados.GetOptions{UUID: project.UUID})\n\tc.Assert(err, check.IsNil)\n\tc.Check(project2.CanWrite, check.Equals, true)\n\tc.Check(project2.CanManage, check.Equals, false)\n\n\t// As owner: after freezing, can manage (owner), cannot write (frozen)\n\tproject, err = s.localdb.GroupUpdate(ctxUser1, arvados.UpdateOptions{\n\t\tUUID: project.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"frozen_by_uuid\": arvadostest.ActiveUserUUID,\n\t\t}})\n\tc.Assert(err, check.IsNil)\n\tc.Check(project.CanWrite, check.Equals, false)\n\tc.Check(project.CanManage, check.Equals, true)\n\n\t// As admin: can manage (admin), cannot write (frozen)\n\tproject, err = s.localdb.GroupGet(ctxAdmin, arvados.GetOptions{UUID: project.UUID})\n\tc.Assert(err, check.IsNil)\n\tc.Check(project.CanWrite, check.Equals, false)\n\tc.Check(project.CanManage, check.Equals, true)\n\n\t// As 2nd user: cannot manage (perm), cannot write (frozen)\n\tproject2, err = s.localdb.GroupGet(ctxUser2, arvados.GetOptions{UUID: project.UUID})\n\tc.Assert(err, check.IsNil)\n\tc.Check(project2.CanWrite, check.Equals, false)\n\tc.Check(project2.CanManage, check.Equals, false)\n\n\t// After upgrading perm to \"manage\", as 2nd user: can manage (perm), cannot write (frozen)\n\t_, err = s.localdb.LinkUpdate(ctxAdmin, arvados.UpdateOptions{\n\t\tUUID: permlink.UUID,\n\t\tAttrs: map[string]interface{}{\n\t\t\t\"name\": \"can_manage\",\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\tproject2, err = s.localdb.GroupGet(ctxUser2, arvados.GetOptions{UUID: project.UUID})\n\tc.Assert(err, check.IsNil)\n\tc.Check(project2.CanWrite, check.Equals, false)\n\tc.Check(project2.CanManage, check.Equals, true)\n\n\t// 2nd user can also manage (but not write) the subject inside the frozen project\n\tsubproject2, err := s.localdb.GroupGet(ctxUser2, arvados.GetOptions{UUID: subproject.UUID})\n\tc.Assert(err, check.IsNil)\n\tc.Check(subproject2.CanWrite, check.Equals, false)\n\tc.Check(subproject2.CanManage, check.Equals, true)\n\n\tu, err := s.localdb.UserGet(ctxUser1, 
arvados.GetOptions{\n\t\tUUID: arvadostest.ActiveUserUUID,\n\t})\n\tc.Assert(err, check.IsNil)\n\tc.Check(u.CanWrite, check.Equals, true)\n\tc.Check(u.CanManage, check.Equals, true)\n\n\tfor _, selectParam := range [][]string{\n\t\tnil,\n\t\t{\"can_write\", \"can_manage\"},\n\t} {\n\t\tc.Logf(\"selectParam: %+v\", selectParam)\n\t\tulist, err := s.localdb.UserList(ctxUser1, arvados.ListOptions{\n\t\t\tLimit:   -1,\n\t\t\tFilters: []arvados.Filter{{\"uuid\", \"=\", arvadostest.ActiveUserUUID}},\n\t\t\tSelect:  selectParam,\n\t\t})\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Assert(ulist.Items, check.HasLen, 1)\n\t\tc.Logf(\"%+v\", ulist.Items)\n\t\tfor _, u := range ulist.Items {\n\t\t\tc.Check(u.CanWrite, check.Equals, true)\n\t\t\tc.Check(u.CanManage, check.Equals, true)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/controller/localdb/link.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// LinkCreate defers to railsProxy for everything except vocabulary\n// checking.\nfunc (conn *Conn) LinkCreate(ctx context.Context, opts arvados.CreateOptions) (arvados.Link, error) {\n\terr := conn.checkProperties(ctx, opts.Attrs[\"properties\"])\n\tif err != nil {\n\t\treturn arvados.Link{}, err\n\t}\n\tresp, err := conn.railsProxy.LinkCreate(ctx, opts)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\treturn resp, nil\n}\n\n// LinkUpdate defers to railsProxy for everything except vocabulary\n// checking.\nfunc (conn *Conn) LinkUpdate(ctx context.Context, opts arvados.UpdateOptions) (arvados.Link, error) {\n\terr := conn.checkProperties(ctx, opts.Attrs[\"properties\"])\n\tif err != nil {\n\t\treturn arvados.Link{}, err\n\t}\n\tresp, err := conn.railsProxy.LinkUpdate(ctx, opts)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\treturn resp, nil\n}\n"
  },
  {
    "path": "lib/controller/localdb/link_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&LinkSuite{})\n\ntype LinkSuite struct {\n\tlocaldbSuite\n}\n\nfunc (s *LinkSuite) TestLinkCreateWithProperties(c *check.C) {\n\ts.setUpVocabulary(c, \"\")\n\n\ttests := []struct {\n\t\tname    string\n\t\tprops   map[string]interface{}\n\t\tsuccess bool\n\t}{\n\t\t{\"Invalid prop key\", map[string]interface{}{\"Priority\": \"IDVALIMPORTANCES1\"}, false},\n\t\t{\"Invalid prop value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"high\"}, false},\n\t\t{\"Valid prop key & value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"IDVALIMPORTANCES1\"}, true},\n\t\t{\"Empty properties\", map[string]interface{}{}, true},\n\t}\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName()+\" \", tt.name)\n\n\t\tlnk, err := s.localdb.LinkCreate(s.userctx, arvados.CreateOptions{\n\t\t\tSelect: []string{\"uuid\", \"properties\"},\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"link_class\": \"star\",\n\t\t\t\t\"tail_uuid\":  \"zzzzz-j7d0g-publicfavorites\",\n\t\t\t\t\"head_uuid\":  arvadostest.FooCollection,\n\t\t\t\t\"properties\": tt.props,\n\t\t\t}})\n\t\tif tt.success {\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(lnk.Properties, check.DeepEquals, tt.props)\n\t\t} else {\n\t\t\tc.Assert(err, check.NotNil)\n\t\t}\n\t}\n}\n\nfunc (s *LinkSuite) TestLinkUpdateWithProperties(c *check.C) {\n\ts.setUpVocabulary(c, \"\")\n\n\ttests := []struct {\n\t\tname    string\n\t\tprops   map[string]interface{}\n\t\tsuccess bool\n\t}{\n\t\t{\"Invalid prop key\", map[string]interface{}{\"Priority\": \"IDVALIMPORTANCES1\"}, false},\n\t\t{\"Invalid prop value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"high\"}, false},\n\t\t{\"Valid prop key & value\", map[string]interface{}{\"IDTAGIMPORTANCES\": \"IDVALIMPORTANCES1\"}, true},\n\t\t{\"Empty properties\", map[string]interface{}{}, true},\n\t}\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName()+\" \", tt.name)\n\t\tlnk, err := s.localdb.LinkCreate(s.userctx, arvados.CreateOptions{\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"link_class\": \"star\",\n\t\t\t\t\"tail_uuid\":  \"zzzzz-j7d0g-publicfavorites\",\n\t\t\t\t\"head_uuid\":  arvadostest.FooCollection,\n\t\t\t},\n\t\t})\n\t\tc.Assert(err, check.IsNil)\n\t\tlnk, err = s.localdb.LinkUpdate(s.userctx, arvados.UpdateOptions{\n\t\t\tUUID:   lnk.UUID,\n\t\t\tSelect: []string{\"uuid\", \"properties\"},\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"properties\": tt.props,\n\t\t\t}})\n\t\tif tt.success {\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(lnk.Properties, check.DeepEquals, tt.props)\n\t\t} else {\n\t\t\tc.Assert(err, check.NotNil)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/controller/localdb/localdb_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"errors\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/jmoiron/sqlx\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\ntype localdbSuite struct {\n\tctx         context.Context\n\tcancel      context.CancelFunc\n\tcluster     *arvados.Cluster\n\tdb          *sqlx.DB\n\tdbConnector *ctrlctx.DBConnector\n\ttx          *sqlx.Tx\n\ttxFinish    func(*error)\n\tuserctx     context.Context // uses ActiveUser token\n\tlocaldb     *Conn\n\trailsSpy    *arvadostest.Proxy\n}\n\nfunc (s *localdbSuite) SetUpSuite(c *check.C) {\n\tarvadostest.StartKeep(2, true)\n}\n\nfunc (s *localdbSuite) TearDownSuite(c *check.C) {\n\t// Undo any changes/additions to the user database so they\n\t// don't affect subsequent tests.\n\tarvadostest.ResetEnv()\n\tc.Check(arvados.NewClientFromEnv().RequestAndDecode(nil, \"POST\", \"database/reset\", nil, nil), check.IsNil)\n}\n\nfunc (s *localdbSuite) SetUpTest(c *check.C) {\n\t*s = localdbSuite{}\n\tlogger := ctxlog.TestLogger(c)\n\ts.ctx, s.cancel = context.WithCancel(context.Background())\n\ts.ctx = ctxlog.Context(s.ctx, logger)\n\tcfg, err := config.NewLoader(nil, logger).Load()\n\tc.Assert(err, check.IsNil)\n\ts.cluster, err = cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\ts.dbConnector = &ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}\n\ts.db, err = s.dbConnector.GetDB(s.ctx)\n\tc.Assert(err, check.IsNil)\n\ts.ctx, s.txFinish = ctrlctx.New(s.ctx, s.dbConnector.GetDB)\n\ts.tx, err = ctrlctx.CurrentTx(s.ctx)\n\tc.Assert(err, check.IsNil)\n\ts.localdb = NewConn(s.ctx, s.cluster, s.dbConnector.GetDB)\n\ts.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)\n\t*s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)\n\ts.userctx = ctrlctx.NewWithToken(s.ctx, s.cluster, arvadostest.ActiveTokenV2)\n}\n\nvar errRollbackAfterTest = errors.New(\"rollback after test\")\n\nfunc (s *localdbSuite) TearDownTest(c *check.C) {\n\tif s.tx != nil {\n\t\ts.tx.Rollback()\n\t}\n\tif s.txFinish != nil {\n\t\ts.txFinish(&errRollbackAfterTest)\n\t}\n\tif s.railsSpy != nil {\n\t\ts.railsSpy.Close()\n\t}\n\tif s.dbConnector != nil {\n\t\ts.dbConnector.Close()\n\t}\n\ts.cancel()\n}\n\nfunc (s *localdbSuite) setUpVocabulary(c *check.C, testVocabulary string) {\n\tif testVocabulary == \"\" {\n\t\ttestVocabulary = `{\n\t\t\t\"strict_tags\": false,\n\t\t\t\"tags\": {\n\t\t\t\t\"IDTAGIMPORTANCES\": {\n\t\t\t\t\t\"strict\": true,\n\t\t\t\t\t\"labels\": [{\"label\": \"Importance\"}, {\"label\": \"Priority\"}],\n\t\t\t\t\t\"values\": {\n\t\t\t\t\t\t\"IDVALIMPORTANCES1\": { \"labels\": [{\"label\": \"Critical\"}, {\"label\": \"Urgent\"}, {\"label\": \"High\"}] },\n\t\t\t\t\t\t\"IDVALIMPORTANCES2\": { \"labels\": [{\"label\": \"Normal\"}, {\"label\": \"Moderate\"}] },\n\t\t\t\t\t\t\"IDVALIMPORTANCES3\": { \"labels\": [{\"label\": \"Low\"}] }\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}`\n\t}\n\tvoc, err := arvados.NewVocabulary([]byte(testVocabulary), []string{})\n\tc.Assert(err, check.IsNil)\n\ts.localdb.vocabularyCache = voc\n\ts.cluster.API.VocabularyPath = \"foo\"\n}\n"
  },
  {
    "path": "lib/controller/localdb/log_activity.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n)\n\nvar loggedLogActivityDisabled = false\n\nfunc (conn *Conn) logActivity(ctx context.Context) {\n\tp := conn.cluster.Users.ActivityLoggingPeriod.Duration()\n\tif p < 1 {\n\t\tif !loggedLogActivityDisabled {\n\t\t\tctxlog.FromContext(ctx).Debug(\"logActivity disabled by config\")\n\t\t\tloggedLogActivityDisabled = true\n\t\t}\n\t\treturn\n\t}\n\tuser, _, err := ctrlctx.CurrentAuth(ctx)\n\tif err == ctrlctx.ErrUnauthenticated {\n\t\tctxlog.FromContext(ctx).Debug(\"logActivity skipped for unauthenticated request\")\n\t\treturn\n\t} else if err != nil {\n\t\tctxlog.FromContext(ctx).WithError(err).Error(\"logActivity CurrentAuth failed\")\n\t\treturn\n\t}\n\tnow := time.Now()\n\tconn.activeUsersLock.Lock()\n\tif conn.activeUsers == nil || conn.activeUsersReset.IsZero() || conn.activeUsersReset.Before(now) {\n\t\tconn.activeUsersReset = alignedPeriod(now, p)\n\t\tconn.activeUsers = map[string]bool{}\n\t}\n\tlogged := conn.activeUsers[user.UUID]\n\tif !logged {\n\t\t// Prevent other concurrent calls from logging about\n\t\t// this user until we finish.\n\t\tconn.activeUsers[user.UUID] = true\n\t}\n\tconn.activeUsersLock.Unlock()\n\tif logged {\n\t\treturn\n\t}\n\tdefer func() {\n\t\t// If we return without logging, reset the flag so we\n\t\t// try again on the user's next API call.\n\t\tif !logged {\n\t\t\tconn.activeUsersLock.Lock()\n\t\t\tconn.activeUsers[user.UUID] = false\n\t\t\tconn.activeUsersLock.Unlock()\n\t\t}\n\t}()\n\n\ttx, err := ctrlctx.NewTx(ctx)\n\tif err != nil {\n\t\tctxlog.FromContext(ctx).WithError(err).Error(\"logActivity NewTx failed\")\n\t\treturn\n\t}\n\tdefer tx.Rollback()\n\t_, err = tx.ExecContext(ctx, `\ninsert into logs\n (uuid,\n  owner_uuid, modified_by_user_uuid, object_owner_uuid,\n  event_type,\n  summary,\n  object_uuid,\n  properties,\n  event_at, created_at, updated_at, modified_at)\n values\n ($1, $2, $2, $2, $3, $4, $5, $6,\n  current_timestamp at time zone 'UTC',\n  current_timestamp at time zone 'UTC',\n  current_timestamp at time zone 'UTC',\n  current_timestamp at time zone 'UTC')\n returning id`,\n\t\tarvados.RandomUUID(conn.cluster.ClusterID, \"57u5n\"),\n\t\tconn.cluster.ClusterID+\"-tpzed-000000000000000\", // both modified_by and object_owner\n\t\t\"activity\",\n\t\t\"activity of \"+user.UUID,\n\t\tuser.UUID,\n\t\t\"{}\")\n\tif err != nil {\n\t\tctxlog.FromContext(ctx).WithError(err).Error(\"logActivity query failed\")\n\t\treturn\n\t}\n\terr = tx.Commit()\n\tif err != nil {\n\t\tctxlog.FromContext(ctx).WithError(err).Error(\"logActivity commit failed\")\n\t\treturn\n\t}\n\tlogged = true\n}\n\n// alignedPeriod computes a time interval that includes now and aligns\n// to local clock times that are multiples of p. For example, if local\n// time is UTC-5 and ActivityLoggingPeriod=4h, periodStart and\n// periodEnd will be 0000-0400, 0400-0800, etc., in local time. If p\n// is a multiple of 24h, periods will start and end at midnight.\n//\n// If DST starts or ends during this period, the boundaries will be\n// aligned based on either DST or non-DST time depending on whether\n// now is before or after the DST transition. 
The consequences are\n// presumed to be inconsequential, e.g., logActivity may unnecessarily\n// log activity more than once in a period that includes a DST\n// transition.\n//\n// In all cases, the period ends in the future.\n//\n// Only the end of the period is returned.\nfunc alignedPeriod(now time.Time, p time.Duration) time.Time {\n\t_, tzsec := now.Zone()\n\ttzoff := time.Duration(tzsec) * time.Second\n\tperiodStart := now.Add(tzoff).Truncate(p).Add(-tzoff)\n\treturn periodStart.Add(p)\n}\n"
  },
  {
    "path": "lib/controller/localdb/log_activity_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"database/sql\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&activityPeriodSuite{})\n\ntype activityPeriodSuite struct{}\n\n// The important thing is that, even when daylight savings time is\n// making things difficult, the current period ends in the future.\nfunc (*activityPeriodSuite) TestPeriod(c *check.C) {\n\ttoronto, err := time.LoadLocation(\"America/Toronto\")\n\tc.Assert(err, check.IsNil)\n\n\tformat := \"2006-01-02 15:04:05 MST\"\n\tdststartday, err := time.ParseInLocation(format, \"2022-03-13 00:00:00 EST\", toronto)\n\tc.Assert(err, check.IsNil)\n\tdstendday, err := time.ParseInLocation(format, \"2022-11-06 00:00:00 EDT\", toronto)\n\tc.Assert(err, check.IsNil)\n\n\tfor _, period := range []time.Duration{\n\t\ttime.Minute * 13,\n\t\ttime.Minute * 49,\n\t\ttime.Hour,\n\t\t4 * time.Hour,\n\t\t48 * time.Hour,\n\t} {\n\t\tfor offset := time.Duration(0); offset < 48*time.Hour; offset += 3 * time.Minute {\n\t\t\tt := dststartday.Add(offset)\n\t\t\tend := alignedPeriod(t, period)\n\t\t\tc.Check(end.After(t), check.Equals, true, check.Commentf(\"period %v offset %v\", period, offset))\n\n\t\t\tt = dstendday.Add(offset)\n\t\t\tend = alignedPeriod(t, period)\n\t\t\tc.Check(end.After(t), check.Equals, true, check.Commentf(\"period %v offset %v\", period, offset))\n\t\t}\n\t}\n}\n\nfunc (s *CollectionSuite) TestLogActivity(c *check.C) {\n\tstarttime := time.Now()\n\ts.localdb.activeUsersLock.Lock()\n\ts.localdb.activeUsersReset = starttime\n\ts.localdb.activeUsersLock.Unlock()\n\tfor i := 0; i < 2; i++ {\n\t\tlogthreshold := time.Now()\n\t\t_, err := s.localdb.CollectionCreate(s.userctx, arvados.CreateOptions{\n\t\t\tAttrs: map[string]interface{}{\n\t\t\t\t\"name\": \"test collection\",\n\t\t\t},\n\t\t\tEnsureUniqueName: true,\n\t\t})\n\t\tc.Assert(err, check.IsNil)\n\t\tvar uuid string\n\t\terr = s.db.QueryRowContext(s.ctx, `select uuid from logs where object_uuid = $1 and event_at > $2`, arvadostest.ActiveUserUUID, logthreshold.UTC()).Scan(&uuid)\n\t\tif i == 0 {\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tc.Check(uuid, check.HasLen, 27)\n\t\t} else {\n\t\t\tc.Check(err, check.Equals, sql.ErrNoRows)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/controller/localdb/login.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"database/sql\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\ntype loginController interface {\n\tLogin(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error)\n\tLogout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error)\n\tUserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error)\n}\n\nfunc chooseLoginController(cluster *arvados.Cluster, parent *Conn) loginController {\n\twantGoogle := cluster.Login.Google.Enable\n\twantOpenIDConnect := cluster.Login.OpenIDConnect.Enable\n\twantPAM := cluster.Login.PAM.Enable\n\twantLDAP := cluster.Login.LDAP.Enable\n\twantTest := cluster.Login.Test.Enable\n\twantLoginCluster := cluster.Login.LoginCluster != \"\" && cluster.Login.LoginCluster != cluster.ClusterID\n\tswitch {\n\tcase 1 != countTrue(wantGoogle, wantOpenIDConnect, wantPAM, wantLDAP, wantTest, wantLoginCluster):\n\t\treturn errorLoginController{\n\t\t\terror: errors.New(\"configuration problem: exactly one of Login.Google, Login.OpenIDConnect, Login.PAM, Login.LDAP, Login.Test, or Login.LoginCluster must be set\"),\n\t\t}\n\tcase wantGoogle:\n\t\treturn &oidcLoginController{\n\t\t\tCluster:            cluster,\n\t\t\tParent:             parent,\n\t\t\tIssuer:             \"https://accounts.google.com\",\n\t\t\tClientID:           cluster.Login.Google.ClientID,\n\t\t\tClientSecret:       cluster.Login.Google.ClientSecret,\n\t\t\tAuthParams:         cluster.Login.Google.AuthenticationRequestParameters,\n\t\t\tUseGooglePeopleAPI: cluster.Login.Google.AlternateEmailAddresses,\n\t\t\tEmailClaim:         \"email\",\n\t\t\tEmailVerifiedClaim: \"email_verified\",\n\t\t}\n\tcase wantOpenIDConnect:\n\t\treturn &oidcLoginController{\n\t\t\tCluster:                cluster,\n\t\t\tParent:                 parent,\n\t\t\tIssuer:                 cluster.Login.OpenIDConnect.Issuer,\n\t\t\tClientID:               cluster.Login.OpenIDConnect.ClientID,\n\t\t\tClientSecret:           cluster.Login.OpenIDConnect.ClientSecret,\n\t\t\tAuthParams:             cluster.Login.OpenIDConnect.AuthenticationRequestParameters,\n\t\t\tEmailClaim:             cluster.Login.OpenIDConnect.EmailClaim,\n\t\t\tEmailVerifiedClaim:     cluster.Login.OpenIDConnect.EmailVerifiedClaim,\n\t\t\tUsernameClaim:          cluster.Login.OpenIDConnect.UsernameClaim,\n\t\t\tAcceptAccessToken:      cluster.Login.OpenIDConnect.AcceptAccessToken,\n\t\t\tAcceptAccessTokenScope: cluster.Login.OpenIDConnect.AcceptAccessTokenScope,\n\t\t}\n\tcase wantPAM:\n\t\treturn &pamLoginController{Cluster: cluster, Parent: parent}\n\tcase wantLDAP:\n\t\treturn &ldapLoginController{Cluster: cluster, Parent: parent}\n\tcase wantTest:\n\t\treturn &testLoginController{Cluster: cluster, Parent: parent}\n\tcase wantLoginCluster:\n\t\treturn &federatedLoginController{Cluster: cluster}\n\tdefault:\n\t\treturn errorLoginController{\n\t\t\terror: errors.New(\"BUG: missing case in login controller setup switch\"),\n\t\t}\n\t}\n}\n\nfunc countTrue(vals ...bool) int {\n\tn := 0\n\tfor _, 
val := range vals {\n\t\tif val {\n\t\t\tn++\n\t\t}\n\t}\n\treturn n\n}\n\ntype errorLoginController struct{ error }\n\nfunc (ctrl errorLoginController) Login(context.Context, arvados.LoginOptions) (arvados.LoginResponse, error) {\n\treturn arvados.LoginResponse{}, ctrl.error\n}\nfunc (ctrl errorLoginController) Logout(context.Context, arvados.LogoutOptions) (arvados.LogoutResponse, error) {\n\treturn arvados.LogoutResponse{}, ctrl.error\n}\nfunc (ctrl errorLoginController) UserAuthenticate(context.Context, arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {\n\treturn arvados.APIClientAuthorization{}, ctrl.error\n}\n\ntype federatedLoginController struct {\n\tCluster *arvados.Cluster\n}\n\nfunc (ctrl federatedLoginController) Login(context.Context, arvados.LoginOptions) (arvados.LoginResponse, error) {\n\treturn arvados.LoginResponse{}, httpserver.ErrorWithStatus(errors.New(\"Should have been redirected to login cluster\"), http.StatusBadRequest)\n}\nfunc (ctrl federatedLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {\n\treturn logout(ctx, ctrl.Cluster, opts)\n}\nfunc (ctrl federatedLoginController) UserAuthenticate(context.Context, arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {\n\treturn arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New(\"username/password authentication is not available\"), http.StatusBadRequest)\n}\n\nfunc (conn *Conn) CreateAPIClientAuthorization(ctx context.Context, rootToken string, authinfo rpc.UserSessionAuthInfo) (resp arvados.APIClientAuthorization, err error) {\n\tif rootToken == \"\" {\n\t\treturn arvados.APIClientAuthorization{}, errors.New(\"configuration error: empty SystemRootToken\")\n\t}\n\tctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{rootToken}})\n\tnewsession, err := conn.railsProxy.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{\n\t\t// Send a fake ReturnTo value instead of the caller's\n\t\t// opts.ReturnTo. 
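(All we need from\n\t\t// the new session is the api_token query parameter that we\n\t\t// parse out of the resulting redirect URL below.) 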
We won't follow the resulting\n\t\t// redirect target anyway.\n\t\tReturnTo: \",https://controller.api.client.invalid\",\n\t\tAuthInfo: authinfo,\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\ttarget, err := url.Parse(newsession.RedirectLocation)\n\tif err != nil {\n\t\treturn\n\t}\n\ttoken := target.Query().Get(\"api_token\")\n\ttx, err := ctrlctx.CurrentTx(ctx)\n\tif err != nil {\n\t\treturn\n\t}\n\ttokensecret := token\n\tif strings.Contains(token, \"/\") {\n\t\ttokenparts := strings.Split(token, \"/\")\n\t\tif len(tokenparts) >= 3 {\n\t\t\ttokensecret = tokenparts[2]\n\t\t}\n\t}\n\tvar exp sql.NullTime\n\tvar scopes []byte\n\terr = tx.QueryRowxContext(ctx, \"select uuid, api_token, expires_at, scopes from api_client_authorizations where api_token=$1\", tokensecret).Scan(&resp.UUID, &resp.APIToken, &exp, &scopes)\n\tif err != nil {\n\t\treturn\n\t}\n\tresp.ExpiresAt = exp.Time\n\tif len(scopes) > 0 {\n\t\terr = json.Unmarshal(scopes, &resp.Scopes)\n\t\tif err != nil {\n\t\t\treturn resp, fmt.Errorf(\"unmarshal scopes: %s\", err)\n\t\t}\n\t}\n\treturn\n}\n\nvar errUserinfoInRedirectTarget = errors.New(\"redirect target rejected because it contains userinfo\")\n\nfunc validateLoginRedirectTarget(cluster *arvados.Cluster, returnTo string) error {\n\tu, err := url.Parse(returnTo)\n\tif err != nil {\n\t\treturn err\n\t}\n\tu, err = u.Parse(\"/\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif u.User != nil {\n\t\treturn errUserinfoInRedirectTarget\n\t}\n\ttarget := origin(*u)\n\tfor trusted := range cluster.Login.TrustedClients {\n\t\ttrustedOrigin := origin(url.URL(trusted))\n\t\tif trustedOrigin == target {\n\t\t\treturn nil\n\t\t}\n\t\t// If TrustedClients has https://*.bar.example, we\n\t\t// trust https://foo.bar.example. Note origin() has\n\t\t// already stripped the incoming Path, so we won't\n\t\t// accidentally trust\n\t\t// https://attacker.example/pwn.bar.example here. See\n\t\t// tests.\n\t\tif strings.HasPrefix(trustedOrigin, u.Scheme+\"://*.\") && strings.HasSuffix(target, trustedOrigin[len(u.Scheme)+4:]) {\n\t\t\treturn nil\n\t\t}\n\t}\n\tif target == origin(url.URL(cluster.Services.Workbench1.ExternalURL)) ||\n\t\ttarget == origin(url.URL(cluster.Services.Workbench2.ExternalURL)) {\n\t\treturn nil\n\t}\n\tif cluster.Login.TrustPrivateNetworks {\n\t\tif u.Hostname() == \"localhost\" {\n\t\t\treturn nil\n\t\t}\n\t\tif ip := net.ParseIP(u.Hostname()); len(ip) > 0 {\n\t\t\tfor _, n := range privateNetworks {\n\t\t\t\tif n.Contains(ip) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn fmt.Errorf(\"requesting site is not listed in TrustedClients config\")\n}\n\n// origin returns the canonical origin of a URL, e.g.,\n// origin(\"https://example:443/foo\") returns \"https://example/\"\nfunc origin(u url.URL) string {\n\torigin := url.URL{\n\t\tScheme: u.Scheme,\n\t\tHost:   u.Host,\n\t\tPath:   \"/\",\n\t}\n\tif origin.Port() == \"80\" && origin.Scheme == \"http\" {\n\t\torigin.Host = origin.Hostname()\n\t} else if origin.Port() == \"443\" && origin.Scheme == \"https\" {\n\t\torigin.Host = origin.Hostname()\n\t}\n\treturn origin.String()\n}\n"
  },
  {
    "path": "lib/controller/localdb/login_docker_test/add_example_user.ldif",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ndn: cn=bar,dc=example,dc=org\nobjectClass: posixGroup\nobjectClass: top\ncn: bar\ngidNumber: 11111\ndescription: \"Example group 'bar'\"\n\ndn: uid=foo-bar,dc=example,dc=org\nuid: foo-bar\ncn: \"Foo Bar\"\ngivenName: Foo\nsn: Bar\nmail: foo-bar-baz@example.com\nobjectClass: inetOrgPerson\nobjectClass: posixAccount\nobjectClass: top\nobjectClass: shadowAccount\nshadowMax: -1\nshadowMin: 1\nshadowWarning: 7\nshadowLastChange: 10701\nloginShell: /bin/bash\nuidNumber: 11111\ngidNumber: 11111\nhomeDirectory: /home/foo-bar\nuserPassword: ${passwordhash}\n\ndn: uid=expired,dc=example,dc=org\nuid: expired\ncn: \"Exp Ired\"\ngivenName: Exp\nsn: Ired\nmail: expired@example.com\nobjectClass: inetOrgPerson\nobjectClass: posixAccount\nobjectClass: top\nobjectClass: shadowAccount\nshadowMax: 180\nshadowMin: 1\nshadowWarning: 7\nshadowLastChange: 10701\nloginShell: /bin/bash\nuidNumber: 11112\ngidNumber: 11111\nhomeDirectory: /home/expired\nuserPassword: ${passwordhash}\n"
  },
  {
    "path": "lib/controller/localdb/login_docker_test/run_controller.sh",
    "content": "#!/bin/bash\n#\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n#\n# This script is the entrypoint for test containers. If the test mounts a\n# /setup.sh script in the container, it runs that first, then starts the\n# controller.\n\nset -e\nset -u\nset -o pipefail\n\nif [[ -e /setup.sh ]]; then\n    . /setup.sh\nfi\nexec arvados-server controller\n"
  },
  {
    "path": "lib/controller/localdb/login_docker_test/setup_pam_test.sh",
    "content": "#!/bin/bash\n#\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n#\n# This script should be mounted in the PAM test controller at /setup.sh.\n# It creates the user account fixtures necessary for the test in passwd.\n\nset -e\nset -u\nset -o pipefail\n\nuseradd --no-create-home foo-bar\nuseradd --no-create-home expired\nchpasswd <<EOF\nfoo-bar:secret\nexpired:secret\nEOF\nusermod --expiredate 1970-01-07 expired\n"
  },
  {
    "path": "lib/controller/localdb/login_docker_test/setup_suite.sh",
    "content": "#!/bin/bash\n#\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n#\n# This script determines and records what image should be used for test\n# controller containers, then starts an LDAP server and adds user account\n# fixtures to it.\n\nset -e\nset -u\nset -o pipefail\n\ncd \"$(dirname \"$0\")\"\nnet_name=\"$1\"; shift\ntmpdir=\"$1\"; shift\n\n. /etc/os-release\ncase \"$ID\" in\n    debian|ubuntu)\n        controller_image=\"$ID:$VERSION_ID\"\n        ;;\n    *)\n        echo >&2 \"don't know what Docker image corresponds to $NAME $VERSION\"\n        exit 3  # EXIT_NOTIMPLEMENTED\n        ;;\nesac\n# Pull the image if we don't have it already\ndocker run --rm \"$controller_image\" true\necho \"$controller_image\" >\"$tmpdir/controller_image\"\n\ngo build -o \"${tmpdir}\" ../../../../cmd/arvados-server\n\ndocker run --rm --detach \\\n       --name=arvados-test-openldap \\\n       --network=\"$net_name\" \\\n       bitnamilegacy/openldap:2.6\n\nawk -v passhash=\"$(docker exec -i arvados-test-openldap slappasswd -s \"secret\")\" -- '\n($1 == \"userPassword:\") { $2 = passhash; }\n{ print; }\n' add_example_user.ldif >\"$tmpdir/add_example_user.ldif\"\n\ndocker run --rm \\\n       --entrypoint=/setup_suite_users.sh \\\n       --network=\"$net_name\" \\\n       -v \"$PWD/setup_suite_users.sh\":/setup_suite_users.sh:ro \\\n       -v \"${tmpdir}/add_example_user.ldif\":/add_example_user.ldif:ro \\\n       bitnamilegacy/openldap:2.6\n"
  },
  {
    "path": "lib/controller/localdb/login_docker_test/setup_suite_users.sh",
    "content": "#!/bin/bash\n#\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n#\n# This script is the entrypoint for a container run by setup_suite.sh to create\n# user account fixtures in LDAP.\n\nset -e\nset -u\nset -o pipefail\n\nresult=0\nfor tries in $(seq 9 -1 0); do\n    ldapadd \\\n        -H ldap://arvados-test-openldap:1389/ \\\n        -D cn=admin,dc=example,dc=org \\\n        -w adminpassword \\\n        -f /add_example_user.ldif ||\n        result=$?\n    # ldapadd uses exit code 68 to mean \"user already exists.\"\n    if [[ \"$result\" = 0 ]] || [[ \"$result\" = 68 ]]; then\n        exit 0\n    elif [[ \"$tries\" != 0 ]]; then\n        sleep 1\n    fi\ndone\n\necho 'error: failed to add user entry' >&2\nexit \"$result\"\n"
  },
  {
    "path": "lib/controller/localdb/login_docker_test/start_controller_container.sh",
    "content": "#!/bin/bash\n#\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n#\n# This script starts a test controller container, waits for it to respond, then\n# writes its IP address on stdout. It requires the Docker network name and test\n# temporary directory path as arguments. Additional arguments are passed through\n# to `docker run` so you can, e.g., mount additional files like `/setup.sh`.\n\nset -e\nset -u\nset -o pipefail\n\nnet_name=\"$1\"; shift\ntmpdir=\"$1\"; shift\nselfdir=\"$(readlink -e \"$(dirname \"$0\")\")\"\n\ndocker run --detach --rm \\\n       --cidfile=\"$tmpdir/controller.cid\" \\\n       --entrypoint=/run.sh \\\n       --network=\"$net_name\" \\\n       -v \"${tmpdir}/arvados.yml\":/etc/arvados/config.yml:ro \\\n       -v \"${tmpdir}/arvados-server\":/bin/arvados-server:ro \\\n       -v \"$(readlink -e ../../..)\":/arvados:ro \\\n       -v \"${selfdir}/run_controller.sh\":/run.sh:ro \\\n       \"$@\" \"$(cat \"$tmpdir/controller_image\")\"\n\ncont_addr=\"$(xargs -a \"$tmpdir/controller.cid\" docker inspect --format \"{{(index .NetworkSettings.Networks \\\"${net_name}\\\").IPAddress}}\")\"\ncont_url=\"http://${cont_addr}/arvados/v1/config\"\nfor tries in $(seq 19 -1 0); do\n    if curl -fsL \"$cont_url\" >/dev/null; then\n        # Write the container address for the Go test code to record.\n        # We had to get it here anyway so we might as well pass it up.\n        echo \"$cont_addr\"\n        exit\n    elif [[ \"$tries\" != 0 ]]; then\n        sleep 1\n    fi\ndone\n\necho \"error: controller did not come up\" >&2\nexit 7  # EXIT_NOTRUNNING\n"
  },
  {
    "path": "lib/controller/localdb/login_docker_test/teardown_suite.sh",
    "content": "#!/bin/bash\n#\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n#\n# This script stops all Docker containers running on the named network, then\n# removes the network itself.\n\nset -e\nset -u\nset -o pipefail\n\nnet_name=\"$1\"; shift\n\ndocker network inspect \"$net_name\" |\n    jq -r 'map(.Containers | keys) | flatten | join(\"\\n\")' |\n    xargs -r -d\\\\n docker stop\ndocker network rm \"$net_name\"\n"
  },
  {
    "path": "lib/controller/localdb/login_docker_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"slices\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&LoginDockerSuite{})\n\n// LoginDockerSuite is an integration test of controller's different Login\n// methods.  Each test creates a different Login configuration and runs\n// controller in a Docker container with it. It runs other Docker containers\n// for supporting services.\ntype LoginDockerSuite struct {\n\tlocaldbSuite\n\ttmpdir     string\n\tnetName    string\n\tnetAddr    string\n\tpgProxy    *tcpProxy\n\trailsProxy *tcpProxy\n}\n\nfunc (s *LoginDockerSuite) setUpDockerNetwork() (string, error) {\n\tnetName := \"arvados-net-\" + path.Base(path.Dir(s.tmpdir))\n\tcmd := exec.Command(\"docker\", \"network\", \"create\", netName)\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn netName, nil\n}\n\n// Run cmd and read stdout looking for an IP address on a line by itself.\n// Return the last one found.\nfunc (s *LoginDockerSuite) ipFromCmd(cmd *exec.Cmd) (string, error) {\n\tcmd.Stderr = os.Stderr\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tlines := bytes.Split(out, []byte{'\\n'})\n\tslices.Reverse(lines)\n\tfor _, line := range lines {\n\t\tif ip := net.ParseIP(string(line)); ip != nil {\n\t\t\treturn ip.String(), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"no IP address found in the output of %v\", cmd)\n}\n\n// SetUpSuite creates a Docker network, starts an openldap server in it, and\n// creates user account fixtures in LDAP.\n// We used to use the LDAP server for multiple tests. We don't currently, but\n// there are pros and cons to starting it here vs. 
in each individual test, so\n// it's staying here for now.\nfunc (s *LoginDockerSuite) SetUpSuite(c *check.C) {\n\ts.localdbSuite.SetUpSuite(c)\n\ts.tmpdir = c.MkDir()\n\tvar err error\n\ts.netName, err = s.setUpDockerNetwork()\n\tc.Assert(err, check.IsNil)\n\ts.netAddr, err = s.ipFromCmd(exec.Command(\"docker\", \"network\", \"inspect\",\n\t\t\"--format\", \"{{(index .IPAM.Config 0).Gateway}}\", s.netName))\n\tc.Assert(err, check.IsNil)\n\tsetup := exec.Command(\"login_docker_test/setup_suite.sh\", s.netName, s.tmpdir)\n\tsetup.Stderr = os.Stderr\n\terr = setup.Run()\n\tc.Assert(err, check.IsNil)\n}\n\n// TearDownSuite stops all containers running on the Docker network we set up,\n// then deletes the network itself.\nfunc (s *LoginDockerSuite) TearDownSuite(c *check.C) {\n\tif s.netName != \"\" {\n\t\tcmd := exec.Command(\"login_docker_test/teardown_suite.sh\", s.netName)\n\t\tcmd.Stderr = os.Stderr\n\t\terr := cmd.Run()\n\t\tc.Check(err, check.IsNil)\n\t}\n\ts.localdbSuite.TearDownSuite(c)\n}\n\n// Create a test cluster configuration in the test temporary directory.\n// Update it to use the current PostgreSQL and RailsAPI proxies.\nfunc (s *LoginDockerSuite) setUpConfig(c *check.C) {\n\tsrc, err := os.Open(os.Getenv(\"ARVADOS_CONFIG\"))\n\tc.Assert(err, check.IsNil)\n\tdefer src.Close()\n\tdst, err := os.Create(path.Join(s.tmpdir, \"arvados.yml\"))\n\tc.Assert(err, check.IsNil)\n\t_, err = io.Copy(dst, src)\n\tcloseErr := dst.Close()\n\tc.Assert(err, check.IsNil)\n\tc.Assert(closeErr, check.IsNil)\n\n\tpgconn := map[string]interface{}{\n\t\t\"host\": s.netAddr,\n\t\t\"port\": s.pgProxy.Port(),\n\t}\n\terr = s.updateConfig(\".Clusters.zzzzz.PostgreSQL.Connection |= (. * $arg)\", pgconn)\n\tc.Assert(err, check.IsNil)\n\tintVal := make(map[string]string)\n\tintURLs := make(map[string]interface{})\n\trailsURL := \"https://\" + net.JoinHostPort(s.netAddr, s.railsProxy.Port())\n\tintURLs[railsURL] = intVal\n\terr = s.updateConfig(\".Clusters.zzzzz.Services.RailsAPI.InternalURLs = $arg\", intURLs)\n\tc.Assert(err, check.IsNil)\n\tintURLs = make(map[string]interface{})\n\tintURLs[\"http://0.0.0.0:80\"] = intVal\n\terr = s.updateConfig(\".Clusters.zzzzz.Services.Controller.InternalURLs = $arg\", intURLs)\n\tc.Assert(err, check.IsNil)\n}\n\n// Update the test cluster configuration with the given yq expression.\n// The expression can use `$arg` to refer to the object passed in as `arg`.\nfunc (s *LoginDockerSuite) updateConfig(expr string, arg map[string]interface{}) error {\n\tjsonArg, err := json.Marshal(arg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd := exec.Command(\"yq\", \"-yi\",\n\t\t\"--argjson\", \"arg\", string(jsonArg),\n\t\texpr, path.Join(s.tmpdir, \"arvados.yml\"))\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}\n\n// Update the test cluster configuration to use the named login method.\nfunc (s *LoginDockerSuite) enableLogin(key string) error {\n\tlogin := make(map[string]interface{})\n\tlogin[\"Test\"] = map[string]bool{\"Enable\": false}\n\tlogin[key] = map[string]bool{\"Enable\": true}\n\treturn s.updateConfig(\".Clusters.zzzzz.Login |= (. 
* $arg)\", login)\n}\n\n// SetUpTest does all the common preparation for a controller test container:\n// it creates TCP proxies for PostgreSQL and RailsAPI on the test host, then\n// writes a new Arvados cluster configuration that points servers at those\n// proxies.\nfunc (s *LoginDockerSuite) SetUpTest(c *check.C) {\n\ts.localdbSuite.SetUpTest(c)\n\ts.pgProxy = newPgProxy(c, s.cluster, s.netAddr)\n\ts.railsProxy = newInternalProxy(c, s.cluster.Services.RailsAPI, s.netAddr)\n\ts.setUpConfig(c)\n}\n\n// TearDownTest looks for the `controller.cid` file created when we start the\n// test container. If found, it stops that container and deletes the file.\n// Then it closes the TCP proxies created by SetUpTest.\nfunc (s *LoginDockerSuite) TearDownTest(c *check.C) {\n\tcidPath := path.Join(s.tmpdir, \"controller.cid\")\n\tif cid, err := os.ReadFile(cidPath); err == nil {\n\t\tcmd := exec.Command(\"docker\", \"stop\", strings.TrimSpace(string(cid)))\n\t\tcmd.Stderr = os.Stderr\n\t\terr := cmd.Run()\n\t\tc.Check(err, check.IsNil)\n\t}\n\tif err := os.Remove(cidPath); err != nil {\n\t\tc.Check(os.IsNotExist(err), check.Equals, true)\n\t}\n\ts.railsProxy.Close()\n\ts.pgProxy.Close()\n\ts.localdbSuite.TearDownTest(c)\n}\n\n// startController runs start_controller_container.sh, passing any extra\n// arguments through to `docker run`, and returns the base URL of the new\n// controller container.\nfunc (s *LoginDockerSuite) startController(args ...string) (*url.URL, error) {\n\targs = append([]string{s.netName, s.tmpdir}, args...)\n\tcmd := exec.Command(\"login_docker_test/start_controller_container.sh\", args...)\n\tip, err := s.ipFromCmd(cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &url.URL{\n\t\tScheme: \"http\",\n\t\tHost:   ip,\n\t}, nil\n}\n\n// parseResponse decodes a successful JSON response into body. For error\n// responses, it converts the {\"errors\": [...]} document into a Go error.\nfunc (s *LoginDockerSuite) parseResponse(resp *http.Response, body any) error {\n\tdefer resp.Body.Close()\n\trespBody, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode < 400 {\n\t\treturn json.Unmarshal(respBody, body)\n\t}\n\tvar errResp struct {\n\t\tErrors []string\n\t}\n\terr = json.Unmarshal(respBody, &errResp)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s with malformed JSON response: %w\", resp.Status, err)\n\t} else if len(errResp.Errors) == 0 {\n\t\treturn fmt.Errorf(\"%s with no Errors in response\", resp.Status)\n\t} else {\n\t\treturn fmt.Errorf(\"%s: %s\", resp.Status, strings.Join(errResp.Errors, \":\"))\n\t}\n}\n\n// authenticate exchanges a username and password for an Arvados token by\n// posting to the authenticate endpoint of the controller at server.\nfunc (s *LoginDockerSuite) authenticate(server *url.URL, username, password string) (*arvados.APIClientAuthorization, error) {\n\treqURL := server.JoinPath(\"/arvados/v1/users/authenticate\").String()\n\treqValues := url.Values{\n\t\t\"username\": {username},\n\t\t\"password\": {password},\n\t}\n\tresp, err := http.PostForm(reqURL, reqValues)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttoken := &arvados.APIClientAuthorization{}\n\terr = s.parseResponse(resp, token)\n\treturn token, err\n}\n\n// getCurrentUser fetches the user record associated with token from the\n// controller at server.\nfunc (s *LoginDockerSuite) getCurrentUser(server *url.URL, token string) (*arvados.User, error) {\n\treqURL := server.JoinPath(\"/arvados/v1/users/current\").String()\n\treq, err := http.NewRequest(\"GET\", reqURL, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Authorization\", \"Bearer \"+token)\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tuser := &arvados.User{}\n\terr = s.parseResponse(resp, user)\n\treturn user, err\n}\n\nfunc (s *LoginDockerSuite) TestLoginPAM(c *check.C) {\n\terr := s.enableLogin(\"PAM\")\n\tc.Assert(err, check.IsNil)\n\tsetupPath, err := filepath.Abs(\"login_docker_test/setup_pam_test.sh\")\n\tc.Assert(err, check.IsNil)\n\tarvURL, err := s.startController(\"-v\", 
setupPath+\":/setup.sh:ro\")\n\tc.Assert(err, check.IsNil)\n\n\t_, err = s.authenticate(arvURL, \"foo-bar\", \"nosecret\")\n\tc.Check(err, check.ErrorMatches,\n\t\t`401 Unauthorized: PAM: Authentication failure \\(with username \"foo-bar\" and password\\)`)\n\n\t_, err = s.authenticate(arvURL, \"expired\", \"secret\")\n\tc.Check(err, check.ErrorMatches,\n\t\t`401 Unauthorized: PAM: Authentication failure; \"Your account has expired; please contact your system administrator\\.\"`)\n\n\taca, err := s.authenticate(arvURL, \"foo-bar\", \"secret\")\n\tif c.Check(err, check.IsNil) {\n\t\tuser, err := s.getCurrentUser(arvURL, aca.TokenV2())\n\t\tif c.Check(err, check.IsNil) {\n\t\t\t// Check PAMDefaultEmailDomain was propagated as expected\n\t\t\tc.Check(user.Email, check.Equals, \"foo-bar@example.com\")\n\t\t}\n\t}\n}\n\nfunc (s *LoginDockerSuite) TestLoginLDAPBuiltin(c *check.C) {\n\terr := s.enableLogin(\"LDAP\")\n\tc.Assert(err, check.IsNil)\n\tarvURL, err := s.startController()\n\tc.Assert(err, check.IsNil)\n\n\t_, err = s.authenticate(arvURL, \"foo-bar\", \"nosecret\")\n\tc.Check(err, check.ErrorMatches,\n\t\t`401 Unauthorized: LDAP: Authentication failure \\(with username \"foo-bar\" and password\\)`)\n\n\taca, err := s.authenticate(arvURL, \"foo-bar\", \"secret\")\n\tif c.Check(err, check.IsNil) {\n\t\tuser, err := s.getCurrentUser(arvURL, aca.TokenV2())\n\t\tif c.Check(err, check.IsNil) {\n\t\t\t// User fields come from LDAP attributes\n\t\t\tc.Check(user.FirstName, check.Equals, \"Foo\")\n\t\t\tc.Check(user.LastName, check.Equals, \"Bar\")\n\t\t\t// \"-\" character removed by RailsAPI\n\t\t\tc.Check(user.Username, check.Equals, \"foobar\")\n\t\t\tc.Check(user.Email, check.Equals, \"foo-bar-baz@example.com\")\n\t\t}\n\t}\n}\n"
  },
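  {
    "path": "doc/examples/users_authenticate_flow.go",
    "content": "// Illustrative sketch added by the editor; not upstream Arvados code.\n// It condenses the two-step username/password flow that LoginDockerSuite\n// exercises above: POST credentials to /arvados/v1/users/authenticate,\n// then use the resulting \"v2/<uuid>/<secret>\" token as a Bearer\n// credential.  The base URL and credentials are placeholders; endpoint\n// paths, JSON field names, and the token format follow the test code.\npackage main\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n)\n\nfunc main() {\n\tbase := \"http://192.0.2.10\" // placeholder controller address\n\n\t// Step 1: exchange username/password for an Arvados token.\n\tresp, err := http.PostForm(base+\"/arvados/v1/users/authenticate\", url.Values{\n\t\t\"username\": {\"foo-bar\"},\n\t\t\"password\": {\"secret\"},\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\tvar aca struct {\n\t\tUUID     string `json:\"uuid\"`\n\t\tAPIToken string `json:\"api_token\"`\n\t}\n\tif err := json.NewDecoder(resp.Body).Decode(&aca); err != nil {\n\t\tpanic(err)\n\t}\n\ttoken := fmt.Sprintf(\"v2/%s/%s\", aca.UUID, aca.APIToken)\n\n\t// Step 2: use the token to fetch the authenticated user record.\n\treq, err := http.NewRequest(\"GET\", base+\"/arvados/v1/users/current\", nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treq.Header.Add(\"Authorization\", \"Bearer \"+token)\n\tresp2, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp2.Body.Close()\n\tvar user struct {\n\t\tEmail string `json:\"email\"`\n\t}\n\tif err := json.NewDecoder(resp2.Body).Decode(&user); err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"authenticated as\", user.Email)\n}\n"
  },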
  {
    "path": "lib/controller/localdb/login_ldap.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/go-ldap/ldap\"\n)\n\ntype ldapLoginController struct {\n\tCluster *arvados.Cluster\n\tParent  *Conn\n}\n\nfunc (ctrl *ldapLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {\n\treturn logout(ctx, ctrl.Cluster, opts)\n}\n\nfunc (ctrl *ldapLoginController) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {\n\treturn arvados.LoginResponse{}, errors.New(\"interactive login is not available\")\n}\n\nfunc (ctrl *ldapLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {\n\tlog := ctxlog.FromContext(ctx)\n\tconf := ctrl.Cluster.Login.LDAP\n\terrFailed := httpserver.ErrorWithStatus(fmt.Errorf(\"LDAP: Authentication failure (with username %q and password)\", opts.Username), http.StatusUnauthorized)\n\n\tif conf.SearchAttribute == \"\" {\n\t\treturn arvados.APIClientAuthorization{}, errors.New(\"config error: SearchAttribute is blank\")\n\t}\n\tif opts.Password == \"\" {\n\t\tlog.WithField(\"username\", opts.Username).Error(\"refusing to authenticate with empty password\")\n\t\treturn arvados.APIClientAuthorization{}, errFailed\n\t}\n\n\tlog = log.WithField(\"URL\", conf.URL.String())\n\tvar l *ldap.Conn\n\tvar err error\n\tif conf.URL.Scheme == \"ldaps\" {\n\t\t// ldap.DialURL does not currently allow us to control\n\t\t// tls.Config, so we need to figure out the port\n\t\t// ourselves and call DialTLS.\n\t\t// Use a separate variable for the SplitHostPort error so the\n\t\t// DialTLS error below is assigned to the outer err instead of\n\t\t// a shadowed copy (otherwise a dial failure would be silently\n\t\t// dropped and l.Close() would panic on a nil connection).\n\t\thost, port, splitErr := net.SplitHostPort(conf.URL.Host)\n\t\tif splitErr != nil {\n\t\t\t// Assume error means no port given\n\t\t\thost = conf.URL.Host\n\t\t\tport = ldap.DefaultLdapsPort\n\t\t}\n\t\tl, err = ldap.DialTLS(\"tcp\", net.JoinHostPort(host, port), &tls.Config{\n\t\t\tServerName: host,\n\t\t\tMinVersion: uint16(conf.MinTLSVersion),\n\t\t})\n\t} else {\n\t\tl, err = ldap.DialURL(conf.URL.String())\n\t}\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"ldap connection failed\")\n\t\treturn arvados.APIClientAuthorization{}, err\n\t}\n\tdefer l.Close()\n\n\tif conf.StartTLS {\n\t\tvar tlsconfig tls.Config\n\t\ttlsconfig.MinVersion = uint16(conf.MinTLSVersion)\n\t\tif conf.InsecureTLS {\n\t\t\ttlsconfig.InsecureSkipVerify = true\n\t\t} else {\n\t\t\tif host, _, err := net.SplitHostPort(conf.URL.Host); err != nil {\n\t\t\t\t// Assume SplitHostPort error means\n\t\t\t\t// port was not specified\n\t\t\t\ttlsconfig.ServerName = conf.URL.Host\n\t\t\t} else {\n\t\t\t\ttlsconfig.ServerName = host\n\t\t\t}\n\t\t}\n\t\terr = l.StartTLS(&tlsconfig)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"ldap starttls failed\")\n\t\t\treturn arvados.APIClientAuthorization{}, err\n\t\t}\n\t}\n\n\tusername := opts.Username\n\tif at := strings.Index(username, \"@\"); at >= 0 {\n\t\tif conf.StripDomain == \"*\" || strings.ToLower(conf.StripDomain) == strings.ToLower(username[at+1:]) {\n\t\t\tusername = username[:at]\n\t\t}\n\t}\n\tif conf.AppendDomain != \"\" && !strings.Contains(username, \"@\") {\n\t\tusername = username + \"@\" + conf.AppendDomain\n\t}\n\n\tif 
conf.SearchBindUser != \"\" {\n\t\terr = l.Bind(conf.SearchBindUser, conf.SearchBindPassword)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).WithField(\"user\", conf.SearchBindUser).Error(\"ldap authentication failed\")\n\t\t\treturn arvados.APIClientAuthorization{}, err\n\t\t}\n\t}\n\n\tsearch := fmt.Sprintf(\"(%s=%s)\", ldap.EscapeFilter(conf.SearchAttribute), ldap.EscapeFilter(username))\n\tif conf.SearchFilters != \"\" {\n\t\tsearch = fmt.Sprintf(\"(&%s%s)\", conf.SearchFilters, search)\n\t}\n\tlog = log.WithField(\"search\", search)\n\treq := ldap.NewSearchRequest(\n\t\tconf.SearchBase,\n\t\tldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 1, 0, false,\n\t\tsearch,\n\t\t[]string{\"DN\", \"givenName\", \"SN\", conf.EmailAttribute, conf.UsernameAttribute},\n\t\tnil)\n\tresp, err := l.Search(req)\n\tif ldap.IsErrorWithCode(err, ldap.LDAPResultNoResultsReturned) ||\n\t\tldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) ||\n\t\t(err == nil && len(resp.Entries) == 0) {\n\t\tlog.WithError(err).Info(\"ldap lookup returned no results\")\n\t\treturn arvados.APIClientAuthorization{}, errFailed\n\t} else if err != nil {\n\t\tlog.WithError(err).Error(\"ldap lookup failed\")\n\t\treturn arvados.APIClientAuthorization{}, err\n\t}\n\tuserdn := resp.Entries[0].DN\n\tif userdn == \"\" {\n\t\tlog.Warn(\"refusing to authenticate with empty dn\")\n\t\treturn arvados.APIClientAuthorization{}, errFailed\n\t}\n\tlog = log.WithField(\"DN\", userdn)\n\n\tattrs := map[string]string{}\n\tfor _, attr := range resp.Entries[0].Attributes {\n\t\tif attr == nil || len(attr.Values) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tattrs[strings.ToLower(attr.Name)] = attr.Values[0]\n\t}\n\tlog.WithField(\"attrs\", attrs).Debug(\"ldap search succeeded\")\n\n\t// Now that we have the DN, try authenticating.\n\terr = l.Bind(userdn, opts.Password)\n\tif err != nil {\n\t\tlog.WithError(err).Info(\"ldap user authentication failed\")\n\t\treturn arvados.APIClientAuthorization{}, errFailed\n\t}\n\tlog.Debug(\"ldap authentication succeeded\")\n\n\temail := attrs[strings.ToLower(conf.EmailAttribute)]\n\tif email == \"\" {\n\t\tlog.Errorf(\"ldap returned no email address in %q attribute\", conf.EmailAttribute)\n\t\treturn arvados.APIClientAuthorization{}, errors.New(\"authentication succeeded but ldap returned no email address\")\n\t}\n\n\treturn ctrl.Parent.CreateAPIClientAuthorization(ctx, ctrl.Cluster.SystemRootToken, rpc.UserSessionAuthInfo{\n\t\tEmail:     email,\n\t\tFirstName: attrs[\"givenname\"],\n\t\tLastName:  attrs[\"sn\"],\n\t\tUsername:  attrs[strings.ToLower(conf.UsernameAttribute)],\n\t})\n}\n"
  },
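  {
    "path": "doc/examples/ldap_search_filter.go",
    "content": "// Illustrative sketch added by the editor; not upstream Arvados code.\n// It shows how ldapLoginController (login_ldap.go, above) composes its\n// LDAP search filter from SearchAttribute, SearchFilters, and the\n// escaped login name.  The attribute and filter values used here are\n// examples only.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/go-ldap/ldap\"\n)\n\n// buildFilter mirrors the fmt.Sprintf calls in UserAuthenticate.\nfunc buildFilter(searchAttribute, searchFilters, username string) string {\n\t// ldap.EscapeFilter neutralizes filter metacharacters in\n\t// user-supplied input, preventing LDAP filter injection.\n\tsearch := fmt.Sprintf(\"(%s=%s)\", ldap.EscapeFilter(searchAttribute), ldap.EscapeFilter(username))\n\tif searchFilters != \"\" {\n\t\tsearch = fmt.Sprintf(\"(&%s%s)\", searchFilters, search)\n\t}\n\treturn search\n}\n\nfunc main() {\n\t// A hostile username is rendered harmless by EscapeFilter:\n\tfmt.Println(buildFilter(\"uid\", \"(objectClass=inetOrgPerson)\", \"alice)(uid=*\"))\n\t// Prints: (&(objectClass=inetOrgPerson)(uid=alice\\29\\28uid=\\2a))\n}\n"
  },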
  {
    "path": "lib/controller/localdb/login_oidc.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"database/sql\"\n\t\"encoding/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"text/template\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/api\"\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/coreos/go-oidc/v3/oidc\"\n\tlru \"github.com/hashicorp/golang-lru\"\n\t\"github.com/jmoiron/sqlx\"\n\t\"github.com/lib/pq\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/oauth2\"\n\t\"google.golang.org/api/option\"\n\t\"google.golang.org/api/people/v1\"\n\t\"gopkg.in/go-jose/go-jose.v2/jwt\"\n)\n\nvar (\n\ttokenCacheSize        = 1000\n\ttokenCacheNegativeTTL = time.Minute * 5\n\ttokenCacheTTL         = time.Minute * 10\n\ttokenCacheRaceWindow  = time.Minute\n\tpqCodeUniqueViolation = pq.ErrorCode(\"23505\")\n)\n\ntype tokenCacheEnt struct {\n\tvalid   bool\n\trefresh time.Time\n}\n\ntype oidcLoginController struct {\n\tCluster                *arvados.Cluster\n\tParent                 *Conn\n\tIssuer                 string // OIDC issuer URL, e.g., \"https://accounts.google.com\"\n\tClientID               string\n\tClientSecret           string\n\tUseGooglePeopleAPI     bool              // Use Google People API to look up alternate email addresses\n\tEmailClaim             string            // OpenID claim to use as email address; typically \"email\"\n\tEmailVerifiedClaim     string            // If non-empty, ensure claim value is true before accepting EmailClaim; typically \"email_verified\"\n\tUsernameClaim          string            // If non-empty, use as preferred username\n\tAcceptAccessToken      bool              // Accept access tokens as API tokens\n\tAcceptAccessTokenScope string            // If non-empty, don't accept access tokens as API tokens unless they contain this scope\n\tAuthParams             map[string]string // Additional parameters to pass with authentication request\n\n\t// override Google People API base URL for testing purposes\n\t// (normally empty, set by google pkg to\n\t// https://people.googleapis.com/)\n\tpeopleAPIBasePath string\n\n\tprovider      *oidc.Provider        // initialized by setup()\n\tendSessionURL *url.URL              // initialized by setup()\n\toauth2conf    *oauth2.Config        // initialized by setup()\n\tverifier      *oidc.IDTokenVerifier // initialized by setup()\n\tmu            sync.Mutex            // protects setup()\n}\n\n// Initialize ctrl.provider and ctrl.oauth2conf.\nfunc (ctrl *oidcLoginController) setup() error {\n\tctrl.mu.Lock()\n\tdefer ctrl.mu.Unlock()\n\tif ctrl.provider != nil {\n\t\t// already set up\n\t\treturn nil\n\t}\n\tredirURL, err := (*url.URL)(&ctrl.Cluster.Services.Controller.ExternalURL).Parse(\"/\" + arvados.EndpointLogin.Path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error making redirect URL: %s\", err)\n\t}\n\tprovider, err := oidc.NewProvider(context.Background(), ctrl.Issuer)\n\tif err != nil {\n\t\treturn err\n\t}\n\tctrl.oauth2conf = &oauth2.Config{\n\t\tClientID:     ctrl.ClientID,\n\t\tClientSecret: 
ctrl.ClientSecret,\n\t\tEndpoint:     provider.Endpoint(),\n\t\tScopes:       []string{oidc.ScopeOpenID, \"profile\", \"email\"},\n\t\tRedirectURL:  redirURL.String(),\n\t}\n\tctrl.verifier = provider.Verifier(&oidc.Config{\n\t\tClientID: ctrl.ClientID,\n\t})\n\tctrl.provider = provider\n\tvar claims struct {\n\t\tEndSessionEndpoint string `json:\"end_session_endpoint\"`\n\t}\n\terr = provider.Claims(&claims)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error parsing OIDC discovery metadata: %v\", err)\n\t} else if claims.EndSessionEndpoint == \"\" {\n\t\tctrl.endSessionURL = nil\n\t} else {\n\t\tu, err := url.Parse(claims.EndSessionEndpoint)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"OIDC end_session_endpoint is not a valid URL: %v\", err)\n\t\t} else if u.Scheme != \"https\" {\n\t\t\treturn fmt.Errorf(\"OIDC end_session_endpoint MUST use HTTPS but does not: %v\", u.String())\n\t\t} else {\n\t\t\tctrl.endSessionURL = u\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (ctrl *oidcLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {\n\terr := ctrl.setup()\n\tif err != nil {\n\t\treturn arvados.LogoutResponse{}, fmt.Errorf(\"error setting up OpenID Connect provider: %s\", err)\n\t}\n\tresp, err := logout(ctx, ctrl.Cluster, opts)\n\tif err != nil {\n\t\treturn arvados.LogoutResponse{}, err\n\t}\n\tcreds, credsOK := auth.FromContext(ctx)\n\tif ctrl.endSessionURL != nil && credsOK && len(creds.Tokens) > 0 {\n\t\tvalues := ctrl.endSessionURL.Query()\n\t\tvalues.Set(\"client_id\", ctrl.ClientID)\n\t\tvalues.Set(\"post_logout_redirect_uri\", resp.RedirectLocation)\n\t\tu := *ctrl.endSessionURL\n\t\tu.RawQuery = values.Encode()\n\t\tresp.RedirectLocation = u.String()\n\t}\n\treturn resp, err\n}\n\nfunc (ctrl *oidcLoginController) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {\n\terr := ctrl.setup()\n\tif err != nil {\n\t\treturn loginError(fmt.Errorf(\"error setting up OpenID Connect provider: %s\", err))\n\t}\n\tif opts.State == \"\" {\n\t\t// Initiate OIDC sign-in.\n\t\tif opts.ReturnTo == \"\" {\n\t\t\treturn loginError(errors.New(\"missing return_to parameter\"))\n\t\t}\n\t\tif err := validateLoginRedirectTarget(ctrl.Parent.cluster, opts.ReturnTo); err != nil {\n\t\t\treturn loginError(fmt.Errorf(\"invalid return_to parameter: %s\", err))\n\t\t}\n\t\tstate := ctrl.newOAuth2State([]byte(ctrl.Cluster.SystemRootToken), opts.Remote, opts.ReturnTo)\n\t\tvar authparams []oauth2.AuthCodeOption\n\t\tfor k, v := range ctrl.AuthParams {\n\t\t\tauthparams = append(authparams, oauth2.SetAuthURLParam(k, v))\n\t\t}\n\t\treturn arvados.LoginResponse{\n\t\t\tRedirectLocation: ctrl.oauth2conf.AuthCodeURL(state.String(), authparams...),\n\t\t}, nil\n\t}\n\t// Callback after OIDC sign-in.\n\tstate := ctrl.parseOAuth2State(opts.State)\n\tif !state.verify([]byte(ctrl.Cluster.SystemRootToken)) {\n\t\treturn loginError(errors.New(\"invalid OAuth2 state\"))\n\t}\n\toauth2Token, err := ctrl.oauth2conf.Exchange(ctx, opts.Code)\n\tif err != nil {\n\t\treturn loginError(fmt.Errorf(\"error in OAuth2 exchange: %s\", err))\n\t}\n\tctxlog.FromContext(ctx).WithField(\"oauth2Token\", oauth2Token).Debug(\"oauth2 exchange succeeded\")\n\trawIDToken, ok := oauth2Token.Extra(\"id_token\").(string)\n\tif !ok {\n\t\treturn loginError(errors.New(\"error in OAuth2 exchange: no ID token in OAuth2 token\"))\n\t}\n\tctxlog.FromContext(ctx).WithField(\"rawIDToken\", rawIDToken).Debug(\"oauth2Token provided ID token\")\n\tidToken, err := 
ctrl.verifier.Verify(ctx, rawIDToken)\n\tif err != nil {\n\t\treturn loginError(fmt.Errorf(\"error verifying ID token: %s\", err))\n\t}\n\tauthinfo, err := ctrl.getAuthInfo(ctx, oauth2Token, idToken)\n\tif err != nil {\n\t\treturn loginError(err)\n\t}\n\tctxRoot := auth.NewContext(ctx, &auth.Credentials{Tokens: []string{ctrl.Cluster.SystemRootToken}})\n\tresp, err := ctrl.Parent.UserSessionCreate(ctxRoot, rpc.UserSessionCreateOptions{\n\t\tReturnTo: state.Remote + \",https://controller.api.client.invalid\",\n\t\tAuthInfo: *authinfo,\n\t})\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\t// Extract token from rails' UserSessionCreate response, and\n\t// attach it to our caller's desired ReturnTo URL.  The Rails\n\t// handler explicitly disallows sending the real ReturnTo as a\n\t// belt-and-suspenders defence against Rails accidentally\n\t// exposing an additional login relay.\n\tu, err := url.Parse(resp.RedirectLocation)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\ttoken := u.Query().Get(\"api_token\")\n\tif token == \"\" {\n\t\tresp.RedirectLocation = state.ReturnTo\n\t} else {\n\t\tu, err := url.Parse(state.ReturnTo)\n\t\tif err != nil {\n\t\t\treturn resp, err\n\t\t}\n\t\tq := u.Query()\n\t\tif q == nil {\n\t\t\tq = url.Values{}\n\t\t}\n\t\tq.Set(\"api_token\", token)\n\t\tu.RawQuery = q.Encode()\n\t\tresp.RedirectLocation = u.String()\n\t}\n\treturn resp, nil\n}\n\nfunc (ctrl *oidcLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {\n\treturn arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New(\"username/password authentication is not available\"), http.StatusBadRequest)\n}\n\n// claimser can decode arbitrary claims into a map. Implemented by\n// *oidc.IDToken and *oidc.UserInfo.\ntype claimser interface {\n\tClaims(interface{}) error\n}\n\n// getAuthInfo returns a rpc.UserSessionAuthInfo with the user's name,\n// username, and email addresses extracted from the given OIDC token\n// and claims.  If the Google People API is enabled, it is consulted\n// too: the primary address ends up in the Email field, and any other\n// verified addresses are returned in AlternateEmails.\nfunc (ctrl *oidcLoginController) getAuthInfo(ctx context.Context, token *oauth2.Token, claimser claimser) (*rpc.UserSessionAuthInfo, error) {\n\tvar ret rpc.UserSessionAuthInfo\n\tdefer ctxlog.FromContext(ctx).WithField(\"ret\", &ret).Debug(\"getAuthInfo returned\")\n\n\tvar claims map[string]interface{}\n\tif err := claimser.Claims(&claims); err != nil {\n\t\treturn nil, fmt.Errorf(\"error extracting claims from token: %s\", err)\n\t} else if verified, _ := claims[ctrl.EmailVerifiedClaim].(bool); verified || ctrl.EmailVerifiedClaim == \"\" {\n\t\t// Fall back to this info if the People API call\n\t\t// (below) doesn't return a primary && verified email.\n\t\tgivenName, _ := claims[\"given_name\"].(string)\n\t\tfamilyName, _ := claims[\"family_name\"].(string)\n\t\tif givenName != \"\" && familyName != \"\" {\n\t\t\tret.FirstName = givenName\n\t\t\tret.LastName = familyName\n\t\t} else {\n\t\t\tname, _ := claims[\"name\"].(string)\n\t\t\tif names := strings.Fields(strings.TrimSpace(name)); len(names) > 1 {\n\t\t\t\tret.FirstName = strings.Join(names[0:len(names)-1], \" \")\n\t\t\t\tret.LastName = names[len(names)-1]\n\t\t\t} else if len(names) > 0 {\n\t\t\t\tret.FirstName = names[0]\n\t\t\t}\n\t\t}\n\t\tret.Email, _ = claims[ctrl.EmailClaim].(string)\n\t}\n\n\tif ctrl.UsernameClaim != \"\" {\n\t\tret.Username, _ = claims[ctrl.UsernameClaim].(string)\n\t}\n\n\tif !ctrl.UseGooglePeopleAPI {\n\t\tif ret.Email == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"cannot log in with unverified email address %q\", claims[ctrl.EmailClaim])\n\t\t}\n\t\treturn &ret, nil\n\t}\n\n\tsvc, err := people.NewService(ctx, option.WithTokenSource(ctrl.oauth2conf.TokenSource(ctx, token)), option.WithScopes(people.UserEmailsReadScope))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error setting up People API: %s\", err)\n\t}\n\tif p := ctrl.peopleAPIBasePath; p != \"\" {\n\t\t// Override normal API endpoint (for testing)\n\t\tsvc.BasePath = p\n\t}\n\tperson, err := people.NewPeopleService(svc).Get(\"people/me\").PersonFields(\"emailAddresses,names\").Do()\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"Error 403\") && strings.Contains(err.Error(), \"accessNotConfigured\") {\n\t\t\t// Log the original API error, but display\n\t\t\t// only the \"fix config\" advice to the user.\n\t\t\tctxlog.FromContext(ctx).WithError(err).WithField(\"email\", ret.Email).Error(\"People API is not enabled\")\n\t\t\treturn nil, errors.New(\"configuration error: Login.GoogleAlternateEmailAddresses is true, but Google People API is not enabled\")\n\t\t}\n\t\treturn nil, fmt.Errorf(\"error getting profile info from People API: %s\", err)\n\t}\n\n\t// The given/family names returned by the People API and\n\t// flagged as \"primary\" (if any) take precedence over the\n\t// split-by-whitespace result from above.\n\tfor _, name := range person.Names {\n\t\tif name.Metadata != nil && name.Metadata.Primary {\n\t\t\tret.FirstName = name.GivenName\n\t\t\tret.LastName = name.FamilyName\n\t\t\tbreak\n\t\t}\n\t}\n\n\taltEmails := map[string]bool{}\n\tif ret.Email != \"\" {\n\t\taltEmails[ret.Email] = true\n\t}\n\tfor _, ea := range person.EmailAddresses {\n\t\tif ea.Metadata == nil || !ea.Metadata.Verified {\n\t\t\tctxlog.FromContext(ctx).WithField(\"address\", ea.Value).Info(\"skipping unverified email address\")\n\t\t\tcontinue\n\t\t}\n\t\taltEmails[ea.Value] = true\n\t\tif ea.Metadata.Primary || 
ret.Email == \"\" {\n\t\t\tret.Email = ea.Value\n\t\t}\n\t}\n\tif len(altEmails) == 0 {\n\t\treturn nil, errors.New(\"cannot log in without a verified email address\")\n\t}\n\tfor ae := range altEmails {\n\t\tif ae == ret.Email {\n\t\t\tcontinue\n\t\t}\n\t\tret.AlternateEmails = append(ret.AlternateEmails, ae)\n\t\tif ret.Username == \"\" {\n\t\t\ti := strings.Index(ae, \"@\")\n\t\t\tif i > 0 && strings.ToLower(ae[i+1:]) == strings.ToLower(ctrl.Cluster.Users.PreferDomainForUsername) {\n\t\t\t\tret.Username = strings.SplitN(ae[:i], \"+\", 2)[0]\n\t\t\t}\n\t\t}\n\t}\n\treturn &ret, nil\n}\n\nfunc loginError(sendError error) (resp arvados.LoginResponse, err error) {\n\ttmpl, err := template.New(\"error\").Parse(`<h2>Login error:</h2><p>{{.}}</p>`)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = tmpl.Execute(&resp.HTML, sendError.Error())\n\treturn\n}\n\nfunc (ctrl *oidcLoginController) newOAuth2State(key []byte, remote, returnTo string) oauth2State {\n\ts := oauth2State{\n\t\tTime:     time.Now().Unix(),\n\t\tRemote:   remote,\n\t\tReturnTo: returnTo,\n\t}\n\ts.HMAC = s.computeHMAC(key)\n\treturn s\n}\n\ntype oauth2State struct {\n\tHMAC     []byte // hash of other fields; see computeHMAC()\n\tTime     int64  // creation time (unix timestamp)\n\tRemote   string // remote cluster if requesting a salted token, otherwise blank\n\tReturnTo string // redirect target\n}\n\nfunc (ctrl *oidcLoginController) parseOAuth2State(encoded string) (s oauth2State) {\n\t// Errors are not checked. If decoding/parsing fails, the\n\t// token will be rejected by verify().\n\tdecoded, _ := base64.RawURLEncoding.DecodeString(encoded)\n\tf := strings.Split(string(decoded), \"\\n\")\n\tif len(f) != 4 {\n\t\treturn\n\t}\n\tfmt.Sscanf(f[0], \"%x\", &s.HMAC)\n\tfmt.Sscanf(f[1], \"%x\", &s.Time)\n\tfmt.Sscanf(f[2], \"%s\", &s.Remote)\n\tfmt.Sscanf(f[3], \"%s\", &s.ReturnTo)\n\treturn\n}\n\nfunc (s oauth2State) verify(key []byte) bool {\n\tif delta := time.Now().Unix() - s.Time; delta < 0 || delta > 300 {\n\t\treturn false\n\t}\n\treturn hmac.Equal(s.computeHMAC(key), s.HMAC)\n}\n\nfunc (s oauth2State) String() string {\n\tvar buf bytes.Buffer\n\tenc := base64.NewEncoder(base64.RawURLEncoding, &buf)\n\tfmt.Fprintf(enc, \"%x\\n%x\\n%s\\n%s\", s.HMAC, s.Time, s.Remote, s.ReturnTo)\n\tenc.Close()\n\treturn buf.String()\n}\n\nfunc (s oauth2State) computeHMAC(key []byte) []byte {\n\tmac := hmac.New(sha256.New, key)\n\tfmt.Fprintf(mac, \"%x %s %s\", s.Time, s.Remote, s.ReturnTo)\n\treturn mac.Sum(nil)\n}\n\nfunc OIDCAccessTokenAuthorizer(cluster *arvados.Cluster, getdb func(context.Context) (*sqlx.DB, error)) *oidcTokenAuthorizer {\n\t// We want ctrl to be nil if the chosen controller is not a\n\t// *oidcLoginController, so we can ignore the 2nd return value\n\t// of this type cast.\n\tctrl, _ := NewConn(context.Background(), cluster, getdb).loginController.(*oidcLoginController)\n\tcache, err := lru.New2Q(tokenCacheSize)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn &oidcTokenAuthorizer{\n\t\tctrl:  ctrl,\n\t\tgetdb: getdb,\n\t\tcache: cache,\n\t}\n}\n\ntype oidcTokenAuthorizer struct {\n\tctrl  *oidcLoginController\n\tgetdb func(context.Context) (*sqlx.DB, error)\n\tcache *lru.TwoQueueCache\n}\n\nfunc (ta *oidcTokenAuthorizer) Middleware(w http.ResponseWriter, r *http.Request, next http.Handler) {\n\tif ta.ctrl == nil {\n\t\t// Not using a compatible (OIDC) login controller.\n\t} else if authhdr := strings.Split(r.Header.Get(\"Authorization\"), \" \"); len(authhdr) > 1 && (authhdr[0] == \"OAuth2\" || authhdr[0] == \"Bearer\") 
{\n\t\terr := ta.registerToken(r.Context(), authhdr[1])\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\tnext.ServeHTTP(w, r)\n}\n\nfunc (ta *oidcTokenAuthorizer) WrapCalls(origFunc api.RoutableFunc) api.RoutableFunc {\n\tif ta.ctrl == nil {\n\t\t// Not using a compatible (OIDC) login controller.\n\t\treturn origFunc\n\t}\n\treturn func(ctx context.Context, opts interface{}) (_ interface{}, err error) {\n\t\tcreds, ok := auth.FromContext(ctx)\n\t\tif !ok {\n\t\t\treturn origFunc(ctx, opts)\n\t\t}\n\t\t// Check each token in the incoming request. If any\n\t\t// are valid OAuth2 access tokens, insert/update them\n\t\t// in the database so RailsAPI's auth code accepts\n\t\t// them.\n\t\tfor _, tok := range creds.Tokens {\n\t\t\terr = ta.registerToken(ctx, tok)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treturn origFunc(ctx, opts)\n\t}\n}\n\n// Matches error from oidc UserInfo() when receiving HTTP status 5xx\nvar re5xxError = regexp.MustCompile(`^5\\d\\d `)\n\n// registerToken checks whether tok is a valid OIDC Access Token and,\n// if so, ensures that an api_client_authorizations row exists so that\n// RailsAPI will accept it as an Arvados token.\nfunc (ta *oidcTokenAuthorizer) registerToken(ctx context.Context, tok string) error {\n\tif tok == ta.ctrl.Cluster.SystemRootToken || strings.HasPrefix(tok, \"v2/\") {\n\t\treturn nil\n\t}\n\tif ent, hit := ta.cache.Get(tok); !hit {\n\t\t// Fall through to database and OIDC provider checks\n\t\t// below\n\t} else if ent := ent.(tokenCacheEnt); !ent.valid {\n\t\t// cached negative result\n\t\tif time.Now().Before(ent.refresh) {\n\t\t\treturn nil\n\t\t}\n\t\tta.cache.Remove(tok)\n\t} else if ent.refresh.IsZero() || ent.refresh.After(time.Now().Add(time.Minute)) {\n\t\t// cached positive result, and we're not at/near\n\t\t// refresh time\n\t\treturn nil\n\t}\n\n\tdb, err := ta.getdb(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttx, err := db.Beginx()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\tctx = ctrlctx.NewWithTransaction(ctx, tx)\n\n\t// We use hmac-sha256(accesstoken,systemroottoken) as the\n\t// secret part of our own token, and avoid storing the auth\n\t// provider's real secret in our database.\n\tmac := hmac.New(sha256.New, []byte(ta.ctrl.Cluster.SystemRootToken))\n\tio.WriteString(mac, tok)\n\thmac := fmt.Sprintf(\"%x\", mac.Sum(nil))\n\n\tvar needRefresh bool\n\terr = tx.QueryRowContext(ctx, `\n\t\tselect (least(expires_at, refreshes_at) is not null\n\t\t\tand least(expires_at, refreshes_at) - interval '1 minute' <= current_timestamp at time zone 'UTC')\n\t\tfrom api_client_authorizations\n\t\twhere api_token=$1`, hmac).Scan(&needRefresh)\n\tif err != nil && err != sql.ErrNoRows {\n\t\treturn fmt.Errorf(\"database error while checking token: %w\", err)\n\t} else if err == nil && !needRefresh {\n\t\t// Token is already in the database as an Arvados\n\t\t// token, and isn't about to expire, so we can pass it\n\t\t// through to RailsAPI etc. regardless of whether it's\n\t\t// an OIDC access token.\n\t\treturn nil\n\t}\n\t// err is either nil (meaning we need to update an existing\n\t// row) or sql.ErrNoRows (meaning we need to insert a new row)\n\tupdating := err == nil\n\n\t// Check whether the token is a valid OIDC access token. 
If\n\t// so, swap it out for an Arvados token (creating/updating an\n\t// api_client_authorizations row if needed) which downstream\n\t// server components will accept.\n\terr = ta.ctrl.setup()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error setting up OpenID Connect provider: %s\", err)\n\t}\n\tif ok, err := ta.checkAccessTokenScope(ctx, tok); err != nil || !ok {\n\t\t// Note checkAccessTokenScope logs any interesting errors\n\t\tta.cache.Add(tok, tokenCacheEnt{\n\t\t\tvalid:   false,\n\t\t\trefresh: time.Now().Add(tokenCacheNegativeTTL),\n\t\t})\n\t\treturn err\n\t}\n\toauth2Token := &oauth2.Token{\n\t\tAccessToken: tok,\n\t}\n\tuserinfo, err := ta.ctrl.provider.UserInfo(ctx, oauth2.StaticTokenSource(oauth2Token))\n\tif err != nil {\n\t\tif neterr := net.Error(nil); errors.As(err, &neterr) || re5xxError.MatchString(err.Error()) {\n\t\t\t// If this token is in fact a valid OIDC\n\t\t\t// token, but we failed to validate it here\n\t\t\t// because of a network problem or internal\n\t\t\t// server error, we error out now with a 5xx\n\t\t\t// error, indicating to the client that they\n\t\t\t// can try again.  If we didn't error out now,\n\t\t\t// the unrecognized token would eventually\n\t\t\t// cause a 401 error further down the stack,\n\t\t\t// which the caller would interpret as an\n\t\t\t// unrecoverable failure.\n\t\t\tctxlog.FromContext(ctx).WithError(err).Debugf(\"treating OIDC UserInfo lookup error type %T as transient; failing request instead of forwarding token blindly\", err)\n\t\t\treturn err\n\t\t}\n\t\tctxlog.FromContext(ctx).WithError(err).WithField(\"HMAC\", hmac).Debug(\"UserInfo failed (not an OIDC token?), caching negative result\")\n\t\tta.cache.Add(tok, tokenCacheEnt{\n\t\t\tvalid:   false,\n\t\t\trefresh: time.Now().Add(tokenCacheNegativeTTL),\n\t\t})\n\t\treturn nil\n\t}\n\tctxlog.FromContext(ctx).WithField(\"userinfo\", userinfo).Debug(\"(*oidcTokenAuthorizer)registerToken: got userinfo\")\n\tauthinfo, err := ta.ctrl.getAuthInfo(ctx, oauth2Token, userinfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Refresh time for our token is one minute longer than our\n\t// cache TTL, so we don't pass it through to RailsAPI just as\n\t// the refresh time is arriving.\n\trefresh := time.Now().UTC().Add(tokenCacheTTL + tokenCacheRaceWindow)\n\n\tif updating {\n\t\t_, err = tx.ExecContext(ctx, `update api_client_authorizations set expires_at=null, refreshes_at=$1 where api_token=$2`, refresh, hmac)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error updating token refresh time: %w\", err)\n\t\t}\n\t\tctxlog.FromContext(ctx).WithField(\"HMAC\", hmac).Debug(\"(*oidcTokenAuthorizer)registerToken: updated api_client_authorizations row\")\n\t} else {\n\t\taca, err := ta.ctrl.Parent.CreateAPIClientAuthorization(ctx, ta.ctrl.Cluster.SystemRootToken, *authinfo)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = tx.ExecContext(ctx, `savepoint upd`)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = tx.ExecContext(ctx, `update api_client_authorizations set api_token=$1, expires_at=null, refreshes_at=$2 where uuid=$3`, hmac, refresh, aca.UUID)\n\t\tif e, ok := err.(*pq.Error); ok && e.Code == pqCodeUniqueViolation {\n\t\t\t// unique_violation, given that the above\n\t\t\t// query did not find a row with matching\n\t\t\t// api_token, means another thread/process\n\t\t\t// also received this same token and won the\n\t\t\t// race to insert it -- in which case this\n\t\t\t// thread doesn't need to update the database.\n\t\t\t// Discard the redundant row.\n\t\t\t_, err = 
tx.ExecContext(ctx, `rollback to savepoint upd`)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_, err = tx.ExecContext(ctx, `delete from api_client_authorizations where uuid=$1`, aca.UUID)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tctxlog.FromContext(ctx).WithField(\"HMAC\", hmac).Debug(\"(*oidcTokenAuthorizer)registerToken: api_client_authorizations row inserted by another thread\")\n\t\t} else if err != nil {\n\t\t\tctxlog.FromContext(ctx).Errorf(\"%#v\", err)\n\t\t\treturn fmt.Errorf(\"error adding OIDC access token to database: %w\", err)\n\t\t} else {\n\t\t\tctxlog.FromContext(ctx).WithFields(logrus.Fields{\"UUID\": aca.UUID, \"HMAC\": hmac}).Debug(\"(*oidcTokenAuthorizer)registerToken: inserted api_client_authorizations row\")\n\t\t}\n\t}\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tta.cache.Add(tok, tokenCacheEnt{\n\t\tvalid:   true,\n\t\trefresh: refresh,\n\t})\n\treturn nil\n}\n\n// Check that the provided access token is a JWT with the required\n// scope. If it is a valid JWT but is missing the required scope, we\n// return a 401 error; otherwise we return true (acceptable as an API\n// token) or false (pass the token through unmodified).\n//\n// Return false if configured not to accept access tokens at all.\n//\n// Note we don't check signature or expiry here. We are relying on the\n// caller to verify those separately (e.g., by calling the UserInfo\n// endpoint).\nfunc (ta *oidcTokenAuthorizer) checkAccessTokenScope(ctx context.Context, tok string) (bool, error) {\n\tif !ta.ctrl.AcceptAccessToken {\n\t\treturn false, nil\n\t} else if ta.ctrl.AcceptAccessTokenScope == \"\" {\n\t\treturn true, nil\n\t}\n\tvar claims struct {\n\t\tScope string `json:\"scope\"`\n\t}\n\tif t, err := jwt.ParseSigned(tok); err != nil {\n\t\tctxlog.FromContext(ctx).WithError(err).Debug(\"error parsing jwt\")\n\t\treturn false, nil\n\t} else if err = t.UnsafeClaimsWithoutVerification(&claims); err != nil {\n\t\tctxlog.FromContext(ctx).WithError(err).Debug(\"error extracting jwt claims\")\n\t\treturn false, nil\n\t}\n\tfor _, s := range strings.Split(claims.Scope, \" \") {\n\t\tif s == ta.ctrl.AcceptAccessTokenScope {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\tctxlog.FromContext(ctx).WithFields(logrus.Fields{\"have\": claims.Scope, \"need\": ta.ctrl.AcceptAccessTokenScope}).Info(\"unacceptable access token scope\")\n\treturn false, httpserver.ErrorWithStatus(errors.New(\"unacceptable access token scope\"), http.StatusUnauthorized)\n}\n"
  },
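  {
    "path": "doc/examples/oidc_access_token_hmac.go",
    "content": "// Illustrative sketch added by the editor; not upstream Arvados code.\n// It reproduces the token-hashing scheme described in registerToken\n// (login_oidc.go, above): the api_token stored in the database is\n// hex(hmac-sha256(key=SystemRootToken, msg=accessToken)), so the OIDC\n// provider's real secret never reaches the database.  Both token\n// values below are placeholders.\npackage main\n\nimport (\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"fmt\"\n\t\"io\"\n)\n\nfunc main() {\n\tsystemRootToken := \"example-system-root-token\" // placeholder\n\taccessToken := \"example-oidc-access-token\"     // placeholder\n\t// Same construction as (*oidcTokenAuthorizer)registerToken:\n\tmac := hmac.New(sha256.New, []byte(systemRootToken))\n\tio.WriteString(mac, accessToken)\n\tfmt.Printf(\"api_token column value: %x\\n\", mac.Sum(nil))\n}\n"
  },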
  {
    "path": "lib/controller/localdb/login_oidc_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"github.com/jmoiron/sqlx\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&OIDCLoginSuite{})\n\ntype OIDCLoginSuite struct {\n\tlocaldbSuite\n\ttrustedURL   *arvados.URL\n\tfakeProvider *arvadostest.OIDCProvider\n}\n\nfunc (s *OIDCLoginSuite) SetUpTest(c *check.C) {\n\ts.trustedURL = &arvados.URL{Scheme: \"https\", Host: \"app.example.com:443\", Path: \"/\"}\n\n\ts.fakeProvider = arvadostest.NewOIDCProvider(c)\n\ts.fakeProvider.AuthEmail = \"active-user@arvados.local\"\n\ts.fakeProvider.AuthEmailVerified = true\n\ts.fakeProvider.AuthName = \"Fake User Name\"\n\ts.fakeProvider.AuthGivenName = \"Fake\"\n\ts.fakeProvider.AuthFamilyName = \"User Name\"\n\ts.fakeProvider.ValidCode = fmt.Sprintf(\"abcdefgh-%d\", time.Now().Unix())\n\ts.fakeProvider.PeopleAPIResponse = map[string]interface{}{}\n\n\ts.localdbSuite.SetUpTest(c)\n\n\ts.cluster.Login.Test.Enable = false\n\ts.cluster.Login.Google.Enable = true\n\ts.cluster.Login.Google.ClientID = \"test%client$id\"\n\ts.cluster.Login.Google.ClientSecret = \"test#client/secret\"\n\ts.cluster.Login.TrustedClients = map[arvados.URL]struct{}{*s.trustedURL: {}}\n\ts.cluster.Users.PreferDomainForUsername = \"PreferDomainForUsername.example.com\"\n\ts.fakeProvider.ValidClientID = \"test%client$id\"\n\ts.fakeProvider.ValidClientSecret = \"test#client/secret\"\n\n\ts.localdb = NewConn(s.ctx, s.cluster, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)\n\tc.Assert(s.localdb.loginController, check.FitsTypeOf, (*oidcLoginController)(nil))\n\ts.localdb.loginController.(*oidcLoginController).Issuer = s.fakeProvider.Issuer.URL\n\ts.localdb.loginController.(*oidcLoginController).peopleAPIBasePath = s.fakeProvider.PeopleAPI.URL\n\n\t*s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)\n}\n\nfunc (s *OIDCLoginSuite) TestGoogleLogout(c *check.C) {\n\ts.cluster.Login.TrustedClients[arvados.URL{Scheme: \"https\", Host: \"foo.example\", Path: \"/\"}] = struct{}{}\n\ts.cluster.Login.TrustPrivateNetworks = false\n\n\tresp, err := s.localdb.Logout(context.Background(), arvados.LogoutOptions{ReturnTo: \"https://foo.example.com/bar\"})\n\tc.Check(err, check.NotNil)\n\tc.Check(resp.RedirectLocation, check.Equals, \"\")\n\n\tresp, err = s.localdb.Logout(context.Background(), arvados.LogoutOptions{ReturnTo: \"https://127.0.0.1/bar\"})\n\tc.Check(err, check.NotNil)\n\tc.Check(resp.RedirectLocation, check.Equals, \"\")\n\n\tresp, err = s.localdb.Logout(context.Background(), arvados.LogoutOptions{ReturnTo: \"https://foo.example/bar\"})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, \"https://foo.example/bar\")\n\n\ts.cluster.Login.TrustPrivateNetworks = true\n\n\tresp, err = s.localdb.Logout(context.Background(), arvados.LogoutOptions{ReturnTo: 
\"https://192.168.1.1/bar\"})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, \"https://192.168.1.1/bar\")\n}\n\nfunc (s *OIDCLoginSuite) checkRPInitiatedLogout(c *check.C, returnTo string) {\n\tif !c.Check(s.fakeProvider.EndSessionEndpoint, check.NotNil,\n\t\tcheck.Commentf(\"buggy test: EndSessionEndpoint not configured\")) {\n\t\treturn\n\t}\n\texpURL, err := url.Parse(s.fakeProvider.Issuer.URL)\n\tif !c.Check(err, check.IsNil, check.Commentf(\"error parsing expected URL\")) {\n\t\treturn\n\t}\n\texpURL.Path = expURL.Path + s.fakeProvider.EndSessionEndpoint.Path\n\n\taccessToken := s.fakeProvider.ValidAccessToken()\n\tctx := ctrlctx.NewWithToken(s.ctx, s.cluster, accessToken)\n\tresp, err := s.localdb.Logout(ctx, arvados.LogoutOptions{ReturnTo: returnTo})\n\tif !c.Check(err, check.IsNil) {\n\t\treturn\n\t}\n\tloc, err := url.Parse(resp.RedirectLocation)\n\tif !c.Check(err, check.IsNil, check.Commentf(\"error parsing response URL\")) {\n\t\treturn\n\t}\n\n\tc.Check(loc.Scheme, check.Equals, \"https\")\n\tc.Check(loc.Host, check.Equals, expURL.Host)\n\tc.Check(loc.Path, check.Equals, expURL.Path)\n\n\tvar expReturn string\n\tswitch returnTo {\n\tcase \"\":\n\t\texpReturn = s.cluster.Services.Workbench2.ExternalURL.String()\n\tdefault:\n\t\texpReturn = returnTo\n\t}\n\tvalues := loc.Query()\n\tc.Check(values.Get(\"client_id\"), check.Equals, s.cluster.Login.Google.ClientID)\n\tc.Check(values.Get(\"post_logout_redirect_uri\"), check.Equals, expReturn)\n}\n\nfunc (s *OIDCLoginSuite) TestRPInitiatedLogoutWithoutReturnTo(c *check.C) {\n\ts.fakeProvider.EndSessionEndpoint = &url.URL{Path: \"/logout/fromRP\"}\n\ts.checkRPInitiatedLogout(c, \"\")\n}\n\nfunc (s *OIDCLoginSuite) TestRPInitiatedLogoutWithReturnTo(c *check.C) {\n\ts.fakeProvider.EndSessionEndpoint = &url.URL{Path: \"/rp_logout\"}\n\tu := arvados.URL{Scheme: \"https\", Host: \"foo.example\", Path: \"/\"}\n\ts.cluster.Login.TrustedClients[u] = struct{}{}\n\ts.checkRPInitiatedLogout(c, u.String())\n}\n\nfunc (s *OIDCLoginSuite) TestEndSessionEndpointBadScheme(c *check.C) {\n\t// RP-Initiated Logout 1.0 says: \"This URL MUST use the https scheme...\"\n\tu := url.URL{Scheme: \"http\", Host: \"example.com\"}\n\ts.fakeProvider.EndSessionEndpoint = &u\n\t_, err := s.localdb.Logout(s.ctx, arvados.LogoutOptions{})\n\tc.Check(err, check.ErrorMatches,\n\t\t`.*\\bend_session_endpoint MUST use HTTPS but does not: `+regexp.QuoteMeta(u.String()))\n}\n\nfunc (s *OIDCLoginSuite) TestNoRPInitiatedLogoutWithoutToken(c *check.C) {\n\tendPath := \"/TestNoRPInitiatedLogoutWithoutToken\"\n\ts.fakeProvider.EndSessionEndpoint = &url.URL{Path: endPath}\n\tresp, _ := s.localdb.Logout(s.ctx, arvados.LogoutOptions{})\n\tu, err := url.Parse(resp.RedirectLocation)\n\tc.Check(err, check.IsNil)\n\tc.Check(strings.HasSuffix(u.Path, endPath), check.Equals, false,\n\t\tcheck.Commentf(\"logout redirected to end_session_endpoint without token\"))\n}\n\nfunc (s *OIDCLoginSuite) TestGoogleLogin_Start_Bogus(c *check.C) {\n\tresp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, \"\")\n\tc.Check(resp.HTML.String(), check.Matches, `.*missing return_to parameter.*`)\n}\n\nfunc (s *OIDCLoginSuite) TestGoogleLogin_Start(c *check.C) {\n\tfor _, remote := range []string{\"\", \"zzzzz\"} {\n\t\tresp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{Remote: remote, ReturnTo: \"https://app.example.com/foo?bar\"})\n\t\tc.Check(err, 
check.IsNil)\n\t\ttarget, err := url.Parse(resp.RedirectLocation)\n\t\tc.Check(err, check.IsNil)\n\t\tissuerURL, _ := url.Parse(s.fakeProvider.Issuer.URL)\n\t\tc.Check(target.Host, check.Equals, issuerURL.Host)\n\t\tq := target.Query()\n\t\tc.Check(q.Get(\"client_id\"), check.Equals, \"test%client$id\")\n\t\tstate := s.localdb.loginController.(*oidcLoginController).parseOAuth2State(q.Get(\"state\"))\n\t\tc.Check(state.verify([]byte(s.cluster.SystemRootToken)), check.Equals, true)\n\t\tc.Check(state.Time, check.Not(check.Equals), 0)\n\t\tc.Check(state.Remote, check.Equals, remote)\n\t\tc.Check(state.ReturnTo, check.Equals, \"https://app.example.com/foo?bar\")\n\t}\n}\n\nfunc (s *OIDCLoginSuite) TestGoogleLogin_UnknownClient(c *check.C) {\n\tresp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{ReturnTo: \"https://bad-app.example.com/foo?bar\"})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, \"\")\n\tc.Check(resp.HTML.String(), check.Matches, `(?ms).*requesting site is not listed in TrustedClients.*`)\n}\n\nfunc (s *OIDCLoginSuite) TestGoogleLogin_InvalidCode(c *check.C) {\n\tstate := s.startLogin(c)\n\tresp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{\n\t\tCode:  \"first-try-a-bogus-code\",\n\t\tState: state,\n\t})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, \"\")\n\tc.Check(resp.HTML.String(), check.Matches, `(?ms).*error in OAuth2 exchange.*cannot fetch token.*`)\n}\n\nfunc (s *OIDCLoginSuite) TestGoogleLogin_InvalidState(c *check.C) {\n\ts.startLogin(c)\n\tresp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{\n\t\tCode:  s.fakeProvider.ValidCode,\n\t\tState: \"bogus-state\",\n\t})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, \"\")\n\tc.Check(resp.HTML.String(), check.Matches, `(?ms).*invalid OAuth2 state.*`)\n}\n\nfunc (s *OIDCLoginSuite) setupPeopleAPIError(c *check.C) {\n\ts.fakeProvider.PeopleAPI = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\tfmt.Fprintln(w, `Error 403: accessNotConfigured`)\n\t}))\n\ts.localdb.loginController.(*oidcLoginController).peopleAPIBasePath = s.fakeProvider.PeopleAPI.URL\n}\n\nfunc (s *OIDCLoginSuite) TestGoogleLogin_PeopleAPIDisabled(c *check.C) {\n\ts.localdb.loginController.(*oidcLoginController).UseGooglePeopleAPI = false\n\ts.fakeProvider.AuthEmail = \"joe.smith@primary.example.com\"\n\ts.setupPeopleAPIError(c)\n\tstate := s.startLogin(c)\n\t_, err := s.localdb.Login(context.Background(), arvados.LoginOptions{\n\t\tCode:  s.fakeProvider.ValidCode,\n\t\tState: state,\n\t})\n\tc.Check(err, check.IsNil)\n\tauthinfo := getCallbackAuthInfo(c, s.railsSpy)\n\tc.Check(authinfo.Email, check.Equals, \"joe.smith@primary.example.com\")\n}\n\nfunc (s *OIDCLoginSuite) TestConfig(c *check.C) {\n\ts.cluster.Login.Google.Enable = false\n\ts.cluster.Login.OpenIDConnect.Enable = true\n\ts.cluster.Login.OpenIDConnect.Issuer = \"https://accounts.example.com/\"\n\ts.cluster.Login.OpenIDConnect.ClientID = \"oidc-client-id\"\n\ts.cluster.Login.OpenIDConnect.ClientSecret = \"oidc-client-secret\"\n\ts.cluster.Login.OpenIDConnect.AuthenticationRequestParameters = map[string]string{\"testkey\": \"testvalue\"}\n\tlocaldb := NewConn(context.Background(), s.cluster, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)\n\tctrl := localdb.loginController.(*oidcLoginController)\n\tc.Check(ctrl.Issuer, check.Equals, 
\"https://accounts.example.com/\")\n\tc.Check(ctrl.ClientID, check.Equals, \"oidc-client-id\")\n\tc.Check(ctrl.ClientSecret, check.Equals, \"oidc-client-secret\")\n\tc.Check(ctrl.UseGooglePeopleAPI, check.Equals, false)\n\tc.Check(ctrl.AuthParams[\"testkey\"], check.Equals, \"testvalue\")\n\n\tfor _, enableAltEmails := range []bool{false, true} {\n\t\ts.cluster.Login.OpenIDConnect.Enable = false\n\t\ts.cluster.Login.Google.Enable = true\n\t\ts.cluster.Login.Google.ClientID = \"google-client-id\"\n\t\ts.cluster.Login.Google.ClientSecret = \"google-client-secret\"\n\t\ts.cluster.Login.Google.AlternateEmailAddresses = enableAltEmails\n\t\ts.cluster.Login.Google.AuthenticationRequestParameters = map[string]string{\"testkey\": \"testvalue\"}\n\t\tlocaldb = NewConn(context.Background(), s.cluster, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)\n\t\tctrl = localdb.loginController.(*oidcLoginController)\n\t\tc.Check(ctrl.Issuer, check.Equals, \"https://accounts.google.com\")\n\t\tc.Check(ctrl.ClientID, check.Equals, \"google-client-id\")\n\t\tc.Check(ctrl.ClientSecret, check.Equals, \"google-client-secret\")\n\t\tc.Check(ctrl.UseGooglePeopleAPI, check.Equals, enableAltEmails)\n\t\tc.Check(ctrl.AuthParams[\"testkey\"], check.Equals, \"testvalue\")\n\t}\n}\n\nfunc (s *OIDCLoginSuite) TestGoogleLogin_PeopleAPIError(c *check.C) {\n\ts.setupPeopleAPIError(c)\n\tstate := s.startLogin(c)\n\tresp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{\n\t\tCode:  s.fakeProvider.ValidCode,\n\t\tState: state,\n\t})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, \"\")\n}\n\nfunc (s *OIDCLoginSuite) TestOIDCAuthorizer(c *check.C) {\n\ts.cluster.Login.Google.Enable = false\n\ts.cluster.Login.OpenIDConnect.Enable = true\n\tjson.Unmarshal([]byte(fmt.Sprintf(\"%q\", s.fakeProvider.Issuer.URL)), &s.cluster.Login.OpenIDConnect.Issuer)\n\ts.cluster.Login.OpenIDConnect.ClientID = \"oidc#client#id\"\n\ts.cluster.Login.OpenIDConnect.ClientSecret = \"oidc#client#secret\"\n\ts.cluster.Login.OpenIDConnect.AcceptAccessToken = true\n\ts.cluster.Login.OpenIDConnect.AcceptAccessTokenScope = \"\"\n\ts.fakeProvider.ValidClientID = \"oidc#client#id\"\n\ts.fakeProvider.ValidClientSecret = \"oidc#client#secret\"\n\tdb := arvadostest.DB(c, s.cluster)\n\n\ttokenCacheTTL = time.Millisecond\n\ttokenCacheRaceWindow = time.Millisecond\n\ttokenCacheNegativeTTL = time.Millisecond\n\n\toidcAuthorizer := OIDCAccessTokenAuthorizer(s.cluster, func(context.Context) (*sqlx.DB, error) { return db, nil })\n\taccessToken := s.fakeProvider.ValidAccessToken()\n\n\tmac := hmac.New(sha256.New, []byte(s.cluster.SystemRootToken))\n\tio.WriteString(mac, accessToken)\n\tapiToken := fmt.Sprintf(\"%x\", mac.Sum(nil))\n\n\tcheckTokenInDB := func() time.Time {\n\t\tvar exp time.Time\n\t\terr := db.QueryRow(`select greatest(expires_at, refreshes_at) at time zone 'UTC' from api_client_authorizations where api_token=$1`, apiToken).Scan(&exp)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(exp.Sub(time.Now()) > -time.Second, check.Equals, true)\n\t\tc.Check(exp.Sub(time.Now()) < time.Second, check.Equals, true)\n\t\treturn exp\n\t}\n\tcleanup := func() {\n\t\toidcAuthorizer.cache.Purge()\n\t\t_, err := db.Exec(`delete from api_client_authorizations where api_token=$1`, apiToken)\n\t\tc.Check(err, check.IsNil)\n\t}\n\tcleanup()\n\tdefer cleanup()\n\n\tctx := ctrlctx.NewWithToken(s.ctx, s.cluster, accessToken)\n\n\t// Check behavior on 5xx/network errors (don't cache) vs 4xx\n\t// (do cache)\n\t{\n\t\tcall := 
oidcAuthorizer.WrapCalls(func(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\treturn nil, nil\n\t\t})\n\n\t\t// If fakeProvider UserInfo endpoint returns 502, we\n\t\t// should fail, return an error, and *not* cache the\n\t\t// negative result.\n\t\ttokenCacheNegativeTTL = time.Minute\n\t\ts.fakeProvider.UserInfoErrorStatus = 502\n\t\t_, err := call(ctx, nil)\n\t\tc.Check(err, check.NotNil)\n\n\t\t// The negative result was not cached, so retrying\n\t\t// immediately (with UserInfo working now) should\n\t\t// succeed.\n\t\ts.fakeProvider.UserInfoErrorStatus = 0\n\t\t_, err = call(ctx, nil)\n\t\tc.Check(err, check.IsNil)\n\t\tcheckTokenInDB()\n\n\t\tcleanup()\n\n\t\t// UserInfo 401 => cache the negative result, but\n\t\t// don't return an error (just pass the token through\n\t\t// as a v1 token)\n\t\ts.fakeProvider.UserInfoErrorStatus = 401\n\t\t_, err = call(ctx, nil)\n\t\tc.Check(err, check.IsNil)\n\t\tent, ok := oidcAuthorizer.cache.Get(accessToken)\n\t\tif c.Check(ok, check.Equals, true) {\n\t\t\tc.Check(ent.(tokenCacheEnt).valid, check.Equals, false)\n\t\t}\n\n\t\t// UserInfo succeeds now, but we still have a cached\n\t\t// negative result.\n\t\ts.fakeProvider.UserInfoErrorStatus = 0\n\t\t_, err = call(ctx, nil)\n\t\tc.Check(err, check.IsNil)\n\t\tent, ok = oidcAuthorizer.cache.Get(accessToken)\n\t\tif c.Check(ok, check.Equals, true) {\n\t\t\tc.Check(ent.(tokenCacheEnt).valid, check.Equals, false)\n\t\t}\n\n\t\ttokenCacheNegativeTTL = time.Millisecond\n\t\tcleanup()\n\t}\n\n\tvar exp1 time.Time\n\tconcurrent := 4\n\ts.fakeProvider.HoldUserInfo = make(chan *http.Request)\n\ts.fakeProvider.ReleaseUserInfo = make(chan struct{})\n\tgo func() {\n\t\tfor i := 0; ; i++ {\n\t\t\tif i == concurrent {\n\t\t\t\tclose(s.fakeProvider.ReleaseUserInfo)\n\t\t\t}\n\t\t\t<-s.fakeProvider.HoldUserInfo\n\t\t}\n\t}()\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < concurrent; i++ {\n\t\ti := i\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t_, err := oidcAuthorizer.WrapCalls(func(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\tc.Logf(\"concurrent req %d/%d\", i, concurrent)\n\n\t\t\t\tcreds, ok := auth.FromContext(ctx)\n\t\t\t\tc.Assert(ok, check.Equals, true)\n\t\t\t\tc.Assert(creds.Tokens, check.HasLen, 1)\n\t\t\t\tc.Check(creds.Tokens[0], check.Equals, accessToken)\n\t\t\t\texp := checkTokenInDB()\n\t\t\t\tif i == 0 {\n\t\t\t\t\texp1 = exp\n\t\t\t\t}\n\t\t\t\treturn nil, nil\n\t\t\t})(ctx, nil)\n\t\t\tc.Check(err, check.IsNil)\n\t\t}()\n\t}\n\twg.Wait()\n\tif c.Failed() {\n\t\tc.Fatal(\"giving up\")\n\t}\n\n\t// If the token is used again after the in-memory cache\n\t// expires, oidcAuthorizer must re-check the token and update\n\t// the refreshes_at value in the database.\n\ttime.Sleep(3 * time.Millisecond)\n\toidcAuthorizer.WrapCalls(func(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\texp := checkTokenInDB()\n\t\tc.Check(exp.Sub(exp1) > 0, check.Equals, true, check.Commentf(\"expect %v > 0\", exp.Sub(exp1)))\n\t\tc.Check(exp.Sub(exp1) < time.Second, check.Equals, true, check.Commentf(\"expect %v < 1s\", exp.Sub(exp1)))\n\t\treturn nil, nil\n\t})(ctx, nil)\n\n\ts.fakeProvider.AccessTokenPayload = map[string]interface{}{\"scope\": \"openid profile foobar\"}\n\taccessToken = s.fakeProvider.ValidAccessToken()\n\tctx = ctrlctx.NewWithToken(s.ctx, s.cluster, accessToken)\n\n\tmac = hmac.New(sha256.New, []byte(s.cluster.SystemRootToken))\n\tio.WriteString(mac, accessToken)\n\tapiToken = fmt.Sprintf(\"%x\", mac.Sum(nil))\n\n\tfor _, 
trial := range []struct {\n\t\tconfigEnable bool\n\t\tconfigScope  string\n\t\tacceptable   bool\n\t\tshouldRun    bool\n\t}{\n\t\t{true, \"foobar\", true, true},\n\t\t{true, \"foo\", false, false},\n\t\t{true, \"\", true, true},\n\t\t{false, \"\", false, true},\n\t\t{false, \"foobar\", false, true},\n\t} {\n\t\tc.Logf(\"trial = %+v\", trial)\n\t\tcleanup()\n\t\ts.cluster.Login.OpenIDConnect.AcceptAccessToken = trial.configEnable\n\t\ts.cluster.Login.OpenIDConnect.AcceptAccessTokenScope = trial.configScope\n\t\toidcAuthorizer = OIDCAccessTokenAuthorizer(s.cluster, func(context.Context) (*sqlx.DB, error) { return db, nil })\n\t\tchecked := false\n\t\toidcAuthorizer.WrapCalls(func(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\tvar n int\n\t\t\terr := db.QueryRowContext(ctx, `select count(*) from api_client_authorizations where api_token=$1`, apiToken).Scan(&n)\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tif trial.acceptable {\n\t\t\t\tc.Check(n, check.Equals, 1)\n\t\t\t} else {\n\t\t\t\tc.Check(n, check.Equals, 0)\n\t\t\t}\n\t\t\tchecked = true\n\t\t\treturn nil, nil\n\t\t})(ctx, nil)\n\t\tc.Check(checked, check.Equals, trial.shouldRun)\n\t}\n}\n\nfunc (s *OIDCLoginSuite) TestGenericOIDCLogin(c *check.C) {\n\ts.cluster.Login.Google.Enable = false\n\ts.cluster.Login.OpenIDConnect.Enable = true\n\tjson.Unmarshal([]byte(fmt.Sprintf(\"%q\", s.fakeProvider.Issuer.URL)), &s.cluster.Login.OpenIDConnect.Issuer)\n\ts.cluster.Login.OpenIDConnect.ClientID = \"oidc#client#id\"\n\ts.cluster.Login.OpenIDConnect.ClientSecret = \"oidc#client#secret\"\n\ts.cluster.Login.OpenIDConnect.AuthenticationRequestParameters = map[string]string{\"testkey\": \"testvalue\"}\n\ts.fakeProvider.ValidClientID = \"oidc#client#id\"\n\ts.fakeProvider.ValidClientSecret = \"oidc#client#secret\"\n\tfor _, trial := range []struct {\n\t\texpectEmail string // \"\" if failure expected\n\t\tsetup       func()\n\t}{\n\t\t{\n\t\t\texpectEmail: \"user@oidc.example.com\",\n\t\t\tsetup: func() {\n\t\t\t\tc.Log(\"=== succeed because email_verified is false but not required\")\n\t\t\t\ts.fakeProvider.AuthEmail = \"user@oidc.example.com\"\n\t\t\t\ts.fakeProvider.AuthEmailVerified = false\n\t\t\t\ts.cluster.Login.OpenIDConnect.EmailClaim = \"email\"\n\t\t\t\ts.cluster.Login.OpenIDConnect.EmailVerifiedClaim = \"\"\n\t\t\t\ts.cluster.Login.OpenIDConnect.UsernameClaim = \"\"\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\texpectEmail: \"\",\n\t\t\tsetup: func() {\n\t\t\t\tc.Log(\"=== fail because email_verified is false and required\")\n\t\t\t\ts.fakeProvider.AuthEmail = \"user@oidc.example.com\"\n\t\t\t\ts.fakeProvider.AuthEmailVerified = false\n\t\t\t\ts.cluster.Login.OpenIDConnect.EmailClaim = \"email\"\n\t\t\t\ts.cluster.Login.OpenIDConnect.EmailVerifiedClaim = \"email_verified\"\n\t\t\t\ts.cluster.Login.OpenIDConnect.UsernameClaim = \"\"\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\texpectEmail: \"user@oidc.example.com\",\n\t\t\tsetup: func() {\n\t\t\t\tc.Log(\"=== succeed because email_verified is false but config uses custom 'verified' claim\")\n\t\t\t\ts.fakeProvider.AuthEmail = \"user@oidc.example.com\"\n\t\t\t\ts.fakeProvider.AuthEmailVerified = false\n\t\t\t\ts.cluster.Login.OpenIDConnect.EmailClaim = \"email\"\n\t\t\t\ts.cluster.Login.OpenIDConnect.EmailVerifiedClaim = \"alt_verified\"\n\t\t\t\ts.cluster.Login.OpenIDConnect.UsernameClaim = \"\"\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\texpectEmail: \"alt_email@example.com\",\n\t\t\tsetup: func() {\n\t\t\t\tc.Log(\"=== succeed with custom 'email' and 'email_verified' 
claims\")\n\t\t\t\ts.fakeProvider.AuthEmail = \"bad@wrong.example.com\"\n\t\t\t\ts.fakeProvider.AuthEmailVerified = false\n\t\t\t\ts.cluster.Login.OpenIDConnect.EmailClaim = \"alt_email\"\n\t\t\t\ts.cluster.Login.OpenIDConnect.EmailVerifiedClaim = \"alt_verified\"\n\t\t\t\ts.cluster.Login.OpenIDConnect.UsernameClaim = \"alt_username\"\n\t\t\t},\n\t\t},\n\t} {\n\t\ttrial.setup()\n\t\tif s.railsSpy != nil {\n\t\t\ts.railsSpy.Close()\n\t\t}\n\t\ts.railsSpy = arvadostest.NewProxy(c, s.cluster.Services.RailsAPI)\n\t\ts.localdb = NewConn(context.Background(), s.cluster, (&ctrlctx.DBConnector{PostgreSQL: s.cluster.PostgreSQL}).GetDB)\n\t\t*s.localdb.railsProxy = *rpc.NewConn(s.cluster.ClusterID, s.railsSpy.URL, true, rpc.PassthroughTokenProvider)\n\n\t\tstate := s.startLogin(c, func(form url.Values) {\n\t\t\tc.Check(form.Get(\"testkey\"), check.Equals, \"testvalue\")\n\t\t})\n\t\tresp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{\n\t\t\tCode:  s.fakeProvider.ValidCode,\n\t\t\tState: state,\n\t\t})\n\t\tc.Assert(err, check.IsNil)\n\t\tif trial.expectEmail == \"\" {\n\t\t\tc.Check(resp.HTML.String(), check.Matches, `(?ms).*Login error.*`)\n\t\t\tc.Check(resp.RedirectLocation, check.Equals, \"\")\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(resp.HTML.String(), check.Equals, \"\")\n\t\ttarget, err := url.Parse(resp.RedirectLocation)\n\t\tc.Assert(err, check.IsNil)\n\t\ttoken := target.Query().Get(\"api_token\")\n\t\tc.Check(token, check.Matches, `v2/zzzzz-gj3su-.{15}/.{32,50}`)\n\t\tauthinfo := getCallbackAuthInfo(c, s.railsSpy)\n\t\tc.Check(authinfo.Email, check.Equals, trial.expectEmail)\n\n\t\tswitch s.cluster.Login.OpenIDConnect.UsernameClaim {\n\t\tcase \"alt_username\":\n\t\t\tc.Check(authinfo.Username, check.Equals, \"desired-username\")\n\t\tcase \"\":\n\t\t\tc.Check(authinfo.Username, check.Equals, \"\")\n\t\tdefault:\n\t\t\tc.Fail() // bad test case\n\t\t}\n\t}\n}\n\nfunc (s *OIDCLoginSuite) TestGoogleLogin_Success(c *check.C) {\n\ts.cluster.Login.Google.AuthenticationRequestParameters[\"prompt\"] = \"consent\"\n\ts.cluster.Login.Google.AuthenticationRequestParameters[\"foo\"] = \"bar\"\n\tstate := s.startLogin(c, func(form url.Values) {\n\t\tc.Check(form.Get(\"foo\"), check.Equals, \"bar\")\n\t\tc.Check(form.Get(\"prompt\"), check.Equals, \"consent\")\n\t})\n\tresp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{\n\t\tCode:  s.fakeProvider.ValidCode,\n\t\tState: state,\n\t})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.HTML.String(), check.Equals, \"\")\n\ttarget, err := url.Parse(resp.RedirectLocation)\n\tc.Check(err, check.IsNil)\n\tc.Check(target.Host, check.Equals, \"app.example.com\")\n\tc.Check(target.Path, check.Equals, \"/foo\")\n\ttoken := target.Query().Get(\"api_token\")\n\tc.Check(token, check.Matches, `v2/zzzzz-gj3su-.{15}/.{32,50}`)\n\n\tauthinfo := getCallbackAuthInfo(c, s.railsSpy)\n\tc.Check(authinfo.FirstName, check.Equals, \"Fake\")\n\tc.Check(authinfo.LastName, check.Equals, \"User Name\")\n\tc.Check(authinfo.Email, check.Equals, \"active-user@arvados.local\")\n\tc.Check(authinfo.AlternateEmails, check.HasLen, 0)\n\n\t// Try using the returned Arvados token.\n\tc.Logf(\"trying an API call with new token %q\", token)\n\tctx := ctrlctx.NewWithToken(s.ctx, s.cluster, token)\n\tcl, err := s.localdb.CollectionList(ctx, arvados.ListOptions{Limit: -1})\n\tc.Check(cl.ItemsAvailable, check.Not(check.Equals), 0)\n\tc.Check(cl.Items, check.Not(check.HasLen), 0)\n\tc.Check(err, check.IsNil)\n\n\t// Might as well check that bogus tokens aren't 
accepted.\n\tbadtoken := token + \"plussomeboguschars\"\n\tc.Logf(\"trying an API call with mangled token %q\", badtoken)\n\tctx = ctrlctx.NewWithToken(s.ctx, s.cluster, badtoken)\n\tcl, err = s.localdb.CollectionList(ctx, arvados.ListOptions{Limit: -1})\n\tc.Check(cl.Items, check.HasLen, 0)\n\tc.Check(err, check.NotNil)\n\tc.Check(err, check.ErrorMatches, `.*401 Unauthorized: Not logged in.*`)\n}\n\nfunc (s *OIDCLoginSuite) TestGoogleLogin_RealName(c *check.C) {\n\ts.fakeProvider.AuthEmail = \"joe.smith@primary.example.com\"\n\ts.fakeProvider.AuthEmailVerified = true\n\ts.fakeProvider.PeopleAPIResponse = map[string]interface{}{\n\t\t\"names\": []map[string]interface{}{\n\t\t\t{\n\t\t\t\t\"metadata\":   map[string]interface{}{\"primary\": false},\n\t\t\t\t\"givenName\":  \"Joe\",\n\t\t\t\t\"familyName\": \"Smith\",\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"metadata\":   map[string]interface{}{\"primary\": true},\n\t\t\t\t\"givenName\":  \"Joseph\",\n\t\t\t\t\"familyName\": \"Psmith\",\n\t\t\t},\n\t\t},\n\t}\n\tstate := s.startLogin(c)\n\ts.localdb.Login(context.Background(), arvados.LoginOptions{\n\t\tCode:  s.fakeProvider.ValidCode,\n\t\tState: state,\n\t})\n\n\tauthinfo := getCallbackAuthInfo(c, s.railsSpy)\n\tc.Check(authinfo.FirstName, check.Equals, \"Joseph\")\n\tc.Check(authinfo.LastName, check.Equals, \"Psmith\")\n}\n\nfunc (s *OIDCLoginSuite) TestGoogleLogin_OIDCNameWithoutGivenAndFamilyNames(c *check.C) {\n\ts.fakeProvider.AuthName = \"Joe P. Smith\"\n\ts.fakeProvider.AuthGivenName = \"\"\n\ts.fakeProvider.AuthFamilyName = \"\"\n\ts.fakeProvider.AuthEmail = \"joe.smith@primary.example.com\"\n\tstate := s.startLogin(c)\n\ts.localdb.Login(context.Background(), arvados.LoginOptions{\n\t\tCode:  s.fakeProvider.ValidCode,\n\t\tState: state,\n\t})\n\n\tauthinfo := getCallbackAuthInfo(c, s.railsSpy)\n\tc.Check(authinfo.FirstName, check.Equals, \"Joe P.\")\n\tc.Check(authinfo.LastName, check.Equals, \"Smith\")\n}\n\n// People API returns some additional email addresses.\nfunc (s *OIDCLoginSuite) TestGoogleLogin_AlternateEmailAddresses(c *check.C) {\n\ts.fakeProvider.AuthEmail = \"joe.smith@primary.example.com\"\n\ts.fakeProvider.PeopleAPIResponse = map[string]interface{}{\n\t\t\"emailAddresses\": []map[string]interface{}{\n\t\t\t{\n\t\t\t\t\"metadata\": map[string]interface{}{\"verified\": true},\n\t\t\t\t\"value\":    \"joe.smith@work.example.com\",\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"value\": \"joe.smith@unverified.example.com\", // unverified, so this one will be ignored\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"metadata\": map[string]interface{}{\"verified\": true},\n\t\t\t\t\"value\":    \"joe.smith@home.example.com\",\n\t\t\t},\n\t\t},\n\t}\n\tstate := s.startLogin(c)\n\ts.localdb.Login(context.Background(), arvados.LoginOptions{\n\t\tCode:  s.fakeProvider.ValidCode,\n\t\tState: state,\n\t})\n\n\tauthinfo := getCallbackAuthInfo(c, s.railsSpy)\n\tc.Check(authinfo.Email, check.Equals, \"joe.smith@primary.example.com\")\n\tc.Check(authinfo.AlternateEmails, check.DeepEquals, []string{\"joe.smith@home.example.com\", \"joe.smith@work.example.com\"})\n}\n\n// Primary address is not the one initially returned by oidc.\nfunc (s *OIDCLoginSuite) TestGoogleLogin_AlternateEmailAddresses_Primary(c *check.C) {\n\ts.fakeProvider.AuthEmail = \"joe.smith@alternate.example.com\"\n\ts.fakeProvider.PeopleAPIResponse = map[string]interface{}{\n\t\t\"emailAddresses\": []map[string]interface{}{\n\t\t\t{\n\t\t\t\t\"metadata\": map[string]interface{}{\"verified\": true, \"primary\": true},\n\t\t\t\t\"value\":    
\"joe.smith@primary.example.com\",\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"metadata\": map[string]interface{}{\"verified\": true},\n\t\t\t\t\"value\":    \"joe.smith@alternate.example.com\",\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"metadata\": map[string]interface{}{\"verified\": true},\n\t\t\t\t\"value\":    \"jsmith+123@preferdomainforusername.example.com\",\n\t\t\t},\n\t\t},\n\t}\n\tstate := s.startLogin(c)\n\ts.localdb.Login(context.Background(), arvados.LoginOptions{\n\t\tCode:  s.fakeProvider.ValidCode,\n\t\tState: state,\n\t})\n\tauthinfo := getCallbackAuthInfo(c, s.railsSpy)\n\tc.Check(authinfo.Email, check.Equals, \"joe.smith@primary.example.com\")\n\tc.Check(authinfo.AlternateEmails, check.DeepEquals, []string{\"joe.smith@alternate.example.com\", \"jsmith+123@preferdomainforusername.example.com\"})\n\tc.Check(authinfo.Username, check.Equals, \"jsmith\")\n}\n\nfunc (s *OIDCLoginSuite) TestGoogleLogin_NoPrimaryEmailAddress(c *check.C) {\n\ts.fakeProvider.AuthEmail = \"joe.smith@unverified.example.com\"\n\ts.fakeProvider.AuthEmailVerified = false\n\ts.fakeProvider.PeopleAPIResponse = map[string]interface{}{\n\t\t\"emailAddresses\": []map[string]interface{}{\n\t\t\t{\n\t\t\t\t\"metadata\": map[string]interface{}{\"verified\": true},\n\t\t\t\t\"value\":    \"joe.smith@work.example.com\",\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"metadata\": map[string]interface{}{\"verified\": true},\n\t\t\t\t\"value\":    \"joe.smith@home.example.com\",\n\t\t\t},\n\t\t},\n\t}\n\tstate := s.startLogin(c)\n\ts.localdb.Login(context.Background(), arvados.LoginOptions{\n\t\tCode:  s.fakeProvider.ValidCode,\n\t\tState: state,\n\t})\n\n\tauthinfo := getCallbackAuthInfo(c, s.railsSpy)\n\tc.Check(authinfo.Email, check.Equals, \"joe.smith@work.example.com\") // first verified email in People response\n\tc.Check(authinfo.AlternateEmails, check.DeepEquals, []string{\"joe.smith@home.example.com\"})\n\tc.Check(authinfo.Username, check.Equals, \"\")\n}\n\nfunc (s *OIDCLoginSuite) startLogin(c *check.C, checks ...func(url.Values)) (state string) {\n\t// Initiate login, but instead of following the redirect to\n\t// the provider, just grab state from the redirect URL.\n\tresp, err := s.localdb.Login(context.Background(), arvados.LoginOptions{ReturnTo: \"https://app.example.com/foo?bar\"})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.HTML.String(), check.Not(check.Matches), `(?ms).*error:.*`)\n\ttarget, err := url.Parse(resp.RedirectLocation)\n\tc.Check(err, check.IsNil)\n\tstate = target.Query().Get(\"state\")\n\tif !c.Check(state, check.Not(check.Equals), \"\") {\n\t\tc.Logf(\"Redirect target: %q\", target)\n\t\tc.Logf(\"HTML: %q\", resp.HTML)\n\t}\n\tfor _, fn := range checks {\n\t\tfn(target.Query())\n\t}\n\ts.cluster.Login.OpenIDConnect.AuthenticationRequestParameters = map[string]string{\"testkey\": \"testvalue\"}\n\treturn\n}\n\nfunc (s *OIDCLoginSuite) TestValidateLoginRedirectTarget(c *check.C) {\n\tfor _, trial := range []struct {\n\t\tpermit       bool\n\t\ttrustPrivate bool\n\t\turl          string\n\t}{\n\t\t// wb1, wb2 => accept\n\t\t{true, false, s.cluster.Services.Workbench1.ExternalURL.String()},\n\t\t{true, false, s.cluster.Services.Workbench2.ExternalURL.String()},\n\t\t// explicitly listed host => accept\n\t\t{true, false, \"https://app.example.com/\"},\n\t\t{true, false, \"https://app.example.com:443/foo?bar=baz\"},\n\t\t// non-listed hostname => deny (regardless of TrustPrivateNetworks)\n\t\t{false, false, \"https://bad.example/\"},\n\t\t{false, true, \"https://bad.example/\"},\n\t\t// non-listed non-private IP addr => deny 
(regardless of TrustPrivateNetworks)\n\t\t{false, false, \"https://1.2.3.4/\"},\n\t\t{false, true, \"https://1.2.3.4/\"},\n\t\t{false, true, \"https://[ab::cd]:1234/\"},\n\t\t// localhost or non-listed private IP addr => accept only if TrustPrivateNetworks is set\n\t\t{false, false, \"https://localhost/\"},\n\t\t{true, true, \"https://localhost/\"},\n\t\t{false, false, \"https://10.9.8.7:80/foo\"},\n\t\t{true, true, \"https://10.9.8.7:80/foo\"},\n\t\t{false, false, \"https://[::1]:80/foo\"},\n\t\t{true, true, \"https://[::1]:80/foo\"},\n\t\t{true, true, \"http://192.168.1.1/\"},\n\t\t{true, true, \"http://172.17.2.0/\"},\n\t\t// bad url => deny\n\t\t{false, true, \"https://10.1.1.1:blorp/foo\"},        // non-numeric port\n\t\t{false, true, \"https://app.example.com:blorp/foo\"}, // non-numeric port\n\t\t{false, true, \"https://[10.9.8.7]:80/foo\"},\n\t\t{false, true, \"https://]:443\"},\n\t\t{false, true, \"https://\"},\n\t\t{false, true, \"https:\"},\n\t\t{false, true, \"\"},\n\t\t// explicitly listed host but different port, protocol, or user/pass => deny\n\t\t{false, true, \"http://app.example.com/\"},\n\t\t{false, true, \"http://app.example.com:443/\"},\n\t\t{false, true, \"https://app.example.com:80/\"},\n\t\t{false, true, \"https://app.example.com:4433/\"},\n\t\t{false, true, \"https://u:p@app.example.com:443/foo?bar=baz\"},\n\t} {\n\t\tc.Logf(\"trial %+v\", trial)\n\t\ts.cluster.Login.TrustPrivateNetworks = trial.trustPrivate\n\t\terr := validateLoginRedirectTarget(s.cluster, trial.url)\n\t\tc.Check(err == nil, check.Equals, trial.permit)\n\t}\n\n}\n\nfunc getCallbackAuthInfo(c *check.C, railsSpy *arvadostest.Proxy) (authinfo rpc.UserSessionAuthInfo) {\n\tfor _, dump := range railsSpy.RequestDumps {\n\t\tc.Logf(\"spied request: %q\", dump)\n\t\tsplit := bytes.Split(dump, []byte(\"\\r\\n\\r\\n\"))\n\t\tc.Assert(split, check.HasLen, 2)\n\t\thdr, body := string(split[0]), string(split[1])\n\t\tif strings.Contains(hdr, \"POST /auth/controller/callback\") {\n\t\t\tvs, err := url.ParseQuery(body)\n\t\t\tc.Check(json.Unmarshal([]byte(vs.Get(\"auth_info\")), &authinfo), check.IsNil)\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tsort.Strings(authinfo.AlternateEmails)\n\t\t\treturn\n\t\t}\n\t}\n\tc.Error(\"callback not found\")\n\treturn\n}\n"
  },
  {
    "path": "lib/controller/localdb/login_pam.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n//go:build !static\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/msteinert/pam\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype pamLoginController struct {\n\tCluster *arvados.Cluster\n\tParent  *Conn\n}\n\nfunc (ctrl *pamLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {\n\treturn logout(ctx, ctrl.Cluster, opts)\n}\n\nfunc (ctrl *pamLoginController) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {\n\treturn arvados.LoginResponse{}, errors.New(\"interactive login is not available\")\n}\n\nfunc (ctrl *pamLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {\n\terrorMessage := \"\"\n\tsentPassword := false\n\ttx, err := pam.StartFunc(ctrl.Cluster.Login.PAM.Service, opts.Username, func(style pam.Style, message string) (string, error) {\n\t\tctxlog.FromContext(ctx).Debugf(\"pam conversation: style=%v message=%q\", style, message)\n\t\tswitch style {\n\t\tcase pam.ErrorMsg:\n\t\t\tctxlog.FromContext(ctx).WithField(\"Message\", message).Info(\"pam.ErrorMsg\")\n\t\t\terrorMessage = message\n\t\t\treturn \"\", nil\n\t\tcase pam.TextInfo:\n\t\t\tctxlog.FromContext(ctx).WithField(\"Message\", message).Info(\"pam.TextInfo\")\n\t\t\treturn \"\", nil\n\t\tcase pam.PromptEchoOn, pam.PromptEchoOff:\n\t\t\tsentPassword = true\n\t\t\treturn opts.Password, nil\n\t\tdefault:\n\t\t\treturn \"\", fmt.Errorf(\"unrecognized message style %d\", style)\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn arvados.APIClientAuthorization{}, err\n\t}\n\t// Check that the given credentials are valid.\n\terr = tx.Authenticate(pam.DisallowNullAuthtok)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"PAM: %s\", err)\n\t\tif errorMessage != \"\" {\n\t\t\t// Perhaps the error message in the\n\t\t\t// conversation is helpful.\n\t\t\terr = fmt.Errorf(\"%s; %q\", err, errorMessage)\n\t\t}\n\t\tif sentPassword {\n\t\t\terr = fmt.Errorf(\"%s (with username %q and password)\", err, opts.Username)\n\t\t} else {\n\t\t\t// This might hint that the username was\n\t\t\t// invalid.\n\t\t\terr = fmt.Errorf(\"%s (with username %q; password was never requested by PAM service)\", err, opts.Username)\n\t\t}\n\t\treturn arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(err, http.StatusUnauthorized)\n\t}\n\tif errorMessage != \"\" {\n\t\treturn arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(errors.New(errorMessage), http.StatusUnauthorized)\n\t}\n\t// Check that the account/user is permitted to access this host.\n\terr = tx.AcctMgmt(pam.DisallowNullAuthtok)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"PAM: %s\", err)\n\t\tif errorMessage != \"\" {\n\t\t\terr = fmt.Errorf(\"%s; %q\", err, errorMessage)\n\t\t}\n\t\treturn arvados.APIClientAuthorization{}, httpserver.ErrorWithStatus(err, http.StatusUnauthorized)\n\t}\n\tuser, err := tx.GetItem(pam.User)\n\tif err != nil {\n\t\treturn arvados.APIClientAuthorization{}, err\n\t}\n\temail := user\n\tif domain := ctrl.Cluster.Login.PAM.DefaultEmailDomain; domain != \"\" && !strings.Contains(email, \"@\") {\n\t\temail = email + 
\"@\" + domain\n\t}\n\tctxlog.FromContext(ctx).WithFields(logrus.Fields{\n\t\t\"user\":  user,\n\t\t\"email\": email,\n\t}).Debug(\"pam authentication succeeded\")\n\treturn ctrl.Parent.CreateAPIClientAuthorization(ctx, ctrl.Cluster.SystemRootToken, rpc.UserSessionAuthInfo{\n\t\tUsername: user,\n\t\tEmail:    email,\n\t})\n}\n"
  },
  {
    "path": "lib/controller/localdb/login_pam_static.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n//go:build static\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"errors\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\ntype pamLoginController struct {\n\tCluster *arvados.Cluster\n\tParent  *Conn\n}\n\nfunc (ctrl *pamLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {\n\treturn logout(ctx, ctrl.Cluster, opts)\n}\n\nfunc (ctrl *pamLoginController) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {\n\treturn arvados.LoginResponse{}, errors.New(\"interactive login is not available\")\n}\n\nfunc (ctrl *pamLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {\n\treturn arvados.APIClientAuthorization{}, errors.New(\"support not available due to static compilation\")\n}\n"
  },
  {
    "path": "lib/controller/localdb/login_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"encoding/json\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&loginSuite{})\n\ntype loginSuite struct{}\n\nfunc (s *loginSuite) TestValidateLoginRedirectTarget(c *check.C) {\n\tvar cluster arvados.Cluster\n\tfor _, trial := range []struct {\n\t\tpass    bool\n\t\twb1     string\n\t\twb2     string\n\t\ttrusted string\n\t\ttarget  string\n\t}{\n\t\t{true, \"https://wb1.example/\", \"https://wb2.example/\", \"\", \"https://wb2.example/\"},\n\t\t{true, \"https://wb1.example:443/\", \"https://wb2.example:443/\", \"\", \"https://wb2.example/\"},\n\t\t{true, \"https://wb1.example:443/\", \"https://wb2.example:443/\", \"\", \"https://wb2.example\"},\n\t\t{true, \"https://wb1.example:443\", \"https://wb2.example:443\", \"\", \"https://wb2.example/\"},\n\t\t{true, \"http://wb1.example:80/\", \"http://wb2.example:80/\", \"\", \"http://wb2.example/\"},\n\t\t{false, \"https://wb1.example:80/\", \"https://wb2.example:80/\", \"\", \"https://wb2.example/\"},\n\t\t{false, \"https://wb1.example:1234/\", \"https://wb2.example:1234/\", \"\", \"https://wb2.example/\"},\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"\", \"https://bad.wb2.example/\"},\n\t\t{true, \"https://wb1.example/\", \"https://wb2.example/\", \"https://good.wb2.example/\", \"https://good.wb2.example\"},\n\t\t{true, \"https://wb1.example/\", \"https://wb2.example/\", \"https://good.wb2.example:443/\", \"https://good.wb2.example\"},\n\t\t{true, \"https://wb1.example/\", \"https://wb2.example/\", \"https://good.wb2.example:443\", \"https://good.wb2.example/\"},\n\n\t\t{true, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"https://ok.wildcard.example/\"},\n\t\t{true, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"https://ok.ok.wildcard.example/\"},\n\t\t{true, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example:443\", \"https://ok.wildcard.example/\"},\n\t\t{true, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"https://ok.wildcard.example:443/\"},\n\t\t{true, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example:443\", \"https://ok.wildcard.example:443/\"},\n\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"http://wildcard.example/\"},\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"http://.wildcard.example/\"},\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"http://wrongscheme.wildcard.example/\"},\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"http://wrongscheme.wildcard.example:443/\"},\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"https://wrongport.wildcard.example:80/\"},\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"https://notmatching-wildcard.example/\"},\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"http://notmatching.wildcard.example/\"},\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example:443\", 
\"https://attacker.example/ok.wildcard.example/\"},\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"https://attacker.example/ok.wildcard.example/\"},\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"https://attacker.example/?https://ok.wildcard.example/\"},\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"https://attacker.example/#https://ok.wildcard.example/\"},\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*-wildcard.example\", \"https://notsupported-wildcard.example/\"},\n\t\t{false, \"https://wb1.example/\", \"https://wb2.example/\", \"https://*.wildcard.example\", \"https://[notipv6.wildcard.example]:443/\"},\n\t} {\n\t\tc.Logf(\"trial %+v\", trial)\n\t\t// We use json.Unmarshal() to load the test strings\n\t\t// because we're testing behavior when the config file\n\t\t// contains string X.\n\t\terr := json.Unmarshal([]byte(`\"`+trial.wb1+`\"`), &cluster.Services.Workbench1.ExternalURL)\n\t\tc.Assert(err, check.IsNil)\n\t\terr = json.Unmarshal([]byte(`\"`+trial.wb2+`\"`), &cluster.Services.Workbench2.ExternalURL)\n\t\tc.Assert(err, check.IsNil)\n\t\tif trial.trusted != \"\" {\n\t\t\terr = json.Unmarshal([]byte(`{\"`+trial.trusted+`\": {}}`), &cluster.Login.TrustedClients)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t}\n\t\terr = validateLoginRedirectTarget(&cluster, trial.target)\n\t\tc.Check(err == nil, check.Equals, trial.pass)\n\t}\n}\n"
  },
  {
    "path": "lib/controller/localdb/login_testuser.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"html/template\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype testLoginController struct {\n\tCluster *arvados.Cluster\n\tParent  *Conn\n}\n\nfunc (ctrl *testLoginController) Logout(ctx context.Context, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {\n\treturn logout(ctx, ctrl.Cluster, opts)\n}\n\nfunc (ctrl *testLoginController) Login(ctx context.Context, opts arvados.LoginOptions) (arvados.LoginResponse, error) {\n\ttmpl, err := template.New(\"form\").Parse(loginform)\n\tif err != nil {\n\t\treturn arvados.LoginResponse{}, err\n\t}\n\tvar buf bytes.Buffer\n\terr = tmpl.Execute(&buf, opts)\n\tif err != nil {\n\t\treturn arvados.LoginResponse{}, err\n\t}\n\treturn arvados.LoginResponse{HTML: buf}, nil\n}\n\nfunc (ctrl *testLoginController) UserAuthenticate(ctx context.Context, opts arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {\n\tfor username, user := range ctrl.Cluster.Login.Test.Users {\n\t\tif (opts.Username == username || opts.Username == user.Email) && opts.Password == user.Password {\n\t\t\tctxlog.FromContext(ctx).WithFields(logrus.Fields{\n\t\t\t\t\"username\": username,\n\t\t\t\t\"email\":    user.Email,\n\t\t\t}).Debug(\"test authentication succeeded\")\n\t\t\treturn ctrl.Parent.CreateAPIClientAuthorization(ctx, ctrl.Cluster.SystemRootToken, rpc.UserSessionAuthInfo{\n\t\t\t\tUsername: username,\n\t\t\t\tEmail:    user.Email,\n\t\t\t})\n\t\t}\n\t}\n\treturn arvados.APIClientAuthorization{}, fmt.Errorf(\"authentication failed for user %q with password len=%d\", opts.Username, len(opts.Password))\n}\n\nconst loginform = `\n<!doctype html>\n<html>\n  <head><title>Arvados test login</title>\n    <script>\n      async function authenticate(event) {\n        event.preventDefault()\n\tdocument.getElementById('error').innerHTML = ''\n\tconst resp = await fetch('/arvados/v1/users/authenticate', {\n\t  method: 'POST',\n\t  mode: 'same-origin',\n\t  headers: {'Content-Type': 'application/json'},\n\t  body: JSON.stringify({\n\t    username: document.getElementById('username').value,\n\t    password: document.getElementById('password').value,\n\t  }),\n\t})\n\tif (!resp.ok) {\n\t  document.getElementById('error').innerHTML = '<p>Authentication failed.</p><p>The \"test login\" users are defined in Clusters.[ClusterID].Login.Test.Users section of config.yml</p>'\n\t  return\n\t}\n\tvar redir = document.getElementById('return_to').value\n\tif (redir.indexOf('?') > 0) {\n\t  redir += '&'\n\t} else {\n\t  redir += '?'\n\t}\n        const respj = await resp.json()\n\tdocument.location = redir + \"api_token=v2/\" + respj.uuid + \"/\" + respj.api_token\n      }\n    </script>\n  </head>\n  <body>\n    <h3>Arvados test login</h3>\n    <form method=\"POST\">\n      <input id=\"return_to\" type=\"hidden\" name=\"return_to\" value=\"{{.ReturnTo}}\">\n      username <input id=\"username\" type=\"text\" name=\"username\" autofocus size=16>\n      password <input id=\"password\" type=\"password\" name=\"password\" size=16>\n      <input type=\"submit\" value=\"Log in\">\n      <br>\n      <p id=\"error\"></p>\n    </form>\n  </body>\n  <script>\n    document.getElementsByTagName('form')[0].onsubmit = authenticate\n  
</script>\n</html>\n`\n"
  },
  {
    "path": "lib/controller/localdb/login_testuser_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"database/sql\"\n\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&TestUserSuite{})\n\ntype TestUserSuite struct {\n\tlocaldbSuite\n}\n\nfunc (s *TestUserSuite) SetUpTest(c *check.C) {\n\ts.localdbSuite.SetUpTest(c)\n\ts.cluster.Login.Test.Enable = true\n\ts.cluster.Login.Test.Users = map[string]arvados.TestUser{\n\t\t\"valid\": {Email: \"valid@example.com\", Password: \"v@l1d\"},\n\t}\n\ts.localdb.loginController = &testLoginController{\n\t\tCluster: s.cluster,\n\t\tParent:  s.localdb,\n\t}\n}\n\nfunc (s *TestUserSuite) TestLogin(c *check.C) {\n\tfor _, trial := range []struct {\n\t\tsuccess  bool\n\t\tusername string\n\t\tpassword string\n\t}{\n\t\t{false, \"foo\", \"bar\"},\n\t\t{false, \"\", \"\"},\n\t\t{false, \"valid\", \"\"},\n\t\t{false, \"\", \"v@l1d\"},\n\t\t{true, \"valid\", \"v@l1d\"},\n\t\t{true, \"valid@example.com\", \"v@l1d\"},\n\t} {\n\t\tc.Logf(\"=== %#v\", trial)\n\t\tresp, err := s.localdb.UserAuthenticate(s.ctx, arvados.UserAuthenticateOptions{\n\t\t\tUsername: trial.username,\n\t\t\tPassword: trial.password,\n\t\t})\n\t\tif trial.success {\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tc.Check(resp.APIToken, check.Not(check.Equals), \"\")\n\t\t\tc.Check(resp.UUID, check.Matches, `zzzzz-gj3su-.*`)\n\t\t\tc.Check(resp.Scopes, check.DeepEquals, []string{\"all\"})\n\n\t\t\tauthinfo := getCallbackAuthInfo(c, s.railsSpy)\n\t\t\tc.Check(authinfo.Email, check.Equals, \"valid@example.com\")\n\t\t\tc.Check(authinfo.AlternateEmails, check.DeepEquals, []string(nil))\n\t\t} else {\n\t\t\tc.Check(err, check.ErrorMatches, `authentication failed.*`)\n\t\t}\n\t}\n}\n\nfunc (s *TestUserSuite) TestLoginForm(c *check.C) {\n\tresp, err := s.localdb.Login(s.ctx, arvados.LoginOptions{\n\t\tReturnTo: \"https://localhost:12345/example\",\n\t})\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.HTML.String(), check.Matches, `(?ms).*<form method=\"POST\".*`)\n\tc.Check(resp.HTML.String(), check.Matches, `(?ms).*<input id=\"return_to\" type=\"hidden\" name=\"return_to\" value=\"https://localhost:12345/example\">.*`)\n}\n\nfunc (s *TestUserSuite) TestExpireTokenOnLogout(c *check.C) {\n\ts.cluster.Login.TrustPrivateNetworks = true\n\treturnTo := \"https://[::1]:12345/logout\"\n\tfor _, trial := range []struct {\n\t\trequestToken      string\n\t\texpiringTokenUUID string\n\t\tshouldExpireToken bool\n\t}{\n\t\t// v2 token\n\t\t{arvadostest.ActiveTokenV2, arvadostest.ActiveTokenUUID, true},\n\t\t// v1 token\n\t\t{arvadostest.AdminToken, arvadostest.AdminTokenUUID, true},\n\t\t// inexistent v1 token -- logout shouldn't fail\n\t\t{\"thisdoesntexistasatoken\", \"\", false},\n\t\t// inexistent v2 token -- logout shouldn't fail\n\t\t{\"v2/some-fake-uuid/thisdoesntexistasatoken\", \"\", false},\n\t} {\n\t\tc.Logf(\"=== %#v\", trial)\n\t\tctx := ctrlctx.NewWithToken(s.ctx, s.cluster, trial.requestToken)\n\n\t\tvar tokenUUID string\n\t\tvar err error\n\t\tqry := `SELECT uuid FROM api_client_authorizations\n\t\t\tWHERE uuid=$1\n\t\t\t\tAND (least(expires_at, refreshes_at) IS NULL OR least(expires_at, refreshes_at) > current_timestamp AT TIME ZONE 'UTC')\n\t\t\tLIMIT 1`\n\n\t\tif trial.shouldExpireToken {\n\t\t\terr = s.tx.QueryRowContext(ctx, qry, trial.expiringTokenUUID).Scan(&tokenUUID)\n\t\t\tc.Check(err, 
check.IsNil)\n\t\t}\n\n\t\tresp, err := s.localdb.Logout(ctx, arvados.LogoutOptions{\n\t\t\tReturnTo: returnTo,\n\t\t})\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(resp.RedirectLocation, check.Equals, returnTo)\n\n\t\tif trial.shouldExpireToken {\n\t\t\terr = s.tx.QueryRowContext(ctx, qry, trial.expiringTokenUUID).Scan(&tokenUUID)\n\t\t\tc.Check(err, check.Equals, sql.ErrNoRows)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/controller/localdb/logout.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage localdb\n\nimport (\n\t\"context\"\n\t\"database/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\nfunc logout(ctx context.Context, cluster *arvados.Cluster, opts arvados.LogoutOptions) (arvados.LogoutResponse, error) {\n\terr := expireAPIClientAuthorization(ctx)\n\tif err != nil {\n\t\tctxlog.FromContext(ctx).Errorf(\"attempting to expire token on logout: %q\", err)\n\t\treturn arvados.LogoutResponse{}, httpserver.ErrorWithStatus(errors.New(\"could not expire token on logout\"), http.StatusInternalServerError)\n\t}\n\n\ttarget := opts.ReturnTo\n\tif target == \"\" {\n\t\tif cluster.Services.Workbench2.ExternalURL.Host != \"\" {\n\t\t\ttarget = cluster.Services.Workbench2.ExternalURL.String()\n\t\t} else {\n\t\t\ttarget = cluster.Services.Workbench1.ExternalURL.String()\n\t\t}\n\t} else if err := validateLoginRedirectTarget(cluster, target); err != nil {\n\t\treturn arvados.LogoutResponse{}, httpserver.ErrorWithStatus(fmt.Errorf(\"invalid return_to parameter: %s\", err), http.StatusBadRequest)\n\t}\n\treturn arvados.LogoutResponse{RedirectLocation: target}, nil\n}\n\nfunc expireAPIClientAuthorization(ctx context.Context) error {\n\tcreds, ok := auth.FromContext(ctx)\n\tif !ok {\n\t\t// Tests could be passing empty contexts\n\t\tctxlog.FromContext(ctx).Debugf(\"expireAPIClientAuthorization: credentials not found from context\")\n\t\treturn nil\n\t}\n\n\tif len(creds.Tokens) == 0 {\n\t\t// Old client may not have provided the token to expire\n\t\treturn nil\n\t}\n\n\ttx, err := ctrlctx.CurrentTx(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttoken := creds.Tokens[0]\n\ttokenSecret := token\n\tvar tokenUuid string\n\tif strings.HasPrefix(token, \"v2/\") {\n\t\ttokenParts := strings.Split(token, \"/\")\n\t\tif len(tokenParts) >= 3 {\n\t\t\ttokenUuid = tokenParts[1]\n\t\t\ttokenSecret = tokenParts[2]\n\t\t}\n\t}\n\n\tvar retrievedUuid string\n\terr = tx.QueryRowContext(ctx, `\n\t\tSELECT uuid\n\t\tFROM api_client_authorizations\n\t\tWHERE api_token=$1\n\t\t\tAND (LEAST(expires_at, refreshes_at) IS NULL OR LEAST(expires_at, refreshes_at) > current_timestamp AT TIME ZONE 'UTC')\n\t\tLIMIT 1`, tokenSecret).Scan(&retrievedUuid)\n\tif err == sql.ErrNoRows {\n\t\tctxlog.FromContext(ctx).Debugf(\"expireAPIClientAuthorization(%s): not found in database\", token)\n\t\treturn nil\n\t} else if err != nil {\n\t\tctxlog.FromContext(ctx).WithError(err).Debugf(\"expireAPIClientAuthorization(%s): database error\", token)\n\t\treturn err\n\t}\n\n\tif tokenUuid != \"\" && retrievedUuid != tokenUuid {\n\t\t// secret part matches, but UUID doesn't -- somewhat surprising\n\t\tctxlog.FromContext(ctx).Debugf(\"expireAPIClientAuthorization(%s): secret part found, but with different UUID: %s\", tokenSecret, retrievedUuid)\n\t\treturn nil\n\t}\n\n\tres, err := tx.ExecContext(ctx, \"UPDATE api_client_authorizations SET expires_at=current_timestamp AT TIME ZONE 'UTC' WHERE uuid=$1\", retrievedUuid)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trows, err := res.RowsAffected()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rows == 0 {\n\t\tctxlog.FromContext(ctx).Debugf(\"expireAPIClientAuthorization(%s): no rows were updated\", 
tokenSecret)\n\t\treturn fmt.Errorf(\"couldn't expire provided token\")\n\t} else if rows > 1 {\n\t\tctxlog.FromContext(ctx).Debugf(\"expireAPIClientAuthorization(%s): multiple (%d) rows updated\", tokenSecret, rows)\n\t} else {\n\t\tctxlog.FromContext(ctx).Debugf(\"expireAPIClientAuthorization(%s): ok\", tokenSecret)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "lib/controller/localdb/testdata/dsa.pub",
    "content": "ssh-dss AAAAB3NzaC1kc3MAAACBAIS5sFWjsFPK5yEa/TjXEEudJrBaFjQ6WvYLiJmh8AmCqWlC83ETv5gEFeIwJo8om8bat4n6l6IKkG4wDo7uxNN0lEWGnOBXatpWOcrJphb0PgYMstZnW7K5GBpTY52TDShx5OS5nvb9iJiQjd1/WQ63knmYoVZH3Ijhv6vDikL3AAAAFQDotNYD4D4IjS8BjJFk8qCGg1FWGQAAAIBlqZ/KwlJpJiekR2Yv+8k456kiFhPUasjeDqx+zGP//+0xNGx2yYzdkPlmvYrdG3YvRjA8KX5C+qJT9CfS1FMcY8/3cXWmDCxi3zKvaXjUcLk1nfVbhsPHdaebpSX3N+C6meehjoQIhYIgZghdPuWOgyGjwIavO9DYMlTGVhHRCgAAAIAjqJonYsmaSd3/0SoD2NGKBvRhngKcaTu63OLIY/V2kdg4Zrph7Ptx//S994rlhugLq68c0wnNoeq4vjVoRY8gDaCy8KXsk9Sq8THbxNseFeqa04txJJXe7g8/6nopfqrhi0NgpIyaNn/0BfqjWOErQuhzxhMqZ5if0aRi1k+g5A== tom@slab\n"
  },
  {
    "path": "lib/controller/localdb/testdata/ecdsa-sk.pub",
    "content": "sk-ecdsa-sha2-nistp256@openssh.com AAAAInNrLWVjZHNhLXNoYTItbmlzdHAyNTZAb3BlbnNzaC5jb20AAAAIbmlzdHAyNTYAAABBBFj1zodcmSKWeUgNxzDOv7m9TeLhNRb64wa9oQwQK4tFZzLQRgcsmaVQmMx/ZbY+ThZbHLHSpKRxaByINu99NKUAAAAEc3NoOg== tom@slab\n"
  },
  {
    "path": "lib/controller/localdb/testdata/ecdsa.pub",
    "content": "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDLajzRPnSI3FBChDvvNJyIBPdyA/nC7GWFWwizK93XL8HkQ5+X6D/xaqowq6iIPq/XHSdbZ3ebdb0OH81ovrCQ= tom@slab\n"
  },
  {
    "path": "lib/controller/localdb/testdata/ed25519-sk.pub",
    "content": "sk-ssh-ed25519@openssh.com AAAAGnNrLXNzaC1lZDI1NTE5QG9wZW5zc2guY29tAAAAIJMteBo9BvwQTeiBq4FvS4qJ83YjoCvKrH6EnvrOCILmAAAABHNzaDo= test key\n"
  },
  {
    "path": "lib/controller/localdb/testdata/ed25519.pub",
    "content": "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIElzlGk8QUevhJQ2mhf8p73lUAh044icWqssl3bMoCaT tom@slab\n"
  },
  {
    "path": "lib/controller/localdb/testdata/generate",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# This script uses ssh-keygen to generate an example public key for\n# each supported type, to be used by test cases. Private keys are\n# discarded. If ${keytype}.pub already exists, it is left alone.\n\nset -e\n\nerr=\nkeytypes=$(ssh-keygen -_ 2>&1 | grep -- -t | tr -d '[|]' | tr ' ' '\\n' | grep -vw t)\nfor keytype in ${keytypes[@]}; do\n    if [[ ! -e \"./${keytype}.pub\" ]]; then\n        if ssh-keygen -t \"${keytype}\" -f \"./${keytype}\" -N \"\"; then\n            # discard private key\n            rm \"./${keytype}\"\n        else\n            echo >&2 \"ssh-keygen -t ${keytype} failed\"\n            err=1\n        fi\n    fi\ndone\nexit $err\n"
  },
  {
    "path": "lib/controller/localdb/testdata/rsa.pub",
    "content": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCtlBJsNterzUR26k/3tbXi2LViRj0vPyyJ7msqyGtRjJKuMqZkVJz6GN42/+aESeHfJw9FNlwW4oMa3Z4BB5llvZSG8yhY1HXbBlK5sURjSo9tid/U+PlKPGqteiXTguXLj5PAwoAoQ4JnGKR/+YphWxuWy+VR4toLcuKG9pX5d6iwkmWU1/smUnF6+vq38Xrhv94EpeNmyTEPC6OijDdmcas3rwDGW/I2Vij/Bxdj9DY/tHLv9V+yznbV1YB9yxda0YeIGMa2d35dOIxBeWmXzAGczVNQeXE7ooFOH6zCyoJZ4HH/AhAZ9GHyNGsf72CM+WkTBUEYmBmRIDHtMXY32KxyreRWUU1l47md5gefkb4c57OI369AQed154SVQaoiiVqIXinXGGezmfa09nnaSelD54Hky71GC/qqMvzkv7pXkETB37hYC2z2NixXQ6pf21vRHZLAtA8LK9OB5yxdr9b5buMIdTLViKufr3pPk8bcJrlB7tilw5X/PUioWws= tom@slab\n"
  },
  {
    "path": "lib/controller/proxy.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage controller\n\nimport (\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\ntype proxy struct {\n\tName string // to use in Via header\n}\n\ntype HTTPError struct {\n\tMessage string\n\tCode    int\n}\n\nfunc (h HTTPError) Error() string {\n\treturn h.Message\n}\n\nvar dropHeaders = map[string]bool{\n\t// Headers that shouldn't be forwarded when proxying. See\n\t// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers\n\t\"Connection\":          true,\n\t\"Keep-Alive\":          true,\n\t\"Proxy-Authenticate\":  true,\n\t\"Proxy-Authorization\": true,\n\t// (comment/space here makes gofmt1.10 agree with gofmt1.11)\n\t\"TE\":      true,\n\t\"Trailer\": true,\n\t\"Upgrade\": true,\n\n\t// Headers that would interfere with Go's automatic\n\t// compression/decompression if we forwarded them.\n\t\"Accept-Encoding\":   true,\n\t\"Content-Encoding\":  true,\n\t\"Transfer-Encoding\": true,\n\n\t// Content-Length depends on encoding.\n\t\"Content-Length\": true,\n\n\t// Defend against Rails vulnerability CVE-2023-22795 -\n\t// we don't use this functionality anyway, so it costs us nothing.\n\t// <https://discuss.rubyonrails.org/t/cve-2023-22795-possible-redos-based-dos-vulnerability-in-action-dispatch/82118>\n\t\"If-None-Match\": true,\n}\n\ntype ResponseFilter func(*http.Response, error) (*http.Response, error)\n\n// Forward a request to upstream service, and return response or error.\nfunc (p *proxy) Do(\n\treqIn *http.Request,\n\turlOut *url.URL,\n\tclient *http.Client) (*http.Response, error) {\n\n\t// Copy headers from incoming request, then add/replace proxy\n\t// headers like Via and X-Forwarded-For.\n\thdrOut := http.Header{}\n\tfor k, v := range reqIn.Header {\n\t\tif !dropHeaders[k] {\n\t\t\thdrOut[k] = v\n\t\t}\n\t}\n\txff := \"\"\n\tfor _, xffIn := range reqIn.Header[\"X-Forwarded-For\"] {\n\t\tif xffIn != \"\" {\n\t\t\txff += xffIn + \",\"\n\t\t}\n\t}\n\txff += reqIn.RemoteAddr\n\thdrOut.Set(\"X-Forwarded-For\", xff)\n\tif hdrOut.Get(\"X-Forwarded-Proto\") == \"\" {\n\t\thdrOut.Set(\"X-Forwarded-Proto\", reqIn.URL.Scheme)\n\t}\n\thdrOut.Add(\"Via\", reqIn.Proto+\" arvados-controller\")\n\n\treqOut := (&http.Request{\n\t\tMethod: reqIn.Method,\n\t\tURL:    urlOut,\n\t\tHost:   reqIn.Host,\n\t\tHeader: hdrOut,\n\t\tBody:   reqIn.Body,\n\t}).WithContext(reqIn.Context())\n\treturn client.Do(reqOut)\n}\n\n// Copy a response (or error) to the downstream client\nfunc (p *proxy) ForwardResponse(w http.ResponseWriter, resp *http.Response, err error) (int64, error) {\n\tif err != nil {\n\t\tif he, ok := err.(HTTPError); ok {\n\t\t\thttpserver.Error(w, he.Message, he.Code)\n\t\t} else {\n\t\t\thttpserver.Error(w, err.Error(), http.StatusBadGateway)\n\t\t}\n\t\treturn 0, nil\n\t}\n\n\tdefer resp.Body.Close()\n\tfor k, v := range resp.Header {\n\t\tfor _, v := range v {\n\t\t\tw.Header().Add(k, v)\n\t\t}\n\t}\n\tw.WriteHeader(resp.StatusCode)\n\treturn io.Copy(w, resp.Body)\n}\n"
  },
  {
    "path": "lib/controller/rails_restart_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage controller\n\nimport (\n\t\"bytes\"\n\t\"crypto/sha256\"\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&railsRestartSuite{})\n\ntype railsRestartSuite struct{}\n\n// This tests RailsAPI, not controller -- but tests RailsAPI's\n// integration with passenger, so it needs to run against the\n// run-tests.sh environment where RailsAPI runs under passenger, not\n// in the Rails test environment.\nfunc (s *railsRestartSuite) TestConfigReload(c *check.C) {\n\thc := http.Client{Transport: &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}}\n\n\tconfdata, err := os.ReadFile(os.Getenv(\"ARVADOS_CONFIG\"))\n\tc.Assert(err, check.IsNil)\n\toldhash := fmt.Sprintf(\"%x\", sha256.Sum256(confdata))\n\tc.Logf(\"oldhash %s\", oldhash)\n\n\tldr := config.NewLoader(&bytes.Buffer{}, ctxlog.TestLogger(c))\n\tcfg, err := ldr.Load()\n\tc.Assert(err, check.IsNil)\n\tcc, err := cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\tvar metricsURL string\n\tfor u := range cc.Services.RailsAPI.InternalURLs {\n\t\tu := url.URL(u)\n\t\tmu, err := u.Parse(\"/metrics\")\n\t\tc.Assert(err, check.IsNil)\n\t\tmetricsURL = mu.String()\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, metricsURL, nil)\n\tc.Assert(err, check.IsNil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ManagementToken)\n\n\tresp, err := hc.Do(req)\n\tc.Assert(err, check.IsNil)\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tc.Assert(err, check.IsNil)\n\tc.Check(string(body), check.Matches, `(?ms).*`+oldhash+`.*`)\n\n\tf, err := os.OpenFile(os.Getenv(\"ARVADOS_CONFIG\"), os.O_WRONLY|os.O_APPEND, 0)\n\tc.Assert(err, check.IsNil)\n\t_, err = f.Write([]byte{'\\n'})\n\tc.Assert(err, check.IsNil)\n\terr = f.Close()\n\tc.Assert(err, check.IsNil)\n\tnewhash := fmt.Sprintf(\"%x\", sha256.Sum256(append(confdata, '\\n')))\n\tc.Logf(\"newhash %s\", newhash)\n\n\t// Wait for RailsAPI's 1 Hz reload_config thread to poll and\n\t// hit restart.txt\n\tpollstart := time.Now()\n\tfor deadline := time.Now().Add(20 * time.Second); time.Now().Before(deadline); time.Sleep(time.Second) {\n\t\tresp, err = hc.Do(req)\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer resp.Body.Close()\n\t\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\tc.Assert(err, check.IsNil)\n\t\tresp.Body.Close()\n\t\tif strings.Contains(string(body), newhash) {\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Logf(\"waited %s for rails to restart\", time.Now().Sub(pollstart))\n\tc.Check(string(body), check.Matches, `(?ms).*`+newhash+`.*`)\n}\n"
  },
  {
    "path": "lib/controller/railsproxy/railsproxy.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Package railsproxy implements Arvados APIs by proxying to the\n// RailsAPI server on the local machine.\npackage railsproxy\n\nimport (\n\t\"fmt\"\n\t\"net/url\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// FindRailsAPI always uses the rails API running on this node, for now.\nfunc FindRailsAPI(cluster *arvados.Cluster) (*url.URL, bool, error) {\n\tvar best *url.URL\n\tfor target := range cluster.Services.RailsAPI.InternalURLs {\n\t\ttarget := url.URL(target)\n\t\tbest = &target\n\t\tif strings.HasPrefix(target.Host, \"localhost:\") || strings.HasPrefix(target.Host, \"127.0.0.1:\") || strings.HasPrefix(target.Host, \"[::1]:\") {\n\t\t\tbreak\n\t\t}\n\t}\n\tif best == nil {\n\t\treturn nil, false, fmt.Errorf(\"Services.RailsAPI.InternalURLs is empty\")\n\t}\n\treturn best, cluster.TLS.Insecure, nil\n}\n\nfunc NewConn(cluster *arvados.Cluster) *rpc.Conn {\n\turl, insecure, err := FindRailsAPI(cluster)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn rpc.NewConn(cluster.ClusterID, url, insecure, rpc.PassthroughTokenProvider)\n}\n"
  },
  {
    "path": "lib/controller/router/checker_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage router\n\nimport (\n\t\"reflect\"\n\t\"runtime\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// a Gocheck checker for testing the name of a function. Used with\n// (*arvadostest.APIStub).Calls() to check that an HTTP request has\n// been routed to the correct arvados.API method.\n//\n//\tc.Check(bytes.NewBuffer().Read, isMethodNamed, \"Read\")\nvar isMethodNamed check.Checker = &chkIsMethodNamed{\n\tCheckerInfo: &check.CheckerInfo{\n\t\tName:   \"isMethodNamed\",\n\t\tParams: []string{\"obtained\", \"expected\"},\n\t},\n}\n\ntype chkIsMethodNamed struct{ *check.CheckerInfo }\n\nfunc (*chkIsMethodNamed) Check(params []interface{}, names []string) (bool, string) {\n\tmethodName := runtime.FuncForPC(reflect.ValueOf(params[0]).Pointer()).Name()\n\tregex := `.*\\)\\.` + params[1].(string) + `(-.*)?`\n\treturn check.Matches.Check([]interface{}{methodName, regex}, names)\n}\n"
  },
  {
    "path": "lib/controller/router/error.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage router\n\ntype errorWithStatus struct {\n\tcode int\n\terror\n}\n\nfunc (err errorWithStatus) HTTPStatus() int {\n\treturn err.code\n}\n\nfunc httpError(code int, err error) error {\n\treturn errorWithStatus{code: code, error: err}\n}\n"
  },
  {
    "path": "lib/controller/router/request.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage router\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"mime\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/gorilla/mux\"\n)\n\nfunc guessAndParse(k, v string) (interface{}, error) {\n\t// All of these form values arrive as strings, so we need some\n\t// type-guessing to accept non-string inputs:\n\t//\n\t// Values for parameters that take ints (limit=1) or bools\n\t// (include_trash=1) are parsed accordingly.\n\t//\n\t// \"null\" and \"\" are nil.\n\t//\n\t// Values that look like JSON objects, arrays, or strings are\n\t// parsed as JSON.\n\t//\n\t// The rest are left as strings.\n\tswitch {\n\tcase intParams[k]:\n\t\treturn strconv.ParseInt(v, 10, 64)\n\tcase boolParams[k]:\n\t\treturn stringToBool(v), nil\n\tcase v == \"null\" || v == \"\":\n\t\treturn nil, nil\n\tcase strings.HasPrefix(v, \"[\"):\n\t\tvar j []interface{}\n\t\terr := json.Unmarshal([]byte(v), &j)\n\t\treturn j, err\n\tcase strings.HasPrefix(v, \"{\"):\n\t\tvar j map[string]interface{}\n\t\terr := json.Unmarshal([]byte(v), &j)\n\t\treturn j, err\n\tcase strings.HasPrefix(v, \"\\\"\"):\n\t\tvar j string\n\t\terr := json.Unmarshal([]byte(v), &j)\n\t\treturn j, err\n\tdefault:\n\t\treturn v, nil\n\t}\n\t// TODO: Need to accept \"?foo[]=bar&foo[]=baz\" as\n\t// foo=[\"bar\",\"baz\"]?\n}\n\n// Return a map of incoming HTTP request parameters. Also load\n// parameters into opts, unless opts is nil.\n//\n// If the request has a parameter whose name is attrsKey (e.g.,\n// \"collection\"), it is renamed to \"attrs\".\nfunc (rtr *router) loadRequestParams(req *http.Request, attrsKey string, opts interface{}) (map[string]interface{}, error) {\n\t// Here we call ParseForm and ParseMultipartForm explicitly\n\t// (even though ParseMultipartForm calls ParseForm if\n\t// necessary) to ensure we catch errors encountered in\n\t// ParseForm. 
In the non-multipart-form case,\n\t// ParseMultipartForm returns ErrNotMultipart and hides the\n\t// ParseForm error.\n\terr := req.ParseForm()\n\tif err == nil {\n\t\terr = req.ParseMultipartForm(int64(rtr.config.MaxRequestSize))\n\t\tif err == http.ErrNotMultipart {\n\t\t\terr = nil\n\t\t}\n\t}\n\tif err != nil {\n\t\tif err.Error() == \"http: request body too large\" {\n\t\t\treturn nil, httpError(http.StatusRequestEntityTooLarge, err)\n\t\t} else {\n\t\t\treturn nil, httpError(http.StatusBadRequest, err)\n\t\t}\n\t}\n\tparams := map[string]interface{}{}\n\n\t// Load parameters from req.Form, which (after\n\t// req.ParseForm()) includes the query string and -- when\n\t// Content-Type is application/x-www-form-urlencoded -- the\n\t// request body.\n\tfor k, values := range req.Form {\n\t\tfor _, v := range values {\n\t\t\tparams[k], err = guessAndParse(k, v)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\t// Decode body as JSON if Content-Type request header is\n\t// missing or application/json.\n\tmt := req.Header.Get(\"Content-Type\")\n\tif ct, _, err := mime.ParseMediaType(mt); err != nil && mt != \"\" {\n\t\treturn nil, fmt.Errorf(\"error parsing media type %q: %s\", mt, err)\n\t} else if (ct == \"application/json\" || mt == \"\") && req.ContentLength != 0 {\n\t\tjsonParams := map[string]interface{}{}\n\t\terr := json.NewDecoder(req.Body).Decode(&jsonParams)\n\t\tif err != nil {\n\t\t\treturn nil, httpError(http.StatusBadRequest, err)\n\t\t}\n\t\tfor k, v := range jsonParams {\n\t\t\tswitch v := v.(type) {\n\t\t\tcase string:\n\t\t\t\t// The Ruby \"arv\" cli tool sends a\n\t\t\t\t// JSON-encoded params map with\n\t\t\t\t// JSON-encoded values.\n\t\t\t\tdec, err := guessAndParse(k, v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tjsonParams[k] = dec\n\t\t\t\tparams[k] = dec\n\t\t\tdefault:\n\t\t\t\tparams[k] = v\n\t\t\t}\n\t\t}\n\t\tif attrsKey != \"\" && params[attrsKey] == nil {\n\t\t\t// Copy top-level parameters from JSON request\n\t\t\t// body into params[attrsKey]. 
Some SDKs rely\n\t\t\t// on this Rails API feature; see\n\t\t\t// https://api.rubyonrails.org/v5.2.1/classes/ActionController/ParamsWrapper.html\n\t\t\tparams[attrsKey] = jsonParams\n\t\t}\n\t}\n\n\tfor k, v := range mux.Vars(req) {\n\t\tparams[k] = v\n\t}\n\n\tif v, ok := params[attrsKey]; ok && attrsKey != \"\" {\n\t\tparams[\"attrs\"] = v\n\t\tdelete(params, attrsKey)\n\t}\n\n\tfor _, paramname := range []string{\"include\", \"order\"} {\n\t\t// We must accept strings (\"foo, bar desc\") and arrays\n\t\t// ([\"foo\", \"bar desc\"]) because RailsAPI does.\n\t\t// Convert to an array here before trying to unmarshal\n\t\t// into options structs.\n\t\tif val, ok := params[paramname].(string); ok {\n\t\t\tif val == \"\" {\n\t\t\t\tdelete(params, paramname)\n\t\t\t} else {\n\t\t\t\tparams[paramname] = strings.Split(val, \",\")\n\t\t\t}\n\t\t}\n\t}\n\n\tif opts != nil {\n\t\t// Load all path, query, and form params into opts.\n\t\terr = rtr.transcode(params, opts)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"transcode: %w\", err)\n\t\t}\n\n\t\t// Special case: if opts has Method or Header fields, load the\n\t\t// request method/header.\n\t\terr = rtr.transcode(struct {\n\t\t\tMethod string\n\t\t\tHeader http.Header\n\t\t}{req.Method, req.Header}, opts)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"transcode: %w\", err)\n\t\t}\n\t}\n\n\treturn params, nil\n}\n\n// Copy src to dst, using json as an intermediate format in order to\n// invoke src's json-marshaling and dst's json-unmarshaling behaviors.\nfunc (rtr *router) transcode(src interface{}, dst interface{}) error {\n\tvar errw error\n\tpr, pw := io.Pipe()\n\tgo func() {\n\t\tdefer pw.Close()\n\t\terrw = json.NewEncoder(pw).Encode(src)\n\t}()\n\tdefer pr.Close()\n\terr := json.NewDecoder(pr).Decode(dst)\n\tif errw != nil {\n\t\treturn errw\n\t}\n\treturn err\n}\n\nvar intParams = map[string]bool{\n\t\"limit\":  true,\n\t\"offset\": true,\n}\n\nvar boolParams = map[string]bool{\n\t\"distinct\":                true,\n\t\"ensure_unique_name\":      true,\n\t\"include_trash\":           true,\n\t\"include_old_versions\":    true,\n\t\"redirect_to_new_user\":    true,\n\t\"send_notification_email\": true,\n\t\"bypass_federation\":       true,\n\t\"recursive\":               true,\n\t\"exclude_home_project\":    true,\n\t\"no_forward\":              true,\n}\n\nfunc stringToBool(s string) bool {\n\tswitch s {\n\tcase \"\", \"false\", \"0\":\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}\n"
  },
  {
    "path": "lib/controller/router/request_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage router\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"io\"\n\t\"mime/multipart\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\ntype testReq struct {\n\tmethod   string\n\tpath     string\n\ttoken    string // default is ActiveTokenV2; use noToken to omit\n\tparam    map[string]interface{}\n\tattrs    map[string]interface{}\n\tattrsKey string\n\theader   http.Header\n\n\t// variations on request formatting\n\tjson            bool\n\tjsonAttrsTop    bool\n\tjsonStringParam bool\n\ttokenInBody     bool\n\ttokenInQuery    bool\n\tnoContentType   bool\n\n\tbody        *bytes.Buffer // provided by caller\n\tbodyContent []byte        // set by (*testReq)Request() if body not provided by caller\n}\n\nconst noToken = \"(no token)\"\n\nfunc (tr *testReq) Request() *http.Request {\n\tparam := map[string]interface{}{}\n\tfor k, v := range tr.param {\n\t\tparam[k] = v\n\t}\n\n\tvar body *bytes.Buffer\n\tif tr.body != nil {\n\t\t// caller provided a buffer\n\t\tbody = tr.body\n\t} else if tr.json {\n\t\tif tr.jsonAttrsTop {\n\t\t\tfor k, v := range tr.attrs {\n\t\t\t\tif tr.jsonStringParam {\n\t\t\t\t\tj, err := json.Marshal(v)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t\tparam[k] = string(j)\n\t\t\t\t} else {\n\t\t\t\t\tparam[k] = v\n\t\t\t\t}\n\t\t\t}\n\t\t} else if tr.attrs != nil {\n\t\t\tif tr.jsonStringParam {\n\t\t\t\tj, err := json.Marshal(tr.attrs)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tparam[tr.attrsKey] = string(j)\n\t\t\t} else {\n\t\t\t\tparam[tr.attrsKey] = tr.attrs\n\t\t\t}\n\t\t}\n\t\tbody = bytes.NewBuffer(nil)\n\t\terr := json.NewEncoder(body).Encode(param)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ttr.bodyContent = body.Bytes()\n\t} else {\n\t\tvalues := make(url.Values)\n\t\tfor k, v := range param {\n\t\t\tif vs, ok := v.(string); ok && !tr.jsonStringParam {\n\t\t\t\tvalues.Set(k, vs)\n\t\t\t} else {\n\t\t\t\tjv, err := json.Marshal(v)\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tvalues.Set(k, string(jv))\n\t\t\t}\n\t\t}\n\t\tif tr.attrs != nil {\n\t\t\tjattrs, err := json.Marshal(tr.attrs)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tvalues.Set(tr.attrsKey, string(jattrs))\n\t\t}\n\t\tbody = bytes.NewBuffer(nil)\n\t\tio.WriteString(body, values.Encode())\n\t\ttr.bodyContent = body.Bytes()\n\t}\n\tmethod := tr.method\n\tif method == \"\" {\n\t\tmethod = \"GET\"\n\t}\n\tpath := tr.path\n\tif path == \"\" {\n\t\tpath = \"example/test/path\"\n\t}\n\treq := httptest.NewRequest(method, \"https://an.example/\"+path, body)\n\ttoken := tr.token\n\tif token == \"\" {\n\t\ttoken = arvadostest.ActiveTokenV2\n\t}\n\tif token != noToken {\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+token)\n\t}\n\tif tr.json {\n\t\treq.Header.Set(\"Content-Type\", \"application/json\")\n\t} else if tr.header.Get(\"Content-Type\") == \"\" {\n\t\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t}\n\tfor k, v := range tr.header {\n\t\treq.Header[k] = append([]string(nil), v...)\n\t}\n\treturn req\n}\n\nfunc (s *RouterSuite) TestAttrsInBody(c *check.C) {\n\tattrs := map[string]interface{}{\"foo\": \"bar\"}\n\n\tmultipartBody := new(bytes.Buffer)\n\tmultipartWriter := 
multipart.NewWriter(multipartBody)\n\tmultipartWriter.WriteField(\"attrs\", `{\"foo\":\"bar\"}`)\n\tmultipartWriter.Close()\n\n\tfor _, tr := range []testReq{\n\t\t{attrsKey: \"model_name\", json: true, attrs: attrs},\n\t\t{attrsKey: \"model_name\", json: true, attrs: attrs, jsonAttrsTop: true},\n\t\t{attrsKey: \"model_name\", json: true, attrs: attrs, jsonAttrsTop: true, jsonStringParam: true},\n\t\t{attrsKey: \"model_name\", json: true, attrs: attrs, jsonAttrsTop: false, jsonStringParam: true},\n\t\t{body: multipartBody, header: http.Header{\"Content-Type\": []string{multipartWriter.FormDataContentType()}}},\n\t} {\n\t\tc.Logf(\"tr: %#v\", tr)\n\t\treq := tr.Request()\n\t\tvar opts struct{ Attrs struct{ Foo string } }\n\t\tparams, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)\n\t\tc.Logf(\"params: %#v\", params)\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(params, check.NotNil)\n\t\tc.Check(opts.Attrs.Foo, check.Equals, \"bar\")\n\t\tif c.Check(params[\"attrs\"], check.FitsTypeOf, map[string]interface{}{}) {\n\t\t\tc.Check(params[\"attrs\"].(map[string]interface{})[\"foo\"], check.Equals, \"bar\")\n\t\t}\n\t}\n}\n\nfunc (s *RouterSuite) TestBoolParam(c *check.C) {\n\ttestKey := \"ensure_unique_name\"\n\n\tfor i, tr := range []testReq{\n\t\t{method: \"POST\", param: map[string]interface{}{testKey: false}, json: true},\n\t\t{method: \"POST\", param: map[string]interface{}{testKey: false}},\n\t\t{method: \"POST\", param: map[string]interface{}{testKey: \"false\"}},\n\t\t{method: \"POST\", param: map[string]interface{}{testKey: \"0\"}},\n\t\t{method: \"POST\", param: map[string]interface{}{testKey: \"\"}},\n\t} {\n\t\tc.Logf(\"#%d, tr: %#v\", i, tr)\n\t\treq := tr.Request()\n\t\tc.Logf(\"tr.body: %s\", tr.bodyContent)\n\t\tvar opts struct{ EnsureUniqueName bool }\n\t\tparams, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)\n\t\tc.Logf(\"params: %#v\", params)\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(opts.EnsureUniqueName, check.Equals, false)\n\t\tif c.Check(params, check.NotNil) {\n\t\t\tc.Check(params[testKey], check.Equals, false)\n\t\t}\n\t}\n\n\tfor i, tr := range []testReq{\n\t\t{method: \"POST\", param: map[string]interface{}{testKey: true}, json: true},\n\t\t{method: \"POST\", param: map[string]interface{}{testKey: true}},\n\t\t{method: \"POST\", param: map[string]interface{}{testKey: \"true\"}},\n\t\t{method: \"POST\", param: map[string]interface{}{testKey: \"1\"}},\n\t} {\n\t\tc.Logf(\"#%d, tr: %#v\", i, tr)\n\t\treq := tr.Request()\n\t\tc.Logf(\"tr.body: %s\", tr.bodyContent)\n\t\tvar opts struct {\n\t\t\tEnsureUniqueName bool `json:\"ensure_unique_name\"`\n\t\t}\n\t\tparams, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)\n\t\tc.Logf(\"params: %#v\", params)\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(opts.EnsureUniqueName, check.Equals, true)\n\t\tif c.Check(params, check.NotNil) {\n\t\t\tc.Check(params[testKey], check.Equals, true)\n\t\t}\n\t}\n}\n\nfunc (s *RouterSuite) TestStringOrArrayParam(c *check.C) {\n\tfor _, paramname := range []string{\"order\", \"include\"} {\n\t\tfor i, tr := range []testReq{\n\t\t\t{method: \"POST\", param: map[string]interface{}{paramname: \"\"}, json: true},\n\t\t\t{method: \"POST\", param: map[string]interface{}{paramname: \"\"}, json: false},\n\t\t\t{method: \"POST\", param: map[string]interface{}{paramname: []string{}}, json: true},\n\t\t\t{method: \"POST\", param: map[string]interface{}{paramname: []string{}}, json: false},\n\t\t\t{method: \"POST\", param: map[string]interface{}{}, json: true},\n\t\t\t{method: 
\"POST\", param: map[string]interface{}{}, json: false},\n\t\t} {\n\t\t\tc.Logf(\"%s #%d, tr: %#v\", paramname, i, tr)\n\t\t\treq := tr.Request()\n\t\t\tc.Logf(\"tr.body: %s\", tr.bodyContent)\n\t\t\tparams, err := s.rtr.loadRequestParams(req, tr.attrsKey, nil)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(params, check.NotNil)\n\t\t\tif order, ok := params[paramname]; ok && order != nil {\n\t\t\t\tc.Check(order, check.DeepEquals, []interface{}{})\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i, tr := range []testReq{\n\t\t{method: \"POST\", param: map[string]interface{}{\"order\": \"foo,bar desc\"}, json: true},\n\t\t{method: \"POST\", param: map[string]interface{}{\"order\": \"foo,bar desc\"}, json: false},\n\t\t{method: \"POST\", param: map[string]interface{}{\"order\": \"[\\\"foo\\\", \\\"bar desc\\\"]\"}, json: false},\n\t\t{method: \"POST\", param: map[string]interface{}{\"order\": []string{\"foo\", \"bar desc\"}}, json: true},\n\t\t{method: \"POST\", param: map[string]interface{}{\"order\": []string{\"foo\", \"bar desc\"}}, json: false},\n\t} {\n\t\tc.Logf(\"#%d, tr: %#v\", i, tr)\n\t\treq := tr.Request()\n\t\tc.Logf(\"tr.body: %s\", tr.bodyContent)\n\t\tvar opts arvados.ListOptions\n\t\tparams, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(opts.Order, check.DeepEquals, []string{\"foo\", \"bar desc\"})\n\t\tif _, ok := params[\"order\"].([]string); ok {\n\t\t\tc.Check(params[\"order\"], check.DeepEquals, []string{\"foo\", \"bar desc\"})\n\t\t} else {\n\t\t\tc.Check(params[\"order\"], check.DeepEquals, []interface{}{\"foo\", \"bar desc\"})\n\t\t}\n\t}\n\n\tfor i, tr := range []testReq{\n\t\t{method: \"POST\", param: map[string]interface{}{\"include\": \"container_uuid,owner_uuid\"}, json: true},\n\t\t{method: \"POST\", param: map[string]interface{}{\"include\": \"container_uuid,owner_uuid\"}, json: false},\n\t\t{method: \"POST\", param: map[string]interface{}{\"include\": \"[\\\"container_uuid\\\", \\\"owner_uuid\\\"]\"}, json: false},\n\t\t{method: \"POST\", param: map[string]interface{}{\"include\": []string{\"container_uuid\", \"owner_uuid\"}}, json: true},\n\t\t{method: \"POST\", param: map[string]interface{}{\"include\": []string{\"container_uuid\", \"owner_uuid\"}}, json: false},\n\t} {\n\t\tc.Logf(\"#%d, tr: %#v\", i, tr)\n\t\t{\n\t\t\treq := tr.Request()\n\t\t\tc.Logf(\"tr.body: %s\", tr.bodyContent)\n\t\t\tvar opts arvados.ListOptions\n\t\t\tparams, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Check(opts.Include, check.DeepEquals, []string{\"container_uuid\", \"owner_uuid\"})\n\t\t\tif _, ok := params[\"include\"].([]string); ok {\n\t\t\t\tc.Check(params[\"include\"], check.DeepEquals, []string{\"container_uuid\", \"owner_uuid\"})\n\t\t\t} else {\n\t\t\t\tc.Check(params[\"include\"], check.DeepEquals, []interface{}{\"container_uuid\", \"owner_uuid\"})\n\t\t\t}\n\t\t}\n\t\t{\n\t\t\treq := tr.Request()\n\t\t\tc.Logf(\"tr.body: %s\", tr.bodyContent)\n\t\t\tvar opts arvados.GroupContentsOptions\n\t\t\tparams, err := s.rtr.loadRequestParams(req, tr.attrsKey, &opts)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Check(opts.Include, check.DeepEquals, []string{\"container_uuid\", \"owner_uuid\"})\n\t\t\tif _, ok := params[\"include\"].([]string); ok {\n\t\t\t\tc.Check(params[\"include\"], check.DeepEquals, []string{\"container_uuid\", \"owner_uuid\"})\n\t\t\t} else {\n\t\t\t\tc.Check(params[\"include\"], check.DeepEquals, []interface{}{\"container_uuid\", 
\"owner_uuid\"})\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/controller/router/response.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage router\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\nconst rfc3339NanoFixed = \"2006-01-02T15:04:05.000000000Z07:00\"\n\ntype responseOptions struct {\n\tSelect []string\n\tCount  string\n}\n\nfunc (rtr *router) responseOptions(opts interface{}) (responseOptions, error) {\n\tvar rOpts responseOptions\n\tswitch opts := opts.(type) {\n\tcase *arvados.CreateOptions:\n\t\trOpts.Select = opts.Select\n\tcase *arvados.UpdateOptions:\n\t\trOpts.Select = opts.Select\n\tcase *arvados.GetOptions:\n\t\trOpts.Select = opts.Select\n\tcase *arvados.ListOptions:\n\t\trOpts.Select = opts.Select\n\t\trOpts.Count = opts.Count\n\tcase *arvados.GroupContentsOptions:\n\t\trOpts.Select = opts.Select\n\t\trOpts.Count = opts.Count\n\t}\n\treturn rOpts, nil\n}\n\nfunc applySelectParam(selectParam []string, orig map[string]interface{}) map[string]interface{} {\n\tif len(selectParam) == 0 {\n\t\treturn orig\n\t}\n\tselected := map[string]interface{}{}\n\tfor _, attr := range selectParam {\n\t\tif v, ok := orig[attr]; ok {\n\t\t\tselected[attr] = v\n\t\t}\n\t}\n\t// Some keys are always preserved, even if not requested\n\tfor _, k := range []string{\"etag\", \"kind\", \"writable_by\"} {\n\t\tif v, ok := orig[k]; ok {\n\t\t\tselected[k] = v\n\t\t}\n\t}\n\treturn selected\n}\n\nfunc (rtr *router) sendResponse(w http.ResponseWriter, req *http.Request, resp interface{}, opts responseOptions) {\n\tvar tmp map[string]interface{}\n\n\tif resp, ok := resp.(http.Handler); ok {\n\t\t// resp knows how to write its own http response\n\t\t// header and body.\n\t\tresp.ServeHTTP(w, req)\n\t\treturn\n\t}\n\n\terr := rtr.transcode(resp, &tmp)\n\tif err != nil {\n\t\trtr.sendError(w, err)\n\t\treturn\n\t}\n\n\trespKind := kind(resp)\n\tif respKind != \"\" {\n\t\ttmp[\"kind\"] = respKind\n\t}\n\tif included, ok := tmp[\"included\"]; ok && included == nil {\n\t\ttmp[\"included\"] = make([]interface{}, 0)\n\t}\n\tdefaultItemKind := \"\"\n\tif strings.HasSuffix(respKind, \"List\") {\n\t\tdefaultItemKind = strings.TrimSuffix(respKind, \"List\")\n\t}\n\n\tif _, isListResponse := tmp[\"items\"].([]interface{}); isListResponse {\n\t\titems, _ := tmp[\"items\"].([]interface{})\n\t\tincluded, _ := tmp[\"included\"].([]interface{})\n\t\tfor _, slice := range [][]interface{}{items, included} {\n\t\t\tfor i, item := range slice {\n\t\t\t\t// Fill in \"kind\" by inspecting UUID/PDH if\n\t\t\t\t// possible; fall back on assuming each\n\t\t\t\t// Items[] entry in an \"arvados#fooList\"\n\t\t\t\t// response should have kind=\"arvados#foo\".\n\t\t\t\titem, _ := item.(map[string]interface{})\n\t\t\t\tinfix := \"\"\n\t\t\t\tif uuid, _ := item[\"uuid\"].(string); len(uuid) == 27 {\n\t\t\t\t\tinfix = uuid[6:11]\n\t\t\t\t}\n\t\t\t\tif k := kind(infixMap[infix]); k != \"\" {\n\t\t\t\t\titem[\"kind\"] = k\n\t\t\t\t} else if pdh, _ := item[\"portable_data_hash\"].(string); pdh != \"\" {\n\t\t\t\t\titem[\"kind\"] = \"arvados#collection\"\n\t\t\t\t} else if defaultItemKind != \"\" {\n\t\t\t\t\titem[\"kind\"] = defaultItemKind\n\t\t\t\t}\n\t\t\t\titem = applySelectParam(opts.Select, item)\n\t\t\t\trtr.mungeItemFields(item)\n\t\t\t\tslice[i] = item\n\t\t\t}\n\t\t}\n\t\tif opts.Count == \"none\" || req.URL.Path == \"/arvados/v1/computed_permissions\" {\n\t\t\tdelete(tmp, 
\"items_available\")\n\t\t}\n\t} else {\n\t\ttmp = applySelectParam(opts.Select, tmp)\n\t\trtr.mungeItemFields(tmp)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tenc := json.NewEncoder(w)\n\tenc.SetEscapeHTML(false)\n\tenc.Encode(tmp)\n}\n\nfunc (rtr *router) sendError(w http.ResponseWriter, err error) {\n\tcode := http.StatusInternalServerError\n\tif err, ok := err.(interface{ HTTPStatus() int }); ok {\n\t\tcode = err.HTTPStatus()\n\t}\n\thttpserver.Error(w, err.Error(), code)\n}\n\nvar infixMap = map[string]interface{}{\n\t\"2x53u\": arvados.VirtualMachine{},\n\t\"4zz18\": arvados.Collection{},\n\t\"57u5n\": arvados.Link{},\n\t\"7fd4e\": arvados.Workflow{},\n\t\"bi6l4\": arvados.KeepService{},\n\t\"dz642\": arvados.Container{},\n\t\"fngyi\": arvados.AuthorizedKey{},\n\t\"gj3su\": arvados.APIClientAuthorization{},\n\t\"j7d0g\": arvados.Group{},\n\t\"o0j2j\": arvados.Link{},\n\t\"tpzed\": arvados.User{},\n\t\"xvhdp\": arvados.ContainerRequest{},\n}\n\nvar specialKindTransforms = map[string]string{\n\t\"arvados.APIClientAuthorization\":     \"arvados#apiClientAuthorization\",\n\t\"arvados.APIClientAuthorizationList\": \"arvados#apiClientAuthorizationList\",\n}\n\nvar mungeKind = regexp.MustCompile(`\\..`)\n\nfunc kind(resp interface{}) string {\n\tt := fmt.Sprintf(\"%T\", resp)\n\tif !strings.HasPrefix(t, \"arvados.\") {\n\t\treturn \"\"\n\t}\n\tif k, ok := specialKindTransforms[t]; ok {\n\t\treturn k\n\t}\n\treturn mungeKind.ReplaceAllStringFunc(t, func(s string) string {\n\t\t// \"arvados.CollectionList\" => \"arvados#collectionList\"\n\t\treturn \"#\" + strings.ToLower(s[1:])\n\t})\n}\n\nfunc (rtr *router) mungeItemFields(tmp map[string]interface{}) {\n\tfor k, v := range tmp {\n\t\tif strings.HasSuffix(k, \"_at\") {\n\t\t\t// Format non-nil timestamps as\n\t\t\t// rfc3339NanoFixed (otherwise they would use\n\t\t\t// the default time encoding, which omits\n\t\t\t// trailing zeroes).\n\t\t\tswitch tv := v.(type) {\n\t\t\tcase *time.Time:\n\t\t\t\tif tv == nil || tv.IsZero() {\n\t\t\t\t\ttmp[k] = nil\n\t\t\t\t} else {\n\t\t\t\t\ttmp[k] = tv.Format(rfc3339NanoFixed)\n\t\t\t\t}\n\t\t\tcase time.Time:\n\t\t\t\tif tv.IsZero() {\n\t\t\t\t\ttmp[k] = nil\n\t\t\t\t} else {\n\t\t\t\t\ttmp[k] = tv.Format(rfc3339NanoFixed)\n\t\t\t\t}\n\t\t\tcase string:\n\t\t\t\tif tv == \"\" {\n\t\t\t\t\ttmp[k] = nil\n\t\t\t\t} else if t, err := time.Parse(time.RFC3339Nano, tv); err != nil {\n\t\t\t\t\t// pass through an invalid time value (?)\n\t\t\t\t} else if t.IsZero() {\n\t\t\t\t\ttmp[k] = nil\n\t\t\t\t} else {\n\t\t\t\t\ttmp[k] = t.Format(rfc3339NanoFixed)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Arvados API spec says when these fields are empty\n\t\t// they appear in responses as null, rather than a\n\t\t// zero value.\n\t\tswitch k {\n\t\tcase \"output_uuid\", \"output_name\", \"log_uuid\", \"description\", \"requesting_container_uuid\", \"container_uuid\", \"modified_by_client_uuid\", \"frozen_by_uuid\":\n\t\t\tif v == \"\" {\n\t\t\t\ttmp[k] = nil\n\t\t\t}\n\t\tcase \"container_count_max\":\n\t\t\tif v == float64(0) {\n\t\t\t\ttmp[k] = nil\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/controller/router/router.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage router\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/api\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/gorilla/mux\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype router struct {\n\tmux     *mux.Router\n\tbackend arvados.API\n\tconfig  Config\n}\n\ntype Config struct {\n\t// Services.ContainerWebServices section from cluster config.\n\tContainerWebServices *arvados.ServiceWithPortRange\n\n\t// Return an error if request body exceeds this size. 0 means\n\t// unlimited.\n\tMaxRequestSize int\n\n\t// If wrapCalls is not nil, it is called once for each API\n\t// method, and the returned method is used in its place. This\n\t// can be used to install hooks before and after each API call\n\t// and alter responses; see localdb.WrapCallsInTransaction for\n\t// an example.\n\tWrapCalls func(api.RoutableFunc) api.RoutableFunc\n}\n\n// New returns a new router (which implements the http.Handler\n// interface) that serves requests by calling Arvados API methods on\n// the given backend.\nfunc New(backend arvados.API, config Config) *router {\n\trtr := &router{\n\t\tmux:     mux.NewRouter(),\n\t\tbackend: backend,\n\t\tconfig:  config,\n\t}\n\trtr.addRoutes()\n\treturn rtr\n}\n\nfunc (rtr *router) addRoutes() {\n\tfor _, route := range []struct {\n\t\tendpoint    arvados.APIEndpoint\n\t\tdefaultOpts func() interface{}\n\t\texec        api.RoutableFunc\n\t}{\n\t\t{\n\t\t\tarvados.EndpointConfigGet,\n\t\t\tfunc() interface{} { return &struct{}{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ConfigGet(ctx)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointVocabularyGet,\n\t\t\tfunc() interface{} { return &struct{}{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.VocabularyGet(ctx)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointLogin,\n\t\t\tfunc() interface{} { return &arvados.LoginOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.Login(ctx, *opts.(*arvados.LoginOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointLogout,\n\t\t\tfunc() interface{} { return &arvados.LogoutOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.Logout(ctx, *opts.(*arvados.LogoutOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointAuthorizedKeyCreate,\n\t\t\tfunc() interface{} { return &arvados.CreateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.AuthorizedKeyCreate(ctx, *opts.(*arvados.CreateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointAuthorizedKeyUpdate,\n\t\t\tfunc() interface{} { return &arvados.UpdateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.AuthorizedKeyUpdate(ctx, *opts.(*arvados.UpdateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointAuthorizedKeyGet,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn 
rtr.backend.AuthorizedKeyGet(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointAuthorizedKeyList,\n\t\t\tfunc() interface{} { return &arvados.ListOptions{Limit: -1} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.AuthorizedKeyList(ctx, *opts.(*arvados.ListOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointAuthorizedKeyDelete,\n\t\t\tfunc() interface{} { return &arvados.DeleteOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.AuthorizedKeyDelete(ctx, *opts.(*arvados.DeleteOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointCollectionCreate,\n\t\t\tfunc() interface{} { return &arvados.CreateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.CollectionCreate(ctx, *opts.(*arvados.CreateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointCollectionUpdate,\n\t\t\tfunc() interface{} { return &arvados.UpdateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.CollectionUpdate(ctx, *opts.(*arvados.UpdateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointCollectionGet,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.CollectionGet(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointCollectionList,\n\t\t\tfunc() interface{} { return &arvados.ListOptions{Limit: -1} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.CollectionList(ctx, *opts.(*arvados.ListOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointCollectionProvenance,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.CollectionProvenance(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointCollectionUsedBy,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.CollectionUsedBy(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointCollectionDelete,\n\t\t\tfunc() interface{} { return &arvados.DeleteOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.CollectionDelete(ctx, *opts.(*arvados.DeleteOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointCollectionTrash,\n\t\t\tfunc() interface{} { return &arvados.DeleteOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.CollectionTrash(ctx, *opts.(*arvados.DeleteOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointCollectionUntrash,\n\t\t\tfunc() interface{} { return &arvados.UntrashOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.CollectionUntrash(ctx, *opts.(*arvados.UntrashOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointComputedPermissionList,\n\t\t\tfunc() interface{} { return &arvados.ListOptions{Limit: -1} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ComputedPermissionList(ctx, 
*opts.(*arvados.ListOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerCreate,\n\t\t\tfunc() interface{} { return &arvados.CreateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerCreate(ctx, *opts.(*arvados.CreateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerPriorityUpdate,\n\t\t\tfunc() interface{} { return &arvados.UpdateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerPriorityUpdate(ctx, *opts.(*arvados.UpdateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerUpdate,\n\t\t\tfunc() interface{} { return &arvados.UpdateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerUpdate(ctx, *opts.(*arvados.UpdateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerGet,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerGet(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerList,\n\t\t\tfunc() interface{} { return &arvados.ListOptions{Limit: -1} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerList(ctx, *opts.(*arvados.ListOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerDelete,\n\t\t\tfunc() interface{} { return &arvados.DeleteOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerDelete(ctx, *opts.(*arvados.DeleteOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerLock,\n\t\t\tfunc() interface{} {\n\t\t\t\treturn &arvados.GetOptions{Select: []string{\"uuid\", \"state\", \"priority\", \"auth_uuid\", \"locked_by_uuid\"}}\n\t\t\t},\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerLock(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerUnlock,\n\t\t\tfunc() interface{} {\n\t\t\t\treturn &arvados.GetOptions{Select: []string{\"uuid\", \"state\", \"priority\", \"auth_uuid\", \"locked_by_uuid\"}}\n\t\t\t},\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerUnlock(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerSSH,\n\t\t\tfunc() interface{} { return &arvados.ContainerSSHOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerSSH(ctx, *opts.(*arvados.ContainerSSHOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerSSHCompat,\n\t\t\tfunc() interface{} { return &arvados.ContainerSSHOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerSSH(ctx, *opts.(*arvados.ContainerSSHOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// arvados-client built before commit\n\t\t\t// bdc29d3129f6d75aa9ce0a24ffb849a272b06f08\n\t\t\t// used GET with params in headers instead of\n\t\t\t// POST form\n\t\t\tarvados.APIEndpoint{\"GET\", \"arvados/v1/connect/{uuid}/ssh\", \"\"},\n\t\t\tfunc() interface{} { return &arvados.ContainerSSHOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn nil, httpError(http.StatusGone, fmt.Errorf(\"API 
endpoint is obsolete -- please upgrade your arvados-client program\"))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerGatewayTunnel,\n\t\t\tfunc() interface{} { return &arvados.ContainerGatewayTunnelOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerGatewayTunnel(ctx, *opts.(*arvados.ContainerGatewayTunnelOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerGatewayTunnelCompat,\n\t\t\tfunc() interface{} { return &arvados.ContainerGatewayTunnelOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerGatewayTunnel(ctx, *opts.(*arvados.ContainerGatewayTunnelOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerRequestCreate,\n\t\t\tfunc() interface{} { return &arvados.CreateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerRequestCreate(ctx, *opts.(*arvados.CreateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerRequestUpdate,\n\t\t\tfunc() interface{} { return &arvados.UpdateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerRequestUpdate(ctx, *opts.(*arvados.UpdateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerRequestGet,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerRequestGet(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerRequestList,\n\t\t\tfunc() interface{} { return &arvados.ListOptions{Limit: -1} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerRequestList(ctx, *opts.(*arvados.ListOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerRequestDelete,\n\t\t\tfunc() interface{} { return &arvados.DeleteOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerRequestDelete(ctx, *opts.(*arvados.DeleteOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerRequestContainerStatus,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerRequestContainerStatus(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointContainerRequestLog,\n\t\t\tfunc() interface{} { return &arvados.ContainerLogOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.ContainerRequestLog(ctx, *opts.(*arvados.ContainerLogOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointGroupCreate,\n\t\t\tfunc() interface{} { return &arvados.CreateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.GroupCreate(ctx, *opts.(*arvados.CreateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointGroupUpdate,\n\t\t\tfunc() interface{} { return &arvados.UpdateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.GroupUpdate(ctx, *opts.(*arvados.UpdateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointGroupList,\n\t\t\tfunc() interface{} { return &arvados.ListOptions{Limit: -1} },\n\t\t\tfunc(ctx context.Context, opts interface{}) 
(interface{}, error) {\n\t\t\t\treturn rtr.backend.GroupList(ctx, *opts.(*arvados.ListOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointGroupContents,\n\t\t\tfunc() interface{} { return &arvados.GroupContentsOptions{Limit: -1} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.GroupContents(ctx, *opts.(*arvados.GroupContentsOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointGroupContentsUUIDInPath,\n\t\t\tfunc() interface{} { return &arvados.GroupContentsOptions{Limit: -1} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.GroupContents(ctx, *opts.(*arvados.GroupContentsOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointGroupShared,\n\t\t\tfunc() interface{} { return &arvados.ListOptions{Limit: -1} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.GroupShared(ctx, *opts.(*arvados.ListOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointGroupGet,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.GroupGet(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointGroupDelete,\n\t\t\tfunc() interface{} { return &arvados.DeleteOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.GroupDelete(ctx, *opts.(*arvados.DeleteOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointGroupTrash,\n\t\t\tfunc() interface{} { return &arvados.DeleteOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.GroupTrash(ctx, *opts.(*arvados.DeleteOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointGroupUntrash,\n\t\t\tfunc() interface{} { return &arvados.UntrashOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.GroupUntrash(ctx, *opts.(*arvados.UntrashOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointLinkCreate,\n\t\t\tfunc() interface{} { return &arvados.CreateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.LinkCreate(ctx, *opts.(*arvados.CreateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointLinkUpdate,\n\t\t\tfunc() interface{} { return &arvados.UpdateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.LinkUpdate(ctx, *opts.(*arvados.UpdateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointLinkList,\n\t\t\tfunc() interface{} { return &arvados.ListOptions{Limit: -1} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.LinkList(ctx, *opts.(*arvados.ListOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointLinkGet,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.LinkGet(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointLinkDelete,\n\t\t\tfunc() interface{} { return &arvados.DeleteOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.LinkDelete(ctx, *opts.(*arvados.DeleteOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointLogCreate,\n\t\t\tfunc() interface{} { return &arvados.CreateOptions{} 
},\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.LogCreate(ctx, *opts.(*arvados.CreateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointLogUpdate,\n\t\t\tfunc() interface{} { return &arvados.UpdateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.LogUpdate(ctx, *opts.(*arvados.UpdateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointLogList,\n\t\t\tfunc() interface{} { return &arvados.ListOptions{Limit: -1} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.LogList(ctx, *opts.(*arvados.ListOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointLogGet,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.LogGet(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointLogDelete,\n\t\t\tfunc() interface{} { return &arvados.DeleteOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.LogDelete(ctx, *opts.(*arvados.DeleteOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointAPIClientAuthorizationCreate,\n\t\t\tfunc() interface{} { return &arvados.CreateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.APIClientAuthorizationCreate(ctx, *opts.(*arvados.CreateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointAPIClientAuthorizationUpdate,\n\t\t\tfunc() interface{} { return &arvados.UpdateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.APIClientAuthorizationUpdate(ctx, *opts.(*arvados.UpdateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointAPIClientAuthorizationDelete,\n\t\t\tfunc() interface{} { return &arvados.DeleteOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.APIClientAuthorizationDelete(ctx, *opts.(*arvados.DeleteOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointAPIClientAuthorizationList,\n\t\t\tfunc() interface{} { return &arvados.ListOptions{Limit: -1} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.APIClientAuthorizationList(ctx, *opts.(*arvados.ListOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointAPIClientAuthorizationCurrent,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.APIClientAuthorizationCurrent(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointAPIClientAuthorizationGet,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.APIClientAuthorizationGet(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointUserCreate,\n\t\t\tfunc() interface{} { return &arvados.CreateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.UserCreate(ctx, *opts.(*arvados.CreateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointUserMerge,\n\t\t\tfunc() interface{} { return &arvados.UserMergeOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn 
rtr.backend.UserMerge(ctx, *opts.(*arvados.UserMergeOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointUserActivate,\n\t\t\tfunc() interface{} { return &arvados.UserActivateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.UserActivate(ctx, *opts.(*arvados.UserActivateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointUserSetup,\n\t\t\tfunc() interface{} { return &arvados.UserSetupOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.UserSetup(ctx, *opts.(*arvados.UserSetupOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointUserUnsetup,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.UserUnsetup(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointUserGetCurrent,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.UserGetCurrent(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointUserGetSystem,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.UserGetSystem(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointUserGet,\n\t\t\tfunc() interface{} { return &arvados.GetOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.UserGet(ctx, *opts.(*arvados.GetOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointUserUpdate,\n\t\t\tfunc() interface{} { return &arvados.UpdateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.UserUpdate(ctx, *opts.(*arvados.UpdateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointUserList,\n\t\t\tfunc() interface{} { return &arvados.ListOptions{Limit: -1} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.UserList(ctx, *opts.(*arvados.ListOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointUserBatchUpdate,\n\t\t\tfunc() interface{} { return &arvados.UserBatchUpdateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.UserBatchUpdate(ctx, *opts.(*arvados.UserBatchUpdateOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointUserDelete,\n\t\t\tfunc() interface{} { return &arvados.DeleteOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.UserDelete(ctx, *opts.(*arvados.DeleteOptions))\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tarvados.EndpointUserAuthenticate,\n\t\t\tfunc() interface{} { return &arvados.UserAuthenticateOptions{} },\n\t\t\tfunc(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\t\treturn rtr.backend.UserAuthenticate(ctx, *opts.(*arvados.UserAuthenticateOptions))\n\t\t\t},\n\t\t},\n\t} {\n\t\texec := route.exec\n\t\tif rtr.config.WrapCalls != nil {\n\t\t\texec = rtr.config.WrapCalls(exec)\n\t\t}\n\t\trtr.addRoute(route.endpoint, route.defaultOpts, exec)\n\t}\n\trtr.mux.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tif req.Method == \"OPTIONS\" {\n\t\t\t// For non-webdav endpoints, return an empty\n\t\t\t// response with the CORS headers 
we already\n\t\t\t// added in ServeHTTP.\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\treturn\n\t\t}\n\t\thttpserver.Errors(w, []string{\"API endpoint not found\"}, http.StatusNotFound)\n\t})\n\trtr.mux.MethodNotAllowedHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tif req.Method == \"OPTIONS\" {\n\t\t\t// For non-webdav endpoints, return an empty\n\t\t\t// response with the CORS headers we already\n\t\t\t// added in ServeHTTP.\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\treturn\n\t\t}\n\t\thttpserver.Errors(w, []string{\"API endpoint not found\"}, http.StatusMethodNotAllowed)\n\t})\n}\n\nvar altMethod = map[string]string{\n\t\"PATCH\": \"PUT\",  // Accept PUT as a synonym for PATCH\n\t\"GET\":   \"HEAD\", // Accept HEAD at any GET route\n}\n\nfunc (rtr *router) addRoute(endpoint arvados.APIEndpoint, defaultOpts func() interface{}, exec api.RoutableFunc) {\n\tmethods := []string{endpoint.Method}\n\tif alt, ok := altMethod[endpoint.Method]; ok {\n\t\tmethods = append(methods, alt)\n\t}\n\tif strings.HasSuffix(endpoint.Path, \".*}\") {\n\t\t// webdav methods\n\t\tmethods = append(methods, \"OPTIONS\", \"PROPFIND\")\n\t}\n\trtr.mux.Methods(methods...).Path(\"/\" + endpoint.Path).HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tlogger := ctxlog.FromContext(req.Context())\n\t\topts := defaultOpts()\n\t\tparams, err := rtr.loadRequestParams(req, endpoint.AttrsKey, opts)\n\t\tif err != nil {\n\t\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\t\"req\":      req,\n\t\t\t\t\"method\":   endpoint.Method,\n\t\t\t\t\"endpoint\": endpoint,\n\t\t\t}).WithError(err).Debug(\"error loading request params\")\n\t\t\trtr.sendError(w, err)\n\t\t\treturn\n\t\t}\n\t\trespOpts, err := rtr.responseOptions(opts)\n\t\tif err != nil {\n\t\t\tlogger.WithField(\"opts\", opts).WithError(err).Debugf(\"error getting response options from %T\", opts)\n\t\t\trtr.sendError(w, err)\n\t\t\treturn\n\t\t}\n\n\t\tcreds := auth.CredentialsFromRequest(req)\n\t\terr = creds.LoadTokensFromHTTPRequestBody(req)\n\t\tif err != nil {\n\t\t\trtr.sendError(w, fmt.Errorf(\"error loading tokens from request body: %s\", err))\n\t\t\treturn\n\t\t}\n\t\tif rt, _ := params[\"reader_tokens\"].([]interface{}); len(rt) > 0 {\n\t\t\tfor _, t := range rt {\n\t\t\t\tif t, ok := t.(string); ok {\n\t\t\t\t\tcreds.Tokens = append(creds.Tokens, t)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tctx := auth.NewContext(req.Context(), creds)\n\t\tctx = arvados.ContextWithRequestID(ctx, req.Header.Get(\"X-Request-Id\"))\n\t\treq = req.WithContext(ctx)\n\n\t\thttpserver.SetResponseLogFields(ctx, logrus.Fields{\"tokenUUIDs\": creds.TokenUUIDs()})\n\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"apiEndpoint\": endpoint,\n\t\t\t\"apiOptsType\": fmt.Sprintf(\"%T\", opts),\n\t\t\t\"apiOpts\":     opts,\n\t\t}).Debug(\"exec\")\n\t\tresp, err := exec(ctx, opts)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Debugf(\"returning error type %T\", err)\n\t\t\trtr.sendError(w, err)\n\t\t\treturn\n\t\t}\n\t\trtr.sendResponse(w, req, resp, respOpts)\n\t})\n}\n\nfunc (rtr *router) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif target := ContainerHTTPProxyTarget(rtr.config.ContainerWebServices, r); target != \"\" {\n\t\trtr.serveContainerHTTPProxy(w, r, target)\n\t\treturn\n\t}\n\tswitch strings.SplitN(strings.TrimLeft(r.URL.Path, \"/\"), \"/\", 2)[0] {\n\tcase \"login\", \"logout\", \"auth\":\n\tdefault:\n\t\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\tw.Header().Set(\"Access-Control-Allow-Methods\", \"GET, HEAD, OPTIONS, 
PROPFIND, PUT, POST, PATCH, DELETE\")\n\t\tw.Header().Set(\"Access-Control-Allow-Headers\", \"Authorization, Content-Type, Range, X-Http-Method-Override\")\n\t\tw.Header().Set(\"Access-Control-Expose-Headers\", \"Content-Range\")\n\t\tw.Header().Set(\"Access-Control-Max-Age\", \"86486400\")\n\t}\n\tif r.Body != nil {\n\t\t// Wrap r.Body in a http.MaxBytesReader(), otherwise\n\t\t// r.ParseForm() uses a default max request body size\n\t\t// of 10 megabytes. Note we rely on the Nginx\n\t\t// configuration to enforce the real max body size.\n\t\tmax := int64(rtr.config.MaxRequestSize)\n\t\tif max < 1 {\n\t\t\tmax = math.MaxInt64 - 1\n\t\t}\n\t\tr.Body = http.MaxBytesReader(w, r.Body, max)\n\t}\n\tif r.Method == \"POST\" {\n\t\terr := r.ParseForm()\n\t\tif err != nil {\n\t\t\tif err.Error() == \"http: request body too large\" {\n\t\t\t\terr = httpError(http.StatusRequestEntityTooLarge, err)\n\t\t\t}\n\t\t\trtr.sendError(w, err)\n\t\t\treturn\n\t\t}\n\t\tif m := r.FormValue(\"_method\"); m != \"\" {\n\t\t\tr2 := *r\n\t\t\tr = &r2\n\t\t\tr.Method = m\n\t\t} else if m = r.Header.Get(\"X-Http-Method-Override\"); m != \"\" {\n\t\t\tr2 := *r\n\t\t\tr = &r2\n\t\t\tr.Method = m\n\t\t}\n\t}\n\trtr.mux.ServeHTTP(w, r)\n}\n\n// If -- given the provided ContainerWebServices config section -- req\n// is a container http proxy request, return the appropriate target\n// (\":{portnum}\", a container UUID, or a container request UUID).\n// Otherwise, return \"\".\nfunc ContainerHTTPProxyTarget(svc *arvados.ServiceWithPortRange, req *http.Request) string {\n\tif svc == nil {\n\t\treturn \"\"\n\t}\n\tconfigurl := url.URL(svc.ExternalURL)\n\tconfhostname := configurl.Hostname()\n\n\t// Use req.Host (not req.URL), but use url.URL to parse it,\n\t// which differs from net.SplitHostPort (port must be numeric,\n\t// [] are stripped even if there is no port).\n\trequrl := url.URL{Host: req.Host}\n\treqhostname := requrl.Hostname()\n\treqport := requrl.Port()\n\treqportnum, _ := strconv.Atoi(reqport)\n\n\tif strings.EqualFold(confhostname, reqhostname) &&\n\t\tsvc.ExternalPortMin > 0 &&\n\t\tsvc.ExternalPortMin <= reqportnum &&\n\t\tsvc.ExternalPortMax >= reqportnum {\n\t\t// Config uses a port range instead of a wildcard\n\t\t// host.  Pass the port number (like \":1234\") as the\n\t\t// target.  
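(This branch is taken when the\n\t\t// request host matches the configured hostname and\n\t\t// the port falls within ExternalPortMin..ExternalPortMax.)  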
The ContainerHTTPProxy API method will\n\t\t// figure out which container it is currently assigned\n\t\t// to.\n\t\treturn fmt.Sprintf(\":%d\", reqportnum)\n\t} else if !strings.HasPrefix(confhostname, \"*\") {\n\t\t// Feature disabled by config\n\t\treturn \"\"\n\t}\n\n\t// Check that the requested port matches the ExternalURL port.\n\t// We don't know the request scheme, so we just assume it was\n\t// \"https\" for the purpose of comparing implicit/explicit ways\n\t// of spelling \"default port for this scheme\".\n\tconfport := configurl.Port()\n\tif !(reqport == confport ||\n\t\t(reqport == \"\" && confport == \"443\") ||\n\t\t(reqport == \"443\" && confport == \"\")) {\n\t\treturn \"\"\n\t}\n\ttargetlen := len(reqhostname) - len(confhostname) + 1\n\tif targetlen < 1 ||\n\t\t!strings.EqualFold(reqhostname[targetlen:], confhostname[1:]) {\n\t\t// Request host does not match config wildcard, so\n\t\t// this is not a container http proxy request.\n\t\treturn \"\"\n\t}\n\treturn reqhostname[:targetlen]\n}\n\nfunc (rtr *router) serveContainerHTTPProxy(w http.ResponseWriter, req *http.Request, target string) {\n\t// This API bypasses the generic auth middleware in\n\t// addRoute(), so here we need to load tokens into ctx, log\n\t// their UUIDs, and propagate the incoming X-Request-Id.\n\tctx := req.Context()\n\tif cookie, err := req.Cookie(\"arvados_api_token\"); err == nil && len(cookie.Value) != 0 {\n\t\tif token, err := auth.DecodeTokenCookie(cookie.Value); err == nil {\n\t\t\tcreds := auth.NewCredentials(string(token))\n\t\t\tctx = auth.NewContext(ctx, creds)\n\t\t\thttpserver.SetResponseLogFields(ctx, logrus.Fields{\"tokenUUIDs\": creds.TokenUUIDs()})\n\t\t}\n\t}\n\n\tctx = arvados.ContextWithRequestID(ctx, req.Header.Get(\"X-Request-Id\"))\n\treq = req.WithContext(ctx)\n\n\t// Load the NoForward value from the X-Arvados-No-Forward\n\t// header, but don't pass the header through in the proxied\n\t// request.\n\tnoForward := req.Header.Get(\"X-Arvados-No-Forward\") != \"\"\n\treq.Header.Del(\"X-Arvados-No-Forward\")\n\n\thandler, err := rtr.backend.ContainerHTTPProxy(req.Context(), arvados.ContainerHTTPProxyOptions{\n\t\tTarget:    target,\n\t\tRequest:   req,\n\t\tNoForward: noForward,\n\t})\n\tif err != nil {\n\t\trtr.sendError(w, err)\n\t\treturn\n\t}\n\thandler.ServeHTTP(w, req)\n}\n"
  },
  {
    "path": "lib/controller/router/router_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage router\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"github.com/gorilla/mux\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&RouterSuite{})\n\ntype RouterSuite struct {\n\trtr  *router\n\tstub arvadostest.APIStub\n}\n\nfunc (s *RouterSuite) SetUpTest(c *check.C) {\n\ts.stub = arvadostest.APIStub{}\n\ts.rtr = &router{\n\t\tmux:     mux.NewRouter(),\n\t\tbackend: &s.stub,\n\t\tconfig: Config{\n\t\t\tContainerWebServices: &arvados.ServiceWithPortRange{\n\t\t\t\tService: arvados.Service{\n\t\t\t\t\tExternalURL: arvados.URL{Host: \"*.containers.zzzzz.example.com\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\ts.rtr.addRoutes()\n}\n\nfunc (s *RouterSuite) TestOptions(c *check.C) {\n\ttoken := arvadostest.ActiveToken\n\tfor _, trial := range []struct {\n\t\tcomment         string // unparsed -- only used to help match test failures to trials\n\t\tmethod          string\n\t\tpath            string\n\t\theader          http.Header\n\t\tbody            string\n\t\tunauthenticated bool\n\t\tshouldStatus    int // zero value means 200\n\t\tshouldCall      string\n\t\twithOptions     interface{}\n\t\tcheckOptions    func(interface{}) // if non-nil, call instead of checking withOptions\n\t}{\n\t\t{\n\t\t\tmethod:      \"GET\",\n\t\t\tpath:        \"/arvados/v1/collections/\" + arvadostest.FooCollection,\n\t\t\tshouldCall:  \"CollectionGet\",\n\t\t\twithOptions: arvados.GetOptions{UUID: arvadostest.FooCollection},\n\t\t},\n\t\t{\n\t\t\tmethod:      \"PUT\",\n\t\t\tpath:        \"/arvados/v1/collections/\" + arvadostest.FooCollection,\n\t\t\tshouldCall:  \"CollectionUpdate\",\n\t\t\twithOptions: arvados.UpdateOptions{UUID: arvadostest.FooCollection},\n\t\t},\n\t\t{\n\t\t\tmethod:      \"PATCH\",\n\t\t\tpath:        \"/arvados/v1/collections/\" + arvadostest.FooCollection,\n\t\t\tshouldCall:  \"CollectionUpdate\",\n\t\t\twithOptions: arvados.UpdateOptions{UUID: arvadostest.FooCollection},\n\t\t},\n\t\t{\n\t\t\tmethod:      \"DELETE\",\n\t\t\tpath:        \"/arvados/v1/collections/\" + arvadostest.FooCollection,\n\t\t\tshouldCall:  \"CollectionDelete\",\n\t\t\twithOptions: arvados.DeleteOptions{UUID: arvadostest.FooCollection},\n\t\t},\n\t\t{\n\t\t\tmethod:      \"POST\",\n\t\t\tpath:        \"/arvados/v1/collections\",\n\t\t\tshouldCall:  \"CollectionCreate\",\n\t\t\twithOptions: arvados.CreateOptions{},\n\t\t},\n\t\t{\n\t\t\tmethod:      \"GET\",\n\t\t\tpath:        \"/arvados/v1/collections\",\n\t\t\tshouldCall:  \"CollectionList\",\n\t\t\twithOptions: arvados.ListOptions{Limit: -1},\n\t\t},\n\t\t{\n\t\t\tmethod:      \"GET\",\n\t\t\tpath:        \"/arvados/v1/api_client_authorizations\",\n\t\t\tshouldCall:  \"APIClientAuthorizationList\",\n\t\t\twithOptions: arvados.ListOptions{Limit: -1},\n\t\t},\n\t\t{\n\t\t\tmethod:      \"GET\",\n\t\t\tpath:        \"/arvados/v1/collections?limit=123&offset=456&include_trash=true&include_old_versions=1\",\n\t\t\tshouldCall:  \"CollectionList\",\n\t\t\twithOptions: arvados.ListOptions{Limit: 123, Offset: 456, IncludeTrash: true, 
IncludeOldVersions: true},\n\t\t},\n\t\t{\n\t\t\tmethod:      \"POST\",\n\t\t\tpath:        \"/arvados/v1/collections?limit=123&_method=GET\",\n\t\t\tbody:        `{\"offset\":456,\"include_trash\":true,\"include_old_versions\":true}`,\n\t\t\tshouldCall:  \"CollectionList\",\n\t\t\twithOptions: arvados.ListOptions{Limit: 123, Offset: 456, IncludeTrash: true, IncludeOldVersions: true},\n\t\t},\n\t\t{\n\t\t\tmethod:      \"POST\",\n\t\t\tpath:        \"/arvados/v1/collections?limit=123\",\n\t\t\tbody:        `{\"offset\":456,\"include_trash\":true,\"include_old_versions\":true}`,\n\t\t\theader:      http.Header{\"X-Http-Method-Override\": {\"GET\"}, \"Content-Type\": {\"application/json\"}},\n\t\t\tshouldCall:  \"CollectionList\",\n\t\t\twithOptions: arvados.ListOptions{Limit: 123, Offset: 456, IncludeTrash: true, IncludeOldVersions: true},\n\t\t},\n\t\t{\n\t\t\tmethod:      \"POST\",\n\t\t\tpath:        \"/arvados/v1/collections?limit=123\",\n\t\t\tbody:        \"offset=456&include_trash=true&include_old_versions=1&_method=GET\",\n\t\t\theader:      http.Header{\"Content-Type\": {\"application/x-www-form-urlencoded\"}},\n\t\t\tshouldCall:  \"CollectionList\",\n\t\t\twithOptions: arvados.ListOptions{Limit: 123, Offset: 456, IncludeTrash: true, IncludeOldVersions: true},\n\t\t},\n\t\t{\n\t\t\tcomment:     \"form-encoded expression filter in query string\",\n\t\t\tmethod:      \"GET\",\n\t\t\tpath:        \"/arvados/v1/collections?filters=[%22(foo<bar)%22]\",\n\t\t\tshouldCall:  \"CollectionList\",\n\t\t\twithOptions: arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{\"(foo<bar)\", \"=\", true}}},\n\t\t},\n\t\t{\n\t\t\tcomment:     \"form-encoded expression filter in POST body\",\n\t\t\tmethod:      \"POST\",\n\t\t\tpath:        \"/arvados/v1/collections\",\n\t\t\tbody:        \"filters=[\\\"(foo<bar)\\\"]&_method=GET\",\n\t\t\theader:      http.Header{\"Content-Type\": {\"application/x-www-form-urlencoded\"}},\n\t\t\tshouldCall:  \"CollectionList\",\n\t\t\twithOptions: arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{\"(foo<bar)\", \"=\", true}}},\n\t\t},\n\t\t{\n\t\t\tcomment:     \"json-encoded expression filter in POST body\",\n\t\t\tmethod:      \"POST\",\n\t\t\tpath:        \"/arvados/v1/collections?_method=GET\",\n\t\t\tbody:        `{\"filters\":[\"(foo<bar)\",[\"bar\",\"=\",\"baz\"]],\"limit\":2}`,\n\t\t\theader:      http.Header{\"Content-Type\": {\"application/json\"}},\n\t\t\tshouldCall:  \"CollectionList\",\n\t\t\twithOptions: arvados.ListOptions{Limit: 2, Filters: []arvados.Filter{{\"(foo<bar)\", \"=\", true}, {\"bar\", \"=\", \"baz\"}}},\n\t\t},\n\t\t{\n\t\t\tcomment:     \"json-encoded select param in query string\",\n\t\t\tmethod:      \"GET\",\n\t\t\tpath:        \"/arvados/v1/collections/\" + arvadostest.FooCollection + \"?select=[%22portable_data_hash%22]\",\n\t\t\tshouldCall:  \"CollectionGet\",\n\t\t\twithOptions: arvados.GetOptions{UUID: arvadostest.FooCollection, Select: []string{\"portable_data_hash\"}},\n\t\t},\n\t\t{\n\t\t\tmethod:       \"PATCH\",\n\t\t\tpath:         \"/arvados/v1/collections\",\n\t\t\tshouldStatus: http.StatusMethodNotAllowed,\n\t\t},\n\t\t{\n\t\t\tmethod:       \"PUT\",\n\t\t\tpath:         \"/arvados/v1/collections\",\n\t\t\tshouldStatus: http.StatusMethodNotAllowed,\n\t\t},\n\t\t{\n\t\t\tmethod:       \"DELETE\",\n\t\t\tpath:         \"/arvados/v1/collections\",\n\t\t\tshouldStatus: http.StatusMethodNotAllowed,\n\t\t},\n\t\t{\n\t\t\tcomment:    \"container log webdav GET root\",\n\t\t\tmethod:     \"GET\",\n\t\t\tpath:       
\"/arvados/v1/container_requests/\" + arvadostest.CompletedContainerRequestUUID + \"/log/\" + arvadostest.CompletedContainerUUID + \"/\",\n\t\t\tshouldCall: \"ContainerRequestLog\",\n\t\t\twithOptions: arvados.ContainerLogOptions{\n\t\t\t\tUUID: arvadostest.CompletedContainerRequestUUID,\n\t\t\t\tWebDAVOptions: arvados.WebDAVOptions{\n\t\t\t\t\tMethod: \"GET\",\n\t\t\t\t\tHeader: http.Header{\"Authorization\": {\"Bearer \" + arvadostest.ActiveToken}},\n\t\t\t\t\tPath:   \"/\" + arvadostest.CompletedContainerUUID + \"/\"}},\n\t\t},\n\t\t{\n\t\t\tcomment:    \"container log webdav GET root without trailing slash\",\n\t\t\tmethod:     \"GET\",\n\t\t\tpath:       \"/arvados/v1/container_requests/\" + arvadostest.CompletedContainerRequestUUID + \"/log/\" + arvadostest.CompletedContainerUUID + \"\",\n\t\t\tshouldCall: \"ContainerRequestLog\",\n\t\t\twithOptions: arvados.ContainerLogOptions{\n\t\t\t\tUUID: arvadostest.CompletedContainerRequestUUID,\n\t\t\t\tWebDAVOptions: arvados.WebDAVOptions{\n\t\t\t\t\tMethod: \"GET\",\n\t\t\t\t\tHeader: http.Header{\"Authorization\": {\"Bearer \" + arvadostest.ActiveToken}},\n\t\t\t\t\tPath:   \"/\" + arvadostest.CompletedContainerUUID}},\n\t\t},\n\t\t{\n\t\t\tcomment:    \"container log webdav OPTIONS root\",\n\t\t\tmethod:     \"OPTIONS\",\n\t\t\tpath:       \"/arvados/v1/container_requests/\" + arvadostest.CompletedContainerRequestUUID + \"/log/\" + arvadostest.CompletedContainerUUID + \"/\",\n\t\t\tshouldCall: \"ContainerRequestLog\",\n\t\t\twithOptions: arvados.ContainerLogOptions{\n\t\t\t\tUUID: arvadostest.CompletedContainerRequestUUID,\n\t\t\t\tWebDAVOptions: arvados.WebDAVOptions{\n\t\t\t\t\tMethod: \"OPTIONS\",\n\t\t\t\t\tHeader: http.Header{\"Authorization\": {\"Bearer \" + arvadostest.ActiveToken}},\n\t\t\t\t\tPath:   \"/\" + arvadostest.CompletedContainerUUID + \"/\"}},\n\t\t},\n\t\t{\n\t\t\tcomment:    \"container log webdav OPTIONS root without trailing slash\",\n\t\t\tmethod:     \"OPTIONS\",\n\t\t\tpath:       \"/arvados/v1/container_requests/\" + arvadostest.CompletedContainerRequestUUID + \"/log/\" + arvadostest.CompletedContainerUUID,\n\t\t\tshouldCall: \"ContainerRequestLog\",\n\t\t\twithOptions: arvados.ContainerLogOptions{\n\t\t\t\tUUID: arvadostest.CompletedContainerRequestUUID,\n\t\t\t\tWebDAVOptions: arvados.WebDAVOptions{\n\t\t\t\t\tMethod: \"OPTIONS\",\n\t\t\t\t\tHeader: http.Header{\"Authorization\": {\"Bearer \" + arvadostest.ActiveToken}},\n\t\t\t\t\tPath:   \"/\" + arvadostest.CompletedContainerUUID}},\n\t\t},\n\t\t{\n\t\t\tcomment:         \"container log webdav OPTIONS for CORS\",\n\t\t\tunauthenticated: true,\n\t\t\tmethod:          \"OPTIONS\",\n\t\t\tpath:            \"/arvados/v1/container_requests/\" + arvadostest.CompletedContainerRequestUUID + \"/log/\" + arvadostest.CompletedContainerUUID + \"/\",\n\t\t\theader:          http.Header{\"Access-Control-Request-Method\": {\"POST\"}},\n\t\t\tshouldCall:      \"ContainerRequestLog\",\n\t\t\twithOptions: arvados.ContainerLogOptions{\n\t\t\t\tUUID: arvadostest.CompletedContainerRequestUUID,\n\t\t\t\tWebDAVOptions: arvados.WebDAVOptions{\n\t\t\t\t\tMethod: \"OPTIONS\",\n\t\t\t\t\tHeader: http.Header{\n\t\t\t\t\t\t\"Access-Control-Request-Method\": {\"POST\"},\n\t\t\t\t\t},\n\t\t\t\t\tPath: \"/\" + arvadostest.CompletedContainerUUID + \"/\"}},\n\t\t},\n\t\t{\n\t\t\tcomment:    \"container log webdav PROPFIND root\",\n\t\t\tmethod:     \"PROPFIND\",\n\t\t\tpath:       \"/arvados/v1/container_requests/\" + arvadostest.CompletedContainerRequestUUID + \"/log/\" + 
arvadostest.CompletedContainerUUID + \"/\",\n\t\t\tshouldCall: \"ContainerRequestLog\",\n\t\t\twithOptions: arvados.ContainerLogOptions{\n\t\t\t\tUUID: arvadostest.CompletedContainerRequestUUID,\n\t\t\t\tWebDAVOptions: arvados.WebDAVOptions{\n\t\t\t\t\tMethod: \"PROPFIND\",\n\t\t\t\t\tHeader: http.Header{\"Authorization\": {\"Bearer \" + arvadostest.ActiveToken}},\n\t\t\t\t\tPath:   \"/\" + arvadostest.CompletedContainerUUID + \"/\"}},\n\t\t},\n\t\t{\n\t\t\tcomment:    \"container log webdav PROPFIND root without trailing slash\",\n\t\t\tmethod:     \"PROPFIND\",\n\t\t\tpath:       \"/arvados/v1/container_requests/\" + arvadostest.CompletedContainerRequestUUID + \"/log/\" + arvadostest.CompletedContainerUUID + \"\",\n\t\t\tshouldCall: \"ContainerRequestLog\",\n\t\t\twithOptions: arvados.ContainerLogOptions{\n\t\t\t\tUUID: arvadostest.CompletedContainerRequestUUID,\n\t\t\t\tWebDAVOptions: arvados.WebDAVOptions{\n\t\t\t\t\tMethod: \"PROPFIND\",\n\t\t\t\t\tHeader: http.Header{\"Authorization\": {\"Bearer \" + arvadostest.ActiveToken}},\n\t\t\t\t\tPath:   \"/\" + arvadostest.CompletedContainerUUID}},\n\t\t},\n\t\t{\n\t\t\tcomment:    \"container log webdav no_forward=true\",\n\t\t\tmethod:     \"GET\",\n\t\t\tpath:       \"/arvados/v1/container_requests/\" + arvadostest.CompletedContainerRequestUUID + \"/log/\" + arvadostest.CompletedContainerUUID + \"/?no_forward=true\",\n\t\t\tshouldCall: \"ContainerRequestLog\",\n\t\t\twithOptions: arvados.ContainerLogOptions{\n\t\t\t\tUUID:      arvadostest.CompletedContainerRequestUUID,\n\t\t\t\tNoForward: true,\n\t\t\t\tWebDAVOptions: arvados.WebDAVOptions{\n\t\t\t\t\tMethod: \"GET\",\n\t\t\t\t\tHeader: http.Header{\"Authorization\": {\"Bearer \" + arvadostest.ActiveToken}},\n\t\t\t\t\tPath:   \"/\" + arvadostest.CompletedContainerUUID + \"/\"}},\n\t\t},\n\t\t{\n\t\t\tcomment:      \"/logX does not route to ContainerRequestLog\",\n\t\t\tmethod:       \"GET\",\n\t\t\tpath:         \"/arvados/v1/containers/\" + arvadostest.CompletedContainerRequestUUID + \"/logX\",\n\t\t\tshouldStatus: http.StatusNotFound,\n\t\t\tshouldCall:   \"\",\n\t\t},\n\t\t{\n\t\t\tcomment:         \"container http proxy no_forward=true\",\n\t\t\tunauthenticated: true,\n\t\t\tmethod:          \"POST\",\n\t\t\tpath:            \"/foo/bar\",\n\t\t\theader: http.Header{\n\t\t\t\t\"Cookie\":               {\"arvados_api_token=\" + auth.EncodeTokenCookie([]byte(arvadostest.ActiveToken))},\n\t\t\t\t\"Host\":                 {arvadostest.RunningContainerUUID + \"-12345.containers.zzzzz.example.com\"},\n\t\t\t\t\"X-Arvados-No-Forward\": {\"1\"},\n\t\t\t\t\"X-Example-Header\":     {\"preserved header value\"},\n\t\t\t},\n\t\t\tshouldCall: \"ContainerHTTPProxy\",\n\t\t\tcheckOptions: func(gotOptions interface{}) {\n\t\t\t\topts, _ := gotOptions.(arvados.ContainerHTTPProxyOptions)\n\t\t\t\tif !c.Check(opts, check.NotNil) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tc.Check(opts.Request.Method, check.Equals, \"POST\")\n\t\t\t\tc.Check(opts.Request.URL.Path, check.Equals, \"/foo/bar\")\n\t\t\t\tc.Check(opts.Request.Host, check.Equals, arvadostest.RunningContainerUUID+\"-12345.containers.zzzzz.example.com\")\n\t\t\t\tc.Check(opts.Request.Header, check.DeepEquals, http.Header{\n\t\t\t\t\t\"Cookie\":           {\"arvados_api_token=\" + auth.EncodeTokenCookie([]byte(arvadostest.ActiveToken))},\n\t\t\t\t\t\"X-Example-Header\": {\"preserved header value\"},\n\t\t\t\t})\n\t\t\t\topts.Request = nil\n\t\t\t\tc.Check(opts, check.DeepEquals, arvados.ContainerHTTPProxyOptions{\n\t\t\t\t\tTarget:    
arvadostest.RunningContainerUUID + \"-12345\",\n\t\t\t\t\tNoForward: true,\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t} {\n\t\t// Reset calls captured in previous trial\n\t\ts.stub = arvadostest.APIStub{}\n\n\t\tc.Logf(\"trial: %+v\", trial)\n\t\tcomment := check.Commentf(\"trial comment: %s\", trial.comment)\n\n\t\t_, rr := doRequest(c, s.rtr, token, trial.method, trial.path, !trial.unauthenticated, trial.header, bytes.NewBufferString(trial.body), nil)\n\t\tif trial.shouldStatus == 0 {\n\t\t\tc.Check(rr.Code, check.Equals, http.StatusOK, comment)\n\t\t} else {\n\t\t\tc.Check(rr.Code, check.Equals, trial.shouldStatus, comment)\n\t\t}\n\t\tcalls := s.stub.Calls(nil)\n\t\tif trial.shouldCall == \"\" {\n\t\t\tc.Check(calls, check.HasLen, 0, comment)\n\t\t\tcontinue\n\t\t}\n\t\tif !c.Check(calls, check.HasLen, 1, comment) {\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(calls[0].Method, isMethodNamed, trial.shouldCall, comment)\n\t\tif trial.checkOptions != nil {\n\t\t\ttrial.checkOptions(calls[0].Options)\n\t\t} else {\n\t\t\tc.Check(calls[0].Options, check.DeepEquals, trial.withOptions, comment)\n\t\t}\n\t}\n}\n\nvar _ = check.Suite(&RouterIntegrationSuite{})\n\ntype RouterIntegrationSuite struct {\n\trtr *router\n}\n\nfunc (s *RouterIntegrationSuite) SetUpTest(c *check.C) {\n\tcluster := &arvados.Cluster{}\n\tcluster.TLS.Insecure = true\n\tarvadostest.SetServiceURL(&cluster.Services.RailsAPI, \"https://\"+os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\tarvadostest.SetServiceURL(&cluster.Services.ContainerWebServices.Service, \"https://*.containers.zzzzz.example.com\")\n\turl, _ := url.Parse(\"https://\" + os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\ts.rtr = New(rpc.NewConn(\"zzzzz\", url, true, rpc.PassthroughTokenProvider), Config{})\n}\n\nfunc (s *RouterIntegrationSuite) TearDownSuite(c *check.C) {\n\terr := arvados.NewClientFromEnv().RequestAndDecode(nil, \"POST\", \"database/reset\", nil, nil)\n\tc.Check(err, check.IsNil)\n}\n\nfunc (s *RouterIntegrationSuite) TestCollectionResponses(c *check.C) {\n\ttoken := arvadostest.ActiveTokenV2\n\n\t// Check \"get collection\" response has \"kind\" key\n\tjresp := map[string]interface{}{}\n\t_, rr := doRequest(c, s.rtr, token, \"GET\", `/arvados/v1/collections`, true, nil, bytes.NewBufferString(`{\"include_trash\":true}`), jresp)\n\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\tc.Check(jresp[\"items\"], check.FitsTypeOf, []interface{}{})\n\tc.Check(jresp[\"kind\"], check.Equals, \"arvados#collectionList\")\n\tc.Check(jresp[\"items\"].([]interface{})[0].(map[string]interface{})[\"kind\"], check.Equals, \"arvados#collection\")\n\n\t// Check items in list response have a \"kind\" key regardless\n\t// of whether a uuid/pdh is selected.\n\tfor _, selectj := range []string{\n\t\t``,\n\t\t`,\"select\":[\"portable_data_hash\"]`,\n\t\t`,\"select\":[\"name\"]`,\n\t\t`,\"select\":[\"uuid\"]`,\n\t} {\n\t\tjresp := map[string]interface{}{}\n\t\t_, rr = doRequest(c, s.rtr, token, \"GET\", `/arvados/v1/collections`, true, nil, bytes.NewBufferString(`{\"where\":{\"uuid\":[\"`+arvadostest.FooCollection+`\"]}`+selectj+`}`), jresp)\n\t\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\t\tc.Check(jresp[\"items\"], check.FitsTypeOf, []interface{}{})\n\t\tc.Check(jresp[\"items_available\"], check.FitsTypeOf, float64(0))\n\t\tc.Check(jresp[\"kind\"], check.Equals, \"arvados#collectionList\")\n\t\titem0 := jresp[\"items\"].([]interface{})[0].(map[string]interface{})\n\t\tc.Check(item0[\"kind\"], check.Equals, \"arvados#collection\")\n\t\tif selectj == \"\" || strings.Contains(selectj, 
\"portable_data_hash\") {\n\t\t\tc.Check(item0[\"portable_data_hash\"], check.Equals, arvadostest.FooCollectionPDH)\n\t\t} else {\n\t\t\tc.Check(item0[\"portable_data_hash\"], check.IsNil)\n\t\t}\n\t\tif selectj == \"\" || strings.Contains(selectj, \"name\") {\n\t\t\tc.Check(item0[\"name\"], check.FitsTypeOf, \"\")\n\t\t} else {\n\t\t\tc.Check(item0[\"name\"], check.IsNil)\n\t\t}\n\t\tif selectj == \"\" || strings.Contains(selectj, \"uuid\") {\n\t\t\tc.Check(item0[\"uuid\"], check.Equals, arvadostest.FooCollection)\n\t\t} else {\n\t\t\tc.Check(item0[\"uuid\"], check.IsNil)\n\t\t}\n\t}\n\n\t// Check \"create collection\" response has \"kind\" key\n\tjresp = map[string]interface{}{}\n\t_, rr = doRequest(c, s.rtr, token, \"POST\", `/arvados/v1/collections`, true, http.Header{\"Content-Type\": {\"application/x-www-form-urlencoded\"}}, bytes.NewBufferString(`ensure_unique_name=true`), jresp)\n\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\tc.Check(jresp[\"uuid\"], check.FitsTypeOf, \"\")\n\tc.Check(jresp[\"kind\"], check.Equals, \"arvados#collection\")\n}\n\nfunc (s *RouterIntegrationSuite) TestMaxRequestSize(c *check.C) {\n\ttoken := arvadostest.ActiveTokenV2\n\tfor _, maxRequestSize := range []int{\n\t\t// Ensure 5M limit is enforced.\n\t\t5000000,\n\t\t// Ensure 50M limit is enforced, and that a >25M body\n\t\t// is accepted even though the default Go request size\n\t\t// limit is 10M.\n\t\t50000000,\n\t} {\n\t\ts.rtr.config.MaxRequestSize = maxRequestSize\n\t\tokstr := \"a\"\n\t\tfor len(okstr) < maxRequestSize/2 {\n\t\t\tokstr = okstr + okstr\n\t\t}\n\n\t\thdr := http.Header{\"Content-Type\": {\"application/x-www-form-urlencoded\"}}\n\n\t\tbody := bytes.NewBufferString(url.Values{\"foo_bar\": {okstr}}.Encode())\n\t\t_, rr := doRequest(c, s.rtr, token, \"POST\", `/arvados/v1/collections`, true, hdr, body, nil)\n\t\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\n\t\tbody = bytes.NewBufferString(url.Values{\"foo_bar\": {okstr + okstr}}.Encode())\n\t\t_, rr = doRequest(c, s.rtr, token, \"POST\", `/arvados/v1/collections`, true, hdr, body, nil)\n\t\tc.Check(rr.Code, check.Equals, http.StatusRequestEntityTooLarge)\n\t}\n}\n\nfunc (s *RouterIntegrationSuite) TestContainerList(c *check.C) {\n\ttoken := arvadostest.ActiveTokenV2\n\n\tjresp := map[string]interface{}{}\n\t_, rr := doRequest(c, s.rtr, token, \"GET\", `/arvados/v1/containers?limit=0`, true, nil, nil, jresp)\n\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\tc.Check(jresp[\"items_available\"], check.FitsTypeOf, float64(0))\n\tc.Check(jresp[\"items_available\"].(float64) > 2, check.Equals, true)\n\tc.Check(jresp[\"items\"], check.NotNil)\n\tc.Check(jresp[\"items\"], check.HasLen, 0)\n\n\tjresp = map[string]interface{}{}\n\t_, rr = doRequest(c, s.rtr, token, \"GET\", `/arvados/v1/containers?filters=[[\"uuid\",\"in\",[]]]`, true, nil, nil, jresp)\n\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\tc.Check(jresp[\"items_available\"], check.Equals, float64(0))\n\tc.Check(jresp[\"items\"], check.NotNil)\n\tc.Check(jresp[\"items\"], check.HasLen, 0)\n\n\tjresp = map[string]interface{}{}\n\t_, rr = doRequest(c, s.rtr, token, \"GET\", `/arvados/v1/containers?limit=2&select=[\"uuid\",\"command\"]`, true, nil, nil, jresp)\n\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\tc.Check(jresp[\"items_available\"], check.FitsTypeOf, float64(0))\n\tc.Check(jresp[\"items_available\"].(float64) > 2, check.Equals, true)\n\tc.Check(jresp[\"items\"], check.HasLen, 2)\n\titem0 := 
jresp[\"items\"].([]interface{})[0].(map[string]interface{})\n\tc.Check(item0[\"uuid\"], check.HasLen, 27)\n\tc.Check(item0[\"command\"], check.FitsTypeOf, []interface{}{})\n\tc.Check(item0[\"command\"].([]interface{})[0], check.FitsTypeOf, \"\")\n\tc.Check(item0[\"mounts\"], check.IsNil)\n\n\tjresp = map[string]interface{}{}\n\t_, rr = doRequest(c, s.rtr, token, \"GET\", `/arvados/v1/containers`, true, nil, nil, jresp)\n\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\tc.Check(jresp[\"items_available\"], check.FitsTypeOf, float64(0))\n\tc.Check(jresp[\"items_available\"].(float64) > 2, check.Equals, true)\n\tavail := int(jresp[\"items_available\"].(float64))\n\tc.Check(jresp[\"items\"], check.HasLen, avail)\n\titem0 = jresp[\"items\"].([]interface{})[0].(map[string]interface{})\n\tc.Check(item0[\"uuid\"], check.HasLen, 27)\n\tc.Check(item0[\"command\"], check.FitsTypeOf, []interface{}{})\n\tc.Check(item0[\"command\"].([]interface{})[0], check.FitsTypeOf, \"\")\n\tc.Check(item0[\"mounts\"], check.NotNil)\n}\n\nfunc (s *RouterIntegrationSuite) TestContainerLock(c *check.C) {\n\tuuid := arvadostest.QueuedContainerUUID\n\ttoken := arvadostest.AdminToken\n\n\tjresp := map[string]interface{}{}\n\t_, rr := doRequest(c, s.rtr, token, \"POST\", \"/arvados/v1/containers/\"+uuid+\"/lock\", true, nil, nil, jresp)\n\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\tc.Check(jresp[\"uuid\"], check.HasLen, 27)\n\tc.Check(jresp[\"state\"], check.Equals, \"Locked\")\n\n\t_, rr = doRequest(c, s.rtr, token, \"POST\", \"/arvados/v1/containers/\"+uuid+\"/lock\", true, nil, nil, nil)\n\tc.Check(rr.Code, check.Equals, http.StatusUnprocessableEntity)\n\tc.Check(rr.Body.String(), check.Not(check.Matches), `.*\"uuid\":.*`)\n\n\tjresp = map[string]interface{}{}\n\t_, rr = doRequest(c, s.rtr, token, \"POST\", \"/arvados/v1/containers/\"+uuid+\"/unlock\", true, nil, nil, jresp)\n\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\tc.Check(jresp[\"uuid\"], check.HasLen, 27)\n\tc.Check(jresp[\"state\"], check.Equals, \"Queued\")\n\tc.Check(jresp[\"environment\"], check.IsNil)\n\n\tjresp = map[string]interface{}{}\n\t_, rr = doRequest(c, s.rtr, token, \"POST\", \"/arvados/v1/containers/\"+uuid+\"/unlock\", true, nil, nil, jresp)\n\tc.Check(rr.Code, check.Equals, http.StatusUnprocessableEntity)\n\tc.Check(jresp[\"uuid\"], check.IsNil)\n}\n\nfunc (s *RouterIntegrationSuite) TestWritableBy(c *check.C) {\n\tjresp := map[string]interface{}{}\n\t_, rr := doRequest(c, s.rtr, arvadostest.ActiveTokenV2, \"GET\", `/arvados/v1/users/`+arvadostest.ActiveUserUUID, true, nil, nil, jresp)\n\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\tc.Check(jresp[\"writable_by\"], check.DeepEquals, []interface{}{\"zzzzz-tpzed-000000000000000\", \"zzzzz-tpzed-xurymjxw79nv3jz\", \"zzzzz-j7d0g-48foin4vonvc2at\"})\n}\n\nfunc (s *RouterIntegrationSuite) TestFullTimestampsInResponse(c *check.C) {\n\tuuid := arvadostest.CollectionReplicationDesired2Confirmed2UUID\n\ttoken := arvadostest.ActiveTokenV2\n\n\tjresp := map[string]interface{}{}\n\t_, rr := doRequest(c, s.rtr, token, \"GET\", `/arvados/v1/collections/`+uuid, true, nil, nil, jresp)\n\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\tc.Check(jresp[\"uuid\"], check.Equals, uuid)\n\texpectNS := map[string]int{\n\t\t\"created_at\":  596506000, // fixture says 596506247, but truncated by postgresql\n\t\t\"modified_at\": 596338000, // fixture says 596338465, but truncated by postgresql\n\t}\n\tfor key, ns := range expectNS {\n\t\tmt, ok := jresp[key].(string)\n\t\tc.Logf(\"jresp[%q] == %q\", key, 
mt)\n\t\tc.Assert(ok, check.Equals, true)\n\t\tt, err := time.Parse(time.RFC3339Nano, mt)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(t.Nanosecond(), check.Equals, ns)\n\t}\n}\n\nfunc (s *RouterIntegrationSuite) TestSelectParam(c *check.C) {\n\tuuid := arvadostest.QueuedContainerUUID\n\ttoken := arvadostest.ActiveTokenV2\n\t// GET\n\tfor _, sel := range [][]string{\n\t\t{\"uuid\", \"command\"},\n\t\t{\"uuid\", \"command\", \"uuid\"},\n\t} {\n\t\tj, err := json.Marshal(sel)\n\t\tc.Assert(err, check.IsNil)\n\t\tjresp := map[string]interface{}{}\n\t\t_, rr := doRequest(c, s.rtr, token, \"GET\", \"/arvados/v1/containers/\"+uuid+\"?select=\"+string(j), true, nil, nil, jresp)\n\t\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\n\t\tc.Check(jresp[\"kind\"], check.Equals, \"arvados#container\")\n\t\tc.Check(jresp[\"uuid\"], check.HasLen, 27)\n\t\tc.Check(jresp[\"command\"], check.HasLen, 2)\n\t\tc.Check(jresp[\"mounts\"], check.IsNil)\n\t\t_, hasMounts := jresp[\"mounts\"]\n\t\tc.Check(hasMounts, check.Equals, false)\n\t}\n\t// POST & PUT\n\tuuid = arvadostest.FooCollection\n\tj, err := json.Marshal([]string{\"uuid\", \"description\"})\n\tc.Assert(err, check.IsNil)\n\tfor _, method := range []string{\"PUT\", \"POST\"} {\n\t\tdesc := \"Today is \" + time.Now().String()\n\t\treqBody := \"{\\\"description\\\":\\\"\" + desc + \"\\\"}\"\n\t\tjresp := map[string]interface{}{}\n\t\tvar rr *httptest.ResponseRecorder\n\t\tif method == \"PUT\" {\n\t\t\t_, rr = doRequest(c, s.rtr, token, method, \"/arvados/v1/collections/\"+uuid+\"?select=\"+string(j), true, nil, bytes.NewReader([]byte(reqBody)), jresp)\n\t\t} else {\n\t\t\t_, rr = doRequest(c, s.rtr, token, method, \"/arvados/v1/collections?select=\"+string(j), true, nil, bytes.NewReader([]byte(reqBody)), jresp)\n\t\t}\n\t\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\t\tc.Check(jresp[\"kind\"], check.Equals, \"arvados#collection\")\n\t\tc.Check(jresp[\"uuid\"], check.HasLen, 27)\n\t\tc.Check(jresp[\"description\"], check.Equals, desc)\n\t\tc.Check(jresp[\"manifest_text\"], check.IsNil)\n\t}\n}\n\nfunc (s *RouterIntegrationSuite) TestIncluded(c *check.C) {\n\tfor _, trial := range []struct {\n\t\tuuid            string\n\t\texpectOwnerUUID string\n\t\texpectOwnerKind string\n\t}{\n\t\t{\n\t\t\tuuid:            arvadostest.ASubprojectUUID,\n\t\t\texpectOwnerUUID: arvadostest.AProjectUUID,\n\t\t\texpectOwnerKind: \"arvados#group\",\n\t\t},\n\t\t{\n\t\t\tuuid:            arvadostest.AProjectUUID,\n\t\t\texpectOwnerUUID: arvadostest.ActiveUserUUID,\n\t\t\texpectOwnerKind: \"arvados#user\",\n\t\t},\n\t} {\n\t\tc.Logf(\"trial: %#v\", trial)\n\t\ttoken := arvadostest.ActiveTokenV2\n\t\tjresp := map[string]interface{}{}\n\t\t_, rr := doRequest(c, s.rtr, token, \"GET\", `/arvados/v1/groups/contents?include=owner_uuid&filters=[[\"uuid\",\"=\",\"`+trial.uuid+`\"]]`, true, nil, nil, jresp)\n\t\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\n\t\tc.Assert(jresp[\"included\"], check.FitsTypeOf, []interface{}{})\n\t\tincluded, ok := jresp[\"included\"].([]interface{})\n\t\tc.Assert(ok, check.Equals, true)\n\t\tc.Assert(included, check.HasLen, 1)\n\t\towner, ok := included[0].(map[string]interface{})\n\t\tc.Assert(ok, check.Equals, true)\n\t\tc.Check(owner[\"kind\"], check.Equals, trial.expectOwnerKind)\n\t\tc.Check(owner[\"uuid\"], check.Equals, trial.expectOwnerUUID)\n\t}\n}\n\nfunc (s *RouterIntegrationSuite) TestHEAD(c *check.C) {\n\t_, rr := doRequest(c, s.rtr, arvadostest.ActiveTokenV2, \"HEAD\", \"/arvados/v1/containers/\"+arvadostest.QueuedContainerUUID, true, 
nil, nil, nil)\n\tc.Check(rr.Code, check.Equals, http.StatusOK)\n}\n\nfunc (s *RouterIntegrationSuite) TestRouteNotFound(c *check.C) {\n\ttoken := arvadostest.ActiveTokenV2\n\treq := (&testReq{\n\t\tmethod: \"POST\",\n\t\tpath:   \"arvados/v1/collections/\" + arvadostest.FooCollection + \"/error404pls\",\n\t\ttoken:  token,\n\t}).Request()\n\trr := httptest.NewRecorder()\n\ts.rtr.ServeHTTP(rr, req)\n\tc.Check(rr.Code, check.Equals, http.StatusNotFound)\n\tc.Logf(\"body: %q\", rr.Body.String())\n\tvar j map[string]interface{}\n\terr := json.Unmarshal(rr.Body.Bytes(), &j)\n\tc.Check(err, check.IsNil)\n\tc.Logf(\"decoded: %v\", j)\n\tc.Assert(j[\"errors\"], check.FitsTypeOf, []interface{}{})\n\tc.Check(j[\"errors\"].([]interface{})[0], check.Equals, \"API endpoint not found\")\n}\n\nfunc (s *RouterIntegrationSuite) TestCORS(c *check.C) {\n\ttoken := arvadostest.ActiveTokenV2\n\treq := (&testReq{\n\t\tmethod: \"OPTIONS\",\n\t\tpath:   \"arvados/v1/collections/\" + arvadostest.FooCollection,\n\t\theader: http.Header{\"Origin\": {\"https://example.com\"}},\n\t\ttoken:  token,\n\t}).Request()\n\trr := httptest.NewRecorder()\n\ts.rtr.ServeHTTP(rr, req)\n\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\tc.Check(rr.Body.String(), check.HasLen, 0)\n\tc.Check(rr.Result().Header.Get(\"Access-Control-Allow-Origin\"), check.Equals, \"*\")\n\tfor _, hdr := range []string{\"Authorization\", \"Content-Type\"} {\n\t\tc.Check(rr.Result().Header.Get(\"Access-Control-Allow-Headers\"), check.Matches, \".*\"+hdr+\".*\")\n\t}\n\tfor _, method := range []string{\"GET\", \"HEAD\", \"PUT\", \"POST\", \"PATCH\", \"DELETE\"} {\n\t\tc.Check(rr.Result().Header.Get(\"Access-Control-Allow-Methods\"), check.Matches, \".*\"+method+\".*\")\n\t}\n\n\tfor _, unsafe := range []string{\"login\", \"logout\", \"auth\", \"auth/foo\", \"login/?blah\"} {\n\t\treq := (&testReq{\n\t\t\tmethod: \"OPTIONS\",\n\t\t\tpath:   unsafe,\n\t\t\theader: http.Header{\"Origin\": {\"https://example.com\"}},\n\t\t\ttoken:  token,\n\t\t}).Request()\n\t\trr := httptest.NewRecorder()\n\t\ts.rtr.ServeHTTP(rr, req)\n\t\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\t\tc.Check(rr.Body.String(), check.HasLen, 0)\n\t\tc.Check(rr.Result().Header.Get(\"Access-Control-Allow-Origin\"), check.Equals, \"\")\n\t\tc.Check(rr.Result().Header.Get(\"Access-Control-Allow-Methods\"), check.Equals, \"\")\n\t\tc.Check(rr.Result().Header.Get(\"Access-Control-Allow-Headers\"), check.Equals, \"\")\n\n\t\treq = (&testReq{\n\t\t\tmethod: \"POST\",\n\t\t\tpath:   unsafe,\n\t\t\theader: http.Header{\"Origin\": {\"https://example.com\"}},\n\t\t\ttoken:  token,\n\t\t}).Request()\n\t\trr = httptest.NewRecorder()\n\t\ts.rtr.ServeHTTP(rr, req)\n\t\tc.Check(rr.Result().Header.Get(\"Access-Control-Allow-Origin\"), check.Equals, \"\")\n\t\tc.Check(rr.Result().Header.Get(\"Access-Control-Allow-Methods\"), check.Equals, \"\")\n\t\tc.Check(rr.Result().Header.Get(\"Access-Control-Allow-Headers\"), check.Equals, \"\")\n\t}\n}\n\nfunc (s *RouterIntegrationSuite) TestComputedPermissionList(c *check.C) {\n\ttoken := arvadostest.AdminToken\n\n\tjresp := map[string]interface{}{}\n\t_, rr := doRequest(c, s.rtr, token, \"GET\", `/arvados/v1/computed_permissions?filters=[[\"user_uuid\",\"=\",\"`+arvadostest.ActiveUserUUID+`\"],[\"target_uuid\",\"=\",\"`+arvadostest.AProjectUUID+`\"]]&select=[\"perm_level\"]`, true, nil, nil, jresp)\n\tc.Check(rr.Code, check.Equals, http.StatusOK)\n\tc.Check(jresp[\"items_available\"], check.IsNil)\n\tif c.Check(jresp[\"items\"], check.HasLen, 1) {\n\t\titem := 
jresp[\"items\"].([]interface{})[0].(map[string]interface{})\n\t\tc.Check(item, check.DeepEquals, map[string]interface{}{\n\t\t\t\"kind\":       \"arvados#computedPermission\",\n\t\t\t\"perm_level\": \"can_manage\",\n\t\t})\n\t}\n}\n\nfunc doRequest(c *check.C, rtr http.Handler, token, method, path string, auth bool, hdrs http.Header, body io.Reader, jresp map[string]interface{}) (*http.Request, *httptest.ResponseRecorder) {\n\treq := httptest.NewRequest(method, path, body)\n\tfor k, v := range hdrs {\n\t\tif k == \"Host\" && len(v) == 1 {\n\t\t\treq.Host = v[0]\n\t\t} else {\n\t\t\treq.Header[k] = v\n\t\t}\n\t}\n\tif auth {\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+token)\n\t}\n\trr := httptest.NewRecorder()\n\trtr.ServeHTTP(rr, req)\n\trespbody := rr.Body.String()\n\tif len(respbody) > 10000 {\n\t\trespbody = respbody[:10000] + \"[...]\"\n\t}\n\tc.Logf(\"response body: %s\", respbody)\n\tif jresp != nil {\n\t\terr := json.Unmarshal(rr.Body.Bytes(), &jresp)\n\t\tc.Check(err, check.IsNil)\n\t}\n\treturn req, rr\n}\n"
  },
  {
    "path": "lib/controller/rpc/conn.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage rpc\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httputil\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\nconst rfc3339NanoFixed = \"2006-01-02T15:04:05.000000000Z07:00\"\n\ntype TokenProvider func(context.Context) ([]string, error)\n\nfunc PassthroughTokenProvider(ctx context.Context) ([]string, error) {\n\tincoming, ok := auth.FromContext(ctx)\n\tif !ok {\n\t\treturn nil, errors.New(\"no token provided\")\n\t}\n\treturn incoming.Tokens, nil\n}\n\ntype Conn struct {\n\tSendHeader         http.Header\n\tRedactHostInErrors bool\n\n\tclusterID                string\n\thttpClient               http.Client\n\tbaseURL                  url.URL\n\ttokenProvider            TokenProvider\n\tdiscoveryDocument        *arvados.DiscoveryDocument\n\tdiscoveryDocumentMtx     sync.Mutex\n\tdiscoveryDocumentExpires time.Time\n}\n\nfunc NewConn(clusterID string, url *url.URL, insecure bool, tp TokenProvider) *Conn {\n\ttransport := http.DefaultTransport\n\tif insecure {\n\t\t// It's not safe to copy *http.DefaultTransport\n\t\t// because it has a mutex (which might be locked)\n\t\t// protecting a private map (which might not be nil).\n\t\t// So we build our own, using the Go 1.23 default\n\t\t// values, ignoring any changes the application has\n\t\t// made to http.DefaultTransport.\n\t\ttransport = &http.Transport{\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout:   30 * time.Second,\n\t\t\t\tKeepAlive: 30 * time.Second,\n\t\t\t\tDualStack: true,\n\t\t\t}).DialContext,\n\t\t\tForceAttemptHTTP2:     true,\n\t\t\tMaxIdleConns:          100,\n\t\t\tIdleConnTimeout:       90 * time.Second,\n\t\t\tTLSHandshakeTimeout:   10 * time.Second,\n\t\t\tExpectContinueTimeout: 1 * time.Second,\n\t\t\tTLSClientConfig:       &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\t}\n\treturn &Conn{\n\t\tclusterID: clusterID,\n\t\thttpClient: http.Client{\n\t\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse },\n\t\t\tTransport:     transport,\n\t\t},\n\t\tbaseURL:       *url,\n\t\ttokenProvider: tp,\n\t}\n}\n\nfunc (conn *Conn) requestAndDecode(ctx context.Context, dst interface{}, ep arvados.APIEndpoint, body io.Reader, opts interface{}) error {\n\taClient := arvados.Client{\n\t\tClient:     &conn.httpClient,\n\t\tScheme:     conn.baseURL.Scheme,\n\t\tAPIHost:    conn.baseURL.Host,\n\t\tSendHeader: conn.SendHeader,\n\t\t// Disable auto-retry\n\t\tTimeout: 0,\n\t}\n\ttokens, err := conn.tokenProvider(ctx)\n\tif err != nil {\n\t\treturn err\n\t} else if len(tokens) > 0 {\n\t\tctx = arvados.ContextWithAuthorization(ctx, \"Bearer \"+tokens[0])\n\t} else {\n\t\t// Use a non-empty auth string to ensure we override\n\t\t// any default token set on aClient -- and to avoid\n\t\t// having the remote prompt us to send a token by\n\t\t// responding 401.\n\t\tctx = arvados.ContextWithAuthorization(ctx, \"Bearer -\")\n\t}\n\n\t// Encode opts to JSON and decode from there to a\n\t// map[string]interface{}, so we can munge the query params\n\t// using the JSON key names specified by opts' struct 
tags.\n\tj, err := json.Marshal(opts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%T: requestAndDecode: Marshal opts: %s\", conn, err)\n\t}\n\tvar params map[string]interface{}\n\tdec := json.NewDecoder(bytes.NewBuffer(j))\n\tdec.UseNumber()\n\terr = dec.Decode(&params)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%T: requestAndDecode: Decode opts: %s\", conn, err)\n\t}\n\tif attrs, ok := params[\"attrs\"]; ok && ep.AttrsKey != \"\" {\n\t\tparams[ep.AttrsKey] = attrs\n\t\tdelete(params, \"attrs\")\n\t}\n\tif limitStr, ok := params[\"limit\"]; ok {\n\t\tif limit, err := strconv.ParseInt(string(limitStr.(json.Number)), 10, 64); err == nil && limit < 0 {\n\t\t\t// Negative limit means \"not specified\" here, but some\n\t\t\t// servers/versions do not accept that, so we need to\n\t\t\t// remove it entirely.\n\t\t\tdelete(params, \"limit\")\n\t\t}\n\t}\n\n\tif authinfo, ok := params[\"auth_info\"]; ok {\n\t\tif tmp, ok2 := authinfo.(map[string]interface{}); ok2 {\n\t\t\tfor k, v := range tmp {\n\t\t\t\tif strings.HasSuffix(k, \"_at\") {\n\t\t\t\t\t// Change zero time values to nil\n\t\t\t\t\tif v, ok3 := v.(string); ok3 && (strings.HasPrefix(v, \"0001-01-01T00:00:00\") || v == \"\") {\n\t\t\t\t\t\ttmp[k] = nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(tokens) > 1 {\n\t\tif params == nil {\n\t\t\tparams = make(map[string]interface{})\n\t\t}\n\t\tparams[\"reader_tokens\"] = tokens[1:]\n\t}\n\tpath := ep.Path\n\tif strings.Contains(ep.Path, \"/{uuid}\") && params != nil {\n\t\tuuid, _ := params[\"uuid\"].(string)\n\t\tpath = strings.Replace(path, \"/{uuid}\", \"/\"+uuid, 1)\n\t\tdelete(params, \"uuid\")\n\t}\n\terr = aClient.RequestAndDecodeContext(ctx, dst, ep.Method, path, body, params)\n\tif err != nil && conn.RedactHostInErrors {\n\t\tredacted := strings.Replace(err.Error(), strings.TrimSuffix(conn.baseURL.String(), \"/\"), \"//railsapi.internal\", -1)\n\t\tif strings.HasPrefix(redacted, \"request failed: \") {\n\t\t\tredacted = strings.Replace(redacted, \"request failed: \", \"\", -1)\n\t\t}\n\t\tif redacted != err.Error() {\n\t\t\tif err, ok := err.(httpStatusError); ok {\n\t\t\t\treturn wrapHTTPStatusError(err, redacted)\n\t\t\t} else {\n\t\t\t\treturn errors.New(redacted)\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}\n\nfunc (conn *Conn) BaseURL() url.URL {\n\treturn conn.baseURL\n}\n\nfunc (conn *Conn) ConfigGet(ctx context.Context) (json.RawMessage, error) {\n\tep := arvados.EndpointConfigGet\n\tvar resp json.RawMessage\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, nil)\n\treturn resp, err\n}\n\nfunc (conn *Conn) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {\n\tep := arvados.EndpointVocabularyGet\n\tvar resp arvados.Vocabulary\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, nil)\n\treturn resp, err\n}\n\nfunc (conn *Conn) DiscoveryDocument(ctx context.Context) (arvados.DiscoveryDocument, error) {\n\tconn.discoveryDocumentMtx.Lock()\n\tdefer conn.discoveryDocumentMtx.Unlock()\n\tif conn.discoveryDocument != nil && time.Now().Before(conn.discoveryDocumentExpires) {\n\t\treturn *conn.discoveryDocument, nil\n\t}\n\tvar dd arvados.DiscoveryDocument\n\terr := conn.requestAndDecode(ctx, &dd, arvados.EndpointDiscoveryDocument, nil, nil)\n\tif err != nil {\n\t\treturn dd, err\n\t}\n\tconn.discoveryDocument = &dd\n\tconn.discoveryDocumentExpires = time.Now().Add(time.Hour)\n\treturn *conn.discoveryDocument, nil\n}\n\nfunc (conn *Conn) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {\n\tep := arvados.EndpointLogin\n\tvar 
resp arvados.LoginResponse\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\tresp.RedirectLocation = conn.relativeToBaseURL(resp.RedirectLocation)\n\treturn resp, err\n}\n\nfunc (conn *Conn) Logout(ctx context.Context, options arvados.LogoutOptions) (arvados.LogoutResponse, error) {\n\tep := arvados.EndpointLogout\n\tvar resp arvados.LogoutResponse\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\tresp.RedirectLocation = conn.relativeToBaseURL(resp.RedirectLocation)\n\treturn resp, err\n}\n\n// If the given location is a valid URL and its origin is the same as\n// conn.baseURL, return it as a relative URL. Otherwise, return it\n// unmodified.\nfunc (conn *Conn) relativeToBaseURL(location string) string {\n\tu, err := url.Parse(location)\n\tif err == nil && u.Scheme == conn.baseURL.Scheme && strings.ToLower(u.Host) == strings.ToLower(conn.baseURL.Host) {\n\t\tu.Opaque = \"\"\n\t\tu.Scheme = \"\"\n\t\tu.User = nil\n\t\tu.Host = \"\"\n\t\treturn u.String()\n\t}\n\treturn location\n}\n\nfunc (conn *Conn) AuthorizedKeyCreate(ctx context.Context, options arvados.CreateOptions) (arvados.AuthorizedKey, error) {\n\tep := arvados.EndpointAuthorizedKeyCreate\n\tvar resp arvados.AuthorizedKey\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) AuthorizedKeyUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.AuthorizedKey, error) {\n\tep := arvados.EndpointAuthorizedKeyUpdate\n\tvar resp arvados.AuthorizedKey\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) AuthorizedKeyGet(ctx context.Context, options arvados.GetOptions) (arvados.AuthorizedKey, error) {\n\tep := arvados.EndpointAuthorizedKeyGet\n\tvar resp arvados.AuthorizedKey\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) AuthorizedKeyList(ctx context.Context, options arvados.ListOptions) (arvados.AuthorizedKeyList, error) {\n\tep := arvados.EndpointAuthorizedKeyList\n\tvar resp arvados.AuthorizedKeyList\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) AuthorizedKeyDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.AuthorizedKey, error) {\n\tep := arvados.EndpointAuthorizedKeyDelete\n\tvar resp arvados.AuthorizedKey\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) CollectionCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Collection, error) {\n\tep := arvados.EndpointCollectionCreate\n\tvar resp arvados.Collection\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) CollectionUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Collection, error) {\n\tep := arvados.EndpointCollectionUpdate\n\tvar resp arvados.Collection\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) CollectionGet(ctx context.Context, options arvados.GetOptions) (arvados.Collection, error) {\n\tep := arvados.EndpointCollectionGet\n\tvar resp arvados.Collection\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) CollectionList(ctx context.Context, options arvados.ListOptions) (arvados.CollectionList, error) {\n\tep := arvados.EndpointCollectionList\n\tvar resp arvados.CollectionList\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, 
options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) CollectionProvenance(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {\n\tep := arvados.EndpointCollectionProvenance\n\tvar resp map[string]interface{}\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) CollectionUsedBy(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {\n\tep := arvados.EndpointCollectionUsedBy\n\tvar resp map[string]interface{}\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) CollectionDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {\n\tep := arvados.EndpointCollectionDelete\n\tvar resp arvados.Collection\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) CollectionTrash(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {\n\tep := arvados.EndpointCollectionTrash\n\tvar resp arvados.Collection\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) CollectionUntrash(ctx context.Context, options arvados.UntrashOptions) (arvados.Collection, error) {\n\tep := arvados.EndpointCollectionUntrash\n\tvar resp arvados.Collection\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ComputedPermissionList(ctx context.Context, options arvados.ListOptions) (arvados.ComputedPermissionList, error) {\n\tep := arvados.EndpointComputedPermissionList\n\tvar resp arvados.ComputedPermissionList\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Container, error) {\n\tep := arvados.EndpointContainerCreate\n\tvar resp arvados.Container\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {\n\tep := arvados.EndpointContainerUpdate\n\tvar resp arvados.Container\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerPriorityUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {\n\tep := arvados.EndpointContainerPriorityUpdate\n\tvar resp arvados.Container\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerGet(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {\n\tep := arvados.EndpointContainerGet\n\tvar resp arvados.Container\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerList, error) {\n\tep := arvados.EndpointContainerList\n\tvar resp arvados.ContainerList\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Container, error) {\n\tep := arvados.EndpointContainerDelete\n\tvar resp arvados.Container\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerLock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {\n\tep := 
arvados.EndpointContainerLock\n\tvar resp arvados.Container\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerUnlock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {\n\tep := arvados.EndpointContainerUnlock\n\tvar resp arvados.Container\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerHTTPProxy(ctx context.Context, options arvados.ContainerHTTPProxyOptions) (http.Handler, error) {\n\treturn conn.reverseProxy(\"\", http.Header{\"X-Arvados-No-Forward\": {\"1\"}}), nil\n}\n\n// ContainerSSH returns a connection to the out-of-band SSH server for\n// a running container. If the returned error is nil, the caller is\n// responsible for closing sshconn.Conn.\nfunc (conn *Conn) ContainerSSH(ctx context.Context, options arvados.ContainerSSHOptions) (sshconn arvados.ConnectionResponse, err error) {\n\tu, err := conn.baseURL.Parse(\"/\" + strings.Replace(arvados.EndpointContainerSSHCompat.Path, \"{uuid}\", options.UUID, -1))\n\tif err != nil {\n\t\terr = fmt.Errorf(\"url.Parse: %w\", err)\n\t\treturn\n\t}\n\treturn conn.socket(ctx, u, \"ssh\", url.Values{\n\t\t\"detach_keys\":    {options.DetachKeys},\n\t\t\"login_username\": {options.LoginUsername},\n\t\t\"no_forward\":     {fmt.Sprintf(\"%v\", options.NoForward)},\n\t})\n}\n\n// ContainerGatewayTunnel returns a connection to a yamux session on\n// the controller. The caller should connect the returned resp.Conn to\n// a client-side yamux session.\nfunc (conn *Conn) ContainerGatewayTunnel(ctx context.Context, options arvados.ContainerGatewayTunnelOptions) (tunnelconn arvados.ConnectionResponse, err error) {\n\tu, err := conn.baseURL.Parse(\"/\" + strings.Replace(arvados.EndpointContainerGatewayTunnelCompat.Path, \"{uuid}\", options.UUID, -1))\n\tif err != nil {\n\t\terr = fmt.Errorf(\"url.Parse: %w\", err)\n\t\treturn\n\t}\n\treturn conn.socket(ctx, u, \"tunnel\", url.Values{\n\t\t\"auth_secret\": {options.AuthSecret},\n\t})\n}\n\n// socket sets up a socket using the specified API endpoint and\n// upgrade header.\nfunc (conn *Conn) socket(ctx context.Context, u *url.URL, upgradeHeader string, postform url.Values) (connresp arvados.ConnectionResponse, err error) {\n\taddr := conn.baseURL.Host\n\tif strings.Index(addr, \":\") < 1 || (strings.Contains(addr, \"::\") && addr[0] != '[') {\n\t\t// hostname or ::1 or 1::1\n\t\taddr = net.JoinHostPort(addr, \"https\")\n\t}\n\tinsecure := false\n\tif tlsconf := conn.httpClient.Transport.(*http.Transport).TLSClientConfig; tlsconf != nil && tlsconf.InsecureSkipVerify {\n\t\tinsecure = true\n\t}\n\tnetconn, err := tls.Dial(\"tcp\", addr, &tls.Config{InsecureSkipVerify: insecure})\n\tif err != nil {\n\t\treturn connresp, fmt.Errorf(\"tls.Dial: %w\", err)\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tnetconn.Close()\n\t\t}\n\t}()\n\tbufr := bufio.NewReader(netconn)\n\tbufw := bufio.NewWriter(netconn)\n\n\ttokens, err := conn.tokenProvider(ctx)\n\tif err != nil {\n\t\treturn connresp, err\n\t} else if len(tokens) < 1 {\n\t\treturn connresp, httpserver.ErrorWithStatus(errors.New(\"unauthorized\"), http.StatusUnauthorized)\n\t}\n\tpostdata := postform.Encode()\n\tbufw.WriteString(\"POST \" + u.String() + \" HTTP/1.1\\r\\n\")\n\tbufw.WriteString(\"Authorization: Bearer \" + tokens[0] + \"\\r\\n\")\n\tbufw.WriteString(\"Host: \" + u.Host + \"\\r\\n\")\n\tbufw.WriteString(\"Upgrade: \" + upgradeHeader + \"\\r\\n\")\n\tbufw.WriteString(\"Content-Type: 
application/x-www-form-urlencoded\\r\\n\")\n\tfmt.Fprintf(bufw, \"Content-Length: %d\\r\\n\", len(postdata))\n\tbufw.WriteString(\"\\r\\n\")\n\tbufw.WriteString(postdata)\n\tbufw.Flush()\n\tresp, err := http.ReadResponse(bufr, &http.Request{Method: \"POST\"})\n\tif err != nil {\n\t\treturn connresp, fmt.Errorf(\"http.ReadResponse: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusSwitchingProtocols {\n\t\tctxlog.FromContext(ctx).Infof(\"rpc.Conn.socket: server %s did not switch protocols, got status %s\", u.String(), resp.Status)\n\t\tbody, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 10000))\n\t\tvar message string\n\t\tvar errDoc httpserver.ErrorResponse\n\t\tif err := json.Unmarshal(body, &errDoc); err == nil {\n\t\t\tmessage = strings.Join(errDoc.Errors, \"; \")\n\t\t} else {\n\t\t\tmessage = fmt.Sprintf(\"%q\", body)\n\t\t}\n\t\treturn connresp, httpserver.ErrorWithStatus(fmt.Errorf(\"server did not provide a tunnel: %s: %s\", resp.Status, message), resp.StatusCode)\n\t}\n\tif strings.ToLower(resp.Header.Get(\"Upgrade\")) != upgradeHeader ||\n\t\tstrings.ToLower(resp.Header.Get(\"Connection\")) != \"upgrade\" {\n\t\treturn connresp, httpserver.ErrorWithStatus(fmt.Errorf(\"bad response from server: Upgrade %q Connection %q\", resp.Header.Get(\"Upgrade\"), resp.Header.Get(\"Connection\")), http.StatusBadGateway)\n\t}\n\tconnresp.Conn = netconn\n\tconnresp.Bufrw = &bufio.ReadWriter{Reader: bufr, Writer: bufw}\n\tconnresp.Header = resp.Header\n\treturn connresp, nil\n}\n\nfunc (conn *Conn) ContainerRequestCreate(ctx context.Context, options arvados.CreateOptions) (arvados.ContainerRequest, error) {\n\tep := arvados.EndpointContainerRequestCreate\n\tvar resp arvados.ContainerRequest\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerRequestUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.ContainerRequest, error) {\n\tep := arvados.EndpointContainerRequestUpdate\n\tvar resp arvados.ContainerRequest\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerRequestGet(ctx context.Context, options arvados.GetOptions) (arvados.ContainerRequest, error) {\n\tep := arvados.EndpointContainerRequestGet\n\tvar resp arvados.ContainerRequest\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerRequestList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerRequestList, error) {\n\tep := arvados.EndpointContainerRequestList\n\tvar resp arvados.ContainerRequestList\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerRequestDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.ContainerRequest, error) {\n\tep := arvados.EndpointContainerRequestDelete\n\tvar resp arvados.ContainerRequest\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerRequestContainerStatus(ctx context.Context, options arvados.GetOptions) (arvados.ContainerStatus, error) {\n\tep := arvados.EndpointContainerRequestContainerStatus\n\tvar resp arvados.ContainerStatus\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) ContainerRequestLog(ctx context.Context, options arvados.ContainerLogOptions) (resp http.Handler, err error) {\n\treturn conn.reverseProxy(fmt.Sprintf(\"no_forward=%v\", options.NoForward), 
nil), nil\n}\n\nfunc (conn *Conn) reverseProxy(setRawQuery string, setHeader http.Header) http.Handler {\n\treturn &httputil.ReverseProxy{\n\t\tTransport: conn.httpClient.Transport,\n\t\tDirector: func(r *http.Request) {\n\t\t\tu := conn.baseURL\n\t\t\tu.Path = r.URL.Path\n\t\t\tif setRawQuery != \"\" {\n\t\t\t\tu.RawQuery = setRawQuery\n\t\t\t}\n\t\t\tfor k, v := range setHeader {\n\t\t\t\tr.Header[k] = v\n\t\t\t}\n\t\t\tr.URL = &u\n\t\t},\n\t}\n}\n\nfunc (conn *Conn) GroupCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Group, error) {\n\tep := arvados.EndpointGroupCreate\n\tvar resp arvados.Group\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) GroupUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Group, error) {\n\tep := arvados.EndpointGroupUpdate\n\tvar resp arvados.Group\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) GroupGet(ctx context.Context, options arvados.GetOptions) (arvados.Group, error) {\n\tep := arvados.EndpointGroupGet\n\tvar resp arvados.Group\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) GroupList(ctx context.Context, options arvados.ListOptions) (arvados.GroupList, error) {\n\tep := arvados.EndpointGroupList\n\tvar resp arvados.GroupList\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) GroupContents(ctx context.Context, options arvados.GroupContentsOptions) (arvados.ObjectList, error) {\n\tep := arvados.EndpointGroupContents\n\tvar resp arvados.ObjectList\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) GroupShared(ctx context.Context, options arvados.ListOptions) (arvados.GroupList, error) {\n\tep := arvados.EndpointGroupShared\n\tvar resp arvados.GroupList\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) GroupDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Group, error) {\n\tep := arvados.EndpointGroupDelete\n\tvar resp arvados.Group\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) GroupTrash(ctx context.Context, options arvados.DeleteOptions) (arvados.Group, error) {\n\tep := arvados.EndpointGroupTrash\n\tvar resp arvados.Group\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) GroupUntrash(ctx context.Context, options arvados.UntrashOptions) (arvados.Group, error) {\n\tep := arvados.EndpointGroupUntrash\n\tvar resp arvados.Group\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) LinkCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Link, error) {\n\tep := arvados.EndpointLinkCreate\n\tvar resp arvados.Link\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) LinkUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Link, error) {\n\tep := arvados.EndpointLinkUpdate\n\tvar resp arvados.Link\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) LinkGet(ctx context.Context, options arvados.GetOptions) (arvados.Link, error) {\n\tep := arvados.EndpointLinkGet\n\tvar resp arvados.Link\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, 
err\n}\n\nfunc (conn *Conn) LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {\n\tep := arvados.EndpointLinkList\n\tvar resp arvados.LinkList\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) LinkDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Link, error) {\n\tep := arvados.EndpointLinkDelete\n\tvar resp arvados.Link\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) LogCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Log, error) {\n\tep := arvados.EndpointLogCreate\n\tvar resp arvados.Log\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) LogUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Log, error) {\n\tep := arvados.EndpointLogUpdate\n\tvar resp arvados.Log\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) LogGet(ctx context.Context, options arvados.GetOptions) (arvados.Log, error) {\n\tep := arvados.EndpointLogGet\n\tvar resp arvados.Log\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) LogList(ctx context.Context, options arvados.ListOptions) (arvados.LogList, error) {\n\tep := arvados.EndpointLogList\n\tvar resp arvados.LogList\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) LogDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Log, error) {\n\tep := arvados.EndpointLogDelete\n\tvar resp arvados.Log\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) SysTrashSweep(ctx context.Context, options struct{}) (struct{}, error) {\n\tep := arvados.EndpointSysTrashSweep\n\tvar resp struct{}\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) UserCreate(ctx context.Context, options arvados.CreateOptions) (arvados.User, error) {\n\tep := arvados.EndpointUserCreate\n\tvar resp arvados.User\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) UserUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.User, error) {\n\tep := arvados.EndpointUserUpdate\n\tvar resp arvados.User\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) UserMerge(ctx context.Context, options arvados.UserMergeOptions) (arvados.User, error) {\n\tep := arvados.EndpointUserMerge\n\tvar resp arvados.User\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) UserActivate(ctx context.Context, options arvados.UserActivateOptions) (arvados.User, error) {\n\tep := arvados.EndpointUserActivate\n\tvar resp arvados.User\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) UserSetup(ctx context.Context, options arvados.UserSetupOptions) (map[string]interface{}, error) {\n\tep := arvados.EndpointUserSetup\n\tvar resp map[string]interface{}\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) UserUnsetup(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {\n\tep := arvados.EndpointUserUnsetup\n\tvar resp arvados.User\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc 
(conn *Conn) UserGet(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {\n\tep := arvados.EndpointUserGet\n\tvar resp arvados.User\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) UserGetCurrent(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {\n\tep := arvados.EndpointUserGetCurrent\n\tvar resp arvados.User\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) UserGetSystem(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {\n\tep := arvados.EndpointUserGetSystem\n\tvar resp arvados.User\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) UserList(ctx context.Context, options arvados.ListOptions) (arvados.UserList, error) {\n\tep := arvados.EndpointUserList\n\tvar resp arvados.UserList\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) UserDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.User, error) {\n\tep := arvados.EndpointUserDelete\n\tvar resp arvados.User\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) APIClientAuthorizationCurrent(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {\n\tep := arvados.EndpointAPIClientAuthorizationCurrent\n\tvar resp arvados.APIClientAuthorization\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) APIClientAuthorizationCreate(ctx context.Context, options arvados.CreateOptions) (arvados.APIClientAuthorization, error) {\n\tep := arvados.EndpointAPIClientAuthorizationCreate\n\tvar resp arvados.APIClientAuthorization\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) APIClientAuthorizationUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.APIClientAuthorization, error) {\n\tep := arvados.EndpointAPIClientAuthorizationUpdate\n\tvar resp arvados.APIClientAuthorization\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) APIClientAuthorizationDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.APIClientAuthorization, error) {\n\tep := arvados.EndpointAPIClientAuthorizationDelete\n\tvar resp arvados.APIClientAuthorization\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) APIClientAuthorizationList(ctx context.Context, options arvados.ListOptions) (arvados.APIClientAuthorizationList, error) {\n\tep := arvados.EndpointAPIClientAuthorizationList\n\tvar resp arvados.APIClientAuthorizationList\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\nfunc (conn *Conn) APIClientAuthorizationGet(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {\n\tep := arvados.EndpointAPIClientAuthorizationGet\n\tvar resp arvados.APIClientAuthorization\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\ntype UserSessionAuthInfo struct {\n\tUserUUID        string    `json:\"user_uuid\"`\n\tEmail           string    `json:\"email\"`\n\tAlternateEmails []string  `json:\"alternate_emails\"`\n\tFirstName       string    `json:\"first_name\"`\n\tLastName        string    `json:\"last_name\"`\n\tUsername        string    `json:\"username\"`\n\tExpiresAt       
time.Time `json:\"expires_at\"`\n}\n\ntype UserSessionCreateOptions struct {\n\tAuthInfo UserSessionAuthInfo `json:\"auth_info\"`\n\tReturnTo string              `json:\"return_to\"`\n}\n\nfunc (conn *Conn) UserSessionCreate(ctx context.Context, options UserSessionCreateOptions) (arvados.LoginResponse, error) {\n\tep := arvados.APIEndpoint{Method: \"POST\", Path: \"auth/controller/callback\"}\n\tvar resp arvados.LoginResponse\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) UserBatchUpdate(ctx context.Context, options arvados.UserBatchUpdateOptions) (arvados.UserList, error) {\n\tep := arvados.EndpointUserBatchUpdate\n\tvar resp arvados.UserList\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\nfunc (conn *Conn) UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {\n\tep := arvados.EndpointUserAuthenticate\n\tvar resp arvados.APIClientAuthorization\n\terr := conn.requestAndDecode(ctx, &resp, ep, nil, options)\n\treturn resp, err\n}\n\n// httpStatusError is an error with an HTTP status code that can be\n// propagated by lib/controller/router, etc.\ntype httpStatusError interface {\n\terror\n\tHTTPStatus() int\n}\n\n// wrappedHTTPStatusError is used to augment/replace an error message\n// while preserving the HTTP status code indicated by the original\n// error.\ntype wrappedHTTPStatusError struct {\n\thttpStatusError\n\tmessage string\n}\n\nfunc wrapHTTPStatusError(err httpStatusError, message string) httpStatusError {\n\treturn wrappedHTTPStatusError{err, message}\n}\n\nfunc (err wrappedHTTPStatusError) Error() string {\n\treturn err.message\n}\n"
  },
  {
    "path": "lib/controller/rpc/conn_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage rpc\n\nimport (\n\t\"context\"\n\t\"net/url\"\n\t\"os\"\n\t\"testing\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/sirupsen/logrus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&RPCSuite{})\n\ntype key int\n\nconst (\n\tcontextKeyTestTokens key = iota\n)\n\ntype RPCSuite struct {\n\tlog  logrus.FieldLogger\n\tctx  context.Context\n\tconn *Conn\n}\n\nfunc (s *RPCSuite) SetUpTest(c *check.C) {\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\ts.ctx = context.WithValue(ctx, contextKeyTestTokens, []string{arvadostest.ActiveToken})\n}\n\nfunc (s *RPCSuite) setupConn(c *check.C, host string) {\n\ts.conn = NewConn(\"zzzzz\", &url.URL{Scheme: \"https\", Host: host}, true, func(ctx context.Context) ([]string, error) {\n\t\ttokens, _ := ctx.Value(contextKeyTestTokens).([]string)\n\t\treturn tokens, nil\n\t})\n}\n\nfunc (s *RPCSuite) workbench2URL(c *check.C) string {\n\tloader := config.NewLoader(nil, s.log)\n\tcfg, err := loader.Load()\n\tc.Assert(err, check.IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\treturn cluster.Services.Workbench2.ExternalURL.String()\n}\n\nfunc (s *RPCSuite) TestRailsLogin404(c *check.C) {\n\ts.setupConn(c, os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\ts.ctx = context.Background()\n\topts := arvados.LoginOptions{\n\t\tReturnTo: \"https://foo.example.com/bar\",\n\t}\n\t_, err := s.conn.Login(s.ctx, opts)\n\tc.Check(err.(*arvados.TransactionError).StatusCode, check.Equals, 404)\n}\n\nfunc (s *RPCSuite) TestRailsLogout404(c *check.C) {\n\ts.setupConn(c, os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\ts.ctx = context.Background()\n\topts := arvados.LogoutOptions{\n\t\tReturnTo: \"https://foo.example.com/bar\",\n\t}\n\t_, err := s.conn.Logout(s.ctx, opts)\n\tc.Check(err.(*arvados.TransactionError).StatusCode, check.Equals, 404)\n}\n\nfunc (s *RPCSuite) TestControllerLogout(c *check.C) {\n\ts.setupConn(c, os.Getenv(\"ARVADOS_API_HOST\"))\n\ts.ctx = context.Background()\n\turl := s.workbench2URL(c)\n\topts := arvados.LogoutOptions{\n\t\tReturnTo: url,\n\t}\n\tresp, err := s.conn.Logout(s.ctx, opts)\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.RedirectLocation, check.Equals, url)\n}\n\nfunc (s *RPCSuite) TestCollectionCreate(c *check.C) {\n\ts.setupConn(c, os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\tcoll, err := s.conn.CollectionCreate(s.ctx, arvados.CreateOptions{Attrs: map[string]interface{}{\n\t\t\"owner_uuid\":         arvadostest.ActiveUserUUID,\n\t\t\"portable_data_hash\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n\t}})\n\tc.Check(err, check.IsNil)\n\tc.Check(coll.UUID, check.HasLen, 27)\n}\n\nfunc (s *RPCSuite) TestGroupCRUD(c *check.C) {\n\ts.setupConn(c, os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\tsp, err := s.conn.GroupCreate(s.ctx, arvados.CreateOptions{Attrs: map[string]interface{}{\n\t\t\"group_class\": \"project\",\n\t\t\"owner_uuid\":  arvadostest.ActiveUserUUID,\n\t\t\"properties\":  map[string]string{\"foo\": \"bar\"},\n\t}})\n\tc.Check(err, check.IsNil)\n\tc.Check(sp.UUID, check.HasLen, 27)\n\tc.Check(sp.Properties, check.HasLen, 1)\n\tc.Check(sp.Properties[\"foo\"], check.Equals, \"bar\")\n\n\tspGet, err := s.conn.GroupGet(s.ctx, 
arvados.GetOptions{UUID: sp.UUID})\n\tc.Check(err, check.IsNil)\n\tc.Check(spGet.UUID, check.Equals, sp.UUID)\n\tc.Check(spGet.Properties[\"foo\"], check.Equals, \"bar\")\n\n\tspList, err := s.conn.GroupList(s.ctx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{\"uuid\", \"=\", sp.UUID}}})\n\tc.Check(err, check.IsNil)\n\tc.Check(spList.ItemsAvailable, check.Equals, 1)\n\tc.Assert(spList.Items, check.HasLen, 1)\n\tc.Check(spList.Items[0].UUID, check.Equals, sp.UUID)\n\tc.Check(spList.Items[0].Properties[\"foo\"], check.Equals, \"bar\")\n\n\tanonCtx := context.WithValue(context.Background(), contextKeyTestTokens, []string{arvadostest.AnonymousToken})\n\tspList, err = s.conn.GroupList(anonCtx, arvados.ListOptions{Limit: -1, Filters: []arvados.Filter{{\"uuid\", \"=\", sp.UUID}}})\n\tc.Check(err, check.IsNil)\n\tc.Check(spList.ItemsAvailable, check.Equals, 0)\n\tc.Check(spList.Items, check.HasLen, 0)\n\n\tspDel, err := s.conn.GroupDelete(s.ctx, arvados.DeleteOptions{UUID: sp.UUID})\n\tc.Check(err, check.IsNil)\n\tc.Check(spDel.UUID, check.Equals, sp.UUID)\n}\n"
  },
  {
    "path": "lib/controller/semaphore.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage controller\n\nfunc semaphore(max int) (acquire, release func()) {\n\tif max > 0 {\n\t\tch := make(chan bool, max)\n\t\treturn func() { ch <- true }, func() { <-ch }\n\t}\n\treturn func() {}, func() {}\n}\n"
  },
  {
    "path": "lib/controller/server_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage controller\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nfunc integrationTestCluster() *arvados.Cluster {\n\tcfg, err := arvados.GetConfig(filepath.Join(os.Getenv(\"WORKSPACE\"), \"tmp\", \"arvados.yml\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcc, err := cfg.GetCluster(\"zzzzz\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn cc\n}\n\n// Return a new unstarted controller server, using the Rails API\n// provided by the integration-testing environment.\nfunc newServerFromIntegrationTestEnv(c *check.C) *httpserver.Server {\n\tlog := ctxlog.TestLogger(c)\n\tctx := ctxlog.Context(context.Background(), log)\n\thandler := &Handler{\n\t\tCluster: &arvados.Cluster{\n\t\t\tClusterID:  \"zzzzz\",\n\t\t\tPostgreSQL: integrationTestCluster().PostgreSQL,\n\t\t},\n\t\tBackgroundContext: ctx,\n\t}\n\thandler.Cluster.TLS.Insecure = true\n\thandler.Cluster.Collections.BlobSigning = true\n\thandler.Cluster.Collections.BlobSigningKey = arvadostest.BlobSigningKey\n\thandler.Cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour * 24 * 14)\n\tarvadostest.SetServiceURL(&handler.Cluster.Services.RailsAPI, \"https://\"+os.Getenv(\"ARVADOS_TEST_API_HOST\"))\n\tarvadostest.SetServiceURL(&handler.Cluster.Services.Controller, \"http://localhost:/\")\n\n\tsrv := &httpserver.Server{\n\t\tServer: http.Server{\n\t\t\tBaseContext: func(net.Listener) context.Context { return ctx },\n\t\t\tHandler:     httpserver.AddRequestIDs(httpserver.LogRequests(handler)),\n\t\t},\n\t\tAddr: \":\",\n\t}\n\treturn srv\n}\n"
  },
  {
    "path": "lib/controller/trash.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage controller\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/dblock\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n)\n\nfunc (h *Handler) periodicWorker(workerName string, interval time.Duration, locker *dblock.DBLocker, run func(context.Context) error) {\n\tlogger := ctxlog.FromContext(h.BackgroundContext).WithField(\"worker\", workerName)\n\tctx := ctxlog.Context(h.BackgroundContext, logger)\n\tif interval <= 0 {\n\t\tlogger.Debugf(\"interval is %v, not running worker\", interval)\n\t\treturn\n\t}\n\tif !locker.Lock(ctx, h.dbConnector.GetDB) {\n\t\t// context canceled\n\t\treturn\n\t}\n\tdefer locker.Unlock()\n\tfor ctxSleep(ctx, interval); ctx.Err() == nil; ctxSleep(ctx, interval) {\n\t\tif !locker.Check() {\n\t\t\t// context canceled\n\t\t\treturn\n\t\t}\n\t\terr := run(ctx)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Infof(\"%s failed\", workerName)\n\t\t}\n\t}\n}\n\nfunc (h *Handler) trashSweepWorker() {\n\th.periodicWorker(\"trash sweep\", h.Cluster.Collections.TrashSweepInterval.Duration(), dblock.TrashSweep, func(ctx context.Context) error {\n\t\tctx = auth.NewContext(ctx, &auth.Credentials{Tokens: []string{h.Cluster.SystemRootToken}})\n\t\t_, err := h.federation.SysTrashSweep(ctx, struct{}{})\n\t\treturn err\n\t})\n}\n\nfunc (h *Handler) containerLogSweepWorker() {\n\t// Since #21611 we don't expect any new log entries, so the\n\t// periodic worker only runs once, then becomes a no-op.\n\t//\n\t// The old Containers.Logging.SweepInterval config is removed.\n\t// We use TrashSweepInterval here instead, for testing\n\t// reasons: it prevents the default integration-testing\n\t// controller service (whose TrashSweepInterval is 0) from\n\t// acquiring the dblock.\n\tdone := false\n\th.periodicWorker(\"container log sweep\", h.Cluster.Collections.TrashSweepInterval.Duration(), dblock.ContainerLogSweep, func(ctx context.Context) error {\n\t\tif done {\n\t\t\treturn nil\n\t\t}\n\t\tdb, err := h.dbConnector.GetDB(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tres, err := db.ExecContext(ctx, `\nDELETE FROM logs\n USING containers\n WHERE logs.object_uuid=containers.uuid\n AND logs.event_type in ('stdout', 'stderr', 'arv-mount', 'crunch-run', 'crunchstat', 'hoststat', 'node', 'container', 'keepstore')\n AND containers.log IS NOT NULL`)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlogger := ctxlog.FromContext(ctx)\n\t\trows, err := res.RowsAffected()\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Warn(\"unexpected error from RowsAffected()\")\n\t\t} else {\n\t\t\tlogger.WithField(\"rows\", rows).Info(\"deleted rows from logs table\")\n\t\t\tif rows == 0 {\n\t\t\t\tdone = true\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}\n\n// Sleep for the given duration, but return early if ctx cancels\n// before that.\nfunc ctxSleep(ctx context.Context, d time.Duration) {\n\tselect {\n\tcase <-ctx.Done():\n\tcase <-time.After(d):\n\t}\n}\n"
  },
  {
    "path": "lib/crunchrun/background.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\nvar (\n\tlockdir    = \"/var/lock\"\n\tlockprefix = \"crunch-run-\"\n\tlocksuffix = \".lock\"\n\tbrokenfile = \"crunch-run-broken\"\n\tpricesfile = \"crunch-run-prices.json\"\n)\n\n// procinfo is saved in each process's lockfile.\ntype procinfo struct {\n\tUUID string\n\tPID  int\n}\n\n// Detach acquires a lock for the given uuid, and starts the current\n// program as a child process (with -no-detach prepended to the given\n// arguments so the child knows not to detach again). The lock is\n// passed along to the child process.\n//\n// Stdout and stderr in the child process are sent to the systemd\n// journal using the systemd-cat program.\nfunc Detach(uuid string, prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\treturn exitcode(stderr, detach(uuid, prog, args, stdin, stdout))\n}\nfunc detach(uuid string, prog string, args []string, stdin io.Reader, stdout io.Writer) error {\n\tlockfile, err := func() (*os.File, error) {\n\t\t// We must hold the dir-level lock between\n\t\t// opening/creating the lockfile and acquiring LOCK_EX\n\t\t// on it, to avoid racing with the ListProcess's\n\t\t// alive-checking and garbage collection.\n\t\tdirlock, err := lockall()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer dirlock.Close()\n\t\tlockfilename := filepath.Join(lockdir, lockprefix+uuid+locksuffix)\n\t\tlockfile, err := os.OpenFile(lockfilename, os.O_CREATE|os.O_RDWR, 0700)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"open %s: %s\", lockfilename, err)\n\t\t}\n\t\terr = syscall.Flock(int(lockfile.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)\n\t\tif err != nil {\n\t\t\tlockfile.Close()\n\t\t\treturn nil, fmt.Errorf(\"lock %s: %s\", lockfilename, err)\n\t\t}\n\t\treturn lockfile, nil\n\t}()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer lockfile.Close()\n\tlockfile.Truncate(0)\n\n\texecargs := append([]string{\"-no-detach\"}, args...)\n\tif strings.HasSuffix(prog, \" crunch-run\") {\n\t\t// invoked as \"/path/to/arvados-server crunch-run\"\n\t\t// (see arvados/lib/cmd.Multi)\n\t\texecargs = append([]string{strings.TrimSuffix(prog, \" crunch-run\"), \"crunch-run\"}, execargs...)\n\t} else {\n\t\t// invoked as \"/path/to/crunch-run\"\n\t\texecargs = append([]string{prog}, execargs...)\n\t}\n\tif _, err := exec.LookPath(\"systemd-cat\"); err == nil {\n\t\texecargs = append([]string{\n\t\t\t// Here, if the inner systemd-cat can't exec\n\t\t\t// crunch-run, it writes an error message to\n\t\t\t// stderr, and the outer systemd-cat writes it\n\t\t\t// to the journal where the operator has a\n\t\t\t// chance to discover it. (If we only used one\n\t\t\t// systemd-cat command, it would be up to us\n\t\t\t// to report the error -- but we are going to\n\t\t\t// detach and exit, not wait for something to\n\t\t\t// appear on stderr.)  
Note these systemd-cat\n\t\t\t// calls don't result in additional processes\n\t\t\t// -- they just connect stderr/stdout to\n\t\t\t// sockets and call exec().\n\t\t\t\"systemd-cat\", \"--identifier=crunch-run\",\n\t\t\t\"systemd-cat\", \"--identifier=crunch-run\",\n\t\t}, execargs...)\n\t}\n\n\tcmd := exec.Command(execargs[0], execargs[1:]...)\n\t// Child inherits lockfile.\n\tcmd.ExtraFiles = []*os.File{lockfile}\n\t// Ensure child isn't interrupted even if we receive signals\n\t// from parent (sshd) while sending lockfile content to\n\t// caller.\n\tcmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}\n\t// We need to manage our own OS pipe here to ensure the child\n\t// process reads all of our stdin pipe before we return.\n\tpiper, pipew, err := os.Pipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer pipew.Close()\n\tcmd.Stdin = piper\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"exec %s: %s\", cmd.Path, err)\n\t}\n\t_, err = io.Copy(pipew, stdin)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = pipew.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw := io.MultiWriter(stdout, lockfile)\n\treturn json.NewEncoder(w).Encode(procinfo{\n\t\tUUID: uuid,\n\t\tPID:  cmd.Process.Pid,\n\t})\n}\n\n// KillProcess finds the crunch-run process corresponding to the given\n// uuid, and sends the given signal to it. It then waits up to 1\n// second for the process to die. It returns 0 if the process is\n// successfully killed or didn't exist in the first place.\nfunc KillProcess(uuid string, signal syscall.Signal, stdout, stderr io.Writer) int {\n\treturn exitcode(stderr, kill(uuid, signal, stdout, stderr))\n}\n\nfunc kill(uuid string, signal syscall.Signal, stdout, stderr io.Writer) error {\n\tpath := filepath.Join(lockdir, lockprefix+uuid+locksuffix)\n\tf, err := os.Open(path)\n\tif os.IsNotExist(err) {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"open %s: %s\", path, err)\n\t}\n\tdefer f.Close()\n\n\tvar pi procinfo\n\terr = json.NewDecoder(f).Decode(&pi)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"decode %s: %s\", path, err)\n\t}\n\n\tif pi.UUID != uuid || pi.PID == 0 {\n\t\treturn fmt.Errorf(\"%s: bogus procinfo: %+v\", path, pi)\n\t}\n\n\tproc, err := os.FindProcess(pi.PID)\n\tif err != nil {\n\t\t// FindProcess should have succeeded, even if the\n\t\t// process does not exist.\n\t\treturn fmt.Errorf(\"%s: find process %d: %s\", uuid, pi.PID, err)\n\t}\n\n\t// Send the requested signal once, then send signal 0 a few\n\t// times.  When proc.Signal() returns an error (process no\n\t// longer exists), return success.  
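(Per kill(2), signal 0\n\t// performs the usual existence and permission checks but\n\t// does not actually deliver a signal.)  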
If that doesn't happen\n\t// within 1 second, return an error.\n\terr = proc.Signal(signal)\n\tfor deadline := time.Now().Add(time.Second); err == nil && time.Now().Before(deadline); time.Sleep(time.Second / 100) {\n\t\terr = proc.Signal(syscall.Signal(0))\n\t}\n\tif err == nil {\n\t\t// Reached deadline without a proc.Signal() error.\n\t\treturn fmt.Errorf(\"%s: pid %d: sent signal %d (%s) but process is still alive\", uuid, pi.PID, signal, signal)\n\t}\n\tfmt.Fprintf(stderr, \"%s: pid %d: %s\\n\", uuid, pi.PID, err)\n\treturn nil\n}\n\n// ListProcesses lists UUIDs of active crunch-run processes.\nfunc ListProcesses(pricesdata io.Reader, stdout, stderr io.Writer) int {\n\tif pricesdata == nil {\n\t\t// skip price update\n\t} else if buf, err := io.ReadAll(pricesdata); err == nil && len(buf) > 0 {\n\t\t// write latest pricing data to disk where\n\t\t// current/future crunch-run processes can load it\n\t\tfnm := filepath.Join(lockdir, pricesfile)\n\t\tfnmtmp := fmt.Sprintf(\"%s~%d\", fnm, os.Getpid())\n\t\terr := os.WriteFile(fnmtmp, buf, 0777)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(stderr, \"error writing price data to %s: %s\", fnmtmp, err)\n\t\t} else if err = os.Rename(fnmtmp, fnm); err != nil {\n\t\t\tfmt.Fprintf(stderr, \"error renaming %s to %s: %s\", fnmtmp, fnm, err)\n\t\t\tos.Remove(fnmtmp)\n\t\t}\n\t}\n\t// filepath.Walk does not follow symlinks, so we must walk\n\t// lockdir+\"/.\" in case lockdir itself is a symlink.\n\twalkdir := lockdir + \"/.\"\n\treturn exitcode(stderr, filepath.Walk(walkdir, func(path string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() && path != walkdir {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\tif name := info.Name(); name == brokenfile {\n\t\t\tfmt.Fprintln(stdout, \"broken\")\n\t\t\treturn nil\n\t\t} else if !strings.HasPrefix(name, lockprefix) || !strings.HasSuffix(name, locksuffix) {\n\t\t\treturn nil\n\t\t}\n\t\tif info.Size() == 0 {\n\t\t\t// race: process has opened/locked but hasn't yet written pid/uuid\n\t\t\treturn nil\n\t\t}\n\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tdefer f.Close()\n\n\t\t// Ensure other processes don't acquire this lockfile\n\t\t// after we have decided it is abandoned but before we\n\t\t// have deleted it.\n\t\tdirlock, err := lockall()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = syscall.Flock(int(f.Fd()), syscall.LOCK_SH|syscall.LOCK_NB)\n\t\tif err == nil {\n\t\t\t// lockfile is stale\n\t\t\terr := os.Remove(path)\n\t\t\tdirlock.Close()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(stderr, \"unlink %s: %s\\n\", f.Name(), err)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tdirlock.Close()\n\n\t\tvar pi procinfo\n\t\terr = json.NewDecoder(f).Decode(&pi)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(stderr, \"%s: %s\\n\", path, err)\n\t\t\treturn nil\n\t\t}\n\t\tif pi.UUID == \"\" || pi.PID == 0 {\n\t\t\tfmt.Fprintf(stderr, \"%s: bogus procinfo: %+v\", path, pi)\n\t\t\treturn nil\n\t\t}\n\n\t\tproc, err := os.FindProcess(pi.PID)\n\t\tif err != nil {\n\t\t\t// FindProcess should have succeeded, even if the\n\t\t\t// process does not exist.\n\t\t\tfmt.Fprintf(stderr, \"%s: find process %d: %s\", path, pi.PID, err)\n\t\t\treturn nil\n\t\t}\n\t\terr = proc.Signal(syscall.SIGUSR2)\n\t\tif err != nil {\n\t\t\t// Process is dead, even though lockfile was\n\t\t\t// still locked. Most likely a stuck arv-mount\n\t\t\t// process that inherited the lock from\n\t\t\t// crunch-run. 
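The inherited lockfile\n\t\t\t// descriptor keeps the flock held, which is why the\n\t\t\t// LOCK_SH probe above failed even though crunch-run\n\t\t\t// itself has exited.  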
Report container UUID as\n\t\t\t// \"stale\".\n\t\t\tfmt.Fprintln(stdout, pi.UUID, \"stale\")\n\t\t\treturn nil\n\t\t}\n\n\t\tfmt.Fprintln(stdout, pi.UUID)\n\t\treturn nil\n\t}))\n}\n\n// If err is nil, return 0 (\"success\"); otherwise, print err to stderr\n// and return 1.\nfunc exitcode(stderr io.Writer, err error) int {\n\tif err != nil {\n\t\tfmt.Fprintln(stderr, err)\n\t\treturn 1\n\t}\n\treturn 0\n}\n\n// Acquire a dir-level lock. Must be held while creating or deleting\n// container-specific lockfiles, to avoid races during the intervals\n// when those container-specific lockfiles are open but not locked.\n//\n// Caller releases the lock by closing the returned file.\nfunc lockall() (*os.File, error) {\n\tlockfile := filepath.Join(lockdir, lockprefix+\"all\"+locksuffix)\n\tf, err := os.OpenFile(lockfile, os.O_CREATE|os.O_RDWR, 0700)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"open %s: %s\", lockfile, err)\n\t}\n\terr = syscall.Flock(int(f.Fd()), syscall.LOCK_EX)\n\tif err != nil {\n\t\tf.Close()\n\t\treturn nil, fmt.Errorf(\"lock %s: %s\", lockfile, err)\n\t}\n\treturn f, nil\n}\n"
  },
  {
    "path": "lib/crunchrun/bufthenwrite.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"sync\"\n)\n\ntype bufThenWrite struct {\n\tbuf bytes.Buffer\n\tw   io.Writer\n\tmtx sync.Mutex\n}\n\nfunc (btw *bufThenWrite) SetWriter(w io.Writer) error {\n\tbtw.mtx.Lock()\n\tdefer btw.mtx.Unlock()\n\tbtw.w = w\n\t_, err := io.Copy(w, &btw.buf)\n\treturn err\n}\n\nfunc (btw *bufThenWrite) Write(p []byte) (int, error) {\n\tbtw.mtx.Lock()\n\tdefer btw.mtx.Unlock()\n\tif btw.w == nil {\n\t\tbtw.w = &btw.buf\n\t}\n\treturn btw.w.Write(p)\n}\n"
  },
  {
    "path": "lib/crunchrun/cgroup.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"os\"\n\t\"os/exec\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"sync\"\n)\n\n// Return the current process's cgroup for the given subsystem.\n//\n// If the host has cgroups v2 and not v1 (i.e., unified mode), return\n// the current process's cgroup.\nfunc findCgroup(fsys fs.FS, subsystem string) (string, error) {\n\tsubsys := []byte(subsystem)\n\tcgroups, err := fs.ReadFile(fsys, \"proc/self/cgroup\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, line := range bytes.Split(cgroups, []byte(\"\\n\")) {\n\t\ttoks := bytes.SplitN(line, []byte(\":\"), 4)\n\t\tif len(toks) < 3 {\n\t\t\tcontinue\n\t\t}\n\t\tif len(toks[1]) == 0 && string(toks[0]) == \"0\" {\n\t\t\t// cgroups v2: \"0::$PATH\"\n\t\t\t//\n\t\t\t// In \"hybrid\" mode, this entry is last, so we\n\t\t\t// use it when the specified subsystem doesn't\n\t\t\t// match a cgroups v1 entry.\n\t\t\t//\n\t\t\t// In \"unified\" mode, this is the only entry,\n\t\t\t// so we use it regardless of which subsystem\n\t\t\t// was specified.\n\t\t\treturn string(toks[2]), nil\n\t\t}\n\t\tfor _, s := range bytes.Split(toks[1], []byte(\",\")) {\n\t\t\t// cgroups v1: \"7:cpu,cpuacct:/user.slice\"\n\t\t\tif bytes.Compare(s, subsys) == 0 {\n\t\t\t\treturn string(toks[2]), nil\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"subsystem %q not found in /proc/self/cgroup\", subsystem)\n}\n\nvar (\n\t// After calling checkCgroupSupport, cgroupSupport indicates\n\t// support for singularity resource limits.\n\t//\n\t// E.g., cgroupSupport[\"memory\"]==true if systemd is installed\n\t// and configured such that singularity can use the \"memory\"\n\t// cgroup controller to set resource limits.\n\tcgroupSupport     map[string]bool\n\tcgroupSupportLock sync.Mutex\n)\n\n// checkCgroupSupport should be called before looking up strings like\n// \"memory\" and \"cpu\" in cgroupSupport.\nfunc checkCgroupSupport(logf func(string, ...interface{})) {\n\tcgroupSupportLock.Lock()\n\tdefer cgroupSupportLock.Unlock()\n\tif cgroupSupport != nil {\n\t\treturn\n\t}\n\tcgroupSupport = make(map[string]bool)\n\tif os.Getuid() != 0 {\n\t\txrd := os.Getenv(\"XDG_RUNTIME_DIR\")\n\t\tif xrd == \"\" || os.Getenv(\"DBUS_SESSION_BUS_ADDRESS\") == \"\" {\n\t\t\tlogf(\"not running as root, and empty XDG_RUNTIME_DIR or DBUS_SESSION_BUS_ADDRESS -- singularity resource limits are not supported\")\n\t\t\treturn\n\t\t}\n\t\tif fi, err := os.Stat(xrd + \"/systemd\"); err != nil || !fi.IsDir() {\n\t\t\tlogf(\"not running as root, and %s/systemd is not a directory -- singularity resource limits are not supported\", xrd)\n\t\t\treturn\n\t\t}\n\t\tversion, err := exec.Command(\"systemd-run\", \"--version\").CombinedOutput()\n\t\tif match := regexp.MustCompile(`^systemd (\\d+)`).FindSubmatch(version); err != nil || match == nil {\n\t\t\tlogf(\"not running as root, and could not get systemd version -- singularity resource limits are not supported\")\n\t\t\treturn\n\t\t} else if v, _ := strconv.ParseInt(string(match[1]), 10, 64); v < 224 {\n\t\t\tlogf(\"not running as root, and systemd version %s < minimum 224 -- singularity resource limits are not supported\", match[1])\n\t\t\treturn\n\t\t}\n\t}\n\tmount, err := cgroupMount()\n\tif err != nil {\n\t\tif os.Getuid() == 0 && checkCgroup1Support(os.DirFS(\"/\"), logf) {\n\t\t\t// If running as root, singularity also\n\t\t\t// supports cgroups 
v1.\n\t\t\treturn\n\t\t}\n\t\tlogf(\"no cgroup support: %s\", err)\n\t\treturn\n\t}\n\tcgroup, err := findCgroup(os.DirFS(\"/\"), \"\")\n\tif err != nil {\n\t\tlogf(\"cannot find cgroup: %s\", err)\n\t\treturn\n\t}\n\tcontrollers, err := os.ReadFile(mount + cgroup + \"/cgroup.controllers\")\n\tif err != nil {\n\t\tlogf(\"cannot read cgroup.controllers file: %s\", err)\n\t\treturn\n\t}\n\tfor _, controller := range bytes.Split(bytes.TrimRight(controllers, \"\\n\"), []byte{' '}) {\n\t\tcgroupSupport[string(controller)] = true\n\t}\n\tif !cgroupSupport[\"memory\"] && !cgroupSupport[\"cpu\"] && os.Getuid() == 0 {\n\t\t// On a system running in \"unified\" mode, the\n\t\t// controllers we need might be mounted under the v1\n\t\t// hierarchy, in which case we will not have seen them\n\t\t// in the cgroup2 mount, but (if running as root)\n\t\t// singularity can use them through v1.  See #22185.\n\t\tcheckCgroup1Support(os.DirFS(\"/\"), logf)\n\t}\n}\n\n// Check for legacy cgroups v1 support. Caller must have\n// cgroupSupportLock.\nfunc checkCgroup1Support(fsys fs.FS, logf func(string, ...interface{})) bool {\n\tcgroup, err := fs.ReadFile(fsys, \"proc/self/cgroup\")\n\tif err != nil {\n\t\tlogf(\"%s\", err)\n\t\treturn false\n\t}\n\tfor _, line := range bytes.Split(cgroup, []byte{'\\n'}) {\n\t\tif toks := bytes.SplitN(line, []byte{':'}, 3); len(toks) == 3 && len(toks[1]) > 0 {\n\t\t\tfor _, controller := range bytes.Split(toks[1], []byte{','}) {\n\t\t\t\tcgroupSupport[string(controller)] = true\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\n// Return the cgroup2 mount point, typically \"/sys/fs/cgroup\".\nfunc cgroupMount() (string, error) {\n\tmounts, err := os.ReadFile(\"/proc/mounts\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, mount := range bytes.Split(mounts, []byte{'\\n'}) {\n\t\ttoks := bytes.Split(mount, []byte{' '})\n\t\tif len(toks) > 2 && bytes.Equal(toks[0], []byte(\"cgroup2\")) {\n\t\t\treturn string(toks[1]), nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"cgroup2 mount not found\")\n}\n"
  },
  {
    "path": "lib/crunchrun/cgroup_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strings\"\n\n\t\"github.com/sirupsen/logrus\"\n\t. \"gopkg.in/check.v1\"\n)\n\ntype CgroupSuite struct{}\n\nvar _ = Suite(&CgroupSuite{})\n\nfunc (s *CgroupSuite) TestFindCgroup(c *C) {\n\tvar testfiles []string\n\tbuf, err := exec.Command(\"find\", \"../crunchstat/testdata\", \"-name\", \"cgroup\", \"-type\", \"f\").Output()\n\tc.Assert(err, IsNil)\n\tfor _, testfile := range bytes.Split(buf, []byte{'\\n'}) {\n\t\tif len(testfile) > 0 {\n\t\t\ttestfiles = append(testfiles, string(testfile))\n\t\t}\n\t}\n\ttestfiles = append(testfiles, \"/proc/self/cgroup\")\n\n\ttmpdir := c.MkDir()\n\terr = os.MkdirAll(tmpdir+\"/proc/self\", 0777)\n\tc.Assert(err, IsNil)\n\tfsys := os.DirFS(tmpdir)\n\n\tfor _, trial := range []struct {\n\t\tmatch  string // if non-empty, only check testfiles containing this string\n\t\tsubsys string\n\t\texpect string // empty means \"any\" (we never actually expect empty string)\n\t}{\n\t\t{\"debian11\", \"blkio\", \"/user.slice/user-1000.slice/session-5424.scope\"},\n\t\t{\"debian12\", \"cpuacct\", \"/user.slice/user-1000.slice/session-4.scope\"},\n\t\t{\"debian12\", \"bogus-does-not-matter\", \"/user.slice/user-1000.slice/session-4.scope\"},\n\t\t{\"ubuntu1804\", \"blkio\", \"/user.slice\"},\n\t\t{\"ubuntu1804\", \"cpuacct\", \"/user.slice\"},\n\t\t{\"\", \"cpu\", \"\"},\n\t\t{\"\", \"cpuset\", \"\"},\n\t\t{\"\", \"devices\", \"\"},\n\t\t{\"\", \"bogus-does-not-matter\", \"\"},\n\t} {\n\t\tfor _, testfile := range testfiles {\n\t\t\tif !strings.Contains(testfile, trial.match) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.Logf(\"trial %+v testfile %s\", trial, testfile)\n\n\t\t\t// Copy cgroup file into our fake proc/self/ dir\n\t\t\tbuf, err := os.ReadFile(testfile)\n\t\t\tc.Assert(err, IsNil)\n\t\t\terr = os.WriteFile(tmpdir+\"/proc/self/cgroup\", buf, 0777)\n\t\t\tc.Assert(err, IsNil)\n\n\t\t\tcgroup, err := findCgroup(fsys, trial.subsys)\n\t\t\tif !c.Check(err, IsNil) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.Logf(\"\\tcgroup = %q\", cgroup)\n\t\t\tc.Check(cgroup, Not(Equals), \"\")\n\t\t\tif trial.expect != \"\" {\n\t\t\t\tc.Check(cgroup, Equals, trial.expect)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *CgroupSuite) TestCgroupSupport(c *C) {\n\tvar logbuf bytes.Buffer\n\tlogger := logrus.New()\n\tlogger.Out = &logbuf\n\tcheckCgroupSupport(logger.Printf)\n\tc.Check(logbuf.String(), Equals, \"\")\n\tc.Check(cgroupSupport, NotNil)\n\tc.Check(cgroupSupport[\"memory\"], Equals, true)\n\tc.Check(cgroupSupport[\"entropy\"], Equals, false)\n}\n\nfunc (s *CgroupSuite) TestCgroup1Support(c *C) {\n\tdefer func() {\n\t\t// Reset global state.  
Other tests need to re-check\n\t\t// the real system config instead of using the results\n\t\t// from our fake /proc/self/cgroup.\n\t\tcgroupSupport = nil\n\t}()\n\ttmpdir := c.MkDir()\n\terr := os.MkdirAll(tmpdir+\"/proc/self\", 0777)\n\tc.Assert(err, IsNil)\n\terr = os.WriteFile(tmpdir+\"/proc/self/cgroup\", []byte(`12:blkio:/user.slice\n11:perf_event:/\n10:freezer:/\n9:pids:/user.slice/user-1000.slice/session-5.scope\n8:hugetlb:/\n7:rdma:/\n6:cpu,cpuacct:/user.slice\n5:devices:/user.slice\n4:memory:/user.slice/user-1000.slice/session-5.scope\n3:net_cls,net_prio:/\n2:cpuset:/\n1:name=systemd:/user.slice/user-1000.slice/session-5.scope\n0::/user.slice/user-1000.slice/session-5.scope\n`), 0777)\n\tc.Assert(err, IsNil)\n\tcgroupSupport = map[string]bool{}\n\tok := checkCgroup1Support(os.DirFS(tmpdir), c.Logf)\n\tc.Check(ok, Equals, true)\n\tc.Check(cgroupSupport, DeepEquals, map[string]bool{\n\t\t\"blkio\":        true,\n\t\t\"cpu\":          true,\n\t\t\"cpuacct\":      true,\n\t\t\"cpuset\":       true,\n\t\t\"devices\":      true,\n\t\t\"freezer\":      true,\n\t\t\"hugetlb\":      true,\n\t\t\"memory\":       true,\n\t\t\"name=systemd\": true,\n\t\t\"net_cls\":      true,\n\t\t\"net_prio\":     true,\n\t\t\"perf_event\":   true,\n\t\t\"pids\":         true,\n\t\t\"rdma\":         true,\n\t})\n}\n"
  },
  {
    "path": "lib/crunchrun/container_gateway.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"context\"\n\t\"crypto/hmac\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"crypto/sha256\"\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/rpc\"\n\t\"git.arvados.org/arvados.git/lib/selfsigned\"\n\t\"git.arvados.org/arvados.git/lib/webdavfs\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/creack/pty\"\n\t\"github.com/google/shlex\"\n\t\"github.com/hashicorp/yamux\"\n\t\"golang.org/x/crypto/ssh\"\n\t\"golang.org/x/net/webdav\"\n)\n\ntype GatewayTarget interface {\n\t// Command that will execute cmd inside the container\n\tInjectCommand(ctx context.Context, detachKeys, username string, usingTTY bool, cmd []string) (*exec.Cmd, error)\n\n\t// IP address inside container\n\tIPAddress() (string, error)\n}\n\ntype GatewayTargetStub struct{}\n\nfunc (GatewayTargetStub) IPAddress() (string, error) { return \"127.0.0.1\", nil }\nfunc (GatewayTargetStub) InjectCommand(ctx context.Context, detachKeys, username string, usingTTY bool, cmd []string) (*exec.Cmd, error) {\n\treturn exec.CommandContext(ctx, cmd[0], cmd[1:]...), nil\n}\n\ntype Gateway struct {\n\tContainerUUID string\n\t// Caller should set Address to \"\", or \"host:0\" or \"host:port\"\n\t// where host is a known external IP address; port is a\n\t// desired port number to listen on; and \":0\" chooses an\n\t// available dynamic port.\n\t//\n\t// If Address is \"\", Start() listens only on the loopback\n\t// interface (and changes Address to \"127.0.0.1:port\").\n\t// Otherwise it listens on all interfaces.\n\t//\n\t// If Address is \"host:0\", Start() updates Address to\n\t// \"host:port\".\n\tAddress    string\n\tAuthSecret string\n\tTarget     GatewayTarget\n\tLog        interface {\n\t\tPrintf(fmt string, args ...interface{})\n\t}\n\t// If non-nil, set up a ContainerGatewayTunnel, so that the\n\t// controller can connect to us even if our external IP\n\t// address is unknown or not routable from controller.\n\tArvadosClient *arvados.Client\n\n\t// When a tunnel is connected or reconnected, this func (if\n\t// not nil) will be called with the InternalURL of the\n\t// controller process at the other end of the tunnel.\n\tUpdateTunnelURL func(url string)\n\n\t// Source for serving WebDAV requests with\n\t// X-Webdav-Source: /log\n\tLogCollection arvados.CollectionFileSystem\n\n\tsshConfig   ssh.ServerConfig\n\trequestAuth string\n\trespondAuth string\n}\n\n// Start starts an http server that allows authenticated clients to open an\n// interactive \"docker exec\" session and (in future) connect to tcp ports\n// inside the docker container.\nfunc (gw *Gateway) Start() error {\n\tgw.sshConfig = ssh.ServerConfig{\n\t\tNoClientAuth: true,\n\t\tPasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {\n\t\t\tif c.User() == \"_\" {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"cannot specify user %q via ssh client\", c.User())\n\t\t},\n\t\tPublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) {\n\t\t\tif c.User() == \"_\" {\n\t\t\t\treturn &ssh.Permissions{\n\t\t\t\t\tExtensions: 
map[string]string{\n\t\t\t\t\t\t\"pubkey-fp\": ssh.FingerprintSHA256(pubKey),\n\t\t\t\t\t},\n\t\t\t\t}, nil\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"cannot specify user %q via ssh client\", c.User())\n\t\t},\n\t}\n\tpvt, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = pvt.Validate()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsigner, err := ssh.NewSignerFromKey(pvt)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgw.sshConfig.AddHostKey(signer)\n\n\t// Address (typically provided by arvados-dispatch-cloud) is\n\t// HOST:PORT where HOST is our IP address or hostname as seen\n\t// from arvados-controller, and PORT is either the desired\n\t// port where we should run our gateway server, or \"0\" if we\n\t// should choose an available port.\n\textAddr := gw.Address\n\t// Generally we can't know which local interface corresponds\n\t// to an externally reachable IP address, so if we expect to\n\t// be reachable by external hosts, we listen on all\n\t// interfaces.\n\tlistenHost := \"\"\n\tif extAddr == \"\" {\n\t\t// If the dispatcher doesn't tell us our external IP\n\t\t// address, controller will only be able to connect\n\t\t// through the tunnel (see runTunnel), so our gateway\n\t\t// server only needs to listen on the loopback\n\t\t// interface.\n\t\textAddr = \"127.0.0.1:0\"\n\t\tlistenHost = \"127.0.0.1\"\n\t}\n\textHost, extPort, err := net.SplitHostPort(extAddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcert, err := selfsigned.CertGenerator{}.Generate()\n\tif err != nil {\n\t\treturn err\n\t}\n\th := hmac.New(sha256.New, []byte(gw.AuthSecret))\n\th.Write(cert.Certificate[0])\n\tgw.requestAuth = fmt.Sprintf(\"%x\", h.Sum(nil))\n\th.Reset()\n\th.Write([]byte(gw.requestAuth))\n\tgw.respondAuth = fmt.Sprintf(\"%x\", h.Sum(nil))\n\n\tsrv := &httpserver.Server{\n\t\tServer: http.Server{\n\t\t\tHandler: gw,\n\t\t\tTLSConfig: &tls.Config{\n\t\t\t\tCertificates: []tls.Certificate{cert},\n\t\t\t},\n\t\t\t// Typically the client is arvados-controller,\n\t\t\t// which disables keep-alive, so we mostly\n\t\t\t// don't rely on IdleTimeout.  
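(With keep-alive\n\t\t\t// disabled, each request arrives on a fresh connection\n\t\t\t// that is closed once the response is sent.)  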
But in general,\n\t\t\t// these timeouts prevent abandoned open\n\t\t\t// sockets from piling up if client\n\t\t\t// connections don't get terminated properly\n\t\t\t// (e.g., network mishap).\n\t\t\tIdleTimeout:       time.Minute,\n\t\t\tReadHeaderTimeout: time.Minute,\n\t\t},\n\t\tAddr: net.JoinHostPort(listenHost, extPort),\n\t}\n\terr = srv.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo func() {\n\t\terr := srv.Wait()\n\t\tgw.Log.Printf(\"gateway server stopped: %s\", err)\n\t}()\n\t// Get the port number we are listening on (extPort might be\n\t// \"0\" or a port name, in which case this will be different).\n\t_, listenPort, err := net.SplitHostPort(srv.Addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// When changing state to Running, the caller will want to set\n\t// gateway_address to a \"HOST:PORT\" that, if controller\n\t// connects to it, will reach this gateway server.\n\t//\n\t// The most likely thing to work is: HOST is our external\n\t// hostname/IP as provided by the caller\n\t// (arvados-dispatch-cloud) or 127.0.0.1 to indicate\n\t// non-tunnel connections aren't available; and PORT is the\n\t// port number we are listening on.\n\tgw.Address = net.JoinHostPort(extHost, listenPort)\n\tgw.Log.Printf(\"gateway server listening at %s\", gw.Address)\n\tif gw.ArvadosClient != nil {\n\t\tgo gw.maintainTunnel(gw.Address)\n\t}\n\treturn nil\n}\n\nfunc (gw *Gateway) maintainTunnel(addr string) {\n\tfor ; ; time.Sleep(5 * time.Second) {\n\t\terr := gw.runTunnel(addr)\n\t\t// Note: err is never nil here, see runTunnel comment.\n\t\tgw.Log.Printf(\"runTunnel: %s\", err)\n\t}\n}\n\n// runTunnel connects to controller and sets up a tunnel through\n// which controller can connect to the gateway server at the given\n// addr.\n//\n// runTunnel aims to run forever (i.e., until the current process\n// exits). 
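Its caller,\n// maintainTunnel, treats every return as a failure and redials\n// after a short delay.  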
If it returns at all, it returns a non-nil error indicating\n// why the tunnel was shut down.\nfunc (gw *Gateway) runTunnel(addr string) error {\n\tctx := auth.NewContext(context.Background(), auth.NewCredentials(gw.ArvadosClient.AuthToken))\n\tarpc := rpc.NewConn(\"\", &url.URL{Scheme: \"https\", Host: gw.ArvadosClient.APIHost}, gw.ArvadosClient.Insecure, rpc.PassthroughTokenProvider)\n\ttun, err := arpc.ContainerGatewayTunnel(ctx, arvados.ContainerGatewayTunnelOptions{\n\t\tUUID:       gw.ContainerUUID,\n\t\tAuthSecret: gw.AuthSecret,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating gateway tunnel: %w\", err)\n\t}\n\tmux, err := yamux.Client(tun.Conn, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error setting up mux client end: %s\", err)\n\t}\n\tif url := tun.Header.Get(\"X-Arvados-Internal-Url\"); url != \"\" && gw.UpdateTunnelURL != nil {\n\t\tgw.UpdateTunnelURL(url)\n\t}\n\tfor {\n\t\tmuxconn, err := mux.AcceptStream()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgo func() {\n\t\t\tdefer muxconn.Close()\n\t\t\tgwconn, err := net.Dial(\"tcp\", addr)\n\t\t\tif err != nil {\n\t\t\t\tgw.Log.Printf(\"tunnel connection %d: error connecting to %s: %s\", muxconn.StreamID(), addr, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer gwconn.Close()\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(2)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, err := io.Copy(gwconn, muxconn)\n\t\t\t\tif err != nil {\n\t\t\t\t\tgw.Log.Printf(\"tunnel connection %d: mux end: %s\", muxconn.StreamID(), err)\n\t\t\t\t}\n\t\t\t\tgwconn.Close()\n\t\t\t}()\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t_, err := io.Copy(muxconn, gwconn)\n\t\t\t\tif err != nil {\n\t\t\t\t\tgw.Log.Printf(\"tunnel connection %d: gateway end: %s\", muxconn.StreamID(), err)\n\t\t\t\t}\n\t\t\t\tmuxconn.Close()\n\t\t\t}()\n\t\t\twg.Wait()\n\t\t}()\n\t}\n}\n\nvar webdavMethod = map[string]bool{\n\t\"GET\":      true,\n\t\"OPTIONS\":  true,\n\t\"PROPFIND\": true,\n}\n\nfunc (gw *Gateway) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tw.Header().Set(\"Vary\", \"X-Arvados-Authorization, X-Arvados-Container-Gateway-Uuid, X-Arvados-Container-Target-Port, X-Webdav-Prefix, X-Webdav-Source\")\n\treqUUID := req.Header.Get(\"X-Arvados-Container-Gateway-Uuid\")\n\tif reqUUID == \"\" {\n\t\t// older controller versions only send UUID as query param\n\t\treq.ParseForm()\n\t\treqUUID = req.Form.Get(\"uuid\")\n\t}\n\tif reqUUID != gw.ContainerUUID {\n\t\thttp.Error(w, fmt.Sprintf(\"misdirected request: meant for %q but received by crunch-run %q\", reqUUID, gw.ContainerUUID), http.StatusBadGateway)\n\t\treturn\n\t}\n\tif req.Header.Get(\"X-Arvados-Authorization\") != gw.requestAuth {\n\t\thttp.Error(w, \"bad X-Arvados-Authorization header\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\tw.Header().Set(\"X-Arvados-Authorization-Response\", gw.respondAuth)\n\tswitch {\n\tcase req.Method == \"POST\" && req.Header.Get(\"Upgrade\") == \"ssh\":\n\t\t// SSH tunnel from\n\t\t// (*lib/controller/localdb.Conn)ContainerSSH()\n\t\tgw.handleSSH(w, req)\n\tcase req.Header.Get(\"X-Webdav-Source\") == \"/log\":\n\t\t// WebDAV request for container log data\n\t\tif !webdavMethod[req.Method] {\n\t\t\thttp.Error(w, \"method not allowed\", http.StatusMethodNotAllowed)\n\t\t\treturn\n\t\t}\n\t\tgw.handleLogsWebDAV(w, req)\n\tcase req.Header.Get(\"X-Arvados-Container-Target-Port\") != \"\":\n\t\t// HTTP forwarded through\n\t\t// (*lib/controller/localdb.Conn)ContainerHTTPProxy()\n\t\tgw.handleForwardedHTTP(w, req)\n\tdefault:\n\t\thttp.Error(w, 
\"path not found\", http.StatusNotFound)\n\t}\n}\n\nfunc (gw *Gateway) handleLogsWebDAV(w http.ResponseWriter, r *http.Request) {\n\tprefix := r.Header.Get(\"X-Webdav-Prefix\")\n\tif !strings.HasPrefix(r.URL.Path, prefix) {\n\t\thttp.Error(w, \"X-Webdav-Prefix header is not a prefix of the requested path\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tif gw.LogCollection == nil {\n\t\thttp.Error(w, \"Not found\", http.StatusNotFound)\n\t\treturn\n\t}\n\twh := webdav.Handler{\n\t\tPrefix: prefix,\n\t\tFileSystem: &webdavfs.FS{\n\t\t\tFileSystem:    gw.LogCollection,\n\t\t\tPrefix:        \"\",\n\t\t\tWriting:       false,\n\t\t\tAlwaysReadEOF: r.Method == \"PROPFIND\",\n\t\t},\n\t\tLockSystem: webdavfs.NoLockSystem,\n\t\tLogger:     gw.webdavLogger,\n\t}\n\twh.ServeHTTP(w, r)\n}\n\nfunc (gw *Gateway) webdavLogger(r *http.Request, err error) {\n\tif err != nil && !os.IsNotExist(err) {\n\t\tctxlog.FromContext(r.Context()).WithError(err).Info(\"error reported by webdav handler\")\n\t} else {\n\t\tctxlog.FromContext(r.Context()).WithError(err).Debug(\"webdav request log\")\n\t}\n}\n\nfunc (gw *Gateway) handleForwardedHTTP(w http.ResponseWriter, reqIn *http.Request) {\n\tport := reqIn.Header.Get(\"X-Arvados-Container-Target-Port\")\n\tvar host string\n\tvar err error\n\tif gw.Target != nil {\n\t\thost, err = gw.Target.IPAddress()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"container has no IP address: \"+err.Error(), http.StatusServiceUnavailable)\n\t\t\treturn\n\t\t}\n\t}\n\tif host == \"\" {\n\t\thttp.Error(w, \"container has no IP address\", http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\tclient := http.Client{\n\t\tCheckRedirect: func(*http.Request, []*http.Request) error { return http.ErrUseLastResponse },\n\t}\n\turl := *reqIn.URL\n\turl.Scheme = \"http\"\n\turl.Host = net.JoinHostPort(host, port)\n\treq, err := http.NewRequestWithContext(reqIn.Context(), reqIn.Method, url.String(), reqIn.Body)\n\treq.Host = reqIn.Host\n\treq.Header = reqIn.Header\n\treq.Header.Del(\"X-Arvados-Container-Gateway-Uuid\")\n\treq.Header.Del(\"X-Arvados-Container-Target-Port\")\n\treq.Header.Del(\"X-Arvados-Authorization\")\n\treq.Header.Add(\"Via\", \"HTTP/1.1 arvados-crunch-run\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadGateway)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tfor k, v := range resp.Header {\n\t\tw.Header()[k] = v\n\t}\n\tw.WriteHeader(resp.StatusCode)\n\tio.Copy(w, resp.Body)\n}\n\n// handleSSH connects to an SSH server that allows the caller to run\n// interactive commands as root (or any other desired user) inside the\n// container. 
The tunnel itself can only be created by an\n// authenticated caller, so the SSH server itself is wide open (any\n// password or key will be accepted).\n//\n// Requests must have path \"/ssh\" and the following headers:\n//\n// Connection: upgrade\n// Upgrade: ssh\n// X-Arvados-Container-Gateway-Uuid: uuid of container\n// X-Arvados-Authorization: must match\n// hmac(AuthSecret,certfingerprint) (this prevents other containers\n// and shell nodes from connecting directly)\n//\n// Optional headers:\n//\n// X-Arvados-Detach-Keys: argument to \"docker exec --detach-keys\",\n// e.g., \"ctrl-p,ctrl-q\"\n// X-Arvados-Login-Username: argument to \"docker exec --user\": account\n// used to run command(s) inside the container.\nfunc (gw *Gateway) handleSSH(w http.ResponseWriter, req *http.Request) {\n\treq.ParseForm()\n\tdetachKeys := req.Form.Get(\"detach_keys\")\n\tusername := req.Form.Get(\"login_username\")\n\tif username == \"\" {\n\t\tusername = \"root\"\n\t}\n\tnetconn, _, err := http.NewResponseController(w).Hijack()\n\tif err != nil {\n\t\thttp.Error(w, \"connection upgrade failed: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer netconn.Close()\n\tw.Header().Set(\"Connection\", \"upgrade\")\n\tw.Header().Set(\"Upgrade\", \"ssh\")\n\tnetconn.Write([]byte(\"HTTP/1.1 101 Switching Protocols\\r\\n\"))\n\tw.Header().Write(netconn)\n\tnetconn.Write([]byte(\"\\r\\n\"))\n\thttpserver.ExemptFromDeadline(req)\n\n\tctx := req.Context()\n\n\tconn, newchans, reqs, err := ssh.NewServerConn(netconn, &gw.sshConfig)\n\tif err == io.EOF {\n\t\treturn\n\t} else if err != nil {\n\t\tgw.Log.Printf(\"ssh.NewServerConn: %s\", err)\n\t\treturn\n\t}\n\tdefer conn.Close()\n\tgo ssh.DiscardRequests(reqs)\n\tfor newch := range newchans {\n\t\tswitch newch.ChannelType() {\n\t\tcase \"direct-tcpip\":\n\t\t\tgo gw.handleDirectTCPIP(ctx, newch)\n\t\tcase \"session\":\n\t\t\tgo gw.handleSession(ctx, newch, detachKeys, username)\n\t\tdefault:\n\t\t\tgo newch.Reject(ssh.UnknownChannelType, fmt.Sprintf(\"unsupported channel type %q\", newch.ChannelType()))\n\t\t}\n\t}\n}\n\nfunc (gw *Gateway) handleDirectTCPIP(ctx context.Context, newch ssh.NewChannel) {\n\tch, reqs, err := newch.Accept()\n\tif err != nil {\n\t\tgw.Log.Printf(\"accept direct-tcpip channel: %s\", err)\n\t\treturn\n\t}\n\tdefer ch.Close()\n\tgo ssh.DiscardRequests(reqs)\n\n\t// RFC 4254 7.2 (copy of channelOpenDirectMsg in\n\t// golang.org/x/crypto/ssh)\n\tvar msg struct {\n\t\tRaddr string\n\t\tRport uint32\n\t\tLaddr string\n\t\tLport uint32\n\t}\n\terr = ssh.Unmarshal(newch.ExtraData(), &msg)\n\tif err != nil {\n\t\tfmt.Fprintf(ch.Stderr(), \"unmarshal direct-tcpip extradata: %s\\n\", err)\n\t\treturn\n\t}\n\tswitch msg.Raddr {\n\tcase \"localhost\", \"0.0.0.0\", \"127.0.0.1\", \"::1\", \"::\":\n\tdefault:\n\t\tfmt.Fprintf(ch.Stderr(), \"cannot forward to ports on %q, only localhost\\n\", msg.Raddr)\n\t\treturn\n\t}\n\n\tdstaddr, err := gw.Target.IPAddress()\n\tif err != nil {\n\t\tfmt.Fprintf(ch.Stderr(), \"container has no IP address: %s\\n\", err)\n\t\treturn\n\t} else if dstaddr == \"\" {\n\t\tfmt.Fprintf(ch.Stderr(), \"container has no IP address\\n\")\n\t\treturn\n\t}\n\n\tdst := net.JoinHostPort(dstaddr, fmt.Sprintf(\"%d\", msg.Rport))\n\ttcpconn, err := net.Dial(\"tcp\", dst)\n\tif err != nil {\n\t\tfmt.Fprintf(ch.Stderr(), \"%s: %s\\n\", dst, err)\n\t\treturn\n\t}\n\tgo func() {\n\t\tn, _ := io.Copy(ch, tcpconn)\n\t\tctxlog.FromContext(ctx).Debugf(\"tcpip: sent %d bytes\\n\", n)\n\t\tch.CloseWrite()\n\t}()\n\tn, _ := 
io.Copy(tcpconn, ch)\n\tctxlog.FromContext(ctx).Debugf(\"tcpip: received %d bytes\\n\", n)\n}\n\nfunc (gw *Gateway) handleSession(ctx context.Context, newch ssh.NewChannel, detachKeys, username string) {\n\tch, reqs, err := newch.Accept()\n\tif err != nil {\n\t\tgw.Log.Printf(\"error accepting session channel: %s\", err)\n\t\treturn\n\t}\n\tdefer ch.Close()\n\n\tvar pty0, tty0 *os.File\n\t// Where to send errors/messages for the client to see\n\tlogw := io.Writer(ch.Stderr())\n\t// How to end lines when sending errors/messages to the client\n\t// (changes to \\r\\n when using a pty)\n\teol := \"\\n\"\n\t// Env vars to add to child process\n\ttermEnv := []string(nil)\n\n\tstarted := 0\n\twantClose := make(chan struct{})\n\tfor {\n\t\tvar req *ssh.Request\n\t\tselect {\n\t\tcase r, ok := <-reqs:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\treq = r\n\t\tcase <-wantClose:\n\t\t\treturn\n\t\t}\n\t\tok := false\n\t\tswitch req.Type {\n\t\tcase \"shell\", \"exec\":\n\t\t\tif started++; started != 1 {\n\t\t\t\t// RFC 4254 6.5: \"Only one of these\n\t\t\t\t// requests can succeed per channel.\"\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tok = true\n\t\t\tvar payload struct {\n\t\t\t\tCommand string\n\t\t\t}\n\t\t\tssh.Unmarshal(req.Payload, &payload)\n\t\t\texecargs, err := shlex.Split(payload.Command)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(logw, \"error parsing supplied command: %s\"+eol, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(execargs) == 0 {\n\t\t\t\texecargs = []string{\"/bin/bash\", \"-login\"}\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\tvar resp struct {\n\t\t\t\t\tStatus uint32\n\t\t\t\t}\n\t\t\t\tdefer func() {\n\t\t\t\t\tch.SendRequest(\"exit-status\", false, ssh.Marshal(&resp))\n\t\t\t\t\tclose(wantClose)\n\t\t\t\t}()\n\n\t\t\t\tcmd, err := gw.Target.InjectCommand(ctx, detachKeys, username, tty0 != nil, execargs)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Fprintln(ch.Stderr(), err)\n\t\t\t\t\tch.CloseWrite()\n\t\t\t\t\tresp.Status = 1\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif tty0 != nil {\n\t\t\t\t\tcmd.Stdin = tty0\n\t\t\t\t\tcmd.Stdout = tty0\n\t\t\t\t\tcmd.Stderr = tty0\n\t\t\t\t\tgo io.Copy(ch, pty0)\n\t\t\t\t\tgo io.Copy(pty0, ch)\n\t\t\t\t\t// Send our own debug messages to tty as well.\n\t\t\t\t\tlogw = tty0\n\t\t\t\t} else {\n\t\t\t\t\t// StdinPipe may seem\n\t\t\t\t\t// superfluous here, but it's\n\t\t\t\t\t// not: it causes cmd.Run() to\n\t\t\t\t\t// return when the subprocess\n\t\t\t\t\t// exits. Without it, Run()\n\t\t\t\t\t// waits for stdin to close,\n\t\t\t\t\t// which causes \"ssh ... 
echo\n\t\t\t\t\t// ok\" (with the client's\n\t\t\t\t\t// stdin connected to a\n\t\t\t\t\t// terminal or something) to\n\t\t\t\t\t// hang.\n\t\t\t\t\tstdin, err := cmd.StdinPipe()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Fprintln(ch.Stderr(), err)\n\t\t\t\t\t\tch.CloseWrite()\n\t\t\t\t\t\tresp.Status = 1\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tio.Copy(stdin, ch)\n\t\t\t\t\t\tstdin.Close()\n\t\t\t\t\t}()\n\t\t\t\t\tcmd.Stdout = ch\n\t\t\t\t\tcmd.Stderr = ch.Stderr()\n\t\t\t\t}\n\t\t\t\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\t\t\t\tSetctty: tty0 != nil,\n\t\t\t\t\tSetsid:  true,\n\t\t\t\t}\n\t\t\t\tcmd.Env = append(os.Environ(), termEnv...)\n\t\t\t\terr = cmd.Run()\n\t\t\t\tif exiterr, ok := err.(*exec.ExitError); ok {\n\t\t\t\t\tif status, ok := exiterr.Sys().(syscall.WaitStatus); ok {\n\t\t\t\t\t\tresp.Status = uint32(status.ExitStatus())\n\t\t\t\t\t}\n\t\t\t\t} else if err != nil {\n\t\t\t\t\t// Propagate errors like `exec: \"docker\": executable file not found in $PATH`\n\t\t\t\t\tfmt.Fprintln(ch.Stderr(), err)\n\t\t\t\t}\n\t\t\t\terrClose := ch.CloseWrite()\n\t\t\t\tif resp.Status == 0 && (err != nil || errClose != nil) {\n\t\t\t\t\tresp.Status = 1\n\t\t\t\t}\n\t\t\t}()\n\t\tcase \"pty-req\":\n\t\t\teol = \"\\r\\n\"\n\t\t\tp, t, err := pty.Open()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(ch.Stderr(), \"pty failed: %s\"+eol, err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdefer p.Close()\n\t\t\tdefer t.Close()\n\t\t\tpty0, tty0 = p, t\n\t\t\tok = true\n\t\t\tvar payload struct {\n\t\t\t\tTerm string\n\t\t\t\tCols uint32\n\t\t\t\tRows uint32\n\t\t\t\tX    uint32\n\t\t\t\tY    uint32\n\t\t\t}\n\t\t\tssh.Unmarshal(req.Payload, &payload)\n\t\t\ttermEnv = []string{\"TERM=\" + payload.Term, \"USE_TTY=1\"}\n\t\t\terr = pty.Setsize(pty0, &pty.Winsize{Rows: uint16(payload.Rows), Cols: uint16(payload.Cols), X: uint16(payload.X), Y: uint16(payload.Y)})\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(logw, \"pty-req: setsize failed: %s\"+eol, err)\n\t\t\t}\n\t\tcase \"window-change\":\n\t\t\tvar payload struct {\n\t\t\t\tCols uint32\n\t\t\t\tRows uint32\n\t\t\t\tX    uint32\n\t\t\t\tY    uint32\n\t\t\t}\n\t\t\tssh.Unmarshal(req.Payload, &payload)\n\t\t\terr := pty.Setsize(pty0, &pty.Winsize{Rows: uint16(payload.Rows), Cols: uint16(payload.Cols), X: uint16(payload.X), Y: uint16(payload.Y)})\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(logw, \"window-change: setsize failed: %s\"+eol, err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tok = true\n\t\tcase \"env\":\n\t\t\t// TODO: implement \"env\"\n\t\t\t// requests by setting env\n\t\t\t// vars in the docker-exec\n\t\t\t// command (not docker-exec's\n\t\t\t// own environment, which\n\t\t\t// would be a gaping security\n\t\t\t// hole).\n\t\tdefault:\n\t\t\t// fmt.Fprintf(logw, \"declined request %q on ssh channel\"+eol, req.Type)\n\t\t}\n\t\tif req.WantReply {\n\t\t\treq.Reply(ok, nil)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/crunchrun/copier.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\t\"github.com/bmatcuk/doublestar/v4\"\n)\n\ntype printfer interface {\n\tPrintf(string, ...interface{})\n}\n\nvar errTooManySymlinks = errors.New(\"too many symlinks, or symlink cycle\")\n\nconst limitFollowSymlinks = 10\n\ntype filetodo struct {\n\tsrc  string\n\tdst  string\n\tsize int64\n}\n\n// copier copies data from a finished container's output path to a new\n// Arvados collection.\n//\n// Regular files (and symlinks to regular files) in hostOutputDir are\n// copied from the local filesystem.\n//\n// Symlinks to mounted collections, and any collections mounted under\n// ctrOutputDir, are copied by reference, without moving any data\n// around.\n//\n// Symlinks to other parts of the container's filesystem result in\n// errors.\n//\n// Use:\n//\n//\tmanifest, err := (&copier{...}).Copy()\ntype copier struct {\n\tclient        *arvados.Client\n\tkeepClient    IKeepClient\n\thostOutputDir string\n\tctrOutputDir  string\n\tglobs         []string\n\tbindmounts    map[string]bindmount\n\tmounts        map[string]arvados.Mount\n\tsecretMounts  map[string]arvados.Mount\n\tlogger        printfer\n\n\tdirs   []string\n\tfiles  []filetodo\n\tstaged arvados.CollectionFileSystem\n\n\tmanifestCache map[string]string\n\n\t// tmpfs is the filesystem representation of the source\n\t// collection that was most recently handled in\n\t// copyFromCollection.  This improves performance slightly in\n\t// the special case where many mounts reference the same\n\t// source collection.\n\ttmpfs             arvados.CollectionFileSystem\n\ttmpfsManifestText string\n}\n\n// Copy copies data as needed, and returns a new manifest.\n//\n// Copy should not be called more than once.\nfunc (cp *copier) Copy() (string, error) {\n\tvar err error\n\tcp.staged, err = (&arvados.Collection{}).FileSystem(cp.client, cp.keepClient)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error creating Collection.FileSystem: %v\", err)\n\t}\n\terr = cp.walkMount(\"\", cp.ctrOutputDir, limitFollowSymlinks, true)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error scanning files to copy to output: %v\", err)\n\t}\n\n\t// Remove files/dirs that don't match globs (the files/dirs\n\t// that were added during cp.walkMount() by copying subtree\n\t// manifests into cp.staged).\n\terr = cp.applyGlobsToStaged()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error while removing non-matching files from output collection: %w\", err)\n\t}\n\t// Remove files/dirs that don't match globs (the files/dirs\n\t// that are stored on the local filesystem and would need to\n\t// be copied in copyFile() below).\n\tcp.applyGlobsToFilesAndDirs()\n\tfor _, d := range cp.dirs {\n\t\terr = cp.staged.Mkdir(d, 0777)\n\t\tif err != nil && err != os.ErrExist {\n\t\t\treturn \"\", fmt.Errorf(\"error making directory %q in output collection: %v\", d, err)\n\t\t}\n\t}\n\n\tvar unflushed int64\n\tvar lastparentdir string\n\tfor _, f := range cp.files {\n\t\t// If a dir has just had its last file added, do a\n\t\t// full Flush. 
Otherwise, do a partial Flush (write\n\t\t// full-size blocks, but leave the last short block\n\t\t// open so f's data can be packed with it).\n\t\tdir, _ := filepath.Split(f.dst)\n\t\tif dir != lastparentdir || unflushed > keepclient.BLOCKSIZE {\n\t\t\tif err := cp.staged.Flush(\"/\"+lastparentdir, dir != lastparentdir); err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"error flushing output collection file data: %v\", err)\n\t\t\t}\n\t\t\tunflushed = 0\n\t\t}\n\t\tlastparentdir = dir\n\n\t\tn, err := cp.copyFile(cp.staged, f)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"error copying file %q into output collection: %v\", f, err)\n\t\t}\n\t\tunflushed += n\n\t}\n\treturn cp.staged.MarshalManifest(\".\")\n}\n\nfunc (cp *copier) matchGlobs(path string, isDir bool) bool {\n\t// An entry in the top level of the output directory looks\n\t// like \"/foo\", but globs look like \"foo\", so we strip the\n\t// leading \"/\" before matching.\n\tpath = strings.TrimLeft(path, \"/\")\n\tfor _, glob := range cp.globs {\n\t\tif !isDir && strings.HasSuffix(glob, \"/**\") {\n\t\t\t// doublestar.Match(\"f*/**\", \"ff\") and\n\t\t\t// doublestar.Match(\"f*/**\", \"ff/gg\") both\n\t\t\t// return true, but (to be compatible with\n\t\t\t// bash shopt) \"ff\" should match only if it is\n\t\t\t// a directory.\n\t\t\t//\n\t\t\t// To avoid errant matches, we add the file's\n\t\t\t// basename to the end of the pattern:\n\t\t\t//\n\t\t\t// Match(\"f*/**/ff\", \"ff\") => false\n\t\t\t// Match(\"f*/**/gg\", \"ff/gg\") => true\n\t\t\t//\n\t\t\t// Of course, we need to escape basename in\n\t\t\t// case it contains *, ?, \\, etc.\n\t\t\t_, name := filepath.Split(path)\n\t\t\tescapedName := strings.TrimSuffix(strings.Replace(name, \"\", \"\\\\\", -1), \"\\\\\")\n\t\t\tif match, _ := doublestar.Match(glob+\"/\"+escapedName, path); match {\n\t\t\t\treturn true\n\t\t\t}\n\t\t} else if match, _ := doublestar.Match(glob, path); match {\n\t\t\treturn true\n\t\t} else if isDir {\n\t\t\t// Workaround doublestar bug (v4.6.1).\n\t\t\t// \"foo*/**\" should match \"foo\", but does not,\n\t\t\t// because isZeroLengthPattern does not accept\n\t\t\t// \"*/**\" as a zero length pattern.\n\t\t\tif trunc := strings.TrimSuffix(glob, \"*/**\"); trunc != glob {\n\t\t\t\tif match, _ := doublestar.Match(trunc, path); match {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n// Delete entries from cp.files that do not match cp.globs.\n//\n// Delete entries from cp.dirs that do not match cp.globs.\n//\n// Ensure parent/ancestor directories of remaining cp.files and\n// cp.dirs entries are still present in cp.dirs, even if they do not\n// match cp.globs themselves.\nfunc (cp *copier) applyGlobsToFilesAndDirs() {\n\tif len(cp.globs) == 0 {\n\t\treturn\n\t}\n\tkeepdirs := make(map[string]bool)\n\tfor _, path := range cp.dirs {\n\t\tif cp.matchGlobs(path, true) {\n\t\t\tkeepdirs[path] = true\n\t\t}\n\t}\n\tfor path := range keepdirs {\n\t\tfor i, c := range path {\n\t\t\tif i > 0 && c == '/' {\n\t\t\t\tkeepdirs[path[:i]] = true\n\t\t\t}\n\t\t}\n\t}\n\tvar keepfiles []filetodo\n\tfor _, file := range cp.files {\n\t\tif cp.matchGlobs(file.dst, false) {\n\t\t\tkeepfiles = append(keepfiles, file)\n\t\t}\n\t}\n\tfor _, file := range keepfiles {\n\t\tfor i, c := range file.dst {\n\t\t\tif i > 0 && c == '/' {\n\t\t\t\tkeepdirs[file.dst[:i]] = true\n\t\t\t}\n\t\t}\n\t}\n\tcp.dirs = nil\n\tfor path := range keepdirs {\n\t\tcp.dirs = append(cp.dirs, path)\n\t}\n\tsort.Strings(cp.dirs)\n\tcp.files = keepfiles\n}\n\n// Delete 
files in cp.staged that do not match cp.globs.  Also delete\n// directories that are empty (after deleting non-matching files) and\n// do not match cp.globs themselves.\nfunc (cp *copier) applyGlobsToStaged() error {\n\tif len(cp.globs) == 0 {\n\t\treturn nil\n\t}\n\tinclude := make(map[string]bool)\n\terr := fs.WalkDir(arvados.FS(cp.staged), \"\", func(path string, ent fs.DirEntry, err error) error {\n\t\tif cp.matchGlobs(path, ent.IsDir()) {\n\t\t\tfor i, c := range path {\n\t\t\t\tif i > 0 && c == '/' {\n\t\t\t\t\tinclude[path[:i]] = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tinclude[path] = true\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = fs.WalkDir(arvados.FS(cp.staged), \"\", func(path string, ent fs.DirEntry, err error) error {\n\t\tif err != nil || path == \"\" {\n\t\t\treturn err\n\t\t}\n\t\tif !include[path] {\n\t\t\terr := cp.staged.RemoveAll(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif ent.IsDir() {\n\t\t\t\treturn fs.SkipDir\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\treturn err\n}\n\n// Return true if it's possible for any descendant of the given path\n// to match anything in cp.globs.  Used by walkMount to avoid loading\n// collections that are mounted underneath ctrOutputPath but excluded\n// by globs.\nfunc (cp *copier) subtreeCouldMatch(path string) bool {\n\tif len(cp.globs) == 0 {\n\t\treturn true\n\t}\n\tpathdepth := 1 + strings.Count(path, \"/\")\n\tfor _, glob := range cp.globs {\n\t\tglobdepth := 0\n\t\tlastsep := 0\n\t\tfor i, c := range glob {\n\t\t\tif c != '/' || !doublestar.ValidatePattern(glob[:i]) {\n\t\t\t\t// Escaped \"/\", or \"/\" in a character\n\t\t\t\t// class, is not a path separator.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif glob[lastsep:i] == \"**\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tlastsep = i + 1\n\t\t\tif globdepth++; globdepth == pathdepth {\n\t\t\t\tif match, _ := doublestar.Match(glob[:i]+\"/*\", path+\"/z\"); match {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif globdepth < pathdepth && glob[lastsep:] == \"**\" {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (cp *copier) copyFile(fs arvados.CollectionFileSystem, f filetodo) (int64, error) {\n\tcp.logger.Printf(\"copying %q (%d bytes)\", strings.TrimLeft(f.dst, \"/\"), f.size)\n\tdst, err := fs.OpenFile(f.dst, os.O_CREATE|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tsrc, err := os.Open(f.src)\n\tif err != nil {\n\t\tdst.Close()\n\t\treturn 0, err\n\t}\n\tdefer src.Close()\n\tn, err := io.Copy(dst, src)\n\tif err != nil {\n\t\tdst.Close()\n\t\treturn n, err\n\t}\n\treturn n, dst.Close()\n}\n\n// Add to cp.staged, cp.files, and cp.dirs so as to copy src (an\n// absolute path in the container's filesystem) to dest (an absolute\n// path in the output collection, or \"\" for output root).\n//\n// src must be (or be a descendant of) a readonly \"collection\" mount,\n// a writable collection mounted at ctrOutputPath, or a \"tmp\" mount.\n//\n// If walkMountsBelow is true, include contents of any collection\n// mounted below src as well.\nfunc (cp *copier) walkMount(dest, src string, maxSymlinks int, walkMountsBelow bool) error {\n\t// srcRoot, srcMount indicate the innermost mount that\n\t// contains src.\n\tvar srcRoot string\n\tvar srcMount arvados.Mount\n\tfor root, mnt := range cp.mounts {\n\t\tif len(root) > len(srcRoot) && strings.HasPrefix(src+\"/\", root+\"/\") {\n\t\t\tsrcRoot, srcMount = root, mnt\n\t\t}\n\t}\n\tfor root := range cp.secretMounts {\n\t\tif len(root) > len(srcRoot) && 
strings.HasPrefix(src+\"/\", root+\"/\") {\n\t\t\t// Silently omit secrets, and symlinks to\n\t\t\t// secrets.\n\t\t\treturn nil\n\t\t}\n\t}\n\tif srcRoot == \"\" {\n\t\treturn fmt.Errorf(\"cannot output file %q: not in any mount\", src)\n\t}\n\n\t// srcRelPath is the path to the file/dir we are trying to\n\t// copy, relative to its mount point -- \".\", \"./foo.txt\", ...\n\tsrcRelPath := filepath.Join(\".\", srcMount.Path, src[len(srcRoot):])\n\n\t// outputRelPath is the destination path relative to the\n\t// output directory. Used for logging and glob matching.\n\tvar outputRelPath = \"\"\n\tif strings.HasPrefix(src, cp.ctrOutputDir) {\n\t\toutputRelPath = strings.TrimPrefix(src[len(cp.ctrOutputDir):], \"/\")\n\t}\n\tif outputRelPath == \"\" {\n\t\t// blank means copy a whole directory, so replace it\n\t\t// with a wildcard to make it a little clearer what's\n\t\t// going on since outputRelPath is only used for logging\n\t\toutputRelPath = \"*\"\n\t}\n\n\tswitch {\n\tcase srcMount.ExcludeFromOutput:\n\tcase outputRelPath != \"*\" && !cp.subtreeCouldMatch(outputRelPath):\n\t\tcp.logger.Printf(\"not copying %q because contents cannot match output globs\", outputRelPath)\n\t\treturn nil\n\tcase srcMount.Kind == \"tmp\":\n\t\t// Handle by walking the host filesystem.\n\t\treturn cp.walkHostFS(dest, src, maxSymlinks, walkMountsBelow)\n\tcase srcMount.Kind != \"collection\":\n\t\treturn fmt.Errorf(\"%q: unsupported mount %q in output (kind is %q)\", src, srcRoot, srcMount.Kind)\n\tcase !srcMount.Writable:\n\t\tcp.logger.Printf(\"copying %q from %v/%v\", outputRelPath, srcMount.PortableDataHash, strings.TrimPrefix(srcRelPath, \"./\"))\n\t\tmft, err := cp.getManifest(srcMount.PortableDataHash)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = cp.copyFromCollection(dest, &arvados.Collection{ManifestText: mft}, srcRelPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\tcp.logger.Printf(\"copying %q\", outputRelPath)\n\t\thostRoot, err := cp.hostRoot(srcRoot)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tf, err := os.Open(filepath.Join(hostRoot, \".arvados#collection\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\t\tvar coll arvados.Collection\n\t\terr = json.NewDecoder(f).Decode(&coll)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = cp.copyFromCollection(dest, &coll, srcRelPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tcp.tmpfs = nil\n\tcp.tmpfsManifestText = \"\"\n\tif walkMountsBelow {\n\t\treturn cp.walkMountsBelow(dest, src)\n\t}\n\treturn nil\n}\n\nfunc (cp *copier) copyFromCollection(dest string, coll *arvados.Collection, srcRelPath string) error {\n\tif coll.ManifestText == \"\" || coll.ManifestText != cp.tmpfsManifestText {\n\t\ttmpfs, err := coll.FileSystem(cp.client, cp.keepClient)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcp.tmpfs = tmpfs\n\t\tcp.tmpfsManifestText = coll.ManifestText\n\t}\n\tsnap, err := arvados.Snapshot(cp.tmpfs, srcRelPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Create ancestors of dest, if necessary.\n\tfor i, c := range dest {\n\t\tif i > 0 && c == '/' {\n\t\t\terr = cp.staged.Mkdir(dest[:i], 0777)\n\t\t\tif err != nil && !os.IsExist(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn arvados.Splice(cp.staged, dest, snap)\n}\n\nfunc (cp *copier) walkMountsBelow(dest, src string) error {\n\tfor mnt, mntinfo := range cp.mounts {\n\t\tif !strings.HasPrefix(mnt, src+\"/\") {\n\t\t\tcontinue\n\t\t}\n\t\tif cp.copyRegularFiles(mntinfo) {\n\t\t\t// These got copied into the 
nearest parent\n\t\t\t// mount as regular files during setup, so\n\t\t\t// they get copied as regular files when we\n\t\t\t// process the parent. Output will reflect any\n\t\t\t// changes and deletions done by the\n\t\t\t// container.\n\t\t\tcontinue\n\t\t}\n\t\t// Example: we are processing dest=/foo src=/mnt1/dir1\n\t\t// (perhaps we followed a symlink /outdir/foo ->\n\t\t// /mnt1/dir1). Caller has already processed the\n\t\t// collection mounted at /mnt1, but now we find that\n\t\t// /mnt1/dir1/mnt2 is also a mount, so we need to copy\n\t\t// src=/mnt1/dir1/mnt2 to dest=/foo/mnt2.\n\t\t//\n\t\t// We handle all descendants of /mnt1/dir1 in this\n\t\t// loop instead of using recursion:\n\t\t// /mnt1/dir1/mnt2/mnt3 is a child of both /mnt1 and\n\t\t// /mnt1/dir1/mnt2, but we only want to walk it\n\t\t// once. (This simplification is safe because mounted\n\t\t// collections cannot contain symlinks.)\n\t\terr := cp.walkMount(dest+mnt[len(src):], mnt, 0, false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// Add entries to cp.dirs and cp.files so as to copy src (an absolute\n// path in the container's filesystem which corresponds to a real file\n// or directory in cp.hostOutputDir) to dest (an absolute path in the\n// output collection, or \"\" for output root).\n//\n// Always follow symlinks.\n//\n// If includeMounts is true, include mounts at and below src.\n// Otherwise, skip them.\nfunc (cp *copier) walkHostFS(dest, src string, maxSymlinks int, includeMounts bool) error {\n\tif includeMounts {\n\t\terr := cp.walkMountsBelow(dest, src)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\thostsrc := cp.hostOutputDir + src[len(cp.ctrOutputDir):]\n\n\t// If src is a symlink, walk its target.\n\tfi, err := os.Lstat(hostsrc)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"lstat %q: %s\", src, err)\n\t}\n\tif fi.Mode()&os.ModeSymlink != 0 {\n\t\tif maxSymlinks < 0 {\n\t\t\treturn errTooManySymlinks\n\t\t}\n\t\ttarget, err := os.Readlink(hostsrc)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"readlink %q: %s\", src, err)\n\t\t}\n\t\tif !strings.HasPrefix(target, \"/\") {\n\t\t\ttarget = filepath.Join(filepath.Dir(src), target)\n\t\t}\n\t\treturn cp.walkMount(dest, target, maxSymlinks-1, true)\n\t}\n\n\t// If src is a regular directory, append it to cp.dirs and\n\t// walk each of its children. 
(If there are no children,\n\t// create an empty file \"dest/.keep\".)\n\tif fi.Mode().IsDir() {\n\t\tif dest != \"\" {\n\t\t\tcp.dirs = append(cp.dirs, dest)\n\t\t}\n\t\tdir, err := os.Open(hostsrc)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"open %q: %s\", src, err)\n\t\t}\n\t\tnames, err := dir.Readdirnames(-1)\n\t\tdir.Close()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"readdirnames %q: %s\", src, err)\n\t\t}\n\t\tif len(names) == 0 {\n\t\t\tif dest != \"\" {\n\t\t\t\tcp.files = append(cp.files, filetodo{\n\t\t\t\t\tsrc: os.DevNull,\n\t\t\t\t\tdst: dest + \"/.keep\",\n\t\t\t\t})\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tsort.Strings(names)\n\t\tfor _, name := range names {\n\t\t\tdest, src := dest+\"/\"+name, src+\"/\"+name\n\t\t\tif _, isSecret := cp.secretMounts[src]; isSecret {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif mntinfo, isMount := cp.mounts[src]; isMount && !cp.copyRegularFiles(mntinfo) {\n\t\t\t\t// If a regular file/dir somehow\n\t\t\t\t// exists at a path that's also a\n\t\t\t\t// mount target, ignore the file --\n\t\t\t\t// the mount has already been included\n\t\t\t\t// with walkMountsBelow().\n\t\t\t\t//\n\t\t\t\t// (...except mount types that are\n\t\t\t\t// handled as regular files.)\n\t\t\t\tcontinue\n\t\t\t} else if isMount && !cp.subtreeCouldMatch(src[len(cp.ctrOutputDir)+1:]) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = cp.walkHostFS(dest, src, maxSymlinks, false)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\t// If src is a regular file, append it to cp.files.\n\tif fi.Mode().IsRegular() {\n\t\tcp.files = append(cp.files, filetodo{\n\t\t\tsrc:  hostsrc,\n\t\t\tdst:  dest,\n\t\t\tsize: fi.Size(),\n\t\t})\n\t\treturn nil\n\t}\n\tcp.logger.Printf(\"Skipping unsupported file type (mode %o) in output dir: %q\", fi.Mode(), src)\n\treturn nil\n}\n\n// Return the host path that was mounted at the given path in the\n// container.\nfunc (cp *copier) hostRoot(ctrRoot string) (string, error) {\n\tif ctrRoot == cp.ctrOutputDir {\n\t\treturn cp.hostOutputDir, nil\n\t}\n\tif mnt, ok := cp.bindmounts[ctrRoot]; ok {\n\t\treturn mnt.HostPath, nil\n\t}\n\treturn \"\", fmt.Errorf(\"not bind-mounted: %q\", ctrRoot)\n}\n\nfunc (cp *copier) copyRegularFiles(m arvados.Mount) bool {\n\treturn m.Kind == \"text\" || m.Kind == \"json\" || (m.Kind == \"collection\" && m.Writable)\n}\n\nfunc (cp *copier) getManifest(pdh string) (string, error) {\n\tif mft, ok := cp.manifestCache[pdh]; ok {\n\t\treturn mft, nil\n\t}\n\tvar coll arvados.Collection\n\terr := cp.client.RequestAndDecode(&coll, \"GET\", \"arvados/v1/collections/\"+pdh, nil, nil)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error retrieving collection record for %q: %s\", pdh, err)\n\t}\n\tif cp.manifestCache == nil {\n\t\tcp.manifestCache = make(map[string]string)\n\t}\n\tcp.manifestCache[pdh] = coll.ManifestText\n\treturn coll.ManifestText, nil\n}\n"
  },
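  {
    "path": "examples/crunchrun/output-globs/main.go",
    "content": "// Illustrative sketch -- not part of the Arvados source tree.\n//\n// copier.matchGlobs (lib/crunchrun/copier.go) works around a\n// doublestar v4 behavior: a pattern like \"f*/**\" matches a regular\n// file named \"ff\", although for compatibility with bash's globstar it\n// should only match \"ff\" if it is a directory. This program\n// reproduces the quirk and the basename-escaping trick used to avoid\n// it; the escapeName and match helpers are hypothetical stand-ins for\n// the inline logic in matchGlobs.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"github.com/bmatcuk/doublestar/v4\"\n)\n\n// escapeName backslash-escapes every character of name, so glob\n// metacharacters (*, ?, [, \\) in a filename are matched literally.\nfunc escapeName(name string) string {\n\treturn strings.TrimSuffix(strings.Replace(name, \"\", \"\\\\\", -1), \"\\\\\")\n}\n\nfunc match(pattern, path string, isDir bool) bool {\n\tif !isDir && strings.HasSuffix(pattern, \"/**\") {\n\t\t// Appending the escaped basename means the path must have\n\t\t// at least one component below the pattern's prefix.\n\t\t_, name := filepath.Split(path)\n\t\tok, _ := doublestar.Match(pattern+\"/\"+escapeName(name), path)\n\t\treturn ok\n\t}\n\tok, _ := doublestar.Match(pattern, path)\n\treturn ok\n}\n\nfunc main() {\n\tquirk, _ := doublestar.Match(\"f*/**\", \"ff\")\n\tfmt.Println(quirk)                          // true: the quirk\n\tfmt.Println(match(\"f*/**\", \"ff\", false))    // false: regular file excluded\n\tfmt.Println(match(\"f*/**\", \"ff\", true))     // true: directory still matches\n\tfmt.Println(match(\"f*/**\", \"ff/gg\", false)) // true: file inside the dir matches\n}\n"
  },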
  {
    "path": "lib/crunchrun/copier_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n\t\"sort\"\n\t\"syscall\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\t\"github.com/sirupsen/logrus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&copierSuite{})\n\ntype copierSuite struct {\n\tcp  copier\n\tlog bytes.Buffer\n}\n\nfunc (s *copierSuite) SetUpTest(c *check.C) {\n\ttmpdir := c.MkDir()\n\ts.log = bytes.Buffer{}\n\n\tcl, err := arvadosclient.MakeArvadosClient()\n\tc.Assert(err, check.IsNil)\n\tkc, err := keepclient.MakeKeepClient(cl)\n\tc.Assert(err, check.IsNil)\n\tcollfs, err := (&arvados.Collection{}).FileSystem(arvados.NewClientFromEnv(), kc)\n\tc.Assert(err, check.IsNil)\n\n\ts.cp = copier{\n\t\tclient:        arvados.NewClientFromEnv(),\n\t\tkeepClient:    kc,\n\t\thostOutputDir: tmpdir,\n\t\tctrOutputDir:  \"/ctr/outdir\",\n\t\tmounts: map[string]arvados.Mount{\n\t\t\t\"/ctr/outdir\": {Kind: \"tmp\"},\n\t\t},\n\t\tsecretMounts: map[string]arvados.Mount{\n\t\t\t\"/secret_text\": {Kind: \"text\", Content: \"xyzzy\"},\n\t\t},\n\t\tlogger: &logrus.Logger{Out: &s.log, Formatter: &logrus.TextFormatter{}, Level: logrus.InfoLevel},\n\t\tstaged: collfs,\n\t}\n}\n\nfunc (s *copierSuite) TestEmptyOutput(c *check.C) {\n\terr := s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.IsNil)\n\tc.Check(s.cp.dirs, check.DeepEquals, []string(nil))\n\tc.Check(len(s.cp.files), check.Equals, 0)\n}\n\nfunc (s *copierSuite) TestEmptyWritableMount(c *check.C) {\n\ts.writeFileInOutputDir(c, \".arvados#collection\", `{\"manifest_text\":\"\"}`)\n\ts.cp.mounts[s.cp.ctrOutputDir] = arvados.Mount{\n\t\tKind:     \"collection\",\n\t\tWritable: true,\n\t}\n\n\terr := s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Assert(err, check.IsNil)\n\tc.Check(s.cp.dirs, check.DeepEquals, []string(nil))\n\tc.Check(len(s.cp.files), check.Equals, 0)\n\trootdir, err := s.cp.staged.Open(\".\")\n\tc.Assert(err, check.IsNil)\n\tdefer rootdir.Close()\n\tfis, err := rootdir.Readdir(-1)\n\tc.Assert(err, check.IsNil)\n\tc.Check(fis, check.HasLen, 0)\n}\n\nfunc (s *copierSuite) TestOutputCollectionWithOnlySubmounts(c *check.C) {\n\ts.writeFileInOutputDir(c, \".arvados#collection\", `{\"manifest_text\":\"\"}`)\n\ts.cp.mounts[s.cp.ctrOutputDir] = arvados.Mount{\n\t\tKind:     \"collection\",\n\t\tWritable: true,\n\t}\n\ts.cp.mounts[path.Join(s.cp.ctrOutputDir, \"foo\")] = arvados.Mount{\n\t\tKind:             \"collection\",\n\t\tPath:             \"foo\",\n\t\tPortableDataHash: arvadostest.FooCollectionPDH,\n\t}\n\n\terr := s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Assert(err, check.IsNil)\n\n\t// s.cp.dirs and s.cp.files are empty, because nothing needs\n\t// to be copied from disk.\n\tc.Check(s.cp.dirs, check.DeepEquals, []string(nil))\n\tc.Check(len(s.cp.files), check.Equals, 0)\n\n\t// The \"foo\" file has already been copied from FooCollection\n\t// to s.cp.staged via Snapshot+Splice.\n\trootdir, err := s.cp.staged.Open(\".\")\n\tc.Assert(err, check.IsNil)\n\tdefer rootdir.Close()\n\tfis, err := rootdir.Readdir(-1)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(fis, check.HasLen, 1)\n\tc.Check(fis[0].Size(), check.Equals, int64(3))\n}\n\nfunc (s 
*copierSuite) TestRepetitiveMountsInOutputDir(c *check.C) {\n\tvar memstats0 runtime.MemStats\n\truntime.ReadMemStats(&memstats0)\n\n\ts.writeFileInOutputDir(c, \".arvados#collection\", `{\"manifest_text\":\"\"}`)\n\ts.cp.mounts[s.cp.ctrOutputDir] = arvados.Mount{\n\t\tKind:     \"collection\",\n\t\tWritable: true,\n\t}\n\tnmounts := 200\n\tncollections := 1\n\tpdh := make([]string, ncollections)\n\ts.cp.manifestCache = make(map[string]string)\n\tfor i := 0; i < ncollections; i++ {\n\t\tmtxt := arvadostest.FakeManifest(1, nmounts, 2, 4<<20)\n\t\tpdh[i] = arvados.PortableDataHash(mtxt)\n\t\ts.cp.manifestCache[pdh[i]] = mtxt\n\t}\n\tfor i := 0; i < nmounts; i++ {\n\t\tfilename := fmt.Sprintf(\"file%d\", i)\n\t\ts.cp.mounts[path.Join(s.cp.ctrOutputDir, filename)] = arvados.Mount{\n\t\t\tKind:             \"collection\",\n\t\t\tPath:             fmt.Sprintf(\"dir0/dir%d/file%d\", i, i),\n\t\t\tPortableDataHash: pdh[i%ncollections],\n\t\t}\n\t}\n\terr := s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Assert(err, check.IsNil)\n\n\t// Files mounted under output dir have been copied from the\n\t// fake collections to s.cp.staged via Snapshot+Splice.\n\trootdir, err := s.cp.staged.Open(\".\")\n\tc.Assert(err, check.IsNil)\n\tdefer rootdir.Close()\n\tfis, err := rootdir.Readdir(-1)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(fis, check.HasLen, nmounts)\n\n\t// nmounts -- Δalloc before -> Δalloc after fixing #22827\n\t// 500 -- 1542 MB -> 15 MB\n\t// 200 -- 254 MB -> 5 MB\n\tvar memstats runtime.MemStats\n\truntime.ReadMemStats(&memstats)\n\tdelta := (int64(memstats.Alloc) - int64(memstats0.Alloc)) / 1000000\n\tc.Logf(\"Δalloc %d MB\", delta)\n\tc.Check(delta < 40, check.Equals, true, check.Commentf(\"Δalloc %d MB is suspiciously high, expect ~ 5 MB\", delta))\n}\n\nfunc (s *copierSuite) TestRegularFilesAndDirs(c *check.C) {\n\terr := os.MkdirAll(s.cp.hostOutputDir+\"/dir1/dir2/dir3\", 0755)\n\tc.Assert(err, check.IsNil)\n\tf, err := os.OpenFile(s.cp.hostOutputDir+\"/dir1/foo\", os.O_CREATE|os.O_WRONLY, 0644)\n\tc.Assert(err, check.IsNil)\n\t_, err = io.WriteString(f, \"foo\")\n\tc.Assert(err, check.IsNil)\n\tc.Assert(f.Close(), check.IsNil)\n\terr = syscall.Mkfifo(s.cp.hostOutputDir+\"/dir1/fifo\", 0644)\n\tc.Assert(err, check.IsNil)\n\n\terr = s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.IsNil)\n\tc.Check(s.cp.dirs, check.DeepEquals, []string{\"/dir1\", \"/dir1/dir2\", \"/dir1/dir2/dir3\"})\n\tc.Check(s.cp.files, check.DeepEquals, []filetodo{\n\t\t{src: os.DevNull, dst: \"/dir1/dir2/dir3/.keep\"},\n\t\t{src: s.cp.hostOutputDir + \"/dir1/foo\", dst: \"/dir1/foo\", size: 3},\n\t})\n\tc.Check(s.log.String(), check.Matches, `.* msg=\"Skipping unsupported file type \\(mode 200000644\\) in output dir: \\\\\"/ctr/outdir/dir1/fifo\\\\\"\"\\n`)\n}\n\nfunc (s *copierSuite) TestSymlinkCycle(c *check.C) {\n\tc.Assert(os.Mkdir(s.cp.hostOutputDir+\"/dir1\", 0755), check.IsNil)\n\tc.Assert(os.Mkdir(s.cp.hostOutputDir+\"/dir2\", 0755), check.IsNil)\n\tc.Assert(os.Symlink(\"../dir2\", s.cp.hostOutputDir+\"/dir1/l_dir2\"), check.IsNil)\n\tc.Assert(os.Symlink(\"../dir1\", s.cp.hostOutputDir+\"/dir2/l_dir1\"), check.IsNil)\n\terr := s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.ErrorMatches, `.*cycle.*`)\n}\n\nfunc (s *copierSuite) TestSymlinkTargetMissing(c *check.C) {\n\tc.Assert(os.Symlink(\"./missing\", s.cp.hostOutputDir+\"/symlink\"), check.IsNil)\n\terr := s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.ErrorMatches, 
`.*/ctr/outdir/missing.*`)\n}\n\nfunc (s *copierSuite) TestSymlinkTargetNotMounted(c *check.C) {\n\tc.Assert(os.Symlink(\"../boop\", s.cp.hostOutputDir+\"/symlink\"), check.IsNil)\n\terr := s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.ErrorMatches, `.*/ctr/boop.*`)\n}\n\nfunc (s *copierSuite) TestSymlinkToSecret(c *check.C) {\n\tc.Assert(os.Symlink(\"/secret_text\", s.cp.hostOutputDir+\"/symlink\"), check.IsNil)\n\terr := s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.IsNil)\n\tc.Check(len(s.cp.dirs), check.Equals, 0)\n\tc.Check(len(s.cp.files), check.Equals, 0)\n}\n\nfunc (s *copierSuite) TestSecretInOutputDir(c *check.C) {\n\ts.cp.secretMounts[\"/ctr/outdir/secret_text\"] = s.cp.secretMounts[\"/secret_text\"]\n\ts.writeFileInOutputDir(c, \"secret_text\", \"xyzzy\")\n\terr := s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.IsNil)\n\tc.Check(len(s.cp.dirs), check.Equals, 0)\n\tc.Check(len(s.cp.files), check.Equals, 0)\n}\n\nfunc (s *copierSuite) TestSymlinkToMountedCollection(c *check.C) {\n\t// simulate mounted read-only collection\n\ts.cp.mounts[\"/mnt\"] = arvados.Mount{\n\t\tKind:             \"collection\",\n\t\tPortableDataHash: arvadostest.FooCollectionPDH,\n\t}\n\n\t// simulate mounted writable collection\n\tbindtmp := c.MkDir()\n\tf, err := os.OpenFile(bindtmp+\"/.arvados#collection\", os.O_CREATE|os.O_WRONLY, 0644)\n\tc.Assert(err, check.IsNil)\n\t_, err = io.WriteString(f, `{\"manifest_text\":\". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"}`)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(f.Close(), check.IsNil)\n\ts.cp.mounts[\"/mnt-w\"] = arvados.Mount{\n\t\tKind:             \"collection\",\n\t\tPortableDataHash: arvadostest.FooCollectionPDH,\n\t\tWritable:         true,\n\t}\n\ts.cp.bindmounts = map[string]bindmount{\n\t\t\"/mnt-w\": bindmount{HostPath: bindtmp, ReadOnly: false},\n\t}\n\n\tc.Assert(os.Symlink(\"../../mnt\", s.cp.hostOutputDir+\"/l_dir\"), check.IsNil)\n\tc.Assert(os.Symlink(\"/mnt/foo\", s.cp.hostOutputDir+\"/l_file\"), check.IsNil)\n\tc.Assert(os.Symlink(\"/mnt-w/bar\", s.cp.hostOutputDir+\"/l_file_w\"), check.IsNil)\n\n\terr = s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.IsNil)\n\ts.checkStagedFile(c, \"l_dir/foo\", 3)\n\ts.checkStagedFile(c, \"l_file\", 3)\n\ts.checkStagedFile(c, \"l_file_w\", 3)\n}\n\nfunc (s *copierSuite) checkStagedFile(c *check.C, path string, size int64) {\n\tfi, err := s.cp.staged.Stat(path)\n\tif c.Check(err, check.IsNil) {\n\t\tc.Check(fi.Size(), check.Equals, size)\n\t}\n}\n\nfunc (s *copierSuite) TestSymlink(c *check.C) {\n\thostfile := s.cp.hostOutputDir + \"/dir1/file\"\n\n\terr := os.MkdirAll(s.cp.hostOutputDir+\"/dir1/dir2/dir3\", 0755)\n\tc.Assert(err, check.IsNil)\n\ts.writeFileInOutputDir(c, \"dir1/file\", \"file\")\n\tfor _, err := range []error{\n\t\tos.Symlink(s.cp.ctrOutputDir+\"/dir1/file\", s.cp.hostOutputDir+\"/l_abs_file\"),\n\t\tos.Symlink(s.cp.ctrOutputDir+\"/dir1/dir2\", s.cp.hostOutputDir+\"/l_abs_dir2\"),\n\t\tos.Symlink(\"../../dir1/file\", s.cp.hostOutputDir+\"/dir1/dir2/l_rel_file\"),\n\t\tos.Symlink(\"dir1/file\", s.cp.hostOutputDir+\"/l_rel_file\"),\n\t\tos.MkdirAll(s.cp.hostOutputDir+\"/morelinks\", 0755),\n\t\tos.Symlink(\"../dir1/dir2\", s.cp.hostOutputDir+\"/morelinks/l_rel_dir2\"),\n\t\tos.Symlink(\"dir1/dir2/dir3\", s.cp.hostOutputDir+\"/l_rel_dir3\"),\n\t\t// rel. symlink -> rel. 
symlink -> regular file\n\t\tos.Symlink(\"../dir1/dir2/l_rel_file\", s.cp.hostOutputDir+\"/morelinks/l_rel_l_rel_file\"),\n\t} {\n\t\tc.Assert(err, check.IsNil)\n\t}\n\n\terr = s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.IsNil)\n\tc.Check(s.cp.dirs, check.DeepEquals, []string{\n\t\t\"/dir1\", \"/dir1/dir2\", \"/dir1/dir2/dir3\",\n\t\t\"/l_abs_dir2\", \"/l_abs_dir2/dir3\",\n\t\t\"/l_rel_dir3\",\n\t\t\"/morelinks\", \"/morelinks/l_rel_dir2\", \"/morelinks/l_rel_dir2/dir3\",\n\t})\n\tc.Check(s.cp.files, check.DeepEquals, []filetodo{\n\t\t{dst: \"/dir1/dir2/dir3/.keep\", src: os.DevNull},\n\t\t{dst: \"/dir1/dir2/l_rel_file\", src: hostfile, size: 4},\n\t\t{dst: \"/dir1/file\", src: hostfile, size: 4},\n\t\t{dst: \"/l_abs_dir2/dir3/.keep\", src: os.DevNull},\n\t\t{dst: \"/l_abs_dir2/l_rel_file\", src: hostfile, size: 4},\n\t\t{dst: \"/l_abs_file\", src: hostfile, size: 4},\n\t\t{dst: \"/l_rel_dir3/.keep\", src: os.DevNull},\n\t\t{dst: \"/l_rel_file\", src: hostfile, size: 4},\n\t\t{dst: \"/morelinks/l_rel_dir2/dir3/.keep\", src: os.DevNull},\n\t\t{dst: \"/morelinks/l_rel_dir2/l_rel_file\", src: hostfile, size: 4},\n\t\t{dst: \"/morelinks/l_rel_l_rel_file\", src: hostfile, size: 4},\n\t})\n}\n\nfunc (s *copierSuite) TestUnsupportedOutputMount(c *check.C) {\n\ts.cp.mounts[\"/ctr/outdir\"] = arvados.Mount{Kind: \"waz\"}\n\terr := s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.NotNil)\n}\n\nfunc (s *copierSuite) TestUnsupportedMountKindBelow(c *check.C) {\n\ts.cp.mounts[\"/ctr/outdir/dirk\"] = arvados.Mount{Kind: \"waz\"}\n\terr := s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.NotNil)\n}\n\nfunc (s *copierSuite) TestWritableMountBelow(c *check.C) {\n\ts.cp.mounts[\"/ctr/outdir/mount\"] = arvados.Mount{\n\t\tKind:             \"collection\",\n\t\tPortableDataHash: arvadostest.FooCollectionPDH,\n\t\tWritable:         true,\n\t}\n\tc.Assert(os.MkdirAll(s.cp.hostOutputDir+\"/mount\", 0755), check.IsNil)\n\ts.writeFileInOutputDir(c, \"file\", \"file\")\n\ts.writeFileInOutputDir(c, \"mount/foo\", \"foo\")\n\n\terr := s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.IsNil)\n\tc.Check(s.cp.dirs, check.DeepEquals, []string{\"/mount\"})\n\tc.Check(s.cp.files, check.DeepEquals, []filetodo{\n\t\t{src: s.cp.hostOutputDir + \"/file\", dst: \"/file\", size: 4},\n\t\t{src: s.cp.hostOutputDir + \"/mount/foo\", dst: \"/mount/foo\", size: 3},\n\t})\n}\n\n// Check some glob-matching edge cases. 
In particular, check that\n// patterns like \"foo/**\" do not match regular files named \"foo\"\n// (unless of course they are inside a directory named \"foo\").\nfunc (s *copierSuite) TestMatchGlobs(c *check.C) {\n\ts.cp.globs = []string{\"foo*/**\"}\n\tc.Check(s.cp.matchGlobs(\"foo\", true), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"food\", true), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"foo\", false), check.Equals, false)\n\tc.Check(s.cp.matchGlobs(\"food\", false), check.Equals, false)\n\tc.Check(s.cp.matchGlobs(\"foo/bar\", false), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"food/bar\", false), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"foo/bar\", true), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"food/bar\", true), check.Equals, true)\n\n\ts.cp.globs = []string{\"ba[!/]/foo*/**\"}\n\tc.Check(s.cp.matchGlobs(\"bar/foo\", true), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"bar/food\", true), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"bar/foo\", false), check.Equals, false)\n\tc.Check(s.cp.matchGlobs(\"bar/food\", false), check.Equals, false)\n\tc.Check(s.cp.matchGlobs(\"bar/foo/z\\\\[\", true), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"bar/food/z\\\\[\", true), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"bar/foo/z\\\\[\", false), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"bar/food/z\\\\[\", false), check.Equals, true)\n\n\ts.cp.globs = []string{\"waz/**/foo*/**\"}\n\tc.Check(s.cp.matchGlobs(\"waz/quux/foo\", true), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"waz/quux/food\", true), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"waz/quux/foo\", false), check.Equals, false)\n\tc.Check(s.cp.matchGlobs(\"waz/quux/food\", false), check.Equals, false)\n\tc.Check(s.cp.matchGlobs(\"waz/quux/foo/foo\", true), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"waz/quux/food/foo\", true), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"waz/quux/foo/foo\", false), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"waz/quux/food/foo\", false), check.Equals, true)\n\n\ts.cp.globs = []string{\"foo/**/*\"}\n\tc.Check(s.cp.matchGlobs(\"foo\", false), check.Equals, false)\n\tc.Check(s.cp.matchGlobs(\"foo/bar\", false), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"foo/bar/baz\", false), check.Equals, true)\n\tc.Check(s.cp.matchGlobs(\"foo/bar/baz/waz\", false), check.Equals, true)\n}\n\nfunc (s *copierSuite) TestSubtreeCouldMatch(c *check.C) {\n\tfor _, trial := range []struct {\n\t\tmount string // relative to output dir\n\t\tglob  string\n\t\tcould bool\n\t}{\n\t\t{mount: \"abc\", glob: \"*\"},\n\t\t{mount: \"abc\", glob: \"abc/*\", could: true},\n\t\t{mount: \"abc\", glob: \"a*/**\", could: true},\n\t\t{mount: \"abc\", glob: \"**\", could: true},\n\t\t{mount: \"abc\", glob: \"*/*\", could: true},\n\t\t{mount: \"abc\", glob: \"**/*.txt\", could: true},\n\t\t{mount: \"abc/def\", glob: \"*\"},\n\t\t{mount: \"abc/def\", glob: \"*/*\"},\n\t\t{mount: \"abc/def\", glob: \"*/*.txt\"},\n\t\t{mount: \"abc/def\", glob: \"*/*/*\", could: true},\n\t\t{mount: \"abc/def\", glob: \"**\", could: true},\n\t\t{mount: \"abc/def\", glob: \"**/bar\", could: true},\n\t\t{mount: \"abc/def\", glob: \"abc/**\", could: true},\n\t\t{mount: \"abc/def/ghi\", glob: \"*c/**/bar\", could: true},\n\t\t{mount: \"abc/def/ghi\", glob: \"*c/*f/bar\"},\n\t\t{mount: \"abc/def/ghi\", glob: \"abc/d[^/]f/ghi/*\", could: true},\n\t} {\n\t\tc.Logf(\"=== %+v\", trial)\n\t\tgot := (&copier{\n\t\t\tglobs: []string{trial.glob},\n\t\t}).subtreeCouldMatch(trial.mount)\n\t\tc.Check(got, 
check.Equals, trial.could)\n\t}\n}\n\nfunc (s *copierSuite) TestCopyFromLargeCollection_Readonly(c *check.C) {\n\ts.testCopyFromLargeCollection(c, false)\n}\n\nfunc (s *copierSuite) TestCopyFromLargeCollection_Writable(c *check.C) {\n\ts.testCopyFromLargeCollection(c, true)\n}\n\nfunc (s *copierSuite) testCopyFromLargeCollection(c *check.C, writable bool) {\n\tbindtmp := c.MkDir()\n\tmtxt := arvadostest.FakeManifest(100, 100, 2, 4<<20)\n\tpdh := arvados.PortableDataHash(mtxt)\n\tjson, err := json.Marshal(arvados.Collection{ManifestText: mtxt, PortableDataHash: pdh})\n\tc.Assert(err, check.IsNil)\n\terr = os.WriteFile(bindtmp+\"/.arvados#collection\", json, 0644)\n\t// This symlink tricks walkHostFS into calling walkMount on\n\t// the fakecollection dir. If we did the obvious thing instead\n\t// (i.e., mount a collection under the output dir) walkMount\n\t// would see that our fakecollection dir is actually a regular\n\t// directory, conclude that the mount has been deleted and\n\t// replaced by a regular directory tree, and process the tree\n\t// as regular files, bypassing the manifest-copying code path\n\t// we're trying to test.\n\terr = os.Symlink(\"/fakecollection\", s.cp.hostOutputDir+\"/fakecollection\")\n\tc.Assert(err, check.IsNil)\n\ts.cp.mounts[\"/fakecollection\"] = arvados.Mount{\n\t\tKind:             \"collection\",\n\t\tPortableDataHash: pdh,\n\t\tWritable:         writable,\n\t}\n\ts.cp.bindmounts = map[string]bindmount{\n\t\t\"/fakecollection\": bindmount{HostPath: bindtmp, ReadOnly: !writable},\n\t}\n\ts.cp.manifestCache = map[string]string{pdh: mtxt}\n\terr = s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.IsNil)\n\tc.Log(s.log.String())\n\n\t// Check some files to ensure they were copied properly.\n\t// Specifically, arbitrarily check every 17th file in every\n\t// 13th dir.  (This is better than checking all of the files\n\t// only in that it's less likely to show up as a distracting\n\t// signal in CPU profiling.)\n\tfor i := 0; i < 100; i += 13 {\n\t\tfor j := 0; j < 100; j += 17 {\n\t\t\tfnm := fmt.Sprintf(\"/fakecollection/dir%d/dir%d/file%d\", i, j, j)\n\t\t\t_, err := s.cp.staged.Stat(fnm)\n\t\t\tc.Assert(err, check.IsNil, check.Commentf(\"%s\", fnm))\n\t\t}\n\t}\n}\n\nfunc (s *copierSuite) TestMountBelowExcludedByGlob(c *check.C) {\n\tbindtmp := c.MkDir()\n\ts.cp.mounts[\"/ctr/outdir/include/includer\"] = arvados.Mount{\n\t\tKind:             \"collection\",\n\t\tPortableDataHash: arvadostest.FooCollectionPDH,\n\t}\n\ts.cp.mounts[\"/ctr/outdir/include/includew\"] = arvados.Mount{\n\t\tKind:             \"collection\",\n\t\tPortableDataHash: arvadostest.FooCollectionPDH,\n\t\tWritable:         true,\n\t}\n\ts.cp.mounts[\"/ctr/outdir/exclude/excluder\"] = arvados.Mount{\n\t\tKind:             \"collection\",\n\t\tPortableDataHash: arvadostest.FooCollectionPDH,\n\t}\n\ts.cp.mounts[\"/ctr/outdir/exclude/excludew\"] = arvados.Mount{\n\t\tKind:             \"collection\",\n\t\tPortableDataHash: arvadostest.FooCollectionPDH,\n\t\tWritable:         true,\n\t}\n\ts.cp.mounts[\"/ctr/outdir/nonexistent/collection\"] = arvados.Mount{\n\t\t// As extra assurance, plant a collection that will\n\t\t// fail if copier attempts to load its manifest.  
(For\n\t\t// performance reasons it's important that copier\n\t\t// doesn't try to load the manifest before deciding\n\t\t// not to copy the contents.)\n\t\tKind:             \"collection\",\n\t\tPortableDataHash: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1234\",\n\t}\n\ts.cp.globs = []string{\n\t\t\"?ncl*/*r/*\",\n\t\t\"*/?ncl*/**\",\n\t}\n\tc.Assert(os.MkdirAll(s.cp.hostOutputDir+\"/include/includer\", 0755), check.IsNil)\n\tc.Assert(os.MkdirAll(s.cp.hostOutputDir+\"/include/includew\", 0755), check.IsNil)\n\tc.Assert(os.MkdirAll(s.cp.hostOutputDir+\"/exclude/excluder\", 0755), check.IsNil)\n\tc.Assert(os.MkdirAll(s.cp.hostOutputDir+\"/exclude/excludew\", 0755), check.IsNil)\n\ts.writeFileInOutputDir(c, \"include/includew/foo\", \"foo\")\n\ts.writeFileInOutputDir(c, \"exclude/excludew/foo\", \"foo\")\n\ts.cp.bindmounts = map[string]bindmount{\n\t\t\"/ctr/outdir/include/includew\": bindmount{HostPath: bindtmp, ReadOnly: false},\n\t}\n\ts.cp.bindmounts = map[string]bindmount{\n\t\t\"/ctr/outdir/include/excludew\": bindmount{HostPath: bindtmp, ReadOnly: false},\n\t}\n\n\terr := s.cp.walkMount(\"\", s.cp.ctrOutputDir, 10, true)\n\tc.Check(err, check.IsNil)\n\tc.Log(s.log.String())\n\n\t// Note it's OK that \"/exclude\" is not excluded by walkMount:\n\t// it is just a local filesystem directory, not a mount point\n\t// that's expensive to walk.  In real-life usage, it will be\n\t// removed from cp.dirs before any copying happens.\n\tc.Check(s.cp.dirs, check.DeepEquals, []string{\"/exclude\", \"/include\", \"/include/includew\"})\n\tc.Check(s.cp.files, check.DeepEquals, []filetodo{\n\t\t{src: s.cp.hostOutputDir + \"/include/includew/foo\", dst: \"/include/includew/foo\", size: 3},\n\t})\n\tmanifest, err := s.cp.staged.MarshalManifest(\".\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(manifest, check.Matches, `(?ms).*\\./include/includer .*`)\n\tc.Check(manifest, check.Not(check.Matches), `(?ms).*exclude.*`)\n\tc.Check(s.log.String(), check.Matches, `(?ms).*not copying \\\\\"exclude/excluder\\\\\".*`)\n\tc.Check(s.log.String(), check.Matches, `(?ms).*not copying \\\\\"nonexistent/collection\\\\\".*`)\n}\n\nfunc (s *copierSuite) writeFileInOutputDir(c *check.C, path, data string) {\n\tf, err := os.OpenFile(s.cp.hostOutputDir+\"/\"+path, os.O_CREATE|os.O_WRONLY, 0644)\n\tc.Assert(err, check.IsNil)\n\t_, err = io.WriteString(f, data)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(f.Close(), check.IsNil)\n}\n\n// applyGlobsToFilesAndDirs uses the same glob-matching code as\n// applyGlobsToStaged, so we don't need to test all of the same\n// glob-matching behavior covered in TestApplyGlobsToCollectionFS.  
We\n// do need to check that (a) the glob is actually being used to filter\n// out files, and (b) non-matching dirs still included if and only if\n// they are ancestors of matching files.\nfunc (s *copierSuite) TestApplyGlobsToFilesAndDirs(c *check.C) {\n\tdirs := []string{\"dir1\", \"dir1/dir11\", \"dir1/dir12\", \"dir2\"}\n\tfiles := []string{\"dir1/file11\", \"dir1/dir11/file111\", \"dir2/file2\"}\n\tfor _, trial := range []struct {\n\t\tglobs []string\n\t\tdirs  []string\n\t\tfiles []string\n\t}{\n\t\t{\n\t\t\tglobs: []string{},\n\t\t\tdirs:  append([]string{}, dirs...),\n\t\t\tfiles: append([]string{}, files...),\n\t\t},\n\t\t{\n\t\t\tglobs: []string{\"**\"},\n\t\t\tdirs:  append([]string{}, dirs...),\n\t\t\tfiles: append([]string{}, files...),\n\t\t},\n\t\t{\n\t\t\tglobs: []string{\"**/file111\"},\n\t\t\tdirs:  []string{\"dir1\", \"dir1/dir11\"},\n\t\t\tfiles: []string{\"dir1/dir11/file111\"},\n\t\t},\n\t\t{\n\t\t\tglobs: []string{\"nothing\"},\n\t\t\tdirs:  nil,\n\t\t\tfiles: nil,\n\t\t},\n\t\t{\n\t\t\tglobs: []string{\"**/dir12\"},\n\t\t\tdirs:  []string{\"dir1\", \"dir1/dir12\"},\n\t\t\tfiles: nil,\n\t\t},\n\t\t{\n\t\t\tglobs: []string{\"**/file*\"},\n\t\t\tdirs:  []string{\"dir1\", \"dir1/dir11\", \"dir2\"},\n\t\t\tfiles: append([]string{}, files...),\n\t\t},\n\t\t{\n\t\t\tglobs: []string{\"**/dir1[12]\"},\n\t\t\tdirs:  []string{\"dir1\", \"dir1/dir11\", \"dir1/dir12\"},\n\t\t\tfiles: nil,\n\t\t},\n\t\t{\n\t\t\tglobs: []string{\"**/dir1[^2]\"},\n\t\t\tdirs:  []string{\"dir1\", \"dir1/dir11\"},\n\t\t\tfiles: nil,\n\t\t},\n\t\t{\n\t\t\tglobs: []string{\"dir1/**\"},\n\t\t\tdirs:  []string{\"dir1\", \"dir1/dir11\", \"dir1/dir12\"},\n\t\t\tfiles: []string{\"dir1/file11\", \"dir1/dir11/file111\"},\n\t\t},\n\t} {\n\t\tc.Logf(\"=== globs: %q\", trial.globs)\n\t\tcp := copier{\n\t\t\tglobs: trial.globs,\n\t\t\tdirs:  dirs,\n\t\t}\n\t\tfor _, path := range files {\n\t\t\tcp.files = append(cp.files, filetodo{dst: path})\n\t\t}\n\t\tcp.applyGlobsToFilesAndDirs()\n\t\tvar gotFiles []string\n\t\tfor _, file := range cp.files {\n\t\t\tgotFiles = append(gotFiles, file.dst)\n\t\t}\n\t\tc.Check(cp.dirs, check.DeepEquals, trial.dirs)\n\t\tc.Check(gotFiles, check.DeepEquals, trial.files)\n\t}\n}\n\nfunc (s *copierSuite) TestApplyGlobsToCollectionFS(c *check.C) {\n\tfor _, trial := range []struct {\n\t\tglobs  []string\n\t\texpect []string\n\t}{\n\t\t{\n\t\t\tglobs:  nil,\n\t\t\texpect: []string{\"foo\", \"bar\", \"baz/quux\", \"baz/parent1/item1\"},\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"foo\"},\n\t\t\texpect: []string{\"foo\"},\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"baz/parent1/item1\"},\n\t\t\texpect: []string{\"baz/parent1/item1\"},\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"**\"},\n\t\t\texpect: []string{\"foo\", \"bar\", \"baz/quux\", \"baz/parent1/item1\"},\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"**/*\"},\n\t\t\texpect: []string{\"foo\", \"bar\", \"baz/quux\", \"baz/parent1/item1\"},\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"*\"},\n\t\t\texpect: []string{\"foo\", \"bar\"},\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"baz\"},\n\t\t\texpect: nil,\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"b*/**\"},\n\t\t\texpect: []string{\"baz/quux\", \"baz/parent1/item1\"},\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"baz\"},\n\t\t\texpect: nil,\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"baz/**\"},\n\t\t\texpect: []string{\"baz/quux\", \"baz/parent1/item1\"},\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"baz/*\"},\n\t\t\texpect: []string{\"baz/quux\"},\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"baz/**/*uu?\"},\n\t\t\texpect: 
[]string{\"baz/quux\"},\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"**/*m1\"},\n\t\t\texpect: []string{\"baz/parent1/item1\"},\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"*/*/*/**/*1\"},\n\t\t\texpect: nil,\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"f*\", \"**/q*\"},\n\t\t\texpect: []string{\"foo\", \"baz/quux\"},\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"\\\\\"}, // invalid pattern matches nothing\n\t\t\texpect: nil,\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"\\\\\", \"foo\"},\n\t\t\texpect: []string{\"foo\"},\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"foo/**\"},\n\t\t\texpect: nil,\n\t\t},\n\t\t{\n\t\t\tglobs:  []string{\"foo*/**\"},\n\t\t\texpect: nil,\n\t\t},\n\t} {\n\t\tc.Logf(\"=== globs: %q\", trial.globs)\n\t\tcollfs, err := (&arvados.Collection{ManifestText: \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo 0:0:bar 0:0:baz/quux 0:0:baz/parent1/item1\\n\"}).FileSystem(nil, nil)\n\t\tc.Assert(err, check.IsNil)\n\t\tcp := copier{globs: trial.globs, staged: collfs}\n\t\terr = cp.applyGlobsToStaged()\n\t\tif !c.Check(err, check.IsNil) {\n\t\t\tcontinue\n\t\t}\n\t\tvar got []string\n\t\tfs.WalkDir(arvados.FS(collfs), \"\", func(path string, ent fs.DirEntry, err error) error {\n\t\t\tif !ent.IsDir() {\n\t\t\t\tgot = append(got, path)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tsort.Strings(got)\n\t\tsort.Strings(trial.expect)\n\t\tc.Check(got, check.DeepEquals, trial.expect)\n\t}\n}\n"
  },
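  {
    "path": "examples/crunchrun/flush-heuristic/main.go",
    "content": "// Illustrative sketch -- not part of the Arvados source tree.\n//\n// While copying files into the staged output collection, copier.Copy\n// (lib/crunchrun/copier.go) flushes written data as it goes: a full\n// flush (committing even the final short block) when a directory's\n// last file has been added, and a partial flush (full-size blocks\n// only, leaving the last short block open so the next file's data can\n// be packed with it) once more than one block of data is pending.\n// This program replays that bookkeeping over a sorted file list;\n// filetodo, blockSize, and the printed \"flush\" events are\n// hypothetical stand-ins for cp.files, keepclient.BLOCKSIZE, and\n// CollectionFileSystem.Flush.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"path/filepath\"\n)\n\ntype filetodo struct {\n\tdst  string // destination path, e.g. \"/dir1/foo\"\n\tsize int64\n}\n\nconst blockSize = 64 << 20 // stand-in for keepclient.BLOCKSIZE (64 MiB)\n\nfunc main() {\n\tfiles := []filetodo{ // already sorted by destination path\n\t\t{\"/a/one\", 40 << 20},\n\t\t{\"/a/three\", 40 << 20},\n\t\t{\"/a/two\", 10 << 20},\n\t\t{\"/b/four\", 1 << 20},\n\t}\n\tvar unflushed int64\n\tvar lastparentdir string\n\tfor _, f := range files {\n\t\tdir, _ := filepath.Split(f.dst)\n\t\tif dir != lastparentdir || unflushed > blockSize {\n\t\t\t// full=true only when we have moved on to a new\n\t\t\t// directory, i.e. lastparentdir is complete.\n\t\t\tfmt.Printf(\"flush %q full=%v\\n\", lastparentdir, dir != lastparentdir)\n\t\t\tunflushed = 0\n\t\t}\n\t\tlastparentdir = dir\n\t\tunflushed += f.size\n\t}\n\t// The real copier leaves the final flush to MarshalManifest.\n}\n"
  },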
  {
    "path": "lib/crunchrun/crunchrun.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/exec\"\n\t\"os/signal\"\n\t\"os/user\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"runtime/pprof\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/crunchstat\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\t\"golang.org/x/sys/unix\"\n)\n\ntype command struct{}\n\nvar arvadosCertPath = \"/etc/arvados/ca-certificates.crt\"\n\nvar Command = command{}\n\n// ConfigData contains environment variables and (when needed) cluster\n// configuration, passed from dispatchcloud to crunch-run on stdin.\ntype ConfigData struct {\n\tEnv          map[string]string\n\tKeepBuffers  int\n\tEC2SpotCheck bool\n\tCluster      *arvados.Cluster\n}\n\n// IArvadosClient is the minimal Arvados API methods used by crunch-run.\ntype IArvadosClient interface {\n\tCreate(resourceType string, parameters arvadosclient.Dict, output interface{}) error\n\tGet(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error\n\tUpdate(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error\n\tCall(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error\n\tCallRaw(method string, resourceType string, uuid string, action string, parameters arvadosclient.Dict) (reader io.ReadCloser, err error)\n\tDiscovery(key string) (interface{}, error)\n}\n\n// ErrCancelled is the error returned when the container is cancelled.\nvar ErrCancelled = errors.New(\"Cancelled\")\n\n// IKeepClient is the minimal Keep API methods used by crunch-run.\ntype IKeepClient interface {\n\tBlockRead(context.Context, arvados.BlockReadOptions) (int, error)\n\tBlockWrite(context.Context, arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error)\n\tReadAt(locator string, p []byte, off int) (int, error)\n\tLocalLocator(locator string) (string, error)\n\tSetStorageClasses(sc []string)\n}\n\ntype RunArvMount func(cmdline []string, tok string) (*exec.Cmd, error)\n\ntype MkTempDir func(string, string) (string, error)\n\ntype PsProcess interface {\n\tCmdlineSlice() ([]string, error)\n}\n\n// ContainerRunner is the main stateful struct used for a single execution of a\n// container.\ntype ContainerRunner struct {\n\texecutor       containerExecutor\n\texecutorStdin  io.Closer\n\texecutorStdout io.Closer\n\texecutorStderr io.Closer\n\n\t// Dispatcher client is initialized with the Dispatcher token.\n\t// This is a privileged token used to manage container status\n\t// and logs.\n\t//\n\t// We have both dispatcherClient and DispatcherArvClient\n\t// because there are two different incompatible Arvados Go\n\t// SDKs and we have to use both (hopefully this gets fixed in\n\t// #14467)\n\tdispatcherClient     *arvados.Client\n\tDispatcherArvClient  IArvadosClient\n\tDispatcherKeepClient IKeepClient\n\n\t// Container client is initialized with the Container token\n\t// This 
token controls the permissions of the container, and\n\t// must be used for operations such as reading collections.\n\t//\n\t// Same comment as above applies to\n\t// containerClient/ContainerArvClient.\n\tcontainerClient     *arvados.Client\n\tContainerArvClient  IArvadosClient\n\tContainerKeepClient IKeepClient\n\n\tContainer     arvados.Container\n\ttoken         string\n\tExitCode      *int\n\tCrunchLog     *logWriter\n\tlogUUID       string\n\tlogPDH        string\n\tlogMtx        sync.Mutex\n\tLogCollection arvados.CollectionFileSystem\n\tlogPDHFinal   *string\n\tRunArvMount   RunArvMount\n\tMkTempDir     MkTempDir\n\tArvMount      *exec.Cmd\n\tArvMountPoint string\n\tHostOutputDir string\n\tVolumes       map[string]struct{}\n\tOutputPDH     *string\n\tSigChan       chan os.Signal\n\tArvMountExit  chan error\n\tSecretMounts  map[string]arvados.Mount\n\tMkArvClient   func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error)\n\tfinalState    string\n\tparentTemp    string\n\tcostStartTime time.Time\n\n\tkeepstore        *exec.Cmd\n\tkeepstoreLogger  io.WriteCloser\n\tkeepstoreLogbuf  *bufThenWrite\n\tstatLogger       io.WriteCloser\n\tstatReporter     *crunchstat.Reporter\n\thoststatLogger   io.WriteCloser\n\thoststatReporter *crunchstat.Reporter\n\tstatInterval     time.Duration\n\t// What we tell docker to use as the container's cgroup\n\t// parent.\n\tsetCgroupParent string\n\t// Fake root dir where crunchstat.Reporter should read OS\n\t// files, for testing.\n\tcrunchstatFakeFS fs.FS\n\n\tcStateLock sync.Mutex\n\tcCancelled bool // StopContainer() invoked\n\n\tenableMemoryLimit bool\n\tenableNetwork     string // one of \"default\" or \"always\"\n\tnetworkMode       string // \"none\", \"host\", or \"\" -- passed through to executor\n\tbrokenNodeHook    string // script to run if node appears to be broken\n\tarvMountLog       io.WriteCloser\n\n\tcontainerWatchdogInterval time.Duration\n\n\tgateway Gateway\n\n\tprices     []cloud.InstancePrice\n\tpricesLock sync.Mutex\n}\n\n// setupSignals sets up signal handling to gracefully terminate the\n// underlying container and update state when receiving a TERM, INT or\n// QUIT signal.\nfunc (runner *ContainerRunner) setupSignals() {\n\trunner.SigChan = make(chan os.Signal, 1)\n\tsignal.Notify(runner.SigChan, syscall.SIGTERM)\n\tsignal.Notify(runner.SigChan, syscall.SIGINT)\n\tsignal.Notify(runner.SigChan, syscall.SIGQUIT)\n\n\tgo func(sig chan os.Signal) {\n\t\tfor s := range sig {\n\t\t\trunner.stop(s)\n\t\t}\n\t}(runner.SigChan)\n}\n\n// stop the underlying container.\nfunc (runner *ContainerRunner) stop(sig os.Signal) {\n\trunner.cStateLock.Lock()\n\tdefer runner.cStateLock.Unlock()\n\tif sig != nil {\n\t\trunner.CrunchLog.Printf(\"caught signal: %v\", sig)\n\t}\n\trunner.cCancelled = true\n\trunner.CrunchLog.Printf(\"stopping container\")\n\terr := runner.executor.Stop()\n\tif err != nil {\n\t\trunner.CrunchLog.Printf(\"error stopping container: %s\", err)\n\t}\n}\n\nvar errorBlacklist = []string{\n\t\"(?ms).*[Cc]annot connect to the Docker daemon.*\",\n\t\"(?ms).*oci runtime error.*starting container process.*container init.*mounting.*to rootfs.*no such file or directory.*\",\n\t\"(?ms).*grpc: the connection is unavailable.*\",\n\t\"(?ms).*no space left on device.*\",\n}\n\nfunc (runner *ContainerRunner) runBrokenNodeHook() {\n\tif runner.brokenNodeHook == \"\" {\n\t\tpath := filepath.Join(lockdir, brokenfile)\n\t\trunner.CrunchLog.Printf(\"Writing %s to mark node as broken\", path)\n\t\tf, err := os.OpenFile(path, 
os.O_CREATE|os.O_RDWR, 0700)\n\t\tif err != nil {\n\t\t\trunner.CrunchLog.Printf(\"Error writing %s: %s\", path, err)\n\t\t\treturn\n\t\t}\n\t\tf.Close()\n\t} else {\n\t\trunner.CrunchLog.Printf(\"Running broken node hook %q\", runner.brokenNodeHook)\n\t\t// run killme script\n\t\tc := exec.Command(runner.brokenNodeHook)\n\t\tc.Stdout = runner.CrunchLog\n\t\tc.Stderr = runner.CrunchLog\n\t\terr := c.Run()\n\t\tif err != nil {\n\t\t\trunner.CrunchLog.Printf(\"Error running broken node hook: %v\", err)\n\t\t}\n\t}\n}\n\nfunc (runner *ContainerRunner) checkBrokenNode(goterr error) bool {\n\tfor _, d := range errorBlacklist {\n\t\tif m, e := regexp.MatchString(d, goterr.Error()); m && e == nil {\n\t\t\trunner.CrunchLog.Printf(\"Error suggests node is unable to run containers: %v\", goterr)\n\t\t\trunner.runBrokenNodeHook()\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// LoadImage determines the docker image id from the container record and\n// checks if it is available in the local Docker image store.  If not, it loads\n// the image from Keep.\nfunc (runner *ContainerRunner) LoadImage() (string, error) {\n\trunner.CrunchLog.Printf(\"Fetching Docker image from collection '%s'\", runner.Container.ContainerImage)\n\n\td, err := os.Open(runner.ArvMountPoint + \"/by_id/\" + runner.Container.ContainerImage)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer d.Close()\n\tallfiles, err := d.Readdirnames(-1)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar tarfiles []string\n\tfor _, fnm := range allfiles {\n\t\tif strings.HasSuffix(fnm, \".tar\") {\n\t\t\ttarfiles = append(tarfiles, fnm)\n\t\t}\n\t}\n\tif len(tarfiles) == 0 {\n\t\treturn \"\", fmt.Errorf(\"image collection does not include a .tar image file\")\n\t}\n\tif len(tarfiles) > 1 {\n\t\treturn \"\", fmt.Errorf(\"cannot choose from multiple tar files in image collection: %v\", tarfiles)\n\t}\n\timageID := tarfiles[0][:len(tarfiles[0])-4]\n\timageTarballPath := runner.ArvMountPoint + \"/by_id/\" + runner.Container.ContainerImage + \"/\" + imageID + \".tar\"\n\trunner.CrunchLog.Printf(\"Using Docker image id %q\", imageID)\n\n\trunner.CrunchLog.Print(\"Loading Docker image from keep\")\n\terr = runner.executor.LoadImage(imageID, imageTarballPath, runner.Container, runner.ArvMountPoint,\n\t\trunner.containerClient)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn imageID, nil\n}\n\nfunc (runner *ContainerRunner) ArvMountCmd(cmdline []string, token string) (c *exec.Cmd, err error) {\n\tc = exec.Command(cmdline[0], cmdline[1:]...)\n\n\t// Copy our environment, but override ARVADOS_API_TOKEN with\n\t// the container auth token.\n\tc.Env = nil\n\tfor _, s := range os.Environ() {\n\t\tif !strings.HasPrefix(s, \"ARVADOS_API_TOKEN=\") {\n\t\t\tc.Env = append(c.Env, s)\n\t\t}\n\t}\n\tc.Env = append(c.Env, \"ARVADOS_API_TOKEN=\"+token)\n\n\trunner.arvMountLog, err = runner.openLogFile(\"arv-mount\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscanner := logScanner{\n\t\tPatterns: []string{\n\t\t\t\"Keep write error\",\n\t\t\t\"Block not found error\",\n\t\t\t\"Unhandled exception during FUSE operation\",\n\t\t},\n\t\tReportFunc: func(pattern, text string) {\n\t\t\trunner.updateRuntimeStatus(arvadosclient.Dict{\n\t\t\t\t\"warning\":       \"arv-mount: \" + pattern,\n\t\t\t\t\"warningDetail\": text,\n\t\t\t})\n\t\t},\n\t}\n\tc.Stdout = newTimestamper(io.MultiWriter(runner.arvMountLog, os.Stderr))\n\tc.Stderr = io.MultiWriter(&scanner, newTimestamper(io.MultiWriter(runner.arvMountLog, 
os.Stderr)))\n\n\trunner.CrunchLog.Printf(\"Running %v\", c.Args)\n\n\terr = c.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstatReadme := make(chan bool)\n\trunner.ArvMountExit = make(chan error)\n\n\tkeepStatting := true\n\tgo func() {\n\t\tfor keepStatting {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t_, err = os.Stat(fmt.Sprintf(\"%s/by_id/README\", runner.ArvMountPoint))\n\t\t\tif err == nil {\n\t\t\t\tkeepStatting = false\n\t\t\t\tstatReadme <- true\n\t\t\t}\n\t\t}\n\t\tclose(statReadme)\n\t}()\n\n\tgo func() {\n\t\tmnterr := c.Wait()\n\t\tif mnterr != nil {\n\t\t\trunner.CrunchLog.Printf(\"Arv-mount exit error: %v\", mnterr)\n\t\t}\n\t\trunner.ArvMountExit <- mnterr\n\t\tclose(runner.ArvMountExit)\n\t}()\n\n\tselect {\n\tcase <-statReadme:\n\t\tbreak\n\tcase err := <-runner.ArvMountExit:\n\t\trunner.ArvMount = nil\n\t\tkeepStatting = false\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}\n\nfunc (runner *ContainerRunner) SetupArvMountPoint(prefix string) (err error) {\n\tif runner.ArvMountPoint == \"\" {\n\t\trunner.ArvMountPoint, err = runner.MkTempDir(runner.parentTemp, prefix)\n\t}\n\treturn\n}\n\nfunc copyfile(src string, dst string) (err error) {\n\tsrcfile, err := os.Open(src)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tos.MkdirAll(path.Dir(dst), 0777)\n\n\tdstfile, err := os.Create(dst)\n\tif err != nil {\n\t\treturn\n\t}\n\t_, err = io.Copy(dstfile, srcfile)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = srcfile.Close()\n\terr2 := dstfile.Close()\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif err2 != nil {\n\t\treturn err2\n\t}\n\n\treturn nil\n}\n\nfunc (runner *ContainerRunner) SetupMounts() (map[string]bindmount, error) {\n\tbindmounts := map[string]bindmount{}\n\terr := runner.SetupArvMountPoint(\"keep\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"While creating keep mount temp dir: %v\", err)\n\t}\n\n\ttoken, err := runner.ContainerToken()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not get container token: %s\", err)\n\t}\n\trunner.CrunchLog.Printf(\"container token %q\", token)\n\n\tpdhOnly := true\n\ttmpcount := 0\n\tarvMountCmd := []string{\n\t\t\"arv-mount\",\n\t\t\"--foreground\",\n\t\t\"--read-write\",\n\t\t\"--storage-classes\", strings.Join(runner.Container.OutputStorageClasses, \",\"),\n\t\tfmt.Sprintf(\"--crunchstat-interval=%v\", runner.statInterval.Seconds())}\n\n\tif _, isdocker := runner.executor.(*dockerExecutor); isdocker {\n\t\tarvMountCmd = append(arvMountCmd, \"--allow-other\")\n\t}\n\n\tif runner.Container.RuntimeConstraints.KeepCacheDisk > 0 {\n\t\tkeepcachedir, err := runner.MkTempDir(runner.parentTemp, \"keepcache\")\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"while creating keep cache temp dir: %v\", err)\n\t\t}\n\t\tarvMountCmd = append(arvMountCmd, \"--disk-cache\", \"--disk-cache-dir\", keepcachedir, \"--file-cache\", fmt.Sprintf(\"%d\", runner.Container.RuntimeConstraints.KeepCacheDisk))\n\t} else if runner.Container.RuntimeConstraints.KeepCacheRAM > 0 {\n\t\tarvMountCmd = append(arvMountCmd, \"--ram-cache\", \"--file-cache\", fmt.Sprintf(\"%d\", runner.Container.RuntimeConstraints.KeepCacheRAM))\n\t}\n\n\tcollectionPaths := []string{}\n\tneedCertMount := true\n\ttype copyFile struct {\n\t\tsrc  string\n\t\tbind string\n\t}\n\tvar copyFiles []copyFile\n\n\tvar binds []string\n\tfor bind := range runner.Container.Mounts {\n\t\tbinds = append(binds, bind)\n\t}\n\tfor bind := range runner.SecretMounts {\n\t\tif _, ok := runner.Container.Mounts[bind]; ok {\n\t\t\treturn nil, fmt.Errorf(\"secret mount %q 
conflicts with regular mount\", bind)\n\t\t}\n\t\tif runner.SecretMounts[bind].Kind != \"json\" &&\n\t\t\trunner.SecretMounts[bind].Kind != \"text\" {\n\t\t\treturn nil, fmt.Errorf(\"secret mount %q type is %q but only 'json' and 'text' are permitted\",\n\t\t\t\tbind, runner.SecretMounts[bind].Kind)\n\t\t}\n\t\tbinds = append(binds, bind)\n\t}\n\tsort.Strings(binds)\n\n\tfor _, bind := range binds {\n\t\tmnt, notSecret := runner.Container.Mounts[bind]\n\t\tif !notSecret {\n\t\t\tmnt = runner.SecretMounts[bind]\n\t\t}\n\t\tif bind == \"stdout\" || bind == \"stderr\" {\n\t\t\t// Is it a \"file\" mount kind?\n\t\t\tif mnt.Kind != \"file\" {\n\t\t\t\treturn nil, fmt.Errorf(\"unsupported mount kind '%s' for %s: only 'file' is supported\", mnt.Kind, bind)\n\t\t\t}\n\n\t\t\t// Does path start with OutputPath?\n\t\t\tprefix := runner.Container.OutputPath\n\t\t\tif !strings.HasSuffix(prefix, \"/\") {\n\t\t\t\tprefix += \"/\"\n\t\t\t}\n\t\t\tif !strings.HasPrefix(mnt.Path, prefix) {\n\t\t\t\treturn nil, fmt.Errorf(\"%s path does not start with OutputPath: %s, %s\", strings.Title(bind), mnt.Path, prefix)\n\t\t\t}\n\t\t}\n\n\t\tif bind == \"stdin\" {\n\t\t\t// Is it a \"collection\" mount kind?\n\t\t\tif mnt.Kind != \"collection\" && mnt.Kind != \"json\" {\n\t\t\t\treturn nil, fmt.Errorf(\"unsupported mount kind '%s' for stdin: only 'collection' and 'json' are supported\", mnt.Kind)\n\t\t\t}\n\t\t}\n\n\t\tif bind == arvadosCertPath {\n\t\t\tneedCertMount = false\n\t\t}\n\n\t\tif strings.HasPrefix(bind, runner.Container.OutputPath+\"/\") && bind != runner.Container.OutputPath+\"/\" {\n\t\t\tif mnt.Kind != \"collection\" && mnt.Kind != \"text\" && mnt.Kind != \"json\" {\n\t\t\t\treturn nil, fmt.Errorf(\"only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path for %q, was %q\", bind, mnt.Kind)\n\t\t\t}\n\t\t}\n\n\t\tswitch {\n\t\tcase mnt.Kind == \"collection\" && bind != \"stdin\":\n\t\t\tvar src string\n\t\t\tif mnt.UUID != \"\" && mnt.PortableDataHash != \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot specify both 'uuid' and 'portable_data_hash' for a collection mount\")\n\t\t\t}\n\t\t\tif mnt.UUID != \"\" {\n\t\t\t\tif mnt.Writable {\n\t\t\t\t\treturn nil, fmt.Errorf(\"writing to existing collections currently not permitted\")\n\t\t\t\t}\n\t\t\t\tpdhOnly = false\n\t\t\t\tsrc = fmt.Sprintf(\"%s/by_id/%s\", runner.ArvMountPoint, mnt.UUID)\n\t\t\t} else if mnt.PortableDataHash != \"\" {\n\t\t\t\tif mnt.Writable && !strings.HasPrefix(bind, runner.Container.OutputPath+\"/\") {\n\t\t\t\t\treturn nil, fmt.Errorf(\"can never write to a collection specified by portable data hash\")\n\t\t\t\t}\n\t\t\t\tidx := strings.Index(mnt.PortableDataHash, \"/\")\n\t\t\t\tif idx > 0 {\n\t\t\t\t\tmnt.Path = path.Clean(mnt.PortableDataHash[idx:])\n\t\t\t\t\tmnt.PortableDataHash = mnt.PortableDataHash[0:idx]\n\t\t\t\t\trunner.Container.Mounts[bind] = mnt\n\t\t\t\t}\n\t\t\t\tsrc = fmt.Sprintf(\"%s/by_id/%s\", runner.ArvMountPoint, mnt.PortableDataHash)\n\t\t\t\tif mnt.Path != \"\" && mnt.Path != \".\" {\n\t\t\t\t\tif strings.HasPrefix(mnt.Path, \"./\") {\n\t\t\t\t\t\tmnt.Path = mnt.Path[2:]\n\t\t\t\t\t} else if strings.HasPrefix(mnt.Path, \"/\") {\n\t\t\t\t\t\tmnt.Path = mnt.Path[1:]\n\t\t\t\t\t}\n\t\t\t\t\tsrc += \"/\" + mnt.Path\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tsrc = fmt.Sprintf(\"%s/tmp%d\", runner.ArvMountPoint, tmpcount)\n\t\t\t\tarvMountCmd = append(arvMountCmd, \"--mount-tmp\", fmt.Sprintf(\"tmp%d\", tmpcount))\n\t\t\t\ttmpcount++\n\t\t\t}\n\t\t\tif mnt.Writable {\n\t\t\t\tif 
bind == runner.Container.OutputPath {\n\t\t\t\t\trunner.HostOutputDir = src\n\t\t\t\t\tbindmounts[bind] = bindmount{HostPath: src}\n\t\t\t\t} else if strings.HasPrefix(bind, runner.Container.OutputPath+\"/\") {\n\t\t\t\t\tcopyFiles = append(copyFiles, copyFile{src, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})\n\t\t\t\t} else {\n\t\t\t\t\tbindmounts[bind] = bindmount{HostPath: src}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbindmounts[bind] = bindmount{HostPath: src, ReadOnly: true}\n\t\t\t}\n\t\t\tcollectionPaths = append(collectionPaths, src)\n\n\t\tcase mnt.Kind == \"tmp\":\n\t\t\tvar tmpdir string\n\t\t\ttmpdir, err = runner.MkTempDir(runner.parentTemp, \"tmp\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"while creating mount temp dir: %v\", err)\n\t\t\t}\n\t\t\tst, staterr := os.Stat(tmpdir)\n\t\t\tif staterr != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"while Stat on temp dir: %v\", staterr)\n\t\t\t}\n\t\t\terr = os.Chmod(tmpdir, st.Mode()|os.ModeSetgid|0777)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"while Chmod temp dir: %v\", err)\n\t\t\t}\n\t\t\tbindmounts[bind] = bindmount{HostPath: tmpdir}\n\t\t\tif bind == runner.Container.OutputPath {\n\t\t\t\trunner.HostOutputDir = tmpdir\n\t\t\t}\n\n\t\tcase mnt.Kind == \"json\" || mnt.Kind == \"text\":\n\t\t\tvar filedata []byte\n\t\t\tif mnt.Kind == \"json\" {\n\t\t\t\tfiledata, err = json.Marshal(mnt.Content)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"encoding json data: %v\", err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ttext, ok := mnt.Content.(string)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, fmt.Errorf(\"content for mount %q must be a string\", bind)\n\t\t\t\t}\n\t\t\t\tfiledata = []byte(text)\n\t\t\t}\n\n\t\t\ttmpdir, err := runner.MkTempDir(runner.parentTemp, mnt.Kind)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"creating temp dir: %v\", err)\n\t\t\t}\n\t\t\ttmpfn := filepath.Join(tmpdir, \"mountdata.\"+mnt.Kind)\n\t\t\terr = ioutil.WriteFile(tmpfn, filedata, 0444)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"writing temp file: %v\", err)\n\t\t\t}\n\t\t\tif strings.HasPrefix(bind, runner.Container.OutputPath+\"/\") && (notSecret || runner.Container.Mounts[runner.Container.OutputPath].Kind != \"collection\") {\n\t\t\t\t// In most cases, if the container\n\t\t\t\t// specifies a literal file inside the\n\t\t\t\t// output path, we copy it into the\n\t\t\t\t// output directory (either a mounted\n\t\t\t\t// collection or a staging area on the\n\t\t\t\t// host fs). If it's a secret, it will\n\t\t\t\t// be skipped when copying output from\n\t\t\t\t// staging to Keep later.\n\t\t\t\tcopyFiles = append(copyFiles, copyFile{tmpfn, runner.HostOutputDir + bind[len(runner.Container.OutputPath):]})\n\t\t\t} else {\n\t\t\t\t// If a secret is outside OutputPath,\n\t\t\t\t// we bind mount the secret file\n\t\t\t\t// directly just like other mounts. We\n\t\t\t\t// also use this strategy when a\n\t\t\t\t// secret is inside OutputPath but\n\t\t\t\t// OutputPath is a live collection, to\n\t\t\t\t// avoid writing the secret to\n\t\t\t\t// Keep. 
Attempting to remove a\n\t\t\t\t// bind-mounted secret file from\n\t\t\t\t// inside the container will return a\n\t\t\t\t// \"Device or resource busy\" error\n\t\t\t\t// that might not be handled well by\n\t\t\t\t// the container, which is why we\n\t\t\t\t// don't use this strategy when\n\t\t\t\t// OutputPath is a staging directory.\n\t\t\t\tbindmounts[bind] = bindmount{HostPath: tmpfn, ReadOnly: true}\n\t\t\t}\n\t\t}\n\t}\n\n\tif runner.HostOutputDir == \"\" {\n\t\treturn nil, fmt.Errorf(\"output path does not correspond to a writable mount point\")\n\t}\n\n\tif needCertMount && runner.Container.RuntimeConstraints.API {\n\t\tfor _, certfile := range []string{\n\t\t\t// Populated by caller, or sdk/go/arvados init(), or test suite:\n\t\t\tos.Getenv(\"SSL_CERT_FILE\"),\n\t\t\t// Copied from Go 1.21 stdlib (src/crypto/x509/root_linux.go):\n\t\t\t\"/etc/ssl/certs/ca-certificates.crt\",                // Debian/Ubuntu/Gentoo etc.\n\t\t\t\"/etc/pki/tls/certs/ca-bundle.crt\",                  // Fedora/RHEL 6\n\t\t\t\"/etc/ssl/ca-bundle.pem\",                            // OpenSUSE\n\t\t\t\"/etc/pki/tls/cacert.pem\",                           // OpenELEC\n\t\t\t\"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem\", // CentOS/RHEL 7\n\t\t\t\"/etc/ssl/cert.pem\",                                 // Alpine Linux\n\t\t} {\n\t\t\tif _, err := os.Stat(certfile); err == nil {\n\t\t\t\tbindmounts[arvadosCertPath] = bindmount{HostPath: certfile, ReadOnly: true}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif pdhOnly {\n\t\t// If we are only mounting collections by pdh, make\n\t\t// sure we don't subscribe to websocket events to\n\t\t// avoid putting undesired load on the API server\n\t\tarvMountCmd = append(arvMountCmd, \"--mount-by-pdh\", \"by_id\", \"--disable-event-listening\")\n\t} else {\n\t\tarvMountCmd = append(arvMountCmd, \"--mount-by-id\", \"by_id\")\n\t}\n\t// the by_uuid mount point is used by singularity when writing\n\t// out docker images converted to SIF\n\tarvMountCmd = append(arvMountCmd, \"--mount-by-id\", \"by_uuid\")\n\tarvMountCmd = append(arvMountCmd, runner.ArvMountPoint)\n\n\trunner.ArvMount, err = runner.RunArvMount(arvMountCmd, token)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"while trying to start arv-mount: %v\", err)\n\t}\n\tif runner.hoststatReporter != nil && runner.ArvMount != nil {\n\t\trunner.hoststatReporter.ReportPID(\"arv-mount\", runner.ArvMount.Process.Pid)\n\t}\n\n\tfor _, p := range collectionPaths {\n\t\t_, err = os.Stat(p)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"while checking that input files exist: %v\", err)\n\t\t}\n\t}\n\n\tfor _, cp := range copyFiles {\n\t\tst, err := os.Stat(cp.src)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"while staging writable file from %q to %q: %v\", cp.src, cp.bind, err)\n\t\t}\n\t\tif st.IsDir() {\n\t\t\terr = filepath.Walk(cp.src, func(walkpath string, walkinfo os.FileInfo, walkerr error) error {\n\t\t\t\tif walkerr != nil {\n\t\t\t\t\treturn walkerr\n\t\t\t\t}\n\t\t\t\ttarget := path.Join(cp.bind, walkpath[len(cp.src):])\n\t\t\t\tif walkinfo.Mode().IsRegular() {\n\t\t\t\t\tcopyerr := copyfile(walkpath, target)\n\t\t\t\t\tif copyerr != nil {\n\t\t\t\t\t\treturn copyerr\n\t\t\t\t\t}\n\t\t\t\t\treturn os.Chmod(target, walkinfo.Mode()|0777)\n\t\t\t\t} else if walkinfo.Mode().IsDir() {\n\t\t\t\t\tmkerr := os.MkdirAll(target, 0777)\n\t\t\t\t\tif mkerr != nil {\n\t\t\t\t\t\treturn mkerr\n\t\t\t\t\t}\n\t\t\t\t\treturn os.Chmod(target, walkinfo.Mode()|os.ModeSetgid|0777)\n\t\t\t\t} else {\n\t\t\t\t\treturn 
fmt.Errorf(\"source %q is not a regular file or directory\", cp.src)\n\t\t\t\t}\n\t\t\t})\n\t\t} else if st.Mode().IsRegular() {\n\t\t\terr = copyfile(cp.src, cp.bind)\n\t\t\tif err == nil {\n\t\t\t\terr = os.Chmod(cp.bind, st.Mode()|0777)\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"while staging writable file from %q to %q: %v\", cp.src, cp.bind, err)\n\t\t}\n\t}\n\n\treturn bindmounts, nil\n}\n\nfunc (runner *ContainerRunner) stopHoststat() error {\n\tif runner.hoststatReporter == nil {\n\t\treturn nil\n\t}\n\trunner.hoststatReporter.Stop()\n\trunner.hoststatReporter.LogProcessMemMax(runner.CrunchLog)\n\terr := runner.hoststatLogger.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error closing hoststat logs: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc (runner *ContainerRunner) startHoststat() error {\n\tvar err error\n\trunner.hoststatLogger, err = runner.openLogFile(\"hoststat\")\n\tif err != nil {\n\t\treturn err\n\t}\n\trunner.hoststatReporter = &crunchstat.Reporter{\n\t\tLogger: newLogWriter(newTimestamper(runner.hoststatLogger)),\n\t\t// Our own cgroup is the \"host\" cgroup, in the sense\n\t\t// that it accounts for resource usage outside the\n\t\t// container. It doesn't count _all_ resource usage on\n\t\t// the system.\n\t\t//\n\t\t// TODO?: Use the furthest ancestor of our own cgroup\n\t\t// that has stats available. (Currently crunchstat\n\t\t// does not have that capability.)\n\t\tPid:        os.Getpid,\n\t\tPollPeriod: runner.statInterval,\n\t}\n\trunner.hoststatReporter.Start()\n\trunner.hoststatReporter.ReportPID(\"crunch-run\", os.Getpid())\n\treturn nil\n}\n\nfunc (runner *ContainerRunner) startCrunchstat() error {\n\tvar err error\n\trunner.statLogger, err = runner.openLogFile(\"crunchstat\")\n\tif err != nil {\n\t\treturn err\n\t}\n\trunner.statReporter = &crunchstat.Reporter{\n\t\tPid:    runner.executor.Pid,\n\t\tFS:     runner.crunchstatFakeFS,\n\t\tLogger: newLogWriter(newTimestamper(runner.statLogger)),\n\t\tMemThresholds: map[string][]crunchstat.Threshold{\n\t\t\t\"rss\": crunchstat.NewThresholdsFromPercentages(runner.Container.RuntimeConstraints.RAM, []int64{90, 95, 99}),\n\t\t},\n\t\tPollPeriod:      runner.statInterval,\n\t\tTempDir:         runner.parentTemp,\n\t\tThresholdLogger: runner.CrunchLog,\n\t}\n\trunner.statReporter.Start()\n\treturn nil\n}\n\ntype infoCommand struct {\n\tlabel string\n\tcmd   []string\n}\n\n// LogHostInfo logs info about the current host, for debugging and\n// accounting purposes. 
Although it's logged as \"node-info\", this is\n// about the environment where crunch-run is actually running, which\n// might differ from what's described in the node record (see\n// LogNodeRecord).\nfunc (runner *ContainerRunner) LogHostInfo() (err error) {\n\tw, err := runner.openLogFile(\"node-info\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tcommands := []infoCommand{\n\t\t{\n\t\t\tlabel: \"Host Information\",\n\t\t\tcmd:   []string{\"uname\", \"-a\"},\n\t\t},\n\t\t{\n\t\t\tlabel: \"CPU Information\",\n\t\t\tcmd:   []string{\"cat\", \"/proc/cpuinfo\"},\n\t\t},\n\t\t{\n\t\t\tlabel: \"Memory Information\",\n\t\t\tcmd:   []string{\"cat\", \"/proc/meminfo\"},\n\t\t},\n\t\t{\n\t\t\tlabel: \"Disk Space\",\n\t\t\tcmd:   []string{\"df\", \"-m\", \"/\", os.TempDir()},\n\t\t},\n\t\t{\n\t\t\tlabel: \"Disk INodes\",\n\t\t\tcmd:   []string{\"df\", \"-i\", \"/\", os.TempDir()},\n\t\t},\n\t}\n\n\t// Run commands with informational output to be logged.\n\tfor _, command := range commands {\n\t\tfmt.Fprintln(w, command.label)\n\t\tcmd := exec.Command(command.cmd[0], command.cmd[1:]...)\n\t\tcmd.Stdout = w\n\t\tcmd.Stderr = w\n\t\tif err := cmd.Run(); err != nil {\n\t\t\terr = fmt.Errorf(\"While running command %q: %v\", command.cmd, err)\n\t\t\tfmt.Fprintln(w, err)\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintln(w, \"\")\n\t}\n\n\terr = w.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"While closing node-info logs: %v\", err)\n\t}\n\treturn nil\n}\n\n// LogContainerRecord gets and saves the raw JSON container record from the API server\nfunc (runner *ContainerRunner) LogContainerRecord() error {\n\tlogged, err := runner.logAPIResponse(\"container\", \"containers\", map[string]interface{}{\"filters\": [][]string{{\"uuid\", \"=\", runner.Container.UUID}}})\n\tif !logged && err == nil {\n\t\terr = fmt.Errorf(\"error: no container record found for %s\", runner.Container.UUID)\n\t}\n\treturn err\n}\n\n// LogNodeRecord logs the current host's InstanceType config entry, if\n// running via arvados-dispatch-cloud.\nfunc (runner *ContainerRunner) LogNodeRecord() error {\n\tit := os.Getenv(\"InstanceType\")\n\tif it == \"\" {\n\t\t// Not dispatched by arvados-dispatch-cloud.\n\t\treturn nil\n\t}\n\t// Save InstanceType config fragment received from dispatcher\n\t// on stdin.\n\tw, err := runner.LogCollection.OpenFile(\"node.json\", os.O_CREATE|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer w.Close()\n\t_, err = io.WriteString(w, it)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn w.Close()\n}\n\nfunc (runner *ContainerRunner) logAPIResponse(label, path string, params map[string]interface{}) (logged bool, err error) {\n\twriter, err := runner.LogCollection.OpenFile(label+\".json\", os.O_CREATE|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treader, err := runner.DispatcherArvClient.CallRaw(\"GET\", path, \"\", \"\", arvadosclient.Dict(params))\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"error getting %s record: %v\", label, err)\n\t}\n\tdefer reader.Close()\n\n\tdec := json.NewDecoder(reader)\n\tdec.UseNumber()\n\tvar resp map[string]interface{}\n\tif err = dec.Decode(&resp); err != nil {\n\t\treturn false, fmt.Errorf(\"error decoding %s list response: %v\", label, err)\n\t}\n\titems, ok := resp[\"items\"].([]interface{})\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"error decoding %s list response: no \\\"items\\\" key in API list response\", label)\n\t} else if len(items) < 1 {\n\t\treturn false, nil\n\t}\n\t// Re-encode it using indentation to improve 
readability\n\tenc := json.NewEncoder(writer)\n\tenc.SetIndent(\"\", \"    \")\n\tif err = enc.Encode(items[0]); err != nil {\n\t\treturn false, fmt.Errorf(\"error logging %s record: %v\", label, err)\n\t}\n\terr = writer.Close()\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"error closing %s.json in log collection: %v\", label, err)\n\t}\n\treturn true, nil\n}\n\nfunc (runner *ContainerRunner) getStdoutFile(mntPath string) (*os.File, error) {\n\tstdoutPath := mntPath[len(runner.Container.OutputPath):]\n\tindex := strings.LastIndex(stdoutPath, \"/\")\n\tif index > 0 {\n\t\tsubdirs := stdoutPath[:index]\n\t\tif subdirs != \"\" {\n\t\t\tst, err := os.Stat(runner.HostOutputDir)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"While Stat on temp dir: %v\", err)\n\t\t\t}\n\t\t\tstdoutPath := filepath.Join(runner.HostOutputDir, subdirs)\n\t\t\terr = os.MkdirAll(stdoutPath, st.Mode()|os.ModeSetgid|0777)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"While MkdirAll %q: %v\", stdoutPath, err)\n\t\t\t}\n\t\t}\n\t}\n\tstdoutFile, err := os.Create(filepath.Join(runner.HostOutputDir, stdoutPath))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"While creating file %q: %v\", stdoutPath, err)\n\t}\n\n\treturn stdoutFile, nil\n}\n\n// CreateContainer creates the docker container.\nfunc (runner *ContainerRunner) CreateContainer(imageID string, bindmounts map[string]bindmount) error {\n\tvar stdin io.Reader\n\tif mnt, ok := runner.Container.Mounts[\"stdin\"]; ok {\n\t\tswitch mnt.Kind {\n\t\tcase \"collection\":\n\t\t\tvar collID string\n\t\t\tif mnt.UUID != \"\" {\n\t\t\t\tcollID = mnt.UUID\n\t\t\t} else {\n\t\t\t\tcollID = mnt.PortableDataHash\n\t\t\t}\n\t\t\tpath := runner.ArvMountPoint + \"/by_id/\" + collID + \"/\" + mnt.Path\n\t\t\tf, err := os.Open(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tstdin = f\n\t\t\trunner.executorStdin = f\n\t\tcase \"json\":\n\t\t\tj, err := json.Marshal(mnt.Content)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error encoding stdin json data: %v\", err)\n\t\t\t}\n\t\t\tstdin = bytes.NewReader(j)\n\t\t\trunner.executorStdin = io.NopCloser(nil)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"stdin mount has unsupported kind %q\", mnt.Kind)\n\t\t}\n\t} else {\n\t\tstdin = bytes.NewReader(nil)\n\t\trunner.executorStdin = ioutil.NopCloser(nil)\n\t}\n\n\tvar stdout, stderr io.Writer\n\tif mnt, ok := runner.Container.Mounts[\"stdout\"]; ok {\n\t\tf, err := runner.getStdoutFile(mnt.Path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstdout = f\n\t\trunner.executorStdout = f\n\t} else if w, err := runner.openLogFile(\"stdout\"); err != nil {\n\t\treturn err\n\t} else {\n\t\tstdout = newTimestamper(w)\n\t\trunner.executorStdout = w\n\t}\n\n\tif mnt, ok := runner.Container.Mounts[\"stderr\"]; ok {\n\t\tf, err := runner.getStdoutFile(mnt.Path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstderr = f\n\t\trunner.executorStderr = f\n\t} else if w, err := runner.openLogFile(\"stderr\"); err != nil {\n\t\treturn err\n\t} else {\n\t\tstderr = newTimestamper(w)\n\t\trunner.executorStderr = w\n\t}\n\n\tenv := runner.Container.Environment\n\tenableNetwork := runner.enableNetwork == \"always\"\n\tif runner.Container.RuntimeConstraints.API {\n\t\tenableNetwork = true\n\t\ttok, err := runner.ContainerToken()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tenv = map[string]string{}\n\t\tfor k, v := range runner.Container.Environment {\n\t\t\tenv[k] = v\n\t\t}\n\t\tenv[\"ARVADOS_API_TOKEN\"] = tok\n\t\tenv[\"ARVADOS_API_HOST\"] = 
os.Getenv(\"ARVADOS_API_HOST\")\n\t\tenv[\"ARVADOS_API_HOST_INSECURE\"] = os.Getenv(\"ARVADOS_API_HOST_INSECURE\")\n\t\tenv[\"ARVADOS_KEEP_SERVICES\"] = os.Getenv(\"ARVADOS_KEEP_SERVICES\")\n\t}\n\tworkdir := runner.Container.Cwd\n\tif workdir == \".\" {\n\t\t// both \"\" and \".\" mean default\n\t\tworkdir = \"\"\n\t}\n\tram := runner.Container.RuntimeConstraints.RAM\n\tif !runner.enableMemoryLimit {\n\t\tram = 0\n\t}\n\n\tif runner.Container.RuntimeConstraints.GPU.Stack == \"cuda\" {\n\t\tnvidiaModprobe(runner.CrunchLog)\n\t}\n\n\treturn runner.executor.Create(containerSpec{\n\t\tImage:          imageID,\n\t\tVCPUs:          runner.Container.RuntimeConstraints.VCPUs,\n\t\tRAM:            ram,\n\t\tWorkingDir:     workdir,\n\t\tEnv:            env,\n\t\tBindMounts:     bindmounts,\n\t\tCommand:        runner.Container.Command,\n\t\tEnableNetwork:  enableNetwork,\n\t\tGPUStack:       runner.Container.RuntimeConstraints.GPU.Stack,\n\t\tGPUDeviceCount: runner.Container.RuntimeConstraints.GPU.DeviceCount,\n\t\tNetworkMode:    runner.networkMode,\n\t\tCgroupParent:   runner.setCgroupParent,\n\t\tStdin:          stdin,\n\t\tStdout:         stdout,\n\t\tStderr:         stderr,\n\t})\n}\n\n// StartContainer starts the docker container created by CreateContainer.\nfunc (runner *ContainerRunner) StartContainer() error {\n\trunner.CrunchLog.Printf(\"Starting container\")\n\trunner.cStateLock.Lock()\n\tdefer runner.cStateLock.Unlock()\n\tif runner.cCancelled {\n\t\treturn ErrCancelled\n\t}\n\terr := runner.executor.Start()\n\tif err != nil {\n\t\tvar advice string\n\t\tif m, e := regexp.MatchString(\"(?ms).*(exec|System error).*(no such file or directory|file not found).*\", err.Error()); m && e == nil {\n\t\t\tadvice = fmt.Sprintf(\"\\nPossible causes: command %q is missing, the interpreter given in #! is missing, or script has Windows line endings.\", runner.Container.Command[0])\n\t\t}\n\t\treturn fmt.Errorf(\"could not start container: %v%s\", err, advice)\n\t}\n\treturn nil\n}\n\n// WaitFinish waits for the container to terminate, capture the exit code, and\n// close the stdout/stderr logging.\nfunc (runner *ContainerRunner) WaitFinish() error {\n\trunner.CrunchLog.Print(\"Waiting for container to finish\")\n\tvar timeout <-chan time.Time\n\tif s := runner.Container.SchedulingParameters.MaxRunTime; s > 0 {\n\t\ttimeout = time.After(time.Duration(s) * time.Second)\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tgo func() {\n\t\tselect {\n\t\tcase <-timeout:\n\t\t\trunner.CrunchLog.Printf(\"maximum run time exceeded. Stopping container.\")\n\t\t\trunner.stop(nil)\n\t\tcase <-runner.ArvMountExit:\n\t\t\trunner.CrunchLog.Printf(\"arv-mount exited while container is still running. 
Stopping container.\")\n\t\t\trunner.stop(nil)\n\t\tcase <-ctx.Done():\n\t\t}\n\t}()\n\texitcode, err := runner.executor.Wait(ctx)\n\tif err != nil {\n\t\trunner.checkBrokenNode(err)\n\t\treturn err\n\t}\n\trunner.ExitCode = &exitcode\n\n\textra := \"\"\n\tif exitcode&0x80 != 0 {\n\t\t// Convert raw exit status (0x80 + signal number) to a\n\t\t// string to log after the code, like \" (signal 101)\"\n\t\t// or \" (signal 9, killed)\"\n\t\tsig := syscall.WaitStatus(exitcode).Signal()\n\t\tif name := unix.SignalName(sig); name != \"\" {\n\t\t\textra = fmt.Sprintf(\" (signal %d, %s)\", sig, name)\n\t\t} else {\n\t\t\textra = fmt.Sprintf(\" (signal %d)\", sig)\n\t\t}\n\t}\n\trunner.CrunchLog.Printf(\"Container exited with status code %d%s\", exitcode, extra)\n\terr = runner.DispatcherArvClient.Update(\"containers\", runner.Container.UUID, arvadosclient.Dict{\n\t\t\"select\":    []string{\"uuid\"},\n\t\t\"container\": arvadosclient.Dict{\"exit_code\": exitcode},\n\t}, nil)\n\tif err != nil {\n\t\trunner.CrunchLog.Printf(\"ignoring error updating exit_code: %s\", err)\n\t}\n\n\tvar returnErr error\n\tif err = runner.executorStdin.Close(); err != nil {\n\t\terr = fmt.Errorf(\"error closing container stdin: %s\", err)\n\t\trunner.CrunchLog.Printf(\"%s\", err)\n\t\treturnErr = err\n\t}\n\tif err = runner.executorStdout.Close(); err != nil {\n\t\terr = fmt.Errorf(\"error closing container stdout: %s\", err)\n\t\trunner.CrunchLog.Printf(\"%s\", err)\n\t\tif returnErr == nil {\n\t\t\treturnErr = err\n\t\t}\n\t}\n\tif err = runner.executorStderr.Close(); err != nil {\n\t\terr = fmt.Errorf(\"error closing container stderr: %s\", err)\n\t\trunner.CrunchLog.Printf(\"%s\", err)\n\t\tif returnErr == nil {\n\t\t\treturnErr = err\n\t\t}\n\t}\n\n\tif runner.statReporter != nil {\n\t\trunner.statReporter.Stop()\n\t\trunner.statReporter.LogMaxima(runner.CrunchLog, map[string]int64{\n\t\t\t\"rss\": runner.Container.RuntimeConstraints.RAM,\n\t\t})\n\t\terr = runner.statLogger.Close()\n\t\tif err != nil {\n\t\t\trunner.CrunchLog.Printf(\"error closing crunchstat logs: %v\", err)\n\t\t}\n\t}\n\treturn returnErr\n}\n\nfunc (runner *ContainerRunner) updateLogs() {\n\tticker := time.NewTicker(crunchLogUpdatePeriod / 360)\n\tdefer ticker.Stop()\n\n\tsigusr1 := make(chan os.Signal, 1)\n\tsignal.Notify(sigusr1, syscall.SIGUSR1)\n\tdefer signal.Stop(sigusr1)\n\n\tsaveAtTime := time.Now().Add(crunchLogUpdatePeriod)\n\tsaveAtSize := crunchLogUpdateSize\n\tvar savedSize int64\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\tcase <-sigusr1:\n\t\t\tsaveAtTime = time.Now()\n\t\t}\n\t\trunner.logMtx.Lock()\n\t\tdone := runner.logPDHFinal != nil\n\t\trunner.logMtx.Unlock()\n\t\tif done {\n\t\t\treturn\n\t\t}\n\t\tsize := runner.LogCollection.Size()\n\t\tif size == savedSize || (time.Now().Before(saveAtTime) && size < saveAtSize) {\n\t\t\tcontinue\n\t\t}\n\t\tsaveAtTime = time.Now().Add(crunchLogUpdatePeriod)\n\t\tsaveAtSize = runner.LogCollection.Size() + crunchLogUpdateSize\n\t\terr := runner.saveLogCollection(false)\n\t\tif err != nil {\n\t\t\trunner.CrunchLog.Printf(\"error updating log collection: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tsavedSize = size\n\t}\n}\n\nvar spotInterruptionCheckInterval = 5 * time.Second\nvar ec2MetadataBaseURL = \"http://169.254.169.254\"\n\nconst ec2TokenTTL = time.Second * 21600\n\nfunc (runner *ContainerRunner) checkSpotInterruptionNotices() {\n\ttype ec2metadata struct {\n\t\tAction string    `json:\"action\"`\n\t\tTime   time.Time `json:\"time\"`\n\t}\n\trunner.CrunchLog.Printf(\"Checking for 
spot instance interruptions every %v using instance metadata at %s\", spotInterruptionCheckInterval, ec2MetadataBaseURL)\n\tvar metadata ec2metadata\n\tvar token string\n\tvar tokenExp time.Time\n\tcheck := func() error {\n\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))\n\t\tdefer cancel()\n\t\tif token == \"\" || tokenExp.Sub(time.Now()) < time.Minute {\n\t\t\treq, err := http.NewRequestWithContext(ctx, http.MethodPut, ec2MetadataBaseURL+\"/latest/api/token\", nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treq.Header.Set(\"X-aws-ec2-metadata-token-ttl-seconds\", fmt.Sprintf(\"%d\", int(ec2TokenTTL/time.Second)))\n\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\treturn fmt.Errorf(\"%s\", resp.Status)\n\t\t\t}\n\t\t\tnewtoken, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttoken = strings.TrimSpace(string(newtoken))\n\t\t\ttokenExp = time.Now().Add(ec2TokenTTL)\n\t\t}\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, ec2MetadataBaseURL+\"/latest/meta-data/spot/instance-action\", nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq.Header.Set(\"X-aws-ec2-metadata-token\", token)\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\tbreak\n\t\tcase http.StatusNotFound:\n\t\t\t// \"If Amazon EC2 is not preparing to stop or\n\t\t\t// terminate the instance, or if you\n\t\t\t// terminated the instance yourself,\n\t\t\t// instance-action is not present in the\n\t\t\t// instance metadata and you receive an HTTP\n\t\t\t// 404 error when you try to retrieve it.\"\n\t\t\tmetadata = ec2metadata{}\n\t\t\treturn nil\n\t\tcase http.StatusUnauthorized:\n\t\t\ttoken = \"\"\n\t\t\treturn fmt.Errorf(\"%s\", resp.Status)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"%s\", resp.Status)\n\t\t}\n\t\tnextmetadata := ec2metadata{}\n\t\terr = json.NewDecoder(resp.Body).Decode(&nextmetadata)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmetadata = nextmetadata\n\t\treturn nil\n\t}\n\tfailures := 0\n\tvar lastmetadata ec2metadata\n\tfor range time.NewTicker(spotInterruptionCheckInterval).C {\n\t\terr := check()\n\t\tif err != nil {\n\t\t\tmessage := fmt.Sprintf(\"Spot instance interruption check was inconclusive: %s\", err)\n\t\t\tif failures++; failures > 5 {\n\t\t\t\trunner.CrunchLog.Printf(\"%s -- now giving up after too many consecutive errors\", message)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\trunner.CrunchLog.Printf(\"%s -- will retry in %v\", message, spotInterruptionCheckInterval)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tfailures = 0\n\t\tif metadata.Action != \"\" && metadata != lastmetadata {\n\t\t\tlastmetadata = metadata\n\t\t\ttext := fmt.Sprintf(\"Cloud provider scheduled instance %s at %s\", metadata.Action, metadata.Time.UTC().Format(time.RFC3339))\n\t\t\trunner.CrunchLog.Printf(\"%s\", text)\n\t\t\trunner.updateRuntimeStatus(arvadosclient.Dict{\n\t\t\t\t\"warning\":          \"preemption notice\",\n\t\t\t\t\"warningDetail\":    text,\n\t\t\t\t\"preemptionNotice\": text,\n\t\t\t})\n\t\t\tif proc, err := os.FindProcess(os.Getpid()); err == nil {\n\t\t\t\t// trigger updateLogs\n\t\t\t\tproc.Signal(syscall.SIGUSR1)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (runner *ContainerRunner) updateRuntimeStatus(status arvadosclient.Dict) 
{\n\terr := runner.DispatcherArvClient.Update(\"containers\", runner.Container.UUID, arvadosclient.Dict{\n\t\t\"select\": []string{\"uuid\"},\n\t\t\"container\": arvadosclient.Dict{\n\t\t\t\"runtime_status\": status,\n\t\t},\n\t}, nil)\n\tif err != nil {\n\t\trunner.CrunchLog.Printf(\"error updating container runtime_status: %s\", err)\n\t}\n}\n\n// CaptureOutput saves data from the container's output directory if\n// needed, and updates the container output accordingly.\nfunc (runner *ContainerRunner) CaptureOutput(bindmounts map[string]bindmount) error {\n\tif runner.Container.RuntimeConstraints.API {\n\t\t// Output may have been set directly by the container, so\n\t\t// refresh the container record to check.\n\t\terr := runner.DispatcherArvClient.Get(\"containers\", runner.Container.UUID,\n\t\t\tarvadosclient.Dict{\n\t\t\t\t\"select\": []string{\"output\"},\n\t\t\t}, &runner.Container)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif runner.Container.Output != \"\" {\n\t\t\t// Container output is already set.\n\t\t\trunner.OutputPDH = &runner.Container.Output\n\t\t\treturn nil\n\t\t}\n\t}\n\n\ttxt, err := (&copier{\n\t\tclient:        runner.containerClient,\n\t\tkeepClient:    runner.ContainerKeepClient,\n\t\thostOutputDir: runner.HostOutputDir,\n\t\tctrOutputDir:  runner.Container.OutputPath,\n\t\tglobs:         runner.Container.OutputGlob,\n\t\tbindmounts:    bindmounts,\n\t\tmounts:        runner.Container.Mounts,\n\t\tsecretMounts:  runner.SecretMounts,\n\t\tlogger:        runner.CrunchLog,\n\t}).Copy()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif n := len(regexp.MustCompile(` [0-9a-f]+\\+\\S*\\+R`).FindAllStringIndex(txt, -1)); n > 0 {\n\t\trunner.CrunchLog.Printf(\"Copying %d data blocks from remote input collections...\", n)\n\t\tfs, err := (&arvados.Collection{ManifestText: txt}).FileSystem(runner.containerClient, runner.ContainerKeepClient)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttxt, err = fs.MarshalManifest(\".\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tvar resp arvados.Collection\n\terr = runner.ContainerArvClient.Create(\"collections\", arvadosclient.Dict{\n\t\t\"ensure_unique_name\": true,\n\t\t\"select\":             []string{\"portable_data_hash\"},\n\t\t\"collection\": arvadosclient.Dict{\n\t\t\t\"is_trashed\":    true,\n\t\t\t\"name\":          \"output for \" + runner.Container.UUID,\n\t\t\t\"manifest_text\": txt,\n\t\t},\n\t}, &resp)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating output collection: %v\", err)\n\t}\n\trunner.OutputPDH = &resp.PortableDataHash\n\treturn nil\n}\n\nfunc (runner *ContainerRunner) CleanupDirs() {\n\tif runner.ArvMount != nil {\n\t\tvar delay int64 = 8\n\t\tumount := exec.Command(\"arv-mount\", fmt.Sprintf(\"--unmount-timeout=%d\", delay), \"--unmount\", runner.ArvMountPoint)\n\t\tumount.Stdout = runner.CrunchLog\n\t\tumount.Stderr = runner.CrunchLog\n\t\trunner.CrunchLog.Printf(\"Running %v\", umount.Args)\n\t\tumnterr := umount.Start()\n\n\t\tif umnterr != nil {\n\t\t\trunner.CrunchLog.Printf(\"Error unmounting: %v\", umnterr)\n\t\t\trunner.ArvMount.Process.Kill()\n\t\t} else {\n\t\t\t// If arv-mount --unmount gets stuck for any reason, we\n\t\t\t// don't want to wait for it forever.  
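If it hasn't finished\n\t\t\t// within the unmount timeout plus a one-second grace\n\t\t\t// period, both the unmount command and the original\n\t\t\t// arv-mount process are killed.  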
Do Wait() in a goroutine\n\t\t\t// so it doesn't block crunch-run.\n\t\t\tumountExit := make(chan error)\n\t\t\tgo func() {\n\t\t\t\tmnterr := umount.Wait()\n\t\t\t\tif mnterr != nil {\n\t\t\t\t\trunner.CrunchLog.Printf(\"Error unmounting: %v\", mnterr)\n\t\t\t\t}\n\t\t\t\tumountExit <- mnterr\n\t\t\t}()\n\n\t\t\tfor again := true; again; {\n\t\t\t\tagain = false\n\t\t\t\tselect {\n\t\t\t\tcase <-umountExit:\n\t\t\t\t\tumount = nil\n\t\t\t\t\tagain = true\n\t\t\t\tcase <-runner.ArvMountExit:\n\t\t\t\t\tbreak\n\t\t\t\tcase <-time.After(time.Duration((delay + 1) * int64(time.Second))):\n\t\t\t\t\trunner.CrunchLog.Printf(\"Timed out waiting for unmount\")\n\t\t\t\t\tif umount != nil {\n\t\t\t\t\t\tumount.Process.Kill()\n\t\t\t\t\t}\n\t\t\t\t\trunner.ArvMount.Process.Kill()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\trunner.ArvMount = nil\n\t}\n\n\tif runner.ArvMountPoint != \"\" {\n\t\tif rmerr := os.Remove(runner.ArvMountPoint); rmerr != nil {\n\t\t\trunner.CrunchLog.Printf(\"While cleaning up arv-mount directory %s: %v\", runner.ArvMountPoint, rmerr)\n\t\t}\n\t\trunner.ArvMountPoint = \"\"\n\t}\n\n\tif rmerr := os.RemoveAll(runner.parentTemp); rmerr != nil {\n\t\trunner.CrunchLog.Printf(\"While cleaning up temporary directory %s: %v\", runner.parentTemp, rmerr)\n\t}\n}\n\n// CommitLogs posts the collection containing the final container logs.\nfunc (runner *ContainerRunner) CommitLogs() error {\n\tfunc() {\n\t\t// Hold cStateLock to prevent races on CrunchLog (e.g., stop()).\n\t\trunner.cStateLock.Lock()\n\t\tdefer runner.cStateLock.Unlock()\n\n\t\trunner.CrunchLog.Print(runner.finalState)\n\n\t\tif runner.arvMountLog != nil {\n\t\t\trunner.arvMountLog.Close()\n\t\t}\n\n\t\t// From now on just log to stderr, in case there are\n\t\t// any other further errors (such as failing to write\n\t\t// the log to Keep!)  while shutting down\n\t\trunner.CrunchLog = newLogWriter(newTimestamper(newStringPrefixer(os.Stderr, runner.Container.UUID+\" \")))\n\t}()\n\n\tif runner.keepstoreLogger != nil {\n\t\t// Flush any buffered logs from our local keepstore\n\t\t// process.  
Discard anything logged after this point\n\t\t// -- it won't end up in the log collection, so\n\t\t// there's no point writing it to the collectionfs.\n\t\trunner.keepstoreLogbuf.SetWriter(io.Discard)\n\t\trunner.keepstoreLogger.Close()\n\t\trunner.keepstoreLogger = nil\n\t}\n\n\tif runner.logPDHFinal != nil {\n\t\t// If we have already assigned something to logPDHFinal,\n\t\t// we must be closing the re-opened log, which won't\n\t\t// end up getting attached to the container record and\n\t\t// therefore doesn't need to be saved as a collection\n\t\t// -- it exists only to send logs to other channels.\n\t\treturn nil\n\t}\n\n\treturn runner.saveLogCollection(true)\n}\n\n// Flush buffered logs to Keep and create/update the log collection.\n//\n// Also update the container record with the updated log PDH -- except\n// this part is skipped if (a) the container hasn't entered Running\n// state yet, meaning we can't assign a log value, or (b) final==true,\n// meaning the caller will immediately update the container record to\n// Completed state and update the log PDH in the same API call.\nfunc (runner *ContainerRunner) saveLogCollection(final bool) error {\n\trunner.logMtx.Lock()\n\tdefer runner.logMtx.Unlock()\n\tif runner.logPDHFinal != nil {\n\t\t// Already finalized.\n\t\treturn nil\n\t}\n\tupdates := arvadosclient.Dict{\n\t\t\"name\": \"logs for \" + runner.Container.UUID,\n\t}\n\tmt, errFlush := runner.LogCollection.MarshalManifest(\".\")\n\tif errFlush == nil {\n\t\t// Only send updated manifest text if there was no\n\t\t// error.\n\t\tupdates[\"manifest_text\"] = mt\n\t}\n\n\t// Even if flushing the manifest had an error, we still want\n\t// to update the log record, if possible, to push the trash_at\n\t// and delete_at times into the future.  Details on bug\n\t// #17293.\n\tif final {\n\t\tupdates[\"is_trashed\"] = true\n\t} else {\n\t\t// We set trash_at so this collection gets\n\t\t// automatically cleaned up eventually.  It used to be\n\t\t// 12 hours but we had a situation where the API\n\t\t// server was down over a weekend but the containers\n\t\t// kept running such that the log collection got\n\t\t// trashed, so now we make it 2 weeks.  
refs #20378\n\t\texp := time.Now().Add(time.Duration(24*14) * time.Hour)\n\t\tupdates[\"trash_at\"] = exp\n\t\tupdates[\"delete_at\"] = exp\n\t}\n\treqBody := arvadosclient.Dict{\n\t\t\"select\":     []string{\"uuid\", \"portable_data_hash\"},\n\t\t\"collection\": updates,\n\t}\n\tvar saved arvados.Collection\n\tvar errUpdate error\n\tif runner.logUUID == \"\" {\n\t\treqBody[\"ensure_unique_name\"] = true\n\t\terrUpdate = runner.DispatcherArvClient.Create(\"collections\", reqBody, &saved)\n\t} else {\n\t\terrUpdate = runner.DispatcherArvClient.Update(\"collections\", runner.logUUID, reqBody, &saved)\n\t}\n\tif errUpdate == nil {\n\t\trunner.logUUID = saved.UUID\n\t\trunner.logPDH = saved.PortableDataHash\n\t}\n\n\tif errFlush != nil || errUpdate != nil {\n\t\treturn fmt.Errorf(\"error recording logs: %q, %q\", errFlush, errUpdate)\n\t}\n\tif final {\n\t\trunner.logPDHFinal = &saved.PortableDataHash\n\t}\n\tif final || runner.finalState == \"Queued\" {\n\t\t// If final, the caller (Run -> CommitLogs) will\n\t\t// immediately update the log attribute to logPDHFinal\n\t\t// while setting state to Complete, so it would be\n\t\t// redundant to do it here.\n\t\t//\n\t\t// If runner.finalState==\"Queued\", the container state\n\t\t// has not changed to \"Running\", so updating the log\n\t\t// attribute is not allowed.\n\t\treturn nil\n\t}\n\treturn runner.DispatcherArvClient.Update(\"containers\", runner.Container.UUID, arvadosclient.Dict{\n\t\t\"select\": []string{\"uuid\"},\n\t\t\"container\": arvadosclient.Dict{\n\t\t\t\"log\": saved.PortableDataHash,\n\t\t},\n\t}, nil)\n}\n\n// UpdateContainerRunning updates the container state to \"Running\"\nfunc (runner *ContainerRunner) UpdateContainerRunning() error {\n\trunner.logMtx.Lock()\n\tlogPDH := runner.logPDH\n\trunner.logMtx.Unlock()\n\n\trunner.cStateLock.Lock()\n\tdefer runner.cStateLock.Unlock()\n\tif runner.cCancelled {\n\t\treturn ErrCancelled\n\t}\n\treturn runner.DispatcherArvClient.Update(\n\t\t\"containers\",\n\t\trunner.Container.UUID,\n\t\tarvadosclient.Dict{\n\t\t\t\"select\": []string{\"uuid\"},\n\t\t\t\"container\": arvadosclient.Dict{\n\t\t\t\t\"gateway_address\": runner.gateway.Address,\n\t\t\t\t\"state\":           \"Running\",\n\t\t\t\t\"log\":             logPDH,\n\t\t\t},\n\t\t},\n\t\tnil,\n\t)\n}\n\n// ContainerToken returns the api_token the container (and any\n// arv-mount processes) are allowed to use.\nfunc (runner *ContainerRunner) ContainerToken() (string, error) {\n\tif runner.token != \"\" {\n\t\treturn runner.token, nil\n\t}\n\n\tvar auth arvados.APIClientAuthorization\n\terr := runner.DispatcherArvClient.Call(\"GET\", \"containers\", runner.Container.UUID, \"auth\", nil, &auth)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\trunner.token = fmt.Sprintf(\"v2/%s/%s/%s\", auth.UUID, auth.APIToken, runner.Container.UUID)\n\treturn runner.token, nil\n}\n\n// UpdateContainerFinal updates the container record state on API\n// server to \"Complete\" or \"Cancelled\"\nfunc (runner *ContainerRunner) UpdateContainerFinal() error {\n\tupdate := arvadosclient.Dict{}\n\tupdate[\"state\"] = runner.finalState\n\tif runner.logPDHFinal != nil {\n\t\tupdate[\"log\"] = *runner.logPDHFinal\n\t}\n\tif runner.ExitCode != nil {\n\t\tupdate[\"exit_code\"] = *runner.ExitCode\n\t} else {\n\t\tupdate[\"exit_code\"] = nil\n\t}\n\tif runner.finalState == \"Complete\" && runner.OutputPDH != nil {\n\t\tupdate[\"output\"] = *runner.OutputPDH\n\t}\n\tupdate[\"cost\"] = runner.calculateCost(time.Now())\n\treturn 
runner.DispatcherArvClient.Update(\"containers\", runner.Container.UUID, arvadosclient.Dict{\n\t\t\"select\":    []string{\"uuid\"},\n\t\t\"container\": update,\n\t}, nil)\n}\n\n// IsCancelled returns the value of Cancelled, with goroutine safety.\nfunc (runner *ContainerRunner) IsCancelled() bool {\n\trunner.cStateLock.Lock()\n\tdefer runner.cStateLock.Unlock()\n\treturn runner.cCancelled\n}\n\nfunc (runner *ContainerRunner) openLogFile(name string) (io.WriteCloser, error) {\n\treturn runner.LogCollection.OpenFile(name+\".txt\", os.O_CREATE|os.O_WRONLY, 0666)\n}\n\n// Run the full container lifecycle.\nfunc (runner *ContainerRunner) Run() (err error) {\n\trunner.CrunchLog.Printf(\"crunch-run %s started\", cmd.Version.String())\n\trunner.CrunchLog.Printf(\"%s\", currentUserAndGroups())\n\tv, _ := exec.Command(\"arv-mount\", \"--version\").CombinedOutput()\n\trunner.CrunchLog.Printf(\"Using FUSE mount: %s\", v)\n\trunner.CrunchLog.Printf(\"Using container runtime: %s\", runner.executor.Runtime())\n\trunner.CrunchLog.Printf(\"Executing container: %s\", runner.Container.UUID)\n\trunner.costStartTime = time.Now()\n\n\thostname, hosterr := os.Hostname()\n\tif hosterr != nil {\n\t\trunner.CrunchLog.Printf(\"Error getting hostname '%v'\", hosterr)\n\t} else {\n\t\trunner.CrunchLog.Printf(\"Executing on host '%s'\", hostname)\n\t}\n\n\tsigusr2 := make(chan os.Signal, 1)\n\tsignal.Notify(sigusr2, syscall.SIGUSR2)\n\tdefer signal.Stop(sigusr2)\n\trunner.loadPrices()\n\tgo runner.handleSIGUSR2(sigusr2)\n\n\trunner.finalState = \"Queued\"\n\n\tdefer func() {\n\t\trunner.CleanupDirs()\n\t\trunner.CrunchLog.Printf(\"crunch-run finished\")\n\t}()\n\n\terr = runner.fetchContainerRecord()\n\tif err != nil {\n\t\treturn\n\t}\n\tif runner.Container.State != \"Locked\" {\n\t\treturn fmt.Errorf(\"dispatch error detected: container %q has state %q\", runner.Container.UUID, runner.Container.State)\n\t}\n\n\tvar bindmounts map[string]bindmount\n\tdefer func() {\n\t\t// checkErr prints e (unless it's nil) and sets err to\n\t\t// e (unless err is already non-nil). 
Thus, if err\n\t\t// hasn't already been assigned when Run() returns,\n\t\t// this cleanup func will cause Run() to return the\n\t\t// first non-nil error that is passed to checkErr().\n\t\tcheckErr := func(errorIn string, e error) {\n\t\t\tif e == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\trunner.CrunchLog.Printf(\"error in %s: %v\", errorIn, e)\n\t\t\tif err == nil {\n\t\t\t\terr = e\n\t\t\t}\n\t\t\tif runner.finalState == \"Complete\" {\n\t\t\t\t// There was an error in the finalization.\n\t\t\t\trunner.finalState = \"Cancelled\"\n\t\t\t}\n\t\t}\n\n\t\t// Log the error encountered in Run(), if any\n\t\tcheckErr(\"Run\", err)\n\n\t\tif runner.finalState == \"Queued\" {\n\t\t\trunner.UpdateContainerFinal()\n\t\t\treturn\n\t\t}\n\n\t\tif runner.IsCancelled() {\n\t\t\trunner.finalState = \"Cancelled\"\n\t\t\t// but don't return yet -- we still want to\n\t\t\t// capture partial output and write logs\n\t\t}\n\n\t\tif bindmounts != nil {\n\t\t\tif errSave := runner.saveLogCollection(false); errSave != nil {\n\t\t\t\t// This doesn't merit failing the\n\t\t\t\t// container, but should be logged.\n\t\t\t\trunner.CrunchLog.Printf(\"error saving log collection: %v\", errSave)\n\t\t\t}\n\t\t\tcheckErr(\"CaptureOutput\", runner.CaptureOutput(bindmounts))\n\t\t}\n\t\tcheckErr(\"stopHoststat\", runner.stopHoststat())\n\t\tcheckErr(\"CommitLogs\", runner.CommitLogs())\n\t\trunner.CleanupDirs()\n\t\tcheckErr(\"UpdateContainerFinal\", runner.UpdateContainerFinal())\n\t}()\n\n\trunner.setupSignals()\n\terr = runner.startHoststat()\n\tif err != nil {\n\t\treturn\n\t}\n\tif runner.keepstore != nil {\n\t\trunner.hoststatReporter.ReportPID(\"keepstore\", runner.keepstore.Process.Pid)\n\t}\n\n\t// set up FUSE mount and binds\n\tbindmounts, err = runner.SetupMounts()\n\tif err != nil {\n\t\trunner.finalState = \"Cancelled\"\n\t\terr = fmt.Errorf(\"While setting up mounts: %v\", err)\n\t\treturn\n\t}\n\n\t// check for and/or load image\n\timageID, err := runner.LoadImage()\n\tif err != nil {\n\t\tif !runner.checkBrokenNode(err) {\n\t\t\t// Failed to load image but not due to a \"broken node\"\n\t\t\t// condition, probably user error.\n\t\t\trunner.finalState = \"Cancelled\"\n\t\t}\n\t\terr = fmt.Errorf(\"failed to load container image: %v\", err)\n\t\treturn\n\t}\n\n\terr = runner.CreateContainer(imageID, bindmounts)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = runner.LogHostInfo()\n\tif err != nil {\n\t\treturn\n\t}\n\terr = runner.LogNodeRecord()\n\tif err != nil {\n\t\treturn\n\t}\n\terr = runner.LogContainerRecord()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif runner.IsCancelled() {\n\t\treturn\n\t}\n\n\terr = runner.saveLogCollection(false)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = runner.UpdateContainerRunning()\n\tif err != nil {\n\t\treturn\n\t}\n\trunner.finalState = \"Cancelled\"\n\n\terr = runner.startCrunchstat()\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = runner.StartContainer()\n\tif err != nil {\n\t\trunner.checkBrokenNode(err)\n\t\treturn\n\t}\n\n\terr = runner.WaitFinish()\n\tif err == nil && !runner.IsCancelled() {\n\t\trunner.finalState = \"Complete\"\n\t}\n\treturn\n}\n\n// Fetch the current container record (uuid = runner.Container.UUID)\n// into runner.Container.\nfunc (runner *ContainerRunner) fetchContainerRecord() error {\n\treader, err := runner.DispatcherArvClient.CallRaw(\"GET\", \"containers\", runner.Container.UUID, \"\", nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error fetching container record: %v\", err)\n\t}\n\tdefer reader.Close()\n\n\tdec := 
json.NewDecoder(reader)\n\tdec.UseNumber()\n\terr = dec.Decode(&runner.Container)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error decoding container record: %v\", err)\n\t}\n\n\tvar sm struct {\n\t\tSecretMounts map[string]arvados.Mount `json:\"secret_mounts\"`\n\t}\n\n\tcontainerToken, err := runner.ContainerToken()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting container token: %v\", err)\n\t}\n\n\trunner.ContainerArvClient, runner.ContainerKeepClient,\n\t\trunner.containerClient, err = runner.MkArvClient(containerToken)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating container API client: %v\", err)\n\t}\n\n\trunner.ContainerKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)\n\trunner.DispatcherKeepClient.SetStorageClasses(runner.Container.OutputStorageClasses)\n\n\terr = runner.ContainerArvClient.Call(\"GET\", \"containers\", runner.Container.UUID, \"secret_mounts\", nil, &sm)\n\tif err != nil {\n\t\tif apierr, ok := err.(arvadosclient.APIServerError); !ok || apierr.HttpStatusCode != 404 {\n\t\t\treturn fmt.Errorf(\"error fetching secret_mounts: %v\", err)\n\t\t}\n\t\t// ok && apierr.HttpStatusCode == 404, which means\n\t\t// secret_mounts isn't supported by this API server.\n\t}\n\trunner.SecretMounts = sm.SecretMounts\n\n\treturn nil\n}\n\n// NewContainerRunner creates a new container runner.\nfunc NewContainerRunner(dispatcherClient *arvados.Client,\n\tdispatcherArvClient IArvadosClient,\n\tdispatcherKeepClient IKeepClient,\n\tcontainerUUID string) (*ContainerRunner, error) {\n\n\tcr := &ContainerRunner{\n\t\tdispatcherClient:     dispatcherClient,\n\t\tDispatcherArvClient:  dispatcherArvClient,\n\t\tDispatcherKeepClient: dispatcherKeepClient,\n\t}\n\tcr.RunArvMount = cr.ArvMountCmd\n\tcr.MkTempDir = ioutil.TempDir\n\tcr.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {\n\t\tcl, err := arvadosclient.MakeArvadosClient()\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tcl.Retries = 10\n\t\tcl.ApiToken = token\n\t\tkc, err := keepclient.MakeKeepClient(cl)\n\t\tif err != nil {\n\t\t\treturn nil, nil, nil, err\n\t\t}\n\t\tkc.Retries = 10\n\t\tc2 := arvados.NewClientFromEnv()\n\t\tc2.AuthToken = token\n\t\treturn cl, kc, c2, nil\n\t}\n\tvar err error\n\tcr.LogCollection, err = (&arvados.Collection{}).FileSystem(cr.dispatcherClient, cr.DispatcherKeepClient)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcr.Container.UUID = containerUUID\n\tf, err := cr.openLogFile(\"crunch-run\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcr.CrunchLog = newLogWriter(newTimestamper(io.MultiWriter(f, newStringPrefixer(os.Stderr, cr.Container.UUID+\" \"))))\n\n\tgo cr.updateLogs()\n\n\treturn cr, nil\n}\n\nfunc (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tlog := log.New(stderr, \"\", 0)\n\tflags := flag.NewFlagSet(prog, flag.ContinueOnError)\n\tstatInterval := flags.Duration(\"crunchstat-interval\", 10*time.Second, \"sampling period for periodic resource usage reporting\")\n\tflags.String(\"cgroup-root\", \"/sys/fs/cgroup\", \"path to sysfs cgroup tree (obsolete, ignored)\")\n\tflags.String(\"cgroup-parent\", \"docker\", \"name of container's parent cgroup (obsolete, ignored)\")\n\tcgroupParentSubsystem := flags.String(\"cgroup-parent-subsystem\", \"\", \"use current cgroup for given `subsystem` as parent cgroup for container (subsystem argument is only relevant for cgroups v1; in cgroups v2 / unified mode, any non-empty value means use current cgroup); 
if empty, use the docker daemon's default cgroup parent. See https://doc.arvados.org/install/crunch2-slurm/install-dispatch.html#CrunchRunCommand-cgroups\")\n\tcaCertsPath := flags.String(\"ca-certs\", \"\", \"Path to TLS root certificates\")\n\tdetach := flags.Bool(\"detach\", false, \"Detach from parent process and run in the background\")\n\tstdinConfig := flags.Bool(\"stdin-config\", false, \"Load config and environment variables from JSON message on stdin\")\n\tstdinPrices := flags.Bool(\"stdin-prices\", false, \"Load instance prices from stdin and notify existing crunch-run processes (only applies in -list mode)\")\n\tconfigFile := flags.String(\"config\", arvados.DefaultConfigFile, \"filename of cluster config file to try loading if -stdin-config=false (default is $ARVADOS_CONFIG)\")\n\tsleep := flags.Duration(\"sleep\", 0, \"Delay before starting (testing use only)\")\n\tkill := flags.Int(\"kill\", -1, \"Send signal to an existing crunch-run process for given UUID\")\n\tlist := flags.Bool(\"list\", false, \"List UUIDs of existing crunch-run processes\")\n\tenableMemoryLimit := flags.Bool(\"enable-memory-limit\", true, \"tell container runtime to limit container's memory usage\")\n\tenableNetwork := flags.String(\"container-enable-networking\", \"default\", \"enable networking \\\"always\\\" (for all containers) or \\\"default\\\" (for containers that request it)\")\n\tnetworkMode := flags.String(\"container-network-mode\", \"default\", `Docker network mode for container (use any argument valid for docker --net)`)\n\tmemprofile := flags.String(\"memprofile\", \"\", \"write memory profile to `file` after running container\")\n\truntimeEngine := flags.String(\"runtime-engine\", \"docker\", \"container runtime: docker or singularity\")\n\tbrokenNodeHook := flags.String(\"broken-node-hook\", \"\", \"script to run if node is detected to be broken (for example, Docker daemon is not running)\")\n\tflags.Duration(\"check-containerd\", 0, \"Ignored. Exists for compatibility with older versions.\")\n\tversion := flags.Bool(\"version\", false, \"Write version information to stdout and exit 0.\")\n\n\tignoreDetachFlag := false\n\tif len(args) > 0 && args[0] == \"-no-detach\" {\n\t\t// This process was invoked by a parent process, which\n\t\t// has passed along its own arguments, including\n\t\t// -detach, after the leading -no-detach flag.  
Strip\n\t\t// the leading -no-detach flag (it's not recognized by\n\t\t// flags.Parse()) and ignore the -detach flag that\n\t\t// comes later.\n\t\targs = args[1:]\n\t\tignoreDetachFlag = true\n\t}\n\n\tif ok, code := cmd.ParseFlags(flags, prog, args, \"container-uuid\", stderr); !ok {\n\t\treturn code\n\t} else if *version {\n\t\tfmt.Fprintln(stdout, prog, cmd.Version.String())\n\t\treturn 0\n\t} else if !*list && flags.NArg() != 1 {\n\t\tfmt.Fprintf(stderr, \"missing required argument: container-uuid (try -help)\\n\")\n\t\treturn 2\n\t}\n\n\tcontainerUUID := flags.Arg(0)\n\n\tswitch {\n\tcase *detach && !ignoreDetachFlag:\n\t\treturn Detach(containerUUID, prog, args, stdin, stdout, stderr)\n\tcase *kill >= 0:\n\t\treturn KillProcess(containerUUID, syscall.Signal(*kill), stdout, stderr)\n\tcase *list:\n\t\tif !*stdinPrices {\n\t\t\tstdin = nil\n\t\t}\n\t\treturn ListProcesses(stdin, stdout, stderr)\n\t}\n\n\tif len(containerUUID) != 27 {\n\t\tlog.Printf(\"usage: %s [options] UUID\", prog)\n\t\treturn 1\n\t}\n\n\tvar keepstoreLogbuf bufThenWrite\n\tvar conf ConfigData\n\tif *stdinConfig {\n\t\terr := json.NewDecoder(stdin).Decode(&conf)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"decode stdin: %s\", err)\n\t\t\treturn 1\n\t\t}\n\t\tfor k, v := range conf.Env {\n\t\t\terr = os.Setenv(k, v)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"setenv(%q): %s\", k, err)\n\t\t\t\treturn 1\n\t\t\t}\n\t\t}\n\t\tif conf.Cluster != nil {\n\t\t\t// ClusterID is missing from the JSON\n\t\t\t// representation, but we need it to generate\n\t\t\t// a valid config file for keepstore, so we\n\t\t\t// fill it using the container UUID prefix.\n\t\t\tconf.Cluster.ClusterID = containerUUID[:5]\n\t\t}\n\t} else {\n\t\tconf = hpcConfData(containerUUID, *configFile, io.MultiWriter(&keepstoreLogbuf, stderr))\n\t}\n\n\tlog.Printf(\"crunch-run %s started\", cmd.Version.String())\n\ttime.Sleep(*sleep)\n\n\tif *caCertsPath != \"\" {\n\t\tos.Setenv(\"SSL_CERT_FILE\", *caCertsPath)\n\t}\n\n\tkeepstore, err := startLocalKeepstore(conf, io.MultiWriter(&keepstoreLogbuf, stderr))\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn 1\n\t}\n\tif keepstore != nil {\n\t\tdefer keepstore.Process.Kill()\n\t}\n\n\tapi, err := arvadosclient.MakeArvadosClient()\n\tif err != nil {\n\t\tlog.Printf(\"%s: %v\", containerUUID, err)\n\t\treturn 1\n\t}\n\t// arvadosclient now interprets Retries=10 to mean\n\t// Timeout=10m, retrying with exponential backoff + jitter.\n\tapi.Retries = 10\n\n\tkc, err := keepclient.MakeKeepClient(api)\n\tif err != nil {\n\t\tlog.Printf(\"%s: %v\", containerUUID, err)\n\t\treturn 1\n\t}\n\tkc.Retries = 10\n\n\tcr, err := NewContainerRunner(arvados.NewClientFromEnv(), api, kc, containerUUID)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn 1\n\t}\n\n\tcr.keepstore = keepstore\n\tif keepstore == nil {\n\t\t// Log explanation (if any) for why we're not running\n\t\t// a local keepstore.\n\t\tvar buf bytes.Buffer\n\t\tkeepstoreLogbuf.SetWriter(&buf)\n\t\tif buf.Len() > 0 {\n\t\t\tcr.CrunchLog.Printf(\"%s\", strings.TrimSpace(buf.String()))\n\t\t}\n\t} else if logWhat := conf.Cluster.Containers.LocalKeepLogsToContainerLog; logWhat == \"none\" {\n\t\tcr.CrunchLog.Printf(\"using local keepstore process (pid %d) at %s\", keepstore.Process.Pid, os.Getenv(\"ARVADOS_KEEP_SERVICES\"))\n\t\tkeepstoreLogbuf.SetWriter(io.Discard)\n\t} else {\n\t\tcr.CrunchLog.Printf(\"using local keepstore process (pid %d) at %s, writing logs to keepstore.txt in log collection\", keepstore.Process.Pid, 
os.Getenv(\"ARVADOS_KEEP_SERVICES\"))\n\t\tcr.keepstoreLogger, err = cr.openLogFile(\"keepstore\")\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\treturn 1\n\t\t}\n\n\t\tvar writer io.WriteCloser = cr.keepstoreLogger\n\t\tif logWhat == \"errors\" {\n\t\t\twriter = &filterKeepstoreErrorsOnly{WriteCloser: writer}\n\t\t} else if logWhat != \"all\" {\n\t\t\t// should have been caught earlier by\n\t\t\t// dispatcher's config loader\n\t\t\tlog.Printf(\"invalid value for Containers.LocalKeepLogsToContainerLog: %q\", logWhat)\n\t\t\treturn 1\n\t\t}\n\t\terr = keepstoreLogbuf.SetWriter(writer)\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\treturn 1\n\t\t}\n\t\tcr.keepstoreLogbuf = &keepstoreLogbuf\n\t}\n\n\tswitch *runtimeEngine {\n\tcase \"docker\":\n\t\tcr.executor, err = newDockerExecutor(containerUUID, cr.CrunchLog.Printf, cr.containerWatchdogInterval)\n\tcase \"singularity\":\n\t\tcr.executor, err = newSingularityExecutor(cr.CrunchLog.Printf)\n\tdefault:\n\t\tcr.CrunchLog.Printf(\"%s: unsupported RuntimeEngine %q\", containerUUID, *runtimeEngine)\n\t\treturn 1\n\t}\n\tif err != nil {\n\t\tcr.CrunchLog.Printf(\"%s: %v\", containerUUID, err)\n\t\tcr.checkBrokenNode(err)\n\t\treturn 1\n\t}\n\tdefer cr.executor.Close()\n\n\tcr.brokenNodeHook = *brokenNodeHook\n\n\tgwAuthSecret := os.Getenv(\"GatewayAuthSecret\")\n\tos.Unsetenv(\"GatewayAuthSecret\")\n\tif gwAuthSecret == \"\" {\n\t\t// not safe to run a gateway service without an auth\n\t\t// secret\n\t\tcr.CrunchLog.Printf(\"Not starting a gateway server (GatewayAuthSecret was not provided by dispatcher)\")\n\t} else {\n\t\tgwListen := os.Getenv(\"GatewayAddress\")\n\t\tcr.gateway = Gateway{\n\t\t\tAddress:       gwListen,\n\t\t\tAuthSecret:    gwAuthSecret,\n\t\t\tContainerUUID: containerUUID,\n\t\t\tTarget:        cr.executor,\n\t\t\tLog:           cr.CrunchLog,\n\t\t\tLogCollection: cr.LogCollection,\n\t\t}\n\t\tif gwListen == \"\" {\n\t\t\t// Direct connection won't work, so we use the\n\t\t\t// gateway_address field to indicate the\n\t\t\t// internalURL of the controller process that\n\t\t\t// has the current tunnel connection.\n\t\t\tcr.gateway.ArvadosClient = cr.dispatcherClient\n\t\t\tcr.gateway.UpdateTunnelURL = func(url string) {\n\t\t\t\tcr.gateway.Address = \"tunnel \" + url\n\t\t\t\tcr.DispatcherArvClient.Update(\"containers\", containerUUID,\n\t\t\t\t\tarvadosclient.Dict{\n\t\t\t\t\t\t\"select\":    []string{\"uuid\"},\n\t\t\t\t\t\t\"container\": arvadosclient.Dict{\"gateway_address\": cr.gateway.Address},\n\t\t\t\t\t}, nil)\n\t\t\t}\n\t\t}\n\t\terr = cr.gateway.Start()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error starting gateway server: %s\", err)\n\t\t\treturn 1\n\t\t}\n\t}\n\n\tparentTemp, tmperr := cr.MkTempDir(\"\", \"crunch-run.\"+containerUUID+\".\")\n\tif tmperr != nil {\n\t\tlog.Printf(\"%s: %v\", containerUUID, tmperr)\n\t\treturn 1\n\t}\n\n\tcr.parentTemp = parentTemp\n\tcr.statInterval = *statInterval\n\tcr.enableMemoryLimit = *enableMemoryLimit\n\tcr.enableNetwork = *enableNetwork\n\tcr.networkMode = *networkMode\n\tif *cgroupParentSubsystem != \"\" {\n\t\tp, err := findCgroup(os.DirFS(\"/\"), *cgroupParentSubsystem)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"fatal: cgroup parent subsystem: %s\", err)\n\t\t\treturn 1\n\t\t}\n\t\tcr.setCgroupParent = p\n\t}\n\n\tif conf.EC2SpotCheck {\n\t\tgo cr.checkSpotInterruptionNotices()\n\t}\n\n\trunerr := cr.Run()\n\n\tif *memprofile != \"\" {\n\t\tf, err := os.Create(*memprofile)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"could not create memory profile: %s\", 
err)\n\t\t} else {\n\t\t\truntime.GC() // get up-to-date statistics\n\t\t\tif err := pprof.WriteHeapProfile(f); err != nil {\n\t\t\t\tlog.Printf(\"could not write memory profile: %s\", err)\n\t\t\t}\n\t\t\tif closeerr := f.Close(); closeerr != nil {\n\t\t\t\tlog.Printf(\"closing memprofile file: %s\", closeerr)\n\t\t\t}\n\t\t}\n\t}\n\n\tif runerr != nil {\n\t\tlog.Printf(\"%s: %v\", containerUUID, runerr)\n\t\treturn 1\n\t}\n\treturn 0\n}\n\n// Try to load ConfigData in an HPC (slurm/LSF) environment. This means\n// loading the cluster config from the specified file and (if that\n// works) getting the runtime_constraints container field from\n// controller to determine # VCPUs so we can calculate KeepBuffers.\nfunc hpcConfData(uuid string, configFile string, stderr io.Writer) ConfigData {\n\tvar conf ConfigData\n\tconf.Cluster = loadClusterConfigFile(configFile, stderr)\n\tif conf.Cluster == nil {\n\t\t// skip loading the container record -- we won't be\n\t\t// able to start local keepstore anyway.\n\t\treturn conf\n\t}\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tif err != nil {\n\t\tfmt.Fprintf(stderr, \"error setting up arvadosclient: %s\\n\", err)\n\t\treturn conf\n\t}\n\t// arvadosclient now interprets Retries=10 to mean\n\t// Timeout=10m, retrying with exponential backoff + jitter.\n\tarv.Retries = 10\n\tvar ctr arvados.Container\n\terr = arv.Call(\"GET\", \"containers\", uuid, \"\", arvadosclient.Dict{\"select\": []string{\"runtime_constraints\"}}, &ctr)\n\tif err != nil {\n\t\tfmt.Fprintf(stderr, \"error getting container record: %s\\n\", err)\n\t\treturn conf\n\t}\n\tif ctr.RuntimeConstraints.VCPUs > 0 {\n\t\tconf.KeepBuffers = ctr.RuntimeConstraints.VCPUs * conf.Cluster.Containers.LocalKeepBlobBuffersPerVCPU\n\t}\n\treturn conf\n}\n\n
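// Illustrative sizing note (hypothetical numbers, not taken from any\n// real cluster config): with runtime_constraints.vcpus = 8 and\n// Containers.LocalKeepBlobBuffersPerVCPU = 1, the calculation above\n// yields conf.KeepBuffers = 8, which startLocalKeepstore below passes\n// to the child keepstore as API.MaxKeepBlobBuffers -- i.e. at most 8\n// concurrent 64 MiB block buffers.\n\n// Load cluster config file from given path.\n// 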
If an error occurs, log\n// the error to stderr and return nil.\nfunc loadClusterConfigFile(path string, stderr io.Writer) *arvados.Cluster {\n\tldr := config.NewLoader(&bytes.Buffer{}, ctxlog.New(stderr, \"plain\", \"info\"))\n\tldr.Path = path\n\tcfg, err := ldr.Load()\n\tif err != nil {\n\t\tfmt.Fprintf(stderr, \"could not load config file %s: %s\\n\", path, err)\n\t\treturn nil\n\t}\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\tfmt.Fprintf(stderr, \"could not use config file %s: %s\\n\", path, err)\n\t\treturn nil\n\t}\n\tfmt.Fprintf(stderr, \"loaded config file %s\\n\", path)\n\treturn cluster\n}\n\nfunc startLocalKeepstore(configData ConfigData, logbuf io.Writer) (*exec.Cmd, error) {\n\tif configData.KeepBuffers < 1 {\n\t\tfmt.Fprintf(logbuf, \"not starting a local keepstore process because KeepBuffers=%v in config\\n\", configData.KeepBuffers)\n\t\treturn nil, nil\n\t}\n\tif configData.Cluster == nil {\n\t\tfmt.Fprint(logbuf, \"not starting a local keepstore process because cluster config file was not loaded\\n\")\n\t\treturn nil, nil\n\t}\n\tfor uuid, vol := range configData.Cluster.Volumes {\n\t\tif len(vol.AccessViaHosts) > 0 {\n\t\t\tfmt.Fprintf(logbuf, \"not starting a local keepstore process because a volume (%s) uses AccessViaHosts\\n\", uuid)\n\t\t\treturn nil, nil\n\t\t}\n\t\tif !vol.ReadOnly && vol.Replication < configData.Cluster.Collections.DefaultReplication {\n\t\t\tfmt.Fprintf(logbuf, \"not starting a local keepstore process because a writable volume (%s) has replication less than Collections.DefaultReplication (%d < %d)\\n\", uuid, vol.Replication, configData.Cluster.Collections.DefaultReplication)\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\t// Rather than have an alternate way to tell keepstore how\n\t// many buffers to use, etc., when starting it this way, we\n\t// just modify the cluster configuration that we feed it on\n\t// stdin.\n\tccfg := *configData.Cluster\n\tccfg.API.MaxKeepBlobBuffers = configData.KeepBuffers\n\tccfg.Collections.BlobTrash = false\n\tccfg.Collections.BlobTrashConcurrency = 0\n\tccfg.Collections.BlobDeleteConcurrency = 0\n\n\taddrs, err := processIPs(os.Getpid())\n\tif err != nil {\n\t\tfmt.Fprintf(logbuf, \"warning: could not get list of host IP addresses: %v\\n\", err)\n\t\t// ...and proceed with zero IPs\n\t}\n\tlocaladdr := localKeepstoreAddr(addrs)\n\t// Bind to port 0 so the kernel picks a free port, note which port\n\t// that is, then release it for the keepstore child to re-bind.\n\tln, err := net.Listen(\"tcp\", net.JoinHostPort(localaddr, \"0\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, port, err := net.SplitHostPort(ln.Addr().String())\n\tif err != nil {\n\t\tln.Close()\n\t\treturn nil, err\n\t}\n\tln.Close()\n\turl := \"http://\" + net.JoinHostPort(localaddr, port)\n\n\tfmt.Fprintf(logbuf, \"starting keepstore on %s\\n\", url)\n\n\tvar confJSON bytes.Buffer\n\terr = json.NewEncoder(&confJSON).Encode(arvados.Config{\n\t\tClusters: map[string]arvados.Cluster{\n\t\t\tccfg.ClusterID: ccfg,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcmd := exec.Command(\"/proc/self/exe\", \"keepstore\", \"-config=-\")\n\tif target, err := os.Readlink(cmd.Path); err == nil && strings.HasSuffix(target, \".test\") {\n\t\t// If we're a 'go test' process, running\n\t\t// /proc/self/exe would start the test suite in a\n\t\t// child process, which is not what we want.\n\t\tcmd.Path, _ = exec.LookPath(\"go\")\n\t\tcmd.Args = append([]string{\"go\", \"run\", \"../../cmd/arvados-server\"}, cmd.Args[1:]...)\n\t\tcmd.Env = os.Environ()\n\t}\n\tcmd.Stdin = &confJSON\n\tcmd.Stdout = logbuf\n\tcmd.Stderr = logbuf\n
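\t// Note: outside the 'go test' branch above, cmd.Env is nil at this\n\t// point, so the child keepstore inherits nothing and gets only the\n\t// two variables appended below -- its whole configuration arrives\n\t// via stdin.\n\tcmd.Env = 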
append(cmd.Env,\n\t\t\"GOGC=10\",\n\t\t\"ARVADOS_SERVICE_INTERNAL_URL=\"+url)\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error starting keepstore process: %w\", err)\n\t}\n\tcmdExited := false\n\tgo func() {\n\t\tcmd.Wait()\n\t\tcmdExited = true\n\t}()\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*10))\n\tdefer cancel()\n\tpoll := time.NewTicker(time.Second / 10)\n\tdefer poll.Stop()\n\tclient := http.Client{}\n\tfor range poll.C {\n\t\ttestReq, err := http.NewRequestWithContext(ctx, \"GET\", url+\"/_health/ping\", nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttestReq.Header.Set(\"Authorization\", \"Bearer \"+configData.Cluster.ManagementToken)\n\t\tresp, err := client.Do(testReq)\n\t\tif err == nil {\n\t\t\tresp.Body.Close()\n\t\t\tif resp.StatusCode == http.StatusOK {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif cmdExited {\n\t\t\treturn nil, fmt.Errorf(\"keepstore child process exited\")\n\t\t}\n\t\tif ctx.Err() != nil {\n\t\t\treturn nil, fmt.Errorf(\"timed out waiting for new keepstore process to report healthy\")\n\t\t}\n\t}\n\tos.Setenv(\"ARVADOS_KEEP_SERVICES\", url)\n\treturn cmd, nil\n}\n\n// return current uid, gid, groups in a format suitable for logging:\n// \"crunch-run process has uid=1234(arvados) gid=1234(arvados)\n// groups=1234(arvados),114(fuse)\"\nfunc currentUserAndGroups() string {\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"error getting current user ID: %s\", err)\n\t}\n\ts := fmt.Sprintf(\"crunch-run process has uid=%s(%s) gid=%s\", u.Uid, u.Username, u.Gid)\n\tif g, err := user.LookupGroupId(u.Gid); err == nil {\n\t\ts += fmt.Sprintf(\"(%s)\", g.Name)\n\t}\n\ts += \" groups=\"\n\tif gids, err := u.GroupIds(); err == nil {\n\t\tfor i, gid := range gids {\n\t\t\tif i > 0 {\n\t\t\t\ts += \",\"\n\t\t\t}\n\t\t\ts += gid\n\t\t\tif g, err := user.LookupGroupId(gid); err == nil {\n\t\t\t\ts += fmt.Sprintf(\"(%s)\", g.Name)\n\t\t\t}\n\t\t}\n\t}\n\treturn s\n}\n\n// Return a suitable local interface address for a local keepstore\n// service. 
Currently this is the numerically lowest IPv4 address\n// assigned to a local interface, excluding the loopback and\n// link-local ranges 127/8 and 169.254/16. Addresses in the CGNAT/VPN\n// range 100.64/10 are not excluded, but are deprioritized in favor\n// of routable addresses.\nfunc localKeepstoreAddr(addrs map[string]bool) string {\n\tvar ips []net.IP\n\tfor addr := range addrs {\n\t\tip := net.ParseIP(addr)\n\t\tif ip == nil {\n\t\t\t// invalid\n\t\t\tcontinue\n\t\t}\n\t\tif ip.Mask(net.CIDRMask(8, 32)).Equal(net.IPv4(127, 0, 0, 0)) ||\n\t\t\tip.Mask(net.CIDRMask(16, 32)).Equal(net.IPv4(169, 254, 0, 0)) {\n\t\t\t// unsuitable\n\t\t\tcontinue\n\t\t}\n\t\tips = append(ips, ip)\n\t}\n\tif len(ips) == 0 {\n\t\treturn \"0.0.0.0\"\n\t}\n\tsort.Slice(ips, func(ii, jj int) bool {\n\t\ti, j := ips[ii], ips[jj]\n\t\tif len(i) != len(j) {\n\t\t\t// Prefer IPv4 over IPv6.\n\t\t\treturn len(i) < len(j)\n\t\t}\n\t\tif icg, jcg := isCGNAT(i), isCGNAT(j); icg != jcg {\n\t\t\t// Prefer non-CGNAT over CGNAT, so that when\n\t\t\t// there is a Tailscale VPN address as well as\n\t\t\t// a routable address, we choose the routable\n\t\t\t// address.\n\t\t\treturn !icg\n\t\t}\n\t\tfor x := range i {\n\t\t\tif i[x] != j[x] {\n\t\t\t\t// Prefer lower IP number, just to\n\t\t\t\t// make the result predictable.\n\t\t\t\treturn i[x] < j[x]\n\t\t\t}\n\t\t}\n\t\treturn false\n\t})\n\treturn ips[0].String()\n}\n\nfunc isCGNAT(ip net.IP) bool {\n\treturn ip.Mask(net.CIDRMask(10, 32)).Equal(net.IPv4(100, 64, 0, 0))\n}\n\nfunc (cr *ContainerRunner) loadPrices() {\n\tbuf, err := os.ReadFile(filepath.Join(lockdir, pricesfile))\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\tcr.CrunchLog.Printf(\"loadPrices: read: %s\", err)\n\t\t}\n\t\treturn\n\t}\n\tvar prices []cloud.InstancePrice\n\terr = json.Unmarshal(buf, &prices)\n\tif err != nil {\n\t\tcr.CrunchLog.Printf(\"loadPrices: decode: %s\", err)\n\t\treturn\n\t}\n\tcr.pricesLock.Lock()\n\tdefer cr.pricesLock.Unlock()\n\tvar lastKnown time.Time\n\tif len(cr.prices) > 0 {\n\t\tlastKnown = cr.prices[0].StartTime\n\t}\n\tcr.prices = cloud.NormalizePriceHistory(append(prices, cr.prices...))\n\tfor i := len(cr.prices) - 1; i >= 0; i-- {\n\t\tprice := cr.prices[i]\n\t\tif price.StartTime.After(lastKnown) {\n\t\t\tcr.CrunchLog.Printf(\"Instance price changed to %#.3g at %s\", price.Price, price.StartTime.UTC())\n\t\t}\n\t}\n}\n\nfunc (cr *ContainerRunner) calculateCost(now time.Time) float64 {\n\tcr.pricesLock.Lock()\n\tdefer cr.pricesLock.Unlock()\n\n\t// First, make a \"prices\" slice with the real data as far back\n\t// as it goes, and (if needed) a \"since the beginning of time\"\n\t// placeholder containing a reasonable guess about what the\n\t// price was between cr.costStartTime and the earliest real\n\t// data point.\n\tprices := cr.prices\n\tif len(prices) == 0 {\n\t\t// use price info in InstanceType record initially\n\t\t// provided by cloud dispatcher\n\t\tvar p float64\n\t\tvar it arvados.InstanceType\n\t\tif j := os.Getenv(\"InstanceType\"); j != \"\" && json.Unmarshal([]byte(j), &it) == nil && it.Price > 0 {\n\t\t\tp = it.Price\n\t\t}\n\t\tprices = []cloud.InstancePrice{{Price: p}}\n\t} else if prices[len(prices)-1].StartTime.After(cr.costStartTime) {\n\t\t// guess earlier pricing was the same as the earliest\n\t\t// price we know about\n\t\tfiller := prices[len(prices)-1]\n\t\tfiller.StartTime = time.Time{}\n\t\tprices = append(prices, filler)\n\t}\n\n\t// Now that our history of price changes goes back at least as\n\t// far as cr.costStartTime, add up the costs for each\n\t// interval.\n\tcost := 0.0\n\tspanEnd := now\n\tfor _, ip 
:= range prices {\n\t\tspanStart := ip.StartTime\n\t\tif spanStart.After(now) {\n\t\t\t// pricing information from the future -- not\n\t\t\t// expected from AWS, but possible in\n\t\t\t// principle, and exercised by tests.\n\t\t\tcontinue\n\t\t}\n\t\tlast := false\n\t\tif spanStart.Before(cr.costStartTime) {\n\t\t\tspanStart = cr.costStartTime\n\t\t\tlast = true\n\t\t}\n\t\tcost += ip.Price * spanEnd.Sub(spanStart).Seconds() / 3600\n\t\tif last {\n\t\t\tbreak\n\t\t}\n\t\tspanEnd = spanStart\n\t}\n\n\treturn cost\n}\n\nfunc (runner *ContainerRunner) handleSIGUSR2(sigchan chan os.Signal) {\n\tfor range sigchan {\n\t\trunner.loadPrices()\n\t\tupdate := arvadosclient.Dict{\n\t\t\t\"select\": []string{\"uuid\"},\n\t\t\t\"container\": arvadosclient.Dict{\n\t\t\t\t\"cost\": runner.calculateCost(time.Now()),\n\t\t\t},\n\t\t}\n\t\trunner.DispatcherArvClient.Update(\"containers\", runner.Container.UUID, update, nil)\n\t}\n}\n
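\n// Worked example for calculateCost (hypothetical prices, not real\n// cloud data): suppose cr.costStartTime = 10:00, the instance price\n// was $0.50/h until it changed to $0.40/h at 10:30, and now = 11:00.\n// Iterating from the newest price entry backwards:\n//\n//\tcost  = 0.40 * (11:00 - 10:30) = 0.20\n//\tcost += 0.50 * (10:30 - 10:00) = 0.25   // total: 0.45\n//\n// The zero-StartTime filler entry is what lets the oldest interval\n// extend back to costStartTime when the recorded price history\n// starts later than the container did.\n"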
  },
  {
    "path": "lib/crunchrun/crunchrun_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/md5\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/http/httputil\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path\"\n\t\"regexp\"\n\t\"runtime/pprof\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"syscall\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\n\t. \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc TestCrunchExec(t *testing.T) {\n\tTestingT(t)\n}\n\nfunc init() {\n\tarvadostest.StartKeep(2, true)\n}\n\nconst logLineStart = `(?m)(.*\\n)*\\d{4}-\\d\\d-\\d\\dT\\d\\d:\\d\\d:\\d\\d\\.\\d+Z `\n\nvar _ = Suite(&TestSuite{})\n\ntype TestSuite struct {\n\tclient                   *arvados.Client\n\tapi                      *ArvTestClient\n\trunner                   *ContainerRunner\n\texecutor                 *stubExecutor\n\tkeepmount                string\n\tkeepmountTmp             []string\n\ttestDispatcherKeepClient KeepTestClient\n\ttestContainerKeepClient  KeepTestClient\n\tdebian12MemoryCurrent    int64\n\tdebian12SwapCurrent      int64\n}\n\nfunc (s *TestSuite) SetUpSuite(c *C) {\n\tbuf, err := os.ReadFile(\"../crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.current\")\n\tc.Assert(err, IsNil)\n\t_, err = fmt.Sscanf(string(buf), \"%d\", &s.debian12MemoryCurrent)\n\tc.Assert(err, IsNil)\n\n\tbuf, err = os.ReadFile(\"../crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.swap.current\")\n\tc.Assert(err, IsNil)\n\t_, err = fmt.Sscanf(string(buf), \"%d\", &s.debian12SwapCurrent)\n\tc.Assert(err, IsNil)\n}\n\nfunc (s *TestSuite) SetUpTest(c *C) {\n\ts.client = arvados.NewClientFromEnv()\n\ts.executor = &stubExecutor{}\n\tvar err error\n\ts.api = &ArvTestClient{}\n\ts.runner, err = NewContainerRunner(s.client, s.api, &s.testDispatcherKeepClient, \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\")\n\tc.Assert(err, IsNil)\n\ts.runner.executor = s.executor\n\ts.runner.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {\n\t\treturn s.api, &s.testContainerKeepClient, s.client, nil\n\t}\n\ts.runner.RunArvMount = func(cmd []string, tok string) (*exec.Cmd, error) {\n\t\ts.runner.ArvMountPoint = s.keepmount\n\t\tfor i, opt := range cmd {\n\t\t\tif opt == \"--mount-tmp\" {\n\t\t\t\terr := os.Mkdir(s.keepmount+\"/\"+cmd[i+1], 0700)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\ts.keepmountTmp = append(s.keepmountTmp, cmd[i+1])\n\t\t\t}\n\t\t}\n\t\treturn nil, nil\n\t}\n\ts.keepmount = c.MkDir()\n\terr = os.Mkdir(s.keepmount+\"/by_id\", 0755)\n\ts.keepmountTmp = nil\n\tc.Assert(err, IsNil)\n\terr = os.Mkdir(s.keepmount+\"/by_id/\"+arvadostest.DockerImage112PDH, 0755)\n\tc.Assert(err, IsNil)\n\terr = ioutil.WriteFile(s.keepmount+\"/by_id/\"+arvadostest.DockerImage112PDH+\"/\"+arvadostest.DockerImage112Filename, []byte(\"#notarealtarball\"), 0644)\n\terr = os.Mkdir(s.keepmount+\"/by_id/\"+fakeInputCollectionPDH, 0755)\n\tc.Assert(err, IsNil)\n\terr = ioutil.WriteFile(s.keepmount+\"/by_id/\"+fakeInputCollectionPDH+\"/input.json\", 
[]byte(`{\"input\":true}`), 0644)\n\tc.Assert(err, IsNil)\n\ts.runner.ArvMountPoint = s.keepmount\n\tos.Setenv(\"InstanceType\", `{\"ProviderType\":\"a1.2xlarge\",\"Price\":1.2}`)\n}\n\ntype ArvTestClient struct {\n\tTotal   int64\n\tCalls   int\n\tContent []arvadosclient.Dict\n\tarvados.Container\n\tsecretMounts []byte\n\tsync.Mutex\n\tWasSetRunning bool\n\tcallraw       bool\n}\n\ntype KeepTestClient struct {\n\tCalled         bool\n\tStorageClasses []string\n\tblocks         sync.Map\n}\n\ntype stubExecutor struct {\n\timageLoaded bool\n\tloaded      string\n\tloadErr     error\n\texitCode    int\n\tcreateErr   error\n\tcreated     containerSpec\n\tstartErr    error\n\twaitSleep   time.Duration\n\twaitErr     error\n\tstopErr     error\n\tstopped     bool\n\tclosed      bool\n\trunFunc     func() int\n\texit        chan int\n}\n\nfunc (e *stubExecutor) LoadImage(imageId string, tarball string, container arvados.Container, keepMount string,\n\tcontainerClient *arvados.Client) error {\n\te.loaded = tarball\n\treturn e.loadErr\n}\nfunc (e *stubExecutor) Runtime() string                 { return \"stub\" }\nfunc (e *stubExecutor) Version() string                 { return \"stub \" + cmd.Version.String() }\nfunc (e *stubExecutor) Create(spec containerSpec) error { e.created = spec; return e.createErr }\nfunc (e *stubExecutor) Start() error {\n\te.exit = make(chan int, 1)\n\tgo func() { e.exit <- e.runFunc() }()\n\treturn e.startErr\n}\nfunc (e *stubExecutor) Pid() int    { return 1115883 } // matches pid in ../crunchstat/testdata/debian12/proc/\nfunc (e *stubExecutor) Stop() error { e.stopped = true; go func() { e.exit <- -1 }(); return e.stopErr }\nfunc (e *stubExecutor) Close()      { e.closed = true }\nfunc (e *stubExecutor) Wait(context.Context) (int, error) {\n\treturn <-e.exit, e.waitErr\n}\nfunc (e *stubExecutor) InjectCommand(ctx context.Context, _, _ string, _ bool, _ []string) (*exec.Cmd, error) {\n\treturn nil, errors.New(\"unimplemented\")\n}\nfunc (e *stubExecutor) IPAddress() (string, error) { return \"\", errors.New(\"unimplemented\") }\n\nconst fakeInputCollectionPDH = \"ffffffffaaaaaaaa88888888eeeeeeee+1234\"\n\nvar hwManifest = \". 82ab40c24fc8df01798e57ba66795bb1+841216+Aa124ac75e5168396c73c0a18eda641a4f41791c0@569fa8c3 0:841216:9c31ee32b3d15268a0754e8edc74d4f815ee014b693bc5109058e431dd5caea7.tar\\n\"\nvar hwPDH = \"a45557269dcb65a6b78f9ac061c0850b+120\"\nvar hwImageID = \"9c31ee32b3d15268a0754e8edc74d4f815ee014b693bc5109058e431dd5caea7\"\n\nvar otherManifest = \". 68a84f561b1d1708c6baff5e019a9ab3+46+Ae5d0af96944a3690becb1decdf60cc1c937f556d@5693216f 0:46:md5sum.txt\\n\"\nvar otherPDH = \"a3e8f74c6f101eae01fa08bfb4e49b3a+54\"\n\nvar normalizedManifestWithSubdirs = `. 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 0:9:file1_in_main.txt 9:18:file2_in_main.txt 0:27:zzzzz-8i9sb-bcdefghijkdhvnk.log.txt\n./subdir1 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 0:9:file1_in_subdir1.txt 9:18:file2_in_subdir1.txt\n./subdir1/subdir2 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 0:9:file1_in_subdir2.txt 9:18:file2_in_subdir2.txt\n`\n\nvar normalizedWithSubdirsPDH = \"a0def87f80dd594d4675809e83bd4f15+367\"\n\nvar denormalizedManifestWithSubdirs = \". 
3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 0:9:file1_in_main.txt 9:18:file2_in_main.txt 0:27:zzzzz-8i9sb-bcdefghijkdhvnk.log.txt 0:10:subdir1/file1_in_subdir1.txt 10:17:subdir1/file2_in_subdir1.txt\\n\"\nvar denormalizedWithSubdirsPDH = \"b0def87f80dd594d4675809e83bd4f15+367\"\n\nvar fakeAuthUUID = \"zzzzz-gj3su-55pqoyepgi2glem\"\nvar fakeAuthToken = \"a3ltuwzqcu2u4sc0q7yhpc2w7s00fdcqecg5d6e0u3pfohmbjt\"\n\nfunc (client *ArvTestClient) Create(resourceType string,\n\tparameters arvadosclient.Dict,\n\toutput interface{}) error {\n\n\tclient.Mutex.Lock()\n\tdefer client.Mutex.Unlock()\n\n\tclient.Calls++\n\tclient.Content = append(client.Content, parameters)\n\n\tif resourceType == \"logs\" {\n\t\tpanic(\"logs.create called\")\n\t}\n\n\tif resourceType == \"collections\" && output != nil {\n\t\tmt := parameters[\"collection\"].(arvadosclient.Dict)[\"manifest_text\"].(string)\n\t\tmd5sum := md5.Sum([]byte(mt))\n\t\toutmap := output.(*arvados.Collection)\n\t\toutmap.PortableDataHash = fmt.Sprintf(\"%x+%d\", md5sum, len(mt))\n\t\toutmap.UUID = fmt.Sprintf(\"zzzzz-4zz18-%015x\", md5sum[:7])\n\t}\n\n\treturn nil\n}\n\nfunc (client *ArvTestClient) Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error {\n\tswitch {\n\tcase method == \"GET\" && resourceType == \"containers\" && action == \"auth\":\n\t\treturn json.Unmarshal([]byte(`{\n\t\t\t\"kind\": \"arvados#api_client_authorization\",\n\t\t\t\"uuid\": \"`+fakeAuthUUID+`\",\n\t\t\t\"api_token\": \"`+fakeAuthToken+`\"\n\t\t\t}`), output)\n\tcase method == \"GET\" && resourceType == \"containers\" && action == \"secret_mounts\":\n\t\tif client.secretMounts != nil {\n\t\t\treturn json.Unmarshal(client.secretMounts, output)\n\t\t}\n\t\treturn json.Unmarshal([]byte(`{\"secret_mounts\":{}}`), output)\n\tdefault:\n\t\treturn fmt.Errorf(\"Not found\")\n\t}\n}\n\nfunc (client *ArvTestClient) CallRaw(method, resourceType, uuid, action string,\n\tparameters arvadosclient.Dict) (reader io.ReadCloser, err error) {\n\tvar j []byte\n\tif method == \"GET\" && resourceType == \"nodes\" && uuid == \"\" && action == \"\" {\n\t\tj = []byte(`{\n\t\t\t\"kind\": \"arvados#nodeList\",\n\t\t\t\"items\": [{\n\t\t\t\t\"uuid\": \"zzzzz-7ekkf-2z3mc76g2q73aio\",\n\t\t\t\t\"hostname\": \"compute2\",\n\t\t\t\t\"properties\": {\"total_cpu_cores\": 16}\n\t\t\t}]}`)\n\t} else if method == \"GET\" && resourceType == \"containers\" && action == \"\" && !client.callraw {\n\t\tif uuid == \"\" {\n\t\t\tj, err = json.Marshal(map[string]interface{}{\n\t\t\t\t\"items\": []interface{}{client.Container},\n\t\t\t\t\"kind\":  \"arvados#nodeList\",\n\t\t\t})\n\t\t} else {\n\t\t\tj, err = json.Marshal(client.Container)\n\t\t}\n\t} else {\n\t\tj = []byte(`{\n\t\t\t\"command\": [\"sleep\", \"1\"],\n\t\t\t\"container_image\": \"` + arvadostest.DockerImage112PDH + `\",\n\t\t\t\"cwd\": \".\",\n\t\t\t\"environment\": {},\n\t\t\t\"mounts\": {\"/tmp\": {\"kind\": \"tmp\"}, \"/json\": {\"kind\": \"json\", \"content\": {\"number\": 123456789123456789}}},\n\t\t\t\"output_path\": \"/tmp\",\n\t\t\t\"priority\": 1,\n\t\t\t\"runtime_constraints\": {}\n\t\t}`)\n\t}\n\treturn ioutil.NopCloser(bytes.NewReader(j)), err\n}\n\nfunc (client *ArvTestClient) Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error {\n\tif resourceType == \"collections\" {\n\t\tif uuid == hwPDH {\n\t\t\toutput.(*arvados.Collection).ManifestText = hwManifest\n\t\t} else if uuid == otherPDH 
{\n\t\t\toutput.(*arvados.Collection).ManifestText = otherManifest\n\t\t} else if uuid == normalizedWithSubdirsPDH {\n\t\t\toutput.(*arvados.Collection).ManifestText = normalizedManifestWithSubdirs\n\t\t} else if uuid == denormalizedWithSubdirsPDH {\n\t\t\toutput.(*arvados.Collection).ManifestText = denormalizedManifestWithSubdirs\n\t\t}\n\t}\n\tif resourceType == \"containers\" {\n\t\t(*output.(*arvados.Container)) = client.Container\n\t}\n\treturn nil\n}\n\nfunc (client *ArvTestClient) Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error) {\n\tclient.Mutex.Lock()\n\tdefer client.Mutex.Unlock()\n\tclient.Calls++\n\tclient.Content = append(client.Content, parameters)\n\tif resourceType == \"containers\" {\n\t\tif parameters[\"container\"].(arvadosclient.Dict)[\"state\"] == \"Running\" {\n\t\t\tclient.WasSetRunning = true\n\t\t}\n\t} else if resourceType == \"collections\" && output != nil {\n\t\tmt := parameters[\"collection\"].(arvadosclient.Dict)[\"manifest_text\"].(string)\n\t\toutput.(*arvados.Collection).UUID = uuid\n\t\toutput.(*arvados.Collection).PortableDataHash = arvados.PortableDataHash(mt)\n\t}\n\treturn nil\n}\n\nvar discoveryMap = map[string]interface{}{\n\t\"crunchLogUpdateSize\":   float64(crunchLogUpdateSize),\n\t\"crunchLogUpdatePeriod\": float64(crunchLogUpdatePeriod.Seconds()),\n}\n\nfunc (client *ArvTestClient) Discovery(key string) (interface{}, error) {\n\treturn discoveryMap[key], nil\n}\n\n// CalledWith returns the parameters from the first API call whose\n// parameters match jpath/string. E.g., CalledWith(c, \"foo.bar\",\n// \"baz\") returns parameters with parameters[\"foo\"][\"bar\"]==\"baz\". If\n// no call matches, it returns nil.\nfunc (client *ArvTestClient) CalledWith(jpath string, expect interface{}) arvadosclient.Dict {\ncall:\n\tfor _, content := range client.Content {\n\t\tvar v interface{} = content\n\t\tfor _, k := range strings.Split(jpath, \".\") {\n\t\t\tif dict, ok := v.(arvadosclient.Dict); !ok {\n\t\t\t\tcontinue call\n\t\t\t} else {\n\t\t\t\tv = dict[k]\n\t\t\t}\n\t\t}\n\t\tif v == expect {\n\t\t\treturn content\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (client *KeepTestClient) LocalLocator(locator string) (string, error) {\n\treturn locator, nil\n}\n\nfunc (client *KeepTestClient) BlockWrite(_ context.Context, opts arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {\n\tlocator := fmt.Sprintf(\"%x+%d\", md5.Sum(opts.Data), len(opts.Data))\n\tclient.blocks.Store(locator, append([]byte(nil), opts.Data...))\n\treturn arvados.BlockWriteResponse{\n\t\tLocator: locator,\n\t}, nil\n}\n\nfunc (client *KeepTestClient) BlockRead(_ context.Context, opts arvados.BlockReadOptions) (int, error) {\n\tloaded, ok := client.blocks.Load(opts.Locator)\n\tif !ok {\n\t\treturn 0, os.ErrNotExist\n\t}\n\tn, err := io.Copy(opts.WriteTo, bytes.NewReader(loaded.([]byte)))\n\treturn int(n), err\n}\n\nfunc (client *KeepTestClient) ReadAt(locator string, dst []byte, offset int) (int, error) {\n\tloaded, ok := client.blocks.Load(locator)\n\tif !ok {\n\t\treturn 0, os.ErrNotExist\n\t}\n\tdata := loaded.([]byte)\n\tif offset >= len(data) {\n\t\treturn 0, io.EOF\n\t}\n\treturn copy(dst, data[offset:]), nil\n}\n\nfunc (client *KeepTestClient) Close() {\n\tclient.blocks.Range(func(locator, value interface{}) bool {\n\t\tclient.blocks.Delete(locator)\n\t\treturn true\n\t})\n}\n\nfunc (client *KeepTestClient) SetStorageClasses(sc []string) {\n\tclient.StorageClasses = sc\n}\n\ntype FileWrapper struct 
{\n\tio.ReadCloser\n\tlen int64\n}\n\nfunc (fw FileWrapper) Readdir(n int) ([]os.FileInfo, error) {\n\treturn nil, errors.New(\"not implemented\")\n}\n\nfunc (fw FileWrapper) Seek(int64, int) (int64, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n\nfunc (fw FileWrapper) Size() int64 {\n\treturn fw.len\n}\n\nfunc (fw FileWrapper) Stat() (os.FileInfo, error) {\n\treturn nil, errors.New(\"not implemented\")\n}\n\nfunc (fw FileWrapper) Truncate(int64) error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (fw FileWrapper) Write([]byte) (int, error) {\n\treturn 0, errors.New(\"not implemented\")\n}\n\nfunc (fw FileWrapper) Sync() error {\n\treturn errors.New(\"not implemented\")\n}\n\nfunc (fw FileWrapper) Snapshot() (*arvados.Subtree, error) {\n\treturn nil, errors.New(\"not implemented\")\n}\n\nfunc (fw FileWrapper) Splice(*arvados.Subtree) error {\n\treturn errors.New(\"not implemented\")\n}\n\n// apiStubServer is an httptest server that serves canned\n// collection/container responses (or whatever intercept chooses to\n// handle) and reverse-proxies everything else to the real test API\n// server.\ntype apiStubServer struct {\n\tserver    *httptest.Server\n\tproxy     *httputil.ReverseProxy\n\tintercept func(http.ResponseWriter, *http.Request) bool\n\n\tcontainer arvados.Container\n}\n\nfunc apiStub() (*arvados.Client, *apiStubServer) {\n\tclient := arvados.NewClientFromEnv()\n\tapistub := &apiStubServer{}\n\tapistub.server = httptest.NewTLSServer(apistub)\n\tapistub.proxy = httputil.NewSingleHostReverseProxy(&url.URL{Scheme: \"https\", Host: client.APIHost})\n\tif client.Insecure {\n\t\tapistub.proxy.Transport = arvados.InsecureHTTPClient.Transport\n\t}\n\tclient.APIHost = apistub.server.Listener.Addr().String()\n\treturn client, apistub\n}\n\nfunc (apistub *apiStubServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif apistub.intercept != nil && apistub.intercept(w, r) {\n\t\treturn\n\t}\n\tif r.Method == \"GET\" && r.URL.Path == \"/arvados/v1/collections/\"+hwPDH {\n\t\tjson.NewEncoder(w).Encode(arvados.Collection{ManifestText: hwManifest})\n\t\treturn\n\t}\n\tif r.Method == \"GET\" && r.URL.Path == \"/arvados/v1/collections/\"+otherPDH {\n\t\tjson.NewEncoder(w).Encode(arvados.Collection{ManifestText: otherManifest})\n\t\treturn\n\t}\n\tif r.Method == \"GET\" && r.URL.Path == \"/arvados/v1/collections/\"+normalizedWithSubdirsPDH {\n\t\tjson.NewEncoder(w).Encode(arvados.Collection{ManifestText: normalizedManifestWithSubdirs})\n\t\treturn\n\t}\n\tif r.Method == \"GET\" && r.URL.Path == \"/arvados/v1/collections/\"+denormalizedWithSubdirsPDH {\n\t\tjson.NewEncoder(w).Encode(arvados.Collection{ManifestText: denormalizedManifestWithSubdirs})\n\t\treturn\n\t}\n\tif r.Method == \"GET\" && r.URL.Path == \"/arvados/v1/containers/\"+apistub.container.UUID {\n\t\tjson.NewEncoder(w).Encode(apistub.container)\n\t\treturn\n\t}\n\tapistub.proxy.ServeHTTP(w, r)\n}\n\n
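// A typical use of the stub (hypothetical container UUID) is:\n//\n//\tclient, stub := apiStub()\n//\tstub.container = arvados.Container{UUID: \"zzzzz-dz642-xxxxxxxxxxxxxxx\"}\n//\n// after which requests made through client for that container get the\n// canned record, and anything unrecognized falls through to the real\n// test API.\n\nfunc (s *TestSuite) TestLoadImage(c *C) {\n\ts.runner.Container.ContainerImage = arvadostest.DockerImage112PDH\n\ts.runner.Container.Mounts = map[string]arvados.Mount{\n\t\t\"/out\": {Kind: \"tmp\", Writable: true},\n\t}\n\ts.runner.Container.OutputPath = \"/out\"\n\n\t_, err := s.runner.SetupMounts()\n\tc.Assert(err, IsNil)\n\n\timageID, err := s.runner.LoadImage()\n\tc.Check(err, IsNil)\n\tc.Check(s.executor.loaded, Matches, \".*\"+regexp.QuoteMeta(arvadostest.DockerImage112Filename))\n\tc.Check(imageID, Equals, strings.TrimSuffix(arvadostest.DockerImage112Filename, \".tar\"))\n\n\ts.runner.Container.ContainerImage = arvadostest.DockerImage112PDH\n\ts.executor.imageLoaded = false\n\ts.executor.loaded = \"\"\n\ts.executor.loadErr = errors.New(\"bork\")\n\timageID, err = s.runner.LoadImage()\n\tc.Check(err, 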
ErrorMatches, \".*bork\")\n\tc.Check(s.executor.loaded, Matches, \".*\"+regexp.QuoteMeta(arvadostest.DockerImage112Filename))\n\n\ts.runner.Container.ContainerImage = fakeInputCollectionPDH\n\ts.executor.imageLoaded = false\n\ts.executor.loaded = \"\"\n\ts.executor.loadErr = nil\n\timageID, err = s.runner.LoadImage()\n\tc.Check(err, ErrorMatches, \"image collection does not include a \\\\.tar image file\")\n\tc.Check(s.executor.loaded, Equals, \"\")\n}\n\ntype ArvErrorTestClient struct{}\n\nfunc (ArvErrorTestClient) Create(resourceType string,\n\tparameters arvadosclient.Dict,\n\toutput interface{}) error {\n\treturn nil\n}\n\nfunc (ArvErrorTestClient) Call(method, resourceType, uuid, action string, parameters arvadosclient.Dict, output interface{}) error {\n\tif method == \"GET\" && resourceType == \"containers\" && action == \"auth\" {\n\t\treturn nil\n\t}\n\treturn errors.New(\"ArvError\")\n}\n\nfunc (ArvErrorTestClient) CallRaw(method, resourceType, uuid, action string,\n\tparameters arvadosclient.Dict) (reader io.ReadCloser, err error) {\n\treturn nil, errors.New(\"ArvError\")\n}\n\nfunc (ArvErrorTestClient) Get(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) error {\n\treturn errors.New(\"ArvError\")\n}\n\nfunc (ArvErrorTestClient) Update(resourceType string, uuid string, parameters arvadosclient.Dict, output interface{}) (err error) {\n\treturn nil\n}\n\nfunc (ArvErrorTestClient) Discovery(key string) (interface{}, error) {\n\treturn discoveryMap[key], nil\n}\n\ntype KeepErrorTestClient struct {\n\tKeepTestClient\n}\n\nfunc (*KeepErrorTestClient) BlockWrite(context.Context, arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {\n\treturn arvados.BlockWriteResponse{}, errors.New(\"KeepError\")\n}\n\nfunc (*KeepErrorTestClient) LocalLocator(string) (string, error) {\n\treturn \"\", errors.New(\"KeepError\")\n}\n\ntype KeepReadErrorTestClient struct {\n\tKeepTestClient\n}\n\nfunc (*KeepReadErrorTestClient) ReadAt(string, []byte, int) (int, error) {\n\treturn 0, errors.New(\"KeepError\")\n}\n\nfunc dockerLog(fd byte, msg string) []byte {\n\tby := []byte(msg)\n\theader := make([]byte, 8+len(by))\n\theader[0] = fd\n\theader[7] = byte(len(by))\n\tcopy(header[8:], by)\n\treturn header\n}\n\nfunc (s *TestSuite) TestRunContainer(c *C) {\n\ts.executor.runFunc = func() int {\n\t\tfmt.Fprintf(s.executor.created.Stdout, \"Hello world\\n\")\n\t\treturn 0\n\t}\n\n\ts.runner.Container.ContainerImage = arvadostest.DockerImage112PDH\n\ts.runner.Container.Command = []string{\"./hw\"}\n\ts.runner.Container.OutputStorageClasses = []string{\"default\"}\n\n\timageID, err := s.runner.LoadImage()\n\tc.Assert(err, IsNil)\n\n\terr = s.runner.CreateContainer(imageID, nil)\n\tc.Assert(err, IsNil)\n\n\terr = s.runner.StartContainer()\n\tc.Assert(err, IsNil)\n\n\terr = s.runner.WaitFinish()\n\tc.Assert(err, IsNil)\n\n\tc.Check(logFileContent(c, s.runner, \"stdout.txt\"), Matches, `2\\S+Z Hello world\\n`)\n\tc.Check(logFileContent(c, s.runner, \"stderr.txt\"), Matches, ``)\n}\n\nfunc (s *TestSuite) TestCommitLogs(c *C) {\n\tapi := &ArvTestClient{}\n\tkc := &KeepTestClient{}\n\tdefer kc.Close()\n\tcr, err := NewContainerRunner(s.client, api, kc, \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\")\n\tc.Assert(err, IsNil)\n\tf, err := cr.openLogFile(\"crunch-run\")\n\tc.Assert(err, IsNil)\n\tcr.CrunchLog = newLogWriter(newTestTimestamper(f))\n\n\tcr.CrunchLog.Print(\"Hello world!\")\n\tcr.CrunchLog.Print(\"Goodbye\")\n\tcr.finalState = \"Complete\"\n\n\terr = cr.CommitLogs()\n\tc.Check(err, 
IsNil)\n\n\tc.Check(api.Calls, Equals, 1)\n\tc.Check(api.Content[0][\"ensure_unique_name\"], Equals, true)\n\tc.Check(api.Content[0][\"collection\"].(arvadosclient.Dict)[\"name\"], Equals, \"logs for zzzzz-zzzzz-zzzzzzzzzzzzzzz\")\n\tc.Check(api.Content[0][\"collection\"].(arvadosclient.Dict)[\"manifest_text\"], Equals, \". 744b2e4553123b02fa7b452ec5c18993+123 0:123:crunch-run.txt\\n\")\n\tc.Check(*cr.logPDHFinal, Equals, \"63da7bdacf08c40f604daad80c261e9a+60\")\n}\n\nfunc (s *TestSuite) TestUpdateContainerRunning(c *C) {\n\tapi := &ArvTestClient{}\n\tkc := &KeepTestClient{}\n\tdefer kc.Close()\n\tcr, err := NewContainerRunner(s.client, api, kc, \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\")\n\tc.Assert(err, IsNil)\n\n\terr = cr.UpdateContainerRunning()\n\tc.Check(err, IsNil)\n\n\tc.Check(api.Content[0][\"container\"].(arvadosclient.Dict)[\"state\"], Equals, \"Running\")\n}\n\nfunc (s *TestSuite) TestUpdateContainerComplete(c *C) {\n\tapi := &ArvTestClient{}\n\tkc := &KeepTestClient{}\n\tdefer kc.Close()\n\tcr, err := NewContainerRunner(s.client, api, kc, \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\")\n\tc.Assert(err, IsNil)\n\n\tcr.logPDHFinal = new(string)\n\t*cr.logPDHFinal = \"d3a229d2fe3690c2c3e75a71a153c6a3+60\"\n\n\tcr.ExitCode = new(int)\n\t*cr.ExitCode = 42\n\tcr.finalState = \"Complete\"\n\n\terr = cr.UpdateContainerFinal()\n\tc.Check(err, IsNil)\n\n\tc.Check(api.Content[0][\"container\"].(arvadosclient.Dict)[\"log\"], Equals, *cr.logPDHFinal)\n\tc.Check(api.Content[0][\"container\"].(arvadosclient.Dict)[\"exit_code\"], Equals, *cr.ExitCode)\n\tc.Check(api.Content[0][\"container\"].(arvadosclient.Dict)[\"state\"], Equals, \"Complete\")\n}\n\nfunc (s *TestSuite) TestUpdateContainerCancelled(c *C) {\n\tapi := &ArvTestClient{}\n\tkc := &KeepTestClient{}\n\tdefer kc.Close()\n\tcr, err := NewContainerRunner(s.client, api, kc, \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\")\n\tc.Assert(err, IsNil)\n\tcr.cCancelled = true\n\tcr.finalState = \"Cancelled\"\n\n\terr = cr.UpdateContainerFinal()\n\tc.Check(err, IsNil)\n\n\tc.Check(api.Content[0][\"container\"].(arvadosclient.Dict)[\"log\"], IsNil)\n\tc.Check(api.Content[0][\"container\"].(arvadosclient.Dict)[\"exit_code\"], IsNil)\n\tc.Check(api.Content[0][\"container\"].(arvadosclient.Dict)[\"state\"], Equals, \"Cancelled\")\n}\n\n// fullRunHelper is used by the TestFullRun*() tests below to DRY up\n// the boilerplate of doing a full dress rehearsal of the Run()\n// function, starting from a JSON container record.\nfunc (s *TestSuite) fullRunHelper(c *C, record string, extraMounts []string, fn func() int) (*ArvTestClient, *ContainerRunner, string) {\n\terr := json.Unmarshal([]byte(record), &s.api.Container)\n\tc.Assert(err, IsNil)\n\tinitialState := s.api.Container.State\n\n\tvar sm struct {\n\t\tSecretMounts map[string]arvados.Mount `json:\"secret_mounts\"`\n\t}\n\terr = json.Unmarshal([]byte(record), &sm)\n\tc.Check(err, IsNil)\n\tsecretMounts, err := json.Marshal(sm)\n\tc.Assert(err, IsNil)\n\tc.Logf(\"SecretMounts decoded %v json %q\", sm, secretMounts)\n\n\ts.executor.runFunc = fn\n\n\ts.runner.statInterval = 100 * time.Millisecond\n\ts.runner.containerWatchdogInterval = time.Second\n\n\trealTemp := c.MkDir()\n\ttempcount := 0\n\ts.runner.MkTempDir = func(_, prefix string) (string, error) {\n\t\ttempcount++\n\t\td := fmt.Sprintf(\"%s/%s%d\", realTemp, prefix, tempcount)\n\t\terr := os.Mkdir(d, os.ModePerm)\n\t\tif err != nil && strings.Contains(err.Error(), \": file exists\") {\n\t\t\t// Test case must have pre-populated the tempdir\n\t\t\terr = nil\n\t\t}\n\t\treturn d, err\n\t}\n
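\t// Route the container-side Arvados client through the local API\n\t// stub so canned collection/container responses are served without\n\t// touching the real test cluster.\n\tclient, _ := 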
apiStub()\n\ts.runner.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {\n\t\treturn &ArvTestClient{secretMounts: secretMounts}, &s.testContainerKeepClient, client, nil\n\t}\n\n\tif extraMounts != nil && len(extraMounts) > 0 {\n\t\terr := s.runner.SetupArvMountPoint(\"keep\")\n\t\tc.Check(err, IsNil)\n\n\t\tfor _, m := range extraMounts {\n\t\t\tos.MkdirAll(s.runner.ArvMountPoint+\"/by_id/\"+m, os.ModePerm)\n\t\t}\n\t}\n\n\terr = s.runner.Run()\n\tif s.api.CalledWith(\"container.state\", \"Complete\") != nil {\n\t\tc.Check(err, IsNil)\n\t}\n\tif s.executor.loadErr == nil && s.executor.createErr == nil && initialState != \"Running\" {\n\t\tc.Check(s.api.WasSetRunning, Equals, true)\n\t\tvar lastupdate arvadosclient.Dict\n\t\tfor _, content := range s.api.Content {\n\t\t\tif content[\"container\"] != nil {\n\t\t\t\tlastupdate = content[\"container\"].(arvadosclient.Dict)\n\t\t\t}\n\t\t}\n\t\tif lastupdate[\"log\"] == nil {\n\t\t\tc.Errorf(\"no container update with non-nil log -- updates were: %v\", s.api.Content)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tdumpAllLogFiles(c, s.runner)\n\t}\n\n\treturn s.api, s.runner, realTemp\n}\n\nfunc (s *TestSuite) TestFullRunHello(c *C) {\n\ts.runner.enableMemoryLimit = true\n\ts.runner.networkMode = \"default\"\n\ts.fullRunHelper(c, `{\n    \"command\": [\"echo\", \"hello world\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \".\",\n    \"environment\": {\"foo\":\"bar\",\"baz\":\"waz\"},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {\"vcpus\":1,\"ram\":1000000},\n    \"state\": \"Locked\",\n    \"output_storage_classes\": [\"default\"]\n}`, nil, func() int {\n\t\tc.Check(s.executor.created.Command, DeepEquals, []string{\"echo\", \"hello world\"})\n\t\tc.Check(s.executor.created.Image, Equals, \"sha256:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678\")\n\t\tc.Check(s.executor.created.Env, DeepEquals, map[string]string{\"foo\": \"bar\", \"baz\": \"waz\"})\n\t\tc.Check(s.executor.created.VCPUs, Equals, 1)\n\t\tc.Check(s.executor.created.RAM, Equals, int64(1000000))\n\t\tc.Check(s.executor.created.NetworkMode, Equals, \"default\")\n\t\tc.Check(s.executor.created.EnableNetwork, Equals, false)\n\t\tc.Check(s.executor.created.GPUDeviceCount, Equals, 0)\n\t\tfmt.Fprintln(s.executor.created.Stdout, \"hello world\")\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tc.Check(logFileContent(c, s.runner, \"stdout.txt\"), Matches, `2\\S+Z hello world\\n`)\n\tc.Check(s.testDispatcherKeepClient.StorageClasses, DeepEquals, []string{\"default\"})\n\tc.Check(s.testContainerKeepClient.StorageClasses, DeepEquals, []string{\"default\"})\n}\n\nfunc (s *TestSuite) TestRunAlreadyRunning(c *C) {\n\tvar ran bool\n\ts.fullRunHelper(c, `{\n    \"command\": [\"sleep\", \"3\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \".\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {},\n    \"scheduling_parameters\":{\"max_run_time\": 1},\n    \"state\": \"Running\"\n}`, nil, func() int {\n\t\tran = true\n\t\treturn 2\n\t})\n\tc.Check(s.api.CalledWith(\"container.state\", \"Cancelled\"), IsNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), 
IsNil)\n\tc.Check(ran, Equals, false)\n}\n\nfunc ec2MetadataServerStub(c *C, token *string, failureRate float64, stoptime *atomic.Value) *httptest.Server {\n\tfailedOnce := false\n\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif !failedOnce || rand.Float64() < failureRate {\n\t\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t\t\tfailedOnce = true\n\t\t\treturn\n\t\t}\n\t\tswitch r.URL.Path {\n\t\tcase \"/latest/api/token\":\n\t\t\tfmt.Fprintln(w, *token)\n\t\tcase \"/latest/meta-data/spot/instance-action\":\n\t\t\tif r.Header.Get(\"X-aws-ec2-metadata-token\") != *token {\n\t\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\t} else if t, _ := stoptime.Load().(time.Time); t.IsZero() {\n\t\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(w, `{\"action\":\"stop\",\"time\":\"%s\"}`, t.Format(time.RFC3339))\n\t\t\t}\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t}\n\t}))\n}\n\nfunc (s *TestSuite) TestSpotInterruptionNotice(c *C) {\n\ts.testSpotInterruptionNotice(c, 0.1)\n}\n\nfunc (s *TestSuite) TestSpotInterruptionNoticeNotAvailable(c *C) {\n\ts.testSpotInterruptionNotice(c, 1)\n}\n\nfunc (s *TestSuite) testSpotInterruptionNotice(c *C, failureRate float64) {\n\tvar stoptime atomic.Value\n\ttoken := \"fake-ec2-metadata-token\"\n\tstub := ec2MetadataServerStub(c, &token, failureRate, &stoptime)\n\tdefer stub.Close()\n\n\tdefer func(i time.Duration, u string) {\n\t\tspotInterruptionCheckInterval = i\n\t\tec2MetadataBaseURL = u\n\t}(spotInterruptionCheckInterval, ec2MetadataBaseURL)\n\tspotInterruptionCheckInterval = time.Second / 8\n\tec2MetadataBaseURL = stub.URL\n\n\tcheckedLogs := false\n\tcheckLogs := func() {\n\t\tcheckedLogs = true\n\t\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, `(?ms).*Checking for spot instance interruptions every 125ms using instance metadata at http://.*`)\n\t\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, `(?ms).*Spot instance interruption check was inconclusive: 503 Service Unavailable -- will retry in 125ms.*`)\n\t\tif failureRate == 1 {\n\t\t\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, `(?ms).*Spot instance interruption check was inconclusive: 503 Service Unavailable -- now giving up after too many consecutive errors.*`)\n\t\t\treturn\n\t\t}\n\t\ttext := `Cloud provider scheduled instance stop at ` + stoptime.Load().(time.Time).Format(time.RFC3339)\n\t\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, `(?ms).*`+text+`.*`)\n\t\tc.Check(s.api.CalledWith(\"container.runtime_status.warning\", \"preemption notice\"), NotNil)\n\t\tc.Check(s.api.CalledWith(\"container.runtime_status.warningDetail\", text), NotNil)\n\t\tc.Check(s.api.CalledWith(\"container.runtime_status.preemptionNotice\", text), NotNil)\n\n\t\t// Check that the log collection was saved, and the\n\t\t// container record updated with the new PDH,\n\t\t// immediately after the preemption notice was\n\t\t// received -- i.e., while the container is still\n\t\t// running.\n\t\tlastpdh := \"\"\n\t\tsaved := make(map[string]string) // pdh => manifest_text\n\t\tfor _, call := range s.api.Content {\n\t\t\tif ctr, ok := call[\"container\"].(arvadosclient.Dict); ok {\n\t\t\t\tif pdh, ok := ctr[\"log\"].(string); ok {\n\t\t\t\t\tlastpdh = pdh\n\t\t\t\t}\n\t\t\t}\n\t\t\tif coll, ok := call[\"collection\"].(arvadosclient.Dict); ok {\n\t\t\t\tmt, _ := coll[\"manifest_text\"].(string)\n\t\t\t\tif strings.Contains(mt, \":crunch-run.txt\") 
{\n\t\t\t\t\tsaved[arvados.PortableDataHash(mt)] = mt\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tlogfs, err := (&arvados.Collection{ManifestText: saved[lastpdh]}).FileSystem(s.runner.dispatcherClient, s.runner.DispatcherKeepClient)\n\t\tc.Assert(err, IsNil)\n\t\tlog, err := fs.ReadFile(arvados.FS(logfs), \"crunch-run.txt\")\n\t\tc.Check(err, IsNil)\n\t\tc.Check(string(log), Matches, `(?ms).*\\Q`+text+`\\E.*`)\n\t}\n\n\tgo s.runner.checkSpotInterruptionNotices()\n\ts.fullRunHelper(c, `{\n    \"command\": [\"sleep\", \"3\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \".\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {},\n    \"state\": \"Locked\"\n}`, nil, func() int {\n\t\ttime.Sleep(time.Second)\n\t\tstoptime.Store(time.Now().Add(time.Minute).UTC())\n\t\ttoken = \"different-fake-ec2-metadata-token\"\n\t\ttime.Sleep(time.Second)\n\t\tcheckLogs()\n\t\treturn 0\n\t})\n\tc.Check(checkedLogs, Equals, true)\n}\n\nfunc (s *TestSuite) TestRunTimeExceeded(c *C) {\n\ts.fullRunHelper(c, `{\n    \"command\": [\"sleep\", \"3\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \".\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {},\n    \"scheduling_parameters\":{\"max_run_time\": 1},\n    \"state\": \"Locked\"\n}`, nil, func() int {\n\t\ttime.Sleep(3 * time.Second)\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.state\", \"Cancelled\"), NotNil)\n\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, \"(?ms).*maximum run time exceeded.*\")\n}\n\nfunc (s *TestSuite) TestContainerWaitFails(c *C) {\n\ts.fullRunHelper(c, `{\n    \"command\": [\"sleep\", \"3\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \".\",\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"state\": \"Locked\"\n}`, nil, func() int {\n\t\ts.executor.waitErr = errors.New(\"Container is not running\")\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.state\", \"Cancelled\"), NotNil)\n\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, \"(?ms).*Container is not running.*\")\n}\n\nfunc (s *TestSuite) TestCrunchstat(c *C) {\n\ts.runner.crunchstatFakeFS = os.DirFS(\"../crunchstat/testdata/debian12\")\n\ts.fullRunHelper(c, `{\n\t\t\"command\": [\"sleep\", \"1\"],\n\t\t\"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n\t\t\"cwd\": \".\",\n\t\t\"environment\": {},\n\t\t\"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\"\n\t}`, nil, func() int {\n\t\ttime.Sleep(time.Second)\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\n\tc.Check(logFileContent(c, s.runner, \"crunchstat.txt\"), Matches, `(?ms).*`+reTimestamp+` mem \\d+ swap \\d+ pgmajfault \\d+ rss.*`)\n\tc.Check(logFileContent(c, s.runner, \"hoststat.txt\"), Matches, `(?ms).*`+reTimestamp+` mem \\d+ swap \\d+ pgmajfault \\d+ rss.*`)\n\n\t// Check that we called (*crunchstat.Reporter)Stop().\n\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, `(?ms).*`+reTimestamp+` Maximum crunch-run memory rss usage was \\d+ 
bytes\\n.*`)\n}\n\nfunc (s *TestSuite) TestNodeInfoLog(c *C) {\n\ts.fullRunHelper(c, `{\n\t\t\"command\": [\"true\"],\n\t\t\"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n\t\t\"cwd\": \".\",\n\t\t\"environment\": {},\n\t\t\"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\"\n\t}`, nil, func() int {\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\n\tjson := logFileContent(c, s.runner, \"node.json\")\n\tc.Check(json, Matches, `(?ms).*\"ProviderType\": *\"a1\\.2xlarge\".*`)\n\tc.Check(json, Matches, `(?ms).*\"Price\": *1\\.2.*`)\n\n\tnodeinfo := logFileContent(c, s.runner, \"node-info.txt\")\n\tc.Check(nodeinfo, Matches, `(?ms).*Host Information.*`)\n\tc.Check(nodeinfo, Matches, `(?ms).*CPU Information.*`)\n\tc.Check(nodeinfo, Matches, `(?ms).*Memory Information.*`)\n\tc.Check(nodeinfo, Matches, `(?ms).*Disk Space.*`)\n\tc.Check(nodeinfo, Matches, `(?ms).*Disk INodes.*`)\n}\n\nfunc (s *TestSuite) TestLogVersionAndRuntime(c *C) {\n\ts.fullRunHelper(c, `{\n\t\t\"command\": [\"sleep\", \"1\"],\n\t\t\"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n\t\t\"cwd\": \".\",\n\t\t\"environment\": {},\n\t\t\"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\"\n\t}`, nil, func() int {\n\t\treturn 0\n\t})\n\n\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, `(?ms).*crunch-run \\S+ \\(go\\S+\\) start.*`)\n\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, `(?ms).*crunch-run process has uid=\\d+\\(.+\\) gid=\\d+\\(.+\\) groups=\\d+\\(.+\\)(,\\d+\\(.+\\))*\\n.*`)\n\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, `(?ms).*Executing container: zzzzz-zzzzz-zzzzzzzzzzzzzzz.*`)\n\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, `(?ms).*Using container runtime: stub.*`)\n}\n\nfunc (s *TestSuite) testLogRSSThresholds(c *C, ram int64, expected []int, notExpected int) {\n\ts.runner.crunchstatFakeFS = os.DirFS(\"../crunchstat/testdata/debian12\")\n\ts.fullRunHelper(c, `{\n\t\t\"command\": [\"true\"],\n\t\t\"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n\t\t\"cwd\": \".\",\n\t\t\"environment\": {},\n\t\t\"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {\"ram\": `+fmt.Sprintf(\"%d\", ram)+`},\n\t\t\"state\": \"Locked\"\n\t}`, nil, func() int { return 0 })\n\tlogs := logFileContent(c, s.runner, \"crunch-run.txt\")\n\tc.Log(\"=== crunchstat logs\")\n\tc.Log(logs)\n\tpattern := logLineStart + `Container using over %d%% of memory \\(rss %d/%d bytes\\)`\n\tvar threshold int\n\tfor _, threshold = range expected {\n\t\tc.Check(logs, Matches, fmt.Sprintf(pattern, threshold, s.debian12MemoryCurrent, ram))\n\t}\n\tif notExpected > threshold {\n\t\tc.Check(logs, Not(Matches), fmt.Sprintf(pattern, notExpected, s.debian12MemoryCurrent, ram))\n\t}\n}\n\nfunc (s *TestSuite) TestLogNoRSSThresholds(c *C) {\n\ts.testLogRSSThresholds(c, s.debian12MemoryCurrent*10, []int{}, 90)\n}\n\nfunc (s *TestSuite) TestLogSomeRSSThresholds(c *C) {\n\tonePercentRSS := s.debian12MemoryCurrent / 100\n\ts.testLogRSSThresholds(c, 102*onePercentRSS, []int{90, 95}, 99)\n}\n\nfunc (s *TestSuite) TestLogAllRSSThresholds(c *C) 
{\n\ts.testLogRSSThresholds(c, s.debian12MemoryCurrent, []int{90, 95, 99}, 0)\n}\n\nfunc (s *TestSuite) TestLogMaximaAfterRun(c *C) {\n\ts.runner.crunchstatFakeFS = os.DirFS(\"../crunchstat/testdata/debian12\")\n\ts.runner.parentTemp = c.MkDir()\n\ts.fullRunHelper(c, `{\n        \"command\": [\"true\"],\n        \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n        \"cwd\": \".\",\n        \"environment\": {},\n        \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n        \"output_path\": \"/tmp\",\n        \"priority\": 1,\n        \"runtime_constraints\": {\"ram\": `+fmt.Sprintf(\"%d\", s.debian12MemoryCurrent*10)+`},\n        \"state\": \"Locked\"\n    }`, nil, func() int { return 0 })\n\tlogs := logFileContent(c, s.runner, \"crunch-run.txt\")\n\tfor _, expected := range []string{\n\t\t`Maximum disk usage was \\d+%, \\d+/\\d+ bytes`,\n\t\tfmt.Sprintf(`Maximum container memory swap usage was %d bytes`, s.debian12SwapCurrent),\n\t\t`Maximum container memory pgmajfault usage was \\d+ faults`,\n\t\tfmt.Sprintf(`Maximum container memory rss usage was 10%%, %d/%d bytes`, s.debian12MemoryCurrent, s.debian12MemoryCurrent*10),\n\t\t`Maximum crunch-run memory rss usage was \\d+ bytes`,\n\t} {\n\t\tc.Check(logs, Matches, logLineStart+expected)\n\t}\n}\n\nfunc (s *TestSuite) TestCommitNodeInfoBeforeStart(c *C) {\n\tvar collection_create, container_update arvadosclient.Dict\n\ts.fullRunHelper(c, `{\n\t\t\"command\": [\"true\"],\n\t\t\"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n\t\t\"cwd\": \".\",\n\t\t\"environment\": {},\n\t\t\"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\",\n\t\t\"uuid\": \"zzzzz-dz642-202301121543210\"\n\t}`, nil, func() int {\n\t\tcollection_create = s.api.CalledWith(\"ensure_unique_name\", true)\n\t\tcontainer_update = s.api.CalledWith(\"container.state\", \"Running\")\n\t\treturn 0\n\t})\n\n\tc.Assert(collection_create, NotNil)\n\tlog_collection := collection_create[\"collection\"].(arvadosclient.Dict)\n\tc.Check(log_collection[\"name\"], Equals, \"logs for zzzzz-dz642-202301121543210\")\n\tmanifest_text := log_collection[\"manifest_text\"].(string)\n\t// We check that the file size is at least two digits as an easy way to\n\t// check the file isn't empty.\n\tc.Check(manifest_text, Matches, `\\. .+ \\d+:\\d{2,}:node-info\\.txt( .+)?\\n`)\n\tc.Check(manifest_text, Matches, `\\. 
.+ \\d+:\\d{2,}:node\\.json( .+)?\\n`)\n\n\tc.Assert(container_update, NotNil)\n\t// As of Arvados 2.5.0, the container update must specify its log in PDH\n\t// format for the API server to propagate it to container requests, which\n\t// is what we care about for this test.\n\texpect_pdh := fmt.Sprintf(\"%x+%d\", md5.Sum([]byte(manifest_text)), len(manifest_text))\n\tc.Check(container_update[\"container\"].(arvadosclient.Dict)[\"log\"], Equals, expect_pdh)\n}\n\nfunc (s *TestSuite) TestContainerRecordLog(c *C) {\n\ts.fullRunHelper(c, `{\n\t\t\"command\": [\"sleep\", \"1\"],\n\t\t\"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n\t\t\"cwd\": \".\",\n\t\t\"environment\": {},\n\t\t\"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\"\n\t}`, nil,\n\t\tfunc() int {\n\t\t\ttime.Sleep(time.Second)\n\t\t\treturn 0\n\t\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\n\tc.Check(logFileContent(c, s.runner, \"container.json\"), Matches, `(?ms).*container_image.*`)\n}\n\nfunc (s *TestSuite) TestFullRunStderr(c *C) {\n\ts.fullRunHelper(c, `{\n    \"command\": [\"/bin/sh\", \"-c\", \"echo hello ; echo world 1>&2 ; exit 1\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \".\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {},\n    \"state\": \"Locked\"\n}`, nil, func() int {\n\t\tfmt.Fprintln(s.executor.created.Stdout, \"hello\")\n\t\tfmt.Fprintln(s.executor.created.Stderr, \"world\")\n\t\treturn 1\n\t})\n\n\tfinal := s.api.CalledWith(\"container.state\", \"Complete\")\n\tc.Assert(final, NotNil)\n\tc.Check(final[\"container\"].(arvadosclient.Dict)[\"exit_code\"], Equals, 1)\n\tc.Check(final[\"container\"].(arvadosclient.Dict)[\"log\"], NotNil)\n\n\tc.Check(logFileContent(c, s.runner, \"stdout.txt\"), Matches, \".*hello\\n\")\n\tc.Check(logFileContent(c, s.runner, \"stderr.txt\"), Matches, \".*world\\n\")\n}\n\nfunc (s *TestSuite) TestFullRunDefaultCwd(c *C) {\n\ts.fullRunHelper(c, `{\n    \"command\": [\"pwd\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \".\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {},\n    \"state\": \"Locked\"\n}`, nil, func() int {\n\t\tfmt.Fprintf(s.executor.created.Stdout, \"workdir=%q\", s.executor.created.WorkingDir)\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tc.Check(logFileContent(c, s.runner, \"stdout.txt\"), Matches, `.*workdir=\"\"`)\n}\n\nfunc (s *TestSuite) TestFullRunSetCwd(c *C) {\n\ts.fullRunHelper(c, `{\n    \"command\": [\"pwd\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \"/bin\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {},\n    \"state\": \"Locked\"\n}`, nil, func() int {\n\t\tfmt.Fprintln(s.executor.created.Stdout, s.executor.created.WorkingDir)\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", 
\"Complete\"), NotNil)\n\tc.Check(logFileContent(c, s.runner, \"stdout.txt\"), Matches, \".*/bin\\n\")\n}\n\nfunc (s *TestSuite) TestFullRunSetOutputStorageClasses(c *C) {\n\ts.fullRunHelper(c, `{\n    \"command\": [\"pwd\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \"/bin\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {},\n    \"state\": \"Locked\",\n    \"output_storage_classes\": [\"foo\", \"bar\"]\n}`, nil, func() int {\n\t\tfmt.Fprintln(s.executor.created.Stdout, s.executor.created.WorkingDir)\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tc.Check(logFileContent(c, s.runner, \"stdout.txt\"), Matches, \".*/bin\\n\")\n\tc.Check(s.testDispatcherKeepClient.StorageClasses, DeepEquals, []string{\"foo\", \"bar\"})\n\tc.Check(s.testContainerKeepClient.StorageClasses, DeepEquals, []string{\"foo\", \"bar\"})\n}\n\nfunc (s *TestSuite) TestEnableCUDADeviceCount(c *C) {\n\ts.fullRunHelper(c, `{\n    \"command\": [\"pwd\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \"/bin\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {\"gpu\": {\"device_count\": 2, \"stack\": \"cuda\", \"hardware_target\": [\"9.0\"], \"driver_version\": \"11.0\", \"vram\": 8000000000}},\n    \"state\": \"Locked\",\n    \"output_storage_classes\": [\"foo\", \"bar\"]\n}`, nil, func() int {\n\t\tfmt.Fprintln(s.executor.created.Stdout, \"ok\")\n\t\treturn 0\n\t})\n\tc.Check(s.executor.created.GPUDeviceCount, Equals, 2)\n\tc.Check(s.executor.created.GPUStack, Equals, \"cuda\")\n}\n\nfunc (s *TestSuite) TestStopOnSignal(c *C) {\n\ts.executor.runFunc = func() int {\n\t\ts.executor.created.Stdout.Write([]byte(\"foo\\n\"))\n\t\ts.runner.SigChan <- syscall.SIGINT\n\t\ttime.Sleep(10 * time.Second)\n\t\treturn 0\n\t}\n\ts.testStopContainer(c)\n}\n\nfunc (s *TestSuite) TestStopOnArvMountDeath(c *C) {\n\ts.executor.runFunc = func() int {\n\t\ts.executor.created.Stdout.Write([]byte(\"foo\\n\"))\n\t\ts.runner.ArvMountExit <- nil\n\t\tclose(s.runner.ArvMountExit)\n\t\ttime.Sleep(10 * time.Second)\n\t\treturn 0\n\t}\n\ts.runner.ArvMountExit = make(chan error)\n\ts.testStopContainer(c)\n}\n\nfunc (s *TestSuite) testStopContainer(c *C) {\n\trecord := `{\n    \"command\": [\"/bin/sh\", \"-c\", \"echo foo && sleep 30 && echo bar\"],\n    \"container_image\": \"` + arvadostest.DockerImage112PDH + `\",\n    \"cwd\": \".\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {},\n    \"state\": \"Locked\"\n}`\n\n\terr := json.Unmarshal([]byte(record), &s.api.Container)\n\tc.Assert(err, IsNil)\n\n\ts.runner.RunArvMount = func([]string, string) (*exec.Cmd, error) { return nil, nil }\n\ts.runner.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {\n\t\treturn &ArvTestClient{}, &KeepTestClient{}, nil, nil\n\t}\n\n\tdone := make(chan error)\n\tgo func() {\n\t\tdone <- s.runner.Run()\n\t}()\n\tselect {\n\tcase <-time.After(20 * time.Second):\n\t\tpprof.Lookup(\"goroutine\").WriteTo(os.Stderr, 1)\n\t\tc.Fatal(\"timed out\")\n\tcase err = <-done:\n\t\tc.Check(err, IsNil)\n\t}\n\tdumpAllLogFiles(c, 
s.runner)\n\n\tc.Check(s.api.CalledWith(\"container.log\", nil), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Cancelled\"), NotNil)\n\tc.Check(logFileContent(c, s.runner, \"stdout.txt\"), Matches, \"(?ms).*foo\\n$\")\n}\n\nfunc (s *TestSuite) TestFullRunSetEnv(c *C) {\n\ts.fullRunHelper(c, `{\n    \"command\": [\"/bin/sh\", \"-c\", \"echo $FROBIZ\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \"/bin\",\n    \"environment\": {\"FROBIZ\": \"bilbo\"},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {},\n    \"state\": \"Locked\"\n}`, nil, func() int {\n\t\tfmt.Fprintf(s.executor.created.Stdout, \"%v\", s.executor.created.Env)\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tc.Check(logFileContent(c, s.runner, \"stdout.txt\"), Matches, `.*map\\[FROBIZ:bilbo\\]`)\n}\n\ntype ArvMountCmdLine struct {\n\tCmd   []string\n\ttoken string\n}\n\nfunc (am *ArvMountCmdLine) ArvMountTest(c []string, token string) (*exec.Cmd, error) {\n\tam.Cmd = c\n\tam.token = token\n\treturn nil, nil\n}\n\nfunc stubCert(c *C, temp string) string {\n\tpath := temp + \"/ca-certificates.crt\"\n\terr := os.WriteFile(path, []byte{}, 0666)\n\tc.Assert(err, IsNil)\n\tos.Setenv(\"SSL_CERT_FILE\", path)\n\treturn path\n}\n\nfunc (s *TestSuite) TestSetupMounts(c *C) {\n\tcr := s.runner\n\tam := &ArvMountCmdLine{}\n\tcr.RunArvMount = am.ArvMountTest\n\tcr.containerClient, _ = apiStub()\n\tcr.ContainerArvClient = &ArvTestClient{}\n\tcr.ContainerKeepClient = &KeepTestClient{}\n\tcr.Container.OutputStorageClasses = []string{\"default\"}\n\n\trealTemp := c.MkDir()\n\tcertTemp := c.MkDir()\n\tstubCertPath := stubCert(c, certTemp)\n\tcr.parentTemp = realTemp\n\n\ti := 0\n\tcr.MkTempDir = func(_ string, prefix string) (string, error) {\n\t\ti++\n\t\td := fmt.Sprintf(\"%s/%s%d\", realTemp, prefix, i)\n\t\terr := os.Mkdir(d, os.ModePerm)\n\t\tif err != nil && strings.Contains(err.Error(), \": file exists\") {\n\t\t\t// Test case must have pre-populated the tempdir\n\t\t\terr = nil\n\t\t}\n\t\treturn d, err\n\t}\n\n\tcheckEmpty := func() {\n\t\t// Should be deleted.\n\t\t_, err := os.Stat(realTemp)\n\t\tc.Assert(os.IsNotExist(err), Equals, true)\n\n\t\t// Now recreate it for the next test.\n\t\tc.Assert(os.Mkdir(realTemp, 0777), IsNil)\n\t}\n\n\t{\n\t\ti = 0\n\t\tcr.ArvMountPoint = \"\"\n\t\tcr.Container.Mounts = make(map[string]arvados.Mount)\n\t\tcr.Container.Mounts[\"/tmp\"] = arvados.Mount{Kind: \"tmp\"}\n\t\tcr.Container.OutputPath = \"/tmp\"\n\t\tcr.statInterval = 5 * time.Second\n\t\tbindmounts, err := cr.SetupMounts()\n\t\tc.Check(err, IsNil)\n\t\tc.Check(am.Cmd, DeepEquals, []string{\"arv-mount\", \"--foreground\",\n\t\t\t\"--read-write\", \"--storage-classes\", \"default\", \"--crunchstat-interval=5\",\n\t\t\t\"--mount-by-pdh\", \"by_id\", \"--disable-event-listening\", \"--mount-by-id\", \"by_uuid\", realTemp + \"/keep1\"})\n\t\tc.Check(bindmounts, DeepEquals, map[string]bindmount{\"/tmp\": {realTemp + \"/tmp2\", false}})\n\t\tos.RemoveAll(cr.ArvMountPoint)\n\t\tcr.CleanupDirs()\n\t\tcheckEmpty()\n\t}\n\n\t{\n\t\ti = 0\n\t\tcr.ArvMountPoint = \"\"\n\t\tcr.Container.Mounts = make(map[string]arvados.Mount)\n\t\tcr.Container.Mounts[\"/out\"] = arvados.Mount{Kind: \"tmp\"}\n\t\tcr.Container.Mounts[\"/tmp\"] = arvados.Mount{Kind: \"tmp\"}\n\t\tcr.Container.OutputPath = 
\"/out\"\n\t\tcr.Container.OutputStorageClasses = []string{\"foo\", \"bar\"}\n\n\t\tbindmounts, err := cr.SetupMounts()\n\t\tc.Check(err, IsNil)\n\t\tc.Check(am.Cmd, DeepEquals, []string{\"arv-mount\", \"--foreground\",\n\t\t\t\"--read-write\", \"--storage-classes\", \"foo,bar\", \"--crunchstat-interval=5\",\n\t\t\t\"--mount-by-pdh\", \"by_id\", \"--disable-event-listening\", \"--mount-by-id\", \"by_uuid\", realTemp + \"/keep1\"})\n\t\tc.Check(bindmounts, DeepEquals, map[string]bindmount{\"/out\": {realTemp + \"/tmp2\", false}, \"/tmp\": {realTemp + \"/tmp3\", false}})\n\t\tos.RemoveAll(cr.ArvMountPoint)\n\t\tcr.CleanupDirs()\n\t\tcheckEmpty()\n\t}\n\n\t{\n\t\ti = 0\n\t\tcr.ArvMountPoint = \"\"\n\t\tcr.Container.Mounts = make(map[string]arvados.Mount)\n\t\tcr.Container.Mounts[\"/tmp\"] = arvados.Mount{Kind: \"tmp\"}\n\t\tcr.Container.OutputPath = \"/tmp\"\n\t\tcr.Container.RuntimeConstraints.API = true\n\t\tcr.Container.OutputStorageClasses = []string{\"default\"}\n\n\t\tbindmounts, err := cr.SetupMounts()\n\t\tc.Check(err, IsNil)\n\t\tc.Check(am.Cmd, DeepEquals, []string{\"arv-mount\", \"--foreground\",\n\t\t\t\"--read-write\", \"--storage-classes\", \"default\", \"--crunchstat-interval=5\",\n\t\t\t\"--mount-by-pdh\", \"by_id\", \"--disable-event-listening\", \"--mount-by-id\", \"by_uuid\", realTemp + \"/keep1\"})\n\t\tc.Check(bindmounts, DeepEquals, map[string]bindmount{\"/tmp\": {realTemp + \"/tmp2\", false}, \"/etc/arvados/ca-certificates.crt\": {stubCertPath, true}})\n\t\tos.RemoveAll(cr.ArvMountPoint)\n\t\tcr.CleanupDirs()\n\t\tcheckEmpty()\n\n\t\tcr.Container.RuntimeConstraints.API = false\n\t}\n\n\t{\n\t\ti = 0\n\t\tcr.ArvMountPoint = \"\"\n\t\tcr.Container.Mounts = map[string]arvados.Mount{\n\t\t\t\"/keeptmp\": {Kind: \"collection\", Writable: true},\n\t\t}\n\t\tcr.Container.OutputPath = \"/keeptmp\"\n\n\t\tos.MkdirAll(realTemp+\"/keep1/tmp0\", os.ModePerm)\n\n\t\tbindmounts, err := cr.SetupMounts()\n\t\tc.Check(err, IsNil)\n\t\tc.Check(am.Cmd, DeepEquals, []string{\"arv-mount\", \"--foreground\",\n\t\t\t\"--read-write\", \"--storage-classes\", \"default\", \"--crunchstat-interval=5\",\n\t\t\t\"--mount-tmp\", \"tmp0\", \"--mount-by-pdh\", \"by_id\", \"--disable-event-listening\", \"--mount-by-id\", \"by_uuid\", realTemp + \"/keep1\"})\n\t\tc.Check(bindmounts, DeepEquals, map[string]bindmount{\"/keeptmp\": {realTemp + \"/keep1/tmp0\", false}})\n\t\tos.RemoveAll(cr.ArvMountPoint)\n\t\tcr.CleanupDirs()\n\t\tcheckEmpty()\n\t}\n\n\t{\n\t\ti = 0\n\t\tcr.ArvMountPoint = \"\"\n\t\tcr.Container.Mounts = map[string]arvados.Mount{\n\t\t\t\"/keepinp\": {Kind: \"collection\", PortableDataHash: \"59389a8f9ee9d399be35462a0f92541c+53\"},\n\t\t\t\"/keepout\": {Kind: \"collection\", Writable: true},\n\t\t}\n\t\tcr.Container.OutputPath = \"/keepout\"\n\n\t\tos.MkdirAll(realTemp+\"/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53\", os.ModePerm)\n\t\tos.MkdirAll(realTemp+\"/keep1/tmp0\", os.ModePerm)\n\n\t\tbindmounts, err := cr.SetupMounts()\n\t\tc.Check(err, IsNil)\n\t\tc.Check(am.Cmd, DeepEquals, []string{\"arv-mount\", \"--foreground\",\n\t\t\t\"--read-write\", \"--storage-classes\", \"default\", \"--crunchstat-interval=5\",\n\t\t\t\"--mount-tmp\", \"tmp0\", \"--mount-by-pdh\", \"by_id\", \"--disable-event-listening\", \"--mount-by-id\", \"by_uuid\", realTemp + \"/keep1\"})\n\t\tc.Check(bindmounts, DeepEquals, map[string]bindmount{\n\t\t\t\"/keepinp\": {realTemp + \"/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53\", true},\n\t\t\t\"/keepout\": {realTemp + \"/keep1/tmp0\", 
false},\n\t\t})\n\t\tos.RemoveAll(cr.ArvMountPoint)\n\t\tcr.CleanupDirs()\n\t\tcheckEmpty()\n\t}\n\n\t{\n\t\ti = 0\n\t\tcr.ArvMountPoint = \"\"\n\t\tcr.Container.RuntimeConstraints.KeepCacheRAM = 512\n\t\tcr.Container.Mounts = map[string]arvados.Mount{\n\t\t\t\"/keepinp\": {Kind: \"collection\", PortableDataHash: \"59389a8f9ee9d399be35462a0f92541c+53\"},\n\t\t\t\"/keepout\": {Kind: \"collection\", Writable: true},\n\t\t}\n\t\tcr.Container.OutputPath = \"/keepout\"\n\n\t\tos.MkdirAll(realTemp+\"/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53\", os.ModePerm)\n\t\tos.MkdirAll(realTemp+\"/keep1/tmp0\", os.ModePerm)\n\n\t\tbindmounts, err := cr.SetupMounts()\n\t\tc.Check(err, IsNil)\n\t\tc.Check(am.Cmd, DeepEquals, []string{\"arv-mount\", \"--foreground\",\n\t\t\t\"--read-write\", \"--storage-classes\", \"default\", \"--crunchstat-interval=5\", \"--ram-cache\",\n\t\t\t\"--file-cache\", \"512\", \"--mount-tmp\", \"tmp0\", \"--mount-by-pdh\", \"by_id\", \"--disable-event-listening\", \"--mount-by-id\", \"by_uuid\", realTemp + \"/keep1\"})\n\t\tc.Check(bindmounts, DeepEquals, map[string]bindmount{\n\t\t\t\"/keepinp\": {realTemp + \"/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53\", true},\n\t\t\t\"/keepout\": {realTemp + \"/keep1/tmp0\", false},\n\t\t})\n\t\tos.RemoveAll(cr.ArvMountPoint)\n\t\tcr.CleanupDirs()\n\t\tcheckEmpty()\n\t}\n\n\tfor _, test := range []struct {\n\t\tin  interface{}\n\t\tout string\n\t}{\n\t\t{in: \"foo\", out: `\"foo\"`},\n\t\t{in: nil, out: `null`},\n\t\t{in: map[string]int64{\"foo\": 123456789123456789}, out: `{\"foo\":123456789123456789}`},\n\t} {\n\t\ti = 0\n\t\tcr.ArvMountPoint = \"\"\n\t\tcr.Container.Mounts = map[string]arvados.Mount{\n\t\t\t\"/mnt/test.json\": {Kind: \"json\", Content: test.in},\n\t\t}\n\t\tbindmounts, err := cr.SetupMounts()\n\t\tc.Check(err, IsNil)\n\t\tc.Check(bindmounts, DeepEquals, map[string]bindmount{\n\t\t\t\"/mnt/test.json\": {realTemp + \"/json2/mountdata.json\", true},\n\t\t})\n\t\tcontent, err := ioutil.ReadFile(realTemp + \"/json2/mountdata.json\")\n\t\tc.Check(err, IsNil)\n\t\tc.Check(content, DeepEquals, []byte(test.out))\n\t\tos.RemoveAll(cr.ArvMountPoint)\n\t\tcr.CleanupDirs()\n\t\tcheckEmpty()\n\t}\n\n\tfor _, test := range []struct {\n\t\tin  interface{}\n\t\tout string\n\t}{\n\t\t{in: \"foo\", out: `foo`},\n\t\t{in: nil, out: \"error\"},\n\t\t{in: map[string]int64{\"foo\": 123456789123456789}, out: \"error\"},\n\t} {\n\t\ti = 0\n\t\tcr.ArvMountPoint = \"\"\n\t\tcr.Container.Mounts = map[string]arvados.Mount{\n\t\t\t\"/mnt/test.txt\": {Kind: \"text\", Content: test.in},\n\t\t}\n\t\tbindmounts, err := cr.SetupMounts()\n\t\tif test.out == \"error\" {\n\t\t\tc.Check(err.Error(), Equals, \"content for mount \\\"/mnt/test.txt\\\" must be a string\")\n\t\t} else {\n\t\t\tc.Check(err, IsNil)\n\t\t\tc.Check(bindmounts, DeepEquals, map[string]bindmount{\n\t\t\t\t\"/mnt/test.txt\": {realTemp + \"/text2/mountdata.text\", true},\n\t\t\t})\n\t\t\tcontent, err := ioutil.ReadFile(realTemp + \"/text2/mountdata.text\")\n\t\t\tc.Check(err, IsNil)\n\t\t\tc.Check(content, DeepEquals, []byte(test.out))\n\t\t}\n\t\tos.RemoveAll(cr.ArvMountPoint)\n\t\tcr.CleanupDirs()\n\t\tcheckEmpty()\n\t}\n\n\t// Read-only mount points are allowed underneath output_dir mount point\n\t{\n\t\ti = 0\n\t\tcr.ArvMountPoint = \"\"\n\t\tcr.Container.Mounts = make(map[string]arvados.Mount)\n\t\tcr.Container.Mounts = map[string]arvados.Mount{\n\t\t\t\"/tmp\":     {Kind: \"tmp\"},\n\t\t\t\"/tmp/foo\": {Kind: \"collection\"},\n\t\t}\n\t\tcr.Container.OutputPath = 
\"/tmp\"\n\n\t\tos.MkdirAll(realTemp+\"/keep1/tmp0\", os.ModePerm)\n\n\t\tbindmounts, err := cr.SetupMounts()\n\t\tc.Check(err, IsNil)\n\t\tc.Check(am.Cmd, DeepEquals, []string{\"arv-mount\", \"--foreground\",\n\t\t\t\"--read-write\", \"--storage-classes\", \"default\", \"--crunchstat-interval=5\", \"--ram-cache\",\n\t\t\t\"--file-cache\", \"512\", \"--mount-tmp\", \"tmp0\", \"--mount-by-pdh\", \"by_id\", \"--disable-event-listening\", \"--mount-by-id\", \"by_uuid\", realTemp + \"/keep1\"})\n\t\tc.Check(bindmounts, DeepEquals, map[string]bindmount{\n\t\t\t\"/tmp\":     {realTemp + \"/tmp2\", false},\n\t\t\t\"/tmp/foo\": {realTemp + \"/keep1/tmp0\", true},\n\t\t})\n\t\tos.RemoveAll(cr.ArvMountPoint)\n\t\tcr.CleanupDirs()\n\t\tcheckEmpty()\n\t}\n\n\t// Writable mount points copied to output_dir mount point\n\t{\n\t\ti = 0\n\t\tcr.ArvMountPoint = \"\"\n\t\tcr.Container.Mounts = make(map[string]arvados.Mount)\n\t\tcr.Container.Mounts = map[string]arvados.Mount{\n\t\t\t\"/tmp\": {Kind: \"tmp\"},\n\t\t\t\"/tmp/foo\": {Kind: \"collection\",\n\t\t\t\tPortableDataHash: \"59389a8f9ee9d399be35462a0f92541c+53\",\n\t\t\t\tWritable:         true},\n\t\t\t\"/tmp/bar\": {Kind: \"collection\",\n\t\t\t\tPortableDataHash: \"59389a8f9ee9d399be35462a0f92541d+53\",\n\t\t\t\tPath:             \"baz\",\n\t\t\t\tWritable:         true},\n\t\t}\n\t\tcr.Container.OutputPath = \"/tmp\"\n\n\t\tos.MkdirAll(realTemp+\"/keep1/by_id/59389a8f9ee9d399be35462a0f92541c+53\", os.ModePerm)\n\t\tos.MkdirAll(realTemp+\"/keep1/by_id/59389a8f9ee9d399be35462a0f92541d+53/baz\", os.ModePerm)\n\n\t\trf, _ := os.Create(realTemp + \"/keep1/by_id/59389a8f9ee9d399be35462a0f92541d+53/baz/quux\")\n\t\trf.Write([]byte(\"bar\"))\n\t\trf.Close()\n\n\t\t_, err := cr.SetupMounts()\n\t\tc.Check(err, IsNil)\n\t\t_, err = os.Stat(cr.HostOutputDir + \"/foo\")\n\t\tc.Check(err, IsNil)\n\t\t_, err = os.Stat(cr.HostOutputDir + \"/bar/quux\")\n\t\tc.Check(err, IsNil)\n\t\tos.RemoveAll(cr.ArvMountPoint)\n\t\tcr.CleanupDirs()\n\t\tcheckEmpty()\n\t}\n\n\t// Only mount points of kind 'collection' are allowed underneath output_dir mount point\n\t{\n\t\ti = 0\n\t\tcr.ArvMountPoint = \"\"\n\t\tcr.Container.Mounts = make(map[string]arvados.Mount)\n\t\tcr.Container.Mounts = map[string]arvados.Mount{\n\t\t\t\"/tmp\":     {Kind: \"tmp\"},\n\t\t\t\"/tmp/foo\": {Kind: \"tmp\"},\n\t\t}\n\t\tcr.Container.OutputPath = \"/tmp\"\n\n\t\t_, err := cr.SetupMounts()\n\t\tc.Check(err, NotNil)\n\t\tc.Check(err, ErrorMatches, `only mount points of kind 'collection', 'text' or 'json' are supported underneath the output_path.*`)\n\t\tos.RemoveAll(cr.ArvMountPoint)\n\t\tcr.CleanupDirs()\n\t\tcheckEmpty()\n\t}\n\n\t// Only mount point of kind 'collection' is allowed for stdin\n\t{\n\t\ti = 0\n\t\tcr.ArvMountPoint = \"\"\n\t\tcr.Container.Mounts = make(map[string]arvados.Mount)\n\t\tcr.Container.Mounts = map[string]arvados.Mount{\n\t\t\t\"stdin\": {Kind: \"tmp\"},\n\t\t}\n\n\t\t_, err := cr.SetupMounts()\n\t\tc.Check(err, NotNil)\n\t\tc.Check(err, ErrorMatches, `unsupported mount kind 'tmp' for stdin.*`)\n\t\tos.RemoveAll(cr.ArvMountPoint)\n\t\tcr.CleanupDirs()\n\t\tcheckEmpty()\n\t}\n}\n\nfunc (s *TestSuite) TestStdout(c *C) {\n\thelperRecord := `{\n\t\t\"command\": [\"/bin/sh\", \"-c\", \"echo $FROBIZ\"],\n\t\t\"container_image\": \"` + arvadostest.DockerImage112PDH + `\",\n\t\t\"cwd\": \"/bin\",\n\t\t\"environment\": {\"FROBIZ\": \"bilbo\"},\n\t\t\"mounts\": {\"/tmp\": {\"kind\": \"tmp\"}, \"stdout\": {\"kind\": \"file\", \"path\": \"/tmp/a/b/c.out\"} },\n\t\t\"output_path\": 
\"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\"\n\t}`\n\n\ts.fullRunHelper(c, helperRecord, nil, func() int {\n\t\tfmt.Fprintln(s.executor.created.Stdout, s.executor.created.Env[\"FROBIZ\"])\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tc.Check(s.runner.ContainerArvClient.(*ArvTestClient).CalledWith(\"collection.manifest_text\", \"./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out\\n\"), NotNil)\n}\n\n// Used by the TestStdoutWithWrongPath*()\nfunc (s *TestSuite) stdoutErrorRunHelper(c *C, record string, fn func() int) (*ArvTestClient, *ContainerRunner, error) {\n\terr := json.Unmarshal([]byte(record), &s.api.Container)\n\tc.Assert(err, IsNil)\n\ts.executor.runFunc = fn\n\ts.runner.RunArvMount = (&ArvMountCmdLine{}).ArvMountTest\n\ts.runner.MkArvClient = func(token string) (IArvadosClient, IKeepClient, *arvados.Client, error) {\n\t\treturn s.api, &KeepTestClient{}, nil, nil\n\t}\n\treturn s.api, s.runner, s.runner.Run()\n}\n\nfunc (s *TestSuite) TestStdoutWithWrongPath(c *C) {\n\t_, _, err := s.stdoutErrorRunHelper(c, `{\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"}, \"stdout\": {\"kind\": \"file\", \"path\":\"/tmpa.out\"} },\n    \"output_path\": \"/tmp\",\n    \"state\": \"Locked\"\n}`, func() int { return 0 })\n\tc.Check(err, ErrorMatches, \".*Stdout path does not start with OutputPath.*\")\n}\n\nfunc (s *TestSuite) TestStdoutWithWrongKindTmp(c *C) {\n\t_, _, err := s.stdoutErrorRunHelper(c, `{\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"}, \"stdout\": {\"kind\": \"tmp\", \"path\":\"/tmp/a.out\"} },\n    \"output_path\": \"/tmp\",\n    \"state\": \"Locked\"\n}`, func() int { return 0 })\n\tc.Check(err, ErrorMatches, \".*unsupported mount kind 'tmp' for stdout.*\")\n}\n\nfunc (s *TestSuite) TestStdoutWithWrongKindCollection(c *C) {\n\t_, _, err := s.stdoutErrorRunHelper(c, `{\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"}, \"stdout\": {\"kind\": \"collection\", \"path\":\"/tmp/a.out\"} },\n    \"output_path\": \"/tmp\",\n    \"state\": \"Locked\"\n}`, func() int { return 0 })\n\tc.Check(err, ErrorMatches, \".*unsupported mount kind 'collection' for stdout.*\")\n}\n\nfunc (s *TestSuite) TestFullRunWithAPI(c *C) {\n\ts.fullRunHelper(c, `{\n    \"command\": [\"/bin/sh\", \"-c\", \"true $ARVADOS_API_HOST\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \"/bin\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {\"API\": true},\n    \"state\": \"Locked\"\n}`, nil, func() int {\n\t\tc.Check(s.executor.created.Env[\"ARVADOS_API_HOST\"], Equals, os.Getenv(\"ARVADOS_API_HOST\"))\n\t\treturn 3\n\t})\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 3), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, `(?ms).*`+reTimestamp+` Container exited with status code 3\\n.*`)\n}\n\nfunc (s *TestSuite) TestFullRunSetOutput(c *C) {\n\tdefer os.Setenv(\"ARVADOS_API_HOST\", os.Getenv(\"ARVADOS_API_HOST\"))\n\tos.Setenv(\"ARVADOS_API_HOST\", \"test.arvados.org\")\n\ts.fullRunHelper(c, `{\n    \"command\": [\"/bin/sh\", \"-c\", \"echo $ARVADOS_API_HOST\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \"/bin\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": 
{\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {\"API\": true},\n    \"state\": \"Locked\"\n}`, nil, func() int {\n\t\ts.api.Container.Output = arvadostest.DockerImage112PDH\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tc.Check(s.api.CalledWith(\"container.output\", arvadostest.DockerImage112PDH), NotNil)\n}\n\nfunc (s *TestSuite) TestArvMountRuntimeStatusWarning(c *C) {\n\ts.runner.RunArvMount = func([]string, string) (*exec.Cmd, error) {\n\t\tos.Mkdir(s.runner.ArvMountPoint+\"/by_id\", 0666)\n\t\tioutil.WriteFile(s.runner.ArvMountPoint+\"/by_id/README\", nil, 0666)\n\t\treturn s.runner.ArvMountCmd([]string{\"bash\", \"-c\", \"echo >&2 Test: Keep write error: I am a teapot; sleep 3\"}, \"\")\n\t}\n\ts.executor.runFunc = func() int {\n\t\ttime.Sleep(time.Second)\n\t\treturn 137\n\t}\n\trecord := `{\n    \"command\": [\"sleep\", \"1\"],\n    \"container_image\": \"` + arvadostest.DockerImage112PDH + `\",\n    \"cwd\": \"/bin\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {\"API\": true},\n    \"state\": \"Locked\"\n}`\n\terr := json.Unmarshal([]byte(record), &s.api.Container)\n\tc.Assert(err, IsNil)\n\terr = s.runner.Run()\n\tc.Assert(err, IsNil)\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 137), NotNil)\n\tc.Check(s.api.CalledWith(\"container.runtime_status.warning\", \"arv-mount: Keep write error\"), NotNil)\n\tc.Check(s.api.CalledWith(\"container.runtime_status.warningDetail\", \"Test: Keep write error: I am a teapot\"), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, `(?ms).*`+reTimestamp+` Container exited with status code 137 \\(signal 9, SIGKILL\\).*`)\n\tc.Check(logFileContent(c, s.runner, \"arv-mount.txt\"), Matches, reTimestamp+` Test: Keep write error: I am a teapot\\n`)\n}\n\nfunc (s *TestSuite) TestStdoutWithExcludeFromOutputMountPointUnderOutputDir(c *C) {\n\thelperRecord := `{\n\t\t\"command\": [\"/bin/sh\", \"-c\", \"echo $FROBIZ\"],\n\t\t\"container_image\": \"` + arvadostest.DockerImage112PDH + `\",\n\t\t\"cwd\": \"/bin\",\n\t\t\"environment\": {\"FROBIZ\": \"bilbo\"},\n\t\t\"mounts\": {\n        \"/tmp\": {\"kind\": \"tmp\"},\n        \"/tmp/foo\": {\"kind\": \"collection\",\n                     \"portable_data_hash\": \"a3e8f74c6f101eae01fa08bfb4e49b3a+54\",\n                     \"exclude_from_output\": true\n        },\n        \"stdout\": {\"kind\": \"file\", \"path\": \"/tmp/a/b/c.out\"}\n    },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\"\n\t}`\n\n\textraMounts := []string{\"a3e8f74c6f101eae01fa08bfb4e49b3a+54\"}\n\n\ts.fullRunHelper(c, helperRecord, extraMounts, func() int {\n\t\tfmt.Fprintln(s.executor.created.Stdout, s.executor.created.Env[\"FROBIZ\"])\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tc.Check(s.runner.ContainerArvClient.(*ArvTestClient).CalledWith(\"collection.manifest_text\", \"./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out\\n\"), NotNil)\n}\n\nfunc (s *TestSuite) TestStdoutWithMultipleMountPointsUnderOutputDir(c *C) {\n\thelperRecord := `{\n\t\t\"command\": 
[\"/bin/sh\", \"-c\", \"echo $FROBIZ\"],\n\t\t\"container_image\": \"` + arvadostest.DockerImage112PDH + `\",\n\t\t\"cwd\": \"/bin\",\n\t\t\"environment\": {\"FROBIZ\": \"bilbo\"},\n\t\t\"mounts\": {\n        \"/tmp\": {\"kind\": \"tmp\"},\n        \"/tmp/foo/bar\": {\"kind\": \"collection\", \"portable_data_hash\": \"a0def87f80dd594d4675809e83bd4f15+367\", \"path\":\"/file2_in_main.txt\"},\n        \"/tmp/foo/sub1\": {\"kind\": \"collection\", \"portable_data_hash\": \"a0def87f80dd594d4675809e83bd4f15+367\", \"path\":\"/subdir1\"},\n        \"/tmp/foo/sub1file2\": {\"kind\": \"collection\", \"portable_data_hash\": \"a0def87f80dd594d4675809e83bd4f15+367\", \"path\":\"/subdir1/file2_in_subdir1.txt\"},\n        \"/tmp/foo/baz/sub2file2\": {\"kind\": \"collection\", \"portable_data_hash\": \"a0def87f80dd594d4675809e83bd4f15+367\", \"path\":\"/subdir1/subdir2/file2_in_subdir2.txt\"},\n        \"stdout\": {\"kind\": \"file\", \"path\": \"/tmp/a/b/c.out\"}\n    },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\",\n\t\t\"uuid\": \"zzzzz-dz642-202301130848001\"\n\t}`\n\n\textraMounts := []string{\n\t\t\"a0def87f80dd594d4675809e83bd4f15+367/file2_in_main.txt\",\n\t\t\"a0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt\",\n\t\t\"a0def87f80dd594d4675809e83bd4f15+367/subdir1/subdir2/file2_in_subdir2.txt\",\n\t}\n\n\tapi, _, realtemp := s.fullRunHelper(c, helperRecord, extraMounts, func() int {\n\t\tfmt.Fprintln(s.executor.created.Stdout, s.executor.created.Env[\"FROBIZ\"])\n\t\treturn 0\n\t})\n\n\tc.Check(s.executor.created.BindMounts, DeepEquals, map[string]bindmount{\n\t\t\"/tmp\":                   {realtemp + \"/tmp1\", false},\n\t\t\"/tmp/foo/bar\":           {s.keepmount + \"/by_id/a0def87f80dd594d4675809e83bd4f15+367/file2_in_main.txt\", true},\n\t\t\"/tmp/foo/baz/sub2file2\": {s.keepmount + \"/by_id/a0def87f80dd594d4675809e83bd4f15+367/subdir1/subdir2/file2_in_subdir2.txt\", true},\n\t\t\"/tmp/foo/sub1\":          {s.keepmount + \"/by_id/a0def87f80dd594d4675809e83bd4f15+367/subdir1\", true},\n\t\t\"/tmp/foo/sub1file2\":     {s.keepmount + \"/by_id/a0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt\", true},\n\t})\n\n\tc.Check(api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\toutput_count := uint(0)\n\tfor _, v := range s.runner.ContainerArvClient.(*ArvTestClient).Content {\n\t\tif v[\"collection\"] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tcollection := v[\"collection\"].(arvadosclient.Dict)\n\t\tif collection[\"name\"].(string) != \"output for zzzzz-dz642-202301130848001\" {\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(v[\"ensure_unique_name\"], Equals, true)\n\t\tc.Check(collection[\"manifest_text\"].(string), Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out\n./foo 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 9:18:bar 36:18:sub1file2\n./foo/baz 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 9:18:sub2file2\n./foo/sub1 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396cabcdefghij6419876543234@569fa8c4 0:9:file1_in_subdir1.txt 9:18:file2_in_subdir1.txt\n./foo/sub1/subdir2 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0bcdefghijk544332211@569fa8c5 0:9:file1_in_subdir2.txt 9:18:file2_in_subdir2.txt\n`)\n\t\toutput_count++\n\t}\n\tc.Check(output_count, 
Not(Equals), uint(0))\n}\n\nfunc (s *TestSuite) TestStdoutWithMountPointsUnderOutputDirDenormalizedManifest(c *C) {\n\thelperRecord := `{\n\t\t\"command\": [\"/bin/sh\", \"-c\", \"echo $FROBIZ\"],\n\t\t\"container_image\": \"` + arvadostest.DockerImage112PDH + `\",\n\t\t\"cwd\": \"/bin\",\n\t\t\"environment\": {\"FROBIZ\": \"bilbo\"},\n\t\t\"mounts\": {\n        \"/tmp\": {\"kind\": \"tmp\"},\n        \"/tmp/foo/bar\": {\"kind\": \"collection\", \"portable_data_hash\": \"b0def87f80dd594d4675809e83bd4f15+367\", \"path\": \"/subdir1/file2_in_subdir1.txt\"},\n        \"stdout\": {\"kind\": \"file\", \"path\": \"/tmp/a/b/c.out\"}\n    },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\",\n\t\t\"uuid\": \"zzzzz-dz642-202301130848002\"\n\t}`\n\n\textraMounts := []string{\n\t\t\"b0def87f80dd594d4675809e83bd4f15+367/subdir1/file2_in_subdir1.txt\",\n\t}\n\n\ts.fullRunHelper(c, helperRecord, extraMounts, func() int {\n\t\tfmt.Fprintln(s.executor.created.Stdout, s.executor.created.Env[\"FROBIZ\"])\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\toutput_count := uint(0)\n\tfor _, v := range s.runner.ContainerArvClient.(*ArvTestClient).Content {\n\t\tif v[\"collection\"] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tcollection := v[\"collection\"].(arvadosclient.Dict)\n\t\tif collection[\"name\"].(string) != \"output for zzzzz-dz642-202301130848002\" {\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(collection[\"manifest_text\"].(string), Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out\n./foo 3e426d509afffb85e06c4c96a7c15e91+27+Aa124ac75e5168396c73c0abcdefgh11234567890@569fa8c3 10:17:bar\n`)\n\t\toutput_count++\n\t}\n\tc.Check(output_count, Not(Equals), uint(0))\n}\n\nfunc (s *TestSuite) TestOutputError(c *C) {\n\thelperRecord := `{\n\t\t\"command\": [\"/bin/sh\", \"-c\", \"echo $FROBIZ\"],\n\t\t\"container_image\": \"` + arvadostest.DockerImage112PDH + `\",\n\t\t\"cwd\": \"/bin\",\n\t\t\"environment\": {\"FROBIZ\": \"bilbo\"},\n\t\t\"mounts\": {\n\t\t\t\"/tmp\": {\"kind\": \"tmp\"}\n\t\t},\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\"\n\t}`\n\ts.fullRunHelper(c, helperRecord, nil, func() int {\n\t\tos.Symlink(\"/etc/hosts\", s.runner.HostOutputDir+\"/baz\")\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.state\", \"Cancelled\"), NotNil)\n}\n\nfunc (s *TestSuite) TestStdinCollectionMountPoint(c *C) {\n\thelperRecord := `{\n\t\t\"command\": [\"/bin/sh\", \"-c\", \"echo $FROBIZ\"],\n\t\t\"container_image\": \"` + arvadostest.DockerImage112PDH + `\",\n\t\t\"cwd\": \"/bin\",\n\t\t\"environment\": {\"FROBIZ\": \"bilbo\"},\n\t\t\"mounts\": {\n        \"/tmp\": {\"kind\": \"tmp\"},\n        \"stdin\": {\"kind\": \"collection\", \"portable_data_hash\": \"b0def87f80dd594d4675809e83bd4f15+367\", \"path\": \"/file1_in_main.txt\"},\n        \"stdout\": {\"kind\": \"file\", \"path\": \"/tmp/a/b/c.out\"}\n    },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\"\n\t}`\n\n\textraMounts := []string{\n\t\t\"b0def87f80dd594d4675809e83bd4f15+367/file1_in_main.txt\",\n\t}\n\n\tapi, _, _ := s.fullRunHelper(c, helperRecord, extraMounts, func() int {\n\t\tfmt.Fprintln(s.executor.created.Stdout, s.executor.created.Env[\"FROBIZ\"])\n\t\treturn 0\n\t})\n\n\tc.Check(api.CalledWith(\"container.exit_code\", 0), 
NotNil)\n\tc.Check(api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tfor _, v := range api.Content {\n\t\tif v[\"collection\"] != nil {\n\t\t\tcollection := v[\"collection\"].(arvadosclient.Dict)\n\t\t\tif strings.Index(collection[\"name\"].(string), \"output\") == 0 {\n\t\t\t\tmanifest := collection[\"manifest_text\"].(string)\n\t\t\t\tc.Check(manifest, Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out\n`)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *TestSuite) TestStdinJsonMountPoint(c *C) {\n\thelperRecord := `{\n\t\t\"command\": [\"/bin/sh\", \"-c\", \"echo $FROBIZ\"],\n\t\t\"container_image\": \"` + arvadostest.DockerImage112PDH + `\",\n\t\t\"cwd\": \"/bin\",\n\t\t\"environment\": {\"FROBIZ\": \"bilbo\"},\n\t\t\"mounts\": {\n        \"/tmp\": {\"kind\": \"tmp\"},\n        \"stdin\": {\"kind\": \"json\", \"content\": \"foo\"},\n        \"stdout\": {\"kind\": \"file\", \"path\": \"/tmp/a/b/c.out\"}\n    },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\"\n\t}`\n\n\tapi, _, _ := s.fullRunHelper(c, helperRecord, nil, func() int {\n\t\tfmt.Fprintln(s.executor.created.Stdout, s.executor.created.Env[\"FROBIZ\"])\n\t\treturn 0\n\t})\n\n\tc.Check(api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tfor _, v := range api.Content {\n\t\tif v[\"collection\"] != nil {\n\t\t\tcollection := v[\"collection\"].(arvadosclient.Dict)\n\t\t\tif strings.Index(collection[\"name\"].(string), \"output\") == 0 {\n\t\t\t\tmanifest := collection[\"manifest_text\"].(string)\n\t\t\t\tc.Check(manifest, Equals, `./a/b 307372fa8fd5c146b22ae7a45b49bc31+6 0:6:c.out\n`)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *TestSuite) TestStderrMount(c *C) {\n\tapi, cr, _ := s.fullRunHelper(c, `{\n    \"command\": [\"/bin/sh\", \"-c\", \"echo hello;exit 1\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \".\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"},\n               \"stdout\": {\"kind\": \"file\", \"path\": \"/tmp/a/out.txt\"},\n               \"stderr\": {\"kind\": \"file\", \"path\": \"/tmp/b/err.txt\"}},\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {},\n    \"state\": \"Locked\"\n}`, nil, func() int {\n\t\tfmt.Fprintln(s.executor.created.Stdout, \"hello\")\n\t\tfmt.Fprintln(s.executor.created.Stderr, \"oops\")\n\t\treturn 1\n\t})\n\n\tfinal := api.CalledWith(\"container.state\", \"Complete\")\n\tc.Assert(final, NotNil)\n\tc.Check(final[\"container\"].(arvadosclient.Dict)[\"exit_code\"], Equals, 1)\n\tc.Check(final[\"container\"].(arvadosclient.Dict)[\"log\"], NotNil)\n\n\tc.Check(cr.ContainerArvClient.(*ArvTestClient).CalledWith(\"collection.manifest_text\", \"./a b1946ac92492d2347c6235b4d2611184+6 0:6:out.txt\\n./b 38af5c54926b620264ab1501150cf189+5 0:5:err.txt\\n\"), NotNil)\n}\n\nfunc (s *TestSuite) TestNumberRoundTrip(c *C) {\n\ts.api.callraw = true\n\terr := s.runner.fetchContainerRecord()\n\tc.Assert(err, IsNil)\n\tjsondata, err := json.Marshal(s.runner.Container.Mounts[\"/json\"].Content)\n\tc.Logf(\"%#v\", s.runner.Container)\n\tc.Check(err, IsNil)\n\tc.Check(string(jsondata), Equals, `{\"number\":123456789123456789}`)\n}\n\nfunc (s *TestSuite) TestFullBrokenDocker(c *C) {\n\tnextState := \"\"\n\tfor _, setup := range []func(){\n\t\tfunc() {\n\t\t\tc.Log(\"// waitErr = oci runtime error\")\n\t\t\ts.executor.waitErr = errors.New(`Error response from daemon: oci runtime 
error: container_linux.go:247: starting container process caused \"process_linux.go:359: container init caused \\\"rootfs_linux.go:54: mounting \\\\\\\"/tmp/keep453790790/by_id/99999999999999999999999999999999+99999/myGenome\\\\\\\" to rootfs \\\\\\\"/tmp/docker/overlay2/9999999999999999999999999999999999999999999999999999999999999999/merged\\\\\\\" at \\\\\\\"/tmp/docker/overlay2/9999999999999999999999999999999999999999999999999999999999999999/merged/keep/99999999999999999999999999999999+99999/myGenome\\\\\\\" caused \\\\\\\"no such file or directory\\\\\\\"\\\"\"`)\n\t\t\tnextState = \"Cancelled\"\n\t\t},\n\t\tfunc() {\n\t\t\tc.Log(\"// loadErr = cannot connect\")\n\t\t\ts.executor.loadErr = errors.New(\"Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?\")\n\t\t\ts.runner.brokenNodeHook = c.MkDir() + \"/broken-node-hook\"\n\t\t\terr := ioutil.WriteFile(s.runner.brokenNodeHook, []byte(\"#!/bin/sh\\nexec echo killme\\n\"), 0700)\n\t\t\tc.Assert(err, IsNil)\n\t\t\tnextState = \"Queued\"\n\t\t},\n\t} {\n\t\ts.SetUpTest(c)\n\t\tsetup()\n\t\ts.fullRunHelper(c, `{\n    \"command\": [\"echo\", \"hello world\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \".\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {},\n    \"state\": \"Locked\"\n}`, nil, func() int { return 0 })\n\t\tc.Check(s.api.CalledWith(\"container.state\", nextState), NotNil)\n\t\tlogs := logFileContent(c, s.runner, \"crunch-run.txt\")\n\t\tc.Check(logs, Matches, \"(?ms).*unable to run containers.*\")\n\t\tif s.runner.brokenNodeHook != \"\" {\n\t\t\tc.Check(logs, Matches, \"(?ms).*Running broken node hook.*\")\n\t\t\tc.Check(logs, Matches, \"(?ms).*killme.*\")\n\t\t\tc.Check(logs, Not(Matches), \"(?ms).*Writing /var/lock/crunch-run-broken to mark node as broken.*\")\n\t\t} else {\n\t\t\tc.Check(logs, Matches, \"(?ms).*Writing /var/lock/crunch-run-broken to mark node as broken.*\")\n\t\t}\n\t}\n}\n\nfunc (s *TestSuite) TestBadCommand(c *C) {\n\tfor _, startError := range []string{\n\t\t`panic: standard_init_linux.go:175: exec user process caused \"no such file or directory\"`,\n\t\t`Error response from daemon: Cannot start container 41f26cbc43bcc1280f4323efb1830a394ba8660c9d1c2b564ba42bf7f7694845: [8] System error: no such file or directory`,\n\t\t`Error response from daemon: Cannot start container 58099cd76c834f3dc2a4fb76c8028f049ae6d4fdf0ec373e1f2cfea030670c2d: [8] System error: exec: \"foobar\": executable file not found in $PATH`,\n\t} {\n\t\ts.SetUpTest(c)\n\t\ts.executor.startErr = errors.New(startError)\n\t\ts.fullRunHelper(c, `{\n    \"command\": [\"echo\", \"hello world\"],\n    \"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n    \"cwd\": \".\",\n    \"environment\": {},\n    \"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n    \"output_path\": \"/tmp\",\n    \"priority\": 1,\n    \"runtime_constraints\": {},\n    \"state\": \"Locked\"\n}`, nil, func() int { return 0 })\n\t\tc.Check(s.api.CalledWith(\"container.state\", \"Cancelled\"), NotNil)\n\t\tc.Check(logFileContent(c, s.runner, \"crunch-run.txt\"), Matches, \"(?ms).*Possible causes:.*is missing.*\")\n\t}\n}\n\nfunc (s *TestSuite) TestSecretTextMountPoint(c *C) {\n\thelperRecord := `{\n\t\t\"command\": [\"true\"],\n\t\t\"container_image\": \"` + arvadostest.DockerImage112PDH + `\",\n\t\t\"cwd\": \"/bin\",\n\t\t\"mounts\": {\n                    \"/tmp\": 
{\"kind\": \"tmp\"},\n                    \"/tmp/secret.conf\": {\"kind\": \"text\", \"content\": \"mypassword\"}\n                },\n                \"secret_mounts\": {\n                },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\"\n\t}`\n\n\ts.fullRunHelper(c, helperRecord, nil, func() int {\n\t\tcontent, err := ioutil.ReadFile(s.runner.HostOutputDir + \"/secret.conf\")\n\t\tc.Check(err, IsNil)\n\t\tc.Check(string(content), Equals, \"mypassword\")\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tc.Check(s.runner.ContainerArvClient.(*ArvTestClient).CalledWith(\"collection.manifest_text\", \". 34819d7beeabb9260a5c854bc85b3e44+10 0:10:secret.conf\\n\"), NotNil)\n\tc.Check(s.runner.ContainerArvClient.(*ArvTestClient).CalledWith(\"collection.manifest_text\", \"\"), IsNil)\n\n\t// under secret mounts, not captured in output\n\thelperRecord = `{\n\t\t\"command\": [\"true\"],\n\t\t\"container_image\": \"` + arvadostest.DockerImage112PDH + `\",\n\t\t\"cwd\": \"/bin\",\n\t\t\"mounts\": {\n                    \"/tmp\": {\"kind\": \"tmp\"}\n                },\n                \"secret_mounts\": {\n                    \"/tmp/secret.conf\": {\"kind\": \"text\", \"content\": \"mypassword\"}\n                },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\"\n\t}`\n\n\ts.SetUpTest(c)\n\ts.fullRunHelper(c, helperRecord, nil, func() int {\n\t\tcontent, err := ioutil.ReadFile(s.runner.HostOutputDir + \"/secret.conf\")\n\t\tc.Check(err, IsNil)\n\t\tc.Check(string(content), Equals, \"mypassword\")\n\t\treturn 0\n\t})\n\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tc.Check(s.runner.ContainerArvClient.(*ArvTestClient).CalledWith(\"collection.manifest_text\", \". 34819d7beeabb9260a5c854bc85b3e44+10 0:10:secret.conf\\n\"), IsNil)\n\tc.Check(s.runner.ContainerArvClient.(*ArvTestClient).CalledWith(\"collection.manifest_text\", \"\"), NotNil)\n\n\t// under secret mounts, output dir is a collection, not captured in output\n\thelperRecord = `{\n\t\t\"command\": [\"true\"],\n\t\t\"container_image\": \"` + arvadostest.DockerImage112PDH + `\",\n\t\t\"cwd\": \"/bin\",\n\t\t\"mounts\": {\n                    \"/tmp\": {\"kind\": \"collection\", \"writable\": true}\n                },\n                \"secret_mounts\": {\n                    \"/tmp/secret.conf\": {\"kind\": \"text\", \"content\": \"mypassword\"}\n                },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\"\n\t}`\n\n\ts.SetUpTest(c)\n\t_, _, realtemp := s.fullRunHelper(c, helperRecord, nil, func() int {\n\t\t// secret.conf should be provisioned as a separate\n\t\t// bind mount, i.e., it should not appear in the\n\t\t// (fake) fuse filesystem as viewed from the host.\n\t\tcontent, err := ioutil.ReadFile(s.runner.HostOutputDir + \"/secret.conf\")\n\t\tif !c.Check(errors.Is(err, os.ErrNotExist), Equals, true) {\n\t\t\tc.Logf(\"secret.conf: content %q, err %#v\", content, err)\n\t\t}\n\t\terr = ioutil.WriteFile(s.runner.HostOutputDir+\"/.arvados#collection\", []byte(`{\"manifest_text\":\". 
acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\\n\"}`), 0700)\n\t\tc.Check(err, IsNil)\n\t\treturn 0\n\t})\n\n\tcontent, err := ioutil.ReadFile(realtemp + \"/text1/mountdata.text\")\n\tc.Check(err, IsNil)\n\tc.Check(string(content), Equals, \"mypassword\")\n\tc.Check(s.executor.created.BindMounts[\"/tmp/secret.conf\"], DeepEquals, bindmount{realtemp + \"/text1/mountdata.text\", true})\n\tc.Check(s.api.CalledWith(\"container.exit_code\", 0), NotNil)\n\tc.Check(s.api.CalledWith(\"container.state\", \"Complete\"), NotNil)\n\tc.Check(s.runner.ContainerArvClient.(*ArvTestClient).CalledWith(\"collection.manifest_text\", \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\\n\"), NotNil)\n}\n\nfunc (s *TestSuite) TestCalculateCost(c *C) {\n\tdefer func(s string) { lockdir = s }(lockdir)\n\tlockdir = c.MkDir()\n\tnow := time.Now()\n\tcr := s.runner\n\tcr.costStartTime = now.Add(-time.Hour)\n\tvar logbuf bytes.Buffer\n\tcr.CrunchLog = newLogWriter(&logbuf)\n\n\t// if there's no InstanceType env var, cost is calculated as 0\n\tos.Unsetenv(\"InstanceType\")\n\tcost := cr.calculateCost(now)\n\tc.Check(cost, Equals, 0.0)\n\n\t// with InstanceType env var and loadPrices() hasn't run (or\n\t// hasn't found any data), cost is calculated based on\n\t// InstanceType env var\n\tos.Setenv(\"InstanceType\", `{\"Price\":1.2}`)\n\tcost = cr.calculateCost(now)\n\tc.Check(cost, Equals, 1.2)\n\n\t// first update tells us the spot price was $1/h until 30\n\t// minutes ago when it increased to $2/h\n\tj, err := json.Marshal([]cloud.InstancePrice{\n\t\t{StartTime: now.Add(-4 * time.Hour), Price: 1.0},\n\t\t{StartTime: now.Add(-time.Hour / 2), Price: 2.0},\n\t})\n\tc.Assert(err, IsNil)\n\tos.WriteFile(lockdir+\"/\"+pricesfile, j, 0777)\n\tcr.loadPrices()\n\tcost = cr.calculateCost(now)\n\tc.Check(cost, Equals, 1.5)\n\n\t// next update (via --list + SIGUSR2) tells us the spot price\n\t// increased to $3/h 15 minutes ago\n\tj, err = json.Marshal([]cloud.InstancePrice{\n\t\t{StartTime: now.Add(-time.Hour / 3), Price: 2.0}, // dup of -time.Hour/2 price\n\t\t{StartTime: now.Add(-time.Hour / 4), Price: 3.0},\n\t})\n\tc.Assert(err, IsNil)\n\tos.WriteFile(lockdir+\"/\"+pricesfile, j, 0777)\n\tcr.loadPrices()\n\tcost = cr.calculateCost(now)\n\tc.Check(cost, Equals, 1.0/2+2.0/4+3.0/4)\n\n\tcost = cr.calculateCost(now.Add(-time.Hour / 2))\n\tc.Check(cost, Equals, 0.5)\n\n\tc.Logf(\"%s\", logbuf.String())\n\tc.Check(logbuf.String(), Matches, `(?ms).*Instance price changed to 1\\.00 at 20.* changed to 2\\.00 .* changed to 3\\.00 .*`)\n\tc.Check(logbuf.String(), Not(Matches), `(?ms).*changed to 2\\.00 .* changed to 2\\.00 .*`)\n}\n\nfunc (s *TestSuite) TestSIGUSR2CostUpdate(c *C) {\n\tpid := os.Getpid()\n\tnow := time.Now()\n\tpricesJSON, err := json.Marshal([]cloud.InstancePrice{\n\t\t{StartTime: now.Add(-4 * time.Hour), Price: 2.4},\n\t\t{StartTime: now.Add(-2 * time.Hour), Price: 2.6},\n\t})\n\tc.Assert(err, IsNil)\n\n\tos.Setenv(\"InstanceType\", `{\"Price\":2.2}`)\n\tdefer func(s string) { lockdir = s }(lockdir)\n\tlockdir = c.MkDir()\n\n\t// We can't use s.api.CalledWith because timing differences will yield\n\t// different cost values across runs. 
getCostUpdate iterates over API\n\t// calls until it finds one that sets the cost, then writes that value\n\t// to the next index of costUpdates.\n\tdeadline := now.Add(time.Second)\n\tcostUpdates := make([]float64, 2)\n\tcostIndex := 0\n\tapiIndex := 0\n\tgetCostUpdate := func() {\n\t\tfor ; time.Now().Before(deadline); time.Sleep(time.Second / 10) {\n\t\t\tfor apiIndex < len(s.api.Content) {\n\t\t\t\tupdate := s.api.Content[apiIndex]\n\t\t\t\tapiIndex++\n\t\t\t\tvar ok bool\n\t\t\t\tvar cost float64\n\t\t\t\tif update, ok = update[\"container\"].(arvadosclient.Dict); !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif cost, ok = update[\"cost\"].(float64); !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tc.Logf(\"API call #%d updates cost to %v\", apiIndex-1, cost)\n\t\t\t\tcostUpdates[costIndex] = cost\n\t\t\t\tcostIndex++\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\ts.fullRunHelper(c, `{\n\t\t\"command\": [\"true\"],\n\t\t\"container_image\": \"`+arvadostest.DockerImage112PDH+`\",\n\t\t\"cwd\": \".\",\n\t\t\"environment\": {},\n\t\t\"mounts\": {\"/tmp\": {\"kind\": \"tmp\"} },\n\t\t\"output_path\": \"/tmp\",\n\t\t\"priority\": 1,\n\t\t\"runtime_constraints\": {},\n\t\t\"state\": \"Locked\",\n\t\t\"uuid\": \"zzzzz-dz642-20230320101530a\"\n\t}`, nil, func() int {\n\t\ts.runner.costStartTime = now.Add(-3 * time.Hour)\n\t\terr := syscall.Kill(pid, syscall.SIGUSR2)\n\t\tc.Check(err, IsNil, Commentf(\"error sending first SIGUSR2 to runner\"))\n\t\tgetCostUpdate()\n\n\t\terr = os.WriteFile(path.Join(lockdir, pricesfile), pricesJSON, 0o700)\n\t\tc.Check(err, IsNil, Commentf(\"error writing JSON prices file\"))\n\t\terr = syscall.Kill(pid, syscall.SIGUSR2)\n\t\tc.Check(err, IsNil, Commentf(\"error sending second SIGUSR2 to runner\"))\n\t\tgetCostUpdate()\n\n\t\treturn 0\n\t})\n\t// Comparing with format strings makes it easy to ignore minor variations\n\t// in cost across runs while keeping diagnostics pretty.\n\tc.Check(fmt.Sprintf(\"%.3f\", costUpdates[0]), Equals, \"6.600\")\n\tc.Check(fmt.Sprintf(\"%.3f\", costUpdates[1]), Equals, \"7.600\")\n}\n\nfunc (s *TestSuite) TestLocalKeepstoreAddr(c *C) {\n\tc.Check(localKeepstoreAddr(nil), Equals, \"0.0.0.0\")\n\n\t// Choose * if there are no non-loopback addrs\n\tc.Check(localKeepstoreAddr(map[string]bool{\n\t\t\"127.0.0.1\": true,\n\t\t\"127.0.0.2\": true,\n\t}), Equals, \"0.0.0.0\")\n\n\t// Choose routable over loopback\n\tc.Check(localKeepstoreAddr(map[string]bool{\n\t\t\"127.0.0.1\": true,\n\t\t\"1.2.3.4\":   true,\n\t}), Equals, \"1.2.3.4\")\n\n\t// Choose routable over loopback (even if loopback address\n\t// sorts first numerically)\n\tc.Check(localKeepstoreAddr(map[string]bool{\n\t\t\"127.0.0.1\": true,\n\t\t\"201.2.3.4\": true,\n\t}), Equals, \"201.2.3.4\")\n\n\t// Choose routable over loopback and CGNAT\n\tc.Check(localKeepstoreAddr(map[string]bool{\n\t\t\"127.0.0.1\":  true,\n\t\t\"1.2.3.4\":    true,\n\t\t\"100.64.0.1\": true,\n\t}), Equals, \"1.2.3.4\")\n\n\t// Choose routable over loopback and CGNAT (even if CGNAT\n\t// address sorts first numerically)\n\tc.Check(localKeepstoreAddr(map[string]bool{\n\t\t\"127.0.0.1\":   true,\n\t\t\"192.168.0.1\": true,\n\t\t\"100.64.0.1\":  true,\n\t}), Equals, \"192.168.0.1\")\n\n\t// Choose CGNAT only if there are no routable addrs\n\tc.Check(localKeepstoreAddr(map[string]bool{\n\t\t\"127.0.0.1\":  true,\n\t\t\"100.64.0.1\": true,\n\t}), Equals, \"100.64.0.1\")\n}\n\nfunc logFileContent(c *C, cr *ContainerRunner, fnm string) string {\n\tbuf, err := fs.ReadFile(arvados.FS(cr.LogCollection), 
fnm)\n\tc.Assert(err, IsNil)\n\treturn string(buf)\n}\n\nfunc dumpAllLogFiles(c *C, cr *ContainerRunner) {\n\td, err := cr.LogCollection.OpenFile(\"/\", os.O_RDONLY, 0)\n\tc.Assert(err, IsNil)\n\tfis, err := d.Readdir(-1)\n\tc.Assert(err, IsNil)\n\tfor _, fi := range fis {\n\t\tc.Logf(\"=== %s\", fi.Name())\n\t\tc.Log(logFileContent(c, cr, fi.Name()))\n\t}\n}\n\nfunc (s *TestSuite) TestCommand_List(c *C) {\n\terr := os.Remove(path.Join(lockdir, brokenfile))\n\tif !os.IsNotExist(err) {\n\t\tc.Assert(err, IsNil)\n\t}\n\tdeadlockreader, _ := io.Pipe()\n\tvar stdout, stderr bytes.Buffer\n\tcode := Command.RunCommand(\"crunch-run\", []string{\"--list\"}, deadlockreader, &stdout, &stderr)\n\tc.Check(code, Equals, 0)\n\tc.Check(stdout.String(), Equals, \"\")\n\tc.Check(stderr.String(), Equals, \"\")\n}\n"
  },
  {
    "path": "lib/crunchrun/cuda.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os/exec\"\n)\n\n// nvidiaModprobe makes sure all the nvidia kernel modules and devices\n// are set up.  If we don't have all the modules/devices set up we get\n// \"CUDA_ERROR_UNKNOWN\".\nfunc nvidiaModprobe(writer io.Writer) {\n\t// The underlying problem is that when normally running\n\t// directly on the host, the CUDA SDK will automatically\n\t// detect and set up the devices on demand.  However, when\n\t// running inside a container, it lacks sufficient permissions\n\t// to do that.  So, it needs to be set up before the container\n\t// can be started.\n\t//\n\t// The Singularity documentation hints about this but isn't\n\t// very helpful with a solution.\n\t// https://sylabs.io/guides/3.7/user-guide/gpu.html#cuda-error-unknown-when-everything-seems-to-be-correctly-configured\n\t//\n\t// If we're running \"nvidia-persistenced\", it sets up most of\n\t// these things on system boot.\n\t//\n\t// However, it seems that doesn't include /dev/nvidia-uvm\n\t// We're also no guaranteed to be running\n\t// \"nvidia-persistenced\" or otherwise have the devices set up\n\t// for us.  So the most robust solution is to do it ourselves.\n\t//\n\t// These are idempotent operations so it is harmless in the\n\t// case that everything was actually already set up.\n\n\t// Running nvida-smi the first time loads the core 'nvidia'\n\t// kernel module creates /dev/nvidiactl the per-GPU\n\t// /dev/nvidia* devices\n\tnvidiaSmi := exec.Command(\"nvidia-smi\", \"-L\")\n\tnvidiaSmi.Stdout = writer\n\tnvidiaSmi.Stderr = writer\n\terr := nvidiaSmi.Run()\n\tif err != nil {\n\t\tfmt.Fprintf(writer, \"Warning %v: %v\\n\", nvidiaSmi.Args, err)\n\t}\n\n\t// Load the kernel modules & devices associated with\n\t// /dev/nvidia-modeset, /dev/nvidia-nvlink, /dev/nvidia-uvm\n\t// and /dev/nvidia-uvm-tools (-m, -l and -u).  Annoyingly,\n\t// these don't have multiple devices but you need to supply\n\t// \"-c0\" anyway or it won't make the device file.\n\n\t// Nvswitch devices are multi-GPU interconnects for up to 16\n\t// GPUs.  The \"-c0 -s\" flag will create /dev/nvidia-nvswitch0.\n\t// If someone runs Arvados on a system with multiple\n\t// nvswitches (i.e. more than 16 GPUs) they'll have to ensure\n\t// that all the /dev/nvidia-nvswitch* devices exist before\n\t// crunch-run starts.\n\tfor _, opt := range []string{\"-m\", \"-l\", \"-u\", \"-s\"} {\n\t\tnvmodprobe := exec.Command(\"nvidia-modprobe\", \"-c0\", opt)\n\t\tnvmodprobe.Stdout = writer\n\t\tnvmodprobe.Stderr = writer\n\t\terr = nvmodprobe.Run()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(writer, \"Warning %v: %v\\n\", nvmodprobe.Args, err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/crunchrun/docker.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\npackage crunchrun\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\tdockercontainer \"github.com/docker/docker/api/types/container\"\n\tdockerclient \"github.com/docker/docker/client\"\n\t\"github.com/docker/docker/pkg/jsonmessage\"\n)\n\n// Docker daemon won't let you set a limit less than ~10 MiB\nconst minDockerRAM = int64(16 * 1024 * 1024)\n\n// DockerAPIVersion is the API version we use to communicate with the\n// docker service. We were standardized on Docker 28 when 3.2.0 released, so\n// we use its corresponding API version. See\n// https://docs.docker.com/reference/api/engine/.\nconst DockerAPIVersion = \"1.48\"\n\n// Number of consecutive \"inspect container\" failures before\n// concluding Docker is unresponsive, giving up, and cancelling the\n// container.\nconst dockerWatchdogThreshold = 5\n\ntype dockerExecutor struct {\n\tcontainerUUID    string\n\tlogf             func(string, ...interface{})\n\twatchdogInterval time.Duration\n\tdockerclient     *dockerclient.Client\n\tcontainerID      string\n\tsavedIPAddress   atomic.Value\n\tdoneIO           chan struct{}\n\terrIO            error\n}\n\nfunc newDockerExecutor(containerUUID string, logf func(string, ...interface{}), watchdogInterval time.Duration) (*dockerExecutor, error) {\n\t// API version 1.21 corresponds to Docker 1.9, which is\n\t// currently the minimum version we want to support.\n\tclient, err := dockerclient.NewClient(dockerclient.DefaultDockerHost, DockerAPIVersion, nil, nil)\n\tif watchdogInterval < 1 {\n\t\twatchdogInterval = time.Minute * 2\n\t}\n\treturn &dockerExecutor{\n\t\tcontainerUUID:    containerUUID,\n\t\tlogf:             logf,\n\t\twatchdogInterval: watchdogInterval,\n\t\tdockerclient:     client,\n\t}, err\n}\n\nfunc (e *dockerExecutor) Runtime() string {\n\tv, _ := e.dockerclient.ServerVersion(context.Background())\n\tinfo := \"\"\n\tfor _, cv := range v.Components {\n\t\tif info != \"\" {\n\t\t\tinfo += \", \"\n\t\t}\n\t\tinfo += cv.Name + \" \" + cv.Version\n\t}\n\tif info == \"\" {\n\t\tinfo = \"(unknown version)\"\n\t}\n\treturn \"docker \" + info\n}\n\nfunc (e *dockerExecutor) LoadImage(imageID string, imageTarballPath string, container arvados.Container, arvMountPoint string,\n\tcontainerClient *arvados.Client) error {\n\t_, _, err := e.dockerclient.ImageInspectWithRaw(context.TODO(), imageID)\n\tif err == nil {\n\t\t// already loaded\n\t\treturn nil\n\t}\n\n\tf, err := os.Open(imageTarballPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tresp, err := e.dockerclient.ImageLoad(context.TODO(), f, true)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ImageLoad: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\tvar message jsonmessage.JSONMessage\n\terr = json.NewDecoder(resp.Body).Decode(&message)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not parse Docker response: %w\", err)\n\t}\n\tif message.Error != nil {\n\t\treturn fmt.Errorf(\"ImageLoad: %w\", message.Error)\n\t}\n\t// message.Stream is typically \"Loaded image: hello-world:latest\\n\"\n\te.logf(\"%s\", strings.TrimSuffix(message.Stream, \"\\n\"))\n\treturn nil\n}\n\nfunc (e *dockerExecutor) config(spec containerSpec) (dockercontainer.Config, dockercontainer.HostConfig) {\n\te.logf(\"Creating Docker 
container\")\n\tcfg := dockercontainer.Config{\n\t\tImage:        spec.Image,\n\t\tCmd:          spec.Command,\n\t\tWorkingDir:   spec.WorkingDir,\n\t\tVolumes:      map[string]struct{}{},\n\t\tOpenStdin:    spec.Stdin != nil,\n\t\tStdinOnce:    spec.Stdin != nil,\n\t\tAttachStdin:  spec.Stdin != nil,\n\t\tAttachStdout: true,\n\t\tAttachStderr: true,\n\t}\n\tif cfg.WorkingDir == \".\" {\n\t\tcfg.WorkingDir = \"\"\n\t}\n\tfor k, v := range spec.Env {\n\t\tcfg.Env = append(cfg.Env, k+\"=\"+v)\n\t}\n\tif spec.RAM > 0 && spec.RAM < minDockerRAM {\n\t\tspec.RAM = minDockerRAM\n\t}\n\thostCfg := dockercontainer.HostConfig{\n\t\tLogConfig: dockercontainer.LogConfig{\n\t\t\tType: \"none\",\n\t\t},\n\t\tNetworkMode: dockercontainer.NetworkMode(\"none\"),\n\t\tResources: dockercontainer.Resources{\n\t\t\tCgroupParent: spec.CgroupParent,\n\t\t\tNanoCPUs:     int64(spec.VCPUs) * 1000000000,\n\t\t\tMemory:       spec.RAM, // RAM\n\t\t\tMemorySwap:   spec.RAM, // RAM+swap\n\t\t\tKernelMemory: spec.RAM, // kernel portion\n\t\t},\n\t}\n\tif spec.GPUStack == \"cuda\" && spec.GPUDeviceCount > 0 {\n\t\tvar deviceIds []string\n\t\tif cudaVisibleDevices := os.Getenv(\"CUDA_VISIBLE_DEVICES\"); cudaVisibleDevices != \"\" {\n\t\t\t// If a resource manager such as slurm or LSF told\n\t\t\t// us to select specific devices we need to propagate that.\n\t\t\tdeviceIds = strings.Split(cudaVisibleDevices, \",\")\n\t\t}\n\n\t\tdeviceCount := spec.GPUDeviceCount\n\t\tif len(deviceIds) > 0 {\n\t\t\t// Docker won't accept both non-empty\n\t\t\t// DeviceIDs and a non-zero Count\n\t\t\t//\n\t\t\t// (it turns out \"Count\" is a dumb fallback\n\t\t\t// that just allocates device 0, 1, 2, ...,\n\t\t\t// Count-1)\n\t\t\tdeviceCount = 0\n\t\t}\n\n\t\t// Capabilities are confusing.  The driver has generic\n\t\t// capabilities \"gpu\" and \"nvidia\" but then there's\n\t\t// additional capabilities \"compute\" and \"utility\"\n\t\t// that are passed to nvidia-container-cli.\n\t\t//\n\t\t// \"compute\" means include the CUDA libraries and\n\t\t// \"utility\" means include the CUDA utility programs\n\t\t// (like nvidia-smi).\n\t\t//\n\t\t// https://github.com/moby/moby/blob/7b9275c0da707b030e62c96b679a976f31f929d3/daemon/nvidia_linux.go#L37\n\t\t// https://github.com/containerd/containerd/blob/main/contrib/nvidia/nvidia.go\n\t\thostCfg.Resources.DeviceRequests = append(hostCfg.Resources.DeviceRequests, dockercontainer.DeviceRequest{\n\t\t\tDriver:       \"nvidia\",\n\t\t\tCount:        deviceCount,\n\t\t\tDeviceIDs:    deviceIds,\n\t\t\tCapabilities: [][]string{[]string{\"gpu\", \"nvidia\", \"compute\", \"utility\"}},\n\t\t})\n\t}\n\tif spec.GPUStack == \"rocm\" && spec.GPUDeviceCount > 0 {\n\t\t// there's no container toolkit or builtin Docker\n\t\t// support for ROCm so we just provide the devices to\n\t\t// the container ourselves.\n\n\t\t// fortunately, the minimum version of this seems to be this:\n\t\t// rendergroup=$(getent group render | cut -d: -f3)\n\t\t// videogroup=$(getent group video | cut -d: -f3)\n\t\t// docker run -it --device=/dev/kfd --device=/dev/dri/renderD128 --user $(id -u) --group-add $videogroup --group-add $rendergroup \"$@\"\n\n\t\thostCfg.Devices = append(hostCfg.Devices, dockercontainer.DeviceMapping{\n\t\t\tPathInContainer:   \"/dev/kfd\",\n\t\t\tPathOnHost:        \"/dev/kfd\",\n\t\t\tCgroupPermissions: \"rwm\",\n\t\t})\n\t\tinfo, _ := os.Stat(\"/dev/kfd\")\n\t\tif stat, ok := info.Sys().(*syscall.Stat_t); ok {\n\t\t\t// Make sure the container has access\n\t\t\t// to the group id that allow it 
to\n\t\t\t\t// access the device.\n\t\t\t\thostCfg.GroupAdd = append(hostCfg.GroupAdd, fmt.Sprintf(\"%v\", stat.Gid))\n\t\t\t}\n\t\t}\n\n\t\tvar deviceIndexes []int\n\t\tif amdVisibleDevices := os.Getenv(\"AMD_VISIBLE_DEVICES\"); amdVisibleDevices != \"\" {\n\t\t\t// If a resource manager/dispatcher told us to\n\t\t\t// select specific devices, we need to\n\t\t\t// propagate that.\n\t\t\tfor _, dev := range strings.Split(amdVisibleDevices, \",\") {\n\t\t\t\tintDev, err := strconv.Atoi(dev)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdeviceIndexes = append(deviceIndexes, intDev)\n\t\t\t}\n\t\t} else {\n\t\t\t// Try every device; we'll check below to see\n\t\t\t// which ones actually exist.\n\t\t\tfor i := 0; i < 128; i++ {\n\t\t\t\tdeviceIndexes = append(deviceIndexes, i)\n\t\t\t}\n\t\t}\n\t\tfor _, intDev := range deviceIndexes {\n\t\t\tdevPath := fmt.Sprintf(\"/dev/dri/renderD%v\", 128+intDev)\n\t\t\tinfo, err := os.Stat(devPath)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\thostCfg.Devices = append(hostCfg.Devices, dockercontainer.DeviceMapping{\n\t\t\t\tPathInContainer:   devPath,\n\t\t\t\tPathOnHost:        devPath,\n\t\t\t\tCgroupPermissions: \"rwm\",\n\t\t\t})\n\t\t\tif stat, ok := info.Sys().(*syscall.Stat_t); ok {\n\t\t\t\t// Make sure the container has access\n\t\t\t\t// to the group id that allows it to\n\t\t\t\t// access the device.\n\t\t\t\tif !slices.Contains(hostCfg.GroupAdd, fmt.Sprintf(\"%v\", stat.Gid)) {\n\t\t\t\t\thostCfg.GroupAdd = append(hostCfg.GroupAdd, fmt.Sprintf(\"%v\", stat.Gid))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor path, mount := range spec.BindMounts {\n\t\tbind := mount.HostPath + \":\" + path\n\t\tif mount.ReadOnly {\n\t\t\tbind += \":ro\"\n\t\t}\n\t\thostCfg.Binds = append(hostCfg.Binds, bind)\n\t}\n\tif spec.EnableNetwork {\n\t\thostCfg.NetworkMode = dockercontainer.NetworkMode(spec.NetworkMode)\n\t}\n\treturn cfg, hostCfg\n}\n\nfunc (e *dockerExecutor) Create(spec containerSpec) error {\n\tcfg, hostCfg := e.config(spec)\n\tcreated, err := e.dockerclient.ContainerCreate(context.TODO(), &cfg, &hostCfg, nil, nil, e.containerUUID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"While creating container: %v\", err)\n\t}\n\te.containerID = created.ID\n\treturn e.startIO(spec.Stdin, spec.Stdout, spec.Stderr)\n}\n\nfunc (e *dockerExecutor) Pid() int {\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second))\n\tdefer cancel()\n\tctr, err := e.dockerclient.ContainerInspect(ctx, e.containerID)\n\tif err == nil && ctr.State != nil {\n\t\treturn ctr.State.Pid\n\t} else {\n\t\treturn 0\n\t}\n}\n\nfunc (e *dockerExecutor) Start() error {\n\treturn e.dockerclient.ContainerStart(context.TODO(), e.containerID, dockercontainer.StartOptions{})\n}\n\nfunc (e *dockerExecutor) Stop() error {\n\terr := e.dockerclient.ContainerRemove(context.TODO(), e.containerID, dockercontainer.RemoveOptions{Force: true})\n\tif err != nil && strings.Contains(err.Error(), \"No such container: \"+e.containerID) {\n\t\terr = nil\n\t}\n\treturn err\n}\n\n// Wait for the container to terminate, capture the exit code, and\n// wait for stdout/stderr logging to finish.\nfunc (e *dockerExecutor) Wait(ctx context.Context) (int, error) {\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\twatchdogErr := make(chan error, 1)\n\tgo func() {\n\t\tticker := time.NewTicker(e.watchdogInterval)\n\t\tdefer ticker.Stop()\n\t\tfor range ticker.C {\n\t\t\tdctx, dcancel := context.WithDeadline(ctx, time.Now().Add(e.watchdogInterval))\n\t\t\tctr, err 
:= e.dockerclient.ContainerInspect(dctx, e.containerID)\n\t\t\tdcancel()\n\t\t\tif ctx.Err() != nil {\n\t\t\t\t// Either the container already\n\t\t\t\t// exited, or our caller is trying to\n\t\t\t\t// kill it.\n\t\t\t\treturn\n\t\t\t} else if err != nil {\n\t\t\t\twatchdogErr <- fmt.Errorf(\"error inspecting container: %s\", err)\n\t\t\t} else if ctr.State == nil || !(ctr.State.Running || ctr.State.Status == \"created\") {\n\t\t\t\twatchdogErr <- fmt.Errorf(\"container is not running: State=%v\", ctr.State)\n\t\t\t} else {\n\t\t\t\twatchdogErr <- nil\n\t\t\t}\n\t\t}\n\t}()\n\n\twaitOk, waitErr := e.dockerclient.ContainerWait(ctx, e.containerID, dockercontainer.WaitConditionNotRunning)\n\terrors := 0\n\tfor {\n\t\tselect {\n\t\tcase waitBody := <-waitOk:\n\t\t\t// wait for stdout/stderr to complete\n\t\t\tselect {\n\t\t\tcase <-e.doneIO:\n\t\t\t\treturn int(waitBody.StatusCode), nil\n\t\t\tcase <-time.After(5 * time.Minute):\n\t\t\t\treturn -1, fmt.Errorf(\"container finished, but stdout/stderr did not complete: timed out\")\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn -1, fmt.Errorf(\"container finished, but stdout/stderr did not complete: %w\", ctx.Err())\n\t\t\t}\n\t\tcase err := <-waitErr:\n\t\t\treturn -1, fmt.Errorf(\"container wait: %v\", err)\n\n\t\tcase <-ctx.Done():\n\t\t\treturn -1, ctx.Err()\n\n\t\tcase err := <-watchdogErr:\n\t\t\tif err == nil {\n\t\t\t\terrors = 0\n\t\t\t} else {\n\t\t\t\te.logf(\"docker watchdog: %s\", err)\n\t\t\t\terrors++\n\t\t\t\tif errors >= dockerWatchdogThreshold {\n\t\t\t\t\te.logf(\"docker watchdog: giving up\")\n\t\t\t\t\treturn -1, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (e *dockerExecutor) startIO(stdin io.Reader, stdout, stderr io.Writer) error {\n\tresp, err := e.dockerclient.ContainerAttach(context.TODO(), e.containerID, dockercontainer.AttachOptions{\n\t\tStream: true,\n\t\tStdin:  stdin != nil,\n\t\tStdout: true,\n\t\tStderr: true,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error attaching container stdin/stdout/stderr streams: %v\", err)\n\t}\n\tvar errStdin error\n\tif stdin != nil {\n\t\tgo func() {\n\t\t\terrStdin = e.handleStdin(stdin, resp.Conn, resp.CloseWrite)\n\t\t}()\n\t}\n\te.doneIO = make(chan struct{})\n\tgo func() {\n\t\te.errIO = e.handleStdoutStderr(stdout, stderr, resp.Reader)\n\t\tif e.errIO == nil && errStdin != nil {\n\t\t\te.errIO = errStdin\n\t\t}\n\t\tclose(e.doneIO)\n\t}()\n\treturn nil\n}\n\nfunc (e *dockerExecutor) handleStdin(stdin io.Reader, conn io.Writer, closeConn func() error) error {\n\tdefer closeConn()\n\t_, err := io.Copy(conn, stdin)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"While writing to docker container on stdin: %v\", err)\n\t}\n\treturn nil\n}\n\n// Handle docker log protocol; see\n// https://docs.docker.com/engine/reference/api/docker_remote_api_v1.15/#attach-to-a-container\nfunc (e *dockerExecutor) handleStdoutStderr(stdout, stderr io.Writer, reader io.Reader) error {\n\theader := make([]byte, 8)\n\tvar err error\n\tfor err == nil {\n\t\t_, err = io.ReadAtLeast(reader, header, 8)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\t// Each frame starts with an 8-byte header: byte 0 is\n\t\t// the stream type (1=stdout, 2=stderr), bytes 1-3 are\n\t\t// unused, and bytes 4-7 are the frame length as a\n\t\t// big-endian uint32.\n\t\treadsize := int64(header[7]) | (int64(header[6]) << 8) | (int64(header[5]) << 16) | (int64(header[4]) << 24)\n\t\tif header[0] == 1 {\n\t\t\t_, err = io.CopyN(stdout, reader, readsize)\n\t\t} else {\n\t\t\t// stderr\n\t\t\t_, err = io.CopyN(stderr, reader, readsize)\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error copying stdout/stderr from docker: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc (e 
*dockerExecutor) Close() {\n\te.dockerclient.ContainerRemove(context.TODO(), e.containerID, dockercontainer.RemoveOptions{Force: true})\n}\n\nfunc (e *dockerExecutor) InjectCommand(ctx context.Context, detachKeys, username string, usingTTY bool, injectcmd []string) (*exec.Cmd, error) {\n\tcmd := exec.CommandContext(ctx, \"docker\", \"exec\", \"-i\", \"--detach-keys=\"+detachKeys, \"--user=\"+username)\n\tif usingTTY {\n\t\tcmd.Args = append(cmd.Args, \"-t\")\n\t}\n\tcmd.Args = append(cmd.Args, e.containerID)\n\tcmd.Args = append(cmd.Args, injectcmd...)\n\treturn cmd, nil\n}\n\nfunc (e *dockerExecutor) IPAddress() (string, error) {\n\tif ip, ok := e.savedIPAddress.Load().(*string); ok {\n\t\treturn *ip, nil\n\t}\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))\n\tdefer cancel()\n\tctr, err := e.dockerclient.ContainerInspect(ctx, e.containerID)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"cannot get docker container info: %s\", err)\n\t}\n\tip := ctr.NetworkSettings.IPAddress\n\tif ip == \"\" {\n\t\t// TODO: try to enable networking if it wasn't\n\t\t// already enabled when the container was\n\t\t// created.\n\t\treturn \"\", fmt.Errorf(\"container has no IP address\")\n\t}\n\te.savedIPAddress.Store(&ip)\n\treturn ip, nil\n}\n"
  },
  {
    "path": "lib/crunchrun/docker_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"io/ioutil\"\n\t\"os/exec\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\tdockercontainer \"github.com/docker/docker/api/types/container\"\n\t. \"gopkg.in/check.v1\"\n)\n\nvar _ = Suite(&dockerSuite{})\n\ntype dockerSuite struct {\n\texecutorSuite\n}\n\nfunc (s *dockerSuite) SetUpSuite(c *C) {\n\t_, err := exec.LookPath(\"docker\")\n\tif err != nil {\n\t\tc.Skip(\"looks like docker is not installed\")\n\t}\n\ts.newExecutor = func(c *C) {\n\t\texec.Command(\"docker\", \"rm\", \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\").Run()\n\t\tvar err error\n\t\ts.executor, err = newDockerExecutor(\"zzzzz-zzzzz-zzzzzzzzzzzzzzz\", c.Logf, time.Second/2)\n\t\tc.Assert(err, IsNil)\n\t}\n}\n\nfunc (s *dockerSuite) TestLoadImageError(c *C) {\n\timagefile := c.MkDir() + \"/bogus-image.tar\"\n\terr := ioutil.WriteFile(imagefile, []byte(\"this is not a docker image\"), 0777)\n\tc.Assert(err, IsNil)\n\terr = s.executor.LoadImage(\"\", imagefile, arvados.Container{}, \"\", nil)\n\tc.Assert(err, ErrorMatches, \"ImageLoad: unexpected EOF\")\n}\n\nvar _ = Suite(&dockerStubSuite{})\n\n// dockerStubSuite tests don't really connect to the docker service,\n// so we can run them even if docker is not installed.\ntype dockerStubSuite struct{}\n\nfunc (s *dockerStubSuite) TestDockerContainerConfig(c *C) {\n\te, err := newDockerExecutor(\"zzzzz-zzzzz-zzzzzzzzzzzzzzz\", c.Logf, time.Second/2)\n\tc.Assert(err, IsNil)\n\tcfg, hostCfg := e.config(containerSpec{\n\t\tVCPUs:          4,\n\t\tRAM:            123123123,\n\t\tWorkingDir:     \"/WorkingDir\",\n\t\tEnv:            map[string]string{\"FOO\": \"bar\"},\n\t\tBindMounts:     map[string]bindmount{\"/mnt\": {HostPath: \"/hostpath\", ReadOnly: true}},\n\t\tEnableNetwork:  false,\n\t\tGPUStack:       \"cuda\",\n\t\tGPUDeviceCount: 3,\n\t})\n\tc.Check(cfg.WorkingDir, Equals, \"/WorkingDir\")\n\tc.Check(cfg.Env, DeepEquals, []string{\"FOO=bar\"})\n\tc.Check(hostCfg.NetworkMode, Equals, dockercontainer.NetworkMode(\"none\"))\n\tc.Check(hostCfg.Resources.NanoCPUs, Equals, int64(4000000000))\n\tc.Check(hostCfg.Resources.Memory, Equals, int64(123123123))\n\tc.Check(hostCfg.Resources.MemorySwap, Equals, int64(123123123))\n\tc.Check(hostCfg.Resources.KernelMemory, Equals, int64(123123123))\n\tc.Check(hostCfg.Resources.DeviceRequests, DeepEquals, []dockercontainer.DeviceRequest{{\n\t\tDriver:       \"nvidia\",\n\t\tCount:        3,\n\t\tCapabilities: [][]string{{\"gpu\", \"nvidia\", \"compute\", \"utility\"}},\n\t}})\n}\n"
  },
  {
    "path": "lib/crunchrun/executor.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\npackage crunchrun\n\nimport (\n\t\"context\"\n\t\"io\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\ntype bindmount struct {\n\tHostPath string\n\tReadOnly bool\n}\n\ntype containerSpec struct {\n\tImage          string\n\tVCPUs          int\n\tRAM            int64\n\tWorkingDir     string\n\tEnv            map[string]string\n\tBindMounts     map[string]bindmount\n\tCommand        []string\n\tEnableNetwork  bool\n\tGPUStack       string\n\tGPUDeviceCount int\n\tNetworkMode    string // docker network mode, normally \"default\"\n\tCgroupParent   string\n\tStdin          io.Reader\n\tStdout         io.Writer\n\tStderr         io.Writer\n}\n\n// containerExecutor is an interface to a container runtime\n// (docker/singularity).\ntype containerExecutor interface {\n\t// ImageLoad loads the image from the given tarball such that\n\t// it can be used to create/start a container.\n\tLoadImage(imageID string, imageTarballPath string, container arvados.Container, keepMount string,\n\t\tcontainerClient *arvados.Client) error\n\n\t// Wait for the container process to finish, and return its\n\t// exit code. If applicable, also remove the stopped container\n\t// before returning.\n\tWait(context.Context) (int, error)\n\n\t// Create a container, but don't start it yet.\n\tCreate(containerSpec) error\n\n\t// Start the container\n\tStart() error\n\n\t// Process ID of a process in the container.  Return 0 if\n\t// container is finished or no process has started yet.\n\tPid() int\n\n\t// Stop the container immediately\n\tStop() error\n\n\t// Release resources (temp dirs, stopped containers)\n\tClose()\n\n\t// Name and version of runtime engine (\"docker 20.10.16\", \"singularity-ce version 3.9.9\")\n\tRuntime() string\n\n\tGatewayTarget\n}\n"
  },
  {
    "path": "lib/crunchrun/executor_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/netip\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/diagnostics\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t. \"gopkg.in/check.v1\"\n)\n\ntype nopWriteCloser struct{ io.Writer }\n\nfunc (nopWriteCloser) Close() error { return nil }\n\n// embedded by dockerSuite and singularitySuite so they can share\n// tests.\ntype executorSuite struct {\n\tnewExecutor func(*C) // embedding struct's SetUpSuite method must set this\n\texecutor    containerExecutor\n\tspec        containerSpec\n\tstdout      bytes.Buffer\n\tstderr      bytes.Buffer\n}\n\nfunc (s *executorSuite) SetUpTest(c *C) {\n\ts.newExecutor(c)\n\ts.stdout = bytes.Buffer{}\n\ts.stderr = bytes.Buffer{}\n\ts.spec = containerSpec{\n\t\tImage:       \"busybox:uclibc\",\n\t\tVCPUs:       1,\n\t\tWorkingDir:  \"\",\n\t\tEnv:         map[string]string{\"PATH\": \"/bin:/usr/bin\"},\n\t\tNetworkMode: \"default\",\n\t\tStdout:      nopWriteCloser{&s.stdout},\n\t\tStderr:      nopWriteCloser{&s.stderr},\n\t}\n\terr := s.executor.LoadImage(\"\", arvadostest.BusyboxDockerImage(c), arvados.Container{}, \"\", nil)\n\tc.Assert(err, IsNil)\n}\n\nfunc (s *executorSuite) TearDownTest(c *C) {\n\ts.executor.Close()\n}\n\nfunc (s *executorSuite) TestExecTrivialContainer(c *C) {\n\tc.Logf(\"Using container runtime: %s\", s.executor.Runtime())\n\ts.spec.Command = []string{\"echo\", \"ok\"}\n\ts.checkRun(c, 0)\n\tc.Check(s.stdout.String(), Equals, \"ok\\n\")\n\tc.Check(s.stderr.String(), Equals, \"\")\n}\n\nfunc (s *executorSuite) TestDiagnosticsImage(c *C) {\n\ts.newExecutor(c)\n\timagefile := c.MkDir() + \"/hello-world.tar\"\n\terr := ioutil.WriteFile(imagefile, diagnostics.HelloWorldDockerImage, 0777)\n\tc.Assert(err, IsNil)\n\terr = s.executor.LoadImage(\"\", imagefile, arvados.Container{}, \"\", nil)\n\tc.Assert(err, IsNil)\n\n\tc.Logf(\"Using container runtime: %s\", s.executor.Runtime())\n\ts.spec.Image = \"hello-world\"\n\ts.spec.Command = []string{\"/hello\"}\n\ts.checkRun(c, 0)\n\tc.Check(s.stdout.String(), Matches, `(?ms)\\nHello from Docker!\\n.*`)\n}\n\nfunc (s *executorSuite) TestExitStatus(c *C) {\n\ts.spec.Command = []string{\"false\"}\n\ts.checkRun(c, 1)\n}\n\nfunc (s *executorSuite) TestSignalExitStatus(c *C) {\n\tif _, isdocker := s.executor.(*dockerExecutor); isdocker {\n\t\t// It's not quite this easy to make busybox kill\n\t\t// itself in docker where it's pid 1.\n\t\tc.Skip(\"kill -9 $$ doesn't work on busybox with pid=1 in docker\")\n\t\treturn\n\t}\n\ts.spec.Command = []string{\"sh\", \"-c\", \"kill -9 $$\"}\n\ts.checkRun(c, 0x80+9)\n}\n\nfunc (s *executorSuite) TestExecStop(c *C) {\n\ts.spec.Command = []string{\"sh\", \"-c\", \"sleep 10; echo ok\"}\n\terr := s.executor.Create(s.spec)\n\tc.Assert(err, IsNil)\n\terr = s.executor.Start()\n\tc.Assert(err, IsNil)\n\tgo func() {\n\t\ttime.Sleep(time.Second / 10)\n\t\ts.executor.Stop()\n\t}()\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second))\n\tdefer cancel()\n\tcode, err := s.executor.Wait(ctx)\n\tc.Check(code, Not(Equals), 0)\n\tc.Check(err, IsNil)\n\tc.Check(s.stdout.String(), Equals, \"\")\n\tc.Check(s.stderr.String(), Equals, \"\")\n}\n\nfunc (s *executorSuite) TestExecCleanEnv(c *C) 
{\n\ts.spec.Command = []string{\"env\"}\n\ts.checkRun(c, 0)\n\tc.Check(s.stderr.String(), Equals, \"\")\n\tgot := map[string]string{}\n\tfor _, kv := range strings.Split(s.stdout.String(), \"\\n\") {\n\t\tif kv == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tkv := strings.SplitN(kv, \"=\", 2)\n\t\tswitch kv[0] {\n\t\tcase \"HOSTNAME\", \"HOME\":\n\t\t\t// docker sets these by itself\n\t\tcase \"LD_LIBRARY_PATH\", \"SINGULARITY_NAME\", \"PWD\", \"LANG\", \"SHLVL\", \"SINGULARITY_INIT\", \"SINGULARITY_CONTAINER\":\n\t\t\t// singularity sets these by itself (cf. https://sylabs.io/guides/3.5/user-guide/environment_and_metadata.html)\n\t\tcase \"SINGULARITY_APPNAME\":\n\t\t\t// singularity also sets this by itself (v3.5.2, but not v3.7.4)\n\t\tcase \"PROMPT_COMMAND\", \"PS1\", \"SINGULARITY_BIND\", \"SINGULARITY_COMMAND\", \"SINGULARITY_ENVIRONMENT\":\n\t\t\t// singularity also sets these by itself (v3.7.4)\n\t\tcase \"SINGULARITY_NO_EVAL\":\n\t\t\t// our singularity driver sets this to control\n\t\t\t// singularity behavior, and it gets passed\n\t\t\t// through to the container\n\t\tdefault:\n\t\t\tgot[kv[0]] = kv[1]\n\t\t}\n\t}\n\tc.Check(got, DeepEquals, s.spec.Env)\n}\nfunc (s *executorSuite) TestExecEnableNetwork(c *C) {\n\tfor _, enable := range []bool{false, true} {\n\t\ts.SetUpTest(c)\n\t\ts.spec.Command = []string{\"ip\", \"route\"}\n\t\ts.spec.EnableNetwork = enable\n\t\ts.checkRun(c, 0)\n\t\tif enable {\n\t\t\tc.Check(s.stdout.String(), Matches, \"(?ms).*default via.*\")\n\t\t} else {\n\t\t\tc.Check(s.stdout.String(), Equals, \"\")\n\t\t}\n\t}\n}\n\nfunc (s *executorSuite) TestExecWorkingDir(c *C) {\n\ts.spec.WorkingDir = \"/tmp\"\n\ts.spec.Command = []string{\"sh\", \"-c\", \"pwd\"}\n\ts.checkRun(c, 0)\n\tc.Check(s.stdout.String(), Equals, \"/tmp\\n\")\n}\n\nfunc (s *executorSuite) TestExecStdoutStderr(c *C) {\n\ts.spec.Command = []string{\"sh\", \"-c\", \"echo foo; echo -n bar >&2; echo baz; echo waz >&2\"}\n\ts.checkRun(c, 0)\n\tc.Check(s.stdout.String(), Equals, \"foo\\nbaz\\n\")\n\tc.Check(s.stderr.String(), Equals, \"barwaz\\n\")\n}\n\nfunc (s *executorSuite) TestEnableNetwork_Listen(c *C) {\n\t// Listen on an available port on the host.\n\tln, err := net.Listen(\"tcp\", net.JoinHostPort(\"0.0.0.0\", \"0\"))\n\tc.Assert(err, IsNil)\n\tdefer ln.Close()\n\t_, port, err := net.SplitHostPort(ln.Addr().String())\n\tc.Assert(err, IsNil)\n\n\t// Start a container that listens on the same port number that\n\t// is already in use on the host.\n\ts.spec.Command = []string{\"nc\", \"-l\", \"-p\", port, \"-e\", \"printf\", `HTTP/1.1 418 I'm a teapot\\r\\n\\r\\n`}\n\ts.spec.EnableNetwork = true\n\tc.Assert(s.executor.Create(s.spec), IsNil)\n\tc.Assert(s.executor.Start(), IsNil)\n\tstarttime := time.Now()\n\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second))\n\tdefer cancel()\n\n\tfor {\n\t\ttime.Sleep(time.Second / 10)\n\t\tif ctx.Err() != nil {\n\t\t\tc.Error(\"timed out\")\n\t\t\tbreak\n\t\t}\n\n\t\tip, err := s.executor.IPAddress()\n\t\tif err != nil {\n\t\t\tc.Logf(\"s.executor.IPAddress: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tc.Assert(ip, Not(Equals), \"\")\n\n\t\t// When we connect to the port using\n\t\t// s.executor.IPAddress(), we should reach the nc\n\t\t// process running inside the container, not the\n\t\t// net.Listen() running outside the container, even\n\t\t// though both listen on the same port.\n\t\tctx, cancel := context.WithDeadline(ctx, time.Now().Add(time.Second))\n\t\tdefer cancel()\n\t\treq, err := http.NewRequestWithContext(ctx, 
\"BREW\", \"http://\"+net.JoinHostPort(ip, port), nil)\n\t\tc.Assert(err, IsNil)\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tif err != nil {\n\t\t\tc.Logf(\"%s (retrying...)\", err)\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(resp.StatusCode, Equals, http.StatusTeapot)\n\t\tc.Logf(\"%s %q: %s\", req.Method, req.URL, resp.Status)\n\t\tbreak\n\t}\n\n\ts.executor.Stop()\n\tcode, _ := s.executor.Wait(ctx)\n\tc.Logf(\"container ran for %v\", time.Now().Sub(starttime))\n\tc.Check(code, Equals, -1)\n\n\tc.Logf(\"stdout:\\n%s\\n\\n\", s.stdout.String())\n\tc.Logf(\"stderr:\\n%s\\n\\n\", s.stderr.String())\n}\n\nfunc (s *executorSuite) TestEnableNetwork_IPAddress(c *C) {\n\ts.spec.Command = []string{\"ip\", \"ad\"}\n\ts.spec.EnableNetwork = true\n\tc.Assert(s.executor.Create(s.spec), IsNil)\n\tc.Assert(s.executor.Start(), IsNil)\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second))\n\tdefer cancel()\n\tcode, _ := s.executor.Wait(ctx)\n\tc.Check(code, Equals, 0)\n\tc.Logf(\"stdout:\\n%s\\n\\n\", s.stdout.String())\n\tc.Logf(\"stderr:\\n%s\\n\\n\", s.stderr.String())\n\n\tfound := false\n\tfor _, m := range regexp.MustCompile(` inet (.+?)/`).FindAllStringSubmatch(s.stdout.String(), -1) {\n\t\tif addr, err := netip.ParseAddr(m[1]); err == nil && !addr.IsLoopback() {\n\t\t\tfound = true\n\t\t\tc.Logf(\"found non-loopback IP address %q\", m[1])\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Check(found, Equals, true, Commentf(\"container does not appear to have a non-loopback IP address\"))\n}\n\nfunc (s *executorSuite) TestInject(c *C) {\n\thostdir := c.MkDir()\n\tc.Assert(os.WriteFile(hostdir+\"/testfile\", []byte(\"first tube\"), 0777), IsNil)\n\tmountdir := fmt.Sprintf(\"/injecttest-%d\", os.Getpid())\n\ts.spec.Command = []string{\"sleep\", \"10\"}\n\ts.spec.BindMounts = map[string]bindmount{mountdir: {HostPath: hostdir, ReadOnly: true}}\n\tc.Assert(s.executor.Create(s.spec), IsNil)\n\tc.Assert(s.executor.Start(), IsNil)\n\tstarttime := time.Now()\n\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second))\n\tdefer cancel()\n\n\t// Allow InjectCommand to fail a few times while the container\n\t// is starting\n\tfor ctx.Err() == nil {\n\t\t_, err := s.executor.InjectCommand(ctx, \"\", \"root\", false, []string{\"true\"})\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Second / 10)\n\t}\n\n\tinjectcmd := []string{\"cat\", mountdir + \"/testfile\"}\n\tcmd, err := s.executor.InjectCommand(ctx, \"\", \"root\", false, injectcmd)\n\tc.Assert(err, IsNil)\n\tout, err := cmd.CombinedOutput()\n\tc.Logf(\"inject %s => %q\", injectcmd, out)\n\tc.Check(err, IsNil)\n\tc.Check(string(out), Equals, \"first tube\")\n\n\ts.executor.Stop()\n\tcode, _ := s.executor.Wait(ctx)\n\tc.Logf(\"container ran for %v\", time.Now().Sub(starttime))\n\tc.Check(code, Equals, -1)\n}\n\nfunc (s *executorSuite) checkRun(c *C, expectCode int) {\n\tc.Assert(s.executor.Create(s.spec), IsNil)\n\tc.Assert(s.executor.Start(), IsNil)\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second))\n\tdefer cancel()\n\tcode, err := s.executor.Wait(ctx)\n\tc.Assert(err, IsNil)\n\tc.Check(code, Equals, expectCode)\n}\n"
  },
  {
    "path": "lib/crunchrun/integration_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\t. \"gopkg.in/check.v1\"\n)\n\nvar _ = Suite(&integrationSuite{})\n\ntype integrationSuite struct {\n\tengine string\n\timage  arvados.Collection\n\tinput  arvados.Collection\n\tstdin  bytes.Buffer\n\tstdout bytes.Buffer\n\tstderr bytes.Buffer\n\targs   []string\n\tcr     arvados.ContainerRequest\n\tclient *arvados.Client\n\tac     *arvadosclient.ArvadosClient\n\tkc     *keepclient.KeepClient\n\n\tlogCollection    arvados.Collection\n\toutputCollection arvados.Collection\n\tlogFiles         map[string]string // filename => contents\n}\n\nfunc (s *integrationSuite) SetUpSuite(c *C) {\n\t_, err := exec.LookPath(\"docker\")\n\tif err != nil {\n\t\tc.Skip(\"looks like docker is not installed\")\n\t}\n\n\tout, err := exec.Command(\"docker\", \"load\", \"--input\", arvadostest.BusyboxDockerImage(c)).CombinedOutput()\n\tc.Log(string(out))\n\tc.Assert(err, IsNil)\n\tout, err = exec.Command(\"arv-keepdocker\", \"--no-resume\", \"busybox:uclibc\").Output()\n\timageUUID := strings.TrimSpace(string(out))\n\tc.Logf(\"image uuid %s\", imageUUID)\n\tif !c.Check(err, IsNil) {\n\t\tif err, ok := err.(*exec.ExitError); ok {\n\t\t\tc.Logf(\"%s\", err.Stderr)\n\t\t}\n\t\tc.Fail()\n\t}\n\terr = arvados.NewClientFromEnv().RequestAndDecode(&s.image, \"GET\", \"arvados/v1/collections/\"+imageUUID, nil, nil)\n\tc.Assert(err, IsNil)\n\tc.Logf(\"image pdh %s\", s.image.PortableDataHash)\n\n\ts.client = arvados.NewClientFromEnv()\n\ts.ac, err = arvadosclient.New(s.client)\n\tc.Assert(err, IsNil)\n\ts.kc = keepclient.New(s.ac)\n\tfs, err := s.input.FileSystem(s.client, s.kc)\n\tc.Assert(err, IsNil)\n\tf, err := fs.OpenFile(\"inputfile\", os.O_CREATE|os.O_WRONLY, 0755)\n\tc.Assert(err, IsNil)\n\t_, err = f.Write([]byte(\"inputdata\"))\n\tc.Assert(err, IsNil)\n\terr = f.Close()\n\tc.Assert(err, IsNil)\n\ts.input.ManifestText, err = fs.MarshalManifest(\".\")\n\tc.Assert(err, IsNil)\n\terr = s.client.RequestAndDecode(&s.input, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\"ensure_unique_name\": true,\n\t\t\"collection\": map[string]interface{}{\n\t\t\t\"manifest_text\": s.input.ManifestText,\n\t\t},\n\t})\n\tc.Assert(err, IsNil)\n\tc.Logf(\"input pdh %s\", s.input.PortableDataHash)\n}\n\nfunc (s *integrationSuite) TearDownSuite(c *C) {\n\tos.Unsetenv(\"ARVADOS_KEEP_SERVICES\")\n\tif s.client == nil {\n\t\t// didn't set up\n\t\treturn\n\t}\n\terr := s.client.RequestAndDecode(nil, \"POST\", \"database/reset\", nil, nil)\n\tc.Check(err, IsNil)\n}\n\nfunc (s *integrationSuite) SetUpTest(c *C) {\n\tos.Unsetenv(\"ARVADOS_KEEP_SERVICES\")\n\ts.engine = \"docker\"\n\ts.args = nil\n\ts.stdin = bytes.Buffer{}\n\ts.stdout = bytes.Buffer{}\n\ts.stderr = bytes.Buffer{}\n\ts.logCollection = arvados.Collection{}\n\ts.outputCollection = arvados.Collection{}\n\ts.logFiles = map[string]string{}\n\ts.cr = arvados.ContainerRequest{\n\t\tPriority:       1,\n\t\tState:          \"Committed\",\n\t\tOutputPath:     \"/mnt/out\",\n\t\tContainerImage: 
s.image.PortableDataHash,\n\t\tMounts: map[string]arvados.Mount{\n\t\t\t\"/mnt/json\": {\n\t\t\t\tKind: \"json\",\n\t\t\t\tContent: []interface{}{\n\t\t\t\t\t\"foo\",\n\t\t\t\t\tmap[string]string{\"foo\": \"bar\"},\n\t\t\t\t\tnil,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"/mnt/in\": {\n\t\t\t\tKind:             \"collection\",\n\t\t\t\tPortableDataHash: s.input.PortableDataHash,\n\t\t\t},\n\t\t\t\"/mnt/out\": {\n\t\t\t\tKind:     \"tmp\",\n\t\t\t\tCapacity: 1000,\n\t\t\t},\n\t\t},\n\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\tRAM:   128000000,\n\t\t\tVCPUs: 1,\n\t\t\tAPI:   true,\n\t\t},\n\t}\n}\n\nfunc (s *integrationSuite) setup(c *C) {\n\terr := s.client.RequestAndDecode(&s.cr, \"POST\", \"arvados/v1/container_requests\", nil, map[string]interface{}{\"container_request\": map[string]interface{}{\n\t\t\"priority\":            s.cr.Priority,\n\t\t\"state\":               s.cr.State,\n\t\t\"command\":             s.cr.Command,\n\t\t\"output_path\":         s.cr.OutputPath,\n\t\t\"output_glob\":         s.cr.OutputGlob,\n\t\t\"container_image\":     s.cr.ContainerImage,\n\t\t\"mounts\":              s.cr.Mounts,\n\t\t\"runtime_constraints\": s.cr.RuntimeConstraints,\n\t\t\"use_existing\":        false,\n\t}})\n\tc.Assert(err, IsNil)\n\tc.Assert(s.cr.ContainerUUID, Not(Equals), \"\")\n\terr = s.client.RequestAndDecode(nil, \"POST\", \"arvados/v1/containers/\"+s.cr.ContainerUUID+\"/lock\", nil, nil)\n\tc.Assert(err, IsNil)\n}\n\nfunc (s *integrationSuite) TestRunTrivialContainerWithDocker(c *C) {\n\ts.engine = \"docker\"\n\ts.testRunTrivialContainer(c)\n\tc.Check(s.logFiles[\"crunch-run.txt\"], Matches, `(?ms).*Using container runtime: docker Engine \\d+\\.\\d+.*`)\n}\n\nfunc (s *integrationSuite) TestRunTrivialContainerWithSingularity(c *C) {\n\ts.engine = \"singularity\"\n\ts.testRunTrivialContainer(c)\n\tc.Check(s.logFiles[\"crunch-run.txt\"], Matches, `(?ms).*Using container runtime: singularity.* version [34]\\.\\d+.*`)\n}\n\nfunc (s *integrationSuite) TestRunTrivialContainerWithLocalKeepstore(c *C) {\n\tfor _, trial := range []struct {\n\t\tlogConfig           string\n\t\tmatchGetReq         Checker\n\t\tmatchPutReq         Checker\n\t\tmatchStartupMessage Checker\n\t}{\n\t\t{\"none\", Not(Matches), Not(Matches), Not(Matches)},\n\t\t{\"all\", Matches, Matches, Matches},\n\t\t{\"errors\", Not(Matches), Not(Matches), Matches},\n\t} {\n\t\tc.Logf(\"=== testing with Containers.LocalKeepLogsToContainerLog: %q\", trial.logConfig)\n\t\ts.SetUpTest(c)\n\n\t\tcfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()\n\t\tc.Assert(err, IsNil)\n\t\tcluster, err := cfg.GetCluster(\"\")\n\t\tc.Assert(err, IsNil)\n\t\tfor uuid, volume := range cluster.Volumes {\n\t\t\tvolume.AccessViaHosts = nil\n\t\t\tvolume.Replication = 2\n\t\t\tcluster.Volumes[uuid] = volume\n\n\t\t\tvar v struct {\n\t\t\t\tRoot string\n\t\t\t}\n\t\t\terr = json.Unmarshal(volume.DriverParameters, &v)\n\t\t\tc.Assert(err, IsNil)\n\t\t\terr = os.Mkdir(v.Root, 0777)\n\t\t\tif !os.IsExist(err) {\n\t\t\t\tc.Assert(err, IsNil)\n\t\t\t}\n\t\t}\n\t\tcluster.Containers.LocalKeepLogsToContainerLog = trial.logConfig\n\n\t\ts.stdin.Reset()\n\t\terr = json.NewEncoder(&s.stdin).Encode(ConfigData{\n\t\t\tEnv:         nil,\n\t\t\tKeepBuffers: 1,\n\t\t\tCluster:     cluster,\n\t\t})\n\t\tc.Assert(err, IsNil)\n\n\t\ts.engine = \"docker\"\n\t\ts.testRunTrivialContainer(c)\n\n\t\tlog, logExists := s.logFiles[\"keepstore.txt\"]\n\t\tif trial.logConfig == \"none\" {\n\t\t\tc.Check(logExists, Equals, false)\n\t\t} else {\n\t\t\tc.Check(log, 
Matches, `(?ms).*not running trash worker.*`)\n\t\t\tc.Check(log, Matches, `(?ms).*not running trash emptier.*`)\n\t\t\tc.Check(log, trial.matchGetReq, `(?ms).*\"reqMethod\":\"GET\".*`)\n\t\t\tc.Check(log, trial.matchPutReq, `(?ms).*\"reqMethod\":\"PUT\".*,\"reqPath\":\"0e3bcff26d51c895a60ea0d4585e134d\".*`)\n\t\t}\n\n\t\tc.Check(s.logFiles[\"crunch-run.txt\"], Matches, `(?ms).*using local keepstore process .* at http://[\\d\\.]{7,}:\\d+.*`)\n\t\tc.Check(s.logFiles[\"crunch-run.txt\"], Not(Matches), `(?ms).* at http://127\\..*`)\n\t\tc.Check(s.logFiles[\"crunch-run.txt\"], Not(Matches), `(?ms).* at http://169\\.254\\..*`)\n\t\tc.Check(s.logFiles[\"stderr.txt\"], Matches, `(?ms).*ARVADOS_KEEP_SERVICES=http://[\\d\\.]{7,}:\\d+\\n.*`)\n\t}\n}\n\nfunc (s *integrationSuite) TestRunTrivialContainerWithNoLocalKeepstore(c *C) {\n\t// Check that (1) config is loaded from $ARVADOS_CONFIG when\n\t// not provided on stdin and (2) if a local keepstore is not\n\t// started, crunch-run.txt explains why not.\n\ts.SetUpTest(c)\n\ts.stdin.Reset()\n\ts.testRunTrivialContainer(c)\n\tc.Check(s.logFiles[\"crunch-run.txt\"], Matches, `(?ms).*not starting a local keepstore process because KeepBuffers=0 in config\\n.*`)\n\n\ts.SetUpTest(c)\n\ts.args = []string{\"-config\", c.MkDir() + \"/config.yaml\"}\n\ts.stdin.Reset()\n\tbuf, err := ioutil.ReadFile(os.Getenv(\"ARVADOS_CONFIG\"))\n\tc.Assert(err, IsNil)\n\terr = ioutil.WriteFile(s.args[1], bytes.Replace(buf, []byte(\"LocalKeepBlobBuffersPerVCPU: 0\"), []byte(\"LocalKeepBlobBuffersPerVCPU: 1\"), -1), 0666)\n\tc.Assert(err, IsNil)\n\ts.testRunTrivialContainer(c)\n\tc.Check(s.logFiles[\"crunch-run.txt\"], Matches, `(?ms).*not starting a local keepstore process because a volume \\(zzzzz-nyw5e-00000000000000\\d\\) uses AccessViaHosts\\n.*`)\n\n\t// Check that config read errors are logged\n\ts.SetUpTest(c)\n\ts.args = []string{\"-config\", c.MkDir() + \"/config-error.yaml\"}\n\ts.stdin.Reset()\n\ts.testRunTrivialContainer(c)\n\tc.Check(s.logFiles[\"crunch-run.txt\"], Matches, `(?ms).*could not load config file \\Q`+s.args[1]+`\\E:.* no such file or directory\\n.*`)\n\n\ts.SetUpTest(c)\n\ts.args = []string{\"-config\", c.MkDir() + \"/config-unreadable.yaml\"}\n\ts.stdin.Reset()\n\terr = ioutil.WriteFile(s.args[1], []byte{}, 0)\n\tc.Check(err, IsNil)\n\ts.testRunTrivialContainer(c)\n\tc.Check(s.logFiles[\"crunch-run.txt\"], Matches, `(?ms).*could not load config file \\Q`+s.args[1]+`\\E:.* permission denied\\n.*`)\n\n\ts.SetUpTest(c)\n\ts.stdin.Reset()\n\ts.testRunTrivialContainer(c)\n\tc.Check(s.logFiles[\"crunch-run.txt\"], Matches, `(?ms).*loaded config file \\Q`+os.Getenv(\"ARVADOS_CONFIG\")+`\\E\\n.*`)\n}\n\nfunc (s *integrationSuite) TestRunTrivialContainerWithOutputGlob(c *C) {\n\ts.cr.OutputGlob = []string{\"js?n\"}\n\ts.testRunTrivialContainer(c)\n\tfs, err := s.outputCollection.FileSystem(s.client, s.kc)\n\tc.Assert(err, IsNil)\n\t_, err = fs.Stat(\"json\")\n\tc.Check(err, IsNil)\n\t_, err = fs.Stat(\"inputfile\")\n\tc.Check(err, Equals, os.ErrNotExist)\n\t_, err = fs.Stat(\"emptydir\")\n\tc.Check(err, Equals, os.ErrNotExist)\n}\n\nfunc (s *integrationSuite) testRunTrivialContainer(c *C) {\n\tif err := exec.Command(\"which\", s.engine).Run(); err != nil {\n\t\tc.Skip(fmt.Sprintf(\"%s: %s\", s.engine, err))\n\t}\n\ts.cr.Command = []string{\"sh\", \"-c\", \"env >&2 && cat /mnt/in/inputfile >/mnt/out/inputfile && cat /mnt/json >/mnt/out/json && ! 
touch /mnt/in/shouldbereadonly && mkdir /mnt/out/emptydir\"}\n\ts.setup(c)\n\n\targs := []string{\n\t\t\"-runtime-engine=\" + s.engine,\n\t\t\"-enable-memory-limit=false\",\n\t}\n\tif s.stdin.Len() > 0 {\n\t\targs = append(args, \"-stdin-config=true\")\n\t}\n\targs = append(args, s.args...)\n\targs = append(args, s.cr.ContainerUUID)\n\tcode := command{}.RunCommand(\"crunch-run\", args, &s.stdin, io.MultiWriter(&s.stdout, os.Stderr), io.MultiWriter(&s.stderr, os.Stderr))\n\tc.Logf(\"\\n===== stdout =====\\n%s\", s.stdout.String())\n\tc.Logf(\"\\n===== stderr =====\\n%s\", s.stderr.String())\n\tc.Check(code, Equals, 0)\n\terr := s.client.RequestAndDecode(&s.cr, \"GET\", \"arvados/v1/container_requests/\"+s.cr.UUID, nil, nil)\n\tc.Assert(err, IsNil)\n\tc.Logf(\"Finished container request: %#v\", s.cr)\n\n\ts.loadLogAndOutputCollections(c)\n\n\tif len(s.cr.OutputGlob) == 0 {\n\t\tfs, err := s.outputCollection.FileSystem(s.client, s.kc)\n\t\tc.Assert(err, IsNil)\n\t\tif f, err := fs.Open(\"inputfile\"); c.Check(err, IsNil) {\n\t\t\tdefer f.Close()\n\t\t\tbuf, err := ioutil.ReadAll(f)\n\t\t\tc.Check(err, IsNil)\n\t\t\tc.Check(string(buf), Equals, \"inputdata\")\n\t\t}\n\t\tif f, err := fs.Open(\"json\"); c.Check(err, IsNil) {\n\t\t\tdefer f.Close()\n\t\t\tbuf, err := ioutil.ReadAll(f)\n\t\t\tc.Check(err, IsNil)\n\t\t\tc.Check(string(buf), Equals, `[\"foo\",{\"foo\":\"bar\"},null]`)\n\t\t}\n\t\tif fi, err := fs.Stat(\"emptydir\"); c.Check(err, IsNil) {\n\t\t\tc.Check(fi.IsDir(), Equals, true)\n\t\t}\n\t\tif d, err := fs.Open(\"emptydir\"); c.Check(err, IsNil) {\n\t\t\tdefer d.Close()\n\t\t\tfis, err := d.Readdir(-1)\n\t\t\tc.Assert(err, IsNil)\n\t\t\t// crunch-run still saves a \".keep\" file to preserve\n\t\t\t// empty dirs even though that shouldn't be\n\t\t\t// necessary. 
Ideally we would do:\n\t\t\t// c.Check(fis, HasLen, 0)\n\t\t\tfor _, fi := range fis {\n\t\t\t\tc.Check(fi.Name(), Equals, \".keep\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *integrationSuite) loadLogAndOutputCollections(c *C) {\n\tvar log arvados.Collection\n\terr := s.client.RequestAndDecode(&log, \"GET\", \"arvados/v1/collections/\"+s.cr.LogUUID, nil, nil)\n\tc.Assert(err, IsNil)\n\tfs, err := log.FileSystem(s.client, s.kc)\n\tc.Assert(err, IsNil)\n\tif d, err := fs.Open(\"/\"); c.Check(err, IsNil) {\n\t\tfis, err := d.Readdir(-1)\n\t\tc.Assert(err, IsNil)\n\t\tfor _, fi := range fis {\n\t\t\tif fi.IsDir() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf, err := fs.Open(fi.Name())\n\t\t\tc.Assert(err, IsNil)\n\t\t\tbuf, err := ioutil.ReadAll(f)\n\t\t\tc.Assert(err, IsNil)\n\t\t\tc.Logf(\"\\n===== %s =====\\n%s\", fi.Name(), buf)\n\t\t\ts.logFiles[fi.Name()] = string(buf)\n\t\t}\n\t}\n\ts.logCollection = log\n\n\tvar output arvados.Collection\n\terr = s.client.RequestAndDecode(&output, \"GET\", \"arvados/v1/collections/\"+s.cr.OutputUUID, nil, nil)\n\tc.Assert(err, IsNil)\n\ts.outputCollection = output\n}\n\nfunc (s *integrationSuite) TestRunContainer_CopyManyFiles(c *C) {\n\tbiginput := arvados.Collection{}\n\tfs, err := biginput.FileSystem(s.client, s.kc)\n\tc.Assert(err, IsNil)\n\tfor i := 0; i < 1000; i++ {\n\t\tf, err := fs.OpenFile(fmt.Sprintf(\"file%d\", i), os.O_CREATE|os.O_WRONLY, 0755)\n\t\tc.Assert(err, IsNil)\n\t\t_, err = f.Write([]byte{'a'})\n\t\tc.Assert(err, IsNil)\n\t\terr = f.Close()\n\t\tc.Assert(err, IsNil)\n\t}\n\tbiginput.ManifestText, err = fs.MarshalManifest(\".\")\n\tc.Assert(err, IsNil)\n\terr = s.client.RequestAndDecode(&biginput, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\"ensure_unique_name\": true,\n\t\t\"collection\": map[string]interface{}{\n\t\t\t\"manifest_text\": biginput.ManifestText,\n\t\t},\n\t})\n\tc.Assert(err, IsNil)\n\ts.cr.Mounts[\"/mnt/out/in\"] = arvados.Mount{\n\t\tKind:             \"collection\",\n\t\tPortableDataHash: biginput.PortableDataHash,\n\t}\n\ts.testRunContainer_ShellCommand(c, \"set -e; cd /mnt/out/in; ls | while read f; do cp $f ../out-$f; done; cd /mnt/out; ls -R | wc -l\")\n\ts.loadLogAndOutputCollections(c)\n\tc.Check(s.logFiles[\"crunch-run.txt\"], Matches, `(?ms).*\\Qcopying \"in\" from `+biginput.PortableDataHash+`/.\\E\\n.*`)\n\tc.Check(s.logFiles[\"crunch-run.txt\"], Matches, `(?ms).*\\Qcopying \"out-file999\" (1 bytes)\\E\\n.*`)\n\tc.Check(s.logFiles[\"stdout.txt\"], Matches, `.* 2004\\n`)\n}\n\nfunc (s *integrationSuite) testRunContainer_ShellCommand(c *C, cmdline string) {\n\tif err := exec.Command(\"which\", s.engine).Run(); err != nil {\n\t\tc.Skip(fmt.Sprintf(\"%s: %s\", s.engine, err))\n\t}\n\ts.cr.Command = []string{\"sh\", \"-c\", cmdline}\n\ts.setup(c)\n\targs := []string{\n\t\t\"-runtime-engine=\" + s.engine,\n\t\t\"-enable-memory-limit=false\",\n\t}\n\tif s.stdin.Len() > 0 {\n\t\targs = append(args, \"-stdin-config=true\")\n\t}\n\targs = append(args, s.args...)\n\targs = append(args, s.cr.ContainerUUID)\n\tcode := command{}.RunCommand(\"crunch-run\", args, &s.stdin, io.MultiWriter(&s.stdout, os.Stderr), io.MultiWriter(&s.stderr, os.Stderr))\n\tc.Logf(\"\\n===== stdout =====\\n%s\", s.stdout.String())\n\tc.Logf(\"\\n===== stderr =====\\n%s\", s.stderr.String())\n\tc.Check(code, Equals, 0)\n\terr := s.client.RequestAndDecode(&s.cr, \"GET\", \"arvados/v1/container_requests/\"+s.cr.UUID, nil, nil)\n\tc.Assert(err, IsNil)\n\tc.Logf(\"Finished container request: %#v\", s.cr)\n}\n"
  },
  {
    "path": "lib/crunchrun/logging.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"io\"\n\t\"log\"\n\t\"time\"\n)\n\n// rfc3339NanoFixed is a fixed-width version of time.RFC3339Nano.\nconst rfc3339NanoFixed = \"2006-01-02T15:04:05.000000000Z07:00\"\n\n// prefixer wraps an io.Writer, inserting a string returned by\n// prefixFunc at the beginning of each line.\ntype prefixer struct {\n\twriter     io.Writer\n\tprefixFunc func() string\n\tunfinished bool // true if the most recent write ended with a non-newline char\n}\n\n// newTimestamper wraps an io.Writer, inserting an RFC3339NanoFixed\n// timestamp at the beginning of each line.\nfunc newTimestamper(w io.Writer) *prefixer {\n\treturn &prefixer{\n\t\twriter:     w,\n\t\tprefixFunc: func() string { return time.Now().UTC().Format(rfc3339NanoFixed + \" \") },\n\t}\n}\n\n// newStringPrefixer wraps an io.Writer, inserting the given string at\n// the beginning of each line. The given string should include a\n// trailing space for readability.\nfunc newStringPrefixer(w io.Writer, s string) *prefixer {\n\treturn &prefixer{\n\t\twriter:     w,\n\t\tprefixFunc: func() string { return s },\n\t}\n}\n\nfunc (tp *prefixer) Write(p []byte) (n int, err error) {\n\tfor len(p) > 0 && err == nil {\n\t\tif !tp.unfinished {\n\t\t\t_, err = io.WriteString(tp.writer, tp.prefixFunc())\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tnewline := bytes.IndexRune(p, '\\n')\n\t\tvar nn int\n\t\tif newline < 0 {\n\t\t\ttp.unfinished = true\n\t\t\tnn, err = tp.writer.Write(p)\n\t\t\tp = nil\n\t\t} else {\n\t\t\ttp.unfinished = false\n\t\t\tnn, err = tp.writer.Write(p[:newline+1])\n\t\t\tp = p[nn:]\n\t\t}\n\t\tn += nn\n\t}\n\treturn\n}\n\n// logWriter adds log.Logger methods to an io.Writer.\ntype logWriter struct {\n\tio.Writer\n\t*log.Logger\n}\n\nfunc newLogWriter(w io.Writer) *logWriter {\n\treturn &logWriter{\n\t\tWriter: w,\n\t\tLogger: log.New(w, \"\", 0),\n\t}\n}\n\nvar crunchLogUpdatePeriod = time.Hour / 2\nvar crunchLogUpdateSize = int64(1 << 25)\n\n// load the rate limit discovery config parameters\nfunc loadLogThrottleParams(clnt IArvadosClient) {\n\tloadDuration := func(dst *time.Duration, key string) {\n\t\tif param, err := clnt.Discovery(key); err != nil {\n\t\t\treturn\n\t\t} else if d, ok := param.(float64); !ok {\n\t\t\treturn\n\t\t} else {\n\t\t\t*dst = time.Duration(d) * time.Second\n\t\t}\n\t}\n\tloadInt64 := func(dst *int64, key string) {\n\t\tif param, err := clnt.Discovery(key); err != nil {\n\t\t\treturn\n\t\t} else if val, ok := param.(float64); !ok {\n\t\t\treturn\n\t\t} else {\n\t\t\t*dst = int64(val)\n\t\t}\n\t}\n\n\tloadInt64(&crunchLogUpdateSize, \"crunchLogUpdateSize\")\n\tloadDuration(&crunchLogUpdatePeriod, \"crunchLogUpdatePeriod\")\n\n}\n\ntype filterKeepstoreErrorsOnly struct {\n\tio.WriteCloser\n\tbuf []byte\n}\n\nfunc (f *filterKeepstoreErrorsOnly) Write(p []byte) (int, error) {\n\tf.buf = append(f.buf, p...)\n\tstart := 0\n\tfor i := len(f.buf) - len(p); i < len(f.buf); i++ {\n\t\tif f.buf[i] == '\\n' {\n\t\t\tif f.check(f.buf[start:i]) {\n\t\t\t\t_, err := f.WriteCloser.Write(f.buf[start : i+1])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 0, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tstart = i + 1\n\t\t}\n\t}\n\tif start > 0 {\n\t\tcopy(f.buf, f.buf[start:])\n\t\tf.buf = f.buf[:len(f.buf)-start]\n\t}\n\treturn len(p), nil\n}\n\nfunc (f *filterKeepstoreErrorsOnly) check(line []byte) bool {\n\tif len(line) == 0 {\n\t\treturn 
false\n\t}\n\tif line[0] != '{' {\n\t\treturn true\n\t}\n\tvar m map[string]interface{}\n\terr := json.Unmarshal(line, &m)\n\tif err != nil {\n\t\treturn true\n\t}\n\tif m[\"msg\"] == \"request\" {\n\t\treturn false\n\t}\n\tif m[\"msg\"] == \"response\" {\n\t\tif code, _ := m[\"respStatusCode\"].(float64); code >= 200 && code < 300 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "lib/crunchrun/logging_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t. \"gopkg.in/check.v1\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nconst reTimestamp = `....-..-..T..:..:..\\..........Z`\n\n// newTestTimestamper wraps an io.Writer, inserting a predictable\n// RFC3339NanoFixed timestamp at the beginning of each line.\nfunc newTestTimestamper(w io.Writer) *prefixer {\n\tcount := 0\n\treturn &prefixer{\n\t\twriter: w,\n\t\tprefixFunc: func() string {\n\t\t\tcount++\n\t\t\treturn fmt.Sprintf(\"2015-12-29T15:51:45.%09dZ \", count)\n\t\t},\n\t}\n}\n\ntype LoggingTestSuite struct {\n\tclient *arvados.Client\n}\n\n// Gocheck boilerplate\nvar _ = Suite(&LoggingTestSuite{})\n\nfunc (s *LoggingTestSuite) SetUpTest(c *C) {\n\ts.client = arvados.NewClientFromEnv()\n\tcrunchLogUpdatePeriod = time.Hour * 24 * 365\n\tcrunchLogUpdateSize = 1 << 50\n}\n\nfunc (s *LoggingTestSuite) TestWriteLogs(c *C) {\n\tapi := &ArvTestClient{}\n\tkc := &KeepTestClient{}\n\tdefer kc.Close()\n\tcr, err := NewContainerRunner(s.client, api, kc, \"zzzzz-dz642-zzzzzzzzzzzzzzz\")\n\tc.Assert(err, IsNil)\n\tf, err := cr.openLogFile(\"crunch-run\")\n\tc.Assert(err, IsNil)\n\tcr.CrunchLog = newLogWriter(newTestTimestamper(f))\n\n\tcr.CrunchLog.Print(\"Hello world!\")\n\tcr.CrunchLog.Print(\"Goodbye\")\n\n\tc.Check(api.Calls, Equals, 0)\n\n\tlogs := logFileContent(c, cr, \"crunch-run.txt\")\n\tc.Check(logs, Matches, reTimestamp+` Hello world!\\n`+\n\t\treTimestamp+` Goodbye\\n`)\n}\n\nfunc (s *LoggingTestSuite) TestWriteLogsLarge(c *C) {\n\tif testing.Short() {\n\t\treturn\n\t}\n\tapi := &ArvTestClient{}\n\tkc := &KeepTestClient{}\n\tdefer kc.Close()\n\tcr, err := NewContainerRunner(s.client, api, kc, \"zzzzz-zzzzzzzzzzzzzzz\")\n\tc.Assert(err, IsNil)\n\tf, err := cr.openLogFile(\"crunch-run\")\n\tc.Assert(err, IsNil)\n\tcr.CrunchLog = newLogWriter(newTestTimestamper(f))\n\tfor i := 0; i < 2000000; i++ {\n\t\tcr.CrunchLog.Printf(\"Hello %d\", i)\n\t}\n\tcr.CrunchLog.Print(\"Goodbye\")\n\n\tlogs := logFileContent(c, cr, \"crunch-run.txt\")\n\tc.Check(strings.Count(logs, \"\\n\"), Equals, 2000001)\n\t// Redact most of the logs except the start/end for the regexp\n\t// match -- otherwise, when the regexp fails, gocheck spams\n\t// the test logs with tens of megabytes of quoted strings.\n\tc.Assert(len(logs) > 10000, Equals, true)\n\tc.Check(logs[:500]+\"\\n...\\n\"+logs[len(logs)-500:], Matches, `(?ms)2015-12-29T15:51:45.000000001Z Hello 0\n2015-12-29T15:51:45.000000002Z Hello 1\n2015-12-29T15:51:45.000000003Z Hello 2\n2015-12-29T15:51:45.000000004Z Hello 3\n.*\n2015-12-29T15:51:45.001999998Z Hello 1999997\n2015-12-29T15:51:45.001999999Z Hello 1999998\n2015-12-29T15:51:45.002000000Z Hello 1999999\n2015-12-29T15:51:45.002000001Z Goodbye\n`)\n\n\tmt, err := cr.LogCollection.MarshalManifest(\".\")\n\tc.Check(err, IsNil)\n\tc.Check(mt, Equals, \". 
9c2c05d1fae6aaa8af85113ba725716d+67108864 80b821383a07266c2a66a4566835e26e+21780065 0:88888929:crunch-run.txt\\n\")\n}\n\nfunc (s *LoggingTestSuite) TestLogUpdate(c *C) {\n\tfor _, trial := range []struct {\n\t\tmaxBytes    int64\n\t\tmaxDuration time.Duration\n\t}{\n\t\t{1000, 10 * time.Second},\n\t\t{1000000, time.Millisecond},\n\t} {\n\t\tc.Logf(\"max %d bytes, %s\", trial.maxBytes, trial.maxDuration)\n\t\tcrunchLogUpdateSize = trial.maxBytes\n\t\tcrunchLogUpdatePeriod = trial.maxDuration\n\n\t\tapi := &ArvTestClient{}\n\t\tkc := &KeepTestClient{}\n\t\tdefer kc.Close()\n\t\tcr, err := NewContainerRunner(s.client, api, kc, \"zzzzz-dz642-zzzzzzzzzzzzzzz\")\n\t\tc.Assert(err, IsNil)\n\t\tf, err := cr.openLogFile(\"crunch-run\")\n\t\tc.Assert(err, IsNil)\n\t\tcr.CrunchLog = newLogWriter(newTestTimestamper(f))\n\t\tstdout, err := cr.openLogFile(\"stdout\")\n\t\tc.Assert(err, IsNil)\n\n\t\tc.Check(cr.logUUID, Equals, \"\")\n\t\tcr.CrunchLog.Printf(\"Hello %1000s\", \"space\")\n\t\tfor i, t := 0, time.NewTicker(time.Millisecond); i < 5000 && cr.logUUID == \"\"; i++ {\n\t\t\t<-t.C\n\t\t}\n\t\tc.Check(cr.logUUID, Not(Equals), \"\")\n\t\tcr.CrunchLog.Print(\"Goodbye\")\n\t\tfmt.Fprintln(stdout, \"Goodbye\")\n\n\t\tc.Check(logFileContent(c, cr, \"crunch-run.txt\"), Matches, reTimestamp+` Hello  {995}space\\n`+\n\t\t\treTimestamp+` Goodbye\\n`)\n\t\tc.Check(logFileContent(c, cr, \"stdout.txt\"), Matches, `Goodbye\\n`)\n\n\t\tmt, err := cr.LogCollection.MarshalManifest(\".\")\n\t\tc.Check(err, IsNil)\n\t\tc.Check(mt, Matches, `. 4dc76e0a212bfa30c39d76d8c16da0c0\\+1038 5be52044a8c51e7b62dd62be07872968\\+47 0:1077:crunch-run.txt 1077:8:stdout.txt\\n`)\n\t}\n}\n\ntype filterSuite struct{}\n\nvar _ = Suite(&filterSuite{})\n\nfunc (*filterSuite) TestFilterKeepstoreErrorsOnly(c *check.C) {\n\tvar buf bytes.Buffer\n\tf := filterKeepstoreErrorsOnly{WriteCloser: nopCloser{&buf}}\n\tfor _, s := range []string{\n\t\t\"not j\",\n\t\t\"son\\n\" + `{\"msg\":\"foo\"}` + \"\\n{}\\n\" + `{\"msg\":\"request\"}` + \"\\n\" + `{\"msg\":1234}` + \"\\n\\n\",\n\t\t\"\\n[\\n\",\n\t\t`{\"msg\":\"response\",\"respStatusCode\":404,\"foo\": \"bar\"}` + \"\\n\",\n\t\t`{\"msg\":\"response\",\"respStatusCode\":206}` + \"\\n\",\n\t} {\n\t\tf.Write([]byte(s))\n\t}\n\tc.Check(buf.String(), check.Equals, `not json\n{\"msg\":\"foo\"}\n{}\n{\"msg\":1234}\n[\n{\"msg\":\"response\",\"respStatusCode\":404,\"foo\": \"bar\"}\n`)\n}\n\ntype nopCloser struct {\n\tio.Writer\n}\n\nfunc (nopCloser) Close() error { return nil }\n"
  },
  {
    "path": "lib/crunchrun/logscanner.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n)\n\n// logScanner is an io.Writer that calls ReportFunc(pattern) the first\n// time one of the Patterns appears in the data. Patterns must not\n// contain newlines.\ntype logScanner struct {\n\tPatterns   []string\n\tReportFunc func(pattern, text string)\n\treported   bool\n\tbuf        bytes.Buffer\n}\n\nfunc (s *logScanner) Write(p []byte) (int, error) {\n\tif s.reported {\n\t\t// We only call reportFunc once. Once we've called it\n\t\t// there's no need to buffer/search subsequent writes.\n\t\treturn len(p), nil\n\t}\n\tsplit := bytes.LastIndexByte(p, '\\n')\n\tif split < 0 {\n\t\treturn s.buf.Write(p)\n\t}\n\ts.buf.Write(p[:split+1])\n\ttxt := s.buf.String()\n\tfor _, pattern := range s.Patterns {\n\t\tif found := strings.Index(txt, pattern); found >= 0 {\n\t\t\t// Report the entire line where the pattern\n\t\t\t// was found.\n\t\t\ttxt = txt[strings.LastIndexByte(txt[:found], '\\n')+1:]\n\t\t\tif end := strings.IndexByte(txt, '\\n'); end >= 0 {\n\t\t\t\ttxt = txt[:end]\n\t\t\t}\n\t\t\ts.ReportFunc(pattern, txt)\n\t\t\ts.reported = true\n\t\t\treturn len(p), nil\n\t\t}\n\t}\n\ts.buf.Reset()\n\tif split == len(p) {\n\t\treturn len(p), nil\n\t}\n\tn, err := s.buf.Write(p[split+1:])\n\treturn n + split + 1, err\n}\n"
  },
  {
    "path": "lib/crunchrun/logscanner_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&logScannerSuite{})\n\ntype logScannerSuite struct {\n}\n\nfunc (s *logScannerSuite) TestCallReportFuncOnce(c *check.C) {\n\tvar reported []string\n\tls := logScanner{\n\t\tPatterns: []string{\"foobar\", \"barbaz\"},\n\t\tReportFunc: func(pattern, detail string) {\n\t\t\treported = append(reported, pattern, detail)\n\t\t},\n\t}\n\tls.Write([]byte(\"foo\\nbar\\n2021-01-01T00:00:00.000Z: bar\"))\n\tls.Write([]byte(\"baz: it's a detail\\nwaz\\nqux\"))\n\tls.Write([]byte(\"\\nfoobar\\n\"))\n\tc.Check(reported, check.DeepEquals, []string{\"barbaz\", \"2021-01-01T00:00:00.000Z: barbaz: it's a detail\"})\n}\n\nfunc (s *logScannerSuite) TestOneWritePerLine(c *check.C) {\n\tvar reported []string\n\tls := logScanner{\n\t\tPatterns: []string{\"barbaz\"},\n\t\tReportFunc: func(pattern, detail string) {\n\t\t\treported = append(reported, pattern, detail)\n\t\t},\n\t}\n\tls.Write([]byte(\"foo\\n\"))\n\tls.Write([]byte(\"2021-01-01T00:00:00.000Z: barbaz: it's a detail\\n\"))\n\tls.Write([]byte(\"waz\\n\"))\n\tc.Check(reported, check.DeepEquals, []string{\"barbaz\", \"2021-01-01T00:00:00.000Z: barbaz: it's a detail\"})\n}\n\nfunc (s *logScannerSuite) TestNoDetail(c *check.C) {\n\tvar reported []string\n\tls := logScanner{\n\t\tPatterns: []string{\"barbaz\"},\n\t\tReportFunc: func(pattern, detail string) {\n\t\t\treported = append(reported, pattern, detail)\n\t\t},\n\t}\n\tls.Write([]byte(\"foo\\n\"))\n\tls.Write([]byte(\"barbaz\\n\"))\n\tls.Write([]byte(\"waz\\n\"))\n\tc.Check(reported, check.DeepEquals, []string{\"barbaz\", \"barbaz\"})\n}\n"
  },
  {
    "path": "lib/crunchrun/singularity.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"os/exec\"\n\t\"os/user\"\n\t\"path\"\n\t\"regexp\"\n\t\"slices\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\ntype singularityExecutor struct {\n\tlogf          func(string, ...interface{})\n\tsudo          bool // use sudo to run singularity (only used by tests)\n\tspec          containerSpec\n\ttmpdir        string\n\tchild         *exec.Cmd\n\timageFilename string // \"sif\" image\n}\n\nfunc newSingularityExecutor(logf func(string, ...interface{})) (*singularityExecutor, error) {\n\ttmpdir, err := ioutil.TempDir(\"\", \"crunch-run-singularity-\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsudo := os.Getenv(\"ARVADOS_TEST_PRIVESC\") == \"sudo\" && strings.Contains(os.Args[0], \"crunch-run~\")\n\treturn &singularityExecutor{\n\t\tlogf:   logf,\n\t\ttmpdir: tmpdir,\n\t\tsudo:   sudo,\n\t}, nil\n}\n\nfunc (e *singularityExecutor) Runtime() string {\n\tbuf, err := exec.Command(\"singularity\", \"--version\").CombinedOutput()\n\tif err != nil {\n\t\treturn \"singularity (unknown version)\"\n\t}\n\treturn strings.TrimSuffix(string(buf), \"\\n\")\n}\n\nfunc (e *singularityExecutor) getOrCreateProject(ownerUuid string, name string, containerClient *arvados.Client) (*arvados.Group, error) {\n\tvar gp arvados.GroupList\n\terr := containerClient.RequestAndDecode(&gp,\n\t\tarvados.EndpointGroupList.Method,\n\t\tarvados.EndpointGroupList.Path,\n\t\tnil, arvados.ListOptions{Filters: []arvados.Filter{\n\t\t\tarvados.Filter{\"owner_uuid\", \"=\", ownerUuid},\n\t\t\tarvados.Filter{\"name\", \"=\", name},\n\t\t\tarvados.Filter{\"group_class\", \"=\", \"project\"},\n\t\t},\n\t\t\tLimit: 1})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(gp.Items) == 1 {\n\t\treturn &gp.Items[0], nil\n\t}\n\n\tvar rgroup arvados.Group\n\terr = containerClient.RequestAndDecode(&rgroup,\n\t\tarvados.EndpointGroupCreate.Method,\n\t\tarvados.EndpointGroupCreate.Path,\n\t\tnil, map[string]interface{}{\n\t\t\t\"group\": map[string]string{\n\t\t\t\t\"owner_uuid\":  ownerUuid,\n\t\t\t\t\"name\":        name,\n\t\t\t\t\"group_class\": \"project\",\n\t\t\t},\n\t\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &rgroup, nil\n}\n\nfunc (e *singularityExecutor) getImageCacheProject(userUUID string, containerClient *arvados.Client) (*arvados.Group, error) {\n\tcacheProject, err := e.getOrCreateProject(userUUID, \".cache\", containerClient)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting '.cache' project: %v\", err)\n\t}\n\timageProject, err := e.getOrCreateProject(cacheProject.UUID, \"auto-generated singularity images\", containerClient)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting 'auto-generated singularity images' project: %s\", err)\n\t}\n\treturn imageProject, nil\n}\n\nfunc (e *singularityExecutor) imageCacheExp() time.Time {\n\treturn time.Now().Add(e.imageCacheTTL()).UTC()\n}\n\nfunc (e *singularityExecutor) imageCacheTTL() time.Duration {\n\treturn 24 * 7 * 2 * time.Hour\n}\n\n// getCacheCollection returns an existing collection with a cached\n// singularity image with the given name, or nil if none exists.\n//\n// Note that if there is no existing collection, this is not\n// considered an error -- all return values will be 
nil/empty.\nfunc (e *singularityExecutor) getCacheCollection(collectionName string, containerClient *arvados.Client, cacheProject *arvados.Group, arvMountPoint string) (collection *arvados.Collection, imageFile string, err error) {\n\tvar cl arvados.CollectionList\n\terr = containerClient.RequestAndDecode(&cl,\n\t\tarvados.EndpointCollectionList.Method,\n\t\tarvados.EndpointCollectionList.Path,\n\t\tnil, arvados.ListOptions{Filters: []arvados.Filter{\n\t\t\tarvados.Filter{\"owner_uuid\", \"=\", cacheProject.UUID},\n\t\t\tarvados.Filter{\"name\", \"=\", collectionName},\n\t\t},\n\t\t\tLimit: 1})\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"error querying for collection %q in project %s: %w\", collectionName, cacheProject.UUID, err)\n\t}\n\tif len(cl.Items) == 0 {\n\t\t// Successfully discovered that there's no cached\n\t\t// image collection.\n\t\treturn nil, \"\", nil\n\t}\n\t// Check that the collection actually contains an \"image.sif\"\n\t// file.  If not, we can't use it, and trying to create a new\n\t// cache collection will probably fail too, so the caller\n\t// should not bother trying.\n\tcoll := cl.Items[0]\n\tsifFile := path.Join(arvMountPoint, \"by_id\", coll.PortableDataHash, \"image.sif\")\n\t_, err = os.Stat(sifFile)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"found collection %s (%s), but it did not contain an image file: %s\", coll.UUID, coll.PortableDataHash, err)\n\t}\n\tif coll.TrashAt != nil && coll.TrashAt.Sub(time.Now()) < e.imageCacheTTL()*9/10 {\n\t\t// If the remaining TTL is less than 90% of our target\n\t\t// TTL, extend trash_at.  This avoids prematurely\n\t\t// trashing and re-converting images that are being\n\t\t// used regularly.\n\t\terr = containerClient.RequestAndDecode(nil,\n\t\t\tarvados.EndpointCollectionUpdate.Method,\n\t\t\t\"arvados/v1/collections/\"+coll.UUID,\n\t\t\tnil, map[string]interface{}{\n\t\t\t\t\"collection\": map[string]string{\n\t\t\t\t\t\"trash_at\": e.imageCacheExp().Format(time.RFC3339),\n\t\t\t\t},\n\t\t\t})\n\t\tif err != nil {\n\t\t\te.logf(\"could not update expiry time of cached image collection (proceeding anyway): %s\", err)\n\t\t}\n\t}\n\treturn &coll, sifFile, nil\n}\n\nfunc (e *singularityExecutor) createCacheCollection(collectionName string, containerClient *arvados.Client, cacheProject *arvados.Group) (*arvados.Collection, error) {\n\tvar coll arvados.Collection\n\terr := containerClient.RequestAndDecode(&coll,\n\t\tarvados.EndpointCollectionCreate.Method,\n\t\tarvados.EndpointCollectionCreate.Path,\n\t\tnil, map[string]interface{}{\n\t\t\t\"collection\": map[string]string{\n\t\t\t\t\"owner_uuid\": cacheProject.UUID,\n\t\t\t\t\"name\":       collectionName,\n\t\t\t\t\"trash_at\":   e.imageCacheExp().Format(time.RFC3339),\n\t\t\t},\n\t\t\t\"ensure_unique_name\": true,\n\t\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error creating '%v' collection: %s\", collectionName, err)\n\t}\n\treturn &coll, nil\n}\n\nfunc (e *singularityExecutor) convertDockerImage(srcPath, dstPath string) error {\n\t// Make sure the docker image is readable.\n\tif _, err := os.Stat(srcPath); err != nil {\n\t\treturn err\n\t}\n\n\te.logf(\"building singularity image\")\n\t// \"singularity build\" does not accept a\n\t// docker-archive://... filename containing a \":\" character,\n\t// as in \"/path/to/sha256:abcd...1234.tar\". 
Workaround: make a\n\t// symlink that doesn't have \":\" chars.\n\terr := os.Symlink(srcPath, e.tmpdir+\"/image.tar\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Set up a cache and tmp dir for singularity build\n\terr = os.Mkdir(e.tmpdir+\"/cache\", 0700)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.RemoveAll(e.tmpdir + \"/cache\")\n\terr = os.Mkdir(e.tmpdir+\"/tmp\", 0700)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.RemoveAll(e.tmpdir + \"/tmp\")\n\n\tbuild := exec.Command(\"singularity\", \"build\", dstPath, \"docker-archive://\"+e.tmpdir+\"/image.tar\")\n\tbuild.Env = os.Environ()\n\tbuild.Env = append(build.Env, \"SINGULARITY_CACHEDIR=\"+e.tmpdir+\"/cache\")\n\tbuild.Env = append(build.Env, \"SINGULARITY_TMPDIR=\"+e.tmpdir+\"/tmp\")\n\te.logf(\"%v\", build.Args)\n\tout, err := build.CombinedOutput()\n\t// INFO:    Starting build...\n\t// Getting image source signatures\n\t// Copying blob ab15617702de done\n\t// Copying config 651e02b8a2 done\n\t// Writing manifest to image destination\n\t// Storing signatures\n\t// 2021/04/22 14:42:14  info unpack layer: sha256:21cbfd3a344c52b197b9fa36091e66d9cbe52232703ff78d44734f85abb7ccd3\n\t// INFO:    Creating SIF file...\n\t// INFO:    Build complete: arvados-jobs.latest.sif\n\te.logf(\"%s\", out)\n\treturn err\n}\n\n// LoadImage converts the given docker image to a singularity\n// image.\n//\n// If containerClient is not nil, LoadImage first tries to use an\n// existing image (in Home -> .cache -> auto-generated singularity\n// images) and, if none was found there and the image was converted on\n// the fly, tries to save the converted image to the cache so it can\n// be reused next time.\n//\n// If containerClient is nil or a cache project/collection cannot be\n// found or created, LoadImage converts the image on the fly and\n// writes it to the local filesystem instead.\nfunc (e *singularityExecutor) LoadImage(dockerImageID string, imageTarballPath string, container arvados.Container, arvMountPoint string, containerClient *arvados.Client) error {\n\tconvertWithoutCache := func(err error) error {\n\t\tif err != nil {\n\t\t\te.logf(\"cannot use singularity image cache: %s\", err)\n\t\t}\n\t\te.imageFilename = path.Join(e.tmpdir, \"image.sif\")\n\t\treturn e.convertDockerImage(imageTarballPath, e.imageFilename)\n\t}\n\n\tif containerClient == nil {\n\t\treturn convertWithoutCache(nil)\n\t}\n\tcacheProject, err := e.getImageCacheProject(container.RuntimeUserUUID, containerClient)\n\tif err != nil {\n\t\treturn convertWithoutCache(err)\n\t}\n\tcacheCollectionName := fmt.Sprintf(\"singularity image for %s\", dockerImageID)\n\texistingCollection, sifFile, err := e.getCacheCollection(cacheCollectionName, containerClient, cacheProject, arvMountPoint)\n\tif err != nil {\n\t\treturn convertWithoutCache(err)\n\t}\n\tif existingCollection != nil {\n\t\te.imageFilename = sifFile\n\t\treturn nil\n\t}\n\n\tnewCollection, err := e.createCacheCollection(\"converting \"+cacheCollectionName, containerClient, cacheProject)\n\tif err != nil {\n\t\treturn convertWithoutCache(err)\n\t}\n\tdstDir := path.Join(arvMountPoint, \"by_uuid\", newCollection.UUID)\n\tdstFile := path.Join(dstDir, \"image.sif\")\n\terr = e.convertDockerImage(imageTarballPath, dstFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuf, err := os.ReadFile(path.Join(dstDir, \".arvados#collection\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not sync image collection: %w\", err)\n\t}\n\tvar synced arvados.Collection\n\terr = json.Unmarshal(buf, &synced)\n\tif err != nil 
{\n\t\treturn fmt.Errorf(\"could not parse .arvados#collection: %w\", err)\n\t}\n\te.logf(\"saved converted image in %s with PDH %s\", newCollection.UUID, synced.PortableDataHash)\n\te.imageFilename = path.Join(arvMountPoint, \"by_id\", synced.PortableDataHash, \"image.sif\")\n\n\tif errRename := containerClient.RequestAndDecode(nil,\n\t\tarvados.EndpointCollectionUpdate.Method,\n\t\t\"arvados/v1/collections/\"+newCollection.UUID,\n\t\tnil, map[string]interface{}{\n\t\t\t\"collection\": map[string]string{\n\t\t\t\t\"name\": cacheCollectionName,\n\t\t\t},\n\t\t}); errRename != nil {\n\t\t// Error is probably a name collision caused by\n\t\t// another crunch-run process is converting the same\n\t\t// image concurrently.  In that case, we prefer to use\n\t\t// the one that won the race -- the resulting images\n\t\t// should be equivalent, but if they do differ at all,\n\t\t// it's better if all containers use the same\n\t\t// conversion.\n\t\tif existingCollection, sifFile, err := e.getCacheCollection(cacheCollectionName, containerClient, cacheProject, arvMountPoint); err == nil {\n\t\t\te.logf(\"lost race -- abandoning our conversion in %s (%s) and using image from %s (%s) instead\", newCollection.UUID, synced.PortableDataHash, existingCollection.UUID, existingCollection.PortableDataHash)\n\t\t\te.imageFilename = sifFile\n\t\t} else {\n\t\t\te.logf(\"using newly converted image anyway, despite error renaming collection: %v\", errRename)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (e *singularityExecutor) Create(spec containerSpec) error {\n\te.spec = spec\n\treturn nil\n}\n\nfunc (e *singularityExecutor) execCmd(path string) *exec.Cmd {\n\targs := []string{path, \"exec\", \"--containall\", \"--cleanenv\", \"--pwd=\" + e.spec.WorkingDir}\n\tif !e.spec.EnableNetwork {\n\t\targs = append(args, \"--net\", \"--network=none\")\n\t} else if u, err := user.Current(); err == nil && u.Uid == \"0\" || e.sudo {\n\t\t// Specifying --network=bridge fails unless\n\t\t// singularity is running as root.\n\t\t//\n\t\t// Note this used to be possible with --fakeroot, or\n\t\t// configuring singularity like so:\n\t\t//\n\t\t// singularity config global --set 'allow net networks' bridge\n\t\t// singularity config global --set 'allow net groups' mygroup\n\t\t//\n\t\t// However, these options no longer work (as of debian\n\t\t// bookworm) because iptables now refuses to run in a\n\t\t// setuid environment.\n\t\targs = append(args, \"--net\", \"--network=bridge\")\n\t} else {\n\t\t// If we don't pass a --net argument at all, the\n\t\t// container will be in the same network namespace as\n\t\t// the host.\n\t\t//\n\t\t// Note this allows the container to listen on the\n\t\t// host's external ports.\n\t}\n\tif e.spec.GPUStack == \"cuda\" && e.spec.GPUDeviceCount > 0 {\n\t\targs = append(args, \"--nv\")\n\t}\n\tif e.spec.GPUStack == \"rocm\" && e.spec.GPUDeviceCount > 0 {\n\t\targs = append(args, \"--rocm\")\n\t}\n\n\t// If we ask for resource limits that aren't supported,\n\t// singularity will not run the container at all. 
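(For\n\t// example, a non-root run with --cpus on a host where the\n\t// cpu controller is not delegated fails outright rather\n\t// than running without the limit.)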
So we probe\n\t// for support first, and only apply the limits that appear to\n\t// be supported.\n\t//\n\t// Default debian configuration lets non-root users set memory\n\t// limits but not CPU limits, so we enable/disable those\n\t// limits independently.\n\t//\n\t// https://rootlesscontaine.rs/getting-started/common/cgroup2/\n\tcheckCgroupSupport(e.logf)\n\tif e.spec.VCPUs > 0 {\n\t\tif cgroupSupport[\"cpu\"] {\n\t\t\targs = append(args, \"--cpus\", fmt.Sprintf(\"%d\", e.spec.VCPUs))\n\t\t} else {\n\t\t\te.logf(\"cpu limits are not supported by current systemd/cgroup configuration, not setting --cpus %d\", e.spec.VCPUs)\n\t\t}\n\t}\n\tif e.spec.RAM > 0 {\n\t\tif cgroupSupport[\"memory\"] {\n\t\t\targs = append(args, \"--memory\", fmt.Sprintf(\"%d\", e.spec.RAM))\n\t\t} else {\n\t\t\te.logf(\"memory limits are not supported by current systemd/cgroup configuration, not setting --memory %d\", e.spec.RAM)\n\t\t}\n\t}\n\n\treadonlyflag := map[bool]string{\n\t\tfalse: \"rw\",\n\t\ttrue:  \"ro\",\n\t}\n\tvar binds []string\n\tfor path := range e.spec.BindMounts {\n\t\tbinds = append(binds, path)\n\t}\n\tsort.Strings(binds)\n\tfor _, path := range binds {\n\t\tmount := e.spec.BindMounts[path]\n\t\tif path == e.spec.Env[\"HOME\"] {\n\t\t\t// Singularity treats $HOME as a special case\n\t\t\targs = append(args, \"--home\", mount.HostPath+\":\"+path)\n\t\t} else {\n\t\t\targs = append(args, \"--bind\", mount.HostPath+\":\"+path+\":\"+readonlyflag[mount.ReadOnly])\n\t\t}\n\t}\n\n\t// This is for singularity 3.5.2. There are some behaviors\n\t// that will change in singularity 3.6; please see:\n\t// https://sylabs.io/guides/3.7/user-guide/environment_and_metadata.html\n\t// https://sylabs.io/guides/3.5/user-guide/environment_and_metadata.html\n\tenv := make([]string, 0, len(e.spec.Env))\n\tfor k, v := range e.spec.Env {\n\t\tif k == \"HOME\" {\n\t\t\t// Singularity treats $HOME as a special case;\n\t\t\t// this is handled with --home above\n\t\t\tcontinue\n\t\t}\n\t\tenv = append(env, \"SINGULARITYENV_\"+k+\"=\"+v)\n\t}\n\n\t// Singularity always makes all nvidia devices visible to the\n\t// container.  If a resource manager such as slurm or LSF told\n\t// us to select specific devices we need to propagate that.\n\tif cudaVisibleDevices := os.Getenv(\"CUDA_VISIBLE_DEVICES\"); cudaVisibleDevices != \"\" {\n\t\tenv = append(env, \"SINGULARITYENV_CUDA_VISIBLE_DEVICES=\"+cudaVisibleDevices)\n\t}\n\t// Singularity's default behavior is to evaluate each\n\t// SINGULARITYENV_* env var with a shell as a double-quoted\n\t// string and pass the result to the contained\n\t// process. Singularity 3.10+ has an option to pass env vars\n\t// through literally without evaluating, which is what we\n\t// want. 
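(Without it, a\n\t// value containing \"$VAR\" or \"$(...)\" would be expanded by\n\t// the shell before reaching the contained process.)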
See https://github.com/sylabs/singularity/pull/704\n\t// and https://dev.arvados.org/issues/19081\n\tenv = append(env, \"SINGULARITY_NO_EVAL=1\")\n\n\t// If we don't propagate XDG_RUNTIME_DIR and\n\t// DBUS_SESSION_BUS_ADDRESS, singularity resource limits fail\n\t// with \"FATAL: container creation failed: while applying\n\t// cgroups config: system configuration does not support\n\t// cgroup management\" or \"FATAL: container creation failed:\n\t// while applying cgroups config: rootless cgroups require a\n\t// D-Bus session - check that XDG_RUNTIME_DIR and\n\t// DBUS_SESSION_BUS_ADDRESS are set\".\n\tenv = append(env, \"XDG_RUNTIME_DIR=\"+os.Getenv(\"XDG_RUNTIME_DIR\"))\n\tenv = append(env, \"DBUS_SESSION_BUS_ADDRESS=\"+os.Getenv(\"DBUS_SESSION_BUS_ADDRESS\"))\n\n\targs = append(args, e.imageFilename)\n\targs = append(args, e.spec.Command...)\n\n\treturn &exec.Cmd{\n\t\tPath:   path,\n\t\tArgs:   args,\n\t\tEnv:    env,\n\t\tStdin:  e.spec.Stdin,\n\t\tStdout: e.spec.Stdout,\n\t\tStderr: e.spec.Stderr,\n\t}\n}\n\nfunc (e *singularityExecutor) Start() error {\n\tpath, err := exec.LookPath(\"singularity\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tchild := e.execCmd(path)\n\tif e.sudo {\n\t\tchild.Args = append([]string{child.Path}, child.Args...)\n\t\tchild.Path, err = exec.LookPath(\"sudo\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr = child.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\te.child = child\n\treturn nil\n}\n\nfunc (e *singularityExecutor) Pid() int {\n\tchildproc, err := e.containedProcess()\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn childproc\n}\n\nfunc (e *singularityExecutor) Stop() error {\n\tif e.child == nil || e.child.Process == nil {\n\t\t// no process started, or Wait already called\n\t\treturn nil\n\t}\n\tif err := e.child.Process.Signal(syscall.Signal(0)); err != nil {\n\t\t// process already exited\n\t\treturn nil\n\t}\n\treturn e.child.Process.Signal(syscall.SIGKILL)\n}\n\nfunc (e *singularityExecutor) Wait(context.Context) (int, error) {\n\terr := e.child.Wait()\n\tif err, ok := err.(*exec.ExitError); ok {\n\t\treturn err.ProcessState.ExitCode(), nil\n\t}\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn e.child.ProcessState.ExitCode(), nil\n}\n\nfunc (e *singularityExecutor) Close() {\n\terr := os.RemoveAll(e.tmpdir)\n\tif err != nil {\n\t\te.logf(\"error removing temp dir: %s\", err)\n\t}\n}\n\nfunc (e *singularityExecutor) InjectCommand(ctx context.Context, detachKeys, username string, usingTTY bool, injectcmd []string) (*exec.Cmd, error) {\n\ttarget, err := e.containedProcess()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcmd := exec.CommandContext(ctx, \"nsenter\", append([]string{fmt.Sprintf(\"--target=%d\", target), \"--all\"}, injectcmd...)...)\n\tif e.sudo {\n\t\tcmd.Args = slices.Insert(cmd.Args, 0, cmd.Path)\n\t\tcmd.Path, err = exec.LookPath(\"sudo\")\n\t\treturn cmd, err\n\t}\n\treturn cmd, nil\n}\n\nvar (\n\terrContainerHasNoIPAddress = errors.New(\"container has no IP address distinct from host\")\n)\n\nfunc (e *singularityExecutor) IPAddress() (string, error) {\n\ttarget, err := e.containedProcess()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ttargetIPs, err := processIPs(target)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tselfIPs, err := processIPs(os.Getpid())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor ip := range targetIPs {\n\t\tif !selfIPs[ip] {\n\t\t\treturn ip, nil\n\t\t}\n\t}\n\treturn \"\", errContainerHasNoIPAddress\n}\n\nfunc processIPs(pid int) (map[string]bool, error) {\n\tfibtrie, 
err := os.ReadFile(fmt.Sprintf(\"/proc/%d/net/fib_trie\", pid))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddrs := map[string]bool{}\n\t// When we see a pair of lines like this:\n\t//\n\t//              |-- 10.1.2.3\n\t//                 /32 host LOCAL\n\t//\n\t// ...we set addrs[\"10.1.2.3\"] = true\n\tlines := bytes.Split(fibtrie, []byte{'\\n'})\n\tfor linenumber, line := range lines {\n\t\tif !bytes.HasSuffix(line, []byte(\"/32 host LOCAL\")) {\n\t\t\tcontinue\n\t\t}\n\t\tif linenumber < 1 {\n\t\t\tcontinue\n\t\t}\n\t\ti := bytes.LastIndexByte(lines[linenumber-1], ' ')\n\t\tif i < 0 || i >= len(line)-7 {\n\t\t\tcontinue\n\t\t}\n\t\taddr := string(lines[linenumber-1][i+1:])\n\t\tif net.ParseIP(addr).To4() != nil {\n\t\t\taddrs[addr] = true\n\t\t}\n\t}\n\treturn addrs, nil\n}\n\nvar (\n\terrContainerNotStarted = errors.New(\"container has not started yet\")\n\terrCannotFindChild     = errors.New(\"failed to find any process inside the container\")\n\treProcStatusPPid       = regexp.MustCompile(`\\nPPid:\\t(\\d+)\\n`)\n)\n\n// Return the PID of a process that is inside the container (not\n// necessarily the topmost/pid=1 process in the container).\nfunc (e *singularityExecutor) containedProcess() (int, error) {\n\tif e.child == nil || e.child.Process == nil {\n\t\treturn 0, errContainerNotStarted\n\t}\n\tcmd := exec.Command(\"lsns\")\n\tif e.sudo {\n\t\tcmd = exec.Command(\"sudo\", \"lsns\")\n\t}\n\tlsns, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"lsns: %w\", err)\n\t}\n\tfor _, line := range bytes.Split(lsns, []byte{'\\n'}) {\n\t\tfields := bytes.Fields(line)\n\t\tif len(fields) < 4 {\n\t\t\tcontinue\n\t\t}\n\t\tif !bytes.Equal(fields[1], []byte(\"pid\")) {\n\t\t\tcontinue\n\t\t}\n\t\tpid, err := strconv.ParseInt(string(fields[3]), 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"error parsing PID field in lsns output: %q\", fields[3])\n\t\t}\n\t\tfor parent := pid; ; {\n\t\t\tprocstatus, err := os.ReadFile(fmt.Sprintf(\"/proc/%d/status\", parent))\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tm := reProcStatusPPid.FindSubmatch(procstatus)\n\t\t\tif m == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tparent, err = strconv.ParseInt(string(m[1]), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif int(parent) == e.child.Process.Pid {\n\t\t\t\treturn int(pid), nil\n\t\t\t}\n\t\t}\n\t}\n\treturn 0, errCannotFindChild\n}\n"
  },
  {
    "path": "lib/crunchrun/singularity_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchrun\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t. \"gopkg.in/check.v1\"\n)\n\nvar _ = Suite(&singularitySuite{})\n\ntype singularitySuite struct {\n\texecutorSuite\n}\n\nfunc (s *singularitySuite) SetUpSuite(c *C) {\n\t_, err := exec.LookPath(\"singularity\")\n\tif err != nil {\n\t\tc.Skip(\"looks like singularity is not installed\")\n\t}\n\ts.newExecutor = func(c *C) {\n\t\tvar err error\n\t\ts.executor, err = newSingularityExecutor(c.Logf)\n\t\tc.Assert(err, IsNil)\n\t}\n}\n\nfunc (s *singularitySuite) TearDownSuite(c *C) {\n\tif s.executor != nil {\n\t\ts.executor.Close()\n\t}\n}\n\n// With modern iptables, singularity (as of 4.2.1) cannot enable\n// networking when invoked by a regular user. Under\n// arvados-dispatch-cloud, crunch-run runs as root, so it's OK. For\n// testing, assuming tests are not running as root, we use sudo -- but\n// only if requested via environment variable.\n//\n// This also applies to the \"nsenter\" command used by Inject: the\n// standard dev/test environment installs nsenter without special\n// privileges, so it is only useful if we are/become root.\nfunc (s *singularitySuite) useRootOrSkip(c *C) {\n\tif os.Getuid() == 0 {\n\t\t// already root\n\t} else if os.Getenv(\"ARVADOS_TEST_PRIVESC\") == \"sudo\" {\n\t\tc.Logf(\"ARVADOS_TEST_PRIVESC is 'sudo', invoking 'sudo singularity ...'\")\n\t\ts.executor.(*singularityExecutor).sudo = true\n\t} else {\n\t\tc.Skip(\"test case needs to run singularity as root -- set ARVADOS_TEST_PRIVESC=sudo to enable this test\")\n\t}\n}\n\nfunc (s *singularitySuite) TestEnableNetwork_Listen(c *C) {\n\ts.useRootOrSkip(c)\n\ts.executorSuite.TestEnableNetwork_Listen(c)\n}\n\nfunc (s *singularitySuite) TestInject(c *C) {\n\ts.useRootOrSkip(c)\n\ts.executorSuite.TestInject(c)\n}\n\nvar _ = Suite(&singularityStubSuite{})\n\n// singularityStubSuite tests don't really invoke singularity, so we\n// can run them even if singularity is not installed.\ntype singularityStubSuite struct{}\n\nfunc (s *singularityStubSuite) TestSingularityExecArgs(c *C) {\n\te, err := newSingularityExecutor(c.Logf)\n\tc.Assert(err, IsNil)\n\terr = e.Create(containerSpec{\n\t\tWorkingDir:     \"/WorkingDir\",\n\t\tEnv:            map[string]string{\"FOO\": \"bar\"},\n\t\tBindMounts:     map[string]bindmount{\"/mnt\": {HostPath: \"/hostpath\", ReadOnly: true}},\n\t\tEnableNetwork:  false,\n\t\tGPUStack:       \"cuda\",\n\t\tGPUDeviceCount: 3,\n\t\tVCPUs:          2,\n\t\tRAM:            12345678,\n\t})\n\tc.Check(err, IsNil)\n\te.imageFilename = \"/fake/image.sif\"\n\tcmd := e.execCmd(\"./singularity\")\n\texpectArgs := []string{\"./singularity\", \"exec\", \"--containall\", \"--cleanenv\", \"--pwd=/WorkingDir\", \"--net\", \"--network=none\", \"--nv\"}\n\tif cgroupSupport[\"cpu\"] {\n\t\texpectArgs = append(expectArgs, \"--cpus\", \"2\")\n\t}\n\tif cgroupSupport[\"memory\"] {\n\t\texpectArgs = append(expectArgs, \"--memory\", \"12345678\")\n\t}\n\texpectArgs = append(expectArgs, \"--bind\", \"/hostpath:/mnt:ro\", \"/fake/image.sif\")\n\tc.Check(cmd.Args, DeepEquals, expectArgs)\n\tc.Check(cmd.Env, DeepEquals, []string{\n\t\t\"SINGULARITYENV_FOO=bar\",\n\t\t\"SINGULARITY_NO_EVAL=1\",\n\t\t\"XDG_RUNTIME_DIR=\" + os.Getenv(\"XDG_RUNTIME_DIR\"),\n\t\t\"DBUS_SESSION_BUS_ADDRESS=\" + 
os.Getenv(\"DBUS_SESSION_BUS_ADDRESS\"),\n\t})\n}\n\nfunc (s *singularitySuite) setupMount(c *C) (mountdir string) {\n\tmountdir = c.MkDir()\n\tcmd := exec.Command(\"arv-mount\",\n\t\t\"--foreground\", \"--read-write\",\n\t\t\"--storage-classes\", \"default\",\n\t\t\"--mount-by-pdh\", \"by_id\", \"--mount-by-id\", \"by_uuid\",\n\t\t\"--disable-event-listening\",\n\t\tmountdir)\n\tcmd.Stdout = os.Stderr\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Start()\n\tc.Assert(err, IsNil)\n\treturn\n}\n\nfunc (s *singularitySuite) teardownMount(c *C, mountdir string) {\n\texec.Command(\"arv-mount\", \"--unmount\", mountdir).Run()\n}\n\ntype singularitySuiteLoadTestSetup struct {\n\tcontainerClient   *arvados.Client\n\timageCacheProject *arvados.Group\n\tdockerImageID     string\n\tcollectionName    string\n}\n\nfunc (s *singularitySuite) setupLoadTest(c *C, e *singularityExecutor) (setup singularitySuiteLoadTestSetup) {\n\t// remove symlink and converted image already written by\n\t// (executorSuite)SetupTest\n\tos.Remove(e.tmpdir + \"/image.tar\")\n\tos.Remove(e.tmpdir + \"/image.sif\")\n\n\tsetup.containerClient = arvados.NewClientFromEnv()\n\tsetup.containerClient.AuthToken = arvadostest.ActiveTokenV2\n\n\tvar err error\n\tsetup.imageCacheProject, err = e.getImageCacheProject(arvadostest.ActiveUserUUID, setup.containerClient)\n\tc.Assert(err, IsNil)\n\n\tsetup.dockerImageID = \"sha256:388056c9a6838deea3792e8f00705b35b439cf57b3c9c2634fb4e95cfc896de6\"\n\tsetup.collectionName = fmt.Sprintf(\"singularity image for %s\", setup.dockerImageID)\n\n\t// Remove existing cache entry, if any.\n\tvar cl arvados.CollectionList\n\terr = setup.containerClient.RequestAndDecode(&cl,\n\t\tarvados.EndpointCollectionList.Method,\n\t\tarvados.EndpointCollectionList.Path,\n\t\tnil, arvados.ListOptions{Filters: []arvados.Filter{\n\t\t\tarvados.Filter{\"owner_uuid\", \"=\", setup.imageCacheProject.UUID},\n\t\t\tarvados.Filter{\"name\", \"=\", setup.collectionName},\n\t\t},\n\t\t\tLimit: 1})\n\tc.Assert(err, IsNil)\n\tif len(cl.Items) == 1 {\n\t\tsetup.containerClient.RequestAndDecode(nil, \"DELETE\", \"arvados/v1/collections/\"+cl.Items[0].UUID, nil, nil)\n\t}\n\n\treturn\n}\n\nfunc (s *singularitySuite) checkCacheCollectionExists(c *C, setup singularitySuiteLoadTestSetup) {\n\tvar cl arvados.CollectionList\n\terr := setup.containerClient.RequestAndDecode(&cl,\n\t\tarvados.EndpointCollectionList.Method,\n\t\tarvados.EndpointCollectionList.Path,\n\t\tnil, arvados.ListOptions{Filters: []arvados.Filter{\n\t\t\tarvados.Filter{\"owner_uuid\", \"=\", setup.imageCacheProject.UUID},\n\t\t\tarvados.Filter{\"name\", \"=\", setup.collectionName},\n\t\t},\n\t\t\tLimit: 1})\n\tc.Assert(err, IsNil)\n\tif !c.Check(cl.Items, HasLen, 1) {\n\t\treturn\n\t}\n\tc.Check(cl.Items[0].PortableDataHash, Not(Equals), \"d41d8cd98f00b204e9800998ecf8427e+0\")\n}\n\nfunc (s *singularitySuite) TestImageCache_New(c *C) {\n\tmountdir := s.setupMount(c)\n\tdefer s.teardownMount(c, mountdir)\n\te, err := newSingularityExecutor(c.Logf)\n\tc.Assert(err, IsNil)\n\tsetup := s.setupLoadTest(c, e)\n\terr = e.LoadImage(setup.dockerImageID, arvadostest.BusyboxDockerImage(c), arvados.Container{RuntimeUserUUID: arvadostest.ActiveUserUUID}, mountdir, setup.containerClient)\n\tc.Check(err, IsNil)\n\t_, err = os.Stat(e.tmpdir + \"/image.sif\")\n\tc.Check(err, NotNil)\n\tc.Check(os.IsNotExist(err), Equals, true)\n\ts.checkCacheCollectionExists(c, setup)\n}\n\nfunc (s *singularitySuite) TestImageCache_SkipEmpty(c *C) {\n\tmountdir := s.setupMount(c)\n\tdefer 
s.teardownMount(c, mountdir)\n\te, err := newSingularityExecutor(c.Logf)\n\tc.Assert(err, IsNil)\n\tsetup := s.setupLoadTest(c, e)\n\n\tvar emptyCollection arvados.Collection\n\texp := time.Now().Add(24 * 7 * 2 * time.Hour)\n\terr = setup.containerClient.RequestAndDecode(&emptyCollection,\n\t\tarvados.EndpointCollectionCreate.Method,\n\t\tarvados.EndpointCollectionCreate.Path,\n\t\tnil, map[string]interface{}{\n\t\t\t\"collection\": map[string]string{\n\t\t\t\t\"owner_uuid\": setup.imageCacheProject.UUID,\n\t\t\t\t\"name\":       setup.collectionName,\n\t\t\t\t\"trash_at\":   exp.UTC().Format(time.RFC3339),\n\t\t\t},\n\t\t})\n\tc.Assert(err, IsNil)\n\n\terr = e.LoadImage(setup.dockerImageID, arvadostest.BusyboxDockerImage(c), arvados.Container{RuntimeUserUUID: arvadostest.ActiveUserUUID}, mountdir, setup.containerClient)\n\tc.Check(err, IsNil)\n\tc.Check(e.imageFilename, Equals, e.tmpdir+\"/image.sif\")\n\n\t// tmpdir should contain symlink to docker image archive.\n\ttarListing, err := exec.Command(\"tar\", \"tvf\", e.tmpdir+\"/image.tar\").CombinedOutput()\n\tc.Check(err, IsNil)\n\tc.Check(string(tarListing), Matches, `(?ms).*/layer.tar.*`)\n\n\t// converted singularity image should be non-empty.\n\tfi, err := os.Stat(e.imageFilename)\n\tif c.Check(err, IsNil) {\n\t\tc.Check(int(fi.Size()), Not(Equals), 0)\n\t}\n}\n\nfunc (s *singularitySuite) TestImageCache_Concurrency_1(c *C) {\n\ts.testImageCache(c, 1)\n}\n\nfunc (s *singularitySuite) TestImageCache_Concurrency_2(c *C) {\n\ts.testImageCache(c, 2)\n}\n\nfunc (s *singularitySuite) TestImageCache_Concurrency_10(c *C) {\n\ts.testImageCache(c, 10)\n}\n\nfunc (s *singularitySuite) testImageCache(c *C, concurrency int) {\n\tmountdirs := make([]string, concurrency)\n\texecs := make([]*singularityExecutor, concurrency)\n\tsetups := make([]singularitySuiteLoadTestSetup, concurrency)\n\tfor i := range execs {\n\t\tmountdirs[i] = s.setupMount(c)\n\t\tdefer s.teardownMount(c, mountdirs[i])\n\t\te, err := newSingularityExecutor(c.Logf)\n\t\tc.Assert(err, IsNil)\n\t\tdefer e.Close()\n\t\texecs[i] = e\n\t\tsetups[i] = s.setupLoadTest(c, e)\n\t}\n\n\tvar wg sync.WaitGroup\n\tfor i, e := range execs {\n\t\ti, e := i, e\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terr := e.LoadImage(setups[i].dockerImageID, arvadostest.BusyboxDockerImage(c), arvados.Container{RuntimeUserUUID: arvadostest.ActiveUserUUID}, mountdirs[i], setups[i].containerClient)\n\t\t\tc.Check(err, IsNil)\n\t\t}()\n\t}\n\twg.Wait()\n\n\tfor i, e := range execs {\n\t\tfusepath := strings.TrimPrefix(e.imageFilename, mountdirs[i])\n\t\t// imageFilename should be in the fuse mount, not\n\t\t// e.tmpdir, so trimming the mountdir prefix should\n\t\t// change the path.\n\t\tc.Check(fusepath, Not(Equals), e.imageFilename)\n\t\t// Below fuse mountpoint, paths should all be equal.\n\t\tfusepath0 := strings.TrimPrefix(execs[0].imageFilename, mountdirs[0])\n\t\tc.Check(fusepath, Equals, fusepath0)\n\t}\n}\n"
  },
  {
    "path": "lib/crunchrun/testdata/fakestat/cgroup.procs",
    "content": ""
  },
  {
    "path": "lib/crunchrun/testdata/fakestat/cgroupid/cgroup.procs",
    "content": ""
  },
  {
    "path": "lib/crunchrun/testdata/fakestat/cgroupid/memory.stat",
    "content": "rss 734003200\npgmajfault 3200\ntotal_cache 73400320\ntotal_pgmajfault 20\ntotal_swap 320\n"
  },
  {
    "path": "lib/crunchstat/command.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchstat\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os/exec\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n)\n\nvar Command = command{}\n\ntype command struct{}\n\nfunc (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tflags := flag.NewFlagSet(prog, flag.ExitOnError)\n\tpoll := flags.Duration(\"poll\", 10*time.Second, \"reporting interval\")\n\tdebug := flags.Bool(\"debug\", false, \"show additional debug info\")\n\tdump := flags.String(\"dump\", \"\", \"save snapshot of OS files in given `directory` (for creating test cases)\")\n\tgetVersion := flags.Bool(\"version\", false, \"print version information and exit\")\n\n\tif ok, code := cmd.ParseFlags(flags, prog, args, \"program [args ...]\", stderr); !ok {\n\t\treturn code\n\t} else if *getVersion {\n\t\tfmt.Printf(\"%s %s\\n\", prog, cmd.Version.String())\n\t\treturn 0\n\t} else if flags.NArg() == 0 {\n\t\tfmt.Fprintf(stderr, \"missing required argument: program (try -help)\\n\")\n\t\treturn 2\n\t}\n\n\treporter := &Reporter{\n\t\tLogger:     log.New(stderr, prog+\": \", 0),\n\t\tDebug:      *debug,\n\t\tPollPeriod: *poll,\n\t}\n\treporter.Logger.Printf(\"%s %s\", prog, cmd.Version.String())\n\treporter.Logger.Printf(\"running %v\", flags.Args())\n\tcmd := exec.Command(flags.Arg(0), flags.Args()[1:]...)\n\n\t// Child process will use our stdin and stdout pipes (we close\n\t// our copies below)\n\tcmd.Stdin = stdin\n\tcmd.Stdout = stdout\n\t// Child process stderr and our stats will both go to stderr\n\tcmd.Stderr = stderr\n\n\tif err := cmd.Start(); err != nil {\n\t\treporter.Logger.Printf(\"error in cmd.Start: %v\", err)\n\t\treturn 1\n\t}\n\treporter.Pid = func() int {\n\t\treturn cmd.Process.Pid\n\t}\n\treporter.Start()\n\tdefer reporter.Stop()\n\tif stdin, ok := stdin.(io.Closer); ok {\n\t\tstdin.Close()\n\t}\n\tif stdout, ok := stdout.(io.Closer); ok {\n\t\tstdout.Close()\n\t}\n\n\tfailed := false\n\tif *dump != \"\" {\n\t\terr := reporter.dumpSourceFiles(*dump)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(stderr, \"error dumping source files: %s\\n\", err)\n\t\t\tfailed = true\n\t\t}\n\t}\n\n\terr := cmd.Wait()\n\n\tif err, ok := err.(*exec.ExitError); ok {\n\t\t// The program has exited with an exit code != 0\n\n\t\t// This works on both Unix and Windows. Although\n\t\t// package syscall is generally platform dependent,\n\t\t// WaitStatus is defined for both Unix and Windows and\n\t\t// in both cases has an ExitStatus() method with the\n\t\t// same signature.\n\t\tif status, ok := err.Sys().(syscall.WaitStatus); ok {\n\t\t\treturn status.ExitStatus()\n\t\t} else {\n\t\t\treporter.Logger.Printf(\"ExitError without WaitStatus: %v\", err)\n\t\t\treturn 1\n\t\t}\n\t} else if err != nil {\n\t\treporter.Logger.Printf(\"error running command: %v\", err)\n\t\treturn 1\n\t}\n\n\tif failed {\n\t\treturn 1\n\t}\n\treturn 0\n}\n"
  },
  {
    "path": "lib/crunchstat/crunchstat.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Package crunchstat reports resource usage (CPU, memory, disk,\n// network) for a cgroup.\npackage crunchstat\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n)\n\n// crunchstat collects all memory statistics, but only reports these.\nvar memoryStats = [...]string{\"cache\", \"swap\", \"pgmajfault\", \"rss\"}\n\ntype logPrinter interface {\n\tPrintf(fmt string, args ...interface{})\n}\n\n// A Reporter gathers statistics for a cgroup and writes them to a\n// log.Logger.\ntype Reporter struct {\n\t// Func that returns the pid of a process inside the desired\n\t// cgroup. Reporter will call Pid periodically until it\n\t// returns a positive number, then start reporting stats for\n\t// the cgroup that process belongs to.\n\t//\n\t// Pid is used when cgroups v2 is available. For cgroups v1,\n\t// see below.\n\tPid func() int\n\n\t// Interval between samples. Must be positive.\n\tPollPeriod time.Duration\n\n\t// Temporary directory, will be monitored for available, used\n\t// & total space.\n\tTempDir string\n\n\t// Where to write statistics. Must not be nil.\n\tLogger logPrinter\n\n\t// When stats cross thresholds configured in the fields below,\n\t// they are reported to this logger.\n\tThresholdLogger logPrinter\n\n\t// MemThresholds maps memory stat names to slices of thresholds.\n\t// When the corresponding stat exceeds a threshold, that will be logged.\n\tMemThresholds map[string][]Threshold\n\n\t// Filesystem to read /proc entries and cgroup stats from.\n\t// Non-nil for testing, nil for real root filesystem.\n\tFS fs.FS\n\n\t// Enable debug messages.\n\tDebug bool\n\n\t// available cgroup hierarchies\n\tstatFiles struct {\n\t\tcpuMax            string // v2\n\t\tcpusetCpus        string // v1,v2 (via /proc/$PID/cpuset)\n\t\tcpuacctStat       string // v1 (via /proc/$PID/cgroup => cpuacct)\n\t\tcpuStat           string // v2\n\t\tioServiceBytes    string // v1 (via /proc/$PID/cgroup => blkio)\n\t\tioStat            string // v2\n\t\tmemoryStat        string // v1 and v2 (but v2 is missing some entries)\n\t\tmemoryCurrent     string // v2\n\t\tmemorySwapCurrent string // v2\n\t\tnetDev            string // /proc/$PID/net/dev\n\t}\n\n\tkernelPageSize      int64\n\tlastNetSample       map[string]ioSample\n\tlastDiskIOSample    map[string]ioSample\n\tlastCPUSample       cpuSample\n\tlastDiskSpaceSample diskSpaceSample\n\tlastMemSample       memSample\n\tmaxDiskSpaceSample  diskSpaceSample\n\tmaxMemSample        map[memoryKey]int64\n\n\t// process returned by Pid(), whose cgroup stats we are\n\t// reporting\n\tpid int\n\n\t// individual processes whose memory size we are reporting\n\treportPIDs   map[string]int\n\treportPIDsMu sync.Mutex\n\n\tdone    chan struct{} // closed when we should stop reporting\n\tready   chan struct{} // have pid and stat files\n\tflushed chan struct{} // closed when we have made our last report\n}\n\ntype Threshold struct {\n\tpercentage int64\n\tthreshold  int64\n\ttotal      int64\n}\n\nfunc NewThresholdFromPercentage(total int64, percentage int64) Threshold {\n\treturn Threshold{\n\t\tpercentage: percentage,\n\t\tthreshold:  total * percentage / 100,\n\t\ttotal:      total,\n\t}\n}\n\nfunc NewThresholdsFromPercentages(total int64, percentages []int64) (thresholds 
[]Threshold) {\n\tfor _, percentage := range percentages {\n\t\tthresholds = append(thresholds, NewThresholdFromPercentage(total, percentage))\n\t}\n\treturn\n}\n\n// memoryKey is a key into Reporter.maxMemSample.\n// Initialize it with just statName to get the host/cgroup maximum.\n// Initialize it with all fields to get that process' maximum.\ntype memoryKey struct {\n\tprocessID   int\n\tprocessName string\n\tstatName    string\n}\n\n// Start starts monitoring in a new goroutine, and returns\n// immediately.\n//\n// The monitoring goroutine waits until Pid() returns a positive\n// number. Then it waits for the cgroup accounting files for that\n// process to appear. Then it collects and reports statistics until\n// Stop is called.\n//\n// Callers should not call Start more than once.\n//\n// Callers should not modify public data fields after calling Start.\nfunc (r *Reporter) Start() {\n\tr.done = make(chan struct{})\n\tr.ready = make(chan struct{})\n\tr.flushed = make(chan struct{})\n\tif r.FS == nil {\n\t\tr.FS = os.DirFS(\"/\")\n\t}\n\tgo r.run()\n}\n\n// ReportPID starts reporting stats for a specified process.\nfunc (r *Reporter) ReportPID(name string, pid int) {\n\tr.reportPIDsMu.Lock()\n\tdefer r.reportPIDsMu.Unlock()\n\tif r.reportPIDs == nil {\n\t\tr.reportPIDs = map[string]int{name: pid}\n\t} else {\n\t\tr.reportPIDs[name] = pid\n\t}\n}\n\n// Stop reporting. Do not call more than once, or before calling\n// Start.\n//\n// Nothing will be logged after Stop returns unless you call a Log* method.\nfunc (r *Reporter) Stop() {\n\tclose(r.done)\n\t<-r.flushed\n}\n\nvar v1keys = map[string]bool{\n\t\"blkio\":   true,\n\t\"cpuacct\": true,\n\t\"cpuset\":  true,\n\t\"memory\":  true,\n}\n\n// Find cgroup hierarchies in /proc/mounts, e.g.,\n//\n//\t{\n//\t\t\"blkio\": \"/sys/fs/cgroup/blkio\",\n//\t\t\"unified\": \"/sys/fs/cgroup/unified\",\n//\t}\nfunc (r *Reporter) cgroupMounts() map[string]string {\n\tprocmounts, err := fs.ReadFile(r.FS, \"proc/mounts\")\n\tif err != nil {\n\t\tr.Logger.Printf(\"error reading /proc/mounts: %s\", err)\n\t\treturn nil\n\t}\n\tmounts := map[string]string{}\n\tfor _, line := range bytes.Split(procmounts, []byte{'\\n'}) {\n\t\tfields := bytes.SplitN(line, []byte{' '}, 6)\n\t\tif len(fields) != 6 {\n\t\t\tcontinue\n\t\t}\n\t\tswitch string(fields[2]) {\n\t\tcase \"cgroup2\":\n\t\t\t// cgroup /sys/fs/cgroup/unified cgroup2 rw,nosuid,nodev,noexec,relatime 0 0\n\t\t\tmounts[\"unified\"] = string(fields[1])\n\t\tcase \"cgroup\":\n\t\t\t// cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0\n\t\t\toptions := bytes.Split(fields[3], []byte{','})\n\t\t\tfor _, option := range options {\n\t\t\t\toption := string(option)\n\t\t\t\tif v1keys[option] {\n\t\t\t\t\tmounts[option] = string(fields[1])\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn mounts\n}\n\n// generate map of cgroup controller => path for r.pid.\n//\n// the \"unified\" controller represents cgroups v2.\nfunc (r *Reporter) cgroupPaths(mounts map[string]string) map[string]string {\n\tif len(mounts) == 0 {\n\t\treturn nil\n\t}\n\tprocdir := fmt.Sprintf(\"proc/%d\", r.pid)\n\tbuf, err := fs.ReadFile(r.FS, procdir+\"/cgroup\")\n\tif err != nil {\n\t\tr.Logger.Printf(\"error reading cgroup file: %s\", err)\n\t\treturn nil\n\t}\n\tpaths := map[string]string{}\n\tfor _, line := range bytes.Split(buf, []byte{'\\n'}) {\n\t\t// The entry for cgroup v2 is always in the format\n\t\t// \"0::$PATH\" --\n\t\t// 
https://docs.kernel.org/admin-guide/cgroup-v2.html\n\t\tif bytes.HasPrefix(line, []byte(\"0::/\")) && mounts[\"unified\"] != \"\" {\n\t\t\tpaths[\"unified\"] = mounts[\"unified\"] + string(line[3:])\n\t\t\tcontinue\n\t\t}\n\t\t// cgroups v1 entries look like\n\t\t// \"6:cpu,cpuacct:/user.slice\"\n\t\tfields := bytes.SplitN(line, []byte{':'}, 3)\n\t\tif len(fields) != 3 {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, key := range bytes.Split(fields[1], []byte{','}) {\n\t\t\tkey := string(key)\n\t\t\tif mounts[key] != \"\" {\n\t\t\t\tpaths[key] = mounts[key] + string(fields[2])\n\t\t\t}\n\t\t}\n\t}\n\t// In unified mode, /proc/$PID/cgroup doesn't have a cpuset\n\t// entry, but we still need it -- there's no cpuset.cpus file\n\t// in the cgroup2 subtree indicated by the 0::$PATH entry. We\n\t// have to get the right path from /proc/$PID/cpuset.\n\tif _, found := paths[\"cpuset\"]; !found && mounts[\"unified\"] != \"\" {\n\t\tbuf, _ := fs.ReadFile(r.FS, procdir+\"/cpuset\")\n\t\tcpusetPath := string(bytes.TrimRight(buf, \"\\n\"))\n\t\tpaths[\"cpuset\"] = mounts[\"unified\"] + cpusetPath\n\t}\n\treturn paths\n}\n\nfunc (r *Reporter) findStatFiles() {\n\tmounts := r.cgroupMounts()\n\tpaths := r.cgroupPaths(mounts)\n\tdone := map[*string]bool{}\n\tfor _, try := range []struct {\n\t\tstatFile *string\n\t\tpathkey  string\n\t\tfile     string\n\t}{\n\t\t{&r.statFiles.cpuMax, \"unified\", \"cpu.max\"},\n\t\t{&r.statFiles.cpusetCpus, \"cpuset\", \"cpuset.cpus.effective\"},\n\t\t{&r.statFiles.cpusetCpus, \"cpuset\", \"cpuset.cpus\"},\n\t\t{&r.statFiles.cpuacctStat, \"cpuacct\", \"cpuacct.stat\"},\n\t\t{&r.statFiles.cpuStat, \"unified\", \"cpu.stat\"},\n\t\t// blkio.throttle.io_service_bytes must precede\n\t\t// blkio.io_service_bytes -- on ubuntu1804, the latter\n\t\t// is present but reports 0\n\t\t{&r.statFiles.ioServiceBytes, \"blkio\", \"blkio.throttle.io_service_bytes\"},\n\t\t{&r.statFiles.ioServiceBytes, \"blkio\", \"blkio.io_service_bytes\"},\n\t\t{&r.statFiles.ioStat, \"unified\", \"io.stat\"},\n\t\t{&r.statFiles.memoryStat, \"unified\", \"memory.stat\"},\n\t\t{&r.statFiles.memoryStat, \"memory\", \"memory.stat\"},\n\t\t{&r.statFiles.memoryCurrent, \"unified\", \"memory.current\"},\n\t\t{&r.statFiles.memorySwapCurrent, \"unified\", \"memory.swap.current\"},\n\t} {\n\t\tstartpath, ok := paths[try.pathkey]\n\t\tif !ok || done[try.statFile] {\n\t\t\tcontinue\n\t\t}\n\t\t// /proc/$PID/cgroup says cgroup path is\n\t\t// /exa/mple/exa/mple, however, sometimes the file we\n\t\t// need is not under that path, it's only available in\n\t\t// a parent cgroup's dir.  
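(In cgroup v2,\n\t\t// a controller's interface files appear only in cgroups\n\t\t// where that controller is enabled, which may be an\n\t\t// ancestor of the process's own cgroup.)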
So we start at\n\t\t// /sys/fs/cgroup/unified/exa/mple/exa/mple/ and walk\n\t\t// up to /sys/fs/cgroup/unified/ until we find the\n\t\t// desired file.\n\t\t//\n\t\t// This might mean our reported stats include more\n\t\t// cgroups in the cgroup tree, but it's the best we\n\t\t// can do.\n\t\tfor path := startpath; path != \"\" && path != \"/\" && (path == startpath || strings.HasPrefix(path, mounts[try.pathkey])); path, _ = filepath.Split(strings.TrimRight(path, \"/\")) {\n\t\t\ttarget := strings.TrimLeft(filepath.Join(path, try.file), \"/\")\n\t\t\tbuf, err := fs.ReadFile(r.FS, target)\n\t\t\tif err != nil || len(buf) == 0 || bytes.Equal(buf, []byte{'\\n'}) {\n\t\t\t\tif r.Debug {\n\t\t\t\t\tif os.IsNotExist(err) {\n\t\t\t\t\t\t// don't stutter\n\t\t\t\t\t\terr = os.ErrNotExist\n\t\t\t\t\t}\n\t\t\t\t\tr.Logger.Printf(\"skip /%s: %s\", target, err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t*try.statFile = target\n\t\t\tdone[try.statFile] = true\n\t\t\tr.Logger.Printf(\"notice: reading stats from /%s\", target)\n\t\t\tbreak\n\t\t}\n\t}\n\n\tnetdev := fmt.Sprintf(\"proc/%d/net/dev\", r.pid)\n\tif buf, err := fs.ReadFile(r.FS, netdev); err == nil && len(buf) > 0 {\n\t\tr.statFiles.netDev = netdev\n\t\tr.Logger.Printf(\"using /%s\", netdev)\n\t}\n}\n\nfunc (r *Reporter) reportMemoryMax(logger logPrinter, source, statName string, value, limit int64) {\n\tvar units string\n\tswitch statName {\n\tcase \"pgmajfault\":\n\t\tunits = \"faults\"\n\tdefault:\n\t\tunits = \"bytes\"\n\t}\n\tif limit > 0 {\n\t\tpercentage := 100 * value / limit\n\t\tlogger.Printf(\"Maximum %s memory %s usage was %d%%, %d/%d %s\",\n\t\t\tsource, statName, percentage, value, limit, units)\n\t} else {\n\t\tlogger.Printf(\"Maximum %s memory %s usage was %d %s\",\n\t\t\tsource, statName, value, units)\n\t}\n}\n\nfunc (r *Reporter) LogMaxima(logger logPrinter, memLimits map[string]int64) {\n\tif r.lastCPUSample.hasData {\n\t\tlogger.Printf(\"Total CPU usage was %f user and %f sys on %.2f CPUs\",\n\t\t\tr.lastCPUSample.user, r.lastCPUSample.sys, r.lastCPUSample.cpus)\n\t}\n\tfor disk, sample := range r.lastDiskIOSample {\n\t\tlogger.Printf(\"Total disk I/O on %s was %d bytes written and %d bytes read\",\n\t\t\tdisk, sample.txBytes, sample.rxBytes)\n\t}\n\tif r.maxDiskSpaceSample.total > 0 {\n\t\tpercentage := 100 * r.maxDiskSpaceSample.used / r.maxDiskSpaceSample.total\n\t\tlogger.Printf(\"Maximum disk usage was %d%%, %d/%d bytes\",\n\t\t\tpercentage, r.maxDiskSpaceSample.used, r.maxDiskSpaceSample.total)\n\t}\n\tfor _, statName := range memoryStats {\n\t\tvalue, ok := r.maxMemSample[memoryKey{statName: \"total_\" + statName}]\n\t\tif !ok {\n\t\t\tvalue, ok = r.maxMemSample[memoryKey{statName: statName}]\n\t\t}\n\t\tif ok {\n\t\t\tr.reportMemoryMax(logger, \"container\", statName, value, memLimits[statName])\n\t\t}\n\t}\n\tfor ifname, sample := range r.lastNetSample {\n\t\tlogger.Printf(\"Total network I/O on %s was %d bytes written and %d bytes read\",\n\t\t\tifname, sample.txBytes, sample.rxBytes)\n\t}\n}\n\nfunc (r *Reporter) LogProcessMemMax(logger logPrinter) {\n\tfor memKey, value := range r.maxMemSample {\n\t\tif memKey.processName == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tr.reportMemoryMax(logger, memKey.processName, memKey.statName, value, 0)\n\t}\n}\n\nfunc (r *Reporter) readAllOrWarn(in io.Reader) ([]byte, error) {\n\tcontent, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\tr.Logger.Printf(\"warning: %v\", err)\n\t}\n\treturn content, err\n}\n\ntype ioSample struct {\n\tsampleTime time.Time\n\ttxBytes    
int64\n\trxBytes    int64\n}\n\nfunc (r *Reporter) doBlkIOStats() {\n\tvar sampleTime = time.Now()\n\tnewSamples := make(map[string]ioSample)\n\n\tif r.statFiles.ioStat != \"\" {\n\t\tstatfile, err := fs.ReadFile(r.FS, r.statFiles.ioStat)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tfor _, line := range bytes.Split(statfile, []byte{'\\n'}) {\n\t\t\t// 254:16 rbytes=72163328 wbytes=117370880 rios=3811 wios=3906 dbytes=0 dios=0\n\t\t\twords := bytes.Split(line, []byte{' '})\n\t\t\tif len(words) < 2 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tthisSample := ioSample{sampleTime, -1, -1}\n\t\t\tfor _, kv := range words[1:] {\n\t\t\t\tif bytes.HasPrefix(kv, []byte(\"rbytes=\")) {\n\t\t\t\t\tfmt.Sscanf(string(kv[7:]), \"%d\", &thisSample.rxBytes)\n\t\t\t\t} else if bytes.HasPrefix(kv, []byte(\"wbytes=\")) {\n\t\t\t\t\tfmt.Sscanf(string(kv[7:]), \"%d\", &thisSample.txBytes)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif thisSample.rxBytes >= 0 && thisSample.txBytes >= 0 {\n\t\t\t\tnewSamples[string(words[0])] = thisSample\n\t\t\t}\n\t\t}\n\t} else if r.statFiles.ioServiceBytes != \"\" {\n\t\tstatfile, err := fs.ReadFile(r.FS, r.statFiles.ioServiceBytes)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tfor _, line := range bytes.Split(statfile, []byte{'\\n'}) {\n\t\t\tvar device, op string\n\t\t\tvar val int64\n\t\t\tif _, err := fmt.Sscanf(string(line), \"%s %s %d\", &device, &op, &val); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar thisSample ioSample\n\t\t\tvar ok bool\n\t\t\tif thisSample, ok = newSamples[device]; !ok {\n\t\t\t\tthisSample = ioSample{sampleTime, -1, -1}\n\t\t\t}\n\t\t\tswitch op {\n\t\t\tcase \"Read\":\n\t\t\t\tthisSample.rxBytes = val\n\t\t\tcase \"Write\":\n\t\t\t\tthisSample.txBytes = val\n\t\t\t}\n\t\t\tnewSamples[device] = thisSample\n\t\t}\n\t}\n\n\tfor dev, sample := range newSamples {\n\t\tif sample.txBytes < 0 || sample.rxBytes < 0 {\n\t\t\tcontinue\n\t\t}\n\t\tdelta := \"\"\n\t\tif prev, ok := r.lastDiskIOSample[dev]; ok {\n\t\t\tdelta = fmt.Sprintf(\" -- interval %.4f seconds %d write %d read\",\n\t\t\t\tsample.sampleTime.Sub(prev.sampleTime).Seconds(),\n\t\t\t\tsample.txBytes-prev.txBytes,\n\t\t\t\tsample.rxBytes-prev.rxBytes)\n\t\t}\n\t\tr.Logger.Printf(\"blkio:%s %d write %d read%s\\n\", dev, sample.txBytes, sample.rxBytes, delta)\n\t\tr.lastDiskIOSample[dev] = sample\n\t}\n}\n\ntype memSample struct {\n\tsampleTime time.Time\n\tmemStat    map[string]int64\n}\n\nfunc (r *Reporter) getMemSample() {\n\tthisSample := memSample{time.Now(), make(map[string]int64)}\n\n\t// memory.stat contains \"pgmajfault\" in cgroups v1 and v2. It\n\t// also contains \"rss\", \"swap\", and \"cache\" in cgroups v1.\n\tc, err := r.FS.Open(r.statFiles.memoryStat)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer c.Close()\n\tb := bufio.NewScanner(c)\n\tfor b.Scan() {\n\t\tvar stat string\n\t\tvar val int64\n\t\tif _, err := fmt.Sscanf(string(b.Text()), \"%s %d\", &stat, &val); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tthisSample.memStat[stat] = val\n\t}\n\n\t// In cgroups v2, we need to read \"memory.current\" and\n\t// \"memory.swap.current\" as well.\n\tfor stat, fnm := range map[string]string{\n\t\t// memory.current includes cache. 
We don't get\n\t\t// separate rss/cache values, so we call\n\t\t// memory usage \"rss\" for compatibility, and\n\t\t// omit \"cache\".\n\t\t\"rss\":  r.statFiles.memoryCurrent,\n\t\t\"swap\": r.statFiles.memorySwapCurrent,\n\t} {\n\t\tif fnm == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tbuf, err := fs.ReadFile(r.FS, fnm)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tvar val int64\n\t\t_, err = fmt.Sscanf(string(buf), \"%d\", &val)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tthisSample.memStat[stat] = val\n\t}\n\tfor stat, val := range thisSample.memStat {\n\t\tmaxKey := memoryKey{statName: stat}\n\t\tif val > r.maxMemSample[maxKey] {\n\t\t\tr.maxMemSample[maxKey] = val\n\t\t}\n\t}\n\tr.lastMemSample = thisSample\n\n\tif r.ThresholdLogger != nil {\n\t\tfor statName, thresholds := range r.MemThresholds {\n\t\t\tstatValue, ok := thisSample.memStat[\"total_\"+statName]\n\t\t\tif !ok {\n\t\t\t\tstatValue, ok = thisSample.memStat[statName]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tvar index int\n\t\t\tvar statThreshold Threshold\n\t\t\tfor index, statThreshold = range thresholds {\n\t\t\t\tif statValue < statThreshold.threshold {\n\t\t\t\t\tbreak\n\t\t\t\t} else if statThreshold.percentage > 0 {\n\t\t\t\t\tr.ThresholdLogger.Printf(\"Container using over %d%% of memory (%s %d/%d bytes)\",\n\t\t\t\t\t\tstatThreshold.percentage, statName, statValue, statThreshold.total)\n\t\t\t\t} else {\n\t\t\t\t\tr.ThresholdLogger.Printf(\"Container using over %d of memory (%s %d bytes)\",\n\t\t\t\t\t\tstatThreshold.threshold, statName, statValue)\n\t\t\t\t}\n\t\t\t}\n\t\t\tr.MemThresholds[statName] = thresholds[index:]\n\t\t}\n\t}\n}\n\nfunc (r *Reporter) reportMemSample() {\n\tvar outstat bytes.Buffer\n\tfor _, key := range memoryStats {\n\t\t// Use \"total_X\" stats (entire hierarchy) if enabled,\n\t\t// otherwise just the single cgroup -- see\n\t\t// https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt\n\t\tif val, ok := r.lastMemSample.memStat[\"total_\"+key]; ok {\n\t\t\tfmt.Fprintf(&outstat, \" %d %s\", val, key)\n\t\t} else if val, ok := r.lastMemSample.memStat[key]; ok {\n\t\t\tfmt.Fprintf(&outstat, \" %d %s\", val, key)\n\t\t}\n\t}\n\tr.Logger.Printf(\"mem%s\\n\", outstat.String())\n}\n\nfunc (r *Reporter) doProcmemStats() {\n\tif r.kernelPageSize == 0 {\n\t\t// assign \"don't try again\" value in case we give up\n\t\t// and return without assigning the real value\n\t\tr.kernelPageSize = -1\n\t\tbuf, err := fs.ReadFile(r.FS, \"proc/self/smaps\")\n\t\tif err != nil {\n\t\t\tr.Logger.Printf(\"error reading /proc/self/smaps: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tm := regexp.MustCompile(`\\nKernelPageSize:\\s*(\\d+) kB\\n`).FindSubmatch(buf)\n\t\tif len(m) != 2 {\n\t\t\tr.Logger.Printf(\"error parsing /proc/self/smaps: KernelPageSize not found\")\n\t\t\treturn\n\t\t}\n\t\tsize, err := strconv.ParseInt(string(m[1]), 10, 64)\n\t\tif err != nil {\n\t\t\tr.Logger.Printf(\"error parsing /proc/self/smaps: KernelPageSize %q: %s\", m[1], err)\n\t\t\treturn\n\t\t}\n\t\tr.kernelPageSize = size * 1024\n\t} else if r.kernelPageSize < 0 {\n\t\t// already failed to determine page size, don't keep\n\t\t// trying/logging\n\t\treturn\n\t}\n\n\tr.reportPIDsMu.Lock()\n\tdefer r.reportPIDsMu.Unlock()\n\tprocnames := make([]string, 0, len(r.reportPIDs))\n\tfor name := range r.reportPIDs {\n\t\tprocnames = append(procnames, name)\n\t}\n\tsort.Strings(procnames)\n\tprocmem := \"\"\n\tfor _, procname := range procnames {\n\t\tpid := r.reportPIDs[procname]\n\t\tbuf, err := fs.ReadFile(r.FS, 
fmt.Sprintf(\"proc/%d/stat\", pid))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\t// If the executable name contains a ')' char,\n\t\t// /proc/$pid/stat will look like '1234 (exec name)) S\n\t\t// 123 ...' -- the last ')' is the end of the 2nd\n\t\t// field.\n\t\tparen := bytes.LastIndexByte(buf, ')')\n\t\tif paren < 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfields := bytes.SplitN(buf[paren:], []byte{' '}, 24)\n\t\tif len(fields) < 24 {\n\t\t\tcontinue\n\t\t}\n\t\t// rss is the 24th field in .../stat, and fields[0]\n\t\t// here is the last char ')' of the 2nd field, so\n\t\t// rss is fields[22]\n\t\trss, err := strconv.ParseInt(string(fields[22]), 10, 64)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tvalue := rss * r.kernelPageSize\n\t\tprocmem += fmt.Sprintf(\" %d %s\", value, procname)\n\t\tmaxKey := memoryKey{pid, procname, \"rss\"}\n\t\tif value > r.maxMemSample[maxKey] {\n\t\t\tr.maxMemSample[maxKey] = value\n\t\t}\n\t}\n\tif procmem != \"\" {\n\t\tr.Logger.Printf(\"procmem%s\\n\", procmem)\n\t}\n}\n\nfunc (r *Reporter) doNetworkStats() {\n\tif r.statFiles.netDev == \"\" {\n\t\treturn\n\t}\n\tsampleTime := time.Now()\n\tstats, err := r.FS.Open(r.statFiles.netDev)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer stats.Close()\n\tscanner := bufio.NewScanner(stats)\n\tfor scanner.Scan() {\n\t\tvar ifName string\n\t\tvar rx, tx int64\n\t\twords := strings.Fields(scanner.Text())\n\t\tif len(words) != 17 {\n\t\t\t// Skip lines with wrong format\n\t\t\tcontinue\n\t\t}\n\t\tifName = strings.TrimRight(words[0], \":\")\n\t\tif ifName == \"lo\" || ifName == \"\" {\n\t\t\t// Skip loopback interface and lines with wrong format\n\t\t\tcontinue\n\t\t}\n\t\tif tx, err = strconv.ParseInt(words[9], 10, 64); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif rx, err = strconv.ParseInt(words[1], 10, 64); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tnextSample := ioSample{}\n\t\tnextSample.sampleTime = sampleTime\n\t\tnextSample.txBytes = tx\n\t\tnextSample.rxBytes = rx\n\t\tvar delta string\n\t\tif prev, ok := r.lastNetSample[ifName]; ok {\n\t\t\tinterval := nextSample.sampleTime.Sub(prev.sampleTime).Seconds()\n\t\t\tdelta = fmt.Sprintf(\" -- interval %.4f seconds %d tx %d rx\",\n\t\t\t\tinterval,\n\t\t\t\ttx-prev.txBytes,\n\t\t\t\trx-prev.rxBytes)\n\t\t}\n\t\tr.Logger.Printf(\"net:%s %d tx %d rx%s\\n\", ifName, tx, rx, delta)\n\t\tr.lastNetSample[ifName] = nextSample\n\t}\n}\n\ntype diskSpaceSample struct {\n\thasData    bool\n\tsampleTime time.Time\n\ttotal      uint64\n\tused       uint64\n\tavailable  uint64\n}\n\nfunc (r *Reporter) doDiskSpaceStats() {\n\ts := syscall.Statfs_t{}\n\terr := syscall.Statfs(r.TempDir, &s)\n\tif err != nil {\n\t\treturn\n\t}\n\tbs := uint64(s.Bsize)\n\tnextSample := diskSpaceSample{\n\t\thasData:    true,\n\t\tsampleTime: time.Now(),\n\t\ttotal:      s.Blocks * bs,\n\t\tused:       (s.Blocks - s.Bfree) * bs,\n\t\tavailable:  s.Bavail * bs,\n\t}\n\tif nextSample.used > r.maxDiskSpaceSample.used {\n\t\tr.maxDiskSpaceSample = nextSample\n\t}\n\n\tvar delta string\n\tif r.lastDiskSpaceSample.hasData {\n\t\tprev := r.lastDiskSpaceSample\n\t\tinterval := nextSample.sampleTime.Sub(prev.sampleTime).Seconds()\n\t\tdelta = fmt.Sprintf(\" -- interval %.4f seconds %d used\",\n\t\t\tinterval,\n\t\t\tint64(nextSample.used-prev.used))\n\t}\n\tr.Logger.Printf(\"statfs %d available %d used %d total%s\\n\",\n\t\tnextSample.available, nextSample.used, nextSample.total, delta)\n\tr.lastDiskSpaceSample = nextSample\n}\n\ntype cpuSample struct {\n\thasData    bool // to distinguish the zero value from real 
data\n\tsampleTime time.Time\n\tuser       float64\n\tsys        float64\n\tcpus       float64\n}\n\n// Return the number of virtual CPUs available in the container. This\n// can be based on a scheduling ratio (which is not necessarily a\n// whole number) or a restricted set of accessible CPUs.\n//\n// Return the number of host processors based on /proc/cpuinfo if\n// cgroupfs doesn't reveal anything.\n//\n// Return 0 if even that doesn't work.\nfunc (r *Reporter) getCPUCount() float64 {\n\tif buf, err := fs.ReadFile(r.FS, r.statFiles.cpuMax); err == nil {\n\t\t// cpu.max looks like \"150000 100000\" if CPU usage is\n\t\t// restricted to 150% (docker run --cpus=1.5), or \"max\n\t\t// 100000\\n\" if not.\n\t\tvar max, period int64\n\t\tif _, err := fmt.Sscanf(string(buf), \"%d %d\", &max, &period); err == nil {\n\t\t\treturn float64(max) / float64(period)\n\t\t}\n\t}\n\tif buf, err := fs.ReadFile(r.FS, r.statFiles.cpusetCpus); err == nil {\n\t\t// cpuset.cpus looks like \"0,4-7\\n\" if only CPUs\n\t\t// 0,4,5,6,7 are available.\n\t\tcpus := 0\n\t\tfor _, v := range bytes.Split(buf, []byte{','}) {\n\t\t\tvar min, max int\n\t\t\tn, _ := fmt.Sscanf(string(v), \"%d-%d\", &min, &max)\n\t\t\tif n == 2 {\n\t\t\t\tcpus += (max - min) + 1\n\t\t\t} else {\n\t\t\t\tcpus++\n\t\t\t}\n\t\t}\n\t\treturn float64(cpus)\n\t}\n\tif buf, err := fs.ReadFile(r.FS, \"proc/cpuinfo\"); err == nil {\n\t\t// cpuinfo has a line like \"processor\\t: 0\\n\" for each\n\t\t// CPU.\n\t\tcpus := 0\n\t\tfor _, line := range bytes.Split(buf, []byte{'\\n'}) {\n\t\t\tif bytes.HasPrefix(line, []byte(\"processor\\t:\")) {\n\t\t\t\tcpus++\n\t\t\t}\n\t\t}\n\t\treturn float64(cpus)\n\t}\n\treturn 0\n}\n\nfunc (r *Reporter) doCPUStats() {\n\tvar nextSample cpuSample\n\tif r.statFiles.cpuStat != \"\" {\n\t\t// v2\n\t\tf, err := r.FS.Open(r.statFiles.cpuStat)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer f.Close()\n\t\tnextSample = cpuSample{\n\t\t\thasData:    true,\n\t\t\tsampleTime: time.Now(),\n\t\t\tcpus:       r.getCPUCount(),\n\t\t}\n\t\tfor {\n\t\t\tvar stat string\n\t\t\tvar val int64\n\t\t\tn, err := fmt.Fscanf(f, \"%s %d\\n\", &stat, &val)\n\t\t\tif err != nil || n != 2 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif stat == \"user_usec\" {\n\t\t\t\tnextSample.user = float64(val) / 1000000\n\t\t\t} else if stat == \"system_usec\" {\n\t\t\t\tnextSample.sys = float64(val) / 1000000\n\t\t\t}\n\t\t}\n\t} else if r.statFiles.cpuacctStat != \"\" {\n\t\t// v1\n\t\tb, err := fs.ReadFile(r.FS, r.statFiles.cpuacctStat)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tvar userTicks, sysTicks int64\n\t\tfmt.Sscanf(string(b), \"user %d\\nsystem %d\", &userTicks, &sysTicks)\n\t\tuserHz := float64(100)\n\t\tnextSample = cpuSample{\n\t\t\thasData:    true,\n\t\t\tsampleTime: time.Now(),\n\t\t\tuser:       float64(userTicks) / userHz,\n\t\t\tsys:        float64(sysTicks) / userHz,\n\t\t\tcpus:       r.getCPUCount(),\n\t\t}\n\t}\n\n\tdelta := \"\"\n\tif r.lastCPUSample.hasData {\n\t\tdelta = fmt.Sprintf(\" -- interval %.4f seconds %.4f user %.4f sys\",\n\t\t\tnextSample.sampleTime.Sub(r.lastCPUSample.sampleTime).Seconds(),\n\t\t\tnextSample.user-r.lastCPUSample.user,\n\t\t\tnextSample.sys-r.lastCPUSample.sys)\n\t}\n\tr.Logger.Printf(\"cpu %.4f user %.4f sys %.2f cpus%s\\n\",\n\t\tnextSample.user, nextSample.sys, nextSample.cpus, delta)\n\tr.lastCPUSample = nextSample\n}\n\nfunc (r *Reporter) doAllStats() {\n\tr.reportMemSample()\n\tr.doProcmemStats()\n\tr.doCPUStats()\n\tr.doBlkIOStats()\n\tr.doNetworkStats()\n\tr.doDiskSpaceStats()\n}\n\n// 
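run is the monitoring goroutine's main loop:\n// 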
Report stats periodically until we learn\n// (via r.done) that someone called Stop.\nfunc (r *Reporter) run() {\n\tdefer close(r.flushed)\n\n\tr.maxMemSample = make(map[memoryKey]int64)\n\n\tif !r.waitForPid() {\n\t\treturn\n\t}\n\tr.findStatFiles()\n\tclose(r.ready)\n\n\tr.lastNetSample = make(map[string]ioSample)\n\tr.lastDiskIOSample = make(map[string]ioSample)\n\n\tif len(r.TempDir) == 0 {\n\t\t// Temporary dir not provided, try to get it from the environment.\n\t\tr.TempDir = os.Getenv(\"TMPDIR\")\n\t}\n\tif len(r.TempDir) > 0 {\n\t\tr.Logger.Printf(\"notice: monitoring temp dir %s\\n\", r.TempDir)\n\t}\n\n\tr.getMemSample()\n\tr.doAllStats()\n\n\tif r.PollPeriod < 1 {\n\t\tr.PollPeriod = time.Second * 10\n\t}\n\n\tmemTicker := time.NewTicker(time.Second)\n\tmainTicker := time.NewTicker(r.PollPeriod)\n\tfor {\n\t\tselect {\n\t\tcase <-r.done:\n\t\t\treturn\n\t\tcase <-memTicker.C:\n\t\t\tr.getMemSample()\n\t\tcase <-mainTicker.C:\n\t\t\tr.doAllStats()\n\t\t}\n\t}\n}\n\n// Wait for Pid() to return a real pid.  Return true if this succeeds\n// before Stop is called.\nfunc (r *Reporter) waitForPid() bool {\n\tticker := time.NewTicker(100 * time.Millisecond)\n\tdefer ticker.Stop()\n\twarningTimer := time.After(r.PollPeriod)\n\tfor {\n\t\tr.pid = r.Pid()\n\t\tif r.pid > 0 {\n\t\t\tbreak\n\t\t}\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\tcase <-warningTimer:\n\t\t\tr.Logger.Printf(\"warning: Pid() did not return a process ID after %v (config error?) -- still waiting...\", r.PollPeriod)\n\t\tcase <-r.done:\n\t\t\tr.Logger.Printf(\"warning: Pid() never returned a process ID\")\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (r *Reporter) dumpSourceFiles(destdir string) error {\n\tselect {\n\tcase <-r.done:\n\t\treturn errors.New(\"reporter was never ready\")\n\tcase <-r.ready:\n\t}\n\ttodo := []string{\n\t\tfmt.Sprintf(\"proc/%d/cgroup\", r.pid),\n\t\tfmt.Sprintf(\"proc/%d/cpuset\", r.pid),\n\t\t\"proc/cpuinfo\",\n\t\t\"proc/mounts\",\n\t\t\"proc/self/smaps\",\n\t\tr.statFiles.cpuMax,\n\t\tr.statFiles.cpusetCpus,\n\t\tr.statFiles.cpuacctStat,\n\t\tr.statFiles.cpuStat,\n\t\tr.statFiles.ioServiceBytes,\n\t\tr.statFiles.ioStat,\n\t\tr.statFiles.memoryStat,\n\t\tr.statFiles.memoryCurrent,\n\t\tr.statFiles.memorySwapCurrent,\n\t\tr.statFiles.netDev,\n\t}\n\tfor _, path := range todo {\n\t\tif path == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\terr := r.createParentsAndCopyFile(destdir, path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tr.reportPIDsMu.Lock()\n\tdefer r.reportPIDsMu.Unlock()\n\tfor _, pid := range r.reportPIDs {\n\t\tpath := fmt.Sprintf(\"proc/%d/stat\", pid)\n\t\terr := r.createParentsAndCopyFile(destdir, path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif proc, err := os.FindProcess(r.pid); err != nil || proc.Signal(syscall.Signal(0)) != nil {\n\t\treturn fmt.Errorf(\"process %d no longer exists, snapshot is probably broken\", r.pid)\n\t}\n\treturn nil\n}\n\nfunc (r *Reporter) createParentsAndCopyFile(destdir, path string) error {\n\tbuf, err := fs.ReadFile(r.FS, path)\n\tif os.IsNotExist(err) {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn err\n\t}\n\tif parent, _ := filepath.Split(path); parent != \"\" {\n\t\terr = os.MkdirAll(destdir+\"/\"+parent, 0777)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"mkdir %s: %s\", destdir+\"/\"+parent, err)\n\t\t}\n\t}\n\tdestfile := destdir + \"/\" + path\n\tr.Logger.Printf(\"copy %s to %s -- size %d\", path, destfile, len(buf))\n\treturn os.WriteFile(destfile, buf, 0777)\n}\n"
  },
  {
    "path": "lib/crunchstat/crunchstat_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage crunchstat\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"os\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t. \"gopkg.in/check.v1\"\n)\n\nconst logMsgPrefix = `(?m)(.*\\n)*.* msg=\"`\n\nfunc Test(t *testing.T) {\n\tTestingT(t)\n}\n\nvar _ = Suite(&suite{})\n\ntype testdatasource struct {\n\tfspath string\n\tpid    int\n}\n\nfunc (s testdatasource) Pid() int {\n\treturn s.pid\n}\nfunc (s testdatasource) FS() fs.FS {\n\treturn os.DirFS(s.fspath)\n}\n\n// To generate a test case for a new OS target, build\n// cmd/arvados-server and run\n//\n//\tarvados-server crunchstat -dump ./testdata/example1234 sleep 2\nvar testdata = map[string]testdatasource{\n\t\"debian10\":   {fspath: \"testdata/debian10\", pid: 3288},\n\t\"debian11\":   {fspath: \"testdata/debian11\", pid: 4153022},\n\t\"debian12\":   {fspath: \"testdata/debian12\", pid: 1115883},\n\t\"ubuntu1804\": {fspath: \"testdata/ubuntu1804\", pid: 2523},\n\t\"ubuntu2004\": {fspath: \"testdata/ubuntu2004\", pid: 1360},\n\t\"ubuntu2204\": {fspath: \"testdata/ubuntu2204\", pid: 1967},\n}\n\ntype suite struct {\n\tlogbuf                bytes.Buffer\n\tlogger                *logrus.Logger\n\tdebian12MemoryCurrent int64\n}\n\nfunc (s *suite) SetUpSuite(c *C) {\n\ts.logger = logrus.New()\n\ts.logger.Out = &s.logbuf\n\n\tbuf, err := os.ReadFile(\"testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.current\")\n\tc.Assert(err, IsNil)\n\t_, err = fmt.Sscanf(string(buf), \"%d\", &s.debian12MemoryCurrent)\n\tc.Assert(err, IsNil)\n}\n\nfunc (s *suite) SetUpTest(c *C) {\n\ts.logbuf.Reset()\n}\n\n// Report stats for the current (go test) process's cgroup, using the\n// test host's real procfs/sysfs.\nfunc (s *suite) TestReportCurrent(c *C) {\n\tr := Reporter{\n\t\tPid:        os.Getpid,\n\t\tLogger:     s.logger,\n\t\tPollPeriod: time.Second,\n\t}\n\tr.Start()\n\tdefer r.Stop()\n\tcheckPatterns := []string{\n\t\t`(?ms).*rss.*`,\n\t\t`(?ms).*net:.*`,\n\t\t`(?ms).*blkio:.*`,\n\t\t`(?ms).* [\\d.]+ user [\\d.]+ sys ` + fmt.Sprintf(\"%.2f\", float64(runtime.NumCPU())) + ` cpus -- .*`,\n\t}\n\tfor deadline := time.Now().Add(4 * time.Second); !c.Failed(); time.Sleep(time.Millisecond) {\n\t\tdone := true\n\t\tfor _, pattern := range checkPatterns {\n\t\t\tif m := regexp.MustCompile(pattern).FindSubmatch(s.logbuf.Bytes()); len(m) == 0 {\n\t\t\t\tdone = false\n\t\t\t\tif time.Now().After(deadline) {\n\t\t\t\t\tc.Errorf(\"timed out waiting for %s\", pattern)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif done {\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Logf(\"%s\", s.logbuf.String())\n}\n\n// Report stats for a the current (go test) process.\nfunc (s *suite) TestReportPIDs(c *C) {\n\tr := Reporter{\n\t\tPid:        func() int { return 1 },\n\t\tLogger:     s.logger,\n\t\tPollPeriod: time.Second,\n\t}\n\tr.Start()\n\tdefer r.Stop()\n\tr.ReportPID(\"init\", 1)\n\tr.ReportPID(\"test_process\", os.Getpid())\n\tr.ReportPID(\"nonexistent\", 12345) // should be silently ignored/omitted\n\tfor deadline := time.Now().Add(10 * time.Second); ; time.Sleep(time.Millisecond) {\n\t\tif time.Now().After(deadline) {\n\t\t\tc.Error(\"timed out\")\n\t\t\tbreak\n\t\t}\n\t\tif m := regexp.MustCompile(`(?ms).*procmem \\d+ init (\\d+) test_process.*`).FindSubmatch(s.logbuf.Bytes()); len(m) > 0 {\n\t\t\tsize, err := strconv.ParseInt(string(m[1]), 10, 64)\n\t\t\tc.Check(err, IsNil)\n\t\t\t// 
Expect >1 MiB and <100 MiB -- otherwise we\n\t\t\t// are probably misinterpreting /proc/N/stat\n\t\t\t// or multiplying by the wrong page size.\n\t\t\tc.Check(size > 1000000, Equals, true)\n\t\t\tc.Check(size < 100000000, Equals, true)\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Logf(\"%s\", s.logbuf.String())\n}\n\nfunc (s *suite) TestAllTestdata(c *C) {\n\tfor platform, datasource := range testdata {\n\t\ts.logbuf.Reset()\n\t\tc.Logf(\"=== %s\", platform)\n\t\trep := Reporter{\n\t\t\tPid:             datasource.Pid,\n\t\t\tFS:              datasource.FS(),\n\t\t\tLogger:          s.logger,\n\t\t\tPollPeriod:      time.Second,\n\t\t\tThresholdLogger: s.logger,\n\t\t\tDebug:           true,\n\t\t}\n\t\trep.Start()\n\t\trep.Stop()\n\t\tlogs := s.logbuf.String()\n\t\tc.Logf(\"%s\", logs)\n\t\tc.Check(logs, Matches, `(?ms).* \\d\\d+ rss\\\\n.*`)\n\t\tc.Check(logs, Matches, `(?ms).*blkio:\\d+:\\d+ \\d+ write \\d+ read\\\\n.*`)\n\t\tc.Check(logs, Matches, `(?ms).*net:\\S+ \\d+ tx \\d+ rx\\\\n.*`)\n\t\tc.Check(logs, Matches, `(?ms).* [\\d.]+ user [\\d.]+ sys [2-9]\\d*\\.\\d\\d cpus.*`)\n\t}\n}\n\nfunc (s *suite) testRSSThresholds(c *C, rssPercentages []int64, alertCount int) {\n\tc.Assert(alertCount <= len(rssPercentages), Equals, true)\n\trep := Reporter{\n\t\tPid:    testdata[\"debian12\"].Pid,\n\t\tFS:     testdata[\"debian12\"].FS(),\n\t\tLogger: s.logger,\n\t\tMemThresholds: map[string][]Threshold{\n\t\t\t\"rss\": NewThresholdsFromPercentages(s.debian12MemoryCurrent*3/2, rssPercentages),\n\t\t},\n\t\tPollPeriod:      time.Second * 10,\n\t\tThresholdLogger: s.logger,\n\t}\n\trep.Start()\n\trep.Stop()\n\tlogs := s.logbuf.String()\n\tc.Logf(\"%s\", logs)\n\n\t// The first alertCount thresholds should have been crossed and\n\t// logged; the remaining thresholds should not appear in the logs.\n\tfor index, expectPercentage := range rssPercentages {\n\t\tvar logCheck Checker\n\t\tif index < alertCount {\n\t\t\tlogCheck = Matches\n\t\t} else {\n\t\t\tlogCheck = Not(Matches)\n\t\t}\n\t\tpattern := fmt.Sprintf(`%sContainer using over %d%% of memory \\(rss %d/%d bytes\\)\"`,\n\t\t\tlogMsgPrefix, expectPercentage, s.debian12MemoryCurrent, s.debian12MemoryCurrent*3/2)\n\t\tc.Check(logs, logCheck, pattern)\n\t}\n}\n\nfunc (s *suite) TestZeroRSSThresholds(c *C) {\n\ts.testRSSThresholds(c, []int64{}, 0)\n}\n\nfunc (s *suite) TestOneRSSThresholdPassed(c *C) {\n\ts.testRSSThresholds(c, []int64{55}, 1)\n}\n\nfunc (s *suite) TestOneRSSThresholdNotPassed(c *C) {\n\ts.testRSSThresholds(c, []int64{85}, 0)\n}\n\nfunc (s *suite) TestMultipleRSSThresholdsNonePassed(c *C) {\n\ts.testRSSThresholds(c, []int64{95, 97, 99}, 0)\n}\n\nfunc (s *suite) TestMultipleRSSThresholdsSomePassed(c *C) {\n\ts.testRSSThresholds(c, []int64{45, 60, 75, 90}, 2)\n}\n\nfunc (s *suite) TestMultipleRSSThresholdsAllPassed(c *C) {\n\ts.testRSSThresholds(c, []int64{1, 2, 3}, 3)\n}\n\nfunc (s *suite) TestLogMaxima(c *C) {\n\trep := Reporter{\n\t\tPid:        testdata[\"debian12\"].Pid,\n\t\tFS:         testdata[\"debian12\"].FS(),\n\t\tLogger:     s.logger,\n\t\tPollPeriod: time.Second * 10,\n\t\tTempDir:    \"/\",\n\t}\n\trep.Start()\n\trep.Stop()\n\trep.LogMaxima(s.logger, map[string]int64{\"rss\": s.debian12MemoryCurrent * 3 / 2})\n\tlogs := s.logbuf.String()\n\tc.Logf(\"%s\", logs)\n\n\texpectRSS := fmt.Sprintf(`Maximum container memory rss usage was %d%%, %d/%d bytes`,\n\t\t66, s.debian12MemoryCurrent, s.debian12MemoryCurrent*3/2)\n\tfor _, expected := range []string{\n\t\t`Maximum disk usage was \\d+%, \\d+/\\d+ bytes`,\n\t\t`Maximum container memory swap usage was \\d\\d+ bytes`,\n\t\t`Maximum container memory pgmajfault usage was \\d\\d+ faults`,\n\t\texpectRSS,\n\t} 
{\n\t\tpattern := logMsgPrefix + expected + `\"`\n\t\tc.Check(logs, Matches, pattern)\n\t}\n}\n\nfunc (s *suite) TestLogProcessMemMax(c *C) {\n\trep := Reporter{\n\t\tPid:        os.Getpid,\n\t\tLogger:     s.logger,\n\t\tPollPeriod: time.Second * 10,\n\t}\n\trep.ReportPID(\"test-run\", os.Getpid())\n\trep.Start()\n\trep.Stop()\n\trep.LogProcessMemMax(s.logger)\n\tlogs := s.logbuf.String()\n\tc.Logf(\"%s\", logs)\n\n\tpattern := logMsgPrefix + `Maximum test-run memory rss usage was \\d+ bytes\"`\n\tc.Check(logs, Matches, pattern)\n}\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian10/proc/3288/cgroup",
    "content": "0::/user.slice/user-1000.slice/session-7.scope\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian10/proc/3288/cpuset",
    "content": "/\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian10/proc/3288/net/dev",
    "content": "Inter-|   Receive                                                |  Transmit\n face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed\n  ens5: 168696850   62770    0    0    0     0          0         0  1202238   11890    0    0    0     0       0          0\n    lo:       0       0    0    0    0     0          0         0        0       0    0    0    0     0       0          0\ndocker0:       0       0    0    0    0     0          0         0     1080      12    0    0    0     0       0          0\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian10/proc/cpuinfo",
    "content": "processor\t: 0\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 85\nmodel name\t: Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz\nstepping\t: 4\nmicrocode\t: 0x2007006\ncpu MHz\t\t: 2499.998\ncache size\t: 33792 KB\nphysical id\t: 0\nsiblings\t: 2\ncore id\t\t: 0\ncpu cores\t: 1\napicid\t\t: 0\ninitial apicid\t: 0\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 13\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single pti fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves ida arat pku ospke\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit mmio_stale_data retbleed gds\nbogomips\t: 4999.99\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 46 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 1\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 85\nmodel name\t: Intel(R) Xeon(R) Platinum 8175M CPU @ 2.50GHz\nstepping\t: 4\nmicrocode\t: 0x2007006\ncpu MHz\t\t: 2499.998\ncache size\t: 33792 KB\nphysical id\t: 0\nsiblings\t: 2\ncore id\t\t: 0\ncpu cores\t: 1\napicid\t\t: 1\ninitial apicid\t: 1\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 13\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single pti fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves ida arat pku ospke\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit mmio_stale_data retbleed gds\nbogomips\t: 4999.99\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 46 bits physical, 48 bits virtual\npower management:\n\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian10/proc/mounts",
    "content": "sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\nproc /proc proc rw,nosuid,nodev,noexec,relatime 0 0\nudev /dev devtmpfs rw,nosuid,relatime,size=992288k,nr_inodes=248072,mode=755 0 0\ndevpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0\ntmpfs /run tmpfs rw,nosuid,noexec,relatime,size=200676k,mode=755 0 0\n/dev/nvme0n1p1 / ext4 rw,relatime,discard,errors=remount-ro 0 0\nsecurityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0\ntmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0\ntmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0\ncgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate 0 0\npstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0\nbpf /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0\nsystemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=33,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=9700 0 0\nmqueue /dev/mqueue mqueue rw,relatime 0 0\ndebugfs /sys/kernel/debug debugfs rw,relatime 0 0\nhugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0\n/dev/nvme0n1p15 /boot/efi vfat rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=ascii,shortname=mixed,utf8,errors=remount-ro 0 0\ntmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=200676k,mode=700,uid=1000,gid=1000 0 0\n/dev/mapper/autoscale_vg-autoscale_lv /tmp ext4 rw,relatime 0 0\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian10/proc/self/smaps",
    "content": "00400000-00403000 r--p 00000000 103:01 268952                            /home/admin/arvados-server\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                  12 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:        12 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me dw sd \n00403000-01779000 r-xp 00003000 103:01 268952                            /home/admin/arvados-server\nSize:              19928 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:               12376 kB\nPss:               12376 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:     12376 kB\nPrivate_Dirty:         0 kB\nReferenced:        12376 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd ex mr mw me dw sd \n01779000-02f2d000 r--p 01379000 103:01 268952                            /home/admin/arvados-server\nSize:              24272 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:               10588 kB\nPss:               10588 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:      9084 kB\nPrivate_Dirty:      1504 kB\nReferenced:        10588 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me dw sd \n02f2e000-02f2f000 r--p 02b2d000 103:01 268952                            /home/admin/arvados-server\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me dw ac sd \n02f2f000-02fc6000 rw-p 02b2e000 103:01 268952                            /home/admin/arvados-server\nSize:                604 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 456 kB\nPss:                 456 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       456 kB\nReferenced:          456 kB\nAnonymous:           176 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 
kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd wr mr mw me dw ac sd \n02fc6000-0300d000 rw-p 00000000 00:00 0 \nSize:                284 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  96 kB\nPss:                  96 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        96 kB\nReferenced:           96 kB\nAnonymous:            96 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n03590000-035b1000 rw-p 00000000 00:00 0                                  [heap]\nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \nc000000000-c000800000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                8192 kB\nPss:                8192 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:      8192 kB\nReferenced:         8192 kB\nAnonymous:          8192 kB\nLazyFree:              0 kB\nAnonHugePages:      8192 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \nc000800000-c004000000 ---p 00000000 00:00 0 \nSize:              57344 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f851ffc0000-7f8520000000 rw-p 00000000 00:00 0 \nSize:                256 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 244 kB\nPss:                 244 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       244 kB\nReferenced:          244 kB\nAnonymous:           244 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f8520000000-7f8520021000 rw-p 00000000 00:00 
0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me nr sd \n7f8520021000-7f8524000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me nr sd \n7f8524000000-7f8524021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me nr sd \n7f8524021000-7f8528000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me nr sd \n7f8528000000-7f8528021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me nr sd \n7f8528021000-7f852c000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 
kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me nr sd \n7f852c000000-7f852c021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me nr sd \n7f852c021000-7f8530000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me nr sd \n7f8530000000-7f8530021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me nr sd \n7f8530021000-7f8534000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me nr sd \n7f853401c000-7f853429c000 rw-p 00000000 00:00 0 \nSize:               2560 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                1028 kB\nPss:                1028 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:      1028 kB\nReferenced:         1028 kB\nAnonymous:          1028 kB\nLazyFree:              0 
kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f853429c000-7f853429d000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f853429d000-7f8534a9d000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f8534a9d000-7f8534a9e000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f8534a9e000-7f85352de000 rw-p 00000000 00:00 0 \nSize:               8448 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 252 kB\nPss:                 252 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       252 kB\nReferenced:          252 kB\nAnonymous:           252 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f85352de000-7f85352df000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 
kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f85352df000-7f8535adf000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f8535adf000-7f8535ae0000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f8535ae0000-7f85362e0000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f85362e0000-7f85362e1000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f85362e1000-7f8538e00000 rw-p 00000000 00:00 0 \nSize:              44156 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                2200 kB\nPss:                2200 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:      2200 kB\nReferenced:         2200 kB\nAnonymous:          2200 kB\nLazyFree:              0 kB\nAnonHugePages:      2048 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f8538e00000-7f8539000000 rw-p 00000000 00:00 0 \nSize:               2048 
kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd hg \n7f8539000000-7f853911d000 rw-p 00000000 00:00 0 \nSize:               1140 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f853911d000-7f8549696000 ---p 00000000 00:00 0 \nSize:             267748 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f8549696000-7f8549697000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f8549697000-7f855b546000 ---p 00000000 00:00 0 \nSize:             293564 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f855b546000-7f855b547000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 
kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855b547000-7f855d91c000 ---p 00000000 00:00 0 \nSize:              36692 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f855d91c000-7f855d91d000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855d91d000-7f855dd96000 ---p 00000000 00:00 0 \nSize:               4580 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f855dd96000-7f855dd97000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855dd97000-7f855de16000 ---p 00000000 00:00 0 \nSize:                508 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 
kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f855de16000-7f855de79000 rw-p 00000000 00:00 0 \nSize:                396 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  56 kB\nPss:                  56 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        56 kB\nReferenced:           56 kB\nAnonymous:            56 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855de79000-7f855de7b000 r--p 00000000 103:01 2008                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   0 kB\nShared_Clean:          8 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            8 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855de7b000-7f855de7e000 r-xp 00002000 103:01 2008                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd ex mr mw me sd \n7f855de7e000-7f855de7f000 r--p 00005000 103:01 2008                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855de7f000-7f855de80000 r--p 00005000 103:01 2008                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 
kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me ac sd \n7f855de80000-7f855de81000 rw-p 00006000 103:01 2008                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855de81000-7f855de83000 rw-p 00000000 00:00 0 \nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855de83000-7f855de84000 r--p 00000000 103:01 2214                      /usr/lib/x86_64-linux-gnu/libdl-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855de84000-7f855de85000 r-xp 00001000 103:01 2214                      /usr/lib/x86_64-linux-gnu/libdl-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd ex mr mw me sd \n7f855de85000-7f855de86000 r--p 00002000 103:01 2214                      /usr/lib/x86_64-linux-gnu/libdl-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 
kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855de86000-7f855de87000 r--p 00002000 103:01 2214                      /usr/lib/x86_64-linux-gnu/libdl-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me ac sd \n7f855de87000-7f855de88000 rw-p 00003000 103:01 2214                      /usr/lib/x86_64-linux-gnu/libdl-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855de88000-7f855de8b000 r--p 00000000 103:01 324                       /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855de8b000-7f855de92000 r-xp 00003000 103:01 324                       /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 28 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  24 kB\nPss:                   1 kB\nShared_Clean:         24 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           24 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd ex mr mw me sd \n7f855de92000-7f855dea6000 r--p 0000a000 103:01 324                       /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 80 kB\nKernelPageSize:      
  4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855dea6000-7f855dea7000 ---p 0001e000 103:01 324                       /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f855dea7000-7f855dea8000 r--p 0001e000 103:01 324                       /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me ac sd \n7f855dea8000-7f855dea9000 rw-p 0001f000 103:01 324                       /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855dea9000-7f855deb3000 rw-p 00000000 00:00 0 \nSize:                 40 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855deb3000-7f855ded5000 r--p 00000000 103:01 2212                      
/usr/lib/x86_64-linux-gnu/libc-2.28.so\nSize:                136 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 136 kB\nPss:                   4 kB\nShared_Clean:        136 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          136 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855ded5000-7f855e01c000 r-xp 00022000 103:01 2212                      /usr/lib/x86_64-linux-gnu/libc-2.28.so\nSize:               1308 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 612 kB\nPss:                  20 kB\nShared_Clean:        612 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          612 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd ex mr mw me sd \n7f855e01c000-7f855e068000 r--p 00169000 103:01 2212                      /usr/lib/x86_64-linux-gnu/libc-2.28.so\nSize:                304 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 128 kB\nPss:                   4 kB\nShared_Clean:        128 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          128 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855e068000-7f855e069000 ---p 001b5000 103:01 2212                      /usr/lib/x86_64-linux-gnu/libc-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f855e069000-7f855e06d000 r--p 001b5000 103:01 2212                      /usr/lib/x86_64-linux-gnu/libc-2.28.so\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                  16 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        16 kB\nReferenced:           16 kB\nAnonymous:            16 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    
0\nProtectionKey:         0\nVmFlags: rd mr mw me ac sd \n7f855e06d000-7f855e06f000 rw-p 001b9000 103:01 2212                      /usr/lib/x86_64-linux-gnu/libc-2.28.so\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855e06f000-7f855e073000 rw-p 00000000 00:00 0 \nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                  12 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        12 kB\nReferenced:           12 kB\nAnonymous:            12 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855e073000-7f855e076000 r--p 00000000 103:01 2514                      /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855e076000-7f855e07e000 r-xp 00003000 103:01 2514                      /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2\nSize:                 32 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  32 kB\nPss:                   2 kB\nShared_Clean:         32 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           32 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd ex mr mw me sd \n7f855e07e000-7f855e082000 r--p 0000b000 103:01 2514                      /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:           
       0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855e082000-7f855e083000 r--p 0000e000 103:01 2514                      /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me ac sd \n7f855e083000-7f855e084000 rw-p 0000f000 103:01 2514                      /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855e084000-7f855e08a000 r--p 00000000 103:01 2228                      /usr/lib/x86_64-linux-gnu/libpthread-2.28.so\nSize:                 24 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  24 kB\nPss:                   1 kB\nShared_Clean:         24 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           24 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855e08a000-7f855e099000 r-xp 00006000 103:01 2228                      /usr/lib/x86_64-linux-gnu/libpthread-2.28.so\nSize:                 60 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  60 kB\nPss:                   2 kB\nShared_Clean:         60 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           60 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd ex mr mw me sd \n7f855e099000-7f855e09f000 r--p 00015000 103:01 2228                      /usr/lib/x86_64-linux-gnu/libpthread-2.28.so\nSize:                 24 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 
kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855e09f000-7f855e0a0000 r--p 0001a000 103:01 2228                      /usr/lib/x86_64-linux-gnu/libpthread-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me ac sd \n7f855e0a0000-7f855e0a1000 rw-p 0001b000 103:01 2228                      /usr/lib/x86_64-linux-gnu/libpthread-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855e0a1000-7f855e0a5000 rw-p 00000000 00:00 0 \nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855e0a5000-7f855e0a9000 r--p 00000000 103:01 2229                      /usr/lib/x86_64-linux-gnu/libresolv-2.28.so\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                   2 kB\nShared_Clean:         16 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           16 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855e0a9000-7f855e0b6000 r-xp 00004000 103:01 2229                      /usr/lib/x86_64-linux-gnu/libresolv-2.28.so\nSize:                 52 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  52 kB\nPss:                   7 kB\nShared_Clean:         52 kB\nShared_Dirty:          0 kB\nPrivate_Clean:    
     0 kB\nPrivate_Dirty:         0 kB\nReferenced:           52 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd ex mr mw me sd \n7f855e0b6000-7f855e0ba000 r--p 00011000 103:01 2229                      /usr/lib/x86_64-linux-gnu/libresolv-2.28.so\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me sd \n7f855e0ba000-7f855e0bb000 ---p 00015000 103:01 2229                      /usr/lib/x86_64-linux-gnu/libresolv-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: mr mw me sd \n7f855e0bb000-7f855e0bc000 r--p 00015000 103:01 2229                      /usr/lib/x86_64-linux-gnu/libresolv-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me ac sd \n7f855e0bc000-7f855e0bd000 rw-p 00016000 103:01 2229                      /usr/lib/x86_64-linux-gnu/libresolv-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855e0bd000-7f855e0c1000 rw-p 00000000 00:00 0 \nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:               
    8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7f855e0cd000-7f855e0ce000 r--p 00000000 103:01 2204                      /usr/lib/x86_64-linux-gnu/ld-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me dw sd \n7f855e0ce000-7f855e0ec000 r-xp 00001000 103:01 2204                      /usr/lib/x86_64-linux-gnu/ld-2.28.so\nSize:                120 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 120 kB\nPss:                   3 kB\nShared_Clean:        120 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          120 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd ex mr mw me dw sd \n7f855e0ec000-7f855e0f4000 r--p 0001f000 103:01 2204                      /usr/lib/x86_64-linux-gnu/ld-2.28.so\nSize:                 32 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  32 kB\nPss:                   1 kB\nShared_Clean:         32 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           32 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me dw sd \n7f855e0f4000-7f855e0f5000 r--p 00026000 103:01 2204                      /usr/lib/x86_64-linux-gnu/ld-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr mw me dw ac sd \n7f855e0f5000-7f855e0f6000 rw-p 00027000 103:01 2204                      
/usr/lib/x86_64-linux-gnu/ld-2.28.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd wr mr mw me dw ac sd \n7f855e0f6000-7f855e0f7000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me ac sd \n7fffd54dc000-7fffd54fd000 rw-p 00000000 00:00 0                          [stack]\nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                  16 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        16 kB\nReferenced:           16 kB\nAnonymous:            16 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nProtectionKey:         0\nVmFlags: rd wr mr mw me gd ac \n7fffd556f000-7fffd5572000 r--p 00000000 00:00 0                          [vvar]\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd mr pf io de dd sd \n7fffd5572000-7fffd5574000 r-xp 00000000 00:00 0                          [vdso]\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nProtectionKey:         0\nVmFlags: rd ex mr mw me de sd \n"
  },
  {
    "path": "lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/cpu.max",
    "content": "max 100000\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/io.stat",
    "content": "259:4 rbytes=12288 wbytes=123613184 rios=3 wios=482 dbytes=0 dios=0\n254:0 rbytes=12288 wbytes=123613184 rios=3 wios=482 dbytes=0 dios=0\n259:0 rbytes=4071424 wbytes=38789120 rios=248 wios=157 dbytes=0 dios=0\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/cpu.stat",
    "content": "usage_usec 2670017\nuser_usec 1381923\nsystem_usec 1288094\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/memory.current",
    "content": "133386240\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/memory.stat",
    "content": "anon 16777216\nfile 109891584\nkernel_stack 98304\nslab 5595136\nsock 0\nshmem 0\nfile_mapped 23924736\nfile_dirty 7163904\nfile_writeback 135168\ninactive_anon 0\nactive_anon 16818176\ninactive_file 108355584\nactive_file 1560576\nunevictable 0\nslab_reclaimable 4489216\nslab_unreclaimable 1105920\npgfault 67947\npgmajfault 0\npgrefill 0\npgscan 0\npgsteal 0\npgactivate 0\npgdeactivate 0\npglazyfree 0\npglazyfreed 0\nworkingset_refault 0\nworkingset_activate 0\nworkingset_nodereclaim 0\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian10/sys/fs/cgroup/user.slice/user-1000.slice/session-7.scope/memory.swap.current",
    "content": "0\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian11/proc/4153022/cgroup",
    "content": "0::/user.slice/user-1000.slice/session-5424.scope\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian11/proc/4153022/cpuset",
    "content": "/user.slice\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian11/proc/4153022/net/dev",
    "content": "Inter-|   Receive                                                |  Transmit\n face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed\n    lo: 161155690314 90375905    0    0    0     0          0         0 161155690314 90375905    0    0    0     0       0          0\n  ens3: 163923112 1884265    0    0    0     0          0         0 43218121  239766    0    0    0     0       0          0\n  ens9: 24574250159 83081845    0    0    0     0          0         0 49312502353 91591944    0    0    0     0       0          0\ndocker0: 6958795  109630    0    0    0     0          0         0 671569248  187319    0    0    0     0       0          0\ntailscale0: 82192857  118550    0    0    0     0          0         0  6898232  100243    0    0    0     0       0          0\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian11/proc/cpuinfo",
    "content": "processor\t: 0\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 61\nmodel name\t: Intel Core Processor (Broadwell)\nstepping\t: 2\nmicrocode\t: 0x1\ncpu MHz\t\t: 3292.366\ncache size\t: 4096 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 0\ncpu cores\t: 8\napicid\t\t: 0\ninitial apicid\t: 0\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 13\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.73\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 40 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 1\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 61\nmodel name\t: Intel Core Processor (Broadwell)\nstepping\t: 2\nmicrocode\t: 0x1\ncpu MHz\t\t: 3292.366\ncache size\t: 4096 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 1\ncpu cores\t: 8\napicid\t\t: 1\ninitial apicid\t: 1\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 13\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.73\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 40 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 2\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 61\nmodel name\t: Intel Core Processor (Broadwell)\nstepping\t: 2\nmicrocode\t: 0x1\ncpu MHz\t\t: 3292.366\ncache size\t: 4096 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 2\ncpu cores\t: 8\napicid\t\t: 2\ninitial apicid\t: 2\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 13\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nvmx flags\t: vnmi 
preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.73\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 40 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 3\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 61\nmodel name\t: Intel Core Processor (Broadwell)\nstepping\t: 2\nmicrocode\t: 0x1\ncpu MHz\t\t: 3292.366\ncache size\t: 4096 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 3\ncpu cores\t: 8\napicid\t\t: 3\ninitial apicid\t: 3\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 13\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.73\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 40 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 4\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 61\nmodel name\t: Intel Core Processor (Broadwell)\nstepping\t: 2\nmicrocode\t: 0x1\ncpu MHz\t\t: 3292.366\ncache size\t: 4096 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 4\ncpu cores\t: 8\napicid\t\t: 4\ninitial apicid\t: 4\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 13\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.73\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 40 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 5\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 61\nmodel name\t: Intel Core Processor (Broadwell)\nstepping\t: 2\nmicrocode\t: 0x1\ncpu MHz\t\t: 3292.366\ncache size\t: 4096 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 5\ncpu cores\t: 8\napicid\t\t: 5\ninitial apicid\t: 5\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 13\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq 
vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.73\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 40 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 6\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 61\nmodel name\t: Intel Core Processor (Broadwell)\nstepping\t: 2\nmicrocode\t: 0x1\ncpu MHz\t\t: 3292.366\ncache size\t: 4096 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 6\ncpu cores\t: 8\napicid\t\t: 6\ninitial apicid\t: 6\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 13\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.73\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 40 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 7\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 61\nmodel name\t: Intel Core Processor (Broadwell)\nstepping\t: 2\nmicrocode\t: 0x1\ncpu MHz\t\t: 3292.366\ncache size\t: 4096 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 7\ncpu cores\t: 8\napicid\t\t: 7\ninitial apicid\t: 7\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 13\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.73\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 40 bits physical, 48 bits virtual\npower management:\n\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian11/proc/mounts",
    "content": "sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\nproc /proc proc rw,nosuid,nodev,noexec,relatime 0 0\nudev /dev devtmpfs rw,nosuid,relatime,size=4055540k,nr_inodes=1013885,mode=755 0 0\ndevpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0\ntmpfs /run tmpfs rw,nosuid,nodev,noexec,relatime,size=814692k,mode=755 0 0\n/dev/vdb1 / ext4 rw,relatime,errors=remount-ro 0 0\nsecurityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0\ntmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0\ntmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0\ncgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate,memory_recursiveprot 0 0\npstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0\nnone /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0\nsystemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=30,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=9589 0 0\nhugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0\nmqueue /dev/mqueue mqueue rw,nosuid,nodev,noexec,relatime 0 0\ndebugfs /sys/kernel/debug debugfs rw,nosuid,nodev,noexec,relatime 0 0\ntracefs /sys/kernel/tracing tracefs rw,nosuid,nodev,noexec,relatime 0 0\nconfigfs /sys/kernel/config configfs rw,nosuid,nodev,noexec,relatime 0 0\nfusectl /sys/fs/fuse/connections fusectl rw,nosuid,nodev,noexec,relatime 0 0\nnone /tmp tmpfs rw,relatime 0 0\ntmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=814688k,nr_inodes=203672,mode=700,uid=1000,gid=1000 0 0\nbinfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0\narvados-client /home/tom/keep fuse.arvados-client rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian11/proc/self/smaps",
    "content": "00400000-00403000 r--p 00000000 fe:11 1200832                            /home/tom/.cache/arvados-build/GOPATH/bin/arvados-server\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                  12 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:        12 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me dw sd \n00403000-01776000 r-xp 00003000 fe:11 1200832                            /home/tom/.cache/arvados-build/GOPATH/bin/arvados-server\nSize:              19916 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:               12492 kB\nPss:               12492 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:     12492 kB\nPrivate_Dirty:         0 kB\nReferenced:        12492 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me dw sd \n01776000-02f28000 r--p 01376000 fe:11 1200832                            /home/tom/.cache/arvados-build/GOPATH/bin/arvados-server\nSize:              24264 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:               10984 kB\nPss:               10984 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:     10984 kB\nPrivate_Dirty:         0 kB\nReferenced:        10984 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me dw sd \n02f28000-02f29000 r--p 02b27000 fe:11 1200832                            /home/tom/.cache/arvados-build/GOPATH/bin/arvados-server\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me dw ac sd \n02f29000-02fc0000 rw-p 02b28000 fe:11 1200832                            /home/tom/.cache/arvados-build/GOPATH/bin/arvados-server\nSize:                604 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 480 kB\nPss:                 480 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:       304 kB\nPrivate_Dirty:       176 kB\nReferenced:          480 kB\nAnonymous:           176 kB\nLazyFree:              0 kB\nAnonHugePages:    
     0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me dw ac sd \n02fc0000-03007000 rw-p 00000000 00:00 0 \nSize:                284 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 100 kB\nPss:                 100 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       100 kB\nReferenced:          100 kB\nAnonymous:           100 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n03a45000-03a66000 rw-p 00000000 00:00 0                                  [heap]\nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \nc000000000-c000800000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                5684 kB\nPss:                5684 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:      5684 kB\nReferenced:         5684 kB\nAnonymous:          5684 kB\nLazyFree:              0 kB\nAnonHugePages:      2048 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \nc000800000-c004000000 ---p 00000000 00:00 0 \nSize:              57344 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me sd \n7f3efc000000-7f3efc021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:         
         0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f3efc021000-7f3f00000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f3f00000000-7f3f00021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f3f00021000-7f3f04000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f3f077ff000-7f3f07800000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f3f07800000-7f3f08000000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                  12 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        12 kB\nReferenced:           12 kB\nAnonymous:            12 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f3f08000000-7f3f08021000 rw-p 00000000 00:00 
0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f3f08021000-7f3f0c000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f3f0c000000-7f3f0c021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f3f0c021000-7f3f10000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f3f10000000-7f3f10021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f3f10021000-7f3f14000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 
kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f3f14000000-7f3f14021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f3f14021000-7f3f18000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f3f18000000-7f3f18021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f3f18021000-7f3f1c000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f3f1c000000-7f3f1c021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:         
    4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f3f1c021000-7f3f20000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f3f20000000-7f3f20021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f3f20021000-7f3f24000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f3f24361000-7f3f24421000 rw-p 00000000 00:00 0 \nSize:                768 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 276 kB\nPss:                 276 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       276 kB\nReferenced:          276 kB\nAnonymous:           276 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f24421000-7f3f24422000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 
kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f3f24422000-7f3f24c62000 rw-p 00000000 00:00 0 \nSize:               8448 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                  16 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        16 kB\nReferenced:           16 kB\nAnonymous:            16 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f3f24c62000-7f3f24c63000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f3f24c63000-7f3f25543000 rw-p 00000000 00:00 0 \nSize:               9088 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 720 kB\nPss:                 720 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       720 kB\nReferenced:          720 kB\nAnonymous:           720 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f3f25543000-7f3f25544000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f3f25544000-7f3f25ee4000 rw-p 00000000 00:00 0 \nSize:               9856 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 288 kB\nPss:                 288 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       288 kB\nReferenced:          288 kB\nAnonymous:           288 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd 
\n7f3f25ee4000-7f3f25ee5000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f3f25ee5000-7f3f266e5000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f3f266e5000-7f3f266e6000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f3f266e6000-7f3f26f26000 rw-p 00000000 00:00 0 \nSize:               8448 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 264 kB\nPss:                 264 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       264 kB\nReferenced:          264 kB\nAnonymous:           264 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f3f26f26000-7f3f26f27000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f3f26f27000-7f3f27727000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:     
              8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f3f27727000-7f3f27728000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f3f27728000-7f3f27f28000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f3f27f28000-7f3f27f29000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f3f27f29000-7f3f2aa00000 rw-p 00000000 00:00 0 \nSize:              43868 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 164 kB\nPss:                 164 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       164 kB\nReferenced:          164 kB\nAnonymous:           164 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f3f2aa00000-7f3f2ac00000 rw-p 00000000 00:00 0 \nSize:               2048 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 
kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd hg \n7f3f2ac00000-7f3f2ad65000 rw-p 00000000 00:00 0 \nSize:               1428 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f2ad65000-7f3f3b2de000 ---p 00000000 00:00 0 \nSize:             267748 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me sd \n7f3f3b2de000-7f3f3b2df000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f3b2df000-7f3f4d18e000 ---p 00000000 00:00 0 \nSize:             293564 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me sd \n7f3f4d18e000-7f3f4d18f000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb: 
       0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4d18f000-7f3f4f564000 ---p 00000000 00:00 0 \nSize:              36692 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me sd \n7f3f4f564000-7f3f4f565000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4f565000-7f3f4f9de000 ---p 00000000 00:00 0 \nSize:               4580 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me sd \n7f3f4f9de000-7f3f4f9df000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4f9df000-7f3f4fa5e000 ---p 00000000 00:00 0 \nSize:                508 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me 
sd \n7f3f4fa5e000-7f3f4fac1000 rw-p 00000000 00:00 0 \nSize:                396 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  72 kB\nPss:                  72 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        72 kB\nReferenced:           72 kB\nAnonymous:            72 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4fac1000-7f3f4fac3000 r--p 00000000 fe:11 131148                     /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   0 kB\nShared_Clean:          8 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            8 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4fac3000-7f3f4fac6000 r-xp 00002000 fe:11 131148                     /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f3f4fac6000-7f3f4fac7000 r--p 00005000 fe:11 131148                     /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4fac7000-7f3f4fac8000 r--p 00005000 fe:11 131148                     /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss: 
              0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f3f4fac8000-7f3f4fac9000 rw-p 00006000 fe:11 131148                     /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4fac9000-7f3f4facb000 rw-p 00000000 00:00 0 \nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4facb000-7f3f4facc000 r--p 00000000 fe:11 131382                     /lib/x86_64-linux-gnu/libdl-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4facc000-7f3f4face000 r-xp 00001000 fe:11 131382                     /lib/x86_64-linux-gnu/libdl-2.31.so\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   0 kB\nShared_Clean:          8 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            8 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f3f4face000-7f3f4facf000 r--p 00003000 fe:11 131382                     /lib/x86_64-linux-gnu/libdl-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 
kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4facf000-7f3f4fad0000 r--p 00003000 fe:11 131382                     /lib/x86_64-linux-gnu/libdl-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f3f4fad0000-7f3f4fad1000 rw-p 00004000 fe:11 131382                     /lib/x86_64-linux-gnu/libdl-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4fad1000-7f3f4fad4000 r--p 00000000 fe:11 131116                     /lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4fad4000-7f3f4fadc000 r-xp 00003000 fe:11 131116                     /lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 32 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  32 kB\nPss:                   1 kB\nShared_Clean:         32 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           32 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f3f4fadc000-7f3f4faf0000 r--p 0000b000 fe:11 131116                     /lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 80 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 
kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4faf0000-7f3f4faf1000 r--p 0001e000 fe:11 131116                     /lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f3f4faf1000-7f3f4faf2000 rw-p 0001f000 fe:11 131116                     /lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4faf2000-7f3f4fb02000 rw-p 00000000 00:00 0 \nSize:                 64 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4fb02000-7f3f4fb24000 r--p 00000000 fe:11 131364                     /lib/x86_64-linux-gnu/libc-2.31.so\nSize:                136 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 136 kB\nPss:                   2 kB\nShared_Clean:        136 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          136 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4fb24000-7f3f4fc7d000 r-xp 00022000 fe:11 131364                     /lib/x86_64-linux-gnu/libc-2.31.so\nSize:               1380 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 688 kB\nPss:                  13 kB\nShared_Clean:        688 kB\nShared_Dirty:          0 
kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          688 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f3f4fc7d000-7f3f4fccc000 r--p 0017b000 fe:11 131364                     /lib/x86_64-linux-gnu/libc-2.31.so\nSize:                316 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 128 kB\nPss:                   2 kB\nShared_Clean:        128 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          128 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4fccc000-7f3f4fcd0000 r--p 001c9000 fe:11 131364                     /lib/x86_64-linux-gnu/libc-2.31.so\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                  16 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        16 kB\nReferenced:           16 kB\nAnonymous:            16 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f3f4fcd0000-7f3f4fcd2000 rw-p 001cd000 fe:11 131364                     /lib/x86_64-linux-gnu/libc-2.31.so\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4fcd2000-7f3f4fcd6000 rw-p 00000000 00:00 0 \nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4fcd6000-7f3f4fcd9000 r--p 00000000 fe:11 131147                     /lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss: 
                  0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4fcd9000-7f3f4fce2000 r-xp 00003000 fe:11 131147                     /lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                 36 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  36 kB\nPss:                   1 kB\nShared_Clean:         36 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           36 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f3f4fce2000-7f3f4fce6000 r--p 0000c000 fe:11 131147                     /lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4fce6000-7f3f4fce7000 r--p 0000f000 fe:11 131147                     /lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f3f4fce7000-7f3f4fce8000 rw-p 00010000 fe:11 131147                     /lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4fce8000-7f3f4fcee000 r--p 00000000 fe:11 131619                     
/lib/x86_64-linux-gnu/libpthread-2.31.so\nSize:                 24 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  24 kB\nPss:                   0 kB\nShared_Clean:         24 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           24 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4fcee000-7f3f4fcfe000 r-xp 00006000 fe:11 131619                     /lib/x86_64-linux-gnu/libpthread-2.31.so\nSize:                 64 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  64 kB\nPss:                   1 kB\nShared_Clean:         64 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           64 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f3f4fcfe000-7f3f4fd04000 r--p 00016000 fe:11 131619                     /lib/x86_64-linux-gnu/libpthread-2.31.so\nSize:                 24 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4fd04000-7f3f4fd05000 r--p 0001b000 fe:11 131619                     /lib/x86_64-linux-gnu/libpthread-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f3f4fd05000-7f3f4fd06000 rw-p 0001c000 fe:11 131619                     /lib/x86_64-linux-gnu/libpthread-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 
kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4fd06000-7f3f4fd0a000 rw-p 00000000 00:00 0 \nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4fd0a000-7f3f4fd0e000 r--p 00000000 fe:11 133617                     /lib/x86_64-linux-gnu/libresolv-2.31.so\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                   1 kB\nShared_Clean:         16 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           16 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4fd0e000-7f3f4fd1c000 r-xp 00004000 fe:11 133617                     /lib/x86_64-linux-gnu/libresolv-2.31.so\nSize:                 56 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  56 kB\nPss:                   4 kB\nShared_Clean:         56 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           56 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f3f4fd1c000-7f3f4fd20000 r--p 00012000 fe:11 133617                     /lib/x86_64-linux-gnu/libresolv-2.31.so\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f3f4fd20000-7f3f4fd21000 r--p 00015000 fe:11 133617                     /lib/x86_64-linux-gnu/libresolv-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:      
  0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f3f4fd21000-7f3f4fd22000 rw-p 00016000 fe:11 133617                     /lib/x86_64-linux-gnu/libresolv-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4fd22000-7f3f4fd26000 rw-p 00000000 00:00 0 \nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f3f4fd34000-7f3f4fd35000 r--p 00000000 fe:11 131157                     /lib/x86_64-linux-gnu/ld-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me dw sd \n7f3f4fd35000-7f3f4fd55000 r-xp 00001000 fe:11 131157                     /lib/x86_64-linux-gnu/ld-2.31.so\nSize:                128 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 128 kB\nPss:                   1 kB\nShared_Clean:        128 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          128 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me dw sd \n7f3f4fd55000-7f3f4fd5d000 r--p 00021000 fe:11 131157                     /lib/x86_64-linux-gnu/ld-2.31.so\nSize:                 32 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  32 kB\nPss:                   0 kB\nShared_Clean:         32 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           32 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 
kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me dw sd \n7f3f4fd5e000-7f3f4fd5f000 r--p 00029000 fe:11 131157                     /lib/x86_64-linux-gnu/ld-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me dw ac sd \n7f3f4fd5f000-7f3f4fd60000 rw-p 0002a000 fe:11 131157                     /lib/x86_64-linux-gnu/ld-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me dw ac sd \n7f3f4fd60000-7f3f4fd61000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7fff6be6f000-7fff6be90000 rw-p 00000000 00:00 0                          [stack]\nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  20 kB\nPss:                  20 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        20 kB\nReferenced:           20 kB\nAnonymous:            20 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me gd ac \n7fff6bee1000-7fff6bee5000 r--p 00000000 00:00 0                          [vvar]\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 
kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr pf io de dd sd \n7fff6bee5000-7fff6bee7000 r-xp 00000000 00:00 0                          [vdso]\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me de sd \n"
  },
  {
    "path": "lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/cpu.max",
    "content": "max 100000\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/cpuset.cpus.effective",
    "content": "0-7\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/io.stat",
    "content": "7:1 rbytes=7218176 wbytes=0 rios=240 wios=0 dbytes=0 dios=0\n7:2 rbytes=2115584 wbytes=0 rios=64 wios=0 dbytes=0 dios=0\n7:0 rbytes=218925056 wbytes=0 rios=7382 wios=0 dbytes=0 dios=0\n254:16 rbytes=268548554752 wbytes=121274503168 rios=32054623 wios=8793862 dbytes=0 dios=0\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/cpu.stat",
    "content": "usage_usec 935017572836\nuser_usec 441034348821\nsystem_usec 493983224015\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/memory.current",
    "content": "3662082048\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/memory.stat",
    "content": "anon 869666816\nfile 2622799872\nkernel_stack 4276224\npercpu 0\nsock 0\nshmem 849936384\nfile_mapped 57311232\nfile_dirty 270336\nfile_writeback 135168\nanon_thp 553648128\ninactive_anon 391749632\nactive_anon 1332850688\ninactive_file 243453952\nactive_file 1529008128\nunevictable 0\nslab_reclaimable 135355928\nslab_unreclaimable 8377048\nslab 143732976\nworkingset_refault_anon 84645\nworkingset_refault_file 7429752\nworkingset_activate_anon 15444\nworkingset_activate_file 4704645\nworkingset_restore_anon 1551\nworkingset_restore_file 2826087\nworkingset_nodereclaim 0\npgfault 1688981547\npgmajfault 322476\npgrefill 24091451\npgscan 32183888\npgsteal 18202144\npgactivate 32572518\npgdeactivate 13641072\npglazyfree 1254\npglazyfreed 0\nthp_fault_alloc 149061\nthp_collapse_alloc 3267\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian11/sys/fs/cgroup/user.slice/user-1000.slice/session-5424.scope/memory.swap.current",
    "content": "2462470144\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian12/proc/1115883/cgroup",
    "content": "0::/user.slice/user-1000.slice/session-4.scope\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian12/proc/1115883/cpuset",
    "content": "/user.slice\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian12/proc/1115883/net/dev",
    "content": "Inter-|   Receive                                                |  Transmit\n face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed\n    lo: 44467931   32124    0    0    0     0          0         0 44467931   32124    0    0    0     0       0          0\nenp4s0: 76312173774 219652689    0   33    0     0          0    226563 52498381226 153789479    0    0    0     0       0          0\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian12/proc/cpuinfo",
    "content": "processor\t: 0\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3591.771\ncache size\t: 6144 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 0\ncpu cores\t: 4\napicid\t\t: 0\ninitial apicid\t: 0\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown\nbogomips\t: 6584.91\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 1\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3591.750\ncache size\t: 6144 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 0\ncpu cores\t: 4\napicid\t\t: 1\ninitial apicid\t: 1\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown\nbogomips\t: 6584.91\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 2\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3602.533\ncache size\t: 6144 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 1\ncpu cores\t: 4\napicid\t\t: 2\ninitial apicid\t: 2\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave 
avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown\nbogomips\t: 6584.91\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 3\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3607.600\ncache size\t: 6144 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 1\ncpu cores\t: 4\napicid\t\t: 3\ninitial apicid\t: 3\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown\nbogomips\t: 6584.91\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 4\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3600.169\ncache size\t: 6144 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 2\ncpu cores\t: 4\napicid\t\t: 4\ninitial apicid\t: 4\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown\nbogomips\t: 6584.91\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 5\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 
3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3609.318\ncache size\t: 6144 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 2\ncpu cores\t: 4\napicid\t\t: 5\ninitial apicid\t: 5\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown\nbogomips\t: 6584.91\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 6\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3591.905\ncache size\t: 6144 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 3\ncpu cores\t: 4\napicid\t\t: 6\ninitial apicid\t: 6\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown\nbogomips\t: 6584.91\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 7\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3591.804\ncache size\t: 6144 KB\nphysical id\t: 0\nsiblings\t: 8\ncore id\t\t: 3\ncpu cores\t: 4\napicid\t\t: 7\ninitial apicid\t: 7\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase 
tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap intel_pt xsaveopt dtherm ida arat pln pts\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit srbds mmio_unknown\nbogomips\t: 6584.91\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian12/proc/mounts",
    "content": "sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\nproc /proc proc rw,relatime 0 0\nudev /dev devtmpfs rw,nosuid,relatime,size=16346052k,nr_inodes=4086513,mode=755,inode64 0 0\ndevpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0\ntmpfs /run tmpfs rw,nosuid,nodev,noexec,relatime,size=3275420k,mode=755,inode64 0 0\n/dev/mapper/slab1-root / ext4 rw,relatime,errors=remount-ro,stripe=8191 0 0\nsecurityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0\ntmpfs /dev/shm tmpfs rw,nosuid,nodev,inode64 0 0\ntmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k,inode64 0 0\ncgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate,memory_recursiveprot 0 0\npstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0\nbpf /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0\nsystemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=29,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=16801 0 0\ntracefs /sys/kernel/tracing tracefs rw,nosuid,nodev,noexec,relatime 0 0\nmqueue /dev/mqueue mqueue rw,nosuid,nodev,noexec,relatime 0 0\ndebugfs /sys/kernel/debug debugfs rw,nosuid,nodev,noexec,relatime 0 0\nhugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0\nfusectl /sys/fs/fuse/connections fusectl rw,nosuid,nodev,noexec,relatime 0 0\nconfigfs /sys/kernel/config configfs rw,nosuid,nodev,noexec,relatime 0 0\nramfs /run/credentials/systemd-sysusers.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0\nramfs /run/credentials/systemd-sysctl.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0\nramfs /run/credentials/systemd-tmpfiles-setup-dev.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0\n/dev/mapper/slab1-home /home ext4 rw,relatime,errors=remount-ro 0 0\n/dev/md0p1 /boot ext4 rw,relatime,stripe=8191 0 0\nramfs /run/credentials/systemd-tmpfiles-setup.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0\nbinfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0\ntmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=3275416k,nr_inodes=818854,mode=700,uid=1000,gid=1000,inode64 0 0\ngvfsd-fuse /run/user/1000/gvfs fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0\n/dev/mapper/sea5a /sea5a ext4 rw,relatime 0 0\nportal /run/user/1000/doc fuse.portal rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0\ncurve:/ /tmp/c fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0\ntmpfs /run/user/0 tmpfs rw,nosuid,nodev,relatime,size=3275416k,nr_inodes=818854,mode=700,inode64 0 0\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian12/proc/self/smaps",
    "content": "00400000-00403000 r--p 00000000 fd:01 2228820                            /tmp/arvados-server\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                  12 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:        12 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n00403000-01776000 r-xp 00003000 fd:01 2228820                            /tmp/arvados-server\nSize:              19916 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:               12492 kB\nPss:               12492 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:     12492 kB\nPrivate_Dirty:         0 kB\nReferenced:        12492 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n01776000-02f28000 r--p 01376000 fd:01 2228820                            /tmp/arvados-server\nSize:              24264 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:               10856 kB\nPss:               10856 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:     10856 kB\nPrivate_Dirty:         0 kB\nReferenced:        10856 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n02f28000-02f29000 r--p 02b27000 fd:01 2228820                            /tmp/arvados-server\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n02f29000-02fc0000 rw-p 02b28000 fd:01 2228820                            /tmp/arvados-server\nSize:                604 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 480 kB\nPss:                 480 kB\nPss_Dirty:           176 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:       304 kB\nPrivate_Dirty:       176 kB\nReferenced:          480 kB\nAnonymous:           176 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 
kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n02fc0000-03007000 rw-p 00000000 00:00 0 \nSize:                284 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 100 kB\nPss:                 100 kB\nPss_Dirty:           100 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       100 kB\nReferenced:          100 kB\nAnonymous:           100 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n04b9f000-04bc0000 rw-p 00000000 00:00 0                                  [heap]\nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \nc000000000-c000c00000 rw-p 00000000 00:00 0 \nSize:              12288 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                5596 kB\nPss:                5596 kB\nPss_Dirty:          5596 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:      5596 kB\nReferenced:         5596 kB\nAnonymous:          5596 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \nc000c00000-c004000000 ---p 00000000 00:00 0 \nSize:              53248 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me sd \n7f716c000000-7f716c021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        
0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f716c021000-7f7170000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f7170000000-7f7170021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f7170021000-7f7174000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f7174000000-7f7174021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f7174021000-7f7178000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 
kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f717a7fd000-7f717a7fe000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f717a7fe000-7f717affe000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nPss_Dirty:             8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f717affe000-7f717afff000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f717afff000-7f717b7ff000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nPss_Dirty:             8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f717b7ff000-7f717b800000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 
kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f717b800000-7f717c000000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                  12 kB\nPss_Dirty:            12 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        12 kB\nReferenced:           12 kB\nAnonymous:            12 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f717c000000-7f717c021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f717c021000-7f7180000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f7180000000-7f7180021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f7180021000-7f7184000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb: 
       0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f7184000000-7f7184021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f7184021000-7f7188000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f7188000000-7f7188021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f7188021000-7f718c000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f718c000000-7f718c021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 
kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f718c021000-7f7190000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f7190000000-7f7190021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f7190021000-7f7194000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f7194000000-7f7194021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nPss_Dirty:             8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f7194021000-7f7198000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 
kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f71985d9000-7f7198769000 rw-p 00000000 00:00 0 \nSize:               1600 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 968 kB\nPss:                 968 kB\nPss_Dirty:           968 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       968 kB\nReferenced:          968 kB\nAnonymous:           968 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f7198769000-7f719876a000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f719876a000-7f7198faa000 rw-p 00000000 00:00 0 \nSize:               8448 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  24 kB\nPss:                  24 kB\nPss_Dirty:            24 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        24 kB\nReferenced:           24 kB\nAnonymous:            24 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f7198faa000-7f7198fab000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f7198fab000-7f71997fb000 rw-p 00000000 00:00 0 \nSize:               8512 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  32 kB\nPss:                  32 kB\nPss_Dirty:            32 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        32 kB\nReferenced:           32 kB\nAnonymous:            32 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 
kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f71997fb000-7f71997fc000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f71997fc000-7f7199ffc000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nPss_Dirty:             8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f7199ffc000-7f7199ffd000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f7199ffd000-7f719a7fd000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nPss_Dirty:             8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f719a7fd000-7f719a7fe000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:              
    0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f719a7fe000-7f719affe000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nPss_Dirty:             8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f719affe000-7f719afff000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f719afff000-7f719b7ff000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nPss_Dirty:             8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f719b7ff000-7f719b800000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f719b800000-7f719c000000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nPss_Dirty:             8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:              
 0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f719c000000-7f719c021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f719c021000-7f71a0000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me nr sd \n7f71a000b000-7f71a01eb000 rw-p 00000000 00:00 0 \nSize:               1920 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 524 kB\nPss:                 524 kB\nPss_Dirty:           524 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       524 kB\nReferenced:          524 kB\nAnonymous:           524 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71a01eb000-7f71a01ec000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f71a01ec000-7f71a2e00000 rw-p 00000000 00:00 0 \nSize:              45136 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 168 kB\nPss:                 168 kB\nPss_Dirty:           168 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       168 kB\nReferenced:          168 kB\nAnonymous:           168 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:      
          0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd \n7f71a2e00000-7f71a3000000 rw-p 00000000 00:00 0 \nSize:               2048 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd hg \n7f71a3000000-7f71a3018000 rw-p 00000000 00:00 0 \nSize:                 96 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71a3018000-7f71b3591000 ---p 00000000 00:00 0 \nSize:             267748 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me sd \n7f71b3591000-7f71b3592000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71b3592000-7f71c5441000 ---p 00000000 00:00 0 \nSize:             293564 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                
0 kB\nTHPeligible:    1\nVmFlags: mr mw me sd \n7f71c5441000-7f71c5442000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c5442000-7f71c7817000 ---p 00000000 00:00 0 \nSize:              36692 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me sd \n7f71c7817000-7f71c7818000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c7818000-7f71c7c91000 ---p 00000000 00:00 0 \nSize:               4580 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: mr mw me sd \n7f71c7c91000-7f71c7c92000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    
0\nVmFlags: rd wr mr mw me ac sd \n7f71c7c92000-7f71c7d11000 ---p 00000000 00:00 0 \nSize:                508 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f71c7d11000-7f71c7d74000 rw-p 00000000 00:00 0 \nSize:                396 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  72 kB\nPss:                  72 kB\nPss_Dirty:            72 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        72 kB\nReferenced:           72 kB\nAnonymous:            72 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c7d74000-7f71c7d76000 r--p 00000000 fd:01 1609774                    /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   2 kB\nPss_Dirty:             0 kB\nShared_Clean:          8 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            8 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c7d76000-7f71c7d79000 r-xp 00002000 fd:01 1609774                    /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   1 kB\nPss_Dirty:             0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f71c7d79000-7f71c7d7a000 r--p 00005000 fd:01 1609774                    /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   1 kB\nPss_Dirty:             0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 
kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c7d7a000-7f71c7d7b000 r--p 00006000 fd:01 1609774                    /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f71c7d7b000-7f71c7d7c000 rw-p 00007000 fd:01 1609774                    /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c7d7c000-7f71c7d7e000 rw-p 00000000 00:00 0 \nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nPss_Dirty:             8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c7d7e000-7f71c7d81000 r--p 00000000 fd:01 1609746                    /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   3 kB\nPss_Dirty:             0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c7d81000-7f71c7d88000 r-xp 00003000 fd:01 1609746                    /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 28 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  28 kB\nPss:                   1 kB\nPss_Dirty:             0 
kB\nShared_Clean:         28 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           28 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f71c7d88000-7f71c7d9d000 r--p 0000a000 fd:01 1609746                    /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 84 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c7d9d000-7f71c7d9e000 r--p 0001e000 fd:01 1609746                    /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f71c7d9e000-7f71c7d9f000 rw-p 0001f000 fd:01 1609746                    /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c7d9f000-7f71c7daf000 rw-p 00000000 00:00 0 \nSize:                 64 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c7daf000-7f71c7dd5000 r--p 
00000000 fd:01 1576589                    /usr/lib/x86_64-linux-gnu/libc.so.6\nSize:                152 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 152 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:        152 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          152 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c7dd5000-7f71c7f2a000 r-xp 00026000 fd:01 1576589                    /usr/lib/x86_64-linux-gnu/libc.so.6\nSize:               1364 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 940 kB\nPss:                   4 kB\nPss_Dirty:             0 kB\nShared_Clean:        940 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          940 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f71c7f2a000-7f71c7f7d000 r--p 0017b000 fd:01 1576589                    /usr/lib/x86_64-linux-gnu/libc.so.6\nSize:                332 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 128 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:        128 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          128 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c7f7d000-7f71c7f81000 r--p 001ce000 fd:01 1576589                    /usr/lib/x86_64-linux-gnu/libc.so.6\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                  16 kB\nPss_Dirty:            16 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        16 kB\nReferenced:           16 kB\nAnonymous:            16 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f71c7f81000-7f71c7f83000 rw-p 001d2000 fd:01 1576589                    /usr/lib/x86_64-linux-gnu/libc.so.6\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nPss_Dirty:             8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 
kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c7f83000-7f71c7f90000 rw-p 00000000 00:00 0 \nSize:                 52 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  20 kB\nPss:                  20 kB\nPss_Dirty:            20 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        20 kB\nReferenced:           20 kB\nAnonymous:            20 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c7f90000-7f71c7f93000 r--p 00000000 fd:01 1609792                    /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   3 kB\nPss_Dirty:             0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c7f93000-7f71c7f9c000 r-xp 00003000 fd:01 1609792                    /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                 36 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  36 kB\nPss:                  11 kB\nPss_Dirty:             0 kB\nShared_Clean:         36 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           36 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f71c7f9c000-7f71c7fa0000 r--p 0000c000 fd:01 1609792                    /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c7fa0000-7f71c7fa1000 r--p 0000f000 fd:01 1609792                    /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 
kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f71c7fa1000-7f71c7fa2000 rw-p 00010000 fd:01 1609792                    /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c7fa2000-7f71c7fa3000 r--p 00000000 fd:01 1609844                    /usr/lib/x86_64-linux-gnu/libpthread.so.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c7fa3000-7f71c7fa4000 r-xp 00001000 fd:01 1609844                    /usr/lib/x86_64-linux-gnu/libpthread.so.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f71c7fa4000-7f71c7fa5000 r--p 00002000 fd:01 1609844                    /usr/lib/x86_64-linux-gnu/libpthread.so.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                
0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c7fa5000-7f71c7fa6000 r--p 00002000 fd:01 1609844                    /usr/lib/x86_64-linux-gnu/libpthread.so.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f71c7fa6000-7f71c7fa7000 rw-p 00003000 fd:01 1609844                    /usr/lib/x86_64-linux-gnu/libpthread.so.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c7fa7000-7f71c7faa000 r--p 00000000 fd:01 1609840                    /usr/lib/x86_64-linux-gnu/libresolv.so.2\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c7faa000-7f71c7fb2000 r-xp 00003000 fd:01 1609840                    /usr/lib/x86_64-linux-gnu/libresolv.so.2\nSize:                 32 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  28 kB\nPss:                   1 kB\nPss_Dirty:             0 kB\nShared_Clean:         28 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           28 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f71c7fb2000-7f71c7fb4000 r--p 0000b000 fd:01 1609840                    /usr/lib/x86_64-linux-gnu/libresolv.so.2\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 
kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c7fb4000-7f71c7fb5000 r--p 0000d000 fd:01 1609840                    /usr/lib/x86_64-linux-gnu/libresolv.so.2\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f71c7fb5000-7f71c7fb6000 rw-p 0000e000 fd:01 1609840                    /usr/lib/x86_64-linux-gnu/libresolv.so.2\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nPss_Dirty:             4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c7fb6000-7f71c7fb8000 rw-p 00000000 00:00 0 \nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c7fbe000-7f71c7fd0000 rw-p 00000000 00:00 0 \nSize:                 72 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  20 kB\nPss:                  20 kB\nPss_Dirty:            20 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        20 kB\nReferenced:           20 kB\nAnonymous:            20 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f71c7fd0000-7f71c7fd1000 r--p 00000000 fd:01 1586742                    /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:        
           4 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c7fd1000-7f71c7ff6000 r-xp 00001000 fd:01 1586742                    /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2\nSize:                148 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 148 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:        148 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          148 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f71c7ff6000-7f71c8000000 r--p 00026000 fd:01 1586742                    /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2\nSize:                 40 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  40 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:         40 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           40 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f71c8000000-7f71c8002000 r--p 00030000 fd:01 1586742                    /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nPss_Dirty:             8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f71c8002000-7f71c8004000 rw-p 00032000 fd:01 1586742                    /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nPss_Dirty:             8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 
kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7fff31879000-7fff3189a000 rw-p 00000000 00:00 0                          [stack]\nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                  16 kB\nPss_Dirty:            16 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        16 kB\nReferenced:           16 kB\nAnonymous:            16 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me gd ac \n7fff3191c000-7fff31920000 r--p 00000000 00:00 0                          [vvar]\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr pf io de dd sd \n7fff31920000-7fff31922000 r-xp 00000000 00:00 0                          [vdso]\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nPss_Dirty:             0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me de sd \n"
  },
  {
    "path": "lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/cpuset.cpus.effective",
    "content": "0-7\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/io.stat",
    "content": "253:2 rbytes=2110803968 wbytes=8333664256 rios=515333 wios=1682507 dbytes=0 dios=0\n8:32 rbytes=50547765248 wbytes=0 rios=12340763 wios=0 dbytes=0 dios=0\n253:16 rbytes=50547765248 wbytes=3666890752 rios=12340763 wios=566510 dbytes=0 dios=0\n253:1 rbytes=9051578368 wbytes=3648737280 rios=879731 wios=167625 dbytes=0 dios=0\n8:16 rbytes=21434400768 wbytes=0 rios=2586700 wios=0 dbytes=0 dios=0\n9:0 rbytes=21434400768 wbytes=0 rios=2586700 wios=1033447 dbytes=0 dios=0\n253:0 rbytes=21433970688 wbytes=107989528576 rios=2586167 wios=5402495 dbytes=0 dios=0\n253:3 rbytes=10271588352 wbytes=181110276096 rios=1191103 wios=15544929 dbytes=0 dios=0\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/cpu.max",
    "content": "max 100000\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/cpu.stat",
    "content": "usage_usec 1055978930168\nuser_usec 980146248781\nsystem_usec 75832681387\nnr_periods 0\nnr_throttled 0\nthrottled_usec 0\nnr_bursts 0\nburst_usec 0\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.current",
    "content": "12591513600\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.stat",
    "content": "anon 9158508544\nfile 2762801152\nkernel 503017472\nkernel_stack 27049984\npagetables 149635072\nsec_pagetables 0\npercpu 58040\nsock 217088\nvmalloc 630784\nshmem 2040651776\nzswap 0\nzswapped 0\nfile_mapped 445124608\nfile_dirty 7008256\nfile_writeback 0\nswapcached 170151936\nanon_thp 981467136\nfile_thp 0\nshmem_thp 0\ninactive_anon 6160973824\nactive_anon 4963110912\ninactive_file 213557248\nactive_file 508547072\nunevictable 240934912\nslab_reclaimable 227201576\nslab_unreclaimable 94041680\nslab 321243256\nworkingset_refault_anon 496572\nworkingset_refault_file 2613659\nworkingset_activate_anon 61432\nworkingset_activate_file 1430266\nworkingset_restore_anon 5935\nworkingset_restore_file 922840\nworkingset_nodereclaim 0\npgscan 18707280\npgsteal 10023314\npgscan_kswapd 14949081\npgscan_direct 3758199\npgsteal_kswapd 8515423\npgsteal_direct 1507891\npgfault 5724466729\npgmajfault 271316\npgrefill 5283337\npgactivate 130257374\npgdeactivate 3808695\npglazyfree 0\npglazyfreed 0\nzswpin 0\nzswpout 0\nthp_fault_alloc 102655\nthp_collapse_alloc 5073\n"
  },
  {
    "path": "lib/crunchstat/testdata/debian12/sys/fs/cgroup/user.slice/user-1000.slice/session-4.scope/memory.swap.current",
    "content": "3554775040\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu1804/proc/2523/cgroup",
    "content": "12:freezer:/\n11:rdma:/\n10:devices:/user.slice\n9:blkio:/user.slice\n8:net_cls,net_prio:/\n7:cpu,cpuacct:/user.slice\n6:memory:/user.slice\n5:cpuset:/\n4:perf_event:/\n3:pids:/user.slice/user-1000.slice/session-1.scope\n2:hugetlb:/\n1:name=systemd:/user.slice/user-1000.slice/session-1.scope\n0::/user.slice/user-1000.slice/session-1.scope\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu1804/proc/2523/cpuset",
    "content": "/\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu1804/proc/2523/net/dev",
    "content": "Inter-|   Receive                                                |  Transmit\n face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed\n    lo:    8492     102    0    0    0     0          0         0     8492     102    0    0    0     0       0          0\nenp1s0: 392046996  307389    0 31358    0     0          0         0  2402023   32125    0    0    0     0       0          0\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu1804/proc/cpuinfo",
    "content": "processor\t: 0\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3292.388\ncache size\t: 16384 KB\nphysical id\t: 0\nsiblings\t: 1\ncore id\t\t: 0\ncpu cores\t: 1\napicid\t\t: 0\ninitial apicid\t: 0\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.77\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 1\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3292.388\ncache size\t: 16384 KB\nphysical id\t: 1\nsiblings\t: 1\ncore id\t\t: 0\ncpu cores\t: 1\napicid\t\t: 1\ninitial apicid\t: 1\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.77\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu1804/proc/mounts",
    "content": "sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\nproc /proc proc rw,nosuid,nodev,noexec,relatime 0 0\nudev /dev devtmpfs rw,nosuid,relatime,size=986344k,nr_inodes=246586,mode=755 0 0\ndevpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0\ntmpfs /run tmpfs rw,nosuid,noexec,relatime,size=204064k,mode=755 0 0\n/dev/mapper/ubuntu--vg-ubuntu--lv / ext4 rw,relatime,data=ordered 0 0\nsecurityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0\ntmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0\ntmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0\ntmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0\ncgroup /sys/fs/cgroup/unified cgroup2 rw,nosuid,nodev,noexec,relatime 0 0\ncgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,name=systemd 0 0\npstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0\ncgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0\ncgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0\ncgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0\ncgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0\ncgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0\ncgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0\ncgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0\ncgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0\ncgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0\ncgroup /sys/fs/cgroup/rdma cgroup rw,nosuid,nodev,noexec,relatime,rdma 0 0\ncgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0\nhugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0\nmqueue /dev/mqueue mqueue rw,relatime 0 0\nsystemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=38,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=12761 0 0\ndebugfs /sys/kernel/debug debugfs rw,relatime 0 0\nconfigfs /sys/kernel/config configfs rw,relatime 0 0\nfusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0\nbinfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,relatime 0 0\n/dev/vda2 /boot ext4 rw,relatime,data=ordered 0 0\nlxcfs /var/lib/lxcfs fuse.lxcfs rw,nosuid,nodev,relatime,user_id=0,group_id=0,allow_other 0 0\ntmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=204060k,mode=700,uid=1000,gid=1000 0 0\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu1804/proc/self/smaps",
    "content": "00400000-00403000 r--p 00000000 fd:00 135685                             /tmp/arvados-server\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                  12 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        12 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd mr mw me dw sd \n00403000-01776000 r-xp 00003000 fd:00 135685                             /tmp/arvados-server\nSize:              19916 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:               12492 kB\nPss:               12492 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:     12492 kB\nReferenced:        12492 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd ex mr mw me dw sd \n01776000-02f28000 r--p 01376000 fd:00 135685                             /tmp/arvados-server\nSize:              24264 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:               11368 kB\nPss:               11368 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:     11368 kB\nReferenced:        11368 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd mr mw me dw sd \n02f28000-02f29000 r--p 02b27000 fd:00 135685                             /tmp/arvados-server\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd mr mw me dw ac sd \n02f29000-02fc0000 rw-p 02b28000 fd:00 135685                             /tmp/arvados-server\nSize:                604 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 480 kB\nPss:                 480 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       480 kB\nReferenced:          480 kB\nAnonymous:           176 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me dw ac sd \n02fc0000-03007000 rw-p 00000000 00:00 0 \nSize:                284 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 100 
kB\nPss:                 100 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       100 kB\nReferenced:          100 kB\nAnonymous:           100 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n03d38000-03d59000 rw-p 00000000 00:00 0                                  [heap]\nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \nc000000000-c000800000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                5996 kB\nPss:                5996 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:      5996 kB\nReferenced:         5996 kB\nAnonymous:          5996 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \nc000800000-c004000000 ---p 00000000 00:00 0 \nSize:              57344 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f7580000000-7f7580021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me nr sd \n7f7580021000-7f7584000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  
0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me nr sd \n7f7584000000-7f7584021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me nr sd \n7f7584021000-7f7588000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me nr sd \n7f7588000000-7f7588021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me nr sd \n7f7588021000-7f758c000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me nr sd \n7f758c000000-7f758c021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me nr sd \n7f758c021000-7f7590000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 
kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me nr sd \n7f7590000000-7f7590021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me nr sd \n7f7590021000-7f7594000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me nr sd \n7f7597cff000-7f7597d00000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f7597d00000-7f7598500000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f7599ffc000-7f7599ffd000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f7599ffd000-7f759a7fd000 rw-p 00000000 00:00 0 \nSize:               8192 
kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f759a7fd000-7f759a7fe000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f759a7fe000-7f759affe000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f759affe000-7f759afff000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f759afff000-7f759b7ff000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f759b7ff000-7f759b800000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 
kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f759b800000-7f759c000000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f759c000000-7f759c021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me nr sd \n7f759c021000-7f75a0000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me nr sd \n7f75a0260000-7f75a0500000 rw-p 00000000 00:00 0 \nSize:               2688 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                1300 kB\nPss:                1300 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:      1300 kB\nReferenced:         1300 kB\nAnonymous:          1300 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75a0500000-7f75a0501000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f75a0501000-7f75a3000000 rw-p 00000000 00:00 0 \nSize:              44028 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 136 kB\nPss:                 136 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 
kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       136 kB\nReferenced:          136 kB\nAnonymous:           136 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75a3000000-7f75a3200000 rw-p 00000000 00:00 0 \nSize:               2048 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd hg \n7f75a3200000-7f75a331d000 rw-p 00000000 00:00 0 \nSize:               1140 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75a331d000-7f75b3896000 ---p 00000000 00:00 0 \nSize:             267748 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f75b3896000-7f75b3897000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75b3897000-7f75c5746000 ---p 00000000 00:00 0 \nSize:             293564 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd 
\n7f75c5746000-7f75c5747000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c5747000-7f75c7b1c000 ---p 00000000 00:00 0 \nSize:              36692 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f75c7b1c000-7f75c7b1d000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c7b1d000-7f75c7f16000 ---p 00000000 00:00 0 \nSize:               4068 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f75c7f16000-7f75c7f1a000 r-xp 00000000 fd:00 132041                     /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                   1 kB\nShared_Clean:         16 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           16 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd ex mr mw me sd \n7f75c7f1a000-7f75c8119000 ---p 00004000 fd:00 132041                     /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:               2044 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 
kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f75c8119000-7f75c811a000 r--p 00003000 fd:00 132041                     /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd mr mw me ac sd \n7f75c811a000-7f75c811b000 rw-p 00004000 fd:00 132041                     /lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c811b000-7f75c811e000 r-xp 00000000 fd:00 136252                     /lib/x86_64-linux-gnu/libdl-2.27.so\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd ex mr mw me sd \n7f75c811e000-7f75c831d000 ---p 00003000 fd:00 136252                     /lib/x86_64-linux-gnu/libdl-2.27.so\nSize:               2044 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f75c831d000-7f75c831e000 r--p 00002000 fd:00 136252                     /lib/x86_64-linux-gnu/libdl-2.27.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:    
          0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd mr mw me ac sd \n7f75c831e000-7f75c831f000 rw-p 00003000 fd:00 136252                     /lib/x86_64-linux-gnu/libdl-2.27.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c831f000-7f75c833c000 r-xp 00000000 fd:00 132036                     /lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                116 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  60 kB\nPss:                   6 kB\nShared_Clean:         60 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           60 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd ex mr mw me sd \n7f75c833c000-7f75c853c000 ---p 0001d000 fd:00 132036                     /lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:               2048 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f75c853c000-7f75c853d000 r--p 0001d000 fd:00 132036                     /lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd mr mw me ac sd \n7f75c853d000-7f75c853e000 rw-p 0001e000 fd:00 132036                     /lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:   
     0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c853e000-7f75c8548000 rw-p 00000000 00:00 0 \nSize:                 40 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c8548000-7f75c872f000 r-xp 00000000 fd:00 136249                     /lib/x86_64-linux-gnu/libc-2.27.so\nSize:               1948 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                1040 kB\nPss:                  41 kB\nShared_Clean:       1040 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:         1040 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd ex mr mw me sd \n7f75c872f000-7f75c892f000 ---p 001e7000 fd:00 136249                     /lib/x86_64-linux-gnu/libc-2.27.so\nSize:               2048 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f75c892f000-7f75c8933000 r--p 001e7000 fd:00 136249                     /lib/x86_64-linux-gnu/libc-2.27.so\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                  16 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        16 kB\nReferenced:           16 kB\nAnonymous:            16 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd mr mw me ac sd \n7f75c8933000-7f75c8935000 rw-p 001eb000 fd:00 136249                     /lib/x86_64-linux-gnu/libc-2.27.so\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd 
\n7f75c8935000-7f75c8939000 rw-p 00000000 00:00 0 \nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                  12 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        12 kB\nReferenced:           12 kB\nAnonymous:            12 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c8939000-7f75c8946000 r-xp 00000000 fd:00 131482                     /lib/x86_64-linux-gnu/libpam.so.0.83.1\nSize:                 52 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  52 kB\nPss:                   6 kB\nShared_Clean:         52 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           52 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd ex mr mw me sd \n7f75c8946000-7f75c8b45000 ---p 0000d000 fd:00 131482                     /lib/x86_64-linux-gnu/libpam.so.0.83.1\nSize:               2044 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f75c8b45000-7f75c8b46000 r--p 0000c000 fd:00 131482                     /lib/x86_64-linux-gnu/libpam.so.0.83.1\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd mr mw me ac sd \n7f75c8b46000-7f75c8b47000 rw-p 0000d000 fd:00 131482                     /lib/x86_64-linux-gnu/libpam.so.0.83.1\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c8b47000-7f75c8b61000 r-xp 00000000 fd:00 136264                     /lib/x86_64-linux-gnu/libpthread-2.27.so\nSize:                104 
kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 100 kB\nPss:                   4 kB\nShared_Clean:        100 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          100 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd ex mr mw me sd \n7f75c8b61000-7f75c8d60000 ---p 0001a000 fd:00 136264                     /lib/x86_64-linux-gnu/libpthread-2.27.so\nSize:               2044 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f75c8d60000-7f75c8d61000 r--p 00019000 fd:00 136264                     /lib/x86_64-linux-gnu/libpthread-2.27.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd mr mw me ac sd \n7f75c8d61000-7f75c8d62000 rw-p 0001a000 fd:00 136264                     /lib/x86_64-linux-gnu/libpthread-2.27.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c8d62000-7f75c8d66000 rw-p 00000000 00:00 0 \nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c8d66000-7f75c8d7d000 r-xp 00000000 fd:00 136265                     /lib/x86_64-linux-gnu/libresolv-2.27.so\nSize:                 92 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  60 kB\nPss:                   8 kB\nShared_Clean:         60 
kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           60 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd ex mr mw me sd \n7f75c8d7d000-7f75c8f7c000 ---p 00017000 fd:00 136265                     /lib/x86_64-linux-gnu/libresolv-2.27.so\nSize:               2044 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f75c8f7c000-7f75c8f7d000 r--p 00016000 fd:00 136265                     /lib/x86_64-linux-gnu/libresolv-2.27.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd mr mw me ac sd \n7f75c8f7d000-7f75c8f7e000 rw-p 00017000 fd:00 136265                     /lib/x86_64-linux-gnu/libresolv-2.27.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c8f7e000-7f75c8f80000 rw-p 00000000 00:00 0 \nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c8f80000-7f75c8fa9000 r-xp 00000000 fd:00 135731                     /lib/x86_64-linux-gnu/ld-2.27.so\nSize:                164 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 164 kB\nPss:                   6 kB\nShared_Clean:        164 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          164 kB\nAnonymous:             0 
kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd ex mr mw me dw sd \n7f75c8fbc000-7f75c903c000 rw-p 00000000 00:00 0 \nSize:                512 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 272 kB\nPss:                 272 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       272 kB\nReferenced:          272 kB\nAnonymous:           272 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c903c000-7f75c90bc000 ---p 00000000 00:00 0 \nSize:                512 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f75c90bc000-7f75c90bd000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c90bd000-7f75c913c000 ---p 00000000 00:00 0 \nSize:                508 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: mr mw me sd \n7f75c913c000-7f75c91a3000 rw-p 00000000 00:00 0 \nSize:                412 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  68 kB\nPss:                  68 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        68 kB\nReferenced:           68 kB\nAnonymous:            68 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7f75c91a9000-7f75c91aa000 r--p 00029000 fd:00 135731                     /lib/x86_64-linux-gnu/ld-2.27.so\nSize:                  4 
kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd mr mw me dw ac sd \n7f75c91aa000-7f75c91ab000 rw-p 0002a000 fd:00 135731                     /lib/x86_64-linux-gnu/ld-2.27.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me dw ac sd \n7f75c91ab000-7f75c91ac000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me ac sd \n7ffdc0ff9000-7ffdc101a000 rw-p 00000000 00:00 0                          [stack]\nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                  16 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        16 kB\nReferenced:           16 kB\nAnonymous:            16 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd wr mr mw me gd ac \n7ffdc1151000-7ffdc1154000 r--p 00000000 00:00 0                          [vvar]\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd mr pf io de dd sd \n7ffdc1154000-7ffdc1156000 r-xp 00000000 00:00 0                          [vdso]\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 
kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd ex mr mw me de sd \nffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0                  [vsyscall]\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nVmFlags: rd ex \n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/blkio/user.slice/blkio.throttle.io_service_bytes",
    "content": "252:0 Read 6119424\n252:0 Write 0\n252:0 Sync 6119424\n252:0 Async 0\n252:0 Total 6119424\n253:0 Read 6119424\n253:0 Write 0\n253:0 Sync 6119424\n253:0 Async 0\n253:0 Total 6119424\nTotal 12238848\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/cpu,cpuacct/user.slice/cpuacct.stat",
    "content": "user 243\nsystem 255\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/cpuset/cpuset.cpus",
    "content": "0-1\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/memory/user.slice/memory.stat",
    "content": "cache 55107584\nrss 14348288\nrss_huge 0\nshmem 0\nmapped_file 25276416\ndirty 45821952\nwriteback 0\npgpgin 61677\npgpgout 44641\npgfault 85734\npgmajfault 66\ninactive_anon 0\nactive_anon 14536704\ninactive_file 25812992\nactive_file 29433856\nunevictable 0\nhierarchical_memory_limit 9223372036854771712\ntotal_cache 55107584\ntotal_rss 14348288\ntotal_rss_huge 0\ntotal_shmem 0\ntotal_mapped_file 25276416\ntotal_dirty 45821952\ntotal_writeback 0\ntotal_pgpgin 61677\ntotal_pgpgout 44641\ntotal_pgfault 85734\ntotal_pgmajfault 66\ntotal_inactive_anon 0\ntotal_active_anon 14536704\ntotal_inactive_file 25812992\ntotal_active_file 29433856\ntotal_unevictable 0\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu1804/sys/fs/cgroup/unified/user.slice/user-1000.slice/session-1.scope/cpu.stat",
    "content": "usage_usec 4947324\nuser_usec 2409841\nsystem_usec 2537483\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2004/proc/1360/cgroup",
    "content": "12:net_cls,net_prio:/\n11:pids:/user.slice/user-1000.slice/session-2.scope\n10:hugetlb:/\n9:cpuset:/\n8:perf_event:/\n7:cpu,cpuacct:/user.slice\n6:devices:/user.slice\n5:rdma:/\n4:blkio:/user.slice\n3:memory:/user.slice/user-1000.slice/session-2.scope\n2:freezer:/\n1:name=systemd:/user.slice/user-1000.slice/session-2.scope\n0::/user.slice/user-1000.slice/session-2.scope\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2004/proc/1360/cpuset",
    "content": "/\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2004/proc/1360/net/dev",
    "content": "Inter-|   Receive                                                |  Transmit\n face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed\n    lo:    7232      92    0    0    0     0          0         0     7232      92    0    0    0     0       0          0\nenp1s0: 48329280   34878    0 1282    0     0          0         0   257876    3434    0    0    0     0       0          0\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2004/proc/cpuinfo",
    "content": "processor\t: 0\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3292.388\ncache size\t: 16384 KB\nphysical id\t: 0\nsiblings\t: 1\ncore id\t\t: 0\ncpu cores\t: 1\napicid\t\t: 0\ninitial apicid\t: 0\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.77\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 1\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3292.388\ncache size\t: 16384 KB\nphysical id\t: 1\nsiblings\t: 1\ncore id\t\t: 0\ncpu cores\t: 1\napicid\t\t: 1\ninitial apicid\t: 1\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.77\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2004/proc/mounts",
    "content": "sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\nproc /proc proc rw,nosuid,nodev,noexec,relatime 0 0\nudev /dev devtmpfs rw,nosuid,noexec,relatime,size=1960772k,nr_inodes=490193,mode=755 0 0\ndevpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0\ntmpfs /run tmpfs rw,nosuid,nodev,noexec,relatime,size=401380k,mode=755 0 0\n/dev/mapper/ubuntu--vg-ubuntu--lv / ext4 rw,relatime 0 0\nsecurityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0\ntmpfs /dev/shm tmpfs rw,nosuid,nodev 0 0\ntmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k 0 0\ntmpfs /sys/fs/cgroup tmpfs ro,nosuid,nodev,noexec,mode=755 0 0\ncgroup2 /sys/fs/cgroup/unified cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate 0 0\ncgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,name=systemd 0 0\npstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0\nnone /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0\ncgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0\ncgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0\ncgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0\ncgroup /sys/fs/cgroup/rdma cgroup rw,nosuid,nodev,noexec,relatime,rdma 0 0\ncgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0\ncgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0\ncgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0\ncgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0\ncgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0\ncgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0\ncgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0\nsystemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=28,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=16350 0 0\nhugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0\nmqueue /dev/mqueue mqueue rw,nosuid,nodev,noexec,relatime 0 0\ndebugfs /sys/kernel/debug debugfs rw,nosuid,nodev,noexec,relatime 0 0\ntracefs /sys/kernel/tracing tracefs rw,nosuid,nodev,noexec,relatime 0 0\nfusectl /sys/fs/fuse/connections fusectl rw,nosuid,nodev,noexec,relatime 0 0\nconfigfs /sys/kernel/config configfs rw,nosuid,nodev,noexec,relatime 0 0\nbinfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0\n/dev/loop1 /snap/core20/1974 squashfs ro,nodev,relatime 0 0\n/dev/loop0 /snap/lxd/24061 squashfs ro,nodev,relatime 0 0\n/dev/loop3 /snap/core20/1828 squashfs ro,nodev,relatime 0 0\n/dev/loop2 /snap/snapd/19457 squashfs ro,nodev,relatime 0 0\n/dev/loop4 /snap/snapd/18357 squashfs ro,nodev,relatime 0 0\n/dev/vda2 /boot ext4 rw,relatime 0 0\ntmpfs /run/snapd/ns tmpfs rw,nosuid,nodev,noexec,relatime,size=401380k,mode=755 0 0\nnsfs /run/snapd/ns/lxd.mnt nsfs rw 0 0\ntmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=401376k,mode=700,uid=1000,gid=1000 0 0\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2004/proc/self/smaps",
    "content": "00400000-00403000 r--p 00000000 fd:00 11041                              /tmp/arvados-server\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                  12 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        12 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me dw sd \n00403000-01776000 r-xp 00003000 fd:00 11041                              /tmp/arvados-server\nSize:              19916 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:               12492 kB\nPss:               12492 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:     12492 kB\nReferenced:        12492 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd ex mr mw me dw sd \n01776000-02f28000 r--p 01376000 fd:00 11041                              /tmp/arvados-server\nSize:              24264 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:               10856 kB\nPss:               10856 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:     10856 kB\nReferenced:        10856 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me dw sd \n02f28000-02f29000 r--p 02b27000 fd:00 11041                              /tmp/arvados-server\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me dw ac sd \n02f29000-02fc0000 rw-p 02b28000 fd:00 11041                              /tmp/arvados-server\nSize:                604 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 480 kB\nPss:                 480 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       480 kB\nReferenced:          480 kB\nAnonymous:           176 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 
kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me dw ac sd \n02fc0000-03007000 rw-p 00000000 00:00 0 \nSize:                284 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 104 kB\nPss:                 104 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       104 kB\nReferenced:          104 kB\nAnonymous:           104 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n04a85000-04aa6000 rw-p 00000000 00:00 0                                  [heap]\nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \nc000000000-c000800000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                5756 kB\nPss:                5756 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:      5756 kB\nReferenced:         5756 kB\nAnonymous:          5756 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \nc000800000-c004000000 ---p 00000000 00:00 0 \nSize:              57344 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me sd \n7f8ce8000000-7f8ce8021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me nr sd \n7f8ce8021000-7f8cec000000 ---p 00000000 00:00 0 \nSize:       
       65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me nr sd \n7f8cf0000000-7f8cf0021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me nr sd \n7f8cf0021000-7f8cf4000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me nr sd \n7f8cf4000000-7f8cf4021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me nr sd \n7f8cf4021000-7f8cf8000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me nr sd \n7f8cf8000000-7f8cf8021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 
kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me nr sd \n7f8cf8021000-7f8cfc000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me nr sd \n7f8cfc000000-7f8cfc021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me nr sd \n7f8cfc021000-7f8d00000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me nr sd \n7f8d00000000-7f8d00021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me nr sd \n7f8d00021000-7f8d04000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:         
     0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me nr sd \n7f8d05302000-7f8d05452000 rw-p 00000000 00:00 0 \nSize:               1344 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                1004 kB\nPss:                1004 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:      1004 kB\nReferenced:         1004 kB\nAnonymous:          1004 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d05452000-7f8d05453000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me sd \n7f8d05453000-7f8d05c63000 rw-p 00000000 00:00 0 \nSize:               8256 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  20 kB\nPss:                  20 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        20 kB\nReferenced:           20 kB\nAnonymous:            20 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d05c63000-7f8d05c64000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me sd \n7f8d05c64000-7f8d06604000 rw-p 00000000 00:00 0 \nSize:               9856 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 304 kB\nPss:                 304 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       304 kB\nReferenced:          304 kB\nAnonymous:           304 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  
0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d06604000-7f8d06605000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me sd \n7f8d06605000-7f8d06e45000 rw-p 00000000 00:00 0 \nSize:               8448 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 240 kB\nPss:                 240 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       240 kB\nReferenced:          240 kB\nAnonymous:           240 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d06e45000-7f8d06e46000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me sd \n7f8d06e46000-7f8d07646000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d07646000-7f8d07647000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me sd \n7f8d07647000-7f8d07e47000 rw-p 00000000 00:00 0 \nSize:            
   8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d07e47000-7f8d07e48000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me sd \n7f8d07e48000-7f8d0aa00000 rw-p 00000000 00:00 0 \nSize:              44768 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 152 kB\nPss:                 152 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       152 kB\nReferenced:          152 kB\nAnonymous:           152 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d0aa00000-7f8d0ac00000 rw-p 00000000 00:00 0 \nSize:               2048 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t1\nVmFlags: rd wr mr mw me ac sd hg \n7f8d0ac00000-7f8d0ac84000 rw-p 00000000 00:00 0 \nSize:                528 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d0ac84000-7f8d1b1fd000 ---p 00000000 00:00 0 \nSize:             267748 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 
kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me sd \n7f8d1b1fd000-7f8d1b1fe000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d1b1fe000-7f8d2d0ad000 ---p 00000000 00:00 0 \nSize:             293564 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me sd \n7f8d2d0ad000-7f8d2d0ae000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2d0ae000-7f8d2f483000 ---p 00000000 00:00 0 \nSize:              36692 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me sd \n7f8d2f483000-7f8d2f484000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 
kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2f484000-7f8d2f8fd000 ---p 00000000 00:00 0 \nSize:               4580 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me sd \n7f8d2f8fd000-7f8d2f8fe000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2f8fe000-7f8d2f97d000 ---p 00000000 00:00 0 \nSize:                508 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me sd \n7f8d2f97d000-7f8d2f9e0000 rw-p 00000000 00:00 0 \nSize:                396 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  56 kB\nPss:                  56 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        56 kB\nReferenced:           56 kB\nAnonymous:            56 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2f9e0000-7f8d2f9e2000 r--p 00000000 fd:00 12252                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   0 kB\nShared_Clean:          8 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            8 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 
kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2f9e2000-7f8d2f9e5000 r-xp 00002000 fd:00 12252                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd ex mr mw me sd \n7f8d2f9e5000-7f8d2f9e6000 r--p 00005000 fd:00 12252                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2f9e6000-7f8d2f9e7000 r--p 00005000 fd:00 12252                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me ac sd \n7f8d2f9e7000-7f8d2f9e8000 rw-p 00006000 fd:00 12252                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2f9e8000-7f8d2f9ea000 rw-p 00000000 00:00 0 \nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 
kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2f9ea000-7f8d2f9eb000 r--p 00000000 fd:00 12268                      /usr/lib/x86_64-linux-gnu/libdl-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2f9eb000-7f8d2f9ed000 r-xp 00001000 fd:00 12268                      /usr/lib/x86_64-linux-gnu/libdl-2.31.so\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   0 kB\nShared_Clean:          8 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            8 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd ex mr mw me sd \n7f8d2f9ed000-7f8d2f9ee000 r--p 00003000 fd:00 12268                      /usr/lib/x86_64-linux-gnu/libdl-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2f9ee000-7f8d2f9ef000 r--p 00003000 fd:00 12268                      /usr/lib/x86_64-linux-gnu/libdl-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me ac sd \n7f8d2f9ef000-7f8d2f9f0000 rw-p 00004000 fd:00 12268                      /usr/lib/x86_64-linux-gnu/libdl-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          
0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2f9f0000-7f8d2f9f3000 r--p 00000000 fd:00 12234                      /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2f9f3000-7f8d2f9fb000 r-xp 00003000 fd:00 12234                      /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 32 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  28 kB\nPss:                   1 kB\nShared_Clean:         28 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           28 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd ex mr mw me sd \n7f8d2f9fb000-7f8d2fa0f000 r--p 0000b000 fd:00 12234                      /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 80 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2fa0f000-7f8d2fa10000 ---p 0001f000 fd:00 12234                      /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: mr mw me sd \n7f8d2fa10000-7f8d2fa11000 r--p 0001f000 fd:00 12234                      /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:  
                4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me ac sd \n7f8d2fa11000-7f8d2fa12000 rw-p 00020000 fd:00 12234                      /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2fa12000-7f8d2fa1c000 rw-p 00000000 00:00 0 \nSize:                 40 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2fa1c000-7f8d2fa3e000 r--p 00000000 fd:00 12250                      /usr/lib/x86_64-linux-gnu/libc-2.31.so\nSize:                136 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 136 kB\nPss:                   4 kB\nShared_Clean:        136 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          136 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2fa3e000-7f8d2fbb6000 r-xp 00022000 fd:00 12250                      /usr/lib/x86_64-linux-gnu/libc-2.31.so\nSize:               1504 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 768 kB\nPss:                  28 kB\nShared_Clean:        768 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          768 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd ex mr mw me sd \n7f8d2fbb6000-7f8d2fc04000 
r--p 0019a000 fd:00 12250                      /usr/lib/x86_64-linux-gnu/libc-2.31.so\nSize:                312 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 128 kB\nPss:                   4 kB\nShared_Clean:        128 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          128 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2fc04000-7f8d2fc08000 r--p 001e7000 fd:00 12250                      /usr/lib/x86_64-linux-gnu/libc-2.31.so\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                  16 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        16 kB\nReferenced:           16 kB\nAnonymous:            16 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me ac sd \n7f8d2fc08000-7f8d2fc0a000 rw-p 001eb000 fd:00 12250                      /usr/lib/x86_64-linux-gnu/libc-2.31.so\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2fc0a000-7f8d2fc0e000 rw-p 00000000 00:00 0 \nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                  12 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        12 kB\nReferenced:           12 kB\nAnonymous:            12 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2fc0e000-7f8d2fc11000 r--p 00000000 fd:00 12406                      /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:       
         0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2fc11000-7f8d2fc1a000 r-xp 00003000 fd:00 12406                      /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2\nSize:                 36 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  36 kB\nPss:                   2 kB\nShared_Clean:         36 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           36 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd ex mr mw me sd \n7f8d2fc1a000-7f8d2fc1e000 r--p 0000c000 fd:00 12406                      /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2fc1e000-7f8d2fc1f000 r--p 0000f000 fd:00 12406                      /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me ac sd \n7f8d2fc1f000-7f8d2fc20000 rw-p 00010000 fd:00 12406                      /usr/lib/x86_64-linux-gnu/libpam.so.0.84.2\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2fc20000-7f8d2fc26000 r--p 00000000 fd:00 12434                      /usr/lib/x86_64-linux-gnu/libpthread-2.31.so\nSize:                 24 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  24 kB\nPss:                   0 kB\nShared_Clean:         24 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           24 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        
0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2fc26000-7f8d2fc37000 r-xp 00006000 fd:00 12434                      /usr/lib/x86_64-linux-gnu/libpthread-2.31.so\nSize:                 68 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  68 kB\nPss:                   2 kB\nShared_Clean:         68 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           68 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd ex mr mw me sd \n7f8d2fc37000-7f8d2fc3d000 r--p 00017000 fd:00 12434                      /usr/lib/x86_64-linux-gnu/libpthread-2.31.so\nSize:                 24 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2fc3d000-7f8d2fc3e000 r--p 0001c000 fd:00 12434                      /usr/lib/x86_64-linux-gnu/libpthread-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me ac sd \n7f8d2fc3e000-7f8d2fc3f000 rw-p 0001d000 fd:00 12434                      /usr/lib/x86_64-linux-gnu/libpthread-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2fc3f000-7f8d2fc43000 rw-p 00000000 00:00 0 \nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 
kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2fc43000-7f8d2fc47000 r--p 00000000 fd:00 12439                      /usr/lib/x86_64-linux-gnu/libresolv-2.31.so\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                   1 kB\nShared_Clean:         16 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           16 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2fc47000-7f8d2fc57000 r-xp 00004000 fd:00 12439                      /usr/lib/x86_64-linux-gnu/libresolv-2.31.so\nSize:                 64 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  64 kB\nPss:                   6 kB\nShared_Clean:         64 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           64 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd ex mr mw me sd \n7f8d2fc57000-7f8d2fc5b000 r--p 00014000 fd:00 12439                      /usr/lib/x86_64-linux-gnu/libresolv-2.31.so\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me sd \n7f8d2fc5b000-7f8d2fc5c000 r--p 00017000 fd:00 12439                      /usr/lib/x86_64-linux-gnu/libresolv-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me ac sd \n7f8d2fc5c000-7f8d2fc5d000 rw-p 00018000 fd:00 12439                      /usr/lib/x86_64-linux-gnu/libresolv-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 
kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2fc5d000-7f8d2fc61000 rw-p 00000000 00:00 0 \nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7f8d2fc68000-7f8d2fc69000 r--p 00000000 fd:00 12105                      /usr/lib/x86_64-linux-gnu/ld-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me dw sd \n7f8d2fc69000-7f8d2fc8c000 r-xp 00001000 fd:00 12105                      /usr/lib/x86_64-linux-gnu/ld-2.31.so\nSize:                140 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 140 kB\nPss:                   4 kB\nShared_Clean:        140 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          140 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd ex mr mw me dw sd \n7f8d2fc8c000-7f8d2fc94000 r--p 00024000 fd:00 12105                      /usr/lib/x86_64-linux-gnu/ld-2.31.so\nSize:                 32 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  32 kB\nPss:                   1 kB\nShared_Clean:         32 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           32 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me dw sd \n7f8d2fc95000-7f8d2fc96000 r--p 0002c000 fd:00 12105                      /usr/lib/x86_64-linux-gnu/ld-2.31.so\nSize:                  4 
kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr mw me dw ac sd \n7f8d2fc96000-7f8d2fc97000 rw-p 0002d000 fd:00 12105                      /usr/lib/x86_64-linux-gnu/ld-2.31.so\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me dw ac sd \n7f8d2fc97000-7f8d2fc98000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me ac sd \n7ffe58cef000-7ffe58d10000 rw-p 00000000 00:00 0                          [stack]\nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                  16 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        16 kB\nReferenced:           16 kB\nAnonymous:            16 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd wr mr mw me gd ac \n7ffe58d36000-7ffe58d39000 r--p 00000000 00:00 0                          [vvar]\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd mr pf io de dd sd \n7ffe58d39000-7ffe58d3a000 r-xp 00000000 00:00 0                          [vdso]\nSize:                 
 4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: rd ex mr mw me de sd \nffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0                  [vsyscall]\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:        0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:\t\t0\nVmFlags: ex \n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/blkio/user.slice/blkio.throttle.io_service_bytes",
    "content": "252:0 Read 2322432\n252:0 Write 0\n252:0 Sync 2322432\n252:0 Async 0\n252:0 Discard 0\n252:0 Total 2322432\n253:0 Read 2322432\n253:0 Write 0\n253:0 Sync 2322432\n253:0 Async 0\n253:0 Discard 0\n253:0 Total 2322432\nTotal 4644864\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/cpu,cpuacct/user.slice/cpuacct.stat",
    "content": "user 31\nsystem 40\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/cpuset/cpuset.cpus",
    "content": "0-1\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/memory/user.slice/user-1000.slice/session-2.scope/memory.stat",
    "content": "cache 47984640\nrss 12845056\nrss_huge 0\nshmem 0\nmapped_file 24870912\ndirty 45821952\nwriteback 0\npgpgin 25839\npgpgout 10933\npgfault 18513\npgmajfault 0\ninactive_anon 0\nactive_anon 12840960\ninactive_file 47579136\nactive_file 270336\nunevictable 0\nhierarchical_memory_limit 9223372036854771712\ntotal_cache 47984640\ntotal_rss 12845056\ntotal_rss_huge 0\ntotal_shmem 0\ntotal_mapped_file 24870912\ntotal_dirty 45821952\ntotal_writeback 0\ntotal_pgpgin 25839\ntotal_pgpgout 10933\ntotal_pgfault 18513\ntotal_pgmajfault 0\ntotal_inactive_anon 0\ntotal_active_anon 12840960\ntotal_inactive_file 47579136\ntotal_active_file 270336\ntotal_unevictable 0\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2004/sys/fs/cgroup/unified/user.slice/user-1000.slice/session-2.scope/cpu.stat",
    "content": "usage_usec 843527\nuser_usec 355576\nsystem_usec 487951\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2204/proc/1967/cgroup",
    "content": "0::/user.slice/user-1000.slice/session-1.scope\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2204/proc/1967/cpuset",
    "content": "/user.slice\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2204/proc/1967/net/dev",
    "content": "Inter-|   Receive                                                |  Transmit\n face |bytes    packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed\n    lo:   10505     124    0    0    0     0          0         0    10505     124    0    0    0     0       0          0\nenp1s0: 227109019  173999    0 30971    0     0          0         0  1938868   25576    0    0    0     0       0          0\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2204/proc/cpuinfo",
    "content": "processor\t: 0\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3292.388\ncache size\t: 16384 KB\nphysical id\t: 0\nsiblings\t: 1\ncore id\t\t: 0\ncpu cores\t: 1\napicid\t\t: 0\ninitial apicid\t: 0\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.77\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\nprocessor\t: 1\nvendor_id\t: GenuineIntel\ncpu family\t: 6\nmodel\t\t: 71\nmodel name\t: Intel(R) Core(TM) i7-5775C CPU @ 3.30GHz\nstepping\t: 1\nmicrocode\t: 0x13\ncpu MHz\t\t: 3292.388\ncache size\t: 16384 KB\nphysical id\t: 1\nsiblings\t: 1\ncore id\t\t: 0\ncpu cores\t: 1\napicid\t\t: 1\ninitial apicid\t: 1\nfpu\t\t: yes\nfpu_exception\t: yes\ncpuid level\t: 20\nwp\t\t: yes\nflags\t\t: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl xtopology cpuid tsc_known_freq pni pclmulqdq vmx ssse3 fma cx16 pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch cpuid_fault invpcid_single pti tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm rdseed adx smap xsaveopt arat umip arch_capabilities\nvmx flags\t: vnmi preemption_timer invvpid ept_x_only ept_ad ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest shadow_vmcs pml\nbugs\t\t: cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa srbds mmio_unknown\nbogomips\t: 6584.77\nclflush size\t: 64\ncache_alignment\t: 64\naddress sizes\t: 39 bits physical, 48 bits virtual\npower management:\n\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2204/proc/mounts",
    "content": "sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0\nproc /proc proc rw,nosuid,nodev,noexec,relatime 0 0\nudev /dev devtmpfs rw,nosuid,relatime,size=1944524k,nr_inodes=486131,mode=755,inode64 0 0\ndevpts /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0\ntmpfs /run tmpfs rw,nosuid,nodev,noexec,relatime,size=400584k,mode=755,inode64 0 0\n/dev/mapper/ubuntu--vg-ubuntu--lv / ext4 rw,relatime 0 0\nsecurityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0\ntmpfs /dev/shm tmpfs rw,nosuid,nodev,inode64 0 0\ntmpfs /run/lock tmpfs rw,nosuid,nodev,noexec,relatime,size=5120k,inode64 0 0\ncgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec,relatime,nsdelegate,memory_recursiveprot 0 0\npstore /sys/fs/pstore pstore rw,nosuid,nodev,noexec,relatime 0 0\nbpf /sys/fs/bpf bpf rw,nosuid,nodev,noexec,relatime,mode=700 0 0\nsystemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=29,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=17759 0 0\nhugetlbfs /dev/hugepages hugetlbfs rw,relatime,pagesize=2M 0 0\nmqueue /dev/mqueue mqueue rw,nosuid,nodev,noexec,relatime 0 0\ndebugfs /sys/kernel/debug debugfs rw,nosuid,nodev,noexec,relatime 0 0\ntracefs /sys/kernel/tracing tracefs rw,nosuid,nodev,noexec,relatime 0 0\nfusectl /sys/fs/fuse/connections fusectl rw,nosuid,nodev,noexec,relatime 0 0\nconfigfs /sys/kernel/config configfs rw,nosuid,nodev,noexec,relatime 0 0\nnone /run/credentials/systemd-sysusers.service ramfs ro,nosuid,nodev,noexec,relatime,mode=700 0 0\n/dev/loop0 /snap/lxd/24322 squashfs ro,nodev,relatime,errors=continue 0 0\n/dev/loop1 /snap/snapd/18357 squashfs ro,nodev,relatime,errors=continue 0 0\n/dev/loop2 /snap/core20/1822 squashfs ro,nodev,relatime,errors=continue 0 0\n/dev/vda2 /boot ext4 rw,relatime 0 0\nbinfmt_misc /proc/sys/fs/binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,relatime 0 0\ntmpfs /run/snapd/ns tmpfs rw,nosuid,nodev,noexec,relatime,size=400584k,mode=755,inode64 0 0\nnsfs /run/snapd/ns/lxd.mnt nsfs rw 0 0\ntmpfs /run/user/1000 tmpfs rw,nosuid,nodev,relatime,size=400580k,nr_inodes=100145,mode=700,uid=1000,gid=1000,inode64 0 0\n/dev/loop3 /snap/snapd/19457 squashfs ro,nodev,relatime,errors=continue 0 0\n/dev/loop4 /snap/core20/1974 squashfs ro,nodev,relatime,errors=continue 0 0\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2204/proc/self/smaps",
    "content": "00400000-00403000 r--p 00000000 fd:00 393261                             /tmp/arvados-server\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                  12 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:        12 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n00403000-01776000 r-xp 00003000 fd:00 393261                             /tmp/arvados-server\nSize:              19916 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:               12492 kB\nPss:               12492 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:     12492 kB\nPrivate_Dirty:         0 kB\nReferenced:        12492 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n01776000-02f28000 r--p 01376000 fd:00 393261                             /tmp/arvados-server\nSize:              24264 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:               11048 kB\nPss:               11048 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:     11048 kB\nPrivate_Dirty:         0 kB\nReferenced:        11048 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n02f28000-02f29000 r--p 02b27000 fd:00 393261                             /tmp/arvados-server\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n02f29000-02fc0000 rw-p 02b28000 fd:00 393261                             /tmp/arvados-server\nSize:                604 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 480 kB\nPss:                 480 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:       304 kB\nPrivate_Dirty:       176 kB\nReferenced:          480 kB\nAnonymous:           176 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:     
           0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n02fc0000-03007000 rw-p 00000000 00:00 0 \nSize:                284 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 100 kB\nPss:                 100 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       100 kB\nReferenced:          100 kB\nAnonymous:           100 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n03f05000-03f26000 rw-p 00000000 00:00 0                                  [heap]\nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \nc000000000-c000800000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                5500 kB\nPss:                5500 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:      5500 kB\nReferenced:         5500 kB\nAnonymous:          5500 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \nc000800000-c004000000 ---p 00000000 00:00 0 \nSize:              57344 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f750c000000-7f750c021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f750c021000-7f7510000000 ---p 00000000 00:00 0 \nSize:              65404 
kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me nr sd \n7f7510000000-7f7510021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f7510021000-7f7514000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me nr sd \n7f7514000000-7f7514021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f7514021000-7f7518000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me nr sd \n7f7518000000-7f7518021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:      
    0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f7518021000-7f751c000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me nr sd \n7f751c000000-7f751c021000 rw-p 00000000 00:00 0 \nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me nr sd \n7f751c021000-7f7520000000 ---p 00000000 00:00 0 \nSize:              65404 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me nr sd \n7f7520f2f000-7f752108f000 rw-p 00000000 00:00 0 \nSize:               1408 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 992 kB\nPss:                 992 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       992 kB\nReferenced:          992 kB\nAnonymous:           992 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f752108f000-7f7521090000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 
kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f7521090000-7f7521a30000 rw-p 00000000 00:00 0 \nSize:               9856 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 332 kB\nPss:                 332 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       332 kB\nReferenced:          332 kB\nAnonymous:           332 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f7521a30000-7f7521a31000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f7521a31000-7f7522271000 rw-p 00000000 00:00 0 \nSize:               8448 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 244 kB\nPss:                 244 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       244 kB\nReferenced:          244 kB\nAnonymous:           244 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f7522271000-7f7522272000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f7522272000-7f7522a72000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 
kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f7522a72000-7f7522a73000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f7522a73000-7f7523273000 rw-p 00000000 00:00 0 \nSize:               8192 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f7523273000-7f7523274000 ---p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f7523274000-7f7525e00000 rw-p 00000000 00:00 0 \nSize:              44592 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 156 kB\nPss:                 156 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:       156 kB\nReferenced:          156 kB\nAnonymous:           156 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f7525e00000-7f7526000000 rw-p 00000000 00:00 0 \nSize:               2048 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    1\nVmFlags: rd wr mr mw me ac sd hg \n7f7526000000-7f75260b0000 rw-p 00000000 00:00 0 
\nSize:                704 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f75260b0000-7f7536629000 ---p 00000000 00:00 0 \nSize:             267748 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f7536629000-7f753662a000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f753662a000-7f75484d9000 ---p 00000000 00:00 0 \nSize:             293564 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f75484d9000-7f75484da000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f75484da000-7f754a8af000 ---p 00000000 00:00 0 \nSize:              36692 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:        
  0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f754a8af000-7f754a8b0000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754a8b0000-7f754ad29000 ---p 00000000 00:00 0 \nSize:               4580 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f754ad29000-7f754ad2a000 rw-p 00000000 00:00 0 \nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754ad2a000-7f754ada9000 ---p 00000000 00:00 0 \nSize:                508 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: mr mw me sd \n7f754ada9000-7f754ae0c000 rw-p 00000000 00:00 0 \nSize:                396 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  56 kB\nPss:                  56 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        56 kB\nReferenced:           56 kB\nAnonymous:            56 kB\nLazyFree:              
0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754ae0c000-7f754ae0e000 r--p 00000000 fd:00 11091                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   0 kB\nShared_Clean:          8 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            8 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f754ae0e000-7f754ae11000 r-xp 00002000 fd:00 11091                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f754ae11000-7f754ae12000 r--p 00005000 fd:00 11091                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f754ae12000-7f754ae13000 r--p 00005000 fd:00 11091                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f754ae13000-7f754ae14000 rw-p 00006000 fd:00 11091                      /usr/lib/x86_64-linux-gnu/libcap-ng.so.0.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:       
   0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754ae14000-7f754ae16000 rw-p 00000000 00:00 0 \nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754ae16000-7f754ae19000 r--p 00000000 fd:00 11071                      /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f754ae19000-7f754ae21000 r-xp 00003000 fd:00 11071                      /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 32 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  32 kB\nPss:                   2 kB\nShared_Clean:         32 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           32 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f754ae21000-7f754ae36000 r--p 0000b000 fd:00 11071                      /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                 84 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f754ae36000-7f754ae37000 r--p 0001f000 fd:00 11071                      /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                  4 kB\nKernelPageSize:        4 
kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f754ae37000-7f754ae38000 rw-p 00020000 fd:00 11071                      /usr/lib/x86_64-linux-gnu/libaudit.so.1.0.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754ae38000-7f754ae44000 rw-p 00000000 00:00 0 \nSize:                 48 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754ae44000-7f754ae6c000 r--p 00000000 fd:00 11089                      /usr/lib/x86_64-linux-gnu/libc.so.6\nSize:                160 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 160 kB\nPss:                   6 kB\nShared_Clean:        160 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          160 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f754ae6c000-7f754b001000 r-xp 00028000 fd:00 11089                      /usr/lib/x86_64-linux-gnu/libc.so.6\nSize:               1620 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 912 kB\nPss:                  40 kB\nShared_Clean:        912 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          912 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f754b001000-7f754b059000 r--p 001bd000 fd:00 11089                      
/usr/lib/x86_64-linux-gnu/libc.so.6\nSize:                352 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 128 kB\nPss:                   5 kB\nShared_Clean:        128 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          128 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f754b059000-7f754b05d000 r--p 00214000 fd:00 11089                      /usr/lib/x86_64-linux-gnu/libc.so.6\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                  16 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        16 kB\nReferenced:           16 kB\nAnonymous:            16 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f754b05d000-7f754b05f000 rw-p 00218000 fd:00 11089                      /usr/lib/x86_64-linux-gnu/libc.so.6\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754b05f000-7f754b06c000 rw-p 00000000 00:00 0 \nSize:                 52 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  20 kB\nPss:                  20 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        20 kB\nReferenced:           20 kB\nAnonymous:            20 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754b06c000-7f754b06f000 r--p 00000000 fd:00 11245                      /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   0 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr 
mw me sd \n7f754b06f000-7f754b078000 r-xp 00003000 fd:00 11245                      /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                 36 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  36 kB\nPss:                   3 kB\nShared_Clean:         36 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           36 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f754b078000-7f754b07c000 r--p 0000c000 fd:00 11245                      /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f754b07c000-7f754b07d000 r--p 0000f000 fd:00 11245                      /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f754b07d000-7f754b07e000 rw-p 00010000 fd:00 11245                      /usr/lib/x86_64-linux-gnu/libpam.so.0.85.1\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754b07e000-7f754b07f000 r--p 00000000 fd:00 11272                      /usr/lib/x86_64-linux-gnu/libpthread.so.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   2 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 
kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f754b07f000-7f754b080000 r-xp 00001000 fd:00 11272                      /usr/lib/x86_64-linux-gnu/libpthread.so.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   2 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f754b080000-7f754b081000 r--p 00002000 fd:00 11272                      /usr/lib/x86_64-linux-gnu/libpthread.so.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f754b081000-7f754b082000 r--p 00002000 fd:00 11272                      /usr/lib/x86_64-linux-gnu/libpthread.so.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f754b082000-7f754b083000 rw-p 00003000 fd:00 11272                      /usr/lib/x86_64-linux-gnu/libpthread.so.0\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754b083000-7f754b086000 r--p 00000000 fd:00 11276                      /usr/lib/x86_64-linux-gnu/libresolv.so.2\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  12 kB\nPss:                   3 kB\nShared_Clean:         12 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 
kB\nReferenced:           12 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f754b086000-7f754b090000 r-xp 00003000 fd:00 11276                      /usr/lib/x86_64-linux-gnu/libresolv.so.2\nSize:                 40 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  36 kB\nPss:                   9 kB\nShared_Clean:         36 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           36 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f754b090000-7f754b093000 r--p 0000d000 fd:00 11276                      /usr/lib/x86_64-linux-gnu/libresolv.so.2\nSize:                 12 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f754b093000-7f754b094000 r--p 0000f000 fd:00 11276                      /usr/lib/x86_64-linux-gnu/libresolv.so.2\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f754b094000-7f754b095000 rw-p 00010000 fd:00 11276                      /usr/lib/x86_64-linux-gnu/libresolv.so.2\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   4 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         4 kB\nReferenced:            4 kB\nAnonymous:             4 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754b095000-7f754b097000 rw-p 00000000 00:00 0 \nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:       
   0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754b09c000-7f754b09e000 rw-p 00000000 00:00 0 \nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7f754b09e000-7f754b0a0000 r--p 00000000 fd:00 10938                      /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   0 kB\nShared_Clean:          8 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            8 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f754b0a0000-7f754b0ca000 r-xp 00002000 fd:00 10938                      /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2\nSize:                168 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                 168 kB\nPss:                   6 kB\nShared_Clean:        168 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:          168 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me sd \n7f754b0ca000-7f754b0d5000 r--p 0002c000 fd:00 10938                      /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2\nSize:                 44 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  44 kB\nPss:                   1 kB\nShared_Clean:         44 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:           44 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me sd \n7f754b0d6000-7f754b0d8000 r--p 00037000 fd:00 10938                      /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2\nSize:                  8 kB\nKernelPageSize:  
      4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr mw me ac sd \n7f754b0d8000-7f754b0da000 rw-p 00039000 fd:00 10938                      /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   8 kB\nPss:                   8 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         8 kB\nReferenced:            8 kB\nAnonymous:             8 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me ac sd \n7ffed2e14000-7ffed2e35000 rw-p 00000000 00:00 0                          [stack]\nSize:                132 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                  16 kB\nPss:                  16 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:        16 kB\nReferenced:           16 kB\nAnonymous:            16 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd wr mr mw me gd ac \n7ffed2fc4000-7ffed2fc8000 r--p 00000000 00:00 0                          [vvar]\nSize:                 16 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd mr pf io de dd sd \n7ffed2fc8000-7ffed2fca000 r-xp 00000000 00:00 0                          [vdso]\nSize:                  8 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   4 kB\nPss:                   0 kB\nShared_Clean:          4 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            4 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: rd ex mr mw me de sd \nffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0                  
[vsyscall]\nSize:                  4 kB\nKernelPageSize:        4 kB\nMMUPageSize:           4 kB\nRss:                   0 kB\nPss:                   0 kB\nShared_Clean:          0 kB\nShared_Dirty:          0 kB\nPrivate_Clean:         0 kB\nPrivate_Dirty:         0 kB\nReferenced:            0 kB\nAnonymous:             0 kB\nLazyFree:              0 kB\nAnonHugePages:         0 kB\nShmemPmdMapped:        0 kB\nFilePmdMapped:         0 kB\nShared_Hugetlb:        0 kB\nPrivate_Hugetlb:       0 kB\nSwap:                  0 kB\nSwapPss:               0 kB\nLocked:                0 kB\nTHPeligible:    0\nVmFlags: ex \n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/cpu.max",
    "content": "max 100000\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/cpuset.cpus.effective",
    "content": "0-1\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/io.stat",
    "content": "252:0 rbytes=3551232 wbytes=147263488 rios=141 wios=208 dbytes=0 dios=0\n253:0 rbytes=3551232 wbytes=147263488 rios=141 wios=109 dbytes=0 dios=0\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/cpu.stat",
    "content": "usage_usec 1750563\nuser_usec 703305\nsystem_usec 1047257\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/memory.current",
    "content": "68902912\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/memory.stat",
    "content": "anon 13606912\nfile 52432896\nkernel_stack 180224\npagetables 438272\npercpu 0\nsock 0\nshmem 4096\nfile_mapped 25767936\nfile_dirty 86016\nfile_writeback 0\nswapcached 0\nanon_thp 0\nfile_thp 0\nshmem_thp 0\ninactive_anon 13574144\nactive_anon 20480\ninactive_file 26722304\nactive_file 25669632\nunevictable 0\nslab_reclaimable 1646344\nslab_unreclaimable 328072\nslab 1974416\nworkingset_refault_anon 0\nworkingset_refault_file 0\nworkingset_activate_anon 0\nworkingset_activate_file 0\nworkingset_restore_anon 0\nworkingset_restore_file 0\nworkingset_nodereclaim 0\npgfault 33355\npgmajfault 27\npgrefill 0\npgscan 0\npgsteal 0\npgactivate 6253\npgdeactivate 0\npglazyfree 0\npglazyfreed 0\nthp_fault_alloc 0\nthp_collapse_alloc 0\n"
  },
  {
    "path": "lib/crunchstat/testdata/ubuntu2204/sys/fs/cgroup/user.slice/user-1000.slice/session-1.scope/memory.swap.current",
    "content": "0\n"
  },
  {
    "path": "lib/ctrlctx/auth.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ctrlctx\n\nimport (\n\t\"context\"\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"database/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/api\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"github.com/ghodss/yaml\"\n)\n\nvar (\n\tErrNoAuthContext   = errors.New(\"bug: there is no authorization in this context\")\n\tErrUnauthenticated = errors.New(\"unauthenticated request\")\n)\n\n// WrapCallsWithAuth returns a call wrapper (suitable for assigning to\n// router.router.WrapCalls) that makes CurrentUser(ctx) et al. work\n// from inside the wrapped functions.\n//\n// The incoming context must come from WrapCallsInTransactions or\n// NewWithTransaction.\nfunc WrapCallsWithAuth(cluster *arvados.Cluster) func(api.RoutableFunc) api.RoutableFunc {\n\tvar authcache authcache\n\treturn func(origFunc api.RoutableFunc) api.RoutableFunc {\n\t\treturn func(ctx context.Context, opts interface{}) (_ interface{}, err error) {\n\t\t\tvar tokens []string\n\t\t\tif creds, ok := auth.FromContext(ctx); ok {\n\t\t\t\ttokens = creds.Tokens\n\t\t\t}\n\t\t\treturn origFunc(context.WithValue(ctx, contextKeyAuth, &authcontext{\n\t\t\t\tauthcache: &authcache,\n\t\t\t\tcluster:   cluster,\n\t\t\t\ttokens:    tokens,\n\t\t\t}), opts)\n\t\t}\n\t}\n}\n\n// NewWithToken returns a context with the provided auth token.\n//\n// The incoming context must come from WrapCallsInTransactions or\n// NewWithTransaction.\n//\n// Used for attaching system auth to background threads.\n//\n// Also useful for tests, where context doesn't necessarily come from\n// a router that uses WrapCallsWithAuth.\n//\n// The returned context comes with its own token lookup cache, so\n// NewWithToken is not appropriate to use in a per-request code path.\nfunc NewWithToken(ctx context.Context, cluster *arvados.Cluster, token string) context.Context {\n\tctx = auth.NewContext(ctx, &auth.Credentials{Tokens: []string{token}})\n\treturn context.WithValue(ctx, contextKeyAuth, &authcontext{\n\t\tauthcache: &authcache{},\n\t\tcluster:   cluster,\n\t\ttokens:    []string{token},\n\t})\n}\n\n// CurrentAuth returns the arvados.User whose privileges should be\n// used in the given context, and the arvados.APIClientAuthorization\n// the caller presented in order to authenticate the current request.\n//\n// Returns ErrUnauthenticated if the current request was not\n// authenticated (no token provided, token is expired, etc).\nfunc CurrentAuth(ctx context.Context) (*arvados.User, *arvados.APIClientAuthorization, error) {\n\tac, ok := ctx.Value(contextKeyAuth).(*authcontext)\n\tif !ok {\n\t\treturn nil, nil, ErrNoAuthContext\n\t}\n\tac.lookupOnce.Do(func() {\n\t\t// We only validate/lookup the token once per API\n\t\t// call, even though authcache should be efficient\n\t\t// enough to do a lookup each time. 
This guarantees we\n\t\t// always return the same result when called multiple\n\t\t// times in the course of handling a single API call.\n\t\tfor _, token := range ac.tokens {\n\t\t\tuser, aca, err := ac.authcache.lookup(ctx, ac.cluster, token)\n\t\t\tif err != nil {\n\t\t\t\tac.err = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif user != nil {\n\t\t\t\tac.user, ac.apiClientAuthorization = user, aca\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tac.err = ErrUnauthenticated\n\t})\n\treturn ac.user, ac.apiClientAuthorization, ac.err\n}\n\ntype contextKeyA string\n\nvar contextKeyAuth = contextKeyT(\"auth\")\n\ntype authcontext struct {\n\tauthcache              *authcache\n\tcluster                *arvados.Cluster\n\ttokens                 []string\n\tuser                   *arvados.User\n\tapiClientAuthorization *arvados.APIClientAuthorization\n\terr                    error\n\tlookupOnce             sync.Once\n}\n\nvar authcacheTTL = time.Minute\n\ntype authcacheent struct {\n\texpireTime             time.Time\n\tapiClientAuthorization arvados.APIClientAuthorization\n\tuser                   arvados.User\n}\n\ntype authcache struct {\n\tmtx         sync.Mutex\n\tentries     map[string]*authcacheent\n\tnextCleanup time.Time\n}\n\n// lookup returns the user and aca info for a given token. Returns nil\n// if the token is not valid. Returns a non-nil error if there was an\n// unexpected error from the database, etc.\nfunc (ac *authcache) lookup(ctx context.Context, cluster *arvados.Cluster, token string) (*arvados.User, *arvados.APIClientAuthorization, error) {\n\tac.mtx.Lock()\n\tent := ac.entries[token]\n\tac.mtx.Unlock()\n\tif ent != nil && ent.expireTime.After(time.Now()) {\n\t\treturn &ent.user, &ent.apiClientAuthorization, nil\n\t}\n\tif token == \"\" {\n\t\treturn nil, nil, nil\n\t}\n\ttx, err := CurrentTx(ctx)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar aca arvados.APIClientAuthorization\n\tvar user arvados.User\n\n\tvar cond string\n\tvar args []interface{}\n\tif len(token) > 30 && strings.HasPrefix(token, \"v2/\") && token[30] == '/' {\n\t\tfields := strings.Split(token, \"/\")\n\t\tcond = `aca.uuid = $1 and aca.api_token = $2`\n\t\targs = []interface{}{fields[1], fields[2]}\n\t} else {\n\t\t// Bare token or OIDC access token\n\t\tmac := hmac.New(sha256.New, []byte(cluster.SystemRootToken))\n\t\tio.WriteString(mac, token)\n\t\thmac := fmt.Sprintf(\"%x\", mac.Sum(nil))\n\t\tcond = `aca.api_token in ($1, $2)`\n\t\targs = []interface{}{token, hmac}\n\t}\n\tvar expiresAt sql.NullTime\n\tvar scopesYAML []byte\n\terr = tx.QueryRowContext(ctx, `\nselect aca.uuid, aca.expires_at, aca.api_token, aca.scopes, users.uuid, users.is_active, users.is_admin\n from api_client_authorizations aca\n left join users on aca.user_id = users.id\n where `+cond+`\n and (least(expires_at, refreshes_at) is null or least(expires_at, refreshes_at) > current_timestamp at time zone 'UTC')`, args...).Scan(\n\t\t&aca.UUID, &expiresAt, &aca.APIToken, &scopesYAML,\n\t\t&user.UUID, &user.IsActive, &user.IsAdmin)\n\tif err == sql.ErrNoRows {\n\t\treturn nil, nil, nil\n\t} else if err != nil {\n\t\treturn nil, nil, err\n\t}\n\taca.ExpiresAt = expiresAt.Time\n\tif len(scopesYAML) > 0 {\n\t\terr = yaml.Unmarshal(scopesYAML, &aca.Scopes)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"loading scopes for %s: %w\", aca.UUID, err)\n\t\t}\n\t}\n\tent = &authcacheent{\n\t\texpireTime:             time.Now().Add(authcacheTTL),\n\t\tapiClientAuthorization: aca,\n\t\tuser:                   user,\n\t}\n\tac.mtx.Lock()\n\tdefer 
ac.mtx.Unlock()\n\tif ac.entries == nil {\n\t\tac.entries = map[string]*authcacheent{}\n\t}\n\tif ac.nextCleanup.IsZero() || ac.nextCleanup.Before(time.Now()) {\n\t\tfor token, ent := range ac.entries {\n\t\t\tif !ent.expireTime.After(time.Now()) {\n\t\t\t\tdelete(ac.entries, token)\n\t\t\t}\n\t\t}\n\t\tac.nextCleanup = time.Now().Add(authcacheTTL)\n\t}\n\tac.entries[token] = ent\n\treturn &ent.user, &ent.apiClientAuthorization, nil\n}\n"
  },
  {
    "path": "lib/ctrlctx/auth_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ctrlctx\n\nimport (\n\t\"context\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/jmoiron/sqlx\"\n\t_ \"github.com/lib/pq\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nfunc (*DatabaseSuite) TestAuthContext(c *check.C) {\n\tcfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()\n\tc.Assert(err, check.IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\n\tgetter := func(context.Context) (*sqlx.DB, error) {\n\t\treturn sqlx.Open(\"postgres\", cluster.PostgreSQL.Connection.String())\n\t}\n\tauthwrapper := WrapCallsWithAuth(cluster)\n\tdbwrapper := WrapCallsInTransactions(getter)\n\n\t// valid tokens\n\tfor _, token := range []string{\n\t\tarvadostest.ActiveToken,\n\t\tarvadostest.ActiveTokenV2,\n\t\tarvadostest.ActiveTokenV2 + \"/asdfasdfasdf\",\n\t\tarvadostest.ActiveTokenV2, // cached\n\t} {\n\t\tok, err := dbwrapper(authwrapper(func(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\tuser, aca, err := CurrentAuth(ctx)\n\t\t\tif c.Check(err, check.IsNil) {\n\t\t\t\tc.Check(user.UUID, check.Equals, \"zzzzz-tpzed-xurymjxw79nv3jz\")\n\t\t\t\tc.Check(aca.UUID, check.Equals, \"zzzzz-gj3su-077z32aux8dg2s1\")\n\t\t\t\tc.Check(aca.Scopes, check.DeepEquals, []string{\"all\"})\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}))(auth.NewContext(context.Background(), auth.NewCredentials(token)), \"blah\")\n\t\tc.Check(ok, check.Equals, true)\n\t\tc.Check(err, check.IsNil)\n\t}\n\n\t// bad tokens\n\tfor _, token := range []string{\n\t\tarvadostest.ActiveToken + \"X\",\n\t\tarvadostest.ActiveTokenV2 + \"X\",\n\t\tarvadostest.ActiveTokenV2[:30], // \"v2/{uuid}\"\n\t\tarvadostest.ActiveTokenV2[:31], // \"v2/{uuid}/\"\n\t\t\"bogus\",\n\t\t\"\",\n\t} {\n\t\tok, err := dbwrapper(authwrapper(func(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\tuser, aca, err := CurrentAuth(ctx)\n\t\t\tc.Check(err, check.Equals, ErrUnauthenticated)\n\t\t\tc.Check(user, check.IsNil)\n\t\t\tc.Check(aca, check.IsNil)\n\t\t\treturn true, err\n\t\t}))(auth.NewContext(context.Background(), auth.NewCredentials(token)), \"blah\")\n\t\tc.Check(ok, check.Equals, true)\n\t\tc.Check(err, check.Equals, ErrUnauthenticated)\n\t}\n\n\t// no auth context\n\t{\n\t\tok, err := dbwrapper(authwrapper(func(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\t\tuser, aca, err := CurrentAuth(ctx)\n\t\t\tc.Check(err, check.Equals, ErrUnauthenticated)\n\t\t\tc.Check(user, check.IsNil)\n\t\t\tc.Check(aca, check.IsNil)\n\t\t\treturn true, err\n\t\t}))(context.Background(), \"blah\")\n\t\tc.Check(ok, check.Equals, true)\n\t\tc.Check(err, check.Equals, ErrUnauthenticated)\n\t}\n}\n"
  },
  {
    "path": "lib/ctrlctx/db.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ctrlctx\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"sync\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/api\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/jmoiron/sqlx\"\n\n\t// sqlx needs lib/pq to talk to PostgreSQL\n\t_ \"github.com/lib/pq\"\n)\n\nvar (\n\tErrNoTransaction   = errors.New(\"bug: there is no transaction in this context\")\n\tErrContextFinished = errors.New(\"refusing to start a transaction after wrapped function already returned\")\n)\n\n// WrapCallsInTransactions returns a call wrapper (suitable for\n// assigning to router.router.WrapCalls) that starts a new transaction\n// for each API call, and commits only if the call succeeds.\n//\n// The wrapper calls getdb() to get a database handle before each API\n// call.\nfunc WrapCallsInTransactions(getdb func(context.Context) (*sqlx.DB, error)) func(api.RoutableFunc) api.RoutableFunc {\n\treturn func(origFunc api.RoutableFunc) api.RoutableFunc {\n\t\treturn func(ctx context.Context, opts interface{}) (_ interface{}, err error) {\n\t\t\tctx, finishtx := New(ctx, getdb)\n\t\t\tdefer finishtx(&err)\n\t\t\treturn origFunc(ctx, opts)\n\t\t}\n\t}\n}\n\n// NewWithTransaction returns a child context in which the given\n// transaction will be used by any localdb API call that needs one.\n// The caller is responsible for calling Commit or Rollback on tx.\nfunc NewWithTransaction(ctx context.Context, tx *sqlx.Tx) context.Context {\n\ttxn := &transaction{tx: tx}\n\ttxn.setup.Do(func() {})\n\treturn context.WithValue(ctx, contextKeyTransaction, txn)\n}\n\ntype contextKeyT string\n\nvar contextKeyTransaction = contextKeyT(\"transaction\")\n\ntype transaction struct {\n\ttx    *sqlx.Tx\n\terr   error\n\tgetdb func(context.Context) (*sqlx.DB, error)\n\tsetup sync.Once\n}\n\ntype finishFunc func(*error)\n\n// New returns a new child context that can be used with\n// CurrentTx(). It does not open a database transaction until the\n// first call to CurrentTx().\n//\n// The caller must eventually call the returned finishtx() func to\n// commit or rollback the transaction, if any.\n//\n//\tfunc example(ctx context.Context) (err error) {\n//\t\tctx, finishtx := New(ctx, getdb)\n//\t\tdefer finishtx(&err)\n//\t\t// ...\n//\t\ttx, err := CurrentTx(ctx)\n//\t\tif err != nil {\n//\t\t\treturn fmt.Errorf(\"example: %s\", err)\n//\t\t}\n//\t\treturn tx.ExecContext(...)\n//\t}\n//\n// If *err is nil, finishtx() commits the transaction and assigns any\n// resulting error to *err.\n//\n// If *err is non-nil, finishtx() rolls back the transaction, and\n// does not modify *err.\nfunc New(ctx context.Context, getdb func(context.Context) (*sqlx.DB, error)) (context.Context, finishFunc) {\n\ttxn := &transaction{getdb: getdb}\n\treturn context.WithValue(ctx, contextKeyTransaction, txn), func(err *error) {\n\t\ttxn.setup.Do(func() {\n\t\t\t// Using (*sync.Once)Do() prevents a future\n\t\t\t// call to CurrentTx() from opening a\n\t\t\t// transaction which would never get committed\n\t\t\t// or rolled back. 
If CurrentTx() hasn't been\n\t\t\t// called before now, future calls will return\n\t\t\t// this error.\n\t\t\ttxn.err = ErrContextFinished\n\t\t})\n\t\tif txn.tx == nil {\n\t\t\t// we never [successfully] started a transaction\n\t\t\treturn\n\t\t}\n\t\tif *err != nil {\n\t\t\tctxlog.FromContext(ctx).Debug(\"rollback\")\n\t\t\ttxn.tx.Rollback()\n\t\t\treturn\n\t\t}\n\t\t*err = txn.tx.Commit()\n\t}\n}\n\n// NewTx starts a new transaction. The caller is responsible for\n// calling Commit or Rollback. This is suitable for database queries\n// that are separate from the API transaction (see CurrentTx), e.g.,\n// ones that will be committed even if the API call fails, or held\n// open after the API call finishes.\nfunc NewTx(ctx context.Context) (*sqlx.Tx, error) {\n\ttxn, ok := ctx.Value(contextKeyTransaction).(*transaction)\n\tif !ok {\n\t\treturn nil, ErrNoTransaction\n\t}\n\tdb, err := txn.getdb(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn db.Beginx()\n}\n\n// CurrentTx returns a transaction that will be committed after the\n// current API call completes, or rolled back if the current API call\n// returns an error.\nfunc CurrentTx(ctx context.Context) (*sqlx.Tx, error) {\n\ttxn, ok := ctx.Value(contextKeyTransaction).(*transaction)\n\tif !ok {\n\t\treturn nil, ErrNoTransaction\n\t}\n\ttxn.setup.Do(func() {\n\t\tif db, err := txn.getdb(ctx); err != nil {\n\t\t\ttxn.err = err\n\t\t} else {\n\t\t\ttxn.tx, txn.err = db.Beginx()\n\t\t}\n\t})\n\treturn txn.tx, txn.err\n}\n\nvar errDBConnection = errors.New(\"database connection error\")\n\ntype DBConnector struct {\n\tPostgreSQL arvados.PostgreSQL\n\tpgdb       *sqlx.DB\n\tmtx        sync.Mutex\n}\n\nfunc (dbc *DBConnector) GetDB(ctx context.Context) (*sqlx.DB, error) {\n\tdbc.mtx.Lock()\n\tdefer dbc.mtx.Unlock()\n\tif dbc.pgdb != nil {\n\t\treturn dbc.pgdb, nil\n\t}\n\tdb, err := sqlx.Open(\"postgres\", dbc.PostgreSQL.Connection.String())\n\tif err != nil {\n\t\tctxlog.FromContext(ctx).WithError(err).Error(\"postgresql connect failed\")\n\t\treturn nil, errDBConnection\n\t}\n\tif p := dbc.PostgreSQL.ConnectionPool; p > 0 {\n\t\tdb.SetMaxOpenConns(p)\n\t}\n\tif err := db.Ping(); err != nil {\n\t\tctxlog.FromContext(ctx).WithError(err).Error(\"postgresql connect succeeded but ping failed\")\n\t\tdb.Close()\n\t\treturn nil, errDBConnection\n\t}\n\tdbc.pgdb = db\n\treturn db, nil\n}\n\nfunc (dbc *DBConnector) Close() error {\n\tdbc.mtx.Lock()\n\tdefer dbc.mtx.Unlock()\n\tvar err error\n\tif dbc.pgdb != nil {\n\t\terr = dbc.pgdb.Close()\n\t\tdbc.pgdb = nil\n\t}\n\treturn err\n}\n"
  },
  {
    "path": "lib/ctrlctx/db_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ctrlctx\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/jmoiron/sqlx\"\n\t_ \"github.com/lib/pq\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&DatabaseSuite{})\n\ntype DatabaseSuite struct{}\n\nfunc (*DatabaseSuite) TestTransactionContext(c *check.C) {\n\tcfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()\n\tc.Assert(err, check.IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\n\tvar getterCalled int64\n\tgetter := func(context.Context) (*sqlx.DB, error) {\n\t\tatomic.AddInt64(&getterCalled, 1)\n\t\tdb, err := sqlx.Open(\"postgres\", cluster.PostgreSQL.Connection.String())\n\t\tc.Assert(err, check.IsNil)\n\t\treturn db, nil\n\t}\n\twrapper := WrapCallsInTransactions(getter)\n\twrappedFunc := wrapper(func(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\ttxes := make([]*sqlx.Tx, 20)\n\t\tvar wg sync.WaitGroup\n\t\tfor i := range txes {\n\t\t\ti := i\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\t// Concurrent calls to CurrentTx(),\n\t\t\t\t// with different children of the same\n\t\t\t\t// parent context, will all return the\n\t\t\t\t// same transaction.\n\t\t\t\tdefer wg.Done()\n\t\t\t\tctx, cancel := context.WithCancel(ctx)\n\t\t\t\tdefer cancel()\n\t\t\t\ttx, err := CurrentTx(ctx)\n\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\ttxes[i] = tx\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\t\tfor i := range txes[1:] {\n\t\t\tc.Check(txes[i], check.Equals, txes[i+1])\n\t\t}\n\t\treturn true, nil\n\t})\n\n\tok, err := wrappedFunc(context.Background(), \"blah\")\n\tc.Check(ok, check.Equals, true)\n\tc.Check(err, check.IsNil)\n\tc.Check(getterCalled, check.Equals, int64(1))\n\n\t// When a wrapped func returns without calling CurrentTx(),\n\t// calling CurrentTx() later shouldn't start a new\n\t// transaction.\n\tvar savedctx context.Context\n\tok, err = wrapper(func(ctx context.Context, opts interface{}) (interface{}, error) {\n\t\tsavedctx = ctx\n\t\treturn true, nil\n\t})(context.Background(), \"blah\")\n\tc.Check(ok, check.Equals, true)\n\tc.Check(err, check.IsNil)\n\ttx, err := CurrentTx(savedctx)\n\tc.Check(tx, check.IsNil)\n\tc.Check(err, check.NotNil)\n}\n"
  },
  {
    "path": "lib/deduplicationreport/command.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage deduplicationreport\n\nimport (\n\t\"io\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n)\n\nvar Command command\n\ntype command struct{}\n\n// RunCommand implements the subcommand \"deduplication-report <collection> <collection> ...\"\nfunc (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tvar err error\n\tlogger := ctxlog.New(stderr, \"text\", \"info\")\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"fatal\")\n\t\t}\n\t}()\n\n\tlogger.SetFormatter(cmd.NoPrefixFormatter{})\n\n\texitcode := report(prog, args, logger, stdout, stderr)\n\n\treturn exitcode\n}\n"
  },
  {
    "path": "lib/deduplicationreport/report.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage deduplicationreport\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/blockdigest\"\n\n\t\"github.com/dustin/go-humanize\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc deDuplicate(inputs []string) (trimmed []string) {\n\tseen := make(map[string]bool)\n\tfor _, uuid := range inputs {\n\t\tif !seen[uuid] {\n\t\t\tseen[uuid] = true\n\t\t\ttrimmed = append(trimmed, uuid)\n\t\t}\n\t}\n\treturn\n}\n\n// parseFlags returns either some inputs to process, or (if there are\n// no inputs to process) a nil slice and a suitable exit code.\nfunc parseFlags(prog string, args []string, logger *logrus.Logger, stderr io.Writer) (inputs []string, exitcode int) {\n\tflags := flag.NewFlagSet(prog, flag.ContinueOnError)\n\tflags.Usage = func() {\n\t\tfmt.Fprintf(flags.Output(), `\nUsage:\n  %s [options ...] <collection-uuid> <collection-uuid> ...\n\n  %s [options ...] <collection-pdh>,<collection-uuid> \\\n     <collection-pdh>,<collection-uuid> ...\n\n  This program analyzes the overlap in blocks used by 2 or more collections. It\n  prints a deduplication report that shows the nominal space used by the\n  collections, as well as the actual size and the amount of space that is saved\n  by Keep's deduplication.\n\n  The list of collections may be provided in two ways. A list of collection\n  uuids is sufficient. Alternatively, the PDH for each collection may also be\n  provided. This is will greatly speed up operation when the list contains\n  multiple collections with the same PDH.\n\n  Exit status will be zero if there were no errors generating the report.\n\nExample:\n\n  Use the 'arv' and 'jq' commands to get the list of the 100\n  largest collections and generate the deduplication report:\n\n  arv collection list --order 'file_size_total desc' --limit 100 | \\\n    jq -r '.items[] | [.portable_data_hash,.uuid] |@csv' | \\\n    sed -e 's/\"//g'|tr '\\n' ' ' | \\\n    xargs %s\n\nOptions:\n`, prog, prog, prog)\n\t\tflags.PrintDefaults()\n\t}\n\tloglevel := flags.String(\"log-level\", \"info\", \"logging level (debug, info, ...)\")\n\tif ok, code := cmd.ParseFlags(flags, prog, args, \"collection-uuid [...]\", stderr); !ok {\n\t\treturn nil, code\n\t}\n\n\tinputs = deDuplicate(flags.Args())\n\n\tif len(inputs) < 1 {\n\t\tfmt.Fprintf(stderr, \"Error: no collections provided\\n\")\n\t\treturn nil, 2\n\t}\n\n\tlvl, err := logrus.ParseLevel(*loglevel)\n\tif err != nil {\n\t\tfmt.Fprintf(stderr, \"Error: cannot parse log level: %s\\n\", err)\n\t\treturn nil, 2\n\t}\n\tlogger.SetLevel(lvl)\n\treturn inputs, 0\n}\n\nfunc blockList(collection arvados.Collection) (blocks map[string]int) {\n\tblocks = make(map[string]int)\n\tfor _, token := range bytes.Split([]byte(collection.ManifestText), []byte{' '}) {\n\t\tif blockdigest.IsBlockLocator(string(token)) {\n\t\t\tloc, _ := blockdigest.ParseBlockLocator(string(token))\n\t\t\tblocks[loc.Digest.String()] = loc.Size\n\t\t}\n\t}\n\treturn\n}\n\nfunc report(prog string, args []string, logger *logrus.Logger, stdout, stderr io.Writer) (exitcode int) {\n\tvar inputs []string\n\n\tinputs, exitcode = parseFlags(prog, args, logger, stderr)\n\tif inputs == nil {\n\t\treturn\n\t}\n\n\t// Arvados Client setup\n\tarv, err := 
arvadosclient.MakeArvadosClient()\n\tif err != nil {\n\t\tlogger.Errorf(\"Error creating Arvados object: %s\", err)\n\t\texitcode = 1\n\t\treturn\n\t}\n\n\ttype Col struct {\n\t\tFileSizeTotal int64\n\t\tFileCount     int64\n\t}\n\n\tblocks := make(map[string]map[string]int)\n\tpdhs := make(map[string]Col)\n\tvar nominalSize int64\n\n\tfor _, input := range inputs {\n\t\tvar uuid string\n\t\tvar pdh string\n\t\tif strings.Contains(input, \",\") {\n\t\t\t// The input is in the format pdh,uuid. This will allow us to save time on duplicate pdh's\n\t\t\ttmp := strings.Split(input, \",\")\n\t\t\tpdh = tmp[0]\n\t\t\tuuid = tmp[1]\n\t\t} else {\n\t\t\t// The input must be a plain uuid\n\t\t\tuuid = input\n\t\t}\n\t\tif !strings.Contains(uuid, \"-4zz18-\") {\n\t\t\tlogger.Errorf(\"Error: uuid must refer to collection object\")\n\t\t\texitcode = 1\n\t\t\treturn\n\t\t}\n\t\tif _, ok := pdhs[pdh]; ok {\n\t\t\t// We've processed a collection with this pdh already. Simply add its\n\t\t\t// size to the totals and move on to the next one.\n\t\t\t// Note that we simply trust the PDH matches the collection UUID here,\n\t\t\t// in other words, we use it over the UUID. If they don't match, the report\n\t\t\t// will be wrong.\n\t\t\tnominalSize += pdhs[pdh].FileSizeTotal\n\t\t} else {\n\t\t\tvar collection arvados.Collection\n\t\t\terr = arv.Get(\"collections\", uuid, nil, &collection)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Errorf(\"Error: unable to retrieve collection: %s\", err)\n\t\t\t\texitcode = 1\n\t\t\t\treturn\n\t\t\t}\n\t\t\tblocks[uuid] = blockList(collection)\n\t\t\tif pdh != \"\" && collection.PortableDataHash != pdh {\n\t\t\t\tlogger.Errorf(\"Error: the collection with UUID %s has PDH %s, but a different PDH was provided in the arguments: %s\", uuid, collection.PortableDataHash, pdh)\n\t\t\t\texitcode = 1\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif pdh == \"\" {\n\t\t\t\tpdh = collection.PortableDataHash\n\t\t\t}\n\n\t\t\tcol := Col{}\n\t\t\tif collection.FileSizeTotal != 0 || collection.FileCount != 0 {\n\t\t\t\tnominalSize += collection.FileSizeTotal\n\t\t\t\tcol.FileSizeTotal = collection.FileSizeTotal\n\t\t\t\tcol.FileCount = int64(collection.FileCount)\n\t\t\t} else {\n\t\t\t\t// Collections created with old Arvados versions do not always have the total file size and count cached in the collections object\n\t\t\t\tvar collSize int64\n\t\t\t\tfor _, size := range blocks[uuid] {\n\t\t\t\t\tcollSize += int64(size)\n\t\t\t\t}\n\t\t\t\tnominalSize += collSize\n\t\t\t\tcol.FileSizeTotal = collSize\n\t\t\t}\n\t\t\tpdhs[pdh] = col\n\t\t}\n\n\t\tif pdhs[pdh].FileCount != 0 {\n\t\t\tfmt.Fprintf(stdout, \"Collection %s: pdh %s; nominal size %d (%s); file count %d\\n\", uuid, pdh, pdhs[pdh].FileSizeTotal, humanize.IBytes(uint64(pdhs[pdh].FileSizeTotal)), pdhs[pdh].FileCount)\n\t\t} else {\n\t\t\tfmt.Fprintf(stdout, \"Collection %s: pdh %s; nominal size %d (%s)\\n\", uuid, pdh, pdhs[pdh].FileSizeTotal, humanize.IBytes(uint64(pdhs[pdh].FileSizeTotal)))\n\t\t}\n\t}\n\n\tvar totalSize int64\n\tseen := make(map[string]bool)\n\tfor _, v := range blocks {\n\t\tfor digest, size := range v {\n\t\t\tif !seen[digest] {\n\t\t\t\tseen[digest] = true\n\t\t\t\ttotalSize += int64(size)\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Fprintln(stdout)\n\tfmt.Fprintf(stdout, \"Collections:                 %15d\\n\", len(inputs))\n\tfmt.Fprintf(stdout, \"Nominal size of stored data: %15d bytes (%s)\\n\", nominalSize, humanize.IBytes(uint64(nominalSize)))\n\tfmt.Fprintf(stdout, \"Actual size of stored data:  %15d bytes 
(%s)\\n\", totalSize, humanize.IBytes(uint64(totalSize)))\n\tfmt.Fprintf(stdout, \"Saved by Keep deduplication: %15d bytes (%s)\\n\", nominalSize-totalSize, humanize.IBytes(uint64(nominalSize-totalSize)))\n\n\treturn exitcode\n}\n"
  },
  {
    "path": "lib/deduplicationreport/report_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage deduplicationreport\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&Suite{})\n\ntype Suite struct{}\n\nfunc (s *Suite) TearDownSuite(c *check.C) {\n\t// Undo any changes/additions to the database so they don't affect subsequent tests.\n\tarvadostest.ResetEnv()\n}\n\nfunc (*Suite) TestUsage(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\texitcode := Command.RunCommand(\"deduplicationreport.test\", []string{\"-h\", \"-log-level=debug\"}, &bytes.Buffer{}, &stdout, &stderr)\n\tc.Check(exitcode, check.Equals, 0)\n\tc.Check(stdout.String(), check.Equals, \"\")\n\tc.Log(stderr.String())\n\tc.Check(stderr.String(), check.Matches, `(?ms).*Usage:.*`)\n}\n\nfunc (*Suite) TestTwoIdenticalUUIDs(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\t// Run dedupreport with 2 identical uuids\n\texitcode := Command.RunCommand(\"deduplicationreport.test\", []string{arvadostest.FooCollection, arvadostest.FooCollection}, &bytes.Buffer{}, &stdout, &stderr)\n\tc.Check(exitcode, check.Equals, 0)\n\tc.Check(stdout.String(), check.Matches, \"(?ms).*Collections:[[:space:]]+1.*\")\n\tc.Check(stdout.String(), check.Matches, \"(?ms).*Nominal size of stored data:[[:space:]]+3 bytes \\\\(3 B\\\\).*\")\n\tc.Check(stdout.String(), check.Matches, \"(?ms).*Actual size of stored data:[[:space:]]+3 bytes \\\\(3 B\\\\).*\")\n\tc.Check(stdout.String(), check.Matches, \"(?ms).*Saved by Keep deduplication:[[:space:]]+0 bytes \\\\(0 B\\\\).*\")\n\tc.Log(stderr.String())\n}\n\nfunc (*Suite) TestTwoUUIDsInvalidPDH(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\t// Run dedupreport with pdh,uuid where pdh does not match\n\texitcode := Command.RunCommand(\"deduplicationreport.test\", []string{arvadostest.FooAndBarFilesInDirPDH + \",\" + arvadostest.FooCollection, arvadostest.FooCollection}, &bytes.Buffer{}, &stdout, &stderr)\n\tc.Check(exitcode, check.Equals, 1)\n\tc.Check(stdout.String(), check.Equals, \"\")\n\tc.Log(stderr.String())\n\tc.Check(stderr.String(), check.Matches, `(?ms).*Error: the collection with UUID zzzzz-4zz18-fy296fx3hot09f7 has PDH 1f4b0bc7583c2a7f9102c395f4ffc5e3\\+45, but a different PDH was provided in the arguments: 870369fc72738603c2fad16664e50e2d\\+58.*`)\n}\n\nfunc (*Suite) TestNonExistentCollection(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\t// Run dedupreport with many UUIDs\n\texitcode := Command.RunCommand(\"deduplicationreport.test\", []string{arvadostest.FooCollection, arvadostest.NonexistentCollection}, &bytes.Buffer{}, &stdout, &stderr)\n\tc.Check(exitcode, check.Equals, 1)\n\tc.Check(stdout.String(), check.Equals, \"Collection zzzzz-4zz18-fy296fx3hot09f7: pdh 1f4b0bc7583c2a7f9102c395f4ffc5e3+45; nominal size 3 (3 B)\\n\")\n\tc.Log(stderr.String())\n\tc.Check(stderr.String(), check.Matches, `(?ms).*Error: unable to retrieve collection:.*404 Not Found.*`)\n}\n\nfunc (*Suite) TestManyUUIDsNoOverlap(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\t// Run dedupreport with 5 UUIDs\n\texitcode := Command.RunCommand(\"deduplicationreport.test\", []string{arvadostest.FooCollection, arvadostest.HelloWorldCollection, arvadostest.FooBarDirCollection, arvadostest.WazVersion1Collection, arvadostest.UserAgreementCollection}, &bytes.Buffer{}, &stdout, 
&stderr)\n\tc.Check(exitcode, check.Equals, 0)\n\tc.Check(stdout.String(), check.Matches, \"(?ms).*Collections:[[:space:]]+5.*\")\n\tc.Check(stdout.String(), check.Matches, \"(?ms).*Nominal size of stored data:[[:space:]]+249049 bytes \\\\(243 KiB\\\\).*\")\n\tc.Check(stdout.String(), check.Matches, \"(?ms).*Actual size of stored data:[[:space:]]+249049 bytes \\\\(243 KiB\\\\).*\")\n\tc.Check(stdout.String(), check.Matches, \"(?ms).*Saved by Keep deduplication:[[:space:]]+0 bytes \\\\(0 B\\\\).*\")\n\tc.Log(stderr.String())\n\tc.Check(stderr.String(), check.Equals, \"\")\n}\n\nfunc (*Suite) TestTwoOverlappingCollections(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\t// Create two collections\n\tarv := arvados.NewClientFromEnv()\n\n\tvar c1 arvados.Collection\n\terr := arv.RequestAndDecode(&c1, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\"collection\": map[string]interface{}{\"manifest_text\": \". d3b07384d113edec49eaa6238ad5ff00+4 0:4:foo\\n\"}})\n\tc.Assert(err, check.Equals, nil)\n\n\tvar c2 arvados.Collection\n\terr = arv.RequestAndDecode(&c2, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\"collection\": map[string]interface{}{\"manifest_text\": \". c157a79031e1c40f85931829bc5fc552+4 d3b07384d113edec49eaa6238ad5ff00+4 0:4:bar 4:4:foo\\n\"}})\n\tc.Assert(err, check.Equals, nil)\n\n\tfor _, trial := range []struct {\n\t\tfield1 string\n\t\tfield2 string\n\t}{\n\t\t{\n\t\t\t// Run dedupreport with 2 arguments: uuid uuid\n\t\t\tfield1: c1.UUID,\n\t\t\tfield2: c2.UUID,\n\t\t},\n\t\t{\n\t\t\t// Run dedupreport with 2 arguments: pdh,uuid uuid\n\t\t\tfield1: c1.PortableDataHash + \",\" + c1.UUID,\n\t\t\tfield2: c2.UUID,\n\t\t},\n\t} {\n\t\texitcode := Command.RunCommand(\"deduplicationreport.test\", []string{trial.field1, trial.field2}, &bytes.Buffer{}, &stdout, &stderr)\n\t\tc.Check(exitcode, check.Equals, 0)\n\t\tc.Check(stdout.String(), check.Matches, \"(?ms).*Nominal size of stored data:[[:space:]]+12 bytes \\\\(12 B\\\\).*\")\n\t\tc.Check(stdout.String(), check.Matches, \"(?ms).*Actual size of stored data:[[:space:]]+8 bytes \\\\(8 B\\\\).*\")\n\t\tc.Check(stdout.String(), check.Matches, \"(?ms).*Saved by Keep deduplication:[[:space:]]+4 bytes \\\\(4 B\\\\).*\")\n\t\tc.Log(stderr.String())\n\t\tc.Check(stderr.String(), check.Equals, \"\")\n\t}\n}\n"
  },
  {
    "path": "lib/diagnostics/cmd.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage diagnostics\n\nimport (\n\t\"archive/tar\"\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/sha256\"\n\t_ \"embed\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/health\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype Command struct{}\n\nfunc (Command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tvar diag diagnoser\n\tf := flag.NewFlagSet(prog, flag.ContinueOnError)\n\tf.StringVar(&diag.projectName, \"project-name\", \"scratch area for diagnostics\", \"`name` of project to find/create in home project and use for temporary/test objects\")\n\tf.StringVar(&diag.logLevel, \"log-level\", \"info\", \"logging `level` (debug, info, warning, error)\")\n\tf.StringVar(&diag.dockerImage, \"docker-image\", \"\", \"`image` (tag or portable data hash) to use when running a test container, or \\\"hello-world\\\" to use embedded hello-world image (default: build a custom image containing this executable, and run diagnostics inside the container too)\")\n\tf.StringVar(&diag.dockerImageFrom, \"docker-image-from\", \"debian:bookworm-slim\", \"`base` image to use when building a custom image (see https://doc.arvados.org/main/admin/diagnostics.html#container-options)\")\n\tf.BoolVar(&diag.checkInternal, \"internal-client\", false, \"check that this host is considered an \\\"internal\\\" client\")\n\tf.BoolVar(&diag.checkExternal, \"external-client\", false, \"check that this host is considered an \\\"external\\\" client\")\n\tf.BoolVar(&diag.checkContainer, \"container-client\", false, \"check container connectivity, i.e., host is considered an \\\"internal\\\" client if ARVADOS_KEEP_SERVICES is not set\")\n\tf.BoolVar(&diag.verbose, \"v\", false, \"verbose: include more information in report\")\n\tf.IntVar(&diag.priority, \"priority\", 500, \"priority for test container (1..1000, or 0 to skip)\")\n\tf.DurationVar(&diag.timeout, \"timeout\", 10*time.Second, \"timeout for http requests\")\n\tif ok, code := cmd.ParseFlags(f, prog, args, \"\", stderr); !ok {\n\t\treturn code\n\t}\n\tdiag.stdout = stdout\n\tdiag.stderr = stderr\n\tdiag.logger = ctxlog.New(stdout, \"text\", diag.logLevel)\n\tdiag.logger.SetFormatter(&logrus.TextFormatter{DisableTimestamp: true, DisableLevelTruncation: true, PadLevelText: true})\n\tdiag.runtests()\n\tif len(diag.errors) == 0 {\n\t\tdiag.logger.Info(\"--- no errors ---\")\n\t\treturn 0\n\t} else {\n\t\tif diag.logger.Level > logrus.ErrorLevel {\n\t\t\tfmt.Fprint(stdout, \"\\n--- cut here --- error summary ---\\n\\n\")\n\t\t\tfor _, e := range diag.errors {\n\t\t\t\tdiag.logger.Error(e)\n\t\t\t}\n\t\t}\n\t\treturn 1\n\t}\n}\n\n// docker save hello-world > hello-world.tar\n//\n//go:embed hello-world.tar\nvar HelloWorldDockerImage []byte\n\ntype diagnoser struct {\n\tstdout          io.Writer\n\tstderr          io.Writer\n\tlogLevel        string\n\tpriority        int\n\tprojectName     string\n\tdockerImage     string\n\tdockerImageFrom string\n\tcheckInternal   bool\n\tcheckExternal   bool\n\tcheckContainer  bool\n\tverbose         bool\n\ttimeout         
time.Duration\n\tlogger          *logrus.Logger\n\terrors          []string\n\tdone            map[int]bool\n}\n\nfunc (diag *diagnoser) debugf(f string, args ...interface{}) {\n\tdiag.logger.Debugf(\"  ... \"+f, args...)\n}\n\nfunc (diag *diagnoser) infof(f string, args ...interface{}) {\n\tdiag.logger.Infof(\"  ... \"+f, args...)\n}\n\nfunc (diag *diagnoser) verbosef(f string, args ...interface{}) {\n\tif diag.verbose {\n\t\tdiag.logger.Infof(\"  ... \"+f, args...)\n\t}\n}\n\nfunc (diag *diagnoser) warnf(f string, args ...interface{}) {\n\tdiag.logger.Warnf(\"  ... \"+f, args...)\n}\n\nfunc (diag *diagnoser) errorf(f string, args ...interface{}) {\n\tdiag.logger.Errorf(f, args...)\n\tdiag.errors = append(diag.errors, fmt.Sprintf(f, args...))\n}\n\n// Run the given func, logging appropriate messages before and after,\n// adding timing info, etc.\n//\n// The id argument should be unique among tests, and shouldn't change\n// when other tests are added/removed.\nfunc (diag *diagnoser) dotest(id int, title string, fn func() error) {\n\tif diag.done == nil {\n\t\tdiag.done = map[int]bool{}\n\t} else if diag.done[id] {\n\t\tdiag.errorf(\"(bug) reused test id %d\", id)\n\t}\n\tdiag.done[id] = true\n\n\tdiag.logger.Infof(\"%4d: %s\", id, title)\n\tt0 := time.Now()\n\terr := fn()\n\telapsed := fmt.Sprintf(\"%d ms\", time.Now().Sub(t0)/time.Millisecond)\n\tif err != nil {\n\t\tdiag.errorf(\"%4d: %s (%s): %s\", id, title, elapsed, err)\n\t} else {\n\t\tdiag.logger.Debugf(\"%4d: %s (%s): ok\", id, title, elapsed)\n\t}\n}\n\nfunc (diag *diagnoser) runtests() {\n\tclient := arvados.NewClientFromEnv()\n\t// Disable auto-retry, use context instead\n\tclient.Timeout = 0\n\n\tif client.APIHost == \"\" || client.AuthToken == \"\" {\n\t\tdiag.errorf(\"ARVADOS_API_HOST and ARVADOS_API_TOKEN environment variables are not set -- aborting without running any tests\")\n\t\treturn\n\t}\n\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\tdiag.warnf(\"error getting hostname: %s\", err)\n\t} else {\n\t\tdiag.verbosef(\"hostname = %s\", hostname)\n\t}\n\n\tdiag.dotest(5, \"running health check (same as `arvados-server check`)\", func() error {\n\t\tldr := config.NewLoader(&bytes.Buffer{}, ctxlog.New(&bytes.Buffer{}, \"text\", \"info\"))\n\t\tldr.SetupFlags(flag.NewFlagSet(\"diagnostics\", flag.ContinueOnError))\n\t\tcfg, err := ldr.Load()\n\t\tif err != nil {\n\t\t\tdiag.infof(\"skipping because config could not be loaded: %s\", err)\n\t\t\treturn nil\n\t\t}\n\t\tcluster, err := cfg.GetCluster(\"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif cluster.SystemRootToken != os.Getenv(\"ARVADOS_API_TOKEN\") {\n\t\t\treturn fmt.Errorf(\"diagnostics usage error: %s is readable but SystemRootToken does not match $ARVADOS_API_TOKEN (to fix, either run 'arvados-client sudo diagnostics' to load everything from config file, or set ARVADOS_CONFIG=- to load nothing from config file)\", ldr.Path)\n\t\t}\n\t\tagg := &health.Aggregator{Cluster: cluster}\n\t\tresp := agg.ClusterHealth()\n\t\tfor _, e := range resp.Errors {\n\t\t\tdiag.errorf(\"health check: %s\", e)\n\t\t}\n\t\tif len(resp.Errors) > 0 {\n\t\t\tdiag.infof(\"consider running `arvados-server check -yaml` for a comprehensive report\")\n\t\t}\n\t\tdiag.verbosef(\"reported clock skew = %v\", resp.ClockSkew)\n\t\treported := map[string]bool{}\n\t\tfor _, result := range resp.Checks {\n\t\t\tversion := strings.SplitN(result.Metrics.Version, \" (go\", 2)[0]\n\t\t\tif version != \"\" && !reported[version] {\n\t\t\t\tdiag.verbosef(\"arvados version = %s\", 
version)\n\t\t\t\treported[version] = true\n\t\t\t}\n\t\t}\n\t\treported = map[string]bool{}\n\t\tfor _, result := range resp.Checks {\n\t\t\tif result.Server != \"\" && !reported[result.Server] {\n\t\t\t\tdiag.verbosef(\"http frontend version = %s\", result.Server)\n\t\t\t\treported[result.Server] = true\n\t\t\t}\n\t\t}\n\t\treported = map[string]bool{}\n\t\tfor _, result := range resp.Checks {\n\t\t\tif sha := result.ConfigSourceSHA256; sha != \"\" && !reported[sha] {\n\t\t\t\tdiag.verbosef(\"config file sha256 = %s\", sha)\n\t\t\t\treported[sha] = true\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\tvar dd arvados.DiscoveryDocument\n\tddpath := \"discovery/v1/apis/arvados/v1/rest\"\n\tdiag.dotest(10, fmt.Sprintf(\"getting discovery document from https://%s/%s\", client.APIHost, ddpath), func() error {\n\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\tdefer cancel()\n\t\terr := client.RequestAndDecodeContext(ctx, &dd, \"GET\", ddpath, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdiag.verbosef(\"BlobSignatureTTL = %d\", dd.BlobSignatureTTL)\n\t\treturn nil\n\t})\n\n\tvar cluster arvados.Cluster\n\tcfgpath := \"arvados/v1/config\"\n\tcfgOK := false\n\tdiag.dotest(20, fmt.Sprintf(\"getting exported config from https://%s/%s\", client.APIHost, cfgpath), func() error {\n\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\tdefer cancel()\n\t\terr := client.RequestAndDecodeContext(ctx, &cluster, \"GET\", cfgpath, nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdiag.verbosef(\"Collections.BlobSigning = %v\", cluster.Collections.BlobSigning)\n\t\tcfgOK = true\n\t\treturn nil\n\t})\n\n\tvar user arvados.User\n\tdiag.dotest(30, \"getting current user record\", func() error {\n\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\tdefer cancel()\n\t\terr := client.RequestAndDecodeContext(ctx, &user, \"GET\", \"arvados/v1/users/current\", nil, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdiag.verbosef(\"user uuid = %s\", user.UUID)\n\t\treturn nil\n\t})\n\n\tif !cfgOK {\n\t\tdiag.errorf(\"cannot proceed without cluster config -- aborting without running any further tests\")\n\t\treturn\n\t}\n\n\t// uncomment to create some spurious errors\n\t// cluster.Services.WebDAVDownload.ExternalURL.Host = \"0.0.0.0:9\"\n\n\t// TODO: detect routing errors here, like finding wb2 at the\n\t// wb1 address.\n\tfor i, svc := range []struct {\n\t\tname   string\n\t\tconfig *arvados.Service\n\t}{\n\t\t{\"Keepproxy\", &cluster.Services.Keepproxy},\n\t\t{\"WebDAV\", &cluster.Services.WebDAV},\n\t\t{\"WebDAVDownload\", &cluster.Services.WebDAVDownload},\n\t\t{\"Websocket\", &cluster.Services.Websocket},\n\t\t{\"Workbench1\", &cluster.Services.Workbench1},\n\t\t{\"Workbench2\", &cluster.Services.Workbench2},\n\t} {\n\t\tu := url.URL(svc.config.ExternalURL)\n\t\tdiag.dotest(40+i, fmt.Sprintf(\"connecting to %s endpoint %s\", svc.name, u.String()), func() error {\n\t\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\t\tdefer cancel()\n\t\t\tif strings.HasPrefix(u.Scheme, \"ws\") {\n\t\t\t\t// We can do a real websocket test elsewhere,\n\t\t\t\t// but for now we'll just check the https\n\t\t\t\t// connection.\n\t\t\t\tu.Scheme = \"http\" + u.Scheme[2:]\n\t\t\t}\n\t\t\tif svc.config == &cluster.Services.WebDAV && strings.HasPrefix(u.Host, \"*\") {\n\t\t\t\tu.Host = \"d41d8cd98f00b204e9800998ecf8427e-0\" + 
u.Host[1:]\n\t\t\t}\n\t\t\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tresp.Body.Close()\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tfor i, url := range []string{\n\t\tcluster.Services.Controller.ExternalURL.String(),\n\t\tcluster.Services.Keepproxy.ExternalURL.String() + \"d41d8cd98f00b204e9800998ecf8427e+0\",\n\t\tcluster.Services.WebDAVDownload.ExternalURL.String(),\n\t} {\n\t\tdiag.dotest(50+i, fmt.Sprintf(\"checking CORS headers at %s\", url), func() error {\n\t\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\t\tdefer cancel()\n\t\t\treq, err := http.NewRequestWithContext(ctx, \"GET\", url, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treq.Header.Set(\"Origin\", \"https://example.com\")\n\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif hdr := resp.Header.Get(\"Access-Control-Allow-Origin\"); hdr != \"*\" {\n\t\t\t\treturn fmt.Errorf(\"expected \\\"Access-Control-Allow-Origin: *\\\", got %q\", hdr)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tvar keeplist arvados.KeepServiceList\n\tdiag.dotest(60, \"checking internal/external client detection\", func() error {\n\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\tdefer cancel()\n\t\terr := client.RequestAndDecodeContext(ctx, &keeplist, \"GET\", \"arvados/v1/keep_services/accessible\", nil, arvados.ListOptions{Limit: 999999})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error getting keep services list: %s\", err)\n\t\t} else if len(keeplist.Items) == 0 {\n\t\t\treturn fmt.Errorf(\"controller did not return any keep services\")\n\t\t}\n\t\tfound := map[string]int{}\n\t\tfor _, ks := range keeplist.Items {\n\t\t\tfound[ks.ServiceType]++\n\t\t}\n\t\tisInternal := found[\"proxy\"] == 0 && len(keeplist.Items) > 0\n\t\tisExternal := found[\"proxy\"] > 0 && found[\"proxy\"] == len(keeplist.Items)\n\t\tif isExternal {\n\t\t\tdiag.infof(\"controller returned only proxy services, this host is treated as \\\"external\\\"\")\n\t\t} else if isInternal {\n\t\t\tdiag.infof(\"controller returned only non-proxy services, this host is treated as \\\"internal\\\"\")\n\t\t}\n\t\tif diag.checkContainer {\n\t\t\tif os.Getenv(\"ARVADOS_KEEP_SERVICES\") == \"\" {\n\t\t\t\tdiag.checkInternal = true\n\t\t\t\tdiag.infof(\"ARVADOS_KEEP_SERVICES is not set, container connectivity relies on being treated as \\\"internal\\\"\")\n\t\t\t} else {\n\t\t\t\tdiag.infof(\"ARVADOS_KEEP_SERVICES is set, container connectivity does not rely on being treated as \\\"internal\\\"\")\n\t\t\t}\n\t\t}\n\t\tif (diag.checkInternal && !isInternal) || (diag.checkExternal && !isExternal) {\n\t\t\treturn fmt.Errorf(\"expecting internal=%v external=%v, but found internal=%v external=%v\", diag.checkInternal, diag.checkExternal, isInternal, isExternal)\n\t\t}\n\t\treturn nil\n\t})\n\n\tfor i, ks := range keeplist.Items {\n\t\tu := url.URL{\n\t\t\tScheme: \"http\",\n\t\t\tHost:   net.JoinHostPort(ks.ServiceHost, fmt.Sprintf(\"%d\", ks.ServicePort)),\n\t\t\tPath:   \"/\",\n\t\t}\n\t\tif ks.ServiceSSLFlag {\n\t\t\tu.Scheme = \"https\"\n\t\t}\n\t\tdiag.dotest(61+i, fmt.Sprintf(\"reading+writing via keep service at %s\", u.String()), func() error {\n\t\t\tctx, cancel := context.WithDeadline(context.Background(), 
time.Now().Add(diag.timeout))\n\t\t\tdefer cancel()\n\t\t\treq, err := http.NewRequestWithContext(ctx, \"PUT\", u.String()+\"d41d8cd98f00b204e9800998ecf8427e\", nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treq.Header.Set(\"Authorization\", \"Bearer \"+client.AuthToken)\n\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"reading response body: %s\", err)\n\t\t\t}\n\t\t\tloc := strings.TrimSpace(string(body))\n\t\t\tif !strings.HasPrefix(loc, \"d41d8\") {\n\t\t\t\treturn fmt.Errorf(\"unexpected response from write: %q\", body)\n\t\t\t}\n\n\t\t\treq, err = http.NewRequestWithContext(ctx, \"GET\", u.String()+loc, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treq.Header.Set(\"Authorization\", \"Bearer \"+client.AuthToken)\n\t\t\tresp, err = http.DefaultClient.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"reading response body: %s\", err)\n\t\t\t}\n\t\t\tif len(body) != 0 {\n\t\t\t\treturn fmt.Errorf(\"unexpected response from read: %q\", body)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tvar project arvados.Group\n\tdiag.dotest(80, fmt.Sprintf(\"finding/creating %q project\", diag.projectName), func() error {\n\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\tdefer cancel()\n\t\tvar grplist arvados.GroupList\n\t\terr := client.RequestAndDecodeContext(ctx, &grplist, \"GET\", \"arvados/v1/groups\", nil, arvados.ListOptions{\n\t\t\tFilters: []arvados.Filter{\n\t\t\t\t{\"name\", \"=\", diag.projectName},\n\t\t\t\t{\"group_class\", \"=\", \"project\"},\n\t\t\t\t{\"owner_uuid\", \"=\", user.UUID}},\n\t\t\tLimit: 999999})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"list groups: %s\", err)\n\t\t}\n\t\tif len(grplist.Items) > 0 {\n\t\t\tproject = grplist.Items[0]\n\t\t\tdiag.verbosef(\"using existing project, uuid = %s\", project.UUID)\n\t\t\treturn nil\n\t\t}\n\t\tdiag.debugf(\"list groups: ok, no results\")\n\t\terr = client.RequestAndDecodeContext(ctx, &project, \"POST\", \"arvados/v1/groups\", nil, map[string]interface{}{\"group\": map[string]interface{}{\n\t\t\t\"name\":        diag.projectName,\n\t\t\t\"group_class\": \"project\",\n\t\t}})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"create project: %s\", err)\n\t\t}\n\t\tdiag.verbosef(\"created project, uuid = %s\", project.UUID)\n\t\treturn nil\n\t})\n\n\tvar collection arvados.Collection\n\tdiag.dotest(90, \"creating temporary collection\", func() error {\n\t\tif project.UUID == \"\" {\n\t\t\treturn fmt.Errorf(\"skipping, no project to work in\")\n\t\t}\n\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\tdefer cancel()\n\t\terr := client.RequestAndDecodeContext(ctx, &collection, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\t\"ensure_unique_name\": true,\n\t\t\t\"collection\": map[string]interface{}{\n\t\t\t\t\"owner_uuid\": project.UUID,\n\t\t\t\t\"name\":       \"test collection\",\n\t\t\t\t\"trash_at\":   time.Now().Add(time.Hour)}})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdiag.verbosef(\"ok, uuid = %s\", collection.UUID)\n\t\treturn nil\n\t})\n\n\tif collection.UUID != \"\" {\n\t\tdefer func() {\n\t\t\tdiag.dotest(9990, \"deleting temporary collection\", 
func() error {\n\t\t\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\t\t\tdefer cancel()\n\t\t\t\treturn client.RequestAndDecodeContext(ctx, nil, \"DELETE\", \"arvados/v1/collections/\"+collection.UUID, nil, nil)\n\t\t\t})\n\t\t}()\n\t}\n\n\ttempdir, err := ioutil.TempDir(\"\", \"arvados-diagnostics\")\n\tif err != nil {\n\t\tdiag.errorf(\"error creating temp dir: %s\", err)\n\t\treturn\n\t}\n\tdefer os.RemoveAll(tempdir)\n\n\tvar imageSHA2 string\n\tvar dockerImageData []byte\n\tif diag.dockerImage != \"\" || diag.priority < 1 {\n\t\t// We won't be using the self-built docker image, so\n\t\t// don't build it.  But we will write the embedded\n\t\t// \"hello-world\" image to our test collection to test\n\t\t// upload/download, whether or not we're using it as a\n\t\t// docker image.\n\t\tdockerImageData = HelloWorldDockerImage\n\n\t\tif diag.priority > 0 {\n\t\t\timageSHA2, err = getSHA2FromImageData(dockerImageData)\n\t\t\tif err != nil {\n\t\t\t\tdiag.errorf(\"internal error/bug: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t} else if selfbin, err := os.Readlink(\"/proc/self/exe\"); err != nil {\n\t\tdiag.errorf(\"readlink /proc/self/exe: %s\", err)\n\t\treturn\n\t} else if selfbindata, err := os.ReadFile(selfbin); err != nil {\n\t\tdiag.errorf(\"error reading %s: %s\", selfbin, err)\n\t\treturn\n\t} else {\n\t\tselfbinSha := fmt.Sprintf(\"%x\", sha256.Sum256(selfbindata))\n\t\ttag := \"arvados-client-diagnostics:\" + selfbinSha[:9]\n\t\terr := os.WriteFile(tempdir+\"/arvados-client\", selfbindata, 0777)\n\t\tif err != nil {\n\t\t\tdiag.errorf(\"error writing %s: %s\", tempdir+\"/arvados-client\", err)\n\t\t\treturn\n\t\t}\n\n\t\tdockerfile := \"FROM \" + diag.dockerImageFrom + \"\\n\"\n\t\tdockerfile += \"RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends libfuse2 ca-certificates && apt-get clean\\n\"\n\t\tdockerfile += \"COPY /arvados-client /arvados-client\\n\"\n\t\tcmd := exec.Command(\"docker\", \"build\", \"--tag\", tag, \"-f\", \"-\", tempdir)\n\t\tcmd.Stdin = strings.NewReader(dockerfile)\n\t\tcmd.Stdout = diag.stderr\n\t\tcmd.Stderr = diag.stderr\n\t\terr = cmd.Run()\n\t\tif err != nil {\n\t\t\tdiag.errorf(\"error building docker image: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tcheckversion, err := exec.Command(\"docker\", \"run\", tag, \"/arvados-client\", \"version\").CombinedOutput()\n\t\tif err != nil {\n\t\t\tdiag.errorf(\"docker image does not seem to work: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tdiag.infof(\"arvados-client version: %s\", checkversion)\n\n\t\tbuf, err := exec.Command(\"docker\", \"inspect\", \"--format={{.Id}}\", tag).Output()\n\t\tif err != nil {\n\t\t\tdiag.errorf(\"docker inspect --format={{.Id}} %s: %s\", tag, err)\n\t\t\treturn\n\t\t}\n\t\timageSHA2 = min64HexDigits.FindString(string(buf))\n\t\tif len(imageSHA2) != 64 {\n\t\t\tdiag.errorf(\"docker inspect --format={{.Id}} output %q does not seem to contain sha256 digest\", buf)\n\t\t\treturn\n\t\t}\n\n\t\tbuf, err = exec.Command(\"docker\", \"save\", tag).Output()\n\t\tif err != nil {\n\t\t\tdiag.errorf(\"docker save %s: %s\", tag, err)\n\t\t\treturn\n\t\t}\n\t\tdiag.infof(\"docker image size is %d\", len(buf))\n\t\tdockerImageData = buf\n\t}\n\n\ttarfilename := \"sha256:\" + imageSHA2 + \".tar\"\n\n\tdiag.dotest(100, \"uploading file via webdav\", func() error {\n\t\ttimeout := diag.timeout\n\t\tif len(dockerImageData) > 10<<20 && timeout < time.Minute {\n\t\t\t// Extend the normal http timeout if we're\n\t\t\t// 
uploading a substantial docker image.\n\t\t\ttimeout = time.Minute\n\t\t}\n\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(timeout))\n\t\tdefer cancel()\n\t\tif collection.UUID == \"\" {\n\t\t\treturn fmt.Errorf(\"skipping, no test collection\")\n\t\t}\n\t\tt0 := time.Now()\n\t\treq, err := http.NewRequestWithContext(ctx, \"PUT\", cluster.Services.WebDAVDownload.ExternalURL.String()+\"c=\"+collection.UUID+\"/\"+tarfilename, bytes.NewReader(dockerImageData))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"BUG? http.NewRequest: %s\", err)\n\t\t}\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+client.AuthToken)\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error performing http request: %s\", err)\n\t\t}\n\t\tresp.Body.Close()\n\t\tif resp.StatusCode != http.StatusCreated {\n\t\t\treturn fmt.Errorf(\"status %s\", resp.Status)\n\t\t}\n\t\tdiag.verbosef(\"upload ok, status %s, %f MB/s\", resp.Status, float64(len(dockerImageData))/time.Since(t0).Seconds()/1000000)\n\t\terr = client.RequestAndDecodeContext(ctx, &collection, \"GET\", \"arvados/v1/collections/\"+collection.UUID, nil, nil)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"get updated collection: %s\", err)\n\t\t}\n\t\tdiag.verbosef(\"upload pdh %s\", collection.PortableDataHash)\n\t\treturn nil\n\t})\n\n\tdavurl := cluster.Services.WebDAV.ExternalURL\n\tdavWildcard := strings.HasPrefix(davurl.Host, \"*--\") || strings.HasPrefix(davurl.Host, \"*.\")\n\tdiag.dotest(110, fmt.Sprintf(\"checking WebDAV ExternalURL wildcard (%s)\", davurl), func() error {\n\t\tif davurl.Host == \"\" {\n\t\t\treturn fmt.Errorf(\"host missing - content previews will not work\")\n\t\t}\n\t\tif !davWildcard && !cluster.Collections.TrustAllContent {\n\t\t\tdiag.warnf(\"WebDAV ExternalURL has no leading wildcard and TrustAllContent==false - content previews will not work\")\n\t\t}\n\t\treturn nil\n\t})\n\n\tfor i, trial := range []struct {\n\t\tneedcoll     bool\n\t\tneedWildcard bool\n\t\tstatus       int\n\t\tfileurl      string\n\t}{\n\t\t{false, false, http.StatusNotFound, strings.Replace(davurl.String(), \"*\", \"d41d8cd98f00b204e9800998ecf8427e-0\", 1) + \"foo\"},\n\t\t{false, false, http.StatusNotFound, strings.Replace(davurl.String(), \"*\", \"d41d8cd98f00b204e9800998ecf8427e-0\", 1) + tarfilename},\n\t\t{false, false, http.StatusNotFound, cluster.Services.WebDAVDownload.ExternalURL.String() + \"c=d41d8cd98f00b204e9800998ecf8427e+0/_/foo\"},\n\t\t{false, false, http.StatusNotFound, cluster.Services.WebDAVDownload.ExternalURL.String() + \"c=d41d8cd98f00b204e9800998ecf8427e+0/_/\" + tarfilename},\n\t\t{true, true, http.StatusOK, strings.Replace(davurl.String(), \"*\", strings.Replace(collection.PortableDataHash, \"+\", \"-\", -1), 1) + tarfilename},\n\t\t{true, false, http.StatusOK, cluster.Services.WebDAVDownload.ExternalURL.String() + \"c=\" + collection.UUID + \"/_/\" + tarfilename},\n\t} {\n\t\tdiag.dotest(120+i, fmt.Sprintf(\"downloading from webdav (%s)\", trial.fileurl), func() error {\n\t\t\tif trial.needWildcard && !davWildcard {\n\t\t\t\tdiag.warnf(\"skipping collection-id-in-vhost test because WebDAV ExternalURL has no leading wildcard\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\t\tdefer cancel()\n\t\t\tif trial.needcoll && collection.UUID == \"\" {\n\t\t\t\treturn fmt.Errorf(\"skipping, no test collection\")\n\t\t\t}\n\t\t\treq, err := http.NewRequestWithContext(ctx, \"GET\", 
trial.fileurl, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treq.Header.Set(\"Authorization\", \"Bearer \"+client.AuthToken)\n\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"reading response: %s\", err)\n\t\t\t}\n\t\t\tif resp.StatusCode != trial.status {\n\t\t\t\treturn fmt.Errorf(\"unexpected response status: %s\", resp.Status)\n\t\t\t}\n\t\t\tif trial.status == http.StatusOK && !bytes.Equal(body, dockerImageData) {\n\t\t\t\texcerpt := body\n\t\t\t\tif len(excerpt) > 128 {\n\t\t\t\t\texcerpt = append([]byte(nil), body[:128]...)\n\t\t\t\t\texcerpt = append(excerpt, []byte(\"[...]\")...)\n\t\t\t\t}\n\t\t\t\treturn fmt.Errorf(\"unexpected response content: len %d, %q\", len(body), excerpt)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tvar vm arvados.VirtualMachine\n\tdiag.dotest(130, \"getting list of virtual machines\", func() error {\n\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\tdefer cancel()\n\t\tvar vmlist arvados.VirtualMachineList\n\t\terr := client.RequestAndDecodeContext(ctx, &vmlist, \"GET\", \"arvados/v1/virtual_machines\", nil, arvados.ListOptions{Limit: 999999})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(vmlist.Items) < 1 {\n\t\t\tdiag.warnf(\"no VMs found\")\n\t\t} else {\n\t\t\tvm = vmlist.Items[0]\n\t\t}\n\t\treturn nil\n\t})\n\n\tdiag.dotest(150, \"connecting to webshell service\", func() error {\n\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\tdefer cancel()\n\t\tu := cluster.Services.WebShell.ExternalURL\n\t\tif u == (arvados.URL{}) {\n\t\t\tdiag.infof(\"skipping, webshell not configured\")\n\t\t\treturn nil\n\t\t}\n\t\tif vm.UUID == \"\" {\n\t\t\tdiag.warnf(\"skipping, no vm available\")\n\t\t\treturn nil\n\t\t}\n\t\twebshellurl := u.String() + vm.Hostname + \"?\"\n\t\tif strings.HasPrefix(u.Host, \"*\") {\n\t\t\tu.Host = vm.Hostname + u.Host[1:]\n\t\t\twebshellurl = u.String() + \"?\"\n\t\t}\n\t\tdiag.debugf(\"url %s\", webshellurl)\n\t\treq, err := http.NewRequestWithContext(ctx, \"POST\", webshellurl, bytes.NewBufferString(url.Values{\n\t\t\t\"width\":   {\"80\"},\n\t\t\t\"height\":  {\"25\"},\n\t\t\t\"session\": {\"xyzzy\"},\n\t\t\t\"rooturl\": {webshellurl},\n\t\t}.Encode()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded; charset=UTF-8\")\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tdiag.debugf(\"response status %s\", resp.Status)\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"reading response: %s\", err)\n\t\t}\n\t\tdiag.debugf(\"response body %q\", body)\n\t\t// We don't speak the protocol, so we get a 400 error\n\t\t// from the webshell server even if everything is\n\t\t// OK. Anything else (404, 502, ???) 
indicates a\n\t\t// problem.\n\t\tif resp.StatusCode != http.StatusBadRequest {\n\t\t\treturn fmt.Errorf(\"unexpected response status: %s, %q\", resp.Status, body)\n\t\t}\n\t\treturn nil\n\t})\n\n\tdiag.dotest(160, \"running a container\", func() error {\n\t\tif diag.priority < 1 {\n\t\t\tdiag.infof(\"skipping (use priority > 0 if you want to run a container)\")\n\t\t\treturn nil\n\t\t}\n\t\tif project.UUID == \"\" {\n\t\t\treturn fmt.Errorf(\"skipping, no project to work in\")\n\t\t}\n\n\t\ttimestamp := time.Now().Format(time.RFC3339)\n\n\t\tvar ctrCommand []string\n\t\tswitch diag.dockerImage {\n\t\tcase \"\":\n\t\t\tif collection.UUID == \"\" {\n\t\t\t\treturn fmt.Errorf(\"skipping, no test collection to use as docker image\")\n\t\t\t}\n\t\t\tdiag.dockerImage = collection.PortableDataHash\n\t\t\tctrCommand = []string{\"/arvados-client\", \"diagnostics\",\n\t\t\t\t\"-priority=0\", // don't run a container\n\t\t\t\t\"-log-level=\" + diag.logLevel,\n\t\t\t\t\"-container-client=true\"}\n\t\tcase \"hello-world\":\n\t\t\tif collection.UUID == \"\" {\n\t\t\t\treturn fmt.Errorf(\"skipping, no test collection to use as docker image\")\n\t\t\t}\n\t\t\tdiag.dockerImage = collection.PortableDataHash\n\t\t\tctrCommand = []string{\"/hello\"}\n\t\tdefault:\n\t\t\tctrCommand = []string{\"echo\", timestamp}\n\t\t}\n\n\t\tvar cr arvados.ContainerRequest\n\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\tdefer cancel()\n\n\t\terr := client.RequestAndDecodeContext(ctx, &cr, \"POST\", \"arvados/v1/container_requests\", nil, map[string]interface{}{\"container_request\": map[string]interface{}{\n\t\t\t\"owner_uuid\":      project.UUID,\n\t\t\t\"name\":            fmt.Sprintf(\"diagnostics container request %s\", timestamp),\n\t\t\t\"container_image\": diag.dockerImage,\n\t\t\t\"command\":         ctrCommand,\n\t\t\t\"use_existing\":    false,\n\t\t\t\"output_path\":     \"/mnt/output\",\n\t\t\t\"output_name\":     fmt.Sprintf(\"diagnostics output %s\", timestamp),\n\t\t\t\"priority\":        diag.priority,\n\t\t\t\"state\":           arvados.ContainerRequestStateCommitted,\n\t\t\t\"mounts\": map[string]map[string]interface{}{\n\t\t\t\t\"/mnt/output\": {\n\t\t\t\t\t\"kind\":     \"collection\",\n\t\t\t\t\t\"writable\": true,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"runtime_constraints\": arvados.RuntimeConstraints{\n\t\t\t\tAPI:          true,\n\t\t\t\tVCPUs:        1,\n\t\t\t\tRAM:          128 << 20,\n\t\t\t\tKeepCacheRAM: 64 << 20,\n\t\t\t},\n\t\t}})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdiag.infof(\"container request uuid = %s\", cr.UUID)\n\t\tdiag.verbosef(\"container uuid = %s\", cr.ContainerUUID)\n\n\t\ttimeout := 10 * time.Minute\n\t\tdiag.infof(\"container request submitted, waiting up to %v for container to run\", arvados.Duration(timeout))\n\t\tdeadline := time.Now().Add(timeout)\n\n\t\tvar c arvados.Container\n\t\tfor ; cr.State != arvados.ContainerRequestStateFinal && time.Now().Before(deadline); time.Sleep(2 * time.Second) {\n\t\t\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(diag.timeout))\n\t\t\tdefer cancel()\n\n\t\t\tcrStateWas := cr.State\n\t\t\terr := client.RequestAndDecodeContext(ctx, &cr, \"GET\", \"arvados/v1/container_requests/\"+cr.UUID, nil, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif cr.State != crStateWas {\n\t\t\t\tdiag.debugf(\"container request state = %s\", cr.State)\n\t\t\t}\n\n\t\t\tcStateWas := c.State\n\t\t\terr = client.RequestAndDecodeContext(ctx, &c, \"GET\", 
\"arvados/v1/containers/\"+cr.ContainerUUID, nil, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif c.State != cStateWas {\n\t\t\t\tdiag.debugf(\"container state = %s\", c.State)\n\t\t\t}\n\n\t\t\tcancel()\n\t\t}\n\n\t\tif cr.State != arvados.ContainerRequestStateFinal {\n\t\t\terr := client.RequestAndDecodeContext(context.Background(), &cr, \"PATCH\", \"arvados/v1/container_requests/\"+cr.UUID, nil, map[string]interface{}{\n\t\t\t\t\"container_request\": map[string]interface{}{\n\t\t\t\t\t\"priority\": 0,\n\t\t\t\t}})\n\t\t\tif err != nil {\n\t\t\t\tdiag.infof(\"error canceling container request %s: %s\", cr.UUID, err)\n\t\t\t} else {\n\t\t\t\tdiag.debugf(\"canceled container request %s\", cr.UUID)\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"timed out waiting for container to finish; container request %s state was %q, container %s state was %q\", cr.UUID, cr.State, c.UUID, c.State)\n\t\t}\n\t\tif c.State != arvados.ContainerStateComplete {\n\t\t\treturn fmt.Errorf(\"container request %s is final but container %s did not complete: container state = %q\", cr.UUID, cr.ContainerUUID, c.State)\n\t\t}\n\t\tif c.ExitCode != 0 {\n\t\t\treturn fmt.Errorf(\"container exited %d\", c.ExitCode)\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc getSHA2FromImageData(dockerImageData []byte) (string, error) {\n\ttr := tar.NewReader(bytes.NewReader(dockerImageData))\n\tfor {\n\t\thdr, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\treturn \"\", fmt.Errorf(\"cannot find manifest.json in docker image tar file\")\n\t\t}\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"cannot read docker image tar file: %s\", err)\n\t\t}\n\t\tif hdr.Name != \"manifest.json\" {\n\t\t\tcontinue\n\t\t}\n\t\tvar manifest []struct {\n\t\t\tConfig string\n\t\t}\n\t\terr = json.NewDecoder(tr).Decode(&manifest)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"cannot read manifest.json from docker image tar file: %s\", err)\n\t\t}\n\t\tif len(manifest) == 0 {\n\t\t\treturn \"\", fmt.Errorf(\"manifest.json is empty\")\n\t\t}\n\t\ts := min64HexDigits.FindString(manifest[0].Config)\n\t\tif len(s) != 64 {\n\t\t\treturn \"\", fmt.Errorf(\"found manifest.json but .[0].Config %q does not seem to contain sha256 digest\", manifest[0].Config)\n\t\t}\n\t\treturn s, nil\n\t}\n}\n\nvar min64HexDigits = regexp.MustCompile(`[0-9a-f]{64,}`)\n"
  },
  {
    "path": "lib/diagnostics/docker_image_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage diagnostics\n\nimport (\n\t\"testing\"\n\n\t. \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tTestingT(t)\n}\n\nvar _ = Suite(&suite{})\n\ntype suite struct{}\n\nfunc (*suite) TestGetSHA2FromImageData(c *C) {\n\timageSHA2, err := getSHA2FromImageData(HelloWorldDockerImage)\n\tc.Check(err, IsNil)\n\tc.Check(imageSHA2, Matches, `[0-9a-f]{64}`)\n}\n"
  },
  {
    "path": "lib/dispatchcloud/adminclient.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchcloud\n\nimport (\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/worker\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n)\n\nvar InstanceCommand = cmd.Multi(map[string]cmd.Handler{\n\t\"list\":  instanceList{},\n\t\"kill\":  instanceAction{action: \"kill\", reason: true},\n\t\"drain\": instanceAction{action: \"drain\"},\n\t\"hold\":  instanceAction{action: \"hold\"},\n\t\"run\":   instanceAction{action: \"run\"},\n})\n\ntype instanceList struct{}\n\nfunc (instanceList) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tlogger := ctxlog.New(stderr, \"text\", \"info\")\n\tloader := config.NewLoader(stdin, logger)\n\tloader.SkipLegacy = true\n\tflags := flag.NewFlagSet(prog, flag.ContinueOnError)\n\tloader.SetupFlags(flags)\n\theader := flags.Bool(\"header\", false, \"print column headings\")\n\tif ok, code := cmd.ParseFlags(flags, prog, args, \"\", stderr); !ok {\n\t\treturn code\n\t}\n\tcfg, err := loader.Load()\n\tif err != nil {\n\t\tfmt.Fprintln(stderr, err)\n\t\treturn 1\n\t}\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\tfmt.Fprintln(stderr, err)\n\t\treturn 1\n\t}\n\tclient := http.DefaultClient\n\tif len(cluster.Services.DispatchCloud.InternalURLs) == 0 {\n\t\tfmt.Fprintf(stderr, \"no Services.DispatchCloud.InternalURLs configured\\n\")\n\t\treturn 1\n\t}\n\tif *header {\n\t\tfmt.Fprint(stdout, \"instance\\taddress\\tstate\\tidle-behavior\\tconfig-type\\tprovider-type\\tprice\\trunning-containers\\n\")\n\t}\n\tfor url := range cluster.Services.DispatchCloud.InternalURLs {\n\t\treq, err := http.NewRequest(http.MethodGet, url.String()+\"/arvados/v1/dispatch/instances\", nil)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(stderr, \"error setting up API request: %s\\n\", err)\n\t\t\treturn 1\n\t\t}\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+cluster.ManagementToken)\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(stderr, \"error doing API request: %s\\n\", err)\n\t\t\treturn 1\n\t\t}\n\t\tvar instances struct {\n\t\t\tItems []worker.InstanceView\n\t\t}\n\t\terr = json.NewDecoder(resp.Body).Decode(&instances)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(stderr, \"error decoding API response: %s\\n\", err)\n\t\t\treturn 1\n\t\t}\n\t\tfor _, inst := range instances.Items {\n\t\t\tif inst.Instance == \"\" {\n\t\t\t\tinst.Instance = \"-\"\n\t\t\t}\n\t\t\tif inst.Address == \"\" {\n\t\t\t\tinst.Address = \"-\"\n\t\t\t}\n\t\t\trunning := \"-\"\n\t\t\tif len(inst.RunningContainerUUIDs) > 0 {\n\t\t\t\trunning = strings.Join(inst.RunningContainerUUIDs, \",\")\n\t\t\t}\n\t\t\tfmt.Fprintf(stdout, \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%f\\t%s\\n\", inst.Instance, inst.Address, inst.WorkerState, inst.IdleBehavior, inst.ArvadosInstanceType, inst.ProviderInstanceType, inst.Price, running)\n\t\t}\n\t}\n\treturn 0\n}\n\ntype instanceAction struct {\n\taction string\n\treason bool // accept \"reason\" flag\n}\n\nfunc (ia instanceAction) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tlogger := ctxlog.New(stderr, \"text\", \"info\")\n\tloader := config.NewLoader(stdin, logger)\n\tloader.SkipLegacy = true\n\tflags := flag.NewFlagSet(prog, 
flag.ContinueOnError)\n\tloader.SetupFlags(flags)\n\treason := new(string)\n\tif ia.reason {\n\t\treason = flags.String(\"reason\", \"\", \"reason to write in dispatch system logs\")\n\t}\n\tif ok, code := cmd.ParseFlags(flags, prog, args, \"instance-id [...]\", stderr); !ok {\n\t\treturn code\n\t}\n\tif len(flags.Args()) == 0 {\n\t\tfmt.Fprintln(stderr, \"usage error: no instance IDs provided\")\n\t\treturn 2\n\t}\n\ttodo := map[string]bool{}\n\tfor _, id := range flags.Args() {\n\t\ttodo[id] = true\n\t}\n\tcfg, err := loader.Load()\n\tif err != nil {\n\t\tfmt.Fprintln(stderr, err)\n\t\treturn 1\n\t}\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\tfmt.Fprintln(stderr, err)\n\t\treturn 1\n\t}\n\tclient := http.DefaultClient\n\tif len(cluster.Services.DispatchCloud.InternalURLs) == 0 {\n\t\tfmt.Fprintf(stderr, \"no Services.DispatchCloud.InternalURLs configured\\n\")\n\t\treturn 1\n\t}\n\tfor u := range cluster.Services.DispatchCloud.InternalURLs {\n\t\tu.Path = \"/arvados/v1/dispatch/instances/\" + ia.action\n\t\tfor id := range todo {\n\t\t\tu.RawQuery = url.Values{\n\t\t\t\t\"instance_id\": []string{id},\n\t\t\t\t\"reason\":      []string{*reason},\n\t\t\t}.Encode()\n\t\t\treq, err := http.NewRequest(http.MethodPost, u.String(), nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(stderr, \"%s: error setting up API request: %s\\n\", id, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treq.Header.Set(\"Authorization\", \"Bearer \"+cluster.ManagementToken)\n\t\t\tresp, err := client.Do(req)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(stderr, \"%s: error doing API request: %s\\n\", id, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfmt.Fprintf(stderr, \"%s: %s (%s)\\n\", id, resp.Status, u.Host)\n\t\t\tif resp.StatusCode == http.StatusOK {\n\t\t\t\tdelete(todo, id)\n\t\t\t}\n\t\t}\n\t}\n\tif len(todo) > 0 {\n\t\treturn 1\n\t}\n\treturn 0\n}\n"
  },
  {
    "path": "lib/dispatchcloud/cmd.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchcloud\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/service\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar Command cmd.Handler = service.Command(arvados.ServiceNameDispatchCloud, newHandler)\n\nfunc newHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {\n\tac, err := arvados.NewClientFromConfig(cluster)\n\tif err != nil {\n\t\treturn service.ErrorHandler(ctx, cluster, fmt.Errorf(\"error initializing client from cluster config: %s\", err))\n\t}\n\t// Disable auto-retry.  We have transient failure recovery at\n\t// the application level, so we would rather receive/report\n\t// upstream errors right away.\n\tac.Timeout = 0\n\td := &dispatcher{\n\t\tCluster:   cluster,\n\t\tContext:   ctx,\n\t\tArvClient: ac,\n\t\tAuthToken: token,\n\t\tRegistry:  reg,\n\t}\n\tgo d.Start()\n\treturn d\n}\n"
  },
  {
    "path": "lib/dispatchcloud/container/node_size.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage container\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"regexp\"\n\t\"slices\"\n\t\"sort\"\n\t\"strconv\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\nvar ErrInstanceTypesNotConfigured = errors.New(\"site configuration does not list any instance types\")\n\nvar discountConfiguredRAMPercent = 5\n\n// ConstraintsNotSatisfiableError includes a list of available instance types\n// to be reported back to the user.\ntype ConstraintsNotSatisfiableError struct {\n\terror\n\tAvailableTypes []arvados.InstanceType\n}\n\n// InstanceResources are the allocatable resources needed to run a\n// container, including system overhead.\ntype InstanceResources struct {\n\tVCPUs   int\n\tRAM     arvados.ByteSize\n\tScratch arvados.ByteSize\n\tGPUs    int\n\tGPUVRAM arvados.ByteSize\n\n\t// One of the instance's remaining VCPUs has been allocated to\n\t// one or more 0-VCPU containers.\n\tsharedVCPUUsed bool\n}\n\n// Check whether an instance's remaining resources r are enough to\n// accommodate container r2.\n//\n// If an instance is running any 0-VCPU containers, only r.VCPUs-1 are\n// available for containers that request 1 or more VCPUs.\nfunc (r InstanceResources) Accommodates(r2 InstanceResources) bool {\n\treturn r.VCPUs >= r2.VCPUs &&\n\t\tr.RAM >= r2.RAM &&\n\t\tr.Scratch >= r2.Scratch &&\n\t\tr.GPUs >= r2.GPUs &&\n\t\tr.GPUVRAM >= r2.GPUVRAM &&\n\t\tr.VCPUs > 0 &&\n\t\t(!r.sharedVCPUUsed || r.VCPUs > r2.VCPUs)\n}\n\n// Subtract r2 from the resources r remaining on an instance.\nfunc (r InstanceResources) Sub(r2 InstanceResources) InstanceResources {\n\tr.VCPUs -= r2.VCPUs\n\tr.RAM -= r2.RAM\n\tr.Scratch -= r2.Scratch\n\tr.GPUs -= r2.GPUs\n\tr.GPUVRAM -= r2.GPUVRAM\n\tif r2.VCPUs == 0 {\n\t\tr.sharedVCPUUsed = true\n\t}\n\treturn r\n}\n\nvar pdhRegexp = regexp.MustCompile(`^[0-9a-f]{32}\\+(\\d+)$`)\n\n// estimateDockerImageSize estimates how much disk space will be used\n// by a Docker image, given the PDH of a collection containing a\n// Docker image that was created by \"arv-keepdocker\".  
Returns\n// estimated number of bytes of disk space that should be reserved.\nfunc estimateDockerImageSize(collectionPDH string) int64 {\n\tm := pdhRegexp.FindStringSubmatch(collectionPDH)\n\tif m == nil {\n\t\treturn 0\n\t}\n\tn, err := strconv.ParseInt(m[1], 10, 64)\n\tif err != nil || n < 122 {\n\t\treturn 0\n\t}\n\t// To avoid having to fetch the collection, take advantage of\n\t// the fact that the manifest storing a container image\n\t// uploaded by arv-keepdocker has a predictable format, which\n\t// allows us to estimate the size of the image based on just\n\t// the size of the manifest.\n\t//\n\t// Use the following heuristic:\n\t// - Start with the length of the manifest (n)\n\t// - Subtract 80 characters for the filename and file segment\n\t// - Divide by 42 to get the number of block identifiers ('hash\\+size\\ ' is 32+1+8+1)\n\t// - Assume each block is full, multiply by 64 MiB\n\t//\n\t// For example, a manifest of n=342 bytes gives (342-80)/42 = 6\n\t// blocks, so the estimate is 6 * 64 MiB = 384 MiB.\n\treturn ((n - 80) / 42) * (64 * 1024 * 1024)\n}\n\n// EstimateScratchSpace estimates how much available disk space (in\n// bytes) is needed to run the container by summing the capacity\n// requested by 'tmp' mounts plus disk space required to load the\n// Docker image plus arv-mount block cache.\nfunc EstimateScratchSpace(ctr *arvados.Container) (needScratch int64) {\n\tfor _, m := range ctr.Mounts {\n\t\tif m.Kind == \"tmp\" {\n\t\t\tneedScratch += m.Capacity\n\t\t}\n\t}\n\n\t// Account for disk space usage by Docker, assumes the following behavior:\n\t// - Layer tarballs are buffered to disk during \"docker load\".\n\t// - Individual layer tarballs are extracted from the buffered\n\t// copy to the filesystem.\n\tdockerImageSize := estimateDockerImageSize(ctr.ContainerImage)\n\n\t// The buffer is only needed during image load, so make sure\n\t// the baseline scratch space at least covers dockerImageSize,\n\t// and assume it will be released to the job afterwards.\n\tif needScratch < dockerImageSize {\n\t\tneedScratch = dockerImageSize\n\t}\n\n\t// Now reserve space for the extracted image on disk.\n\tneedScratch += dockerImageSize\n\n\t// Now reserve space for the arv-mount disk cache.\n\tneedScratch += ctr.RuntimeConstraints.KeepCacheDisk\n\n\treturn\n}\n\nfunc InstanceResourcesNeeded(cc *arvados.Cluster, ctr *arvados.Container) InstanceResources {\n\tneedRAM := ctr.RuntimeConstraints.RAM + ctr.RuntimeConstraints.KeepCacheRAM\n\tneedRAM += int64(cc.Containers.ReserveExtraRAM)\n\tif cc.Containers.LocalKeepBlobBuffersPerVCPU > 0 {\n\t\t// + 200 MiB for keepstore process + 10% for GOGC=10\n\t\tneedRAM += 220 << 20\n\t\t// + 64 MiB for each blob buffer + 10% for GOGC=10\n\t\tneedRAM += int64(cc.Containers.LocalKeepBlobBuffersPerVCPU * ctr.RuntimeConstraints.VCPUs * (1 << 26) * 11 / 10)\n\t}\n\tneedRAM = (needRAM * 100) / int64(100-discountConfiguredRAMPercent)\n\n\treturn InstanceResources{\n\t\tVCPUs:   ctr.RuntimeConstraints.VCPUs,\n\t\tRAM:     arvados.ByteSize(needRAM),\n\t\tScratch: arvados.ByteSize(EstimateScratchSpace(ctr)),\n\t\tGPUs:    ctr.RuntimeConstraints.GPU.DeviceCount,\n\t\tGPUVRAM: arvados.ByteSize(ctr.RuntimeConstraints.GPU.VRAM),\n\t}\n}\n\n// versionLess returns true if vs1 < vs2, otherwise false\nfunc versionLess(vs1 string, vs2 string) (bool, error) {\n\tv1, err := strconv.ParseFloat(vs1, 64)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tv2, err := strconv.ParseFloat(vs2, 64)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn v1 < v2, nil\n}\n\n
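// Note: versionLess parses version strings as decimal numbers, so\n// simple \"major.minor\" strings like \"11.0\" work, but a multi-part\n// version like \"12.0.12\" fails to parse and is returned as an error;\n// ChooseInstanceType treats such an error as a constraint mismatch.\n\n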
// ChooseInstanceType returns the arvados.InstanceTypes eligible to\n// run ctr, i.e., those that have enough RAM, VCPUs, etc., and are not\n// too expensive according to cluster configuration.\n//\n// The returned types are sorted with lower prices first.\n//\n// The error is non-nil if and only if the returned slice is empty.\nfunc ChooseInstanceType(cc *arvados.Cluster, ctr *arvados.Container) ([]arvados.InstanceType, error) {\n\tif len(cc.InstanceTypes) == 0 {\n\t\treturn nil, ErrInstanceTypesNotConfigured\n\t}\n\tneed := InstanceResourcesNeeded(cc, ctr)\n\tmaxPriceFactor := math.Max(cc.Containers.MaximumPriceFactor, 1)\n\tvar types []arvados.InstanceType\n\tvar maxPrice float64\n\tfor _, it := range cc.InstanceTypes {\n\t\tdriverInsuff, driverErr := versionLess(it.GPU.DriverVersion, ctr.RuntimeConstraints.GPU.DriverVersion)\n\n\t\tvar capabilityInsuff bool\n\t\tvar capabilityErr error\n\t\tswitch ctr.RuntimeConstraints.GPU.Stack {\n\t\tcase \"\":\n\t\tcase \"cuda\":\n\t\t\tif len(ctr.RuntimeConstraints.GPU.HardwareTarget) > 1 {\n\t\t\t\t// Check if the node's capability\n\t\t\t\t// exactly matches any of the\n\t\t\t\t// requested capabilities. For CUDA,\n\t\t\t\t// this is the hardware capability in\n\t\t\t\t// X.Y format.\n\t\t\t\tcapabilityInsuff = !slices.Contains(ctr.RuntimeConstraints.GPU.HardwareTarget, it.GPU.HardwareTarget)\n\t\t\t} else if len(ctr.RuntimeConstraints.GPU.HardwareTarget) == 1 {\n\t\t\t\t// Treat a single requested capability\n\t\t\t\t// as a minimum: version compare.\n\t\t\t\tcapabilityInsuff, capabilityErr = versionLess(it.GPU.HardwareTarget, ctr.RuntimeConstraints.GPU.HardwareTarget[0])\n\t\t\t} else {\n\t\t\t\tcapabilityInsuff = true\n\t\t\t}\n\t\tcase \"rocm\":\n\t\t\t// Check if the node's hardware matches any of\n\t\t\t// the requested hardware.  For rocm, this is\n\t\t\t// a gfxXXXX LLVM target.\n\t\t\tcapabilityInsuff = !slices.Contains(ctr.RuntimeConstraints.GPU.HardwareTarget, it.GPU.HardwareTarget)\n\t\tdefault:\n\t\t\treturn nil, ConstraintsNotSatisfiableError{\n\t\t\t\terror: fmt.Errorf(\"Invalid GPU stack %q, expected to be blank or one of 'cuda' or 'rocm'\", ctr.RuntimeConstraints.GPU.Stack),\n\t\t\t}\n\t\t}\n\n\t\tswitch {\n\t\t// reasons to reject a node\n\t\tcase maxPrice > 0 && it.Price > maxPrice: // too expensive\n\t\tcase it.Scratch < need.Scratch: // insufficient scratch\n\t\tcase it.RAM < need.RAM: // insufficient RAM\n\t\tcase it.VCPUs < need.VCPUs: // insufficient VCPUs\n\t\tcase it.Preemptible != ctr.SchedulingParameters.Preemptible: // wrong preemptible setting\n\t\tcase it.GPU.Stack != ctr.RuntimeConstraints.GPU.Stack: // incompatible GPU software stack (or none available)\n\t\tcase it.GPU.DeviceCount < ctr.RuntimeConstraints.GPU.DeviceCount: // insufficient GPU devices\n\t\tcase int64(it.GPU.VRAM) < ctr.RuntimeConstraints.GPU.VRAM: // insufficient VRAM per GPU\n\t\tcase ctr.RuntimeConstraints.GPU.DeviceCount > 0 && (driverInsuff || driverErr != nil): // insufficient driver version\n\t\tcase ctr.RuntimeConstraints.GPU.DeviceCount > 0 && (capabilityInsuff || capabilityErr != nil): // insufficient hardware capability\n\t\t\t// Don't select this node\n\t\tdefault:\n\t\t\t// Didn't reject the node, so select it\n\t\t\ttypes = append(types, it)\n\t\t\tif newmax := it.Price * maxPriceFactor; newmax < maxPrice || maxPrice == 0 {\n\t\t\t\tmaxPrice = newmax\n\t\t\t}\n\t\t}\n\t}\n\tif len(types) == 0 {\n\t\tavailableTypes := make([]arvados.InstanceType, 0, len(cc.InstanceTypes))\n\t\tfor _, t := range cc.InstanceTypes {\n\t\t\tavailableTypes = append(availableTypes, t)\n\t\t}\n\t\tsort.Slice(availableTypes, func(a, b int) bool {\n\t\t\treturn availableTypes[a].Price < availableTypes[b].Price\n\t\t})\n\t\treturn nil, 
ConstraintsNotSatisfiableError{\n\t\t\terror:          errors.New(\"constraints not satisfiable by any configured instance type\"),\n\t\t\tAvailableTypes: availableTypes,\n\t\t}\n\t}\n\tsort.Slice(types, func(i, j int) bool {\n\t\tif types[i].Price != types[j].Price {\n\t\t\t// prefer lower price\n\t\t\treturn types[i].Price < types[j].Price\n\t\t}\n\t\tif types[i].RAM != types[j].RAM {\n\t\t\t// if same price, prefer more RAM\n\t\t\treturn types[i].RAM > types[j].RAM\n\t\t}\n\t\tif types[i].VCPUs != types[j].VCPUs {\n\t\t\t// if same price and RAM, prefer more VCPUs\n\t\t\treturn types[i].VCPUs > types[j].VCPUs\n\t\t}\n\t\tif types[i].Scratch != types[j].Scratch {\n\t\t\t// if same price and RAM and VCPUs, prefer more scratch\n\t\t\treturn types[i].Scratch > types[j].Scratch\n\t\t}\n\t\t// no preference, just sort the same way each time\n\t\treturn types[i].Name < types[j].Name\n\t})\n\t// Truncate types at maxPrice. We rejected it.Price>maxPrice\n\t// in the loop above, but at that point maxPrice wasn't\n\t// necessarily the final (lowest) maxPrice.\n\tfor i, it := range types {\n\t\tif i > 0 && it.Price > maxPrice {\n\t\t\ttypes = types[:i]\n\t\t\tbreak\n\t\t}\n\t}\n\treturn types, nil\n}\n"
  },
  {
    "path": "lib/dispatchcloud/container/node_size_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage container\n\nimport (\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&NodeSizeSuite{})\n\nconst GiB = arvados.ByteSize(1 << 30)\n\ntype NodeSizeSuite struct{}\n\nfunc (*NodeSizeSuite) TestChooseNotConfigured(c *check.C) {\n\t_, err := ChooseInstanceType(&arvados.Cluster{}, &arvados.Container{\n\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\tRAM:   1234567890,\n\t\t\tVCPUs: 2,\n\t\t},\n\t})\n\tc.Check(err, check.Equals, ErrInstanceTypesNotConfigured)\n}\n\nfunc (*NodeSizeSuite) TestChooseUnsatisfiable(c *check.C) {\n\tcheckUnsatisfiable := func(ctr *arvados.Container) {\n\t\t_, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: map[string]arvados.InstanceType{\n\t\t\t\"small1\": {Price: 1.1, RAM: 1000000000, VCPUs: 2, Name: \"small1\"},\n\t\t\t\"small2\": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Name: \"small2\"},\n\t\t\t\"small4\": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Name: \"small4\", Scratch: GiB},\n\t\t}}, ctr)\n\t\tc.Check(err, check.FitsTypeOf, ConstraintsNotSatisfiableError{})\n\t}\n\n\tfor _, rc := range []arvados.RuntimeConstraints{\n\t\t{RAM: 9876543210, VCPUs: 2},\n\t\t{RAM: 1234567890, VCPUs: 20},\n\t\t{RAM: 1234567890, VCPUs: 2, KeepCacheRAM: 9876543210},\n\t} {\n\t\tcheckUnsatisfiable(&arvados.Container{RuntimeConstraints: rc})\n\t}\n\tcheckUnsatisfiable(&arvados.Container{\n\t\tMounts:             map[string]arvados.Mount{\"/tmp\": {Kind: \"tmp\", Capacity: int64(2 * GiB)}},\n\t\tRuntimeConstraints: arvados.RuntimeConstraints{RAM: 12345, VCPUs: 1},\n\t})\n}\n\nfunc (*NodeSizeSuite) TestChoose(c *check.C) {\n\tfor _, menu := range []map[string]arvados.InstanceType{\n\t\t{\n\t\t\t\"costly\": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: \"costly\"},\n\t\t\t\"best\":   {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: \"best\"},\n\t\t\t\"small\":  {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: \"small\"},\n\t\t},\n\t\t{\n\t\t\t\"costly\":     {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: \"costly\"},\n\t\t\t\"goodenough\": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: \"goodenough\"},\n\t\t\t\"best\":       {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: \"best\"},\n\t\t\t\"small\":      {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: \"small\"},\n\t\t},\n\t\t{\n\t\t\t\"small\":      {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Name: \"small\"},\n\t\t\t\"goodenough\": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: \"goodenough\"},\n\t\t\t\"best\":       {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: \"best\"},\n\t\t\t\"costly\":     {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: \"costly\"},\n\t\t},\n\t\t{\n\t\t\t\"small\":  {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: GiB, Name: \"small\"},\n\t\t\t\"nearly\": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: GiB, Name: \"nearly\"},\n\t\t\t\"best\":   {Price: 3.3, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: \"best\"},\n\t\t\t\"costly\": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: \"costly\"},\n\t\t},\n\t\t{\n\t\t\t\"small\":  {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: GiB, Name: \"small\"},\n\t\t\t\"nearly\": {Price: 2.2, RAM: 1200000000, VCPUs: 4, Scratch: 2 * GiB, Name: \"nearly\"},\n\t\t\t\"best\":   {Price: 
3.3, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: \"best\"},\n\t\t\t\"costly\": {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Name: \"costly\"},\n\t\t},\n\t} {\n\t\tbest, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu, Containers: arvados.ContainersConfig{\n\t\t\tLocalKeepBlobBuffersPerVCPU: 1,\n\t\t\tReserveExtraRAM:             268435456,\n\t\t}}, &arvados.Container{\n\t\t\tMounts: map[string]arvados.Mount{\n\t\t\t\t\"/tmp\": {Kind: \"tmp\", Capacity: 2 * int64(GiB)},\n\t\t\t},\n\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\tVCPUs:        2,\n\t\t\t\tRAM:          987654321,\n\t\t\t\tKeepCacheRAM: 123456789,\n\t\t\t},\n\t\t})\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Assert(best, check.Not(check.HasLen), 0)\n\t\tc.Check(best[0].Name, check.Equals, \"best\")\n\t\tc.Check(best[0].RAM >= 1234567890, check.Equals, true)\n\t\tc.Check(best[0].VCPUs >= 2, check.Equals, true)\n\t\tc.Check(best[0].Scratch >= 2*GiB, check.Equals, true)\n\t\tfor i := range best {\n\t\t\t// If multiple instance types are returned\n\t\t\t// then they should all have the same price,\n\t\t\t// because we didn't set MaximumPriceFactor>1.\n\t\t\tc.Check(best[i].Price, check.Equals, best[0].Price)\n\t\t}\n\t}\n}\n\nfunc (*NodeSizeSuite) TestMaximumPriceFactor(c *check.C) {\n\tmenu := map[string]arvados.InstanceType{\n\t\t\"best+7\":  {Price: 3.4, RAM: 8000000000, VCPUs: 8, Scratch: 64 * GiB, Name: \"best+7\"},\n\t\t\"best+5\":  {Price: 3.0, RAM: 8000000000, VCPUs: 8, Scratch: 16 * GiB, Name: \"best+5\"},\n\t\t\"best+3\":  {Price: 2.6, RAM: 4000000000, VCPUs: 8, Scratch: 16 * GiB, Name: \"best+3\"},\n\t\t\"best+2\":  {Price: 2.4, RAM: 4000000000, VCPUs: 8, Scratch: 4 * GiB, Name: \"best+2\"},\n\t\t\"best+1\":  {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 4 * GiB, Name: \"best+1\"},\n\t\t\"best\":    {Price: 2.0, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: \"best\"},\n\t\t\"small+1\": {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 16 * GiB, Name: \"small+1\"},\n\t\t\"small\":   {Price: 1.0, RAM: 2000000000, VCPUs: 2, Scratch: 1 * GiB, Name: \"small\"},\n\t}\n\tbest, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu, Containers: arvados.ContainersConfig{\n\t\tMaximumPriceFactor: 1.5,\n\t}}, &arvados.Container{\n\t\tMounts: map[string]arvados.Mount{\n\t\t\t\"/tmp\": {Kind: \"tmp\", Capacity: 2 * int64(GiB)},\n\t\t},\n\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\tVCPUs:        2,\n\t\t\tRAM:          987654321,\n\t\t\tKeepCacheRAM: 123456789,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\tc.Assert(best, check.HasLen, 5)\n\tc.Check(best[0].Name, check.Equals, \"best\") // best price is $2\n\tc.Check(best[1].Name, check.Equals, \"best+1\")\n\tc.Check(best[2].Name, check.Equals, \"best+2\")\n\tc.Check(best[3].Name, check.Equals, \"best+3\")\n\tc.Check(best[4].Name, check.Equals, \"best+5\") // max price is $2 * 1.5 = $3\n}\n\nfunc (*NodeSizeSuite) TestChooseWithBlobBuffersOverhead(c *check.C) {\n\tmenu := map[string]arvados.InstanceType{\n\t\t\"nearly\": {Price: 2.2, RAM: 4000000000, VCPUs: 4, Scratch: 2 * GiB, Name: \"small\"},\n\t\t\"best\":   {Price: 3.3, RAM: 8000000000, VCPUs: 4, Scratch: 2 * GiB, Name: \"best\"},\n\t\t\"costly\": {Price: 4.4, RAM: 12000000000, VCPUs: 8, Scratch: 2 * GiB, Name: \"costly\"},\n\t}\n\tbest, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu, Containers: arvados.ContainersConfig{\n\t\tLocalKeepBlobBuffersPerVCPU: 16, // 1 GiB per vcpu => 2 GiB\n\t\tReserveExtraRAM:             268435456,\n\t}}, 
&arvados.Container{\n\t\tMounts: map[string]arvados.Mount{\n\t\t\t\"/tmp\": {Kind: \"tmp\", Capacity: 2 * int64(GiB)},\n\t\t},\n\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\tVCPUs:        2,\n\t\t\tRAM:          987654321,\n\t\t\tKeepCacheRAM: 123456789,\n\t\t},\n\t})\n\tc.Check(err, check.IsNil)\n\tc.Assert(best, check.HasLen, 1)\n\tc.Check(best[0].Name, check.Equals, \"best\")\n}\n\nfunc (*NodeSizeSuite) TestChoosePreemptible(c *check.C) {\n\tmenu := map[string]arvados.InstanceType{\n\t\t\"costly\":      {Price: 4.4, RAM: 4000000000, VCPUs: 8, Scratch: 2 * GiB, Preemptible: true, Name: \"costly\"},\n\t\t\"almost best\": {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Name: \"almost best\"},\n\t\t\"best\":        {Price: 2.2, RAM: 2000000000, VCPUs: 4, Scratch: 2 * GiB, Preemptible: true, Name: \"best\"},\n\t\t\"small\":       {Price: 1.1, RAM: 1000000000, VCPUs: 2, Scratch: 2 * GiB, Preemptible: true, Name: \"small\"},\n\t}\n\tbest, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu}, &arvados.Container{\n\t\tMounts: map[string]arvados.Mount{\n\t\t\t\"/tmp\": {Kind: \"tmp\", Capacity: 2 * int64(GiB)},\n\t\t},\n\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\tVCPUs:        2,\n\t\t\tRAM:          987654321,\n\t\t\tKeepCacheRAM: 123456789,\n\t\t},\n\t\tSchedulingParameters: arvados.SchedulingParameters{\n\t\t\tPreemptible: true,\n\t\t},\n\t})\n\tc.Check(err, check.IsNil)\n\tc.Assert(best, check.HasLen, 1)\n\tc.Check(best[0].Name, check.Equals, \"best\")\n\tc.Check(best[0].RAM >= 1234567890, check.Equals, true)\n\tc.Check(best[0].VCPUs >= 2, check.Equals, true)\n\tc.Check(best[0].Scratch >= 2*GiB, check.Equals, true)\n\tc.Check(best[0].Preemptible, check.Equals, true)\n}\n\nfunc (*NodeSizeSuite) TestScratchForDockerImage(c *check.C) {\n\tn := EstimateScratchSpace(&arvados.Container{\n\t\tContainerImage: \"d5025c0f29f6eef304a7358afa82a822+342\",\n\t})\n\t// Actual image is 371.1 MiB (according to workbench)\n\t// Estimated size is 384 MiB (402653184 bytes)\n\t// Want to reserve 2x the estimated size, so 805306368 bytes\n\tc.Check(n, check.Equals, int64(805306368))\n\n\tn = EstimateScratchSpace(&arvados.Container{\n\t\tContainerImage: \"d5025c0f29f6eef304a7358afa82a822+-342\",\n\t})\n\t// Parse error will return 0\n\tc.Check(n, check.Equals, int64(0))\n\n\tn = EstimateScratchSpace(&arvados.Container{\n\t\tContainerImage: \"d5025c0f29f6eef304a7358afa82a822+34\",\n\t})\n\t// Short manifest will return 0\n\tc.Check(n, check.Equals, int64(0))\n}\n\nfunc (*NodeSizeSuite) TestChooseGPU(c *check.C) {\n\tmenu := map[string]arvados.InstanceType{\n\t\t\"costly\": {Price: 4.4, RAM: 4 * GiB, VCPUs: 8, Scratch: 2 * GiB, Name: \"costly\",\n\t\t\tGPU: arvados.GPUFeatures{Stack: \"cuda\", DeviceCount: 2, HardwareTarget: \"9.0\", DriverVersion: \"11.0\", VRAM: 2 * GiB}},\n\n\t\t\"low_capability\": {Price: 2.1, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: \"low_capability\",\n\t\t\tGPU: arvados.GPUFeatures{Stack: \"cuda\", DeviceCount: 1, HardwareTarget: \"8.0\", DriverVersion: \"11.0\", VRAM: 2 * GiB}},\n\n\t\t\"best\": {Price: 2.2, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: \"best\",\n\t\t\tGPU: arvados.GPUFeatures{Stack: \"cuda\", DeviceCount: 1, HardwareTarget: \"9.0\", DriverVersion: \"11.0\", VRAM: 2 * GiB}},\n\n\t\t\"low_driver\": {Price: 2.1, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: \"low_driver\",\n\t\t\tGPU: arvados.GPUFeatures{Stack: \"cuda\", DeviceCount: 1, HardwareTarget: \"9.0\", DriverVersion: \"10.0\", VRAM: 2 * 
GiB}},\n\n\t\t\"cheap_gpu\": {Price: 2.0, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: \"cheap_gpu\",\n\t\t\tGPU: arvados.GPUFeatures{Stack: \"cuda\", DeviceCount: 1, HardwareTarget: \"8.0\", DriverVersion: \"10.0\", VRAM: 2 * GiB}},\n\n\t\t\"more_vram\": {Price: 2.3, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: \"more_vram\",\n\t\t\tGPU: arvados.GPUFeatures{Stack: \"cuda\", DeviceCount: 1, HardwareTarget: \"8.0\", DriverVersion: \"10.0\", VRAM: 8 * GiB}},\n\n\t\t\"invalid_gpu\": {Price: 1.9, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: \"invalid_gpu\",\n\t\t\tGPU: arvados.GPUFeatures{Stack: \"cuda\", DeviceCount: 1, HardwareTarget: \"12.0.12\", DriverVersion: \"12.0.12\", VRAM: 2 * GiB}},\n\n\t\t\"gpu_rocm\": {Price: 2.0, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: \"gpu_rocm\",\n\t\t\tGPU: arvados.GPUFeatures{Stack: \"rocm\", DeviceCount: 1, HardwareTarget: \"gfx1100\", DriverVersion: \"6.2\", VRAM: 20 * GiB}},\n\n\t\t\"cheap_gpu_rocm\": {Price: 1.9, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: \"cheap_gpu_rocm\",\n\t\t\tGPU: arvados.GPUFeatures{Stack: \"rocm\", DeviceCount: 1, HardwareTarget: \"gfx1103\", DriverVersion: \"6.2\", VRAM: 8 * GiB}},\n\n\t\t// Unspecified VRAM was supported in Arvados 3.1 for backwards\n\t\t// compatibility with old \"CUDA\" configuration. Arvados 3.3 dropped\n\t\t// support for \"CUDA\" configuration completely, so this is no longer\n\t\t// treated specially. See the test case noted below.\n\t\t\"unspecified_vram\": {Price: 2.0, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: \"unspecified_vram\",\n\t\t\tGPU: arvados.GPUFeatures{Stack: \"rocm\", DeviceCount: 1, HardwareTarget: \"gfx1104\", DriverVersion: \"6.2\", VRAM: 0}},\n\n\t\t\"non_gpu\": {Price: 1.1, RAM: 2 * GiB, VCPUs: 4, Scratch: 2 * GiB, Name: \"non_gpu\"},\n\t}\n\n\ttype GPUTestCase struct {\n\t\tGPU              arvados.GPURuntimeConstraints\n\t\tSelectedInstance string\n\t}\n\tcases := []GPUTestCase{\n\t\tGPUTestCase{\n\t\t\tGPU: arvados.GPURuntimeConstraints{\n\t\t\t\tStack:          \"cuda\",\n\t\t\t\tDeviceCount:    1,\n\t\t\t\tHardwareTarget: []string{\"9.0\"},\n\t\t\t\tDriverVersion:  \"11.0\",\n\t\t\t\tVRAM:           2000000000,\n\t\t\t},\n\t\t\tSelectedInstance: \"best\",\n\t\t},\n\t\tGPUTestCase{\n\t\t\tGPU: arvados.GPURuntimeConstraints{\n\t\t\t\tStack:          \"cuda\",\n\t\t\t\tDeviceCount:    2,\n\t\t\t\tHardwareTarget: []string{\"9.0\"},\n\t\t\t\tDriverVersion:  \"11.0\",\n\t\t\t\tVRAM:           2000000000,\n\t\t\t},\n\t\t\tSelectedInstance: \"costly\",\n\t\t},\n\t\tGPUTestCase{\n\t\t\tGPU: arvados.GPURuntimeConstraints{\n\t\t\t\tStack:          \"cuda\",\n\t\t\t\tDeviceCount:    1,\n\t\t\t\tHardwareTarget: []string{\"8.0\"},\n\t\t\t\tDriverVersion:  \"11.0\",\n\t\t\t\tVRAM:           2000000000,\n\t\t\t},\n\t\t\tSelectedInstance: \"low_capability\",\n\t\t},\n\t\tGPUTestCase{\n\t\t\tGPU: arvados.GPURuntimeConstraints{\n\t\t\t\tStack:          \"cuda\",\n\t\t\t\tDeviceCount:    1,\n\t\t\t\tHardwareTarget: []string{\"9.0\"},\n\t\t\t\tDriverVersion:  \"10.0\",\n\t\t\t\tVRAM:           2000000000,\n\t\t\t},\n\t\t\tSelectedInstance: \"low_driver\",\n\t\t},\n\t\tGPUTestCase{\n\t\t\tGPU: arvados.GPURuntimeConstraints{\n\t\t\t\tStack:          \"cuda\",\n\t\t\t\tDeviceCount:    1,\n\t\t\t\tHardwareTarget: []string{\"8.0\"},\n\t\t\t\tDriverVersion:  \"11.0\",\n\t\t\t\tVRAM:           8000000000,\n\t\t\t},\n\t\t\tSelectedInstance: \"more_vram\",\n\t\t},\n\t\tGPUTestCase{\n\t\t\tGPU: arvados.GPURuntimeConstraints{\n\t\t\t\tStack:          \"cuda\",\n\t\t\t\tDeviceCount:    
1,\n\t\t\t\tHardwareTarget: []string{},\n\t\t\t\tDriverVersion:  \"10.0\",\n\t\t\t\tVRAM:           2000000000,\n\t\t\t},\n\t\t\tSelectedInstance: \"\",\n\t\t},\n\t\tGPUTestCase{\n\t\t\tGPU: arvados.GPURuntimeConstraints{\n\t\t\t\tStack:          \"rocm\",\n\t\t\t\tDeviceCount:    1,\n\t\t\t\tHardwareTarget: []string{\"gfx1100\"},\n\t\t\t\tDriverVersion:  \"6.2\",\n\t\t\t\tVRAM:           2000000000,\n\t\t\t},\n\t\t\tSelectedInstance: \"gpu_rocm\",\n\t\t},\n\t\tGPUTestCase{\n\t\t\tGPU: arvados.GPURuntimeConstraints{\n\t\t\t\tStack:          \"rocm\",\n\t\t\t\tDeviceCount:    1,\n\t\t\t\tHardwareTarget: []string{\"gfx1100\", \"gfx1103\"},\n\t\t\t\tDriverVersion:  \"6.2\",\n\t\t\t\tVRAM:           2000000000,\n\t\t\t},\n\t\t\tSelectedInstance: \"cheap_gpu_rocm\",\n\t\t},\n\t\tGPUTestCase{\n\t\t\tGPU: arvados.GPURuntimeConstraints{\n\t\t\t\tStack:          \"rocm\",\n\t\t\t\tDeviceCount:    1,\n\t\t\t\tHardwareTarget: []string{\"gfx1104\"},\n\t\t\t\tDriverVersion:  \"6.2\",\n\t\t\t\tVRAM:           2000000000,\n\t\t\t},\n\t\t\t// This returned \"unspecified_vram\" from Arvados 3.1 until 3.3.\n\t\t\t// Now we check there is no suitable instance type.\n\t\t\tSelectedInstance: \"\",\n\t\t},\n\t\tGPUTestCase{\n\t\t\tGPU: arvados.GPURuntimeConstraints{\n\t\t\t\tStack:          \"\",\n\t\t\t\tDeviceCount:    0,\n\t\t\t\tHardwareTarget: []string{\"\"},\n\t\t\t\tDriverVersion:  \"\",\n\t\t\t\tVRAM:           0,\n\t\t\t},\n\t\t\tSelectedInstance: \"non_gpu\",\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\tbest, err := ChooseInstanceType(&arvados.Cluster{InstanceTypes: menu}, &arvados.Container{\n\t\t\tMounts: map[string]arvados.Mount{\n\t\t\t\t\"/tmp\": {Kind: \"tmp\", Capacity: 2 * int64(GiB)},\n\t\t\t},\n\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\tVCPUs:        2,\n\t\t\t\tRAM:          987654321,\n\t\t\t\tKeepCacheRAM: 123456789,\n\t\t\t\tGPU:          tc.GPU,\n\t\t\t},\n\t\t})\n\t\tif len(best) > 0 {\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tc.Assert(best, check.HasLen, 1)\n\t\t\tc.Check(best[0].Name, check.Equals, tc.SelectedInstance)\n\t\t} else {\n\t\t\tc.Check(err, check.Not(check.IsNil))\n\t\t}\n\t}\n}\n\nfunc (*NodeSizeSuite) TestInstanceResources(c *check.C) {\n\tc.Check(InstanceResources{VCPUs: 2}.Sub(InstanceResources{VCPUs: 0}).sharedVCPUUsed, check.Equals, true)\n\tc.Check(InstanceResources{VCPUs: 2}.Sub(InstanceResources{VCPUs: 1}).sharedVCPUUsed, check.Equals, false)\n\tc.Check(InstanceResources{VCPUs: 2, sharedVCPUUsed: true}.Sub(InstanceResources{VCPUs: 0}).sharedVCPUUsed, check.Equals, true)\n\tc.Check(InstanceResources{VCPUs: 2, sharedVCPUUsed: true}.Sub(InstanceResources{VCPUs: 1}).sharedVCPUUsed, check.Equals, true)\n\tc.Check(InstanceResources{VCPUs: 2}.Accommodates(InstanceResources{VCPUs: 2}), check.Equals, true)\n\t// once sharedVCPUUsed is set, r.Accommodates(r2)==false when\n\t// r.VCPUs==r2.VCPUs.\n\tc.Check(InstanceResources{VCPUs: 2, sharedVCPUUsed: true}.Accommodates(InstanceResources{VCPUs: 2}), check.Equals, false)\n\tc.Check(InstanceResources{VCPUs: 2, sharedVCPUUsed: true}.Accommodates(InstanceResources{VCPUs: 1}), check.Equals, true)\n\tc.Check(InstanceResources{VCPUs: 2, sharedVCPUUsed: true}.Accommodates(InstanceResources{VCPUs: 0}), check.Equals, true)\n\tc.Check(InstanceResources{VCPUs: 1, sharedVCPUUsed: true}.Accommodates(InstanceResources{VCPUs: 1}), check.Equals, false)\n\tc.Check(InstanceResources{VCPUs: 1, sharedVCPUUsed: true}.Accommodates(InstanceResources{VCPUs: 0}), check.Equals, true)\n\t// once VCPUs is 0, r.Accommodates(...) 
returns false.\n\tc.Check(InstanceResources{VCPUs: 0, sharedVCPUUsed: true}.Accommodates(InstanceResources{VCPUs: 0}), check.Equals, false)\n\tc.Check(InstanceResources{VCPUs: 0}.Accommodates(InstanceResources{VCPUs: 0}), check.Equals, false)\n}\n"
  },
  {
    "path": "lib/dispatchcloud/container/queue.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage container\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// Stop fetching queued containers after this many of the highest\n// priority non-supervisor containers. Reduces API load when queue is\n// long. This also limits how quickly a large batch of queued\n// containers can be started, which improves reliability under high\n// load at the cost of increased under light load.\nconst queuedContainersTarget = 100\n\n// An APIClient performs Arvados API requests. It is typically an\n// *arvados.Client.\ntype APIClient interface {\n\tRequestAndDecode(dst interface{}, method, path string, body io.Reader, params interface{}) error\n}\n\n// A QueueEnt is an entry in the queue, consisting of a container\n// record and the instance type that should be used to run it.\ntype QueueEnt struct {\n\t// The container to run. Only the UUID, State, Priority,\n\t// RuntimeConstraints, ContainerImage, SchedulingParameters,\n\t// and CreatedAt fields are populated.\n\tContainer         arvados.Container      `json:\"container\"`\n\tInstanceResources InstanceResources      `json:\"instance_resources\"`\n\tInstanceTypes     []arvados.InstanceType `json:\"instance_types\"`\n\tFirstSeenAt       time.Time              `json:\"first_seen_at\"`\n}\n\n// String implements fmt.Stringer by returning the queued container's\n// UUID.\nfunc (c *QueueEnt) String() string {\n\treturn c.Container.UUID\n}\n\n// A Queue is an interface to an Arvados cluster's container\n// database. It presents only the containers that are eligible to be\n// run by, are already being run by, or have recently been run by the\n// present dispatcher.\n//\n// The Entries, Get, and Forget methods do not block: they return\n// immediately, using cached data.\n//\n// The updating methods (Cancel, Lock, Unlock, Update) do block: they\n// return only after the operation has completed.\n//\n// A Queue's Update method should be called periodically to keep the\n// cache up to date.\ntype Queue struct {\n\tlogger  logrus.FieldLogger\n\tcluster *arvados.Cluster\n\tclient  APIClient\n\n\tauth    *arvados.APIClientAuthorization\n\tcurrent map[string]QueueEnt\n\tupdated time.Time\n\tmtx     sync.Mutex\n\n\t// Methods that modify the Queue (like Lock) add the affected\n\t// container UUIDs to dontupdate. When applying a batch of\n\t// updates received from the network, anything appearing in\n\t// dontupdate is skipped, in case the received update has\n\t// already been superseded by the locally initiated change.\n\t// When no network update is in progress, this protection is\n\t// not needed, and dontupdate is nil.\n\tdontupdate map[string]struct{}\n\n\t// active notification subscribers (see Subscribe)\n\tsubscribers map[<-chan struct{}]chan struct{}\n}\n\n// NewQueue returns a new Queue. 
// NewQueue returns a new Queue. When a new container appears in the\n// Arvados cluster's queue during Update, suitable instance types are\n// chosen for the queue entry (see ChooseInstanceType).\nfunc NewQueue(logger logrus.FieldLogger, reg *prometheus.Registry, cluster *arvados.Cluster, client APIClient) *Queue {\n\tcq := &Queue{\n\t\tlogger:      logger,\n\t\tcluster:     cluster,\n\t\tclient:      client,\n\t\tcurrent:     map[string]QueueEnt{},\n\t\tsubscribers: map[<-chan struct{}]chan struct{}{},\n\t}\n\tif reg != nil {\n\t\tgo cq.runMetrics(reg)\n\t}\n\treturn cq\n}\n\n// Subscribe returns a channel that becomes ready to receive when an\n// entry in the Queue is updated.\n//\n//\tch := q.Subscribe()\n//\tdefer q.Unsubscribe(ch)\n//\tfor range ch {\n//\t\t// ...\n//\t}\nfunc (cq *Queue) Subscribe() <-chan struct{} {\n\tcq.mtx.Lock()\n\tdefer cq.mtx.Unlock()\n\tch := make(chan struct{}, 1)\n\tcq.subscribers[ch] = ch\n\treturn ch\n}\n\n// Unsubscribe stops sending updates to the given channel. See\n// Subscribe.\nfunc (cq *Queue) Unsubscribe(ch <-chan struct{}) {\n\tcq.mtx.Lock()\n\tdefer cq.mtx.Unlock()\n\tdelete(cq.subscribers, ch)\n}\n\n// Caller must have lock.\nfunc (cq *Queue) notify() {\n\tfor _, ch := range cq.subscribers {\n\t\tselect {\n\t\tcase ch <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t}\n}\n\n// Forget drops the specified container from the cache. It should be\n// called on finalized containers to avoid leaking memory over\n// time. It is a no-op if the indicated container is not in a\n// finalized state.\nfunc (cq *Queue) Forget(uuid string) {\n\tcq.mtx.Lock()\n\tdefer cq.mtx.Unlock()\n\tctr := cq.current[uuid].Container\n\tif ctr.State == arvados.ContainerStateComplete || ctr.State == arvados.ContainerStateCancelled || (ctr.State == arvados.ContainerStateQueued && ctr.Priority == 0) {\n\t\tcq.delEnt(uuid, ctr.State)\n\t}\n}\n\n// Get returns the (partial) Container record for the specified\n// container. Like a map lookup, its second return value is false if\n// the specified container is not in the Queue.\nfunc (cq *Queue) Get(uuid string) (arvados.Container, bool) {\n\tcq.mtx.Lock()\n\tdefer cq.mtx.Unlock()\n\tctr, ok := cq.current[uuid]\n\tif !ok {\n\t\treturn arvados.Container{}, false\n\t}\n\treturn ctr.Container, true\n}\n\n// Entries returns all cache entries, keyed by container UUID.\n//\n// The returned threshold indicates the maximum age of any cached data\n// returned in the map. This makes it possible for a scheduler to\n// determine correctly the outcome of a remote process that updates\n// container state. It must first wait for the remote process to exit,\n// then wait for the Queue to start and finish its next Update --\n// i.e., it must wait until threshold > timeProcessExited.\nfunc (cq *Queue) Entries() (entries map[string]QueueEnt, threshold time.Time) {\n\tcq.mtx.Lock()\n\tdefer cq.mtx.Unlock()\n\tentries = make(map[string]QueueEnt, len(cq.current))\n\tfor uuid, ctr := range cq.current {\n\t\tentries[uuid] = ctr\n\t}\n\tthreshold = cq.updated\n\treturn\n}\n
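\n// For example (sketch): a scheduler that saw a remote crunch-run\n// process exit at time t can trust the cached state of that\n// container once the cache postdates the exit:\n//\n//\t_, threshold := cq.Entries()\n//\tif threshold.After(t) {\n//\t\t// cached state reflects the process exit\n//\t}\n\n// Update refreshes the cache from the Arvados API. 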
It adds newly\n// queued containers, and updates the state of previously queued\n// containers.\nfunc (cq *Queue) Update() error {\n\tcq.mtx.Lock()\n\tcq.dontupdate = map[string]struct{}{}\n\tupdateStarted := time.Now()\n\tcq.mtx.Unlock()\n\n\tnext, err := cq.poll()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcq.mtx.Lock()\n\tdefer cq.mtx.Unlock()\n\tfor uuid, ctr := range next {\n\t\tif _, dontupdate := cq.dontupdate[uuid]; dontupdate {\n\t\t\t// Don't clobber a local update that happened\n\t\t\t// after we started polling.\n\t\t\tcontinue\n\t\t}\n\t\tif cur, ok := cq.current[uuid]; !ok {\n\t\t\tcq.addEnt(uuid, *ctr)\n\t\t} else {\n\t\t\tcur.Container = *ctr\n\t\t\tcq.current[uuid] = cur\n\t\t}\n\t}\n\tfor uuid, ent := range cq.current {\n\t\tif _, dontupdate := cq.dontupdate[uuid]; dontupdate {\n\t\t\t// Don't expunge an entry that was\n\t\t\t// added/updated locally after we started\n\t\t\t// polling.\n\t\t\tcontinue\n\t\t} else if _, stillpresent := next[uuid]; !stillpresent {\n\t\t\t// Expunge an entry that no longer appears in\n\t\t\t// the poll response (evidently it's\n\t\t\t// cancelled, completed, deleted, or taken by\n\t\t\t// a different dispatcher).\n\t\t\tcq.delEnt(uuid, ent.Container.State)\n\t\t}\n\t}\n\tcq.dontupdate = nil\n\tcq.updated = updateStarted\n\tcq.notify()\n\treturn nil\n}\n\n// Caller must have lock.\nfunc (cq *Queue) delEnt(uuid string, state arvados.ContainerState) {\n\tcq.logger.WithFields(logrus.Fields{\n\t\t\"ContainerUUID\": uuid,\n\t\t\"State\":         state,\n\t}).Info(\"dropping container from queue\")\n\tdelete(cq.current, uuid)\n}\n\n// Caller must have lock.\nfunc (cq *Queue) addEnt(uuid string, ctr arvados.Container) {\n\tlogger := cq.logger.WithField(\"ContainerUUID\", ctr.UUID)\n\t// We didn't ask for the Mounts field when polling\n\t// controller/RailsAPI, because it can be expensive on the\n\t// Rails side, and most of the time we already have it.  
But\n\t// this is the first time we're seeing this container, so we\n\t// need to fetch mounts in order to choose an instance type.\n\terr := cq.client.RequestAndDecode(&ctr, \"GET\", \"arvados/v1/containers/\"+ctr.UUID, nil, arvados.GetOptions{\n\t\tSelect: []string{\"mounts\"},\n\t})\n\tif err != nil {\n\t\tlogger.WithError(err).Warn(\"error getting mounts\")\n\t\tif strings.Contains(err.Error(), \"json: cannot unmarshal\") {\n\t\t\t// see https://dev.arvados.org/issues/21314\n\t\t\tgo cq.cancelUnsatisfiableContainer(ctr, \"error getting mounts from container record: \"+err.Error())\n\t\t}\n\t\treturn\n\t}\n\tresources := InstanceResourcesNeeded(cq.cluster, &ctr)\n\ttypes, err := ChooseInstanceType(cq.cluster, &ctr)\n\n\t// Avoid wasting memory on a large Mounts attr (we don't need\n\t// it after choosing type).\n\tctr.Mounts = nil\n\n\tif err != nil && (ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked) {\n\t\t// We assume here that any ChooseInstanceType error is a\n\t\t// hard error: it wouldn't help to try again, or to leave\n\t\t// it for a different dispatcher process to attempt.\n\t\tlogger.WithError(err).Warn(\"cancel container with no suitable instance type\")\n\t\tgo cq.cancelUnsatisfiableContainer(ctr, err.Error())\n\t\treturn\n\t}\n\ttypeNames := \"\"\n\tfor _, it := range types {\n\t\tif typeNames != \"\" {\n\t\t\ttypeNames += \", \"\n\t\t}\n\t\ttypeNames += it.Name\n\t}\n\tcq.logger.WithFields(logrus.Fields{\n\t\t\"ContainerUUID\": ctr.UUID,\n\t\t\"State\":         ctr.State,\n\t\t\"Priority\":      ctr.Priority,\n\t\t\"InstanceTypes\": typeNames,\n\t}).Info(\"adding container to queue\")\n\tcq.current[uuid] = QueueEnt{Container: ctr, InstanceResources: resources, InstanceTypes: types, FirstSeenAt: time.Now()}\n}\n\nfunc (cq *Queue) cancelUnsatisfiableContainer(ctr arvados.Container, errorString string) {\n\tlogger := cq.logger.WithField(\"ContainerUUID\", ctr.UUID)\n\tif ctr.State == arvados.ContainerStateQueued {\n\t\t// Can't set runtime error without locking first.\n\t\terr := cq.Lock(ctr.UUID)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Warn(\"lock failed\")\n\t\t\treturn\n\t\t\t// ...and try again on the next Update, if the\n\t\t\t// problem still exists.\n\t\t}\n\t}\n\tvar err error\n\tdefer func() {\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t\t// On failure, check current container state, and\n\t\t// don't log the error if the failure came from losing\n\t\t// a race.\n\t\tvar latest arvados.Container\n\t\tcq.client.RequestAndDecode(&latest, \"GET\", \"arvados/v1/containers/\"+ctr.UUID, nil, map[string][]string{\"select\": {\"state\"}})\n\t\tif latest.State == arvados.ContainerStateCancelled {\n\t\t\treturn\n\t\t}\n\t\tlogger.WithError(err).Warn(\"error while trying to cancel unsatisfiable container\")\n\t}()\n\terr = cq.setRuntimeError(ctr.UUID, errorString)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = cq.Cancel(ctr.UUID)\n\tif err != nil {\n\t\treturn\n\t}\n}\n\n// Lock acquires the dispatch lock for the given container.\nfunc (cq *Queue) Lock(uuid string) error {\n\treturn cq.apiUpdate(uuid, \"lock\")\n}\n\n// Unlock releases the dispatch lock for the given container.\nfunc (cq *Queue) Unlock(uuid string) error {\n\treturn cq.apiUpdate(uuid, \"unlock\")\n}\n
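\n// The usual sequence for flagging and cancelling an unsatisfiable\n// container is Lock, setRuntimeError, Cancel;\n// cancelUnsatisfiableContainer above implements it with full error\n// handling. As a sketch:\n//\n//\t_ = cq.Lock(uuid)                 // Queued -> Locked\n//\t_ = cq.setRuntimeError(uuid, msg) // requires Locked or Running\n//\t_ = cq.Cancel(uuid)               // Locked -> Cancelled\n\n// setRuntimeError sets runtime_status[\"error\"] to the given value.\n// Container should already have state==Locked or Running.\nfunc (cq *Queue) setRuntimeError(uuid, errorString string) error {\n\treturn cq.client.RequestAndDecode(nil, \"PUT\", 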
\"arvados/v1/containers/\"+uuid, nil, map[string]map[string]map[string]interface{}{\n\t\t\"container\": {\n\t\t\t\"runtime_status\": {\n\t\t\t\t\"error\": errorString,\n\t\t\t},\n\t\t},\n\t})\n}\n\n// Cancel cancels the given container.\nfunc (cq *Queue) Cancel(uuid string) error {\n\tvar resp arvados.Container\n\terr := cq.client.RequestAndDecode(&resp, \"PUT\", \"arvados/v1/containers/\"+uuid, nil, map[string]map[string]interface{}{\n\t\t\"container\": {\"state\": arvados.ContainerStateCancelled},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tcq.updateWithResp(uuid, resp)\n\treturn nil\n}\n\nfunc (cq *Queue) apiUpdate(uuid, action string) error {\n\tvar resp arvados.Container\n\terr := cq.client.RequestAndDecode(&resp, \"POST\", \"arvados/v1/containers/\"+uuid+\"/\"+action, nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcq.updateWithResp(uuid, resp)\n\treturn nil\n}\n\n// Update the local queue with the response received from a\n// state-changing API request (lock/unlock/cancel).\nfunc (cq *Queue) updateWithResp(uuid string, resp arvados.Container) {\n\tcq.mtx.Lock()\n\tdefer cq.mtx.Unlock()\n\tif cq.dontupdate != nil {\n\t\tcq.dontupdate[uuid] = struct{}{}\n\t}\n\tent, ok := cq.current[uuid]\n\tif !ok {\n\t\t// Container is not in queue (e.g., it was not added\n\t\t// because there is no suitable instance type, and\n\t\t// we're just locking/updating it in order to set an\n\t\t// error message). No need to add it, and we don't\n\t\t// necessarily have enough information to add it here\n\t\t// anyway because lock/unlock responses don't include\n\t\t// runtime_constraints.\n\t\treturn\n\t}\n\tent.Container.State, ent.Container.Priority, ent.Container.LockedByUUID = resp.State, resp.Priority, resp.LockedByUUID\n\tcq.current[uuid] = ent\n\tcq.notify()\n}\n\nfunc (cq *Queue) poll() (map[string]*arvados.Container, error) {\n\tcq.mtx.Lock()\n\tsize := len(cq.current)\n\tauth := cq.auth\n\tcq.mtx.Unlock()\n\n\tif auth == nil {\n\t\tauth = &arvados.APIClientAuthorization{}\n\t\terr := cq.client.RequestAndDecode(auth, \"GET\", \"arvados/v1/api_client_authorizations/current\", nil, nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcq.mtx.Lock()\n\t\tcq.auth = auth\n\t\tcq.mtx.Unlock()\n\t}\n\n\tnext := make(map[string]*arvados.Container, size)\n\tapply := func(updates []arvados.Container) {\n\t\tfor _, upd := range updates {\n\t\t\tif next[upd.UUID] == nil {\n\t\t\t\tnext[upd.UUID] = &arvados.Container{}\n\t\t\t}\n\t\t\t*next[upd.UUID] = upd\n\t\t}\n\t}\n\tselectParam := []string{\"uuid\", \"state\", \"priority\", \"runtime_constraints\", \"container_image\", \"scheduling_parameters\", \"created_at\"}\n\tlimitParam := 1000\n\n\tmine, err := cq.fetchAll(arvados.ResourceListParams{\n\t\tSelect:  selectParam,\n\t\tOrder:   \"uuid\",\n\t\tLimit:   &limitParam,\n\t\tCount:   \"none\",\n\t\tFilters: []arvados.Filter{{\"locked_by_uuid\", \"=\", auth.UUID}},\n\t}, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tapply(mine)\n\n\tavail, err := cq.fetchAll(arvados.ResourceListParams{\n\t\tSelect:  selectParam,\n\t\tOrder:   \"priority desc\",\n\t\tLimit:   &limitParam,\n\t\tCount:   \"none\",\n\t\tFilters: []arvados.Filter{{\"state\", \"=\", arvados.ContainerStateQueued}, {\"priority\", \">\", \"0\"}},\n\t}, queuedContainersTarget)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tapply(avail)\n\n\t// Check for containers that we already know about but weren't\n\t// returned by any of the above queries, and fetch them\n\t// explicitly by UUID. 
If they're in a final state we can drop\n\t// them, but otherwise we need to apply updates, e.g.,\n\t//\n\t// - Queued container priority has been reduced\n\t// - Locked container has been requeued with lower priority\n\tmissing := map[string]bool{}\n\tcq.mtx.Lock()\n\tfor uuid, ent := range cq.current {\n\t\tif next[uuid] == nil &&\n\t\t\tent.Container.State != arvados.ContainerStateCancelled &&\n\t\t\tent.Container.State != arvados.ContainerStateComplete {\n\t\t\tmissing[uuid] = true\n\t\t}\n\t}\n\tcq.mtx.Unlock()\n\n\tfor len(missing) > 0 {\n\t\tvar batch []string\n\t\tfor uuid := range missing {\n\t\t\tbatch = append(batch, uuid)\n\t\t\tif len(batch) == 20 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tfilters := []arvados.Filter{{\"uuid\", \"in\", batch}}\n\t\tended, err := cq.fetchAll(arvados.ResourceListParams{\n\t\t\tSelect:  selectParam,\n\t\t\tOrder:   \"uuid\",\n\t\t\tCount:   \"none\",\n\t\t\tFilters: filters,\n\t\t}, 0)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tapply(ended)\n\t\tif len(ended) == 0 {\n\t\t\t// This is the only case where we can conclude\n\t\t\t// a container has been deleted from the\n\t\t\t// database. A short (but non-zero) page, on\n\t\t\t// the other hand, can be caused by a response\n\t\t\t// size limit.\n\t\t\tfor _, uuid := range batch {\n\t\t\t\tcq.logger.WithField(\"ContainerUUID\", uuid).Warn(\"container not found by controller (deleted?)\")\n\t\t\t\tdelete(missing, uuid)\n\t\t\t\tcq.mtx.Lock()\n\t\t\t\tcq.delEnt(uuid, cq.current[uuid].Container.State)\n\t\t\t\tcq.mtx.Unlock()\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tfor _, ctr := range ended {\n\t\t\tif _, ok := missing[ctr.UUID]; !ok {\n\t\t\t\tmsg := \"BUG? server response did not match requested filters, erroring out rather than risk deadlock\"\n\t\t\t\tcq.logger.WithFields(logrus.Fields{\n\t\t\t\t\t\"ContainerUUID\": ctr.UUID,\n\t\t\t\t\t\"Filters\":       filters,\n\t\t\t\t}).Error(msg)\n\t\t\t\treturn nil, errors.New(msg)\n\t\t\t}\n\t\t\tdelete(missing, ctr.UUID)\n\t\t}\n\t}\n\treturn next, nil\n}\n\n// Fetch all pages of containers.\n//\n
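// When the caller specifies {Order: \"uuid\"}, each page after the\n// first is requested with a keyset filter (uuid > last UUID seen)\n// rather than an offset, so entries are not skipped or repeated if\n// rows change between page requests (see the pagination branch at\n// the bottom of the loop below).\n//\n// Except: if maxNonSuper>0, stop fetching more pages after receiving\n// that many non-supervisor containers. 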
Along with {Order: \"priority\n// desc\"}, this enables fetching enough high priority scheduling-ready\n// containers to make progress, without necessarily fetching the\n// entire queue.\nfunc (cq *Queue) fetchAll(initialParams arvados.ResourceListParams, maxNonSuper int) ([]arvados.Container, error) {\n\tvar results []arvados.Container\n\tparams := initialParams\n\tparams.Offset = 0\n\tnonSuper := 0\n\tfor {\n\t\t// This list variable must be a new one declared\n\t\t// inside the loop: otherwise, items in the API\n\t\t// response would get deep-merged into the items\n\t\t// loaded in previous iterations.\n\t\tvar list arvados.ContainerList\n\n\t\terr := cq.client.RequestAndDecode(&list, \"GET\", \"arvados/v1/containers\", nil, params)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(list.Items) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\t// Conserve memory by deleting mounts that aren't\n\t\t// relevant to choosing the instance type.\n\t\tfor _, c := range list.Items {\n\t\t\tfor path, mnt := range c.Mounts {\n\t\t\t\tif mnt.Kind != \"tmp\" {\n\t\t\t\t\tdelete(c.Mounts, path)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !c.SchedulingParameters.Supervisor {\n\t\t\t\tnonSuper++\n\t\t\t}\n\t\t}\n\n\t\tresults = append(results, list.Items...)\n\t\tif maxNonSuper > 0 && nonSuper >= maxNonSuper {\n\t\t\tbreak\n\t\t} else if params.Order == \"uuid\" {\n\t\t\tparams.Filters = append(initialParams.Filters, arvados.Filter{\"uuid\", \">\", list.Items[len(list.Items)-1].UUID})\n\t\t} else {\n\t\t\tparams.Offset += len(list.Items)\n\t\t}\n\t}\n\treturn results, nil\n}\n\nfunc (cq *Queue) runMetrics(reg *prometheus.Registry) {\n\tmEntries := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"queue_entries\",\n\t\tHelp:      \"Number of active container entries in the controller database.\",\n\t}, []string{\"state\", \"instance_type\"})\n\treg.MustRegister(mEntries)\n\n\ttype entKey struct {\n\t\tstate arvados.ContainerState\n\t\tinst  string\n\t}\n\tcount := map[entKey]int{}\n\n\tch := cq.Subscribe()\n\tdefer cq.Unsubscribe(ch)\n\tfor range ch {\n\t\tfor k := range count {\n\t\t\tcount[k] = 0\n\t\t}\n\t\tents, _ := cq.Entries()\n\t\tfor _, ent := range ents {\n\t\t\tcount[entKey{ent.Container.State, ent.InstanceTypes[0].Name}]++\n\t\t}\n\t\tfor k, v := range count {\n\t\t\tmEntries.WithLabelValues(string(k.state), k.inst).Set(float64(v))\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/dispatchcloud/container/queue_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage container\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"github.com/sirupsen/logrus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&IntegrationSuite{})\n\nfunc logger() logrus.FieldLogger {\n\tlogger := logrus.StandardLogger()\n\tif os.Getenv(\"ARVADOS_DEBUG\") != \"\" {\n\t\tlogger.SetLevel(logrus.DebugLevel)\n\t}\n\treturn logger\n}\n\ntype IntegrationSuite struct{}\n\nfunc (suite *IntegrationSuite) TearDownTest(c *check.C) {\n\terr := arvados.NewClientFromEnv().RequestAndDecode(nil, \"POST\", \"database/reset\", nil, nil)\n\tc.Check(err, check.IsNil)\n}\n\nfunc (suite *IntegrationSuite) TestGetLockUnlockCancel(c *check.C) {\n\tcluster := arvados.IntegrationTestCluster()\n\tcluster.InstanceTypes = map[string]arvados.InstanceType{\n\t\t\"testType\": {\n\t\t\tName:    \"testType\",\n\t\t\tVCPUs:   8,\n\t\t\tRAM:     16 << 30,\n\t\t\tScratch: 300 << 30,\n\t\t},\n\t}\n\n\tclient := arvados.NewClientFromEnv()\n\tcq := NewQueue(logger(), nil, cluster, client)\n\n\terr := cq.Update()\n\tc.Check(err, check.IsNil)\n\n\tents, threshold := cq.Entries()\n\tc.Check(len(ents), check.Not(check.Equals), 0)\n\tc.Check(time.Since(threshold) < time.Minute, check.Equals, true)\n\tc.Check(time.Since(threshold) > 0, check.Equals, true)\n\n\t_, ok := ents[arvadostest.QueuedContainerUUID]\n\tc.Check(ok, check.Equals, true)\n\n\tvar wg sync.WaitGroup\n\tfor uuid, ent := range ents {\n\t\tc.Check(ent.Container.UUID, check.Equals, uuid)\n\t\tc.Check(ent.InstanceTypes, check.HasLen, 1)\n\t\tc.Check(ent.InstanceTypes[0].Name, check.Equals, \"testType\")\n\t\tc.Check(ent.Container.State, check.Equals, arvados.ContainerStateQueued)\n\t\tc.Check(ent.Container.Priority > 0, check.Equals, true)\n\t\t// Mounts should be deleted to avoid wasting memory\n\t\tc.Check(ent.Container.Mounts, check.IsNil)\n\n\t\tctr, ok := cq.Get(uuid)\n\t\tc.Check(ok, check.Equals, true)\n\t\tc.Check(ctr.UUID, check.Equals, uuid)\n\n\t\twg.Add(1)\n\t\tgo func(uuid string) {\n\t\t\tdefer wg.Done()\n\t\t\terr := cq.Unlock(uuid)\n\t\t\tc.Check(err, check.NotNil)\n\t\t\tc.Check(err, check.ErrorMatches, \".*cannot unlock when Queued.*\")\n\n\t\t\terr = cq.Lock(uuid)\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tctr, ok := cq.Get(uuid)\n\t\t\tc.Check(ok, check.Equals, true)\n\t\t\tc.Check(ctr.State, check.Equals, arvados.ContainerStateLocked)\n\t\t\terr = cq.Lock(uuid)\n\t\t\tc.Check(err, check.NotNil)\n\n\t\t\terr = cq.Unlock(uuid)\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tctr, ok = cq.Get(uuid)\n\t\t\tc.Check(ok, check.Equals, true)\n\t\t\tc.Check(ctr.State, check.Equals, arvados.ContainerStateQueued)\n\t\t\terr = cq.Unlock(uuid)\n\t\t\tc.Check(err, check.NotNil)\n\n\t\t\terr = cq.Cancel(uuid)\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tctr, ok = cq.Get(uuid)\n\t\t\tc.Check(ok, check.Equals, true)\n\t\t\tc.Check(ctr.State, check.Equals, arvados.ContainerStateCancelled)\n\t\t\terr = cq.Lock(uuid)\n\t\t\tc.Check(err, check.NotNil)\n\t\t}(uuid)\n\t}\n\twg.Wait()\n}\n\nfunc (suite *IntegrationSuite) TestCancel_NoInstanceType(c *check.C) {\n\tcluster := arvados.IntegrationTestCluster()\n\tcluster.InstanceTypes = 
map[string]arvados.InstanceType{\n\t\t\"testType\": {\n\t\t\tName:    \"testType\",\n\t\t\tVCPUs:   4,\n\t\t\tRAM:     16 << 30,\n\t\t\tScratch: 1 << 20, // insufficient, 24 GB needed\n\t\t},\n\t}\n\n\tclient := arvados.NewClientFromEnv()\n\tcq := NewQueue(logger(), nil, cluster, client)\n\n\tgo failIfContainerAppearsInQueue(c, cq, arvadostest.QueuedContainerUUID)\n\n\tvar ctr arvados.Container\n\terr := client.RequestAndDecode(&ctr, \"GET\", \"arvados/v1/containers/\"+arvadostest.QueuedContainerUUID, nil, nil)\n\tc.Check(err, check.IsNil)\n\tc.Check(ctr.State, check.Equals, arvados.ContainerStateQueued)\n\n\tgo cq.Update()\n\n\t// Wait for the cancel operation to take effect. Container\n\t// will have state=Cancelled or just disappear from the queue.\n\tsuite.waitfor(c, time.Second, func() bool {\n\t\terr := client.RequestAndDecode(&ctr, \"GET\", \"arvados/v1/containers/\"+arvadostest.QueuedContainerUUID, nil, nil)\n\t\treturn err == nil && ctr.State == arvados.ContainerStateCancelled\n\t})\n\tc.Check(ctr.RuntimeStatus[\"error\"], check.Equals, \"constraints not satisfiable by any configured instance type\")\n}\n\nfunc (suite *IntegrationSuite) TestCancel_InvalidMountsField(c *check.C) {\n\tcfg, err := arvados.GetConfig(filepath.Join(os.Getenv(\"WORKSPACE\"), \"tmp\", \"arvados.yml\"))\n\tc.Assert(err, check.IsNil)\n\tcc, err := cfg.GetCluster(\"zzzzz\")\n\tc.Assert(err, check.IsNil)\n\tdb, err := (&ctrlctx.DBConnector{PostgreSQL: cc.PostgreSQL}).GetDB(context.Background())\n\tc.Assert(err, check.IsNil)\n\t_, err = db.Exec(`update containers set mounts=$1 where uuid=$2`, `{\"stdin\":[\"bork\"]}`, arvadostest.QueuedContainerUUID)\n\tc.Assert(err, check.IsNil)\n\t// Note this setup gets cleaned up by the database reset in\n\t// TearDownTest.\n\n\tcluster := arvados.IntegrationTestCluster()\n\tcluster.InstanceTypes = map[string]arvados.InstanceType{\n\t\t\"testType\": {\n\t\t\tName:    \"testType\",\n\t\t\tVCPUs:   4,\n\t\t\tRAM:     16 << 30,\n\t\t\tScratch: 100 << 30,\n\t\t},\n\t}\n\tclient := arvados.NewClientFromEnv()\n\tcq := NewQueue(logger(), nil, cluster, client)\n\n\tgo failIfContainerAppearsInQueue(c, cq, arvadostest.QueuedContainerUUID)\n\n\tvar ctr arvados.Container\n\terr = client.RequestAndDecode(&ctr, \"GET\", \"arvados/v1/containers/\"+arvadostest.QueuedContainerUUID, nil, arvados.GetOptions{Select: []string{\"state\"}})\n\tc.Check(err, check.IsNil)\n\tc.Check(ctr.State, check.Equals, arvados.ContainerStateQueued)\n\n\tgo cq.Update()\n\n\t// Wait for the cancel operation to take effect. Container\n\t// will have state=Cancelled or just disappear from the queue.\n\tsuite.waitfor(c, time.Second, func() bool {\n\t\terr := client.RequestAndDecode(&ctr, \"GET\", \"arvados/v1/containers/\"+arvadostest.QueuedContainerUUID, nil, arvados.GetOptions{Select: []string{\"state\", \"runtime_status\"}})\n\t\treturn err == nil && ctr.State == arvados.ContainerStateCancelled\n\t})\n\tc.Logf(\"runtime_status: %v\", ctr.RuntimeStatus)\n\tc.Check(ctr.RuntimeStatus[\"error\"], check.Matches, `error getting mounts from container record: json: cannot unmarshal .*`)\n}\n\nfunc failIfContainerAppearsInQueue(c *check.C, cq *Queue, uuid string) {\n\tch := cq.Subscribe()\n\tdefer cq.Unsubscribe(ch)\n\tfor range ch {\n\t\t// Container should never be added to\n\t\t// queue. 
Note that polling the queue this way\n\t\t// doesn't guarantee a bug (container being\n\t\t// incorrectly added to the queue) will cause\n\t\t// a test failure.\n\t\t_, ok := cq.Get(uuid)\n\t\tif !c.Check(ok, check.Equals, false) {\n\t\t\t// Don't spam the log with more failures\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (suite *IntegrationSuite) waitfor(c *check.C, timeout time.Duration, fn func() bool) {\n\tdefer func() {\n\t\tc.Check(fn(), check.Equals, true)\n\t}()\n\tdeadline := time.Now().Add(timeout)\n\tfor !fn() && time.Now().Before(deadline) {\n\t\ttime.Sleep(timeout / 1000)\n\t}\n}\n"
  },
  {
    "path": "lib/dispatchcloud/dispatcher.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchcloud\n\nimport (\n\t\"context\"\n\t\"crypto/md5\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/controller/dblock\"\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/container\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/scheduler\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/sshexecutor\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/worker\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/health\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/julienschmidt/httprouter\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\ntype pool interface {\n\tscheduler.WorkerPool\n\tCheckHealth() error\n\tInstances() []worker.InstanceView\n\tSetIdleBehavior(cloud.InstanceID, worker.IdleBehavior) error\n\tKillInstance(id cloud.InstanceID, reason string) error\n\tStop()\n}\n\ntype dispatcher struct {\n\tCluster       *arvados.Cluster\n\tContext       context.Context\n\tArvClient     *arvados.Client\n\tAuthToken     string\n\tRegistry      *prometheus.Registry\n\tInstanceSetID cloud.InstanceSetID\n\n\tdbConnector ctrlctx.DBConnector\n\tlogger      logrus.FieldLogger\n\tinstanceSet cloud.InstanceSet\n\tpool        pool\n\tqueue       scheduler.ContainerQueue\n\tsched       *scheduler.Scheduler\n\thttpHandler http.Handler\n\tsshKey      ssh.Signer\n\n\tsetupOnce sync.Once\n\tstop      chan struct{}\n\tstopped   chan struct{}\n\n\tschedQueueMtx       sync.Mutex\n\tschedQueueRefreshed time.Time\n\tschedQueue          []scheduler.QueueEnt\n\tschedQueueMap       map[string]scheduler.QueueEnt\n}\n\nvar schedQueueRefresh = time.Second\n\n// Start starts the dispatcher. Start can be called multiple times\n// with no ill effect.\nfunc (disp *dispatcher) Start() {\n\tdisp.setupOnce.Do(disp.setup)\n}\n\n// ServeHTTP implements service.Handler.\nfunc (disp *dispatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdisp.Start()\n\tdisp.httpHandler.ServeHTTP(w, r)\n}\n\n// CheckHealth implements service.Handler.\nfunc (disp *dispatcher) CheckHealth() error {\n\tdisp.Start()\n\treturn disp.pool.CheckHealth()\n}\n\n// Done implements service.Handler.\nfunc (disp *dispatcher) Done() <-chan struct{} {\n\treturn disp.stopped\n}\n\n// Stop dispatching containers and release resources. 
Typically used\n// in tests.\nfunc (disp *dispatcher) Close() {\n\tdisp.Start()\n\tselect {\n\tcase disp.stop <- struct{}{}:\n\tdefault:\n\t}\n\t<-disp.stopped\n}\n\n// Make a worker.Executor for the given instance.\nfunc (disp *dispatcher) newExecutor(inst cloud.Instance) worker.Executor {\n\texr := sshexecutor.New(inst)\n\texr.SetTargetPort(disp.Cluster.Containers.CloudVMs.SSHPort)\n\texr.SetSigners(disp.sshKey)\n\treturn exr\n}\n\nfunc (disp *dispatcher) setup() {\n\tdisp.initialize()\n\tgo disp.run()\n}\n\nfunc (disp *dispatcher) initialize() {\n\tdisp.logger = ctxlog.FromContext(disp.Context)\n\tdisp.dbConnector = ctrlctx.DBConnector{PostgreSQL: disp.Cluster.PostgreSQL}\n\n\tdisp.ArvClient.AuthToken = disp.AuthToken\n\n\tif disp.InstanceSetID == \"\" {\n\t\tif strings.HasPrefix(disp.AuthToken, \"v2/\") {\n\t\t\tdisp.InstanceSetID = cloud.InstanceSetID(strings.Split(disp.AuthToken, \"/\")[1])\n\t\t} else {\n\t\t\t// Use some other string unique to this token\n\t\t\t// that doesn't reveal the token itself.\n\t\t\tdisp.InstanceSetID = cloud.InstanceSetID(fmt.Sprintf(\"%x\", md5.Sum([]byte(disp.AuthToken))))\n\t\t}\n\t}\n\tdisp.stop = make(chan struct{}, 1)\n\tdisp.stopped = make(chan struct{})\n\n\tif key, err := config.LoadSSHKey(disp.Cluster.Containers.DispatchPrivateKey); err != nil {\n\t\tdisp.logger.Fatalf(\"error parsing configured Containers.DispatchPrivateKey: %s\", err)\n\t} else {\n\t\tdisp.sshKey = key\n\t}\n\tinstallPublicKey := disp.sshKey.PublicKey()\n\tif !disp.Cluster.Containers.CloudVMs.DeployPublicKey {\n\t\tinstallPublicKey = nil\n\t}\n\n\tinstanceSet, err := newInstanceSet(disp.Cluster, disp.InstanceSetID, disp.logger, disp.Registry)\n\tif err != nil {\n\t\tdisp.logger.Fatalf(\"error initializing driver: %s\", err)\n\t}\n\tdblock.Dispatch.Lock(disp.Context, disp.dbConnector.GetDB)\n\tdisp.instanceSet = instanceSet\n\tdisp.pool = worker.NewPool(disp.logger, disp.ArvClient, disp.Registry, disp.InstanceSetID, disp.instanceSet, disp.newExecutor, installPublicKey, disp.Cluster)\n\tif disp.queue == nil {\n\t\tdisp.queue = container.NewQueue(disp.logger, disp.Registry, disp.Cluster, disp.ArvClient)\n\t}\n\n\tdisp.sched = scheduler.New(disp.Context, disp.ArvClient, disp.queue, disp.pool, disp.Registry, disp.Cluster)\n\n\tif disp.Cluster.ManagementToken == \"\" {\n\t\tdisp.httpHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\thttp.Error(w, \"Management API authentication is not configured\", http.StatusForbidden)\n\t\t})\n\t} else {\n\t\tmux := httprouter.New()\n\t\tmux.HandlerFunc(\"GET\", \"/arvados/v1/dispatch/containers\", disp.apiContainers)\n\t\tmux.HandlerFunc(\"GET\", \"/arvados/v1/dispatch/container\", disp.apiContainer)\n\t\tmux.HandlerFunc(\"POST\", \"/arvados/v1/dispatch/containers/kill\", disp.apiContainerKill)\n\t\tmux.HandlerFunc(\"GET\", \"/arvados/v1/dispatch/instances\", disp.apiInstances)\n\t\tmux.HandlerFunc(\"POST\", \"/arvados/v1/dispatch/instances/hold\", disp.apiInstanceHold)\n\t\tmux.HandlerFunc(\"POST\", \"/arvados/v1/dispatch/instances/drain\", disp.apiInstanceDrain)\n\t\tmux.HandlerFunc(\"POST\", \"/arvados/v1/dispatch/instances/run\", disp.apiInstanceRun)\n\t\tmux.HandlerFunc(\"POST\", \"/arvados/v1/dispatch/instances/kill\", disp.apiInstanceKill)\n\t\tmetricsH := promhttp.HandlerFor(disp.Registry, promhttp.HandlerOpts{\n\t\t\tErrorLog: disp.logger,\n\t\t})\n\t\tmux.Handler(\"GET\", \"/metrics\", metricsH)\n\t\tmux.Handler(\"GET\", \"/metrics.json\", metricsH)\n\t\tmux.Handler(\"GET\", \"/_health/:check\", 
&health.Handler{\n\t\t\tToken:  disp.Cluster.ManagementToken,\n\t\t\tPrefix: \"/_health/\",\n\t\t\tRoutes: health.Routes{\"ping\": disp.CheckHealth},\n\t\t})\n\t\tdisp.httpHandler = auth.RequireLiteralToken(disp.Cluster.ManagementToken, mux)\n\t}\n}\n\nfunc (disp *dispatcher) run() {\n\tdefer dblock.Dispatch.Unlock()\n\tdefer close(disp.stopped)\n\tdefer disp.instanceSet.Stop()\n\tdefer disp.pool.Stop()\n\n\tdisp.sched.Start()\n\tdefer disp.sched.Stop()\n\n\t<-disp.stop\n}\n\n// Get a snapshot of the scheduler's queue, no older than\n// schedQueueRefresh.\n//\n// First return value is in the sorted order used by the scheduler.\n// Second return value is a map of the same entries, for efficiently\n// looking up a single container.\nfunc (disp *dispatcher) schedQueueCurrent() ([]scheduler.QueueEnt, map[string]scheduler.QueueEnt) {\n\tdisp.schedQueueMtx.Lock()\n\tdefer disp.schedQueueMtx.Unlock()\n\tif time.Since(disp.schedQueueRefreshed) > schedQueueRefresh {\n\t\tdisp.schedQueue = disp.sched.Queue()\n\t\tdisp.schedQueueMap = make(map[string]scheduler.QueueEnt)\n\t\tfor _, ent := range disp.schedQueue {\n\t\t\tdisp.schedQueueMap[ent.Container.UUID] = ent\n\t\t}\n\t\tdisp.schedQueueRefreshed = time.Now()\n\t}\n\treturn disp.schedQueue, disp.schedQueueMap\n}\n\n// Management API: scheduling queue entries for all active and queued\n// containers.\nfunc (disp *dispatcher) apiContainers(w http.ResponseWriter, r *http.Request) {\n\tvar resp struct {\n\t\tItems []scheduler.QueueEnt `json:\"items\"`\n\t}\n\tresp.Items, _ = disp.schedQueueCurrent()\n\tjson.NewEncoder(w).Encode(resp)\n}\n\n// Management API: scheduling queue entry for a specified container.\nfunc (disp *dispatcher) apiContainer(w http.ResponseWriter, r *http.Request) {\n\t_, sq := disp.schedQueueCurrent()\n\tent, ok := sq[r.FormValue(\"container_uuid\")]\n\tif !ok {\n\t\thttpserver.Error(w, \"container not found\", http.StatusNotFound)\n\t\treturn\n\t}\n\tjson.NewEncoder(w).Encode(ent)\n}\n\n// Management API: all active instances (cloud VMs).\nfunc (disp *dispatcher) apiInstances(w http.ResponseWriter, r *http.Request) {\n\tvar resp struct {\n\t\tItems []worker.InstanceView `json:\"items\"`\n\t}\n\tresp.Items = disp.pool.Instances()\n\tjson.NewEncoder(w).Encode(resp)\n}\n\n// Management API: set idle behavior to \"hold\" for specified instance.\nfunc (disp *dispatcher) apiInstanceHold(w http.ResponseWriter, r *http.Request) {\n\tdisp.apiInstanceIdleBehavior(w, r, worker.IdleBehaviorHold)\n}\n\n// Management API: set idle behavior to \"drain\" for specified instance.\nfunc (disp *dispatcher) apiInstanceDrain(w http.ResponseWriter, r *http.Request) {\n\tdisp.apiInstanceIdleBehavior(w, r, worker.IdleBehaviorDrain)\n}\n\n// Management API: set idle behavior to \"run\" for specified instance.\nfunc (disp *dispatcher) apiInstanceRun(w http.ResponseWriter, r *http.Request) {\n\tdisp.apiInstanceIdleBehavior(w, r, worker.IdleBehaviorRun)\n}\n\n// Management API: shutdown/destroy specified instance now.\nfunc (disp *dispatcher) apiInstanceKill(w http.ResponseWriter, r *http.Request) {\n\tid := cloud.InstanceID(r.FormValue(\"instance_id\"))\n\tif id == \"\" {\n\t\thttpserver.Error(w, \"instance_id parameter not provided\", http.StatusBadRequest)\n\t\treturn\n\t}\n\terr := disp.pool.KillInstance(id, \"via management API: \"+r.FormValue(\"reason\"))\n\tif err != nil {\n\t\thttpserver.Error(w, err.Error(), http.StatusNotFound)\n\t\treturn\n\t}\n}\n\n// Management API: send SIGTERM to specified container's crunch-run\n// process now.\nfunc (disp 
*dispatcher) apiContainerKill(w http.ResponseWriter, r *http.Request) {\n\tuuid := r.FormValue(\"container_uuid\")\n\tif uuid == \"\" {\n\t\thttpserver.Error(w, \"container_uuid parameter not provided\", http.StatusBadRequest)\n\t\treturn\n\t}\n\tif !disp.pool.KillContainer(uuid, \"via management API: \"+r.FormValue(\"reason\")) {\n\t\thttpserver.Error(w, \"container not found\", http.StatusNotFound)\n\t\treturn\n\t}\n}\n\nfunc (disp *dispatcher) apiInstanceIdleBehavior(w http.ResponseWriter, r *http.Request, want worker.IdleBehavior) {\n\tid := cloud.InstanceID(r.FormValue(\"instance_id\"))\n\tif id == \"\" {\n\t\thttpserver.Error(w, \"instance_id parameter not provided\", http.StatusBadRequest)\n\t\treturn\n\t}\n\terr := disp.pool.SetIdleBehavior(id, want)\n\tif err != nil {\n\t\thttpserver.Error(w, err.Error(), http.StatusNotFound)\n\t\treturn\n\t}\n}\n"
  },
  {
    "path": "lib/dispatchcloud/dispatcher_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchcloud\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/container\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/test\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"golang.org/x/crypto/ssh\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&DispatcherSuite{})\n\ntype DispatcherSuite struct {\n\tctx            context.Context\n\tcancel         context.CancelFunc\n\tcluster        *arvados.Cluster\n\tstubDriver     *test.StubDriver\n\tdisp           *dispatcher\n\terror503Server *httptest.Server\n}\n\nfunc (s *DispatcherSuite) SetUpTest(c *check.C) {\n\ts.ctx, s.cancel = context.WithCancel(context.Background())\n\ts.ctx = ctxlog.Context(s.ctx, ctxlog.TestLogger(c))\n\tdispatchpub, _ := test.LoadTestKey(c, \"test/sshkey_dispatch\")\n\tdispatchprivraw, err := ioutil.ReadFile(\"test/sshkey_dispatch\")\n\tc.Assert(err, check.IsNil)\n\n\t_, hostpriv := test.LoadTestKey(c, \"test/sshkey_vm\")\n\ts.stubDriver = &test.StubDriver{\n\t\tHostKey:                   hostpriv,\n\t\tAuthorizedKeys:            []ssh.PublicKey{dispatchpub},\n\t\tMinTimeBetweenCreateCalls: time.Millisecond,\n\t\tQuotaMaxInstances:         10,\n\t}\n\n\t// We need the postgresql connection info from the integration\n\t// test config.\n\tcfg, err := config.NewLoader(nil, ctxlog.FromContext(s.ctx)).Load()\n\tc.Assert(err, check.IsNil)\n\ttestcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\n\ts.cluster = &arvados.Cluster{\n\t\tManagementToken: \"test-management-token\",\n\t\tPostgreSQL:      testcluster.PostgreSQL,\n\t\tContainers: arvados.ContainersConfig{\n\t\t\tCrunchRunCommand:       \"crunch-run\",\n\t\t\tCrunchRunArgumentsList: []string{\"--foo\", \"--extra='args'\"},\n\t\t\tDispatchPrivateKey:     string(dispatchprivraw),\n\t\t\tStaleLockTimeout:       arvados.Duration(5 * time.Millisecond),\n\t\t\tRuntimeEngine:          \"stub\",\n\t\t\tMaxDispatchAttempts:    10,\n\t\t\tMaximumPriceFactor:     1.5,\n\t\t\tCloudVMs: arvados.CloudVMsConfig{\n\t\t\t\tDriver:               \"test\",\n\t\t\t\tSyncInterval:         arvados.Duration(10 * time.Millisecond),\n\t\t\t\tTimeoutIdle:          arvados.Duration(150 * time.Millisecond),\n\t\t\t\tTimeoutBooting:       arvados.Duration(150 * time.Millisecond),\n\t\t\t\tTimeoutProbe:         arvados.Duration(15 * time.Millisecond),\n\t\t\t\tTimeoutShutdown:      arvados.Duration(5 * time.Millisecond),\n\t\t\t\tMaxCloudOpsPerSecond: 500,\n\t\t\t\tInitialQuotaEstimate: 8,\n\t\t\t\tPollInterval:         arvados.Duration(5 * time.Millisecond),\n\t\t\t\tProbeInterval:        arvados.Duration(5 * time.Millisecond),\n\t\t\t\tMaxProbesPerSecond:   1000,\n\t\t\t\tTimeoutSignal:        arvados.Duration(3 * time.Millisecond),\n\t\t\t\tTimeoutStaleRunLock:  arvados.Duration(3 * time.Millisecond),\n\t\t\t\tTimeoutTERM:          arvados.Duration(20 * time.Millisecond),\n\t\t\t\tResourceTags:         map[string]string{\"testtag\": \"test value\"},\n\t\t\t\tTagKeyPrefix:         
\"test:\",\n\t\t\t},\n\t\t},\n\t\tInstanceTypes: arvados.InstanceTypeMap{\n\t\t\ttest.InstanceType(1).Name:  test.InstanceType(1),\n\t\t\ttest.InstanceType(2).Name:  test.InstanceType(2),\n\t\t\ttest.InstanceType(3).Name:  test.InstanceType(3),\n\t\t\ttest.InstanceType(4).Name:  test.InstanceType(4),\n\t\t\ttest.InstanceType(6).Name:  test.InstanceType(6),\n\t\t\ttest.InstanceType(8).Name:  test.InstanceType(8),\n\t\t\ttest.InstanceType(16).Name: test.InstanceType(16),\n\t\t},\n\t}\n\tarvadostest.SetServiceURL(&s.cluster.Services.DispatchCloud, \"http://localhost:/\")\n\tarvadostest.SetServiceURL(&s.cluster.Services.Controller, \"https://\"+os.Getenv(\"ARVADOS_API_HOST\")+\"/\")\n\n\tarvClient, err := arvados.NewClientFromConfig(s.cluster)\n\tc.Assert(err, check.IsNil)\n\t// Disable auto-retry\n\tarvClient.Timeout = 0\n\n\ts.error503Server = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tc.Logf(\"503 stub: returning 503\")\n\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t}))\n\tarvClient.Client = &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tProxy: s.arvClientProxy(c),\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true}}}\n\n\ts.disp = &dispatcher{\n\t\tCluster:   s.cluster,\n\t\tContext:   s.ctx,\n\t\tArvClient: arvClient,\n\t\tAuthToken: arvadostest.AdminToken,\n\t\tRegistry:  prometheus.NewRegistry(),\n\t\t// Providing a stub queue here prevents\n\t\t// disp.initialize() from making a real one that uses\n\t\t// the integration test servers/database.\n\t\tqueue: &test.Queue{},\n\t}\n\t// Test cases can modify s.cluster before calling\n\t// initialize(), and then modify private state before calling\n\t// go run().\n}\n\nfunc (s *DispatcherSuite) TearDownTest(c *check.C) {\n\ts.cancel()\n\ts.disp.Close()\n\ts.error503Server.Close()\n}\n\n// Intercept outgoing API requests for \"/503\" and respond HTTP\n// 503. This lets us force (*arvados.Client)Last503() to return\n// something.\nfunc (s *DispatcherSuite) arvClientProxy(c *check.C) func(*http.Request) (*url.URL, error) {\n\treturn func(req *http.Request) (*url.URL, error) {\n\t\tif req.URL.Path == \"/503\" {\n\t\t\tc.Logf(\"arvClientProxy: proxying to 503 stub\")\n\t\t\treturn url.Parse(s.error503Server.URL)\n\t\t} else {\n\t\t\treturn nil, nil\n\t\t}\n\t}\n}\n\n// DispatchToStubDriver checks that the dispatcher wires everything\n// together effectively. It uses a real scheduler and worker pool with\n// a fake queue and cloud driver. 
The fake cloud driver injects\n// artificial errors in order to exercise a variety of code paths.\nfunc (s *DispatcherSuite) TestDispatchToStubDriver(c *check.C) {\n\tDrivers[\"test\"] = s.stubDriver\n\ts.stubDriver.ErrorRateCreate = 0.1\n\ts.stubDriver.ErrorRateDestroy = 0.1\n\tqueue := &test.Queue{\n\t\tMaxDispatchAttempts: 5,\n\t\tChooseType: func(ctr *arvados.Container) ([]arvados.InstanceType, error) {\n\t\t\treturn container.ChooseInstanceType(s.cluster, ctr)\n\t\t},\n\t\tLogger: ctxlog.TestLogger(c),\n\t}\n\tfor i := 0; i < 200; i++ {\n\t\tqueue.Containers = append(queue.Containers, arvados.Container{\n\t\t\tUUID:     test.ContainerUUID(i + 1),\n\t\t\tState:    arvados.ContainerStateQueued,\n\t\t\tPriority: int64(i%20 + 1),\n\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\tRAM:   int64(i%3+1) << 30,\n\t\t\t\tVCPUs: i%8 + 1,\n\t\t\t},\n\t\t})\n\t}\n\ts.disp.queue = queue\n\ts.disp.setupOnce.Do(s.disp.initialize)\n\n\tvar mtx sync.Mutex\n\tdone := make(chan struct{})\n\twaiting := map[string]struct{}{}\n\tfor _, ctr := range queue.Containers {\n\t\twaiting[ctr.UUID] = struct{}{}\n\t}\n\tfinishContainer := func(ctr arvados.Container) {\n\t\tmtx.Lock()\n\t\tdefer mtx.Unlock()\n\t\tif _, ok := waiting[ctr.UUID]; !ok {\n\t\t\tc.Errorf(\"container completed twice: %s\", ctr.UUID)\n\t\t\treturn\n\t\t}\n\t\tdelete(waiting, ctr.UUID)\n\t\tif len(waiting) == 100 {\n\t\t\t// trigger scheduler maxContainers limit\n\t\t\tc.Logf(\"test: requesting 503 in order to trigger maxContainers limit\")\n\t\t\ts.disp.ArvClient.RequestAndDecode(nil, \"GET\", \"503\", nil, nil)\n\t\t}\n\t\tif len(waiting) == 0 {\n\t\t\tclose(done)\n\t\t}\n\t}\n\texecuteContainer := func(ctr arvados.Container) int {\n\t\tfinishContainer(ctr)\n\t\treturn int(rand.Uint32() & 0x3)\n\t}\n\tvar type4BrokenUntil time.Time\n\tvar countCapacityErrors int64\n\tvmCount := int32(0)\n\ts.stubDriver.Queue = queue\n\ts.stubDriver.SetupVM = func(stubvm *test.StubVM) error {\n\t\tif pt := stubvm.Instance().ProviderType(); pt == test.InstanceType(6).ProviderType {\n\t\t\tc.Logf(\"test: returning capacity error for instance type %s\", pt)\n\t\t\tatomic.AddInt64(&countCapacityErrors, 1)\n\t\t\treturn test.CapacityError{InstanceTypeSpecific: true}\n\t\t}\n\t\tn := atomic.AddInt32(&vmCount, 1)\n\t\tc.Logf(\"SetupVM: instance %s n=%d\", stubvm.Instance(), n)\n\t\tstubvm.Boot = time.Now().Add(time.Duration(rand.Int63n(int64(5 * time.Millisecond))))\n\t\tstubvm.CrunchRunDetachDelay = time.Duration(rand.Int63n(int64(10 * time.Millisecond)))\n\t\tstubvm.ExecuteContainer = executeContainer\n\t\tstubvm.CrashRunningContainer = finishContainer\n\t\tstubvm.ExtraCrunchRunArgs = \"'--runtime-engine=stub' '--foo' '--extra='\\\\''args'\\\\'''\"\n\t\tswitch {\n\t\tcase stubvm.Instance().ProviderType() == test.InstanceType(4).ProviderType &&\n\t\t\t(type4BrokenUntil.IsZero() || time.Now().Before(type4BrokenUntil)):\n\t\t\t// Initially (at least 2*TimeoutBooting), all\n\t\t\t// instances of this type are completely\n\t\t\t// broken. 
This ensures the\n\t\t\t// boot_outcomes{outcome=\"failure\"} metric is\n\t\t\t// not zero.\n\t\t\tstubvm.Broken = time.Now()\n\t\t\tif type4BrokenUntil.IsZero() {\n\t\t\t\ttype4BrokenUntil = time.Now().Add(2 * s.cluster.Containers.CloudVMs.TimeoutBooting.Duration())\n\t\t\t}\n\t\tcase n%7 == 0:\n\t\t\t// some instances start out OK but then stop\n\t\t\t// running any commands\n\t\t\tstubvm.Broken = time.Now().Add(time.Duration(rand.Int63n(90)) * time.Millisecond)\n\t\tcase n%7 == 1:\n\t\t\t// some instances never pass a run-probe\n\t\t\tstubvm.CrunchRunMissing = true\n\t\tcase n%7 == 2:\n\t\t\t// some instances start out OK but then start\n\t\t\t// reporting themselves as broken\n\t\t\tstubvm.ReportBroken = time.Now().Add(time.Duration(rand.Int63n(200)) * time.Millisecond)\n\t\tdefault:\n\t\t\tstubvm.CrunchRunCrashRate = 0.1\n\t\t\tstubvm.ArvMountDeadlockRate = 0.1\n\t\t}\n\t\treturn nil\n\t}\n\ts.stubDriver.Bugf = c.Errorf\n\n\tstart := time.Now()\n\tgo s.disp.run()\n\terr := s.disp.CheckHealth()\n\tc.Check(err, check.IsNil)\n\n\tfor len(waiting) > 0 {\n\t\twaswaiting := len(waiting)\n\t\tselect {\n\t\tcase <-done:\n\t\t\t// loop will end because len(waiting)==0\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tif len(waiting) >= waswaiting {\n\t\t\t\tc.Fatalf(\"timed out; no progress in 5 s while waiting for %d containers: %q\", len(waiting), waiting)\n\t\t\t}\n\t\t}\n\t}\n\tc.Logf(\"containers finished (%s), waiting for instances to shutdown and queue to clear\", time.Since(start))\n\n\tdeadline := time.Now().Add(5 * time.Second)\n\tfor range time.NewTicker(10 * time.Millisecond).C {\n\t\tinsts, err := s.stubDriver.InstanceSets()[0].Instances(nil)\n\t\tc.Check(err, check.IsNil)\n\t\tqueue.Update()\n\t\tents, _ := queue.Entries()\n\t\tif len(ents) == 0 && len(insts) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tif time.Now().After(deadline) {\n\t\t\tc.Fatalf(\"timed out with %d containers (%v), %d instances (%+v)\", len(ents), ents, len(insts), insts)\n\t\t}\n\t}\n\n\tc.Check(countCapacityErrors, check.Not(check.Equals), int64(0))\n\n\treq := httptest.NewRequest(\"GET\", \"/metrics\", nil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+s.cluster.ManagementToken)\n\tresp := httptest.NewRecorder()\n\ts.disp.ServeHTTP(resp, req)\n\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error=\"0\",operation=\"Create\"} [^0].*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error=\"0\",operation=\"List\"} [^0].*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error=\"0\",operation=\"Destroy\"} [^0].*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error=\"1\",operation=\"Create\"} [^0].*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*driver_operations{error=\"1\",operation=\"List\"} 0\\n.*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome=\"aborted\"} [0-9]+\\n.*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome=\"disappeared\"} [^0].*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome=\"failure\"} [^0].*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*boot_outcomes{outcome=\"success\"} [^0].*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*instances_disappeared{state=\"shutdown\"} [^0].*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*instances_disappeared{state=\"unknown\"} 0\\n.*`)\n\tc.Check(resp.Body.String(), check.Matches, 
`(?ms).*time_to_ssh_seconds{quantile=\"0.95\"} [0-9.]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds_count [0-9]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ssh_seconds_sum [0-9.]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds{quantile=\"0.95\"} [0-9.]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds_count [0-9]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*time_to_ready_for_container_seconds_sum [0-9.]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_shutdown_request_to_disappearance_seconds_count [0-9]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_shutdown_request_to_disappearance_seconds_sum [0-9.]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_count [0-9]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*time_from_queue_to_crunch_run_seconds_sum [0-9e+.]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome=\"success\"} [0-9]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome=\"success\"} [0-9e+.]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_count{outcome=\"fail\"} [0-9]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*run_probe_duration_seconds_sum{outcome=\"fail\"} [0-9e+.]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*last_503_time [1-9][0-9e+.]*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*max_concurrent_containers [1-9][0-9e+.]*`)\n}\n\nfunc (s *DispatcherSuite) TestManagementAPI_Permissions(c *check.C) {\n\ts.cluster.ManagementToken = \"abcdefgh\"\n\tDrivers[\"test\"] = s.stubDriver\n\ts.disp.setupOnce.Do(s.disp.initialize)\n\tgo s.disp.run()\n\n\tfor _, token := range []string{\"abc\", \"\"} {\n\t\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/dispatch/instances\", nil)\n\t\tif token != \"\" {\n\t\t\treq.Header.Set(\"Authorization\", \"Bearer \"+token)\n\t\t}\n\t\tresp := httptest.NewRecorder()\n\t\ts.disp.ServeHTTP(resp, req)\n\t\tif token == \"\" {\n\t\t\tc.Check(resp.Code, check.Equals, http.StatusUnauthorized)\n\t\t} else {\n\t\t\tc.Check(resp.Code, check.Equals, http.StatusForbidden)\n\t\t}\n\t}\n}\n\nfunc (s *DispatcherSuite) TestManagementAPI_Disabled(c *check.C) {\n\ts.cluster.ManagementToken = \"\"\n\tDrivers[\"test\"] = s.stubDriver\n\ts.disp.setupOnce.Do(s.disp.initialize)\n\tgo s.disp.run()\n\n\tfor _, token := range []string{\"abc\", \"\"} {\n\t\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/dispatch/instances\", nil)\n\t\tif token != \"\" {\n\t\t\treq.Header.Set(\"Authorization\", \"Bearer \"+token)\n\t\t}\n\t\tresp := httptest.NewRecorder()\n\t\ts.disp.ServeHTTP(resp, req)\n\t\tc.Check(resp.Code, check.Equals, http.StatusForbidden)\n\t}\n}\n\nfunc (s *DispatcherSuite) TestManagementAPI_Containers(c *check.C) {\n\ts.cluster.ManagementToken = \"abcdefgh\"\n\ts.cluster.Containers.CloudVMs.InitialQuotaEstimate = 4\n\tDrivers[\"test\"] = s.stubDriver\n\tqueue := &test.Queue{\n\t\tMaxDispatchAttempts: 5,\n\t\tChooseType: func(ctr *arvados.Container) ([]arvados.InstanceType, error) {\n\t\t\treturn container.ChooseInstanceType(s.cluster, ctr)\n\t\t},\n\t\tLogger: ctxlog.TestLogger(c),\n\t}\n\ts.stubDriver.Queue = queue\n\ts.stubDriver.QuotaMaxInstances = 4\n\ts.stubDriver.SetupVM = func(stubvm *test.StubVM) error {\n\t\tif stubvm.Instance().ProviderType() >= 
test.InstanceType(4).ProviderType {\n\t\t\treturn test.CapacityError{InstanceTypeSpecific: true}\n\t\t}\n\t\tstubvm.ExecuteContainer = func(ctr arvados.Container) int {\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t\treturn 0\n\t\t}\n\t\treturn nil\n\t}\n\ts.disp.queue = queue\n\ts.disp.setupOnce.Do(s.disp.initialize)\n\n\tgo s.disp.run()\n\n\ttype queueEnt struct {\n\t\tContainer        arvados.Container\n\t\tInstanceType     arvados.InstanceType `json:\"instance_type\"`\n\t\tSchedulingStatus string               `json:\"scheduling_status\"`\n\t}\n\ttype containersResponse struct {\n\t\tItems []queueEnt\n\t}\n\tgetContainers := func() containersResponse {\n\t\tschedQueueRefresh = time.Millisecond\n\t\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/dispatch/containers\", nil)\n\t\treq.Header.Set(\"Authorization\", \"Bearer abcdefgh\")\n\t\tresp := httptest.NewRecorder()\n\t\ts.disp.ServeHTTP(resp, req)\n\t\tvar cresp containersResponse\n\t\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\t\terr := json.Unmarshal(resp.Body.Bytes(), &cresp)\n\t\tc.Check(err, check.IsNil)\n\t\treturn cresp\n\t}\n\n\tc.Check(getContainers().Items, check.HasLen, 0)\n\n\tfor i := 0; i < 20; i++ {\n\t\tqueue.Containers = append(queue.Containers, arvados.Container{\n\t\t\tUUID:     test.ContainerUUID(i),\n\t\t\tState:    arvados.ContainerStateQueued,\n\t\t\tPriority: int64(100 - i),\n\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\tRAM:   int64(i%3+1) << 30,\n\t\t\t\tVCPUs: i%8 + 1,\n\t\t\t},\n\t\t})\n\t}\n\tqueue.Update()\n\n\texpect := `\n 0 zzzzz-dz642-000000000000000 (Running) \"\"\n 1 zzzzz-dz642-000000000000001 (Running) \"\"\n 2 zzzzz-dz642-000000000000002 (Running) \"\"\n 3 zzzzz-dz642-000000000000003 (Locked) \"Waiting in queue at position 1.  Cluster is at capacity for all eligible instance types (type4, type6) and cannot start a new instance right now.\"\n 4 zzzzz-dz642-000000000000004 (Queued) \"Waiting in queue at position 2.  Cluster is at capacity and cannot start any new instances right now.\"\n 5 zzzzz-dz642-000000000000005 (Queued) \"Waiting in queue at position 3.  Cluster is at capacity and cannot start any new instances right now.\"\n 6 zzzzz-dz642-000000000000006 (Queued) \"Waiting in queue at position 4.  Cluster is at capacity and cannot start any new instances right now.\"\n 7 zzzzz-dz642-000000000000007 (Queued) \"Waiting in queue at position 5.  Cluster is at capacity and cannot start any new instances right now.\"\n 8 zzzzz-dz642-000000000000008 (Queued) \"Waiting in queue at position 6.  Cluster is at capacity and cannot start any new instances right now.\"\n 9 zzzzz-dz642-000000000000009 (Queued) \"Waiting in queue at position 7.  Cluster is at capacity and cannot start any new instances right now.\"\n 10 zzzzz-dz642-000000000000010 (Queued) \"Waiting in queue at position 8.  Cluster is at capacity and cannot start any new instances right now.\"\n 11 zzzzz-dz642-000000000000011 (Queued) \"Waiting in queue at position 9.  Cluster is at capacity and cannot start any new instances right now.\"\n 12 zzzzz-dz642-000000000000012 (Queued) \"Waiting in queue at position 10.  Cluster is at capacity and cannot start any new instances right now.\"\n 13 zzzzz-dz642-000000000000013 (Queued) \"Waiting in queue at position 11.  Cluster is at capacity and cannot start any new instances right now.\"\n 14 zzzzz-dz642-000000000000014 (Queued) \"Waiting in queue at position 12.  
Cluster is at capacity and cannot start any new instances right now.\"\n 15 zzzzz-dz642-000000000000015 (Queued) \"Waiting in queue at position 13.  Cluster is at capacity and cannot start any new instances right now.\"\n 16 zzzzz-dz642-000000000000016 (Queued) \"Waiting in queue at position 14.  Cluster is at capacity and cannot start any new instances right now.\"\n 17 zzzzz-dz642-000000000000017 (Queued) \"Waiting in queue at position 15.  Cluster is at capacity and cannot start any new instances right now.\"\n 18 zzzzz-dz642-000000000000018 (Queued) \"Waiting in queue at position 16.  Cluster is at capacity and cannot start any new instances right now.\"\n 19 zzzzz-dz642-000000000000019 (Queued) \"Waiting in queue at position 17.  Cluster is at capacity and cannot start any new instances right now.\"\n`\n\tsequence := make(map[string][]string)\n\tvar summary string\n\tfor deadline := time.Now().Add(time.Second); time.Now().Before(deadline); time.Sleep(time.Millisecond) {\n\t\tcresp := getContainers()\n\t\tsummary = \"\\n\"\n\t\tfor i, ent := range cresp.Items {\n\t\t\tsummary += fmt.Sprintf(\"% 2d %s (%s) %q\\n\", i, ent.Container.UUID, ent.Container.State, ent.SchedulingStatus)\n\t\t\ts := sequence[ent.Container.UUID]\n\t\t\tif len(s) == 0 || s[len(s)-1] != ent.SchedulingStatus {\n\t\t\t\tsequence[ent.Container.UUID] = append(s, ent.SchedulingStatus)\n\t\t\t}\n\t\t}\n\t\tif summary == expect {\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Check(summary, check.Equals, expect)\n\tfor i := 0; i < 5; i++ {\n\t\tc.Logf(\"sequence for container %d:\\n... %s\", i, strings.Join(sequence[test.ContainerUUID(i)], \"\\n... \"))\n\t}\n}\n\nfunc (s *DispatcherSuite) TestManagementAPI_Instances(c *check.C) {\n\ts.cluster.ManagementToken = \"abcdefgh\"\n\ts.cluster.Containers.CloudVMs.TimeoutBooting = arvados.Duration(time.Second)\n\tDrivers[\"test\"] = s.stubDriver\n\ts.disp.setupOnce.Do(s.disp.initialize)\n\tgo s.disp.run()\n\tdefer s.disp.Close()\n\n\ttype instance struct {\n\t\tInstance              string\n\t\tAddress               string\n\t\tPrice                 float64\n\t\tWorkerState           string    `json:\"worker_state\"`\n\t\tLastContainerUUID     string    `json:\"last_container_uuid\"`\n\t\tRunningContainerUUIDs []string  `json:\"running_container_uuids\"`\n\t\tArvadosInstanceType   string    `json:\"arvados_instance_type\"`\n\t\tProviderInstanceType  string    `json:\"provider_instance_type\"`\n\t\tIdleBehavior          string    `json:\"idle_behavior\"`\n\t\tLastBusy              time.Time `json:\"last_busy\"`\n\t}\n\ttype instancesResponse struct {\n\t\tItems []instance\n\t}\n\tgetInstances := func() instancesResponse {\n\t\treq := httptest.NewRequest(\"GET\", \"/arvados/v1/dispatch/instances\", nil)\n\t\treq.Header.Set(\"Authorization\", \"Bearer abcdefgh\")\n\t\tresp := httptest.NewRecorder()\n\t\ts.disp.ServeHTTP(resp, req)\n\t\tvar sr instancesResponse\n\t\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\t\terr := json.Unmarshal(resp.Body.Bytes(), &sr)\n\t\tc.Check(err, check.IsNil)\n\t\treturn sr\n\t}\n\n\tsr := getInstances()\n\tc.Check(len(sr.Items), check.Equals, 0)\n\n\tch := s.disp.pool.Subscribe()\n\tdefer s.disp.pool.Unsubscribe(ch)\n\t_, ok := s.disp.pool.Create(test.InstanceType(1))\n\tc.Check(ok, check.Equals, true)\n\t<-ch\n\n\tfor deadline := time.Now().Add(time.Second); time.Now().Before(deadline); {\n\t\tsr = getInstances()\n\t\tif len(sr.Items) > 0 && sr.Items[0].Instance != \"\" {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Millisecond)\n\t}\n\tc.Assert(len(sr.Items), 
check.Equals, 1)\n\tc.Check(sr.Items[0].Instance, check.Matches, \"inst.*\")\n\tc.Check(sr.Items[0].WorkerState, check.Equals, \"booting\")\n\tc.Check(sr.Items[0].Price, check.Equals, 0.123)\n\tc.Check(sr.Items[0].LastContainerUUID, check.Equals, \"\")\n\tc.Check(sr.Items[0].RunningContainerUUIDs, check.HasLen, 0)\n\tc.Check(sr.Items[0].ProviderInstanceType, check.Equals, test.InstanceType(1).ProviderType)\n\tc.Check(sr.Items[0].ArvadosInstanceType, check.Equals, test.InstanceType(1).Name)\n\tc.Check(sr.Items[0].IdleBehavior, check.Equals, \"run\")\n}\n\nfunc (s *DispatcherSuite) TestManagementCommand_Instances(c *check.C) {\n\ts.cluster.ManagementToken = \"abcdefgh\"\n\tDrivers[\"test\"] = s.stubDriver\n\ts.disp.setupOnce.Do(s.disp.initialize)\n\tgo s.disp.run()\n\tdefer s.disp.Close()\n\t_, ok := s.disp.pool.Create(test.InstanceType(1))\n\tc.Check(ok, check.Equals, true)\n\n\t// Start an http server so we can test InstanceCommand against\n\t// s.disp's management API.\n\tsrv := httptest.NewServer(s.disp)\n\tsrvurl, _ := url.Parse(srv.URL)\n\n\t// Write a config file for InstanceCommand to use.\n\tcluster, err := config.DefaultCluster(c, \"zzzzz\")\n\tc.Assert(err, check.IsNil)\n\tcluster.Services.DispatchCloud.InternalURLs = map[arvados.URL]arvados.ServiceInstance{arvados.URL(*srvurl): arvados.ServiceInstance{}}\n\tcluster.ManagementToken = s.cluster.ManagementToken\n\tconffile := c.MkDir() + \"config.yml\"\n\tconfdata, err := json.Marshal(arvados.Config{Clusters: map[string]arvados.Cluster{\"zzzzz\": cluster}})\n\tc.Assert(err, check.IsNil)\n\terr = os.WriteFile(conffile, confdata, 0666)\n\tc.Assert(err, check.IsNil)\n\n\t// \"instance list\"\n\tstdout := bytes.NewBuffer(nil)\n\texitcode := InstanceCommand.RunCommand(\"arvados-server instance\", []string{\"list\", \"-header\", \"-config\", conffile}, bytes.NewBuffer(nil), stdout, os.Stderr)\n\tc.Check(exitcode, check.Equals, 0)          // `instance list` (after create)\n\tc.Check(stdout.String(), check.Matches, ``+ // `instance list` (after create)\n\t\t`instance\\t.*\\n`+\n\t\t`inst1,providertype1\\t(-|127\\.0\\.0\\.1:\\d+)\\t(booting|idle)\\trun\\ttype1\\tprovidertype1\\t0\\.123000\\t-\\n`)\n\n\t// \"instance hold\"\n\tstdout.Reset()\n\tstderr := bytes.NewBuffer(nil)\n\texitcode = InstanceCommand.RunCommand(\"arvados-server instance\", []string{\"hold\", \"-config\", conffile, \"inst1,providertype1\"}, bytes.NewBuffer(nil), stdout, stderr)\n\tc.Check(exitcode, check.Equals, 0)         // `instance hold` should succeed\n\tc.Check(stdout.String(), check.Equals, ``) // `instance hold` should output nothing\n\tc.Check(stderr.String(), check.Matches,    // `instance hold` should show feedback on stderr\n\t\t`(?ms)(.*\\n)?inst1,providertype1: 200 OK .*\\n`)\n\n\tstdout.Reset()\n\texitcode = InstanceCommand.RunCommand(\"arvados-server instance\", []string{\"list\", \"-config\", conffile}, bytes.NewBuffer(nil), stdout, os.Stderr)\n\tc.Check(exitcode, check.Equals, 0)      // `instance list` (after hold) should succeed\n\tc.Check(stdout.String(), check.Matches, // `instance list` (after hold) should show instance in hold state\n\t\t`inst1,providertype1\\t(-|127\\.0\\.0\\.1:\\d+)\\t(booting|idle)\\thold\\ttype1\\tprovidertype1\\t0\\.123000\\t-\\n`)\n\n\t// \"instance drain\"\n\tstdout.Reset()\n\tstderr.Reset()\n\texitcode = InstanceCommand.RunCommand(\"arvados-server instance\", []string{\"drain\", \"-config\", conffile, \"inst1,providertype1\"}, bytes.NewBuffer(nil), stdout, stderr)\n\tc.Check(exitcode, check.Equals, 0)         // `instance 
drain` should succeed\n\tc.Check(stdout.String(), check.Equals, ``) // `instance drain` should output nothing\n\tc.Check(stderr.String(), check.Matches,    // `instance drain` should show feedback on stderr\n\t\t`(?ms)(.*\\n)?inst1,providertype1: 200 OK .*\\n`)\n\n\tstdout.Reset()\n\texitcode = InstanceCommand.RunCommand(\"arvados-server instance\", []string{\"list\", \"-config\", conffile}, bytes.NewBuffer(nil), stdout, os.Stderr)\n\tc.Check(exitcode, check.Equals, 0) // `instance list` (after drain)\n\tif stdout.String() == \"\" {\n\t\t// Instance already drained/shutdown before we even\n\t\t// got our list.\n\t} else {\n\t\t// If the instance is still listed, it should be in\n\t\t// drain/shutdown state.\n\t\tc.Check(stdout.String(), check.Matches, `inst1,providertype1\\t(-|127\\.0\\.0\\.1:\\d+)\\tshutdown\\tdrain\\ttype1\\tprovidertype1\\t0\\.123000\\t-\\n`)\n\t}\n\n\t// \"instance drain\" with nonexistent instance ID\n\tstdout.Reset()\n\tstderr.Reset()\n\texitcode = InstanceCommand.RunCommand(\"arvados-server instance\", []string{\"drain\", \"-config\", conffile, \"inst404,providertype404\"}, bytes.NewBuffer(nil), stdout, stderr)\n\tc.Check(exitcode, check.Equals, 1)         // `instance drain {bad-id}` should fail\n\tc.Check(stdout.String(), check.Equals, ``) // `instance drain {bad-id}` should output nothing\n\tc.Check(stderr.String(), check.Matches,    // `instance drain {bad-id}` should 404\n\t\t`(?ms)(.*\\n)?inst404,providertype404: 404 Not Found .*\\n`)\n}\n"
  },
  {
    "path": "lib/dispatchcloud/driver.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchcloud\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/cloud/azure\"\n\t\"git.arvados.org/arvados.git/lib/cloud/ec2\"\n\t\"git.arvados.org/arvados.git/lib/cloud/loopback\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\n// Drivers is a map of available cloud drivers.\n// Clusters.*.Containers.CloudVMs.Driver configuration values\n// correspond to keys in this map.\nvar Drivers = map[string]cloud.Driver{\n\t\"azure\":    azure.Driver,\n\t\"ec2\":      ec2.Driver,\n\t\"loopback\": loopback.Driver,\n}\n\nfunc newInstanceSet(cluster *arvados.Cluster, setID cloud.InstanceSetID, logger logrus.FieldLogger, reg *prometheus.Registry) (cloud.InstanceSet, error) {\n\tdriver, ok := Drivers[cluster.Containers.CloudVMs.Driver]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unsupported cloud driver %q\", cluster.Containers.CloudVMs.Driver)\n\t}\n\tsharedResourceTags := cloud.SharedResourceTags(cluster.Containers.CloudVMs.ResourceTags)\n\tis, err := driver.InstanceSet(cluster.Containers.CloudVMs.DriverParameters, setID, sharedResourceTags, logger, reg)\n\tis = newInstrumentedInstanceSet(is, reg)\n\tif maxops := cluster.Containers.CloudVMs.MaxCloudOpsPerSecond; maxops > 0 {\n\t\tis = rateLimitedInstanceSet{\n\t\t\tInstanceSet: is,\n\t\t\tticker:      time.NewTicker(time.Second / time.Duration(maxops)),\n\t\t}\n\t}\n\tis = defaultTaggingInstanceSet{\n\t\tInstanceSet: is,\n\t\tdefaultTags: cloud.InstanceTags(cluster.Containers.CloudVMs.ResourceTags),\n\t}\n\tis = filteringInstanceSet{\n\t\tInstanceSet: is,\n\t\tlogger:      logger,\n\t}\n\treturn is, err\n}\n\ntype rateLimitedInstanceSet struct {\n\tcloud.InstanceSet\n\tticker *time.Ticker\n}\n\nfunc (is rateLimitedInstanceSet) Instances(tags cloud.InstanceTags) ([]cloud.Instance, error) {\n\t<-is.ticker.C\n\tinsts, err := is.InstanceSet.Instances(tags)\n\tfor i, inst := range insts {\n\t\tinsts[i] = &rateLimitedInstance{inst, is.ticker}\n\t}\n\treturn insts, err\n}\n\nfunc (is rateLimitedInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID, tags cloud.InstanceTags, init cloud.InitCommand, pk ssh.PublicKey) (cloud.Instance, error) {\n\t<-is.ticker.C\n\tinst, err := is.InstanceSet.Create(it, image, tags, init, pk)\n\treturn &rateLimitedInstance{inst, is.ticker}, err\n}\n\ntype rateLimitedInstance struct {\n\tcloud.Instance\n\tticker *time.Ticker\n}\n\nfunc (inst *rateLimitedInstance) Destroy() error {\n\t<-inst.ticker.C\n\treturn inst.Instance.Destroy()\n}\n\nfunc (inst *rateLimitedInstance) SetTags(tags cloud.InstanceTags) error {\n\t<-inst.ticker.C\n\treturn inst.Instance.SetTags(tags)\n}\n\n// Adds the specified defaultTags to every Create() call.\ntype defaultTaggingInstanceSet struct {\n\tcloud.InstanceSet\n\tdefaultTags cloud.InstanceTags\n}\n\nfunc (is defaultTaggingInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID, tags cloud.InstanceTags, init cloud.InitCommand, pk ssh.PublicKey) (cloud.Instance, error) {\n\tallTags := cloud.InstanceTags{}\n\tfor k, v := range is.defaultTags {\n\t\tallTags[k] = v\n\t}\n\tfor k, v := range tags {\n\t\tallTags[k] = v\n\t}\n\treturn is.InstanceSet.Create(it, image, allTags, init, pk)\n}\n\n// Filter the instances returned by the wrapped InstanceSet's\n// Instances() 
method (in case the wrapped InstanceSet didn't do this\n// itself).\ntype filteringInstanceSet struct {\n\tcloud.InstanceSet\n\tlogger logrus.FieldLogger\n}\n\nfunc (is filteringInstanceSet) Instances(tags cloud.InstanceTags) ([]cloud.Instance, error) {\n\tinstances, err := is.InstanceSet.Instances(tags)\n\n\tskipped := 0\n\tvar returning []cloud.Instance\nnextInstance:\n\tfor _, inst := range instances {\n\t\tinstTags := inst.Tags()\n\t\tfor k, v := range tags {\n\t\t\tif instTags[k] != v {\n\t\t\t\tskipped++\n\t\t\t\tcontinue nextInstance\n\t\t\t}\n\t\t}\n\t\treturning = append(returning, inst)\n\t}\n\tis.logger.WithFields(logrus.Fields{\n\t\t\"returning\": len(returning),\n\t\t\"skipped\":   skipped,\n\t}).WithError(err).Debugf(\"filteringInstanceSet returning instances\")\n\treturn returning, err\n}\n\nfunc newInstrumentedInstanceSet(is cloud.InstanceSet, reg *prometheus.Registry) cloud.InstanceSet {\n\tcv := prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"driver_operations\",\n\t\tHelp:      \"Number of instance-create/destroy/list operations performed via cloud driver.\",\n\t}, []string{\"operation\", \"error\"})\n\n\t// Create all counters, so they are reported with zero values\n\t// (instead of being missing) until they are incremented.\n\tfor _, op := range []string{\"Create\", \"List\", \"Destroy\", \"SetTags\"} {\n\t\tfor _, error := range []string{\"0\", \"1\"} {\n\t\t\tcv.WithLabelValues(op, error).Add(0)\n\t\t}\n\t}\n\n\treg.MustRegister(cv)\n\treturn instrumentedInstanceSet{is, cv}\n}\n\ntype instrumentedInstanceSet struct {\n\tcloud.InstanceSet\n\tcv *prometheus.CounterVec\n}\n\nfunc (is instrumentedInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID, tags cloud.InstanceTags, init cloud.InitCommand, pk ssh.PublicKey) (cloud.Instance, error) {\n\tinst, err := is.InstanceSet.Create(it, image, tags, init, pk)\n\tis.cv.WithLabelValues(\"Create\", boolLabelValue(err != nil)).Inc()\n\treturn instrumentedInstance{inst, is.cv}, err\n}\n\nfunc (is instrumentedInstanceSet) Instances(tags cloud.InstanceTags) ([]cloud.Instance, error) {\n\tinstances, err := is.InstanceSet.Instances(tags)\n\tis.cv.WithLabelValues(\"List\", boolLabelValue(err != nil)).Inc()\n\tvar instrumented []cloud.Instance\n\tfor _, i := range instances {\n\t\tinstrumented = append(instrumented, instrumentedInstance{i, is.cv})\n\t}\n\treturn instrumented, err\n}\n\ntype instrumentedInstance struct {\n\tcloud.Instance\n\tcv *prometheus.CounterVec\n}\n\nfunc (inst instrumentedInstance) Destroy() error {\n\terr := inst.Instance.Destroy()\n\tinst.cv.WithLabelValues(\"Destroy\", boolLabelValue(err != nil)).Inc()\n\treturn err\n}\n\nfunc (inst instrumentedInstance) SetTags(tags cloud.InstanceTags) error {\n\terr := inst.Instance.SetTags(tags)\n\tinst.cv.WithLabelValues(\"SetTags\", boolLabelValue(err != nil)).Inc()\n\treturn err\n}\n\nfunc boolLabelValue(v bool) string {\n\tif v {\n\t\treturn \"1\"\n\t}\n\treturn \"0\"\n}\n"
  },
  {
    "path": "lib/dispatchcloud/gocheck_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchcloud\n\nimport (\n\t\"testing\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n"
  },
  {
    "path": "lib/dispatchcloud/logger.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchcloud\n\nimport (\n\t\"sync\"\n\t\"time\"\n)\n\ntype logger interface {\n\tPrintf(string, ...interface{})\n\tWarnf(string, ...interface{})\n\tDebugf(string, ...interface{})\n}\n\nvar nextSpam = map[string]time.Time{}\nvar nextSpamMtx sync.Mutex\n\nfunc unspam(msg string) bool {\n\tnextSpamMtx.Lock()\n\tdefer nextSpamMtx.Unlock()\n\tif nextSpam[msg].Before(time.Now()) {\n\t\tnextSpam[msg] = time.Now().Add(time.Minute)\n\t\treturn true\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "lib/dispatchcloud/readme.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchcloud\n\n// A dispatcher comprises a container queue, a scheduler, a worker\n// pool, a remote command executor, and a cloud driver.\n// 1. Choose a provider.\n// 2. Start a worker pool.\n// 3. Start a container queue.\n// 4. Run the scheduler's stale-lock fixer.\n// 5. Run the scheduler's mapper.\n// 6. Run the scheduler's syncer.\n// 7. Wait for updates to the container queue or worker pool.\n// 8. Repeat from 5.\n//\n//\n// A cloud driver creates new cloud VM instances and gets the latest\n// list of instances. The returned instances are caches/proxies for\n// the provider's metadata and control interfaces (get IP address,\n// update tags, shutdown).\n//\n//\n// A worker pool tracks workers' instance types and readiness states\n// (available to do work now, booting, suffering a temporary network\n// outage, shutting down). It loads internal state from the cloud\n// provider's list of instances at startup, and syncs periodically\n// after that.\n//\n//\n// An executor maintains a multiplexed SSH connection to a cloud\n// instance, retrying/reconnecting as needed, so the worker pool can\n// execute commands. It asks the cloud driver's instance to verify its\n// SSH public key once when first connecting, and again later if the\n// key changes.\n//\n//\n// A container queue tracks the known state (according to\n// arvados-controller) of each container of interest -- i.e., queued,\n// or locked/running using our own dispatch token. It also proxies the\n// dispatcher's lock/unlock/cancel requests to the controller. It\n// handles concurrent refresh and update operations without exposing\n// out-of-order updates to its callers. (It drops any new information\n// that might have originated before its own most recent\n// lock/unlock/cancel operation.)\n//\n//\n// The scheduler's stale-lock fixer waits for any already-locked\n// containers (i.e., locked by a prior dispatcher process) to appear\n// on workers as the worker pool recovers its state. It\n// unlocks/requeues any that still remain when all workers are\n// recovered or shutdown, or its timer expires.\n//\n//\n// The scheduler's mapper chooses which containers to assign to which\n// idle workers, and decides what to do when there are not enough idle\n// workers (including shutting down some idle nodes).\n//\n//\n// The scheduler's syncer updates state to Cancelled when a running\n// container process dies without finalizing its entry in the\n// controller database. It also calls the worker pool to kill\n// containers that have priority=0 while locked or running.\n//\n//\n// An instance set proxy wraps a driver's instance set with\n// rate-limiting logic. After the wrapped instance set receives a\n// cloud.RateLimitError, the proxy starts returning errors to callers\n// immediately without calling through to the wrapped instance set.\n"
  },
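  {
    "path": "lib/dispatchcloud/example/readme_sketch.go",
    "content": "// Editor's addition, not part of the Arvados source tree: a minimal,\n// self-contained sketch of the dispatcher main loop described in\n// readme.go (steps 4-8), assuming hypothetical stub types that follow\n// the same Subscribe/Unsubscribe pattern as the real container queue\n// and worker pool.\npackage example\n\nimport \"time\"\n\n// notifier is the Subscribe/Unsubscribe pattern shared by the\n// container queue and the worker pool.\ntype notifier interface {\n\tSubscribe() <-chan struct{}\n\tUnsubscribe(<-chan struct{})\n}\n\n// loop runs the stale-lock fixer once (step 4), then reruns the\n// mapper (step 5) and syncer (step 6) whenever the container queue or\n// worker pool reports a change (steps 7 and 8).\nfunc loop(queue, pool notifier, fixStaleLocks, mapper, syncer func(), stop <-chan struct{}) {\n\tqch := queue.Subscribe()\n\tdefer queue.Unsubscribe(qch)\n\tpch := pool.Subscribe()\n\tdefer pool.Unsubscribe(pch)\n\n\tfixStaleLocks()\n\tfor {\n\t\tmapper()\n\t\tsyncer()\n\t\tselect {\n\t\tcase <-stop:\n\t\t\treturn\n\t\tcase <-qch:\n\t\tcase <-pch:\n\t\tcase <-time.After(time.Minute):\n\t\t\t// Periodic wakeup even without queue/pool updates.\n\t\t}\n\t}\n}\n"
  },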
  {
    "path": "lib/dispatchcloud/readme_states.txt",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# cpan -I -T install Graph::Easy\n# (eval `perl -I ~/perl5/lib/perl5 -Mlocal::lib`; cpan -T install Graph::Easy)\n# graph-easy --as=svg < readme_states.txt\n\n[Nonexistent] - appears in cloud list -> [Unknown]\n[Nonexistent] - create() returns ID -> [Booting]\n[Unknown] - create() returns ID -> [Booting]\n[Unknown] - boot timeout -> [Shutdown]\n[Booting] - boot+run probes succeed -> [Idle]\n[Idle] - idle timeout -> [Shutdown]\n[Idle] - probe timeout -> [Shutdown]\n[Idle] - want=drain -> [Shutdown]\n[Idle] - container starts -> [Running]\n[Running] - container ends -> [Idle]\n[Running] - container ends, want=drain -> [Shutdown]\n[Shutdown] - instance disappears from cloud -> [Gone]\n\n# Layouter fails if we add these\n#[Hold] - want=run -> [Booting]\n#[Hold] - want=drain -> [Shutdown]\n#[Running] - container ends, want=hold -> [Hold]\n#[Unknown] - want=hold -> [Hold]\n#[Booting] - want=hold -> [Hold]\n#[Idle] - want=hold -> [Hold]\n\n# Not worth saying?\n#[Booting] - boot probe succeeds, run probe fails -> [Booting]\n"
  },
  {
    "path": "lib/dispatchcloud/scheduler/fix_stale_locks.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage scheduler\n\nimport (\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/worker\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// fixStaleLocks waits for any already-locked containers (i.e., locked\n// by a prior dispatcher process) to appear on workers as the worker\n// pool recovers its state. It unlocks any that still remain when all\n// workers are recovered or shutdown, or its timer (StaleLockTimeout)\n// expires.\nfunc (sch *Scheduler) fixStaleLocks() {\n\twp := sch.pool.Subscribe()\n\tdefer sch.pool.Unsubscribe(wp)\n\n\tvar stale []string\n\ttimeout := time.NewTimer(time.Duration(sch.cluster.Containers.StaleLockTimeout))\nwaiting:\n\tfor sch.pool.CountWorkers()[worker.StateUnknown] > 0 {\n\t\trunning := sch.pool.Running()\n\t\tqEntries, _ := sch.queue.Entries()\n\n\t\tstale = nil\n\t\tfor uuid, ent := range qEntries {\n\t\t\tif ent.Container.State != arvados.ContainerStateLocked {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, running := running[uuid]; running {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstale = append(stale, uuid)\n\t\t}\n\t\tif len(stale) == 0 {\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-wp:\n\t\tcase <-timeout.C:\n\t\t\t// Give up.\n\t\t\tbreak waiting\n\t\t}\n\t}\n\n\tfor _, uuid := range stale {\n\t\terr := sch.queue.Unlock(uuid)\n\t\tif err != nil {\n\t\t\tsch.logger.Warnf(\"Unlock %s: %s\", uuid, err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/dispatchcloud/scheduler/gocheck_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage scheduler\n\nimport (\n\t\"testing\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n"
  },
  {
    "path": "lib/dispatchcloud/scheduler/interfaces.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage scheduler\n\nimport (\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/container\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/worker\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// A ContainerQueue is a set of containers that need to be started or\n// stopped. Implemented by container.Queue and test stubs. See\n// container.Queue method documentation for details.\ntype ContainerQueue interface {\n\tEntries() (entries map[string]container.QueueEnt, updated time.Time)\n\tLock(uuid string) error\n\tUnlock(uuid string) error\n\tCancel(uuid string) error\n\tForget(uuid string)\n\tGet(uuid string) (arvados.Container, bool)\n\tSubscribe() <-chan struct{}\n\tUnsubscribe(<-chan struct{})\n\tUpdate() error\n}\n\n// A WorkerPool asynchronously starts and stops worker VMs, and starts\n// and stops containers on them. Implemented by worker.Pool and test\n// stubs. See worker.Pool method documentation for details.\ntype WorkerPool interface {\n\tRunning() map[string]time.Time\n\tInstances() []worker.InstanceView\n\tCountWorkers() map[worker.State]int\n\tAtCapacity(arvados.InstanceType) bool\n\tAtQuota() bool\n\tCreate(arvados.InstanceType) (worker.InstanceView, bool)\n\tShutdown(cloud.InstanceID) bool\n\tStartContainer(cloud.InstanceID, arvados.Container) bool\n\tKillContainer(uuid, reason string) bool\n\tForgetContainer(uuid string)\n\tSubscribe() <-chan struct{}\n\tUnsubscribe(<-chan struct{})\n}\n"
  },
  {
    "path": "lib/dispatchcloud/scheduler/run_queue.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage scheduler\n\nimport (\n\t\"fmt\"\n\t\"slices\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/container\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/worker\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nvar quietAfter503 = time.Minute\n\ntype QueueEnt struct {\n\tcontainer.QueueEnt\n\n\t// Human-readable scheduling status as of the last scheduling\n\t// iteration.\n\tSchedulingStatus string `json:\"scheduling_status\"`\n}\n\nconst (\n\tschedStatusPreparingRuntimeEnvironment = \"Container is allocated to an instance and preparing to run.\"\n\tschedStatusPriorityZero                = \"This container will not be scheduled to run because its priority is 0 and state is %v.\"\n\tschedStatusSupervisorLimitReached      = \"Waiting in workflow queue at position %v.  Cluster is at capacity and cannot start any new workflows right now.\"\n\tschedStatusWaitingForPreviousAttempt   = \"Waiting for previous container attempt to exit.\"\n\tschedStatusWaitingNewInstance          = \"Waiting for a %v instance to boot and be ready to accept work.\"\n\tschedStatusWaitingInstanceType         = \"Waiting in queue at position %v.  Cluster is at capacity for all eligible instance types (%v) and cannot start a new instance right now.\"\n\tschedStatusWaitingCloudResources       = \"Waiting in queue at position %v.  Cluster is at cloud account limits and cannot start any new instances right now.\"\n\tschedStatusWaitingClusterCapacity      = \"Waiting in queue at position %v.  Cluster is at capacity and cannot start any new instances right now.\"\n)\n\nfunc instanceResourcesForInstanceType(it arvados.InstanceType) container.InstanceResources {\n\treturn container.InstanceResources{\n\t\tVCPUs:   it.VCPUs,\n\t\tRAM:     it.RAM,\n\t\tScratch: it.Scratch,\n\t\tGPUs:    it.GPU.DeviceCount,\n\t\tGPUVRAM: it.GPU.VRAM,\n\t}\n}\n\n// Queue returns the sorted queue from the last scheduling iteration.\nfunc (sch *Scheduler) Queue() []QueueEnt {\n\tents, _ := sch.lastQueue.Load().([]QueueEnt)\n\treturn ents\n}\n\nfunc (sch *Scheduler) instanceSort(a, b worker.InstanceView) bool {\n\tif c := a.Price - b.Price; c != 0 {\n\t\treturn c < 0\n\t}\n\tita := sch.cluster.InstanceTypes[a.ArvadosInstanceType]\n\titb := sch.cluster.InstanceTypes[b.ArvadosInstanceType]\n\tif c := ita.VCPUs - itb.VCPUs; c != 0 {\n\t\treturn c > 0\n\t}\n\tif c := ita.RAM - itb.RAM; c != 0 {\n\t\treturn c > 0\n\t}\n\tif c := ita.Scratch - itb.Scratch; c != 0 {\n\t\treturn c > 0\n\t}\n\tif c := strings.Compare(string(a.Instance), string(b.Instance)); c != 0 {\n\t\treturn c < 0\n\t}\n\treturn false\n}\n\nfunc (sch *Scheduler) runQueue() {\n\trunning := sch.pool.Running()\n\tinstances := sch.pool.Instances()\n\tsort.Slice(instances, func(i, j int) bool {\n\t\treturn sch.instanceSort(instances[i], instances[j])\n\t})\n\t// instanceResources[i] tracks the remaining resources on\n\t// instances[i].  
Resources consumed by already-running\n\t// containers are subtracted below.\n\tinstanceResources := make([]container.InstanceResources, len(instances))\n\tfor i, instance := range instances {\n\t\tit := sch.cluster.InstanceTypes[instance.ArvadosInstanceType]\n\t\tinstanceResources[i] = instanceResourcesForInstanceType(it)\n\t}\n\n\ttotalInstances := 0\n\tfor _, n := range sch.pool.CountWorkers() {\n\t\ttotalInstances += n\n\t}\n\n\tunsorted, _ := sch.queue.Entries()\n\tsorted := make([]QueueEnt, 0, len(unsorted))\n\tfor _, ent := range unsorted {\n\t\tsorted = append(sorted, QueueEnt{QueueEnt: ent})\n\t}\n\tsort.Slice(sorted, func(i, j int) bool {\n\t\t_, irunning := running[sorted[i].Container.UUID]\n\t\t_, jrunning := running[sorted[j].Container.UUID]\n\t\tif irunning != jrunning {\n\t\t\t// Ensure the \"tryrun\" loop (see below) sees\n\t\t\t// already-scheduled containers first, to\n\t\t\t// ensure existing supervisor containers are\n\t\t\t// properly counted before we decide whether\n\t\t\t// we have room for new ones.\n\t\t\treturn irunning\n\t\t}\n\t\tilocked := sorted[i].Container.State == arvados.ContainerStateLocked\n\t\tjlocked := sorted[j].Container.State == arvados.ContainerStateLocked\n\t\tif ilocked != jlocked {\n\t\t\t// Give precedence to containers that we have\n\t\t\t// already locked, even if higher-priority\n\t\t\t// containers have since arrived in the\n\t\t\t// queue. This avoids undesirable queue churn\n\t\t\t// effects including extra lock/unlock cycles\n\t\t\t// and bringing up new instances and quickly\n\t\t\t// shutting them down to make room for\n\t\t\t// different instance sizes.\n\t\t\treturn ilocked\n\t\t} else if pi, pj := sorted[i].Container.Priority, sorted[j].Container.Priority; pi != pj {\n\t\t\treturn pi > pj\n\t\t} else {\n\t\t\t// When containers have identical priority,\n\t\t\t// start them in the order we first noticed\n\t\t\t// them. This avoids extra lock/unlock cycles\n\t\t\t// when we unlock the containers that don't\n\t\t\t// fit in the available pool.\n\t\t\treturn sorted[i].FirstSeenAt.Before(sorted[j].FirstSeenAt)\n\t\t}\n\t})\n\n\tcontainers := map[string]*arvados.Container{}\n\tfor i := range sorted {\n\t\tcontainers[sorted[i].Container.UUID] = &sorted[i].Container\n\t}\n\tfor i := range instances {\n\t\tfor _, uuid := range instances[i].RunningContainerUUIDs {\n\t\t\tif containers[uuid] == nil {\n\t\t\t\t// Container size unknown.  Assume the\n\t\t\t\t// instance has no capacity to spare.\n\t\t\t\tinstanceResources[i] = container.InstanceResources{}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\trsc := container.InstanceResourcesNeeded(sch.cluster, containers[uuid])\n\t\t\tinstanceResources[i] = instanceResources[i].Sub(rsc)\n\t\t}\n\t}\n\n\tif t := sch.client.Last503(); t.After(sch.last503time) {\n\t\t// API has sent an HTTP 503 response since last time\n\t\t// we checked. Use current #containers - 1 as\n\t\t// maxContainers, i.e., try to stay just below the\n\t\t// level where we see 503s.\n\t\tsch.last503time = t\n\t\tif newlimit := len(running) - 1; newlimit < 1 {\n\t\t\tsch.maxContainers = 1\n\t\t} else {\n\t\t\tsch.maxContainers = newlimit\n\t\t}\n\t} else if sch.maxContainers > 0 && time.Since(sch.last503time) > quietAfter503 {\n\t\t// If we haven't seen any 503 errors lately, raise\n\t\t// limit to ~10% beyond the current workload.\n\t\t//\n\t\t// As we use the added 10% to schedule more\n\t\t// containers, len(running) will increase and we'll\n\t\t// push the limit up further. 
Soon enough,\n\t\t// maxContainers will get high enough to schedule the\n\t\t// entire queue, hit pool quota, or get 503s again.\n\t\tmax := len(running)*11/10 + 1\n\t\tif sch.maxContainers < max {\n\t\t\tsch.maxContainers = max\n\t\t}\n\t}\n\tif sch.last503time.IsZero() {\n\t\tsch.mLast503Time.Set(0)\n\t} else {\n\t\tsch.mLast503Time.Set(float64(sch.last503time.Unix()))\n\t}\n\tif sch.instancesWithinQuota > 0 && sch.instancesWithinQuota < totalInstances {\n\t\t// Evidently it is possible to run this many\n\t\t// instances, so raise our estimate.\n\t\tsch.instancesWithinQuota = totalInstances\n\t}\n\tif sch.pool.AtQuota() {\n\t\t// Consider current workload to be the maximum\n\t\t// allowed, for the sake of reporting metrics and\n\t\t// calculating max supervisors.\n\t\t//\n\t\t// Now that sch.maxContainers is set, we will only\n\t\t// raise it past len(running) by 10%.  This helps\n\t\t// avoid running an inappropriate number of\n\t\t// supervisors when we reach the cloud-imposed quota\n\t\t// (which may be based on # CPUs etc) long before the\n\t\t// configured MaxInstances.\n\t\tif sch.maxContainers == 0 || sch.maxContainers > totalInstances {\n\t\t\tif totalInstances == 0 {\n\t\t\t\tsch.maxContainers = 1\n\t\t\t} else {\n\t\t\t\tsch.maxContainers = totalInstances\n\t\t\t}\n\t\t}\n\t\tsch.instancesWithinQuota = totalInstances\n\t} else if sch.instancesWithinQuota > 0 && sch.maxContainers > sch.instancesWithinQuota+1 {\n\t\t// Once we've hit a quota error and started tracking\n\t\t// instancesWithinQuota (i.e., it's not zero), we\n\t\t// avoid exceeding that known-working level by more\n\t\t// than 1.\n\t\t//\n\t\t// If we don't do this, we risk entering a pattern of\n\t\t// repeatedly locking several containers, hitting\n\t\t// quota again, and unlocking them again each time the\n\t\t// driver stops reporting AtQuota, which tends to use\n\t\t// up the max lock/unlock cycles on the next few\n\t\t// containers in the queue, and cause them to fail.\n\t\tsch.maxContainers = sch.instancesWithinQuota + 1\n\t}\n\tsch.mMaxContainerConcurrency.Set(float64(sch.maxContainers))\n\n\tmaxSupervisors := int(float64(sch.maxContainers) * sch.cluster.Containers.CloudVMs.SupervisorFraction)\n\tif maxSupervisors < 1 && sch.cluster.Containers.CloudVMs.SupervisorFraction > 0 && sch.maxContainers > 0 {\n\t\tmaxSupervisors = 1\n\t}\n\n\tsch.logger.WithFields(logrus.Fields{\n\t\t\"Containers\":    len(sorted),\n\t\t\"Processes\":     len(running),\n\t\t\"maxContainers\": sch.maxContainers,\n\t}).Debug(\"runQueue\")\n\n\tvar atcapacity = map[string]bool{} // ProviderTypes reported as AtCapacity during this runQueue() invocation\n\tvar overquota []QueueEnt           // entries that are unmappable because of worker pool quota\n\tvar overmaxsuper []QueueEnt        // unmappable because max supervisors (these are not included in overquota)\n\tvar containerAllocatedWorkerBootingCount int\n\n\t// trying is #containers running + #containers we're trying to\n\t// start. 
We stop trying to start more containers if this\n\t// reaches the dynamic maxContainers limit.\n\ttrying := len(running)\n\n\tqpos := 0\n\tsupervisors := 0\n\ntryrun:\n\tfor i, ent := range sorted {\n\t\tctr, ctrResources, types := ent.Container, ent.InstanceResources, ent.InstanceTypes\n\t\tlogger := sch.logger.WithFields(logrus.Fields{\n\t\t\t\"ContainerUUID\": ctr.UUID,\n\t\t})\n\t\tif ctr.SchedulingParameters.Supervisor {\n\t\t\tsupervisors += 1\n\t\t}\n\t\tif _, running := running[ctr.UUID]; running {\n\t\t\tif ctr.State == arvados.ContainerStateQueued || ctr.State == arvados.ContainerStateLocked {\n\t\t\t\tsorted[i].SchedulingStatus = schedStatusPreparingRuntimeEnvironment\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif ctr.Priority < 1 {\n\t\t\tsorted[i].SchedulingStatus = fmt.Sprintf(schedStatusPriorityZero, string(ctr.State))\n\t\t\tcontinue\n\t\t}\n\t\tif ctr.SchedulingParameters.Supervisor && maxSupervisors > 0 && supervisors > maxSupervisors {\n\t\t\tovermaxsuper = append(overmaxsuper, sorted[i])\n\t\t\tsorted[i].SchedulingStatus = fmt.Sprintf(schedStatusSupervisorLimitReached, len(overmaxsuper))\n\t\t\tcontinue\n\t\t}\n\t\teligibleTypes := map[string]bool{}\n\t\tfor _, it := range types {\n\t\t\teligibleTypes[it.Name] = true\n\t\t}\n\t\t// bestInstIdx>=0 means instances[bestInstIdx] is where we should\n\t\t// try to run ctr (it's one of the eligible instance\n\t\t// types, and has enough resources to accommodate\n\t\t// ctr).  bestInstIdx<0 means we can't start ctr right now,\n\t\t// all we can do is request a new instance or just\n\t\t// wait.\n\t\tbestInstIdx := -1\n\t\tfor i, instance := range instances {\n\t\t\tswitch {\n\t\t\tcase instance.WorkerState != worker.StateUnknown &&\n\t\t\t\tinstance.WorkerState != worker.StateRunning &&\n\t\t\t\tinstance.WorkerState != worker.StateBooting &&\n\t\t\t\tinstance.WorkerState != worker.StateIdle:\n\t\t\t\t// Shutdown or invalid state\n\t\t\tcase instance.IdleBehavior != worker.IdleBehaviorRun:\n\t\t\t\t// Admin-hold/drain\n\t\t\tcase sch.cluster.Containers.CloudVMs.MaxRunningContainersPerInstance > 0 &&\n\t\t\t\tsch.cluster.Containers.CloudVMs.MaxRunningContainersPerInstance <= len(instance.RunningContainerUUIDs):\n\t\t\t\t// reached configured limit on #\n\t\t\t\t// containers per instance\n\t\t\tcase !eligibleTypes[instance.ArvadosInstanceType]:\n\t\t\t\t// incompatible or too expensive\n\t\t\tcase !instanceResources[i].Accommodates(ctrResources):\n\t\t\t\t// insufficient spare resources\n\t\t\t\tif instance.WorkerState == worker.StateIdle && len(instance.RunningContainerUUIDs) == 0 {\n\t\t\t\t\t// This should be impossible\n\t\t\t\t\t// -- it means the selected\n\t\t\t\t\t// node type is too small even\n\t\t\t\t\t// when idle.\n\t\t\t\t\tlogger.Infof(\"BUG? 
insufficient resources on idle instance %s type %s for container %s: ir %+v ctrr %+v\", instance.Instance, instance.ArvadosInstanceType, ctr.UUID, instanceResources[i], ctrResources)\n\t\t\t\t}\n\t\t\tcase bestInstIdx < 0:\n\t\t\t\t// first eligible instance found\n\t\t\t\tbestInstIdx = i\n\t\t\tcase len(instance.RunningContainerUUIDs) > len(instances[bestInstIdx].RunningContainerUUIDs):\n\t\t\t\t// already found an eligible instance,\n\t\t\t\t// but this one has more containers\n\t\t\t\t// running, which we prefer (if\n\t\t\t\t// workload decreases we want some\n\t\t\t\t// busy nodes and some idle nodes so\n\t\t\t\t// the idle ones can shut down)\n\t\t\t\tbestInstIdx = i\n\t\t\tcase (instances[bestInstIdx].WorkerState == worker.StateBooting ||\n\t\t\t\tinstances[bestInstIdx].WorkerState == worker.StateUnknown) &&\n\t\t\t\t(instance.WorkerState == worker.StateIdle ||\n\t\t\t\t\tinstance.WorkerState == worker.StateRunning):\n\t\t\t\t// prefer an idle/running instance\n\t\t\t\t// over a (possibly lower-priced)\n\t\t\t\t// booting/unprobed instance\n\t\t\t\tbestInstIdx = i\n\t\t\t}\n\t\t}\n\t\tif bestInstIdx >= 0 {\n\t\t\tlogger.Tracef(\"bestInstIdx %d is instance %s for container %s: instanceResources %v ctrResources %v\", bestInstIdx, instances[bestInstIdx].Instance, ctr.UUID, instanceResources[bestInstIdx], ctrResources)\n\t\t}\n\t\t// If the pool is not reporting AtCapacity for any of\n\t\t// the eligible instance types, availableOK is true\n\t\t// and availableType is the lowest-cost type.\n\t\tvar availableOK bool\n\t\tvar availableType arvados.InstanceType\n\t\tfor _, it := range types {\n\t\t\tcapkey := fmt.Sprintf(\"%s, preemptible=%v\", it.ProviderType, it.Preemptible)\n\t\t\tif atcapacity[capkey] {\n\t\t\t\tcontinue\n\t\t\t} else if sch.pool.AtCapacity(it) {\n\t\t\t\tatcapacity[capkey] = true\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tavailableOK = true\n\t\t\t\tavailableType = it\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tswitch ctr.State {\n\t\tcase arvados.ContainerStateQueued:\n\t\t\tif sch.maxContainers > 0 && trying >= sch.maxContainers {\n\t\t\t\tlogger.Tracef(\"not locking: already at maxContainers %d\", sch.maxContainers)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttrying++\n\t\t\tif bestInstIdx < 0 && sch.pool.AtQuota() {\n\t\t\t\tlogger.Trace(\"not starting: AtQuota and no workers with capacity\")\n\t\t\t\toverquota = sorted[i:]\n\t\t\t\tbreak tryrun\n\t\t\t}\n\t\t\tif bestInstIdx < 0 && !availableOK {\n\t\t\t\tlogger.Trace(\"not locking: AtCapacity and no workers with capacity\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif sch.pool.KillContainer(ctr.UUID, \"about to lock\") {\n\t\t\t\tlogger.Info(\"not locking: crunch-run process from previous attempt has not exited\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo sch.lockContainer(logger, ctr.UUID)\n\t\t\tif bestInstIdx >= 0 {\n\t\t\t\tinstanceResources[bestInstIdx] = instanceResources[bestInstIdx].Sub(ctrResources)\n\t\t\t\tinstances[bestInstIdx].RunningContainerUUIDs = append(instances[bestInstIdx].RunningContainerUUIDs, ctr.UUID)\n\t\t\t}\n\t\tcase arvados.ContainerStateLocked:\n\t\t\tif sch.maxContainers > 0 && trying >= sch.maxContainers {\n\t\t\t\tlogger.Tracef(\"not starting: already at maxContainers %d\", sch.maxContainers)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttrying++\n\t\t\tif bestInstIdx >= 0 {\n\t\t\t\t// We have a suitable instance type,\n\t\t\t\t// so mark it as allocated, and try to\n\t\t\t\t// start the container.\n\t\t\t\tinstanceResources[bestInstIdx] = 
instanceResources[bestInstIdx].Sub(ctrResources)\n\t\t\t\tinstances[bestInstIdx].RunningContainerUUIDs = append(instances[bestInstIdx].RunningContainerUUIDs, ctr.UUID)\n\t\t\t\tinst := instances[bestInstIdx]\n\t\t\t\tlogger = logger.WithFields(logrus.Fields{\n\t\t\t\t\t\"Instance\":     inst.Instance,\n\t\t\t\t\t\"InstanceType\": inst.ArvadosInstanceType,\n\t\t\t\t})\n\t\t\t\tif inst.Instance == \"\" {\n\t\t\t\t\tsorted[i].SchedulingStatus = fmt.Sprintf(schedStatusWaitingNewInstance, inst.ArvadosInstanceType)\n\t\t\t\t\tlogger.Trace(\"not trying to start: selected instance does not have an ID yet\")\n\t\t\t\t\tcontainerAllocatedWorkerBootingCount++\n\t\t\t\t} else if inst.WorkerState != worker.StateIdle && inst.WorkerState != worker.StateRunning {\n\t\t\t\t\tsorted[i].SchedulingStatus = fmt.Sprintf(schedStatusWaitingNewInstance, inst.ArvadosInstanceType)\n\t\t\t\t\tlogger.Tracef(\"not trying to start: selected instance state=%s\", inst.WorkerState)\n\t\t\t\t\tcontainerAllocatedWorkerBootingCount++\n\t\t\t\t} else if sch.pool.KillContainer(ctr.UUID, \"about to start\") {\n\t\t\t\t\tsorted[i].SchedulingStatus = schedStatusWaitingForPreviousAttempt\n\t\t\t\t\tlogger.Info(\"not restarting yet: crunch-run process from previous attempt has not exited\")\n\t\t\t\t} else if sch.pool.StartContainer(inst.Instance, ctr) {\n\t\t\t\t\tsorted[i].SchedulingStatus = schedStatusPreparingRuntimeEnvironment\n\t\t\t\t\tlogger.Trace(\"StartContainer => true\")\n\t\t\t\t} else {\n\t\t\t\t\tsorted[i].SchedulingStatus = fmt.Sprintf(schedStatusWaitingNewInstance, inst.ArvadosInstanceType)\n\t\t\t\t\tlogger.Trace(\"StartContainer => false\")\n\t\t\t\t\tcontainerAllocatedWorkerBootingCount++\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif sch.pool.AtQuota() {\n\t\t\t\t// Don't let lower-priority containers\n\t\t\t\t// starve this one by keeping\n\t\t\t\t// idle workers alive on different\n\t\t\t\t// instance types.\n\t\t\t\tlogger.Trace(\"overquota\")\n\t\t\t\toverquota = sorted[i:]\n\t\t\t\tbreak tryrun\n\t\t\t}\n\t\t\tif !availableOK {\n\t\t\t\t// Continue trying lower-priority\n\t\t\t\t// containers in case they can run on\n\t\t\t\t// different instance types that are\n\t\t\t\t// available.\n\t\t\t\t//\n\t\t\t\t// The local \"atcapacity\" cache helps\n\t\t\t\t// when the pool's flag resets after\n\t\t\t\t// we look at container A but before\n\t\t\t\t// we look at lower-priority container\n\t\t\t\t// B. In that case we want to run\n\t\t\t\t// container A on the next call to\n\t\t\t\t// runQueue(), rather than run\n\t\t\t\t// container B now.\n\t\t\t\tqpos++\n\t\t\t\tvar typenames []string\n\t\t\t\tfor _, tp := range types {\n\t\t\t\t\ttypenames = append(typenames, tp.Name)\n\t\t\t\t}\n\t\t\t\tsorted[i].SchedulingStatus = fmt.Sprintf(schedStatusWaitingInstanceType, qpos, strings.Join(typenames, \", \"))\n\t\t\t\tlogger.Trace(\"all eligible types at capacity\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlogger = logger.WithField(\"InstanceType\", availableType.Name)\n\t\t\tnewInstance, ok := sch.pool.Create(availableType)\n\t\t\tif !ok {\n\t\t\t\t// Failed despite not being at quota,\n\t\t\t\t// e.g., cloud ops throttled.\n\t\t\t\tlogger.Trace(\"pool declined to create new instance\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Success. 
(Note pool.Create works\n\t\t\t// asynchronously and does its own logging\n\t\t\t// about the eventual outcome, so we don't\n\t\t\t// need to.)\n\t\t\tsorted[i].SchedulingStatus = fmt.Sprintf(schedStatusWaitingNewInstance, availableType.Name)\n\t\t\tlogger.Info(\"creating new instance\")\n\t\t\t// Don't bother trying to start the container\n\t\t\t// yet -- obviously the instance will take\n\t\t\t// some time to boot and become ready.\n\t\t\tcontainerAllocatedWorkerBootingCount += 1\n\n\t\t\t// Insert new entry in instances and\n\t\t\t// instanceResources.\n\t\t\tidx := 0\n\t\t\tfor ; idx < len(instances) && sch.instanceSort(instances[idx], newInstance); idx++ {\n\t\t\t}\n\t\t\tnewInstance.RunningContainerUUIDs = append(newInstance.RunningContainerUUIDs, ctr.UUID)\n\t\t\tinstances = slices.Insert(instances, idx, newInstance)\n\t\t\tinstanceResources = slices.Insert(instanceResources, idx, instanceResourcesForInstanceType(availableType).Sub(ctrResources))\n\t\t}\n\t}\n\n\tsch.mContainersAllocatedNotStarted.Set(float64(containerAllocatedWorkerBootingCount))\n\tsch.mContainersNotAllocatedOverQuota.Set(float64(len(overquota) + len(overmaxsuper)))\n\n\tvar qreason string\n\tif sch.pool.AtQuota() {\n\t\tqreason = schedStatusWaitingCloudResources\n\t} else {\n\t\tqreason = schedStatusWaitingClusterCapacity\n\t}\n\tfor i, ent := range sorted {\n\t\tif ent.SchedulingStatus == \"\" && (ent.Container.State == arvados.ContainerStateQueued || ent.Container.State == arvados.ContainerStateLocked) {\n\t\t\tqpos++\n\t\t\tsorted[i].SchedulingStatus = fmt.Sprintf(qreason, qpos)\n\t\t}\n\t}\n\tsch.lastQueue.Store(sorted)\n\n\tif len(overquota)+len(overmaxsuper) > 0 {\n\t\t// Unlock any containers that are unmappable while\n\t\t// we're at quota (but if they have already been\n\t\t// scheduled and they're loading docker images etc.,\n\t\t// let them run).\n\t\tvar unlock []QueueEnt\n\t\tunlock = append(unlock, overmaxsuper...)\n\t\tif totalInstances > 0 && len(overquota) > 1 {\n\t\t\t// We don't unlock the next-in-line container\n\t\t\t// when at quota.  
This avoids a situation\n\t\t\t// where our \"at quota\" state expires, we lock\n\t\t\t// the next container and try to create an\n\t\t\t// instance, the cloud provider still returns\n\t\t\t// a quota error, we unlock the container, and\n\t\t\t// we repeat this until the container reaches\n\t\t\t// its limit of lock/unlock cycles.\n\t\t\tunlock = append(unlock, overquota[1:]...)\n\t\t} else {\n\t\t\t// However, if totalInstances is 0 and we're\n\t\t\t// still getting quota errors, then the\n\t\t\t// next-in-line container is evidently not\n\t\t\t// possible to run, so we should let it\n\t\t\t// exhaust its lock/unlock cycles and\n\t\t\t// eventually cancel, to avoid starvation.\n\t\t\tunlock = append(unlock, overquota...)\n\t\t}\n\t\tfor _, ctr := range unlock {\n\t\t\tctr := ctr.Container\n\t\t\t_, toolate := running[ctr.UUID]\n\t\t\tif ctr.State == arvados.ContainerStateLocked && !toolate {\n\t\t\t\tlogger := sch.logger.WithField(\"ContainerUUID\", ctr.UUID)\n\t\t\t\tlogger.Info(\"unlock because pool capacity is used by higher priority containers\")\n\t\t\t\terr := sch.queue.Unlock(ctr.UUID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.WithError(err).Warn(\"error unlocking\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif len(overquota) > 0 {\n\t\t// Shut down idle workers that didn't get any\n\t\t// containers mapped onto them before we hit quota.\n\t\tfor _, instance := range instances {\n\t\t\tif len(instance.RunningContainerUUIDs) == 0 {\n\t\t\t\tsch.pool.Shutdown(instance.Instance)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Lock the given container. Should be called in a new goroutine.\nfunc (sch *Scheduler) lockContainer(logger logrus.FieldLogger, uuid string) {\n\tif !sch.uuidLock(uuid, \"lock\") {\n\t\treturn\n\t}\n\tdefer sch.uuidUnlock(uuid)\n\tif ctr, ok := sch.queue.Get(uuid); !ok || ctr.State != arvados.ContainerStateQueued {\n\t\t// This happens if the container has been cancelled or\n\t\t// locked since runQueue called sch.queue.Entries(),\n\t\t// possibly by a lockContainer() call from a previous\n\t\t// runQueue iteration. In any case, we will respond\n\t\t// appropriately on the next runQueue iteration, which\n\t\t// will have already been triggered by the queue\n\t\t// update.\n\t\tlogger.WithField(\"State\", ctr.State).Debug(\"container no longer queued by the time we decided to lock it, doing nothing\")\n\t\treturn\n\t}\n\terr := sch.queue.Lock(uuid)\n\tif err != nil {\n\t\tlogger.WithError(err).Warn(\"error locking container\")\n\t\treturn\n\t}\n\tlogger.Debug(\"lock succeeded\")\n\tctr, ok := sch.queue.Get(uuid)\n\tif !ok {\n\t\tlogger.Error(\"(BUG?) container disappeared from queue after Lock succeeded\")\n\t} else if ctr.State != arvados.ContainerStateLocked {\n\t\tlogger.Warnf(\"(race?) container has state=%q after Lock succeeded\", ctr.State)\n\t}\n}\n\n// Acquire a non-blocking lock for specified UUID, returning true if\n// successful.  
The op argument is used only for debug logs.\n//\n// If the lock is not available, uuidLock arranges to wake up the\n// scheduler after a short delay, so it can retry whatever operation\n// is trying to get the lock (if that operation is still worth doing).\n//\n// This mechanism helps avoid spamming the controller/database with\n// concurrent updates for any single container, even when the\n// scheduler loop is running frequently.\nfunc (sch *Scheduler) uuidLock(uuid, op string) bool {\n\tsch.mtx.Lock()\n\tdefer sch.mtx.Unlock()\n\tlogger := sch.logger.WithFields(logrus.Fields{\n\t\t\"ContainerUUID\": uuid,\n\t\t\"Op\":            op,\n\t})\n\tif op, locked := sch.uuidOp[uuid]; locked {\n\t\tlogger.Debugf(\"uuidLock not available, Op=%s in progress\", op)\n\t\t// Make sure the scheduler loop wakes up to retry.\n\t\tsch.wakeup.Reset(time.Second / 4)\n\t\treturn false\n\t}\n\tlogger.Debug(\"uuidLock acquired\")\n\tsch.uuidOp[uuid] = op\n\treturn true\n}\n\nfunc (sch *Scheduler) uuidUnlock(uuid string) {\n\tsch.mtx.Lock()\n\tdefer sch.mtx.Unlock()\n\tdelete(sch.uuidOp, uuid)\n}\n"
  },
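  {
    "path": "lib/dispatchcloud/example/limit_sketch.go",
    "content": "// Editor's addition, not part of the Arvados source tree: a minimal,\n// self-contained sketch of the adaptive concurrency limit implemented\n// in run_queue.go.  On an HTTP 503 from the API server, the limit\n// drops to just below the current workload; after a quiet minute it\n// rises to ~10% above the workload, creeping back up until the queue\n// drains, quota is reached, or 503s reappear.  Names here are\n// illustrative only.\npackage example\n\nimport \"time\"\n\nconst quietAfter503 = time.Minute\n\ntype limiter struct {\n\tmaxContainers int\n\tlast503time   time.Time\n}\n\n// update recomputes maxContainers given the current number of running\n// containers and the time of the most recent 503 response (zero if\n// none has been seen yet).\nfunc (l *limiter) update(running int, last503 time.Time) {\n\tif last503.After(l.last503time) {\n\t\t// New 503 seen: stay just below the level where we saw\n\t\t// it, but never below 1.\n\t\tl.last503time = last503\n\t\tif newlimit := running - 1; newlimit < 1 {\n\t\t\tl.maxContainers = 1\n\t\t} else {\n\t\t\tl.maxContainers = newlimit\n\t\t}\n\t} else if l.maxContainers > 0 && time.Since(l.last503time) > quietAfter503 {\n\t\t// No 503s lately: allow ~10% above the current\n\t\t// workload.\n\t\tif max := running*11/10 + 1; l.maxContainers < max {\n\t\t\tl.maxContainers = max\n\t\t}\n\t}\n}\n"
  },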
  {
    "path": "lib/dispatchcloud/scheduler/run_queue_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage scheduler\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/container\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/test\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/worker\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\n\t\"github.com/prometheus/client_golang/prometheus/testutil\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\ntype stubPool struct {\n\tnotify    <-chan struct{}\n\tworkers   map[cloud.InstanceID]*worker.InstanceView\n\trunning   map[string]time.Time\n\tquota     int\n\tcapacity  map[string]int\n\tcanCreate int\n\tcreates   []arvados.InstanceType\n\tstarts    []string\n\tshutdowns int\n\tsync.Mutex\n}\n\nfunc (p *stubPool) AtQuota() bool {\n\tn := 0\n\tfor _, nn := range p.CountWorkers() {\n\t\tn += nn\n\t}\n\treturn n >= p.quota\n}\nfunc (p *stubPool) AtCapacity(it arvados.InstanceType) bool {\n\tp.Lock()\n\tdefer p.Unlock()\n\tsupply, ok := p.capacity[it.ProviderType]\n\tif !ok {\n\t\treturn false\n\t}\n\tfor _, wkr := range p.workers {\n\t\tif wkr.ProviderInstanceType == it.ProviderType {\n\t\t\tsupply--\n\t\t}\n\t}\n\treturn supply < 1\n}\nfunc (p *stubPool) Subscribe() <-chan struct{}  { return p.notify }\nfunc (p *stubPool) Unsubscribe(<-chan struct{}) {}\nfunc (p *stubPool) Running() map[string]time.Time {\n\tp.Lock()\n\tdefer p.Unlock()\n\tr := map[string]time.Time{}\n\tfor k, v := range p.running {\n\t\tr[k] = v\n\t}\n\treturn r\n}\nfunc (p *stubPool) Instances() (r []worker.InstanceView) {\n\tp.Lock()\n\tdefer p.Unlock()\n\tfor _, wkr := range p.workers {\n\t\tr = append(r, *wkr)\n\t}\n\treturn\n}\nfunc (p *stubPool) Create(it arvados.InstanceType) (worker.InstanceView, bool) {\n\tp.Lock()\n\tdefer p.Unlock()\n\tp.creates = append(p.creates, it)\n\tif p.canCreate < 1 {\n\t\treturn worker.InstanceView{}, false\n\t}\n\tp.canCreate--\n\tid := cloud.InstanceID(fmt.Sprintf(\"i-%07d\", len(p.creates)))\n\tif p.workers == nil {\n\t\tp.workers = map[cloud.InstanceID]*worker.InstanceView{}\n\t}\n\tp.workers[id] = &worker.InstanceView{\n\t\tInstance:             id,\n\t\tPrice:                it.Price,\n\t\tArvadosInstanceType:  it.Name,\n\t\tProviderInstanceType: it.ProviderType,\n\t\tWorkerState:          worker.StateBooting,\n\t\tIdleBehavior:         worker.IdleBehaviorRun,\n\t}\n\t// Returned InstanceView should have a blank instance ID, just\n\t// like a real pool (instances are created asynchronously so a\n\t// real cloud provider can't have provided an ID yet).\n\tcreated := *p.workers[id]\n\tcreated.Instance = \"\"\n\treturn created, true\n}\nfunc (p *stubPool) ForgetContainer(uuid string) {\n}\nfunc (p *stubPool) KillContainer(uuid, reason string) bool {\n\tp.Lock()\n\tdefer p.Unlock()\n\tdefer delete(p.running, uuid)\n\tt, ok := p.running[uuid]\n\treturn ok && t.IsZero()\n}\nfunc (p *stubPool) Shutdown(cloud.InstanceID) bool {\n\tp.shutdowns++\n\treturn false\n}\nfunc (p *stubPool) CountWorkers() map[worker.State]int {\n\tp.Lock()\n\tdefer p.Unlock()\n\tr := map[worker.State]int{}\n\tfor _, v := range p.workers {\n\t\tr[v.WorkerState]++\n\t}\n\treturn r\n}\nfunc (p *stubPool) StartContainer(id cloud.InstanceID, ctr arvados.Container) bool {\n\tp.Lock()\n\tdefer p.Unlock()\n\tp.starts = append(p.starts, ctr.UUID)\n\twkr := p.workers[id]\n\tif wkr == nil 
{\n\t\treturn false\n\t}\n\tif (wkr.WorkerState == worker.StateIdle || wkr.WorkerState == worker.StateRunning) &&\n\t\twkr.IdleBehavior == worker.IdleBehaviorRun {\n\t\tif p.running == nil {\n\t\t\tp.running = map[string]time.Time{}\n\t\t}\n\t\tp.running[ctr.UUID] = time.Time{}\n\t\twkr.WorkerState = worker.StateRunning\n\t\twkr.LastContainerUUID = ctr.UUID\n\t\twkr.RunningContainerUUIDs = append(wkr.RunningContainerUUIDs, ctr.UUID)\n\t\treturn true\n\t}\n\treturn false\n}\nfunc (p *stubPool) bootAllInstances() {\n\tp.Lock()\n\tdefer p.Unlock()\n\tfor _, wkr := range p.workers {\n\t\tif wkr.WorkerState == worker.StateBooting {\n\t\t\twkr.WorkerState = worker.StateIdle\n\t\t}\n\t}\n}\n\nvar _ = check.Suite(&SchedulerSuite{})\n\ntype SchedulerSuite struct {\n\ttestCluster arvados.Cluster\n}\n\nfunc (s *SchedulerSuite) SetUpTest(c *check.C) {\n\ts.testCluster = arvados.Cluster{}\n\ts.testCluster.Containers.StaleLockTimeout = arvados.Duration(time.Millisecond)\n\ts.testCluster.Containers.CloudVMs.PollInterval = arvados.Duration(time.Millisecond)\n\ts.testCluster.Containers.CloudVMs.MaxInstances = 10\n\ts.testCluster.Containers.CloudVMs.SupervisorFraction = 0.2\n\ts.testCluster.InstanceTypes = make(arvados.InstanceTypeMap)\n\tfor i := 1; i <= 16; i++ {\n\t\tit := test.InstanceType(i)\n\t\ts.testCluster.InstanceTypes[it.Name] = it\n\t}\n}\n\nfunc (s *SchedulerSuite) chooseType(ctr *arvados.Container) ([]arvados.InstanceType, error) {\n\treturn container.ChooseInstanceType(&s.testCluster, ctr)\n}\n\n// Assign priority=4 container to idle node. Create new instances for\n// the priority=3, 2, 1 containers.\nfunc (s *SchedulerSuite) TestUseIdleWorkers(c *check.C) {\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tContainers: []arvados.Container{\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(1),\n\t\t\t\tPriority: 1,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(2),\n\t\t\t\tPriority: 2,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(3),\n\t\t\t\tPriority: 3,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(4),\n\t\t\t\tPriority: 4,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tqueue.Update()\n\tpool := stubPool{\n\t\tquota:     1000,\n\t\tcanCreate: 6,\n\t}\n\tpool.Create(test.InstanceType(1))\n\tpool.Create(test.InstanceType(2))\n\tpool.Create(test.InstanceType(2))\n\tpool.bootAllInstances()\n\tNew(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster).runQueue()\n\tc.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{\n\t\ttest.InstanceType(1), test.InstanceType(2), test.InstanceType(2),\n\t\ttest.InstanceType(1), test.InstanceType(1), test.InstanceType(1),\n\t})\n\tc.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})\n\tc.Check(pool.running, check.HasLen, 1)\n\tfor uuid := range pool.running {\n\t\tc.Check(uuid, 
check.Equals, test.ContainerUUID(4))\n\t}\n}\n\n// The smallest (type-2) instance can accommodate 2 containers, so we\n// create 2 of them to run 4 containers.\nfunc (s *SchedulerSuite) TestPackContainers_NewInstance(c *check.C) {\n\tqueue, pool := s.setupTestPackContainers(c)\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tNew(ctx, arvados.NewClientFromEnv(), queue, pool, nil, &s.testCluster).runQueue()\n\tc.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{\n\t\ttest.InstanceType(2), test.InstanceType(2),\n\t})\n\tc.Check(pool.starts, check.HasLen, 0)\n}\n\n// A type-3 instance is available, and it's within MaximumPriceFactor\n// and fits 3 containers, so we start 3 containers on it and start a\n// type-2 instance to run the fourth container.\nfunc (s *SchedulerSuite) TestPackContainers_IdleInstance(c *check.C) {\n\tqueue, pool := s.setupTestPackContainers(c)\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tpool.Create(test.InstanceType(3))\n\tpool.bootAllInstances()\n\tNew(ctx, arvados.NewClientFromEnv(), queue, pool, nil, &s.testCluster).runQueue()\n\tc.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{\n\t\ttest.InstanceType(3), test.InstanceType(2),\n\t})\n\tc.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4), test.ContainerUUID(3), test.ContainerUUID(2)})\n}\n\n// A type-4 instance is idle, but shouldn't be used because its price\n// exceeds MaximumPriceFactor (1.5x smallest usable type-2).  Create\n// type-2 instances instead.\nfunc (s *SchedulerSuite) TestPackContainers_IdleInstance_TooBig(c *check.C) {\n\tqueue, pool := s.setupTestPackContainers(c)\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tpool.Create(test.InstanceType(4))\n\tpool.bootAllInstances()\n\tNew(ctx, arvados.NewClientFromEnv(), queue, pool, nil, &s.testCluster).runQueue()\n\tc.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{\n\t\ttest.InstanceType(4), test.InstanceType(2), test.InstanceType(2),\n\t})\n\tc.Check(pool.starts, check.HasLen, 0)\n}\n\n// A type-3 instance is running a container, is within\n// MaximumPriceFactor, and has room for 2 more.  Start 2 containers on\n// the type-3 instance, and create a new instance for the last\n// container.\nfunc (s *SchedulerSuite) TestPackContainers_SpareResources(c *check.C) {\n\tqueue, pool := s.setupTestPackContainers(c)\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tpool.Create(test.InstanceType(3))\n\tpool.bootAllInstances()\n\tpool.StartContainer(pool.Instances()[0].Instance, queue.Containers[3])\n\tqueue.Containers[3].State = arvados.ContainerStateRunning\n\tqueue.Update()\n\tNew(ctx, arvados.NewClientFromEnv(), queue, pool, nil, &s.testCluster).runQueue()\n\tc.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{\n\t\ttest.InstanceType(3), test.InstanceType(2),\n\t})\n\tc.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4), test.ContainerUUID(3), test.ContainerUUID(2)})\n}\n\n// A type-3 instance is running a container, is within\n// MaximumPriceFactor, and has room for 2 more, but it is on\n// admin-hold (IdleBehaviorHold), so we don't start new containers on\n// it.  
Instead we create two new instances for the other three\n// containers.\nfunc (s *SchedulerSuite) TestPackContainers_IdleBehaviorHold(c *check.C) {\n\tqueue, pool := s.setupTestPackContainers(c)\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tpool.Create(test.InstanceType(3))\n\tpool.bootAllInstances()\n\tfor _, wkr := range pool.workers {\n\t\twkr.IdleBehavior = worker.IdleBehaviorHold\n\t}\n\tpool.StartContainer(pool.Instances()[0].Instance, queue.Containers[3])\n\tNew(ctx, arvados.NewClientFromEnv(), queue, pool, nil, &s.testCluster).runQueue()\n\tc.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{\n\t\ttest.InstanceType(3), test.InstanceType(2), test.InstanceType(2),\n\t})\n\tc.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})\n}\n\n// A type-2 instance is idle, and can fit one more container, but\n// container packing is disabled by MaxRunningContainersPerInstance\n// config, so we start 1 container and create 3 new instances for the\n// other 3 containers.\nfunc (s *SchedulerSuite) TestPackContainers_DisabledInConfig(c *check.C) {\n\tqueue, pool := s.setupTestPackContainers(c)\n\ts.testCluster.Containers.CloudVMs.MaxRunningContainersPerInstance = 1\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tpool.Create(test.InstanceType(2))\n\tpool.bootAllInstances()\n\tNew(ctx, arvados.NewClientFromEnv(), queue, pool, nil, &s.testCluster).runQueue()\n\tc.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{\n\t\ttest.InstanceType(2), test.InstanceType(2), test.InstanceType(2), test.InstanceType(2),\n\t})\n\tc.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})\n}\n\n// A type-8 instance can fit all 4 containers, but\n// containers-per-instance is limited to 2 by\n// MaxRunningContainersPerInstance config, so we create 2 new\n// instances instead of just 1.\nfunc (s *SchedulerSuite) TestPackContainers_LimitedInConfig(c *check.C) {\n\t// Ensure smallest instance type is type-8\n\tfor i := range 8 {\n\t\tdelete(s.testCluster.InstanceTypes, test.InstanceType(i).Name)\n\t}\n\tqueue, pool := s.setupTestPackContainers(c)\n\ts.testCluster.Containers.CloudVMs.MaxRunningContainersPerInstance = 2\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tNew(ctx, arvados.NewClientFromEnv(), queue, pool, nil, &s.testCluster).runQueue()\n\tc.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{\n\t\ttest.InstanceType(8), test.InstanceType(8),\n\t})\n\tc.Check(pool.starts, check.HasLen, 0)\n}\n\n// An existing type-2 instance is in StateUnknown (e.g., it was\n// created by our predecessor and we haven't had a successful probe\n// response yet) so we should only start one new instance. 
When the\n// new instance comes up, we should run the two higher-priority\n// containers on it.\nfunc (s *SchedulerSuite) TestPackContainers_InstanceStateUnknown(c *check.C) {\n\tqueue, pool := s.setupTestPackContainers(c)\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tsch := New(ctx, arvados.NewClientFromEnv(), queue, pool, nil, &s.testCluster)\n\n\tpool.Create(test.InstanceType(2))\n\tpool.workers[\"i-0000001\"].WorkerState = worker.StateUnknown\n\tsch.runQueue()\n\tc.Check(pool.creates, check.HasLen, 2)\n\tc.Check(pool.starts, check.HasLen, 0)\n\n\t// i-0000002 is now in StateBooting, still shouldn't try to\n\t// start\n\tsch.runQueue()\n\tc.Check(pool.creates, check.HasLen, 2)\n\tc.Check(pool.starts, check.HasLen, 0)\n\n\tpool.workers[\"i-0000002\"].WorkerState = worker.StateIdle\n\tsch.runQueue()\n\tc.Check(pool.creates, check.HasLen, 2)\n\tc.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4), test.ContainerUUID(3)})\n}\n\nfunc (s *SchedulerSuite) setupTestPackContainers(c *check.C) (*test.Queue, *stubPool) {\n\tdelete(s.testCluster.InstanceTypes, test.InstanceType(1).Name)\n\ts.testCluster.Containers.MaximumPriceFactor = 1.5\n\tqueue := &test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tContainers: []arvados.Container{\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(1),\n\t\t\t\tPriority: 1,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(2),\n\t\t\t\tPriority: 2,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(3),\n\t\t\t\tPriority: 3,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(4),\n\t\t\t\tPriority: 4,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tqueue.Update()\n\tpool := &stubPool{\n\t\tquota:     999,\n\t\tcanCreate: 999,\n\t}\n\treturn queue, pool\n}\n\nfunc (s *SchedulerSuite) TestPackContainers_Resources(c *check.C) {\n\t// Reduce the menu to instance types with 2^N CPUs.\n\tfor _, i := range []int{3, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15} {\n\t\tdelete(s.testCluster.InstanceTypes, test.InstanceType(i).Name)\n\t}\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tContainers: []arvados.Container{\n\t\t\t// Each of the following 3 containers needs at\n\t\t\t// least a type-8 instance (for CPU, RAM, or\n\t\t\t// scratch), and they can all fit on a single\n\t\t\t// type-8 instance.\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(1),\n\t\t\t\tPriority: 1,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   5 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(2),\n\t\t\t\tPriority: 2,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 5,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(3),\n\t\t\t\tPriority: 3,\n\t\t\t\tState:    
arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs:         1,\n\t\t\t\t\tRAM:           1 << 30,\n\t\t\t\t\tKeepCacheDisk: 5 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t// This container needs a type-16 instance,\n\t\t\t// leaving enough capacity to run containers\n\t\t\t// 1,2,3 as well -- except we won't do that\n\t\t\t// because it costs 2x a type-8 instance and\n\t\t\t// MaximumPriceFactor is only 1.5.\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(4),\n\t\t\t\tPriority: 4,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 9,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Currently CRs with VCPUs==0 are not allowed\n\t\t\t// by API server, but if/when they are, they\n\t\t\t// can run on any instance with enough RAM and\n\t\t\t// scratch.\n\t\t\t//\n\t\t\t// The next 4 containers each need 128 MiB of\n\t\t\t// RAM and disk, so they should pack onto a\n\t\t\t// single type-1 node.\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(5),\n\t\t\t\tPriority: 5,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs:         0,\n\t\t\t\t\tRAM:           1 << 27,\n\t\t\t\t\tKeepCacheDisk: 1 << 27,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(6),\n\t\t\t\tPriority: 6,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs:         0,\n\t\t\t\t\tRAM:           1 << 27,\n\t\t\t\t\tKeepCacheDisk: 1 << 27,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(7),\n\t\t\t\tPriority: 7,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs:         0,\n\t\t\t\t\tRAM:           1 << 27,\n\t\t\t\t\tKeepCacheDisk: 1 << 27,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(8),\n\t\t\t\tPriority: 8,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs:         0,\n\t\t\t\t\tRAM:           1 << 27,\n\t\t\t\t\tKeepCacheDisk: 1 << 27,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tqueue.Update()\n\tpool := stubPool{\n\t\tquota:     999,\n\t\tcanCreate: 999,\n\t}\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tsch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\tsch.runQueue()\n\tc.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{\n\t\ttest.InstanceType(1), test.InstanceType(16), test.InstanceType(8),\n\t})\n\tc.Check(pool.starts, check.HasLen, 0)\n\tpool.bootAllInstances()\n\tsch.runQueue()\n\tfor _, wkr := range pool.workers {\n\t\tswitch wkr.ArvadosInstanceType {\n\t\tcase test.InstanceType(1).Name:\n\t\t\tc.Check(wkr.RunningContainerUUIDs, check.DeepEquals, []string{\n\t\t\t\ttest.ContainerUUID(8),\n\t\t\t\ttest.ContainerUUID(7),\n\t\t\t\ttest.ContainerUUID(6),\n\t\t\t\ttest.ContainerUUID(5),\n\t\t\t})\n\t\tcase test.InstanceType(2).Name:\n\t\t\tc.Check(wkr.RunningContainerUUIDs, check.HasLen, 0)\n\t\tcase test.InstanceType(8).Name:\n\t\t\tc.Check(wkr.RunningContainerUUIDs, check.DeepEquals, []string{\n\t\t\t\ttest.ContainerUUID(3),\n\t\t\t\ttest.ContainerUUID(2),\n\t\t\t\ttest.ContainerUUID(1),\n\t\t\t})\n\t\tcase test.InstanceType(16).Name:\n\t\t\tc.Check(wkr.RunningContainerUUIDs, check.DeepEquals, []string{\n\t\t\t\ttest.ContainerUUID(4),\n\t\t\t})\n\t\tdefault:\n\t\t\tc.Errorf(\"unexpected 
instance type %s\", wkr.ArvadosInstanceType)\n\t\t}\n\t}\n}\n\n// For any N > 0, an N-VCPU container should not share an N-VCPU\n// instance with anything, even 0-VCPU containers.  In other words, a\n// container requesting 0 VCPUs is considered to consume a tiny\n// fraction of a VCPU.\nfunc (s *SchedulerSuite) TestPackContainers_0VCPUs(c *check.C) {\n\tdelete(s.testCluster.InstanceTypes, test.InstanceType(1).Name)\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tContainers: []arvados.Container{\n\t\t\t// One 1-VCPU container and four 0-VCPU\n\t\t\t// containers can share one type-2 (2-VCPU)\n\t\t\t// instance.\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(1),\n\t\t\t\tPriority: 1,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs:         1,\n\t\t\t\t\tRAM:           1 << 27,\n\t\t\t\t\tKeepCacheDisk: 1 << 27,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(2),\n\t\t\t\tPriority: 2,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs:         0,\n\t\t\t\t\tRAM:           1 << 27,\n\t\t\t\t\tKeepCacheDisk: 1 << 27,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(3),\n\t\t\t\tPriority: 3,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs:         0,\n\t\t\t\t\tRAM:           1 << 27,\n\t\t\t\t\tKeepCacheDisk: 1 << 27,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(4),\n\t\t\t\tPriority: 4,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs:         0,\n\t\t\t\t\tRAM:           1 << 27,\n\t\t\t\t\tKeepCacheDisk: 1 << 27,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(5),\n\t\t\t\tPriority: 5,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs:         0,\n\t\t\t\t\tRAM:           1 << 27,\n\t\t\t\t\tKeepCacheDisk: 1 << 27,\n\t\t\t\t},\n\t\t\t},\n\t\t\t// A 2-VCPU container should get a type-2\n\t\t\t// instance all to itself, even though it\n\t\t\t// would have enough RAM and scratch space for\n\t\t\t// the four 0-VCPU containers.\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(6),\n\t\t\t\tPriority: 6,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs:         2,\n\t\t\t\t\tRAM:           1 << 27,\n\t\t\t\t\tKeepCacheDisk: 1 << 27,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tqueue.Update()\n\tpool := stubPool{\n\t\tquota:     999,\n\t\tcanCreate: 999,\n\t}\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tsch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\tsch.runQueue()\n\tc.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{\n\t\ttest.InstanceType(2), test.InstanceType(2),\n\t})\n\tc.Check(pool.starts, check.HasLen, 0)\n\tpool.bootAllInstances()\n\tsch.runQueue()\n\tc.Assert(pool.workers, check.HasLen, 2)\n\tc.Check(pool.workers[\"i-0000001\"].RunningContainerUUIDs, check.DeepEquals, []string{\n\t\ttest.ContainerUUID(6),\n\t})\n\tc.Check(pool.workers[\"i-0000002\"].RunningContainerUUIDs, check.DeepEquals, []string{\n\t\ttest.ContainerUUID(5),\n\t\ttest.ContainerUUID(4),\n\t\ttest.ContainerUUID(3),\n\t\ttest.ContainerUUID(2),\n\t\ttest.ContainerUUID(1),\n\t})\n}\n\n// If pool.AtQuota() is true, shutdown some 
unalloc nodes, and don't\n// call Create().\nfunc (s *SchedulerSuite) TestShutdownAtQuota(c *check.C) {\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tfor quota := 1; quota <= 3; quota++ {\n\t\tc.Logf(\"quota=%d\", quota)\n\t\tqueue := test.Queue{\n\t\t\tChooseType: s.chooseType,\n\t\t\tContainers: []arvados.Container{\n\t\t\t\t{\n\t\t\t\t\tUUID:     test.ContainerUUID(2),\n\t\t\t\t\tPriority: 2,\n\t\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\t\tVCPUs: 2,\n\t\t\t\t\t\tRAM:   2 << 30,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tUUID:     test.ContainerUUID(3),\n\t\t\t\t\tPriority: 3,\n\t\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\t\tVCPUs: 3,\n\t\t\t\t\t\tRAM:   3 << 30,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tqueue.Update()\n\t\tpool := stubPool{\n\t\t\tquota:     quota,\n\t\t\tcanCreate: 3,\n\t\t}\n\t\tpool.Create(test.InstanceType(2))\n\t\tpool.Create(test.InstanceType(2))\n\t\tpool.bootAllInstances()\n\t\tsch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\t\tsch.sync()\n\t\tsch.runQueue()\n\t\tsch.sync()\n\t\tswitch quota {\n\t\tcase 1, 2:\n\t\t\t// Can't create a type3 node for ctr3, so we\n\t\t\t// shutdown the idle type2 nodes, and unlock\n\t\t\t// the 2nd-in-line container, but not the\n\t\t\t// 1st-in-line container.\n\t\t\tc.Check(pool.starts, check.HasLen, 0)\n\t\t\tc.Check(pool.shutdowns, check.Equals, 2)\n\t\t\tc.Check(pool.creates, check.HasLen, 2)\n\t\t\tc.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{\n\t\t\t\t{UUID: test.ContainerUUID(2), From: \"Locked\", To: \"Queued\"},\n\t\t\t})\n\t\tcase 3:\n\t\t\t// Creating a type3 instance works, so we\n\t\t\t// start ctr2 on a type2 instance, and leave\n\t\t\t// ctr3 locked while we wait for the new\n\t\t\t// instance to come up.\n\t\t\tc.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(2)})\n\t\t\tc.Check(pool.shutdowns, check.Equals, 0)\n\t\t\tc.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{\n\t\t\t\ttest.InstanceType(2),\n\t\t\t\ttest.InstanceType(2),\n\t\t\t\ttest.InstanceType(3),\n\t\t\t})\n\t\t\tc.Check(queue.StateChanges(), check.HasLen, 0)\n\t\tdefault:\n\t\t\tpanic(\"test not written for quota>3\")\n\t\t}\n\t}\n}\n\n// If pool.AtCapacity(it) is true for one instance type, try running a\n// lower-priority container that uses a different node type.  
Don't\n// lock/unlock/start any container that requires the affected instance\n// type.\nfunc (s *SchedulerSuite) TestInstanceCapacity(c *check.C) {\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tContainers: []arvados.Container{\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(1),\n\t\t\t\tPriority: 1,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(2),\n\t\t\t\tPriority: 2,\n\t\t\t\tState:    arvados.ContainerStateQueued,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 4,\n\t\t\t\t\tRAM:   4 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(3),\n\t\t\t\tPriority: 3,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 4,\n\t\t\t\t\tRAM:   4 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(4),\n\t\t\t\tPriority: 4,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 4,\n\t\t\t\t\tRAM:   4 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tqueue.Update()\n\tpool := stubPool{\n\t\tquota:     99,\n\t\tcapacity:  map[string]int{test.InstanceType(4).ProviderType: 1},\n\t\tcanCreate: 2,\n\t}\n\tpool.Create(test.InstanceType(4))\n\tpool.bootAllInstances()\n\tsch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\tsch.sync()\n\tsch.runQueue()\n\tsch.sync()\n\n\t// Start container4, but then pool reports AtCapacity for\n\t// type4, so we skip trying to create an instance for\n\t// container3, skip locking container2, but do try to create a\n\t// type1 instance for container1.\n\tc.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(4)})\n\tc.Check(pool.shutdowns, check.Equals, 0)\n\tc.Check(pool.creates, check.DeepEquals, []arvados.InstanceType{test.InstanceType(4), test.InstanceType(1)})\n\tc.Check(queue.StateChanges(), check.HasLen, 0)\n}\n\n// Don't unlock containers or shutdown unalloc (booting/idle) nodes\n// just because some 503 errors caused us to reduce maxContainers\n// below the current load level.\n//\n// We expect to raise maxContainers soon when we stop seeing 503s. 
If\n// that doesn't happen soon, the idle timeout will take care of the\n// excess nodes.\nfunc (s *SchedulerSuite) TestIdleIn503QuietPeriod(c *check.C) {\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tContainers: []arvados.Container{\n\t\t\t// scheduled on an instance (but not Running yet)\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(1),\n\t\t\t\tPriority: 1000,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 2,\n\t\t\t\t\tRAM:   2 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t// not yet scheduled\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(2),\n\t\t\t\tPriority: 1000,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 2,\n\t\t\t\t\tRAM:   2 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t// scheduled on an instance (but not Running yet)\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(3),\n\t\t\t\tPriority: 1000,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 3,\n\t\t\t\t\tRAM:   3 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t// not yet scheduled\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(4),\n\t\t\t\tPriority: 1000,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 3,\n\t\t\t\t\tRAM:   3 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t// not yet locked\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(5),\n\t\t\t\tPriority: 1000,\n\t\t\t\tState:    arvados.ContainerStateQueued,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 3,\n\t\t\t\t\tRAM:   3 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tqueue.Update()\n\tpool := stubPool{\n\t\tquota:     16,\n\t\tcreates:   []arvados.InstanceType{},\n\t\tstarts:    []string{},\n\t\tcanCreate: 4,\n\t}\n\tpool.Create(test.InstanceType(2))\n\tpool.Create(test.InstanceType(2))\n\tpool.Create(test.InstanceType(3))\n\tpool.Create(test.InstanceType(3))\n\tpool.bootAllInstances()\n\tinstances := pool.Instances()\n\tsort.Slice(instances, func(i, j int) bool { return instances[i].Price < instances[j].Price })\n\tpool.StartContainer(instances[0].Instance, queue.Containers[0])\n\tpool.StartContainer(instances[2].Instance, queue.Containers[2])\n\tsch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\tsch.last503time = time.Now()\n\tsch.maxContainers = 3\n\tsch.sync()\n\tsch.runQueue()\n\tsch.sync()\n\n\tc.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(1), test.ContainerUUID(3), test.ContainerUUID(2)})\n\tc.Check(pool.shutdowns, check.Equals, 0)\n\tc.Check(pool.creates, check.HasLen, 4)\n\tc.Check(queue.StateChanges(), check.HasLen, 0)\n}\n\n// If we somehow have more supervisor containers in Locked state than\n// we should (e.g., config changed since they started), and some\n// appropriate-sized instances booting up, unlock the excess\n// supervisor containers, but let the instances keep booting.\nfunc (s *SchedulerSuite) TestUnlockExcessSupervisors(c *check.C) {\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t}\n\tfor i := 1; i <= 6; i++ {\n\t\tqueue.Containers = append(queue.Containers, arvados.Container{\n\t\t\tUUID:     test.ContainerUUID(i),\n\t\t\tPriority: int64(1000 - i),\n\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\tRuntimeConstraints: 
arvados.RuntimeConstraints{\n\t\t\t\tVCPUs: 2,\n\t\t\t\tRAM:   2 << 30,\n\t\t\t},\n\t\t\tSchedulingParameters: arvados.SchedulingParameters{\n\t\t\t\tSupervisor: true,\n\t\t\t},\n\t\t})\n\t}\n\tqueue.Update()\n\tpool := stubPool{\n\t\tquota:     16,\n\t\tcreates:   []arvados.InstanceType{},\n\t\tstarts:    []string{},\n\t\tcanCreate: 6,\n\t}\n\tfor i := 0; i < 6; i++ {\n\t\tpool.Create(test.InstanceType(2))\n\t}\n\tpool.bootAllInstances()\n\tfor i := 0; i < 4; i++ {\n\t\tpool.StartContainer(pool.Instances()[i].Instance, queue.Containers[i])\n\t}\n\tsch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\tsch.sync()\n\tsch.runQueue()\n\tsch.sync()\n\n\tc.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(1), test.ContainerUUID(2), test.ContainerUUID(3), test.ContainerUUID(4)})\n\tc.Check(pool.shutdowns, check.Equals, 0)\n\tc.Check(pool.creates, check.HasLen, 6)\n\tc.Check(queue.StateChanges(), check.DeepEquals, []test.QueueStateChange{\n\t\t{UUID: test.ContainerUUID(5), From: \"Locked\", To: \"Queued\"},\n\t\t{UUID: test.ContainerUUID(6), From: \"Locked\", To: \"Queued\"},\n\t})\n}\n\n// Assuming we're not at quota, don't try to shutdown idle nodes\n// merely because we have more queued/locked supervisor containers\n// than MaxSupervisors -- it won't help.\nfunc (s *SchedulerSuite) TestExcessSupervisors(c *check.C) {\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t}\n\tfor i := 1; i <= 8; i++ {\n\t\tqueue.Containers = append(queue.Containers, arvados.Container{\n\t\t\tUUID:     test.ContainerUUID(i),\n\t\t\tPriority: int64(1000 + i),\n\t\t\tState:    arvados.ContainerStateQueued,\n\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\tVCPUs: 2,\n\t\t\t\tRAM:   2 << 30,\n\t\t\t},\n\t\t\tSchedulingParameters: arvados.SchedulingParameters{\n\t\t\t\tSupervisor: true,\n\t\t\t},\n\t\t})\n\t}\n\tfor i := 2; i < 4; i++ {\n\t\tqueue.Containers[i].State = arvados.ContainerStateLocked\n\t}\n\tqueue.Update()\n\tpool := stubPool{\n\t\tquota:     16,\n\t\tcreates:   []arvados.InstanceType{},\n\t\tstarts:    []string{},\n\t\tcanCreate: 3,\n\t}\n\tpool.Create(test.InstanceType(2))\n\tpool.Create(test.InstanceType(2))\n\tpool.Create(test.InstanceType(2))\n\tpool.bootAllInstances()\n\tsch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\tsch.sync()\n\tsch.runQueue()\n\tsch.sync()\n\n\tc.Check(pool.starts, check.HasLen, 2)\n\tc.Check(pool.shutdowns, check.Equals, 0)\n\tc.Check(pool.creates, check.HasLen, 3)\n\tc.Check(queue.StateChanges(), check.HasLen, 0)\n}\n\n// Don't flap lock/unlock when equal-priority containers compete for\n// limited workers.\n//\n// (Unless we use FirstSeenAt as a secondary sort key, each runQueue()\n// tends to choose a different one of the equal-priority containers as\n// the \"first\" one that should be locked, and unlock the one it chose\n// last time. 
This generates logging noise, and fails containers by\n// reaching MaxDispatchAttempts quickly.)\nfunc (s *SchedulerSuite) TestEqualPriorityContainers(c *check.C) {\n\tlogger := ctxlog.TestLogger(c)\n\tctx := ctxlog.Context(context.Background(), logger)\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tLogger:     logger,\n\t}\n\tfor i := 0; i < 8; i++ {\n\t\tqueue.Containers = append(queue.Containers, arvados.Container{\n\t\t\tUUID:     test.ContainerUUID(i),\n\t\t\tPriority: 333,\n\t\t\tState:    arvados.ContainerStateQueued,\n\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\tVCPUs: 3,\n\t\t\t\tRAM:   3 << 30,\n\t\t\t},\n\t\t})\n\t}\n\tqueue.Update()\n\tpool := stubPool{\n\t\tquota:     2,\n\t\tcreates:   []arvados.InstanceType{},\n\t\tstarts:    []string{},\n\t\tcanCreate: 2,\n\t}\n\tpool.Create(test.InstanceType(3))\n\tpool.Create(test.InstanceType(3))\n\tpool.bootAllInstances()\n\tsch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\tfor i := 0; i < 30; i++ {\n\t\tsch.runQueue()\n\t\tsch.sync()\n\t\ttime.Sleep(time.Millisecond)\n\t}\n\tc.Check(pool.shutdowns, check.Equals, 0)\n\tc.Check(pool.starts, check.HasLen, 2)\n\tunlocked := map[string]int{}\n\tfor _, chg := range queue.StateChanges() {\n\t\tif chg.To == arvados.ContainerStateQueued {\n\t\t\tunlocked[chg.UUID]++\n\t\t}\n\t}\n\tfor uuid, count := range unlocked {\n\t\tc.Check(count, check.Equals, 1, check.Commentf(\"%s\", uuid))\n\t}\n}\n\n// Start lower-priority containers while waiting for new/existing\n// workers to come up for higher-priority containers.\nfunc (s *SchedulerSuite) TestStartWhileCreating(c *check.C) {\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tContainers: []arvados.Container{\n\t\t\t{\n\t\t\t\t// create a new worker\n\t\t\t\tUUID:     test.ContainerUUID(1),\n\t\t\t\tPriority: 1,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\t// tentatively map to unalloc worker\n\t\t\t\tUUID:     test.ContainerUUID(2),\n\t\t\t\tPriority: 2,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\t// start now on idle worker\n\t\t\t\tUUID:     test.ContainerUUID(3),\n\t\t\t\tPriority: 3,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\t// create a new worker\n\t\t\t\tUUID:     test.ContainerUUID(4),\n\t\t\t\tPriority: 4,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 2,\n\t\t\t\t\tRAM:   2 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\t// tentatively map to unalloc worker\n\t\t\t\tUUID:     test.ContainerUUID(5),\n\t\t\t\tPriority: 5,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 2,\n\t\t\t\t\tRAM:   2 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\t// start now on idle worker\n\t\t\t\tUUID:     test.ContainerUUID(6),\n\t\t\t\tPriority: 6,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 2,\n\t\t\t\t\tRAM:   2 << 
30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tqueue.Update()\n\n\tpool := stubPool{\n\t\tquota:     1000,\n\t\tcanCreate: 4,\n\t}\n\tpool.Create(test.InstanceType(1))\n\tpool.Create(test.InstanceType(1))\n\tpool.workers[\"i-0000002\"].WorkerState = worker.StateIdle\n\tpool.Create(test.InstanceType(2))\n\tpool.Create(test.InstanceType(2))\n\tpool.workers[\"i-0000004\"].WorkerState = worker.StateIdle\n\n\tNew(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster).runQueue()\n\tc.Check(pool.creates, check.HasLen, 6)\n\tc.Check(pool.starts, check.DeepEquals, []string{test.ContainerUUID(6), test.ContainerUUID(3)})\n\trunning := map[string]bool{}\n\tfor uuid, t := range pool.running {\n\t\tif t.IsZero() {\n\t\t\trunning[uuid] = false\n\t\t} else {\n\t\t\trunning[uuid] = true\n\t\t}\n\t}\n\tc.Check(running, check.DeepEquals, map[string]bool{test.ContainerUUID(3): false, test.ContainerUUID(6): false})\n}\n\nfunc (s *SchedulerSuite) TestKillNonexistentContainer(c *check.C) {\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tContainers: []arvados.Container{\n\t\t\t{\n\t\t\t\t// create a new worker\n\t\t\t\tUUID:     test.ContainerUUID(1),\n\t\t\t\tPriority: 1,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tpool := stubPool{\n\t\tquota:     1000,\n\t\tcanCreate: 1,\n\t}\n\tqueue.Update()\n\tpool.Create(test.InstanceType(2))\n\tpool.bootAllInstances()\n\tpool.StartContainer(\"i-0000001\", arvados.Container{UUID: test.ContainerUUID(2)})\n\tc.Check(pool.Running(), check.HasLen, 1)\n\tsch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\tc.Check(pool.running, check.HasLen, 1)\n\tsch.sync()\n\tfor deadline := time.Now().Add(time.Second); len(pool.Running()) > 0 && time.Now().Before(deadline); time.Sleep(time.Millisecond) {\n\t}\n\tc.Check(pool.Running(), check.HasLen, 0)\n}\n\nfunc (s *SchedulerSuite) TestContainersMetrics(c *check.C) {\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tContainers: []arvados.Container{\n\t\t\t{\n\t\t\t\tUUID:      test.ContainerUUID(1),\n\t\t\t\tPriority:  1,\n\t\t\t\tState:     arvados.ContainerStateLocked,\n\t\t\t\tCreatedAt: time.Now().Add(-10 * time.Second),\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tqueue.Update()\n\n\t// Create a pool with one worker in booting state.  The\n\t// container will be allocated but not started yet.\n\tpool := stubPool{\n\t\tcanCreate: 1,\n\t}\n\t_, ok := pool.Create(test.InstanceType(1))\n\tc.Assert(ok, check.Equals, true)\n\tsch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\tsch.runQueue()\n\tsch.updateMetrics()\n\n\tc.Check(int(testutil.ToFloat64(sch.mContainersAllocatedNotStarted)), check.Equals, 1)\n\tc.Check(int(testutil.ToFloat64(sch.mContainersNotAllocatedOverQuota)), check.Equals, 0)\n\tc.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 10)\n\n\t// Create a pool without workers. 
The queued container will\n\t// not be started, and the 'over quota' metric will be 1\n\t// because no workers are available and canCreate defaults to\n\t// zero.\n\tpool = stubPool{}\n\tsch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\tsch.runQueue()\n\tsch.updateMetrics()\n\n\tc.Check(int(testutil.ToFloat64(sch.mContainersAllocatedNotStarted)), check.Equals, 0)\n\tc.Check(int(testutil.ToFloat64(sch.mContainersNotAllocatedOverQuota)), check.Equals, 1)\n\tc.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 10)\n\n\t// Reset the queue, and create a pool with an idle worker. The\n\t// queued container will be started immediately and\n\t// mLongestWaitTimeSinceQueue should be zero.\n\tqueue = test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tContainers: []arvados.Container{\n\t\t\t{\n\t\t\t\tUUID:      test.ContainerUUID(1),\n\t\t\t\tPriority:  1,\n\t\t\t\tState:     arvados.ContainerStateLocked,\n\t\t\t\tCreatedAt: time.Now().Add(-10 * time.Second),\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tqueue.Update()\n\n\tpool = stubPool{\n\t\tcanCreate: 1,\n\t}\n\tpool.Create(test.InstanceType(1))\n\tpool.bootAllInstances()\n\tsch = New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\tsch.runQueue()\n\tsch.updateMetrics()\n\n\tc.Check(int(testutil.ToFloat64(sch.mLongestWaitTimeSinceQueue)), check.Equals, 0)\n}\n\n// Assign priority=4, 3 and 1 containers to idle nodes. Ignore the\n// supervisor at priority 2.\nfunc (s *SchedulerSuite) TestSkipSupervisors(c *check.C) {\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tContainers: []arvados.Container{\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(1),\n\t\t\t\tPriority: 1,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(2),\n\t\t\t\tPriority: 2,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t\tSchedulingParameters: arvados.SchedulingParameters{\n\t\t\t\t\tSupervisor: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(3),\n\t\t\t\tPriority: 3,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t\tSchedulingParameters: arvados.SchedulingParameters{\n\t\t\t\t\tSupervisor: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(4),\n\t\t\t\tPriority: 4,\n\t\t\t\tState:    arvados.ContainerStateLocked,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t\tSchedulingParameters: arvados.SchedulingParameters{\n\t\t\t\t\tSupervisor: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tqueue.Update()\n\tpool := stubPool{\n\t\tquota:     1000,\n\t\trunning:   map[string]time.Time{},\n\t\tcanCreate: 8,\n\t}\n\tfor i := 0; i < 8; i++ {\n\t\tpool.Create(test.InstanceType(i/4 + 1))\n\t}\n\tpool.bootAllInstances()\n\tNew(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster).runQueue()\n\tc.Check(pool.creates, check.HasLen, 8)\n\tc.Check(pool.starts, check.DeepEquals, 
[]string{test.ContainerUUID(4), test.ContainerUUID(3), test.ContainerUUID(1)})\n}\n"
  },
  {
    "path": "lib/dispatchcloud/scheduler/scheduler.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Package scheduler uses a resizable worker pool to execute\n// containers in priority order.\npackage scheduler\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// A Scheduler maps queued containers onto unallocated workers in\n// priority order, creating new workers if needed. It locks containers\n// that can be mapped onto existing/pending workers, and starts them\n// if possible.\n//\n// A Scheduler unlocks any containers that are locked but can't be\n// mapped. (For example, this happens when the cloud provider reaches\n// quota/capacity and a previously mappable container's priority is\n// surpassed by a newer container.)\n//\n// If it encounters errors while creating new workers, a Scheduler\n// shuts down idle workers, in case they are consuming quota.\ntype Scheduler struct {\n\tlogger  logrus.FieldLogger\n\tclient  *arvados.Client\n\tqueue   ContainerQueue\n\tpool    WorkerPool\n\treg     *prometheus.Registry\n\tcluster *arvados.Cluster\n\n\tuuidOp map[string]string // operation in progress: \"lock\", \"cancel\", ...\n\tmtx    sync.Mutex\n\twakeup *time.Timer\n\n\trunOnce sync.Once\n\tstop    chan struct{}\n\tstopped chan struct{}\n\n\tlast503time          time.Time // last time API responded 503\n\tmaxContainers        int       // dynamic container limit (0 = unlimited), see runQueue()\n\tinstancesWithinQuota int       // max concurrency achieved since last quota error (0 = no quota error yet)\n\n\tmContainersAllocatedNotStarted   prometheus.Gauge\n\tmContainersNotAllocatedOverQuota prometheus.Gauge\n\tmLongestWaitTimeSinceQueue       prometheus.Gauge\n\tmLast503Time                     prometheus.Gauge\n\tmMaxContainerConcurrency         prometheus.Gauge\n\n\tlastQueue atomic.Value // stores a []QueueEnt\n}\n\n// New returns a new unstarted Scheduler.\n//\n// Any given queue and pool should not be used by more than one\n// scheduler at a time.\nfunc New(ctx context.Context, client *arvados.Client, queue ContainerQueue, pool WorkerPool, reg *prometheus.Registry, cluster *arvados.Cluster) *Scheduler {\n\tsch := &Scheduler{\n\t\tlogger:  ctxlog.FromContext(ctx),\n\t\tclient:  client,\n\t\tqueue:   queue,\n\t\tpool:    pool,\n\t\treg:     reg,\n\t\tcluster: cluster,\n\t\twakeup:  time.NewTimer(time.Second),\n\t\tstop:    make(chan struct{}),\n\t\tstopped: make(chan struct{}),\n\t\tuuidOp:  map[string]string{},\n\t}\n\tminQuota := cluster.Containers.CloudVMs.InitialQuotaEstimate\n\tif minQuota > 0 {\n\t\tsch.maxContainers = minQuota\n\t} else {\n\t\tsch.maxContainers = cluster.Containers.CloudVMs.MaxInstances\n\t}\n\tsch.registerMetrics(reg)\n\treturn sch\n}\n\nfunc (sch *Scheduler) registerMetrics(reg *prometheus.Registry) {\n\tif reg == nil {\n\t\treg = prometheus.NewRegistry()\n\t}\n\tsch.mContainersAllocatedNotStarted = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"containers_allocated_not_started\",\n\t\tHelp:      \"Number of containers allocated to a worker but not started yet (worker is booting).\",\n\t})\n\treg.MustRegister(sch.mContainersAllocatedNotStarted)\n\tsch.mContainersNotAllocatedOverQuota = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: 
\"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"containers_not_allocated_over_quota\",\n\t\tHelp:      \"Number of containers not allocated to a worker because the system has hit a quota.\",\n\t})\n\treg.MustRegister(sch.mContainersNotAllocatedOverQuota)\n\tsch.mLongestWaitTimeSinceQueue = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"containers_longest_wait_time_seconds\",\n\t\tHelp:      \"Current longest wait time of any container since queuing, and before the start of crunch-run.\",\n\t})\n\treg.MustRegister(sch.mLongestWaitTimeSinceQueue)\n\tsch.mLast503Time = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"last_503_time\",\n\t\tHelp:      \"Time of most recent 503 error received from API.\",\n\t})\n\treg.MustRegister(sch.mLast503Time)\n\tsch.mMaxContainerConcurrency = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"max_concurrent_containers\",\n\t\tHelp:      \"Dynamically assigned limit on number of containers scheduled concurrency, set after receiving 503 errors from API.\",\n\t})\n\treg.MustRegister(sch.mMaxContainerConcurrency)\n\treg.MustRegister(prometheus.NewGaugeFunc(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"at_quota\",\n\t\tHelp:      \"Flag indicating the cloud driver is reporting an at-quota condition.\",\n\t}, func() float64 {\n\t\tif sch.pool.AtQuota() {\n\t\t\treturn 1\n\t\t} else {\n\t\t\treturn 0\n\t\t}\n\t}))\n}\n\nfunc (sch *Scheduler) updateMetrics() {\n\tearliest := time.Time{}\n\tentries, _ := sch.queue.Entries()\n\trunning := sch.pool.Running()\n\tfor _, ent := range entries {\n\t\tif ent.Container.Priority > 0 &&\n\t\t\t(ent.Container.State == arvados.ContainerStateQueued || ent.Container.State == arvados.ContainerStateLocked) {\n\t\t\t// Exclude containers that are preparing to run the payload (i.e.\n\t\t\t// ContainerStateLocked and running on a worker, most likely loading the\n\t\t\t// payload image\n\t\t\tif _, ok := running[ent.Container.UUID]; !ok {\n\t\t\t\tif ent.Container.CreatedAt.Before(earliest) || earliest.IsZero() {\n\t\t\t\t\tearliest = ent.Container.CreatedAt\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif !earliest.IsZero() {\n\t\tsch.mLongestWaitTimeSinceQueue.Set(time.Since(earliest).Seconds())\n\t} else {\n\t\tsch.mLongestWaitTimeSinceQueue.Set(0)\n\t}\n}\n\n// Start starts the scheduler.\nfunc (sch *Scheduler) Start() {\n\tgo sch.runOnce.Do(sch.run)\n}\n\n// Stop stops the scheduler. 
No other method should be called after\n// Stop.\nfunc (sch *Scheduler) Stop() {\n\tclose(sch.stop)\n\t<-sch.stopped\n}\n\nfunc (sch *Scheduler) run() {\n\tdefer close(sch.stopped)\n\n\tupdateInterval := time.Duration(sch.cluster.Containers.CloudVMs.PollInterval)\n\n\t// Ensure the queue is fetched once before attempting anything.\n\tfor err := sch.queue.Update(); err != nil; err = sch.queue.Update() {\n\t\tsch.logger.Errorf(\"error updating queue: %s\", err)\n\t\td := updateInterval / 10\n\t\tif d < time.Second {\n\t\t\td = time.Second\n\t\t}\n\t\tsch.logger.Infof(\"waiting %s before retry\", d)\n\t\ttime.Sleep(d)\n\t}\n\n\t// Keep the queue up to date.\n\tgo func() {\n\t\tfor {\n\t\t\tstarttime := time.Now()\n\t\t\terr := sch.queue.Update()\n\t\t\tif err != nil {\n\t\t\t\tsch.logger.Errorf(\"error updating queue: %s\", err)\n\t\t\t}\n\t\t\t// If the previous update took a long time,\n\t\t\t// that probably means the server is\n\t\t\t// overloaded, so wait that long before doing\n\t\t\t// another. Otherwise, wait for the configured\n\t\t\t// poll interval.\n\t\t\tdelay := time.Since(starttime)\n\t\t\tif delay < updateInterval {\n\t\t\t\tdelay = updateInterval\n\t\t\t}\n\t\t\ttime.Sleep(delay)\n\t\t}\n\t}()\n\n\tt0 := time.Now()\n\tsch.logger.Infof(\"FixStaleLocks starting.\")\n\tsch.fixStaleLocks()\n\tsch.logger.Infof(\"FixStaleLocks finished (%s), starting scheduling.\", time.Since(t0))\n\n\tpoolNotify := sch.pool.Subscribe()\n\tdefer sch.pool.Unsubscribe(poolNotify)\n\n\tqueueNotify := sch.queue.Subscribe()\n\tdefer sch.queue.Unsubscribe(queueNotify)\n\n\tfor {\n\t\tsch.runQueue()\n\t\tsch.sync()\n\t\tsch.updateMetrics()\n\t\tselect {\n\t\tcase <-sch.stop:\n\t\t\treturn\n\t\tcase <-queueNotify:\n\t\tcase <-poolNotify:\n\t\tcase <-sch.wakeup.C:\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/dispatchcloud/scheduler/sync.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage scheduler\n\nimport (\n\t\"fmt\"\n\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/container\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/worker\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nvar reportedUnexpectedState = false\n\n// sync resolves discrepancies between the queue and the pool:\n//\n// Lingering crunch-run processes for finalized and unlocked/requeued\n// containers are killed.\n//\n// Locked containers whose crunch-run processes have exited are\n// requeued.\n//\n// Running containers whose crunch-run processes have exited are\n// cancelled.\nfunc (sch *Scheduler) sync() {\n\tanyUnknownWorkers := sch.pool.CountWorkers()[worker.StateUnknown] > 0\n\trunning := sch.pool.Running()\n\tqEntries, qUpdated := sch.queue.Entries()\n\tfor uuid, ent := range qEntries {\n\t\texited, running := running[uuid]\n\t\tswitch ent.Container.State {\n\t\tcase arvados.ContainerStateRunning:\n\t\t\tif !running {\n\t\t\t\tif !anyUnknownWorkers {\n\t\t\t\t\tgo sch.cancel(uuid, \"not running on any worker\")\n\t\t\t\t}\n\t\t\t} else if !exited.IsZero() && qUpdated.After(exited) {\n\t\t\t\tgo sch.cancel(uuid, \"state=Running after crunch-run exited\")\n\t\t\t} else if ent.Container.Priority == 0 {\n\t\t\t\tgo sch.kill(uuid, \"priority=0\")\n\t\t\t}\n\t\tcase arvados.ContainerStateComplete, arvados.ContainerStateCancelled:\n\t\t\tif running {\n\t\t\t\t// Kill crunch-run in case it's stuck;\n\t\t\t\t// nothing it does now will matter\n\t\t\t\t// anyway. If crunch-run has already\n\t\t\t\t// exited and we just haven't found\n\t\t\t\t// out about it yet, the only effect\n\t\t\t\t// of kill() will be to make the\n\t\t\t\t// worker available for the next\n\t\t\t\t// container.\n\t\t\t\tgo sch.kill(uuid, fmt.Sprintf(\"state=%s\", ent.Container.State))\n\t\t\t} else {\n\t\t\t\tsch.logger.WithFields(logrus.Fields{\n\t\t\t\t\t\"ContainerUUID\": uuid,\n\t\t\t\t\t\"State\":         ent.Container.State,\n\t\t\t\t}).Info(\"container finished -- dropping from queue\")\n\t\t\t\tsch.queue.Forget(uuid)\n\t\t\t}\n\t\tcase arvados.ContainerStateQueued:\n\t\t\tif running {\n\t\t\t\t// Can happen if a worker returns from\n\t\t\t\t// a network outage and is still\n\t\t\t\t// preparing to run a container that\n\t\t\t\t// has already been unlocked/requeued.\n\t\t\t\tgo sch.kill(uuid, fmt.Sprintf(\"pool says running, but queue says state=%s\", ent.Container.State))\n\t\t\t} else if ent.Container.Priority == 0 {\n\t\t\t\tsch.logger.WithFields(logrus.Fields{\n\t\t\t\t\t\"ContainerUUID\": uuid,\n\t\t\t\t\t\"State\":         ent.Container.State,\n\t\t\t\t\t\"Priority\":      ent.Container.Priority,\n\t\t\t\t}).Info(\"container on hold -- dropping from queue\")\n\t\t\t\tsch.queue.Forget(uuid)\n\t\t\t}\n\t\tcase arvados.ContainerStateLocked:\n\t\t\tif running && !exited.IsZero() && qUpdated.After(exited) {\n\t\t\t\tgo sch.requeue(ent, \"crunch-run exited\")\n\t\t\t} else if running && exited.IsZero() && ent.Container.Priority == 0 {\n\t\t\t\tgo sch.kill(uuid, \"priority=0\")\n\t\t\t} else if !running && ent.Container.Priority == 0 {\n\t\t\t\tgo sch.requeue(ent, \"priority=0\")\n\t\t\t}\n\t\tdefault:\n\t\t\tif !reportedUnexpectedState {\n\t\t\t\tsch.logger.WithFields(logrus.Fields{\n\t\t\t\t\t\"ContainerUUID\": uuid,\n\t\t\t\t\t\"State\":         ent.Container.State,\n\t\t\t\t}).Error(\"BUG: unexpected state\")\n\t\t\t\treportedUnexpectedState = 
true\n\t\t\t}\n\t\t}\n\t}\n\tfor uuid := range running {\n\t\tif _, known := qEntries[uuid]; !known {\n\t\t\tgo sch.kill(uuid, \"not in queue\")\n\t\t}\n\t}\n}\n\nfunc (sch *Scheduler) cancel(uuid string, reason string) {\n\tif !sch.uuidLock(uuid, \"cancel\") {\n\t\treturn\n\t}\n\tdefer sch.uuidUnlock(uuid)\n\tlogger := sch.logger.WithField(\"ContainerUUID\", uuid)\n\tlogger.Infof(\"cancelling container because %s\", reason)\n\terr := sch.queue.Cancel(uuid)\n\tif err != nil {\n\t\tlogger.WithError(err).Print(\"error cancelling container\")\n\t}\n}\n\nfunc (sch *Scheduler) kill(uuid string, reason string) {\n\tif !sch.uuidLock(uuid, \"kill\") {\n\t\treturn\n\t}\n\tdefer sch.uuidUnlock(uuid)\n\tsch.logger.WithFields(logrus.Fields{\n\t\t\"ContainerUUID\": uuid,\n\t\t\"reason\":        reason,\n\t}).Debug(\"kill\")\n\tsch.pool.KillContainer(uuid, reason)\n\tsch.pool.ForgetContainer(uuid)\n}\n\nfunc (sch *Scheduler) requeue(ent container.QueueEnt, reason string) {\n\tuuid := ent.Container.UUID\n\tif !sch.uuidLock(uuid, \"requeue\") {\n\t\treturn\n\t}\n\tdefer sch.uuidUnlock(uuid)\n\tlogger := sch.logger.WithFields(logrus.Fields{\n\t\t\"ContainerUUID\": uuid,\n\t\t\"State\":         ent.Container.State,\n\t\t\"Priority\":      ent.Container.Priority,\n\t})\n\tlogger.Infof(\"requeueing locked container because %s\", reason)\n\terr := sch.queue.Unlock(uuid)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"error requeueing container\")\n\t}\n}\n"
  },
  {
    "path": "lib/dispatchcloud/scheduler/sync_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage scheduler\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/test\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/worker\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Ensure the scheduler expunges containers from the queue when they\n// are no longer relevant (completed and not running, queued with\n// priority 0, etc).\nfunc (s *SchedulerSuite) TestForgetIrrelevantContainers(c *check.C) {\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tpool := stubPool{}\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tContainers: []arvados.Container{\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(1),\n\t\t\t\tPriority: 0,\n\t\t\t\tState:    arvados.ContainerStateQueued,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(2),\n\t\t\t\tPriority: 12345,\n\t\t\t\tState:    arvados.ContainerStateComplete,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tqueue.Update()\n\n\tents, _ := queue.Entries()\n\tc.Check(ents, check.HasLen, 1)\n\n\tsch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\tsch.sync()\n\n\tents, _ = queue.Entries()\n\tc.Check(ents, check.HasLen, 0)\n}\n\nfunc (s *SchedulerSuite) TestCancelOrphanedContainers(c *check.C) {\n\tctx := ctxlog.Context(context.Background(), ctxlog.TestLogger(c))\n\tpool := stubPool{\n\t\tcanCreate: 1,\n\t}\n\tpool.Create(test.InstanceType(1))\n\tfor _, w := range pool.workers {\n\t\tw.WorkerState = worker.StateUnknown\n\t}\n\tc.Assert(pool.CountWorkers()[worker.StateUnknown], check.Equals, 1)\n\tqueue := test.Queue{\n\t\tChooseType: s.chooseType,\n\t\tContainers: []arvados.Container{\n\t\t\t{\n\t\t\t\tUUID:     test.ContainerUUID(1),\n\t\t\t\tPriority: 0,\n\t\t\t\tState:    arvados.ContainerStateRunning,\n\t\t\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\t\t\tVCPUs: 1,\n\t\t\t\t\tRAM:   1 << 30,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tqueue.Update()\n\n\tents, _ := queue.Entries()\n\tc.Check(ents, check.HasLen, 1)\n\n\tsch := New(ctx, arvados.NewClientFromEnv(), &queue, &pool, nil, &s.testCluster)\n\n\t// Sync shouldn't cancel the container because it might be\n\t// running on the VM with state==\"unknown\".\n\t//\n\t// (Cancel+forget happens asynchronously and requires multiple\n\t// sync() calls, so even after 10x sync-and-sleep iterations,\n\t// we aren't 100% confident that sync isn't trying to\n\t// cancel. But in the test environment, the goroutines started\n\t// by sync() access stubs and therefore run quickly, so it\n\t// works fine in practice. 
We accept that if the code is\n\t// broken, the test will still pass occasionally.)\n\tfor i := 0; i < 10; i++ {\n\t\tsch.sync()\n\t\ttime.Sleep(time.Millisecond)\n\t}\n\tents, _ = queue.Entries()\n\tc.Check(ents, check.HasLen, 1)\n\tc.Check(ents[test.ContainerUUID(1)].Container.State, check.Equals, arvados.ContainerStateRunning)\n\n\t// Sync should cancel & forget the container when the\n\t// \"unknown\" node goes away.\n\t//\n\t// (As above, cancel+forget is async and requires multiple\n\t// sync() calls, but stubs are fast so in practice this takes\n\t// much less than 1s to complete.)\n\tpool.workers = nil\n\tfor deadline := time.Now().Add(time.Second); ; time.Sleep(time.Millisecond) {\n\t\tsch.sync()\n\t\tents, _ = queue.Entries()\n\t\tif len(ents) == 0 || time.Now().After(deadline) {\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Check(ents, check.HasLen, 0)\n}\n"
  },
  {
    "path": "lib/dispatchcloud/sshexecutor/executor.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Package sshexecutor provides an implementation of pool.Executor\n// using a long-lived multiplexed SSH session.\npackage sshexecutor\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\nvar ErrNoAddress = errors.New(\"instance has no address\")\n\n// New returns a new Executor, using the given target.\nfunc New(t cloud.ExecutorTarget) *Executor {\n\treturn &Executor{target: t}\n}\n\n// An Executor uses a multiplexed SSH connection to execute shell\n// commands on a remote target. It reconnects automatically after\n// errors.\n//\n// When setting up a connection, the Executor accepts whatever host\n// key is provided by the remote server, then passes the received key\n// and the SSH connection to the target's VerifyHostKey method before\n// executing commands on the connection.\n//\n// A zero Executor must not be used before calling SetTarget.\n//\n// An Executor must not be copied.\ntype Executor struct {\n\ttarget     cloud.ExecutorTarget\n\ttargetPort string\n\ttargetUser string\n\tsigners    []ssh.Signer\n\tmtx        sync.RWMutex // controls access to instance after creation\n\n\tclient      *ssh.Client\n\tclientErr   error\n\tclientOnce  sync.Once     // initialized private state\n\tclientSetup chan bool     // len>0 while client setup is in progress\n\thostKey     ssh.PublicKey // most recent host key that passed verification, if any\n}\n\n// SetSigners updates the set of private keys that will be offered to\n// the target next time the Executor sets up a new connection.\nfunc (exr *Executor) SetSigners(signers ...ssh.Signer) {\n\texr.mtx.Lock()\n\tdefer exr.mtx.Unlock()\n\texr.signers = signers\n}\n\n// SetTarget sets the current target. The new target will be used next\n// time a new connection is set up; until then, the Executor will\n// continue to use the existing target.\n//\n// The new target is assumed to represent the same host as the\n// previous target, although its address and host key might differ.\nfunc (exr *Executor) SetTarget(t cloud.ExecutorTarget) {\n\texr.mtx.Lock()\n\tdefer exr.mtx.Unlock()\n\texr.target = t\n}\n\n// SetTargetPort sets the default port (name or number) to connect\n// to. This is used only when the address returned by the target's\n// Address() method does not specify a port. If the given port is\n// empty (or SetTargetPort is not called at all), the default port is\n// \"ssh\".\nfunc (exr *Executor) SetTargetPort(port string) {\n\texr.mtx.Lock()\n\tdefer exr.mtx.Unlock()\n\texr.targetPort = port\n}\n\n// Target returns the current target.\nfunc (exr *Executor) Target() cloud.ExecutorTarget {\n\texr.mtx.RLock()\n\tdefer exr.mtx.RUnlock()\n\treturn exr.target\n}\n\n// Execute runs cmd on the target. 
If an existing connection is not\n// usable, it sets up a new connection to the current target.\nfunc (exr *Executor) Execute(env map[string]string, cmd string, stdin io.Reader) ([]byte, []byte, error) {\n\tsession, err := exr.newSession()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer session.Close()\n\tfor k, v := range env {\n\t\terr = session.Setenv(k, v)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\tvar stdout, stderr bytes.Buffer\n\tsession.Stdin = stdin\n\tsession.Stdout = &stdout\n\tsession.Stderr = &stderr\n\terr = session.Run(cmd)\n\treturn stdout.Bytes(), stderr.Bytes(), err\n}\n\n// Close shuts down any active connections.\nfunc (exr *Executor) Close() {\n\t// Ensure exr is initialized\n\texr.sshClient(false)\n\n\texr.clientSetup <- true\n\tif exr.client != nil {\n\t\tdefer exr.client.Close()\n\t}\n\texr.client, exr.clientErr = nil, errors.New(\"closed\")\n\t<-exr.clientSetup\n}\n\n// Create a new SSH session. If session setup fails or the SSH client\n// hasn't been set up yet, set up a new SSH client and try again.\nfunc (exr *Executor) newSession() (*ssh.Session, error) {\n\ttry := func(create bool) (*ssh.Session, error) {\n\t\tclient, err := exr.sshClient(create)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn client.NewSession()\n\t}\n\tsession, err := try(false)\n\tif err != nil {\n\t\tsession, err = try(true)\n\t}\n\treturn session, err\n}\n\n// Get the latest SSH client. If another goroutine is in the process\n// of setting one up, wait for it to finish and return its result (or\n// the last successfully set up client, if it fails).\nfunc (exr *Executor) sshClient(create bool) (*ssh.Client, error) {\n\texr.clientOnce.Do(func() {\n\t\texr.clientSetup = make(chan bool, 1)\n\t\texr.clientErr = errors.New(\"client not yet created\")\n\t})\n\tdefer func() { <-exr.clientSetup }()\n\tselect {\n\tcase exr.clientSetup <- true:\n\t\tif create {\n\t\t\tclient, err := exr.setupSSHClient()\n\t\t\tif err == nil || exr.client == nil {\n\t\t\t\tif exr.client != nil {\n\t\t\t\t\t// Hang up the previous\n\t\t\t\t\t// (non-working) client\n\t\t\t\t\tgo exr.client.Close()\n\t\t\t\t}\n\t\t\t\texr.client, exr.clientErr = client, err\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\tdefault:\n\t\t// Another goroutine is doing the above case.  Wait\n\t\t// for it to finish and return whatever it leaves in\n\t\t// exr.client.\n\t\texr.clientSetup <- true\n\t}\n\treturn exr.client, exr.clientErr\n}\n\n
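// TargetHostPort returns the target's address as a (host, port)\n// pair. If the target address does not specify a port, the port\n// given to SetTargetPort (or \"ssh\" by default) is used.\nfunc (exr *Executor) TargetHostPort() (string, string) {\n\taddr := exr.Target().Address()\n\tif addr == \"\" {\n\t\treturn \"\", \"\"\n\t}\n\th, p, err := net.SplitHostPort(addr)\n\tif err != nil || p == \"\" {\n\t\t// Target address does not specify a port.  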
Use\n\t\t// targetPort, or \"ssh\".\n\t\tif h == \"\" {\n\t\t\th = addr\n\t\t}\n\t\tif p = exr.targetPort; p == \"\" {\n\t\t\tp = \"ssh\"\n\t\t}\n\t}\n\treturn h, p\n}\n\n// Create a new SSH client.\nfunc (exr *Executor) setupSSHClient() (*ssh.Client, error) {\n\taddr := net.JoinHostPort(exr.TargetHostPort())\n\tif addr == \":\" {\n\t\treturn nil, ErrNoAddress\n\t}\n\tvar receivedKey ssh.PublicKey\n\tclient, err := ssh.Dial(\"tcp\", addr, &ssh.ClientConfig{\n\t\tUser: exr.Target().RemoteUser(),\n\t\tAuth: []ssh.AuthMethod{\n\t\t\tssh.PublicKeys(exr.signers...),\n\t\t},\n\t\tHostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {\n\t\t\treceivedKey = key\n\t\t\treturn nil\n\t\t},\n\t\tTimeout: time.Minute,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t} else if receivedKey == nil {\n\t\treturn nil, errors.New(\"BUG: key was never provided to HostKeyCallback\")\n\t}\n\n\tif exr.hostKey == nil || !bytes.Equal(exr.hostKey.Marshal(), receivedKey.Marshal()) {\n\t\terr = exr.Target().VerifyHostKey(receivedKey, client)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\texr.hostKey = receivedKey\n\t}\n\treturn client, nil\n}\n"
  },
  {
    "path": "lib/dispatchcloud/sshexecutor/executor_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage sshexecutor\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/test\"\n\t\"golang.org/x/crypto/ssh\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&ExecutorSuite{})\n\ntype testTarget struct {\n\ttest.SSHService\n}\n\nfunc (*testTarget) VerifyHostKey(ssh.PublicKey, *ssh.Client) error {\n\treturn nil\n}\n\n// Address returns the wrapped SSHService's host, with the port\n// stripped. This ensures the executor won't work until\n// SetTargetPort() is called -- see (*testTarget)Port().\nfunc (tt *testTarget) Address() string {\n\th, _, err := net.SplitHostPort(tt.SSHService.Address())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn h\n}\n\nfunc (tt *testTarget) Port() string {\n\t_, p, err := net.SplitHostPort(tt.SSHService.Address())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn p\n}\n\ntype mitmTarget struct {\n\ttest.SSHService\n}\n\nfunc (*mitmTarget) VerifyHostKey(key ssh.PublicKey, client *ssh.Client) error {\n\treturn fmt.Errorf(\"host key failed verification: %#v\", key)\n}\n\ntype ExecutorSuite struct{}\n\nfunc (s *ExecutorSuite) TestBadHostKey(c *check.C) {\n\t_, hostpriv := test.LoadTestKey(c, \"../test/sshkey_vm\")\n\tclientpub, clientpriv := test.LoadTestKey(c, \"../test/sshkey_dispatch\")\n\ttarget := &mitmTarget{\n\t\tSSHService: test.SSHService{\n\t\t\tExec: func(map[string]string, string, io.Reader, io.Writer, io.Writer) uint32 {\n\t\t\t\tc.Error(\"Target Exec func called even though host key verification failed\")\n\t\t\t\treturn 0\n\t\t\t},\n\t\t\tHostKey:        hostpriv,\n\t\t\tAuthorizedUser: \"username\",\n\t\t\tAuthorizedKeys: []ssh.PublicKey{clientpub},\n\t\t},\n\t}\n\n\terr := target.Start()\n\tc.Check(err, check.IsNil)\n\tc.Logf(\"target address %q\", target.Address())\n\tdefer target.Close()\n\n\texr := New(target)\n\texr.SetSigners(clientpriv)\n\n\t_, _, err = exr.Execute(nil, \"true\", nil)\n\tc.Check(err, check.ErrorMatches, \"host key failed verification: .*\")\n}\n\nfunc (s *ExecutorSuite) TestExecute(c *check.C) {\n\tcommand := `foo 'bar' \"baz\"`\n\tstdinData := \"foobar\\nbaz\\n\"\n\t_, hostpriv := test.LoadTestKey(c, \"../test/sshkey_vm\")\n\tclientpub, clientpriv := test.LoadTestKey(c, \"../test/sshkey_dispatch\")\n\tfor _, exitcode := range []int{0, 1, 2} {\n\t\ttarget := &testTarget{\n\t\t\tSSHService: test.SSHService{\n\t\t\t\tExec: func(env map[string]string, cmd string, stdin io.Reader, stdout, stderr io.Writer) uint32 {\n\t\t\t\t\tc.Check(env[\"TESTVAR\"], check.Equals, \"test value\")\n\t\t\t\t\tc.Check(cmd, check.Equals, command)\n\t\t\t\t\tvar wg sync.WaitGroup\n\t\t\t\t\twg.Add(2)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tio.WriteString(stdout, \"stdout\\n\")\n\t\t\t\t\t\twg.Done()\n\t\t\t\t\t}()\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tio.WriteString(stderr, \"stderr\\n\")\n\t\t\t\t\t\twg.Done()\n\t\t\t\t\t}()\n\t\t\t\t\tbuf, err := ioutil.ReadAll(stdin)\n\t\t\t\t\twg.Wait()\n\t\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn 99\n\t\t\t\t\t}\n\t\t\t\t\t_, err = stdout.Write(buf)\n\t\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\t\treturn uint32(exitcode)\n\t\t\t\t},\n\t\t\t\tHostKey:        hostpriv,\n\t\t\t\tAuthorizedUser: \"username\",\n\t\t\t\tAuthorizedKeys: 
[]ssh.PublicKey{clientpub},\n\t\t\t},\n\t\t}\n\t\terr := target.Start()\n\t\tc.Check(err, check.IsNil)\n\t\tc.Logf(\"target address %q\", target.Address())\n\t\tdefer target.Close()\n\n\t\texr := New(target)\n\t\texr.SetSigners(clientpriv)\n\n\t\t// Use the default target port (ssh). Execute will\n\t\t// return a connection error or an authentication\n\t\t// error, depending on whether the test host is\n\t\t// running an SSH server.\n\t\t_, _, err = exr.Execute(nil, command, nil)\n\t\tc.Check(err, check.ErrorMatches, `.*(unable to authenticate|connection refused).*`)\n\n\t\t// Use a bogus target port. Execute will return a\n\t\t// connection error.\n\t\texr.SetTargetPort(\"0\")\n\t\t_, _, err = exr.Execute(nil, command, nil)\n\t\tc.Check(err, check.ErrorMatches, `.*connection refused.*`)\n\t\tc.Check(errors.As(err, new(*net.OpError)), check.Equals, true)\n\n\t\t// Use the test server's listening port.\n\t\texr.SetTargetPort(target.Port())\n\n\t\tdone := make(chan bool)\n\t\tgo func() {\n\t\t\tstdout, stderr, err := exr.Execute(map[string]string{\"TESTVAR\": \"test value\"}, command, bytes.NewBufferString(stdinData))\n\t\t\tif exitcode == 0 {\n\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t} else {\n\t\t\t\tc.Check(err, check.NotNil)\n\t\t\t\terr, ok := err.(*ssh.ExitError)\n\t\t\t\tc.Assert(ok, check.Equals, true)\n\t\t\t\tc.Check(err.ExitStatus(), check.Equals, exitcode)\n\t\t\t}\n\t\t\tc.Check(stdout, check.DeepEquals, []byte(\"stdout\\n\"+stdinData))\n\t\t\tc.Check(stderr, check.DeepEquals, []byte(\"stderr\\n\"))\n\t\t\tclose(done)\n\t\t}()\n\n\t\ttimeout := time.NewTimer(time.Second)\n\t\tselect {\n\t\tcase <-done:\n\t\tcase <-timeout.C:\n\t\t\tc.Fatal(\"timed out\")\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "lib/dispatchcloud/test/doc.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Package test provides fakes and other tools for testing cloud\n// drivers and other dispatcher modules.\npackage test\n"
  },
  {
    "path": "lib/dispatchcloud/test/fixtures.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage test\n\nimport (\n\t\"fmt\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// ContainerUUID returns a fake container UUID.\nfunc ContainerUUID(i int) string {\n\treturn fmt.Sprintf(\"zzzzz-dz642-%015d\", i)\n}\n\n// InstanceType returns a fake arvados.InstanceType called \"type{i}\"\n// with i CPUs and i GiB of memory.\nfunc InstanceType(i int) arvados.InstanceType {\n\treturn arvados.InstanceType{\n\t\tName:         fmt.Sprintf(\"type%d\", i),\n\t\tProviderType: fmt.Sprintf(\"providertype%d\", i),\n\t\tVCPUs:        i,\n\t\tRAM:          arvados.ByteSize(i) << 30 * 11 / 10,\n\t\tScratch:      arvados.ByteSize(i) << 30,\n\t\tPrice:        float64(i) * 0.123,\n\t}\n}\n"
  },
  {
    "path": "lib/dispatchcloud/test/queue.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage test\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/container\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// Queue is a test stub for container.Queue. The caller specifies the\n// initial queue state.\ntype Queue struct {\n\t// Containers represent the API server database contents.\n\tContainers []arvados.Container\n\n\t// ChooseType will be called for each entry in Containers. It\n\t// must not be nil.\n\tChooseType func(*arvados.Container) ([]arvados.InstanceType, error)\n\n\t// Mimic railsapi implementation of MaxDispatchAttempts config\n\tMaxDispatchAttempts int\n\n\tLogger logrus.FieldLogger\n\n\tentries      map[string]container.QueueEnt\n\tupdTime      time.Time\n\tsubscribers  map[<-chan struct{}]chan struct{}\n\tstateChanges []QueueStateChange\n\n\tmtx sync.Mutex\n}\n\ntype QueueStateChange struct {\n\tUUID string\n\tFrom arvados.ContainerState\n\tTo   arvados.ContainerState\n}\n\n// All calls to Lock/Unlock/Cancel to date.\nfunc (q *Queue) StateChanges() []QueueStateChange {\n\tq.mtx.Lock()\n\tdefer q.mtx.Unlock()\n\treturn q.stateChanges\n}\n\n// Entries returns the containers that were queued when Update was\n// last called.\nfunc (q *Queue) Entries() (map[string]container.QueueEnt, time.Time) {\n\tq.mtx.Lock()\n\tdefer q.mtx.Unlock()\n\tupdTime := q.updTime\n\tr := map[string]container.QueueEnt{}\n\tfor uuid, ent := range q.entries {\n\t\tr[uuid] = ent\n\t}\n\treturn r, updTime\n}\n\n// Get returns the container from the cached queue, i.e., as it was\n// when Update was last called -- just like a container.Queue does. 
If\n// the state has been changed (via Lock, Unlock, or Cancel) since the\n// last Update, the updated state is returned.\nfunc (q *Queue) Get(uuid string) (arvados.Container, bool) {\n\tq.mtx.Lock()\n\tdefer q.mtx.Unlock()\n\tent, ok := q.entries[uuid]\n\treturn ent.Container, ok\n}\n\nfunc (q *Queue) Forget(uuid string) {\n\tq.mtx.Lock()\n\tdefer q.mtx.Unlock()\n\tdelete(q.entries, uuid)\n}\n\nfunc (q *Queue) Lock(uuid string) error {\n\tq.mtx.Lock()\n\tdefer q.mtx.Unlock()\n\treturn q.changeState(uuid, arvados.ContainerStateQueued, arvados.ContainerStateLocked)\n}\n\nfunc (q *Queue) Unlock(uuid string) error {\n\tq.mtx.Lock()\n\tdefer q.mtx.Unlock()\n\treturn q.changeState(uuid, arvados.ContainerStateLocked, arvados.ContainerStateQueued)\n}\n\nfunc (q *Queue) Cancel(uuid string) error {\n\tq.mtx.Lock()\n\tdefer q.mtx.Unlock()\n\treturn q.changeState(uuid, q.entries[uuid].Container.State, arvados.ContainerStateCancelled)\n}\n\nfunc (q *Queue) Subscribe() <-chan struct{} {\n\tq.mtx.Lock()\n\tdefer q.mtx.Unlock()\n\tif q.subscribers == nil {\n\t\tq.subscribers = map[<-chan struct{}]chan struct{}{}\n\t}\n\tch := make(chan struct{}, 1)\n\tq.subscribers[ch] = ch\n\treturn ch\n}\n\nfunc (q *Queue) Unsubscribe(ch <-chan struct{}) {\n\tq.mtx.Lock()\n\tdefer q.mtx.Unlock()\n\tdelete(q.subscribers, ch)\n}\n\n// caller must have lock.\nfunc (q *Queue) notify() {\n\tfor _, ch := range q.subscribers {\n\t\tselect {\n\t\tcase ch <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t}\n}\n\n// caller must have lock.\nfunc (q *Queue) changeState(uuid string, from, to arvados.ContainerState) error {\n\tent := q.entries[uuid]\n\tq.stateChanges = append(q.stateChanges, QueueStateChange{uuid, from, to})\n\tif ent.Container.State != from {\n\t\treturn fmt.Errorf(\"changeState failed: state=%q\", ent.Container.State)\n\t}\n\tent.Container.State = to\n\tq.entries[uuid] = ent\n\tfor i, ctr := range q.Containers {\n\t\tif ctr.UUID == uuid {\n\t\t\tif max := q.MaxDispatchAttempts; max > 0 && ctr.LockCount >= max && to == arvados.ContainerStateQueued {\n\t\t\t\tq.Containers[i].State = arvados.ContainerStateCancelled\n\t\t\t\tq.Containers[i].RuntimeStatus = map[string]interface{}{\"error\": fmt.Sprintf(\"Failed to start: lock_count == %d\", ctr.LockCount)}\n\t\t\t} else {\n\t\t\t\tq.Containers[i].State = to\n\t\t\t\tif to == arvados.ContainerStateLocked {\n\t\t\t\t\tq.Containers[i].LockCount++\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tq.notify()\n\treturn nil\n}\n\n// Update rebuilds the current entries from the Containers slice.\nfunc (q *Queue) Update() error {\n\tq.mtx.Lock()\n\tdefer q.mtx.Unlock()\n\tupdTime := time.Now()\n\tupd := map[string]container.QueueEnt{}\n\tfor _, ctr := range q.Containers {\n\t\t_, exists := q.entries[ctr.UUID]\n\t\tif !exists && (ctr.State == arvados.ContainerStateComplete || ctr.State == arvados.ContainerStateCancelled) {\n\t\t\tcontinue\n\t\t}\n\t\tif ent, ok := upd[ctr.UUID]; ok {\n\t\t\tent.Container = ctr\n\t\t\tupd[ctr.UUID] = ent\n\t\t} else {\n\t\t\ttypes, _ := q.ChooseType(&ctr)\n\t\t\tctr.Mounts = nil\n\t\t\tupd[ctr.UUID] = container.QueueEnt{\n\t\t\t\tContainer:         ctr,\n\t\t\t\tInstanceResources: container.InstanceResourcesNeeded(&arvados.Cluster{}, &ctr),\n\t\t\t\tInstanceTypes:     types,\n\t\t\t\tFirstSeenAt:       time.Now(),\n\t\t\t}\n\t\t}\n\t}\n\tq.entries = upd\n\tq.updTime = updTime\n\tq.notify()\n\treturn nil\n}\n\n// Notify adds/updates an entry in the Containers slice.  
This\n// simulates the effect of an API update from someone other than the\n// dispatcher -- e.g., crunch-run updating state to \"Complete\" when a\n// container exits.\n//\n// The resulting changes are not exposed through Get() or Entries()\n// until the next call to Update().\n//\n// Return value is true unless the update is rejected (invalid state\n// transition).\nfunc (q *Queue) Notify(upd arvados.Container) bool {\n\tq.mtx.Lock()\n\tdefer q.mtx.Unlock()\n\tfor i, ctr := range q.Containers {\n\t\tif ctr.UUID == upd.UUID {\n\t\t\tif allowContainerUpdate[ctr.State][upd.State] {\n\t\t\t\tq.Containers[i] = upd\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif q.Logger != nil {\n\t\t\t\tq.Logger.WithField(\"ContainerUUID\", ctr.UUID).Infof(\"test.Queue rejected update from %s to %s\", ctr.State, upd.State)\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\tq.Containers = append(q.Containers, upd)\n\treturn true\n}\n\nvar allowContainerUpdate = map[arvados.ContainerState]map[arvados.ContainerState]bool{\n\tarvados.ContainerStateQueued: {\n\t\tarvados.ContainerStateQueued:    true,\n\t\tarvados.ContainerStateLocked:    true,\n\t\tarvados.ContainerStateCancelled: true,\n\t},\n\tarvados.ContainerStateLocked: {\n\t\tarvados.ContainerStateQueued:    true,\n\t\tarvados.ContainerStateLocked:    true,\n\t\tarvados.ContainerStateRunning:   true,\n\t\tarvados.ContainerStateCancelled: true,\n\t},\n\tarvados.ContainerStateRunning: {\n\t\tarvados.ContainerStateRunning:   true,\n\t\tarvados.ContainerStateCancelled: true,\n\t\tarvados.ContainerStateComplete:  true,\n\t},\n}\n"
  },
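  {
    "path": "lib/dispatchcloud/test/queue_sketch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage test\n\n// Illustrative sketch (not part of the upstream tree): demonstrates\n// the visibility rule documented on (*Queue)Notify -- a Notify\n// update is accepted immediately, but Get/Entries keep returning the\n// cached state until the next Update call. ContainerUUID and\n// InstanceType are the fake fixtures defined in this package.\n\nimport (\n\t\"testing\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\nfunc TestQueueSketchNotifyVisibility(t *testing.T) {\n\tuuid := ContainerUUID(1)\n\tq := &Queue{\n\t\tContainers: []arvados.Container{{UUID: uuid, State: arvados.ContainerStateQueued}},\n\t\tChooseType: func(*arvados.Container) ([]arvados.InstanceType, error) {\n\t\t\treturn []arvados.InstanceType{InstanceType(1)}, nil\n\t\t},\n\t}\n\tif err := q.Update(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Queued -> Locked, as the dispatcher would do before starting a\n\t// container. Lock updates the cached entry directly.\n\tif err := q.Lock(uuid); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Simulate crunch-run reporting state=Running. The update is\n\t// accepted (Locked -> Running is a valid transition)...\n\tif !q.Notify(arvados.Container{UUID: uuid, State: arvados.ContainerStateRunning}) {\n\t\tt.Fatal(\"Notify rejected Locked -> Running\")\n\t}\n\t// ...but the cached queue still reports the old state...\n\tif ctr, _ := q.Get(uuid); ctr.State != arvados.ContainerStateLocked {\n\t\tt.Fatalf(\"before Update: state=%q\", ctr.State)\n\t}\n\t// ...until Update rebuilds the entries from Containers.\n\tif err := q.Update(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif ctr, _ := q.Get(uuid); ctr.State != arvados.ContainerStateRunning {\n\t\tt.Fatalf(\"after Update: state=%q\", ctr.State)\n\t}\n}\n"
  },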
  {
    "path": "lib/dispatchcloud/test/ssh_service.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"golang.org/x/crypto/ssh\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// LoadTestKey returns a public/private ssh keypair, read from the files\n// identified by the path of the private key.\nfunc LoadTestKey(c *check.C, fnm string) (ssh.PublicKey, ssh.Signer) {\n\trawpubkey, err := ioutil.ReadFile(fnm + \".pub\")\n\tc.Assert(err, check.IsNil)\n\tpubkey, _, _, _, err := ssh.ParseAuthorizedKey(rawpubkey)\n\tc.Assert(err, check.IsNil)\n\trawprivkey, err := ioutil.ReadFile(fnm)\n\tc.Assert(err, check.IsNil)\n\tprivkey, err := ssh.ParsePrivateKey(rawprivkey)\n\tc.Assert(err, check.IsNil)\n\treturn pubkey, privkey\n}\n\n// An SSHExecFunc handles an \"exec\" session on a multiplexed SSH\n// connection.\ntype SSHExecFunc func(env map[string]string, command string, stdin io.Reader, stdout, stderr io.Writer) uint32\n\n// An SSHService accepts SSH connections on an available TCP port and\n// passes clients' \"exec\" sessions to the provided SSHExecFunc.\ntype SSHService struct {\n\tExec           SSHExecFunc\n\tHostKey        ssh.Signer\n\tAuthorizedUser string\n\tAuthorizedKeys []ssh.PublicKey\n\n\tlistener net.Listener\n\tconn     *ssh.ServerConn\n\tsetup    sync.Once\n\tmtx      sync.Mutex\n\tstarted  chan bool\n\tclosed   bool\n\terr      error\n}\n\n// Address returns the host:port where the SSH server is listening. It\n// returns \"\" if called before the server is ready to accept\n// connections.\nfunc (ss *SSHService) Address() string {\n\tss.setup.Do(ss.start)\n\tss.mtx.Lock()\n\tln := ss.listener\n\tss.mtx.Unlock()\n\tif ln == nil {\n\t\treturn \"\"\n\t}\n\treturn ln.Addr().String()\n}\n\n// RemoteUser returns the username that will be accepted.\nfunc (ss *SSHService) RemoteUser() string {\n\treturn ss.AuthorizedUser\n}\n\n// Close shuts down the server and releases resources. 
Established\n// connections are unaffected.\nfunc (ss *SSHService) Close() {\n\tss.Start()\n\tss.mtx.Lock()\n\tln := ss.listener\n\tss.closed = true\n\tss.mtx.Unlock()\n\tif ln != nil {\n\t\tln.Close()\n\t}\n}\n\n// Start returns when the server is ready to accept connections.\nfunc (ss *SSHService) Start() error {\n\tss.setup.Do(ss.start)\n\t<-ss.started\n\treturn ss.err\n}\n\nfunc (ss *SSHService) start() {\n\tss.started = make(chan bool)\n\tgo ss.run()\n}\n\nfunc (ss *SSHService) run() {\n\tdefer close(ss.started)\n\tconfig := &ssh.ServerConfig{\n\t\tPublicKeyCallback: func(c ssh.ConnMetadata, pubKey ssh.PublicKey) (*ssh.Permissions, error) {\n\t\t\tfor _, ak := range ss.AuthorizedKeys {\n\t\t\t\tif bytes.Equal(ak.Marshal(), pubKey.Marshal()) {\n\t\t\t\t\treturn &ssh.Permissions{}, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"unknown public key for %q\", c.User())\n\t\t},\n\t}\n\tconfig.AddHostKey(ss.HostKey)\n\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:\")\n\tif err != nil {\n\t\tss.err = err\n\t\treturn\n\t}\n\n\tss.mtx.Lock()\n\tss.listener = listener\n\tss.mtx.Unlock()\n\n\tgo func() {\n\t\tfor {\n\t\t\tnConn, err := listener.Accept()\n\t\t\tif err != nil && strings.Contains(err.Error(), \"use of closed network connection\") && ss.closed {\n\t\t\t\treturn\n\t\t\t} else if err != nil {\n\t\t\t\tlog.Printf(\"accept: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgo ss.serveConn(nConn, config)\n\t\t}\n\t}()\n}\n\nfunc (ss *SSHService) serveConn(nConn net.Conn, config *ssh.ServerConfig) {\n\tdefer nConn.Close()\n\tconn, newchans, reqs, err := ssh.NewServerConn(nConn, config)\n\tif err != nil {\n\t\tlog.Printf(\"ssh.NewServerConn: %s\", err)\n\t\treturn\n\t}\n\tdefer conn.Close()\n\tgo ssh.DiscardRequests(reqs)\n\tfor newch := range newchans {\n\t\tif newch.ChannelType() != \"session\" {\n\t\t\tnewch.Reject(ssh.UnknownChannelType, \"unknown channel type\")\n\t\t\tcontinue\n\t\t}\n\t\tch, reqs, err := newch.Accept()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"accept channel: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tdidExec := false\n\t\tsessionEnv := map[string]string{}\n\t\tgo func() {\n\t\t\tfor req := range reqs {\n\t\t\t\tswitch {\n\t\t\t\tcase didExec:\n\t\t\t\t\t// Reject anything after exec\n\t\t\t\t\treq.Reply(false, nil)\n\t\t\t\tcase req.Type == \"exec\":\n\t\t\t\t\tvar execReq struct {\n\t\t\t\t\t\tCommand string\n\t\t\t\t\t}\n\t\t\t\t\treq.Reply(true, nil)\n\t\t\t\t\tssh.Unmarshal(req.Payload, &execReq)\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\tvar resp struct {\n\t\t\t\t\t\t\tStatus uint32\n\t\t\t\t\t\t}\n\t\t\t\t\t\tresp.Status = ss.Exec(sessionEnv, execReq.Command, ch, ch, ch.Stderr())\n\t\t\t\t\t\tch.SendRequest(\"exit-status\", false, ssh.Marshal(&resp))\n\t\t\t\t\t\tch.Close()\n\t\t\t\t\t}()\n\t\t\t\t\tdidExec = true\n\t\t\t\tcase req.Type == \"env\":\n\t\t\t\t\tvar envReq struct {\n\t\t\t\t\t\tName  string\n\t\t\t\t\t\tValue string\n\t\t\t\t\t}\n\t\t\t\t\treq.Reply(true, nil)\n\t\t\t\t\tssh.Unmarshal(req.Payload, &envReq)\n\t\t\t\t\tsessionEnv[envReq.Name] = envReq.Value\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n"
  },
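  {
    "path": "lib/dispatchcloud/test/ssh_service_sketch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage test\n\n// Illustrative sketch (not part of the upstream tree): connects a\n// plain golang.org/x/crypto/ssh client to an SSHService and runs one\n// \"exec\" session, showing how the Exec callback receives \"env\"\n// request values and the command string, and how its return value\n// becomes the session's exit status. The key files are the fixtures\n// shipped in this directory.\n\nimport (\n\t\"io\"\n\t\"io/ioutil\"\n\t\"testing\"\n\n\t\"golang.org/x/crypto/ssh\"\n)\n\nfunc TestSSHServiceSketch(t *testing.T) {\n\tloadKey := func(fnm string) ssh.Signer {\n\t\tbuf, err := ioutil.ReadFile(fnm)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tkey, err := ssh.ParsePrivateKey(buf)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\treturn key\n\t}\n\thostKey := loadKey(\"sshkey_vm\")\n\tclientKey := loadKey(\"sshkey_dispatch\")\n\tsrv := &SSHService{\n\t\tExec: func(env map[string]string, cmd string, stdin io.Reader, stdout, stderr io.Writer) uint32 {\n\t\t\t// Echo back the env var set below via Setenv.\n\t\t\tio.WriteString(stdout, \"hi \"+env[\"GREETEE\"]+\"\\n\")\n\t\t\treturn 0\n\t\t},\n\t\tHostKey:        hostKey,\n\t\tAuthorizedUser: \"username\",\n\t\tAuthorizedKeys: []ssh.PublicKey{clientKey.PublicKey()},\n\t}\n\tif err := srv.Start(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer srv.Close()\n\tclient, err := ssh.Dial(\"tcp\", srv.Address(), &ssh.ClientConfig{\n\t\tUser:            \"username\",\n\t\tAuth:            []ssh.AuthMethod{ssh.PublicKeys(clientKey)},\n\t\tHostKeyCallback: ssh.InsecureIgnoreHostKey(),\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer client.Close()\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer session.Close()\n\tif err := session.Setenv(\"GREETEE\", \"world\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tout, err := session.Output(\"true\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif string(out) != \"hi world\\n\" {\n\t\tt.Fatalf(\"got %q\", out)\n\t}\n}\n"
  },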
  {
    "path": "lib/dispatchcloud/test/sshkey_dispatch",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEAqYm4XsQHm8sBSZFwUX5VeW1OkGsfoNzcGPG2nzzYRhNhClYZ\n0ABHhUk82HkaC/8l6d/jpYTf42HrK42nNQ0r0Yzs7qw8yZMQioK4Yk+kFyVLF78E\nGRG4pGAWXFs6pUchs/lm8fo9zcda4R3XeqgI+NO+nEERXmdRJa1FhI+Za3/S/+CV\nmg+6O00wZz2+vKmDPptGN4MCKmQOCKsMJts7wSZGyVcTtdNv7jjfr6yPAIOIL8X7\nLtarBCFaK/pD7uWll/Uj7h7D8K48nIZUrvBJJjXL8Sm4LxCNoz3Z83k8J5ZzuDRD\ngRiQe/C085mhO6VL+2fypDLwcKt1tOL8fI81MwIDAQABAoIBACR3tEnmHsDbNOav\nOxq8cwRQh9K2yDHg8BMJgz/TZa4FIx2HEbxVIw0/iLADtJ+Z/XzGJQCIiWQuvtg6\nexoFQESt7JUWRWkSkj9JCQJUoTY9Vl7APtBpqG7rIEQzd3TvzQcagZNRQZQO6rR7\np8sBdBSZ72lK8cJ9tM3G7Kor/VNK7KgRZFNhEWnmvEa3qMd4hzDcQ4faOn7C9NZK\ndwJAuJVVfwOLlOORYcyEkvksLaDOK2DsB/p0AaCpfSmThRbBKN5fPXYaKgUdfp3w\n70Hpp27WWymb1cgjyqSH3DY+V/kvid+5QxgxCBRq865jPLn3FFT9bWEVS/0wvJRj\niMIRrjECgYEA4Ffv9rBJXqVXonNQbbstd2PaprJDXMUy9/UmfHL6pkq1xdBeuM7v\nyf2ocXheA8AahHtIOhtgKqwv/aRhVK0ErYtiSvIk+tXG+dAtj/1ZAKbKiFyxjkZV\nX72BH7cTlR6As5SRRfWM/HaBGEgED391gKsI5PyMdqWWdczT5KfxAksCgYEAwXYE\newPmV1GaR5fbh2RupoPnUJPMj36gJCnwls7sGaXDQIpdlq56zfKgrLocGXGgj+8f\nQH7FHTJQO15YCYebtsXWwB3++iG43gVlJlecPAydsap2CCshqNWC5JU5pan0QzsP\nexzNzWqfUPSbTkR2SRaN+MenZo2Y/WqScOAth7kCgYBgVoLujW9EXH5QfXJpXLq+\njTvE38I7oVcs0bJwOLPYGzcJtlwmwn6IYAwohgbhV2pLv+EZSs42JPEK278MLKxY\nlgVkp60npgunFTWroqDIvdc1TZDVxvA8h9VeODEJlSqxczgbMcIUXBM9yRctTI+5\n7DiKlMUA4kTFW2sWwuOlFwKBgGXvrYS0FVbFJKm8lmvMu5D5x5RpjEu/yNnFT4Pn\nG/iXoz4Kqi2PWh3STl804UF24cd1k94D7hDoReZCW9kJnz67F+C67XMW+bXi2d1O\nJIBvlVfcHb1IHMA9YG7ZQjrMRmx2Xj3ce4RVPgUGHh8ra7gvLjd72/Tpf0doNClN\nti/hAoGBAMW5D3LhU05LXWmOqpeT4VDgqk4MrTBcstVe7KdVjwzHrVHCAmI927vI\npjpphWzpC9m3x4OsTNf8m+g6H7f3IiQS0aiFNtduXYlcuT5FHS2fSATTzg5PBon9\n1E6BudOve+WyFyBs7hFWAqWFBdWujAl4Qk5Ek09U2ilFEPE7RTgJ\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "lib/dispatchcloud/test/sshkey_dispatch.pub",
    "content": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpibhexAebywFJkXBRflV5bU6Qax+g3NwY8bafPNhGE2EKVhnQAEeFSTzYeRoL/yXp3+OlhN/jYesrjac1DSvRjOzurDzJkxCKgrhiT6QXJUsXvwQZEbikYBZcWzqlRyGz+Wbx+j3Nx1rhHdd6qAj4076cQRFeZ1ElrUWEj5lrf9L/4JWaD7o7TTBnPb68qYM+m0Y3gwIqZA4Iqwwm2zvBJkbJVxO102/uON+vrI8Ag4gvxfsu1qsEIVor+kPu5aWX9SPuHsPwrjychlSu8EkmNcvxKbgvEI2jPdnzeTwnlnO4NEOBGJB78LTzmaE7pUv7Z/KkMvBwq3W04vx8jzUz tom@curve\n"
  },
  {
    "path": "lib/dispatchcloud/test/sshkey_vm",
    "content": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEApIfWk2StZGDtmunumIeXLJ46AQrbHHvuxrSAkQf6+zUwjB2I\nrse7ezBRHWcge9U5EsigixmhUM4ozFLnUQNwC862jbmsjbyA97arG/REECNlUrEB\nHQPYHhai5yyJ89AfjWVxKyINfW0K2HX1R8nl4kdVraAgpohPLh0dGjfwzm/BcXDG\n+TxW9zRz0KCs9ZRI6s2MNdv08ahKQ0azk8gRTqMADJmYNWIo3zPQ+fhlwyr6EZJ/\nHFbRtjpajEPMJPwoVPO+Wj6wztfHDYKkPIrIWbhMl6w+tEKdsmygd3Iq94ktLS3X\nAbRCfn4njS2QSlkKFEepkUJWCSSWZgFn6DLm2wIDAQABAoIBAQCb137LxcTnG1h0\nL7isCWKMBKN0cU/xvwIAfOB6f1CfuVXuodrhkpZmrPFoJFKEeQbCX/6RQwmlfGDw\niGZKOjNbO8V2oLRs3GxcNk4FAG2ny58hoD8puIZwmYhb57gTlMMOL1PuQyb78tkf\nBzv5b6ermV3yQ4Ypt1solrMGLo6NOZD0oDX9p0Zt9kueIhjzgP0v5//T1F4PGHZK\n+sLSsMiu9u6F+PB+Oc6uv0Zee9Lnts/QiWH5f18oEculjwKWFx+JwJWiLffGg2Bl\nvbpmvHFRoRWkHTpgSiLwSUqs0ZUWU9R5h11ROg5L39MLsxQoBvHsPEnP5ssN8jGt\naH86EZjBAoGBAM+A5B/UjhIn9m05EhDTDRzI92hGhM8f7uAwobbnjvIQyZbWlBwj\n2TmgbJdpTGVbD+iTBIwKQdcFBbWobTCZsNMpghqA/ir4YIAnZ5OX9VQ1Bc+bWE7V\ndPmMVpCgyg+ERAe+79FrYWcI3vhnBpHCsY/9p9pGQIKDzlGTWNF1HJGjAoGBAMr7\n2CTVnFImTgD3E+rH4AAAfkz+cyqfK6BUhli/NifFYZhWCs16r9QCGSORnp4gPhMY\n3mf7VBs9rk123zOMo89eJt3adTgbZ+QIxXeXilGXpbT3w1+CJMaZRrIy80E1tB5/\nKvDZcrZ78o8XWMNUa+9k55ukvgyC24ICAmOIWNlpAoGBALEFvphBF2r52MtZUsYz\npw4VjKvS7V5eWcW891k4tsRf+frK2NQg6SK2b63EUT5ur2W0dr6ZyY2MZVCSfYRm\nuWmMEchWn389IeZyt3Q8wTize1+foXivtflm9jqwUXFnXzpUc/du6kuiT8YO7pXP\nSPgUZ+xY3pP5qjwBvlYC2PqNAoGAZ1CKMi1bdGC0wT8BLzXuqHGX136HhcEgRmnf\nO5qPaOzJAO2CcBWrGuC6hOUgc+F7VuMIiKpeo8LgTeNcNfO2iNymMbN4iEdCuMlS\nIM3MBD2IhTS6h4lJSKBJYHgYYi+AbylQ5Of4wDMUQYqjjkAQ8/dK/2h5pwqPyXtW\nVezXNEkCgYEAq4S0++y9tjlLn+w9BIkmx3bAVRDQZIzIEwxTh+jpqaUp1J0iyseJ\n71pwqQojGNF6x8GglVXa6bMrETae21WhEeHnWmzlpCWIODsYPUQ+erjDuAWi9eGk\nHLklqSEoLB8pzC6zDqjxDw+CnGERIDSaoaeoWiNKZ95IH1WiEwYjuxU=\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "lib/dispatchcloud/test/sshkey_vm.pub",
    "content": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkh9aTZK1kYO2a6e6Yh5csnjoBCtsce+7GtICRB/r7NTCMHYiux7t7MFEdZyB71TkSyKCLGaFQzijMUudRA3ALzraNuayNvID3tqsb9EQQI2VSsQEdA9geFqLnLInz0B+NZXErIg19bQrYdfVHyeXiR1WtoCCmiE8uHR0aN/DOb8FxcMb5PFb3NHPQoKz1lEjqzYw12/TxqEpDRrOTyBFOowAMmZg1YijfM9D5+GXDKvoRkn8cVtG2OlqMQ8wk/ChU875aPrDO18cNgqQ8ishZuEyXrD60Qp2ybKB3cir3iS0tLdcBtEJ+fieNLZBKWQoUR6mRQlYJJJZmAWfoMubb tom@curve\n"
  },
  {
    "path": "lib/dispatchcloud/test/stub_driver.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage test\n\nimport (\n\t\"crypto/rand\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\tmath_rand \"math/rand\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/crunchrun\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\n// A StubDriver implements cloud.Driver by setting up local SSH\n// servers that do fake command executions.\ntype StubDriver struct {\n\tHostKey        ssh.Signer\n\tAuthorizedKeys []ssh.PublicKey\n\n\t// SetupVM, if set, is called upon creation of each new\n\t// StubVM. This is the caller's opportunity to customize the\n\t// VM's error rate and other behaviors.\n\t//\n\t// If SetupVM returns an error, that error will be returned to\n\t// the caller of Create(), and the new VM will be discarded.\n\tSetupVM func(*StubVM) error\n\n\t// Bugf, if set, is called if a bug is detected in the caller\n\t// or stub. Typically set to (*check.C)Errorf. If unset,\n\t// logger.Warnf is called instead.\n\tBugf func(string, ...interface{})\n\n\t// StubVM's fake crunch-run uses this Queue to read and update\n\t// container state.\n\tQueue *Queue\n\n\t// Frequency of artificially introduced errors on calls to\n\t// Create and Destroy. 0=always succeed, 1=always fail.\n\tErrorRateCreate  float64\n\tErrorRateDestroy float64\n\n\t// If Create() or Instances() is called too frequently, return\n\t// rate-limiting errors.\n\tMinTimeBetweenCreateCalls    time.Duration\n\tMinTimeBetweenInstancesCalls time.Duration\n\n\tQuotaMaxInstances int\n\n\t// If true, Create and Destroy calls block until Release() is\n\t// called.\n\tHoldCloudOps bool\n\n\tinstanceSets []*StubInstanceSet\n\tholdCloudOps chan bool\n}\n\n// InstanceSet returns a new *StubInstanceSet.\nfunc (sd *StubDriver) InstanceSet(params json.RawMessage, id cloud.InstanceSetID, _ cloud.SharedResourceTags, logger logrus.FieldLogger, reg *prometheus.Registry) (cloud.InstanceSet, error) {\n\tif sd.holdCloudOps == nil {\n\t\tsd.holdCloudOps = make(chan bool)\n\t}\n\tsis := StubInstanceSet{\n\t\tdriver:  sd,\n\t\tlogger:  logger,\n\t\tservers: map[cloud.InstanceID]*StubVM{},\n\t}\n\tsd.instanceSets = append(sd.instanceSets, &sis)\n\n\tvar err error\n\tif params != nil {\n\t\terr = json.Unmarshal(params, &sis)\n\t}\n\treturn &sis, err\n}\n\n// InstanceSets returns all instances that have been created by the\n// driver. This can be used to test a component that uses the driver\n// but doesn't expose the InstanceSets it has created.\nfunc (sd *StubDriver) InstanceSets() []*StubInstanceSet {\n\treturn sd.instanceSets\n}\n\n// ReleaseCloudOps releases n pending Create/Destroy calls. 
If there\n// are fewer than n blocked calls pending, it waits for the rest to\n// arrive.\nfunc (sd *StubDriver) ReleaseCloudOps(n int) {\n\tfor i := 0; i < n; i++ {\n\t\t<-sd.holdCloudOps\n\t}\n}\n\ntype StubInstanceSet struct {\n\tdriver  *StubDriver\n\tlogger  logrus.FieldLogger\n\tservers map[cloud.InstanceID]*StubVM\n\tmtx     sync.RWMutex\n\tstopped bool\n\n\tallowCreateCall    time.Time\n\tallowInstancesCall time.Time\n\tlastInstanceID     int\n}\n\nfunc (sis *StubInstanceSet) Create(it arvados.InstanceType, image cloud.ImageID, tags cloud.InstanceTags, initCommand cloud.InitCommand, authKey ssh.PublicKey) (cloud.Instance, error) {\n\tif sis.driver.HoldCloudOps {\n\t\tsis.driver.holdCloudOps <- true\n\t}\n\tsis.mtx.Lock()\n\tdefer sis.mtx.Unlock()\n\tif sis.stopped {\n\t\treturn nil, errors.New(\"StubInstanceSet: Create called after Stop\")\n\t}\n\tif sis.allowCreateCall.After(time.Now()) {\n\t\treturn nil, RateLimitError{sis.allowCreateCall}\n\t}\n\tif math_rand.Float64() < sis.driver.ErrorRateCreate {\n\t\treturn nil, fmt.Errorf(\"StubInstanceSet: rand < ErrorRateCreate %f\", sis.driver.ErrorRateCreate)\n\t}\n\tif max := sis.driver.QuotaMaxInstances; max > 0 && len(sis.servers) >= max {\n\t\treturn nil, QuotaError{fmt.Errorf(\"StubInstanceSet: reached QuotaMaxInstances %d\", max)}\n\t}\n\tsis.allowCreateCall = time.Now().Add(sis.driver.MinTimeBetweenCreateCalls)\n\tak := sis.driver.AuthorizedKeys\n\tif authKey != nil {\n\t\tak = append([]ssh.PublicKey{authKey}, ak...)\n\t}\n\tsis.lastInstanceID++\n\tsvm := &StubVM{\n\t\tInitCommand:  initCommand,\n\t\tsis:          sis,\n\t\tid:           cloud.InstanceID(fmt.Sprintf(\"inst%d,%s\", sis.lastInstanceID, it.ProviderType)),\n\t\ttags:         copyTags(tags),\n\t\tproviderType: it.ProviderType,\n\t\trunning:      map[string]stubProcess{},\n\t\tkilling:      map[string]bool{},\n\t}\n\tsvm.SSHService = SSHService{\n\t\tHostKey:        sis.driver.HostKey,\n\t\tAuthorizedUser: \"root\",\n\t\tAuthorizedKeys: ak,\n\t\tExec:           svm.Exec,\n\t}\n\tif setup := sis.driver.SetupVM; setup != nil {\n\t\terr := setup(svm)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tsis.servers[svm.id] = svm\n\treturn svm.Instance(), nil\n}\n\nfunc (sis *StubInstanceSet) Instances(cloud.InstanceTags) ([]cloud.Instance, error) {\n\tsis.mtx.RLock()\n\tdefer sis.mtx.RUnlock()\n\tif sis.allowInstancesCall.After(time.Now()) {\n\t\treturn nil, RateLimitError{sis.allowInstancesCall}\n\t}\n\tsis.allowInstancesCall = time.Now().Add(sis.driver.MinTimeBetweenInstancesCalls)\n\tvar r []cloud.Instance\n\tfor _, ss := range sis.servers {\n\t\tr = append(r, ss.Instance())\n\t}\n\treturn r, nil\n}\n\n// InstanceQuotaGroup returns the first character of the given\n// instance's ProviderType.  
Use ProviderTypes like \"a1\", \"a2\", \"b1\",\n// \"b2\" to test instance quota group behaviors.\nfunc (sis *StubInstanceSet) InstanceQuotaGroup(it arvados.InstanceType) cloud.InstanceQuotaGroup {\n\tsuffix := \"\"\n\tif it.Preemptible {\n\t\tsuffix = \"-p\"\n\t}\n\treturn cloud.InstanceQuotaGroup(it.ProviderType[:1] + suffix)\n}\n\nfunc (sis *StubInstanceSet) Stop() {\n\tsis.mtx.Lock()\n\tdefer sis.mtx.Unlock()\n\tif sis.stopped {\n\t\tpanic(\"Stop called twice\")\n\t}\n\tsis.stopped = true\n}\n\nfunc (sis *StubInstanceSet) StubVMs() (svms []*StubVM) {\n\tsis.mtx.Lock()\n\tdefer sis.mtx.Unlock()\n\tfor _, vm := range sis.servers {\n\t\tsvms = append(svms, vm)\n\t}\n\treturn\n}\n\ntype RateLimitError struct{ Retry time.Time }\n\nfunc (e RateLimitError) Error() string            { return fmt.Sprintf(\"rate limited until %s\", e.Retry) }\nfunc (e RateLimitError) EarliestRetry() time.Time { return e.Retry }\n\nvar _ = cloud.RateLimitError(RateLimitError{}) // assert the interface is satisfied\n\ntype CapacityError struct {\n\tInstanceTypeSpecific       bool\n\tInstanceQuotaGroupSpecific bool\n}\n\nfunc (e CapacityError) Error() string                      { return \"insufficient capacity\" }\nfunc (e CapacityError) IsCapacityError() bool              { return true }\nfunc (e CapacityError) IsInstanceTypeSpecific() bool       { return e.InstanceTypeSpecific }\nfunc (e CapacityError) IsInstanceQuotaGroupSpecific() bool { return e.InstanceQuotaGroupSpecific }\n\nvar _ = cloud.CapacityError(CapacityError{}) // assert the interface is satisfied\n\n// StubVM is a fake server that runs an SSH service. It represents a\n// VM running in a fake cloud.\n//\n// Note this is distinct from a stubInstance, which is a snapshot of\n// the VM's metadata. Like a VM in a real cloud, a StubVM keeps\n// running (and might change IP addresses, shut down, etc.)  without\n// updating any stubInstances that have been returned to callers.\ntype StubVM struct {\n\tBoot                  time.Time\n\tBroken                time.Time\n\tReportBroken          time.Time\n\tCrunchRunMissing      bool\n\tCrunchRunCrashRate    float64\n\tCrunchRunDetachDelay  time.Duration\n\tArvMountMaxExitLag    time.Duration\n\tArvMountDeadlockRate  float64\n\tExecuteContainer      func(arvados.Container) int\n\tCrashRunningContainer func(arvados.Container)\n\tExtraCrunchRunArgs    string // extra args expected after \"crunch-run --detach --stdin-config \"\n\n\t// Populated by (*StubInstanceSet)Create()\n\tInitCommand cloud.InitCommand\n\n\tsis          *StubInstanceSet\n\tid           cloud.InstanceID\n\ttags         cloud.InstanceTags\n\tproviderType string\n\tSSHService   SSHService\n\trunning      map[string]stubProcess\n\tkilling      map[string]bool\n\tlastPID      int64\n\tdeadlocked   string\n\tstubprocs    sync.WaitGroup\n\tdestroying   bool\n\tsync.Mutex\n}\n\ntype stubProcess struct {\n\tpid int64\n\n\t// crunch-run has exited, but arv-mount process (or something)\n\t// still holds lock in /var/run/\n\texited bool\n}\n\nfunc (svm *StubVM) Instance() stubInstance {\n\tsvm.Lock()\n\tdefer svm.Unlock()\n\treturn stubInstance{\n\t\tsvm:  svm,\n\t\taddr: svm.SSHService.Address(),\n\t\t// We deliberately return a cached/stale copy of the\n\t\t// real tags here, so that (Instance)Tags() sometimes\n\t\t// returns old data after a call to\n\t\t// (Instance)SetTags().  
This is permitted by the\n\t\t// driver interface, and this might help remind\n\t\t// callers that they need to tolerate it.\n\t\ttags: copyTags(svm.tags),\n\t}\n}\n\nfunc (svm *StubVM) Exec(env map[string]string, command string, stdin io.Reader, stdout, stderr io.Writer) uint32 {\n\t// Ensure we don't start any new stubprocs after Destroy()\n\t// has started Wait()ing for stubprocs to end.\n\tsvm.Lock()\n\tif svm.destroying {\n\t\tsvm.Unlock()\n\t\treturn 1\n\t}\n\tsvm.stubprocs.Add(1)\n\tdefer svm.stubprocs.Done()\n\tsvm.Unlock()\n\n\tstdinData, err := ioutil.ReadAll(stdin)\n\tif err != nil {\n\t\tfmt.Fprintf(stderr, \"error reading stdin: %s\\n\", err)\n\t\treturn 1\n\t}\n\tqueue := svm.sis.driver.Queue\n\tuuid := regexp.MustCompile(`.{5}-dz642-.{15}`).FindString(command)\n\tif eta := svm.Boot.Sub(time.Now()); eta > 0 {\n\t\tfmt.Fprintf(stderr, \"stub is booting, ETA %s\\n\", eta)\n\t\treturn 1\n\t}\n\tif !svm.Broken.IsZero() && svm.Broken.Before(time.Now()) {\n\t\tfmt.Fprintf(stderr, \"cannot fork\\n\")\n\t\treturn 2\n\t}\n\tif svm.CrunchRunMissing && strings.Contains(command, \"crunch-run\") {\n\t\tfmt.Fprint(stderr, \"crunch-run: command not found\\n\")\n\t\treturn 1\n\t}\n\tif strings.HasPrefix(command, \"crunch-run --detach --stdin-config \"+svm.ExtraCrunchRunArgs) {\n\t\tvar configData crunchrun.ConfigData\n\t\terr := json.Unmarshal(stdinData, &configData)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(stderr, \"unmarshal stdin: %s (stdin was: %q)\\n\", err, stdinData)\n\t\t\treturn 1\n\t\t}\n\t\tfor _, name := range []string{\"ARVADOS_API_HOST\", \"ARVADOS_API_TOKEN\"} {\n\t\t\tif configData.Env[name] == \"\" {\n\t\t\t\tfmt.Fprintf(stderr, \"%s env var missing from stdin %q\\n\", name, stdinData)\n\t\t\t\treturn 1\n\t\t\t}\n\t\t}\n\t\tsvm.Lock()\n\t\tsvm.lastPID++\n\t\tpid := svm.lastPID\n\t\tsvm.running[uuid] = stubProcess{pid: pid}\n\t\tsvm.Unlock()\n\n\t\ttime.Sleep(svm.CrunchRunDetachDelay)\n\n\t\tsvm.Lock()\n\t\tdefer svm.Unlock()\n\t\tif svm.destroying {\n\t\t\tfmt.Fprint(stderr, \"crunch-run: killed by system shutdown\\n\")\n\t\t\treturn 9\n\t\t}\n\t\tfmt.Fprintf(stderr, \"starting %s\\n\", uuid)\n\t\tlogger := svm.sis.logger.WithFields(logrus.Fields{\n\t\t\t\"Instance\":      svm.id,\n\t\t\t\"ContainerUUID\": uuid,\n\t\t\t\"PID\":           pid,\n\t\t})\n\t\tlogger.Printf(\"[test] starting crunch-run stub\")\n\t\tsvm.stubprocs.Add(1)\n\t\tgo func() {\n\t\t\tdefer svm.stubprocs.Done()\n\t\t\tvar ctr arvados.Container\n\t\t\tvar started, completed bool\n\t\t\tdefer func() {\n\t\t\t\tlogger.Print(\"[test] exiting crunch-run stub\")\n\t\t\t\tsvm.Lock()\n\t\t\t\tdefer svm.Unlock()\n\t\t\t\tif svm.destroying {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif svm.running[uuid].pid != pid {\n\t\t\t\t\tbugf := svm.sis.driver.Bugf\n\t\t\t\t\tif bugf == nil {\n\t\t\t\t\t\tbugf = logger.Warnf\n\t\t\t\t\t}\n\t\t\t\t\tbugf(\"[test] StubDriver bug or caller bug: pid %d exiting, running[%s].pid==%d\", pid, uuid, svm.running[uuid].pid)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif !completed {\n\t\t\t\t\tlogger.WithField(\"State\", ctr.State).Print(\"[test] crashing crunch-run stub\")\n\t\t\t\t\tif started && svm.CrashRunningContainer != nil {\n\t\t\t\t\t\tsvm.CrashRunningContainer(ctr)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tsproc := svm.running[uuid]\n\t\t\t\tsproc.exited = true\n\t\t\t\tsvm.running[uuid] = sproc\n\t\t\t\tsvm.Unlock()\n\t\t\t\ttime.Sleep(svm.ArvMountMaxExitLag * time.Duration(math_rand.Float64()))\n\t\t\t\tsvm.Lock()\n\t\t\t\tif math_rand.Float64() >= svm.ArvMountDeadlockRate 
{\n\t\t\t\t\tdelete(svm.running, uuid)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tcrashluck := math_rand.Float64()\n\t\t\twantCrash := crashluck < svm.CrunchRunCrashRate\n\t\t\twantCrashEarly := crashluck < svm.CrunchRunCrashRate/2\n\n\t\t\tctr, ok := queue.Get(uuid)\n\t\t\tif !ok {\n\t\t\t\tlogger.Print(\"[test] container not in queue\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttime.Sleep(time.Duration(math_rand.Float64()*20) * time.Millisecond)\n\n\t\t\tsvm.Lock()\n\t\t\tkilled := svm.killing[uuid]\n\t\t\tdelete(svm.killing, uuid)\n\t\t\tdestroying := svm.destroying\n\t\t\tsvm.Unlock()\n\t\t\tif killed || wantCrashEarly || destroying {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tctr.State = arvados.ContainerStateRunning\n\t\t\tstarted = queue.Notify(ctr)\n\t\t\tif !started {\n\t\t\t\tctr, _ = queue.Get(uuid)\n\t\t\t\tlogger.Print(\"[test] erroring out because state=Running update was rejected\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif wantCrash {\n\t\t\t\tlogger.WithField(\"State\", ctr.State).Print(\"[test] crashing crunch-run stub\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif svm.ExecuteContainer != nil {\n\t\t\t\tctr.ExitCode = svm.ExecuteContainer(ctr)\n\t\t\t}\n\t\t\tlogger.WithField(\"ExitCode\", ctr.ExitCode).Print(\"[test] completing container\")\n\t\t\tctr.State = arvados.ContainerStateComplete\n\t\t\tcompleted = queue.Notify(ctr)\n\t\t}()\n\t\treturn 0\n\t}\n\tif command == \"crunch-run --list\" {\n\t\tsvm.Lock()\n\t\tdefer svm.Unlock()\n\t\tfor uuid, sproc := range svm.running {\n\t\t\tif sproc.exited {\n\t\t\t\tfmt.Fprintf(stdout, \"%s stale\\n\", uuid)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(stdout, \"%s\\n\", uuid)\n\t\t\t}\n\t\t}\n\t\tif !svm.ReportBroken.IsZero() && svm.ReportBroken.Before(time.Now()) {\n\t\t\tfmt.Fprintln(stdout, \"broken\")\n\t\t}\n\t\tfmt.Fprintln(stdout, svm.deadlocked)\n\t\treturn 0\n\t}\n\tif strings.HasPrefix(command, \"crunch-run --kill \") {\n\t\tsvm.Lock()\n\t\tsproc, running := svm.running[uuid]\n\t\tif running && !sproc.exited {\n\t\t\tsvm.killing[uuid] = true\n\t\t\tsvm.Unlock()\n\t\t\ttime.Sleep(time.Duration(math_rand.Float64()*2) * time.Millisecond)\n\t\t\tsvm.Lock()\n\t\t\tsproc, running = svm.running[uuid]\n\t\t}\n\t\tsvm.Unlock()\n\t\tif running && !sproc.exited {\n\t\t\tfmt.Fprintf(stderr, \"%s: container is running\\n\", uuid)\n\t\t\treturn 1\n\t\t}\n\t\tfmt.Fprintf(stderr, \"%s: container is not running\\n\", uuid)\n\t\treturn 0\n\t}\n\tif command == \"true\" {\n\t\treturn 0\n\t}\n\tfmt.Fprintf(stderr, \"%q: command not found\", command)\n\treturn 1\n}\n\ntype stubInstance struct {\n\tsvm  *StubVM\n\taddr string\n\ttags cloud.InstanceTags\n}\n\nfunc (si stubInstance) ID() cloud.InstanceID {\n\treturn si.svm.id\n}\n\nfunc (si stubInstance) Address() string {\n\treturn si.addr\n}\n\nfunc (si stubInstance) RemoteUser() string {\n\treturn si.svm.SSHService.AuthorizedUser\n}\n\nfunc (si stubInstance) Destroy() error {\n\tsis := si.svm.sis\n\tif sis.driver.HoldCloudOps {\n\t\tsis.driver.holdCloudOps <- true\n\t}\n\tif math_rand.Float64() < si.svm.sis.driver.ErrorRateDestroy {\n\t\treturn errors.New(\"instance could not be destroyed\")\n\t}\n\tsi.svm.Lock()\n\tsi.svm.destroying = true\n\tsi.svm.Unlock()\n\tsi.svm.stubprocs.Wait()\n\tsi.svm.SSHService.Close()\n\tsis.mtx.Lock()\n\tdefer sis.mtx.Unlock()\n\tdelete(sis.servers, si.svm.id)\n\treturn nil\n}\n\nfunc (si stubInstance) ProviderType() string {\n\treturn si.svm.providerType\n}\n\nfunc (si stubInstance) SetTags(tags cloud.InstanceTags) error {\n\ttags = copyTags(tags)\n\tsvm := si.svm\n\tgo func() {\n\t\tsvm.Lock()\n\t\tdefer 
svm.Unlock()\n\t\tsvm.tags = tags\n\t}()\n\treturn nil\n}\n\nfunc (si stubInstance) Tags() cloud.InstanceTags {\n\t// Return a copy to ensure a caller can't change our saved\n\t// tags just by writing to the returned map.\n\treturn copyTags(si.tags)\n}\n\nfunc (si stubInstance) String() string {\n\treturn string(si.svm.id)\n}\n\nfunc (si stubInstance) VerifyHostKey(key ssh.PublicKey, client *ssh.Client) error {\n\tbuf := make([]byte, 512)\n\t_, err := io.ReadFull(rand.Reader, buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsig, err := si.svm.sis.driver.HostKey.Sign(rand.Reader, buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn key.Verify(buf, sig)\n}\n\nfunc copyTags(src cloud.InstanceTags) cloud.InstanceTags {\n\tdst := cloud.InstanceTags{}\n\tfor k, v := range src {\n\t\tdst[k] = v\n\t}\n\treturn dst\n}\n\nfunc (si stubInstance) PriceHistory(arvados.InstanceType) []cloud.InstancePrice {\n\treturn nil\n}\n\ntype QuotaError struct {\n\terror\n}\n\nfunc (QuotaError) IsQuotaError() bool { return true }\n"
  },
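  {
    "path": "lib/dispatchcloud/test/stub_driver_sketch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage test\n\n// Illustrative sketch (not part of the upstream tree): drives a\n// StubDriver through its lifecycle without a dispatcher -- create an\n// instance set, create one fake VM (which starts a local SSH\n// service), list it, and destroy it. The instance set ID and image\n// ID are arbitrary placeholder values; the host key is the sshkey_vm\n// fixture in this directory, and Destroy is assumed to be part of\n// the cloud.Instance interface as used by the worker pool.\n\nimport (\n\t\"io/ioutil\"\n\t\"testing\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\nfunc TestStubDriverSketch(t *testing.T) {\n\tbuf, err := ioutil.ReadFile(\"sshkey_vm\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\thostKey, err := ssh.ParsePrivateKey(buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsd := &StubDriver{HostKey: hostKey}\n\tis, err := sd.InstanceSet(nil, cloud.InstanceSetID(\"sketch\"), nil, logrus.New(), prometheus.NewRegistry())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsis, ok := is.(*StubInstanceSet)\n\tif !ok {\n\t\tt.Fatalf(\"unexpected InstanceSet type %T\", is)\n\t}\n\tdefer sis.Stop()\n\t// Create one fake VM; the returned cloud.Instance is a snapshot\n\t// of the StubVM's metadata, per the StubVM doc comment.\n\tinst, err := sis.Create(InstanceType(1), cloud.ImageID(\"fake-image\"), cloud.InstanceTags{}, \"\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"created %s at %s\", inst.ID(), inst.Address())\n\tinsts, err := sis.Instances(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(insts) != 1 {\n\t\tt.Fatalf(\"got %d instances, want 1\", len(insts))\n\t}\n\tif err := inst.Destroy(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n"
  },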
  {
    "path": "lib/dispatchcloud/worker/gocheck_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage worker\n\nimport (\n\t\"testing\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n"
  },
  {
    "path": "lib/dispatchcloud/worker/pool.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage worker\n\nimport (\n\t\"crypto/hmac\"\n\t\"crypto/md5\"\n\t\"crypto/rand\"\n\t\"crypto/sha256\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\tmathrand \"math/rand\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\nconst (\n\ttagKeyInstanceType   = \"InstanceType\"\n\ttagKeyIdleBehavior   = \"IdleBehavior\"\n\ttagKeyInstanceSecret = \"InstanceSecret\"\n\ttagKeyInstanceSetID  = \"InstanceSetID\"\n)\n\n// An InstanceView shows a worker's current state and recent activity.\ntype InstanceView struct {\n\tInstance              cloud.InstanceID `json:\"instance\"`\n\tAddress               string           `json:\"address\"`\n\tPrice                 float64          `json:\"price\"`\n\tArvadosInstanceType   string           `json:\"arvados_instance_type\"`\n\tProviderInstanceType  string           `json:\"provider_instance_type\"`\n\tLastContainerUUID     string           `json:\"last_container_uuid\"`\n\tLastBusy              time.Time        `json:\"last_busy\"`\n\tRunningContainerUUIDs []string         `json:\"running_container_uuids\"`\n\tWorkerState           State            `json:\"worker_state\"`\n\tIdleBehavior          IdleBehavior     `json:\"idle_behavior\"`\n}\n\n// An Executor executes shell commands on a remote host.\ntype Executor interface {\n\t// Run cmd on the current target.\n\tExecute(env map[string]string, cmd string, stdin io.Reader) (stdout, stderr []byte, err error)\n\n\t// Use the given target for subsequent operations. The new\n\t// target is the same host as the previous target, but it\n\t// might return a different address and verify a different\n\t// host key.\n\t//\n\t// SetTarget is called frequently, and in most cases the new\n\t// target will behave exactly the same as the old one. 
An\n\t// implementation should optimize accordingly.\n\t//\n\t// SetTarget must not block on concurrent Execute calls.\n\tSetTarget(cloud.ExecutorTarget)\n\n\tClose()\n}\n\nconst (\n\tdefaultSyncInterval        = time.Minute\n\tdefaultProbeInterval       = time.Second * 10\n\tdefaultMaxProbesPerSecond  = 10\n\tdefaultTimeoutIdle         = time.Minute\n\tdefaultTimeoutBooting      = time.Minute * 10\n\tdefaultTimeoutProbe        = time.Minute * 10\n\tdefaultTimeoutShutdown     = time.Second * 10\n\tdefaultTimeoutTERM         = time.Minute * 2\n\tdefaultTimeoutSignal       = time.Second * 5\n\tdefaultTimeoutStaleRunLock = time.Second * 5\n\n\t// Time after a quota error to try again anyway, even if no\n\t// instances have been shutdown.\n\tquotaErrorTTL = time.Minute\n\n\t// Time after a capacity error to try again\n\tcapacityErrorTTL = time.Minute\n\n\t// Time between \"X failed because rate limiting\" messages\n\tlogRateLimitErrorInterval = time.Second * 10\n)\n\nfunc duration(conf arvados.Duration, def time.Duration) time.Duration {\n\tif conf > 0 {\n\t\treturn time.Duration(conf)\n\t}\n\treturn def\n}\n\n// NewPool creates a Pool of workers backed by instanceSet.\n//\n// New instances are configured and set up according to the given\n// cluster configuration.\nfunc NewPool(logger logrus.FieldLogger, arvClient *arvados.Client, reg *prometheus.Registry, instanceSetID cloud.InstanceSetID, instanceSet cloud.InstanceSet, newExecutor func(cloud.Instance) Executor, installPublicKey ssh.PublicKey, cluster *arvados.Cluster) *Pool {\n\twp := &Pool{\n\t\tlogger:                         logger,\n\t\tarvClient:                      arvClient,\n\t\tinstanceSetID:                  instanceSetID,\n\t\tinstanceSet:                    &throttledInstanceSet{InstanceSet: instanceSet},\n\t\tnewExecutor:                    newExecutor,\n\t\tcluster:                        cluster,\n\t\tbootProbeCommand:               cluster.Containers.CloudVMs.BootProbeCommand,\n\t\tinstanceInitCommand:            cloud.InitCommand(cluster.Containers.CloudVMs.InstanceInitCommand),\n\t\trunnerSource:                   cluster.Containers.CloudVMs.DeployRunnerBinary,\n\t\trunnerDeployDirectory:          cluster.Containers.CloudVMs.DeployRunnerDirectory,\n\t\timageID:                        cloud.ImageID(cluster.Containers.CloudVMs.ImageID),\n\t\tinstanceTypes:                  cluster.InstanceTypes,\n\t\tmaxProbesPerSecond:             cluster.Containers.CloudVMs.MaxProbesPerSecond,\n\t\tmaxConcurrentInstanceCreateOps: cluster.Containers.CloudVMs.MaxConcurrentInstanceCreateOps,\n\t\tmaxInstances:                   cluster.Containers.CloudVMs.MaxInstances,\n\t\tprobeInterval:                  duration(cluster.Containers.CloudVMs.ProbeInterval, defaultProbeInterval),\n\t\tsyncInterval:                   duration(cluster.Containers.CloudVMs.SyncInterval, defaultSyncInterval),\n\t\ttimeoutIdle:                    duration(cluster.Containers.CloudVMs.TimeoutIdle, defaultTimeoutIdle),\n\t\ttimeoutBooting:                 duration(cluster.Containers.CloudVMs.TimeoutBooting, defaultTimeoutBooting),\n\t\ttimeoutProbe:                   duration(cluster.Containers.CloudVMs.TimeoutProbe, defaultTimeoutProbe),\n\t\ttimeoutShutdown:                duration(cluster.Containers.CloudVMs.TimeoutShutdown, defaultTimeoutShutdown),\n\t\ttimeoutTERM:                    duration(cluster.Containers.CloudVMs.TimeoutTERM, defaultTimeoutTERM),\n\t\ttimeoutSignal:                  duration(cluster.Containers.CloudVMs.TimeoutSignal, 
defaultTimeoutSignal),\n\t\ttimeoutStaleRunLock:            duration(cluster.Containers.CloudVMs.TimeoutStaleRunLock, defaultTimeoutStaleRunLock),\n\t\tsystemRootToken:                cluster.SystemRootToken,\n\t\tinstallPublicKey:               installPublicKey,\n\t\ttagKeyPrefix:                   cluster.Containers.CloudVMs.TagKeyPrefix,\n\t\trunnerCmdDefault:               cluster.Containers.CrunchRunCommand,\n\t\trunnerArgs:                     append([]string{\"--runtime-engine=\" + cluster.Containers.RuntimeEngine}, cluster.Containers.CrunchRunArgumentsList...),\n\t\tstop:                           make(chan bool),\n\t}\n\twp.registerMetrics(reg)\n\tgo func() {\n\t\twp.setupOnce.Do(wp.setup)\n\t\tgo wp.runMetrics()\n\t\tgo wp.runProbes()\n\t\tgo wp.runSync()\n\t}()\n\treturn wp\n}\n\n// Pool is a resizable worker pool backed by a cloud.InstanceSet. A\n// zero Pool should not be used. Call NewPool to create a new Pool.\ntype Pool struct {\n\t// configuration\n\tlogger                         logrus.FieldLogger\n\tarvClient                      *arvados.Client\n\tinstanceSetID                  cloud.InstanceSetID\n\tinstanceSet                    *throttledInstanceSet\n\tnewExecutor                    func(cloud.Instance) Executor\n\tcluster                        *arvados.Cluster\n\tbootProbeCommand               string\n\tinstanceInitCommand            cloud.InitCommand\n\trunnerSource                   string\n\trunnerDeployDirectory          string\n\timageID                        cloud.ImageID\n\tinstanceTypes                  map[string]arvados.InstanceType\n\tsyncInterval                   time.Duration\n\tprobeInterval                  time.Duration\n\tmaxProbesPerSecond             int\n\tmaxConcurrentInstanceCreateOps int\n\tmaxInstances                   int\n\ttimeoutIdle                    time.Duration\n\ttimeoutBooting                 time.Duration\n\ttimeoutProbe                   time.Duration\n\ttimeoutShutdown                time.Duration\n\ttimeoutTERM                    time.Duration\n\ttimeoutSignal                  time.Duration\n\ttimeoutStaleRunLock            time.Duration\n\tsystemRootToken                string\n\tinstallPublicKey               ssh.PublicKey\n\ttagKeyPrefix                   string\n\trunnerCmdDefault               string   // crunch-run command to use if not deploying a binary\n\trunnerArgs                     []string // extra args passed to crunch-run\n\n\t// private state\n\tsubscribers                map[<-chan struct{}]chan<- struct{}\n\tcreating                   map[string]createCall // unfinished (cloud.InstanceSet)Create calls (key is instance secret)\n\tworkers                    map[cloud.InstanceID]*worker\n\tloaded                     bool                 // loaded list of instances from InstanceSet at least once\n\texited                     map[string]time.Time // containers whose crunch-run proc has exited, but ForgetContainer has not been called\n\tatQuotaUntilFewerInstances int\n\tatQuotaUntil               time.Time\n\tatQuotaErr                 cloud.QuotaError\n\tatCapacityUntil            map[interface{}]time.Time\n\tstop                       chan bool\n\tmtx                        sync.RWMutex\n\tsetupOnce                  sync.Once\n\trunnerData                 []byte\n\trunnerMD5                  [md5.Size]byte\n\trunnerCmd                  string\n\n\tmContainersRunning        prometheus.Gauge\n\tmInstances                *prometheus.GaugeVec\n\tmInstancesPrice           *prometheus.GaugeVec\n\tmVCPUs            
        *prometheus.GaugeVec\n\tmMemory                   *prometheus.GaugeVec\n\tmBootOutcomes             *prometheus.CounterVec\n\tmDisappearances           *prometheus.CounterVec\n\tmTimeToSSH                prometheus.Summary\n\tmTimeToReadyForContainer  prometheus.Summary\n\tmTimeFromShutdownToGone   prometheus.Summary\n\tmTimeFromQueueToCrunchRun prometheus.Summary\n\tmRunProbeDuration         *prometheus.SummaryVec\n\tmProbeAgeMax              prometheus.Gauge\n\tmProbeAgeMedian           prometheus.Gauge\n}\n\ntype createCall struct {\n\ttime         time.Time\n\tinstanceType arvados.InstanceType\n}\n\nfunc (wp *Pool) CheckHealth() error {\n\twp.setupOnce.Do(wp.setup)\n\tif err := wp.loadRunnerData(); err != nil {\n\t\treturn fmt.Errorf(\"error loading runner binary: %s\", err)\n\t}\n\treturn nil\n}\n\n// Subscribe returns a buffered channel that becomes ready after any\n// change to the pool's state that could have scheduling implications:\n// a worker's state changes, a new worker appears, the cloud\n// provider's API rate limiting period ends, etc.\n//\n// Additional events that occur while the channel is already ready\n// will be dropped, so it is OK if the caller services the channel\n// slowly.\n//\n// Example:\n//\n//\tch := wp.Subscribe()\n//\tdefer wp.Unsubscribe(ch)\n//\tfor range ch {\n//\t\ttryScheduling(wp)\n//\t\tif done {\n//\t\t\tbreak\n//\t\t}\n//\t}\nfunc (wp *Pool) Subscribe() <-chan struct{} {\n\twp.setupOnce.Do(wp.setup)\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\tch := make(chan struct{}, 1)\n\twp.subscribers[ch] = ch\n\treturn ch\n}\n\n// Unsubscribe stops sending updates to the given channel.\nfunc (wp *Pool) Unsubscribe(ch <-chan struct{}) {\n\twp.setupOnce.Do(wp.setup)\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\tdelete(wp.subscribers, ch)\n}\n\n// Create a new instance with the given type, and add it to the worker\n// pool. The worker is added immediately; instance creation runs in\n// the background.\n//\n// Create returns false if a pre-existing error or a configuration\n// setting prevents it from even attempting to create a new\n// instance. Those errors are logged by the Pool, so the caller does\n// not need to log anything in such cases.\nfunc (wp *Pool) Create(it arvados.InstanceType) (iv InstanceView, created bool) {\n\tlogger := wp.logger.WithField(\"InstanceType\", it.Name)\n\twp.setupOnce.Do(wp.setup)\n\tif wp.loadRunnerData() != nil {\n\t\t// Boot probe is certain to fail.\n\t\treturn\n\t}\n\tif wp.AtCapacity(it) || wp.AtQuota() || wp.instanceSet.throttleCreate.Error() != nil {\n\t\treturn\n\t}\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\t// The maxConcurrentInstanceCreateOps knob throttles the number of node create\n\t// requests in flight. 
It was added to work around a limitation in Azure's\n\t// managed disks, which support no more than 20 concurrent node creation\n\t// requests from a single disk image (cf.\n\t// https://docs.microsoft.com/en-us/azure/virtual-machines/linux/capture-image).\n\t// The code assumes that node creation, from Azure's perspective, means the\n\t// period until the instance appears in the \"get all instances\" list.\n\tif wp.maxConcurrentInstanceCreateOps > 0 && len(wp.creating) >= wp.maxConcurrentInstanceCreateOps {\n\t\tlogger.Info(\"reached MaxConcurrentInstanceCreateOps\")\n\t\twp.instanceSet.throttleCreate.ErrorUntil(errors.New(\"reached MaxConcurrentInstanceCreateOps\"), time.Now().Add(5*time.Second), wp.notify)\n\t\treturn\n\t}\n\tnow := time.Now()\n\tsecret := randomHex(instanceSecretLength)\n\twp.creating[secret] = createCall{time: now, instanceType: it}\n\tgo func() {\n\t\tdefer wp.notify()\n\t\ttags := cloud.InstanceTags{\n\t\t\twp.tagKeyPrefix + tagKeyInstanceSetID:  string(wp.instanceSetID),\n\t\t\twp.tagKeyPrefix + tagKeyInstanceType:   it.Name,\n\t\t\twp.tagKeyPrefix + tagKeyIdleBehavior:   string(IdleBehaviorRun),\n\t\t\twp.tagKeyPrefix + tagKeyInstanceSecret: secret,\n\t\t}\n\t\tinitCmd := TagVerifier{nil, secret, nil}.InitCommand() + \"\\n\" + wp.instanceInitCommand\n\t\tinst, err := wp.instanceSet.Create(it, wp.imageID, tags, initCmd, wp.installPublicKey)\n\t\twp.mtx.Lock()\n\t\tdefer wp.mtx.Unlock()\n\t\t// delete() is deferred so the updateWorker() call\n\t\t// below knows to use StateBooting when adding a new\n\t\t// worker.\n\t\tdefer delete(wp.creating, secret)\n\t\tif err != nil {\n\t\t\tvar isQuotaError, isCapacityError bool\n\t\t\tif err, ok := err.(cloud.QuotaError); ok && err.IsQuotaError() {\n\t\t\t\tisQuotaError = true\n\t\t\t\twp.atQuotaErr = err\n\t\t\t\tn := len(wp.workers) + len(wp.creating) - 1\n\t\t\t\tif n < 1 {\n\t\t\t\t\t// Quota error with no\n\t\t\t\t\t// instances running --\n\t\t\t\t\t// nothing to do but wait\n\t\t\t\t\twp.atQuotaUntilFewerInstances = 0\n\t\t\t\t\twp.atQuotaUntil = time.Now().Add(quotaErrorTTL)\n\t\t\t\t\ttime.AfterFunc(quotaErrorTTL, wp.notify)\n\t\t\t\t\tlogger.WithField(\"atQuotaUntil\", wp.atQuotaUntil).Info(\"quota error with 0 running -- waiting for quotaErrorTTL\")\n\t\t\t\t} else if n < wp.atQuotaUntilFewerInstances || wp.atQuotaUntilFewerInstances == 0 {\n\t\t\t\t\t// Quota error with N\n\t\t\t\t\t// instances running -- report\n\t\t\t\t\t// AtQuota until some\n\t\t\t\t\t// instances shut down\n\t\t\t\t\twp.atQuotaUntilFewerInstances = n\n\t\t\t\t\twp.atQuotaUntil = time.Time{}\n\t\t\t\t\tlogger.WithField(\"atQuotaUntilFewerInstances\", n).Info(\"quota error -- waiting for next instance shutdown\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err, ok := err.(cloud.CapacityError); ok && err.IsCapacityError() {\n\t\t\t\tisCapacityError = true\n\t\t\t\tvar capKey interface{}\n\t\t\t\tif err.IsInstanceTypeSpecific() {\n\t\t\t\t\tif it.Preemptible {\n\t\t\t\t\t\tcapKey = it.ProviderType + \"-preemptible\"\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcapKey = it.ProviderType\n\t\t\t\t\t}\n\t\t\t\t} else if err.IsInstanceQuotaGroupSpecific() {\n\t\t\t\t\tcapKey = wp.instanceSet.InstanceQuotaGroup(it)\n\t\t\t\t} else {\n\t\t\t\t\tcapKey = \"\"\n\t\t\t\t}\n\t\t\t\tif wp.atCapacityUntil == nil {\n\t\t\t\t\twp.atCapacityUntil = map[interface{}]time.Time{}\n\t\t\t\t}\n\t\t\t\twp.atCapacityUntil[capKey] = time.Now().Add(capacityErrorTTL)\n\t\t\t\ttime.AfterFunc(capacityErrorTTL, wp.notify)\n\t\t\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\t\t\"capacityErrorTTL\":      
       capacityErrorTTL,\n\t\t\t\t\t\"capKey\":                       capKey,\n\t\t\t\t\t\"atCapacityUntil\":              wp.atCapacityUntil[capKey],\n\t\t\t\t\t\"isInstanceTypeSpecific\":       err.IsInstanceTypeSpecific(),\n\t\t\t\t\t\"isInstanceQuotaGroupSpecific\": err.IsInstanceQuotaGroupSpecific(),\n\t\t\t\t}).Info(\"capacity error -- pausing instance create calls\")\n\t\t\t}\n\t\t\tlogger.WithError(err).WithFields(logrus.Fields{\n\t\t\t\t\"isCapacityError\": isCapacityError,\n\t\t\t\t\"isQuotaError\":    isQuotaError,\n\t\t\t}).Error(\"create failed\")\n\t\t\twp.instanceSet.throttleCreate.CheckRateLimitError(err, wp.logger, \"create instance\", wp.notify)\n\t\t\treturn\n\t\t}\n\t\twp.updateWorker(inst, it)\n\t}()\n\tif len(wp.creating)+len(wp.workers) == wp.maxInstances {\n\t\tlogger.Infof(\"now at MaxInstances limit of %d instances\", wp.maxInstances)\n\t}\n\treturn InstanceView{\n\t\tPrice:                it.Price,\n\t\tArvadosInstanceType:  it.Name,\n\t\tProviderInstanceType: it.ProviderType,\n\t\tWorkerState:          StateBooting,\n\t\tIdleBehavior:         IdleBehaviorRun,\n\t}, true\n}\n\n// AtCapacity returns true if Create() is currently expected to fail\n// for the given instance type.\nfunc (wp *Pool) AtCapacity(it arvados.InstanceType) bool {\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\tvar typeKey interface{}\n\tif it.Preemptible {\n\t\ttypeKey = it.ProviderType + \"-preemptible\"\n\t} else {\n\t\ttypeKey = it.ProviderType\n\t}\n\tfor _, capKey := range []interface{}{\n\t\t\"\",                                    // all instance types\n\t\twp.instanceSet.InstanceQuotaGroup(it), // instance quota group\n\t\ttypeKey,                               // just this instance type\n\t} {\n\t\tif t, ok := wp.atCapacityUntil[capKey]; ok && time.Now().Before(t) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// AtQuota returns true if Create is not expected to work at the\n// moment (e.g., cloud provider has reported quota errors, or we are\n// already at our own configured quota).\nfunc (wp *Pool) AtQuota() bool {\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\treturn wp.atQuotaUntilFewerInstances > 0 ||\n\t\ttime.Now().Before(wp.atQuotaUntil) ||\n\t\t(wp.maxInstances > 0 && wp.maxInstances <= len(wp.workers)+len(wp.creating))\n}\n\n// SetIdleBehavior determines how the indicated instance will behave\n// when it has no containers running.\nfunc (wp *Pool) SetIdleBehavior(id cloud.InstanceID, idleBehavior IdleBehavior) error {\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\twkr, ok := wp.workers[id]\n\tif !ok {\n\t\treturn errors.New(\"requested instance does not exist\")\n\t}\n\twkr.setIdleBehavior(idleBehavior)\n\treturn nil\n}\n\n// Successful connection to the SSH daemon, update the mTimeToSSH metric\nfunc (wp *Pool) reportSSHConnected(inst cloud.Instance) {\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\twkr, ok := wp.workers[inst.ID()]\n\tif !ok {\n\t\t// race: inst was removed from the pool\n\t\treturn\n\t}\n\tif wkr.state != StateBooting || !wkr.firstSSHConnection.IsZero() {\n\t\t// the node is not in booting state (can happen if\n\t\t// a-d-c is restarted) OR this is not the first SSH\n\t\t// connection\n\t\treturn\n\t}\n\n\twkr.firstSSHConnection = time.Now()\n\tif wp.mTimeToSSH != nil {\n\t\twp.mTimeToSSH.Observe(wkr.firstSSHConnection.Sub(wkr.appeared).Seconds())\n\t}\n}\n\n// Add or update worker attached to the given instance.\n//\n// The second return value is true if a new worker is created.\n//\n// A newly added instance has state=StateBooting if its tags match 
an\n// entry in wp.creating, otherwise StateUnknown.\n//\n// Caller must have lock.\nfunc (wp *Pool) updateWorker(inst cloud.Instance, it arvados.InstanceType) (*worker, bool) {\n\tsecret := inst.Tags()[wp.tagKeyPrefix+tagKeyInstanceSecret]\n\tinst = TagVerifier{Instance: inst, Secret: secret, ReportVerified: wp.reportSSHConnected}\n\tid := inst.ID()\n\tif wkr := wp.workers[id]; wkr != nil {\n\t\twkr.executor.SetTarget(inst)\n\t\twkr.instance = inst\n\t\twkr.updated = time.Now()\n\t\twkr.saveTags()\n\t\treturn wkr, false\n\t}\n\n\tlogger := wp.logger.WithFields(logrus.Fields{\n\t\t\"InstanceType\": it.Name,\n\t\t\"Instance\":     inst.ID(),\n\t\t\"Address\":      inst.Address(),\n\t})\n\n\tstate := StateUnknown\n\tif _, ok := wp.creating[secret]; ok {\n\t\tstate = StateBooting\n\t}\n\n\t// If an instance has a valid IdleBehavior tag when it first\n\t// appears, initialize the new worker accordingly (this is how\n\t// we restore IdleBehavior that was set by a prior dispatch\n\t// process); otherwise, default to \"run\". After this,\n\t// wkr.idleBehavior is the source of truth, and will only be\n\t// changed via SetIdleBehavior().\n\tidleBehaviorTag := inst.Tags()[wp.tagKeyPrefix+tagKeyIdleBehavior]\n\tidleBehavior := IdleBehavior(idleBehaviorTag)\n\tif !validIdleBehavior[idleBehavior] {\n\t\tidleBehavior = IdleBehaviorRun\n\t}\n\t// ProviderType is blank if the InstanceType tag did not map\n\t// to a configured type.  Presumably this means it was started\n\t// by a previous dispatch process and its type has been\n\t// removed from our configuration.  It will not be eligible to\n\t// run any containers, so we put it in drain state so it shuts\n\t// down after the first probe succeeds and (if applicable) any\n\t// running containers finish.\n\tif it.ProviderType == \"\" {\n\t\tif idleBehavior != IdleBehaviorHold {\n\t\t\tidleBehavior = IdleBehaviorDrain\n\t\t}\n\t\tlogger.Infof(\"%sing instance that was tagged with an unrecognized InstanceType and IdleBehavior = %s\", idleBehavior, idleBehaviorTag)\n\t}\n\n\tlogger.WithFields(logrus.Fields{\n\t\t\"State\":        state,\n\t\t\"IdleBehavior\": idleBehavior,\n\t}).Infof(\"instance appeared in cloud\")\n\tnow := time.Now()\n\twkr := &worker{\n\t\tmtx:          &wp.mtx,\n\t\twp:           wp,\n\t\tlogger:       logger,\n\t\texecutor:     wp.newExecutor(inst),\n\t\tstate:        state,\n\t\tidleBehavior: idleBehavior,\n\t\tinstance:     inst,\n\t\tinstType:     it,\n\t\tappeared:     now,\n\t\tprobed:       now,\n\t\tbusy:         now,\n\t\tupdated:      now,\n\t\trunning:      make(map[string]*remoteRunner),\n\t\tstarting:     make(map[string]*remoteRunner),\n\t\tprobing:      make(chan struct{}, 1),\n\t}\n\twp.workers[id] = wkr\n\treturn wkr, true\n}\n\n// Shutdown shuts down the indicated instance, or returns false if it\n// is ineligible for shutdown.\nfunc (wp *Pool) Shutdown(id cloud.InstanceID) bool {\n\twp.setupOnce.Do(wp.setup)\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\tlogger := wp.logger.WithField(\"Instance\", id)\n\tlogger.Info(\"shutdown requested\")\n\twkr := wp.workers[id]\n\tif wkr == nil || wkr.idleBehavior == IdleBehaviorHold || !(wkr.state == StateBooting || wkr.state == StateIdle) {\n\t\treturn false\n\t}\n\tlogger.Info(\"shutting down\")\n\twkr.reportBootOutcome(BootOutcomeAborted)\n\twkr.shutdown()\n\treturn true\n}\n\n// CountWorkers returns the current number of workers in each state.\n//\n// CountWorkers blocks, if necessary, until the initial instance list\n// has been loaded from the cloud provider.\nfunc (wp *Pool) 
CountWorkers() map[State]int {\n\twp.setupOnce.Do(wp.setup)\n\twp.waitUntilLoaded()\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\tr := map[State]int{}\n\tfor _, w := range wp.workers {\n\t\tr[w.state]++\n\t}\n\treturn r\n}\n\n// Running returns the container UUIDs being prepared/run on workers.\n//\n// In the returned map, the time value indicates when the Pool\n// observed that the container process had exited. A container that\n// has not yet exited has a zero time value. The caller should use\n// ForgetContainer() to garbage-collect the entries for exited\n// containers.\nfunc (wp *Pool) Running() map[string]time.Time {\n\twp.setupOnce.Do(wp.setup)\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\tr := map[string]time.Time{}\n\tfor _, wkr := range wp.workers {\n\t\tfor uuid := range wkr.running {\n\t\t\tr[uuid] = time.Time{}\n\t\t}\n\t\tfor uuid := range wkr.starting {\n\t\t\tr[uuid] = time.Time{}\n\t\t}\n\t}\n\tfor uuid, exited := range wp.exited {\n\t\tr[uuid] = exited\n\t}\n\treturn r\n}\n\n// StartContainer starts a container on an idle worker immediately if\n// possible, otherwise returns false.\nfunc (wp *Pool) StartContainer(id cloud.InstanceID, ctr arvados.Container) bool {\n\twp.setupOnce.Do(wp.setup)\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\twkr := wp.workers[id]\n\tif wkr == nil || !(wkr.state == StateIdle || wkr.state == StateRunning) || wkr.idleBehavior != IdleBehaviorRun {\n\t\treturn false\n\t}\n\twkr.startContainer(ctr)\n\treturn true\n}\n\n// KillContainer kills the crunch-run process for the given container\n// UUID, if it's running on any worker.\n//\n// KillContainer returns immediately; the act of killing the container\n// takes some time, and runs in the background.\n//\n// KillContainer returns false if the container has already ended.\nfunc (wp *Pool) KillContainer(uuid string, reason string) bool {\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\tlogger := wp.logger.WithFields(logrus.Fields{\n\t\t\"ContainerUUID\": uuid,\n\t\t\"Reason\":        reason,\n\t})\n\tfor _, wkr := range wp.workers {\n\t\trr := wkr.running[uuid]\n\t\tif rr == nil {\n\t\t\trr = wkr.starting[uuid]\n\t\t}\n\t\tif rr != nil {\n\t\t\trr.Kill(reason)\n\t\t\treturn true\n\t\t}\n\t}\n\tlogger.Debug(\"cannot kill: already disappeared\")\n\treturn false\n}\n\n// ForgetContainer clears the placeholder for the given exited\n// container, so it isn't returned by subsequent calls to Running().\n//\n// ForgetContainer has no effect if the container has not yet exited.\n//\n// The \"container exited at time T\" placeholder (which necessitates\n// ForgetContainer) exists to make it easier for the caller\n// (scheduler) to distinguish a container that exited without\n// finalizing its state from a container that exited too recently for\n// its final state to have appeared in the scheduler's queue cache.\nfunc (wp *Pool) ForgetContainer(uuid string) {\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\tif _, ok := wp.exited[uuid]; ok {\n\t\twp.logger.WithField(\"ContainerUUID\", uuid).Debug(\"clearing placeholder for exited crunch-run process\")\n\t\tdelete(wp.exited, uuid)\n\t}\n}\n\nfunc (wp *Pool) registerMetrics(reg *prometheus.Registry) {\n\tif reg == nil {\n\t\treg = prometheus.NewRegistry()\n\t}\n\twp.mContainersRunning = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"containers_running\",\n\t\tHelp:      \"Number of containers reported running by cloud VMs.\",\n\t})\n\treg.MustRegister(wp.mContainersRunning)\n\twp.mProbeAgeMax = 
prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"probe_age_seconds_max\",\n\t\tHelp:      \"Maximum number of seconds since an instance's most recent successful probe.\",\n\t})\n\treg.MustRegister(wp.mProbeAgeMax)\n\twp.mProbeAgeMedian = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"probe_age_seconds_median\",\n\t\tHelp:      \"Median number of seconds since an instance's most recent successful probe.\",\n\t})\n\treg.MustRegister(wp.mProbeAgeMedian)\n\twp.mInstances = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"instances_total\",\n\t\tHelp:      \"Number of cloud VMs.\",\n\t}, []string{\"category\", \"instance_type\"})\n\treg.MustRegister(wp.mInstances)\n\twp.mInstancesPrice = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"instances_price\",\n\t\tHelp:      \"Price of cloud VMs.\",\n\t}, []string{\"category\"})\n\treg.MustRegister(wp.mInstancesPrice)\n\twp.mVCPUs = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"vcpus_total\",\n\t\tHelp:      \"Total VCPUs on all cloud VMs.\",\n\t}, []string{\"category\"})\n\treg.MustRegister(wp.mVCPUs)\n\twp.mMemory = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"memory_bytes_total\",\n\t\tHelp:      \"Total memory on all cloud VMs.\",\n\t}, []string{\"category\"})\n\treg.MustRegister(wp.mMemory)\n\twp.mBootOutcomes = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"boot_outcomes\",\n\t\tHelp:      \"Boot outcomes by type.\",\n\t}, []string{\"outcome\"})\n\tfor k := range validBootOutcomes {\n\t\twp.mBootOutcomes.WithLabelValues(string(k)).Add(0)\n\t}\n\treg.MustRegister(wp.mBootOutcomes)\n\twp.mDisappearances = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"dispatchcloud\",\n\t\tName:      \"instances_disappeared\",\n\t\tHelp:      \"Number of occurrences of an instance disappearing from the cloud provider's list of instances.\",\n\t}, []string{\"state\"})\n\tfor s := range validStates {\n\t\twp.mDisappearances.WithLabelValues(string(s)).Add(0)\n\t}\n\treg.MustRegister(wp.mDisappearances)\n\twp.mTimeToSSH = prometheus.NewSummary(prometheus.SummaryOpts{\n\t\tNamespace:  \"arvados\",\n\t\tSubsystem:  \"dispatchcloud\",\n\t\tName:       \"instances_time_to_ssh_seconds\",\n\t\tHelp:       \"Number of seconds between instance creation and the first successful SSH connection.\",\n\t\tObjectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},\n\t})\n\treg.MustRegister(wp.mTimeToSSH)\n\twp.mTimeToReadyForContainer = prometheus.NewSummary(prometheus.SummaryOpts{\n\t\tNamespace:  \"arvados\",\n\t\tSubsystem:  \"dispatchcloud\",\n\t\tName:       \"instances_time_to_ready_for_container_seconds\",\n\t\tHelp:       \"Number of seconds between the first successful SSH connection and ready to run a container.\",\n\t\tObjectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},\n\t})\n\treg.MustRegister(wp.mTimeToReadyForContainer)\n\twp.mTimeFromShutdownToGone = prometheus.NewSummary(prometheus.SummaryOpts{\n\t\tNamespace:  
\"arvados\",\n\t\tSubsystem:  \"dispatchcloud\",\n\t\tName:       \"instances_time_from_shutdown_request_to_disappearance_seconds\",\n\t\tHelp:       \"Number of seconds between the first shutdown attempt and the disappearance of the worker.\",\n\t\tObjectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},\n\t})\n\treg.MustRegister(wp.mTimeFromShutdownToGone)\n\twp.mTimeFromQueueToCrunchRun = prometheus.NewSummary(prometheus.SummaryOpts{\n\t\tNamespace:  \"arvados\",\n\t\tSubsystem:  \"dispatchcloud\",\n\t\tName:       \"containers_time_from_queue_to_crunch_run_seconds\",\n\t\tHelp:       \"Number of seconds between the queuing of a container and the start of crunch-run.\",\n\t\tObjectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},\n\t})\n\treg.MustRegister(wp.mTimeFromQueueToCrunchRun)\n\twp.mRunProbeDuration = prometheus.NewSummaryVec(prometheus.SummaryOpts{\n\t\tNamespace:  \"arvados\",\n\t\tSubsystem:  \"dispatchcloud\",\n\t\tName:       \"instances_run_probe_duration_seconds\",\n\t\tHelp:       \"Number of seconds per runProbe call.\",\n\t\tObjectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},\n\t}, []string{\"outcome\"})\n\treg.MustRegister(wp.mRunProbeDuration)\n}\n\nfunc (wp *Pool) runMetrics() {\n\tch := wp.Subscribe()\n\tdefer wp.Unsubscribe(ch)\n\twp.updateMetrics()\n\tfor range ch {\n\t\twp.updateMetrics()\n\t}\n}\n\nfunc (wp *Pool) updateMetrics() {\n\twp.mtx.RLock()\n\tdefer wp.mtx.RUnlock()\n\n\ttype entKey struct {\n\t\tcat      string\n\t\tinstType string\n\t}\n\tinstances := map[entKey]int64{}\n\tprice := map[string]float64{}\n\tcpu := map[string]int64{}\n\tmem := map[string]int64{}\n\tvar running int64\n\tnow := time.Now()\n\tvar probed []time.Time\n\tfor _, wkr := range wp.workers {\n\t\tvar cat string\n\t\tswitch {\n\t\tcase len(wkr.running)+len(wkr.starting) > 0:\n\t\t\tcat = \"inuse\"\n\t\tcase wkr.idleBehavior == IdleBehaviorHold:\n\t\t\tcat = \"hold\"\n\t\tcase wkr.state == StateBooting:\n\t\t\tcat = \"booting\"\n\t\tcase wkr.state == StateUnknown:\n\t\t\tcat = \"unknown\"\n\t\tdefault:\n\t\t\tcat = \"idle\"\n\t\t}\n\t\tinstances[entKey{cat, wkr.instType.Name}]++\n\t\tprice[cat] += wkr.instType.Price\n\t\tcpu[cat] += int64(wkr.instType.VCPUs)\n\t\tmem[cat] += int64(wkr.instType.RAM)\n\t\trunning += int64(len(wkr.running) + len(wkr.starting))\n\t\tprobed = append(probed, wkr.probed)\n\t}\n\tfor _, cat := range []string{\"inuse\", \"hold\", \"booting\", \"unknown\", \"idle\"} {\n\t\twp.mInstancesPrice.WithLabelValues(cat).Set(price[cat])\n\t\twp.mVCPUs.WithLabelValues(cat).Set(float64(cpu[cat]))\n\t\twp.mMemory.WithLabelValues(cat).Set(float64(mem[cat]))\n\t\t// make sure to reset gauges for non-existing category/nodetype combinations\n\t\tfor _, it := range wp.instanceTypes {\n\t\t\tif _, ok := instances[entKey{cat, it.Name}]; !ok {\n\t\t\t\twp.mInstances.WithLabelValues(cat, it.Name).Set(float64(0))\n\t\t\t}\n\t\t}\n\t}\n\tfor k, v := range instances {\n\t\twp.mInstances.WithLabelValues(k.cat, k.instType).Set(float64(v))\n\t}\n\twp.mContainersRunning.Set(float64(running))\n\n\tif len(probed) == 0 {\n\t\twp.mProbeAgeMax.Set(0)\n\t\twp.mProbeAgeMedian.Set(0)\n\t} else {\n\t\tsort.Slice(probed, func(i, j int) bool { return probed[i].Before(probed[j]) })\n\t\twp.mProbeAgeMax.Set(now.Sub(probed[0]).Seconds())\n\t\twp.mProbeAgeMedian.Set(now.Sub(probed[len(probed)/2]).Seconds())\n\t}\n}\n\nfunc (wp *Pool) runProbes() {\n\tmaxPPS := wp.maxProbesPerSecond\n\tif maxPPS < 1 {\n\t\tmaxPPS = 
defaultMaxProbesPerSecond\n\t}\n\tlimitticker := time.NewTicker(time.Second / time.Duration(maxPPS))\n\tdefer limitticker.Stop()\n\n\tprobeticker := time.NewTicker(wp.probeInterval)\n\tdefer probeticker.Stop()\n\n\tworkers := []cloud.InstanceID{}\n\tfor range probeticker.C {\n\t\t// Add some jitter. Without this, if probeInterval is\n\t\t// a multiple of syncInterval and sync is\n\t\t// instantaneous (as with the loopback driver), the\n\t\t// first few probes race with sync operations and\n\t\t// don't update the workers.\n\t\ttime.Sleep(time.Duration(mathrand.Int63n(int64(wp.probeInterval) / 23)))\n\n\t\tworkers = workers[:0]\n\t\twp.mtx.Lock()\n\t\tfor id, wkr := range wp.workers {\n\t\t\tif wkr.state == StateShutdown || wkr.shutdownIfIdle() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tworkers = append(workers, id)\n\t\t}\n\t\twp.mtx.Unlock()\n\n\t\tfor _, id := range workers {\n\t\t\twp.mtx.Lock()\n\t\t\twkr, ok := wp.workers[id]\n\t\t\twp.mtx.Unlock()\n\t\t\tif !ok {\n\t\t\t\t// Deleted while we were probing\n\t\t\t\t// others\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo wkr.ProbeAndUpdate()\n\t\t\tselect {\n\t\t\tcase <-wp.stop:\n\t\t\t\treturn\n\t\t\tcase <-limitticker.C:\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (wp *Pool) runSync() {\n\t// sync once immediately, then wait syncInterval, sync again,\n\t// etc.\n\ttimer := time.NewTimer(1)\n\tfor {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\terr := wp.getInstancesAndSync()\n\t\t\tif err != nil {\n\t\t\t\twp.logger.WithError(err).Warn(\"sync failed\")\n\t\t\t}\n\t\t\ttimer.Reset(wp.syncInterval)\n\t\tcase <-wp.stop:\n\t\t\twp.logger.Debug(\"worker.Pool stopped\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Stop synchronizing with the InstanceSet.\nfunc (wp *Pool) Stop() {\n\twp.setupOnce.Do(wp.setup)\n\tclose(wp.stop)\n}\n\n// Instances returns an InstanceView for each worker in the pool,\n// summarizing its current state and recent activity.\n//\n// Each pending Create call is represented by an InstanceView with a\n// blank Instance field.\nfunc (wp *Pool) Instances() []InstanceView {\n\tvar r []InstanceView\n\twp.setupOnce.Do(wp.setup)\n\twp.mtx.Lock()\n\tfor _, w := range wp.workers {\n\t\trunning := make([]string, 0, len(w.running)+len(w.starting))\n\t\tfor uuid := range w.running {\n\t\t\trunning = append(running, uuid)\n\t\t}\n\t\tfor uuid := range w.starting {\n\t\t\trunning = append(running, uuid)\n\t\t}\n\t\tsort.Strings(running)\n\t\tr = append(r, InstanceView{\n\t\t\tInstance:              w.instance.ID(),\n\t\t\tAddress:               w.instance.Address(),\n\t\t\tPrice:                 w.instType.Price,\n\t\t\tArvadosInstanceType:   w.instType.Name,\n\t\t\tProviderInstanceType:  w.instType.ProviderType,\n\t\t\tLastContainerUUID:     w.lastUUID,\n\t\t\tLastBusy:              w.busy,\n\t\t\tRunningContainerUUIDs: running,\n\t\t\tWorkerState:           w.state,\n\t\t\tIdleBehavior:          w.idleBehavior,\n\t\t})\n\t}\n\tfor _, cc := range wp.creating {\n\t\tr = append(r, InstanceView{\n\t\t\tPrice:                cc.instanceType.Price,\n\t\t\tArvadosInstanceType:  cc.instanceType.Name,\n\t\t\tProviderInstanceType: cc.instanceType.ProviderType,\n\t\t\tWorkerState:          StateBooting,\n\t\t\tIdleBehavior:         IdleBehaviorRun,\n\t\t})\n\t}\n\twp.mtx.Unlock()\n\tsort.Slice(r, func(i, j int) bool {\n\t\tif r[i].Instance == r[j].Instance {\n\t\t\treturn strings.Compare(r[i].ArvadosInstanceType, r[j].ArvadosInstanceType) < 0\n\t\t}\n\t\treturn strings.Compare(string(r[i].Instance), string(r[j].Instance)) < 0\n\t})\n\treturn r\n}\n\n// KillInstance destroys a 
cloud VM instance. It returns an error if\n// the given instance does not exist.\nfunc (wp *Pool) KillInstance(id cloud.InstanceID, reason string) error {\n\twp.setupOnce.Do(wp.setup)\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\twkr, ok := wp.workers[id]\n\tif !ok {\n\t\treturn errors.New(\"instance not found\")\n\t}\n\twkr.logger.WithField(\"Reason\", reason).Info(\"shutting down\")\n\twkr.reportBootOutcome(BootOutcomeAborted)\n\twkr.shutdown()\n\treturn nil\n}\n\nfunc (wp *Pool) setup() {\n\twp.creating = map[string]createCall{}\n\twp.exited = map[string]time.Time{}\n\twp.workers = map[cloud.InstanceID]*worker{}\n\twp.subscribers = map[<-chan struct{}]chan<- struct{}{}\n\twp.loadRunnerData()\n}\n\n// Load the runner program to be deployed on worker nodes into\n// wp.runnerData, if necessary. Errors are logged.\n//\n// If auto-deploy is disabled, len(wp.runnerData) will be 0.\n//\n// Caller must not have lock.\nfunc (wp *Pool) loadRunnerData() error {\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\tif wp.runnerData != nil {\n\t\treturn nil\n\t} else if wp.runnerSource == \"\" {\n\t\twp.runnerCmd = wp.runnerCmdDefault\n\t\twp.runnerData = []byte{}\n\t\treturn nil\n\t}\n\tlogger := wp.logger.WithField(\"source\", wp.runnerSource)\n\tlogger.Debug(\"loading runner\")\n\tbuf, err := ioutil.ReadFile(wp.runnerSource)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"failed to load runner program\")\n\t\treturn err\n\t}\n\twp.runnerData = buf\n\twp.runnerMD5 = md5.Sum(buf)\n\twp.runnerCmd = fmt.Sprintf(\"%s/crunch-run~%x\", wp.runnerDeployDirectory, wp.runnerMD5)\n\treturn nil\n}\n\nfunc (wp *Pool) notify() {\n\twp.mtx.RLock()\n\tdefer wp.mtx.RUnlock()\n\tfor _, send := range wp.subscribers {\n\t\tselect {\n\t\tcase send <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t}\n}\n\nfunc (wp *Pool) getInstancesAndSync() error {\n\twp.setupOnce.Do(wp.setup)\n\tif err := wp.instanceSet.throttleInstances.Error(); err != nil {\n\t\treturn err\n\t}\n\twp.logger.Debug(\"getting instance list\")\n\tthreshold := time.Now()\n\tinstances, err := wp.instanceSet.Instances(cloud.InstanceTags{wp.tagKeyPrefix + tagKeyInstanceSetID: string(wp.instanceSetID)})\n\tif err != nil {\n\t\twp.instanceSet.throttleInstances.CheckRateLimitError(err, wp.logger, \"list instances\", wp.notify)\n\t\treturn err\n\t}\n\twp.sync(threshold, instances)\n\twp.logger.Debug(\"sync done\")\n\treturn nil\n}\n\n// Add/remove/update workers based on instances, which was obtained\n// from the instanceSet. 
However, don't clobber any other updates that\n// already happened after threshold.\nfunc (wp *Pool) sync(threshold time.Time, instances []cloud.Instance) {\n\twp.mtx.Lock()\n\tdefer wp.mtx.Unlock()\n\twp.logger.WithField(\"Instances\", len(instances)).Debug(\"sync instances\")\n\tnotify := false\n\n\tfor _, inst := range instances {\n\t\titTag := inst.Tags()[wp.tagKeyPrefix+tagKeyInstanceType]\n\t\tit, ok := wp.instanceTypes[itTag]\n\t\tif !ok {\n\t\t\t// updateWorker will notice this isn't a real\n\t\t\t// instance type, and set IdleBehavior to\n\t\t\t// drain.\n\t\t\tit.Name = itTag\n\t\t}\n\t\tif wkr, isNew := wp.updateWorker(inst, it); isNew {\n\t\t\tnotify = true\n\t\t} else if wkr.state == StateShutdown && time.Since(wkr.destroyed) > wp.timeoutShutdown {\n\t\t\twp.logger.WithField(\"Instance\", inst.ID()).Info(\"worker still listed after shutdown; retrying\")\n\t\t\twkr.shutdown()\n\t\t}\n\t}\n\n\tfor id, wkr := range wp.workers {\n\t\tif wkr.updated.After(threshold) {\n\t\t\tcontinue\n\t\t}\n\t\tlogger := wp.logger.WithFields(logrus.Fields{\n\t\t\t\"Instance\":    wkr.instance.ID(),\n\t\t\t\"WorkerState\": wkr.state,\n\t\t})\n\t\tlogger.Info(\"instance disappeared in cloud\")\n\t\twkr.reportBootOutcome(BootOutcomeDisappeared)\n\t\tif wp.mDisappearances != nil {\n\t\t\twp.mDisappearances.WithLabelValues(string(wkr.state)).Inc()\n\t\t}\n\t\t// wkr.destroyed.IsZero() can happen if instance disappeared but we weren't trying to shut it down\n\t\tif wp.mTimeFromShutdownToGone != nil && !wkr.destroyed.IsZero() {\n\t\t\twp.mTimeFromShutdownToGone.Observe(time.Now().Sub(wkr.destroyed).Seconds())\n\t\t}\n\t\tdelete(wp.workers, id)\n\t\tgo wkr.Close()\n\t\tnotify = true\n\t}\n\n\tif wp.atQuotaUntilFewerInstances > len(wp.workers)+len(wp.creating) {\n\t\t// After syncing, there are fewer instances (including\n\t\t// pending creates) than there were last time we saw a\n\t\t// quota error.  This might mean it's now possible to\n\t\t// create new instances.  Reset our \"at quota\" state.\n\t\twp.atQuotaUntilFewerInstances = 0\n\t}\n\n\tif !wp.loaded {\n\t\tnotify = true\n\t\twp.loaded = true\n\t\twp.logger.WithField(\"N\", len(wp.workers)).Info(\"loaded initial instance list\")\n\t}\n\n\tif notify {\n\t\tgo wp.notify()\n\t}\n}\n\nfunc (wp *Pool) waitUntilLoaded() {\n\tch := wp.Subscribe()\n\twp.mtx.RLock()\n\tdefer wp.mtx.RUnlock()\n\tfor !wp.loaded {\n\t\twp.mtx.RUnlock()\n\t\t<-ch\n\t\twp.mtx.RLock()\n\t}\n}\n\nfunc (wp *Pool) gatewayAuthSecret(uuid string) string {\n\th := hmac.New(sha256.New, []byte(wp.systemRootToken))\n\tfmt.Fprint(h, uuid)\n\treturn fmt.Sprintf(\"%x\", h.Sum(nil))\n}\n\n// Return a random string of n hexadecimal digits (n*4 random bits). n\n// must be even.\nfunc randomHex(n int) string {\n\tbuf := make([]byte, n/2)\n\t_, err := rand.Read(buf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn fmt.Sprintf(\"%x\", buf)\n}\n"
  },
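  {
    "path": "lib/dispatchcloud/worker/examples/gatewayauthsecret/main.go",
    "content": "// Illustrative sketch (hypothetical file, not part of the source\n// tree): isolates the HMAC-SHA256 derivation that Pool.gatewayAuthSecret\n// in pool.go uses to turn the cluster's SystemRootToken plus a container\n// UUID into a per-container gateway secret. The token and UUID below are\n// made-up placeholders.\npackage main\n\nimport (\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"fmt\"\n)\n\n// gatewayAuthSecret mirrors the pool.go helper: sign the container UUID\n// with the system root token and hex-encode the digest.\nfunc gatewayAuthSecret(systemRootToken, uuid string) string {\n\th := hmac.New(sha256.New, []byte(systemRootToken))\n\tfmt.Fprint(h, uuid)\n\treturn fmt.Sprintf(\"%x\", h.Sum(nil))\n}\n\nfunc main() {\n\t// Placeholder inputs; a real dispatcher uses the configured\n\t// SystemRootToken and an actual container UUID.\n\tfmt.Println(gatewayAuthSecret(\"example-system-root-token\", \"zzzzz-dz642-0123456789abcde\"))\n}\n"
  },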
  {
    "path": "lib/dispatchcloud/worker/pool_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage worker\n\nimport (\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/test\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nconst GiB arvados.ByteSize = 1 << 30\n\nvar _ = check.Suite(&PoolSuite{})\n\ntype lessChecker struct {\n\t*check.CheckerInfo\n}\n\nfunc (*lessChecker) Check(params []interface{}, names []string) (result bool, error string) {\n\treturn params[0].(int) < params[1].(int), \"\"\n}\n\nvar less = &lessChecker{&check.CheckerInfo{Name: \"less\", Params: []string{\"obtained\", \"expected\"}}}\n\ntype PoolSuite struct {\n\tlogger      logrus.FieldLogger\n\ttestCluster *arvados.Cluster\n}\n\nfunc (suite *PoolSuite) SetUpTest(c *check.C) {\n\tsuite.logger = ctxlog.TestLogger(c)\n\tcfg, err := config.NewLoader(nil, suite.logger).Load()\n\tc.Assert(err, check.IsNil)\n\tsuite.testCluster, err = cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n}\n\nfunc (suite *PoolSuite) TestResumeAfterRestart(c *check.C) {\n\ttype1 := test.InstanceType(1)\n\ttype2 := test.InstanceType(2)\n\ttype3 := test.InstanceType(3)\n\ttype4 := test.InstanceType(4)\n\twaitForIdle := func(pool *Pool, notify <-chan struct{}) {\n\t\ttimeout := time.NewTimer(time.Second)\n\t\tfor {\n\t\t\tinstances := pool.Instances()\n\t\t\tsort.Slice(instances, func(i, j int) bool {\n\t\t\t\treturn strings.Compare(instances[i].ArvadosInstanceType, instances[j].ArvadosInstanceType) < 0\n\t\t\t})\n\t\t\tif len(instances) == 4 &&\n\t\t\t\tinstances[0].ArvadosInstanceType == type1.Name &&\n\t\t\t\tinstances[0].WorkerState == StateIdle &&\n\t\t\t\tinstances[1].ArvadosInstanceType == type1.Name &&\n\t\t\t\tinstances[1].WorkerState == StateIdle &&\n\t\t\t\tinstances[2].ArvadosInstanceType == type2.Name &&\n\t\t\t\tinstances[2].WorkerState == StateIdle &&\n\t\t\t\tinstances[3].ArvadosInstanceType == type4.Name &&\n\t\t\t\tinstances[3].WorkerState == StateIdle {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-timeout.C:\n\t\t\t\tc.Logf(\"pool.Instances() == %#v\", instances)\n\t\t\t\tc.Error(\"timed out\")\n\t\t\t\treturn\n\t\t\tcase <-notify:\n\t\t\t}\n\t\t}\n\t}\n\n\tdriver := &test.StubDriver{}\n\tinstanceSetID := cloud.InstanceSetID(\"test-instance-set-id\")\n\tis, err := driver.InstanceSet(nil, instanceSetID, nil, suite.logger, nil)\n\tc.Assert(err, check.IsNil)\n\tdefer is.Stop()\n\n\tnewExecutor := func(cloud.Instance) Executor {\n\t\treturn &stubExecutor{\n\t\t\tresponse: map[string]stubResp{\n\t\t\t\t\"crunch-run-custom --list\": {},\n\t\t\t\t\"true\":                     {},\n\t\t\t},\n\t\t}\n\t}\n\n\tsuite.testCluster.Containers.CloudVMs = arvados.CloudVMsConfig{\n\t\tBootProbeCommand:   \"true\",\n\t\tMaxProbesPerSecond: 1000,\n\t\tProbeInterval:      arvados.Duration(time.Millisecond * 10),\n\t\tSyncInterval:       arvados.Duration(time.Millisecond * 10),\n\t\tTagKeyPrefix:       \"testprefix:\",\n\t}\n\tsuite.testCluster.Containers.CrunchRunCommand = \"crunch-run-custom\"\n\tsuite.testCluster.InstanceTypes = arvados.InstanceTypeMap{\n\t\ttype1.Name: type1,\n\t\ttype2.Name: type2,\n\t\ttype3.Name: type3,\n\t\ttype4.Name: type4,\n\t}\n\n\tpool := NewPool(suite.logger, 
arvados.NewClientFromEnv(), prometheus.NewRegistry(), instanceSetID, is, newExecutor, nil, suite.testCluster)\n\tnotify := pool.Subscribe()\n\tdefer pool.Unsubscribe(notify)\n\tpool.Create(type1)\n\tpool.Create(type1)\n\tpool.Create(type2)\n\tpool.Create(type4)\n\twaitForIdle(pool, notify)\n\tvar heldInstanceID cloud.InstanceID\n\tfor _, inst := range pool.Instances() {\n\t\tif inst.ArvadosInstanceType == type2.Name {\n\t\t\theldInstanceID = cloud.InstanceID(inst.Instance)\n\t\t\tpool.SetIdleBehavior(heldInstanceID, IdleBehaviorHold)\n\t\t}\n\t}\n\t// Wait for the tags to save to the cloud provider\n\ttagKey := suite.testCluster.Containers.CloudVMs.TagKeyPrefix + tagKeyIdleBehavior\n\tdeadline := time.Now().Add(time.Second)\n\tfor !func() bool {\n\t\tpool.mtx.RLock()\n\t\tdefer pool.mtx.RUnlock()\n\t\tfor _, wkr := range pool.workers {\n\t\t\tif wkr.instType == type2 {\n\t\t\t\treturn wkr.instance.Tags()[tagKey] == string(IdleBehaviorHold)\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}() {\n\t\tif time.Now().After(deadline) {\n\t\t\tc.Fatal(\"timeout\")\n\t\t}\n\t\ttime.Sleep(time.Millisecond * 10)\n\t}\n\tpool.Stop()\n\n\tc.Log(\"------- starting new pool, waiting to recover state\")\n\n\tdelete(suite.testCluster.InstanceTypes, type4.Name)\n\tpool2 := NewPool(suite.logger, arvados.NewClientFromEnv(), prometheus.NewRegistry(), instanceSetID, is, newExecutor, nil, suite.testCluster)\n\tnotify2 := pool2.Subscribe()\n\tdefer pool2.Unsubscribe(notify2)\n\twaitForIdle(pool2, notify2)\n\tfor _, inst := range pool2.Instances() {\n\t\tif inst.ArvadosInstanceType == type2.Name {\n\t\t\tc.Check(inst.Instance, check.Equals, heldInstanceID)\n\t\t\tc.Check(inst.IdleBehavior, check.Equals, IdleBehaviorHold)\n\t\t} else if inst.ArvadosInstanceType == type4.Name {\n\t\t\t// type4 instance is tagged IdleBehaviorRun,\n\t\t\t// but type4 was removed from config, so the\n\t\t\t// worker should be added with\n\t\t\t// IdleBehaviorDrain.\n\t\t\tc.Check(inst.IdleBehavior, check.Equals, IdleBehaviorDrain)\n\t\t} else {\n\t\t\tc.Check(inst.IdleBehavior, check.Equals, IdleBehaviorRun)\n\t\t}\n\t}\n\tpool2.Stop()\n}\n\nfunc (suite *PoolSuite) TestDrain(c *check.C) {\n\tdriver := test.StubDriver{}\n\tinstanceSet, err := driver.InstanceSet(nil, \"test-instance-set-id\", nil, suite.logger, nil)\n\tc.Assert(err, check.IsNil)\n\tdefer instanceSet.Stop()\n\n\tac := arvados.NewClientFromEnv()\n\n\ttype1 := test.InstanceType(1)\n\tpool := &Pool{\n\t\tarvClient:   ac,\n\t\tlogger:      suite.logger,\n\t\tnewExecutor: func(cloud.Instance) Executor { return &stubExecutor{} },\n\t\tcluster:     suite.testCluster,\n\t\tinstanceSet: &throttledInstanceSet{InstanceSet: instanceSet},\n\t\tinstanceTypes: arvados.InstanceTypeMap{\n\t\t\ttype1.Name: type1,\n\t\t},\n\t}\n\tnotify := pool.Subscribe()\n\tdefer pool.Unsubscribe(notify)\n\n\tpool.Create(type1)\n\n\t// Wait for the instance to either return from its Create\n\t// call, or show up in a poll.\n\tsuite.wait(c, pool, notify, func() bool {\n\t\tpool.mtx.RLock()\n\t\tdefer pool.mtx.RUnlock()\n\t\treturn len(pool.workers) == 1\n\t})\n\n\ttests := []struct {\n\t\tstate        State\n\t\tidleBehavior IdleBehavior\n\t\tresult       bool\n\t}{\n\t\t{StateIdle, IdleBehaviorHold, false},\n\t\t{StateIdle, IdleBehaviorDrain, false},\n\t\t{StateIdle, IdleBehaviorRun, true},\n\t}\n\n\tfor _, test := range tests {\n\t\tfor _, wkr := range pool.workers {\n\t\t\twkr.state = test.state\n\t\t\twkr.idleBehavior = test.idleBehavior\n\t\t}\n\n\t\t// Try to start a container\n\t\tstarted := 
pool.StartContainer(pool.Instances()[0].Instance, arvados.Container{UUID: \"testcontainer\"})\n\t\tc.Check(started, check.Equals, test.result)\n\t}\n}\n\nfunc (suite *PoolSuite) TestNodeCreateThrottle(c *check.C) {\n\tdriver := test.StubDriver{HoldCloudOps: true}\n\tinstanceSet, err := driver.InstanceSet(nil, \"test-instance-set-id\", nil, suite.logger, nil)\n\tc.Assert(err, check.IsNil)\n\tdefer instanceSet.Stop()\n\n\ttype1 := test.InstanceType(1)\n\tpool := &Pool{\n\t\tlogger:                         suite.logger,\n\t\tinstanceSet:                    &throttledInstanceSet{InstanceSet: instanceSet},\n\t\tcluster:                        suite.testCluster,\n\t\tmaxConcurrentInstanceCreateOps: 1,\n\t\tinstanceTypes: arvados.InstanceTypeMap{\n\t\t\ttype1.Name: type1,\n\t\t},\n\t}\n\n\tc.Check(suite.instancesByType(pool, type1), check.HasLen, 0)\n\t_, res := pool.Create(type1)\n\tc.Check(suite.instancesByType(pool, type1), check.HasLen, 1)\n\tc.Check(res, check.Equals, true)\n\n\t_, res = pool.Create(type1)\n\tc.Check(suite.instancesByType(pool, type1), check.HasLen, 1)\n\tc.Check(res, check.Equals, false)\n\n\tpool.instanceSet.throttleCreate.err = nil\n\tpool.maxConcurrentInstanceCreateOps = 2\n\n\t_, res = pool.Create(type1)\n\tc.Check(suite.instancesByType(pool, type1), check.HasLen, 2)\n\tc.Check(res, check.Equals, true)\n\n\tpool.instanceSet.throttleCreate.err = nil\n\tpool.maxConcurrentInstanceCreateOps = 0\n\n\t_, res = pool.Create(type1)\n\tc.Check(suite.instancesByType(pool, type1), check.HasLen, 3)\n\tc.Check(res, check.Equals, true)\n}\n\nfunc (suite *PoolSuite) TestCreateUnallocShutdown(c *check.C) {\n\tdriver := test.StubDriver{HoldCloudOps: true}\n\tinstanceSet, err := driver.InstanceSet(nil, \"test-instance-set-id\", nil, suite.logger, nil)\n\tc.Assert(err, check.IsNil)\n\tdefer instanceSet.Stop()\n\n\ttype1 := arvados.InstanceType{Name: \"a1s\", ProviderType: \"a1.small\", VCPUs: 1, RAM: 1 * GiB, Price: .01}\n\ttype2 := arvados.InstanceType{Name: \"a2m\", ProviderType: \"a2.medium\", VCPUs: 2, RAM: 2 * GiB, Price: .02}\n\ttype3 := arvados.InstanceType{Name: \"a2l\", ProviderType: \"a2.large\", VCPUs: 4, RAM: 4 * GiB, Price: .04}\n\tpool := &Pool{\n\t\tlogger:      suite.logger,\n\t\tnewExecutor: func(cloud.Instance) Executor { return &stubExecutor{} },\n\t\tcluster:     suite.testCluster,\n\t\tinstanceSet: &throttledInstanceSet{InstanceSet: instanceSet},\n\t\tinstanceTypes: arvados.InstanceTypeMap{\n\t\t\ttype1.Name: type1,\n\t\t\ttype2.Name: type2,\n\t\t\ttype3.Name: type3,\n\t\t},\n\t\tinstanceInitCommand: \"echo 'instance init command goes here'\",\n\t}\n\tnotify := pool.Subscribe()\n\tdefer pool.Unsubscribe(notify)\n\tnotify2 := pool.Subscribe()\n\tdefer pool.Unsubscribe(notify2)\n\n\tc.Check(pool.Instances(), check.HasLen, 0)\n\tpool.Create(type2)\n\tpool.Create(type1)\n\tpool.Create(type2)\n\tpool.Create(type3)\n\n\t// Check the pending instances already appear in\n\t// pool.Instances() even though the cloud driver has not yet\n\t// responded to CreateInstance.\n\tc.Check(suite.instancesByType(pool, type1), check.HasLen, 1)\n\tc.Check(suite.instancesByType(pool, type2), check.HasLen, 2)\n\tc.Check(suite.instancesByType(pool, type3), check.HasLen, 1)\n\n\t// Unblock driver operations for the duration of the test.\n\tgo driver.ReleaseCloudOps(4444)\n\n\t// Wait for each instance to either return from its Create\n\t// call, or show up in a poll.\n\tsuite.wait(c, pool, notify, func() bool {\n\t\tpool.mtx.RLock()\n\t\tdefer pool.mtx.RUnlock()\n\t\treturn len(pool.workers) == 
4\n\t})\n\n\tvms := instanceSet.(*test.StubInstanceSet).StubVMs()\n\tc.Check(string(vms[0].InitCommand), check.Matches, `umask 0177 && echo -n \"[0-9a-f]+\" >/var/run/arvados-instance-secret\\necho 'instance init command goes here'`)\n\n\t// Place type3 node on admin-hold\n\tivs := suite.instancesByType(pool, type3)\n\tc.Assert(ivs, check.HasLen, 1)\n\ttype3instanceID := ivs[0].Instance\n\terr = pool.SetIdleBehavior(type3instanceID, IdleBehaviorHold)\n\tc.Check(err, check.IsNil)\n\n\t// Check admin-hold behavior: refuse to shutdown, and\n\t// Instances() reports IdleBehaviorHold.\n\tc.Check(pool.Shutdown(type3instanceID), check.Equals, false)\n\tsuite.wait(c, pool, notify, func() bool {\n\t\treturn suite.instancesByType(pool, type3)[0].IdleBehavior == IdleBehaviorHold\n\t})\n\tc.Check(suite.instancesByType(pool, type3), check.HasLen, 1)\n\n\t// Shutdown both type2 nodes\n\tfor n, iv := range suite.instancesByType(pool, type2) {\n\t\tc.Check(pool.Shutdown(iv.Instance), check.Equals, true)\n\t\tsuite.wait(c, pool, notify, func() bool {\n\t\t\tpool.getInstancesAndSync()\n\t\t\treturn len(suite.instancesByType(pool, type1)) == 1 && len(suite.instancesByType(pool, type2)) == 1-n\n\t\t})\n\t}\n\tfor {\n\t\t// Consume any waiting notifications to ensure the\n\t\t// next one we get is from Shutdown.\n\t\tselect {\n\t\tcase <-notify:\n\t\t\tcontinue\n\t\tdefault:\n\t\t}\n\t\tbreak\n\t}\n\n\t// Shutdown type1 node\n\tc.Check(pool.Shutdown(suite.instancesByType(pool, type1)[0].Instance), check.Equals, true)\n\tsuite.wait(c, pool, notify, func() bool {\n\t\tpool.getInstancesAndSync()\n\t\treturn len(suite.instancesByType(pool, type1)) == 0\n\t})\n\tselect {\n\tcase <-notify2:\n\tcase <-time.After(time.Second):\n\t\tc.Error(\"notify did not receive\")\n\t}\n\n\t// Put type3 node back in service.\n\terr = pool.SetIdleBehavior(type3instanceID, IdleBehaviorRun)\n\tc.Check(err, check.IsNil)\n\tsuite.wait(c, pool, notify, func() bool {\n\t\treturn suite.instancesByType(pool, type3)[0].IdleBehavior == IdleBehaviorRun\n\t})\n\n\t// Check admin-drain behavior: shut down right away.\n\terr = pool.SetIdleBehavior(type3instanceID, IdleBehaviorDrain)\n\tc.Check(err, check.IsNil)\n\tsuite.wait(c, pool, notify, func() bool {\n\t\tivs := suite.instancesByType(pool, type3)\n\t\treturn len(ivs) == 1 && ivs[0].WorkerState == StateShutdown\n\t})\n\n\t// Sync until all instances disappear from the provider list.\n\tsuite.wait(c, pool, notify, func() bool {\n\t\tpool.getInstancesAndSync()\n\t\treturn len(pool.Instances()) == 0\n\t})\n}\n\nfunc (suite *PoolSuite) TestInstanceQuotaGroup(c *check.C) {\n\tdriver := test.StubDriver{}\n\tinstanceSet, err := driver.InstanceSet(nil, \"test-instance-set-id\", nil, suite.logger, nil)\n\tc.Assert(err, check.IsNil)\n\tdefer instanceSet.Stop()\n\n\t// Note the stub driver uses the first character of\n\t// ProviderType as the instance family, so we have two\n\t// instance families here, \"a\" and \"b\".\n\ttypeA1 := test.InstanceType(1)\n\ttypeA1.ProviderType = \"a1\"\n\ttypeA1p := test.InstanceType(1)\n\ttypeA1p.Name += \"-p\"\n\ttypeA1p.Preemptible = true\n\ttypeA1p.ProviderType = \"a1\"\n\ttypeA2 := test.InstanceType(2)\n\ttypeA2.ProviderType = \"a2\"\n\ttypeB3 := test.InstanceType(3)\n\ttypeB3.ProviderType = \"b3\"\n\ttypeB3p := test.InstanceType(3)\n\ttypeB3p.Name += \"-p\"\n\ttypeB3p.Preemptible = true\n\ttypeB3p.ProviderType = \"b3\"\n\ttypeB4 := test.InstanceType(4)\n\ttypeB4.ProviderType = \"b4\"\n\ttypeB4p := test.InstanceType(4)\n\ttypeB4p.Name += 
\"-p\"\n\ttypeB4p.Preemptible = true\n\ttypeB4p.ProviderType = \"b4\"\n\n\tpool := &Pool{\n\t\tlogger:      suite.logger,\n\t\tnewExecutor: func(cloud.Instance) Executor { return &stubExecutor{} },\n\t\tcluster:     suite.testCluster,\n\t\tinstanceSet: &throttledInstanceSet{InstanceSet: instanceSet},\n\t\tinstanceTypes: arvados.InstanceTypeMap{\n\t\t\ttypeA1.Name:  typeA1,\n\t\t\ttypeA1p.Name: typeA1p,\n\t\t\ttypeA2.Name:  typeA2,\n\t\t\ttypeB3.Name:  typeB3,\n\t\t\ttypeB4.Name:  typeB4,\n\t\t},\n\t}\n\n\t// Arrange for a quota-group-specific error on next\n\t// instanceSet.Create().\n\tdriver.SetupVM = func(*test.StubVM) error { return test.CapacityError{InstanceQuotaGroupSpecific: true} }\n\t// pool.Create() returns true when it starts a goroutine to\n\t// call instanceSet.Create() in the background.\n\t_, created := pool.Create(typeA1)\n\tc.Check(created, check.Equals, true)\n\t// Wait for the pool to start reporting that the provider is\n\t// at capacity for instance type A1.\n\tfor deadline := time.Now().Add(time.Second); !pool.AtCapacity(typeA1); time.Sleep(time.Millisecond) {\n\t\tif time.Now().After(deadline) {\n\t\t\tc.Fatal(\"timed out waiting for pool to report quota\")\n\t\t}\n\t}\n\n\t// Arrange for a type-specific error on next\n\t// instanceSet.Create().\n\tdriver.SetupVM = func(*test.StubVM) error { return test.CapacityError{InstanceTypeSpecific: true} }\n\t_, created = pool.Create(typeB4p)\n\tc.Check(created, check.Equals, true)\n\tfor deadline := time.Now().Add(time.Second); !pool.AtCapacity(typeB4p); time.Sleep(time.Millisecond) {\n\t\tif time.Now().After(deadline) {\n\t\t\tc.Fatal(\"timed out waiting for pool to report quota\")\n\t\t}\n\t}\n\n\t// The pool should now report AtCapacity for the affected\n\t// instance family (A1, A2) and specific instance type B4p,\n\t// and refuse to call instanceSet.Create() for those types --\n\t// but types A1p, B3, B4, and B3p are still usable.\n\tdriver.SetupVM = func(*test.StubVM) error { return nil }\n\tc.Check(pool.AtCapacity(typeA1), check.Equals, true)\n\tc.Check(pool.AtCapacity(typeA1p), check.Equals, false)\n\tc.Check(pool.AtCapacity(typeA2), check.Equals, true)\n\tc.Check(pool.AtCapacity(typeB3), check.Equals, false)\n\tc.Check(pool.AtCapacity(typeB3p), check.Equals, false)\n\tc.Check(pool.AtCapacity(typeB4), check.Equals, false)\n\tc.Check(pool.AtCapacity(typeB4p), check.Equals, true)\n\t_, created = pool.Create(typeA2)\n\tc.Check(created, check.Equals, false)\n\t_, created = pool.Create(typeB3)\n\tc.Check(created, check.Equals, true)\n\t_, created = pool.Create(typeB3p)\n\tc.Check(created, check.Equals, true)\n\t_, created = pool.Create(typeB4)\n\tc.Check(created, check.Equals, true)\n\t_, created = pool.Create(typeB4p)\n\tc.Check(created, check.Equals, false)\n\t_, created = pool.Create(typeA2)\n\tc.Check(created, check.Equals, false)\n\t_, created = pool.Create(typeA1)\n\tc.Check(created, check.Equals, false)\n\t_, created = pool.Create(typeA1p)\n\tc.Check(created, check.Equals, true)\n}\n\nfunc (suite *PoolSuite) instancesByType(pool *Pool, it arvados.InstanceType) []InstanceView {\n\tvar ivs []InstanceView\n\tfor _, iv := range pool.Instances() {\n\t\tif iv.ArvadosInstanceType == it.Name {\n\t\t\tivs = append(ivs, iv)\n\t\t}\n\t}\n\treturn ivs\n}\n\nfunc (suite *PoolSuite) wait(c *check.C, pool *Pool, notify <-chan struct{}, ready func() bool) {\n\ttimeout := time.NewTimer(time.Second).C\n\tfor !ready() {\n\t\tselect {\n\t\tcase <-notify:\n\t\t\tcontinue\n\t\tcase <-timeout:\n\t\t}\n\t\tbreak\n\t}\n\tc.Check(ready(), 
check.Equals, true)\n}\n"
  },
  {
    "path": "lib/dispatchcloud/worker/runner.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage worker\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/crunchrun\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// remoteRunner handles the starting and stopping of a crunch-run\n// process on a remote machine.\ntype remoteRunner struct {\n\tuuid          string\n\texecutor      Executor\n\tconfigJSON    json.RawMessage\n\trunnerCmd     string\n\trunnerArgs    []string\n\tremoteUser    string\n\ttimeoutTERM   time.Duration\n\ttimeoutSignal time.Duration\n\tonUnkillable  func(uuid string) // callback invoked when giving up on SIGTERM\n\tonKilled      func(uuid string) // callback invoked when process exits after SIGTERM\n\tlogger        logrus.FieldLogger\n\n\tstopping bool          // true if Stop() has been called\n\tgivenup  bool          // true if timeoutTERM has been reached\n\tclosed   chan struct{} // channel is closed if Close() has been called\n}\n\n// newRemoteRunner returns a new remoteRunner. Caller should ensure\n// Close() is called to release resources.\nfunc newRemoteRunner(uuid string, wkr *worker) *remoteRunner {\n\t// Send the instance type record as a JSON doc so crunch-run\n\t// can log it.\n\tvar instJSON bytes.Buffer\n\tenc := json.NewEncoder(&instJSON)\n\tenc.SetIndent(\"\", \"    \")\n\tif err := enc.Encode(wkr.instType); err != nil {\n\t\tpanic(err)\n\t}\n\tinstanceAddr := wkr.instance.Address()\n\tinstanceHost, _, err := net.SplitHostPort(instanceAddr)\n\tif err != nil {\n\t\t// assume instanceAddr is a bare IPv6 host\n\t\tinstanceHost = instanceAddr\n\t}\n\tvar configData crunchrun.ConfigData\n\tconfigData.Env = map[string]string{\n\t\t\"ARVADOS_API_HOST\":  wkr.wp.arvClient.APIHost,\n\t\t\"ARVADOS_API_TOKEN\": wkr.wp.arvClient.AuthToken,\n\t\t\"InstanceType\":      instJSON.String(),\n\t\t\"GatewayAddress\":    net.JoinHostPort(instanceHost, \"0\"),\n\t\t\"GatewayAuthSecret\": wkr.wp.gatewayAuthSecret(uuid),\n\t}\n\tif wkr.wp.arvClient.Insecure {\n\t\tconfigData.Env[\"ARVADOS_API_HOST_INSECURE\"] = \"1\"\n\t}\n\tif bufs := wkr.wp.cluster.Containers.LocalKeepBlobBuffersPerVCPU; bufs > 0 {\n\t\tconfigData.Cluster = wkr.wp.cluster\n\t\tconfigData.KeepBuffers = bufs * wkr.instType.VCPUs\n\t}\n\tif wkr.wp.cluster.Containers.CloudVMs.Driver == \"ec2\" && wkr.instType.Preemptible {\n\t\tconfigData.EC2SpotCheck = true\n\t}\n\tconfigJSON, err := json.Marshal(configData)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\trr := &remoteRunner{\n\t\tuuid:          uuid,\n\t\texecutor:      wkr.executor,\n\t\tconfigJSON:    configJSON,\n\t\trunnerCmd:     wkr.wp.runnerCmd,\n\t\trunnerArgs:    wkr.wp.runnerArgs,\n\t\tremoteUser:    wkr.instance.RemoteUser(),\n\t\ttimeoutTERM:   wkr.wp.timeoutTERM,\n\t\ttimeoutSignal: wkr.wp.timeoutSignal,\n\t\tonUnkillable:  wkr.onUnkillable,\n\t\tonKilled:      wkr.onKilled,\n\t\tlogger:        wkr.logger.WithField(\"ContainerUUID\", uuid),\n\t\tclosed:        make(chan struct{}),\n\t}\n\treturn rr\n}\n\n// Start a crunch-run process on the remote host.\n//\n// Start does not return any error encountered. 
The caller should\n// assume the remote process _might_ have started, at least until it\n// probes the worker and finds otherwise.\nfunc (rr *remoteRunner) Start() {\n\tcmd := rr.runnerCmd + \" --detach --stdin-config\"\n\tfor _, arg := range rr.runnerArgs {\n\t\tcmd += \" '\" + strings.Replace(arg, \"'\", \"'\\\\''\", -1) + \"'\"\n\t}\n\tcmd += \" '\" + rr.uuid + \"'\"\n\tif rr.remoteUser != \"root\" {\n\t\tcmd = \"sudo \" + cmd\n\t}\n\tstdin := bytes.NewBuffer(rr.configJSON)\n\tstdout, stderr, err := rr.executor.Execute(nil, cmd, stdin)\n\tif err != nil {\n\t\trr.logger.WithField(\"stdout\", string(stdout)).\n\t\t\tWithField(\"stderr\", string(stderr)).\n\t\t\tWithError(err).\n\t\t\tError(\"error starting crunch-run process\")\n\t\treturn\n\t}\n\trr.logger.Info(\"crunch-run process started\")\n}\n\n// Close abandons the remote process (if any) and releases\n// resources. Close must not be called more than once.\nfunc (rr *remoteRunner) Close() {\n\tclose(rr.closed)\n}\n\n// Kill starts a background task to kill the remote process, first\n// trying SIGTERM until reaching timeoutTERM, then calling\n// onUnkillable().\n//\n// SIGKILL is not used. It would merely kill the crunch-run supervisor\n// and thereby make the docker container, arv-mount, etc. invisible to\n// us without actually stopping them.\n//\n// Once Kill has been called, calling it again has no effect.\nfunc (rr *remoteRunner) Kill(reason string) {\n\tif rr.stopping {\n\t\treturn\n\t}\n\trr.stopping = true\n\trr.logger.WithField(\"Reason\", reason).Info(\"killing crunch-run process\")\n\tgo func() {\n\t\ttermDeadline := time.Now().Add(rr.timeoutTERM)\n\t\tt := time.NewTicker(rr.timeoutSignal)\n\t\tdefer t.Stop()\n\t\tfor ; ; <-t.C {\n\t\t\tswitch {\n\t\t\tcase rr.isClosed():\n\t\t\t\treturn\n\t\t\tcase time.Now().After(termDeadline):\n\t\t\t\trr.logger.Debug(\"giving up\")\n\t\t\t\trr.givenup = true\n\t\t\t\trr.onUnkillable(rr.uuid)\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\trr.kill(syscall.SIGTERM)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (rr *remoteRunner) kill(sig syscall.Signal) {\n\tlogger := rr.logger.WithField(\"Signal\", int(sig))\n\tlogger.Info(\"sending signal\")\n\tcmd := fmt.Sprintf(rr.runnerCmd+\" --kill %d %s\", sig, rr.uuid)\n\tif rr.remoteUser != \"root\" {\n\t\tcmd = \"sudo \" + cmd\n\t}\n\tstdout, stderr, err := rr.executor.Execute(nil, cmd, nil)\n\tif err != nil {\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"stderr\": string(stderr),\n\t\t\t\"stdout\": string(stdout),\n\t\t\t\"error\":  err,\n\t\t}).Info(\"kill attempt unsuccessful\")\n\t\treturn\n\t}\n\trr.onKilled(rr.uuid)\n}\n\nfunc (rr *remoteRunner) isClosed() bool {\n\tselect {\n\tcase <-rr.closed:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n"
  },
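  {
    "path": "lib/dispatchcloud/worker/examples/shellquote/main.go",
    "content": "// Illustrative sketch (hypothetical file, not part of the source\n// tree): demonstrates the single-quote shell escaping that\n// remoteRunner.Start in runner.go applies to each crunch-run argument\n// before splicing it into the remote command line.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n// shellQuote matches the inline logic in remoteRunner.Start: wrap the\n// argument in single quotes, rewriting each embedded ' as '\\\\'' so the\n// shell exits and re-enters single-quoting around a literal quote.\nfunc shellQuote(arg string) string {\n\treturn \"'\" + strings.Replace(arg, \"'\", \"'\\\\''\", -1) + \"'\"\n}\n\nfunc main() {\n\tfmt.Println(shellQuote(\"plain-arg\"))   // 'plain-arg'\n\tfmt.Println(shellQuote(\"it's quoted\")) // 'it'\\\\''s quoted'\n}\n"
  },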
  {
    "path": "lib/dispatchcloud/worker/throttle.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage worker\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype throttle struct {\n\terr   error\n\tuntil time.Time\n\tmtx   sync.Mutex\n}\n\n// CheckRateLimitError checks whether the given error is a\n// cloud.RateLimitError, and if so, ensures Error() returns a non-nil\n// error until the rate limiting holdoff period expires.\n//\n// If a notify func is given, it will be called after the holdoff\n// period expires.\nfunc (thr *throttle) CheckRateLimitError(err error, logger logrus.FieldLogger, callType string, notify func()) {\n\trle, ok := err.(cloud.RateLimitError)\n\tif !ok {\n\t\treturn\n\t}\n\tuntil := rle.EarliestRetry()\n\tif !until.After(time.Now()) {\n\t\treturn\n\t}\n\tdur := until.Sub(time.Now())\n\tlogger.WithFields(logrus.Fields{\n\t\t\"CallType\": callType,\n\t\t\"Duration\": dur,\n\t\t\"ResumeAt\": until,\n\t}).Info(\"suspending remote calls due to rate-limit error\")\n\tthr.ErrorUntil(fmt.Errorf(\"remote calls are suspended for %s, until %s\", dur, until), until, notify)\n}\n\nfunc (thr *throttle) ErrorUntil(err error, until time.Time, notify func()) {\n\tthr.mtx.Lock()\n\tdefer thr.mtx.Unlock()\n\tthr.err, thr.until = err, until\n\tif notify != nil {\n\t\ttime.AfterFunc(until.Sub(time.Now()), notify)\n\t}\n}\n\nfunc (thr *throttle) Error() error {\n\tthr.mtx.Lock()\n\tdefer thr.mtx.Unlock()\n\tif thr.err != nil && time.Now().After(thr.until) {\n\t\tthr.err = nil\n\t}\n\treturn thr.err\n}\n\ntype throttledInstanceSet struct {\n\tcloud.InstanceSet\n\tthrottleCreate    throttle\n\tthrottleInstances throttle\n}\n"
  },
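  {
    "path": "lib/dispatchcloud/worker/examples/throttlepattern/main.go",
    "content": "// Illustrative sketch (hypothetical file, not part of the source\n// tree): shows the calling pattern the throttle type in throttle.go\n// supports: consult Error() before an expensive remote call, and arm a\n// holdoff with ErrorUntil when the provider pushes back. ErrorUntil and\n// Error are copied verbatim from throttle.go so the sketch compiles on\n// its own.\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype throttle struct {\n\terr   error\n\tuntil time.Time\n\tmtx   sync.Mutex\n}\n\nfunc (thr *throttle) ErrorUntil(err error, until time.Time, notify func()) {\n\tthr.mtx.Lock()\n\tdefer thr.mtx.Unlock()\n\tthr.err, thr.until = err, until\n\tif notify != nil {\n\t\ttime.AfterFunc(until.Sub(time.Now()), notify)\n\t}\n}\n\nfunc (thr *throttle) Error() error {\n\tthr.mtx.Lock()\n\tdefer thr.mtx.Unlock()\n\tif thr.err != nil && time.Now().After(thr.until) {\n\t\tthr.err = nil\n\t}\n\treturn thr.err\n}\n\nfunc main() {\n\tvar thr throttle\n\t// A rate-limit response arms a 50ms holdoff and schedules a\n\t// notification for when it expires.\n\tthr.ErrorUntil(errors.New(\"rate limited\"), time.Now().Add(50*time.Millisecond), func() { fmt.Println(\"holdoff expired\") })\n\tif err := thr.Error(); err != nil {\n\t\tfmt.Println(\"skipping remote call:\", err)\n\t}\n\ttime.Sleep(100 * time.Millisecond)\n\tfmt.Println(\"after holdoff, Error() returns:\", thr.Error())\n}\n"
  },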
  {
    "path": "lib/dispatchcloud/worker/throttle_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage worker\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&ThrottleSuite{})\n\ntype ThrottleSuite struct{}\n\nfunc (s *ThrottleSuite) TestRateLimitError(c *check.C) {\n\tvar t throttle\n\tc.Check(t.Error(), check.IsNil)\n\tt.ErrorUntil(errors.New(\"wait\"), time.Now().Add(time.Second), nil)\n\tc.Check(t.Error(), check.NotNil)\n\tt.ErrorUntil(nil, time.Now(), nil)\n\tc.Check(t.Error(), check.IsNil)\n\n\tnotified := false\n\tt.ErrorUntil(errors.New(\"wait\"), time.Now().Add(time.Millisecond), func() { notified = true })\n\tc.Check(t.Error(), check.NotNil)\n\ttime.Sleep(time.Millisecond * 10)\n\tc.Check(t.Error(), check.IsNil)\n\tc.Check(notified, check.Equals, true)\n}\n"
  },
  {
    "path": "lib/dispatchcloud/worker/verify.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage worker\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\nvar (\n\terrBadInstanceSecret = errors.New(\"bad instance secret\")\n\n\t// filename on instance, as given to shell (quoted accordingly)\n\tinstanceSecretFilename = \"/var/run/arvados-instance-secret\"\n\tinstanceSecretLength   = 40 // hex digits\n)\n\ntype TagVerifier struct {\n\tcloud.Instance\n\tSecret         string\n\tReportVerified func(cloud.Instance)\n}\n\nfunc (tv TagVerifier) InitCommand() cloud.InitCommand {\n\treturn cloud.InitCommand(fmt.Sprintf(\"umask 0177 && echo -n %q >%s\", tv.Secret, instanceSecretFilename))\n}\n\nfunc (tv TagVerifier) VerifyHostKey(pubKey ssh.PublicKey, client *ssh.Client) error {\n\tif tv.ReportVerified != nil {\n\t\ttv.ReportVerified(tv.Instance)\n\t}\n\tif err := tv.Instance.VerifyHostKey(pubKey, client); err != cloud.ErrNotImplemented || tv.Secret == \"\" {\n\t\t// If the wrapped instance indicates it has a way to\n\t\t// verify the key, return that decision.\n\t\treturn err\n\t}\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer session.Close()\n\tvar stdout, stderr bytes.Buffer\n\tsession.Stdin = bytes.NewBuffer(nil)\n\tsession.Stdout = &stdout\n\tsession.Stderr = &stderr\n\tcmd := fmt.Sprintf(\"cat %s\", instanceSecretFilename)\n\tif u := tv.RemoteUser(); u != \"root\" {\n\t\tcmd = \"sudo \" + cmd\n\t}\n\terr = session.Run(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif stdout.String() != tv.Secret {\n\t\treturn errBadInstanceSecret\n\t}\n\treturn nil\n}\n"
  },
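  {
    "path": "lib/dispatchcloud/worker/examples/instancesecret/main.go",
    "content": "// Illustrative sketch (hypothetical file, not part of the source\n// tree): walks through the tag-verification handshake from verify.go\n// without a cloud provider or SSH connection. The dispatcher generates\n// a random secret, plants it on the instance via the boot-time init\n// command, and later accepts the instance's host key only if reading\n// the file back over SSH yields the same secret.\npackage main\n\nimport (\n\t\"crypto/rand\"\n\t\"fmt\"\n)\n\nconst instanceSecretFilename = \"/var/run/arvados-instance-secret\"\n\n// randomHex returns n hex digits (n*4 random bits); n must be even.\n// Adapted from pool.go.\nfunc randomHex(n int) string {\n\tbuf := make([]byte, n/2)\n\tif _, err := rand.Read(buf); err != nil {\n\t\tpanic(err)\n\t}\n\treturn fmt.Sprintf(\"%x\", buf)\n}\n\nfunc main() {\n\tsecret := randomHex(40)\n\t// Equivalent of TagVerifier.InitCommand: store the secret on the\n\t// instance with owner-only permissions.\n\tfmt.Printf(\"umask 0177 && echo -n %q >%s\\n\", secret, instanceSecretFilename)\n\t// TagVerifier.VerifyHostKey later runs \"cat\" on that file (via\n\t// sudo for a non-root remote user) and compares the output to the\n\t// expected secret, returning errBadInstanceSecret on a mismatch.\n\treadBack := secret // stand-in for the SSH session's stdout\n\tfmt.Println(\"verified:\", readBack == secret)\n}\n"
  },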
  {
    "path": "lib/dispatchcloud/worker/worker.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage worker\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/sshexecutor\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/stats\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\nconst (\n\t// TODO: configurable\n\tmaxPingFailTime = 10 * time.Minute\n)\n\n// State indicates whether a worker is available to do work, and (if\n// not) whether/when it is expected to become ready.\ntype State string\n\nconst (\n\tStateUnknown  State = \"unknown\"  // not created by this process, hasn't completed a probe yet\n\tStateBooting  State = \"booting\"  // instance is booting\n\tStateIdle     State = \"idle\"     // instance booted, no containers are running\n\tStateRunning  State = \"running\"  // instance is running one or more containers\n\tStateShutdown State = \"shutdown\" // worker has stopped monitoring the instance\n)\n\nvar validStates = map[State]bool{\n\tStateUnknown:  true,\n\tStateBooting:  true,\n\tStateIdle:     true,\n\tStateRunning:  true,\n\tStateShutdown: true,\n}\n\n// BootOutcome is the result of a worker boot. It is used as a label in a metric.\ntype BootOutcome string\n\nconst (\n\tBootOutcomeFailed      BootOutcome = \"failure\"\n\tBootOutcomeSucceeded   BootOutcome = \"success\"\n\tBootOutcomeAborted     BootOutcome = \"aborted\"\n\tBootOutcomeDisappeared BootOutcome = \"disappeared\"\n)\n\nvar validBootOutcomes = map[BootOutcome]bool{\n\tBootOutcomeFailed:      true,\n\tBootOutcomeSucceeded:   true,\n\tBootOutcomeAborted:     true,\n\tBootOutcomeDisappeared: true,\n}\n\n// IdleBehavior indicates the behavior desired when a node becomes idle.\ntype IdleBehavior string\n\nconst (\n\tIdleBehaviorRun   IdleBehavior = \"run\"   // run containers, or shutdown on idle timeout\n\tIdleBehaviorHold  IdleBehavior = \"hold\"  // don't shutdown or run more containers\n\tIdleBehaviorDrain IdleBehavior = \"drain\" // shutdown immediately when idle\n)\n\nvar validIdleBehavior = map[IdleBehavior]bool{\n\tIdleBehaviorRun:   true,\n\tIdleBehaviorHold:  true,\n\tIdleBehaviorDrain: true,\n}\n\ntype worker struct {\n\tlogger   logrus.FieldLogger\n\texecutor Executor\n\twp       *Pool\n\n\tmtx                 sync.Locker // must be wp's Locker.\n\tstate               State\n\tidleBehavior        IdleBehavior\n\tinstance            cloud.Instance\n\tinstType            arvados.InstanceType\n\tvcpus               int64\n\tmemory              int64\n\tappeared            time.Time\n\tprobed              time.Time\n\tupdated             time.Time\n\tbusy                time.Time\n\tdestroyed           time.Time\n\tfirstSSHConnection  time.Time\n\tlastUUID            string\n\trunning             map[string]*remoteRunner // remember to update state idle<->running when this changes\n\tstarting            map[string]*remoteRunner // remember to update state idle<->running when this changes\n\tprobing             chan struct{}\n\tbootOutcomeReported bool\n\ttimeToReadyReported bool\n\tstaleRunLockSince   time.Time\n}\n\nfunc (wkr *worker) onUnkillable(uuid string) {\n\twkr.mtx.Lock()\n\tdefer wkr.mtx.Unlock()\n\tlogger := wkr.logger.WithField(\"ContainerUUID\", uuid)\n\tif wkr.idleBehavior == IdleBehaviorHold 
{\n\t\tlogger.Warn(\"unkillable container, but worker has IdleBehavior=Hold\")\n\t\treturn\n\t}\n\tlogger.Warn(\"unkillable container, draining worker\")\n\twkr.setIdleBehavior(IdleBehaviorDrain)\n}\n\nfunc (wkr *worker) onKilled(uuid string) {\n\twkr.mtx.Lock()\n\tdefer wkr.mtx.Unlock()\n\twkr.closeRunner(uuid)\n\tgo wkr.wp.notify()\n}\n\n// caller must have lock.\nfunc (wkr *worker) reportBootOutcome(outcome BootOutcome) {\n\tif wkr.bootOutcomeReported {\n\t\treturn\n\t}\n\tif wkr.wp.mBootOutcomes != nil {\n\t\twkr.wp.mBootOutcomes.WithLabelValues(string(outcome)).Inc()\n\t}\n\twkr.bootOutcomeReported = true\n}\n\n// caller must have lock.\nfunc (wkr *worker) reportTimeBetweenFirstSSHAndReadyForContainer() {\n\tif wkr.timeToReadyReported {\n\t\treturn\n\t}\n\tif wkr.wp.mTimeToSSH != nil {\n\t\twkr.wp.mTimeToReadyForContainer.Observe(time.Since(wkr.firstSSHConnection).Seconds())\n\t}\n\twkr.timeToReadyReported = true\n}\n\n// caller must have lock.\nfunc (wkr *worker) setIdleBehavior(idleBehavior IdleBehavior) {\n\twkr.logger.WithField(\"IdleBehavior\", idleBehavior).Info(\"set idle behavior\")\n\twkr.idleBehavior = idleBehavior\n\twkr.saveTags()\n\twkr.shutdownIfIdle()\n}\n\n// caller must have lock.\nfunc (wkr *worker) startContainer(ctr arvados.Container) {\n\tlogger := wkr.logger.WithFields(logrus.Fields{\n\t\t\"ContainerUUID\": ctr.UUID,\n\t\t\"Priority\":      ctr.Priority,\n\t})\n\tlogger.Debug(\"starting container\")\n\trr := newRemoteRunner(ctr.UUID, wkr)\n\twkr.starting[ctr.UUID] = rr\n\tif wkr.state != StateRunning {\n\t\twkr.state = StateRunning\n\t\tgo wkr.wp.notify()\n\t}\n\tgo func() {\n\t\trr.Start()\n\t\tif wkr.wp.mTimeFromQueueToCrunchRun != nil {\n\t\t\twkr.wp.mTimeFromQueueToCrunchRun.Observe(time.Since(ctr.CreatedAt).Seconds())\n\t\t}\n\t\twkr.mtx.Lock()\n\t\tdefer wkr.mtx.Unlock()\n\t\tif wkr.starting[ctr.UUID] != rr {\n\t\t\t// Someone else (e.g., wkr.probeAndUpdate() ->\n\t\t\t// wkr.updateRunning() or wkr.Close()) already\n\t\t\t// moved our runner from wkr.starting to\n\t\t\t// wkr.running or deleted it while we were in\n\t\t\t// rr.Start().\n\t\t\treturn\n\t\t}\n\t\tnow := time.Now()\n\t\twkr.updated = now\n\t\twkr.busy = now\n\t\tdelete(wkr.starting, ctr.UUID)\n\t\twkr.running[ctr.UUID] = rr\n\t\twkr.lastUUID = ctr.UUID\n\t}()\n}\n\n// ProbeAndUpdate conducts appropriate boot/running probes (if any)\n// for the worker's current state. If a previous probe is still\n// running, it does nothing.\n//\n// It should be called in a new goroutine.\nfunc (wkr *worker) ProbeAndUpdate() {\n\tselect {\n\tcase wkr.probing <- struct{}{}:\n\t\twkr.probeAndUpdate()\n\t\t<-wkr.probing\n\tdefault:\n\t\twkr.logger.Debug(\"still waiting for last probe to finish\")\n\t}\n}\n\n// probeAndUpdate calls probeBooted and/or probeRunning if needed, and\n// updates state accordingly.\n//\n// In StateUnknown: Call both probeBooted and probeRunning.\n// In StateBooting: Call probeBooted; if successful, call probeRunning.\n// In StateRunning: Call probeRunning.\n// In StateIdle: Call probeRunning.\n// In StateShutdown: Do nothing.\n//\n// If both probes succeed, wkr.state changes to\n// StateIdle/StateRunning.\n//\n// If probeRunning succeeds, wkr.running is updated. 
(This means\n// wkr.running might be non-empty even in StateUnknown, if the boot\n// probe failed.)\n//\n// probeAndUpdate should be called in a new goroutine.\nfunc (wkr *worker) probeAndUpdate() {\n\twkr.mtx.Lock()\n\tupdated := wkr.updated\n\tinitialState := wkr.state\n\twkr.mtx.Unlock()\n\n\tvar (\n\t\tbooted   bool\n\t\tctrUUIDs []string\n\t\tok       bool\n\t\tstderr   []byte // from probeBooted\n\t\terrLast  error  // from probeBooted or copyRunnerData\n\t)\n\n\tswitch initialState {\n\tcase StateShutdown:\n\t\treturn\n\tcase StateIdle, StateRunning:\n\t\tbooted = true\n\tcase StateUnknown, StateBooting:\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unknown state %s\", initialState))\n\t}\n\n\tprobeStart := time.Now()\n\tlogger := wkr.logger.WithField(\"ProbeStart\", probeStart)\n\n\tif !booted {\n\t\tstderr, errLast = wkr.probeBooted()\n\t\tbooted = errLast == nil\n\t\tshouldCopy := booted || initialState == StateUnknown\n\t\tif !booted {\n\t\t\t// Pretend this probe succeeded if another\n\t\t\t// concurrent attempt succeeded.\n\t\t\twkr.mtx.Lock()\n\t\t\tif wkr.state == StateRunning || wkr.state == StateIdle {\n\t\t\t\tbooted = true\n\t\t\t\tshouldCopy = false\n\t\t\t}\n\t\t\twkr.mtx.Unlock()\n\t\t}\n\t\tif shouldCopy {\n\t\t\t_, stderrCopy, err := wkr.copyRunnerData()\n\t\t\tif err != nil {\n\t\t\t\tbooted = false\n\t\t\t\twkr.logger.WithError(err).WithField(\"stderr\", string(stderrCopy)).Warn(\"error copying runner binary\")\n\t\t\t\terrLast = err\n\t\t\t}\n\t\t}\n\t\tif booted {\n\t\t\tlogger.Info(\"instance booted; will try probeRunning\")\n\t\t}\n\t}\n\treportedBroken := false\n\tif booted || initialState == StateUnknown {\n\t\tctrUUIDs, reportedBroken, ok = wkr.probeRunning()\n\t}\n\twkr.mtx.Lock()\n\tdefer wkr.mtx.Unlock()\n\tif reportedBroken && wkr.idleBehavior == IdleBehaviorRun {\n\t\tlogger.Info(\"probe reported broken instance\")\n\t\twkr.reportBootOutcome(BootOutcomeFailed)\n\t\twkr.setIdleBehavior(IdleBehaviorDrain)\n\t}\n\tif !ok || (!booted && len(ctrUUIDs) == 0 && len(wkr.running) == 0) {\n\t\tif wkr.state == StateShutdown && wkr.updated.After(updated) {\n\t\t\t// Skip the logging noise if shutdown was\n\t\t\t// initiated during probe.\n\t\t\treturn\n\t\t}\n\t\t// Using the start time of the probe as the timeout\n\t\t// threshold ensures we always initiate at least one\n\t\t// probe attempt after the boot/probe timeout expires\n\t\t// (otherwise, a slow probe failure could cause us to\n\t\t// shutdown an instance even though it did in fact\n\t\t// boot/recover before the timeout expired).\n\t\tdur := probeStart.Sub(wkr.probed)\n\t\tif wkr.shutdownIfBroken(dur) {\n\t\t\t// stderr from failed run-probes will have\n\t\t\t// been logged already, but some boot-probe\n\t\t\t// failures are normal so they are logged only\n\t\t\t// at Debug level. This may be our chance to\n\t\t\t// log some evidence about why the node never\n\t\t\t// booted, even in non-debug mode.\n\t\t\tif !booted {\n\t\t\t\twkr.reportBootOutcome(BootOutcomeFailed)\n\t\t\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\t\t\"Duration\": dur,\n\t\t\t\t\t\"stderr\":   string(stderr),\n\t\t\t\t}).WithError(errLast).Info(\"boot failed\")\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tupdateTime := time.Now()\n\twkr.probed = updateTime\n\n\tif updated != wkr.updated {\n\t\t// Worker was updated after the probe began, so\n\t\t// wkr.running might have a container UUID that was\n\t\t// not yet running when ctrUUIDs was generated. 
Leave\n\t\t// wkr.running alone and wait for the next probe to\n\t\t// catch up on any changes.\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"updated\":     updated,\n\t\t\t\"wkr.updated\": wkr.updated,\n\t\t}).Debug(\"skipping worker state update due to probe/sync race\")\n\t\treturn\n\t}\n\n\tif len(ctrUUIDs) > 0 {\n\t\twkr.busy = updateTime\n\t\tif wkr.lastUUID == \"\" {\n\t\t\t// If we have never started a container on\n\t\t\t// this instance ourselves, all ctrUUIDs must\n\t\t\t// have been started by a previous dispatch\n\t\t\t// process, and the information about which\n\t\t\t// one started last is now gone.  So we just\n\t\t\t// pick one arbitrarily.\n\t\t\twkr.lastUUID = ctrUUIDs[0]\n\t\t}\n\t} else if len(wkr.running) > 0 {\n\t\t// Actual last-busy time was sometime between wkr.busy\n\t\t// and now. Now is the earliest opportunity to take\n\t\t// advantage of the non-busy state, though.\n\t\twkr.busy = updateTime\n\t}\n\n\tchanged := wkr.updateRunning(ctrUUIDs)\n\n\t// Update state if this was the first successful boot-probe.\n\tif booted && (wkr.state == StateUnknown || wkr.state == StateBooting) {\n\t\tif wkr.state == StateBooting {\n\t\t\twkr.reportTimeBetweenFirstSSHAndReadyForContainer()\n\t\t}\n\t\t// Note: this will change again below if\n\t\t// len(wkr.starting)+len(wkr.running) > 0.\n\t\twkr.state = StateIdle\n\t\tchanged = true\n\t}\n\n\t// If wkr.state and wkr.running aren't changing then there's\n\t// no need to log anything, notify the scheduler, move state\n\t// back and forth between idle/running, etc.\n\tif !changed {\n\t\treturn\n\t}\n\n\t// Log whenever a run-probe reveals crunch-run processes\n\t// appearing/disappearing before boot-probe succeeds.\n\tif wkr.state == StateUnknown && changed {\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"RunningContainers\": len(wkr.running),\n\t\t\t\"State\":             wkr.state,\n\t\t}).Info(\"crunch-run probe succeeded, but boot probe is still failing\")\n\t}\n\n\tif wkr.state == StateIdle && len(wkr.starting)+len(wkr.running) > 0 {\n\t\twkr.state = StateRunning\n\t} else if wkr.state == StateRunning && len(wkr.starting)+len(wkr.running) == 0 {\n\t\twkr.state = StateIdle\n\t}\n\twkr.updated = updateTime\n\tif booted && (initialState == StateUnknown || initialState == StateBooting) {\n\t\twkr.reportBootOutcome(BootOutcomeSucceeded)\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"RunningContainers\": len(wkr.running),\n\t\t\t\"State\":             wkr.state,\n\t\t}).Info(\"probes succeeded, instance is in service\")\n\t}\n\tgo wkr.wp.notify()\n}\n\nfunc (wkr *worker) probeRunning() (running []string, reportsBroken, ok bool) {\n\tcmd := wkr.wp.runnerCmd + \" --list\"\n\tif u := wkr.instance.RemoteUser(); u != \"root\" {\n\t\tcmd = \"sudo \" + cmd\n\t}\n\tbefore := time.Now()\n\tvar stdin io.Reader\n\tif prices := wkr.instance.PriceHistory(wkr.instType); len(prices) > 0 {\n\t\tj, _ := json.Marshal(prices)\n\t\tstdin = bytes.NewReader(j)\n\t\tcmd += \" --stdin-prices\"\n\t}\n\tstdout, stderr, err := wkr.executor.Execute(nil, cmd, stdin)\n\tif err != nil {\n\t\twkr.logger.WithFields(logrus.Fields{\n\t\t\t\"Command\": cmd,\n\t\t\t\"stdout\":  string(stdout),\n\t\t\t\"stderr\":  string(stderr),\n\t\t}).WithError(err).Warn(\"probe failed\")\n\t\twkr.wp.mRunProbeDuration.WithLabelValues(\"fail\").Observe(time.Now().Sub(before).Seconds())\n\t\treturn\n\t}\n\twkr.logger.WithFields(logrus.Fields{\n\t\t\"Command\": cmd,\n\t\t\"stdout\":  string(stdout),\n\t\t\"stderr\":  string(stderr),\n\t}).Debug(\"probe 
succeeded\")\n\twkr.wp.mRunProbeDuration.WithLabelValues(\"success\").Observe(time.Now().Sub(before).Seconds())\n\tok = true\n\n\tstaleRunLock := false\n\tfor _, s := range strings.Split(string(stdout), \"\\n\") {\n\t\t// Each line of the \"crunch-run --list\" output is one\n\t\t// of the following:\n\t\t//\n\t\t// * a container UUID, indicating that processes\n\t\t//   related to that container are currently running.\n\t\t//   Optionally followed by \" stale\", indicating that\n\t\t//   the crunch-run process itself has exited (the\n\t\t//   remaining process is probably arv-mount).\n\t\t//\n\t\t// * the string \"broken\", indicating that the instance\n\t\t//   appears incapable of starting containers.\n\t\t//\n\t\t// See ListProcesses() in lib/crunchrun/background.go.\n\t\tif s == \"\" {\n\t\t\t// empty string following final newline\n\t\t} else if s == \"broken\" {\n\t\t\treportsBroken = true\n\t\t} else if !strings.HasPrefix(s, wkr.wp.cluster.ClusterID) {\n\t\t\t// Ignore crunch-run processes that belong to\n\t\t\t// a different cluster (e.g., a single host\n\t\t\t// running multiple clusters with the loopback\n\t\t\t// driver)\n\t\t\tcontinue\n\t\t} else if toks := strings.Split(s, \" \"); len(toks) == 1 {\n\t\t\trunning = append(running, s)\n\t\t} else if toks[1] == \"stale\" {\n\t\t\twkr.logger.WithField(\"ContainerUUID\", toks[0]).Info(\"probe reported stale run lock\")\n\t\t\tstaleRunLock = true\n\t\t}\n\t}\n\twkr.mtx.Lock()\n\tdefer wkr.mtx.Unlock()\n\tif !staleRunLock {\n\t\twkr.staleRunLockSince = time.Time{}\n\t} else if wkr.staleRunLockSince.IsZero() {\n\t\twkr.staleRunLockSince = time.Now()\n\t} else if dur := time.Now().Sub(wkr.staleRunLockSince); dur > wkr.wp.timeoutStaleRunLock {\n\t\twkr.logger.WithField(\"Duration\", dur).Warn(\"reporting broken after reporting stale run lock for too long\")\n\t\treportsBroken = true\n\t}\n\treturn\n}\n\nfunc (wkr *worker) probeBooted() (stderr []byte, err error) {\n\tcmd := wkr.wp.bootProbeCommand\n\tif cmd == \"\" {\n\t\tcmd = \"true\"\n\t}\n\tstdout, stderr, err := wkr.executor.Execute(nil, cmd, nil)\n\tlogger := wkr.logger.WithFields(logrus.Fields{\n\t\t\"Command\": cmd,\n\t\t\"stdout\":  string(stdout),\n\t\t\"stderr\":  string(stderr),\n\t})\n\tif err != nil {\n\t\tif errors.Is(err, sshexecutor.ErrNoAddress) ||\n\t\t\terrors.As(err, new(*net.OpError)) ||\n\t\t\terrors.As(err, new(*ssh.ExitError)) {\n\t\t\t// These errors are expected while the\n\t\t\t// instance is booting, so we only log them at\n\t\t\t// debug level.\n\t\t\tlogger.WithError(err).Debug(\"boot probe failed\")\n\t\t} else {\n\t\t\t// Other errors are more likely to indicate a\n\t\t\t// configuration problem, and it's more\n\t\t\t// sysadmin-friendly to show them right away\n\t\t\t// instead of waiting until boot timeout and\n\t\t\t// only showing the last error.\n\t\t\t//\n\t\t\t// Example: \"ssh: handshake failed: ssh:\n\t\t\t// unable to authenticate, attempted methods\n\t\t\t// [none publickey], no supported methods\n\t\t\t// remain\"\n\t\t\tlogger.WithError(err).Warn(\"boot probe failed\")\n\t\t}\n\t\treturn stderr, err\n\t}\n\tlogger.Info(\"boot probe succeeded\")\n\treturn stderr, nil\n}\n\nfunc (wkr *worker) copyRunnerData() (stdout, stderr []byte, err error) {\n\tif err = wkr.wp.loadRunnerData(); err != nil {\n\t\twkr.logger.WithError(err).Warn(\"cannot boot worker: error loading runner binary\")\n\t\treturn\n\t} else if len(wkr.wp.runnerData) == 0 {\n\t\t// Assume crunch-run is already installed\n\t\treturn\n\t}\n\n\thash := fmt.Sprintf(\"%x\", 
wkr.wp.runnerMD5)\n\tdstdir, _ := filepath.Split(wkr.wp.runnerCmd)\n\tlogger := wkr.logger.WithFields(logrus.Fields{\n\t\t\"hash\": hash,\n\t\t\"path\": wkr.wp.runnerCmd,\n\t})\n\n\tstdout, stderr, err = wkr.executor.Execute(nil, `md5sum `+wkr.wp.runnerCmd, nil)\n\tif err == nil && len(stderr) == 0 && bytes.Equal(stdout, []byte(hash+\"  \"+wkr.wp.runnerCmd+\"\\n\")) {\n\t\tlogger.Info(\"runner binary already exists on worker, with correct hash\")\n\t\treturn\n\t}\n\n\t// Note touch+chmod come before writing data, to avoid the\n\t// possibility of md5 being correct while file mode is\n\t// incorrect.\n\tcmd := `set -e; dstdir=\"` + dstdir + `\"; dstfile=\"` + wkr.wp.runnerCmd + `\"; mkdir -p \"$dstdir\"; touch \"$dstfile\"; chmod 0755 \"$dstdir\" \"$dstfile\"; cat >\"$dstfile\"`\n\tif wkr.instance.RemoteUser() != \"root\" {\n\t\tcmd = `sudo sh -c '` + strings.Replace(cmd, \"'\", \"'\\\\''\", -1) + `'`\n\t}\n\tlogger.WithField(\"cmd\", cmd).Info(\"installing runner binary on worker\")\n\tstdout, stderr, err = wkr.executor.Execute(nil, cmd, bytes.NewReader(wkr.wp.runnerData))\n\treturn\n}\n\n// caller must have lock.\nfunc (wkr *worker) shutdownIfBroken(dur time.Duration) bool {\n\tif wkr.idleBehavior == IdleBehaviorHold {\n\t\t// Never shut down.\n\t\treturn false\n\t}\n\tprologue, epilogue, threshold := \"\", \"\", wkr.wp.timeoutProbe\n\tif wkr.state == StateUnknown || wkr.state == StateBooting {\n\t\tprologue = \"new \"\n\t\tepilogue = \" -- `arvados-server cloudtest` might help troubleshoot, see https://doc.arvados.org/main/admin/cloudtest.html\"\n\t\tthreshold = wkr.wp.timeoutBooting\n\t}\n\tif dur < threshold {\n\t\treturn false\n\t}\n\twkr.logger.WithFields(logrus.Fields{\n\t\t\"Duration\": dur,\n\t\t\"Since\":    wkr.probed,\n\t\t\"State\":    wkr.state,\n\t}).Warnf(\"%sinstance unresponsive, shutting down%s\", prologue, epilogue)\n\twkr.shutdown()\n\treturn true\n}\n\n// Returns true if the instance is eligible for shutdown: either it's\n// been idle too long, or idleBehavior=Drain and nothing is running.\n//\n// caller must have lock.\nfunc (wkr *worker) eligibleForShutdown() bool {\n\tif wkr.idleBehavior == IdleBehaviorHold {\n\t\treturn false\n\t}\n\tdraining := wkr.idleBehavior == IdleBehaviorDrain\n\tswitch wkr.state {\n\tcase StateBooting:\n\t\treturn draining\n\tcase StateIdle:\n\t\treturn draining || time.Since(wkr.busy) >= wkr.wp.timeoutIdle\n\tcase StateRunning:\n\t\tif !draining {\n\t\t\treturn false\n\t\t}\n\t\tfor _, rr := range wkr.running {\n\t\t\tif !rr.givenup {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tfor _, rr := range wkr.starting {\n\t\t\tif !rr.givenup {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\t// draining, and all remaining runners are just trying\n\t\t// to force-kill their crunch-run procs\n\t\treturn true\n\tcase StateUnknown:\n\t\t// instance was created by someone other than us\n\t\t// (probably the previous dispatchcloud process) and\n\t\t// has never responded to a probe\n\t\treturn time.Since(wkr.appeared) >= wkr.wp.timeoutBooting\n\tdefault:\n\t\treturn false\n\t}\n}\n\n// caller must have lock.\nfunc (wkr *worker) shutdownIfIdle() bool {\n\tif !wkr.eligibleForShutdown() {\n\t\treturn false\n\t}\n\twkr.logger.WithFields(logrus.Fields{\n\t\t\"State\":        wkr.state,\n\t\t\"IdleDuration\": stats.Duration(time.Since(wkr.busy)),\n\t\t\"IdleBehavior\": wkr.idleBehavior,\n\t}).Info(\"shutdown worker\")\n\twkr.reportBootOutcome(BootOutcomeAborted)\n\twkr.shutdown()\n\treturn true\n}\n\n// caller must have lock.\nfunc (wkr *worker) shutdown() {\n\tnow 
:= time.Now()\n\twkr.updated = now\n\twkr.destroyed = now\n\twkr.state = StateShutdown\n\tgo wkr.wp.notify()\n\tgo func() {\n\t\terr := wkr.instance.Destroy()\n\t\tif err != nil {\n\t\t\twkr.logger.WithError(err).Warn(\"shutdown failed\")\n\t\t\treturn\n\t\t}\n\t}()\n}\n\n// Save worker tags to cloud provider metadata, if they don't already\n// match. Caller must have lock.\nfunc (wkr *worker) saveTags() {\n\tinstance := wkr.instance\n\ttags := instance.Tags()\n\tupdate := cloud.InstanceTags{\n\t\twkr.wp.tagKeyPrefix + tagKeyInstanceType: wkr.instType.Name,\n\t\twkr.wp.tagKeyPrefix + tagKeyIdleBehavior: string(wkr.idleBehavior),\n\t}\n\tsave := false\n\tfor k, v := range update {\n\t\tif tags[k] != v {\n\t\t\ttags[k] = v\n\t\t\tsave = true\n\t\t}\n\t}\n\tif save {\n\t\tgo func() {\n\t\t\terr := instance.SetTags(tags)\n\t\t\tif err != nil {\n\t\t\t\twkr.wp.logger.WithField(\"Instance\", instance.ID()).WithError(err).Warnf(\"error updating tags\")\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (wkr *worker) Close() {\n\t// This might take time, so do it after unlocking mtx.\n\tdefer wkr.executor.Close()\n\n\twkr.mtx.Lock()\n\tdefer wkr.mtx.Unlock()\n\tfor uuid, rr := range wkr.running {\n\t\twkr.logger.WithField(\"ContainerUUID\", uuid).Info(\"crunch-run process abandoned\")\n\t\trr.Close()\n\t\tdelete(wkr.running, uuid)\n\t}\n\tfor uuid, rr := range wkr.starting {\n\t\twkr.logger.WithField(\"ContainerUUID\", uuid).Info(\"crunch-run process abandoned\")\n\t\trr.Close()\n\t\tdelete(wkr.starting, uuid)\n\t}\n}\n\n// Add/remove entries in wkr.running to match ctrUUIDs returned by a\n// probe. Returns true if anything was added or removed.\n//\n// Caller must have lock.\nfunc (wkr *worker) updateRunning(ctrUUIDs []string) (changed bool) {\n\talive := map[string]bool{}\n\tfor _, uuid := range ctrUUIDs {\n\t\talive[uuid] = true\n\t\tif _, ok := wkr.running[uuid]; ok {\n\t\t\t// unchanged\n\t\t} else if rr, ok := wkr.starting[uuid]; ok {\n\t\t\twkr.running[uuid] = rr\n\t\t\tdelete(wkr.starting, uuid)\n\t\t\tchanged = true\n\t\t} else {\n\t\t\t// We didn't start it -- it must have been\n\t\t\t// started by a previous dispatcher process.\n\t\t\twkr.logger.WithField(\"ContainerUUID\", uuid).Info(\"crunch-run process detected\")\n\t\t\twkr.running[uuid] = newRemoteRunner(uuid, wkr)\n\t\t\tchanged = true\n\t\t}\n\t}\n\tfor uuid := range wkr.running {\n\t\tif !alive[uuid] {\n\t\t\twkr.closeRunner(uuid)\n\t\t\tchanged = true\n\t\t}\n\t}\n\treturn\n}\n\n// caller must have lock.\nfunc (wkr *worker) closeRunner(uuid string) {\n\trr := wkr.running[uuid]\n\tif rr == nil {\n\t\treturn\n\t}\n\twkr.logger.WithField(\"ContainerUUID\", uuid).Info(\"crunch-run process ended\")\n\tdelete(wkr.running, uuid)\n\trr.Close()\n\n\tnow := time.Now()\n\twkr.updated = now\n\twkr.wp.exited[uuid] = now\n\tif wkr.state == StateRunning && len(wkr.running)+len(wkr.starting) == 0 {\n\t\twkr.state = StateIdle\n\t}\n}\n"
  },
  {
    "path": "lib/dispatchcloud/worker/worker_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage worker\n\nimport (\n\t\"bytes\"\n\t\"crypto/md5\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cloud\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/test\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&WorkerSuite{})\n\ntype WorkerSuite struct {\n\tlogger      logrus.FieldLogger\n\ttestCluster *arvados.Cluster\n}\n\nfunc (suite *WorkerSuite) SetUpTest(c *check.C) {\n\tsuite.logger = ctxlog.TestLogger(c)\n\tcfg, err := config.NewLoader(nil, suite.logger).Load()\n\tc.Assert(err, check.IsNil)\n\tsuite.testCluster, err = cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n}\n\nfunc (suite *WorkerSuite) TestProbeAndUpdate(c *check.C) {\n\tbootTimeout := time.Minute\n\tprobeTimeout := time.Second\n\n\tac := arvados.NewClientFromEnv()\n\tis, err := (&test.StubDriver{}).InstanceSet(nil, \"test-instance-set-id\", nil, suite.logger, nil)\n\tc.Assert(err, check.IsNil)\n\tinst, err := is.Create(arvados.InstanceType{}, \"\", nil, \"echo InitCommand\", nil)\n\tc.Assert(err, check.IsNil)\n\n\ttype trialT struct {\n\t\ttestCaseComment string // displayed in test output to help identify failure case\n\t\tage             time.Duration\n\t\tstate           State\n\t\trunning         int\n\t\tstarting        int\n\t\trespBoot        stubResp // zero value is success\n\t\trespDeploy      stubResp // zero value is success\n\t\trespRun         stubResp // zero value is success + nothing running\n\t\trespRunDeployed stubResp\n\t\tdeployRunner    []byte\n\t\texpectStdin     []byte\n\t\texpectState     State\n\t\texpectRunning   int\n\t}\n\n\terrFail := errors.New(\"failed\")\n\trespFail := stubResp{\"\", \"command failed\\n\", errFail}\n\trespContainerRunning := stubResp{\"zzzzz-dz642-abcdefghijklmno\\n\", \"\", nil}\n\tfor idx, trial := range []trialT{\n\t\t{\n\t\t\ttestCaseComment: \"Unknown, probes fail\",\n\t\t\tstate:           StateUnknown,\n\t\t\trespBoot:        respFail,\n\t\t\trespRun:         respFail,\n\t\t\texpectState:     StateUnknown,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Unknown, boot probe fails, but one container is running\",\n\t\t\tstate:           StateUnknown,\n\t\t\trespBoot:        respFail,\n\t\t\trespRun:         respContainerRunning,\n\t\t\texpectState:     StateUnknown,\n\t\t\texpectRunning:   1,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Unknown, boot probe fails, previously running container has exited\",\n\t\t\tstate:           StateUnknown,\n\t\t\trunning:         1,\n\t\t\trespBoot:        respFail,\n\t\t\texpectState:     StateUnknown,\n\t\t\texpectRunning:   0,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Unknown, boot timeout exceeded, boot probe fails\",\n\t\t\tstate:           StateUnknown,\n\t\t\tage:             bootTimeout + time.Second,\n\t\t\trespBoot:        respFail,\n\t\t\trespRun:         respFail,\n\t\t\texpectState:     StateShutdown,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Unknown, boot timeout exceeded, boot probe succeeds but crunch-run fails\",\n\t\t\tstate:           StateUnknown,\n\t\t\tage:             bootTimeout * 2,\n\t\t\trespRun:         respFail,\n\t\t\texpectState:     StateShutdown,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: 
\"Unknown, boot timeout exceeded, boot probe fails but crunch-run succeeds\",\n\t\t\tstate:           StateUnknown,\n\t\t\tage:             bootTimeout * 2,\n\t\t\trespBoot:        respFail,\n\t\t\texpectState:     StateShutdown,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Unknown, boot timeout exceeded, boot probe fails but container is running\",\n\t\t\tstate:           StateUnknown,\n\t\t\tage:             bootTimeout * 2,\n\t\t\trespBoot:        respFail,\n\t\t\trespRun:         respContainerRunning,\n\t\t\texpectState:     StateUnknown,\n\t\t\texpectRunning:   1,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Unknown, boot probe fails, deployRunner succeeds, container is running\",\n\t\t\tstate:           StateUnknown,\n\t\t\trespBoot:        respFail,\n\t\t\trespRun:         respFail,\n\t\t\trespRunDeployed: respContainerRunning,\n\t\t\tdeployRunner:    []byte(\"ELF\"),\n\t\t\texpectStdin:     []byte(\"ELF\"),\n\t\t\texpectState:     StateUnknown,\n\t\t\texpectRunning:   1,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Unknown, boot timeout exceeded, boot probe fails but deployRunner succeeds and container is running\",\n\t\t\tstate:           StateUnknown,\n\t\t\tage:             bootTimeout * 2,\n\t\t\trespBoot:        respFail,\n\t\t\trespRun:         respFail,\n\t\t\trespRunDeployed: respContainerRunning,\n\t\t\tdeployRunner:    []byte(\"ELF\"),\n\t\t\texpectStdin:     []byte(\"ELF\"),\n\t\t\texpectState:     StateUnknown,\n\t\t\texpectRunning:   1,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Unknown, boot timeout exceeded, boot probe fails but deployRunner succeeds and no container is running\",\n\t\t\tstate:           StateUnknown,\n\t\t\tage:             bootTimeout * 2,\n\t\t\trespBoot:        respFail,\n\t\t\trespRun:         respFail,\n\t\t\tdeployRunner:    []byte(\"ELF\"),\n\t\t\texpectStdin:     []byte(\"ELF\"),\n\t\t\texpectState:     StateShutdown,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Booting, boot probe fails, run probe fails\",\n\t\t\tstate:           StateBooting,\n\t\t\trespBoot:        respFail,\n\t\t\trespRun:         respFail,\n\t\t\texpectState:     StateBooting,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Booting, boot probe fails, run probe succeeds (but isn't expected to be called)\",\n\t\t\tstate:           StateBooting,\n\t\t\trespBoot:        respFail,\n\t\t\texpectState:     StateBooting,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Booting, boot probe succeeds, run probe fails\",\n\t\t\tstate:           StateBooting,\n\t\t\trespRun:         respFail,\n\t\t\texpectState:     StateBooting,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Booting, boot probe succeeds, run probe succeeds\",\n\t\t\tstate:           StateBooting,\n\t\t\texpectState:     StateIdle,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Booting, boot probe succeeds, run probe succeeds, container is running\",\n\t\t\tstate:           StateBooting,\n\t\t\trespRun:         respContainerRunning,\n\t\t\texpectState:     StateRunning,\n\t\t\texpectRunning:   1,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Booting, boot timeout exceeded\",\n\t\t\tstate:           StateBooting,\n\t\t\tage:             bootTimeout * 2,\n\t\t\trespRun:         respFail,\n\t\t\texpectState:     StateShutdown,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Idle, probe timeout exceeded, one container running\",\n\t\t\tstate:           StateIdle,\n\t\t\tage:             probeTimeout * 2,\n\t\t\trespRun:         respContainerRunning,\n\t\t\texpectState:     StateRunning,\n\t\t\texpectRunning:   1,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Idle, probe timeout 
exceeded, one container running, probe fails\",\n\t\t\tstate:           StateIdle,\n\t\t\tage:             probeTimeout * 2,\n\t\t\trunning:         1,\n\t\t\trespRun:         respFail,\n\t\t\texpectState:     StateShutdown,\n\t\t\texpectRunning:   1,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Idle, probe timeout exceeded, nothing running, probe fails\",\n\t\t\tstate:           StateIdle,\n\t\t\tage:             probeTimeout * 2,\n\t\t\trespRun:         respFail,\n\t\t\texpectState:     StateShutdown,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Running, one container still running\",\n\t\t\tstate:           StateRunning,\n\t\t\trunning:         1,\n\t\t\trespRun:         respContainerRunning,\n\t\t\texpectState:     StateRunning,\n\t\t\texpectRunning:   1,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Running, container has exited\",\n\t\t\tstate:           StateRunning,\n\t\t\trunning:         1,\n\t\t\texpectState:     StateIdle,\n\t\t\texpectRunning:   0,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Running, probe timeout exceeded, nothing running, new container being started\",\n\t\t\tstate:           StateRunning,\n\t\t\tage:             probeTimeout * 2,\n\t\t\tstarting:        1,\n\t\t\texpectState:     StateRunning,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Booting, boot probe succeeds, deployRunner succeeds, run probe succeeds\",\n\t\t\tstate:           StateBooting,\n\t\t\tdeployRunner:    []byte(\"ELF\"),\n\t\t\texpectStdin:     []byte(\"ELF\"),\n\t\t\trespRun:         respFail,\n\t\t\trespRunDeployed: respContainerRunning,\n\t\t\texpectRunning:   1,\n\t\t\texpectState:     StateRunning,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Booting, boot probe succeeds, deployRunner fails\",\n\t\t\tstate:           StateBooting,\n\t\t\tdeployRunner:    []byte(\"ELF\"),\n\t\t\trespDeploy:      respFail,\n\t\t\texpectStdin:     []byte(\"ELF\"),\n\t\t\texpectState:     StateBooting,\n\t\t},\n\t\t{\n\t\t\ttestCaseComment: \"Booting, boot probe succeeds, deployRunner skipped, run probe succeeds\",\n\t\t\tstate:           StateBooting,\n\t\t\tdeployRunner:    nil,\n\t\t\trespDeploy:      respFail,\n\t\t\texpectState:     StateIdle,\n\t\t},\n\t} {\n\t\tc.Logf(\"------- trial %d: %#v\", idx, trial)\n\t\tctime := time.Now().Add(-trial.age)\n\t\texr := &stubExecutor{\n\t\t\tresponse: map[string]stubResp{\n\t\t\t\t\"bootprobe\":         trial.respBoot,\n\t\t\t\t\"crunch-run --list\": trial.respRun,\n\t\t\t\t\"{deploy}\":          trial.respDeploy,\n\t\t\t},\n\t\t}\n\t\twp := &Pool{\n\t\t\tarvClient:        ac,\n\t\t\tnewExecutor:      func(cloud.Instance) Executor { return exr },\n\t\t\tcluster:          suite.testCluster,\n\t\t\tbootProbeCommand: \"bootprobe\",\n\t\t\ttimeoutBooting:   bootTimeout,\n\t\t\ttimeoutProbe:     probeTimeout,\n\t\t\texited:           map[string]time.Time{},\n\t\t\trunnerCmdDefault: \"crunch-run\",\n\t\t\trunnerArgs:       []string{\"--args=not used with --list\"},\n\t\t\trunnerCmd:        \"crunch-run\",\n\t\t\trunnerData:       trial.deployRunner,\n\t\t\trunnerMD5:        md5.Sum(trial.deployRunner),\n\t\t}\n\t\twp.registerMetrics(prometheus.NewRegistry())\n\t\tif trial.deployRunner != nil {\n\t\t\tsvHash := md5.Sum(trial.deployRunner)\n\t\t\twp.runnerCmd = fmt.Sprintf(\"/var/run/arvados/crunch-run~%x\", svHash)\n\t\t\texr.response[wp.runnerCmd+\" --list\"] = trial.respRunDeployed\n\t\t}\n\t\twkr := &worker{\n\t\t\tlogger:   suite.logger,\n\t\t\texecutor: exr,\n\t\t\twp:       wp,\n\t\t\tmtx:      &wp.mtx,\n\t\t\tstate:    trial.state,\n\t\t\tinstance: inst,\n\t\t\tappeared: 
ctime,\n\t\t\tbusy:     ctime,\n\t\t\tprobed:   ctime,\n\t\t\tupdated:  ctime,\n\t\t\trunning:  map[string]*remoteRunner{},\n\t\t\tstarting: map[string]*remoteRunner{},\n\t\t\tprobing:  make(chan struct{}, 1),\n\t\t}\n\t\tif trial.running > 0 {\n\t\t\tuuid := \"zzzzz-dz642-abcdefghijklmno\"\n\t\t\twkr.running = map[string]*remoteRunner{uuid: newRemoteRunner(uuid, wkr)}\n\t\t}\n\t\tif trial.starting > 0 {\n\t\t\tuuid := \"zzzzz-dz642-bcdefghijklmnop\"\n\t\t\twkr.starting = map[string]*remoteRunner{uuid: newRemoteRunner(uuid, wkr)}\n\t\t}\n\t\twkr.probeAndUpdate()\n\t\tc.Check(wkr.state, check.Equals, trial.expectState)\n\t\tc.Check(len(wkr.running), check.Equals, trial.expectRunning)\n\t\tc.Check(exr.stdin.String(), check.Equals, string(trial.expectStdin))\n\t}\n}\n\ntype stubResp struct {\n\tstdout string\n\tstderr string\n\terr    error\n}\n\ntype stubExecutor struct {\n\tresponse map[string]stubResp\n\tstdin    bytes.Buffer\n}\n\nfunc (se *stubExecutor) SetTarget(cloud.ExecutorTarget) {}\nfunc (se *stubExecutor) Close()                         {}\nfunc (se *stubExecutor) Execute(env map[string]string, cmd string, stdin io.Reader) (stdout, stderr []byte, err error) {\n\tif stdin != nil {\n\t\t_, err = io.Copy(&se.stdin, stdin)\n\t\tif err != nil {\n\t\t\treturn nil, []byte(err.Error()), err\n\t\t}\n\t}\n\tresp, ok := se.response[cmd]\n\tif !ok && strings.Contains(cmd, `; cat >\"$dstfile\"`) {\n\t\tresp, ok = se.response[\"{deploy}\"]\n\t}\n\tif !ok {\n\t\treturn nil, []byte(fmt.Sprintf(\"%s: command not found\\n\", cmd)), errors.New(\"command not found\")\n\t}\n\treturn []byte(resp.stdout), []byte(resp.stderr), resp.err\n}\n"
  },
  {
    "path": "lib/lsf/dispatch.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage lsf\n\nimport (\n\t\"context\"\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"net/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/controller/dblock\"\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/container\"\n\t\"git.arvados.org/arvados.git/lib/service\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/dispatch\"\n\t\"git.arvados.org/arvados.git/sdk/go/health\"\n\t\"github.com/julienschmidt/httprouter\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nvar DispatchCommand cmd.Handler = service.Command(arvados.ServiceNameDispatchLSF, newHandler)\n\nfunc newHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {\n\tac, err := arvados.NewClientFromConfig(cluster)\n\tif err != nil {\n\t\treturn service.ErrorHandler(ctx, cluster, fmt.Errorf(\"error initializing client from cluster config: %s\", err))\n\t}\n\tctx, cancel := context.WithCancel(ctx)\n\td := &dispatcher{\n\t\tCluster:   cluster,\n\t\tContext:   ctx,\n\t\tArvClient: ac,\n\t\tAuthToken: token,\n\t\tRegistry:  reg,\n\t\tcancel:    cancel,\n\t}\n\tgo d.Start()\n\treturn d\n}\n\ntype dispatcher struct {\n\tCluster   *arvados.Cluster\n\tContext   context.Context\n\tArvClient *arvados.Client\n\tAuthToken string\n\tRegistry  *prometheus.Registry\n\n\tlogger        logrus.FieldLogger\n\tdbConnector   ctrlctx.DBConnector\n\tlsfcli        lsfcli\n\tlsfqueue      lsfqueue\n\tarvDispatcher *dispatch.Dispatcher\n\thttpHandler   http.Handler\n\n\tinitOnce sync.Once\n\tstopped  chan struct{}\n\tcancel   context.CancelFunc\n}\n\n// Start starts the dispatcher. Start can be called multiple times\n// with no ill effect.\nfunc (disp *dispatcher) Start() {\n\tdisp.initOnce.Do(func() {\n\t\tdisp.init()\n\t\tdblock.Dispatch.Lock(context.Background(), disp.dbConnector.GetDB)\n\t\tgo func() {\n\t\t\tdisp.stopped = make(chan struct{})\n\t\t\tdefer close(disp.stopped)\n\t\t\tdefer dblock.Dispatch.Unlock()\n\t\t\tdisp.checkLsfQueueForOrphans()\n\t\t\terr := disp.arvDispatcher.Run(disp.Context)\n\t\t\tif err != nil {\n\t\t\t\tdisp.logger.Error(err)\n\t\t\t}\n\t\t}()\n\t})\n}\n\n// ServeHTTP implements service.Handler.\nfunc (disp *dispatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdisp.Start()\n\tdisp.httpHandler.ServeHTTP(w, r)\n}\n\n// CheckHealth implements service.Handler.\nfunc (disp *dispatcher) CheckHealth() error {\n\tdisp.Start()\n\tselect {\n\tcase <-disp.stopped:\n\t\treturn errors.New(\"stopped\")\n\tdefault:\n\t\treturn nil\n\t}\n}\n\n// Done implements service.Handler.\nfunc (disp *dispatcher) Done() <-chan struct{} {\n\treturn disp.stopped\n}\n\n// Stop dispatching containers and release resources. 
Used by tests.\nfunc (disp *dispatcher) Close() {\n\tdisp.Start()\n\tdisp.cancel()\n\t<-disp.stopped\n}\n\nfunc (disp *dispatcher) init() {\n\tdisp.logger = ctxlog.FromContext(disp.Context)\n\tdisp.lsfcli.logger = disp.logger\n\tdisp.lsfqueue = lsfqueue{\n\t\tlogger: disp.logger,\n\t\tperiod: disp.Cluster.Containers.CloudVMs.PollInterval.Duration(),\n\t\tlsfcli: &disp.lsfcli,\n\t}\n\tdisp.ArvClient.AuthToken = disp.AuthToken\n\tdisp.dbConnector = ctrlctx.DBConnector{PostgreSQL: disp.Cluster.PostgreSQL}\n\n\tarv, err := arvadosclient.New(disp.ArvClient)\n\tif err != nil {\n\t\tdisp.logger.Fatalf(\"Error making Arvados client: %v\", err)\n\t}\n\tarv.Retries = 25\n\tarv.ApiToken = disp.AuthToken\n\tdisp.arvDispatcher = &dispatch.Dispatcher{\n\t\tArv:            arv,\n\t\tLogger:         disp.logger,\n\t\tBatchSize:      disp.Cluster.API.MaxItemsPerResponse,\n\t\tRunContainer:   disp.runContainer,\n\t\tPollPeriod:     time.Duration(disp.Cluster.Containers.CloudVMs.PollInterval),\n\t\tMinRetryPeriod: time.Duration(disp.Cluster.Containers.MinRetryPeriod),\n\t}\n\n\tif disp.Cluster.ManagementToken == \"\" {\n\t\tdisp.httpHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\thttp.Error(w, \"Management API authentication is not configured\", http.StatusForbidden)\n\t\t})\n\t} else {\n\t\tmux := httprouter.New()\n\t\tmetricsH := promhttp.HandlerFor(disp.Registry, promhttp.HandlerOpts{\n\t\t\tErrorLog: disp.logger,\n\t\t})\n\t\tmux.Handler(\"GET\", \"/metrics\", metricsH)\n\t\tmux.Handler(\"GET\", \"/metrics.json\", metricsH)\n\t\tmux.Handler(\"GET\", \"/_health/:check\", &health.Handler{\n\t\t\tToken:  disp.Cluster.ManagementToken,\n\t\t\tPrefix: \"/_health/\",\n\t\t\tRoutes: health.Routes{\"ping\": disp.CheckHealth},\n\t\t})\n\t\tdisp.httpHandler = auth.RequireLiteralToken(disp.Cluster.ManagementToken, mux)\n\t}\n}\n\nfunc (disp *dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) error {\n\tctx, cancel := context.WithCancel(disp.Context)\n\tdefer cancel()\n\n\tif ctr.State != dispatch.Locked {\n\t\t// already started by prior invocation\n\t} else if _, ok := disp.lsfqueue.Lookup(ctr.UUID); !ok {\n\t\t_, err := container.ChooseInstanceType(disp.Cluster, &ctr)\n\t\tif err != nil && err != container.ErrInstanceTypesNotConfigured {\n\t\t\terr := disp.arvDispatcher.Arv.Update(\"containers\", ctr.UUID, arvadosclient.Dict{\n\t\t\t\t\"container\": map[string]interface{}{\n\t\t\t\t\t\"runtime_status\": map[string]string{\n\t\t\t\t\t\t\"error\": err.Error(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error setting runtime_status on %s: %s\", ctr.UUID, err)\n\t\t\t}\n\t\t\treturn disp.arvDispatcher.UpdateState(ctr.UUID, dispatch.Cancelled)\n\t\t}\n\t\tdisp.logger.Printf(\"Submitting container %s to LSF\", ctr.UUID)\n\t\tcmd := []string{disp.Cluster.Containers.CrunchRunCommand}\n\t\tcmd = append(cmd, \"--runtime-engine=\"+disp.Cluster.Containers.RuntimeEngine)\n\t\tcmd = append(cmd, disp.Cluster.Containers.CrunchRunArgumentsList...)\n\t\terr = disp.submit(ctr, cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tdisp.logger.Printf(\"Start monitoring container %v in state %q\", ctr.UUID, ctr.State)\n\tdefer disp.logger.Printf(\"Done monitoring container %s\", ctr.UUID)\n\n\tgo func(uuid string) {\n\t\tfor ctx.Err() == nil {\n\t\t\t_, ok := disp.lsfqueue.Lookup(uuid)\n\t\t\tif !ok {\n\t\t\t\t// If the container disappears from\n\t\t\t\t// the lsf queue, there is no point 
in\n\t\t\t\t// waiting for further dispatch\n\t\t\t\t// updates: just clean up and return.\n\t\t\t\tdisp.logger.Printf(\"container %s job disappeared from LSF queue\", uuid)\n\t\t\t\tcancel()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(ctr.UUID)\n\n\tfor done := false; !done; {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t// Disappeared from lsf queue\n\t\t\tif err := disp.arvDispatcher.Arv.Get(\"containers\", ctr.UUID, nil, &ctr); err != nil {\n\t\t\t\tdisp.logger.Printf(\"error getting final container state for %s: %s\", ctr.UUID, err)\n\t\t\t}\n\t\t\tswitch ctr.State {\n\t\t\tcase dispatch.Running:\n\t\t\t\tdisp.arvDispatcher.UpdateState(ctr.UUID, dispatch.Cancelled)\n\t\t\tcase dispatch.Locked:\n\t\t\t\tdisp.arvDispatcher.Unlock(ctr.UUID)\n\t\t\t}\n\t\t\treturn nil\n\t\tcase updated, ok := <-status:\n\t\t\tif !ok {\n\t\t\t\t// status channel is closed, which is\n\t\t\t\t// how arvDispatcher tells us to stop\n\t\t\t\t// touching the container record, kill\n\t\t\t\t// off any remaining LSF processes,\n\t\t\t\t// etc.\n\t\t\t\tdone = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif updated.State != ctr.State {\n\t\t\t\tdisp.logger.Infof(\"container %s changed state from %s to %s\", ctr.UUID, ctr.State, updated.State)\n\t\t\t}\n\t\t\tctr = updated\n\t\t\tif ctr.Priority < 1 {\n\t\t\t\tdisp.logger.Printf(\"container %s has state %s, priority %d: cancel lsf job\", ctr.UUID, ctr.State, ctr.Priority)\n\t\t\t\tdisp.bkill(ctr)\n\t\t\t} else {\n\t\t\t\tdisp.lsfqueue.SetPriority(ctr.UUID, int64(ctr.Priority))\n\t\t\t}\n\t\t}\n\t}\n\tdisp.logger.Printf(\"container %s is done\", ctr.UUID)\n\n\t// Try \"bkill\" every few seconds until the LSF job disappears\n\t// from the queue.\n\tticker := time.NewTicker(disp.Cluster.Containers.CloudVMs.PollInterval.Duration() / 2)\n\tdefer ticker.Stop()\n\tfor qent, ok := disp.lsfqueue.Lookup(ctr.UUID); ok; _, ok = disp.lsfqueue.Lookup(ctr.UUID) {\n\t\terr := disp.lsfcli.Bkill(qent.ID)\n\t\tif err != nil {\n\t\t\tdisp.logger.Warnf(\"%s: bkill(%s): %s\", ctr.UUID, qent.ID, err)\n\t\t}\n\t\t<-ticker.C\n\t}\n\treturn nil\n}\n\nfunc (disp *dispatcher) submit(container arvados.Container, crunchRunCommand []string) error {\n\t// Start with an empty slice here to ensure append() doesn't\n\t// modify crunchRunCommand's underlying array\n\tvar crArgs []string\n\tcrArgs = append(crArgs, crunchRunCommand...)\n\tcrArgs = append(crArgs, container.UUID)\n\n\th := hmac.New(sha256.New, []byte(disp.Cluster.SystemRootToken))\n\tfmt.Fprint(h, container.UUID)\n\tauthsecret := fmt.Sprintf(\"%x\", h.Sum(nil))\n\n\tcrScript := execScript(crArgs, map[string]string{\"GatewayAuthSecret\": authsecret})\n\n\tbsubArgs, err := disp.bsubArgs(container)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn disp.lsfcli.Bsub(crScript, bsubArgs, disp.ArvClient)\n}\n\nfunc (disp *dispatcher) bkill(ctr arvados.Container) {\n\tif qent, ok := disp.lsfqueue.Lookup(ctr.UUID); !ok {\n\t\tdisp.logger.Debugf(\"bkill(%s): redundant, job not in queue\", ctr.UUID)\n\t} else if err := disp.lsfcli.Bkill(qent.ID); err != nil {\n\t\tdisp.logger.Warnf(\"%s: bkill(%s): %s\", ctr.UUID, qent.ID, err)\n\t}\n}\n\nfunc (disp *dispatcher) bsubArgs(ctr arvados.Container) ([]string, error) {\n\targs := []string{\"bsub\"}\n\n\ttmp := int64(math.Ceil(float64(container.EstimateScratchSpace(&ctr)) / 1048576))\n\tvcpus := ctr.RuntimeConstraints.VCPUs\n\tmem := int64(math.Ceil(float64(ctr.RuntimeConstraints.RAM+\n\t\tctr.RuntimeConstraints.KeepCacheRAM+\n\t\tint64(disp.Cluster.Containers.ReserveExtraRAM)) / 1048576))\n\n\tmaxruntime := 
time.Duration(ctr.SchedulingParameters.MaxRunTime) * time.Second\n\tif maxruntime == 0 {\n\t\tmaxruntime = disp.Cluster.Containers.LSF.MaxRunTimeDefault.Duration()\n\t}\n\tif maxruntime > 0 {\n\t\tmaxruntime += disp.Cluster.Containers.LSF.MaxRunTimeOverhead.Duration()\n\t}\n\tmaxrunminutes := int64(math.Ceil(float64(maxruntime.Seconds()) / 60))\n\n\trepl := map[string]string{\n\t\t\"%%\": \"%\",\n\t\t\"%C\": fmt.Sprintf(\"%d\", vcpus),\n\t\t\"%M\": fmt.Sprintf(\"%d\", mem),\n\t\t\"%T\": fmt.Sprintf(\"%d\", tmp),\n\t\t\"%U\": ctr.UUID,\n\t\t\"%G\": fmt.Sprintf(\"%d\", ctr.RuntimeConstraints.GPU.DeviceCount),\n\t\t\"%W\": fmt.Sprintf(\"%d\", maxrunminutes),\n\t}\n\n\tre := regexp.MustCompile(`%.`)\n\tvar substitutionErrors string\n\targumentTemplate := disp.Cluster.Containers.LSF.BsubArgumentsList\n\tif ctr.RuntimeConstraints.GPU.DeviceCount > 0 {\n\t\targumentTemplate = append(argumentTemplate, disp.Cluster.Containers.LSF.BsubGPUArguments...)\n\t}\n\tfor idx, a := range argumentTemplate {\n\t\tif idx > 0 && (argumentTemplate[idx-1] == \"-W\" || argumentTemplate[idx-1] == \"-We\") && a == \"%W\" && maxrunminutes == 0 {\n\t\t\t// LSF docs don't specify an argument to \"-W\"\n\t\t\t// or \"-We\" that indicates \"unknown\", so\n\t\t\t// instead we drop the \"-W %W\" part of the\n\t\t\t// command line entirely when max runtime is\n\t\t\t// unknown.\n\t\t\targs = args[:len(args)-1]\n\t\t\tcontinue\n\t\t}\n\t\targs = append(args, re.ReplaceAllStringFunc(a, func(s string) string {\n\t\t\tsubst := repl[s]\n\t\t\tif len(subst) == 0 {\n\t\t\t\tsubstitutionErrors += fmt.Sprintf(\"Unknown substitution parameter %s in BsubArgumentsList, \", s)\n\t\t\t}\n\t\t\treturn subst\n\t\t}))\n\t}\n\tif len(substitutionErrors) != 0 {\n\t\treturn nil, fmt.Errorf(\"%s\", substitutionErrors[:len(substitutionErrors)-2])\n\t}\n\n\tif u := disp.Cluster.Containers.LSF.BsubSudoUser; u != \"\" {\n\t\targs = append([]string{\"sudo\", \"-E\", \"-u\", u}, args...)\n\t}\n\treturn args, nil\n}\n\n// Check the next bjobs report, and invoke TrackContainer for all the\n// containers in the report. This gives us a chance to cancel existing\n// Arvados LSF jobs (started by a previous dispatch process) that\n// never released their LSF job allocations even though their\n// container states are Cancelled or Complete. See\n// https://dev.arvados.org/issues/10979\nfunc (disp *dispatcher) checkLsfQueueForOrphans() {\n\tcontainerUuidPattern := regexp.MustCompile(`^[a-z0-9]{5}-dz642-[a-z0-9]{15}$`)\n\tfor _, uuid := range disp.lsfqueue.All() {\n\t\tif !containerUuidPattern.MatchString(uuid) || !strings.HasPrefix(uuid, disp.Cluster.ClusterID) {\n\t\t\tcontinue\n\t\t}\n\t\terr := disp.arvDispatcher.TrackContainer(uuid)\n\t\tif err != nil {\n\t\t\tdisp.logger.Warnf(\"checkLsfQueueForOrphans: TrackContainer(%s): %s\", uuid, err)\n\t\t}\n\t}\n}\n\nfunc execScript(args []string, env map[string]string) []byte {\n\ts := \"#!/bin/sh\\n\"\n\tfor k, v := range env {\n\t\ts += k + `='`\n\t\ts += strings.Replace(v, `'`, `'\\''`, -1)\n\t\ts += `' `\n\t}\n\ts += `exec`\n\tfor _, w := range args {\n\t\ts += ` '`\n\t\ts += strings.Replace(w, `'`, `'\\''`, -1)\n\t\ts += `'`\n\t}\n\treturn []byte(s + \"\\n\")\n}\n"
  },
  {
    "path": "lib/lsf/dispatch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage lsf\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"os/exec\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&suite{})\n\ntype suite struct {\n\tdisp          *dispatcher\n\tcrTooBig      arvados.ContainerRequest\n\tcrPending     arvados.ContainerRequest\n\tcrCUDARequest arvados.ContainerRequest\n\tcrMaxRunTime  arvados.ContainerRequest\n}\n\nfunc (s *suite) TearDownTest(c *check.C) {\n\ts.disp.Close()\n\tarvadostest.ResetDB(c)\n}\n\nfunc (s *suite) SetUpTest(c *check.C) {\n\tarvadostest.ResetDB(c)\n\n\tcfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()\n\tc.Assert(err, check.IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\tcluster.Containers.ReserveExtraRAM = 256 << 20\n\tcluster.Containers.CloudVMs.PollInterval = arvados.Duration(time.Second / 4)\n\tcluster.Containers.MinRetryPeriod = arvados.Duration(time.Second / 4)\n\tcluster.InstanceTypes = arvados.InstanceTypeMap{\n\t\t\"biggest_available_node\": arvados.InstanceType{\n\t\t\tRAM:             100 << 30, // 100 GiB\n\t\t\tVCPUs:           4,\n\t\t\tIncludedScratch: 100 << 30,\n\t\t\tScratch:         100 << 30,\n\t\t},\n\t\t\"biggest_available_node_with_gpu\": arvados.InstanceType{\n\t\t\tRAM:             100 << 30, // 100 GiB\n\t\t\tVCPUs:           4,\n\t\t\tIncludedScratch: 100 << 30,\n\t\t\tScratch:         100 << 30,\n\t\t\tGPU: arvados.GPUFeatures{\n\t\t\t\tStack:          \"cuda\",\n\t\t\t\tDriverVersion:  \"11.0\",\n\t\t\t\tHardwareTarget: \"8.0\",\n\t\t\t\tDeviceCount:    2,\n\t\t\t\tVRAM:           8000000000,\n\t\t\t},\n\t\t}}\n\ts.disp = newHandler(context.Background(), cluster, arvadostest.SystemRootToken, prometheus.NewRegistry()).(*dispatcher)\n\ts.disp.lsfcli.stubCommand = func(string, ...string) *exec.Cmd {\n\t\treturn exec.Command(\"bash\", \"-c\", \"echo >&2 unimplemented stub; false\")\n\t}\n\terr = arvados.NewClientFromEnv().RequestAndDecode(&s.crTooBig, \"POST\", \"arvados/v1/container_requests\", nil, map[string]interface{}{\n\t\t\"container_request\": map[string]interface{}{\n\t\t\t\"runtime_constraints\": arvados.RuntimeConstraints{\n\t\t\t\tRAM:   1000000000000,\n\t\t\t\tVCPUs: 1,\n\t\t\t},\n\t\t\t\"container_image\":     arvadostest.DockerImage112PDH,\n\t\t\t\"command\":             []string{\"sleep\", \"1\"},\n\t\t\t\"mounts\":              map[string]arvados.Mount{\"/mnt/out\": {Kind: \"tmp\", Capacity: 1000}},\n\t\t\t\"output_path\":         \"/mnt/out\",\n\t\t\t\"state\":               arvados.ContainerRequestStateCommitted,\n\t\t\t\"priority\":            1,\n\t\t\t\"container_count_max\": 1,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\n\terr = arvados.NewClientFromEnv().RequestAndDecode(&s.crPending, \"POST\", \"arvados/v1/container_requests\", nil, map[string]interface{}{\n\t\t\"container_request\": map[string]interface{}{\n\t\t\t\"runtime_constraints\": arvados.RuntimeConstraints{\n\t\t\t\tRAM:           100000000,\n\t\t\t\tVCPUs:         2,\n\t\t\t\tKeepCacheDisk: 8 << 30,\n\t\t\t},\n\t\t\t\"container_image\":     
arvadostest.DockerImage112PDH,\n\t\t\t\"command\":             []string{\"sleep\", \"1\"},\n\t\t\t\"mounts\":              map[string]arvados.Mount{\"/mnt/out\": {Kind: \"tmp\", Capacity: 1000}},\n\t\t\t\"output_path\":         \"/mnt/out\",\n\t\t\t\"state\":               arvados.ContainerRequestStateCommitted,\n\t\t\t\"priority\":            1,\n\t\t\t\"container_count_max\": 1,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\n\terr = arvados.NewClientFromEnv().RequestAndDecode(&s.crCUDARequest, \"POST\", \"arvados/v1/container_requests\", nil, map[string]interface{}{\n\t\t\"container_request\": map[string]interface{}{\n\t\t\t\"runtime_constraints\": arvados.RuntimeConstraints{\n\t\t\t\tRAM:   16000000000,\n\t\t\t\tVCPUs: 4,\n\t\t\t\tGPU: arvados.GPURuntimeConstraints{\n\t\t\t\t\tStack:          \"cuda\",\n\t\t\t\t\tDeviceCount:    1,\n\t\t\t\t\tDriverVersion:  \"11.0\",\n\t\t\t\t\tHardwareTarget: []string{\"8.0\"},\n\t\t\t\t\tVRAM:           8000000000,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"container_image\":     arvadostest.DockerImage112PDH,\n\t\t\t\"command\":             []string{\"sleep\", \"1\"},\n\t\t\t\"mounts\":              map[string]arvados.Mount{\"/mnt/out\": {Kind: \"tmp\", Capacity: 1000}},\n\t\t\t\"output_path\":         \"/mnt/out\",\n\t\t\t\"state\":               arvados.ContainerRequestStateCommitted,\n\t\t\t\"priority\":            1,\n\t\t\t\"container_count_max\": 1,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\n\terr = arvados.NewClientFromEnv().RequestAndDecode(&s.crMaxRunTime, \"POST\", \"arvados/v1/container_requests\", nil, map[string]interface{}{\n\t\t\"container_request\": map[string]interface{}{\n\t\t\t\"runtime_constraints\": arvados.RuntimeConstraints{\n\t\t\t\tRAM:   1000000,\n\t\t\t\tVCPUs: 1,\n\t\t\t},\n\t\t\t\"scheduling_parameters\": arvados.SchedulingParameters{\n\t\t\t\tMaxRunTime: 124,\n\t\t\t},\n\t\t\t\"container_image\":     arvadostest.DockerImage112PDH,\n\t\t\t\"command\":             []string{\"sleep\", \"123\"},\n\t\t\t\"mounts\":              map[string]arvados.Mount{\"/mnt/out\": {Kind: \"tmp\", Capacity: 1000}},\n\t\t\t\"output_path\":         \"/mnt/out\",\n\t\t\t\"state\":               arvados.ContainerRequestStateCommitted,\n\t\t\t\"priority\":            1,\n\t\t\t\"container_count_max\": 1,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n}\n\ntype lsfstub struct {\n\tsudoUser  string\n\terrorRate float64\n}\n\nfunc (stub lsfstub) stubCommand(s *suite, c *check.C) func(prog string, args ...string) *exec.Cmd {\n\tmtx := sync.Mutex{}\n\tnextjobid := 100\n\tfakejobq := map[int]string{}\n\treturn func(prog string, args ...string) *exec.Cmd {\n\t\tc.Logf(\"stubCommand: %q %q\", prog, args)\n\t\tif rand.Float64() < stub.errorRate {\n\t\t\treturn exec.Command(\"bash\", \"-c\", \"echo >&2 'stub random failure' && false\")\n\t\t}\n\t\tif stub.sudoUser != \"\" && len(args) > 3 &&\n\t\t\tprog == \"sudo\" &&\n\t\t\targs[0] == \"-E\" &&\n\t\t\targs[1] == \"-u\" &&\n\t\t\targs[2] == stub.sudoUser {\n\t\t\tprog, args = args[3], args[4:]\n\t\t}\n\t\tswitch prog {\n\t\tcase \"bsub\":\n\t\t\tc.Assert(len(args) > 5, check.Equals, true)\n\t\t\t// %%J must have been rewritten to %J\n\t\t\tc.Check(args[1], check.Equals, \"/tmp/crunch-run.%J.out\")\n\t\t\targs = args[4:]\n\t\t\tswitch args[1] {\n\t\t\tcase arvadostest.LockedContainerUUID:\n\t\t\t\tc.Check(args, check.DeepEquals, []string{\n\t\t\t\t\t\"-J\", arvadostest.LockedContainerUUID,\n\t\t\t\t\t\"-n\", \"4\",\n\t\t\t\t\t\"-D\", \"11701MB\",\n\t\t\t\t\t\"-R\", \"rusage[mem=11701MB:tmp=1MB] 
span[hosts=1]\",\n\t\t\t\t\t\"-R\", \"select[mem>=11701MB]\",\n\t\t\t\t\t\"-R\", \"select[tmp>=1MB]\",\n\t\t\t\t\t\"-R\", \"select[ncpus>=4]\"})\n\t\t\t\tmtx.Lock()\n\t\t\t\tfakejobq[nextjobid] = args[1]\n\t\t\t\tnextjobid++\n\t\t\t\tmtx.Unlock()\n\t\t\tcase arvadostest.QueuedContainerUUID:\n\t\t\t\tc.Check(args, check.DeepEquals, []string{\n\t\t\t\t\t\"-J\", arvadostest.QueuedContainerUUID,\n\t\t\t\t\t\"-n\", \"4\",\n\t\t\t\t\t\"-D\", \"11701MB\",\n\t\t\t\t\t\"-R\", \"rusage[mem=11701MB:tmp=45777MB] span[hosts=1]\",\n\t\t\t\t\t\"-R\", \"select[mem>=11701MB]\",\n\t\t\t\t\t\"-R\", \"select[tmp>=45777MB]\",\n\t\t\t\t\t\"-R\", \"select[ncpus>=4]\"})\n\t\t\t\tmtx.Lock()\n\t\t\t\tfakejobq[nextjobid] = args[1]\n\t\t\t\tnextjobid++\n\t\t\t\tmtx.Unlock()\n\t\t\tcase s.crPending.ContainerUUID:\n\t\t\t\tc.Check(args, check.DeepEquals, []string{\n\t\t\t\t\t\"-J\", s.crPending.ContainerUUID,\n\t\t\t\t\t\"-n\", \"2\",\n\t\t\t\t\t\"-D\", \"352MB\",\n\t\t\t\t\t\"-R\", \"rusage[mem=352MB:tmp=8448MB] span[hosts=1]\",\n\t\t\t\t\t\"-R\", \"select[mem>=352MB]\",\n\t\t\t\t\t\"-R\", \"select[tmp>=8448MB]\",\n\t\t\t\t\t\"-R\", \"select[ncpus>=2]\"})\n\t\t\t\tmtx.Lock()\n\t\t\t\tfakejobq[nextjobid] = args[1]\n\t\t\t\tnextjobid++\n\t\t\t\tmtx.Unlock()\n\t\t\tcase s.crCUDARequest.ContainerUUID:\n\t\t\t\tc.Check(args, check.DeepEquals, []string{\n\t\t\t\t\t\"-J\", s.crCUDARequest.ContainerUUID,\n\t\t\t\t\t\"-n\", \"4\",\n\t\t\t\t\t\"-D\", \"15515MB\",\n\t\t\t\t\t\"-R\", \"rusage[mem=15515MB:tmp=15515MB] span[hosts=1]\",\n\t\t\t\t\t\"-R\", \"select[mem>=15515MB]\",\n\t\t\t\t\t\"-R\", \"select[tmp>=15515MB]\",\n\t\t\t\t\t\"-R\", \"select[ncpus>=4]\",\n\t\t\t\t\t\"-gpu\", \"num=1\"})\n\t\t\t\tmtx.Lock()\n\t\t\t\tfakejobq[nextjobid] = args[1]\n\t\t\t\tnextjobid++\n\t\t\t\tmtx.Unlock()\n\t\t\tcase s.crMaxRunTime.ContainerUUID:\n\t\t\t\tc.Check(args, check.DeepEquals, []string{\n\t\t\t\t\t\"-J\", s.crMaxRunTime.ContainerUUID,\n\t\t\t\t\t\"-n\", \"1\",\n\t\t\t\t\t\"-D\", \"257MB\",\n\t\t\t\t\t\"-R\", \"rusage[mem=257MB:tmp=2304MB] span[hosts=1]\",\n\t\t\t\t\t\"-R\", \"select[mem>=257MB]\",\n\t\t\t\t\t\"-R\", \"select[tmp>=2304MB]\",\n\t\t\t\t\t\"-R\", \"select[ncpus>=1]\",\n\t\t\t\t\t\"-We\", \"8\", // 124s + 5m overhead + roundup = 8m\n\t\t\t\t})\n\t\t\t\tmtx.Lock()\n\t\t\t\tfakejobq[nextjobid] = args[1]\n\t\t\t\tnextjobid++\n\t\t\t\tmtx.Unlock()\n\t\t\tdefault:\n\t\t\t\tc.Errorf(\"unexpected uuid passed to bsub: args %q\", args)\n\t\t\t\treturn exec.Command(\"false\")\n\t\t\t}\n\t\t\treturn exec.Command(\"echo\", \"submitted job\")\n\t\tcase \"bjobs\":\n\t\t\tc.Check(args, check.DeepEquals, []string{\"-u\", \"all\", \"-o\", \"jobid stat job_name pend_reason\", \"-json\"})\n\t\t\tvar records []map[string]interface{}\n\t\t\tfor jobid, uuid := range fakejobq {\n\t\t\t\tstat, reason := \"RUN\", \"\"\n\t\t\t\tif uuid == s.crPending.ContainerUUID {\n\t\t\t\t\t// The real bjobs output includes a trailing ';' here:\n\t\t\t\t\tstat, reason = \"PEND\", \"There are no suitable hosts for the job;\"\n\t\t\t\t}\n\t\t\t\trecords = append(records, map[string]interface{}{\n\t\t\t\t\t\"JOBID\":       fmt.Sprintf(\"%d\", jobid),\n\t\t\t\t\t\"STAT\":        stat,\n\t\t\t\t\t\"JOB_NAME\":    uuid,\n\t\t\t\t\t\"PEND_REASON\": reason,\n\t\t\t\t})\n\t\t\t}\n\t\t\tout, err := json.Marshal(map[string]interface{}{\n\t\t\t\t\"COMMAND\": \"bjobs\",\n\t\t\t\t\"JOBS\":    len(fakejobq),\n\t\t\t\t\"RECORDS\": records,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tc.Logf(\"bjobs out: %s\", out)\n\t\t\treturn 
exec.Command(\"printf\", string(out))\n\t\tcase \"bkill\":\n\t\t\tkillid, _ := strconv.Atoi(args[0])\n\t\t\tif uuid, ok := fakejobq[killid]; !ok {\n\t\t\t\treturn exec.Command(\"bash\", \"-c\", fmt.Sprintf(\"printf >&2 'Job <%d>: No matching job found\\n'\", killid))\n\t\t\t} else if uuid == \"\" {\n\t\t\t\treturn exec.Command(\"bash\", \"-c\", fmt.Sprintf(\"printf >&2 'Job <%d>: Job has already finished\\n'\", killid))\n\t\t\t} else {\n\t\t\t\tgo func() {\n\t\t\t\t\ttime.Sleep(time.Millisecond)\n\t\t\t\t\tmtx.Lock()\n\t\t\t\t\tdelete(fakejobq, killid)\n\t\t\t\t\tmtx.Unlock()\n\t\t\t\t}()\n\t\t\t\treturn exec.Command(\"bash\", \"-c\", fmt.Sprintf(\"printf 'Job <%d> is being terminated\\n'\", killid))\n\t\t\t}\n\t\tdefault:\n\t\t\treturn exec.Command(\"bash\", \"-c\", fmt.Sprintf(\"echo >&2 'stub: command not found: %+q'\", prog))\n\t\t}\n\t}\n}\n\nfunc (s *suite) TestSubmit(c *check.C) {\n\ts.disp.lsfcli.stubCommand = lsfstub{\n\t\terrorRate: 0.1,\n\t\tsudoUser:  s.disp.Cluster.Containers.LSF.BsubSudoUser,\n\t}.stubCommand(s, c)\n\ts.disp.Start()\n\n\tdeadline := time.Now().Add(20 * time.Second)\n\tfor range time.NewTicker(time.Second).C {\n\t\tif time.Now().After(deadline) {\n\t\t\tc.Error(\"timed out\")\n\t\t\tbreak\n\t\t}\n\t\t// \"crTooBig\" should never be submitted to lsf because\n\t\t// it is bigger than any configured instance type\n\t\tif ent, ok := s.disp.lsfqueue.Lookup(s.crTooBig.ContainerUUID); ok {\n\t\t\tc.Errorf(\"Lookup(crTooBig) == true, ent = %#v\", ent)\n\t\t\tbreak\n\t\t}\n\t\t// \"queuedcontainer\" should be running\n\t\tif _, ok := s.disp.lsfqueue.Lookup(arvadostest.QueuedContainerUUID); !ok {\n\t\t\tc.Log(\"Lookup(queuedcontainer) == false\")\n\t\t\tcontinue\n\t\t}\n\t\t// \"crPending\" should be pending\n\t\tif ent, ok := s.disp.lsfqueue.Lookup(s.crPending.ContainerUUID); !ok {\n\t\t\tc.Logf(\"Lookup(crPending) == false\", ent)\n\t\t\tcontinue\n\t\t}\n\t\t// \"lockedcontainer\" should be cancelled because it\n\t\t// has priority 0 (no matching container requests)\n\t\tif ent, ok := s.disp.lsfqueue.Lookup(arvadostest.LockedContainerUUID); ok {\n\t\t\tc.Logf(\"Lookup(lockedcontainer) == true, ent = %#v\", ent)\n\t\t\tcontinue\n\t\t}\n\t\tvar ctr arvados.Container\n\t\tif err := s.disp.arvDispatcher.Arv.Get(\"containers\", arvadostest.LockedContainerUUID, nil, &ctr); err != nil {\n\t\t\tc.Logf(\"error getting container state for %s: %s\", arvadostest.LockedContainerUUID, err)\n\t\t\tcontinue\n\t\t} else if ctr.State != arvados.ContainerStateQueued {\n\t\t\tc.Logf(\"LockedContainer is not in the LSF queue but its arvados record has not been updated to state==Queued (state is %q)\", ctr.State)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := s.disp.arvDispatcher.Arv.Get(\"containers\", s.crTooBig.ContainerUUID, nil, &ctr); err != nil {\n\t\t\tc.Logf(\"error getting container state for %s: %s\", s.crTooBig.ContainerUUID, err)\n\t\t\tcontinue\n\t\t} else if ctr.State != arvados.ContainerStateCancelled {\n\t\t\tc.Logf(\"container %s is not in the LSF queue but its arvados record has not been updated to state==Cancelled (state is %q)\", s.crTooBig.ContainerUUID, ctr.State)\n\t\t\tcontinue\n\t\t} else {\n\t\t\tc.Check(ctr.RuntimeStatus[\"error\"], check.Equals, \"constraints not satisfiable by any configured instance type\")\n\t\t}\n\t\tc.Log(\"reached desired state\")\n\t\tbreak\n\t}\n}\n\nfunc (s *suite) TestBsubArgs(c *check.C) {\n\ts.disp.lsfcli.stubCommand = lsfstub{}.stubCommand(s, c)\n\ts.disp.Start()\n\n\ts.disp.Cluster.Containers.LSF.BsubArgumentsList = []string{\"--zebra\", 
\"%Z\"}\n\t_, err := s.disp.bsubArgs(arvados.Container{})\n\tc.Check(err, check.ErrorMatches, `Unknown substitution parameter %Z in BsubArgumentsList`)\n\n\ts.disp.Cluster.Containers.LSF.BsubArgumentsList = []string{\"--example\", \"%U\"}\n\targs, err := s.disp.bsubArgs(arvados.Container{UUID: \"zzzzz-dz642-asdfasdfasdfasd\"})\n\tc.Check(err, check.IsNil)\n\tc.Check(args, check.DeepEquals, []string{\"sudo\", \"-E\", \"-u\", \"crunch\", \"bsub\", \"--example\", \"zzzzz-dz642-asdfasdfasdfasd\"})\n}\n"
  },
  {
    "path": "lib/lsf/lsfcli.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage lsf\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype bjobsEntry struct {\n\tID         string `json:\"JOBID\"`\n\tName       string `json:\"JOB_NAME\"`\n\tStat       string `json:\"STAT\"`\n\tPendReason string `json:\"PEND_REASON\"`\n}\n\ntype lsfcli struct {\n\tlogger logrus.FieldLogger\n\t// (for testing) if non-nil, call stubCommand() instead of\n\t// exec.Command() when running lsf command line programs.\n\tstubCommand func(string, ...string) *exec.Cmd\n}\n\nfunc (cli lsfcli) command(prog string, args ...string) *exec.Cmd {\n\tif f := cli.stubCommand; f != nil {\n\t\treturn f(prog, args...)\n\t} else {\n\t\treturn exec.Command(prog, args...)\n\t}\n}\n\nfunc (cli lsfcli) Bsub(script []byte, args []string, arv *arvados.Client) error {\n\tcli.logger.Infof(\"bsub command %q script %q\", args, script)\n\tcmd := cli.command(args[0], args[1:]...)\n\tcmd.Env = append([]string(nil), os.Environ()...)\n\tcmd.Env = append(cmd.Env, \"ARVADOS_API_HOST=\"+arv.APIHost)\n\tcmd.Env = append(cmd.Env, \"ARVADOS_API_TOKEN=\"+arv.AuthToken)\n\tif arv.Insecure {\n\t\tcmd.Env = append(cmd.Env, \"ARVADOS_API_HOST_INSECURE=1\")\n\t}\n\tcmd.Stdin = bytes.NewReader(script)\n\tout, err := cmd.Output()\n\tcli.logger.WithField(\"stdout\", string(out)).Infof(\"bsub finished\")\n\treturn errWithStderr(err)\n}\n\nfunc (cli lsfcli) Bjobs() ([]bjobsEntry, error) {\n\tcli.logger.Debugf(\"Bjobs()\")\n\tcmd := cli.command(\"bjobs\", \"-u\", \"all\", \"-o\", \"jobid stat job_name pend_reason\", \"-json\")\n\tbuf, err := cmd.Output()\n\tif err != nil {\n\t\treturn nil, errWithStderr(err)\n\t}\n\tvar resp struct {\n\t\tRecords []bjobsEntry `json:\"RECORDS\"`\n\t}\n\terr = json.Unmarshal(buf, &resp)\n\treturn resp.Records, err\n}\n\nfunc (cli lsfcli) Bkill(id string) error {\n\tcli.logger.Infof(\"Bkill(%s)\", id)\n\tcmd := cli.command(\"bkill\", id)\n\tbuf, err := cmd.CombinedOutput()\n\tif err == nil || strings.Index(string(buf), \"already finished\") >= 0 {\n\t\treturn nil\n\t} else {\n\t\treturn fmt.Errorf(\"%s (%q)\", err, buf)\n\t}\n}\n\nfunc errWithStderr(err error) error {\n\tif err, ok := err.(*exec.ExitError); ok {\n\t\treturn fmt.Errorf(\"%s (%q)\", err, err.Stderr)\n\t}\n\treturn err\n}\n"
  },
  {
    "path": "lib/lsf/lsfqueue.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage lsf\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype lsfqueue struct {\n\tlogger logrus.FieldLogger\n\tperiod time.Duration\n\tlsfcli *lsfcli\n\n\tinitOnce  sync.Once\n\tmutex     sync.Mutex\n\tnextReady chan (<-chan struct{})\n\tupdated   *sync.Cond\n\tlatest    map[string]bjobsEntry\n}\n\n// Lookup waits for the next queue update (so even a job that was only\n// submitted a nanosecond ago will show up) and then returns the LSF\n// queue information corresponding to the given container UUID.\nfunc (q *lsfqueue) Lookup(uuid string) (bjobsEntry, bool) {\n\tent, ok := q.getNext()[uuid]\n\treturn ent, ok\n}\n\n// All waits for the next queue update, then returns the names of all\n// jobs in the queue. Used by checkLsfQueueForOrphans().\nfunc (q *lsfqueue) All() []string {\n\tlatest := q.getNext()\n\tnames := make([]string, 0, len(latest))\n\tfor name := range latest {\n\t\tnames = append(names, name)\n\t}\n\treturn names\n}\n\nfunc (q *lsfqueue) SetPriority(uuid string, priority int64) {\n\tq.initOnce.Do(q.init)\n\tq.logger.Debug(\"SetPriority is not implemented\")\n}\n\nfunc (q *lsfqueue) getNext() map[string]bjobsEntry {\n\tq.initOnce.Do(q.init)\n\t<-(<-q.nextReady)\n\tq.mutex.Lock()\n\tdefer q.mutex.Unlock()\n\treturn q.latest\n}\n\nfunc (q *lsfqueue) init() {\n\tq.updated = sync.NewCond(&q.mutex)\n\tq.nextReady = make(chan (<-chan struct{}))\n\tticker := time.NewTicker(q.period)\n\tgo func() {\n\t\tfor range ticker.C {\n\t\t\t// Send a new \"next update ready\" channel to\n\t\t\t// the next goroutine that wants one (and any\n\t\t\t// others that have already queued up since\n\t\t\t// the first one started waiting).\n\t\t\t//\n\t\t\t// Below, when we get a new update, we'll\n\t\t\t// signal that to the other goroutines by\n\t\t\t// closing the ready chan.\n\t\t\tready := make(chan struct{})\n\t\t\tq.nextReady <- ready\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase q.nextReady <- ready:\n\t\t\t\t\tcontinue\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Run bjobs repeatedly if needed, until we\n\t\t\t// get valid output.\n\t\t\tvar ents []bjobsEntry\n\t\t\tfor {\n\t\t\t\tq.logger.Debug(\"running bjobs\")\n\t\t\t\tvar err error\n\t\t\t\tents, err = q.lsfcli.Bjobs()\n\t\t\t\tif err == nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tq.logger.Warnf(\"bjobs: %s\", err)\n\t\t\t\t<-ticker.C\n\t\t\t}\n\t\t\tnext := make(map[string]bjobsEntry, len(ents))\n\t\t\tfor _, ent := range ents {\n\t\t\t\tnext[ent.Name] = ent\n\t\t\t}\n\t\t\t// Replace q.latest and notify all the\n\t\t\t// goroutines that the \"next update\" they\n\t\t\t// asked for is now ready.\n\t\t\tq.mutex.Lock()\n\t\t\tq.latest = next\n\t\t\tq.mutex.Unlock()\n\t\t\tclose(ready)\n\t\t}\n\t}()\n}\n"
  },
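  {
    "path": "lib/lsf/lsfqueue_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage lsf\n\n// Illustrative sketch, not part of the upstream tree: exercises the\n// polling pattern described in lsfqueue.go by stubbing the bjobs\n// command with `echo`, which prints canned RECORDS JSON in the shape\n// lsfcli.Bjobs() expects. The job name is a made-up placeholder, not\n// a real container UUID.\n\nimport (\n\t\"os/exec\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc TestLsfqueueLookupSketch(t *testing.T) {\n\tcli := &lsfcli{logger: logrus.New()}\n\t// Stub bjobs with a command that prints one RUN entry.\n\tcli.stubCommand = func(prog string, args ...string) *exec.Cmd {\n\t\treturn exec.Command(\"echo\", `{\"RECORDS\":[{\"JOBID\":\"123\",\"JOB_NAME\":\"zzzzz-dz642-000000000000000\",\"STAT\":\"RUN\",\"PEND_REASON\":\"\"}]}`)\n\t}\n\tq := &lsfqueue{\n\t\tlogger: logrus.New(),\n\t\tperiod: time.Millisecond,\n\t\tlsfcli: cli,\n\t}\n\t// Lookup blocks until the next poll completes, so an entry that\n\t// appeared in bjobs output \"just now\" is still visible.\n\tent, ok := q.Lookup(\"zzzzz-dz642-000000000000000\")\n\tif !ok || ent.Stat != \"RUN\" {\n\t\tt.Fatalf(\"unexpected lookup result: %+v, %v\", ent, ok)\n\t}\n\t// All() reports every job name seen in the latest poll.\n\tif names := q.All(); len(names) != 1 {\n\t\tt.Fatalf(\"expected one job name, got %v\", names)\n\t}\n}\n"
  },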
  {
    "path": "lib/mount/command.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage mount\n\nimport (\n\t\"flag\"\n\t\"io\"\n\t\"log\"\n\t\"net/http\"\n\t\"strings\"\n\n\t// pprof is only imported to register its HTTP handlers\n\t_ \"net/http/pprof\"\n\t\"os\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\t\"github.com/arvados/cgofuse/fuse\"\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nvar Command = &mountCommand{}\n\ntype arrayFlags []string\n\nfunc (a *arrayFlags) String() string {\n\tif a == nil {\n\t\treturn \"\"\n\t}\n\treturn strings.Join(*a, \", \")\n}\n\nfunc (a *arrayFlags) Set(value string) error {\n\t*a = append(*a, value)\n\treturn nil\n}\n\ntype mountCommand struct {\n\t// ready, if non-nil, will be closed when the mount is\n\t// initialized.  If ready is non-nil, it RunCommand() should\n\t// not be called more than once, or when ready is already\n\t// closed.  Only intended for testing.\n\tready chan struct{}\n\t// It is safe to call Unmount only after ready has been\n\t// closed.\n\tUnmount func() (ok bool)\n}\n\n// RunCommand implements the subcommand \"mount <path> [fuse options]\".\n//\n// The \"-d\" fuse option (and perhaps other features) ignores the\n// stderr argument and prints to os.Stderr instead.\nfunc (c *mountCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tlogger := ctxlog.New(stderr, \"text\", \"info\")\n\tdefer logger.Debug(\"exiting\")\n\n\tflags := flag.NewFlagSet(prog, flag.ContinueOnError)\n\tro := flags.Bool(\"ro\", false, \"read-only\")\n\texperimental := flags.Bool(\"experimental\", false, \"acknowledge this is an experimental command, and should not be used in production (required)\")\n\tcacheSizeStr := flags.String(\"cache-size\", \"0\", \"cache size as `PERCENT` of home filesystem size (\\\"5%\\\") or size (\\\"10GiB\\\") or 0 for automatic\")\n\tlogLevel := flags.String(\"log-level\", \"info\", \"logging `LEVEL` (debug, info, ...)\")\n\tdebug := flags.Bool(\"debug\", false, \"alias for -log-level=debug\")\n\tpprof := flags.String(\"pprof\", \"\", \"serve Go profile data at `[addr]:port`\")\n\tcrunchstatInterval := flags.Float64(\"crunchstat-interval\", 0.0, \"interval in `SECONDS` between updates of crunch job stats in mounted filesystem\")\n\tvar mountByIdIds arrayFlags\n\tflags.Var(&mountByIdIds, \"mount-by-id\", \"Make a magic directory under the mount at `PATH` available where collections and \"+\n\t\t\"projects are accessible through subdirectories named after their UUID or \"+\n\t\t\"portable data hash.\")\n\n\tif ok, code := cmd.ParseFlags(flags, prog, args, \"[FUSE mount options]\", stderr); !ok {\n\t\treturn code\n\t}\n\tif !*experimental {\n\t\tlogger.Errorf(\"experimental command %q used without --experimental flag\", prog)\n\t\treturn 2\n\t}\n\tlvl, err := logrus.ParseLevel(*logLevel)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"invalid argument for -log-level flag\")\n\t\treturn 2\n\t}\n\tif *debug {\n\t\tlvl = logrus.DebugLevel\n\t}\n\tlogger.SetLevel(lvl)\n\tif *pprof != \"\" {\n\t\tgo func() {\n\t\t\tlog.Println(http.ListenAndServe(*pprof, nil))\n\t\t}()\n\t}\n\tif *crunchstatInterval < 0.0 {\n\t\tlogger.Error(\"-crunchstat-interval 
must be non-negative\")\n\t\treturn 2\n\t}\n\tclient := arvados.NewClientFromEnv()\n\tif err := yaml.Unmarshal([]byte(*cacheSizeStr), &client.DiskCacheSize); err != nil {\n\t\tlogger.Errorf(\"error parsing -cache-size argument: %s\", err)\n\t\treturn 2\n\t}\n\tac, err := arvadosclient.New(client)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn 1\n\t}\n\tkc, err := keepclient.MakeKeepClient(ac)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn 1\n\t}\n\tvar fsRoot arvados.CustomFileSystem\n\tif len(mountByIdIds) > 0 {\n\t\tfsRoot = client.CustomFileSystem(kc)\n\t\tfor _, dirName := range mountByIdIds {\n\t\t\tfsRoot.MountByID(dirName)\n\t\t}\n\t} else {\n\t\tfsRoot = client.SiteFileSystem(kc)\n\t\tfsRoot.MountProject(\"home\", \"\")\n\t}\n\n\tregistry := prometheus.NewRegistry()\n\tkc.RegisterMetrics(registry)\n\thost := fuse.NewFileSystemHost(&keepFS{\n\t\tReadOnly:      *ro,\n\t\tUid:           os.Getuid(),\n\t\tGid:           os.Getgid(),\n\t\tLogger:        logger,\n\t\tRoot:          fsRoot,\n\t\tStatsWriter:   stderr,\n\t\tStatsInterval: time.Duration(*crunchstatInterval * float64(time.Second)),\n\t\tRegistry:      registry,\n\t\tready:         c.ready,\n\t})\n\tc.Unmount = host.Unmount\n\n\tlogger.WithField(\"mountargs\", flags.Args()).Debug(\"mounting\")\n\tok := host.Mount(\"\", flags.Args())\n\tif !ok {\n\t\treturn 1\n\t}\n\treturn 0\n}\n"
  },
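  {
    "path": "lib/mount/README",
    "content": "Illustrative notes, not part of the upstream tree: a quick reference\nfor the experimental \"mount\" subcommand implemented in command.go,\nassuming it is wired into a multi-command binary via lib/cmd (the\nprogram name below is a placeholder). Flag names and semantics come\nfrom command.go itself.\n\nUsage:\n\n    PROG mount [options] /mnt/point [FUSE mount options]\n\nThe -experimental flag is required; without it the command refuses to\nrun. Other options:\n\n    -ro                        mount read-only\n    -mount-by-id PATH          make a magic by-ID directory available\n                               at PATH (repeatable; replaces the\n                               default site layout with its by_id,\n                               home, and users directories)\n    -cache-size SIZE           disk cache size as a percent of the\n                               home filesystem (\"5%\"), a size\n                               (\"10GiB\"), or 0 for automatic\n    -log-level LEVEL           debug, info, ... (-debug is an alias\n                               for -log-level=debug)\n    -crunchstat-interval SECS  write crunchstat-format stats to\n                               stderr every SECS seconds\n    -pprof [addr]:port         serve Go profile data over HTTP\n\nARVADOS_API_HOST and ARVADOS_API_TOKEN are read from the environment\n(see arvados.NewClientFromEnv).\n"
  },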
  {
    "path": "lib/mount/command_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage mount\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&CmdSuite{})\n\ntype CmdSuite struct {\n\tmnt    string\n\tstderr *bytes.Buffer\n}\n\nfunc (s *CmdSuite) SetUpTest(c *check.C) {\n\ttmpdir, err := ioutil.TempDir(\"\", \"\")\n\tc.Assert(err, check.IsNil)\n\ts.mnt = tmpdir\n\ts.stderr = bytes.NewBuffer(nil)\n}\n\nfunc (s *CmdSuite) TearDownTest(c *check.C) {\n\tc.Assert(os.RemoveAll(s.mnt), check.IsNil)\n}\n\nfunc (s *CmdSuite) TestMount(c *check.C) {\n\ts.mountAndCheck(c, []string{}, func() {\n\t\tmntF, err := os.Open(s.mnt)\n\t\tc.Assert(err, check.IsNil)\n\t\tdirsGot, err := mntF.Readdirnames(-1)\n\t\tc.Assert(err, check.IsNil)\n\t\tdirsWant := []string{\"by_id\", \"home\", \"users\"}\n\t\tsort.Strings(dirsGot)\n\t\tsort.Strings(dirsWant)\n\t\tc.Check(dirsGot, check.DeepEquals, dirsWant)\n\t\tmntF.Close()\n\n\t\tf, err := os.Open(s.mnt + \"/by_id/\" + arvadostest.FooCollection)\n\t\tc.Assert(err, check.IsNil)\n\t\tdirnames, err := f.Readdirnames(-1)\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Assert(dirnames, check.DeepEquals, []string{\"foo\"})\n\t\tf.Close()\n\n\t\tbuf, err := ioutil.ReadFile(s.mnt + \"/by_id/\" + arvadostest.FooCollection + \"/.arvados#collection\")\n\t\tc.Assert(err, check.IsNil)\n\t\tvar m map[string]interface{}\n\t\terr = json.Unmarshal(buf, &m)\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Assert(m[\"manifest_text\"], check.Matches, `\\. acbd.* 0:3:foo\\n`)\n\n\t\t_, err = os.Open(s.mnt + \"/by_id/zzzzz-4zz18-does-not-exist\")\n\t\tc.Assert(os.IsNotExist(err), check.Equals, true)\n\t})\n\tc.Assert(s.stderr.String(), check.Equals, \"\")\n}\n\nfunc (s *CmdSuite) TestMountById(c *check.C) {\n\ts.mountAndCheck(c, []string{\"--mount-by-id\", \"by_id_test_1\", \"--mount-by-id\", \"by_id_test_2\"}, func() {\n\t\tmntF, err := os.Open(s.mnt)\n\t\tc.Assert(err, check.IsNil)\n\t\tdirsGot, err := mntF.Readdirnames(-1)\n\t\tc.Assert(err, check.IsNil)\n\t\tdirsWant := []string{\"by_id_test_1\", \"by_id_test_2\"}\n\t\tsort.Strings(dirsGot)\n\t\tsort.Strings(dirsWant)\n\t\tc.Check(dirsGot, check.DeepEquals, dirsWant)\n\t\tmntF.Close()\n\n\t\tf_1, err := os.Open(s.mnt + \"/by_id_test_1/\" + arvadostest.FooCollection)\n\t\tc.Assert(err, check.IsNil)\n\t\tdirnames, err := f_1.Readdirnames(-1)\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Assert(dirnames, check.DeepEquals, []string{\"foo\"})\n\n\t\tf_2, err := os.Open(s.mnt + \"/by_id_test_2/\" + arvadostest.FooCollection)\n\t\tc.Assert(err, check.IsNil)\n\t\tdirnames, err = f_2.Readdirnames(-1)\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Assert(dirnames, check.DeepEquals, []string{\"foo\"})\n\n\t\tf_1.Close()\n\t\tf_2.Close()\n\t})\n}\n\nfunc (s *CmdSuite) TestCrunchstatLogger(c *check.C) {\n\ts.mountAndCheck(c, []string{\"--crunchstat-interval\", \"0.01\"}, func() {\n\t\tdata := make([]byte, 2048)\n\t\tfor i := range data {\n\t\t\tdata[i] = byte(i % 256)\n\t\t}\n\t\tcollectionPath := s.mnt + \"/by_id/\" + arvadostest.FooCollection + \"/testfile\"\n\n\t\tos.WriteFile(collectionPath, data, 0644)\n\t\tos.ReadFile(collectionPath)\n\t\ttime.Sleep(20 * time.Millisecond)\n\n\t\t// Check that any logging has occurred\n\t\tlogs := s.stderr.String()\n\t\tc.Assert(strings.Contains(logs, \"blkio:0:0 2048 write 2048 read\"), check.Equals, 
true)\n\t\tc.Assert(strings.Contains(logs, \"crunchstat: fuseop:open 1 count\"), check.Equals, true)\n\t})\n\t// Check that logging has stopped after unmount\n\tlen1 := s.stderr.Len()\n\ttime.Sleep(100 * time.Millisecond)\n\tlen2 := s.stderr.Len()\n\tc.Assert(len1, check.Equals, len2)\n}\n\nfunc (s *CmdSuite) mountAndCheck(c *check.C, testArgs []string, testFunc func()) {\n\texited := make(chan int)\n\tstdin := bytes.NewBufferString(\"stdin\")\n\tstdout := bytes.NewBuffer(nil)\n\tmountCmd := mountCommand{ready: make(chan struct{})}\n\tready := false\n\targs := append(append([]string{}, testArgs...), \"-experimental\", s.mnt)\n\tgo func() {\n\t\texited <- mountCmd.RunCommand(\"test mount\", args, stdin, stdout, s.stderr)\n\t}()\n\tgo func() {\n\t\t<-mountCmd.ready\n\t\tdefer func() {\n\t\t\tok := mountCmd.Unmount()\n\t\t\tc.Assert(ok, check.Equals, true)\n\t\t}()\n\t\ttestFunc()\n\t\tready = true\n\t}()\n\tselect {\n\tcase <-time.After(5 * time.Second):\n\t\tc.Fatal(\"timed out\")\n\tcase errCode, ok := <-exited:\n\t\tc.Assert(ok, check.Equals, true)\n\t\tc.Assert(errCode, check.Equals, 0)\n\t}\n\tc.Assert(ready, check.Equals, true)\n\tc.Assert(stdout.String(), check.Equals, \"\")\n\t// stdin should not have been read\n\tc.Assert(stdin.String(), check.Equals, \"stdin\")\n}\n"
  },
  {
    "path": "lib/mount/fs.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage mount\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"runtime/debug\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/arvados/cgofuse/fuse\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// sharedFile wraps arvados.File with a sync.Mutex, so fuse can safely\n// use a single filehandle concurrently on behalf of multiple\n// threads/processes.\ntype sharedFile struct {\n\tarvados.File\n\tsync.Mutex\n}\n\n// keepFS implements cgofuse's FileSystemInterface.\ntype keepFS struct {\n\tfuse.FileSystemBase\n\tReadOnly      bool\n\tUid           int\n\tGid           int\n\tLogger        logrus.FieldLogger\n\tRoot          arvados.CustomFileSystem\n\tStatsWriter   io.Writer\n\tStatsInterval time.Duration\n\tRegistry      *prometheus.Registry\n\n\topen     map[uint64]*sharedFile\n\tlastFH   uint64\n\tdone     chan struct{}\n\tmBytes   *prometheus.CounterVec\n\tmOps     *prometheus.CounterVec\n\tmSeconds *prometheus.CounterVec\n\tsync.RWMutex\n\n\t// If non-nil, this channel will be closed by Init() to notify\n\t// other goroutines that the mount is ready.\n\tready chan struct{}\n}\n\nvar (\n\tinvalidFH = ^uint64(0)\n)\n\n// newFH wraps f in a sharedFile, adds it to fs's lookup table using a\n// new handle number, and returns the handle number.\nfunc (fs *keepFS) newFH(f arvados.File) uint64 {\n\tfs.Lock()\n\tdefer fs.Unlock()\n\tif fs.open == nil {\n\t\tfs.open = make(map[uint64]*sharedFile)\n\t}\n\tfs.lastFH++\n\tfh := fs.lastFH\n\tfs.open[fh] = &sharedFile{File: f}\n\treturn fh\n}\n\nfunc (fs *keepFS) lookupFH(fh uint64) *sharedFile {\n\tfs.RLock()\n\tdefer fs.RUnlock()\n\treturn fs.open[fh]\n}\n\nfunc (fs *keepFS) Init() {\n\tdefer fs.debugPanics()\n\tfs.done = make(chan struct{})\n\tif fs.StatsInterval > 0 {\n\t\tfs.registerMetrics()\n\t\tvar previousMetrics map[string]float64\n\t\tticker := time.NewTicker(fs.StatsInterval)\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-fs.done:\n\t\t\t\t\treturn\n\t\t\t\tcase <-ticker.C:\n\t\t\t\t\tcurrentMetrics := gatherMetrics(fs.Registry)\n\t\t\t\t\twriter := fs.StatsWriter\n\t\t\t\t\tif writer == nil {\n\t\t\t\t\t\twriter = os.Stderr\n\t\t\t\t\t}\n\t\t\t\t\twriteMetrics(writer, currentMetrics, previousMetrics, fs.StatsInterval.Seconds())\n\t\t\t\t\tpreviousMetrics = currentMetrics\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\tif fs.ready != nil {\n\t\tclose(fs.ready)\n\t}\n}\n\nfunc (fs *keepFS) Destroy() {\n\tclose(fs.done)\n}\n\nfunc (fs *keepFS) registerMetrics() {\n\tfs.mBytes = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"fuse\",\n\t\tName:      \"bytes\",\n\t\tHelp:      \"Bytes read/written by the FUSE filesystem\",\n\t}, []string{\"fuseop\"})\n\tfs.mOps = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"fuse\",\n\t\tName:      \"ops\",\n\t\tHelp:      \"FUSE filesystem operations\",\n\t}, []string{\"fuseop\"})\n\tfs.mSeconds = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"fuse\",\n\t\tName:      \"seconds_total\",\n\t\tHelp:      \"Time spent in FUSE filesystem operations\",\n\t}, []string{\"fuseop\"})\n\n\tfs.Registry.MustRegister(fs.mBytes)\n\tfs.Registry.MustRegister(fs.mOps)\n\tfs.Registry.MustRegister(fs.mSeconds)\n}\n\nfunc gatherMetrics(reg 
*prometheus.Registry) map[string]float64 {\n\tmetricsMap := map[string]float64{}\n\tmetricFamilies, _ := reg.Gather()\n\tfor _, mf := range metricFamilies {\n\t\tfor _, metric := range mf.GetMetric() {\n\t\t\tmetricName := mf.GetName()\n\t\t\tif len(metric.GetLabel()) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlabels := \"\"\n\t\t\tfor i, label := range metric.GetLabel() {\n\t\t\t\tif i > 0 {\n\t\t\t\t\tlabels += \",\"\n\t\t\t\t}\n\t\t\t\tlabels += label.GetName() + \"=\\\"\" + label.GetValue() + \"\\\"\"\n\t\t\t}\n\t\t\tmetricName = metricName + \"{\" + labels + \"}\"\n\n\t\t\tvar value float64\n\t\t\tif metric.Counter != nil {\n\t\t\t\tvalue = metric.GetCounter().GetValue()\n\t\t\t}\n\n\t\t\tmetricsMap[metricName] = value\n\t\t}\n\t}\n\treturn metricsMap\n}\n\nfunc (fs *keepFS) reportMetrics(op string, t0 time.Time, bytes *int) {\n\tif fs.mOps != nil {\n\t\tfs.mOps.WithLabelValues(op).Inc()\n\t}\n\tif fs.mSeconds != nil {\n\t\tfs.mSeconds.WithLabelValues(op).Add(time.Since(t0).Seconds())\n\t}\n\tif bytes != nil && fs.mBytes != nil {\n\t\tfs.mBytes.WithLabelValues(op).Add(float64(*bytes))\n\t}\n}\n\nfunc writeMetrics(out io.Writer, currentMetrics, previousMetrics map[string]float64, intervalSeconds float64) {\n\tgetCurrentAndDelta := func(name string) (float64, float64) {\n\t\tcurrent := currentMetrics[name]\n\t\tprevious := previousMetrics[name]\n\t\treturn current, current - previous\n\t}\n\n\t// Keep client network stats\n\tbytesIn, bytesInDelta := getCurrentAndDelta(`arvados_keepclient_backend_bytes{direction=\"in\"}`)\n\tbytesOut, bytesOutDelta := getCurrentAndDelta(`arvados_keepclient_backend_bytes{direction=\"out\"}`)\n\tfmt.Fprintf(out, \"crunchstat: net:keep0 %.0f tx %.0f rx -- interval %.4f seconds %.0f tx %.0f rx\\n\",\n\t\tbytesOut, bytesIn, intervalSeconds, bytesOutDelta, bytesInDelta)\n\n\t// Keep client call stats\n\tgetCalls, getCallsDelta := getCurrentAndDelta(`arvados_keepclient_ops{op=\"get\"}`)\n\tputCalls, putCallsDelta := getCurrentAndDelta(`arvados_keepclient_ops{op=\"put\"}`)\n\tfmt.Fprintf(out, \"crunchstat: keepcalls %.0f put %.0f get -- interval %.4f seconds %.0f put %.0f get\\n\",\n\t\tputCalls, getCalls, intervalSeconds, putCallsDelta, getCallsDelta)\n\n\t// Keep cache stats (if available)\n\tcacheHit, cacheHitDelta := getCurrentAndDelta(`arvados_keepclient_cache{event=\"hit\"}`)\n\tcacheMiss, cacheMissDelta := getCurrentAndDelta(`arvados_keepclient_cache{event=\"miss\"}`)\n\tfmt.Fprintf(out, \"crunchstat: keepcache %.0f hit %.0f miss -- interval %.4f seconds %.0f hit %.0f miss\\n\",\n\t\tcacheHit, cacheMiss, intervalSeconds, cacheHitDelta, cacheMissDelta)\n\n\t// Block I/O stats (FUSE layer bytes, not Keep backend bytes)\n\tfuseReadBytes, fuseReadBytesDelta := getCurrentAndDelta(`arvados_fuse_bytes{fuseop=\"read\"}`)\n\tfuseWriteBytes, fuseWriteBytesDelta := getCurrentAndDelta(`arvados_fuse_bytes{fuseop=\"write\"}`)\n\tfmt.Fprintf(out, \"crunchstat: blkio:0:0 %.0f write %.0f read -- interval %.4f seconds %.0f write %.0f read\\n\",\n\t\tfuseWriteBytes, fuseReadBytes, intervalSeconds, fuseWriteBytesDelta, fuseReadBytesDelta)\n\n\t// FUSE operation summary\n\treadOps, readOpsDelta := getCurrentAndDelta(`arvados_fuse_ops{fuseop=\"read\"}`)\n\twriteOps, writeOpsDelta := getCurrentAndDelta(`arvados_fuse_ops{fuseop=\"write\"}`)\n\tfmt.Fprintf(out, \"crunchstat: fuseops %.0f write %.0f read -- interval %.4f seconds %.0f write %.0f read\\n\",\n\t\twriteOps, readOps, intervalSeconds, writeOpsDelta, readOpsDelta)\n\n\t// Individual FUSE operation details\n\toperations := 
[]string{\"getattr\", \"opendir\", \"readdir\", \"open\", \"create\",\n\t\t\"mknod\", \"mkdir\", \"unlink\", \"rmdir\", \"rename\", \"truncate\", \"utimens\",\n\t\t\"read\", \"write\", \"fsync\", \"fsyncdir\", \"release\", \"releasedir\"}\n\tfor _, op := range operations {\n\t\topCount, opCountDelta := getCurrentAndDelta(fmt.Sprintf(`arvados_fuse_ops{fuseop=\"%s\"}`, op))\n\t\topTime, opTimeDelta := getCurrentAndDelta(fmt.Sprintf(`arvados_fuse_seconds_total{fuseop=\"%s\"}`, op))\n\t\tfmt.Fprintf(out, \"crunchstat: fuseop:%s %.0f count %.6f time -- interval %.4f seconds %.0f count %.6f time\\n\",\n\t\t\top, opCount, opTime, intervalSeconds, opCountDelta, opTimeDelta)\n\t}\n}\n\nfunc (fs *keepFS) Create(path string, flags int, mode uint32) (errc int, fh uint64) {\n\tdefer fs.reportMetrics(\"create\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Create\", path)\n\tif fs.ReadOnly {\n\t\treturn -fuse.EROFS, invalidFH\n\t}\n\tf, err := fs.Root.OpenFile(path, flags|os.O_CREATE, os.FileMode(mode))\n\tif err == os.ErrExist {\n\t\treturn -fuse.EEXIST, invalidFH\n\t} else if err != nil {\n\t\treturn -fuse.EINVAL, invalidFH\n\t}\n\treturn 0, fs.newFH(f)\n}\n\nfunc (fs *keepFS) Mknod(path string, mode uint32, dev uint64) int {\n\tdefer fs.reportMetrics(\"mknod\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Mknod\", path)\n\tif filetype := mode & uint32(^os.ModePerm); filetype != 0 && filetype != uint32(fuse.S_IFREG) {\n\t\treturn -fuse.ENOSYS\n\t}\n\tif fs.ReadOnly {\n\t\t_, err := fs.Root.Stat(path)\n\t\tif err != nil {\n\t\t\treturn -fuse.EROFS\n\t\t} else {\n\t\t\treturn -fuse.EEXIST\n\t\t}\n\t}\n\tf, err := fs.Root.OpenFile(path, os.O_CREATE|os.O_EXCL, os.FileMode(mode)&os.ModePerm)\n\tif err != nil {\n\t\treturn fs.errCode(\"Mknod\", path, err)\n\t}\n\tf.Close()\n\treturn 0\n}\n\nfunc (fs *keepFS) Open(path string, flags int) (errc int, fh uint64) {\n\tdefer fs.reportMetrics(\"open\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Open\", path)\n\tif fs.ReadOnly && flags&(os.O_RDWR|os.O_WRONLY|os.O_CREATE) != 0 {\n\t\treturn -fuse.EROFS, invalidFH\n\t}\n\tf, err := fs.Root.OpenFile(path, flags, 0)\n\tif err != nil {\n\t\treturn -fuse.ENOENT, invalidFH\n\t} else if fi, err := f.Stat(); err != nil {\n\t\treturn -fuse.EIO, invalidFH\n\t} else if fi.IsDir() {\n\t\tf.Close()\n\t\treturn -fuse.EISDIR, invalidFH\n\t}\n\treturn 0, fs.newFH(f)\n}\n\nfunc (fs *keepFS) Utimens(path string, tmsp []fuse.Timespec) int {\n\tdefer fs.reportMetrics(\"utimens\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Utimens\", path)\n\tif fs.ReadOnly {\n\t\treturn -fuse.EROFS\n\t}\n\tf, err := fs.Root.OpenFile(path, 0, 0)\n\tif err != nil {\n\t\treturn fs.errCode(\"Utimens\", path, err)\n\t}\n\tf.Close()\n\treturn 0\n}\n\nfunc (fs *keepFS) errCode(op, path string, err error) (errc int) {\n\tif err == nil {\n\t\treturn 0\n\t}\n\tdefer func() {\n\t\tfs.Logger.WithFields(logrus.Fields{\n\t\t\t\"op\":    op,\n\t\t\t\"path\":  path,\n\t\t\t\"errno\": errc,\n\t\t\t\"error\": err,\n\t\t}).Debug(\"fuse call returned error\")\n\t}()\n\tif errors.Is(err, os.ErrNotExist) {\n\t\treturn -fuse.ENOENT\n\t}\n\tif errors.Is(err, os.ErrExist) {\n\t\treturn -fuse.EEXIST\n\t}\n\tif errors.Is(err, arvados.ErrInvalidArgument) {\n\t\treturn -fuse.EINVAL\n\t}\n\tif errors.Is(err, arvados.ErrInvalidOperation) {\n\t\treturn -fuse.ENOSYS\n\t}\n\tif errors.Is(err, arvados.ErrDirectoryNotEmpty) {\n\t\treturn -fuse.ENOTEMPTY\n\t}\n\treturn -fuse.EIO\n}\n\nfunc (fs *keepFS) Mkdir(path string, mode 
uint32) int {\n\tdefer fs.reportMetrics(\"mkdir\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Mkdir\", path)\n\tif fs.ReadOnly {\n\t\treturn -fuse.EROFS\n\t}\n\tf, err := fs.Root.OpenFile(path, os.O_CREATE|os.O_EXCL, os.FileMode(mode)|os.ModeDir)\n\tif err != nil {\n\t\treturn fs.errCode(\"Mkdir\", path, err)\n\t}\n\tf.Close()\n\treturn 0\n}\n\nfunc (fs *keepFS) Opendir(path string) (errc int, fh uint64) {\n\tdefer fs.reportMetrics(\"opendir\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Opendir\", path)\n\tf, err := fs.Root.OpenFile(path, 0, 0)\n\tif err != nil {\n\t\treturn fs.errCode(\"Opendir\", path, err), invalidFH\n\t} else if fi, err := f.Stat(); err != nil {\n\t\treturn fs.errCode(\"Opendir\", path, err), invalidFH\n\t} else if !fi.IsDir() {\n\t\tf.Close()\n\t\treturn -fuse.ENOTDIR, invalidFH\n\t}\n\treturn 0, fs.newFH(f)\n}\n\nfunc (fs *keepFS) Releasedir(path string, fh uint64) (errc int) {\n\tdefer fs.reportMetrics(\"releasedir\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Releasedir\", path)\n\treturn fs.Release(path, fh)\n}\n\nfunc (fs *keepFS) Rmdir(path string) int {\n\tdefer fs.reportMetrics(\"rmdir\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Rmdir\", path)\n\treturn fs.errCode(\"Rmdir\", path, fs.Root.Remove(path))\n}\n\nfunc (fs *keepFS) Release(path string, fh uint64) (errc int) {\n\tdefer fs.reportMetrics(\"release\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Release\", path)\n\tfs.Lock()\n\tdefer fs.Unlock()\n\tdefer delete(fs.open, fh)\n\tif f := fs.open[fh]; f != nil {\n\t\terr := f.Close()\n\t\tif err != nil {\n\t\t\treturn -fuse.EIO\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc (fs *keepFS) Rename(oldname, newname string) (errc int) {\n\tdefer fs.reportMetrics(\"rename\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Rename\", oldname+\" -> \"+newname)\n\tif fs.ReadOnly {\n\t\treturn -fuse.EROFS\n\t}\n\treturn fs.errCode(\"Rename\", oldname+\" -> \"+newname, fs.Root.Rename(oldname, newname))\n}\n\nfunc (fs *keepFS) Unlink(path string) (errc int) {\n\tdefer fs.reportMetrics(\"unlink\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Unlink\", path)\n\tif fs.ReadOnly {\n\t\treturn -fuse.EROFS\n\t}\n\treturn fs.errCode(\"Unlink\", path, fs.Root.Remove(path))\n}\n\nfunc (fs *keepFS) Truncate(path string, size int64, fh uint64) (errc int) {\n\tdefer fs.reportMetrics(\"truncate\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Truncate\", path)\n\tif fs.ReadOnly {\n\t\treturn -fuse.EROFS\n\t}\n\n\t// Sometimes fh is a valid filehandle and we don't need to\n\t// waste a name lookup.\n\tif f := fs.lookupFH(fh); f != nil {\n\t\treturn fs.errCode(\"Truncate\", path, f.Truncate(size))\n\t}\n\n\t// Other times, fh is invalid and we need to lookup path.\n\tf, err := fs.Root.OpenFile(path, os.O_RDWR, 0)\n\tif err != nil {\n\t\treturn fs.errCode(\"Truncate\", path, err)\n\t}\n\tdefer f.Close()\n\treturn fs.errCode(\"Truncate\", path, f.Truncate(size))\n}\n\nfunc (fs *keepFS) Getattr(path string, stat *fuse.Stat_t, fh uint64) (errc int) {\n\tdefer fs.reportMetrics(\"getattr\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Getattr\", path)\n\tvar fi os.FileInfo\n\tvar err error\n\tif f := fs.lookupFH(fh); f != nil {\n\t\t// Valid filehandle -- ignore path.\n\t\tfi, err = f.Stat()\n\t} else {\n\t\t// Invalid filehandle -- lookup path.\n\t\tfi, err = fs.Root.Stat(path)\n\t}\n\tif err != nil {\n\t\treturn fs.errCode(\"Getattr\", path, err)\n\t}\n\tfs.fillStat(stat, 
fi)\n\treturn 0\n}\n\nfunc (fs *keepFS) Chmod(path string, mode uint32) (errc int) {\n\tdefer fs.reportMetrics(\"chmod\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Chmod\", path)\n\tif fs.ReadOnly {\n\t\treturn -fuse.EROFS\n\t}\n\tif fi, err := fs.Root.Stat(path); err != nil {\n\t\treturn fs.errCode(\"Chmod\", path, err)\n\t} else if mode & ^uint32(fuse.S_IFREG|fuse.S_IFDIR|0777) != 0 {\n\t\t// Refuse to set mode bits other than\n\t\t// regfile/dir/perms\n\t\treturn -fuse.ENOSYS\n\t} else if (fi.Mode()&os.ModeDir != 0) != (mode&fuse.S_IFDIR != 0) {\n\t\t// Refuse to transform a regular file to a dir, or\n\t\t// vice versa\n\t\treturn -fuse.ENOSYS\n\t}\n\t// As long as the change isn't nonsense, chmod is a no-op,\n\t// because we don't save permission bits.\n\treturn 0\n}\n\nfunc (fs *keepFS) fillStat(stat *fuse.Stat_t, fi os.FileInfo) {\n\tdefer fs.debugPanics()\n\tvar m uint32\n\tif fi.IsDir() {\n\t\tm = m | fuse.S_IFDIR\n\t} else {\n\t\tm = m | fuse.S_IFREG\n\t}\n\tm = m | uint32(fi.Mode()&os.ModePerm)\n\tstat.Mode = m\n\tstat.Nlink = 1\n\tstat.Size = fi.Size()\n\tt := fuse.NewTimespec(fi.ModTime())\n\tstat.Mtim = t\n\tstat.Ctim = t\n\tstat.Atim = t\n\tstat.Birthtim = t\n\tstat.Blksize = 1024\n\tstat.Blocks = (stat.Size + stat.Blksize - 1) / stat.Blksize\n\tif fs.Uid > 0 && int64(fs.Uid) < 1<<31 {\n\t\tstat.Uid = uint32(fs.Uid)\n\t}\n\tif fs.Gid > 0 && int64(fs.Gid) < 1<<31 {\n\t\tstat.Gid = uint32(fs.Gid)\n\t}\n}\n\nfunc (fs *keepFS) Write(path string, buf []byte, ofst int64, fh uint64) (n int) {\n\tdefer fs.reportMetrics(\"write\", time.Now(), &n)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Write\", path)\n\tif fs.ReadOnly {\n\t\treturn -fuse.EROFS\n\t}\n\tf := fs.lookupFH(fh)\n\tif f == nil {\n\t\treturn -fuse.EBADF\n\t}\n\tf.Lock()\n\tdefer f.Unlock()\n\tif _, err := f.Seek(ofst, io.SeekStart); err != nil {\n\t\treturn fs.errCode(\"Write\", path, err)\n\t}\n\tn, err := f.Write(buf)\n\tif err != nil {\n\t\treturn fs.errCode(\"Write\", path, err)\n\t}\n\treturn n\n}\n\nfunc (fs *keepFS) Read(path string, buf []byte, ofst int64, fh uint64) (n int) {\n\tdefer fs.reportMetrics(\"read\", time.Now(), &n)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Read\", path)\n\tf := fs.lookupFH(fh)\n\tif f == nil {\n\t\treturn -fuse.EBADF\n\t}\n\tf.Lock()\n\tdefer f.Unlock()\n\tif _, err := f.Seek(ofst, io.SeekStart); err != nil {\n\t\treturn fs.errCode(\"Read\", path, err)\n\t}\n\tn, err := f.Read(buf)\n\tfor err == nil && n < len(buf) {\n\t\t// f is an io.Reader (\"If some data is available but\n\t\t// not len(p) bytes, Read conventionally returns what\n\t\t// is available instead of waiting for more\") -- but\n\t\t// our caller requires us to either fill buf or reach\n\t\t// EOF.\n\t\tdone := n\n\t\tn, err = f.Read(buf[done:])\n\t\tn += done\n\t}\n\tif err != nil && err != io.EOF {\n\t\treturn fs.errCode(\"Read\", path, err)\n\t}\n\treturn n\n}\n\nfunc (fs *keepFS) Readdir(path string,\n\tfill func(name string, stat *fuse.Stat_t, ofst int64) bool,\n\tofst int64,\n\tfh uint64) (errc int) {\n\tdefer fs.reportMetrics(\"readdir\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Readdir\", path)\n\tf := fs.lookupFH(fh)\n\tif f == nil {\n\t\treturn -fuse.EBADF\n\t}\n\tfill(\".\", nil, 0)\n\tfill(\"..\", nil, 0)\n\tvar stat fuse.Stat_t\n\tfis, err := f.Readdir(-1)\n\tif err != nil {\n\t\treturn fs.errCode(\"Readdir\", path, err)\n\t}\n\tfor _, fi := range fis {\n\t\tfs.fillStat(&stat, fi)\n\t\tfill(fi.Name(), &stat, 0)\n\t}\n\treturn 0\n}\n\nfunc (fs *keepFS) Fsync(path string, 
datasync bool, fh uint64) int {\n\tdefer fs.reportMetrics(\"fsync\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Fsync\", path)\n\tf := fs.lookupFH(fh)\n\tif f == nil {\n\t\treturn -fuse.EBADF\n\t}\n\treturn fs.errCode(\"Fsync\", path, f.Sync())\n}\n\nfunc (fs *keepFS) Fsyncdir(path string, datasync bool, fh uint64) int {\n\tdefer fs.reportMetrics(\"fsyncdir\", time.Now(), nil)\n\tdefer fs.debugPanics()\n\tfs.debugOp(\"Fsyncdir\", path)\n\treturn fs.Fsync(path, datasync, fh)\n}\n\n// debugPanics (when deferred by keepFS handlers) prints an error and\n// stack trace on stderr when a handler crashes. (Without this,\n// cgofuse recovers from panics silently and returns EIO.)\nfunc (fs *keepFS) debugPanics() {\n\tif err := recover(); err != nil {\n\t\tlog.Printf(\"(%T) %v\", err, err)\n\t\tdebug.PrintStack()\n\t\tpanic(err)\n\t}\n}\n\nfunc (fs *keepFS) debugOp(op, path string) {\n\tfs.Logger.WithFields(nil).Tracef(\"fuse call %s %s\", op, path)\n}\n"
  },
  {
    "path": "lib/mount/fs_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage mount\n\nimport (\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"syscall\"\n\t\"testing\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\t\"github.com/arvados/cgofuse/fuse\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t. \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tTestingT(t)\n}\n\nvar _ = Suite(&FSSuite{})\n\ntype FSSuite struct {\n\tfs *keepFS\n}\n\nfunc (s *FSSuite) SetUpTest(c *C) {\n\tclient := arvados.NewClientFromEnv()\n\tac, err := arvadosclient.New(client)\n\tc.Assert(err, IsNil)\n\tkc, err := keepclient.MakeKeepClient(ac)\n\tc.Assert(err, IsNil)\n\troot := client.SiteFileSystem(kc)\n\tregistry := prometheus.NewRegistry()\n\tkc.RegisterMetrics(registry)\n\ts.fs = &keepFS{\n\t\tLogger:   ctxlog.TestLogger(c),\n\t\tRoot:     root,\n\t\tRegistry: registry,\n\t}\n\ts.fs.Init()\n}\n\nfunc (s *FSSuite) TearDownTest(c *C) {\n\ts.fs.Destroy()\n}\n\nfunc (s *FSSuite) TestFuseInterface(c *C) {\n\tvar _ fuse.FileSystemInterface = s.fs\n}\n\nfunc (s *FSSuite) TestOpendir(c *C) {\n\terrc, fh := s.fs.Opendir(\"/by_id\")\n\tc.Check(errc, Equals, 0)\n\tc.Check(fh, Not(Equals), uint64(0))\n\tc.Check(fh, Not(Equals), invalidFH)\n\terrc, fh = s.fs.Opendir(\"/bogus\")\n\tc.Check(errc, Equals, -fuse.ENOENT)\n\tc.Check(fh, Equals, invalidFH)\n}\n\nfunc (s *FSSuite) TestMknod_ReadOnly(c *C) {\n\ts.fs.ReadOnly = true\n\tpath := \"/by_id/\" + arvadostest.FooCollection + \"/z\"\n\terrc := s.fs.Mknod(path, syscall.S_IFREG, 0)\n\tc.Check(errc, Equals, -fuse.EROFS)\n}\n\nfunc (s *FSSuite) TestMknod(c *C) {\n\tpath := \"/by_id/\" + arvadostest.FooCollection + \"/z\"\n\t_, err := s.fs.Root.Stat(path)\n\tc.Assert(err, Equals, os.ErrNotExist)\n\n\t// Should return error if mode indicates unsupported file type\n\tfor _, mode := range []uint32{\n\t\tsyscall.S_IFCHR,\n\t\tsyscall.S_IFBLK,\n\t\tsyscall.S_IFIFO,\n\t\tsyscall.S_IFSOCK,\n\t} {\n\t\terrc := s.fs.Mknod(path, mode, 0)\n\t\tc.Check(errc, Equals, -fuse.ENOSYS)\n\t\t_, err := s.fs.Root.Stat(path)\n\t\tc.Check(err, Equals, os.ErrNotExist)\n\t}\n\n\t// Should create file and return 0 if mode indicates regular\n\t// file\n\terrc := s.fs.Mknod(path, syscall.S_IFREG|0o644, 0)\n\tc.Check(errc, Equals, 0)\n\t_, err = s.fs.Root.Stat(path)\n\tc.Check(err, IsNil)\n\n\t// Special case: \"Zero file type is equivalent to type\n\t// S_IFREG.\" cf. 
mknod(2)\n\terrc = s.fs.Mknod(path+\"2\", 0o644, 0)\n\tc.Check(errc, Equals, 0)\n\t_, err = s.fs.Root.Stat(path + \"2\")\n\tc.Check(err, IsNil)\n\n\t// Should return error if target exists\n\terrc = s.fs.Mknod(path, syscall.S_IFREG|0o644, 0)\n\tc.Check(errc, Equals, -fuse.EEXIST)\n}\n\nfunc (s *FSSuite) TestWriteMetrics(c *C) {\n\t// Zero to first tick\n\tpreviousMetrics := map[string]float64{}\n\tcurrentMetrics := map[string]float64{\n\t\t// Keep client metrics\n\t\t`arvados_keepclient_backend_bytes{direction=\"out\"}`: 1024,\n\t\t`arvados_keepclient_backend_bytes{direction=\"in\"}`:  2048,\n\t\t`arvados_keepclient_ops{op=\"put\"}`:                  5,\n\t\t`arvados_keepclient_ops{op=\"get\"}`:                  10,\n\t\t`arvados_keepclient_cache{event=\"hit\"}`:             8,\n\t\t`arvados_keepclient_cache{event=\"miss\"}`:            2,\n\t\t// FUSE metrics\n\t\t`arvados_fuse_bytes{fuseop=\"read\"}`:            2048,\n\t\t`arvados_fuse_bytes{fuseop=\"write\"}`:           1024,\n\t\t`arvados_fuse_ops{fuseop=\"read\"}`:              5,\n\t\t`arvados_fuse_ops{fuseop=\"write\"}`:             3,\n\t\t`arvados_fuse_ops{fuseop=\"getattr\"}`:           10,\n\t\t`arvados_fuse_seconds_total{fuseop=\"read\"}`:    0.123456,\n\t\t`arvados_fuse_seconds_total{fuseop=\"write\"}`:   0.234567,\n\t\t`arvados_fuse_seconds_total{fuseop=\"getattr\"}`: 0.045678,\n\t}\n\n\tout1 := &strings.Builder{}\n\twriteMetrics(out1, currentMetrics, previousMetrics, 1.0)\n\n\tlines1 := strings.Split(strings.TrimSpace(out1.String()), \"\\n\")\n\n\t// pre-sorted to match sorted output\n\texpected1 := []string{\n\t\t\"crunchstat: blkio:0:0 1024 write 2048 read -- interval 1.0000 seconds 1024 write 2048 read\",\n\t\t\"crunchstat: fuseop:create 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:fsync 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:fsyncdir 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:getattr 10 count 0.045678 time -- interval 1.0000 seconds 10 count 0.045678 time\",\n\t\t\"crunchstat: fuseop:mkdir 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:mknod 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:open 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:opendir 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:read 5 count 0.123456 time -- interval 1.0000 seconds 5 count 0.123456 time\",\n\t\t\"crunchstat: fuseop:readdir 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:release 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:releasedir 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:rename 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:rmdir 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:truncate 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:unlink 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:utimens 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: 
fuseop:write 3 count 0.234567 time -- interval 1.0000 seconds 3 count 0.234567 time\",\n\t\t\"crunchstat: fuseops 3 write 5 read -- interval 1.0000 seconds 3 write 5 read\",\n\t\t\"crunchstat: keepcache 8 hit 2 miss -- interval 1.0000 seconds 8 hit 2 miss\",\n\t\t\"crunchstat: keepcalls 5 put 10 get -- interval 1.0000 seconds 5 put 10 get\",\n\t\t\"crunchstat: net:keep0 1024 tx 2048 rx -- interval 1.0000 seconds 1024 tx 2048 rx\",\n\t}\n\n\tsort.Strings(lines1)\n\tc.Check(lines1, DeepEquals, expected1)\n\n\t// First tick to second tick\n\tpreviousMetrics = currentMetrics\n\tcurrentMetrics = map[string]float64{\n\t\t// Keep client metrics (increased)\n\t\t`arvados_keepclient_backend_bytes{direction=\"out\"}`: 2560,\n\t\t`arvados_keepclient_backend_bytes{direction=\"in\"}`:  5120,\n\t\t`arvados_keepclient_ops{op=\"put\"}`:                  10,\n\t\t`arvados_keepclient_ops{op=\"get\"}`:                  20,\n\t\t`arvados_keepclient_cache{event=\"hit\"}`:             16,\n\t\t`arvados_keepclient_cache{event=\"miss\"}`:            5,\n\t\t// FUSE metrics (increased)\n\t\t`arvados_fuse_bytes{fuseop=\"read\"}`:            5120,\n\t\t`arvados_fuse_bytes{fuseop=\"write\"}`:           2560,\n\t\t`arvados_fuse_ops{fuseop=\"read\"}`:              10,\n\t\t`arvados_fuse_ops{fuseop=\"write\"}`:             7,\n\t\t`arvados_fuse_ops{fuseop=\"getattr\"}`:           25,\n\t\t`arvados_fuse_seconds_total{fuseop=\"read\"}`:    0.273456,\n\t\t`arvados_fuse_seconds_total{fuseop=\"write\"}`:   0.384567,\n\t\t`arvados_fuse_seconds_total{fuseop=\"getattr\"}`: 0.090678,\n\t}\n\n\tout2 := &strings.Builder{}\n\twriteMetrics(out2, currentMetrics, previousMetrics, 1.0)\n\n\tlines2 := strings.Split(strings.TrimSpace(out2.String()), \"\\n\")\n\n\t// pre-sorted to match sorted output\n\texpected2 := []string{\n\t\t\"crunchstat: blkio:0:0 2560 write 5120 read -- interval 1.0000 seconds 1536 write 3072 read\",\n\t\t\"crunchstat: fuseop:create 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:fsync 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:fsyncdir 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:getattr 25 count 0.090678 time -- interval 1.0000 seconds 15 count 0.045000 time\",\n\t\t\"crunchstat: fuseop:mkdir 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:mknod 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:open 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:opendir 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:read 10 count 0.273456 time -- interval 1.0000 seconds 5 count 0.150000 time\",\n\t\t\"crunchstat: fuseop:readdir 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:release 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:releasedir 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:rename 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:rmdir 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:truncate 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:unlink 
0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:utimens 0 count 0.000000 time -- interval 1.0000 seconds 0 count 0.000000 time\",\n\t\t\"crunchstat: fuseop:write 7 count 0.384567 time -- interval 1.0000 seconds 4 count 0.150000 time\",\n\t\t\"crunchstat: fuseops 7 write 10 read -- interval 1.0000 seconds 4 write 5 read\",\n\t\t\"crunchstat: keepcache 16 hit 5 miss -- interval 1.0000 seconds 8 hit 3 miss\",\n\t\t\"crunchstat: keepcalls 10 put 20 get -- interval 1.0000 seconds 5 put 10 get\",\n\t\t\"crunchstat: net:keep0 2560 tx 5120 rx -- interval 1.0000 seconds 1536 tx 3072 rx\",\n\t}\n\n\tsort.Strings(lines2)\n\tc.Check(lines2, DeepEquals, expected2)\n}\n\nfunc (s *FSSuite) TestGatherMetrics(c *C) {\n\n\tmetrics := gatherMetrics(s.fs.Registry)\n\tc.Check(len(metrics) > 0, Equals, true)\n\tc.Check(metrics[\"arvados_fuse_ops{fuseop=\\\"read\\\"}\"], NotNil)\n\tc.Check(metrics[\"arvados_keepclient_backend_bytes{direction=\\\"in\\\"}\"], NotNil)\n\tc.Check(metrics[\"arvados_keepclient_backend_bytes{direction=\\\"out\\\"}\"], NotNil)\n\tc.Check(metrics[\"arvados_keepclient_cache{event=\\\"hit\\\"}\"], NotNil)\n\tc.Check(metrics[\"arvados_keepclient_cache{event=\\\"miss\\\"}\"], NotNil)\n\tc.Check(metrics[\"arvados_keepclient_ops{op=\\\"get\\\"}\"], NotNil)\n\tc.Check(metrics[\"arvados_keepclient_ops{op=\\\"put\\\"}\"], NotNil)\n}\n"
  },
  {
    "path": "lib/pam/.gitignore",
    "content": "pam_arvados.h\npam_arvados.so\n"
  },
  {
    "path": "lib/pam/README",
    "content": "For configuration advice, please refer to https://doc.arvados.org/install/install-webshell.html\n\nUsage (in pam config):\n\n    pam_arvados.so arvados_api_host my_vm_hostname [\"insecure\"] [\"debug\"]\n\npam_arvados.so passes authentication if (according to\narvados_api_host) the supplied PAM token belongs to an Arvados user\nwho is allowed to log in to my_vm_host_name with the supplied PAM\nusername.\n\nIf my_vm_hostname is omitted or \"-\", the current hostname is used.\n\n\"insecure\" -- continue even if the TLS certificate presented by\narvados_api_host fails verification.\n\n\"debug\" -- enable debug-level log messages in syslog and (when not in\n\"silent\" mode) on the calling application's stderr.\n"
  },
  {
    "path": "lib/pam/docker_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httputil\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"gopkg.in/check.v1\"\n)\n\ntype DockerSuite struct {\n\ttmpdir   string\n\thostip   string\n\tproxyln  net.Listener\n\tproxysrv *http.Server\n}\n\nvar _ = check.Suite(&DockerSuite{})\n\nfunc Test(t *testing.T) { check.TestingT(t) }\n\nfunc (s *DockerSuite) SetUpSuite(c *check.C) {\n\tif testing.Short() {\n\t\tc.Skip(\"skipping docker tests in short mode\")\n\t} else if _, err := exec.Command(\"docker\", \"info\").CombinedOutput(); err != nil {\n\t\tc.Skip(\"skipping docker tests because docker is not available\")\n\t}\n\n\ts.tmpdir = c.MkDir()\n\n\t// The integration-testing controller listens on the loopback\n\t// interface, so it won't be reachable directly from the\n\t// docker container -- so here we run a proxy on 0.0.0.0 for\n\t// the duration of the test.\n\thostips, err := exec.Command(\"hostname\", \"-I\").Output()\n\tc.Assert(err, check.IsNil)\n\ts.hostip = strings.Split(strings.Trim(string(hostips), \"\\n\"), \" \")[0]\n\tln, err := net.Listen(\"tcp\", s.hostip+\":0\")\n\tc.Assert(err, check.IsNil)\n\ts.proxyln = ln\n\tproxy := httputil.NewSingleHostReverseProxy(&url.URL{Scheme: \"https\", Host: os.Getenv(\"ARVADOS_API_HOST\")})\n\tproxy.Transport = &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: true,\n\t\t},\n\t}\n\ts.proxysrv = &http.Server{Handler: proxy}\n\tgo s.proxysrv.ServeTLS(ln, \"../../services/api/tmp/self-signed.pem\", \"../../services/api/tmp/self-signed.key\")\n\n\t// Build a pam module to install & configure in the docker\n\t// container.\n\tcmd := exec.Command(\"go\", \"build\", \"-buildmode=c-shared\", \"-o\", s.tmpdir+\"/pam_arvados.so\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr = cmd.Run()\n\tc.Assert(err, check.IsNil)\n\n\t// Build the testclient program that will (from inside the\n\t// docker container) configure the system to use the above PAM\n\t// config, and then try authentication.\n\tcmd = exec.Command(\"go\", \"build\", \"-o\", s.tmpdir+\"/testclient\", \"./testclient.go\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr = cmd.Run()\n\tc.Assert(err, check.IsNil)\n}\n\nfunc (s *DockerSuite) TearDownSuite(c *check.C) {\n\tif s.proxysrv != nil {\n\t\ts.proxysrv.Close()\n\t}\n\tif s.proxyln != nil {\n\t\ts.proxyln.Close()\n\t}\n}\n\nfunc (s *DockerSuite) SetUpTest(c *check.C) {\n\t// Write a PAM config file that uses our proxy as\n\t// ARVADOS_API_HOST.\n\tproxyhost := s.proxyln.Addr().String()\n\tconfdata := fmt.Sprintf(`Name: Arvados authentication\nDefault: yes\nPriority: 256\nAuth-Type: Primary\nAuth:\n\t[success=end default=ignore]\t/usr/lib/pam_arvados.so %s testvm2.shell insecure\nAuth-Initial:\n\t[success=end default=ignore]\t/usr/lib/pam_arvados.so %s testvm2.shell insecure\n`, proxyhost, proxyhost)\n\terr := ioutil.WriteFile(s.tmpdir+\"/conffile\", []byte(confdata), 0755)\n\tc.Assert(err, check.IsNil)\n}\n\nfunc (s *DockerSuite) runTestClient(c *check.C, args ...string) (stdout, stderr *bytes.Buffer, err error) {\n\n\tcmd := exec.Command(\"docker\", append([]string{\n\t\t\"run\", \"--rm\",\n\t\t\"--hostname\", \"testvm2.shell\",\n\t\t\"--add-host\", \"zzzzz.arvadosapi.com:\" + s.hostip,\n\t\t\"--mount\", 
\"type=bind,src=\" + s.tmpdir + \"/pam_arvados.so,dst=/usr/lib/pam_arvados.so,readonly\",\n\t\t\"--mount\", \"type=bind,src=\" + s.tmpdir + \"/conffile,dst=/usr/share/pam-configs/arvados,readonly\",\n\t\t\"--mount\", \"type=bind,src=\" + s.tmpdir + \"/testclient,dst=/testclient,readonly\",\n\t\t\"debian:bookworm\",\n\t\t\"/testclient\"}, args...)...)\n\tstdout = &bytes.Buffer{}\n\tstderr = &bytes.Buffer{}\n\tcmd.Stdout = stdout\n\tcmd.Stderr = stderr\n\terr = cmd.Run()\n\treturn\n}\n\nfunc (s *DockerSuite) TestSuccess(c *check.C) {\n\tstdout, stderr, err := s.runTestClient(c, \"try\", \"active\", arvadostest.ActiveTokenV2)\n\tc.Check(err, check.IsNil)\n\tc.Logf(\"%s\", stderr.String())\n\tc.Check(stdout.String(), check.Equals, \"\")\n\tc.Check(stderr.String(), check.Matches, `(?ms).*authentication succeeded.*`)\n}\n\nfunc (s *DockerSuite) TestFailure(c *check.C) {\n\tfor _, trial := range []struct {\n\t\tlabel    string\n\t\tusername string\n\t\ttoken    string\n\t}{\n\t\t{\"bad token\", \"active\", arvadostest.ActiveTokenV2 + \"badtoken\"},\n\t\t{\"empty token\", \"active\", \"\"},\n\t\t{\"empty username\", \"\", arvadostest.ActiveTokenV2},\n\t\t{\"wrong username\", \"wrongusername\", arvadostest.ActiveTokenV2},\n\t} {\n\t\tc.Logf(\"trial: %s\", trial.label)\n\t\tstdout, stderr, err := s.runTestClient(c, \"try\", trial.username, trial.token)\n\t\tc.Logf(\"%s\", stderr.String())\n\t\tc.Check(err, check.NotNil)\n\t\tc.Check(stdout.String(), check.Equals, \"\")\n\t\tc.Check(stderr.String(), check.Matches, `(?ms).*authentication failed.*`)\n\t}\n}\n\nfunc (s *DockerSuite) TestDefaultHostname(c *check.C) {\n\tconfdata := fmt.Sprintf(`Name: Arvados authentication\nDefault: yes\nPriority: 256\nAuth-Type: Primary\nAuth:\n\t[success=end default=ignore]\t/usr/lib/pam_arvados.so %s - insecure debug\nAuth-Initial:\n\t[success=end default=ignore]\t/usr/lib/pam_arvados.so %s - insecure debug\n`, s.proxyln.Addr().String(), s.proxyln.Addr().String())\n\terr := ioutil.WriteFile(s.tmpdir+\"/conffile\", []byte(confdata), 0755)\n\tc.Assert(err, check.IsNil)\n\n\tstdout, stderr, err := s.runTestClient(c, \"try\", \"active\", arvadostest.ActiveTokenV2)\n\tc.Check(err, check.IsNil)\n\tc.Logf(\"%s\", stderr.String())\n\tc.Check(stdout.String(), check.Equals, \"\")\n\tc.Check(stderr.String(), check.Matches, `(?ms).*authentication succeeded.*`)\n}\n"
  },
  {
    "path": "lib/pam/fpm-info.sh",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfpm_depends+=(ca-certificates)\ncase \"$TARGET\" in\n    rocky*) fpm_depends+=(pam) ;;\nesac\n\nfpm_args+=(--conflicts=libpam-arvados)\n"
  },
  {
    "path": "lib/pam/pam-configs-arvados",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# 1. Copy the contents of this file *minus all comment lines* to /usr/share/pam-configs/arvados-go\n# 2. Run `pam-auth-update` and choose Arvados authentication\n# 3. In /etc/pam.d/common-auth, change \"api.example\" to your ARVADOS_API_HOST\n# 4. In /etc/pam.d/common-auth, change \"shell.example\" to this host's hostname\n#    (as it appears in the Arvados virtual_machines list)\n\nName: Arvados authentication\nDefault: yes\nPriority: 256\nAuth-Type: Primary\nAuth:\n\t[success=end default=ignore]\t/usr/lib/pam_arvados.so api.example shell.example\nAuth-Initial:\n\t[success=end default=ignore]\t/usr/lib/pam_arvados.so api.example shell.example\n"
  },
  {
    "path": "lib/pam/pam_arvados.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// To enable, add an entry in /etc/pam.d/common-auth where pam_unix.so\n// would normally be. Examples:\n//\n// auth [success=1 default=ignore] /usr/lib/pam_arvados.so zzzzz.arvadosapi.com vmhostname.example\n// auth [success=1 default=ignore] /usr/lib/pam_arvados.so zzzzz.arvadosapi.com vmhostname.example insecure debug\n//\n// Replace zzzzz.arvadosapi.com with your controller host or\n// host:port.\n//\n// Replace vmhostname.example with the VM's name as it appears in the\n// Arvados virtual_machine object.\n//\n// Use \"insecure\" if your API server certificate does not pass name\n// verification.\n//\n// Use \"debug\" to enable debug log messages.\n\npackage main\n\nimport (\n\t\"io/ioutil\"\n\t\"log/syslog\"\n\t\"os\"\n\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/sirupsen/logrus\"\n\tlSyslog \"github.com/sirupsen/logrus/hooks/syslog\"\n\t\"golang.org/x/sys/unix\"\n)\n\n/*\n#cgo LDFLAGS: -lpam -fPIC\n#include <security/pam_ext.h>\nchar *stringindex(char** a, int i);\nconst char *get_user(pam_handle_t *pamh);\nconst char *get_authtoken(pam_handle_t *pamh);\n*/\nimport \"C\"\n\nfunc main() {}\n\nfunc init() {\n\tif err := unix.Prctl(syscall.PR_SET_DUMPABLE, 0, 0, 0, 0); err != nil {\n\t\tnewLogger(false).WithError(err).Warn(\"unable to disable ptrace\")\n\t}\n}\n\n//export pam_sm_setcred\nfunc pam_sm_setcred(pamh *C.pam_handle_t, flags, cArgc C.int, cArgv **C.char) C.int {\n\treturn C.PAM_IGNORE\n}\n\n//export pam_sm_authenticate\nfunc pam_sm_authenticate(pamh *C.pam_handle_t, flags, cArgc C.int, cArgv **C.char) C.int {\n\truntime.GOMAXPROCS(1)\n\tlogger := newLogger(flags&C.PAM_SILENT == 0)\n\tcUsername := C.get_user(pamh)\n\tif cUsername == nil {\n\t\treturn C.PAM_USER_UNKNOWN\n\t}\n\n\tcToken := C.get_authtoken(pamh)\n\tif cToken == nil {\n\t\treturn C.PAM_AUTH_ERR\n\t}\n\n\targv := make([]string, cArgc)\n\tfor i := 0; i < int(cArgc); i++ {\n\t\targv[i] = C.GoString(C.stringindex(cArgv, C.int(i)))\n\t}\n\n\terr := authenticate(logger, C.GoString(cUsername), C.GoString(cToken), argv)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"authentication failed\")\n\t\treturn C.PAM_AUTH_ERR\n\t}\n\treturn C.PAM_SUCCESS\n}\n\nfunc authenticate(logger *logrus.Logger, username, token string, argv []string) error {\n\thostname := \"\"\n\tapiHost := \"\"\n\tinsecure := false\n\tfor idx, arg := range argv {\n\t\tif idx == 0 {\n\t\t\tapiHost = arg\n\t\t} else if idx == 1 {\n\t\t\thostname = arg\n\t\t} else if arg == \"insecure\" {\n\t\t\tinsecure = true\n\t\t} else if arg == \"debug\" {\n\t\t\tlogger.SetLevel(logrus.DebugLevel)\n\t\t} else {\n\t\t\tlogger.Warnf(\"unknown option: %s\\n\", arg)\n\t\t}\n\t}\n\tif hostname == \"\" || hostname == \"-\" {\n\t\th, err := os.Hostname()\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Warnf(\"cannot get hostname -- try using an explicit hostname in pam config\")\n\t\t\treturn fmt.Errorf(\"cannot get hostname: %w\", err)\n\t\t}\n\t\thostname = h\n\t}\n\tlogger.Debugf(\"username=%q arvados_api_host=%q hostname=%q insecure=%t\", username, apiHost, hostname, insecure)\n\tif apiHost == \"\" {\n\t\tlogger.Warnf(\"cannot authenticate: config error: arvados_api_host and hostname must be non-empty\")\n\t\treturn errors.New(\"config error\")\n\t}\n\tarv := &arvados.Client{\n\t\tScheme:    \"https\",\n\t\tAPIHost:   apiHost,\n\t\tAuthToken: 
token,\n\t\tInsecure:  insecure,\n\t}\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))\n\tdefer cancel()\n\tvar vms arvados.VirtualMachineList\n\terr := arv.RequestAndDecodeContext(ctx, &vms, \"GET\", \"arvados/v1/virtual_machines\", nil, arvados.ListOptions{\n\t\tLimit: 2,\n\t\tFilters: []arvados.Filter{\n\t\t\t{\"hostname\", \"=\", hostname},\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(vms.Items) == 0 {\n\t\t// It's possible there is no VM entry for the\n\t\t// configured hostname, but typically this just means\n\t\t// the user does not have permission to see (let alone\n\t\t// log in to) this VM.\n\t\treturn errors.New(\"permission denied\")\n\t} else if len(vms.Items) > 1 {\n\t\treturn fmt.Errorf(\"multiple results for hostname %q\", hostname)\n\t} else if vms.Items[0].Hostname != hostname {\n\t\treturn fmt.Errorf(\"looked up hostname %q but controller returned record with hostname %q\", hostname, vms.Items[0].Hostname)\n\t}\n\tvar user arvados.User\n\terr = arv.RequestAndDecodeContext(ctx, &user, \"GET\", \"arvados/v1/users/current\", nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar links arvados.LinkList\n\terr = arv.RequestAndDecodeContext(ctx, &links, \"GET\", \"arvados/v1/links\", nil, arvados.ListOptions{\n\t\tLimit: 1,\n\t\tFilters: []arvados.Filter{\n\t\t\t{\"link_class\", \"=\", \"permission\"},\n\t\t\t{\"name\", \"=\", \"can_login\"},\n\t\t\t{\"tail_uuid\", \"=\", user.UUID},\n\t\t\t{\"head_uuid\", \"=\", vms.Items[0].UUID},\n\t\t\t{\"properties.username\", \"=\", username},\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(links.Items) < 1 || links.Items[0].Properties[\"username\"] != username {\n\t\treturn errors.New(\"permission denied\")\n\t}\n\tlogger.Debugf(\"permission granted based on link with UUID %s\", links.Items[0].UUID)\n\treturn nil\n}\n\nfunc newLogger(stderr bool) *logrus.Logger {\n\tlogger := logrus.New()\n\tif !stderr {\n\t\tlogger.Out = ioutil.Discard\n\t}\n\t// Attach the syslog hook only if it was created successfully.\n\tif hook, err := lSyslog.NewSyslogHook(\"udp\", \"localhost:514\", syslog.LOG_AUTH|syslog.LOG_INFO, \"pam_arvados\"); err == nil {\n\t\tlogger.Hooks.Add(hook)\n\t}\n\treturn logger\n}\n"
  },
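  {
    "path": "lib/pam/checklogin_example.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n//go:build ignore\n// +build ignore\n\n// Illustrative sketch, not part of the upstream tree: performs the\n// same two-step lookup as authenticate() in pam_arvados.go -- find\n// the virtual_machine record for a hostname, then look for a\n// can_login permission link -- as a standalone program, so the\n// queries can be debugged without going through PAM. The file name\n// and command are hypothetical.\n//\n// Usage:\n//   ARVADOS_API_HOST=... ARVADOS_API_TOKEN=... \\\n//     go run checklogin_example.go <vm-hostname> <unix-username>\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\nfunc main() {\n\tif len(os.Args) != 3 {\n\t\tfmt.Fprintln(os.Stderr, \"usage: checklogin_example <vm-hostname> <unix-username>\")\n\t\tos.Exit(2)\n\t}\n\thostname, username := os.Args[1], os.Args[2]\n\tarv := arvados.NewClientFromEnv()\n\tctx := context.Background()\n\n\t// Step 1: resolve the VM record, as authenticate() does.\n\tvar vms arvados.VirtualMachineList\n\terr := arv.RequestAndDecodeContext(ctx, &vms, \"GET\", \"arvados/v1/virtual_machines\", nil, arvados.ListOptions{\n\t\tLimit: 2,\n\t\tFilters: []arvados.Filter{\n\t\t\t{\"hostname\", \"=\", hostname},\n\t\t},\n\t})\n\tif err != nil || len(vms.Items) != 1 {\n\t\tfmt.Fprintf(os.Stderr, \"virtual_machines lookup failed: err=%v matches=%d\\n\", err, len(vms.Items))\n\t\tos.Exit(1)\n\t}\n\n\t// Step 2: look for a can_login link from the current user to\n\t// that VM with the given username property.\n\tvar user arvados.User\n\tif err := arv.RequestAndDecodeContext(ctx, &user, \"GET\", \"arvados/v1/users/current\", nil, nil); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"users/current failed: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tvar links arvados.LinkList\n\terr = arv.RequestAndDecodeContext(ctx, &links, \"GET\", \"arvados/v1/links\", nil, arvados.ListOptions{\n\t\tLimit: 1,\n\t\tFilters: []arvados.Filter{\n\t\t\t{\"link_class\", \"=\", \"permission\"},\n\t\t\t{\"name\", \"=\", \"can_login\"},\n\t\t\t{\"tail_uuid\", \"=\", user.UUID},\n\t\t\t{\"head_uuid\", \"=\", vms.Items[0].UUID},\n\t\t\t{\"properties.username\", \"=\", username},\n\t\t},\n\t})\n\tif err != nil || len(links.Items) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"no can_login link found: err=%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Printf(\"permission granted by link %s\\n\", links.Items[0].UUID)\n}\n"
  },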
  {
    "path": "lib/pam/pam_c.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage main\n\n/*\n#cgo LDFLAGS: -lpam -fPIC\n#include <security/pam_ext.h>\nchar *stringindex(char** a, int i) { return a[i]; }\nconst char *get_user(pam_handle_t *pamh) {\n  const char *user;\n  if (pam_get_item(pamh, PAM_USER, (const void**)&user) != PAM_SUCCESS)\n    return NULL;\n  return user;\n}\nconst char *get_authtoken(pam_handle_t *pamh) {\n  const char *token;\n  if (pam_get_authtok(pamh, PAM_AUTHTOK, &token, NULL) != PAM_SUCCESS)\n    return NULL;\n  return token;\n}\n*/\nimport \"C\"\n"
  },
  {
    "path": "lib/pam/testclient.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n//go:build ignore\n// +build ignore\n\n// This file is compiled by docker_test.go to build a test client.\n// It's not part of the pam module itself.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\n\t\"github.com/msteinert/pam\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc main() {\n\tif len(os.Args) != 4 || os.Args[1] != \"try\" {\n\t\tlogrus.Print(\"usage: testclient try 'username' 'password'\")\n\t\tos.Exit(1)\n\t}\n\tusername := os.Args[2]\n\tpassword := os.Args[3]\n\n\t// Configure PAM to use arvados token auth by default.\n\tcmd := exec.Command(\"pam-auth-update\", \"--force\", \"arvados\", \"--remove\", \"unix\")\n\tcmd.Env = append([]string{\"DEBIAN_FRONTEND=noninteractive\"}, os.Environ()...)\n\tcmd.Stdin = nil\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"pam-auth-update failed\")\n\t\tos.Exit(1)\n\t}\n\n\t// Check that pam-auth-update actually added arvados config.\n\tcmd = exec.Command(\"grep\", \"-Hn\", \"arvados\", \"/etc/pam.d/common-auth\")\n\tcmd.Stdout = os.Stderr\n\tcmd.Stderr = os.Stderr\n\terr = cmd.Run()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlogrus.Debugf(\"starting pam: username=%q password=%q\", username, password)\n\n\tsentPassword := false\n\terrorMessage := \"\"\n\ttx, err := pam.StartFunc(\"default\", username, func(style pam.Style, message string) (string, error) {\n\t\tlogrus.Debugf(\"pam conversation: style=%v message=%q\", style, message)\n\t\tswitch style {\n\t\tcase pam.ErrorMsg:\n\t\t\tlogrus.WithField(\"Message\", message).Info(\"pam.ErrorMsg\")\n\t\t\terrorMessage = message\n\t\t\treturn \"\", nil\n\t\tcase pam.TextInfo:\n\t\t\tlogrus.WithField(\"Message\", message).Info(\"pam.TextInfo\")\n\t\t\terrorMessage = message\n\t\t\treturn \"\", nil\n\t\tcase pam.PromptEchoOn, pam.PromptEchoOff:\n\t\t\tsentPassword = true\n\t\t\treturn password, nil\n\t\tdefault:\n\t\t\treturn \"\", fmt.Errorf(\"unrecognized message style %d\", style)\n\t\t}\n\t})\n\tif err != nil {\n\t\tlogrus.WithError(err).Print(\"StartFunc failed\")\n\t\tos.Exit(1)\n\t}\n\terr = tx.Authenticate(pam.DisallowNullAuthtok)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"PAM: %s (message = %q, sentPassword = %v)\", err, errorMessage, sentPassword)\n\t\tlogrus.WithError(err).Print(\"authentication failed\")\n\t\tos.Exit(1)\n\t}\n\tlogrus.Print(\"authentication succeeded\")\n}\n"
  },
  {
    "path": "lib/recovercollection/cmd.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage recovercollection\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nvar Command command\n\ntype command struct{}\n\nfunc (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tvar err error\n\tlogger := ctxlog.New(stderr, \"text\", \"info\")\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"fatal\")\n\t\t}\n\t\tlogger.Info(\"exiting\")\n\t}()\n\n\tloader := config.NewLoader(stdin, logger)\n\tloader.SkipLegacy = true\n\n\tflags := flag.NewFlagSet(prog, flag.ContinueOnError)\n\tflags.Usage = func() {\n\t\tfmt.Fprintf(flags.Output(), `Usage:\n\t%s [options ...] { /path/to/manifest.txt | log-or-collection-uuid } [...]\n\n\tThis program recovers deleted collections. Recovery is\n\tpossible when the collection's manifest is still available and\n\tall of its data blocks are still available or recoverable\n\t(e.g., garbage collection is not enabled, the blocks are too\n\tnew for garbage collection, the blocks are referenced by other\n\tcollections, or the blocks have been trashed but not yet\n\tdeleted).\n\n\tThere are multiple ways to specify a collection to recover:\n\n        * Path to a local file containing a manifest with the desired\n\t  data\n\n\t* UUID of an Arvados log entry, typically a \"delete\" or\n\t  \"update\" event, whose \"old attributes\" have a manifest with\n\t  the desired data\n\n\t* UUID of an Arvados collection whose most recent log entry,\n          typically a \"delete\" or \"update\" event, has the desired\n          data in its \"old attributes\"\n\n\tFor each provided collection manifest, once all data blocks\n\tare recovered/protected from garbage collection, a new\n\tcollection is saved and its UUID is printed on stdout.\n\n\tRestored collections will belong to the system (root) user.\n\n\tExit status will be zero if recovery is successful, i.e., a\n\tcollection is saved for each provided manifest.\nOptions:\n`, prog)\n\t\tflags.PrintDefaults()\n\t}\n\tloader.SetupFlags(flags)\n\tloglevel := flags.String(\"log-level\", \"info\", \"logging level (debug, info, ...)\")\n\tif ok, code := cmd.ParseFlags(flags, prog, args, \"source [...]\", stderr); !ok {\n\t\treturn code\n\t} else if flags.NArg() == 0 {\n\t\tfmt.Fprintf(stderr, \"missing required arguments (try -help)\\n\")\n\t\treturn 2\n\t}\n\n\tlvl, err := logrus.ParseLevel(*loglevel)\n\tif err != nil {\n\t\treturn 2\n\t}\n\tlogger.SetLevel(lvl)\n\n\tcfg, err := loader.Load()\n\tif err != nil {\n\t\treturn 1\n\t}\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\treturn 1\n\t}\n\tclient, err := arvados.NewClientFromConfig(cluster)\n\tif err != nil {\n\t\treturn 1\n\t}\n\tclient.AuthToken = cluster.SystemRootToken\n\trcvr := recoverer{\n\t\tclient:  client,\n\t\tcluster: cluster,\n\t\tlogger:  logger,\n\t}\n\n\texitcode := 0\n\tfor _, src := range flags.Args() {\n\t\tlogger := logger.WithField(\"src\", src)\n\t\tvar mtxt string\n\t\tif !strings.Contains(src, \"/\") && len(src) == 27 && src[5] == '-' && src[11] == '-' {\n\t\t\tvar filters []arvados.Filter\n\t\t\tif src[5:12] == \"-57u5n-\" 
{\n\t\t\t\tfilters = []arvados.Filter{{\"uuid\", \"=\", src}}\n\t\t\t} else if src[5:12] == \"-4zz18-\" {\n\t\t\t\tfilters = []arvados.Filter{{\"object_uuid\", \"=\", src}}\n\t\t\t} else {\n\t\t\t\tlogger.Error(\"looks like a UUID but not a log or collection UUID (if it's really a file, prepend './')\")\n\t\t\t\texitcode = 1\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar resp struct {\n\t\t\t\tItems []struct {\n\t\t\t\t\tUUID       string    `json:\"uuid\"`\n\t\t\t\t\tEventType  string    `json:\"event_type\"`\n\t\t\t\t\tEventAt    time.Time `json:\"event_at\"`\n\t\t\t\t\tObjectUUID string    `json:\"object_uuid\"`\n\t\t\t\t\tProperties struct {\n\t\t\t\t\t\tOldAttributes struct {\n\t\t\t\t\t\t\tManifestText string `json:\"manifest_text\"`\n\t\t\t\t\t\t} `json:\"old_attributes\"`\n\t\t\t\t\t} `json:\"properties\"`\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = client.RequestAndDecode(&resp, \"GET\", \"arvados/v1/logs\", nil, arvados.ListOptions{\n\t\t\t\tLimit:   1,\n\t\t\t\tOrder:   []string{\"event_at desc\"},\n\t\t\t\tFilters: filters,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"error looking up log entry\")\n\t\t\t\texitcode = 1\n\t\t\t\tcontinue\n\t\t\t} else if len(resp.Items) == 0 {\n\t\t\t\tlogger.Error(\"log entry not found\")\n\t\t\t\texitcode = 1\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlogent := resp.Items[0]\n\t\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\t\"uuid\":                logent.UUID,\n\t\t\t\t\"old_collection_uuid\": logent.ObjectUUID,\n\t\t\t\t\"logged_event_type\":   logent.EventType,\n\t\t\t\t\"logged_event_time\":   logent.EventAt,\n\t\t\t\t\"logged_object_uuid\":  logent.ObjectUUID,\n\t\t\t}).Info(\"loaded log entry\")\n\t\t\tmtxt = logent.Properties.OldAttributes.ManifestText\n\t\t\tif mtxt == \"\" {\n\t\t\t\tlogger.Error(\"log entry properties.old_attributes.manifest_text missing or empty\")\n\t\t\t\texitcode = 1\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tbuf, err := ioutil.ReadFile(src)\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"failed to load manifest data from file\")\n\t\t\t\texitcode = 1\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmtxt = string(buf)\n\t\t}\n\t\tuuid, err := rcvr.RecoverManifest(string(mtxt))\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Error(\"recovery failed\")\n\t\t\texitcode = 1\n\t\t\tcontinue\n\t\t}\n\t\tlogger.WithField(\"UUID\", uuid).Info(\"recovery succeeded\")\n\t\tfmt.Fprintln(stdout, uuid)\n\t}\n\treturn exitcode\n}\n\ntype recoverer struct {\n\tclient  *arvados.Client\n\tcluster *arvados.Cluster\n\tlogger  logrus.FieldLogger\n}\n\nvar errNotFound = errors.New(\"not found\")\n\n// Finds the timestamp of the newest copy of blk on svc. Returns\n// errNotFound if blk is not on svc at all.\nfunc (rcvr recoverer) newestMtime(ctx context.Context, logger logrus.FieldLogger, blk string, svc arvados.KeepService) (time.Time, error) {\n\tfound, err := svc.Index(ctx, rcvr.client, blk)\n\tif err != nil {\n\t\tlogger.WithError(err).Warn(\"error getting index\")\n\t\treturn time.Time{}, err\n\t} else if len(found) == 0 {\n\t\treturn time.Time{}, errNotFound\n\t}\n\tvar latest time.Time\n\tfor _, ent := range found {\n\t\tt := time.Unix(0, ent.Mtime)\n\t\tif t.After(latest) {\n\t\t\tlatest = t\n\t\t}\n\t}\n\tlogger.WithField(\"latest\", latest).Debug(\"found\")\n\treturn latest, nil\n}\n\nvar errTouchIneffective = errors.New(\"(BUG?) 
touch succeeded but had no effect -- reported timestamp is still too old\")\n\n// Ensures the given block exists on the given server and won't be\n// eligible for trashing until after our chosen deadline (blobsigexp).\n// Returns an error if the block doesn't exist on the given server, or\n// has an old timestamp and can't be updated.\n//\n// After we decide a block is \"safe\" (whether or not we had to untrash\n// it), keep-balance might notice that it's currently unreferenced and\n// decide to trash it, all before our recovered collection gets\n// saved. But if the block's timestamp is more recent than blobsigttl,\n// keepstore will refuse to trash it even if told to by keep-balance.\nfunc (rcvr recoverer) ensureSafe(ctx context.Context, logger logrus.FieldLogger, blk string, svc arvados.KeepService, blobsigttl time.Duration, blobsigexp time.Time) error {\n\tif latest, err := rcvr.newestMtime(ctx, logger, blk, svc); err != nil {\n\t\treturn err\n\t} else if latest.Add(blobsigttl).After(blobsigexp) {\n\t\treturn nil\n\t}\n\tif err := svc.Touch(ctx, rcvr.client, blk); err != nil {\n\t\treturn fmt.Errorf(\"error updating timestamp: %s\", err)\n\t}\n\tlogger.Debug(\"updated timestamp\")\n\tif latest, err := rcvr.newestMtime(ctx, logger, blk, svc); err == errNotFound {\n\t\treturn fmt.Errorf(\"(BUG?) touch succeeded, but then block did not appear in index\")\n\t} else if err != nil {\n\t\treturn err\n\t} else if latest.Add(blobsigttl).After(blobsigexp) {\n\t\treturn nil\n\t} else {\n\t\treturn errTouchIneffective\n\t}\n}\n\n// Untrash and update GC timestamps (as needed) on blocks referenced\n// by the given manifest, save a new collection and return the new\n// collection's UUID.\nfunc (rcvr recoverer) RecoverManifest(mtxt string) (string, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tcoll := arvados.Collection{ManifestText: mtxt}\n\tblks, err := coll.SizedDigests()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ttodo := make(chan int, len(blks))\n\tfor idx := range blks {\n\t\ttodo <- idx\n\t}\n\tgo close(todo)\n\n\tvar services []arvados.KeepService\n\terr = rcvr.client.EachKeepService(func(svc arvados.KeepService) error {\n\t\tif svc.ServiceType == \"proxy\" {\n\t\t\trcvr.logger.WithField(\"service\", svc).Debug(\"ignore proxy service\")\n\t\t} else {\n\t\t\tservices = append(services, svc)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error getting list of keep services: %s\", err)\n\t}\n\trcvr.logger.WithField(\"services\", services).Debug(\"got list of services\")\n\n\t// blobsigexp is our deadline for saving the rescued\n\t// collection. This must be less than BlobSigningTTL\n\t// (otherwise our rescued blocks could be garbage collected\n\t// again before we protect them by saving the collection) but\n\t// the exact value is somewhat arbitrary. If it's too soon, it\n\t// will arrive before we're ready to save, and save will\n\t// fail. 
If it's too late, we'll needlessly update timestamps\n\t// on some blocks that were recently written/touched (e.g., by\n\t// a previous attempt to rescue this same collection) and\n\t// would have lived long enough anyway if left alone.\n\t// BlobSigningTTL/2 (typically around 1 week) is much longer\n\t// than we need to recover even a very large collection.\n\tblobsigttl := rcvr.cluster.Collections.BlobSigningTTL.Duration()\n\tblobsigexp := time.Now().Add(blobsigttl / 2)\n\trcvr.logger.WithField(\"blobsigexp\", blobsigexp).Debug(\"chose save deadline\")\n\n\t// We'll start a number of threads, each working on\n\t// checking/recovering one block at a time. The threads\n\t// themselves don't need much CPU/memory, but to avoid hitting\n\t// limits on keepstore connections, backend storage bandwidth,\n\t// etc., we limit concurrency to 2 per keepstore node.\n\tworkerThreads := 2 * len(services)\n\n\tblkFound := make([]bool, len(blks))\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < workerThreads; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\tnextblk:\n\t\t\tfor idx := range todo {\n\t\t\t\tblk := strings.SplitN(string(blks[idx]), \"+\", 2)[0]\n\t\t\t\tlogger := rcvr.logger.WithField(\"block\", blk)\n\t\t\t\tfor _, untrashing := range []bool{false, true} {\n\t\t\t\t\tfor _, svc := range services {\n\t\t\t\t\t\tlogger := logger.WithField(\"service\", fmt.Sprintf(\"%s:%d\", svc.ServiceHost, svc.ServicePort))\n\t\t\t\t\t\tif untrashing {\n\t\t\t\t\t\t\tif err := svc.Untrash(ctx, rcvr.client, blk); err != nil {\n\t\t\t\t\t\t\t\tlogger.WithError(err).Debug(\"untrash failed\")\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tlogger.Info(\"untrashed\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\terr := rcvr.ensureSafe(ctx, logger, blk, svc, blobsigttl, blobsigexp)\n\t\t\t\t\t\tif err == errNotFound {\n\t\t\t\t\t\t\tlogger.Debug(err)\n\t\t\t\t\t\t} else if err != nil {\n\t\t\t\t\t\t\tlogger.Error(err)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tblkFound[idx] = true\n\t\t\t\t\t\t\tcontinue nextblk\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlogger.Debug(\"unrecoverable\")\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\n\tvar have, havenot int\n\tfor _, ok := range blkFound {\n\t\tif ok {\n\t\t\thave++\n\t\t} else {\n\t\t\thavenot++\n\t\t}\n\t}\n\tif havenot > 0 {\n\t\tif have > 0 {\n\t\t\trcvr.logger.Warn(\"partial recovery is not implemented\")\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"unable to recover %d of %d blocks\", havenot, have+havenot)\n\t}\n\n\tif rcvr.cluster.Collections.BlobSigning {\n\t\tkey := []byte(rcvr.cluster.Collections.BlobSigningKey)\n\t\tcoll.ManifestText = arvados.SignManifest(coll.ManifestText, rcvr.client.AuthToken, blobsigexp, blobsigttl, key)\n\t}\n\trcvr.logger.WithField(\"manifest\", coll.ManifestText).Debug(\"updated blob signatures in manifest\")\n\terr = rcvr.client.RequestAndDecodeContext(ctx, &coll, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\"collection\": map[string]interface{}{\n\t\t\t\"manifest_text\": coll.ManifestText,\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error saving new collection: %s\", err)\n\t}\n\trcvr.logger.WithField(\"UUID\", coll.UUID).Debug(\"created new collection\")\n\treturn coll.UUID, nil\n}\n"
  },
  {
    "path": "lib/recovercollection/cmd_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage recovercollection\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&Suite{})\n\ntype Suite struct{}\n\nfunc (*Suite) SetUpSuite(c *check.C) {\n\tarvadostest.StartKeep(2, true)\n}\n\nfunc (*Suite) TestUnrecoverableBlock(c *check.C) {\n\ttmp := c.MkDir()\n\tmfile := tmp + \"/manifest\"\n\tioutil.WriteFile(mfile, []byte(\". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+410 0:410:Gone\\n\"), 0777)\n\tvar stdout, stderr bytes.Buffer\n\texitcode := Command.RunCommand(\"recovercollection.test\", []string{\"-log-level=debug\", mfile}, &bytes.Buffer{}, &stdout, &stderr)\n\tc.Check(exitcode, check.Equals, 1)\n\tc.Check(stdout.String(), check.Equals, \"\")\n\tc.Log(stderr.String())\n\tc.Check(stderr.String(), check.Matches, `(?ms).*msg=\"not found\" block=aaaaa.*`)\n\tc.Check(stderr.String(), check.Matches, `(?ms).*msg=\"untrash failed\" block=aaaaa.*`)\n\tc.Check(stderr.String(), check.Matches, `(?ms).*msg=unrecoverable block=aaaaa.*`)\n\tc.Check(stderr.String(), check.Matches, `(?ms).*msg=\"recovery failed\".*`)\n}\n\nfunc (*Suite) TestUntrashAndTouchBlock(c *check.C) {\n\ttmp := c.MkDir()\n\tmfile := tmp + \"/manifest\"\n\tioutil.WriteFile(mfile, []byte(\". dcd0348cb2532ee90c99f1b846efaee7+13 0:13:test.txt\\n\"), 0777)\n\n\tlogger := ctxlog.TestLogger(c)\n\tloader := config.NewLoader(&bytes.Buffer{}, logger)\n\tcfg, err := loader.Load()\n\tc.Assert(err, check.IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\tvar datadirs []string\n\tfor _, v := range cluster.Volumes {\n\t\tvar params struct {\n\t\t\tRoot string\n\t\t}\n\t\terr := json.Unmarshal(v.DriverParameters, &params)\n\t\tc.Assert(err, check.IsNil)\n\t\tif params.Root != \"\" {\n\t\t\tdatadirs = append(datadirs, params.Root)\n\t\t\terr := os.Remove(params.Root + \"/dcd/dcd0348cb2532ee90c99f1b846efaee7\")\n\t\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\t\tc.Error(err)\n\t\t\t}\n\t\t}\n\t}\n\tc.Logf(\"keepstore datadirs are %q\", datadirs)\n\n\t// Currently StartKeep(2, true) uses dirs called \"keep0\" and\n\t// \"keep1\" so we could just put our fake trashed file in keep0\n\t// ... 
but we don't want to rely on arvadostest's\n\t// implementation details, so we put a trashed file in every\n\t// dir that keepstore might be using.\n\tfor _, datadir := range datadirs {\n\t\tif fi, err := os.Stat(datadir); err != nil || !fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tc.Logf(\"placing backdated trashed block in datadir %q\", datadir)\n\t\ttrashfile := datadir + \"/dcd/dcd0348cb2532ee90c99f1b846efaee7.trash.999999999\"\n\t\tos.Mkdir(datadir+\"/dcd\", 0777)\n\t\terr = ioutil.WriteFile(trashfile, []byte(\"undelete test\"), 0777)\n\t\tc.Assert(err, check.IsNil)\n\t\tt := time.Now().Add(-time.Hour * 24 * 365)\n\t\terr = os.Chtimes(trashfile, t, t)\n\t\tc.Assert(err, check.IsNil)\n\t}\n\n\tvar stdout, stderr bytes.Buffer\n\texitcode := Command.RunCommand(\"recovercollection.test\", []string{\"-log-level=debug\", mfile}, &bytes.Buffer{}, &stdout, &stderr)\n\tc.Check(exitcode, check.Equals, 0)\n\tc.Check(stdout.String(), check.Matches, `zzzzz-4zz18-.{15}\\n`)\n\tc.Log(stderr.String())\n\tc.Check(stderr.String(), check.Matches, `(?ms).*msg=untrashed block=dcd0348.*`)\n\tc.Check(stderr.String(), check.Matches, `(?ms).*msg=\"updated timestamp\" block=dcd0348.*`)\n\n\tfound := false\n\tfor _, datadir := range datadirs {\n\t\tbuf, err := ioutil.ReadFile(datadir + \"/dcd/dcd0348cb2532ee90c99f1b846efaee7\")\n\t\tif err == nil {\n\t\t\tfound = true\n\t\t\tc.Check(buf, check.DeepEquals, []byte(\"undelete test\"))\n\t\t\tfi, err := os.Stat(datadir + \"/dcd/dcd0348cb2532ee90c99f1b846efaee7\")\n\t\t\tif c.Check(err, check.IsNil) {\n\t\t\t\tc.Logf(\"recovered block's modtime is %s\", fi.ModTime())\n\t\t\t\tc.Check(time.Now().Sub(fi.ModTime()) < time.Hour, check.Equals, true)\n\t\t\t}\n\t\t}\n\t}\n\tc.Check(found, check.Equals, true)\n}\n\nfunc (*Suite) TestUnusableManifestSourceArg(c *check.C) {\n\tfor _, trial := range []struct {\n\t\tsrcArg    string\n\t\terrRegexp string\n\t}{\n\t\t{\"zzzzz-4zz18-aaaaaaaaaaaaaaa\", `(?ms).*msg=\"log entry not found\".*`},\n\t\t{\"zzzzz-57u5n-aaaaaaaaaaaaaaa\", `(?ms).*msg=\"log entry not found.*`},\n\t\t{\"zzzzz-57u5n-containerlog006\", `(?ms).*msg=\"log entry properties\\.old_attributes\\.manifest_text missing or empty\".*`},\n\t\t{\"zzzzz-j7d0g-aaaaaaaaaaaaaaa\", `(?ms).*msg=\"looks like a UUID but not a log or collection UUID.*`},\n\t} {\n\t\tvar stdout, stderr bytes.Buffer\n\t\texitcode := Command.RunCommand(\"recovercollection.test\", []string{\"-log-level=debug\", trial.srcArg}, &bytes.Buffer{}, &stdout, &stderr)\n\t\tc.Check(exitcode, check.Equals, 1)\n\t\tc.Check(stdout.String(), check.Equals, \"\")\n\t\tc.Log(stderr.String())\n\t\tc.Check(stderr.String(), check.Matches, trial.errRegexp)\n\t}\n}\n"
  },
  {
    "path": "lib/selfsigned/cert.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage selfsigned\n\nimport (\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"crypto/x509/pkix\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"net\"\n\t\"time\"\n)\n\ntype CertGenerator struct {\n\tBits  int\n\tHosts []string\n\tIsCA  bool\n}\n\nfunc (gen CertGenerator) Generate() (cert tls.Certificate, err error) {\n\tkeyUsage := x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment\n\tif gen.IsCA {\n\t\tkeyUsage |= x509.KeyUsageCertSign\n\t}\n\tnotBefore := time.Now()\n\tnotAfter := time.Now().Add(time.Hour * 24 * 365)\n\tsnMax := new(big.Int).Lsh(big.NewInt(1), 128)\n\tsn, err := rand.Int(rand.Reader, snMax)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Failed to generate serial number: %w\", err)\n\t\treturn\n\t}\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: sn,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"N/A\"},\n\t\t},\n\t\tNotBefore:             notBefore,\n\t\tNotAfter:              notAfter,\n\t\tKeyUsage:              keyUsage,\n\t\tExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t\tIsCA:                  gen.IsCA,\n\t}\n\tfor _, h := range gen.Hosts {\n\t\tif ip := net.ParseIP(h); ip != nil {\n\t\t\ttemplate.IPAddresses = append(template.IPAddresses, ip)\n\t\t} else {\n\t\t\ttemplate.DNSNames = append(template.DNSNames, h)\n\t\t}\n\t}\n\tbits := gen.Bits\n\tif bits == 0 {\n\t\tbits = 4096\n\t}\n\tpriv, err := rsa.GenerateKey(rand.Reader, bits)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error generating key: %w\", err)\n\t\treturn\n\t}\n\tcertder, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error creating certificate: %w\", err)\n\t\treturn\n\t}\n\tcert = tls.Certificate{\n\t\tCertificate: [][]byte{certder},\n\t\tPrivateKey:  priv,\n\t}\n\treturn\n}\n"
  },
  {
    "path": "lib/selfsigned/cert_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage selfsigned\n\nimport (\n\t\"testing\"\n)\n\nfunc TestCert(t *testing.T) {\n\tcert, err := CertGenerator{Bits: 1024, Hosts: []string{\"localhost\"}, IsCA: false}.Generate()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(cert.Certificate) < 1 {\n\t\tt.Error(\"no certificate!\")\n\t}\n\tcert, err = CertGenerator{Bits: 2048, Hosts: []string{\"localhost\"}, IsCA: true}.Generate()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(cert.Certificate) < 1 {\n\t\tt.Error(\"no certificate!\")\n\t}\n}\n"
  },
  {
    "path": "lib/service/cmd.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Package service provides a cmd.Handler that brings up a system service.\npackage service\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t_ \"net/http/pprof\"\n\t\"net/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/health\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/coreos/go-systemd/daemon\"\n\t\"github.com/julienschmidt/httprouter\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype Handler interface {\n\thttp.Handler\n\tCheckHealth() error\n\t// Done returns a channel that closes when the handler shuts\n\t// itself down, or nil if this never happens.\n\tDone() <-chan struct{}\n}\n\ntype NewHandlerFunc func(_ context.Context, _ *arvados.Cluster, token string, registry *prometheus.Registry) Handler\n\ntype command struct {\n\tnewHandler NewHandlerFunc\n\tsvcName    arvados.ServiceName\n\tctx        context.Context // enables tests to shutdown service; no public API yet\n}\n\nvar requestQueueDumpCheckInterval = time.Minute\n\n// Command returns a cmd.Handler that loads site config, calls\n// newHandler with the current cluster and node configs, and brings up\n// an http server with the returned handler.\n//\n// The handler is wrapped with server middleware (adding X-Request-ID\n// headers, logging requests/responses, etc).\nfunc Command(svcName arvados.ServiceName, newHandler NewHandlerFunc) cmd.Handler {\n\treturn &command{\n\t\tnewHandler: newHandler,\n\t\tsvcName:    svcName,\n\t\tctx:        context.Background(),\n\t}\n}\n\nfunc (c *command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tlog := ctxlog.New(stderr, \"json\", \"info\")\n\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"exiting\")\n\t\t}\n\t}()\n\n\tflags := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tflags.SetOutput(stderr)\n\n\tloader := config.NewLoader(stdin, log)\n\tloader.SetupFlags(flags)\n\n\t// prog is [keepstore, keep-web, ...]  but the\n\t// legacy config flags are [-legacy-keepstore-config,\n\t// -legacy-keepweb-config, ...]\n\tlegacyFlag := \"-legacy-\" + strings.Replace(prog, \"keep-\", \"keep\", 1) + \"-config\"\n\targs = loader.MungeLegacyConfigArgs(log, args, legacyFlag)\n\n\tversionFlag := flags.Bool(\"version\", false, \"Write version information to stdout and exit 0\")\n\tpprofAddr := flags.String(\"pprof\", \"\", \"Serve Go profile data at `[addr]:port`\")\n\tif ok, code := cmd.ParseFlags(flags, prog, args, \"\", stderr); !ok {\n\t\treturn code\n\t} else if *versionFlag {\n\t\treturn cmd.Version.RunCommand(prog, args, stdin, stdout, stderr)\n\t}\n\n\tif *pprofAddr != \"\" {\n\t\tgo func() {\n\t\t\tlog.Println(http.ListenAndServe(*pprofAddr, nil))\n\t\t}()\n\t}\n\n\tif strings.HasSuffix(prog, \"controller\") {\n\t\t// Some config-loader checks try to make API calls via\n\t\t// controller. 
Those can't be expected to work if this\n\t\t// process _is_ the controller: we haven't started an\n\t\t// http server yet.\n\t\tloader.SkipAPICalls = true\n\t}\n\n\tcfg, err := loader.Load()\n\tif err != nil {\n\t\treturn 1\n\t}\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\treturn 1\n\t}\n\n\t// Now that we've read the config, replace the bootstrap\n\t// logger with a new one according to the logging config.\n\tlog = ctxlog.New(stderr, cluster.SystemLogs.Format, cluster.SystemLogs.LogLevel)\n\tlogger := log.WithFields(logrus.Fields{\n\t\t\"PID\":       os.Getpid(),\n\t\t\"ClusterID\": cluster.ClusterID,\n\t})\n\tctx := ctxlog.Context(c.ctx, logger)\n\n\t// Check whether the caller is attempting to use environment\n\t// variables to override cluster configuration, and advise\n\t// that won't work.\n\t{\n\t\tenvhost := os.Getenv(\"ARVADOS_API_HOST\")\n\t\tif envhost != \"\" && envhost != cluster.Services.Controller.ExternalURL.Host {\n\t\t\tlogger.Warn(\"ARVADOS_API_HOST environment variable is present, but will not be used\")\n\t\t}\n\t\tenvins := os.Getenv(\"ARVADOS_API_HOST_INSECURE\")\n\t\tif envins != \"\" && (envins != \"0\") != cluster.TLS.Insecure {\n\t\t\tlogger.Warn(\"ARVADOS_API_HOST_INSECURE environment variable is present, but will not be used\")\n\t\t}\n\t\tenvtoken := os.Getenv(\"ARVADOS_API_TOKEN\")\n\t\tif envtoken != \"\" && envtoken != cluster.SystemRootToken {\n\t\t\tlogger.Warn(\"ARVADOS_API_TOKEN environment variable is present, but will not be used\")\n\t\t}\n\t}\n\n\tlistenURL, internalURL, err := getListenAddr(cluster.Services, c.svcName, log)\n\tif err != nil {\n\t\treturn 1\n\t}\n\tctx = context.WithValue(ctx, contextKeyURL{}, internalURL)\n\n\treg := prometheus.NewRegistry()\n\tloader.RegisterMetrics(reg)\n\n\t// arvados_version_running{version=\"1.2.3~4\"} 1.0\n\tmVersion := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tName:      \"version_running\",\n\t\tHelp:      \"Indicated version is running.\",\n\t}, []string{\"version\"})\n\tmVersion.WithLabelValues(cmd.Version.String()).Set(1)\n\treg.MustRegister(mVersion)\n\n\thandler := c.newHandler(ctx, cluster, cluster.SystemRootToken, reg)\n\tif err = handler.CheckHealth(); err != nil {\n\t\treturn 1\n\t}\n\n\tinstrumented := httpserver.Instrument(reg, log,\n\t\thttpserver.HandlerWithDeadline(cluster.API.RequestTimeout.Duration(),\n\t\t\thttpserver.AddRequestIDs(\n\t\t\t\thttpserver.Inspect(reg, cluster.ManagementToken,\n\t\t\t\t\thttpserver.LogRequests(\n\t\t\t\t\t\tinterceptHealthReqs(cluster.ManagementToken, handler.CheckHealth,\n\t\t\t\t\t\t\tc.requestLimiter(handler, cluster, reg)))))))\n\tsrv := &httpserver.Server{\n\t\tServer: http.Server{\n\t\t\tHandler:     ifCollectionInHost(instrumented, instrumented.ServeAPI(cluster.ManagementToken, instrumented)),\n\t\t\tBaseContext: func(net.Listener) context.Context { return ctx },\n\t\t},\n\t\tAddr: listenURL.Host,\n\t}\n\tif listenURL.Scheme == \"https\" || listenURL.Scheme == \"wss\" {\n\t\ttlsconfig, err := makeTLSConfig(cluster, logger)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Errorf(\"cannot start %s service on %s\", c.svcName, listenURL.String())\n\t\t\treturn 1\n\t\t}\n\t\tsrv.TLSConfig = tlsconfig\n\t}\n\terr = srv.Start()\n\tif err != nil {\n\t\treturn 1\n\t}\n\tlogger.WithFields(logrus.Fields{\n\t\t\"URL\":     listenURL,\n\t\t\"Listen\":  srv.Addr,\n\t\t\"Service\": c.svcName,\n\t\t\"Version\": cmd.Version.String(),\n\t}).Info(\"listening\")\n\tif _, err := daemon.SdNotify(false, \"READY=1\"); 
err != nil {\n\t\tlogger.WithError(err).Errorf(\"error notifying init daemon\")\n\t}\n\tgo func() {\n\t\t// Shut down server if caller cancels context\n\t\t<-ctx.Done()\n\t\tsrv.Close()\n\t}()\n\tgo func() {\n\t\t// Shut down server if handler dies\n\t\t<-handler.Done()\n\t\tsrv.Close()\n\t}()\n\tgo c.requestQueueDumpCheck(cluster, prog, reg, &srv.Server, logger)\n\terr = srv.Wait()\n\tif err != nil {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\n// If SystemLogs.RequestQueueDumpDirectory is set, monitor the\n// server's incoming HTTP request limiters. When the number of\n// concurrent requests in any queue (\"api\" or \"tunnel\") exceeds 90% of\n// its maximum slots, write the /_inspect/requests data to a JSON file\n// in the specified directory.\nfunc (c *command) requestQueueDumpCheck(cluster *arvados.Cluster, prog string, reg *prometheus.Registry, srv *http.Server, logger logrus.FieldLogger) {\n\toutdir := cluster.SystemLogs.RequestQueueDumpDirectory\n\tif outdir == \"\" || cluster.ManagementToken == \"\" {\n\t\treturn\n\t}\n\tlogger = logger.WithField(\"worker\", \"RequestQueueDump\")\n\toutfile := outdir + \"/\" + prog + \"-requests.json\"\n\tfor range time.NewTicker(requestQueueDumpCheckInterval).C {\n\t\tmfs, err := reg.Gather()\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Warn(\"error getting metrics\")\n\t\t\tcontinue\n\t\t}\n\t\tcur := map[string]int{} // queue label => current\n\t\tmax := map[string]int{} // queue label => max\n\t\tfor _, mf := range mfs {\n\t\t\tfor _, m := range mf.GetMetric() {\n\t\t\t\tfor _, ml := range m.GetLabel() {\n\t\t\t\t\tif ml.GetName() == \"queue\" {\n\t\t\t\t\t\tn := int(m.GetGauge().GetValue())\n\t\t\t\t\t\tif name := mf.GetName(); name == \"arvados_concurrent_requests\" {\n\t\t\t\t\t\t\tcur[*ml.Value] = n\n\t\t\t\t\t\t} else if name == \"arvados_max_concurrent_requests\" {\n\t\t\t\t\t\t\tmax[*ml.Value] = n\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdump := false\n\t\tfor queue, n := range cur {\n\t\t\tif n > 0 && max[queue] > 0 && n >= max[queue]*9/10 {\n\t\t\t\tdump = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif dump {\n\t\t\treq, err := http.NewRequest(\"GET\", \"/_inspect/requests\", nil)\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Warn(\"error in http.NewRequest\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treq.Header.Set(\"Authorization\", \"Bearer \"+cluster.ManagementToken)\n\t\t\tresp := httptest.NewRecorder()\n\t\t\tsrv.Handler.ServeHTTP(resp, req)\n\t\t\tif code := resp.Result().StatusCode; code != http.StatusOK {\n\t\t\t\tlogger.WithField(\"StatusCode\", code).Warn(\"error getting /_inspect/requests\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = os.WriteFile(outfile, resp.Body.Bytes(), 0777)\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Warn(\"error writing file\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Set up a httpserver.RequestLimiter with separate queues/streams for\n// API requests (obeying MaxConcurrentRequests etc) and gateway tunnel\n// requests (obeying MaxGatewayTunnels).\nfunc (c *command) requestLimiter(handler http.Handler, cluster *arvados.Cluster, reg *prometheus.Registry) http.Handler {\n\tmaxReqs := cluster.API.MaxConcurrentRequests\n\tif maxRails := cluster.API.MaxConcurrentRailsRequests; maxRails > 0 &&\n\t\t(maxRails < maxReqs || maxReqs == 0) &&\n\t\tc.svcName == arvados.ServiceNameController {\n\t\t// Ideally, we would accept up to\n\t\t// MaxConcurrentRequests, and apply the\n\t\t// MaxConcurrentRailsRequests limit only for requests\n\t\t// that require calling upstream to RailsAPI. 
But for\n\t\t// now we make the simplifying assumption that every\n\t\t// controller request causes an upstream RailsAPI\n\t\t// request.\n\t\tmaxReqs = maxRails\n\t}\n\trqAPI := &httpserver.RequestQueue{\n\t\tLabel:                      \"api\",\n\t\tMaxConcurrent:              maxReqs,\n\t\tMaxQueue:                   cluster.API.MaxQueuedRequests,\n\t\tMaxQueueTimeForMinPriority: cluster.API.MaxQueueTimeForLockRequests.Duration(),\n\t}\n\trqTunnel := &httpserver.RequestQueue{\n\t\tLabel:         \"tunnel\",\n\t\tMaxConcurrent: cluster.API.MaxGatewayTunnels,\n\t\tMaxQueue:      0,\n\t}\n\treturn &httpserver.RequestLimiter{\n\t\tHandler:  handler,\n\t\tPriority: c.requestPriority,\n\t\tRegistry: reg,\n\t\tQueue: func(req *http.Request) *httpserver.RequestQueue {\n\t\t\tif req.Method == http.MethodPost && reTunnelPath.MatchString(req.URL.Path) {\n\t\t\t\treturn rqTunnel\n\t\t\t} else {\n\t\t\t\treturn rqAPI\n\t\t\t}\n\t\t},\n\t}\n}\n\n// reTunnelPath matches paths of API endpoints that go in the \"tunnel\"\n// queue.\nvar reTunnelPath = regexp.MustCompile(func() string {\n\trePathVar := regexp.MustCompile(`{.*?}`)\n\tout := \"\"\n\tfor _, endpoint := range []arvados.APIEndpoint{\n\t\tarvados.EndpointContainerGatewayTunnel,\n\t\tarvados.EndpointContainerGatewayTunnelCompat,\n\t\tarvados.EndpointContainerSSH,\n\t\tarvados.EndpointContainerSSHCompat,\n\t} {\n\t\tif out != \"\" {\n\t\t\tout += \"|\"\n\t\t}\n\t\tout += `\\Q/` + rePathVar.ReplaceAllString(endpoint.Path, `\\E[^/]*\\Q`) + `\\E`\n\t}\n\treturn \"^(\" + out + \")$\"\n}())\n\nfunc (c *command) requestPriority(req *http.Request, queued time.Time) int64 {\n\tswitch {\n\tcase req.Method == http.MethodPost && strings.HasPrefix(req.URL.Path, \"/arvados/v1/containers/\") && strings.HasSuffix(req.URL.Path, \"/lock\"):\n\t\t// Return 503 immediately instead of queueing. We want\n\t\t// to send feedback to dispatchcloud ASAP to stop\n\t\t// bringing up new containers.\n\t\treturn httpserver.MinPriority\n\tcase req.Header.Get(\"Origin\") != \"\":\n\t\t// Handle interactive requests first. Positive\n\t\t// priority is called \"high\" in aggregate metrics.\n\t\treturn 1\n\tdefault:\n\t\t// Zero priority is called \"normal\" in aggregate\n\t\t// metrics.\n\t\treturn 0\n\t}\n}\n\n// If an incoming request's target vhost has an embedded collection\n// UUID or PDH, handle it with hTrue, otherwise handle it with\n// hFalse.\n//\n// Facilitates routing \"http://collections.example/metrics\" to metrics\n// and \"http://{uuid}.collections.example/metrics\" to a file in a\n// collection.\nfunc ifCollectionInHost(hTrue, hFalse http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif arvados.CollectionIDFromDNSName(r.Host) != \"\" {\n\t\t\thTrue.ServeHTTP(w, r)\n\t\t} else {\n\t\t\thFalse.ServeHTTP(w, r)\n\t\t}\n\t})\n}\n\nfunc interceptHealthReqs(mgtToken string, checkHealth func() error, next http.Handler) http.Handler {\n\tmux := httprouter.New()\n\tmux.Handler(\"GET\", \"/_health/ping\", &health.Handler{\n\t\tToken:  mgtToken,\n\t\tPrefix: \"/_health/\",\n\t\tRoutes: health.Routes{\"ping\": checkHealth},\n\t})\n\tmux.NotFound = next\n\treturn ifCollectionInHost(next, mux)\n}\n\n// Determine listenURL (addr:port where server should bind) and\n// internalURL (target url that client should connect to) for a\n// service.\n//\n// If the config does not specify ListenURL, we check all of the\n// configured InternalURLs. 
If there is exactly one that matches our\n// hostname, or exactly one that matches a local interface address,\n// then we use that as listenURL.\n//\n// Note that listenURL and internalURL may use different protocols\n// (e.g., listenURL is http, but the service sits behind a proxy, so\n// clients connect using https).\nfunc getListenAddr(svcs arvados.Services, prog arvados.ServiceName, log logrus.FieldLogger) (arvados.URL, arvados.URL, error) {\n\tsvc, ok := svcs.Map()[prog]\n\tif !ok {\n\t\treturn arvados.URL{}, arvados.URL{}, fmt.Errorf(\"unknown service name %q\", prog)\n\t}\n\n\tif want := os.Getenv(\"ARVADOS_SERVICE_INTERNAL_URL\"); want != \"\" {\n\t\turl, err := url.Parse(want)\n\t\tif err != nil {\n\t\t\treturn arvados.URL{}, arvados.URL{}, fmt.Errorf(\"$ARVADOS_SERVICE_INTERNAL_URL (%q): %s\", want, err)\n\t\t}\n\t\tif url.Path == \"\" {\n\t\t\turl.Path = \"/\"\n\t\t}\n\t\tfor internalURL, conf := range svc.InternalURLs {\n\t\t\tif internalURL.String() == url.String() {\n\t\t\t\tlistenURL := conf.ListenURL\n\t\t\t\tif listenURL.Host == \"\" {\n\t\t\t\t\tlistenURL = internalURL\n\t\t\t\t}\n\t\t\t\treturn listenURL, internalURL, nil\n\t\t\t}\n\t\t}\n\t\tlog.Warnf(\"possible configuration error: listening on %s (from $ARVADOS_SERVICE_INTERNAL_URL) even though configuration does not have a matching InternalURLs entry\", url)\n\t\tinternalURL := arvados.URL(*url)\n\t\treturn internalURL, internalURL, nil\n\t}\n\n\terrors := []string{}\n\tfor internalURL, conf := range svc.InternalURLs {\n\t\tlistenURL := conf.ListenURL\n\t\tif listenURL.Host == \"\" {\n\t\t\t// If ListenURL is not specified, assume\n\t\t\t// InternalURL is also usable as the listening\n\t\t\t// proto/addr/port (i.e., simple case with no\n\t\t\t// intermediate proxy/routing)\n\t\t\tlistenURL = internalURL\n\t\t}\n\t\tlistenAddr := listenURL.Host\n\t\tif _, _, err := net.SplitHostPort(listenAddr); err != nil {\n\t\t\t// url \"https://foo.example/\" (with no\n\t\t\t// explicit port name/number) means listen on\n\t\t\t// the well-known port for the specified\n\t\t\t// protocol, \"foo.example:https\".\n\t\t\tport := listenURL.Scheme\n\t\t\tif port == \"ws\" || port == \"wss\" {\n\t\t\t\tport = \"http\" + port[2:]\n\t\t\t}\n\t\t\tlistenAddr = net.JoinHostPort(listenAddr, port)\n\t\t}\n\t\tlistener, err := net.Listen(\"tcp\", listenAddr)\n\t\tif err == nil {\n\t\t\tlistener.Close()\n\t\t\treturn listenURL, internalURL, nil\n\t\t} else if strings.Contains(err.Error(), \"cannot assign requested address\") {\n\t\t\t// If 'Host' specifies a different server than\n\t\t\t// the current one, it'll resolve the hostname\n\t\t\t// to IP address, and then fail because it\n\t\t\t// can't bind an IP address it doesn't own.\n\t\t\tcontinue\n\t\t} else {\n\t\t\terrors = append(errors, fmt.Sprintf(\"%s: %s\", listenURL, err))\n\t\t}\n\t}\n\tif len(errors) > 0 {\n\t\treturn arvados.URL{}, arvados.URL{}, fmt.Errorf(\"could not enable the %q service on this host: %s\", prog, strings.Join(errors, \"; \"))\n\t}\n\treturn arvados.URL{}, arvados.URL{}, fmt.Errorf(\"configuration does not enable the %q service on this host\", prog)\n}\n\ntype contextKeyURL struct{}\n\nfunc URLFromContext(ctx context.Context) (arvados.URL, bool) {\n\tu, ok := ctx.Value(contextKeyURL{}).(arvados.URL)\n\treturn u, ok\n}\n"
  },
  {
    "path": "lib/service/cmd_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// package service provides a cmd.Handler that brings up a system service.\npackage service\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&Suite{})\n\ntype Suite struct{}\ntype key int\n\nconst (\n\tcontextKey key = iota\n)\n\nfunc unusedPort(c *check.C) string {\n\t// Find an available port on the testing host, so the test\n\t// cases don't get confused by \"already in use\" errors.\n\tlistener, err := net.Listen(\"tcp\", \":\")\n\tc.Assert(err, check.IsNil)\n\tlistener.Close()\n\t_, port, err := net.SplitHostPort(listener.Addr().String())\n\tc.Assert(err, check.IsNil)\n\treturn port\n}\n\nfunc (*Suite) TestGetListenAddress(c *check.C) {\n\tport := unusedPort(c)\n\tdefer os.Unsetenv(\"ARVADOS_SERVICE_INTERNAL_URL\")\n\tfor idx, trial := range []struct {\n\t\t// internalURL => listenURL, both with trailing \"/\"\n\t\t// because config loader always adds it\n\t\tinternalURLs     map[string]string\n\t\tenvVar           string\n\t\texpectErrorMatch string\n\t\texpectLogsMatch  string\n\t\texpectListen     string\n\t\texpectInternal   string\n\t}{\n\t\t{\n\t\t\tinternalURLs:   map[string]string{\"http://localhost:\" + port + \"/\": \"\"},\n\t\t\texpectListen:   \"http://localhost:\" + port + \"/\",\n\t\t\texpectInternal: \"http://localhost:\" + port + \"/\",\n\t\t},\n\t\t{ // implicit port 80 in InternalURLs\n\t\t\tinternalURLs:     map[string]string{\"http://localhost/\": \"\"},\n\t\t\texpectErrorMatch: `.*:80: bind: permission denied`,\n\t\t},\n\t\t{ // implicit port 443 in InternalURLs\n\t\t\tinternalURLs:   map[string]string{\"https://host.example/\": \"http://localhost:\" + port + \"/\"},\n\t\t\texpectListen:   \"http://localhost:\" + port + \"/\",\n\t\t\texpectInternal: \"https://host.example/\",\n\t\t},\n\t\t{ // implicit port 443 in ListenURL\n\t\t\tinternalURLs:     map[string]string{\"wss://host.example/\": \"wss://localhost/\"},\n\t\t\texpectErrorMatch: `.*:443: bind: permission denied`,\n\t\t},\n\t\t{\n\t\t\tinternalURLs:   map[string]string{\"https://hostname.example/\": \"http://localhost:8000/\"},\n\t\t\texpectListen:   \"http://localhost:8000/\",\n\t\t\texpectInternal: \"https://hostname.example/\",\n\t\t},\n\t\t{\n\t\t\tinternalURLs: map[string]string{\n\t\t\t\t\"https://hostname1.example/\": \"http://localhost:12435/\",\n\t\t\t\t\"https://hostname2.example/\": \"http://localhost:\" + port + \"/\",\n\t\t\t},\n\t\t\tenvVar:         \"https://hostname2.example\", // note this works despite missing trailing \"/\"\n\t\t\texpectListen:   \"http://localhost:\" + port + \"/\",\n\t\t\texpectInternal: \"https://hostname2.example/\",\n\t\t},\n\t\t{ // cannot listen on any of the ListenURLs\n\t\t\tinternalURLs: map[string]string{\n\t\t\t\t\"https://hostname1.example/\": \"http://1.2.3.4:\" + port + \"/\",\n\t\t\t\t\"https://hostname2.example/\": \"http://1.2.3.4:\" + port + \"/\",\n\t\t\t},\n\t\t\texpectErrorMatch: \"configuration does not enable the \\\"arvados-controller\\\" service on this host\",\n\t\t},\n\t\t{ // cannot 
listen on any of the (implied) ListenURLs\n\t\t\tinternalURLs: map[string]string{\n\t\t\t\t\"https://1.2.3.4/\": \"\",\n\t\t\t\t\"https://1.2.3.5/\": \"\",\n\t\t\t},\n\t\t\texpectErrorMatch: \"configuration does not enable the \\\"arvados-controller\\\" service on this host\",\n\t\t},\n\t\t{ // impossible port number\n\t\t\tinternalURLs: map[string]string{\n\t\t\t\t\"https://host.example/\": \"http://0.0.0.0:1234567\",\n\t\t\t},\n\t\t\texpectErrorMatch: `.*:1234567: listen tcp: address 1234567: invalid port`,\n\t\t},\n\t\t{\n\t\t\t// env var URL not mentioned in config = obey env var, with warning\n\t\t\tinternalURLs:    map[string]string{\"https://hostname1.example/\": \"http://localhost:8000/\"},\n\t\t\tenvVar:          \"https://hostname2.example\",\n\t\t\texpectListen:    \"https://hostname2.example/\",\n\t\t\texpectInternal:  \"https://hostname2.example/\",\n\t\t\texpectLogsMatch: `.*\\Qpossible configuration error: listening on https://hostname2.example/ (from $ARVADOS_SERVICE_INTERNAL_URL) even though configuration does not have a matching InternalURLs entry\\E.*\\n`,\n\t\t},\n\t\t{\n\t\t\t// env var + empty config = obey env var, with warning\n\t\t\tenvVar:          \"https://hostname.example\",\n\t\t\texpectListen:    \"https://hostname.example/\",\n\t\t\texpectInternal:  \"https://hostname.example/\",\n\t\t\texpectLogsMatch: `.*\\Qpossible configuration error: listening on https://hostname.example/ (from $ARVADOS_SERVICE_INTERNAL_URL) even though configuration does not have a matching InternalURLs entry\\E.*\\n`,\n\t\t},\n\t} {\n\t\tc.Logf(\"trial %d %+v\", idx, trial)\n\t\tos.Setenv(\"ARVADOS_SERVICE_INTERNAL_URL\", trial.envVar)\n\t\tvar logbuf bytes.Buffer\n\t\tlog := ctxlog.New(&logbuf, \"text\", \"info\")\n\t\tservices := arvados.Services{Controller: arvados.Service{InternalURLs: map[arvados.URL]arvados.ServiceInstance{}}}\n\t\tfor k, v := range trial.internalURLs {\n\t\t\tu, err := url.Parse(k)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tsi := arvados.ServiceInstance{}\n\t\t\tif v != \"\" {\n\t\t\t\tu, err := url.Parse(v)\n\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\tsi.ListenURL = arvados.URL(*u)\n\t\t\t}\n\t\t\tservices.Controller.InternalURLs[arvados.URL(*u)] = si\n\t\t}\n\t\tlistenURL, internalURL, err := getListenAddr(services, \"arvados-controller\", log)\n\t\tif trial.expectLogsMatch != \"\" {\n\t\t\tc.Check(logbuf.String(), check.Matches, trial.expectLogsMatch)\n\t\t}\n\t\tif trial.expectErrorMatch != \"\" {\n\t\t\tc.Check(err, check.ErrorMatches, trial.expectErrorMatch)\n\t\t\tcontinue\n\t\t}\n\t\tif !c.Check(err, check.IsNil) {\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(listenURL.String(), check.Equals, trial.expectListen)\n\t\tc.Check(internalURL.String(), check.Equals, trial.expectInternal)\n\t}\n}\n\nfunc (*Suite) TestCommand(c *check.C) {\n\tcf, err := ioutil.TempFile(\"\", \"cmd_test.\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(cf.Name())\n\tdefer cf.Close()\n\tfmt.Fprintf(cf, \"Clusters:\\n zzzzz:\\n  SystemRootToken: abcde\\n  NodeProfiles: {\\\"*\\\": {\\\"arvados-controller\\\": {Listen: \\\":1234\\\"}}}\")\n\n\thealthCheck := make(chan bool, 1)\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tcmd := Command(arvados.ServiceNameController, func(ctx context.Context, _ *arvados.Cluster, token string, reg *prometheus.Registry) Handler {\n\t\tc.Check(ctx.Value(contextKey), check.Equals, \"bar\")\n\t\tc.Check(token, check.Equals, \"abcde\")\n\t\treturn &testHandler{ctx: ctx, healthCheck: healthCheck}\n\t})\n\tcmd.(*command).ctx = 
context.WithValue(ctx, contextKey, \"bar\")\n\n\tdone := make(chan bool)\n\tvar stdin, stdout, stderr bytes.Buffer\n\n\tgo func() {\n\t\tcmd.RunCommand(\"arvados-controller\", []string{\"-config\", cf.Name()}, &stdin, &stdout, &stderr)\n\t\tclose(done)\n\t}()\n\tselect {\n\tcase <-healthCheck:\n\tcase <-done:\n\t\tc.Error(\"command exited without health check\")\n\t}\n\tcancel()\n\tc.Check(stdout.String(), check.Equals, \"\")\n\tc.Check(stderr.String(), check.Matches, `(?ms).*\"msg\":\"CheckHealth called\".*`)\n}\n\nfunc (s *Suite) TestTunnelPathRegexp(c *check.C) {\n\tc.Check(reTunnelPath.MatchString(`/arvados/v1/connect/zzzzz-dz642-aaaaaaaaaaaaaaa/gateway_tunnel`), check.Equals, true)\n\tc.Check(reTunnelPath.MatchString(`/arvados/v1/containers/zzzzz-dz642-aaaaaaaaaaaaaaa/gateway_tunnel`), check.Equals, true)\n\tc.Check(reTunnelPath.MatchString(`/arvados/v1/connect/zzzzz-dz642-aaaaaaaaaaaaaaa/ssh`), check.Equals, true)\n\tc.Check(reTunnelPath.MatchString(`/arvados/v1/containers/zzzzz-dz642-aaaaaaaaaaaaaaa/ssh`), check.Equals, true)\n\tc.Check(reTunnelPath.MatchString(`/blah/arvados/v1/containers/zzzzz-dz642-aaaaaaaaaaaaaaa/ssh`), check.Equals, false)\n\tc.Check(reTunnelPath.MatchString(`/arvados/v1/containers/zzzzz-dz642-aaaaaaaaaaaaaaa`), check.Equals, false)\n}\n\nfunc (s *Suite) TestRequestLimitsAndDumpRequests_Keepweb(c *check.C) {\n\ts.testRequestLimitAndDumpRequests(c, arvados.ServiceNameKeepweb, \"MaxConcurrentRequests\")\n}\n\nfunc (s *Suite) TestRequestLimitsAndDumpRequests_Controller(c *check.C) {\n\ts.testRequestLimitAndDumpRequests(c, arvados.ServiceNameController, \"MaxConcurrentRailsRequests\")\n}\n\nfunc (*Suite) testRequestLimitAndDumpRequests(c *check.C, serviceName arvados.ServiceName, maxReqsConfigKey string) {\n\tdefer func(orig time.Duration) { requestQueueDumpCheckInterval = orig }(requestQueueDumpCheckInterval)\n\trequestQueueDumpCheckInterval = time.Second / 10\n\n\tport := unusedPort(c)\n\ttmpdir := c.MkDir()\n\tcf, err := ioutil.TempFile(tmpdir, \"cmd_test.\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.Remove(cf.Name())\n\tdefer cf.Close()\n\n\tmax := 24\n\tmaxTunnels := 30\n\tfmt.Fprintf(cf, `\nClusters:\n zzzzz:\n  SystemRootToken: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n  ManagementToken: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n  API:\n   `+maxReqsConfigKey+`: %d\n   MaxQueuedRequests: 1\n   MaxGatewayTunnels: %d\n  SystemLogs: {RequestQueueDumpDirectory: %q}\n  Services:\n   Controller:\n    ExternalURL: \"http://localhost:`+port+`\"\n    InternalURLs: {\"http://localhost:`+port+`\": {}}\n   WebDAV:\n    ExternalURL: \"http://localhost:`+port+`\"\n    InternalURLs: {\"http://localhost:`+port+`\": {}}\n`, max, maxTunnels, tmpdir)\n\tcf.Close()\n\n\tstarted := make(chan bool, max+1)\n\thold := make(chan bool)\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif strings.Contains(r.URL.Path, \"/ssh\") || strings.Contains(r.URL.Path, \"/gateway_tunnel\") {\n\t\t\t<-hold\n\t\t} else {\n\t\t\tstarted <- true\n\t\t\t<-hold\n\t\t}\n\t})\n\thealthCheck := make(chan bool, 1)\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tcmd := Command(serviceName, func(ctx context.Context, _ *arvados.Cluster, token string, reg *prometheus.Registry) Handler {\n\t\treturn &testHandler{ctx: ctx, handler: handler, healthCheck: healthCheck}\n\t})\n\tcmd.(*command).ctx = context.WithValue(ctx, contextKey, \"bar\")\n\n\texited := make(chan bool)\n\tvar stdin, stdout, stderr bytes.Buffer\n\n\tgo func() 
{\n\t\tcmd.RunCommand(string(serviceName), []string{\"-config\", cf.Name()}, &stdin, &stdout, &stderr)\n\t\tclose(exited)\n\t}()\n\tselect {\n\tcase <-healthCheck:\n\tcase <-exited:\n\t\tc.Logf(\"%s\", stderr.String())\n\t\tc.Error(\"command exited without health check\")\n\t}\n\tclient := http.Client{}\n\tdeadline := time.Now().Add(time.Second * 2)\n\tvar activeReqs sync.WaitGroup\n\n\t// Start some API reqs\n\tvar apiResp200, apiResp503 int64\n\tfor i := 0; i < max+1; i++ {\n\t\tactiveReqs.Add(1)\n\t\tgo func() {\n\t\t\tdefer activeReqs.Done()\n\t\t\ttarget := \"http://localhost:\" + port + \"/testpath\"\n\t\t\tresp, err := client.Get(target)\n\t\t\tfor err != nil && strings.Contains(err.Error(), \"dial tcp\") && deadline.After(time.Now()) {\n\t\t\t\ttime.Sleep(time.Second / 100)\n\t\t\t\tresp, err = client.Get(target)\n\t\t\t}\n\t\t\tif c.Check(err, check.IsNil) {\n\t\t\t\tif resp.StatusCode == http.StatusOK {\n\t\t\t\t\tatomic.AddInt64(&apiResp200, 1)\n\t\t\t\t} else if resp.StatusCode == http.StatusServiceUnavailable {\n\t\t\t\t\tatomic.AddInt64(&apiResp503, 1)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\t// Start some gateway tunnel reqs that don't count toward our\n\t// API req limit\n\textraTunnelReqs := 20\n\tvar tunnelResp200, tunnelResp503 int64\n\tvar paths = []string{\n\t\t\"/\" + strings.Replace(arvados.EndpointContainerSSH.Path, \"{uuid}\", \"z1234-dz642-abcdeabcdeabcde\", -1),\n\t\t\"/\" + strings.Replace(arvados.EndpointContainerSSHCompat.Path, \"{uuid}\", \"z1234-dz642-abcdeabcdeabcde\", -1),\n\t\t\"/\" + strings.Replace(arvados.EndpointContainerGatewayTunnel.Path, \"{uuid}\", \"z1234-dz642-abcdeabcdeabcde\", -1),\n\t\t\"/\" + strings.Replace(arvados.EndpointContainerGatewayTunnelCompat.Path, \"{uuid}\", \"z1234-dz642-abcdeabcdeabcde\", -1),\n\t}\n\tfor i := 0; i < maxTunnels+extraTunnelReqs; i++ {\n\t\ti := i\n\t\tactiveReqs.Add(1)\n\t\tgo func() {\n\t\t\tdefer activeReqs.Done()\n\t\t\ttarget := \"http://localhost:\" + port + paths[i%len(paths)]\n\t\t\tresp, err := client.Post(target, \"application/octet-stream\", nil)\n\t\t\tfor err != nil && strings.Contains(err.Error(), \"dial tcp\") && deadline.After(time.Now()) {\n\t\t\t\ttime.Sleep(time.Second / 100)\n\t\t\t\tresp, err = client.Post(target, \"application/octet-stream\", nil)\n\t\t\t}\n\t\t\tif c.Check(err, check.IsNil) {\n\t\t\t\tif resp.StatusCode == http.StatusOK {\n\t\t\t\t\tatomic.AddInt64(&tunnelResp200, 1)\n\t\t\t\t} else if resp.StatusCode == http.StatusServiceUnavailable {\n\t\t\t\t\tatomic.AddInt64(&tunnelResp503, 1)\n\t\t\t\t} else {\n\t\t\t\t\tc.Errorf(\"tunnel response code %d\", resp.StatusCode)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\tfor i := 0; i < max; i++ {\n\t\tselect {\n\t\tcase <-started:\n\t\tcase <-time.After(time.Second):\n\t\t\tc.Logf(\"%s\", stderr.String())\n\t\t\tc.Logf(\"apiResp200 %d\", apiResp200)\n\t\t\tc.Logf(\"apiResp503 %d\", apiResp503)\n\t\t\tc.Logf(\"tunnelResp200 %d\", tunnelResp200)\n\t\t\tc.Logf(\"tunnelResp503 %d\", tunnelResp503)\n\t\t\tc.Fatal(\"timed out\")\n\t\t}\n\t}\n\tfor delay := time.Second / 100; ; delay = delay * 2 {\n\t\ttime.Sleep(delay)\n\t\tj, err := os.ReadFile(tmpdir + \"/\" + string(serviceName) + \"-requests.json\")\n\t\tif os.IsNotExist(err) && deadline.After(time.Now()) {\n\t\t\tcontinue\n\t\t}\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Logf(\"stderr:\\n%s\", stderr.String())\n\t\tc.Logf(\"json:\\n%s\", string(j))\n\n\t\tvar loaded []struct{ URL string }\n\t\terr = json.Unmarshal(j, &loaded)\n\t\tc.Check(err, check.IsNil)\n\n\t\tfor i := 0; i < len(loaded); i++ 
{\n\t\t\tif strings.Contains(loaded[i].URL, \"/ssh\") || strings.Contains(loaded[i].URL, \"/gateway_tunnel\") {\n\t\t\t\t// Filter out a gateway tunnel req\n\t\t\t\t// that doesn't count toward our API\n\t\t\t\t// req limit\n\t\t\t\tif i < len(loaded)-1 {\n\t\t\t\t\tcopy(loaded[i:], loaded[i+1:])\n\t\t\t\t\ti--\n\t\t\t\t}\n\t\t\t\tloaded = loaded[:len(loaded)-1]\n\t\t\t}\n\t\t}\n\n\t\tif len(loaded) < max {\n\t\t\t// Dumped when #requests was >90% but <100% of\n\t\t\t// limit. If we stop now, we won't be able to\n\t\t\t// confirm (below) that management endpoints\n\t\t\t// are still accessible when normal requests\n\t\t\t// are at 100%.\n\t\t\tc.Logf(\"loaded dumped requests, but len %d < max %d -- still waiting\", len(loaded), max)\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(loaded, check.HasLen, max+1)\n\t\tc.Check(loaded[0].URL, check.Equals, \"/testpath\")\n\t\tbreak\n\t}\n\n\tfor _, path := range []string{\"/_inspect/requests\", \"/metrics\"} {\n\t\treq, err := http.NewRequest(\"GET\", \"http://localhost:\"+port+\"\"+path, nil)\n\t\tc.Assert(err, check.IsNil)\n\t\treq.Header.Set(\"Authorization\", \"Bearer bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\")\n\t\tresp, err := client.Do(req)\n\t\tif !c.Check(err, check.IsNil) {\n\t\t\tbreak\n\t\t}\n\t\tc.Logf(\"got response for %s\", path)\n\t\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\t\tbuf, err := ioutil.ReadAll(resp.Body)\n\t\tc.Check(err, check.IsNil)\n\t\tswitch path {\n\t\tcase \"/metrics\":\n\t\t\tc.Check(string(buf), check.Matches, `(?ms).*arvados_concurrent_requests{queue=\"api\"} `+fmt.Sprintf(\"%d\", max)+`\\n.*`)\n\t\t\tc.Check(string(buf), check.Matches, `(?ms).*arvados_queued_requests{priority=\"normal\",queue=\"api\"} 1\\n.*`)\n\t\tcase \"/_inspect/requests\":\n\t\t\tc.Check(string(buf), check.Matches, `(?ms).*\"URL\":\"/testpath\".*`)\n\t\tdefault:\n\t\t\tc.Error(\"oops, testing bug\")\n\t\t}\n\t}\n\tclose(hold)\n\tactiveReqs.Wait()\n\tc.Check(int(apiResp200), check.Equals, max+1)\n\tc.Check(int(apiResp503), check.Equals, 0)\n\tc.Check(int(tunnelResp200), check.Equals, maxTunnels)\n\tc.Check(int(tunnelResp503), check.Equals, extraTunnelReqs)\n\tcancel()\n}\n\nfunc (*Suite) TestTLS(c *check.C) {\n\tport := unusedPort(c)\n\tcwd, err := os.Getwd()\n\tc.Assert(err, check.IsNil)\n\n\tstdin := bytes.NewBufferString(`\nClusters:\n zzzzz:\n  SystemRootToken: abcde\n  Services:\n   Controller:\n    ExternalURL: \"https://localhost:` + port + `\"\n    InternalURLs: {\"https://localhost:` + port + `\": {}}\n  TLS:\n   Key: file://` + cwd + `/../../services/api/tmp/self-signed.key\n   Certificate: file://` + cwd + `/../../services/api/tmp/self-signed.pem\n`)\n\n\tcalled := make(chan bool)\n\tcmd := Command(arvados.ServiceNameController, func(ctx context.Context, _ *arvados.Cluster, token string, reg *prometheus.Registry) Handler {\n\t\treturn &testHandler{handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tw.Write([]byte(\"ok\"))\n\t\t\tclose(called)\n\t\t})}\n\t})\n\n\texited := make(chan bool)\n\tvar stdout, stderr bytes.Buffer\n\tgo func() {\n\t\tcmd.RunCommand(\"arvados-controller\", []string{\"-config\", \"-\"}, stdin, &stdout, &stderr)\n\t\tclose(exited)\n\t}()\n\tgot := make(chan bool)\n\tgo func() {\n\t\tdefer close(got)\n\t\tclient := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}\n\t\tfor range time.NewTicker(time.Millisecond).C {\n\t\t\tresp, err := client.Get(\"https://localhost:\" + port)\n\t\t\tif err != nil 
{\n\t\t\t\tc.Log(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tc.Logf(\"status %d, body %s\", resp.StatusCode, string(body))\n\t\t\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\t\t\tbreak\n\t\t}\n\t}()\n\tselect {\n\tcase <-called:\n\tcase <-exited:\n\t\tc.Error(\"command exited without calling handler\")\n\tcase <-time.After(time.Second):\n\t\tc.Error(\"timed out\")\n\t}\n\tselect {\n\tcase <-got:\n\tcase <-exited:\n\t\tc.Error(\"command exited before client received response\")\n\tcase <-time.After(time.Second):\n\t\tc.Error(\"timed out\")\n\t}\n\tc.Log(stderr.String())\n}\n\ntype testHandler struct {\n\tctx         context.Context\n\thandler     http.Handler\n\thealthCheck chan bool\n}\n\nfunc (th *testHandler) Done() <-chan struct{}                            { return nil }\nfunc (th *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { th.handler.ServeHTTP(w, r) }\nfunc (th *testHandler) CheckHealth() error {\n\tctxlog.FromContext(th.ctx).Info(\"CheckHealth called\")\n\tselect {\n\tcase th.healthCheck <- true:\n\tdefault:\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "lib/service/error.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage service\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// ErrorHandler returns a Handler that reports itself as unhealthy and\n// responds 500 to all requests.  ErrorHandler itself logs the given\n// error once, and the handler logs it again for each incoming\n// request.\nfunc ErrorHandler(ctx context.Context, _ *arvados.Cluster, err error) Handler {\n\tlogger := ctxlog.FromContext(ctx)\n\tlogger.WithError(err).Error(\"unhealthy service\")\n\treturn errorHandler{err, logger}\n}\n\ntype errorHandler struct {\n\terr    error\n\tlogger logrus.FieldLogger\n}\n\nfunc (eh errorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\teh.logger.WithError(eh.err).Error(\"unhealthy service\")\n\thttp.Error(w, \"\", http.StatusInternalServerError)\n}\n\nfunc (eh errorHandler) CheckHealth() error {\n\treturn eh.err\n}\n\n// Done returns a closed channel to indicate the service has\n// stopped/failed.\nfunc (eh errorHandler) Done() <-chan struct{} {\n\treturn doneChannel\n}\n\nvar doneChannel = func() <-chan struct{} {\n\tdone := make(chan struct{})\n\tclose(done)\n\treturn done\n}()\n"
  },
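  {
    "path": "lib/service/error_example_test.go",
    "content": "// Illustrative sketch (not part of the upstream tree): demonstrates\n// the contract of ErrorHandler in error.go -- Done() returns an\n// already-closed channel, so a supervisor waiting on it proceeds\n// immediately, and CheckHealth() reports the original error.\n\npackage service_test\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\n\t\"git.arvados.org/arvados.git/lib/service\"\n)\n\nfunc ExampleErrorHandler() {\n\th := service.ErrorHandler(context.Background(), nil, errors.New(\"config file unreadable\"))\n\t// Done() is a closed channel, so this receive returns immediately.\n\t<-h.Done()\n\tfmt.Println(\"health:\", h.CheckHealth())\n\t// Output: health: config file unreadable\n}\n"
  },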
  {
    "path": "lib/service/log.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage service\n\nimport (\n\t\"bytes\"\n\t\"io\"\n)\n\ntype LogPrefixer struct {\n\tio.Writer\n\tPrefix []byte\n\tdid    bool\n}\n\nfunc (lp *LogPrefixer) Write(p []byte) (int, error) {\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\tvar out []byte\n\tif !lp.did {\n\t\tout = append(out, lp.Prefix...)\n\t}\n\tlp.did = p[len(p)-1] != '\\n'\n\tout = append(out, bytes.Replace(p[:len(p)-1], []byte(\"\\n\"), append([]byte(\"\\n\"), lp.Prefix...), -1)...)\n\tout = append(out, p[len(p)-1])\n\t_, err := lp.Writer.Write(out)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn len(p), nil\n}\n"
  },
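  {
    "path": "lib/service/log_example_test.go",
    "content": "// Illustrative sketch (not part of the upstream tree): shows what\n// LogPrefixer in log.go does with multi-line and partial writes --\n// the prefix is inserted at the start of every output line, and a\n// write that ends mid-line suppresses the prefix on the next write.\n\npackage service_test\n\nimport (\n\t\"os\"\n\n\t\"git.arvados.org/arvados.git/lib/service\"\n)\n\nfunc ExampleLogPrefixer() {\n\tlp := &service.LogPrefixer{Writer: os.Stdout, Prefix: []byte(\"[keepstore] \")}\n\tlp.Write([]byte(\"starting\\nlistening on :25107\\n\"))\n\tlp.Write([]byte(\"request \"))    // this write ends mid-line...\n\tlp.Write([]byte(\"completed\\n\")) // ...so no prefix is added here\n\t// Output:\n\t// [keepstore] starting\n\t// [keepstore] listening on :25107\n\t// [keepstore] request completed\n}\n"
  },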
  {
    "path": "lib/service/tls.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage service\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/crypto/acme/autocert\"\n)\n\nfunc makeTLSConfig(cluster *arvados.Cluster, logger logrus.FieldLogger) (*tls.Config, error) {\n\tif cluster.TLS.ACME.Server != \"\" {\n\t\treturn makeAutocertConfig(cluster, logger)\n\t} else {\n\t\treturn makeFileLoaderConfig(cluster, logger)\n\t}\n}\n\nvar errCertUnavailable = errors.New(\"certificate unavailable, waiting for supervisor to update cache\")\n\ntype readonlyDirCache autocert.DirCache\n\nfunc (c readonlyDirCache) Get(ctx context.Context, name string) ([]byte, error) {\n\tdata, err := autocert.DirCache(c).Get(ctx, name)\n\tif err != nil {\n\t\t// Returning an error other than autocert.ErrCacheMiss\n\t\t// causes GetCertificate() to fail early instead of\n\t\t// trying to obtain a certificate itself (which\n\t\t// wouldn't work because we're not in a position to\n\t\t// answer challenges).\n\t\treturn nil, errCertUnavailable\n\t}\n\treturn data, nil\n}\n\nfunc (c readonlyDirCache) Put(ctx context.Context, name string, data []byte) error {\n\treturn fmt.Errorf(\"(bug?) (readonlyDirCache)Put(%s) called\", name)\n}\n\nfunc (c readonlyDirCache) Delete(ctx context.Context, name string) error {\n\treturn nil\n}\n\nfunc makeAutocertConfig(cluster *arvados.Cluster, logger logrus.FieldLogger) (*tls.Config, error) {\n\tmgr := &autocert.Manager{\n\t\tCache:  readonlyDirCache(\"/var/lib/arvados/tmp/autocert\"),\n\t\tPrompt: autocert.AcceptTOS,\n\t\t// HostPolicy accepts all names because this Manager\n\t\t// doesn't request certs. Whoever writes certs to our\n\t\t// cache is effectively responsible for HostPolicy.\n\t\tHostPolicy: func(ctx context.Context, host string) error { return nil },\n\t\t// Keep using whatever's in the cache as long as\n\t\t// possible. 
Assume some other process (see lib/boot)\n\t\t// handles renewals.\n\t\tRenewBefore: time.Second,\n\t}\n\treturn mgr.TLSConfig(), nil\n}\n\nfunc makeFileLoaderConfig(cluster *arvados.Cluster, logger logrus.FieldLogger) (*tls.Config, error) {\n\tcurrentCert := make(chan *tls.Certificate, 1)\n\tloaded := false\n\n\tkey := strings.TrimPrefix(cluster.TLS.Key, \"file://\")\n\tcert := strings.TrimPrefix(cluster.TLS.Certificate, \"file://\")\n\n\tupdate := func() error {\n\t\tcert, err := tls.LoadX509KeyPair(cert, key)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error loading X509 key pair: %s\", err)\n\t\t}\n\t\tif loaded {\n\t\t\t// Throw away old cert\n\t\t\t<-currentCert\n\t\t}\n\t\tcurrentCert <- &cert\n\t\tloaded = true\n\t\treturn nil\n\t}\n\terr := update()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treload := make(chan os.Signal, 1)\n\tsignal.Notify(reload, syscall.SIGHUP)\n\tgo func() {\n\t\tfor range time.NewTicker(time.Hour).C {\n\t\t\treload <- nil\n\t\t}\n\t}()\n\tgo func() {\n\t\tfor range reload {\n\t\t\terr := update()\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Warn(\"error updating TLS certificate\")\n\t\t\t}\n\t\t}\n\t}()\n\n\t// https://blog.gopheracademy.com/advent-2016/exposing-go-on-the-internet/\n\treturn &tls.Config{\n\t\tPreferServerCipherSuites: true,\n\t\tCurvePreferences: []tls.CurveID{\n\t\t\ttls.CurveP256,\n\t\t\ttls.X25519,\n\t\t},\n\t\tMinVersion: tls.VersionTLS12,\n\t\tCipherSuites: []uint16{\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,\n\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,\n\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,\n\t\t},\n\t\tGetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {\n\t\t\tcert := <-currentCert\n\t\t\tcurrentCert <- cert\n\t\t\treturn cert, nil\n\t\t},\n\t}, nil\n}\n"
  },
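  {
    "path": "lib/service/tls_sketch.go",
    "content": "// Illustrative sketch (not part of the upstream tree): isolates the\n// certificate hot-swap pattern used by makeFileLoaderConfig in\n// tls.go. A one-element channel holds the current certificate;\n// GetCertificate takes it and puts it back, and reload() swaps it by\n// draining the channel before refilling it. demoTLSConfig is a\n// hypothetical name, and as in tls.go, reload() calls must be\n// serialized by the caller (there, a SIGHUP/ticker goroutine).\n\npackage service\n\nimport \"crypto/tls\"\n\nfunc demoTLSConfig(certFile, keyFile string) (*tls.Config, func() error, error) {\n\tcurrentCert := make(chan *tls.Certificate, 1)\n\tloaded := false\n\treload := func() error {\n\t\tcert, err := tls.LoadX509KeyPair(certFile, keyFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif loaded {\n\t\t\t<-currentCert // discard the old certificate\n\t\t}\n\t\tcurrentCert <- &cert\n\t\tloaded = true\n\t\treturn nil\n\t}\n\tif err := reload(); err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn &tls.Config{\n\t\tMinVersion: tls.VersionTLS12,\n\t\tGetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {\n\t\t\tcert := <-currentCert // take the current cert...\n\t\t\tcurrentCert <- cert   // ...and put it back for the next handshake\n\t\t\treturn cert, nil\n\t\t},\n\t}, reload, nil\n}\n"
  },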
  {
    "path": "lib/webdavfs/fs.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Package webdavfs adds special behaviors to an arvados.FileSystem so\n// it's suitable to use with a webdav server.\npackage webdavfs\n\nimport (\n\t\"context\"\n\t\"crypto/rand\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\tprand \"math/rand\"\n\t\"os\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"golang.org/x/net/webdav\"\n)\n\nvar (\n\tlockPrefix     string = uuid()\n\tnextLockSuffix int64  = prand.Int63()\n\tErrReadOnly           = errors.New(\"read-only filesystem\")\n)\n\n// FS implements a webdav.FileSystem by wrapping an\n// arvados.CollectionFilesystem.\ntype FS struct {\n\tFileSystem arvados.FileSystem\n\t// Prefix works like fs.Sub: Stat(name) calls\n\t// Stat(prefix+name) in the wrapped filesystem.\n\tPrefix string\n\t// If Writing is false, all write operations return errors.\n\t// (Opening a file for writing succeeds -- otherwise webdav\n\t// would return 404 -- but writing to it fails.)\n\tWriting bool\n\t// webdav PROPFIND reads the first few bytes of each file\n\t// whose filename extension isn't recognized, which is\n\t// prohibitively expensive: we end up fetching multiple 64MiB\n\t// blocks. Avoid this by returning EOF on all reads when\n\t// handling a PROPFIND.\n\tAlwaysReadEOF bool\n}\n\nfunc (fs *FS) Mkdir(ctx context.Context, name string, perm os.FileMode) error {\n\tif !fs.Writing {\n\t\treturn ErrReadOnly\n\t}\n\tname = strings.TrimRight(name, \"/\")\n\treturn fs.FileSystem.Mkdir(fs.Prefix+name, 0755)\n}\n\nfunc (fs *FS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (f webdav.File, err error) {\n\twriting := flag&(os.O_WRONLY|os.O_RDWR|os.O_TRUNC) != 0\n\tf, err = fs.FileSystem.OpenFile(fs.Prefix+name, flag, perm)\n\tif !fs.Writing {\n\t\t// webdav module returns 404 on all OpenFile errors,\n\t\t// but returns 405 Method Not Allowed if OpenFile()\n\t\t// succeeds but Write() or Close() fails. We'd rather\n\t\t// have 405. 
writeFailer ensures Close() fails if the\n\t\t// file is opened for writing *or* Write() is called.\n\t\tvar err error\n\t\tif writing {\n\t\t\terr = ErrReadOnly\n\t\t}\n\t\tf = writeFailer{File: f, err: err}\n\t}\n\tif fs.AlwaysReadEOF {\n\t\tf = readEOF{File: f}\n\t}\n\treturn\n}\n\nfunc (fs *FS) RemoveAll(ctx context.Context, name string) error {\n\treturn fs.FileSystem.RemoveAll(fs.Prefix + name)\n}\n\nfunc (fs *FS) Rename(ctx context.Context, oldName, newName string) error {\n\tif !fs.Writing {\n\t\treturn ErrReadOnly\n\t}\n\tif strings.HasSuffix(oldName, \"/\") {\n\t\t// WebDAV \"MOVE foo/ bar/\" means rename foo to bar.\n\t\toldName = oldName[:len(oldName)-1]\n\t\tnewName = strings.TrimSuffix(newName, \"/\")\n\t}\n\treturn fs.FileSystem.Rename(fs.Prefix+oldName, fs.Prefix+newName)\n}\n\nfunc (fs *FS) Stat(ctx context.Context, name string) (os.FileInfo, error) {\n\treturn fs.FileSystem.Stat(fs.Prefix + name)\n}\n\ntype writeFailer struct {\n\twebdav.File\n\terr error\n}\n\nfunc (wf writeFailer) Write([]byte) (int, error) {\n\twf.err = ErrReadOnly\n\treturn 0, wf.err\n}\n\nfunc (wf writeFailer) Close() error {\n\terr := wf.File.Close()\n\tif err != nil {\n\t\twf.err = err\n\t}\n\treturn wf.err\n}\n\ntype readEOF struct {\n\twebdav.File\n}\n\nfunc (readEOF) Read(p []byte) (int, error) {\n\treturn 0, io.EOF\n}\n\n// NoLockSystem implements webdav.LockSystem by returning success for\n// every possible locking operation, even though it has no side\n// effects such as actually locking anything. This works for a\n// read-only webdav filesystem because webdav locks only apply to\n// writes.\n//\n// This is more suitable than webdav.NewMemLS() for two reasons:\n// First, it allows keep-web to use one locker for all collections\n// even though coll1.vhost/foo and coll2.vhost/foo have the same path\n// but represent different resources. Additionally, it returns valid\n// tokens (RFC 2518 specifies that tokens are represented as URIs and\n// are unique across all resources for all time), which might improve\n// client compatibility.\n//\n// However, it does also permit impossible operations, like acquiring\n// conflicting locks and releasing non-existent locks.  This might\n// confuse some clients if they try to probe for correctness.\n//\n// Currently this is a moot point: the LOCK and UNLOCK methods are not\n// accepted by keep-web, so it suffices to implement the\n// webdav.LockSystem interface.\nvar NoLockSystem = noLockSystem{}\n\ntype noLockSystem struct{}\n\nfunc (noLockSystem) Confirm(time.Time, string, string, ...webdav.Condition) (func(), error) {\n\treturn noop, nil\n}\n\nfunc (noLockSystem) Create(now time.Time, details webdav.LockDetails) (token string, err error) {\n\treturn fmt.Sprintf(\"opaquelocktoken:%s-%x\", lockPrefix, atomic.AddInt64(&nextLockSuffix, 1)), nil\n}\n\nfunc (noLockSystem) Refresh(now time.Time, token string, duration time.Duration) (webdav.LockDetails, error) {\n\treturn webdav.LockDetails{}, nil\n}\n\nfunc (noLockSystem) Unlock(now time.Time, token string) error {\n\treturn nil\n}\n\nfunc noop() {}\n\n// Return a version 4 variant 1 UUID, meaning all bits are random\n// except the ones indicating the version and variant.\nfunc uuid() string {\n\tvar data [16]byte\n\tif _, err := rand.Read(data[:]); err != nil {\n\t\tpanic(err)\n\t}\n\t// variant 1: N=10xx\n\tdata[8] = data[8]&0x3f | 0x80\n\t// version 4: M=0100\n\tdata[6] = data[6]&0x0f | 0x40\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", data[0:4], data[4:6], data[6:8], data[8:10], data[10:])\n}\n"
  },
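  {
    "path": "lib/webdavfs/example_sketch.go",
    "content": "// Illustrative sketch (not part of the upstream tree): shows how FS\n// and NoLockSystem slot into golang.org/x/net/webdav. The\n// arvados.FileSystem is assumed to come from elsewhere (e.g. a\n// collection or site filesystem); newReadOnlyDAV is a hypothetical\n// helper name.\n\npackage webdavfs\n\nimport (\n\t\"net/http\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"golang.org/x/net/webdav\"\n)\n\nfunc newReadOnlyDAV(fs arvados.FileSystem, prefix string) http.Handler {\n\treturn &webdav.Handler{\n\t\tFileSystem: &FS{\n\t\t\tFileSystem:    fs,\n\t\t\tPrefix:        prefix,\n\t\t\tWriting:       false, // writes fail with 405 instead of 404\n\t\t\tAlwaysReadEOF: false, // set true when handling PROPFIND\n\t\t},\n\t\t// Locks are meaningless on a read-only filesystem, so accept\n\t\t// them all instead of maintaining a real lock table.\n\t\tLockSystem: NoLockSystem,\n\t}\n}\n"
  },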
  {
    "path": "lib/webdavfs/fs_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage webdavfs\n\nimport \"golang.org/x/net/webdav\"\n\nvar _ webdav.FileSystem = &FS{}\n"
  },
  {
    "path": "sdk/cli/.gitignore",
    "content": "arvados-cli*gem\ntmp\nGemfile.lock\n"
  },
  {
    "path": "sdk/cli/Gemfile",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nsource 'https://rubygems.org'\ngemspec\ngem 'minitest', '~> 5.0'\ngem 'rake'\n"
  },
  {
    "path": "sdk/cli/LICENSE-2.0.txt",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "sdk/cli/Rakefile",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire 'rake/testtask'\n\nRake::TestTask.new do |t|\n  t.libs << 'test'\nend\n\ndesc 'Run tests'\ntask default: :test\n"
  },
  {
    "path": "sdk/cli/arvados-cli.gemspec",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nbegin\n  git_root = \"#{__dir__}/../..\"\n  git_timestamp, git_hash = IO.popen(\n    [\"git\", \"-C\", git_root,\n     \"log\", \"-n1\", \"--first-parent\", \"--format=%ct:%H\",\n     \"--\", \"build/version-at-commit.sh\", \"sdk/ruby\", \"sdk/cli\"],\n  ) do |git_log|\n    git_log.readline.chomp.split(\":\")\n  end\nrescue Errno::ENOENT\n  $stderr.puts(\"failed to get version information: 'git' not found\")\n  exit 69  # EX_UNAVAILABLE\nend\n\nif $? != 0\n  $stderr.puts(\"failed to get version information: 'git log' exited #{$?}\")\n  exit 65  # EX_DATAERR\nend\ngit_timestamp = Time.at(git_timestamp.to_i).utc\nversion = ENV[\"ARVADOS_BUILDING_VERSION\"] || IO.popen(\n            [\"#{git_root}/build/version-at-commit.sh\", git_hash],\n          ) do |ver_out|\n  ver_out.readline.chomp.encode(\"utf-8\")\nend\nversion = version.sub(\"~dev\", \".dev\").sub(\"~rc\", \".rc\")\narv_dep_version = if dev_index = (version =~ /\\.dev/)\n                    \"~> #{version[...dev_index]}.a\"\n                  else\n                    \"= #{version}\"\n                  end\n\nGem::Specification.new do |s|\n  s.name        = 'arvados-cli'\n  s.version     = version\n  s.date        = git_timestamp.strftime(\"%Y-%m-%d\")\n  s.summary     = \"Arvados CLI tools\"\n  s.description = \"Arvados command line tools, git commit #{git_hash}\"\n  s.authors     = [\"Arvados Authors\"]\n  s.email       = 'packaging@arvados.org'\n  #s.bindir      = '.'\n  s.licenses    = ['Apache-2.0']\n  s.files       = [\"bin/arv\", \"bin/arv-tag\", \"LICENSE-2.0.txt\"]\n  s.executables << \"arv\"\n  s.executables << \"arv-tag\"\n  s.required_ruby_version = '>= 3.0.0'\n  s.add_runtime_dependency 'arvados', arv_dep_version\n  # arvados fork of google-api-client gem with old API and new\n  # compatibility fixes, built from ../ruby-google-api-client/\n  s.add_runtime_dependency('arvados-google-api-client', '~> 0.8.7.5')\n  # activesupport 7.2.0 dropped Ruby 3.0.\n  s.add_dependency('activesupport', '~> 7.1.3', '>= 7.1.3.4')\n  s.add_runtime_dependency 'json', '>= 1.7.7', '<3'\n  s.add_runtime_dependency 'optimist', '~> 3.0'\n  s.add_runtime_dependency 'andand', '~> 1.3', '>= 1.3.3'\n  s.add_runtime_dependency 'oj', '~> 3.0'\n  # curb 1.2.0 (2025) metadata still claims to support Ruby 1,\n  # changelog still claims to test against Ruby 2.7.\n  s.add_runtime_dependency 'curb', '>= 0.8', '< 2'\n  # launchy 3.0.0 dropped Ruby 2.\n  # launchy 3.1.0 stopped testing against Ruby 3.0.\n  s.add_runtime_dependency 'launchy', '>= 2.5', '< 3.1'\n  s.homepage    =\n    'https://arvados.org'\nend\n"
  },
  {
    "path": "sdk/cli/bin/arv",
    "content": "#!/usr/bin/env ruby\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Arvados cli client\n#\n# Ward Vandewege <ward@curii.com>\n\nrequire 'fileutils'\nrequire 'shellwords'\n\nif RUBY_VERSION < '1.9.3' then\n  abort <<-EOS\n#{$0.gsub(/^\\.\\//,'')} requires Ruby version 1.9.3 or higher.\n  EOS\nend\n\nbegin\n  require 'json'\n  require 'net/http'\n  require 'pp'\n  require 'tempfile'\n  require 'yaml'\nrescue LoadError => error\n  abort \"Error loading libraries: #{error}\\n\"\nend\n\nbegin\n  require 'rubygems'\n  # Load the gems with more requirements first, so we respect any version\n  # constraints they put on gems loaded later.\n  require 'arvados/google_api_client'\n  require 'active_support/inflector'\n  require 'andand'\n  require 'curb'\n  require 'oj'\n  require 'optimist'\nrescue LoadError => error\n  abort <<-EOS\n\nError loading gems: #{error}\n\nPlease install all required gems:\n\n  gem install arvados activesupport andand curb json oj optimist\n\n  EOS\nend\n\n# Search for 'ENTRY POINT' to see where things get going\n\nActiveSupport::Inflector.inflections do |inflect|\n  inflect.irregular 'specimen', 'specimens'\n  inflect.irregular 'human', 'humans'\nend\n\nmodule Kernel\n  def suppress_warnings\n    original_verbosity = $VERBOSE\n    $VERBOSE = nil\n    result = yield\n    $VERBOSE = original_verbosity\n    return result\n  end\nend\n\ndef init_config\n  # read authentication data from arvados configuration file if present\n  lineno = 0\n  config_file = File.expand_path('~/.config/arvados/settings.conf') rescue nil\n  if not config_file.nil? and File.exist? config_file then\n    File.open(config_file, 'r').each do |line|\n      lineno = lineno + 1\n      # skip comments\n      if line.match('^\\s*#') then\n        next\n      end\n      var, val = line.chomp.split('=', 2)\n      # allow environment settings to override config files.\n      if var and val\n        ENV[var] ||= val\n      else\n        warn \"#{config_file}: #{lineno}: could not parse `#{line}'\"\n      end\n    end\n  end\nend\n\n\nsubcommands = %w(copy create edit get keep tag ws)\n\ndef exec_bin bin, opts\n  bin_path = `which #{bin.shellescape}`.strip\n  if bin_path.empty?\n    raise \"#{bin}: command not found\"\n  end\n  exec bin_path, *opts\nend\n\ndef check_subcommands client, arvados, subcommand, global_opts, remaining_opts\n  case subcommand\n  when 'create'\n    arv_create client, arvados, global_opts, remaining_opts\n  when 'edit'\n    arv_edit client, arvados, global_opts, remaining_opts\n  when 'get'\n    arv_get client, arvados, global_opts, remaining_opts\n  when 'copy', 'tag', 'ws'\n    exec_bin \"arv-#{subcommand}\", remaining_opts\n  when 'keep'\n    @sub = remaining_opts.shift\n    if ['get', 'put', 'ls', 'normalize'].index @sub then\n      # Native Arvados\n      exec_bin \"arv-#{@sub}\", remaining_opts\n    elsif @sub == 'docker'\n      exec_bin \"arv-keepdocker\", remaining_opts\n    else\n      puts \"Usage: arv keep [method] [--parameters]\\n\"\n      puts \"Use 'arv keep [method] --help' to get more information about specific methods.\\n\\n\"\n      puts \"Available methods: ls, get, put, docker\"\n    end\n    abort\n  end\nend\n\ndef command_exists?(command)\n  File.executable?(command) || ENV['PATH'].split(':').any? 
{|folder| File.executable?(File.join(folder, command))}\nend\n\ndef run_editor path\n  pid = Process::fork\n  if pid.nil?\n    editor = nil\n    [ENV[\"VISUAL\"], ENV[\"EDITOR\"], \"nano\", \"vi\"].each do |e|\n      editor ||= e if e and command_exists? e\n    end\n    if editor.nil?\n      abort \"Could not find any editor to use, please set $VISUAL or $EDITOR to your desired editor.\"\n    end\n    exec editor, path\n  else\n    Process.wait pid\n  end\n\n  if $?.exitstatus != 0\n    raise \"Editor exited with status #{$?.exitstatus}\"\n  end\nend\n\ndef edit_and_commit_object initial_obj, tmp_stem, global_opts, &block\n\n  content = get_obj_content initial_obj, global_opts\n\n  tmp_file = Tempfile.new([tmp_stem, \".#{global_opts[:format]}\"])\n  tmp_file.write(content)\n  tmp_file.close\n\n  begin\n    error_text = ''\n    while true\n      begin\n        run_editor tmp_file.path\n\n        tmp_file.open\n        newcontent = tmp_file.read()\n        tmp_file.close\n\n        # Strip lines starting with '#'\n        newcontent = newcontent.lines.select {|l| !l.start_with? '#'}.join\n\n        # Load the new object\n        newobj = case global_opts[:format]\n                 when 'json'\n                   Oj.safe_load(newcontent)\n                 when 'yaml'\n                   YAML.load(newcontent)\n                 else\n                   abort \"Unrecognized format #{global_opts[:format]}\"\n                 end\n\n        yield newobj\n\n        break\n      rescue => e\n        can_retry = true\n        if e.is_a? Psych::SyntaxError\n          this_error = \"YAML error parsing your input: #{e}\"\n        elsif e.is_a? JSON::ParserError or e.is_a? Oj::ParseError\n          this_error = \"JSON error parsing your input: #{e}\"\n        elsif e.is_a? ArvadosAPIError\n          this_error = \"API responded with error #{e}\"\n        else\n          this_error = \"#{e.class}: #{e}\"\n          can_retry = false\n        end\n        puts this_error\n\n        tmp_file.open\n        newcontent = tmp_file.read()\n        tmp_file.close\n\n        if newcontent == error_text or not can_retry\n          FileUtils::cp tmp_file.path, tmp_file.path + \".saved\"\n          puts \"File is unchanged, edit aborted.\" if can_retry\n          abort \"Saved contents to \" + tmp_file.path + \".saved\"\n        else\n          tmp_file.open\n          tmp_file.truncate 0\n          error_text = this_error.to_s.lines.map {|l| '# ' + l}.join + \"\\n\"\n          error_text += \"# Please fix the error and try again.\\n\"\n          error_text += newcontent.lines.select {|l| !l.start_with? 
'#'}.join\n          tmp_file.write error_text\n          tmp_file.close\n        end\n      end\n    end\n  ensure\n    tmp_file.close(true)\n  end\n\n  nil\nend\n\nclass ArvadosAPIError < RuntimeError\nend\n\ndef check_response result\n  begin\n    results = JSON.parse result.body\n  rescue JSON::ParserError, Oj::ParseError => e\n    raise \"Failed to parse server response:\\n\" + e.to_s\n  end\n\n  if result.response.status != 200\n    raise ArvadosAPIError.new(\"#{result.response.status}: #{\n                              ((results['errors'] && results['errors'].join('\\n')) ||\n                                Net::HTTPResponse::CODE_TO_OBJ[result.response.status.to_s].to_s.sub(/^Net::HTTP/, '').titleize)}\")\n  end\n\n  results\nend\n\ndef lookup_uuid_rsc arvados, uuid\n  m = /([a-z0-9]{5})-([a-z0-9]{5})-([a-z0-9]{15})/.match uuid\n  if !m\n    if /^[a-f0-9]{32}/.match uuid\n      abort \"Arvados collections are not editable.\"\n    else\n      abort \"'#{uuid}' does not appear to be an Arvados uuid\"\n    end\n  end\n\n  rsc = nil\n  arvados.discovery_document[\"resources\"].each do |k,v|\n    klass = k.singularize.camelize\n    dig = Digest::MD5.hexdigest(klass).to_i(16).to_s(36)[-5..-1]\n    if dig == m[2]\n      rsc = k\n    end\n  end\n\n  if rsc.nil?\n    abort \"Could not determine resource type #{m[2]}\"\n  end\n\n  return rsc\nend\n\ndef fetch_rsc_obj client, arvados, rsc, uuid, remaining_opts\n\n  begin\n    result = client.execute(:api_method => eval('arvados.' + rsc + '.get'),\n                            :parameters => {\"uuid\" => uuid},\n                            :authenticated => false,\n                            :headers => {\n                              authorization: 'Bearer '+ENV['ARVADOS_API_TOKEN']\n                            })\n    obj = check_response result\n  rescue => e\n    abort \"Server error: #{e}\"\n  end\n\n  if remaining_opts.length > 0\n    obj.select! { |k, v| remaining_opts.include? k }\n  end\n\n  return obj\nend\n\ndef get_obj_content obj, global_opts\n  content = case global_opts[:format]\n            when 'json'\n              Oj.dump(obj, :indent => 1)\n            when 'yaml'\n              obj.to_yaml\n            else\n              abort \"Unrecognized format #{global_opts[:format]}\"\n            end\n  return content\nend\n\ndef arv_edit client, arvados, global_opts, remaining_opts\n  uuid = remaining_opts.shift\n  if uuid.nil? or uuid == \"-h\" or uuid == \"--help\"\n    puts head_banner\n    puts \"Usage: arv edit [uuid] [fields...]\\n\\n\"\n    puts \"Fetch the specified Arvados object, select the specified fields, \\n\"\n    puts \"open an interactive text editor on a text representation (json or\\n\"\n    puts \"yaml, use --format) and then update the object.  Will use 'nano'\\n\"\n    puts \"by default, customize with the EDITOR or VISUAL environment variable.\\n\"\n    exit 255\n  end\n\n  rsc = lookup_uuid_rsc arvados, uuid\n  oldobj = fetch_rsc_obj client, arvados, rsc, uuid, remaining_opts\n\n  edit_and_commit_object oldobj, uuid, global_opts do |newobj|\n    newobj.select! {|k| newobj[k] != oldobj[k]}\n    if !newobj.empty?\n      result = client.execute(:api_method => eval('arvados.' 
+ rsc + '.update'),\n                     :parameters => {\"uuid\" => uuid},\n                     :body_object => { rsc.singularize => newobj },\n                     :authenticated => false,\n                     :headers => {\n                       authorization: 'Bearer '+ENV['ARVADOS_API_TOKEN']\n                     })\n      results = check_response result\n      STDERR.puts \"Updated object #{results['uuid']}\"\n    else\n      STDERR.puts \"Object is unchanged, did not update.\"\n    end\n  end\n\n  exit 0\nend\n\ndef arv_get client, arvados, global_opts, remaining_opts\n  uuid = remaining_opts.shift\n  if uuid.nil? or uuid == \"-h\" or uuid == \"--help\"\n    puts head_banner\n    puts \"Usage: arv [--format json|yaml] get [uuid] [fields...]\\n\\n\"\n    puts \"Fetch the specified Arvados object, select the specified fields,\\n\"\n    puts \"and print a text representation.\\n\"\n    exit 255\n  end\n\n  rsc = lookup_uuid_rsc arvados, uuid\n  obj = fetch_rsc_obj client, arvados, rsc, uuid, remaining_opts\n  content = get_obj_content obj, global_opts\n\n  puts content\n  exit 0\nend\n\ndef arv_create client, arvados, global_opts, remaining_opts\n  types = resource_types(arvados.discovery_document)\n  create_opts = Optimist::options do\n    opt :project_uuid, \"Project uuid in which to create the object\", :type => :string\n    stop_on resource_types(arvados.discovery_document)\n  end\n\n  object_type = remaining_opts.shift\n  if object_type.nil?\n    abort \"Missing resource type, must be one of #{types.join ', '}\"\n  end\n\n  rsc = arvados.discovery_document[\"resources\"].keys.select { |k| object_type == k.singularize }\n  if rsc.empty?\n    abort \"Could not determine resource type #{object_type}\"\n  end\n  rsc = rsc.first\n\n  discovered_params = arvados.discovery_document[\"resources\"][rsc][\"methods\"][\"create\"][\"parameters\"]\n  method_opts = Optimist::options do\n    banner head_banner\n    banner \"Usage: arv create [--project-uuid] #{object_type} [create parameters]\"\n    banner \"\"\n    banner \"This method supports the following parameters:\"\n    banner \"\"\n    discovered_params.each do |k,v|\n      opts = Hash.new()\n      opts[:type] = v[\"type\"].to_sym if v.include?(\"type\")\n      if [:datetime, :text, :object, :array].index opts[:type]\n        opts[:type] = :string                       # else optimist bork\n      end\n      opts[:default] = v[\"default\"] if v.include?(\"default\")\n      opts[:default] = v[\"default\"].to_i if opts[:type] == :integer\n      opts[:default] = to_boolean(v[\"default\"]) if opts[:type] == :boolean\n      opts[:required] = true if v.include?(\"required\") and v[\"required\"]\n      description = ''\n      description = '  ' + v[\"description\"] if v.include?(\"description\")\n      opt k.to_sym, description, opts\n    end\n  end\n\n  initial_obj = {}\n  if create_opts[:project_uuid]\n    initial_obj[\"owner_uuid\"] = create_opts[:project_uuid]\n  end\n\n  edit_and_commit_object initial_obj, \"\", global_opts do |newobj|\n    result = client.execute(:api_method => eval('arvados.' 
+ rsc + '.create'),\n                   :parameters => method_opts,\n                   :body_object => {object_type => newobj},\n                   :authenticated => false,\n                   :headers => {\n                     authorization: 'Bearer '+ENV['ARVADOS_API_TOKEN']\n                   })\n    results = check_response result\n    puts \"Created object #{results['uuid']}\"\n  end\n\n  exit 0\nend\n\ndef to_boolean(s)\n  !!(s =~ /^(true|t|yes|y|1)$/i)\nend\n\ndef head_banner\n  \"Arvados command line client\\n\"\nend\n\ndef help_methods(discovery_document, resource, method=nil)\n  banner = head_banner\n  banner += \"Usage: arv #{resource} [method] [--parameters]\\n\"\n  banner += \"Use 'arv #{resource} [method] --help' to get more information about specific methods.\\n\\n\"\n  banner += \"The #{resource} resource supports the following methods:\"\n  banner += \"\\n\\n\"\n  discovery_document[\"resources\"][resource.pluralize][\"methods\"].\n    each do |k,v|\n    description = ''\n    if v.include? \"description\"\n      # add only the first line of the discovery doc description\n      description = '  ' + v[\"description\"].split(\"\\n\").first.chomp\n    end\n    banner += \"   #{sprintf(\"%20s\",k)}#{description}\\n\"\n  end\n  banner += \"\\n\"\n  STDERR.puts banner\n\n  if not method.nil? and method != '--help' and method != '-h' then\n    abort \"Unknown method #{method.inspect} \" +\n                  \"for resource #{resource.inspect}\"\n  end\n  exit 255\nend\n\ndef help_resources(option_parser, discovery_document, resource)\n  option_parser.educate\n  exit 255\nend\n\ndef resource_types discovery_document\n  resource_types = Array.new()\n  discovery_document[\"resources\"].each do |k,v|\n    resource_types << k.singularize\n  end\n  resource_types\nend\n\ndef parse_arguments(discovery_document, subcommands)\n  resources_and_subcommands = resource_types(discovery_document) + subcommands\n\n  option_parser = Optimist::Parser.new do\n    version __FILE__\n    banner head_banner\n    banner \"Usage: arv [--flags] subcommand|resource [method] [--parameters]\"\n    banner \"\"\n    banner \"Available flags:\"\n\n    opt :dry_run, \"Don't actually do anything\", :short => \"-n\"\n    opt :verbose, \"Print some things on stderr\"\n    opt :format,\n        \"Set the output format. Must be one of json (default), yaml or uuid.\",\n        :type => :string,\n        :default => 'json'\n    opt :short, \"Return only UUIDs (equivalent to --format=uuid)\"\n\n    banner \"\"\n    banner \"Use 'arv subcommand|resource --help' to get more information about a particular command or resource.\"\n    banner \"\"\n    banner \"Available subcommands: #{subcommands.join(', ')}\"\n    banner \"\"\n\n    banner \"Available resources: #{discovery_document['resources'].keys.map { |k| k.singularize }.join(', ')}\"\n\n    banner \"\"\n    banner \"Additional options:\"\n\n    conflicts :short, :format\n    stop_on resources_and_subcommands\n  end\n\n  global_opts = Optimist::with_standard_exception_handling option_parser do\n    o = option_parser.parse ARGV\n  end\n\n  unless %w(json yaml uuid).include?(global_opts[:format])\n    $stderr.puts \"#{$0}: --format must be one of json, yaml or uuid.\"\n    $stderr.puts \"Use #{$0} --help for more information.\"\n    abort\n  end\n\n  if global_opts[:short]\n    global_opts[:format] = 'uuid'\n  end\n\n  resource = ARGV.shift\n\n  if not subcommands.include? 
resource\n    if not resources_and_subcommands.include?(resource)\n      puts \"Resource or subcommand '#{resource}' is not recognized.\\n\\n\" if !resource.nil?\n      help_resources(option_parser, discovery_document, resource)\n    end\n\n    method = ARGV.shift\n    if not (discovery_document[\"resources\"][resource.pluralize][\"methods\"].\n            include?(method))\n      help_methods(discovery_document, resource, method)\n    end\n\n    discovered_params = discovery_document\\\n    [\"resources\"][resource.pluralize]\\\n    [\"methods\"][method][\"parameters\"]\n    method_opts = Optimist::options do\n      banner head_banner\n      banner \"Usage: arv #{resource} #{method} [--parameters]\"\n      banner \"\"\n      banner \"This method supports the following parameters:\"\n      banner \"\"\n      discovered_params.each do |k,v|\n        opts = Hash.new()\n        opts[:type] = v[\"type\"].to_sym if v.include?(\"type\")\n        if [:datetime, :text, :object, :array].index opts[:type]\n          opts[:type] = :string                       # else optimist bork\n        end\n        opts[:default] = v[\"default\"] if v.include?(\"default\")\n        opts[:default] = v[\"default\"].to_i if opts[:type] == :integer\n        opts[:default] = to_boolean(v[\"default\"]) if opts[:type] == :boolean\n        opts[:required] = true if v.include?(\"required\") and v[\"required\"]\n        description = ''\n        description = '  ' + v[\"description\"] if v.include?(\"description\")\n        opt k.to_sym, description, opts\n      end\n\n      body_object = discovery_document[\"resources\"][resource.pluralize][\"methods\"][method][\"request\"]\n      if body_object and discovered_params[resource].nil?\n        is_required = true\n        if body_object[\"required\"] == false\n          is_required = false\n        end\n        resource_opt_desc = \"Either a string representing #{resource} as JSON or a filename from which to read #{resource} JSON (use '-' to read from stdin).\"\n        if is_required\n          resource_opt_desc += \" This option must be specified.\"\n        end\n        opt resource.to_sym, resource_opt_desc, {\n          required: is_required,\n          type: :string\n        }\n      end\n    end\n\n    discovered_params.merge({resource => {'type' => 'object'}}).each do |k,v|\n      k = k.to_sym\n      if ['object', 'array'].index(v[\"type\"]) and method_opts.has_key? 
k\n        if method_opts[k].andand.match /^\\//\n          method_opts[k] = File.open method_opts[k], 'rb' do |f| f.read end\n        end\n      end\n    end\n  end\n\n  return resource, method, method_opts, global_opts, ARGV\nend\n\n#\n# ENTRY POINT\n#\n\ninit_config\n\nENV['ARVADOS_API_VERSION'] ||= 'v1'\n\nif not ENV.include?('ARVADOS_API_HOST') or not ENV.include?('ARVADOS_API_TOKEN') then\n  abort <<-EOS\nARVADOS_API_HOST and ARVADOS_API_TOKEN need to be defined as environment variables.\n  EOS\nend\n\n# do this if you're testing with a dev server and you don't care about SSL certificate checks:\nif ENV['ARVADOS_API_HOST_INSECURE']\n  suppress_warnings { OpenSSL::SSL::VERIFY_PEER = OpenSSL::SSL::VERIFY_NONE }\nend\n\nbegin\n  client = Google::APIClient.new(:host => ENV['ARVADOS_API_HOST'], :application_name => 'arvados-cli', :application_version => '1.0')\n  arvados = client.discovered_api('arvados', ENV['ARVADOS_API_VERSION'])\nrescue Exception => e\n  puts \"Failed to connect to Arvados API server: #{e}\"\n  exit 1\nend\n\n# Parse arguments here\nresource_schema, method, method_opts, global_opts, remaining_opts = parse_arguments(arvados.discovery_document, subcommands)\n\ncheck_subcommands client, arvados, resource_schema, global_opts, remaining_opts\n\ncontroller = resource_schema.pluralize\n\napi_method = 'arvados.' + controller + '.' + method\n\nif global_opts[:dry_run]\n  if global_opts[:verbose]\n    $stderr.puts \"#{api_method} #{method_opts.inspect}\"\n  end\n  exit\nend\n\nrequest_parameters = {_profile:true}.merge(method_opts)\nresource_body = request_parameters.delete(resource_schema.to_sym)\nif resource_body\n  # check if resource_body is valid JSON by attempting to parse it\n  resource_body_is_json = true\n  begin\n    # we don't actually need the results of the parsing,\n    # just checking for the JSON::ParserError exception\n    JSON.parse resource_body\n  rescue JSON::ParserError => e\n    resource_body_is_json = false\n  end\n  resource_body_is_readable_file = false\n  # if resource_body is not valid JSON, it should be a filename (or '-' for stdin)\n  if resource_body == '-'\n    resource_body_is_readable_file = true\n    resource_body_file = $stdin\n  elsif File.readable? resource_body\n      resource_body_is_readable_file = true\n      resource_body_file = File.open(resource_body, 'r')\n  end\n  if resource_body_is_json and resource_body_is_readable_file\n    abort \"Argument specified for option '--#{resource_schema.to_sym}' is both valid JSON and a readable file. Please consider renaming the file: '#{resource_body}'\"\n  elsif !resource_body_is_json and !resource_body_is_readable_file\n    if File.exist? resource_body\n      # specified file exists but is not readable\n      abort \"Argument specified for option '--#{resource_schema.to_sym}' is an existing file but is not readable. Please check permissions on: '#{resource_body}'\"\n    else\n      # specified file does not exist\n      abort \"Argument specified for option '--#{resource_schema.to_sym}' is neither valid JSON nor an existing file: '#{resource_body}'\"\n    end\n  elsif resource_body_is_readable_file\n    resource_body = resource_body_file.read()\n    begin\n      # we don't actually need the results of the parsing,\n      # just checking for the JSON::ParserError exception\n      JSON.parse resource_body\n    rescue JSON::ParserError => e\n      abort \"Contents of file '#{resource_body_file.path}' is not valid JSON: #{e}\"\n    end\n    resource_body_file.close()\n  end\n  request_body = {\n    resource_schema => resource_body\n  }\nelse\n  request_body = nil\nend\n\ncase api_method\nwhen\n  'arvados.jobs.log_tail_follow'\n\n  # Special case for methods that respond with data streams rather\n  # than JSON (TODO: use the discovery document instead of a static\n  # list of methods)\n  uri_s = eval(api_method).generate_uri(request_parameters)\n  Curl::Easy.perform(uri_s) do |curl|\n    curl.headers['Accept'] = 'text/plain'\n    curl.headers['Authorization'] = \"Bearer #{ENV['ARVADOS_API_TOKEN']}\"\n    if ENV['ARVADOS_API_HOST_INSECURE']\n      curl.ssl_verify_peer = false\n      curl.ssl_verify_host = false\n    end\n    if global_opts[:verbose]\n      curl.on_header { |data| $stderr.write data }\n    end\n    curl.on_body { |data| $stdout.write data }\n  end\n  exit 0\nelse\n  result = client.execute(:api_method => eval(api_method),\n                          :parameters => request_parameters,\n                          :body_object => request_body,\n                          :authenticated => false,\n                          :headers => {\n                            authorization: 'Bearer '+ENV['ARVADOS_API_TOKEN']\n                          })\nend\n\nrequest_id = result.headers[:x_request_id]\nbegin\n  results = JSON.parse result.body\nrescue JSON::ParserError => e\n  err_msg = \"Failed to parse server response:\\n\" + e.to_s\n  if request_id\n    err_msg += \"\\nRequest ID: #{request_id or client.request_id}\"\n  end\n  abort err_msg\nend\n\nif results[\"errors\"] then\n  err_message = results[\"errors\"][0]\n  if request_id and !err_message.match(/.*req-[0-9a-zA-Z]{20}.*/)\n    err_message += \" (#{request_id})\"\n  end\n  abort \"Error: #{err_message}\"\nend\n\ncase global_opts[:format]\nwhen 'json'\n  puts Oj.dump(results, :indent => 1)\nwhen 'yaml'\n  puts results.to_yaml\nelse\n  if results[\"items\"] and results[\"kind\"].match /list$/i\n    results['items'].each do |i| puts i['uuid'] end\n  elsif results['uuid'].nil?\n    abort(\"Response did not include a uuid:\\n\" +\n          Oj.dump(results, :indent => 1) +\n          \"\\n\")\n  else\n    puts results['uuid']\n  end\nend\n"
  },
  {
    "path": "sdk/cli/bin/arv-tag",
    "content": "#! /usr/bin/env ruby\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# arv tag usage:\n#   arv tag add tag1 [tag2 ...] --object obj_uuid1 [--object obj_uuid2 ...]\n#   arv tag remove tag1 [tag2 ...] --object obj_uuid1 [--object obj_uuid2 ...]\n#   arv tag remove tag1 [tag2 ...] --all\n\ndef usage_string\n  return \"\\nUsage:\\n\" +\n    \"arv tag add tag1 [tag2 ...] --object object_uuid1 [object_uuid2...]\\n\" +\n    \"arv tag remove tag1 [tag2 ...] --object object_uuid1 [object_uuid2...]\\n\" +\n    \"arv tag remove --all\\n\"\nend\n\ndef usage\n  abort usage_string\nend\n\ndef api_call(method, parameters:{}, request_body:{})\n  result = $client.execute(:api_method => method,\n                           :parameters => parameters,\n                           :body_object => request_body,\n                           :authenticated => false,\n                           :headers => {\n                             authorization: \"Bearer #{ENV['ARVADOS_API_TOKEN']}\",\n                           })\n\n  begin\n    results = JSON.parse result.body\n  rescue JSON::ParserError => e\n    abort \"Failed to parse server response:\\n\" + e.to_s\n  end\n\n  if results[\"errors\"]\n    abort \"Error: #{results[\"errors\"][0]}\"\n  end\n\n  return results\nend\n\ndef tag_add(tag, obj_uuid)\n  return api_call($arvados.links.create,\n                  request_body: {\n                    :link => {\n                      :name       => tag,\n                      :link_class => :tag,\n                      :head_uuid  => obj_uuid,\n                    }\n                  })\nend\n\ndef tag_remove(tag, obj_uuids=nil)\n  # If we got a list of objects to untag, look up the uuids for the\n  # links that need to be deleted.\n  link_uuids = []\n  if obj_uuids\n    obj_uuids.each do |uuid|\n      link = api_call($arvados.links.list,\n                      request_body: {\n                        :where => {\n                          :link_class => :tag,\n                          :name => tag,\n                          :head_uuid => uuid,\n                        }\n                      })\n      if link['items_available'] > 0\n        link_uuids.push link['items'][0]['uuid']\n      end\n    end\n  else\n    all_tag_links = api_call($arvados.links.list,\n                             request_body: {\n                               :where => {\n                                 :link_class => :tag,\n                                 :name => tag,\n                               }\n                             })\n    link_uuids = all_tag_links['items'].map { |obj| obj['uuid'] }\n  end\n\n  results = []\n  if link_uuids\n    link_uuids.each do |uuid|\n      results.push api_call($arvados.links.delete, parameters:{ :uuid => uuid })\n    end\n  else\n    $stderr.puts \"no tags found to remove\"\n  end\n\n  return results\nend\n\nif RUBY_VERSION < '1.9.3' then\n  abort <<-EOS\n#{$0.gsub(/^\\.\\//,'')} requires Ruby version 1.9.3 or higher.\nEOS\nend\n\n$arvados_api_version = ENV['ARVADOS_API_VERSION'] || 'v1'\n$arvados_api_host = ENV['ARVADOS_API_HOST'] or\n  abort \"#{$0}: fatal: ARVADOS_API_HOST environment variable not set.\"\n$arvados_api_token = ENV['ARVADOS_API_TOKEN'] or\n  abort \"#{$0}: fatal: ARVADOS_API_TOKEN environment variable not set.\"\n$arvados_api_host_insecure = %w(1 true yes).\n  include?((ENV['ARVADOS_API_HOST_INSECURE'] || \"\").downcase)\n\nbegin\n  require 'rubygems'\n  require 'google/api_client'\n  require 'json'\n  
require 'pp'\n  require 'oj'\n  require 'optimist'\nrescue LoadError\n  abort <<-EOS\n#{$0}: fatal: some runtime dependencies are missing.\nTry: gem install google-api-client json oj optimist\n  EOS\nend\n\ndef debuglog(message, verbosity=1)\n  $stderr.puts \"#{File.split($0).last} #{$$}: #{message}\" if $debuglevel >= verbosity\nend\n\nmodule Kernel\n  def suppress_warnings\n    original_verbosity = $VERBOSE\n    $VERBOSE = nil\n    result = yield\n    $VERBOSE = original_verbosity\n    return result\n  end\nend\n\nif $arvados_api_host_insecure or $arvados_api_host.match /local/\n  # You probably don't care about SSL certificate checks if you're\n  # testing with a dev server.\n  suppress_warnings { OpenSSL::SSL::VERIFY_PEER = OpenSSL::SSL::VERIFY_NONE }\nend\n\nclass Google::APIClient\n  def discovery_document(api, version)\n    api = api.to_s\n    return @discovery_documents[\"#{api}:#{version}\"] ||=\n      begin\n        response = self.execute!(\n                                 :http_method => :get,\n                                 :uri => self.discovery_uri(api, version),\n                                 :authenticated => false\n                                 )\n        response.body.class == String ? JSON.parse(response.body) : response.body\n      end\n  end\nend\n\nglobal_opts = Optimist::options do\n  banner usage_string\n  banner \"\"\n  opt :dry_run, \"Don't actually do anything\", :short => \"-n\"\n  opt :verbose, \"Print some things on stderr\", :short => \"-v\"\n  opt :uuid, \"Return the UUIDs of the objects in the response, one per line (default)\", :short => nil\n  opt :json, \"Return the entire response received from the API server, as a JSON object\", :short => \"-j\"\n  opt :human, \"Return the response received from the API server, as a JSON object with whitespace added for human consumption\", :short => \"-h\"\n  opt :pretty, \"Synonym of --human\", :short => nil\n  opt :yaml, \"Return the response received from the API server, in YAML format\", :short => \"-y\"\n  stop_on ['add', 'remove']\nend\n\np = Optimist::Parser.new do\n  opt(:all,\n      \"Remove this tag from all objects under your ownership. Only valid with `tag remove'.\",\n      :short => :none)\n  opt(:object,\n      \"The UUID of an object to which this tag operation should be applied.\",\n      :type => :string,\n      :multi => true,\n      :short => :o)\nend\n\n$options = Optimist::with_standard_exception_handling p do\n  p.parse ARGV\nend\n\nif $options[:all] and ARGV[0] != 'remove'\n  usage\nend\n\n# Set up the API client.\n\n$client ||= Google::APIClient.\n  new(:host => $arvados_api_host,\n      :application_name => File.split($0).last,\n      :application_version => $application_version.to_s)\n$arvados = $client.discovered_api('arvados', $arvados_api_version)\n\nresults = []\ncmd = ARGV.shift\n\nif ARGV.empty?\n  usage\nend\n\ncase cmd\nwhen 'add'\n  ARGV.each do |tag|\n    $options[:object].each do |obj|\n      results.push(tag_add(tag, obj))\n    end\n  end\nwhen 'remove'\n  ARGV.each do |tag|\n    if $options[:all] then\n      results.concat tag_remove(tag)\n    else\n      results.concat tag_remove(tag, $options[:object])\n    end\n  end\nelse\n  usage\nend\n\nif global_opts[:human] or global_opts[:pretty] then\n  puts Oj.dump(results, :indent => 1)\nelsif global_opts[:yaml] then\n  puts results.to_yaml\nelsif global_opts[:json] then\n  puts Oj.dump(results)\nelse\n  results.each do |r|\n    if r['uuid'].nil?\n      abort(\"Response did not include a uuid:\\n\" +\n            Oj.dump(r, :indent => 1) +\n            \"\\n\")\n    else\n      puts r['uuid']\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/cli/test/binstub_arv-mount/arv-mount",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n"
  },
  {
    "path": "sdk/cli/test/binstub_clean_fail/arv-mount",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\necho >&2 Failing mount stub was called\nexit 44\n"
  },
  {
    "path": "sdk/cli/test/binstub_docker_noop/docker.io",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ntrue\n"
  },
  {
    "path": "sdk/cli/test/binstub_output_coll_owner/python",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\necho owner_uuid: $2 >&2\n\n"
  },
  {
    "path": "sdk/cli/test/binstub_sanity_check/docker.io",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nexit 8\n"
  },
  {
    "path": "sdk/cli/test/binstub_sanity_check/true",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nexit 7\n"
  },
  {
    "path": "sdk/cli/test/test_arv-collection-create.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire 'minitest/autorun'\nrequire 'digest/md5'\nrequire 'active_support'\nrequire 'active_support/core_ext'\nrequire 'tempfile'\n\nclass TestCollectionCreate < Minitest::Test\n  def setup\n  end\n\n  def test_small_collection\n    uuid = Digest::MD5.hexdigest(foo_manifest) + '+' + foo_manifest.size.to_s\n    ok = nil\n    out, err = capture_subprocess_io do\n      ok = arv('--format', 'uuid', 'collection', 'create', '--collection', {\n                     uuid: uuid,\n                     manifest_text: foo_manifest\n                   }.to_json)\n    end\n    assert_equal('', err)\n    assert_equal(true, ok)\n    assert_match(/^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/, out)\n  end\n\n  def test_collection_replace_files\n    ok = nil\n    uuid, err = capture_subprocess_io do\n      ok = arv('--format', 'uuid', 'collection', 'create', '--collection', '{}')\n    end\n    assert_equal('', err)\n    assert_equal(true, ok)\n    assert_match(/^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/, uuid)\n    uuid = uuid.strip\n\n    out, err = capture_subprocess_io do\n      ok = arv('--format', 'uuid',\n                   'collection', 'update',\n                   '--uuid', uuid,\n                   '--collection', '{}',\n                   '--replace-files', {\n                     \"/gpl.pdf\": \"b519d9cb706a29fc7ea24dbea2f05851+93/GNU_General_Public_License,_version_3.pdf\",\n                   }.to_json)\n    end\n    assert_equal('', err)\n    assert_equal(true, ok)\n    assert_equal(uuid, out.strip)\n\n    ok = nil\n    out, err = capture_subprocess_io do\n      ok = arv('--format', 'json', 'collection', 'get', '--uuid', uuid)\n    end\n    assert_equal('', err)\n    assert_equal(true, ok)\n    assert_match(/\\. 6a4ff0499484c6c79c95cd8c566bd25f\\+249025.* 0:249025:gpl.pdf\\\\n/, out)\n  end\n\n  def test_read_resource_object_from_file\n    tempfile = Tempfile.new('collection')\n    begin\n      tempfile.write({manifest_text: foo_manifest}.to_json)\n      tempfile.close\n      ok = nil\n      out, err = capture_subprocess_io do\n        ok = arv('--format', 'uuid',\n                     'collection', 'create', '--collection', tempfile.path)\n      end\n      assert_equal('', err)\n      assert_equal(true, ok)\n      assert_match(/^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/, out)\n    ensure\n      tempfile.unlink\n    end\n  end\n\n  protected\n  def arv(*args)\n    system(['./bin/arv', 'arv'], *args)\n  end\n\n  def foo_manifest\n    \". #{Digest::MD5.hexdigest('foo')}+3 0:3:foo\\n\"\n  end\nend\n"
  },
  {
    "path": "sdk/cli/test/test_arv-get.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire 'minitest/autorun'\nrequire 'json'\nrequire 'yaml'\n\n# Black box tests for 'arv get' command.\nclass TestArvGet < Minitest::Test\n  # UUID for an Arvados object that does not exist\n  NON_EXISTENT_OBJECT_UUID = \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\"\n  # Name of field of Arvados object that can store any (textual) value\n  STORED_VALUE_FIELD_NAME = \"name\"\n  # Name of UUID field of Arvados object\n  UUID_FIELD_NAME = \"uuid\"\n  # Name of an invalid field of Arvados object\n  INVALID_FIELD_NAME = \"invalid\"\n\n  # Tests that a valid Arvados object can be retrieved in a supported format\n  # using: `arv get [uuid]`. Given all other `arv foo` commands return JSON\n  # when no format is specified, JSON should be expected in this case.\n  def test_get_valid_object_no_format_specified\n    stored_value = __method__.to_s\n    uuid = create_arv_object_with_value(stored_value)\n    out, err = capture_subprocess_io do\n      assert(arv_get_default(uuid))\n    end\n    assert_empty(err, \"Error text not expected: '#{err}'\")\n    arv_object = parse_json_arv_object(out)\n    assert(has_field_with_value(arv_object, STORED_VALUE_FIELD_NAME, stored_value))\n  end\n\n  # Tests that a valid Arvados object can be retrieved in JSON format using:\n  # `arv get [uuid] --format json`.\n  def test_get_valid_object_json_format_specified\n    stored_value = __method__.to_s\n    uuid = create_arv_object_with_value(stored_value)\n    out, err = capture_subprocess_io do\n      assert(arv_get_json(uuid))\n    end\n    assert_empty(err, \"Error text not expected: '#{err}'\")\n    arv_object = parse_json_arv_object(out)\n    assert(has_field_with_value(arv_object, STORED_VALUE_FIELD_NAME, stored_value))\n  end\n\n  # Tests that a valid Arvados object can be retrieved in YAML format using:\n  # `arv get [uuid] --format yaml`.\n  def test_get_valid_object_yaml_format_specified\n    stored_value = __method__.to_s\n    uuid = create_arv_object_with_value(stored_value)\n    out, err = capture_subprocess_io do\n      assert(arv_get_yaml(uuid))\n    end\n    assert_empty(err, \"Error text not expected: '#{err}'\")\n    arv_object = parse_yaml_arv_object(out)\n    assert(has_field_with_value(arv_object, STORED_VALUE_FIELD_NAME, stored_value))\n  end\n\n  # Tests that a subset of all fields of a valid Arvados object can be retrieved\n  # using: `arv get [uuid] [fields...]`.\n  def test_get_valid_object_with_valid_fields\n    stored_value = __method__.to_s\n    uuid = create_arv_object_with_value(stored_value)\n    out, err = capture_subprocess_io do\n      assert(arv_get_json(uuid, STORED_VALUE_FIELD_NAME, UUID_FIELD_NAME))\n    end\n    assert_empty(err, \"Error text not expected: '#{err}'\")\n    arv_object = parse_json_arv_object(out)\n    assert(has_field_with_value(arv_object, STORED_VALUE_FIELD_NAME, stored_value))\n    assert(has_field_with_value(arv_object, UUID_FIELD_NAME, uuid))\n  end\n\n  # Tests that the valid field is retrieved when both a valid and invalid field\n  # are requested from a valid Arvados object, using:\n  # `arv get [uuid] [fields...]`.\n  def test_get_valid_object_with_both_valid_and_invalid_fields\n    stored_value = __method__.to_s\n    uuid = create_arv_object_with_value(stored_value)\n    out, err = capture_subprocess_io do\n      assert(arv_get_json(uuid, STORED_VALUE_FIELD_NAME, INVALID_FIELD_NAME))\n    end\n    assert_empty(err, \"Error text not expected: '#{err}'\")\n 
   arv_object = parse_json_arv_object(out)\n    assert(has_field_with_value(arv_object, STORED_VALUE_FIELD_NAME, stored_value))\n    refute(has_field_with_value(arv_object, INVALID_FIELD_NAME, stored_value))\n  end\n\n  # Tests that no fields are retrieved when no valid fields are requested from\n  # a valid Arvados object, using: `arv get [uuid] [fields...]`.\n  def test_get_valid_object_with_no_valid_fields\n    stored_value = __method__.to_s\n    uuid = create_arv_object_with_value(stored_value)\n    out, err = capture_subprocess_io do\n      assert(arv_get_json(uuid, INVALID_FIELD_NAME))\n    end\n    assert_empty(err, \"Error text not expected: '#{err}'\")\n    arv_object = parse_json_arv_object(out)\n    assert_equal(0, arv_object.length)\n  end\n\n  # Tests that an invalid (non-existent) Arvados object is not retrieved\n  # using: `arv get [non-existent-uuid]`.\n  def test_get_invalid_object\n    out, err = capture_subprocess_io do\n      refute(arv_get_json(NON_EXISTENT_OBJECT_UUID))\n    end\n    refute_empty(err, \"Expected error feedback on request for invalid object\")\n    assert_empty(out)\n  end\n\n  # Tests that help text exists using: `arv get --help`.\n  def test_help_exists\n    out, err = capture_subprocess_io do\n#      assert(arv_get_default(\"--help\"), \"Expected exit code 0: #{$?}\")\n       #XXX: Exit code given is 255. It probably should be 0, which seems to be\n       #     standard elsewhere. However, 255 is in line with other `arv`\n       #     commands (e.g. see `arv edit`) so ignoring the problem here.\n       arv_get_default(\"--help\")\n    end\n    assert_empty(err, \"Error text not expected: '#{err}'\")\n    refute_empty(out, \"Help text should be given\")\n  end\n\n  protected\n  # Runs 'arv get <varargs>' with given arguments. Returns whether the exit\n  # status was 0 (i.e. success). Use $? to obtain more details on failure.\n  def arv_get_default(*args)\n    return system(\"arv\", \"get\", *args)\n  end\n\n  # Runs 'arv --format json get <varargs>' with given arguments. Returns whether\n  # the exit status was 0 (i.e. success). Use $? to obtain more details on\n  # failure.\n  def arv_get_json(*args)\n    return system(\"arv\", \"--format\", \"json\", \"get\", *args)\n  end\n\n  # Runs 'arv --format yaml get <varargs>' with given arguments. Returns whether\n  # the exit status was 0 (i.e. success). Use $? to obtain more details on\n  # failure.\n  def arv_get_yaml(*args)\n    return system(\"arv\", \"--format\", \"yaml\", \"get\", *args)\n  end\n\n  # Creates an Arvados object that stores a given value. 
Returns the uuid of the\n  # created object.\n  def create_arv_object_with_value(value)\n    out, err = capture_subprocess_io do\n      system(\"arv\", \"tag\", \"add\", value, \"--object\", \"testing\")\n    end\n    assert_equal '', err\n    assert $?.success?, \"Command failure running `arv tag`: #{$?}\"\n    assert_operator 0, :<, out.strip.length\n    out.strip\n  end\n\n  # Parses the given JSON representation of an Arvados object, returning\n  # an equivalent Ruby representation (a hash map).\n  def parse_json_arv_object(arvObjectAsJson)\n    begin\n      parsed = JSON.parse(arvObjectAsJson)\n      assert(parsed.instance_of?(Hash))\n      return parsed\n    rescue JSON::ParserError => e\n      raise \"Invalid JSON representation of Arvados object.\\n\" \\\n            \"Parse error: '#{e}'\\n\" \\\n            \"JSON: '#{arvObjectAsJson}'\\n\"\n    end\n  end\n\n  # Parses the given YAML representation of an Arvados object, returning\n  # an equivalent Ruby representation (a hash map).\n  def parse_yaml_arv_object(arvObjectAsYaml)\n    begin\n      parsed = YAML.load(arvObjectAsYaml)\n      assert(parsed.instance_of?(Hash))\n      return parsed\n    rescue\n      raise \"Invalid YAML representation of Arvados object.\\n\" \\\n            \"YAML: '#{arvObjectAsYaml}'\\n\"\n    end\n  end\n\n  # Checks whether the given Arvados object has the given expected value for the\n  # specified field.\n  def has_field_with_value(arvObjectAsHash, fieldName, expectedValue)\n    if !arvObjectAsHash.has_key?(fieldName)\n      return false\n    end\n    return (arvObjectAsHash[fieldName] == expectedValue)\n  end\nend\n"
  },
  {
    "path": "sdk/cli/test/test_arv-keep-get.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire 'minitest/autorun'\nrequire 'digest/md5'\n\nclass TestArvKeepGet < Minitest::Test\n  def setup\n    begin\n      Dir.mkdir './tmp'\n    rescue Errno::EEXIST\n    end\n    @@foo_manifest_locator ||= `echo -n foo | ./bin/arv-put --filename foo --no-progress -`.strip\n    @@baz_locator ||= `echo -n baz | ./bin/arv-put --as-raw --no-progress -`.strip\n    @@multilevel_manifest_locator ||= `echo -n baz | ./bin/arv-put --filename foo/bar/baz --no-progress -`.strip\n  end\n\n  def test_no_args\n    out, err = capture_subprocess_io do\n      assert_arv_get false\n    end\n    assert_equal '', out\n    assert_match(/^usage:/, err)\n  end\n\n  def test_get_version\n    out, err = capture_subprocess_io do\n      assert_arv_get '--version'\n    end\n    # python3 handles action='version' differently than python2\n    # https://dev.arvados.org/issues/15888#note-23\n    assert_empty(err, \"STDERR not expected: '#{err}'\")\n    assert_match(/[0-9]+\\.[0-9]+\\.[0-9]+/, out, \"Version information incorrect: '#{out}'\")\n  end\n\n  def test_help\n    out, err = capture_subprocess_io do\n      assert_arv_get '-h'\n    end\n    $stderr.write err\n    assert_equal '', err\n    assert_match(/^usage:/, out)\n  end\n\n  def test_file_to_dev_stdout\n    test_file_to_stdout('/dev/stdout')\n  end\n\n  def test_file_to_stdout(specify_stdout_as='-')\n    out, err = capture_subprocess_io do\n      assert_arv_get @@foo_manifest_locator + '/foo', specify_stdout_as\n    end\n    assert_equal '', err\n    assert_equal 'foo', out\n  end\n\n  def test_file_to_file\n    remove_tmp_foo\n    out, err = capture_subprocess_io do\n      assert_arv_get @@foo_manifest_locator + '/foo', 'tmp/foo'\n    end\n    assert_equal '', err\n    assert_equal '', out\n    assert_equal 'foo', IO.read('tmp/foo')\n  end\n\n  def test_file_to_file_no_overwrite_file\n    File.open './tmp/foo', 'wb' do |f|\n      f.write 'baz'\n    end\n    out, err = capture_subprocess_io do\n      assert_arv_get false, @@foo_manifest_locator + '/foo', 'tmp/foo'\n    end\n    assert_match(/Local file tmp\\/foo already exists/, err)\n    assert_equal '', out\n    assert_equal 'baz', IO.read('tmp/foo')\n  end\n\n  def test_file_to_file_no_overwrite_file_in_dir\n    File.open './tmp/foo', 'wb' do |f|\n      f.write 'baz'\n    end\n    out, err = capture_subprocess_io do\n      assert_arv_get false, @@foo_manifest_locator + '/', 'tmp/'\n    end\n    assert_match(/Local file tmp\\/foo already exists/, err)\n    assert_equal '', out\n    assert_equal 'baz', IO.read('tmp/foo')\n  end\n\n  def test_file_to_file_force_overwrite\n    File.open './tmp/foo', 'wb' do |f|\n      f.write 'baz'\n    end\n    assert_equal 'baz', IO.read('tmp/foo')\n    out, err = capture_subprocess_io do\n      assert_arv_get '-f', @@foo_manifest_locator + '/', 'tmp/'\n    end\n    assert_match '', err\n    assert_equal '', out\n    assert_equal 'foo', IO.read('tmp/foo')\n  end\n\n  def test_file_to_file_skip_existing\n    File.open './tmp/foo', 'wb' do |f|\n      f.write 'baz'\n    end\n    assert_equal 'baz', IO.read('tmp/foo')\n    out, err = capture_subprocess_io do\n      assert_arv_get '--skip-existing', @@foo_manifest_locator + '/', 'tmp/'\n    end\n    assert_match '', err\n    assert_equal '', out\n    assert_equal 'baz', IO.read('tmp/foo')\n  end\n\n  def test_file_to_dir\n    remove_tmp_foo\n    out, err = capture_subprocess_io do\n      assert_arv_get 
@@foo_manifest_locator + '/foo', 'tmp/'\n    end\n    assert_equal '', err\n    assert_equal '', out\n    assert_equal 'foo', IO.read('tmp/foo')\n  end\n\n  def test_dir_to_file\n    out, err = capture_subprocess_io do\n      assert_arv_get false, @@foo_manifest_locator + '/', 'tmp/foo'\n    end\n    assert_equal '', out\n    assert_match(/^usage:/, err)\n  end\n\n  def test_dir_to_empty_string\n    out, err = capture_subprocess_io do\n      assert_arv_get false, @@foo_manifest_locator + '/', ''\n    end\n    assert_equal '', out\n    assert_match(/^usage:/, err)\n  end\n\n  def test_nonexistent_block\n    out, err = capture_subprocess_io do\n      assert_arv_get false, 'e796ab2294f3e48ec709ffa8d6daf58c'\n    end\n    assert_equal '', out\n    assert_match(/ERROR:/, err)\n  end\n\n  def test_nonexistent_manifest\n    out, err = capture_subprocess_io do\n      assert_arv_get false, 'acbd18db4cc2f85cedef654fccc4a4d8/', 'tmp/'\n    end\n    assert_equal '', out\n    assert_match(/ERROR:/, err)\n  end\n\n  def test_manifest_root_to_dir\n    remove_tmp_foo\n    out, err = capture_subprocess_io do\n      assert_arv_get '-r', @@foo_manifest_locator + '/', 'tmp/'\n    end\n    assert_equal '', err\n    assert_equal '', out\n    assert_equal 'foo', IO.read('tmp/foo')\n  end\n\n  def test_manifest_root_to_dir_noslash\n    remove_tmp_foo\n    out, err = capture_subprocess_io do\n      assert_arv_get '-r', @@foo_manifest_locator + '/', 'tmp'\n    end\n    assert_equal '', err\n    assert_equal '', out\n    assert_equal 'foo', IO.read('tmp/foo')\n  end\n\n  def test_display_md5sum\n    remove_tmp_foo\n    out, err = capture_subprocess_io do\n      assert_arv_get '-r', '--md5sum', @@foo_manifest_locator + '/', 'tmp/'\n    end\n    assert_equal \"#{Digest::MD5.hexdigest('foo')}  ./foo\\n\", err\n    assert_equal '', out\n    assert_equal 'foo', IO.read('tmp/foo')\n  end\n\n  def test_md5sum_nowrite\n    remove_tmp_foo\n    out, err = capture_subprocess_io do\n      assert_arv_get '-n', '--md5sum', @@foo_manifest_locator + '/', 'tmp/'\n    end\n    assert_equal \"#{Digest::MD5.hexdigest('foo')}  ./foo\\n\", err\n    assert_equal '', out\n    assert_equal false, File.exist?('tmp/foo')\n  end\n\n  def test_sha1_nowrite\n    remove_tmp_foo\n    out, err = capture_subprocess_io do\n      assert_arv_get '-n', '-r', '--hash', 'sha1', @@foo_manifest_locator+'/', 'tmp/'\n    end\n    assert_equal \"#{Digest::SHA1.hexdigest('foo')}  ./foo\\n\", err\n    assert_equal '', out\n    assert_equal false, File.exist?('tmp/foo')\n  end\n\n  def test_block_to_file\n    remove_tmp_foo\n    out, err = capture_subprocess_io do\n      assert_arv_get @@foo_manifest_locator, 'tmp/foo'\n    end\n    assert_equal '', err\n    assert_equal '', out\n\n    digest = Digest::MD5.hexdigest('foo')\n    !(IO.read('tmp/foo')).gsub!( /^(. 
#{digest}+3)(.*)( 0:3:foo)$/).nil?\n  end\n\n  def test_create_directory_tree\n    `rm -rf ./tmp/arv-get-test/`\n    Dir.mkdir './tmp/arv-get-test'\n    out, err = capture_subprocess_io do\n      assert_arv_get @@multilevel_manifest_locator + '/', 'tmp/arv-get-test/'\n    end\n    assert_equal '', err\n    assert_equal '', out\n    assert_equal 'baz', IO.read('tmp/arv-get-test/foo/bar/baz')\n  end\n\n  def test_create_partial_directory_tree\n    `rm -rf ./tmp/arv-get-test/`\n    Dir.mkdir './tmp/arv-get-test'\n    out, err = capture_subprocess_io do\n      assert_arv_get(@@multilevel_manifest_locator + '/foo/',\n                     'tmp/arv-get-test/')\n    end\n    assert_equal '', err\n    assert_equal '', out\n    assert_equal 'baz', IO.read('tmp/arv-get-test/bar/baz')\n  end\n\n  protected\n  def assert_arv_get(*args)\n    expect = case args.first\n             when true, false\n               args.shift\n             else\n               true\n             end\n    assert_equal(expect,\n                 system(['./bin/arv-get', 'arv-get'], *args),\n                 \"`arv-get #{args.join ' '}` \" +\n                 \"should exit #{if expect then 0 else 'non-zero' end}\")\n  end\n\n  def remove_tmp_foo\n    begin\n      File.unlink('tmp/foo')\n    rescue Errno::ENOENT\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/cli/test/test_arv-keep-put.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire 'minitest/autorun'\nrequire 'digest/md5'\n\nclass TestArvKeepPut < Minitest::Test\n  def setup\n    begin Dir.mkdir './tmp' rescue Errno::EEXIST end\n    begin Dir.mkdir './tmp/empty_dir' rescue Errno::EEXIST end\n    File.open './tmp/empty_file', 'wb' do\n    end\n    File.open './tmp/foo', 'wb' do |f|\n      f.write 'foo'\n    end\n  end\n\n  def test_help\n    out, err = capture_subprocess_io do\n      assert arv_put('-h'), 'arv-put -h exits zero'\n    end\n    $stderr.write err\n    assert_empty err\n    assert_match(/^usage:/, out)\n  end\n\n  def test_raw_stdin\n    out, err = capture_subprocess_io do\n      r,w = IO.pipe\n      wpid = fork do\n        r.close\n        w << 'foo'\n      end\n      w.close\n      assert arv_put('--raw', {in: r})\n      r.close\n      Process.waitpid wpid\n    end\n    $stderr.write err\n    assert_match '', err\n    assert_equal \"acbd18db4cc2f85cedef654fccc4a4d8+3\\n\", out\n  end\n\n  def test_raw_file\n    out, err = capture_subprocess_io do\n      assert arv_put('--no-cache', '--raw', './tmp/foo')\n    end\n    $stderr.write err\n    assert_match '', err\n    assert_equal \"acbd18db4cc2f85cedef654fccc4a4d8+3\\n\", out\n  end\n\n  def test_raw_empty_file\n    out, err = capture_subprocess_io do\n      assert arv_put('--raw', './tmp/empty_file')\n    end\n    $stderr.write err\n    assert_match '', err\n    assert_equal \"d41d8cd98f00b204e9800998ecf8427e+0\\n\", out\n  end\n\n  def test_filename_arg_with_directory\n    out, err = capture_subprocess_io do\n      assert_equal(false, arv_put('--filename', 'foo', './tmp/empty_dir/.'),\n                   'arv-put --filename refuses directory')\n    end\n    assert_match(/^usage:.*error:/m, err)\n    assert_empty out\n  end\n\n  def test_filename_arg_with_multiple_files\n    out, err = capture_subprocess_io do\n      assert_equal(false, arv_put('--filename', 'foo',\n                                  './tmp/empty_file',\n                                  './tmp/empty_file'),\n                   'arv-put --filename refuses directory')\n    end\n    assert_match(/^usage:.*error:/m, err)\n    assert_empty out\n  end\n\n  def test_filename_arg_with_empty_file\n    out, err = capture_subprocess_io do\n      assert arv_put('--filename', 'foo', './tmp/empty_file')\n    end\n    $stderr.write err\n    assert_match '', err\n    assert match_collection_uuid(out)\n  end\n\n  def test_as_stream\n    out, err = capture_subprocess_io do\n      assert arv_put('--no-cache', '--as-stream', './tmp/foo')\n    end\n    $stderr.write err\n    assert_match '', err\n    assert_equal foo_manifest, out\n  end\n\n  def test_progress\n    out, err = capture_subprocess_io do\n      assert arv_put('--no-cache', '--manifest', '--progress', './tmp/foo')\n    end\n    assert_match(/%/, err)\n    assert match_collection_uuid(out)\n  end\n\n  def test_batch_progress\n    out, err = capture_subprocess_io do\n      assert arv_put('--no-cache', '--manifest', '--batch-progress', './tmp/foo')\n    end\n    assert_match(/: 0 written 3 total/, err)\n    assert_match(/: 3 written 3 total/, err)\n    assert match_collection_uuid(out)\n  end\n\n  def test_progress_and_batch_progress\n    out, err = capture_subprocess_io do\n      assert_equal(false,\n                   arv_put('--progress', '--batch-progress', './tmp/foo'),\n                   'arv-put --progress --batch-progress is contradictory')\n    end\n    
assert_match(/^usage:.*error:/m, err)\n    assert_empty out\n  end\n\n  def test_read_from_implicit_stdin\n    test_read_from_stdin(specify_stdin_as='--manifest')\n  end\n\n  def test_read_from_dev_stdin\n    test_read_from_stdin(specify_stdin_as='/dev/stdin')\n  end\n\n  def test_read_from_stdin(specify_stdin_as='-')\n    out, err = capture_subprocess_io do\n      r,w = IO.pipe\n      wpid = fork do\n        r.close\n        w << 'foo'\n      end\n      w.close\n      assert arv_put('--filename', 'foo', specify_stdin_as,\n                                 { in: r })\n      r.close\n      Process.waitpid wpid\n    end\n    $stderr.write err\n    assert_match '', err\n    assert match_collection_uuid(out)\n  end\n\n  def test_read_from_implicit_stdin_implicit_manifest\n    test_read_from_stdin_implicit_manifest(specify_stdin_as=nil,\n                                           expect_filename='stdin')\n  end\n\n  def test_read_from_dev_stdin_implicit_manifest\n    test_read_from_stdin_implicit_manifest(specify_stdin_as='/dev/stdin')\n  end\n\n  def test_read_from_stdin_implicit_manifest(specify_stdin_as='-',\n                                             expect_filename=nil)\n    expect_filename = expect_filename || specify_stdin_as.split('/').last\n    out, err = capture_subprocess_io do\n      r,w = IO.pipe\n      wpid = fork do\n        r.close\n        w << 'foo'\n      end\n      w.close\n      args = []\n      args.push specify_stdin_as if specify_stdin_as\n      assert arv_put(*args, { in: r })\n      r.close\n      Process.waitpid wpid\n    end\n    $stderr.write err\n    assert_match '', err\n    assert match_collection_uuid(out)\n  end\n\n  protected\n  def arv_put(*args)\n    system ['./bin/arv-put', 'arv-put'], *args\n  end\n\n  def foo_manifest(filename='foo')\n    \". #{Digest::MD5.hexdigest('foo')}+3 0:3:#{filename}\\n\"\n  end\n\n  def foo_manifest_locator(filename='foo')\n    Digest::MD5.hexdigest(foo_manifest(filename)) +\n      \"+#{foo_manifest(filename).length}\"\n  end\n\n  def match_collection_uuid(uuid)\n    /^([0-9a-z]{5}-4zz18-[0-9a-z]{15})?$/.match(uuid)\n  end\nend\n"
  },
  {
    "path": "sdk/cli/test/test_arv-tag.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire 'minitest/autorun'\nrequire 'digest/md5'\nrequire 'json'\n\ndef assert_failure *args\n  assert_equal false, *args\nend\n\nclass TestArvTag < Minitest::Test\n\n  def test_no_args\n    skip \"Waiting until #4534 is implemented\"\n\n    # arv-tag exits with failure if run with no args\n    out, err = capture_subprocess_io do\n      assert_equal false, arv_tag\n    end\n    assert_empty out\n    assert_match(/^usage:/i, err)\n  end\n\n  # Test adding and removing a single tag on a single object.\n  def test_single_tag_single_obj\n    skip \"TBD\"\n\n    # Add a single tag.\n    tag_uuid, err = capture_subprocess_io do\n      assert arv_tag '--short', 'add', 'test_tag1', '--object', 'uuid1'\n    end\n    assert_empty err\n\n    out, err = capture_subprocess_io do\n      assert arv 'link', 'show', '--uuid', tag_uuid.rstrip\n    end\n\n    assert_empty err\n    link = JSON.parse out\n    assert_tag link, 'test_tag1', 'uuid1'\n\n    # Remove the tag.\n    out, err = capture_subprocess_io do\n      assert arv_tag 'remove', 'test_tag1', '--object', 'uuid1'\n    end\n\n    assert_empty err\n    links = JSON.parse out\n    assert_equal 1, links.length\n    assert_tag links[0], 'test_tag1', 'uuid1'\n\n    # Verify that the link no longer exists.\n    out, err = capture_subprocess_io do\n      assert_equal false, arv('link', 'show', '--uuid', links[0]['uuid'])\n    end\n\n    assert_equal \"Error: Path not found\\n\", err\n  end\n\n  # Test adding and removing a single tag with multiple objects.\n  def test_single_tag_multi_objects\n    skip \"TBD\"\n\n    out, err = capture_subprocess_io do\n      assert arv_tag('add', 'test_tag1',\n                     '--object', 'uuid1',\n                     '--object', 'uuid2',\n                     '--object', 'uuid3')\n    end\n    assert_empty err\n\n    out, err = capture_subprocess_io do\n      assert arv 'link', 'list', '--where', '{\"link_class\":\"tag\",\"name\":\"test_tag1\"}'\n    end\n\n    assert_empty err\n    json_out = JSON.parse out\n    links = json_out['items'].sort { |a,b| a['head_uuid'] <=> b['head_uuid'] }\n    assert_equal 3, links.length\n    assert_tag links[0], 'test_tag1', 'uuid1'\n    assert_tag links[1], 'test_tag1', 'uuid2'\n    assert_tag links[2], 'test_tag1', 'uuid3'\n\n    out, err = capture_subprocess_io do\n      assert arv_tag('remove', 'test_tag1',\n                     '--object', 'uuid1',\n                     '--object', 'uuid2',\n                     '--object', 'uuid3')\n    end\n    assert_empty err\n\n    out, err = capture_subprocess_io do\n      assert arv 'link', 'list', '--where', '{\"link_class\":\"tag\",\"name\":\"test_tag1\"}'\n    end\n\n    assert_empty err\n    assert_empty out\n  end\n\n  protected\n  def arv_tag(*args)\n    system ['./bin/arv-tag', 'arv-tag'], *args\n  end\n\n  def arv(*args)\n    system ['./bin/arv', 'arv'], *args\n  end\n\n  def assert_tag(link, name, head_uuid)\n    assert_equal 'tag',     link['link_class']\n    assert_equal name,      link['name']\n    assert_equal head_uuid, link['head_uuid']\n  end\nend\n"
  },
  {
    "path": "sdk/cli/test/test_arv-ws.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire 'minitest/autorun'\n\nclass TestArvWs < Minitest::Test\n  def setup\n  end\n\n  def test_arv_ws_get_help\n    _, err = capture_subprocess_io do\n      system ('arv-ws -h')\n    end\n    assert_equal '', err\n  end\n\n  def test_arv_ws_such_option\n    _, err = capture_subprocess_io do\n      system ('arv-ws --junk')\n    end\n    refute_equal '', err\n  end\n\nend\n"
  },
  {
    "path": "sdk/cwl/LICENSE-2.0.txt",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "sdk/cwl/MANIFEST.in",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ninclude LICENSE-2.0.txt\ninclude README.rst\ninclude arvados_version.py\ninclude arvados_cwl/arv-cwl-schema-*.yml\n"
  },
  {
    "path": "sdk/cwl/README.rst",
    "content": ".. Copyright (C) The Arvados Authors. All rights reserved.\n..\n.. SPDX-License-Identifier: Apache-2.0\n\n==================\nArvados CWL Runner\n==================\n\nOverview\n--------\n\nThis package provides the ``arvados-cwl-runner`` tool to register and run Common Workflow Language workflows in Arvados_.\n\n.. _Arvados: https://arvados.org/\n\nInstallation\n------------\n\nInstalling under your user account\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThis method lets you install the package without root access.  However,\nother users on the same system will need to reconfigure their shell in order\nto be able to use it. Run the following to install the package in an\nenvironment at ``~/arvclients``::\n\n  python3 -m venv ~/arvclients\n  ~/arvclients/bin/pip install arvados-cwl-runner\n\nCommand line tools will be installed under ``~/arvclients/bin``. You can\ntest one by running::\n\n  ~/arvclients/bin/arvados-cwl-runner --version\n\nYou can run these tools by specifying the full path every time, or you can\nadd the directory to your shell's search path by running::\n\n  export PATH=\"$PATH:$HOME/arvclients/bin\"\n\nYou can make this search path change permanent by adding this command to\nyour shell's configuration, for example ``~/.bashrc`` if you're using bash.\nYou can test the change by running::\n\n  arvados-cwl-runner --version\n\nInstalling on Debian and Ubuntu systems\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nArvados publishes packages for Debian 12 \"bookworm,\" Ubuntu 22.04 \"jammy,\" and Ubuntu 24.04 \"noble.\" You can install the Python SDK package on any of these distributions by running the following commands::\n\n  sudo install -d /etc/apt/keyrings\n  sudo curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg\n  sudo tee /etc/apt/sources.list.d/arvados.sources >/dev/null <<EOF\n  Types: deb\n  URIs: https://apt.arvados.org/$(lsb_release -cs)\n  Suites: $(lsb_release -cs)\n  Components: main\n  Signed-by: /etc/apt/keyrings/arvados.asc\n  EOF\n  sudo apt update\n  sudo apt install python3-arvados-cwl-runner\n\nInstalling on Red Hat, AlmaLinux, and Rocky Linux\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nArvados publishes packages for RHEL 8 and 9, as well as distributions based on those. Note that these packages depend on, and will automatically enable, the Python 3.11 module. You can install the Python SDK package on any of these distributions by running the following commands::\n\n  sudo tee /etc/yum.repos.d/arvados.repo >/dev/null <<'EOF'\n  [arvados]\n  name=Arvados\n  baseurl=https://rpm.arvados.org/RHEL/$releasever/os/$basearch/\n  gpgcheck=1\n  gpgkey=https://rpm.arvados.org/RHEL/$releasever/RPM-GPG-KEY-arvados\n  EOF\n  sudo dnf install python3-arvados-cwl-runner\n\nConfiguration\n-------------\n\nThis client software needs two pieces of information to connect to\nArvados: the DNS name of the API server, and an API authorization\ntoken. `The Arvados user\ndocumentation\n<http://doc.arvados.org/user/reference/api-tokens.html>`_ describes\nhow to find this information in the Arvados Workbench, and install it\non your system.\n\nTesting and Development\n-----------------------\n\nThis package is one part of the Arvados source package, and it has\nintegration tests to check interoperability with other Arvados\ncomponents.  Our `hacking guide\n<https://dev.arvados.org/projects/arvados/wiki/Hacking_Python_SDK>`_\ndescribes how to set up a development environment and run tests.\n"
  },
  {
    "path": "sdk/cwl/arvados_cwl/__init__.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Implement cwl-runner interface for submitting and running work on Arvados, using\n# the Crunch containers API.\n\nimport argparse\nimport importlib.metadata\nimport importlib.resources\nimport logging\nimport os\nimport sys\nimport re\n\nfrom schema_salad.sourceline import SourceLine\nimport schema_salad.validate as validate\nimport cwltool.main\nimport cwltool.workflow\nimport cwltool.process\nimport cwltool.argparser\nfrom cwltool.errors import WorkflowException\nfrom cwltool.process import shortname, UnsupportedRequirement, use_custom_schema\nfrom cwltool.utils import adjustFileObjs, adjustDirObjs, get_listing\n\nimport arvados\nimport arvados.config\nimport arvados.logging\nfrom arvados.keep import KeepClient\nfrom arvados.errors import ApiError\nimport arvados.commands._util as arv_cmd\n\nfrom .perf import Perf\nfrom ._version import __version__\nfrom .executor import ArvCwlExecutor\nfrom .fsaccess import workflow_uuid_pattern\n\n# These aren't used directly in this file but\n# other code expects to import them from here\nfrom .arvcontainer import ArvadosContainer\nfrom .arvtool import ArvadosCommandTool\nfrom .fsaccess import CollectionFsAccess, CollectionCache, CollectionFetcher\nfrom .util import get_current_container\nfrom .executor import RuntimeStatusLoggingHandler, DEFAULT_PRIORITY\nfrom .arvworkflow import ArvadosWorkflow\n\nlogger = logging.getLogger('arvados.cwl-runner')\nmetrics = logging.getLogger('arvados.cwl-runner.metrics')\nlogger.setLevel(logging.INFO)\n\narvados.log_handler.setFormatter(logging.Formatter(\n        '%(asctime)s %(name)s %(levelname)s: %(message)s',\n        '%Y-%m-%d %H:%M:%S'))\n\ndef versionstring():\n    \"\"\"Print version string of key packages for provenance and debugging.\"\"\"\n    return \"{} {}, arvados-python-client {}, cwltool {}\".format(\n        sys.argv[0],\n        importlib.metadata.version('arvados-cwl-runner'),\n        importlib.metadata.version('arvados-python-client'),\n        importlib.metadata.version('cwltool'),\n    )\n\ndef arg_parser():  # type: () -> argparse.ArgumentParser\n    parser = argparse.ArgumentParser(\n        description='Arvados executor for Common Workflow Language',\n        parents=[arv_cmd.retry_opt],\n    )\n\n    parser.add_argument(\"--basedir\",\n                        help=\"Base directory used to resolve relative references in the input, default to directory of input object file or current directory (if inputs piped/provided on command line).\")\n    parser.add_argument(\"--outdir\", default=os.path.abspath('.'),\n                        help=\"Output directory, default current directory\")\n\n    parser.add_argument(\"--eval-timeout\",\n                        help=\"Time to wait for a Javascript expression to evaluate before giving an error, default 20s.\",\n                        type=float,\n                        default=20)\n\n    exgroup = parser.add_mutually_exclusive_group()\n    exgroup.add_argument(\"--print-dot\", action=\"store_true\",\n                         help=\"Print workflow visualization in graphviz format and exit\")\n    exgroup.add_argument(\"--version\", action=\"version\", help=\"Print version and exit\", version=versionstring())\n    exgroup.add_argument(\"--validate\", action=\"store_true\", help=\"Validate CWL document only.\")\n\n    exgroup = parser.add_mutually_exclusive_group()\n    exgroup.add_argument(\"--verbose\", 
action=\"store_true\", help=\"Default logging\")\n    exgroup.add_argument(\"--quiet\", action=\"store_true\", help=\"Only print warnings and errors.\")\n    exgroup.add_argument(\"--debug\", action=\"store_true\", help=\"Print even more logging\")\n\n    parser.add_argument(\"--metrics\", action=\"store_true\", help=\"Print timing metrics\")\n\n    parser.add_argument(\"--tool-help\", action=\"store_true\", help=\"Print command line help for tool\")\n\n    exgroup = parser.add_mutually_exclusive_group()\n    exgroup.add_argument(\"--enable-reuse\", action=\"store_true\",\n                        default=True, dest=\"enable_reuse\",\n                        help=\"Enable container reuse (default)\")\n    exgroup.add_argument(\"--disable-reuse\", action=\"store_false\",\n                        default=True, dest=\"enable_reuse\",\n                        help=\"Disable container reuse\")\n\n    parser.add_argument(\"--project-uuid\", metavar=\"UUID\", help=\"Project that will own the workflow containers, if not provided, will go to home project.\")\n    parser.add_argument(\"--output-name\", help=\"Name to use for collection that stores the final output.\", default=None)\n    parser.add_argument(\"--output-tags\", help=\"Tags for the final output collection separated by commas, e.g., '--output-tags tag0,tag1,tag2'.\", default=None)\n    parser.add_argument(\"--ignore-docker-for-reuse\", action=\"store_true\",\n                        help=\"Ignore Docker image version when deciding whether to reuse past containers.\",\n                        default=False)\n\n    exgroup = parser.add_mutually_exclusive_group()\n    exgroup.add_argument(\"--submit\", action=\"store_true\", help=\"Submit workflow to run on Arvados.\",\n                        default=True, dest=\"submit\")\n    exgroup.add_argument(\"--local\", action=\"store_false\", help=\"Run workflow on local host (submits containers to Arvados).\",\n                        default=True, dest=\"submit\")\n    exgroup.add_argument(\"--create-template\", action=\"store_true\", help=\"(Deprecated) synonym for --create-workflow.\",\n                         dest=\"create_workflow\")\n    exgroup.add_argument(\"--create-workflow\", action=\"store_true\", help=\"Register an Arvados workflow that can be run from Workbench\")\n    exgroup.add_argument(\"--update-workflow\", metavar=\"UUID\", help=\"Update an existing Arvados workflow with the given UUID.\")\n\n    exgroup.add_argument(\"--print-keep-deps\", action=\"store_true\", help=\"To assist copying, print a list of Keep collections that this workflow depends on.\")\n\n    exgroup = parser.add_mutually_exclusive_group()\n    exgroup.add_argument(\"--wait\", action=\"store_true\", help=\"After submitting workflow runner, wait for completion.\",\n                        default=True, dest=\"wait\")\n    exgroup.add_argument(\"--no-wait\", action=\"store_false\", help=\"Submit workflow runner and exit.\",\n                        default=True, dest=\"wait\")\n\n    exgroup = parser.add_mutually_exclusive_group()\n    exgroup.add_argument(\"--log-timestamps\", action=\"store_true\", help=\"Prefix logging lines with timestamp\",\n                        default=True, dest=\"log_timestamps\")\n    exgroup.add_argument(\"--no-log-timestamps\", action=\"store_false\", help=\"No timestamp on logging lines\",\n                        default=True, dest=\"log_timestamps\")\n\n    parser.add_argument(\"--api\",\n                        default=None, dest=\"work_api\",\n                        
choices=(\"containers\",),\n                        help=\"Select work submission API.  Only supports 'containers'\")\n\n    parser.add_argument(\"--compute-checksum\", action=\"store_true\", default=False,\n                        help=\"Compute checksum of contents while collecting outputs\",\n                        dest=\"compute_checksum\")\n\n    parser.add_argument(\"--submit-runner-ram\", type=int,\n                        help=\"RAM (in MiB) required for the workflow runner job (default 1024)\",\n                        default=None)\n\n    parser.add_argument(\"--submit-runner-image\",\n                        help=\"Docker image for workflow runner job, default arvados/jobs:%s\" % __version__,\n                        default=None)\n\n    parser.add_argument(\"--always-submit-runner\", action=\"store_true\",\n                        help=\"When invoked with --submit --wait, always submit a runner to manage the workflow, even when only running a single CommandLineTool\",\n                        default=False)\n\n    parser.add_argument(\"--match-submitter-images\", action=\"store_true\",\n                        default=False, dest=\"match_local_docker\",\n                        help=\"Where Arvados has more than one Docker image of the same name, use image from the Docker instance on the submitting node.\")\n\n    exgroup = parser.add_mutually_exclusive_group()\n    exgroup.add_argument(\"--submit-request-uuid\",\n                         default=None,\n                         help=\"Update and commit to supplied container request instead of creating a new one.\",\n                         metavar=\"UUID\")\n    exgroup.add_argument(\"--submit-runner-cluster\",\n                         help=\"Submit workflow runner to a remote cluster\",\n                         default=None,\n                         metavar=\"CLUSTER_ID\")\n\n    parser.add_argument(\"--collection-cache-size\", type=int,\n                        default=None,\n                        help=\"Collection cache size (in MiB, default 256).\")\n\n    parser.add_argument(\"--name\",\n                        help=\"Name to use for workflow execution instance.\",\n                        default=None)\n\n    parser.add_argument(\"--on-error\",\n                        help=\"Desired workflow behavior when a step fails.  One of 'stop' (do not submit any more steps) or \"\n                        \"'continue' (may submit other steps that are not downstream from the error). Default is 'continue'.\",\n                        default=\"continue\", choices=(\"stop\", \"continue\"))\n\n    parser.add_argument(\"--enable-dev\", action=\"store_true\",\n                        help=\"Enable loading and running development versions \"\n                             \"of the CWL standards.\", default=False)\n    parser.add_argument(\n        '--storage-classes',\n        type=arv_cmd.UniqueSplit(),\n        default=[],\n        help=\"Specify comma separated list of storage classes to be used when saving final workflow output to Keep.\",\n    )\n    parser.add_argument(\n        '--intermediate-storage-classes',\n        type=arv_cmd.UniqueSplit(),\n        default=[],\n        help=\"Specify comma separated list of storage classes to be used when saving intermediate workflow output to Keep.\",\n    )\n    parser.add_argument(\"--intermediate-output-ttl\", type=int, metavar=\"N\",\n                        help=\"If N > 0, intermediate output collections will be trashed N seconds after creation.  
Default is 0 (don't trash).\",\n                        default=0)\n\n    parser.add_argument(\"--priority\", type=int,\n                        help=\"Workflow priority (range 1..1000, higher has precedence over lower)\",\n                        default=DEFAULT_PRIORITY)\n\n    parser.add_argument(\"--disable-validate\", dest=\"do_validate\",\n                        action=\"store_false\", default=True,\n                        help=argparse.SUPPRESS)\n\n    parser.add_argument(\"--disable-git\", dest=\"git_info\",\n                        action=\"store_false\", default=True,\n                        help=argparse.SUPPRESS)\n\n    parser.add_argument(\"--disable-color\", dest=\"enable_color\",\n                        action=\"store_false\", default=True,\n                        help=argparse.SUPPRESS)\n\n    parser.add_argument(\"--disable-js-validation\",\n                        action=\"store_true\", default=False,\n                        help=argparse.SUPPRESS)\n\n    parser.add_argument(\"--fast-parser\", dest=\"fast_parser\",\n                        action=\"store_true\", default=False,\n                        help=argparse.SUPPRESS)\n\n    parser.add_argument(\"--thread-count\", type=int,\n                        default=0, help=\"Number of threads to use for job submit and output collection.\")\n\n    parser.add_argument(\"--http-timeout\", type=int,\n                        default=5*60, dest=\"http_timeout\", help=\"API request timeout in seconds. Default is 300 seconds (5 minutes).\")\n\n    parser.add_argument(\"--defer-downloads\", action=\"store_true\", default=False,\n                        help=\"When submitting a workflow, defer downloading HTTP or S3 URLs to launch of the workflow runner container instead of downloading to Keep before submit.\")\n\n    parser.add_argument(\"--enable-aws-credential-capture\", action=\"store_true\", default=False, dest=\"aws_credential_capture\",\n                        help=\"When submitting a workflow that requires AWS credentials, capture them from the local environment for use by the workflow runner container.\")\n\n    parser.add_argument(\"--disable-aws-credential-capture\", action=\"store_false\", default=False, dest=\"aws_credential_capture\",\n                        help=\"Do not capture AWS credentials from the local environment, must use credentials registered with Arvados.\")\n\n    parser.add_argument(\"--s3-public-bucket\", action=\"store_true\",\n                        help=\"Downloads are from a public bucket, so no AWS credentials are required.\")\n\n    parser.add_argument(\"--use-credential\", default=None, dest=\"selected_credential\",\n                        help=\"Name or uuid of a credential registered with Arvados that will be used to fetch external resources.\")\n\n    parser.add_argument(\"--varying-url-params\", type=str, default=\"\",\n                        help=\"A comma separated list of URL query parameters that should be ignored when storing HTTP URLs in Keep.\")\n\n    parser.add_argument(\"--prefer-cached-downloads\", action=\"store_true\", default=False,\n                        help=\"If a HTTP URL is found in Keep, skip upstream URL freshness check (will not notice if the upstream has changed, but also not error if upstream is unavailable).\")\n\n    exgroup = parser.add_mutually_exclusive_group()\n    exgroup.add_argument(\"--enable-preemptible\", dest=\"enable_preemptible\", default=None, action=\"store_true\", help=\"Use preemptible instances. 
Control individual steps with arv:UsePreemptible hint.\")\n    exgroup.add_argument(\"--disable-preemptible\", dest=\"enable_preemptible\", default=None, action=\"store_false\", help=\"Don't use preemptible instances.\")\n\n    exgroup = parser.add_mutually_exclusive_group()\n    exgroup.add_argument(\"--enable-resubmit-non-preemptible\", dest=\"enable_resubmit_non_preemptible\",\n                         default=None, action=\"store_true\",\n                         help=\"If a workflow step fails due to the instance it is running on being preempted, re-submit the container with the `preemptible` flag disabled. Control individual steps with arv:PreemptionBehavior hint.\")\n    exgroup.add_argument(\"--disable-resubmit-non-preemptible\", dest=\"enable_resubmit_non_preemptible\",\n                         default=None, action=\"store_false\",\n                         help=\"Don't resumbit when a preemptible instance is reclaimed.\")\n\n    exgroup = parser.add_mutually_exclusive_group()\n    exgroup.add_argument(\"--copy-deps\", dest=\"copy_deps\", default=None, action=\"store_true\", help=\"Copy dependencies into the destination project.\")\n    exgroup.add_argument(\"--no-copy-deps\", dest=\"copy_deps\", default=None, action=\"store_false\", help=\"Leave dependencies where they are.\")\n\n    parser.add_argument(\n        \"--skip-schemas\",\n        action=\"store_true\",\n        help=\"Skip loading of schemas\",\n        default=False,\n        dest=\"skip_schemas\",\n    )\n\n    exgroup = parser.add_mutually_exclusive_group()\n    exgroup.add_argument(\"--trash-intermediate\", action=\"store_true\",\n                        default=False, dest=\"trash_intermediate\",\n                         help=\"Immediately trash intermediate outputs on workflow success.\")\n    exgroup.add_argument(\"--no-trash-intermediate\", action=\"store_false\",\n                        default=False, dest=\"trash_intermediate\",\n                        help=\"Do not trash intermediate outputs (default).\")\n\n    exgroup = parser.add_mutually_exclusive_group()\n    exgroup.add_argument(\"--enable-usage-report\", dest=\"enable_usage_report\", default=None, action=\"store_true\", help=\"Create usage_report.html with a summary of each step's resource usage.\")\n    exgroup.add_argument(\"--disable-usage-report\", dest=\"enable_usage_report\", default=None, action=\"store_false\", help=\"Disable usage report.\")\n\n    parser.add_argument(\"workflow\", default=None, help=\"The workflow to execute\")\n    parser.add_argument(\"job_order\", nargs=argparse.REMAINDER, help=\"The input object to the workflow.\")\n\n    return parser\n\ndef add_arv_hints():\n    cwltool.command_line_tool.ACCEPTLIST_EN_RELAXED_RE = re.compile(r\".*\")\n    cwltool.command_line_tool.ACCEPTLIST_RE = cwltool.command_line_tool.ACCEPTLIST_EN_RELAXED_RE\n    supported_versions = [\"v1.0\", \"v1.1\", \"v1.2\"]\n    for s in supported_versions:\n        customschema = importlib.resources.read_text(__name__, f'arv-cwl-schema-{s}.yml', encoding='utf-8')\n        use_custom_schema(s, \"http://arvados.org/cwl\", customschema)\n    cwltool.process.supportedProcessRequirements.extend([\n        \"http://arvados.org/cwl#RunInSingleContainer\",\n        \"http://arvados.org/cwl#OutputDirType\",\n        \"http://arvados.org/cwl#RuntimeConstraints\",\n        \"http://arvados.org/cwl#PartitionRequirement\",\n        \"http://arvados.org/cwl#APIRequirement\",\n        \"http://commonwl.org/cwltool#LoadListingRequirement\",\n        
\"http://arvados.org/cwl#IntermediateOutput\",\n        \"http://arvados.org/cwl#ReuseRequirement\",\n        \"http://arvados.org/cwl#ClusterTarget\",\n        \"http://arvados.org/cwl#OutputStorageClass\",\n        \"http://arvados.org/cwl#ProcessProperties\",\n        \"http://commonwl.org/cwltool#CUDARequirement\",\n        \"http://arvados.org/cwl#UsePreemptible\",\n        \"http://arvados.org/cwl#OutputCollectionProperties\",\n        \"http://arvados.org/cwl#KeepCacheTypeRequirement\",\n        \"http://arvados.org/cwl#OutOfMemoryRetry\",\n        \"http://arvados.org/cwl#PreemptionBehavior\",\n        \"http://arvados.org/cwl#ROCmRequirement\",\n        \"http://arvados.org/cwl#PublishPorts\",\n    ])\n\ndef exit_signal_handler(sigcode, frame):\n    logger.error(str(u\"Caught signal {}, exiting.\").format(sigcode))\n    sys.exit(-sigcode)\n\ndef main(args=sys.argv[1:],\n         stdout=sys.stdout,\n         stderr=sys.stderr,\n         api_client=None,\n         keep_client=None,\n         install_sig_handlers=True):\n    parser = arg_parser()\n\n    job_order_object = None\n    arvargs = parser.parse_args(args)\n\n    arvargs.use_container = True\n    arvargs.relax_path_checks = True\n    arvargs.print_supported_versions = False\n\n    if install_sig_handlers:\n        arv_cmd.install_signal_handlers()\n\n    if arvargs.update_workflow:\n        if arvargs.update_workflow.find('-7fd4e-') == 5:\n            want_api = 'containers'\n        else:\n            want_api = None\n        if want_api and arvargs.work_api and want_api != arvargs.work_api:\n            logger.error(str(u'--update-workflow arg {!r} uses {!r} API, but --api={!r} specified').format(\n                arvargs.update_workflow, want_api, arvargs.work_api))\n            return 1\n        arvargs.work_api = want_api\n\n    workflow_op = arvargs.create_workflow or arvargs.update_workflow or arvargs.print_keep_deps\n\n    if workflow_op and not arvargs.job_order:\n        job_order_object = ({}, \"\")\n\n    add_arv_hints()\n\n    for key, val in cwltool.argparser.get_default_args().items():\n        if not hasattr(arvargs, key):\n            setattr(arvargs, key, val)\n\n    try:\n        if api_client is None:\n            api_client = arvados.safeapi.ThreadSafeApiCache(\n                api_params={\n                    'num_retries': arvargs.retries,\n                    'timeout': arvargs.http_timeout,\n                },\n                keep_params={\n                    'num_retries': arvargs.retries,\n                },\n                version='v1',\n            )\n            keep_client = api_client.keep\n            # Make an API object now so errors are reported early.\n            api_client.users().current().execute()\n        if keep_client is None:\n            block_cache = arvados.keep.KeepBlockCache(disk_cache=True)\n            keep_client = arvados.keep.KeepClient(\n                api_client=api_client,\n                block_cache=block_cache,\n                num_retries=arvargs.retries,\n            )\n        executor = ArvCwlExecutor(\n            api_client,\n            arvargs,\n            keep_client=keep_client,\n            num_retries=arvargs.retries,\n            stdout=stdout,\n        )\n    except WorkflowException as e:\n        logger.error(e, exc_info=(sys.exc_info()[1] if arvargs.debug else False))\n        return 1\n    except Exception:\n        logger.exception(\"Error creating the Arvados CWL Executor\")\n        return 1\n\n    # Note that unless in debug mode, some 
stack traces related to user\n    # workflow errors may be suppressed.\n\n    # Set the logging on most modules INFO (instead of default which is WARNING)\n    logger.setLevel(logging.INFO)\n    logging.getLogger('arvados').setLevel(logging.INFO)\n    logging.getLogger('arvados.keep').setLevel(logging.WARNING)\n    # API retries are filtered to the INFO level and can be noisy, but as long as\n    # they succeed we don't need to see warnings about it.\n    googleapiclient_http_logger = logging.getLogger('googleapiclient.http')\n    googleapiclient_http_logger.addFilter(arvados.logging.GoogleHTTPClientFilter())\n    googleapiclient_http_logger.setLevel(logging.WARNING)\n\n    if arvargs.debug:\n        logger.setLevel(logging.DEBUG)\n        logging.getLogger('arvados').setLevel(logging.DEBUG)\n        # In debug mode show logs about retries, but we arn't\n        # debugging the google client so we don't need to see\n        # everything.\n        googleapiclient_http_logger.setLevel(logging.NOTSET)\n        logging.getLogger('googleapiclient').setLevel(logging.INFO)\n\n    if arvargs.quiet:\n        logger.setLevel(logging.WARN)\n        logging.getLogger('arvados').setLevel(logging.WARN)\n        logging.getLogger('arvados.arv-run').setLevel(logging.WARN)\n\n    if arvargs.metrics:\n        metrics.setLevel(logging.DEBUG)\n        logging.getLogger(\"cwltool.metrics\").setLevel(logging.DEBUG)\n\n    if arvargs.log_timestamps:\n        arvados.log_handler.setFormatter(logging.Formatter(\n            '%(asctime)s %(name)s %(levelname)s: %(message)s',\n            '%Y-%m-%d %H:%M:%S'))\n    else:\n        arvados.log_handler.setFormatter(logging.Formatter('%(name)s %(levelname)s: %(message)s'))\n\n    if stdout is sys.stdout:\n        # cwltool.main has code to work around encoding issues with\n        # sys.stdout and unix pipes (they default to ASCII encoding,\n        # we want utf-8), so when stdout is sys.stdout set it to None\n        # to take advantage of that.  Don't override it for all cases\n        # since we still want to be able to capture stdout for the\n        # unit tests.\n        stdout = None\n\n    executor.loadingContext.default_docker_image = arvargs.submit_runner_image or \"arvados/jobs:\"+__version__\n\n    if arvargs.workflow.startswith(\"arvwf:\") or workflow_uuid_pattern.match(arvargs.workflow) or arvargs.workflow.startswith(\"keep:\"):\n        executor.loadingContext.do_validate = False\n        if arvargs.submit and not workflow_op:\n            executor.fast_submit = True\n\n    return cwltool.main.main(args=arvargs,\n                             stdout=stdout,\n                             stderr=stderr,\n                             executor=executor.arv_executor,\n                             versionfunc=versionstring,\n                             job_order_object=job_order_object,\n                             logger_handler=arvados.log_handler,\n                             custom_schema_callback=add_arv_hints,\n                             loadingContext=executor.loadingContext,\n                             runtimeContext=executor.toplevel_runtimeContext,\n                             input_required=not workflow_op)\n"
  },
  {
    "path": "sdk/cwl/arvados_cwl/arv-cwl-schema-v1.0.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n$base: \"http://arvados.org/cwl#\"\n$namespaces:\n  cwl: \"https://w3id.org/cwl/cwl#\"\n  cwltool: \"http://commonwl.org/cwltool#\"\n$graph:\n- $import: https://w3id.org/cwl/CommonWorkflowLanguage.yml\n\n- name: cwltool:LoadListingRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  fields:\n    class:\n      type: string\n      doc: \"Always 'LoadListingRequirement'\"\n      jsonldPredicate:\n        \"_id\": \"@type\"\n        \"_type\": \"@vocab\"\n    loadListing:\n      type:\n        - \"null\"\n        - type: enum\n          name: LoadListingEnum\n          symbols: [no_listing, shallow_listing, deep_listing]\n\n- name: cwltool:Secrets\n  type: record\n  inVocab: false\n  extends: cwl:ProcessRequirement\n  fields:\n    class:\n      type: string\n      doc: \"Always 'Secrets'\"\n      jsonldPredicate:\n        \"_id\": \"@type\"\n        \"_type\": \"@vocab\"\n    secrets:\n      type: string[]\n      doc: |\n        List one or more input parameters that are sensitive (such as passwords)\n        which will be deliberately obscured from logging.\n      jsonldPredicate:\n        \"_type\": \"@id\"\n        refScope: 0\n\n- name: cwltool:TimeLimit\n  type: record\n  inVocab: false\n  extends: cwl:ProcessRequirement\n  doc: |\n    Set an upper limit on the execution time of a CommandLineTool or\n    ExpressionTool.  A tool execution which exceeds the time limit may\n    be preemptively terminated and considered failed.  May also be\n    used by batch systems to make scheduling decisions.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'TimeLimit'\"\n      jsonldPredicate:\n        \"_id\": \"@type\"\n        \"_type\": \"@vocab\"\n    - name: timelimit\n      type: [long, string]\n      doc: |\n        The time limit, in seconds.  A time limit of zero means no\n        time limit.  Negative time limits are an error.\n\n- name: RunInSingleContainer\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Indicates that a subworkflow should run in a single container\n    and not be scheduled as separate steps.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:RunInSingleContainer'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n\n- name: OutputDirType\n  type: enum\n  symbols:\n    - local_output_dir\n    - keep_output_dir\n  doc:\n    - |\n      local_output_dir: Use regular file system local to the compute node.\n      There must be sufficient local scratch space to store entire output;\n      specify this with `outdirMin` of `ResourceRequirement`.  Files are\n      batch uploaded to Keep when the process completes.  Most compatible, but\n      upload step can be time consuming for very large files.\n    - |\n      keep_output_dir: Use writable Keep mount.  Files are streamed to Keep as\n      they are written.  Does not consume local scratch space, but does consume\n      RAM for output buffers (up to 192 MiB per file simultaneously open for\n      writing.)  Best suited to processes which produce sequential output of\n      large files (non-sequential writes may produced fragmented file\n      manifests).  
Supports regular files and directories, does not support\n      special files such as symlinks, hard links, named pipes, named sockets,\n      or device nodes.\n\n\n- name: RuntimeConstraints\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Set Arvados-specific runtime hints.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:RuntimeConstraints'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: keep_cache\n      type: int?\n      doc: |\n        Size of file data buffer for Keep mount in MiB. Default is 256\n        MiB. Increase this to reduce cache thrashing in situations such as\n        accessing multiple large (64+ MiB) files at the same time, or\n        performing random access on a large file.\n    - name: outputDirType\n      type: OutputDirType?\n      doc: |\n        Preferred backing store for output staging.  If not specified, the\n        system may choose which one to use.\n\n- name: PartitionRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Select preferred compute partitions on which to run jobs.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:PartitionRequirement'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: partition\n      type:\n        - string\n        - string[]\n\n- name: APIRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Indicates that process wants to access to the Arvados API.  Will be granted\n    limited network access and have ARVADOS_API_HOST and ARVADOS_API_TOKEN set\n    in the environment.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:APIRequirement'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n\n- name: IntermediateOutput\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify desired handling of intermediate output collections.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:IntermediateOutput'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    outputTTL:\n      type: int\n      doc: |\n        If the value is greater than zero, consider intermediate output\n        collections to be temporary and should be automatically\n        trashed. Temporary collections will be trashed `outputTTL` seconds\n        after creation.  A value of zero means intermediate output should be\n        retained indefinitely (this is the default behavior).\n\n        Note: arvados-cwl-runner currently does not take workflow dependencies\n        into account when setting the TTL on an intermediate output\n        collection. If the TTL is too short, it is possible for a collection to\n        be trashed before downstream steps that consume it are started.  The\n        recommended minimum value for TTL is the expected duration of the\n        entire the workflow.\n\n- name: ReuseRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Enable/disable work reuse for current process.  
Default true (work reuse enabled).\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:ReuseRequirement'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: enableReuse\n      type: boolean\n\n- name: WorkflowRunnerResources\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify memory or cores resource request for the CWL runner process itself.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:WorkflowRunnerResources'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    ramMin:\n      type: int?\n      doc: Minimum RAM, in mebibytes (2**20)\n      jsonldPredicate: \"https://w3id.org/cwl/cwl#ResourceRequirement/ramMin\"\n    coresMin:\n      type: int?\n      doc: Minimum cores allocated to cwl-runner\n      jsonldPredicate: \"https://w3id.org/cwl/cwl#ResourceRequirement/coresMin\"\n    keep_cache:\n      type: int?\n      doc: |\n        Size of collection metadata cache for the workflow runner, in\n        MiB.  Default 256 MiB.  Will be added on to the RAM request\n        when determining node size to request.\n      jsonldPredicate: \"http://arvados.org/cwl#RuntimeConstraints/keep_cache\"\n    acrContainerImage:\n      type: string?\n      doc: |\n        The container image containing the correct version of\n        arvados-cwl-runner to use when invoking the workflow on\n        Arvados.\n\n- name: ClusterTarget\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify where a workflow step should run\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:ClusterTarget'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    cluster_id:\n      type: string?\n      doc: The cluster to run the container\n    project_uuid:\n      type: string?\n      doc: The project that will own the container requests and intermediate collections\n\n\n- name: OutputStorageClass\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify the storage class to be used for intermediate and final output\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:StorageClassHint\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    intermediateStorageClass:\n      type:\n        - \"null\"\n        - string\n        - type: array\n          items: string\n      doc: One or more storages classes\n    finalStorageClass:\n      type:\n        - \"null\"\n        - string\n        - type: array\n          items: string\n      doc: One or more storages classes\n\n- type: record\n  name: PropertyDef\n  doc: |\n    Define an arvados metadata property that will be set on a\n    container request or output collection.\n  fields:\n    - name: propertyName\n      type: string\n      doc: The property key\n    - name: propertyValue\n      type: [Any]\n      doc: The property value\n\n\n- name: ProcessProperties\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify metadata properties that will be set on the submitted\n    container request associated with this workflow or step.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:ProcessProperties\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    processProperties:\n      type: PropertyDef[]\n      jsonldPredicate:\n        mapSubject: propertyName\n        mapPredicate: propertyValue\n\n\n- name: 
cwltool:CUDARequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Require support for NVIDA CUDA (GPU hardware acceleration).\n  fields:\n    class:\n      type: string\n      doc: 'cwltool:CUDARequirement'\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    cudaVersionMin:\n      type: string\n      doc: |\n        Minimum CUDA version to run the software, in X.Y format.  This\n        corresponds to a CUDA SDK release.  When running directly on\n        the host (not in a container) the host must have a compatible\n        CUDA SDK (matching the exact version, or, starting with CUDA\n        11.3, matching major version).  When run in a container, the\n        container image should provide the CUDA runtime, and the host\n        driver is injected into the container.  In this case, because\n        CUDA drivers are backwards compatible, it is possible to\n        use an older SDK with a newer driver across major versions.\n\n        See https://docs.nvidia.com/deploy/cuda-compatibility/ for\n        details.\n    cudaComputeCapability:\n      type:\n        - 'string'\n        - 'string[]'\n      doc: |\n        CUDA hardware capability required to run the software, in X.Y\n        format.\n\n        * If this is a single value, it defines only the minimum\n          compute capability.  GPUs with higher capability are also\n          accepted.\n\n        * If it is an array value, then only select GPUs with compute\n          capabilities that explicitly appear in the array.\n    cudaDeviceCountMin:\n      type: ['null', int, cwl:Expression]\n      default: 1\n      doc: |\n        Minimum number of GPU devices to request.  If not specified,\n        same as `cudaDeviceCountMax`.  If neither are specified,\n        default 1.\n    cudaDeviceCountMax:\n      type: ['null', int, cwl:Expression]\n      doc: |\n        Maximum number of GPU devices to request.  If not specified,\n        same as `cudaDeviceCountMin`.\n    cudaVram:\n      type: ['null', long, cwl:Expression]\n      default: 1024\n      doc: |\n        Amount of VRAM to request, in mebibytes (2**20)\n\n\n- name: ROCmRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Require support for AMD ROCm (GPU hardware acceleration).\n  fields:\n    class:\n      type: string\n      doc: 'arv:ROCmRequirement'\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    rocmDriverVersion:\n      type: string\n      doc: |\n        Compatible ROCm driver version, in X.Y format, e.g. \"6.2\".\n    rocmTarget:\n      type:\n        - 'string'\n        - 'string[]'\n      doc: |\n        Compatible GPU architecture/ROCm LLVM targets, e.g. \"gfx1100\".\n    rocmDeviceCountMin:\n      type: ['null', int, cwl:Expression]\n      default: 1\n      doc: |\n        Minimum number of GPU devices to request.  If not specified,\n        same as `rocmDeviceCountMax`.  If neither are specified,\n        default 1.\n    rocmDeviceCountMax:\n      type: ['null', int, cwl:Expression]\n      doc: |\n        Maximum number of GPU devices to request.  
If not specified,\n        same as `rocmDeviceCountMin`.\n    rocmVram:\n      type: [long, cwl:Expression]\n      default: 1024\n      doc: |\n        Amount of VRAM to request, in mebibytes (2**20).\n\n- name: UsePreemptible\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify a workflow step should opt-in or opt-out of using preemptible (spot) instances.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:UsePreemptible\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    usePreemptible: boolean\n\n- name: OutputCollectionProperties\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify metadata properties that will be set on the output\n    collection associated with this workflow or step.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:OutputCollectionProperties\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    outputProperties:\n      type: PropertyDef[]\n      jsonldPredicate:\n        mapSubject: propertyName\n        mapPredicate: propertyValue\n\n\n- name: KeepCacheType\n  type: enum\n  symbols:\n    - ram_cache\n    - disk_cache\n  doc:\n    - |\n        ram_cache: Keep blocks will be cached in RAM only.\n    - |\n        disk_cache: Keep blocks will be cached to disk and\n        memory-mapped.  The disk cache leverages the kernel's virtual\n        memory system so \"hot\" data will generally still be kept in\n        RAM.\n\n- name: KeepCacheTypeRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Choose keep cache strategy.\n  fields:\n    - name: class\n      type: string\n      doc: \"'arv:KeepCacheTypeRequirement'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: keepCacheType\n      type: KeepCacheType?\n      doc: |\n        Whether Keep blocks loaded by arv-mount should be kept in RAM\n        only or written to disk and memory-mapped.  The disk cache\n        leverages the kernel's virtual memory system so \"hot\" data will\n        generally still be kept in RAM.\n\n- name: OutOfMemoryRetry\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Detect when a failed tool run may have run out of memory, and\n    re-submit the container with more RAM.\n  fields:\n    - name: class\n      type: string\n      doc: \"'arv:OutOfMemoryRetry\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: memoryErrorRegex\n      type: string?\n      doc: |\n        A regular expression that will be used on the text of stdout\n        and stderr produced by the tool to determine if a failed job\n        should be retried with more RAM.  By default, searches for the\n        substrings 'bad_alloc' and 'OutOfMemory'.\n    - name: memoryRetryMultiplier\n      type: float?\n      doc: |\n        If the container failed on its first run, re-submit the\n        container with the RAM request multiplied by this factor.\n    - name: memoryRetryMultipler\n      type: float?\n      doc: |\n        Deprecated misspelling of \"memoryRetryMultiplier\".  Kept only\n        for backwards compatability, don't use this.\n"
  },
  {
    "path": "sdk/cwl/arvados_cwl/arv-cwl-schema-v1.1.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n$base: \"http://arvados.org/cwl#\"\n$namespaces:\n  cwl: \"https://w3id.org/cwl/cwl#\"\n  cwltool: \"http://commonwl.org/cwltool#\"\n$graph:\n- $import: https://w3id.org/cwl/CommonWorkflowLanguage.yml\n\n- name: cwltool:Secrets\n  type: record\n  inVocab: false\n  extends: cwl:ProcessRequirement\n  fields:\n    class:\n      type: string\n      doc: \"Always 'Secrets'\"\n      jsonldPredicate:\n        \"_id\": \"@type\"\n        \"_type\": \"@vocab\"\n    secrets:\n      type: string[]\n      doc: |\n        List one or more input parameters that are sensitive (such as passwords)\n        which will be deliberately obscured from logging.\n      jsonldPredicate:\n        \"_type\": \"@id\"\n        refScope: 0\n\n- name: RunInSingleContainer\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Indicates that a subworkflow should run in a single container\n    and not be scheduled as separate steps.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:RunInSingleContainer'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n\n- name: OutputDirType\n  type: enum\n  symbols:\n    - local_output_dir\n    - keep_output_dir\n  doc:\n    - |\n      local_output_dir: Use regular file system local to the compute node.\n      There must be sufficient local scratch space to store entire output;\n      specify this with `outdirMin` of `ResourceRequirement`.  Files are\n      batch uploaded to Keep when the process completes.  Most compatible, but\n      upload step can be time consuming for very large files.\n    - |\n      keep_output_dir: Use writable Keep mount.  Files are streamed to Keep as\n      they are written.  Does not consume local scratch space, but does consume\n      RAM for output buffers (up to 192 MiB per file simultaneously open for\n      writing.)  Best suited to processes which produce sequential output of\n      large files (non-sequential writes may produced fragmented file\n      manifests).  Supports regular files and directories, does not support\n      special files such as symlinks, hard links, named pipes, named sockets,\n      or device nodes.\n\n\n- name: RuntimeConstraints\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Set Arvados-specific runtime hints.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:RuntimeConstraints'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: keep_cache\n      type: int?\n      doc: |\n        Size of file data buffer for Keep mount in MiB. Default is 256\n        MiB. Increase this to reduce cache thrashing in situations such as\n        accessing multiple large (64+ MiB) files at the same time, or\n        performing random access on a large file.\n    - name: outputDirType\n      type: OutputDirType?\n      doc: |\n        Preferred backing store for output staging.  
If not specified, the\n        system may choose which one to use.\n\n- name: PartitionRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Select preferred compute partitions on which to run jobs.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:PartitionRequirement'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: partition\n      type:\n        - string\n        - string[]\n\n- name: APIRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Indicates that process wants to access to the Arvados API.  Will be granted\n    limited network access and have ARVADOS_API_HOST and ARVADOS_API_TOKEN set\n    in the environment.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:APIRequirement'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n\n- name: IntermediateOutput\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify desired handling of intermediate output collections.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:IntermediateOutput'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    outputTTL:\n      type: int\n      doc: |\n        If the value is greater than zero, consider intermediate output\n        collections to be temporary and should be automatically\n        trashed. Temporary collections will be trashed `outputTTL` seconds\n        after creation.  A value of zero means intermediate output should be\n        retained indefinitely (this is the default behavior).\n\n        Note: arvados-cwl-runner currently does not take workflow dependencies\n        into account when setting the TTL on an intermediate output\n        collection. If the TTL is too short, it is possible for a collection to\n        be trashed before downstream steps that consume it are started.  The\n        recommended minimum value for TTL is the expected duration of the\n        entire the workflow.\n\n- name: WorkflowRunnerResources\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify memory or cores resource request for the CWL runner process itself.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:WorkflowRunnerResources'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    ramMin:\n      type: int?\n      doc: Minimum RAM, in mebibytes (2**20)\n      jsonldPredicate: \"https://w3id.org/cwl/cwl#ResourceRequirement/ramMin\"\n    coresMin:\n      type: int?\n      doc: Minimum cores allocated to cwl-runner\n      jsonldPredicate: \"https://w3id.org/cwl/cwl#ResourceRequirement/coresMin\"\n    keep_cache:\n      type: int?\n      doc: |\n        Size of collection metadata cache for the workflow runner, in\n        MiB.  Default 256 MiB.  
Will be added on to the RAM request\n        when determining node size to request.\n      jsonldPredicate: \"http://arvados.org/cwl#RuntimeConstraints/keep_cache\"\n    acrContainerImage:\n      type: string?\n      doc: |\n        The container image containing the correct version of\n        arvados-cwl-runner to use when invoking the workflow on\n        Arvados.\n\n- name: ClusterTarget\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify where a workflow step should run\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:ClusterTarget'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    cluster_id:\n      type: string?\n      doc: The cluster to run the container\n    project_uuid:\n      type: string?\n      doc: The project that will own the container requests and intermediate collections\n\n- name: OutputStorageClass\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify the storage class to be used for intermediate and final output\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:StorageClassHint\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    intermediateStorageClass:\n      type:\n        - \"null\"\n        - string\n        - type: array\n          items: string\n      doc: One or more storages classes\n    finalStorageClass:\n      type:\n        - \"null\"\n        - string\n        - type: array\n          items: string\n      doc: One or more storages classes\n\n- type: record\n  name: PropertyDef\n  doc: |\n    Define a property that will be set on the submitted container\n    request associated with this workflow or step.\n  fields:\n    - name: propertyName\n      type: string\n      doc: The property key\n    - name: propertyValue\n      type: [Any]\n      doc: The property value\n\n\n- name: ProcessProperties\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify metadata properties that will be set on the submitted\n    container request associated with this workflow or step.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:ProcessProperties\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    processProperties:\n      type: PropertyDef[]\n      jsonldPredicate:\n        mapSubject: propertyName\n        mapPredicate: propertyValue\n\n\n- name: cwltool:CUDARequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Require support for NVIDA CUDA (GPU hardware acceleration).\n  fields:\n    class:\n      type: string\n      doc: 'cwltool:CUDARequirement'\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    cudaVersionMin:\n      type: string\n      doc: |\n        Minimum CUDA version to run the software, in X.Y format.  This\n        corresponds to a CUDA SDK release.  When running directly on\n        the host (not in a container) the host must have a compatible\n        CUDA SDK (matching the exact version, or, starting with CUDA\n        11.3, matching major version).  When run in a container, the\n        container image should provide the CUDA runtime, and the host\n        driver is injected into the container.  
In this case, because\n        CUDA drivers are backwards compatible, it is possible to\n        use an older SDK with a newer driver across major versions.\n\n        See https://docs.nvidia.com/deploy/cuda-compatibility/ for\n        details.\n    cudaComputeCapability:\n      type:\n        - 'string'\n        - 'string[]'\n      doc: |\n        CUDA hardware capability required to run the software, in X.Y\n        format.\n\n        * If this is a single value, it defines only the minimum\n          compute capability.  GPUs with higher capability are also\n          accepted.\n\n        * If it is an array value, then only select GPUs with compute\n          capabilities that explicitly appear in the array.\n    cudaDeviceCountMin:\n      type: ['null', int, cwl:Expression]\n      default: 1\n      doc: |\n        Minimum number of GPU devices to request.  If not specified,\n        same as `cudaDeviceCountMax`.  If neither are specified,\n        default 1.\n    cudaDeviceCountMax:\n      type: ['null', int, cwl:Expression]\n      doc: |\n        Maximum number of GPU devices to request.  If not specified,\n        same as `cudaDeviceCountMin`.\n    cudaVram:\n      type: ['null', long, cwl:Expression]\n      default: 1024\n      doc: |\n        Amount of VRAM to request, in mebibytes (2**20)\n\n\n- name: ROCmRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Require support for AMD ROCm (GPU hardware acceleration).\n  fields:\n    class:\n      type: string\n      doc: 'arv:ROCmRequirement'\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    rocmDriverVersion:\n      type: string\n      doc: |\n        Compatible ROCm driver version, in X.Y format, e.g. \"6.2\".\n    rocmTarget:\n      type:\n        - 'string'\n        - 'string[]'\n      doc: |\n        Compatible GPU architecture/ROCm LLVM targets, e.g. \"gfx1100\".\n    rocmDeviceCountMin:\n      type: ['null', int, cwl:Expression]\n      default: 1\n      doc: |\n        Minimum number of GPU devices to request.  If not specified,\n        same as `rocmDeviceCountMax`.  If neither are specified,\n        default 1.\n    rocmDeviceCountMax:\n      type: ['null', int, cwl:Expression]\n      doc: |\n        Maximum number of GPU devices to request.  
If not specified,\n        same as `rocmDeviceCountMin`.\n    rocmVram:\n      type: [long, cwl:Expression]\n      default: 1024\n      doc: |\n        Amount of VRAM to request, in mebibytes (2**20).\n\n- name: UsePreemptible\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify a workflow step should opt-in or opt-out of using preemptible (spot) instances.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:UsePreemptible\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    usePreemptible: boolean\n\n- name: OutputCollectionProperties\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify metadata properties that will be set on the output\n    collection associated with this workflow or step.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:OutputCollectionProperties\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    outputProperties:\n      type: PropertyDef[]\n      jsonldPredicate:\n        mapSubject: propertyName\n        mapPredicate: propertyValue\n\n\n- name: KeepCacheType\n  type: enum\n  symbols:\n    - ram_cache\n    - disk_cache\n  doc:\n    - |\n        ram_cache: Keep blocks will be cached in RAM only.\n    - |\n        disk_cache: Keep blocks will be cached to disk and\n        memory-mapped.  The disk cache leverages the kernel's virtual\n        memory system so \"hot\" data will generally still be kept in\n        RAM.\n\n- name: KeepCacheTypeRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Choose keep cache strategy.\n  fields:\n    - name: class\n      type: string\n      doc: \"'arv:KeepCacheTypeRequirement'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: keepCacheType\n      type: KeepCacheType?\n      doc: |\n        Whether Keep blocks loaded by arv-mount should be kept in RAM\n        only or written to disk and memory-mapped.  The disk cache\n        leverages the kernel's virtual memory system so \"hot\" data will\n        generally still be kept in RAM.\n\n- name: OutOfMemoryRetry\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Detect when a failed tool run may have run out of memory, and\n    re-submit the container with more RAM.\n  fields:\n    - name: class\n      type: string\n      doc: \"'arv:OutOfMemoryRetry\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: memoryErrorRegex\n      type: string?\n      doc: |\n        A regular expression that will be used on the text of stdout\n        and stderr produced by the tool to determine if a failed job\n        should be retried with more RAM.  By default, searches for the\n        substrings 'bad_alloc' and 'OutOfMemory'.\n    - name: memoryRetryMultiplier\n      type: float?\n      doc: |\n        If the container failed on its first run, re-submit the\n        container with the RAM request multiplied by this factor.\n    - name: memoryRetryMultipler\n      type: float?\n      doc: |\n        Deprecated misspelling of \"memoryRetryMultiplier\".  Kept only\n        for backwards compatability, don't use this.\n"
  },
  {
    "path": "sdk/cwl/arvados_cwl/arv-cwl-schema-v1.2.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n$base: \"http://arvados.org/cwl#\"\n$namespaces:\n  cwl: \"https://w3id.org/cwl/cwl#\"\n  cwltool: \"http://commonwl.org/cwltool#\"\n$graph:\n- $import: https://w3id.org/cwl/CommonWorkflowLanguage.yml\n\n- name: cwltool:Secrets\n  type: record\n  inVocab: false\n  extends: cwl:ProcessRequirement\n  fields:\n    class:\n      type: string\n      doc: \"Always 'Secrets'\"\n      jsonldPredicate:\n        \"_id\": \"@type\"\n        \"_type\": \"@vocab\"\n    secrets:\n      type: string[]\n      doc: |\n        List one or more input parameters that are sensitive (such as passwords)\n        which will be deliberately obscured from logging.\n      jsonldPredicate:\n        \"_type\": \"@id\"\n        refScope: 0\n\n- name: RunInSingleContainer\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Indicates that a subworkflow should run in a single container\n    and not be scheduled as separate steps.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:RunInSingleContainer'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n\n- name: OutputDirType\n  type: enum\n  symbols:\n    - local_output_dir\n    - keep_output_dir\n  doc:\n    - |\n      local_output_dir: Use regular file system local to the compute node.\n      There must be sufficient local scratch space to store entire output;\n      specify this with `outdirMin` of `ResourceRequirement`.  Files are\n      batch uploaded to Keep when the process completes.  Most compatible, but\n      upload step can be time consuming for very large files.\n    - |\n      keep_output_dir: Use writable Keep mount.  Files are streamed to Keep as\n      they are written.  Does not consume local scratch space, but does consume\n      RAM for output buffers (up to 192 MiB per file simultaneously open for\n      writing.)  Best suited to processes which produce sequential output of\n      large files (non-sequential writes may produced fragmented file\n      manifests).  Supports regular files and directories, does not support\n      special files such as symlinks, hard links, named pipes, named sockets,\n      or device nodes.\n\n\n- name: RuntimeConstraints\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Set Arvados-specific runtime hints.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:RuntimeConstraints'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: keep_cache\n      type: int?\n      doc: |\n        Size of file data buffer for Keep mount in MiB. Default is 256\n        MiB. Increase this to reduce cache thrashing in situations such as\n        accessing multiple large (64+ MiB) files at the same time, or\n        performing random access on a large file.\n    - name: outputDirType\n      type: OutputDirType?\n      doc: |\n        Preferred backing store for output staging.  
If not specified, the\n        system may choose which one to use.\n\n- name: PartitionRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Select preferred compute partitions on which to run jobs.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:PartitionRequirement'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: partition\n      type:\n        - string\n        - string[]\n\n- name: APIRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Indicates that process wants to access to the Arvados API.  Will be granted\n    limited network access and have ARVADOS_API_HOST and ARVADOS_API_TOKEN set\n    in the environment.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:APIRequirement'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n\n- name: IntermediateOutput\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify desired handling of intermediate output collections.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:IntermediateOutput'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    outputTTL:\n      type: int\n      doc: |\n        If the value is greater than zero, consider intermediate output\n        collections to be temporary and should be automatically\n        trashed. Temporary collections will be trashed `outputTTL` seconds\n        after creation.  A value of zero means intermediate output should be\n        retained indefinitely (this is the default behavior).\n\n        Note: arvados-cwl-runner currently does not take workflow dependencies\n        into account when setting the TTL on an intermediate output\n        collection. If the TTL is too short, it is possible for a collection to\n        be trashed before downstream steps that consume it are started.  The\n        recommended minimum value for TTL is the expected duration of the\n        entire the workflow.\n\n- name: WorkflowRunnerResources\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify memory or cores resource request for the CWL runner process itself.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:WorkflowRunnerResources'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    ramMin:\n      type: int?\n      doc: Minimum RAM, in mebibytes (2**20)\n      jsonldPredicate: \"https://w3id.org/cwl/cwl#ResourceRequirement/ramMin\"\n    coresMin:\n      type: int?\n      doc: Minimum cores allocated to cwl-runner\n      jsonldPredicate: \"https://w3id.org/cwl/cwl#ResourceRequirement/coresMin\"\n    keep_cache:\n      type: int?\n      doc: |\n        Size of collection metadata cache for the workflow runner, in\n        MiB.  Default 256 MiB.  
Will be added on to the RAM request\n        when determining node size to request.\n      jsonldPredicate: \"http://arvados.org/cwl#RuntimeConstraints/keep_cache\"\n    acrContainerImage:\n      type: string?\n      doc: |\n        The container image containing the correct version of\n        arvados-cwl-runner to use when invoking the workflow on\n        Arvados.\n\n- name: ClusterTarget\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify where a workflow step should run\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:ClusterTarget'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    cluster_id:\n      type: string?\n      doc: The cluster to run the container\n    project_uuid:\n      type: string?\n      doc: The project that will own the container requests and intermediate collections\n\n\n- name: OutputStorageClass\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify the storage class to be used for intermediate and final output\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:StorageClassHint\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    intermediateStorageClass:\n      type:\n        - \"null\"\n        - string\n        - type: array\n          items: string\n      doc: One or more storages classes\n    finalStorageClass:\n      type:\n        - \"null\"\n        - string\n        - type: array\n          items: string\n      doc: One or more storages classes\n\n\n- type: record\n  name: PropertyDef\n  doc: |\n    Define a property that will be set on the submitted container\n    request associated with this workflow or step.\n  fields:\n    - name: propertyName\n      type: string\n      doc: The property key\n    - name: propertyValue\n      type: [Any]\n      doc: The property value\n\n\n- name: ProcessProperties\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify metadata properties that will be set on the submitted\n    container request associated with this workflow or step.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:ProcessProperties\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    processProperties:\n      type: PropertyDef[]\n      jsonldPredicate:\n        mapSubject: propertyName\n        mapPredicate: propertyValue\n\n\n- name: cwltool:CUDARequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Require support for NVIDA CUDA (GPU hardware acceleration).\n  fields:\n    class:\n      type: string\n      doc: 'cwltool:CUDARequirement'\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    cudaVersionMin:\n      type: string\n      doc: |\n        Minimum CUDA version to run the software, in X.Y format.  This\n        corresponds to a CUDA SDK release.  When running directly on\n        the host (not in a container) the host must have a compatible\n        CUDA SDK (matching the exact version, or, starting with CUDA\n        11.3, matching major version).  When run in a container, the\n        container image should provide the CUDA runtime, and the host\n        driver is injected into the container.  
In this case, because\n        CUDA drivers are backwards compatible, it is possible to\n        use an older SDK with a newer driver across major versions.\n\n        See https://docs.nvidia.com/deploy/cuda-compatibility/ for\n        details.\n    cudaComputeCapability:\n      type:\n        - 'string'\n        - 'string[]'\n      doc: |\n        CUDA hardware capability required to run the software, in X.Y\n        format.\n\n        * If this is a single value, it defines only the minimum\n          compute capability.  GPUs with higher capability are also\n          accepted.\n\n        * If it is an array value, then only select GPUs with compute\n          capabilities that explicitly appear in the array.\n    cudaDeviceCountMin:\n      type: ['null', int, cwl:Expression]\n      default: 1\n      doc: |\n        Minimum number of GPU devices to request.  If not specified,\n        same as `cudaDeviceCountMax`.  If neither are specified,\n        default 1.\n    cudaDeviceCountMax:\n      type: ['null', int, cwl:Expression]\n      doc: |\n        Maximum number of GPU devices to request.  If not specified,\n        same as `cudaDeviceCountMin`.\n    cudaVram:\n      type: ['null', long, cwl:Expression]\n      default: 1024\n      doc: |\n        Amount of VRAM to request, in mebibytes (2**20)\n\n\n- name: ROCmRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Require support for AMD ROCm (GPU hardware acceleration).\n  fields:\n    class:\n      type: string\n      doc: 'arv:ROCmRequirement'\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    rocmDriverVersion:\n      type: string\n      doc: |\n        Compatible ROCm driver version, in X.Y format, e.g. \"6.2\".\n    rocmTarget:\n      type:\n        - 'string'\n        - 'string[]'\n      doc: |\n        Compatible GPU architecture/ROCm LLVM targets, e.g. \"gfx1100\".\n    rocmDeviceCountMin:\n      type: ['null', int, cwl:Expression]\n      default: 1\n      doc: |\n        Minimum number of GPU devices to request.  If not specified,\n        same as `rocmDeviceCountMax`.  If neither are specified,\n        default 1.\n    rocmDeviceCountMax:\n      type: ['null', int, cwl:Expression]\n      doc: |\n        Maximum number of GPU devices to request.  
If not specified,\n        same as `rocmDeviceCountMin`.\n    rocmVram:\n      type: [long, cwl:Expression]\n      default: 1024\n      doc: |\n        Amount of VRAM to request, in mebibytes (2**20).\n\n- name: UsePreemptible\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify a workflow step should opt-in or opt-out of using preemptible (spot) instances.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:UsePreemptible\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    usePreemptible: boolean\n\n- name: OutputCollectionProperties\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Specify metadata properties that will be set on the output\n    collection associated with this workflow or step.\n  fields:\n    class:\n      type: string\n      doc: \"Always 'arv:OutputCollectionProperties\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    outputProperties:\n      type: PropertyDef[]\n      jsonldPredicate:\n        mapSubject: propertyName\n        mapPredicate: propertyValue\n\n\n- name: KeepCacheType\n  type: enum\n  symbols:\n    - ram_cache\n    - disk_cache\n  doc:\n    - |\n        ram_cache: Keep blocks will be cached in RAM only.\n    - |\n        disk_cache: Keep blocks will be cached to disk and\n        memory-mapped.  The disk cache leverages the kernel's virtual\n        memory system so \"hot\" data will generally still be kept in\n        RAM.\n\n- name: KeepCacheTypeRequirement\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Choose keep cache strategy.\n  fields:\n    - name: class\n      type: string\n      doc: \"'arv:KeepCacheTypeRequirement'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: keepCacheType\n      type: KeepCacheType?\n      doc: |\n        Whether Keep blocks loaded by arv-mount should be kept in RAM\n        only or written to disk and memory-mapped.  The disk cache\n        leverages the kernel's virtual memory system so \"hot\" data will\n        generally still be kept in RAM.\n\n\n- name: OutOfMemoryRetry\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Detect when a failed tool run may have run out of memory, and\n    re-submit the container with more RAM.\n  fields:\n    - name: class\n      type: string\n      doc: \"'arv:OutOfMemoryRetry\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: memoryErrorRegex\n      type: string?\n      doc: |\n        A regular expression that will be used on the text of stdout\n        and stderr produced by the tool to determine if a failed job\n        should be retried with more RAM.  By default, searches for the\n        substrings 'bad_alloc' and 'OutOfMemory'.\n    - name: memoryRetryMultiplier\n      type: float?\n      doc: |\n        If the container failed on its first run, re-submit the\n        container with the RAM request multiplied by this factor.\n    - name: memoryRetryMultipler\n      type: float?\n      doc: |\n        Deprecated misspelling of \"memoryRetryMultiplier\".  
Kept only\n        for backwards compatibility; don't use this.\n\n\n- name: SeparateRunner\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Indicates that a subworkflow should run in a separate\n    arvados-cwl-runner process.\n  fields:\n    - name: class\n      type: string\n      doc: \"Always 'arv:SeparateRunner'\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n    - name: runnerProcessName\n      type: ['null', string, cwl:Expression]\n      doc: |\n        Custom name to use for the runner process\n\n\n- name: PreemptionBehavior\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    If `resubmitNonPreemptible` is true and a workflow step fails due\n    to the instance it is running on being preempted, re-submit the\n    container with the `preemptible` flag set to false.\n  fields:\n    - name: class\n      type: string\n      doc: \"arv:PreemptionBehavior\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n\n    - name: resubmitNonPreemptible\n      type: boolean\n\n\n- type: record\n  name: ServiceDef\n  doc: |\n    A web service available on a container.\n  fields:\n    - name: servicePort\n      type: string\n      doc: The port on the container where the service can be accessed\n    - name: serviceAccess\n      type:\n        type: enum\n        symbols:\n          - public\n          - private\n    - name: label\n      jsonldPredicate: \"rdfs:label\"\n      type: string\n    - name: initialPath\n      type: string\n      doc: The path to use for initial connection to the service on this port\n      default: \"\"\n\n- name: PublishPorts\n  type: record\n  extends: cwl:ProcessRequirement\n  inVocab: false\n  doc: |\n    Advertise ports on the container that are web services.\n  fields:\n    - name: class\n      type: string\n      doc: \"arv:PublishPorts\"\n      jsonldPredicate:\n        _id: \"@type\"\n        _type: \"@vocab\"\n\n    - name: publishPorts\n      type: ServiceDef[]\n      jsonldPredicate:\n        mapSubject: servicePort\n"
  },
  {
    "path": "sdk/cwl/arvados_cwl/arvcontainer.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport logging\nimport json\nimport os\nimport urllib.request, urllib.parse, urllib.error\nimport time\nimport datetime\nimport ciso8601\nimport uuid\nimport math\nimport re\n\nimport arvados_cwl.util\nimport ruamel.yaml\n\nfrom cwltool.errors import WorkflowException\nfrom cwltool.process import UnsupportedRequirement, shortname\nfrom cwltool.utils import aslist, adjustFileObjs, adjustDirObjs, visit_class\nfrom cwltool.job import JobBase\nfrom cwltool.builder import substitute\n\nimport arvados.collection\nimport arvados.util\n\nimport crunchstat_summary.summarizer\nimport crunchstat_summary.reader\n\nfrom .arvdocker import arv_docker_get_image\nfrom . import done\nfrom .runner import Runner, arvados_jobs_image, packed_workflow, trim_anonymous_location, remove_redundant_fields, make_builder\nfrom .fsaccess import CollectionFetcher\nfrom .pathmapper import NoFollowPathMapper, trim_listing\nfrom .perf import Perf\nfrom ._version import __version__\n\nlogger = logging.getLogger('arvados.cwl-runner')\nmetrics = logging.getLogger('arvados.cwl-runner.metrics')\n\ndef cleanup_name_for_collection(name):\n    return name.replace(\"/\", \" \")\n\nclass OutputGlobError(RuntimeError):\n    pass\n\nclass ArvadosContainer(JobBase):\n    \"\"\"Submit and manage a Crunch container request for executing a CWL CommandLineTool.\"\"\"\n\n    def __init__(self, runner, job_runtime, globpatterns,\n                 builder,   # type: Builder\n                 joborder,  # type: Dict[Text, Union[Dict[Text, Any], List, Text]]\n                 make_path_mapper,  # type: Callable[..., PathMapper]\n                 requirements,      # type: List[Dict[Text, Text]]\n                 hints,     # type: List[Dict[Text, Text]]\n                 name       # type: Text\n    ):\n        super(ArvadosContainer, self).__init__(builder, joborder, make_path_mapper, requirements, hints, name)\n        self.arvrunner = runner\n        self.job_runtime = job_runtime\n        self.running = False\n        self.uuid = None\n        self.attempt_count = 0\n        self.globpatterns = globpatterns\n\n    def update_pipeline_component(self, r):\n        pass\n\n    def _required_env(self):\n        env = {}\n        env[\"HOME\"] = self.outdir\n        env[\"TMPDIR\"] = self.tmpdir\n        return env\n\n    def run(self, toplevelRuntimeContext):\n        # ArvadosCommandTool subclasses from cwltool.CommandLineTool,\n        # which calls makeJobRunner() to get a new ArvadosContainer\n        # object.  
The fields that define execution such as\n        # command_line, environment, etc are set on the\n        # ArvadosContainer object by CommandLineTool.job() before\n        # run() is called.\n\n        runtimeContext = self.job_runtime\n\n        if runtimeContext.submit_request_uuid:\n            container_request = self.arvrunner.api.container_requests().get(\n                uuid=runtimeContext.submit_request_uuid\n            ).execute(num_retries=self.arvrunner.num_retries)\n        else:\n            container_request = {}\n\n        container_request[\"command\"] = self.command_line\n        container_request[\"name\"] = self.name\n        container_request[\"output_path\"] = self.outdir\n        container_request[\"cwd\"] = self.outdir\n        container_request[\"priority\"] = runtimeContext.priority\n        container_request[\"state\"] = \"Uncommitted\"\n        container_request.setdefault(\"properties\", {})\n\n        container_request[\"properties\"][\"cwl_input\"] = self.joborder\n\n        runtime_constraints = {}\n\n        if runtimeContext.project_uuid:\n            container_request[\"owner_uuid\"] = runtimeContext.project_uuid\n\n        if self.arvrunner.secret_store.has_secret(self.command_line):\n            raise WorkflowException(\"Secret material leaked on command line, only file literals may contain secrets\")\n\n        if self.arvrunner.secret_store.has_secret(self.environment):\n            raise WorkflowException(\"Secret material leaked in environment, only file literals may contain secrets\")\n\n        resources = self.builder.resources\n        if resources is not None:\n            runtime_constraints[\"vcpus\"] = math.ceil(resources.get(\"cores\", 1))\n            runtime_constraints[\"ram\"] = math.ceil(resources.get(\"ram\") * 2**20)\n\n        mounts = {\n            self.outdir: {\n                \"kind\": \"tmp\",\n                \"capacity\": math.ceil(resources.get(\"outdirSize\", 0) * 2**20)\n            },\n            self.tmpdir: {\n                \"kind\": \"tmp\",\n                \"capacity\": math.ceil(resources.get(\"tmpdirSize\", 0) * 2**20)\n            }\n        }\n        secret_mounts = {}\n        scheduling_parameters = {}\n\n        rf = [self.pathmapper.mapper(f) for f in self.pathmapper.referenced_files]\n        rf.sort(key=lambda k: k.resolved)\n        prevdir = None\n        for resolved, target, tp, stg in rf:\n            if not stg:\n                continue\n            if prevdir and target.startswith(prevdir):\n                continue\n            if tp == \"Directory\":\n                targetdir = target\n            else:\n                targetdir = os.path.dirname(target)\n            sp = resolved.split(\"/\", 1)\n            pdh = sp[0][5:]   # remove \"keep:\"\n            mounts[targetdir] = {\n                \"kind\": \"collection\",\n                \"portable_data_hash\": pdh\n            }\n            if pdh in self.pathmapper.pdh_to_uuid:\n                mounts[targetdir][\"uuid\"] = self.pathmapper.pdh_to_uuid[pdh]\n            if len(sp) == 2:\n                if tp == \"Directory\":\n                    path = sp[1]\n                else:\n                    path = os.path.dirname(sp[1])\n                if path and path != \"/\":\n                    mounts[targetdir][\"path\"] = path\n            prevdir = targetdir + \"/\"\n\n        intermediate_collection_info = arvados_cwl.util.get_intermediate_collection_info(self.name, runtimeContext.current_container, 
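\n            # intermediate_output_ttl determines when the intermediate\n            # collection created below will be automatically trashed\n            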
runtimeContext.intermediate_output_ttl)\n\n        with Perf(metrics, \"generatefiles %s\" % self.name):\n            if self.generatefiles[\"listing\"]:\n                vwd = arvados.collection.Collection(api_client=self.arvrunner.api,\n                                                    keep_client=self.arvrunner.keep_client,\n                                                    num_retries=self.arvrunner.num_retries)\n                generatemapper = NoFollowPathMapper(self.generatefiles[\"listing\"], \"\", \"\",\n                                                    separateDirs=False)\n\n                sorteditems = sorted(generatemapper.items(), key=lambda n: n[1].target)\n\n                logger.debug(\"generatemapper is %s\", sorteditems)\n\n                with Perf(metrics, \"createfiles %s\" % self.name):\n                    for f, p in sorteditems:\n                        if not p.target:\n                            continue\n\n                        if p.target.startswith(\"/\"):\n                            dst = p.target[len(self.outdir)+1:] if p.target.startswith(self.outdir+\"/\") else p.target[1:]\n                        else:\n                            dst = p.target\n\n                        if p.type in (\"File\", \"Directory\", \"WritableFile\", \"WritableDirectory\"):\n                            if p.resolved.startswith(\"_:\"):\n                                vwd.mkdirs(dst)\n                            else:\n                                source, path = self.arvrunner.fs_access.get_collection(p.resolved)\n                                vwd.copy(path or \".\", dst, source_collection=source)\n                        elif p.type == \"CreateFile\":\n                            if self.arvrunner.secret_store.has_secret(p.resolved):\n                                mountpoint = p.target if p.target.startswith(\"/\") else os.path.join(self.outdir, p.target)\n                                secret_mounts[mountpoint] = {\n                                    \"kind\": \"text\",\n                                    \"content\": self.arvrunner.secret_store.retrieve(p.resolved)\n                                }\n                            else:\n                                with vwd.open(dst, \"w\") as n:\n                                    n.write(p.resolved)\n\n                def keepemptydirs(p):\n                    if isinstance(p, arvados.collection.RichCollectionBase):\n                        if len(p) == 0:\n                            p.open(\".keep\", \"w\").close()\n                        else:\n                            for c in p:\n                                keepemptydirs(p[c])\n\n                keepemptydirs(vwd)\n\n                if not runtimeContext.current_container:\n                    runtimeContext.current_container = arvados_cwl.util.get_current_container(self.arvrunner.api, self.arvrunner.num_retries, logger)\n                vwd.save_new(name=intermediate_collection_info[\"name\"],\n                             owner_uuid=runtimeContext.project_uuid,\n                             ensure_unique_name=True,\n                             trash_at=intermediate_collection_info[\"trash_at\"],\n                             properties=intermediate_collection_info[\"properties\"])\n\n                prev = None\n                for f, p in sorteditems:\n                    if (not p.target or self.arvrunner.secret_store.has_secret(p.resolved) or\n                        (prev is not None and p.target.startswith(prev))):\n            
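            # Skip secrets (those are mounted separately) and any target\n                        # already covered by a parent mount from a previous iteration.\n            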
            continue\n                    if p.target.startswith(\"/\"):\n                        dst = p.target[len(self.outdir)+1:] if p.target.startswith(self.outdir+\"/\") else p.target[1:]\n                    else:\n                        dst = p.target\n                    mountpoint = p.target if p.target.startswith(\"/\") else os.path.join(self.outdir, p.target)\n                    mounts[mountpoint] = {\"kind\": \"collection\",\n                                          \"portable_data_hash\": vwd.portable_data_hash(),\n                                          \"path\": dst}\n                    if p.type.startswith(\"Writable\"):\n                        mounts[mountpoint][\"writable\"] = True\n                    prev = p.target + \"/\"\n\n        container_request[\"environment\"] = {\"TMPDIR\": self.tmpdir, \"HOME\": self.outdir}\n        if self.environment:\n            container_request[\"environment\"].update(self.environment)\n\n        if self.stdin:\n            sp = self.stdin[6:].split(\"/\", 1)\n            mounts[\"stdin\"] = {\"kind\": \"collection\",\n                                \"portable_data_hash\": sp[0],\n                                \"path\": sp[1]}\n\n        if self.stderr:\n            mounts[\"stderr\"] = {\"kind\": \"file\",\n                                \"path\": \"%s/%s\" % (self.outdir, self.stderr)}\n\n        if self.stdout:\n            mounts[\"stdout\"] = {\"kind\": \"file\",\n                                \"path\": \"%s/%s\" % (self.outdir, self.stdout)}\n\n        (docker_req, docker_is_req) = self.get_requirement(\"DockerRequirement\")\n\n        container_request[\"container_image\"] = arv_docker_get_image(self.arvrunner.api,\n                                                                    docker_req,\n                                                                    runtimeContext.pull_image,\n                                                                    runtimeContext)\n\n        network_req, _ = self.get_requirement(\"NetworkAccess\")\n        if network_req:\n            runtime_constraints[\"API\"] = network_req[\"networkAccess\"]\n\n        api_req, _ = self.get_requirement(\"http://arvados.org/cwl#APIRequirement\")\n        if api_req:\n            runtime_constraints[\"API\"] = True\n\n        use_disk_cache = (self.arvrunner.api.config()[\"Containers\"].get(\"DefaultKeepCacheRAM\", 0) == 0)\n\n        keep_cache_type_req, _ = self.get_requirement(\"http://arvados.org/cwl#KeepCacheTypeRequirement\")\n        if keep_cache_type_req:\n            if \"keepCacheType\" in keep_cache_type_req:\n                if keep_cache_type_req[\"keepCacheType\"] == \"ram_cache\":\n                    use_disk_cache = False\n\n        runtime_req, _ = self.get_requirement(\"http://arvados.org/cwl#RuntimeConstraints\")\n        if runtime_req:\n            if \"keep_cache\" in runtime_req:\n                if use_disk_cache:\n                    # If DefaultKeepCacheRAM is zero it means we should use disk cache.\n                    runtime_constraints[\"keep_cache_disk\"] = math.ceil(runtime_req[\"keep_cache\"] * 2**20)\n                else:\n                    runtime_constraints[\"keep_cache_ram\"] = math.ceil(runtime_req[\"keep_cache\"] * 2**20)\n            if \"outputDirType\" in runtime_req:\n                if runtime_req[\"outputDirType\"] == \"local_output_dir\":\n                    # Currently the default behavior.\n                    pass\n                elif runtime_req[\"outputDirType\"] == 
\"keep_output_dir\":\n                    mounts[self.outdir]= {\n                        \"kind\": \"collection\",\n                        \"writable\": True\n                    }\n\n        partition_req, _ = self.get_requirement(\"http://arvados.org/cwl#PartitionRequirement\")\n        if partition_req:\n            scheduling_parameters[\"partitions\"] = aslist(partition_req[\"partition\"])\n\n        intermediate_output_req, _ = self.get_requirement(\"http://arvados.org/cwl#IntermediateOutput\")\n        if intermediate_output_req:\n            self.output_ttl = intermediate_output_req[\"outputTTL\"]\n        else:\n            self.output_ttl = self.arvrunner.intermediate_output_ttl\n\n        if self.output_ttl < 0:\n            raise WorkflowException(\"Invalid value %d for output_ttl, cannot be less than zero\" % container_request[\"output_ttl\"])\n\n\n        if self.arvrunner.api._rootDesc[\"revision\"] >= \"20210628\":\n            storage_class_req, _ = self.get_requirement(\"http://arvados.org/cwl#OutputStorageClass\")\n            if storage_class_req and storage_class_req.get(\"intermediateStorageClass\"):\n                container_request[\"output_storage_classes\"] = aslist(storage_class_req[\"intermediateStorageClass\"])\n            else:\n                container_request[\"output_storage_classes\"] = (\n                    runtimeContext.intermediate_storage_classes\n                    or list(arvados.util.iter_storage_classes(self.arvrunner.api.config()))\n                )\n\n        cuda_req, _ = self.get_requirement(\"http://commonwl.org/cwltool#CUDARequirement\")\n        if cuda_req:\n            if self.arvrunner.api._rootDesc[\"revision\"] >= \"20250128\":\n                # Arvados 3.1+ API\n                runtime_constraints[\"gpu\"] = {\n                    \"stack\": \"cuda\",\n                    \"device_count\": resources.get(\"cudaDeviceCount\", 1),\n                    \"driver_version\": cuda_req[\"cudaVersionMin\"],\n                    \"hardware_target\": aslist(cuda_req[\"cudaComputeCapability\"]),\n                    \"vram\": self.builder.do_eval(cuda_req.get(\"cudaVram\", 0))*1024*1024,\n                }\n            else:\n                # Legacy API\n                runtime_constraints[\"cuda\"] = {\n                    \"device_count\": resources.get(\"cudaDeviceCount\", 1),\n                    \"driver_version\": cuda_req[\"cudaVersionMin\"],\n                    \"hardware_capability\": aslist(cuda_req[\"cudaComputeCapability\"])[0]\n                }\n\n        rocm_req, _ = self.get_requirement(\"http://arvados.org/cwl#ROCmRequirement\")\n        if rocm_req:\n            if self.arvrunner.api._rootDesc[\"revision\"] >= \"20250128\":\n                runtime_constraints[\"gpu\"] = {\n                    \"stack\": \"rocm\",\n                    \"device_count\": self.builder.do_eval(rocm_req.get(\"rocmDeviceCountMin\", None)) or self.builder.do_eval(rocm_req.get(\"rocmDeviceCountMax\", 1)),\n                    \"driver_version\": rocm_req[\"rocmDriverVersion\"],\n                    \"hardware_target\": aslist(rocm_req[\"rocmTarget\"]),\n                    \"vram\": self.builder.do_eval(rocm_req[\"rocmVram\"])*1024*1024,\n                }\n            else:\n                raise WorkflowException(\"Arvados API server does not support ROCm (requires Arvados 3.1+)\")\n\n        if runtimeContext.enable_preemptible is False:\n            scheduling_parameters[\"preemptible\"] = False\n        else:\n            
preemptible_req, _ = self.get_requirement(\"http://arvados.org/cwl#UsePreemptible\")\n            if preemptible_req:\n                scheduling_parameters[\"preemptible\"] = preemptible_req[\"usePreemptible\"]\n            elif runtimeContext.enable_preemptible is True:\n                scheduling_parameters[\"preemptible\"] = True\n            elif runtimeContext.enable_preemptible is None:\n                pass\n\n        if scheduling_parameters.get(\"preemptible\") and self.may_resubmit_non_preemptible():\n            # Only make one attempt, because if it is preempted we\n            # will resubmit and ask for a non-preemptible instance.\n            container_request[\"container_count_max\"] = 1\n\n        if self.timelimit is not None and self.timelimit > 0:\n            scheduling_parameters[\"max_run_time\"] = self.timelimit\n\n        extra_submit_params = {}\n        if runtimeContext.submit_runner_cluster:\n            extra_submit_params[\"cluster_id\"] = runtimeContext.submit_runner_cluster\n\n        container_request[\"output_name\"] = cleanup_name_for_collection(\"Output from step %s\" % (self.name))\n        container_request[\"output_ttl\"] = self.output_ttl\n        container_request[\"mounts\"] = mounts\n        container_request[\"secret_mounts\"] = secret_mounts\n        container_request[\"runtime_constraints\"] = runtime_constraints\n        container_request[\"scheduling_parameters\"] = scheduling_parameters\n\n        enable_reuse = runtimeContext.enable_reuse\n        if enable_reuse:\n            reuse_req, _ = self.get_requirement(\"WorkReuse\")\n            if reuse_req:  # CWL >= v1.1; enableReuse can be an expression.\n                enable_reuse = reuse_req[\"enableReuse\"]\n                if isinstance(enable_reuse, str):\n                    enable_reuse = self.builder.do_eval(enable_reuse)\n            else:  # Arv extension to CWL v1.0; enableReuse is a boolean.\n                reuse_req, _ = self.get_requirement(\"http://arvados.org/cwl#ReuseRequirement\")\n                if reuse_req:\n                    enable_reuse = reuse_req[\"enableReuse\"]\n        container_request[\"use_existing\"] = enable_reuse\n\n        properties_req, _ = self.get_requirement(\"http://arvados.org/cwl#ProcessProperties\")\n        if properties_req:\n            for pr in properties_req[\"processProperties\"]:\n                container_request[\"properties\"][pr[\"propertyName\"]] = self.builder.do_eval(pr[\"propertyValue\"])\n\n        output_properties_req, _ = self.get_requirement(\"http://arvados.org/cwl#OutputCollectionProperties\")\n        if output_properties_req:\n            if self.arvrunner.api._rootDesc[\"revision\"] >= \"20220510\":\n                container_request[\"output_properties\"] = {}\n                for pr in output_properties_req[\"outputProperties\"]:\n                    container_request[\"output_properties\"][pr[\"propertyName\"]] = self.builder.do_eval(pr[\"propertyValue\"])\n            else:\n                logger.warning(\"%s API revision is %s, revision %s is required to support setting properties on output collections.\",\n                               self.arvrunner.label(self), self.arvrunner.api._rootDesc[\"revision\"], \"20220510\")\n\n        publish_port_req, _ = self.get_requirement(\"http://arvados.org/cwl#PublishPorts\")\n        if publish_port_req:\n            if self.arvrunner.api._rootDesc[\"revision\"] >= \"20250327\":\n                pp = {}\n                for p in publish_port_req[\"publishPorts\"]:\n     
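               # Each ServiceDef becomes one entry in the container\n                    # request's published_ports map, keyed by port number.\n     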
               pp[p[\"servicePort\"]] = {\n                        \"access\": p[\"serviceAccess\"],\n                        \"initial_path\": p.get(\"initialPath\", \"\"),\n                        \"label\": p[\"label\"],\n                    }\n                container_request[\"published_ports\"] = pp\n                container_request[\"service\"] = True\n                container_request[\"use_existing\"] = False\n                # The container needs networking to publish ports.\n                # This is how we get that as of Arvados 3.2.0:\n                runtime_constraints[\"API\"] = True\n            else:\n                raise WorkflowException(\"Arvados API server does not support published_ports (requires Arvados 3.2+)\")\n\n        if self.arvrunner.api._rootDesc[\"revision\"] >= \"20240502\" and self.globpatterns:\n            output_glob = []\n            try:\n                for gp in self.globpatterns:\n                    pattern = \"\"\n                    gb = None\n                    if isinstance(gp, str):\n                        try:\n                            gb = self.builder.do_eval(gp)\n                        except:\n                            raise OutputGlobError(\"Expression evaluation failed\")\n                    elif isinstance(gp, dict):\n                        # dict of two keys, 'glob' and 'pattern' which\n                        # means we should try to predict the names of\n                        # secondary files to capture.\n                        try:\n                            gb = self.builder.do_eval(gp[\"glob\"])\n                        except:\n                            raise OutputGlobError(\"Expression evaluation failed\")\n                        pattern = gp[\"pattern\"]\n\n                        if \"${\" in pattern or \"$(\" in pattern:\n                            # pattern is an expression, need to evaluate\n                            # it first.\n                            if '*' in gb or \"]\" in gb:\n                                # glob has wildcards, so we can't\n                                # predict the secondary file name.\n                                # Capture everything.\n                                raise OutputGlobError(\"glob has wildcards, cannot predict secondary file name\")\n\n                            # After evealuating 'glob' we have a\n                            # expected name we can provide to the\n                            # expression.\n                            nr, ne = os.path.splitext(gb)\n                            try:\n                                pattern = self.builder.do_eval(pattern, context={\n                                    \"path\": gb,\n                                    \"basename\": os.path.basename(gb),\n                                    \"nameext\": ne,\n                                    \"nameroot\": nr,\n                                })\n                            except:\n                                raise OutputGlobError(\"Expression evaluation failed\")\n                            if isinstance(pattern, str):\n                                # If we get a string back, that's the expected\n                                # file name for the secondary file.\n                                gb = pattern\n                                pattern = \"\"\n                            else:\n                                # However, it is legal for this to return a\n                                # file object or an array.  
In that case we'll\n                                # just capture everything.\n                                raise OutputGlobError(\"secondary file expression did not evaluate to a string\")\n                    else:\n                        # Should never happen, globpatterns is\n                        # constructed in arvtool from data that has\n                        # already gone through schema validation, but\n                        # still good to have a fallback.\n                        raise TypeError(\"Expected glob pattern to be a str or dict, was %s\" % gp)\n\n                    if not gb:\n                        continue\n\n                    for gbeval in aslist(gb):\n                        if gbeval.startswith(self.outdir+\"/\"):\n                            gbeval = gbeval[len(self.outdir)+1:]\n                        while gbeval.startswith(\"./\"):\n                            gbeval = gbeval[2:]\n\n                        if pattern:\n                            # pattern is not an expression or we would\n                            # have handled this earlier, so it must be\n                            # a simple substitution on the secondary\n                            # file name.\n                            #\n                            # 'pattern' was assigned in the earlier code block\n                            #\n                            # if there's a wild card in the glob, figure\n                            # out if there's enough text after it that the\n                            # suffix substitution can be done correctly.\n                            cutpos = max(gbeval.find(\"*\"), gbeval.find(\"]\"))\n                            if cutpos > -1:\n                                tail = gbeval[cutpos+1:]\n                                if tail.count(\".\") < pattern.count(\"^\"):\n                                    # the known suffix in the glob has\n                                    # fewer dotted extensions than the\n                                    # substitution pattern wants to remove,\n                                    # so we can't accurately predict the\n                                    # correct glob name in advance.\n                                    gbeval = \"\"\n                            if gbeval:\n                                gbeval = substitute(gbeval, pattern)\n\n                        if gbeval in (self.outdir, \"\", \".\"):\n                            output_glob.append(\"**\")\n                        elif gbeval.endswith(\"/\"):\n                            output_glob.append(gbeval+\"**\")\n                        else:\n                            output_glob.append(gbeval)\n                            output_glob.append(gbeval + \"/**\")\n\n                if \"**\" in output_glob:\n                    # if it's going to match all, prefer not to provide it\n                    # at all.\n                    output_glob.clear()\n            except OutputGlobError as e:\n                logger.debug(\"Unable to set a more specific output_glob (this is not an error): %s\", e.args[0], exc_info=e)\n                output_glob.clear()\n\n            if output_glob:\n                # Tools should either use cwl.output.json or\n                # outputBinding globs. However, one CWL conformance\n                # test has both, so we need to make sure we collect\n                # cwl.output.json in this case. 
That test uses\n                # cwl.output.json to return a string, but also uses\n                # outputBinding.\n                output_glob.append(\"cwl.output.json\")\n\n                # It could happen that a tool creates cwl.output.json,\n                # references a file, but also uses an outputBinding\n                # glob that doesn't include the file being referenced.\n                #\n                # In this situation, output_glob will only match the\n                # pattern we know about.  If cwl.output.json referred\n                # to other files in the output, those would be\n                # missing.  We could upload the entire output, but we\n                # currently have no way of knowing at this point\n                # whether cwl.output.json will be used this way.\n                #\n                # Because this is a corner case, I'm inclined to leave\n                # this as a known issue for now.  No conformance tests\n                # do this and I'd even be inclined to have it ruled\n                # incompatible in the CWL spec if it did come up.\n                # That said, in retrospect it would have been good to\n                # require CommandLineTool to declare when it expects\n                # cwl.output.json.\n\n                container_request[\"output_glob\"] = output_glob\n\n        ram_multiplier = [1]\n\n        oom_retry_req, _ = self.get_requirement(\"http://arvados.org/cwl#OutOfMemoryRetry\")\n        if oom_retry_req:\n            if oom_retry_req.get('memoryRetryMultiplier'):\n                ram_multiplier.append(oom_retry_req.get('memoryRetryMultiplier'))\n            elif oom_retry_req.get('memoryRetryMultipler'):\n                ram_multiplier.append(oom_retry_req.get('memoryRetryMultipler'))\n            else:\n                ram_multiplier.append(2)\n\n        if runtimeContext.runnerjob.startswith(\"arvwf:\"):\n            wfuuid = runtimeContext.runnerjob[6:runtimeContext.runnerjob.index(\"#\")]\n            wfrecord = self.arvrunner.api.workflows().get(uuid=wfuuid).execute(num_retries=self.arvrunner.num_retries)\n            if container_request[\"name\"] == \"main\":\n                container_request[\"name\"] = wfrecord[\"name\"]\n            container_request[\"properties\"][\"template_uuid\"] = wfuuid\n\n        if self.attempt_count == 0:\n            self.output_callback = self.arvrunner.get_wrapped_callback(self.output_callback)\n\n        try:\n            ram = runtime_constraints[\"ram\"]\n\n            self.uuid = runtimeContext.submit_request_uuid\n\n            for i in ram_multiplier:\n                runtime_constraints[\"ram\"] = ram * i\n\n                if self.uuid:\n                    response = self.arvrunner.api.container_requests().update(\n                        uuid=self.uuid,\n                        body=container_request,\n                        **extra_submit_params\n                    ).execute(num_retries=self.arvrunner.num_retries)\n                else:\n                    response = self.arvrunner.api.container_requests().create(\n                        body=container_request,\n                        **extra_submit_params\n                    ).execute(num_retries=self.arvrunner.num_retries)\n                    self.uuid = response[\"uuid\"]\n\n                if response[\"container_uuid\"] is not None:\n                    break\n\n            if response[\"container_uuid\"] is None:\n                runtime_constraints[\"ram\"] = ram * 
ram_multiplier[self.attempt_count]\n\n            container_request[\"state\"] = \"Committed\"\n            try:\n                response = self.arvrunner.api.container_requests().update(\n                    uuid=self.uuid,\n                    body=container_request,\n                    **extra_submit_params\n                ).execute(num_retries=self.arvrunner.num_retries)\n            except Exception as e:\n                # If the request was actually processed but we didn't\n                # receive a response, we'll re-try the request, but if\n                # the container went directly from \"Committed\" to\n                # \"Final\", the retry attempt will fail with a state\n                # change error.  So if there's an error, double check\n                # to see if the container is in the expected state.\n                #\n                # See discussion on #22160\n                response = self.arvrunner.api.container_requests().get(\n                    uuid=self.uuid\n                ).execute(num_retries=self.arvrunner.num_retries)\n                if response.get(\"state\") not in (\"Committed\", \"Final\"):\n                    raise\n\n            self.arvrunner.process_submitted(self)\n            self.attempt_count += 1\n\n            if response[\"state\"] == \"Final\":\n                logger.info(\"%s reused container %s\", self.arvrunner.label(self), response[\"container_uuid\"])\n            else:\n                logger.info(\"%s %s state is %s\", self.arvrunner.label(self), response[\"uuid\"], response[\"state\"])\n        except Exception as e:\n            logger.exception(\"%s error submitting container\\n%s\", self.arvrunner.label(self), e)\n            logger.debug(\"Container request was %s\", container_request)\n            self.output_callback({}, \"permanentFail\")\n\n    def may_resubmit_non_preemptible(self):\n        if self.job_runtime.enable_resubmit_non_preemptible is False:\n            # explicitly disabled\n            return False\n\n        spot_instance_retry_req, _ = self.get_requirement(\"http://arvados.org/cwl#PreemptionBehavior\")\n        if spot_instance_retry_req:\n            if spot_instance_retry_req[\"resubmitNonPreemptible\"] is False:\n                # explicitly disabled by hint\n                return False\n        elif self.job_runtime.enable_resubmit_non_preemptible is None:\n            # default behavior is we don't retry\n            return False\n\n        # At this point, by process of elimination either\n        # resubmitNonPreemptible or enable_resubmit_non_preemptible\n        # must be True, so now check if the container was actually\n        # preempted.\n\n        return True\n\n    def spot_instance_retry(self, record, container):\n        return self.may_resubmit_non_preemptible() and bool(container[\"runtime_status\"].get(\"preemptionNotice\"))\n\n    def out_of_memory_retry(self, record, container):\n        oom_retry_req, _ = self.get_requirement(\"http://arvados.org/cwl#OutOfMemoryRetry\")\n        if oom_retry_req is None:\n            return False\n\n        # Sometimes it gets killed with no warning\n        if container[\"exit_code\"] == 137:\n            return True\n\n        logc = arvados.collection.CollectionReader(record[\"log_uuid\"],\n                                                   api_client=self.arvrunner.api,\n                                                   keep_client=self.arvrunner.keep_client,\n                                                   
num_retries=self.arvrunner.num_retries)\n\n        loglines = [\"\"]\n        def callback(v1, v2, v3):\n            loglines[0] = v3\n\n        done.logtail(logc, callback, \"\", maxlen=1000)\n\n        # Check allocation failure\n        oom_matches = oom_retry_req.get('memoryErrorRegex') or r'(bad_alloc|out ?of ?memory|memory ?error|container using over 9.% of memory)'\n        if re.search(oom_matches, loglines[0], re.IGNORECASE | re.MULTILINE):\n            return True\n\n        return False\n\n    def done(self, record):\n        outputs = {}\n        retried = False\n        rcode = None\n        do_retry = False\n\n        try:\n            container = self.arvrunner.api.containers().get(\n                uuid=record[\"container_uuid\"]\n            ).execute(num_retries=self.arvrunner.num_retries)\n\n            if container[\"state\"] == \"Complete\":\n                rcode = container[\"exit_code\"]\n                if self.successCodes and rcode in self.successCodes:\n                    processStatus = \"success\"\n                elif self.temporaryFailCodes and rcode in self.temporaryFailCodes:\n                    processStatus = \"temporaryFail\"\n                elif self.permanentFailCodes and rcode in self.permanentFailCodes:\n                    processStatus = \"permanentFail\"\n                elif rcode == 0:\n                    processStatus = \"success\"\n                else:\n                    processStatus = \"permanentFail\"\n\n                if processStatus == \"permanentFail\" and self.attempt_count == 1 and self.out_of_memory_retry(record, container):\n                    logger.info(\"%s Container failed with out of memory error.  Retrying container with more RAM.\",\n                                 self.arvrunner.label(self))\n                    self.job_runtime = self.job_runtime.copy()\n                    do_retry = True\n\n                if rcode == 137 and not do_retry:\n                    logger.warning(\"%s Container may have been killed for using too much RAM.  Try resubmitting with a higher 'ramMin' or use the arv:OutOfMemoryRetry feature.\",\n                                 self.arvrunner.label(self))\n            else:\n                processStatus = \"permanentFail\"\n\n            if processStatus == \"permanentFail\" and self.attempt_count == 1 and self.spot_instance_retry(record, container):\n                logger.info(\"%s Container failed because the preemptible instance it was running on was reclaimed.  
Retrying container on a non-preemptible instance.\",\n                            self.arvrunner.label(self))\n                self.job_runtime = self.job_runtime.copy()\n                self.job_runtime.enable_preemptible = False\n                do_retry = True\n\n            if do_retry:\n                # Add a property indicating that this container was resubmitted.\n                updateproperties = record[\"properties\"].copy()\n                olduuid = self.uuid\n                self.job_runtime.submit_request_uuid = None\n                self.uuid = None\n                self.run(None)\n                # this flag suppresses calling the output callback; we only want to set this\n                # when we're sure that the resubmission has happened without issue.\n                retried = True\n                # Add a property to the old container request indicating it\n                # was retried\n                updateproperties[\"arv:failed_container_resubmitted\"] = self.uuid\n                self.arvrunner.api.container_requests().update(uuid=olduuid,\n                                                               body={\"properties\": updateproperties}).execute()\n                return\n\n            logc = None\n            if record[\"log_uuid\"]:\n                logc = arvados.collection.Collection(record[\"log_uuid\"],\n                                                     api_client=self.arvrunner.api,\n                                                     keep_client=self.arvrunner.keep_client,\n                                                     num_retries=self.arvrunner.num_retries)\n\n            if processStatus == \"permanentFail\" and logc is not None:\n                label = self.arvrunner.label(self)\n                done.logtail(\n                    logc, logger.error,\n                    \"%s (%s) error log:\" % (label, record[\"uuid\"]), maxlen=40, include_crunchrun=(rcode is None or rcode > 127))\n\n            if record[\"output_uuid\"]:\n                if self.arvrunner.trash_intermediate or self.arvrunner.intermediate_output_ttl:\n                    # Compute the trash time to avoid requesting the collection record.\n                    trash_at = ciso8601.parse_datetime_as_naive(record[\"modified_at\"]) + datetime.timedelta(0, self.arvrunner.intermediate_output_ttl)\n                    aftertime = \" at %s\" % trash_at.strftime(\"%Y-%m-%d %H:%M:%S UTC\") if self.arvrunner.intermediate_output_ttl else \"\"\n                    orpart = \", or\" if self.arvrunner.trash_intermediate and self.arvrunner.intermediate_output_ttl else \"\"\n                    oncomplete = \" upon successful completion of the workflow\" if self.arvrunner.trash_intermediate else \"\"\n                    logger.info(\"%s Intermediate output %s (%s) will be trashed%s%s%s.\" % (\n                        self.arvrunner.label(self), record[\"output_uuid\"], container[\"output\"], aftertime, orpart, oncomplete))\n                self.arvrunner.add_intermediate_output(record[\"output_uuid\"])\n\n            if container[\"output\"]:\n                outputs = done.done_outputs(self, container, \"/tmp\", self.outdir, \"/keep\")\n\n            properties = record[\"properties\"].copy()\n            properties[\"cwl_output\"] = outputs\n            self.arvrunner.api.container_requests().update(\n                uuid=self.uuid,\n                body={\"container_request\": {\"properties\": properties}}\n            ).execute(num_retries=self.arvrunner.num_retries)\n\n            if logc is not None and 
self.job_runtime.enable_usage_report is not False:\n                try:\n                    summarizer = crunchstat_summary.summarizer.ContainerRequestSummarizer(\n                        record,\n                        collection_object=logc,\n                        label=self.name,\n                        arv=self.arvrunner.api)\n                    summarizer.run()\n                    with logc.open(\"usage_report.html\", \"wt\") as mr:\n                        mr.write(summarizer.html_report())\n                    logc.save()\n\n                    # Post warnings about nodes that are under-utilized.\n                    for rc in summarizer._recommend_gen(lambda x: x):\n                        self.job_runtime.usage_report_notes.append(rc)\n\n                except Exception as e:\n                    logger.warning(\"%s unable to generate resource usage report\",\n                                 self.arvrunner.label(self),\n                                 exc_info=(e if self.arvrunner.debug else False))\n\n        except WorkflowException as e:\n            # Only include a stack trace if in debug mode.\n            # A stack trace may obfuscate more useful output about the workflow.\n            logger.error(\"%s unable to collect output from %s:\\n%s\",\n                         self.arvrunner.label(self), container[\"output\"], e, exc_info=(e if self.arvrunner.debug else False))\n            processStatus = \"permanentFail\"\n        except Exception:\n            logger.exception(\"%s while getting output object:\", self.arvrunner.label(self))\n            processStatus = \"permanentFail\"\n        finally:\n            if not retried:\n                self.output_callback(outputs, processStatus)\n\n\nclass RunnerContainer(Runner):\n    \"\"\"Submit and manage a container that runs arvados-cwl-runner.\"\"\"\n\n    def arvados_job_spec(self, runtimeContext, git_info):\n        \"\"\"Create an Arvados container request for this workflow.\n\n        The returned dict can be used to create a container passed as\n        the +body+ argument to container_requests().create().\n        \"\"\"\n\n        adjustDirObjs(self.job_order, trim_listing)\n        visit_class(self.job_order, (\"File\", \"Directory\"), trim_anonymous_location)\n        visit_class(self.job_order, (\"File\", \"Directory\"), remove_redundant_fields)\n\n        secret_mounts = {}\n        for param in sorted(self.job_order.keys()):\n            if self.secret_store.has_secret(self.job_order[param]):\n                mnt = \"/secrets/s%d\" % len(secret_mounts)\n                secret_mounts[mnt] = {\n                    \"kind\": \"text\",\n                    \"content\": self.secret_store.retrieve(self.job_order[param])\n                }\n                self.job_order[param] = {\"$include\": mnt}\n\n        environment = {}\n\n        if self.arvrunner.botosession is not None and runtimeContext.defer_downloads and runtimeContext.aws_credential_capture:\n            # There are deferred downloads from S3.  
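The workflow runner\n            # inside the container will fetch those inputs itself.\n            # 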
Save our credentials to secret\n            # storage\n            secret_mounts[\"/var/lib/cwl/.aws/config\"] = {\n                    \"kind\": \"text\",\n                    \"content\": \"\"\"[default]\nregion = {}\n\"\"\".format(self.arvrunner.botosession.region_name)\n            }\n            environment[\"AWS_CONFIG_FILE\"] = \"/var/lib/cwl/.aws/config\"\n\n            creds = self.arvrunner.botosession.get_credentials()\n            secret_mounts[\"/var/lib/cwl/.aws/credentials\"] = {\n                    \"kind\": \"text\",\n                    \"content\": \"\"\"[default]\naws_access_key_id = {}\naws_secret_access_key = {}\n\"\"\".format(creds.access_key, creds.secret_key)\n            }\n            environment[\"AWS_SHARED_CREDENTIALS_FILE\"] = \"/var/lib/cwl/.aws/credentials\"\n\n        container_image = arvados_jobs_image(self.arvrunner, self.jobs_image, runtimeContext)\n\n        workflow_runner_req, _ = self.embedded_tool.get_requirement(\"http://arvados.org/cwl#WorkflowRunnerResources\")\n        if workflow_runner_req and workflow_runner_req.get(\"acrContainerImage\"):\n            container_image = workflow_runner_req.get(\"acrContainerImage\")\n\n        container_req = {\n            \"name\": self.name,\n            \"output_path\": \"/var/spool/cwl\",\n            \"cwd\": \"/var/spool/cwl\",\n            \"priority\": self.priority,\n            \"state\": \"Committed\",\n            \"container_image\": container_image,\n            \"mounts\": {\n                \"/var/lib/cwl/cwl.input.json\": {\n                    \"kind\": \"json\",\n                    \"content\": self.job_order\n                },\n                \"stdout\": {\n                    \"kind\": \"file\",\n                    \"path\": \"/var/spool/cwl/cwl.output.json\"\n                },\n                \"/var/spool/cwl\": {\n                    \"kind\": \"collection\",\n                    \"writable\": True\n                }\n            },\n            \"secret_mounts\": secret_mounts,\n            \"runtime_constraints\": {\n                \"vcpus\": math.ceil(self.submit_runner_cores),\n                \"ram\": 1024*1024 * (math.ceil(self.submit_runner_ram) + math.ceil(self.collection_cache_size)),\n                \"API\": True\n            },\n            \"use_existing\": self.reuse_runner,\n            \"properties\": {},\n            \"environment\": environment\n        }\n\n        if self.embedded_tool.tool.get(\"id\", \"\").startswith(\"keep:\"):\n            sp = self.embedded_tool.tool[\"id\"].split('/')\n            workflowcollection = sp[0][5:]\n            workflowname = \"/\".join(sp[1:])\n            workflowpath = \"/var/lib/cwl/workflow/%s\" % workflowname\n            container_req[\"mounts\"][\"/var/lib/cwl/workflow\"] = {\n                \"kind\": \"collection\",\n                \"portable_data_hash\": \"%s\" % workflowcollection\n            }\n        elif self.embedded_tool.tool.get(\"id\", \"\").startswith(\"arvwf:\"):\n            uuid, frg = urllib.parse.urldefrag(self.embedded_tool.tool[\"id\"])\n            workflowpath = \"/var/lib/cwl/workflow.json#\" + frg\n            packedtxt = self.loadingContext.loader.fetch_text(uuid)\n            yaml = ruamel.yaml.YAML(typ='safe', pure=True)\n            packed = yaml.load(packedtxt)\n            container_req[\"mounts\"][\"/var/lib/cwl/workflow.json\"] = {\n                \"kind\": \"json\",\n                \"content\": packed\n            }\n            container_req[\"properties\"][\"template_uuid\"] 
= self.embedded_tool.tool[\"id\"][6:33]\n        elif self.embedded_tool.tool.get(\"id\", \"\").startswith(\"file:\"):\n            raise WorkflowException(\"Tool id '%s' is a local file but expected keep: or arvwf:\" % self.embedded_tool.tool.get(\"id\"))\n        else:\n            main = self.loadingContext.loader.idx[\"_:main\"]\n            if main.get(\"id\") == \"_:main\":\n                del main[\"id\"]\n            workflowpath = \"/var/lib/cwl/workflow.json#main\"\n            container_req[\"mounts\"][\"/var/lib/cwl/workflow.json\"] = {\n                \"kind\": \"json\",\n                \"content\": main\n            }\n\n        container_req[\"properties\"].update({k.replace(\"http://arvados.org/cwl#\", \"arv:\"): v for k, v in git_info.items()})\n\n        properties_req, _ = self.embedded_tool.get_requirement(\"http://arvados.org/cwl#ProcessProperties\")\n        if properties_req:\n            builder = make_builder(self.job_order, self.embedded_tool.hints, self.embedded_tool.requirements, runtimeContext, self.embedded_tool.metadata)\n            for pr in properties_req[\"processProperties\"]:\n                container_req[\"properties\"][pr[\"propertyName\"]] = builder.do_eval(pr[\"propertyValue\"])\n\n        # --local means execute the workflow instead of submitting a container request\n        # --api=containers means use the containers API\n        # --no-log-timestamps means don't add timestamps (the logging infrastructure does this)\n        # --disable-validate because we already validated so don't need to do it again\n        # --eval-timeout is the timeout for javascript invocation\n        # --parallel-task-count is the number of threads to use for job submission\n        # --enable/disable-reuse sets desired job reuse\n        # --collection-cache-size sets aside memory to store collections\n        command = [\"arvados-cwl-runner\",\n                   \"--local\",\n                   \"--api=containers\",\n                   \"--no-log-timestamps\",\n                   \"--disable-validate\",\n                   \"--disable-color\",\n                   \"--eval-timeout=%s\" % self.arvrunner.eval_timeout,\n                   \"--thread-count=%s\" % self.arvrunner.thread_count,\n                   \"--enable-reuse\" if self.enable_reuse else \"--disable-reuse\",\n                   \"--collection-cache-size=%s\" % self.collection_cache_size]\n\n        if self.output_name:\n            command.append(\"--output-name=\" + self.output_name)\n            container_req[\"output_name\"] = self.output_name\n\n        if self.output_tags:\n            command.append(\"--output-tags=\" + self.output_tags)\n\n        if runtimeContext.debug:\n            command.append(\"--debug\")\n\n        if runtimeContext.storage_classes:\n            command.append(\"--storage-classes=\" + \",\".join(runtimeContext.storage_classes))\n\n        if runtimeContext.intermediate_storage_classes:\n            command.append(\"--intermediate-storage-classes=\" + \",\".join(runtimeContext.intermediate_storage_classes))\n\n        if runtimeContext.on_error:\n            command.append(\"--on-error=\" + self.on_error)\n\n        if runtimeContext.intermediate_output_ttl:\n            command.append(\"--intermediate-output-ttl=%d\" % runtimeContext.intermediate_output_ttl)\n\n        if runtimeContext.trash_intermediate:\n            command.append(\"--trash-intermediate\")\n\n        if runtimeContext.project_uuid:\n            
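# Make sure containers and outputs created by the nested\n            # runner land in the same project as the workflow.\n            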
command.append(\"--project-uuid=\"+runtimeContext.project_uuid)\n\n        if self.enable_dev:\n            command.append(\"--enable-dev\")\n\n        if runtimeContext.enable_preemptible is True:\n            command.append(\"--enable-preemptible\")\n\n        if runtimeContext.enable_preemptible is False:\n            command.append(\"--disable-preemptible\")\n\n        if runtimeContext.varying_url_params:\n            command.append(\"--varying-url-params=\" + runtimeContext.varying_url_params)\n\n        if runtimeContext.prefer_cached_downloads:\n            command.append(\"--prefer-cached-downloads\")\n\n        if runtimeContext.enable_usage_report is True:\n            command.append(\"--enable-usage-report\")\n\n        if runtimeContext.enable_usage_report is False:\n            command.append(\"--disable-usage-report\")\n\n        if self.fast_parser:\n            command.append(\"--fast-parser\")\n\n        if self.arvrunner.selected_credential is not None:\n            command.append(\"--use-credential=\"+self.arvrunner.selected_credential[\"uuid\"])\n\n        if runtimeContext.s3_public_bucket is True:\n            command.append(\"--s3-public-bucket\")\n\n        command.extend([workflowpath, \"/var/lib/cwl/cwl.input.json\"])\n\n        container_req[\"command\"] = command\n\n        return container_req\n\n\n    def run(self, runtimeContext):\n        runtimeContext.keepprefix = \"keep:\"\n        job_spec = self.arvados_job_spec(runtimeContext, self.git_info)\n        if runtimeContext.project_uuid:\n            job_spec[\"owner_uuid\"] = runtimeContext.project_uuid\n\n        extra_submit_params = {}\n        if runtimeContext.submit_runner_cluster:\n            extra_submit_params[\"cluster_id\"] = runtimeContext.submit_runner_cluster\n\n        if runtimeContext.submit_request_uuid:\n            if \"cluster_id\" in extra_submit_params:\n                # Doesn't make sense for \"update\" and actually fails\n                del extra_submit_params[\"cluster_id\"]\n            response = self.arvrunner.api.container_requests().update(\n                uuid=runtimeContext.submit_request_uuid,\n                body=job_spec,\n                **extra_submit_params\n            ).execute(num_retries=self.arvrunner.num_retries)\n        else:\n            response = self.arvrunner.api.container_requests().create(\n                body=job_spec,\n                **extra_submit_params\n            ).execute(num_retries=self.arvrunner.num_retries)\n\n        self.uuid = response[\"uuid\"]\n        self.arvrunner.process_submitted(self)\n\n        logger.info(\"%s submitted container_request %s\", self.arvrunner.label(self), response[\"uuid\"])\n\n        workbench2 = self.arvrunner.api.config()[\"Services\"][\"Workbench2\"][\"ExternalURL\"]\n        if workbench2:\n            url = \"{}processes/{}\".format(workbench2, response[\"uuid\"])\n            logger.info(\"Monitor workflow progress at %s\", url)\n\n\n    def done(self, record):\n        try:\n            container = self.arvrunner.api.containers().get(\n                uuid=record[\"container_uuid\"]\n            ).execute(num_retries=self.arvrunner.num_retries)\n            container[\"log\"] = record[\"log_uuid\"]\n        except Exception:\n            logger.exception(\"%s while getting runner container\", self.arvrunner.label(self))\n            self.arvrunner.output_callback({}, \"permanentFail\")\n        else:\n            super(RunnerContainer, self).done(container)\n"
  },
  {
    "path": "sdk/cwl/arvados_cwl/arvdocker.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport logging\nimport sys\nimport threading\nimport copy\nimport re\nimport subprocess\n\nfrom schema_salad.sourceline import SourceLine\n\nimport cwltool.docker\nfrom cwltool.errors import WorkflowException\nimport arvados.commands.keepdocker\n\nlogger = logging.getLogger('arvados.cwl-runner')\n\ndef determine_image_id(dockerImageId):\n    for line in (\n            str(subprocess.check_output(  # nosec\n                [\"docker\", \"images\", \"--no-trunc\", \"--all\"]\n            ), \"utf-8\")\n            .splitlines()\n    ):\n        try:\n            match = re.match(r\"^([^ ]+)\\s+([^ ]+)\\s+([^ ]+)\", line)\n            split = dockerImageId.split(\":\")\n            if len(split) == 1:\n                split.append(\"latest\")\n            elif len(split) == 2:\n                #  if split[1] doesn't  match valid tag names, it is a part of repository\n                if not re.match(r\"[\\w][\\w.-]{0,127}\", split[1]):\n                    split[0] = split[0] + \":\" + split[1]\n                    split[1] = \"latest\"\n            elif len(split) == 3:\n                if re.match(r\"[\\w][\\w.-]{0,127}\", split[2]):\n                    split[0] = split[0] + \":\" + split[1]\n                    split[1] = split[2]\n                    del split[2]\n\n            # check for repository:tag match or image id match\n            if match and (\n                (split[0] == match.group(1) and split[1] == match.group(2))\n                or dockerImageId == match.group(3)\n            ):\n                return match.group(3)\n        except ValueError:\n            pass\n\n    return None\n\n\ndef arv_docker_get_image(api_client, dockerRequirement, pull_image, runtimeContext):\n    \"\"\"Check if a Docker image is available in Keep, if not, upload it using arv-keepdocker.\"\"\"\n\n    project_uuid = runtimeContext.project_uuid\n    force_pull = runtimeContext.force_docker_pull\n    tmp_outdir_prefix = runtimeContext.tmp_outdir_prefix\n    match_local_docker = runtimeContext.match_local_docker\n    copy_deps = runtimeContext.copy_deps\n    cached_lookups = runtimeContext.cached_docker_lookups\n\n    if \"http://arvados.org/cwl#dockerCollectionPDH\" in dockerRequirement:\n        return dockerRequirement[\"http://arvados.org/cwl#dockerCollectionPDH\"]\n\n    if \"dockerImageId\" not in dockerRequirement and \"dockerPull\" in dockerRequirement:\n        dockerRequirement = copy.deepcopy(dockerRequirement)\n        dockerRequirement[\"dockerImageId\"] = dockerRequirement[\"dockerPull\"]\n        if hasattr(dockerRequirement, 'lc'):\n            dockerRequirement.lc.data[\"dockerImageId\"] = dockerRequirement.lc.data[\"dockerPull\"]\n\n    if dockerRequirement[\"dockerImageId\"] in cached_lookups:\n        return cached_lookups[dockerRequirement[\"dockerImageId\"]]\n\n    with SourceLine(dockerRequirement, \"dockerImageId\", WorkflowException, logger.isEnabledFor(logging.DEBUG)):\n        sp = dockerRequirement[\"dockerImageId\"].split(\":\")\n        image_name = sp[0]\n        image_tag = sp[1] if len(sp) > 1 else \"latest\"\n\n        out_of_project_images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3,\n                                                                image_name=image_name,\n                                                                image_tag=image_tag,\n                                                                
project_uuid=None)\n\n        if copy_deps:\n            # Only images that are available in the destination project\n            images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3,\n                                                                    image_name=image_name,\n                                                                    image_tag=image_tag,\n                                                                    project_uuid=project_uuid)\n        else:\n            images = out_of_project_images\n\n        if match_local_docker:\n            local_image_id = determine_image_id(dockerRequirement[\"dockerImageId\"])\n            if local_image_id:\n                # find it in the list\n                found = False\n                for i in images:\n                    if i[1][\"dockerhash\"] == local_image_id:\n                        found = True\n                        images = [i]\n                        break\n                if not found:\n                    # force re-upload.\n                    images = []\n\n                # Reset the flag and check the unscoped list separately.\n                found = False\n                for i in out_of_project_images:\n                    if i[1][\"dockerhash\"] == local_image_id:\n                        found = True\n                        out_of_project_images = [i]\n                        break\n                if not found:\n                    # force re-upload.\n                    out_of_project_images = []\n\n        if not images:\n            if not out_of_project_images:\n                # Fetch Docker image if necessary.\n                try:\n                    dockerjob = cwltool.docker.DockerCommandLineJob(None, None, None, None, None, None)\n                    result = dockerjob.get_image(dockerRequirement, pull_image,\n                                                 force_pull, tmp_outdir_prefix)\n                    if not result:\n                        raise WorkflowException(\"Docker image '%s' not available\" % dockerRequirement[\"dockerImageId\"])\n                except OSError as e:\n                    raise WorkflowException(\"While trying to get Docker image '%s', failed to execute 'docker': %s\" % (dockerRequirement[\"dockerImageId\"], e))\n\n            # Upload image to Arvados\n            args = []\n            if project_uuid:\n                args.append(\"--project-uuid=\"+project_uuid)\n            args.append(image_name)\n            args.append(image_tag)\n            logger.info(\"Uploading Docker image %s:%s\", image_name, image_tag)\n            try:\n                arvados.commands.put.api_client = api_client\n                arvados.commands.keepdocker.main(args, stdout=sys.stderr, install_sig_handlers=False, api=api_client)\n            except SystemExit as e:\n                # If e.code is None or zero, then keepdocker exited normally and we can continue\n                if e.code:\n                    raise WorkflowException(\"keepdocker exited with code %s\" % e.code)\n\n            images = arvados.commands.keepdocker.list_images_in_arv(api_client, 3,\n                                                                    image_name=image_name,\n                                                                    image_tag=image_tag,\n                                                                    project_uuid=project_uuid)\n\n        if not images:\n            raise WorkflowException(\"Could not find Docker image %s:%s\" % (image_name, image_tag))\n\n        pdh = 
api_client.collections().get(uuid=images[0][0]).execute()[\"portable_data_hash\"]\n\n        cached_lookups[dockerRequirement[\"dockerImageId\"]] = pdh\n\n    return pdh\n"
  },
  {
    "path": "sdk/cwl/arvados_cwl/arvtool.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom cwltool.command_line_tool import CommandLineTool, ExpressionTool\nfrom .arvcontainer import ArvadosContainer\nfrom .pathmapper import ArvPathMapper\nfrom .runner import make_builder\nfrom ._version import __version__\nfrom functools import partial\nfrom schema_salad.sourceline import SourceLine\nfrom cwltool.errors import WorkflowException\nfrom arvados.util import portable_data_hash_pattern\nfrom cwltool.utils import aslist\n\nfrom typing import Sequence, Mapping\n\ndef validate_cluster_target(arvrunner, runtimeContext):\n    if (runtimeContext.submit_runner_cluster and\n        runtimeContext.submit_runner_cluster not in arvrunner.api._rootDesc[\"remoteHosts\"] and\n        runtimeContext.submit_runner_cluster != arvrunner.api._rootDesc[\"uuidPrefix\"]):\n        raise WorkflowException(\"Unknown or invalid cluster id '%s' known remote clusters are %s\" % (runtimeContext.submit_runner_cluster,\n                                                                                                     \", \".join(list(arvrunner.api._rootDesc[\"remoteHosts\"].keys()))))\n    if runtimeContext.project_uuid:\n        cluster_target = runtimeContext.submit_runner_cluster or arvrunner.api._rootDesc[\"uuidPrefix\"]\n        if not runtimeContext.project_uuid.startswith(cluster_target):\n            raise WorkflowException(\"Project uuid '%s' should start with id of target cluster '%s'\" % (runtimeContext.project_uuid, cluster_target))\n\n        try:\n            if runtimeContext.project_uuid[5:12] == '-tpzed-':\n                arvrunner.api.users().get(uuid=runtimeContext.project_uuid).execute()\n            else:\n                proj = arvrunner.api.groups().get(uuid=runtimeContext.project_uuid).execute()\n                if proj[\"group_class\"] != \"project\":\n                    raise Exception(\"not a project, group_class is '%s'\" % (proj[\"group_class\"]))\n        except Exception as e:\n            raise WorkflowException(\"Invalid project uuid '%s': %s\" % (runtimeContext.project_uuid, e))\n\ndef set_cluster_target(tool, arvrunner, builder, runtimeContext):\n    cluster_target_req = None\n    for field in (\"hints\", \"requirements\"):\n        if field not in tool:\n            continue\n        for item in tool[field]:\n            if item[\"class\"] == \"http://arvados.org/cwl#ClusterTarget\":\n                cluster_target_req = item\n\n    if cluster_target_req is None:\n        return runtimeContext\n\n    with SourceLine(cluster_target_req, None, WorkflowException, runtimeContext.debug):\n        runtimeContext = runtimeContext.copy()\n        runtimeContext.submit_runner_cluster = builder.do_eval(cluster_target_req.get(\"cluster_id\")) or runtimeContext.submit_runner_cluster\n        runtimeContext.project_uuid = builder.do_eval(cluster_target_req.get(\"project_uuid\")) or runtimeContext.project_uuid\n        validate_cluster_target(arvrunner, runtimeContext)\n\n    return runtimeContext\n\n\nclass ArvadosCommandTool(CommandLineTool):\n    \"\"\"Wrap cwltool CommandLineTool to override selected methods.\"\"\"\n\n    def __init__(self, arvrunner, toolpath_object, loadingContext):\n        super(ArvadosCommandTool, self).__init__(toolpath_object, loadingContext)\n\n        (docker_req, docker_is_req) = self.get_requirement(\"DockerRequirement\")\n        if not docker_req:\n            if 
        if not docker_req:\n            if portable_data_hash_pattern.match(loadingContext.default_docker_image):\n                self.hints.append({\"class\": \"DockerRequirement\",\n                                   \"http://arvados.org/cwl#dockerCollectionPDH\": loadingContext.default_docker_image})\n            else:\n                self.hints.append({\"class\": \"DockerRequirement\",\n                                   \"dockerPull\": loadingContext.default_docker_image})\n\n        self.arvrunner = arvrunner\n        self.globpatterns = []\n        self._collect_globs(toolpath_object[\"outputs\"])\n\n    def _collect_globs(self, inputschema):\n        if isinstance(inputschema, str):\n            return\n\n        if isinstance(inputschema, Sequence):\n            for i in inputschema:\n                self._collect_globs(i)\n\n        if isinstance(inputschema, Mapping):\n            if \"type\" in inputschema:\n                self._collect_globs(inputschema[\"type\"])\n                if inputschema[\"type\"] == \"record\":\n                    for field in inputschema[\"fields\"]:\n                        self._collect_globs(field)\n\n            if \"outputBinding\" in inputschema and \"glob\" in inputschema[\"outputBinding\"]:\n                for gb in aslist(inputschema[\"outputBinding\"][\"glob\"]):\n                    self.globpatterns.append(gb)\n                if \"secondaryFiles\" in inputschema:\n                    for sf in aslist(inputschema[\"secondaryFiles\"]):\n                        for gb in aslist(inputschema[\"outputBinding\"][\"glob\"]):\n                            self.globpatterns.append({\"pattern\": sf[\"pattern\"], \"glob\": gb})\n\n    def make_job_runner(self, runtimeContext):\n        if runtimeContext.work_api == \"containers\":\n            return partial(ArvadosContainer, self.arvrunner, runtimeContext, self.globpatterns)\n        else:\n            raise Exception(\"Unsupported work_api %s\" % runtimeContext.work_api)\n\n    def make_path_mapper(self, reffiles, stagedir, runtimeContext, separateDirs):\n        if runtimeContext.work_api == \"containers\":\n            return ArvPathMapper(self.arvrunner, reffiles+runtimeContext.extra_reffiles, runtimeContext.basedir,\n                                 \"/keep/%s\",\n                                 \"/keep/%s/%s\")\n\n    def job(self, joborder, output_callback, runtimeContext):\n        builder = make_builder(joborder, self.hints, self.requirements, runtimeContext, self.metadata)\n        runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)\n\n        if runtimeContext.work_api == \"containers\":\n            dockerReq, is_req = self.get_requirement(\"DockerRequirement\")\n            if dockerReq and dockerReq.get(\"dockerOutputDirectory\"):\n                runtimeContext.outdir = dockerReq.get(\"dockerOutputDirectory\")\n                runtimeContext.docker_outdir = dockerReq.get(\"dockerOutputDirectory\")\n            else:\n                runtimeContext.outdir = \"/var/spool/cwl\"\n                runtimeContext.docker_outdir = \"/var/spool/cwl\"\n        return super(ArvadosCommandTool, self).job(joborder, output_callback, runtimeContext)\n\nclass ArvadosExpressionTool(ExpressionTool):\n    def __init__(self, arvrunner, toolpath_object, loadingContext):\n        super(ArvadosExpressionTool, self).__init__(toolpath_object, loadingContext)\n        self.arvrunner = arvrunner\n\n    def job(self,\n            job_order,         # type: Mapping[Text, Text]\n            output_callback,  # type: 
Callable[[Any, Any], Any]\n            runtimeContext     # type: RuntimeContext\n           ):\n        return super(ArvadosExpressionTool, self).job(job_order, self.arvrunner.get_wrapped_callback(output_callback), runtimeContext)\n"
  },
  {
    "path": "sdk/cwl/arvados_cwl/arvworkflow.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport json\nimport copy\nimport logging\nimport urllib\nimport sys\nimport re\n\nfrom io import StringIO\nfrom typing import (MutableSequence, MutableMapping)\n\nfrom ruamel.yaml import YAML\nfrom ruamel.yaml.comments import CommentedMap, CommentedSeq\n\nfrom schema_salad.sourceline import SourceLine, cmap\nimport schema_salad.ref_resolver\n\nimport arvados.collection\n\nfrom cwltool.pack import pack\nfrom cwltool.load_tool import fetch_document, resolve_and_validate_document\nfrom cwltool.process import shortname, uniquename\nfrom cwltool.workflow import Workflow, WorkflowException, WorkflowStep\nfrom cwltool.utils import adjustFileObjs, adjustDirObjs, visit_class, normalizeFilesDirs\nfrom cwltool.context import LoadingContext, getdefault\n\nfrom schema_salad.ref_resolver import file_uri, uri_file_path\n\nimport ruamel.yaml as yaml\n\nfrom .runner import (upload_dependencies, packed_workflow, upload_workflow_collection,\n                     trim_anonymous_location, remove_redundant_fields, discover_secondary_files,\n                     make_builder, arvados_jobs_image, FileUpdates)\nfrom .arvcontainer import RunnerContainer\nfrom .pathmapper import ArvPathMapper, trim_listing\nfrom .arvtool import ArvadosCommandTool, set_cluster_target\nfrom ._version import __version__\nfrom .util import common_prefix\nfrom .arvdocker import arv_docker_get_image\n\nfrom .perf import Perf\n\nlogger = logging.getLogger('arvados.cwl-runner')\nmetrics = logging.getLogger('arvados.cwl-runner.metrics')\n\nmax_res_pars = (\"coresMin\", \"coresMax\", \"ramMin\", \"ramMax\", \"tmpdirMin\", \"tmpdirMax\")\nsum_res_pars = (\"outdirMin\", \"outdirMax\")\n\n_basetype_re = re.compile(r'''(?:\nDirectory\n|File\n|array\n|boolean\n|double\n|enum\n|float\n|int\n|long\n|null\n|record\n|string\n)(?:\\[\\])?\\??''', re.VERBOSE)\n\ndef make_wrapper_workflow(arvRunner, main, packed, project_uuid, name, git_info, tool):\n    col = arvados.collection.Collection(api_client=arvRunner.api,\n                                        keep_client=arvRunner.keep_client)\n\n    with col.open(\"workflow.json\", \"wt\") as f:\n        json.dump(packed, f, sort_keys=True, indent=4, separators=(',',': '))\n\n    pdh = col.portable_data_hash()\n\n    toolname = tool.tool.get(\"label\") or tool.metadata.get(\"label\") or os.path.basename(tool.tool[\"id\"])\n    if git_info and git_info.get(\"http://arvados.org/cwl#gitDescribe\"):\n        toolname = \"%s (%s)\" % (toolname, git_info.get(\"http://arvados.org/cwl#gitDescribe\"))\n\n    existing = arvRunner.api.collections().list(filters=[[\"portable_data_hash\", \"=\", pdh], [\"owner_uuid\", \"=\", project_uuid]]).execute(num_retries=arvRunner.num_retries)\n    if len(existing[\"items\"]) == 0:\n        col.save_new(name=toolname, owner_uuid=project_uuid, ensure_unique_name=True)\n\n    # now construct the wrapper\n\n    step = {\n        \"id\": \"#main/\" + toolname,\n        \"in\": [],\n        \"out\": [],\n        \"run\": \"keep:%s/workflow.json#main\" % pdh,\n        \"label\": name\n    }\n\n    newinputs = []\n    for i in main[\"inputs\"]:\n        inp = {}\n        # Make sure to only copy known fields that are meaningful at\n        # the workflow level. 
In practice this ensures that if we're\n        # wrapping a CommandLineTool we don't grab inputBinding.\n        # Right now this also excludes extension fields, which is fine;\n        # Arvados doesn't currently look for any extension fields on\n        # input parameters.\n        for f in (\"type\", \"label\", \"secondaryFiles\", \"streamable\",\n                  \"doc\", \"id\", \"format\", \"loadContents\",\n                  \"loadListing\", \"default\"):\n            if f in i:\n                inp[f] = i[f]\n        newinputs.append(inp)\n\n    wrapper = {\n        \"class\": \"Workflow\",\n        \"id\": \"#main\",\n        \"inputs\": newinputs,\n        \"outputs\": [],\n        \"steps\": [step]\n    }\n\n    for i in main[\"inputs\"]:\n        step[\"in\"].append({\n            \"id\": \"#main/step/%s\" % shortname(i[\"id\"]),\n            \"source\": i[\"id\"]\n        })\n\n    for i in main[\"outputs\"]:\n        step[\"out\"].append({\"id\": \"#main/step/%s\" % shortname(i[\"id\"])})\n        wrapper[\"outputs\"].append({\"outputSource\": \"#main/step/%s\" % shortname(i[\"id\"]),\n                                   \"type\": i[\"type\"],\n                                   \"id\": i[\"id\"]})\n\n    wrapper[\"requirements\"] = [{\"class\": \"SubworkflowFeatureRequirement\"}]\n\n    if main.get(\"requirements\"):\n        wrapper[\"requirements\"].extend(main[\"requirements\"])\n    if main.get(\"hints\"):\n        wrapper[\"hints\"] = main[\"hints\"]\n\n    doc = {\"cwlVersion\": \"v1.2\", \"$graph\": [wrapper]}\n\n    if git_info:\n        for g in git_info:\n            doc[g] = git_info[g]\n\n    return json.dumps(doc, sort_keys=True, indent=4, separators=(',',': '))\n\n\ndef rel_ref(s, baseuri, urlexpander, merged_map, jobmapper):\n    if s.startswith(\"keep:\") or s.startswith(\"arvwf:\"):\n        return s\n\n    uri = urlexpander(s, baseuri)\n\n    if uri.startswith(\"keep:\"):\n        return uri\n\n    fileuri = urllib.parse.urldefrag(baseuri)[0]\n\n    for u in (baseuri, fileuri):\n        if u in merged_map:\n            replacements = merged_map[u].resolved\n            if uri in replacements:\n                return replacements[uri]\n\n    if uri in jobmapper:\n        return jobmapper.mapper(uri).target\n\n    p1 = os.path.dirname(uri_file_path(fileuri))\n    p2 = os.path.dirname(uri_file_path(uri))\n    p3 = os.path.basename(uri_file_path(uri))\n\n    r = os.path.relpath(p2, p1)\n    if r == \".\":\n        r = \"\"\n\n    return os.path.join(r, p3)\n\ndef is_basetype(tp):\n    return _basetype_re.match(tp) is not None\n\ndef update_refs(api, d, baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix):\n    if isinstance(d, MutableSequence):\n        for i, s in enumerate(d):\n            if prefix and isinstance(s, str):\n                if s.startswith(prefix):\n                    d[i] = replacePrefix+s[len(prefix):]\n            else:\n                update_refs(api, s, baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)\n    elif isinstance(d, MutableMapping):\n        for field in (\"id\", \"name\"):\n            if isinstance(d.get(field), str) and d[field].startswith(\"_:\"):\n                # blank node reference that was added automatically; we can get rid of it.\n                del d[field]\n\n        if \"id\" in d:\n            baseuri = urlexpander(d[\"id\"], baseuri, scoped_id=True)\n        elif \"name\" in d and isinstance(d[\"name\"], str):\n            baseuri = urlexpander(d[\"name\"], baseuri, scoped_id=True)\n\n
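        # Resolve the Docker image now and pin it by portable data hash, so\n        # the stored workflow does not depend on a remote registry's tags.\n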
urlexpander(d[\"name\"], baseuri, scoped_id=True)\n\n        if d.get(\"class\") == \"DockerRequirement\":\n            d[\"http://arvados.org/cwl#dockerCollectionPDH\"] = arv_docker_get_image(api, d, False,\n                                                                                   runtimeContext)\n\n        for field in d:\n            if field in (\"location\", \"run\", \"name\") and isinstance(d[field], str):\n                d[field] = rel_ref(d[field], baseuri, urlexpander, merged_map, jobmapper)\n                continue\n\n            if field in (\"$include\", \"$import\") and isinstance(d[field], str):\n                d[field] = rel_ref(d[field], baseuri, urlexpander, {}, jobmapper)\n                continue\n\n            for t in (\"type\", \"items\"):\n                if (field == t and\n                    isinstance(d[t], str) and\n                    not is_basetype(d[t])):\n                    d[t] = rel_ref(d[t], baseuri, urlexpander, merged_map, jobmapper)\n                    continue\n\n            if field == \"inputs\" and isinstance(d[\"inputs\"], MutableMapping):\n                for inp in d[\"inputs\"]:\n                    if isinstance(d[\"inputs\"][inp], str) and not is_basetype(d[\"inputs\"][inp]):\n                        d[\"inputs\"][inp] = rel_ref(d[\"inputs\"][inp], baseuri, urlexpander, merged_map, jobmapper)\n                    if isinstance(d[\"inputs\"][inp], MutableMapping):\n                        update_refs(api, d[\"inputs\"][inp], baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)\n                continue\n\n            if field in (\"requirements\", \"hints\") and isinstance(d[field], MutableMapping):\n                dr = d[field].get(\"DockerRequirement\")\n                if dr:\n                    dr[\"http://arvados.org/cwl#dockerCollectionPDH\"] = arv_docker_get_image(api, dr, False,\n                                                                                            runtimeContext)\n\n            if field == \"$schemas\":\n                for n, s in enumerate(d[\"$schemas\"]):\n                    d[\"$schemas\"][n] = rel_ref(d[\"$schemas\"][n], baseuri, urlexpander, merged_map, jobmapper)\n                continue\n\n            update_refs(api, d[field], baseuri, urlexpander, merged_map, jobmapper, runtimeContext, prefix, replacePrefix)\n\n\ndef fix_schemadef(req, baseuri, urlexpander, merged_map, jobmapper, pdh):\n    req = copy.deepcopy(req)\n\n    for f in req[\"types\"]:\n        r = f[\"name\"]\n        path, frag = urllib.parse.urldefrag(r)\n        rel = rel_ref(r, baseuri, urlexpander, merged_map, jobmapper)\n        merged_map.setdefault(path, FileUpdates({}, {}))\n        rename = \"keep:%s/%s\" %(pdh, rel)\n        for mm in merged_map:\n            merged_map[mm].resolved[r] = rename\n    return req\n\n\ndef drop_ids(d):\n    if isinstance(d, MutableSequence):\n        for i, s in enumerate(d):\n            drop_ids(s)\n    elif isinstance(d, MutableMapping):\n        if \"id\" in d and d[\"id\"].startswith(\"file:\"):\n            del d[\"id\"]\n\n        for field in d:\n            drop_ids(d[field])\n\n\ndef upload_workflow(arvRunner, tool, job_order, project_uuid,\n                        runtimeContext,\n                        uuid=None,\n                        submit_runner_ram=0, name=None, merged_map=None,\n                        submit_runner_image=None,\n                        git_info=None,\n                        set_defaults=False,\n                   
\n    firstfile = None\n    workflow_files = set()\n    import_files = set()\n    include_files = set()\n\n    # The document loader index will have entries for all the files\n    # that were loaded in the process of parsing the entire workflow\n    # (including subworkflows, tools, imports, etc).  We use this to\n    # compose a list of the workflow file dependencies.\n    for w in tool.doc_loader.idx:\n        if w.startswith(\"file://\"):\n            workflow_files.add(urllib.parse.urldefrag(w)[0])\n            if firstfile is None:\n                firstfile = urllib.parse.urldefrag(w)[0]\n        if w.startswith(\"import:file://\"):\n            import_files.add(urllib.parse.urldefrag(w[7:])[0])\n        if w.startswith(\"include:file://\"):\n            include_files.add(urllib.parse.urldefrag(w[8:])[0])\n\n    all_files = workflow_files | import_files | include_files\n\n    # Find the longest common prefix among all the file names.  We'll\n    # use this to recreate the directory structure in a keep\n    # collection with correct relative references.\n    prefix = common_prefix(firstfile, all_files) if firstfile else \"\"\n\n\n    col = arvados.collection.Collection(api_client=arvRunner.api)\n\n    # Now go through all the files and update references to other\n    # files.  We previously scanned for file dependencies; these are\n    # passed in as merged_map.\n    #\n    # note about merged_map: we upload dependencies of each process\n    # object (CommandLineTool/Workflow) to a separate collection.\n    # That way, when the user edits something, this limits collection\n    # PDH changes to just that tool, and minimizes situations where\n    # small changes break container reuse for the whole workflow.\n    #\n    for w in workflow_files | import_files:\n        # 1. load the YAML file\n\n        text = tool.doc_loader.fetch_text(w)\n        if isinstance(text, bytes):\n            textIO = StringIO(str(text, 'utf-8'))\n        else:\n            textIO = StringIO(text)\n\n        yamlloader = schema_salad.utils.yaml_no_ts()\n        result = yamlloader.load(textIO)\n\n        # If the whole document is in \"flow style\" it is probably JSON\n        # formatted.  We'll re-export it as JSON because the\n        # ruamel.yaml round-trip mode is a lie and only preserves\n        # \"block style\" formatting and not \"flow style\" formatting.\n        export_as_json = result.fa.flow_style()\n\n        # 2. find $import, $include, $schema, run, location\n        # 3. 
update field value\n        update_refs(arvRunner.api, result, w, tool.doc_loader.expand_url, merged_map, jobmapper, runtimeContext, \"\", \"\")\n\n        # Write the updated file to the collection.\n        with col.open(w[len(prefix):], \"wt\") as f:\n            if export_as_json:\n                json.dump(result, f, indent=4, separators=(',',': '))\n            else:\n                yamlloader.dump(result, stream=f)\n\n        # Also store a verbatim copy of the original files\n        with col.open(os.path.join(\"original\", w[len(prefix):]), \"wt\") as f:\n            f.write(text)\n\n\n    # Upload files referenced by $include directives, these are used\n    # unchanged and don't need to be updated.\n    for w in include_files:\n        with col.open(w[len(prefix):], \"wb\") as f1:\n            with col.open(os.path.join(\"original\", w[len(prefix):]), \"wb\") as f3:\n                with open(uri_file_path(w), \"rb\") as f2:\n                    dat = f2.read(65536)\n                    while dat:\n                        f1.write(dat)\n                        f3.write(dat)\n                        dat = f2.read(65536)\n\n    # Now collect metadata: the collection name and git properties.\n\n    toolname = tool.tool.get(\"label\") or tool.metadata.get(\"label\") or os.path.basename(tool.tool[\"id\"])\n    if git_info and git_info.get(\"http://arvados.org/cwl#gitDescribe\"):\n        toolname = \"%s (%s)\" % (toolname, git_info.get(\"http://arvados.org/cwl#gitDescribe\"))\n\n    toolfile = tool.tool[\"id\"][len(prefix):]\n\n    properties = {\n        \"type\": \"workflow\",\n        \"arv:workflowMain\": toolfile,\n    }\n\n    if git_info:\n        for g in git_info:\n            p = g.split(\"#\", 1)[1]\n            properties[\"arv:\"+p] = git_info[g]\n\n    # Check if a collection with the same content already exists in the target project.  
If so, just use that one.\n    existing = arvRunner.api.collections().list(filters=[[\"portable_data_hash\", \"=\", col.portable_data_hash()],\n                                                         [\"owner_uuid\", \"=\", arvRunner.project_uuid]]).execute(num_retries=arvRunner.num_retries)\n\n    if len(existing[\"items\"]) == 0:\n        toolname = toolname.replace(\"/\", \" \")\n        col.save_new(name=toolname, owner_uuid=arvRunner.project_uuid, ensure_unique_name=True, properties=properties)\n        logger.info(\"Workflow uploaded to %s\", col.manifest_locator())\n    else:\n        logger.info(\"Workflow uploaded to %s\", existing[\"items\"][0][\"uuid\"])\n\n    # Now that we've updated the workflow and saved it to a\n    # collection, we're going to construct a minimal \"wrapper\"\n    # workflow which consists only of input and output parameters\n    # connected to a single step that runs the real workflow.\n\n    runfile = \"keep:%s/%s\" % (col.portable_data_hash(), toolfile)\n\n    step = {\n        \"id\": \"#main/\" + toolname,\n        \"in\": [],\n        \"out\": [],\n        \"run\": runfile,\n        \"label\": name\n    }\n\n    main = tool.tool\n\n    wf_runner_resources = None\n\n    hints = main.get(\"hints\", [])\n    found = False\n    for h in hints:\n        if h[\"class\"] == \"http://arvados.org/cwl#WorkflowRunnerResources\":\n            wf_runner_resources = h\n            found = True\n            break\n    if not found:\n        wf_runner_resources = {\"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\"}\n        hints.append(wf_runner_resources)\n\n    if \"acrContainerImage\" not in wf_runner_resources:\n        wf_runner_resources[\"acrContainerImage\"] = arvados_jobs_image(arvRunner,\n                                                                      submit_runner_image or \"arvados/jobs:\"+__version__,\n                                                                      runtimeContext)\n\n    if submit_runner_ram:\n        wf_runner_resources[\"ramMin\"] = submit_runner_ram\n\n    # Remove a few redundant fields from the \"job order\" (aka input\n    # object or input parameters).  In the situation where we're\n    # creating or updating a workflow record, any values in the job\n    # order get copied over as default values for input parameters.\n    adjustDirObjs(job_order, trim_listing)\n    adjustFileObjs(job_order, trim_anonymous_location)\n    adjustDirObjs(job_order, trim_anonymous_location)\n\n    newinputs = []\n    for i in main[\"inputs\"]:\n        inp = {}\n        # Make sure to only copy known fields that are meaningful at\n        # the workflow level. In practice this ensures that if we're\n        # wrapping a CommandLineTool we don't grab inputBinding.\n        # Right now this also excludes extension fields, which is fine;\n        # Arvados doesn't currently look for any extension fields on\n        # input parameters.\n        for f in (\"type\", \"label\", \"secondaryFiles\", \"streamable\",\n                  \"doc\", \"format\", \"loadContents\",\n                  \"loadListing\", \"default\"):\n            if f in i:\n                inp[f] = i[f]\n\n
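        # When creating or updating a workflow record, values from the\n        # supplied job order become the default values of the registered\n        # workflow's inputs.\n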
        if set_defaults:\n            sn = shortname(i[\"id\"])\n            if sn in job_order:\n                inp[\"default\"] = job_order[sn]\n\n        inp[\"id\"] = \"#main/%s\" % shortname(i[\"id\"])\n        newinputs.append(inp)\n\n    wrapper = {\n        \"class\": \"Workflow\",\n        \"id\": \"#main\",\n        \"inputs\": newinputs,\n        \"outputs\": [],\n        \"steps\": [step]\n    }\n\n    for i in main[\"inputs\"]:\n        step[\"in\"].append({\n            \"id\": \"#main/step/%s\" % shortname(i[\"id\"]),\n            \"source\": \"#main/%s\" % shortname(i[\"id\"])\n        })\n\n    for i in main[\"outputs\"]:\n        step[\"out\"].append({\"id\": \"#main/step/%s\" % shortname(i[\"id\"])})\n        wrapper[\"outputs\"].append({\"outputSource\": \"#main/step/%s\" % shortname(i[\"id\"]),\n                                   \"type\": i[\"type\"],\n                                   \"id\": \"#main/%s\" % shortname(i[\"id\"])})\n\n    wrapper[\"requirements\"] = [{\"class\": \"SubworkflowFeatureRequirement\"}]\n\n    if main.get(\"requirements\"):\n        wrapper[\"requirements\"].extend(main[\"requirements\"])\n    if hints:\n        wrapper[\"hints\"] = hints\n\n    # Schema definitions (this lets you define things like record\n    # types) require special handling.\n\n    for i, r in enumerate(wrapper[\"requirements\"]):\n        if r[\"class\"] == \"SchemaDefRequirement\":\n            wrapper[\"requirements\"][i] = fix_schemadef(r, main[\"id\"], tool.doc_loader.expand_url, merged_map, jobmapper, col.portable_data_hash())\n\n    update_refs(arvRunner.api, wrapper, main[\"id\"], tool.doc_loader.expand_url, merged_map, jobmapper, runtimeContext, main[\"id\"]+\"#\", \"#main/\")\n\n    doc = {\"cwlVersion\": \"v1.2\", \"$graph\": [wrapper]}\n\n    if git_info:\n        for g in git_info:\n            doc[g] = git_info[g]\n\n    # Remove any lingering file references.\n    drop_ids(wrapper)\n\n    return doc\n\n\ndef make_workflow_record(arvRunner, doc, name, tool, project_uuid, update_uuid):\n\n    wrappertext = json.dumps(doc, sort_keys=True, indent=4, separators=(',',': '))\n\n    body = {\n        \"workflow\": {\n            \"name\": name,\n            \"description\": tool.tool.get(\"doc\", \"\"),\n            \"definition\": wrappertext\n        }}\n    if project_uuid:\n        body[\"workflow\"][\"owner_uuid\"] = project_uuid\n\n    if update_uuid:\n        call = arvRunner.api.workflows().update(uuid=update_uuid, body=body)\n    else:\n        call = arvRunner.api.workflows().create(body=body)\n    return call.execute(num_retries=arvRunner.num_retries)[\"uuid\"]\n\n\ndef dedup_reqs(reqs):\n    dedup = {}\n    for r in reversed(reqs):\n        if r[\"class\"] not in dedup and not r[\"class\"].startswith(\"http://arvados.org/cwl#\"):\n            dedup[r[\"class\"]] = r\n    return [dedup[r] for r in sorted(dedup.keys())]\n\ndef get_overall_res_req(res_reqs):\n    \"\"\"Combine a list of ResourceRequirements into one,\n    taking the max of 
coresMin, coresMax, ramMin, ramMax, tmpdirMin, tmpdirMax\n    and the sum of outdirMin, outdirMax.\"\"\"\n\n    all_res_req = {}\n    exception_msgs = []\n    for a in max_res_pars + sum_res_pars:\n        all_res_req[a] = []\n        for res_req in res_reqs:\n            if a in res_req:\n                if isinstance(res_req[a], int): # integer check\n                    all_res_req[a].append(res_req[a])\n                else:\n                    msg = SourceLine(res_req, a).makeError(\n                    \"Non-top-level ResourceRequirement in single container cannot have expressions\")\n                    exception_msgs.append(msg)\n    if exception_msgs:\n        raise WorkflowException(\"\\n\".join(exception_msgs))\n    else:\n        overall_res_req = {}\n        for a in all_res_req:\n            if all_res_req[a]:\n                if a in max_res_pars:\n                    overall_res_req[a] = max(all_res_req[a])\n                elif a in sum_res_pars:\n                    overall_res_req[a] = sum(all_res_req[a])\n        if overall_res_req:\n            overall_res_req[\"class\"] = \"ResourceRequirement\"\n        return cmap(overall_res_req)\n\nclass ArvadosWorkflowStep(WorkflowStep):\n    def __init__(self,\n                 toolpath_object,      # type: Dict[Text, Any]\n                 pos,                  # type: int\n                 loadingContext,       # type: LoadingContext\n                 arvrunner,\n                 *argc,\n                 **argv\n                ):  # type: (...) -> None\n\n        if arvrunner.fast_submit:\n            self.tool = toolpath_object\n            self.tool[\"inputs\"] = []\n            self.tool[\"outputs\"] = []\n        else:\n            super(ArvadosWorkflowStep, self).__init__(toolpath_object, pos, loadingContext, *argc, **argv)\n            self.tool[\"class\"] = \"WorkflowStep\"\n        self.arvrunner = arvrunner\n\n    def job(self, joborder, output_callback, runtimeContext):\n        runtimeContext = runtimeContext.copy()\n        runtimeContext.toplevel = True  # Preserve behavior for #13365\n\n        builder = make_builder({shortname(k): v for k, v in joborder.items()}, self.hints, self.requirements,\n                               runtimeContext, self.metadata)\n        runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)\n        return super(ArvadosWorkflowStep, self).job(joborder, output_callback, runtimeContext)\n\n\nclass ArvadosWorkflow(Workflow):\n    \"\"\"Wrap cwltool Workflow to override selected methods.\"\"\"\n\n    def __init__(self, arvrunner, toolpath_object, loadingContext):\n        self.arvrunner = arvrunner\n        self.wf_pdh = None\n        self.dynamic_resource_req = []\n        self.static_resource_req = []\n        self.wf_reffiles = []\n        self.loadingContext = loadingContext.copy()\n\n        self.requirements = copy.deepcopy(getdefault(loadingContext.requirements, []))\n        tool_requirements = toolpath_object.get(\"requirements\", [])\n        self.hints = copy.deepcopy(getdefault(loadingContext.hints, []))\n        tool_hints = toolpath_object.get(\"hints\", [])\n\n        workflow_runner_req, _ = self.get_requirement(\"http://arvados.org/cwl#WorkflowRunnerResources\")\n        if workflow_runner_req and workflow_runner_req.get(\"acrContainerImage\"):\n            self.loadingContext.default_docker_image = workflow_runner_req.get(\"acrContainerImage\")\n\n        super(ArvadosWorkflow, self).__init__(toolpath_object, self.loadingContext)\n      
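  # Remember the workflow's ClusterTarget hint or requirement, if any.\n      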
  self.cluster_target_req, _ = self.get_requirement(\"http://arvados.org/cwl#ClusterTarget\")\n\n\n    def runInSingleContainer(self, joborder, output_callback, runtimeContext, builder):\n        with SourceLine(self.tool, None, WorkflowException, logger.isEnabledFor(logging.DEBUG)):\n            if \"id\" not in self.tool:\n                raise WorkflowException(\"%s object must have 'id'\" % (self.tool[\"class\"]))\n\n        discover_secondary_files(self.arvrunner.fs_access, builder,\n                                 self.tool[\"inputs\"], joborder)\n\n        normalizeFilesDirs(joborder)\n\n        with Perf(metrics, \"subworkflow upload_deps\"):\n            upload_dependencies(self.arvrunner,\n                                os.path.basename(joborder.get(\"id\", \"#\")),\n                                self.doc_loader,\n                                joborder,\n                                joborder.get(\"id\", \"#\"),\n                                runtimeContext)\n\n            if self.wf_pdh is None:\n                packed = pack(self.loadingContext, self.tool[\"id\"], loader=self.doc_loader)\n\n                for p in packed[\"$graph\"]:\n                    if p[\"id\"] == \"#main\":\n                        p[\"requirements\"] = dedup_reqs(self.requirements)\n                        p[\"hints\"] = dedup_reqs(self.hints)\n\n                def visit(item):\n                    if \"requirements\" in item:\n                        item[\"requirements\"] = [i for i in item[\"requirements\"] if i[\"class\"] != \"DockerRequirement\"]\n                    for t in (\"hints\", \"requirements\"):\n                        if t not in item:\n                            continue\n                        for req in item[t]:\n                            if req[\"class\"] == \"ResourceRequirement\":\n                                dyn = False\n                                for k in max_res_pars + sum_res_pars:\n                                    if k in req:\n                                        if isinstance(req[k], str):\n                                            if item[\"id\"] == \"#main\":\n                                                # only the top-level requirements/hints may contain expressions\n                                                self.dynamic_resource_req.append(req)\n                                                dyn = True\n                                                break\n                                            else:\n                                                with SourceLine(req, k, WorkflowException):\n                                                    raise WorkflowException(\"Non-top-level ResourceRequirement in single container cannot have expressions\")\n                                if not dyn:\n                                    self.static_resource_req.append(req)\n\n                visit_class(packed[\"$graph\"], (\"Workflow\", \"CommandLineTool\"), visit)\n\n                if self.static_resource_req:\n                    self.static_resource_req = [get_overall_res_req(self.static_resource_req)]\n\n                upload_dependencies(self.arvrunner,\n                                    runtimeContext.name,\n                                    self.doc_loader,\n                                    packed,\n                                    self.tool[\"id\"],\n                                    runtimeContext)\n\n                # Discover files/directories referenced by the\n                # workflow (mainly 
\"default\" values)\n                visit_class(packed, (\"File\", \"Directory\"), self.wf_reffiles.append)\n\n\n        if self.dynamic_resource_req:\n            # Evaluate dynamic resource requirements using current builder\n            rs = copy.copy(self.static_resource_req)\n            for dyn_rs in self.dynamic_resource_req:\n                eval_req = {\"class\": \"ResourceRequirement\"}\n                for a in max_res_pars + sum_res_pars:\n                    if a in dyn_rs:\n                        eval_req[a] = builder.do_eval(dyn_rs[a])\n                rs.append(eval_req)\n            job_res_reqs = [get_overall_res_req(rs)]\n        else:\n            job_res_reqs = self.static_resource_req\n\n        with Perf(metrics, \"subworkflow adjust\"):\n            joborder_resolved = copy.deepcopy(joborder)\n            joborder_keepmount = copy.deepcopy(joborder)\n\n            reffiles = []\n            visit_class(joborder_keepmount, (\"File\", \"Directory\"), reffiles.append)\n\n            mapper = ArvPathMapper(self.arvrunner, reffiles+self.wf_reffiles, runtimeContext.basedir,\n                                   \"/keep/%s\",\n                                   \"/keep/%s/%s\")\n\n            # For containers API, we need to make sure any extra\n            # referenced files (ie referenced by the workflow but\n            # not in the inputs) are included in the mounts.\n            if self.wf_reffiles:\n                runtimeContext = runtimeContext.copy()\n                runtimeContext.extra_reffiles = copy.deepcopy(self.wf_reffiles)\n\n            def keepmount(obj):\n                remove_redundant_fields(obj)\n                with SourceLine(obj, None, WorkflowException, logger.isEnabledFor(logging.DEBUG)):\n                    if \"location\" not in obj:\n                        raise WorkflowException(\"%s object is missing required 'location' field: %s\" % (obj[\"class\"], obj))\n                with SourceLine(obj, \"location\", WorkflowException, logger.isEnabledFor(logging.DEBUG)):\n                    if obj[\"location\"].startswith(\"keep:\"):\n                        obj[\"location\"] = mapper.mapper(obj[\"location\"]).target\n                        if \"listing\" in obj:\n                            del obj[\"listing\"]\n                    elif obj[\"location\"].startswith(\"_:\"):\n                        del obj[\"location\"]\n                    else:\n                        raise WorkflowException(\"Location is not a keep reference or a literal: '%s'\" % obj[\"location\"])\n\n            visit_class(joborder_keepmount, (\"File\", \"Directory\"), keepmount)\n\n            def resolved(obj):\n                if obj[\"location\"].startswith(\"keep:\"):\n                    obj[\"location\"] = mapper.mapper(obj[\"location\"]).resolved\n\n            visit_class(joborder_resolved, (\"File\", \"Directory\"), resolved)\n\n            if self.wf_pdh is None:\n                adjustFileObjs(packed, keepmount)\n                adjustDirObjs(packed, keepmount)\n                self.wf_pdh = upload_workflow_collection(self.arvrunner, shortname(self.tool[\"id\"]), packed, runtimeContext)\n\n        self.loadingContext = self.loadingContext.copy()\n        self.loadingContext.metadata = self.loadingContext.metadata.copy()\n        self.loadingContext.metadata[\"http://commonwl.org/cwltool#original_cwlVersion\"] = \"v1.0\"\n\n        if len(job_res_reqs) == 1:\n            # RAM request needs to be at least 128 MiB or the workflow\n            # runner itself 
won't run reliably.\n            if job_res_reqs[0].get(\"ramMin\", 1024) < 128:\n                job_res_reqs[0][\"ramMin\"] = 128\n\n        arguments = [\"--no-container\", \"--move-outputs\", \"--preserve-entire-environment\", \"workflow.cwl\", \"cwl.input.yml\"]\n        if runtimeContext.debug:\n            arguments.insert(0, '--debug')\n\n        wf_runner = cmap({\n            \"class\": \"CommandLineTool\",\n            \"baseCommand\": \"cwltool\",\n            \"inputs\": self.tool[\"inputs\"],\n            \"outputs\": self.tool[\"outputs\"],\n            \"stdout\": \"cwl.output.json\",\n            \"requirements\": self.requirements+job_res_reqs+[\n                {\"class\": \"InlineJavascriptRequirement\"},\n                {\n                \"class\": \"InitialWorkDirRequirement\",\n                \"listing\": [{\n                        \"entryname\": \"workflow.cwl\",\n                        \"entry\": '$({\"class\": \"File\", \"location\": \"keep:%s/workflow.cwl\"})' % self.wf_pdh\n                    }, {\n                        \"entryname\": \"cwl.input.yml\",\n                        \"entry\": json.dumps(joborder_keepmount, indent=2, sort_keys=True, separators=(',',': ')).replace(\"\\\\\", \"\\\\\\\\\").replace('$(', '\\\\$(').replace('${', '\\\\${')\n                    }]\n            }],\n            \"hints\": self.hints,\n            \"arguments\": arguments,\n            \"id\": \"#\"\n        })\n        return ArvadosCommandTool(self.arvrunner, wf_runner, self.loadingContext).job(joborder_resolved, output_callback, runtimeContext)\n\n\n    def separateRunner(self, joborder, output_callback, runtimeContext, req, builder):\n\n        name = runtimeContext.name\n\n        rpn = req.get(\"runnerProcessName\")\n        if rpn:\n            name = builder.do_eval(rpn)\n\n        return RunnerContainer(self.arvrunner,\n                               self,\n                               self.loadingContext,\n                               runtimeContext.enable_reuse,\n                               None,\n                               None,\n                               submit_runner_ram=runtimeContext.submit_runner_ram,\n                               name=name,\n                               on_error=runtimeContext.on_error,\n                               submit_runner_image=runtimeContext.submit_runner_image,\n                               intermediate_output_ttl=runtimeContext.intermediate_output_ttl,\n                               merged_map=None,\n                               priority=runtimeContext.priority,\n                               secret_store=self.arvrunner.secret_store,\n                               collection_cache_size=runtimeContext.collection_cache_size,\n                               collection_cache_is_default=self.arvrunner.should_estimate_cache_size,\n                               git_info=runtimeContext.git_info,\n                               reuse_runner=True).job(joborder, output_callback, runtimeContext)\n\n\n    def job(self, joborder, output_callback, runtimeContext):\n\n        builder = make_builder(joborder, self.hints, self.requirements, runtimeContext, self.metadata)\n        runtimeContext = set_cluster_target(self.tool, self.arvrunner, builder, runtimeContext)\n\n        req, _ = self.get_requirement(\"http://arvados.org/cwl#RunInSingleContainer\")\n        if req:\n            return self.runInSingleContainer(joborder, output_callback, runtimeContext, builder)\n\n        req, _ = 
self.get_requirement(\"http://arvados.org/cwl#SeparateRunner\")\n        if req:\n            return self.separateRunner(joborder, output_callback, runtimeContext, req, builder)\n\n        return super(ArvadosWorkflow, self).job(joborder, output_callback, runtimeContext)\n\n\n    def make_workflow_step(self,\n                           toolpath_object,      # type: Dict[Text, Any]\n                           pos,                  # type: int\n                           loadingContext,       # type: LoadingContext\n                           *argc,\n                           **argv\n    ):\n        # (...) -> WorkflowStep\n        return ArvadosWorkflowStep(toolpath_object, pos, loadingContext, self.arvrunner, *argc, **argv)\n"
  },
  {
    "path": "sdk/cwl/arvados_cwl/context.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom cwltool.context import LoadingContext, RuntimeContext\nfrom collections import namedtuple\n\nclass ArvLoadingContext(LoadingContext):\n    def __init__(self, kwargs=None):\n        self.default_docker_image = None\n        super(ArvLoadingContext, self).__init__(kwargs)\n\nclass ArvRuntimeContext(RuntimeContext):\n    def __init__(self, kwargs=None):\n        self.work_api = None\n        self.extra_reffiles = []\n        self.priority = 500\n        self.enable_reuse = True\n        self.runnerjob = \"\"\n        self.submit_request_uuid = None\n        self.project_uuid = None\n        self.trash_intermediate = False\n        self.intermediate_output_ttl = 0\n        self.update_workflow = \"\"\n        self.create_workflow = False\n        self.submit_runner_ram = 0\n        self.ignore_docker_for_reuse = False\n        self.submit = True\n        self.submit_runner_image = None\n        self.wait = True\n        self.cwl_runner_job = None\n        self.storage_classes = []\n        self.intermediate_storage_classes = []\n        self.current_container = None\n        self.http_timeout = 300\n        self.submit_runner_cluster = None\n        self.cluster_target_id = 0\n        self.always_submit_runner = False\n        self.collection_cache_size = 256\n        self.match_local_docker = False\n        self.enable_preemptible = None\n        self.enable_resubmit_non_preemptible = None\n        self.copy_deps = None\n        self.defer_downloads = False\n        self.varying_url_params = \"\"\n        self.prefer_cached_downloads = False\n        self.cached_docker_lookups = {}\n        self.print_keep_deps = False\n        self.git_info = {}\n        self.enable_usage_report = None\n        self.usage_report_notes = []\n        self.aws_credential_capture = True\n        self.selected_credential = None\n        self.s3_public_bucket = False\n\n        super(ArvRuntimeContext, self).__init__(kwargs)\n\n        if self.submit_request_uuid:\n            self.submit_runner_cluster = self.submit_request_uuid[0:5]\n\n    def get_outdir(self) -> str:\n        \"\"\"Return self.outdir or create one with self.tmp_outdir_prefix.\"\"\"\n        return self.outdir\n\n    def get_tmpdir(self) -> str:\n        \"\"\"Return self.tmpdir or create one with self.tmpdir_prefix.\"\"\"\n        return self.tmpdir\n\n    def create_tmpdir(self) -> str:\n        \"\"\"Return self.tmpdir or create one with self.tmpdir_prefix.\"\"\"\n        return self.tmpdir\n"
  },
  {
    "path": "sdk/cwl/arvados_cwl/done.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport re\n\nfrom collections import deque\nfrom cwltool.errors import WorkflowException\n\ndef done(self, record, tmpdir, outdir, keepdir):\n    cols = [\n        (\"output\", \"Output %s of %s\" % (record[\"output\"][0:7], self.name), record[\"output\"]),\n        (\"log\", \"Log of %s\" % (record[\"uuid\"]), record[\"log\"])\n    ]\n\n    for coltype, colname, colpdh in cols:\n        # check if collection already exists with same owner, name and content\n        collection_exists = self.arvrunner.api.collections().list(\n            filters=[[\"owner_uuid\", \"=\", self.arvrunner.project_uuid],\n                     ['portable_data_hash', '=', colpdh],\n                     [\"name\", \"=\", colname]]\n        ).execute(num_retries=self.arvrunner.num_retries)\n\n        if not collection_exists[\"items\"]:\n            # Create a collection located in the same project as the\n            # pipeline with the contents of the output/log.\n            # First, get output/log record.\n            collections = self.arvrunner.api.collections().list(\n                limit=1,\n                filters=[['portable_data_hash', '=', colpdh]],\n                select=[\"manifest_text\"]\n            ).execute(num_retries=self.arvrunner.num_retries)\n\n            if not collections[\"items\"]:\n                raise WorkflowException(\n                    \"[job %s] %s '%s' cannot be found on API server\" % (\n                        self.name, coltype, colpdh))\n\n            # Create new collection in the parent project\n            # with the output/log contents.\n            self.arvrunner.api.collections().create(body={\n                \"owner_uuid\": self.arvrunner.project_uuid,\n                \"name\": colname,\n                \"portable_data_hash\": colpdh,\n                \"manifest_text\": collections[\"items\"][0][\"manifest_text\"]\n            }, ensure_unique_name=True).execute(\n                num_retries=self.arvrunner.num_retries)\n\n    return done_outputs(self, record, tmpdir, outdir, keepdir)\n\ndef done_outputs(self, record, tmpdir, outdir, keepdir):\n    self.builder.outdir = outdir\n    self.builder.pathmapper.keepdir = keepdir\n    return self.collect_outputs(\"keep:\" + record[\"output\"], record[\"exit_code\"])\n\ncrunchstat_re = re.compile(r\"^\\d{4}-\\d\\d-\\d\\d_\\d\\d:\\d\\d:\\d\\d [a-z0-9]{5}-8i9sb-[a-z0-9]{15} \\d+ \\d+ stderr crunchstat:\")\ntimestamp_re = re.compile(r\"^(\\d{4}-\\d\\d-\\d\\dT\\d\\d:\\d\\d:\\d\\d\\.\\d+Z) (.*)\")\n\ndef logtail(logcollection, logfunc, header, maxlen=25, include_crunchrun=True):\n    if len(logcollection) == 0:\n        logfunc(\"%s\\n%s\", header, \"  ** log is empty **\")\n        return\n\n    mergelogs = {}\n    logfiles = [\"stdout.txt\", \"stderr.txt\"]\n\n    if include_crunchrun:\n        logfiles.append(\"crunch-run.txt\")\n\n    for log in logfiles:\n        if log not in logcollection:\n            continue\n        logname = log[:-4]  # trim off the .txt\n        logt = deque([], maxlen)\n        mergelogs[logname] = logt\n        with logcollection.open(log, encoding=\"utf-8\") as f:\n            for l in f:\n                g = timestamp_re.match(l)\n                logt.append((g.group(1), g.group(2)))\n\n    keys = list(mergelogs)\n    loglines = []\n\n    # we assume the log lines are all in order so this this is a\n    # straight linear merge where we look at the next timestamp of\n    # each 
log and take whichever one is earliest.\n    while True:\n        earliest = None\n        for k in keys:\n            if mergelogs[k]:\n                if earliest is None or mergelogs[k][0][0] < mergelogs[earliest][0][0]:\n                    earliest = k\n        if earliest is None:\n            break\n        ts, msg = mergelogs[earliest].popleft()\n        loglines.append(\"%s %s %s\" % (ts, earliest, msg))\n    loglines = loglines[-maxlen:]\n\n    logtxt = \"\\n  \".join(l.strip() for l in loglines)\n    logfunc(\"%s\\n\\n  %s\\n\", header, logtxt)\n"
  },
  {
    "path": "sdk/cwl/arvados_cwl/executor.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport logging\nimport os\nimport sys\nimport threading\nimport copy\nimport json\nimport re\nfrom functools import partial\nimport subprocess\nimport time\nimport urllib\n\nfrom cwltool.errors import WorkflowException\nimport cwltool.workflow\nfrom schema_salad.sourceline import SourceLine, cmap\nimport schema_salad.validate as validate\nfrom schema_salad.ref_resolver import file_uri, uri_file_path\n\nimport arvados\nimport arvados.config\nimport arvados.util\nfrom arvados.keep import KeepClient\nfrom arvados.errors import ApiError\n\nimport arvados_cwl.util\nfrom .arvcontainer import RunnerContainer, cleanup_name_for_collection\nfrom .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps, make_builder, update_from_merged_map, print_keep_deps, ArvSecretStore\nfrom .arvtool import ArvadosCommandTool, validate_cluster_target, ArvadosExpressionTool\nfrom .arvworkflow import ArvadosWorkflow, upload_workflow, make_workflow_record\nfrom .fsaccess import CollectionFsAccess, CollectionFetcher, collectionResolver, CollectionCache, pdh_size\nfrom .perf import Perf\nfrom .pathmapper import NoFollowPathMapper\nfrom cwltool.task_queue import TaskQueue\nfrom .context import ArvLoadingContext, ArvRuntimeContext\nfrom ._version import __version__\n\nfrom cwltool.process import shortname, UnsupportedRequirement, use_custom_schema\nfrom cwltool.utils import adjustFileObjs, adjustDirObjs, get_listing, visit_class, aslist\nfrom cwltool.command_line_tool import compute_checksums\nfrom cwltool.load_tool import load_tool\n\nlogger = logging.getLogger('arvados.cwl-runner')\nmetrics = logging.getLogger('arvados.cwl-runner.metrics')\n\nDEFAULT_PRIORITY = 500\n\nclass RuntimeStatusLoggingHandler(logging.Handler):\n    \"\"\"\n    Intercepts logging calls and report them as runtime statuses on runner\n    containers.\n    \"\"\"\n    def __init__(self, runtime_status_update_func):\n        super(RuntimeStatusLoggingHandler, self).__init__()\n        self.runtime_status_update = runtime_status_update_func\n        self.updatingRuntimeStatus = False\n\n    def emit(self, record):\n        kind = None\n        if record.levelno >= logging.ERROR:\n            kind = 'error'\n        elif record.levelno >= logging.WARNING:\n            kind = 'warning'\n        if kind == 'warning' and record.name in (\"salad\", \"crunchstat_summary\"):\n            # Don't send validation warnings to runtime status,\n            # they're noisy and unhelpful.\n            return\n        if kind is not None and self.updatingRuntimeStatus is not True:\n            self.updatingRuntimeStatus = True\n            try:\n                log_msg = record.getMessage()\n                if '\\n' in log_msg:\n                    # If the logged message is multi-line, use its first line as status\n                    # and the rest as detail.\n                    status, detail = log_msg.split('\\n', 1)\n                    self.runtime_status_update(\n                        kind,\n                        \"%s: %s\" % (record.name, status),\n                        detail\n                    )\n                else:\n                    self.runtime_status_update(\n                        kind,\n                        \"%s: %s\" % (record.name, record.getMessage())\n                    )\n            finally:\n                self.updatingRuntimeStatus = False\n\n\nclass 
ArvCwlExecutor(object):\n    \"\"\"Execute a CWL tool or workflow, submit work (using the containers API),\n    wait for it to complete, and report output.\n\n    \"\"\"\n\n    def __init__(self, api_client,\n                 arvargs=None,\n                 keep_client=None,\n                 num_retries=4,\n                 thread_count=4,\n                 stdout=sys.stdout):\n\n        if arvargs is None:\n            arvargs = argparse.Namespace()\n            arvargs.work_api = None\n            arvargs.output_name = None\n            arvargs.output_tags = None\n            arvargs.thread_count = 1\n            arvargs.collection_cache_size = None\n            arvargs.git_info = True\n            arvargs.submit = False\n            arvargs.defer_downloads = False\n\n        self.api = api_client\n        self.processes = {}\n        self.workflow_eval_lock = threading.Condition(threading.RLock())\n        self.final_output = None\n        self.final_status = None\n        self.num_retries = num_retries\n        self.uuid = None\n        self.stop_polling = threading.Event()\n        self.poll_api = None\n        self.pipeline = None\n        self.final_output_collection = None\n        self.output_name = arvargs.output_name\n        self.output_tags = arvargs.output_tags\n        self.project_uuid = None\n        self.intermediate_output_ttl = 0\n        self.intermediate_output_collections = []\n        self.trash_intermediate = False\n        self.thread_count = arvargs.thread_count\n        self.poll_interval = 12\n        self.loadingContext = None\n        self.should_estimate_cache_size = True\n        self.fs_access = None\n        self.secret_store = None\n        self.stdout = stdout\n        self.fast_submit = False\n        self.git_info = arvargs.git_info\n        self.debug = False\n        self.botosession = None\n        self.selected_credential = None\n\n        if keep_client is not None:\n            self.keep_client = keep_client\n        else:\n            self.keep_client = arvados.keep.KeepClient(api_client=self.api, num_retries=self.num_retries)\n\n        if arvargs.collection_cache_size:\n            collection_cache_size = arvargs.collection_cache_size*1024*1024\n            self.should_estimate_cache_size = False\n        else:\n            collection_cache_size = 256*1024*1024\n\n        self.collection_cache = CollectionCache(self.api, self.keep_client, self.num_retries,\n                                                cap=collection_cache_size)\n\n        self.fetcher_constructor = partial(CollectionFetcher,\n                                           api_client=self.api,\n                                           fs_access=CollectionFsAccess(\"\", collection_cache=self.collection_cache),\n                                           num_retries=self.num_retries)\n\n        self.work_api = None\n        expected_api = [\"containers\"]\n        for api in expected_api:\n            try:\n                methods = self.api._rootDesc.get('resources')[api]['methods']\n                if ('httpMethod' in methods['create'] and\n                    (arvargs.work_api == api or arvargs.work_api is None)):\n                    self.work_api = api\n                    break\n            except KeyError:\n                pass\n\n        if not self.work_api:\n            if arvargs.work_api is None:\n                raise Exception(\"No supported APIs\")\n            else:\n                raise Exception(\"Unsupported API '%s', expected one of %s\" % (arvargs.work_api, 
expected_api))\n\n        if self.work_api == \"jobs\":\n            logger.error(\"\"\"\n*******************************\nThe 'jobs' API is no longer supported.\n*******************************\"\"\")\n            sys.exit(1)\n\n        self.loadingContext = ArvLoadingContext(vars(arvargs))\n        self.loadingContext.fetcher_constructor = self.fetcher_constructor\n        self.loadingContext.resolver = partial(collectionResolver, self.api, num_retries=self.num_retries)\n        self.loadingContext.construct_tool_object = self.arv_make_tool\n\n        # Add a custom logging handler to the root logger for runtime status reporting\n        # if running inside a container\n        if arvados_cwl.util.get_current_container(self.api, self.num_retries, logger):\n            root_logger = logging.getLogger('')\n\n            # Remove existing RuntimeStatusLoggingHandlers if they exist\n            handlers = [h for h in root_logger.handlers if not isinstance(h, RuntimeStatusLoggingHandler)]\n            root_logger.handlers = handlers\n\n            handler = RuntimeStatusLoggingHandler(self.runtime_status_update)\n            root_logger.addHandler(handler)\n\n        self.toplevel_runtimeContext = ArvRuntimeContext(vars(arvargs))\n        self.toplevel_runtimeContext.make_fs_access = partial(CollectionFsAccess,\n                                                     collection_cache=self.collection_cache)\n        self.toplevel_runtimeContext.secret_store = ArvSecretStore()\n\n        self.defer_downloads = arvargs.submit and arvargs.defer_downloads\n\n        validate_cluster_target(self, self.toplevel_runtimeContext)\n\n\n    def arv_make_tool(self, toolpath_object, loadingContext):\n        if \"class\" in toolpath_object and toolpath_object[\"class\"] == \"CommandLineTool\":\n            return ArvadosCommandTool(self, toolpath_object, loadingContext)\n        elif \"class\" in toolpath_object and toolpath_object[\"class\"] == \"Workflow\":\n            return ArvadosWorkflow(self, toolpath_object, loadingContext)\n        elif \"class\" in toolpath_object and toolpath_object[\"class\"] == \"ExpressionTool\":\n            return ArvadosExpressionTool(self, toolpath_object, loadingContext)\n        else:\n            raise Exception(\"Unknown tool %s\" % toolpath_object.get(\"class\"))\n\n    def output_callback(self, out, processStatus):\n        with self.workflow_eval_lock:\n            if processStatus == \"success\":\n                logger.info(\"Overall process status is %s\", processStatus)\n                state = \"Complete\"\n            else:\n                logger.error(\"Overall process status is %s\", processStatus)\n                state = \"Failed\"\n            if self.pipeline:\n                self.api.pipeline_instances().update(uuid=self.pipeline[\"uuid\"],\n                                                        body={\"state\": state}).execute(num_retries=self.num_retries)\n            self.final_status = processStatus\n            self.final_output = out\n            self.workflow_eval_lock.notify_all()\n\n\n    def start_run(self, runnable, runtimeContext):\n        self.task_queue.add(partial(runnable.run, runtimeContext),\n                            self.workflow_eval_lock, self.stop_polling)\n\n    def process_submitted(self, container):\n        with self.workflow_eval_lock:\n            self.processes[container.uuid] = container\n\n    def process_done(self, uuid, record):\n        with self.workflow_eval_lock:\n            j = self.processes[uuid]\n            
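# Hand the final-state record to the task queue so done() runs off the\n            # polling thread, then stop tracking this process.\n            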
logger.info(\"%s %s is %s\", self.label(j), uuid, record[\"state\"])\n            self.task_queue.add(partial(j.done, record),\n                                self.workflow_eval_lock, self.stop_polling)\n            del self.processes[uuid]\n\n    def runtime_status_update(self, kind, message, detail=None):\n        \"\"\"\n        Updates the runtime_status field on the runner container.\n        Called when there's a need to report errors, warnings or just\n        activity statuses, for example in the RuntimeStatusLoggingHandler.\n        \"\"\"\n\n        if kind not in ('error', 'warning', 'activity'):\n            # Ignore any other status kind\n            return\n\n        with self.workflow_eval_lock:\n            current = None\n            try:\n                current = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)\n            except Exception as e:\n                logger.info(\"Couldn't get current container: %s\", e)\n            if current is None:\n                return\n            runtime_status = current.get('runtime_status', {})\n\n            original_updatemessage = updatemessage = runtime_status.get(kind, \"\")\n            if kind == \"activity\" or not updatemessage:\n                updatemessage = message\n\n            # Subsequent messages tacked on in detail\n            original_updatedetail = updatedetail = runtime_status.get(kind+'Detail', \"\")\n            maxlines = 40\n            if updatedetail.count(\"\\n\") < maxlines:\n                if updatedetail:\n                    updatedetail += \"\\n\"\n                updatedetail += message + \"\\n\"\n\n                if detail:\n                    updatedetail += detail + \"\\n\"\n\n                if updatedetail.count(\"\\n\") >= maxlines:\n                    updatedetail += \"\\nSome messages may have been omitted.  
Check the full log.\"\n\n            if updatemessage == original_updatemessage and updatedetail == original_updatedetail:\n                # don't waste time doing an update if nothing changed\n                # (usually because we exceeded the max lines)\n                return\n\n            runtime_status.update({\n                kind: updatemessage,\n                kind+'Detail': updatedetail,\n            })\n\n            try:\n                self.api.containers().update(uuid=current['uuid'],\n                                            body={\n                                                'runtime_status': runtime_status,\n                                            }).execute(num_retries=self.num_retries)\n            except Exception as e:\n                logger.info(\"Couldn't update runtime_status: %s\", e)\n\n    def wrapped_callback(self, cb, obj, st):\n        with self.workflow_eval_lock:\n            cb(obj, st)\n            self.workflow_eval_lock.notifyAll()\n\n    def get_wrapped_callback(self, cb):\n        return partial(self.wrapped_callback, cb)\n\n    def on_message(self, event):\n        if event.get(\"object_uuid\") in self.processes and event[\"event_type\"] == \"update\":\n            uuid = event[\"object_uuid\"]\n            if event[\"properties\"][\"new_attributes\"][\"state\"] == \"Running\":\n                with self.workflow_eval_lock:\n                    j = self.processes[uuid]\n                    if j.running is False:\n                        j.running = True\n                        j.update_pipeline_component(event[\"properties\"][\"new_attributes\"])\n                        logger.info(\"%s %s is Running\", self.label(j), uuid)\n            elif event[\"properties\"][\"new_attributes\"][\"state\"] == \"Final\":\n                # underlying container is completed or cancelled\n                self.process_done(uuid, event[\"properties\"][\"new_attributes\"])\n            elif (event[\"properties\"][\"new_attributes\"][\"state\"] == \"Committed\" and\n                  event[\"properties\"][\"new_attributes\"][\"priority\"] == 0):\n                # cancelled before it got a chance to run, remains in\n                # comitted state but isn't going to run so treat it as\n                # cancelled.\n                self.process_done(uuid, event[\"properties\"][\"new_attributes\"])\n\n\n    def label(self, obj):\n        return \"[%s %s]\" % (self.work_api[0:-1], obj.name)\n\n    def poll_states(self):\n        \"\"\"Poll status of containers listed in the processes dict.\n\n        Runs in a separate thread.\n        \"\"\"\n\n        try:\n            remain_wait = self.poll_interval\n            while True:\n                if remain_wait > 0:\n                    self.stop_polling.wait(remain_wait)\n                if self.stop_polling.is_set():\n                    break\n                with self.workflow_eval_lock:\n                    keys = list(self.processes)\n                if not keys:\n                    remain_wait = self.poll_interval\n                    continue\n\n                begin_poll = time.time()\n                if self.work_api == \"containers\":\n                    table = self.poll_api.container_requests()\n\n                pageSize = self.poll_api._rootDesc.get('maxItemsPerResponse', 1000)\n\n                while keys:\n                    page = keys[:pageSize]\n                    try:\n                        proc_states = table.list(filters=[[\"uuid\", \"in\", page]], select=[\"uuid\", 
\"container_uuid\", \"state\", \"log_uuid\",\n                                                                                         \"output_uuid\", \"modified_at\", \"properties\",\n                                                                                         \"runtime_constraints\", \"priority\"]).execute(num_retries=self.num_retries)\n                    except Exception as e:\n                        logger.warning(\"Temporary error checking states on API server: %s\", e)\n                        remain_wait = self.poll_interval\n                        continue\n\n                    for p in proc_states[\"items\"]:\n                        self.on_message({\n                            \"object_uuid\": p[\"uuid\"],\n                            \"event_type\": \"update\",\n                            \"properties\": {\n                                \"new_attributes\": p\n                            }\n                        })\n                    keys = keys[pageSize:]\n\n                finish_poll = time.time()\n                remain_wait = self.poll_interval - (finish_poll - begin_poll)\n        except:\n            logger.exception(\"Fatal error in state polling thread.\")\n            with self.workflow_eval_lock:\n                self.processes.clear()\n                self.workflow_eval_lock.notifyAll()\n        finally:\n            self.stop_polling.set()\n\n    def add_intermediate_output(self, uuid):\n        if uuid:\n            self.intermediate_output_collections.append(uuid)\n\n    def trash_intermediate_output(self):\n        logger.info(\"Cleaning up intermediate output collections\")\n        for i in self.intermediate_output_collections:\n            try:\n                self.api.collections().delete(uuid=i).execute(num_retries=self.num_retries)\n            except Exception:\n                logger.warning(\"Failed to delete intermediate output: %s\", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))\n            except (KeyboardInterrupt, SystemExit):\n                break\n\n    def check_features(self, obj, parentfield=\"\"):\n        if isinstance(obj, dict):\n            if obj.get(\"class\") == \"DockerRequirement\":\n                if obj.get(\"dockerOutputDirectory\"):\n                    if not obj.get(\"dockerOutputDirectory\").startswith('/'):\n                        raise SourceLine(obj, \"dockerOutputDirectory\", validate.ValidationException).makeError(\n                            \"Option 'dockerOutputDirectory' must be an absolute path.\")\n            if obj.get(\"class\") == \"InplaceUpdateRequirement\":\n                if obj[\"inplaceUpdate\"] and parentfield == \"requirements\":\n                    raise SourceLine(obj, \"class\", UnsupportedRequirement).makeError(\"InplaceUpdateRequirement not supported for keep collections.\")\n            for k,v in obj.items():\n                self.check_features(v, parentfield=k)\n        elif isinstance(obj, list):\n            for i,v in enumerate(obj):\n                with SourceLine(obj, i, UnsupportedRequirement, logger.isEnabledFor(logging.DEBUG)):\n                    self.check_features(v, parentfield=parentfield)\n\n    def make_output_collection(self, name, storage_classes, tagsString, output_properties, outputObj):\n        outputObj = copy.deepcopy(outputObj)\n\n        files = []\n        def captureFile(fileobj):\n            files.append(fileobj)\n\n        def captureDir(dirobj):\n            if dirobj[\"location\"].startswith(\"keep:\") 
and 'listing' in dirobj:\n                del dirobj['listing']\n            files.append(dirobj)\n\n        adjustDirObjs(outputObj, captureDir)\n        adjustFileObjs(outputObj, captureFile)\n\n        generatemapper = NoFollowPathMapper(files, \"\", \"\", separateDirs=False)\n\n        final = arvados.collection.Collection(api_client=self.api,\n                                              keep_client=self.keep_client,\n                                              num_retries=self.num_retries)\n\n        for k,v in generatemapper.items():\n            if v.type == \"Directory\" and v.resolved.startswith(\"_:\"):\n                continue\n            if v.type == \"CreateFile\" and (k.startswith(\"_:\") or v.resolved.startswith(\"_:\")):\n                with final.open(v.target, \"wb\") as f:\n                    f.write(v.resolved.encode(\"utf-8\"))\n                continue\n\n            if not v.resolved.startswith(\"keep:\"):\n                raise Exception(\"Output source is not in keep or a literal\")\n            sp = v.resolved.split(\"/\")\n            srccollection = sp[0][5:]\n            try:\n                reader = self.collection_cache.get(srccollection)\n                srcpath = urllib.parse.unquote(\"/\".join(sp[1:]) if len(sp) > 1 else \".\")\n                final.copy(srcpath, v.target, source_collection=reader, overwrite=False)\n            except arvados.errors.ArgumentError as e:\n                logger.error(\"Creating CollectionReader for '%s' '%s': %s\", k, v, e)\n                raise\n            except IOError as e:\n                logger.error(\"While preparing output collection: %s\", e)\n                raise\n\n        def rewrite(fileobj):\n            fileobj[\"location\"] = generatemapper.mapper(fileobj[\"location\"]).target\n            for k in (\"listing\", \"contents\", \"nameext\", \"nameroot\", \"dirname\"):\n                if k in fileobj:\n                    del fileobj[k]\n\n        adjustDirObjs(outputObj, rewrite)\n        adjustFileObjs(outputObj, rewrite)\n\n        with final.open(\"cwl.output.json\", \"w\") as f:\n            res = str(json.dumps(outputObj, sort_keys=True, indent=4, separators=(',',': '), ensure_ascii=False))\n            f.write(res)\n\n\n        final.save_new(name=name, owner_uuid=self.project_uuid, storage_classes=storage_classes,\n                       ensure_unique_name=True, properties=output_properties)\n\n        logger.info(\"Final output collection %s \\\"%s\\\" (%s)\", final.portable_data_hash(),\n                    final.api_response()[\"name\"],\n                    final.manifest_locator())\n\n        final_uuid = final.manifest_locator()\n        tags = tagsString.split(',')\n        for tag in tags:\n            self.api.links().create(body={\n                \"head_uuid\": final_uuid, \"link_class\": \"tag\", \"name\": tag\n                }).execute(num_retries=self.num_retries)\n\n        def finalcollection(fileobj):\n            fileobj[\"location\"] = \"keep:%s/%s\" % (final.portable_data_hash(), fileobj[\"location\"])\n\n        adjustDirObjs(outputObj, finalcollection)\n        adjustFileObjs(outputObj, finalcollection)\n\n        return (outputObj, final)\n\n    def set_crunch_output(self):\n        if self.work_api == \"containers\":\n            current = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)\n            if current is None:\n                return\n            try:\n                self.api.containers().update(uuid=current['uuid'],\n  
                                           body={\n                                                 'output': self.final_output_collection.portable_data_hash(),\n                                                 'output_properties': self.final_output_collection.get_properties(),\n                                             }).execute(num_retries=self.num_retries)\n                self.api.collections().update(uuid=self.final_output_collection.manifest_locator(),\n                                              body={\n                                                  'is_trashed': True\n                                              }).execute(num_retries=self.num_retries)\n            except Exception:\n                logger.exception(\"Setting container output\")\n                raise\n\n    def apply_reqs(self, job_order_object, tool):\n        if \"https://w3id.org/cwl/cwl#requirements\" in job_order_object:\n            if tool.metadata.get(\"http://commonwl.org/cwltool#original_cwlVersion\") == 'v1.0':\n                raise WorkflowException(\n                    \"`cwl:requirements` in the input object is not part of CWL \"\n                    \"v1.0. You can adjust to use `cwltool:overrides` instead; or you \"\n                    \"can set the cwlVersion to v1.1 or greater and re-run with \"\n                    \"--enable-dev.\")\n            job_reqs = job_order_object[\"https://w3id.org/cwl/cwl#requirements\"]\n            for req in job_reqs:\n                tool.requirements.append(req)\n\n    @staticmethod\n    def get_git_info(tool):\n        in_a_git_repo = False\n        cwd = None\n        filepath = None\n\n        if tool.tool[\"id\"].startswith(\"file://\"):\n            # Check whether git is installed and the tool file is inside a git work tree.\n            try:\n                filepath = uri_file_path(tool.tool[\"id\"])\n                cwd = os.path.dirname(filepath)\n                subprocess.run(\n                    [\"git\", \"log\", \"--format=%H\", \"-n1\", \"HEAD\"],\n                    cwd=cwd,\n                    check=True,\n                    stdout=subprocess.DEVNULL,\n                )\n                in_a_git_repo = True\n            except Exception:\n                pass\n\n        gitproperties = {}\n\n        if in_a_git_repo:\n            def git_output(cmd):\n                return subprocess.run(\n                    cmd,\n                    cwd=cwd,\n                    stdout=subprocess.PIPE,\n                    universal_newlines=True,\n                ).stdout.strip()\n            git_commit = git_output([\"git\", \"log\", \"--format=%H\", \"-n1\", \"HEAD\"])\n            git_date = git_output([\"git\", \"log\", \"--format=%cD\", \"-n1\", \"HEAD\"])\n            git_committer = git_output([\"git\", \"log\", \"--format=%cn <%ce>\", \"-n1\", \"HEAD\"])\n            git_branch = git_output([\"git\", \"rev-parse\", \"--abbrev-ref\", \"HEAD\"])\n            git_origin = git_output([\"git\", \"remote\", \"get-url\", \"origin\"])\n            git_status = git_output([\"git\", \"status\", \"--untracked-files=no\", \"--porcelain\"])\n            git_describe = git_output([\"git\", \"describe\", \"--always\", \"--tags\"])\n            git_toplevel = git_output([\"git\", \"rev-parse\", \"--show-toplevel\"])\n            git_path = filepath[len(git_toplevel):]\n\n            git_origin = arvados_cwl.util.sanitize_url(git_origin)\n\n            gitproperties = {\n                \"http://arvados.org/cwl#gitCommit\": git_commit,\n                \"http://arvados.org/cwl#gitDate\": 
git_date,\n                \"http://arvados.org/cwl#gitCommitter\": git_committer,\n                \"http://arvados.org/cwl#gitBranch\": git_branch,\n                \"http://arvados.org/cwl#gitOrigin\": git_origin,\n                \"http://arvados.org/cwl#gitStatus\": git_status,\n                \"http://arvados.org/cwl#gitDescribe\": git_describe,\n                \"http://arvados.org/cwl#gitPath\": git_path,\n            }\n        else:\n            for g in (\"http://arvados.org/cwl#gitCommit\",\n                      \"http://arvados.org/cwl#gitDate\",\n                      \"http://arvados.org/cwl#gitCommitter\",\n                      \"http://arvados.org/cwl#gitBranch\",\n                      \"http://arvados.org/cwl#gitOrigin\",\n                      \"http://arvados.org/cwl#gitStatus\",\n                      \"http://arvados.org/cwl#gitDescribe\",\n                      \"http://arvados.org/cwl#gitPath\"):\n                if g in tool.metadata:\n                    gitproperties[g] = tool.metadata[g]\n\n        return gitproperties\n\n    def set_container_request_properties(self, container, properties):\n        resp = self.api.container_requests().list(filters=[[\"container_uuid\", \"=\", container[\"uuid\"]]], select=[\"uuid\", \"properties\"]).execute(num_retries=self.num_retries)\n        for cr in resp[\"items\"]:\n            cr[\"properties\"].update({k.replace(\"http://arvados.org/cwl#\", \"arv:\"): v for k, v in properties.items()})\n            self.api.container_requests().update(uuid=cr[\"uuid\"], body={\"container_request\": {\"properties\": cr[\"properties\"]}}).execute(num_retries=self.num_retries)\n\n    def get_credential(self, runtimeContext):\n        if runtimeContext.selected_credential is None:\n            return\n\n        for key in (\"uuid\", \"name\"):\n            result = self.api.credentials().list(filters=[[key, \"=\", runtimeContext.selected_credential]]).execute()\n            if len(result[\"items\"]) == 1:\n                self.selected_credential = result[\"items\"][0]\n                break\n\n    def get_credential_secret(self):\n        if self.selected_credential is None:\n            return\n        self.selected_credential.update(self.api.credentials().secret(uuid=self.selected_credential[\"uuid\"]).execute())\n\n\n    def arv_executor(self, updated_tool, job_order, runtimeContext, logger=None):\n        self.debug = runtimeContext.debug\n\n        self.runtime_status_update(\"activity\", \"initialization\")\n\n        git_info = self.get_git_info(updated_tool) if self.git_info else {}\n        if git_info:\n            logger.info(\"Git provenance\")\n            for g in git_info:\n                if git_info[g]:\n                    logger.info(\"  %s: %s\", g.split(\"#\", 1)[1], git_info[g])\n\n        runtimeContext.git_info = git_info\n\n        workbench1 = self.api.config()[\"Services\"][\"Workbench1\"][\"ExternalURL\"]\n        workbench2 = self.api.config()[\"Services\"][\"Workbench2\"][\"ExternalURL\"]\n        controller = self.api.config()[\"Services\"][\"Controller\"][\"ExternalURL\"]\n        logger.info(\"Using cluster %s (%s)\", self.api.config()[\"ClusterID\"], workbench2 or workbench1 or controller)\n\n        if not self.fast_submit:\n            updated_tool.visit(self.check_features)\n\n        self.pipeline = None\n        self.fs_access = runtimeContext.make_fs_access(runtimeContext.basedir)\n        self.secret_store = runtimeContext.secret_store\n\n        self.trash_intermediate = 
runtimeContext.trash_intermediate\n        if self.trash_intermediate and self.work_api != \"containers\":\n            raise Exception(\"--trash-intermediate is only supported with --api=containers.\")\n\n        self.intermediate_output_ttl = runtimeContext.intermediate_output_ttl\n        if self.intermediate_output_ttl and self.work_api != \"containers\":\n            raise Exception(\"--intermediate-output-ttl is only supported with --api=containers.\")\n        if self.intermediate_output_ttl < 0:\n            raise Exception(\"Invalid value %d for --intermediate-output-ttl, cannot be less than zero\" % self.intermediate_output_ttl)\n\n        if runtimeContext.submit_request_uuid and self.work_api != \"containers\":\n            raise Exception(\"--submit-request-uuid requires containers API, but using '{}' api\".format(self.work_api))\n\n        runtimeContext = runtimeContext.copy()\n\n        if not runtimeContext.name:\n            self.name = updated_tool.tool.get(\"label\") or updated_tool.metadata.get(\"label\") or os.path.basename(updated_tool.tool[\"id\"])\n            if git_info.get(\"http://arvados.org/cwl#gitDescribe\"):\n                self.name = \"%s (%s)\" % (self.name, git_info.get(\"http://arvados.org/cwl#gitDescribe\"))\n            runtimeContext.name = self.name\n\n        if runtimeContext.copy_deps is None and (runtimeContext.create_workflow or runtimeContext.update_workflow):\n            # When creating or updating workflow record, by default\n            # always copy dependencies and ensure Docker images are up\n            # to date.\n            runtimeContext.copy_deps = True\n            runtimeContext.match_local_docker = True\n\n        if runtimeContext.print_keep_deps:\n            runtimeContext.copy_deps = False\n            runtimeContext.match_local_docker = False\n\n        if runtimeContext.update_workflow and self.project_uuid is None:\n            # If we are updating a workflow, make sure anything that\n            # gets uploaded goes into the same parent project, unless\n            # an alternate --project-uuid was provided.\n            existing_wf = self.api.workflows().get(uuid=runtimeContext.update_workflow).execute()\n            runtimeContext.project_uuid = existing_wf[\"owner_uuid\"]\n\n        self.project_uuid = runtimeContext.project_uuid\n\n        self.runtime_status_update(\"activity\", \"data transfer\")\n\n        current_container = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)\n        self.get_credential(runtimeContext)\n        if current_container:\n            logger.info(\"Running inside container %s\", current_container.get(\"uuid\"))\n            self.get_credential_secret()\n\n        # Upload local file references in the job order.\n        with Perf(metrics, \"upload_job_order\"):\n            job_order, jobmapper = upload_job_order(self, \"%s input\" % runtimeContext.name,\n                                         updated_tool, job_order, runtimeContext)\n\n        # determine if we are submitting or directly executing the workflow.\n        #\n        # the last clause means: if it is a command line tool, and we\n        # are going to wait for the result, and always_submit_runner\n        # is false, then we don't submit a runner process.\n\n        submitting = ((runtimeContext.submit and not\n                       (updated_tool.tool[\"class\"] == \"CommandLineTool\" and\n                        runtimeContext.wait and\n                        not 
runtimeContext.always_submit_runner)) or\n                      runtimeContext.defer_downloads)\n\n        loadingContext = self.loadingContext.copy()\n        loadingContext.do_validate = False\n        loadingContext.disable_js_validation = True\n        tool = updated_tool\n\n        # Upload direct dependencies of workflow steps, get back mapping of files to keep references.\n        # Also uploads docker images.\n        if not self.fast_submit:\n            logger.info(\"Uploading workflow dependencies\")\n            with Perf(metrics, \"upload_workflow_deps\"):\n                merged_map = upload_workflow_deps(self, tool, runtimeContext)\n        else:\n            # in the fast submit case, we are running a workflow that\n            # has already been uploaded to Arvados, so we assume all\n            # the dependencies have been pinned to keep references and\n            # there is nothing to do.\n            merged_map = {}\n\n        loadingContext.loader = tool.doc_loader\n        loadingContext.avsc_names = tool.doc_schema\n        loadingContext.metadata = tool.metadata\n        loadingContext.skip_resolve_all = True\n\n        workflow_wrapper = None\n        if (submitting and not self.fast_submit) or runtimeContext.update_workflow or runtimeContext.create_workflow or runtimeContext.print_keep_deps:\n            # upload workflow and get back the workflow wrapper\n\n            workflow_wrapper = upload_workflow(self, tool, job_order,\n                                               runtimeContext.project_uuid,\n                                               runtimeContext,\n                                               uuid=runtimeContext.update_workflow,\n                                               submit_runner_ram=runtimeContext.submit_runner_ram,\n                                               name=runtimeContext.name,\n                                               merged_map=merged_map,\n                                               submit_runner_image=runtimeContext.submit_runner_image,\n                                               git_info=git_info,\n                                               set_defaults=(runtimeContext.update_workflow or runtimeContext.create_workflow),\n                                               jobmapper=jobmapper)\n\n            if runtimeContext.update_workflow or runtimeContext.create_workflow:\n                # We're registering the workflow, so create or update\n                # the workflow record and then exit.\n                uuid = make_workflow_record(self, workflow_wrapper, runtimeContext.name, tool,\n                                            runtimeContext.project_uuid, runtimeContext.update_workflow)\n                self.stdout.write(uuid + \"\\n\")\n                return (None, \"success\")\n\n            if runtimeContext.print_keep_deps:\n                # Just find and print out all the collection dependencies and exit\n                print_keep_deps(self, runtimeContext, merged_map, tool)\n                return (None, \"success\")\n\n            # Did not register a workflow, we're going to submit\n            # it instead.\n            loadingContext.loader.idx.clear()\n            loadingContext.loader.idx[\"_:main\"] = workflow_wrapper\n            workflow_wrapper[\"id\"] = \"_:main\"\n\n            # Reload the minimal wrapper workflow.\n            self.fast_submit = True\n            tool = load_tool(workflow_wrapper, loadingContext)\n            loadingContext.loader.idx[\"_:main\"] = 
workflow_wrapper\n\n        if not submitting:\n            # If we are going to run the workflow now (rather than\n            # submit it), we need to update the workflow document\n            # replacing file references with keep references.  If we\n            # are just going to construct a run submission, we don't\n            # need to do this.\n            update_from_merged_map(tool, merged_map)\n\n        self.apply_reqs(job_order, tool)\n\n        self.ignore_docker_for_reuse = runtimeContext.ignore_docker_for_reuse\n        self.eval_timeout = runtimeContext.eval_timeout\n\n        runtimeContext.use_container = True\n        runtimeContext.tmpdir_prefix = \"tmp\"\n        runtimeContext.work_api = self.work_api\n\n        if not self.output_name:\n            self.output_name = \"Output from workflow %s\" % runtimeContext.name\n\n        self.output_name = cleanup_name_for_collection(self.output_name)\n\n        if self.work_api == \"containers\":\n            if self.ignore_docker_for_reuse:\n                raise Exception(\"--ignore-docker-for-reuse not supported with containers API.\")\n            runtimeContext.outdir = \"/var/spool/cwl\"\n            runtimeContext.docker_outdir = \"/var/spool/cwl\"\n            runtimeContext.tmpdir = \"/tmp\"\n            runtimeContext.docker_tmpdir = \"/tmp\"\n\n        if runtimeContext.priority < 1 or runtimeContext.priority > 1000:\n            raise Exception(\"--priority must be in the range 1..1000.\")\n\n        if self.should_estimate_cache_size:\n            visited = set()\n            estimated_size = [0]\n            def estimate_collection_cache(obj):\n                if obj.get(\"location\", \"\").startswith(\"keep:\"):\n                    m = pdh_size.match(obj[\"location\"][5:])\n                    if m and m.group(1) not in visited:\n                        visited.add(m.group(1))\n                        estimated_size[0] += int(m.group(2))\n            visit_class(job_order, (\"File\", \"Directory\"), estimate_collection_cache)\n            runtimeContext.collection_cache_size = max(((estimated_size[0]*192) // (1024*1024))+1, 256)\n            self.collection_cache.set_cap(runtimeContext.collection_cache_size*1024*1024)\n\n        logger.info(\"Using collection cache size %s MiB\", runtimeContext.collection_cache_size)\n\n        runnerjob = None\n        if runtimeContext.submit:\n            # We are submitting instead of running immediately.\n            #\n            # Create a \"Runner job\" that, when run() is invoked,\n            # creates the container request to run the workflow.\n            if self.work_api == \"containers\":\n                if submitting:\n                    loadingContext.metadata = updated_tool.metadata.copy()\n                    tool = RunnerContainer(self, tool, loadingContext, runtimeContext.enable_reuse,\n                                           self.output_name,\n                                           self.output_tags,\n                                           submit_runner_ram=runtimeContext.submit_runner_ram,\n                                           name=runtimeContext.name,\n                                           on_error=runtimeContext.on_error,\n                                           submit_runner_image=runtimeContext.submit_runner_image,\n                                           intermediate_output_ttl=runtimeContext.intermediate_output_ttl,\n                                           merged_map=merged_map,\n                                     
      priority=runtimeContext.priority,\n                                           secret_store=self.secret_store,\n                                           collection_cache_size=runtimeContext.collection_cache_size,\n                                           collection_cache_is_default=self.should_estimate_cache_size,\n                                           git_info=git_info)\n                else:\n                    runtimeContext.runnerjob = tool.tool[\"id\"]\n\n        if runtimeContext.cwl_runner_job is not None:\n            self.uuid = runtimeContext.cwl_runner_job.get('uuid')\n\n        jobiter = tool.job(job_order,\n                           self.output_callback,\n                           runtimeContext)\n\n        if runtimeContext.submit and not runtimeContext.wait:\n            # User provided --no-wait so submit the container request,\n            # get the container request uuid, print it out, and exit.\n            runnerjob = next(jobiter)\n            runnerjob.run(runtimeContext)\n            self.stdout.write(runnerjob.uuid+\"\\n\")\n            return (None, \"success\")\n\n        # We are either running the workflow directly, or submitting it\n        # and waiting for a final result.\n\n        self.runtime_status_update(\"activity\", \"workflow execution\")\n\n        if current_container:\n            self.set_container_request_properties(current_container, git_info)\n\n        self.poll_api = arvados.api('v1', timeout=runtimeContext.http_timeout)\n        self.polling_thread = threading.Thread(target=self.poll_states)\n        self.polling_thread.start()\n\n        self.task_queue = TaskQueue(self.workflow_eval_lock, self.thread_count)\n\n        try:\n            self.workflow_eval_lock.acquire()\n\n            # Holds the lock while this code runs and releases it when\n            # it is safe to do so in self.workflow_eval_lock.wait(),\n            # at which point on_message can update job state and\n            # process output callbacks.\n\n            loopperf = Perf(metrics, \"jobiter\")\n            loopperf.__enter__()\n            for runnable in jobiter:\n                loopperf.__exit__()\n\n                if self.stop_polling.is_set():\n                    break\n\n                if self.task_queue.error is not None:\n                    raise self.task_queue.error\n\n                if runnable:\n                    with Perf(metrics, \"run\"):\n                        self.start_run(runnable, runtimeContext)\n                else:\n                    if (self.task_queue.in_flight + len(self.processes)) > 0:\n                        self.workflow_eval_lock.wait(3)\n                    else:\n                        if self.final_status is None:\n                            logger.error(\"Workflow is deadlocked, no runnable processes and not waiting on any pending processes.\")\n                        break\n\n                if self.stop_polling.is_set():\n                    break\n\n                loopperf.__enter__()\n            loopperf.__exit__()\n\n            while (self.task_queue.in_flight + len(self.processes)) > 0:\n                if self.task_queue.error is not None:\n                    raise self.task_queue.error\n                self.workflow_eval_lock.wait(3)\n\n        except UnsupportedRequirement:\n            raise\n        except:\n            if sys.exc_info()[0] is KeyboardInterrupt or sys.exc_info()[0] is SystemExit:\n                logger.error(\"Interrupted, workflow will be cancelled\", 
exc_info=self.debug)\n            elif isinstance(sys.exc_info()[1], WorkflowException):\n                logger.error(\"Workflow execution failed:\\n%s\", sys.exc_info()[1], exc_info=self.debug)\n            else:\n                logger.exception(\"Workflow execution failed\")\n\n            if self.pipeline:\n                self.api.pipeline_instances().update(uuid=self.pipeline[\"uuid\"],\n                                                     body={\"state\": \"Failed\"}).execute(num_retries=self.num_retries)\n\n            if self.work_api == \"containers\" and not current_container:\n                # Not running in a crunch container, so cancel any outstanding processes.\n                for p in self.processes:\n                    try:\n                        self.api.container_requests().update(uuid=p,\n                                                             body={\"priority\": \"0\"}\n                        ).execute(num_retries=self.num_retries)\n                    except Exception:\n                        pass\n        finally:\n            self.workflow_eval_lock.release()\n            self.task_queue.drain()\n            self.stop_polling.set()\n            self.polling_thread.join()\n            self.task_queue.join()\n\n        if self.final_status == \"UnsupportedRequirement\":\n            raise UnsupportedRequirement(\"Check log for details.\")\n\n        if self.final_output is None:\n            raise WorkflowException(\"Workflow did not return a result.\")\n\n        if runtimeContext.usage_report_notes:\n            logger.info(\"Steps with low resource utilization (possible optimization opportunities):\")\n            for x in runtimeContext.usage_report_notes:\n                logger.info(\"  %s\", x)\n\n        if runtimeContext.submit and isinstance(tool, Runner):\n            logger.info(\"Final output collection %s\", tool.final_output)\n            if workbench2 or workbench1:\n                logger.info(\"Output at %scollections/%s\", workbench2 or workbench1, tool.final_output)\n        else:\n            if self.output_tags is None:\n                self.output_tags = \"\"\n\n            storage_classes = \"\"\n            storage_class_req, _ = tool.get_requirement(\"http://arvados.org/cwl#OutputStorageClass\")\n            if storage_class_req and storage_class_req.get(\"finalStorageClass\"):\n                storage_classes = aslist(storage_class_req[\"finalStorageClass\"])\n            else:\n                storage_classes = (\n                    runtimeContext.storage_classes\n                    or list(arvados.util.iter_storage_classes(self.api.config()))\n                )\n\n            output_properties = {}\n            output_properties_req, _ = tool.get_requirement(\"http://arvados.org/cwl#OutputCollectionProperties\")\n            if output_properties_req:\n                builder = make_builder(job_order, tool.hints, tool.requirements, runtimeContext, tool.metadata)\n                for pr in output_properties_req[\"outputProperties\"]:\n                    output_properties[pr[\"propertyName\"]] = builder.do_eval(pr[\"propertyValue\"])\n\n            self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, storage_classes,\n                                                                                          self.output_tags, output_properties,\n                                                                                          self.final_output)\n            
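# Record the new output collection on this runner's own container record;\n            # set_crunch_output() also trashes the temporary staging collection.\n            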
self.set_crunch_output()\n\n        if runtimeContext.compute_checksum:\n            adjustDirObjs(self.final_output, partial(get_listing, self.fs_access))\n            adjustFileObjs(self.final_output, partial(compute_checksums, self.fs_access))\n\n        if self.trash_intermediate and self.final_status == \"success\":\n            self.trash_intermediate_output()\n\n        return (self.final_output, self.final_status)\n\ndef blank_secrets(job_order_object, process):\n    secrets_req, _ = process.get_requirement(\"http://commonwl.org/cwltool#Secrets\")\n"
  },
  {
    "path": "sdk/cwl/arvados_cwl/fsaccess.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport fnmatch\nimport os\nimport errno\nimport urllib.parse\nimport re\nimport logging\nimport threading\nfrom collections import OrderedDict\nfrom io import StringIO\n\nimport ruamel.yaml\n\nimport cwltool.stdfsaccess\nfrom cwltool.pathmapper import abspath\nimport cwltool.resolver\n\nimport arvados.util\nimport arvados.collection\nimport arvados.arvfile\nimport arvados.errors\n\nfrom googleapiclient.errors import HttpError\n\nfrom schema_salad.ref_resolver import DefaultFetcher\n\nlogger = logging.getLogger('arvados.cwl-runner')\n\npdh_size = re.compile(r'([0-9a-f]{32})\\+(\\d+)(\\+\\S+)*')\n\nclass CollectionCache(object):\n    def __init__(self, api_client, keep_client, num_retries,\n                 cap=256*1024*1024,\n                 min_entries=2):\n        self.api_client = api_client\n        self.keep_client = keep_client\n        self.num_retries = num_retries\n        self.collections = OrderedDict()\n        self.lock = threading.Lock()\n        self.total = 0\n        self.cap = cap\n        self.min_entries = min_entries\n\n    def set_cap(self, cap):\n        self.cap = cap\n\n    def cap_cache(self, required):\n        # ordered dict iterates from oldest to newest\n        for pdh, v in list(self.collections.items()):\n            available = self.cap - self.total\n            if available >= required or len(self.collections) < self.min_entries:\n                return\n            # cut it loose\n            logger.debug(\"Evicting collection reader %s from cache (cap %s total %s required %s)\", pdh, self.cap, self.total, required)\n            del self.collections[pdh]\n            self.total -= v[1]\n\n    def get(self, locator):\n        with self.lock:\n            if locator not in self.collections:\n                m = pdh_size.match(locator)\n                if m:\n                    self.cap_cache(int(m.group(2)) * 128)\n                logger.debug(\"Creating collection reader for %s\", locator)\n                try:\n                    cr = arvados.collection.CollectionReader(locator, api_client=self.api_client,\n                                                             keep_client=self.keep_client,\n                                                             num_retries=self.num_retries)\n                except arvados.errors.ApiError as ap:\n                    raise IOError(errno.ENOENT, \"Could not access collection '%s': %s\" % (locator, str(ap._get_reason())))\n                sz = len(cr.manifest_text()) * 128\n                self.collections[locator] = (cr, sz)\n                self.total += sz\n            else:\n                cr, sz = self.collections[locator]\n                # bump it to the back\n                del self.collections[locator]\n                self.collections[locator] = (cr, sz)\n            return cr\n\n\nclass CollectionFsAccess(cwltool.stdfsaccess.StdFsAccess):\n    \"\"\"Implement the cwltool FsAccess interface for Arvados Collections.\"\"\"\n\n    def __init__(self, basedir, collection_cache=None):\n        super(CollectionFsAccess, self).__init__(basedir)\n        self.collection_cache = collection_cache\n\n    def get_collection(self, path):\n        sp = path.split(\"/\", 1)\n        p = sp[0]\n        if p.startswith(\"keep:\") and (arvados.util.keep_locator_pattern.match(p[5:]) or\n                                      arvados.util.collection_uuid_pattern.match(p[5:])):\n            locator = 
p[5:]\n            rest = os.path.normpath(urllib.parse.unquote(sp[1])) if len(sp) == 2 else None\n            return (self.collection_cache.get(locator), rest)\n        else:\n            return (None, path)\n\n    def _match(self, collection, patternsegments, parent):\n        if not patternsegments:\n            return []\n\n        if not isinstance(collection, arvados.collection.RichCollectionBase):\n            return []\n\n        ret = []\n        # iterate over the files and subcollections in 'collection'\n        for filename in collection:\n            if patternsegments[0] == '.':\n                # Pattern contains something like \"./foo\" so just shift\n                # past the \"./\"\n                ret.extend(self._match(collection, patternsegments[1:], parent))\n            elif fnmatch.fnmatch(filename, patternsegments[0]):\n                cur = os.path.join(parent, filename)\n                if len(patternsegments) == 1:\n                    ret.append(cur)\n                else:\n                    ret.extend(self._match(collection[filename], patternsegments[1:], cur))\n        return ret\n\n    def glob(self, pattern):\n        collection, rest = self.get_collection(pattern)\n        if collection is not None and rest in (None, \"\", \".\"):\n            return [pattern]\n        patternsegments = rest.split(\"/\")\n        return sorted(self._match(collection, patternsegments, \"keep:\" + collection.manifest_locator()))\n\n    def open(self, fn, mode, encoding=None):\n        collection, rest = self.get_collection(fn)\n        if collection is not None:\n            return collection.open(rest, mode, encoding=encoding)\n        else:\n            return super(CollectionFsAccess, self).open(self._abs(fn), mode)\n\n    def exists(self, fn):\n        try:\n            collection, rest = self.get_collection(fn)\n        except HttpError as err:\n            if err.resp.status == 404:\n                return False\n            else:\n                raise\n        except IOError as err:\n            if err.errno == errno.ENOENT:\n                return False\n            else:\n                raise\n        if collection is not None:\n            if rest:\n                return collection.exists(rest)\n            else:\n                return True\n        else:\n            return super(CollectionFsAccess, self).exists(fn)\n\n    def size(self, fn):  # type: (unicode) -> int\n        collection, rest = self.get_collection(fn)\n        if collection is not None:\n            if rest:\n                arvfile = collection.find(rest)\n                if isinstance(arvfile, arvados.arvfile.ArvadosFile):\n                    return arvfile.size()\n            raise IOError(errno.EINVAL, \"Not a path to a file %s\" % (fn))\n        else:\n            return super(CollectionFsAccess, self).size(fn)\n\n    def isfile(self, fn):  # type: (unicode) -> bool\n        collection, rest = self.get_collection(fn)\n        if collection is not None:\n            if rest:\n                return isinstance(collection.find(rest), arvados.arvfile.ArvadosFile)\n            else:\n                return False\n        else:\n            return super(CollectionFsAccess, self).isfile(fn)\n\n    def isdir(self, fn):  # type: (unicode) -> bool\n        collection, rest = self.get_collection(fn)\n        if collection is not None:\n            if rest:\n                return isinstance(collection.find(rest), arvados.collection.RichCollectionBase)\n            else:\n                return 
True\n        else:\n            return super(CollectionFsAccess, self).isdir(fn)\n\n    def listdir(self, fn):  # type: (unicode) -> List[unicode]\n        collection, rest = self.get_collection(fn)\n        if collection is not None:\n            if rest:\n                dir = collection.find(rest)\n            else:\n                dir = collection\n            if dir is None:\n                raise IOError(errno.ENOENT, \"Directory '%s' in '%s' not found\" % (rest, collection.portable_data_hash()))\n            if not isinstance(dir, arvados.collection.RichCollectionBase):\n                raise IOError(errno.ENOENT, \"Path '%s' in '%s' is not a Directory\" % (rest, collection.portable_data_hash()))\n            return [abspath(l, fn) for l in list(dir.keys())]\n        else:\n            return super(CollectionFsAccess, self).listdir(fn)\n\n    def join(self, path, *paths): # type: (unicode, *unicode) -> unicode\n        if paths and paths[-1].startswith(\"keep:\") and arvados.util.keep_locator_pattern.match(paths[-1][5:]):\n            return paths[-1]\n        return os.path.join(path, *paths)\n\n    def realpath(self, path):\n        if path.startswith(\"$(task.tmpdir)\") or path.startswith(\"$(task.outdir)\"):\n            return path\n        collection, rest = self.get_collection(path)\n        if collection is not None:\n            return path\n        else:\n            return os.path.realpath(path)\n\nclass CollectionFetcher(DefaultFetcher):\n    def __init__(self, cache, session, api_client=None, fs_access=None, num_retries=4):\n        super(CollectionFetcher, self).__init__(cache, session)\n        self.api_client = api_client\n        self.fsaccess = fs_access\n        self.num_retries = num_retries\n\n    def fetch_text(self, url, content_types=None):\n        if url.startswith(\"keep:\"):\n            with self.fsaccess.open(url, \"r\", encoding=\"utf-8\") as f:\n                return f.read()\n        if url.startswith(\"arvwf:\"):\n            record = self.api_client.workflows().get(uuid=url[6:]).execute(num_retries=self.num_retries)\n            yaml = ruamel.yaml.YAML(typ='rt', pure=True)\n            definition = yaml.load(record[\"definition\"])\n            definition[\"label\"] = record[\"name\"]\n            stream = StringIO()\n            yaml.dump(definition, stream)\n            return stream.getvalue()\n        return super(CollectionFetcher, self).fetch_text(url, content_types=content_types)\n\n    def check_exists(self, url):\n        try:\n            if url.startswith(\"http://arvados.org/cwl\"):\n                return True\n            urld, _ = urllib.parse.urldefrag(url)\n            if urld.startswith(\"keep:\"):\n                return self.fsaccess.exists(urld)\n            if urld.startswith(\"arvwf:\"):\n                if self.fetch_text(urld):\n                    return True\n        except arvados.errors.NotFoundError:\n            return False\n        except Exception:\n            logger.exception(\"Got unexpected exception checking if file exists\")\n            return False\n        return super(CollectionFetcher, self).check_exists(url)\n\n    def urljoin(self, base_url, url):\n        if not url:\n            return base_url\n\n        urlsp = urllib.parse.urlsplit(url)\n        if urlsp.scheme or not base_url:\n            return url\n\n        basesp = urllib.parse.urlsplit(base_url)\n        if basesp.scheme in (\"keep\", \"arvwf\"):\n            if not basesp.path:\n                raise IOError(errno.EINVAL, \"Invalid Keep locator\", base_url)\n\n     
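       # Relative reference against a keep: or arvwf: base: splice the path\n            # segments together, keeping the collection locator as the first segment.\n     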
       baseparts = basesp.path.split(\"/\")\n            urlparts = urlsp.path.split(\"/\") if urlsp.path else []\n\n            locator = baseparts.pop(0)\n\n            if (basesp.scheme == \"keep\" and\n                (not arvados.util.keep_locator_pattern.match(locator)) and\n                (not arvados.util.collection_uuid_pattern.match(locator))):\n                raise IOError(errno.EINVAL, \"Invalid Keep locator\", base_url)\n\n            if urlsp.path.startswith(\"/\"):\n                baseparts = []\n                urlparts.pop(0)\n\n            if baseparts and urlsp.path:\n                baseparts.pop()\n\n            path = \"/\".join([locator] + baseparts + urlparts)\n            return urllib.parse.urlunsplit((basesp.scheme, \"\", path, \"\", urlsp.fragment))\n\n        return super(CollectionFetcher, self).urljoin(base_url, url)\n\n    schemes = [\"file\", \"http\", \"https\", \"mailto\", \"keep\", \"arvwf\", \"s3\"]\n\n    def supported_schemes(self):  # type: () -> List[Text]\n        return self.schemes\n\n\nworkflow_uuid_pattern = re.compile(r'[a-z0-9]{5}-7fd4e-[a-z0-9]{15}')\npipeline_template_uuid_pattern = re.compile(r'[a-z0-9]{5}-p5p6p-[a-z0-9]{15}')\n\ndef collectionResolver(api_client, document_loader, uri, num_retries=4):\n    if uri.startswith(\"keep:\") or uri.startswith(\"arvwf:\"):\n        return str(uri)\n\n    if workflow_uuid_pattern.match(uri):\n        return u\"arvwf:%s#main\" % (uri)\n\n    if pipeline_template_uuid_pattern.match(uri):\n        pt = api_client.pipeline_templates().get(uuid=uri).execute(num_retries=num_retries)\n        return u\"keep:\" + next(iter(pt[\"components\"].values()))[\"script_parameters\"][\"cwl:tool\"]\n\n    p = uri.split(\"/\")\n    if arvados.util.keep_locator_pattern.match(p[0]):\n        return u\"keep:%s\" % (uri)\n\n    if arvados.util.collection_uuid_pattern.match(p[0]):\n        return u\"keep:%s%s\" % (api_client.collections().\n                              get(uuid=p[0]).execute()[\"portable_data_hash\"],\n                              uri[len(p[0]):])\n\n    return cwltool.resolver.tool_resolver(document_loader, uri)\n\n# This is published as an entry point and picked up by cwltest so that\n# it uses CollectionFsAccess from Arvados instead of the standard\n# FsAccess that only works for the local file system.\ndef get_fsaccess():\n    api_client = arvados.api('v1')\n    return CollectionFsAccess(\"\", CollectionCache(api_client, api_client.keep, 3))\n"
  },
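  {
    "path": "sdk/cwl/arvados_cwl/examples/fsaccess_example.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Illustrative sketch, not part of the shipped package: shows how\n# collectionResolver() and CollectionFsAccess fit together.  The\n# portable data hash and file name below are hypothetical, and running\n# this requires a configured Arvados client (ARVADOS_API_HOST and\n# ARVADOS_API_TOKEN in the environment).\n\nimport arvados\n\nfrom arvados_cwl.fsaccess import (\n    CollectionCache,\n    CollectionFsAccess,\n    collectionResolver,\n)\n\ndef main():\n    api_client = arvados.api('v1')\n    fs_access = CollectionFsAccess(\"\", CollectionCache(api_client, api_client.keep, 3))\n\n    # collectionResolver turns a bare portable data hash (or collection\n    # uuid) into a \"keep:\" URI; the document_loader argument is only\n    # used by the final fallback branch, so None suffices here.\n    uri = collectionResolver(api_client, None, \"99999999999999999999999999999999+99/input.txt\")\n\n    # CollectionFsAccess can then treat the \"keep:\" URI like a path.\n    if fs_access.exists(uri):\n        with fs_access.open(uri, \"r\", encoding=\"utf-8\") as f:\n            print(f.read())\n\nif __name__ == \"__main__\":\n    main()\n"
  },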
  {
    "path": "sdk/cwl/arvados_cwl/pathmapper.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport re\nimport logging\nimport uuid\nimport os\nimport datetime\nimport urllib.request, urllib.parse, urllib.error\n\nimport arvados_cwl.util\nimport arvados.commands.run\nimport arvados.collection\n\nfrom arvados.errors import ApiError\nfrom arvados._internal.http_to_keep import http_to_keep\nfrom cwltool.pathmapper import PathMapper, MapperEnt\nfrom cwltool.utils import adjustFileObjs, adjustDirObjs\nfrom cwltool.stdfsaccess import abspath\nfrom cwltool.workflow import WorkflowException\nfrom schema_salad.sourceline import SourceLine\n\nlogger = logging.getLogger('arvados.cwl-runner')\n\ndef trim_listing(obj):\n    \"\"\"Remove 'listing' field from Directory objects that are keep references.\n\n    When Directory objects represent Keep references, it is redundant and\n    potentially very expensive to pass fully enumerated Directory objects\n    between instances of cwl-runner (e.g. a submitting a job, or using the\n    RunInSingleContainer feature), so delete the 'listing' field when it is\n    safe to do so.\n\n    \"\"\"\n\n    if obj.get(\"location\", \"\").startswith(\"keep:\") and \"listing\" in obj:\n        del obj[\"listing\"]\n\ncollection_pdh_path = re.compile(r'^keep:[0-9a-f]{32}\\+\\d+/.+$')\ncollection_pdh_pattern = re.compile(r'^keep:([0-9a-f]{32}\\+\\d+)(/.*)?')\ncollection_uuid_pattern = re.compile(r'^keep:([a-z0-9]{5}-4zz18-[a-z0-9]{15})(/.*)?$')\n\ndef _resolve_one_credential(apiclient, filters, description):\n    results = apiclient.credentials().list(filters=filters, limit=2).execute()\n    match results['items']:\n        case []:\n            return None\n        case [c]:\n            return c\n        case _:\n            raise WorkflowException(f\"Multiple {description} found in Arvados. \\\nRun `arvados-cwl-runner` with the `--use-credential` option to provide the UUID \\\nof the credential to use.\")\n\n\ndef resolve_aws_key(apiclient, s3url):\n    if \"credentials\" not in apiclient._rootDesc[\"resources\"]:\n        raise WorkflowException(\"Arvados instance does not support the external credentials API.  Use --enable-aws-credential-capture to use locally-defined credentials.\")\n    desc_fmt = \"AWS access keys with scope {0!r}\"\n    expires_at = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(minutes=5)\n    scope = f's3://{urllib.parse.urlparse(s3url).netloc}'\n    filters = [\n        ['credential_class', '=', 'arv:aws_access_key'],\n        ['expires_at', '>', f'{expires_at.isoformat()}Z'],\n        ['scopes', 'contains', scope],\n    ]\n    if credential := _resolve_one_credential(apiclient, filters, desc_fmt.format(scope)):\n        return credential\n    wild_scope = 's3://*'\n    filters[-1] = ['scopes', 'contains', wild_scope]\n    if credential := _resolve_one_credential(apiclient, filters, desc_fmt.format(wild_scope)):\n        return credential\n    raise WorkflowException(f\"No AWS access keys for S3 bucket {scope} found in Arvados. 
\\\nFor information about how to run workflows with S3 inputs, refer to \\\n<https://doc.arvados.org/user/topics/external-inputs.html>.\")\n\n\nclass ArvPathMapper(PathMapper):\n    \"\"\"Convert container-local paths to and from Keep collection ids.\"\"\"\n\n    def __init__(self, arvrunner, referenced_files, input_basedir,\n                 collection_pattern, file_pattern, name=None, single_collection=False,\n                 optional_deps=None):\n        self.arvrunner = arvrunner\n        self.input_basedir = input_basedir\n        self.collection_pattern = collection_pattern\n        self.file_pattern = file_pattern\n        self.name = name\n        self.referenced_files = [r[\"location\"] for r in referenced_files]\n        self.single_collection = single_collection\n        self.pdh_to_uuid = {}\n        self.optional_deps = optional_deps or []\n        super(ArvPathMapper, self).__init__(referenced_files, input_basedir, None)\n\n    def visit(self, srcobj, uploadfiles):\n        src = srcobj[\"location\"]\n        if \"#\" in src:\n            src = src[:src.index(\"#\")]\n\n        debug = logger.isEnabledFor(logging.DEBUG)\n\n        if isinstance(src, str) and src.startswith(\"keep:\"):\n            if collection_pdh_pattern.match(src):\n                self._pathmap[src] = MapperEnt(src, self.collection_pattern % urllib.parse.unquote(src[5:]), srcobj[\"class\"], True)\n\n                if arvados_cwl.util.collectionUUID in srcobj:\n                    self.pdh_to_uuid[src.split(\"/\", 1)[0][5:]] = srcobj[arvados_cwl.util.collectionUUID]\n            elif not collection_uuid_pattern.match(src):\n                with SourceLine(srcobj, \"location\", WorkflowException, debug):\n                    raise WorkflowException(\"Invalid keep reference '%s'\" % src)\n\n        # Note: fsaccess->CollectionFetcher and\n        # runner->upload_dependencies->collect_uploads have lists of\n        # supported URL schemes that have to be updated when new\n        # schemes are added.\n        if src not in self._pathmap:\n            if src.startswith(\"file:\"):\n                # Local FS ref, may need to be uploaded or may be on keep\n                # mount.\n                ab = abspath(src, self.input_basedir)\n                st = arvados.commands.run.statfile(\"\", ab,\n                                                   fnPattern=\"keep:%s/%s\",\n                                                   dirPattern=\"keep:%s/%s\",\n                                                   raiseOSError=True)\n                with SourceLine(srcobj, \"location\", WorkflowException, debug):\n                    if isinstance(st, arvados.commands.run.UploadFile):\n                        uploadfiles.add((src, ab, st))\n                    elif isinstance(st, arvados.commands.run.ArvFile):\n                        self._pathmap[src] = MapperEnt(st.fn, self.collection_pattern % urllib.parse.unquote(st.fn[5:]), \"File\", True)\n                    else:\n                        raise WorkflowException(\"Input file path '%s' is invalid\" % st)\n            elif src.startswith(\"_:\"):\n                if srcobj[\"class\"] == \"File\" and \"contents\" not in srcobj:\n                    raise WorkflowException(\"File literal '%s' is missing `contents`\" % src)\n                if srcobj[\"class\"] == \"Directory\" and \"listing\" not in srcobj:\n                    raise WorkflowException(\"Directory literal '%s' is missing `listing`\" % src)\n            elif src.startswith(\"http:\") or 
src.startswith(\"https:\"):\n                try:\n                    if self.arvrunner.defer_downloads:\n                        # passthrough, we'll download it later.\n                        self._pathmap[src] = MapperEnt(src, src, srcobj[\"class\"], True)\n                    else:\n                        results = http_to_keep(self.arvrunner.api, self.arvrunner.project_uuid, src,\n                                               varying_url_params=self.arvrunner.toplevel_runtimeContext.varying_url_params,\n                                               prefer_cached_downloads=self.arvrunner.toplevel_runtimeContext.prefer_cached_downloads)\n                        keepref = \"keep:%s/%s\" % (results[0], results[1])\n                        logger.info(\"%s is %s\", src, keepref)\n                        self._pathmap[src] = MapperEnt(keepref, keepref, srcobj[\"class\"], True)\n                except Exception as e:\n                    logger.warning(\"Download error: %s\", e)\n            elif src.startswith(\"s3:\"):\n                try:\n                    # Using inline imports here instead of at the top\n                    # of the file to defer importing boto3 until we\n                    # actually need it, because if the user isn't\n                    # using s3 import there's zero reason to have the\n                    # module loaded at all.\n                    if self.arvrunner.botosession is None and (self.arvrunner.defer_downloads is False or self.arvrunner.toplevel_runtimeContext.aws_credential_capture):\n                        # Create a boto session, which we will either\n                        # use to download from S3 now, or to get the\n                        # credentials that will be passed to the\n                        # workflow runner container later.\n                        import boto3.session\n                        if self.arvrunner.selected_credential is not None:\n                            # Fetch the secret and create the boto session.\n                            self.arvrunner.botosession = boto3.session.Session(aws_access_key_id=self.arvrunner.selected_credential[\"external_id\"],\n                                                                               aws_secret_access_key=self.arvrunner.selected_credential[\"secret\"])\n                            logger.info(\"Using Arvados credential %s (%s)\", self.arvrunner.selected_credential[\"name\"], self.arvrunner.selected_credential[\"uuid\"])\n                        else:\n                            self.arvrunner.botosession = boto3.session.Session()\n                        if not self.arvrunner.botosession.get_credentials() and not self.arvrunner.toplevel_runtimeContext.s3_public_bucket:\n                            raise WorkflowException(\"boto3 did not find any local AWS credentials to use to download from S3.  If you want to use credentials registered with Arvados, use --defer-downloads.  
If the bucket is public, use --s3-public-bucket.\")\n                        if self.arvrunner.botosession.get_credentials():\n                            logger.info(\"S3 downloads will use AWS access key id %s\", self.arvrunner.botosession.get_credentials().access_key)\n                    if self.arvrunner.defer_downloads:\n                        # passthrough, we'll download it later.\n                        self._pathmap[src] = MapperEnt(src, src, srcobj[\"class\"], True)\n                        if (self.arvrunner.selected_credential is None and\n                            self.arvrunner.botosession is None and\n                            not self.arvrunner.toplevel_runtimeContext.s3_public_bucket):\n                            self.arvrunner.selected_credential = resolve_aws_key(self.arvrunner.api, src)\n                            logger.info(\"S3 downloads will use access key id %s which is Arvados credential '%s' (%s)\",\n                                        self.arvrunner.selected_credential['external_id'],\n                                        self.arvrunner.selected_credential['name'],\n                                        self.arvrunner.selected_credential['uuid'])\n                    else:\n                        from arvados._internal.s3_to_keep import s3_to_keep\n                        results = s3_to_keep(self.arvrunner.api,\n                                             self.arvrunner.botosession,\n                                             self.arvrunner.project_uuid,\n                                             src,\n                                             prefer_cached_downloads=self.arvrunner.toplevel_runtimeContext.prefer_cached_downloads,\n                                             unsigned_requests=self.arvrunner.toplevel_runtimeContext.s3_public_bucket)\n                        keepref = \"keep:%s/%s\" % (results[0], results[1])\n                        logger.info(\"%s is %s\", src, keepref)\n                        self._pathmap[src] = MapperEnt(keepref, keepref, srcobj[\"class\"], True)\n                except Exception as e:\n                    logger.warning(\"Download error: %s\", e, exc_info=debug)\n            else:\n                self._pathmap[src] = MapperEnt(src, src, srcobj[\"class\"], True)\n\n        with SourceLine(srcobj, \"secondaryFiles\", WorkflowException, debug):\n            for l in srcobj.get(\"secondaryFiles\", []):\n                self.visit(l, uploadfiles)\n        with SourceLine(srcobj, \"listing\", WorkflowException, debug):\n            for l in srcobj.get(\"listing\", []):\n                self.visit(l, uploadfiles)\n\n    def addentry(self, obj, c, path, remap):\n        if obj[\"location\"] in self._pathmap:\n            src, srcpath = self.arvrunner.fs_access.get_collection(self._pathmap[obj[\"location\"]].resolved)\n            if srcpath == \"\":\n                srcpath = \".\"\n            c.copy(srcpath, path + \"/\" + obj[\"basename\"], source_collection=src, overwrite=True)\n            remap.append((obj[\"location\"], path + \"/\" + obj[\"basename\"]))\n            for l in obj.get(\"secondaryFiles\", []):\n                self.addentry(l, c, path, remap)\n        elif obj[\"class\"] == \"Directory\":\n            for l in obj.get(\"listing\", []):\n                self.addentry(l, c, path + \"/\" + obj[\"basename\"], remap)\n            remap.append((obj[\"location\"], path + \"/\" + obj[\"basename\"]))\n        elif obj[\"location\"].startswith(\"_:\") and \"contents\" in obj:\n            
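# File literal: create the file from its inline \"contents\" in the new collection.\n            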
with c.open(path + \"/\" + obj[\"basename\"], \"w\") as f:\n                f.write(obj[\"contents\"])\n            remap.append((obj[\"location\"], path + \"/\" + obj[\"basename\"]))\n        else:\n            for opt in self.optional_deps:\n                if obj[\"location\"] == opt[\"location\"]:\n                    return\n            raise SourceLine(obj, \"location\", WorkflowException).makeError(\"Can't handle '%s'\" % obj[\"location\"])\n\n    def needs_new_collection(self, srcobj, prefix=\"\"):\n        \"\"\"Check if files need to be staged into a new collection.\n\n        If all the files are in the same collection and in the same\n        paths they would be staged to, return False.  Otherwise, a new\n        collection is needed with files copied/created in the\n        appropriate places.\n        \"\"\"\n\n        loc = srcobj[\"location\"]\n        if loc.startswith(\"_:\"):\n            return True\n\n        if self.arvrunner.defer_downloads and (loc.startswith(\"http:\") or loc.startswith(\"https:\") or loc.startswith(\"s3:\")):\n            return False\n\n        i = loc.rfind(\"/\")\n        if i > -1:\n            loc_prefix = loc[:i+1]\n            if not prefix:\n                prefix = loc_prefix\n            # quote/unquote to ensure consistent quoting\n            suffix = urllib.parse.quote(urllib.parse.unquote(loc[i+1:]), \"/+@\")\n        else:\n            # no '/' found\n            loc_prefix = loc+\"/\"\n            prefix = loc+\"/\"\n            suffix = \"\"\n\n        if prefix != loc_prefix:\n            return True\n\n        if \"basename\" in srcobj and suffix != urllib.parse.quote(srcobj[\"basename\"], \"/+@\"):\n            return True\n\n        if srcobj[\"class\"] == \"File\" and loc not in self._pathmap:\n            return True\n        for s in srcobj.get(\"secondaryFiles\", []):\n            if self.needs_new_collection(s, prefix):\n                return True\n        if srcobj.get(\"listing\"):\n            prefix = \"%s%s/\" % (prefix, urllib.parse.quote(srcobj.get(\"basename\", suffix), \"/+@\"))\n            for l in srcobj[\"listing\"]:\n                if self.needs_new_collection(l, prefix):\n                    return True\n        return False\n\n    def setup(self, referenced_files, basedir):\n        # type: (List[Any], unicode) -> None\n        uploadfiles = set()\n\n        collection = None\n        if self.single_collection:\n            collection = arvados.collection.Collection(api_client=self.arvrunner.api,\n                                                       keep_client=self.arvrunner.keep_client,\n                                                       num_retries=self.arvrunner.num_retries)\n\n        for srcobj in referenced_files:\n            self.visit(srcobj, uploadfiles)\n\n        arvados.commands.run.uploadfiles([u[2] for u in uploadfiles],\n                                         self.arvrunner.api,\n                                         dry_run=False,\n                                         num_retries=self.arvrunner.num_retries,\n                                         fnPattern=\"keep:%s/%s\",\n                                         name=self.name,\n                                         project=self.arvrunner.project_uuid,\n                                         collection=collection,\n                                         packed=False)\n\n        for src, ab, st in uploadfiles:\n            self._pathmap[src] = MapperEnt(urllib.parse.quote(st.fn, \"/:+@\"), 
urllib.parse.quote(self.collection_pattern % st.fn[5:], \"/:+@\"),\n                                           \"Directory\" if os.path.isdir(ab) else \"File\", True)\n\n        for srcobj in referenced_files:\n            remap = []\n            if srcobj[\"class\"] == \"Directory\" and srcobj[\"location\"] not in self._pathmap:\n                c = arvados.collection.Collection(api_client=self.arvrunner.api,\n                                                  keep_client=self.arvrunner.keep_client,\n                                                  num_retries=self.arvrunner.num_retries)\n                for l in srcobj.get(\"listing\", []):\n                    self.addentry(l, c, \".\", remap)\n\n                container = arvados_cwl.util.get_current_container(self.arvrunner.api, self.arvrunner.num_retries, logger)\n                info = arvados_cwl.util.get_intermediate_collection_info(None, container, self.arvrunner.intermediate_output_ttl)\n\n                c.save_new(name=info[\"name\"],\n                           owner_uuid=self.arvrunner.project_uuid,\n                           ensure_unique_name=True,\n                           trash_at=info[\"trash_at\"],\n                           properties=info[\"properties\"])\n\n                ab = self.collection_pattern % c.portable_data_hash()\n                self._pathmap[srcobj[\"location\"]] = MapperEnt(\"keep:\"+c.portable_data_hash(), ab, \"Directory\", True)\n            elif srcobj[\"class\"] == \"File\" and self.needs_new_collection(srcobj):\n                c = arvados.collection.Collection(api_client=self.arvrunner.api,\n                                                  keep_client=self.arvrunner.keep_client,\n                                                  num_retries=self.arvrunner.num_retries)\n                self.addentry(srcobj, c, \".\", remap)\n\n                container = arvados_cwl.util.get_current_container(self.arvrunner.api, self.arvrunner.num_retries, logger)\n                info = arvados_cwl.util.get_intermediate_collection_info(None, container, self.arvrunner.intermediate_output_ttl)\n\n                c.save_new(name=info[\"name\"],\n                           owner_uuid=self.arvrunner.project_uuid,\n                           ensure_unique_name=True,\n                           trash_at=info[\"trash_at\"],\n                           properties=info[\"properties\"])\n\n                ab = self.file_pattern % (c.portable_data_hash(), srcobj[\"basename\"])\n                self._pathmap[srcobj[\"location\"]] = MapperEnt(\"keep:%s/%s\" % (c.portable_data_hash(), srcobj[\"basename\"]),\n                                                              ab, \"File\", True)\n                if srcobj.get(\"secondaryFiles\"):\n                    ab = self.collection_pattern % c.portable_data_hash()\n                    self._pathmap[\"_:\" + str(uuid.uuid4())] = MapperEnt(\"keep:\"+c.portable_data_hash(), ab, \"Directory\", True)\n\n            if remap:\n                for loc, sub in remap:\n                    # subdirs start with \"./\", strip it off\n                    if sub.startswith(\"./\"):\n                        ab = self.file_pattern % (c.portable_data_hash(), sub[2:])\n                    else:\n                        ab = self.file_pattern % (c.portable_data_hash(), sub)\n                    self._pathmap[loc] = MapperEnt(\"keep:%s/%s\" % (c.portable_data_hash(), sub[2:]),\n                                                   ab, \"Directory\", True)\n\n        self.keepdir = None\n\n    
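# Map a staged target path back to its original location.  Extends\n    # PathMapper.reversemap to also handle \"keep:\" references and paths\n    # under the local Keep mount (self.keepdir).\n    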
def reversemap(self, target):\n        p = super(ArvPathMapper, self).reversemap(target)\n        if p:\n            return p\n        elif target.startswith(\"keep:\"):\n            return (target, target)\n        elif self.keepdir and target.startswith(self.keepdir):\n            kp = \"keep:\" + target[len(self.keepdir)+1:]\n            return (kp, kp)\n        else:\n            return None\n\n\nclass StagingPathMapper(PathMapper):\n    # Note that StagingPathMapper internally maps files from target to source.\n    # Specifically, the 'self._pathmap' dict keys are the target location and the\n    # values are 'MapperEnt' named tuples from which we use the 'resolved' attribute\n    # as the file identifier. This makes it possible to map an input file to multiple\n    # target directories. The exception is for file literals, which store the contents of\n    # the file in 'MapperEnt.resolved' and are therefore still mapped from source to target.\n\n    _follow_dirs = True\n\n    def __init__(self, referenced_files, basedir, stagedir, separateDirs=True):\n        self.targets = set()\n        super(StagingPathMapper, self).__init__(referenced_files, basedir, stagedir, separateDirs)\n\n    def visit(self, obj, stagedir, basedir, copy=False, staged=False):\n        # type: (Dict[unicode, Any], unicode, unicode, bool, bool) -> None\n        loc = obj[\"location\"]\n        stagedir = obj.get(\"dirname\") or stagedir\n        tgt = os.path.join(stagedir, obj[\"basename\"])\n        basetgt, baseext = os.path.splitext(tgt)\n\n        def targetExists():\n            return tgt in self.targets and (\"contents\" not in obj) and (self._pathmap[tgt].resolved != loc)\n        def literalTargetExists():\n            return tgt in self.targets and \"contents\" in obj\n\n        n = 1\n        if targetExists() or literalTargetExists():\n            while tgt in self.targets:\n                n += 1\n                tgt = \"%s_%i%s\" % (basetgt, n, baseext)\n        self.targets.add(tgt)\n        if obj[\"class\"] == \"Directory\":\n            if obj.get(\"writable\"):\n                self._pathmap[tgt] = MapperEnt(loc, tgt, \"WritableDirectory\", staged)\n            else:\n                self._pathmap[tgt] = MapperEnt(loc, tgt, \"Directory\", staged)\n            if loc.startswith(\"_:\") or self._follow_dirs:\n                self.visitlisting(obj.get(\"listing\", []), tgt, basedir)\n        elif obj[\"class\"] == \"File\":\n            if tgt in self._pathmap:\n                return\n            if \"contents\" in obj and loc.startswith(\"_:\"):\n                self._pathmap[loc] = MapperEnt(obj[\"contents\"], tgt, \"CreateFile\", staged)\n            else:\n                if copy or obj.get(\"writable\"):\n                    self._pathmap[tgt] = MapperEnt(loc, tgt, \"WritableFile\", staged)\n                else:\n                    self._pathmap[tgt] = MapperEnt(loc, tgt, \"File\", staged)\n                self.visitlisting(obj.get(\"secondaryFiles\", []), stagedir, basedir)\n\n    def mapper(self, src):  # type: (Text) -> MapperEnt\n        # Overridden to maintain the use case of mapping by source (identifier) to\n        # target regardless of how the map is structured internally.\n        def getMapperEnt(src):\n            for k,v in self._pathmap.items():\n                if (v.type != \"CreateFile\" and v.resolved == src) or (v.type == \"CreateFile\" and k == src):\n                    return v\n\n        if u\"#\" in src:\n            i = src.index(u\"#\")\n            v = 
getMapperEnt(src[i:])\n            return MapperEnt(v.resolved, v.target + src[i:], v.type, v.staged)\n        return getMapperEnt(src)\n\n\nclass VwdPathMapper(StagingPathMapper):\n    def setup(self, referenced_files, basedir):\n        # type: (List[Any], unicode) -> None\n\n        # Go through each file and set the target to its own directory along\n        # with any secondary files.\n        self.visitlisting(referenced_files, self.stagedir, basedir)\n\n        for path, (ab, tgt, type, staged) in self._pathmap.items():\n            if type in (\"File\", \"Directory\") and ab.startswith(\"keep:\"):\n                self._pathmap[path] = MapperEnt(\"$(task.keep)/%s\" % ab[5:], tgt, type, staged)\n\n\nclass NoFollowPathMapper(StagingPathMapper):\n    _follow_dirs = False\n    def setup(self, referenced_files, basedir):\n        # type: (List[Any], unicode) -> None\n        self.visitlisting(referenced_files, self.stagedir, basedir)\n"
  },
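  {
    "path": "sdk/cwl/arvados_cwl/examples/trim_listing_example.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Illustrative sketch, not part of the shipped package: demonstrates\n# trim_listing() from pathmapper.py.  A Directory object whose location\n# is already a \"keep:\" reference does not need an inline 'listing', so\n# the field is dropped; non-keep Directory objects are left alone.  The\n# objects below are hand-written sample data.\n\nfrom arvados_cwl.pathmapper import trim_listing\n\nkeep_dir = {\n    \"class\": \"Directory\",\n    \"location\": \"keep:99999999999999999999999999999999+99\",\n    \"listing\": [\n        {\"class\": \"File\", \"location\": \"keep:99999999999999999999999999999999+99/a.txt\"},\n    ],\n}\ntrim_listing(keep_dir)\nassert \"listing\" not in keep_dir\n\nlocal_dir = {\n    \"class\": \"Directory\",\n    \"location\": \"file:///tmp/data\",\n    \"listing\": [],\n}\ntrim_listing(local_dir)\nassert \"listing\" in local_dir\n"
  },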
  {
    "path": "sdk/cwl/arvados_cwl/perf.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport time\nimport uuid\n\nclass Perf(object):\n    def __init__(self, logger, name):\n        self.logger = logger\n        self.name = name\n\n    def __enter__(self):\n        self.time = time.time()\n        self.logger.debug(\"ENTER %s %s\", self.name, self.time)\n\n    def __exit__(self, exc_type=None, exc_value=None, traceback=None):\n        now = time.time()\n        self.logger.debug(\"EXIT %s %s %s\", self.name, now, now - self.time)\n"
  },
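  {
    "path": "sdk/cwl/arvados_cwl/examples/perf_example.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Illustrative sketch, not part of the shipped package: shows how the\n# Perf context manager from perf.py is used.  runner.py wraps expensive\n# steps in `with Perf(metrics, \"step name\"):` blocks; the sleep below is\n# a stand-in for real work.\n\nimport logging\nimport time\n\nfrom arvados_cwl.perf import Perf\n\nlogging.basicConfig(level=logging.DEBUG)\nmetrics = logging.getLogger('arvados.cwl-runner.metrics')\n\nwith Perf(metrics, \"example step\"):\n    time.sleep(0.1)  # the EXIT debug record reports the elapsed time\n"
  },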
  {
    "path": "sdk/cwl/arvados_cwl/runner.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport sys\nimport re\nimport urllib.parse\nfrom functools import partial\nimport logging\nimport json\nimport copy\nfrom collections import namedtuple\nfrom io import StringIO\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    Iterable,\n    Iterator,\n    List,\n    Mapping,\n    MutableMapping,\n    Sequence,\n    MutableSequence,\n    Optional,\n    Set,\n    Sized,\n    Tuple,\n    Type,\n    Union,\n    cast,\n)\n\nimport subprocess\n\nfrom schema_salad.sourceline import SourceLine, cmap\n\nfrom cwltool.command_line_tool import CommandLineTool\nimport cwltool.workflow\nfrom cwltool.process import (scandeps, UnsupportedRequirement, normalizeFilesDirs,\n                             shortname, Process, fill_in_defaults)\nfrom cwltool.load_tool import fetch_document, jobloaderctx\nfrom cwltool.utils import aslist, adjustFileObjs, adjustDirObjs, visit_class\nfrom cwltool.builder import substitute\nfrom cwltool.pack import pack\nfrom cwltool.update import INTERNAL_VERSION\nfrom cwltool.builder import Builder\nimport schema_salad.validate as validate\nimport schema_salad.ref_resolver\nfrom cwltool.secrets import SecretStore\n\nimport arvados.collection\nimport arvados.util\nfrom .util import collectionUUID\nfrom ruamel.yaml import YAML\nfrom ruamel.yaml.comments import CommentedMap, CommentedSeq\n\nimport arvados_cwl.arvdocker\nfrom .pathmapper import ArvPathMapper, trim_listing, collection_pdh_pattern, collection_uuid_pattern, MapperEnt\nfrom ._version import __version__\nfrom . import done\nfrom . context import ArvRuntimeContext\nfrom .perf import Perf\n\nbasestring = (bytes, str)\nlogger = logging.getLogger('arvados.cwl-runner')\nmetrics = logging.getLogger('arvados.cwl-runner.metrics')\n\ndef trim_anonymous_location(obj):\n    \"\"\"Remove 'location' field from File and Directory literals.\n\n    To make internal handling easier, literals are assigned a random id for\n    'location'.  However, when writing the record back out, this can break\n    reproducibility.  
Since it is valid for literals not to have a 'location'\n    field, remove it.\n\n    \"\"\"\n\n    if obj.get(\"location\", \"\").startswith(\"_:\"):\n        del obj[\"location\"]\n\n\ndef remove_redundant_fields(obj):\n    for field in (\"path\", \"nameext\", \"nameroot\", \"dirname\"):\n        if field in obj:\n            del obj[field]\n\n\ndef find_defaults(d, op):\n    if isinstance(d, list):\n        for i in d:\n            find_defaults(i, op)\n    elif isinstance(d, dict):\n        if \"default\" in d:\n            op(d)\n        else:\n            for i in d.values():\n                find_defaults(i, op)\n\ndef make_builder(joborder, hints, requirements, runtimeContext, metadata):\n    return Builder(\n                 job=joborder,\n                 files=[],               # type: List[Dict[Text, Text]]\n                 bindings=[],            # type: List[Dict[Text, Any]]\n                 schemaDefs={},          # type: Dict[Text, Dict[Text, Any]]\n                 names=None,               # type: Names\n                 requirements=requirements,        # type: List[Dict[Text, Any]]\n                 hints=hints,               # type: List[Dict[Text, Any]]\n                 resources={},           # type: Dict[str, int]\n                 mutation_manager=None,    # type: Optional[MutationManager]\n                 formatgraph=None,         # type: Optional[Graph]\n                 make_fs_access=None,      # type: Type[StdFsAccess]\n                 fs_access=None,           # type: StdFsAccess\n                 job_script_provider=runtimeContext.job_script_provider, # type: Optional[Any]\n                 timeout=runtimeContext.eval_timeout,             # type: float\n                 debug=runtimeContext.debug,               # type: bool\n                 js_console=runtimeContext.js_console,          # type: bool\n                 force_docker_pull=runtimeContext.force_docker_pull,   # type: bool\n                 loadListing=\"\",         # type: Text\n                 outdir=\"\",              # type: Text\n                 tmpdir=\"\",              # type: Text\n                 stagedir=\"\",            # type: Text\n                 cwlVersion=metadata.get(\"http://commonwl.org/cwltool#original_cwlVersion\") or metadata.get(\"cwlVersion\"),\n                 container_engine=\"docker\"\n                )\n\ndef search_schemadef(name, reqs):\n    for r in reqs:\n        if r[\"class\"] == \"SchemaDefRequirement\":\n            for sd in r[\"types\"]:\n                if sd[\"name\"] == name:\n                    return sd\n    return None\n\nprimitive_types_set = frozenset((\"null\", \"boolean\", \"int\", \"long\",\n                                 \"float\", \"double\", \"string\", \"record\",\n                                 \"array\", \"enum\"))\n\ndef set_secondary(fsaccess, builder, inputschema, secondaryspec, primary, discovered):\n    if isinstance(inputschema, Sequence) and not isinstance(inputschema, basestring):\n        # union type, collect all possible secondaryFiles\n        for i in inputschema:\n            set_secondary(fsaccess, builder, i, secondaryspec, primary, discovered)\n        return\n\n    if inputschema == \"File\":\n        inputschema = {\"type\": \"File\"}\n\n    if isinstance(inputschema, basestring):\n        sd = search_schemadef(inputschema, reversed(builder.hints+builder.requirements))\n        if sd:\n            inputschema = sd\n        else:\n            return\n\n    if \"secondaryFiles\" in inputschema:\n        # set 
secondaryFiles, may be inherited by compound types.\n        secondaryspec = inputschema[\"secondaryFiles\"]\n\n    if (isinstance(inputschema[\"type\"], (Mapping, Sequence)) and\n        not isinstance(inputschema[\"type\"], basestring)):\n        # compound type (union, array, record)\n        set_secondary(fsaccess, builder, inputschema[\"type\"], secondaryspec, primary, discovered)\n\n    elif (inputschema[\"type\"] == \"record\" and\n          isinstance(primary, Mapping)):\n        #\n        # record type, find secondary files associated with fields.\n        #\n        for f in inputschema[\"fields\"]:\n            p = primary.get(shortname(f[\"name\"]))\n            if p:\n                set_secondary(fsaccess, builder, f, secondaryspec, p, discovered)\n\n    elif (inputschema[\"type\"] == \"array\" and\n          isinstance(primary, Sequence)):\n        #\n        # array type, find secondary files of elements\n        #\n        for p in primary:\n            set_secondary(fsaccess, builder, {\"type\": inputschema[\"items\"]}, secondaryspec, p, discovered)\n\n    elif (inputschema[\"type\"] == \"File\" and\n          isinstance(primary, Mapping) and\n          primary.get(\"class\") == \"File\"):\n\n        if \"secondaryFiles\" in primary or not secondaryspec:\n            # Nothing to do.\n            return\n\n        #\n        # Found a file, check for secondaryFiles\n        #\n        specs = []\n        primary[\"secondaryFiles\"] = secondaryspec\n        for i, sf in enumerate(aslist(secondaryspec)):\n            if builder.cwlVersion == \"v1.0\":\n                pattern = sf\n            else:\n                pattern = sf[\"pattern\"]\n            if pattern is None:\n                continue\n            if isinstance(pattern, list):\n                specs.extend(pattern)\n            elif isinstance(pattern, dict):\n                specs.append(pattern)\n            elif isinstance(pattern, str):\n                if builder.cwlVersion == \"v1.0\":\n                    specs.append({\"pattern\": pattern, \"required\": True})\n                else:\n                    specs.append({\"pattern\": pattern, \"required\": sf.get(\"required\")})\n            else:\n                raise SourceLine(primary[\"secondaryFiles\"], i, validate.ValidationException).makeError(\n                    \"Expression must return list, object, string or null\")\n\n        found = []\n        for i, sf in enumerate(specs):\n            if isinstance(sf, dict):\n                if sf.get(\"class\") == \"File\":\n                    pattern = None\n                    if sf.get(\"location\") is None:\n                        raise SourceLine(primary[\"secondaryFiles\"], i, validate.ValidationException).makeError(\n                            \"File object is missing 'location': %s\" % sf)\n                    sfpath = sf[\"location\"]\n                    required = True\n                else:\n                    pattern = sf[\"pattern\"]\n                    required = sf.get(\"required\")\n            elif isinstance(sf, str):\n                pattern = sf\n                required = True\n            else:\n                raise SourceLine(primary[\"secondaryFiles\"], i, validate.ValidationException).makeError(\n                    \"Expression must return list, object, string or null\")\n\n            if pattern is not None:\n                if \"${\" in pattern or \"$(\" in pattern:\n                    sfname = builder.do_eval(pattern, context=primary)\n                else:\n       
             sfname = substitute(primary[\"basename\"], pattern)\n\n                if sfname is None:\n                    continue\n\n                if isinstance(sfname, str):\n                    p_location = primary[\"location\"]\n                    if \"/\" in p_location:\n                        sfpath = (\n                            p_location[0 : p_location.rindex(\"/\") + 1]\n                            + sfname\n                        )\n\n            required = builder.do_eval(required, context=primary)\n\n            if isinstance(sfname, list) or isinstance(sfname, dict):\n                each = aslist(sfname)\n                for e in each:\n                    if required and not fsaccess.exists(e.get(\"location\")):\n                        raise SourceLine(primary[\"secondaryFiles\"], i, validate.ValidationException).makeError(\n                            \"Required secondary file '%s' does not exist\" % e.get(\"location\"))\n                found.extend(each)\n\n            if isinstance(sfname, str):\n                if fsaccess.exists(sfpath):\n                    if pattern is not None:\n                        found.append({\"location\": sfpath, \"class\": \"File\"})\n                    else:\n                        found.append(sf)\n                elif required:\n                    raise SourceLine(primary[\"secondaryFiles\"], i, validate.ValidationException).makeError(\n                        \"Required secondary file '%s' does not exist\" % sfpath)\n\n        primary[\"secondaryFiles\"] = cmap(found)\n        if discovered is not None:\n            discovered[primary[\"location\"]] = primary[\"secondaryFiles\"]\n    elif inputschema[\"type\"] not in primitive_types_set and inputschema[\"type\"] not in (\"File\", \"Directory\"):\n        set_secondary(fsaccess, builder, inputschema[\"type\"], secondaryspec, primary, discovered)\n\ndef discover_secondary_files(fsaccess, builder, inputs, job_order, discovered=None):\n    for inputschema in inputs:\n        primary = job_order.get(shortname(inputschema[\"id\"]))\n        if isinstance(primary, (Mapping, Sequence)):\n            set_secondary(fsaccess, builder, inputschema, None, primary, discovered)\n\ndef upload_dependencies(arvrunner, name, document_loader,\n                        workflowobj, uri, runtimeContext,\n                        include_primary=True, discovered_secondaryfiles=None,\n                        cache=None):\n    \"\"\"Upload the dependencies of the workflowobj document to Keep.\n\n    Returns a pathmapper object mapping local paths to keep references.  
Also\n    does an in-place update of references in \"workflowobj\".\n\n    Use scandeps to find $schemas, File and Directory\n    fields that represent external references.\n\n    If workflowobj has an \"id\" field, this will reload the document to ensure\n    it is scanning the raw document prior to preprocessing.\n    \"\"\"\n\n    scanobj = workflowobj\n    metadata = scanobj\n\n    with Perf(metrics, \"scandeps\"):\n        sc_result = scandeps(uri, scanobj,\n                             set(),\n                             set((\"location\",)),\n                             None, urljoin=document_loader.fetcher.urljoin,\n                             nestdirs=False)\n        optional_deps = scandeps(uri, scanobj,\n                             set(),\n                             set((\"$schemas\",)),\n                             None, urljoin=document_loader.fetcher.urljoin,\n                             nestdirs=False)\n\n    if sc_result is None:\n        sc_result = []\n\n    if optional_deps is None:\n        optional_deps = []\n\n    if optional_deps:\n        sc_result.extend(optional_deps)\n\n    sc = []\n    uuids = {}\n\n    def collect_uuids(obj):\n        loc = obj.get(\"location\", \"\")\n        sp = loc.split(\":\")\n        if sp[0] == \"keep\":\n            # Collect collection uuids that need to be resolved to\n            # portable data hashes\n            gp = collection_uuid_pattern.match(loc)\n            if gp:\n                uuids[gp.groups()[0]] = obj\n            if collectionUUID in obj:\n                uuids[obj[collectionUUID]] = obj\n\n    def collect_uploads(obj):\n        loc = obj.get(\"location\", \"\")\n        sp = loc.split(\":\")\n        if len(sp) < 1:\n            return\n        if sp[0] in (\"file\", \"http\", \"https\", \"s3\"):\n            # Record local files that need to be uploaded,\n            # don't include file literals, keep references, etc.\n            sc.append(obj)\n        collect_uuids(obj)\n\n    with Perf(metrics, \"collect uuids\"):\n        visit_class(workflowobj, (\"File\", \"Directory\"), collect_uuids)\n\n    with Perf(metrics, \"collect uploads\"):\n        visit_class(sc_result, (\"File\", \"Directory\"), collect_uploads)\n\n    # Resolve any collection uuids we found to portable data hashes\n    # and assign them to uuid_map\n    uuid_map = {}\n    fetch_uuids = list(uuids.keys())\n    with Perf(metrics, \"fetch_uuids\"):\n        while fetch_uuids:\n            # For a large number of fetch_uuids, API server may limit\n            # response size, so keep fetching until the API server has\n            # nothing more to give us.\n            lookups = arvrunner.api.collections().list(\n                filters=[[\"uuid\", \"in\", fetch_uuids]],\n                count=\"none\",\n                select=[\"uuid\", \"portable_data_hash\"]).execute(\n                    num_retries=arvrunner.num_retries)\n\n            if not lookups[\"items\"]:\n                break\n\n            for l in lookups[\"items\"]:\n                uuid_map[l[\"uuid\"]] = l[\"portable_data_hash\"]\n\n            fetch_uuids = [u for u in fetch_uuids if u not in uuid_map]\n\n    normalizeFilesDirs(sc)\n\n    if \"id\" in workflowobj:\n        defrg, _ = urllib.parse.urldefrag(workflowobj[\"id\"])\n        if include_primary:\n            # make sure it's included\n            sc.append({\"class\": \"File\", \"location\": defrg})\n        else:\n            # make sure it's excluded\n            sc = [d for d in sc if d.get(\"location\") != 
defrg]\n\n    def visit_default(obj):\n        def defaults_are_optional(f):\n            if \"location\" not in f and \"path\" in f:\n                f[\"location\"] = f[\"path\"]\n                del f[\"path\"]\n            normalizeFilesDirs(f)\n            optional_deps.append(f)\n        visit_class(obj[\"default\"], (\"File\", \"Directory\"), defaults_are_optional)\n\n    find_defaults(workflowobj, visit_default)\n\n    discovered = {}\n    def discover_default_secondary_files(obj):\n        builder_job_order = {}\n        for t in obj[\"inputs\"]:\n            builder_job_order[shortname(t[\"id\"])] = t[\"default\"] if \"default\" in t else None\n        # Need to create a builder object to evaluate expressions.\n        builder = make_builder(builder_job_order,\n                               obj.get(\"hints\", []),\n                               obj.get(\"requirements\", []),\n                               ArvRuntimeContext(),\n                               metadata)\n        discover_secondary_files(arvrunner.fs_access,\n                                 builder,\n                                 obj[\"inputs\"],\n                                 builder_job_order,\n                                 discovered)\n\n    copied, _ = document_loader.resolve_all(copy.deepcopy(cmap(workflowobj)), base_url=uri, checklinks=False)\n    visit_class(copied, (\"CommandLineTool\", \"Workflow\"), discover_default_secondary_files)\n\n    for d in list(discovered):\n        # Only interested in discovered secondaryFiles which are local\n        # files that need to be uploaded.\n        if d.startswith(\"file:\"):\n            sc.extend(discovered[d])\n        else:\n            del discovered[d]\n\n    with Perf(metrics, \"mapper\"):\n        mapper = ArvPathMapper(arvrunner, sc, \"\",\n                               \"keep:%s\",\n                               \"keep:%s/%s\",\n                               name=name,\n                               single_collection=True,\n                               optional_deps=optional_deps)\n\n    for k, v in uuid_map.items():\n        mapper._pathmap[\"keep:\"+k] = MapperEnt(v, \"\", \"\", False)\n\n    keeprefs = set()\n    def addkeepref(k):\n        if k.startswith(\"keep:\"):\n            keeprefs.add(collection_pdh_pattern.match(k).group(1))\n\n\n    def collectloc(p):\n        loc = p.get(\"location\")\n        if loc and (not loc.startswith(\"_:\")) and (not loc.startswith(\"keep:\")):\n            addkeepref(p[\"location\"])\n            return\n\n        if not loc:\n            return\n\n        if collectionUUID in p:\n            uuid = p[collectionUUID]\n            if uuid not in uuid_map:\n                raise SourceLine(p, collectionUUID, validate.ValidationException).makeError(\n                    \"Collection uuid %s not found\" % uuid)\n            gp = collection_pdh_pattern.match(loc)\n            if gp and uuid_map[uuid] != gp.groups()[0]:\n                # This file entry has both collectionUUID and a PDH\n                # location. 
If the PDH doesn't match the one returned by\n                # the API server, raise an error.\n                raise SourceLine(p, \"location\", validate.ValidationException).makeError(\n                    \"Expected collection uuid %s to be %s but API server reported %s\" % (\n                        uuid, gp.groups()[0], uuid_map[p[collectionUUID]]))\n\n        gp = collection_uuid_pattern.match(loc)\n        if not gp:\n            # Not a uuid pattern (must be a pdh pattern)\n            addkeepref(p[\"location\"])\n            return\n\n        uuid = gp.groups()[0]\n        if uuid not in uuid_map:\n            raise SourceLine(p, \"location\", validate.ValidationException).makeError(\n                \"Collection uuid %s not found\" % uuid)\n\n    with Perf(metrics, \"collectloc\"):\n        visit_class(workflowobj, (\"File\", \"Directory\"), collectloc)\n        visit_class(discovered, (\"File\", \"Directory\"), collectloc)\n\n    if discovered_secondaryfiles is not None:\n        for d in discovered:\n            discovered_secondaryfiles[mapper.mapper(d).resolved] = discovered[d]\n\n    if runtimeContext.copy_deps:\n        # Find referenced collections and copy them into the\n        # destination project, for easy sharing.\n        already_present = list(arvados.util.keyset_list_all(arvrunner.api.collections().list,\n                                     filters=[[\"portable_data_hash\", \"in\", list(keeprefs)],\n                                              [\"owner_uuid\", \"=\", runtimeContext.project_uuid]],\n                                     select=[\"uuid\", \"portable_data_hash\", \"created_at\"]))\n\n        keeprefs = keeprefs - set(a[\"portable_data_hash\"] for a in already_present)\n        for kr in keeprefs:\n            col = arvrunner.api.collections().list(filters=[[\"portable_data_hash\", \"=\", kr]],\n                                                   order=\"created_at desc\",\n                                                   select=[\"name\", \"description\", \"properties\", \"portable_data_hash\", \"manifest_text\", \"storage_classes_desired\", \"trash_at\"],\n                                                   limit=1).execute()\n            if len(col[\"items\"]) == 0:\n                logger.warning(\"Cannot find collection with portable data hash %s\", kr)\n                continue\n            col = col[\"items\"][0]\n            col[\"name\"] = arvados.util.trim_name(col[\"name\"])\n            try:\n                arvrunner.api.collections().create(body={\"collection\": {\n                    \"owner_uuid\": runtimeContext.project_uuid,\n                    \"name\": col[\"name\"],\n                    \"description\": col[\"description\"],\n                    \"properties\": col[\"properties\"],\n                    \"portable_data_hash\": col[\"portable_data_hash\"],\n                    \"manifest_text\": col[\"manifest_text\"],\n                    \"storage_classes_desired\": col[\"storage_classes_desired\"],\n                    \"trash_at\": col[\"trash_at\"]\n                }}, ensure_unique_name=True).execute()\n            except Exception as e:\n                logger.warning(\"Unable to copy collection to destination: %s\", e)\n\n    if \"$schemas\" in workflowobj:\n        sch = CommentedSeq()\n        for s in workflowobj[\"$schemas\"]:\n            if s in mapper:\n                sch.append(mapper.mapper(s).resolved)\n        workflowobj[\"$schemas\"] = sch\n\n    return mapper\n\n\ndef upload_docker(arvrunner, tool, 
runtimeContext):\n    \"\"\"Uploads Docker images used in CommandLineTool objects.\"\"\"\n\n    if isinstance(tool, CommandLineTool):\n        (docker_req, docker_is_req) = tool.get_requirement(\"DockerRequirement\")\n        if docker_req:\n            if docker_req.get(\"dockerOutputDirectory\") and arvrunner.work_api != \"containers\":\n                raise SourceLine(docker_req, \"dockerOutputDirectory\", UnsupportedRequirement).makeError(\n                    \"Option 'dockerOutputDirectory' of DockerRequirement not supported.\")\n\n            arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, docker_req, True, runtimeContext)\n        else:\n            arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, {\"dockerPull\": \"arvados/jobs:\"+__version__},\n                                                       True, runtimeContext)\n    elif isinstance(tool, cwltool.workflow.Workflow):\n        for s in tool.steps:\n            upload_docker(arvrunner, s.embedded_tool, runtimeContext)\n\n\ndef packed_workflow(arvrunner, tool, merged_map, runtimeContext, git_info):\n    \"\"\"Create a packed workflow.\n\n    A \"packed\" workflow is one where all the components have been combined into a single document.\"\"\"\n\n    rewrites = {}\n    packed = pack(arvrunner.loadingContext, tool.tool[\"id\"],\n                  rewrite_out=rewrites,\n                  loader=tool.doc_loader)\n\n    rewrite_to_orig = {v: k for k,v in rewrites.items()}\n\n    def visit(v, cur_id):\n        if isinstance(v, dict):\n            if v.get(\"class\") in (\"CommandLineTool\", \"Workflow\", \"ExpressionTool\"):\n                if tool.metadata[\"cwlVersion\"] == \"v1.0\" and \"id\" not in v:\n                    raise SourceLine(v, None, Exception).makeError(\"Embedded process object is missing required 'id' field, add an 'id' or use to cwlVersion: v1.1\")\n                if \"id\" in v:\n                    cur_id = rewrite_to_orig.get(v[\"id\"], v[\"id\"])\n            if \"path\" in v and \"location\" not in v:\n                v[\"location\"] = v[\"path\"]\n                del v[\"path\"]\n            if \"location\" in v and cur_id in merged_map:\n                if v[\"location\"] in merged_map[cur_id].resolved:\n                    v[\"location\"] = merged_map[cur_id].resolved[v[\"location\"]]\n                if v[\"location\"] in merged_map[cur_id].secondaryFiles:\n                    v[\"secondaryFiles\"] = merged_map[cur_id].secondaryFiles[v[\"location\"]]\n            if v.get(\"class\") == \"DockerRequirement\":\n                v[\"http://arvados.org/cwl#dockerCollectionPDH\"] = arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, v, True,\n                                                                                                             runtimeContext)\n            for l in v:\n                visit(v[l], cur_id)\n        if isinstance(v, list):\n            for l in v:\n                visit(l, cur_id)\n    visit(packed, None)\n\n    if git_info:\n        for g in git_info:\n            packed[g] = git_info[g]\n\n    return packed\n\n\ndef tag_git_version(packed):\n    if tool.tool[\"id\"].startswith(\"file://\"):\n        path = os.path.dirname(tool.tool[\"id\"][7:])\n        try:\n            githash = subprocess.check_output(['git', 'log', '--first-parent', '--max-count=1', '--format=%H'], stderr=subprocess.STDOUT, cwd=path).strip()\n        except (OSError, subprocess.CalledProcessError):\n            pass\n        else:\n            
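# Record the workflow source tree's current git commit hash.\n            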
packed[\"http://schema.org/version\"] = githash\n\ndef setloc(mapper, p):\n    loc = p.get(\"location\")\n    if loc and (not loc.startswith(\"_:\")) and (not loc.startswith(\"keep:\")):\n        p[\"location\"] = mapper.mapper(p[\"location\"]).resolved\n        return\n\n    if not loc:\n        return\n\n    if collectionUUID in p:\n        uuid = p[collectionUUID]\n        keepuuid = \"keep:\"+uuid\n        if keepuuid not in mapper:\n            raise SourceLine(p, collectionUUID, validate.ValidationException).makeError(\n                \"Collection uuid %s not found\" % uuid)\n        gp = collection_pdh_pattern.match(loc)\n        if gp and mapper.mapper(keepuuid).resolved != gp.groups()[0]:\n            # This file entry has both collectionUUID and a PDH\n            # location. If the PDH doesn't match the one returned\n            # the API server, raise an error.\n            raise SourceLine(p, \"location\", validate.ValidationException).makeError(\n                \"Expected collection uuid %s to be %s but API server reported %s\" % (\n                    uuid, gp.groups()[0], mapper.mapper(keepuuid).resolved))\n\n    gp = collection_uuid_pattern.match(loc)\n    if not gp:\n        # Not a uuid pattern (must be a pdh pattern)\n        return\n\n    uuid = gp.groups()[0]\n    keepuuid = \"keep:\"+uuid\n    if keepuuid not in mapper:\n        raise SourceLine(p, \"location\", validate.ValidationException).makeError(\n            \"Collection uuid %s not found\" % uuid)\n    p[\"location\"] = \"keep:%s%s\" % (mapper.mapper(keepuuid).resolved, gp.groups()[1] if gp.groups()[1] else \"\")\n    p[collectionUUID] = uuid\n\ndef update_from_mapper(workflowobj, mapper):\n    with Perf(metrics, \"setloc\"):\n        visit_class(workflowobj, (\"File\", \"Directory\"), partial(setloc, mapper))\n\ndef apply_merged_map(merged_map, workflowobj):\n    def visit(v, cur_id):\n        if isinstance(v, dict):\n            if v.get(\"class\") in (\"CommandLineTool\", \"Workflow\", \"ExpressionTool\"):\n                if \"id\" in v:\n                    cur_id = v[\"id\"]\n            if \"path\" in v and \"location\" not in v:\n                v[\"location\"] = v[\"path\"]\n                del v[\"path\"]\n            if \"location\" in v and cur_id in merged_map:\n                if v[\"location\"] in merged_map[cur_id].resolved:\n                    v[\"location\"] = merged_map[cur_id].resolved[v[\"location\"]]\n                if v[\"location\"] in merged_map[cur_id].secondaryFiles:\n                    v[\"secondaryFiles\"] = merged_map[cur_id].secondaryFiles[v[\"location\"]]\n            #if v.get(\"class\") == \"DockerRequirement\":\n            #    v[\"http://arvados.org/cwl#dockerCollectionPDH\"] = arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, v, True,\n            #                                                                                                 runtimeContext)\n            for l in v:\n                visit(v[l], cur_id)\n        if isinstance(v, list):\n            for l in v:\n                visit(l, cur_id)\n    visit(workflowobj, None)\n\ndef update_from_merged_map(tool, merged_map):\n    tool.visit(partial(apply_merged_map, merged_map))\n\ndef upload_job_order(arvrunner, name, tool, job_order, runtimeContext):\n    \"\"\"Upload local files referenced in the input object and return updated input\n    object with 'location' updated to the proper keep references.\n    \"\"\"\n\n    # Make a copy of the job order and set defaults.\n    builder_job_order = 
copy.copy(job_order)\n\n    # fill_in_defaults throws an error if there are any\n    # missing required parameters; we don't want it to do that,\n    # so make them all optional.\n    inputs_copy = copy.deepcopy(tool.tool[\"inputs\"])\n    for i in inputs_copy:\n        if \"null\" not in i[\"type\"]:\n            i[\"type\"] = [\"null\"] + aslist(i[\"type\"])\n\n    fill_in_defaults(inputs_copy,\n                     builder_job_order,\n                     arvrunner.fs_access)\n    # Need to create a builder object to evaluate expressions.\n    builder = make_builder(builder_job_order,\n                           tool.hints,\n                           tool.requirements,\n                           ArvRuntimeContext(),\n                           tool.metadata)\n    # Now update job_order with secondaryFiles\n    discover_secondary_files(arvrunner.fs_access,\n                             builder,\n                             tool.tool[\"inputs\"],\n                             job_order)\n\n    _jobloaderctx = jobloaderctx.copy()\n    jobloader = schema_salad.ref_resolver.Loader(_jobloaderctx, fetcher_constructor=tool.doc_loader.fetcher_constructor)\n\n    jobmapper = upload_dependencies(arvrunner,\n                                    name,\n                                    jobloader,\n                                    job_order,\n                                    job_order.get(\"id\", \"#\"),\n                                    runtimeContext)\n\n    if \"id\" in job_order:\n        del job_order[\"id\"]\n\n    # Need to filter this out, gets added by cwltool when providing\n    # parameters on the command line.\n    if \"job_order\" in job_order:\n        del job_order[\"job_order\"]\n\n    update_from_mapper(job_order, jobmapper)\n\n    return job_order, jobmapper\n\nFileUpdates = namedtuple(\"FileUpdates\", [\"resolved\", \"secondaryFiles\"])\n\ndef upload_workflow_deps(arvrunner, tool, runtimeContext):\n    # Ensure that Docker images needed by this workflow are available\n\n    with Perf(metrics, \"upload_docker\"):\n        upload_docker(arvrunner, tool, runtimeContext)\n\n    document_loader = tool.doc_loader\n\n    merged_map = {}\n    tool_dep_cache = {}\n\n    todo = []\n\n    # Standard traversal is top down, but we want to go bottom up, so use\n    # the visitor to accumulate a list of nodes to visit, then\n    # visit them in reverse order.\n    def upload_tool_deps(deptool):\n        if \"id\" in deptool:\n            todo.append(deptool)\n\n    tool.visit(upload_tool_deps)\n\n    for deptool in reversed(todo):\n        discovered_secondaryfiles = {}\n        with Perf(metrics, \"upload_dependencies %s\" % shortname(deptool[\"id\"])):\n            pm = upload_dependencies(arvrunner,\n                                     \"%s dependencies\" % (shortname(deptool[\"id\"])),\n                                     document_loader,\n                                     deptool,\n                                     deptool[\"id\"],\n                                     runtimeContext,\n                                     include_primary=False,\n                                     discovered_secondaryfiles=discovered_secondaryfiles,\n                                     cache=tool_dep_cache)\n\n        document_loader.idx[deptool[\"id\"]] = deptool\n        toolmap = {}\n        for k,v in pm.items():\n            toolmap[k] = v.resolved\n\n        merged_map[deptool[\"id\"]] = FileUpdates(toolmap, discovered_secondaryfiles)\n\n    return merged_map\n\ndef 
arvados_jobs_image(arvrunner, img, runtimeContext):\n    \"\"\"Determine if the right arvados/jobs image version is available.  If not, try to pull and upload it.\"\"\"\n\n    try:\n        return arvados_cwl.arvdocker.arv_docker_get_image(arvrunner.api, {\"dockerPull\": img},\n                                                          True, runtimeContext)\n    except Exception as e:\n        raise Exception(\"Docker image %s is not available\\n%s\" % (img, e))\n\n\ndef upload_workflow_collection(arvrunner, name, packed, runtimeContext):\n    collection = arvados.collection.Collection(api_client=arvrunner.api,\n                                               keep_client=arvrunner.keep_client,\n                                               num_retries=arvrunner.num_retries)\n    with collection.open(\"workflow.cwl\", \"w\") as f:\n        f.write(json.dumps(packed, indent=2, sort_keys=True, separators=(',', ': ')))\n\n    filters = [[\"portable_data_hash\", \"=\", collection.portable_data_hash()],\n               [\"name\", \"like\", name+\"%\"]]\n    if runtimeContext.project_uuid:\n        filters.append([\"owner_uuid\", \"=\", runtimeContext.project_uuid])\n    exists = arvrunner.api.collections().list(filters=filters).execute(num_retries=arvrunner.num_retries)\n\n    if exists[\"items\"]:\n        logger.info(\"Using collection %s\", exists[\"items\"][0][\"uuid\"])\n    else:\n        collection.save_new(name=name,\n                            owner_uuid=runtimeContext.project_uuid,\n                            ensure_unique_name=True,\n                            num_retries=arvrunner.num_retries)\n        logger.info(\"Uploaded to %s\", collection.manifest_locator())\n\n    return collection.portable_data_hash()\n\n\nclass Runner(Process):\n    \"\"\"Base class for runner processes, which submit an instance of\n    arvados-cwl-runner and wait for the final result.\"\"\"\n\n    def __init__(self, runner,\n                 tool, loadingContext, enable_reuse,\n                 output_name, output_tags, submit_runner_ram=0,\n                 name=None, on_error=None, submit_runner_image=None,\n                 intermediate_output_ttl=0, merged_map=None,\n                 priority=None, secret_store=None,\n                 collection_cache_size=256,\n                 collection_cache_is_default=True,\n                 git_info=None,\n                 reuse_runner=False):\n\n        self.loadingContext = loadingContext.copy()\n\n        super(Runner, self).__init__(tool.tool, loadingContext)\n\n        # This is called \"arvrunner\" but it's actually ArvCwlExecutor\n        self.arvrunner = runner\n        self.embedded_tool = tool\n        self.job_order = None\n        self.running = False\n        if enable_reuse:\n            # If reuse is permitted by command line arguments but\n            # disabled by the workflow itself, disable it.\n            reuse_req, _ = self.embedded_tool.get_requirement(\"http://arvados.org/cwl#ReuseRequirement\")\n            if reuse_req:\n                enable_reuse = reuse_req[\"enableReuse\"]\n            reuse_req, _ = self.embedded_tool.get_requirement(\"WorkReuse\")\n            if reuse_req:\n                enable_reuse = reuse_req[\"enableReuse\"]\n        self.enable_reuse = enable_reuse\n        self.uuid = None\n        self.final_output = None\n        self.output_name = output_name\n        self.output_tags = output_tags\n        self.name = name\n        self.on_error = on_error\n        self.jobs_image = submit_runner_image or 
\"arvados/jobs:\"+__version__\n        self.intermediate_output_ttl = intermediate_output_ttl\n        self.priority = priority\n        self.secret_store = secret_store\n        self.enable_dev = self.loadingContext.enable_dev\n        self.git_info = git_info\n        self.fast_parser = self.loadingContext.fast_parser\n        self.reuse_runner = reuse_runner\n\n        self.submit_runner_cores = 1\n        self.submit_runner_ram = 1024  # defaut 1 GiB\n        self.collection_cache_size = collection_cache_size\n\n        runner_resource_req, _ = self.embedded_tool.get_requirement(\"http://arvados.org/cwl#WorkflowRunnerResources\")\n        if runner_resource_req:\n            if runner_resource_req.get(\"coresMin\"):\n                self.submit_runner_cores = runner_resource_req[\"coresMin\"]\n            if runner_resource_req.get(\"ramMin\"):\n                self.submit_runner_ram = runner_resource_req[\"ramMin\"]\n            if runner_resource_req.get(\"keep_cache\") and collection_cache_is_default:\n                self.collection_cache_size = runner_resource_req[\"keep_cache\"]\n\n        if submit_runner_ram:\n            # Command line / initializer overrides default and/or spec from workflow\n            self.submit_runner_ram = submit_runner_ram\n\n        if self.submit_runner_ram <= 0:\n            raise Exception(\"Value of submit-runner-ram must be greater than zero\")\n\n        if self.submit_runner_cores <= 0:\n            raise Exception(\"Value of submit-runner-cores must be greater than zero\")\n\n        self.merged_map = merged_map or {}\n\n    def job(self,\n            job_order,         # type: Mapping[Text, Text]\n            output_callbacks,  # type: Callable[[Any, Any], Any]\n            runtimeContext     # type: RuntimeContext\n           ):  # type: (...) 
-> Generator[Any, None, None]\n        self.job_order = job_order\n        self._init_job(job_order, runtimeContext)\n        yield self\n\n    def update_pipeline_component(self, record):\n        pass\n\n    def done(self, record):\n        \"\"\"Base method for handling a completed runner.\"\"\"\n\n        try:\n            if record[\"state\"] == \"Complete\":\n                if record.get(\"exit_code\") is not None:\n                    if record[\"exit_code\"] == 33:\n                        processStatus = \"UnsupportedRequirement\"\n                    elif record[\"exit_code\"] == 0:\n                        processStatus = \"success\"\n                    else:\n                        processStatus = \"permanentFail\"\n                else:\n                    processStatus = \"success\"\n            else:\n                processStatus = \"permanentFail\"\n\n            outputs = {}\n\n            if processStatus == \"permanentFail\":\n                logc = arvados.collection.CollectionReader(record[\"log\"],\n                                                           api_client=self.arvrunner.api,\n                                                           keep_client=self.arvrunner.keep_client,\n                                                           num_retries=self.arvrunner.num_retries)\n                done.logtail(logc, logger.error, \"%s (%s) error log:\" % (self.arvrunner.label(self), record[\"uuid\"]), maxlen=40,\n                             include_crunchrun=(record.get(\"exit_code\") is None or record.get(\"exit_code\") > 127))\n\n            self.final_output = record[\"output\"]\n            outc = arvados.collection.CollectionReader(self.final_output,\n                                                       api_client=self.arvrunner.api,\n                                                       keep_client=self.arvrunner.keep_client,\n                                                       num_retries=self.arvrunner.num_retries)\n            if \"cwl.output.json\" in outc:\n                with outc.open(\"cwl.output.json\", \"rb\") as f:\n                    if f.size() > 0:\n                        outputs = json.loads(str(f.read(), 'utf-8'))\n            def keepify(fileobj):\n                path = fileobj[\"location\"]\n                if not path.startswith(\"keep:\"):\n                    fileobj[\"location\"] = \"keep:%s/%s\" % (record[\"output\"], path)\n            adjustFileObjs(outputs, keepify)\n            adjustDirObjs(outputs, keepify)\n        except Exception:\n            logger.exception(\"[%s] While getting final output object\", self.name)\n            self.arvrunner.output_callback({}, \"permanentFail\")\n        else:\n            self.arvrunner.output_callback(outputs, processStatus)\n\n\ndef print_keep_deps_visitor(api, runtimeContext, references, doc_loader, tool):\n    def collect_locators(obj):\n        loc = obj.get(\"location\", \"\")\n\n        g = arvados.util.keepuri_pattern.match(loc)\n        if g:\n            references.add(g[1])\n\n        if obj.get(\"class\") == \"http://arvados.org/cwl#WorkflowRunnerResources\" and \"acrContainerImage\" in obj:\n            references.add(obj[\"acrContainerImage\"])\n\n        if obj.get(\"class\") == \"DockerRequirement\":\n            references.add(arvados_cwl.arvdocker.arv_docker_get_image(api, obj, False, runtimeContext))\n\n    sc_result = scandeps(tool[\"id\"], tool,\n                         set(),\n                         set((\"location\", \"id\")),\n                        
 None, urljoin=doc_loader.fetcher.urljoin,\n                         nestdirs=False)\n\n    visit_class(sc_result, (\"File\", \"Directory\"), collect_locators)\n    visit_class(tool, (\"DockerRequirement\", \"http://arvados.org/cwl#WorkflowRunnerResources\"), collect_locators)\n\n\ndef print_keep_deps(arvRunner, runtimeContext, merged_map, tool):\n    references = set()\n\n    tool.visit(partial(print_keep_deps_visitor, arvRunner.api, runtimeContext, references, tool.doc_loader))\n\n    for mm in merged_map:\n        for k, v in merged_map[mm].resolved.items():\n            g = arvados.util.keepuri_pattern.match(v)\n            if g:\n                references.add(g[1])\n\n    json.dump(sorted(references), arvRunner.stdout)\n    print(file=arvRunner.stdout)\n\nclass ArvSecretStore(SecretStore):\n    def add(self, value):\n        if value is None:\n            return None\n        return super().add(value)\n"
  },
  {
    "path": "sdk/cwl/arvados_cwl/util.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport datetime\nimport urllib.parse\n\nfrom arvados.errors import ApiError\n\ncollectionUUID =  \"http://arvados.org/cwl#collectionUUID\"\n\n\ndef get_intermediate_collection_info(workflow_step_name, current_container, intermediate_output_ttl):\n    if workflow_step_name:\n        name = \"Intermediate collection for step %s\" % (workflow_step_name)\n    else:\n        name = \"Intermediate collection\"\n    trash_time = None\n    if intermediate_output_ttl > 0:\n        trash_time = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(seconds=intermediate_output_ttl)\n    container_uuid = None\n    props = {\"type\": \"intermediate\"}\n    if current_container:\n        props[\"container_uuid\"] = current_container['uuid']\n\n    return {\"name\" : name, \"trash_at\" : trash_time, \"properties\" : props}\n\n\ndef get_current_container(api, num_retries=0, logger=None):\n    current_container = None\n    try:\n        current_container = api.containers().current().execute(num_retries=num_retries)\n    except ApiError as e:\n        # Status code 404 just means we're not running in a container.\n        if e.resp.status != 404:\n            if logger:\n                logger.info(\"Getting current container: %s\", e)\n            raise e\n\n    return current_container\n\n\ndef common_prefix(firstfile, all_files):\n    common_parts = firstfile.split('/')\n    common_parts[-1] = ''\n    for f in all_files:\n        f_parts = f.split('/')\n        for index, (a, b) in enumerate(zip(common_parts, f_parts)):\n            if a != b:\n                common_parts = common_parts[:index + 1]\n                common_parts[-1] = ''\n                break\n        if not any(common_parts):\n            break\n    return '/'.join(common_parts)\n\n\ndef sanitize_url(url):\n    \"\"\"Remove username/password from http URL.\"\"\"\n\n    parts = urllib.parse.urlparse(url)\n    if parts.port is None:\n        netloc = parts.hostname\n    else:\n        netloc = f'{parts.hostname}:{parts.port}'\n    return urllib.parse.urlunparse(parts._replace(netloc=netloc))\n"
  },
  {
    "path": "sdk/cwl/arvados_version.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport dataclasses\nimport os\nimport re\nimport runpy\nimport subprocess\nimport typing as t\n\nfrom pathlib import Path, PurePath, PurePosixPath\n\nimport setuptools\nimport setuptools.command.build\n\nSETUP_DIR = Path(__file__).absolute().parent\nVERSION_SCRIPT_PATH = PurePath('build', 'version-at-commit.sh')\n# Built by ArvadosPythonPackage.register\nARVADOS_PYTHON_MODULES: dict[str, 'ArvadosPythonPackage'] = {}\n\n### Metadata generation\n\n@dataclasses.dataclass\nclass ArvadosPythonPackage:\n    package_name: str\n    module_name: str\n    src_path: PurePath\n    dependencies: t.Sequence['ArvadosPythonPackage']\n\n    _VERSION_SUBS = {\n        'development-': '',\n        '~dev': '.dev',\n        '~rc': 'rc',\n    }\n\n    @classmethod\n    def register(\n            cls,\n            package_name: str,\n            module_name: str,\n            src_path: PurePath | str,\n            *dependencies: str,\n    ) -> 'ArvadosPythonPackage':\n        if not isinstance(src_path, PurePath):\n            src_path = PurePosixPath(src_path)\n        deps = [ARVADOS_PYTHON_MODULES[key] for key in dependencies]\n        this_pkg = cls(package_name, module_name, src_path, deps)\n        ARVADOS_PYTHON_MODULES[package_name] = this_pkg\n        return this_pkg\n\n    def version_file_path(self):\n        return PurePath(self.module_name, '_version.py')\n\n    def _workspace_path(self, workdir):\n        try:\n            workspace = Path(os.environ['WORKSPACE'])\n            # This will raise ValueError if they're not related,\n            # in which case we don't want to use this $WORKSPACE.\n            workdir.relative_to(workspace)\n        except KeyError:\n            # $WORKSPACE isn't set. 
Fall back to the Git worktree toplevel.\n            try:\n                git_proc = subprocess.run(\n                    ['git', 'rev-parse', '--show-toplevel'],\n                    capture_output=True,\n                    check=True,\n                    cwd=workdir,\n                    text=True,\n                )\n                workspace = Path(git_proc.stdout.removesuffix('\\n'))\n            except (subprocess.CalledProcessError, FileNotFoundError, ValueError):\n                return None\n        except ValueError:\n            return None\n        if (workspace / VERSION_SCRIPT_PATH).exists():\n            return workspace\n        else:\n            return None\n\n    def _git_version(self, workdir):\n        workspace = self._workspace_path(workdir)\n        if workspace is None:\n            return None\n        git_log_cmd = [\n            'git', 'log', '-n1', '--format=%H', '--',\n            str(VERSION_SCRIPT_PATH), str(self.src_path),\n        ]\n        git_log_cmd.extend(str(dep.src_path) for dep in self.dependencies)\n        git_log_proc = subprocess.run(\n            git_log_cmd,\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        version_proc = subprocess.run(\n            [str(VERSION_SCRIPT_PATH), git_log_proc.stdout.rstrip('\\n')],\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        return version_proc.stdout.rstrip('\\n')\n\n    def _sdist_version(self, workdir):\n        try:\n            pkg_info = (workdir / 'PKG-INFO').open()\n        except FileNotFoundError:\n            return None\n        with pkg_info:\n            for line in pkg_info:\n                key, _, val = line.partition(': ')\n                if key == 'Version':\n                    return val.rstrip('\\n')\n        raise Exception(\"found PKG-INFO file but not Version metadata in it\")\n\n    def get_version(self, workdir=SETUP_DIR):\n        version = (\n            # If we're building out of a distribution, we should pass that\n            # version through unchanged.\n            self._sdist_version(workdir)\n            # Otherwise follow the usual Arvados versioning rules.\n            or os.environ.get('ARVADOS_BUILDING_VERSION')\n            or self._git_version(workdir)\n        )\n        if not version:\n            raise Exception(f\"no version information available for {self.package_name}\")\n        else:\n            return re.sub(\n                r'(^development-|~dev|~rc)',\n                lambda match: self._VERSION_SUBS[match.group(0)],\n                version,\n            )\n\n    def get_dependencies_version(self, workdir=SETUP_DIR, version=None):\n        if version is None:\n            version = self.get_version(workdir)\n        # A packaged development release should be installed with other\n        # development packages built from the same source, but those\n        # dependencies may have earlier \"dev\" versions (read: less recent\n        # Git commit timestamps). This compatible version dependency\n        # expresses that as closely as possible. 
Allowing versions\n        # compatible with .dev0 allows any development release.\n        # Regular expression borrowed partially from\n        # <https://packaging.python.org/en/latest/specifications/version-specifiers/#version-specifiers-regex>\n        dep_ver, match_count = re.subn(r'\\.dev(0|[1-9][0-9]*)$', '.dev0', version, 1)\n        return ('~=' if match_count else '==', dep_ver)\n\n    def iter_dependencies(self, workdir=SETUP_DIR, version=None, extras=None):\n        if extras is None:\n            extras = {}\n        dep_op, dep_ver = self.get_dependencies_version(workdir, version)\n        for dep in self.dependencies:\n            try:\n                dep_extras = f'[{\",\".join(extras[dep.package_name])}]'\n            except KeyError:\n                dep_extras = ''\n            yield f'{dep.package_name}{dep_extras} {dep_op} {dep_ver}'\n\n\n### Package database\n\nArvadosPythonPackage.register(\n    'arvados-python-client',\n    'arvados',\n    'sdk/python',\n)\nArvadosPythonPackage.register(\n    'crunchstat_summary',\n    'crunchstat_summary',\n    'tools/crunchstat-summary',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cluster-activity',\n    'arvados_cluster_activity',\n    'tools/cluster-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cwl-runner',\n    'arvados_cwl',\n    'sdk/cwl',\n    'arvados-python-client',\n    'crunchstat_summary',\n)\nArvadosPythonPackage.register(\n    'arvados_fuse',\n    'arvados_fuse',\n    'services/fuse',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-user-activity',\n    'arvados_user_activity',\n    'tools/user-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-tools',\n    'NO SRCDIR',\n    'tools/python-metapackage',\n    *ARVADOS_PYTHON_MODULES,\n)\nArvadosPythonPackage.register(\n    'arvados-docker-cleaner',\n    'arvados_docker',\n    'services/dockercleaner',\n)\n\n### setuptools integration\n\nclass BuildArvadosVersion(setuptools.Command):\n    \"\"\"Write _version.py for an Arvados module\"\"\"\n    def initialize_options(self):\n        self.build_lib = None\n\n    def finalize_options(self):\n        self.set_undefined_options(\"build_py\", (\"build_lib\", \"build_lib\"))\n        arv_mod = ARVADOS_PYTHON_MODULES[self.distribution.get_name()]\n        self.out_path = Path(self.build_lib, arv_mod.version_file_path())\n\n    def run(self):\n        with self.out_path.open('w') as out_file:\n            print(f'__version__ = {self.distribution.get_version()!r}', file=out_file)\n\n    def get_outputs(self):\n        return [str(self.out_path)]\n\n    def get_source_files(self):\n        return []\n\n    def get_output_mapping(self):\n        return {}\n\n\nclass ArvadosBuildCommand(setuptools.command.build.build):\n    sub_commands = [\n        *setuptools.command.build.build.sub_commands,\n        ('build_arvados_version', None),\n    ]\n\n\nCMDCLASS = {\n    'build': ArvadosBuildCommand,\n    'build_arvados_version': BuildArvadosVersion,\n}\n"
  },
  {
    "path": "sdk/cwl/bin/arvados-cwl-runner",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport sys\n\nfrom arvados_cwl import main\n\nsys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))\n"
  },
  {
    "path": "sdk/cwl/bin/cwl-runner",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport sys\n\nfrom arvados_cwl import main\n\nsys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))\n"
  },
  {
    "path": "sdk/cwl/fpm-info.sh",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfpm_depends+=(nodejs)\n\ncase \"$TARGET\" in\n    debian12 | ubuntu2204 )\n        fpm_depends+=(libcurl4)\n        ;;\n\n    debian* | ubuntu* )\n        fpm_depends+=(libcurl4t64)\n        ;;\nesac\n\nfpm_args+=(--conflicts=python-cwltool --conflicts=cwltool)\n"
  },
  {
    "path": "sdk/cwl/pyproject.toml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n[build-system]\nrequires = [\"setuptools ~= 80.9\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\ndynamic = [\"dependencies\", \"version\"]\nname = \"arvados-cwl-runner\"\ndescription = \"Arvados Common Workflow Language runner\"\nauthors = [\n  {name = \"Arvados\", email = \"info@arvados.org\"},\n]\nclassifiers = [\n  \"Development Status :: 5 - Production/Stable\",\n  \"Environment :: Console\",\n  \"Intended Audience :: Science/Research\",\n  \"Operating System :: POSIX\",\n  \"Programming Language :: Python :: 3\",\n  \"Programming Language :: Python :: 3.10\",\n  \"Programming Language :: Python :: 3.11\",\n  \"Programming Language :: Python :: 3.12\",\n  \"Programming Language :: Python :: 3.13\",\n  \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n]\nlicense = \"Apache-2.0\"\nlicense-files = [\n  \"LICENSE-2.0.txt\",\n]\nreadme = \"README.rst\"\nrequires-python = \"~= 3.10\"\n\n[project.entry-points.\"cwltest.fsaccess\"]\nfsaccess = \"arvados_cwl.fsaccess:get_fsaccess\"\n\n[project.scripts]\ncwl-runner = \"arvados_cwl:main\"\narvados-cwl-runner = \"arvados_cwl:main\"\n\n[project.urls]\nHomepage = \"https://arvados.org\"\nDocumentation = \"https://doc.arvados.org\"\nRepository = \"https://github.com/arvados/arvados\"\nIssues = \"https://github.com/arvados/arvados/issues\"\nChangelog = \"https://arvados.org/releases/\"\n\n[tool.setuptools]\ninclude-package-data = true\n\n[tool.setuptools.data-files]\n\"share/doc/arvados-cwl-runner\" = [\n  \"LICENSE-2.0.txt\",\n  \"README.rst\",\n]\n\n[tool.setuptools.packages.find]\nexclude = [\"tests*\"]\n"
  },
  {
    "path": "sdk/cwl/setup.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport setuptools\nimport runpy\n\nfrom pathlib import Path\n\narvados_version = runpy.run_path(Path(__file__).with_name('arvados_version.py'))\narv_mod = arvados_version['ARVADOS_PYTHON_MODULES']['arvados-cwl-runner']\nversion = arv_mod.get_version()\nsetuptools.setup(\n    cmdclass=arvados_version['CMDCLASS'],\n    install_requires=[\n        *arv_mod.iter_dependencies(version=version),\n        'cwltool == 3.1.20240508115724',\n        'schema-salad == 8.5.20240503091721',\n        'ciso8601 >= 2.0.0',\n    ],\n    version=version,\n)\n"
  },
  {
    "path": "sdk/cwl/tests/10380-trailing-slash-dir.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: CommandLineTool\ninputs: []\noutputs:\n  stuff:\n    type: Directory\n    outputBinding:\n      glob: './foo/'\nrequirements:\n  ShellCommandRequirement: {}\narguments: [{shellQuote: false, valueFrom: \"mkdir -p foo && touch baz.txt && touch foo/bar.txt\"}]\n"
  },
  {
    "path": "sdk/cwl/tests/12213-keepref-expr.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: ExpressionTool\nrequirements:\n  InlineJavascriptRequirement: {}\ninputs:\n  dir: Directory\noutputs:\n  out: Directory[]\nexpression: |\n  ${\n    var samples = {};\n    var pattern = /^(.+)(_S[0-9]{1,3}_)(.+)$/;\n    inputs.dir.listing = inputs.dir.listing.sort(function(a, b) { return a.basename.localeCompare(b.basename); });\n    for (var i = 0; i < inputs.dir.listing.length; i++) {\n      var file = inputs.dir.listing[i];\n      var groups = file.basename.match(pattern);\n      if (groups) {\n        var sampleid = groups[1];\n        if (!samples[sampleid]) {\n          samples[sampleid] = [];\n        }\n        samples[sampleid].push(file);\n      }\n    }\n    var dirs = [];\n    Object.keys(samples).sort().forEach(function(sampleid, _) {\n      dirs.push({\"class\": \"Directory\",\n                 \"basename\": sampleid,\n                 \"listing\": samples[sampleid]});\n    });\n    return {\"out\": dirs};\n  }\n"
  },
  {
    "path": "sdk/cwl/tests/12213-keepref-job.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndir:\n  class: Directory\n  location: samples"
  },
  {
    "path": "sdk/cwl/tests/12213-keepref-tool.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\nrequirements:\n  InlineJavascriptRequirement: {}\ninputs:\n  fastqsdir: Directory\noutputs:\n  out: stdout\nbaseCommand: [zcat]\nstdout: $(inputs.fastqsdir.listing[0].nameroot).txt\narguments:\n  - $(inputs.fastqsdir.listing[0].path)\n  - $(inputs.fastqsdir.listing[1].path)\n"
  },
  {
    "path": "sdk/cwl/tests/12213-keepref-wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\nrequirements:\n  ScatterFeatureRequirement: {}\ninputs:\n  dir: Directory\noutputs:\n  out:\n    type: File[]\n    outputSource: tool/out\nsteps:\n  ex:\n    in:\n      dir: dir\n    out: [out]\n    run: 12213-keepref-expr.cwl\n  tool:\n    in:\n      fastqsdir: ex/out\n    out: [out]\n    scatter: fastqsdir\n    run: 12213-keepref-tool.cwl"
  },
  {
    "path": "sdk/cwl/tests/12418-glob-empty-collection.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{\n   \"cwlVersion\": \"v1.0\",\n      \"arguments\": [\n        \"true\"\n      ],\n      \"class\": \"CommandLineTool\",\n      \"inputs\": [],\n      \"outputs\": [\n        {\n          \"id\": \"out\",\n          \"outputBinding\": {\n            \"glob\": \"*.txt\"\n          },\n          \"type\": [\n            \"null\",\n            \"File\"\n          ]\n        }\n      ]\n}"
  },
  {
    "path": "sdk/cwl/tests/13931-size-job.yml",
    "content": "fastq1:\n  class: File\n  location: keep:20850f01122e860fb878758ac1320877+71/sample1_S01_R1_001.fastq.gz"
  },
  {
    "path": "sdk/cwl/tests/13931-size.cwl",
    "content": "cwlVersion: v1.0\nclass: CommandLineTool\ninputs:\n  fastq1: File\noutputs:\n  out: stdout\nbaseCommand: echo\narguments:\n  - $(inputs.fastq1.size)\nstdout: size.txt"
  },
  {
    "path": "sdk/cwl/tests/13976-keepref-wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\nrequirements:\n  - class: InlineJavascriptRequirement\n  - class: ShellCommandRequirement\narguments:\n  - cd\n  - $(inputs.hello.dirname)\n  - {shellQuote: false, valueFrom: \"&&\"}\n  - ls\nstdout: hello.out\ninputs:\n  hello:\n    type: File\n    default:\n      class: File\n      location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt\n    secondaryFiles:\n      - .idx\noutputs:\n  out:\n    type: File\n    outputBinding:\n      glob: hello.out\n"
  },
  {
    "path": "sdk/cwl/tests/15241-writable-dir-job.json",
    "content": "{\n    \"filesDir\": {\n        \"location\": \"keep:d7514270f356df848477718d58308cc4+94\",\n        \"class\": \"Directory\"\n    }\n}\n"
  },
  {
    "path": "sdk/cwl/tests/15241-writable-dir.cwl",
    "content": "cwlVersion: v1.0\nclass: CommandLineTool\n\nrequirements:\n  - class: InitialWorkDirRequirement\n    listing:\n      - entry: $(inputs.filesDir)\n        writable: true\n\ninputs:\n  filesDir:\n    type: Directory\n\noutputs:\n  results:\n    type: Directory\n    outputBinding:\n      glob: .\n\narguments: [touch, $(inputs.filesDir.path)/blurg.txt]\n"
  },
  {
    "path": "sdk/cwl/tests/15295-bad-keep-ref.cwl",
    "content": "cwlVersion: v1.0\nclass: CommandLineTool\nrequirements:\n  - class: InlineJavascriptRequirement\narguments:\n  - ls\n  - -l\n  - $(inputs.hello)\ninputs:\n  hello:\n    type: File\noutputs: []\n"
  },
  {
    "path": "sdk/cwl/tests/16169-no-listing-hint.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\n  cwltool: \"http://commonwl.org/cwltool#\"\nrequirements:\n  cwltool:LoadListingRequirement:\n    loadListing: no_listing\ninputs:\n  d: Directory\nsteps:\n  step1:\n    in:\n      d: d\n    out: [out]\n    run: wf/16169-step.cwl\noutputs:\n  out:\n    type: File\n    outputSource: step1/out\n"
  },
  {
    "path": "sdk/cwl/tests/16377-missing-default.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\nrequirements:\n  - class: InlineJavascriptRequirement\n  - class: ShellCommandRequirement\narguments:\n  - cd\n  - $(inputs.hello.dirname)\n  - {shellQuote: false, valueFrom: \"&&\"}\n  - ls\nstdout: hello.out\ninputs:\n  hello:\n    type: File\n    default:\n      class: File\n      location: keep:ffffffffffffffffffffffffffaaaaaa+69/hello.txt\n    secondaryFiles:\n      - .idx\noutputs:\n  out:\n    type: File\n    outputBinding:\n      glob: hello.out\n"
  },
  {
    "path": "sdk/cwl/tests/17004-output-props.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: Workflow\ncwlVersion: v1.2\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nhints:\n  arv:OutputCollectionProperties:\n    outputProperties:\n      foo: bar\n      baz: $(inputs.inp.basename)\ninputs:\n  inp: File\nsteps:\n  cat:\n    in:\n      inp: inp\n    run: cat.cwl\n    out: []\noutputs: []\n"
  },
  {
    "path": "sdk/cwl/tests/17267-broken-schemas.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\n$schemas:\n  - http://example.com/schema.xml\ninputs: []\noutputs:\n  out: stdout\nbaseCommand: [echo, \"foo\"]\nstdout: foo.txt\n"
  },
  {
    "path": "sdk/cwl/tests/17521-dot-slash-glob.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: CommandLineTool\ninputs: []\noutputs:\n  stuff:\n    type: File\n    outputBinding:\n      glob: './foo/*.txt'\nrequirements:\n  ShellCommandRequirement: {}\narguments: [{shellQuote: false, valueFrom: \"mkdir -p foo && touch baz.txt && touch foo/bar.txt\"}]\n"
  },
  {
    "path": "sdk/cwl/tests/17801-runtime-outdir.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: CommandLineTool\ninputs: []\noutputs:\n  stuff:\n    type: Directory\n    outputBinding:\n      glob: $(runtime.outdir)\nrequirements:\n  ShellCommandRequirement: {}\narguments: [{shellQuote: false, valueFrom: \"mkdir -p foo && touch baz.txt && touch foo/bar.txt\"}]\n"
  },
  {
    "path": "sdk/cwl/tests/17858-pack-visit-crash.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.1\nclass: ExpressionTool\ninputs:\n  file1:\n    type: File\n    default:\n      class: File\n      location: keep:f225e6259bdd63bc7240599648dde9f1+97/hg19.fa\noutputs:\n  val: string\nrequirements:\n  InlineJavascriptRequirement: {}\nexpression: \"$({val: inputs.file1.location})\"\n"
  },
  {
    "path": "sdk/cwl/tests/17879-ignore-sbg-fields-job.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nsampleName: woble\n"
  },
  {
    "path": "sdk/cwl/tests/17879-ignore-sbg-fields.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n$namespaces:\n  sbg: https://www.sevenbridges.com/\nclass: \"Workflow\"\ncwlVersion: v1.1\nlabel: \"check that sbg x/y fields are correctly ignored\"\ninputs:\n  - id: sampleName\n    type: string\n    label: Sample name\n    'sbg:x': -22\n    'sbg:y': 33.4296875\noutputs:\n  - id: outstr\n    type: string\n    outputSource: step1/outstr\nsteps:\n  step1:\n    in:\n      sampleName: sampleName\n    out: [outstr]\n    run:\n      class: CommandLineTool\n      inputs:\n        sampleName: string\n      stdout: out.txt\n      outputs:\n        outstr:\n          type: string\n          outputBinding:\n            glob: out.txt\n            loadContents: true\n            outputEval: $(self[0].contents)\n      arguments: [echo, \"-n\", \"foo\", $(inputs.sampleName), \"bar\"]\n"
  },
  {
    "path": "sdk/cwl/tests/18888-download_def.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: CommandLineTool\n\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\n\nrequirements:\n  NetworkAccess:\n    networkAccess: true\n  arv:RuntimeConstraints:\n    outputDirType: keep_output_dir\n\ninputs:\n  scripts:\n    type: Directory\n    default:\n      class: Directory\n      location: scripts/\noutputs:\n  out:\n    type: Directory\n    outputBinding:\n      glob: \".\"\n\narguments: [$(inputs.scripts.path)/download_all_data.sh, \".\"]\n"
  },
  {
    "path": "sdk/cwl/tests/18994-basename/check.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: CommandLineTool\ncwlVersion: v1.2\ninputs:\n  p: File\n  checkname: string\noutputs: []\narguments:\n  - sh\n  - \"-c\"\n  - |\n    name=`basename $(inputs.p.path)`\n    ls -l $(inputs.p.path)\n    if test $name = $(inputs.checkname) ; then\n      echo success\n    else\n      echo expected basename to be $(inputs.checkname) but was $name\n      exit 1\n    fi\n"
  },
  {
    "path": "sdk/cwl/tests/18994-basename/rename.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: ExpressionTool\ncwlVersion: v1.2\ninputs:\n  f1: File\n  newname: string\noutputs:\n  out: File\nexpression: |\n  ${\n  inputs.f1.basename = inputs.newname;\n  return {\"out\": inputs.f1};\n  }\n"
  },
  {
    "path": "sdk/cwl/tests/18994-basename/wf_ren.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: Workflow\ncwlVersion: v1.2\ninputs:\n  f1:\n    type: File\n    default:\n      class: File\n      location: whale.txt\n  newname:\n    type: string\n    default:  \"badger.txt\"\noutputs: []\nrequirements:\n  StepInputExpressionRequirement: {}\n  InlineJavascriptRequirement: {}\nsteps:\n  rename:\n    in:\n      f1: f1\n      newname: newname\n    run: rename.cwl\n    out: [out]\n\n  echo:\n    in:\n      p: rename/out\n      checkname: newname\n    out: []\n    run: check.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/18994-basename/whale.txt",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nwhale\n"
  },
  {
    "path": "sdk/cwl/tests/19070-copy-deps.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: CommandLineTool\nbaseCommand: echo\ninputs:\n  message:\n    type: File\n    inputBinding:\n      position: 1\n    default:\n      class: File\n      location: keep:d7514270f356df848477718d58308cc4+94/b\n\noutputs: []\n"
  },
  {
    "path": "sdk/cwl/tests/19109-upload-secondary/file1.txt",
    "content": "strawberry\n"
  },
  {
    "path": "sdk/cwl/tests/19109-upload-secondary/file1.txt.tbi",
    "content": "blueberry"
  },
  {
    "path": "sdk/cwl/tests/19109-upload-secondary/file2.txt",
    "content": "banana\n"
  },
  {
    "path": "sdk/cwl/tests/19109-upload-secondary/file2.txt.tbi",
    "content": "mango"
  },
  {
    "path": "sdk/cwl/tests/19109-upload-secondary.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: Workflow\n\nrequirements:\n  InlineJavascriptRequirement: {}\n\ninputs:\n  file1:\n    type: File?\n    secondaryFiles:\n      - pattern: .tbi\n        required: true\n  file2:\n    type: File\n    secondaryFiles:\n      - pattern: |\n          ${\n          return self.basename + '.tbi';\n          }\n        required: true\noutputs:\n  out:\n    type: File\n    outputSource: cat/out\n  out2:\n    type: File\n    outputSource: cat2/out\nsteps:\n  cat:\n    in:\n      inp: file1\n    run: cat2.cwl\n    out: [out]\n  cat2:\n    in:\n      inp: file2\n    run: cat2.cwl\n    out: [out]\n"
  },
  {
    "path": "sdk/cwl/tests/19109-upload-secondary.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfile1:\n  class: File\n  location: 19109-upload-secondary/file1.txt\nfile2:\n  class: File\n  location: 19109-upload-secondary/file2.txt\n"
  },
  {
    "path": "sdk/cwl/tests/19678-name-id.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: Workflow\ncwlVersion: v1.1\ninputs:\n  - type:\n      fields:\n        - name: first\n          type: string\n        - name: last\n          type: string\n      type: record\n    id: name\noutputs:\n  - type:\n      fields:\n        - name: first\n          type: string\n        - name: last\n          type: string\n      type: record\n    id: processed_name\n    outputSource: name\nsteps: []\n"
  },
  {
    "path": "sdk/cwl/tests/19678-name-id.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{\"name\":{\"first\":\"foo\",\"last\":\"bar\"}}\n"
  },
  {
    "path": "sdk/cwl/tests/22466/fake.bam",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n"
  },
  {
    "path": "sdk/cwl/tests/22466/input.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ninput_bam:\n  class: File\n  location: fake.bam\nsample_id: fake\n"
  },
  {
    "path": "sdk/cwl/tests/22466-output-glob-expressions-secondaryfile.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: CommandLineTool\nlabel: Output glob test for bug 22466\n\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\n\nrequirements:\n- class: ShellCommandRequirement\n- class: InitialWorkDirRequirement\n  listing:\n  - $(inputs.input_bam)\n- class: InlineJavascriptRequirement\n\ninputs:\n- id: input_bam\n  label: Input bam\n  type: File\n- id: output_bam_name\n  label: Output BAM file name\n  type: string?\n  default: deduped\n- id: sample_id\n  label: Sample ID\n  type: string\n\noutputs:\n- id: metrics_file\n  label: Metrics file\n  doc: File to which the duplication metrics will be written.\n  type: File?\n  outputBinding:\n    glob: '*.txt'\n- id: deduped_bam\n  label: Deduped BAM\n  doc: The output file to which marked records will be written.\n  type: File?\n  secondaryFiles:\n  - pattern: ^.bai\n    required: false\n  - pattern: .bai\n    required: false\n  outputBinding:\n    glob: |-\n      ${\n          var ext = inputs.input_bam.nameext.slice(1)\n          return [\"*\", inputs.output_bam_name, ext].join(\".\")\n      }\n\narguments: [touch, fake.deduped.bam, fake.deduped.bai, metrics.txt]\n"
  },
  {
    "path": "sdk/cwl/tests/__init__.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport subprocess\nimport sys\nimport warnings\n\nfrom pathlib import Path\n\nTESTS_DIR = Path(__file__).parent\n\n# We default to running 3 jobs in parallel, which is tested to be what works in\n# October 2025 on Jenkins under run-tests.sh with crunch-dispatch-local.\n# Users testing directly against a live cluster can increase this.\n_jobs = os.environ.get('ARVADOS_CWLTEST_JOBS')\ntry:\n    ARVADOS_CWLTEST_JOBS = int(_jobs, 10)\nexcept (TypeError, ValueError):\n    _jobs_ok = False\nelse:\n    _jobs_ok = 0 < ARVADOS_CWLTEST_JOBS < sys.maxsize\nif not _jobs_ok:\n    ARVADOS_CWLTEST_JOBS = 3\n    if _jobs is not None:\n        warnings.warn(\n            f\"ARVADOS_CWLTEST_JOBS value {_jobs!r} is invalid;\"\n            f\" using default {ARVADOS_CWLTEST_JOBS!r}\"\n        )\ndel _jobs, _jobs_ok\n\ndef run_cwltest(\n        cwl_test,\n        cwl_tool,\n        badges_dir=None,\n        *,\n        test_args=(),\n        tool_args=(),\n):\n    cmd = [\n        'cwltest',\n        '--test', str(cwl_test),\n        '--tool', str(cwl_tool),\n        '-j', str(ARVADOS_CWLTEST_JOBS),\n    ]\n    cmd.extend(test_args)\n    # FIXME?: cwltest badge generation seems buggy as of 2.5.20241122133319\n    # if badges_dir:\n    #     cmd.append('--badgedir')\n    #     cmd.append(str(badges_dir))\n    cmd.extend([\n        '--',\n        '--compute-checksum',\n        '--disable-reuse',\n        '--enable-dev',\n    ])\n    cmd.extend(tool_args)\n    return subprocess.run(cmd)\n"
  },
  {
    "path": "sdk/cwl/tests/arvados-tests.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- job: dir-job.yml\n  output:\n    \"outlist\": {\n        \"size\": 20,\n        \"location\": \"output.txt\",\n        \"class\": \"File\",\n        \"checksum\": \"sha1$13cda8661796ae241da3a18668fb552161a72592\"\n    }\n  tool: keep-dir-test-input.cwl\n  doc: Test directory in keep\n\n- job: dir-job2.yml\n  output:\n    \"outlist\": {\n        \"size\": 20,\n        \"location\": \"output.txt\",\n        \"class\": \"File\",\n        \"checksum\": \"sha1$13cda8661796ae241da3a18668fb552161a72592\"\n    }\n  tool: keep-dir-test-input.cwl\n  doc: Test directory in keep\n\n- job: null\n  output:\n    \"outlist\": {\n        \"size\": 20,\n        \"location\": \"output.txt\",\n        \"class\": \"File\",\n        \"checksum\": \"sha1$13cda8661796ae241da3a18668fb552161a72592\"\n    }\n  tool: keep-dir-test-input2.cwl\n  doc: Test default directory in keep\n\n- job: null\n  output:\n    \"outlist\": {\n        \"size\": 20,\n        \"location\": \"output.txt\",\n        \"class\": \"File\",\n        \"checksum\": \"sha1$13cda8661796ae241da3a18668fb552161a72592\"\n    }\n  tool: keep-dir-test-input3.cwl\n  doc: Test default directory in keep\n\n- job: octo.yml\n  output: {}\n  tool: cat.cwl\n  doc: Test hashes in filenames\n\n- job: listing-job.yml\n  output: {\n    \"out\": {\n        \"class\": \"File\",\n        \"location\": \"output.txt\",\n        \"size\": 5,\n        \"checksum\": \"sha1$724ba28f4a9a1b472057ff99511ed393a45552e1\"\n    }\n  }\n  tool: wf/listing_shallow.cwl\n  doc: test shallow directory listing\n\n- job: listing-job.yml\n  output: {\n    \"out\": {\n        \"class\": \"File\",\n        \"location\": \"output.txt\",\n        \"size\": 5,\n        \"checksum\": \"sha1$724ba28f4a9a1b472057ff99511ed393a45552e1\"\n    }\n  }\n  tool: wf/listing_none.cwl\n  doc: test no directory listing\n\n- job: listing-job.yml\n  output: {\n    \"out\": {\n        \"class\": \"File\",\n        \"location\": \"output.txt\",\n        \"size\": 5,\n        \"checksum\": \"sha1$724ba28f4a9a1b472057ff99511ed393a45552e1\"\n    }\n  }\n  tool: wf/listing_deep.cwl\n  doc: test deep directory listing\n\n- job: null\n  output: {}\n  tool: noreuse.cwl\n  doc: \"Test arv:ReuseRequirement\"\n\n- job: 12213-keepref-job.yml\n  output: {\n    \"out\": [\n        {\n            \"checksum\": \"sha1$1c78028c0d69163391eef89316b44a57bde3fead\",\n            \"location\": \"sample1_S01_R1_001.fastq.txt\",\n            \"class\": \"File\",\n            \"size\": 32\n        },\n        {\n            \"checksum\": \"sha1$5f3b4df1b0f7fdced751fc6079778600ad9fdb45\",\n            \"location\": \"sample2_S01_R1_001.fastq.txt\",\n            \"class\": \"File\",\n            \"size\": 32\n        }\n    ]\n  }\n  tool: 12213-keepref-wf.cwl\n  doc: \"Test manipulating keep references with expression tools\"\n\n- job: null\n  output:\n    out: null\n  tool: 12418-glob-empty-collection.cwl\n  doc: \"Test glob output on empty collection\"\n\n- job: null\n  output:\n    \"out\": {\n        \"location\": \"hello.out\",\n        \"class\": \"File\",\n        \"checksum\": \"sha1$ec5d3976351abab45a483a49ce714a8430cb203a\",\n        \"size\": 24\n    }\n  tool: 13976-keepref-wf.cwl\n  doc: \"Test issue 13976\"\n\n- job: null\n  output:\n    out: out\n  tool: wf/runin-wf.cwl\n  doc: \"RunInSingleContainer cwl.input.json needs to be consistent with pathmapper manipulations\"\n\n- job: 
secondary/wf-job.yml\n  output: {}\n  tool: secondary/wf.cwl\n  doc: \"RunInSingleContainer applies secondaryFile discovery & manipulation before generating cwl.input.yml\"\n\n- job: null\n  output:\n    out: out\n  tool: wf/runin-with-ttl-wf.cwl\n  doc: \"RunInSingleContainer respects outputTTL\"\n\n- job: secret_test_job.yml\n  output: {\n    \"out\": {\n        \"class\": \"File\",\n        \"location\": \"hashed_example.txt\",\n        \"size\": 47,\n        \"checksum\": \"sha1$f45341c7f03b4dd10646c402908d1aea0d580f5d\"\n    }\n  }\n  tool: wf/secret_wf.cwl\n  doc: \"Test secret input parameters\"\n  tags: [ secrets ]\n\n- job: null\n  output:\n    out: null\n  tool: wf/runin-reqs-wf.cwl\n  doc: \"RunInSingleContainer handles dynamic resource requests on step\"\n\n- job: null\n  output:\n    out: null\n  tool: wf/runin-reqs-wf2.cwl\n  doc: \"RunInSingleContainer handles dynamic resource requests on embedded subworkflow\"\n\n- job: null\n  output:\n    out: null\n  tool: wf/runin-reqs-wf3.cwl\n  should_fail: true\n  doc: \"RunInSingleContainer disallows dynamic resource request on subworkflow steps\"\n\n- job: null\n  output:\n    out: null\n  tool: wf/runin-reqs-wf4.cwl\n  doc: \"RunInSingleContainer discovers static resource request in subworkflow steps\"\n\n- job: null\n  output:\n    out: null\n  tool: wf/runin-reqs-wf5.cwl\n  doc: \"RunInSingleContainer has minimum 128 MiB RAM\"\n\n- job: secondaryFiles/inp3.yml\n  output: {}\n  tool: secondaryFiles/example1.cwl\n  doc: Discover secondaryFiles at runtime if they are in keep\n\n- job: null\n  output: {}\n  tool: secondaryFiles/example3.cwl\n  doc: Discover secondaryFiles on default values\n\n- job: null\n  output:\n    out: null\n  tool: wf-defaults/wf1.cwl\n  doc: \"Can have separate default parameters including directory and file inside same directory\"\n\n- job: null\n  output:\n    out: null\n  tool: wf-defaults/wf2.cwl\n  doc: \"Can have a parameter default value that is a directory literal with a file literal\"\n\n- job: null\n  output:\n    out: null\n  tool: wf-defaults/wf3.cwl\n  doc: \"Do not accept a directory literal without a basename\"\n  should_fail: true\n\n- job: null\n  output:\n    out: null\n  tool: wf-defaults/wf4.cwl\n  doc: default in embedded subworkflow missing 'id' field, v1.0\n  should_fail: false\n\n- job: null\n  output:\n    out: null\n  tool: wf-defaults/wf8.cwl\n  doc: default in embedded subworkflow missing 'id' field, v1.1\n  should_fail: false\n\n- job: null\n  output:\n    out: null\n  tool: wf-defaults/wf5.cwl\n  doc: default in embedded subworkflow\n\n- job: null\n  output:\n    out: null\n  tool: wf-defaults/wf6.cwl\n  doc: default in RunInSingleContainer step\n\n- job: null\n  output:\n    out: null\n  tool: wf-defaults/wf7.cwl\n  doc: workflow level default in RunInSingleContainer\n\n- job: 13931-size-job.yml\n  output:\n    \"out\": {\n        \"checksum\": \"sha1$5bf6e5357bd42a6b1d2a3a040e16a91490064d26\",\n        \"location\": \"size.txt\",\n        \"class\": \"File\",\n        \"size\": 3\n    }\n  tool: 13931-size.cwl\n  doc: Test that size is set for files in Keep\n\n- job: 15241-writable-dir-job.json\n  output: {\n    \"results\": {\n        \"basename\": \"keep:6dd5fa20622d5a7a23c9147d0927da2a+180\",\n        \"class\": \"Directory\",\n        \"listing\": [\n            {\n                \"basename\": \"d7514270f356df848477718d58308cc4+94\",\n                \"class\": \"Directory\",\n                \"listing\": [\n                    {\n                        \"basename\": 
\"a\",\n                        \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n                        \"class\": \"File\",\n                        \"location\": \"a\",\n                        \"size\": 0\n                    },\n                    {\n                        \"basename\": \"blurg.txt\",\n                        \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n                        \"class\": \"File\",\n                        \"location\": \"blurg.txt\",\n                        \"size\": 0\n                    },\n                    {\n                        \"basename\": \"c\",\n                        \"class\": \"Directory\",\n                        \"listing\": [\n                            {\n                                \"basename\": \"d\",\n                                \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n                                \"class\": \"File\",\n                                \"location\": \"d\",\n                                \"size\": 0\n                            }\n                        ],\n                        \"location\": \"c\"\n                    },\n                    {\n                        \"basename\": \"b\",\n                        \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n                        \"class\": \"File\",\n                        \"location\": \"b\",\n                        \"size\": 0\n                    }\n                ],\n                \"location\": \"d7514270f356df848477718d58308cc4+94\"\n            }\n        ],\n        \"location\": \"keep:6dd5fa20622d5a7a23c9147d0927da2a+180\"\n    }\n  }\n  tool: 15241-writable-dir.cwl\n  doc: Test for writable collections\n\n- job: badkeep.yml\n  output: {}\n  should_fail: true\n  tool: 15295-bad-keep-ref.cwl\n  doc: Test checking for invalid keepref\n\n- job: listing-job.yml\n  output: {\n    \"out\": {\n        \"class\": \"File\",\n        \"location\": \"output.txt\",\n        \"size\": 5,\n        \"checksum\": \"sha1$724ba28f4a9a1b472057ff99511ed393a45552e1\"\n    }\n  }\n  tool: 16169-no-listing-hint.cwl\n  doc: \"Test cwltool:LoadListingRequirement propagation\"\n\n- job: hello.yml\n  output:\n    \"out\": {\n        \"location\": \"hello.out\",\n        \"class\": \"File\",\n        \"checksum\": \"sha1$ec5d3976351abab45a483a49ce714a8430cb203a\",\n        \"size\": 24\n    }\n  tool: 16377-missing-default.cwl\n  doc: \"Test issue 16377 - missing default fails even when it should be overridden by valid input\"\n\n- job: hello.yml\n  output:\n    \"out\":\n      \"checksum\": \"sha1$f1d2d2f924e986ac86fdf7b36c94bcdf32beec15\"\n      \"class\": \"File\"\n      \"location\": \"foo.txt\"\n      \"size\": 4\n  tool: 17267-broken-schemas.cwl\n  doc: \"Test issue 17267 - inaccessible $schemas URL is not a fatal error\"\n\n- job: null\n  output: {}\n  tool: wf/trick_defaults2.cwl\n  doc: \"Test issue 17462 - secondary file objects on file defaults are not resolved\"\n\n- job: null\n  output: {\n    \"stuff\": {\n        \"location\": \"bar.txt\",\n        \"basename\": \"bar.txt\",\n        \"class\": \"File\",\n        \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n        \"size\": 0\n    }\n  }\n  tool: 17521-dot-slash-glob.cwl\n  doc: \"Test issue 17521 - bug with leading './' capturing files in subdirectories\"\n\n- job: null\n  output: {\n    \"stuff\": {\n        \"basename\": \"foo\",\n        \"class\": \"Directory\",\n        
\"listing\": [\n            {\n                \"basename\": \"bar.txt\",\n                \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n                \"class\": \"File\",\n                \"location\": \"foo/bar.txt\",\n                \"size\": 0\n            }\n        ],\n        \"location\": \"foo\"\n    }\n  }\n  tool: 10380-trailing-slash-dir.cwl\n  doc: \"Test issue 10380 - bug with trailing slash when capturing an output directory\"\n\n- job: null\n  output: {\n    \"stuff\": {\n        \"basename\": \"78f3957c41d044352303a3fa326dff1e+102\",\n        \"class\": \"Directory\",\n        \"listing\": [\n            {\n                \"basename\": \"baz.txt\",\n                \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n                \"class\": \"File\",\n                \"location\": \"78f3957c41d044352303a3fa326dff1e+102/baz.txt\",\n                \"size\": 0\n            },\n            {\n                \"basename\": \"foo\",\n                \"class\": \"Directory\",\n                \"listing\": [\n                    {\n                        \"basename\": \"bar.txt\",\n                        \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n                        \"class\": \"File\",\n                        \"location\": \"78f3957c41d044352303a3fa326dff1e+102/foo/bar.txt\",\n                        \"size\": 0\n                    }\n                ],\n                \"location\": \"78f3957c41d044352303a3fa326dff1e+102/foo\"\n            }\n        ],\n        \"location\": \"78f3957c41d044352303a3fa326dff1e+102\"\n    }\n  }\n  tool: 17801-runtime-outdir.cwl\n  doc: \"Test issue 17801 - bug using $(runtime.outdir) to capture the output directory\"\n\n- job: null\n  output:\n    \"val\": \"keep:f225e6259bdd63bc7240599648dde9f1+97/hg19.fa\"\n  tool: 17858-pack-visit-crash.cwl\n  doc: \"Test issue 17858 - keep ref default inputs on ExpressionTool\"\n\n- job: 17879-ignore-sbg-fields-job.yml\n  output:\n    \"outstr\": \"foo woble bar\"\n  tool: 17879-ignore-sbg-fields.cwl\n  doc: \"Test issue 17879 - ignores sbg fields\"\n\n- job: chipseq/chip-seq-single.json\n  output: {}\n  tool: chipseq/cwl-packed.json\n  doc: \"Test issue 18723 - correctly upload two directories with the same basename\"\n\n- job: null\n  output: {}\n  tool: 18994-basename/wf_ren.cwl\n  doc: \"Test issue 18994 - correctly stage file with modified basename\"\n\n- job: 19109-upload-secondary.yml\n  output: {\n    \"out\": {\n        \"basename\": \"file1.catted\",\n        \"class\": \"File\",\n        \"location\": \"file1.catted\",\n        \"size\": 20,\n        \"checksum\": \"sha1$c4cead17cebdd829f38c48e18a28f1da72339ef7\"\n    },\n    \"out2\": {\n        \"basename\": \"file2.catted\",\n        \"checksum\": \"sha1$6f71c5d1512519ede45bedfdd624e05fd8037b0d\",\n        \"class\": \"File\",\n        \"location\": \"file2.catted\",\n        \"size\": 12\n    }\n  }\n  tool: 19109-upload-secondary.cwl\n  doc: \"Test issue 19109 - correctly discover & upload secondary files\"\n\n- job: 19678-name-id.yml\n  output: {\n    \"processed_name\": {\n        \"first\": \"foo\",\n        \"last\": \"bar\"\n    }\n  }\n  tool: 19678-name-id.cwl\n  doc: \"Test issue 19678 - non-string type input parameter called 'name'\"\n\n- job: oom/fakeoom.yml\n  output: {}\n  tool: oom/19975-oom.cwl\n  doc: \"Test feature 19975 - retry on exit 137\"\n\n- job: oom/fakeoom.yml\n  output: {}\n  tool: oom/19975-oom-mispelled.cwl\n  doc: \"Test feature 19975 - retry 
on exit 137, old misspelled version\"\n\n- job: oom/fakeoom2.yml\n  output: {}\n  tool: oom/19975-oom.cwl\n  doc: \"Test feature 19975 - retry on memory error\"\n\n- job: oom/fakeoom3.yml\n  output: {}\n  tool: oom/19975-oom3.cwl\n  doc: \"Test feature 19975 - retry on custom error\"\n\n- job: null\n  output:\n    out: out\n  tool: wf/runseparate-wf.cwl\n  doc: \"test arv:SeparateRunner\"\n\n- job: null\n  output: {\n    \"val\": {\n        \"basename\": \"testdir\",\n        \"class\": \"Directory\",\n        \"listing\": [\n            {\n                \"basename\": \"a\",\n                \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n                \"class\": \"File\",\n                \"location\": \"testdir/a\",\n                \"size\": 0\n            },\n            {\n                \"basename\": \"b\",\n                \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n                \"class\": \"File\",\n                \"location\": \"testdir/b\",\n                \"size\": 0\n            },\n            {\n                \"basename\": \"c\",\n                \"class\": \"Directory\",\n                \"listing\": [\n                    {\n                        \"basename\": \"d\",\n                        \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n                        \"class\": \"File\",\n                        \"location\": \"testdir/c/d\",\n                        \"size\": 0\n                    }\n                ],\n                \"location\": \"testdir/c\"\n            }\n        ],\n        \"location\": \"testdir\"\n    },\n    \"val2\": [\n        {\n            \"basename\": \"a\",\n            \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n            \"class\": \"File\",\n            \"location\": \"a\",\n            \"size\": 0\n        },\n        {\n            \"basename\": \"b\",\n            \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n            \"class\": \"File\",\n            \"location\": \"b\",\n            \"size\": 0\n        }\n    ]\n  }\n  tool: wf/output_dir_wf.cwl\n  doc: \"test same file appearing in output of both Directory and array\"\n\n- job: 22466/input.yml\n  output: {\n    \"metrics_file\": {\n        \"location\": \"metrics.txt\",\n        \"basename\": \"metrics.txt\",\n        \"class\": \"File\",\n        \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n        \"size\": 0\n    },\n    \"deduped_bam\": {\n        \"location\": \"fake.deduped.bam\",\n        \"basename\": \"fake.deduped.bam\",\n        \"class\": \"File\",\n        \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\",\n        \"size\": 0,\n        \"secondaryFiles\": [\n            {\n                \"basename\": \"fake.deduped.bai\",\n                \"location\": \"fake.deduped.bai\",\n                \"class\": \"File\",\n                \"size\": 0,\n                \"checksum\": \"sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709\"\n            }\n        ]\n    }\n  }\n  tool: 22466-output-glob-expressions-secondaryfile.cwl\n  doc: \"test bug 22466\"\n"
  },
  {
    "path": "sdk/cwl/tests/badkeep.yml",
    "content": "hello:\n  class: File\n  location: keep:/4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt\n"
  },
  {
    "path": "sdk/cwl/tests/cat.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\ninputs:\n  - id: inp\n    type: File\n    inputBinding: {}\noutputs: []\nbaseCommand: cat\n"
  },
  {
    "path": "sdk/cwl/tests/cat2.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: CommandLineTool\ninputs:\n  - id: inp\n    type: File\n    secondaryFiles:\n      - pattern: .tbi\n        required: true\nstdout: $(inputs.inp.nameroot).catted\noutputs:\n  out:\n    type: stdout\narguments: [cat, '$(inputs.inp.path)', '$(inputs.inp.secondaryFiles[0].path)']\n"
  },
  {
    "path": "sdk/cwl/tests/chipseq/chip-seq-single.json",
    "content": "{\n    \"referenceGenomeSequence\": {\n        \"class\": \"File\",\n        \"location\": \"data/Genomes/Homo_sapiens/GRCh38.p2/WholeGenome/genome.fa\",\n        \"metadata\": {\n            \"reference_genome\": {\n                \"organism\": \"Homo sapiens\",\n                \"version\": \"hg38\"\n            },\n            \"annotation\": {\n                \"source\": \"gencode\",\n                \"version\": \"v24\"\n            }\n        }\n    },\n    \"referenceGenomeSequenceDrosophila\": {\n        \"class\": \"File\",\n        \"location\": \"data/Genomes/Drosophila_melanogaster/dmel_r6.16/WholeGenome/genome.fa\",\n        \"metadata\": {\n            \"reference_genome\": {\n                \"organism\": \"Drosophila melanogaster\",\n                \"version\": \"rmel_r6.16\"\n            }\n        }\n    },\n    \"blacklistBed\": {\n        \"class\": \"File\",\n        \"location\": \"data/Genomes/Blacklist/lists2/hg38-blacklist.v2.bed\",\n        \"metadata\": {\n            \"reference_genome\": {\n                \"organism\": \"Homo sapiens\",\n                \"version\": \"hg38\"\n            },\n            \"annotation\": {\n                \"source\": \"gencode\",\n                \"version\": \"v24\"\n            }\n        }\n    },\n    \"BowtieHumanReference\": {\n        \"class\": \"Directory\",\n        \"location\": \"data/Genomes/Homo_sapiens/GRCh38.p2/Bowtie2Index/\",\n        \"metadata\": {\n            \"reference_genome\": {\n                \"organism\": \"Homo sapiens\",\n                \"version\": \"hg38\"\n            },\n            \"annotation\": {\n                \"source\": \"gencode\",\n                \"version\": \"v24\"\n            }\n        }\n    },\n    \"BowtieDrosophilaReference\": {\n        \"class\": \"Directory\",\n        \"location\": \"data/Genomes/Drosophila_melanogaster/dmel_r6.16/Bowtie2Index/\",\n        \"metadata\": {\n            \"reference_genome\": {\n                \"organism\": \"Drosophila melanogaster\",\n                \"version\": \"rmel_r6.16\"\n            }\n        }\n    },\n    \"sampleName\": \"LED054_0p03nMR1.0\",\n    \"inputFastq1\": {\n        \"class\": \"File\",\n        \"metadata\": {\n            \"user\": \"kmavrommatis\",\n            \"sample_id\": [\n               2\n            ]\n        },\n        \"location\": \"DATEST/ChIP-Seq/Raw/fastq/Input_R1.fastq.gz\",\n        \"secondaryFiles\": []\n    },\n    \"inputFastq2\": {\n        \"class\": \"File\",\n        \"metadata\": {\n            \"user\": \"kmavrommatis\",\n            \"sample_id\": [\n                2\n            ]\n        },\n        \"location\": \"DATEST/ChIP-Seq/Raw/fastq/Input_R3.fastq.gz\",\n        \"secondaryFiles\": []\n    },\n    \"inputFastqUMI\": {\n        \"class\": \"File\",\n        \"metadata\": {\n            \"user\": \"kmavrommatis\",\n            \"sample_id\": [\n               2\n            ]\n        },\n        \"location\": \"DATEST/ChIP-Seq/Raw/fastq/Input_R2.fastq.gz\",\n        \"secondaryFiles\": []\n    }\n}\n\n"
  },
  {
    "path": "sdk/cwl/tests/chipseq/cwl-packed.json",
    "content": "{\n    \"$graph\": [\n        {\n            \"class\": \"Workflow\",\n            \"id\": \"#main\",\n            \"doc\": \"Pipeline that is applied on single ChIP-seq samples.\\n\\nStarts with QC on the reads and trimming (for adapters and based on quality)\\n\\nAligns to human genome and adds UMI\\n\\nAligns to Drosophila genome and counts the number of reads.\\n\\nAfter the alignment to human genome the files are filtered for duplicates, multimappers and alignments in black listed regions\",\n            \"label\": \"ChIP-Seq (single sample)\",\n            \"inputs\": [\n                {\n                    \"id\": \"#inputFastq1\",\n                    \"type\": \"File\",\n                    \"https://www.sevenbridges.com/fileTypes\": \"fastq\",\n                    \"https://www.sevenbridges.com/x\": 0,\n                    \"https://www.sevenbridges.com/y\": 1726.25\n                },\n                {\n                    \"id\": \"#blacklistBed\",\n                    \"type\": \"File\",\n                    \"https://www.sevenbridges.com/x\": 746.4744873046875,\n                    \"https://www.sevenbridges.com/y\": 1903.265625\n                },\n                {\n                    \"id\": \"#referenceGenomeSequence\",\n                    \"type\": \"File\",\n                    \"secondaryFiles\": [\n                        \".fai\",\n                        \"^.dict\"\n                    ],\n                    \"https://www.sevenbridges.com/fileTypes\": \"fasta, fa\",\n                    \"https://www.sevenbridges.com/x\": 0,\n                    \"https://www.sevenbridges.com/y\": 1405.203125\n                },\n                {\n                    \"id\": \"#sampleName\",\n                    \"type\": \"string\",\n                    \"https://www.sevenbridges.com/x\": 0,\n                    \"https://www.sevenbridges.com/y\": 1191.171875\n                },\n                {\n                    \"id\": \"#inputFastq2\",\n                    \"type\": [\n                        \"null\",\n                        \"File\"\n                    ],\n                    \"https://www.sevenbridges.com/fileTypes\": \"fastq\",\n                    \"https://www.sevenbridges.com/x\": 0,\n                    \"https://www.sevenbridges.com/y\": 1619.234375\n                },\n                {\n                    \"id\": \"#inputFastqUMI\",\n                    \"type\": \"File\",\n                    \"https://www.sevenbridges.com/x\": 0,\n                    \"https://www.sevenbridges.com/y\": 1512.21875\n                },\n                {\n                    \"id\": \"#BowtieHumanReference\",\n                    \"type\": \"Directory\",\n                    \"https://www.sevenbridges.com/x\": 363.875,\n                    \"https://www.sevenbridges.com/y\": 1519.21875\n                },\n                {\n                    \"id\": \"#BowtieDrosophilaReference\",\n                    \"type\": \"Directory\",\n                    \"https://www.sevenbridges.com/x\": 363.875,\n                    \"https://www.sevenbridges.com/y\": 1626.234375\n                },\n                {\n                    \"id\": \"#referenceGenomeSequenceDrosophila\",\n                    \"type\": \"File\",\n                    \"secondaryFiles\": [\n                        \".fai\"\n                    ],\n                    \"https://www.sevenbridges.com/x\": 0,\n                    \"https://www.sevenbridges.com/y\": 1298.1875\n                }\n     
       ],\n            \"outputs\": [\n            ],\n            \"steps\": [\n                {\n                    \"id\": \"#step1\",\n                    \"in\": {\n                        \"inp\": \"#inputFastq1\"\n                    },\n                    \"out\": [],\n                    \"run\": \"../cat.cwl\"\n                }\n            ],\n            \"requirements\": [\n            ]\n        }\n    ],\n    \"cwlVersion\": \"v1.0\"\n}\n"
  },
  {
    "path": "sdk/cwl/tests/chipseq/data/Genomes/Blacklist/lists2/hg38-blacklist.v2.bed",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/chipseq/data/Genomes/Drosophila_melanogaster/dmel_r6.16/Bowtie2Index/genome.fa",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/chipseq/data/Genomes/Drosophila_melanogaster/dmel_r6.16/WholeGenome/genome.dict",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/chipseq/data/Genomes/Drosophila_melanogaster/dmel_r6.16/WholeGenome/genome.fa",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/chipseq/data/Genomes/Drosophila_melanogaster/dmel_r6.16/WholeGenome/genome.fa.fai",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/chipseq/data/Genomes/Homo_sapiens/GRCh38.p2/Bowtie2Index/genome.fa",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/chipseq/data/Genomes/Homo_sapiens/GRCh38.p2/WholeGenome/genome.dict",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/chipseq/data/Genomes/Homo_sapiens/GRCh38.p2/WholeGenome/genome.fa",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/chipseq/data/Genomes/Homo_sapiens/GRCh38.p2/WholeGenome/genome.fa.fai",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/collection_per_tool/a.txt",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/collection_per_tool/b.txt",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/collection_per_tool/c.txt",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/collection_per_tool/collection_per_tool.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: Workflow\ninputs: []\noutputs: []\nsteps:\n  step1:\n    in: []\n    out: []\n    run: step1.cwl\n  step2:\n    in: []\n    out: []\n    run: step2.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/collection_per_tool/collection_per_tool_packed.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{\n    \"$graph\": [\n        {\n            \"class\": \"Workflow\",\n            \"hints\": [\n                {\n                    \"acrContainerImage\": \"999999999999999999999999999999d3+99\",\n                    \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\"\n                }\n            ],\n            \"id\": \"#main\",\n            \"inputs\": [],\n            \"outputs\": [],\n            \"steps\": [\n                {\n                    \"id\": \"#main/step1\",\n                    \"in\": [],\n                    \"out\": [],\n                    \"run\": \"#step1.cwl\"\n                },\n                {\n                    \"id\": \"#main/step2\",\n                    \"in\": [],\n                    \"out\": [],\n                    \"run\": \"#step2.cwl\"\n                }\n            ]\n        },\n        {\n            \"arguments\": [\n                \"echo\",\n                \"$(inputs.a)\",\n                \"$(inputs.b)\"\n            ],\n            \"class\": \"CommandLineTool\",\n            \"id\": \"#step1.cwl\",\n            \"inputs\": [\n                {\n                    \"default\": {\n                        \"basename\": \"a.txt\",\n                        \"class\": \"File\",\n                        \"location\": \"keep:b9fca8bf06b170b8507b80b2564ee72b+57/a.txt\",\n                        \"nameext\": \".txt\",\n                        \"nameroot\": \"a\"\n                    },\n                    \"id\": \"#step1.cwl/a\",\n                    \"type\": \"File\"\n                },\n                {\n                    \"default\": {\n                        \"basename\": \"b.txt\",\n                        \"class\": \"File\",\n                        \"location\": \"keep:b9fca8bf06b170b8507b80b2564ee72b+57/b.txt\",\n                        \"nameext\": \".txt\",\n                        \"nameroot\": \"b\"\n                    },\n                    \"id\": \"#step1.cwl/b\",\n                    \"type\": \"File\"\n                }\n            ],\n            \"outputs\": []\n        },\n        {\n            \"arguments\": [\n                \"echo\",\n                \"$(inputs.c)\",\n                \"$(inputs.b)\"\n            ],\n            \"class\": \"CommandLineTool\",\n            \"id\": \"#step2.cwl\",\n            \"inputs\": [\n                {\n                    \"default\": {\n                        \"basename\": \"b.txt\",\n                        \"class\": \"File\",\n                        \"location\": \"keep:8e2d09a066d96cdffdd2be41579e4e2e+57/b.txt\",\n                        \"nameext\": \".txt\",\n                        \"nameroot\": \"b\"\n                    },\n                    \"id\": \"#step2.cwl/b\",\n                    \"type\": \"File\"\n                },\n                {\n                    \"default\": {\n                        \"basename\": \"c.txt\",\n                        \"class\": \"File\",\n                        \"location\": \"keep:8e2d09a066d96cdffdd2be41579e4e2e+57/c.txt\",\n                        \"nameext\": \".txt\",\n                        \"nameroot\": \"c\"\n                    },\n                    \"id\": \"#step2.cwl/c\",\n                    \"type\": \"File\"\n                }\n            ],\n            \"outputs\": []\n        }\n    ],\n    \"cwlVersion\": \"v1.0\"\n}\n"
  },
  {
    "path": "sdk/cwl/tests/collection_per_tool/collection_per_tool_wrapper.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{\n    \"$graph\": [\n        {\n            \"class\": \"Workflow\",\n            \"hints\": [\n                {\n                    \"acrContainerImage\": \"999999999999999999999999999999d3+99\",\n                    \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\"\n                }\n            ],\n            \"id\": \"#main\",\n            \"inputs\": [],\n            \"outputs\": [],\n            \"requirements\": [\n                {\n                    \"class\": \"SubworkflowFeatureRequirement\"\n                }\n            ],\n            \"steps\": [\n                {\n                    \"id\": \"#main/collection_per_tool.cwl\",\n                    \"in\": [],\n                    \"label\": \"collection_per_tool.cwl\",\n                    \"out\": [],\n                    \"run\": \"keep:177002db236f41230905621862cc4230+367/collection_per_tool.cwl\"\n                }\n            ]\n        }\n    ],\n    \"cwlVersion\": \"v1.2\"\n}\n"
  },
  {
    "path": "sdk/cwl/tests/collection_per_tool/step1.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\ninputs:\n  a:\n    type: File\n    default:\n      class: File\n      location: a.txt\n  b:\n    type: File\n    default:\n      class: File\n      location: b.txt\noutputs: []\narguments: [echo, $(inputs.a), $(inputs.b)]"
  },
  {
    "path": "sdk/cwl/tests/collection_per_tool/step2.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\ninputs:\n  c:\n    type: File\n    default:\n      class: File\n      location: c.txt\n  b:\n    type: File\n    default:\n      class: File\n      location: b.txt\noutputs: []\narguments: [echo, $(inputs.c), $(inputs.b)]"
  },
  {
    "path": "sdk/cwl/tests/conftest.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport http\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom pathlib import Path\n\nimport arvados\nimport arvados_cwl._version as acr_version\nimport pytest\n\nfrom . import TESTS_DIR\nfrom arvados.collection import Collection\nfrom arvados.commands import keepdocker\n\ndef _ensure_collection(arv_client, coll_pdh, upload_root, glob='*'):\n    \"\"\"Ensure a collection exists with the given portable data hash\n\n    This fixture first tries to load the collection from Arvados. If it is not\n    found, a new one is created from the files in `upload_root` matching `glob`.\n    The collection's API record is returned.\n    \"\"\"\n    try:\n        coll = Collection(coll_pdh)\n    except arvados.errors.ApiError as error:\n        assert error.status_code == http.HTTPStatus.NOT_FOUND\n        coll = Collection()\n        upload_root = TESTS_DIR / upload_root\n        for path in upload_root.rglob(glob):\n            if path.is_dir():\n                continue\n            coll_path = str(path.relative_to(upload_root))\n            with path.open('rb') as src_file, coll.open(coll_path, 'wb') as dst_file:\n                shutil.copyfileobj(src_file, dst_file)\n        assert coll.portable_data_hash() == coll_pdh\n        coll.save_new()\n    return coll.api_response()\n\n\n@pytest.fixture(scope='session')\ndef acr_script(tmp_path_factory):\n    \"\"\"Return an executable path to run the tested version of a-c-r\"\"\"\n    bin_dir = tmp_path_factory.mktemp('bin.')\n    bin_path = bin_dir / 'test-arvados-cwl-runner'\n    bin_path.touch(0o755)\n    with bin_path.open('w') as bin_file:\n        bin_file.write(f\"\"\"\\\n#!{sys.executable}\nimport sys\nsys.argv[0] = 'arvados-cwl-runner'\nsys.path = {sys.path!r}\nimport arvados_cwl\nsys.exit(arvados_cwl.main())\n\"\"\")\n    return bin_path\n\n\n@pytest.fixture(scope='session')\ndef arv_session():\n    return arvados.api('v1')\n\n\n@pytest.fixture(scope='session')\ndef arv_session_config(arv_session):\n    return arv_session.configs().get().execute()\n\n\n@pytest.fixture(scope='session')\ndef coll_hellos(arv_session):\n    return _ensure_collection(\n        arv_session,\n        '4d8a70b1e63b2aad6984e40e338e2373+69',\n        'secondaryFiles',\n        'hello.txt*',\n    )\n\n\n@pytest.fixture(scope='session')\ndef coll_hg19(arv_session):\n    return _ensure_collection(\n        arv_session,\n        'f225e6259bdd63bc7240599648dde9f1+97',\n        'hg19',\n    )\n\n\n@pytest.fixture(scope='session')\ndef coll_sample1(arv_session):\n    return _ensure_collection(\n        arv_session,\n        '20850f01122e860fb878758ac1320877+71',\n        'samples',\n        'sample1_S01_R1_001.fastq.gz',\n    )\n\n\n@pytest.fixture(scope='session')\ndef coll_testdir(arv_session):\n    return _ensure_collection(\n        arv_session,\n        'd7514270f356df848477718d58308cc4+94',\n        'testdir',\n    )\n\n\n@pytest.fixture(scope='session')\ndef jobs_docker_image(arv_session):\n    image_name = 'arvados/jobs'\n    image_tag = acr_version.__version__\n    image_fullname = f'{image_name}:{image_tag}'\n\n    # We must have the image in our local repository. 
Otherwise we'll try to\n    # pull it, which won't work for development images.\n    image_listing = subprocess.run(\n        ['docker', 'image', 'list', '--quiet', image_fullname],\n        capture_output=True,\n        text=True,\n    )\n    assert image_listing.returncode == os.EX_OK, f\"Failed to query Docker: {image_listing.stderr}\"\n    if not image_listing.stdout.strip():\n        build_env = os.environ.copy()\n        try:\n            workspace = Path(os.environ['WORKSPACE'])\n        except KeyError:\n            workspace = TESTS_DIR.parent.parent.parent\n            build_env['WORKSPACE'] = str(workspace)\n        build_proc = subprocess.run([\n            sys.executable,\n            str(workspace / 'build/build_docker_image.py'),\n            '--environment', 'development',\n            '--tag', image_fullname,\n            image_name,\n        ], env=build_env)\n        assert build_proc.returncode == os.EX_OK, f\"Failed to build {image_name}\"\n\n    # Now upload it to our cluster. arv-keepdocker automatically avoids\n    # redundant uploads, so we're leaning on that here.\n    try:\n        keepdocker.main(\n            [image_name, image_tag],\n            install_sig_handlers=False,\n            api=arv_session,\n        )\n    except SystemExit as exit_err:\n        assert not exit_err.args[0], f\"Failed to upload {image_name}\"\n    return image_fullname\n\n\n@pytest.fixture(scope='session')\ndef integration_colls(coll_hellos, coll_hg19, coll_sample1, coll_testdir, jobs_docker_image):\n    return [coll_hellos, coll_hg19, coll_sample1, coll_testdir, jobs_docker_image]\n\n\n@pytest.fixture\ndef tmp_project(request, arv_session):\n    project = arv_session.groups().create(\n        body={'group': {\n            'name': f'Arvados CWL {request.function.__name__} work',\n            'group_class': 'project',\n        }},\n        ensure_unique_name=True,\n    ).execute()\n    yield project\n    arv_session.groups().delete(uuid=project['uuid']).execute()\n"
  },
  {
    "path": "sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt",
    "content": "2018-10-03T18:21:16.944508412Z crunchstat: keepcalls 0 put 0 get -- interval 10.0000 seconds 0 put 0 get\n2018-10-03T18:21:16.944508412Z crunchstat: net:keep0 0 tx 0 rx -- interval 10.0000 seconds 0 tx 0 rx\n2018-10-03T18:21:16.944508412Z crunchstat: keepcache 0 hit 0 miss -- interval 10.0000 seconds 0 hit 0 miss\n2018-10-03T18:21:16.944508412Z crunchstat: fuseops 0 write 0 read -- interval 10.0000 seconds 0 write 0 read\n2018-10-03T18:21:16.944508412Z crunchstat: blkio:0:0 0 write 0 read -- interval 10.0000 seconds 0 write 0 read\n2018-10-03T18:21:26.954764471Z crunchstat: keepcalls 0 put 0 get -- interval 10.0000 seconds 0 put 0 get\n2018-10-03T18:21:26.954764471Z crunchstat: net:keep0 0 tx 0 rx -- interval 10.0000 seconds 0 tx 0 rx\n2018-10-03T18:21:26.954764471Z crunchstat: keepcache 0 hit 0 miss -- interval 10.0000 seconds 0 hit 0 miss\n2018-10-03T18:21:26.954764471Z crunchstat: fuseops 0 write 0 read -- interval 10.0000 seconds 0 write 0 read\n2018-10-03T18:21:26.954764471Z crunchstat: blkio:0:0 0 write 0 read -- interval 10.0000 seconds 0 write 0 read\n"
  },
  {
    "path": "sdk/cwl/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt",
    "content": "2018-10-03T18:21:07.823780191Z notice: reading stats from /sys/fs/cgroup/cpuacct//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/cgroup.procs\n2018-10-03T18:21:07.823841282Z notice: monitoring temp dir /tmp/crunch-run.9tee4-dz642-lymtndkpy39eibk.438029160\n2018-10-03T18:21:07.823917514Z notice: reading stats from /sys/fs/cgroup/memory//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/memory.stat\n2018-10-03T18:21:07.824136521Z mem 0 cache 0 swap 0 pgmajfault 1187840 rss\n2018-10-03T18:21:07.824187182Z notice: reading stats from /sys/fs/cgroup/cpuacct//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/cpuacct.stat\n2018-10-03T18:21:07.824253726Z notice: reading stats from /sys/fs/cgroup/cpuset//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/cpuset.cpus\n2018-10-03T18:21:07.824296720Z cpu 0.0000 user 0.0100 sys 20.00 cpus\n2018-10-03T18:21:07.824361476Z notice: reading stats from /sys/fs/cgroup/blkio//slurm_compute0/uid_0/job_6478/step_batch/c1df52c9940aae3f0fd586cacd7c0d7cb81b33aec973a67c9a7519bfe38ea914/blkio.io_service_bytes\n2018-10-03T18:21:07.824551021Z statfs 397741461504 available 4869779456 used 402611240960 total\n2018-10-03T18:21:17.824503045Z mem 172032 cache 0 swap 0 pgmajfault 68247552 rss\n2018-10-03T18:21:17.824702097Z cpu 2.0000 user 0.3800 sys 20.00 cpus -- interval 10.0004 seconds 2.0000 user 0.3700 sys\n2018-10-03T18:21:17.824984621Z net:eth0 51930 tx 844687 rx\n2018-10-03T18:21:17.825021992Z statfs 397740937216 available 4870303744 used 402611240960 total -- interval 10.0005 seconds 524288 used\n2018-10-03T18:21:27.824480114Z mem 172032 cache 0 swap 0 pgmajfault 69525504 rss\n2018-10-03T18:21:27.826909728Z cpu 2.0600 user 0.3900 sys 20.00 cpus -- interval 10.0022 seconds 0.0600 user 0.0100 sys\n2018-10-03T18:21:27.827141860Z net:eth0 55888 tx 859480 rx -- interval 10.0022 seconds 3958 tx 14793 rx\n2018-10-03T18:21:27.827177703Z statfs 397744787456 available 4866453504 used 402611240960 total -- interval 10.0022 seconds -3850240 used\n"
  },
  {
    "path": "sdk/cwl/tests/dir-job.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nindir:\n  class: Directory\n  location: keep:d7514270f356df848477718d58308cc4+94"
  },
  {
    "path": "sdk/cwl/tests/dir-job2.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nindir:\n  class: Directory\n  location: keep:d7514270f356df848477718d58308cc4+94/\n"
  },
  {
    "path": "sdk/cwl/tests/fake-keep-mount/fake_collection_dir/subdir/banana.txt",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nstrawberry\n"
  },
  {
    "path": "sdk/cwl/tests/federation/README",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nArvados federated workflow testing\n\nRequires cwltool 1.0.20181109150732 or later\n\nCreate main-test.json:\n\n{\n    \"acr\": \"/path/to/arvados-cwl-runner\",\n    \"arvado_api_host_insecure\": false,\n    \"arvados_api_hosts\": [\n        \"c97qk.arvadosapi.com\",\n        \"4xphq.arvadosapi.com\",\n        \"9tee4.arvadosapi.com\"\n    ],\n    \"arvados_api_token\": \"...\",\n    \"arvados_cluster_ids\": [\n        \"c97qk\",\n        \"4xphq\",\n        \"9tee4\"\n    ]\n}\n\n\nRun tests:\n\n$ cwltool main.cwl main-test.json\n\n\nList test cases:\n\n$ cwltool --print-targets main.cwl\n\n\nRun a specific test case:\n\n$ cwltool -t twostep-remote-copy-to-home main.cwl main-test.json\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/base-case.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\n  DockerRequirement:\n    dockerPull: arvados/fed-test:base-case\ninputs:\n  inp:\n    type: File\n    inputBinding: {}\n  runOnCluster: string\noutputs:\n  hash:\n    type: File\n    outputSource: md5sum/hash\nsteps:\n  md5sum:\n    in:\n      inp: inp\n      runOnCluster: runOnCluster\n    out: [hash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: md5sum.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/cat.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\ninputs:\n  inp:\n    type: File[]\n    inputBinding: {}\noutputs:\n  joined: stdout\nstdout: joined.txt\nbaseCommand: cat\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/hint-on-tool.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\n  DockerRequirement:\n    dockerPull: arvados/fed-test:hint-on-tool\ninputs:\n  inp:\n    type: File\n    inputBinding: {}\n  runOnCluster: string\noutputs:\n  hash:\n    type: File\n    outputSource: md5sum/hash\nsteps:\n  md5sum:\n    in:\n      inp: inp\n      runOnCluster: runOnCluster\n    out: [hash]\n    run: md5sum-tool-hint.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/hint-on-wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\n  DockerRequirement:\n    dockerPull: arvados/fed-test:hint-on-wf\nhints:\n  arv:ClusterTarget:\n    cluster_id: $(inputs.runOnCluster)\ninputs:\n  inp:\n    type: File\n    inputBinding: {}\n  runOnCluster: string\noutputs:\n  hash:\n    type: File\n    outputSource: md5sum/hash\nsteps:\n  md5sum:\n    in:\n      inp: inp\n    out: [hash]\n    run: md5sum.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/md5sum-tool-hint.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\nhints:\n  arv:ClusterTarget:\n    cluster_id: $(inputs.runOnCluster)\ninputs:\n  inp: File\n  runOnCluster: string\noutputs:\n  hash:\n    type: File\n    outputBinding:\n      glob: out.txt\nstdin: $(inputs.inp.path)\nstdout: out.txt\narguments: [\"md5sum\", \"-\"]\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/md5sum.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\ninputs:\n  inp:\n    type: File\noutputs:\n  hash:\n    type: File\n    outputBinding:\n      glob: out.txt\nstdin: $(inputs.inp.path)\nstdout: out.txt\narguments: [\"md5sum\", \"-\"]\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/remote-case.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\n  DockerRequirement:\n    dockerPull: arvados/fed-test:remote-case\ninputs:\n  inp:\n    type: File\n    inputBinding: {}\n  runOnCluster: string\noutputs:\n  hash:\n    type: File\n    outputSource: md5sum/hash\nsteps:\n  md5sum:\n    in:\n      inp: inp\n      runOnCluster: runOnCluster\n    out: [hash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: md5sum.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/rev-input-to-output.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\n  ShellCommandRequirement: {}\ninputs:\n  inp:\n    type: File\noutputs:\n  original:\n    type: File\n    outputBinding:\n      glob: $(inputs.inp.basename)\n  revhash:\n    type: stdout\nstdout: rev-$(inputs.inp.basename)\narguments:\n  - shellQuote: false\n    valueFrom: |\n      ln -s $(inputs.inp.path) $(inputs.inp.basename) &&\n      rev $(inputs.inp.basename)\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/rev.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\ninputs:\n  inp:\n    type: File\noutputs:\n  revhash:\n    type: File\n    outputBinding:\n      glob: out.txt\nstdout: out.txt\narguments: [rev, $(inputs.inp)]\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/runner-home-step-remote.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\n  DockerRequirement:\n    dockerPull: arvados/fed-test:runner-home-step-remote\ninputs:\n  inp: File\n  runOnCluster: string\noutputs:\n  hash:\n    type: File\n    outputSource: md5sum/hash\nsteps:\n  md5sum:\n    in:\n      inp: inp\n      runOnCluster: runOnCluster\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    out: [hash]\n    run: md5sum.cwl"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/runner-remote-step-home.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\n  DockerRequirement:\n    dockerPull: arvados/fed-test:runner-remote-step-home\ninputs:\n  inp: File\n  runOnCluster: string\noutputs:\n  hash:\n    type: File\n    outputSource: md5sum/hash\nsteps:\n  md5sum:\n    in:\n      inp: inp\n      runOnCluster: runOnCluster\n    out: [hash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: md5sum.cwl"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/scatter-gather.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\n  DockerRequirement:\n    dockerPull: arvados/fed-test:scatter-gather\n  ScatterFeatureRequirement: {}\ninputs:\n  shards: File[]\n  clusters: string[]\noutputs:\n  joined:\n    type: File\n    outputSource: cat/joined\nsteps:\n  md5sum:\n    in:\n      inp: shards\n      runOnCluster: clusters\n    scatter: [inp, runOnCluster]\n    scatterMethod: dotproduct\n    out: [hash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: md5sum.cwl\n  cat:\n    in:\n      inp: md5sum/hash\n    out: [joined]\n    run: cat.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/threestep-remote.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\n  DockerRequirement:\n    dockerPull: arvados/fed-test:threestep-remote\n  ScatterFeatureRequirement: {}\ninputs:\n  inp: File\n  clusterA: string\n  clusterB: string\n  clusterC: string\noutputs:\n  revhash:\n    type: File\n    outputSource: revC/revhash\nsteps:\n  md5sum:\n    in:\n      inp: inp\n      runOnCluster: clusterA\n    out: [hash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: md5sum.cwl\n  revB:\n    in:\n      inp: md5sum/hash\n      runOnCluster: clusterB\n    out: [revhash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: rev-input-to-output.cwl\n  revC:\n    in:\n      inp: revB/revhash\n      runOnCluster: clusterC\n    out: [revhash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: rev-input-to-output.cwl"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/twostep-both-remote.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\n  DockerRequirement:\n    dockerPull: arvados/fed-test:twostep-both-remote\ninputs:\n  inp:\n    type: File\n    inputBinding: {}\n  md5sumCluster: string\n  revCluster: string\noutputs:\n  hash:\n    type: File\n    outputSource: md5sum/hash\nsteps:\n  md5sum:\n    in:\n      inp: inp\n      runOnCluster: md5sumCluster\n    out: [hash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: md5sum.cwl\n  rev:\n    in:\n      inp: md5sum/hash\n      runOnCluster: revCluster\n    out: [revhash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: rev.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/twostep-home-to-remote.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\n  DockerRequirement:\n    dockerPull: arvados/fed-test:twostep-home-to-remote\ninputs:\n  inp:\n    type: File\n    inputBinding: {}\n  md5sumCluster: string\n  revCluster: string\noutputs:\n  hash:\n    type: File\n    outputSource: md5sum/hash\nsteps:\n  md5sum:\n    in:\n      inp: inp\n      runOnCluster: md5sumCluster\n    out: [hash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: md5sum.cwl\n  rev:\n    in:\n      inp: md5sum/hash\n      runOnCluster: revCluster\n    out: [revhash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: rev.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/twostep-remote-copy-to-home.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\n  DockerRequirement:\n    dockerPull: arvados/fed-test:twostep-remote-copy-to-home\ninputs:\n  inp:\n    type: File\n    inputBinding: {}\n  md5sumCluster: string\n  revCluster: string\noutputs:\n  hash:\n    type: File\n    outputSource: md5sum/hash\nsteps:\n  md5sum:\n    in:\n      inp: inp\n      runOnCluster: md5sumCluster\n    out: [hash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: md5sum.cwl\n  rev:\n    in:\n      inp: md5sum/hash\n      runOnCluster: revCluster\n    out: [revhash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: rev-input-to-output.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/federation/cases/twostep-remote-to-home.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  InlineJavascriptRequirement: {}\n  DockerRequirement:\n    dockerPull: arvados/fed-test:twostep-remote-to-home\ninputs:\n  inp:\n    type: File\n    inputBinding: {}\n  md5sumCluster: string\n  revCluster: string\noutputs:\n  hash:\n    type: File\n    outputSource: md5sum/hash\nsteps:\n  md5sum:\n    in:\n      inp: inp\n      runOnCluster: md5sumCluster\n    out: [hash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: md5sum.cwl\n  rev:\n    in:\n      inp: md5sum/hash\n      runOnCluster: revCluster\n    out: [revhash]\n    hints:\n      arv:ClusterTarget:\n        cluster_id: $(inputs.runOnCluster)\n    run: rev.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/base-case-input.txt",
    "content": "Call me base-case. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/hint-on-tool.txt",
    "content": "Call me hint-on-tool. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/hint-on-wf.txt",
    "content": "Call me hint-on-wf. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/remote-case-input.txt",
    "content": "Call me remote-case. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/runner-home-step-remote-input.txt",
    "content": "Call me runner-home-step-remote. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/runner-remote-step-home-input.txt",
    "content": "Call me runner-remote-step-home. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/scatter-gather-s1.txt",
    "content": "Call me scatter-gather-s1. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/scatter-gather-s2.txt",
    "content": "Call me scatter-gather-s2. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/scatter-gather-s3.txt",
    "content": "Call me scatter-gather-s3. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/threestep-remote.txt",
    "content": "Call me threestep-remote. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/twostep-both-remote.txt",
    "content": "Call me twostep-both-remote. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/twostep-home-to-remote.txt",
    "content": "Call me twostep-home-to-remote. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/twostep-remote-copy-to-home.txt",
    "content": "Call me twostep-remote-copy-to-home. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/data/twostep-remote-to-home.txt",
    "content": "Call me twostep-remote-to-home. Some years ago--never mind how long precisely--having\nlittle or no money in my purse, and nothing particular to interest me on\nshore, I thought I would sail about a little and see the watery part of\nthe world. It is a way I have of driving off the spleen and regulating\nthe circulation. Whenever I find myself growing grim about the mouth;\nwhenever it is a damp, drizzly November in my soul; whenever I find\nmyself involuntarily pausing before coffin warehouses, and bringing up\nthe rear of every funeral I meet; and especially whenever my hypos get\nsuch an upper hand of me, that it requires a strong moral principle to\nprevent me from deliberately stepping into the street, and methodically\nknocking people's hats off--then, I account it high time to get to sea\nas soon as I can. This is my substitute for pistol and ball. With a\nphilosophical flourish Cato throws himself upon his sword; I quietly\ntake to the ship. There is nothing surprising in this. If they but knew\nit, almost all men in their degree, some time or other, cherish very\nnearly the same feelings towards the ocean with me.\n"
  },
  {
    "path": "sdk/cwl/tests/federation/framework/check-exist.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\nrequirements:\n  InitialWorkDirRequirement:\n    listing:\n      - entryname: config.json\n        entry: |-\n          ${\n          return JSON.stringify({\n            check_collections: inputs.check_collections\n          });\n          }\n  EnvVarRequirement:\n    envDef:\n      ARVADOS_API_HOST: $(inputs.arvados_api_host)\n      ARVADOS_API_TOKEN: $(inputs.arvados_api_token)\n      ARVADOS_API_HOST_INSECURE: $(\"\"+inputs.arvado_api_host_insecure)\n  InlineJavascriptRequirement: {}\nhints:\n  DockerRequirement:\n    dockerPull: arvados/jobs\ninputs:\n  arvados_api_token: string\n  arvado_api_host_insecure: boolean\n  arvados_api_host: string\n  check_collections: string[]\n  preparescript:\n    type: File\n    default:\n      class: File\n      location: check_exist.py\n    inputBinding:\n      position: 1\noutputs:\n  success:\n    type: boolean\n    outputBinding:\n      glob: success\n      loadContents: true\n      outputEval: $(self[0].contents==\"true\")\nbaseCommand: python\n"
  },
  {
    "path": "sdk/cwl/tests/federation/framework/check_exist.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport arvados\nimport json\n\napi = arvados.api()\n\nwith open(\"config.json\") as f:\n    config = json.load(f)\n\nsuccess = True\nfor c in config[\"check_collections\"]:\n    try:\n        api.collections().get(uuid=c).execute()\n    except Exception as e:\n        print(\"Checking for %s got exception %s\" % (c, e))\n        success = False\n\nwith open(\"success\", \"w\") as f:\n    if success:\n        f.write(\"true\")\n    else:\n        f.write(\"false\")\n"
  },
  {
    "path": "sdk/cwl/tests/federation/framework/dockerbuild.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\ninputs:\n  testcase: string\noutputs:\n  imagename:\n    type: string\n    outputBinding:\n      outputEval: $(inputs.testcase)\nrequirements:\n  InitialWorkDirRequirement:\n    listing:\n      - entryname: Dockerfile\n        entry: |-\n          FROM debian@sha256:0a5fcee6f52d5170f557ee2447d7a10a5bdcf715dd7f0250be0b678c556a501b\n          LABEL org.arvados.testcase=\"$(inputs.testcase)\"\narguments: [docker, build, -t, $(inputs.testcase), \".\"]\n"
  },
  {
    "path": "sdk/cwl/tests/federation/framework/prepare.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\nrequirements:\n  InitialWorkDirRequirement:\n    listing:\n      - entryname: input.json\n        entry: $(JSON.stringify(inputs.obj))\n      - entryname: config.json\n        entry: |-\n          ${\n          return JSON.stringify({\n            arvados_cluster_ids: inputs.arvados_cluster_ids,\n            scrub_images: [inputs.scrub_image],\n            scrub_collections: inputs.scrub_collections\n          });\n          }\n  EnvVarRequirement:\n    envDef:\n      ARVADOS_API_HOST: $(inputs.arvados_api_host)\n      ARVADOS_API_TOKEN: $(inputs.arvados_api_token)\n      ARVADOS_API_HOST_INSECURE: $(\"\"+inputs.arvado_api_host_insecure)\n  InlineJavascriptRequirement: {}\nhints:\n  DockerRequirement:\n    dockerPull: arvados/jobs\ninputs:\n  arvados_api_token: string\n  arvado_api_host_insecure: boolean\n  arvados_api_host: string\n  arvados_cluster_ids: string[]\n  wf: File\n  obj: Any\n  scrub_image: string\n  scrub_collections: string[]\n  preparescript:\n    type: File\n    default:\n      class: File\n      location: prepare.py\n    inputBinding:\n      position: 1\noutputs:\n  done:\n    type: boolean\n    outputBinding:\n      outputEval: $(true)\nbaseCommand: python\n"
  },
  {
    "path": "sdk/cwl/tests/federation/framework/prepare.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport arvados\nimport json\n\napi = arvados.api()\n\nwith open(\"config.json\") as f:\n    config = json.load(f)\n\nscrub_collections = set(config[\"scrub_collections\"])\n\nfor cluster_id in config[\"arvados_cluster_ids\"]:\n    images = []\n    for scrub_image in config[\"scrub_images\"]:\n        sp = scrub_image.split(\":\")\n        image_name = sp[0]\n        image_tag = sp[1] if len(sp) > 1 else \"latest\"\n        images.append('{}:{}'.format(image_name, image_tag))\n\n    search_links = api.links().list(\n        filters=[['link_class', '=', 'docker_image_repo+tag'],\n                 ['name', 'in', images]],\n        cluster_id=cluster_id).execute()\n\n    head_uuids = [lk[\"head_uuid\"] for lk in search_links[\"items\"]]\n    cols = api.collections().list(filters=[[\"uuid\", \"in\", head_uuids]],\n                                  cluster_id=cluster_id).execute()\n    for c in cols[\"items\"]:\n        scrub_collections.add(c[\"portable_data_hash\"])\n    for lk in search_links[\"items\"]:\n        api.links().delete(uuid=lk[\"uuid\"]).execute()\n\nfor cluster_id in config[\"arvados_cluster_ids\"]:\n    matches = api.collections().list(filters=[[\"portable_data_hash\", \"in\", list(scrub_collections)]],\n                                     select=[\"uuid\", \"portable_data_hash\"], cluster_id=cluster_id).execute()\n    for m in matches[\"items\"]:\n        api.collections().delete(uuid=m[\"uuid\"]).execute()\n        print(\"Scrubbed %s (%s)\" % (m[\"uuid\"], m[\"portable_data_hash\"]))\n"
  },
  {
    "path": "sdk/cwl/tests/federation/framework/run-acr.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\ninputs:\n  acr:\n    type: string?\n    default: arvados-cwl-runner\n    inputBinding:\n      position: 1\n  arvados_api_host: string\n  arvados_api_token: string\n  arvado_api_host_insecure:\n    type: boolean\n    default: false\n  runner_cluster:\n    type: string?\n    inputBinding:\n      prefix: --submit-runner-cluster\n      position: 2\n  wf:\n    type: File\n    inputBinding:\n      position: 3\n  obj: Any\nrequirements:\n  InitialWorkDirRequirement:\n    listing:\n      - entryname: input.json\n        entry: $(JSON.stringify(inputs.obj))\n  EnvVarRequirement:\n    envDef:\n      ARVADOS_API_HOST: $(inputs.arvados_api_host)\n      ARVADOS_API_TOKEN: $(inputs.arvados_api_token)\n      ARVADOS_API_HOST_INSECURE: $(\"\"+inputs.arvado_api_host_insecure)\n  InlineJavascriptRequirement: {}\noutputs:\n  out:\n    type: Any\n    outputBinding:\n      glob: output.json\n      loadContents: true\n      #outputEval: $(JSON.parse(self[0].contents))\n      outputEval: $(self[0].contents)\nstdout: output.json\narguments:\n  - valueFrom: --disable-reuse\n    position: 2\n  - valueFrom: --always-submit-runner\n    position: 2\n  - valueFrom: --api=containers\n    position: 2\n  - valueFrom: input.json\n    position: 4"
  },
  {
    "path": "sdk/cwl/tests/federation/framework/testcase.cwl",
    "content": "#!/usr/bin/env cwl-runner\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\n  cwltool: \"http://commonwl.org/cwltool#\"\nhints:\n  cwltool:Secrets:\n    secrets: [arvados_api_token]\nrequirements:\n  StepInputExpressionRequirement: {}\n  InlineJavascriptRequirement: {}\n  SubworkflowFeatureRequirement: {}\ninputs:\n  arvados_api_token: string\n  arvado_api_host_insecure:\n    type: boolean\n    default: false\n  arvados_api_hosts: string[]\n  arvados_cluster_ids: string[]\n  acr: string?\n  wf: File\n  obj: Any\n  scrub_image: string\n  scrub_collections: string[]\n  runner_cluster: string?\noutputs:\n  out:\n    type: Any\n    outputSource: run-acr/out\n  success:\n    type: boolean\n    outputSource: check-result/success\nsteps:\n  dockerbuild:\n    in:\n      testcase: scrub_image\n    out: [imagename]\n    run: dockerbuild.cwl\n  prepare:\n    in:\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_host: {source: arvados_api_hosts, valueFrom: \"$(self[0])\"}\n      arvados_cluster_ids: arvados_cluster_ids\n      wf: wf\n      obj: obj\n      scrub_image: scrub_image\n      scrub_collections: scrub_collections\n    out: [done]\n    run: prepare.cwl\n  run-acr:\n    in:\n      prepare: prepare/done\n      image-ready: dockerbuild/imagename\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_host: {source: arvados_api_hosts, valueFrom: \"$(self[0])\"}\n      runner_cluster: runner_cluster\n      acr: acr\n      wf: wf\n      obj: obj\n    out: [out]\n    run: run-acr.cwl\n  check-result:\n    in:\n      acr-done: run-acr/out\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_host: {source: arvados_api_hosts, valueFrom: \"$(self[0])\"}\n      check_collections: scrub_collections\n    out: [success]\n    run: check-exist.cwl"
  },
  {
    "path": "sdk/cwl/tests/federation/main.cwl",
    "content": "#!/usr/bin/env cwl-runner\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\n  cwltool: \"http://commonwl.org/cwltool#\"\nhints:\n  cwltool:Secrets:\n    secrets: [arvados_api_token]\nrequirements:\n  StepInputExpressionRequirement: {}\n  InlineJavascriptRequirement: {}\n  SubworkflowFeatureRequirement: {}\ninputs:\n  arvados_api_token: string\n  arvado_api_host_insecure:\n    type: boolean\n    default: false\n  arvados_api_hosts: string[]\n  arvados_cluster_ids: string[]\n  acr: string?\n  testcases:\n    type: string[]\n    default:\n      - base-case\n      - runner-home-step-remote\n      - runner-remote-step-home\noutputs:\n  base-case-success:\n    type: Any\n    outputSource: base-case/success\n  runner-home-step-remote-success:\n    type: Any\n    outputSource: runner-home-step-remote/success\n  runner-remote-step-home-success:\n    type: Any\n    outputSource: runner-remote-step-home/success\n  remote-case-success:\n    type: Any\n    outputSource: remote-case/success\n  twostep-home-to-remote-success:\n    type: Any\n    outputSource: twostep-home-to-remote/success\n  twostep-remote-to-home-success:\n    type: Any\n    outputSource: twostep-remote-to-home/success\n  twostep-both-remote-success:\n    type: Any\n    outputSource: twostep-both-remote/success\n  twostep-remote-copy-to-home-success:\n    type: Any\n    outputSource: twostep-remote-copy-to-home/success\n  scatter-gather-success:\n    type: Any\n    outputSource: scatter-gather/success\n  threestep-remote-success:\n    type: Any\n    outputSource: threestep-remote/success\n  hint-on-wf-success:\n    type: Any\n    outputSource: hint-on-wf/success\n  hint-on-tool-success:\n    type: Any\n    outputSource: hint-on-tool/success\n\nsteps:\n  base-case:\n    doc: |\n      Base case (no federation), single step workflow with both the\n      runner and step on the same cluster.\n    in:\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_hosts: arvados_api_hosts\n      arvados_cluster_ids: arvados_cluster_ids\n      acr: acr\n      wf:\n        default:\n          class: File\n          location: cases/base-case.cwl\n          secondaryFiles:\n            - class: File\n              location: cases/md5sum.cwl\n      obj:\n        default:\n          inp:\n            class: File\n            location: data/base-case-input.txt\n        valueFrom: |-\n          ${\n          self[\"runOnCluster\"] = inputs.arvados_cluster_ids[0];\n          return self;\n          }\n      scrub_image: {default: \"arvados/fed-test:base-case\"}\n      scrub_collections:\n        default:\n          - 031a4ced0aa99de90fb630568afc6e9b+67   # input collection\n          - eb93a6718eb1a1a8ee9f66ee7d683472+51   # md5sum output collection\n          - f654d4048612135f4a5e7707ec0fcf3e+112  # final output json\n    out: [out, success]\n    run: framework/testcase.cwl\n\n  runner-home-step-remote:\n    doc: |\n      Single step workflow with the runner on the home cluster and the\n      step on the remote cluster.  
ClusterTarget hint is on the workflow step.\n    in:\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_hosts: arvados_api_hosts\n      arvados_cluster_ids: arvados_cluster_ids\n      acr: acr\n      wf:\n        default:\n          class: File\n          location: cases/runner-home-step-remote.cwl\n          secondaryFiles:\n            - class: File\n              location: cases/md5sum.cwl\n      obj:\n        default:\n          inp:\n            class: File\n            location: data/runner-home-step-remote-input.txt\n        valueFrom: |-\n          ${\n          self[\"runOnCluster\"] = inputs.arvados_cluster_ids[1];\n          return self;\n          }\n      runner_cluster: { valueFrom: \"$(inputs.arvados_cluster_ids[0])\" }\n      scrub_image: {default: \"arvados/fed-test:runner-home-step-remote\"}\n      scrub_collections:\n        default:\n          - 3bc373e38751fe13dcbd62778d583242+81   # input collection\n          - 428e6d91e41a3af3ae287b453949e7fd+51   # md5sum output collection\n          - a4b0ddd866525655e8480f83a1ca83c6+112  # runner output json\n    out: [out, success]\n    run: framework/testcase.cwl\n\n  runner-remote-step-home:\n    doc: |\n      Single step workflow with the runner on the remote cluster and the\n      step on the home cluster.\n    in:\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_hosts: arvados_api_hosts\n      arvados_cluster_ids: arvados_cluster_ids\n      acr: acr\n      wf:\n        default:\n          class: File\n          location: cases/runner-remote-step-home.cwl\n          secondaryFiles:\n            - class: File\n              location: cases/md5sum.cwl\n      obj:\n        default:\n          inp:\n            class: File\n            location: data/runner-remote-step-home-input.txt\n        valueFrom: |-\n          ${\n          self[\"runOnCluster\"] = inputs.arvados_cluster_ids[0];\n          return self;\n          }\n      runner_cluster: { valueFrom: \"$(inputs.arvados_cluster_ids[1])\" }\n      scrub_image: {default: \"arvados/fed-test:runner-remote-step-home\"}\n      scrub_collections:\n        default:\n          - 25fe10d8e8530329a738de69d9bc8ab5+81   # input collection\n          - 7f052d1a04b851b6f73fba77c7802e1d+51   # md5sum output collection\n          - ecb639201f454b6493757f5117f540df+112  # runner output json\n    out: [out, success]\n    run: framework/testcase.cwl\n\n  remote-case:\n    doc: |\n      Single step workflow with both the runner and the step on the\n      remote cluster.\n    in:\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_hosts: arvados_api_hosts\n      arvados_cluster_ids: arvados_cluster_ids\n      acr: acr\n      wf:\n        default:\n          class: File\n          location: cases/remote-case.cwl\n          secondaryFiles:\n            - class: File\n              location: cases/md5sum.cwl\n      obj:\n        default:\n          inp:\n            class: File\n            location: data/remote-case-input.txt\n        valueFrom: |-\n          ${\n          self[\"runOnCluster\"] = inputs.arvados_cluster_ids[1];\n          return self;\n          }\n      runner_cluster: { valueFrom: \"$(inputs.arvados_cluster_ids[1])\" }\n      scrub_image: {default: \"arvados/fed-test:remote-case\"}\n      scrub_collections:\n        default:\n          - 
fccd49fdef8e452295f718208abafd88+69   # input collection\n          - 58c0e8ea6b148134ef8577ee11307eec+51   # md5sum output collection\n          - 1fd679c5ab64c123b9764024dbf560f0+112  # final output json\n    out: [out, success]\n    run: framework/testcase.cwl\n\n  twostep-home-to-remote:\n    doc: |\n      Two step workflow.  The runner is on the home cluster, the first\n      step is on the home cluster, the second step is on the remote\n      cluster.\n    in:\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_hosts: arvados_api_hosts\n      arvados_cluster_ids: arvados_cluster_ids\n      acr: acr\n      wf:\n        default:\n          class: File\n          location: cases/twostep-home-to-remote.cwl\n          secondaryFiles:\n            - class: File\n              location: cases/md5sum.cwl\n            - class: File\n              location: cases/rev.cwl\n      obj:\n        default:\n          inp:\n            class: File\n            location: data/twostep-home-to-remote.txt\n        valueFrom: |-\n          ${\n          self[\"md5sumCluster\"] = inputs.arvados_cluster_ids[0];\n          self[\"revCluster\"] = inputs.arvados_cluster_ids[1];\n          return self;\n          }\n      runner_cluster: { valueFrom: \"$(inputs.arvados_cluster_ids[0])\" }\n      scrub_image: {default: \"arvados/fed-test:twostep-home-to-remote\"}\n      scrub_collections:\n        default:\n          - 268a54947fb75115cfe05bb54cc62c30+74   # input collection\n          - 400f03b8c5d2dc3dcb513a21b626ef88+51   # md5sum output collection\n          - 3738166916ca5f6f6ad12bf7e06b4a21+51   # rev output collection\n          - bc37c17a37aa25229e5de1339b27fbcc+112  # runner output json\n    out: [out, success]\n    run: framework/testcase.cwl\n\n  twostep-remote-to-home:\n    doc: |\n      Two step workflow.  The runner is on the home cluster, the first\n      step is on the remote cluster, the second step is on the home\n      cluster.\n    in:\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_hosts: arvados_api_hosts\n      arvados_cluster_ids: arvados_cluster_ids\n      acr: acr\n      wf:\n        default:\n          class: File\n          location: cases/twostep-remote-to-home.cwl\n          secondaryFiles:\n            - class: File\n              location: cases/md5sum.cwl\n            - class: File\n              location: cases/rev.cwl\n      obj:\n        default:\n          inp:\n            class: File\n            location: data/twostep-remote-to-home.txt\n        valueFrom: |-\n          ${\n          self[\"md5sumCluster\"] = inputs.arvados_cluster_ids[1];\n          self[\"revCluster\"] = inputs.arvados_cluster_ids[0];\n          return self;\n          }\n      runner_cluster: { valueFrom: \"$(inputs.arvados_cluster_ids[0])\" }\n      scrub_image: {default: \"arvados/fed-test:twostep-remote-to-home\"}\n      scrub_collections:\n        default:\n          - cce89b9f7b6e163978144051ce5f071a+74   # input collection\n          - 0c358c3af63644c6343766feff1b7238+51   # md5sum output collection\n          - 33fb7d512bf21f04847eca58cea46e74+51   # rev output collection\n          - 912e04aa3db04aba008cf5cd46c277b2+112  # runner output json\n    out: [out, success]\n    run: framework/testcase.cwl\n\n  twostep-both-remote:\n    doc: |\n      Two step workflow.  
The runner is on the home cluster, both steps are\n      on the remote cluster.\n    in:\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_hosts: arvados_api_hosts\n      arvados_cluster_ids: arvados_cluster_ids\n      acr: acr\n      wf:\n        default:\n          class: File\n          location: cases/twostep-both-remote.cwl\n          secondaryFiles:\n            - class: File\n              location: cases/md5sum.cwl\n            - class: File\n              location: cases/rev.cwl\n      obj:\n        default:\n          inp:\n            class: File\n            location: data/twostep-both-remote.txt\n        valueFrom: |-\n          ${\n          self[\"md5sumCluster\"] = inputs.arvados_cluster_ids[1];\n          self[\"revCluster\"] = inputs.arvados_cluster_ids[1];\n          return self;\n          }\n      runner_cluster: { valueFrom: \"$(inputs.arvados_cluster_ids[0])\" }\n      scrub_image: {default: \"arvados/fed-test:twostep-both-remote\"}\n      scrub_collections:\n        default:\n          - 3c5e39939cf197d304ac1eac20841238+71   # input collection\n          - 3edb99aa607731593969cdab663d65b4+51   # md5sum output collection\n          - a91625b7139e60fe61a88cae42fbee13+51   # rev output collection\n          - ddfa58a81953dad08436d571615dd584+112  # runner output json\n    out: [out, success]\n    run: framework/testcase.cwl\n\n  twostep-remote-copy-to-home:\n    doc: |\n      Two step workflow.  The runner is on the home cluster, the first\n      step is on the remote cluster, the second step is on the home\n      cluster, and propagates its input file directly from input to\n      output by symlinking the input file in the output directory.\n      Tests that crunch-run will copy blocks from remote to local\n      when preparing output collection.\n    in:\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_hosts: arvados_api_hosts\n      arvados_cluster_ids: arvados_cluster_ids\n      acr: acr\n      wf:\n        default:\n          class: File\n          location: cases/twostep-remote-copy-to-home.cwl\n          secondaryFiles:\n            - class: File\n              location: cases/md5sum.cwl\n            - class: File\n              location: cases/rev-input-to-output.cwl\n      obj:\n        default:\n          inp:\n            class: File\n            location: data/twostep-remote-copy-to-home.txt\n        valueFrom: |-\n          ${\n          self[\"md5sumCluster\"] = inputs.arvados_cluster_ids[1];\n          self[\"revCluster\"] = inputs.arvados_cluster_ids[0];\n          return self;\n          }\n      runner_cluster: { valueFrom: \"$(inputs.arvados_cluster_ids[0])\" }\n      scrub_image: {default: \"arvados/fed-test:twostep-remote-copy-to-home\"}\n      scrub_collections:\n        default:\n          - 538887bc29a3098bf79abdb8536d17bd+79   # input collection\n          - 14da0e0d52d7ab2945427074b275e9ee+51   # md5sum output collection\n          - 2d3a4a840077390a0d7788f169eaba89+112  # rev output collection\n          - 2d3a4a840077390a0d7788f169eaba89+112  # runner output json\n    out: [out, success]\n    run: framework/testcase.cwl\n\n  scatter-gather:\n    doc: \"\"\n    in:\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_hosts: arvados_api_hosts\n      arvados_cluster_ids: arvados_cluster_ids\n      acr: acr\n      wf:\n        default:\n  
        class: File\n          location: cases/scatter-gather.cwl\n          secondaryFiles:\n            - class: File\n              location: cases/md5sum.cwl\n            - class: File\n              location: cases/cat.cwl\n      obj:\n        default:\n          shards:\n            - class: File\n              location: data/scatter-gather-s1.txt\n            - class: File\n              location: data/scatter-gather-s2.txt\n            - class: File\n              location: data/scatter-gather-s3.txt\n        valueFrom: |-\n          ${\n          self[\"clusters\"] = inputs.arvados_cluster_ids;\n          return self;\n          }\n      runner_cluster: { valueFrom: \"$(inputs.arvados_cluster_ids[0])\" }\n      scrub_image: {default: \"arvados/fed-test:scatter-gather\"}\n      scrub_collections:\n        default:\n          - 99cc18329bce1b4a5fe6c4cf60477668+209  # input collection\n          - 2e570e844e03c7027baad148642d726f+51   # s1 md5sum output collection\n          - 61c88ee7811d0b849b5c06376eb065a6+51   # s2 md5sum output collection\n          - 85aaf18d638045fe609e025d3a319b2a+51   # s3 md5sum output collection\n          - ec44bcba77e65128f1a8f843d881ede4+56   # cat output collection\n          - 89de265942800ae36549109969940363+117  # runner output json\n    out: [out, success]\n    run: framework/testcase.cwl\n\n  threestep-remote:\n    doc: \"\"\n    in:\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_hosts: arvados_api_hosts\n      arvados_cluster_ids: arvados_cluster_ids\n      acr: acr\n      wf:\n        default:\n          class: File\n          location: cases/threestep-remote.cwl\n          secondaryFiles:\n            - class: File\n              location: cases/md5sum.cwl\n            - class: File\n              location: cases/rev-input-to-output.cwl\n      obj:\n        default:\n          inp:\n            class: File\n            location: data/threestep-remote.txt\n        valueFrom: |-\n          ${\n          self[\"clusterA\"] = inputs.arvados_cluster_ids[0];\n          self[\"clusterB\"] = inputs.arvados_cluster_ids[1];\n          self[\"clusterC\"] = inputs.arvados_cluster_ids[2];\n          return self;\n          }\n      runner_cluster: { valueFrom: \"$(inputs.arvados_cluster_ids[0])\" }\n      scrub_image: {default: \"arvados/fed-test:threestep-remote\"}\n      scrub_collections:\n        default:\n          - 9fbf33e62876357fe134f619865cc5a5+68   # input collection\n          - 210c5f2a716f6689b04316acd4928c10+51   # md5sum output collection\n          - 3abea7506269d5ebf61fb17c78bbd2af+105  # revB output\n          - 9e1b3acb28949759ad07e4c9740bbaa5+113  # revC output\n          - 8c86dbec7de7948871b5e168ede417e1+120  # runner output json\n    out: [out, success]\n    run: framework/testcase.cwl\n\n  hint-on-wf:\n    doc: |\n      Single step workflow with the runner on the home cluster and the\n      step on the remote cluster.  
ClusterTarget hint is at the workflow level.\n    in:\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_hosts: arvados_api_hosts\n      arvados_cluster_ids: arvados_cluster_ids\n      acr: acr\n      wf:\n        default:\n          class: File\n          location: cases/hint-on-wf.cwl\n          secondaryFiles:\n            - class: File\n              location: cases/md5sum.cwl\n      obj:\n        default:\n          inp:\n            class: File\n            location: data/hint-on-wf.txt\n        valueFrom: |-\n          ${\n          self[\"runOnCluster\"] = inputs.arvados_cluster_ids[1];\n          return self;\n          }\n      runner_cluster: { valueFrom: \"$(inputs.arvados_cluster_ids[0])\" }\n      scrub_image: {default: \"arvados/fed-test:hint-on-wf\"}\n      scrub_collections:\n        default:\n          - 862433f328041b2525c90b1dc3c462fd+62   # input collection\n          - 9a68b0b9720977faba8a28e75a4398b7+51   # md5sum output collection\n          - 6a601cddb36ee2f766783b1aa9ff8d66+112  # runner output json\n    out: [out, success]\n    run: framework/testcase.cwl\n\n  hint-on-tool:\n    doc: |\n      Single step workflow with the runner on the home cluster and the\n      step on the remote cluster.  ClusterTarget hint is at the tool level.\n    in:\n      arvados_api_token: arvados_api_token\n      arvado_api_host_insecure: arvado_api_host_insecure\n      arvados_api_hosts: arvados_api_hosts\n      arvados_cluster_ids: arvados_cluster_ids\n      acr: acr\n      wf:\n        default:\n          class: File\n          location: cases/hint-on-tool.cwl\n          secondaryFiles:\n            - class: File\n              location: cases/md5sum-tool-hint.cwl\n      obj:\n        default:\n          inp:\n            class: File\n            location: data/hint-on-tool.txt\n        valueFrom: |-\n          ${\n          self[\"runOnCluster\"] = inputs.arvados_cluster_ids[1];\n          return self;\n          }\n      runner_cluster: { valueFrom: \"$(inputs.arvados_cluster_ids[0])\" }\n      scrub_image: {default: \"arvados/fed-test:hint-on-tool\"}\n      scrub_collections:\n        default:\n          - 6803004a4f8db9f8d1d54f6229851599+64   # input collection\n          - cacb0d56235564b5ff485c5b31215ab5+51   # md5sum output collection\n          - 2b50af43fdd84a9e906be2d54b92cddf+112  # runner output json\n    out: [out, success]\n    run: framework/testcase.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/hello.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nhello:\n  class: File\n  location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt\n"
  },
  {
    "path": "sdk/cwl/tests/hg19/hg19.fa",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/hg19/hg19.fa.amb",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/hg19/hg19.fa.ann",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/hg19/hg19.fa.fai",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/hw.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nprint(\"Hello world\")\n"
  },
  {
    "path": "sdk/cwl/tests/input/blorp.txt",
    "content": "blopper blubber\n"
  },
  {
    "path": "sdk/cwl/tests/keep-dir-test-input.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: CommandLineTool\ncwlVersion: v1.0\nrequirements:\n  - class: ShellCommandRequirement\ninputs:\n  indir:\n    type: Directory\n    inputBinding:\n      prefix: cd\n      position: -1\noutputs:\n  outlist:\n    type: File\n    outputBinding:\n      glob: output.txt\narguments: [\n  {shellQuote: false, valueFrom: \"&&\"},\n  \"find\", \".\",\n  {shellQuote: false, valueFrom: \"|\"},\n  \"sort\"]\nstdout: output.txt"
  },
  {
    "path": "sdk/cwl/tests/keep-dir-test-input2.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: CommandLineTool\ncwlVersion: v1.0\nrequirements:\n  - class: ShellCommandRequirement\ninputs:\n  indir:\n    type: Directory\n    inputBinding:\n      prefix: cd\n      position: -1\n    default:\n      class: Directory\n      location: keep:d7514270f356df848477718d58308cc4+94\noutputs:\n  outlist:\n    type: File\n    outputBinding:\n      glob: output.txt\narguments: [\n  {shellQuote: false, valueFrom: \"&&\"},\n  \"find\", \".\",\n  {shellQuote: false, valueFrom: \"|\"},\n  \"sort\"]\nstdout: output.txt"
  },
  {
    "path": "sdk/cwl/tests/keep-dir-test-input3.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: CommandLineTool\ncwlVersion: v1.0\nrequirements:\n  - class: ShellCommandRequirement\ninputs:\n  indir:\n    type: Directory\n    inputBinding:\n      prefix: cd\n      position: -1\n    default:\n      class: Directory\n      location: keep:d7514270f356df848477718d58308cc4+94/\noutputs:\n  outlist:\n    type: File\n    outputBinding:\n      glob: output.txt\narguments: [\n  {shellQuote: false, valueFrom: \"&&\"},\n  \"find\", \".\",\n  {shellQuote: false, valueFrom: \"|\"},\n  \"sort\"]\nstdout: output.txt"
  },
  {
    "path": "sdk/cwl/tests/listing-job.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nd:\n  class: Directory\n  location: tmp1"
  },
  {
    "path": "sdk/cwl/tests/makes_intermediates/echo.cwl",
    "content": "class: CommandLineTool\ncwlVersion: v1.0\nrequirements:\n  InitialWorkDirRequirement:\n    listing:\n      - $(inputs.inp1)\n      - $(inputs.inp2)\n      - $(inputs.inp3)\ninputs:\n  inp1: File\n  inp2: [File, Directory]\n  inp3: Directory\noutputs: []\narguments: [echo, $(inputs.inp1), $(inputs.inp2), $(inputs.inp3)]\n"
  },
  {
    "path": "sdk/cwl/tests/makes_intermediates/hello1.txt",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/makes_intermediates/run_in_single.cwl",
    "content": "cwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  SubworkflowFeatureRequirement: {}\ninputs:\n  inp1:\n    type: File\n    default:\n      class: File\n      location: hello1.txt\n  inp2:\n    type: [File, Directory]\n    default:\n      class: File\n      basename: \"hello2.txt\"\n      contents: \"Hello world\"\n  inp3:\n    type: [File, Directory]\n    default:\n      class: Directory\n      basename: inp3\n      listing:\n        - class: File\n          basename: \"hello3.txt\"\n          contents: \"hello world\"\noutputs: []\nsteps:\n  step1:\n    requirements:\n      arv:RunInSingleContainer: {}\n    in:\n      inp1: inp1\n      inp2: inp2\n      inp3: inp3\n    out: []\n    run: subwf.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/makes_intermediates/subwf.cwl",
    "content": "cwlVersion: v1.0\nclass: Workflow\ninputs:\n  inp1: File\n  inp2: File\n  inp3: Directory\noutputs: []\nsteps:\n  step1:\n    in:\n      inp1: inp1\n      inp2: inp2\n      inp3: inp3\n    out: []\n    run: echo.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/matcher.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport difflib\nimport json\nimport re\n\nclass JsonDiffMatcher(object):\n    \"\"\"Raise AssertionError with a readable JSON diff when not __eq__().\n\n    Used with assert_called_with() so it's possible for a human to see\n    the differences between expected and actual call arguments that\n    include non-trivial data structures.\n    \"\"\"\n    def __init__(self, expected):\n        self.expected = expected\n\n    def __eq__(self, actual):\n        expected_json = json.dumps(self.expected, sort_keys=True, indent=2)\n        actual_json = json.dumps(actual, sort_keys=True, indent=2)\n        if expected_json != actual_json:\n            raise AssertionError(\"\".join(difflib.context_diff(\n                expected_json.splitlines(1),\n                actual_json.splitlines(1),\n                fromfile=\"Expected\", tofile=\"Actual\")))\n        return True\n\n\ndef StripYAMLComments(yml):\n    return re.sub(r'(?ms)^(#.*?\\n)*\\n*', '', yml)\n"
  },
  {
    "path": "sdk/cwl/tests/mock_discovery.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport json\nimport arvados\n\n_rootDesc = None\n\ndef get_rootDesc():\n    global _rootDesc\n    if not _rootDesc:\n        try:\n            _rootDesc = arvados.api('v1')._rootDesc\n        except ValueError:\n            raise Exception(\"Test requires an running API server to fetch discovery document\")\n    return _rootDesc\n"
  },
  {
    "path": "sdk/cwl/tests/noreuse.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\ninputs: []\noutputs: []\nsteps:\n  step1:\n    in:\n      message:\n        default: \"hello world\"\n    out: [output]\n    hints:\n      arv:ReuseRequirement:\n        enableReuse: false\n    run: stdout.cwl"
  },
  {
    "path": "sdk/cwl/tests/octo.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ninp:\n  class: File\n  location: \"octothorpe/item %231.txt\""
  },
  {
    "path": "sdk/cwl/tests/oom/19975-oom-mispelled.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: CommandLineTool\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nhints:\n  arv:OutOfMemoryRetry:\n    # legacy misspelled name, should behave exactly the same\n    memoryRetryMultipler: 2\n  ResourceRequirement:\n    ramMin: 256\n  arv:APIRequirement: {}\ninputs:\n  fakeoom: File\noutputs: []\narguments: [python3, $(inputs.fakeoom)]\n"
  },
  {
    "path": "sdk/cwl/tests/oom/19975-oom.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: CommandLineTool\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nhints:\n  arv:OutOfMemoryRetry:\n    memoryRetryMultiplier: 2\n  ResourceRequirement:\n    ramMin: 256\n  arv:APIRequirement: {}\ninputs:\n  fakeoom: File\noutputs: []\narguments: [python3, $(inputs.fakeoom)]\n"
  },
  {
    "path": "sdk/cwl/tests/oom/19975-oom3.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: CommandLineTool\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nhints:\n  arv:OutOfMemoryRetry:\n    memoryRetryMultiplier: 2\n    memoryErrorRegex: Whoops\n  ResourceRequirement:\n    ramMin: 256\n  arv:APIRequirement: {}\ninputs:\n  fakeoom: File\noutputs: []\narguments: [python3, $(inputs.fakeoom)]\n"
  },
  {
    "path": "sdk/cwl/tests/oom/fakeoom.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport sys\nimport time\nimport arvados\n\napi = arvados.api()\ncurrent_container = api.containers().current().execute()\n\nif current_container[\"runtime_constraints\"][\"ram\"] < (512*1024*1024):\n    sys.exit(137)\n"
  },
  {
    "path": "sdk/cwl/tests/oom/fakeoom.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfakeoom:\n  class: File\n  location: fakeoom.py\n"
  },
  {
    "path": "sdk/cwl/tests/oom/fakeoom2.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport sys\nimport time\nimport arvados\n\napi = arvados.api()\ncurrent_container = api.containers().current().execute()\n\nif current_container[\"runtime_constraints\"][\"ram\"] < (512*1024*1024):\n    raise MemoryError()\n"
  },
  {
    "path": "sdk/cwl/tests/oom/fakeoom2.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfakeoom:\n  class: File\n  location: fakeoom2.py\n"
  },
  {
    "path": "sdk/cwl/tests/oom/fakeoom3.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport sys\nimport time\nimport arvados\n\napi = arvados.api()\ncurrent_container = api.containers().current().execute()\n\nif current_container[\"runtime_constraints\"][\"ram\"] < (512*1024*1024):\n    print(\"Whoops\")\n    sys.exit(1)\n"
  },
  {
    "path": "sdk/cwl/tests/oom/fakeoom3.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfakeoom:\n  class: File\n  location: fakeoom3.py\n"
  },
  {
    "path": "sdk/cwl/tests/order/empty_order.json",
    "content": "{}\n"
  },
  {
    "path": "sdk/cwl/tests/order/inputs_test_order.json",
    "content": "{\n    \"fileInput\": {\n        \"class\": \"File\",\n        \"path\": \"../input/blorp.txt\"\n    },\n    \"boolInput\": true,\n    \"floatInput\": 1.234,\n    \"optionalFloatInput\": null\n}\n"
  },
  {
    "path": "sdk/cwl/tests/scripts/download_all_data.sh",
    "content": "#!/bin/sh\n\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\necho bubble\n"
  },
  {
    "path": "sdk/cwl/tests/secondary/dir/hg19.fa",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/secondary/dir/hg19.fa.amb",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/secondary/dir/hg19.fa.ann",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/secondary/dir/hg19.fa.fai",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/secondary/ls.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\ninputs:\n  i:\n    type: File\n    inputBinding:\n      position: 1\n    secondaryFiles:\n      - .fai\noutputs: []\narguments: [ls, $(inputs.i), $(inputs.i.path).fai]\n"
  },
  {
    "path": "sdk/cwl/tests/secondary/sub.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\n  cwltool: \"http://commonwl.org/cwltool#\"\ninputs:\n  i:\n    type: File\n    secondaryFiles:\n      - .fai\noutputs: []\nsteps:\n  step1:\n    in:\n      i: i\n    out: []\n    run: ls.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/secondary/wf-job.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ni:\n  class: File\n  location: keep:f225e6259bdd63bc7240599648dde9f1+97/hg19.fa\n"
  },
  {
    "path": "sdk/cwl/tests/secondary/wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\n  cwltool: \"http://commonwl.org/cwltool#\"\nrequirements:\n  SubworkflowFeatureRequirement: {}\n  DockerRequirement:\n    dockerPull: arvados/jobs:1.4.0.20190604172024\ninputs:\n  i:\n    type: File\n    # secondaryFiles:\n    #   - .fai\n    #   - .ann\n    #   - .amb\noutputs: []\nsteps:\n  step1:\n    in:\n      i: i\n    out: []\n    run: sub.cwl\n    requirements:\n      arv:RunInSingleContainer: {}\n"
  },
  {
    "path": "sdk/cwl/tests/secondaryFiles/example1.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: Workflow\ncwlVersion: v1.0\ninputs:\n  toplevel_input: File\noutputs: []\nsteps:\n  step1:\n    in:\n      step_input: toplevel_input\n    out: []\n    run:\n      id: sub\n      class: CommandLineTool\n      inputs:\n        step_input:\n          type: File\n          secondaryFiles:\n            - .idx\n      outputs: []\n      baseCommand: echo\n"
  },
  {
    "path": "sdk/cwl/tests/secondaryFiles/example3.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: CommandLineTool\ncwlVersion: v1.0\ninputs:\n  step_input:\n    type: File\n    secondaryFiles:\n      - .idx\n    default:\n      class: File\n      location: hello.txt\noutputs: []\nbaseCommand: echo\n"
  },
  {
    "path": "sdk/cwl/tests/secondaryFiles/hello.txt",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/secondaryFiles/hello.txt.idx",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/secondaryFiles/inp3.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ntoplevel_input:\n  class: File\n  location: keep:4d8a70b1e63b2aad6984e40e338e2373+69/hello.txt"
  },
  {
    "path": "sdk/cwl/tests/secret_test_job.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\npw: blorp\n"
  },
  {
    "path": "sdk/cwl/tests/stdout.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\nbaseCommand: echo\nstdout: output.txt\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nhints:\n  arv:RuntimeConstraints:\n    outputDirType: local_output_dir\ninputs:\n  message:\n    type: string\n    inputBinding:\n      position: 1\noutputs:\n  output:\n    type: stdout\n"
  },
  {
    "path": "sdk/cwl/tests/submit_test_job.json",
    "content": "{\n    \"x\": {\n        \"class\": \"File\",\n        \"path\": \"input/blorp.txt\"\n    },\n    \"y\": {\n        \"class\": \"Directory\",\n        \"location\": \"keep:99999999999999999999999999999998+99\",\n        \"listing\": [{\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999998+99/file1.txt\"\n        }]\n    },\n    \"z\": {\n        \"class\": \"Directory\",\n        \"basename\": \"anonymous\",\n        \"listing\": [{\n            \"basename\": \"renamed.txt\",\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999998+99/file1.txt\"\n        }],\n        \"location\": \"_:df80736f-f14d-4b10-b2e3-03aa27f034bb\"\n    }\n}\n"
  },
  {
    "path": "sdk/cwl/tests/submit_test_job_missing.json",
    "content": "{\n    \"x\": {\n        \"class\": \"File\",\n        \"path\": \"input/blorp.txt\"\n    },\n    \"y\": {\n        \"class\": \"Directory\",\n        \"location\": \"keep:99999999999999999999999999999998+99\",\n        \"listing\": [{\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999998+99/file1.txt\"\n        }]\n    }\n}\n"
  },
  {
    "path": "sdk/cwl/tests/submit_test_job_s3.json",
    "content": "{\n    \"x\": {\n        \"class\": \"File\",\n        \"location\": \"s3://examplebucket/blorp.txt\"\n    },\n    \"y\": {\n        \"class\": \"Directory\",\n        \"location\": \"keep:99999999999999999999999999999998+99\",\n        \"listing\": [{\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999998+99/file1.txt\"\n        }]\n    },\n    \"z\": {\n        \"class\": \"Directory\",\n        \"basename\": \"anonymous\",\n        \"listing\": [{\n            \"basename\": \"renamed.txt\",\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999998+99/file1.txt\"\n        }],\n        \"location\": \"_:df80736f-f14d-4b10-b2e3-03aa27f034bb\"\n    }\n}\n"
  },
  {
    "path": "sdk/cwl/tests/submit_test_job_with_inconsistent_uuids.json",
    "content": "{\n    \"x\": {\n        \"class\": \"File\",\n        \"path\": \"input/blorp.txt\"\n    },\n    \"y\": {\n        \"class\": \"Directory\",\n        \"location\": \"keep:99999999999999999999999999999998+99\",\n        \"http://arvados.org/cwl#collectionUUID\": \"zzzzz-4zz18-zzzzzzzzzzzzzzz\",\n        \"listing\": [{\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999997+99/file1.txt\",\n            \"http://arvados.org/cwl#collectionUUID\": \"zzzzz-4zz18-zzzzzzzzzzzzzzz\"\n        }]\n    },\n    \"z\": {\n        \"class\": \"Directory\",\n        \"basename\": \"anonymous\",\n        \"listing\": [{\n            \"basename\": \"renamed.txt\",\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999998+99/file1.txt\"\n        }]\n    }\n}\n"
  },
  {
    "path": "sdk/cwl/tests/submit_test_job_with_mismatched_uuids.json",
    "content": "{\n    \"x\": {\n        \"class\": \"File\",\n        \"path\": \"input/blorp.txt\"\n    },\n    \"y\": {\n        \"class\": \"Directory\",\n        \"location\": \"keep:99999999999999999999999999999998+99\",\n        \"http://arvados.org/cwl#collectionUUID\": \"zzzzz-4zz18-zzzzzzzzzzzzzzz\",\n        \"listing\": [{\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999998+99/file1.txt\",\n            \"http://arvados.org/cwl#collectionUUID\": \"zzzzz-4zz18-zzzzzzzzzzzzzzz\"\n        }]\n    },\n    \"z\": {\n        \"class\": \"Directory\",\n        \"basename\": \"anonymous\",\n        \"listing\": [{\n            \"basename\": \"renamed.txt\",\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999998+99/file1.txt\",\n            \"http://arvados.org/cwl#collectionUUID\": \"zzzzz-4zz18-zzzzzzzzzzzzzzz\"\n        }]\n    }\n}\n"
  },
  {
    "path": "sdk/cwl/tests/submit_test_job_with_uuids.json",
    "content": "{\n    \"x\": {\n        \"class\": \"File\",\n        \"path\": \"input/blorp.txt\"\n    },\n    \"y\": {\n        \"class\": \"Directory\",\n        \"location\": \"keep:zzzzz-4zz18-zzzzzzzzzzzzzzz\",\n        \"listing\": [{\n            \"class\": \"File\",\n            \"location\": \"keep:zzzzz-4zz18-zzzzzzzzzzzzzzz/file1.txt\"\n        }]\n    },\n    \"z\": {\n        \"class\": \"Directory\",\n        \"basename\": \"anonymous\",\n        \"listing\": [{\n            \"basename\": \"renamed.txt\",\n            \"class\": \"File\",\n            \"location\": \"keep:zzzzz-4zz18-zzzzzzzzzzzzzzz/file1.txt\"\n        }]\n    }\n}\n"
  },
  {
    "path": "sdk/cwl/tests/test_conformance.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport subprocess\nimport urllib.parse\n\nfrom pathlib import PurePosixPath\n\nimport pytest\n\nfrom . import run_cwltest\n\ndef _ensure_git_clone(git_dir, url, ref):\n    \"\"\"Clone `url` to `git_dir`, check out `ref`, and return `git_dir`\"\"\"\n    subprocess.run(\n        ['git', 'clone', '--quiet', '--no-checkout', url, str(git_dir)],\n        check=True,\n    )\n    subprocess.run(\n        ['git', 'switch', '--quiet', '--detach', ref],\n        cwd=git_dir,\n        check=True,\n    )\n    yield git_dir\n\n\ndef _ensure_git_worktree(git_dir, work_ref):\n    \"\"\"Create a temporary worktree at `git_dir` from `work_ref`\"\"\"\n    subprocess.run(\n        ['git', 'worktree', 'add', '--quiet', str(git_dir), work_ref],\n        check=True,\n    )\n    yield git_dir\n    subprocess.run(\n        ['git', 'worktree', 'remove', '--force', str(git_dir)],\n        check=True,\n    )\n\n\ndef _ensure_git(tmp_path_factory, url, ref, remote_name=None):\n    \"\"\"Create a temporary Git checkout\n\n    If the Git ref `remotes/REMOTE_NAME/ci-build` exists, create a worktree from\n    it and yield that. This is provided by the Arvados CI server and fast.\n    Otherwise, clone `url`, check out `ref`, and return that directory.\n    \"\"\"\n    if remote_name is None:\n        url_path = urllib.parse.urlparse(url).path\n        assert url_path\n        remote_name = PurePosixPath(url_path).stem\n    git_dir = tmp_path_factory.mktemp(f'{remote_name}_{ref.replace(\"/\", \"_\")}_')\n    rev_parse = subprocess.run(\n        ['git', 'rev-parse', '--verify', f'remotes/{remote_name}/ci-build'],\n        capture_output=True,\n        text=True,\n    )\n    if rev_parse.returncode == os.EX_OK:\n        yield from _ensure_git_worktree(git_dir, rev_parse.stdout.rstrip('\\n'))\n    else:\n        yield from _ensure_git_clone(git_dir, url, ref)\n\n\n@pytest.fixture\ndef badges_dir(request, tmp_path):\n    return tmp_path / 'badges'\n\n\n@pytest.fixture(scope='session')\ndef cwl1_0git(tmp_path_factory):\n    yield from _ensure_git(\n        tmp_path_factory,\n        'https://github.com/common-workflow-language/common-workflow-language.git',\n        'tags/v1.0.2',\n        'cwl-v1.0',\n    )\n\n\n@pytest.fixture(scope='session')\ndef cwl1_1git(tmp_path_factory):\n    yield from _ensure_git(\n        tmp_path_factory,\n        'https://github.com/common-workflow-language/cwl-v1.1.git',\n        '3e90671b25f7840ef2926ad2bacbf447772dda94',\n    )\n\n\n@pytest.fixture(scope='session')\ndef cwl1_2git(tmp_path_factory):\n    yield from _ensure_git(\n        tmp_path_factory,\n        'https://github.com/common-workflow-language/cwl-v1.2.git',\n        'tags/v1.2.1',\n    )\n\n\n@pytest.fixture\ndef skipped_tests_for_config(arv_session_config):\n    \"\"\"Return an appropriate `cwltest -S` option for this Arvados configuration\"\"\"\n    testnames = []\n    try:\n        runtime_engine = arv_session_config['Containers']['RuntimeEngine']\n    except (KeyError, TypeError):\n        runtime_engine = None\n    if runtime_engine != 'docker':\n        testnames.append('docker_entrypoint')\n    if testnames:\n        return ['-S', ','.join(testnames)]\n    else:\n        return []\n\n\n@pytest.mark.cwl_conformance\n@pytest.mark.integration\ndef test_conformance_1_0(acr_script, badges_dir, cwl1_0git, jobs_docker_image, skipped_tests_for_config):\n    cwltest = run_cwltest(\n        cwl1_0git / 
'v1.0/conformance_test_v1.0.yaml',\n        acr_script,\n        badges_dir,\n        test_args=skipped_tests_for_config,\n    )\n    assert cwltest.returncode == os.EX_OK\n\n\n@pytest.mark.cwl_conformance\n@pytest.mark.integration\ndef test_conformance_1_1(acr_script, badges_dir, cwl1_1git, jobs_docker_image, skipped_tests_for_config):\n    cwltest = run_cwltest(\n        cwl1_1git / 'conformance_tests.yaml',\n        acr_script,\n        badges_dir,\n        test_args=skipped_tests_for_config + ['-N', '199'],\n    )\n    assert cwltest.returncode == os.EX_OK\n\n\n@pytest.mark.cwl_conformance\n@pytest.mark.integration\ndef test_conformance_1_2(acr_script, badges_dir, cwl1_2git, jobs_docker_image, skipped_tests_for_config):\n    cwltest = run_cwltest(\n        cwl1_2git / 'conformance_tests.yaml',\n        acr_script,\n        badges_dir,\n        test_args=skipped_tests_for_config,\n    )\n    assert cwltest.returncode == os.EX_OK\n"
  },
  {
    "path": "sdk/cwl/tests/test_container.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport arvados_cwl\nimport arvados_cwl.context\nimport arvados_cwl.util\nimport copy\nimport arvados.config\nimport logging\nimport unittest\nimport os\nimport functools\nimport threading\nimport cwltool.process\nimport cwltool.secrets\nimport cwltool.load_tool\nfrom cwltool.update import INTERNAL_VERSION\nfrom schema_salad.ref_resolver import Loader\nfrom schema_salad.sourceline import cmap\nimport io\nfrom parameterized import parameterized\n\nfrom unittest import mock\n\nfrom .matcher import JsonDiffMatcher, StripYAMLComments\nfrom .mock_discovery import get_rootDesc\n\nif not os.getenv('ARVADOS_DEBUG'):\n    logging.getLogger('arvados.cwl-runner').setLevel(logging.WARN)\n    logging.getLogger('arvados.arv-run').setLevel(logging.WARN)\n\nclass CollectionMock(object):\n    def __init__(self, vwdmock, *args, **kwargs):\n        self.vwdmock = vwdmock\n        self.count = 0\n\n    def open(self, *args, **kwargs):\n        self.count += 1\n        return self.vwdmock.open(*args, **kwargs)\n\n    def copy(self, *args, **kwargs):\n        self.count += 1\n        self.vwdmock.copy(*args, **kwargs)\n\n    def save_new(self, *args, **kwargs):\n        pass\n\n    def __len__(self):\n        return self.count\n\n    def portable_data_hash(self):\n        if self.count == 0:\n            return arvados.config.EMPTY_BLOCK_LOCATOR\n        else:\n            return \"99999999999999999999999999999996+99\"\n\n\nclass TestContainer(unittest.TestCase):\n\n    def setUp(self):\n        cwltool.process._names = set()\n\n    def tearDown(self):\n        root_logger = logging.getLogger('')\n\n        # Remove existing RuntimeStatusLoggingHandlers if they exist\n        handlers = [h for h in root_logger.handlers if not isinstance(h, arvados_cwl.executor.RuntimeStatusLoggingHandler)]\n        root_logger.handlers = handlers\n\n    def helper(self, runner, enable_reuse=True):\n        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema(INTERNAL_VERSION)\n\n        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,\n                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))\n        fs_access = mock.MagicMock()\n        fs_access.exists.return_value = True\n\n        loadingContext = arvados_cwl.context.ArvLoadingContext(\n            {\"avsc_names\": avsc_names,\n             \"basedir\": \"\",\n             \"make_fs_access\": make_fs_access,\n             \"construct_tool_object\": runner.arv_make_tool,\n             \"fetcher_constructor\": functools.partial(arvados_cwl.CollectionFetcher, api_client=runner.api, fs_access=fs_access),\n             \"loader\": Loader({}),\n             \"metadata\": cmap({\"cwlVersion\": INTERNAL_VERSION, \"http://commonwl.org/cwltool#original_cwlVersion\": \"v1.0\"}),\n             \"default_docker_image\": \"arvados/jobs:\"+arvados_cwl.__version__,\n             })\n        runtimeContext = arvados_cwl.context.ArvRuntimeContext(\n            {\"work_api\": \"containers\",\n             \"basedir\": \"\",\n             \"name\": \"test_run_\"+str(enable_reuse),\n             \"make_fs_access\": make_fs_access,\n             \"tmpdir\": \"/tmp\",\n             \"outdir\": \"/tmp\",\n             \"enable_reuse\": enable_reuse,\n             \"priority\": 500,\n             \"project_uuid\": \"zzzzz-8i9sb-zzzzzzzzzzzzzzz\",\n             
\"workflow_eval_lock\": threading.Condition(threading.RLock())\n            })\n\n        if isinstance(runner, mock.MagicMock):\n            def make_tool(toolpath_object, loadingContext):\n                return arvados_cwl.ArvadosCommandTool(runner, toolpath_object, loadingContext)\n            runner.arv_make_tool.side_effect = make_tool\n\n        return loadingContext, runtimeContext\n\n    # Helper function to set up the ArvCwlExecutor to use the containers api\n    # and test that the RuntimeStatusLoggingHandler is set up correctly\n    def setup_and_test_container_executor_and_logging(self, gcc_mock) :\n        api = mock.MagicMock()\n        api._rootDesc = copy.deepcopy(get_rootDesc())\n\n        # Make sure ArvCwlExecutor thinks it's running inside a container so it\n        # adds the logging handler that will call runtime_status_update() mock\n        self.assertFalse(gcc_mock.called)\n        runner = arvados_cwl.ArvCwlExecutor(api)\n        self.assertEqual(runner.work_api, 'containers')\n        root_logger = logging.getLogger('')\n        handlerClasses = [h.__class__ for h in root_logger.handlers]\n        self.assertTrue(arvados_cwl.RuntimeStatusLoggingHandler in handlerClasses)\n        return runner\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @parameterized.expand([\n        (True,),\n        (False,),\n    ])\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_run(self, enable_reuse, keepdocker):\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        tool = cmap({\n            \"inputs\": [],\n            \"outputs\": [],\n            \"baseCommand\": \"ls\",\n            \"arguments\": [{\"valueFrom\": \"$(runtime.outdir)\"}],\n            \"id\": \"\",\n            \"class\": \"CommandLineTool\",\n            \"cwlVersion\": \"v1.2\"\n        })\n\n        loadingContext, runtimeContext = self.helper(runner, enable_reuse)\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n\n        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher({\n                    'environment': {\n                        'HOME': '/var/spool/cwl',\n                        'TMPDIR': '/tmp'\n                    },\n                    'name': 'test_run_'+str(enable_reuse),\n                    'runtime_constraints': {\n                        'vcpus': 1,\n                        'ram': 268435456\n                    },\n                    'use_existing': enable_reuse,\n                    'priority': 500,\n                    'mounts': {\n                        '/tmp': {'kind': 'tmp',\n                                 \"capacity\": 1073741824\n                             },\n                        '/var/spool/cwl': 
{'kind': 'tmp',\n                                           \"capacity\": 1073741824 }\n                    },\n                    'state': 'Committed',\n                    'output_name': 'Output from step test_run_'+str(enable_reuse),\n                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n                    'output_path': '/var/spool/cwl',\n                    'output_ttl': 0,\n                    'container_image': '99999999999999999999999999999993+99',\n                    'command': ['ls', '/var/spool/cwl'],\n                    'cwd': '/var/spool/cwl',\n                    'scheduling_parameters': {},\n                    'properties': {'cwl_input': {}},\n                    'secret_mounts': {},\n                    'output_storage_classes': [\"default\"]\n                }))\n\n    # The test passes some fields in builder.resources\n    # For the remaining fields, the defaults will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_resource_requirements(self, keepdocker):\n        arvados_cwl.add_arv_hints()\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 3600\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        tool = cmap({\n            \"inputs\": [],\n            \"outputs\": [],\n            \"hints\": [{\n                \"class\": \"ResourceRequirement\",\n                \"coresMin\": 3,\n                \"ramMin\": 3000,\n                \"tmpdirMin\": 4000,\n                \"outdirMin\": 5000\n            }, {\n                \"class\": \"http://arvados.org/cwl#RuntimeConstraints\",\n                \"keep_cache\": 512\n            }, {\n                \"class\": \"http://arvados.org/cwl#APIRequirement\",\n            }, {\n                \"class\": \"http://arvados.org/cwl#PartitionRequirement\",\n                \"partition\": \"blurb\"\n            }, {\n                \"class\": \"http://arvados.org/cwl#IntermediateOutput\",\n                \"outputTTL\": 7200\n            }, {\n                \"class\": \"WorkReuse\",\n                \"enableReuse\": False\n            }],\n            \"baseCommand\": \"ls\",\n            \"id\": \"\",\n            \"class\": \"CommandLineTool\",\n            \"cwlVersion\": \"v1.2\"\n        })\n\n        loadingContext, runtimeContext = self.helper(runner)\n        runtimeContext.name = \"test_resource_requirements\"\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n\n        call_args, call_kwargs = runner.api.container_requests().create.call_args\n\n        call_body_expected = {\n            'environment': {\n                'HOME': '/var/spool/cwl',\n                'TMPDIR': '/tmp'\n            },\n            'name': 'test_resource_requirements',\n            'runtime_constraints': {\n                'vcpus': 3,\n                'ram': 3145728000,\n                
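# hint values are MiB but the API takes bytes: ramMin 3000 -> 3145728000, keep_cache 512 -> 536870912\n                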
'keep_cache_ram': 536870912,\n                'API': True\n            },\n            'use_existing': False,\n            'priority': 500,\n            'mounts': {\n                '/tmp': {'kind': 'tmp',\n                         \"capacity\": 4194304000 },\n                '/var/spool/cwl': {'kind': 'tmp',\n                                   \"capacity\": 5242880000 }\n            },\n            'state': 'Committed',\n            'output_name': 'Output from step test_resource_requirements',\n            'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n            'output_path': '/var/spool/cwl',\n            'output_ttl': 7200,\n            'container_image': '99999999999999999999999999999993+99',\n            'command': ['ls'],\n            'cwd': '/var/spool/cwl',\n            'scheduling_parameters': {\n                'partitions': ['blurb']\n            },\n            'properties': {'cwl_input': {}},\n            'secret_mounts': {},\n            'output_storage_classes': [\"default\"]\n        }\n\n        call_body = call_kwargs.get('body', None)\n        self.assertNotEqual(None, call_body)\n        for key in call_body:\n            self.assertEqual(call_body_expected.get(key), call_body.get(key))\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @parameterized.expand([True, False])\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_enable_reuse_expression(self, should_reuse, keepdocker):\n        arvados_cwl.add_arv_hints()\n\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        job_input = {\"shouldEnableReuse\": str(should_reuse)}\n\n        tool = cmap({\n            \"cwlVersion\": \"v1.2\",\n            \"class\": \"CommandLineTool\",\n            \"id\": \"\",\n            \"requirements\": [{\n                    \"class\": \"WorkReuse\",\n                    \"enableReuse\": '$(inputs.shouldEnableReuse === \"True\")'\n                },\n                {\n                    \"class\": \"InlineJavascriptRequirement\"\n                }],\n            \"baseCommand\": \"echo\",\n            \"inputs\": [{\n                \"id\": \"shouldEnableReuse\",\n                \"type\": \"string\",\n                \"inputBinding\": {\"position\": 1}\n            }],\n            \"outputs\": []\n        })\n\n        loadingContext, runtimeContext = self.helper(runner, True)\n        runtimeContext.name = \"test_enable_reuse_expression\"\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n        for j in arvtool.job(job_input, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher({\n                    'environment': {\n                        'HOME': '/var/spool/cwl',\n                        'TMPDIR': '/tmp'\n                    },\n       
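# use_existing below is the WorkReuse expression evaluated against the job input\n       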
             'runtime_constraints': {\n                        'vcpus': 1,\n                        'ram': 268435456\n                    },\n                    'name': 'test_enable_reuse_expression',\n                    'use_existing': should_reuse,\n                    'priority': 500,\n                    'mounts': {\n                        '/tmp': {'kind': 'tmp',\n                                 \"capacity\": 1073741824 },\n                        '/var/spool/cwl': {'kind': 'tmp',\n                                           \"capacity\": 1073741824 }\n                    },\n                    'state': 'Committed',\n                    'output_name': 'Output from step test_enable_reuse_expression',\n                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n                    'output_path': '/var/spool/cwl',\n                    'output_ttl': 0,\n                    'container_image': '99999999999999999999999999999993+99',\n                    'command': ['echo', str(should_reuse)],\n                    'cwd': '/var/spool/cwl',\n                    'scheduling_parameters': {},\n                    'properties': {\n                        'cwl_input': {'shouldEnableReuse': str(should_reuse)}\n                    },\n                    'secret_mounts': {},\n                    'output_storage_classes': [\"default\"]\n                }))\n\n    # The test passes some fields in builder.resources\n    # For the remaining fields, the defaults will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    @mock.patch(\"arvados.collection.Collection\")\n    def test_initial_work_dir(self, collection_mock, keepdocker):\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        sourcemock = mock.MagicMock()\n        def get_collection_mock(p):\n            if \"/\" in p:\n                return (sourcemock, p.split(\"/\", 1)[1])\n            else:\n                return (sourcemock, \"\")\n        runner.fs_access.get_collection.side_effect = get_collection_mock\n\n        vwdmock = mock.MagicMock()\n        collection_mock.side_effect = lambda *args, **kwargs: CollectionMock(vwdmock, *args, **kwargs)\n\n        tool = cmap({\n            \"inputs\": [],\n            \"outputs\": [],\n            \"hints\": [{\n                \"class\": \"InitialWorkDirRequirement\",\n                \"listing\": [{\n                    \"class\": \"File\",\n                    \"basename\": \"foo\",\n                    \"location\": \"keep:99999999999999999999999999999995+99/bar\"\n                },\n                {\n                    \"class\": \"Directory\",\n                    \"basename\": \"foo2\",\n                    \"location\": \"keep:99999999999999999999999999999995+99\"\n                },\n                {\n                    \"class\": \"File\",\n                    \"basename\": \"filename\",\n                    \"location\": 
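# nested source path: only 'baz/filename' is copied, and it is mounted at /var/spool/cwl/filename\n                    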
\"keep:99999999999999999999999999999995+99/baz/filename\"\n                },\n                {\n                    \"class\": \"Directory\",\n                    \"basename\": \"subdir\",\n                    \"location\": \"keep:99999999999999999999999999999995+99/subdir\"\n                }                        ]\n            }],\n            \"baseCommand\": \"ls\",\n            \"class\": \"CommandLineTool\",\n            \"cwlVersion\": \"v1.2\",\n            \"id\": \"\"\n        })\n\n        loadingContext, runtimeContext = self.helper(runner)\n        runtimeContext.name = \"test_initial_work_dir\"\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n\n        arvtool.formatgraph = None\n        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n\n        call_args, call_kwargs = runner.api.container_requests().create.call_args\n\n        vwdmock.copy.assert_has_calls([mock.call('bar', 'foo', source_collection=sourcemock)])\n        vwdmock.copy.assert_has_calls([mock.call('.', 'foo2', source_collection=sourcemock)])\n        vwdmock.copy.assert_has_calls([mock.call('baz/filename', 'filename', source_collection=sourcemock)])\n        vwdmock.copy.assert_has_calls([mock.call('subdir', 'subdir', source_collection=sourcemock)])\n\n        call_body_expected = {\n            'environment': {\n                'HOME': '/var/spool/cwl',\n                'TMPDIR': '/tmp'\n            },\n            'name': 'test_initial_work_dir',\n            'runtime_constraints': {\n                'vcpus': 1,\n                'ram': 268435456\n            },\n            'use_existing': True,\n            'priority': 500,\n            'mounts': {\n                '/tmp': {'kind': 'tmp',\n                         \"capacity\": 1073741824 },\n                '/var/spool/cwl': {'kind': 'tmp',\n                                   \"capacity\": 1073741824 },\n                '/var/spool/cwl/foo': {\n                    'kind': 'collection',\n                    'path': 'foo',\n                    'portable_data_hash': '99999999999999999999999999999996+99'\n                },\n                '/var/spool/cwl/foo2': {\n                    'kind': 'collection',\n                    'path': 'foo2',\n                    'portable_data_hash': '99999999999999999999999999999996+99'\n                },\n                '/var/spool/cwl/filename': {\n                    'kind': 'collection',\n                    'path': 'filename',\n                    'portable_data_hash': '99999999999999999999999999999996+99'\n                },\n                '/var/spool/cwl/subdir': {\n                    'kind': 'collection',\n                    'path': 'subdir',\n                    'portable_data_hash': '99999999999999999999999999999996+99'\n                }\n            },\n            'state': 'Committed',\n            'output_name': 'Output from step test_initial_work_dir',\n            'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n            'output_path': '/var/spool/cwl',\n            'output_ttl': 0,\n            'container_image': '99999999999999999999999999999993+99',\n            'command': ['ls'],\n            'cwd': '/var/spool/cwl',\n            'scheduling_parameters': {\n            },\n            'properties': {'cwl_input': {}},\n            'secret_mounts': {},\n            'output_storage_classes': [\"default\"]\n        }\n\n        call_body = call_kwargs.get('body', None)\n        self.assertNotEqual(None, call_body)\n        for 
key in call_body:\n            self.assertEqual(call_body_expected.get(key), call_body.get(key))\n\n\n    # Test redirecting stdin/stdout/stderr\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_redirects(self, keepdocker):\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema(INTERNAL_VERSION)\n\n        tool = cmap({\n            \"inputs\": [],\n            \"outputs\": [],\n            \"baseCommand\": \"ls\",\n            \"stdout\": \"stdout.txt\",\n            \"stderr\": \"stderr.txt\",\n            \"stdin\": \"/keep/99999999999999999999999999999996+99/file.txt\",\n            \"arguments\": [{\"valueFrom\": \"$(runtime.outdir)\"}],\n            \"id\": \"\",\n            \"class\": \"CommandLineTool\",\n            \"cwlVersion\": \"v1.2\"\n        })\n\n        loadingContext, runtimeContext = self.helper(runner)\n        runtimeContext.name = \"test_run_redirect\"\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher({\n                    'environment': {\n                        'HOME': '/var/spool/cwl',\n                        'TMPDIR': '/tmp'\n                    },\n                    'name': 'test_run_redirect',\n                    'runtime_constraints': {\n                        'vcpus': 1,\n                        'ram': 268435456\n                    },\n                    'use_existing': True,\n                    'priority': 500,\n                    'mounts': {\n                        '/tmp': {'kind': 'tmp',\n                                 \"capacity\": 1073741824 },\n                        '/var/spool/cwl': {'kind': 'tmp',\n                                           \"capacity\": 1073741824 },\n                        \"stderr\": {\n                            \"kind\": \"file\",\n                            \"path\": \"/var/spool/cwl/stderr.txt\"\n                        },\n                        \"stdin\": {\n                            \"kind\": \"collection\",\n                            \"path\": \"file.txt\",\n                            \"portable_data_hash\": \"99999999999999999999999999999996+99\"\n                        },\n                        \"stdout\": {\n                            \"kind\": \"file\",\n                            \"path\": \"/var/spool/cwl/stdout.txt\"\n                        },\n                    },\n                    'state': 'Committed',\n                    \"output_name\": \"Output from step test_run_redirect\",\n                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n                    'output_path': '/var/spool/cwl',\n                    'output_ttl': 0,\n                    'container_image': 
'99999999999999999999999999999993+99',\n                    'command': ['ls', '/var/spool/cwl'],\n                    'cwd': '/var/spool/cwl',\n                    'scheduling_parameters': {},\n                    'properties': {'cwl_input': {}},\n                    'secret_mounts': {},\n                    'output_storage_classes': [\"default\"]\n                }))\n\n    @mock.patch(\"arvados.collection.Collection\")\n    def test_done(self, col):\n        api = mock.MagicMock()\n\n        runner = mock.MagicMock()\n        runner.api = api\n        runner.num_retries = 0\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n\n        runner.api.container_requests().get().execute.return_value = {\"container_uuid\": \"zzzzz-xvhdp-zzzzzzzzzzzzzzz\"}\n\n        runner.api.containers().get().execute.return_value = {\"state\": \"Complete\",\n                                                              \"output\": \"abc+123\",\n                                                              \"exit_code\": 0}\n\n        # Need to no-op the close method, otherwise the buffer is\n        # discarded on close and we can't call getvalue() to check it.\n        class NoopCloseStringIO(io.StringIO):\n            def close(self):\n                pass\n\n        usage_report = NoopCloseStringIO()\n        def colreader_action(name, mode):\n            nonlocal usage_report\n            if name == \"node.json\":\n                return io.StringIO(\"\"\"{\n    \"ProviderType\": \"c5.large\",\n    \"VCPUs\": 2,\n    \"RAM\": 4294967296,\n    \"IncludedScratch\": 8000000000000,\n    \"AddedScratch\": 0,\n    \"Price\": 0.085,\n    \"Preemptible\": false,\n    \"CUDA\": {\n        \"DriverVersion\": \"\",\n        \"HardwareCapability\": \"\",\n        \"DeviceCount\": 0\n    }\n}\"\"\")\n            if name == 'crunchstat.txt':\n                return open(\"tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt\", \"rt\")\n            if name == 'arv-mount.txt':\n                return open(\"tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt\", \"rt\")\n            if name == 'usage_report.html':\n                return usage_report\n            return None\n\n        col().open.side_effect = colreader_action\n        col().__iter__.return_value = ['node.json', 'crunchstat.txt', 'arv-mount.txt']\n\n        loadingContext, runtimeContext = self.helper(runner)\n\n        arvjob = arvados_cwl.ArvadosContainer(runner,\n                                              runtimeContext,\n                                              [],\n                                              mock.MagicMock(),\n                                              {},\n                                              None,\n                                              [],\n                                              [],\n                                              \"testjob\")\n        arvjob.output_callback = mock.MagicMock()\n        arvjob.collect_outputs = mock.MagicMock()\n        arvjob.successCodes = [0]\n        arvjob.outdir = \"/var/spool/cwl\"\n        arvjob.output_ttl = 3600\n        arvjob.uuid = \"zzzzz-xvhdp-zzzzzzzzzzzzzz1\"\n\n        arvjob.collect_outputs.return_value = {\"out\": \"stuff\"}\n\n        arvjob.done({\n            \"state\": \"Final\",\n            \"log_uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzz1\",\n            \"output_uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzz2\",\n     
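# done() should attach the collect_outputs() result to this request as the cwl_output property (asserted below)\n     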
       \"uuid\": \"zzzzz-xvhdp-zzzzzzzzzzzzzzz\",\n            \"container_uuid\": \"zzzzz-8i9sb-zzzzzzzzzzzzzzz\",\n            \"modified_at\": \"2017-05-26T12:01:22Z\",\n            \"properties\": {},\n            \"name\": \"testjob\"\n        })\n\n        self.assertFalse(api.collections().create.called)\n        self.assertFalse(runner.runtime_status_error.called)\n\n        # Assert that something was written to the usage report\n        self.assertTrue(len(usage_report.getvalue()) > 0)\n\n        arvjob.collect_outputs.assert_called_with(\"keep:abc+123\", 0)\n        arvjob.output_callback.assert_called_with({\"out\": \"stuff\"}, \"success\")\n        runner.add_intermediate_output.assert_called_with(\"zzzzz-4zz18-zzzzzzzzzzzzzz2\")\n\n        runner.api.container_requests().update.assert_called_with(uuid=\"zzzzz-xvhdp-zzzzzzzzzzzzzz1\",\n                                                                  body={'container_request': {'properties': {'cwl_output': {'out': 'stuff'}}}})\n\n\n    # Test to make sure we dont call runtime_status_update if we already did\n    # some where higher up in the call stack\n    @mock.patch(\"arvados_cwl.util.get_current_container\")\n    def test_recursive_runtime_status_update(self, gcc_mock):\n        self.setup_and_test_container_executor_and_logging(gcc_mock)\n        root_logger = logging.getLogger('')\n\n        # get_current_container is invoked when we call runtime_status_update\n        # so try and log again!\n        gcc_mock.side_effect = lambda *args: root_logger.error(\"Second Error\")\n        try:\n            root_logger.error(\"First Error\")\n        except RuntimeError:\n            self.fail(\"RuntimeStatusLoggingHandler should not be called recursively\")\n\n\n    # Test to make sure that an exception raised from\n    # get_current_container doesn't cause the logger to raise an\n    # exception\n    @mock.patch(\"arvados_cwl.util.get_current_container\")\n    def test_runtime_status_get_current_container_exception(self, gcc_mock):\n        self.setup_and_test_container_executor_and_logging(gcc_mock)\n        root_logger = logging.getLogger('')\n\n        # get_current_container is invoked when we call\n        # runtime_status_update, it is going to also raise an\n        # exception.\n        gcc_mock.side_effect = Exception(\"Second Error\")\n        try:\n            root_logger.error(\"First Error\")\n        except Exception:\n            self.fail(\"Exception in logger should not propagate\")\n        self.assertTrue(gcc_mock.called)\n\n    @mock.patch(\"arvados_cwl.ArvCwlExecutor.runtime_status_update\")\n    @mock.patch(\"arvados_cwl.util.get_current_container\")\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    @mock.patch(\"arvados.collection.Collection\")\n    def test_child_failure(self, col, reader, gcc_mock, rts_mock):\n        runner = self.setup_and_test_container_executor_and_logging(gcc_mock)\n\n        gcc_mock.return_value = {\"uuid\" : \"zzzzz-dz642-zzzzzzzzzzzzzzz\"}\n        self.assertTrue(gcc_mock.called)\n\n        runner.num_retries = 0\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.label = mock.MagicMock()\n        runner.label.return_value = '[container testjob]'\n\n        runner.api.containers().get().execute.return_value = {\n            \"state\":\"Complete\",\n            \"output\": \"abc+123\",\n            \"exit_code\": 1,\n            \"log\": 
\"def+234\"\n        }\n\n        col().open.return_value = []\n\n        loadingContext, runtimeContext = self.helper(runner)\n\n        arvjob = arvados_cwl.ArvadosContainer(runner,\n                                              runtimeContext,\n                                              [],\n                                              mock.MagicMock(),\n                                              {},\n                                              None,\n                                              [],\n                                              [],\n                                              \"testjob\")\n        arvjob.output_callback = mock.MagicMock()\n        arvjob.collect_outputs = mock.MagicMock()\n        arvjob.successCodes = [0]\n        arvjob.outdir = \"/var/spool/cwl\"\n        arvjob.output_ttl = 3600\n        arvjob.collect_outputs.return_value = {\"out\": \"stuff\"}\n\n        arvjob.done({\n            \"state\": \"Final\",\n            \"log_uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzz1\",\n            \"output_uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzz2\",\n            \"uuid\": \"zzzzz-xvhdp-zzzzzzzzzzzzzzz\",\n            \"container_uuid\": \"zzzzz-8i9sb-zzzzzzzzzzzzzzz\",\n            \"modified_at\": \"2017-05-26T12:01:22Z\",\n            \"properties\": {}\n        })\n\n        rts_mock.assert_has_calls([\n            mock.call('error',\n                      'arvados.cwl-runner: [container testjob] (zzzzz-xvhdp-zzzzzzzzzzzzzzz) error log:',\n                      '  ** log is empty **'\n                      ),\n            mock.call('warning',\n                      'arvados.cwl-runner: [container testjob] unable to generate resource usage report'\n        )])\n        arvjob.output_callback.assert_called_with({\"out\": \"stuff\"}, \"permanentFail\")\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_mounts(self, keepdocker):\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999994+99\",\n            \"manifest_text\": \". 
99999999999999999999999999999994+99 0:0:file1 0:0:file2\"}\n\n        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema(\"v1.1\")\n\n        tool = cmap({\n            \"inputs\": [\n                {\"id\": \"p1\",\n                 \"type\": \"Directory\"}\n            ],\n            \"outputs\": [],\n            \"baseCommand\": \"ls\",\n            \"arguments\": [{\"valueFrom\": \"$(runtime.outdir)\"}],\n            \"id\": \"\",\n            \"class\": \"CommandLineTool\",\n            \"cwlVersion\": \"v1.2\"\n        })\n\n        loadingContext, runtimeContext = self.helper(runner)\n        runtimeContext.name = \"test_run_mounts\"\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n        job_order = {\n            \"p1\": {\n                \"class\": \"Directory\",\n                \"location\": \"keep:99999999999999999999999999999994+44\",\n                \"http://arvados.org/cwl#collectionUUID\": \"zzzzz-4zz18-zzzzzzzzzzzzzzz\",\n                \"listing\": [\n                    {\n                        \"class\": \"File\",\n                        \"location\": \"keep:99999999999999999999999999999994+44/file1\",\n                    },\n                    {\n                        \"class\": \"File\",\n                        \"location\": \"keep:99999999999999999999999999999994+44/file2\",\n                    }\n                ]\n            }\n        }\n        for j in arvtool.job(job_order, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher({\n                    'environment': {\n                        'HOME': '/var/spool/cwl',\n                        'TMPDIR': '/tmp'\n                    },\n                    'name': 'test_run_mounts',\n                    'runtime_constraints': {\n                        'vcpus': 1,\n                        'ram': 268435456\n                    },\n                    'use_existing': True,\n                    'priority': 500,\n                    'mounts': {\n                        \"/keep/99999999999999999999999999999994+44\": {\n                            \"kind\": \"collection\",\n                            \"portable_data_hash\": \"99999999999999999999999999999994+44\",\n                            \"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzzz\"\n                        },\n                        '/tmp': {'kind': 'tmp',\n                                 \"capacity\": 1073741824 },\n                        '/var/spool/cwl': {'kind': 'tmp',\n                                           \"capacity\": 1073741824 }\n                    },\n                    'state': 'Committed',\n                    'output_name': 'Output from step test_run_mounts',\n                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n                    'output_path': '/var/spool/cwl',\n                    'output_ttl': 0,\n                    'container_image': '99999999999999999999999999999994+99',\n                    'command': ['ls', '/var/spool/cwl'],\n                    'cwd': '/var/spool/cwl',\n                    'scheduling_parameters': {},\n                    'properties': {'cwl_input': {\n                        \"p1\": {\n                            \"basename\": \"99999999999999999999999999999994+44\",\n                            \"class\": \"Directory\",\n                            \"dirname\": 
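# cwl_input records the Directory as staged under the /keep mount\n                            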
\"/keep\",\n                            \"http://arvados.org/cwl#collectionUUID\": \"zzzzz-4zz18-zzzzzzzzzzzzzzz\",\n                            \"listing\": [\n                                {\n                                    \"basename\": \"file1\",\n                                    \"class\": \"File\",\n                                    \"dirname\": \"/keep/99999999999999999999999999999994+44\",\n                                    \"location\": \"keep:99999999999999999999999999999994+44/file1\",\n                                    \"nameext\": \"\",\n                                    \"nameroot\": \"file1\",\n                                    \"path\": \"/keep/99999999999999999999999999999994+44/file1\",\n                                    \"size\": 0\n                                },\n                                {\n                                    \"basename\": \"file2\",\n                                    \"class\": \"File\",\n                                    \"dirname\": \"/keep/99999999999999999999999999999994+44\",\n                                    \"location\": \"keep:99999999999999999999999999999994+44/file2\",\n                                    \"nameext\": \"\",\n                                    \"nameroot\": \"file2\",\n                                    \"path\": \"/keep/99999999999999999999999999999994+44/file2\",\n                                    \"size\": 0\n                                }\n                            ],\n                            \"location\": \"keep:99999999999999999999999999999994+44\",\n                            \"path\": \"/keep/99999999999999999999999999999994+44\"\n                        }\n                    }},\n                    'secret_mounts': {},\n                    'output_storage_classes': [\"default\"]\n                }))\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_secrets(self, keepdocker):\n        arvados_cwl.add_arv_hints()\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema(\"v1.1\")\n\n        tool = cmap({\"arguments\": [\"md5sum\", \"example.conf\"],\n                     \"class\": \"CommandLineTool\",\n                     \"cwlVersion\": \"v1.2\",\n                     \"hints\": [\n                         {\n                             \"class\": \"http://commonwl.org/cwltool#Secrets\",\n                             \"secrets\": [\n                                 \"#secret_job.cwl/pw\"\n                             ]\n                         }\n                     ],\n                     \"id\": \"\",\n                     \"inputs\": [\n                         {\n                             \"id\": \"#secret_job.cwl/pw\",\n                             \"type\": 
\"string\"\n                         }\n                     ],\n                     \"outputs\": [\n                     ],\n                     \"requirements\": [\n                         {\n                             \"class\": \"InitialWorkDirRequirement\",\n                             \"listing\": [\n                                 {\n                                     \"entry\": \"username: user\\npassword: $(inputs.pw)\\n\",\n                                     \"entryname\": \"example.conf\"\n                                 }\n                             ]\n                         }\n                     ]})\n\n        loadingContext, runtimeContext = self.helper(runner)\n        runtimeContext.name = \"test_secrets\"\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n\n        job_order = {\"pw\": \"blorp\"}\n        runner.secret_store.store([\"pw\"], job_order)\n\n        for j in arvtool.job(job_order, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher({\n                    'environment': {\n                        'HOME': '/var/spool/cwl',\n                        'TMPDIR': '/tmp'\n                    },\n                    'name': 'test_secrets',\n                    'runtime_constraints': {\n                        'vcpus': 1,\n                        'ram': 268435456\n                    },\n                    'use_existing': True,\n                    'priority': 500,\n                    'mounts': {\n                        '/tmp': {'kind': 'tmp',\n                                 \"capacity\": 1073741824\n                             },\n                        '/var/spool/cwl': {'kind': 'tmp',\n                                           \"capacity\": 1073741824 }\n                    },\n                    'state': 'Committed',\n                    'output_name': 'Output from step test_secrets',\n                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n                    'output_path': '/var/spool/cwl',\n                    'output_ttl': 0,\n                    'container_image': '99999999999999999999999999999993+99',\n                    'command': ['md5sum', 'example.conf'],\n                    'cwd': '/var/spool/cwl',\n                    'scheduling_parameters': {},\n                    'properties': {'cwl_input': job_order},\n                    \"secret_mounts\": {\n                        \"/var/spool/cwl/example.conf\": {\n                            \"content\": \"username: user\\npassword: blorp\\n\",\n                            \"kind\": \"text\"\n                        }\n                    },\n                    'output_storage_classes': [\"default\"]\n                }))\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_timelimit(self, keepdocker):\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = 
[(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        tool = cmap({\n            \"inputs\": [],\n            \"outputs\": [],\n            \"baseCommand\": \"ls\",\n            \"arguments\": [{\"valueFrom\": \"$(runtime.outdir)\"}],\n            \"id\": \"\",\n            \"cwlVersion\": \"v1.2\",\n            \"class\": \"CommandLineTool\",\n            \"hints\": [\n                {\n                    \"class\": \"ToolTimeLimit\",\n                    \"timelimit\": 42\n                }\n            ]\n        })\n\n        loadingContext, runtimeContext = self.helper(runner)\n        runtimeContext.name = \"test_timelimit\"\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n\n        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n\n        _, kwargs = runner.api.container_requests().create.call_args\n        self.assertEqual(42, kwargs['body']['scheduling_parameters'].get('max_run_time'))\n\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_setting_storage_class(self, keepdocker):\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        tool = cmap({\n            \"inputs\": [],\n            \"outputs\": [],\n            \"baseCommand\": \"ls\",\n            \"arguments\": [{\"valueFrom\": \"$(runtime.outdir)\"}],\n            \"id\": \"\",\n            \"cwlVersion\": \"v1.2\",\n            \"class\": \"CommandLineTool\",\n            \"hints\": [\n                {\n                    \"class\": \"http://arvados.org/cwl#OutputStorageClass\",\n                    \"finalStorageClass\": [\"baz_sc\", \"qux_sc\"],\n                    \"intermediateStorageClass\": [\"foo_sc\", \"bar_sc\"]\n                }\n            ]\n        })\n\n        loadingContext, runtimeContext = self.helper(runner, True)\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n\n        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher({\n                    'environment': {\n                        'HOME': '/var/spool/cwl',\n                        'TMPDIR': '/tmp'\n                    },\n                    'name': 'test_run_True',\n                    'runtime_constraints': {\n                        'vcpus': 1,\n                        'ram': 268435456\n                    },\n                    'use_existing': True,\n                    'priority': 500,\n                    'mounts': {\n                        '/tmp': {'kind': 'tmp',\n                 
                \"capacity\": 1073741824\n                             },\n                        '/var/spool/cwl': {'kind': 'tmp',\n                                           \"capacity\": 1073741824 }\n                    },\n                    'state': 'Committed',\n                    'output_name': 'Output from step test_run_True',\n                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n                    'output_path': '/var/spool/cwl',\n                    'output_ttl': 0,\n                    'container_image': '99999999999999999999999999999993+99',\n                    'command': ['ls', '/var/spool/cwl'],\n                    'cwd': '/var/spool/cwl',\n                    'scheduling_parameters': {},\n                    'properties': {'cwl_input': {}},\n                    'secret_mounts': {},\n                    'output_storage_classes': [\"foo_sc\", \"bar_sc\"]\n                }))\n\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_setting_process_properties(self, keepdocker):\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        tool = cmap({\n            \"inputs\": [\n                {\"id\": \"x\", \"type\": \"string\"}],\n            \"outputs\": [],\n            \"baseCommand\": \"ls\",\n            \"arguments\": [{\"valueFrom\": \"$(runtime.outdir)\"}],\n            \"id\": \"\",\n            \"class\": \"CommandLineTool\",\n            \"cwlVersion\": \"v1.2\",\n            \"hints\": [\n            {\n                \"class\": \"http://arvados.org/cwl#ProcessProperties\",\n                \"processProperties\": [\n                    {\"propertyName\": \"foo\",\n                     \"propertyValue\": \"bar\"},\n                    {\"propertyName\": \"baz\",\n                     \"propertyValue\": \"$(inputs.x)\"},\n                    {\"propertyName\": \"quux\",\n                     \"propertyValue\": {\n                         \"q1\": 1,\n                         \"q2\": 2\n                     }\n                    }\n                ],\n            }\n        ]\n        })\n\n        loadingContext, runtimeContext = self.helper(runner, True)\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n\n        for j in arvtool.job({\"x\": \"blorp\"}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher({\n                    'environment': {\n                        'HOME': '/var/spool/cwl',\n                        'TMPDIR': '/tmp'\n                    },\n                    'name': 'test_run_True',\n                    'runtime_constraints': {\n                        'vcpus': 1,\n                        'ram': 268435456\n                    },\n                    
'use_existing': True,\n                    'priority': 500,\n                    'mounts': {\n                        '/tmp': {'kind': 'tmp',\n                                 \"capacity\": 1073741824\n                             },\n                        '/var/spool/cwl': {'kind': 'tmp',\n                                           \"capacity\": 1073741824 }\n                    },\n                    'state': 'Committed',\n                    'output_name': 'Output from step test_run_True',\n                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n                    'output_path': '/var/spool/cwl',\n                    'output_ttl': 0,\n                    'container_image': '99999999999999999999999999999993+99',\n                    'command': ['ls', '/var/spool/cwl'],\n                    'cwd': '/var/spool/cwl',\n                    'scheduling_parameters': {},\n                    'properties': {\n                        \"baz\": \"blorp\",\n                        \"cwl_input\": {\"x\": \"blorp\"},\n                        \"foo\": \"bar\",\n                        \"quux\": {\n                            \"q1\": 1,\n                            \"q2\": 2\n                        }\n                    },\n                    'secret_mounts': {},\n                    'output_storage_classes': [\"default\"]\n                }))\n\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_publish_ports(self, keepdocker):\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20250327\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        tool = cmap({\n            \"inputs\": [],\n            \"outputs\": [],\n            \"baseCommand\": \"ls\",\n            \"arguments\": [{\"valueFrom\": \"$(runtime.outdir)\"}],\n            \"id\": \"\",\n            \"cwlVersion\": \"v1.2\",\n            \"class\": \"CommandLineTool\",\n            \"hints\": [\n                {\n                    \"class\": \"http://arvados.org/cwl#PublishPorts\",\n                    \"publishPorts\": [{\n                        \"servicePort\": \"80\",\n                        \"serviceAccess\": \"private\",\n                        \"label\": \"Jupyter notebook\",\n                        \"initialPath\": \"/?token=abcdefgh\",\n                    }]\n                }\n            ]\n        })\n\n        loadingContext, runtimeContext = self.helper(runner, True)\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n\n        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher({\n                    'environment': {\n                        'HOME': '/var/spool/cwl',\n                        'TMPDIR': '/tmp'\n                    },\n                    'name': 
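# a service container: note 'service': True and 'published_ports' in this request\n                    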
'test_run_True',\n                    'runtime_constraints': {\n                        'API': True,\n                        'vcpus': 1,\n                        'ram': 268435456\n                    },\n                    'use_existing': False,\n                    'priority': 500,\n                    'mounts': {\n                        '/tmp': {'kind': 'tmp',\n                                 \"capacity\": 1073741824\n                             },\n                        '/var/spool/cwl': {'kind': 'tmp',\n                                           \"capacity\": 1073741824 }\n                    },\n                    'state': 'Committed',\n                    'output_name': 'Output from step test_run_True',\n                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n                    'output_path': '/var/spool/cwl',\n                    'output_ttl': 0,\n                    'container_image': '99999999999999999999999999999993+99',\n                    'command': ['ls', '/var/spool/cwl'],\n                    'cwd': '/var/spool/cwl',\n                    'scheduling_parameters': {},\n                    'properties': {'cwl_input': {}},\n                    'secret_mounts': {},\n                    'output_storage_classes': [\"default\"],\n                    'published_ports': {\n                        \"80\": {\n                            \"access\": \"private\",\n                            \"initial_path\": \"/?token=abcdefgh\",\n                            \"label\": \"Jupyter notebook\",\n                        }\n                    },\n                    'service': True,\n                }))\n\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @parameterized.expand([\n        # Legacy CUDA API\n        ({\n            \"class\": \"http://commonwl.org/cwltool#CUDARequirement\",\n            \"cudaVersionMin\": \"11.0\",\n            \"cudaComputeCapability\": \"9.0\",\n        }, {\n            'vcpus': 1,\n            'ram': 268435456,\n            'cuda': {\n                'device_count': 1,\n                'driver_version': \"11.0\",\n                'hardware_capability': \"9.0\"\n            }\n        }, \"20210628\"),\n        ({\n            \"class\": \"http://commonwl.org/cwltool#CUDARequirement\",\n            \"cudaVersionMin\": \"11.0\",\n            \"cudaComputeCapability\": \"9.0\",\n            \"cudaDeviceCountMin\": 2\n        }, {\n            'vcpus': 1,\n            'ram': 268435456,\n            'cuda': {\n                'device_count': 2,\n                'driver_version': \"11.0\",\n                'hardware_capability': \"9.0\"\n            }\n        }, \"20210628\"),\n        ({\n            \"class\": \"http://commonwl.org/cwltool#CUDARequirement\",\n            \"cudaVersionMin\": \"11.0\",\n            \"cudaComputeCapability\": [\"4.0\", \"5.0\"],\n            \"cudaDeviceCountMin\": 2\n        }, {\n            'vcpus': 1,\n            'ram': 268435456,\n            'cuda': {\n                'device_count': 2,\n                'driver_version': \"11.0\",\n                'hardware_capability': \"4.0\"\n            }\n        }, \"20210628\"),\n\n        # New GPU API\n        ({\n            \"class\": \"http://commonwl.org/cwltool#CUDARequirement\",\n            \"cudaVersionMin\": \"11.0\",\n            \"cudaComputeCapability\": \"9.0\",\n            \"cudaVram\": 8000,\n        }, {\n            'vcpus': 1,\n   
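# cudaVram is given in MiB; 'vram' below is bytes (8000*1024*1024)\n   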
         'ram': 268435456,\n            'gpu': {\n                'device_count': 1,\n                'driver_version': \"11.0\",\n                'hardware_target': [\"9.0\"],\n                'stack': \"cuda\",\n                'vram': 8000*1024*1024,\n            }\n        }, \"20250128\"),\n        ({\n            \"class\": \"http://commonwl.org/cwltool#CUDARequirement\",\n            \"cudaVersionMin\": \"11.0\",\n            \"cudaComputeCapability\": \"9.0\",\n            \"cudaDeviceCountMin\": 2,\n            \"cudaVram\": 8000,\n        }, {\n            'vcpus': 1,\n            'ram': 268435456,\n            'gpu': {\n                'device_count': 2,\n                'driver_version': \"11.0\",\n                'hardware_target': [\"9.0\"],\n                'stack': \"cuda\",\n                'vram': 8000*1024*1024,\n            }\n        }, \"20250128\"),\n        ({\n            \"class\": \"http://commonwl.org/cwltool#CUDARequirement\",\n            \"cudaVersionMin\": \"11.0\",\n            \"cudaComputeCapability\": [\"4.0\", \"5.0\"],\n            \"cudaDeviceCountMin\": 2,\n            \"cudaVram\": 8000,\n        }, {\n            'vcpus': 1,\n            'ram': 268435456,\n            'gpu': {\n                'device_count': 2,\n                'driver_version': \"11.0\",\n                'hardware_target': [\"4.0\", \"5.0\"],\n                'stack': \"cuda\",\n                'vram': 8000*1024*1024,\n            }\n        }, \"20250128\"),\n\n        # ROCm\n        ({\n            \"class\": \"http://arvados.org/cwl#ROCmRequirement\",\n            \"rocmDriverVersion\": \"6.2\",\n            \"rocmTarget\": [\"gfx1100\", \"gfx1103\"],\n            \"rocmDeviceCountMin\": 1,\n            \"rocmVram\": 8000,\n        }, {\n            'vcpus': 1,\n            'ram': 268435456,\n            'gpu': {\n                'device_count': 1,\n                'driver_version': \"6.2\",\n                'hardware_target': [\"gfx1100\", \"gfx1103\"],\n                'stack': \"rocm\",\n                'vram': 8000*1024*1024,\n            }\n        }, \"20250128\"),\n\n    ])\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_gpu_requirement(self, test_cwl_req, test_arv_req, apiRevision, keepdocker):\n        arvados_cwl.add_arv_hints()\n\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": apiRevision}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        tool = cmap({\n            \"inputs\": [],\n            \"outputs\": [],\n            \"baseCommand\": \"nvidia-smi\",\n            \"arguments\": [],\n            \"id\": \"\",\n            \"cwlVersion\": \"v1.2\",\n            \"class\": \"CommandLineTool\",\n            \"requirements\": [test_cwl_req]\n        })\n\n        loadingContext, runtimeContext = self.helper(runner, True)\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n\n        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            
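# every parameterized (hint, runtime_constraints, API revision) case must produce this exact request\n            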
runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher({\n                    'environment': {\n                        'HOME': '/var/spool/cwl',\n                        'TMPDIR': '/tmp'\n                    },\n                    'name': 'test_run_True',\n                    'runtime_constraints': test_arv_req,\n                    'use_existing': True,\n                    'priority': 500,\n                    'mounts': {\n                        '/tmp': {'kind': 'tmp',\n                                 \"capacity\": 1073741824\n                             },\n                        '/var/spool/cwl': {'kind': 'tmp',\n                                           \"capacity\": 1073741824 }\n                    },\n                    'state': 'Committed',\n                    'output_name': 'Output from step test_run_True',\n                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n                    'output_path': '/var/spool/cwl',\n                    'output_ttl': 0,\n                    'container_image': '99999999999999999999999999999993+99',\n                    'command': ['nvidia-smi'],\n                    'cwd': '/var/spool/cwl',\n                    'scheduling_parameters': {},\n                    'properties': {'cwl_input': {}},\n                    'secret_mounts': {},\n                    'output_storage_classes': [\"default\"]\n                }))\n\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @mock.patch(\"arvados_cwl.arvdocker.determine_image_id\")\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_match_local_docker(self, keepdocker, determine_image_id):\n        arvados_cwl.add_arv_hints()\n\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz4\", {\"dockerhash\": \"456\"}),\n                                   (\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", {\"dockerhash\": \"123\"})]\n        determine_image_id.side_effect = lambda x: \"123\"\n        def execute(uuid):\n            ex = mock.MagicMock()\n            lookup = {\"zzzzz-4zz18-zzzzzzzzzzzzzz4\": {\"portable_data_hash\": \"99999999999999999999999999999994+99\"},\n                      \"zzzzz-4zz18-zzzzzzzzzzzzzz3\": {\"portable_data_hash\": \"99999999999999999999999999999993+99\"}}\n            ex.execute.return_value = lookup[uuid]\n            return ex\n        runner.api.collections().get.side_effect = execute\n\n        tool = cmap({\n            \"inputs\": [],\n            \"outputs\": [],\n            \"baseCommand\": \"echo\",\n            \"arguments\": [],\n            \"id\": \"\",\n            \"cwlVersion\": \"v1.0\",\n            \"class\": \"org.w3id.cwl.cwl.CommandLineTool\"\n        })\n\n        loadingContext, runtimeContext = self.helper(runner, True)\n\n        arvtool = arvados_cwl.ArvadosCommandTool(runner, tool, loadingContext)\n        arvtool.formatgraph = None\n\n        container_request = {\n            'environment': {\n                'HOME': '/var/spool/cwl',\n                'TMPDIR': '/tmp'\n            },\n            'name': 
'test_run_True',\n            'runtime_constraints': {\n                'vcpus': 1,\n                'ram': 1073741824,\n            },\n            'use_existing': True,\n            'priority': 500,\n            'mounts': {\n                '/tmp': {'kind': 'tmp',\n                         \"capacity\": 1073741824\n                         },\n                '/var/spool/cwl': {'kind': 'tmp',\n                                   \"capacity\": 1073741824 }\n            },\n            'state': 'Committed',\n            'output_name': 'Output from step test_run_True',\n            'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n            'output_path': '/var/spool/cwl',\n            'output_ttl': 0,\n            'container_image': '99999999999999999999999999999994+99',\n            'command': ['echo'],\n            'cwd': '/var/spool/cwl',\n            'scheduling_parameters': {},\n            'properties': {'cwl_input': {}},\n            'secret_mounts': {},\n            'output_storage_classes': [\"default\"]\n        }\n\n        runtimeContext.match_local_docker = False\n        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher(container_request))\n\n        runtimeContext.cached_docker_lookups.clear()\n        runtimeContext.match_local_docker = True\n        container_request['container_image'] = '99999999999999999999999999999993+99'\n        container_request['name'] = 'test_run_True_2'\n        container_request['output_name'] = 'Output from step test_run_True_2'\n        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher(container_request))\n\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @parameterized.expand([\n        (\"None, None\",   None,  None,  None),\n        (\"None, True\",   None,  True,  True),\n        (\"None, False\",  None,  False, False),\n        (\"False, None\",  False, None,  False),\n        (\"False, True\",  False, True,  False),\n        (\"False, False\", False, False, False),\n        (\"True, None\",   True,  None,  True),\n        (\"True, True\",   True,  True,  True),\n        (\"True, False\",  True,  False, False),\n    ])\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_run_preemptible_hint(self, _, enable_preemptible, preemptible_hint,\n                                  preemptible_setting, keepdocker):\n        arvados_cwl.add_arv_hints()\n\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        if preemptible_hint is not None:\n            hints = [{\n                \"class\": \"http://arvados.org/cwl#UsePreemptible\",\n                \"usePreemptible\": preemptible_hint\n        
    }]\n        else:\n            hints = []\n\n        tool = cmap({\n            \"inputs\": [],\n            \"outputs\": [],\n            \"baseCommand\": \"ls\",\n            \"arguments\": [{\"valueFrom\": \"$(runtime.outdir)\"}],\n            \"id\": \"\",\n            \"class\": \"CommandLineTool\",\n            \"cwlVersion\": \"v1.2\",\n            \"hints\": hints\n        })\n\n        loadingContext, runtimeContext = self.helper(runner)\n\n        runtimeContext.name = 'test_run_enable_preemptible_'+str(enable_preemptible)+str(preemptible_hint)\n        runtimeContext.enable_preemptible = enable_preemptible\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n\n        # Test the interactions between --enable/disable-preemptible\n        # and UsePreemptible hint\n\n        sched = {}\n        if preemptible_setting is not None:\n            sched['preemptible'] = preemptible_setting\n\n        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher({\n                    'environment': {\n                        'HOME': '/var/spool/cwl',\n                        'TMPDIR': '/tmp'\n                    },\n                    'name': runtimeContext.name,\n                    'runtime_constraints': {\n                        'vcpus': 1,\n                        'ram': 268435456\n                    },\n                    'use_existing': True,\n                    'priority': 500,\n                    'mounts': {\n                        '/tmp': {'kind': 'tmp',\n                                 \"capacity\": 1073741824\n                             },\n                        '/var/spool/cwl': {'kind': 'tmp',\n                                           \"capacity\": 1073741824 }\n                    },\n                    'state': 'Committed',\n                    'output_name': 'Output from step '+runtimeContext.name,\n                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n                    'output_path': '/var/spool/cwl',\n                    'output_ttl': 0,\n                    'container_image': '99999999999999999999999999999993+99',\n                    'command': ['ls', '/var/spool/cwl'],\n                    'cwd': '/var/spool/cwl',\n                    'scheduling_parameters': sched,\n                    'properties': {'cwl_input': {}},\n                    'secret_mounts': {},\n                    'output_storage_classes': [\"default\"]\n                }))\n\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @parameterized.expand([\n        (\"None, None\", None, None, False),\n        (\"None, True\", None, True, True),\n        (\"None, False\", None, False, False),\n        (\"False, None\", False, None, False),\n        (\"False, True\", False, True, False),  # command line overrides hint\n        (\"False, False\", False, False, False),\n        (\"True, None\", True, None, True),\n        (\"True, True\", True, True, True),\n        (\"True, False\", True, False, False),  # hint overrides command line\n    ])\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_spot_retry(self, _, enable_resubmit_non_preemptible,\n                        preemption_behavior_hint,\n                        
expect_resubmit_behavior,\n                        keepdocker):\n        arvados_cwl.add_arv_hints()\n\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        hints = [{\n            \"class\": \"http://arvados.org/cwl#UsePreemptible\",\n            \"usePreemptible\": True\n        }]\n\n        if preemption_behavior_hint is not None:\n            hints.append({\n                \"class\": \"http://arvados.org/cwl#PreemptionBehavior\",\n                \"resubmitNonPreemptible\": preemption_behavior_hint\n            })\n\n        tool = cmap({\n            \"inputs\": [],\n            \"outputs\": [],\n            \"baseCommand\": \"ls\",\n            \"arguments\": [{\"valueFrom\": \"$(runtime.outdir)\"}],\n            \"id\": \"\",\n            \"class\": \"CommandLineTool\",\n            \"cwlVersion\": \"v1.2\",\n            \"hints\": hints\n        })\n\n        loadingContext, runtimeContext = self.helper(runner)\n\n        runtimeContext.name = 'test_spot_retry_'+str(enable_resubmit_non_preemptible)+str(preemption_behavior_hint)\n        runtimeContext.enable_resubmit_non_preemptible = enable_resubmit_non_preemptible\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n\n        # Test the interactions between the enable_resubmit_non_preemptible\n        # setting and the PreemptionBehavior hint\n\n        expect_container_request = {\n                    'environment': {\n                        'HOME': '/var/spool/cwl',\n                        'TMPDIR': '/tmp'\n                    },\n                    'name': runtimeContext.name,\n                    'runtime_constraints': {\n                        'vcpus': 1,\n                        'ram': 268435456\n                    },\n                    'use_existing': True,\n                    'priority': 500,\n                    'mounts': {\n                        '/tmp': {'kind': 'tmp',\n                                 \"capacity\": 1073741824\n                             },\n                        '/var/spool/cwl': {'kind': 'tmp',\n                                           \"capacity\": 1073741824 }\n                    },\n                    'state': 'Committed',\n                    'output_name': 'Output from step '+runtimeContext.name,\n                    'owner_uuid': 'zzzzz-8i9sb-zzzzzzzzzzzzzzz',\n                    'output_path': '/var/spool/cwl',\n                    'output_ttl': 0,\n                    'container_image': '99999999999999999999999999999993+99',\n                    'command': ['ls', '/var/spool/cwl'],\n                    'cwd': '/var/spool/cwl',\n                    'scheduling_parameters': {'preemptible': True},\n                    'properties': {'cwl_input': {}},\n                    'secret_mounts': {},\n                    'output_storage_classes': [\"default\"],\n                }\n\n        expect_resubmit_container_request = expect_container_request.copy()\n        expect_resubmit_container_request['scheduling_parameters'] = 
{'preemptible': False}\n\n        runner.api.container_requests().create().execute.return_value = {\"uuid\": \"zzzzz-xvhdp-zzzzzzzzzzzzzzz\",\n                                                                         \"container_uuid\": \"zzzzz-8i9sb-zzzzzzzzzzzzzzz\"}\n\n        if expect_resubmit_behavior:\n            expect_container_request['container_count_max'] = 1\n\n        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher(expect_container_request))\n        runner.api.containers().get().execute.return_value = {\n            \"state\":\"Cancelled\",\n            \"output\": \"abc+123\",\n            \"exit_code\": 1,\n            \"log\": \"def+234\",\n            \"runtime_status\": {\n                \"preemptionNotice\": \"bye bye\"\n            }\n        }\n        runner.api.container_requests().create().execute.return_value = {\"uuid\": \"zzzzz-xvhdp-zzzzzzzzzzzzzz2\",\n                                                                         \"container_uuid\": \"zzzzz-8i9sb-zzzzzzzzzzzzzz2\"}\n\n        j.done({\n            \"state\": \"Final\",\n            \"log_uuid\": \"\",\n            \"output_uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzz2\",\n            \"uuid\": \"zzzzz-xvhdp-zzzzzzzzzzzzzzz\",\n            \"container_uuid\": \"zzzzz-8i9sb-zzzzzzzzzzzzzzz\",\n            \"modified_at\": \"2017-05-26T12:01:22Z\",\n            \"properties\": {},\n            \"name\": \"testjob\"\n        })\n        if expect_resubmit_behavior:\n            runner.api.container_requests().update.assert_any_call(\n                uuid=\"zzzzz-xvhdp-zzzzzzzzzzzzzzz\", body={\"properties\": {\"arv:failed_container_resubmitted\": \"zzzzz-xvhdp-zzzzzzzzzzzzzz2\"}})\n            runner.api.container_requests().create.assert_called_with(\n                body=JsonDiffMatcher(expect_resubmit_container_request))\n\n    @parameterized.expand([\n        (\"20210628\",),\n        (\"20220510\",),\n    ])\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_output_properties(self, rev, keepdocker):\n        arvados_cwl.add_arv_hints()\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": rev}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        tool = cmap({\n            \"inputs\": [{\n                \"id\": \"inp\",\n                \"type\": \"string\"\n            }],\n            \"outputs\": [],\n            \"baseCommand\": \"ls\",\n            \"arguments\": [{\"valueFrom\": \"$(runtime.outdir)\"}],\n            \"id\": \"\",\n            \"cwlVersion\": \"v1.2\",\n            \"class\": \"CommandLineTool\",\n            \"hints\": [\n                {\n                    \"class\": \"http://arvados.org/cwl#OutputCollectionProperties\",\n                    \"outputProperties\": {\n                        \"foo\": \"bar\",\n                        \"baz\": \"$(inputs.inp)\"\n                    }\n                }\n            ]\n        })\n\n        
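# The 'baz' output property is a parameter reference; it should be\n        # evaluated against the job inputs (inp == 'quux') when the request is built.\n        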
loadingContext, runtimeContext = self.helper(runner)\n        runtimeContext.name = \"test_timelimit\"\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n\n        for j in arvtool.job({\"inp\": \"quux\"}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n\n        _, kwargs = runner.api.container_requests().create.call_args\n        if rev == \"20220510\":\n            self.assertEqual({\"foo\": \"bar\", \"baz\": \"quux\"}, kwargs['body'].get('output_properties'))\n        else:\n            self.assertEqual(None, kwargs['body'].get('output_properties'))\n\n    @parameterized.expand([\n        (\"20231117\",),\n        (\"20240502\",),\n    ])\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_output_glob(self, rev, keepdocker):\n        arvados_cwl.add_arv_hints()\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": rev}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        tool = cmap({\n            \"inputs\": [{\n                \"id\": \"inp\",\n                \"type\": \"string\"\n            }],\n            \"outputs\": [\n                {\n                    \"id\": \"o1\",\n                    \"type\": \"File\",\n                    \"outputBinding\": {\n                        \"glob\": \"*.txt\"\n                    }\n                },\n                {\n                    \"id\": \"o2\",\n                    \"type\": \"File\",\n                    \"outputBinding\": {\n                        \"glob\": [\"*.dat\", \"*.bat\"]\n                    }\n                },\n                {\n                    \"id\": \"o3\",\n                    \"type\": {\n                        \"type\": \"record\",\n                        \"fields\": [\n                            {\n                                \"name\": \"f1\",\n                                \"type\": \"File\",\n                                \"outputBinding\": {\n                                    \"glob\": [\"*.cat\"]\n                                }\n                            }\n                        ]\n                    }\n                },\n                {\n                    \"id\": \"o4\",\n                    \"type\": \"File\",\n                    \"outputBinding\": {\n                        \"glob\": \"$(inputs.inp)\"\n                    }\n                },\n                {\n                    \"id\": \"o5\",\n                    \"type\": \"File\",\n                    \"outputBinding\": {\n                        \"glob\": \"*.foo\"\n                    },\n                    \"secondaryFiles\": [\".goo\", \"^.hoo\"]\n                },\n\n            ],\n            \"baseCommand\": \"ls\",\n            \"arguments\": [{\"valueFrom\": \"$(runtime.outdir)\"}],\n            \"id\": \"\",\n            \"cwlVersion\": \"v1.2\",\n            \"class\": \"CommandLineTool\",\n            \"hints\": [ ]\n        })\n\n        loadingContext, runtimeContext = self.helper(runner)\n        runtimeContext.name = 
\"test_timelimit\"\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n\n        for j in arvtool.job({\"inp\": \"quux\"}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n\n        _, kwargs = runner.api.container_requests().create.call_args\n        if rev == \"20240502\":\n            self.assertEqual(['*.txt', '*.txt/**',\n                              '*.dat', '*.dat/**',\n                              '*.bat', '*.bat/**',\n                              '*.cat', '*.cat/**',\n                              'quux', 'quux/**',\n                              '*.foo', '*.foo/**',\n                              '*.foo.goo', '*.foo.goo/**',\n                              '*.hoo', '*.hoo/**',\n                              'cwl.output.json',\n                              ], kwargs['body'].get('output_glob'))\n        else:\n            self.assertEqual(None, kwargs['body'].get('output_glob'))\n\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @parameterized.expand([\n        (\"Uncommitted\",),\n        (\"Committed\",),\n        (\"Final\",),\n\n    ])\n    @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n    def test_recheck_on_error(self, get_state, keepdocker):\n        runner = mock.MagicMock()\n        runner.ignore_docker_for_reuse = False\n        runner.intermediate_output_ttl = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n        runner.api._rootDesc = {\"revision\": \"20210628\"}\n        runner.api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        keepdocker.return_value = [(\"zzzzz-4zz18-zzzzzzzzzzzzzz3\", \"\")]\n        runner.api.collections().get().execute.return_value = {\n            \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n\n        tool = cmap({\n            \"inputs\": [],\n            \"outputs\": [],\n            \"baseCommand\": \"ls\",\n            \"arguments\": [{\"valueFrom\": \"$(runtime.outdir)\"}],\n            \"id\": \"\",\n            \"class\": \"CommandLineTool\",\n            \"cwlVersion\": \"v1.2\"\n        })\n\n        loadingContext, runtimeContext = self.helper(runner, False)\n\n        arvtool = cwltool.load_tool.load_tool(tool, loadingContext)\n        arvtool.formatgraph = None\n\n        # Test that if update() raises an exception, we re-check the\n        # container request record to see if we can proceed anyway.\n        runner.api.container_requests().update.side_effect = Exception(\"Invalid state transition\")\n\n        runner.api.container_requests().create().execute.return_value = {\n            'state': 'Uncommitted',\n            'uuid': \"zzzzz-xvhdp-zzzzzzzzzzzzzz1\",\n            \"container_uuid\": \"zzzzz-xvhdp-zzzzzzzzzzzzzzz\",\n        }\n        runner.api.container_requests().get().execute.return_value = {\n            'state': get_state,\n            'uuid': \"zzzzz-xvhdp-zzzzzzzzzzzzzz1\",\n        }\n\n        for j in arvtool.job({}, mock.MagicMock(), runtimeContext):\n            j.run(runtimeContext)\n            runner.api.container_requests().get.assert_called_with(uuid=\"zzzzz-xvhdp-zzzzzzzzzzzzzz1\")\n            assert j.attempt_count == (0 if get_state == \"Uncommitted\" else 1)\n\n\nclass TestWorkflow(unittest.TestCase):\n    def setUp(self):\n        cwltool.process._names = set()\n\n    def helper(self, runner, 
enable_reuse=True):\n        document_loader, avsc_names, schema_metadata, metaschema_loader = cwltool.process.get_schema(\"v1.0\")\n\n        make_fs_access=functools.partial(arvados_cwl.CollectionFsAccess,\n                                         collection_cache=arvados_cwl.CollectionCache(runner.api, None, 0))\n\n        document_loader.fetcher_constructor = functools.partial(arvados_cwl.CollectionFetcher, api_client=runner.api, fs_access=make_fs_access(\"\"))\n        document_loader.fetcher = document_loader.fetcher_constructor(document_loader.cache, document_loader.session)\n        document_loader.fetch_text = document_loader.fetcher.fetch_text\n        document_loader.check_exists = document_loader.fetcher.check_exists\n\n        loadingContext = arvados_cwl.context.ArvLoadingContext(\n            {\"avsc_names\": avsc_names,\n             \"basedir\": \"\",\n             \"make_fs_access\": make_fs_access,\n             \"loader\": document_loader,\n             \"metadata\": {\"cwlVersion\": INTERNAL_VERSION, \"http://commonwl.org/cwltool#original_cwlVersion\": \"v1.0\"},\n             \"construct_tool_object\": runner.arv_make_tool,\n             \"default_docker_image\": \"arvados/jobs:\"+arvados_cwl.__version__,\n             })\n        runtimeContext = arvados_cwl.context.ArvRuntimeContext(\n            {\"work_api\": \"containers\",\n             \"basedir\": \"\",\n             \"name\": \"test_run_wf_\"+str(enable_reuse),\n             \"make_fs_access\": make_fs_access,\n             \"tmpdir\": \"/tmp\",\n             \"enable_reuse\": enable_reuse,\n             \"priority\": 500})\n\n        return loadingContext, runtimeContext\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    @mock.patch(\"arvados.collection.Collection\")\n    @mock.patch('arvados.commands.keepdocker.list_images_in_arv')\n    def test_run(self, list_images_in_arv, mockcollection, mockcollectionreader):\n        arvados_cwl.add_arv_hints()\n\n        api = mock.MagicMock()\n        api._rootDesc = get_rootDesc()\n        api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        runner = arvados_cwl.executor.ArvCwlExecutor(api)\n        self.assertEqual(runner.work_api, 'containers')\n\n        list_images_in_arv.return_value = [[\"zzzzz-4zz18-zzzzzzzzzzzzzzz\"]]\n        runner.api.collections().get().execute.return_value = {\"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n        runner.api.collections().list().execute.return_value = {\"items\": [{\"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzzz\",\n                                                                           \"portable_data_hash\": \"99999999999999999999999999999993+99\"}]}\n\n        runner.api.containers().current().execute.return_value = {}\n\n        runner.project_uuid = \"zzzzz-8i9sb-zzzzzzzzzzzzzzz\"\n        runner.ignore_docker_for_reuse = False\n        runner.num_retries = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n\n        loadingContext, runtimeContext = self.helper(runner)\n        runner.fs_access = runtimeContext.make_fs_access(runtimeContext.basedir)\n\n        mockcollectionreader().exists.return_value = True\n\n        tool, metadata = loadingContext.loader.resolve_ref(\"tests/wf/scatter2.cwl\")\n        metadata[\"cwlVersion\"] = tool[\"cwlVersion\"]\n\n        mockc = 
mock.MagicMock()\n        mockcollection.side_effect = lambda *args, **kwargs: CollectionMock(mockc, *args, **kwargs)\n        mockcollectionreader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), \"token.txt\")\n\n        arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, loadingContext)\n        arvtool.formatgraph = None\n        it = arvtool.job({}, mock.MagicMock(), runtimeContext)\n\n        next(it).run(runtimeContext)\n        next(it).run(runtimeContext)\n\n        with open(\"tests/wf/scatter2_subwf.cwl\") as f:\n            subwf = StripYAMLComments(f.read()).rstrip()\n\n        runner.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher({\n                \"command\": [\n                    \"cwltool\",\n                    \"--no-container\",\n                    \"--move-outputs\",\n                    \"--preserve-entire-environment\",\n                    \"workflow.cwl\",\n                    \"cwl.input.yml\"\n                ],\n                \"container_image\": \"99999999999999999999999999999993+99\",\n                \"cwd\": \"/var/spool/cwl\",\n                \"environment\": {\n                    \"HOME\": \"/var/spool/cwl\",\n                    \"TMPDIR\": \"/tmp\"\n                },\n                \"mounts\": {\n                    \"/keep/99999999999999999999999999999999+118\": {\n                        \"kind\": \"collection\",\n                        \"portable_data_hash\": \"99999999999999999999999999999999+118\"\n                    },\n                    \"/tmp\": {\n                        \"capacity\": 1073741824,\n                        \"kind\": \"tmp\"\n                    },\n                    \"/var/spool/cwl\": {\n                        \"capacity\": 1073741824,\n                        \"kind\": \"tmp\"\n                    },\n                    \"/var/spool/cwl/cwl.input.yml\": {\n                        \"kind\": \"collection\",\n                        \"path\": \"cwl.input.yml\",\n                        \"portable_data_hash\": \"99999999999999999999999999999996+99\"\n                    },\n                    \"/var/spool/cwl/workflow.cwl\": {\n                        \"kind\": \"collection\",\n                        \"path\": \"workflow.cwl\",\n                        \"portable_data_hash\": \"99999999999999999999999999999996+99\"\n                    },\n                    \"stdout\": {\n                        \"kind\": \"file\",\n                        \"path\": \"/var/spool/cwl/cwl.output.json\"\n                    }\n                },\n                \"name\": \"scatterstep\",\n                \"output_name\": \"Output from step scatterstep\",\n                \"output_path\": \"/var/spool/cwl\",\n                \"output_ttl\": 0,\n                \"priority\": 500,\n                \"properties\": {'cwl_input': {\n                        \"fileblub\": {\n                            \"basename\": \"token.txt\",\n                            \"class\": \"File\",\n                            \"dirname\": \"/keep/99999999999999999999999999999999+118\",\n                            \"location\": \"keep:99999999999999999999999999999999+118/token.txt\",\n                            \"nameext\": \".txt\",\n                            \"nameroot\": \"token\",\n                            \"path\": \"/keep/99999999999999999999999999999999+118/token.txt\",\n                            \"size\": 0\n                        },\n                        \"sleeptime\": 5\n      
          }},\n                \"runtime_constraints\": {\n                    \"ram\": 1073741824,\n                    \"vcpus\": 1\n                },\n                \"scheduling_parameters\": {},\n                \"secret_mounts\": {},\n                \"state\": \"Committed\",\n                \"use_existing\": True,\n                'output_storage_classes': [\"default\"]\n            }))\n        mockc.open().__enter__().write.assert_has_calls([mock.call(subwf)])\n        mockc.open().__enter__().write.assert_has_calls([mock.call(\n'''{\n  \"fileblub\": {\n    \"basename\": \"token.txt\",\n    \"class\": \"File\",\n    \"location\": \"/keep/99999999999999999999999999999999+118/token.txt\",\n    \"size\": 0\n  },\n  \"sleeptime\": 5\n}''')])\n\n    # The test passes no builder.resources\n    # Hence the default resources will apply: {'cores': 1, 'ram': 1024, 'outdirSize': 1024, 'tmpdirSize': 1024}\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    @mock.patch(\"arvados.collection.Collection\")\n    @mock.patch('arvados.commands.keepdocker.list_images_in_arv')\n    def test_overall_resource_singlecontainer(self, list_images_in_arv, mockcollection, mockcollectionreader):\n        arvados_cwl.add_arv_hints()\n\n        api = mock.MagicMock()\n        api._rootDesc = get_rootDesc()\n        api.config.return_value = {\"Containers\": {\"DefaultKeepCacheRAM\": 256<<20}}\n\n        runner = arvados_cwl.executor.ArvCwlExecutor(api)\n        self.assertEqual(runner.work_api, 'containers')\n\n        list_images_in_arv.return_value = [[\"zzzzz-4zz18-zzzzzzzzzzzzzzz\"]]\n        runner.api.collections().get().execute.return_value = {\"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzzz\",\n                                                               \"portable_data_hash\": \"99999999999999999999999999999993+99\"}\n        runner.api.collections().list().execute.return_value = {\"items\": [{\"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzzz\",\n                                                                           \"portable_data_hash\": \"99999999999999999999999999999993+99\"}]}\n\n        runner.project_uuid = \"zzzzz-8i9sb-zzzzzzzzzzzzzzz\"\n        runner.ignore_docker_for_reuse = False\n        runner.num_retries = 0\n        runner.secret_store = cwltool.secrets.SecretStore()\n\n        loadingContext, runtimeContext = self.helper(runner)\n        runner.fs_access = runtimeContext.make_fs_access(runtimeContext.basedir)\n        loadingContext.do_update = True\n        tool, metadata = loadingContext.loader.resolve_ref(\"tests/wf/echo-wf.cwl\")\n\n        mockcollection.side_effect = lambda *args, **kwargs: CollectionMock(mock.MagicMock(), *args, **kwargs)\n\n        arvtool = arvados_cwl.ArvadosWorkflow(runner, tool, loadingContext)\n        arvtool.formatgraph = None\n        it = arvtool.job({}, mock.MagicMock(), runtimeContext)\n\n        next(it).run(runtimeContext)\n        next(it).run(runtimeContext)\n\n        with open(\"tests/wf/echo-subwf.cwl\") as f:\n            subwf = StripYAMLComments(f.read())\n\n        runner.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher({\n                'output_ttl': 0,\n                'environment': {'HOME': '/var/spool/cwl', 'TMPDIR': '/tmp'},\n                'scheduling_parameters': {},\n                'name': u'echo-subwf',\n                'secret_mounts': {},\n                'runtime_constraints': {'API': True, 'vcpus': 3, 'ram': 1073741824},\n                'properties': {'cwl_input': {}},\n               
 'priority': 500,\n                'mounts': {\n                    '/var/spool/cwl/cwl.input.yml': {\n                        'portable_data_hash': '99999999999999999999999999999996+99',\n                        'kind': 'collection',\n                        'path': 'cwl.input.yml'\n                    },\n                    '/var/spool/cwl/workflow.cwl': {\n                        'portable_data_hash': '99999999999999999999999999999996+99',\n                        'kind': 'collection',\n                        'path': 'workflow.cwl'\n                    },\n                    'stdout': {\n                        'path': '/var/spool/cwl/cwl.output.json',\n                        'kind': 'file'\n                    },\n                    '/tmp': {\n                        'kind': 'tmp',\n                        'capacity': 1073741824\n                    }, '/var/spool/cwl': {\n                        'kind': 'tmp',\n                        'capacity': 3221225472\n                    }\n                },\n                'state': 'Committed',\n                'output_path': '/var/spool/cwl',\n                'container_image': '99999999999999999999999999999993+99',\n                'command': [\n                    u'cwltool',\n                    u'--no-container',\n                    u'--move-outputs',\n                    u'--preserve-entire-environment',\n                    u'workflow.cwl',\n                    u'cwl.input.yml'\n                ],\n                'use_existing': True,\n                'output_name': u'Output from step echo-subwf',\n                'cwd': '/var/spool/cwl',\n                'output_storage_classes': [\"default\"]\n            }))\n\n    def test_default_work_api(self):\n        arvados_cwl.add_arv_hints()\n\n        api = mock.MagicMock()\n        api._rootDesc = copy.deepcopy(get_rootDesc())\n        runner = arvados_cwl.executor.ArvCwlExecutor(api)\n        self.assertEqual(runner.work_api, 'containers')\n"
  },
  {
    "path": "sdk/cwl/tests/test_copy_deps.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport subprocess\n\nimport pytest\n\nfrom . import TESTS_DIR, run_cwltest\nfrom arvados.collection import Collection\n\nWORKFLOW_PATH = TESTS_DIR / '19070-copy-deps.cwl'\nEXPECTED_WORKFLOW = WORKFLOW_PATH.read_bytes()\n\n@pytest.fixture\ndef cmd_19070(acr_script, tmp_project, jobs_docker_image):\n    return [\n        str(acr_script), '--disable-git',\n        '--project-uuid', tmp_project['uuid'],\n        '--submit-runner-image', jobs_docker_image,\n        str(WORKFLOW_PATH),\n    ]\n\n\ndef check_core_contents(arv, group, wf_uuid):\n    \"\"\"Assert `group` contains `wf_uuid` and the workflow collection\"\"\"\n    contents = arv.groups().contents(uuid=group['uuid']).execute()\n    matches = [item for item in contents['items'] if item['uuid'] == wf_uuid]\n    assert len(matches) == 1\n    for item in contents['items']:\n        try:\n            coll = Collection(item['portable_data_hash'])\n        except KeyError:\n            continue\n        try:\n            with coll.open(WORKFLOW_PATH.name, 'rb') as cwl_file:\n                workflow_content =  cwl_file.read()\n        except FileNotFoundError:\n            continue\n        if workflow_content == EXPECTED_WORKFLOW:\n            break\n    else:\n        assert False, \"workflow collection not found\"\n    return contents\n\n\ndef check_dep_contents(arv_or_contents, group):\n    \"\"\"Assert `group` contains the `testdir` collection and arvados/jobs image\"\"\"\n    try:\n        items = arv_or_contents['items']\n    except TypeError:\n        contents = arv_or_contents.groups().contents(uuid=group['uuid']).execute()\n        items = contents['items']\n    assert any(\n        c['kind'] == 'arvados#collection'\n        and c['portable_data_hash'] == 'd7514270f356df848477718d58308cc4+94'\n        for c in items\n    ), f\"couldn't find collection depedency in group {group['uuid']}\"\n    assert any(\n        c['kind'] == 'arvados#collection'\n        and c['name'].startswith('Docker image arvados jobs')\n        for c in items\n    ), f\"couldn't find jobs image in group {group['uuid']}\"\n\n\ndef check_all_contents(arv, group, wf_uuid):\n    \"\"\"Assert `group` contains both the workflow and its dependencies\"\"\"\n    contents = check_core_contents(arv, group, wf_uuid)\n    check_dep_contents(contents, group)\n\n\n@pytest.mark.integration\ndef test_create(arv_session, cmd_19070, tmp_project, integration_colls):\n    # Create workflow, by default should also copy dependencies\n    cmd_19070.insert(1, '--create-workflow')\n    acr_proc = subprocess.run(cmd_19070, capture_output=True, text=True)\n    assert acr_proc.returncode == os.EX_OK\n    check_all_contents(arv_session, tmp_project, acr_proc.stdout.rstrip('\\n'))\n\n\n@pytest.mark.integration\ndef test_update(arv_session, cmd_19070, tmp_project, integration_colls):\n    # Create workflow, but with --no-copy-deps it shouldn't copy anything\n    acr = cmd_19070.pop(0)\n    create_cmd = [acr, '--create-workflow', '--no-copy-deps'] + cmd_19070\n    create_proc = subprocess.run(create_cmd, capture_output=True, text=True)\n    assert create_proc.returncode == os.EX_OK\n    wf_uuid = create_proc.stdout.rstrip('\\n')\n    contents = check_core_contents(arv_session, tmp_project, wf_uuid)\n    with pytest.raises(AssertionError):\n        check_dep_contents(contents, tmp_project)\n\n    update_cmd = [acr, '--update-workflow', wf_uuid] + cmd_19070\n    
update_proc = subprocess.run(update_cmd, capture_output=True, text=True)\n    assert update_proc.returncode == os.EX_OK\n    check_all_contents(arv_session, tmp_project, wf_uuid)\n\n\n@pytest.mark.integration\ndef test_execute_without_deps(arv_session, cmd_19070, tmp_project, integration_colls):\n    run_proc = subprocess.run(cmd_19070)\n    assert run_proc.returncode == os.EX_OK\n    contents = arv_session.groups().contents(uuid=tmp_project['uuid']).execute()\n    # container request+log+container log+step output+final output == 5 items\n    assert len(contents['items']) == 5\n    assert not any(item['kind'] == 'arvados#workflow' for item in contents['items'])\n\n\n@pytest.mark.integration\ndef test_execute_with_deps(arv_session, cmd_19070, tmp_project, integration_colls):\n    cmd_19070.insert(1, '--copy-deps')\n    run_proc = subprocess.run(cmd_19070)\n    assert run_proc.returncode == os.EX_OK\n    check_dep_contents(arv_session, tmp_project)\n"
  },
  {
    "path": "sdk/cwl/tests/test_fsaccess.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport functools\nimport sys\nimport unittest\nimport json\nimport logging\nimport os\n\nfrom unittest import mock\n\nimport arvados\nimport arvados.keep\nimport arvados.collection\nimport arvados_cwl\n\nfrom cwltool.pathmapper import MapperEnt\nfrom .mock_discovery import get_rootDesc\n\nfrom arvados_cwl.fsaccess import CollectionCache\n\nclass TestFsAccess(unittest.TestCase):\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    def test_collection_cache(self, cr):\n        cache = CollectionCache(mock.MagicMock(), mock.MagicMock(), 4)\n        c1 = cache.get(\"99999999999999999999999999999991+99\")\n        c2 = cache.get(\"99999999999999999999999999999991+99\")\n        self.assertIs(c1, c2)\n        self.assertEqual(1, cr.call_count)\n        c3 = cache.get(\"99999999999999999999999999999992+99\")\n        self.assertEqual(2, cr.call_count)\n\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    def test_collection_cache_limit(self, cr):\n        cache = CollectionCache(mock.MagicMock(), mock.MagicMock(), 4)\n        cr().manifest_text.return_value = 'x' * 524289\n        self.assertEqual(0, cache.total)\n        c1 = cache.get(\"99999999999999999999999999999991+524289\")\n        self.assertIn(\"99999999999999999999999999999991+524289\", cache.collections)\n        self.assertNotIn(\"99999999999999999999999999999992+524289\", cache.collections)\n        self.assertEqual((524289*128)*1, cache.total)\n\n        c2 = cache.get(\"99999999999999999999999999999992+524289\")\n        self.assertIn(\"99999999999999999999999999999991+524289\", cache.collections)\n        self.assertIn(\"99999999999999999999999999999992+524289\", cache.collections)\n        self.assertEqual((524289*128)*2, cache.total)\n\n        c1 = cache.get(\"99999999999999999999999999999991+524289\")\n        self.assertIn(\"99999999999999999999999999999991+524289\", cache.collections)\n        self.assertIn(\"99999999999999999999999999999992+524289\", cache.collections)\n        self.assertEqual((524289*128)*2, cache.total)\n\n        c3 = cache.get(\"99999999999999999999999999999993+524289\")\n        self.assertIn(\"99999999999999999999999999999991+524289\", cache.collections)\n        self.assertIn(\"99999999999999999999999999999992+524289\", cache.collections)\n        self.assertEqual((524289*128)*3, cache.total)\n\n        c4 = cache.get(\"99999999999999999999999999999994+524289\")\n        self.assertIn(\"99999999999999999999999999999991+524289\", cache.collections)\n        self.assertNotIn(\"99999999999999999999999999999992+524289\", cache.collections)\n        self.assertEqual((524289*128)*3, cache.total)\n\n        c5 = cache.get(\"99999999999999999999999999999995+524289\")\n        self.assertNotIn(\"99999999999999999999999999999991+524289\", cache.collections)\n        self.assertNotIn(\"99999999999999999999999999999992+524289\", cache.collections)\n        self.assertEqual((524289*128)*3, cache.total)\n\n\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    def test_collection_cache_limit2(self, cr):\n        cache = CollectionCache(mock.MagicMock(), mock.MagicMock(), 4)\n        cr().manifest_text.return_value = 'x' * 524287\n        self.assertEqual(0, cache.total)\n        c1 = cache.get(\"99999999999999999999999999999991+524287\")\n        self.assertIn(\"99999999999999999999999999999991+524287\", cache.collections)\n        
self.assertNotIn(\"99999999999999999999999999999992+524287\", cache.collections)\n        self.assertEqual((524287*128)*1, cache.total)\n\n        c2 = cache.get(\"99999999999999999999999999999992+524287\")\n        self.assertIn(\"99999999999999999999999999999991+524287\", cache.collections)\n        self.assertIn(\"99999999999999999999999999999992+524287\", cache.collections)\n        self.assertEqual((524287*128)*2, cache.total)\n\n        c1 = cache.get(\"99999999999999999999999999999991+524287\")\n        self.assertIn(\"99999999999999999999999999999991+524287\", cache.collections)\n        self.assertIn(\"99999999999999999999999999999992+524287\", cache.collections)\n        self.assertEqual((524287*128)*2, cache.total)\n\n        c3 = cache.get(\"99999999999999999999999999999993+524287\")\n        self.assertIn(\"99999999999999999999999999999991+524287\", cache.collections)\n        self.assertIn(\"99999999999999999999999999999992+524287\", cache.collections)\n        self.assertEqual((524287*128)*3, cache.total)\n\n        c4 = cache.get(\"99999999999999999999999999999994+524287\")\n        self.assertIn(\"99999999999999999999999999999991+524287\", cache.collections)\n        self.assertIn(\"99999999999999999999999999999992+524287\", cache.collections)\n        self.assertEqual((524287*128)*4, cache.total)\n\n        c5 = cache.get(\"99999999999999999999999999999995+524287\")\n        self.assertIn(\"99999999999999999999999999999991+524287\", cache.collections)\n        self.assertNotIn(\"99999999999999999999999999999992+524287\", cache.collections)\n        self.assertEqual((524287*128)*4, cache.total)\n\n        c6 = cache.get(\"99999999999999999999999999999996+524287\")\n        self.assertNotIn(\"99999999999999999999999999999991+524287\", cache.collections)\n        self.assertNotIn(\"99999999999999999999999999999992+524287\", cache.collections)\n        self.assertEqual((524287*128)*4, cache.total)\n"
  },
  {
    "path": "sdk/cwl/tests/test_integration.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport subprocess\n\nimport pytest\n\nfrom . import TESTS_DIR, run_cwltest\n\n@pytest.mark.integration\ndef test_arvados_cwltest(acr_script, integration_colls):\n    cwltest = run_cwltest(\n        TESTS_DIR / 'arvados-tests.yml',\n        acr_script,\n    )\n    assert cwltest.returncode == os.EX_OK\n\n\n@pytest.mark.integration\ndef test_set_properties_17004(arv_session, acr_script, jobs_docker_image, tmp_project):\n    inp_path = TESTS_DIR / 'scripts/download_all_data.sh'\n    acr_proc = subprocess.run([\n        str(acr_script),\n        '--project-uuid', tmp_project['uuid'],\n        '--submit-runner-image', jobs_docker_image,\n        str(TESTS_DIR / '17004-output-props.cwl'),\n        '--inp', str(inp_path),\n    ])\n    assert acr_proc.returncode == os.EX_OK\n    contents = arv_session.groups().contents(uuid=tmp_project['uuid']).execute()\n    for item in contents['items']:\n        if (\n                item['kind'] == 'arvados#collection'\n                and item['properties'].get('type') == 'output'\n                and item['properties'].get('foo') == 'bar'\n                and item['properties'].get('baz') == inp_path.name\n        ):\n            break\n    else:\n        assert False, \"did not find collection with output properties\"\n\n\n@pytest.mark.integration\ndef test_fix_workflow_18888(acr_script, jobs_docker_image):\n    # This is a standalone test because the bug was observed with this\n    # command line and was thought to be due to command line handling.\n    acr_proc = subprocess.run([\n        str(acr_script),\n        '--submit-runner-image', jobs_docker_image,\n        '18888-download_def.cwl',\n        '--scripts', 'scripts/',\n    ], cwd=TESTS_DIR)\n    assert acr_proc.returncode == os.EX_OK\n"
  },
  {
    "path": "sdk/cwl/tests/test_make_output.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport functools\nimport json\nimport logging\nimport os\nimport io\nimport unittest\n\nfrom unittest import mock\n\nimport arvados\nimport arvados_cwl\nimport arvados_cwl.executor\nfrom .mock_discovery import get_rootDesc\n\nclass TestMakeOutput(unittest.TestCase):\n    def setUp(self):\n        self.api = mock.MagicMock()\n        self.api._rootDesc = get_rootDesc()\n\n    def tearDown(self):\n        root_logger = logging.getLogger('')\n\n        # Remove existing RuntimeStatusLoggingHandlers if they exist\n        handlers = [h for h in root_logger.handlers if not isinstance(h, arvados_cwl.executor.RuntimeStatusLoggingHandler)]\n        root_logger.handlers = handlers\n\n    @mock.patch(\"arvados.collection.Collection\")\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    def test_make_output_collection(self, reader, col):\n        keep_client = mock.MagicMock()\n        runner = arvados_cwl.executor.ArvCwlExecutor(self.api, keep_client=keep_client)\n        runner.project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'\n\n        final = mock.MagicMock()\n        col.return_value = final\n        readermock = mock.MagicMock()\n        reader.return_value = readermock\n\n        final_uuid = final.manifest_locator()\n        num_retries = runner.num_retries\n\n        cwlout = io.StringIO()\n        openmock = mock.MagicMock()\n        final.open.return_value = openmock\n        openmock.__enter__.return_value = cwlout\n\n        _, runner.final_output_collection = runner.make_output_collection(\"Test output\", [\"foo\"], \"tag0,tag1,tag2\", {}, {\n            \"foo\": {\n                \"class\": \"File\",\n                \"location\": \"keep:99999999999999999999999999999991+99/foo.txt\",\n                \"size\": 3,\n                \"basename\": \"foo.txt\"\n            },\n            \"bar\": {\n                \"class\": \"File\",\n                \"location\": \"keep:99999999999999999999999999999992+99/bar.txt\",\n                \"basename\": \"baz.txt\",\n                \"size\": 4\n            }\n        })\n\n        final.copy.assert_has_calls([mock.call('bar.txt', 'baz.txt', overwrite=False, source_collection=readermock)])\n        final.copy.assert_has_calls([mock.call('foo.txt', 'foo.txt', overwrite=False, source_collection=readermock)])\n        final.save_new.assert_has_calls([mock.call(ensure_unique_name=True, name='Test output', owner_uuid='zzzzz-j7d0g-zzzzzzzzzzzzzzz', properties={}, storage_classes=['foo'])])\n        self.assertEqual(\"\"\"{\n    \"bar\": {\n        \"basename\": \"baz.txt\",\n        \"class\": \"File\",\n        \"location\": \"baz.txt\",\n        \"size\": 4\n    },\n    \"foo\": {\n        \"basename\": \"foo.txt\",\n        \"class\": \"File\",\n        \"location\": \"foo.txt\",\n        \"size\": 3\n    }\n}\"\"\", cwlout.getvalue())\n\n        self.assertIs(final, runner.final_output_collection)\n        self.assertIs(final_uuid, runner.final_output_collection.manifest_locator())\n        self.api.links().create.assert_has_calls([mock.call(body={\"head_uuid\": final_uuid, \"link_class\": \"tag\", \"name\": \"tag0\"}), mock.call().execute(num_retries=num_retries)])\n        self.api.links().create.assert_has_calls([mock.call(body={\"head_uuid\": final_uuid, \"link_class\": \"tag\", \"name\": \"tag1\"}), mock.call().execute(num_retries=num_retries)])\n        
self.api.links().create.assert_has_calls([mock.call(body={\"head_uuid\": final_uuid, \"link_class\": \"tag\", \"name\": \"tag2\"}), mock.call().execute(num_retries=num_retries)])\n\n    @mock.patch(\"arvados.collection.Collection\")\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    def test_make_output_for_multiple_file_targets(self, reader, col):\n        keep_client = mock.MagicMock()\n        runner = arvados_cwl.executor.ArvCwlExecutor(self.api, keep_client=keep_client)\n        runner.project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'\n\n        final = mock.MagicMock()\n        col.return_value = final\n        readermock = mock.MagicMock()\n        reader.return_value = readermock\n\n        # This output describes a single file listed in 2 different directories\n        _, runner.final_output_collection = runner.make_output_collection(\"Test output\", [\"foo\"], \"\", {}, { 'out': [\n        {\n            'basename': 'testdir1',\n            'listing': [\n                {\n                    'basename': 'test.txt',\n                    'nameroot': 'test',\n                    'nameext': '.txt',\n                    'location': 'keep:99999999999999999999999999999991+99/test.txt',\n                    'class': 'File',\n                    'size': 16\n                }\n            ],\n            'location': '_:99999999999999999999999999999992+99',\n            'class': 'Directory'\n        },\n        {\n            'basename': 'testdir2',\n            'listing': [\n                {\n                    'basename': 'test.txt',\n                    'nameroot': 'test',\n                    'nameext': '.txt',\n                    'location': 'keep:99999999999999999999999999999991+99/test.txt',\n                    'class': 'File',\n                    'size': 16\n                }\n            ],\n            'location': '_:99999999999999999999999999999993+99',\n            'class': 'Directory'\n        }]})\n\n        # Check that copy is called on the collection for both locations\n        final.copy.assert_any_call(\"test.txt\", \"testdir1/test.txt\", source_collection=mock.ANY, overwrite=mock.ANY)\n        final.copy.assert_any_call(\"test.txt\", \"testdir2/test.txt\", source_collection=mock.ANY, overwrite=mock.ANY)\n\n    @mock.patch(\"arvados.collection.Collection\")\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    def test_make_output_for_literal_name_conflicts(self, reader, col):\n        keep_client = mock.MagicMock()\n        runner = arvados_cwl.executor.ArvCwlExecutor(self.api, keep_client=keep_client)\n        runner.project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'\n\n        final = mock.MagicMock()\n        col.return_value = final\n        readermock = mock.MagicMock()\n        reader.return_value = readermock\n\n        # This output describes two literals with the same basename\n        _, runner.final_output_collection = runner.make_output_collection(\"Test output\", [\"foo\"], \"\", {}, [\n        {\n            'lit':\n            {\n                'basename': 'a_file',\n                'nameext': '',\n                'nameroot': 'a_file',\n                'location': '_:f168fc0c-4291-40aa-a04e-366d57390560',\n                'class': 'File',\n                'contents': 'Hello file literal.'\n            }\n        },\n        {\n            'lit':\n            {\n                'basename': 'a_file',\n                'nameext': '',\n                'nameroot': 'a_file',\n                'location': 
'_:1728da8f-c64e-4a3e-b2e2-1ee356be7bc8',\n                'class': 'File',\n                'contents': 'Hello file literal.'\n            }\n        }])\n\n        # Check that the file name conflict is resolved and open is called for both\n        final.open.assert_any_call(\"a_file\", \"wb\")\n        final.open.assert_any_call(\"a_file_2\", \"wb\")\n"
  },
  {
    "path": "sdk/cwl/tests/test_pathmapper.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport functools\nimport sys\nimport unittest\nimport json\nimport logging\nimport os\n\nfrom unittest import mock\n\nimport arvados\nimport arvados.keep\nimport arvados.collection\nimport arvados_cwl\nimport arvados_cwl.executor\n\nfrom cwltool.pathmapper import MapperEnt\nfrom .mock_discovery import get_rootDesc\n\nfrom arvados_cwl.pathmapper import ArvPathMapper\n\ndef upload_mock(files, api, dry_run=False, num_retries=0, project=None, fnPattern=\"$(file %s/%s)\", name=None, collection=None, packed=None):\n    pdh = \"99999999999999999999999999999991+99\"\n    for c in files:\n        c.keepref = \"%s/%s\" % (pdh, os.path.basename(c.fn))\n        c.fn = fnPattern % (pdh, os.path.basename(c.fn))\n\nclass TestPathmap(unittest.TestCase):\n    def setUp(self):\n        self.api = mock.MagicMock()\n        self.api._rootDesc = get_rootDesc()\n\n    def tearDown(self):\n        root_logger = logging.getLogger('')\n\n        # Remove existing RuntimeStatusLoggingHandlers if they exist\n        handlers = [h for h in root_logger.handlers if not isinstance(h, arvados_cwl.executor.RuntimeStatusLoggingHandler)]\n        root_logger.handlers = handlers\n\n    def test_keepref(self):\n        \"\"\"Test direct keep references.\"\"\"\n\n        arvrunner = arvados_cwl.executor.ArvCwlExecutor(self.api)\n\n        p = ArvPathMapper(arvrunner, [{\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999991+99/hw.py\"\n        }], \"\", \"/test/%s\", \"/test/%s/%s\")\n\n        self.assertEqual({'keep:99999999999999999999999999999991+99/hw.py': MapperEnt(resolved='keep:99999999999999999999999999999991+99/hw.py', target='/test/99999999999999999999999999999991+99/hw.py', type='File', staged=True)},\n                         p._pathmap)\n\n    @mock.patch(\"arvados.commands.run.uploadfiles\")\n    @mock.patch(\"arvados.commands.run.statfile\")\n    def test_upload(self, statfile, upl):\n        \"\"\"Test pathmapper uploading files.\"\"\"\n\n        arvrunner = arvados_cwl.executor.ArvCwlExecutor(self.api)\n\n        def statfile_mock(prefix, fn, fnPattern=\"$(file %s/%s)\", dirPattern=\"$(dir %s/%s/)\", raiseOSError=False):\n            st = arvados.commands.run.UploadFile(\"\", \"tests/hw.py\")\n            return st\n\n        upl.side_effect = upload_mock\n        statfile.side_effect = statfile_mock\n\n        p = ArvPathMapper(arvrunner, [{\n            \"class\": \"File\",\n            \"location\": \"file:tests/hw.py\"\n        }], \"\", \"/test/%s\", \"/test/%s/%s\")\n\n        self.assertEqual({'file:tests/hw.py': MapperEnt(resolved='keep:99999999999999999999999999999991+99/hw.py', target='/test/99999999999999999999999999999991+99/hw.py', type='File', staged=True)},\n                         p._pathmap)\n\n    @mock.patch(\"arvados.commands.run.uploadfiles\")\n    @mock.patch(\"arvados.commands.run.statfile\")\n    def test_statfile(self, statfile, upl):\n        \"\"\"Test pathmapper handling ArvFile references.\"\"\"\n        arvrunner = arvados_cwl.executor.ArvCwlExecutor(self.api)\n\n        # An ArvFile object returned from arvados.commands.run.statfile means the file is located on a\n        # keep mount, so we can construct a direct reference directly without upload.\n        def statfile_mock(prefix, fn, fnPattern=\"$(file %s/%s)\", dirPattern=\"$(dir %s/%s/)\", raiseOSError=False):\n            st = 
arvados.commands.run.ArvFile(\"\", fnPattern % (\"99999999999999999999999999999991+99\", \"hw.py\"))\n            return st\n\n        upl.side_effect = upload_mock\n        statfile.side_effect = statfile_mock\n\n        p = ArvPathMapper(arvrunner, [{\n            \"class\": \"File\",\n            \"location\": \"file:tests/hw.py\"\n        }], \"\", \"/test/%s\", \"/test/%s/%s\")\n\n        self.assertEqual({'file:tests/hw.py': MapperEnt(resolved='keep:99999999999999999999999999999991+99/hw.py', target='/test/99999999999999999999999999999991+99/hw.py', type='File', staged=True)},\n                         p._pathmap)\n\n    @mock.patch(\"os.stat\")\n    def test_missing_file(self, stat):\n        \"\"\"Test pathmapper handling missing references.\"\"\"\n        arvrunner = arvados_cwl.executor.ArvCwlExecutor(self.api)\n\n        stat.side_effect = OSError(2, \"No such file or directory\")\n\n        with self.assertRaises(OSError):\n            p = ArvPathMapper(arvrunner, [{\n                \"class\": \"File\",\n                \"location\": \"file:tests/hw.py\"\n            }], \"\", \"/test/%s\", \"/test/%s/%s\")\n\n    def test_needs_new_collection(self):\n        arvrunner = arvados_cwl.executor.ArvCwlExecutor(self.api)\n\n        # Plain file.  Don't need a new collection.\n        a = {\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999991+99/hw.py\",\n            \"basename\": \"hw.py\"\n        }\n        p = ArvPathMapper(arvrunner, [], \"\", \"%s\", \"%s/%s\")\n        p._pathmap[\"keep:99999999999999999999999999999991+99/hw.py\"] = True\n        self.assertFalse(p.needs_new_collection(a))\n\n        # A file that isn't in the pathmap (for some reason).  Need a new collection.\n        p = ArvPathMapper(arvrunner, [], \"\", \"%s\", \"%s/%s\")\n        self.assertTrue(p.needs_new_collection(a))\n\n        # A file with a secondary file in the same collection.  Don't need\n        # a new collection.\n        a = {\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999991+99/hw.py\",\n            \"basename\": \"hw.py\",\n            \"secondaryFiles\": [{\n                \"class\": \"File\",\n                \"location\": \"keep:99999999999999999999999999999991+99/hw.pyc\",\n                \"basename\": \"hw.pyc\"\n            }]\n        }\n        p = ArvPathMapper(arvrunner, [], \"\", \"%s\", \"%s/%s\")\n        p._pathmap[\"keep:99999999999999999999999999999991+99/hw.py\"] = True\n        p._pathmap[\"keep:99999999999999999999999999999991+99/hw.pyc\"] = True\n        self.assertFalse(p.needs_new_collection(a))\n\n        # Secondary file is in a different collection from the\n        # a new collectionprimary.  
Need a new collection.\n        a = {\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999991+99/hw.py\",\n            \"basename\": \"hw.py\",\n            \"secondaryFiles\": [{\n                \"class\": \"File\",\n                \"location\": \"keep:99999999999999999999999999999992+99/hw.pyc\",\n                \"basename\": \"hw.pyc\"\n            }]\n        }\n        p = ArvPathMapper(arvrunner, [], \"\", \"%s\", \"%s/%s\")\n        p._pathmap[\"keep:99999999999999999999999999999991+99/hw.py\"] = True\n        p._pathmap[\"keep:99999999999999999999999999999992+99/hw.pyc\"] = True\n        self.assertTrue(p.needs_new_collection(a))\n\n        # Secondary file should be staged to a different name than\n        # path in location.  Need a new collection.\n        a = {\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999991+99/hw.py\",\n            \"basename\": \"hw.py\",\n            \"secondaryFiles\": [{\n                \"class\": \"File\",\n                \"location\": \"keep:99999999999999999999999999999991+99/hw.pyc\",\n                \"basename\": \"hw.other\"\n            }]\n        }\n        p = ArvPathMapper(arvrunner, [], \"\", \"%s\", \"%s/%s\")\n        p._pathmap[\"keep:99999999999999999999999999999991+99/hw.py\"] = True\n        p._pathmap[\"keep:99999999999999999999999999999991+99/hw.pyc\"] = True\n        self.assertTrue(p.needs_new_collection(a))\n\n        # Secondary file is a directory.  Do not need a new collection.\n        a = {\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999991+99/hw.py\",\n            \"basename\": \"hw.py\",\n            \"secondaryFiles\": [{\n                \"class\": \"Directory\",\n                \"location\": \"keep:99999999999999999999999999999991+99/hw\",\n                \"basename\": \"hw\",\n                \"listing\": [{\n                    \"class\": \"File\",\n                    \"location\": \"keep:99999999999999999999999999999991+99/hw/h2\",\n                    \"basename\": \"h2\"\n                }]\n            }]\n        }\n        p = ArvPathMapper(arvrunner, [], \"\", \"%s\", \"%s/%s\")\n        p._pathmap[\"keep:99999999999999999999999999999991+99/hw.py\"] = True\n        p._pathmap[\"keep:99999999999999999999999999999991+99/hw\"] = True\n        p._pathmap[\"keep:99999999999999999999999999999991+99/hw/h2\"] = True\n        self.assertFalse(p.needs_new_collection(a))\n\n        # Secondary file is a renamed directory.  
Need a new collection.\n        a = {\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999991+99/hw.py\",\n            \"basename\": \"hw.py\",\n            \"secondaryFiles\": [{\n                \"class\": \"Directory\",\n                \"location\": \"keep:99999999999999999999999999999991+99/hw\",\n                \"basename\": \"wh\",\n                \"listing\": [{\n                    \"class\": \"File\",\n                    \"location\": \"keep:99999999999999999999999999999991+99/hw/h2\",\n                    \"basename\": \"h2\"\n                }]\n            }]\n        }\n        p = ArvPathMapper(arvrunner, [], \"\", \"%s\", \"%s/%s\")\n        p._pathmap[\"keep:99999999999999999999999999999991+99/hw.py\"] = True\n        p._pathmap[\"keep:99999999999999999999999999999991+99/hw\"] = True\n        p._pathmap[\"keep:99999999999999999999999999999991+99/hw/h2\"] = True\n        self.assertTrue(p.needs_new_collection(a))\n\n        # Secondary file is a file literal.  Need a new collection.\n        a = {\n            \"class\": \"File\",\n            \"location\": \"keep:99999999999999999999999999999991+99/hw.py\",\n            \"basename\": \"hw.py\",\n            \"secondaryFiles\": [{\n                \"class\": \"File\",\n                \"location\": \"_:123\",\n                \"basename\": \"hw.pyc\",\n                \"contents\": \"123\"\n            }]\n        }\n        p = ArvPathMapper(arvrunner, [], \"\", \"%s\", \"%s/%s\")\n        p._pathmap[\"keep:99999999999999999999999999999991+99/hw.py\"] = True\n        p._pathmap[\"_:123\"] = True\n        self.assertTrue(p.needs_new_collection(a))\n\n    def test_is_in_collection(self):\n        arvrunner = arvados_cwl.executor.ArvCwlExecutor(self.api)\n        self.maxDiff = 1000000\n\n        cwd = os.getcwd()\n        p = ArvPathMapper(arvrunner, [{\n            \"class\": \"File\",\n            \"location\": \"file://\"+cwd+\"/tests/fake-keep-mount/fake_collection_dir/subdir/banana.txt\"\n        }], \"\", \"/test/%s\", \"/test/%s/%s\")\n\n        self.assertEqual({\"file://\"+cwd+\"/tests/fake-keep-mount/fake_collection_dir/subdir/banana.txt\": MapperEnt(resolved='keep:99999999999999999999999999999991+99/subdir/banana.txt', target='/test/99999999999999999999999999999991+99/subdir/banana.txt', type='File', staged=True)},\n                         p._pathmap)\n"
  },
  {
    "path": "sdk/cwl/tests/test_submit.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport copy\nimport io\nimport itertools\nimport functools\nimport hashlib\nimport json\nimport logging\nimport sys\nimport unittest\nimport re\nimport os\nimport collections\n\nfrom io import BytesIO, StringIO\nfrom unittest import mock\n\nimport arvados\nimport arvados.collection\nimport arvados_cwl\nimport arvados_cwl.executor\nimport arvados_cwl.runner\nimport arvados.keep\n\nimport cwltool.process\n\nfrom .matcher import JsonDiffMatcher, StripYAMLComments\nfrom .mock_discovery import get_rootDesc\n\nimport ruamel.yaml\n\n_rootDesc = None\n\ndef stubs(wfdetails=('submit_wf.cwl', None)):\n    def outer_wrapper(func, *rest):\n        @functools.wraps(func)\n        @mock.patch(\"arvados_cwl.arvdocker.determine_image_id\")\n        @mock.patch(\"uuid.uuid4\")\n        @mock.patch(\"arvados.commands.keepdocker.list_images_in_arv\")\n        @mock.patch(\"arvados.collection.KeepClient\")\n        @mock.patch(\"arvados.keep.KeepClient\")\n        @mock.patch(\"arvados.events.subscribe\")\n        def wrapped(self, events, keep_client1, keep_client2, keepdocker,\n                    uuid4, determine_image_id, *args, **kwargs):\n            class Stubs(object):\n                pass\n\n            wfname = wfdetails[0]\n            wfpath = wfdetails[1]\n\n            stubs = Stubs()\n            stubs.events = events\n            stubs.keepdocker = keepdocker\n\n            uuid4.side_effect = [\"df80736f-f14d-4b10-b2e3-03aa27f034bb\", \"df80736f-f14d-4b10-b2e3-03aa27f034b1\",\n                                 \"df80736f-f14d-4b10-b2e3-03aa27f034b2\", \"df80736f-f14d-4b10-b2e3-03aa27f034b3\",\n                                 \"df80736f-f14d-4b10-b2e3-03aa27f034b4\", \"df80736f-f14d-4b10-b2e3-03aa27f034b5\",\n                                 \"df80736f-f14d-4b10-b2e3-03aa27f034b6\", \"df80736f-f14d-4b10-b2e3-03aa27f034b7\",\n                                 \"df80736f-f14d-4b10-b2e3-03aa27f034b8\", \"df80736f-f14d-4b10-b2e3-03aa27f034b9\",\n                                 \"df80736f-f14d-4b10-b2e3-03aa27f034c0\", \"df80736f-f14d-4b10-b2e3-03aa27f034c1\",\n                                 \"df80736f-f14d-4b10-b2e3-03aa27f034c2\", \"df80736f-f14d-4b10-b2e3-03aa27f034c3\",\n                                 \"df80736f-f14d-4b10-b2e3-03aa27f034c4\", \"df80736f-f14d-4b10-b2e3-03aa27f034c5\",\n                                 \"df80736f-f14d-4b10-b2e3-03aa27f034c6\", \"df80736f-f14d-4b10-b2e3-03aa27f034c7\"]\n\n            determine_image_id.return_value = None\n\n            def putstub(p, **kwargs):\n                return \"%s+%i\" % (hashlib.md5(p).hexdigest(), len(p))\n            keep_client1().put.side_effect = putstub\n            keep_client1.put.side_effect = putstub\n            keep_client2().put.side_effect = putstub\n            keep_client2.put.side_effect = putstub\n\n            stubs.keep_client = keep_client2\n            stubs.docker_images = {\n                \"arvados/jobs:\"+arvados_cwl.__version__: [(\"zzzzz-4zz18-zzzzzzzzzzzzzd3\", {})],\n                \"debian:buster-slim\": [(\"zzzzz-4zz18-zzzzzzzzzzzzzd4\", {})],\n                \"arvados/jobs:123\": [(\"zzzzz-4zz18-zzzzzzzzzzzzzd5\", {})],\n                \"arvados/jobs:latest\": [(\"zzzzz-4zz18-zzzzzzzzzzzzzd6\", {})],\n            }\n            def kd(a, b, image_name=None, image_tag=None, project_uuid=None):\n                return stubs.docker_images.get(\"%s:%s\" % (image_name, image_tag), 
[])\n            stubs.keepdocker.side_effect = kd\n\n            stubs.fake_user_uuid = \"zzzzz-tpzed-zzzzzzzzzzzzzzz\"\n            stubs.fake_container_uuid = \"zzzzz-dz642-zzzzzzzzzzzzzzz\"\n\n            stubs.capture_stdout = StringIO()\n\n            stubs.api = mock.MagicMock()\n            stubs.api._rootDesc = get_rootDesc()\n            stubs.api._rootDesc[\"uuidPrefix\"] = \"zzzzz\"\n            stubs.api._rootDesc[\"revision\"] = \"20210628\"\n\n            stubs.api.users().current().execute.return_value = {\n                \"uuid\": stubs.fake_user_uuid,\n            }\n            stubs.api.collections().list().execute.return_value = {\"items\": []}\n            stubs.api.containers().current().execute.return_value = {\n                \"uuid\": stubs.fake_container_uuid,\n            }\n            stubs.api.config()[\"StorageClasses\"].items.return_value = {\n                \"default\": {\n                    \"Default\": True\n                }\n            }.items()\n\n            class CollectionExecute(object):\n                def __init__(self, exe):\n                    self.exe = exe\n                def execute(self, num_retries=None):\n                    return self.exe\n\n            def collection_createstub(created_collections, body, ensure_unique_name=None):\n                mt = body[\"manifest_text\"].encode('utf-8')\n                uuid = \"zzzzz-4zz18-zzzzzzzzzzzzzx%d\" % len(created_collections)\n                pdh = \"%s+%i\" % (hashlib.md5(mt).hexdigest(), len(mt))\n                created_collections[uuid] = {\n                    \"uuid\": uuid,\n                    \"portable_data_hash\": pdh,\n                    \"manifest_text\": mt.decode('utf-8')\n                }\n                return CollectionExecute(created_collections[uuid])\n\n            def collection_getstub(created_collections, uuid):\n                for v in created_collections.values():\n                    if uuid in (v[\"uuid\"], v[\"portable_data_hash\"]):\n                        return CollectionExecute(v)\n\n            created_collections = {\n                \"99999999999999999999999999999998+99\": {\n                    \"uuid\": \"\",\n                    \"portable_data_hash\": \"99999999999999999999999999999998+99\",\n                    \"manifest_text\": \". 99999999999999999999999999999998+99 0:0:file1.txt\"\n                },\n                \"99999999999999999999999999999997+99\": {\n                    \"uuid\": \"\",\n                    \"portable_data_hash\": \"99999999999999999999999999999997+99\",\n                    \"manifest_text\": \". 99999999999999999999999999999997+99 0:0:file1.txt\"\n                },\n                \"99999999999999999999999999999994+99\": {\n                    \"uuid\": \"\",\n                    \"portable_data_hash\": \"99999999999999999999999999999994+99\",\n                    \"manifest_text\": \". 
99999999999999999999999999999994+99 0:0:expect_arvworkflow.cwl\"\n                },\n                \"zzzzz-4zz18-zzzzzzzzzzzzzd3\": {\n                    \"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzd3\",\n                    \"portable_data_hash\": \"999999999999999999999999999999d3+99\",\n                    \"manifest_text\": \"\"\n                },\n                \"zzzzz-4zz18-zzzzzzzzzzzzzd4\": {\n                    \"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzd4\",\n                    \"portable_data_hash\": \"999999999999999999999999999999d4+99\",\n                    \"manifest_text\": \"\"\n                },\n                \"zzzzz-4zz18-zzzzzzzzzzzzzd5\": {\n                    \"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzd5\",\n                    \"portable_data_hash\": \"999999999999999999999999999999d5+99\",\n                    \"manifest_text\": \"\"\n                },\n                \"zzzzz-4zz18-zzzzzzzzzzzzzd6\": {\n                    \"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzd6\",\n                    \"portable_data_hash\": \"999999999999999999999999999999d6+99\",\n                    \"manifest_text\": \"\"\n                }\n            }\n            stubs.api.collections().create.side_effect = functools.partial(collection_createstub, created_collections)\n            stubs.api.collections().get.side_effect = functools.partial(collection_getstub, created_collections)\n\n            stubs.expect_container_request_uuid = \"zzzzz-xvhdp-zzzzzzzzzzzzzzz\"\n            stubs.api.container_requests().create().execute.return_value = {\n                \"uuid\": stubs.expect_container_request_uuid,\n                \"container_uuid\": \"zzzzz-dz642-zzzzzzzzzzzzzzz\",\n                \"state\": \"Queued\"\n            }\n\n            cwd = os.getcwd()\n            filepath = os.path.join(cwd, \"tests/wf/submit_wf_wrapper.cwl\")\n            with open(filepath) as f:\n                yaml = ruamel.yaml.YAML(typ='rt', pure=True)\n                expect_packed_workflow = yaml.load(f)\n\n            if wfpath is None:\n                wfpath = wfname\n\n            gitinfo_workflow = copy.deepcopy(expect_packed_workflow)\n            gitinfo_workflow[\"$graph\"][0][\"id\"] = \"file://%s/tests/wf/%s\" % (cwd, wfpath)\n            mocktool = mock.NonCallableMock(tool=gitinfo_workflow[\"$graph\"][0], metadata=gitinfo_workflow)\n\n            stubs.git_info = arvados_cwl.executor.ArvCwlExecutor.get_git_info(mocktool)\n            expect_packed_workflow.update(stubs.git_info)\n\n            stubs.git_props = {\"arv:\"+k.split(\"#\", 1)[1]: v for k,v in stubs.git_info.items()}\n\n            step_name = \"%s (%s)\" % (wfpath, stubs.git_props[\"arv:gitDescribe\"])\n            if wfname == wfpath:\n                container_name = \"%s (%s)\" % (wfpath, stubs.git_props[\"arv:gitDescribe\"])\n            else:\n                container_name = wfname\n\n            expect_packed_workflow[\"$graph\"][0][\"steps\"][0][\"id\"] = \"#main/\"+step_name\n            expect_packed_workflow[\"$graph\"][0][\"steps\"][0][\"label\"] = container_name\n\n            stubs.expect_container_spec = {\n                'priority': 500,\n                'mounts': {\n                    '/var/spool/cwl': {\n                        'writable': True,\n                        'kind': 'collection'\n                    },\n                    '/var/lib/cwl/workflow.json': {\n                        'content': expect_packed_workflow,\n                        'kind': 'json'\n                    },\n                    
'stdout': {\n                        'path': '/var/spool/cwl/cwl.output.json',\n                        'kind': 'file'\n                    },\n                    '/var/lib/cwl/cwl.input.json': {\n                        'content': {\n                            'y': {\n                                'basename': '99999999999999999999999999999998+99',\n                                'location': 'keep:99999999999999999999999999999998+99',\n                                'class': 'Directory'},\n                            'x': {\n                                'basename': u'blorp.txt',\n                                'class': 'File',\n                                'location': u'keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt',\n                                \"size\": 16\n                            },\n                            'z': {'basename': 'anonymous', 'class': 'Directory', 'listing': [\n                                {'basename': 'renamed.txt',\n                                 'class': 'File',\n                                 'location': 'keep:99999999999999999999999999999998+99/file1.txt',\n                                 'size': 0\n                                }\n                            ]}\n                        },\n                        'kind': 'json'\n                    }\n                },\n                'secret_mounts': {},\n                'state': 'Committed',\n                'command': ['arvados-cwl-runner', '--local', '--api=containers',\n                            '--no-log-timestamps', '--disable-validate', '--disable-color',\n                            '--eval-timeout=20', '--thread-count=0',\n                            '--enable-reuse', \"--collection-cache-size=256\",\n                            '--output-name=Output from workflow '+container_name,\n                            '--debug', '--on-error=continue',\n                            '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json'],\n                'name': container_name,\n                'container_image': '999999999999999999999999999999d3+99',\n                'output_name': 'Output from workflow %s' % (container_name),\n                'output_path': '/var/spool/cwl',\n                'cwd': '/var/spool/cwl',\n                'runtime_constraints': {\n                    'API': True,\n                    'vcpus': 1,\n                    'ram': (1024+256)*1024*1024\n                },\n                'properties': stubs.git_props,\n                'use_existing': False,\n                'environment': {},\n            }\n\n            stubs.expect_workflow_uuid = \"zzzzz-7fd4e-zzzzzzzzzzzzzzz\"\n            stubs.api.workflows().create().execute.return_value = {\n                \"uuid\": stubs.expect_workflow_uuid,\n            }\n            def update_mock(**kwargs):\n                stubs.updated_uuid = kwargs.get('uuid')\n                return mock.DEFAULT\n            stubs.api.workflows().update.side_effect = update_mock\n            stubs.api.workflows().update().execute.side_effect = lambda **kwargs: {\n                \"uuid\": stubs.updated_uuid,\n            }\n\n            return func(self, stubs, *args, **kwargs)\n        return wrapped\n    return outer_wrapper\n\nclass TestSubmit(unittest.TestCase):\n\n    def setUp(self):\n        cwltool.process._names = set()\n        #arvados_cwl.arvdocker.arv_docker_clear_cache()\n\n    def tearDown(self):\n        
root_logger = logging.getLogger('')\n\n        # Remove existing RuntimeStatusLoggingHandlers if they exist\n        handlers = [h for h in root_logger.handlers if not isinstance(h, arvados_cwl.executor.RuntimeStatusLoggingHandler)]\n        root_logger.handlers = handlers\n\n    @mock.patch(\"time.sleep\")\n    @stubs()\n    def test_submit_invalid_runner_ram(self, stubs, tm):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--debug\", \"--submit-runner-ram=-2048\",\n             \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api)\n        self.assertEqual(exited, 1)\n\n\n    @stubs()\n    def test_submit_container(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        stubs.api.collections().create.assert_has_calls([\n            mock.call(body=JsonDiffMatcher({\n                'manifest_text':\n                '. 979af1245a12a1fed634d4222473bfdc+16 0:16:blorp.txt\\n',\n                'replication_desired': None,\n                'name': 'submit_wf.cwl ('+ stubs.git_props[\"arv:gitDescribe\"] +') input (169f39d466a5438ac4a90e779bf750c7+53)',\n            }), ensure_unique_name=False),\n            mock.call(body=JsonDiffMatcher({\n                'manifest_text':\n                '. 5bcc9fe8f8d5992e6cf418dc7ce4dbb3+16 0:16:blub.txt\\n',\n                'replication_desired': None,\n                'name': 'submit_tool.cwl dependencies (5d373e7629203ce39e7c22af98a0f881+52)',\n            }), ensure_unique_name=False),\n            ])\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n\n    @stubs()\n    def test_submit_container_tool(self, stubs):\n        # test for issue #16139\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\",\n                \"tests/tool/tool_with_sf.cwl\", \"tests/tool/tool_with_sf.yml\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_container_no_reuse(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--disable-reuse\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = [\n            'arvados-cwl-runner', '--local', '--api=containers',\n            '--no-log-timestamps', '--disable-validate', '--disable-color',\n            '--eval-timeout=20', '--thread-count=0',\n            '--disable-reuse', \"--collection-cache-size=256\",\n            
'--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n            '--debug', '--on-error=continue',\n            '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n        expect_container[\"use_existing\"] = False\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs(('submit_wf_no_reuse.cwl', None))\n    def test_submit_container_reuse_disabled_by_workflow(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\",\n             \"tests/wf/submit_wf_no_reuse.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n        self.assertEqual(exited, 0)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = [\"--disable-reuse\" if v == \"--enable-reuse\" else v for v in expect_container[\"command\"]]\n        expect_container[\"use_existing\"] = False\n        expect_container[\"mounts\"][\"/var/lib/cwl/workflow.json\"][\"content\"][\"$graph\"][0][\"hints\"] = [\n            {\n                \"class\": \"WorkReuse\",\n                \"enableReuse\": False,\n            },\n                {\n                    \"acrContainerImage\": \"999999999999999999999999999999d3+99\",\n                    \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\"\n                }\n        ]\n        expect_container[\"mounts\"][\"/var/lib/cwl/workflow.json\"][\"content\"][\"$graph\"][0][\"steps\"][0][\"run\"] = \"keep:0c12c72eb112405548c0369c987aef61+292/wf/submit_wf_no_reuse.cwl\"\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n\n\n    @stubs()\n    def test_submit_container_on_error(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--on-error=stop\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       '--debug', '--on-error=stop',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid 
+ '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_container_output_name(self, stubs):\n        output_name = \"test_output_name\"\n\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--output-name\", output_name,\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       \"--output-name=\"+output_name, '--debug', '--on-error=continue',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n        expect_container[\"output_name\"] = output_name\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_storage_classes(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--debug\", \"--submit\", \"--no-wait\", \"--api=containers\", \"--storage-classes=foo\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       \"--debug\",\n                                       \"--storage-classes=foo\", '--on-error=continue',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_multiple_storage_classes(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--debug\", \"--submit\", \"--no-wait\", \"--api=containers\", \"--storage-classes=foo,bar\", \"--intermediate-storage-classes=baz\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = 
['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       \"--debug\",\n                                       \"--storage-classes=foo,bar\", \"--intermediate-storage-classes=baz\", '--on-error=continue',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_invalid_empty_storage_classes(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--debug\", \"--submit\", \"--no-wait\", \"--api=containers\", \"--storage-classes=\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       \"--debug\",\n                                       '--on-error=continue',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_invalid_malformed_storage_classes(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--debug\", \"--submit\", \"--no-wait\", \"--api=containers\", \"--storage-classes=,,,,,\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                
       \"--debug\",\n                                       '--on-error=continue',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_duplicate_storage_classes(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--debug\", \"--submit\", \"--no-wait\", \"--api=containers\", \"--storage-classes=,foo,bar,,foo,\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       \"--debug\",\n                                       \"--storage-classes=foo,bar\", '--on-error=continue',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @mock.patch(\"cwltool.task_queue.TaskQueue\")\n    @mock.patch(\"arvados_cwl.arvworkflow.ArvadosWorkflow.job\")\n    @mock.patch(\"arvados_cwl.executor.ArvCwlExecutor.make_output_collection\")\n    @stubs()\n    def test_storage_classes_correctly_propagate_to_make_output_collection(self, stubs, make_output, job, tq):\n        final_output_c = arvados.collection.Collection()\n        make_output.return_value = ({},final_output_c)\n\n        def set_final_output(job_order, output_callback, runtimeContext):\n            output_callback({\"out\": \"zzzzz\"}, \"success\")\n            return []\n        job.side_effect = set_final_output\n\n        exited = arvados_cwl.main(\n            [\"--debug\", \"--local\", \"--storage-classes=foo\", \"--disable-git\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        make_output.assert_called_with(u'Output from workflow submit_wf.cwl', ['foo'], '', {}, {\"out\": \"zzzzz\"})\n        self.assertEqual(exited, 0)\n\n    @mock.patch(\"cwltool.task_queue.TaskQueue\")\n    @mock.patch(\"arvados_cwl.arvworkflow.ArvadosWorkflow.job\")\n    @mock.patch(\"arvados_cwl.executor.ArvCwlExecutor.make_output_collection\")\n    @stubs()\n    def test_default_storage_classes_correctly_propagate_to_make_output_collection(self, stubs, make_output, job, tq):\n        final_output_c = arvados.collection.Collection()\n        make_output.return_value = 
({},final_output_c)\n        stubs.api.config().get.return_value = {\"default\": {\"Default\": True}}\n\n        def set_final_output(job_order, output_callback, runtimeContext):\n            output_callback({\"out\": \"zzzzz\"}, \"success\")\n            return []\n        job.side_effect = set_final_output\n\n        exited = arvados_cwl.main(\n            [\"--debug\", \"--local\", \"--disable-git\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        make_output.assert_called_with(u'Output from workflow submit_wf.cwl', ['default'], '', {}, {\"out\": \"zzzzz\"})\n        self.assertEqual(exited, 0)\n\n    @mock.patch(\"cwltool.task_queue.TaskQueue\")\n    @mock.patch(\"arvados_cwl.arvworkflow.ArvadosWorkflow.job\")\n    @mock.patch(\"arvados_cwl.executor.ArvCwlExecutor.make_output_collection\")\n    @stubs()\n    def test_storage_class_hint_to_make_output_collection(self, stubs, make_output, job, tq):\n        final_output_c = arvados.collection.Collection()\n        make_output.return_value = ({},final_output_c)\n\n        def set_final_output(job_order, output_callback, runtimeContext):\n            output_callback({\"out\": \"zzzzz\"}, \"success\")\n            return []\n        job.side_effect = set_final_output\n\n        exited = arvados_cwl.main(\n            [\"--debug\", \"--local\", \"--disable-git\",\n                \"tests/wf/submit_storage_class_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        make_output.assert_called_with(u'Output from workflow submit_storage_class_wf.cwl', ['foo', 'bar'], '', {}, {\"out\": \"zzzzz\"})\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_container_output_ttl(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--intermediate-output-ttl\", \"3600\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       '--debug',\n                                       '--on-error=continue',\n                                       \"--intermediate-output-ttl=3600\",\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_container_trash_intermediate(self, stubs):\n        exited = arvados_cwl.main(\n            
[\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--trash-intermediate\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       '--debug', '--on-error=continue',\n                                       \"--trash-intermediate\",\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_container_output_tags(self, stubs):\n        output_tags = \"tag0,tag1,tag2\"\n\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--output-tags\", output_tags,\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       \"--output-tags=\"+output_tags, '--debug', '--on-error=continue',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_container_runner_ram(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--submit-runner-ram=2048\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"runtime_constraints\"][\"ram\"] = (2048+256)*1024*1024\n        
expect_container[\"mounts\"][\"/var/lib/cwl/workflow.json\"][\"content\"][\"$graph\"][0][\"hints\"] = [\n                {\n                    \"acrContainerImage\": \"999999999999999999999999999999d3+99\",\n                    \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\",\n                    \"ramMin\": 2048\n                }\n        ]\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    @mock.patch(\"time.sleep\")\n    @stubs()\n    def test_submit_file_keepref(self, stubs, tm, collectionReader):\n        collectionReader().exists.return_value = True\n        collectionReader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), \"blorp.txt\")\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\",\n             \"tests/wf/submit_keepref_wf.cwl\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api)\n        self.assertEqual(exited, 0)\n\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    @mock.patch(\"time.sleep\")\n    @stubs()\n    def test_submit_keepref(self, stubs, tm, reader):\n        with open(\"tests/wf/expect_arvworkflow.cwl\") as f:\n            reader().open().__enter__().read.return_value = f.read()\n\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\",\n             \"keep:99999999999999999999999999999994+99/expect_arvworkflow.cwl#main\", \"-x\", \"XxX\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api)\n\n        expect_container = {\n            'priority': 500,\n            'mounts': {\n                '/var/spool/cwl': {\n                    'writable': True,\n                    'kind': 'collection'\n                },\n                'stdout': {\n                    'path': '/var/spool/cwl/cwl.output.json',\n                    'kind': 'file'\n                },\n                '/var/lib/cwl/workflow': {\n                    'portable_data_hash': '99999999999999999999999999999994+99',\n                    'kind': 'collection'\n                },\n                '/var/lib/cwl/cwl.input.json': {\n                    'content': {\n                        'x': 'XxX'\n                    },\n                    'kind': 'json'\n                }\n            }, 'state': 'Committed',\n            'output_path': '/var/spool/cwl',\n            'name': 'expect_arvworkflow.cwl#main',\n            'output_name': 'Output from workflow expect_arvworkflow.cwl#main',\n            'container_image': '999999999999999999999999999999d3+99',\n            'command': ['arvados-cwl-runner', '--local', '--api=containers',\n                        '--no-log-timestamps', '--disable-validate', '--disable-color',\n                        '--eval-timeout=20', '--thread-count=0',\n                        '--enable-reuse', \"--collection-cache-size=256\",\n                        '--output-name=Output from workflow expect_arvworkflow.cwl#main',\n                        '--debug', '--on-error=continue',\n                        '/var/lib/cwl/workflow/expect_arvworkflow.cwl#main', '/var/lib/cwl/cwl.input.json'],\n            'cwd': '/var/spool/cwl',\n            'runtime_constraints': {\n                
'API': True,\n                'vcpus': 1,\n                'ram': 1342177280\n            },\n            'use_existing': False,\n            'properties': {},\n            'secret_mounts': {},\n            'environment': {},\n        }\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @mock.patch(\"time.sleep\")\n    @stubs()\n    def test_submit_arvworkflow(self, stubs, tm):\n        with open(\"tests/wf/expect_arvworkflow.cwl\") as f:\n            stubs.api.workflows().get().execute.return_value = {\"definition\": f.read(), \"name\": \"a test workflow\"}\n\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--disable-git\",\n             \"962eh-7fd4e-gkbzl62qqtfig37\", \"-x\", \"XxX\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api)\n\n        expect_container = {\n            'priority': 500,\n            'mounts': {\n                '/var/spool/cwl': {\n                    'writable': True,\n                    'kind': 'collection'\n                },\n                'stdout': {\n                    'path': '/var/spool/cwl/cwl.output.json',\n                    'kind': 'file'\n                },\n                '/var/lib/cwl/workflow.json': {\n                    'kind': 'json',\n                    'content': {\n                        'cwlVersion': 'v1.0',\n                        'label': 'a test workflow',\n                        '$graph': [\n                            {\n                                'id': '#main',\n                                'inputs': [\n                                    {'type': 'string', 'id': '#main/x'}\n                                ],\n                                'steps': [\n                                    {'in': [{'source': '#main/x', 'id': '#main/step1/x'}],\n                                     'run': '#submit_tool.cwl',\n                                     'id': '#main/step1',\n                                     'out': []}\n                                ],\n                                'class': 'Workflow',\n                                'outputs': []\n                            },\n                            {\n                                'inputs': [\n                                    {\n                                        'inputBinding': {'position': 1},\n                                        'type': 'string',\n                                        'id': '#submit_tool.cwl/x'}\n                                ],\n                                'requirements': [\n                                    {\n                                        'dockerPull': 'debian:buster-slim',\n                                        'class': 'DockerRequirement'\n                                    }\n                                ],\n                                'id': '#submit_tool.cwl',\n                                'outputs': [],\n                                'baseCommand': 'cat',\n                                'class': 'CommandLineTool'\n                            }\n                        ]\n                    }\n                },\n                '/var/lib/cwl/cwl.input.json': {\n                    'content': {\n                        'x': 'XxX'\n                    },\n  
                  'kind': 'json'\n                }\n            }, 'state': 'Committed',\n            'output_path': '/var/spool/cwl',\n            'name': 'a test workflow',\n            'container_image': \"999999999999999999999999999999d3+99\",\n            'command': ['arvados-cwl-runner', '--local', '--api=containers',\n                        '--no-log-timestamps', '--disable-validate', '--disable-color',\n                        '--eval-timeout=20', '--thread-count=0',\n                        '--enable-reuse', \"--collection-cache-size=256\",\n                        \"--output-name=Output from workflow a test workflow\",\n                        '--debug', '--on-error=continue',\n                        '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json'],\n            'output_name': 'Output from workflow a test workflow',\n            'cwd': '/var/spool/cwl',\n            'runtime_constraints': {\n                'API': True,\n                'vcpus': 1,\n                'ram': 1342177280\n            },\n            'use_existing': False,\n            'properties': {\n                \"template_uuid\": \"962eh-7fd4e-gkbzl62qqtfig37\"\n            },\n            'secret_mounts': {},\n            'environment': {},\n        }\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs(('hello container 123', 'submit_wf.cwl'))\n    def test_submit_container_name(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--name=hello container 123\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_missing_input(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\",\n             \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n        self.assertEqual(exited, 0)\n\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\",\n             \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job_missing.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n        self.assertEqual(exited, 1)\n\n    @stubs()\n    def test_submit_container_project(self, stubs):\n        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'\n        stubs.api.groups().get().execute.return_value = {\"group_class\": \"project\"}\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--project-uuid=\"+project_uuid,\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, 
sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"owner_uuid\"] = project_uuid\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       \"--eval-timeout=20\", \"--thread-count=0\",\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       '--debug',\n                                       '--on-error=continue',\n                                       '--project-uuid='+project_uuid,\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_container_eval_timeout(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--eval-timeout=60\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=60.0', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       '--debug', '--on-error=continue',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_container_collection_cache(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--collection-cache-size=500\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', 
\"--collection-cache-size=500\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       '--debug', '--on-error=continue',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n        expect_container[\"runtime_constraints\"][\"ram\"] = (1024+500)*1024*1024\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_container_thread_count(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--thread-count=20\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=20',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       '--debug', '--on-error=continue',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_container_runner_image(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--submit-runner-image=arvados/jobs:123\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        stubs.expect_container_spec[\"container_image\"] = \"999999999999999999999999999999d5+99\"\n        stubs.expect_container_spec[\"mounts\"][\"/var/lib/cwl/workflow.json\"][\"content\"][\"$graph\"][0][\"hints\"] = [\n                {\n                    \"acrContainerImage\": \"999999999999999999999999999999d5+99\",\n                    \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\"\n                }\n        ]\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_priority(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", 
\"--priority=669\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        stubs.expect_container_spec[\"priority\"] = 669\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs(('submit_wf_runner_resources.cwl', None))\n    def test_submit_wf_runner_resources(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\",\n                \"tests/wf/submit_wf_runner_resources.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"runtime_constraints\"] = {\n            \"API\": True,\n            \"vcpus\": 2,\n            \"ram\": (2000+512) * 2**20\n        }\n        expect_container[\"mounts\"][\"/var/lib/cwl/workflow.json\"][\"content\"][\"$graph\"][0][\"hints\"] = [\n            {\n                \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\",\n                \"acrContainerImage\": \"999999999999999999999999999999d3+99\",\n                \"coresMin\": 2,\n                \"ramMin\": 2000,\n                \"keep_cache\": 512\n            }\n        ]\n        #expect_container[\"mounts\"][\"/var/lib/cwl/workflow.json\"][\"content\"][\"$namespaces\"] = {\n        #    \"arv\": \"http://arvados.org/cwl#\",\n        #}\n        expect_container[\"command\"] = [\"--collection-cache-size=512\" if v == \"--collection-cache-size=256\" else v for v in expect_container[\"command\"]]\n        expect_container[\"mounts\"][\"/var/lib/cwl/workflow.json\"][\"content\"][\"$graph\"][0][\"steps\"][0][\"run\"] = \"keep:758635b2486327c80fa90055c8b5b4d2+308/wf/submit_wf_runner_resources.cwl\"\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @mock.patch(\"arvados.commands.keepdocker.find_one_image_hash\")\n    @mock.patch(\"cwltool.docker.DockerCommandLineJob.get_image\")\n    @mock.patch(\"arvados.api\")\n    def test_arvados_jobs_image(self, api, get_image, find_one_image_hash):\n        #arvados_cwl.arvdocker.arv_docker_clear_cache()\n\n        arvrunner = mock.MagicMock()\n        arvrunner.project_uuid = \"\"\n        api.return_value = mock.MagicMock()\n        arvrunner.api = api.return_value\n        arvrunner.runtimeContext.match_local_docker = False\n        arvrunner.api.links().list().execute.side_effect = itertools.cycle([\n            {\"items\": [{\"created_at\": \"2023-08-25T12:34:56.123456Z\",\n                        \"head_uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzzb\",\n                        \"link_class\": \"docker_image_repo+tag\",\n                        \"name\": \"arvados/jobs:\"+arvados_cwl.__version__,\n                        \"owner_uuid\": \"\",\n                        \"uuid\": \"zzzzz-o0j2j-arvadosjobsrepo\",\n                
        \"properties\": {\"image_timestamp\": \"\"}}]},\n            {\"items\": []},\n            {\"items\": []},\n            {\"items\": [{\"created_at\": \"2023-08-25T12:34:57.234567Z\",\n                        \"head_uuid\": \"\",\n                        \"link_class\": \"docker_image_hash\",\n                        \"name\": \"123456\",\n                        \"owner_uuid\": \"\",\n                        \"uuid\": \"zzzzz-o0j2j-arvadosjobshash\",\n                        \"properties\": {\"image_timestamp\": \"\"}}]},\n            {\"items\": []},\n            {\"items\": []},\n        ])\n        find_one_image_hash.return_value = \"123456\"\n\n        arvrunner.api.collections().list().execute.side_effect = itertools.cycle([\n            {\"items\": [{\"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzzb\",\n                        \"owner_uuid\": \"\",\n                        \"manifest_text\": \"\",\n                        \"created_at\": \"2023-08-25T12:34:55.012345Z\",\n                        \"properties\": {}}]},\n            {\"items\": []},\n            {\"items\": []},\n        ])\n        arvrunner.api.collections().create().execute.return_value = {\"uuid\": \"\"}\n        arvrunner.api.collections().get().execute.return_value = {\"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzzb\",\n                                                                  \"portable_data_hash\": \"9999999999999999999999999999999b+99\"}\n\n        self.assertEqual(\"9999999999999999999999999999999b+99\",\n                         arvados_cwl.runner.arvados_jobs_image(arvrunner, \"arvados/jobs:\"+arvados_cwl.__version__, arvrunner.runtimeContext))\n\n\n    @stubs()\n    def test_submit_secrets(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--disable-git\",\n                \"tests/wf/secret_wf.cwl\", \"tests/secret_test_job.yml\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        stubs.git_props[\"arv:gitPath\"] = \"sdk/cwl/tests/wf/secret_wf.cwl\"\n        stubs.git_info[\"http://arvados.org/cwl#gitPath\"] = \"sdk/cwl/tests/wf/secret_wf.cwl\"\n\n        expect_container = {\n            \"command\": [\n                \"arvados-cwl-runner\",\n                \"--local\",\n                \"--api=containers\",\n                \"--no-log-timestamps\",\n                \"--disable-validate\",\n                \"--disable-color\",\n                \"--eval-timeout=20\",\n                '--thread-count=0',\n                \"--enable-reuse\",\n                \"--collection-cache-size=256\",\n                \"--output-name=Output from workflow secret_wf.cwl\",\n                \"--debug\",\n                \"--on-error=continue\",\n                \"/var/lib/cwl/workflow.json#main\",\n                \"/var/lib/cwl/cwl.input.json\"\n            ],\n            \"container_image\": \"999999999999999999999999999999d3+99\",\n            \"cwd\": \"/var/spool/cwl\",\n            \"mounts\": {\n                \"/var/lib/cwl/cwl.input.json\": {\n                    \"content\": {\n                        \"pw\": {\n                            \"$include\": \"/secrets/s0\"\n                        }\n                    },\n                    \"kind\": \"json\"\n                },\n                \"/var/lib/cwl/workflow.json\": {\n                    \"content\": {\n                        \"$graph\": [\n                            {\n                                
\"class\": \"Workflow\",\n                                \"hints\": [\n                                    {\n                                    \"class\": \"DockerRequirement\",\n                                    \"dockerPull\": \"debian:buster-slim\",\n                                    \"http://arvados.org/cwl#dockerCollectionPDH\": \"999999999999999999999999999999d4+99\"\n                                    },\n                                    {\n                                        \"class\": \"http://commonwl.org/cwltool#Secrets\",\n                                        \"secrets\": [\n                                            \"#main/pw\"\n                                        ]\n                                    },\n                                    {\n                                        \"acrContainerImage\": \"999999999999999999999999999999d3+99\",\n                                        \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\"\n                                    }\n                                ],\n                                \"id\": \"#main\",\n                                \"inputs\": [\n                                    {\n                                        \"id\": \"#main/pw\",\n                                        \"type\": \"string\"\n                                    }\n                                ],\n                                \"outputs\": [\n                                    {\n                                        \"id\": \"#main/out\",\n                                        \"outputSource\": \"#main/step/out\",\n                                        \"type\": \"File\"\n                                    }\n                                ],\n                                \"requirements\": [\n                                    {\n                                        \"class\": \"SubworkflowFeatureRequirement\"\n                                    }\n                                ],\n                                \"steps\": [\n                                    {\n                                        \"id\": \"#main/secret_wf.cwl\",\n                                        \"in\": [\n                                            {\n                                                \"id\": \"#main/step/pw\",\n                                                \"source\": \"#main/pw\"\n                                            }\n                                        ],\n                                        \"label\": \"secret_wf.cwl\",\n                                        \"out\": [\n                                            {\"id\": \"#main/step/out\"}\n                                        ],\n                                        \"run\": \"keep:a3b72b40f6df7bc7335df62e066b86ed+247/secret_wf.cwl\"\n                                    }\n                                ]\n                            }\n                        ],\n                        \"cwlVersion\": \"v1.2\"\n                    },\n                    \"kind\": \"json\"\n                },\n                \"/var/spool/cwl\": {\n                    \"kind\": \"collection\",\n                    \"writable\": True\n                },\n                \"stdout\": {\n                    \"kind\": \"file\",\n                    \"path\": \"/var/spool/cwl/cwl.output.json\"\n                }\n            },\n            \"name\": \"secret_wf.cwl\",\n            \"output_name\": \"Output from 
workflow secret_wf.cwl\",\n            \"output_path\": \"/var/spool/cwl\",\n            \"priority\": 500,\n            \"properties\": {},\n            \"runtime_constraints\": {\n                \"API\": True,\n                \"ram\": 1342177280,\n                \"vcpus\": 1\n            },\n            \"secret_mounts\": {\n                \"/secrets/s0\": {\n                    \"content\": \"blorp\",\n                    \"kind\": \"text\"\n                }\n            },\n            \"state\": \"Committed\",\n            \"use_existing\": False,\n            \"environment\": {}\n        }\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_request_uuid(self, stubs):\n        stubs.api._rootDesc[\"remoteHosts\"][\"zzzzz\"] = \"123\"\n        stubs.expect_container_request_uuid = \"zzzzz-xvhdp-yyyyyyyyyyyyyyy\"\n\n        stubs.api.container_requests().update().execute.return_value = {\n            \"uuid\": stubs.expect_container_request_uuid,\n            \"container_uuid\": \"zzzzz-dz642-zzzzzzzzzzzzzzz\",\n            \"state\": \"Queued\"\n        }\n\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--submit-request-uuid=zzzzz-xvhdp-yyyyyyyyyyyyyyy\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        stubs.api.container_requests().update.assert_called_with(\n            uuid=\"zzzzz-xvhdp-yyyyyyyyyyyyyyy\", body=JsonDiffMatcher(stubs.expect_container_spec))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_container_cluster_id(self, stubs):\n        stubs.api._rootDesc[\"remoteHosts\"][\"zbbbb\"] = \"123\"\n\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--submit-runner-cluster=zbbbb\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container), cluster_id=\"zbbbb\")\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_validate_cluster_id(self, stubs):\n        stubs.api._rootDesc[\"remoteHosts\"][\"zbbbb\"] = \"123\"\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--submit-runner-cluster=zcccc\",\n             \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n        self.assertEqual(exited, 1)\n\n    @stubs()\n    def test_submit_validate_project_uuid(self, stubs):\n        # Fails with bad cluster prefix\n        exited = arvados_cwl.main(\n           
 [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--project-uuid=zzzzb-j7d0g-zzzzzzzzzzzzzzz\",\n             \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n        self.assertEqual(exited, 1)\n\n        # Project lookup fails\n        stubs.api.groups().get().execute.side_effect = Exception(\"Bad project\")\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--project-uuid=zzzzz-j7d0g-zzzzzzzzzzzzzzx\",\n             \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n        self.assertEqual(exited, 1)\n\n        # It should work this time because it is looking up a user (and only group is stubbed out to fail)\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--project-uuid=zzzzz-tpzed-zzzzzzzzzzzzzzx\",\n             \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n        self.assertEqual(exited, 0)\n\n\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    @stubs()\n    def test_submit_uuid_inputs(self, stubs, collectionReader):\n        collectionReader().exists.return_value = True\n        collectionReader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), \"file1.txt\")\n        def list_side_effect(**kwargs):\n            m = mock.MagicMock()\n            if \"count\" in kwargs:\n                m.execute.return_value = {\"items\": [\n                    {\"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzzz\", \"portable_data_hash\": \"99999999999999999999999999999998+99\"}\n                ]}\n            else:\n                m.execute.return_value = {\"items\": []}\n            return m\n        stubs.api.collections().list.side_effect = list_side_effect\n        collectionReader().portable_data_hash.return_value = \"99999999999999999999999999999998+99\"\n\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job_with_uuids.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container['mounts']['/var/lib/cwl/cwl.input.json']['content']['y']['basename'] = 'zzzzz-4zz18-zzzzzzzzzzzzzzz'\n        expect_container['mounts']['/var/lib/cwl/cwl.input.json']['content']['y']['http://arvados.org/cwl#collectionUUID'] = 'zzzzz-4zz18-zzzzzzzzzzzzzzz'\n        expect_container['mounts']['/var/lib/cwl/cwl.input.json']['content']['z']['listing'][0]['http://arvados.org/cwl#collectionUUID'] = 'zzzzz-4zz18-zzzzzzzzzzzzzzz'\n\n        stubs.api.collections().list.assert_has_calls([\n            mock.call(count='none',\n                      filters=[['uuid', 'in', ['zzzzz-4zz18-zzzzzzzzzzzzzzz']]],\n                      select=['uuid', 'portable_data_hash'])])\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 
0)\n\n    @stubs()\n    def test_submit_mismatched_uuid_inputs(self, stubs):\n        def list_side_effect(**kwargs):\n            m = mock.MagicMock()\n            if \"count\" in kwargs:\n                m.execute.return_value = {\"items\": [\n                    {\"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzzz\", \"portable_data_hash\": \"99999999999999999999999999999997+99\"}\n                ]}\n            else:\n                m.execute.return_value = {\"items\": []}\n            return m\n        stubs.api.collections().list.side_effect = list_side_effect\n\n        for infile in (\"tests/submit_test_job_with_mismatched_uuids.json\", \"tests/submit_test_job_with_inconsistent_uuids.json\"):\n            capture_stderr = StringIO()\n            cwltool_logger = logging.getLogger('cwltool')\n            stderr_logger = logging.StreamHandler(capture_stderr)\n            cwltool_logger.addHandler(stderr_logger)\n\n            try:\n                exited = arvados_cwl.main(\n                    [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\",\n                        \"tests/wf/submit_wf.cwl\", infile],\n                    stubs.capture_stdout, capture_stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n                self.assertEqual(exited, 1)\n                self.assertRegex(\n                    re.sub(r'[ \\n]+', ' ', capture_stderr.getvalue()),\n                    r\"Expected collection uuid zzzzz-4zz18-zzzzzzzzzzzzzzz to be 99999999999999999999999999999998\\+99 but API server reported 99999999999999999999999999999997\\+99\")\n            finally:\n                cwltool_logger.removeHandler(stderr_logger)\n\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    @stubs()\n    def test_submit_unknown_uuid_inputs(self, stubs, collectionReader):\n        collectionReader().find.return_value = arvados.arvfile.ArvadosFile(mock.MagicMock(), \"file1.txt\")\n        capture_stderr = StringIO()\n\n        cwltool_logger = logging.getLogger('cwltool')\n        stderr_logger = logging.StreamHandler(capture_stderr)\n        cwltool_logger.addHandler(stderr_logger)\n\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job_with_uuids.json\"],\n            stubs.capture_stdout, capture_stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        try:\n            self.assertEqual(exited, 1)\n            self.assertRegex(\n                capture_stderr.getvalue(),\n                r\"Collection\\s*uuid\\s*zzzzz-4zz18-zzzzzzzzzzzzzzz\\s*not\\s*found\")\n        finally:\n            cwltool_logger.removeHandler(stderr_logger)\n\n    @stubs(('submit_wf_process_properties.cwl', None))\n    def test_submit_set_process_properties(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\",\n                \"tests/wf/submit_wf_process_properties.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n\n        expect_container[\"mounts\"][\"/var/lib/cwl/workflow.json\"][\"content\"][\"$graph\"][0][\"hints\"] = [\n            {\n                \"class\": \"http://arvados.org/cwl#ProcessProperties\",\n                \"processProperties\": [\n                    {\"propertyName\": \"baz\",\n                     
\"propertyValue\": \"$(inputs.x.basename)\"},\n                    {\"propertyName\": \"foo\",\n                     \"propertyValue\": \"bar\"},\n                    {\"propertyName\": \"quux\",\n                     \"propertyValue\": {\n                         \"q1\": 1,\n                         \"q2\": 2\n                     }\n                    }\n                ],\n            },\n            {\n                \"acrContainerImage\": \"999999999999999999999999999999d3+99\",\n                \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\"\n            }\n        ]\n        #expect_container[\"mounts\"][\"/var/lib/cwl/workflow.json\"][\"content\"][\"$namespaces\"] = {\n        #    \"arv\": \"http://arvados.org/cwl#\"\n        #}\n\n        expect_container[\"mounts\"][\"/var/lib/cwl/workflow.json\"][\"content\"][\"$graph\"][0][\"steps\"][0][\"run\"] = \"keep:14b65f1869348873add49327cd63630c+312/wf/submit_wf_process_properties.cwl\"\n\n        expect_container[\"properties\"].update({\n            \"baz\": \"blorp.txt\",\n            \"foo\": \"bar\",\n            \"quux\": {\n                \"q1\": 1,\n                \"q2\": 2\n            }\n        })\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n\n    @stubs()\n    def test_submit_enable_preemptible(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--enable-preemptible\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container['command'] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       '--debug', '--on-error=continue',\n                                       '--enable-preemptible',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_disable_preemptible(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--disable-preemptible\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container['command'] = ['arvados-cwl-runner', '--local', '--api=containers',\n         
                              '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       '--debug', '--on-error=continue',\n                                       '--disable-preemptible',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_container_prefer_cached_downloads(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--prefer-cached-downloads\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       '--debug', \"--on-error=continue\", '--prefer-cached-downloads',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_submit_container_varying_url_params(self, stubs):\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--varying-url-params\", \"KeyId,Signature\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       '--debug', \"--on-error=continue\", \"--varying-url-params=KeyId,Signature\",\n             
                          '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @mock.patch(\"boto3.session.Session\")\n    @stubs()\n    def test_submit_defer_s3_download(self, stubs, botosession):\n\n        sessionmock = mock.MagicMock(region_name='us-east-2')\n        botosession.return_value = sessionmock\n\n        CredsTuple = collections.namedtuple('CredsTuple', ['access_key', 'secret_key'])\n\n        sessionmock.get_credentials.return_value = CredsTuple('123key', '789secret')\n\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--defer-download\", \"--enable-aws-credential-capture\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job_s3.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n\n        expect_container['mounts']['/var/lib/cwl/cwl.input.json']['content']['x']['location'] = 's3://examplebucket/blorp.txt'\n        del expect_container['mounts']['/var/lib/cwl/cwl.input.json']['content']['x']['size']\n        expect_container['environment']['AWS_SHARED_CREDENTIALS_FILE'] = '/var/lib/cwl/.aws/credentials'\n        expect_container['environment']['AWS_CONFIG_FILE'] = '/var/lib/cwl/.aws/config'\n        expect_container['secret_mounts'] = {\n            \"/var/lib/cwl/.aws/credentials\": {\n                \"content\": \"[default]\\naws_access_key_id = 123key\\naws_secret_access_key = 789secret\\n\",\n                \"kind\": \"text\"\n            },\n            \"/var/lib/cwl/.aws/config\": {\n                \"content\": \"[default]\\nregion = us-east-2\\n\",\n                \"kind\": \"text\"\n            }\n        }\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @mock.patch(\"boto3.session.Session\")\n    @stubs()\n    def test_submit_defer_s3_download_no_credential_capture(self, stubs, botosession):\n\n        sessionmock = mock.MagicMock(region_name='us-east-2')\n        botosession.return_value = sessionmock\n\n        stubs.api.credentials().list().execute.return_value = {\n            \"items\": [{\n                \"uuid\": \"zzzzz-oss07-8jgyh6siwlfoofw\",\n                \"name\": \"AWS_TEST_CRED\",\n                \"external_id\": \"AKIASRXXXXXXXXXXYZKG\",\n                \"scopes\": []\n            }]\n        }\n\n        exited = arvados_cwl.main(\n            [\"--submit\", \"--no-wait\", \"--api=containers\", \"--debug\", \"--defer-download\", \"--disable-aws-credential-capture\",\n                \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job_s3.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api, keep_client=stubs.keep_client)\n\n        expect_container = copy.deepcopy(stubs.expect_container_spec)\n\n        expect_container['mounts']['/var/lib/cwl/cwl.input.json']['content']['x']['location'] = 's3://examplebucket/blorp.txt'\n        del 
expect_container['mounts']['/var/lib/cwl/cwl.input.json']['content']['x']['size']\n\n        expect_container[\"command\"] = ['arvados-cwl-runner', '--local', '--api=containers',\n                                       '--no-log-timestamps', '--disable-validate', '--disable-color',\n                                       '--eval-timeout=20', '--thread-count=0',\n                                       '--enable-reuse', \"--collection-cache-size=256\",\n                                       '--output-name=Output from workflow submit_wf.cwl (%s)' % stubs.git_props[\"arv:gitDescribe\"],\n                                       '--debug', \"--on-error=continue\", '--use-credential=zzzzz-oss07-8jgyh6siwlfoofw',\n                                       '/var/lib/cwl/workflow.json#main', '/var/lib/cwl/cwl.input.json']\n\n        stubs.api.container_requests().create.assert_called_with(\n            body=JsonDiffMatcher(expect_container))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_container_request_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n        sessionmock.get_credentials.assert_not_called()\n\nclass TestCreateWorkflow(unittest.TestCase):\n    existing_workflow_uuid = \"zzzzz-7fd4e-validworkfloyml\"\n    expect_workflow = StripYAMLComments(\n        open(\"tests/wf/expect_upload_wrapper.cwl\").read().rstrip())\n    expect_workflow_altname = StripYAMLComments(\n        open(\"tests/wf/expect_upload_wrapper_altname.cwl\").read().rstrip())\n\n    def setUp(self):\n        cwltool.process._names = set()\n        #arvados_cwl.arvdocker.arv_docker_clear_cache()\n\n    def tearDown(self):\n        root_logger = logging.getLogger('')\n\n        # Remove existing RuntimeStatusLoggingHandlers if they exist\n        handlers = [h for h in root_logger.handlers if not isinstance(h, arvados_cwl.executor.RuntimeStatusLoggingHandler)]\n        root_logger.handlers = handlers\n\n    @stubs()\n    def test_create(self, stubs):\n        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'\n        stubs.api.groups().get().execute.return_value = {\"group_class\": \"project\"}\n\n        exited = arvados_cwl.main(\n            [\"--create-workflow\", \"--debug\",\n             \"--api=containers\",\n             \"--project-uuid\", project_uuid,\n             \"--disable-git\",\n             \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api)\n\n        # refute_called() is not a real mock method (it silently records a\n        # call instead of asserting); use assert_not_called() instead.\n        stubs.api.pipeline_templates().create.assert_not_called()\n        stubs.api.container_requests().create.assert_not_called()\n\n        body = {\n            \"workflow\": {\n                \"owner_uuid\": project_uuid,\n                \"name\": \"submit_wf.cwl\",\n                \"description\": \"\",\n                \"definition\": self.expect_workflow,\n            }\n        }\n        stubs.api.workflows().create.assert_called_with(\n            body=JsonDiffMatcher(body))\n\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_workflow_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_create_name(self, stubs):\n        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'\n        stubs.api.groups().get().execute.return_value = {\"group_class\": \"project\"}\n\n        exited = arvados_cwl.main(\n            [\"--create-workflow\", \"--debug\",\n             \"--api=containers\",\n             \"--project-uuid\", project_uuid,\n             \"--name\", \"testing 
123\",\n             \"--disable-git\",\n             \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api)\n\n        stubs.api.pipeline_templates().create.refute_called()\n        stubs.api.container_requests().create.refute_called()\n\n        body = {\n            \"workflow\": {\n                \"owner_uuid\": project_uuid,\n                \"name\": \"testing 123\",\n                \"description\": \"\",\n                \"definition\": self.expect_workflow_altname,\n            }\n        }\n        stubs.api.workflows().create.assert_called_with(\n            body=JsonDiffMatcher(body))\n\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_workflow_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n\n    @stubs()\n    def test_update(self, stubs):\n        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'\n        stubs.api.workflows().get().execute.return_value = {\"owner_uuid\": project_uuid}\n\n        exited = arvados_cwl.main(\n            [\"--update-workflow\", self.existing_workflow_uuid,\n             \"--debug\",\n             \"--disable-git\",\n             \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api)\n\n        body = {\n            \"workflow\": {\n                \"name\": \"submit_wf.cwl\",\n                \"description\": \"\",\n                \"definition\": self.expect_workflow,\n                \"owner_uuid\": project_uuid\n            }\n        }\n        stubs.api.workflows().update.assert_called_with(\n            uuid=self.existing_workflow_uuid,\n            body=JsonDiffMatcher(body))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         self.existing_workflow_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n\n    @stubs()\n    def test_update_name(self, stubs):\n        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'\n        stubs.api.workflows().get().execute.return_value = {\"owner_uuid\": project_uuid}\n\n        exited = arvados_cwl.main(\n            [\"--update-workflow\", self.existing_workflow_uuid,\n             \"--debug\", \"--name\", \"testing 123\",\n             \"--disable-git\",\n             \"tests/wf/submit_wf.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api)\n\n        body = {\n            \"workflow\": {\n                \"name\": \"testing 123\",\n                \"description\": \"\",\n                \"definition\": self.expect_workflow_altname,\n                \"owner_uuid\": project_uuid\n            }\n        }\n        stubs.api.workflows().update.assert_called_with(\n            uuid=self.existing_workflow_uuid,\n            body=JsonDiffMatcher(body))\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         self.existing_workflow_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_create_collection_per_tool(self, stubs):\n        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'\n        stubs.api.groups().get().execute.return_value = {\"group_class\": \"project\"}\n\n        exited = arvados_cwl.main(\n            [\"--create-workflow\", \"--debug\",\n             \"--api=containers\",\n             \"--project-uuid\", project_uuid,\n             \"--disable-git\",\n             \"tests/collection_per_tool/collection_per_tool.cwl\"],\n            stubs.capture_stdout, 
sys.stderr, api_client=stubs.api)\n\n        toolfile = \"tests/collection_per_tool/collection_per_tool_wrapper.cwl\"\n        expect_workflow = StripYAMLComments(open(toolfile).read().rstrip())\n\n        body = {\n            \"workflow\": {\n                \"owner_uuid\": project_uuid,\n                \"name\": \"collection_per_tool.cwl\",\n                \"description\": \"\",\n                \"definition\": expect_workflow,\n            }\n        }\n        stubs.api.workflows().create.assert_called_with(\n            body=JsonDiffMatcher(body))\n\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_workflow_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_create_with_imports(self, stubs):\n        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'\n        stubs.api.groups().get().execute.return_value = {\"group_class\": \"project\"}\n\n        exited = arvados_cwl.main(\n            [\"--create-workflow\", \"--debug\",\n             \"--api=containers\",\n             \"--project-uuid\", project_uuid,\n             \"tests/wf/feddemo/feddemo.cwl\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api)\n\n        stubs.api.pipeline_templates().create.assert_not_called()\n        stubs.api.container_requests().create.assert_not_called()\n\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_workflow_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_create_with_no_input(self, stubs):\n        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'\n        stubs.api.groups().get().execute.return_value = {\"group_class\": \"project\"}\n\n        exited = arvados_cwl.main(\n            [\"--create-workflow\", \"--debug\",\n             \"--api=containers\",\n             \"--project-uuid\", project_uuid,\n             \"tests/wf/revsort/revsort.cwl\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api)\n\n        stubs.api.pipeline_templates().create.assert_not_called()\n        stubs.api.container_requests().create.assert_not_called()\n\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_workflow_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n    @stubs()\n    def test_create_map(self, stubs):\n        # test uploading a document that uses objects instead of arrays\n        # for certain fields like inputs and requirements.\n\n        project_uuid = 'zzzzz-j7d0g-zzzzzzzzzzzzzzz'\n        stubs.api.groups().get().execute.return_value = {\"group_class\": \"project\"}\n\n        exited = arvados_cwl.main(\n            [\"--create-workflow\", \"--debug\",\n             \"--api=containers\",\n             \"--project-uuid\", project_uuid,\n             \"--disable-git\",\n             \"tests/wf/submit_wf_map.cwl\", \"tests/submit_test_job.json\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api)\n\n        stubs.api.pipeline_templates().create.assert_not_called()\n        stubs.api.container_requests().create.assert_not_called()\n\n        expect_workflow = StripYAMLComments(\n            open(\"tests/wf/expect_upload_wrapper_map.cwl\").read().rstrip())\n\n        body = {\n            \"workflow\": {\n                \"owner_uuid\": project_uuid,\n                \"name\": \"submit_wf_map.cwl\",\n                \"description\": \"\",\n                \"definition\": expect_workflow,\n            }\n        }\n        stubs.api.workflows().create.assert_called_with(\n   
         body=JsonDiffMatcher(body))\n\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         stubs.expect_workflow_uuid + '\\n')\n        self.assertEqual(exited, 0)\n\n\nclass TestPrintKeepDeps(unittest.TestCase):\n    @stubs()\n    def test_print_keep_deps(self, stubs):\n        # test --print-keep-deps which is used by arv-copy\n\n        exited = arvados_cwl.main(\n            [\"--print-keep-deps\", \"--debug\",\n             \"tests/wf/submit_wf_map.cwl\"],\n            stubs.capture_stdout, sys.stderr, api_client=stubs.api)\n\n        self.assertEqual(stubs.capture_stdout.getvalue(),\n                         '[\"5d373e7629203ce39e7c22af98a0f881+52\", \"999999999999999999999999999999d4+99\"]' + '\\n')\n        self.assertEqual(exited, 0)\n"
  },
  {
    "path": "sdk/cwl/tests/test_tq.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport functools\nimport sys\nimport unittest\nimport json\nimport logging\nimport os\nimport threading\n\nfrom unittest import mock\n\nfrom cwltool.task_queue import TaskQueue\n\ndef success_task():\n    pass\n\ndef fail_task():\n    raise Exception(\"Testing error handling\")\n\nclass TestTaskQueue(unittest.TestCase):\n    def test_tq(self):\n        tq = TaskQueue(threading.Lock(), 2)\n        try:\n            self.assertIsNone(tq.error)\n\n            unlock = threading.Lock()\n            unlock.acquire()\n            check_done = threading.Event()\n\n            tq.add(success_task, unlock, check_done)\n            tq.add(success_task, unlock, check_done)\n            tq.add(success_task, unlock, check_done)\n            tq.add(success_task, unlock, check_done)\n        finally:\n            tq.join()\n\n        self.assertIsNone(tq.error)\n\n\n    def test_tq_error(self):\n        tq = TaskQueue(threading.Lock(), 2)\n        try:\n            self.assertIsNone(tq.error)\n\n            unlock = threading.Lock()\n            unlock.acquire()\n            check_done = threading.Event()\n\n            tq.add(success_task, unlock, check_done)\n            tq.add(success_task, unlock, check_done)\n            tq.add(fail_task, unlock, check_done)\n            tq.add(success_task, unlock, check_done)\n        finally:\n            tq.join()\n\n        self.assertIsNotNone(tq.error)\n"
  },
  {
    "path": "sdk/cwl/tests/test_urljoin.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport functools\nimport sys\nimport unittest\nimport json\nimport logging\nimport os\n\nfrom unittest import mock\n\nimport arvados\nimport arvados.keep\nimport arvados.collection\nimport arvados_cwl\n\nfrom arvados_cwl.fsaccess import CollectionFetcher\n\nclass TestUrljoin(unittest.TestCase):\n    def test_urljoin(self):\n        \"\"\"Test path joining for keep references.\"\"\"\n\n        cf = CollectionFetcher({}, None)\n\n        self.assertEqual(\"keep:99999999999999999999999999999991+99/hw.py\",\n                          cf.urljoin(\"keep:99999999999999999999999999999991+99\", \"hw.py\"))\n\n        self.assertEqual(\"keep:99999999999999999999999999999991+99/hw.py\",\n                          cf.urljoin(\"keep:99999999999999999999999999999991+99/\", \"hw.py\"))\n\n        self.assertEqual(\"keep:99999999999999999999999999999991+99/hw.py#main\",\n                          cf.urljoin(\"keep:99999999999999999999999999999991+99\", \"hw.py#main\"))\n\n        self.assertEqual(\"keep:99999999999999999999999999999991+99/hw.py#main\",\n                          cf.urljoin(\"keep:99999999999999999999999999999991+99/hw.py\", \"#main\"))\n\n        self.assertEqual(\"keep:99999999999999999999999999999991+99/dir/hw.py#main\",\n                          cf.urljoin(\"keep:99999999999999999999999999999991+99/dir/hw.py\", \"#main\"))\n\n        self.assertEqual(\"keep:99999999999999999999999999999991+99/dir/wh.py\",\n                          cf.urljoin(\"keep:99999999999999999999999999999991+99/dir/hw.py\", \"wh.py\"))\n\n        self.assertEqual(\"keep:99999999999999999999999999999991+99/wh.py\",\n                          cf.urljoin(\"keep:99999999999999999999999999999991+99/dir/hw.py\", \"/wh.py\"))\n\n        self.assertEqual(\"keep:99999999999999999999999999999991+99/wh.py#main\",\n                          cf.urljoin(\"keep:99999999999999999999999999999991+99/dir/hw.py\", \"/wh.py#main\"))\n\n        self.assertEqual(\"keep:99999999999999999999999999999991+99/wh.py\",\n                          cf.urljoin(\"keep:99999999999999999999999999999991+99/hw.py#main\", \"wh.py\"))\n\n        self.assertEqual(\"keep:99999999999999999999999999999992+99\",\n                          cf.urljoin(\"keep:99999999999999999999999999999991+99\", \"keep:99999999999999999999999999999992+99\"))\n\n        self.assertEqual(\"keep:99999999999999999999999999999991+99/dir/wh.py\",\n                          cf.urljoin(\"keep:99999999999999999999999999999991+99/dir/\", \"wh.py\"))\n\n    def test_resolver(self):\n        pass\n"
  },
  {
    "path": "sdk/cwl/tests/test_util.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport datetime\nimport httplib2\nimport unittest\n\nfrom unittest import mock\n\nfrom arvados_cwl.util import *\nfrom arvados.errors import ApiError\nfrom arvados_cwl.util import common_prefix, sanitize_url\n\nclass MockDateTime(datetime.datetime):\n    @classmethod\n    def utcnow(cls):\n        return datetime.datetime(2018, 1, 1, 0, 0, 0, 0)\n\n    @classmethod\n    def now(cls, tz):\n        return datetime.datetime(2018, 1, 1, 0, 0, 0, 0)\n\ndatetime.datetime = MockDateTime\n\nclass TestUtil(unittest.TestCase):\n    def test_get_intermediate_collection_info(self):\n        name = \"one\"\n        current_container = {\"uuid\": \"zzzzz-8i9sb-zzzzzzzzzzzzzzz\"}\n        intermediate_output_ttl = 120\n\n        info = get_intermediate_collection_info(name, current_container, intermediate_output_ttl)\n\n        self.assertEqual(info[\"name\"], \"Intermediate collection for step one\")\n        self.assertEqual(info[\"trash_at\"], datetime.datetime(2018, 1, 1, 0, 2, 0, 0))\n        self.assertEqual(info[\"properties\"], {\"type\" : \"intermediate\", \"container_uuid\" : \"zzzzz-8i9sb-zzzzzzzzzzzzzzz\"})\n\n    def test_get_current_container_success(self):\n        api = mock.MagicMock()\n        api.containers().current().execute.return_value = {\"uuid\" : \"zzzzz-8i9sb-zzzzzzzzzzzzzzz\"}\n\n        current_container = get_current_container(api)\n\n        self.assertEqual(current_container, {\"uuid\" : \"zzzzz-8i9sb-zzzzzzzzzzzzzzz\"})\n\n    def test_get_current_container_error(self):\n        api = mock.MagicMock()\n        api.containers().current().execute.side_effect = ApiError(httplib2.Response({\"status\": 300}), bytes(b\"\"))\n        logger = mock.MagicMock()\n\n        with self.assertRaises(ApiError):\n            get_current_container(api, num_retries=0, logger=logger)\n\n    def test_get_current_container_404_error(self):\n        api = mock.MagicMock()\n        api.containers().current().execute.side_effect = ApiError(httplib2.Response({\"status\": 404}), bytes(b\"\"))\n        logger = mock.MagicMock()\n\n        current_container = get_current_container(api, num_retries=0, logger=logger)\n        self.assertEqual(current_container, None)\n\n    def test_common_prefix(self):\n        self.assertEqual(common_prefix(\"file:///foo/bar\", [\"file:///foo/bar/baz\"]), \"file:///foo/\")\n        self.assertEqual(common_prefix(\"file:///foo\", [\"file:///foo\", \"file:///foo/bar\", \"file:///foo/bar/\"]), \"file:///\")\n        self.assertEqual(common_prefix(\"file:///foo/\", [\"file:///foo/\", \"file:///foo/bar\", \"file:///foo/bar/\"]), \"file:///foo/\")\n        self.assertEqual(common_prefix(\"file:///foo/bar\", [\"file:///foo/bar\", \"file:///foo/baz\", \"file:///foo/quux/q2\"]), \"file:///foo/\")\n        self.assertEqual(common_prefix(\"file:///foo/bar/\", [\"file:///foo/bar/\", \"file:///foo/baz\", \"file:///foo/quux/q2\"]), \"file:///foo/\")\n        self.assertEqual(common_prefix(\"file:///foo/bar/splat\", [\"file:///foo/bar/splat\", \"file:///foo/baz\", \"file:///foo/quux/q2\"]), \"file:///foo/\")\n        self.assertEqual(common_prefix(\"file:///foo/bar/splat\", [\"file:///foo/bar/splat\", \"file:///nope\", \"file:///foo/quux/q2\"]), \"file:///\")\n        self.assertEqual(common_prefix(\"file:///blub/foo\", [\"file:///blub/foo\", \"file:///blub/foo/bar\", \"file:///blub/foo/bar/\"]), \"file:///blub/\")\n\n        # sanity check, the subsequent code 
strips off the prefix so\n        # just confirm the logic doesn't have a fencepost error\n        prefix = \"file:///\"\n        self.assertEqual(\"file:///foo/bar\"[len(prefix):], \"foo/bar\")\n\n    def test_sanitize_url(self):\n        self.assertEqual(sanitize_url(\"https://x-access-token:blahblahblah@github.com/foo/bar.git\"), \"https://github.com/foo/bar.git\")\n        self.assertEqual(sanitize_url(\"https://github.com/foo/bar.git\"), \"https://github.com/foo/bar.git\")\n        self.assertEqual(sanitize_url(\"git@github.com:foo/bar.git\"), \"git@github.com:foo/bar.git\")\n"
  },
  {
    "path": "sdk/cwl/tests/testdir/a",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/testdir/b",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/testdir/c/d",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/tmp1/tmp2/tmp3/.gitkeep",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/tool/blub.txt",
    "content": "blibber blubber\n"
  },
  {
    "path": "sdk/cwl/tests/tool/blub.txt.cat",
    "content": "clipper clupper\n"
  },
  {
    "path": "sdk/cwl/tests/tool/submit_tool.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Test case for arvados-cwl-runner\n#\n# Used to test whether scanning a tool file for dependencies (e.g. default\n# value blub.txt) and uploading to Keep works as intended.\n\nclass: CommandLineTool\ncwlVersion: v1.0\nrequirements:\n  - class: DockerRequirement\n    dockerPull: debian:buster-slim\ninputs:\n  - id: x\n    type: File\n    default:\n      class: File\n      location: blub.txt\n    inputBinding:\n      position: 1\noutputs: []\nbaseCommand: cat\n"
  },
  {
    "path": "sdk/cwl/tests/tool/submit_tool_map.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Test case for arvados-cwl-runner\n#\n# Used to test whether scanning a tool file for dependencies (e.g. default\n# value blub.txt) and uploading to Keep works as intended.\n\nclass: CommandLineTool\ncwlVersion: v1.0\nrequirements:\n  DockerRequirement:\n    dockerPull: debian:buster-slim\ninputs:\n  x:\n    type: File\n    default:\n      class: File\n      location: blub.txt\n    inputBinding:\n      position: 1\noutputs: []\nbaseCommand: cat\n"
  },
  {
    "path": "sdk/cwl/tests/tool/tool_with_sf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Test case for arvados-cwl-runner\n#\n# Used to test whether scanning a tool file for dependencies (e.g. default\n# value blub.txt) and uploading to Keep works as intended.\n\nclass: CommandLineTool\ncwlVersion: v1.0\nrequirements:\n  - class: DockerRequirement\n    dockerPull: debian:buster-slim\ninputs:\n  - id: x\n    type: File\n    secondaryFiles:\n      - .cat\n    inputBinding:\n      valueFrom: $(self.path).cat\n      position: 1\noutputs: []\nbaseCommand: cat\n"
  },
  {
    "path": "sdk/cwl/tests/tool/tool_with_sf.yml",
    "content": "x:\n  class: File\n  location: blub.txt\n"
  },
  {
    "path": "sdk/cwl/tests/wf/16169-step.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: CommandLineTool\ncwlVersion: v1.0\nrequirements:\n  InlineJavascriptRequirement: {}\n  DockerRequirement:\n    dockerPull: debian:buster-slim\ninputs:\n  d: Directory\noutputs:\n  out: stdout\nstdout: output.txt\narguments:\n  [echo, \"${if(inputs.d.listing === undefined) {return 'true';} else {return 'false';}}\"]\n"
  },
  {
    "path": "sdk/cwl/tests/wf/check_mem.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport arvados\nimport sys\nimport os\n\nif \"JOB_UUID\" in os.environ:\n    requested = arvados.api().jobs().get(uuid=os.environ[\"JOB_UUID\"]).execute()[\"runtime_constraints\"][\"min_ram_mb_per_node\"]\nelse:\n    requested = arvados.api().containers().current().execute()[\"runtime_constraints\"][\"ram\"] // (1024*1024)\n\nprint(\"Requested %d expected %d\" % (requested, int(sys.argv[1])))\n\nexit(0 if requested == int(sys.argv[1]) else 1)\n"
  },
  {
    "path": "sdk/cwl/tests/wf/echo-subwf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\nrequirements:\n  ResourceRequirement:\n    coresMin: 1\n\ninputs: []\n\noutputs: []\n\nsteps:\n  echo_a:\n    run: echo_a.cwl\n    in: []\n    out: []\n  echo_b:\n    run: echo_b.cwl\n    in: []\n    out: []\n"
  },
  {
    "path": "sdk/cwl/tests/wf/echo-wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  SubworkflowFeatureRequirement: {}\n\ninputs: []\n\noutputs: []\n\nsteps:\n  echo-subwf:\n    requirements:\n      arv:RunInSingleContainer: {}\n    run: echo-subwf.cwl\n    in: []\n    out: []\n"
  },
  {
    "path": "sdk/cwl/tests/wf/echo_a.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\nrequirements:\n  ResourceRequirement:\n    coresMin: 2\n    outdirMin: 1024\ninputs: []\noutputs: []\nbaseCommand: echo\narguments:\n  - \"a\"\n"
  },
  {
    "path": "sdk/cwl/tests/wf/echo_b.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\nrequirements:\n  ResourceRequirement:\n    coresMin: 3\n    outdirMin: 2048\ninputs: []\noutputs: []\nbaseCommand: echo\narguments:\n  - \"b\"\n"
  },
  {
    "path": "sdk/cwl/tests/wf/expect_arvworkflow.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\n$graph:\n- class: Workflow\n  id: '#main'\n  inputs:\n  - id: '#main/x'\n    type: string\n  outputs: []\n  steps:\n  - id: '#main/step1'\n    in:\n    - {id: '#main/step1/x', source: '#main/x'}\n    out: []\n    run: '#submit_tool.cwl'\n- baseCommand: cat\n  class: CommandLineTool\n  id: '#submit_tool.cwl'\n  inputs:\n  - id: '#submit_tool.cwl/x'\n    inputBinding: {position: 1}\n    type: string\n  outputs: []\n  requirements:\n  - {class: DockerRequirement, dockerPull: 'debian:buster-slim'}\n"
  },
  {
    "path": "sdk/cwl/tests/wf/expect_packed.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{\n    \"$graph\": [\n        {\n            \"baseCommand\": \"cat\",\n            \"class\": \"CommandLineTool\",\n            \"id\": \"#submit_tool.cwl\",\n            \"inputs\": [\n                {\n                    \"default\": {\n                        \"basename\": \"blub.txt\",\n                        \"class\": \"File\",\n                        \"location\": \"keep:5d373e7629203ce39e7c22af98a0f881+52/blub.txt\",\n                        \"nameext\": \".txt\",\n                        \"nameroot\": \"blub\"\n                    },\n                    \"id\": \"#submit_tool.cwl/x\",\n                    \"inputBinding\": {\n                        \"position\": 1\n                    },\n                    \"type\": \"File\"\n                }\n            ],\n            \"outputs\": [],\n            \"requirements\": [\n                {\n                    \"class\": \"DockerRequirement\",\n                    \"dockerPull\": \"debian:buster-slim\",\n                    \"http://arvados.org/cwl#dockerCollectionPDH\": \"999999999999999999999999999999d4+99\"\n                }\n            ]\n        },\n        {\n            \"class\": \"Workflow\",\n            \"id\": \"#main\",\n            \"inputs\": [\n                {\n                    \"default\": {\n                        \"basename\": \"blorp.txt\",\n                        \"class\": \"File\",\n                        \"location\": \"keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt\",\n                        \"nameext\": \".txt\",\n                        \"nameroot\": \"blorp\",\n                        \"size\": 16\n                    },\n                    \"id\": \"#main/x\",\n                    \"type\": \"File\"\n                },\n                {\n                    \"default\": {\n                        \"basename\": \"99999999999999999999999999999998+99\",\n                        \"class\": \"Directory\",\n                        \"location\": \"keep:99999999999999999999999999999998+99\"\n                    },\n                    \"id\": \"#main/y\",\n                    \"type\": \"Directory\"\n                },\n                {\n                    \"default\": {\n                        \"basename\": \"anonymous\",\n                        \"class\": \"Directory\",\n                        \"listing\": [\n                            {\n                                \"basename\": \"renamed.txt\",\n                                \"class\": \"File\",\n                                \"location\": \"keep:99999999999999999999999999999998+99/file1.txt\",\n                                \"nameext\": \".txt\",\n                                \"nameroot\": \"renamed\",\n                                \"size\": 0\n                            }\n                        ],\n\t\t\t\"location\": \"_:df80736f-f14d-4b10-b2e3-03aa27f034b2\"\n                    },\n                    \"id\": \"#main/z\",\n                    \"type\": \"Directory\"\n                }\n            ],\n            \"outputs\": [],\n            \"steps\": [\n                {\n                    \"id\": \"#main/step1\",\n                    \"in\": [\n                        {\n                            \"id\": \"#main/step1/x\",\n                            \"source\": \"#main/x\"\n                        }\n                    ],\n                    \"out\": [],\n                  
  \"run\": \"#submit_tool.cwl\"\n                }\n            ]\n        }\n    ],\n    \"cwlVersion\": \"v1.0\"\n}"
  },
  {
    "path": "sdk/cwl/tests/wf/expect_upload_packed.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{\n    \"$graph\": [\n        {\n            \"baseCommand\": \"cat\",\n            \"class\": \"CommandLineTool\",\n            \"id\": \"#submit_tool.cwl\",\n            \"inputs\": [\n                {\n                    \"default\": {\n                        \"basename\": \"blub.txt\",\n                        \"class\": \"File\",\n                        \"location\": \"keep:5d373e7629203ce39e7c22af98a0f881+52/blub.txt\",\n                        \"nameext\": \".txt\",\n                        \"nameroot\": \"blub\"\n                    },\n                    \"id\": \"#submit_tool.cwl/x\",\n                    \"inputBinding\": {\n                        \"position\": 1\n                    },\n                    \"type\": \"File\"\n                }\n            ],\n            \"outputs\": [],\n            \"requirements\": [\n                {\n                    \"class\": \"DockerRequirement\",\n                    \"dockerPull\": \"debian:buster-slim\",\n                    \"http://arvados.org/cwl#dockerCollectionPDH\": \"999999999999999999999999999999d4+99\"\n                }\n            ]\n        },\n        {\n            \"class\": \"Workflow\",\n            \"hints\": [\n                {\n                    \"acrContainerImage\": \"999999999999999999999999999999d3+99\",\n                    \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\"\n                }\n            ],\n            \"id\": \"#main\",\n            \"inputs\": [\n                {\n                    \"default\": {\n                        \"basename\": \"blorp.txt\",\n                        \"class\": \"File\",\n                        \"location\": \"keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt\",\n                        \"nameext\": \".txt\",\n                        \"nameroot\": \"blorp\",\n                        \"size\": 16\n                    },\n                    \"id\": \"#main/x\",\n                    \"type\": \"File\"\n                },\n                {\n                    \"default\": {\n                        \"basename\": \"99999999999999999999999999999998+99\",\n                        \"class\": \"Directory\",\n                        \"location\": \"keep:99999999999999999999999999999998+99\"\n                    },\n                    \"id\": \"#main/y\",\n                    \"type\": \"Directory\"\n                },\n                {\n                    \"default\": {\n                        \"basename\": \"anonymous\",\n                        \"class\": \"Directory\",\n                        \"listing\": [\n                            {\n                                \"basename\": \"renamed.txt\",\n                                \"class\": \"File\",\n                                \"location\": \"keep:99999999999999999999999999999998+99/file1.txt\",\n                                \"nameext\": \".txt\",\n                                \"nameroot\": \"renamed\",\n                                \"size\": 0\n                            }\n                        ],\n                        \"location\": \"_:df80736f-f14d-4b10-b2e3-03aa27f034b2\"\n                    },\n                    \"id\": \"#main/z\",\n                    \"type\": \"Directory\"\n                }\n            ],\n            \"outputs\": [],\n            \"steps\": [\n                {\n                    \"id\": \"#main/step1\",\n    
                \"in\": [\n                        {\n                            \"id\": \"#main/step1/x\",\n                            \"source\": \"#main/x\"\n                        }\n                    ],\n                    \"out\": [],\n                    \"run\": \"#submit_tool.cwl\"\n                }\n            ]\n        }\n    ],\n    \"cwlVersion\": \"v1.0\"\n}\n"
  },
  {
    "path": "sdk/cwl/tests/wf/expect_upload_wrapper.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{\n    \"$graph\": [\n        {\n            \"class\": \"Workflow\",\n            \"hints\": [\n                {\n                    \"acrContainerImage\": \"999999999999999999999999999999d3+99\",\n                    \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\"\n                }\n            ],\n            \"id\": \"#main\",\n            \"inputs\": [\n                {\n                    \"default\": {\n                        \"basename\": \"blorp.txt\",\n                        \"class\": \"File\",\n                        \"location\": \"keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt\",\n                        \"nameext\": \".txt\",\n                        \"nameroot\": \"blorp\",\n                        \"size\": 16\n                    },\n                    \"id\": \"#main/x\",\n                    \"type\": \"File\"\n                },\n                {\n                    \"default\": {\n                        \"basename\": \"99999999999999999999999999999998+99\",\n                        \"class\": \"Directory\",\n                        \"location\": \"keep:99999999999999999999999999999998+99\"\n                    },\n                    \"id\": \"#main/y\",\n                    \"type\": \"Directory\"\n                },\n                {\n                    \"default\": {\n                        \"basename\": \"anonymous\",\n                        \"class\": \"Directory\",\n                        \"listing\": [\n                            {\n                                \"basename\": \"renamed.txt\",\n                                \"class\": \"File\",\n                                \"location\": \"keep:99999999999999999999999999999998+99/file1.txt\",\n                                \"nameext\": \".txt\",\n                                \"nameroot\": \"renamed\",\n                                \"size\": 0\n                            }\n                        ]\n                    },\n                    \"id\": \"#main/z\",\n                    \"type\": \"Directory\"\n                }\n            ],\n            \"outputs\": [],\n            \"requirements\": [\n                {\n                    \"class\": \"SubworkflowFeatureRequirement\"\n                }\n            ],\n            \"steps\": [\n                {\n                    \"id\": \"#main/submit_wf.cwl\",\n                    \"in\": [\n                        {\n                            \"id\": \"#main/step/x\",\n                            \"source\": \"#main/x\"\n                        },\n                        {\n                            \"id\": \"#main/step/y\",\n                            \"source\": \"#main/y\"\n                        },\n                        {\n                            \"id\": \"#main/step/z\",\n                            \"source\": \"#main/z\"\n                        }\n                    ],\n                    \"label\": \"submit_wf.cwl\",\n                    \"out\": [],\n                    \"run\": \"keep:5494a5e0a2fe50ece3595dd2bd1c535f+274/wf/submit_wf.cwl\"\n                }\n            ]\n        }\n    ],\n    \"cwlVersion\": \"v1.2\"\n}\n"
  },
  {
    "path": "sdk/cwl/tests/wf/expect_upload_wrapper_altname.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{\n    \"$graph\": [\n        {\n            \"class\": \"Workflow\",\n            \"hints\": [\n                {\n                    \"acrContainerImage\": \"999999999999999999999999999999d3+99\",\n                    \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\"\n                }\n            ],\n            \"id\": \"#main\",\n            \"inputs\": [\n                {\n                    \"default\": {\n                        \"basename\": \"blorp.txt\",\n                        \"class\": \"File\",\n                        \"location\": \"keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt\",\n                        \"nameext\": \".txt\",\n                        \"nameroot\": \"blorp\",\n                        \"size\": 16\n                    },\n                    \"id\": \"#main/x\",\n                    \"type\": \"File\"\n                },\n                {\n                    \"default\": {\n                        \"basename\": \"99999999999999999999999999999998+99\",\n                        \"class\": \"Directory\",\n                        \"location\": \"keep:99999999999999999999999999999998+99\"\n                    },\n                    \"id\": \"#main/y\",\n                    \"type\": \"Directory\"\n                },\n                {\n                    \"default\": {\n                        \"basename\": \"anonymous\",\n                        \"class\": \"Directory\",\n                        \"listing\": [\n                            {\n                                \"basename\": \"renamed.txt\",\n                                \"class\": \"File\",\n                                \"location\": \"keep:99999999999999999999999999999998+99/file1.txt\",\n                                \"nameext\": \".txt\",\n                                \"nameroot\": \"renamed\",\n                                \"size\": 0\n                            }\n                        ]\n                    },\n                    \"id\": \"#main/z\",\n                    \"type\": \"Directory\"\n                }\n            ],\n            \"outputs\": [],\n            \"requirements\": [\n                {\n                    \"class\": \"SubworkflowFeatureRequirement\"\n                }\n            ],\n            \"steps\": [\n                {\n                    \"id\": \"#main/submit_wf.cwl\",\n                    \"in\": [\n                        {\n                            \"id\": \"#main/step/x\",\n                            \"source\": \"#main/x\"\n                        },\n                        {\n                            \"id\": \"#main/step/y\",\n                            \"source\": \"#main/y\"\n                        },\n                        {\n                            \"id\": \"#main/step/z\",\n                            \"source\": \"#main/z\"\n                        }\n                    ],\n                    \"label\": \"testing 123\",\n                    \"out\": [],\n                    \"run\": \"keep:5494a5e0a2fe50ece3595dd2bd1c535f+274/wf/submit_wf.cwl\"\n                }\n            ]\n        }\n    ],\n    \"cwlVersion\": \"v1.2\"\n}\n"
  },
  {
    "path": "sdk/cwl/tests/wf/expect_upload_wrapper_map.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{\n    \"$graph\": [\n        {\n            \"class\": \"Workflow\",\n            \"hints\": [\n                {\n                    \"acrContainerImage\": \"999999999999999999999999999999d3+99\",\n                    \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\"\n                }\n            ],\n            \"id\": \"#main\",\n            \"inputs\": [\n                {\n                    \"default\": {\n                        \"basename\": \"blorp.txt\",\n                        \"class\": \"File\",\n                        \"location\": \"keep:169f39d466a5438ac4a90e779bf750c7+53/blorp.txt\",\n                        \"nameext\": \".txt\",\n                        \"nameroot\": \"blorp\",\n                        \"size\": 16\n                    },\n                    \"id\": \"#main/x\",\n                    \"type\": \"File\"\n                },\n                {\n                    \"default\": {\n                        \"basename\": \"99999999999999999999999999999998+99\",\n                        \"class\": \"Directory\",\n                        \"location\": \"keep:99999999999999999999999999999998+99\"\n                    },\n                    \"id\": \"#main/y\",\n                    \"type\": \"Directory\"\n                },\n                {\n                    \"default\": {\n                        \"basename\": \"anonymous\",\n                        \"class\": \"Directory\",\n                        \"listing\": [\n                            {\n                                \"basename\": \"renamed.txt\",\n                                \"class\": \"File\",\n                                \"location\": \"keep:99999999999999999999999999999998+99/file1.txt\",\n                                \"nameext\": \".txt\",\n                                \"nameroot\": \"renamed\",\n                                \"size\": 0\n                            }\n                        ]\n                    },\n                    \"id\": \"#main/z\",\n                    \"type\": \"Directory\"\n                }\n            ],\n            \"outputs\": [],\n            \"requirements\": [\n                {\n                    \"class\": \"SubworkflowFeatureRequirement\"\n                }\n            ],\n            \"steps\": [\n                {\n                    \"id\": \"#main/submit_wf_map.cwl\",\n                    \"in\": [\n                        {\n                            \"id\": \"#main/step/x\",\n                            \"source\": \"#main/x\"\n                        },\n                        {\n                            \"id\": \"#main/step/y\",\n                            \"source\": \"#main/y\"\n                        },\n                        {\n                            \"id\": \"#main/step/z\",\n                            \"source\": \"#main/z\"\n                        }\n                    ],\n                    \"label\": \"submit_wf_map.cwl\",\n                    \"out\": [],\n                    \"run\": \"keep:6e94cbbad95593da698f57a28762f5c1+290/wf/submit_wf_map.cwl\"\n                }\n            ]\n        }\n    ],\n    \"cwlVersion\": \"v1.2\"\n}\n"
  },
  {
    "path": "sdk/cwl/tests/wf/hello.txt",
    "content": "hello\n"
  },
  {
    "path": "sdk/cwl/tests/wf/indir1/hello2.txt",
    "content": "hello2\n"
  },
  {
    "path": "sdk/cwl/tests/wf/inputs_test.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Test case for arvados-cwl-runner. Used to test propagation of\n# various input types as script_parameters in pipeline templates.\n\nclass: Workflow\ncwlVersion: v1.0\ninputs:\n  - id: \"#fileInput\"\n    type: File\n    label: It's a file; we expect to find some characters in it.\n    doc: |\n      If there were anything further to say, it would be said here,\n      or here.\n  - id: \"#boolInput\"\n    type: boolean\n    label: True or false?\n  - id: \"#floatInput\"\n    type: float\n    label: Floats like a duck\n    default: 0.1\n  - id: \"#optionalFloatInput\"\n    type: [\"null\", float]\noutputs: []\nsteps:\n  - id: step1\n    in:\n      - { id: x, source: \"#fileInput\" }\n    out: []\n    run: ../tool/submit_tool.cwl\n"
  },
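  {
    "path": "sdk/cwl/tests/wf/inputs_test_example_job.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Hypothetical example input object for inputs_test.cwl, added as an\n# editorial illustration; it is not part of the original test suite, and\n# the file name and values below are assumptions.  It fills in each of the\n# declared input types: a File, a boolean, and a float.  Since\n# \"optionalFloatInput\" is typed [\"null\", float], it can simply be omitted.\n#\n#   arvados-cwl-runner inputs_test.cwl inputs_test_example_job.yml\n\nfileInput:\n  class: File\n  location: some-input.txt  # any text file (hypothetical name)\nboolInput: true\nfloatInput: 0.25\n"
  },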
  {
    "path": "sdk/cwl/tests/wf/listing_deep.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: CommandLineTool\ncwlVersion: v1.0\n$namespaces:\n  cwltool: \"http://commonwl.org/cwltool#\"\nrequirements:\n  cwltool:LoadListingRequirement:\n    loadListing: deep_listing\n  InlineJavascriptRequirement: {}\ninputs:\n  d: Directory\noutputs:\n  out: stdout\nstdout: output.txt\narguments:\n  [echo, \"${if(inputs.d.listing[0].class === 'Directory' && inputs.d.listing[0].listing[0].class === 'Directory') {return 'true';} else {return 'false';}}\"]\n"
  },
  {
    "path": "sdk/cwl/tests/wf/listing_none.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: CommandLineTool\ncwlVersion: v1.0\n$namespaces:\n  cwltool: http://commonwl.org/cwltool#\nrequirements:\n  cwltool:LoadListingRequirement:\n    loadListing: no_listing\n  InlineJavascriptRequirement: {}\ninputs:\n  d: Directory\noutputs:\n  out: stdout\nstdout: output.txt\narguments:\n  [echo, \"${if(inputs.d.listing === undefined) {return 'true';} else {return 'false';}}\"]"
  },
  {
    "path": "sdk/cwl/tests/wf/listing_shallow.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: CommandLineTool\ncwlVersion: v1.0\n$namespaces:\n  cwltool: http://commonwl.org/cwltool#\nrequirements:\n  cwltool:LoadListingRequirement:\n    loadListing: shallow_listing\n  InlineJavascriptRequirement: {}\ninputs:\n  d: Directory\noutputs:\n  out: stdout\nstdout: output.txt\narguments:\n  [echo, \"${if(inputs.d.listing[0].class === 'Directory' && inputs.d.listing[0].listing === undefined) {return 'true';} else {return 'false';}}\"]\n"
  },
  {
    "path": "sdk/cwl/tests/wf/output_dir.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: ExpressionTool\ninputs:\n  file1:\n    type: Directory\n    loadListing: deep_listing\noutputs:\n  val: Directory\n  val2: File[]\nrequirements:\n  InlineJavascriptRequirement: {}\nexpression: |\n  ${\n   var val2 = inputs.file1.listing.filter(function (f) { return f.class == 'File'; } );\n   return {val: inputs.file1, val2: val2}\n  }\n"
  },
  {
    "path": "sdk/cwl/tests/wf/output_dir_wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: Workflow\ninputs:\n  file1:\n    type: Directory\n    loadListing: deep_listing\n    default:\n      class: Directory\n      location: ../testdir\n\nsteps:\n  step1:\n    in:\n      file1: file1\n    run: output_dir.cwl\n    out: [val, val2]\n\noutputs:\n  val:\n    type: Directory\n    outputSource: step1/val\n  val2:\n    type: File[]\n    outputSource: step1/val2\n"
  },
  {
    "path": "sdk/cwl/tests/wf/revsort/revsort.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n#\n# This is a two-step workflow which uses \"revtool\" and \"sorttool\" defined above.\n#\nclass: Workflow\ndoc: \"Reverse the lines in a document, then sort those lines.\"\ncwlVersion: v1.0\n\n\n# The inputs array defines the structure of the input object that describes\n# the inputs to the workflow.\n#\n# The \"reverse_sort\" input parameter demonstrates the \"default\" field.  If the\n# field \"reverse_sort\" is not provided in the input object, the default value will\n# be used.\ninputs:\n  input:\n    type: File\n    doc: \"The input file to be processed.\"\n  reverse_sort:\n    type: boolean\n    default: true\n    doc: \"If true, reverse (decending) sort\"\n\n# The \"outputs\" array defines the structure of the output object that describes\n# the outputs of the workflow.\n#\n# Each output field must be connected to the output of one of the workflow\n# steps using the \"connect\" field.  Here, the parameter \"#output\" of the\n# workflow comes from the \"#sorted\" output of the \"sort\" step.\noutputs:\n  output:\n    type: File\n    outputSource: sorted/output\n    doc: \"The output with the lines reversed and sorted.\"\n\n# The \"steps\" array lists the executable steps that make up the workflow.\n# The tool to execute each step is listed in the \"run\" field.\n#\n# In the first step, the \"inputs\" field of the step connects the upstream\n# parameter \"#input\" of the workflow to the input parameter of the tool\n# \"revtool.cwl#input\"\n#\n# In the second step, the \"inputs\" field of the step connects the output\n# parameter \"#reversed\" from the first step to the input parameter of the\n# tool \"sorttool.cwl#input\".\nsteps:\n  rev:\n    in:\n      input: input\n    out: [output]\n    run: revtool.cwl\n\n  sorted:\n    in:\n      input: rev/output\n      reverse: reverse_sort\n    out: [output]\n    run: sorttool.cwl\n"
  },
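  {
    "path": "sdk/cwl/tests/wf/revsort/revsort_example_job.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Hypothetical example input object for revsort.cwl, added as an editorial\n# illustration; it is not part of the original test suite, and the file\n# name whale.txt is an assumption.  \"input\" supplies the file to process,\n# and \"reverse_sort\" overrides the workflow's default of true, so the sort\n# runs in ascending order.  A run might look like:\n#\n#   cwl-runner revsort.cwl revsort_example_job.yml\n\ninput:\n  class: File\n  location: whale.txt\nreverse_sort: false\n"
  },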
  {
    "path": "sdk/cwl/tests/wf/revsort/revtool.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n#\n# Simplest example command line program wrapper for the Unix tool \"rev\".\n#\nclass: CommandLineTool\ncwlVersion: v1.0\ndoc: \"Reverse each line using the `rev` command\"\n\nhints:\n  ResourceRequirement:\n    ramMin: 8\n\n# The \"inputs\" array defines the structure of the input object that describes\n# the inputs to the underlying program.  Here, there is one input field\n# defined that will be called \"input\" and will contain a \"File\" object.\n#\n# The input binding indicates that the input value should be turned into a\n# command line argument.  In this example inputBinding is an empty object,\n# which indicates that the file name should be added to the command line at\n# a default location.\ninputs:\n  input:\n    type: File\n    inputBinding: {}\n\n# The \"outputs\" array defines the structure of the output object that\n# describes the outputs of the underlying program.  Here, there is one\n# output field defined that will be called \"output\", must be a \"File\" type,\n# and after the program executes, the output value will be the file\n# output.txt in the designated output directory.\noutputs:\n  output:\n    type: File\n    outputBinding:\n      glob: output.txt\n\n# The actual program to execute.\nbaseCommand: rev\n\n# Specify that the standard output stream must be redirected to a file called\n# output.txt in the designated output directory.\nstdout: output.txt\n"
  },
  {
    "path": "sdk/cwl/tests/wf/revsort/sorttool.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Example command line program wrapper for the Unix tool \"sort\"\n# demonstrating command line flags.\nclass: CommandLineTool\ndoc: \"Sort lines using the `sort` command\"\ncwlVersion: v1.0\nhints:\n  ResourceRequirement:\n    ramMin: 8\n\n# This example is similar to the previous one, with an additional input\n# parameter called \"reverse\".  It is a boolean parameter, which is\n# intepreted as a command line flag.  The value of \"prefix\" is used for\n# flag to put on the command line if \"reverse\" is true, if \"reverse\" is\n# false, no flag is added.\n#\n# This example also introduced the \"position\" field.  This indicates the\n# sorting order of items on the command line.  Lower numbers are placed\n# before higher numbers.  Here, the \"-r\" (same as \"--reverse\") flag (if\n#  present) will be added to the command line before the input file path.\ninputs:\n  - id: reverse\n    type: boolean\n    inputBinding:\n      position: 1\n      prefix: \"-r\"\n  - id: input\n    type: File\n    inputBinding:\n      position: 2\n\noutputs:\n  - id: output\n    type: File\n    outputBinding:\n      glob: output.txt\n\nbaseCommand: sort\nstdout: output.txt\n"
  },
  {
    "path": "sdk/cwl/tests/wf/runin-reqs-wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: Workflow\ncwlVersion: v1.0\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\ninputs:\n  count:\n    type: int[]\n    default: [1, 2, 3, 4]\n  script:\n    type: File\n    default:\n      class: File\n      location: check_mem.py\noutputs: []\nrequirements:\n  SubworkflowFeatureRequirement: {}\n  ScatterFeatureRequirement: {}\n  InlineJavascriptRequirement: {}\n  StepInputExpressionRequirement: {}\nhints:\n  DockerRequirement:\n    dockerPull: arvados/jobs:2.2.2\nsteps:\n  substep:\n    in:\n      count: count\n      script: script\n    out: []\n    hints:\n      - class: arv:RunInSingleContainer\n      - class: ResourceRequirement\n        ramMin: $(96+inputs.count*32)\n      - class: arv:APIRequirement\n    scatter: count\n    run:\n      class: Workflow\n      id: mysub\n      inputs:\n        count: int\n        script: File\n      outputs: []\n      steps:\n        sleep1:\n          in:\n            count: count\n            script: script\n          out: []\n          run:\n            class: CommandLineTool\n            id: subtool\n            inputs:\n              count:\n                type: int\n              script: File\n            outputs: []\n            arguments: [python, $(inputs.script), $(96+inputs.count * 32)]\n"
  },
  {
    "path": "sdk/cwl/tests/wf/runin-reqs-wf2.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: Workflow\ncwlVersion: v1.0\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\ninputs:\n  count:\n    type: int[]\n    default: [1, 2, 3, 4]\n  script:\n    type: File\n    default:\n      class: File\n      location: check_mem.py\noutputs: []\nrequirements:\n  SubworkflowFeatureRequirement: {}\n  ScatterFeatureRequirement: {}\n  InlineJavascriptRequirement: {}\n  StepInputExpressionRequirement: {}\nhints:\n  DockerRequirement:\n    dockerPull: arvados/jobs:2.2.2\nsteps:\n  substep:\n    in:\n      count: count\n      script: script\n    out: []\n    hints:\n      - class: arv:RunInSingleContainer\n      - class: arv:APIRequirement\n    scatter: count\n    run:\n      class: Workflow\n      id: mysub\n      inputs:\n        count: int\n        script: File\n      outputs: []\n      hints:\n        - class: ResourceRequirement\n          ramMin: $(96+inputs.count*32)\n      steps:\n        sleep1:\n          in:\n            count: count\n            script: script\n          out: []\n          run:\n            class: CommandLineTool\n            id: subtool\n            inputs:\n              count:\n                type: int\n              script: File\n            outputs: []\n            arguments: [python, $(inputs.script), $(96+inputs.count * 32)]\n"
  },
  {
    "path": "sdk/cwl/tests/wf/runin-reqs-wf3.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: Workflow\ncwlVersion: v1.0\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\ninputs:\n  count:\n    type: int[]\n    default: [1, 2, 3, 4]\n  script:\n    type: File\n    default:\n      class: File\n      location: check_mem.py\noutputs: []\nrequirements:\n  SubworkflowFeatureRequirement: {}\n  ScatterFeatureRequirement: {}\n  InlineJavascriptRequirement: {}\n  StepInputExpressionRequirement: {}\nsteps:\n  substep:\n    in:\n      count: count\n      script: script\n    out: []\n    hints:\n      - class: arv:RunInSingleContainer\n      - class: arv:APIRequirement\n    scatter: count\n    run:\n      class: Workflow\n      id: mysub\n      inputs:\n        count: int\n        script: File\n      outputs: []\n      steps:\n        sleep1:\n          in:\n            count: count\n            script: script\n          out: []\n          run:\n            class: CommandLineTool\n            id: subtool\n            hints:\n              - class: ResourceRequirement\n                ramMin: $(96+inputs.count*32)\n            inputs:\n              count:\n                type: int\n              script: File\n            outputs: []\n            arguments: [python, $(inputs.script), $(96+inputs.count * 32)]\n"
  },
  {
    "path": "sdk/cwl/tests/wf/runin-reqs-wf4.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: Workflow\ncwlVersion: v1.0\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\ninputs:\n  count:\n    type: int[]\n    default: [1, 2, 3, 4]\n  script:\n    type: File\n    default:\n      class: File\n      location: check_mem.py\noutputs: []\nrequirements:\n  SubworkflowFeatureRequirement: {}\n  ScatterFeatureRequirement: {}\n  InlineJavascriptRequirement: {}\n  StepInputExpressionRequirement: {}\nhints:\n  DockerRequirement:\n    dockerPull: arvados/jobs:2.2.2\nsteps:\n  substep:\n    in:\n      count: count\n      script: script\n    out: []\n    hints:\n      - class: arv:RunInSingleContainer\n      - class: arv:APIRequirement\n    scatter: count\n    run:\n      class: Workflow\n      id: mysub\n      inputs:\n        count: int\n        script: File\n      outputs: []\n      steps:\n        sleep1:\n          in:\n            count: count\n            script: script\n          out: []\n          run:\n            class: CommandLineTool\n            id: subtool\n            hints:\n              - class: ResourceRequirement\n                ramMin: 128\n            inputs:\n              count:\n                type: int\n              script: File\n            outputs: []\n            arguments: [python, $(inputs.script), \"128\"]\n"
  },
  {
    "path": "sdk/cwl/tests/wf/runin-reqs-wf5.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: Workflow\ncwlVersion: v1.0\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\ninputs:\n  count:\n    type: int[]\n    default: [1, 2, 3, 4]\n  script:\n    type: File\n    default:\n      class: File\n      location: check_mem.py\noutputs: []\nrequirements:\n  SubworkflowFeatureRequirement: {}\n  ScatterFeatureRequirement: {}\n  InlineJavascriptRequirement: {}\n  StepInputExpressionRequirement: {}\nhints:\n  DockerRequirement:\n    dockerPull: arvados/jobs:2.2.2\nsteps:\n  substep:\n    in:\n      count: count\n      script: script\n    out: []\n    hints:\n      - class: arv:RunInSingleContainer\n      - class: arv:APIRequirement\n    scatter: count\n    run:\n      class: Workflow\n      id: mysub\n      inputs:\n        count: int\n        script: File\n      outputs: []\n      steps:\n        sleep1:\n          in:\n            count: count\n            script: script\n          out: []\n          run:\n            class: CommandLineTool\n            id: subtool\n            hints:\n              - class: ResourceRequirement\n                ramMin: 32\n            inputs:\n              count:\n                type: int\n              script: File\n            outputs: []\n            arguments: [python, $(inputs.script), \"128\"]\n"
  },
  {
    "path": "sdk/cwl/tests/wf/runin-wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: Workflow\ncwlVersion: v1.0\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\ninputs:\n  sleeptime:\n    type: int\n    default: 5\n  fileblub:\n    type: File\n    default:\n      class: File\n      location: keep:d7514270f356df848477718d58308cc4+94/a\n      secondaryFiles:\n        - class: File\n          location: keep:d7514270f356df848477718d58308cc4+94/b\noutputs:\n  out:\n    type: string\n    outputSource: substep/out\nrequirements:\n  SubworkflowFeatureRequirement: {}\n  ScatterFeatureRequirement: {}\n  InlineJavascriptRequirement: {}\n  StepInputExpressionRequirement: {}\nsteps:\n  substep:\n    in:\n      sleeptime: sleeptime\n      fileblub: fileblub\n    out: [out]\n    hints:\n      - class: arv:RunInSingleContainer\n      - class: DockerRequirement\n        dockerPull: arvados/jobs:2.2.2\n    run:\n      class: Workflow\n      id: mysub\n      inputs:\n        fileblub: File\n      outputs:\n        out:\n          type: string\n          outputSource: sleep1/out\n      steps:\n        sleep1:\n          in:\n            fileblub: fileblub\n          out: [out]\n          run:\n            class: CommandLineTool\n            id: subtool\n            inputs:\n              fileblub:\n                type: File\n                inputBinding: {position: 1}\n            outputs:\n              out:\n                type: string\n                outputBinding:\n                  outputEval: $(\"out\")\n            baseCommand: cat\n"
  },
  {
    "path": "sdk/cwl/tests/wf/runin-with-ttl-wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: Workflow\ncwlVersion: v1.0\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\ninputs:\n  fileblub:\n    type: File\n    default:\n      class: File\n      location: keep:d7514270f356df848477718d58308cc4+94/a\n      secondaryFiles:\n        - class: File\n          location: keep:d7514270f356df848477718d58308cc4+94/b\noutputs:\n  out:\n    type: string\n    outputSource: substep/out\nrequirements:\n  SubworkflowFeatureRequirement: {}\n  ScatterFeatureRequirement: {}\n  InlineJavascriptRequirement: {}\n  StepInputExpressionRequirement: {}\nhints:\n  arv:IntermediateOutput:\n    outputTTL: 60\n  DockerRequirement:\n    dockerPull: arvados/jobs:1.4.0.20190604172024\nsteps:\n  substep:\n    in:\n      fileblub: fileblub\n    out: [out]\n    hints:\n      - class: arv:RunInSingleContainer\n    run:\n      class: Workflow\n      id: mysub\n      inputs:\n        fileblub: File\n      outputs:\n        out:\n          type: string\n          outputSource: cat1/out\n      steps:\n        cat1:\n          in:\n            fileblub: fileblub\n          out: [out]\n          run:\n            class: CommandLineTool\n            id: subtool\n            inputs:\n              fileblub:\n                type: File\n                inputBinding: {position: 1}\n            outputs:\n              out:\n                type: string\n                outputBinding:\n                  outputEval: $(\"out\")\n            baseCommand: cat\n"
  },
  {
    "path": "sdk/cwl/tests/wf/runseparate-wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: Workflow\ncwlVersion: v1.0\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\ninputs:\n  sleeptime:\n    type: int\n    default: 5\n  fileblub:\n    type: File\n    default:\n      class: File\n      location: keep:d7514270f356df848477718d58308cc4+94/a\n      secondaryFiles:\n        - class: File\n          location: keep:d7514270f356df848477718d58308cc4+94/b\noutputs:\n  out:\n    type: string\n    outputSource: substep/out\nrequirements:\n  SubworkflowFeatureRequirement: {}\n  ScatterFeatureRequirement: {}\n  InlineJavascriptRequirement: {}\n  StepInputExpressionRequirement: {}\nsteps:\n  substep:\n    in:\n      sleeptime: sleeptime\n      fileblub: fileblub\n    out: [out]\n    hints:\n      - class: arv:SeparateRunner\n        runnerProcessName: $(\"sleeptime \"+inputs.sleeptime)\n      - class: DockerRequirement\n        dockerPull: arvados/jobs:2.2.2\n    run:\n      class: Workflow\n      id: mysub\n      inputs:\n        fileblub: File\n        sleeptime: int\n      outputs:\n        out:\n          type: string\n          outputSource: sleep1/out\n      steps:\n        sleep1:\n          in:\n            fileblub: fileblub\n          out: [out]\n          run:\n            class: CommandLineTool\n            id: subtool\n            inputs:\n              fileblub:\n                type: File\n                inputBinding: {position: 1}\n            outputs:\n              out:\n                type: string\n                outputBinding:\n                  outputEval: 'out'\n            baseCommand: cat\n"
  },
  {
    "path": "sdk/cwl/tests/wf/scatter2.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: Workflow\ncwlVersion: v1.0\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\ninputs:\n  sleeptime:\n    type: int[]\n    default: [5]\n  fileblub:\n    type: File\n    default:\n      class: File\n      location: keep:99999999999999999999999999999999+118/token.txt\noutputs:\n  out:\n    type: string[]\n    outputSource: scatterstep/out\nrequirements:\n  SubworkflowFeatureRequirement: {}\n  ScatterFeatureRequirement: {}\n  InlineJavascriptRequirement: {}\n  StepInputExpressionRequirement: {}\nsteps:\n  scatterstep:\n    in:\n      sleeptime: sleeptime\n      fileblub: fileblub\n    out: [out]\n    scatter: sleeptime\n    hints:\n      - class: arv:RunInSingleContainer\n    run:\n      class: Workflow\n      id: mysub\n      inputs:\n        sleeptime: int\n        fileblub: File\n      outputs:\n        out:\n          type: string\n          outputSource: sleep1/out\n      steps:\n        sleep1:\n          in:\n            sleeptime: sleeptime\n            blurb:\n              valueFrom: |\n                ${\n                  return String(inputs.sleeptime) + \"b\";\n                }\n          out: [out]\n          run:\n            class: CommandLineTool\n            id: subtool\n            inputs:\n              sleeptime:\n                type: int\n                inputBinding: {position: 1}\n            outputs:\n              out:\n                type: string\n                outputBinding:\n                  outputEval: \"out\"\n            baseCommand: sleep\n"
  },
  {
    "path": "sdk/cwl/tests/wf/scatter2_subwf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{\n  \"$graph\": [\n    {\n      \"$namespaces\": {\n        \"arv\": \"http://arvados.org/cwl#\"\n      },\n      \"class\": \"Workflow\",\n      \"cwlVersion\": \"v1.0\",\n      \"hints\": [],\n      \"id\": \"#main\",\n      \"inputs\": [\n        {\n          \"id\": \"#main/fileblub\",\n          \"type\": \"File\"\n        },\n        {\n          \"id\": \"#main/sleeptime\",\n          \"type\": \"int\"\n        }\n      ],\n      \"outputs\": [\n        {\n          \"id\": \"#main/out\",\n          \"outputSource\": \"#main/sleep1/out\",\n          \"type\": \"string\"\n        }\n      ],\n      \"requirements\": [\n        {\n          \"class\": \"InlineJavascriptRequirement\"\n        },\n        {\n          \"class\": \"ScatterFeatureRequirement\"\n        },\n        {\n          \"class\": \"StepInputExpressionRequirement\"\n        },\n        {\n          \"class\": \"SubworkflowFeatureRequirement\"\n        }\n      ],\n      \"steps\": [\n        {\n          \"id\": \"#main/sleep1\",\n          \"in\": [\n            {\n              \"id\": \"#main/sleep1/blurb\",\n              \"valueFrom\": \"${\\n  return String(inputs.sleeptime) + \\\"b\\\";\\n}\\n\"\n            },\n            {\n              \"id\": \"#main/sleep1/sleeptime\",\n              \"source\": \"#main/sleeptime\"\n            }\n          ],\n          \"out\": [\n            \"#main/sleep1/out\"\n          ],\n          \"run\": {\n            \"baseCommand\": \"sleep\",\n            \"class\": \"CommandLineTool\",\n            \"id\": \"#main/sleep1/subtool\",\n            \"inputs\": [\n              {\n                \"id\": \"#main/sleep1/subtool/sleeptime\",\n                \"inputBinding\": {\n                  \"position\": 1\n                },\n                \"type\": \"int\"\n              }\n            ],\n            \"outputs\": [\n              {\n                \"id\": \"#main/sleep1/subtool/out\",\n                \"outputBinding\": {\n                  \"outputEval\": \"out\"\n                },\n                \"type\": \"string\"\n              }\n            ]\n          }\n        }\n      ]\n    }\n  ],\n  \"$namespaces\": {\n    \"arv\": \"http://arvados.org/cwl#\"\n  },\n  \"cwlVersion\": \"v1.0\"\n}\n"
  },
  {
    "path": "sdk/cwl/tests/wf/secret_job.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\n$namespaces:\n  cwltool: http://commonwl.org/cwltool#\nhints:\n  \"cwltool:Secrets\":\n    secrets: [pw]\nrequirements:\n  InitialWorkDirRequirement:\n    listing:\n      - entryname: example.conf\n        entry: |\n          username: user\n          password: $(inputs.pw)\ninputs:\n  pw: string\noutputs:\n  out: stdout\nstdout: hashed_example.txt\narguments: [md5sum, example.conf]\n"
  },
  {
    "path": "sdk/cwl/tests/wf/secret_wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.2\nclass: Workflow\n$namespaces:\n  cwltool: http://commonwl.org/cwltool#\nhints:\n  \"cwltool:Secrets\":\n    secrets: [pw]\n  DockerRequirement:\n    dockerPull: debian:buster-slim\ninputs:\n  pw: string\noutputs:\n  out:\n    type: File\n    outputSource: step1/out\nsteps:\n  step1:\n    in:\n      pw: pw\n    out: [out]\n    run: secret_job.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/wf/submit_keepref_wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Test case for arvados-cwl-runner\n#\n# Used to test whether scanning a workflow file for dependencies\n# (e.g. submit_tool.cwl) and uploading to Keep works as intended.\n\nclass: Workflow\ncwlVersion: v1.0\ninputs:\n  x:\n    type: File\n    default:\n      class: File\n      location: keep:99999999999999999999999999999994+99/blorp.txt\noutputs: []\nsteps:\n  step1:\n    in:\n      x: x\n    out: []\n    run: ../tool/submit_tool.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/wf/submit_storage_class_wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Test case for arvados-cwl-runner\n#\n# Used to test whether scanning a workflow file for dependencies\n# (e.g. submit_tool.cwl) and uploading to Keep works as intended.\n\nclass: Workflow\ncwlVersion: v1.0\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nhints:\n  arv:OutputStorageClass:\n    finalStorageClass: [foo, bar]\ninputs:\n  - id: x\n    type: File\n  - id: y\n    type: Directory\n  - id: z\n    type: Directory\noutputs: []\nsteps:\n  - id: step1\n    in:\n      - { id: x, source: \"#x\" }\n    out: []\n    run: ../tool/submit_tool.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/wf/submit_wf.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Test case for arvados-cwl-runner\n#\n# Used to test whether scanning a workflow file for dependencies\n# (e.g. submit_tool.cwl) and uploading to Keep works as intended.\n\nclass: Workflow\ncwlVersion: v1.2\ninputs:\n  - id: x\n    type: File\n  - id: y\n    type: Directory\n  - id: z\n    type: Directory\noutputs: []\nsteps:\n  - id: step1\n    in:\n      - { id: x, source: \"#x\" }\n    out: []\n    run: ../tool/submit_tool.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/wf/submit_wf_map.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Test case for arvados-cwl-runner\n#\n# Used to test whether scanning a workflow file for dependencies\n# (e.g. submit_tool.cwl) and uploading to Keep works as intended.\n\nclass: Workflow\ncwlVersion: v1.2\ninputs:\n  x:\n    type: File\n  y:\n    type: Directory\n  z:\n    type: Directory\noutputs: []\nsteps:\n  step1:\n    in:\n      x: x\n    out: []\n    run: ../tool/submit_tool_map.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/wf/submit_wf_no_reuse.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Test case for arvados-cwl-runner. Disables job/container reuse.\n\nclass: Workflow\ncwlVersion: v1.2\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\n  cwltool: \"http://commonwl.org/cwltool#\"\ninputs:\n  - id: x\n    type: File\n  - id: y\n    type: Directory\n  - id: z\n    type: Directory\noutputs: []\nsteps:\n  - id: step1\n    in:\n      - { id: x, source: \"#x\" }\n    out: []\n    run: ../tool/submit_tool.cwl\nhints:\n  WorkReuse:\n    enableReuse: false\n"
  },
  {
    "path": "sdk/cwl/tests/wf/submit_wf_packed.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\n$graph:\n- class: CommandLineTool\n  requirements:\n  - class: DockerRequirement\n    dockerPull: debian:buster-slim\n    'http://arvados.org/cwl#dockerCollectionPDH': 999999999999999999999999999999d4+99\n  inputs:\n  - id: '#submit_tool.cwl/x'\n    type: File\n    default:\n      class: File\n      location: keep:5d373e7629203ce39e7c22af98a0f881+52/blub.txt\n    inputBinding:\n      position: 1\n  outputs: []\n  baseCommand: cat\n  id: '#submit_tool.cwl'\n- class: Workflow\n  inputs:\n  - id: '#main/x'\n    type: File\n  - id: '#main/y'\n    type: Directory\n  - id: '#main/z'\n    type: Directory\n  outputs: []\n  steps:\n  - id: '#main/step1'\n    in:\n    - {id: '#main/step1/x', source: '#main/x'}\n    out: []\n    run: '#submit_tool.cwl'\n  id: '#main'\n"
  },
  {
    "path": "sdk/cwl/tests/wf/submit_wf_process_properties.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Test case for arvados-cwl-runner\n#\n# Used to test whether scanning a workflow file for dependencies\n# (e.g. submit_tool.cwl) and uploading to Keep works as intended.\n\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\n\nclass: Workflow\ncwlVersion: v1.2\n\nhints:\n  arv:ProcessProperties:\n    processProperties:\n      foo: bar\n      baz: $(inputs.x.basename)\n      quux:\n        propertyValue:\n          q1: 1\n          q2: 2\n\ninputs:\n  - id: x\n    type: File\n  - id: y\n    type: Directory\n  - id: z\n    type: Directory\noutputs: []\nsteps:\n  - id: step1\n    in:\n      - { id: x, source: \"#x\" }\n    out: []\n    run: ../tool/submit_tool.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/wf/submit_wf_runner_resources.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Test case for arvados-cwl-runner\n#\n# Used to test whether scanning a workflow file for dependencies\n# (e.g. submit_tool.cwl) and uploading to Keep works as intended.\n\nclass: Workflow\ncwlVersion: v1.2\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nhints:\n  arv:WorkflowRunnerResources:\n    ramMin: 2000\n    coresMin: 2\n    keep_cache: 512\ninputs:\n  - id: x\n    type: File\n  - id: y\n    type: Directory\n  - id: z\n    type: Directory\noutputs: []\nsteps:\n  - id: step1\n    in:\n      - { id: x, source: \"#x\" }\n    out: []\n    run: ../tool/submit_tool.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/wf/submit_wf_wrapper.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{\n    \"$graph\": [\n        {\n            \"class\": \"Workflow\",\n            \"hints\": [\n                {\n                    \"acrContainerImage\": \"999999999999999999999999999999d3+99\",\n                    \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\"\n                }\n            ],\n            \"id\": \"#main\",\n            \"inputs\": [\n                {\n                    \"id\": \"#main/x\",\n                    \"type\": \"File\"\n                },\n                {\n                    \"id\": \"#main/y\",\n                    \"type\": \"Directory\"\n                },\n                {\n                    \"id\": \"#main/z\",\n                    \"type\": \"Directory\"\n                }\n            ],\n            \"outputs\": [],\n            \"requirements\": [\n                {\n                    \"class\": \"SubworkflowFeatureRequirement\"\n                }\n            ],\n            \"steps\": [\n                {\n                    \"id\": \"#main/submit_wf.cwl\",\n                    \"in\": [\n                        {\n                            \"id\": \"#main/step/x\",\n                            \"source\": \"#main/x\"\n                        },\n                        {\n                            \"id\": \"#main/step/y\",\n                            \"source\": \"#main/y\"\n                        },\n                        {\n                            \"id\": \"#main/step/z\",\n                            \"source\": \"#main/z\"\n                        }\n                    ],\n                    \"label\": \"submit_wf.cwl\",\n                    \"out\": [],\n                    \"run\": \"keep:5494a5e0a2fe50ece3595dd2bd1c535f+274/wf/submit_wf.cwl\"\n                }\n            ]\n        }\n    ],\n    \"cwlVersion\": \"v1.2\"\n}\n"
  },
  {
    "path": "sdk/cwl/tests/wf/trick_defaults2.cwl",
    "content": "#!/usr/bin/env cwl-runner\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nclass: CommandLineTool\ncwlVersion: v1.0\ninputs:\n  inp1:\n    type: File\n    default:\n      class: File\n      location: hello.txt\n      secondaryFiles:\n        - class: Directory\n          location: indir1\noutputs: []\nbaseCommand: 'true'\n"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/default-dir1.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\ninputs:\n  inp2:\n    type: Directory\n    default:\n      class: Directory\n      location: inp1\n  inp1:\n    type: File\n    default:\n      class: File\n      location: inp1/hello.txt\noutputs: []\narguments: [echo, $(inputs.inp1), $(inputs.inp2)]"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/default-dir2.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\ninputs:\n  inp2:\n    type: Directory\n    default:\n      class: Directory\n      basename: inp2\n      listing:\n        - class: File\n          basename: \"hello.txt\"\n          contents: \"hello world\"\noutputs: []\narguments: [echo, $(inputs.inp2)]"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/default-dir3.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\ninputs:\n  inp2:\n    type: Directory\n    default:\n      class: Directory\n      listing:\n        - class: File\n          location: \"inp1/hello.txt\"\noutputs: []\narguments: [echo, $(inputs.inp2)]"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/default-dir4.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\ninputs: []\noutputs: []\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nsteps:\n  step1:\n    in: []\n    out: []\n    run:\n      class: CommandLineTool\n      inputs:\n        inp2:\n          type: Directory\n          default:\n            class: Directory\n            location: inp1\n      outputs: []\n      arguments: [echo, $(inputs.inp2)]\n"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/default-dir5.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\ninputs: []\noutputs: []\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nsteps:\n  step1:\n    in: []\n    out: []\n    run:\n      id: stepid\n      class: CommandLineTool\n      inputs:\n        inp2:\n          type: Directory\n          default:\n            class: Directory\n            location: inp1\n      outputs: []\n      arguments: [echo, $(inputs.inp2)]"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/default-dir6.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\ninputs: []\noutputs: []\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nsteps:\n  step1:\n    in: []\n    out: []\n    run: default-dir6a.cwl"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/default-dir6a.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\ninputs:\n  inp2:\n    type: Directory\n    default:\n      class: Directory\n      location: inp1\noutputs: []\narguments: [echo, $(inputs.inp2)]"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/default-dir7.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\ninputs:\n  inp2:\n    type: Directory\n    default:\n      class: Directory\n      location: inp1\noutputs: []\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nsteps:\n  step1:\n    in:\n      inp2: inp2\n    out: []\n    run: default-dir7a.cwl"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/default-dir7a.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\ninputs:\n  inp2:\n    type: Directory\noutputs: []\narguments: [echo, $(inputs.inp2)]"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/default-dir8.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.1\nclass: Workflow\ninputs: []\noutputs: []\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nsteps:\n  step1:\n    in: []\n    out: []\n    run:\n      class: CommandLineTool\n      inputs:\n        inp2:\n          type: Directory\n          default:\n            class: Directory\n            location: inp1\n      outputs: []\n      arguments: [echo, $(inputs.inp2)]\n"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/inp1/hello.txt",
    "content": ""
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/wf1.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\ninputs: []\noutputs: []\nsteps:\n  step1:\n    in: []\n    out: []\n    run: default-dir1.cwl"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/wf2.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\ninputs: []\noutputs: []\nsteps:\n  step1:\n    in: []\n    out: []\n    run: default-dir2.cwl"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/wf3.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\ninputs: []\noutputs: []\nsteps:\n  step1:\n    in: []\n    out: []\n    run: default-dir3.cwl"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/wf4.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\ninputs: []\noutputs: []\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  SubworkflowFeatureRequirement: {}\nsteps:\n  step1:\n    in: []\n    out: []\n    run: default-dir4.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/wf5.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\ninputs: []\noutputs: []\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  SubworkflowFeatureRequirement: {}\nsteps:\n  step1:\n    in: []\n    out: []\n    run: default-dir5.cwl"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/wf6.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\ninputs: []\noutputs: []\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  SubworkflowFeatureRequirement: {}\nhints:\n  DockerRequirement:\n    dockerPull: arvados/jobs:1.4.0.20190604172024\nsteps:\n  step1:\n    requirements:\n      arv:RunInSingleContainer: {}\n    in: []\n    out: []\n    run: default-dir6.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/wf7.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\ninputs: []\noutputs: []\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  SubworkflowFeatureRequirement: {}\nhints:\n  DockerRequirement:\n    dockerPull: arvados/jobs:1.4.0.20190604172024\nsteps:\n  step1:\n    requirements:\n      arv:RunInSingleContainer: {}\n    in: []\n    out: []\n    run: default-dir7.cwl\n"
  },
  {
    "path": "sdk/cwl/tests/wf-defaults/wf8.cwl",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.1\nclass: Workflow\ninputs: []\noutputs: []\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\nrequirements:\n  SubworkflowFeatureRequirement: {}\nsteps:\n  step1:\n    in: []\n    out: []\n    run: default-dir8.cwl\n"
  },
  {
    "path": "sdk/go/arvados/api.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype APIEndpoint struct {\n\tMethod string\n\tPath   string\n\t// \"new attributes\" key for create/update requests\n\tAttrsKey string\n}\n\nvar (\n\tEndpointConfigGet                       = APIEndpoint{\"GET\", \"arvados/v1/config\", \"\"}\n\tEndpointVocabularyGet                   = APIEndpoint{\"GET\", \"arvados/v1/vocabulary\", \"\"}\n\tEndpointDiscoveryDocument               = APIEndpoint{\"GET\", \"discovery/v1/apis/arvados/v1/rest\", \"\"}\n\tEndpointLogin                           = APIEndpoint{\"GET\", \"login\", \"\"}\n\tEndpointLogout                          = APIEndpoint{\"GET\", \"logout\", \"\"}\n\tEndpointAuthorizedKeyCreate             = APIEndpoint{\"POST\", \"arvados/v1/authorized_keys\", \"authorized_key\"}\n\tEndpointAuthorizedKeyUpdate             = APIEndpoint{\"PATCH\", \"arvados/v1/authorized_keys/{uuid}\", \"authorized_key\"}\n\tEndpointAuthorizedKeyGet                = APIEndpoint{\"GET\", \"arvados/v1/authorized_keys/{uuid}\", \"\"}\n\tEndpointAuthorizedKeyList               = APIEndpoint{\"GET\", \"arvados/v1/authorized_keys\", \"\"}\n\tEndpointAuthorizedKeyDelete             = APIEndpoint{\"DELETE\", \"arvados/v1/authorized_keys/{uuid}\", \"\"}\n\tEndpointCollectionCreate                = APIEndpoint{\"POST\", \"arvados/v1/collections\", \"collection\"}\n\tEndpointCollectionUpdate                = APIEndpoint{\"PATCH\", \"arvados/v1/collections/{uuid}\", \"collection\"}\n\tEndpointCollectionGet                   = APIEndpoint{\"GET\", \"arvados/v1/collections/{uuid}\", \"\"}\n\tEndpointCollectionList                  = APIEndpoint{\"GET\", \"arvados/v1/collections\", \"\"}\n\tEndpointCollectionProvenance            = APIEndpoint{\"GET\", \"arvados/v1/collections/{uuid}/provenance\", \"\"}\n\tEndpointCollectionUsedBy                = APIEndpoint{\"GET\", \"arvados/v1/collections/{uuid}/used_by\", \"\"}\n\tEndpointCollectionDelete                = APIEndpoint{\"DELETE\", \"arvados/v1/collections/{uuid}\", \"\"}\n\tEndpointCollectionTrash                 = APIEndpoint{\"POST\", \"arvados/v1/collections/{uuid}/trash\", \"\"}\n\tEndpointCollectionUntrash               = APIEndpoint{\"POST\", \"arvados/v1/collections/{uuid}/untrash\", \"\"}\n\tEndpointComputedPermissionList          = APIEndpoint{\"GET\", \"arvados/v1/computed_permissions\", \"\"}\n\tEndpointContainerCreate                 = APIEndpoint{\"POST\", \"arvados/v1/containers\", \"container\"}\n\tEndpointContainerUpdate                 = APIEndpoint{\"PATCH\", \"arvados/v1/containers/{uuid}\", \"container\"}\n\tEndpointContainerPriorityUpdate         = APIEndpoint{\"POST\", \"arvados/v1/containers/{uuid}/update_priority\", \"container\"}\n\tEndpointContainerGet                    = APIEndpoint{\"GET\", \"arvados/v1/containers/{uuid}\", \"\"}\n\tEndpointContainerList                   = APIEndpoint{\"GET\", \"arvados/v1/containers\", \"\"}\n\tEndpointContainerDelete                 = APIEndpoint{\"DELETE\", \"arvados/v1/containers/{uuid}\", \"\"}\n\tEndpointContainerLock                   = APIEndpoint{\"POST\", \"arvados/v1/containers/{uuid}/lock\", \"\"}\n\tEndpointContainerUnlock                 = APIEndpoint{\"POST\", \"arvados/v1/containers/{uuid}/unlock\", \"\"}\n\tEndpointContainerSSH                    = 
APIEndpoint{\"POST\", \"arvados/v1/containers/{uuid}/ssh\", \"\"}\n\tEndpointContainerSSHCompat              = APIEndpoint{\"POST\", \"arvados/v1/connect/{uuid}/ssh\", \"\"} // for compatibility with arvados <2.7\n\tEndpointContainerGatewayTunnel          = APIEndpoint{\"POST\", \"arvados/v1/containers/{uuid}/gateway_tunnel\", \"\"}\n\tEndpointContainerGatewayTunnelCompat    = APIEndpoint{\"POST\", \"arvados/v1/connect/{uuid}/gateway_tunnel\", \"\"} // for compatibility with arvados <2.7\n\tEndpointContainerRequestCreate          = APIEndpoint{\"POST\", \"arvados/v1/container_requests\", \"container_request\"}\n\tEndpointContainerRequestUpdate          = APIEndpoint{\"PATCH\", \"arvados/v1/container_requests/{uuid}\", \"container_request\"}\n\tEndpointContainerRequestGet             = APIEndpoint{\"GET\", \"arvados/v1/container_requests/{uuid}\", \"\"}\n\tEndpointContainerRequestList            = APIEndpoint{\"GET\", \"arvados/v1/container_requests\", \"\"}\n\tEndpointContainerRequestDelete          = APIEndpoint{\"DELETE\", \"arvados/v1/container_requests/{uuid}\", \"\"}\n\tEndpointContainerRequestContainerStatus = APIEndpoint{\"GET\", \"arvados/v1/container_requests/{uuid}/container_status\", \"\"}\n\tEndpointContainerRequestLog             = APIEndpoint{\"GET\", \"arvados/v1/container_requests/{uuid}/log{path:|/.*}\", \"\"}\n\tEndpointGroupCreate                     = APIEndpoint{\"POST\", \"arvados/v1/groups\", \"group\"}\n\tEndpointGroupUpdate                     = APIEndpoint{\"PATCH\", \"arvados/v1/groups/{uuid}\", \"group\"}\n\tEndpointGroupGet                        = APIEndpoint{\"GET\", \"arvados/v1/groups/{uuid}\", \"\"}\n\tEndpointGroupList                       = APIEndpoint{\"GET\", \"arvados/v1/groups\", \"\"}\n\tEndpointGroupContents                   = APIEndpoint{\"GET\", \"arvados/v1/groups/contents\", \"\"}\n\tEndpointGroupContentsUUIDInPath         = APIEndpoint{\"GET\", \"arvados/v1/groups/{uuid}/contents\", \"\"} // Alternative HTTP route; client-side code should always use EndpointGroupContents instead\n\tEndpointGroupShared                     = APIEndpoint{\"GET\", \"arvados/v1/groups/shared\", \"\"}\n\tEndpointGroupDelete                     = APIEndpoint{\"DELETE\", \"arvados/v1/groups/{uuid}\", \"\"}\n\tEndpointGroupTrash                      = APIEndpoint{\"POST\", \"arvados/v1/groups/{uuid}/trash\", \"\"}\n\tEndpointGroupUntrash                    = APIEndpoint{\"POST\", \"arvados/v1/groups/{uuid}/untrash\", \"\"}\n\tEndpointLinkCreate                      = APIEndpoint{\"POST\", \"arvados/v1/links\", \"link\"}\n\tEndpointLinkUpdate                      = APIEndpoint{\"PATCH\", \"arvados/v1/links/{uuid}\", \"link\"}\n\tEndpointLinkGet                         = APIEndpoint{\"GET\", \"arvados/v1/links/{uuid}\", \"\"}\n\tEndpointLinkList                        = APIEndpoint{\"GET\", \"arvados/v1/links\", \"\"}\n\tEndpointLinkDelete                      = APIEndpoint{\"DELETE\", \"arvados/v1/links/{uuid}\", \"\"}\n\tEndpointLogCreate                       = APIEndpoint{\"POST\", \"arvados/v1/logs\", \"log\"}\n\tEndpointLogUpdate                       = APIEndpoint{\"PATCH\", \"arvados/v1/logs/{uuid}\", \"log\"}\n\tEndpointLogGet                          = APIEndpoint{\"GET\", \"arvados/v1/logs/{uuid}\", \"\"}\n\tEndpointLogList                         = APIEndpoint{\"GET\", \"arvados/v1/logs\", \"\"}\n\tEndpointLogDelete                       = APIEndpoint{\"DELETE\", \"arvados/v1/logs/{uuid}\", \"\"}\n\tEndpointSysTrashSweep                   = 
APIEndpoint{\"POST\", \"sys/trash_sweep\", \"\"}\n\tEndpointUserActivate                    = APIEndpoint{\"POST\", \"arvados/v1/users/{uuid}/activate\", \"\"}\n\tEndpointUserCreate                      = APIEndpoint{\"POST\", \"arvados/v1/users\", \"user\"}\n\tEndpointUserCurrent                     = APIEndpoint{\"GET\", \"arvados/v1/users/current\", \"\"}\n\tEndpointUserDelete                      = APIEndpoint{\"DELETE\", \"arvados/v1/users/{uuid}\", \"\"}\n\tEndpointUserGet                         = APIEndpoint{\"GET\", \"arvados/v1/users/{uuid}\", \"\"}\n\tEndpointUserGetCurrent                  = APIEndpoint{\"GET\", \"arvados/v1/users/current\", \"\"}\n\tEndpointUserGetSystem                   = APIEndpoint{\"GET\", \"arvados/v1/users/system\", \"\"}\n\tEndpointUserList                        = APIEndpoint{\"GET\", \"arvados/v1/users\", \"\"}\n\tEndpointUserMerge                       = APIEndpoint{\"POST\", \"arvados/v1/users/merge\", \"\"}\n\tEndpointUserSetup                       = APIEndpoint{\"POST\", \"arvados/v1/users/setup\", \"user\"}\n\tEndpointUserSystem                      = APIEndpoint{\"GET\", \"arvados/v1/users/system\", \"\"}\n\tEndpointUserUnsetup                     = APIEndpoint{\"POST\", \"arvados/v1/users/{uuid}/unsetup\", \"\"}\n\tEndpointUserUpdate                      = APIEndpoint{\"PATCH\", \"arvados/v1/users/{uuid}\", \"user\"}\n\tEndpointUserBatchUpdate                 = APIEndpoint{\"PATCH\", \"arvados/v1/users/batch_update\", \"\"}\n\tEndpointUserAuthenticate                = APIEndpoint{\"POST\", \"arvados/v1/users/authenticate\", \"\"}\n\tEndpointAPIClientAuthorizationCurrent   = APIEndpoint{\"GET\", \"arvados/v1/api_client_authorizations/current\", \"\"}\n\tEndpointAPIClientAuthorizationCreate    = APIEndpoint{\"POST\", \"arvados/v1/api_client_authorizations\", \"api_client_authorization\"}\n\tEndpointAPIClientAuthorizationUpdate    = APIEndpoint{\"PUT\", \"arvados/v1/api_client_authorizations/{uuid}\", \"api_client_authorization\"}\n\tEndpointAPIClientAuthorizationList      = APIEndpoint{\"GET\", \"arvados/v1/api_client_authorizations\", \"\"}\n\tEndpointAPIClientAuthorizationDelete    = APIEndpoint{\"DELETE\", \"arvados/v1/api_client_authorizations/{uuid}\", \"\"}\n\tEndpointAPIClientAuthorizationGet       = APIEndpoint{\"GET\", \"arvados/v1/api_client_authorizations/{uuid}\", \"\"}\n\tEndpointCredentialCreate                = APIEndpoint{\"POST\", \"arvados/v1/credentials\", \"credential\"}\n\tEndpointCredentialUpdate                = APIEndpoint{\"PATCH\", \"arvados/v1/credentials/{uuid}\", \"credential\"}\n\tEndpointCredentialGet                   = APIEndpoint{\"GET\", \"arvados/v1/credentials/{uuid}\", \"\"}\n\tEndpointCredentialDelete                = APIEndpoint{\"DELETE\", \"arvados/v1/credentials/{uuid}\", \"\"}\n\tEndpointCredentialSecret                = APIEndpoint{\"GET\", \"arvados/v1/credentials/{uuid}/credential_secret\", \"\"}\n)\n\ntype ContainerHTTPProxyOptions struct {\n\t// \"{container uuid}-{port}\", \":{dynamic-external-port}\", or\n\t// the name of a published port\n\tTarget    string        `json:\"target\"`\n\tNoForward bool          `json:\"no_forward\"`\n\tRequest   *http.Request `json:\"-\"`\n}\n\ntype ContainerSSHOptions struct {\n\tUUID          string `json:\"uuid\"`\n\tDetachKeys    string `json:\"detach_keys\"`\n\tLoginUsername string `json:\"login_username\"`\n\tNoForward     bool   `json:\"no_forward\"`\n}\n\ntype ConnectionResponse struct {\n\tConn   net.Conn           `json:\"-\"`\n\tBufrw  
*bufio.ReadWriter  `json:\"-\"`\n\tLogger logrus.FieldLogger `json:\"-\"`\n\tHeader http.Header        `json:\"-\"`\n}\n\ntype ContainerGatewayTunnelOptions struct {\n\tUUID       string `json:\"uuid\"`\n\tAuthSecret string `json:\"auth_secret\"`\n}\n\ntype GetOptions struct {\n\tUUID         string   `json:\"uuid,omitempty\"`\n\tSelect       []string `json:\"select\"`\n\tIncludeTrash bool     `json:\"include_trash\"`\n\tForwardedFor string   `json:\"forwarded_for,omitempty\"`\n\tRemote       string   `json:\"remote,omitempty\"`\n}\n\ntype UntrashOptions struct {\n\tUUID             string `json:\"uuid\"`\n\tEnsureUniqueName bool   `json:\"ensure_unique_name\"`\n}\n\ntype ListOptions struct {\n\tClusterID          string                 `json:\"cluster_id\"`\n\tSelect             []string               `json:\"select\"`\n\tFilters            []Filter               `json:\"filters\"`\n\tWhere              map[string]interface{} `json:\"where\"`\n\tLimit              int64                  `json:\"limit\"`\n\tOffset             int64                  `json:\"offset\"`\n\tOrder              []string               `json:\"order\"`\n\tDistinct           bool                   `json:\"distinct\"`\n\tCount              string                 `json:\"count\"`\n\tIncludeTrash       bool                   `json:\"include_trash\"`\n\tIncludeOldVersions bool                   `json:\"include_old_versions\"`\n\tBypassFederation   bool                   `json:\"bypass_federation\"`\n\tForwardedFor       string                 `json:\"forwarded_for,omitempty\"`\n\tInclude            []string               `json:\"include\"`\n}\n\ntype CreateOptions struct {\n\tClusterID        string                 `json:\"cluster_id\"`\n\tEnsureUniqueName bool                   `json:\"ensure_unique_name\"`\n\tSelect           []string               `json:\"select\"`\n\tAttrs            map[string]interface{} `json:\"attrs\"`\n\t// ReplaceFiles and ReplaceSegments only apply when creating a\n\t// collection.\n\tReplaceFiles    map[string]string             `json:\"replace_files\"`\n\tReplaceSegments map[BlockSegment]BlockSegment `json:\"replace_segments\"`\n}\n\ntype UpdateOptions struct {\n\tUUID             string                 `json:\"uuid\"`\n\tAttrs            map[string]interface{} `json:\"attrs\"`\n\tSelect           []string               `json:\"select\"`\n\tBypassFederation bool                   `json:\"bypass_federation\"`\n\t// ReplaceFiles and ReplaceSegments only apply when updating a\n\t// collection.\n\tReplaceFiles    map[string]string             `json:\"replace_files\"`\n\tReplaceSegments map[BlockSegment]BlockSegment `json:\"replace_segments\"`\n}\n\ntype GroupContentsOptions struct {\n\tClusterID          string   `json:\"cluster_id\"`\n\tUUID               string   `json:\"uuid,omitempty\"`\n\tSelect             []string `json:\"select\"`\n\tFilters            []Filter `json:\"filters\"`\n\tLimit              int64    `json:\"limit\"`\n\tOffset             int64    `json:\"offset\"`\n\tOrder              []string `json:\"order\"`\n\tDistinct           bool     `json:\"distinct\"`\n\tCount              string   `json:\"count\"`\n\tInclude            []string `json:\"include\"`\n\tRecursive          bool     `json:\"recursive\"`\n\tIncludeTrash       bool     `json:\"include_trash\"`\n\tIncludeOldVersions bool     `json:\"include_old_versions\"`\n\tExcludeHomeProject bool     `json:\"exclude_home_project\"`\n}\n\ntype UserActivateOptions struct {\n\tUUID string `json:\"uuid\"`\n}\n\ntype 
UserSetupOptions struct {\n\tUUID                  string                 `json:\"uuid,omitempty\"`\n\tEmail                 string                 `json:\"email,omitempty\"`\n\tOpenIDPrefix          string                 `json:\"openid_prefix,omitempty\"`\n\tRepoName              string                 `json:\"repo_name,omitempty\"`\n\tVMUUID                string                 `json:\"vm_uuid,omitempty\"`\n\tSendNotificationEmail bool                   `json:\"send_notification_email,omitempty\"`\n\tAttrs                 map[string]interface{} `json:\"attrs\"`\n}\n\ntype UserMergeOptions struct {\n\tNewUserUUID       string `json:\"new_user_uuid,omitempty\"`\n\tOldUserUUID       string `json:\"old_user_uuid,omitempty\"`\n\tNewOwnerUUID      string `json:\"new_owner_uuid,omitempty\"`\n\tNewUserToken      string `json:\"new_user_token,omitempty\"`\n\tRedirectToNewUser bool   `json:\"redirect_to_new_user\"`\n}\n\ntype UserBatchUpdateOptions struct {\n\tUpdates map[string]map[string]interface{} `json:\"updates\"`\n}\n\ntype UserBatchUpdateResponse struct{}\n\ntype DeleteOptions struct {\n\tUUID string `json:\"uuid\"`\n}\n\ntype LoginOptions struct {\n\tReturnTo string `json:\"return_to\"`        // On success, redirect to this target with api_token=xxx query param\n\tRemote   string `json:\"remote,omitempty\"` // Salt token for remote Cluster ID\n\tCode     string `json:\"code,omitempty\"`   // OAuth2 callback code\n\tState    string `json:\"state,omitempty\"`  // OAuth2 callback state\n}\n\ntype UserAuthenticateOptions struct {\n\tUsername string `json:\"username,omitempty\"` // PAM username\n\tPassword string `json:\"password,omitempty\"` // PAM password\n}\n\ntype LogoutOptions struct {\n\tReturnTo string `json:\"return_to\"` // Redirect to this URL after logging out\n}\n\ntype BlockReadOptions struct {\n\tLocator      string\n\tWriteTo      io.Writer\n\tLocalLocator func(string)\n\t// If true, do not read the block data, just check whether the\n\t// block is available in a local filesystem or memory cache.\n\t// If not, return ErrNotCached.\n\tCheckCacheOnly bool\n}\n\n// See CheckCacheOnly field of BlockReadOptions.\nvar ErrNotCached = errors.New(\"block is not in cache\")\n\ntype BlockWriteOptions struct {\n\tHash           string\n\tData           []byte\n\tReader         io.Reader // Must be set if Data is nil.\n\tDataSize       int       // Must be set if Data is nil.\n\tRequestID      string\n\tStorageClasses []string\n\tReplicas       int\n\tAttempts       int\n}\n\ntype BlockWriteResponse struct {\n\tLocator        string\n\tReplicas       int\n\tStorageClasses map[string]int\n}\n\ntype WebDAVOptions struct {\n\tMethod string\n\tPath   string\n\tHeader http.Header\n}\n\ntype ContainerLogOptions struct {\n\tUUID      string `json:\"uuid\"`\n\tNoForward bool   `json:\"no_forward\"`\n\tWebDAVOptions\n}\n\ntype RepackOptions struct {\n\tCachedOnly bool\n\tFull       bool\n\tDryRun     bool\n}\n\ntype API interface {\n\tConfigGet(ctx context.Context) (json.RawMessage, error)\n\tVocabularyGet(ctx context.Context) (Vocabulary, error)\n\tLogin(ctx context.Context, options LoginOptions) (LoginResponse, error)\n\tLogout(ctx context.Context, options LogoutOptions) (LogoutResponse, error)\n\tAuthorizedKeyCreate(ctx context.Context, options CreateOptions) (AuthorizedKey, error)\n\tAuthorizedKeyUpdate(ctx context.Context, options UpdateOptions) (AuthorizedKey, error)\n\tAuthorizedKeyGet(ctx context.Context, options GetOptions) (AuthorizedKey, error)\n\tAuthorizedKeyList(ctx context.Context, 
options ListOptions) (AuthorizedKeyList, error)\n\tAuthorizedKeyDelete(ctx context.Context, options DeleteOptions) (AuthorizedKey, error)\n\tCollectionCreate(ctx context.Context, options CreateOptions) (Collection, error)\n\tCollectionUpdate(ctx context.Context, options UpdateOptions) (Collection, error)\n\tCollectionGet(ctx context.Context, options GetOptions) (Collection, error)\n\tCollectionList(ctx context.Context, options ListOptions) (CollectionList, error)\n\tCollectionProvenance(ctx context.Context, options GetOptions) (map[string]interface{}, error)\n\tCollectionUsedBy(ctx context.Context, options GetOptions) (map[string]interface{}, error)\n\tCollectionDelete(ctx context.Context, options DeleteOptions) (Collection, error)\n\tCollectionTrash(ctx context.Context, options DeleteOptions) (Collection, error)\n\tCollectionUntrash(ctx context.Context, options UntrashOptions) (Collection, error)\n\tComputedPermissionList(ctx context.Context, options ListOptions) (ComputedPermissionList, error)\n\tContainerCreate(ctx context.Context, options CreateOptions) (Container, error)\n\tContainerUpdate(ctx context.Context, options UpdateOptions) (Container, error)\n\tContainerPriorityUpdate(ctx context.Context, options UpdateOptions) (Container, error)\n\tContainerGet(ctx context.Context, options GetOptions) (Container, error)\n\tContainerList(ctx context.Context, options ListOptions) (ContainerList, error)\n\tContainerDelete(ctx context.Context, options DeleteOptions) (Container, error)\n\tContainerLock(ctx context.Context, options GetOptions) (Container, error)\n\tContainerUnlock(ctx context.Context, options GetOptions) (Container, error)\n\tContainerSSH(ctx context.Context, options ContainerSSHOptions) (ConnectionResponse, error)\n\tContainerGatewayTunnel(ctx context.Context, options ContainerGatewayTunnelOptions) (ConnectionResponse, error)\n\tContainerHTTPProxy(ctx context.Context, options ContainerHTTPProxyOptions) (http.Handler, error)\n\tContainerRequestCreate(ctx context.Context, options CreateOptions) (ContainerRequest, error)\n\tContainerRequestUpdate(ctx context.Context, options UpdateOptions) (ContainerRequest, error)\n\tContainerRequestGet(ctx context.Context, options GetOptions) (ContainerRequest, error)\n\tContainerRequestList(ctx context.Context, options ListOptions) (ContainerRequestList, error)\n\tContainerRequestDelete(ctx context.Context, options DeleteOptions) (ContainerRequest, error)\n\tContainerRequestContainerStatus(ctx context.Context, options GetOptions) (ContainerStatus, error)\n\tContainerRequestLog(ctx context.Context, options ContainerLogOptions) (http.Handler, error)\n\tGroupCreate(ctx context.Context, options CreateOptions) (Group, error)\n\tGroupUpdate(ctx context.Context, options UpdateOptions) (Group, error)\n\tGroupGet(ctx context.Context, options GetOptions) (Group, error)\n\tGroupList(ctx context.Context, options ListOptions) (GroupList, error)\n\tGroupContents(ctx context.Context, options GroupContentsOptions) (ObjectList, error)\n\tGroupShared(ctx context.Context, options ListOptions) (GroupList, error)\n\tGroupDelete(ctx context.Context, options DeleteOptions) (Group, error)\n\tGroupTrash(ctx context.Context, options DeleteOptions) (Group, error)\n\tGroupUntrash(ctx context.Context, options UntrashOptions) (Group, error)\n\tLinkCreate(ctx context.Context, options CreateOptions) (Link, error)\n\tLinkUpdate(ctx context.Context, options UpdateOptions) (Link, error)\n\tLinkGet(ctx context.Context, options GetOptions) (Link, error)\n\tLinkList(ctx 
context.Context, options ListOptions) (LinkList, error)\n\tLinkDelete(ctx context.Context, options DeleteOptions) (Link, error)\n\tLogCreate(ctx context.Context, options CreateOptions) (Log, error)\n\tLogUpdate(ctx context.Context, options UpdateOptions) (Log, error)\n\tLogGet(ctx context.Context, options GetOptions) (Log, error)\n\tLogList(ctx context.Context, options ListOptions) (LogList, error)\n\tLogDelete(ctx context.Context, options DeleteOptions) (Log, error)\n\tSysTrashSweep(ctx context.Context, options struct{}) (struct{}, error)\n\tUserCreate(ctx context.Context, options CreateOptions) (User, error)\n\tUserUpdate(ctx context.Context, options UpdateOptions) (User, error)\n\tUserMerge(ctx context.Context, options UserMergeOptions) (User, error)\n\tUserActivate(ctx context.Context, options UserActivateOptions) (User, error)\n\tUserSetup(ctx context.Context, options UserSetupOptions) (map[string]interface{}, error)\n\tUserUnsetup(ctx context.Context, options GetOptions) (User, error)\n\tUserGet(ctx context.Context, options GetOptions) (User, error)\n\tUserGetCurrent(ctx context.Context, options GetOptions) (User, error)\n\tUserGetSystem(ctx context.Context, options GetOptions) (User, error)\n\tUserList(ctx context.Context, options ListOptions) (UserList, error)\n\tUserDelete(ctx context.Context, options DeleteOptions) (User, error)\n\tUserBatchUpdate(context.Context, UserBatchUpdateOptions) (UserList, error)\n\tUserAuthenticate(ctx context.Context, options UserAuthenticateOptions) (APIClientAuthorization, error)\n\tAPIClientAuthorizationCurrent(ctx context.Context, options GetOptions) (APIClientAuthorization, error)\n\tAPIClientAuthorizationCreate(ctx context.Context, options CreateOptions) (APIClientAuthorization, error)\n\tAPIClientAuthorizationList(ctx context.Context, options ListOptions) (APIClientAuthorizationList, error)\n\tAPIClientAuthorizationDelete(ctx context.Context, options DeleteOptions) (APIClientAuthorization, error)\n\tAPIClientAuthorizationUpdate(ctx context.Context, options UpdateOptions) (APIClientAuthorization, error)\n\tAPIClientAuthorizationGet(ctx context.Context, options GetOptions) (APIClientAuthorization, error)\n\tDiscoveryDocument(ctx context.Context) (DiscoveryDocument, error)\n}\n"
  },
  {
    "path": "sdk/go/arvados/api_client_authorization.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport \"time\"\n\n// APIClientAuthorization is an arvados#apiClientAuthorization resource.\ntype APIClientAuthorization struct {\n\tUUID                string    `json:\"uuid\"`\n\tAPIToken            string    `json:\"api_token\"`\n\tCreatedAt           time.Time `json:\"created_at\"`\n\tCreatedByIPAddress  string    `json:\"created_by_ip_address\"`\n\tEtag                string    `json:\"etag\"`\n\tExpiresAt           time.Time `json:\"expires_at\"`\n\tLastUsedAt          time.Time `json:\"last_used_at\"`\n\tLastUsedByIPAddress string    `json:\"last_used_by_ip_address\"`\n\tModifiedAt          time.Time `json:\"modified_at\"`\n\tModifiedByUserUUID  string    `json:\"modified_by_user_uuid\"`\n\tOwnerUUID           string    `json:\"owner_uuid\"`\n\tScopes              []string  `json:\"scopes\"`\n}\n\n// APIClientAuthorizationList is an arvados#apiClientAuthorizationList resource.\ntype APIClientAuthorizationList struct {\n\tItems []APIClientAuthorization `json:\"items\"`\n}\n\nfunc (aca APIClientAuthorization) TokenV2() string {\n\treturn \"v2/\" + aca.UUID + \"/\" + aca.APIToken\n}\n"
  },
  {
    "path": "sdk/go/arvados/authorized_key.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport \"time\"\n\n// AuthorizedKey is an arvados#authorizedKey resource.\ntype AuthorizedKey struct {\n\tUUID               string    `json:\"uuid\"`\n\tEtag               string    `json:\"etag\"`\n\tOwnerUUID          string    `json:\"owner_uuid\"`\n\tCreatedAt          time.Time `json:\"created_at\"`\n\tModifiedAt         time.Time `json:\"modified_at\"`\n\tModifiedByUserUUID string    `json:\"modified_by_user_uuid\"`\n\tName               string    `json:\"name\"`\n\tAuthorizedUserUUID string    `json:\"authorized_user_uuid\"`\n\tPublicKey          string    `json:\"public_key\"`\n\tKeyType            string    `json:\"key_type\"`\n\tExpiresAt          time.Time `json:\"expires_at\"`\n}\n\n// AuthorizedKeyList is an arvados#authorizedKeyList resource.\ntype AuthorizedKeyList struct {\n\tItems          []AuthorizedKey `json:\"items\"`\n\tItemsAvailable int             `json:\"items_available\"`\n\tOffset         int             `json:\"offset\"`\n\tLimit          int             `json:\"limit\"`\n}\n"
  },
  {
    "path": "sdk/go/arvados/blob_signature.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Generate and verify permission signatures for Keep locators.\n//\n// See https://dev.arvados.org/projects/arvados/wiki/Keep_locator_format\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"crypto/hmac\"\n\t\"crypto/sha1\"\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\t// ErrSignatureExpired - a signature was rejected because the\n\t// expiry time has passed.\n\tErrSignatureExpired = errors.New(\"Signature expired\")\n\t// ErrSignatureInvalid - a signature was rejected because it\n\t// was badly formatted or did not match the given secret key.\n\tErrSignatureInvalid = errors.New(\"Invalid signature\")\n\t// ErrSignatureMissing - the given locator does not have a\n\t// signature hint.\n\tErrSignatureMissing = errors.New(\"Missing signature\")\n)\n\n// makePermSignature generates a SHA-1 HMAC digest for the given blob,\n// token, expiry, and site secret.\nfunc makePermSignature(blobHash []byte, apiToken, expiry, blobSignatureTTL string, permissionSecret []byte) string {\n\thmac := hmac.New(sha1.New, permissionSecret)\n\thmac.Write(blobHash)\n\thmac.Write([]byte(\"@\"))\n\thmac.Write([]byte(apiToken))\n\thmac.Write([]byte(\"@\"))\n\thmac.Write([]byte(expiry))\n\thmac.Write([]byte(\"@\"))\n\thmac.Write([]byte(blobSignatureTTL))\n\tdigest := hmac.Sum(nil)\n\treturn fmt.Sprintf(\"%x\", digest)\n}\n\nvar (\n\tmBlkRe      = regexp.MustCompile(`^[0-9a-f]{32}.*`)\n\tmPermHintRe = regexp.MustCompile(`\\+A[^+]*`)\n)\n\n// SignManifest signs all locators in the given manifest, discarding\n// any existing signatures.\nfunc SignManifest(manifest string, apiToken string, expiry time.Time, ttl time.Duration, permissionSecret []byte) string {\n\treturn regexp.MustCompile(`\\S+`).ReplaceAllStringFunc(manifest, func(tok string) string {\n\t\tif mBlkRe.MatchString(tok) {\n\t\t\treturn SignLocator(mPermHintRe.ReplaceAllString(tok, \"\"), apiToken, expiry, ttl, permissionSecret)\n\t\t}\n\t\treturn tok\n\t})\n}\n\n// SignLocator returns blobLocator with a permission signature\n// added. If either permissionSecret or apiToken is empty, blobLocator\n// is returned untouched.\n//\n// This function is intended to be used by system components and admin\n// utilities: userland programs do not know the permissionSecret.\nfunc SignLocator(blobLocator, apiToken string, expiry time.Time, blobSignatureTTL time.Duration, permissionSecret []byte) string {\n\tif len(permissionSecret) == 0 || apiToken == \"\" {\n\t\treturn blobLocator\n\t}\n\t// Strip off all hints: only the hash is used to sign.\n\tblobHash := []byte(blobLocator)\n\tif hints := bytes.IndexRune(blobHash, '+'); hints > 0 {\n\t\tblobHash = blobHash[:hints]\n\t}\n\ttimestampHex := fmt.Sprintf(\"%08x\", expiry.Unix())\n\tblobSignatureTTLHex := strconv.FormatInt(int64(blobSignatureTTL.Seconds()), 16)\n\treturn blobLocator +\n\t\t\"+A\" + makePermSignature(blobHash, apiToken, timestampHex, blobSignatureTTLHex, permissionSecret) +\n\t\t\"@\" + timestampHex\n}\n\nvar SignedLocatorRe = regexp.MustCompile(\n\t//1                 2          34                         5   6                  7                 89\n\t`^([[:xdigit:]]{32})(\\+[0-9]+)?((\\+[B-Z][A-Za-z0-9@_-]*)*)(\\+A([[:xdigit:]]{40})@([[:xdigit:]]{8}))((\\+[B-Z][A-Za-z0-9@_-]*)*)$`)\n\n// VerifySignature returns nil if the signature on the signedLocator\n// can be verified using the given apiToken. 
Otherwise it returns\n// ErrSignatureExpired (if the signature's expiry time has passed,\n// which is something the client could have figured out\n// independently), ErrSignatureMissing (if there is no signature hint\n// at all), or ErrSignatureInvalid (if the signature is present but\n// badly formatted or incorrect).\n//\n// This function is intended to be used by system components and admin\n// utilities: userland programs do not know the permissionSecret.\nfunc VerifySignature(signedLocator, apiToken string, blobSignatureTTL time.Duration, permissionSecret []byte) error {\n\tmatches := SignedLocatorRe.FindStringSubmatch(signedLocator)\n\tif matches == nil {\n\t\treturn ErrSignatureMissing\n\t}\n\tblobHash := []byte(matches[1])\n\tsignatureHex := matches[6]\n\texpiryHex := matches[7]\n\tif expiryTime, err := parseHexTimestamp(expiryHex); err != nil {\n\t\treturn ErrSignatureInvalid\n\t} else if expiryTime.Before(time.Now()) {\n\t\treturn ErrSignatureExpired\n\t}\n\tblobSignatureTTLHex := strconv.FormatInt(int64(blobSignatureTTL.Seconds()), 16)\n\tif signatureHex != makePermSignature(blobHash, apiToken, expiryHex, blobSignatureTTLHex, permissionSecret) {\n\t\treturn ErrSignatureInvalid\n\t}\n\treturn nil\n}\n\nfunc parseHexTimestamp(timestampHex string) (ts time.Time, err error) {\n\tif tsInt, e := strconv.ParseInt(timestampHex, 16, 0); e == nil {\n\t\tts = time.Unix(tsInt, 0)\n\t} else {\n\t\terr = e\n\t}\n\treturn ts, err\n}\n\nvar errNoSignature = errors.New(\"locator has no signature\")\n\nfunc signatureExpiryTime(signedLocator string) (time.Time, error) {\n\tmatches := SignedLocatorRe.FindStringSubmatch(signedLocator)\n\tif matches == nil {\n\t\treturn time.Time{}, errNoSignature\n\t}\n\texpiryHex := matches[7]\n\treturn parseHexTimestamp(expiryHex)\n}\n\nfunc stripAllHints(locator string) string {\n\tif i := strings.IndexRune(locator, '+'); i > 0 {\n\t\treturn locator[:i]\n\t}\n\treturn locator\n}\n"
  },
  {
    "path": "sdk/go/arvados/blob_signature_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"time\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nconst (\n\tknownHash    = \"acbd18db4cc2f85cedef654fccc4a4d8\"\n\tknownLocator = knownHash + \"+3\"\n\tknownToken   = \"hocfupkn2pjhrpgp2vxv8rsku7tvtx49arbc9s4bvu7p7wxqvk\"\n\tknownKey     = \"13u9fkuccnboeewr0ne3mvapk28epf68a3bhj9q8sb4l6e4e5mkk\" +\n\t\t\"p6nhj2mmpscgu1zze5h5enydxfe3j215024u16ij4hjaiqs5u4pzsl3nczmaoxnc\" +\n\t\t\"ljkm4875xqn4xv058koz3vkptmzhyheiy6wzevzjmdvxhvcqsvr5abhl15c2d4o4\" +\n\t\t\"jhl0s91lojy1mtrzqqvprqcverls0xvy9vai9t1l1lvvazpuadafm71jl4mrwq2y\" +\n\t\t\"gokee3eamvjy8qq1fvy238838enjmy5wzy2md7yvsitp5vztft6j4q866efym7e6\" +\n\t\t\"vu5wm9fpnwjyxfldw3vbo01mgjs75rgo7qioh8z8ij7jpyp8508okhgbbex3ceei\" +\n\t\t\"786u5rw2a9gx743dj3fgq2irk\"\n\tknownSignature     = \"89118b78732c33104a4d6231e8b5a5fa1e4301e3\"\n\tknownTimestamp     = \"7fffffff\"\n\tknownSigHint       = \"+A\" + knownSignature + \"@\" + knownTimestamp\n\tknownSignedLocator = knownLocator + knownSigHint\n\tblobSignatureTTL   = 1209600 * time.Second\n)\n\nvar _ = check.Suite(&BlobSignatureSuite{})\n\ntype BlobSignatureSuite struct{}\n\nfunc (s *BlobSignatureSuite) BenchmarkSignManifest(c *check.C) {\n\tDebugLocksPanicMode = false\n\tts, err := parseHexTimestamp(knownTimestamp)\n\tc.Check(err, check.IsNil)\n\tc.Logf(\"test manifest is %d bytes\", len(bigmanifest))\n\tfor i := 0; i < c.N; i++ {\n\t\tm := SignManifest(bigmanifest, knownToken, ts, blobSignatureTTL, []byte(knownKey))\n\t\tc.Check(m, check.Not(check.Equals), \"\")\n\t}\n}\n\nfunc (s *BlobSignatureSuite) TestSignLocator(c *check.C) {\n\tts, err := parseHexTimestamp(knownTimestamp)\n\tc.Check(err, check.IsNil)\n\tc.Check(SignLocator(knownLocator, knownToken, ts, blobSignatureTTL, []byte(knownKey)), check.Equals, knownSignedLocator)\n}\n\nfunc (s *BlobSignatureSuite) TestVerifySignature(c *check.C) {\n\tc.Check(VerifySignature(knownSignedLocator, knownToken, blobSignatureTTL, []byte(knownKey)), check.IsNil)\n}\n\nfunc (s *BlobSignatureSuite) TestVerifySignatureExtraHints(c *check.C) {\n\t// handle hint before permission signature\n\tc.Check(VerifySignature(knownLocator+\"+K@xyzzy\"+knownSigHint, knownToken, blobSignatureTTL, []byte(knownKey)), check.IsNil)\n\n\t// handle hint after permission signature\n\tc.Check(VerifySignature(knownLocator+knownSigHint+\"+Zfoo\", knownToken, blobSignatureTTL, []byte(knownKey)), check.IsNil)\n\n\t// handle hints around permission signature\n\tc.Check(VerifySignature(knownLocator+\"+K@xyzzy\"+knownSigHint+\"+Zfoo\", knownToken, blobSignatureTTL, []byte(knownKey)), check.IsNil)\n}\n\n// The size hint on the locator string should not affect signature\n// validation.\nfunc (s *BlobSignatureSuite) TestVerifySignatureWrongSize(c *check.C) {\n\t// handle incorrect size hint\n\tc.Check(VerifySignature(knownHash+\"+999999\"+knownSigHint, knownToken, blobSignatureTTL, []byte(knownKey)), check.IsNil)\n\n\t// handle missing size hint\n\tc.Check(VerifySignature(knownHash+knownSigHint, knownToken, blobSignatureTTL, []byte(knownKey)), check.IsNil)\n}\n\nfunc (s *BlobSignatureSuite) TestVerifySignatureBadSig(c *check.C) {\n\tbadLocator := knownLocator + \"+Aaaaaaaaaaaaaaaa@\" + knownTimestamp\n\tc.Check(VerifySignature(badLocator, knownToken, blobSignatureTTL, []byte(knownKey)), check.Equals, ErrSignatureMissing)\n}\n\nfunc (s *BlobSignatureSuite) TestVerifySignatureBadTimestamp(c *check.C) {\n\tbadLocator := knownLocator + \"+A\" + 
knownSignature + \"@OOOOOOOl\"\n\tc.Check(VerifySignature(badLocator, knownToken, blobSignatureTTL, []byte(knownKey)), check.Equals, ErrSignatureMissing)\n}\n\nfunc (s *BlobSignatureSuite) TestVerifySignatureBadSecret(c *check.C) {\n\tc.Check(VerifySignature(knownSignedLocator, knownToken, blobSignatureTTL, []byte(\"00000000000000000000\")), check.Equals, ErrSignatureInvalid)\n}\n\nfunc (s *BlobSignatureSuite) TestVerifySignatureBadToken(c *check.C) {\n\tc.Check(VerifySignature(knownSignedLocator, \"00000000\", blobSignatureTTL, []byte(knownKey)), check.Equals, ErrSignatureInvalid)\n}\n\nfunc (s *BlobSignatureSuite) TestVerifySignatureExpired(c *check.C) {\n\tyesterday := time.Now().AddDate(0, 0, -1)\n\texpiredLocator := SignLocator(knownHash, knownToken, yesterday, blobSignatureTTL, []byte(knownKey))\n\tc.Check(VerifySignature(expiredLocator, knownToken, blobSignatureTTL, []byte(knownKey)), check.Equals, ErrSignatureExpired)\n}\n"
  },
  {
    "path": "sdk/go/arvados/block_segment.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n)\n\n// BlockSegment is a portion of a block stored in Keep. It is used in\n// the replace_segments API.\ntype BlockSegment struct {\n\tLocator string\n\tOffset  int\n\tLength  int\n}\n\nfunc (bs *BlockSegment) UnmarshalJSON(data []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(data, &s); err != nil {\n\t\treturn err\n\t}\n\t_, err := fmt.Sscanf(s, \"%s %d %d\", &bs.Locator, &bs.Offset, &bs.Length)\n\treturn err\n}\n\n// MarshalText enables encoding/json to encode BlockSegment as a map\n// key.\nfunc (bs BlockSegment) MarshalText() ([]byte, error) {\n\treturn []byte(fmt.Sprintf(\"%s %d %d\", bs.Locator, bs.Offset, bs.Length)), nil\n}\n\n// MarshalText enables encoding/json to decode BlockSegment as a map\n// key.\nfunc (bs *BlockSegment) UnmarshalText(p []byte) error {\n\t_, err := fmt.Sscanf(string(p), \"%s %d %d\", &bs.Locator, &bs.Offset, &bs.Length)\n\treturn err\n}\n\nfunc (bs BlockSegment) StripAllHints() BlockSegment {\n\tbs.Locator = stripAllHints(bs.Locator)\n\treturn bs\n}\n"
  },
  {
    "path": "sdk/go/arvados/block_segment_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"encoding/json\"\n\n\t. \"gopkg.in/check.v1\"\n)\n\nvar _ = Suite(&blockSegmentSuite{})\n\ntype blockSegmentSuite struct{}\n\nfunc (s *blockSegmentSuite) TestMarshal(c *C) {\n\tdst, err := json.Marshal(map[BlockSegment]BlockSegment{\n\t\tBlockSegment{\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1\", 0, 1}: BlockSegment{\"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+3\", 2, 1},\n\t})\n\tc.Check(err, IsNil)\n\tc.Check(string(dst), Equals, `{\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1 0 1\":\"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+3 2 1\"}`)\n}\n\nfunc (s *blockSegmentSuite) TestUnmarshal(c *C) {\n\tvar dst struct {\n\t\tF map[BlockSegment]BlockSegment\n\t}\n\terr := json.Unmarshal([]byte(`{\"f\": {\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1 0 1\": \"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+3 2 1\"}}`), &dst)\n\tc.Check(err, IsNil)\n\tc.Check(dst.F, HasLen, 1)\n\tfor k, v := range dst.F {\n\t\tc.Check(k, Equals, BlockSegment{\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1\", 0, 1})\n\t\tc.Check(v, Equals, BlockSegment{\"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+3\", 2, 1})\n\t}\n}\n\nfunc (s *blockSegmentSuite) TestRoundTrip(c *C) {\n\torig := map[BlockSegment]BlockSegment{\n\t\tBlockSegment{\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+1\", 0, 1}:   BlockSegment{\"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+50\", 0, 1},\n\t\tBlockSegment{\"cccccccccccccccccccccccccccccccc+49\", 0, 49}: BlockSegment{\"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+50\", 1, 49},\n\t}\n\tj, err := json.Marshal(orig)\n\tc.Check(err, IsNil)\n\tvar dst map[BlockSegment]BlockSegment\n\terr = json.Unmarshal(j, &dst)\n\tc.Check(err, IsNil)\n\tc.Check(dst, DeepEquals, orig)\n}\n"
  },
  {
    "path": "sdk/go/arvados/byte_size.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"math\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype ByteSize int64\n\n// ByteSizeOrPercent indicates either a number of bytes or a\n// percentage from 1 to 100.\ntype ByteSizeOrPercent ByteSize\n\nvar prefixValue = map[string]int64{\n\t\"\":   1,\n\t\"K\":  1000,\n\t\"Ki\": 1 << 10,\n\t\"M\":  1000000,\n\t\"Mi\": 1 << 20,\n\t\"G\":  1000000000,\n\t\"Gi\": 1 << 30,\n\t\"T\":  1000000000000,\n\t\"Ti\": 1 << 40,\n\t\"P\":  1000000000000000,\n\t\"Pi\": 1 << 50,\n\t\"E\":  1000000000000000000,\n\t\"Ei\": 1 << 60,\n}\n\nfunc (n *ByteSize) UnmarshalJSON(data []byte) error {\n\tif len(data) == 0 || data[0] != '\"' {\n\t\tvar i int64\n\t\terr := json.Unmarshal(data, &i)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*n = ByteSize(i)\n\t\treturn nil\n\t}\n\tvar s string\n\terr := json.Unmarshal(data, &s)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsplit := strings.LastIndexAny(s, \"0123456789.+-eE\") + 1\n\tif split == 0 {\n\t\treturn fmt.Errorf(\"invalid byte size %q\", s)\n\t}\n\tif s[split-1] == 'E' {\n\t\t// We accepted an E as if it started the exponent part\n\t\t// of a json number, but if the next char isn't +, -,\n\t\t// or digit, then the E must have meant Exa. Instead\n\t\t// of \"4.5E\"+\"iB\" we want \"4.5\"+\"EiB\".\n\t\tsplit--\n\t}\n\tvar val json.Number\n\tdec := json.NewDecoder(strings.NewReader(s[:split]))\n\tdec.UseNumber()\n\terr = dec.Decode(&val)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif split == len(s) {\n\t\treturn nil\n\t}\n\tprefix := strings.Trim(s[split:], \" \")\n\tif strings.HasSuffix(prefix, \"B\") {\n\t\tprefix = prefix[:len(prefix)-1]\n\t}\n\tpval, ok := prefixValue[prefix]\n\tif !ok {\n\t\treturn fmt.Errorf(\"invalid unit %q\", strings.Trim(s[split:], \" \"))\n\t}\n\tif intval, err := val.Int64(); err == nil {\n\t\tif pval > 1 && (intval*pval)/pval != intval {\n\t\t\treturn fmt.Errorf(\"size %q overflows int64\", s)\n\t\t}\n\t\t*n = ByteSize(intval * pval)\n\t\treturn nil\n\t} else if floatval, err := val.Float64(); err == nil {\n\t\tif floatval*float64(pval) > math.MaxInt64 {\n\t\t\treturn fmt.Errorf(\"size %q overflows int64\", s)\n\t\t}\n\t\t*n = ByteSize(int64(floatval * float64(pval)))\n\t\treturn nil\n\t} else {\n\t\treturn fmt.Errorf(\"bug: json.Number for %q is not int64 or float64: %s\", s, err)\n\t}\n}\n\nfunc (n ByteSizeOrPercent) MarshalJSON() ([]byte, error) {\n\tif n < 0 && n >= -100 {\n\t\treturn []byte(fmt.Sprintf(\"\\\"%d%%\\\"\", -n)), nil\n\t} else {\n\t\treturn json.Marshal(int64(n))\n\t}\n}\n\nfunc (n *ByteSizeOrPercent) UnmarshalJSON(data []byte) error {\n\tif len(data) == 0 || data[0] != '\"' {\n\t\treturn (*ByteSize)(n).UnmarshalJSON(data)\n\t}\n\tvar s string\n\terr := json.Unmarshal(data, &s)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif s := strings.TrimSpace(s); len(s) > 0 && s[len(s)-1] == '%' {\n\t\tpct, err := strconv.ParseInt(strings.TrimSpace(s[:len(s)-1]), 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif pct < 0 || pct > 100 {\n\t\t\treturn fmt.Errorf(\"invalid value %q (percentage must be between 0 and 100)\", s)\n\t\t}\n\t\t*n = ByteSizeOrPercent(-pct)\n\t\treturn nil\n\t}\n\treturn (*ByteSize)(n).UnmarshalJSON(data)\n}\n\n// ByteSize returns the absolute byte size specified by n, or 0 if n\n// specifies a percent.\nfunc (n ByteSizeOrPercent) ByteSize() ByteSize {\n\tif n >= -100 && n < 0 {\n\t\treturn 0\n\t} else {\n\t\treturn 
ByteSize(n)\n\t}\n}\n\n// Percent returns the percentage specified by n, or 0 if n specifies\n// an absolute byte size.\nfunc (n ByteSizeOrPercent) Percent() int64 {\n\tif n >= -100 && n < 0 {\n\t\treturn int64(-n)\n\t} else {\n\t\treturn 0\n\t}\n}\n"
  },
  {
    "path": "sdk/go/arvados/byte_size_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"github.com/ghodss/yaml\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&ByteSizeSuite{})\n\ntype ByteSizeSuite struct{}\n\nfunc (s *ByteSizeSuite) TestUnmarshal(c *check.C) {\n\tfor _, testcase := range []struct {\n\t\tin  string\n\t\tout int64\n\t}{\n\t\t{\"0\", 0},\n\t\t{\"5\", 5},\n\t\t{\"5B\", 5},\n\t\t{\"5 B\", 5},\n\t\t{\" 4 KiB \", 4096},\n\t\t{\"0K\", 0},\n\t\t{\"0Ki\", 0},\n\t\t{\"0 KiB\", 0},\n\t\t{\"4K\", 4000},\n\t\t{\"4KB\", 4000},\n\t\t{\"4Ki\", 4096},\n\t\t{\"4KiB\", 4096},\n\t\t{\"4MB\", 4000000},\n\t\t{\"4MiB\", 4194304},\n\t\t{\"4GB\", 4000000000},\n\t\t{\"4 GiB\", 4294967296},\n\t\t{\"4TB\", 4000000000000},\n\t\t{\"4TiB\", 4398046511104},\n\t\t{\"4PB\", 4000000000000000},\n\t\t{\"4PiB\", 4503599627370496},\n\t\t{\"4EB\", 4000000000000000000},\n\t\t{\"4EiB\", 4611686018427387904},\n\t\t{\"4.5EiB\", 5188146770730811392},\n\t\t{\"1.5 GB\", 1500000000},\n\t\t{\"1.5 GiB\", 1610612736},\n\t\t{\"1.234 GiB\", 1324997410}, // rounds down from 1324997410.816\n\t\t{\"1e2 KB\", 100000},\n\t\t{\"20E-1 KiB\", 2048},\n\t\t{\"1E0EB\", 1000000000000000000},\n\t\t{\"1E-1EB\", 100000000000000000},\n\t\t{\"1E-1EiB\", 115292150460684704},\n\t\t{\"4.5E15 K\", 4500000000000000000},\n\t} {\n\t\tvar n ByteSize\n\t\terr := yaml.Unmarshal([]byte(testcase.in+\"\\n\"), &n)\n\t\tc.Logf(\"%v => %v: %v\", testcase.in, testcase.out, n)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(int64(n), check.Equals, testcase.out)\n\t}\n\tfor _, testcase := range []string{\n\t\t\"B\", \"K\", \"KB\", \"KiB\", \"4BK\", \"4iB\", \"4A\", \"b\", \"4b\", \"4mB\", \"4m\", \"4mib\", \"4KIB\", \"4K iB\", \"4Ki B\", \"BB\", \"4BB\",\n\t\t\"400000 EB\", // overflows int64\n\t\t\"4.11e4 EB\", // ok as float64, but overflows int64\n\t} {\n\t\tvar n ByteSize\n\t\terr := yaml.Unmarshal([]byte(testcase+\"\\n\"), &n)\n\t\tc.Logf(\"%s => error: %v\", testcase, err)\n\t\tc.Check(err, check.NotNil)\n\t}\n}\n\nfunc (s *ByteSizeSuite) TestMarshalByteSizeOrPercent(c *check.C) {\n\tfor _, testcase := range []struct {\n\t\tin  ByteSizeOrPercent\n\t\tout string\n\t}{\n\t\t{0, \"0\"},\n\t\t{-1, \"1%\"},\n\t\t{-100, \"100%\"},\n\t\t{8, \"8\"},\n\t} {\n\t\tout, err := yaml.Marshal(&testcase.in)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(string(out), check.Equals, testcase.out+\"\\n\")\n\t}\n}\n\nfunc (s *ByteSizeSuite) TestUnmarshalByteSizeOrPercent(c *check.C) {\n\tfor _, testcase := range []struct {\n\t\tin  string\n\t\tout int64\n\t}{\n\t\t{\"0\", 0},\n\t\t{\"100\", 100},\n\t\t{\"0%\", 0},\n\t\t{\"1%\", -1},\n\t\t{\"100%\", -100},\n\t\t{\"8 GB\", 8000000000},\n\t} {\n\t\tvar n ByteSizeOrPercent\n\t\terr := yaml.Unmarshal([]byte(testcase.in+\"\\n\"), &n)\n\t\tc.Logf(\"%v => %v: %v\", testcase.in, testcase.out, n)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(int64(n), check.Equals, testcase.out)\n\t}\n\tfor _, testcase := range []string{\n\t\t\"1000%\", \"101%\", \"-1%\",\n\t\t\"%\", \"-%\", \"%%\", \"%1\",\n\t\t\"400000 EB\",\n\t\t\"4.11e4 EB\",\n\t} {\n\t\tvar n ByteSizeOrPercent\n\t\terr := yaml.Unmarshal([]byte(testcase+\"\\n\"), &n)\n\t\tc.Logf(\"%s => error: %v\", testcase, err)\n\t\tc.Check(err, check.NotNil)\n\t}\n}\n"
  },
  {
    "path": "sdk/go/arvados/client.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/rand\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"math\"\n\t\"math/big\"\n\tmathrand \"math/rand\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/hashicorp/go-retryablehttp\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// A Client is an HTTP client with an API endpoint and a set of\n// Arvados credentials.\n//\n// It offers methods for accessing individual Arvados APIs, and\n// methods that implement common patterns like fetching multiple pages\n// of results using List APIs.\ntype Client struct {\n\t// HTTP client used to make requests. If nil,\n\t// DefaultSecureClient or InsecureHTTPClient will be used.\n\tClient *http.Client `json:\"-\"`\n\n\t// Protocol scheme: \"http\", \"https\", or \"\" (https)\n\tScheme string\n\n\t// Hostname (or host:port) of Arvados API server.\n\tAPIHost string\n\n\t// User authentication token.\n\tAuthToken string\n\n\t// Accept unverified certificates. This works only if the\n\t// Client field is nil: otherwise, it has no effect.\n\tInsecure bool\n\n\t// Override keep service discovery with a list of base\n\t// URIs. (Currently there are no Client methods for\n\t// discovering keep services so this is just a convenience for\n\t// callers who use a Client to initialize an\n\t// arvadosclient.ArvadosClient.)\n\tKeepServiceURIs []string `json:\",omitempty\"`\n\n\t// HTTP headers to add/override in outgoing requests.\n\tSendHeader http.Header\n\n\t// Timeout for requests. NewClientFromConfig and\n\t// NewClientFromEnv return a Client with a default 5 minute\n\t// timeout. Within this time, retryable errors are\n\t// automatically retried with exponential backoff.\n\t//\n\t// To disable automatic retries, set Timeout to zero and use a\n\t// context deadline to establish a maximum request time.\n\tTimeout time.Duration\n\n\t// Maximum disk cache size in bytes or percent of total\n\t// filesystem size. If zero, use default, currently 10% of\n\t// filesystem size.\n\tDiskCacheSize ByteSizeOrPercent\n\n\t// Where to write debug logs. May be nil.\n\tLogger logrus.FieldLogger\n\n\t// The cluster config, if the Client was initialized via\n\t// NewClientFromConfig. Otherwise nil.\n\tCluster *Cluster\n\n\tdd *DiscoveryDocument\n\n\tdefaultRequestID string\n\n\t// APIHost and AuthToken were loaded from ARVADOS_* env vars\n\t// (used to customize \"no host/token\" error messages)\n\tloadedFromEnv bool\n\n\t// Track/limit concurrent outgoing API calls. 
Note this\n\t// differs from an outgoing connection limit (a feature\n\t// provided by http.Transport) when concurrent calls are\n\t// multiplexed on a single http2 connection.\n\t//\n\t// getRequestLimiter() should always be used, because this can\n\t// be nil.\n\trequestLimiter *requestLimiter\n\n\tlast503 atomic.Value\n}\n\n// InsecureHTTPClient is the default http.Client used by a Client with\n// Insecure==true and Client==nil.\nvar InsecureHTTPClient = &http.Client{\n\tTransport: &http.Transport{\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: true}}}\n\n// DefaultSecureClient is the default http.Client used by a Client otherwise.\nvar DefaultSecureClient = &http.Client{}\n\n// NewClientFromConfig creates a new Client that uses the endpoints in\n// the given cluster.\n//\n// AuthToken is left empty for the caller to populate.\nfunc NewClientFromConfig(cluster *Cluster) (*Client, error) {\n\tctrlURL := cluster.Services.Controller.ExternalURL\n\tif ctrlURL.Host == \"\" {\n\t\treturn nil, fmt.Errorf(\"no host in config Services.Controller.ExternalURL: %v\", ctrlURL)\n\t}\n\tvar hc *http.Client\n\tif srvaddr := os.Getenv(\"ARVADOS_SERVER_ADDRESS\"); srvaddr != \"\" {\n\t\t// When this client is used to make a request to\n\t\t// https://{ctrlhost}:port/ (any port), it dials the\n\t\t// indicated port on ARVADOS_SERVER_ADDRESS instead.\n\t\t//\n\t\t// This is invoked by arvados-server boot to ensure\n\t\t// that server->server traffic (e.g.,\n\t\t// keepproxy->controller) only hits local interfaces,\n\t\t// even if the Controller.ExternalURL host is a load\n\t\t// balancer / gateway and not a local interface\n\t\t// address (e.g., when running on a cloud VM).\n\t\t//\n\t\t// This avoids unnecessary delay/cost of routing\n\t\t// external traffic, and also allows controller to\n\t\t// recognize other services as internal clients based\n\t\t// on the connection source address.\n\t\tdivertedHost := (*url.URL)(&cluster.Services.Controller.ExternalURL).Hostname()\n\t\tvar dialer net.Dialer\n\t\thc = &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: cluster.TLS.Insecure},\n\t\t\t\tDialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {\n\t\t\t\t\thost, port, err := net.SplitHostPort(addr)\n\t\t\t\t\tif err == nil && network == \"tcp\" && host == divertedHost {\n\t\t\t\t\t\taddr = net.JoinHostPort(srvaddr, port)\n\t\t\t\t\t}\n\t\t\t\t\treturn dialer.DialContext(ctx, network, addr)\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\treturn &Client{\n\t\tClient:          hc,\n\t\tScheme:          ctrlURL.Scheme,\n\t\tAPIHost:         ctrlURL.Host,\n\t\tInsecure:        cluster.TLS.Insecure,\n\t\tKeepServiceURIs: parseKeepServiceURIs(os.Getenv(\"ARVADOS_KEEP_SERVICES\")),\n\t\tTimeout:         5 * time.Minute,\n\t\tDiskCacheSize:   cluster.Collections.WebDAVCache.DiskCacheSize,\n\t\trequestLimiter:  &requestLimiter{maxlimit: int64(cluster.API.MaxConcurrentRequests / 4)},\n\t\tCluster:         cluster,\n\t}, nil\n}\n\n// NewClientFromEnv creates a new Client that uses the default HTTP\n// client, and loads API endpoint and credentials from ARVADOS_*\n// environment variables (if set) and\n// $HOME/.config/arvados/settings.conf (if readable).\n//\n// If a config exists in both locations, the environment variable is\n// used.\n//\n// If there is an error (other than ENOENT) reading settings.conf,\n// NewClientFromEnv logs the error to log.Default(), then proceeds as\n// if settings.conf did not exist.\n//\n// Space 
characters are trimmed when reading the settings file, so\n// these are equivalent:\n//\n//\tARVADOS_API_HOST=localhost\\n\n//\tARVADOS_API_HOST=localhost\\r\\n\n//\tARVADOS_API_HOST = localhost \\n\n//\t\\tARVADOS_API_HOST = localhost\\n\nfunc NewClientFromEnv() *Client {\n\tvars := map[string]string{}\n\thome := os.Getenv(\"HOME\")\n\tconffile := home + \"/.config/arvados/settings.conf\"\n\tif home == \"\" {\n\t\t// no $HOME => just use env vars\n\t} else if settings, err := os.ReadFile(conffile); errors.Is(err, fs.ErrNotExist) {\n\t\t// no config file => just use env vars\n\t} else if err != nil {\n\t\t// config file unreadable => log message, then use env vars\n\t\tlog.Printf(\"continuing without loading %s: %s\", conffile, err)\n\t} else {\n\t\tfor _, line := range bytes.Split(settings, []byte{'\\n'}) {\n\t\t\tkv := bytes.SplitN(line, []byte{'='}, 2)\n\t\t\tk := string(bytes.TrimSpace(kv[0]))\n\t\t\tif len(kv) != 2 || !strings.HasPrefix(k, \"ARVADOS_\") {\n\t\t\t\t// Same behavior as python sdk:\n\t\t\t\t// silently skip leading # (comments),\n\t\t\t\t// blank lines, typos, and non-Arvados\n\t\t\t\t// vars.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvars[k] = string(bytes.TrimSpace(kv[1]))\n\t\t}\n\t}\n\tfor _, env := range os.Environ() {\n\t\tif !strings.HasPrefix(env, \"ARVADOS_\") {\n\t\t\tcontinue\n\t\t}\n\t\tkv := strings.SplitN(env, \"=\", 2)\n\t\tif len(kv) == 2 {\n\t\t\tvars[kv[0]] = kv[1]\n\t\t}\n\t}\n\tvar insecure bool\n\tif s := strings.ToLower(vars[\"ARVADOS_API_HOST_INSECURE\"]); s == \"1\" || s == \"yes\" || s == \"true\" {\n\t\tinsecure = true\n\t}\n\treturn &Client{\n\t\tScheme:          \"https\",\n\t\tAPIHost:         vars[\"ARVADOS_API_HOST\"],\n\t\tAuthToken:       vars[\"ARVADOS_API_TOKEN\"],\n\t\tInsecure:        insecure,\n\t\tKeepServiceURIs: parseKeepServiceURIs(vars[\"ARVADOS_KEEP_SERVICES\"]),\n\t\tTimeout:         5 * time.Minute,\n\t\tloadedFromEnv:   true,\n\t}\n}\n\nfunc parseKeepServiceURIs(svclist string) []string {\n\tvar svcs []string\n\tfor _, s := range strings.Split(svclist, \" \") {\n\t\tif s == \"\" {\n\t\t\tcontinue\n\t\t} else if u, err := url.Parse(s); err != nil {\n\t\t\tlog.Printf(\"ARVADOS_KEEP_SERVICES: %q: %s\", s, err)\n\t\t} else if !u.IsAbs() {\n\t\t\tlog.Printf(\"ARVADOS_KEEP_SERVICES: %q: not an absolute URI\", s)\n\t\t} else {\n\t\t\tsvcs = append(svcs, s)\n\t\t}\n\t}\n\treturn svcs\n}\n\nvar reqIDGen = httpserver.IDGenerator{Prefix: \"req-\"}\n\nvar nopCancelFunc context.CancelFunc = func() {}\n\n// Do augments (*http.Client)Do(): adds Authorization and X-Request-Id\n// headers, delays in order to comply with rate-limiting restrictions,\n// and retries failed requests when appropriate.\nfunc (c *Client) Do(req *http.Request) (*http.Response, error) {\n\tctx := req.Context()\n\tif auth, _ := ctx.Value(contextKeyAuthorization{}).(string); auth != \"\" {\n\t\treq.Header.Add(\"Authorization\", auth)\n\t} else if c.AuthToken != \"\" {\n\t\treq.Header.Add(\"Authorization\", \"Bearer \"+c.AuthToken)\n\t}\n\n\tif req.Header.Get(\"X-Request-Id\") == \"\" {\n\t\tvar reqid string\n\t\tif ctxreqid, _ := ctx.Value(contextKeyRequestID{}).(string); ctxreqid != \"\" {\n\t\t\treqid = ctxreqid\n\t\t} else if c.defaultRequestID != \"\" {\n\t\t\treqid = c.defaultRequestID\n\t\t} else {\n\t\t\treqid = reqIDGen.Next()\n\t\t}\n\t\tif req.Header == nil {\n\t\t\treq.Header = http.Header{\"X-Request-Id\": {reqid}}\n\t\t} else {\n\t\t\treq.Header.Set(\"X-Request-Id\", reqid)\n\t\t}\n\t}\n\n\trreq, err := retryablehttp.FromRequest(req)\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\n\tcancel := nopCancelFunc\n\tvar lastResp *http.Response\n\tvar lastRespBody io.ReadCloser\n\tvar lastErr error\n\tvar checkRetryCalled int\n\n\trclient := retryablehttp.NewClient()\n\trclient.HTTPClient = c.httpClient()\n\trclient.Backoff = exponentialBackoff\n\tif c.Timeout > 0 {\n\t\trclient.RetryWaitMax = c.Timeout / 10\n\t\trclient.RetryMax = 32\n\t\tctx, cancel = context.WithDeadline(ctx, time.Now().Add(c.Timeout))\n\t\trreq = rreq.WithContext(ctx)\n\t} else {\n\t\trclient.RetryMax = 0\n\t}\n\trclient.CheckRetry = func(ctx context.Context, resp *http.Response, respErr error) (bool, error) {\n\t\tcheckRetryCalled++\n\t\tif c.getRequestLimiter().Report(resp, respErr) {\n\t\t\tc.last503.Store(time.Now())\n\t\t}\n\t\tif c.Timeout == 0 {\n\t\t\treturn false, nil\n\t\t}\n\t\tretrying, err := retryablehttp.DefaultRetryPolicy(ctx, resp, respErr)\n\t\tif retrying {\n\t\t\tlastResp, lastRespBody, lastErr = resp, nil, respErr\n\t\t\tif respErr == nil {\n\t\t\t\t// Save the response and body so we\n\t\t\t\t// can return it instead of \"deadline\n\t\t\t\t// exceeded\". retryablehttp.Client\n\t\t\t\t// will drain and discard resp.body,\n\t\t\t\t// so we need to stash it separately.\n\t\t\t\tbuf, err := ioutil.ReadAll(resp.Body)\n\t\t\t\tif err == nil {\n\t\t\t\t\tlastRespBody = io.NopCloser(bytes.NewReader(buf))\n\t\t\t\t} else {\n\t\t\t\t\tlastResp, lastErr = nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn retrying, err\n\t}\n\trclient.Logger = nil\n\n\tlimiter := c.getRequestLimiter()\n\tlimiter.Acquire(ctx)\n\tif ctx.Err() != nil {\n\t\tlimiter.Release()\n\t\tcancel()\n\t\treturn nil, ctx.Err()\n\t}\n\tresp, err := rclient.Do(rreq)\n\tif (errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)) && (lastResp != nil || lastErr != nil) {\n\t\tresp = lastResp\n\t\terr = lastErr\n\t\tif checkRetryCalled > 0 && err != nil {\n\t\t\t// Mimic retryablehttp's \"giving up after X\n\t\t\t// attempts\" message, even if we gave up\n\t\t\t// because of time rather than maxretries.\n\t\t\terr = fmt.Errorf(\"%s %s giving up after %d attempt(s): %w\", req.Method, req.URL.String(), checkRetryCalled, err)\n\t\t}\n\t\tif resp != nil {\n\t\t\tresp.Body = lastRespBody\n\t\t}\n\t}\n\tif err != nil {\n\t\tlimiter.Release()\n\t\tcancel()\n\t\treturn nil, err\n\t}\n\t// We need to call cancel() eventually, but we can't use\n\t// \"defer cancel()\" because the context has to stay alive\n\t// until the caller has finished reading the response body.\n\tresp.Body = cancelOnClose{\n\t\tReadCloser: resp.Body,\n\t\tcancel: func() {\n\t\t\tlimiter.Release()\n\t\t\tcancel()\n\t\t},\n\t}\n\treturn resp, err\n}\n\n// Last503 returns the time of the most recent HTTP 503 (Service\n// Unavailable) response. 
Zero time indicates never.\nfunc (c *Client) Last503() time.Time {\n\tt, _ := c.last503.Load().(time.Time)\n\treturn t\n}\n\n// globalRequestLimiter entries (one for each APIHost) don't have a\n// hard limit on outgoing connections, but do add a delay and reduce\n// concurrency after 503 errors.\nvar (\n\tglobalRequestLimiter     = map[string]*requestLimiter{}\n\tglobalRequestLimiterLock sync.Mutex\n)\n\n// Get this client's requestLimiter, or a global requestLimiter\n// singleton for c's APIHost if this client doesn't have its own.\nfunc (c *Client) getRequestLimiter() *requestLimiter {\n\tif c.requestLimiter != nil {\n\t\treturn c.requestLimiter\n\t}\n\tglobalRequestLimiterLock.Lock()\n\tdefer globalRequestLimiterLock.Unlock()\n\tlimiter := globalRequestLimiter[c.APIHost]\n\tif limiter == nil {\n\t\tlimiter = &requestLimiter{}\n\t\tglobalRequestLimiter[c.APIHost] = limiter\n\t}\n\treturn limiter\n}\n\n// cancelOnClose calls a provided CancelFunc when its wrapped\n// ReadCloser's Close() method is called.\ntype cancelOnClose struct {\n\tio.ReadCloser\n\tcancel context.CancelFunc\n}\n\nfunc (coc cancelOnClose) Close() error {\n\terr := coc.ReadCloser.Close()\n\tcoc.cancel()\n\treturn err\n}\n\nfunc isRedirectStatus(code int) bool {\n\tswitch code {\n\tcase http.StatusMovedPermanently, http.StatusFound, http.StatusSeeOther, http.StatusTemporaryRedirect, http.StatusPermanentRedirect:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nconst minExponentialBackoffBase = time.Second\n\n// Implements retryablehttp.Backoff using the server-provided\n// Retry-After header if available, otherwise nearly-full jitter\n// exponential backoff (similar to\n// https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/),\n// in all cases respecting the provided min and max.\nfunc exponentialBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {\n\tif attemptNum > 0 && min < minExponentialBackoffBase {\n\t\tmin = minExponentialBackoffBase\n\t}\n\tvar t time.Duration\n\tif resp != nil && (resp.StatusCode == http.StatusTooManyRequests || resp.StatusCode == http.StatusServiceUnavailable) {\n\t\tif s := resp.Header.Get(\"Retry-After\"); s != \"\" {\n\t\t\tif sleep, err := strconv.ParseInt(s, 10, 64); err == nil {\n\t\t\t\tt = time.Second * time.Duration(sleep)\n\t\t\t} else if stamp, err := time.Parse(time.RFC1123, s); err == nil {\n\t\t\t\tt = stamp.Sub(time.Now())\n\t\t\t}\n\t\t}\n\t}\n\tif t == 0 {\n\t\tjitter := mathrand.New(mathrand.NewSource(int64(time.Now().Nanosecond()))).Float64()\n\t\tt = min + time.Duration((math.Pow(2, float64(attemptNum))*float64(min)-float64(min))*jitter)\n\t}\n\tif t < min {\n\t\treturn min\n\t} else if t > max {\n\t\treturn max\n\t} else {\n\t\treturn t\n\t}\n}\n\n// DoAndDecode performs req and unmarshals the response (which must be\n// JSON) into dst. 
Use this instead of RequestAndDecode if you need\n// more control of the http.Request object.\n//\n// If the response status indicates an HTTP redirect, the Location\n// header value is unmarshalled to dst as a RedirectLocation\n// key/field.\nfunc (c *Client) DoAndDecode(dst interface{}, req *http.Request) error {\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tbuf, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch {\n\tcase resp.StatusCode == http.StatusNoContent:\n\t\treturn nil\n\tcase resp.StatusCode == http.StatusOK && dst == nil:\n\t\treturn nil\n\tcase resp.StatusCode == http.StatusOK:\n\t\treturn json.Unmarshal(buf, dst)\n\n\t// If the caller uses a client with a custom CheckRedirect\n\t// func, Do() might return the 3xx response instead of\n\t// following it.\n\tcase isRedirectStatus(resp.StatusCode) && dst == nil:\n\t\treturn nil\n\tcase isRedirectStatus(resp.StatusCode):\n\t\t// Copy the redirect target URL to dst.RedirectLocation.\n\t\tbuf, err := json.Marshal(map[string]string{\"redirect_location\": resp.Header.Get(\"Location\")})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn json.Unmarshal(buf, dst)\n\n\tdefault:\n\t\treturn newTransactionError(req, resp, buf)\n\t}\n}\n\n// Convert an arbitrary struct to url.Values. For example,\n//\n//\tFoo{Bar: []int{1,2,3}, Baz: \"waz\"}\n//\n// becomes\n//\n//\turl.Values{`Bar`:`[1,2,3]`,`Baz`:`waz`}\n//\n// params itself is returned if it is already an url.Values.\nfunc anythingToValues(params interface{}) (url.Values, error) {\n\tif v, ok := params.(url.Values); ok {\n\t\treturn v, nil\n\t}\n\t// TODO: Do this more efficiently, possibly using\n\t// json.Decode/Encode, so the whole thing doesn't have to get\n\t// encoded, decoded, and re-encoded.\n\tj, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar generic map[string]interface{}\n\tdec := json.NewDecoder(bytes.NewBuffer(j))\n\tdec.UseNumber()\n\terr = dec.Decode(&generic)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\turlValues := url.Values{}\n\tfor k, v := range generic {\n\t\tif v, ok := v.(string); ok {\n\t\t\turlValues.Set(k, v)\n\t\t\tcontinue\n\t\t}\n\t\tif v, ok := v.(json.Number); ok {\n\t\t\turlValues.Set(k, v.String())\n\t\t\tcontinue\n\t\t}\n\t\tif v, ok := v.(bool); ok {\n\t\t\tif v {\n\t\t\t\turlValues.Set(k, \"true\")\n\t\t\t} else {\n\t\t\t\t// \"foo=false\", \"foo=0\", and \"foo=\"\n\t\t\t\t// are all taken as true strings, so\n\t\t\t\t// don't send false values at all --\n\t\t\t\t// rely on the default being false.\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tj, err := json.Marshal(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif bytes.Equal(j, []byte(\"null\")) {\n\t\t\t// don't add it to urlValues at all\n\t\t\tcontinue\n\t\t}\n\t\turlValues.Set(k, string(j))\n\t}\n\treturn urlValues, nil\n}\n\n// RequestAndDecode performs an API request and unmarshals the\n// response (which must be JSON) into dst. Method and body arguments\n// are the same as for http.NewRequest(). The given path is added to\n// the server's scheme/host/port to form the request URL. 
The given\n// params are passed via POST form or query string.\n//\n// path must not contain a query string.\nfunc (c *Client) RequestAndDecode(dst interface{}, method, path string, body io.Reader, params interface{}) error {\n\treturn c.RequestAndDecodeContext(context.Background(), dst, method, path, body, params)\n}\n\n// RequestAndDecodeContext does the same as RequestAndDecode, but with a context.\nfunc (c *Client) RequestAndDecodeContext(ctx context.Context, dst interface{}, method, path string, body io.Reader, params interface{}) error {\n\tif body, ok := body.(io.Closer); ok {\n\t\t// Ensure body is closed even if we error out early\n\t\tdefer body.Close()\n\t}\n\tif c.APIHost == \"\" {\n\t\tif c.loadedFromEnv {\n\t\t\treturn errors.New(\"ARVADOS_API_HOST and/or ARVADOS_API_TOKEN environment variables are not set\")\n\t\t}\n\t\treturn errors.New(\"arvados.Client cannot perform request: APIHost is not set\")\n\t}\n\turlString := c.apiURL(path)\n\turlValues, err := anythingToValues(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif dst == nil {\n\t\tif urlValues == nil {\n\t\t\turlValues = url.Values{}\n\t\t}\n\t\turlValues[\"select\"] = []string{`[\"uuid\"]`}\n\t}\n\tif urlValues == nil {\n\t\t// Nothing to send\n\t} else if body != nil || ((method == \"GET\" || method == \"HEAD\") && len(urlValues.Encode()) < 1000) {\n\t\t// Send params in query part of URL\n\t\tu, err := url.Parse(urlString)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tu.RawQuery = urlValues.Encode()\n\t\turlString = u.String()\n\t} else {\n\t\tbody = strings.NewReader(urlValues.Encode())\n\t}\n\treq, err := http.NewRequest(method, urlString, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif (method == \"GET\" || method == \"HEAD\") && body != nil {\n\t\treq.Header.Set(\"X-Http-Method-Override\", method)\n\t\treq.Method = \"POST\"\n\t}\n\treq = req.WithContext(ctx)\n\treq.Header.Set(\"Content-type\", \"application/x-www-form-urlencoded\")\n\tfor k, v := range c.SendHeader {\n\t\treq.Header[k] = v\n\t}\n\treturn c.DoAndDecode(dst, req)\n}\n\ntype resource interface {\n\tresourceName() string\n}\n\n// UpdateBody returns an io.Reader suitable for use as an http.Request\n// Body for a create or update API call.\nfunc (c *Client) UpdateBody(rsc resource) io.Reader {\n\tj, err := json.Marshal(rsc)\n\tif err != nil {\n\t\t// Return a reader that returns errors.\n\t\tr, w := io.Pipe()\n\t\tw.CloseWithError(err)\n\t\treturn r\n\t}\n\tv := url.Values{rsc.resourceName(): {string(j)}}\n\treturn bytes.NewBufferString(v.Encode())\n}\n\n// WithRequestID returns a new shallow copy of c that sends the given\n// X-Request-Id value (instead of a new randomly generated one) with\n// each subsequent request that doesn't provide its own via context or\n// header.\nfunc (c *Client) WithRequestID(reqid string) *Client {\n\tcc := *c\n\tcc.defaultRequestID = reqid\n\treturn &cc\n}\n\nfunc (c *Client) httpClient() *http.Client {\n\tswitch {\n\tcase c.Client != nil:\n\t\treturn c.Client\n\tcase c.Insecure:\n\t\treturn InsecureHTTPClient\n\tdefault:\n\t\treturn DefaultSecureClient\n\t}\n}\n\nfunc (c *Client) apiURL(path string) string {\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"https\"\n\t}\n\t// Double-slashes in URLs tend to cause subtle hidden problems\n\t// (e.g., they can behave differently when a load balancer is\n\t// in the picture). 
Here we ensure exactly one \"/\" regardless\n\t// of whether the given APIHost or path has a superfluous one.\n\treturn scheme + \"://\" + strings.TrimSuffix(c.APIHost, \"/\") + \"/\" + strings.TrimPrefix(path, \"/\")\n}\n\n// DiscoveryDocument is the Arvados server's description of itself.\ntype DiscoveryDocument struct {\n\tBasePath                     string              `json:\"basePath\"`\n\tDefaultCollectionReplication int                 `json:\"defaultCollectionReplication\"`\n\tBlobSignatureTTL             int64               `json:\"blobSignatureTtl\"`\n\tGitURL                       string              `json:\"gitUrl\"`\n\tSchemas                      map[string]Schema   `json:\"schemas\"`\n\tResources                    map[string]Resource `json:\"resources\"`\n\tRevision                     string              `json:\"revision\"`\n}\n\ntype Resource struct {\n\tMethods map[string]ResourceMethod `json:\"methods\"`\n}\n\ntype ResourceMethod struct {\n\tHTTPMethod string         `json:\"httpMethod\"`\n\tPath       string         `json:\"path\"`\n\tResponse   MethodResponse `json:\"response\"`\n}\n\ntype MethodResponse struct {\n\tRef string `json:\"$ref\"`\n}\n\ntype Schema struct {\n\tUUIDPrefix string `json:\"uuidPrefix\"`\n}\n\n// DiscoveryDocument returns a *DiscoveryDocument. The returned object\n// should not be modified: the same object may be returned by\n// subsequent calls.\nfunc (c *Client) DiscoveryDocument() (*DiscoveryDocument, error) {\n\tif c.dd != nil {\n\t\treturn c.dd, nil\n\t}\n\tvar dd DiscoveryDocument\n\terr := c.RequestAndDecode(&dd, \"GET\", \"discovery/v1/apis/arvados/v1/rest\", nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.dd = &dd\n\treturn c.dd, nil\n}\n\nvar pdhRegexp = regexp.MustCompile(`^[0-9a-f]{32}\\+\\d+$`)\n\nfunc (c *Client) modelForUUID(dd *DiscoveryDocument, uuid string) (string, error) {\n\tif pdhRegexp.MatchString(uuid) {\n\t\treturn \"Collection\", nil\n\t}\n\tif len(uuid) != 27 {\n\t\treturn \"\", fmt.Errorf(\"invalid UUID: %q\", uuid)\n\t}\n\tinfix := uuid[6:11]\n\tvar model string\n\tfor m, s := range dd.Schemas {\n\t\tif s.UUIDPrefix == infix {\n\t\t\tmodel = m\n\t\t\tbreak\n\t\t}\n\t}\n\tif model == \"\" {\n\t\treturn \"\", fmt.Errorf(\"unrecognized type portion %q in UUID %q\", infix, uuid)\n\t}\n\treturn model, nil\n}\n\nfunc (c *Client) KindForUUID(uuid string) (string, error) {\n\tdd, err := c.DiscoveryDocument()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tmodel, err := c.modelForUUID(dd, uuid)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn \"arvados#\" + strings.ToLower(model[:1]) + model[1:], nil\n}\n\nfunc (c *Client) PathForUUID(method, uuid string) (string, error) {\n\tdd, err := c.DiscoveryDocument()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tmodel, err := c.modelForUUID(dd, uuid)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar resource string\n\tfor r, rsc := range dd.Resources {\n\t\tif rsc.Methods[\"get\"].Response.Ref == model {\n\t\t\tresource = r\n\t\t\tbreak\n\t\t}\n\t}\n\tif resource == \"\" {\n\t\treturn \"\", fmt.Errorf(\"no resource for model: %q\", model)\n\t}\n\tm, ok := dd.Resources[resource].Methods[method]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"no method %q for resource %q\", method, resource)\n\t}\n\tpath := dd.BasePath + strings.Replace(m.Path, \"{uuid}\", uuid, -1)\n\tif path[0] == '/' {\n\t\tpath = path[1:]\n\t}\n\treturn path, nil\n}\n\nvar maxUUIDInt = (&big.Int{}).Exp(big.NewInt(36), big.NewInt(15), nil)\n\nfunc RandomUUID(clusterID, infix string) string 
{\n\tn, err := rand.Int(rand.Reader, maxUUIDInt)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tnstr := n.Text(36)\n\tfor len(nstr) < 15 {\n\t\tnstr = \"0\" + nstr\n\t}\n\treturn clusterID + \"-\" + infix + \"-\" + nstr\n}\n"
  },
  {
    "path": "sdk/go/arvados/client_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"math/rand\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing/iotest\"\n\t\"time\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\ntype stubTransport struct {\n\tResponses map[string]string\n\tRequests  []http.Request\n\tsync.Mutex\n}\n\nfunc (stub *stubTransport) RoundTrip(req *http.Request) (*http.Response, error) {\n\tstub.Lock()\n\tstub.Requests = append(stub.Requests, *req)\n\tstub.Unlock()\n\n\tresp := &http.Response{\n\t\tStatus:     \"200 OK\",\n\t\tStatusCode: 200,\n\t\tProto:      \"HTTP/1.1\",\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tRequest:    req,\n\t}\n\tstr := stub.Responses[req.URL.Path]\n\tif str == \"\" {\n\t\tresp.Status = \"404 Not Found\"\n\t\tresp.StatusCode = 404\n\t\tstr = \"{}\"\n\t}\n\tbuf := bytes.NewBufferString(str)\n\tresp.Body = ioutil.NopCloser(buf)\n\tresp.ContentLength = int64(buf.Len())\n\treturn resp, nil\n}\n\ntype errorTransport struct{}\n\nfunc (stub *errorTransport) RoundTrip(req *http.Request) (*http.Response, error) {\n\treturn nil, fmt.Errorf(\"something awful happened\")\n}\n\ntype timeoutTransport struct {\n\tresponse []byte\n}\n\nfunc (stub *timeoutTransport) RoundTrip(req *http.Request) (*http.Response, error) {\n\treturn &http.Response{\n\t\tStatus:     \"200 OK\",\n\t\tStatusCode: 200,\n\t\tProto:      \"HTTP/1.1\",\n\t\tProtoMajor: 1,\n\t\tProtoMinor: 1,\n\t\tRequest:    req,\n\t\tBody:       ioutil.NopCloser(iotest.TimeoutReader(bytes.NewReader(stub.response))),\n\t}, nil\n}\n\nvar _ = check.Suite(&clientSuite{})\n\ntype clientSuite struct{}\n\nfunc (*clientSuite) TestCurrentUser(c *check.C) {\n\tstub := &stubTransport{\n\t\tResponses: map[string]string{\n\t\t\t\"/arvados/v1/users/current\": `{\"uuid\":\"zzzzz-abcde-012340123401234\"}`,\n\t\t},\n\t}\n\tclient := &Client{\n\t\tClient: &http.Client{\n\t\t\tTransport: stub,\n\t\t},\n\t\tAPIHost:   \"zzzzz.arvadosapi.com\",\n\t\tAuthToken: \"xyzzy\",\n\t}\n\tu, err := client.CurrentUser()\n\tc.Check(err, check.IsNil)\n\tc.Check(u.UUID, check.Equals, \"zzzzz-abcde-012340123401234\")\n\tc.Check(stub.Requests, check.Not(check.HasLen), 0)\n\thdr := stub.Requests[len(stub.Requests)-1].Header\n\tc.Check(hdr.Get(\"Authorization\"), check.Equals, \"Bearer xyzzy\")\n\n\tclient.Client.Transport = &errorTransport{}\n\tu, err = client.CurrentUser()\n\tc.Check(err, check.NotNil)\n}\n\nfunc (*clientSuite) TestAnythingToValues(c *check.C) {\n\ttype testCase struct {\n\t\tin interface{}\n\t\t// ok==nil means anythingToValues should return an\n\t\t// error, otherwise it's a func that returns true if\n\t\t// out is correct\n\t\tok func(out url.Values) bool\n\t}\n\tfor _, tc := range []testCase{\n\t\t{\n\t\t\tin: map[string]interface{}{\"foo\": \"bar\"},\n\t\t\tok: func(out url.Values) bool {\n\t\t\t\treturn out.Get(\"foo\") == \"bar\"\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tin: map[string]interface{}{\"foo\": 2147483647},\n\t\t\tok: func(out url.Values) bool {\n\t\t\t\treturn out.Get(\"foo\") == \"2147483647\"\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tin: map[string]interface{}{\"foo\": 1.234},\n\t\t\tok: func(out url.Values) bool {\n\t\t\t\treturn out.Get(\"foo\") == \"1.234\"\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tin: map[string]interface{}{\"foo\": \"1.234\"},\n\t\t\tok: func(out url.Values) bool {\n\t\t\t\treturn out.Get(\"foo\") == 
\"1.234\"\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tin: map[string]interface{}{\"foo\": map[string]interface{}{\"bar\": 1.234}},\n\t\t\tok: func(out url.Values) bool {\n\t\t\t\treturn out.Get(\"foo\") == `{\"bar\":1.234}`\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tin: url.Values{\"foo\": {\"bar\"}},\n\t\t\tok: func(out url.Values) bool {\n\t\t\t\treturn out.Get(\"foo\") == \"bar\"\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tin: 1234,\n\t\t\tok: nil,\n\t\t},\n\t\t{\n\t\t\tin: []string{\"foo\"},\n\t\t\tok: nil,\n\t\t},\n\t} {\n\t\tc.Logf(\"%#v\", tc.in)\n\t\tout, err := anythingToValues(tc.in)\n\t\tif tc.ok == nil {\n\t\t\tc.Check(err, check.NotNil)\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(tc.ok(out), check.Equals, true)\n\t}\n}\n\n// select=[\"uuid\"] is added automatically when RequestAndDecode's\n// destination argument is nil.\nfunc (*clientSuite) TestAutoSelectUUID(c *check.C) {\n\tvar req *http.Request\n\tvar err error\n\tserver := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tc.Check(r.ParseForm(), check.IsNil)\n\t\treq = r\n\t\tw.Write([]byte(\"{}\"))\n\t}))\n\tclient := Client{\n\t\tAPIHost:   strings.TrimPrefix(server.URL, \"https://\"),\n\t\tAuthToken: \"zzz\",\n\t\tInsecure:  true,\n\t\tTimeout:   2 * time.Second,\n\t}\n\n\treq = nil\n\terr = client.RequestAndDecode(nil, http.MethodPost, \"test\", nil, nil)\n\tc.Check(err, check.IsNil)\n\tc.Check(req.FormValue(\"select\"), check.Equals, `[\"uuid\"]`)\n\n\treq = nil\n\terr = client.RequestAndDecode(nil, http.MethodGet, \"test\", nil, nil)\n\tc.Check(err, check.IsNil)\n\tc.Check(req.FormValue(\"select\"), check.Equals, `[\"uuid\"]`)\n\n\treq = nil\n\terr = client.RequestAndDecode(nil, http.MethodGet, \"test\", nil, map[string]interface{}{\"select\": []string{\"blergh\"}})\n\tc.Check(err, check.IsNil)\n\tc.Check(req.FormValue(\"select\"), check.Equals, `[\"uuid\"]`)\n\n\treq = nil\n\terr = client.RequestAndDecode(&struct{}{}, http.MethodGet, \"test\", nil, map[string]interface{}{\"select\": []string{\"blergh\"}})\n\tc.Check(err, check.IsNil)\n\tc.Check(req.FormValue(\"select\"), check.Equals, `[\"blergh\"]`)\n}\n\nfunc (*clientSuite) TestLoadConfig(c *check.C) {\n\toldenv := os.Environ()\n\tdefer func() {\n\t\tos.Clearenv()\n\t\tfor _, s := range oldenv {\n\t\t\ti := strings.IndexRune(s, '=')\n\t\t\tos.Setenv(s[:i], s[i+1:])\n\t\t}\n\t}()\n\n\ttmp := c.MkDir()\n\tos.Setenv(\"HOME\", tmp)\n\tfor _, s := range os.Environ() {\n\t\tif strings.HasPrefix(s, \"ARVADOS_\") {\n\t\t\ti := strings.IndexRune(s, '=')\n\t\t\tos.Unsetenv(s[:i])\n\t\t}\n\t}\n\tos.Mkdir(tmp+\"/.config\", 0777)\n\tos.Mkdir(tmp+\"/.config/arvados\", 0777)\n\n\t// Use $HOME/.config/arvados/settings.conf if no env vars are\n\t// set\n\tos.WriteFile(tmp+\"/.config/arvados/settings.conf\", []byte(`\n\t\tARVADOS_API_HOST = localhost:1\n\t\tARVADOS_API_TOKEN = token_from_settings_file1\n\t`), 0777)\n\tclient := NewClientFromEnv()\n\tc.Check(client.AuthToken, check.Equals, \"token_from_settings_file1\")\n\tc.Check(client.APIHost, check.Equals, \"localhost:1\")\n\tc.Check(client.Insecure, check.Equals, false)\n\n\t// ..._INSECURE=true, comments, ignored lines in settings.conf\n\tos.WriteFile(tmp+\"/.config/arvados/settings.conf\", []byte(`\n\t\t(ignored) = (ignored)\n\t\t#ARVADOS_API_HOST = localhost:2\n\t\tARVADOS_API_TOKEN = token_from_settings_file2\n\t\tARVADOS_API_HOST_INSECURE = true\n\t`), 0777)\n\tclient = NewClientFromEnv()\n\tc.Check(client.AuthToken, check.Equals, \"token_from_settings_file2\")\n\tc.Check(client.APIHost, 
check.Equals, \"\")\n\tc.Check(client.Insecure, check.Equals, true)\n\n\t// Environment variables override settings.conf\n\tos.Setenv(\"ARVADOS_API_HOST\", \"[::]:3\")\n\tos.Setenv(\"ARVADOS_API_HOST_INSECURE\", \"0\")\n\tos.Setenv(\"ARVADOS_KEEP_SERVICES\", \"http://[::]:12345\")\n\tclient = NewClientFromEnv()\n\tc.Check(client.AuthToken, check.Equals, \"token_from_settings_file2\")\n\tc.Check(client.APIHost, check.Equals, \"[::]:3\")\n\tc.Check(client.Insecure, check.Equals, false)\n\tc.Check(client.KeepServiceURIs, check.DeepEquals, []string{\"http://[::]:12345\"})\n\n\t// ARVADOS_KEEP_SERVICES environment variable overrides\n\t// cluster config, but ARVADOS_API_HOST/TOKEN do not.\n\tos.Setenv(\"ARVADOS_KEEP_SERVICES\", \"http://[::]:12345\")\n\tos.Setenv(\"ARVADOS_API_HOST\", \"wronghost.example\")\n\tos.Setenv(\"ARVADOS_API_TOKEN\", \"wrongtoken\")\n\tcfg := Cluster{}\n\tcfg.Services.Controller.ExternalURL = URL{Scheme: \"https\", Host: \"ctrl.example:55555\", Path: \"/\"}\n\tcfg.Services.Keepstore.InternalURLs = map[URL]ServiceInstance{\n\t\tURL{Scheme: \"https\", Host: \"keep0.example:55555\", Path: \"/\"}: ServiceInstance{},\n\t}\n\tclient, err := NewClientFromConfig(&cfg)\n\tc.Check(err, check.IsNil)\n\tc.Check(client.AuthToken, check.Equals, \"\")\n\tc.Check(client.APIHost, check.Equals, \"ctrl.example:55555\")\n\tc.Check(client.Insecure, check.Equals, false)\n\tc.Check(client.KeepServiceURIs, check.DeepEquals, []string{\"http://[::]:12345\"})\n}\n\nvar _ = check.Suite(&clientRetrySuite{})\n\ntype clientRetrySuite struct {\n\tserver     *httptest.Server\n\tclient     Client\n\treqs       []*http.Request\n\trespStatus chan int\n\trespDelay  time.Duration\n\n\torigLimiterQuietPeriod time.Duration\n}\n\nfunc (s *clientRetrySuite) SetUpTest(c *check.C) {\n\t// Test server: delay and return errors until a final status\n\t// appears on the respStatus channel.\n\ts.origLimiterQuietPeriod = requestLimiterQuietPeriod\n\trequestLimiterQuietPeriod = time.Second / 100\n\ts.server = httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ts.reqs = append(s.reqs, r)\n\t\tdelay := s.respDelay\n\t\tif delay == 0 {\n\t\t\tdelay = time.Duration(rand.Int63n(int64(time.Second / 10)))\n\t\t}\n\t\ttimer := time.NewTimer(delay)\n\t\tdefer timer.Stop()\n\t\tselect {\n\t\tcase code, ok := <-s.respStatus:\n\t\t\tif !ok {\n\t\t\t\tcode = http.StatusOK\n\t\t\t}\n\t\t\tw.WriteHeader(code)\n\t\t\tw.Write([]byte(`{}`))\n\t\tcase <-timer.C:\n\t\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t\t}\n\t}))\n\ts.reqs = nil\n\ts.respStatus = make(chan int, 1)\n\ts.client = Client{\n\t\tAPIHost:   s.server.URL[8:],\n\t\tAuthToken: \"zzz\",\n\t\tInsecure:  true,\n\t\tTimeout:   2 * time.Second,\n\t}\n}\n\nfunc (s *clientRetrySuite) TearDownTest(c *check.C) {\n\ts.server.Close()\n\trequestLimiterQuietPeriod = s.origLimiterQuietPeriod\n}\n\nfunc (s *clientRetrySuite) TestOK(c *check.C) {\n\ts.respStatus <- http.StatusOK\n\terr := s.client.RequestAndDecode(&struct{}{}, http.MethodGet, \"test\", nil, nil)\n\tc.Check(err, check.IsNil)\n\tc.Check(s.reqs, check.HasLen, 1)\n}\n\nfunc (s *clientRetrySuite) TestNetworkError(c *check.C) {\n\t// Close the stub server to produce a \"connection refused\" error.\n\ts.server.Close()\n\n\tstart := time.Now()\n\ttimeout := time.Second\n\tctx, cancel := context.WithDeadline(context.Background(), start.Add(timeout))\n\tdefer cancel()\n\ts.client.Timeout = timeout * 2\n\terr := s.client.RequestAndDecodeContext(ctx, &struct{}{}, http.MethodGet, \"test\", 
nil, nil)\n\tc.Check(err, check.ErrorMatches, `.*dial tcp .* connection refused.*`)\n\tdelta := time.Since(start)\n\tc.Check(delta > timeout, check.Equals, true, check.Commentf(\"time.Since(start) == %v, timeout = %v\", delta, timeout))\n}\n\nfunc (s *clientRetrySuite) TestNonRetryableError(c *check.C) {\n\ts.respStatus <- http.StatusBadRequest\n\terr := s.client.RequestAndDecode(&struct{}{}, http.MethodGet, \"test\", nil, nil)\n\tc.Check(err, check.ErrorMatches, `.*400 Bad Request.*`)\n\tc.Check(s.reqs, check.HasLen, 1)\n}\n\n// As of 0.7.2, retryablehttp does not recognize this as a\n// non-retryable error.\nfunc (s *clientRetrySuite) TestNonRetryableStdlibError(c *check.C) {\n\ts.respStatus <- http.StatusOK\n\treq, err := http.NewRequest(http.MethodGet, \"https://\"+s.client.APIHost+\"/test\", nil)\n\tc.Assert(err, check.IsNil)\n\treq.Header.Set(\"Good-Header\", \"T\\033rrible header value\")\n\terr = s.client.DoAndDecode(&struct{}{}, req)\n\tc.Check(err, check.ErrorMatches, `.*after 1 attempt.*net/http: invalid header .*`)\n\tif !c.Check(s.reqs, check.HasLen, 0) {\n\t\tc.Logf(\"%v\", s.reqs[0])\n\t}\n}\n\nfunc (s *clientRetrySuite) TestNonRetryableAfter503s(c *check.C) {\n\ttime.AfterFunc(time.Second, func() { s.respStatus <- http.StatusNotFound })\n\terr := s.client.RequestAndDecode(&struct{}{}, http.MethodGet, \"test\", nil, nil)\n\tc.Check(err, check.ErrorMatches, `.*404 Not Found.*`)\n}\n\nfunc (s *clientRetrySuite) TestOKAfter503s(c *check.C) {\n\tstart := time.Now()\n\tdelay := time.Second\n\ttime.AfterFunc(delay, func() { s.respStatus <- http.StatusOK })\n\terr := s.client.RequestAndDecode(&struct{}{}, http.MethodGet, \"test\", nil, nil)\n\tc.Check(err, check.IsNil)\n\tc.Check(len(s.reqs) > 1, check.Equals, true, check.Commentf(\"len(s.reqs) == %d\", len(s.reqs)))\n\tc.Check(time.Since(start) > delay, check.Equals, true)\n}\n\nfunc (s *clientRetrySuite) TestTimeoutAfter503(c *check.C) {\n\ts.respStatus <- http.StatusServiceUnavailable\n\ts.respDelay = time.Second * 2\n\ts.client.Timeout = time.Second / 2\n\terr := s.client.RequestAndDecode(&struct{}{}, http.MethodGet, \"test\", nil, nil)\n\tc.Check(err, check.ErrorMatches, `.*503 Service Unavailable.*`)\n\tc.Check(s.reqs, check.HasLen, 2)\n}\n\nfunc (s *clientRetrySuite) Test503Forever(c *check.C) {\n\terr := s.client.RequestAndDecode(&struct{}{}, http.MethodGet, \"test\", nil, nil)\n\tc.Check(err, check.ErrorMatches, `.*503 Service Unavailable.*`)\n\tc.Check(len(s.reqs) > 1, check.Equals, true, check.Commentf(\"len(s.reqs) == %d\", len(s.reqs)))\n}\n\nfunc (s *clientRetrySuite) TestContextAlreadyCanceled(c *check.C) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\terr := s.client.RequestAndDecodeContext(ctx, &struct{}{}, http.MethodGet, \"test\", nil, nil)\n\tc.Check(err, check.Equals, context.Canceled)\n}\n\nfunc (s *clientRetrySuite) TestExponentialBackoff(c *check.C) {\n\tvar min, max time.Duration\n\tmin, max = time.Second, 64*time.Second\n\n\tt := exponentialBackoff(min, max, 0, nil)\n\tc.Check(t, check.Equals, min)\n\n\tfor e := float64(1); e < 5; e += 1 {\n\t\tok := false\n\t\tfor i := 0; i < 30; i++ {\n\t\t\tt = exponentialBackoff(min, max, int(e), nil)\n\t\t\t// Every returned value must be between min and min*2^e, capped at max\n\t\t\tc.Check(t >= min, check.Equals, true)\n\t\t\tc.Check(t <= min*time.Duration(math.Pow(2, e)), check.Equals, true)\n\t\t\tc.Check(t <= max, check.Equals, true)\n\t\t\t// Check that jitter is actually happening by\n\t\t\t// checking that at least one of the 30 trials 
is\n\t\t\t// between min*2^(e-.75) and min*2^(e-.25)\n\t\t\tjittermin := time.Duration(float64(min) * math.Pow(2, e-0.75))\n\t\t\tjittermax := time.Duration(float64(min) * math.Pow(2, e-0.25))\n\t\t\tc.Logf(\"min %v max %v e %v jittermin %v jittermax %v t %v\", min, max, e, jittermin, jittermax, t)\n\t\t\tif t > jittermin && t < jittermax {\n\t\t\t\tok = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tc.Check(ok, check.Equals, true)\n\t}\n\n\tfor i := 0; i < 20; i++ {\n\t\tt := exponentialBackoff(min, max, 100, nil)\n\t\tc.Check(t < max, check.Equals, true)\n\t}\n\n\tfor _, trial := range []struct {\n\t\tretryAfter string\n\t\texpect     time.Duration\n\t}{\n\t\t{\"1\", time.Second * 4},             // minimum enforced\n\t\t{\"5\", time.Second * 5},             // header used\n\t\t{\"55\", time.Second * 10},           // maximum enforced\n\t\t{\"eleventy-nine\", time.Second * 4}, // invalid header, exponential backoff used\n\t\t{time.Now().UTC().Add(time.Second).Format(time.RFC1123), time.Second * 4},  // minimum enforced\n\t\t{time.Now().UTC().Add(time.Minute).Format(time.RFC1123), time.Second * 10}, // maximum enforced\n\t\t{time.Now().UTC().Add(-time.Minute).Format(time.RFC1123), time.Second * 4}, // minimum enforced\n\t} {\n\t\tc.Logf(\"trial %+v\", trial)\n\t\tt := exponentialBackoff(time.Second*4, time.Second*10, 0, &http.Response{\n\t\t\tStatusCode: http.StatusTooManyRequests,\n\t\t\tHeader:     http.Header{\"Retry-After\": {trial.retryAfter}}})\n\t\tc.Check(t, check.Equals, trial.expect)\n\t}\n\tt = exponentialBackoff(time.Second*4, time.Second*10, 0, &http.Response{\n\t\tStatusCode: http.StatusTooManyRequests,\n\t})\n\tc.Check(t, check.Equals, time.Second*4)\n\n\tt = exponentialBackoff(0, max, 0, nil)\n\tc.Check(t, check.Equals, time.Duration(0))\n\tt = exponentialBackoff(0, max, 1, nil)\n\tc.Check(t, check.Not(check.Equals), time.Duration(0))\n}\n"
  },
  {
    "path": "sdk/go/arvados/collection.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"crypto/md5\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/blockdigest\"\n)\n\nvar (\n\tUUIDMatch = regexp.MustCompile(`^[a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}$`).MatchString\n\tPDHMatch  = regexp.MustCompile(`^[0-9a-f]{32}\\+\\d+$`).MatchString\n)\n\n// Collection is an arvados#collection resource.\ntype Collection struct {\n\tUUID                      string                 `json:\"uuid\"`\n\tEtag                      string                 `json:\"etag\"`\n\tOwnerUUID                 string                 `json:\"owner_uuid\"`\n\tTrashAt                   *time.Time             `json:\"trash_at\"`\n\tManifestText              string                 `json:\"manifest_text\"`\n\tUnsignedManifestText      string                 `json:\"unsigned_manifest_text\"`\n\tName                      string                 `json:\"name\"`\n\tCreatedAt                 time.Time              `json:\"created_at\"`\n\tModifiedAt                time.Time              `json:\"modified_at\"`\n\tModifiedByUserUUID        string                 `json:\"modified_by_user_uuid\"`\n\tPortableDataHash          string                 `json:\"portable_data_hash\"`\n\tReplicationConfirmed      *int                   `json:\"replication_confirmed\"`\n\tReplicationConfirmedAt    *time.Time             `json:\"replication_confirmed_at\"`\n\tReplicationDesired        *int                   `json:\"replication_desired\"`\n\tStorageClassesDesired     []string               `json:\"storage_classes_desired\"`\n\tStorageClassesConfirmed   []string               `json:\"storage_classes_confirmed\"`\n\tStorageClassesConfirmedAt *time.Time             `json:\"storage_classes_confirmed_at\"`\n\tDeleteAt                  *time.Time             `json:\"delete_at\"`\n\tIsTrashed                 bool                   `json:\"is_trashed\"`\n\tProperties                map[string]interface{} `json:\"properties\"`\n\tWritableBy                []string               `json:\"writable_by,omitempty\"`\n\tFileCount                 int                    `json:\"file_count\"`\n\tFileSizeTotal             int64                  `json:\"file_size_total\"`\n\tVersion                   int                    `json:\"version\"`\n\tPreserveVersion           bool                   `json:\"preserve_version\"`\n\tCurrentVersionUUID        string                 `json:\"current_version_uuid\"`\n\tDescription               string                 `json:\"description\"`\n}\n\nfunc (c Collection) resourceName() string {\n\treturn \"collection\"\n}\n\n// SizedDigests returns the hash+size part of each data block\n// referenced by the collection.\n//\n// Zero-length blocks are not included.\nfunc (c *Collection) SizedDigests() ([]SizedDigest, error) {\n\tmanifestText := []byte(c.ManifestText)\n\tif len(manifestText) == 0 {\n\t\tmanifestText = []byte(c.UnsignedManifestText)\n\t}\n\tif len(manifestText) == 0 && c.PortableDataHash != \"d41d8cd98f00b204e9800998ecf8427e+0\" {\n\t\t// TODO: Check more subtle forms of corruption, too\n\t\treturn nil, fmt.Errorf(\"manifest is missing\")\n\t}\n\tsds := make([]SizedDigest, 0, len(manifestText)/40)\n\tfor _, line := range bytes.Split(manifestText, []byte{'\\n'}) {\n\t\tif len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\ttokens := bytes.Split(line, []byte{' '})\n\t\tif len(tokens) < 3 {\n\t\t\treturn nil, 
fmt.Errorf(\"Invalid stream (<3 tokens): %q\", line)\n\t\t}\n\t\tfor _, token := range tokens[1:] {\n\t\t\tif !blockdigest.LocatorPattern.Match(token) {\n\t\t\t\t// FIXME: ensure it's a file token\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif bytes.HasPrefix(token, []byte(\"d41d8cd98f00b204e9800998ecf8427e+0\")) {\n\t\t\t\t// Exclude \"empty block\" placeholder\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// FIXME: shouldn't assume 32 char hash\n\t\t\tif i := bytes.IndexRune(token[33:], '+'); i >= 0 {\n\t\t\t\ttoken = token[:33+i]\n\t\t\t}\n\t\t\tsds = append(sds, SizedDigest(string(token)))\n\t\t}\n\t}\n\treturn sds, nil\n}\n\ntype CollectionList struct {\n\tItems          []Collection `json:\"items\"`\n\tItemsAvailable int          `json:\"items_available\"`\n\tOffset         int          `json:\"offset\"`\n\tLimit          int          `json:\"limit\"`\n}\n\n// PortableDataHash computes the portable data hash of the given\n// manifest.\nfunc PortableDataHash(mt string) string {\n\t// To calculate the PDH, we write the manifest to an md5 hash\n\t// func, except we skip the \"extra\" part of block tokens that\n\t// look like \"abcdef0123456789abcdef0123456789+12345+extra\".\n\t//\n\t// This code is simplified by the facts that (A) all block\n\t// tokens -- even the first and last in a stream -- are\n\t// preceded and followed by a space character; and (B) all\n\t// non-block tokens either start with '.'  or contain ':'.\n\t//\n\t// A regexp-based approach (like the one this replaced) would\n\t// be more readable, but very slow.\n\th := md5.New()\n\tsize := 0\n\ttodo := []byte(mt)\n\tfor len(todo) > 0 {\n\t\t// sp is the end of the current token (note that if\n\t\t// the current token is the last file token in a\n\t\t// stream, we'll also include the \\n and the dirname\n\t\t// token on the next line, which is perfectly fine for\n\t\t// our purposes).\n\t\tsp := bytes.IndexByte(todo, ' ')\n\t\tif sp < 0 {\n\t\t\t// Last token of the manifest, which is never\n\t\t\t// a block token.\n\t\t\tn, _ := h.Write(todo)\n\t\t\tsize += n\n\t\t\tbreak\n\t\t}\n\t\tif sp >= 34 && todo[32] == '+' && bytes.IndexByte(todo[:32], ':') == -1 && todo[0] != '.' {\n\t\t\t// todo[:sp] is a block token.\n\t\t\tsizeend := bytes.IndexByte(todo[33:sp], '+')\n\t\t\tif sizeend < 0 {\n\t\t\t\t// \"hash+size\"\n\t\t\t\tsizeend = sp\n\t\t\t} else {\n\t\t\t\t// \"hash+size+extra\"\n\t\t\t\tsizeend += 33\n\t\t\t}\n\t\t\tn, _ := h.Write(todo[:sizeend])\n\t\t\th.Write([]byte{' '})\n\t\t\tsize += n + 1\n\t\t} else {\n\t\t\t// todo[:sp] is not a block token.\n\t\t\tn, _ := h.Write(todo[:sp+1])\n\t\t\tsize += n\n\t\t}\n\t\ttodo = todo[sp+1:]\n\t}\n\treturn fmt.Sprintf(\"%x+%d\", h.Sum(nil), size)\n}\n\n// CollectionIDFromDNSName returns a UUID or PDH if s begins with a\n// UUID or URL-encoded PDH; otherwise \"\".\nfunc CollectionIDFromDNSName(s string) string {\n\t// Strip domain.\n\tif i := strings.IndexRune(s, '.'); i >= 0 {\n\t\ts = s[:i]\n\t}\n\t// Names like {uuid}--collections.example.com serve the same\n\t// purpose as {uuid}.collections.example.com but can reduce\n\t// cost/effort of using [additional] wildcard certificates.\n\tif i := strings.Index(s, \"--\"); i >= 0 {\n\t\ts = s[:i]\n\t}\n\tif UUIDMatch(s) {\n\t\treturn s\n\t}\n\tif pdh := strings.Replace(s, \"-\", \"+\", 1); PDHMatch(pdh) {\n\t\treturn pdh\n\t}\n\treturn \"\"\n}\n"
  },
  {
    "path": "sdk/go/arvados/collection_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&CollectionSuite{})\n\ntype CollectionSuite struct{}\n\nfunc (s *CollectionSuite) TestSizedDigests(c *check.C) {\n\tcoll := Collection{ManifestText: \". d41d8cd98f00b204e9800998ecf8427e+0 acbd18db4cc2f85cedef654fccc4a4d8+3 73feffa4b7f6bb68e44cf984c85f6e88+3+Z+K@xyzzy 0:0:foo 0:3:bar 3:3:baz\\n\"}\n\tsd, err := coll.SizedDigests()\n\tc.Check(err, check.IsNil)\n\tc.Check(sd, check.DeepEquals, []SizedDigest{\"acbd18db4cc2f85cedef654fccc4a4d8+3\", \"73feffa4b7f6bb68e44cf984c85f6e88+3\"})\n\n\tcoll = Collection{ManifestText: \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:bar\\n. 73feffa4b7f6bb68e44cf984c85f6e88+3+Z+K@xyzzy 0:3:baz\\n\"}\n\tsd, err = coll.SizedDigests()\n\tc.Check(err, check.IsNil)\n\tc.Check(sd, check.DeepEquals, []SizedDigest{\"acbd18db4cc2f85cedef654fccc4a4d8+3\", \"73feffa4b7f6bb68e44cf984c85f6e88+3\"})\n\n\tcoll = Collection{ManifestText: \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\"}\n\tsd, err = coll.SizedDigests()\n\tc.Check(err, check.IsNil)\n\tc.Check(sd, check.HasLen, 0)\n\n\tcoll = Collection{ManifestText: \"\", PortableDataHash: \"d41d8cd98f00b204e9800998ecf8427e+0\"}\n\tsd, err = coll.SizedDigests()\n\tc.Check(err, check.IsNil)\n\tc.Check(sd, check.HasLen, 0)\n}\n"
  },
  {
    "path": "sdk/go/arvados/config.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"os\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/config\"\n)\n\nvar DefaultConfigFile = func() string {\n\tif path := os.Getenv(\"ARVADOS_CONFIG\"); path != \"\" {\n\t\treturn path\n\t}\n\treturn \"/etc/arvados/config.yml\"\n}()\n\ntype Config struct {\n\tClusters         map[string]Cluster\n\tAutoReloadConfig bool\n\tSourceTimestamp  time.Time\n\tSourceSHA256     string\n}\n\n// GetConfig returns the current system config, loading it from\n// configFile if needed.\nfunc GetConfig(configFile string) (*Config, error) {\n\tvar cfg Config\n\terr := config.LoadFile(&cfg, configFile)\n\treturn &cfg, err\n}\n\n// GetCluster returns the cluster ID and config for the given\n// cluster, or the default/only configured cluster if clusterID is \"\".\nfunc (sc *Config) GetCluster(clusterID string) (*Cluster, error) {\n\tif clusterID == \"\" {\n\t\tif len(sc.Clusters) == 0 {\n\t\t\treturn nil, fmt.Errorf(\"no clusters configured\")\n\t\t} else if len(sc.Clusters) > 1 {\n\t\t\treturn nil, fmt.Errorf(\"multiple clusters configured, cannot choose\")\n\t\t} else {\n\t\t\tfor id, cc := range sc.Clusters {\n\t\t\t\tcc.ClusterID = id\n\t\t\t\treturn &cc, nil\n\t\t\t}\n\t\t}\n\t}\n\tcc, ok := sc.Clusters[clusterID]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"cluster %q is not configured\", clusterID)\n\t}\n\tcc.ClusterID = clusterID\n\treturn &cc, nil\n}\n\ntype WebDAVCacheConfig struct {\n\tTTL                Duration\n\tDiskCacheSize      ByteSizeOrPercent\n\tMaxCollectionBytes ByteSize\n\tMaxSessions        int\n}\n\ntype UploadDownloadPermission struct {\n\tUpload   bool\n\tDownload bool\n}\n\ntype UploadDownloadRolePermissions struct {\n\tUser  UploadDownloadPermission\n\tAdmin UploadDownloadPermission\n}\n\ntype ManagedProperties map[string]struct {\n\tValue     interface{}\n\tFunction  string\n\tProtected bool\n}\n\ntype Cluster struct {\n\tClusterID       string `json:\"-\"`\n\tManagementToken string\n\tSystemRootToken string\n\tServices        Services\n\tInstanceTypes   InstanceTypeMap\n\tContainers      ContainersConfig\n\tRemoteClusters  map[string]RemoteCluster\n\tPostgreSQL      PostgreSQL\n\n\tAPI struct {\n\t\tAsyncPermissionsUpdateInterval   Duration\n\t\tDisabledAPIs                     StringSet\n\t\tMaxIndexDatabaseRead             int\n\t\tMaxItemsPerResponse              int\n\t\tMaxConcurrentRailsRequests       int\n\t\tMaxConcurrentRequests            int\n\t\tMaxQueuedRequests                int\n\t\tMaxGatewayTunnels                int\n\t\tMaxQueueTimeForLockRequests      Duration\n\t\tMaxKeepBlobBuffers               int\n\t\tMaxRequestAmplification          int\n\t\tMaxRequestSize                   int\n\t\tMaxTokenLifetime                 Duration\n\t\tRequestTimeout                   Duration\n\t\tSendTimeout                      Duration\n\t\tWebsocketClientEventQueue        int\n\t\tWebsocketServerEventQueue        int\n\t\tKeepServiceRequestTimeout        Duration\n\t\tVocabularyPath                   string\n\t\tFreezeProjectRequiresDescription bool\n\t\tFreezeProjectRequiresProperties  StringSet\n\t\tUnfreezeProjectRequiresAdmin     bool\n\t\tLockBeforeUpdate                 bool\n\t}\n\tAuditLogs struct {\n\t\tMaxAge             Duration\n\t\tMaxDeleteBatch     int\n\t\tUnloggedAttributes StringSet\n\t}\n\tCollections struct 
{\n\t\tBlobSigning                  bool\n\t\tBlobSigningKey               string\n\t\tBlobSigningTTL               Duration\n\t\tBlobTrash                    bool\n\t\tBlobTrashLifetime            Duration\n\t\tBlobTrashCheckInterval       Duration\n\t\tBlobTrashConcurrency         int\n\t\tBlobDeleteConcurrency        int\n\t\tBlobReplicateConcurrency     int\n\t\tCollectionVersioning         bool\n\t\tDefaultTrashLifetime         Duration\n\t\tDefaultReplication           int\n\t\tManagedProperties            ManagedProperties\n\t\tPreserveVersionIfIdle        Duration\n\t\tTrashSweepInterval           Duration\n\t\tTrustAllContent              bool\n\t\tForwardSlashNameSubstitution string\n\t\tS3FolderObjects              bool\n\n\t\tBlobMissingReport        string\n\t\tBalancePeriod            Duration\n\t\tBalanceCollectionBatch   int\n\t\tBalanceCollectionBuffers int\n\t\tBalanceTimeout           Duration\n\t\tBalanceUpdateLimit       int\n\t\tBalancePullLimit         int\n\t\tBalanceTrashLimit        int\n\n\t\tWebDAVCache WebDAVCacheConfig\n\n\t\tKeepproxyPermission       UploadDownloadRolePermissions\n\t\tWebDAVPermission          UploadDownloadRolePermissions\n\t\tWebDAVLogEvents           bool\n\t\tWebDAVLogDownloadInterval Duration\n\t\tWebDAVOutputBuffer        ByteSize\n\t}\n\tLogin struct {\n\t\tLDAP struct {\n\t\t\tEnable             bool\n\t\t\tURL                URL\n\t\t\tStartTLS           bool\n\t\t\tInsecureTLS        bool\n\t\t\tMinTLSVersion      TLSVersion\n\t\t\tStripDomain        string\n\t\t\tAppendDomain       string\n\t\t\tSearchAttribute    string\n\t\t\tSearchBindUser     string\n\t\t\tSearchBindPassword string\n\t\t\tSearchBase         string\n\t\t\tSearchFilters      string\n\t\t\tEmailAttribute     string\n\t\t\tUsernameAttribute  string\n\t\t}\n\t\tGoogle struct {\n\t\t\tEnable                          bool\n\t\t\tClientID                        string\n\t\t\tClientSecret                    string\n\t\t\tAlternateEmailAddresses         bool\n\t\t\tAuthenticationRequestParameters map[string]string\n\t\t}\n\t\tOpenIDConnect struct {\n\t\t\tEnable                          bool\n\t\t\tIssuer                          string\n\t\t\tClientID                        string\n\t\t\tClientSecret                    string\n\t\t\tEmailClaim                      string\n\t\t\tEmailVerifiedClaim              string\n\t\t\tUsernameClaim                   string\n\t\t\tAcceptAccessToken               bool\n\t\t\tAcceptAccessTokenScope          string\n\t\t\tAuthenticationRequestParameters map[string]string\n\t\t}\n\t\tPAM struct {\n\t\t\tEnable             bool\n\t\t\tService            string\n\t\t\tDefaultEmailDomain string\n\t\t}\n\t\tTest struct {\n\t\t\tEnable bool\n\t\t\tUsers  map[string]TestUser\n\t\t}\n\t\tLoginCluster         string\n\t\tRemoteTokenRefresh   Duration\n\t\tTokenLifetime        Duration\n\t\tTrustedClients       map[URL]struct{}\n\t\tTrustPrivateNetworks bool\n\t\tIssueTrustedTokens   bool\n\t}\n\tSystemLogs struct {\n\t\tLogLevel                  string\n\t\tFormat                    string\n\t\tMaxRequestLogParamsSize   int\n\t\tRequestQueueDumpDirectory string\n\t}\n\tTLS struct {\n\t\tCertificate string\n\t\tKey         string\n\t\tInsecure    bool\n\t\tACME        struct {\n\t\t\tServer string\n\t\t}\n\t}\n\tUsers struct {\n\t\tActivatedUsersAreVisibleToOthers      bool\n\t\tAnonymousUserToken                    string\n\t\tAdminNotifierEmailFrom                string\n\t\tAutoAdminFirstUser                    
bool\n\t\tAutoAdminUserWithEmail                string\n\t\tAutoSetupNewUsers                     bool\n\t\tAutoSetupNewUsersWithVmUUID           string\n\t\tAutoSetupUsernameBlacklist            StringSet\n\t\tEmailSubjectPrefix                    string\n\t\tNewInactiveUserNotificationRecipients StringSet\n\t\tNewUserNotificationRecipients         StringSet\n\t\tNewUsersAreActive                     bool\n\t\tSendUserSetupNotificationEmail        bool\n\t\tSupportEmailAddress                   string\n\t\tUserNotifierEmailFrom                 string\n\t\tUserNotifierEmailBcc                  StringSet\n\t\tUserProfileNotificationAddress        string\n\t\tPreferDomainForUsername               string\n\t\tUserSetupMailText                     string\n\t\tRoleGroupsVisibleToAll                bool\n\t\tCanCreateRoleGroups                   bool\n\t\tActivityLoggingPeriod                 Duration\n\t\tSyncIgnoredGroups                     []string\n\t\tSyncRequiredGroups                    []string\n\t\tSyncUserAccounts                      bool\n\t\tSyncUserAPITokens                     bool\n\t\tSyncUserGroups                        bool\n\t\tSyncUserSSHKeys                       bool\n\t}\n\tStorageClasses map[string]StorageClassConfig\n\tVolumes        map[string]Volume\n\tWorkbench      struct {\n\t\tActivationContactLink   string\n\t\tArvadosDocsite          string\n\t\tArvadosPublicDataDocURL string\n\t\tDisableSharingURLsUI    bool\n\t\tFileViewersConfigURL    string\n\t\tShowUserAgreementInline bool\n\t\tSiteName                string\n\t\tTheme                   string\n\t\tUserProfileFormFields   map[string]struct {\n\t\t\tType                 string\n\t\t\tFormFieldTitle       string\n\t\t\tFormFieldDescription string\n\t\t\tRequired             bool\n\t\t\tPosition             int\n\t\t\tOptions              map[string]struct{}\n\t\t}\n\t\tUserProfileFormMessage string\n\t\tWelcomePageHTML        string\n\t\tInactivePageHTML       string\n\t\tSSHHelpPageHTML        string\n\t\tSSHHelpHostSuffix      string\n\t\tIdleTimeout            Duration\n\t\tBannerUUID             string\n\t}\n}\n\ntype StorageClassConfig struct {\n\tDefault  bool\n\tPriority int\n}\n\ntype Volume struct {\n\tAccessViaHosts         map[URL]VolumeAccess\n\tReadOnly               bool\n\tAllowTrashWhenReadOnly bool\n\tReplication            int\n\tStorageClasses         map[string]bool\n\tDriver                 string\n\tDriverParameters       json.RawMessage\n}\n\ntype S3VolumeDriverParameters struct {\n\tAccessKeyID        string\n\tSecretAccessKey    string\n\tEndpoint           string\n\tRegion             string\n\tBucket             string\n\tLocationConstraint bool\n\tV2Signature        bool\n\tIndexPageSize      int\n\tConnectTimeout     Duration\n\tReadTimeout        Duration\n\tRaceWindow         Duration\n\tUnsafeDelete       bool\n\tPrefixLength       int\n\tUsePathStyle       bool\n}\n\ntype AzureVolumeDriverParameters struct {\n\tStorageAccountName   string\n\tStorageAccountKey    string\n\tStorageBaseURL       string\n\tContainerName        string\n\tRequestTimeout       Duration\n\tListBlobsRetryDelay  Duration\n\tListBlobsMaxAttempts int\n}\n\ntype DirectoryVolumeDriverParameters struct {\n\tRoot      string\n\tSerialize bool\n}\n\ntype VolumeAccess struct {\n\tReadOnly bool\n}\n\ntype Services struct {\n\tComposer             Service\n\tContainerWebServices ServiceWithPortRange\n\tController           Service\n\tDispatchCloud        Service\n\tDispatchLSF          Service\n\tDispatchSLURM  
      Service\n\tHealth               Service\n\tKeepbalance          Service\n\tKeepproxy            Service\n\tKeepstore            Service\n\tRailsAPI             Service\n\tWebDAVDownload       Service\n\tWebDAV               Service\n\tWebShell             Service\n\tWebsocket            Service\n\tWorkbench1           Service\n\tWorkbench2           Service\n}\n\ntype Service struct {\n\tInternalURLs map[URL]ServiceInstance\n\tExternalURL  URL\n}\n\ntype ServiceWithPortRange struct {\n\tService\n\tExternalPortMin int\n\tExternalPortMax int\n}\n\ntype TestUser struct {\n\tEmail    string\n\tPassword string\n}\n\n// URL is a url.URL that is also usable as a JSON key/value.\ntype URL url.URL\n\n// UnmarshalText implements encoding.TextUnmarshaler so URL can be\n// used as a JSON key/value.\nfunc (su *URL) UnmarshalText(text []byte) error {\n\tu, err := url.Parse(string(text))\n\tif err == nil {\n\t\t*su = URL(*u)\n\t\tif su.Path == \"\" && su.Host != \"\" {\n\t\t\t// http://example really means http://example/\n\t\t\tsu.Path = \"/\"\n\t\t}\n\t}\n\treturn err\n}\n\nfunc (su URL) MarshalText() ([]byte, error) {\n\treturn []byte(su.String()), nil\n}\n\nfunc (su URL) String() string {\n\treturn (*url.URL)(&su).String()\n}\n\ntype TLSVersion uint16\n\nfunc (v TLSVersion) MarshalText() ([]byte, error) {\n\tswitch v {\n\tcase 0:\n\t\treturn []byte{}, nil\n\tcase tls.VersionTLS10:\n\t\treturn []byte(\"1.0\"), nil\n\tcase tls.VersionTLS11:\n\t\treturn []byte(\"1.1\"), nil\n\tcase tls.VersionTLS12:\n\t\treturn []byte(\"1.2\"), nil\n\tcase tls.VersionTLS13:\n\t\treturn []byte(\"1.3\"), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported TLSVersion %x\", v)\n\t}\n}\n\nfunc (v *TLSVersion) UnmarshalJSON(text []byte) error {\n\tif len(text) > 0 && text[0] == '\"' {\n\t\tvar s string\n\t\terr := json.Unmarshal(text, &s)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttext = []byte(s)\n\t}\n\tswitch string(text) {\n\tcase \"\":\n\t\t*v = 0\n\tcase \"1.0\":\n\t\t*v = tls.VersionTLS10\n\tcase \"1.1\":\n\t\t*v = tls.VersionTLS11\n\tcase \"1.2\":\n\t\t*v = tls.VersionTLS12\n\tcase \"1.3\":\n\t\t*v = tls.VersionTLS13\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported TLSVersion %q\", text)\n\t}\n\treturn nil\n}\n\ntype ServiceInstance struct {\n\tListenURL  URL\n\tRendezvous string `json:\",omitempty\"`\n}\n\ntype PostgreSQL struct {\n\tConnection     PostgreSQLConnection\n\tConnectionPool int\n}\n\ntype PostgreSQLConnection map[string]string\n\ntype RemoteCluster struct {\n\tHost          string\n\tProxy         bool\n\tScheme        string\n\tInsecure      bool\n\tActivateUsers bool\n}\n\ntype GPUFeatures struct {\n\t// as of this writing, stack is \"cuda\" or \"rocm\"\n\tStack          string\n\tDriverVersion  string\n\tHardwareTarget string\n\tDeviceCount    int\n\tVRAM           ByteSize\n}\n\ntype InstanceType struct {\n\tName            string `json:\"-\"`\n\tProviderType    string\n\tVCPUs           int\n\tRAM             ByteSize\n\tScratch         ByteSize `json:\"-\"`\n\tIncludedScratch ByteSize\n\tAddedScratch    ByteSize\n\tPrice           float64\n\tPreemptible     bool\n\tGPU             GPUFeatures\n}\n\ntype ContainersConfig struct {\n\tCloudVMs                      CloudVMsConfig\n\tCrunchRunCommand              string\n\tCrunchRunArgumentsList        []string\n\tDefaultKeepCacheRAM           ByteSize\n\tDispatchPrivateKey            string\n\tLogReuseDecisions             bool\n\tMaxDispatchAttempts           int\n\tMaxRetryAttempts              int\n\tMinRetryPeriod                
Duration\n\tReserveExtraRAM               ByteSize\n\tStaleLockTimeout              Duration\n\tSupportedDockerImageFormats   StringSet\n\tAlwaysUsePreemptibleInstances bool\n\tPreemptiblePriceFactor        float64\n\tMaximumPriceFactor            float64\n\tRuntimeEngine                 string\n\tLocalKeepBlobBuffersPerVCPU   int\n\tLocalKeepLogsToContainerLog   string\n\n\tLogging struct {\n\t\tLogUpdatePeriod Duration\n\t\tLogUpdateSize   ByteSize\n\t}\n\tShellAccess struct {\n\t\tAdmin bool\n\t\tUser  bool\n\t}\n\tSLURM struct {\n\t\tPrioritySpread             int64\n\t\tSbatchArgumentsList        []string\n\t\tSbatchGPUArgumentsList     []string\n\t\tSbatchEnvironmentVariables map[string]string\n\t}\n\tLSF struct {\n\t\tBsubSudoUser       string\n\t\tBsubArgumentsList  []string\n\t\tBsubGPUArguments   []string\n\t\tMaxRunTimeOverhead Duration\n\t\tMaxRunTimeDefault  Duration\n\t}\n}\n\ntype CloudVMsConfig struct {\n\tEnable bool\n\n\tBootProbeCommand                string\n\tInstanceInitCommand             string\n\tDeployRunnerBinary              string\n\tDeployRunnerDirectory           string\n\tDeployPublicKey                 bool\n\tImageID                         string\n\tMaxCloudOpsPerSecond            int\n\tMaxProbesPerSecond              int\n\tMaxConcurrentInstanceCreateOps  int\n\tMaxRunningContainersPerInstance int\n\tMaxInstances                    int\n\tInitialQuotaEstimate            int\n\tSupervisorFraction              float64\n\tPollInterval                    Duration\n\tProbeInterval                   Duration\n\tSSHPort                         string\n\tSyncInterval                    Duration\n\tTimeoutBooting                  Duration\n\tTimeoutIdle                     Duration\n\tTimeoutProbe                    Duration\n\tTimeoutShutdown                 Duration\n\tTimeoutSignal                   Duration\n\tTimeoutStaleRunLock             Duration\n\tTimeoutTERM                     Duration\n\tResourceTags                    map[string]string\n\tTagKeyPrefix                    string\n\n\tDriver           string\n\tDriverParameters json.RawMessage\n}\n\ntype InstanceTypeMap map[string]InstanceType\n\nvar errDuplicateInstanceTypeName = errors.New(\"duplicate instance type name\")\n\n// UnmarshalJSON does special handling of InstanceTypes:\n//\n// - populate computed fields (Name and Scratch)\n//\n// - error out if InstanceTypes are populated as an array, which was\n// deprecated in Arvados 1.2.0\nfunc (it *InstanceTypeMap) UnmarshalJSON(data []byte) error {\n\tfixup := func(t InstanceType) (InstanceType, error) {\n\t\tif t.ProviderType == \"\" {\n\t\t\tt.ProviderType = t.Name\n\t\t}\n\t\t// If t.Scratch is set in the configuration file, it will be ignored and overwritten.\n\t\t// It will also generate a \"deprecated or unknown config entry\" warning.\n\t\tt.Scratch = t.IncludedScratch + t.AddedScratch\n\t\treturn t, nil\n\t}\n\n\tif len(data) > 0 && data[0] == '[' {\n\t\treturn fmt.Errorf(\"InstanceTypes must be specified as a map, not an array, see https://doc.arvados.org/admin/config.html\")\n\t}\n\tvar hash map[string]InstanceType\n\terr := json.Unmarshal(data, &hash)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Fill in Name field (and ProviderType field, if not\n\t// specified) using hash key.\n\t*it = InstanceTypeMap(hash)\n\tfor name, t := range *it {\n\t\tt.Name = name\n\t\tt, err := fixup(t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t(*it)[name] = t\n\t}\n\treturn nil\n}\n\ntype StringSet map[string]struct{}\n\n// UnmarshalJSON handles old config files that provide an array of\n// strings instead of a hash.\nfunc (ss *StringSet) UnmarshalJSON(data []byte) error {\n\tif len(data) > 0 && data[0] == '[' {\n\t\tvar arr []string\n\t\terr := json.Unmarshal(data, &arr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(arr) == 0 {\n\t\t\t*ss = nil\n\t\t\treturn nil\n\t\t}\n\t\t*ss = make(map[string]struct{}, len(arr))\n\t\tfor _, t := range arr {\n\t\t\t(*ss)[t] = struct{}{}\n\t\t}\n\t\treturn nil\n\t}\n\tvar hash map[string]struct{}\n\terr := json.Unmarshal(data, &hash)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*ss = make(map[string]struct{}, len(hash))\n\tfor t := range hash {\n\t\t(*ss)[t] = struct{}{}\n\t}\n\n\treturn nil\n}\n\ntype ServiceName string\n\nconst (\n\tServiceNameController    ServiceName = \"arvados-controller\"\n\tServiceNameDispatchCloud ServiceName = \"arvados-dispatch-cloud\"\n\tServiceNameDispatchLSF   ServiceName = \"arvados-dispatch-lsf\"\n\tServiceNameDispatchSLURM ServiceName = \"crunch-dispatch-slurm\"\n\tServiceNameHealth        ServiceName = \"arvados-health\"\n\tServiceNameKeepbalance   ServiceName = \"keep-balance\"\n\tServiceNameKeepproxy     ServiceName = \"keepproxy\"\n\tServiceNameKeepstore     ServiceName = \"keepstore\"\n\tServiceNameKeepweb       ServiceName = \"keep-web\"\n\tServiceNameRailsAPI      ServiceName = \"arvados-api-server\"\n\tServiceNameWebsocket     ServiceName = \"arvados-ws\"\n\tServiceNameWorkbench1    ServiceName = \"arvados-workbench1\"\n\tServiceNameWorkbench2    ServiceName = \"arvados-workbench2\"\n)\n\n// Map returns all services as a map, suitable for iterating over all\n// services or looking up a service by name.\nfunc (svcs Services) Map() map[ServiceName]Service {\n\treturn map[ServiceName]Service{\n\t\tServiceNameController:    svcs.Controller,\n\t\tServiceNameDispatchCloud: svcs.DispatchCloud,\n\t\tServiceNameDispatchLSF:   svcs.DispatchLSF,\n\t\tServiceNameDispatchSLURM: svcs.DispatchSLURM,\n\t\tServiceNameHealth:        svcs.Health,\n\t\tServiceNameKeepbalance:   svcs.Keepbalance,\n\t\tServiceNameKeepproxy:     svcs.Keepproxy,\n\t\tServiceNameKeepstore:     svcs.Keepstore,\n\t\tServiceNameKeepweb:       svcs.WebDAV,\n\t\tServiceNameRailsAPI:      svcs.RailsAPI,\n\t\tServiceNameWebsocket:     svcs.Websocket,\n\t\tServiceNameWorkbench1:    svcs.Workbench1,\n\t\tServiceNameWorkbench2:    svcs.Workbench2,\n\t}\n}\n"
  },
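  {
    "path": "sdk/go/arvados/config_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\n// Hypothetical example file: an illustrative sketch (not upstream\n// code) showing how Volume.DriverParameters is decoded in two\n// stages: the YAML config is unmarshaled into json.RawMessage first,\n// then into the driver-specific parameter struct. The bucket and\n// region values below are made-up placeholders.\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\n\t\"github.com/ghodss/yaml\"\n)\n\nfunc ExampleVolume_driverParameters() {\n\tvar vol Volume\n\terr := yaml.Unmarshal([]byte(\"Driver: S3\\nDriverParameters:\\n  Bucket: example-bucket\\n  Region: us-east-1\\n\"), &vol)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t// ghodss/yaml converts YAML to JSON before unmarshaling, so\n\t// vol.DriverParameters now holds the raw JSON for the\n\t// driver-specific section.\n\tvar params S3VolumeDriverParameters\n\terr = json.Unmarshal(vol.DriverParameters, &params)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(vol.Driver, params.Bucket, params.Region)\n\t// Output: S3 example-bucket us-east-1\n}\n"
  },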
  {
    "path": "sdk/go/arvados/config_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\n\t\"github.com/ghodss/yaml\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&ConfigSuite{})\n\ntype ConfigSuite struct{}\n\nfunc (s *ConfigSuite) TestStringSetAsArray(c *check.C) {\n\tvar cluster Cluster\n\tyaml.Unmarshal([]byte(`\nAPI:\n  DisabledAPIs: [jobs.list]`), &cluster)\n\tc.Check(len(cluster.API.DisabledAPIs), check.Equals, 1)\n\t_, ok := cluster.API.DisabledAPIs[\"jobs.list\"]\n\tc.Check(ok, check.Equals, true)\n}\n\nfunc (s *ConfigSuite) TestInstanceTypesAsHash(c *check.C) {\n\tvar cluster Cluster\n\tyaml.Unmarshal([]byte(\"InstanceTypes:\\n  foo:\\n    ProviderType: bar\\n\"), &cluster)\n\tc.Check(len(cluster.InstanceTypes), check.Equals, 1)\n\tc.Check(cluster.InstanceTypes[\"foo\"].Name, check.Equals, \"foo\")\n\tc.Check(cluster.InstanceTypes[\"foo\"].ProviderType, check.Equals, \"bar\")\n}\n\nfunc (s *ConfigSuite) TestInstanceTypeSize(c *check.C) {\n\tvar it InstanceType\n\terr := yaml.Unmarshal([]byte(\"Name: foo\\nIncludedScratch: 4GB\\nRAM: 4GiB\\n\"), &it)\n\tc.Check(err, check.IsNil)\n\tc.Check(int64(it.IncludedScratch), check.Equals, int64(4000000000))\n\tc.Check(int64(it.RAM), check.Equals, int64(4294967296))\n}\n\nfunc (s *ConfigSuite) TestInstanceTypeFixup(c *check.C) {\n\tfor _, confdata := range []string{\n\t\t// Current format: map of entries\n\t\t`{foo4: {IncludedScratch: 4GB}, foo8: {ProviderType: foo_8, AddedScratch: 8GB}}`,\n\t} {\n\t\tc.Log(confdata)\n\t\tvar itm InstanceTypeMap\n\t\terr := yaml.Unmarshal([]byte(confdata), &itm)\n\t\tc.Check(err, check.IsNil)\n\n\t\tc.Check(itm[\"foo4\"].Name, check.Equals, \"foo4\")\n\t\tc.Check(itm[\"foo4\"].ProviderType, check.Equals, \"foo4\")\n\t\tc.Check(itm[\"foo4\"].Scratch, check.Equals, ByteSize(4000000000))\n\t\tc.Check(itm[\"foo4\"].AddedScratch, check.Equals, ByteSize(0))\n\t\tc.Check(itm[\"foo4\"].IncludedScratch, check.Equals, ByteSize(4000000000))\n\n\t\tc.Check(itm[\"foo8\"].Name, check.Equals, \"foo8\")\n\t\tc.Check(itm[\"foo8\"].ProviderType, check.Equals, \"foo_8\")\n\t\tc.Check(itm[\"foo8\"].Scratch, check.Equals, ByteSize(8000000000))\n\t\tc.Check(itm[\"foo8\"].AddedScratch, check.Equals, ByteSize(8000000000))\n\t\tc.Check(itm[\"foo8\"].IncludedScratch, check.Equals, ByteSize(0))\n\t}\n}\n\nfunc (s *ConfigSuite) TestURLTrailingSlash(c *check.C) {\n\tvar a, b map[URL]bool\n\tjson.Unmarshal([]byte(`{\"https://foo.example\": true}`), &a)\n\tjson.Unmarshal([]byte(`{\"https://foo.example/\": true}`), &b)\n\tc.Check(a, check.DeepEquals, b)\n}\n\nfunc (s *ConfigSuite) TestTLSVersion(c *check.C) {\n\tvar v struct {\n\t\tVersion TLSVersion\n\t}\n\terr := json.Unmarshal([]byte(`{\"Version\": 1.0}`), &v)\n\tc.Check(err, check.IsNil)\n\tc.Check(v.Version, check.Equals, TLSVersion(tls.VersionTLS10))\n\n\terr = json.Unmarshal([]byte(`{\"Version\": \"1.3\"}`), &v)\n\tc.Check(err, check.IsNil)\n\tc.Check(v.Version, check.Equals, TLSVersion(tls.VersionTLS13))\n\n\terr = json.Unmarshal([]byte(`{\"Version\": \"1.345\"}`), &v)\n\tc.Check(err, check.NotNil)\n}\n"
  },
  {
    "path": "sdk/go/arvados/container.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport \"time\"\n\n// Container is an arvados#container resource.\ntype Container struct {\n\tUUID                      string                   `json:\"uuid\"`\n\tEtag                      string                   `json:\"etag\"`\n\tCreatedAt                 time.Time                `json:\"created_at\"`\n\tModifiedByUserUUID        string                   `json:\"modified_by_user_uuid\"`\n\tModifiedAt                time.Time                `json:\"modified_at\"`\n\tCommand                   []string                 `json:\"command\"`\n\tContainerImage            string                   `json:\"container_image\"`\n\tCwd                       string                   `json:\"cwd\"`\n\tEnvironment               map[string]string        `json:\"environment\"`\n\tLockedByUUID              string                   `json:\"locked_by_uuid\"`\n\tLockCount                 int                      `json:\"lock_count\"`\n\tMounts                    map[string]Mount         `json:\"mounts\"`\n\tOutput                    string                   `json:\"output\"`\n\tOutputPath                string                   `json:\"output_path\"`\n\tOutputGlob                []string                 `json:\"output_glob\"`\n\tPriority                  int64                    `json:\"priority\"`\n\tRuntimeConstraints        RuntimeConstraints       `json:\"runtime_constraints\"`\n\tState                     ContainerState           `json:\"state\"`\n\tSchedulingParameters      SchedulingParameters     `json:\"scheduling_parameters\"`\n\tExitCode                  int                      `json:\"exit_code\"`\n\tRuntimeStatus             map[string]interface{}   `json:\"runtime_status\"`\n\tStartedAt                 *time.Time               `json:\"started_at\"`  // nil if not yet started\n\tFinishedAt                *time.Time               `json:\"finished_at\"` // nil if not yet finished\n\tGatewayAddress            string                   `json:\"gateway_address\"`\n\tInteractiveSessionStarted bool                     `json:\"interactive_session_started\"`\n\tOutputStorageClasses      []string                 `json:\"output_storage_classes\"`\n\tRuntimeUserUUID           string                   `json:\"runtime_user_uuid\"`\n\tRuntimeAuthScopes         []string                 `json:\"runtime_auth_scopes\"`\n\tRuntimeToken              string                   `json:\"runtime_token\"`\n\tAuthUUID                  string                   `json:\"auth_uuid\"`\n\tLog                       string                   `json:\"log\"`\n\tCost                      float64                  `json:\"cost\"`\n\tSubrequestsCost           float64                  `json:\"subrequests_cost\"`\n\tService                   bool                     `json:\"service\"`\n\tPublishedPorts            map[string]PublishedPort `json:\"published_ports\"`\n}\n\n// ContainerRequest is an arvados#container_request resource.\ntype ContainerRequest struct {\n\tUUID                    string                          `json:\"uuid\"`\n\tOwnerUUID               string                          `json:\"owner_uuid\"`\n\tCreatedAt               time.Time                       `json:\"created_at\"`\n\tModifiedByUserUUID      string                          `json:\"modified_by_user_uuid\"`\n\tModifiedAt              time.Time                       `json:\"modified_at\"`\n\tEtag                    string       
                   `json:\"etag\"`\n\tName                    string                          `json:\"name\"`\n\tDescription             string                          `json:\"description\"`\n\tProperties              map[string]interface{}          `json:\"properties\"`\n\tState                   ContainerRequestState           `json:\"state\"`\n\tRequestingContainerUUID string                          `json:\"requesting_container_uuid\"`\n\tContainerUUID           string                          `json:\"container_uuid\"`\n\tContainerCountMax       int                             `json:\"container_count_max\"`\n\tMounts                  map[string]Mount                `json:\"mounts\"`\n\tRuntimeConstraints      RuntimeConstraints              `json:\"runtime_constraints\"`\n\tSchedulingParameters    SchedulingParameters            `json:\"scheduling_parameters\"`\n\tContainerImage          string                          `json:\"container_image\"`\n\tEnvironment             map[string]string               `json:\"environment\"`\n\tCwd                     string                          `json:\"cwd\"`\n\tCommand                 []string                        `json:\"command\"`\n\tOutputPath              string                          `json:\"output_path\"`\n\tOutputGlob              []string                        `json:\"output_glob\"`\n\tOutputName              string                          `json:\"output_name\"`\n\tOutputTTL               int                             `json:\"output_ttl\"`\n\tPriority                int                             `json:\"priority\"`\n\tUseExisting             bool                            `json:\"use_existing\"`\n\tLogUUID                 string                          `json:\"log_uuid\"`\n\tOutputUUID              string                          `json:\"output_uuid\"`\n\tRuntimeToken            string                          `json:\"runtime_token\"`\n\tExpiresAt               time.Time                       `json:\"expires_at\"`\n\tFilters                 []Filter                        `json:\"filters\"`\n\tContainerCount          int                             `json:\"container_count\"`\n\tOutputStorageClasses    []string                        `json:\"output_storage_classes\"`\n\tOutputProperties        map[string]interface{}          `json:\"output_properties\"`\n\tCumulativeCost          float64                         `json:\"cumulative_cost\"`\n\tService                 bool                            `json:\"service\"`\n\tPublishedPorts          map[string]RequestPublishedPort `json:\"published_ports\"`\n}\n\n// Mount is special behavior to attach to a filesystem path or device.\ntype Mount struct {\n\tKind              string      `json:\"kind\"`\n\tWritable          bool        `json:\"writable\"`\n\tPortableDataHash  string      `json:\"portable_data_hash\"`\n\tUUID              string      `json:\"uuid\"`\n\tDeviceType        string      `json:\"device_type\"`\n\tPath              string      `json:\"path\"`\n\tContent           interface{} `json:\"content\"`\n\tExcludeFromOutput bool        `json:\"exclude_from_output\"`\n\tCapacity          int64       `json:\"capacity\"`\n}\n\ntype GPURuntimeConstraints struct {\n\tStack          string   `json:\"stack\"`\n\tDriverVersion  string   `json:\"driver_version\"`\n\tHardwareTarget []string `json:\"hardware_target\"`\n\tDeviceCount    int      `json:\"device_count\"`\n\tVRAM           int64    `json:\"vram\"`\n}\n\n// RuntimeConstraints specify a container's compute resources 
(RAM,\n// CPU) and network connectivity.\ntype RuntimeConstraints struct {\n\tAPI           bool                  `json:\"API\"`\n\tRAM           int64                 `json:\"ram\"`\n\tVCPUs         int                   `json:\"vcpus\"`\n\tKeepCacheRAM  int64                 `json:\"keep_cache_ram\"`\n\tKeepCacheDisk int64                 `json:\"keep_cache_disk\"`\n\tGPU           GPURuntimeConstraints `json:\"gpu\"`\n}\n\n// SchedulingParameters specify a container's scheduling parameters\n// such as Partitions\ntype SchedulingParameters struct {\n\tPartitions  []string `json:\"partitions\"`\n\tPreemptible bool     `json:\"preemptible\"`\n\tMaxRunTime  int      `json:\"max_run_time\"`\n\tSupervisor  bool     `json:\"supervisor\"`\n}\n\n// ContainerList is an arvados#containerList resource.\ntype ContainerList struct {\n\tItems          []Container `json:\"items\"`\n\tItemsAvailable int         `json:\"items_available\"`\n\tOffset         int         `json:\"offset\"`\n\tLimit          int         `json:\"limit\"`\n}\n\n// ContainerRequestList is an arvados#containerRequestList resource.\ntype ContainerRequestList struct {\n\tItems          []ContainerRequest `json:\"items\"`\n\tItemsAvailable int                `json:\"items_available\"`\n\tOffset         int                `json:\"offset\"`\n\tLimit          int                `json:\"limit\"`\n}\n\n// ContainerState is a string corresponding to a valid Container state.\ntype ContainerState string\n\nconst (\n\tContainerStateQueued    = ContainerState(\"Queued\")\n\tContainerStateLocked    = ContainerState(\"Locked\")\n\tContainerStateRunning   = ContainerState(\"Running\")\n\tContainerStateComplete  = ContainerState(\"Complete\")\n\tContainerStateCancelled = ContainerState(\"Cancelled\")\n)\n\n// ContainerRequestState is a string corresponding to a valid Container Request state.\ntype ContainerRequestState string\n\nconst (\n\tContainerRequestStateUncomitted = ContainerRequestState(\"Uncommitted\")\n\tContainerRequestStateCommitted  = ContainerRequestState(\"Committed\")\n\tContainerRequestStateFinal      = ContainerRequestState(\"Final\")\n)\n\ntype ContainerStatus struct {\n\tUUID             string         `json:\"uuid\"`\n\tState            ContainerState `json:\"container_state\"`\n\tSchedulingStatus string         `json:\"scheduling_status\"`\n}\n\ntype PublishedPort struct {\n\tRequestPublishedPort\n\tBaseURL    string `json:\"base_url\"`\n\tInitialURL string `json:\"initial_url\"`\n}\n\ntype RequestPublishedPort struct {\n\tAccess      PublishedPortAccess `json:\"access\"`\n\tLabel       string              `json:\"label\"`\n\tInitialPath string              `json:\"initial_path\"`\n}\n\ntype PublishedPortAccess string\n\nconst (\n\tPublishedPortAccessPrivate = PublishedPortAccess(\"private\")\n\tPublishedPortAccessPublic  = PublishedPortAccess(\"public\")\n)\n"
  },
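  {
    "path": "sdk/go/arvados/container_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\n// Hypothetical example file: an illustrative sketch (not upstream\n// code) showing how the json field tags on Container map an API\n// response onto the struct. The UUID below is a made-up placeholder.\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n)\n\nfunc ExampleContainer_unmarshal() {\n\tvar ctr Container\n\terr := json.Unmarshal([]byte(`{\"uuid\":\"zzzzz-dz642-0123456789abcde\",\"state\":\"Queued\",\"priority\":500}`), &ctr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(ctr.UUID, ctr.State, ctr.Priority)\n\t// Output: zzzzz-dz642-0123456789abcde Queued 500\n}\n"
  },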
  {
    "path": "sdk/go/arvados/container_gateway.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"net/http\"\n\t\"sync\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc (cresp ConnectionResponse) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tdefer cresp.Conn.Close()\n\tconn, bufrw, err := http.NewResponseController(w).Hijack()\n\tif err != nil {\n\t\thttp.Error(w, \"connection upgrade failed: \"+err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer conn.Close()\n\tconn.Write([]byte(\"HTTP/1.1 101 Switching Protocols\\r\\n\"))\n\tw.Header().Set(\"Connection\", \"upgrade\")\n\tfor k, v := range cresp.Header {\n\t\tw.Header()[k] = v\n\t}\n\tw.Header().Write(conn)\n\tconn.Write([]byte(\"\\r\\n\"))\n\thttpserver.ExemptFromDeadline(req)\n\n\tvar bytesIn, bytesOut int64\n\tctx, cancel := context.WithCancel(req.Context())\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdefer cancel()\n\t\tn, err := io.CopyN(conn, cresp.Bufrw, int64(cresp.Bufrw.Reader.Buffered()))\n\t\tbytesOut += n\n\t\tif err == nil {\n\t\t\tn, err = io.Copy(conn, cresp.Conn)\n\t\t\tbytesOut += n\n\t\t}\n\t\tif err != nil {\n\t\t\tctxlog.FromContext(ctx).WithError(err).Error(\"error copying downstream\")\n\t\t}\n\t}()\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tdefer cancel()\n\t\tn, err := io.CopyN(cresp.Conn, bufrw, int64(bufrw.Reader.Buffered()))\n\t\tbytesIn += n\n\t\tif err == nil {\n\t\t\tn, err = io.Copy(cresp.Conn, conn)\n\t\t\tbytesIn += n\n\t\t}\n\t\tif err != nil {\n\t\t\tctxlog.FromContext(ctx).WithError(err).Error(\"error copying upstream\")\n\t\t}\n\t}()\n\t<-ctx.Done()\n\tgo func() {\n\t\t// Wait for both io.Copy goroutines to finish and increment\n\t\t// their byte counters.\n\t\twg.Wait()\n\t\tif cresp.Logger != nil {\n\t\t\tcresp.Logger.WithFields(logrus.Fields{\n\t\t\t\t\"bytesIn\":  bytesIn,\n\t\t\t\t\"bytesOut\": bytesOut,\n\t\t\t}).Info(\"closed connection\")\n\t\t}\n\t}()\n}\n"
  },
  {
    "path": "sdk/go/arvados/context.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"context\"\n)\n\ntype contextKeyRequestID struct{}\ntype contextKeyAuthorization struct{}\n\nfunc ContextWithRequestID(ctx context.Context, reqid string) context.Context {\n\treturn context.WithValue(ctx, contextKeyRequestID{}, reqid)\n}\n\n// ContextWithAuthorization returns a child context that (when used\n// with (*Client)RequestAndDecodeContext) sends the given\n// Authorization header value instead of the Client's default\n// AuthToken.\nfunc ContextWithAuthorization(ctx context.Context, value string) context.Context {\n\treturn context.WithValue(ctx, contextKeyAuthorization{}, value)\n}\n"
  },
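  {
    "path": "sdk/go/arvados/context_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\n// Hypothetical example file: an illustrative sketch (not upstream\n// code) of ContextWithAuthorization. The host and token values are\n// made-up placeholders; with no Output comment the example is\n// compile-checked but never executed, so no request is sent.\n\nimport (\n\t\"context\"\n)\n\nfunc ExampleContextWithAuthorization() {\n\tc := &Client{APIHost: \"zzzzz.example.com\", AuthToken: \"default-token\"}\n\t// This one request is sent with a different Authorization\n\t// header instead of the client's default AuthToken.\n\tctx := ContextWithAuthorization(context.Background(), \"Bearer some-other-token\")\n\tvar user User\n\t_ = c.RequestAndDecodeContext(ctx, &user, \"GET\", \"arvados/v1/users/current\", nil, nil)\n}\n"
  },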
  {
    "path": "sdk/go/arvados/contextgroup.go",
    "content": "package arvados\n\nimport (\n\t\"context\"\n\t\"sync\"\n)\n\n// A contextGroup is a context-aware variation on sync.WaitGroup. It\n// provides a child context for the added funcs to use, so they can\n// exit early if another added func returns an error. Its Wait()\n// method returns the first error returned by any added func.\n//\n// Example:\n//\n//\terr := errors.New(\"oops\")\n//\tcg := newContextGroup()\n//\tdefer cg.Cancel()\n//\tcg.Go(func() error {\n//\t\tsomeFuncWithContext(cg.Context())\n//\t\treturn nil\n//\t})\n//\tcg.Go(func() error {\n//\t\treturn err // this cancels cg.Context()\n//\t})\n//\treturn cg.Wait() // returns err after both goroutines have ended\ntype contextGroup struct {\n\tctx    context.Context\n\tcancel context.CancelFunc\n\twg     sync.WaitGroup\n\terr    error\n\tmtx    sync.Mutex\n}\n\n// newContextGroup returns a new contextGroup. The caller must\n// eventually call the Cancel() method of the returned contextGroup.\nfunc newContextGroup(ctx context.Context) *contextGroup {\n\tctx, cancel := context.WithCancel(ctx)\n\treturn &contextGroup{\n\t\tctx:    ctx,\n\t\tcancel: cancel,\n\t}\n}\n\n// Cancel cancels the context group.\nfunc (cg *contextGroup) Cancel() {\n\tcg.cancel()\n}\n\n// Context returns a context.Context which will be canceled when all\n// funcs have succeeded or one has failed.\nfunc (cg *contextGroup) Context() context.Context {\n\treturn cg.ctx\n}\n\n// Go calls f in a new goroutine. If f returns an error, the\n// contextGroup is canceled.\n//\n// If f notices cg.Context() is done, it should abandon further work\n// and return. In this case, f's return value will be ignored.\nfunc (cg *contextGroup) Go(f func() error) {\n\tcg.mtx.Lock()\n\tdefer cg.mtx.Unlock()\n\tif cg.err != nil {\n\t\treturn\n\t}\n\tcg.wg.Add(1)\n\tgo func() {\n\t\tdefer cg.wg.Done()\n\t\terr := f()\n\t\tcg.mtx.Lock()\n\t\tdefer cg.mtx.Unlock()\n\t\tif err != nil && cg.err == nil {\n\t\t\tcg.err = err\n\t\t\tcg.cancel()\n\t\t}\n\t}()\n}\n\n// Wait waits for all added funcs to return, and returns the first\n// non-nil error.\n//\n// If the parent context is canceled before a func returns an error,\n// Wait returns the parent context's Err().\n//\n// Wait returns nil if all funcs return nil before the parent context\n// is canceled.\nfunc (cg *contextGroup) Wait() error {\n\tcg.wg.Wait()\n\tcg.mtx.Lock()\n\tdefer cg.mtx.Unlock()\n\tif cg.err != nil {\n\t\treturn cg.err\n\t}\n\treturn cg.ctx.Err()\n}\n"
  },
  {
    "path": "sdk/go/arvados/credential.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport \"time\"\n\n// Credential is an arvados#credential record\ntype Credential struct {\n\tUUID               string    `json:\"uuid,omitempty\"`\n\tEtag               string    `json:\"etag\"`\n\tOwnerUUID          string    `json:\"owner_uuid\"`\n\tCreatedAt          time.Time `json:\"created_at\"`\n\tModifiedAt         time.Time `json:\"modified_at\"`\n\tModifiedByUserUUID string    `json:\"modified_by_user_uuid\"`\n\tName               string    `json:\"name\"`\n\tDescription        string    `json:\"description\"`\n\tCredentialClass    string    `json:\"credential_class\"`\n\tScopes             []string  `json:\"scopes\"`\n\tExternalId         string    `json:\"external_id\"`\n\tSecret             string    `json:\"secret,omitempty\"`\n\tExpiresAt          time.Time `json:\"expires_at\"`\n}\n\n// CredentialList is an arvados#credentialList resource.\ntype CredentialList struct {\n\tItems          []Credential `json:\"items\"`\n\tItemsAvailable int          `json:\"items_available\"`\n\tOffset         int          `json:\"offset\"`\n\tLimit          int          `json:\"limit\"`\n}\n"
  },
  {
    "path": "sdk/go/arvados/doc.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Package arvados is a client library for Arvados.\n//\n// The API is not stable: it should be considered experimental\n// pre-release.\n//\n// The intent is to offer model types and API call functions that can\n// be generated automatically (or at least mostly automatically) from\n// a discovery document. For the time being, there is a manually\n// generated subset of those types and API calls with (approximately)\n// the right signatures, plus client/authentication support and some\n// convenience functions.\npackage arvados\n"
  },
  {
    "path": "sdk/go/arvados/duration.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\n// Duration is time.Duration but looks like \"12s\" in JSON, rather than\n// a number of nanoseconds.\ntype Duration time.Duration\n\n// UnmarshalJSON implements json.Unmarshaler.\nfunc (d *Duration) UnmarshalJSON(data []byte) error {\n\tif bytes.Equal(data, []byte(`\"0\"`)) || bytes.Equal(data, []byte(`0`)) {\n\t\t// Unitless 0 is not accepted by ParseDuration, but we\n\t\t// accept it as a reasonable spelling of 0\n\t\t// nanoseconds.\n\t\t*d = 0\n\t\treturn nil\n\t}\n\tif data[0] == '\"' {\n\t\treturn d.Set(string(data[1 : len(data)-1]))\n\t}\n\t// Mimic error message returned by ParseDuration for a number\n\t// without units.\n\treturn fmt.Errorf(\"missing unit in duration %q\", data)\n}\n\n// MarshalJSON implements json.Marshaler.\nfunc (d Duration) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(d.String())\n}\n\n// String returns a format similar to (time.Duration)String() but with\n// \"0m\" and \"0s\" removed: e.g., \"1h\" instead of \"1h0m0s\".\nfunc (d Duration) String() string {\n\ts := time.Duration(d).String()\n\ts = strings.Replace(s, \"m0s\", \"m\", 1)\n\ts = strings.Replace(s, \"h0m\", \"h\", 1)\n\treturn s\n}\n\n// Duration returns a time.Duration.\nfunc (d Duration) Duration() time.Duration {\n\treturn time.Duration(d)\n}\n\n// Set implements the flag.Value interface and sets the duration value by using time.ParseDuration to parse the string.\nfunc (d *Duration) Set(s string) error {\n\tdur, err := time.ParseDuration(s)\n\t*d = Duration(dur)\n\treturn err\n}\n"
  },
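  {
    "path": "sdk/go/arvados/duration_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\n// Hypothetical example file: an illustrative sketch (not upstream\n// code) showing that *Duration implements flag.Value via Set() and\n// String(), so it can be used directly as a command line flag.\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n)\n\nfunc ExampleDuration_Set() {\n\tvar d Duration\n\tflags := flag.NewFlagSet(\"example\", flag.ContinueOnError)\n\tflags.Var(&d, \"timeout\", \"request timeout\")\n\terr := flags.Parse([]string{\"-timeout\", \"1h30m\"})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(d)\n\t// Output: 1h30m\n}\n"
  },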
  {
    "path": "sdk/go/arvados/duration_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"encoding/json\"\n\t\"time\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&DurationSuite{})\n\ntype DurationSuite struct{}\n\nfunc (s *DurationSuite) TestMarshalJSON(c *check.C) {\n\tvar d struct {\n\t\tD Duration\n\t}\n\terr := json.Unmarshal([]byte(`{\"D\":\"1.234s\"}`), &d)\n\tc.Check(err, check.IsNil)\n\tc.Check(d.D, check.Equals, Duration(time.Second+234*time.Millisecond))\n\tbuf, err := json.Marshal(d)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(buf), check.Equals, `{\"D\":\"1.234s\"}`)\n\n\tfor _, trial := range []struct {\n\t\tseconds int\n\t\tout     string\n\t}{\n\t\t{30, \"30s\"},\n\t\t{60, \"1m\"},\n\t\t{120, \"2m\"},\n\t\t{150, \"2m30s\"},\n\t\t{3600, \"1h\"},\n\t\t{7201, \"2h1s\"},\n\t\t{360600, \"100h10m\"},\n\t\t{360610, \"100h10m10s\"},\n\t} {\n\t\tbuf, err := json.Marshal(Duration(time.Duration(trial.seconds) * time.Second))\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(string(buf), check.Equals, `\"`+trial.out+`\"`)\n\t}\n}\n\nfunc (s *DurationSuite) TestUnmarshalJSON(c *check.C) {\n\tvar d struct {\n\t\tD Duration\n\t}\n\terr := json.Unmarshal([]byte(`{\"D\":1.234}`), &d)\n\tc.Check(err, check.ErrorMatches, `.*missing unit in duration \"?1\\.234\"?`)\n\terr = json.Unmarshal([]byte(`{\"D\":\"1.234\"}`), &d)\n\tc.Check(err, check.ErrorMatches, `.*missing unit in duration \"?1\\.234\"?`)\n\terr = json.Unmarshal([]byte(`{\"D\":\"1\"}`), &d)\n\tc.Check(err, check.ErrorMatches, `.*missing unit in duration \"?1\"?`)\n\terr = json.Unmarshal([]byte(`{\"D\":\"foobar\"}`), &d)\n\tc.Check(err, check.ErrorMatches, `.*invalid duration \"?foobar\"?`)\n\terr = json.Unmarshal([]byte(`{\"D\":\"60s\"}`), &d)\n\tc.Check(err, check.IsNil)\n\tc.Check(d.D.Duration(), check.Equals, time.Minute)\n\n\td.D = Duration(time.Second)\n\terr = json.Unmarshal([]byte(`{\"D\":\"0\"}`), &d)\n\tc.Check(err, check.IsNil)\n\tc.Check(d.D.Duration(), check.Equals, time.Duration(0))\n\n\td.D = Duration(time.Second)\n\terr = json.Unmarshal([]byte(`{\"D\":0}`), &d)\n\tc.Check(err, check.IsNil)\n\tc.Check(d.D.Duration(), check.Equals, time.Duration(0))\n}\n"
  },
  {
    "path": "sdk/go/arvados/error.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n)\n\ntype TransactionError struct {\n\tMethod     string\n\tURL        url.URL\n\tStatusCode int\n\tStatus     string\n\tErrors     []string\n}\n\nfunc (e TransactionError) Error() (s string) {\n\ts = fmt.Sprintf(\"request failed: %s\", e.URL.String())\n\tif e.Status != \"\" {\n\t\ts = s + \": \" + e.Status\n\t}\n\tif len(e.Errors) > 0 {\n\t\ts = s + \": \" + strings.Join(e.Errors, \"; \")\n\t}\n\treturn\n}\n\nfunc (e TransactionError) HTTPStatus() int {\n\treturn e.StatusCode\n}\n\nfunc newTransactionError(req *http.Request, resp *http.Response, buf []byte) *TransactionError {\n\tvar e TransactionError\n\tif json.Unmarshal(buf, &e) != nil {\n\t\t// No JSON-formatted error response\n\t\te.Errors = nil\n\t}\n\te.Method = req.Method\n\te.URL = *req.URL\n\tif resp != nil {\n\t\te.Status = resp.Status\n\t\te.StatusCode = resp.StatusCode\n\t}\n\treturn &e\n}\n"
  },
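  {
    "path": "sdk/go/arvados/error_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\n// Hypothetical example file: an illustrative sketch (not upstream\n// code) of how TransactionError formats its Error() string from the\n// request URL, HTTP status, and any server-reported error messages.\n// The host and path below are made-up placeholders.\n\nimport (\n\t\"fmt\"\n\t\"net/url\"\n)\n\nfunc ExampleTransactionError_Error() {\n\terr := TransactionError{\n\t\tMethod:     \"GET\",\n\t\tURL:        url.URL{Scheme: \"https\", Host: \"zzzzz.example.com\", Path: \"/arvados/v1/users/current\"},\n\t\tStatus:     \"404 Not Found\",\n\t\tStatusCode: 404,\n\t\tErrors:     []string{\"path not found\"},\n\t}\n\tfmt.Println(err.Error())\n\t// Output: request failed: https://zzzzz.example.com/arvados/v1/users/current: 404 Not Found: path not found\n}\n"
  },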
  {
    "path": "sdk/go/arvados/fs_backend.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n)\n\ntype fsBackend interface {\n\tkeepClient\n\tapiClient\n}\n\n// Ideally *Client would do everything; meanwhile keepBackend\n// implements fsBackend by merging the two kinds of arvados client.\ntype keepBackend struct {\n\tkeepClient\n\tapiClient\n}\n\ntype keepClient interface {\n\tReadAt(locator string, p []byte, off int) (int, error)\n\tBlockRead(context.Context, BlockReadOptions) (int, error)\n\tBlockWrite(context.Context, BlockWriteOptions) (BlockWriteResponse, error)\n\tLocalLocator(locator string) (string, error)\n}\n\ntype apiClient interface {\n\tRequestAndDecode(dst interface{}, method, path string, body io.Reader, params interface{}) error\n}\n\nvar errStubClient = errors.New(\"stub client\")\n\ntype StubClient struct{}\n\nfunc (*StubClient) ReadAt(string, []byte, int) (int, error)                  { return 0, errStubClient }\nfunc (*StubClient) LocalLocator(loc string) (string, error)                  { return loc, nil }\nfunc (*StubClient) BlockRead(context.Context, BlockReadOptions) (int, error) { return 0, errStubClient }\nfunc (*StubClient) BlockWrite(context.Context, BlockWriteOptions) (BlockWriteResponse, error) {\n\treturn BlockWriteResponse{}, errStubClient\n}\nfunc (*StubClient) RequestAndDecode(_ interface{}, _, _ string, _ io.Reader, _ interface{}) error {\n\treturn errStubClient\n}\n"
  },
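  {
    "path": "sdk/go/arvados/fs_backend_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\n// Hypothetical example file: an illustrative sketch (not upstream\n// code). StubClient implements both the keepClient and apiClient\n// interfaces, so a keepBackend assembled from two StubClients\n// satisfies fsBackend, which makes it a convenient stand-in backend\n// in tests. The compile-time assertion below demonstrates this.\n\nvar _ fsBackend = keepBackend{\n\tkeepClient: &StubClient{},\n\tapiClient:  &StubClient{},\n}\n"
  },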
  {
    "path": "sdk/go/arvados/fs_base.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tErrReadOnlyFile      = errors.New(\"read-only file\")\n\tErrNegativeOffset    = errors.New(\"cannot seek to negative offset\")\n\tErrInvalidOperation  = errors.New(\"invalid operation\")\n\tErrInvalidArgument   = errors.New(\"invalid argument\")\n\tErrDirectoryNotEmpty = errors.New(\"directory not empty\")\n\tErrWriteOnlyMode     = errors.New(\"file is O_WRONLY\")\n\tErrSyncNotSupported  = errors.New(\"O_SYNC flag is not supported\")\n\tErrIsDirectory       = errors.New(\"cannot rename file to overwrite existing directory\")\n\tErrNotADirectory     = errors.New(\"not a directory\")\n\tErrPermission        = os.ErrPermission\n\tDebugLocksPanicMode  = false\n)\n\ntype syncer interface {\n\tSync() error\n}\n\nfunc debugPanicIfNotLocked(l sync.Locker, writing bool) {\n\tif !DebugLocksPanicMode {\n\t\treturn\n\t}\n\trace := false\n\tif rl, ok := l.(interface {\n\t\tRLock()\n\t\tRUnlock()\n\t}); ok && writing {\n\t\tgo func() {\n\t\t\t// Fail if we can grab the read lock during an\n\t\t\t// operation that purportedly has write lock.\n\t\t\trl.RLock()\n\t\t\trace = true\n\t\t\trl.RUnlock()\n\t\t}()\n\t} else {\n\t\tgo func() {\n\t\t\tl.Lock()\n\t\t\trace = true\n\t\t\tl.Unlock()\n\t\t}()\n\t}\n\ttime.Sleep(100)\n\tif race {\n\t\tpanic(\"bug: caller-must-have-lock func called, but nobody has lock\")\n\t}\n}\n\n// A File is an *os.File-like interface for reading and writing files\n// in a FileSystem.\ntype File interface {\n\tio.Reader\n\tio.Writer\n\tio.Closer\n\tio.Seeker\n\tSize() int64\n\tReaddir(int) ([]os.FileInfo, error)\n\tStat() (os.FileInfo, error)\n\tTruncate(int64) error\n\tSync() error\n\t// Create a snapshot of a file or directory tree, which can\n\t// then be spliced onto a different path or a different\n\t// collection.\n\tSnapshot() (*Subtree, error)\n\t// Replace this file or directory with the given snapshot.\n\t// The target must be inside a collection: Splice returns an\n\t// error if the File is a virtual file or directory like\n\t// by_id, a project directory, .arvados#collection,\n\t// etc. Splice can replace directories with regular files and\n\t// vice versa, except it cannot replace the root directory of\n\t// a collection with a regular file.\n\tSplice(snapshot *Subtree) error\n}\n\n// A Subtree is a detached part of a filesystem tree that can be\n// spliced into a filesystem via (File)Splice().\ntype Subtree struct {\n\tinode inode\n}\n\n// A FileSystem is an http.Filesystem plus Stat() and support for\n// opening writable files. 
All methods are safe to call from multiple\n// goroutines.\ntype FileSystem interface {\n\thttp.FileSystem\n\tfsBackend\n\n\trootnode() inode\n\n\t// filesystem-wide lock: used by Rename() to prevent deadlock\n\t// while locking multiple inodes.\n\tlocker() sync.Locker\n\n\t// throttle for limiting concurrent background writers\n\tthrottle() *throttle\n\n\t// create a new node with nil parent.\n\tnewNode(name string, perm os.FileMode, modTime time.Time) (node inode, err error)\n\n\t// analogous to os.Stat()\n\tStat(name string) (os.FileInfo, error)\n\n\t// analogous to os.Create(): create/truncate a file and open it O_RDWR.\n\tCreate(name string) (File, error)\n\n\t// Like os.OpenFile(): create or open a file or directory.\n\t//\n\t// If flag&os.O_EXCL==0, it opens an existing file or\n\t// directory if one exists. If flag&os.O_CREATE!=0, it creates\n\t// a new empty file or directory if one does not already\n\t// exist.\n\t//\n\t// When creating a new item, perm&os.ModeDir determines\n\t// whether it is a file or a directory.\n\t//\n\t// A file can be opened multiple times and used concurrently\n\t// from multiple goroutines. However, each File object should\n\t// be used by only one goroutine at a time.\n\tOpenFile(name string, flag int, perm os.FileMode) (File, error)\n\n\tMkdir(name string, perm os.FileMode) error\n\tRemove(name string) error\n\tRemoveAll(name string) error\n\tRename(oldname, newname string) error\n\n\t// Write buffered data from memory to storage, returning when\n\t// all updates have been saved to persistent storage.\n\tSync() error\n\n\t// Write buffered data from memory to storage, but don't wait\n\t// for all writes to finish before returning. If shortBlocks\n\t// is true, flush everything; otherwise, if there's less than\n\t// a full block of buffered data at the end of a stream, leave\n\t// it buffered in memory in case more data can be appended. If\n\t// path is \"\", flush all dirs/streams; otherwise, flush only\n\t// the specified dir/stream.\n\tFlush(path string, shortBlocks bool) error\n\n\t// Estimate current memory usage.\n\tMemorySize() int64\n}\n\ntype fsFS struct {\n\tFileSystem\n}\n\n// FS returns an fs.FS interface to the given FileSystem, to enable\n// the use of fs.WalkDir, etc.\nfunc FS(fs FileSystem) fs.FS { return fsFS{fs} }\nfunc (fs fsFS) Open(path string) (fs.File, error) {\n\tf, err := fs.FileSystem.Open(path)\n\treturn f, err\n}\n\ntype inode interface {\n\tSetParent(parent inode, name string)\n\tParent() inode\n\tFS() FileSystem\n\tRead([]byte, filenodePtr) (int, filenodePtr, error)\n\tWrite([]byte, filenodePtr) (int, filenodePtr, error)\n\tTruncate(int64) error\n\tIsDir() bool\n\tReaddir() ([]os.FileInfo, error)\n\tSize() int64\n\tFileInfo() os.FileInfo\n\t// Create a snapshot of this node and its descendants.\n\tSnapshot() (inode, error)\n\t// Replace this node with a copy of the provided snapshot.\n\t// Caller may provide the same snapshot to multiple Splice\n\t// calls, but must not modify the snapshot concurrently.\n\tSplice(inode) error\n\n\t// Child() performs lookups and updates of named child nodes.\n\t//\n\t// (The term \"child\" here is used strictly. This means name is\n\t// not \".\" or \"..\", and name does not contain \"/\".)\n\t//\n\t// If replace is non-nil, Child calls replace(x) where x is\n\t// the current child inode with the given name. 
If possible,\n\t// the child inode is replaced with the one returned by\n\t// replace().\n\t//\n\t// If replace(x) returns an inode (besides x or nil) that is\n\t// subsequently returned by Child(), then Child()'s caller\n\t// must ensure the new child's name and parent are set/updated\n\t// to Child()'s name argument and its receiver respectively.\n\t// This is not necessarily done before replace(x) returns, but\n\t// it must be done before Child()'s caller releases the\n\t// parent's lock.\n\t//\n\t// Nil represents \"no child\". replace(nil) signifies that no\n\t// child with this name exists yet. If replace() returns nil,\n\t// the existing child should be deleted if possible.\n\t//\n\t// An implementation of Child() is permitted to ignore\n\t// replace() or its return value. For example, a regular file\n\t// inode does not have children, so Child() always returns\n\t// nil.\n\t//\n\t// Child() returns the child, if any, with the given name: if\n\t// a child was added or changed, the new child is returned.\n\t//\n\t// Caller must have lock (or rlock if replace is nil).\n\tChild(name string, replace func(inode) (inode, error)) (inode, error)\n\n\tsync.Locker\n\tRLock()\n\tRUnlock()\n\tMemorySize() int64\n}\n\ntype fileinfo struct {\n\tname    string\n\tmode    os.FileMode\n\tsize    int64\n\tmodTime time.Time\n\t// If not nil, sys() returns the source data structure, which\n\t// can be a *Collection, *Group, or nil. Currently populated\n\t// only for project dirs and top-level collection dirs. Does\n\t// not stay up to date with upstream changes.\n\t//\n\t// Intended to support keep-web's properties-as-s3-metadata\n\t// feature (https://dev.arvados.org/issues/19088).\n\tsys func() interface{}\n}\n\n// Name implements os.FileInfo.\nfunc (fi fileinfo) Name() string {\n\treturn fi.name\n}\n\n// ModTime implements os.FileInfo.\nfunc (fi fileinfo) ModTime() time.Time {\n\treturn fi.modTime\n}\n\n// Mode implements os.FileInfo.\nfunc (fi fileinfo) Mode() os.FileMode {\n\treturn fi.mode\n}\n\n// IsDir implements os.FileInfo.\nfunc (fi fileinfo) IsDir() bool {\n\treturn fi.mode&os.ModeDir != 0\n}\n\n// Size implements os.FileInfo.\nfunc (fi fileinfo) Size() int64 {\n\treturn fi.size\n}\n\n// Sys implements os.FileInfo. 
See comment in fileinfo struct.\nfunc (fi fileinfo) Sys() interface{} {\n\tif fi.sys == nil {\n\t\treturn nil\n\t}\n\treturn fi.sys()\n}\n\ntype nullnode struct{}\n\nfunc (*nullnode) Mkdir(string, os.FileMode) error {\n\treturn ErrInvalidOperation\n}\n\nfunc (*nullnode) Read([]byte, filenodePtr) (int, filenodePtr, error) {\n\treturn 0, filenodePtr{}, ErrInvalidOperation\n}\n\nfunc (*nullnode) Write([]byte, filenodePtr) (int, filenodePtr, error) {\n\treturn 0, filenodePtr{}, ErrInvalidOperation\n}\n\nfunc (*nullnode) Truncate(int64) error {\n\treturn ErrInvalidOperation\n}\n\nfunc (*nullnode) FileInfo() os.FileInfo {\n\treturn fileinfo{}\n}\n\nfunc (*nullnode) IsDir() bool {\n\treturn false\n}\n\nfunc (*nullnode) Readdir() ([]os.FileInfo, error) {\n\treturn nil, ErrInvalidOperation\n}\n\nfunc (*nullnode) Child(name string, replace func(inode) (inode, error)) (inode, error) {\n\treturn nil, ErrNotADirectory\n}\n\nfunc (*nullnode) MemorySize() int64 {\n\t// Types that embed nullnode should report their own size, but\n\t// if they don't, we at least report a non-zero size to ensure\n\t// a large tree doesn't get reported as 0 bytes.\n\treturn 64\n}\n\nfunc (*nullnode) Snapshot() (inode, error) {\n\treturn nil, ErrInvalidOperation\n}\n\nfunc (*nullnode) Splice(inode) error {\n\treturn ErrInvalidOperation\n}\n\ntype treenode struct {\n\tfs       FileSystem\n\tparent   inode\n\tinodes   map[string]inode\n\tfileinfo fileinfo\n\tsync.RWMutex\n\tnullnode\n}\n\nfunc (n *treenode) FS() FileSystem {\n\treturn n.fs\n}\n\nfunc (n *treenode) SetParent(p inode, name string) {\n\tn.Lock()\n\tdefer n.Unlock()\n\tn.parent = p\n\tn.fileinfo.name = name\n}\n\nfunc (n *treenode) Parent() inode {\n\tn.RLock()\n\tdefer n.RUnlock()\n\treturn n.parent\n}\n\nfunc (n *treenode) IsDir() bool {\n\treturn true\n}\n\nfunc (n *treenode) Child(name string, replace func(inode) (inode, error)) (child inode, err error) {\n\tdebugPanicIfNotLocked(n, false)\n\tchild = n.inodes[name]\n\tif name == \"\" || name == \".\" || name == \"..\" {\n\t\terr = ErrInvalidArgument\n\t\treturn\n\t}\n\tif replace == nil {\n\t\treturn\n\t}\n\tnewchild, err := replace(child)\n\tif err != nil {\n\t\treturn\n\t}\n\tif newchild == nil {\n\t\tdebugPanicIfNotLocked(n, true)\n\t\tdelete(n.inodes, name)\n\t} else if newchild != child {\n\t\tdebugPanicIfNotLocked(n, true)\n\t\tn.inodes[name] = newchild\n\t\tn.fileinfo.modTime = time.Now()\n\t\tchild = newchild\n\t}\n\treturn\n}\n\nfunc (n *treenode) Size() int64 {\n\treturn n.FileInfo().Size()\n}\n\nfunc (n *treenode) FileInfo() os.FileInfo {\n\tn.RLock()\n\tdefer n.RUnlock()\n\tfi := n.fileinfo\n\tfi.size = int64(len(n.inodes))\n\treturn fi\n}\n\nfunc (n *treenode) Readdir() (fi []os.FileInfo, err error) {\n\t// We need RLock to safely read n.inodes, but we must release\n\t// it before calling FileInfo() on the child nodes. 
Otherwise,\n\t// we risk deadlock when filter groups A and B match each\n\t// other, concurrent Readdir() calls try to RLock them in\n\t// opposite orders, and one cannot be RLocked a second time\n\t// because a third caller is waiting for a write lock.\n\tn.RLock()\n\tinodes := make([]inode, 0, len(n.inodes))\n\tfor _, inode := range n.inodes {\n\t\tinodes = append(inodes, inode)\n\t}\n\tn.RUnlock()\n\tfi = make([]os.FileInfo, 0, len(inodes))\n\tfor _, inode := range inodes {\n\t\tfi = append(fi, inode.FileInfo())\n\t}\n\treturn\n}\n\nfunc (n *treenode) Sync() error {\n\tn.RLock()\n\tdefer n.RUnlock()\n\tfor _, inode := range n.inodes {\n\t\tsyncer, ok := inode.(syncer)\n\t\tif !ok {\n\t\t\treturn ErrInvalidOperation\n\t\t}\n\t\terr := syncer.Sync()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (n *treenode) MemorySize() (size int64) {\n\t// To avoid making other callers wait while we count the\n\t// entire filesystem size, we lock the node only long enough\n\t// to copy the list of children. We accept that the resulting\n\t// size will sometimes be misleading (e.g., we will\n\t// double-count an item that moves from A to B after we check\n\t// A's size but before we check B's size).\n\tn.RLock()\n\tdebugPanicIfNotLocked(n, false)\n\ttodo := make([]inode, 0, len(n.inodes))\n\tfor _, inode := range n.inodes {\n\t\ttodo = append(todo, inode)\n\t}\n\tn.RUnlock()\n\tfor _, inode := range todo {\n\t\tsize += inode.MemorySize()\n\t}\n\treturn 64 + size\n}\n\ntype fileSystem struct {\n\troot inode\n\tfsBackend\n\tmutex sync.Mutex\n\tthr   *throttle\n}\n\nfunc (fs *fileSystem) rootnode() inode {\n\treturn fs.root\n}\n\nfunc (fs *fileSystem) throttle() *throttle {\n\treturn fs.thr\n}\n\nfunc (fs *fileSystem) locker() sync.Locker {\n\treturn &fs.mutex\n}\n\n// OpenFile is analogous to os.OpenFile().\nfunc (fs *fileSystem) OpenFile(name string, flag int, perm os.FileMode) (File, error) {\n\treturn fs.openFile(name, flag, perm)\n}\n\nfunc (fs *fileSystem) openFile(name string, flag int, perm os.FileMode) (*filehandle, error) {\n\tif flag&os.O_SYNC != 0 {\n\t\treturn nil, ErrSyncNotSupported\n\t}\n\tdirname, name := path.Split(name)\n\tancestors := map[inode]bool{}\n\tparent, err := rlookup(fs.root, dirname, ancestors)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar readable, writable bool\n\tswitch flag & (os.O_RDWR | os.O_RDONLY | os.O_WRONLY) {\n\tcase os.O_RDWR:\n\t\treadable = true\n\t\twritable = true\n\tcase os.O_RDONLY:\n\t\treadable = true\n\tcase os.O_WRONLY:\n\t\twritable = true\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid flags 0x%x\", flag)\n\t}\n\tif parent.IsDir() {\n\t\t// A directory can be opened via \"foo/\", \"foo/.\", or\n\t\t// \"foo/..\".\n\t\tswitch name {\n\t\tcase \".\", \"\":\n\t\t\treturn &filehandle{inode: parent, readable: readable, writable: writable}, nil\n\t\tcase \"..\":\n\t\t\treturn &filehandle{inode: parent.Parent(), readable: readable, writable: writable}, nil\n\t\t}\n\t}\n\tcreateMode := flag&os.O_CREATE != 0\n\t// We always need to take Lock() here, not just RLock(). 
Even\n\t// if we know we won't be creating a file, parent might be a\n\t// lookupnode, which sometimes populates its inodes map during\n\t// a Child() call.\n\tparent.Lock()\n\tdefer parent.Unlock()\n\tn, err := parent.Child(name, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if n == nil {\n\t\tif !createMode {\n\t\t\treturn nil, os.ErrNotExist\n\t\t}\n\t\tn, err = parent.Child(name, func(inode) (repl inode, err error) {\n\t\t\trepl, err = parent.FS().newNode(name, perm|0755, time.Now())\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\trepl.SetParent(parent, name)\n\t\t\treturn\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if n == nil {\n\t\t\t// Parent rejected new child, but returned no error\n\t\t\treturn nil, ErrInvalidArgument\n\t\t}\n\t} else if flag&os.O_EXCL != 0 {\n\t\treturn nil, os.ErrExist\n\t} else if flag&os.O_TRUNC != 0 {\n\t\tif !writable {\n\t\t\treturn nil, fmt.Errorf(\"invalid flag O_TRUNC in read-only mode\")\n\t\t} else if n.IsDir() {\n\t\t\treturn nil, fmt.Errorf(\"invalid flag O_TRUNC when opening directory\")\n\t\t} else if err := n.Truncate(0); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\t// If n and one of its parents/ancestors are [hardlinks to]\n\t// the same node (e.g., a filter group that matches itself),\n\t// open an \"empty directory\" node instead, so the inner\n\t// hardlink appears empty. This is needed to ensure\n\t// Open(\"a/b/c/x/x\").Readdir() appears empty, matching the\n\t// behavior of rlookup(\"a/b/c/x/x/z\") => ErrNotExist.\n\tif hl, ok := n.(*hardlink); (ok && ancestors[hl.inode]) || ancestors[n] {\n\t\tn = &treenode{\n\t\t\tfs:     n.FS(),\n\t\t\tparent: parent,\n\t\t\tinodes: nil,\n\t\t\tfileinfo: fileinfo{\n\t\t\t\tname:    name,\n\t\t\t\tmodTime: time.Now(),\n\t\t\t\tmode:    0555 | os.ModeDir,\n\t\t\t},\n\t\t}\n\t}\n\treturn &filehandle{\n\t\tinode:    n,\n\t\tappend:   flag&os.O_APPEND != 0,\n\t\treadable: readable,\n\t\twritable: writable,\n\t}, nil\n}\n\nfunc (fs *fileSystem) Open(name string) (http.File, error) {\n\treturn fs.OpenFile(name, os.O_RDONLY, 0)\n}\n\nfunc (fs *fileSystem) Create(name string) (File, error) {\n\treturn fs.OpenFile(name, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0)\n}\n\nfunc (fs *fileSystem) Mkdir(name string, perm os.FileMode) error {\n\tdirname, name := path.Split(name)\n\tn, err := rlookup(fs.root, dirname, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn.Lock()\n\tdefer n.Unlock()\n\tif child, err := n.Child(name, nil); err != nil {\n\t\treturn err\n\t} else if child != nil {\n\t\treturn os.ErrExist\n\t}\n\n\t_, err = n.Child(name, func(inode) (repl inode, err error) {\n\t\trepl, err = n.FS().newNode(name, perm|os.ModeDir, time.Now())\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\trepl.SetParent(n, name)\n\t\treturn\n\t})\n\treturn err\n}\n\nfunc (fs *fileSystem) Stat(name string) (os.FileInfo, error) {\n\tnode, err := rlookup(fs.root, name, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn node.FileInfo(), nil\n}\n\nfunc (fs *fileSystem) Rename(oldname, newname string) error {\n\tolddir, oldname := path.Split(oldname)\n\tif oldname == \"\" || oldname == \".\" || oldname == \"..\" {\n\t\treturn ErrInvalidArgument\n\t}\n\tolddirf, err := fs.openFile(olddir+\".\", os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%q: %s\", olddir, err)\n\t}\n\tdefer olddirf.Close()\n\n\tnewdir, newname := path.Split(newname)\n\tif newname == \".\" || newname == \"..\" {\n\t\treturn ErrInvalidArgument\n\t} else if newname == \"\" {\n\t\t// Rename(\"a/b\", \"c/\") means 
Rename(\"a/b\", \"c/b\")\n\t\tnewname = oldname\n\t}\n\tnewdirf, err := fs.openFile(newdir+\".\", os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%q: %s\", newdir, err)\n\t}\n\tdefer newdirf.Close()\n\n\t// TODO: If the nearest common ancestor (\"nca\") of olddirf and\n\t// newdirf is on a different filesystem than fs, we should\n\t// call nca.FS().Rename() instead of proceeding. Until then\n\t// it's awkward for filesystems to implement their own Rename\n\t// methods effectively: the only one that runs is the one on\n\t// the root FileSystem exposed to the caller (webdav, fuse,\n\t// etc).\n\n\t// When acquiring locks on multiple inodes, avoid deadlock by\n\t// locking the entire containing filesystem first.\n\tcfs := olddirf.inode.FS()\n\tcfs.locker().Lock()\n\tdefer cfs.locker().Unlock()\n\n\tif cfs != newdirf.inode.FS() {\n\t\t// Moving inodes across filesystems is not (yet)\n\t\t// supported. Locking inodes from different\n\t\t// filesystems could deadlock, so we must error out\n\t\t// now.\n\t\treturn ErrInvalidOperation\n\t}\n\n\t// To ensure we can test reliably whether we're about to move\n\t// a directory into itself, lock all potential common\n\t// ancestors of olddir and newdir.\n\tneedLock := []sync.Locker{}\n\tfor _, node := range []inode{olddirf.inode, newdirf.inode} {\n\t\tneedLock = append(needLock, node)\n\t\tfor node.Parent() != node && node.Parent().FS() == node.FS() {\n\t\t\tnode = node.Parent()\n\t\t\tneedLock = append(needLock, node)\n\t\t}\n\t}\n\tlocked := map[sync.Locker]bool{}\n\tfor i := len(needLock) - 1; i >= 0; i-- {\n\t\tn := needLock[i]\n\t\tif fs, ok := n.(interface{ rootnode() inode }); ok {\n\t\t\t// Lock the fs's root dir directly, not\n\t\t\t// through the fs. Otherwise our \"locked\" map\n\t\t\t// would not reliably prevent double-locking\n\t\t\t// the fs's root dir.\n\t\t\tn = fs.rootnode()\n\t\t}\n\t\tif !locked[n] {\n\t\t\tn.Lock()\n\t\t\tdefer n.Unlock()\n\t\t\tlocked[n] = true\n\t\t}\n\t}\n\n\t_, err = olddirf.inode.Child(oldname, func(oldinode inode) (inode, error) {\n\t\tif oldinode == nil {\n\t\t\treturn oldinode, os.ErrNotExist\n\t\t}\n\t\tif locked[oldinode] {\n\t\t\t// oldinode cannot become a descendant of itself.\n\t\t\treturn oldinode, ErrInvalidArgument\n\t\t}\n\t\tif oldinode.FS() != cfs && newdirf.inode != olddirf.inode {\n\t\t\t// moving a mount point to a different parent\n\t\t\t// is not (yet) supported.\n\t\t\treturn oldinode, ErrInvalidArgument\n\t\t}\n\t\taccepted, err := newdirf.inode.Child(newname, func(existing inode) (inode, error) {\n\t\t\tif existing != nil && existing.IsDir() {\n\t\t\t\treturn existing, ErrIsDirectory\n\t\t\t}\n\t\t\treturn oldinode, nil\n\t\t})\n\t\tif err != nil {\n\t\t\t// Leave oldinode in olddir.\n\t\t\treturn oldinode, err\n\t\t}\n\t\taccepted.SetParent(newdirf.inode, newname)\n\t\treturn nil, nil\n\t})\n\treturn err\n}\n\nfunc (fs *fileSystem) Remove(name string) error {\n\treturn fs.remove(strings.TrimRight(name, \"/\"), false)\n}\n\nfunc (fs *fileSystem) RemoveAll(name string) error {\n\terr := fs.remove(strings.TrimRight(name, \"/\"), true)\n\tif os.IsNotExist(err) {\n\t\t// \"If the path does not exist, RemoveAll returns\n\t\t// nil.\" (see \"os\" pkg)\n\t\terr = nil\n\t}\n\treturn err\n}\n\nfunc (fs *fileSystem) remove(name string, recursive bool) error {\n\tdirname, name := path.Split(name)\n\tif name == \"\" || name == \".\" || name == \"..\" {\n\t\treturn ErrInvalidArgument\n\t}\n\tdir, err := rlookup(fs.root, dirname, nil)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\tdir.Lock()\n\tdefer dir.Unlock()\n\t_, err = dir.Child(name, func(node inode) (inode, error) {\n\t\tif node == nil {\n\t\t\treturn nil, os.ErrNotExist\n\t\t}\n\t\tif !recursive && node.IsDir() && node.Size() > 0 {\n\t\t\treturn node, ErrDirectoryNotEmpty\n\t\t}\n\t\treturn nil, nil\n\t})\n\treturn err\n}\n\nfunc (fs *fileSystem) Sync() error {\n\tif syncer, ok := fs.root.(syncer); ok {\n\t\treturn syncer.Sync()\n\t}\n\treturn ErrInvalidOperation\n}\n\nfunc (fs *fileSystem) Flush(string, bool) error {\n\tlog.Printf(\"TODO: flush fileSystem\")\n\treturn ErrInvalidOperation\n}\n\nfunc (fs *fileSystem) MemorySize() int64 {\n\treturn fs.root.MemorySize()\n}\n\n// rlookup (recursive lookup) returns the inode for the file/directory\n// with the given name (which may contain \"/\" separators). If no such\n// file/directory exists, the returned node is nil.\n//\n// The visited map should be either nil or empty. If non-nil, all\n// nodes and hardlink targets visited by the given path will be added\n// to it.\n//\n// If a cycle is detected, the second occurrence of the offending node\n// will be replaced by an empty directory. For example, if \"x\" is a\n// filter group that matches itself, then rlookup(\"a/b/c/x\") will\n// return the filter group, and rlookup(\"a/b/c/x/x\") will return an\n// empty directory.\nfunc rlookup(start inode, path string, visited map[inode]bool) (node inode, err error) {\n\tif visited == nil {\n\t\tvisited = map[inode]bool{}\n\t}\n\tnode = start\n\t// Clean up ./ and ../ and double-slashes, but (unlike\n\t// filepath.Clean) retain a trailing slash, because looking up\n\t// \".../regularfile/\" should fail.\n\ttrailingSlash := strings.HasSuffix(path, \"/\")\n\tpath = filepath.Clean(path)\n\tif trailingSlash && path != \"/\" {\n\t\tpath += \"/\"\n\t}\n\tfor _, name := range strings.Split(path, \"/\") {\n\t\tvisited[node] = true\n\t\tif node.IsDir() {\n\t\t\tif name == \".\" || name == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif name == \"..\" {\n\t\t\t\tnode = node.Parent()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tnode, err = func() (inode, error) {\n\t\t\tnode.Lock()\n\t\t\tdefer node.Unlock()\n\t\t\treturn node.Child(name, nil)\n\t\t}()\n\t\tif node == nil || err != nil {\n\t\t\tbreak\n\t\t}\n\t\tchecknode := node\n\t\tif hardlinked, ok := checknode.(*hardlink); ok {\n\t\t\tchecknode = hardlinked.inode\n\t\t}\n\t\tif visited[checknode] {\n\t\t\tnode = &treenode{\n\t\t\t\tfs:     node.FS(),\n\t\t\t\tparent: node.Parent(),\n\t\t\t\tinodes: nil,\n\t\t\t\tfileinfo: fileinfo{\n\t\t\t\t\tname:    name,\n\t\t\t\t\tmodTime: time.Now(),\n\t\t\t\t\tmode:    0555 | os.ModeDir,\n\t\t\t\t},\n\t\t\t}\n\t\t} else {\n\t\t\tvisited[checknode] = true\n\t\t}\n\t}\n\tif node == nil && err == nil {\n\t\terr = os.ErrNotExist\n\t}\n\treturn\n}\n\nfunc permittedName(name string) bool {\n\treturn name != \"\" && name != \".\" && name != \"..\" && !strings.Contains(name, \"/\")\n}\n\n// Snapshot returns a Subtree that's a copy of the given path. 
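The copy is detached from the\n// source filesystem: later changes to the source do not affect the\n// returned Subtree, and the Subtree can be attached elsewhere using\n// Splice (below). A minimal hypothetical sketch, assuming fs contains\n// a collection mounted at \"/mnt/coll\" (error handling omitted):\n//\n//\tsnap, _ := Snapshot(fs, \"/mnt/coll/subdir\")\n//\t_ = Splice(fs, \"/mnt/coll/subdir-copy\", snap)\n//\n// 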
It\n// returns an error if the path is not inside a collection.\nfunc Snapshot(fs FileSystem, path string) (*Subtree, error) {\n\tf, err := fs.OpenFile(path, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\treturn f.Snapshot()\n}\n\n// Splice inserts newsubtree at the indicated target path.\n//\n// Splice returns an error if target is not inside a collection.\n//\n// Splice returns an error if target is the root of a collection and\n// newsubtree is a snapshot of a file.\nfunc Splice(fs FileSystem, target string, newsubtree *Subtree) error {\n\tf, err := fs.OpenFile(target, os.O_WRONLY, 0)\n\tif os.IsNotExist(err) {\n\t\tf, err = fs.OpenFile(target, os.O_CREATE|os.O_WRONLY, 0700)\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"open %s: %w\", target, err)\n\t}\n\tdefer f.Close()\n\treturn f.Splice(newsubtree)\n}\n"
  },
  {
    "path": "sdk/go/arvados/fs_collection.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"slices\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n)\n\nvar (\n\tmaxBlockSize      = 1 << 26\n\tconcurrentWriters = 4 // max goroutines writing to Keep in background and during flush()\n)\n\n// A CollectionFileSystem is a FileSystem that can be serialized as a\n// manifest and stored as a collection.\ntype CollectionFileSystem interface {\n\tFileSystem\n\n\t// Flush all file data to Keep and return a snapshot of the\n\t// filesystem suitable for saving as (Collection)ManifestText.\n\t// Prefix (normally \".\") is a top level directory, effectively\n\t// prepended to all paths in the returned manifest.\n\tMarshalManifest(prefix string) (string, error)\n\n\t// Given map {x->y}, replace each occurrence of x with y.\n\t// Except: If segment x is not referenced anywhere in the\n\t// collection, do not make any replacements that reference the\n\t// same locator as y. The first return value is true if any\n\t// substitutions were made.\n\tReplaceSegments(map[BlockSegment]BlockSegment) (bool, error)\n\n\t// If needed, combine small stored blocks into larger blocks\n\t// and update the in-memory representation to reference the\n\t// larger blocks. Returns the number of (small) blocks that\n\t// were replaced.\n\t//\n\t// After repacking, Sync() will persist the repacking results\n\t// and load the server's latest version of the collection,\n\t// reverting any other local changes.  To repack without\n\t// abandoning local changes, call Sync, then Repack, then Sync\n\t// again.\n\tRepack(context.Context, RepackOptions) (int, error)\n\n\t// Total data bytes in all files.\n\tSize() int64\n}\n\ntype collectionFileSystem struct {\n\tfileSystem\n\tuuid           string\n\treplicas       int\n\tstorageClasses []string\n\n\t// PDH returned by the server as of last sync/load.\n\tloadedPDH atomic.Value\n\t// Modification time of the most recent version retrieved from\n\t// the server, if any. See checkChangesOnServer.\n\tloadedModtime    time.Time\n\tloadedModtimeMtx sync.Mutex\n\t// PDH of the locally generated manifest as of last\n\t// sync/load. This can differ from loadedPDH after loading a\n\t// version that was generated with different code and sorts\n\t// filenames differently than we do, for example.\n\tsavedPDH atomic.Value\n\n\t// guessSignatureTTL tracks a lower bound for the server's\n\t// configured BlobSigningTTL. The guess is initially zero, and\n\t// increases when we come across a signature with an expiry\n\t// time further in the future than the previous guess.\n\t//\n\t// When the guessed TTL is much smaller than the real TTL,\n\t// preemptive signature refresh is delayed or missed entirely,\n\t// which is OK.\n\tguessSignatureTTL time.Duration\n\tholdCheckChanges  time.Time\n\tlockCheckChanges  sync.Mutex\n\n\t// Pending updates to send via replace_segments.  
See\n\t// repackTree().\n\trepacked    map[BlockSegment]BlockSegment\n\trepackedMtx sync.Mutex\n}\n\n// FileSystem returns a CollectionFileSystem for the collection.\nfunc (c *Collection) FileSystem(client apiClient, kc keepClient) (CollectionFileSystem, error) {\n\tmodTime := c.ModifiedAt\n\tif modTime.IsZero() {\n\t\tmodTime = time.Now()\n\t}\n\tfs := &collectionFileSystem{\n\t\tuuid:           c.UUID,\n\t\tstorageClasses: c.StorageClassesDesired,\n\t\tfileSystem: fileSystem{\n\t\t\tfsBackend: keepBackend{apiClient: client, keepClient: kc},\n\t\t\tthr:       newThrottle(concurrentWriters),\n\t\t},\n\t}\n\tfs.loadedPDH.Store(c.PortableDataHash)\n\tif r := c.ReplicationDesired; r != nil {\n\t\tfs.replicas = *r\n\t}\n\troot := &dirnode{\n\t\tfs: fs,\n\t\ttreenode: treenode{\n\t\t\tfileinfo: fileinfo{\n\t\t\t\tname:    \".\",\n\t\t\t\tmode:    os.ModeDir | 0755,\n\t\t\t\tmodTime: modTime,\n\t\t\t\tsys: func() interface{} {\n\t\t\t\t\treturn &Collection{\n\t\t\t\t\t\tUUID:             fs.uuid,\n\t\t\t\t\t\tPortableDataHash: fs.loadedPDH.Load().(string),\n\t\t\t\t\t\tProperties:       c.Properties,\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\tinodes: make(map[string]inode),\n\t\t},\n\t}\n\troot.SetParent(root, \".\")\n\tif err := root.loadManifest(c.ManifestText); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// For detecting local changes during Sync, we need to compute\n\t// the PDH of our own encoding, which might differ from the\n\t// encoding received from server (e.g., ordering of files).\n\ttxt, err := root.marshalManifest(context.Background(), \".\", false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfs.savedPDH.Store(PortableDataHash(txt))\n\n\tbackdateTree(root, modTime)\n\tfs.root = root\n\treturn fs, nil\n}\n\n// caller must have lock (or guarantee no concurrent accesses somehow)\nfunc eachNode(n inode, ffunc func(*filenode), dfunc func(*dirnode)) {\n\tswitch n := n.(type) {\n\tcase *filenode:\n\t\tif ffunc != nil {\n\t\t\tffunc(n)\n\t\t}\n\tcase *dirnode:\n\t\tif dfunc != nil {\n\t\t\tdfunc(n)\n\t\t}\n\t\tfor _, n := range n.inodes {\n\t\t\teachNode(n, ffunc, dfunc)\n\t\t}\n\t}\n}\n\n// caller must have lock (or guarantee no concurrent accesses somehow)\nfunc backdateTree(n inode, modTime time.Time) {\n\teachNode(n, func(fn *filenode) {\n\t\tfn.fileinfo.modTime = modTime\n\t}, func(dn *dirnode) {\n\t\tdn.fileinfo.modTime = modTime\n\t})\n}\n\n// Approximate portion of signature TTL remaining, usually between 0\n// and 1, or negative if some signatures have expired.\nfunc (fs *collectionFileSystem) signatureTimeLeft() (float64, time.Duration) {\n\tvar (\n\t\tnow      = time.Now()\n\t\tearliest = now.Add(time.Hour * 24 * 7 * 365)\n\t\tlatest   time.Time\n\t)\n\tfs.fileSystem.root.RLock()\n\teachNode(fs.root, func(fn *filenode) {\n\t\tfn.Lock()\n\t\tdefer fn.Unlock()\n\t\tfor _, seg := range fn.segments {\n\t\t\tseg, ok := seg.(storedSegment)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\texpiryTime, err := signatureExpiryTime(seg.locator)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif expiryTime.Before(earliest) {\n\t\t\t\tearliest = expiryTime\n\t\t\t}\n\t\t\tif expiryTime.After(latest) {\n\t\t\t\tlatest = expiryTime\n\t\t\t}\n\t\t}\n\t}, nil)\n\tfs.fileSystem.root.RUnlock()\n\n\tif latest.IsZero() {\n\t\t// No signatures == 100% of TTL remaining.\n\t\treturn 1, 1\n\t}\n\n\tttl := latest.Sub(now)\n\tfs.fileSystem.root.Lock()\n\t{\n\t\tif ttl > fs.guessSignatureTTL {\n\t\t\t// ttl is closer to the real TTL than\n\t\t\t// guessSignatureTTL.\n\t\t\tfs.guessSignatureTTL 
= ttl\n\t\t} else {\n\t\t\t// Use the previous best guess to compute the\n\t\t\t// portion remaining (below, after unlocking\n\t\t\t// mutex).\n\t\t\tttl = fs.guessSignatureTTL\n\t\t}\n\t}\n\tfs.fileSystem.root.Unlock()\n\n\treturn earliest.Sub(now).Seconds() / ttl.Seconds(), ttl\n}\n\nfunc (fs *collectionFileSystem) updateSignatures(newmanifest string) {\n\tnewLoc := map[string]string{}\n\tfor _, tok := range regexp.MustCompile(`\\S+`).FindAllString(newmanifest, -1) {\n\t\tif mBlkRe.MatchString(tok) {\n\t\t\tnewLoc[stripAllHints(tok)] = tok\n\t\t}\n\t}\n\tfs.fileSystem.root.Lock()\n\tdefer fs.fileSystem.root.Unlock()\n\teachNode(fs.root, func(fn *filenode) {\n\t\tfn.Lock()\n\t\tdefer fn.Unlock()\n\t\tfor idx, seg := range fn.segments {\n\t\t\tseg, ok := seg.(storedSegment)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tloc, ok := newLoc[stripAllHints(seg.locator)]\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tseg.locator = loc\n\t\t\tfn.segments[idx] = seg\n\t\t}\n\t}, nil)\n}\n\nfunc (fs *collectionFileSystem) newNode(name string, perm os.FileMode, modTime time.Time) (node inode, err error) {\n\tif name == \"\" || name == \".\" || name == \"..\" {\n\t\treturn nil, ErrInvalidArgument\n\t}\n\tif perm.IsDir() {\n\t\treturn &dirnode{\n\t\t\tfs: fs,\n\t\t\ttreenode: treenode{\n\t\t\t\tfileinfo: fileinfo{\n\t\t\t\t\tname:    name,\n\t\t\t\t\tmode:    perm | os.ModeDir,\n\t\t\t\t\tmodTime: modTime,\n\t\t\t\t},\n\t\t\t\tinodes: make(map[string]inode),\n\t\t\t},\n\t\t}, nil\n\t}\n\treturn &filenode{\n\t\tfs: fs,\n\t\tfileinfo: fileinfo{\n\t\t\tname:    name,\n\t\t\tmode:    perm & ^os.ModeDir,\n\t\t\tmodTime: modTime,\n\t\t},\n\t}, nil\n}\n\nfunc (fs *collectionFileSystem) Child(name string, replace func(inode) (inode, error)) (inode, error) {\n\treturn fs.rootnode().Child(name, replace)\n}\n\nfunc (fs *collectionFileSystem) FS() FileSystem {\n\treturn fs\n}\n\nfunc (fs *collectionFileSystem) FileInfo() os.FileInfo {\n\treturn fs.rootnode().FileInfo()\n}\n\nfunc (fs *collectionFileSystem) IsDir() bool {\n\treturn true\n}\n\nfunc (fs *collectionFileSystem) Lock() {\n\tfs.rootnode().Lock()\n}\n\nfunc (fs *collectionFileSystem) Unlock() {\n\tfs.rootnode().Unlock()\n}\n\nfunc (fs *collectionFileSystem) RLock() {\n\tfs.rootnode().RLock()\n}\n\nfunc (fs *collectionFileSystem) RUnlock() {\n\tfs.rootnode().RUnlock()\n}\n\nfunc (fs *collectionFileSystem) Parent() inode {\n\treturn fs.rootnode().Parent()\n}\n\nfunc (fs *collectionFileSystem) Read(_ []byte, ptr filenodePtr) (int, filenodePtr, error) {\n\treturn 0, ptr, ErrInvalidOperation\n}\n\nfunc (fs *collectionFileSystem) Write(_ []byte, ptr filenodePtr) (int, filenodePtr, error) {\n\treturn 0, ptr, ErrInvalidOperation\n}\n\nfunc (fs *collectionFileSystem) Readdir() ([]os.FileInfo, error) {\n\treturn fs.rootnode().Readdir()\n}\n\nfunc (fs *collectionFileSystem) SetParent(parent inode, name string) {\n\tfs.rootnode().SetParent(parent, name)\n}\n\nfunc (fs *collectionFileSystem) Truncate(int64) error {\n\treturn ErrInvalidOperation\n}\n\n// Check for and incorporate upstream changes. 
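Here a change means the\n// server's portable data hash differs from the version we last loaded\n// or saved; in that case we replace the local tree with the server's\n// version, discarding any unsaved local changes. 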
If force==false, this\n// is a no-op except once every ttl/100 or so.\n//\n// Return value is true if new content was loaded from upstream and\n// any unsaved local changes have been discarded.\nfunc (fs *collectionFileSystem) checkChangesOnServer(force bool) (bool, error) {\n\tif fs.uuid == \"\" && fs.loadedPDH.Load() == \"\" {\n\t\treturn false, nil\n\t}\n\n\tfs.lockCheckChanges.Lock()\n\tif !force && fs.holdCheckChanges.After(time.Now()) {\n\t\tfs.lockCheckChanges.Unlock()\n\t\treturn false, nil\n\t}\n\tremain, ttl := fs.signatureTimeLeft()\n\tif remain > 0.01 {\n\t\tfs.holdCheckChanges = time.Now().Add(ttl / 100)\n\t}\n\tfs.lockCheckChanges.Unlock()\n\n\tif !force && remain >= 0.5 {\n\t\t// plenty of time left on current signatures\n\t\treturn false, nil\n\t}\n\n\tloadedPDH, _ := fs.loadedPDH.Load().(string)\n\tif fs.uuid != \"\" {\n\t\tvar coll Collection\n\t\terr := fs.RequestAndDecode(&coll, \"GET\", \"arvados/v1/collections/\"+fs.uuid, nil, map[string]interface{}{\"select\": []string{\"portable_data_hash\", \"manifest_text\", \"modified_at\"}})\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif coll.PortableDataHash != loadedPDH && coll.PortableDataHash != fs.loadedPDH.Load().(string) {\n\t\t\t// collection has changed upstream since we\n\t\t\t// last loaded or saved (including other\n\t\t\t// goroutines racing with us).\n\t\t\t//\n\t\t\t// Refresh local data, losing any unsaved\n\t\t\t// local changes.\n\t\t\tfs.loadedModtimeMtx.Lock()\n\t\t\tdefer fs.loadedModtimeMtx.Unlock()\n\t\t\tif fs.loadedModtime.After(coll.ModifiedAt) {\n\t\t\t\t// Another goroutine called\n\t\t\t\t// checkChangesOnServer concurrently,\n\t\t\t\t// and already updated the collection\n\t\t\t\t// to a newer version than this one.\n\t\t\t\t// Leave their update in place.\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\t\tnewfs, err := coll.FileSystem(fs.fileSystem.fsBackend, fs.fileSystem.fsBackend)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tsnap, err := Snapshot(newfs, \"/\")\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\terr = Splice(fs, \"/\", snap)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tfs.loadedPDH.Store(coll.PortableDataHash)\n\t\t\tfs.savedPDH.Store(newfs.(*collectionFileSystem).savedPDH.Load())\n\t\t\tfs.loadedModtime = coll.ModifiedAt\n\t\t\treturn true, nil\n\t\t}\n\t\tfs.updateSignatures(coll.ManifestText)\n\t\treturn false, nil\n\t}\n\tif loadedPDH != \"\" {\n\t\tvar coll Collection\n\t\terr := fs.RequestAndDecode(&coll, \"GET\", \"arvados/v1/collections/\"+loadedPDH, nil, map[string]interface{}{\"select\": []string{\"portable_data_hash\", \"manifest_text\"}})\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tfs.updateSignatures(coll.ManifestText)\n\t}\n\treturn false, nil\n}\n\n// Refresh signature on a single locator, if necessary. Assume caller\n// has lock. If an update is needed, and there are any storedSegments\n// whose signatures can be updated, start a background task to update\n// them asynchronously when the caller releases locks.\nfunc (fs *collectionFileSystem) refreshSignature(locator string) string {\n\texp, err := signatureExpiryTime(locator)\n\tif err != nil || exp.Sub(time.Now()) > time.Minute {\n\t\t// Synchronous update is not needed. 
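The signature is either\n\t\t// unparseable or still valid for more than a minute. 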
Start an\n\t\t// asynchronous update if needed.\n\t\tgo fs.checkChangesOnServer(false)\n\t\treturn locator\n\t}\n\tloadedPDH, _ := fs.loadedPDH.Load().(string)\n\tvar manifests string\n\tfor _, id := range []string{fs.uuid, loadedPDH} {\n\t\tif id == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tvar coll Collection\n\t\terr := fs.RequestAndDecode(&coll, \"GET\", \"arvados/v1/collections/\"+id, nil, map[string]interface{}{\"select\": []string{\"portable_data_hash\", \"manifest_text\"}})\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tmanifests += coll.ManifestText\n\t}\n\thash := stripAllHints(locator)\n\tfor _, tok := range regexp.MustCompile(`\\S+`).FindAllString(manifests, -1) {\n\t\tif mBlkRe.MatchString(tok) {\n\t\t\tif stripAllHints(tok) == hash {\n\t\t\t\tlocator = tok\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tgo fs.updateSignatures(manifests)\n\treturn locator\n}\n\nfunc (fs *collectionFileSystem) Sync() error {\n\tfs.repackedMtx.Lock()\n\tif len(fs.repacked) > 0 {\n\t\terr := fs.RequestAndDecode(nil, \"PATCH\", \"arvados/v1/collections/\"+fs.uuid, nil, map[string]interface{}{\n\t\t\t\"select\":           []string{\"portable_data_hash\"},\n\t\t\t\"replace_segments\": fs.repacked,\n\t\t})\n\t\tif err != nil {\n\t\t\tfs.repackedMtx.Unlock()\n\t\t\treturn fmt.Errorf(\"sync failed: replace_segments %s: %w\", fs.uuid, err)\n\t\t}\n\t\tfs.repacked = nil\n\t}\n\tfs.repackedMtx.Unlock()\n\n\trefreshed, err := fs.checkChangesOnServer(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif refreshed || fs.uuid == \"\" {\n\t\treturn nil\n\t}\n\tvar savingPDH, savingManifest string\n\tfor attempts := 0; ; attempts++ {\n\t\tsavedPDH := fs.savedPDH.Load()\n\t\tsavingManifest, err = fs.MarshalManifest(\".\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"sync failed: %s\", err)\n\t\t}\n\t\tsavingPDH = PortableDataHash(savingManifest)\n\t\tif savingPDH == savedPDH {\n\t\t\t// No local changes since last save or initial\n\t\t\t// load.\n\t\t\treturn nil\n\t\t}\n\t\tif savedPDH != fs.savedPDH.Load() {\n\t\t\t// Another goroutine saved or loaded changes\n\t\t\t// while we were doing MarshalManifest above.\n\t\t\t//\n\t\t\t// In the case where the other goroutine saved\n\t\t\t// local changes, it might have called\n\t\t\t// MarshalManifest before this Sync started,\n\t\t\t// and we might have newer changes that need\n\t\t\t// to be saved.  Retry this loop until we\n\t\t\t// complete it without losing the race to\n\t\t\t// another Sync.\n\t\t\t//\n\t\t\t// In the case where the other goroutine\n\t\t\t// loaded remote changes, it will have\n\t\t\t// clobbered the local changes we detected\n\t\t\t// above, and the next iteration of this loop\n\t\t\t// will find no changes, and return nil.\n\t\t\t//\n\t\t\t// In the worst case, the number of loop\n\t\t\t// iterations is bounded by the number of\n\t\t\t// concurrent Sync calls from other goroutines\n\t\t\t// that result in a change to savedPDH.  
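Each extra iteration\n\t\t\t// means some other goroutine completed a save or\n\t\t\t// load, so the loop cannot spin without global\n\t\t\t// progress. 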
In\n\t\t\t// practice, it is rare to see even two\n\t\t\t// iterations.\n\t\t\tif attempts >= 1000 {\n\t\t\t\treturn fmt.Errorf(\"sync failed: bug: race unresolved after %d attempts\", attempts)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tcoll := Collection{\n\t\tUUID:         fs.uuid,\n\t\tManifestText: savingManifest,\n\t}\n\n\tselectFields := []string{\"uuid\", \"portable_data_hash\"}\n\tfs.lockCheckChanges.Lock()\n\tremain, _ := fs.signatureTimeLeft()\n\tfs.lockCheckChanges.Unlock()\n\tif remain < 0.5 {\n\t\tselectFields = append(selectFields, \"manifest_text\")\n\t}\n\n\terr = fs.RequestAndDecode(&coll, \"PUT\", \"arvados/v1/collections/\"+fs.uuid, nil, map[string]interface{}{\n\t\t\"collection\": map[string]string{\n\t\t\t\"manifest_text\": coll.ManifestText,\n\t\t},\n\t\t\"select\": selectFields,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"sync failed: update %s: %w\", fs.uuid, err)\n\t}\n\tfs.updateSignatures(coll.ManifestText)\n\tfs.loadedPDH.Store(coll.PortableDataHash)\n\tfs.savedPDH.Store(savingPDH)\n\treturn nil\n}\n\nfunc (fs *collectionFileSystem) Flush(path string, shortBlocks bool) error {\n\tnode, err := rlookup(fs.fileSystem.root, path, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdn, ok := node.(*dirnode)\n\tif !ok {\n\t\treturn ErrNotADirectory\n\t}\n\tdn.Lock()\n\tdefer dn.Unlock()\n\tnames := dn.sortedNames()\n\tif path != \"\" {\n\t\t// Caller only wants to flush the specified dir,\n\t\t// non-recursively.  Drop subdirs from the list of\n\t\t// names.\n\t\tvar filenames []string\n\t\tfor _, name := range names {\n\t\t\tif _, ok := dn.inodes[name].(*filenode); ok {\n\t\t\t\tfilenames = append(filenames, name)\n\t\t\t}\n\t\t}\n\t\tnames = filenames\n\t}\n\tfor _, name := range names {\n\t\tchild := dn.inodes[name]\n\t\tchild.Lock()\n\t\tdefer child.Unlock()\n\t}\n\treturn dn.flush(context.TODO(), names, flushOpts{sync: false, shortBlocks: shortBlocks})\n}\n\nfunc (fs *collectionFileSystem) MemorySize() int64 {\n\treturn fs.fileSystem.root.(*dirnode).MemorySize()\n}\n\nfunc (fs *collectionFileSystem) MarshalManifest(prefix string) (string, error) {\n\tfs.fileSystem.root.Lock()\n\tdefer fs.fileSystem.root.Unlock()\n\treturn fs.fileSystem.root.(*dirnode).marshalManifest(context.TODO(), prefix, true)\n}\n\nfunc (fs *collectionFileSystem) Size() int64 {\n\treturn fs.fileSystem.root.(*dirnode).TreeSize()\n}\n\nfunc (fs *collectionFileSystem) Snapshot() (inode, error) {\n\treturn fs.fileSystem.root.Snapshot()\n}\n\nfunc (fs *collectionFileSystem) Splice(r inode) error {\n\treturn fs.fileSystem.root.Splice(r)\n}\n\nfunc (fs *collectionFileSystem) Repack(ctx context.Context, opts RepackOptions) (int, error) {\n\treturn fs.repackTree(ctx, opts, fs.root.(*dirnode))\n}\n\nfunc (fs *collectionFileSystem) repackTree(ctx context.Context, opts RepackOptions, root *dirnode) (int, error) {\n\tfs.fileSystem.root.Lock()\n\tplan, err := fs.planRepack(ctx, opts, fs.root.(*dirnode))\n\tfs.fileSystem.root.Unlock()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif opts.DryRun {\n\t\treturn len(plan), nil\n\t}\n\trepacked, err := fs.repackData(ctx, plan)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treplaced, err := fs.replaceSegments(repacked)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tnReplaced := len(replaced)\n\n\tfs.repackedMtx.Lock()\n\tif len(repacked) == 0 {\n\t\t// nothing to save\n\t} else if len(fs.repacked) == 0 {\n\t\tfs.repacked = repacked\n\t} else {\n\t\t// Merge new repacking results with existing unsaved\n\t\t// fs.repacked map.\n\t\tfor orig, repl := range 
fs.repacked {\n\t\t\t// If a previous repack saved\n\t\t\t// fs.repacked[A]==B, and now we have\n\t\t\t// repacked[B]==C, then next time we sync to\n\t\t\t// the server, we should replace A with C\n\t\t\t// instead of B. So we set repacked[A]=C.\n\t\t\tif newrepl, ok := repacked[repl.StripAllHints()]; ok {\n\t\t\t\trepacked[orig] = newrepl\n\t\t\t} else {\n\t\t\t\trepacked[orig] = repl\n\t\t\t}\n\t\t}\n\t\tfs.repacked = repacked\n\t}\n\tfs.repackedMtx.Unlock()\n\n\treturn nReplaced, nil\n}\n\nfunc (fs *collectionFileSystem) ReplaceSegments(m map[BlockSegment]BlockSegment) (bool, error) {\n\tchanged, err := fs.replaceSegments(m)\n\treturn len(changed) > 0, err\n}\n\nfunc (fs *collectionFileSystem) replaceSegments(m map[BlockSegment]BlockSegment) (map[BlockSegment]BlockSegment, error) {\n\tfs.fileSystem.root.Lock()\n\tdefer fs.fileSystem.root.Unlock()\n\tmissing := make(map[BlockSegment]bool, len(m))\n\tfor orig := range m {\n\t\torig.Locator = stripAllHints(orig.Locator)\n\t\tmissing[orig] = true\n\t}\n\tfs.fileSystem.root.(*dirnode).walkSegments(func(seg segment) segment {\n\t\tif seg, ok := seg.(storedSegment); ok {\n\t\t\tdelete(missing, seg.blockSegment().StripAllHints())\n\t\t}\n\t\treturn seg\n\t})\n\tskip := make(map[string]bool)\n\tfor orig, repl := range m {\n\t\torig.Locator = stripAllHints(orig.Locator)\n\t\tif missing[orig] {\n\t\t\tskip[repl.Locator] = true\n\t\t}\n\t}\n\ttodo := make(map[BlockSegment]storedSegment, len(m))\n\ttoks := make([][]byte, 3)\n\tfor orig, repl := range m {\n\t\tif !skip[repl.Locator] {\n\t\t\torig.Locator = stripAllHints(orig.Locator)\n\t\t\tif orig.Length != repl.Length {\n\t\t\t\treturn nil, fmt.Errorf(\"mismatched length: replacing segment length %d with segment length %d\", orig.Length, repl.Length)\n\t\t\t}\n\t\t\tif splitToToks([]byte(repl.Locator), '+', toks) < 2 {\n\t\t\t\treturn nil, errors.New(\"invalid replacement locator\")\n\t\t\t}\n\t\t\tblksize, err := strconv.ParseInt(string(toks[1]), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid size hint in replacement locator: %w\", err)\n\t\t\t}\n\t\t\tif repl.Offset+repl.Length > int(blksize) {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid replacement: offset %d + length %d > block size %d\", repl.Offset, repl.Length, blksize)\n\t\t\t}\n\t\t\ttodo[orig] = storedSegment{\n\t\t\t\tlocator: repl.Locator,\n\t\t\t\toffset:  repl.Offset,\n\t\t\t\tsize:    int(blksize),\n\t\t\t}\n\t\t}\n\t}\n\tchanged := make(map[BlockSegment]BlockSegment, len(todo))\n\tfs.fileSystem.root.(*dirnode).walkSegments(func(s segment) segment {\n\t\tseg, ok := s.(storedSegment)\n\t\tif !ok {\n\t\t\treturn s\n\t\t}\n\t\torig := seg.blockSegment().StripAllHints()\n\t\trepl, ok := todo[orig]\n\t\tif !ok {\n\t\t\treturn s\n\t\t}\n\t\tseg.locator = repl.locator\n\t\tseg.offset = repl.offset\n\t\tseg.size = repl.size\n\t\t// (leave seg.kc and seg.length unchanged)\n\t\tchanged[orig] = seg.blockSegment()\n\t\treturn seg\n\t})\n\treturn changed, nil\n}\n\n// See (*collectionFileSystem)planRepack.\ntype repackBucketThreshold struct {\n\tmaxIn  int\n\tminOut int\n}\n\nvar fullRepackBucketThresholds = []repackBucketThreshold{\n\t{maxIn: 1 << 25, minOut: 1 << 25},\n}\n\nvar repackBucketThresholds = []repackBucketThreshold{\n\t{maxIn: 1 << 23, minOut: 1 << 25},\n\t{maxIn: 1 << 21, minOut: 1 << 24},\n\t{maxIn: 1 << 19, minOut: 1 << 22},\n\t{maxIn: 1 << 17, minOut: 1 << 20},\n\t{maxIn: 1 << 15, minOut: 1 << 18},\n\t{maxIn: 1 << 13, minOut: 1 << 16},\n\t{maxIn: 1 << 11, minOut: 1 << 14},\n\t{maxIn: 1 << 9, minOut: 1 << 
12},\n\t{maxIn: 1 << 7, minOut: 1 << 10},\n\t{maxIn: 1 << 5, minOut: 1 << 8},\n\t{maxIn: 1 << 3, minOut: 1 << 6},\n}\n\n// Produce a list of segment merges that would result in a more\n// efficient packing.  Each element in the returned plan is a slice of\n// 2+ segments with a combined length no greater than maxBlockSize.\n//\n// Caller must have lock on given root node.\nfunc (fs *collectionFileSystem) planRepack(ctx context.Context, opts RepackOptions, root *dirnode) (plan [][]storedSegment, err error) {\n\tvar thresholds []repackBucketThreshold\n\tif opts.Full {\n\t\tthresholds = fullRepackBucketThresholds\n\t} else {\n\t\tthresholds = repackBucketThresholds\n\t}\n\t// TODO: depending on opts, plan as if large but underutilized\n\t// blocks are short blocks.\n\tblockSize := make(map[string]int)\n\tbucketBlocks := make([][]string, len(thresholds))\n\troot.walkSegments(func(seg segment) segment {\n\t\tif ss, ok := seg.(storedSegment); ok {\n\t\t\tif opts.CachedOnly {\n\t\t\t\tif _, err := ss.kc.BlockRead(ctx, BlockReadOptions{\n\t\t\t\t\tLocator:        ss.locator,\n\t\t\t\t\tCheckCacheOnly: true,\n\t\t\t\t\tWriteTo:        io.Discard,\n\t\t\t\t}); err != nil {\n\t\t\t\t\treturn seg\n\t\t\t\t}\n\t\t\t}\n\t\t\thash := stripAllHints(ss.locator)\n\t\t\tif blockSize[hash] == 0 {\n\t\t\t\tblockSize[hash] = ss.size\n\t\t\t\tfor bucket, threshold := range thresholds {\n\t\t\t\t\tif ss.size >= threshold.maxIn {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tbucketBlocks[bucket] = append(bucketBlocks[bucket], hash)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn seg\n\t})\n\t// blockPlan[oldhash] == idx means plan[idx] will merge all\n\t// segments in <oldhash> into a new block.\n\tblockPlan := make(map[string]int)\n\tpending := []string{}\n\tfor bucket := range bucketBlocks {\n\t\tpending = pending[:0]\n\t\tpendingSize := 0\n\t\tfor _, hash := range bucketBlocks[bucket] {\n\t\t\tif _, planned := blockPlan[hash]; planned || slices.Contains(pending, hash) {\n\t\t\t\t// already planned to merge this block\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsize := blockSize[hash]\n\t\t\tif pendingSize+size > maxBlockSize {\n\t\t\t\tfor _, hash := range pending {\n\t\t\t\t\tblockPlan[hash] = len(plan)\n\t\t\t\t}\n\t\t\t\tplan = append(plan, nil)\n\t\t\t\tpending = pending[:0]\n\t\t\t\tpendingSize = 0\n\t\t\t}\n\t\t\tpendingSize += size\n\t\t\tpending = append(pending, hash)\n\t\t}\n\t\tif pendingSize >= thresholds[bucket].minOut {\n\t\t\tfor _, hash := range pending {\n\t\t\t\tblockPlan[hash] = len(plan)\n\t\t\t}\n\t\t\tplan = append(plan, nil)\n\t\t}\n\t}\n\t// We have decided which blocks to merge.  
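Each affected block appears in\n\t// exactly one blockPlan entry, so a block is merged only once even\n\t// if segments in many files reference it. 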
Now we collect all\n\t// of the segments that reference those blocks, and return\n\t// that as the final plan.\n\tdone := make(map[storedSegment]bool)\n\troot.walkSegments(func(seg segment) segment {\n\t\tss, ok := seg.(storedSegment)\n\t\tif !ok {\n\t\t\treturn seg\n\t\t}\n\t\thash := stripAllHints(ss.locator)\n\t\tif idx, planning := blockPlan[hash]; planning && !done[ss] {\n\t\t\tplan[idx] = append(plan[idx], ss)\n\t\t\tdone[ss] = true\n\t\t}\n\t\treturn seg\n\t})\n\treturn plan, nil\n}\n\n// Given a plan returned by planRepack, write new blocks with the\n// merged segment data, and return a replacement mapping suitable for\n// ReplaceSegments.\nfunc (fs *collectionFileSystem) repackData(ctx context.Context, plan [][]storedSegment) (repl map[BlockSegment]BlockSegment, err error) {\n\tif len(plan) == 0 {\n\t\treturn\n\t}\n\trepl = make(map[BlockSegment]BlockSegment)\n\tfor _, insegments := range plan {\n\t\t// TODO: concurrency > 1\n\t\toutsize := 0\n\t\tfor _, insegment := range insegments {\n\t\t\toutsize += insegment.length\n\t\t}\n\t\tif outsize > maxBlockSize {\n\t\t\treturn nil, fmt.Errorf(\"combined length %d would exceed maximum block size %d\", outsize, maxBlockSize)\n\t\t}\n\t\tpiper, pipew := io.Pipe()\n\t\tgo func() {\n\t\t\tfor _, insegment := range insegments {\n\t\t\t\tn, err := io.Copy(pipew, io.NewSectionReader(insegment, 0, int64(insegment.length)))\n\t\t\t\tif err != nil {\n\t\t\t\t\tpipew.CloseWithError(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif n != int64(insegment.length) {\n\t\t\t\t\tpipew.CloseWithError(fmt.Errorf(\"internal error: copied %d bytes, expected %d\", n, insegment.length))\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif ctx.Err() != nil {\n\t\t\t\t\tpipew.CloseWithError(ctx.Err())\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tpipew.Close()\n\t\t}()\n\t\twrote, err := fs.BlockWrite(ctx, BlockWriteOptions{Reader: piper, DataSize: outsize})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\toffset := 0\n\t\tfor _, insegment := range insegments {\n\t\t\trepl[insegment.blockSegment().StripAllHints()] = BlockSegment{\n\t\t\t\tLocator: wrote.Locator,\n\t\t\t\tOffset:  offset,\n\t\t\t\tLength:  insegment.length,\n\t\t\t}\n\t\t\toffset += insegment.length\n\t\t}\n\t}\n\treturn\n}\n\n// filenodePtr is an offset into a file that is (usually) efficient to\n// seek to. Specifically, if filenode.repacked==filenodePtr.repacked\n// then\n// filenode.segments[filenodePtr.segmentIdx][filenodePtr.segmentOff]\n// corresponds to file offset filenodePtr.off. Otherwise, it is\n// necessary to reexamine len(filenode.segments[0]) etc. to find the\n// correct segment and offset.\ntype filenodePtr struct {\n\toff        int64\n\tsegmentIdx int\n\tsegmentOff int\n\trepacked   int64\n}\n\n// seek returns a ptr that is consistent with both startPtr.off and\n// the current state of fn. 
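If fn.segments has been\n// rearranged since startPtr was created (i.e., fn.repacked no longer\n// matches ptr.repacked), segmentIdx and segmentOff are recomputed by\n// scanning segment lengths from the start. 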
The caller must already hold fn.RLock() or\n// fn.Lock().\n//\n// If startPtr is beyond EOF, ptr.segment* will indicate precisely\n// EOF.\n//\n// After seeking:\n//\n//\tptr.segmentIdx == len(filenode.segments) // i.e., at EOF\n//\t||\n//\tfilenode.segments[ptr.segmentIdx].Len() > ptr.segmentOff\nfunc (fn *filenode) seek(startPtr filenodePtr) (ptr filenodePtr) {\n\tptr = startPtr\n\tif ptr.off < 0 {\n\t\t// meaningless anyway\n\t\treturn\n\t} else if ptr.off >= fn.fileinfo.size {\n\t\tptr.segmentIdx = len(fn.segments)\n\t\tptr.segmentOff = 0\n\t\tptr.repacked = fn.repacked\n\t\treturn\n\t} else if ptr.repacked == fn.repacked {\n\t\t// segmentIdx and segmentOff accurately reflect\n\t\t// ptr.off, but might have fallen off the end of a\n\t\t// segment\n\t\tif ptr.segmentOff >= fn.segments[ptr.segmentIdx].Len() {\n\t\t\tptr.segmentIdx++\n\t\t\tptr.segmentOff = 0\n\t\t}\n\t\treturn\n\t}\n\tdefer func() {\n\t\tptr.repacked = fn.repacked\n\t}()\n\tif ptr.off >= fn.fileinfo.size {\n\t\tptr.segmentIdx, ptr.segmentOff = len(fn.segments), 0\n\t\treturn\n\t}\n\t// Recompute segmentIdx and segmentOff.  We have already\n\t// established fn.fileinfo.size > ptr.off >= 0, so we don't\n\t// have to deal with edge cases here.\n\tvar off int64\n\tfor ptr.segmentIdx, ptr.segmentOff = 0, 0; off < ptr.off; ptr.segmentIdx++ {\n\t\t// This would panic (index out of range) if\n\t\t// fn.fileinfo.size were larger than\n\t\t// sum(fn.segments[i].Len()) -- but that can't happen\n\t\t// because we have ensured fn.fileinfo.size is always\n\t\t// accurate.\n\t\tsegLen := int64(fn.segments[ptr.segmentIdx].Len())\n\t\tif off+segLen > ptr.off {\n\t\t\tptr.segmentOff = int(ptr.off - off)\n\t\t\tbreak\n\t\t}\n\t\toff += segLen\n\t}\n\treturn\n}\n\n// filenode implements inode.\ntype filenode struct {\n\tparent   inode\n\tfs       *collectionFileSystem\n\tfileinfo fileinfo\n\tsegments []segment\n\t// number of times `segments` has changed in a\n\t// way that might invalidate a filenodePtr\n\trepacked int64\n\tmemsize  int64 // bytes in memSegments\n\tsync.RWMutex\n\tnullnode\n}\n\n// caller must have lock\nfunc (fn *filenode) appendSegment(e segment) {\n\tfn.segments = append(fn.segments, e)\n\tfn.fileinfo.size += int64(e.Len())\n}\n\nfunc (fn *filenode) SetParent(p inode, name string) {\n\tfn.Lock()\n\tdefer fn.Unlock()\n\tfn.parent = p\n\tfn.fileinfo.name = name\n}\n\nfunc (fn *filenode) Parent() inode {\n\tfn.RLock()\n\tdefer fn.RUnlock()\n\treturn fn.parent\n}\n\nfunc (fn *filenode) FS() FileSystem {\n\treturn fn.fs\n}\n\nfunc (fn *filenode) MemorySize() (size int64) {\n\tfn.RLock()\n\tdefer fn.RUnlock()\n\tsize = 64\n\tfor _, seg := range fn.segments {\n\t\tsize += seg.memorySize()\n\t}\n\treturn\n}\n\n// Read reads file data from a single segment, starting at startPtr,\n// into p. startPtr is assumed not to be up-to-date. 
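At most one segment is read per\n// call, so short reads are normal; the returned ptr is advanced past\n// the bytes read, ready to be passed to the next call. 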
Caller must have\n// RLock or Lock.\nfunc (fn *filenode) Read(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) {\n\tptr = fn.seek(startPtr)\n\tif ptr.off < 0 {\n\t\terr = ErrNegativeOffset\n\t\treturn\n\t}\n\tif ptr.segmentIdx >= len(fn.segments) {\n\t\terr = io.EOF\n\t\treturn\n\t}\n\tif ss, ok := fn.segments[ptr.segmentIdx].(storedSegment); ok {\n\t\tss.locator = fn.fs.refreshSignature(ss.locator)\n\t\tfn.segments[ptr.segmentIdx] = ss\n\t}\n\tn, err = fn.segments[ptr.segmentIdx].ReadAt(p, int64(ptr.segmentOff))\n\tif n > 0 {\n\t\tptr.off += int64(n)\n\t\tptr.segmentOff += n\n\t\tif ptr.segmentOff == fn.segments[ptr.segmentIdx].Len() {\n\t\t\tptr.segmentIdx++\n\t\t\tptr.segmentOff = 0\n\t\t\tif ptr.segmentIdx < len(fn.segments) && err == io.EOF {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (fn *filenode) Size() int64 {\n\tfn.RLock()\n\tdefer fn.RUnlock()\n\treturn fn.fileinfo.Size()\n}\n\nfunc (fn *filenode) FileInfo() os.FileInfo {\n\tfn.RLock()\n\tdefer fn.RUnlock()\n\treturn fn.fileinfo\n}\n\nfunc (fn *filenode) Truncate(size int64) error {\n\tfn.Lock()\n\tdefer fn.Unlock()\n\treturn fn.truncate(size)\n}\n\nfunc (fn *filenode) truncate(size int64) error {\n\tif size == fn.fileinfo.size {\n\t\treturn nil\n\t}\n\tfn.repacked++\n\tif size < fn.fileinfo.size {\n\t\tptr := fn.seek(filenodePtr{off: size})\n\t\tfor i := ptr.segmentIdx; i < len(fn.segments); i++ {\n\t\t\tif seg, ok := fn.segments[i].(*memSegment); ok {\n\t\t\t\tfn.memsize -= int64(seg.Len())\n\t\t\t}\n\t\t}\n\t\tif ptr.segmentOff == 0 {\n\t\t\tfn.segments = fn.segments[:ptr.segmentIdx]\n\t\t} else {\n\t\t\tfn.segments = fn.segments[:ptr.segmentIdx+1]\n\t\t\tswitch seg := fn.segments[ptr.segmentIdx].(type) {\n\t\t\tcase *memSegment:\n\t\t\t\tseg.Truncate(ptr.segmentOff)\n\t\t\t\tfn.memsize += int64(seg.Len())\n\t\t\tdefault:\n\t\t\t\tfn.segments[ptr.segmentIdx] = seg.Slice(0, ptr.segmentOff)\n\t\t\t}\n\t\t}\n\t\tfn.fileinfo.size = size\n\t\treturn nil\n\t}\n\tfor size > fn.fileinfo.size {\n\t\tgrow := size - fn.fileinfo.size\n\t\tvar seg *memSegment\n\t\tvar ok bool\n\t\tif len(fn.segments) == 0 {\n\t\t\tseg = &memSegment{}\n\t\t\tfn.segments = append(fn.segments, seg)\n\t\t} else if seg, ok = fn.segments[len(fn.segments)-1].(*memSegment); !ok || seg.Len() >= maxBlockSize {\n\t\t\tseg = &memSegment{}\n\t\t\tfn.segments = append(fn.segments, seg)\n\t\t}\n\t\tif maxgrow := int64(maxBlockSize - seg.Len()); maxgrow < grow {\n\t\t\tgrow = maxgrow\n\t\t}\n\t\tseg.Truncate(seg.Len() + int(grow))\n\t\tfn.fileinfo.size += grow\n\t\tfn.memsize += grow\n\t}\n\treturn nil\n}\n\n// Write writes data from p to the file, starting at startPtr,\n// extending the file size if necessary. 
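New data is stored in in-memory\n// segments (memSegment); nothing reaches Keep until pruneMemSegments,\n// flush, or a manifest marshal pushes it out. A minimal hypothetical\n// sketch using the exported API (cfs is a CollectionFileSystem; error\n// handling omitted):\n//\n//\tf, _ := cfs.OpenFile(\"hello.txt\", os.O_CREATE|os.O_RDWR, 0644)\n//\tf.Write([]byte(\"hello\"))\n//\tf.Close()\n//\tmtxt, _ := cfs.MarshalManifest(\".\") // flushes data to Keep\n//\n// 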
Caller must have Lock.\nfunc (fn *filenode) Write(p []byte, startPtr filenodePtr) (n int, ptr filenodePtr, err error) {\n\tif startPtr.off > fn.fileinfo.size {\n\t\tif err = fn.truncate(startPtr.off); err != nil {\n\t\t\treturn 0, startPtr, err\n\t\t}\n\t}\n\tptr = fn.seek(startPtr)\n\tif ptr.off < 0 {\n\t\terr = ErrNegativeOffset\n\t\treturn\n\t}\n\tfor len(p) > 0 && err == nil {\n\t\tcando := p\n\t\tif len(cando) > maxBlockSize {\n\t\t\tcando = cando[:maxBlockSize]\n\t\t}\n\t\t// Rearrange/grow fn.segments (and shrink cando if\n\t\t// needed) such that cando can be copied to\n\t\t// fn.segments[ptr.segmentIdx] at offset\n\t\t// ptr.segmentOff.\n\t\tcur := ptr.segmentIdx\n\t\tprev := ptr.segmentIdx - 1\n\t\tvar curWritable bool\n\t\tif cur < len(fn.segments) {\n\t\t\t_, curWritable = fn.segments[cur].(*memSegment)\n\t\t}\n\t\tvar prevAppendable bool\n\t\tif prev >= 0 && fn.segments[prev].Len() < maxBlockSize {\n\t\t\t_, prevAppendable = fn.segments[prev].(*memSegment)\n\t\t}\n\t\tif ptr.segmentOff > 0 && !curWritable {\n\t\t\t// Split a non-writable block.\n\t\t\tif max := fn.segments[cur].Len() - ptr.segmentOff; max <= len(cando) {\n\t\t\t\t// Truncate cur, and insert a new\n\t\t\t\t// segment after it.\n\t\t\t\tcando = cando[:max]\n\t\t\t\tfn.segments = append(fn.segments, nil)\n\t\t\t\tcopy(fn.segments[cur+1:], fn.segments[cur:])\n\t\t\t} else {\n\t\t\t\t// Split cur into two copies, truncate\n\t\t\t\t// the one on the left, shift the one\n\t\t\t\t// on the right, and insert a new\n\t\t\t\t// segment between them.\n\t\t\t\tfn.segments = append(fn.segments, nil, nil)\n\t\t\t\tcopy(fn.segments[cur+2:], fn.segments[cur:])\n\t\t\t\tfn.segments[cur+2] = fn.segments[cur+2].Slice(ptr.segmentOff+len(cando), -1)\n\t\t\t}\n\t\t\tcur++\n\t\t\tprev++\n\t\t\tseg := &memSegment{}\n\t\t\tseg.Truncate(len(cando))\n\t\t\tfn.memsize += int64(len(cando))\n\t\t\tfn.segments[cur] = seg\n\t\t\tfn.segments[prev] = fn.segments[prev].Slice(0, ptr.segmentOff)\n\t\t\tptr.segmentIdx++\n\t\t\tptr.segmentOff = 0\n\t\t\tfn.repacked++\n\t\t\tptr.repacked++\n\t\t} else if curWritable {\n\t\t\tif fit := int(fn.segments[cur].Len()) - ptr.segmentOff; fit < len(cando) {\n\t\t\t\tcando = cando[:fit]\n\t\t\t}\n\t\t} else {\n\t\t\tif prevAppendable {\n\t\t\t\t// Shrink cando if needed to fit in\n\t\t\t\t// prev segment.\n\t\t\t\tif cangrow := maxBlockSize - fn.segments[prev].Len(); cangrow < len(cando) {\n\t\t\t\t\tcando = cando[:cangrow]\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif cur == len(fn.segments) {\n\t\t\t\t// ptr is at EOF, filesize is changing.\n\t\t\t\tfn.fileinfo.size += int64(len(cando))\n\t\t\t} else if el := fn.segments[cur].Len(); el <= len(cando) {\n\t\t\t\t// cando is long enough that we won't\n\t\t\t\t// need cur any more. 
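Cur is about to be dropped from\n\t\t\t\t// the segments slice because cando will\n\t\t\t\t// overwrite it entirely. 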
shrink cando to\n\t\t\t\t// be exactly as long as cur\n\t\t\t\t// (otherwise we'd accidentally shift\n\t\t\t\t// the effective position of all\n\t\t\t\t// segments after cur).\n\t\t\t\tcando = cando[:el]\n\t\t\t\tcopy(fn.segments[cur:], fn.segments[cur+1:])\n\t\t\t\tfn.segments = fn.segments[:len(fn.segments)-1]\n\t\t\t} else {\n\t\t\t\t// shrink cur by the same #bytes we're growing prev\n\t\t\t\tfn.segments[cur] = fn.segments[cur].Slice(len(cando), -1)\n\t\t\t}\n\n\t\t\tif prevAppendable {\n\t\t\t\t// Grow prev.\n\t\t\t\tptr.segmentIdx--\n\t\t\t\tptr.segmentOff = fn.segments[prev].Len()\n\t\t\t\tfn.segments[prev].(*memSegment).Truncate(ptr.segmentOff + len(cando))\n\t\t\t\tfn.memsize += int64(len(cando))\n\t\t\t\tptr.repacked++\n\t\t\t\tfn.repacked++\n\t\t\t} else {\n\t\t\t\t// Insert a segment between prev and\n\t\t\t\t// cur, and advance prev/cur.\n\t\t\t\tfn.segments = append(fn.segments, nil)\n\t\t\t\tif cur < len(fn.segments) {\n\t\t\t\t\tcopy(fn.segments[cur+1:], fn.segments[cur:])\n\t\t\t\t\tptr.repacked++\n\t\t\t\t\tfn.repacked++\n\t\t\t\t} else {\n\t\t\t\t\t// appending a new segment does\n\t\t\t\t\t// not invalidate any ptrs\n\t\t\t\t}\n\t\t\t\tseg := &memSegment{}\n\t\t\t\tseg.Truncate(len(cando))\n\t\t\t\tfn.memsize += int64(len(cando))\n\t\t\t\tfn.segments[cur] = seg\n\t\t\t}\n\t\t}\n\n\t\t// Finally we can copy bytes from cando to the current segment.\n\t\tfn.segments[ptr.segmentIdx].(*memSegment).WriteAt(cando, ptr.segmentOff)\n\t\tn += len(cando)\n\t\tp = p[len(cando):]\n\n\t\tptr.off += int64(len(cando))\n\t\tptr.segmentOff += len(cando)\n\t\tif ptr.segmentOff >= maxBlockSize {\n\t\t\tfn.pruneMemSegments()\n\t\t}\n\t\tif fn.segments[ptr.segmentIdx].Len() == ptr.segmentOff {\n\t\t\tptr.segmentOff = 0\n\t\t\tptr.segmentIdx++\n\t\t}\n\n\t\tfn.fileinfo.modTime = time.Now()\n\t}\n\treturn\n}\n\n// Write some data out to disk to reduce memory use. 
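Specifically: flush any full\n// (maxBlockSize) memSegments to Keep in the background, and swap in\n// equivalent storedSegments as the writes complete. 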
Caller must have\n// write lock.\nfunc (fn *filenode) pruneMemSegments() {\n\t// TODO: share code with (*dirnode)flush()\n\t// TODO: pack/flush small blocks too, when fragmented\n\tfor idx, seg := range fn.segments {\n\t\tseg, ok := seg.(*memSegment)\n\t\tif !ok || seg.Len() < maxBlockSize || seg.flushing != nil {\n\t\t\tcontinue\n\t\t}\n\t\t// Setting seg.flushing guarantees seg.buf will not be\n\t\t// modified in place: WriteAt and Truncate will\n\t\t// allocate a new buf instead, if necessary.\n\t\tidx, buf := idx, seg.buf\n\t\tdone := make(chan struct{})\n\t\tseg.flushing = done\n\t\t// If lots of background writes are already in\n\t\t// progress, block here until one finishes, rather\n\t\t// than pile up an unlimited number of buffered writes\n\t\t// and network flush operations.\n\t\tfn.fs.throttle().Acquire()\n\t\tgo func() {\n\t\t\tdefer close(done)\n\t\t\tresp, err := fn.FS().BlockWrite(context.Background(), BlockWriteOptions{\n\t\t\t\tData:           buf,\n\t\t\t\tReplicas:       fn.fs.replicas,\n\t\t\t\tStorageClasses: fn.fs.storageClasses,\n\t\t\t})\n\t\t\tfn.fs.throttle().Release()\n\t\t\tfn.Lock()\n\t\t\tdefer fn.Unlock()\n\t\t\tif seg.flushing != done {\n\t\t\t\t// A new seg.buf has been allocated.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\t// TODO: stall (or return errors from)\n\t\t\t\t// subsequent writes until flushing\n\t\t\t\t// starts to succeed.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(fn.segments) <= idx || fn.segments[idx] != seg || len(seg.buf) != len(buf) {\n\t\t\t\t// Segment has been dropped/moved/resized.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfn.memsize -= int64(len(buf))\n\t\t\tfn.segments[idx] = storedSegment{\n\t\t\t\tkc:      fn.FS(),\n\t\t\t\tlocator: resp.Locator,\n\t\t\t\tsize:    len(buf),\n\t\t\t\toffset:  0,\n\t\t\t\tlength:  len(buf),\n\t\t\t}\n\t\t}()\n\t}\n}\n\n// Block until all pending pruneMemSegments/flush work is\n// finished. 
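Completion does not imply\n// success: a failed flush just leaves the affected segments in memory\n// to be retried by a later flush or sync. 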
Caller must NOT have lock.\nfunc (fn *filenode) waitPrune() {\n\tvar pending []<-chan struct{}\n\tfn.Lock()\n\tfor _, seg := range fn.segments {\n\t\tif seg, ok := seg.(*memSegment); ok && seg.flushing != nil {\n\t\t\tpending = append(pending, seg.flushing)\n\t\t}\n\t}\n\tfn.Unlock()\n\tfor _, p := range pending {\n\t\t<-p\n\t}\n}\n\nfunc (fn *filenode) Snapshot() (inode, error) {\n\tfn.RLock()\n\tdefer fn.RUnlock()\n\tsegments := make([]segment, 0, len(fn.segments))\n\tfor _, seg := range fn.segments {\n\t\tsegments = append(segments, seg.Slice(0, seg.Len()))\n\t}\n\tnewfn := &filenode{\n\t\tfileinfo: fn.fileinfo,\n\t\tsegments: segments,\n\t}\n\t// Clear references to the original filesystem, otherwise the\n\t// snapshot will prevent the old filesystem from being garbage\n\t// collected.\n\tnewfn.setFS(nil)\n\treturn newfn, nil\n}\n\nfunc (fn *filenode) Splice(repl inode) error {\n\trepl, err := repl.Snapshot()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfn.parent.Lock()\n\tdefer fn.parent.Unlock()\n\tfn.Lock()\n\tdefer fn.Unlock()\n\t_, err = fn.parent.Child(fn.fileinfo.name, func(inode) (inode, error) { return repl, nil })\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch repl := repl.(type) {\n\tcase *dirnode:\n\t\trepl.parent = fn.parent\n\t\trepl.fileinfo.name = fn.fileinfo.name\n\t\trepl.setTreeFS(fn.fs)\n\tcase *filenode:\n\t\trepl.parent = fn.parent\n\t\trepl.fileinfo.name = fn.fileinfo.name\n\t\trepl.setFS(fn.fs)\n\tdefault:\n\t\treturn fmt.Errorf(\"cannot splice snapshot containing %T: %w\", repl, ErrInvalidArgument)\n\t}\n\treturn nil\n}\n\n// Caller must have lock.\nfunc (fn *filenode) setFS(fs *collectionFileSystem) {\n\tfn.fs = fs\n\tfor i, seg := range fn.segments {\n\t\tif ss, ok := seg.(storedSegment); ok {\n\t\t\tss.kc = fs\n\t\t\tfn.segments[i] = ss\n\t\t}\n\t}\n}\n\ntype dirnode struct {\n\tfs *collectionFileSystem\n\ttreenode\n}\n\nfunc (dn *dirnode) FS() FileSystem {\n\treturn dn.fs\n}\n\nfunc (dn *dirnode) Child(name string, replace func(inode) (inode, error)) (inode, error) {\n\tif dn == dn.fs.rootnode() && name == \".arvados#collection\" {\n\t\tgn := &getternode{Getter: func() ([]byte, error) {\n\t\t\tvar coll Collection\n\t\t\tvar err error\n\t\t\tcoll.ManifestText, err = dn.fs.MarshalManifest(\".\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tcoll.UUID = dn.fs.uuid\n\t\t\tdata, err := json.Marshal(&coll)\n\t\t\tif err == nil {\n\t\t\t\tdata = append(data, '\\n')\n\t\t\t}\n\t\t\treturn data, err\n\t\t}}\n\t\tgn.SetParent(dn, name)\n\t\treturn gn, nil\n\t}\n\treturn dn.treenode.Child(name, replace)\n}\n\ntype fnSegmentRef struct {\n\tfn  *filenode\n\tidx int\n}\n\n// commitBlock concatenates the data from the given filenode segments\n// (which must be *memSegments), writes the data out to Keep as a\n// single block, and replaces the filenodes' *memSegments with\n// storedSegments that reference the relevant portions of the new\n// block.\n//\n// bufsize is the total data size in refs. It is used to preallocate\n// the correct amount of memory when len(refs)>1.\n//\n// If sync is false, commitBlock returns right away, after starting a\n// goroutine to do the writes, reacquire the filenodes' locks, and\n// swap out the *memSegments. 
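In async mode, a write error is\n// not reported to the caller; the affected segments simply remain in\n// memory, to be retried by a later flush. 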
Some filenodes' segments might get\n// modified/rearranged in the meantime, in which case commitBlock\n// won't replace them.\n//\n// Caller must have write lock.\nfunc (dn *dirnode) commitBlock(ctx context.Context, refs []fnSegmentRef, bufsize int, sync bool) error {\n\tif len(refs) == 0 {\n\t\treturn nil\n\t}\n\tif err := ctx.Err(); err != nil {\n\t\treturn err\n\t}\n\tdone := make(chan struct{})\n\tvar block []byte\n\tsegs := make([]*memSegment, 0, len(refs))\n\toffsets := make([]int, 0, len(refs)) // location of segment's data within block\n\tfor _, ref := range refs {\n\t\tseg := ref.fn.segments[ref.idx].(*memSegment)\n\t\tif !sync && seg.flushingUnfinished() {\n\t\t\t// Let the other flushing goroutine finish. If\n\t\t\t// it fails, we'll try again next time.\n\t\t\tclose(done)\n\t\t\treturn nil\n\t\t}\n\t\t// In sync mode, we proceed regardless of\n\t\t// whether another flush is in progress: It\n\t\t// can't finish before we do, because we hold\n\t\t// fn's lock until we finish our own writes.\n\t\tseg.flushing = done\n\t\toffsets = append(offsets, len(block))\n\t\tif len(refs) == 1 {\n\t\t\tblock = seg.buf\n\t\t} else if block == nil {\n\t\t\tblock = append(make([]byte, 0, bufsize), seg.buf...)\n\t\t} else {\n\t\t\tblock = append(block, seg.buf...)\n\t\t}\n\t\tsegs = append(segs, seg)\n\t}\n\tblocksize := len(block)\n\tdn.fs.throttle().Acquire()\n\terrs := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(done)\n\t\tdefer close(errs)\n\t\tresp, err := dn.fs.BlockWrite(context.Background(), BlockWriteOptions{\n\t\t\tData:           block,\n\t\t\tReplicas:       dn.fs.replicas,\n\t\t\tStorageClasses: dn.fs.storageClasses,\n\t\t})\n\t\tdn.fs.throttle().Release()\n\t\tif err != nil {\n\t\t\terrs <- err\n\t\t\treturn\n\t\t}\n\t\tfor idx, ref := range refs {\n\t\t\tif !sync {\n\t\t\t\tref.fn.Lock()\n\t\t\t\t// In async mode, fn's lock was\n\t\t\t\t// released while we were waiting for\n\t\t\t\t// PutB(); lots of things might have\n\t\t\t\t// changed.\n\t\t\t\tif len(ref.fn.segments) <= ref.idx {\n\t\t\t\t\t// file segments have\n\t\t\t\t\t// rearranged or changed in\n\t\t\t\t\t// some way\n\t\t\t\t\tref.fn.Unlock()\n\t\t\t\t\tcontinue\n\t\t\t\t} else if seg, ok := ref.fn.segments[ref.idx].(*memSegment); !ok || seg != segs[idx] {\n\t\t\t\t\t// segment has been replaced\n\t\t\t\t\tref.fn.Unlock()\n\t\t\t\t\tcontinue\n\t\t\t\t} else if seg.flushing != done {\n\t\t\t\t\t// seg.buf has been replaced\n\t\t\t\t\tref.fn.Unlock()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tdata := ref.fn.segments[ref.idx].(*memSegment).buf\n\t\t\tref.fn.segments[ref.idx] = storedSegment{\n\t\t\t\tkc:      dn.fs,\n\t\t\t\tlocator: resp.Locator,\n\t\t\t\tsize:    blocksize,\n\t\t\t\toffset:  offsets[idx],\n\t\t\t\tlength:  len(data),\n\t\t\t}\n\t\t\t// atomic is needed here despite caller having\n\t\t\t// lock: caller might be running concurrent\n\t\t\t// commitBlock() goroutines using the same\n\t\t\t// lock, writing different segments from the\n\t\t\t// same file.\n\t\t\tatomic.AddInt64(&ref.fn.memsize, -int64(len(data)))\n\t\t\tif !sync {\n\t\t\t\tref.fn.Unlock()\n\t\t\t}\n\t\t}\n\t}()\n\tif sync {\n\t\treturn <-errs\n\t}\n\treturn nil\n}\n\ntype flushOpts struct {\n\tsync        bool\n\tshortBlocks bool\n}\n\n// flush in-memory data and remote-cluster block references (for the\n// children with the given names, which must be children of dn) to\n// local-cluster persistent storage.\n//\n// Caller must have write lock on dn and the named children.\n//\n// If any children are dirs, they will be flushed 
recursively.\nfunc (dn *dirnode) flush(ctx context.Context, names []string, opts flushOpts) error {\n\tcg := newContextGroup(ctx)\n\tdefer cg.Cancel()\n\n\tgoCommit := func(refs []fnSegmentRef, bufsize int) {\n\t\tcg.Go(func() error {\n\t\t\treturn dn.commitBlock(cg.Context(), refs, bufsize, opts.sync)\n\t\t})\n\t}\n\n\tvar pending []fnSegmentRef\n\tvar pendingLen int = 0\n\tlocalLocator := map[string]string{}\n\tfor _, name := range names {\n\t\tswitch node := dn.inodes[name].(type) {\n\t\tcase *dirnode:\n\t\t\tgrandchildNames := node.sortedNames()\n\t\t\tfor _, grandchildName := range grandchildNames {\n\t\t\t\tgrandchild := node.inodes[grandchildName]\n\t\t\t\tgrandchild.Lock()\n\t\t\t\tdefer grandchild.Unlock()\n\t\t\t}\n\t\t\tcg.Go(func() error { return node.flush(cg.Context(), grandchildNames, opts) })\n\t\tcase *filenode:\n\t\t\tfor idx, seg := range node.segments {\n\t\t\t\tswitch seg := seg.(type) {\n\t\t\t\tcase storedSegment:\n\t\t\t\t\tloc, ok := localLocator[seg.locator]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tvar err error\n\t\t\t\t\t\tloc, err = dn.fs.LocalLocator(seg.locator)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlocalLocator[seg.locator] = loc\n\t\t\t\t\t}\n\t\t\t\t\tseg.locator = loc\n\t\t\t\t\tnode.segments[idx] = seg\n\t\t\t\tcase *memSegment:\n\t\t\t\t\tif seg.Len() > maxBlockSize/2 {\n\t\t\t\t\t\tgoCommit([]fnSegmentRef{{node, idx}}, seg.Len())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif pendingLen+seg.Len() > maxBlockSize {\n\t\t\t\t\t\tgoCommit(pending, pendingLen)\n\t\t\t\t\t\tpending = nil\n\t\t\t\t\t\tpendingLen = 0\n\t\t\t\t\t}\n\t\t\t\t\tpending = append(pending, fnSegmentRef{node, idx})\n\t\t\t\t\tpendingLen += seg.Len()\n\t\t\t\tdefault:\n\t\t\t\t\tpanic(fmt.Sprintf(\"can't sync segment type %T\", seg))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif opts.shortBlocks {\n\t\tgoCommit(pending, pendingLen)\n\t}\n\treturn cg.Wait()\n}\n\nfunc (dn *dirnode) MemorySize() (size int64) {\n\tdn.RLock()\n\ttodo := make([]inode, 0, len(dn.inodes))\n\tfor _, node := range dn.inodes {\n\t\ttodo = append(todo, node)\n\t}\n\tdn.RUnlock()\n\tsize = 64\n\tfor _, node := range todo {\n\t\tsize += node.MemorySize()\n\t}\n\treturn\n}\n\n// caller must have write lock.\nfunc (dn *dirnode) sortedNames() []string {\n\tnames := make([]string, 0, len(dn.inodes))\n\tfor name := range dn.inodes {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\treturn names\n}\n\n// caller must have write lock.\nfunc (dn *dirnode) marshalManifest(ctx context.Context, prefix string, flush bool) (string, error) {\n\tcg := newContextGroup(ctx)\n\tdefer cg.Cancel()\n\n\tif len(dn.inodes) == 0 {\n\t\tif prefix == \".\" {\n\t\t\treturn \"\", nil\n\t\t}\n\t\t// Express the existence of an empty directory by\n\t\t// adding an empty file named `\\056`, which (unlike\n\t\t// the more obvious spelling `.`) is accepted by the\n\t\t// API's manifest validator.\n\t\treturn manifestEscape(prefix) + \" d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\056\\n\", nil\n\t}\n\n\tnames := dn.sortedNames()\n\n\t// Wait for children to finish any pending write operations\n\t// before locking them.\n\tfor _, name := range names {\n\t\tnode := dn.inodes[name]\n\t\tif fn, ok := node.(*filenode); ok {\n\t\t\tfn.waitPrune()\n\t\t}\n\t}\n\n\tvar dirnames []string\n\tvar filenames []string\n\tfor _, name := range names {\n\t\tnode := dn.inodes[name]\n\t\tnode.Lock()\n\t\tdefer node.Unlock()\n\t\tswitch node := node.(type) {\n\t\tcase *dirnode:\n\t\t\tdirnames = append(dirnames, 
name)\n\t\tcase *filenode:\n\t\t\tfilenames = append(filenames, name)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"can't marshal inode type %T\", node))\n\t\t}\n\t}\n\n\tsubdirs := make([]string, len(dirnames))\n\trootdir := \"\"\n\tfor i, name := range dirnames {\n\t\ti, name := i, name\n\t\tcg.Go(func() error {\n\t\t\ttxt, err := dn.inodes[name].(*dirnode).marshalManifest(cg.Context(), prefix+\"/\"+name, flush)\n\t\t\tsubdirs[i] = txt\n\t\t\treturn err\n\t\t})\n\t}\n\n\tcg.Go(func() error {\n\t\tvar streamLen int64\n\t\ttype filepart struct {\n\t\t\tname   string\n\t\t\toffset int64\n\t\t\tlength int64\n\t\t}\n\n\t\tvar fileparts []filepart\n\t\tvar blocks []string\n\t\tif !flush {\n\t\t\t// skip flush -- will fail below if anything\n\t\t\t// needed flushing\n\t\t} else if err := dn.flush(cg.Context(), filenames, flushOpts{sync: true, shortBlocks: true}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, name := range filenames {\n\t\t\tnode := dn.inodes[name].(*filenode)\n\t\t\tif len(node.segments) == 0 {\n\t\t\t\tfileparts = append(fileparts, filepart{name: name})\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, seg := range node.segments {\n\t\t\t\tswitch seg := seg.(type) {\n\t\t\t\tcase storedSegment:\n\t\t\t\t\tif len(blocks) > 0 && blocks[len(blocks)-1] == seg.locator {\n\t\t\t\t\t\tstreamLen -= int64(seg.size)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tblocks = append(blocks, seg.locator)\n\t\t\t\t\t}\n\t\t\t\t\tnext := filepart{\n\t\t\t\t\t\tname:   name,\n\t\t\t\t\t\toffset: streamLen + int64(seg.offset),\n\t\t\t\t\t\tlength: int64(seg.length),\n\t\t\t\t\t}\n\t\t\t\t\tif prev := len(fileparts) - 1; prev >= 0 &&\n\t\t\t\t\t\tfileparts[prev].name == name &&\n\t\t\t\t\t\tfileparts[prev].offset+fileparts[prev].length == next.offset {\n\t\t\t\t\t\tfileparts[prev].length += next.length\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfileparts = append(fileparts, next)\n\t\t\t\t\t}\n\t\t\t\t\tstreamLen += int64(seg.size)\n\t\t\t\tdefault:\n\t\t\t\t\t// We haven't unlocked since\n\t\t\t\t\t// calling flush(sync=true).\n\t\t\t\t\t// Evidently the caller passed\n\t\t\t\t\t// flush==false but there were\n\t\t\t\t\t// local changes.\n\t\t\t\t\treturn fmt.Errorf(\"can't marshal segment type %T\", seg)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tvar filetokens []string\n\t\tfor _, s := range fileparts {\n\t\t\tfiletokens = append(filetokens, fmt.Sprintf(\"%d:%d:%s\", s.offset, s.length, manifestEscape(s.name)))\n\t\t}\n\t\tif len(filetokens) == 0 {\n\t\t\treturn nil\n\t\t} else if len(blocks) == 0 {\n\t\t\tblocks = []string{\"d41d8cd98f00b204e9800998ecf8427e+0\"}\n\t\t}\n\t\trootdir = manifestEscape(prefix) + \" \" + strings.Join(blocks, \" \") + \" \" + strings.Join(filetokens, \" \") + \"\\n\"\n\t\treturn nil\n\t})\n\terr := cg.Wait()\n\treturn rootdir + strings.Join(subdirs, \"\"), err\n}\n\n// splitToToks is similar to bytes.SplitN(token, []byte{c}, 3), but\n// splits into the toks slice rather than allocating a new one, and\n// returns the number of toks (1, 2, or 3).\nfunc splitToToks(src []byte, c rune, toks [][]byte) int {\n\tc1 := bytes.IndexRune(src, c)\n\tif c1 < 0 {\n\t\ttoks[0] = src\n\t\treturn 1\n\t}\n\ttoks[0], src = src[:c1], src[c1+1:]\n\tc2 := bytes.IndexRune(src, c)\n\tif c2 < 0 {\n\t\ttoks[1] = src\n\t\treturn 2\n\t}\n\ttoks[1], toks[2] = src[:c2], src[c2+1:]\n\treturn 3\n}\n\nfunc (dn *dirnode) loadManifest(txt string) error {\n\tstreams := bytes.Split([]byte(txt), []byte{'\\n'})\n\tif len(streams[len(streams)-1]) != 0 {\n\t\treturn fmt.Errorf(\"line %d: no trailing newline\", len(streams))\n\t}\n\tstreams = 
streams[:len(streams)-1]\n\tsegments := []storedSegment{}\n\t// streamoffset[n] is the position in the stream of the nth\n\t// block, i.e., ∑ segments[j].size ∀ 0≤j<n. We ensure\n\t// len(streamoffset) == len(segments) + 1.\n\tstreamoffset := []int64{0}\n\t// To reduce allocs, we reuse a single \"pathparts\" slice\n\t// (pre-split on \"/\" separators) for the duration of this\n\t// func.\n\tvar pathparts []string\n\t// To reduce allocs, we reuse a single \"toks\" slice of 3 byte\n\t// slices.\n\tvar toks = make([][]byte, 3)\n\tfor i, stream := range streams {\n\t\tlineno := i + 1\n\t\tfnodeCache := make(map[string]*filenode)\n\t\tvar anyFileTokens bool\n\t\tvar segIdx int\n\t\tsegments = segments[:0]\n\t\tstreamoffset = streamoffset[:1]\n\t\tpathparts = nil\n\t\tstreamparts := 0\n\t\tfor i, token := range bytes.Split(stream, []byte{' '}) {\n\t\t\tif i == 0 {\n\t\t\t\tpathparts = strings.Split(manifestUnescape(string(token)), \"/\")\n\t\t\t\tstreamparts = len(pathparts)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !bytes.ContainsRune(token, ':') {\n\t\t\t\tif anyFileTokens {\n\t\t\t\t\treturn fmt.Errorf(\"line %d: bad file segment %q\", lineno, token)\n\t\t\t\t}\n\t\t\t\tif splitToToks(token, '+', toks) < 2 {\n\t\t\t\t\treturn fmt.Errorf(\"line %d: bad locator %q\", lineno, token)\n\t\t\t\t}\n\t\t\t\tlength, err := strconv.ParseInt(string(toks[1]), 10, 32)\n\t\t\t\tif err != nil || length < 0 {\n\t\t\t\t\treturn fmt.Errorf(\"line %d: bad locator %q\", lineno, token)\n\t\t\t\t}\n\t\t\t\tstreamoffset = append(streamoffset, streamoffset[len(segments)]+int64(length))\n\t\t\t\tsegments = append(segments, storedSegment{\n\t\t\t\t\tlocator: string(token),\n\t\t\t\t\tsize:    int(length),\n\t\t\t\t\toffset:  0,\n\t\t\t\t\tlength:  int(length),\n\t\t\t\t})\n\t\t\t\tcontinue\n\t\t\t} else if len(segments) == 0 {\n\t\t\t\treturn fmt.Errorf(\"line %d: bad locator %q\", lineno, token)\n\t\t\t}\n\t\t\tif splitToToks(token, ':', toks) != 3 {\n\t\t\t\treturn fmt.Errorf(\"line %d: bad file segment %q\", lineno, token)\n\t\t\t}\n\t\t\tanyFileTokens = true\n\n\t\t\toffset, err := strconv.ParseInt(string(toks[0]), 10, 64)\n\t\t\tif err != nil || offset < 0 {\n\t\t\t\treturn fmt.Errorf(\"line %d: bad file segment %q\", lineno, token)\n\t\t\t}\n\t\t\tlength, err := strconv.ParseInt(string(toks[1]), 10, 64)\n\t\t\tif err != nil || length < 0 {\n\t\t\t\treturn fmt.Errorf(\"line %d: bad file segment %q\", lineno, token)\n\t\t\t}\n\t\t\tfnode, cached := fnodeCache[string(toks[2])]\n\t\t\tif !cached {\n\t\t\t\tif !bytes.ContainsAny(toks[2], `\\/`) {\n\t\t\t\t\t// optimization for a common case\n\t\t\t\t\tpathparts = append(pathparts[:streamparts], string(toks[2]))\n\t\t\t\t} else {\n\t\t\t\t\tpathparts = append(pathparts[:streamparts], strings.Split(manifestUnescape(string(toks[2])), \"/\")...)\n\t\t\t\t}\n\t\t\t\tfnode, err = dn.createFileAndParents(pathparts)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"line %d: cannot use name %q with length %d: %s\", lineno, toks[2], length, err)\n\t\t\t\t}\n\t\t\t\tfnodeCache[string(toks[2])] = fnode\n\t\t\t}\n\t\t\tif fnode == nil {\n\t\t\t\t// name matches an existing directory\n\t\t\t\tif length != 0 {\n\t\t\t\t\treturn fmt.Errorf(\"line %d: cannot use name %q with length %d: is a directory\", lineno, toks[2], length)\n\t\t\t\t}\n\t\t\t\t// Special case: an empty file used as\n\t\t\t\t// a marker to preserve an otherwise\n\t\t\t\t// empty directory in a manifest.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Map the stream offset/range coordinates to\n\t\t\t// block/offset/range 
coordinates and add\n\t\t\t// corresponding storedSegments to the filenode\n\t\t\tif segIdx < len(segments) && streamoffset[segIdx] <= offset && streamoffset[segIdx+1] > offset {\n\t\t\t\t// common case with an easy\n\t\t\t\t// optimization: start where the\n\t\t\t\t// previous segment ended\n\t\t\t} else if guess := int(offset >> 26); guess >= 0 && guess < len(segments) && streamoffset[guess] <= offset && streamoffset[guess+1] > offset {\n\t\t\t\t// another common case with an easy\n\t\t\t\t// optimization: all blocks are 64 MiB\n\t\t\t\t// (or close enough)\n\t\t\t\tsegIdx = guess\n\t\t\t} else {\n\t\t\t\t// general case\n\t\t\t\tsegIdx = sort.Search(len(segments), func(i int) bool {\n\t\t\t\t\treturn streamoffset[i+1] > offset\n\t\t\t\t})\n\t\t\t}\n\t\t\tfor ; segIdx < len(segments); segIdx++ {\n\t\t\t\tblkStart := streamoffset[segIdx]\n\t\t\t\tif blkStart >= offset+length {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tseg := &segments[segIdx]\n\t\t\t\tif seg.size == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tvar blkOff int\n\t\t\t\tif blkStart < offset {\n\t\t\t\t\tblkOff = int(offset - blkStart)\n\t\t\t\t}\n\t\t\t\tblkLen := seg.size - blkOff\n\t\t\t\tif blkStart+int64(seg.size) > offset+length {\n\t\t\t\t\tblkLen = int(offset + length - blkStart - int64(blkOff))\n\t\t\t\t}\n\t\t\t\tfnode.appendSegment(storedSegment{\n\t\t\t\t\tkc:      dn.fs,\n\t\t\t\t\tlocator: seg.locator,\n\t\t\t\t\tsize:    seg.size,\n\t\t\t\t\toffset:  blkOff,\n\t\t\t\t\tlength:  blkLen,\n\t\t\t\t})\n\t\t\t}\n\t\t\tif segIdx == len(segments) && streamoffset[segIdx] < offset+length {\n\t\t\t\treturn fmt.Errorf(\"line %d: invalid segment in %d-byte stream: %q\", lineno, streamoffset[segIdx], token)\n\t\t\t}\n\t\t}\n\t\tif !anyFileTokens {\n\t\t\treturn fmt.Errorf(\"line %d: no file segments\", lineno)\n\t\t} else if len(segments) == 0 {\n\t\t\treturn fmt.Errorf(\"line %d: no locators\", lineno)\n\t\t} else if streamparts == 0 {\n\t\t\treturn fmt.Errorf(\"line %d: no stream name\", lineno)\n\t\t}\n\t}\n\treturn nil\n}\n\n// only safe to call from loadManifest -- no locking.\n//\n// If path is a \"parent directory exists\" marker (the last path\n// component is \".\"), the returned values are both nil.\n//\n// Newly added nodes have modtime==0. 
Caller is responsible for fixing\n// them with backdateTree.\nfunc (dn *dirnode) createFileAndParents(names []string) (fn *filenode, err error) {\n\tvar node inode = dn\n\tbasename := names[len(names)-1]\n\tfor _, name := range names[:len(names)-1] {\n\t\tswitch name {\n\t\tcase \"\", \".\":\n\t\t\tcontinue\n\t\tcase \"..\":\n\t\t\tif node == dn {\n\t\t\t\t// can't be sure parent will be a *dirnode\n\t\t\t\treturn nil, ErrInvalidArgument\n\t\t\t}\n\t\t\tnode = node.Parent()\n\t\t\tcontinue\n\t\t}\n\t\tnode.Lock()\n\t\tunlock := node.Unlock\n\t\tnode, err = node.Child(name, func(child inode) (inode, error) {\n\t\t\tif child == nil {\n\t\t\t\t// note modtime will be fixed later in backdateTree()\n\t\t\t\tchild, err := node.FS().newNode(name, 0755|os.ModeDir, time.Time{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tchild.SetParent(node, name)\n\t\t\t\treturn child, nil\n\t\t\t} else if !child.IsDir() {\n\t\t\t\treturn child, os.ErrExist\n\t\t\t} else {\n\t\t\t\treturn child, nil\n\t\t\t}\n\t\t})\n\t\tunlock()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif basename == \".\" {\n\t\treturn\n\t} else if !permittedName(basename) {\n\t\terr = fmt.Errorf(\"invalid file part %q in path %q\", basename, names)\n\t\treturn\n\t}\n\tnode.Lock()\n\tdefer node.Unlock()\n\t_, err = node.Child(basename, func(child inode) (inode, error) {\n\t\tswitch child := child.(type) {\n\t\tcase nil:\n\t\t\tchild, err = node.FS().newNode(basename, 0755, time.Time{})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tchild.SetParent(node, basename)\n\t\t\tfn = child.(*filenode)\n\t\t\treturn child, nil\n\t\tcase *filenode:\n\t\t\tfn = child\n\t\t\treturn child, nil\n\t\tcase *dirnode:\n\t\t\treturn child, ErrIsDirectory\n\t\tdefault:\n\t\t\treturn child, ErrInvalidArgument\n\t\t}\n\t})\n\treturn\n}\n\nfunc (dn *dirnode) TreeSize() (bytes int64) {\n\tdn.RLock()\n\tdefer dn.RUnlock()\n\tfor _, i := range dn.inodes {\n\t\tswitch i := i.(type) {\n\t\tcase *filenode:\n\t\t\tbytes += i.Size()\n\t\tcase *dirnode:\n\t\t\tbytes += i.TreeSize()\n\t\t}\n\t}\n\treturn\n}\n\nfunc (dn *dirnode) Snapshot() (inode, error) {\n\treturn dn.snapshot()\n}\n\nfunc (dn *dirnode) snapshot() (*dirnode, error) {\n\tdn.RLock()\n\tdefer dn.RUnlock()\n\tsnap := &dirnode{\n\t\ttreenode: treenode{\n\t\t\tinodes:   make(map[string]inode, len(dn.inodes)),\n\t\t\tfileinfo: dn.fileinfo,\n\t\t},\n\t}\n\tfor name, child := range dn.inodes {\n\t\tdupchild, err := child.Snapshot()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsnap.inodes[name] = dupchild\n\t\tdupchild.SetParent(snap, name)\n\t}\n\treturn snap, nil\n}\n\nfunc (dn *dirnode) Splice(repl inode) error {\n\trepl, err := repl.Snapshot()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot copy snapshot: %w\", err)\n\t}\n\tswitch repl := repl.(type) {\n\tdefault:\n\t\treturn fmt.Errorf(\"cannot splice snapshot containing %T: %w\", repl, ErrInvalidArgument)\n\tcase *dirnode:\n\t\tdn.Lock()\n\t\tdefer dn.Unlock()\n\t\tdn.inodes = repl.inodes\n\t\tfor name, child := range dn.inodes {\n\t\t\tchild.SetParent(dn, name)\n\t\t}\n\t\tdn.setTreeFS(dn.fs)\n\tcase *filenode:\n\t\tdn.parent.Lock()\n\t\tdefer dn.parent.Unlock()\n\t\tremoving, err := dn.parent.Child(dn.fileinfo.name, nil)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot use Splice to replace a top-level directory with a file: %w\", ErrInvalidOperation)\n\t\t} else if removing != dn {\n\t\t\t// If ../thisdirname is not this dirnode, it\n\t\t\t// must be an inode that wraps a dirnode, 
like\n\t\t\t// a collectionFileSystem or deferrednode.\n\t\t\tif deferred, ok := removing.(*deferrednode); ok {\n\t\t\t\t// More useful to report the type of\n\t\t\t\t// the wrapped node rather than just\n\t\t\t\t// *deferrednode. (We know the real\n\t\t\t\t// inode is already loaded because dn\n\t\t\t\t// is inside it.)\n\t\t\t\tremoving = deferred.realinode()\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"cannot use Splice to attach a file at top level of %T: %w\", removing, ErrInvalidOperation)\n\t\t}\n\t\tdn.Lock()\n\t\tdefer dn.Unlock()\n\t\t_, err = dn.parent.Child(dn.fileinfo.name, func(inode) (inode, error) { return repl, nil })\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error replacing filenode: dn.parent.Child(): %w\", err)\n\t\t}\n\t\trepl.setFS(dn.fs)\n\t}\n\treturn nil\n}\n\nfunc (dn *dirnode) setTreeFS(fs *collectionFileSystem) {\n\tdn.fs = fs\n\tfor _, child := range dn.inodes {\n\t\tswitch child := child.(type) {\n\t\tcase *dirnode:\n\t\t\tchild.setTreeFS(fs)\n\t\tcase *filenode:\n\t\t\tchild.setFS(fs)\n\t\t}\n\t}\n}\n\n// walkSegments visits all file data in the tree beneath dn, calling\n// fn on each segment and replacing it with fn's return value.\n//\n// caller must have lock.\nfunc (dn *dirnode) walkSegments(fn func(segment) segment) {\n\t// Visit all segments in files, then traverse subdirectories.\n\t// This way planRepack will tend to repack siblings together.\n\tnames := dn.sortedNames()\n\tfor _, name := range names {\n\t\tchild := dn.inodes[name]\n\t\tchild.Lock()\n\t\tif child, ok := child.(*filenode); ok {\n\t\t\tfor i, seg := range child.segments {\n\t\t\t\tchild.segments[i] = fn(seg)\n\t\t\t}\n\t\t}\n\t\tchild.Unlock()\n\t}\n\tfor _, name := range names {\n\t\tchild := dn.inodes[name]\n\t\tchild.Lock()\n\t\tif child, ok := child.(*dirnode); ok {\n\t\t\tchild.walkSegments(fn)\n\t\t}\n\t\tchild.Unlock()\n\t}\n}\n\ntype segment interface {\n\tio.ReaderAt\n\tLen() int\n\t// Return a new segment with a subsection of the data from this\n\t// one. 
length<0 means length=Len()-off.\n\tSlice(off int, length int) segment\n\tmemorySize() int64\n}\n\ntype memSegment struct {\n\tbuf []byte\n\t// If flushing is not nil and not ready/closed, then a) buf is\n\t// being shared by a pruneMemSegments goroutine, and must be\n\t// copied on write; and b) the flushing channel will close\n\t// when the goroutine finishes, whether it succeeds or not.\n\tflushing <-chan struct{}\n}\n\nfunc (me *memSegment) flushingUnfinished() bool {\n\tif me.flushing == nil {\n\t\treturn false\n\t}\n\tselect {\n\tcase <-me.flushing:\n\t\tme.flushing = nil\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}\n\nfunc (me *memSegment) Len() int {\n\treturn len(me.buf)\n}\n\nfunc (me *memSegment) Slice(off, length int) segment {\n\tif length < 0 {\n\t\tlength = len(me.buf) - off\n\t}\n\tbuf := make([]byte, length)\n\tcopy(buf, me.buf[off:])\n\treturn &memSegment{buf: buf}\n}\n\nfunc (me *memSegment) Truncate(n int) {\n\tif n > cap(me.buf) || (me.flushing != nil && n > len(me.buf)) {\n\t\tnewsize := 1024\n\t\tfor newsize < n {\n\t\t\tnewsize = newsize << 2\n\t\t}\n\t\tnewbuf := make([]byte, n, newsize)\n\t\tcopy(newbuf, me.buf)\n\t\tme.buf, me.flushing = newbuf, nil\n\t} else {\n\t\t// reclaim existing capacity, and zero reclaimed part\n\t\toldlen := len(me.buf)\n\t\tme.buf = me.buf[:n]\n\t\tfor i := oldlen; i < n; i++ {\n\t\t\tme.buf[i] = 0\n\t\t}\n\t}\n}\n\nfunc (me *memSegment) WriteAt(p []byte, off int) {\n\tif off+len(p) > len(me.buf) {\n\t\tpanic(\"overflowed segment\")\n\t}\n\tif me.flushing != nil {\n\t\tme.buf, me.flushing = append([]byte(nil), me.buf...), nil\n\t}\n\tcopy(me.buf[off:], p)\n}\n\nfunc (me *memSegment) ReadAt(p []byte, off int64) (n int, err error) {\n\tif off > int64(me.Len()) {\n\t\terr = io.EOF\n\t\treturn\n\t}\n\tn = copy(p, me.buf[int(off):])\n\tif n < len(p) {\n\t\terr = io.EOF\n\t}\n\treturn\n}\n\nfunc (me *memSegment) memorySize() int64 {\n\treturn 64 + int64(len(me.buf))\n}\n\ntype storedSegment struct {\n\tkc      fsBackend\n\tlocator string\n\tsize    int // size of stored block (also encoded in locator)\n\toffset  int // position of segment within the stored block\n\tlength  int // bytes in this segment (offset + length <= size)\n}\n\nfunc (se storedSegment) Len() int {\n\treturn se.length\n}\n\nfunc (se storedSegment) Slice(n, size int) segment {\n\tse.offset += n\n\tse.length -= n\n\tif size >= 0 && se.length > size {\n\t\tse.length = size\n\t}\n\treturn se\n}\n\nfunc (se storedSegment) ReadAt(p []byte, off int64) (n int, err error) {\n\tif off > int64(se.length) {\n\t\treturn 0, io.EOF\n\t}\n\tmaxlen := se.length - int(off)\n\tif len(p) > maxlen {\n\t\tp = p[:maxlen]\n\t\tn, err = se.kc.ReadAt(se.locator, p, int(off)+se.offset)\n\t\tif err == nil {\n\t\t\terr = io.EOF\n\t\t}\n\t\treturn\n\t}\n\treturn se.kc.ReadAt(se.locator, p, int(off)+se.offset)\n}\n\nfunc (se storedSegment) memorySize() int64 {\n\treturn 64 + int64(len(se.locator))\n}\n\nfunc (se storedSegment) blockSegment() BlockSegment {\n\treturn BlockSegment{\n\t\tLocator: se.locator,\n\t\tOffset:  se.offset,\n\t\tLength:  se.length,\n\t}\n}\n\nfunc canonicalName(name string) string {\n\tname = path.Clean(\"/\" + name)\n\tif name == \"/\" || name == \"./\" {\n\t\tname = \".\"\n\t} else if strings.HasPrefix(name, \"/\") {\n\t\tname = \".\" + name\n\t}\n\treturn name\n}\n\nvar manifestEscapeSeq = regexp.MustCompile(`\\\\([0-7]{3}|\\\\)`)\n\nfunc manifestUnescapeFunc(seq string) string {\n\tif seq == `\\\\` {\n\t\treturn `\\`\n\t}\n\ti, err := strconv.ParseUint(seq[1:], 8, 
8)\n\tif err != nil {\n\t\t// Invalid escape sequence: can't unescape.\n\t\treturn seq\n\t}\n\treturn string([]byte{byte(i)})\n}\n\nfunc manifestUnescape(s string) string {\n\treturn manifestEscapeSeq.ReplaceAllStringFunc(s, manifestUnescapeFunc)\n}\n\nvar manifestEscapedChar = regexp.MustCompile(`[\\000-\\040:\\s\\\\]`)\n\nfunc manifestEscapeFunc(seq string) string {\n\treturn fmt.Sprintf(\"\\\\%03o\", byte(seq[0]))\n}\n\nfunc manifestEscape(s string) string {\n\treturn manifestEscapedChar.ReplaceAllStringFunc(s, manifestEscapeFunc)\n}\n"
  },
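  {
    "path": "sdk/go/arvados/fs_collection_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// NOTE: This file is an illustrative sketch added during editing, not\n// part of the upstream tree; the file name and test names are\n// hypothetical. It demonstrates the manifest-token conventions\n// implemented by the package-private helpers defined in\n// fs_collection.go (manifestEscape, manifestUnescape, canonicalName,\n// splitToToks). Expected values follow directly from those\n// functions' definitions.\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n)\n\nfunc TestExampleManifestEscapeRoundTrip(t *testing.T) {\n\t// Space (octal 040) matches manifestEscapedChar, so it is written\n\t// as a 3-digit octal escape; manifestUnescape reverses it.\n\tif got := manifestEscape(\"new file 1\"); got != `new\\040file\\0401` {\n\t\tt.Errorf(\"manifestEscape: got %q\", got)\n\t}\n\tif got := manifestUnescape(`new\\040file\\0401`); got != \"new file 1\" {\n\t\tt.Errorf(\"manifestUnescape: got %q\", got)\n\t}\n}\n\nfunc TestExampleCanonicalName(t *testing.T) {\n\t// canonicalName cleans a path and spells the root as \".\", with a\n\t// \"./\" prefix for everything else, as used in stream names.\n\tfor in, want := range map[string]string{\n\t\t\"\":             \".\",\n\t\t\"/\":            \".\",\n\t\t\"foo/../bar//\": \"./bar\",\n\t} {\n\t\tif got := canonicalName(in); got != want {\n\t\t\tt.Errorf(\"canonicalName(%q) = %q, want %q\", in, got, want)\n\t\t}\n\t}\n}\n\nfunc TestExampleSplitToToks(t *testing.T) {\n\t// splitToToks fills a caller-provided 3-element slice instead of\n\t// allocating a new one, and reports how many tokens it found.\n\ttoks := make([][]byte, 3)\n\tn := splitToToks([]byte(\"0:3:foo\"), ':', toks)\n\tif n != 3 || !bytes.Equal(toks[0], []byte(\"0\")) || !bytes.Equal(toks[1], []byte(\"3\")) || !bytes.Equal(toks[2], []byte(\"foo\")) {\n\t\tt.Errorf(\"splitToToks: n=%d toks=%q\", n, toks)\n\t}\n}\n"
  },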
  {
    "path": "sdk/go/arvados/fs_collection_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/md5\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"runtime/pprof\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&CollectionFSSuite{})\n\ntype keepClientStub struct {\n\tblocks      map[string][]byte\n\trefreshable map[string]bool\n\tcached      map[string]bool\n\treads       []string                   // locators from ReadAt() calls\n\tonWrite     func(bufcopy []byte) error // called from WriteBlock, before acquiring lock\n\tauthToken   string                     // client's auth token (used for signing locators)\n\tsigkey      string                     // blob signing key\n\tsigttl      time.Duration              // blob signing ttl\n\tsync.RWMutex\n}\n\nvar errStub404 = errors.New(\"404 block not found\")\n\nfunc (kcs *keepClientStub) ReadAt(locator string, p []byte, off int) (int, error) {\n\tkcs.Lock()\n\tkcs.reads = append(kcs.reads, locator)\n\tkcs.Unlock()\n\tkcs.RLock()\n\tdefer kcs.RUnlock()\n\tif err := VerifySignature(locator, kcs.authToken, kcs.sigttl, []byte(kcs.sigkey)); err != nil {\n\t\treturn 0, err\n\t}\n\tbuf := kcs.blocks[locator[:32]]\n\tif buf == nil {\n\t\treturn 0, errStub404\n\t}\n\treturn copy(p, buf[off:]), nil\n}\n\nfunc (kcs *keepClientStub) BlockRead(_ context.Context, opts BlockReadOptions) (int, error) {\n\tkcs.Lock()\n\tkcs.reads = append(kcs.reads, opts.Locator)\n\tkcs.Unlock()\n\tkcs.RLock()\n\tdefer kcs.RUnlock()\n\tif opts.CheckCacheOnly {\n\t\tif kcs.cached[opts.Locator[:32]] {\n\t\t\treturn 0, nil\n\t\t} else {\n\t\t\treturn 0, ErrNotCached\n\t\t}\n\t}\n\tif err := VerifySignature(opts.Locator, kcs.authToken, kcs.sigttl, []byte(kcs.sigkey)); err != nil {\n\t\treturn 0, err\n\t}\n\tbuf := kcs.blocks[opts.Locator[:32]]\n\tif buf == nil {\n\t\treturn 0, errStub404\n\t}\n\tn, err := io.Copy(opts.WriteTo, bytes.NewReader(buf))\n\treturn int(n), err\n}\n\nfunc (kcs *keepClientStub) BlockWrite(_ context.Context, opts BlockWriteOptions) (BlockWriteResponse, error) {\n\tvar buf []byte\n\tif opts.Data == nil {\n\t\tbuf = make([]byte, opts.DataSize)\n\t\t_, err := io.ReadFull(opts.Reader, buf)\n\t\tif err != nil {\n\t\t\treturn BlockWriteResponse{}, err\n\t\t}\n\t} else {\n\t\tbuf = append([]byte(nil), opts.Data...)\n\t}\n\tlocator := SignLocator(fmt.Sprintf(\"%x+%d\", md5.Sum(buf), len(buf)), kcs.authToken, time.Now().Add(kcs.sigttl), kcs.sigttl, []byte(kcs.sigkey))\n\tif kcs.onWrite != nil {\n\t\terr := kcs.onWrite(buf)\n\t\tif err != nil {\n\t\t\treturn BlockWriteResponse{}, err\n\t\t}\n\t}\n\tfor _, sc := range opts.StorageClasses {\n\t\tif sc != \"default\" {\n\t\t\treturn BlockWriteResponse{}, fmt.Errorf(\"stub does not write storage class %q\", sc)\n\t\t}\n\t}\n\tkcs.Lock()\n\tdefer kcs.Unlock()\n\tkcs.blocks[locator[:32]] = buf\n\treturn BlockWriteResponse{Locator: locator, Replicas: 1}, nil\n}\n\nvar reRemoteSignature = regexp.MustCompile(`\\+[AR][^+]*`)\n\nfunc (kcs *keepClientStub) LocalLocator(locator string) (string, error) {\n\tif strings.Contains(locator, \"+A\") {\n\t\treturn locator, nil\n\t}\n\tkcs.Lock()\n\tdefer kcs.Unlock()\n\tif strings.Contains(locator, \"+R\") {\n\t\tif len(locator) < 32 {\n\t\t\treturn \"\", fmt.Errorf(\"bad locator: 
%q\", locator)\n\t\t}\n\t\tif _, ok := kcs.blocks[locator[:32]]; !ok && !kcs.refreshable[locator[:32]] {\n\t\t\treturn \"\", fmt.Errorf(\"kcs.refreshable[%q]==false\", locator)\n\t\t}\n\t}\n\tlocator = reRemoteSignature.ReplaceAllLiteralString(locator, \"\")\n\tlocator = SignLocator(locator, kcs.authToken, time.Now().Add(kcs.sigttl), kcs.sigttl, []byte(kcs.sigkey))\n\treturn locator, nil\n}\n\ntype CollectionFSSuite struct {\n\tclient *Client\n\tcoll   Collection\n\tfs     CollectionFileSystem\n\tkc     *keepClientStub\n}\n\nfunc (s *CollectionFSSuite) SetUpTest(c *check.C) {\n\ts.client = NewClientFromEnv()\n\ts.client.AuthToken = fixtureActiveToken\n\terr := s.client.RequestAndDecode(&s.coll, \"GET\", \"arvados/v1/collections/\"+fixtureFooAndBarFilesInDirUUID, nil, nil)\n\tc.Assert(err, check.IsNil)\n\ts.kc = &keepClientStub{\n\t\tblocks: map[string][]byte{\n\t\t\t\"3858f62230ac3c915f300c664312c63f\": []byte(\"foobar\"),\n\t\t},\n\t\tsigkey:    fixtureBlobSigningKey,\n\t\tsigttl:    fixtureBlobSigningTTL,\n\t\tauthToken: fixtureActiveToken,\n\t}\n\ts.fs, err = s.coll.FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n}\n\nfunc (s *CollectionFSSuite) TestSyncNonCanonicalManifest(c *check.C) {\n\tvar coll Collection\n\terr := s.client.RequestAndDecode(&coll, \"GET\", \"arvados/v1/collections/\"+fixtureFooAndBarFilesInDirUUID, nil, nil)\n\tc.Assert(err, check.IsNil)\n\tmtxt := strings.Replace(coll.ManifestText, \"3:3:bar 0:3:foo\", \"0:3:foo 3:3:bar\", -1)\n\tc.Assert(mtxt, check.Not(check.Equals), coll.ManifestText)\n\terr = s.client.RequestAndDecode(&coll, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\"collection\": map[string]interface{}{\n\t\t\t\"manifest_text\": mtxt}})\n\tc.Assert(err, check.IsNil)\n\t// In order for the rest of the test to work as intended, the API server\n\t// needs to retain the file ordering we set manually. We check that here.\n\t// We can't check `mtxt == coll.ManifestText` because the API server\n\t// might've returned new block signatures if the GET and POST happened in\n\t// different seconds.\n\texpectPattern := `\\./dir1 \\S+ 0:3:foo 3:3:bar\\n`\n\tc.Assert(coll.ManifestText, check.Matches, expectPattern)\n\n\tfs, err := coll.FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\terr = fs.Sync()\n\tc.Check(err, check.IsNil)\n\n\t// fs had no local changes, so Sync should not have saved\n\t// anything back to the API/database. 
(If it did, we would see\n\t// the manifest rewritten in canonical order.)\n\tvar saved Collection\n\terr = s.client.RequestAndDecode(&saved, \"GET\", \"arvados/v1/collections/\"+coll.UUID, nil, nil)\n\tc.Assert(err, check.IsNil)\n\tc.Check(saved.ManifestText, check.Matches, expectPattern)\n}\n\nfunc (s *CollectionFSSuite) TestHttpFileSystemInterface(c *check.C) {\n\t_, ok := s.fs.(http.FileSystem)\n\tc.Check(ok, check.Equals, true)\n}\n\nfunc (s *CollectionFSSuite) TestUnattainableStorageClasses(c *check.C) {\n\tfs, err := (&Collection{\n\t\tStorageClassesDesired: []string{\"unobtainium\"},\n\t}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\n\tf, err := fs.OpenFile(\"/foo\", os.O_CREATE|os.O_WRONLY, 0777)\n\tc.Assert(err, check.IsNil)\n\t_, err = f.Write([]byte(\"food\"))\n\tc.Assert(err, check.IsNil)\n\terr = f.Close()\n\tc.Assert(err, check.IsNil)\n\t_, err = fs.MarshalManifest(\".\")\n\tc.Assert(err, check.ErrorMatches, `.*stub does not write storage class \\\"unobtainium\\\"`)\n}\n\nfunc (s *CollectionFSSuite) TestColonInFilename(c *check.C) {\n\tfs, err := (&Collection{\n\t\tManifestText: \"./foo:foo 3858f62230ac3c915f300c664312c63f+3 0:3:bar:bar\\n\",\n\t}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\n\tf, err := fs.Open(\"/foo:foo\")\n\tc.Assert(err, check.IsNil)\n\n\tfis, err := f.Readdir(0)\n\tc.Check(err, check.IsNil)\n\tc.Check(len(fis), check.Equals, 1)\n\tc.Check(fis[0].Name(), check.Equals, \"bar:bar\")\n}\n\nfunc (s *CollectionFSSuite) TestReaddirFull(c *check.C) {\n\tf, err := s.fs.Open(\"/dir1\")\n\tc.Assert(err, check.IsNil)\n\n\tst, err := f.Stat()\n\tc.Assert(err, check.IsNil)\n\tc.Check(st.Size(), check.Equals, int64(2))\n\tc.Check(st.IsDir(), check.Equals, true)\n\n\tfis, err := f.Readdir(0)\n\tc.Check(err, check.IsNil)\n\tc.Check(len(fis), check.Equals, 2)\n\tif len(fis) > 0 {\n\t\tc.Check(fis[0].Size(), check.Equals, int64(3))\n\t}\n}\n\nfunc (s *CollectionFSSuite) TestReaddirLimited(c *check.C) {\n\tf, err := s.fs.Open(\"./dir1\")\n\tc.Assert(err, check.IsNil)\n\n\tfis, err := f.Readdir(1)\n\tc.Check(err, check.IsNil)\n\tc.Check(len(fis), check.Equals, 1)\n\tif len(fis) > 0 {\n\t\tc.Check(fis[0].Size(), check.Equals, int64(3))\n\t}\n\n\tfis, err = f.Readdir(1)\n\tc.Check(err, check.IsNil)\n\tc.Check(len(fis), check.Equals, 1)\n\tif len(fis) > 0 {\n\t\tc.Check(fis[0].Size(), check.Equals, int64(3))\n\t}\n\n\tfis, err = f.Readdir(1)\n\tc.Check(len(fis), check.Equals, 0)\n\tc.Check(err, check.NotNil)\n\tc.Check(err, check.Equals, io.EOF)\n\n\tf, err = s.fs.Open(\"dir1\")\n\tc.Assert(err, check.IsNil)\n\tfis, err = f.Readdir(1)\n\tc.Check(len(fis), check.Equals, 1)\n\tc.Assert(err, check.IsNil)\n\tfis, err = f.Readdir(2)\n\tc.Check(len(fis), check.Equals, 1)\n\tc.Assert(err, check.IsNil)\n\tfis, err = f.Readdir(2)\n\tc.Check(len(fis), check.Equals, 0)\n\tc.Assert(err, check.Equals, io.EOF)\n}\n\nfunc (s *CollectionFSSuite) TestPathMunge(c *check.C) {\n\tfor _, path := range []string{\".\", \"/\", \"./\", \"///\", \"/../\", \"/./..\"} {\n\t\tf, err := s.fs.Open(path)\n\t\tc.Assert(err, check.IsNil)\n\n\t\tst, err := f.Stat()\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(st.Size(), check.Equals, int64(1))\n\t\tc.Check(st.IsDir(), check.Equals, true)\n\t}\n\tfor _, path := range []string{\"/dir1\", \"dir1\", \"./dir1\", \"///dir1//.//\", \"../dir1/../dir1/\"} {\n\t\tc.Logf(\"%q\", path)\n\t\tf, err := s.fs.Open(path)\n\t\tc.Assert(err, check.IsNil)\n\n\t\tst, err := f.Stat()\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(st.Size(), check.Equals, 
int64(2))\n\t\tc.Check(st.IsDir(), check.Equals, true)\n\t}\n}\n\nfunc (s *CollectionFSSuite) TestNotExist(c *check.C) {\n\tfor _, path := range []string{\"/no\", \"no\", \"./no\", \"n/o\", \"/n/o\"} {\n\t\tf, err := s.fs.Open(path)\n\t\tc.Assert(f, check.IsNil)\n\t\tc.Assert(err, check.NotNil)\n\t\tc.Assert(os.IsNotExist(err), check.Equals, true)\n\t}\n}\n\nfunc (s *CollectionFSSuite) TestReadOnlyFile(c *check.C) {\n\tf, err := s.fs.OpenFile(\"/dir1/foo\", os.O_RDONLY, 0)\n\tc.Assert(err, check.IsNil)\n\tst, err := f.Stat()\n\tc.Assert(err, check.IsNil)\n\tc.Check(st.Size(), check.Equals, int64(3))\n\tn, err := f.Write([]byte(\"bar\"))\n\tc.Check(n, check.Equals, 0)\n\tc.Check(err, check.Equals, ErrReadOnlyFile)\n}\n\nfunc (s *CollectionFSSuite) TestCreateFile(c *check.C) {\n\tf, err := s.fs.OpenFile(\"/new-file 1\", os.O_RDWR|os.O_CREATE, 0)\n\tc.Assert(err, check.IsNil)\n\tst, err := f.Stat()\n\tc.Assert(err, check.IsNil)\n\tc.Check(st.Size(), check.Equals, int64(0))\n\n\tn, err := f.Write([]byte(\"bar\"))\n\tc.Check(n, check.Equals, 3)\n\tc.Check(err, check.IsNil)\n\n\tc.Check(f.Close(), check.IsNil)\n\n\tf, err = s.fs.OpenFile(\"/new-file 1\", os.O_RDWR|os.O_CREATE|os.O_EXCL, 0)\n\tc.Check(f, check.IsNil)\n\tc.Assert(err, check.NotNil)\n\n\tf, err = s.fs.OpenFile(\"/new-file 1\", os.O_RDWR, 0)\n\tc.Assert(err, check.IsNil)\n\tst, err = f.Stat()\n\tc.Assert(err, check.IsNil)\n\tc.Check(st.Size(), check.Equals, int64(3))\n\n\tc.Check(f.Close(), check.IsNil)\n\n\tm, err := s.fs.MarshalManifest(\".\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(m, check.Matches, `. 37b51d194a7513e45b56f6524f2d51f2\\+3\\+\\S+ 0:3:new-file\\\\0401\\n./dir1 .* 3:3:bar 0:3:foo\\n`)\n}\n\nfunc (s *CollectionFSSuite) TestReadWriteFile(c *check.C) {\n\tmaxBlockSize = 8\n\tdefer func() { maxBlockSize = 1 << 26 }()\n\n\tf, err := s.fs.OpenFile(\"/dir1/foo\", os.O_RDWR, 0)\n\tc.Assert(err, check.IsNil)\n\tdefer f.Close()\n\tst, err := f.Stat()\n\tc.Assert(err, check.IsNil)\n\tc.Check(st.Size(), check.Equals, int64(3))\n\n\tf2, err := s.fs.OpenFile(\"/dir1/foo\", os.O_RDWR, 0)\n\tc.Assert(err, check.IsNil)\n\tdefer f2.Close()\n\n\tbuf := make([]byte, 64)\n\tn, err := f.Read(buf)\n\tc.Check(n, check.Equals, 3)\n\tc.Check(err, check.Equals, io.EOF)\n\tc.Check(string(buf[:3]), check.DeepEquals, \"foo\")\n\n\tpos, err := f.Seek(-2, io.SeekCurrent)\n\tc.Check(pos, check.Equals, int64(1))\n\tc.Check(err, check.IsNil)\n\n\t// Split a storedExtent in two, and insert a memExtent\n\tn, err = f.Write([]byte(\"*\"))\n\tc.Check(n, check.Equals, 1)\n\tc.Check(err, check.IsNil)\n\n\tpos, err = f.Seek(0, io.SeekCurrent)\n\tc.Check(pos, check.Equals, int64(2))\n\tc.Check(err, check.IsNil)\n\n\tpos, err = f.Seek(0, io.SeekStart)\n\tc.Check(pos, check.Equals, int64(0))\n\tc.Check(err, check.IsNil)\n\n\trbuf, err := ioutil.ReadAll(f)\n\tc.Check(len(rbuf), check.Equals, 3)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(rbuf), check.Equals, \"f*o\")\n\n\t// Write multiple blocks in one call\n\tf.Seek(1, io.SeekStart)\n\tn, err = f.Write([]byte(\"0123456789abcdefg\"))\n\tc.Check(n, check.Equals, 17)\n\tc.Check(err, check.IsNil)\n\tpos, err = f.Seek(0, io.SeekCurrent)\n\tc.Check(pos, check.Equals, int64(18))\n\tc.Check(err, check.IsNil)\n\tpos, err = f.Seek(-18, io.SeekCurrent)\n\tc.Check(pos, check.Equals, int64(0))\n\tc.Check(err, check.IsNil)\n\tn, err = io.ReadFull(f, buf)\n\tc.Check(n, check.Equals, 18)\n\tc.Check(err, check.Equals, io.ErrUnexpectedEOF)\n\tc.Check(string(buf[:n]), check.Equals, \"f0123456789abcdefg\")\n\n\tbuf2, err := 
ioutil.ReadAll(f2)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(buf2), check.Equals, \"f0123456789abcdefg\")\n\n\t// truncate to current size\n\terr = f.Truncate(18)\n\tc.Check(err, check.IsNil)\n\tf2.Seek(0, io.SeekStart)\n\tbuf2, err = ioutil.ReadAll(f2)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(buf2), check.Equals, \"f0123456789abcdefg\")\n\n\t// shrink to zero some data\n\tf.Truncate(15)\n\tf2.Seek(0, io.SeekStart)\n\tbuf2, err = ioutil.ReadAll(f2)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(buf2), check.Equals, \"f0123456789abcd\")\n\n\t// grow to partial block/extent\n\tf.Truncate(20)\n\tf2.Seek(0, io.SeekStart)\n\tbuf2, err = ioutil.ReadAll(f2)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(buf2), check.Equals, \"f0123456789abcd\\x00\\x00\\x00\\x00\\x00\")\n\n\tf.Truncate(0)\n\tf2.Seek(0, io.SeekStart)\n\tf2.Write([]byte(\"12345678abcdefghijkl\"))\n\n\t// grow to block/extent boundary\n\tf.Truncate(64)\n\tf2.Seek(0, io.SeekStart)\n\tbuf2, err = ioutil.ReadAll(f2)\n\tc.Check(err, check.IsNil)\n\tc.Check(len(buf2), check.Equals, 64)\n\tc.Check(len(f.(*filehandle).inode.(*filenode).segments), check.Equals, 8)\n\n\t// shrink to block/extent boundary\n\terr = f.Truncate(32)\n\tc.Check(err, check.IsNil)\n\tf2.Seek(0, io.SeekStart)\n\tbuf2, err = ioutil.ReadAll(f2)\n\tc.Check(err, check.IsNil)\n\tc.Check(len(buf2), check.Equals, 32)\n\tc.Check(len(f.(*filehandle).inode.(*filenode).segments), check.Equals, 4)\n\n\t// shrink to partial block/extent\n\terr = f.Truncate(15)\n\tc.Check(err, check.IsNil)\n\tf2.Seek(0, io.SeekStart)\n\tbuf2, err = ioutil.ReadAll(f2)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(buf2), check.Equals, \"12345678abcdefg\")\n\tc.Check(len(f.(*filehandle).inode.(*filenode).segments), check.Equals, 2)\n\n\t// Force flush to ensure the block \"12345678\" gets stored, so\n\t// we know what to expect in the final manifest below.\n\t_, err = s.fs.MarshalManifest(\".\")\n\tc.Check(err, check.IsNil)\n\n\t// Truncate to size=3 while f2's ptr is at 15\n\terr = f.Truncate(3)\n\tc.Check(err, check.IsNil)\n\tbuf2, err = ioutil.ReadAll(f2)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(buf2), check.Equals, \"\")\n\tf2.Seek(0, io.SeekStart)\n\tbuf2, err = ioutil.ReadAll(f2)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(buf2), check.Equals, \"123\")\n\tc.Check(len(f.(*filehandle).inode.(*filenode).segments), check.Equals, 1)\n\n\tm, err := s.fs.MarshalManifest(\".\")\n\tc.Check(err, check.IsNil)\n\tm = regexp.MustCompile(`\\+A[^\\+ ]+`).ReplaceAllLiteralString(m, \"\")\n\tc.Check(m, check.Equals, \"./dir1 3858f62230ac3c915f300c664312c63f+6 25d55ad283aa400af464c76d713c07ad+8 3:3:bar 6:3:foo\\n\")\n\tc.Check(s.fs.Size(), check.Equals, int64(6))\n}\n\nfunc (s *CollectionFSSuite) TestSeekSparse(c *check.C) {\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\tf, err := fs.OpenFile(\"test\", os.O_CREATE|os.O_RDWR, 0755)\n\tc.Assert(err, check.IsNil)\n\tdefer f.Close()\n\n\tcheckSize := func(size int64) {\n\t\tfi, err := f.Stat()\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(fi.Size(), check.Equals, size)\n\n\t\tf, err := fs.OpenFile(\"test\", os.O_CREATE|os.O_RDWR, 0755)\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer f.Close()\n\t\tfi, err = f.Stat()\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(fi.Size(), check.Equals, size)\n\t\tpos, err := f.Seek(0, io.SeekEnd)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(pos, check.Equals, size)\n\t}\n\n\tf.Seek(2, io.SeekEnd)\n\tcheckSize(0)\n\tf.Write([]byte{1})\n\tcheckSize(3)\n\n\tf.Seek(2, 
io.SeekCurrent)\n\tcheckSize(3)\n\tf.Write([]byte{})\n\tcheckSize(5)\n\n\tf.Seek(8, io.SeekStart)\n\tcheckSize(5)\n\tn, err := f.Read(make([]byte, 1))\n\tc.Check(n, check.Equals, 0)\n\tc.Check(err, check.Equals, io.EOF)\n\tcheckSize(5)\n\tf.Write([]byte{1, 2, 3})\n\tcheckSize(11)\n}\n\nfunc (s *CollectionFSSuite) TestMarshalCopiesRemoteBlocks(c *check.C) {\n\tfoo := \"foo\"\n\tbar := \"bar\"\n\thash := map[string]string{\n\t\tfoo: fmt.Sprintf(\"%x\", md5.Sum([]byte(foo))),\n\t\tbar: fmt.Sprintf(\"%x\", md5.Sum([]byte(bar))),\n\t}\n\n\tfs, err := (&Collection{\n\t\tManifestText: \". \" + hash[foo] + \"+3+Rzaaaa-foo@bab \" + hash[bar] + \"+3+A12345@ffffff 0:2:fo.txt 2:4:obar.txt\\n\",\n\t}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\tmanifest, err := fs.MarshalManifest(\".\")\n\tc.Check(manifest, check.Equals, \"\")\n\tc.Check(err, check.NotNil)\n\n\ts.kc.refreshable = map[string]bool{hash[bar]: true}\n\n\tfor _, sigIn := range []string{\"Rzaaaa-foo@bab\", \"A12345@abcde\"} {\n\t\tfs, err = (&Collection{\n\t\t\tManifestText: \". \" + hash[foo] + \"+3+A12345@fffff \" + hash[bar] + \"+3+\" + sigIn + \" 0:2:fo.txt 2:4:obar.txt\\n\",\n\t\t}).FileSystem(s.client, s.kc)\n\t\tc.Assert(err, check.IsNil)\n\t\tmanifest, err := fs.MarshalManifest(\".\")\n\t\tc.Check(err, check.IsNil)\n\t\t// Both blocks should now have +A signatures.\n\t\tc.Check(manifest, check.Matches, `.*\\+A.* .*\\+A.*\\n`)\n\t\tc.Check(manifest, check.Not(check.Matches), `.*\\+R.*\\n`)\n\t}\n}\n\nfunc (s *CollectionFSSuite) TestMarshalSmallBlocks(c *check.C) {\n\tmaxBlockSize = 8\n\tdefer func() { maxBlockSize = 1 << 26 }()\n\n\tvar err error\n\ts.fs, err = (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\tfor _, name := range []string{\"foo\", \"bar\", \"baz\"} {\n\t\tf, err := s.fs.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0)\n\t\tc.Assert(err, check.IsNil)\n\t\tf.Write([]byte(name))\n\t\tf.Close()\n\t}\n\n\tm, err := s.fs.MarshalManifest(\".\")\n\tc.Check(err, check.IsNil)\n\tm = regexp.MustCompile(`\\+A[^\\+ ]+`).ReplaceAllLiteralString(m, \"\")\n\tc.Check(m, check.Equals, \". 
c3c23db5285662ef7172373df0003206+6 acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:bar 3:3:baz 6:3:foo\\n\")\n}\n\nfunc (s *CollectionFSSuite) TestMkdir(c *check.C) {\n\terr := s.fs.Mkdir(\"foo/bar\", 0755)\n\tc.Check(err, check.Equals, os.ErrNotExist)\n\n\tf, err := s.fs.OpenFile(\"foo/bar\", os.O_CREATE, 0)\n\tc.Check(err, check.Equals, os.ErrNotExist)\n\n\terr = s.fs.Mkdir(\"foo\", 0755)\n\tc.Check(err, check.IsNil)\n\n\tf, err = s.fs.OpenFile(\"foo/bar\", os.O_CREATE|os.O_WRONLY, 0)\n\tc.Check(err, check.IsNil)\n\tif err == nil {\n\t\tdefer f.Close()\n\t\tf.Write([]byte(\"foo\"))\n\t}\n\n\t// mkdir fails if a file already exists with that name\n\terr = s.fs.Mkdir(\"foo/bar\", 0755)\n\tc.Check(err, check.NotNil)\n\n\terr = s.fs.Remove(\"foo/bar\")\n\tc.Check(err, check.IsNil)\n\n\t// mkdir succeeds after the file is deleted\n\terr = s.fs.Mkdir(\"foo/bar\", 0755)\n\tc.Check(err, check.IsNil)\n\n\t// creating a file in a nonexistent subdir should still fail\n\tf, err = s.fs.OpenFile(\"foo/bar/baz/foo.txt\", os.O_CREATE|os.O_WRONLY, 0)\n\tc.Check(err, check.Equals, os.ErrNotExist)\n\n\tf, err = s.fs.OpenFile(\"foo/bar/foo.txt\", os.O_CREATE|os.O_WRONLY, 0)\n\tc.Check(err, check.IsNil)\n\tif err == nil {\n\t\tdefer f.Close()\n\t\tf.Write([]byte(\"foo\"))\n\t}\n\n\t// creating foo/bar as a regular file should fail\n\tf, err = s.fs.OpenFile(\"foo/bar\", os.O_CREATE|os.O_EXCL, 0)\n\tc.Check(err, check.NotNil)\n\n\t// creating foo/bar as a directory should fail\n\tf, err = s.fs.OpenFile(\"foo/bar\", os.O_CREATE|os.O_EXCL, os.ModeDir)\n\tc.Check(err, check.NotNil)\n\terr = s.fs.Mkdir(\"foo/bar\", 0755)\n\tc.Check(err, check.NotNil)\n\n\tm, err := s.fs.MarshalManifest(\".\")\n\tc.Check(err, check.IsNil)\n\tm = regexp.MustCompile(`\\+A[^\\+ ]+`).ReplaceAllLiteralString(m, \"\")\n\tc.Check(m, check.Equals, \"./dir1 3858f62230ac3c915f300c664312c63f+6 3:3:bar 0:3:foo\\n./foo/bar acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\\n\")\n}\n\nfunc (s *CollectionFSSuite) TestConcurrentWriters(c *check.C) {\n\tif testing.Short() {\n\t\tc.Skip(\"slow\")\n\t}\n\n\tmaxBlockSize = 8\n\tdefer func() { maxBlockSize = 1 << 26 }()\n\n\tvar wg sync.WaitGroup\n\tfor n := 0; n < 128; n++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tf, err := s.fs.OpenFile(\"/dir1/foo\", os.O_RDWR, 0)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tdefer f.Close()\n\t\t\tfor i := 0; i < 1024; i++ {\n\t\t\t\tr := rand.Uint32()\n\t\t\t\tswitch {\n\t\t\t\tcase r%11 == 0:\n\t\t\t\t\t_, err := s.fs.MarshalManifest(\".\")\n\t\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\tcase r&3 == 0:\n\t\t\t\t\tf.Truncate(int64(rand.Intn(64)))\n\t\t\t\tcase r&3 == 1:\n\t\t\t\t\tf.Seek(int64(rand.Intn(64)), io.SeekStart)\n\t\t\t\tcase r&3 == 2:\n\t\t\t\t\t_, err := f.Write([]byte(\"beep boop\"))\n\t\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\tcase r&3 == 3:\n\t\t\t\t\t_, err := ioutil.ReadAll(f)\n\t\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\n\tf, err := s.fs.OpenFile(\"/dir1/foo\", os.O_RDWR, 0)\n\tc.Assert(err, check.IsNil)\n\tdefer f.Close()\n\tbuf, err := ioutil.ReadAll(f)\n\tc.Check(err, check.IsNil)\n\tc.Logf(\"after lots of random r/w/seek/trunc, buf is %q\", buf)\n}\n\nfunc (s *CollectionFSSuite) TestRandomWrites(c *check.C) {\n\tmaxBlockSize = 40\n\tdefer func() { maxBlockSize = 1 << 26 }()\n\n\tvar err error\n\ts.fs, err = (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\n\tconst nfiles = 256\n\tconst ngoroutines = 256\n\n\tvar wg sync.WaitGroup\n\tfor n := 0; n < ngoroutines; n++ 
{\n\t\twg.Add(1)\n\t\tgo func(n int) {\n\t\t\tdefer wg.Done()\n\t\t\texpect := make([]byte, 0, 64)\n\t\t\twbytes := []byte(\"there's no simple explanation for anything important that any of us do\")\n\t\t\tf, err := s.fs.OpenFile(fmt.Sprintf(\"random-%d\", n), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tdefer f.Close()\n\t\t\tfor i := 0; i < nfiles; i++ {\n\t\t\t\ttrunc := rand.Intn(65)\n\t\t\t\twoff := rand.Intn(trunc + 1)\n\t\t\t\twbytes = wbytes[:rand.Intn(64-woff+1)]\n\t\t\t\tfor buf, i := expect[:cap(expect)], len(expect); i < trunc; i++ {\n\t\t\t\t\tbuf[i] = 0\n\t\t\t\t}\n\t\t\t\texpect = expect[:trunc]\n\t\t\t\tif trunc < woff+len(wbytes) {\n\t\t\t\t\texpect = expect[:woff+len(wbytes)]\n\t\t\t\t}\n\t\t\t\tcopy(expect[woff:], wbytes)\n\t\t\t\tf.Truncate(int64(trunc))\n\t\t\t\tpos, err := f.Seek(int64(woff), io.SeekStart)\n\t\t\t\tc.Check(pos, check.Equals, int64(woff))\n\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\tn, err := f.Write(wbytes)\n\t\t\t\tc.Check(n, check.Equals, len(wbytes))\n\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\tpos, err = f.Seek(0, io.SeekStart)\n\t\t\t\tc.Check(pos, check.Equals, int64(0))\n\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\tbuf, err := ioutil.ReadAll(f)\n\t\t\t\tc.Check(string(buf), check.Equals, string(expect))\n\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t}\n\t\t}(n)\n\t}\n\twg.Wait()\n\n\tfor n := 0; n < ngoroutines; n++ {\n\t\tf, err := s.fs.OpenFile(fmt.Sprintf(\"random-%d\", n), os.O_RDONLY, 0)\n\t\tc.Assert(err, check.IsNil)\n\t\tf.(*filehandle).inode.(*filenode).waitPrune()\n\t\ts.checkMemSize(c, f)\n\t\tdefer f.Close()\n\t}\n\n\troot, err := s.fs.Open(\"/\")\n\tc.Assert(err, check.IsNil)\n\tdefer root.Close()\n\tfi, err := root.Readdir(-1)\n\tc.Check(err, check.IsNil)\n\tc.Check(len(fi), check.Equals, nfiles)\n\n\t_, err = s.fs.MarshalManifest(\".\")\n\tc.Check(err, check.IsNil)\n\t// TODO: check manifest content\n}\n\nfunc (s *CollectionFSSuite) TestRemove(c *check.C) {\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\terr = fs.Mkdir(\"dir0\", 0755)\n\tc.Assert(err, check.IsNil)\n\terr = fs.Mkdir(\"dir1\", 0755)\n\tc.Assert(err, check.IsNil)\n\terr = fs.Mkdir(\"dir1/dir2\", 0755)\n\tc.Assert(err, check.IsNil)\n\terr = fs.Mkdir(\"dir1/dir3\", 0755)\n\tc.Assert(err, check.IsNil)\n\n\terr = fs.Remove(\"dir0\")\n\tc.Check(err, check.IsNil)\n\terr = fs.Remove(\"dir0\")\n\tc.Check(err, check.Equals, os.ErrNotExist)\n\n\terr = fs.Remove(\"dir1/dir2/.\")\n\tc.Check(err, check.Equals, ErrInvalidArgument)\n\terr = fs.Remove(\"dir1/dir2/..\")\n\tc.Check(err, check.Equals, ErrInvalidArgument)\n\terr = fs.Remove(\"dir1\")\n\tc.Check(err, check.Equals, ErrDirectoryNotEmpty)\n\terr = fs.Remove(\"dir1/dir2/../../../dir1\")\n\tc.Check(err, check.Equals, ErrDirectoryNotEmpty)\n\terr = fs.Remove(\"dir1/dir3/\")\n\tc.Check(err, check.IsNil)\n\terr = fs.RemoveAll(\"dir1\")\n\tc.Check(err, check.IsNil)\n\terr = fs.RemoveAll(\"dir1\")\n\tc.Check(err, check.IsNil)\n}\n\nfunc (s *CollectionFSSuite) TestRenameError(c *check.C) {\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\terr = fs.Mkdir(\"first\", 0755)\n\tc.Assert(err, check.IsNil)\n\terr = fs.Mkdir(\"first/second\", 0755)\n\tc.Assert(err, check.IsNil)\n\tf, err := fs.OpenFile(\"first/second/file\", os.O_CREATE|os.O_WRONLY, 0755)\n\tc.Assert(err, check.IsNil)\n\tf.Write([]byte{1, 2, 3, 4, 5})\n\tf.Close()\n\terr = fs.Rename(\"first\", \"first/second/third\")\n\tc.Check(err, check.Equals, 
ErrInvalidArgument)\n\terr = fs.Rename(\"first\", \"first/third\")\n\tc.Check(err, check.Equals, ErrInvalidArgument)\n\terr = fs.Rename(\"first/second\", \"second\")\n\tc.Check(err, check.IsNil)\n\tf, err = fs.OpenFile(\"second/file\", 0, 0)\n\tc.Assert(err, check.IsNil)\n\tdata, err := ioutil.ReadAll(f)\n\tc.Check(err, check.IsNil)\n\tc.Check(data, check.DeepEquals, []byte{1, 2, 3, 4, 5})\n}\n\nfunc (s *CollectionFSSuite) TestRenameDirectory(c *check.C) {\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\terr = fs.Mkdir(\"foo\", 0755)\n\tc.Assert(err, check.IsNil)\n\terr = fs.Mkdir(\"bar\", 0755)\n\tc.Assert(err, check.IsNil)\n\terr = fs.Rename(\"bar\", \"baz\")\n\tc.Check(err, check.IsNil)\n\terr = fs.Rename(\"foo\", \"baz\")\n\tc.Check(err, check.NotNil)\n\terr = fs.Rename(\"foo\", \"baz/\")\n\tc.Check(err, check.IsNil)\n\terr = fs.Rename(\"baz/foo\", \".\")\n\tc.Check(err, check.Equals, ErrInvalidArgument)\n\terr = fs.Rename(\"baz/foo/\", \".\")\n\tc.Check(err, check.Equals, ErrInvalidArgument)\n}\n\nfunc (s *CollectionFSSuite) TestRename(c *check.C) {\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\tconst (\n\t\touter = 16\n\t\tinner = 16\n\t)\n\tfor i := 0; i < outer; i++ {\n\t\terr = fs.Mkdir(fmt.Sprintf(\"dir%d\", i), 0755)\n\t\tc.Assert(err, check.IsNil)\n\t\tfor j := 0; j < inner; j++ {\n\t\t\terr = fs.Mkdir(fmt.Sprintf(\"dir%d/dir%d\", i, j), 0755)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tfor _, fnm := range []string{\n\t\t\t\tfmt.Sprintf(\"dir%d/file%d\", i, j),\n\t\t\t\tfmt.Sprintf(\"dir%d/dir%d/file%d\", i, j, j),\n\t\t\t} {\n\t\t\t\tf, err := fs.OpenFile(fnm, os.O_CREATE|os.O_WRONLY, 0755)\n\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\t_, err = f.Write([]byte(\"beep\"))\n\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\tf.Close()\n\t\t\t}\n\t\t}\n\t}\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < outer; i++ {\n\t\tfor j := 0; j < inner; j++ {\n\t\t\twg.Add(1)\n\t\t\tgo func(i, j int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\toldname := fmt.Sprintf(\"dir%d/dir%d/file%d\", i, j, j)\n\t\t\t\tnewname := fmt.Sprintf(\"dir%d/newfile%d\", i, inner-j-1)\n\t\t\t\t_, err := fs.Open(newname)\n\t\t\t\tc.Check(err, check.Equals, os.ErrNotExist)\n\t\t\t\terr = fs.Rename(oldname, newname)\n\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\tf, err := fs.Open(newname)\n\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\tf.Close()\n\t\t\t}(i, j)\n\n\t\t\twg.Add(1)\n\t\t\tgo func(i, j int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t// oldname does not exist\n\t\t\t\terr := fs.Rename(\n\t\t\t\t\tfmt.Sprintf(\"dir%d/dir%d/missing\", i, j),\n\t\t\t\t\tfmt.Sprintf(\"dir%d/dir%d/file%d\", outer-i-1, j, j))\n\t\t\t\tc.Check(err, check.ErrorMatches, `.*does not exist`)\n\n\t\t\t\t// newname parent dir does not exist\n\t\t\t\terr = fs.Rename(\n\t\t\t\t\tfmt.Sprintf(\"dir%d/dir%d\", i, j),\n\t\t\t\t\tfmt.Sprintf(\"dir%d/missing/irrelevant\", outer-i-1))\n\t\t\t\tc.Check(err, check.ErrorMatches, `.*does not exist`)\n\n\t\t\t\t// oldname parent dir is a file\n\t\t\t\terr = fs.Rename(\n\t\t\t\t\tfmt.Sprintf(\"dir%d/file%d/patherror\", i, j),\n\t\t\t\t\tfmt.Sprintf(\"dir%d/irrelevant\", i))\n\t\t\t\tc.Check(err, check.ErrorMatches, `.*not a directory`)\n\n\t\t\t\t// newname parent dir is a file\n\t\t\t\terr = fs.Rename(\n\t\t\t\t\tfmt.Sprintf(\"dir%d/dir%d/file%d\", i, j, j),\n\t\t\t\t\tfmt.Sprintf(\"dir%d/file%d/patherror\", i, inner-j-1))\n\t\t\t\tc.Check(err, check.ErrorMatches, `.*not a directory`)\n\t\t\t}(i, j)\n\t\t}\n\t}\n\twg.Wait()\n\n\tf, err := 
fs.OpenFile(\"dir1/newfile3\", 0, 0)\n\tc.Assert(err, check.IsNil)\n\tc.Check(f.Size(), check.Equals, int64(4))\n\tbuf, err := ioutil.ReadAll(f)\n\tc.Check(buf, check.DeepEquals, []byte(\"beep\"))\n\tc.Check(err, check.IsNil)\n\t_, err = fs.Open(\"dir1/dir1/file1\")\n\tc.Check(err, check.Equals, os.ErrNotExist)\n}\n\nfunc (s *CollectionFSSuite) TestPersist(c *check.C) {\n\tmaxBlockSize = 1024\n\tdefer func() { maxBlockSize = 1 << 26 }()\n\n\tvar err error\n\ts.fs, err = (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\terr = s.fs.Mkdir(\"d:r\", 0755)\n\tc.Assert(err, check.IsNil)\n\n\texpect := map[string][]byte{}\n\n\tvar wg sync.WaitGroup\n\tfor _, name := range []string{\"random 1\", \"random:2\", \"random\\\\3\", \"d:r/random4\"} {\n\t\tbuf := make([]byte, 500)\n\t\trand.Read(buf)\n\t\texpect[name] = buf\n\n\t\tf, err := s.fs.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0)\n\t\tc.Assert(err, check.IsNil)\n\t\t// Note: we don't close the file until after the test\n\t\t// is done. Writes to unclosed files should persist.\n\t\tdefer f.Close()\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor i := 0; i < len(buf); i += 5 {\n\t\t\t\t_, err := f.Write(buf[i : i+5])\n\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\n\tm, err := s.fs.MarshalManifest(\".\")\n\tc.Check(err, check.IsNil)\n\tc.Logf(\"%q\", m)\n\n\troot, err := s.fs.Open(\"/\")\n\tc.Assert(err, check.IsNil)\n\tdefer root.Close()\n\tfi, err := root.Readdir(-1)\n\tc.Check(err, check.IsNil)\n\tc.Check(len(fi), check.Equals, 4)\n\n\tpersisted, err := (&Collection{ManifestText: m}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\n\troot, err = persisted.Open(\"/\")\n\tc.Assert(err, check.IsNil)\n\tdefer root.Close()\n\tfi, err = root.Readdir(-1)\n\tc.Check(err, check.IsNil)\n\tc.Check(len(fi), check.Equals, 4)\n\n\tfor name, content := range expect {\n\t\tc.Logf(\"read %q\", name)\n\t\tf, err := persisted.Open(name)\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer f.Close()\n\t\tbuf, err := ioutil.ReadAll(f)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(buf, check.DeepEquals, content)\n\t}\n}\n\nfunc (s *CollectionFSSuite) TestPersistEmptyFilesAndDirs(c *check.C) {\n\tvar err error\n\ts.fs, err = (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\tfor _, name := range []string{\"dir\", \"dir/zerodir\", \"empty\", \"not empty\", \"not empty/empty\", \"zero\", \"zero/zero\"} {\n\t\terr = s.fs.Mkdir(name, 0755)\n\t\tc.Assert(err, check.IsNil)\n\t}\n\n\texpect := map[string][]byte{\n\t\t\"0\":                nil,\n\t\t\"00\":               {},\n\t\t\"one\":              {1},\n\t\t\"dir/0\":            nil,\n\t\t\"dir/two\":          {1, 2},\n\t\t\"dir/zero\":         nil,\n\t\t\"dir/zerodir/zero\": nil,\n\t\t\"zero/zero/zero\":   nil,\n\t}\n\tfor name, data := range expect {\n\t\tf, err := s.fs.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0)\n\t\tc.Assert(err, check.IsNil)\n\t\tif data != nil {\n\t\t\t_, err := f.Write(data)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t}\n\t\tf.Close()\n\t}\n\n\tm, err := s.fs.MarshalManifest(\".\")\n\tc.Check(err, check.IsNil)\n\tc.Logf(\"%q\", m)\n\n\tpersisted, err := (&Collection{ManifestText: m}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\n\tfor name, data := range expect {\n\t\t_, err = persisted.Open(\"bogus-\" + name)\n\t\tc.Check(err, check.NotNil)\n\n\t\tf, err := persisted.Open(name)\n\t\tc.Assert(err, check.IsNil)\n\n\t\tif data == nil {\n\t\t\tdata = []byte{}\n\t\t}\n\t\tbuf, err := 
ioutil.ReadAll(f)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(buf, check.DeepEquals, data)\n\t}\n\n\texpectDir := map[string]int{\n\t\t\"empty\":           0,\n\t\t\"not empty\":       1,\n\t\t\"not empty/empty\": 0,\n\t}\n\tfor name, expectLen := range expectDir {\n\t\t_, err := persisted.Open(name + \"/bogus\")\n\t\tc.Check(err, check.NotNil)\n\n\t\td, err := persisted.Open(name)\n\t\tdefer d.Close()\n\t\tc.Check(err, check.IsNil)\n\t\tfi, err := d.Readdir(-1)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(fi, check.HasLen, expectLen)\n\t}\n}\n\nfunc (s *CollectionFSSuite) TestOpenFileFlags(c *check.C) {\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\n\tf, err := fs.OpenFile(\"missing\", os.O_WRONLY, 0)\n\tc.Check(f, check.IsNil)\n\tc.Check(err, check.ErrorMatches, `file does not exist`)\n\n\tf, err = fs.OpenFile(\"new\", os.O_CREATE|os.O_RDONLY, 0)\n\tc.Assert(err, check.IsNil)\n\tdefer f.Close()\n\tn, err := f.Write([]byte{1, 2, 3})\n\tc.Check(n, check.Equals, 0)\n\tc.Check(err, check.ErrorMatches, `read-only file`)\n\tn, err = f.Read(make([]byte, 1))\n\tc.Check(n, check.Equals, 0)\n\tc.Check(err, check.Equals, io.EOF)\n\tf, err = fs.OpenFile(\"new\", os.O_RDWR, 0)\n\tc.Assert(err, check.IsNil)\n\tdefer f.Close()\n\t_, err = f.Write([]byte{4, 5, 6})\n\tc.Check(err, check.IsNil)\n\tfi, err := f.Stat()\n\tc.Assert(err, check.IsNil)\n\tc.Check(fi.Size(), check.Equals, int64(3))\n\n\tf, err = fs.OpenFile(\"new\", os.O_TRUNC|os.O_RDWR, 0)\n\tc.Assert(err, check.IsNil)\n\tdefer f.Close()\n\tpos, err := f.Seek(0, io.SeekEnd)\n\tc.Check(pos, check.Equals, int64(0))\n\tc.Check(err, check.IsNil)\n\tfi, err = f.Stat()\n\tc.Assert(err, check.IsNil)\n\tc.Check(fi.Size(), check.Equals, int64(0))\n\tfs.Remove(\"new\")\n\n\tbuf := make([]byte, 64)\n\tf, err = fs.OpenFile(\"append\", os.O_EXCL|os.O_CREATE|os.O_RDWR|os.O_APPEND, 0)\n\tc.Assert(err, check.IsNil)\n\tf.Write([]byte{1, 2, 3})\n\tf.Seek(0, io.SeekStart)\n\tn, _ = f.Read(buf[:1])\n\tc.Check(n, check.Equals, 1)\n\tc.Check(buf[:1], check.DeepEquals, []byte{1})\n\tpos, err = f.Seek(0, io.SeekCurrent)\n\tc.Assert(err, check.IsNil)\n\tc.Check(pos, check.Equals, int64(1))\n\tf.Write([]byte{4, 5, 6})\n\tpos, err = f.Seek(0, io.SeekCurrent)\n\tc.Assert(err, check.IsNil)\n\tc.Check(pos, check.Equals, int64(6))\n\tf.Seek(0, io.SeekStart)\n\tn, err = f.Read(buf)\n\tc.Check(buf[:n], check.DeepEquals, []byte{1, 2, 3, 4, 5, 6})\n\tc.Check(err, check.Equals, io.EOF)\n\tf.Close()\n\n\tf, err = fs.OpenFile(\"append\", os.O_RDWR|os.O_APPEND, 0)\n\tc.Assert(err, check.IsNil)\n\tpos, err = f.Seek(0, io.SeekCurrent)\n\tc.Check(pos, check.Equals, int64(0))\n\tc.Check(err, check.IsNil)\n\tf.Read(buf[:3])\n\tpos, _ = f.Seek(0, io.SeekCurrent)\n\tc.Check(pos, check.Equals, int64(3))\n\tf.Write([]byte{7, 8, 9})\n\tpos, err = f.Seek(0, io.SeekCurrent)\n\tc.Check(err, check.IsNil)\n\tc.Check(pos, check.Equals, int64(9))\n\tf.Close()\n\n\tf, err = fs.OpenFile(\"wronly\", os.O_CREATE|os.O_WRONLY, 0)\n\tc.Assert(err, check.IsNil)\n\tn, err = f.Write([]byte{3, 2, 1})\n\tc.Check(n, check.Equals, 3)\n\tc.Check(err, check.IsNil)\n\tpos, _ = f.Seek(0, io.SeekCurrent)\n\tc.Check(pos, check.Equals, int64(3))\n\tpos, _ = f.Seek(0, io.SeekStart)\n\tc.Check(pos, check.Equals, int64(0))\n\tn, err = f.Read(buf)\n\tc.Check(n, check.Equals, 0)\n\tc.Check(err, check.ErrorMatches, `.*O_WRONLY.*`)\n\tf, err = fs.OpenFile(\"wronly\", os.O_RDONLY, 0)\n\tc.Assert(err, check.IsNil)\n\tn, _ = f.Read(buf)\n\tc.Check(buf[:n], check.DeepEquals, []byte{3, 2, 
1})\n\n\tf, err = fs.OpenFile(\"unsupported\", os.O_CREATE|os.O_SYNC, 0)\n\tc.Check(f, check.IsNil)\n\tc.Check(err, check.NotNil)\n\n\tf, err = fs.OpenFile(\"append\", os.O_RDWR|os.O_WRONLY, 0)\n\tc.Check(f, check.IsNil)\n\tc.Check(err, check.ErrorMatches, `invalid flag.*`)\n}\n\nfunc (s *CollectionFSSuite) TestFlushFullBlocksWritingLongFile(c *check.C) {\n\tdefer func(cw, mbs int) {\n\t\tconcurrentWriters = cw\n\t\tmaxBlockSize = mbs\n\t}(concurrentWriters, maxBlockSize)\n\tconcurrentWriters = 2\n\tmaxBlockSize = 1024\n\n\tproceed := make(chan struct{})\n\tvar started, concurrent int32\n\tblk2done := false\n\ts.kc.onWrite = func([]byte) error {\n\t\tatomic.AddInt32(&concurrent, 1)\n\t\tswitch atomic.AddInt32(&started, 1) {\n\t\tcase 1:\n\t\t\t// Wait until block 2 starts and finishes, and block 3 starts\n\t\t\tselect {\n\t\t\tcase <-proceed:\n\t\t\t\tc.Check(blk2done, check.Equals, true)\n\t\t\tcase <-time.After(time.Second):\n\t\t\t\tc.Error(\"timed out\")\n\t\t\t}\n\t\tcase 2:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t\tblk2done = true\n\t\tcase 3:\n\t\t\tclose(proceed)\n\t\tdefault:\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t\tc.Check(atomic.AddInt32(&concurrent, -1) < int32(concurrentWriters), check.Equals, true)\n\t\treturn nil\n\t}\n\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\tf, err := fs.OpenFile(\"50K\", os.O_WRONLY|os.O_CREATE, 0)\n\tc.Assert(err, check.IsNil)\n\tdefer f.Close()\n\n\tdata := make([]byte, 500)\n\trand.Read(data)\n\n\tfor i := 0; i < 100; i++ {\n\t\tn, err := f.Write(data)\n\t\tc.Assert(n, check.Equals, len(data))\n\t\tc.Assert(err, check.IsNil)\n\t}\n\n\tcurrentMemExtents := func() (memExtents []int) {\n\t\tfor idx, e := range f.(*filehandle).inode.(*filenode).segments {\n\t\t\tswitch e.(type) {\n\t\t\tcase *memSegment:\n\t\t\t\tmemExtents = append(memExtents, idx)\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tf.(*filehandle).inode.(*filenode).waitPrune()\n\tc.Check(currentMemExtents(), check.HasLen, 1)\n\n\tm, err := fs.MarshalManifest(\".\")\n\tc.Check(m, check.Matches, `[^:]* 0:50000:50K\\n`)\n\tc.Check(err, check.IsNil)\n\tc.Check(currentMemExtents(), check.HasLen, 0)\n}\n\n// Ensure blocks get flushed to disk if a lot of data is written to\n// small files/directories without calling sync().\n//\n// Write four 512KiB files into each of 256 top-level dirs (total\n// 512MiB), calling Flush() every 8 dirs. 
Ensure memory usage never\n// exceeds 24MiB (4 concurrentWriters * 2MiB + 8 unflushed dirs *\n// 2MiB).\nfunc (s *CollectionFSSuite) TestFlushAll(c *check.C) {\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\n\ts.kc.onWrite = func([]byte) error {\n\t\t// discard flushed data -- otherwise the stub will use\n\t\t// unlimited memory\n\t\ttime.Sleep(time.Millisecond)\n\t\ts.kc.Lock()\n\t\tdefer s.kc.Unlock()\n\t\ts.kc.blocks = map[string][]byte{}\n\t\treturn nil\n\t}\n\tfor i := 0; i < 256; i++ {\n\t\tbuf := bytes.NewBuffer(make([]byte, 524288))\n\t\tfmt.Fprintf(buf, \"test file in dir%d\", i)\n\n\t\tdir := fmt.Sprintf(\"dir%d\", i)\n\t\tfs.Mkdir(dir, 0755)\n\t\tfor j := 0; j < 2; j++ {\n\t\t\tf, err := fs.OpenFile(fmt.Sprintf(\"%s/file%d\", dir, j), os.O_WRONLY|os.O_CREATE, 0)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tdefer f.Close()\n\t\t\t_, err = io.Copy(f, buf)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t}\n\n\t\tif i%8 == 0 {\n\t\t\tfs.Flush(\"\", true)\n\t\t}\n\n\t\tsize := fs.MemorySize()\n\t\tif !c.Check(size <= 1<<24, check.Equals, true) {\n\t\t\tc.Logf(\"at dir%d fs.MemorySize()=%d\", i, size)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Ensure short blocks at the end of a stream don't get flushed by\n// Flush(false).\n//\n// Write 67x 1MiB files to each of 8 dirs, and check that 8 full 64MiB\n// blocks have been flushed while 8x 3MiB is still buffered in memory.\nfunc (s *CollectionFSSuite) TestFlushFullBlocksOnly(c *check.C) {\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\n\tvar flushed int64\n\ts.kc.onWrite = func(p []byte) error {\n\t\tatomic.AddInt64(&flushed, int64(len(p)))\n\t\treturn nil\n\t}\n\n\tnDirs := int64(8)\n\tnFiles := int64(67)\n\tmegabyte := make([]byte, 1<<20)\n\tfor i := int64(0); i < nDirs; i++ {\n\t\tdir := fmt.Sprintf(\"dir%d\", i)\n\t\tfs.Mkdir(dir, 0755)\n\t\tfor j := int64(0); j < nFiles; j++ {\n\t\t\tf, err := fs.OpenFile(fmt.Sprintf(\"%s/file%d\", dir, j), os.O_WRONLY|os.O_CREATE, 0)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tdefer f.Close()\n\t\t\t_, err = f.Write(megabyte)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t}\n\t}\n\tinodebytes := int64((nDirs*(nFiles+1) + 1) * 64)\n\tc.Check(fs.MemorySize(), check.Equals, nDirs*nFiles*(1<<20+64)+inodebytes)\n\tc.Check(flushed, check.Equals, int64(0))\n\n\twaitForFlush := func(expectUnflushed, expectFlushed int64) {\n\t\tfor deadline := time.Now().Add(5 * time.Second); fs.MemorySize() > expectUnflushed && time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) {\n\t\t}\n\t\tc.Check(fs.MemorySize(), check.Equals, expectUnflushed)\n\t\tc.Check(flushed, check.Equals, expectFlushed)\n\t}\n\n\t// Nothing flushed yet\n\twaitForFlush(nDirs*nFiles*(1<<20+64)+inodebytes, 0)\n\n\t// Flushing a non-empty dir \"/\" is non-recursive and there are\n\t// no top-level files, so this has no effect\n\tfs.Flush(\"/\", false)\n\twaitForFlush(nDirs*nFiles*(1<<20+64)+inodebytes, 0)\n\n\t// Flush the full block in dir0\n\tfs.Flush(\"dir0\", false)\n\tbigloclen := int64(32 + 9 + 51 + 64) // md5 + \"+\" + \"67xxxxxx\" + \"+Axxxxxx...\" + 64 (see (storedSegment)memorySize)\n\twaitForFlush((nDirs*nFiles-64)*(1<<20+64)+inodebytes+bigloclen*64, 64<<20)\n\n\terr = fs.Flush(\"dir-does-not-exist\", false)\n\tc.Check(err, check.NotNil)\n\n\t// Flush full blocks in all dirs\n\tfs.Flush(\"\", false)\n\twaitForFlush(nDirs*3*(1<<20+64)+inodebytes+bigloclen*64*nDirs, nDirs*64<<20)\n\n\t// Flush non-full blocks, too\n\tfs.Flush(\"\", true)\n\tsmallloclen := int64(32 + 8 + 51 + 64) 
// md5 + \"+\" + \"3xxxxxx\" + \"+Axxxxxx...\" + 64 (see (storedSegment)memorySize)\n\twaitForFlush(inodebytes+bigloclen*64*nDirs+smallloclen*3*nDirs, nDirs*67<<20)\n}\n\n// Even when writing lots of files/dirs from different goroutines, as\n// long as Flush(dir,false) is called after writing each file,\n// unflushed data should be limited to one full block per\n// concurrentWriter, plus one nearly-full block at the end of each\n// dir/stream.\nfunc (s *CollectionFSSuite) TestMaxUnflushed(c *check.C) {\n\tnDirs := int64(8)\n\tmaxUnflushed := (int64(concurrentWriters) + nDirs) << 26\n\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\n\trelease := make(chan struct{})\n\ttimeout := make(chan struct{})\n\ttime.AfterFunc(10*time.Second, func() { close(timeout) })\n\tvar putCount, concurrency int64\n\tvar unflushed int64\n\ts.kc.onWrite = func(p []byte) error {\n\t\tdefer atomic.AddInt64(&unflushed, -int64(len(p)))\n\t\tcur := atomic.AddInt64(&concurrency, 1)\n\t\tdefer atomic.AddInt64(&concurrency, -1)\n\t\tpc := atomic.AddInt64(&putCount, 1)\n\t\tif pc < int64(concurrentWriters) {\n\t\t\t// Block until we reach concurrentWriters, to\n\t\t\t// make sure we're really accepting concurrent\n\t\t\t// writes.\n\t\t\tselect {\n\t\t\tcase <-release:\n\t\t\tcase <-timeout:\n\t\t\t\tc.Error(\"timeout\")\n\t\t\t}\n\t\t} else if pc == int64(concurrentWriters) {\n\t\t\t// Unblock the first N-1 PUT reqs.\n\t\t\tclose(release)\n\t\t}\n\t\tc.Assert(cur <= int64(concurrentWriters), check.Equals, true)\n\t\tc.Assert(atomic.LoadInt64(&unflushed) <= maxUnflushed, check.Equals, true)\n\t\treturn nil\n\t}\n\n\tvar owg sync.WaitGroup\n\tmegabyte := make([]byte, 1<<20)\n\tfor i := int64(0); i < nDirs; i++ {\n\t\tdir := fmt.Sprintf(\"dir%d\", i)\n\t\tfs.Mkdir(dir, 0755)\n\t\towg.Add(1)\n\t\tgo func() {\n\t\t\tdefer owg.Done()\n\t\t\tdefer fs.Flush(dir, true)\n\t\t\tvar iwg sync.WaitGroup\n\t\t\tdefer iwg.Wait()\n\t\t\tfor j := 0; j < 67; j++ {\n\t\t\t\tiwg.Add(1)\n\t\t\t\tgo func(j int) {\n\t\t\t\t\tdefer iwg.Done()\n\t\t\t\t\tf, err := fs.OpenFile(fmt.Sprintf(\"%s/file%d\", dir, j), os.O_WRONLY|os.O_CREATE, 0)\n\t\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\t\tdefer f.Close()\n\t\t\t\t\tn, err := f.Write(megabyte)\n\t\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\t\tatomic.AddInt64(&unflushed, int64(n))\n\t\t\t\t\tfs.Flush(dir, false)\n\t\t\t\t}(j)\n\t\t\t}\n\t\t}()\n\t}\n\towg.Wait()\n\tfs.Flush(\"\", true)\n}\n\nfunc (s *CollectionFSSuite) TestFlushStress(c *check.C) {\n\tdone := false\n\tdefer func() { done = true }()\n\ttime.AfterFunc(10*time.Second, func() {\n\t\tif !done {\n\t\t\tpprof.Lookup(\"goroutine\").WriteTo(os.Stderr, 1)\n\t\t\tpanic(\"timeout\")\n\t\t}\n\t})\n\n\twrote := 0\n\ts.kc.onWrite = func(p []byte) error {\n\t\ts.kc.Lock()\n\t\ts.kc.blocks = map[string][]byte{}\n\t\twrote++\n\t\tdefer c.Logf(\"wrote block %d, %d bytes\", wrote, len(p))\n\t\ts.kc.Unlock()\n\t\ttime.Sleep(20 * time.Millisecond)\n\t\treturn nil\n\t}\n\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\n\tdata := make([]byte, 1<<20)\n\tfor i := 0; i < 3; i++ {\n\t\tdir := fmt.Sprintf(\"dir%d\", i)\n\t\tfs.Mkdir(dir, 0755)\n\t\tfor j := 0; j < 200; j++ {\n\t\t\tdata[0] = byte(j)\n\t\t\tf, err := fs.OpenFile(fmt.Sprintf(\"%s/file%d\", dir, j), os.O_WRONLY|os.O_CREATE, 0)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\t_, err = f.Write(data)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tf.Close()\n\t\t\tfs.Flush(dir, false)\n\t\t}\n\t\t_, err := 
fs.MarshalManifest(\".\")\n\t\tc.Check(err, check.IsNil)\n\t}\n}\n\nfunc (s *CollectionFSSuite) TestFlushShort(c *check.C) {\n\ts.kc.onWrite = func([]byte) error {\n\t\ts.kc.Lock()\n\t\ts.kc.blocks = map[string][]byte{}\n\t\ts.kc.Unlock()\n\t\treturn nil\n\t}\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\tfor _, blocksize := range []int{8, 1000000} {\n\t\tdir := fmt.Sprintf(\"dir%d\", blocksize)\n\t\terr = fs.Mkdir(dir, 0755)\n\t\tc.Assert(err, check.IsNil)\n\t\tdata := make([]byte, blocksize)\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tf, err := fs.OpenFile(fmt.Sprintf(\"%s/file%d\", dir, i), os.O_WRONLY|os.O_CREATE, 0)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\t_, err = f.Write(data)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tf.Close()\n\t\t\tfs.Flush(dir, false)\n\t\t}\n\t\tfs.Flush(dir, true)\n\t\t_, err := fs.MarshalManifest(\".\")\n\t\tc.Check(err, check.IsNil)\n\t}\n}\n\nfunc (s *CollectionFSSuite) TestBrokenManifests(c *check.C) {\n\tfor _, txt := range []string{\n\t\t\"\\n\",\n\t\t\".\\n\",\n\t\t\". \\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+0\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+0 \\n\",\n\t\t\". 0:0:foo\\n\",\n\t\t\".  0:0:foo\\n\",\n\t\t\". 0:0:foo 0:0:bar\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e 0:0:foo\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+0 :0:0:foo\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+0 foo:0:foo\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+0 0:foo:foo\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+1 0:1:foo 1:1:bar\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+1 0:1:\\\\056\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+1 0:1:\\\\056\\\\057\\\\056\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+1 0:1:.\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+1 0:1:..\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+0 0:0:..\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/..\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+1 0:0:foo\\n./foo d41d8cd98f00b204e9800998ecf8427e+1 0:0:bar\\n\",\n\t\t\"./foo d41d8cd98f00b204e9800998ecf8427e+1 0:0:bar\\n. d41d8cd98f00b204e9800998ecf8427e+1 0:0:foo\\n\",\n\t} {\n\t\tc.Logf(\"<-%q\", txt)\n\t\tfs, err := (&Collection{ManifestText: txt}).FileSystem(s.client, s.kc)\n\t\tc.Check(fs, check.IsNil)\n\t\tc.Logf(\"-> %s\", err)\n\t\tc.Check(err, check.NotNil)\n\t}\n}\n\nfunc (s *CollectionFSSuite) TestEdgeCaseManifests(c *check.C) {\n\tfor _, txt := range []string{\n\t\t\"\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+0 0:0:...\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+0 0:0:. 0:0:. 0:0:\\\\056 0:0:\\\\056\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/. 0:0:. 0:0:foo\\\\057bar\\\\057\\\\056\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo 0:0:foo 0:0:bar\\n\",\n\t\t\". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/bar\\n./foo d41d8cd98f00b204e9800998ecf8427e+0 0:0:bar\\n\",\n\t} {\n\t\tc.Logf(\"<-%q\", txt)\n\t\tfs, err := (&Collection{ManifestText: txt}).FileSystem(s.client, s.kc)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(fs, check.NotNil)\n\t}\n}\n\nvar fakeLocator = func() []string {\n\tlocs := make([]string, 10)\n\tfor i := range locs {\n\t\tlocs[i] = fmt.Sprintf(\"%x+%d\", md5.Sum(make([]byte, i)), i)\n\t\tif i%2 == 1 {\n\t\t\tlocs[i] += \"+Awhatever+Zotherhints\"\n\t\t}\n\t}\n\treturn locs\n}()\n\nfunc (s *CollectionFSSuite) TestReplaceSegments_HappyPath(c *check.C) {\n\tfs, err := (&Collection{\n\t\tManifestText: \". 
\" + fakeLocator[1] + \" \" + fakeLocator[2] + \" 0:3:file3\\n\",\n\t}).FileSystem(nil, &keepClientStub{})\n\tc.Assert(err, check.IsNil)\n\tchanged, err := fs.ReplaceSegments(map[BlockSegment]BlockSegment{\n\t\tBlockSegment{fakeLocator[1], 0, 1}: BlockSegment{fakeLocator[3], 0, 1},\n\t\tBlockSegment{fakeLocator[2], 0, 2}: BlockSegment{fakeLocator[3], 1, 2},\n\t})\n\tc.Check(changed, check.Equals, true)\n\tc.Check(err, check.IsNil)\n\tmtxt, err := fs.MarshalManifest(\".\")\n\tc.Check(err, check.IsNil)\n\tc.Check(mtxt, check.Equals, \". \"+fakeLocator[3]+\" 0:3:file3\\n\")\n}\n\nfunc (s *CollectionFSSuite) TestReplaceSegments_InvalidOffset(c *check.C) {\n\torigtxt := \". \" + fakeLocator[1] + \" \" + fakeLocator[2] + \" 0:3:file3\\n\"\n\tfs, err := (&Collection{\n\t\tManifestText: origtxt,\n\t}).FileSystem(nil, &keepClientStub{})\n\tc.Assert(err, check.IsNil)\n\tchanged, err := fs.ReplaceSegments(map[BlockSegment]BlockSegment{\n\t\tBlockSegment{fakeLocator[1], 0, 1}: BlockSegment{fakeLocator[3], 0, 1},\n\t\tBlockSegment{fakeLocator[2], 0, 2}: BlockSegment{fakeLocator[3], 2, 2},\n\t})\n\tc.Check(changed, check.Equals, false)\n\tc.Check(err, check.ErrorMatches, `invalid replacement: offset 2 \\+ length 2 > block size 3`)\n\tmtxt, err := fs.MarshalManifest(\".\")\n\tc.Check(err, check.IsNil)\n\tc.Check(mtxt, check.Equals, origtxt)\n}\n\nfunc (s *CollectionFSSuite) TestReplaceSegments_LengthMismatch(c *check.C) {\n\torigtxt := \". \" + fakeLocator[1] + \" \" + fakeLocator[2] + \" 0:3:file3\\n\"\n\tfs, err := (&Collection{\n\t\tManifestText: origtxt,\n\t}).FileSystem(nil, &keepClientStub{})\n\tc.Assert(err, check.IsNil)\n\tchanged, err := fs.ReplaceSegments(map[BlockSegment]BlockSegment{\n\t\tBlockSegment{fakeLocator[1], 0, 1}: BlockSegment{fakeLocator[3], 0, 1},\n\t\tBlockSegment{fakeLocator[2], 0, 2}: BlockSegment{fakeLocator[3], 0, 3},\n\t})\n\tc.Check(changed, check.Equals, false)\n\tc.Check(err, check.ErrorMatches, `mismatched length: replacing segment length 2 with segment length 3`)\n\tmtxt, err := fs.MarshalManifest(\".\")\n\tc.Check(err, check.IsNil)\n\tc.Check(mtxt, check.Equals, origtxt)\n}\n\nfunc (s *CollectionFSSuite) TestReplaceSegments_SkipUnreferenced(c *check.C) {\n\tfs, err := (&Collection{\n\t\tManifestText: \". \" + fakeLocator[1] + \" \" + fakeLocator[2] + \" \" + fakeLocator[3] + \" 0:6:file6\\n\",\n\t}).FileSystem(nil, &keepClientStub{})\n\tc.Assert(err, check.IsNil)\n\tchanged, err := fs.ReplaceSegments(map[BlockSegment]BlockSegment{\n\t\tBlockSegment{fakeLocator[1], 0, 1}: BlockSegment{fakeLocator[4], 0, 1}, // skipped because [5] unref\n\t\tBlockSegment{fakeLocator[2], 0, 2}: BlockSegment{fakeLocator[4], 1, 2}, // skipped because [5] unref\n\t\tBlockSegment{fakeLocator[5], 0, 2}: BlockSegment{fakeLocator[4], 1, 2}, // [5] unreferenced in orig manifest\n\t\tBlockSegment{fakeLocator[3], 0, 3}: BlockSegment{fakeLocator[6], 3, 3}, // applied\n\t})\n\tc.Check(changed, check.Equals, true)\n\tc.Check(err, check.IsNil)\n\tmtxt, err := fs.MarshalManifest(\".\")\n\tc.Check(err, check.IsNil)\n\tc.Check(mtxt, check.Equals, \". \"+fakeLocator[1]+\" \"+fakeLocator[2]+\" \"+fakeLocator[6]+\" 0:3:file6 6:3:file6\\n\")\n}\n\nfunc (s *CollectionFSSuite) TestReplaceSegments_SkipIncompleteSegment(c *check.C) {\n\torigtxt := \". 
\" + fakeLocator[2] + \" \" + fakeLocator[3] + \" 0:5:file5\\n\"\n\tfs, err := (&Collection{\n\t\tManifestText: origtxt,\n\t}).FileSystem(nil, &keepClientStub{})\n\tc.Assert(err, check.IsNil)\n\tchanged, err := fs.ReplaceSegments(map[BlockSegment]BlockSegment{\n\t\tBlockSegment{fakeLocator[2], 0, 1}: BlockSegment{fakeLocator[4], 0, 1}, // length=1 does not match the length=2 segment\n\t})\n\tc.Check(changed, check.Equals, false)\n\tc.Check(err, check.IsNil)\n\tmtxt, err := fs.MarshalManifest(\".\")\n\tc.Check(err, check.IsNil)\n\tc.Check(mtxt, check.Equals, origtxt)\n}\n\nfunc (s *CollectionFSSuite) testPlanRepack(c *check.C, opts RepackOptions, manifest string, expectPlan [][]storedSegment) {\n\tfs, err := (&Collection{ManifestText: manifest}).FileSystem(nil, s.kc)\n\tc.Assert(err, check.IsNil)\n\tcfs := fs.(*collectionFileSystem)\n\trepl, err := cfs.planRepack(context.Background(), opts, cfs.root.(*dirnode))\n\tc.Assert(err, check.IsNil)\n\n\t// we always expect kc==cfs, so we fill this in instead of\n\t// requiring each test case to repeat it\n\tfor _, pp := range expectPlan {\n\t\tfor i := range pp {\n\t\t\tpp[i].kc = cfs\n\t\t}\n\t}\n\tc.Check(repl, check.DeepEquals, expectPlan)\n}\n\nfunc (s *CollectionFSSuite) TestPlanRepack_2x32M(c *check.C) {\n\ts.testPlanRepack(c,\n\t\tRepackOptions{Full: true},\n\t\t\". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000 0:64000000:file\\n\",\n\t\t[][]storedSegment{\n\t\t\t{\n\t\t\t\t{locator: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000\", size: 32000000, length: 32000000, offset: 0},\n\t\t\t\t{locator: \"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000\", size: 32000000, length: 32000000, offset: 0},\n\t\t\t},\n\t\t})\n}\n\nfunc (s *CollectionFSSuite) TestPlanRepack_2x32M_Cached(c *check.C) {\n\ts.kc.cached = map[string]bool{\n\t\t\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\": true,\n\t\t\"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\": true,\n\t}\n\ts.testPlanRepack(c,\n\t\tRepackOptions{Full: true, CachedOnly: true},\n\t\t\". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000 0:64000000:file\\n\",\n\t\t[][]storedSegment{\n\t\t\t{\n\t\t\t\t{locator: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000\", size: 32000000, length: 32000000, offset: 0},\n\t\t\t\t{locator: \"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000\", size: 32000000, length: 32000000, offset: 0},\n\t\t\t},\n\t\t})\n}\n\nfunc (s *CollectionFSSuite) TestPlanRepack_2x32M_OneCached(c *check.C) {\n\ts.kc.cached = map[string]bool{\n\t\t\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\": true,\n\t}\n\ts.testPlanRepack(c,\n\t\tRepackOptions{Full: true, CachedOnly: true},\n\t\t\". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000 0:64000000:file\\n\",\n\t\tnil)\n}\n\nfunc (s *CollectionFSSuite) TestPlanRepack_3x32M_TwoCached(c *check.C) {\n\ts.kc.cached = map[string]bool{\n\t\t\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\": true,\n\t\t\"cccccccccccccccccccccccccccccccc\": true,\n\t}\n\ts.testPlanRepack(c,\n\t\tRepackOptions{Full: true, CachedOnly: true},\n\t\t\". 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000 cccccccccccccccccccccccccccccccc+32000000 0:96000000:file\\n\",\n\t\t[][]storedSegment{\n\t\t\t{\n\t\t\t\t{locator: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000\", size: 32000000, length: 32000000, offset: 0},\n\t\t\t\t{locator: \"cccccccccccccccccccccccccccccccc+32000000\", size: 32000000, length: 32000000, offset: 0},\n\t\t\t},\n\t\t})\n}\n\nfunc (s *CollectionFSSuite) TestPlanRepack_2x32Mi(c *check.C) {\n\ts.testPlanRepack(c,\n\t\tRepackOptions{Full: true},\n\t\t\". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+33554432 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+33554432 0:67108864:file\\n\",\n\t\tnil)\n}\n\nfunc (s *CollectionFSSuite) TestPlanRepack_2x32MiMinus1(c *check.C) {\n\ts.testPlanRepack(c,\n\t\tRepackOptions{Full: true},\n\t\t\". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+33554431 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+33554431 0:67108862:file\\n\",\n\t\t[][]storedSegment{\n\t\t\t{\n\t\t\t\t{locator: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+33554431\", size: 33554431, length: 33554431, offset: 0},\n\t\t\t\t{locator: \"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+33554431\", size: 33554431, length: 33554431, offset: 0},\n\t\t\t},\n\t\t})\n}\n\nfunc (s *CollectionFSSuite) TestPlanRepack_3x32M(c *check.C) {\n\ts.testPlanRepack(c,\n\t\tRepackOptions{Full: true},\n\t\t\". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000 cccccccccccccccccccccccccccccccc+32000000 0:96000000:file\\n\",\n\t\t[][]storedSegment{\n\t\t\t{\n\t\t\t\t{locator: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+32000000\", size: 32000000, length: 32000000, offset: 0},\n\t\t\t\t{locator: \"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+32000000\", size: 32000000, length: 32000000, offset: 0},\n\t\t\t},\n\t\t})\n}\n\nfunc (s *CollectionFSSuite) TestPlanRepack_3x42M(c *check.C) {\n\t// Each block is more than half full, so do nothing.\n\ts.testPlanRepack(c,\n\t\tRepackOptions{Full: true},\n\t\t\". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+42000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+42000000 cccccccccccccccccccccccccccccccc+42000000 0:126000000:file\\n\",\n\t\tnil)\n}\n\nfunc (s *CollectionFSSuite) TestPlanRepack_Premature(c *check.C) {\n\t// Repacking would reduce to one block, but it would still be\n\t// too short to be worthwhile, so do nothing.\n\ts.testPlanRepack(c,\n\t\tRepackOptions{Full: true},\n\t\t\". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+123 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+123 cccccccccccccccccccccccccccccccc+123 0:369:file\\n\",\n\t\tnil)\n}\n\nfunc (s *CollectionFSSuite) TestPlanRepack_4x22M_NonAdjacent(c *check.C) {\n\t// Repack the first three 22M blocks into one 66M block.\n\t// Don't touch the 44M blocks or the final 22M block.\n\ts.testPlanRepack(c,\n\t\tRepackOptions{Full: true},\n\t\t\". 
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+44000000 cccccccccccccccccccccccccccccccc+22000000 dddddddddddddddddddddddddddddddd+44000000 eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee+22000000 ffffffffffffffffffffffffffffffff+44000000 00000000000000000000000000000000+22000000 0:220000000:file\\n\",\n\t\t[][]storedSegment{\n\t\t\t{\n\t\t\t\t{locator: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000\", size: 22000000, length: 22000000, offset: 0},\n\t\t\t\t{locator: \"cccccccccccccccccccccccccccccccc+22000000\", size: 22000000, length: 22000000, offset: 0},\n\t\t\t\t{locator: \"eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee+22000000\", size: 22000000, length: 22000000, offset: 0},\n\t\t\t},\n\t\t})\n}\n\nfunc (s *CollectionFSSuite) TestPlanRepack_2x22M_DuplicateBlock(c *check.C) {\n\t// Repack a+b+c, not a+b+a.\n\ts.testPlanRepack(c,\n\t\tRepackOptions{Full: true},\n\t\t\". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+22000000 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000 0:66000000:file\\n\"+\n\t\t\t\"./dir cccccccccccccccccccccccccccccccc+22000000 0:22000000:file\\n\",\n\t\t[][]storedSegment{\n\t\t\t{\n\t\t\t\t{locator: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000\", size: 22000000, length: 22000000, offset: 0},\n\t\t\t\t{locator: \"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+22000000\", size: 22000000, length: 22000000, offset: 0},\n\t\t\t\t{locator: \"cccccccccccccccccccccccccccccccc+22000000\", size: 22000000, length: 22000000, offset: 0},\n\t\t\t},\n\t\t})\n}\n\nfunc (s *CollectionFSSuite) TestPlanRepack_2x22M_DuplicateBlock_TooShort(c *check.C) {\n\t// Repacking a+b would not meet the 32MiB threshold.\n\ts.testPlanRepack(c,\n\t\tRepackOptions{Full: true},\n\t\t\". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000 bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+1 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+22000000 0:44000001:file\\n\",\n\t\tnil)\n}\n\nfunc (s *CollectionFSSuite) TestPlanRepack_SiblingsTogether(c *check.C) {\n\t// Pack sibling files' (\"a\" and \"c\") segments together before\n\t// other subdirs (\"b/b\"), even though subdir \"b\" sorts between\n\t// \"a\" and \"c\".\n\ts.testPlanRepack(c,\n\t\tRepackOptions{Full: true},\n\t\t\". aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+15000000 cccccccccccccccccccccccccccccccc+15000000 0:15000000:a 15000000:15000000:c\\n\"+\n\t\t\t\"./b bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+15000000 0:15000000:b\\n\",\n\t\t[][]storedSegment{\n\t\t\t{\n\t\t\t\t{locator: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+15000000\", size: 15000000, length: 15000000, offset: 0},\n\t\t\t\t{locator: \"cccccccccccccccccccccccccccccccc+15000000\", size: 15000000, length: 15000000, offset: 0},\n\t\t\t\t{locator: \"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+15000000\", size: 15000000, length: 15000000, offset: 0},\n\t\t\t},\n\t\t})\n}\n\nfunc (s *CollectionFSSuite) TestRepackData(c *check.C) {\n\tfs, err := (&Collection{}).FileSystem(nil, s.kc)\n\tc.Assert(err, check.IsNil)\n\tcfs := fs.(*collectionFileSystem)\n\n\ttestBlockWritten := make(map[int]string)\n\t// testSegment(N) returns an N-byte segment of a block\n\t// containing repeated byte N%256.  The segment's offset\n\t// within the block is N/1000000 (*).  
The block also has\n\t// N/1000000 null bytes following the segment(*).\n\t//\n\t// If N=404, the block is not readable.\n\t//\n\t// (*) ...unless that would result in an oversize block.\n\ttestSegment := func(testSegmentNum int) storedSegment {\n\t\tlength := testSegmentNum\n\t\toffset := testSegmentNum / 1000000\n\t\tif offset+length > maxBlockSize {\n\t\t\toffset = 0\n\t\t}\n\t\tsize := testSegmentNum + offset\n\t\tif size+offset <= maxBlockSize {\n\t\t\tsize += offset\n\t\t}\n\t\tif _, stored := testBlockWritten[testSegmentNum]; !stored {\n\t\t\tdata := make([]byte, size)\n\t\t\tfor b := range data[offset : offset+length] {\n\t\t\t\tdata[b] = byte(testSegmentNum & 0xff)\n\t\t\t}\n\t\t\tresp, err := s.kc.BlockWrite(context.Background(), BlockWriteOptions{Data: data})\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\ttestBlockWritten[testSegmentNum] = resp.Locator\n\t\t\tif testSegmentNum == 404 {\n\t\t\t\tdelete(s.kc.blocks, resp.Locator[:32])\n\t\t\t}\n\t\t}\n\t\treturn storedSegment{\n\t\t\tkc:      cfs,\n\t\t\tlocator: testBlockWritten[testSegmentNum],\n\t\t\tsize:    size,\n\t\t\tlength:  length,\n\t\t\toffset:  offset,\n\t\t}\n\t}\n\tfor trialIndex, trial := range []struct {\n\t\tlabel string\n\t\t// \"input\" here has the same shape as repackData's\n\t\t// [][]storedSegment argument, but uses int N as\n\t\t// shorthand for testSegment(N).\n\t\tinput              [][]int\n\t\tonWrite            func([]byte) error\n\t\texpectRepackedLen  int\n\t\texpectErrorMatches string\n\t}{\n\t\t{\n\t\t\tlabel:             \"one {3 blocks to 1} merge\",\n\t\t\tinput:             [][]int{{1, 2, 3}},\n\t\t\texpectRepackedLen: 3,\n\t\t},\n\t\t{\n\t\t\tlabel:             \"two {3 blocks to 1} merges\",\n\t\t\tinput:             [][]int{{1, 2, 3}, {4, 5, 6}},\n\t\t\texpectRepackedLen: 6,\n\t\t},\n\t\t{\n\t\t\tlabel:             \"merge two {3 blocks to 1} merges\",\n\t\t\tinput:             [][]int{{1, 2, 3}, {4, 5, 6}},\n\t\t\texpectRepackedLen: 6,\n\t\t},\n\t\t{\n\t\t\tlabel:             \"no-op\",\n\t\t\tinput:             nil,\n\t\t\texpectRepackedLen: 0,\n\t\t},\n\t\t{\n\t\t\tlabel:             \"merge 3 blocks plus a zero-length segment -- not expected to be used, but should work\",\n\t\t\tinput:             [][]int{{1, 2, 0, 3}},\n\t\t\texpectRepackedLen: 4,\n\t\t},\n\t\t{\n\t\t\tlabel:             \"merge a single segment -- not expected to be used, but should work\",\n\t\t\tinput:             [][]int{{12345}},\n\t\t\texpectRepackedLen: 1,\n\t\t},\n\t\t{\n\t\t\tlabel:             \"merge a single empty segment -- not expected to be used, but should work\",\n\t\t\tinput:             [][]int{{0}},\n\t\t\texpectRepackedLen: 1,\n\t\t},\n\t\t{\n\t\t\tlabel:             \"merge zero segments -- not expected to be used, but should work\",\n\t\t\tinput:             [][]int{{}},\n\t\t\texpectRepackedLen: 0,\n\t\t},\n\t\t{\n\t\t\tlabel:             \"merge same orig segment into two different replacements -- not expected to be used, but should work\",\n\t\t\tinput:             [][]int{{1, 22, 3}, {4, 22, 6}},\n\t\t\texpectRepackedLen: 5,\n\t\t},\n\t\t{\n\t\t\tlabel:             \"identical merges -- not expected to be used, but should work\",\n\t\t\tinput:             [][]int{{11, 22, 33}, {11, 22, 33}},\n\t\t\texpectRepackedLen: 3,\n\t\t},\n\t\t{\n\t\t\tlabel:              \"read error on first segment\",\n\t\t\tinput:              [][]int{{404, 2, 3}},\n\t\t\texpectRepackedLen:  0,\n\t\t\texpectErrorMatches: \"404 block not found\",\n\t\t},\n\t\t{\n\t\t\tlabel:              \"read error on second 
segment\",\n\t\t\tinput:              [][]int{{1, 404, 3}},\n\t\t\texpectErrorMatches: \"404 block not found\",\n\t\t},\n\t\t{\n\t\t\tlabel:              \"read error on last segment\",\n\t\t\tinput:              [][]int{{1, 2, 404}},\n\t\t\texpectErrorMatches: \"404 block not found\",\n\t\t},\n\t\t{\n\t\t\tlabel:              \"merge does not fit in one block\",\n\t\t\tinput:              [][]int{{50000000, 20000000}},\n\t\t\texpectErrorMatches: \"combined length 70000000 would exceed maximum block size 67108864\",\n\t\t},\n\t\t{\n\t\t\tlabel:              \"write error\",\n\t\t\tinput:              [][]int{{1, 2, 3}},\n\t\t\tonWrite:            func(p []byte) error { return errors.New(\"stub write error\") },\n\t\t\texpectErrorMatches: \"stub write error\",\n\t\t},\n\t} {\n\t\tc.Logf(\"trial %d: %s\", trialIndex, trial.label)\n\t\tvar input [][]storedSegment\n\t\tfor _, seglist := range trial.input {\n\t\t\tvar segments []storedSegment\n\t\t\tfor _, segnum := range seglist {\n\t\t\t\tsegments = append(segments, testSegment(segnum))\n\t\t\t}\n\t\t\tinput = append(input, segments)\n\t\t}\n\t\ts.kc.onWrite = trial.onWrite\n\t\trepacked, err := cfs.repackData(context.Background(), input)\n\t\tif trial.expectErrorMatches != \"\" {\n\t\t\tc.Check(err, check.ErrorMatches, trial.expectErrorMatches)\n\t\t\tcontinue\n\t\t}\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(repacked, check.HasLen, trial.expectRepackedLen)\n\t\tfor _, origSegments := range input {\n\t\t\treplLocator := \"\"\n\t\t\tfor _, origSegment := range origSegments {\n\t\t\t\torigBlock := BlockSegment{\n\t\t\t\t\tLocator: stripAllHints(origSegment.locator),\n\t\t\t\t\tLength:  origSegment.length,\n\t\t\t\t\tOffset:  origSegment.offset,\n\t\t\t\t}\n\t\t\t\tbuf := make([]byte, origSegment.size)\n\t\t\t\tn, err := cfs.ReadAt(repacked[origBlock].Locator, buf, repacked[origBlock].Offset)\n\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\tc.Check(n, check.Equals, len(buf))\n\t\t\t\texpectContent := byte(origSegment.length & 0xff)\n\t\t\t\tfor segoffset, b := range buf {\n\t\t\t\t\tif b != expectContent {\n\t\t\t\t\t\tc.Errorf(\"content mismatch: origSegment.locator %s -> replLocator %s offset %d: byte %d is %d, expected %d\", origSegment.locator, replLocator, repacked[origBlock].Offset, segoffset, b, expectContent)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype dataToWrite struct {\n\tpath string\n\tdata func() []byte\n}\n\nfunc dataToWrite_SourceTree(c *check.C, maxfiles int) (writes []dataToWrite) {\n\tgitdir, err := filepath.Abs(\"../../..\")\n\tc.Assert(err, check.IsNil)\n\tinfs := os.DirFS(gitdir)\n\tbuf, err := exec.Command(\"git\", \"-C\", gitdir, \"ls-files\").CombinedOutput()\n\tc.Assert(err, check.IsNil, check.Commentf(\"%s\", buf))\n\tfor _, path := range bytes.Split(buf, []byte(\"\\n\")) {\n\t\tpath := string(path)\n\t\tif path == \"\" {\n\t\t\t// empty entry after the trailing newline in ls-files output\n\t\t\tcontinue\n\t\t}\n\t\tfi, err := fs.Stat(infs, path)\n\t\tif err != nil {\n\t\t\t// e.g., dangling symlink\n\t\t\tcontinue\n\t\t}\n\t\tif fi.IsDir() || fi.Mode()&os.ModeSymlink != 0 {\n\t\t\tcontinue\n\t\t}\n\t\twrites = append(writes, dataToWrite{\n\t\t\tpath: path,\n\t\t\tdata: func() []byte {\n\t\t\t\tdata, err := fs.ReadFile(infs, path)\n\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\treturn data\n\t\t\t},\n\t\t})\n\t\tif len(writes) >= maxfiles {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\nfunc dataToWrite_ConstantSizeFilesInDirs(c *check.C, ndirs, nfiles, filesize, chunksize int) (writes []dataToWrite) {\n\tfor chunk := 0; chunk == 0 || (chunksize > 0 && chunk < 
(filesize+chunksize-1)/chunksize); chunk++ {\n\t\tfor i := 0; i < nfiles; i++ {\n\t\t\tdatasize := filesize\n\t\t\tif chunksize > 0 {\n\t\t\t\tdatasize = chunksize\n\t\t\t\tif remain := filesize - chunk*chunksize; remain < chunksize {\n\t\t\t\t\tdatasize = remain\n\t\t\t\t}\n\t\t\t}\n\t\t\tdata := make([]byte, datasize)\n\t\t\tcopy(data, []byte(fmt.Sprintf(\"%d chunk %d\", i, chunk)))\n\t\t\twrites = append(writes, dataToWrite{\n\t\t\t\tpath: fmt.Sprintf(\"dir%d/file%d\", i*ndirs/nfiles, i),\n\t\t\t\tdata: func() []byte { return data },\n\t\t\t})\n\t\t}\n\t}\n\treturn\n}\n\nvar enableRepackCharts = os.Getenv(\"ARVADOS_TEST_REPACK_CHARTS\") != \"\"\n\nfunc (s *CollectionFSSuite) skipMostRepackCostTests(c *check.C) {\n\tif !enableRepackCharts {\n\t\tc.Skip(\"Set ARVADOS_TEST_REPACK_CHARTS to run more cost tests and generate data for charts like https://dev.arvados.org/issues/22320#note-14\")\n\t}\n}\n\n// If we upload 500 files (or 99999) and get a manifest with 60 or\n// fewer blocks (the third parameter of testRepackCost), then repacking\n// is working.  The number of blocks is going to be proportional to the\n// amount of data in the source tree, so these numbers may need to be\n// updated periodically, but what we're really testing for is that we\n// didn't get back a manifest with 500 or 5000 blocks.\n\nfunc (s *CollectionFSSuite) TestRepackCost_SourceTree_Part(c *check.C) {\n\ts.testRepackCost(c, dataToWrite_SourceTree(c, 500), 60)\n}\n\nfunc (s *CollectionFSSuite) TestRepackCost_SourceTree(c *check.C) {\n\ts.skipMostRepackCostTests(c)\n\ts.testRepackCost(c, dataToWrite_SourceTree(c, 99999), 60)\n}\n\nfunc (s *CollectionFSSuite) TestRepackCost_1000x_1M_Files(c *check.C) {\n\ts.skipMostRepackCostTests(c)\n\ts.testRepackCost(c, dataToWrite_ConstantSizeFilesInDirs(c, 10, 1000, 1000000, 0), 80)\n}\n\nfunc (s *CollectionFSSuite) TestRepackCost_100x_8M_Files(c *check.C) {\n\ts.skipMostRepackCostTests(c)\n\ts.testRepackCost(c, dataToWrite_ConstantSizeFilesInDirs(c, 10, 100, 8000000, 0), 20)\n}\n\nfunc (s *CollectionFSSuite) TestRepackCost_100x_8M_Files_1M_Chunks(c *check.C) {\n\ts.skipMostRepackCostTests(c)\n\ts.testRepackCost(c, dataToWrite_ConstantSizeFilesInDirs(c, 10, 100, 8000000, 1000000), 50)\n}\n\nfunc (s *CollectionFSSuite) TestRepackCost_100x_10M_Files_1M_Chunks(c *check.C) {\n\ts.skipMostRepackCostTests(c)\n\ts.testRepackCost(c, dataToWrite_ConstantSizeFilesInDirs(c, 10, 100, 10000000, 1000000), 80)\n}\n\nfunc (s *CollectionFSSuite) TestRepackCost_100x_10M_Files(c *check.C) {\n\ts.skipMostRepackCostTests(c)\n\ts.testRepackCost(c, dataToWrite_ConstantSizeFilesInDirs(c, 10, 100, 10000000, 0), 100)\n}\n\nfunc (s *CollectionFSSuite) testRepackCost(c *check.C, writes []dataToWrite, maxBlocks int) {\n\ts.kc.blocks = make(map[string][]byte)\n\ttestfs, err := (&Collection{}).FileSystem(nil, s.kc)\n\tc.Assert(err, check.IsNil)\n\tcfs := testfs.(*collectionFileSystem)\n\tdirsCreated := make(map[string]bool)\n\tbytesContent := 0\n\tbytesWritten := func() (n int) {\n\t\ts.kc.Lock()\n\t\tdefer s.kc.Unlock()\n\t\tfor _, data := range s.kc.blocks {\n\t\t\tn += len(data)\n\t\t}\n\t\treturn\n\t}\n\tblocksInManifest := func() int {\n\t\tblocks := make(map[string]bool)\n\t\tcfs.fileSystem.root.(*dirnode).walkSegments(func(s segment) segment {\n\t\t\tblocks[s.(storedSegment).blockSegment().StripAllHints().Locator] = true\n\t\t\treturn s\n\t\t})\n\t\treturn len(blocks)\n\t}\n\ttRepackNoop := time.Duration(0)\n\tnRepackNoop := 0\n\ttRepackTotal := time.Duration(0)\n\tnRepackTotal := 0\n\tfilesWritten := 
make(map[string]bool)\n\tstats := bytes.NewBuffer(nil)\n\tfmt.Fprint(stats, \"writes\\tfiles\\tbytes_in_files\\tblocks\\tbytes_written_backend\\tn_repacked\\tn_repack_noop\\tseconds_repacking\\n\")\n\tfor writeIndex, write := range writes {\n\t\tfor i, c := range write.path {\n\t\t\tif c == '/' && !dirsCreated[write.path[:i]] {\n\t\t\t\ttestfs.Mkdir(write.path[:i], 0700)\n\t\t\t\tdirsCreated[write.path[:i]] = true\n\t\t\t}\n\t\t}\n\t\tf, err := testfs.OpenFile(write.path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0700)\n\t\tc.Assert(err, check.IsNil)\n\t\tfilesWritten[write.path] = true\n\t\tdata := write.data()\n\t\t_, err = f.Write(data)\n\t\tc.Assert(err, check.IsNil)\n\t\terr = f.Close()\n\t\tc.Assert(err, check.IsNil)\n\t\tbytesContent += len(data)\n\n\t\t_, err = cfs.MarshalManifest(\"\")\n\t\tc.Assert(err, check.IsNil)\n\t\tt0 := time.Now()\n\t\tn, err := cfs.Repack(context.Background(), RepackOptions{})\n\t\tc.Assert(err, check.IsNil)\n\t\ttRepack := time.Since(t0)\n\t\ttRepackTotal += tRepack\n\t\tnRepackTotal++\n\n\t\tif n == 0 {\n\t\t\ttRepackNoop += tRepack\n\t\t\tnRepackNoop++\n\t\t} else if bytesWritten()/4 > bytesContent {\n\t\t\t// Rewriting data >4x on average means\n\t\t\t// something is terribly wrong -- give up now\n\t\t\t// instead of going OOM.\n\t\t\tc.Logf(\"something is terribly wrong -- bytesWritten %d >> bytesContent %d\", bytesWritten(), bytesContent)\n\t\t\tc.FailNow()\n\t\t}\n\t\tfmt.Fprintf(stats, \"%d\\t%d\\t%d\\t%d\\t%d\\t%d\\t%d\\t%.06f\\n\", writeIndex+1, len(filesWritten), bytesContent, blocksInManifest(), bytesWritten(), nRepackTotal-nRepackNoop, nRepackNoop, tRepackTotal.Seconds())\n\t}\n\tc.Check(err, check.IsNil)\n\tc.Check(blocksInManifest() <= maxBlocks, check.Equals, true, check.Commentf(\"expect %d <= %d\", blocksInManifest(), maxBlocks))\n\n\tc.Logf(\"writes %d files %d bytesContent %d bytesWritten %d bytesRewritten %d blocksInManifest %d\", len(writes), len(filesWritten), bytesContent, bytesWritten(), bytesWritten()-bytesContent, blocksInManifest())\n\tc.Logf(\"spent %v on %d Repack calls, average %v per call\", tRepackTotal, nRepackTotal, tRepackTotal/time.Duration(nRepackTotal))\n\tc.Logf(\"spent %v on %d Repack calls that had no effect, average %v per call\", tRepackNoop, nRepackNoop, tRepackNoop/time.Duration(nRepackNoop))\n\n\tif enableRepackCharts {\n\t\t// write stats to tmp/{testname}_stats.tsv\n\t\terr = os.Mkdir(\"tmp\", 0777)\n\t\tif !os.IsExist(err) {\n\t\t\tc.Check(err, check.IsNil)\n\t\t}\n\t\terr = os.WriteFile(\"tmp/\"+c.TestName()+\"_stats.tsv\", stats.Bytes(), 0666)\n\t\tc.Check(err, check.IsNil)\n\t}\n}\n\nfunc (s *CollectionFSSuite) TestSnapshotSplice(c *check.C) {\n\tfiledata1 := \"hello snapshot+splice world\\n\"\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\t{\n\t\tf, err := fs.OpenFile(\"file1\", os.O_CREATE|os.O_RDWR, 0700)\n\t\tc.Assert(err, check.IsNil)\n\t\t_, err = f.Write([]byte(filedata1))\n\t\tc.Assert(err, check.IsNil)\n\t\terr = f.Close()\n\t\tc.Assert(err, check.IsNil)\n\t}\n\n\tsnap, err := Snapshot(fs, \"/\")\n\tc.Assert(err, check.IsNil)\n\terr = Splice(fs, \"dir1\", snap)\n\tc.Assert(err, check.IsNil)\n\tf, err := fs.Open(\"dir1/file1\")\n\tc.Assert(err, check.IsNil)\n\tbuf, err := io.ReadAll(f)\n\tc.Assert(err, check.IsNil)\n\tc.Check(string(buf), check.Equals, filedata1)\n}\n\nfunc (s *CollectionFSSuite) TestRefreshSignatures(c *check.C) {\n\tfiledata1 := \"hello refresh signatures world\\n\"\n\tfs, err := (&Collection{}).FileSystem(s.client, s.kc)\n\tc.Assert(err, 
check.IsNil)\n\tfs.Mkdir(\"d1\", 0700)\n\tf, err := fs.OpenFile(\"d1/file1\", os.O_CREATE|os.O_RDWR, 0700)\n\tc.Assert(err, check.IsNil)\n\t_, err = f.Write([]byte(filedata1))\n\tc.Assert(err, check.IsNil)\n\terr = f.Close()\n\tc.Assert(err, check.IsNil)\n\n\tfiledata2 := \"hello refresh signatures universe\\n\"\n\tfs.Mkdir(\"d2\", 0700)\n\tf, err = fs.OpenFile(\"d2/file2\", os.O_CREATE|os.O_RDWR, 0700)\n\tc.Assert(err, check.IsNil)\n\t_, err = f.Write([]byte(filedata2))\n\tc.Assert(err, check.IsNil)\n\terr = f.Close()\n\tc.Assert(err, check.IsNil)\n\ttxt, err := fs.MarshalManifest(\".\")\n\tc.Assert(err, check.IsNil)\n\tvar saved Collection\n\terr = s.client.RequestAndDecode(&saved, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\"select\": []string{\"manifest_text\", \"uuid\", \"portable_data_hash\"},\n\t\t\"collection\": map[string]interface{}{\n\t\t\t\"manifest_text\": txt,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\n\t// Update signatures synchronously if they are already expired\n\t// when Read() is called.\n\t{\n\t\tsaved.ManifestText = SignManifest(saved.ManifestText, s.kc.authToken, time.Now().Add(-2*time.Second), s.kc.sigttl, []byte(s.kc.sigkey))\n\t\tfs, err := saved.FileSystem(s.client, s.kc)\n\t\tc.Assert(err, check.IsNil)\n\t\tf, err := fs.OpenFile(\"d1/file1\", os.O_RDONLY, 0)\n\t\tc.Assert(err, check.IsNil)\n\t\tbuf, err := ioutil.ReadAll(f)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(string(buf), check.Equals, filedata1)\n\t}\n\n\t// Update signatures asynchronously if we're more than half\n\t// way to TTL when Read() is called.\n\t{\n\t\texp := time.Now().Add(2 * time.Minute)\n\t\tsaved.ManifestText = SignManifest(saved.ManifestText, s.kc.authToken, exp, s.kc.sigttl, []byte(s.kc.sigkey))\n\t\tfs, err := saved.FileSystem(s.client, s.kc)\n\t\tc.Assert(err, check.IsNil)\n\t\tf1, err := fs.OpenFile(\"d1/file1\", os.O_RDONLY, 0)\n\t\tc.Assert(err, check.IsNil)\n\t\tf2, err := fs.OpenFile(\"d2/file2\", os.O_RDONLY, 0)\n\t\tc.Assert(err, check.IsNil)\n\t\tbuf, err := ioutil.ReadAll(f1)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(string(buf), check.Equals, filedata1)\n\n\t\t// Ensure fs treats the 2-minute TTL as less than half\n\t\t// the server's signing TTL. 
If we don't do this,\n\t\t// collectionfs will guess the signing TTL is 2\n\t\t// minutes, conclude the signature is still fresh, and\n\t\t// won't do an async refresh.\n\t\tfs.(*collectionFileSystem).guessSignatureTTL = time.Hour\n\n\t\trefreshed := false\n\t\tfor deadline := time.Now().Add(time.Second * 10); time.Now().Before(deadline) && !refreshed; time.Sleep(time.Second / 10) {\n\t\t\t_, err = f1.Seek(0, io.SeekStart)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tbuf, err = ioutil.ReadAll(f1)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(string(buf), check.Equals, filedata1)\n\t\t\tloc := s.kc.reads[len(s.kc.reads)-1]\n\t\t\tt, err := signatureExpiryTime(loc)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Logf(\"last read block %s had signature expiry time %v\", loc, t)\n\t\t\tif t.Sub(time.Now()) > time.Hour {\n\t\t\t\trefreshed = true\n\t\t\t}\n\t\t}\n\t\tc.Check(refreshed, check.Equals, true)\n\n\t\t// Second locator should have been updated at the same\n\t\t// time.\n\t\tbuf, err = ioutil.ReadAll(f2)\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Assert(string(buf), check.Equals, filedata2)\n\t\tloc := s.kc.reads[len(s.kc.reads)-1]\n\t\tc.Check(loc, check.Not(check.Equals), s.kc.reads[len(s.kc.reads)-2])\n\t\tt, err := signatureExpiryTime(s.kc.reads[len(s.kc.reads)-1])\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Logf(\"last read block %s had signature expiry time %v\", loc, t)\n\t\tc.Check(t.Sub(time.Now()) > time.Hour, check.Equals, true)\n\t}\n}\n\nvar bigmanifest = func() string {\n\tvar buf bytes.Buffer\n\tfor i := 0; i < 2000; i++ {\n\t\tfmt.Fprintf(&buf, \"./dir%d\", i)\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tfmt.Fprintf(&buf, \" d41d8cd98f00b204e9800998ecf8427e+99999\")\n\t\t}\n\t\tfor i := 0; i < 2000; i++ {\n\t\t\tfmt.Fprintf(&buf, \" 1200000:300000:file%d\", i)\n\t\t}\n\t\tfmt.Fprintf(&buf, \"\\n\")\n\t}\n\treturn buf.String()\n}()\n\nfunc (s *CollectionFSSuite) BenchmarkParseManifest(c *check.C) {\n\tDebugLocksPanicMode = false\n\tc.Logf(\"test manifest is %d bytes\", len(bigmanifest))\n\tfor i := 0; i < c.N; i++ {\n\t\tfs, err := (&Collection{ManifestText: bigmanifest}).FileSystem(s.client, s.kc)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(fs, check.NotNil)\n\t}\n}\n\nfunc (s *CollectionFSSuite) checkMemSize(c *check.C, f File) {\n\tfn := f.(*filehandle).inode.(*filenode)\n\tvar memsize int64\n\tfor _, seg := range fn.segments {\n\t\tif e, ok := seg.(*memSegment); ok {\n\t\t\tmemsize += int64(len(e.buf))\n\t\t}\n\t}\n\tc.Check(fn.memsize, check.Equals, memsize)\n}\n\ntype CollectionFSUnitSuite struct{}\n\nvar _ = check.Suite(&CollectionFSUnitSuite{})\n\n// expect ~2 seconds to load a manifest with 256K files\nfunc (s *CollectionFSUnitSuite) TestLargeManifest_ManyFiles(c *check.C) {\n\tif testing.Short() {\n\t\tc.Skip(\"slow\")\n\t}\n\ts.testLargeManifest(c, 512, 512, 1, 0)\n}\n\nfunc (s *CollectionFSUnitSuite) TestLargeManifest_LargeFiles(c *check.C) {\n\tif testing.Short() {\n\t\tc.Skip(\"slow\")\n\t}\n\ts.testLargeManifest(c, 1, 800, 1000, 0)\n}\n\nfunc (s *CollectionFSUnitSuite) TestLargeManifest_InterleavedFiles(c *check.C) {\n\tif testing.Short() {\n\t\tc.Skip(\"slow\")\n\t}\n\t// Timing figures here are from a dev host, (0)->(1)->(2)->(3)\n\t// (0) no optimizations (main branch commit ea697fb1e8)\n\t// (1) resolve streampos->blkidx with binary search\n\t// (2) ...and rewrite PortableDataHash() without regexp\n\t// (3) ...and use fnodeCache in loadManifest\n\ts.testLargeManifest(c, 1, 800, 100, 4<<20) // 127s    -> 12s  -> 2.5s -> 1.5s\n\ts.testLargeManifest(c, 1, 50, 1000, 4<<20) // 44s     -> 
10s  -> 1.5s -> 0.8s\n\ts.testLargeManifest(c, 1, 200, 100, 4<<20) // 13s     -> 4s   -> 0.6s -> 0.3s\n\ts.testLargeManifest(c, 1, 200, 150, 4<<20) // 26s     -> 4s   -> 1s   -> 0.5s\n\ts.testLargeManifest(c, 1, 200, 200, 4<<20) // 38s     -> 6s   -> 1.3s -> 0.7s\n\ts.testLargeManifest(c, 1, 200, 225, 4<<20) // 46s     -> 7s   -> 1.5s -> 1s\n\ts.testLargeManifest(c, 1, 400, 400, 4<<20) // 477s    -> 24s  -> 5s   -> 3s\n\t// s.testLargeManifest(c, 1, 800, 1000, 4<<20) // timeout -> 186s -> 28s  -> 17s\n}\n\nfunc (s *CollectionFSUnitSuite) testLargeManifest(c *check.C, dirCount, filesPerDir, blocksPerFile, interleaveChunk int) {\n\tt0 := time.Now()\n\tconst blksize = 1 << 26\n\tc.Logf(\"%s building manifest with dirCount=%d filesPerDir=%d blocksPerFile=%d\", time.Now(), dirCount, filesPerDir, blocksPerFile)\n\tmb := bytes.NewBuffer(make([]byte, 0, 40000000))\n\tblkid := 0\n\tfor i := 0; i < dirCount; i++ {\n\t\tfmt.Fprintf(mb, \"./dir%d\", i)\n\t\tfor j := 0; j < filesPerDir; j++ {\n\t\t\tfor k := 0; k < blocksPerFile; k++ {\n\t\t\t\tblkid++\n\t\t\t\tfmt.Fprintf(mb, \" %032x+%d+A%040x@%08x\", blkid, blksize, blkid, blkid)\n\t\t\t}\n\t\t}\n\t\tfor j := 0; j < filesPerDir; j++ {\n\t\t\tif interleaveChunk == 0 {\n\t\t\t\tfmt.Fprintf(mb, \" %d:%d:dir%d/file%d\", (filesPerDir-j-1)*blocksPerFile*blksize, blocksPerFile*blksize, j, j)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor todo := int64(blocksPerFile) * int64(blksize); todo > 0; todo -= int64(interleaveChunk) {\n\t\t\t\tsize := int64(interleaveChunk)\n\t\t\t\tif size > todo {\n\t\t\t\t\tsize = todo\n\t\t\t\t}\n\t\t\t\toffset := rand.Int63n(int64(blocksPerFile)*int64(blksize)*int64(filesPerDir) - size)\n\t\t\t\tfmt.Fprintf(mb, \" %d:%d:dir%d/file%d\", offset, size, j, j)\n\t\t\t}\n\t\t}\n\t\tmb.Write([]byte{'\\n'})\n\t}\n\tcoll := Collection{ManifestText: mb.String()}\n\tc.Logf(\"%s built manifest size=%d\", time.Now(), mb.Len())\n\n\tvar memstats runtime.MemStats\n\truntime.ReadMemStats(&memstats)\n\tc.Logf(\"%s Alloc=%d Sys=%d\", time.Now(), memstats.Alloc, memstats.Sys)\n\n\tf, err := coll.FileSystem(NewClientFromEnv(), &keepClientStub{})\n\tc.Check(err, check.IsNil)\n\tc.Logf(\"%s loaded\", time.Now())\n\tc.Check(f.Size(), check.Equals, int64(dirCount*filesPerDir*blocksPerFile*blksize))\n\n\t// Stat() and OpenFile() each file. This mimics the behavior\n\t// of webdav propfind, which opens each file even when just\n\t// listing directory entries.\n\tfor i := 0; i < dirCount; i++ {\n\t\tfor j := 0; j < filesPerDir; j++ {\n\t\t\tfnm := fmt.Sprintf(\"./dir%d/dir%d/file%d\", i, j, j)\n\t\t\tfi, err := f.Stat(fnm)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Check(fi.IsDir(), check.Equals, false)\n\t\t\tf, err := f.OpenFile(fnm, os.O_RDONLY, 0)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tf.Close()\n\t\t}\n\t}\n\tc.Logf(\"%s OpenFile() x %d\", time.Now(), dirCount*filesPerDir)\n\n\truntime.ReadMemStats(&memstats)\n\tc.Logf(\"%s Alloc=%d Sys=%d\", time.Now(), memstats.Alloc, memstats.Sys)\n\tc.Logf(\"%s MemorySize=%d\", time.Now(), f.MemorySize())\n\tc.Logf(\"%s ... test duration %s\", time.Now(), time.Now().Sub(t0))\n}\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n"
  },
  {
    "path": "sdk/go/arvados/fs_deferred.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"os\"\n\t\"sync\"\n)\n\n// A deferrednode wraps an inode that's expensive to build. Initially,\n// it responds to basic directory functions by proxying to the given\n// placeholder. If a caller uses a read/write/lock operation,\n// deferrednode calls the create() func to create the real inode, and\n// proxies to the real inode from then on.\n//\n// In practice, this means a deferrednode's parent's directory listing\n// can be generated using only the placeholder, instead of waiting for\n// create().\ntype deferrednode struct {\n\twrapped inode\n\tcreate  func() inode\n\tmtx     sync.Mutex\n\tcreated bool\n}\n\nfunc (dn *deferrednode) realinode() inode {\n\tdn.mtx.Lock()\n\tdefer dn.mtx.Unlock()\n\tif !dn.created {\n\t\tdn.wrapped = dn.create()\n\t\tdn.created = true\n\t}\n\treturn dn.wrapped\n}\n\nfunc (dn *deferrednode) currentinode() inode {\n\tdn.mtx.Lock()\n\tdefer dn.mtx.Unlock()\n\treturn dn.wrapped\n}\n\nfunc (dn *deferrednode) Read(p []byte, pos filenodePtr) (int, filenodePtr, error) {\n\treturn dn.realinode().Read(p, pos)\n}\n\nfunc (dn *deferrednode) Write(p []byte, pos filenodePtr) (int, filenodePtr, error) {\n\treturn dn.realinode().Write(p, pos)\n}\n\nfunc (dn *deferrednode) Child(name string, replace func(inode) (inode, error)) (inode, error) {\n\treturn dn.realinode().Child(name, replace)\n}\n\n// Sync is a no-op if the real inode hasn't even been created yet.\nfunc (dn *deferrednode) Sync() error {\n\tdn.mtx.Lock()\n\tdefer dn.mtx.Unlock()\n\tif !dn.created {\n\t\treturn nil\n\t} else if syncer, ok := dn.wrapped.(syncer); ok {\n\t\treturn syncer.Sync()\n\t} else {\n\t\treturn ErrInvalidOperation\n\t}\n}\n\nfunc (dn *deferrednode) Truncate(size int64) error       { return dn.realinode().Truncate(size) }\nfunc (dn *deferrednode) SetParent(p inode, name string)  { dn.realinode().SetParent(p, name) }\nfunc (dn *deferrednode) IsDir() bool                     { return dn.currentinode().IsDir() }\nfunc (dn *deferrednode) Readdir() ([]os.FileInfo, error) { return dn.realinode().Readdir() }\nfunc (dn *deferrednode) Size() int64                     { return dn.currentinode().Size() }\nfunc (dn *deferrednode) FileInfo() os.FileInfo           { return dn.currentinode().FileInfo() }\nfunc (dn *deferrednode) Lock()                           { dn.realinode().Lock() }\nfunc (dn *deferrednode) Unlock()                         { dn.realinode().Unlock() }\nfunc (dn *deferrednode) RLock()                          { dn.realinode().RLock() }\nfunc (dn *deferrednode) RUnlock()                        { dn.realinode().RUnlock() }\nfunc (dn *deferrednode) FS() FileSystem                  { return dn.currentinode().FS() }\nfunc (dn *deferrednode) Parent() inode                   { return dn.currentinode().Parent() }\nfunc (dn *deferrednode) MemorySize() int64               { return dn.currentinode().MemorySize() }\nfunc (dn *deferrednode) Snapshot() (inode, error)        { return dn.realinode().Snapshot() }\nfunc (dn *deferrednode) Splice(repl inode) error         { return dn.realinode().Splice(repl) }\n"
  },
  {
    "path": "sdk/go/arvados/fs_filehandle.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"io/fs\"\n\t\"os\"\n)\n\ntype filehandle struct {\n\tinode\n\tptr        filenodePtr\n\tappend     bool\n\treadable   bool\n\twritable   bool\n\tunreaddirs []os.FileInfo\n}\n\nfunc (f *filehandle) Read(p []byte) (n int, err error) {\n\tif !f.readable {\n\t\treturn 0, ErrWriteOnlyMode\n\t}\n\tf.inode.RLock()\n\tdefer f.inode.RUnlock()\n\tn, f.ptr, err = f.inode.Read(p, f.ptr)\n\treturn\n}\n\nfunc (f *filehandle) Seek(off int64, whence int) (pos int64, err error) {\n\tsize := f.inode.Size()\n\tptr := f.ptr\n\tswitch whence {\n\tcase io.SeekStart:\n\t\tptr.off = off\n\tcase io.SeekCurrent:\n\t\tptr.off += off\n\tcase io.SeekEnd:\n\t\tptr.off = size + off\n\t}\n\tif ptr.off < 0 {\n\t\treturn f.ptr.off, ErrNegativeOffset\n\t}\n\tif ptr.off != f.ptr.off {\n\t\tf.ptr = ptr\n\t\t// force filenode to recompute f.ptr fields on next\n\t\t// use\n\t\tf.ptr.repacked = -1\n\t}\n\treturn f.ptr.off, nil\n}\n\nfunc (f *filehandle) Truncate(size int64) error {\n\treturn f.inode.Truncate(size)\n}\n\nfunc (f *filehandle) Write(p []byte) (n int, err error) {\n\tif !f.writable {\n\t\treturn 0, ErrReadOnlyFile\n\t}\n\tf.inode.Lock()\n\tdefer f.inode.Unlock()\n\tif fn, ok := f.inode.(*filenode); ok && f.append {\n\t\tf.ptr = filenodePtr{\n\t\t\toff:        fn.fileinfo.size,\n\t\t\tsegmentIdx: len(fn.segments),\n\t\t\tsegmentOff: 0,\n\t\t\trepacked:   fn.repacked,\n\t\t}\n\t}\n\tn, f.ptr, err = f.inode.Write(p, f.ptr)\n\treturn\n}\n\nfunc (f *filehandle) Repack(ctx context.Context, opts RepackOptions) (int, error) {\n\tdn, ok := f.inode.(*dirnode)\n\tif !ok {\n\t\treturn 0, ErrNotADirectory\n\t}\n\tdn.Lock()\n\tdefer dn.Unlock()\n\treturn dn.fs.repackTree(ctx, opts, dn)\n}\n\n// dirEntry implements fs.DirEntry, see (*filehandle)ReadDir().\ntype dirEntry struct {\n\tos.FileInfo\n}\n\nfunc (ent dirEntry) Type() fs.FileMode {\n\treturn ent.Mode().Type()\n}\nfunc (ent dirEntry) Info() (fs.FileInfo, error) {\n\treturn ent, nil\n}\n\n// ReadDir implements fs.ReadDirFile.\nfunc (f *filehandle) ReadDir(count int) ([]fs.DirEntry, error) {\n\tfis, err := f.Readdir(count)\n\tif len(fis) == 0 {\n\t\treturn nil, err\n\t}\n\tents := make([]fs.DirEntry, len(fis))\n\tfor i, fi := range fis {\n\t\tents[i] = dirEntry{fi}\n\t}\n\treturn ents, err\n}\n\nfunc (f *filehandle) Readdir(count int) ([]os.FileInfo, error) {\n\tif !f.inode.IsDir() {\n\t\treturn nil, ErrInvalidOperation\n\t}\n\tif count <= 0 {\n\t\treturn f.inode.Readdir()\n\t}\n\tif f.unreaddirs == nil {\n\t\tvar err error\n\t\tf.unreaddirs, err = f.inode.Readdir()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif len(f.unreaddirs) == 0 {\n\t\treturn nil, io.EOF\n\t}\n\tif count > len(f.unreaddirs) {\n\t\tcount = len(f.unreaddirs)\n\t}\n\tret := f.unreaddirs[:count]\n\tf.unreaddirs = f.unreaddirs[count:]\n\treturn ret, nil\n}\n\nfunc (f *filehandle) Stat() (os.FileInfo, error) {\n\treturn f.inode.FileInfo(), nil\n}\n\nfunc (f *filehandle) Close() error {\n\treturn nil\n}\n\nfunc (f *filehandle) Sync() error {\n\t// Sync the containing filesystem.\n\treturn f.FS().Sync()\n}\n\nfunc (f *filehandle) Snapshot() (*Subtree, error) {\n\tif !f.readable {\n\t\treturn nil, ErrInvalidOperation\n\t}\n\tnode, err := f.inode.Snapshot()\n\treturn &Subtree{inode: node}, err\n}\n\nfunc (f *filehandle) Splice(r *Subtree) error {\n\tif !f.writable {\n\t\treturn ErrReadOnlyFile\n\t}\n\treturn 
f.inode.Splice(r.inode)\n}\n"
  },
  {
    "path": "sdk/go/arvados/fs_getternode.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"time\"\n)\n\n// A getternode is a read-only character device that returns whatever\n// data is returned by the supplied function.\ntype getternode struct {\n\tGetter func() ([]byte, error)\n\n\ttreenode\n\tdata *bytes.Reader\n}\n\nfunc (*getternode) IsDir() bool {\n\treturn false\n}\n\nfunc (*getternode) Child(string, func(inode) (inode, error)) (inode, error) {\n\treturn nil, ErrInvalidOperation\n}\n\nfunc (gn *getternode) get() error {\n\tif gn.data != nil {\n\t\treturn nil\n\t}\n\tdata, err := gn.Getter()\n\tif err != nil {\n\t\treturn err\n\t}\n\tgn.data = bytes.NewReader(data)\n\treturn nil\n}\n\nfunc (gn *getternode) Size() int64 {\n\treturn gn.FileInfo().Size()\n}\n\nfunc (gn *getternode) FileInfo() os.FileInfo {\n\tgn.Lock()\n\tdefer gn.Unlock()\n\tvar size int64\n\tif gn.get() == nil {\n\t\tsize = gn.data.Size()\n\t}\n\treturn fileinfo{\n\t\tmodTime: time.Now(),\n\t\tmode:    0444,\n\t\tsize:    size,\n\t}\n}\n\nfunc (gn *getternode) Read(p []byte, ptr filenodePtr) (int, filenodePtr, error) {\n\tif err := gn.get(); err != nil {\n\t\treturn 0, ptr, err\n\t}\n\tn, err := gn.data.ReadAt(p, ptr.off)\n\treturn n, filenodePtr{off: ptr.off + int64(n)}, err\n}\n"
  },
  {
    "path": "sdk/go/arvados/fs_lookup.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"os\"\n\t\"time\"\n)\n\n// lookupnode is a caching tree node that is initially empty and calls\n// loadOne and loadAll to load/update child nodes as needed.\n//\n// See (*customFileSystem)MountUsers for example usage.\ntype lookupnode struct {\n\ttreenode\n\tloadOne func(parent inode, name string) (inode, error)\n\tloadAll func(parent inode) ([]inode, error)\n\tstale   func(time.Time) bool\n\n\t// internal fields\n\tstaleAll time.Time\n\tstaleOne map[string]time.Time\n}\n\n// Sync flushes pending writes for loaded children and, if successful,\n// triggers a reload on next lookup.\nfunc (ln *lookupnode) Sync() error {\n\terr := ln.treenode.Sync()\n\tif err != nil {\n\t\treturn err\n\t}\n\tln.Lock()\n\tln.staleAll = time.Time{}\n\tln.staleOne = nil\n\tln.Unlock()\n\treturn nil\n}\n\nfunc (ln *lookupnode) Readdir() ([]os.FileInfo, error) {\n\tln.Lock()\n\tcheckTime := time.Now()\n\tif ln.stale(ln.staleAll) {\n\t\tall, err := ln.loadAll(ln)\n\t\tif err != nil {\n\t\t\tln.Unlock()\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, child := range all {\n\t\t\tvar name string\n\t\t\tif hl, ok := child.(*hardlink); ok && hl.inode == ln {\n\t\t\t\t// If child is a hardlink to its\n\t\t\t\t// parent, FileInfo()->RLock() will\n\t\t\t\t// deadlock, because we already have\n\t\t\t\t// the write lock. In this situation\n\t\t\t\t// we can safely access the hardlink's\n\t\t\t\t// name directly.\n\t\t\t\tname = hl.name\n\t\t\t} else {\n\t\t\t\tname = child.FileInfo().Name()\n\t\t\t}\n\t\t\t_, err = ln.treenode.Child(name, func(inode) (inode, error) {\n\t\t\t\treturn child, nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tln.Unlock()\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tln.staleAll = checkTime\n\t\t// No value in ln.staleOne can make a difference to an\n\t\t// \"entry is stale?\" test now, because no value is\n\t\t// newer than ln.staleAll. Reclaim memory.\n\t\tln.staleOne = nil\n\t}\n\tln.Unlock()\n\treturn ln.treenode.Readdir()\n}\n\n// Child rejects (with ErrInvalidOperation) calls to add/replace\n// children, instead calling loadOne when a non-existing child is\n// looked up.\nfunc (ln *lookupnode) Child(name string, replace func(inode) (inode, error)) (inode, error) {\n\tcheckTime := time.Now()\n\tvar existing inode\n\tvar err error\n\tif ln.stale(ln.staleAll) && ln.stale(ln.staleOne[name]) {\n\t\texisting, err = ln.treenode.Child(name, func(inode) (inode, error) {\n\t\t\treturn ln.loadOne(ln, name)\n\t\t})\n\t\tif err == nil && existing != nil {\n\t\t\tif ln.staleOne == nil {\n\t\t\t\tln.staleOne = map[string]time.Time{name: checkTime}\n\t\t\t} else {\n\t\t\t\tln.staleOne[name] = checkTime\n\t\t\t}\n\t\t}\n\t} else {\n\t\texisting, err = ln.treenode.Child(name, nil)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\treturn existing, err\n\t\t}\n\t}\n\tif replace != nil {\n\t\t// Let the callback try to delete or replace the\n\t\t// existing node; if it does, return\n\t\t// ErrInvalidOperation.\n\t\tif tryRepl, err := replace(existing); err != nil {\n\t\t\t// Propagate error from callback\n\t\t\treturn existing, err\n\t\t} else if tryRepl != existing {\n\t\t\treturn existing, ErrInvalidOperation\n\t\t}\n\t}\n\t// Return original error from ln.treenode.Child() (it might be\n\t// ErrNotExist).\n\treturn existing, err\n}\n"
  },
  {
    "path": "sdk/go/arvados/fs_project.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc (fs *customFileSystem) defaultUUID(uuid string) (string, error) {\n\tif uuid != \"\" {\n\t\treturn uuid, nil\n\t}\n\tvar resp User\n\terr := fs.RequestAndDecode(&resp, \"GET\", \"arvados/v1/users/current\", nil, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.UUID, nil\n}\n\n// projectsLoadOne loads only the named child, if it exists.\nfunc (fs *customFileSystem) projectsLoadOne(parent inode, uuid, name string) (inode, error) {\n\tuuid, err := fs.defaultUUID(uuid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar contents CollectionList\n\tfor _, subst := range []string{\"/\", fs.forwardSlashNameSubstitution} {\n\t\tcontents = CollectionList{}\n\t\terr = fs.RequestAndDecode(&contents, \"GET\", \"arvados/v1/groups/\"+uuid+\"/contents\", nil, ResourceListParams{\n\t\t\tCount: \"none\",\n\t\t\tOrder: \"uuid\",\n\t\t\tFilters: []Filter{\n\t\t\t\t{\"name\", \"=\", strings.Replace(name, subst, \"/\", -1)},\n\t\t\t\t{\"uuid\", \"is_a\", []string{\"arvados#collection\", \"arvados#group\"}},\n\t\t\t\t{\"groups.group_class\", \"in\", []string{\"project\", \"filter\"}},\n\t\t\t},\n\t\t\tSelect: []string{\"uuid\", \"name\", \"modified_at\", \"properties\"},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(contents.Items) > 0 || fs.forwardSlashNameSubstitution == \"/\" || fs.forwardSlashNameSubstitution == \"\" || !strings.Contains(name, fs.forwardSlashNameSubstitution) {\n\t\t\tbreak\n\t\t}\n\t\t// If the requested name contains the configured \"/\"\n\t\t// replacement string and didn't match a\n\t\t// project/collection exactly, we'll try again with\n\t\t// \"/\" in its place, so a lookup of a munged name\n\t\t// works regardless of whether the directory listing\n\t\t// has been populated with escaped names.\n\t\t//\n\t\t// Note this doesn't handle items whose names contain\n\t\t// both \"/\" and the substitution string.\n\t}\n\tif len(contents.Items) == 0 {\n\t\treturn nil, nil\n\t}\n\tcoll := contents.Items[0]\n\n\tif strings.Contains(coll.UUID, \"-j7d0g-\") {\n\t\t// Group item was loaded into a Collection var -- but\n\t\t// we only need the Name and UUID anyway, so it's OK.\n\t\treturn &hardlink{\n\t\t\tinode: fs.projectSingleton(coll.UUID, &Group{\n\t\t\t\tUUID:       coll.UUID,\n\t\t\t\tName:       coll.Name,\n\t\t\t\tModifiedAt: coll.ModifiedAt,\n\t\t\t\tProperties: coll.Properties,\n\t\t\t}),\n\t\t\tparent: parent,\n\t\t\tname:   coll.Name,\n\t\t}, nil\n\t} else if strings.Contains(coll.UUID, \"-4zz18-\") {\n\t\treturn fs.newDeferredCollectionDir(parent, name, coll.UUID, coll.ModifiedAt, coll.Properties), nil\n\t} else {\n\t\tlog.Printf(\"group contents: unrecognized UUID in response: %q\", coll.UUID)\n\t\treturn nil, ErrInvalidArgument\n\t}\n}\n\nfunc (fs *customFileSystem) projectsLoadAll(parent inode, uuid string) ([]inode, error) {\n\tuuid, err := fs.defaultUUID(uuid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpagesize := 100000\n\tvar inodes []inode\n\n\t// When #17424 is resolved, remove the outer loop here and use\n\t// []string{\"arvados#collection\", \"arvados#group\"} directly as the uuid\n\t// filter.\n\tfor _, class := range []string{\"arvados#collection\", \"arvados#group\"} {\n\t\t// Note: the \"filters\" slice's backing array might be reused\n\t\t// by append(filters,...) below. 
This isn't goroutine safe,\n\t\t// but all accesses are in the same goroutine, so it's OK.\n\t\tfilters := []Filter{\n\t\t\t{\"uuid\", \"is_a\", class},\n\t\t}\n\t\tif class == \"arvados#group\" {\n\t\t\tfilters = append(filters, Filter{\"groups.group_class\", \"in\", []string{\"project\", \"filter\"}})\n\t\t}\n\n\t\tparams := ResourceListParams{\n\t\t\tCount:   \"none\",\n\t\t\tFilters: filters,\n\t\t\tOrder:   \"uuid\",\n\t\t\tSelect:  []string{\"uuid\", \"name\", \"modified_at\", \"properties\"},\n\t\t\tLimit:   &pagesize,\n\t\t}\n\n\t\tfor {\n\t\t\t// The groups content endpoint returns\n\t\t\t// Collection and Group (project)\n\t\t\t// objects. This function only accesses the\n\t\t\t// UUID, Name, and ModifiedAt fields. Both\n\t\t\t// collections and groups have those fields,\n\t\t\t// so it is easier to just treat the\n\t\t\t// ObjectList that comes back as a\n\t\t\t// CollectionList.\n\t\t\tvar resp CollectionList\n\t\t\terr = fs.RequestAndDecode(&resp, \"GET\", \"arvados/v1/groups/\"+uuid+\"/contents\", nil, params)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif len(resp.Items) == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfor _, i := range resp.Items {\n\t\t\t\tif fs.forwardSlashNameSubstitution != \"\" {\n\t\t\t\t\ti.Name = strings.Replace(i.Name, \"/\", fs.forwardSlashNameSubstitution, -1)\n\t\t\t\t}\n\t\t\t\tif !permittedName(i.Name) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif strings.Contains(i.UUID, \"-j7d0g-\") {\n\t\t\t\t\tinodes = append(inodes, fs.newProjectDir(parent, i.Name, i.UUID, &Group{\n\t\t\t\t\t\tUUID:       i.UUID,\n\t\t\t\t\t\tName:       i.Name,\n\t\t\t\t\t\tModifiedAt: i.ModifiedAt,\n\t\t\t\t\t\tProperties: i.Properties,\n\t\t\t\t\t}))\n\t\t\t\t} else if strings.Contains(i.UUID, \"-4zz18-\") {\n\t\t\t\t\tinodes = append(inodes, fs.newDeferredCollectionDir(parent, i.Name, i.UUID, i.ModifiedAt, i.Properties))\n\t\t\t\t} else {\n\t\t\t\t\tlog.Printf(\"group contents: unrecognized UUID in response: %q\", i.UUID)\n\t\t\t\t\treturn nil, ErrInvalidArgument\n\t\t\t\t}\n\t\t\t}\n\t\t\tparams.Filters = append(filters, Filter{\"uuid\", \">\", resp.Items[len(resp.Items)-1].UUID})\n\t\t}\n\t}\n\treturn inodes, nil\n}\n\nfunc (fs *customFileSystem) newProjectDir(parent inode, name, uuid string, proj *Group) inode {\n\treturn &hardlink{inode: fs.projectSingleton(uuid, proj), parent: parent, name: name}\n}\n\nfunc (fs *customFileSystem) newDeferredCollectionDir(parent inode, name, uuid string, modTime time.Time, props map[string]interface{}) inode {\n\tif modTime.IsZero() {\n\t\tmodTime = time.Now()\n\t}\n\tplaceholder := &treenode{\n\t\tfs:     fs,\n\t\tparent: parent,\n\t\tinodes: nil,\n\t\tfileinfo: fileinfo{\n\t\t\tname:    name,\n\t\t\tmodTime: modTime,\n\t\t\tmode:    0755 | os.ModeDir,\n\t\t\tsys:     func() interface{} { return &Collection{UUID: uuid, Name: name, ModifiedAt: modTime, Properties: props} },\n\t\t},\n\t}\n\treturn &deferrednode{wrapped: placeholder, create: func() inode {\n\t\tnode, err := fs.collectionSingleton(uuid)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"BUG: unhandled error: %s\", err)\n\t\t\treturn placeholder\n\t\t}\n\t\treturn &hardlink{inode: node, parent: parent, name: name}\n\t}}\n}\n"
  },
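  {
    "path": "sdk/go/arvados/example_keyset_pagination.go",
    "content": "// Editor's sketch -- a hypothetical file, not part of the Arvados tree.\n//\n// projectsLoadAll (fs_project.go) pages through a project's contents\n// with keyset pagination: Count \"none\" skips the items_available\n// tally, Order \"uuid\" gives a stable sort, and each page re-filters\n// with uuid > last-seen instead of using an offset. This helper shows\n// the same pattern in isolation; the function name is invented.\n\npackage arvados\n\n// listProjectCollections returns every collection in the given\n// project, fetching one page at a time.\nfunc listProjectCollections(c *Client, projectUUID string) ([]Collection, error) {\n\tpagesize := 1000\n\tfilters := []Filter{{\"uuid\", \"is_a\", \"arvados#collection\"}}\n\tparams := ResourceListParams{\n\t\tCount:   \"none\",\n\t\tOrder:   \"uuid\",\n\t\tLimit:   &pagesize,\n\t\tFilters: filters,\n\t\tSelect:  []string{\"uuid\", \"name\", \"modified_at\"},\n\t}\n\tvar all []Collection\n\tfor {\n\t\tvar page CollectionList\n\t\terr := c.RequestAndDecode(&page, \"GET\", \"arvados/v1/groups/\"+projectUUID+\"/contents\", nil, params)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(page.Items) == 0 {\n\t\t\treturn all, nil\n\t\t}\n\t\tall = append(all, page.Items...)\n\t\t// Start from the base filters again and add a cursor just\n\t\t// past the last UUID seen, as projectsLoadAll does.\n\t\tparams.Filters = append(filters, Filter{\"uuid\", \">\", page.Items[len(page.Items)-1].UUID})\n\t}\n}\n"
  },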
  {
    "path": "sdk/go/arvados/fs_project_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\ntype spiedRequest struct {\n\tmethod string\n\tpath   string\n\tparams map[string]interface{}\n}\n\ntype spyingClient struct {\n\t*Client\n\tcalls []spiedRequest\n}\n\nfunc (sc *spyingClient) RequestAndDecode(dst interface{}, method, path string, body io.Reader, params interface{}) error {\n\tvar paramsCopy map[string]interface{}\n\tvar buf bytes.Buffer\n\tjson.NewEncoder(&buf).Encode(params)\n\tjson.NewDecoder(&buf).Decode(&paramsCopy)\n\tsc.calls = append(sc.calls, spiedRequest{\n\t\tmethod: method,\n\t\tpath:   path,\n\t\tparams: paramsCopy,\n\t})\n\treturn sc.Client.RequestAndDecode(dst, method, path, body, params)\n}\n\nfunc (s *SiteFSSuite) TestFilterGroup(c *check.C) {\n\t// Make sure that a collection and group that match the filter are present,\n\t// and that a group that does not match the filter is not present.\n\n\tcheckOpen := func(path string, exists bool) {\n\t\tf, err := s.fs.Open(path)\n\t\tif exists {\n\t\t\tif c.Check(err, check.IsNil) {\n\t\t\t\tc.Check(f.Close(), check.IsNil)\n\t\t\t}\n\t\t} else {\n\t\t\tc.Check(err, check.Equals, os.ErrNotExist)\n\t\t}\n\t}\n\n\tcheckDirContains := func(parent, child string, exists bool) {\n\t\tf, err := s.fs.Open(parent)\n\t\tif !c.Check(err, check.IsNil) {\n\t\t\treturn\n\t\t}\n\t\tents, err := f.Readdir(-1)\n\t\tif !c.Check(err, check.IsNil) {\n\t\t\treturn\n\t\t}\n\t\tfor _, ent := range ents {\n\t\t\tif !exists {\n\t\t\t\tc.Check(ent.Name(), check.Not(check.Equals), child)\n\t\t\t\tif child == \"\" {\n\t\t\t\t\t// no children are expected\n\t\t\t\t\tc.Errorf(\"child %q found in parent %q\", child, parent)\n\t\t\t\t}\n\t\t\t} else if ent.Name() == child {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif exists {\n\t\t\tc.Errorf(\"child %q not found in parent %q\", child, parent)\n\t\t}\n\t}\n\n\tcheckOpen(\"/users/active/This filter group/baz_file\", true)\n\tcheckOpen(\"/users/active/This filter group/A Subproject\", true)\n\tcheckOpen(\"/users/active/This filter group/A Project\", false)\n\ts.fs.MountProject(\"fg\", fixtureThisFilterGroupUUID)\n\tcheckOpen(\"/fg/baz_file\", true)\n\tcheckOpen(\"/fg/A Subproject\", true)\n\tcheckOpen(\"/fg/A Project\", false)\n\ts.fs.MountProject(\"home\", \"\")\n\tcheckOpen(\"/home/A filter group with an is_a collection filter/baz_file\", true)\n\tcheckOpen(\"/home/A filter group with an is_a collection filter/baz_file/baz\", true)\n\tcheckOpen(\"/home/A filter group with an is_a collection filter/A Subproject\", false)\n\tcheckOpen(\"/home/A filter group with an is_a collection filter/A Project\", false)\n\n\t// An empty filter means everything that is visible should be returned.\n\tcheckOpen(\"/users/active/A filter group without filters/baz_file\", true)\n\tcheckOpen(\"/users/active/A filter group without filters/A Subproject\", true)\n\tcheckOpen(\"/users/active/A filter group without filters/A Project\", true)\n\ts.fs.MountProject(\"fg2\", fixtureAFilterGroupTwoUUID)\n\tcheckOpen(\"/fg2/baz_file\", true)\n\tcheckOpen(\"/fg2/A Subproject\", true)\n\tcheckOpen(\"/fg2/A Project\", true)\n\n\t// If a filter group matches itself or one of its ancestors,\n\t// the matched item appears as an empty directory.\n\tcheckDirContains(\"/users/active/A filter group without filters\", \"A filter group without filters\", 
true)\n\tcheckOpen(\"/users/active/A filter group without filters/A filter group without filters\", true)\n\tcheckOpen(\"/users/active/A filter group without filters/A filter group without filters/baz_file\", false)\n\tcheckDirContains(\"/users/active/A filter group without filters/A filter group without filters\", \"\", false)\n\n\t// An 'is_a' 'arvados#collection' filter means only collections should be returned.\n\tcheckOpen(\"/users/active/A filter group with an is_a collection filter/baz_file\", true)\n\tcheckOpen(\"/users/active/A filter group with an is_a collection filter/baz_file/baz\", true)\n\tcheckOpen(\"/users/active/A filter group with an is_a collection filter/A Subproject\", false)\n\tcheckOpen(\"/users/active/A filter group with an is_a collection filter/A Project\", false)\n\ts.fs.MountProject(\"fg3\", fixtureAFilterGroupThreeUUID)\n\tcheckOpen(\"/fg3/baz_file\", true)\n\tcheckOpen(\"/fg3/baz_file/baz\", true)\n\tcheckOpen(\"/fg3/A Subproject\", false)\n\n\t// An 'exists' 'arvados#collection' filter means only collections with certain properties should be returned.\n\ts.fs.MountProject(\"fg4\", fixtureAFilterGroupFourUUID)\n\tcheckOpen(\"/fg4/collection with list property with odd values\", true)\n\tcheckOpen(\"/fg4/collection with list property with even values\", true)\n\tcheckOpen(\"/fg4/baz_file\", false)\n\n\t// A 'contains' 'arvados#collection' filter means only collections with certain properties should be returned.\n\ts.fs.MountProject(\"fg5\", fixtureAFilterGroupFiveUUID)\n\tcheckOpen(\"/fg5/collection with list property with odd values\", true)\n\tcheckOpen(\"/fg5/collection with list property with string value\", true)\n\tcheckOpen(\"/fg5/collection with prop2 5\", false)\n\tcheckOpen(\"/fg5/collection with list property with even values\", false)\n}\n\nfunc (s *SiteFSSuite) TestCurrentUserHome(c *check.C) {\n\ts.fs.MountProject(\"home\", \"\")\n\ts.testHomeProject(c, \"/home\", \"home\")\n}\n\nfunc (s *SiteFSSuite) TestUsersDir(c *check.C) {\n\t// /users/active is a hardlink to a dir whose name is the UUID\n\t// of the active user\n\ts.testHomeProject(c, \"/users/active\", fixtureActiveUserUUID)\n}\n\nfunc (s *SiteFSSuite) testHomeProject(c *check.C, path, expectRealName string) {\n\tf, err := s.fs.Open(path)\n\tc.Assert(err, check.IsNil)\n\tfis, err := f.Readdir(-1)\n\tc.Assert(err, check.IsNil)\n\tc.Check(len(fis), check.Not(check.Equals), 0)\n\n\tok := false\n\tfor _, fi := range fis {\n\t\tc.Check(fi.Name(), check.Not(check.Equals), \"\")\n\t\tif fi.Name() == \"A Project\" {\n\t\t\tok = true\n\t\t}\n\t}\n\tc.Check(ok, check.Equals, true)\n\n\tf, err = s.fs.Open(path + \"/A Project/..\")\n\tc.Assert(err, check.IsNil)\n\tfi, err := f.Stat()\n\tc.Assert(err, check.IsNil)\n\tc.Check(fi.IsDir(), check.Equals, true)\n\tc.Check(fi.Name(), check.Equals, expectRealName)\n\n\tf, err = s.fs.Open(path + \"/A Project/A Subproject\")\n\tc.Assert(err, check.IsNil)\n\tfi, err = f.Stat()\n\tc.Assert(err, check.IsNil)\n\tc.Check(fi.IsDir(), check.Equals, true)\n\n\tfor _, nx := range []string{\n\t\tpath + \"/Unrestricted public data\",\n\t\tpath + \"/Unrestricted public data/does not exist\",\n\t\tpath + \"/A Project/does not exist\",\n\t} {\n\t\tc.Log(nx)\n\t\tf, err = s.fs.Open(nx)\n\t\tc.Check(err, check.NotNil)\n\t\tc.Check(os.IsNotExist(err), check.Equals, true)\n\t}\n}\n\nfunc (s *SiteFSSuite) TestProjectReaddirAfterLoadOne(c *check.C) {\n\tf, err := s.fs.Open(\"/users/active/A Project/A Subproject\")\n\tc.Assert(err, check.IsNil)\n\tdefer f.Close()\n\tf, err = 
s.fs.Open(\"/users/active/A Project/Project does not exist\")\n\tc.Assert(err, check.NotNil)\n\tf, err = s.fs.Open(\"/users/active/A Project/A Subproject\")\n\tc.Assert(err, check.IsNil)\n\tdefer f.Close()\n\tf, err = s.fs.Open(\"/users/active/A Project\")\n\tc.Assert(err, check.IsNil)\n\tdefer f.Close()\n\tfis, err := f.Readdir(-1)\n\tc.Assert(err, check.IsNil)\n\tc.Logf(\"%#v\", fis)\n\tvar foundSubproject, foundCollection bool\n\tfor _, fi := range fis {\n\t\tswitch fi.Name() {\n\t\tcase \"A Subproject\":\n\t\t\tfoundSubproject = true\n\t\tcase \"collection_to_move_around\":\n\t\t\tfoundCollection = true\n\t\t}\n\t}\n\tc.Check(foundSubproject, check.Equals, true)\n\tc.Check(foundCollection, check.Equals, true)\n}\n\nfunc (s *SiteFSSuite) TestSlashInName(c *check.C) {\n\tvar badCollection Collection\n\terr := s.client.RequestAndDecode(&badCollection, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\"collection\": map[string]string{\n\t\t\t\"name\":       \"bad/collection\",\n\t\t\t\"owner_uuid\": fixtureAProjectUUID,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\tdefer s.client.RequestAndDecode(nil, \"DELETE\", \"arvados/v1/collections/\"+badCollection.UUID, nil, nil)\n\n\tvar badProject Group\n\terr = s.client.RequestAndDecode(&badProject, \"POST\", \"arvados/v1/groups\", nil, map[string]interface{}{\n\t\t\"group\": map[string]string{\n\t\t\t\"name\":        \"bad/project\",\n\t\t\t\"group_class\": \"project\",\n\t\t\t\"owner_uuid\":  fixtureAProjectUUID,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\tdefer s.client.RequestAndDecode(nil, \"DELETE\", \"arvados/v1/groups/\"+badProject.UUID, nil, nil)\n\n\tdir, err := s.fs.Open(\"/users/active/A Project\")\n\tc.Assert(err, check.IsNil)\n\tfis, err := dir.Readdir(-1)\n\tc.Check(err, check.IsNil)\n\tfor _, fi := range fis {\n\t\tc.Logf(\"fi.Name() == %q\", fi.Name())\n\t\tc.Check(strings.Contains(fi.Name(), \"/\"), check.Equals, false)\n\t}\n\n\t// Make a new fs (otherwise content will still be cached from\n\t// above) and enable \"/\" replacement string.\n\ts.fs = s.client.SiteFileSystem(s.kc)\n\ts.fs.ForwardSlashNameSubstitution(\"___\")\n\tdir, err = s.fs.Open(\"/users/active/A Project/bad___collection\")\n\tif c.Check(err, check.IsNil) {\n\t\t_, err = dir.Readdir(-1)\n\t\tc.Check(err, check.IsNil)\n\t}\n\tdir, err = s.fs.Open(\"/users/active/A Project/bad___project\")\n\tif c.Check(err, check.IsNil) {\n\t\t_, err = dir.Readdir(-1)\n\t\tc.Check(err, check.IsNil)\n\t}\n}\n\nfunc (s *SiteFSSuite) TestProjectUpdatedByOther(c *check.C) {\n\ts.fs.MountProject(\"home\", \"\")\n\n\tproject, err := s.fs.OpenFile(\"/home/A Project\", 0, 0)\n\tc.Assert(err, check.IsNil)\n\n\t_, err = s.fs.Open(\"/home/A Project/oob\")\n\tc.Check(err, check.NotNil)\n\n\tvar oob Collection\n\terr = s.client.RequestAndDecode(&oob, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\"collection\": map[string]string{\n\t\t\t\"name\":       \"oob\",\n\t\t\t\"owner_uuid\": fixtureAProjectUUID,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\tdefer s.client.RequestAndDecode(nil, \"DELETE\", \"arvados/v1/collections/\"+oob.UUID, nil, nil)\n\n\terr = project.Sync()\n\tc.Check(err, check.IsNil)\n\tf, err := s.fs.Open(\"/home/A Project/oob\")\n\tc.Assert(err, check.IsNil)\n\tfi, err := f.Stat()\n\tc.Assert(err, check.IsNil)\n\tc.Check(fi.IsDir(), check.Equals, true)\n\tf.Close()\n\n\twf, err := s.fs.OpenFile(\"/home/A Project/oob/test.txt\", os.O_CREATE|os.O_RDWR, 0700)\n\tc.Assert(err, check.IsNil)\n\t_, err = wf.Write([]byte(\"hello 
oob\\n\"))\n\tc.Check(err, check.IsNil)\n\terr = wf.Close()\n\tc.Check(err, check.IsNil)\n\n\terr = project.Sync()\n\tc.Check(err, check.IsNil)\n\tf, err = s.fs.Open(\"/home/A Project/oob/test.txt\")\n\tif c.Check(err, check.IsNil) {\n\t\tf.Close()\n\t}\n\n\t// Ensure collection was flushed by Sync\n\tvar latest Collection\n\terr = s.client.RequestAndDecode(&latest, \"GET\", \"arvados/v1/collections/\"+oob.UUID, nil, nil)\n\tc.Check(err, check.IsNil)\n\tc.Check(latest.ManifestText, check.Matches, `.*:test.txt.*\\n`)\n\n\t// Delete test.txt behind s.fs's back by updating the\n\t// collection record with an empty ManifestText.\n\terr = s.client.RequestAndDecode(nil, \"PATCH\", \"arvados/v1/collections/\"+oob.UUID, nil, map[string]interface{}{\n\t\t\"collection\": map[string]string{\n\t\t\t\"manifest_text\":      \"\",\n\t\t\t\"portable_data_hash\": \"d41d8cd98f00b204e9800998ecf8427e+0\",\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\n\t// Sync again to reload collection.\n\terr = project.Sync()\n\tc.Check(err, check.IsNil)\n\n\t// Check test.txt deletion is reflected in fs.\n\t_, err = s.fs.Open(\"/home/A Project/oob/test.txt\")\n\tc.Check(err, check.NotNil)\n\tf, err = s.fs.Open(\"/home/A Project/oob\")\n\tif c.Check(err, check.IsNil) {\n\t\tf.Close()\n\t}\n\n\terr = s.client.RequestAndDecode(nil, \"DELETE\", \"arvados/v1/collections/\"+oob.UUID, nil, nil)\n\tc.Assert(err, check.IsNil)\n\n\twf, err = s.fs.OpenFile(\"/home/A Project/oob/test.txt\", os.O_CREATE|os.O_RDWR, 0700)\n\tc.Assert(err, check.IsNil)\n\terr = wf.Close()\n\tc.Check(err, check.IsNil)\n\n\terr = project.Sync()\n\tc.Check(err, check.NotNil) // can't update the deleted collection\n\t_, err = s.fs.Open(\"/home/A Project/oob\")\n\tc.Check(err, check.IsNil) // parent dir still has old collection -- didn't reload, because Sync failed\n}\n\nfunc (s *SiteFSSuite) TestProjectUnsupportedOperations(c *check.C) {\n\ts.fs.MountByID(\"by_id\")\n\ts.fs.MountProject(\"home\", \"\")\n\n\t_, err := s.fs.OpenFile(\"/home/A Project/newfilename\", os.O_CREATE|os.O_RDWR, 0)\n\tc.Check(err, ErrorIs, ErrInvalidOperation)\n\n\terr = s.fs.Mkdir(\"/home/A Project/newdirname\", 0)\n\tc.Check(err, ErrorIs, ErrInvalidOperation)\n\n\terr = s.fs.Mkdir(\"/by_id/newdirname\", 0)\n\tc.Check(err, ErrorIs, ErrInvalidOperation)\n\n\terr = s.fs.Mkdir(\"/by_id/\"+fixtureAProjectUUID+\"/newdirname\", 0)\n\tc.Check(err, ErrorIs, ErrInvalidOperation)\n\n\t_, err = s.fs.OpenFile(\"/home/A Project\", 0, 0)\n\tc.Check(err, check.IsNil)\n}\n\ntype errorIsChecker struct {\n\t*check.CheckerInfo\n}\n\nvar ErrorIs check.Checker = errorIsChecker{\n\t&check.CheckerInfo{Name: \"ErrorIs\", Params: []string{\"value\", \"target\"}},\n}\n\nfunc (checker errorIsChecker) Check(params []interface{}, names []string) (result bool, errStr string) {\n\terr, ok := params[0].(error)\n\tif !ok {\n\t\treturn false, \"\"\n\t}\n\ttarget, ok := params[1].(error)\n\tif !ok {\n\t\treturn false, \"\"\n\t}\n\treturn errors.Is(err, target), \"\"\n}\n"
  },
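  {
    "path": "sdk/go/arvados/example_erroris_sketch_test.go",
    "content": "// Editor's sketch -- a hypothetical test file, not part of the Arvados\n// tree. It shows how the custom ErrorIs checker defined in\n// fs_project_test.go reads in practice: it adapts errors.Is to\n// gocheck's Checker interface, so a wrapped error still matches its\n// sentinel target. The suite and test names are invented.\n\npackage arvados\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&errorIsSketchSuite{})\n\ntype errorIsSketchSuite struct{}\n\nfunc (s *errorIsSketchSuite) TestWrappedSentinel(c *check.C) {\n\twrapped := fmt.Errorf(\"open failed: %w\", os.ErrNotExist)\n\t// check.Equals would fail here (the values differ), but ErrorIs\n\t// unwraps with errors.Is and matches.\n\tc.Check(wrapped, ErrorIs, os.ErrNotExist)\n\tc.Check(wrapped, check.Not(ErrorIs), os.ErrPermission)\n}\n"
  },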
  {
    "path": "sdk/go/arvados/fs_site.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype CustomFileSystem interface {\n\tFileSystem\n\tMountByID(mount string) error\n\tMountByPDH(mount string) error\n\tMountHome(mount string) error\n\tMountProject(mount, uuid string) error\n\tMountTmp(mount string) error\n\tMountUsers(mount string) error\n\tForwardSlashNameSubstitution(string)\n}\n\ntype customFileSystem struct {\n\tfileSystem\n\troot *vdirnode\n\tthr  *throttle\n\n\tstaleThreshold time.Time\n\tstaleLock      sync.Mutex\n\n\tforwardSlashNameSubstitution string\n\n\tbyID     map[string]inode\n\tbyIDLock sync.Mutex\n\tbyIDRoot *treenode\n}\n\nfunc (c *Client) CustomFileSystem(kc keepClient) CustomFileSystem {\n\troot := &vdirnode{}\n\tfs := &customFileSystem{\n\t\troot: root,\n\t\tfileSystem: fileSystem{\n\t\t\tfsBackend: keepBackend{apiClient: c, keepClient: kc},\n\t\t\troot:      root,\n\t\t\tthr:       newThrottle(concurrentWriters),\n\t\t},\n\t}\n\troot.treenode = treenode{\n\t\tfs:     fs,\n\t\tparent: root,\n\t\tfileinfo: fileinfo{\n\t\t\tname:    \"/\",\n\t\t\tmode:    os.ModeDir | 0755,\n\t\t\tmodTime: time.Now(),\n\t\t},\n\t\tinodes: make(map[string]inode),\n\t}\n\tfs.byID = map[string]inode{}\n\tfs.byIDRoot = &treenode{\n\t\tfs:     fs,\n\t\tparent: root,\n\t\tinodes: make(map[string]inode),\n\t\tfileinfo: fileinfo{\n\t\t\tname:    \"_internal_by_id\",\n\t\t\tmodTime: time.Now(),\n\t\t\tmode:    0755 | os.ModeDir,\n\t\t},\n\t}\n\treturn fs\n}\n\nfunc checkMountTarget(mount string) error {\n\tif len(mount) == 0 || strings.Contains(mount, \"/\") {\n\t\treturn ErrInvalidArgument\n\t}\n\treturn nil\n}\n\nfunc (fs *customFileSystem) MountTmp(mount string) error {\n\tif err := checkMountTarget(mount); err != nil {\n\t\treturn err\n\t}\n\tnewfs, err := (&Collection{}).FileSystem(fs, fs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcfs := newfs.(*collectionFileSystem)\n\tcfs.SetParent(fs.root, mount)\n\n\tfs.root.treenode.Lock()\n\tdefer fs.root.treenode.Unlock()\n\t_, err = fs.root.treenode.Child(mount, func(inode) (inode, error) {\n\t\treturn cfs, nil\n\t})\n\treturn err\n}\n\nfunc (fs *customFileSystem) MountByID(mount string) error {\n\treturn fs.mountVdir(mount, fs.newCollectionOrProjectHardlink)\n}\n\nfunc (fs *customFileSystem) MountByPDH(mount string) error {\n\treturn fs.mountVdir(mount, func(parent inode, name string) (inode, error) {\n\t\tif pdhRegexp.MatchString(name) {\n\t\t\treturn fs.newCollectionOrProjectHardlink(parent, name)\n\t\t}\n\t\treturn nil, nil\n\t})\n}\n\nfunc (fs *customFileSystem) mountVdir(mount string, createfunc func(inode, string) (inode, error)) error {\n\tif err := checkMountTarget(mount); err != nil {\n\t\treturn err\n\t}\n\tfs.root.treenode.Lock()\n\tdefer fs.root.treenode.Unlock()\n\t_, err := fs.root.treenode.Child(mount, func(inode) (inode, error) {\n\t\treturn &vdirnode{\n\t\t\ttreenode: treenode{\n\t\t\t\tfs:     fs,\n\t\t\t\tparent: fs.root,\n\t\t\t\tinodes: make(map[string]inode),\n\t\t\t\tfileinfo: fileinfo{\n\t\t\t\t\tname:    mount,\n\t\t\t\t\tmodTime: time.Now(),\n\t\t\t\t\tmode:    0755 | os.ModeDir,\n\t\t\t\t},\n\t\t\t},\n\t\t\tcreate: createfunc,\n\t\t}, nil\n\t})\n\treturn err\n}\n\nfunc (fs *customFileSystem) MountHome(mount string) error {\n\tif err := checkMountTarget(mount); err != nil {\n\t\treturn err\n\t}\n\tfs.root.treenode.Lock()\n\tdefer fs.root.treenode.Unlock()\n\t_, err := 
fs.root.treenode.Child(mount, func(inode) (inode, error) {\n\t\treturn &hardlink{inode: fs.projectSingleton(\"\", nil), parent: fs.root, name: mount}, nil\n\t})\n\treturn err\n}\n\nfunc (fs *customFileSystem) MountProject(mount, uuid string) error {\n\tif err := checkMountTarget(mount); err != nil {\n\t\treturn err\n\t}\n\tfs.root.treenode.Lock()\n\tdefer fs.root.treenode.Unlock()\n\t_, err := fs.root.treenode.Child(mount, func(inode) (inode, error) {\n\t\treturn fs.newProjectDir(fs.root, mount, uuid, nil), nil\n\t})\n\treturn err\n}\n\nfunc (fs *customFileSystem) MountUsers(mount string) error {\n\tif err := checkMountTarget(mount); err != nil {\n\t\treturn err\n\t}\n\tfs.root.treenode.Lock()\n\tdefer fs.root.treenode.Unlock()\n\t_, err := fs.root.treenode.Child(mount, func(inode) (inode, error) {\n\t\treturn &lookupnode{\n\t\t\tstale:   fs.Stale,\n\t\t\tloadOne: fs.usersLoadOne,\n\t\t\tloadAll: fs.usersLoadAll,\n\t\t\ttreenode: treenode{\n\t\t\t\tfs:     fs,\n\t\t\t\tparent: fs.root,\n\t\t\t\tinodes: make(map[string]inode),\n\t\t\t\tfileinfo: fileinfo{\n\t\t\t\t\tname:    mount,\n\t\t\t\t\tmodTime: time.Now(),\n\t\t\t\t\tmode:    0755 | os.ModeDir,\n\t\t\t\t},\n\t\t\t},\n\t\t}, nil\n\t})\n\treturn err\n}\n\nfunc (fs *customFileSystem) ForwardSlashNameSubstitution(repl string) {\n\tfs.forwardSlashNameSubstitution = repl\n}\n\nfunc (fs *customFileSystem) MemorySize() int64 {\n\treturn fs.fileSystem.MemorySize() + fs.byIDRoot.MemorySize()\n}\n\n// SiteFileSystem returns a FileSystem that maps collections and other\n// Arvados objects onto a filesystem layout.\n//\n// This is experimental: the filesystem layout is not stable, and\n// there are significant known bugs and shortcomings. For example,\n// writes are not persisted until Sync() is called.\nfunc (c *Client) SiteFileSystem(kc keepClient) CustomFileSystem {\n\tfs := c.CustomFileSystem(kc)\n\tfs.MountByID(\"by_id\")\n\tfs.MountUsers(\"users\")\n\treturn fs\n}\n\nfunc (fs *customFileSystem) Sync() error {\n\treturn fs.byIDRoot.Sync()\n}\n\n// Stale returns true if information obtained at time t should be\n// considered stale.\nfunc (fs *customFileSystem) Stale(t time.Time) bool {\n\tfs.staleLock.Lock()\n\tdefer fs.staleLock.Unlock()\n\treturn !fs.staleThreshold.Before(t)\n}\n\nfunc (fs *customFileSystem) newNode(name string, perm os.FileMode, modTime time.Time) (node inode, err error) {\n\treturn nil, ErrInvalidOperation\n}\n\nfunc (fs *customFileSystem) newCollectionOrProjectHardlink(parent inode, id string) (inode, error) {\n\tif strings.Contains(id, \"-4zz18-\") || pdhRegexp.MatchString(id) {\n\t\tnode, err := fs.collectionSingleton(id)\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, nil\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &hardlink{inode: node, parent: parent, name: id}, nil\n\t} else if strings.Contains(id, \"-j7d0g-\") || strings.Contains(id, \"-tpzed-\") {\n\t\tfs.byIDLock.Lock()\n\t\tnode := fs.byID[id]\n\t\tfs.byIDLock.Unlock()\n\t\tif node == nil {\n\t\t\t// Look up the project synchronously before\n\t\t\t// calling projectSingleton (otherwise we\n\t\t\t// wouldn't detect a nonexistent project until\n\t\t\t// it's too late to return ErrNotExist).\n\t\t\tproj, err := fs.getProject(id)\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\treturn nil, nil\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tnode = fs.projectSingleton(id, proj)\n\t\t}\n\t\treturn &hardlink{inode: node, parent: parent, name: id}, nil\n\t} else {\n\t\treturn nil, nil\n\t}\n}\n\nfunc (fs *customFileSystem) 
projectSingleton(uuid string, proj *Group) inode {\n\tfs.byIDLock.Lock()\n\tdefer fs.byIDLock.Unlock()\n\tif n := fs.byID[uuid]; n != nil {\n\t\treturn n\n\t}\n\tname := uuid\n\tif name == \"\" {\n\t\t// special case uuid==\"\" implements the \"home project\"\n\t\t// (owner_uuid == current user uuid)\n\t\tname = \"home\"\n\t}\n\tvar projLoading sync.Mutex\n\tn := &lookupnode{\n\t\tstale:   fs.Stale,\n\t\tloadOne: func(parent inode, name string) (inode, error) { return fs.projectsLoadOne(parent, uuid, name) },\n\t\tloadAll: func(parent inode) ([]inode, error) { return fs.projectsLoadAll(parent, uuid) },\n\t\ttreenode: treenode{\n\t\t\tfs:     fs,\n\t\t\tparent: fs.byIDRoot,\n\t\t\tinodes: make(map[string]inode),\n\t\t\tfileinfo: fileinfo{\n\t\t\t\tname:    name,\n\t\t\t\tmodTime: time.Now(),\n\t\t\t\tmode:    0755 | os.ModeDir,\n\t\t\t\tsys: func() interface{} {\n\t\t\t\t\tprojLoading.Lock()\n\t\t\t\t\tdefer projLoading.Unlock()\n\t\t\t\t\tif proj != nil {\n\t\t\t\t\t\treturn proj\n\t\t\t\t\t}\n\t\t\t\t\tg, err := fs.getProject(uuid)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tproj = g\n\t\t\t\t\treturn proj\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfs.byID[uuid] = n\n\treturn n\n}\n\nfunc (fs *customFileSystem) getProject(uuid string) (*Group, error) {\n\tvar g Group\n\terr := fs.RequestAndDecode(&g, \"GET\", \"arvados/v1/groups/\"+uuid, nil, nil)\n\tif statusErr, ok := err.(interface{ HTTPStatus() int }); ok && statusErr.HTTPStatus() == http.StatusNotFound {\n\t\treturn nil, os.ErrNotExist\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\treturn &g, err\n}\n\nfunc (fs *customFileSystem) collectionSingleton(id string) (inode, error) {\n\t// Return existing singleton, if we have it\n\tfs.byIDLock.Lock()\n\texisting := fs.byID[id]\n\tfs.byIDLock.Unlock()\n\tif existing != nil {\n\t\treturn existing, nil\n\t}\n\n\tcoll, err := fs.getCollection(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewfs, err := coll.FileSystem(fs, fs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcfs := newfs.(*collectionFileSystem)\n\tcfs.SetParent(fs.byIDRoot, id)\n\n\t// Check again in case another goroutine has added a node to\n\t// fs.byID since we checked above.\n\tfs.byIDLock.Lock()\n\tdefer fs.byIDLock.Unlock()\n\tif existing = fs.byID[id]; existing != nil {\n\t\t// Other goroutine won the race. Discard the node we\n\t\t// just made, and return the race winner.\n\t\treturn existing, nil\n\t}\n\t// We won the race. Save the new node in fs.byID and\n\t// fs.byIDRoot.\n\tfs.byID[id] = cfs\n\tfs.byIDRoot.Lock()\n\tdefer fs.byIDRoot.Unlock()\n\tfs.byIDRoot.Child(id, func(inode) (inode, error) { return cfs, nil })\n\treturn cfs, nil\n}\n\nfunc (fs *customFileSystem) getCollection(id string) (*Collection, error) {\n\tvar coll Collection\n\terr := fs.RequestAndDecode(&coll, \"GET\", \"arvados/v1/collections/\"+id, nil, nil)\n\tif statusErr, ok := err.(interface{ HTTPStatus() int }); ok && statusErr.HTTPStatus() == http.StatusNotFound {\n\t\treturn nil, os.ErrNotExist\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\tif len(id) != 27 {\n\t\t// This means id is a PDH, and controller/railsapi\n\t\t// returned one of (possibly) many collections with\n\t\t// that PDH. 
Even if controller returns more fields\n\t\t// besides PDH and manifest text (which are equal for\n\t\t// all matching collections), we don't want to expose\n\t\t// them (e.g., through Sys()).\n\t\tcoll = Collection{\n\t\t\tPortableDataHash: coll.PortableDataHash,\n\t\t\tManifestText:     coll.ManifestText,\n\t\t}\n\t}\n\treturn &coll, nil\n}\n\n// vdirnode wraps an inode by rejecting (with ErrInvalidOperation)\n// calls that add/replace children directly, instead calling a\n// create() func when a non-existing child is looked up.\n//\n// create() can return either a new node, which will be added to the\n// treenode, or nil for ENOENT.\ntype vdirnode struct {\n\ttreenode\n\tcreate func(parent inode, name string) (inode, error)\n}\n\nfunc (vn *vdirnode) Child(name string, replace func(inode) (inode, error)) (inode, error) {\n\treturn vn.treenode.Child(name, func(existing inode) (inode, error) {\n\t\tif existing == nil && vn.create != nil {\n\t\t\tnewnode, err := vn.create(vn, name)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif newnode != nil {\n\t\t\t\tnewnode.SetParent(vn, name)\n\t\t\t\texisting = newnode\n\t\t\t\tvn.treenode.fileinfo.modTime = time.Now()\n\t\t\t}\n\t\t}\n\t\tif replace == nil {\n\t\t\treturn existing, nil\n\t\t} else if tryRepl, err := replace(existing); err != nil {\n\t\t\treturn existing, err\n\t\t} else if tryRepl != existing {\n\t\t\treturn existing, ErrInvalidOperation\n\t\t} else {\n\t\t\treturn existing, nil\n\t\t}\n\t})\n}\n\n// A hardlink can be used to mount an existing node at an additional\n// point in the same filesystem.\ntype hardlink struct {\n\tinode\n\tparent inode\n\tname   string\n}\n\n// If the wrapped inode is a filesystem, rootnode returns the wrapped\n// fs's rootnode, otherwise inode itself. This allows\n// (*fileSystem)Rename() to lock the root node of a hardlink-wrapped\n// filesystem.\nfunc (hl *hardlink) rootnode() inode {\n\tif node, ok := hl.inode.(interface{ rootnode() inode }); ok {\n\t\treturn node.rootnode()\n\t} else {\n\t\treturn hl.inode\n\t}\n}\n\nfunc (hl *hardlink) Sync() error {\n\tif node, ok := hl.inode.(syncer); ok {\n\t\treturn node.Sync()\n\t} else {\n\t\treturn ErrInvalidOperation\n\t}\n}\n\nfunc (hl *hardlink) SetParent(parent inode, name string) {\n\thl.Lock()\n\tdefer hl.Unlock()\n\thl.parent = parent\n\thl.name = name\n}\n\nfunc (hl *hardlink) Parent() inode {\n\thl.RLock()\n\tdefer hl.RUnlock()\n\treturn hl.parent\n}\n\nfunc (hl *hardlink) FileInfo() os.FileInfo {\n\tfi := hl.inode.FileInfo()\n\tif fi, ok := fi.(fileinfo); ok {\n\t\tfi.name = hl.name\n\t\treturn fi\n\t}\n\treturn fi\n}\n\nfunc (hl *hardlink) MemorySize() int64 {\n\treturn 64 + int64(len(hl.name))\n}\n"
  },
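  {
    "path": "sdk/go/arvados/example_mount_layout.go",
    "content": "// Editor's sketch -- a hypothetical file, not part of the Arvados tree.\n// It shows how the Mount* methods above compose into a custom\n// filesystem layout; SiteFileSystem itself is just MountByID(\"by_id\")\n// plus MountUsers(\"users\"). The function name is invented; kc is any\n// value satisfying the package's keepClient interface.\n\npackage arvados\n\nfunc buildCustomLayout(c *Client, kc keepClient) (CustomFileSystem, error) {\n\tfs := c.CustomFileSystem(kc)\n\t// Every mount target must be a single path element:\n\t// checkMountTarget rejects \"\" and anything containing \"/\".\n\tif err := fs.MountByID(\"by_id\"); err != nil { // collections and projects by UUID or PDH\n\t\treturn nil, err\n\t}\n\tif err := fs.MountUsers(\"users\"); err != nil { // one subdir per user, named by username\n\t\treturn nil, err\n\t}\n\tif err := fs.MountProject(\"home\", \"\"); err != nil { // uuid==\"\" means the current user's home project\n\t\treturn nil, err\n\t}\n\tif err := fs.MountTmp(\"scratch\"); err != nil { // writable temporary collection\n\t\treturn nil, err\n\t}\n\treturn fs, nil\n}\n"
  },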
  {
    "path": "sdk/go/arvados/fs_site_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nconst (\n\t// Importing arvadostest would be an import cycle, so these\n\t// fixtures are duplicated here [until fs moves to a separate\n\t// package].\n\tfixtureActiveUserUUID               = \"zzzzz-tpzed-xurymjxw79nv3jz\"\n\tfixtureActiveToken                  = \"3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi\"\n\tfixtureAProjectUUID                 = \"zzzzz-j7d0g-v955i6s2oi1cbso\"\n\tfixtureThisFilterGroupUUID          = \"zzzzz-j7d0g-thisfiltergroup\"\n\tfixtureAFilterGroupTwoUUID          = \"zzzzz-j7d0g-afiltergrouptwo\"\n\tfixtureAFilterGroupThreeUUID        = \"zzzzz-j7d0g-filtergroupthre\"\n\tfixtureAFilterGroupFourUUID         = \"zzzzz-j7d0g-filtergroupfour\"\n\tfixtureAFilterGroupFiveUUID         = \"zzzzz-j7d0g-filtergroupfive\"\n\tfixtureFooAndBarFilesInDirUUID      = \"zzzzz-4zz18-foonbarfilesdir\"\n\tfixtureFooCollectionName            = \"zzzzz-4zz18-fy296fx3hot09f7 added sometime\"\n\tfixtureFooCollectionPDH             = \"1f4b0bc7583c2a7f9102c395f4ffc5e3+45\"\n\tfixtureFooCollection                = \"zzzzz-4zz18-fy296fx3hot09f7\"\n\tfixtureNonexistentCollection        = \"zzzzz-4zz18-totallynotexist\"\n\tfixtureStorageClassesDesiredArchive = \"zzzzz-4zz18-3t236wr12769qqa\"\n\tfixtureBlobSigningKey               = \"zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc\"\n\tfixtureBlobSigningTTL               = 336 * time.Hour\n)\n\nvar _ = check.Suite(&SiteFSSuite{})\n\nfunc init() {\n\t// Enable DebugLocksPanicMode sometimes. 
Don't enable it all\n\t// the time, though -- it adds many calls to time.Sleep(),\n\t// which could hide different bugs.\n\tif time.Now().Second()&1 == 0 {\n\t\tDebugLocksPanicMode = true\n\t}\n}\n\ntype SiteFSSuite struct {\n\tclient *Client\n\tfs     CustomFileSystem\n\tkc     keepClient\n}\n\nfunc (s *SiteFSSuite) SetUpTest(c *check.C) {\n\ts.client = &Client{\n\t\tAPIHost:   os.Getenv(\"ARVADOS_API_HOST\"),\n\t\tAuthToken: fixtureActiveToken,\n\t\tInsecure:  true,\n\t}\n\ts.kc = &keepClientStub{\n\t\tblocks: map[string][]byte{\n\t\t\t\"3858f62230ac3c915f300c664312c63f\": []byte(\"foobar\"),\n\t\t},\n\t\tsigkey:    fixtureBlobSigningKey,\n\t\tsigttl:    fixtureBlobSigningTTL,\n\t\tauthToken: fixtureActiveToken,\n\t}\n\ts.fs = s.client.SiteFileSystem(s.kc)\n}\n\nfunc (s *SiteFSSuite) TestHttpFileSystemInterface(c *check.C) {\n\t_, ok := s.fs.(http.FileSystem)\n\tc.Check(ok, check.Equals, true)\n}\n\nfunc (s *SiteFSSuite) TestByIDEmpty(c *check.C) {\n\tf, err := s.fs.Open(\"/by_id\")\n\tc.Assert(err, check.IsNil)\n\tfis, err := f.Readdir(-1)\n\tc.Check(err, check.IsNil)\n\tc.Check(len(fis), check.Equals, 0)\n}\n\nfunc (s *SiteFSSuite) TestUpdateStorageClasses(c *check.C) {\n\tf, err := s.fs.OpenFile(\"/by_id/\"+fixtureStorageClassesDesiredArchive+\"/newfile\", os.O_CREATE|os.O_RDWR, 0777)\n\tc.Assert(err, check.IsNil)\n\t_, err = f.Write([]byte(\"nope\"))\n\tc.Assert(err, check.IsNil)\n\terr = f.Close()\n\tc.Assert(err, check.IsNil)\n\terr = s.fs.Sync()\n\tc.Assert(err, check.ErrorMatches, `.*stub does not write storage class \"archive\"`)\n}\n\nfunc (s *SiteFSSuite) TestSameCollectionDifferentPaths(c *check.C) {\n\terr := s.fs.MountProject(\"home\", \"\")\n\tc.Assert(err, check.IsNil)\n\tvar coll Collection\n\terr = s.client.RequestAndDecode(&coll, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\"collection\": map[string]interface{}{\n\t\t\t\"owner_uuid\": fixtureAProjectUUID,\n\t\t\t\"name\":       fmt.Sprintf(\"test collection %d\", time.Now().UnixNano()),\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\n\tviaProjID := \"by_id/\" + fixtureAProjectUUID + \"/\" + coll.Name\n\tviaProjName := \"home/A Project/\" + coll.Name\n\tviaCollID := \"by_id/\" + coll.UUID\n\tfor n, dirs := range [][]string{\n\t\t{viaCollID, viaProjID, viaProjName},\n\t\t{viaCollID, viaProjName, viaProjID},\n\t\t{viaProjID, viaProjName, viaCollID},\n\t\t{viaProjID, viaCollID, viaProjName},\n\t\t{viaProjName, viaCollID, viaProjID},\n\t\t{viaProjName, viaProjID, viaCollID},\n\t} {\n\t\tfilename := fmt.Sprintf(\"file %d\", n)\n\t\tf := make([]File, 3)\n\t\tfor i, dir := range dirs {\n\t\t\tpath := dir + \"/\" + filename\n\t\t\tmode := os.O_RDWR\n\t\t\tif i == 0 {\n\t\t\t\tmode |= os.O_CREATE\n\t\t\t\tc.Logf(\"create %s\", path)\n\t\t\t} else {\n\t\t\t\tc.Logf(\"open %s\", path)\n\t\t\t}\n\t\t\tf[i], err = s.fs.OpenFile(path, mode, 0777)\n\t\t\tc.Assert(err, check.IsNil, check.Commentf(\"n=%d i=%d path=%s\", n, i, path))\n\t\t\tdefer f[i].Close()\n\t\t}\n\t\t_, err = io.WriteString(f[0], filename)\n\t\tc.Assert(err, check.IsNil)\n\t\t_, err = f[1].Seek(0, io.SeekEnd)\n\t\tc.Assert(err, check.IsNil)\n\t\t_, err = io.WriteString(f[1], filename)\n\t\tc.Assert(err, check.IsNil)\n\t\tbuf, err := io.ReadAll(f[2])\n\t\tc.Assert(err, check.IsNil)\n\t\tc.Check(string(buf), check.Equals, filename+filename)\n\t}\n}\n\nfunc (s *SiteFSSuite) TestByUUIDAndPDH(c *check.C) {\n\tf, err := s.fs.Open(\"/by_id\")\n\tc.Assert(err, check.IsNil)\n\tfis, err := f.Readdir(-1)\n\tc.Check(err, 
check.IsNil)\n\tc.Check(len(fis), check.Equals, 0)\n\n\terr = s.fs.Mkdir(\"/by_id/\"+fixtureFooCollection, 0755)\n\tc.Check(err, check.Equals, os.ErrExist)\n\n\tf, err = s.fs.Open(\"/by_id/\" + fixtureNonexistentCollection)\n\tc.Assert(err, check.Equals, os.ErrNotExist)\n\n\tfor _, path := range []string{\n\t\tfixtureFooCollection,\n\t\tfixtureFooCollectionPDH,\n\t\tfixtureAProjectUUID + \"/\" + fixtureFooCollectionName,\n\t} {\n\t\tf, err = s.fs.Open(\"/by_id/\" + path)\n\t\tc.Assert(err, check.IsNil)\n\t\tfis, err = f.Readdir(-1)\n\t\tc.Assert(err, check.IsNil)\n\t\tvar names []string\n\t\tfor _, fi := range fis {\n\t\t\tnames = append(names, fi.Name())\n\t\t}\n\t\tc.Check(names, check.DeepEquals, []string{\"foo\"})\n\t}\n\n\tf, err = s.fs.Open(\"/by_id/\" + fixtureAProjectUUID + \"/A Subproject/baz_file\")\n\tc.Assert(err, check.IsNil)\n\tfis, err = f.Readdir(-1)\n\tc.Assert(err, check.IsNil)\n\tvar names []string\n\tfor _, fi := range fis {\n\t\tnames = append(names, fi.Name())\n\t}\n\tc.Check(names, check.DeepEquals, []string{\"baz\"})\n\tf, err = s.fs.Open(\"/by_id/\" + fixtureAProjectUUID + \"/A Subproject/baz_file/baz\")\n\tc.Assert(err, check.IsNil)\n\terr = f.Close()\n\tc.Assert(err, check.IsNil)\n\t_, err = s.fs.Open(\"/by_id/\" + fixtureAProjectUUID + \"/A Subproject/baz_file/baz/\")\n\tc.Assert(err, check.Equals, ErrNotADirectory)\n\t_, err = s.fs.Open(\"/by_id/\" + fixtureAProjectUUID + \"/A Subproject/baz_file/baz/z\")\n\tc.Assert(err, check.Equals, ErrNotADirectory)\n\t_, err = s.fs.Open(\"/by_id/\" + fixtureAProjectUUID + \"/A Subproject/baz_file/baz/..\")\n\tc.Assert(err, check.Equals, ErrNotADirectory)\n\n\t_, err = s.fs.OpenFile(\"/by_id/\"+fixtureNonexistentCollection, os.O_RDWR|os.O_CREATE, 0755)\n\tc.Check(err, ErrorIs, ErrInvalidOperation)\n\terr = s.fs.Rename(\"/by_id/\"+fixtureFooCollection, \"/by_id/beep\")\n\tc.Check(err, ErrorIs, ErrInvalidOperation)\n\terr = s.fs.Rename(\"/by_id/\"+fixtureFooCollection+\"/foo\", \"/by_id/beep\")\n\tc.Check(err, ErrorIs, ErrInvalidOperation)\n\t_, err = s.fs.Stat(\"/by_id/beep\")\n\tc.Check(err, check.Equals, os.ErrNotExist)\n\terr = s.fs.Rename(\"/by_id/\"+fixtureFooCollection+\"/foo\", \"/by_id/\"+fixtureFooCollection+\"/bar\")\n\tc.Check(err, check.IsNil)\n\n\terr = s.fs.Rename(\"/by_id\", \"/beep\")\n\tc.Check(err, ErrorIs, ErrInvalidOperation)\n}\n\n// Copy subtree from OS src to dst path inside fs. 
If src is a\n// directory, dst must exist and be a directory.\nfunc copyFromOS(fs FileSystem, dst, src string) error {\n\tinf, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer inf.Close()\n\tdirents, err := inf.Readdir(-1)\n\tif e, ok := err.(*os.PathError); ok {\n\t\tif e, ok := e.Err.(syscall.Errno); ok {\n\t\t\tif e == syscall.ENOTDIR {\n\t\t\t\terr = syscall.ENOTDIR\n\t\t\t}\n\t\t}\n\t}\n\tif err == syscall.ENOTDIR {\n\t\toutf, err := fs.OpenFile(dst, os.O_CREATE|os.O_EXCL|os.O_TRUNC|os.O_WRONLY, 0700)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"open %s: %s\", dst, err)\n\t\t}\n\t\tdefer outf.Close()\n\t\t_, err = io.Copy(outf, inf)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%s: copying data from %s: %s\", dst, src, err)\n\t\t}\n\t\terr = outf.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"%s: readdir: %T %s\", src, err, err)\n\t} else {\n\t\t{\n\t\t\td, err := fs.Open(dst)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"opendir(%s): %s\", dst, err)\n\t\t\t}\n\t\t\td.Close()\n\t\t}\n\t\tfor _, ent := range dirents {\n\t\t\tif ent.Name() == \".\" || ent.Name() == \"..\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdstname := dst + \"/\" + ent.Name()\n\t\t\tif ent.IsDir() {\n\t\t\t\terr = fs.Mkdir(dstname, 0700)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"mkdir %s: %s\", dstname, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = copyFromOS(fs, dstname, src+\"/\"+ent.Name())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *SiteFSSuite) TestSnapshotSplice(c *check.C) {\n\terr := s.fs.MountProject(\"home\", \"\")\n\tc.Assert(err, check.IsNil)\n\tthisfile, err := ioutil.ReadFile(\"fs_site_test.go\")\n\tc.Assert(err, check.IsNil)\n\n\tvar src1 Collection\n\terr = s.client.RequestAndDecode(&src1, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\"collection\": map[string]string{\n\t\t\t\"name\":       \"TestSnapshotSplice src1\",\n\t\t\t\"owner_uuid\": fixtureAProjectUUID,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\tdefer s.client.RequestAndDecode(nil, \"DELETE\", \"arvados/v1/collections/\"+src1.UUID, nil, nil)\n\terr = s.fs.Sync()\n\tc.Assert(err, check.IsNil)\n\terr = copyFromOS(s.fs, \"/home/A Project/TestSnapshotSplice src1\", \"..\") // arvados.git/sdk/go\n\tc.Assert(err, check.IsNil)\n\n\tvar src2 Collection\n\terr = s.client.RequestAndDecode(&src2, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\"collection\": map[string]string{\n\t\t\t\"name\":       \"TestSnapshotSplice src2\",\n\t\t\t\"owner_uuid\": fixtureAProjectUUID,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\tdefer s.client.RequestAndDecode(nil, \"DELETE\", \"arvados/v1/collections/\"+src2.UUID, nil, nil)\n\terr = s.fs.Sync()\n\tc.Assert(err, check.IsNil)\n\terr = copyFromOS(s.fs, \"/home/A Project/TestSnapshotSplice src2\", \"..\") // arvados.git/sdk/go\n\tc.Assert(err, check.IsNil)\n\n\tvar dst Collection\n\terr = s.client.RequestAndDecode(&dst, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\"collection\": map[string]string{\n\t\t\t\"name\":       \"TestSnapshotSplice dst\",\n\t\t\t\"owner_uuid\": fixtureAProjectUUID,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\tdefer s.client.RequestAndDecode(nil, \"DELETE\", \"arvados/v1/collections/\"+dst.UUID, nil, nil)\n\terr = s.fs.Sync()\n\tc.Assert(err, check.IsNil)\n\n\tdstPath := \"/home/A Project/TestSnapshotSplice dst\"\n\terr = copyFromOS(s.fs, dstPath, \"..\") // 
arvados.git/sdk/go\n\tc.Assert(err, check.IsNil)\n\n\t// Snapshot directory\n\tsnap1, err := Snapshot(s.fs, \"/home/A Project/TestSnapshotSplice src1/ctxlog\")\n\tc.Check(err, check.IsNil)\n\t// Attach same snapshot twice, at paths that didn't exist before\n\terr = Splice(s.fs, dstPath+\"/ctxlog-copy\", snap1)\n\tc.Check(err, check.IsNil)\n\terr = Splice(s.fs, dstPath+\"/ctxlog-copy2\", snap1)\n\tc.Check(err, check.IsNil)\n\t// Splicing a snapshot twice results in two independent copies\n\terr = s.fs.Rename(dstPath+\"/ctxlog-copy2/log.go\", dstPath+\"/ctxlog-copy/log2.go\")\n\tc.Check(err, check.IsNil)\n\t_, err = s.fs.Open(dstPath + \"/ctxlog-copy2/log.go\")\n\tc.Check(err, check.Equals, os.ErrNotExist)\n\tf, err := s.fs.Open(dstPath + \"/ctxlog-copy/log.go\")\n\tif c.Check(err, check.IsNil) {\n\t\tbuf, err := ioutil.ReadAll(f)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(string(buf), check.Not(check.Equals), \"\")\n\t\tf.Close()\n\t}\n\n\t// Snapshot regular file\n\tsnapFile, err := Snapshot(s.fs, \"/home/A Project/TestSnapshotSplice src1/arvados/fs_site_test.go\")\n\tc.Check(err, check.IsNil)\n\t// Replace dir with file\n\terr = Splice(s.fs, dstPath+\"/ctxlog-copy2\", snapFile)\n\tc.Check(err, check.IsNil)\n\tif f, err := s.fs.Open(dstPath + \"/ctxlog-copy2\"); c.Check(err, check.IsNil) {\n\t\tbuf, err := ioutil.ReadAll(f)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(string(buf), check.Equals, string(thisfile))\n\t}\n\n\t// Cannot splice a file onto a collection root; cannot splice\n\t// anything to a target outside a collection.\n\tfor _, badpath := range []string{\n\t\tdstPath + \"/\",\n\t\tdstPath,\n\t\t\"/home/A Project/newnodename/\",\n\t\t\"/home/A Project/newnodename\",\n\t\t\"/home/A Project/\",\n\t\t\"/home/A Project\",\n\t\t\"/home/newnodename/\",\n\t\t\"/home/newnodename\",\n\t\t\"/home/\",\n\t\t\"/home\",\n\t\t\"/newnodename/\",\n\t\t\"/newnodename\",\n\t\t\"/\",\n\t} {\n\t\terr = Splice(s.fs, badpath, snapFile)\n\t\tc.Check(err, check.NotNil)\n\t\tif strings.Contains(badpath, \"newnodename\") && strings.HasSuffix(badpath, \"/\") {\n\t\t\tc.Check(err, ErrorIs, os.ErrNotExist, check.Commentf(\"badpath %q\", badpath))\n\t\t} else {\n\t\t\tc.Check(err, ErrorIs, ErrInvalidOperation, check.Commentf(\"badpath %q\", badpath))\n\t\t}\n\t\tif strings.TrimSuffix(badpath, \"/\") == dstPath {\n\t\t\tc.Check(err, check.ErrorMatches, `cannot use Splice to attach a file at top level of \\*arvados.collectionFileSystem: invalid operation`, check.Commentf(\"badpath: %q\", badpath))\n\t\t\tcontinue\n\t\t}\n\n\t\terr = Splice(s.fs, badpath, snap1)\n\t\tif strings.Contains(badpath, \"newnodename\") && strings.HasSuffix(badpath, \"/\") {\n\t\t\tc.Check(err, ErrorIs, os.ErrNotExist, check.Commentf(\"badpath %q\", badpath))\n\t\t} else {\n\t\t\tc.Check(err, ErrorIs, ErrInvalidOperation, check.Commentf(\"badpath %q\", badpath))\n\t\t}\n\t}\n\n\t// Destination's parent must already exist\n\tfor _, badpath := range []string{\n\t\tdstPath + \"/newdirname/\",\n\t\tdstPath + \"/newdirname/foobar\",\n\t\t\"/foo/bar\",\n\t} {\n\t\terr = Splice(s.fs, badpath, snap1)\n\t\tc.Check(err, ErrorIs, os.ErrNotExist, check.Commentf(\"badpath %s\", badpath))\n\t\terr = Splice(s.fs, badpath, snapFile)\n\t\tc.Check(err, ErrorIs, os.ErrNotExist, check.Commentf(\"badpath %s\", badpath))\n\t}\n\n\tsnap2, err := Snapshot(s.fs, dstPath+\"/ctxlog-copy\")\n\tif c.Check(err, check.IsNil) {\n\t\terr = Splice(s.fs, dstPath+\"/ctxlog-copy-copy\", snap2)\n\t\tc.Check(err, check.IsNil)\n\t}\n\n\t// Snapshot entire collection, splice 
into same collection at\n\t// a new path, remove file from original location, verify\n\t// spliced content survives\n\tsnapDst, err := Snapshot(s.fs, dstPath+\"\")\n\tc.Check(err, check.IsNil)\n\terr = Splice(s.fs, dstPath+\"\", snapDst)\n\tc.Check(err, check.IsNil)\n\terr = Splice(s.fs, dstPath+\"/copy1\", snapDst)\n\tc.Check(err, check.IsNil)\n\terr = Splice(s.fs, dstPath+\"/copy2\", snapDst)\n\tc.Check(err, check.IsNil)\n\terr = s.fs.RemoveAll(dstPath + \"/arvados/fs_site_test.go\")\n\tc.Check(err, check.IsNil)\n\terr = s.fs.RemoveAll(dstPath + \"/arvados\")\n\tc.Check(err, check.IsNil)\n\t_, err = s.fs.Open(dstPath + \"/arvados/fs_site_test.go\")\n\tc.Check(err, check.Equals, os.ErrNotExist)\n\tf, err = s.fs.Open(dstPath + \"/copy2/arvados/fs_site_test.go\")\n\tif c.Check(err, check.IsNil) {\n\t\tdefer f.Close()\n\t\tbuf, err := ioutil.ReadAll(f)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(string(buf), check.Equals, string(thisfile))\n\t}\n}\n\nfunc (s *SiteFSSuite) TestLocks(c *check.C) {\n\tDebugLocksPanicMode = false\n\tdone := make(chan struct{})\n\tdefer close(done)\n\tticker := time.NewTicker(2 * time.Second)\n\tgo func() {\n\t\tfor {\n\t\t\ttimeout := time.AfterFunc(5*time.Second, func() {\n\t\t\t\t// c.FailNow() doesn't break deadlock, but this sure does\n\t\t\t\tpanic(\"timed out -- deadlock?\")\n\t\t\t})\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\ttimeout.Stop()\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tc.Logf(\"MemorySize == %d\", s.fs.MemorySize())\n\t\t\t}\n\t\t\ttimeout.Stop()\n\t\t}\n\t}()\n\tncolls := 5\n\tndirs := 3\n\tnfiles := 5\n\tprojects := make([]Group, 5)\n\tfor pnum := range projects {\n\t\tc.Logf(\"make project %d\", pnum)\n\t\terr := s.client.RequestAndDecode(&projects[pnum], \"POST\", \"arvados/v1/groups\", nil, map[string]interface{}{\n\t\t\t\"group\": map[string]string{\n\t\t\t\t\"name\":        fmt.Sprintf(\"TestLocks project %d\", pnum),\n\t\t\t\t\"owner_uuid\":  fixtureAProjectUUID,\n\t\t\t\t\"group_class\": \"project\",\n\t\t\t},\n\t\t\t\"ensure_unique_name\": true,\n\t\t})\n\t\tc.Assert(err, check.IsNil)\n\t\tfor cnum := 0; cnum < ncolls; cnum++ {\n\t\t\tc.Logf(\"make project %d collection %d\", pnum, cnum)\n\t\t\tvar coll Collection\n\t\t\terr = s.client.RequestAndDecode(&coll, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\t\t\"collection\": map[string]string{\n\t\t\t\t\t\"name\":       fmt.Sprintf(\"TestLocks collection %d\", cnum),\n\t\t\t\t\t\"owner_uuid\": projects[pnum].UUID,\n\t\t\t\t},\n\t\t\t})\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tfor d1num := 0; d1num < ndirs; d1num++ {\n\t\t\t\ts.fs.Mkdir(fmt.Sprintf(\"/by_id/%s/dir1-%d\", coll.UUID, d1num), 0777)\n\t\t\t\tfor d2num := 0; d2num < ndirs; d2num++ {\n\t\t\t\t\ts.fs.Mkdir(fmt.Sprintf(\"/by_id/%s/dir1-%d/dir2-%d\", coll.UUID, d1num, d2num), 0777)\n\t\t\t\t\tfor fnum := 0; fnum < nfiles; fnum++ {\n\t\t\t\t\t\tf, err := s.fs.OpenFile(fmt.Sprintf(\"/by_id/%s/dir1-%d/dir2-%d/file-%d\", coll.UUID, d1num, d2num, fnum), os.O_CREATE|os.O_RDWR, 0755)\n\t\t\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\t\t\tf.Close()\n\t\t\t\t\t\tf, err = s.fs.OpenFile(fmt.Sprintf(\"/by_id/%s/dir1-%d/file-%d\", coll.UUID, d1num, fnum), os.O_CREATE|os.O_RDWR, 0755)\n\t\t\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\t\t\tf.Close()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tc.Log(\"sync\")\n\ts.fs.Sync()\n\tvar wg sync.WaitGroup\n\tfor n := 0; n < 100; n++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor pnum, project := range projects {\n\t\t\t\tc.Logf(\"read project 
%d\", pnum)\n\t\t\t\tif pnum%2 == 0 {\n\t\t\t\t\tf, err := s.fs.Open(fmt.Sprintf(\"/by_id/%s\", project.UUID))\n\t\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\t\tf.Readdir(-1)\n\t\t\t\t\tf.Close()\n\t\t\t\t}\n\t\t\t\tfor cnum := 0; cnum < ncolls; cnum++ {\n\t\t\t\t\tc.Logf(\"read project %d collection %d\", pnum, cnum)\n\t\t\t\t\tif pnum%2 == 0 {\n\t\t\t\t\t\tf, err := s.fs.Open(fmt.Sprintf(\"/by_id/%s/TestLocks collection %d\", project.UUID, cnum))\n\t\t\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\t\t\t_, err = f.Readdir(-1)\n\t\t\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\t\t\tf.Close()\n\t\t\t\t\t}\n\t\t\t\t\tif pnum%3 == 0 {\n\t\t\t\t\t\tfor d1num := 0; d1num < ndirs; d1num++ {\n\t\t\t\t\t\t\tf, err := s.fs.Open(fmt.Sprintf(\"/by_id/%s/TestLocks collection %d/dir1-%d\", project.UUID, cnum, d1num))\n\t\t\t\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\t\t\t\tfis, err := f.Readdir(-1)\n\t\t\t\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\t\t\t\tc.Assert(fis, check.HasLen, ndirs+nfiles)\n\t\t\t\t\t\t\tf.Close()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tfor d1num := 0; d1num < ndirs; d1num++ {\n\t\t\t\t\t\tfor d2num := 0; d2num < ndirs; d2num++ {\n\t\t\t\t\t\t\tf, err := s.fs.Open(fmt.Sprintf(\"/by_id/%s/TestLocks collection %d/dir1-%d/dir2-%d\", project.UUID, cnum, d1num, d2num))\n\t\t\t\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\t\t\t\tfis, err := f.Readdir(-1)\n\t\t\t\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\t\t\t\tc.Assert(fis, check.HasLen, nfiles)\n\t\t\t\t\t\t\tf.Close()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\tc.Logf(\"MemorySize == %d\", s.fs.MemorySize())\n}\n\nvar _ = check.Suite(&customFSSuite{})\n\ntype customFSSuite struct {\n\tclient *Client\n\tkc     keepClient\n\tfs     CustomFileSystem\n\tcoll   Collection\n}\n\nfunc (s *customFSSuite) SetUpTest(c *check.C) {\n\ts.client = &Client{\n\t\tAPIHost:   os.Getenv(\"ARVADOS_API_HOST\"),\n\t\tAuthToken: fixtureActiveToken,\n\t\tInsecure:  true,\n\t}\n\ts.kc = &keepClientStub{\n\t\tblocks:    map[string][]byte{},\n\t\tsigkey:    fixtureBlobSigningKey,\n\t\tsigttl:    fixtureBlobSigningTTL,\n\t\tauthToken: fixtureActiveToken,\n\t}\n\ttmpfs, err := s.coll.FileSystem(s.client, s.kc)\n\tc.Assert(err, check.IsNil)\n\tf, err := tmpfs.OpenFile(\"testfile.txt\", os.O_CREATE|os.O_RDWR, 0700)\n\tc.Assert(err, check.IsNil)\n\t_, err = f.Write([]byte(\"testfile contents\"))\n\tc.Assert(err, check.IsNil)\n\terr = f.Close()\n\tc.Assert(err, check.IsNil)\n\ts.coll.ManifestText, err = tmpfs.MarshalManifest(\".\")\n\tc.Assert(err, check.IsNil)\n\terr = s.client.RequestAndDecode(&s.coll, \"POST\", \"arvados/v1/collections\", nil, map[string]interface{}{\n\t\t\"collection\": map[string]interface{}{\n\t\t\t\"owner_uuid\":    fixtureAProjectUUID,\n\t\t\t\"name\":          fmt.Sprintf(\"test collection %d\", time.Now().UnixNano()),\n\t\t\t\"manifest_text\": s.coll.ManifestText,\n\t\t},\n\t})\n\tc.Assert(err, check.IsNil)\n\ts.fs = s.client.CustomFileSystem(s.kc)\n}\n\nfunc (s *customFSSuite) TearDownTest(c *check.C) {\n\tif s.coll.UUID != \"\" {\n\t\terr := s.client.RequestAndDecode(nil, \"DELETE\", \"arvados/v1/collections/\"+s.coll.UUID, nil, nil)\n\t\tc.Check(err, check.IsNil)\n\t}\n}\n\nfunc (s *customFSSuite) TestMountByPDH(c *check.C) {\n\terr := s.fs.MountByPDH(\"a/b/c\")\n\tc.Assert(err, check.Equals, ErrInvalidArgument)\n\terr = s.fs.MountByPDH(\"dirname\")\n\tc.Assert(err, check.IsNil)\n\t_, err = s.fs.Open(fmt.Sprintf(\"/dirname/%s/testfile.txt\", s.coll.UUID))\n\tc.Check(err, check.Equals, os.ErrNotExist)\n\tf, err := 
s.fs.Open(fmt.Sprintf(\"/dirname/%s/testfile.txt\", s.coll.PortableDataHash))\n\tc.Assert(err, check.IsNil)\n\tf.Close()\n}\n\nfunc (s *customFSSuite) TestMountByID(c *check.C) {\n\terr := s.fs.MountByID(\"a/b/c\")\n\tc.Assert(err, check.Equals, ErrInvalidArgument)\n\terr = s.fs.MountByID(\"dirname\")\n\tc.Assert(err, check.IsNil)\n\tf, err := s.fs.Open(fmt.Sprintf(\"/dirname/%s/testfile.txt\", s.coll.PortableDataHash))\n\tc.Assert(err, check.IsNil)\n\tf.Close()\n\tf, err = s.fs.Open(fmt.Sprintf(\"/dirname/%s/testfile.txt\", s.coll.UUID))\n\tc.Assert(err, check.IsNil)\n\tf.Close()\n\tf, err = s.fs.Open(fmt.Sprintf(\"/dirname/%s/%s/testfile.txt\", s.coll.OwnerUUID, s.coll.Name))\n\tc.Assert(err, check.IsNil)\n\tf.Close()\n}\n\nfunc (s *customFSSuite) TestMountTmp(c *check.C) {\n\terr := s.fs.MountTmp(\"a/b/c\")\n\tc.Assert(err, check.Equals, ErrInvalidArgument)\n\terr = s.fs.MountTmp(\"dirname\")\n\tc.Assert(err, check.IsNil)\n\t{\n\t\tf, err := s.fs.OpenFile(\"/dirname/testfile.txt\", os.O_CREATE|os.O_RDWR, 0700)\n\t\tc.Assert(err, check.IsNil)\n\t\t_, err = f.Write([]byte(\"file data in temporary collection\"))\n\t\tc.Check(err, check.IsNil)\n\t\terr = f.Close()\n\t\tc.Check(err, check.IsNil)\n\t}\n\t{\n\t\tf, err := s.fs.Open(\"/dirname/testfile.txt\")\n\t\tc.Assert(err, check.IsNil)\n\t\tdata, err := io.ReadAll(f)\n\t\tc.Check(string(data), check.Equals, \"file data in temporary collection\")\n\t\terr = f.Close()\n\t\tc.Check(err, check.IsNil)\n\t}\n\t{\n\t\tf, err := s.fs.Open(\"/dirname/.arvados#collection\")\n\t\tc.Assert(err, check.IsNil)\n\t\tvar tmpcoll Collection\n\t\terr = json.NewDecoder(f).Decode(&tmpcoll)\n\t\tc.Check(err, check.IsNil)\n\t\terr = f.Close()\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(tmpcoll.ManifestText, check.Not(check.Equals), \"\")\n\t\tc.Check(tmpcoll.ManifestText, check.Matches, `.*testfile\\.txt\\n`)\n\t\tc.Check(tmpcoll.UUID, check.Equals, \"\")\n\t}\n}\n\nfunc (s *customFSSuite) TestMountHome(c *check.C) {\n\terr := s.fs.MountHome(\"a/b/c\")\n\tc.Assert(err, check.Equals, ErrInvalidArgument)\n\terr = s.fs.MountHome(\".\")\n\tc.Assert(err, check.Equals, ErrInvalidArgument)\n\terr = s.fs.MountHome(\"..\")\n\tc.Assert(err, check.Equals, ErrInvalidArgument)\n\terr = s.fs.MountHome(\"dirname\")\n\tc.Assert(err, check.IsNil)\n\t{\n\t\tf, err := s.fs.Open(\"/dirname/A Project/zzzzz-4zz18-fy296fx3hot09f7 added sometime/foo\")\n\t\tc.Assert(err, check.IsNil)\n\t\tf.Close()\n\t}\n\t{\n\t\t_, err := s.fs.OpenFile(\"/dirname/testfile.txt\", os.O_CREATE, 0700)\n\t\tc.Check(err, check.Equals, ErrInvalidOperation)\n\t}\n\t{\n\t\t_, err := s.fs.Open(\"/dirname/.arvados#collection\")\n\t\tc.Check(os.IsNotExist(err), check.Equals, true)\n\t}\n}\n"
  },
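  {
    "path": "sdk/go/arvados/example_snapshot_splice.go",
    "content": "// Editor's sketch -- a hypothetical file, not part of the Arvados tree.\n// It reduces the Snapshot/Splice round trip exercised by\n// TestSnapshotSplice to its core: Snapshot captures a point-in-time\n// copy of a file or directory, Splice attaches that copy at a new\n// path, and spliced copies are independent of the original. The\n// function name is invented.\n\npackage arvados\n\nfunc copySubtree(fs CustomFileSystem, srcPath, dstPath string) error {\n\tsnap, err := Snapshot(fs, srcPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// dstPath's parent must already exist, and the target must be\n\t// inside a collection: per the tests above, attaching at project\n\t// level (or attaching a file at a collection root) fails with\n\t// ErrInvalidOperation.\n\treturn Splice(fs, dstPath, snap)\n}\n"
  },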
  {
    "path": "sdk/go/arvados/fs_users.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"os\"\n)\n\nfunc (fs *customFileSystem) usersLoadOne(parent inode, name string) (inode, error) {\n\tvar resp UserList\n\terr := fs.RequestAndDecode(&resp, \"GET\", \"arvados/v1/users\", nil, ResourceListParams{\n\t\tCount:   \"none\",\n\t\tFilters: []Filter{{\"username\", \"=\", name}},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(resp.Items) == 0 {\n\t\treturn nil, os.ErrNotExist\n\t}\n\tuser := resp.Items[0]\n\treturn fs.newProjectDir(parent, user.Username, user.UUID, nil), nil\n}\n\nfunc (fs *customFileSystem) usersLoadAll(parent inode) ([]inode, error) {\n\tparams := ResourceListParams{\n\t\tCount: \"none\",\n\t\tOrder: \"uuid\",\n\t}\n\tvar inodes []inode\n\tfor {\n\t\tvar resp UserList\n\t\terr := fs.RequestAndDecode(&resp, \"GET\", \"arvados/v1/users\", nil, params)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if len(resp.Items) == 0 {\n\t\t\treturn inodes, nil\n\t\t}\n\t\tfor _, user := range resp.Items {\n\t\t\tif user.Username == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tinodes = append(inodes, fs.newProjectDir(parent, user.Username, user.UUID, nil))\n\t\t}\n\t\tparams.Filters = []Filter{{\"uuid\", \">\", resp.Items[len(resp.Items)-1].UUID}}\n\t}\n}\n"
  },
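  {
    "path": "sdk/go/arvados/example_user_lookup.go",
    "content": "// Editor's sketch -- a hypothetical file, not part of the Arvados tree.\n// usersLoadOne above resolves one username with a single filtered\n// list request rather than walking the whole user list; this helper\n// shows the same lookup outside the filesystem machinery. The\n// function name is invented.\n\npackage arvados\n\nimport \"os\"\n\nfunc lookupUserUUID(c *Client, username string) (string, error) {\n\tvar resp UserList\n\terr := c.RequestAndDecode(&resp, \"GET\", \"arvados/v1/users\", nil, ResourceListParams{\n\t\tCount:   \"none\",\n\t\tFilters: []Filter{{\"username\", \"=\", username}},\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(resp.Items) == 0 {\n\t\treturn \"\", os.ErrNotExist\n\t}\n\treturn resp.Items[0].UUID, nil\n}\n"
  },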
  {
    "path": "sdk/go/arvados/group.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"time\"\n)\n\n// Group is an arvados#group record\ntype Group struct {\n\tUUID               string                 `json:\"uuid\"`\n\tName               string                 `json:\"name\"`\n\tOwnerUUID          string                 `json:\"owner_uuid\"`\n\tGroupClass         string                 `json:\"group_class\"`\n\tEtag               string                 `json:\"etag\"`\n\tTrashAt            *time.Time             `json:\"trash_at\"`\n\tCreatedAt          time.Time              `json:\"created_at\"`\n\tModifiedAt         time.Time              `json:\"modified_at\"`\n\tModifiedByUserUUID string                 `json:\"modified_by_user_uuid\"`\n\tDeleteAt           *time.Time             `json:\"delete_at\"`\n\tIsTrashed          bool                   `json:\"is_trashed\"`\n\tProperties         map[string]interface{} `json:\"properties\"`\n\tWritableBy         []string               `json:\"writable_by,omitempty\"`\n\tDescription        string                 `json:\"description\"`\n\tFrozenByUUID       string                 `json:\"frozen_by_uuid\"`\n\tCanWrite           bool                   `json:\"can_write\"`\n\tCanManage          bool                   `json:\"can_manage\"`\n}\n\n// GroupList is an arvados#groupList resource.\ntype GroupList struct {\n\tItems          []Group       `json:\"items\"`\n\tItemsAvailable int           `json:\"items_available\"`\n\tOffset         int           `json:\"offset\"`\n\tLimit          int           `json:\"limit\"`\n\tIncluded       []interface{} `json:\"included\"`\n}\n\n// ObjectList is an arvados#objectList resource.\ntype ObjectList struct {\n\tIncluded       []interface{} `json:\"included\"`\n\tItems          []interface{} `json:\"items\"`\n\tItemsAvailable int           `json:\"items_available\"`\n\tOffset         int           `json:\"offset\"`\n\tLimit          int           `json:\"limit\"`\n}\n\nfunc (g Group) resourceName() string {\n\treturn \"group\"\n}\n"
  },
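`ObjectList` is the shape returned by endpoints that can yield heterogeneous items, notably the group-contents API. A sketch of listing a project's contents (the group UUID is a placeholder; after generic JSON decoding, each item is a `map[string]interface{}` whose `kind` field identifies the record type):

```go
package main

import (
	"fmt"
	"log"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	client := arvados.NewClientFromEnv()
	uuid := "zzzzz-j7d0g-xxxxxxxxxxxxxxx" // placeholder project UUID
	var contents arvados.ObjectList
	err := client.RequestAndDecode(&contents, "GET",
		"arvados/v1/groups/"+uuid+"/contents", nil,
		arvados.ResourceListParams{})
	if err != nil {
		log.Fatal(err)
	}
	for _, item := range contents.Items {
		if obj, ok := item.(map[string]interface{}); ok {
			fmt.Println(obj["kind"], obj["uuid"])
		}
	}
}
```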
  {
    "path": "sdk/go/arvados/integration_test_cluster.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n)\n\n// IntegrationTestCluster returns the cluster that has been set up by\n// the integration test framework (see /build/run-tests.sh). It panics\n// on error.\nfunc IntegrationTestCluster() *Cluster {\n\tconfig, err := GetConfig(filepath.Join(os.Getenv(\"WORKSPACE\"), \"tmp\", \"arvados.yml\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcluster, err := config.GetCluster(\"\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn cluster\n}\n"
  },
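Outside the test harness, the same two-step `GetConfig`/`GetCluster` pattern loads a production cluster configuration. A brief sketch (assuming the standard config path constant `arvados.DefaultConfigFile`, i.e. `/etc/arvados/config.yml`):

```go
package main

import (
	"fmt"
	"log"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	config, err := arvados.GetConfig(arvados.DefaultConfigFile)
	if err != nil {
		log.Fatal(err)
	}
	// An empty cluster ID selects the sole configured cluster; it is
	// an error if the file defines more than one.
	cluster, err := config.GetCluster("")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cluster.ClusterID)
}
```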
  {
    "path": "sdk/go/arvados/keep_block.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n)\n\n// SizedDigest is a minimal Keep block locator: hash+size\ntype SizedDigest string\n\n// Size returns the size of the data block, in bytes.\nfunc (sd SizedDigest) Size() int64 {\n\tn, _ := strconv.ParseInt(strings.Split(string(sd), \"+\")[1], 10, 64)\n\treturn n\n}\n"
  },
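Note that `Size()` assumes the locator actually carries a `+size` hint: `strings.Split` on a bare 32-hex-digit hash yields a single-element slice, so the `[1]` index would panic. A quick usage sketch:

```go
package main

import (
	"fmt"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	// md5("") plus a zero-byte size hint: the locator of an empty block.
	sd := arvados.SizedDigest("d41d8cd98f00b204e9800998ecf8427e+0")
	fmt.Println(sd.Size()) // prints 0
}
```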
  {
    "path": "sdk/go/arvados/keep_cache.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/md5\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/sys/unix\"\n)\n\ntype KeepGateway interface {\n\tReadAt(locator string, dst []byte, offset int) (int, error)\n\tBlockRead(ctx context.Context, opts BlockReadOptions) (int, error)\n\tBlockWrite(ctx context.Context, opts BlockWriteOptions) (BlockWriteResponse, error)\n\tLocalLocator(locator string) (string, error)\n}\n\n// DiskCache wraps KeepGateway, adding a disk-based cache layer.\n//\n// A DiskCache is automatically incorporated into the backend stack of\n// each keepclient.KeepClient. Most programs do not need to use\n// DiskCache directly.\ntype DiskCache struct {\n\tKeepGateway\n\tDir     string\n\tMaxSize ByteSizeOrPercent\n\tLogger  logrus.FieldLogger\n\tMetrics KeepClientMetrics\n\n\t*sharedCache\n\tsetupOnce sync.Once\n}\n\nvar (\n\tsharedCachesLock sync.Mutex\n\tsharedCaches     = map[string]*sharedCache{}\n)\n\n// sharedCache has fields that coordinate the cache usage in a single\n// cache directory; it can be shared by multiple DiskCaches.\n//\n// This serves to share a single pool of held-open filehandles, a\n// single tidying goroutine, etc., even when the program (like\n// keep-web) uses multiple KeepGateway stacks that use different auth\n// tokens, etc.\ntype sharedCache struct {\n\tdir     string\n\tmaxSize ByteSizeOrPercent\n\n\ttidying        int32 // see tidy()\n\tdefaultMaxSize int64\n\n\t// The \"heldopen\" fields are used to open cache files for\n\t// reading, and leave them open for future/concurrent ReadAt\n\t// operations. 
See quickReadAt.\n\theldopen     map[string]*openFileEnt\n\theldopenMax  int\n\theldopenLock sync.Mutex\n\n\t// The \"writing\" fields allow multiple concurrent/sequential\n\t// ReadAt calls to be notified as a single\n\t// read-block-from-backend-into-cache goroutine fills the\n\t// cache file.\n\twriting     map[string]*writeprogress\n\twritingCond *sync.Cond\n\twritingLock sync.Mutex\n\n\tsizeMeasured    int64 // actual size on disk after last tidy(); zero if not measured yet\n\tsizeEstimated   int64 // last measured size, plus files we have written since\n\tlastFileCount   int64 // number of files on disk at last count\n\twritesSinceTidy int64 // number of files written since last tidy()\n}\n\ntype writeprogress struct {\n\tcond    *sync.Cond     // broadcast whenever size or done changes\n\tdone    bool           // size and err have their final values\n\tsize    int            // bytes copied into cache file so far\n\terr     error          // error encountered while copying from backend to cache\n\tsharedf *os.File       // readable filehandle, usable if done && err==nil\n\treaders sync.WaitGroup // goroutines that haven't finished reading from f yet\n}\n\ntype openFileEnt struct {\n\tsync.RWMutex\n\tf   *os.File\n\terr error // if err is non-nil, f should not be used.\n}\n\nconst (\n\tcacheFileSuffix = \".keepcacheblock\"\n\ttmpFileSuffix   = \".tmp\"\n)\n\nfunc (cache *DiskCache) setup() {\n\tsharedCachesLock.Lock()\n\tdefer sharedCachesLock.Unlock()\n\tdir := cache.Dir\n\tif sharedCaches[dir] == nil {\n\t\tcache.debugf(\"initializing sharedCache using %s with max size %d\", dir, cache.MaxSize)\n\t\tsharedCaches[dir] = &sharedCache{dir: dir, maxSize: cache.MaxSize}\n\t} else {\n\t\tcache.debugf(\"using existing sharedCache using %s with max size %d (would have initialized with %d)\", dir, sharedCaches[dir].maxSize, cache.MaxSize)\n\t}\n\tcache.sharedCache = sharedCaches[dir]\n}\n\nfunc (cache *DiskCache) cacheFile(locator string) string {\n\thash := locator\n\tif i := strings.Index(hash, \"+\"); i > 0 {\n\t\thash = hash[:i]\n\t}\n\treturn filepath.Join(cache.dir, hash[:3], hash+cacheFileSuffix)\n}\n\n// Open a cache file, creating the parent dir if necessary.\nfunc (cache *DiskCache) openFile(name string, flags int) (*os.File, error) {\n\tf, err := os.OpenFile(name, flags, 0600)\n\tif os.IsNotExist(err) {\n\t\t// Create the parent dir and try again. 
(We could have\n\t\t// checked/created the parent dir before, but that\n\t\t// would be less efficient in the much more common\n\t\t// situation where it already exists.)\n\t\tparent, _ := filepath.Split(name)\n\t\tos.Mkdir(parent, 0700)\n\t\tf, err = os.OpenFile(name, flags, 0600)\n\t}\n\treturn f, err\n}\n\n// Rename a file, creating the new path's parent dir if necessary.\nfunc (cache *DiskCache) rename(old, new string) error {\n\tif nil == os.Rename(old, new) {\n\t\treturn nil\n\t}\n\tparent, _ := filepath.Split(new)\n\tos.Mkdir(parent, 0700)\n\treturn os.Rename(old, new)\n}\n\nfunc (cache *DiskCache) debugf(format string, args ...interface{}) {\n\tlogger := cache.Logger\n\tif logger == nil {\n\t\treturn\n\t}\n\tlogger.Debugf(format, args...)\n}\n\n// BlockWrite writes through to the wrapped KeepGateway, and (if\n// possible) retains a copy of the written block in the cache.\nfunc (cache *DiskCache) BlockWrite(ctx context.Context, opts BlockWriteOptions) (BlockWriteResponse, error) {\n\tcache.setupOnce.Do(cache.setup)\n\tunique := fmt.Sprintf(\"%x.%p%s\", os.Getpid(), &opts, tmpFileSuffix)\n\ttmpfilename := filepath.Join(cache.dir, \"tmp\", unique)\n\ttmpfile, err := cache.openFile(tmpfilename, os.O_CREATE|os.O_EXCL|os.O_RDWR)\n\tif err != nil {\n\t\tcache.debugf(\"BlockWrite: open(%s) failed: %s\", tmpfilename, err)\n\t\treturn cache.KeepGateway.BlockWrite(ctx, opts)\n\t}\n\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tcopyerr := make(chan error, 1)\n\n\t// Start a goroutine to copy the caller's source data to\n\t// tmpfile, a hash checker, and (via pipe) the wrapped\n\t// KeepGateway.\n\tpipereader, pipewriter := io.Pipe()\n\tdefer pipereader.Close()\n\tgo func() {\n\t\t// Note this is a double-close (which is a no-op) in\n\t\t// the happy path.\n\t\tdefer tmpfile.Close()\n\t\t// Note this is a no-op in the happy path (the\n\t\t// uniquely named tmpfilename will have been renamed).\n\t\tdefer os.Remove(tmpfilename)\n\t\tdefer pipewriter.Close()\n\n\t\t// Copy from opts.Data or opts.Reader, depending on\n\t\t// which was provided.\n\t\tvar src io.Reader\n\t\tif opts.Data != nil {\n\t\t\tsrc = bytes.NewReader(opts.Data)\n\t\t} else {\n\t\t\tsrc = opts.Reader\n\t\t}\n\n\t\thashcheck := md5.New()\n\t\tn, err := io.Copy(io.MultiWriter(tmpfile, pipewriter, hashcheck), src)\n\t\tif err != nil {\n\t\t\tcopyerr <- err\n\t\t\tcancel()\n\t\t\treturn\n\t\t} else if opts.DataSize > 0 && opts.DataSize != int(n) {\n\t\t\tcopyerr <- fmt.Errorf(\"block size %d did not match provided size %d\", n, opts.DataSize)\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t\terr = tmpfile.Close()\n\t\tif err != nil {\n\t\t\t// Don't rename tmpfile into place, but allow\n\t\t\t// the BlockWrite call to succeed if nothing\n\t\t\t// else goes wrong.\n\t\t\treturn\n\t\t}\n\t\thash := fmt.Sprintf(\"%x\", hashcheck.Sum(nil))\n\t\tif opts.Hash != \"\" && opts.Hash != hash {\n\t\t\t// Even if the wrapped KeepGateway doesn't\n\t\t\t// notice a problem, this should count as an\n\t\t\t// error.\n\t\t\tcopyerr <- fmt.Errorf(\"block hash %s did not match provided hash %s\", hash, opts.Hash)\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t\tcachefilename := cache.cacheFile(hash)\n\t\terr = cache.rename(tmpfilename, cachefilename)\n\t\tif err != nil {\n\t\t\tcache.debugf(\"BlockWrite: rename(%s, %s) failed: %s\", tmpfilename, cachefilename, err)\n\t\t}\n\t\tatomic.AddInt64(&cache.sizeEstimated, int64(n))\n\t\tcache.gotidy()\n\t}()\n\n\t// Write through to the wrapped KeepGateway from the pipe,\n\t// instead of the original 
reader.\n\tnewopts := opts\n\tif newopts.DataSize == 0 {\n\t\tnewopts.DataSize = len(newopts.Data)\n\t}\n\tnewopts.Reader = pipereader\n\tnewopts.Data = nil\n\n\tresp, err := cache.KeepGateway.BlockWrite(ctx, newopts)\n\tif len(copyerr) > 0 {\n\t\t// If the copy-to-pipe goroutine failed, that error\n\t\t// will be more helpful than the resulting \"context\n\t\t// canceled\" or \"read [from pipereader] failed\" error\n\t\t// seen by the wrapped KeepGateway.\n\t\t//\n\t\t// If the wrapped KeepGateway encounters an error\n\t\t// before all the data is copied into the pipe, it\n\t\t// stops reading from the pipe, which causes the\n\t\t// io.Copy() in the goroutine to block until our\n\t\t// deferred pipereader.Close() call runs. In that case\n\t\t// len(copyerr)==0 here, so the wrapped KeepGateway\n\t\t// error is the one we return to our caller.\n\t\terr = <-copyerr\n\t}\n\treturn resp, err\n}\n\ntype funcwriter func([]byte) (int, error)\n\nfunc (fw funcwriter) Write(p []byte) (int, error) {\n\treturn fw(p)\n}\n\n// ReadAt reads the entire block from the wrapped KeepGateway into the\n// cache if needed, and copies the requested portion into the provided\n// slice.\n//\n// ReadAt returns as soon as the requested portion is available in the\n// cache. The remainder of the block may continue to be copied into\n// the cache in the background.\nfunc (cache *DiskCache) ReadAt(locator string, dst []byte, offset int) (int, error) {\n\treturn cache.readAt(locator, dst, offset, false)\n}\n\nfunc (cache *DiskCache) readAt(locator string, dst []byte, offset int, checkCacheOnly bool) (int, error) {\n\tcache.setupOnce.Do(cache.setup)\n\tcachefilename := cache.cacheFile(locator)\n\tif n, err := cache.quickReadAt(cachefilename, dst, offset); err == nil {\n\t\treturn n, nil\n\t}\n\n\tcache.writingLock.Lock()\n\tprogress := cache.writing[cachefilename]\n\tif progress == nil {\n\t\tif checkCacheOnly {\n\t\t\tcache.writingLock.Unlock()\n\t\t\treturn 0, ErrNotCached\n\t\t}\n\t\t// Nobody else is fetching from backend, so we'll add\n\t\t// a new entry to cache.writing, fetch in a separate\n\t\t// goroutine.\n\t\tprogress = &writeprogress{}\n\t\tprogress.cond = sync.NewCond(&sync.Mutex{})\n\t\tif cache.writing == nil {\n\t\t\tcache.writing = map[string]*writeprogress{}\n\t\t}\n\t\tcache.writing[cachefilename] = progress\n\n\t\t// Start a goroutine to copy from backend to f. As\n\t\t// data arrives, wake up any waiting loops (see below)\n\t\t// so ReadAt() requests for partial data can return as\n\t\t// soon as the relevant bytes have been copied.\n\t\tgo func() {\n\t\t\tvar size int\n\t\t\tvar err error\n\t\t\tdefer func() {\n\t\t\t\tif err == nil && progress.sharedf != nil {\n\t\t\t\t\terr = progress.sharedf.Sync()\n\t\t\t\t}\n\t\t\t\tprogress.cond.L.Lock()\n\t\t\t\tprogress.err = err\n\t\t\t\tprogress.done = true\n\t\t\t\tprogress.size = size\n\t\t\t\tprogress.cond.L.Unlock()\n\t\t\t\tprogress.cond.Broadcast()\n\t\t\t\tcache.writingLock.Lock()\n\t\t\t\tdelete(cache.writing, cachefilename)\n\t\t\t\tcache.writingLock.Unlock()\n\n\t\t\t\t// Wait for other goroutines to wake\n\t\t\t\t// up, notice we're done, and use our\n\t\t\t\t// sharedf to read their data, before\n\t\t\t\t// we close sharedf.\n\t\t\t\t//\n\t\t\t\t// Nobody can join the WaitGroup after\n\t\t\t\t// the progress entry is deleted from\n\t\t\t\t// cache.writing above. 
Therefore,\n\t\t\t\t// this Wait ensures nobody else is\n\t\t\t\t// accessing progress, and we don't\n\t\t\t\t// need to lock anything.\n\t\t\t\tprogress.readers.Wait()\n\t\t\t\tprogress.sharedf.Close()\n\t\t\t}()\n\t\t\tprogress.sharedf, err = cache.openFile(cachefilename, os.O_CREATE|os.O_RDWR)\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"ReadAt: %w\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = syscall.Flock(int(progress.sharedf.Fd()), syscall.LOCK_SH)\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"flock(%s, lock_sh) failed: %w\", cachefilename, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcache.Metrics.CacheMisses.Add(1)\n\t\t\tsize, err = cache.KeepGateway.BlockRead(context.Background(), BlockReadOptions{\n\t\t\t\tLocator: locator,\n\t\t\t\tWriteTo: funcwriter(func(p []byte) (int, error) {\n\t\t\t\t\tn, err := progress.sharedf.Write(p)\n\t\t\t\t\tif n > 0 {\n\t\t\t\t\t\tprogress.cond.L.Lock()\n\t\t\t\t\t\tprogress.size += n\n\t\t\t\t\t\tprogress.cond.L.Unlock()\n\t\t\t\t\t\tprogress.cond.Broadcast()\n\t\t\t\t\t}\n\t\t\t\t\treturn n, err\n\t\t\t\t})})\n\t\t\tatomic.AddInt64(&cache.sizeEstimated, int64(size))\n\t\t\tcache.gotidy()\n\t\t}()\n\t}\n\t// We add ourselves to the readers WaitGroup so the\n\t// fetch-from-backend goroutine doesn't close the shared\n\t// filehandle before we read the data we need from it.\n\tprogress.readers.Add(1)\n\tdefer progress.readers.Done()\n\tcache.writingLock.Unlock()\n\n\tprogress.cond.L.Lock()\n\tfor !progress.done && progress.size < len(dst)+offset {\n\t\tprogress.cond.Wait()\n\t}\n\tsharedf := progress.sharedf\n\terr := progress.err\n\tprogress.cond.L.Unlock()\n\n\tif err != nil {\n\t\t// If the copy-from-backend goroutine encountered an\n\t\t// error, we return that error. (Even if we read the\n\t\t// desired number of bytes, the error might be\n\t\t// something like BadChecksum so we should not ignore\n\t\t// it.)\n\t\treturn 0, err\n\t}\n\tif len(dst) == 0 {\n\t\t// It's possible that sharedf==nil here (the writer\n\t\t// goroutine might not have done anything at all yet)\n\t\t// and we don't need it anyway because no bytes are\n\t\t// being read. 
Reading zero bytes seems pointless, but\n\t\t// if someone does it, we might as well return\n\t\t// suitable values, rather than risk a crash by\n\t\t// calling sharedf.ReadAt() when sharedf is nil.\n\t\treturn 0, nil\n\t}\n\treturn sharedf.ReadAt(dst, int64(offset))\n}\n\nvar quickReadAtLostRace = errors.New(\"quickReadAt: lost race\")\n\n// Remove the cache entry for the indicated cachefilename if it\n// matches expect (quickReadAt() usage), or if expect is nil (tidy()\n// usage).\n//\n// If expect is non-nil, close expect's filehandle.\n//\n// If expect is nil and a different cache entry is deleted, close its\n// filehandle.\nfunc (cache *DiskCache) deleteHeldopen(cachefilename string, expect *openFileEnt) {\n\tneedclose := expect\n\n\tcache.heldopenLock.Lock()\n\tfound := cache.heldopen[cachefilename]\n\tif found != nil && (expect == nil || expect == found) {\n\t\tdelete(cache.heldopen, cachefilename)\n\t\tneedclose = found\n\t}\n\tcache.heldopenLock.Unlock()\n\n\tif needclose != nil {\n\t\tneedclose.Lock()\n\t\tdefer needclose.Unlock()\n\t\tif needclose.f != nil {\n\t\t\tneedclose.f.Close()\n\t\t\tneedclose.f = nil\n\t\t}\n\t}\n}\n\n// heldopenEnt returns a new or existing *openFileEnt entry from\n// cache.heldopen.\nfunc (cache *DiskCache) heldopenEnt(cachefilename string) *openFileEnt {\n\tcache.heldopenLock.Lock()\n\tlocked := true\n\tdefer func() {\n\t\tif locked {\n\t\t\tcache.heldopenLock.Unlock()\n\t\t}\n\t}()\n\tif cache.heldopenMax == 0 {\n\t\t// Choose a reasonable limit on open cache files based\n\t\t// on RLIMIT_NOFILE. Note Go automatically raises\n\t\t// softlimit to hardlimit, so it's typically 1048576,\n\t\t// not 1024.\n\t\tlim := syscall.Rlimit{}\n\t\terr := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim)\n\t\tif err != nil {\n\t\t\tcache.heldopenMax = 100\n\t\t} else if lim.Cur > 400000 {\n\t\t\tcache.heldopenMax = 10000\n\t\t} else {\n\t\t\tcache.heldopenMax = int(lim.Cur / 40)\n\t\t}\n\t}\n\tif heldopen, ok := cache.heldopen[cachefilename]; ok {\n\t\treturn heldopen\n\t}\n\tif len(cache.heldopen) >= cache.heldopenMax {\n\t\t// Rather than go to the trouble of tracking last\n\t\t// access time, just close all files, and open again\n\t\t// as needed. 
Even in the worst pathological case,\n\t\t// this causes one extra open+close per read, which is\n\t\t// not especially bad (see benchmarks).\n\t\tgo func(m map[string]*openFileEnt) {\n\t\t\tfor _, heldopen := range m {\n\t\t\t\theldopen.Lock()\n\t\t\t\tdefer heldopen.Unlock()\n\t\t\t\tif heldopen.f != nil {\n\t\t\t\t\theldopen.f.Close()\n\t\t\t\t\theldopen.f = nil\n\t\t\t\t}\n\t\t\t}\n\t\t}(cache.heldopen)\n\t\tcache.heldopen = nil\n\t}\n\tif cache.heldopen == nil {\n\t\tcache.heldopen = make(map[string]*openFileEnt, cache.heldopenMax)\n\t}\n\theldopen := &openFileEnt{}\n\theldopen.Lock()\n\tdefer heldopen.Unlock()\n\tcache.heldopen[cachefilename] = heldopen\n\n\t// Unlock the cache-wide lock early (and skip the deferred\n\t// Unlock) to avoid holding it while opening and flocking the\n\t// file.\n\tcache.heldopenLock.Unlock()\n\tlocked = false\n\n\t// Open and flock the file, and save the filehandle in\n\t// heldopen.f (or an error in heldopen.err).\n\tf, err := os.Open(cachefilename)\n\tif err != nil {\n\t\tgo cache.deleteHeldopen(cachefilename, heldopen)\n\t\theldopen.err = err\n\t\treturn heldopen\n\t}\n\terr = syscall.Flock(int(f.Fd()), syscall.LOCK_SH)\n\tif err != nil {\n\t\tf.Close()\n\t\tgo cache.deleteHeldopen(cachefilename, heldopen)\n\t\theldopen.err = err\n\t\treturn heldopen\n\t}\n\n\t// Python SDK's hits_counter metric (reported in arv-mount\n\t// crunchstat as \"keepcache hit\") counts blocks reads that are\n\t// served from cache.  But here, applications generally do\n\t// partial block reads, not full block reads.  So, as a proxy,\n\t// we count how many times we open a cache file to serve a\n\t// read request.\n\tcache.Metrics.CacheHits.Add(1)\n\n\theldopen.f = f\n\treturn heldopen\n}\n\n// quickReadAt attempts to use a cached-filehandle approach to read\n// from the indicated file. The expectation is that the caller\n// (ReadAt) will try a more robust approach when this fails, so\n// quickReadAt doesn't try especially hard to ensure success in\n// races. In particular, when there are concurrent calls, and one\n// fails, that can cause others to fail too.\nfunc (cache *DiskCache) quickReadAt(cachefilename string, dst []byte, offset int) (int, error) {\n\theldopen := cache.heldopenEnt(cachefilename)\n\t// Acquire read lock to ensure (1) initialization is complete,\n\t// if it's done by a different goroutine, and (2) any \"delete\n\t// old/unused entries\" waits for our read to finish before\n\t// closing the file.\n\theldopen.RLock()\n\tdefer heldopen.RUnlock()\n\tif heldopen.err != nil {\n\t\t// Other goroutine encountered an error during setup\n\t\treturn 0, heldopen.err\n\t} else if heldopen.f == nil {\n\t\t// Other goroutine closed the file before we got RLock\n\t\treturn 0, quickReadAtLostRace\n\t}\n\n\t// If another goroutine is currently writing the file, wait\n\t// for it to catch up to the end of the range we need.\n\tcache.writingLock.Lock()\n\tprogress := cache.writing[cachefilename]\n\tcache.writingLock.Unlock()\n\tif progress != nil {\n\t\tprogress.cond.L.Lock()\n\t\tfor !progress.done && progress.size < len(dst)+offset {\n\t\t\tprogress.cond.Wait()\n\t\t}\n\t\tprogress.cond.L.Unlock()\n\t\t// If size<needed && progress.err!=nil here, we'll end\n\t\t// up reporting a less helpful \"EOF reading from cache\n\t\t// file\" below, instead of the actual error fetching\n\t\t// from upstream to cache file.  
This is OK though,\n\t\t// because our caller (ReadAt) doesn't even report our\n\t\t// error, it just retries.\n\t}\n\n\tn, err := heldopen.f.ReadAt(dst, int64(offset))\n\tif err != nil {\n\t\t// wait for any concurrent users to finish, then\n\t\t// delete this cache entry in case reopening the\n\t\t// backing file helps.\n\t\tgo cache.deleteHeldopen(cachefilename, heldopen)\n\t}\n\treturn n, err\n}\n\n// BlockRead reads an entire block using a 128 KiB buffer.\nfunc (cache *DiskCache) BlockRead(ctx context.Context, opts BlockReadOptions) (int, error) {\n\tcache.setupOnce.Do(cache.setup)\n\ti := strings.Index(opts.Locator, \"+\")\n\tif i < 0 || i >= len(opts.Locator) {\n\t\treturn 0, errors.New(\"invalid block locator: no size hint\")\n\t}\n\tsizestr := opts.Locator[i+1:]\n\ti = strings.Index(sizestr, \"+\")\n\tif i > 0 {\n\t\tsizestr = sizestr[:i]\n\t}\n\tblocksize, err := strconv.ParseInt(sizestr, 10, 32)\n\tif err != nil || blocksize < 0 {\n\t\treturn 0, errors.New(\"invalid block locator: invalid size hint\")\n\t}\n\tif opts.CheckCacheOnly {\n\t\treturn cache.readAt(opts.Locator, nil, 0, true)\n\t}\n\n\toffset := 0\n\tbuf := make([]byte, 131072)\n\tfor offset < int(blocksize) {\n\t\tif ctx.Err() != nil {\n\t\t\treturn offset, ctx.Err()\n\t\t}\n\t\tif int(blocksize)-offset < len(buf) {\n\t\t\tbuf = buf[:int(blocksize)-offset]\n\t\t}\n\t\tnr, err := cache.ReadAt(opts.Locator, buf, offset)\n\t\tif nr > 0 {\n\t\t\tnw, err := opts.WriteTo.Write(buf[:nr])\n\t\t\tif err != nil {\n\t\t\t\treturn offset + nw, err\n\t\t\t}\n\t\t}\n\t\toffset += nr\n\t\tif err != nil {\n\t\t\treturn offset, err\n\t\t}\n\t}\n\treturn offset, nil\n}\n\n// Start a tidy() goroutine, unless one is already running / recently\n// finished.\nfunc (cache *DiskCache) gotidy() {\n\twrites := atomic.AddInt64(&cache.writesSinceTidy, 1)\n\t// Skip if another tidy goroutine is running in this process.\n\tn := atomic.AddInt32(&cache.tidying, 1)\n\tif n != 1 {\n\t\tatomic.AddInt32(&cache.tidying, -1)\n\t\treturn\n\t}\n\t// Skip if sizeEstimated is based on an actual measurement and\n\t// is below maxSize, and we haven't done very many writes\n\t// since last tidy (defined as 1% of number of cache files at\n\t// last count).\n\tif cache.sizeMeasured > 0 &&\n\t\tatomic.LoadInt64(&cache.sizeEstimated) < atomic.LoadInt64(&cache.defaultMaxSize) &&\n\t\twrites < cache.lastFileCount/100 {\n\t\tatomic.AddInt32(&cache.tidying, -1)\n\t\treturn\n\t}\n\tgo func() {\n\t\tcache.tidy()\n\t\tatomic.StoreInt64(&cache.writesSinceTidy, 0)\n\t\tatomic.AddInt32(&cache.tidying, -1)\n\t}()\n}\n\n// Delete cache files as needed to control disk usage.\nfunc (cache *DiskCache) tidy() {\n\tmaxsize := int64(cache.maxSize.ByteSize())\n\tif maxsize < 1 {\n\t\tmaxsize = atomic.LoadInt64(&cache.defaultMaxSize)\n\t\tif maxsize == 0 {\n\t\t\t// defaultMaxSize not yet computed. 
Use 10% of\n\t\t\t// filesystem capacity (or different\n\t\t\t// percentage if indicated by cache.maxSize)\n\t\t\tpct := cache.maxSize.Percent()\n\t\t\tif pct == 0 {\n\t\t\t\tpct = 10\n\t\t\t}\n\t\t\tvar stat unix.Statfs_t\n\t\t\tif nil == unix.Statfs(cache.dir, &stat) {\n\t\t\t\tmaxsize = int64(stat.Blocks) * stat.Bsize * pct / 100\n\t\t\t\tatomic.StoreInt64(&cache.defaultMaxSize, maxsize)\n\t\t\t\tcache.debugf(\"setting cache size %d = blocks %d * bsize %d * pct %d / 100\", maxsize, stat.Blocks, stat.Bsize, pct)\n\t\t\t} else {\n\t\t\t\t// In this case we will set\n\t\t\t\t// defaultMaxSize below after\n\t\t\t\t// measuring current usage.\n\t\t\t}\n\t\t}\n\t}\n\n\t// Bail if a tidy goroutine is running in a different process.\n\tlockfile, err := cache.openFile(filepath.Join(cache.dir, \"tmp\", \"tidy.lock\"), os.O_CREATE|os.O_WRONLY)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer lockfile.Close()\n\terr = syscall.Flock(int(lockfile.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tmissing := func() map[string]struct{} {\n\t\tcache.heldopenLock.Lock()\n\t\tdefer cache.heldopenLock.Unlock()\n\t\tm := make(map[string]struct{}, len(cache.heldopen))\n\t\tfor path := range cache.heldopen {\n\t\t\tm[path] = struct{}{}\n\t\t}\n\t\treturn m\n\t}()\n\n\ttype entT struct {\n\t\tpath  string\n\t\tatime time.Time\n\t\tsize  int64\n\t}\n\tvar ents []entT\n\tvar totalsize int64\n\tfilepath.Walk(cache.dir, func(path string, info fs.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\tcache.debugf(\"tidy: skipping dir %s: %s\", path, err)\n\t\t\treturn nil\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tif !strings.HasSuffix(path, cacheFileSuffix) && !strings.HasSuffix(path, tmpFileSuffix) {\n\t\t\treturn nil\n\t\t}\n\t\tdelete(missing, path)\n\t\tvar atime time.Time\n\t\tif stat, ok := info.Sys().(*syscall.Stat_t); ok {\n\t\t\t// Access time is available (hopefully the\n\t\t\t// filesystem is not mounted with noatime)\n\t\t\tatime = time.Unix(stat.Atim.Sec, stat.Atim.Nsec)\n\t\t} else {\n\t\t\t// If access time isn't available we fall back\n\t\t\t// to sorting by modification time.\n\t\t\tatime = info.ModTime()\n\t\t}\n\t\tents = append(ents, entT{path, atime, info.Size()})\n\t\ttotalsize += info.Size()\n\t\treturn nil\n\t})\n\tif cache.Logger != nil {\n\t\tcache.Logger.WithFields(logrus.Fields{\n\t\t\t\"totalsize\": totalsize,\n\t\t\t\"maxsize\":   maxsize,\n\t\t}).Debugf(\"DiskCache: checked current cache usage\")\n\t}\n\n\t// Drop heldopen entries (and close their filehandles) if the\n\t// files have been deleted by another process, or are\n\t// unreachable via Walk() for any other reason.\n\tfor path := range missing {\n\t\tif cache.Logger != nil {\n\t\t\tcache.Logger.WithField(\"path\", path).Debug(\"cache file is missing, closing my handle\")\n\t\t}\n\t\tcache.deleteHeldopen(path, nil)\n\t}\n\n\t// If MaxSize wasn't specified and we failed to come up with a\n\t// defaultSize above, use the larger of {current cache size, 1\n\t// GiB} as the defaultMaxSize for subsequent tidy()\n\t// operations.\n\tif maxsize == 0 {\n\t\tif totalsize < 1<<30 {\n\t\t\tatomic.StoreInt64(&cache.defaultMaxSize, 1<<30)\n\t\t} else {\n\t\t\tatomic.StoreInt64(&cache.defaultMaxSize, totalsize)\n\t\t}\n\t\tcache.debugf(\"found initial size %d, setting defaultMaxSize %d\", totalsize, cache.defaultMaxSize)\n\t\treturn\n\t}\n\n\t// If we're below MaxSize or there's only one block in the\n\t// cache, just update the usage estimate and return.\n\t//\n\t// (We never delete the last 
block because that would merely\n\t// cause the same block to get re-fetched repeatedly from the\n\t// backend.)\n\tif totalsize <= maxsize || len(ents) == 1 {\n\t\tatomic.StoreInt64(&cache.sizeMeasured, totalsize)\n\t\tatomic.StoreInt64(&cache.sizeEstimated, totalsize)\n\t\tcache.lastFileCount = int64(len(ents))\n\t\treturn\n\t}\n\n\t// Set a new size target of maxsize minus 5%.  This makes some\n\t// room for sizeEstimate to grow before it triggers another\n\t// tidy. We don't want to walk/sort an entire large cache\n\t// directory each time we write a block.\n\ttarget := maxsize - (maxsize / 20)\n\n\t// Delete oldest entries until totalsize < target or we're\n\t// down to a single cached block.\n\tsort.Slice(ents, func(i, j int) bool {\n\t\treturn ents[i].atime.Before(ents[j].atime)\n\t})\n\tdeleted := 0\n\tfor _, ent := range ents {\n\t\tos.Remove(ent.path)\n\t\tgo cache.deleteHeldopen(ent.path, nil)\n\t\tdeleted++\n\t\ttotalsize -= ent.size\n\t\tif totalsize <= target || deleted == len(ents)-1 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif cache.Logger != nil {\n\t\tcache.Logger.WithFields(logrus.Fields{\n\t\t\t\"deleted\":   deleted,\n\t\t\t\"totalsize\": totalsize,\n\t\t}).Debugf(\"DiskCache: remaining cache usage after deleting\")\n\t}\n\tatomic.StoreInt64(&cache.sizeMeasured, totalsize)\n\tatomic.StoreInt64(&cache.sizeEstimated, totalsize)\n\tcache.lastFileCount = int64(len(ents) - deleted)\n}\n"
  },
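To make the moving parts above concrete, here is a minimal sketch of layering a `DiskCache` over an existing `KeepGateway` (most programs get this wiring for free via keepclient, as the `DiskCache` doc comment notes; the cache directory and size cap here are arbitrary illustrative choices):

```go
// Sketch only: pass in any KeepGateway implementation, e.g. the
// gateway stack assembled by keepclient.
package example

import (
	"context"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func cachedRoundTrip(backend arvados.KeepGateway) ([]byte, error) {
	cache := &arvados.DiskCache{
		KeepGateway: backend,
		Dir:         "/tmp/keepcache",                   // illustrative path
		MaxSize:     arvados.ByteSizeOrPercent(1 << 30), // illustrative 1 GiB cap
		Metrics:     arvados.NewKeepClientMetrics(),
	}
	// Write-through: the block is stored on the backend and a copy is
	// kept in Dir, named by its md5 hash; tidy() later evicts by atime.
	resp, err := cache.BlockWrite(context.Background(), arvados.BlockWriteOptions{
		Data: []byte("hello"),
	})
	if err != nil {
		return nil, err
	}
	// ReadAt is served from the cache file, returning as soon as the
	// requested range is present, even while the rest of the block is
	// still streaming in from the backend.
	buf := make([]byte, 5)
	_, err = cache.ReadAt(resp.Locator, buf, 0)
	return buf, err
}
```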
  {
    "path": "sdk/go/arvados/keep_cache_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/md5\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math/rand\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&keepCacheSuite{})\n\ntype keepCacheSuite struct {\n}\n\ntype keepGatewayBlackHole struct {\n}\n\nfunc (*keepGatewayBlackHole) ReadAt(locator string, dst []byte, offset int) (int, error) {\n\treturn 0, errors.New(\"block not found\")\n}\nfunc (*keepGatewayBlackHole) BlockRead(ctx context.Context, opts BlockReadOptions) (int, error) {\n\treturn 0, errors.New(\"block not found\")\n}\nfunc (*keepGatewayBlackHole) LocalLocator(locator string) (string, error) {\n\treturn locator, nil\n}\nfunc (*keepGatewayBlackHole) BlockWrite(ctx context.Context, opts BlockWriteOptions) (BlockWriteResponse, error) {\n\th := md5.New()\n\tvar size int64\n\tif opts.Reader == nil {\n\t\tsize, _ = io.Copy(h, bytes.NewReader(opts.Data))\n\t} else {\n\t\tsize, _ = io.Copy(h, opts.Reader)\n\t}\n\treturn BlockWriteResponse{Locator: fmt.Sprintf(\"%x+%d\", h.Sum(nil), size), Replicas: 1}, nil\n}\n\ntype keepGatewayMemoryBacked struct {\n\tmtx                 sync.RWMutex\n\tdata                map[string][]byte\n\tpauseBlockReadAfter int\n\tpauseBlockReadUntil chan error\n}\n\nfunc (k *keepGatewayMemoryBacked) ReadAt(locator string, dst []byte, offset int) (int, error) {\n\tk.mtx.RLock()\n\tdata := k.data[locator]\n\tk.mtx.RUnlock()\n\tif data == nil {\n\t\treturn 0, errors.New(\"block not found: \" + locator)\n\t}\n\tvar n int\n\tif len(data) > offset {\n\t\tn = copy(dst, data[offset:])\n\t}\n\tif n < len(dst) {\n\t\treturn n, io.EOF\n\t}\n\treturn n, nil\n}\nfunc (k *keepGatewayMemoryBacked) BlockRead(ctx context.Context, opts BlockReadOptions) (int, error) {\n\tif opts.CheckCacheOnly {\n\t\treturn 0, ErrNotCached\n\t}\n\tk.mtx.RLock()\n\tdata := k.data[opts.Locator]\n\tk.mtx.RUnlock()\n\tif data == nil {\n\t\treturn 0, errors.New(\"block not found: \" + opts.Locator)\n\t}\n\tif k.pauseBlockReadUntil != nil {\n\t\tsrc := bytes.NewReader(data)\n\t\tn, err := io.CopyN(opts.WriteTo, src, int64(k.pauseBlockReadAfter))\n\t\tif err != nil {\n\t\t\treturn int(n), err\n\t\t}\n\t\t<-k.pauseBlockReadUntil\n\t\tn2, err := io.Copy(opts.WriteTo, src)\n\t\treturn int(n + n2), err\n\t}\n\treturn opts.WriteTo.Write(data)\n}\nfunc (k *keepGatewayMemoryBacked) LocalLocator(locator string) (string, error) {\n\treturn locator, nil\n}\nfunc (k *keepGatewayMemoryBacked) BlockWrite(ctx context.Context, opts BlockWriteOptions) (BlockWriteResponse, error) {\n\th := md5.New()\n\tdata := bytes.NewBuffer(nil)\n\tif opts.Reader == nil {\n\t\tdata.Write(opts.Data)\n\t\th.Write(data.Bytes())\n\t} else {\n\t\tio.Copy(io.MultiWriter(h, data), opts.Reader)\n\t}\n\tlocator := fmt.Sprintf(\"%x+%d\", h.Sum(nil), data.Len())\n\tk.mtx.Lock()\n\tif k.data == nil {\n\t\tk.data = map[string][]byte{}\n\t}\n\tk.data[locator] = data.Bytes()\n\tk.mtx.Unlock()\n\treturn BlockWriteResponse{Locator: locator, Replicas: 1}, nil\n}\n\nfunc (s *keepCacheSuite) TestBlockWrite(c *check.C) {\n\tbackend := &keepGatewayMemoryBacked{}\n\tcache := DiskCache{\n\t\tKeepGateway: backend,\n\t\tMaxSize:     40000000,\n\t\tDir:         c.MkDir(),\n\t\tLogger:      ctxlog.TestLogger(c),\n\t\tMetrics:     NewKeepClientMetrics(),\n\t}\n\tctx := 
context.Background()\n\treal, err := cache.BlockWrite(ctx, BlockWriteOptions{\n\t\tData: make([]byte, 100000),\n\t})\n\tc.Assert(err, check.IsNil)\n\n\t// Write different data but supply the same hash. Should be\n\t// rejected (even though our fake backend doesn't notice).\n\t_, err = cache.BlockWrite(ctx, BlockWriteOptions{\n\t\tHash: real.Locator[:32],\n\t\tData: make([]byte, 10),\n\t})\n\tc.Check(err, check.ErrorMatches, `block hash .+ did not match provided hash .+`)\n\n\t// Ensure the bogus write didn't overwrite (or delete) the\n\t// real cached data associated with that hash.\n\tdelete(backend.data, real.Locator)\n\tn, err := cache.ReadAt(real.Locator, make([]byte, 100), 0)\n\tc.Check(n, check.Equals, 100)\n\tc.Check(err, check.IsNil)\n}\n\nfunc (s *keepCacheSuite) TestMaxSize(c *check.C) {\n\tbackend := &keepGatewayMemoryBacked{}\n\tcache := &DiskCache{\n\t\tKeepGateway: backend,\n\t\tMaxSize:     40000000,\n\t\tDir:         c.MkDir(),\n\t\tLogger:      ctxlog.TestLogger(c),\n\t\tMetrics:     NewKeepClientMetrics(),\n\t}\n\tctx := context.Background()\n\tresp1, err := cache.BlockWrite(ctx, BlockWriteOptions{\n\t\tData: make([]byte, 44000000),\n\t})\n\tc.Check(err, check.IsNil)\n\n\t// Wait for tidy to finish, check that it doesn't delete the\n\t// only block.\n\twaitTidy(cache)\n\tc.Check(atomic.LoadInt64(&cache.sizeMeasured), check.Equals, int64(44000000))\n\n\tresp2, err := cache.BlockWrite(ctx, BlockWriteOptions{\n\t\tData: make([]byte, 32000000),\n\t})\n\tc.Check(err, check.IsNil)\n\tdelete(backend.data, resp1.Locator)\n\tdelete(backend.data, resp2.Locator)\n\n\t// Wait for tidy to finish, check that it deleted the older\n\t// block.\n\twaitTidy(cache)\n\tc.Check(atomic.LoadInt64(&cache.sizeMeasured), check.Equals, int64(32000000))\n\n\tn, err := cache.ReadAt(resp1.Locator, make([]byte, 2), 0)\n\tc.Check(n, check.Equals, 0)\n\tc.Check(err, check.ErrorMatches, `block not found: .*\\+44000000`)\n\n\tn, err = cache.ReadAt(resp2.Locator, make([]byte, 2), 0)\n\tc.Check(n > 0, check.Equals, true)\n\tc.Check(err, check.IsNil)\n}\n\nfunc (s *keepCacheSuite) TestConcurrentReadersNoRefresh(c *check.C) {\n\ts.testConcurrentReaders(c, true, false)\n}\nfunc (s *keepCacheSuite) TestConcurrentReadersMangleCache(c *check.C) {\n\ts.testConcurrentReaders(c, false, true)\n}\nfunc (s *keepCacheSuite) testConcurrentReaders(c *check.C, cannotRefresh, mangleCache bool) {\n\tblksize := 64000000\n\tbackend := &keepGatewayMemoryBacked{}\n\tcache := DiskCache{\n\t\tKeepGateway: backend,\n\t\tMaxSize:     ByteSizeOrPercent(blksize),\n\t\tDir:         c.MkDir(),\n\t\tLogger:      ctxlog.TestLogger(c),\n\t\tMetrics:     NewKeepClientMetrics(),\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tresp, err := cache.BlockWrite(ctx, BlockWriteOptions{\n\t\tData: make([]byte, blksize),\n\t})\n\tc.Check(err, check.IsNil)\n\tif cannotRefresh {\n\t\t// Delete the block from the backing store, to ensure\n\t\t// the cache doesn't rely on re-reading a block that\n\t\t// it has just written.\n\t\tdelete(backend.data, resp.Locator)\n\t}\n\tif mangleCache {\n\t\t// Replace cache files with truncated files (and\n\t\t// delete them outright) while the ReadAt loop is\n\t\t// running, to ensure the cache can re-fetch from the\n\t\t// backend as needed.\n\t\tvar nRemove, nTrunc int\n\t\tdefer func() {\n\t\t\tc.Logf(\"nRemove %d\", nRemove)\n\t\t\tc.Logf(\"nTrunc %d\", nTrunc)\n\t\t}()\n\t\tgo func() {\n\t\t\t// Truncate/delete the cache file at various\n\t\t\t// intervals. 
Readers should re-fetch/recover from\n\t\t\t// this.\n\t\t\tfnm := cache.cacheFile(resp.Locator)\n\t\t\tfor ctx.Err() == nil {\n\t\t\t\ttrunclen := rand.Int63() % int64(blksize*2)\n\t\t\t\tif trunclen > int64(blksize) {\n\t\t\t\t\terr := os.Remove(fnm)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tnRemove++\n\t\t\t\t\t}\n\t\t\t\t} else if os.WriteFile(fnm+\"#\", make([]byte, trunclen), 0700) == nil {\n\t\t\t\t\terr := os.Rename(fnm+\"#\", fnm)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tnTrunc++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\tfailed := false\n\tvar wg sync.WaitGroup\n\tvar slots = make(chan bool, 100) // limit concurrency / memory usage\n\tfor i := 0; i < 20000; i++ {\n\t\toffset := (i * 123456) % blksize\n\t\tslots <- true\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tdefer func() { <-slots }()\n\t\t\tbuf := make([]byte, 654321)\n\t\t\tif offset+len(buf) > blksize {\n\t\t\t\tbuf = buf[:blksize-offset]\n\t\t\t}\n\t\t\tn, err := cache.ReadAt(resp.Locator, buf, offset)\n\t\t\tif failed {\n\t\t\t\t// don't fill logs with subsequent errors\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !c.Check(err, check.IsNil, check.Commentf(\"offset=%d\", offset)) {\n\t\t\t\tfailed = true\n\t\t\t}\n\t\t\tc.Assert(n, check.Equals, len(buf))\n\t\t}()\n\t}\n\twg.Wait()\n}\n\nfunc (s *keepCacheSuite) TestBlockRead_CheckCacheOnly(c *check.C) {\n\tblkCached := make([]byte, 12_000_000)\n\tblkUncached := make([]byte, 13_000_000)\n\tbackend := &keepGatewayMemoryBacked{}\n\tcache := DiskCache{\n\t\tKeepGateway: backend,\n\t\tMaxSize:     ByteSizeOrPercent(len(blkUncached) + len(blkCached)),\n\t\tDir:         c.MkDir(),\n\t\tLogger:      ctxlog.TestLogger(c),\n\t\tMetrics:     NewKeepClientMetrics(),\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tresp, err := cache.BlockWrite(ctx, BlockWriteOptions{\n\t\tData: blkUncached,\n\t})\n\tc.Check(err, check.IsNil)\n\tlocUncached := resp.Locator\n\n\tresp, err = cache.BlockWrite(ctx, BlockWriteOptions{\n\t\tData: blkCached,\n\t})\n\tc.Check(err, check.IsNil)\n\tlocCached := resp.Locator\n\n\tos.RemoveAll(filepath.Join(cache.Dir, locUncached[:3]))\n\tcache.deleteHeldopen(cache.cacheFile(locUncached), nil)\n\tbackend.data = make(map[string][]byte)\n\n\t// Do multiple concurrent reads so we have a chance of catching\n\t// race/locking bugs.\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 50; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tvar buf bytes.Buffer\n\t\t\tn, err := cache.BlockRead(ctx, BlockReadOptions{\n\t\t\t\tLocator:        locUncached,\n\t\t\t\tWriteTo:        &buf,\n\t\t\t\tCheckCacheOnly: true})\n\t\t\tc.Check(n, check.Equals, 0)\n\t\t\tc.Check(err, check.Equals, ErrNotCached)\n\t\t\tc.Check(buf.Len(), check.Equals, 0)\n\t\t}()\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tvar buf bytes.Buffer\n\t\t\tn, err := cache.BlockRead(ctx, BlockReadOptions{\n\t\t\t\tLocator:        locCached,\n\t\t\t\tWriteTo:        &buf,\n\t\t\t\tCheckCacheOnly: true})\n\t\t\tc.Check(n, check.Equals, 0)\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tc.Check(buf.Len(), check.Equals, 0)\n\t\t}()\n\t}\n\twg.Wait()\n}\n\nfunc (s *keepCacheSuite) TestStreaming(c *check.C) {\n\tblksize := 64000000\n\tbackend := &keepGatewayMemoryBacked{\n\t\tpauseBlockReadUntil: make(chan error),\n\t\tpauseBlockReadAfter: blksize / 8,\n\t}\n\tcache := DiskCache{\n\t\tKeepGateway: backend,\n\t\tMaxSize:     ByteSizeOrPercent(blksize),\n\t\tDir:         c.MkDir(),\n\t\tLogger:      ctxlog.TestLogger(c),\n\t\tMetrics:     
NewKeepClientMetrics(),\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tresp, err := cache.BlockWrite(ctx, BlockWriteOptions{\n\t\tData: make([]byte, blksize),\n\t})\n\tc.Check(err, check.IsNil)\n\tos.RemoveAll(filepath.Join(cache.Dir, resp.Locator[:3]))\n\n\t// Start a lot of concurrent requests for various ranges of\n\t// the same block. Our backend will return the first 8MB and\n\t// then pause. The requests that can be satisfied by the first\n\t// 8MB of data should return quickly. The rest should wait,\n\t// and return after we release pauseBlockReadUntil.\n\tvar wgEarly, wgLate sync.WaitGroup\n\tvar doneEarly, doneLate int32\n\tfor i := 0; i < 10000; i++ {\n\t\twgEarly.Add(1)\n\t\tgo func() {\n\t\t\toffset := int(rand.Int63() % int64(blksize-benchReadSize))\n\t\t\tif offset+benchReadSize > backend.pauseBlockReadAfter {\n\t\t\t\twgLate.Add(1)\n\t\t\t\tdefer wgLate.Done()\n\t\t\t\twgEarly.Done()\n\t\t\t\tdefer atomic.AddInt32(&doneLate, 1)\n\t\t\t} else {\n\t\t\t\tdefer wgEarly.Done()\n\t\t\t\tdefer atomic.AddInt32(&doneEarly, 1)\n\t\t\t}\n\t\t\tbuf := make([]byte, benchReadSize)\n\t\t\tn, err := cache.ReadAt(resp.Locator, buf, offset)\n\t\t\tc.Check(n, check.Equals, len(buf))\n\t\t\tc.Check(err, check.IsNil)\n\t\t}()\n\t}\n\n\t// Ensure all early ranges finish while backend request(s) are\n\t// paused.\n\twgEarly.Wait()\n\tc.Logf(\"doneEarly = %d\", doneEarly)\n\tc.Check(doneLate, check.Equals, int32(0))\n\n\t// Unpause backend request(s).\n\tclose(backend.pauseBlockReadUntil)\n\twgLate.Wait()\n\tc.Logf(\"doneLate = %d\", doneLate)\n}\n\n// Check that we empty out the heldopen filehandle cache when it\n// exceeds heldopenMax entries.\nfunc (s *keepCacheSuite) TestHeldOpen_RollCache(c *check.C) {\n\tblksize := 64000\n\tblkcount := 64\n\tcache, locators := setupCacheWithBlocks(c, blksize, blkcount)\n\tcache.maxSize = ByteSizeOrPercent(blksize*blkcount + 1)\n\tcache.sharedCache.heldopenMax = blkcount + 1\n\ttargetsize := blkcount / 4\n\n\t// Exercise the cache until we have more heldopen files than\n\t// targetsize\n\tfor i := 0; i < 100; i++ {\n\t\tdoConcurrentReads(c, blkcount, cache, locators, blksize)\n\t\twaitTidy(cache)\n\t\tcache.tidy()\n\t\tif len(cache.sharedCache.heldopen) > targetsize {\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Assert(len(cache.sharedCache.heldopen) > targetsize, check.Equals, true)\n\n\t// Reduce heldopenMax to make sure we roll the cache in the\n\t// following ReadAt().\n\tcache.sharedCache.heldopenMax = targetsize / 2\n\tcache.deleteHeldopen(cache.cacheFile(locators[0][:32]), nil)\n\t_, err := cache.ReadAt(locators[0], make([]byte, 1234), 0)\n\tc.Assert(err, check.IsNil)\n\tc.Check(len(cache.sharedCache.heldopen), check.Equals, 1)\n}\n\n// Check that we close our heldopen files when they are deleted by\n// another process.\nfunc (s *keepCacheSuite) TestHeldOpen_CloseDeletedFiles(c *check.C) {\n\tblksize := 64000\n\tblkcount := 64\n\tcache, locators := setupCacheWithBlocks(c, blksize, blkcount)\n\tcache.maxSize = ByteSizeOrPercent(blksize*blkcount + 1)\n\tcache.sharedCache.heldopenMax = blkcount + 1\n\ttargetsize := blkcount / 4\n\n\t// Exercise the cache until we have more heldopen files than\n\t// targetsize\n\tfor i := 0; i < 100; i++ {\n\t\tdoConcurrentReads(c, blkcount, cache, locators, blksize)\n\t\twaitTidy(cache)\n\t\tcache.tidy()\n\t\tif len(cache.sharedCache.heldopen) > targetsize {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tc.Logf(\"len(cache.sharedCache.heldopen) == %d, targetsize == %d\", len(cache.sharedCache.heldopen), 
targetsize)\n\tc.Assert(len(cache.sharedCache.heldopen) > targetsize, check.Equals, true)\n\n\tfor i := targetsize; i < blkcount; i++ {\n\t\tos.Remove(cache.cacheFile(locators[i][:32]))\n\t}\n\twaitTidy(cache)\n\tcache.tidy()\n\n\tc.Logf(\"len(cache.sharedCache.heldopen) == %d, targetsize == %d\", len(cache.sharedCache.heldopen), targetsize)\n\tc.Check(len(cache.sharedCache.heldopen) <= targetsize, check.Equals, true)\n}\n\nvar _ = check.Suite(&keepCacheBenchSuite{})\n\ntype keepCacheBenchSuite struct {\n\tblksize  int\n\tblkcount int\n\tcache    *DiskCache\n\tlocators []string\n}\n\nfunc (s *keepCacheBenchSuite) SetUpTest(c *check.C) {\n\ts.blksize = 64000000\n\ts.blkcount = 8\n\ts.cache, s.locators = setupCacheWithBlocks(c, s.blksize, s.blkcount)\n}\n\nfunc (s *keepCacheBenchSuite) BenchmarkConcurrentReads_LowNOFiles(c *check.C) {\n\ts.cache.sharedCache.heldopenMax = 4\n\ts.BenchmarkConcurrentReads(c)\n}\n\nfunc (s *keepCacheBenchSuite) BenchmarkConcurrentReads(c *check.C) {\n\tdoConcurrentReads(c, c.N, s.cache, s.locators, s.blksize)\n}\n\nfunc (s *keepCacheBenchSuite) BenchmarkSequentialReads(c *check.C) {\n\tbuf := make([]byte, benchReadSize)\n\tfor i := 0; i < c.N; i++ {\n\t\t_, err := s.cache.ReadAt(s.locators[i%s.blkcount], buf, int((int64(i)*1234)%int64(s.blksize-benchReadSize)))\n\t\tif err != nil {\n\t\t\tc.Fail()\n\t\t}\n\t}\n}\n\nconst benchReadSize = 1000\n\nvar _ = check.Suite(&fileOpsSuite{})\n\ntype fileOpsSuite struct{}\n\n// BenchmarkOpenClose and BenchmarkKeepOpen can be used to measure the\n// potential performance improvement of caching filehandles rather\n// than opening/closing the cache file for each read.\n//\n// Results from a development machine indicate a ~3x throughput\n// improvement: ~636 MB/s when opening/closing the file for each\n// 1000-byte read vs. 
~2 GB/s when opening the file once and doing\n// concurrent reads using the same file descriptor.\nfunc (s *fileOpsSuite) BenchmarkOpenClose(c *check.C) {\n\tfnm := c.MkDir() + \"/testfile\"\n\tos.WriteFile(fnm, make([]byte, 64000000), 0700)\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < c.N; i++ {\n\t\ti := i\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tf, err := os.OpenFile(fnm, os.O_CREATE|os.O_RDWR, 0700)\n\t\t\tif err != nil {\n\t\t\t\tc.Fail()\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_, err = f.ReadAt(make([]byte, benchReadSize), (int64(i)*1000000)%63123123)\n\t\t\tif err != nil {\n\t\t\t\tc.Fail()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tf.Close()\n\t\t}()\n\t}\n\twg.Wait()\n}\n\nfunc (s *fileOpsSuite) BenchmarkKeepOpen(c *check.C) {\n\tfnm := c.MkDir() + \"/testfile\"\n\tos.WriteFile(fnm, make([]byte, 64000000), 0700)\n\tf, err := os.OpenFile(fnm, os.O_CREATE|os.O_RDWR, 0700)\n\tif err != nil {\n\t\tc.Fail()\n\t\treturn\n\t}\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < c.N; i++ {\n\t\ti := i\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t_, err = f.ReadAt(make([]byte, benchReadSize), (int64(i)*1000000)%63123123)\n\t\t\tif err != nil {\n\t\t\t\tc.Fail()\n\t\t\t\treturn\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n\tf.Close()\n}\n\nfunc setupCacheWithBlocks(c *check.C, blksize, blkcount int) (cache *DiskCache, locators []string) {\n\tbackend := &keepGatewayMemoryBacked{}\n\tcache = &DiskCache{\n\t\tKeepGateway: backend,\n\t\tMaxSize:     ByteSizeOrPercent(blksize),\n\t\tDir:         c.MkDir(),\n\t\tLogger:      ctxlog.TestLogger(c),\n\t\tMetrics:     NewKeepClientMetrics(),\n\t}\n\tlocators = make([]string, blkcount)\n\tdata := make([]byte, blksize)\n\tfor b := 0; b < blkcount; b++ {\n\t\tfor i := range data {\n\t\t\tdata[i] = byte(b)\n\t\t}\n\t\tresp, err := cache.BlockWrite(context.Background(), BlockWriteOptions{\n\t\t\tData: data,\n\t\t})\n\t\tc.Assert(err, check.IsNil)\n\t\tlocators[b] = resp.Locator\n\t}\n\treturn\n}\n\nfunc doConcurrentReads(c *check.C, N int, cache *DiskCache, locators []string, blksize int) {\n\tblkcount := len(locators)\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < N; i++ {\n\t\ti := i\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tbuf := make([]byte, benchReadSize)\n\t\t\t_, err := cache.ReadAt(locators[i%blkcount], buf, int((int64(i)*1234)%int64(blksize-benchReadSize)))\n\t\t\tif err != nil {\n\t\t\t\tc.Fail()\n\t\t\t}\n\t\t}()\n\t}\n\twg.Wait()\n}\n\nfunc waitTidy(cache *DiskCache) {\n\ttime.Sleep(time.Millisecond)\n\tfor atomic.LoadInt32(&cache.tidying) > 0 {\n\t\ttime.Sleep(time.Millisecond)\n\t}\n}\n"
  },
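A usage note: these gocheck benchmarks do not run under plain `go test`; they require the check framework's benchmark flag, e.g. `go test ./sdk/go/arvados -check.b -check.f 'BenchmarkConcurrentReads|BenchmarkOpenClose|BenchmarkKeepOpen'` (flag names per gopkg.in/check.v1; the package path is assumed from this repository layout).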
  {
    "path": "sdk/go/arvados/keep_metrics.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport \"github.com/prometheus/client_golang/prometheus\"\n\ntype KeepClientMetrics struct {\n\tBackendBytes    *prometheus.CounterVec\n\tBackendBytesIn  prometheus.Counter\n\tBackendBytesOut prometheus.Counter\n\tClientOps       *prometheus.CounterVec\n\tClientOpsGet    prometheus.Counter\n\tClientOpsPut    prometheus.Counter\n\tCache           *prometheus.CounterVec\n\tCacheMisses     prometheus.Counter\n\tCacheHits       prometheus.Counter\n}\n\nfunc NewKeepClientMetrics() KeepClientMetrics {\n\tvar m KeepClientMetrics\n\tm.BackendBytes = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"keepclient\",\n\t\tName:      \"backend_bytes\",\n\t\tHelp:      \"network traffic generated by keepclient\",\n\t}, []string{\"direction\"})\n\tm.BackendBytesIn = m.BackendBytes.WithLabelValues(\"in\")\n\tm.BackendBytesOut = m.BackendBytes.WithLabelValues(\"out\")\n\tm.ClientOps = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"keepclient\",\n\t\tName:      \"ops\",\n\t\tHelp:      \"keepclient operations requested by application\",\n\t}, []string{\"op\"})\n\tm.ClientOpsGet = m.ClientOps.WithLabelValues(\"get\")\n\tm.ClientOpsPut = m.ClientOps.WithLabelValues(\"put\")\n\tm.Cache = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"keepclient\",\n\t\tName:      \"cache\",\n\t\tHelp:      \"keepclient disk cache events\",\n\t}, []string{\"event\"})\n\tm.CacheMisses = m.Cache.WithLabelValues(\"miss\")\n\tm.CacheHits = m.Cache.WithLabelValues(\"hit\")\n\treturn m\n}\n\nfunc (m *KeepClientMetrics) Register(reg *prometheus.Registry) error {\n\tfor _, c := range []prometheus.Collector{m.BackendBytes, m.ClientOps, m.Cache} {\n\t\terr := reg.Register(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
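A short sketch of wiring these counters to a scrape endpoint (the registry and listen address are illustrative; in Arvados services this registration normally happens inside the service's own Prometheus setup):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	m := arvados.NewKeepClientMetrics()
	reg := prometheus.NewRegistry()
	if err := m.Register(reg); err != nil {
		log.Fatal(err)
	}
	// Client code paths increment the pre-labeled counters, e.g.:
	m.CacheHits.Add(1) // exported as arvados_keepclient_cache{event="hit"}
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9100", nil))
}
```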
  {
    "path": "sdk/go/arvados/keep_service.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n)\n\n// KeepService is an arvados#keepService record\ntype KeepService struct {\n\tUUID           string    `json:\"uuid\"`\n\tServiceHost    string    `json:\"service_host\"`\n\tServicePort    int       `json:\"service_port\"`\n\tServiceSSLFlag bool      `json:\"service_ssl_flag\"`\n\tServiceType    string    `json:\"service_type\"`\n\tReadOnly       bool      `json:\"read_only\"`\n\tCreatedAt      time.Time `json:\"created_at\"`\n\tModifiedAt     time.Time `json:\"modified_at\"`\n}\n\ntype KeepMount struct {\n\tUUID           string          `json:\"uuid\"`\n\tDeviceID       string          `json:\"device_id\"`\n\tAllowWrite     bool            `json:\"allow_write\"`\n\tAllowTrash     bool            `json:\"allow_trash\"`\n\tReplication    int             `json:\"replication\"`\n\tStorageClasses map[string]bool `json:\"storage_classes\"`\n}\n\n// KeepServiceList is an arvados#keepServiceList record\ntype KeepServiceList struct {\n\tItems          []KeepService `json:\"items\"`\n\tItemsAvailable int           `json:\"items_available\"`\n\tOffset         int           `json:\"offset\"`\n\tLimit          int           `json:\"limit\"`\n}\n\n// KeepServiceIndexEntry is what a keep service's index response tells\n// us about a stored block.\ntype KeepServiceIndexEntry struct {\n\tSizedDigest\n\t// Time of last write, in nanoseconds since Unix epoch\n\tMtime int64\n}\n\n// EachKeepService calls f once for every readable\n// KeepService. 
EachKeepService stops if it encounters an\n// error, such as f returning a non-nil error.\nfunc (c *Client) EachKeepService(f func(KeepService) error) error {\n\tparams := ResourceListParams{}\n\tfor {\n\t\tvar page KeepServiceList\n\t\terr := c.RequestAndDecode(&page, \"GET\", \"arvados/v1/keep_services\", nil, params)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, item := range page.Items {\n\t\t\terr = f(item)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tparams.Offset = params.Offset + len(page.Items)\n\t\tif params.Offset >= page.ItemsAvailable {\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (s *KeepService) url(path string) string {\n\tvar f string\n\tif s.ServiceSSLFlag {\n\t\tf = \"https://%s:%d/%s\"\n\t} else {\n\t\tf = \"http://%s:%d/%s\"\n\t}\n\treturn fmt.Sprintf(f, s.ServiceHost, s.ServicePort, path)\n}\n\n// String implements fmt.Stringer\nfunc (s *KeepService) String() string {\n\treturn s.UUID\n}\n\nfunc (s *KeepService) Mounts(c *Client) ([]KeepMount, error) {\n\turl := s.url(\"mounts\")\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar mounts []KeepMount\n\terr = c.DoAndDecode(&mounts, req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GET %v: %v\", url, err)\n\t}\n\treturn mounts, nil\n}\n\n// Touch updates the timestamp on the given block.\nfunc (s *KeepService) Touch(ctx context.Context, c *Client, blk string) error {\n\treq, err := http.NewRequest(\"TOUCH\", s.url(blk), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := c.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\treturn fmt.Errorf(\"%s %s: %s\", resp.Proto, resp.Status, body)\n\t}\n\treturn nil\n}\n\n// Untrash moves/copies the given block out of trash.\nfunc (s *KeepService) Untrash(ctx context.Context, c *Client, blk string) error {\n\treq, err := http.NewRequest(\"PUT\", s.url(\"untrash/\"+blk), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := c.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\treturn fmt.Errorf(\"%s %s: %s\", resp.Proto, resp.Status, body)\n\t}\n\treturn nil\n}\n\n// IndexMount returns an unsorted list of blocks at the given mount point.\nfunc (s *KeepService) IndexMount(ctx context.Context, c *Client, mountUUID string, prefix string) ([]KeepServiceIndexEntry, error) {\n\treturn s.index(ctx, c, prefix, s.url(\"mounts/\"+mountUUID+\"/blocks?prefix=\"+prefix))\n}\n\n// Index returns an unsorted list of blocks that can be retrieved from\n// this server.\nfunc (s *KeepService) Index(ctx context.Context, c *Client, prefix string) ([]KeepServiceIndexEntry, error) {\n\treturn s.index(ctx, c, prefix, s.url(\"index/\"+prefix))\n}\n\nfunc (s *KeepService) index(ctx context.Context, c *Client, prefix, url string) ([]KeepServiceIndexEntry, error) {\n\treq, err := http.NewRequestWithContext(ctx, \"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"NewRequestWithContext(%v): %v\", url, err)\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Do(%v): %v\", url, err)\n\t} else if resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"%v: %d %v\", url, resp.StatusCode, resp.Status)\n\t}\n\tdefer resp.Body.Close()\n\n\tvar progress int64\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tgo func() 
{\n\t\tlog := ctxlog.FromContext(ctx)\n\t\tlogticker := time.NewTicker(5 * time.Minute)\n\t\tdefer logticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-logticker.C:\n\t\t\t\tlog.Printf(\"index progress: received %d blocks from %s\", atomic.LoadInt64(&progress), url)\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tvar entries []KeepServiceIndexEntry\n\tscanner := bufio.NewScanner(resp.Body)\n\tsawEOF := false\n\tfor scanner.Scan() {\n\t\tif ctx.Err() != nil {\n\t\t\treturn nil, ctx.Err()\n\t\t}\n\t\tif scanner.Err() != nil {\n\t\t\t// If we encounter a read error (timeout,\n\t\t\t// connection failure), stop now and return it\n\t\t\t// below, so it doesn't get masked by the\n\t\t\t// ensuing \"badly formatted response\" error.\n\t\t\tbreak\n\t\t}\n\t\tif sawEOF {\n\t\t\treturn nil, fmt.Errorf(\"Index response contained non-terminal blank line\")\n\t\t}\n\t\tline := scanner.Text()\n\t\tif line == \"\" {\n\t\t\tsawEOF = true\n\t\t\tcontinue\n\t\t}\n\t\tfields := strings.Split(line, \" \")\n\t\tif len(fields) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"Malformed index line %q: %d fields\", line, len(fields))\n\t\t}\n\t\tif !strings.HasPrefix(fields[0], prefix) {\n\t\t\treturn nil, fmt.Errorf(\"Index response included block %q despite asking for prefix %q\", fields[0], prefix)\n\t\t}\n\t\tmtime, err := strconv.ParseInt(fields[1], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Malformed index line %q: mtime: %v\", line, err)\n\t\t}\n\t\tif mtime < 1e12 {\n\t\t\t// An old version of keepstore is giving us\n\t\t\t// timestamps in seconds instead of\n\t\t\t// nanoseconds. (This threshold correctly\n\t\t\t// handles all times between 1970-01-02 and\n\t\t\t// 33658-09-27.)\n\t\t\tmtime = mtime * 1e9\n\t\t}\n\t\tentries = append(entries, KeepServiceIndexEntry{\n\t\t\tSizedDigest: SizedDigest(fields[0]),\n\t\t\tMtime:       mtime,\n\t\t})\n\t\tatomic.AddInt64(&progress, 1)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error scanning index response: %v\", err)\n\t}\n\tif !sawEOF {\n\t\treturn nil, fmt.Errorf(\"Index response had no EOF marker\")\n\t}\n\treturn entries, nil\n}\n"
  },
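Putting `EachKeepService` and `Index` together: a sketch that walks every keepstore service and reports its block count. (Index requests typically require a privileged token such as the system root or data-manager token; restricting to the "disk" service type is an assumption that only keepstore servers, not proxies, are wanted.)

```go
package main

import (
	"context"
	"fmt"
	"log"

	"git.arvados.org/arvados.git/sdk/go/arvados"
)

func main() {
	client := arvados.NewClientFromEnv()
	err := client.EachKeepService(func(svc arvados.KeepService) error {
		if svc.ServiceType != "disk" {
			return nil // skip proxy/other service types
		}
		// Empty prefix requests the full index; Mtime values are
		// nanoseconds since the Unix epoch.
		entries, err := svc.Index(context.Background(), client, "")
		if err != nil {
			return err
		}
		fmt.Printf("%s: %d blocks\n", svc.UUID, len(entries))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```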
  {
    "path": "sdk/go/arvados/keep_service_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&KeepServiceSuite{})\n\ntype KeepServiceSuite struct{}\n\nfunc (*KeepServiceSuite) TestIndexTimeout(c *check.C) {\n\tclient := &Client{\n\t\tClient: &http.Client{\n\t\t\tTransport: &timeoutTransport{response: []byte(\"\\n\")},\n\t\t},\n\t\tAPIHost:   \"zzzzz.arvadosapi.com\",\n\t\tAuthToken: \"xyzzy\",\n\t}\n\t_, err := (&KeepService{}).IndexMount(context.Background(), client, \"fake\", \"\")\n\tc.Check(err, check.ErrorMatches, `.*timeout.*`)\n}\n"
  },
  {
    "path": "sdk/go/arvados/limiter.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\trequestLimiterQuietPeriod        = time.Second\n\trequestLimiterInitialLimit int64 = 8\n)\n\ntype requestLimiter struct {\n\tcurrent    int64\n\tlimit      int64\n\tmaxlimit   int64\n\tlock       sync.Mutex\n\tcond       *sync.Cond\n\tquietUntil time.Time\n}\n\n// Acquire reserves one request slot, waiting if necessary.\n//\n// Acquire returns early if ctx cancels before a slot is available. It\n// is assumed in this case the caller will immediately notice\n// ctx.Err() != nil and call Release().\nfunc (rl *requestLimiter) Acquire(ctx context.Context) {\n\trl.lock.Lock()\n\tif rl.cond == nil {\n\t\t// First use of requestLimiter. Initialize.\n\t\trl.cond = sync.NewCond(&rl.lock)\n\t\trl.limit = requestLimiterInitialLimit\n\t}\n\t// Wait out the quiet period(s) immediately following a 503.\n\tfor ctx.Err() == nil {\n\t\tdelay := rl.quietUntil.Sub(time.Now())\n\t\tif delay < 0 {\n\t\t\tbreak\n\t\t}\n\t\t// Wait for the end of the quiet period, which started\n\t\t// when we last received a 503 response.\n\t\trl.lock.Unlock()\n\t\ttimer := time.NewTimer(delay)\n\t\tselect {\n\t\tcase <-timer.C:\n\t\tcase <-ctx.Done():\n\t\t\ttimer.Stop()\n\t\t}\n\t\trl.lock.Lock()\n\t}\n\tready := make(chan struct{})\n\tgo func() {\n\t\t// close ready when a slot is available _or_ we wake\n\t\t// up and find ctx has been canceled (meaning Acquire\n\t\t// has already returned, or is about to).\n\t\tfor rl.limit > 0 && rl.limit <= rl.current && ctx.Err() == nil {\n\t\t\trl.cond.Wait()\n\t\t}\n\t\tclose(ready)\n\t}()\n\tselect {\n\tcase <-ready:\n\t\t// Wait() returned, so we have the lock.\n\t\trl.current++\n\t\trl.lock.Unlock()\n\tcase <-ctx.Done():\n\t\t// When Wait() returns the lock to our goroutine\n\t\t// (which might have already happened) we need to\n\t\t// release it (if we don't do this now, the following\n\t\t// Lock() can deadlock).\n\t\tgo func() {\n\t\t\t<-ready\n\t\t\trl.lock.Unlock()\n\t\t}()\n\t\t// Note we may have current > limit until the caller\n\t\t// calls Release().\n\t\trl.lock.Lock()\n\t\trl.current++\n\t\trl.lock.Unlock()\n\t}\n}\n\n// Release releases a slot that has been reserved with Acquire.\nfunc (rl *requestLimiter) Release() {\n\trl.lock.Lock()\n\trl.current--\n\trl.lock.Unlock()\n\trl.cond.Signal()\n}\n\n// Report uses the return values from (*http.Client)Do() to adjust the\n// outgoing request limit (increase on success, decrease on 503).\n//\n// Return value is true if the response was a 503.\nfunc (rl *requestLimiter) Report(resp *http.Response, err error) bool {\n\trl.lock.Lock()\n\tdefer rl.lock.Unlock()\n\tis503 := false\n\tif err != nil {\n\t\tuerr := &url.Error{}\n\t\tif errors.As(err, &uerr) && uerr.Err.Error() == \"Service Unavailable\" {\n\t\t\t// This is how http.Client reports 503 from proxy server\n\t\t\tis503 = true\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\tis503 = resp.StatusCode == http.StatusServiceUnavailable\n\t}\n\tif is503 {\n\t\tif rl.limit == 0 {\n\t\t\t// Concurrency was unlimited until now.\n\t\t\t// Calculate new limit based on actual\n\t\t\t// concurrency instead of previous limit.\n\t\t\trl.limit = rl.current\n\t\t}\n\t\tif time.Now().After(rl.quietUntil) {\n\t\t\t// Reduce concurrency limit by half.\n\t\t\trl.limit = (rl.limit + 1) / 2\n\t\t\t// Don't start any new calls (or reduce 
the\n\t\t\t// limit even further on additional 503s) for\n\t\t\t// a second.\n\t\t\trl.quietUntil = time.Now().Add(requestLimiterQuietPeriod)\n\t\t}\n\t\treturn true\n\t}\n\tif err == nil && resp.StatusCode >= 200 && resp.StatusCode < 400 && rl.limit > 0 {\n\t\t// After each non-server-error response, increase\n\t\t// concurrency limit by at least 10% -- but not beyond\n\t\t// 2x the highest concurrency level we've seen without\n\t\t// a failure.\n\t\tincrease := rl.limit / 10\n\t\tif increase < 1 {\n\t\t\tincrease = 1\n\t\t}\n\t\trl.limit += increase\n\t\tif max := rl.current * 2; max < rl.limit {\n\t\t\trl.limit = max\n\t\t}\n\t\tif rl.maxlimit > 0 && rl.maxlimit < rl.limit {\n\t\t\trl.limit = rl.maxlimit\n\t\t}\n\t\trl.cond.Broadcast()\n\t}\n\treturn false\n}\n"
  },
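  {
    "path": "sdk/go/arvados/limiter_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Editor's sketch, not upstream source: the intended\n// Acquire/Report/Release pattern for requestLimiter. Acquire reserves\n// a slot, blocking while all slots are in use or during the quiet\n// period that follows a 503; Report halves the limit on a 503 and\n// grows it after a success; Release returns the slot. The request URL\n// is a placeholder.\n\npackage arvados\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n)\n\nfunc Example_requestLimiter() {\n\trl := &requestLimiter{}\n\tctx := context.Background()\n\treq, err := http.NewRequestWithContext(ctx, \"GET\", \"https://zzzzz.arvadosapi.com/\", nil)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\trl.Acquire(ctx)\n\tresp, err := http.DefaultClient.Do(req)\n\tif rl.Report(resp, err) {\n\t\t// The response was a 503: the concurrency limit was just\n\t\t// halved and a quiet period began; a retrying caller\n\t\t// would wait it out in its next Acquire.\n\t\tfmt.Println(\"server busy, backing off\")\n\t}\n\trl.Release() // return the slot whether or not the request succeeded\n\tif err == nil {\n\t\tresp.Body.Close()\n\t}\n}\n"
  },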
  {
    "path": "sdk/go/arvados/limiter_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net/http\"\n\t\"sync\"\n\t\"time\"\n\n\t. \"gopkg.in/check.v1\"\n)\n\nvar _ = Suite(&limiterSuite{})\n\ntype limiterSuite struct{}\n\nfunc (*limiterSuite) TestInitialLimit(c *C) {\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))\n\tdefer cancel()\n\trl := requestLimiter{}\n\n\tvar wg sync.WaitGroup\n\twg.Add(int(requestLimiterInitialLimit))\n\tfor i := int64(0); i < requestLimiterInitialLimit; i++ {\n\t\tgo func() {\n\t\t\trl.Acquire(ctx)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\tc.Check(rl.current, Equals, requestLimiterInitialLimit)\n\twg.Add(int(requestLimiterInitialLimit))\n\tfor i := int64(0); i < requestLimiterInitialLimit; i++ {\n\t\tgo func() {\n\t\t\trl.Release()\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\tc.Check(rl.current, Equals, int64(0))\n}\n\nfunc (*limiterSuite) TestCancelWhileWaitingForAcquire(c *C) {\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))\n\tdefer cancel()\n\trl := requestLimiter{}\n\n\trl.Acquire(ctx)\n\trl.limit = 1\n\tctxShort, cancel := context.WithDeadline(ctx, time.Now().Add(time.Millisecond))\n\tdefer cancel()\n\trl.Acquire(ctxShort)\n\tc.Check(rl.current, Equals, int64(2))\n\tc.Check(ctxShort.Err(), NotNil)\n\trl.Release()\n\trl.Release()\n\tc.Check(rl.current, Equals, int64(0))\n}\n\nfunc (*limiterSuite) TestReducedLimitAndQuietPeriod(c *C) {\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))\n\tdefer cancel()\n\trl := requestLimiter{}\n\n\t// Use a short quiet period to make tests faster\n\tdefer func(orig time.Duration) { requestLimiterQuietPeriod = orig }(requestLimiterQuietPeriod)\n\trequestLimiterQuietPeriod = time.Second / 10\n\n\tfor i := 0; i < 5; i++ {\n\t\trl.Acquire(ctx)\n\t}\n\trl.Report(&http.Response{StatusCode: http.StatusServiceUnavailable}, nil)\n\tc.Check(rl.limit, Equals, requestLimiterInitialLimit/2)\n\tfor i := 0; i < 5; i++ {\n\t\trl.Release()\n\t}\n\n\t// Even with all slots released, we can't Acquire in the quiet\n\t// period.\n\n\t// (a) If our context expires before the end of the quiet\n\t// period, we get back DeadlineExceeded -- without waiting for\n\t// the end of the quiet period.\n\tacquire := time.Now()\n\tctxShort, cancel := context.WithDeadline(ctx, time.Now().Add(requestLimiterQuietPeriod/10))\n\tdefer cancel()\n\trl.Acquire(ctxShort)\n\tc.Check(ctxShort.Err(), Equals, context.DeadlineExceeded)\n\tc.Check(time.Since(acquire) < requestLimiterQuietPeriod/2, Equals, true)\n\tc.Check(rl.quietUntil.Sub(time.Now()) > requestLimiterQuietPeriod/2, Equals, true)\n\trl.Release()\n\n\t// (b) If our context does not expire first, Acquire waits for\n\t// the end of the quiet period.\n\tctxLong, cancel := context.WithDeadline(ctx, time.Now().Add(requestLimiterQuietPeriod*2))\n\tdefer cancel()\n\tacquire = time.Now()\n\trl.Acquire(ctxLong)\n\tc.Check(time.Since(acquire) > requestLimiterQuietPeriod/10, Equals, true)\n\tc.Check(time.Since(acquire) < requestLimiterQuietPeriod, Equals, true)\n\tc.Check(ctxLong.Err(), IsNil)\n\trl.Release()\n\n\t// OK to call Report() with nil Response and non-nil error.\n\trl.Report(nil, errors.New(\"network error\"))\n}\n"
  },
  {
    "path": "sdk/go/arvados/link.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport \"time\"\n\n// Link is an arvados#link record\ntype Link struct {\n\tUUID               string                 `json:\"uuid,omitempty\"`\n\tEtag               string                 `json:\"etag\"`\n\tOwnerUUID          string                 `json:\"owner_uuid\"`\n\tName               string                 `json:\"name\"`\n\tLinkClass          string                 `json:\"link_class\"`\n\tCreatedAt          time.Time              `json:\"created_at\"`\n\tModifiedAt         time.Time              `json:\"modified_at\"`\n\tModifiedByUserUUID string                 `json:\"modified_by_user_uuid\"`\n\tHeadUUID           string                 `json:\"head_uuid\"`\n\tHeadKind           string                 `json:\"head_kind\"`\n\tTailUUID           string                 `json:\"tail_uuid\"`\n\tTailKind           string                 `json:\"tail_kind\"`\n\tProperties         map[string]interface{} `json:\"properties\"`\n}\n\n// LinkList is an arvados#linkList resource.\ntype LinkList struct {\n\tItems          []Link `json:\"items\"`\n\tItemsAvailable int    `json:\"items_available\"`\n\tOffset         int    `json:\"offset\"`\n\tLimit          int    `json:\"limit\"`\n}\n\ntype ComputedPermission struct {\n\tUserUUID   string `json:\"user_uuid\"`\n\tTargetUUID string `json:\"target_uuid\"`\n\tPermLevel  string `json:\"perm_level\"`\n}\n\ntype ComputedPermissionList struct {\n\tItems          []ComputedPermission `json:\"items\"`\n\tItemsAvailable int                  `json:\"items_available\"`\n\tLimit          int                  `json:\"limit\"`\n}\n"
  },
  {
    "path": "sdk/go/arvados/log.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"time\"\n)\n\n// Log is an arvados#log record\ntype Log struct {\n\tID              int64                  `json:\"id\"`\n\tUUID            string                 `json:\"uuid\"`\n\tOwnerUUID       string                 `json:\"owner_uuid\"`\n\tObjectUUID      string                 `json:\"object_uuid\"`\n\tObjectOwnerUUID string                 `json:\"object_owner_uuid\"`\n\tEventType       string                 `json:\"event_type\"`\n\tEventAt         time.Time              `json:\"event\"`\n\tSummary         string                 `json:\"summary\"`\n\tProperties      map[string]interface{} `json:\"properties\"`\n\tCreatedAt       time.Time              `json:\"created_at\"`\n\tModifiedAt      time.Time              `json:\"modified_at\"`\n}\n\n// LogList is an arvados#logList resource.\ntype LogList struct {\n\tItems          []Log `json:\"items\"`\n\tItemsAvailable int   `json:\"items_available\"`\n\tOffset         int   `json:\"offset\"`\n\tLimit          int   `json:\"limit\"`\n}\n"
  },
  {
    "path": "sdk/go/arvados/login.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"net/http\"\n)\n\ntype LoginResponse struct {\n\tRedirectLocation string       `json:\"redirect_location,omitempty\"`\n\tHTML             bytes.Buffer `json:\"-\"`\n}\n\nfunc (resp LoginResponse) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tw.Header().Set(\"Cache-Control\", \"no-store\")\n\tif resp.RedirectLocation != \"\" {\n\t\tw.Header().Set(\"Location\", resp.RedirectLocation)\n\t\tw.WriteHeader(http.StatusFound)\n\t} else {\n\t\tw.Header().Set(\"Content-Type\", \"text/html\")\n\t\tw.Write(resp.HTML.Bytes())\n\t}\n}\n\ntype LogoutResponse struct {\n\tRedirectLocation string `json:\"redirect_location,omitempty\"`\n}\n\nfunc (resp LogoutResponse) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tw.Header().Set(\"Location\", resp.RedirectLocation)\n\tw.WriteHeader(http.StatusFound)\n}\n"
  },
  {
    "path": "sdk/go/arvados/node.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport \"time\"\n\n// Node is an arvados#node resource.\ntype Node struct {\n\tUUID       string         `json:\"uuid\"`\n\tDomain     string         `json:\"domain\"`\n\tHostname   string         `json:\"hostname\"`\n\tIPAddress  string         `json:\"ip_address\"`\n\tLastPingAt *time.Time     `json:\"last_ping_at\"`\n\tSlotNumber int            `json:\"slot_number\"`\n\tStatus     string         `json:\"status\"`\n\tJobUUID    string         `json:\"job_uuid\"`\n\tProperties NodeProperties `json:\"properties\"`\n}\n\ntype NodeProperties struct {\n\tCloudNode      NodePropertiesCloudNode `json:\"cloud_node\"`\n\tTotalCPUCores  int                     `json:\"total_cpu_cores,omitempty\"`\n\tTotalScratchMB int64                   `json:\"total_scratch_mb,omitempty\"`\n\tTotalRAMMB     int64                   `json:\"total_ram_mb,omitempty\"`\n}\n\ntype NodePropertiesCloudNode struct {\n\tSize  string  `json:\"size,omitempty\"`\n\tPrice float64 `json:\"price\"`\n}\n\nfunc (c Node) resourceName() string {\n\treturn \"node\"\n}\n\n// NodeList is an arvados#nodeList resource.\ntype NodeList struct {\n\tItems          []Node `json:\"items\"`\n\tItemsAvailable int    `json:\"items_available\"`\n\tOffset         int    `json:\"offset\"`\n\tLimit          int    `json:\"limit\"`\n}\n"
  },
  {
    "path": "sdk/go/arvados/postgresql.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport \"strings\"\n\nfunc (c PostgreSQLConnection) String() string {\n\ts := \"\"\n\tfor k, v := range c {\n\t\tif v == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\ts += strings.ToLower(k)\n\t\ts += \"='\"\n\t\ts += strings.Replace(\n\t\t\tstrings.Replace(v, `\\`, `\\\\`, -1),\n\t\t\t`'`, `\\'`, -1)\n\t\ts += \"' \"\n\t}\n\treturn s\n}\n"
  },
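  {
    "path": "sdk/go/arvados/postgresql_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Editor's sketch, not upstream source: String assembles a libpq-style\n// connection string from a PostgreSQLConnection (a key/value map\n// defined elsewhere in this package), lowercasing keys, skipping empty\n// values, and backslash-escaping single quotes and backslashes inside\n// the quoted values.\n\npackage arvados\n\nimport \"fmt\"\n\nfunc ExamplePostgreSQLConnection_String() {\n\tc := PostgreSQLConnection{\n\t\t\"DBName\":   \"arvados_production\",\n\t\t\"Host\":     \"localhost\",\n\t\t\"Password\": \"it's-a-secret\",\n\t}\n\t// Map iteration order is random, so the pairs may appear in any\n\t// order, e.g.:\n\t// dbname='arvados_production' host='localhost' password='it\\\\'s-a-secret'\n\tfmt.Println(c.String())\n}\n"
  },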
  {
    "path": "sdk/go/arvados/resource_list.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n)\n\n// ResourceListParams expresses which results are requested in a\n// list/index API.\ntype ResourceListParams struct {\n\tSelect             []string `json:\"select,omitempty\"`\n\tFilters            []Filter `json:\"filters,omitempty\"`\n\tIncludeTrash       bool     `json:\"include_trash,omitempty\"`\n\tIncludeOldVersions bool     `json:\"include_old_versions,omitempty\"`\n\tLimit              *int     `json:\"limit,omitempty\"`\n\tOffset             int      `json:\"offset,omitempty\"`\n\tOrder              string   `json:\"order,omitempty\"`\n\tDistinct           bool     `json:\"distinct,omitempty\"`\n\tCount              string   `json:\"count,omitempty\"`\n}\n\n// A Filter restricts the set of records returned by a list/index API.\ntype Filter struct {\n\tAttr     string\n\tOperator string\n\tOperand  interface{}\n}\n\n// MarshalJSON encodes a Filter to a JSON array.\nfunc (f *Filter) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal([]interface{}{f.Attr, f.Operator, f.Operand})\n}\n\n// UnmarshalJSON decodes a JSON array to a Filter.\nfunc (f *Filter) UnmarshalJSON(data []byte) error {\n\tvar decoded interface{}\n\terr := json.Unmarshal(data, &decoded)\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch decoded := decoded.(type) {\n\tcase string:\n\t\t// Accept \"(foo < bar)\" as a more obvious way to spell\n\t\t// [\"(foo < bar)\",\"=\",true]\n\t\t*f = Filter{decoded, \"=\", true}\n\tcase []interface{}:\n\t\tif len(decoded) != 3 {\n\t\t\treturn fmt.Errorf(\"invalid filter %q: must have 3 decoded\", data)\n\t\t}\n\t\tattr, ok := decoded[0].(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"invalid filter attr %q\", decoded[0])\n\t\t}\n\t\top, ok := decoded[1].(string)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"invalid filter operator %q\", decoded[1])\n\t\t}\n\t\toperand := decoded[2]\n\t\tswitch operand.(type) {\n\t\tcase string, float64, []interface{}, nil, bool:\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"invalid filter operand %q\", decoded[2])\n\t\t}\n\t\t*f = Filter{attr, op, operand}\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid filter: json decoded as %T instead of array or string\", decoded)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "sdk/go/arvados/resource_list_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"encoding/json\"\n\t\"time\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&filterEncodingSuite{})\n\ntype filterEncodingSuite struct{}\n\nfunc (s *filterEncodingSuite) TestMarshalNanoseconds(c *check.C) {\n\tt0 := time.Now()\n\tt0str := t0.Format(time.RFC3339Nano)\n\tbuf, err := json.Marshal([]Filter{\n\t\t{Attr: \"modified_at\", Operator: \"=\", Operand: t0}})\n\tc.Assert(err, check.IsNil)\n\tc.Check(string(buf), check.Equals, `[[\"modified_at\",\"=\",\"`+t0str+`\"]]`)\n}\n\nfunc (s *filterEncodingSuite) TestMarshalNil(c *check.C) {\n\tbuf, err := json.Marshal([]Filter{\n\t\t{Attr: \"modified_at\", Operator: \"=\", Operand: nil}})\n\tc.Assert(err, check.IsNil)\n\tc.Check(string(buf), check.Equals, `[[\"modified_at\",\"=\",null]]`)\n}\n\nfunc (s *filterEncodingSuite) TestUnmarshalNil(c *check.C) {\n\tbuf := []byte(`[\"modified_at\",\"=\",null]`)\n\tvar f Filter\n\terr := f.UnmarshalJSON(buf)\n\tc.Assert(err, check.IsNil)\n\tc.Check(f, check.DeepEquals, Filter{Attr: \"modified_at\", Operator: \"=\", Operand: nil})\n}\n\nfunc (s *filterEncodingSuite) TestMarshalBoolean(c *check.C) {\n\tbuf, err := json.Marshal([]Filter{\n\t\t{Attr: \"is_active\", Operator: \"=\", Operand: true}})\n\tc.Assert(err, check.IsNil)\n\tc.Check(string(buf), check.Equals, `[[\"is_active\",\"=\",true]]`)\n}\n\nfunc (s *filterEncodingSuite) TestUnmarshalBoolean(c *check.C) {\n\tbuf := []byte(`[\"is_active\",\"=\",true]`)\n\tvar f Filter\n\terr := f.UnmarshalJSON(buf)\n\tc.Assert(err, check.IsNil)\n\tc.Check(f, check.DeepEquals, Filter{Attr: \"is_active\", Operator: \"=\", Operand: true})\n}\n\nfunc (s *filterEncodingSuite) TestUnmarshalBooleanExpression(c *check.C) {\n\tbuf := []byte(`\"(foo < bar)\"`)\n\tvar f Filter\n\terr := f.UnmarshalJSON(buf)\n\tc.Assert(err, check.IsNil)\n\tc.Check(f, check.DeepEquals, Filter{Attr: \"(foo < bar)\", Operator: \"=\", Operand: true})\n}\n"
  },
  {
    "path": "sdk/go/arvados/throttle.go",
    "content": "package arvados\n\ntype throttle struct {\n\tc chan struct{}\n}\n\nfunc newThrottle(n int) *throttle {\n\treturn &throttle{c: make(chan struct{}, n)}\n}\n\nfunc (t *throttle) Acquire() {\n\tt.c <- struct{}{}\n}\n\nfunc (t *throttle) Release() {\n\t<-t.c\n}\n"
  },
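  {
    "path": "sdk/go/arvados/throttle_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Editor's sketch, not upstream source: throttle is a counting\n// semaphore built on a buffered channel. Acquire sends into the\n// channel, blocking once n slots are taken; Release receives,\n// freeing a slot.\n\npackage arvados\n\nimport \"sync\"\n\nfunc Example_throttle() {\n\tt := newThrottle(3) // allow at most 3 concurrent workers\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tt.Acquire()\n\t\t\tdefer t.Release()\n\t\t\t// ...do rate-limited work here...\n\t\t}()\n\t}\n\twg.Wait()\n}\n"
  },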
  {
    "path": "sdk/go/arvados/tls_certs.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport \"os\"\n\n// Load root CAs from /etc/arvados/ca-certificates.crt if it exists\n// and SSL_CERT_FILE does not already specify a different file.\nfunc init() {\n\tenvvar := \"SSL_CERT_FILE\"\n\tcertfile := \"/etc/arvados/ca-certificates.crt\"\n\tif os.Getenv(envvar) != \"\" {\n\t\t// Caller has already specified SSL_CERT_FILE.\n\t\treturn\n\t}\n\tif _, err := os.ReadFile(certfile); err != nil {\n\t\t// Custom cert file is not present/readable.\n\t\treturn\n\t}\n\tos.Setenv(envvar, certfile)\n}\n"
  },
  {
    "path": "sdk/go/arvados/tls_certs_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"os\"\n\t\"os/exec\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\ntype tlsCertsSuite struct{}\n\nvar _ = check.Suite(&tlsCertsSuite{})\n\nfunc (s *tlsCertsSuite) TestCustomCert(c *check.C) {\n\tcertfile := \"/etc/arvados/ca-certificates.crt\"\n\tif _, err := os.Stat(certfile); err != nil {\n\t\tc.Skip(\"custom cert file \" + certfile + \" does not exist\")\n\t}\n\tout, err := exec.Command(\"bash\", \"-c\", \"SSL_CERT_FILE= go run tls_certs_test_showenv.go\").CombinedOutput()\n\tc.Logf(\"%s\", out)\n\tc.Assert(err, check.IsNil)\n\tc.Check(string(out), check.Equals, certfile+\"\\n\")\n\n\tout, err = exec.Command(\"bash\", \"-c\", \"SSL_CERT_FILE=/dev/null go run tls_certs_test_showenv.go\").CombinedOutput()\n\tc.Logf(\"%s\", out)\n\tc.Assert(err, check.IsNil)\n\tc.Check(string(out), check.Equals, \"/dev/null\\n\")\n}\n"
  },
  {
    "path": "sdk/go/arvados/tls_certs_test_showenv.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n//go:build ignore\n\n// This is a test program invoked by tls_certs_test.go\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\nvar _ = arvados.Client{}\n\nfunc main() {\n\tfmt.Println(os.Getenv(\"SSL_CERT_FILE\"))\n}\n"
  },
  {
    "path": "sdk/go/arvados/user.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport \"time\"\n\n// User is an arvados#user record\ntype User struct {\n\tUUID               string                 `json:\"uuid\"`\n\tEtag               string                 `json:\"etag\"`\n\tIsActive           bool                   `json:\"is_active\"`\n\tIsAdmin            bool                   `json:\"is_admin\"`\n\tUsername           string                 `json:\"username\"`\n\tEmail              string                 `json:\"email\"`\n\tFullName           string                 `json:\"full_name\"`\n\tFirstName          string                 `json:\"first_name\"`\n\tLastName           string                 `json:\"last_name\"`\n\tIdentityURL        string                 `json:\"identity_url\"`\n\tIsInvited          bool                   `json:\"is_invited\"`\n\tOwnerUUID          string                 `json:\"owner_uuid\"`\n\tCreatedAt          time.Time              `json:\"created_at\"`\n\tModifiedAt         time.Time              `json:\"modified_at\"`\n\tModifiedByUserUUID string                 `json:\"modified_by_user_uuid\"`\n\tPrefs              map[string]interface{} `json:\"prefs\"`\n\tWritableBy         []string               `json:\"writable_by,omitempty\"`\n\tCanWrite           bool                   `json:\"can_write\"`\n\tCanManage          bool                   `json:\"can_manage\"`\n}\n\n// UserList is an arvados#userList resource.\ntype UserList struct {\n\tItems          []User `json:\"items\"`\n\tItemsAvailable int    `json:\"items_available\"`\n\tOffset         int    `json:\"offset\"`\n\tLimit          int    `json:\"limit\"`\n}\n\n// CurrentUser calls arvados.v1.users.current, and returns the User\n// record corresponding to this client's credentials.\nfunc (c *Client) CurrentUser() (User, error) {\n\tvar u User\n\terr := c.RequestAndDecode(&u, \"GET\", \"arvados/v1/users/current\", nil, nil)\n\treturn u, err\n}\n"
  },
  {
    "path": "sdk/go/arvados/virtual_machine.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport \"time\"\n\n// VirtualMachine is an arvados#virtualMachine resource.\ntype VirtualMachine struct {\n\tUUID               string     `json:\"uuid\"`\n\tOwnerUUID          string     `json:\"owner_uuid\"`\n\tHostname           string     `json:\"hostname\"`\n\tCreatedAt          *time.Time `json:\"created_at\"`\n\tModifiedAt         *time.Time `json:\"modified_at\"`\n\tModifiedByUserUUID string     `json:\"modified_by_user_uuid\"`\n}\n\n// VirtualMachineList is an arvados#virtualMachineList resource.\ntype VirtualMachineList struct {\n\tItems          []VirtualMachine `json:\"items\"`\n\tItemsAvailable int              `json:\"items_available\"`\n\tOffset         int              `json:\"offset\"`\n\tLimit          int              `json:\"limit\"`\n}\n"
  },
  {
    "path": "sdk/go/arvados/vocabulary.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Vocabulary struct {\n\treservedTagKeys map[string]bool          `json:\"-\"`\n\tStrictTags      bool                     `json:\"strict_tags\"`\n\tTags            map[string]VocabularyTag `json:\"tags\"`\n}\n\ntype VocabularyTag struct {\n\tStrict bool                          `json:\"strict\"`\n\tLabels []VocabularyLabel             `json:\"labels\"`\n\tValues map[string]VocabularyTagValue `json:\"values\"`\n}\n\n// Cannot have a constant map in Go, so we have to use a function.\n// If you are adding a new system property, it SHOULD start with `arv:`,\n// and Check will allow it. This map is for historical exceptions that\n// predate standardizing on this prefix.\nfunc (v *Vocabulary) systemTagKeys() map[string]bool {\n\treturn map[string]bool{\n\t\t// Collection keys - set by arvados-cwl-runner\n\t\t\"container_request\": true,\n\t\t\"container_uuid\":    true,\n\n\t\t// legacy Collection key, set by arvados-cwl-runner,\n\t\t// was changed to container_uuid in Arvados 2.6.0 but\n\t\t// still gets set if an older version of a-c-r is\n\t\t// used.\n\t\t\"container\": true,\n\n\t\t// Set by several components to indicate the intended\n\t\t// role of a collection\n\t\t\"type\": true,\n\n\t\t// Collection keys - set by arv-keepdocker (on the way out)\n\t\t\"docker-image-repo-tag\": true,\n\n\t\t// Container request keys - set by arvados-cwl-runner\n\t\t\"cwl_input\":  true,\n\t\t\"cwl_output\": true,\n\n\t\t// Container request key set alongside by Workbench 2\n\t\t// to link to the Workflow definition used to launch\n\t\t// the workflow\n\t\t\"template_uuid\": true,\n\t\t\"workflowName\":  true,\n\n\t\t// Group keys\n\t\t\"filters\": true,\n\n\t\t// Link keys\n\t\t\"groups\":          true,\n\t\t\"image_timestamp\": true,\n\t\t\"username\":        true,\n\t}\n}\n\ntype VocabularyLabel struct {\n\tLabel string `json:\"label\"`\n}\n\ntype VocabularyTagValue struct {\n\tLabels []VocabularyLabel `json:\"labels\"`\n}\n\n// NewVocabulary creates a new Vocabulary from a JSON definition and a list\n// of reserved tag keys that will get special treatment when strict mode is\n// enabled.\nfunc NewVocabulary(data []byte, managedTagKeys []string) (voc *Vocabulary, err error) {\n\tif r := bytes.Compare(data, []byte(\"\")); r == 0 {\n\t\treturn &Vocabulary{}, nil\n\t}\n\terr = json.Unmarshal(data, &voc)\n\tif err != nil {\n\t\tvar serr *json.SyntaxError\n\t\tif errors.As(err, &serr) {\n\t\t\toffset := serr.Offset\n\t\t\terrorMsg := string(data[:offset])\n\t\t\tline := 1 + strings.Count(errorMsg, \"\\n\")\n\t\t\tcolumn := offset - int64(strings.LastIndex(errorMsg, \"\\n\")+len(\"\\n\"))\n\t\t\treturn nil, fmt.Errorf(\"invalid JSON format: %q (line %d, column %d)\", err, line, column)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"invalid JSON format: %q\", err)\n\t}\n\tif reflect.DeepEqual(voc, &Vocabulary{}) {\n\t\treturn nil, fmt.Errorf(\"JSON data provided doesn't match Vocabulary format: %q\", data)\n\t}\n\n\tshouldReportErrors := false\n\terrors := []string{}\n\n\t// json.Unmarshal() doesn't error out on duplicate keys.\n\tdupedKeys := []string{}\n\terr = checkJSONDupedKeys(json.NewDecoder(bytes.NewReader(data)), nil, &dupedKeys)\n\tif err != nil {\n\t\tshouldReportErrors = true\n\t\tfor _, dk := range dupedKeys {\n\t\t\terrors = append(errors, 
fmt.Sprintf(\"duplicate JSON key %q\", dk))\n\t\t}\n\t}\n\tvoc.reservedTagKeys = make(map[string]bool)\n\tfor _, managedKey := range managedTagKeys {\n\t\tvoc.reservedTagKeys[managedKey] = true\n\t}\n\tfor systemKey := range voc.systemTagKeys() {\n\t\tvoc.reservedTagKeys[systemKey] = true\n\t}\n\tvalidationErrs, err := voc.validate()\n\tif err != nil {\n\t\tshouldReportErrors = true\n\t\terrors = append(errors, validationErrs...)\n\t}\n\tif shouldReportErrors {\n\t\treturn nil, fmt.Errorf(\"%s\", strings.Join(errors, \"\\n\"))\n\t}\n\treturn voc, nil\n}\n\nfunc checkJSONDupedKeys(d *json.Decoder, path []string, errors *[]string) error {\n\tt, err := d.Token()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdelim, ok := t.(json.Delim)\n\tif !ok {\n\t\treturn nil\n\t}\n\tswitch delim {\n\tcase '{':\n\t\tkeys := make(map[string]bool)\n\t\tfor d.More() {\n\t\t\tt, err := d.Token()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tkey := t.(string)\n\n\t\t\tif keys[key] {\n\t\t\t\t*errors = append(*errors, strings.Join(append(path, key), \".\"))\n\t\t\t}\n\t\t\tkeys[key] = true\n\n\t\t\tif err := checkJSONDupedKeys(d, append(path, key), errors); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t// consume closing '}'\n\t\tif _, err := d.Token(); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase '[':\n\t\ti := 0\n\t\tfor d.More() {\n\t\t\tif err := checkJSONDupedKeys(d, append(path, strconv.Itoa(i)), errors); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\t// consume closing ']'\n\t\tif _, err := d.Token(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif len(path) == 0 && len(*errors) > 0 {\n\t\treturn fmt.Errorf(\"duplicate JSON key(s) found\")\n\t}\n\treturn nil\n}\n\nfunc (v *Vocabulary) validate() ([]string, error) {\n\tif v == nil {\n\t\treturn nil, nil\n\t}\n\ttagKeys := map[string]string{}\n\t// Checks for Vocabulary strictness\n\tif v.StrictTags && len(v.Tags) == 0 {\n\t\treturn nil, fmt.Errorf(\"vocabulary is strict but no tags are defined\")\n\t}\n\t// Checks for collisions between tag keys, reserved tag keys\n\t// and tag key labels.\n\terrors := []string{}\n\tfor key := range v.Tags {\n\t\tif v.reservedTagKeys[key] {\n\t\t\terrors = append(errors, fmt.Sprintf(\"tag key %q is reserved\", key))\n\t\t}\n\t\tlcKey := strings.ToLower(key)\n\t\tif tagKeys[lcKey] != \"\" {\n\t\t\terrors = append(errors, fmt.Sprintf(\"duplicate tag key %q\", key))\n\t\t}\n\t\ttagKeys[lcKey] = key\n\t\tfor _, lbl := range v.Tags[key].Labels {\n\t\t\tlabel := strings.ToLower(lbl.Label)\n\t\t\tif tagKeys[label] != \"\" {\n\t\t\t\terrors = append(errors, fmt.Sprintf(\"tag label %q for key %q already seen as a tag key or label\", lbl.Label, key))\n\t\t\t}\n\t\t\ttagKeys[label] = lbl.Label\n\t\t}\n\t\t// Checks for value strictness\n\t\tif v.Tags[key].Strict && len(v.Tags[key].Values) == 0 {\n\t\t\terrors = append(errors, fmt.Sprintf(\"tag key %q is configured as strict but doesn't provide values\", key))\n\t\t}\n\t\t// Checks for collisions between tag values and tag value labels.\n\t\ttagValues := map[string]string{}\n\t\tfor val := range v.Tags[key].Values {\n\t\t\tlcVal := strings.ToLower(val)\n\t\t\tif tagValues[lcVal] != \"\" {\n\t\t\t\terrors = append(errors, fmt.Sprintf(\"duplicate tag value %q for tag %q\", val, key))\n\t\t\t}\n\t\t\t// Checks for collisions between labels from different values.\n\t\t\ttagValues[lcVal] = val\n\t\t\tfor _, tagLbl := range v.Tags[key].Values[val].Labels {\n\t\t\t\tlabel := strings.ToLower(tagLbl.Label)\n\t\t\t\tif tagValues[label] != \"\" && 
tagValues[label] != val {\n\t\t\t\t\terrors = append(errors, fmt.Sprintf(\"tag value label %q for pair (%q:%q) already seen on value %q\", tagLbl.Label, key, val, tagValues[label]))\n\t\t\t\t}\n\t\t\t\ttagValues[label] = val\n\t\t\t}\n\t\t}\n\t}\n\tif len(errors) > 0 {\n\t\treturn errors, fmt.Errorf(\"invalid vocabulary\")\n\t}\n\treturn nil, nil\n}\n\nfunc (v *Vocabulary) getLabelsToKeys() (labels map[string]string) {\n\tif v == nil {\n\t\treturn\n\t}\n\tlabels = make(map[string]string)\n\tfor key, val := range v.Tags {\n\t\tfor _, lbl := range val.Labels {\n\t\t\tlabel := strings.ToLower(lbl.Label)\n\t\t\tlabels[label] = key\n\t\t}\n\t}\n\treturn labels\n}\n\nfunc (v *Vocabulary) getLabelsToValues(key string) (labels map[string]string) {\n\tif v == nil {\n\t\treturn\n\t}\n\tlabels = make(map[string]string)\n\tif _, ok := v.Tags[key]; ok {\n\t\tfor val := range v.Tags[key].Values {\n\t\t\tlabels[strings.ToLower(val)] = val\n\t\t\tfor _, tagLbl := range v.Tags[key].Values[val].Labels {\n\t\t\t\tlabel := strings.ToLower(tagLbl.Label)\n\t\t\t\tlabels[label] = val\n\t\t\t}\n\t\t}\n\t}\n\treturn labels\n}\n\nfunc (v *Vocabulary) checkValue(key, val string) error {\n\tif _, ok := v.Tags[key].Values[val]; !ok {\n\t\tlcVal := strings.ToLower(val)\n\t\tcorrectValue, ok := v.getLabelsToValues(key)[lcVal]\n\t\tif ok {\n\t\t\treturn fmt.Errorf(\"tag value %q for key %q is an alias, must be provided as %q\", val, key, correctValue)\n\t\t} else if v.Tags[key].Strict {\n\t\t\treturn fmt.Errorf(\"tag value %q is not valid for key %q\", val, key)\n\t\t}\n\t}\n\treturn nil\n}\n\n// Check validates the given data against the vocabulary.\nfunc (v *Vocabulary) Check(data map[string]interface{}) error {\n\tif v == nil {\n\t\treturn nil\n\t}\n\tfor key, val := range data {\n\t\t// Checks for key validity\n\t\tif strings.HasPrefix(key, \"arv:\") || v.reservedTagKeys[key] {\n\t\t\t// Allow reserved keys to be used even if they are not defined in\n\t\t\t// the vocabulary no matter its strictness.\n\t\t\tcontinue\n\t\t}\n\t\tif _, ok := v.Tags[key]; !ok {\n\t\t\tlcKey := strings.ToLower(key)\n\t\t\tcorrectKey, ok := v.getLabelsToKeys()[lcKey]\n\t\t\tif ok {\n\t\t\t\treturn fmt.Errorf(\"tag key %q is an alias, must be provided as %q\", key, correctKey)\n\t\t\t} else if v.StrictTags {\n\t\t\t\treturn fmt.Errorf(\"tag key %q is not defined in the vocabulary\", key)\n\t\t\t}\n\t\t\t// If the key is not defined, we don't need to check the value\n\t\t\tcontinue\n\t\t}\n\t\t// Checks for value validity -- key is defined\n\t\tswitch val := val.(type) {\n\t\tcase string:\n\t\t\terr := v.checkValue(key, val)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase []interface{}:\n\t\t\tfor _, singleVal := range val {\n\t\t\t\tswitch singleVal := singleVal.(type) {\n\t\t\t\tcase string:\n\t\t\t\t\terr := v.checkValue(key, singleVal)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\treturn fmt.Errorf(\"value list element type for tag key %q was %T, but expected a string\", key, singleVal)\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"value type for tag key %q was %T, but expected a string or list of strings\", key, val)\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "sdk/go/arvados/vocabulary_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport (\n\t\"encoding/json\"\n\t\"regexp\"\n\t\"strings\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\ntype VocabularySuite struct {\n\ttestVoc *Vocabulary\n}\n\nvar _ = check.Suite(&VocabularySuite{})\n\nfunc (s *VocabularySuite) SetUpTest(c *check.C) {\n\ts.testVoc = &Vocabulary{\n\t\treservedTagKeys: map[string]bool{\n\t\t\t\"reservedKey\": true,\n\t\t},\n\t\tStrictTags: false,\n\t\tTags: map[string]VocabularyTag{\n\t\t\t\"IDTAGANIMALS\": {\n\t\t\t\tStrict: false,\n\t\t\t\tLabels: []VocabularyLabel{{Label: \"Animal\"}, {Label: \"Creature\"}},\n\t\t\t\tValues: map[string]VocabularyTagValue{\n\t\t\t\t\t\"IDVALANIMAL1\": {\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Human\"}, {Label: \"Homo sapiens\"}},\n\t\t\t\t\t},\n\t\t\t\t\t\"IDVALANIMAL2\": {\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Elephant\"}, {Label: \"Loxodonta\"}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"IDTAGIMPORTANCE\": {\n\t\t\t\tStrict: true,\n\t\t\t\tLabels: []VocabularyLabel{{Label: \"Importance\"}, {Label: \"Priority\"}},\n\t\t\t\tValues: map[string]VocabularyTagValue{\n\t\t\t\t\t\"IDVAL3\": {\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Low\"}, {Label: \"Low priority\"}},\n\t\t\t\t\t},\n\t\t\t\t\t\"IDVAL2\": {\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Medium\"}, {Label: \"Medium priority\"}},\n\t\t\t\t\t},\n\t\t\t\t\t\"IDVAL1\": {\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"High\"}, {Label: \"High priority\"}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"IDTAGCOMMENT\": {\n\t\t\t\tStrict: false,\n\t\t\t\tLabels: []VocabularyLabel{{Label: \"Comment\"}},\n\t\t\t},\n\t\t},\n\t}\n\t_, err := s.testVoc.validate()\n\tc.Assert(err, check.IsNil)\n}\n\nfunc (s *VocabularySuite) TestCheck(c *check.C) {\n\ttests := []struct {\n\t\tname          string\n\t\tstrictVoc     bool\n\t\tprops         string\n\t\texpectSuccess bool\n\t\terrMatches    string\n\t}{\n\t\t// Check succeeds\n\t\t{\n\t\t\t\"Known key, known value\",\n\t\t\tfalse,\n\t\t\t`{\"IDTAGANIMALS\":\"IDVALANIMAL1\"}`,\n\t\t\ttrue,\n\t\t\t\"\",\n\t\t},\n\t\t{\n\t\t\t\"Unknown non-alias key on non-strict vocabulary\",\n\t\t\tfalse,\n\t\t\t`{\"foo\":\"bar\"}`,\n\t\t\ttrue,\n\t\t\t\"\",\n\t\t},\n\t\t{\n\t\t\t\"Known non-strict key, unknown non-alias value\",\n\t\t\tfalse,\n\t\t\t`{\"IDTAGANIMALS\":\"IDVALANIMAL3\"}`,\n\t\t\ttrue,\n\t\t\t\"\",\n\t\t},\n\t\t{\n\t\t\t\"Undefined but reserved key on strict vocabulary\",\n\t\t\ttrue,\n\t\t\t`{\"reservedKey\":\"bar\"}`,\n\t\t\ttrue,\n\t\t\t\"\",\n\t\t},\n\t\t{\n\t\t\t\"Known key, list of known values\",\n\t\t\tfalse,\n\t\t\t`{\"IDTAGANIMALS\":[\"IDVALANIMAL1\",\"IDVALANIMAL2\"]}`,\n\t\t\ttrue,\n\t\t\t\"\",\n\t\t},\n\t\t{\n\t\t\t\"Known non-strict key, list of unknown non-alias values\",\n\t\t\tfalse,\n\t\t\t`{\"IDTAGCOMMENT\":[\"hello world\",\"lorem ipsum\"]}`,\n\t\t\ttrue,\n\t\t\t\"\",\n\t\t},\n\t\t// Check fails\n\t\t{\n\t\t\t\"Known first key & value; known 2nd key, unknown 2nd value\",\n\t\t\tfalse,\n\t\t\t`{\"IDTAGANIMALS\":\"IDVALANIMAL1\", \"IDTAGIMPORTANCE\": \"blah blah\"}`,\n\t\t\tfalse,\n\t\t\t\"tag value.*is not valid for key.*\",\n\t\t},\n\t\t{\n\t\t\t\"Unknown non-alias key on strict vocabulary\",\n\t\t\ttrue,\n\t\t\t`{\"foo\":\"bar\"}`,\n\t\t\tfalse,\n\t\t\t\"tag key.*is not defined in the vocabulary\",\n\t\t},\n\t\t{\n\t\t\t\"Known non-strict key, known value 
alias\",\n\t\t\tfalse,\n\t\t\t`{\"IDTAGANIMALS\":\"Loxodonta\"}`,\n\t\t\tfalse,\n\t\t\t\"tag value.*for key.* is an alias, must be provided as.*\",\n\t\t},\n\t\t{\n\t\t\t\"Known strict key, unknown non-alias value\",\n\t\t\tfalse,\n\t\t\t`{\"IDTAGIMPORTANCE\":\"Unimportant\"}`,\n\t\t\tfalse,\n\t\t\t\"tag value.*is not valid for key.*\",\n\t\t},\n\t\t{\n\t\t\t\"Known strict key, lowercase value regarded as alias\",\n\t\t\tfalse,\n\t\t\t`{\"IDTAGIMPORTANCE\":\"idval1\"}`,\n\t\t\tfalse,\n\t\t\t\"tag value.*for key.* is an alias, must be provided as.*\",\n\t\t},\n\t\t{\n\t\t\t\"Known strict key, known value alias\",\n\t\t\tfalse,\n\t\t\t`{\"IDTAGIMPORTANCE\":\"High\"}`,\n\t\t\tfalse,\n\t\t\t\"tag value.* for key.*is an alias, must be provided as.*\",\n\t\t},\n\t\t{\n\t\t\t\"Known strict key, list of known alias values\",\n\t\t\tfalse,\n\t\t\t`{\"IDTAGIMPORTANCE\":[\"High\", \"Low\"]}`,\n\t\t\tfalse,\n\t\t\t\"tag value.*for key.*is an alias, must be provided as.*\",\n\t\t},\n\t\t{\n\t\t\t\"Known strict key, list of unknown non-alias values\",\n\t\t\tfalse,\n\t\t\t`{\"IDTAGIMPORTANCE\":[\"foo\",\"bar\"]}`,\n\t\t\tfalse,\n\t\t\t\"tag value.*is not valid for key.*\",\n\t\t},\n\t\t{\n\t\t\t\"Invalid value type\",\n\t\t\tfalse,\n\t\t\t`{\"IDTAGANIMALS\":1}`,\n\t\t\tfalse,\n\t\t\t\"value type for tag key.* was.*, but expected a string or list of strings\",\n\t\t},\n\t\t{\n\t\t\t\"Value list of invalid type\",\n\t\t\tfalse,\n\t\t\t`{\"IDTAGANIMALS\":[1]}`,\n\t\t\tfalse,\n\t\t\t\"value list element type for tag key.* was.*, but expected a string\",\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName()+\" \", tt.name)\n\t\ts.testVoc.StrictTags = tt.strictVoc\n\n\t\tvar data map[string]interface{}\n\t\terr := json.Unmarshal([]byte(tt.props), &data)\n\t\tc.Assert(err, check.IsNil)\n\t\terr = s.testVoc.Check(data)\n\t\tif tt.expectSuccess {\n\t\t\tc.Assert(err, check.IsNil)\n\t\t} else {\n\t\t\tc.Assert(err, check.NotNil)\n\t\t\tc.Assert(err.Error(), check.Matches, tt.errMatches)\n\t\t}\n\t}\n}\n\nfunc (s *VocabularySuite) TestNewVocabulary(c *check.C) {\n\ttests := []struct {\n\t\tname       string\n\t\tdata       string\n\t\tisValid    bool\n\t\terrMatches string\n\t\texpect     *Vocabulary\n\t}{\n\t\t{\"Empty data\", \"\", true, \"\", &Vocabulary{}},\n\t\t{\"Invalid JSON\", \"foo\", false, \"invalid JSON format.*\", nil},\n\t\t{\"Valid, empty JSON\", \"{}\", false, \".*doesn't match Vocabulary format.*\", nil},\n\t\t{\"Valid JSON, wrong data\", `{\"foo\":\"bar\"}`, false, \".*doesn't match Vocabulary format.*\", nil},\n\t\t{\n\t\t\t\"Simple valid example\",\n\t\t\t`{\"tags\":{\n\t\t\t\t\"IDTAGANIMALS\":{\n\t\t\t\t\t\"strict\": false,\n\t\t\t\t\t\"labels\": [{\"label\": \"Animal\"}, {\"label\": \"Creature\"}],\n\t\t\t\t\t\"values\": {\n\t\t\t\t\t\t\"IDVALANIMAL1\":{\"labels\":[{\"label\":\"Human\"}, {\"label\":\"Homo sapiens\"}]},\n\t\t\t\t\t\t\"IDVALANIMAL2\":{\"labels\":[{\"label\":\"Elephant\"}, {\"label\":\"Loxodonta\"}]},\n\t\t\t\t\t\t\"DOG\":{\"labels\":[{\"label\":\"Dog\"}, {\"label\":\"Canis lupus familiaris\"}, {\"label\":\"dOg\"}]}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}}`,\n\t\t\ttrue, \"\",\n\t\t\t&Vocabulary{\n\t\t\t\treservedTagKeys: map[string]bool{\n\t\t\t\t\t\"container_request\":     true,\n\t\t\t\t\t\"container_uuid\":        true,\n\t\t\t\t\t\"container\":             true,\n\t\t\t\t\t\"cwl_input\":             true,\n\t\t\t\t\t\"cwl_output\":            true,\n\t\t\t\t\t\"docker-image-repo-tag\": true,\n\t\t\t\t\t\"filters\":               true,\n\t\t\t\t\t\"groups\":           
     true,\n\t\t\t\t\t\"image_timestamp\":       true,\n\t\t\t\t\t\"template_uuid\":         true,\n\t\t\t\t\t\"type\":                  true,\n\t\t\t\t\t\"username\":              true,\n\t\t\t\t\t\"workflowName\":          true,\n\t\t\t\t},\n\t\t\t\tStrictTags: false,\n\t\t\t\tTags: map[string]VocabularyTag{\n\t\t\t\t\t\"IDTAGANIMALS\": {\n\t\t\t\t\t\tStrict: false,\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Animal\"}, {Label: \"Creature\"}},\n\t\t\t\t\t\tValues: map[string]VocabularyTagValue{\n\t\t\t\t\t\t\t\"IDVALANIMAL1\": {\n\t\t\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Human\"}, {Label: \"Homo sapiens\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"IDVALANIMAL2\": {\n\t\t\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Elephant\"}, {Label: \"Loxodonta\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"DOG\": {\n\t\t\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Dog\"}, {Label: \"Canis lupus familiaris\"}, {Label: \"dOg\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"Invalid JSON error with line & column numbers\",\n\t\t\t`{\"tags\":{\n\t\t\t\t\"aKey\":{\n\t\t\t\t\t\"labels\": [,{\"label\": \"A label\"}]\n\t\t\t\t}\n\t\t\t}}`,\n\t\t\tfalse, `invalid JSON format:.*\\(line \\d+, column \\d+\\)`, nil,\n\t\t},\n\t\t{\n\t\t\t\"Invalid JSON with duplicate & reserved keys\",\n\t\t\t`{\"tags\":{\n\t\t\t\t\"type\":{\n\t\t\t\t\t\"strict\": false,\n\t\t\t\t\t\"labels\": [{\"label\": \"Class\", \"label\": \"Type\"}]\n\t\t\t\t},\n\t\t\t\t\"type\":{\n\t\t\t\t\t\"labels\": []\n\t\t\t\t}\n\t\t\t}}`,\n\t\t\tfalse, \"(?s).*duplicate JSON key \\\"tags.type.labels.0.label\\\"\\nduplicate JSON key \\\"tags.type\\\"\\ntag key \\\"type\\\" is reserved\", nil,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName()+\" \", tt.name)\n\t\tvoc, err := NewVocabulary([]byte(tt.data), []string{})\n\t\tif tt.isValid {\n\t\t\tc.Assert(err, check.IsNil)\n\t\t} else {\n\t\t\tc.Assert(err, check.NotNil)\n\t\t\tif tt.errMatches != \"\" {\n\t\t\t\tc.Assert(err, check.ErrorMatches, tt.errMatches)\n\t\t\t}\n\t\t}\n\t\tc.Assert(voc, check.DeepEquals, tt.expect)\n\t}\n}\n\nfunc (s *VocabularySuite) TestValidSystemProperties(c *check.C) {\n\ts.testVoc.StrictTags = true\n\tproperties := map[string]interface{}{\n\t\t\"arv:gitBranch\": \"main\",\n\t\t\"arv:OK\":        true,\n\t\t\"arv:cost\":      123,\n\t}\n\tc.Check(s.testVoc.Check(properties), check.IsNil)\n}\n\nfunc (s *VocabularySuite) TestSystemPropertiesPrefixTypo(c *check.C) {\n\ts.testVoc.StrictTags = true\n\tfor _, key := range []string{\n\t\t// Extra characters in prefix\n\t\t\"arv :foo\",\n\t\t\" arv:foo\",\n\t\t// Wrong punctuation\n\t\t\"arv.foo\",\n\t\t\"arv-foo\",\n\t\t\"arv_foo\",\n\t\t// Wrong case\n\t\t\"Arv:foo\",\n\t\t// Wrong word\n\t\t\"arvados\",\n\t\t\"arvados:foo\",\n\t} {\n\t\tproperties := map[string]interface{}{key: \"value\"}\n\t\tc.Check(s.testVoc.Check(properties), check.NotNil)\n\t}\n}\n\nfunc (s *VocabularySuite) TestValidationErrors(c *check.C) {\n\ttests := []struct {\n\t\tname       string\n\t\tvoc        *Vocabulary\n\t\terrMatches []string\n\t}{\n\t\t{\n\t\t\t\"Strict vocabulary, no keys\",\n\t\t\t&Vocabulary{\n\t\t\t\tStrictTags: true,\n\t\t\t},\n\t\t\t[]string{\"vocabulary is strict but no tags are defined\"},\n\t\t},\n\t\t{\n\t\t\t\"Collision between tag key and tag key label\",\n\t\t\t&Vocabulary{\n\t\t\t\tStrictTags: false,\n\t\t\t\tTags: map[string]VocabularyTag{\n\t\t\t\t\t\"IDTAGANIMALS\": {\n\t\t\t\t\t\tStrict: false,\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: 
\"Animal\"}, {Label: \"Creature\"}},\n\t\t\t\t\t},\n\t\t\t\t\t\"IDTAGCOMMENT\": {\n\t\t\t\t\t\tStrict: false,\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Comment\"}, {Label: \"IDTAGANIMALS\"}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnil, // Depending on how the map is sorted, this could be one of two errors\n\t\t},\n\t\t{\n\t\t\t\"Collision between tag key and tag key label (case-insensitive)\",\n\t\t\t&Vocabulary{\n\t\t\t\tStrictTags: false,\n\t\t\t\tTags: map[string]VocabularyTag{\n\t\t\t\t\t\"IDTAGANIMALS\": {\n\t\t\t\t\t\tStrict: false,\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Animal\"}, {Label: \"Creature\"}},\n\t\t\t\t\t},\n\t\t\t\t\t\"IDTAGCOMMENT\": {\n\t\t\t\t\t\tStrict: false,\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Comment\"}, {Label: \"IdTagAnimals\"}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnil, // Depending on how the map is sorted, this could be one of two errors\n\t\t},\n\t\t{\n\t\t\t\"Collision between tag key labels\",\n\t\t\t&Vocabulary{\n\t\t\t\tStrictTags: false,\n\t\t\t\tTags: map[string]VocabularyTag{\n\t\t\t\t\t\"IDTAGANIMALS\": {\n\t\t\t\t\t\tStrict: false,\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Animal\"}, {Label: \"Creature\"}},\n\t\t\t\t\t},\n\t\t\t\t\t\"IDTAGCOMMENT\": {\n\t\t\t\t\t\tStrict: false,\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Comment\"}, {Label: \"Animal\"}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]string{\"(?s).*tag label.*for key.*already seen.*\"},\n\t\t},\n\t\t{\n\t\t\t\"Collision between tag value and tag value label\",\n\t\t\t&Vocabulary{\n\t\t\t\tStrictTags: false,\n\t\t\t\tTags: map[string]VocabularyTag{\n\t\t\t\t\t\"IDTAGANIMALS\": {\n\t\t\t\t\t\tStrict: false,\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Animal\"}, {Label: \"Creature\"}},\n\t\t\t\t\t\tValues: map[string]VocabularyTagValue{\n\t\t\t\t\t\t\t\"IDVALANIMAL1\": {\n\t\t\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Human\"}, {Label: \"Mammal\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"IDVALANIMAL2\": {\n\t\t\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Elephant\"}, {Label: \"IDVALANIMAL1\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnil, // Depending on how the map is sorted, this could be one of two errors\n\t\t},\n\t\t{\n\t\t\t\"Collision between tag value and tag value label (case-insensitive)\",\n\t\t\t&Vocabulary{\n\t\t\t\tStrictTags: false,\n\t\t\t\tTags: map[string]VocabularyTag{\n\t\t\t\t\t\"IDTAGANIMALS\": {\n\t\t\t\t\t\tStrict: false,\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Animal\"}, {Label: \"Creature\"}},\n\t\t\t\t\t\tValues: map[string]VocabularyTagValue{\n\t\t\t\t\t\t\t\"IDVALANIMAL1\": {\n\t\t\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Human\"}, {Label: \"Mammal\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"IDVALANIMAL2\": {\n\t\t\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Elephant\"}, {Label: \"IDValAnimal1\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tnil, // Depending on how the map is sorted, this could be one of two errors\n\t\t},\n\t\t{\n\t\t\t\"Collision between tag value labels\",\n\t\t\t&Vocabulary{\n\t\t\t\tStrictTags: false,\n\t\t\t\tTags: map[string]VocabularyTag{\n\t\t\t\t\t\"IDTAGANIMALS\": {\n\t\t\t\t\t\tStrict: false,\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Animal\"}, {Label: \"Creature\"}},\n\t\t\t\t\t\tValues: map[string]VocabularyTagValue{\n\t\t\t\t\t\t\t\"IDVALANIMAL1\": {\n\t\t\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Human\"}, {Label: 
\"Mammal\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"IDVALANIMAL2\": {\n\t\t\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Elephant\"}, {Label: \"Mammal\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]string{\"(?s).*tag value label.*for pair.*already seen.*on value.*\"},\n\t\t},\n\t\t{\n\t\t\t\"Collision between tag value labels (case-insensitive)\",\n\t\t\t&Vocabulary{\n\t\t\t\tStrictTags: false,\n\t\t\t\tTags: map[string]VocabularyTag{\n\t\t\t\t\t\"IDTAGANIMALS\": {\n\t\t\t\t\t\tStrict: false,\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Animal\"}, {Label: \"Creature\"}},\n\t\t\t\t\t\tValues: map[string]VocabularyTagValue{\n\t\t\t\t\t\t\t\"IDVALANIMAL1\": {\n\t\t\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Human\"}, {Label: \"Mammal\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\"IDVALANIMAL2\": {\n\t\t\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Elephant\"}, {Label: \"mAMMAL\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]string{\"(?s).*tag value label.*for pair.*already seen.*on value.*\"},\n\t\t},\n\t\t{\n\t\t\t\"Strict tag key, with no values\",\n\t\t\t&Vocabulary{\n\t\t\t\tStrictTags: false,\n\t\t\t\tTags: map[string]VocabularyTag{\n\t\t\t\t\t\"IDTAGANIMALS\": {\n\t\t\t\t\t\tStrict: true,\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Animal\"}, {Label: \"Creature\"}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]string{\"(?s).*tag key.*is configured as strict but doesn't provide values\"},\n\t\t},\n\t\t{\n\t\t\t\"Multiple errors reported\",\n\t\t\t&Vocabulary{\n\t\t\t\tStrictTags: false,\n\t\t\t\tTags: map[string]VocabularyTag{\n\t\t\t\t\t\"IDTAGANIMALS\": {\n\t\t\t\t\t\tStrict: true,\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Animal\"}, {Label: \"Creature\"}},\n\t\t\t\t\t},\n\t\t\t\t\t\"IDTAGSIZES\": {\n\t\t\t\t\t\tLabels: []VocabularyLabel{{Label: \"Animal\"}, {Label: \"Size\"}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]string{\n\t\t\t\t\"(?s).*tag key.*is configured as strict but doesn't provide values.*\",\n\t\t\t\t\"(?s).*tag label.*for key.*already seen.*\",\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tc.Log(c.TestName()+\" \", tt.name)\n\t\tvalidationErrs, err := tt.voc.validate()\n\t\tc.Assert(err, check.NotNil)\n\t\tfor _, errMatch := range tt.errMatches {\n\t\t\tseen := false\n\t\t\tfor _, validationErr := range validationErrs {\n\t\t\t\tif regexp.MustCompile(errMatch).MatchString(validationErr) {\n\t\t\t\t\tseen = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(validationErrs) == 0 {\n\t\t\t\tc.Assert(err, check.ErrorMatches, errMatch)\n\t\t\t} else {\n\t\t\t\tc.Assert(seen, check.Equals, true,\n\t\t\t\t\tcheck.Commentf(\"Expected to see error matching %q:\\n%s\",\n\t\t\t\t\t\terrMatch, strings.Join(validationErrs, \"\\n\")))\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "sdk/go/arvados/workflow.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvados\n\nimport \"time\"\n\n// Workflow is an arvados#workflow resource.\ntype Workflow struct {\n\tUUID        string     `json:\"uuid\"`\n\tOwnerUUID   string     `json:\"owner_uuid\"`\n\tName        string     `json:\"name\"`\n\tDescription string     `json:\"description\"`\n\tDefinition  string     `json:\"definition\"`\n\tCreatedAt   *time.Time `json:\"created_at\"`\n\tModifiedAt  *time.Time `json:\"modified_at\"`\n}\n\n// WorkflowList is an arvados#workflowList resource.\ntype WorkflowList struct {\n\tItems          []Workflow `json:\"items\"`\n\tItemsAvailable int        `json:\"items_available\"`\n\tOffset         int        `json:\"offset\"`\n\tLimit          int        `json:\"limit\"`\n}\n"
  },
  {
    "path": "sdk/go/arvadosclient/arvadosclient.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n/* Simple Arvados Go SDK for communicating with API server. */\n\npackage arvadosclient\n\nimport (\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype StringMatcher func(string) bool\n\nvar UUIDMatch StringMatcher = arvados.UUIDMatch\nvar PDHMatch StringMatcher = arvados.PDHMatch\n\nvar MissingArvadosApiHost = errors.New(\"Missing required environment variable ARVADOS_API_HOST\")\nvar MissingArvadosApiToken = errors.New(\"Missing required environment variable ARVADOS_API_TOKEN\")\nvar ErrInvalidArgument = errors.New(\"Invalid argument\")\n\n// A common failure mode is to reuse a keepalive connection that has been\n// terminated (in a way that we can't detect) for being idle too long.\n// POST and DELETE are not safe to retry automatically, so we minimize\n// such failures by always using a new or recently active socket.\nvar MaxIdleConnectionDuration = 30 * time.Second\n\nvar RetryDelay = 2 * time.Second\n\nvar (\n\tdefaultInsecureHTTPClient *http.Client\n\tdefaultSecureHTTPClient   *http.Client\n\tdefaultHTTPClientMtx      sync.Mutex\n)\n\n// APIServerError contains an error that was returned by the API server.\ntype APIServerError struct {\n\t// Address of server returning error, of the form \"host:port\".\n\tServerAddress string\n\n\t// Components of server response.\n\tHttpStatusCode    int\n\tHttpStatusMessage string\n\n\t// Additional error details from response body.\n\tErrorDetails []string\n}\n\nfunc (e APIServerError) Error() string {\n\tif len(e.ErrorDetails) > 0 {\n\t\treturn fmt.Sprintf(\"arvados API server error: %s (%d: %s) returned by %s\",\n\t\t\tstrings.Join(e.ErrorDetails, \"; \"),\n\t\t\te.HttpStatusCode,\n\t\t\te.HttpStatusMessage,\n\t\t\te.ServerAddress)\n\t}\n\treturn fmt.Sprintf(\"arvados API server error: %d: %s returned by %s\",\n\t\te.HttpStatusCode,\n\t\te.HttpStatusMessage,\n\t\te.ServerAddress)\n}\n\n// StringBool tests whether s is suggestive of true. It returns true\n// if s is a mixed/uppoer/lower-case variant of \"1\", \"yes\", or \"true\".\nfunc StringBool(s string) bool {\n\ts = strings.ToLower(s)\n\treturn s == \"1\" || s == \"yes\" || s == \"true\"\n}\n\n// Dict is a helper type so we don't have to write out 'map[string]interface{}' every time.\ntype Dict map[string]interface{}\n\n// ArvadosClient contains information about how to contact the Arvados server\ntype ArvadosClient struct {\n\t// https\n\tScheme string\n\n\t// Arvados API server, form \"host:port\"\n\tApiServer string\n\n\t// Arvados API token for authentication\n\tApiToken string\n\n\t// Whether to require a valid SSL certificate or not\n\tApiInsecure bool\n\n\t// Client object shared by client requests.  Supports HTTP KeepAlive.\n\tClient *http.Client\n\n\t// Base URIs of Keep services, e.g., {\"https://host1:8443\",\n\t// \"https://host2:8443\"}.  If this is nil, Keep clients will\n\t// use the arvados.v1.keep_services.accessible API to discover\n\t// available services.\n\tKeepServiceURIs []string\n\n\t// Maximum disk cache size in bytes or percent of total\n\t// filesystem size. If zero, use default, currently 10% of\n\t// filesystem size.\n\tDiskCacheSize arvados.ByteSizeOrPercent\n\n\t// Where to write debug logs. 
May be nil.\n\tLogger logrus.FieldLogger\n\n\t// Discovery document\n\tDiscoveryDoc Dict\n\n\tlastClosedIdlesAt time.Time\n\n\t// Number of retries\n\tRetries int\n\n\t// X-Request-Id for outgoing requests\n\tRequestID string\n\n\t// Cluster config from the arvados.Client passed to New(), if\n\t// any. If non-nil, its keep services configuration is used\n\t// instead of requesting a server list from controller. Note\n\t// this is disabled by default in test suites via\n\t// ARVADOS_FORCE_KEEP_SERVICES_TABLE environment variable.\n\tCluster *arvados.Cluster\n}\n\n// MakeTLSConfig sets up TLS configuration for communicating with\n// Arvados and Keep services.\nfunc MakeTLSConfig(insecure bool) *tls.Config {\n\treturn &tls.Config{InsecureSkipVerify: insecure}\n}\n\n// New returns an ArvadosClient using the given arvados.Client\n// configuration. This is useful for callers who load arvados.Client\n// fields from configuration files but still need to use the\n// arvadosclient.ArvadosClient package.\nfunc New(c *arvados.Client) (*ArvadosClient, error) {\n\thc := c.Client\n\tif hc == nil {\n\t\thc = &http.Client{\n\t\t\tTimeout: 5 * time.Minute,\n\t\t\tTransport: &http.Transport{\n\t\t\t\tTLSClientConfig: MakeTLSConfig(c.Insecure)},\n\t\t}\n\t}\n\tac := &ArvadosClient{\n\t\tScheme:            \"https\",\n\t\tApiServer:         c.APIHost,\n\t\tApiToken:          c.AuthToken,\n\t\tApiInsecure:       c.Insecure,\n\t\tClient:            hc,\n\t\tRetries:           2,\n\t\tKeepServiceURIs:   c.KeepServiceURIs,\n\t\tDiskCacheSize:     c.DiskCacheSize,\n\t\tLogger:            c.Logger,\n\t\tlastClosedIdlesAt: time.Now(),\n\t\tCluster:           c.Cluster,\n\t}\n\n\treturn ac, nil\n}\n\n// MakeArvadosClient creates a new ArvadosClient using the standard\n// environment variables ARVADOS_API_HOST, ARVADOS_API_TOKEN,\n// ARVADOS_API_HOST_INSECURE, and ARVADOS_KEEP_SERVICES.\nfunc MakeArvadosClient() (*ArvadosClient, error) {\n\treturn New(arvados.NewClientFromEnv())\n}\n\n// CallRaw is the same as Call() but returns a Reader that reads the\n// response body, instead of taking an output object.\nfunc (c *ArvadosClient) CallRaw(method string, resourceType string, uuid string, action string, parameters Dict) (reader io.ReadCloser, err error) {\n\tscheme := c.Scheme\n\tif scheme == \"\" {\n\t\tscheme = \"https\"\n\t}\n\tif c.ApiServer == \"\" {\n\t\treturn nil, fmt.Errorf(\"Arvados client is not configured (target API host is not set). 
Maybe env var ARVADOS_API_HOST should be set first?\")\n\t}\n\tu := url.URL{\n\t\tScheme: scheme,\n\t\tHost:   c.ApiServer}\n\n\tif resourceType != ApiDiscoveryResource {\n\t\tu.Path = \"/arvados/v1\"\n\t}\n\n\tif resourceType != \"\" {\n\t\tu.Path = u.Path + \"/\" + resourceType\n\t}\n\tif uuid != \"\" {\n\t\tu.Path = u.Path + \"/\" + uuid\n\t}\n\tif action != \"\" {\n\t\tu.Path = u.Path + \"/\" + action\n\t}\n\n\tif parameters == nil {\n\t\tparameters = make(Dict)\n\t}\n\n\tvals := make(url.Values)\n\tfor k, v := range parameters {\n\t\tif s, ok := v.(string); ok {\n\t\t\tvals.Set(k, s)\n\t\t} else if m, err := json.Marshal(v); err == nil {\n\t\t\tvals.Set(k, string(m))\n\t\t}\n\t}\n\tvar req *http.Request\n\tif method == \"GET\" || method == \"HEAD\" {\n\t\tu.RawQuery = vals.Encode()\n\t\tif req, err = http.NewRequest(method, u.String(), nil); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tif req, err = http.NewRequest(method, u.String(), bytes.NewBufferString(vals.Encode())); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treq.Header.Add(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\t}\n\tif c.RequestID != \"\" {\n\t\treq.Header.Add(\"X-Request-Id\", c.RequestID)\n\t}\n\tclient := arvados.Client{\n\t\tClient:    c.Client,\n\t\tAPIHost:   c.ApiServer,\n\t\tAuthToken: c.ApiToken,\n\t\tInsecure:  c.ApiInsecure,\n\t\tTimeout:   30 * RetryDelay * time.Duration(c.Retries),\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent {\n\t\tdefer resp.Body.Close()\n\t\treturn nil, newAPIServerError(c.ApiServer, resp)\n\t}\n\treturn resp.Body, nil\n}\n\nfunc newAPIServerError(ServerAddress string, resp *http.Response) APIServerError {\n\n\tase := APIServerError{\n\t\tServerAddress:     ServerAddress,\n\t\tHttpStatusCode:    resp.StatusCode,\n\t\tHttpStatusMessage: resp.Status}\n\n\t// If the response body has {\"errors\":[\"reason1\",\"reason2\"]}\n\t// then return those reasons.\n\tvar errInfo = Dict{}\n\tif err := json.NewDecoder(resp.Body).Decode(&errInfo); err == nil {\n\t\tif errorList, ok := errInfo[\"errors\"]; ok {\n\t\t\tif errArray, ok := errorList.([]interface{}); ok {\n\t\t\t\tfor _, errItem := range errArray {\n\t\t\t\t\t// We expect an array of strings here.\n\t\t\t\t\t// Non-strings will be passed along\n\t\t\t\t\t// JSON-encoded.\n\t\t\t\t\tif s, ok := errItem.(string); ok {\n\t\t\t\t\t\tase.ErrorDetails = append(ase.ErrorDetails, s)\n\t\t\t\t\t} else if j, err := json.Marshal(errItem); err == nil {\n\t\t\t\t\t\tase.ErrorDetails = append(ase.ErrorDetails, string(j))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn ase\n}\n\n// Call an API endpoint and parse the JSON response into an object.\n//\n//\tmethod - HTTP method: GET, HEAD, PUT, POST, PATCH or DELETE.\n//\tresourceType - the type of arvados resource to act on (e.g., \"collections\", \"pipeline_instances\").\n//\tuuid - the uuid of the specific item to access. May be empty.\n//\taction - API method name (e.g., \"lock\"). 
This is often empty if implied by method and uuid.\n//\tparameters - method parameters.\n//\toutput - a map or annotated struct which is a legal target for encoding/json/Decoder.\n//\n// Returns a non-nil error if an error occurs making the API call, the\n// API responds with a non-successful HTTP status, or an error occurs\n// parsing the response body.\nfunc (c *ArvadosClient) Call(method, resourceType, uuid, action string, parameters Dict, output interface{}) error {\n\treader, err := c.CallRaw(method, resourceType, uuid, action, parameters)\n\tif reader != nil {\n\t\tdefer reader.Close()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif output != nil {\n\t\tdec := json.NewDecoder(reader)\n\t\tif err = dec.Decode(output); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// Create a new resource. See Call for argument descriptions.\nfunc (c *ArvadosClient) Create(resourceType string, parameters Dict, output interface{}) error {\n\treturn c.Call(\"POST\", resourceType, \"\", \"\", parameters, output)\n}\n\n// Delete a resource. See Call for argument descriptions.\nfunc (c *ArvadosClient) Delete(resource string, uuid string, parameters Dict, output interface{}) (err error) {\n\treturn c.Call(\"DELETE\", resource, uuid, \"\", parameters, output)\n}\n\n// Update attributes of a resource. See Call for argument descriptions.\nfunc (c *ArvadosClient) Update(resourceType string, uuid string, parameters Dict, output interface{}) (err error) {\n\treturn c.Call(\"PUT\", resourceType, uuid, \"\", parameters, output)\n}\n\n// Get a resource. See Call for argument descriptions.\nfunc (c *ArvadosClient) Get(resourceType string, uuid string, parameters Dict, output interface{}) (err error) {\n\tif !UUIDMatch(uuid) && !(resourceType == \"collections\" && PDHMatch(uuid)) {\n\t\t// No object has uuid == \"\": there is no need to make\n\t\t// an API call. Furthermore, the HTTP request for such\n\t\t// an API call would be \"GET /arvados/v1/type/\", which\n\t\t// is liable to be misinterpreted as the List API.\n\t\treturn ErrInvalidArgument\n\t}\n\treturn c.Call(\"GET\", resourceType, uuid, \"\", parameters, output)\n}\n\n// List resources of a given type. See Call for argument descriptions.\nfunc (c *ArvadosClient) List(resource string, parameters Dict, output interface{}) (err error) {\n\treturn c.Call(\"GET\", resource, \"\", \"\", parameters, output)\n}\n\nconst ApiDiscoveryResource = \"discovery/v1/apis/arvados/v1/rest\"\n\n// Discovery returns the value of the given parameter in the discovery\n// document. Returns a non-nil error if the discovery document cannot\n// be retrieved/decoded. Returns ErrInvalidArgument if the requested\n// parameter is not found in the discovery document.\nfunc (c *ArvadosClient) Discovery(parameter string) (value interface{}, err error) {\n\tif len(c.DiscoveryDoc) == 0 {\n\t\tc.DiscoveryDoc = make(Dict)\n\t\terr = c.Call(\"GET\", ApiDiscoveryResource, \"\", \"\", nil, &c.DiscoveryDoc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tvar found bool\n\tvalue, found = c.DiscoveryDoc[parameter]\n\tif found {\n\t\treturn value, nil\n\t}\n\treturn value, ErrInvalidArgument\n}\n\n// ClusterConfig returns the value of the given key in the current cluster's\n// exported config. 
If key is an empty string, it returns the entire config.\n// Returns ErrInvalidArgument if the key is not present in the\n// config.\nfunc (c *ArvadosClient) ClusterConfig(key string) (config interface{}, err error) {\n\tvar clusterConfig interface{}\n\terr = c.Call(\"GET\", \"config\", \"\", \"\", nil, &clusterConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif key == \"\" {\n\t\treturn clusterConfig, nil\n\t}\n\tconfigData, ok := clusterConfig.(map[string]interface{})[key]\n\tif !ok {\n\t\treturn nil, ErrInvalidArgument\n\t}\n\treturn configData, nil\n}\n\nfunc (c *ArvadosClient) httpClient() *http.Client {\n\tif c.Client != nil {\n\t\treturn c.Client\n\t}\n\tcl := &defaultSecureHTTPClient\n\tif c.ApiInsecure {\n\t\tcl = &defaultInsecureHTTPClient\n\t}\n\t// Take the mutex before inspecting the shared default client:\n\t// reading *cl unlocked is a data race, and re-checking it under\n\t// the lock keeps two goroutines from both constructing a client.\n\tdefaultHTTPClientMtx.Lock()\n\tdefer defaultHTTPClientMtx.Unlock()\n\tif *cl == nil {\n\t\t*cl = &http.Client{Transport: &http.Transport{\n\t\t\tTLSClientConfig: MakeTLSConfig(c.ApiInsecure)}}\n\t}\n\treturn *cl\n}\n
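\n// Example: a minimal, illustrative sketch of typical use of this\n// package (not part of the package API; the collection UUID below is\n// a hypothetical placeholder):\n//\n//\tarv, err := arvadosclient.MakeArvadosClient()\n//\tif err != nil {\n//\t\tlog.Fatal(err)\n//\t}\n//\tvar coll Dict\n//\terr = arv.Get(\"collections\", \"zzzzz-4zz18-xxxxxxxxxxxxxxx\", nil, &coll)\n"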
  },
  {
    "path": "sdk/go/arvadosclient/arvadosclient_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvadosclient\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t. \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tTestingT(t)\n}\n\nvar _ = Suite(&ServerRequiredSuite{})\nvar _ = Suite(&UnitSuite{})\nvar _ = Suite(&MockArvadosServerSuite{})\n\n// Tests that require the Keep server running\ntype ServerRequiredSuite struct{}\n\nfunc (s *ServerRequiredSuite) SetUpSuite(c *C) {\n\tarvadostest.StartKeep(2, false)\n\tRetryDelay = 2 * time.Second\n}\n\nfunc (s *ServerRequiredSuite) TearDownSuite(c *C) {\n\tarvadostest.StopKeep(2)\n}\n\nfunc (s *ServerRequiredSuite) SetUpTest(c *C) {\n\tarvadostest.ResetEnv()\n}\n\nfunc (s *ServerRequiredSuite) TestMakeArvadosClientSecure(c *C) {\n\tdefer os.Setenv(\"ARVADOS_API_HOST_INSECURE\", os.Getenv(\"ARVADOS_API_HOST_INSECURE\"))\n\tos.Setenv(\"ARVADOS_API_HOST_INSECURE\", \"\")\n\tac, err := MakeArvadosClient()\n\tc.Assert(err, Equals, nil)\n\tc.Check(ac.ApiServer, Equals, os.Getenv(\"ARVADOS_API_HOST\"))\n\tc.Check(ac.ApiToken, Equals, os.Getenv(\"ARVADOS_API_TOKEN\"))\n\tc.Check(ac.ApiInsecure, Equals, false)\n}\n\nfunc (s *ServerRequiredSuite) TestMakeArvadosClientInsecure(c *C) {\n\tdefer os.Setenv(\"ARVADOS_API_HOST_INSECURE\", os.Getenv(\"ARVADOS_API_HOST_INSECURE\"))\n\tos.Setenv(\"ARVADOS_API_HOST_INSECURE\", \"true\")\n\tac, err := MakeArvadosClient()\n\tc.Assert(err, Equals, nil)\n\tc.Check(ac.ApiInsecure, Equals, true)\n\tc.Check(ac.ApiServer, Equals, os.Getenv(\"ARVADOS_API_HOST\"))\n\tc.Check(ac.ApiToken, Equals, os.Getenv(\"ARVADOS_API_TOKEN\"))\n\tc.Check(ac.Client.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify, Equals, true)\n}\n\nfunc (s *ServerRequiredSuite) TestGetInvalidUUID(c *C) {\n\tarv, err := MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\n\tgetback := make(Dict)\n\terr = arv.Get(\"collections\", \"\", nil, &getback)\n\tc.Assert(err, Equals, ErrInvalidArgument)\n\tc.Assert(len(getback), Equals, 0)\n\n\terr = arv.Get(\"collections\", \"zebra-moose-unicorn\", nil, &getback)\n\tc.Assert(err, Equals, ErrInvalidArgument)\n\tc.Assert(len(getback), Equals, 0)\n\n\terr = arv.Get(\"collections\", \"acbd18db4cc2f85cedef654fccc4a4d8\", nil, &getback)\n\tc.Assert(err, Equals, ErrInvalidArgument)\n\tc.Assert(len(getback), Equals, 0)\n}\n\nfunc (s *ServerRequiredSuite) TestGetValidUUID(c *C) {\n\tarv, err := MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\n\tgetback := make(Dict)\n\terr = arv.Get(\"collections\", \"zzzzz-4zz18-abcdeabcdeabcde\", nil, &getback)\n\tc.Assert(err, FitsTypeOf, APIServerError{})\n\tc.Assert(err.(APIServerError).HttpStatusCode, Equals, http.StatusNotFound)\n\tc.Assert(len(getback), Equals, 0)\n\n\terr = arv.Get(\"collections\", \"acbd18db4cc2f85cedef654fccc4a4d8+3\", nil, &getback)\n\tc.Assert(err, FitsTypeOf, APIServerError{})\n\tc.Assert(err.(APIServerError).HttpStatusCode, Equals, http.StatusNotFound)\n\tc.Assert(len(getback), Equals, 0)\n}\n\nfunc (s *ServerRequiredSuite) TestInvalidResourceType(c *C) {\n\tarv, err := MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\n\tgetback := make(Dict)\n\terr = arv.Get(\"unicorns\", \"zzzzz-zebra-unicorn7unicorn\", nil, &getback)\n\tc.Assert(err, FitsTypeOf, APIServerError{})\n\tc.Assert(err.(APIServerError).HttpStatusCode, Equals, 
http.StatusNotFound)\n\tc.Assert(len(getback), Equals, 0)\n\n\terr = arv.Update(\"unicorns\", \"zzzzz-zebra-unicorn7unicorn\", nil, &getback)\n\tc.Assert(err, FitsTypeOf, APIServerError{})\n\tc.Assert(err.(APIServerError).HttpStatusCode, Equals, http.StatusNotFound)\n\tc.Assert(len(getback), Equals, 0)\n\n\terr = arv.List(\"unicorns\", nil, &getback)\n\tc.Assert(err, FitsTypeOf, APIServerError{})\n\tc.Assert(err.(APIServerError).HttpStatusCode, Equals, http.StatusNotFound)\n\tc.Assert(len(getback), Equals, 0)\n}\n\nfunc (s *ServerRequiredSuite) TestErrorResponse(c *C) {\n\tarv, _ := MakeArvadosClient()\n\n\tgetback := make(Dict)\n\n\t{\n\t\terr := arv.Create(\"logs\",\n\t\t\tDict{\"log\": Dict{\"bogus_attr\": \"foo\"}},\n\t\t\t&getback)\n\t\tc.Assert(err, ErrorMatches, \"arvados API server error: .*\")\n\t\tc.Assert(err, ErrorMatches, \".*unknown attribute(: | ')bogus_attr.*\")\n\t\tc.Assert(err, FitsTypeOf, APIServerError{})\n\t\tc.Assert(err.(APIServerError).HttpStatusCode, Equals, 422)\n\t}\n\n\t{\n\t\terr := arv.Create(\"bogus\",\n\t\t\tDict{\"bogus\": Dict{}},\n\t\t\t&getback)\n\t\tc.Assert(err, ErrorMatches, \"arvados API server error: .*\")\n\t\tc.Assert(err, ErrorMatches, \".*Path not found.*\")\n\t\tc.Assert(err, FitsTypeOf, APIServerError{})\n\t\tc.Assert(err.(APIServerError).HttpStatusCode, Equals, 404)\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestAPIDiscovery_Get_defaultCollectionReplication(c *C) {\n\tarv, err := MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\tvalue, err := arv.Discovery(\"defaultCollectionReplication\")\n\tc.Assert(err, IsNil)\n\tc.Assert(value, NotNil)\n}\n\nfunc (s *ServerRequiredSuite) TestAPIDiscovery_Get_noSuchParameter(c *C) {\n\tarv, err := MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\tvalue, err := arv.Discovery(\"noSuchParameter\")\n\tc.Assert(err, NotNil)\n\tc.Assert(value, IsNil)\n}\n\nfunc (s *ServerRequiredSuite) TestAPIClusterConfig_Get_StorageClasses(c *C) {\n\tarv, err := MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\tdata, err := arv.ClusterConfig(\"StorageClasses\")\n\tc.Assert(err, IsNil)\n\tc.Assert(data, NotNil)\n\tclusterConfig := data.(map[string]interface{})\n\t_, ok := clusterConfig[\"default\"]\n\tc.Assert(ok, Equals, true)\n}\n\nfunc (s *ServerRequiredSuite) TestAPIClusterConfig_Get_All(c *C) {\n\tarv, err := MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\tdata, err := arv.ClusterConfig(\"\")\n\tc.Assert(err, IsNil)\n\tc.Assert(data, NotNil)\n\tclusterConfig := data.(map[string]interface{})\n\t_, ok := clusterConfig[\"StorageClasses\"]\n\tc.Assert(ok, Equals, true)\n}\n\nfunc (s *ServerRequiredSuite) TestAPIClusterConfig_Get_noSuchSection(c *C) {\n\tarv, err := MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\tdata, err := arv.ClusterConfig(\"noSuchSection\")\n\tc.Assert(err, NotNil)\n\tc.Assert(data, IsNil)\n}\n\nfunc (s *ServerRequiredSuite) TestCreateLarge(c *C) {\n\tarv, err := MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\n\ttxt := arvados.SignLocator(\"d41d8cd98f00b204e9800998ecf8427e+0\", arv.ApiToken, time.Now().Add(time.Minute), time.Minute, []byte(arvadostest.SystemRootToken))\n\t// Ensure our request body is bigger than the Go http server's\n\t// default max size, 10 MB.\n\tfor len(txt) < 12000000 {\n\t\ttxt = txt + \" \" + txt\n\t}\n\ttxt = \". 
\" + txt + \" 0:0:foo\\n\"\n\n\tresp := Dict{}\n\terr = arv.Create(\"collections\", Dict{\n\t\t\"ensure_unique_name\": true,\n\t\t\"collection\": Dict{\n\t\t\t\"is_trashed\":    true,\n\t\t\t\"name\":          \"test\",\n\t\t\t\"manifest_text\": txt,\n\t\t},\n\t}, &resp)\n\tc.Check(err, IsNil)\n\tc.Check(resp[\"portable_data_hash\"], Not(Equals), \"\")\n\tc.Check(resp[\"portable_data_hash\"], Not(Equals), \"d41d8cd98f00b204e9800998ecf8427e+0\")\n}\n\ntype UnitSuite struct{}\n\nfunc (s *UnitSuite) TestUUIDMatch(c *C) {\n\tc.Assert(UUIDMatch(\"zzzzz-tpzed-000000000000000\"), Equals, true)\n\tc.Assert(UUIDMatch(\"zzzzz-zebra-000000000000000\"), Equals, true)\n\tc.Assert(UUIDMatch(\"00000-00000-zzzzzzzzzzzzzzz\"), Equals, true)\n\tc.Assert(UUIDMatch(\"ZEBRA-HORSE-AFRICANELEPHANT\"), Equals, false)\n\tc.Assert(UUIDMatch(\" zzzzz-tpzed-000000000000000\"), Equals, false)\n\tc.Assert(UUIDMatch(\"d41d8cd98f00b204e9800998ecf8427e\"), Equals, false)\n\tc.Assert(UUIDMatch(\"d41d8cd98f00b204e9800998ecf8427e+0\"), Equals, false)\n\tc.Assert(UUIDMatch(\"\"), Equals, false)\n}\n\nfunc (s *UnitSuite) TestPDHMatch(c *C) {\n\tc.Assert(PDHMatch(\"zzzzz-tpzed-000000000000000\"), Equals, false)\n\tc.Assert(PDHMatch(\"d41d8cd98f00b204e9800998ecf8427e\"), Equals, false)\n\tc.Assert(PDHMatch(\"d41d8cd98f00b204e9800998ecf8427e+0\"), Equals, true)\n\tc.Assert(PDHMatch(\"d41d8cd98f00b204e9800998ecf8427e+12345\"), Equals, true)\n\tc.Assert(PDHMatch(\"d41d8cd98f00b204e9800998ecf8427e 12345\"), Equals, false)\n\tc.Assert(PDHMatch(\"D41D8CD98F00B204E9800998ECF8427E+12345\"), Equals, false)\n\tc.Assert(PDHMatch(\"d41d8cd98f00b204e9800998ecf8427e+12345 \"), Equals, false)\n\tc.Assert(PDHMatch(\"d41d8cd98f00b204e9800998ecf8427e+abcdef\"), Equals, false)\n\tc.Assert(PDHMatch(\"da39a3ee5e6b4b0d3255bfef95601890afd80709\"), Equals, false)\n\tc.Assert(PDHMatch(\"da39a3ee5e6b4b0d3255bfef95601890afd80709+0\"), Equals, false)\n\tc.Assert(PDHMatch(\"d41d8cd98f00b204e9800998ecf8427+12345\"), Equals, false)\n\tc.Assert(PDHMatch(\"d41d8cd98f00b204e9800998ecf8427e+12345\\n\"), Equals, false)\n\tc.Assert(PDHMatch(\"+12345\"), Equals, false)\n\tc.Assert(PDHMatch(\"\"), Equals, false)\n}\n\n// Tests that use mock arvados server\ntype MockArvadosServerSuite struct{}\n\nfunc (s *MockArvadosServerSuite) SetUpSuite(c *C) {\n\tRetryDelay = 100 * time.Millisecond\n}\n\nfunc (s *MockArvadosServerSuite) SetUpTest(c *C) {\n\tarvadostest.ResetEnv()\n}\n\ntype APIServer struct {\n\tlistener net.Listener\n\turl      string\n}\n\nfunc RunFakeArvadosServer(st http.Handler) (api APIServer, err error) {\n\tapi.listener, err = net.ListenTCP(\"tcp\", &net.TCPAddr{Port: 0})\n\tif err != nil {\n\t\treturn\n\t}\n\tapi.url = api.listener.Addr().String()\n\tgo http.Serve(api.listener, st)\n\treturn\n}\n\ntype APIStub struct {\n\tmethod        string\n\tretryAttempts int\n\texpected      int\n\trespStatus    []int\n\tresponseBody  []string\n}\n\nfunc (h *APIStub) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tif status := h.respStatus[h.retryAttempts]; status < 0 {\n\t\t// Fail the client's Do() by hanging up without\n\t\t// sending an HTTP response header.\n\t\tconn, _, err := http.NewResponseController(resp).Hijack()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tconn.Write([]byte(\"zzzzzzzzzz\"))\n\t\tconn.Close()\n\t} else {\n\t\tresp.WriteHeader(status)\n\t\tresp.Write([]byte(h.responseBody[h.retryAttempts]))\n\t}\n\th.retryAttempts++\n}\n\nfunc (s *MockArvadosServerSuite) TestWithRetries(c *C) {\n\tfor _, stub := range 
[]APIStub{\n\t\t{\n\t\t\t\"get\", 0, 200, []int{200, 500}, []string{`{\"ok\":\"ok\"}`, ``},\n\t\t},\n\t\t{\n\t\t\t\"create\", 0, 200, []int{200, 500}, []string{`{\"ok\":\"ok\"}`, ``},\n\t\t},\n\t\t{\n\t\t\t\"get\", 0, 423, []int{500, 500, 423, 200}, []string{``, ``, ``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"create\", 0, 423, []int{500, 500, 423, 200}, []string{``, ``, ``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"update\", 0, 422, []int{500, 500, 422, 200}, []string{``, ``, ``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"delete\", 0, 422, []int{500, 500, 422, 200}, []string{``, ``, ``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"get\", 0, 401, []int{500, 502, 401, 200}, []string{``, ``, ``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"create\", 0, 422, []int{500, 502, 422, 200}, []string{``, ``, ``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"get\", 0, 200, []int{500, 500, 200}, []string{``, ``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"create\", 0, 200, []int{500, 500, 200}, []string{``, ``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"delete\", 0, 200, []int{500, 500, 200}, []string{``, ``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"update\", 0, 200, []int{500, 500, 200}, []string{``, ``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"get\", 0, 401, []int{401, 200}, []string{``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"create\", 0, 401, []int{401, 200}, []string{``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"create\", 0, 403, []int{403, 200}, []string{``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"create\", 0, 422, []int{422, 200}, []string{``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"get\", 0, 404, []int{404, 200}, []string{``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t{\n\t\t\t\"get\", 0, 401, []int{500, 401, 200}, []string{``, ``, `{\"ok\":\"ok\"}`},\n\t\t},\n\n\t\t// Response code -1 simulates an HTTP/network error\n\t\t// (i.e., Do() returns an error; there is no HTTP\n\t\t// response status code).\n\n\t\t// Succeed on second retry\n\t\t{\n\t\t\t\"get\", 0, 200, []int{-1, -1, 200}, []string{``, ``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t\t// \"POST\" protocol error is safe to retry\n\t\t{\n\t\t\t\"create\", 0, 200, []int{-1, 200}, []string{``, `{\"ok\":\"ok\"}`},\n\t\t},\n\t} {\n\t\tc.Logf(\"stub: %#v\", stub)\n\n\t\tapi, err := RunFakeArvadosServer(&stub)\n\t\tc.Check(err, IsNil)\n\n\t\tdefer api.listener.Close()\n\n\t\tarv := ArvadosClient{\n\t\t\tScheme:      \"http\",\n\t\t\tApiServer:   api.url,\n\t\t\tApiToken:    \"abc123\",\n\t\t\tApiInsecure: true,\n\t\t\tClient:      &http.Client{Transport: &http.Transport{}},\n\t\t\tRetries:     2}\n\n\t\tgetback := make(Dict)\n\t\tswitch stub.method {\n\t\tcase \"get\":\n\t\t\terr = arv.Get(\"collections\", \"zzzzz-4zz18-znfnqtbbv4spc3w\", nil, &getback)\n\t\tcase \"create\":\n\t\t\terr = arv.Create(\"collections\",\n\t\t\t\tDict{\"collection\": Dict{\"name\": \"testing\"}},\n\t\t\t\t&getback)\n\t\tcase \"update\":\n\t\t\terr = arv.Update(\"collections\", \"zzzzz-4zz18-znfnqtbbv4spc3w\",\n\t\t\t\tDict{\"collection\": Dict{\"name\": \"testing\"}},\n\t\t\t\t&getback)\n\t\tcase \"delete\":\n\t\t\terr = arv.Delete(\"pipeline_templates\", \"zzzzz-4zz18-znfnqtbbv4spc3w\", nil, &getback)\n\t\t}\n\n\t\tswitch stub.expected {\n\t\tcase 200:\n\t\t\tc.Check(err, IsNil)\n\t\t\tc.Check(getback[\"ok\"], Equals, \"ok\")\n\t\tcase -1:\n\t\t\tc.Check(err, NotNil)\n\t\t\tc.Check(err, ErrorMatches, `.*stopped after \\d+ redirects`)\n\t\tdefault:\n\t\t\tc.Check(err, NotNil)\n\t\t\tc.Check(err, ErrorMatches, fmt.Sprintf(\"arvados API server error: %d.*\", 
stub.expected))\n\t\t\tif c.Check(err, FitsTypeOf, APIServerError{}) {\n\t\t\t\tc.Check(err.(APIServerError).HttpStatusCode, Equals, stub.expected)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "sdk/go/arvadosclient/pool.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvadosclient\n\nimport (\n\t\"sync\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// A ClientPool is a pool of ArvadosClients. This is useful for\n// applications that make API calls using a dynamic set of tokens,\n// like web services that pass through their own clients'\n// credentials. See sync.Pool for more information about garbage\n// collection.\ntype ClientPool struct {\n\t// Initialize new clients by copying this one.\n\tPrototype *ArvadosClient\n\n\tpool      *sync.Pool\n\tlastErr   error\n\tsetupOnce sync.Once\n}\n\n// MakeClientPool returns a new empty ClientPool, using environment\n// variables to initialize the prototype.\nfunc MakeClientPool() *ClientPool {\n\treturn MakeClientPoolWith(nil)\n}\n\n// MakeClientPoolWith returns a new empty ClientPool with a previously\n// initialized arvados.Client.\nfunc MakeClientPoolWith(client *arvados.Client) *ClientPool {\n\tvar err error\n\tvar proto *ArvadosClient\n\n\tif client == nil {\n\t\tproto, err = MakeArvadosClient()\n\t} else {\n\t\tproto, err = New(client)\n\t}\n\treturn &ClientPool{\n\t\tPrototype: proto,\n\t\tlastErr:   err,\n\t}\n}\n\nfunc (p *ClientPool) setup() {\n\tp.pool = &sync.Pool{New: func() interface{} {\n\t\tif p.lastErr != nil {\n\t\t\treturn nil\n\t\t}\n\t\tc := *p.Prototype\n\t\treturn &c\n\t}}\n}\n\n// Err returns the error that was encountered last time Get returned\n// nil.\nfunc (p *ClientPool) Err() error {\n\treturn p.lastErr\n}\n\n// Get returns an ArvadosClient taken from the pool, or a new one if\n// the pool is empty. If an existing client is returned, its state\n// (including its ApiToken) will be just as it was when it was Put\n// back in the pool.\nfunc (p *ClientPool) Get() *ArvadosClient {\n\tp.setupOnce.Do(p.setup)\n\tc, ok := p.pool.Get().(*ArvadosClient)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn c\n}\n\n// Put puts an ArvadosClient back in the pool.\nfunc (p *ClientPool) Put(c *ArvadosClient) {\n\tp.setupOnce.Do(p.setup)\n\tp.pool.Put(c)\n}\n"
  },
  {
    "path": "sdk/go/arvadostest/api.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvadostest\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"sync\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\nvar ErrStubUnimplemented = errors.New(\"stub unimplemented\")\n\ntype APIStub struct {\n\t// The error to return from every stubbed API method.\n\tError error\n\tcalls []APIStubCall\n\tmtx   sync.Mutex\n}\n\n// BaseURL implements federation.backend\nfunc (as *APIStub) BaseURL() url.URL {\n\treturn url.URL{Scheme: \"https\", Host: \"apistub.example.com\"}\n}\nfunc (as *APIStub) ConfigGet(ctx context.Context) (json.RawMessage, error) {\n\tas.appendCall(ctx, as.ConfigGet, nil)\n\treturn nil, as.Error\n}\nfunc (as *APIStub) VocabularyGet(ctx context.Context) (arvados.Vocabulary, error) {\n\tas.appendCall(ctx, as.VocabularyGet, nil)\n\treturn arvados.Vocabulary{}, as.Error\n}\nfunc (as *APIStub) DiscoveryDocument(ctx context.Context) (arvados.DiscoveryDocument, error) {\n\tas.appendCall(ctx, as.DiscoveryDocument, nil)\n\treturn arvados.DiscoveryDocument{}, as.Error\n}\nfunc (as *APIStub) Login(ctx context.Context, options arvados.LoginOptions) (arvados.LoginResponse, error) {\n\tas.appendCall(ctx, as.Login, options)\n\treturn arvados.LoginResponse{}, as.Error\n}\nfunc (as *APIStub) Logout(ctx context.Context, options arvados.LogoutOptions) (arvados.LogoutResponse, error) {\n\tas.appendCall(ctx, as.Logout, options)\n\treturn arvados.LogoutResponse{}, as.Error\n}\nfunc (as *APIStub) AuthorizedKeyCreate(ctx context.Context, options arvados.CreateOptions) (arvados.AuthorizedKey, error) {\n\tas.appendCall(ctx, as.AuthorizedKeyCreate, options)\n\treturn arvados.AuthorizedKey{}, as.Error\n}\nfunc (as *APIStub) AuthorizedKeyUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.AuthorizedKey, error) {\n\tas.appendCall(ctx, as.AuthorizedKeyUpdate, options)\n\treturn arvados.AuthorizedKey{}, as.Error\n}\nfunc (as *APIStub) AuthorizedKeyGet(ctx context.Context, options arvados.GetOptions) (arvados.AuthorizedKey, error) {\n\tas.appendCall(ctx, as.AuthorizedKeyGet, options)\n\treturn arvados.AuthorizedKey{}, as.Error\n}\nfunc (as *APIStub) AuthorizedKeyList(ctx context.Context, options arvados.ListOptions) (arvados.AuthorizedKeyList, error) {\n\tas.appendCall(ctx, as.AuthorizedKeyList, options)\n\treturn arvados.AuthorizedKeyList{}, as.Error\n}\nfunc (as *APIStub) AuthorizedKeyDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.AuthorizedKey, error) {\n\tas.appendCall(ctx, as.AuthorizedKeyDelete, options)\n\treturn arvados.AuthorizedKey{}, as.Error\n}\nfunc (as *APIStub) CollectionCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Collection, error) {\n\tas.appendCall(ctx, as.CollectionCreate, options)\n\treturn arvados.Collection{}, as.Error\n}\nfunc (as *APIStub) CollectionUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Collection, error) {\n\tas.appendCall(ctx, as.CollectionUpdate, options)\n\treturn arvados.Collection{}, as.Error\n}\nfunc (as *APIStub) CollectionGet(ctx context.Context, options arvados.GetOptions) (arvados.Collection, error) {\n\tas.appendCall(ctx, as.CollectionGet, options)\n\treturn arvados.Collection{}, as.Error\n}\nfunc (as *APIStub) CollectionList(ctx context.Context, options arvados.ListOptions) (arvados.CollectionList, 
error) {\n\tas.appendCall(ctx, as.CollectionList, options)\n\treturn arvados.CollectionList{}, as.Error\n}\nfunc (as *APIStub) CollectionProvenance(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {\n\tas.appendCall(ctx, as.CollectionProvenance, options)\n\treturn nil, as.Error\n}\nfunc (as *APIStub) CollectionUsedBy(ctx context.Context, options arvados.GetOptions) (map[string]interface{}, error) {\n\tas.appendCall(ctx, as.CollectionUsedBy, options)\n\treturn nil, as.Error\n}\nfunc (as *APIStub) CollectionDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {\n\tas.appendCall(ctx, as.CollectionDelete, options)\n\treturn arvados.Collection{}, as.Error\n}\nfunc (as *APIStub) CollectionTrash(ctx context.Context, options arvados.DeleteOptions) (arvados.Collection, error) {\n\tas.appendCall(ctx, as.CollectionTrash, options)\n\treturn arvados.Collection{}, as.Error\n}\nfunc (as *APIStub) CollectionUntrash(ctx context.Context, options arvados.UntrashOptions) (arvados.Collection, error) {\n\tas.appendCall(ctx, as.CollectionUntrash, options)\n\treturn arvados.Collection{}, as.Error\n}\nfunc (as *APIStub) ComputedPermissionList(ctx context.Context, options arvados.ListOptions) (arvados.ComputedPermissionList, error) {\n\tas.appendCall(ctx, as.ComputedPermissionList, options)\n\treturn arvados.ComputedPermissionList{}, as.Error\n}\nfunc (as *APIStub) ContainerCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Container, error) {\n\tas.appendCall(ctx, as.ContainerCreate, options)\n\treturn arvados.Container{}, as.Error\n}\nfunc (as *APIStub) ContainerUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {\n\tas.appendCall(ctx, as.ContainerUpdate, options)\n\treturn arvados.Container{}, as.Error\n}\nfunc (as *APIStub) ContainerPriorityUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Container, error) {\n\tas.appendCall(ctx, as.ContainerPriorityUpdate, options)\n\treturn arvados.Container{}, as.Error\n}\nfunc (as *APIStub) ContainerGet(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {\n\tas.appendCall(ctx, as.ContainerGet, options)\n\treturn arvados.Container{}, as.Error\n}\nfunc (as *APIStub) ContainerList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerList, error) {\n\tas.appendCall(ctx, as.ContainerList, options)\n\treturn arvados.ContainerList{}, as.Error\n}\nfunc (as *APIStub) ContainerDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Container, error) {\n\tas.appendCall(ctx, as.ContainerDelete, options)\n\treturn arvados.Container{}, as.Error\n}\nfunc (as *APIStub) ContainerLock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {\n\tas.appendCall(ctx, as.ContainerLock, options)\n\treturn arvados.Container{}, as.Error\n}\nfunc (as *APIStub) ContainerUnlock(ctx context.Context, options arvados.GetOptions) (arvados.Container, error) {\n\tas.appendCall(ctx, as.ContainerUnlock, options)\n\treturn arvados.Container{}, as.Error\n}\nfunc (as *APIStub) ContainerSSH(ctx context.Context, options arvados.ContainerSSHOptions) (arvados.ConnectionResponse, error) {\n\tas.appendCall(ctx, as.ContainerSSH, options)\n\treturn arvados.ConnectionResponse{}, as.Error\n}\nfunc (as *APIStub) ContainerGatewayTunnel(ctx context.Context, options arvados.ContainerGatewayTunnelOptions) (arvados.ConnectionResponse, error) {\n\tas.appendCall(ctx, as.ContainerGatewayTunnel, options)\n\treturn 
arvados.ConnectionResponse{}, as.Error\n}\nfunc (as *APIStub) ContainerHTTPProxy(ctx context.Context, options arvados.ContainerHTTPProxyOptions) (http.Handler, error) {\n\tas.appendCall(ctx, as.ContainerHTTPProxy, options)\n\treturn http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {}), as.Error\n}\nfunc (as *APIStub) ContainerRequestCreate(ctx context.Context, options arvados.CreateOptions) (arvados.ContainerRequest, error) {\n\tas.appendCall(ctx, as.ContainerRequestCreate, options)\n\treturn arvados.ContainerRequest{}, as.Error\n}\nfunc (as *APIStub) ContainerRequestUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.ContainerRequest, error) {\n\tas.appendCall(ctx, as.ContainerRequestUpdate, options)\n\treturn arvados.ContainerRequest{}, as.Error\n}\nfunc (as *APIStub) ContainerRequestGet(ctx context.Context, options arvados.GetOptions) (arvados.ContainerRequest, error) {\n\tas.appendCall(ctx, as.ContainerRequestGet, options)\n\treturn arvados.ContainerRequest{}, as.Error\n}\nfunc (as *APIStub) ContainerRequestList(ctx context.Context, options arvados.ListOptions) (arvados.ContainerRequestList, error) {\n\tas.appendCall(ctx, as.ContainerRequestList, options)\n\treturn arvados.ContainerRequestList{}, as.Error\n}\nfunc (as *APIStub) ContainerRequestDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.ContainerRequest, error) {\n\tas.appendCall(ctx, as.ContainerRequestDelete, options)\n\treturn arvados.ContainerRequest{}, as.Error\n}\nfunc (as *APIStub) ContainerRequestContainerStatus(ctx context.Context, options arvados.GetOptions) (arvados.ContainerStatus, error) {\n\tas.appendCall(ctx, as.ContainerRequestContainerStatus, options)\n\treturn arvados.ContainerStatus{}, as.Error\n}\nfunc (as *APIStub) ContainerRequestLog(ctx context.Context, options arvados.ContainerLogOptions) (http.Handler, error) {\n\tas.appendCall(ctx, as.ContainerRequestLog, options)\n\t// Return a handler that responds with the configured\n\t// error/success status.\n\treturn http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {\n\t\tif as.Error == nil {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t} else if err := httpserver.HTTPStatusError(nil); errors.As(as.Error, &err) {\n\t\t\tw.WriteHeader(err.HTTPStatus())\n\t\t\tio.WriteString(w, err.Error())\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t// err is still the zero HTTPStatusError in this branch,\n\t\t\t// so report as.Error rather than calling Error() on a\n\t\t\t// nil interface.\n\t\t\tio.WriteString(w, as.Error.Error())\n\t\t}\n\t}), nil\n}\nfunc (as *APIStub) GroupCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Group, error) {\n\tas.appendCall(ctx, as.GroupCreate, options)\n\treturn arvados.Group{}, as.Error\n}\nfunc (as *APIStub) GroupUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Group, error) {\n\tas.appendCall(ctx, as.GroupUpdate, options)\n\treturn arvados.Group{}, as.Error\n}\nfunc (as *APIStub) GroupGet(ctx context.Context, options arvados.GetOptions) (arvados.Group, error) {\n\tas.appendCall(ctx, as.GroupGet, options)\n\treturn arvados.Group{}, as.Error\n}\nfunc (as *APIStub) GroupList(ctx context.Context, options arvados.ListOptions) (arvados.GroupList, error) {\n\tas.appendCall(ctx, as.GroupList, options)\n\treturn arvados.GroupList{}, as.Error\n}\nfunc (as *APIStub) GroupContents(ctx context.Context, options arvados.GroupContentsOptions) (arvados.ObjectList, error) {\n\tas.appendCall(ctx, as.GroupContents, options)\n\treturn arvados.ObjectList{}, as.Error\n}\nfunc (as *APIStub) GroupShared(ctx context.Context, options arvados.ListOptions) (arvados.GroupList, error) 
{\n\tas.appendCall(ctx, as.GroupShared, options)\n\treturn arvados.GroupList{}, as.Error\n}\nfunc (as *APIStub) GroupDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Group, error) {\n\tas.appendCall(ctx, as.GroupDelete, options)\n\treturn arvados.Group{}, as.Error\n}\nfunc (as *APIStub) GroupTrash(ctx context.Context, options arvados.DeleteOptions) (arvados.Group, error) {\n\tas.appendCall(ctx, as.GroupTrash, options)\n\treturn arvados.Group{}, as.Error\n}\nfunc (as *APIStub) GroupUntrash(ctx context.Context, options arvados.UntrashOptions) (arvados.Group, error) {\n\tas.appendCall(ctx, as.GroupUntrash, options)\n\treturn arvados.Group{}, as.Error\n}\nfunc (as *APIStub) LinkCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Link, error) {\n\tas.appendCall(ctx, as.LinkCreate, options)\n\treturn arvados.Link{}, as.Error\n}\nfunc (as *APIStub) LinkUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Link, error) {\n\tas.appendCall(ctx, as.LinkUpdate, options)\n\treturn arvados.Link{}, as.Error\n}\nfunc (as *APIStub) LinkGet(ctx context.Context, options arvados.GetOptions) (arvados.Link, error) {\n\tas.appendCall(ctx, as.LinkGet, options)\n\treturn arvados.Link{}, as.Error\n}\nfunc (as *APIStub) LinkList(ctx context.Context, options arvados.ListOptions) (arvados.LinkList, error) {\n\tas.appendCall(ctx, as.LinkList, options)\n\treturn arvados.LinkList{}, as.Error\n}\nfunc (as *APIStub) LinkDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Link, error) {\n\tas.appendCall(ctx, as.LinkDelete, options)\n\treturn arvados.Link{}, as.Error\n}\nfunc (as *APIStub) LogCreate(ctx context.Context, options arvados.CreateOptions) (arvados.Log, error) {\n\tas.appendCall(ctx, as.LogCreate, options)\n\treturn arvados.Log{}, as.Error\n}\nfunc (as *APIStub) LogUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.Log, error) {\n\tas.appendCall(ctx, as.LogUpdate, options)\n\treturn arvados.Log{}, as.Error\n}\nfunc (as *APIStub) LogGet(ctx context.Context, options arvados.GetOptions) (arvados.Log, error) {\n\tas.appendCall(ctx, as.LogGet, options)\n\treturn arvados.Log{}, as.Error\n}\nfunc (as *APIStub) LogList(ctx context.Context, options arvados.ListOptions) (arvados.LogList, error) {\n\tas.appendCall(ctx, as.LogList, options)\n\treturn arvados.LogList{}, as.Error\n}\nfunc (as *APIStub) LogDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.Log, error) {\n\tas.appendCall(ctx, as.LogDelete, options)\n\treturn arvados.Log{}, as.Error\n}\nfunc (as *APIStub) SysTrashSweep(ctx context.Context, options struct{}) (struct{}, error) {\n\tas.appendCall(ctx, as.SysTrashSweep, options)\n\treturn struct{}{}, as.Error\n}\nfunc (as *APIStub) UserCreate(ctx context.Context, options arvados.CreateOptions) (arvados.User, error) {\n\tas.appendCall(ctx, as.UserCreate, options)\n\treturn arvados.User{}, as.Error\n}\nfunc (as *APIStub) UserUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.User, error) {\n\tas.appendCall(ctx, as.UserUpdate, options)\n\treturn arvados.User{}, as.Error\n}\nfunc (as *APIStub) UserActivate(ctx context.Context, options arvados.UserActivateOptions) (arvados.User, error) {\n\tas.appendCall(ctx, as.UserActivate, options)\n\treturn arvados.User{}, as.Error\n}\nfunc (as *APIStub) UserSetup(ctx context.Context, options arvados.UserSetupOptions) (map[string]interface{}, error) {\n\tas.appendCall(ctx, as.UserSetup, options)\n\treturn nil, as.Error\n}\nfunc (as *APIStub) UserUnsetup(ctx 
context.Context, options arvados.GetOptions) (arvados.User, error) {\n\tas.appendCall(ctx, as.UserUnsetup, options)\n\treturn arvados.User{}, as.Error\n}\nfunc (as *APIStub) UserGet(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {\n\tas.appendCall(ctx, as.UserGet, options)\n\treturn arvados.User{}, as.Error\n}\nfunc (as *APIStub) UserGetCurrent(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {\n\tas.appendCall(ctx, as.UserGetCurrent, options)\n\treturn arvados.User{}, as.Error\n}\nfunc (as *APIStub) UserGetSystem(ctx context.Context, options arvados.GetOptions) (arvados.User, error) {\n\tas.appendCall(ctx, as.UserGetSystem, options)\n\treturn arvados.User{}, as.Error\n}\nfunc (as *APIStub) UserList(ctx context.Context, options arvados.ListOptions) (arvados.UserList, error) {\n\tas.appendCall(ctx, as.UserList, options)\n\treturn arvados.UserList{}, as.Error\n}\nfunc (as *APIStub) UserDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.User, error) {\n\tas.appendCall(ctx, as.UserDelete, options)\n\treturn arvados.User{}, as.Error\n}\nfunc (as *APIStub) UserMerge(ctx context.Context, options arvados.UserMergeOptions) (arvados.User, error) {\n\tas.appendCall(ctx, as.UserMerge, options)\n\treturn arvados.User{}, as.Error\n}\nfunc (as *APIStub) UserBatchUpdate(ctx context.Context, options arvados.UserBatchUpdateOptions) (arvados.UserList, error) {\n\tas.appendCall(ctx, as.UserBatchUpdate, options)\n\treturn arvados.UserList{}, as.Error\n}\nfunc (as *APIStub) UserAuthenticate(ctx context.Context, options arvados.UserAuthenticateOptions) (arvados.APIClientAuthorization, error) {\n\tas.appendCall(ctx, as.UserAuthenticate, options)\n\treturn arvados.APIClientAuthorization{}, as.Error\n}\nfunc (as *APIStub) APIClientAuthorizationCurrent(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {\n\tas.appendCall(ctx, as.APIClientAuthorizationCurrent, options)\n\treturn arvados.APIClientAuthorization{}, as.Error\n}\nfunc (as *APIStub) APIClientAuthorizationCreate(ctx context.Context, options arvados.CreateOptions) (arvados.APIClientAuthorization, error) {\n\tas.appendCall(ctx, as.APIClientAuthorizationCreate, options)\n\treturn arvados.APIClientAuthorization{}, as.Error\n}\nfunc (as *APIStub) APIClientAuthorizationUpdate(ctx context.Context, options arvados.UpdateOptions) (arvados.APIClientAuthorization, error) {\n\tas.appendCall(ctx, as.APIClientAuthorizationUpdate, options)\n\treturn arvados.APIClientAuthorization{}, as.Error\n}\nfunc (as *APIStub) APIClientAuthorizationDelete(ctx context.Context, options arvados.DeleteOptions) (arvados.APIClientAuthorization, error) {\n\tas.appendCall(ctx, as.APIClientAuthorizationDelete, options)\n\treturn arvados.APIClientAuthorization{}, as.Error\n}\nfunc (as *APIStub) APIClientAuthorizationList(ctx context.Context, options arvados.ListOptions) (arvados.APIClientAuthorizationList, error) {\n\tas.appendCall(ctx, as.APIClientAuthorizationList, options)\n\treturn arvados.APIClientAuthorizationList{}, as.Error\n}\nfunc (as *APIStub) APIClientAuthorizationGet(ctx context.Context, options arvados.GetOptions) (arvados.APIClientAuthorization, error) {\n\tas.appendCall(ctx, as.APIClientAuthorizationGet, options)\n\treturn arvados.APIClientAuthorization{}, as.Error\n}\nfunc (as *APIStub) ReadAt(locator string, dst []byte, offset int) (int, error) {\n\tas.appendCall(context.TODO(), as.ReadAt, struct {\n\t\tlocator string\n\t\tdst     []byte\n\t\toffset  int\n\t}{locator, dst, 
offset})\n\treturn 0, as.Error\n}\nfunc (as *APIStub) BlockRead(ctx context.Context, options arvados.BlockReadOptions) (int, error) {\n\tas.appendCall(ctx, as.BlockRead, options)\n\treturn 0, as.Error\n}\nfunc (as *APIStub) BlockWrite(ctx context.Context, options arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {\n\tas.appendCall(ctx, as.BlockWrite, options)\n\treturn arvados.BlockWriteResponse{}, as.Error\n}\nfunc (as *APIStub) LocalLocator(locator string) (int, error) {\n\tas.appendCall(context.TODO(), as.LocalLocator, locator)\n\treturn 0, as.Error\n}\n\nfunc (as *APIStub) appendCall(ctx context.Context, method interface{}, options interface{}) {\n\tas.mtx.Lock()\n\tdefer as.mtx.Unlock()\n\tas.calls = append(as.calls, APIStubCall{method, ctx, options})\n}\n\n// Calls returns the subset of recorded calls made to the given stub\n// method, in the order they occurred. If method is nil, all recorded\n// calls are returned.\nfunc (as *APIStub) Calls(method interface{}) []APIStubCall {\n\tas.mtx.Lock()\n\tdefer as.mtx.Unlock()\n\tvar calls []APIStubCall\n\tfor _, call := range as.calls {\n\t\tif method == nil || (runtime.FuncForPC(reflect.ValueOf(call.Method).Pointer()).Name() ==\n\t\t\truntime.FuncForPC(reflect.ValueOf(method).Pointer()).Name()) {\n\t\t\tcalls = append(calls, call)\n\t\t}\n\t}\n\treturn calls\n}\n\n// APIStubCall records a single call to an APIStub method.\ntype APIStubCall struct {\n\tMethod  interface{}\n\tContext context.Context\n\tOptions interface{}\n}\n
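\n// Example (an illustrative sketch, not part of this package; the\n// UUID is a placeholder):\n//\n//\tstub := &APIStub{Error: errors.New(\"stub error\")}\n//\tvar api arvados.API = stub\n//\tapi.CollectionGet(context.TODO(), arvados.GetOptions{UUID: \"zzzzz-4zz18-xxxxxxxxxxxxxxx\"})\n//\tfor _, call := range stub.Calls(stub.CollectionGet) {\n//\t\t// inspect call.Options.(arvados.GetOptions)\n//\t}\n"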
  },
  {
    "path": "sdk/go/arvadostest/api_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvadostest\n\nimport \"git.arvados.org/arvados.git/sdk/go/arvados\"\n\n// Test that *APIStub implements arvados.API\nvar _ arvados.API = &APIStub{}\n"
  },
  {
    "path": "sdk/go/arvadostest/busybox_image.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage arvadostest\n\nimport (\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"os\"\n\n\t. \"gopkg.in/check.v1\"\n)\n\n// BusyboxDockerImage downloads the busybox:uclibc docker image\n// (busybox_uclibc.tar) from cache.arvados.org into a temporary file\n// and returns the temporary file name.\nfunc BusyboxDockerImage(c *C) string {\n\tfnm := \"busybox_uclibc.tar\"\n\tcachedir := c.MkDir()\n\tcachefile := cachedir + \"/\" + fnm\n\tif _, err := os.Stat(cachefile); err == nil {\n\t\treturn cachefile\n\t}\n\n\tf, err := ioutil.TempFile(cachedir, \"\")\n\tc.Assert(err, IsNil)\n\tdefer f.Close()\n\tdefer os.Remove(f.Name())\n\n\tresp, err := http.Get(\"https://cache.arvados.org/\" + fnm)\n\tc.Assert(err, IsNil)\n\tdefer resp.Body.Close()\n\t_, err = io.Copy(f, resp.Body)\n\tc.Assert(err, IsNil)\n\terr = f.Close()\n\tc.Assert(err, IsNil)\n\terr = os.Rename(f.Name(), cachefile)\n\tc.Assert(err, IsNil)\n\n\treturn cachefile\n}\n"
  },
  {
    "path": "sdk/go/arvadostest/db.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvadostest\n\nimport (\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/jmoiron/sqlx\"\n\n\t// sqlx needs lib/pq to talk to PostgreSQL\n\t_ \"github.com/lib/pq\"\n\t\"gopkg.in/check.v1\"\n)\n\n// DB returns a DB connection for the given cluster config.\nfunc DB(c *check.C, cluster *arvados.Cluster) *sqlx.DB {\n\tdb, err := sqlx.Open(\"postgres\", cluster.PostgreSQL.Connection.String())\n\tc.Assert(err, check.IsNil)\n\treturn db\n}\n"
  },
  {
    "path": "sdk/go/arvadostest/fixtures.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvadostest\n\n// IDs of API server's test fixtures\nconst (\n\tSpectatorToken          = \"zw2f4gwx8hw8cjre7yp6v1zylhrhn3m5gvjq73rtpwhmknrybu\"\n\tActiveToken             = \"3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi\"\n\tActiveTokenUUID         = \"zzzzz-gj3su-077z32aux8dg2s1\"\n\tActiveTokenV2           = \"v2/zzzzz-gj3su-077z32aux8dg2s1/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi\"\n\tAdminUserUUID           = \"zzzzz-tpzed-d9tiejq69daie8f\"\n\tAdminToken              = \"4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h\"\n\tAdminTokenUUID          = \"zzzzz-gj3su-027z32aux8dg2s1\"\n\tAnonymousToken          = \"4kg6k6lzmp9kj4cpkcoxie964cmvjahbt4fod9zru44k4jqdmi\"\n\tDataManagerToken        = \"320mkve8qkswstz7ff61glpk3mhgghmg67wmic7elw4z41pke1\"\n\tSystemRootToken         = \"systemusertesttoken1234567890aoeuidhtnsqjkxbmwvzpy\"\n\tManagementToken         = \"e687950a23c3a9bceec28c6223a06c79\"\n\tActiveUserUUID          = \"zzzzz-tpzed-xurymjxw79nv3jz\"\n\tFederatedActiveUserUUID = \"zbbbb-tpzed-xurymjxw79nv3jz\"\n\tSpectatorUserUUID       = \"zzzzz-tpzed-l1s2piq4t4mps8r\"\n\tUserAgreementCollection = \"zzzzz-4zz18-uukreo9rbgwsujr\" // user_agreement_in_anonymously_accessible_project\n\tFooCollectionName       = \"zzzzz-4zz18-fy296fx3hot09f7 added sometime\"\n\tFooCollection           = \"zzzzz-4zz18-fy296fx3hot09f7\"\n\tFooCollectionPDH        = \"1f4b0bc7583c2a7f9102c395f4ffc5e3+45\"\n\tNonexistentCollection   = \"zzzzz-4zz18-totallynotexist\"\n\tHelloWorldCollection    = \"zzzzz-4zz18-4en62shvi99lxd4\"\n\tFooBarDirCollection     = \"zzzzz-4zz18-foonbarfilesdir\"\n\tWazVersion1Collection   = \"zzzzz-4zz18-25k12570yk1ver1\"\n\tUserAgreementPDH        = \"b519d9cb706a29fc7ea24dbea2f05851+93\"\n\tHelloWorldPdh           = \"55713e6a34081eb03609e7ad5fcad129+62\"\n\n\tMultilevelCollection1                        = \"zzzzz-4zz18-pyw8yp9g3pr7irn\"\n\tMultilevelCollection1PDH                     = \"f9ddda46bb293b6847da984e3aa735db+290\"\n\tStorageClassesDesiredDefaultConfirmedDefault = \"zzzzz-4zz18-3t236wr12769tga\"\n\tStorageClassesDesiredArchiveConfirmedDefault = \"zzzzz-4zz18-3t236wr12769qqa\"\n\tEmptyCollectionUUID                          = \"zzzzz-4zz18-gs9ooj1h9sd5mde\"\n\n\tAProjectUUID     = \"zzzzz-j7d0g-v955i6s2oi1cbso\"\n\tASubprojectUUID  = \"zzzzz-j7d0g-axqo7eu9pwvna1x\"\n\tAFilterGroupUUID = \"zzzzz-j7d0g-thisfiltergroup\"\n\n\tFooAndBarFilesInDirUUID = \"zzzzz-4zz18-foonbarfilesdir\"\n\tFooAndBarFilesInDirPDH  = \"870369fc72738603c2fad16664e50e2d+58\"\n\n\tQueuedContainerRequestUUID = \"zzzzz-xvhdp-cr4queuedcontnr\"\n\tQueuedContainerUUID        = \"zzzzz-dz642-queuedcontainer\"\n\n\tLockedContainerUUID = \"zzzzz-dz642-lockedcontainer\"\n\n\tRunningContainerUUID = \"zzzzz-dz642-runningcontainr\"\n\n\tCompletedContainerUUID         = \"zzzzz-dz642-compltcontainer\"\n\tCompletedContainerRequestUUID  = \"zzzzz-xvhdp-cr4completedctr\"\n\tCompletedContainerRequestUUID2 = \"zzzzz-xvhdp-cr4completedcr2\"\n\n\tCompletedDiagnosticsContainerRequest1UUID     = \"zzzzz-xvhdp-diagnostics0001\"\n\tCompletedDiagnosticsContainerRequest2UUID     = \"zzzzz-xvhdp-diagnostics0002\"\n\tCompletedDiagnosticsContainer1UUID            = \"zzzzz-dz642-diagcompreq0001\"\n\tCompletedDiagnosticsContainer2UUID            = \"zzzzz-dz642-diagcompreq0002\"\n\tDiagnosticsContainerRequest1LogCollectionUUID = 
\"zzzzz-4zz18-diagcompreqlog1\"\n\tDiagnosticsContainerRequest2LogCollectionUUID = \"zzzzz-4zz18-diagcompreqlog2\"\n\n\tCompletedDiagnosticsHasher1ContainerRequestUUID = \"zzzzz-xvhdp-diag1hasher0001\"\n\tCompletedDiagnosticsHasher2ContainerRequestUUID = \"zzzzz-xvhdp-diag1hasher0002\"\n\tCompletedDiagnosticsHasher3ContainerRequestUUID = \"zzzzz-xvhdp-diag1hasher0003\"\n\tCompletedDiagnosticsHasher1ContainerUUID        = \"zzzzz-dz642-diagcomphasher1\"\n\tCompletedDiagnosticsHasher2ContainerUUID        = \"zzzzz-dz642-diagcomphasher2\"\n\tCompletedDiagnosticsHasher3ContainerUUID        = \"zzzzz-dz642-diagcomphasher3\"\n\n\tUncommittedContainerRequestUUID = \"zzzzz-xvhdp-cr4uncommittedc\"\n\n\tHasher1LogCollectionUUID = \"zzzzz-4zz18-dlogcollhash001\"\n\tHasher2LogCollectionUUID = \"zzzzz-4zz18-dlogcollhash002\"\n\tHasher3LogCollectionUUID = \"zzzzz-4zz18-dlogcollhash003\"\n\n\tArvadosRepoUUID = \"zzzzz-s0uqq-arvadosrepo0123\"\n\tArvadosRepoName = \"arvados\"\n\tFooRepoUUID     = \"zzzzz-s0uqq-382brsig8rp3666\"\n\tFooRepoName     = \"active/foo\"\n\tRepository2UUID = \"zzzzz-s0uqq-382brsig8rp3667\"\n\tRepository2Name = \"active/foo2\"\n\n\tFooFileCollectionUUID             = \"zzzzz-4zz18-znfnqtbbv4spc3w\"\n\tFooFileCollectionSharingTokenUUID = \"zzzzz-gj3su-gf02tdm4g1z3e3u\"\n\tFooFileCollectionSharingToken     = \"iknqgmunrhgsyfok8uzjlwun9iscwm3xacmzmg65fa1j1lpdss\"\n\tBarFileCollectionUUID             = \"zzzzz-4zz18-ehbhgtheo8909or\"\n\tBarFileCollectionPDH              = \"fa7aeb5140e2848d39b416daeef4ffc5+45\"\n\n\tWorkflowWithDefinitionYAMLUUID = \"zzzzz-7fd4e-validworkfloyml\"\n\n\tCollectionReplicationDesired2Confirmed2UUID = \"zzzzz-4zz18-434zv1tnnf2rygp\"\n\n\tActiveUserCanReadAllUsersLinkUUID = \"zzzzz-o0j2j-ctbysaduejxfrs5\"\n\n\tTrustedWorkbenchAPIClientUUID = \"zzzzz-ozdt8-teyxzyd8qllg11h\"\n\n\tAdminAuthorizedKeysUUID = \"zzzzz-fngyi-12nc9ov4osp8nae\"\n\n\tCrunchstatForRunningContainerLogUUID = \"zzzzz-57u5n-containerlog006\"\n\n\tIdleNodeUUID = \"zzzzz-7ekkf-2z3mc76g2q73aio\"\n\n\tTestVMUUID = \"zzzzz-2x53u-382brsig8rp3064\"\n\n\tCollectionWithUniqueWordsUUID = \"zzzzz-4zz18-mnt690klmb51aud\"\n\n\tLogCollectionUUID  = \"zzzzz-4zz18-logcollection01\"\n\tLogCollectionUUID2 = \"zzzzz-4zz18-logcollection02\"\n\n\tDockerImage112PDH      = \"d740a57097711e08eb9b2a93518f20ab+174\"\n\tDockerImage112Filename = \"sha256:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678.tar\"\n)\n\nvar TestCollectionUUIDToPDH = map[string]string{\n\tFooCollection:           FooCollectionPDH,\n\tMultilevelCollection1:   MultilevelCollection1PDH,\n\tFooAndBarFilesInDirUUID: FooAndBarFilesInDirPDH,\n\tBarFileCollectionUUID:   BarFileCollectionPDH,\n}\n\nvar TestCollectionPDHToManifest = map[string]string{\n\tFooCollectionPDH: \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\\n\",\n\tMultilevelCollection1PDH: `. d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3\n./dir1 d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3\n./dir1/subdir d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3\n./dir2 d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3\n`,\n\tFooAndBarFilesInDirPDH: \"./dir1 3858f62230ac3c915f300c664312c63f+6 3:3:bar 0:3:foo\\n\",\n\tBarFileCollectionPDH:   \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n}\n\n// PathologicalManifest : A valid manifest designed to test\n// various edge cases and parsing requirements\nconst PathologicalManifest = \". 
acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 73feffa4b7f6bb68e44cf984c85f6e88+3+Z+K@xyzzy acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:zero@0 0:1:f 1:0:zero@1 1:4:ooba 4:0:zero@4 5:1:r 5:4:rbaz 9:0:zero@9\\n\" +\n\t\"./overlapReverse acbd18db4cc2f85cedef654fccc4a4d8+3 acbd18db4cc2f85cedef654fccc4a4d8+3 5:1:o 4:2:oo 2:4:ofoo\\n\" +\n\t\"./segmented acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 0:1:frob 5:1:frob 1:1:frob 1:2:oof 0:1:oof 5:0:frob 3:1:frob\\n\" +\n\t`./foo\\040b\\141r acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:baz` + \"\\n\" +\n\t`./foo\\040b\\141r acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:b\\141z\\040w\\141z` + \"\\n\" +\n\t\"./foo acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:zero 0:3:foo\\n\" +\n\t\". acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:foo/zero 0:3:foo/foo\\n\"\n\n// An MD5 collision.\nvar (\n\tMD5CollisionData = [][]byte{\n\t\t[]byte(\"\\x0e0eaU\\x9a\\xa7\\x87\\xd0\\x0b\\xc6\\xf7\\x0b\\xbd\\xfe4\\x04\\xcf\\x03e\\x9epO\\x854\\xc0\\x0f\\xfbe\\x9cL\\x87@\\xcc\\x94/\\xeb-\\xa1\\x15\\xa3\\xf4\\x15\\\\\\xbb\\x86\\x07Is\\x86em}\\x1f4\\xa4 Y\\xd7\\x8fZ\\x8d\\xd1\\xef\"),\n\t\t[]byte(\"\\x0e0eaU\\x9a\\xa7\\x87\\xd0\\x0b\\xc6\\xf7\\x0b\\xbd\\xfe4\\x04\\xcf\\x03e\\x9etO\\x854\\xc0\\x0f\\xfbe\\x9cL\\x87@\\xcc\\x94/\\xeb-\\xa1\\x15\\xa3\\xf4\\x15\\xdc\\xbb\\x86\\x07Is\\x86em}\\x1f4\\xa4 Y\\xd7\\x8fZ\\x8d\\xd1\\xef\"),\n\t}\n\tMD5CollisionMD5 = \"cee9a457e790cf20d4bdaa6d69f01e41\"\n)\n\n// BlobSigningKey used by the test servers\nconst BlobSigningKey = \"zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc\"\n"
  },
  {
    "path": "sdk/go/arvadostest/keep_stub.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvadostest\n\ntype KeepStub struct{}\n"
  },
  {
    "path": "sdk/go/arvadostest/manifest.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvadostest\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math/rand\"\n)\n\nfunc FakeManifest(dirCount, filesPerDir, blocksPerFile, interleaveChunk int) string {\n\tconst blksize = 1 << 26\n\tmb := bytes.NewBuffer(make([]byte, 0, 40000000))\n\tblkid := 0\n\tfor i := 0; i < dirCount; i++ {\n\t\tfmt.Fprintf(mb, \"./dir%d\", i)\n\t\tfor j := 0; j < filesPerDir; j++ {\n\t\t\tfor k := 0; k < blocksPerFile; k++ {\n\t\t\t\tblkid++\n\t\t\t\tfmt.Fprintf(mb, \" %032x+%d+A%040x@%08x\", blkid, blksize, blkid, blkid)\n\t\t\t}\n\t\t}\n\t\tfor j := 0; j < filesPerDir; j++ {\n\t\t\tif interleaveChunk == 0 {\n\t\t\t\tfmt.Fprintf(mb, \" %d:%d:dir%d/file%d\", (filesPerDir-j-1)*blocksPerFile*blksize, blocksPerFile*blksize, j, j)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor todo := int64(blocksPerFile) * int64(blksize); todo > 0; todo -= int64(interleaveChunk) {\n\t\t\t\tsize := int64(interleaveChunk)\n\t\t\t\tif size > todo {\n\t\t\t\t\tsize = todo\n\t\t\t\t}\n\t\t\t\toffset := rand.Int63n(int64(blocksPerFile)*int64(blksize)*int64(filesPerDir) - size)\n\t\t\t\tfmt.Fprintf(mb, \" %d:%d:dir%d/file%d\", offset, size, j, j)\n\t\t\t}\n\t\t}\n\t\tmb.Write([]byte{'\\n'})\n\t}\n\treturn mb.String()\n}\n"
  },
  {
    "path": "sdk/go/arvadostest/metrics.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvadostest\n\nimport (\n\t\"bytes\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/common/expfmt\"\n\t\"gopkg.in/check.v1\"\n)\n\nfunc GatherMetricsAsString(reg *prometheus.Registry) string {\n\tbuf := bytes.NewBuffer(nil)\n\tenc := expfmt.NewEncoder(buf, expfmt.NewFormat(expfmt.TypeTextPlain))\n\tgot, _ := reg.Gather()\n\tfor _, mf := range got {\n\t\tenc.Encode(mf)\n\t}\n\treturn buf.String()\n}\n\n// GetMetricValue returns the current value of the indicated metric.\n// Metric parameter names and values are given in labels, as in:\n//\n//\tGetMetricValue(c, reg, \"arvados_metric_name\", \"label1\", \"value1\", \"label2\", \"value2\")\nfunc GetMetricValue(c *check.C, reg *prometheus.Registry, name string, labels ...string) float64 {\n\tgather, _ := reg.Gather()\n\tfor _, mf := range gather {\n\t\tif mf.Name != nil && *mf.Name == name {\n\t\tmetric:\n\t\t\tfor _, m := range mf.Metric {\n\t\t\t\tif 2*len(m.Label) != len(labels) {\n\t\t\t\t\tcontinue metric\n\t\t\t\t}\n\t\t\t\tfor i, lp := range m.Label {\n\t\t\t\t\tif lp.Name == nil ||\n\t\t\t\t\t\t*lp.Name != labels[i*2] ||\n\t\t\t\t\t\tlp.Value == nil ||\n\t\t\t\t\t\t*lp.Value != labels[i*2+1] {\n\t\t\t\t\t\tcontinue metric\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif m.GetCounter() != nil {\n\t\t\t\t\treturn *m.GetCounter().Value\n\t\t\t\t}\n\t\t\t\tif m.GetGauge() != nil {\n\t\t\t\t\treturn *m.GetGauge().Value\n\t\t\t\t}\n\t\t\t\tif m.GetUntyped() != nil {\n\t\t\t\t\treturn *m.GetUntyped().Value\n\t\t\t\t}\n\t\t\t\tc.Fatalf(\"GetMetricValue: unsupported metric type: %s\", m)\n\t\t\t\treturn -1\n\t\t\t}\n\t\t}\n\t}\n\tc.Fatalf(\"metric not found: %s %v\", name, labels)\n\treturn -1\n}\n"
  },
  {
    "path": "sdk/go/arvadostest/oidc_provider.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvadostest\n\nimport (\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in/check.v1\"\n\t\"gopkg.in/go-jose/go-jose.v2\"\n\t\"gopkg.in/go-jose/go-jose.v2/jwt\"\n)\n\ntype OIDCProvider struct {\n\t// expected token request\n\tValidCode         string\n\tValidClientID     string\n\tValidClientSecret string\n\t// desired response from token endpoint\n\tAuthEmail          string\n\tAuthEmailVerified  bool\n\tAuthName           string\n\tAuthGivenName      string\n\tAuthFamilyName     string\n\tAccessTokenPayload map[string]interface{}\n\t// end_session_endpoint metadata URL.\n\t// If nil or empty, not included in discovery.\n\t// If relative, built from Issuer.URL.\n\tEndSessionEndpoint *url.URL\n\n\tPeopleAPIResponse map[string]interface{}\n\n\t// send incoming /userinfo requests to HoldUserInfo (if not\n\t// nil), then receive from ReleaseUserInfo (if not nil),\n\t// before responding (these are used to set up races)\n\tHoldUserInfo        chan *http.Request\n\tReleaseUserInfo     chan struct{}\n\tUserInfoErrorStatus int // if non-zero, return this http status (probably 5xx)\n\n\tkey       *rsa.PrivateKey\n\tIssuer    *httptest.Server\n\tPeopleAPI *httptest.Server\n\tc         *check.C\n}\n\nfunc NewOIDCProvider(c *check.C) *OIDCProvider {\n\tp := &OIDCProvider{c: c}\n\tvar err error\n\tp.key, err = rsa.GenerateKey(rand.Reader, 2048)\n\tc.Assert(err, check.IsNil)\n\tp.Issuer = httptest.NewServer(http.HandlerFunc(p.serveOIDC))\n\tp.PeopleAPI = httptest.NewServer(http.HandlerFunc(p.servePeopleAPI))\n\tp.AccessTokenPayload = map[string]interface{}{\"sub\": \"example\"}\n\treturn p\n}\n\nfunc (p *OIDCProvider) ValidAccessToken() string {\n\tbuf, _ := json.Marshal(p.AccessTokenPayload)\n\treturn p.fakeToken(buf)\n}\n\nfunc (p *OIDCProvider) serveOIDC(w http.ResponseWriter, req *http.Request) {\n\treq.ParseForm()\n\tp.c.Logf(\"serveOIDC: got req: %s %s %s\", req.Method, req.URL, req.Form)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tswitch req.URL.Path {\n\tcase \"/.well-known/openid-configuration\":\n\t\tconfiguration := map[string]interface{}{\n\t\t\t\"issuer\":                 p.Issuer.URL,\n\t\t\t\"authorization_endpoint\": p.Issuer.URL + \"/auth\",\n\t\t\t\"token_endpoint\":         p.Issuer.URL + \"/token\",\n\t\t\t\"jwks_uri\":               p.Issuer.URL + \"/jwks\",\n\t\t\t\"userinfo_endpoint\":      p.Issuer.URL + \"/userinfo\",\n\t\t}\n\t\tif p.EndSessionEndpoint == nil {\n\t\t\t// Not included in configuration\n\t\t} else if p.EndSessionEndpoint.Scheme != \"\" {\n\t\t\tconfiguration[\"end_session_endpoint\"] = p.EndSessionEndpoint.String()\n\t\t} else {\n\t\t\tu, err := url.Parse(p.Issuer.URL)\n\t\t\tp.c.Check(err, check.IsNil,\n\t\t\t\tcheck.Commentf(\"error parsing IssuerURL for EndSessionEndpoint\"))\n\t\t\tu.Scheme = \"https\"\n\t\t\tu.Path = u.Path + p.EndSessionEndpoint.Path\n\t\t\tconfiguration[\"end_session_endpoint\"] = u.String()\n\t\t}\n\t\tjson.NewEncoder(w).Encode(configuration)\n\tcase \"/token\":\n\t\tvar clientID, clientSecret string\n\t\tauth, _ := base64.StdEncoding.DecodeString(strings.TrimPrefix(req.Header.Get(\"Authorization\"), \"Basic \"))\n\t\tauthsplit := strings.Split(string(auth), \":\")\n\t\tif len(authsplit) == 2 {\n\t\t\tclientID, _ = 
url.QueryUnescape(authsplit[0])\n\t\t\tclientSecret, _ = url.QueryUnescape(authsplit[1])\n\t\t}\n\t\tif clientID != p.ValidClientID || clientSecret != p.ValidClientSecret {\n\t\t\tp.c.Logf(\"OIDCProvider: expected (%q, %q) got (%q, %q)\", p.ValidClientID, p.ValidClientSecret, clientID, clientSecret)\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\tif req.Form.Get(\"code\") != p.ValidCode || p.ValidCode == \"\" {\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tidToken, _ := json.Marshal(map[string]interface{}{\n\t\t\t\"iss\":            p.Issuer.URL,\n\t\t\t\"aud\":            []string{clientID},\n\t\t\t\"sub\":            \"fake-user-id\",\n\t\t\t\"exp\":            time.Now().UTC().Add(time.Minute).Unix(),\n\t\t\t\"iat\":            time.Now().UTC().Unix(),\n\t\t\t\"nonce\":          \"fake-nonce\",\n\t\t\t\"email\":          p.AuthEmail,\n\t\t\t\"email_verified\": p.AuthEmailVerified,\n\t\t\t\"name\":           p.AuthName,\n\t\t\t\"given_name\":     p.AuthGivenName,\n\t\t\t\"family_name\":    p.AuthFamilyName,\n\t\t\t\"alt_verified\":   true,                    // for custom claim tests\n\t\t\t\"alt_email\":      \"alt_email@example.com\", // for custom claim tests\n\t\t\t\"alt_username\":   \"desired-username\",      // for custom claim tests\n\t\t})\n\t\tjson.NewEncoder(w).Encode(struct {\n\t\t\tAccessToken  string `json:\"access_token\"`\n\t\t\tTokenType    string `json:\"token_type\"`\n\t\t\tRefreshToken string `json:\"refresh_token\"`\n\t\t\tExpiresIn    int32  `json:\"expires_in\"`\n\t\t\tIDToken      string `json:\"id_token\"`\n\t\t}{\n\t\t\tAccessToken:  p.ValidAccessToken(),\n\t\t\tTokenType:    \"Bearer\",\n\t\t\tRefreshToken: \"test-refresh-token\",\n\t\t\tExpiresIn:    30,\n\t\t\tIDToken:      p.fakeToken(idToken),\n\t\t})\n\tcase \"/jwks\":\n\t\tjson.NewEncoder(w).Encode(jose.JSONWebKeySet{\n\t\t\tKeys: []jose.JSONWebKey{\n\t\t\t\t{Key: p.key.Public(), Algorithm: string(jose.RS256), KeyID: \"\"},\n\t\t\t},\n\t\t})\n\tcase \"/auth\":\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\tcase \"/userinfo\":\n\t\tif p.HoldUserInfo != nil {\n\t\t\tp.HoldUserInfo <- req\n\t\t}\n\t\tif p.ReleaseUserInfo != nil {\n\t\t\t<-p.ReleaseUserInfo\n\t\t}\n\t\tif p.UserInfoErrorStatus > 0 {\n\t\t\tw.WriteHeader(p.UserInfoErrorStatus)\n\t\t\tfmt.Fprintf(w, \"%T error body\", p)\n\t\t\treturn\n\t\t}\n\t\tauthhdr := req.Header.Get(\"Authorization\")\n\t\tif _, err := jwt.ParseSigned(strings.TrimPrefix(authhdr, \"Bearer \")); err != nil {\n\t\t\tp.c.Logf(\"OIDCProvider: bad auth %q\", authhdr)\n\t\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\t\"sub\":            \"fake-user-id\",\n\t\t\t\"name\":           p.AuthName,\n\t\t\t\"given_name\":     p.AuthGivenName,\n\t\t\t\"family_name\":    p.AuthFamilyName,\n\t\t\t\"alt_username\":   \"desired-username\",\n\t\t\t\"email\":          p.AuthEmail,\n\t\t\t\"email_verified\": p.AuthEmailVerified,\n\t\t})\n\tdefault:\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n}\n\nfunc (p *OIDCProvider) servePeopleAPI(w http.ResponseWriter, req *http.Request) {\n\treq.ParseForm()\n\tp.c.Logf(\"servePeopleAPI: got req: %s %s %s\", req.Method, req.URL, req.Form)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tswitch req.URL.Path {\n\tcase \"/v1/people/me\":\n\t\tif f := req.Form.Get(\"personFields\"); f != \"emailAddresses,names\" 
{\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tbreak\n\t\t}\n\t\tjson.NewEncoder(w).Encode(p.PeopleAPIResponse)\n\tdefault:\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n}\n\nfunc (p *OIDCProvider) fakeToken(payload []byte) string {\n\tsigner, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: p.key}, nil)\n\tif err != nil {\n\t\tp.c.Error(err)\n\t\treturn \"\"\n\t}\n\tobject, err := signer.Sign(payload)\n\tif err != nil {\n\t\tp.c.Error(err)\n\t\treturn \"\"\n\t}\n\tt, err := object.CompactSerialize()\n\tif err != nil {\n\t\tp.c.Error(err)\n\t\treturn \"\"\n\t}\n\tp.c.Logf(\"fakeToken(%q) == %q\", payload, t)\n\treturn t\n}\n"
  },
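  {
    "path": "sdk/go/arvadostest/oidc_provider_example_sketch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical setup sketch (illustrative, not part of the original\n// tree): shows how a gocheck suite might configure an OIDCProvider\n// before pointing login code at p.Issuer.URL. All values are fake.\n\npackage arvadostest\n\nimport \"gopkg.in/check.v1\"\n\nfunc exampleOIDCSetup(c *check.C) *OIDCProvider {\n\tp := NewOIDCProvider(c)\n\tp.ValidCode = \"fake-code\"\n\tp.ValidClientID = \"fake-client-id\"\n\tp.ValidClientSecret = \"fake-client-secret\"\n\tp.AuthEmail = \"active-user@example.com\"\n\tp.AuthEmailVerified = true\n\tp.AuthName = \"Fake User\"\n\t// p.Issuer and p.PeopleAPI are live httptest servers; the\n\t// discovery document is served at\n\t// p.Issuer.URL + \"/.well-known/openid-configuration\".\n\treturn p\n}\n"
  },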
  {
    "path": "sdk/go/arvadostest/proxy.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvadostest\n\nimport (\n\t\"crypto/tls\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/http/httputil\"\n\t\"net/url\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"gopkg.in/check.v1\"\n)\n\ntype Proxy struct {\n\t*httptest.Server\n\n\t// URL where the proxy is listening. Same as Server.URL, but\n\t// with parsing already done for you.\n\tURL *url.URL\n\n\t// A dump of each request that has been proxied.\n\tRequestDumps [][]byte\n\n\t// If non-nil, func will be called on each incoming request\n\t// before proxying it.\n\tDirector func(*http.Request)\n\n\twg sync.WaitGroup\n}\n\n// NewProxy returns a new Proxy that saves a dump of each reqeust\n// before forwarding to the indicated service.\nfunc NewProxy(c *check.C, svc arvados.Service) *Proxy {\n\tvar target url.URL\n\tc.Assert(svc.InternalURLs, check.HasLen, 1)\n\tfor u := range svc.InternalURLs {\n\t\ttarget = url.URL(u)\n\t\tbreak\n\t}\n\trp := httputil.NewSingleHostReverseProxy(&target)\n\trp.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {\n\t\tdump, _ := httputil.DumpRequest(r, false)\n\t\tc.Logf(\"arvadostest.Proxy ErrorHandler(%s): %s\\n%s\", r.URL, err, dump)\n\t\thttp.Error(w, err.Error(), http.StatusBadGateway)\n\t}\n\trp.Transport = &http.Transport{\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout:   30 * time.Second,\n\t\t\tKeepAlive: 30 * time.Second,\n\t\t\tDualStack: true,\n\t\t}).DialContext,\n\t\tMaxIdleConns:          100,\n\t\tIdleConnTimeout:       90 * time.Second,\n\t\tTLSHandshakeTimeout:   10 * time.Second,\n\t\tExpectContinueTimeout: 1 * time.Second,\n\t\tTLSClientConfig:       &tls.Config{InsecureSkipVerify: true},\n\t}\n\tsrv := httptest.NewServer(rp)\n\tu, err := url.Parse(srv.URL)\n\tc.Assert(err, check.IsNil)\n\tproxy := &Proxy{\n\t\tServer: srv,\n\t\tURL:    u,\n\t}\n\tvar mtx sync.Mutex\n\trp.Director = func(r *http.Request) {\n\t\tproxy.wg.Add(1)\n\t\tdefer proxy.wg.Done()\n\t\tif proxy.Director != nil {\n\t\t\tproxy.Director(r)\n\t\t}\n\t\tdump, _ := httputil.DumpRequest(r, true)\n\t\tmtx.Lock()\n\t\tproxy.RequestDumps = append(proxy.RequestDumps, dump)\n\t\tmtx.Unlock()\n\t\tr.URL.Scheme = target.Scheme\n\t\tr.URL.Host = target.Host\n\t}\n\treturn proxy\n}\n\n// Wait waits until all of the proxied requests that have been sent to\n// Director() have also been recorded in RequestDumps.\nfunc (proxy *Proxy) Wait() {\n\tproxy.wg.Wait()\n}\n"
  },
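  {
    "path": "sdk/go/arvadostest/proxy_example_sketch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical setup sketch (illustrative, not part of the original\n// tree): stands up a trivial backend, points an arvados.Service at\n// it with SetServiceURL, and wraps it in a request-recording Proxy.\n\npackage arvadostest\n\nimport (\n\t\"net/http\"\n\t\"net/http/httptest\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"gopkg.in/check.v1\"\n)\n\nfunc exampleProxySetup(c *check.C) *Proxy {\n\tbackend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusOK)\n\t}))\n\tvar svc arvados.Service\n\tSetServiceURL(&svc, backend.URL)\n\t// Requests sent to proxy.URL are forwarded to backend; after\n\t// calling proxy.Wait(), each one appears in proxy.RequestDumps.\n\treturn NewProxy(c, svc)\n}\n"
  },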
  {
    "path": "sdk/go/arvadostest/run_servers.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvadostest\n\nimport (\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"gopkg.in/check.v1\"\n)\n\nvar authSettings = make(map[string]string)\n\n// ResetEnv resets ARVADOS_* env vars to whatever they were the first\n// time this func was called.\n//\n// Call it from your SetUpTest or SetUpSuite func if your tests modify\n// env vars.\nfunc ResetEnv() {\n\tif len(authSettings) == 0 {\n\t\tfor _, e := range os.Environ() {\n\t\t\te := strings.SplitN(e, \"=\", 2)\n\t\t\tif len(e) == 2 {\n\t\t\t\tauthSettings[e[0]] = e[1]\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor k, v := range authSettings {\n\t\t\tos.Setenv(k, v)\n\t\t}\n\t}\n}\n\nfunc pythonTestDir() string {\n\treldir := \"sdk/python/tests/\"\n\tfor i := 0; i < 10; i++ {\n\t\tif _, err := os.Stat(reldir); err == nil {\n\t\t\tdir, err := filepath.Abs(reldir)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\treturn dir\n\t\t}\n\t\treldir = \"../\" + reldir\n\t}\n\tlog.Fatalf(\"sdk/python/tests/ not found in any ancestor\")\n\treturn \"\"\n}\n\nfunc ResetDB(c *check.C) {\n\thc := http.Client{Transport: &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}}\n\treq, err := http.NewRequest(\"POST\", \"https://\"+os.Getenv(\"ARVADOS_TEST_API_HOST\")+\"/database/reset\", nil)\n\tc.Assert(err, check.IsNil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+AdminToken)\n\tresp, err := hc.Do(req)\n\tc.Assert(err, check.IsNil)\n\tdefer resp.Body.Close()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n}\n\n// StartKeep starts the given number of keep servers,\n// optionally with --keep-blob-signing enabled.\n// Use numKeepServers = 2 and blobSigning = false under all normal circumstances.\nfunc StartKeep(numKeepServers int, blobSigning bool) {\n\tcmdArgs := []string{\"run_test_server.py\", \"start_keep\", \"--num-keep-servers\", strconv.Itoa(numKeepServers)}\n\tif blobSigning {\n\t\tcmdArgs = append(cmdArgs, \"--keep-blob-signing\")\n\t}\n\tcmd := exec.Command(\"python\", cmdArgs...)\n\tcmd.Dir = pythonTestDir()\n\tbgRun(cmd)\n}\n\n// StopKeep stops keep servers that were started with StartKeep.\n// numkeepServers should be the same value that was passed to StartKeep,\n// which is 2 under all normal circumstances.\nfunc StopKeep(numKeepServers int) {\n\tcmd := exec.Command(\"python\", \"run_test_server.py\", \"stop_keep\", \"--num-keep-servers\", strconv.Itoa(numKeepServers))\n\tcmd.Dir = pythonTestDir()\n\tbgRun(cmd)\n\t// Without Wait, \"go test\" in go1.10.1 tends to hang. https://github.com/golang/go/issues/24050\n\tcmd.Wait()\n}\n\n// Start cmd, with stderr and stdout redirected to our own\n// stderr. 
Return when the process exits, but do not wait for its\n// stderr and stdout to close: any grandchild processes will continue\n// writing to our stderr.\nfunc bgRun(cmd *exec.Cmd) {\n\tcmd.Stdin = nil\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdout = os.Stderr\n\tif err := cmd.Start(); err != nil {\n\t\tlog.Fatalf(\"%+v: %s\", cmd.Args, err)\n\t}\n\tif pstate, err := cmd.Process.Wait(); err != nil {\n\t\tlog.Fatalf(\"%+v: %s\", cmd.Args, err)\n\t} else if pstate.ExitCode() != 0 {\n\t\tlog.Fatalf(\"%+v: exited %d\", cmd.Args, pstate.ExitCode())\n\t}\n}\n\n// CreateBadPath creates a tmp dir, appends \"bad\" to it, and returns the resulting path.\n// This guarantees that the returned path does not exist.\nfunc CreateBadPath() (badpath string, err error) {\n\ttempdir, err := ioutil.TempDir(\"\", \"bad\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Could not create temporary directory for bad path: %v\", err)\n\t}\n\tbadpath = path.Join(tempdir, \"bad\")\n\treturn badpath, nil\n}\n\n// DestroyBadPath deletes the tmp dir created by the previous CreateBadPath call\nfunc DestroyBadPath(badpath string) error {\n\ttempdir := path.Join(badpath, \"..\")\n\terr := os.Remove(tempdir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not remove bad path temporary directory %v: %v\", tempdir, err)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "sdk/go/arvadostest/stub.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage arvadostest\n\nimport (\n\t\"net/http\"\n\t\"net/url\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// StubResponse struct with response status and body\ntype StubResponse struct {\n\tStatus int\n\tBody   string\n}\n\n// ServerStub with response map of path and StubResponse\n// Ex:  /arvados/v1/keep_services = arvadostest.StubResponse{200, string(`{}`)}\ntype ServerStub struct {\n\tResponses map[string]StubResponse\n}\n\nfunc (stub *ServerStub) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tif req.URL.Path == \"/redirect-loop\" {\n\t\thttp.Redirect(resp, req, \"/redirect-loop\", http.StatusFound)\n\t\treturn\n\t}\n\n\tpathResponse := stub.Responses[req.URL.Path]\n\tif pathResponse.Status == -1 {\n\t\thttp.Redirect(resp, req, \"/redirect-loop\", http.StatusFound)\n\t} else if pathResponse.Body != \"\" {\n\t\tresp.WriteHeader(pathResponse.Status)\n\t\tresp.Write([]byte(pathResponse.Body))\n\t} else {\n\t\tresp.WriteHeader(500)\n\t\tresp.Write([]byte(``))\n\t}\n}\n\n// SetServiceURL overrides the given service config/discovery with the\n// given internalURLs.\n//\n// ExternalURL is set to the last internalURL, which only aims to\n// address the case where there is only one.\n//\n// SetServiceURL panics on errors.\nfunc SetServiceURL(service *arvados.Service, internalURLs ...string) {\n\tservice.InternalURLs = map[arvados.URL]arvados.ServiceInstance{}\n\tfor _, u := range internalURLs {\n\t\tu, err := url.Parse(u)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tservice.InternalURLs[arvados.URL(*u)] = arvados.ServiceInstance{}\n\t\tservice.ExternalURL = arvados.URL(*u)\n\t}\n}\n"
  },
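  {
    "path": "sdk/go/arvadostest/stub_example_sketch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical usage sketch (illustrative, not part of the original\n// tree): serving a ServerStub via httptest and hitting one of its\n// canned paths. The path and body below are made up.\n\npackage arvadostest\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n)\n\nfunc ExampleServerStub() {\n\tstub := &ServerStub{\n\t\tResponses: map[string]StubResponse{\n\t\t\t\"/arvados/v1/users/current\": {Status: 200, Body: `{\"kind\":\"arvados#user\"}`},\n\t\t},\n\t}\n\tsrv := httptest.NewServer(stub)\n\tdefer srv.Close()\n\tresp, err := http.Get(srv.URL + \"/arvados/v1/users/current\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\tfmt.Println(resp.StatusCode)\n\t// Output: 200\n}\n"
  },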
  {
    "path": "sdk/go/asyncbuf/buf.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage asyncbuf\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"sync\"\n)\n\n// A Buffer is an io.Writer that distributes written data\n// asynchronously to multiple concurrent readers.\n//\n// NewReader() can be called at any time. In all cases, every returned\n// io.Reader reads all data written to the Buffer.\n//\n// Behavior is undefined if Write is called after Close or\n// CloseWithError.\ntype Buffer interface {\n\tio.WriteCloser\n\n\t// NewReader() returns an io.Reader that reads all data\n\t// written to the Buffer.\n\tNewReader() io.Reader\n\n\t// Close, but return the given error (instead of io.EOF) to\n\t// all readers when they reach the end of the buffer.\n\t//\n\t// CloseWithError(nil) is equivalent to\n\t// CloseWithError(io.EOF).\n\tCloseWithError(error) error\n}\n\ntype buffer struct {\n\tdata *bytes.Buffer\n\tcond sync.Cond\n\terr  error // nil if there might be more writes\n}\n\n// NewBuffer creates a new Buffer using buf as its initial\n// contents. The new Buffer takes ownership of buf, and the caller\n// should not use buf after this call.\nfunc NewBuffer(buf []byte) Buffer {\n\treturn &buffer{\n\t\tdata: bytes.NewBuffer(buf),\n\t\tcond: sync.Cond{L: &sync.Mutex{}},\n\t}\n}\n\nfunc (b *buffer) Write(p []byte) (int, error) {\n\tdefer b.cond.Broadcast()\n\tb.cond.L.Lock()\n\tdefer b.cond.L.Unlock()\n\tif b.err != nil {\n\t\treturn 0, b.err\n\t}\n\treturn b.data.Write(p)\n}\n\nfunc (b *buffer) Close() error {\n\treturn b.CloseWithError(nil)\n}\n\nfunc (b *buffer) CloseWithError(err error) error {\n\tdefer b.cond.Broadcast()\n\tb.cond.L.Lock()\n\tdefer b.cond.L.Unlock()\n\tif err == nil {\n\t\tb.err = io.EOF\n\t} else {\n\t\tb.err = err\n\t}\n\treturn nil\n}\n\nfunc (b *buffer) NewReader() io.Reader {\n\treturn &reader{b: b}\n}\n\ntype reader struct {\n\tb    *buffer\n\tread int // # bytes already read\n}\n\nfunc (r *reader) Read(p []byte) (int, error) {\n\tr.b.cond.L.Lock()\n\tfor {\n\t\tswitch {\n\t\tcase r.read < r.b.data.Len():\n\t\t\tbuf := r.b.data.Bytes()\n\t\t\tr.b.cond.L.Unlock()\n\t\t\tn := copy(p, buf[r.read:])\n\t\t\tr.read += n\n\t\t\treturn n, nil\n\t\tcase r.b.err != nil || len(p) == 0:\n\t\t\t// r.b.err != nil means we reached EOF.  And\n\t\t\t// even if we're not at EOF, there's no need\n\t\t\t// to block if len(p)==0.\n\t\t\terr := r.b.err\n\t\t\tr.b.cond.L.Unlock()\n\t\t\treturn 0, err\n\t\tdefault:\n\t\t\tr.b.cond.Wait()\n\t\t}\n\t}\n}\n"
  },
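  {
    "path": "sdk/go/asyncbuf/example_sketch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical usage sketch (illustrative, not part of the original\n// tree): every reader returned by NewReader sees all data written to\n// the Buffer, regardless of when the reader was created.\n\npackage asyncbuf\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n)\n\nfunc ExampleNewBuffer() {\n\tb := NewBuffer(nil)\n\tr1 := b.NewReader()\n\tb.Write([]byte(\"hello\"))\n\tr2 := b.NewReader() // created after the write; still sees \"hello\"\n\tb.Close()\n\tbuf1, _ := ioutil.ReadAll(r1)\n\tbuf2, _ := ioutil.ReadAll(r2)\n\tfmt.Println(string(buf1), string(buf2))\n\t// Output: hello hello\n}\n"
  },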
  {
    "path": "sdk/go/asyncbuf/buf_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage asyncbuf\n\nimport (\n\t\"crypto/md5\"\n\t\"errors\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&Suite{})\n\ntype Suite struct{}\n\nfunc (s *Suite) TestNoWrites(c *check.C) {\n\tb := NewBuffer(nil)\n\tr1 := b.NewReader()\n\tr2 := b.NewReader()\n\tb.Close()\n\ts.checkReader(c, r1, []byte{}, nil, nil)\n\ts.checkReader(c, r2, []byte{}, nil, nil)\n}\n\nfunc (s *Suite) TestNoReaders(c *check.C) {\n\tb := NewBuffer(nil)\n\tn, err := b.Write([]byte(\"foobar\"))\n\terr2 := b.Close()\n\tc.Check(n, check.Equals, 6)\n\tc.Check(err, check.IsNil)\n\tc.Check(err2, check.IsNil)\n}\n\nfunc (s *Suite) TestWriteReadClose(c *check.C) {\n\tdone := make(chan bool, 2)\n\tb := NewBuffer(nil)\n\tn, err := b.Write([]byte(\"foobar\"))\n\tc.Check(n, check.Equals, 6)\n\tc.Check(err, check.IsNil)\n\tr1 := b.NewReader()\n\tr2 := b.NewReader()\n\tgo s.checkReader(c, r1, []byte(\"foobar\"), nil, done)\n\tgo s.checkReader(c, r2, []byte(\"foobar\"), nil, done)\n\ttime.Sleep(time.Millisecond)\n\tc.Check(len(done), check.Equals, 0)\n\tb.Close()\n\t<-done\n\t<-done\n}\n\nfunc (s *Suite) TestPrefillWriteCloseRead(c *check.C) {\n\tdone := make(chan bool, 2)\n\tb := NewBuffer([]byte(\"baz\"))\n\tn, err := b.Write([]byte(\"waz\"))\n\tc.Check(n, check.Equals, 3)\n\tc.Check(err, check.IsNil)\n\tb.Close()\n\tr1 := b.NewReader()\n\tgo s.checkReader(c, r1, []byte(\"bazwaz\"), nil, done)\n\tr2 := b.NewReader()\n\tgo s.checkReader(c, r2, []byte(\"bazwaz\"), nil, done)\n\t<-done\n\t<-done\n}\n\nfunc (s *Suite) TestWriteReadCloseRead(c *check.C) {\n\tdone := make(chan bool, 1)\n\tb := NewBuffer(nil)\n\tr1 := b.NewReader()\n\tgo s.checkReader(c, r1, []byte(\"bazwazqux\"), nil, done)\n\n\tb.Write([]byte(\"bazwaz\"))\n\n\tr2 := b.NewReader()\n\tr2.Read(make([]byte, 3))\n\n\tb.Write([]byte(\"qux\"))\n\tb.Close()\n\n\ts.checkReader(c, r2, []byte(\"wazqux\"), nil, nil)\n\t<-done\n}\n\nfunc (s *Suite) TestReadAtEOF(c *check.C) {\n\tbuf := make([]byte, 8)\n\n\tb := NewBuffer([]byte{1, 2, 3})\n\n\tr := b.NewReader()\n\tn, err := r.Read(buf)\n\tc.Check(n, check.Equals, 3)\n\tc.Check(err, check.IsNil)\n\n\t// Reading zero bytes at EOF, but before Close(), doesn't\n\t// block or error\n\tdone := make(chan bool)\n\tgo func() {\n\t\tdefer close(done)\n\t\tn, err = r.Read(buf[:0])\n\t\tc.Check(n, check.Equals, 0)\n\t\tc.Check(err, check.IsNil)\n\t}()\n\tselect {\n\tcase <-done:\n\tcase <-time.After(time.Second):\n\t\tc.Error(\"timeout\")\n\t}\n\n\tb.Close()\n\n\t// Reading zero bytes after Close() returns EOF\n\tn, err = r.Read(buf[:0])\n\tc.Check(n, check.Equals, 0)\n\tc.Check(err, check.Equals, io.EOF)\n\n\t// Reading from start after Close() returns 3 bytes, then EOF\n\tr = b.NewReader()\n\tn, err = r.Read(buf)\n\tc.Check(n, check.Equals, 3)\n\tif err != nil {\n\t\tc.Check(err, check.Equals, io.EOF)\n\t}\n\tn, err = r.Read(buf[:0])\n\tc.Check(n, check.Equals, 0)\n\tc.Check(err, check.Equals, io.EOF)\n\tn, err = r.Read(buf)\n\tc.Check(n, check.Equals, 0)\n\tc.Check(err, check.Equals, io.EOF)\n}\n\nfunc (s *Suite) TestCloseWithError(c *check.C) {\n\terrFake := errors.New(\"it's not even a real error\")\n\n\tdone := make(chan bool, 1)\n\tb := NewBuffer(nil)\n\tr1 := b.NewReader()\n\tgo s.checkReader(c, r1, []byte(\"bazwazqux\"), errFake, done)\n\n\tb.Write([]byte(\"bazwaz\"))\n\n\tr2 := 
b.NewReader()\n\tr2.Read(make([]byte, 3))\n\n\tb.Write([]byte(\"qux\"))\n\tb.CloseWithError(errFake)\n\n\ts.checkReader(c, r2, []byte(\"wazqux\"), errFake, nil)\n\t<-done\n}\n\n// Write n*n bytes, n at a time; read them into n goroutines using\n// varying buffer sizes; compare checksums.\nfunc (s *Suite) TestManyReaders(c *check.C) {\n\tconst n = 256\n\n\tb := NewBuffer(nil)\n\n\texpectSum := make(chan []byte)\n\tgo func() {\n\t\thash := md5.New()\n\t\tbuf := make([]byte, n)\n\t\tfor i := 0; i < n; i++ {\n\t\t\ttime.Sleep(10 * time.Nanosecond)\n\t\t\trand.Read(buf)\n\t\t\tb.Write(buf)\n\t\t\thash.Write(buf)\n\t\t}\n\t\texpectSum <- hash.Sum(nil)\n\t\tb.Close()\n\t}()\n\n\tgotSum := make(chan []byte)\n\tfor i := 0; i < n; i++ {\n\t\tgo func(bufSize int) {\n\t\t\tgot := md5.New()\n\t\t\tio.CopyBuffer(got, b.NewReader(), make([]byte, bufSize))\n\t\t\tgotSum <- got.Sum(nil)\n\t\t}(i + n/2)\n\t}\n\n\texpect := <-expectSum\n\tfor i := 0; i < n; i++ {\n\t\tc.Check(expect, check.DeepEquals, <-gotSum)\n\t}\n}\n\nfunc (s *Suite) BenchmarkOneReader(c *check.C) {\n\ts.benchmarkReaders(c, 1)\n}\n\nfunc (s *Suite) BenchmarkManyReaders(c *check.C) {\n\ts.benchmarkReaders(c, 100)\n}\n\nfunc (s *Suite) benchmarkReaders(c *check.C, readers int) {\n\tvar n int64\n\tt0 := time.Now()\n\n\tbuf := make([]byte, 10000)\n\trand.Read(buf)\n\tfor i := 0; i < 10; i++ {\n\t\tb := NewBuffer(nil)\n\t\tgo func() {\n\t\t\tfor i := 0; i < c.N; i++ {\n\t\t\t\tb.Write(buf)\n\t\t\t}\n\t\t\tb.Close()\n\t\t}()\n\n\t\tvar wg sync.WaitGroup\n\t\tfor i := 0; i < readers; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tnn, _ := io.Copy(ioutil.Discard, b.NewReader())\n\t\t\t\tatomic.AddInt64(&n, int64(nn))\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\t}\n\tc.Logf(\"%d bytes, %.0f MB/s\", n, float64(n)/time.Since(t0).Seconds()/1000000)\n}\n\nfunc (s *Suite) checkReader(c *check.C, r io.Reader, expectData []byte, expectError error, done chan bool) {\n\tbuf, err := ioutil.ReadAll(r)\n\tc.Check(err, check.Equals, expectError)\n\tc.Check(buf, check.DeepEquals, expectData)\n\tif done != nil {\n\t\tdone <- true\n\t}\n}\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n"
  },
  {
    "path": "sdk/go/auth/auth.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage auth\n\nimport (\n\t\"context\"\n\t\"encoding/base64\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n)\n\ntype Credentials struct {\n\tTokens []string\n}\n\nfunc NewCredentials(tokens ...string) *Credentials {\n\treturn &Credentials{Tokens: tokens}\n}\n\nfunc NewContext(ctx context.Context, c *Credentials) context.Context {\n\treturn context.WithValue(ctx, contextKeyCredentials{}, c)\n}\n\nfunc FromContext(ctx context.Context) (*Credentials, bool) {\n\tc, ok := ctx.Value(contextKeyCredentials{}).(*Credentials)\n\treturn c, ok\n}\n\nfunc CredentialsFromRequest(r *http.Request) *Credentials {\n\tif c, ok := FromContext(r.Context()); ok {\n\t\t// preloaded by middleware\n\t\treturn c\n\t}\n\tc := NewCredentials()\n\tc.LoadTokensFromHTTPRequest(r)\n\treturn c\n}\n\n// EncodeTokenCookie accepts a token and returns a byte slice suitable\n// for use as a cookie value, such that it will be decoded correctly\n// by LoadTokensFromHTTPRequest.\nvar EncodeTokenCookie func([]byte) string = base64.URLEncoding.EncodeToString\n\n// DecodeTokenCookie accepts a cookie value and returns the encoded\n// token.\nvar DecodeTokenCookie func(string) ([]byte, error) = base64.URLEncoding.DecodeString\n\n// LoadTokensFromHTTPRequest loads all tokens it can find in the\n// headers and query string of an http query.\nfunc (a *Credentials) LoadTokensFromHTTPRequest(r *http.Request) {\n\t// Load plain token from \"Authorization: Bearer ...\" header\n\t// (typically used by smart API clients).  Note many pre-3.0\n\t// clients send \"OAuth2 ...\" instead of \"Bearer ...\" and that\n\t// is still accepted.\n\tif toks := strings.SplitN(r.Header.Get(\"Authorization\"), \" \", 2); len(toks) == 2 && (toks[0] == \"OAuth2\" || toks[0] == \"Bearer\") {\n\t\ta.Tokens = append(a.Tokens, strings.TrimSpace(toks[1]))\n\t}\n\n\t// Load base64-encoded token from \"Authorization: Basic ...\"\n\t// header (typically used by git via credential helper)\n\tif _, password, ok := r.BasicAuth(); ok {\n\t\ta.Tokens = append(a.Tokens, strings.TrimSpace(password))\n\t}\n\n\t// Load tokens from query string. It's generally not a good\n\t// idea to pass tokens around this way, but passing a narrowly\n\t// scoped token is a reasonable way to implement \"secret link\n\t// to an object\" in a generic way.\n\t//\n\t// ParseQuery always returns a non-nil map which might have\n\t// valid parameters, even when a decoding error causes it to\n\t// return a non-nil err. 
We ignore err; hopefully the caller\n\t// will also need to parse the query string for\n\t// application-specific purposes and will therefore\n\t// find/report decoding errors in a suitable way.\n\tqvalues, _ := url.ParseQuery(r.URL.RawQuery)\n\tif val, ok := qvalues[\"api_token\"]; ok {\n\t\tfor _, token := range val {\n\t\t\ta.Tokens = append(a.Tokens, strings.TrimSpace(token))\n\t\t}\n\t}\n\n\ta.loadTokenFromCookie(r)\n\n\t// TODO: Load token from Rails session cookie (if Rails site\n\t// secret is known)\n}\n\nfunc (a *Credentials) loadTokenFromCookie(r *http.Request) {\n\tcookie, err := r.Cookie(\"arvados_api_token\")\n\tif err != nil || len(cookie.Value) == 0 {\n\t\treturn\n\t}\n\ttoken, err := DecodeTokenCookie(cookie.Value)\n\tif err != nil {\n\t\treturn\n\t}\n\ta.Tokens = append(a.Tokens, strings.TrimSpace(string(token)))\n}\n\n// LoadTokensFromHTTPRequestBody loads credentials from the request\n// body.\n//\n// This is separate from LoadTokensFromHTTPRequest() because it's not\n// always desirable to read the request body. This has to be requested\n// explicitly by the application.\nfunc (a *Credentials) LoadTokensFromHTTPRequestBody(r *http.Request) error {\n\tif r.Header.Get(\"Content-Type\") != \"application/x-www-form-urlencoded\" {\n\t\treturn nil\n\t}\n\tif err := r.ParseForm(); err != nil {\n\t\treturn err\n\t}\n\tif t := r.PostFormValue(\"api_token\"); t != \"\" {\n\t\ta.Tokens = append(a.Tokens, strings.TrimSpace(t))\n\t}\n\treturn nil\n}\n\n// TokenUUIDs returns a list of token UUIDs (or a placeholder for v1\n// tokens) suitable for logging.\nfunc (creds *Credentials) TokenUUIDs() []string {\n\tvar tokenUUIDs []string\n\tfor _, t := range creds.Tokens {\n\t\tif strings.HasPrefix(t, \"v2/\") {\n\t\t\ttokenParts := strings.Split(t, \"/\")\n\t\t\tif len(tokenParts) >= 3 {\n\t\t\t\ttokenUUIDs = append(tokenUUIDs, tokenParts[1])\n\t\t\t}\n\t\t} else {\n\t\t\tend := t\n\t\t\tif len(t) > 5 {\n\t\t\t\tend = t[len(t)-5:]\n\t\t\t}\n\t\t\ttokenUUIDs = append(tokenUUIDs, \"v1 token ending in \"+end)\n\t\t}\n\t}\n\treturn tokenUUIDs\n}\n"
  },
  {
    "path": "sdk/go/auth/handlers.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage auth\n\nimport (\n\t\"context\"\n\t\"net/http\"\n)\n\ntype contextKeyCredentials struct{}\n\n// LoadToken wraps the next handler, adding credentials to the request\n// context so subsequent handlers can access them efficiently via\n// CredentialsFromRequest.\nfunc LoadToken(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif _, ok := r.Context().Value(contextKeyCredentials{}).(*Credentials); !ok {\n\t\t\tr = r.WithContext(context.WithValue(r.Context(), contextKeyCredentials{}, CredentialsFromRequest(r)))\n\t\t}\n\t\tnext.ServeHTTP(w, r)\n\t})\n}\n\n// RequireLiteralToken wraps the next handler, rejecting any request\n// that doesn't supply the given token. If the given token is empty,\n// RequireLiteralToken returns next (i.e., no auth checks are\n// performed).\nfunc RequireLiteralToken(token string, next http.Handler) http.Handler {\n\tif token == \"\" {\n\t\treturn next\n\t}\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tc := CredentialsFromRequest(r)\n\t\tif len(c.Tokens) == 0 {\n\t\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tfor _, t := range c.Tokens {\n\t\t\tif t == token {\n\t\t\t\tnext.ServeHTTP(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\thttp.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)\n\t})\n}\n"
  },
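  {
    "path": "sdk/go/auth/example_sketch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical usage sketch (illustrative, not part of the original\n// tree): LoadToken preloads credentials into the request context, so\n// downstream handlers can call CredentialsFromRequest cheaply.\n\npackage auth\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n)\n\nfunc ExampleLoadToken() {\n\tapp := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tfmt.Println(CredentialsFromRequest(r).Tokens)\n\t})\n\thandler := LoadToken(app)\n\treq := httptest.NewRequest(\"GET\", \"/any/path?api_token=xyzzy\", nil)\n\thandler.ServeHTTP(httptest.NewRecorder(), req)\n\t// Output: [xyzzy]\n}\n"
  },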
  {
    "path": "sdk/go/auth/handlers_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage auth\n\nimport (\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"strings\"\n\t\"testing\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&HandlersSuite{})\n\ntype HandlersSuite struct {\n\tserved         int\n\tgotCredentials *Credentials\n}\n\nfunc (s *HandlersSuite) SetUpTest(c *check.C) {\n\ts.served = 0\n\ts.gotCredentials = nil\n}\n\nfunc (s *HandlersSuite) TestLoadToken(c *check.C) {\n\thandler := LoadToken(s)\n\thandler.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest(\"GET\", \"/foo/bar?api_token=xyzzy\", nil))\n\tc.Check(s.gotCredentials.Tokens, check.DeepEquals, []string{\"xyzzy\"})\n}\n\n// Ignore leading and trailing spaces, newlines, etc. in case a user\n// has added them inadvertently during copy/paste.\nfunc (s *HandlersSuite) TestTrimSpaceInQuery(c *check.C) {\n\thandler := LoadToken(s)\n\thandler.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest(\"GET\", \"/foo/bar?api_token=%20xyzzy%0a\", nil))\n\tc.Check(s.gotCredentials.Tokens, check.DeepEquals, []string{\"xyzzy\"})\n}\nfunc (s *HandlersSuite) TestTrimSpaceInPostForm(c *check.C) {\n\thandler := LoadToken(s)\n\treq := httptest.NewRequest(\"POST\", \"/foo/bar\", strings.NewReader(url.Values{\"api_token\": []string{\"\\nxyzzy\\n\"}}.Encode()))\n\treq.Header.Set(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\thandler.ServeHTTP(httptest.NewRecorder(), req)\n\tc.Check(s.gotCredentials.Tokens, check.DeepEquals, []string{\"xyzzy\"})\n}\nfunc (s *HandlersSuite) TestTrimSpaceInCookie(c *check.C) {\n\thandler := LoadToken(s)\n\treq := httptest.NewRequest(\"GET\", \"/foo/bar\", nil)\n\treq.AddCookie(&http.Cookie{Name: \"arvados_api_token\", Value: EncodeTokenCookie([]byte(\"\\vxyzzy\\n\"))})\n\thandler.ServeHTTP(httptest.NewRecorder(), req)\n\tc.Check(s.gotCredentials.Tokens, check.DeepEquals, []string{\"xyzzy\"})\n}\nfunc (s *HandlersSuite) TestTrimSpaceInBasicAuth(c *check.C) {\n\thandler := LoadToken(s)\n\treq := httptest.NewRequest(\"GET\", \"/foo/bar\", nil)\n\treq.SetBasicAuth(\"username\", \"\\txyzzy\\n\")\n\thandler.ServeHTTP(httptest.NewRecorder(), req)\n\tc.Check(s.gotCredentials.Tokens, check.DeepEquals, []string{\"xyzzy\"})\n}\n\nfunc (s *HandlersSuite) TestRequireLiteralTokenEmpty(c *check.C) {\n\thandler := RequireLiteralToken(\"\", s)\n\n\tw := httptest.NewRecorder()\n\thandler.ServeHTTP(w, httptest.NewRequest(\"GET\", \"/foo/bar?api_token=abcdef\", nil))\n\tc.Check(s.served, check.Equals, 1)\n\tc.Check(w.Code, check.Equals, http.StatusOK)\n\n\tw = httptest.NewRecorder()\n\thandler.ServeHTTP(w, httptest.NewRequest(\"GET\", \"/foo/bar\", nil))\n\tc.Check(s.served, check.Equals, 2)\n\tc.Check(w.Code, check.Equals, http.StatusOK)\n}\n\nfunc (s *HandlersSuite) TestRequireLiteralToken(c *check.C) {\n\thandler := RequireLiteralToken(\"xyzzy\", s)\n\n\tw := httptest.NewRecorder()\n\thandler.ServeHTTP(w, httptest.NewRequest(\"GET\", \"/foo/bar?api_token=abcdef\", nil))\n\tc.Check(s.served, check.Equals, 0)\n\tc.Check(w.Code, check.Equals, http.StatusForbidden)\n\n\tw = httptest.NewRecorder()\n\thandler.ServeHTTP(w, httptest.NewRequest(\"GET\", \"/foo/bar\", nil))\n\tc.Check(s.served, check.Equals, 0)\n\tc.Check(w.Code, check.Equals, http.StatusUnauthorized)\n\n\tw = httptest.NewRecorder()\n\thandler.ServeHTTP(w, httptest.NewRequest(\"GET\", \"/foo/bar?api_token=xyzzy\", 
nil))\n\tc.Check(s.served, check.Equals, 1)\n\tc.Check(w.Code, check.Equals, http.StatusOK)\n\tc.Assert(s.gotCredentials, check.NotNil)\n\tc.Assert(s.gotCredentials.Tokens, check.HasLen, 1)\n\tc.Check(s.gotCredentials.Tokens[0], check.Equals, \"xyzzy\")\n}\n\nfunc (s *HandlersSuite) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ts.served++\n\ts.gotCredentials = CredentialsFromRequest(r)\n\ts.gotCredentials.LoadTokensFromHTTPRequestBody(r)\n}\n"
  },
  {
    "path": "sdk/go/auth/salt.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage auth\n\nimport (\n\t\"crypto/hmac\"\n\t\"crypto/sha1\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\treObsoleteToken  = regexp.MustCompile(`^[0-9a-z]{41,}$`)\n\tErrObsoleteToken = errors.New(\"obsolete token format\")\n\tErrTokenFormat   = errors.New(\"badly formatted token\")\n\tErrSalted        = errors.New(\"token already salted\")\n)\n\nfunc SaltToken(token, remote string) (string, error) {\n\tparts := strings.Split(token, \"/\")\n\tif len(parts) < 3 || parts[0] != \"v2\" {\n\t\tif reObsoleteToken.MatchString(token) {\n\t\t\treturn \"\", ErrObsoleteToken\n\t\t}\n\t\treturn \"\", ErrTokenFormat\n\t}\n\tuuid := parts[1]\n\tsecret := parts[2]\n\tif strings.HasPrefix(uuid, remote) {\n\t\t// target cluster issued this token -- send the real\n\t\t// token\n\t\treturn token, nil\n\t} else if len(secret) != 40 {\n\t\t// not already salted\n\t\thmac := hmac.New(sha1.New, []byte(secret))\n\t\tio.WriteString(hmac, remote)\n\t\tsecret = fmt.Sprintf(\"%x\", hmac.Sum(nil))\n\t\treturn \"v2/\" + uuid + \"/\" + secret, nil\n\t} else {\n\t\t// already salted, and not issued by target cluster --\n\t\t// can't be used\n\t\treturn \"\", ErrSalted\n\t}\n}\n"
  },
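  {
    "path": "sdk/go/auth/salt_example_sketch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical usage sketch (illustrative, not part of the original\n// tree): salting a fake v2 token for a remote cluster. Because the\n// token's UUID prefix (\"aaaaa\") is not the remote cluster ID\n// (\"bbbbb\") and its secret is not 40 hex digits, SaltToken returns a\n// new token whose secret is an HMAC of the original secret and the\n// remote cluster ID.\n\npackage auth\n\nimport \"fmt\"\n\nfunc ExampleSaltToken() {\n\ttok := \"v2/aaaaa-gj3su-000000000000000/fakesecret\"\n\tsalted, err := SaltToken(tok, \"bbbbb\")\n\tfmt.Println(err, salted != tok)\n\t// Output: <nil> true\n}\n"
  },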
  {
    "path": "sdk/go/blockdigest/blockdigest.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Package blockdigest stores a Block Locator Digest compactly. Can be used as a map key.\npackage blockdigest\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar LocatorPattern = regexp.MustCompile(\n\t\"^[0-9a-fA-F]{32}\\\\+[0-9]+(\\\\+[A-Z][A-Za-z0-9@_-]*)*$\")\n\n// BlockDigest stores a Block Locator Digest compactly, up to 128 bits. Can be\n// used as a map key.\ntype BlockDigest struct {\n\tH uint64\n\tL uint64\n}\n\ntype DigestWithSize struct {\n\tDigest BlockDigest\n\tSize   uint32\n}\n\ntype BlockLocator struct {\n\tDigest BlockDigest\n\tSize   int\n\tHints  []string\n}\n\nfunc (d BlockDigest) String() string {\n\treturn fmt.Sprintf(\"%016x%016x\", d.H, d.L)\n}\n\nfunc (w DigestWithSize) String() string {\n\treturn fmt.Sprintf(\"%s+%d\", w.Digest.String(), w.Size)\n}\n\n// FromString creates a new BlockDigest unless an error is encountered.\nfunc FromString(s string) (dig BlockDigest, err error) {\n\tif len(s) != 32 {\n\t\terr = fmt.Errorf(\"Block digest should be exactly 32 characters but this one is %d: %s\", len(s), s)\n\t\treturn\n\t}\n\n\tvar d BlockDigest\n\td.H, err = strconv.ParseUint(s[:16], 16, 64)\n\tif err != nil {\n\t\treturn\n\t}\n\td.L, err = strconv.ParseUint(s[16:], 16, 64)\n\tif err != nil {\n\t\treturn\n\t}\n\tdig = d\n\treturn\n}\n\nfunc IsBlockLocator(s string) bool {\n\treturn LocatorPattern.MatchString(s)\n}\n\nfunc ParseBlockLocator(s string) (BlockLocator, error) {\n\tif !LocatorPattern.MatchString(s) {\n\t\treturn BlockLocator{}, fmt.Errorf(\"String %q does not match block locator pattern %q.\", s, LocatorPattern.String())\n\t}\n\ttokens := strings.Split(s, \"+\")\n\t// We expect both of the following to succeed since\n\t// LocatorPattern restricts the strings appropriately.\n\tblockDigest, err := FromString(tokens[0])\n\tif err != nil {\n\t\treturn BlockLocator{}, err\n\t}\n\tblockSize, err := strconv.ParseInt(tokens[1], 10, 0)\n\tif err != nil {\n\t\treturn BlockLocator{}, err\n\t}\n\treturn BlockLocator{\n\t\tDigest: blockDigest,\n\t\tSize:   int(blockSize),\n\t\tHints:  tokens[2:],\n\t}, nil\n}\n"
  },
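  {
    "path": "sdk/go/blockdigest/example_sketch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical usage sketch (illustrative, not part of the original\n// tree): parsing a Keep block locator into its digest, size, and\n// hints. The locator below is the MD5 of \"foo\" plus a fake hint.\n\npackage blockdigest\n\nimport \"fmt\"\n\nfunc ExampleParseBlockLocator() {\n\tloc, err := ParseBlockLocator(\"acbd18db4cc2f85cedef654fccc4a4d8+3+K@zzzzz\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(loc.Digest, loc.Size, loc.Hints)\n\t// Output: acbd18db4cc2f85cedef654fccc4a4d8 3 [K@zzzzz]\n}\n"
  },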
  {
    "path": "sdk/go/blockdigest/blockdigest_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage blockdigest\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc getStackTrace() string {\n\tbuf := make([]byte, 1000)\n\tbytesWritten := runtime.Stack(buf, false)\n\treturn \"Stack Trace:\\n\" + string(buf[:bytesWritten])\n}\n\nfunc expectEqual(t *testing.T, actual interface{}, expected interface{}) {\n\tif actual != expected {\n\t\tt.Fatalf(\"Expected %v but received %v instead. %s\",\n\t\t\texpected,\n\t\t\tactual,\n\t\t\tgetStackTrace())\n\t}\n}\n\nfunc expectStringSlicesEqual(t *testing.T, actual []string, expected []string) {\n\tif len(actual) != len(expected) {\n\t\tt.Fatalf(\"Expected %v (length %d), but received %v (length %d) instead. %s\", expected, len(expected), actual, len(actual), getStackTrace())\n\t}\n\tfor i := range actual {\n\t\tif actual[i] != expected[i] {\n\t\t\tt.Fatalf(\"Expected %v but received %v instead (first disagreement at position %d). %s\", expected, actual, i, getStackTrace())\n\t\t}\n\t}\n}\n\nfunc expectValidDigestString(t *testing.T, s string) {\n\tbd, err := FromString(s)\n\tif err != nil {\n\t\tt.Fatalf(\"Expected %s to produce a valid BlockDigest but instead got error: %v\", s, err)\n\t}\n\n\texpected := strings.ToLower(s)\n\n\tif expected != bd.String() {\n\t\tt.Fatalf(\"Expected %s to be returned by FromString(%s).String() but instead we received %s\", expected, s, bd.String())\n\t}\n}\n\nfunc expectInvalidDigestString(t *testing.T, s string) {\n\t_, err := FromString(s)\n\tif err == nil {\n\t\tt.Fatalf(\"Expected %s to be an invalid BlockDigest, but did not receive an error\", s)\n\t}\n}\n\nfunc expectBlockLocator(t *testing.T, actual BlockLocator, expected BlockLocator) {\n\texpectEqual(t, actual.Digest, expected.Digest)\n\texpectEqual(t, actual.Size, expected.Size)\n\texpectStringSlicesEqual(t, actual.Hints, expected.Hints)\n}\n\nfunc expectLocatorPatternMatch(t *testing.T, s string) {\n\tif !LocatorPattern.MatchString(s) {\n\t\tt.Fatalf(\"Expected \\\"%s\\\" to match locator pattern but it did not.\",\n\t\t\ts)\n\t}\n}\n\nfunc expectLocatorPatternFail(t *testing.T, s string) {\n\tif LocatorPattern.MatchString(s) {\n\t\tt.Fatalf(\"Expected \\\"%s\\\" to fail locator pattern but it passed.\",\n\t\t\ts)\n\t}\n}\n\nfunc TestValidDigestStrings(t *testing.T) {\n\texpectValidDigestString(t, \"01234567890123456789abcdefabcdef\")\n\texpectValidDigestString(t, \"01234567890123456789ABCDEFABCDEF\")\n\texpectValidDigestString(t, \"01234567890123456789AbCdEfaBcDeF\")\n}\n\nfunc TestInvalidDigestStrings(t *testing.T) {\n\texpectInvalidDigestString(t, \"01234567890123456789abcdefabcdeg\")\n\texpectInvalidDigestString(t, \"01234567890123456789abcdefabcde\")\n\texpectInvalidDigestString(t, \"01234567890123456789abcdefabcdefa\")\n\texpectInvalidDigestString(t, \"g1234567890123456789abcdefabcdef\")\n}\n\nfunc TestBlockDigestWorksAsMapKey(t *testing.T) {\n\tm := make(map[BlockDigest]int)\n\tbd, err := FromString(\"01234567890123456789abcdefabcdef\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error during FromString for block: %v\", err)\n\t}\n\tm[bd] = 5\n}\n\nfunc TestBlockDigestGetsPrettyPrintedByPrintf(t *testing.T) {\n\tinput := \"01234567890123456789abcdefabcdef\"\n\tfromString, err := FromString(input)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error during FromString: %v\", err)\n\t}\n\tprettyPrinted := fmt.Sprintf(\"%v\", fromString)\n\tif prettyPrinted != input {\n\t\tt.Fatalf(\"Expected 
blockDigest produced from \\\"%s\\\" to be printed as \"+\n\t\t\t\"\\\"%s\\\", but instead it was printed as %s\",\n\t\t\tinput, input, prettyPrinted)\n\t}\n}\n\nfunc TestBlockDigestGetsPrettyPrintedByPrintfInNestedStructs(t *testing.T) {\n\tinput, err := FromString(\"01234567890123456789abcdefabcdef\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error during FromString for block: %v\", err)\n\t}\n\tvalue := 42\n\tnested := struct {\n\t\t// Fun trivia fact: If this field was called \"digest\" instead of\n\t\t// \"Digest\", then it would not be exported and String() would\n\t\t// never get called on it and our output would look very\n\t\t// different.\n\t\tDigest BlockDigest\n\t\tvalue  int\n\t}{\n\t\tinput,\n\t\tvalue,\n\t}\n\tprettyPrinted := fmt.Sprintf(\"%+v\", nested)\n\texpected := fmt.Sprintf(\"{Digest:%s value:%d}\", input, value)\n\tif prettyPrinted != expected {\n\t\tt.Fatalf(\"Expected blockDigest produced from \\\"%s\\\" to be printed as \"+\n\t\t\t\"\\\"%s\\\", but instead it was printed as %s\",\n\t\t\tinput, expected, prettyPrinted)\n\t}\n}\n\nfunc TestLocatorPatternBasic(t *testing.T) {\n\texpectLocatorPatternMatch(t, \"12345678901234567890123456789012+12345\")\n\texpectLocatorPatternMatch(t, \"A2345678901234abcdefababdeffdfdf+12345\")\n\texpectLocatorPatternMatch(t, \"12345678901234567890123456789012+12345+A1\")\n\texpectLocatorPatternMatch(t,\n\t\t\"12345678901234567890123456789012+12345+A1+B123wxyz@_-\")\n\texpectLocatorPatternMatch(t,\n\t\t\"12345678901234567890123456789012+12345+A1+B123wxyz@_-+C@\")\n\texpectLocatorPatternMatch(t, \"12345678901234567890123456789012+12345+A\")\n\texpectLocatorPatternMatch(t, \"12345678901234567890123456789012+12345+A1+B\")\n\texpectLocatorPatternMatch(t, \"12345678901234567890123456789012+12345+A+B2\")\n\n\texpectLocatorPatternFail(t, \"12345678901234567890123456789012\")\n\texpectLocatorPatternFail(t, \"12345678901234567890123456789012+\")\n\texpectLocatorPatternFail(t, \"12345678901234567890123456789012+12345+\")\n\texpectLocatorPatternFail(t, \"1234567890123456789012345678901+12345\")\n\texpectLocatorPatternFail(t, \"123456789012345678901234567890123+12345\")\n\texpectLocatorPatternFail(t, \"g2345678901234abcdefababdeffdfdf+12345\")\n\texpectLocatorPatternFail(t, \"12345678901234567890123456789012+12345 \")\n\texpectLocatorPatternFail(t, \"12345678901234567890123456789012+12345+1\")\n\texpectLocatorPatternFail(t, \"12345678901234567890123456789012+12345+1A\")\n\texpectLocatorPatternFail(t, \"12345678901234567890123456789012+12345+a1\")\n\texpectLocatorPatternFail(t, \"12345678901234567890123456789012+12345+A1+\")\n\n}\n\nfunc TestParseBlockLocatorSimple(t *testing.T) {\n\tb, err := ParseBlockLocator(\"365f83f5f808896ec834c8b595288735+2310+K@qr1hi+Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error parsing block locator: %v\", err)\n\t}\n\td, err := FromString(\"365f83f5f808896ec834c8b595288735\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error during FromString for block: %v\", err)\n\t}\n\texpectBlockLocator(t, b, BlockLocator{Digest: d,\n\t\tSize: 2310,\n\t\tHints: []string{\"K@qr1hi\",\n\t\t\t\"Af0c9a66381f3b028677411926f0be1c6282fe67c@542b5ddf\"}})\n}\n"
  },
  {
    "path": "sdk/go/blockdigest/testing.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Code used for testing only.\n\npackage blockdigest\n\n// MakeTestBlockDigest is used for testing with distinct BlockDigests\nfunc MakeTestBlockDigest(i int) BlockDigest {\n\treturn BlockDigest{L: uint64(i)}\n}\n\nfunc MakeTestDigestSpecifySize(i int, s int) DigestWithSize {\n\treturn DigestWithSize{Digest: BlockDigest{L: uint64(i)}, Size: uint32(s)}\n}\n\nfunc MakeTestDigestWithSize(i int) DigestWithSize {\n\treturn MakeTestDigestSpecifySize(i, i)\n}\n"
  },
  {
    "path": "sdk/go/config/dump.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"errors\"\n\t\"os\"\n\n\t\"github.com/ghodss/yaml\"\n)\n\n// DumpAndExit writes the given config to stdout as YAML. If an error\n// occurs, that error is returned. Otherwise, the program exits 0.\n//\n// Example:\n//\n//\tlog.Fatal(DumpAndExit(cfg))\nfunc DumpAndExit(cfg interface{}) error {\n\ty, err := yaml.Marshal(cfg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = os.Stdout.Write(y)\n\tif err != nil {\n\t\treturn err\n\t}\n\tos.Exit(0)\n\treturn errors.New(\"exit failed!?\")\n}\n"
  },
  {
    "path": "sdk/go/config/load.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\n\t\"github.com/ghodss/yaml\"\n)\n\n// LoadFile loads configuration from the file given by configPath and\n// decodes it into cfg.\n//\n// YAML and JSON formats are supported.\nfunc LoadFile(cfg interface{}, configPath string) error {\n\tbuf, err := ioutil.ReadFile(configPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = yaml.Unmarshal(buf, cfg)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error decoding config %q: %v\", configPath, err)\n\t}\n\treturn nil\n}\n\n// Dump returns a YAML representation of cfg.\nfunc Dump(cfg interface{}) ([]byte, error) {\n\treturn yaml.Marshal(cfg)\n}\n"
  },
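  {
    "path": "sdk/go/config/example_sketch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical usage sketch (illustrative, not part of the original\n// tree): LoadFile decodes a YAML (or JSON) file into any struct. The\n// config struct and file contents below are made up.\n\npackage config\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n)\n\nfunc ExampleLoadFile() {\n\ttype fakeConfig struct {\n\t\tListen string\n\t\tDebug  bool\n\t}\n\tf, err := ioutil.TempFile(\"\", \"cfg\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tf.Close()\n\tdefer os.Remove(f.Name())\n\tioutil.WriteFile(f.Name(), []byte(\"Listen: localhost:8000\\nDebug: true\\n\"), 0600)\n\tvar cfg fakeConfig\n\tif err := LoadFile(&cfg, f.Name()); err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(cfg.Listen, cfg.Debug)\n\t// Output: localhost:8000 true\n}\n"
  },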
  {
    "path": "sdk/go/ctxlog/log.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage ctxlog\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nvar (\n\tloggerCtxKey = new(int)\n\trootLogger   = logrus.New()\n)\n\nconst rfc3339NanoFixed = \"2006-01-02T15:04:05.000000000Z07:00\"\n\n// Context returns a new child context such that FromContext(child)\n// returns the given logger.\nfunc Context(ctx context.Context, logger logrus.FieldLogger) context.Context {\n\treturn context.WithValue(ctx, loggerCtxKey, logger)\n}\n\n// FromContext returns the logger suitable for the given context -- the one\n// attached by contextWithLogger() if applicable, otherwise the\n// top-level logger with no fields/values.\nfunc FromContext(ctx context.Context) logrus.FieldLogger {\n\tif ctx != nil {\n\t\tif logger, ok := ctx.Value(loggerCtxKey).(logrus.FieldLogger); ok {\n\t\t\treturn logger\n\t\t}\n\t}\n\treturn rootLogger.WithFields(nil)\n}\n\n// New returns a new logger with the indicated format and\n// level.\nfunc New(out io.Writer, format, level string) *logrus.Logger {\n\tlogger := logrus.New()\n\tlogger.Out = out\n\tsetFormat(logger, format)\n\tsetLevel(logger, level)\n\treturn logger\n}\n\nfunc TestLogger(c interface{ Log(...interface{}) }) *logrus.Logger {\n\tlogger := logrus.New()\n\tlogger.Out = &logWriter{c.Log}\n\tsetFormat(logger, \"text\")\n\tif d := os.Getenv(\"ARVADOS_DEBUG\"); d != \"0\" && d != \"\" {\n\t\tsetLevel(logger, \"debug\")\n\t} else {\n\t\tsetLevel(logger, \"info\")\n\t}\n\treturn logger\n}\n\n// LogWriter returns an io.Writer that writes to the given log func,\n// which is typically (*check.C).Log().\nfunc LogWriter(log func(...interface{})) io.Writer {\n\treturn &logWriter{log}\n}\n\n// SetLevel sets the current logging level. See logrus for level\n// names.\nfunc SetLevel(level string) {\n\tsetLevel(rootLogger, level)\n}\n\nfunc setLevel(logger *logrus.Logger, level string) {\n\tif level == \"\" {\n\t} else if lvl, err := logrus.ParseLevel(level); err != nil {\n\t\tlogrus.WithField(\"Level\", level).Fatal(\"unknown log level\")\n\t} else {\n\t\tlogger.Level = lvl\n\t}\n}\n\n// SetFormat sets the current logging format to \"json\" or \"text\".\nfunc SetFormat(format string) {\n\tsetFormat(rootLogger, format)\n}\n\nfunc setFormat(logger *logrus.Logger, format string) {\n\tswitch format {\n\tcase \"text\":\n\t\tlogger.Formatter = &logrus.TextFormatter{\n\t\t\tFullTimestamp:   true,\n\t\t\tTimestampFormat: rfc3339NanoFixed,\n\t\t}\n\tcase \"plain\":\n\t\tlogger.Formatter = &logrus.TextFormatter{\n\t\t\tDisableColors:    true,\n\t\t\tDisableTimestamp: true,\n\t\t}\n\tcase \"json\", \"\":\n\t\tlogger.Formatter = &logrus.JSONFormatter{\n\t\t\tTimestampFormat: rfc3339NanoFixed,\n\t\t}\n\tdefault:\n\t\tlogrus.WithField(\"Format\", format).Fatal(\"unknown log format\")\n\t}\n}\n\n// logWriter is an io.Writer that writes by calling a \"write log\"\n// function, typically (*check.C)Log().\ntype logWriter struct {\n\tlogfunc func(...interface{})\n}\n\nfunc (tl *logWriter) Write(buf []byte) (int, error) {\n\ttl.logfunc(string(bytes.TrimRight(buf, \"\\n\")))\n\treturn len(buf), nil\n}\n"
  },
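  {
    "path": "sdk/go/ctxlog/example_sketch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical usage sketch (illustrative, not part of the original\n// tree): attach a request-scoped logger with Context(), then recover\n// it anywhere below with FromContext(). The field values are fake.\n\npackage ctxlog\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n)\n\nfunc ExampleContext() {\n\tvar buf bytes.Buffer\n\tlogger := New(&buf, \"plain\", \"info\")\n\tctx := Context(context.Background(), logger.WithField(\"RequestID\", \"req-123\"))\n\n\t// ...later, deep in a call stack:\n\tFromContext(ctx).Info(\"hello\")\n\tfmt.Print(buf.String())\n\t// Output: level=info msg=hello RequestID=req-123\n}\n"
  },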
  {
    "path": "sdk/go/dispatch/dispatch.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Package dispatch is a helper library for building Arvados container\n// dispatchers.\npackage dispatch\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/container\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst (\n\tQueued    = arvados.ContainerStateQueued\n\tLocked    = arvados.ContainerStateLocked\n\tRunning   = arvados.ContainerStateRunning\n\tComplete  = arvados.ContainerStateComplete\n\tCancelled = arvados.ContainerStateCancelled\n)\n\ntype Logger interface {\n\tPrintf(string, ...interface{})\n\tWarnf(string, ...interface{})\n\tDebugf(string, ...interface{})\n}\n\n// Dispatcher struct\ntype Dispatcher struct {\n\tArv *arvadosclient.ArvadosClient\n\n\tLogger Logger\n\n\t// Batch size for container queries\n\tBatchSize int\n\n\t// Queue polling frequency\n\tPollPeriod time.Duration\n\n\t// Time to wait between successive attempts to run the same container\n\tMinRetryPeriod time.Duration\n\n\t// Func that implements the container lifecycle. Must be set\n\t// to a non-nil DispatchFunc before calling Run().\n\tRunContainer DispatchFunc\n\n\tauth     arvados.APIClientAuthorization\n\tmtx      sync.Mutex\n\ttrackers map[string]*runTracker\n\tthrottle throttle\n}\n\n// A DispatchFunc executes a container (if the container record is\n// Locked) or resume monitoring an already-running container, and wait\n// until that container exits.\n//\n// While the container runs, the DispatchFunc should listen for\n// updated container records on the provided channel. When the channel\n// closes, the DispatchFunc should stop the container if it's still\n// running, and return.\n//\n// The DispatchFunc should not return until the container is finished.\ntype DispatchFunc func(*Dispatcher, arvados.Container, <-chan arvados.Container) error\n\n// Run watches the API server's queue for containers that are either\n// ready to run and available to lock, or are already locked by this\n// dispatcher's token. 
When a new one appears, Run calls RunContainer\n// in a new goroutine.\nfunc (d *Dispatcher) Run(ctx context.Context) error {\n\tif d.Logger == nil {\n\t\td.Logger = logrus.StandardLogger()\n\t}\n\n\terr := d.Arv.Call(\"GET\", \"api_client_authorizations\", \"\", \"current\", nil, &d.auth)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting my token UUID: %v\", err)\n\t}\n\n\td.throttle.hold = d.MinRetryPeriod\n\n\tpoll := time.NewTicker(d.PollPeriod)\n\tdefer poll.Stop()\n\n\tif d.BatchSize == 0 {\n\t\td.BatchSize = 100\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-poll.C:\n\t\t\tbreak\n\t\tcase <-ctx.Done():\n\t\t\td.mtx.Lock()\n\t\t\tdefer d.mtx.Unlock()\n\t\t\tfor _, tracker := range d.trackers {\n\t\t\t\ttracker.close()\n\t\t\t}\n\t\t\treturn ctx.Err()\n\t\t}\n\n\t\ttodo := make(map[string]*runTracker)\n\t\td.mtx.Lock()\n\t\t// Make a copy of trackers\n\t\tfor uuid, tracker := range d.trackers {\n\t\t\ttodo[uuid] = tracker\n\t\t}\n\t\td.mtx.Unlock()\n\n\t\t// Containers I currently own (Locked/Running)\n\t\tquerySuccess := d.checkForUpdates([][]interface{}{\n\t\t\t{\"locked_by_uuid\", \"=\", d.auth.UUID}}, todo)\n\n\t\t// Containers I should try to dispatch\n\t\tquerySuccess = d.checkForUpdates([][]interface{}{\n\t\t\t{\"state\", \"=\", Queued},\n\t\t\t{\"priority\", \">\", \"0\"}}, todo) && querySuccess\n\n\t\tif !querySuccess {\n\t\t\t// There was an error in one of the previous queries,\n\t\t\t// we probably didn't get updates for all the\n\t\t\t// containers we should have.  Don't check them\n\t\t\t// individually because it may be expensive.\n\t\t\tcontinue\n\t\t}\n\n\t\t// Containers I know about but didn't fall into the\n\t\t// above two categories (probably Complete/Cancelled)\n\t\tvar missed []string\n\t\tfor uuid := range todo {\n\t\t\tmissed = append(missed, uuid)\n\t\t}\n\n\t\tfor len(missed) > 0 {\n\t\t\tvar batch []string\n\t\t\tif len(missed) > 20 {\n\t\t\t\tbatch = missed[0:20]\n\t\t\t\tmissed = missed[20:]\n\t\t\t} else {\n\t\t\t\tbatch = missed\n\t\t\t\tmissed = missed[0:0]\n\t\t\t}\n\t\t\tquerySuccess = d.checkForUpdates([][]interface{}{\n\t\t\t\t{\"uuid\", \"in\", batch}}, todo) && querySuccess\n\t\t}\n\n\t\tif !querySuccess {\n\t\t\t// There was an error in one of the previous queries, we probably\n\t\t\t// didn't see all the containers we should have, so don't shut down\n\t\t\t// the missed containers.\n\t\t\tcontinue\n\t\t}\n\n\t\t// Containers that I know about that didn't show up in any\n\t\t// query should be let go.\n\t\tfor uuid, tracker := range todo {\n\t\t\td.Logger.Printf(\"Container %q not returned by any query, stopping tracking.\", uuid)\n\t\t\ttracker.close()\n\t\t}\n\n\t}\n}\n\n// Start a runner in a new goroutine, and send the initial container\n// record to its updates channel.\nfunc (d *Dispatcher) start(c arvados.Container) *runTracker {\n\ttracker := &runTracker{\n\t\tupdates: make(chan arvados.Container, 1),\n\t\tlogger:  d.Logger,\n\t}\n\ttracker.updates <- c\n\tgo func() {\n\t\tfallbackState := Queued\n\t\terr := d.RunContainer(d, c, tracker.updates)\n\t\tif err != nil {\n\t\t\ttext := fmt.Sprintf(\"Error running container %s: %s\", c.UUID, err)\n\t\t\tif err, ok := err.(container.ConstraintsNotSatisfiableError); ok {\n\t\t\t\tfallbackState = Cancelled\n\t\t\t\tvar logBuf bytes.Buffer\n\t\t\t\tfmt.Fprintf(&logBuf, \"cannot run container %s: %s\\n\", c.UUID, err)\n\t\t\t\tif len(err.AvailableTypes) == 0 {\n\t\t\t\t\tfmt.Fprint(&logBuf, \"No instance types are configured.\\n\")\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprint(&logBuf, \"Available 
instance types:\\n\")\n\t\t\t\t\tfor _, t := range err.AvailableTypes {\n\t\t\t\t\t\tfmt.Fprintf(&logBuf,\n\t\t\t\t\t\t\t\"Type %q: %d VCPUs, %d RAM, %d Scratch, %f Price\\n\",\n\t\t\t\t\t\t\tt.Name, t.VCPUs, t.RAM, t.Scratch, t.Price)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\ttext = logBuf.String()\n\t\t\t}\n\t\t\td.Logger.Printf(\"%s\", text)\n\t\t\tlr := arvadosclient.Dict{\"log\": arvadosclient.Dict{\n\t\t\t\t\"object_uuid\": c.UUID,\n\t\t\t\t\"event_type\":  \"dispatch\",\n\t\t\t\t\"properties\":  map[string]string{\"text\": text}}}\n\t\t\td.Arv.Create(\"logs\", lr, nil)\n\t\t}\n\t\t// If checkListForUpdates() doesn't close the tracker\n\t\t// after 2 queue updates, try to move the container to\n\t\t// the fallback state, which should eventually work\n\t\t// and cause the tracker to close.\n\t\tupdates := 0\n\t\tfor upd := range tracker.updates {\n\t\t\tupdates++\n\t\t\tif upd.State == Locked || upd.State == Running {\n\t\t\t\t// Tracker didn't clean up before\n\t\t\t\t// returning -- or this is the first\n\t\t\t\t// update and it contains stale\n\t\t\t\t// information from before\n\t\t\t\t// RunContainer() returned.\n\t\t\t\tif updates < 2 {\n\t\t\t\t\t// Avoid generating confusing\n\t\t\t\t\t// logs / API calls in the\n\t\t\t\t\t// stale-info case.\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\td.Logger.Printf(\"container %s state is still %s, changing to %s\", c.UUID, upd.State, fallbackState)\n\t\t\t\td.UpdateState(c.UUID, fallbackState)\n\t\t\t}\n\t\t}\n\t}()\n\treturn tracker\n}\n\nfunc (d *Dispatcher) checkForUpdates(filters [][]interface{}, todo map[string]*runTracker) bool {\n\tvar countList arvados.ContainerList\n\tparams := arvadosclient.Dict{\n\t\t\"filters\": filters,\n\t\t\"count\":   \"exact\",\n\t\t\"limit\":   0,\n\t\t\"order\":   []string{\"priority desc\"}}\n\terr := d.Arv.List(\"containers\", params, &countList)\n\tif err != nil {\n\t\td.Logger.Warnf(\"error getting count of containers: %q\", err)\n\t\treturn false\n\t}\n\titemsAvailable := countList.ItemsAvailable\n\tparams = arvadosclient.Dict{\n\t\t\"filters\": filters,\n\t\t\"count\":   \"none\",\n\t\t\"limit\":   d.BatchSize,\n\t\t\"order\":   []string{\"priority desc\"}}\n\toffset := 0\n\tfor {\n\t\tparams[\"offset\"] = offset\n\n\t\t// This list variable must be a new one declared\n\t\t// inside the loop: otherwise, items in the API\n\t\t// response would get deep-merged into the items\n\t\t// loaded in previous iterations.\n\t\tvar list arvados.ContainerList\n\n\t\terr := d.Arv.List(\"containers\", params, &list)\n\t\tif err != nil {\n\t\t\td.Logger.Warnf(\"error getting list of containers: %q\", err)\n\t\t\treturn false\n\t\t}\n\t\td.checkListForUpdates(list.Items, todo)\n\t\toffset += len(list.Items)\n\t\tif len(list.Items) == 0 || itemsAvailable <= offset {\n\t\t\treturn true\n\t\t}\n\t}\n}\n\nfunc (d *Dispatcher) checkListForUpdates(containers []arvados.Container, todo map[string]*runTracker) {\n\td.mtx.Lock()\n\tdefer d.mtx.Unlock()\n\tif d.trackers == nil {\n\t\td.trackers = make(map[string]*runTracker)\n\t}\n\n\tfor _, c := range containers {\n\t\ttracker, alreadyTracking := d.trackers[c.UUID]\n\t\tdelete(todo, c.UUID)\n\n\t\tif c.LockedByUUID != \"\" && c.LockedByUUID != d.auth.UUID {\n\t\t\td.Logger.Debugf(\"ignoring %s locked by %s\", c.UUID, c.LockedByUUID)\n\t\t} else if alreadyTracking {\n\t\t\tswitch c.State {\n\t\t\tcase Queued, Cancelled, Complete:\n\t\t\t\td.Logger.Debugf(\"update has %s in state %s, closing tracker\", c.UUID, c.State)\n\t\t\t\ttracker.close()\n\t\t\t\tdelete(d.trackers, 
c.UUID)\n\t\t\tcase Locked, Running:\n\t\t\t\td.Logger.Debugf(\"update has %s in state %s, updating tracker\", c.UUID, c.State)\n\t\t\t\ttracker.update(c)\n\t\t\t}\n\t\t} else {\n\t\t\tswitch c.State {\n\t\t\tcase Queued:\n\t\t\t\tif !d.throttle.Check(c.UUID) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\terr := d.lock(c.UUID)\n\t\t\t\tif err != nil {\n\t\t\t\t\td.Logger.Warnf(\"error locking container %s: %s\", c.UUID, err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tc.State = Locked\n\t\t\t\td.trackers[c.UUID] = d.start(c)\n\t\t\tcase Locked, Running:\n\t\t\t\tif !d.throttle.Check(c.UUID) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\td.trackers[c.UUID] = d.start(c)\n\t\t\tcase Cancelled, Complete:\n\t\t\t\t// no-op (we already stopped monitoring)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// UpdateState makes an API call to change the state of a container.\nfunc (d *Dispatcher) UpdateState(uuid string, state arvados.ContainerState) error {\n\terr := d.Arv.Update(\"containers\", uuid,\n\t\tarvadosclient.Dict{\n\t\t\t\"container\": arvadosclient.Dict{\"state\": state},\n\t\t}, nil)\n\tif err != nil {\n\t\td.Logger.Warnf(\"error updating container %s to state %q: %s\", uuid, state, err)\n\t}\n\treturn err\n}\n\n// Lock makes the lock API call which updates the state of a container to Locked.\nfunc (d *Dispatcher) lock(uuid string) error {\n\treturn d.Arv.Call(\"POST\", \"containers\", uuid, \"lock\", nil, nil)\n}\n\n// Unlock makes the unlock API call which updates the state of a container to Queued.\nfunc (d *Dispatcher) Unlock(uuid string) error {\n\treturn d.Arv.Call(\"POST\", \"containers\", uuid, \"unlock\", nil, nil)\n}\n\n// TrackContainer ensures a tracker is running for the given UUID,\n// regardless of the current state of the container (except: if the\n// container is locked by a different dispatcher, a tracker will not\n// be started). If the container is not in Locked or Running state,\n// the new tracker will close down immediately.\n//\n// This allows the dispatcher to put its own RunContainer func into a\n// cleanup phase (for example, to kill local processes created by a\n// previous dispatch process that are still running even though the\n// container state is final) without the risk of having multiple\n// goroutines monitoring the same UUID.\nfunc (d *Dispatcher) TrackContainer(uuid string) error {\n\tvar cntr arvados.Container\n\terr := d.Arv.Call(\"GET\", \"containers\", uuid, \"\", nil, &cntr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cntr.LockedByUUID != \"\" && cntr.LockedByUUID != d.auth.UUID {\n\t\treturn nil\n\t}\n\n\td.mtx.Lock()\n\tdefer d.mtx.Unlock()\n\tif _, alreadyTracking := d.trackers[uuid]; alreadyTracking {\n\t\treturn nil\n\t}\n\tif d.trackers == nil {\n\t\td.trackers = make(map[string]*runTracker)\n\t}\n\td.trackers[uuid] = d.start(cntr)\n\tswitch cntr.State {\n\tcase Queued, Cancelled, Complete:\n\t\td.trackers[uuid].close()\n\t}\n\treturn nil\n}\n\ntype runTracker struct {\n\tclosing bool\n\tupdates chan arvados.Container\n\tlogger  Logger\n}\n\nfunc (tracker *runTracker) close() {\n\tif !tracker.closing {\n\t\tclose(tracker.updates)\n\t}\n\ttracker.closing = true\n}\n\nfunc (tracker *runTracker) update(c arvados.Container) {\n\tif tracker.closing {\n\t\treturn\n\t}\n\tselect {\n\tcase <-tracker.updates:\n\t\ttracker.logger.Debugf(\"runner is handling updates slowly, discarded previous update for %s\", c.UUID)\n\tdefault:\n\t}\n\ttracker.updates <- c\n}\n"
  },
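  {
    "path": "sdk/go/dispatch/example_test.go",
    "content": "// Illustrative sketch -- a hypothetical file, not part of the\n// upstream tree. It shows the minimal wiring a dispatcher needs:\n// construct a Dispatcher with an API client, a poll period, and a\n// RunContainer callback. The callback body here is a placeholder\n// that just drains status updates until the tracker closes.\npackage dispatch_test\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/dispatch\"\n)\n\nfunc Example() {\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\td := &dispatch.Dispatcher{\n\t\tArv:          arv,\n\t\tPollPeriod:   time.Second,\n\t\tRunContainer: func(dsp *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) error {\n\t\t\t// A real dispatcher would submit ctr to its queuing\n\t\t\t// system here, then watch status for priority and\n\t\t\t// state changes until the channel closes.\n\t\t\tfor range status {\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\t// Run polls for queued containers until the context is cancelled.\n\tctx, cancel := context.WithTimeout(context.Background(), time.Minute)\n\tdefer cancel()\n\td.Run(ctx)\n}\n"
  },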
  {
    "path": "sdk/go/dispatch/dispatch_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage dispatch\n\nimport (\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t. \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nvar _ = Suite(&suite{})\n\ntype suite struct{}\n\nfunc (s *suite) TestTrackContainer(c *C) {\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Assert(err, Equals, nil)\n\tarv.ApiToken = arvadostest.SystemRootToken\n\n\tdone := make(chan bool, 1)\n\ttime.AfterFunc(10*time.Second, func() { done <- false })\n\td := &Dispatcher{\n\t\tArv: arv,\n\t\tRunContainer: func(dsp *Dispatcher, ctr arvados.Container, status <-chan arvados.Container) error {\n\t\t\tfor ctr := range status {\n\t\t\t\tc.Logf(\"%#v\", ctr)\n\t\t\t}\n\t\t\tdone <- true\n\t\t\treturn nil\n\t\t},\n\t}\n\td.TrackContainer(arvadostest.QueuedContainerUUID)\n\tc.Assert(<-done, Equals, true)\n}\n"
  },
  {
    "path": "sdk/go/dispatch/throttle.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage dispatch\n\nimport (\n\t\"sync\"\n\t\"time\"\n)\n\ntype throttleEnt struct {\n\tlast time.Time // last attempt that was allowed\n}\n\ntype throttle struct {\n\thold      time.Duration\n\tseen      map[string]*throttleEnt\n\tupdated   sync.Cond\n\tsetupOnce sync.Once\n\tmtx       sync.Mutex\n}\n\n// Check checks whether there have been too many recent attempts with\n// the given uuid, and returns true if it's OK to attempt [again] now.\nfunc (t *throttle) Check(uuid string) bool {\n\tif t.hold == 0 {\n\t\treturn true\n\t}\n\tt.setupOnce.Do(t.setup)\n\tt.mtx.Lock()\n\tdefer t.updated.Broadcast()\n\tdefer t.mtx.Unlock()\n\tent, ok := t.seen[uuid]\n\tif !ok {\n\t\tt.seen[uuid] = &throttleEnt{last: time.Now()}\n\t\treturn true\n\t}\n\tif time.Since(ent.last) < t.hold {\n\t\treturn false\n\t}\n\tent.last = time.Now()\n\treturn true\n}\n\nfunc (t *throttle) setup() {\n\tt.seen = make(map[string]*throttleEnt)\n\tt.updated.L = &t.mtx\n\tgo func() {\n\t\tfor range time.NewTicker(t.hold).C {\n\t\t\tt.mtx.Lock()\n\t\t\tfor uuid, ent := range t.seen {\n\t\t\t\tif time.Since(ent.last) >= t.hold {\n\t\t\t\t\tdelete(t.seen, uuid)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// don't bother cleaning again until the next update\n\t\t\tt.updated.Wait()\n\t\t\tt.mtx.Unlock()\n\t\t}\n\t}()\n}\n"
  },
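  {
    "path": "sdk/go/dispatch/throttle_example_test.go",
    "content": "// Illustrative sketch -- a hypothetical in-package file, not part of\n// the upstream tree. It shows the Check contract the dispatcher\n// relies on: the first attempt for a given UUID is allowed, and\n// repeat attempts are refused until the hold period has elapsed.\npackage dispatch\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\nfunc exampleThrottle() {\n\tt := throttle{hold: time.Minute}\n\tuuid := \"zzzzz-dz642-zzzzzzzzzzzzzzz\" // hypothetical container UUID\n\tfmt.Println(t.Check(uuid)) // true: first attempt is allowed\n\tfmt.Println(t.Check(uuid)) // false: repeat attempt within the hold period\n}\n"
  },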
  {
    "path": "sdk/go/dispatch/throttle_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage dispatch\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&ThrottleTestSuite{})\n\ntype ThrottleTestSuite struct{}\n\nfunc (*ThrottleTestSuite) TestThrottle(c *check.C) {\n\tuuid := \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\"\n\tt0 := throttle{}\n\tc.Check(t0.Check(uuid), check.Equals, true)\n\tc.Check(t0.Check(uuid), check.Equals, true)\n\n\ttNs := throttle{hold: time.Nanosecond}\n\tc.Check(tNs.Check(uuid), check.Equals, true)\n\ttime.Sleep(time.Microsecond)\n\tc.Check(tNs.Check(uuid), check.Equals, true)\n\n\ttMin := throttle{hold: time.Minute}\n\tc.Check(tMin.Check(uuid), check.Equals, true)\n\tc.Check(tMin.Check(uuid), check.Equals, false)\n\tc.Check(tMin.Check(uuid), check.Equals, false)\n\ttMin.seen[uuid].last = time.Now().Add(-time.Hour)\n\tc.Check(tMin.Check(uuid), check.Equals, true)\n\tc.Check(tMin.Check(uuid), check.Equals, false)\n}\n"
  },
  {
    "path": "sdk/go/health/aggregator.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage health\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/tls\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst (\n\tdefaultTimeout = arvados.Duration(2 * time.Second)\n\tmaxClockSkew   = time.Minute\n)\n\n// Aggregator implements service.Handler. It handles \"GET /_health/all\"\n// by checking the health of all configured services on the cluster\n// and responding 200 if everything is healthy.\ntype Aggregator struct {\n\tsetupOnce  sync.Once\n\thttpClient *http.Client\n\ttimeout    arvados.Duration\n\n\tCluster *arvados.Cluster\n\n\t// If non-nil, Log is called after handling each request.\n\tLog func(*http.Request, error)\n\n\t// If non-nil, report clock skew on each health-check.\n\tMetricClockSkew prometheus.Gauge\n}\n\nfunc (agg *Aggregator) setup() {\n\tagg.httpClient = &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: agg.Cluster.TLS.Insecure,\n\t\t\t},\n\t\t},\n\t}\n\tif agg.timeout == 0 {\n\t\t// this is always the case, except in the test suite\n\t\tagg.timeout = defaultTimeout\n\t}\n}\n\nfunc (agg *Aggregator) CheckHealth() error {\n\treturn nil\n}\n\nfunc (agg *Aggregator) Done() <-chan struct{} {\n\treturn nil\n}\n\nfunc (agg *Aggregator) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tagg.setupOnce.Do(agg.setup)\n\tsendErr := func(statusCode int, err error) {\n\t\tresp.WriteHeader(statusCode)\n\t\tjson.NewEncoder(resp).Encode(map[string]string{\"error\": err.Error()})\n\t\tif agg.Log != nil {\n\t\t\tagg.Log(req, err)\n\t\t}\n\t}\n\n\tresp.Header().Set(\"Content-Type\", \"application/json\")\n\n\tif !agg.checkAuth(req) {\n\t\tsendErr(http.StatusUnauthorized, errUnauthorized)\n\t\treturn\n\t}\n\tif req.URL.Path == \"/_health/all\" {\n\t\tjson.NewEncoder(resp).Encode(agg.ClusterHealth())\n\t} else if req.URL.Path == \"/_health/ping\" {\n\t\tresp.Write(healthyBody)\n\t} else {\n\t\tsendErr(http.StatusNotFound, errNotFound)\n\t\treturn\n\t}\n\tif agg.Log != nil {\n\t\tagg.Log(req, nil)\n\t}\n}\n\ntype ClusterHealthResponse struct {\n\t// \"OK\" if all needed services are OK, otherwise \"ERROR\".\n\tHealth string\n\n\t// An entry for each known health check of each known instance\n\t// of each needed component: \"instance of service S on node N\n\t// reports health-check C is OK.\"\n\tChecks map[string]CheckResult\n\n\t// An entry for each service type: \"service S is OK.\" This\n\t// exposes problems that can't be expressed in Checks, like\n\t// \"service S is needed, but isn't configured to run\n\t// anywhere.\"\n\tServices map[arvados.ServiceName]ServiceHealth\n\n\t// Difference between min/max timestamps in individual\n\t// health-check responses.\n\tClockSkew arvados.Duration\n\n\tErrors []string\n}\n\ntype CheckResult struct {\n\tHealth         string\n\tError          string                 `json:\",omitempty\"`\n\tHTTPStatusCode int                    
`json:\",omitempty\"`\n\tResponse       map[string]interface{} `json:\",omitempty\"`\n\tResponseTime   json.Number\n\tClockTime      time.Time\n\tServer         string // \"Server\" header in http response\n\tMetrics\n\trespTime time.Duration\n}\n\ntype Metrics struct {\n\tConfigSourceTimestamp time.Time\n\tConfigSourceSHA256    string\n\tVersion               string\n}\n\ntype ServiceHealth struct {\n\tHealth string // \"OK\", \"ERROR\", or \"SKIP\"\n\tN      int\n}\n\nfunc (agg *Aggregator) ClusterHealth() ClusterHealthResponse {\n\tagg.setupOnce.Do(agg.setup)\n\tresp := ClusterHealthResponse{\n\t\tHealth:   \"OK\",\n\t\tChecks:   make(map[string]CheckResult),\n\t\tServices: make(map[arvados.ServiceName]ServiceHealth),\n\t}\n\n\tmtx := sync.Mutex{}\n\twg := sync.WaitGroup{}\n\tfor svcName, svc := range agg.Cluster.Services.Map() {\n\t\t// Ensure svc is listed in resp.Services.\n\t\tmtx.Lock()\n\t\tif _, ok := resp.Services[svcName]; !ok {\n\t\t\tresp.Services[svcName] = ServiceHealth{Health: \"MISSING\"}\n\t\t}\n\t\tmtx.Unlock()\n\n\t\tcheckURLs := map[arvados.URL]bool{}\n\t\tfor addr := range svc.InternalURLs {\n\t\t\tcheckURLs[addr] = true\n\t\t}\n\t\tif len(checkURLs) == 0 && svc.ExternalURL.Host != \"\" {\n\t\t\tcheckURLs[svc.ExternalURL] = true\n\t\t}\n\t\tfor addr := range checkURLs {\n\t\t\twg.Add(1)\n\t\t\tgo func(svcName arvados.ServiceName, addr arvados.URL) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tvar result CheckResult\n\t\t\t\tpingURL, err := agg.pingURL(addr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tresult = CheckResult{\n\t\t\t\t\t\tHealth: \"ERROR\",\n\t\t\t\t\t\tError:  err.Error(),\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tresult = agg.ping(pingURL)\n\t\t\t\t\tif result.Health != \"SKIP\" {\n\t\t\t\t\t\tm, err := agg.metrics(pingURL)\n\t\t\t\t\t\tif err != nil && result.Error == \"\" {\n\t\t\t\t\t\t\tresult.Error = \"metrics: \" + err.Error()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tresult.Metrics = m\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tmtx.Lock()\n\t\t\t\tdefer mtx.Unlock()\n\t\t\t\tresp.Checks[fmt.Sprintf(\"%s+%s\", svcName, pingURL)] = result\n\t\t\t\tif result.Health == \"OK\" || result.Health == \"SKIP\" {\n\t\t\t\t\th := resp.Services[svcName]\n\t\t\t\t\th.N++\n\t\t\t\t\tif result.Health == \"OK\" || h.N == 1 {\n\t\t\t\t\t\t// \"\" => \"SKIP\" or \"OK\"\n\t\t\t\t\t\t// \"SKIP\" => \"OK\"\n\t\t\t\t\t\th.Health = result.Health\n\t\t\t\t\t}\n\t\t\t\t\tresp.Services[svcName] = h\n\t\t\t\t} else {\n\t\t\t\t\tresp.Health = \"ERROR\"\n\t\t\t\t\tresp.Errors = append(resp.Errors, fmt.Sprintf(\"%s: %s: %s\", svcName, result.Health, result.Error))\n\t\t\t\t}\n\t\t\t}(svcName, addr)\n\t\t}\n\t}\n\twg.Wait()\n\n\t// Report ERROR if a needed service didn't fail any checks\n\t// merely because it isn't configured to run anywhere.\n\tfor svcName, sh := range resp.Services {\n\t\tswitch svcName {\n\t\tcase arvados.ServiceNameDispatchCloud,\n\t\t\tarvados.ServiceNameDispatchLSF,\n\t\t\tarvados.ServiceNameDispatchSLURM:\n\t\t\t// ok to not run any given dispatcher\n\t\tcase arvados.ServiceNameHealth,\n\t\t\tarvados.ServiceNameWorkbench1,\n\t\t\tarvados.ServiceNameWorkbench2:\n\t\t\t// typically doesn't have InternalURLs in config\n\t\tdefault:\n\t\t\tif sh.Health != \"OK\" && sh.Health != \"SKIP\" {\n\t\t\t\tresp.Health = \"ERROR\"\n\t\t\t\tresp.Errors = append(resp.Errors, fmt.Sprintf(\"%s: %s: no InternalURLs configured\", svcName, sh.Health))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check for clock skew between hosts\n\tvar maxResponseTime time.Duration\n\tvar clockMin, clockMax time.Time\n\tfor 
_, result := range resp.Checks {\n\t\tif result.ClockTime.IsZero() {\n\t\t\tcontinue\n\t\t}\n\t\tif clockMin.IsZero() || result.ClockTime.Before(clockMin) {\n\t\t\tclockMin = result.ClockTime\n\t\t}\n\t\tif result.ClockTime.After(clockMax) {\n\t\t\tclockMax = result.ClockTime\n\t\t}\n\t\tif result.respTime > maxResponseTime {\n\t\t\tmaxResponseTime = result.respTime\n\t\t}\n\t}\n\tskew := clockMax.Sub(clockMin)\n\tresp.ClockSkew = arvados.Duration(skew)\n\tif skew > maxClockSkew+maxResponseTime {\n\t\tmsg := fmt.Sprintf(\"clock skew detected: maximum timestamp spread is %s (exceeds warning threshold of %s)\", resp.ClockSkew, arvados.Duration(maxClockSkew))\n\t\tresp.Errors = append(resp.Errors, msg)\n\t\tresp.Health = \"ERROR\"\n\t}\n\tif agg.MetricClockSkew != nil {\n\t\tagg.MetricClockSkew.Set(skew.Seconds())\n\t}\n\n\t// Check for mismatched config files\n\tvar newest Metrics\n\tfor _, result := range resp.Checks {\n\t\tif result.Metrics.ConfigSourceTimestamp.After(newest.ConfigSourceTimestamp) {\n\t\t\tnewest = result.Metrics\n\t\t}\n\t}\n\tvar mismatches []string\n\tfor target, result := range resp.Checks {\n\t\tif hash := result.Metrics.ConfigSourceSHA256; hash != \"\" && hash != newest.ConfigSourceSHA256 {\n\t\t\tmismatches = append(mismatches, target)\n\t\t}\n\t}\n\tfor _, target := range mismatches {\n\t\tmsg := fmt.Sprintf(\"outdated config: %s: config file (sha256 %s) does not match latest version with timestamp %s\",\n\t\t\tstrings.TrimSuffix(target, \"/_health/ping\"),\n\t\t\tresp.Checks[target].Metrics.ConfigSourceSHA256,\n\t\t\tnewest.ConfigSourceTimestamp.Format(time.RFC3339))\n\t\tresp.Errors = append(resp.Errors, msg)\n\t\tresp.Health = \"ERROR\"\n\t}\n\n\t// Check for services running a different version than we are.\n\tfor target, result := range resp.Checks {\n\t\tif result.Metrics.Version != \"\" && !sameVersion(result.Metrics.Version, cmd.Version.String()) {\n\t\t\tmsg := fmt.Sprintf(\"version mismatch: %s is running %s -- expected %s\",\n\t\t\t\tstrings.TrimSuffix(target, \"/_health/ping\"),\n\t\t\t\tresult.Metrics.Version,\n\t\t\t\tcmd.Version.String())\n\t\t\tresp.Errors = append(resp.Errors, msg)\n\t\t\tresp.Health = \"ERROR\"\n\t\t}\n\t}\n\treturn resp\n}\n\nfunc (agg *Aggregator) pingURL(svcURL arvados.URL) (*url.URL, error) {\n\tbase := url.URL(svcURL)\n\treturn base.Parse(\"/_health/ping\")\n}\n\nfunc (agg *Aggregator) ping(target *url.URL) (result CheckResult) {\n\tt0 := time.Now()\n\tdefer func() {\n\t\tresult.respTime = time.Since(t0)\n\t\tresult.ResponseTime = json.Number(fmt.Sprintf(\"%.6f\", result.respTime.Seconds()))\n\t}()\n\tresult.Health = \"ERROR\"\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Duration(agg.timeout))\n\tdefer cancel()\n\treq, err := http.NewRequestWithContext(ctx, \"GET\", target.String(), nil)\n\tif err != nil {\n\t\tresult.Error = err.Error()\n\t\treturn\n\t}\n\treq.Header.Set(\"Authorization\", \"Bearer \"+agg.Cluster.ManagementToken)\n\n\t// Avoid workbench1's redirect-http-to-https feature\n\treq.Header.Set(\"X-Forwarded-Proto\", \"https\")\n\n\tresp, err := agg.httpClient.Do(req)\n\tif urlerr, ok := err.(*url.Error); ok {\n\t\tif neterr, ok := urlerr.Err.(*net.OpError); ok && isLocalHost(target.Hostname()) {\n\t\t\tresult = CheckResult{\n\t\t\t\tHealth: \"SKIP\",\n\t\t\t\tError:  neterr.Error(),\n\t\t\t}\n\t\t\terr = nil\n\t\t\treturn\n\t\t}\n\t}\n\tif err != nil {\n\t\tresult.Error = err.Error()\n\t\treturn\n\t}\n\tresult.HTTPStatusCode = resp.StatusCode\n\terr = 
json.NewDecoder(resp.Body).Decode(&result.Response)\n\tif err != nil {\n\t\tresult.Error = fmt.Sprintf(\"cannot decode response: %s\", err)\n\t} else if resp.StatusCode != http.StatusOK {\n\t\tresult.Error = fmt.Sprintf(\"HTTP %d %s\", resp.StatusCode, resp.Status)\n\t} else if h, _ := result.Response[\"health\"].(string); h != \"OK\" {\n\t\tif e, ok := result.Response[\"error\"].(string); ok && e != \"\" {\n\t\t\tresult.Error = e\n\t\t\treturn\n\t\t} else {\n\t\t\tresult.Error = fmt.Sprintf(\"health=%q in ping response\", h)\n\t\t\treturn\n\t\t}\n\t}\n\tresult.Health = \"OK\"\n\tresult.ClockTime, _ = time.Parse(time.RFC1123, resp.Header.Get(\"Date\"))\n\tresult.Server = resp.Header.Get(\"Server\")\n\treturn\n}\n\nvar (\n\treConfigMetric  = regexp.MustCompile(`arvados_config_source_timestamp_seconds{sha256=\"([0-9a-f]+)\"} (\\d[\\d\\.e\\+]+)`)\n\treVersionMetric = regexp.MustCompile(`arvados_version_running{version=\"([^\"]+)\"} 1`)\n)\n\nfunc (agg *Aggregator) metrics(pingURL *url.URL) (result Metrics, err error) {\n\tmetricsURL, err := pingURL.Parse(\"/metrics\")\n\tif err != nil {\n\t\treturn\n\t}\n\tctx, cancel := context.WithTimeout(context.Background(), time.Duration(agg.timeout))\n\tdefer cancel()\n\treq, err := http.NewRequestWithContext(ctx, \"GET\", metricsURL.String(), nil)\n\tif err != nil {\n\t\treturn\n\t}\n\treq.Header.Set(\"Authorization\", \"Bearer \"+agg.Cluster.ManagementToken)\n\n\t// Avoid workbench1's redirect-http-to-https feature\n\treq.Header.Set(\"X-Forwarded-Proto\", \"https\")\n\n\tresp, err := agg.httpClient.Do(req)\n\tif err != nil {\n\t\treturn\n\t} else if resp.StatusCode != http.StatusOK {\n\t\terr = fmt.Errorf(\"%s: HTTP %d %s\", metricsURL.String(), resp.StatusCode, resp.Status)\n\t\treturn\n\t}\n\n\tscanner := bufio.NewScanner(resp.Body)\n\tfor scanner.Scan() {\n\t\tif m := reConfigMetric.FindSubmatch(scanner.Bytes()); len(m) == 3 && len(m[1]) > 0 {\n\t\t\tresult.ConfigSourceSHA256 = string(m[1])\n\t\t\tunixtime, _ := strconv.ParseFloat(string(m[2]), 64)\n\t\t\tresult.ConfigSourceTimestamp = time.UnixMicro(int64(unixtime * 1e6))\n\t\t} else if m = reVersionMetric.FindSubmatch(scanner.Bytes()); len(m) == 2 && len(m[1]) > 0 {\n\t\t\tresult.Version = string(m[1])\n\t\t}\n\t}\n\tif err = scanner.Err(); err != nil {\n\t\terr = fmt.Errorf(\"error parsing response from %s: %w\", metricsURL.String(), err)\n\t\treturn\n\t}\n\treturn\n}\n\n// Test whether host is an easily recognizable loopback address:\n// 0.0.0.0, 127.x.x.x, ::1, or localhost.\nfunc isLocalHost(host string) bool {\n\tip := net.ParseIP(host)\n\treturn ip.IsLoopback() || bytes.Equal(ip.To4(), []byte{0, 0, 0, 0}) || strings.EqualFold(host, \"localhost\")\n}\n\nfunc (agg *Aggregator) checkAuth(req *http.Request) bool {\n\tcreds := auth.CredentialsFromRequest(req)\n\tfor _, token := range creds.Tokens {\n\t\tif token != \"\" && token == agg.Cluster.ManagementToken {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nvar errSilent = errors.New(\"\")\n\nvar CheckCommand cmd.Handler = checkCommand{}\n\ntype checkCommand struct{}\n\nfunc (ccmd checkCommand) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tlogger := ctxlog.New(stderr, \"json\", \"info\")\n\tctx := ctxlog.Context(context.Background(), logger)\n\terr := ccmd.run(ctx, prog, args, stdin, stdout, stderr)\n\tif err != nil {\n\t\tif err != errSilent {\n\t\t\tfmt.Fprintln(stderr, err.Error())\n\t\t}\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc (ccmd checkCommand) run(ctx context.Context, prog string, args 
[]string, stdin io.Reader, stdout, stderr io.Writer) error {\n\tflags := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tflags.SetOutput(stderr)\n\tloader := config.NewLoader(stdin, ctxlog.New(stderr, \"text\", \"info\"))\n\tloader.SetupFlags(flags)\n\tversionFlag := flags.Bool(\"version\", false, \"Write version information to stdout and exit 0\")\n\ttimeout := flags.Duration(\"timeout\", defaultTimeout.Duration(), \"Maximum time to wait for health responses\")\n\tquiet := flags.Bool(\"quiet\", false, \"Silent on success (suppress 'health check OK' message on stderr)\")\n\toutputYAML := flags.Bool(\"yaml\", false, \"Output full health report in YAML format (default mode prints 'health check OK' or plain text errors)\")\n\tif ok, _ := cmd.ParseFlags(flags, prog, args, \"\", stderr); !ok {\n\t\t// cmd.ParseFlags already reported the error\n\t\treturn errSilent\n\t} else if *versionFlag {\n\t\tcmd.Version.RunCommand(prog, args, stdin, stdout, stderr)\n\t\treturn nil\n\t}\n\tcfg, err := loader.Load()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tlogger := ctxlog.New(stderr, cluster.SystemLogs.Format, cluster.SystemLogs.LogLevel).WithFields(logrus.Fields{\n\t\t\"ClusterID\": cluster.ClusterID,\n\t})\n\tctx = ctxlog.Context(ctx, logger)\n\tagg := Aggregator{Cluster: cluster, timeout: arvados.Duration(*timeout)}\n\tresp := agg.ClusterHealth()\n\tif *outputYAML {\n\t\ty, err := yaml.Marshal(resp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstdout.Write(y)\n\t\tif resp.Health != \"OK\" {\n\t\t\treturn errSilent\n\t\t}\n\t\treturn nil\n\t}\n\tif resp.Health != \"OK\" {\n\t\tfor _, msg := range resp.Errors {\n\t\t\tfmt.Fprintln(stderr, msg)\n\t\t}\n\t\tfmt.Fprintln(stderr, \"health check failed\")\n\t\treturn errSilent\n\t}\n\tif !*quiet {\n\t\tfmt.Fprintln(stderr, \"health check OK\")\n\t}\n\treturn nil\n}\n\nvar (\n\treGoVersion  = regexp.MustCompile(` \\(go\\d+([\\d.])*\\)$`)\n\treDevVersion = regexp.MustCompile(`~dev\\d+$`)\n)\n\n// Return true if either a==b or the only difference is that one has a\n// \" (go1.2.3)\" suffix and the other does not.\n//\n// This allows us to recognize a non-Go (rails) service as the same\n// version as a Go service.\nfunc sameVersion(a, b string) bool {\n\t// Strip \" (go1.2.3)\" suffix\n\ta = reGoVersion.ReplaceAllLiteralString(a, \"\")\n\tb = reGoVersion.ReplaceAllLiteralString(b, \"\")\n\tanodev := reDevVersion.ReplaceAllLiteralString(a, \"\")\n\tbnodev := reDevVersion.ReplaceAllLiteralString(b, \"\")\n\treturn anodev == bnodev && (a == anodev) == (b == bnodev)\n}\n"
  },
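  {
    "path": "sdk/go/health/aggregator_example_test.go",
    "content": "// Illustrative sketch -- a hypothetical file, not part of the\n// upstream tree. It shows how a caller might mount the Aggregator as\n// an ordinary http.Handler: requests to /_health/all are checked\n// against cluster.ManagementToken and answered with a JSON\n// ClusterHealthResponse. The listen address is a placeholder.\npackage health_test\n\nimport (\n\t\"net/http\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/health\"\n)\n\nfunc exampleAggregator(cluster *arvados.Cluster) error {\n\tagg := &health.Aggregator{Cluster: cluster}\n\treturn http.ListenAndServe(\":9400\", agg) // hypothetical port\n}\n"
  },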
  {
    "path": "sdk/go/health/aggregator_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage health\n\nimport (\n\t\"bytes\"\n\t\"crypto/sha256\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/ghodss/yaml\"\n\t\"gopkg.in/check.v1\"\n)\n\ntype AggregatorSuite struct {\n\thandler *Aggregator\n\treq     *http.Request\n\tresp    *httptest.ResponseRecorder\n}\n\n// Gocheck boilerplate\nvar _ = check.Suite(&AggregatorSuite{})\n\nfunc (s *AggregatorSuite) TestInterface(c *check.C) {\n\tvar _ http.Handler = &Aggregator{}\n}\n\nfunc (s *AggregatorSuite) SetUpTest(c *check.C) {\n\tldr := config.NewLoader(bytes.NewBufferString(`Clusters: {zzzzz: {}}`), ctxlog.TestLogger(c))\n\tldr.Path = \"-\"\n\tcfg, err := ldr.Load()\n\tc.Assert(err, check.IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\tcluster.ManagementToken = arvadostest.ManagementToken\n\tcluster.SystemRootToken = arvadostest.SystemRootToken\n\tcluster.Collections.BlobSigningKey = arvadostest.BlobSigningKey\n\tcluster.Volumes[\"z\"] = arvados.Volume{StorageClasses: map[string]bool{\"default\": true}}\n\tcluster.Containers.LocalKeepBlobBuffersPerVCPU = 0\n\ts.handler = &Aggregator{Cluster: cluster}\n\ts.req = httptest.NewRequest(\"GET\", \"/_health/all\", nil)\n\ts.req.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ManagementToken)\n\ts.resp = httptest.NewRecorder()\n}\n\nfunc (s *AggregatorSuite) TestSameVersion(c *check.C) {\n\tc.Check(sameVersion(\"2.8.0~dev20240610194320 (go1.21.10)\", \"2.8.1~dev20240610194320\"), check.Equals, false)\n\tc.Check(sameVersion(\"2.8.0~dev20240610194320 (go1.21.10)\", \"2.8.1~dev20240610194320 (go1.21.10)\"), check.Equals, false)\n\tc.Check(sameVersion(\"2.8.0~dev20240610194320 (go1.21.10)\", \"2.8.1~dev20240610194320 (go1.21.9)\"), check.Equals, false)\n\tc.Check(sameVersion(\"2.8.0~dev20240610194320 (go1.21.10)\", \"2.8.0~dev20240610194320 (go1.21.9)\"), check.Equals, true)\n\tc.Check(sameVersion(\"2.8.0~dev20240610194320 (go1.21.10)\", \"2.8.0~dev20240611211146 (go1.21.10)\"), check.Equals, true)\n\tc.Check(sameVersion(\"2.8.0~dev20240610194320 (go1.21.10)\", \"2.8.0~dev20240611211146\"), check.Equals, true)\n\tc.Check(sameVersion(\"2.8.0~dev20240610194320 (go1.21.10)\", \"2.8.0\"), check.Equals, false)\n\tc.Check(sameVersion(\"2.8.0~dev20240610194320\", \"2.8.0\"), check.Equals, false)\n\tc.Check(sameVersion(\"2.8.0\", \"2.8.0\"), check.Equals, true)\n\tc.Check(sameVersion(\"2.8.0\", \"2.8.1\"), check.Equals, false)\n}\n\nfunc (s *AggregatorSuite) TestNoAuth(c *check.C) {\n\ts.req.Header.Del(\"Authorization\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\ts.checkError(c)\n\tc.Check(s.resp.Code, check.Equals, http.StatusUnauthorized)\n}\n\nfunc (s *AggregatorSuite) TestBadAuth(c *check.C) {\n\ts.req.Header.Set(\"Authorization\", \"xyzzy\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\ts.checkError(c)\n\tc.Check(s.resp.Code, check.Equals, http.StatusUnauthorized)\n}\n\nfunc (s *AggregatorSuite) TestNoServicesConfigured(c *check.C) {\n\ts.handler.ServeHTTP(s.resp, s.req)\n\ts.checkUnhealthy(c)\n}\n\nfunc (s *AggregatorSuite) stubServer(handler http.Handler) (*httptest.Server, string) {\n\tsrv 
:= httptest.NewServer(handler)\n\tvar port string\n\tif parts := strings.Split(srv.URL, \":\"); len(parts) < 3 {\n\t\tpanic(srv.URL)\n\t} else {\n\t\tport = parts[len(parts)-1]\n\t}\n\treturn srv, \":\" + port\n}\n\nfunc (s *AggregatorSuite) TestUnhealthy(c *check.C) {\n\tsrv, listen := s.stubServer(&unhealthyHandler{})\n\tdefer srv.Close()\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.Keepstore, \"http://localhost\"+listen+\"/\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\ts.checkUnhealthy(c)\n}\n\nfunc (s *AggregatorSuite) TestHealthy(c *check.C) {\n\tsrv, listen := s.stubServer(&healthyHandler{})\n\tdefer srv.Close()\n\ts.setAllServiceURLs(listen)\n\ts.handler.ServeHTTP(s.resp, s.req)\n\tresp := s.checkOK(c)\n\tsvc := \"keepstore+http://localhost\" + listen + \"/_health/ping\"\n\tc.Logf(\"%#v\", resp)\n\tep := resp.Checks[svc]\n\tc.Check(ep.Health, check.Equals, \"OK\")\n\tc.Check(ep.HTTPStatusCode, check.Equals, 200)\n}\n\nfunc (s *AggregatorSuite) TestHealthyAndUnhealthy(c *check.C) {\n\tsrvH, listenH := s.stubServer(&healthyHandler{})\n\tdefer srvH.Close()\n\tsrvU, listenU := s.stubServer(&unhealthyHandler{})\n\tdefer srvU.Close()\n\ts.setAllServiceURLs(listenH)\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.Keepstore, \"http://localhost\"+listenH+\"/\", \"http://127.0.0.1\"+listenU+\"/\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\tresp := s.checkUnhealthy(c)\n\tep := resp.Checks[\"keepstore+http://localhost\"+listenH+\"/_health/ping\"]\n\tc.Check(ep.Health, check.Equals, \"OK\")\n\tc.Check(ep.HTTPStatusCode, check.Equals, 200)\n\tep = resp.Checks[\"keepstore+http://127.0.0.1\"+listenU+\"/_health/ping\"]\n\tc.Check(ep.Health, check.Equals, \"ERROR\")\n\tc.Check(ep.HTTPStatusCode, check.Equals, 200)\n\tc.Logf(\"%#v\", ep)\n}\n\n// If an InternalURL host is 0.0.0.0, localhost, 127/8, or ::1 and\n// nothing is listening there, don't fail the health check -- instead,\n// assume the relevant component just isn't installed/enabled on this\n// node, but does work when contacted through ExternalURL.\nfunc (s *AggregatorSuite) TestUnreachableLoopbackPort(c *check.C) {\n\tsrvH, listenH := s.stubServer(&healthyHandler{})\n\tdefer srvH.Close()\n\ts.setAllServiceURLs(listenH)\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.Keepproxy, \"http://localhost:9/\")\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.Workbench1, \"http://0.0.0.0:9/\")\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.Keepbalance, \"http://127.0.0.127:9/\")\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.WebDAV, \"http://[::1]:9/\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\ts.checkOK(c)\n\n\t// If a non-loopback address is unreachable, that's still a\n\t// fail.\n\ts.resp = httptest.NewRecorder()\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.WebDAV, \"http://172.31.255.254:9/\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\ts.checkUnhealthy(c)\n}\n\nfunc (s *AggregatorSuite) TestIsLocalHost(c *check.C) {\n\tc.Check(isLocalHost(\"Localhost\"), check.Equals, true)\n\tc.Check(isLocalHost(\"localhost\"), check.Equals, true)\n\tc.Check(isLocalHost(\"127.0.0.1\"), check.Equals, true)\n\tc.Check(isLocalHost(\"127.0.0.127\"), check.Equals, true)\n\tc.Check(isLocalHost(\"127.1.2.7\"), check.Equals, true)\n\tc.Check(isLocalHost(\"0.0.0.0\"), check.Equals, true)\n\tc.Check(isLocalHost(\"::1\"), check.Equals, true)\n\tc.Check(isLocalHost(\"1.2.3.4\"), check.Equals, false)\n\tc.Check(isLocalHost(\"1::1\"), check.Equals, false)\n\tc.Check(isLocalHost(\"example.com\"), check.Equals, 
false)\n\tc.Check(isLocalHost(\"127.0.0\"), check.Equals, false)\n\tc.Check(isLocalHost(\"\"), check.Equals, false)\n}\n\nfunc (s *AggregatorSuite) TestConfigMismatch(c *check.C) {\n\t// time1/hash1: current config\n\ttime1 := time.Now().Add(time.Second - time.Minute - time.Hour)\n\thash1 := fmt.Sprintf(\"%x\", sha256.Sum256([]byte(`Clusters: {zzzzz: {SystemRootToken: xyzzy}}`)))\n\t// time2/hash2: old config\n\ttime2 := time1.Add(-time.Hour)\n\thash2 := fmt.Sprintf(\"%x\", sha256.Sum256([]byte(`Clusters: {zzzzz: {SystemRootToken: old-token}}`)))\n\n\t// srv1: current file\n\thandler1 := healthyHandler{configHash: hash1, configTime: time1}\n\tsrv1, listen1 := s.stubServer(&handler1)\n\tdefer srv1.Close()\n\t// srv2: old file, current content\n\thandler2 := healthyHandler{configHash: hash1, configTime: time2}\n\tsrv2, listen2 := s.stubServer(&handler2)\n\tdefer srv2.Close()\n\t// srv3: old file, old content\n\thandler3 := healthyHandler{configHash: hash2, configTime: time2}\n\tsrv3, listen3 := s.stubServer(&handler3)\n\tdefer srv3.Close()\n\t// srv4: no metrics handler\n\thandler4 := healthyHandler{}\n\tsrv4, listen4 := s.stubServer(&handler4)\n\tdefer srv4.Close()\n\n\ts.setAllServiceURLs(listen1)\n\n\t// listen2 => old timestamp, same content => no problem\n\ts.resp = httptest.NewRecorder()\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.DispatchCloud,\n\t\t\"http://localhost\"+listen2+\"/\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\tresp := s.checkOK(c)\n\n\t// listen4 => no metrics on some services => no problem\n\ts.resp = httptest.NewRecorder()\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.WebDAV,\n\t\t\"http://localhost\"+listen4+\"/\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\tresp = s.checkOK(c)\n\n\t// listen3 => old timestamp, old content => report discrepancy\n\ts.resp = httptest.NewRecorder()\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.Keepstore,\n\t\t\"http://localhost\"+listen1+\"/\",\n\t\t\"http://localhost\"+listen3+\"/\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\tresp = s.checkUnhealthy(c)\n\tif c.Check(len(resp.Errors) > 0, check.Equals, true) {\n\t\tc.Check(resp.Errors[0], check.Matches, `outdated config: \\Qkeepstore+http://localhost`+listen3+`\\E: config file \\(sha256 .*\\) does not match latest version with timestamp .*`)\n\t}\n\n\t// no services report config time (migrating to current version) => no problem\n\ts.resp = httptest.NewRecorder()\n\ts.setAllServiceURLs(listen4)\n\ts.handler.ServeHTTP(s.resp, s.req)\n\ts.checkOK(c)\n}\n\nfunc (s *AggregatorSuite) TestClockSkew(c *check.C) {\n\t// srv1: report real wall clock time\n\thandler1 := healthyHandler{}\n\tsrv1, listen1 := s.stubServer(&handler1)\n\tdefer srv1.Close()\n\t// srv2: report near-future time\n\thandler2 := healthyHandler{headerDate: time.Now().Add(3 * time.Second)}\n\tsrv2, listen2 := s.stubServer(&handler2)\n\tdefer srv2.Close()\n\t// srv3: report far-future time\n\thandler3 := healthyHandler{headerDate: time.Now().Add(3*time.Minute + 3*time.Second)}\n\tsrv3, listen3 := s.stubServer(&handler3)\n\tdefer srv3.Close()\n\n\ts.setAllServiceURLs(listen1)\n\n\t// near-future time => OK\n\ts.resp = httptest.NewRecorder()\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.DispatchCloud,\n\t\t\"http://localhost\"+listen2+\"/\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\ts.checkOK(c)\n\n\t// far-future time => error\n\ts.resp = 
httptest.NewRecorder()\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.WebDAV,\n\t\t\"http://localhost\"+listen3+\"/\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\tresp := s.checkUnhealthy(c)\n\tif c.Check(len(resp.Errors) > 0, check.Equals, true) {\n\t\tc.Check(resp.Errors[0], check.Matches, `clock skew detected: maximum timestamp spread is 3m.* \\(exceeds warning threshold of 1m\\)`)\n\t}\n}\n\nfunc (s *AggregatorSuite) TestVersionSkew(c *check.C) {\n\t// srv1: report same version\n\thandler1 := healthyHandler{version: cmd.Version.String()}\n\tsrv1, listen1 := s.stubServer(&handler1)\n\tdefer srv1.Close()\n\t// srv2: report same version but without \" (go1.2.3)\" part\n\thandler2 := healthyHandler{version: strings.Fields(cmd.Version.String())[0]}\n\tsrv2, listen2 := s.stubServer(&handler2)\n\tdefer srv2.Close()\n\t// srv3: report different version\n\thandler3 := healthyHandler{version: \"1.2.3~4 (\" + runtime.Version() + \")\"}\n\tsrv3, listen3 := s.stubServer(&handler3)\n\tdefer srv3.Close()\n\n\ts.setAllServiceURLs(listen1)\n\n\t// same version but without go1.2.3 part => OK\n\ts.resp = httptest.NewRecorder()\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.RailsAPI,\n\t\t\"http://localhost\"+listen2+\"/\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\ts.checkOK(c)\n\n\t// different version => error\n\ts.resp = httptest.NewRecorder()\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.WebDAV,\n\t\t\"http://localhost\"+listen3+\"/\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\tresp := s.checkUnhealthy(c)\n\tif c.Check(len(resp.Errors) > 0, check.Equals, true) {\n\t\tc.Check(resp.Errors[0], check.Matches, `version mismatch: \\Qkeep-web+http://localhost`+listen3+`\\E is running 1.2.3~4 (.*) -- expected \\Q`+cmd.Version.String()+`\\E`)\n\t}\n}\n\nfunc (s *AggregatorSuite) TestPingTimeout(c *check.C) {\n\ts.handler.timeout = arvados.Duration(100 * time.Millisecond)\n\tsrv, listen := s.stubServer(&slowHandler{})\n\tdefer srv.Close()\n\tarvadostest.SetServiceURL(&s.handler.Cluster.Services.Keepstore, \"http://localhost\"+listen+\"/\")\n\ts.handler.ServeHTTP(s.resp, s.req)\n\tresp := s.checkUnhealthy(c)\n\tep := resp.Checks[\"keepstore+http://localhost\"+listen+\"/_health/ping\"]\n\tc.Check(ep.Health, check.Equals, \"ERROR\")\n\tc.Check(ep.HTTPStatusCode, check.Equals, 0)\n\trt, err := ep.ResponseTime.Float64()\n\tc.Check(err, check.IsNil)\n\tc.Check(rt > 0.005, check.Equals, true)\n}\n\nfunc (s *AggregatorSuite) TestCheckCommand(c *check.C) {\n\tsrv, listen := s.stubServer(&healthyHandler{})\n\tdefer srv.Close()\n\ts.setAllServiceURLs(listen)\n\ttmpdir := c.MkDir()\n\tconfdata, err := yaml.Marshal(arvados.Config{Clusters: map[string]arvados.Cluster{s.handler.Cluster.ClusterID: *s.handler.Cluster}})\n\tc.Assert(err, check.IsNil)\n\tconfdata = regexp.MustCompile(`Source(Timestamp|SHA256): [^\\n]+\\n`).ReplaceAll(confdata, []byte{})\n\terr = ioutil.WriteFile(tmpdir+\"/config.yml\", confdata, 0777)\n\tc.Assert(err, check.IsNil)\n\n\tvar stdout, stderr bytes.Buffer\n\n\texitcode := CheckCommand.RunCommand(\"check\", []string{\"-config=\" + tmpdir + \"/config.yml\"}, &bytes.Buffer{}, &stdout, &stderr)\n\tc.Check(exitcode, check.Equals, 0)\n\tc.Check(stderr.String(), check.Equals, \"health check OK\\n\")\n\tc.Check(stdout.String(), check.Equals, \"\")\n\n\tstdout.Reset()\n\tstderr.Reset()\n\texitcode = CheckCommand.RunCommand(\"check\", []string{\"-quiet\", \"-config=\" + tmpdir + \"/config.yml\"}, &bytes.Buffer{}, &stdout, &stderr)\n\tc.Check(exitcode, check.Equals, 
0)\n\tc.Check(stderr.String(), check.Equals, \"\")\n\tc.Check(stdout.String(), check.Equals, \"\")\n\n\tstdout.Reset()\n\tstderr.Reset()\n\texitcode = CheckCommand.RunCommand(\"check\", []string{\"-config=\" + tmpdir + \"/config.yml\", \"-yaml\"}, &bytes.Buffer{}, &stdout, &stderr)\n\tc.Check(exitcode, check.Equals, 0)\n\tc.Check(stderr.String(), check.Equals, \"\")\n\tc.Check(stdout.String(), check.Matches, `(?ms).*(\\n|^)Health: OK\\n.*`)\n}\n\nfunc (s *AggregatorSuite) checkError(c *check.C) {\n\tc.Check(s.resp.Code, check.Not(check.Equals), http.StatusOK)\n\tvar resp ClusterHealthResponse\n\terr := json.Unmarshal(s.resp.Body.Bytes(), &resp)\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.Health, check.Not(check.Equals), \"OK\")\n}\n\nfunc (s *AggregatorSuite) checkUnhealthy(c *check.C) ClusterHealthResponse {\n\treturn s.checkResult(c, \"ERROR\")\n}\n\nfunc (s *AggregatorSuite) checkOK(c *check.C) ClusterHealthResponse {\n\treturn s.checkResult(c, \"OK\")\n}\n\nfunc (s *AggregatorSuite) checkResult(c *check.C, health string) ClusterHealthResponse {\n\tc.Check(s.resp.Code, check.Equals, http.StatusOK)\n\tvar resp ClusterHealthResponse\n\tc.Log(s.resp.Body.String())\n\terr := json.Unmarshal(s.resp.Body.Bytes(), &resp)\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.Health, check.Equals, health)\n\treturn resp\n}\n\nfunc (s *AggregatorSuite) setAllServiceURLs(listen string) {\n\tsvcs := &s.handler.Cluster.Services\n\tfor _, svc := range []*arvados.Service{\n\t\t&svcs.Controller,\n\t\t&svcs.DispatchCloud,\n\t\t&svcs.DispatchLSF,\n\t\t&svcs.DispatchSLURM,\n\t\t&svcs.Keepbalance,\n\t\t&svcs.Keepproxy,\n\t\t&svcs.Keepstore,\n\t\t&svcs.Health,\n\t\t&svcs.RailsAPI,\n\t\t&svcs.WebDAV,\n\t\t&svcs.Websocket,\n\t\t&svcs.Workbench1,\n\t\t&svcs.Workbench2,\n\t} {\n\t\tarvadostest.SetServiceURL(svc, \"http://localhost\"+listen+\"/\")\n\t}\n}\n\ntype unhealthyHandler struct{}\n\nfunc (*unhealthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tif req.URL.Path == \"/_health/ping\" {\n\t\tresp.Write([]byte(`{\"health\":\"ERROR\",\"error\":\"the bends\"}`))\n\t} else {\n\t\thttp.Error(resp, \"not found\", http.StatusNotFound)\n\t}\n}\n\ntype healthyHandler struct {\n\tversion    string\n\tconfigHash string\n\tconfigTime time.Time\n\theaderDate time.Time\n}\n\nfunc (h *healthyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tif !h.headerDate.IsZero() {\n\t\tresp.Header().Set(\"Date\", h.headerDate.Format(time.RFC1123))\n\t}\n\tauthOK := req.Header.Get(\"Authorization\") == \"Bearer \"+arvadostest.ManagementToken\n\tif req.URL.Path == \"/_health/ping\" {\n\t\tif !authOK {\n\t\t\thttp.Error(resp, \"unauthorized\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tresp.Write([]byte(`{\"health\":\"OK\"}`))\n\t} else if req.URL.Path == \"/metrics\" {\n\t\tif !authOK {\n\t\t\thttp.Error(resp, \"unauthorized\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tt := h.configTime\n\t\tif t.IsZero() {\n\t\t\tt = time.Now()\n\t\t}\n\t\tfmt.Fprintf(resp, `# HELP arvados_config_load_timestamp_seconds Time when config file was loaded.\n# TYPE arvados_config_load_timestamp_seconds gauge\narvados_config_load_timestamp_seconds{sha256=\"%s\"} %g\n# HELP arvados_config_source_timestamp_seconds Timestamp of config file when it was loaded.\n# TYPE arvados_config_source_timestamp_seconds gauge\narvados_config_source_timestamp_seconds{sha256=\"%s\"} %g\n# HELP arvados_version_running Indicated version is running.\n# TYPE arvados_version_running gauge\narvados_version_running{version=\"%s\"} 
1\n`,\n\t\t\th.configHash, float64(time.Now().UnixNano())/1e9,\n\t\t\th.configHash, float64(t.UnixNano())/1e9,\n\t\t\th.version)\n\t} else {\n\t\thttp.Error(resp, \"not found\", http.StatusNotFound)\n\t}\n}\n\ntype slowHandler struct{}\n\nfunc (*slowHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tif req.URL.Path == \"/_health/ping\" {\n\t\ttime.Sleep(3 * time.Second)\n\t\tresp.Write([]byte(`{\"health\":\"OK\"}`))\n\t} else {\n\t\thttp.Error(resp, \"not found\", http.StatusNotFound)\n\t}\n}\n"
  },
  {
    "path": "sdk/go/health/handler.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage health\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"net/http\"\n\t\"strings\"\n\t\"sync\"\n)\n\n// Func is a health-check function: it returns nil when healthy, an\n// error when not.\ntype Func func() error\n\n// Routes is a map of URI path to health-check function.\ntype Routes map[string]Func\n\n// Handler is an http.Handler that responds to authenticated\n// health-check requests with JSON responses like {\"health\":\"OK\"} or\n// {\"health\":\"ERROR\",\"error\":\"error text\"}.\n//\n// Fields of a Handler should not be changed after the Handler is\n// first used.\ntype Handler struct {\n\tsetupOnce sync.Once\n\tmux       *http.ServeMux\n\n\t// Authentication token. If empty, all requests will return 404.\n\tToken string\n\n\t// Route prefix, typically \"/_health/\".\n\tPrefix string\n\n\t// Map of URI paths to health-check Func. The prefix is\n\t// omitted: Routes[\"foo\"] is the health check invoked by a\n\t// request to \"{Prefix}/foo\".\n\t//\n\t// If \"ping\" is not listed here, it will be added\n\t// automatically and will always return a \"healthy\" response.\n\tRoutes Routes\n\n\t// If non-nil, Log is called after handling each request. The\n\t// error argument is nil if the request was successfully\n\t// authenticated and served, even if the health check itself\n\t// failed.\n\tLog func(*http.Request, error)\n}\n\n// ServeHTTP implements http.Handler.\nfunc (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\th.setupOnce.Do(h.setup)\n\th.mux.ServeHTTP(w, r)\n}\n\nfunc (h *Handler) setup() {\n\th.mux = http.NewServeMux()\n\tprefix := h.Prefix\n\tif !strings.HasSuffix(prefix, \"/\") {\n\t\tprefix = prefix + \"/\"\n\t}\n\tfor name, fn := range h.Routes {\n\t\th.mux.Handle(prefix+name, h.healthJSON(fn))\n\t}\n\tif _, ok := h.Routes[\"ping\"]; !ok {\n\t\th.mux.Handle(prefix+\"ping\", h.healthJSON(func() error { return nil }))\n\t}\n}\n\nvar (\n\thealthyBody     = []byte(`{\"health\":\"OK\"}` + \"\\n\")\n\terrNotFound     = errors.New(http.StatusText(http.StatusNotFound))\n\terrUnauthorized = errors.New(http.StatusText(http.StatusUnauthorized))\n\terrForbidden    = errors.New(http.StatusText(http.StatusForbidden))\n)\n\nfunc (h *Handler) healthJSON(fn Func) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tvar err error\n\t\tdefer func() {\n\t\t\tif h.Log != nil {\n\t\t\t\th.Log(r, err)\n\t\t\t}\n\t\t}()\n\t\tif h.Token == \"\" {\n\t\t\thttp.Error(w, \"disabled\", http.StatusNotFound)\n\t\t\terr = errNotFound\n\t\t} else if ah := r.Header.Get(\"Authorization\"); ah == \"\" {\n\t\t\thttp.Error(w, \"authorization required\", http.StatusUnauthorized)\n\t\t\terr = errUnauthorized\n\t\t} else if ah != \"Bearer \"+h.Token {\n\t\t\thttp.Error(w, \"authorization error\", http.StatusForbidden)\n\t\t\terr = errForbidden\n\t\t} else if err = fn(); err == nil {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.Write(healthyBody)\n\t\t} else {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tenc := json.NewEncoder(w)\n\t\t\terr = enc.Encode(map[string]string{\n\t\t\t\t\"health\": \"ERROR\",\n\t\t\t\t\"error\":  err.Error(),\n\t\t\t})\n\t\t}\n\t})\n}\n"
  },
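  {
    "path": "sdk/go/health/handler_example_test.go",
    "content": "// Illustrative sketch -- a hypothetical file, not part of the\n// upstream tree. It mounts a token-protected Handler with one custom\n// check beside the automatic \"ping\" route: GET /_health/db runs the\n// supplied check, and GET /_health/ping always reports healthy.\npackage health_test\n\nimport (\n\t\"net/http\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/health\"\n)\n\nfunc exampleHandler(pingDB func() error) http.Handler {\n\treturn &health.Handler{\n\t\tToken:  \"management-token\", // hypothetical value\n\t\tPrefix: \"/_health/\",\n\t\tRoutes: health.Routes{\"db\": pingDB},\n\t}\n}\n"
  },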
  {
    "path": "sdk/go/health/handler_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage health\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"testing\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nvar _ = check.Suite(&Suite{})\n\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\ntype Suite struct{}\n\nconst (\n\tgoodToken = \"supersecret\"\n\tbadToken  = \"pwn\"\n)\n\nfunc (s *Suite) TestPassFailRefuse(c *check.C) {\n\th := &Handler{\n\t\tToken:  goodToken,\n\t\tPrefix: \"/_health/\",\n\t\tRoutes: Routes{\n\t\t\t\"success\": func() error { return nil },\n\t\t\t\"miracle\": func() error { return errors.New(\"unimplemented\") },\n\t\t},\n\t}\n\n\tresp := httptest.NewRecorder()\n\th.ServeHTTP(resp, s.request(\"/_health/ping\", goodToken))\n\ts.checkHealthy(c, resp)\n\n\tresp = httptest.NewRecorder()\n\th.ServeHTTP(resp, s.request(\"/_health/success\", goodToken))\n\ts.checkHealthy(c, resp)\n\n\tresp = httptest.NewRecorder()\n\th.ServeHTTP(resp, s.request(\"/_health/miracle\", goodToken))\n\ts.checkUnhealthy(c, resp)\n\n\tresp = httptest.NewRecorder()\n\th.ServeHTTP(resp, s.request(\"/_health/miracle\", badToken))\n\tc.Check(resp.Code, check.Equals, http.StatusForbidden)\n\n\tresp = httptest.NewRecorder()\n\th.ServeHTTP(resp, s.request(\"/_health/miracle\", \"\"))\n\tc.Check(resp.Code, check.Equals, http.StatusUnauthorized)\n\n\tresp = httptest.NewRecorder()\n\th.ServeHTTP(resp, s.request(\"/_health/theperthcountyconspiracy\", \"\"))\n\tc.Check(resp.Code, check.Equals, http.StatusNotFound)\n\n\tresp = httptest.NewRecorder()\n\th.ServeHTTP(resp, s.request(\"/x/miracle\", \"\"))\n\tc.Check(resp.Code, check.Equals, http.StatusNotFound)\n\n\tresp = httptest.NewRecorder()\n\th.ServeHTTP(resp, s.request(\"/miracle\", \"\"))\n\tc.Check(resp.Code, check.Equals, http.StatusNotFound)\n}\n\nfunc (s *Suite) TestPingOverride(c *check.C) {\n\tvar ok bool\n\th := &Handler{\n\t\tToken: goodToken,\n\t\tRoutes: Routes{\n\t\t\t\"ping\": func() error {\n\t\t\t\tok = !ok\n\t\t\t\tif ok {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn errors.New(\"good error\")\n\t\t\t},\n\t\t},\n\t}\n\tresp := httptest.NewRecorder()\n\th.ServeHTTP(resp, s.request(\"/ping\", goodToken))\n\ts.checkHealthy(c, resp)\n\n\tresp = httptest.NewRecorder()\n\th.ServeHTTP(resp, s.request(\"/ping\", goodToken))\n\ts.checkUnhealthy(c, resp)\n}\n\nfunc (s *Suite) TestZeroValueIsDisabled(c *check.C) {\n\tresp := httptest.NewRecorder()\n\t(&Handler{}).ServeHTTP(resp, s.request(\"/ping\", goodToken))\n\tc.Check(resp.Code, check.Equals, http.StatusNotFound)\n\n\tresp = httptest.NewRecorder()\n\t(&Handler{}).ServeHTTP(resp, s.request(\"/ping\", \"\"))\n\tc.Check(resp.Code, check.Equals, http.StatusNotFound)\n}\n\nfunc (s *Suite) request(path, token string) *http.Request {\n\tu, _ := url.Parse(\"http://foo.local\" + path)\n\treq := &http.Request{\n\t\tMethod:     \"GET\",\n\t\tHost:       u.Host,\n\t\tURL:        u,\n\t\tRequestURI: u.RequestURI(),\n\t}\n\tif token != \"\" {\n\t\treq.Header = http.Header{\n\t\t\t\"Authorization\": {\"Bearer \" + token},\n\t\t}\n\t}\n\treturn req\n}\n\nfunc (s *Suite) checkHealthy(c *check.C, resp *httptest.ResponseRecorder) {\n\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\tc.Check(resp.Body.String(), check.Equals, `{\"health\":\"OK\"}`+\"\\n\")\n}\n\nfunc (s *Suite) checkUnhealthy(c *check.C, resp *httptest.ResponseRecorder) {\n\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\tvar 
result map[string]interface{}\n\terr := json.Unmarshal(resp.Body.Bytes(), &result)\n\tc.Assert(err, check.IsNil)\n\tc.Check(result[\"health\"], check.Equals, \"ERROR\")\n\tc.Check(result[\"error\"].(string), check.Not(check.Equals), \"\")\n}\n"
  },
  {
    "path": "sdk/go/httpserver/error.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage httpserver\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n)\n\ntype HTTPStatusError interface {\n\terror\n\tHTTPStatus() int\n}\n\nfunc Errorf(status int, tmpl string, args ...interface{}) error {\n\treturn errorWithStatus{fmt.Errorf(tmpl, args...), status}\n}\n\nfunc ErrorWithStatus(err error, status int) error {\n\treturn errorWithStatus{err, status}\n}\n\ntype errorWithStatus struct {\n\terror\n\tStatus int\n}\n\nfunc (ews errorWithStatus) HTTPStatus() int {\n\treturn ews.Status\n}\n\ntype ErrorResponse struct {\n\tErrors []string `json:\"errors\"`\n}\n\nfunc Error(w http.ResponseWriter, error string, code int) {\n\tErrors(w, []string{error}, code)\n}\n\nfunc Errors(w http.ResponseWriter, errors []string, code int) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tw.WriteHeader(code)\n\tjson.NewEncoder(w).Encode(ErrorResponse{Errors: errors})\n}\n"
  },
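  {
    "path": "sdk/go/httpserver/error_example_test.go",
    "content": "// Illustrative sketch -- a hypothetical file, not part of the\n// upstream tree. It shows the two halves of this package's error\n// support: building an error that carries an HTTP status, and\n// writing a JSON {\"errors\":[...]} body with that status.\npackage httpserver_test\n\nimport (\n\t\"net/http\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\nfunc exampleError(w http.ResponseWriter) {\n\t// Hypothetical failure message; any error/status pair works.\n\terr := httpserver.Errorf(http.StatusBadGateway, \"upstream %s is down\", \"keepstore\")\n\tif hs, ok := err.(httpserver.HTTPStatusError); ok {\n\t\t// Responds 502 with body {\"errors\":[\"upstream keepstore is down\"]}.\n\t\thttpserver.Error(w, err.Error(), hs.HTTPStatus())\n\t}\n}\n"
  },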
  {
    "path": "sdk/go/httpserver/httpserver.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage httpserver\n\nimport (\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype Server struct {\n\thttp.Server\n\tAddr     string // host:port where the server is listening.\n\terr      error\n\tcond     *sync.Cond\n\trunning  bool\n\tlistener *net.TCPListener\n\twantDown bool\n}\n\n// Start is essentially (*http.Server)ListenAndServe() with two more\n// features: (1) by the time Start() returns, Addr is changed to the\n// address:port we ended up listening to -- which makes listening on\n// \":0\" useful in test suites -- and (2) the server can be shut down\n// without killing the process -- which is useful in test cases, and\n// makes it possible to shut down gracefully on SIGTERM without\n// killing active connections.\nfunc (srv *Server) Start() error {\n\taddr, err := net.ResolveTCPAddr(\"tcp\", srv.Addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsrv.listener, err = listenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsrv.Addr = srv.listener.Addr().String()\n\n\tmutex := &sync.RWMutex{}\n\tsrv.cond = sync.NewCond(mutex.RLocker())\n\tsrv.running = true\n\tgo func() {\n\t\tlnr := tcpKeepAliveListener{srv.listener}\n\t\tif srv.TLSConfig != nil {\n\t\t\terr = srv.ServeTLS(lnr, \"\", \"\")\n\t\t} else {\n\t\t\terr = srv.Serve(lnr)\n\t\t}\n\t\tif !srv.wantDown {\n\t\t\tsrv.err = err\n\t\t}\n\t\tmutex.Lock()\n\t\tsrv.running = false\n\t\tsrv.cond.Broadcast()\n\t\tmutex.Unlock()\n\t}()\n\treturn nil\n}\n\n// Close shuts down the server and returns when it has stopped.\nfunc (srv *Server) Close() error {\n\tsrv.wantDown = true\n\tsrv.listener.Close()\n\treturn srv.Wait()\n}\n\n// Wait returns when the server has shut down.\nfunc (srv *Server) Wait() error {\n\tif srv.cond == nil {\n\t\treturn nil\n\t}\n\tsrv.cond.L.Lock()\n\tdefer srv.cond.L.Unlock()\n\tfor srv.running {\n\t\tsrv.cond.Wait()\n\t}\n\treturn srv.err\n}\n\n// tcpKeepAliveListener is copied from net/http because not exported.\ntype tcpKeepAliveListener struct {\n\t*net.TCPListener\n}\n\nfunc (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {\n\ttc, err := ln.AcceptTCP()\n\tif err != nil {\n\t\treturn\n\t}\n\ttc.SetKeepAlive(true)\n\ttc.SetKeepAlivePeriod(3 * time.Minute)\n\treturn tc, nil\n}\n\n// net.ListenTCP, but retry after \"address already in use\" for up to 5\n// minutes if running inside the arvados test suite.\nfunc listenTCP(network string, addr *net.TCPAddr) (*net.TCPListener, error) {\n\tif os.Getenv(\"ARVADOS_TEST_API_HOST\") == \"\" {\n\t\treturn net.ListenTCP(\"tcp\", addr)\n\t}\n\ttimeout := 5 * time.Minute\n\tdeadline := time.Now().Add(timeout)\n\tlogged := false\n\tfor {\n\t\tln, err := net.ListenTCP(\"tcp\", addr)\n\t\tif err != nil && strings.Contains(err.Error(), \"address already in use\") && time.Now().Before(deadline) {\n\t\t\tif !logged {\n\t\t\t\tlog.Printf(\"listenTCP: retrying up to %v after error: %s\", timeout, err)\n\t\t\t\tlogged = true\n\t\t\t}\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\t\treturn ln, err\n\t}\n}\n"
  },
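  {
    "path": "sdk/go/httpserver/httpserver_example_test.go",
    "content": "// Illustrative sketch -- a hypothetical file, not part of the\n// upstream tree. It shows the test-suite pattern the Start comment\n// describes: listen on \":0\", read back the actual address from Addr,\n// then shut down cleanly with Close.\npackage httpserver_test\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\nfunc exampleServer() error {\n\tsrv := &httpserver.Server{}\n\tsrv.Addr = \":0\"\n\tsrv.Handler = http.NotFoundHandler()\n\tif err := srv.Start(); err != nil {\n\t\treturn err\n\t}\n\t// By now Addr holds the host:port the kernel actually assigned.\n\tfmt.Println(\"listening at\", srv.Addr)\n\treturn srv.Close()\n}\n"
  },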
  {
    "path": "sdk/go/httpserver/id_generator.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage httpserver\n\nimport (\n\t\"math/rand\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tHeaderRequestID = \"X-Request-Id\"\n)\n\n// IDGenerator generates alphanumeric strings suitable for use as\n// unique IDs (a given IDGenerator will never return the same ID\n// twice).\ntype IDGenerator struct {\n\t// Prefix is prepended to each returned ID.\n\tPrefix string\n\n\tmtx sync.Mutex\n\tsrc rand.Source\n}\n\n// Next returns a new ID string. It is safe to call Next from multiple\n// goroutines.\nfunc (g *IDGenerator) Next() string {\n\tg.mtx.Lock()\n\tdefer g.mtx.Unlock()\n\tif g.src == nil {\n\t\tg.src = rand.NewSource(time.Now().UnixNano())\n\t}\n\ta, b := g.src.Int63(), g.src.Int63()\n\tid := strconv.FormatInt(a, 36) + strconv.FormatInt(b, 36)\n\tfor len(id) > 20 {\n\t\tid = id[:20]\n\t}\n\treturn g.Prefix + id\n}\n\n// AddRequestIDs wraps an http.Handler, adding an X-Request-Id header\n// to each request that doesn't already have one.\nfunc AddRequestIDs(h http.Handler) http.Handler {\n\tgen := &IDGenerator{Prefix: \"req-\"}\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tif req.Header.Get(HeaderRequestID) == \"\" {\n\t\t\tif req.Header == nil {\n\t\t\t\treq.Header = http.Header{}\n\t\t\t}\n\t\t\treq.Header.Set(HeaderRequestID, gen.Next())\n\t\t}\n\t\tw.Header().Set(\"X-Request-Id\", req.Header.Get(\"X-Request-Id\"))\n\t\th.ServeHTTP(w, req)\n\t})\n}\n"
  },
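  {
    "path": "sdk/go/httpserver/id_generator_example_test.go",
    "content": "// Illustrative sketch -- a hypothetical file, not part of the\n// upstream tree. It shows both entry points: wrapping a handler so\n// every request carries an X-Request-Id, and drawing unique IDs\n// directly from an IDGenerator with a custom prefix.\npackage httpserver_test\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\nfunc exampleRequestIDs(mux *http.ServeMux) {\n\t// Requests arriving without an X-Request-Id header get one like \"req-...\".\n\thttp.Handle(\"/\", httpserver.AddRequestIDs(mux))\n\n\tgen := &httpserver.IDGenerator{Prefix: \"txn-\"} // hypothetical prefix\n\tfmt.Println(gen.Next())\n}\n"
  },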
  {
    "path": "sdk/go/httpserver/inspect.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage httpserver\n\nimport (\n\t\"encoding/json\"\n\t\"net/http\"\n\t\"sort\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\n// Inspect serves a report of current requests at \"GET\n// /_inspect/requests\", and passes other requests through to the next\n// handler.\n//\n// If registry is not nil, Inspect registers metrics about current\n// requests.\nfunc Inspect(registry *prometheus.Registry, authToken string, next http.Handler) http.Handler {\n\ttype ent struct {\n\t\tstartTime  time.Time\n\t\thangupTime atomic.Value\n\t}\n\tcurrent := map[*http.Request]*ent{}\n\tmtx := sync.Mutex{}\n\tif registry != nil {\n\t\tregistry.MustRegister(prometheus.NewGaugeFunc(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tNamespace: \"arvados\",\n\t\t\t\tName:      \"max_active_request_age_seconds\",\n\t\t\t\tHelp:      \"Age of oldest active request\",\n\t\t\t},\n\t\t\tfunc() float64 {\n\t\t\t\tmtx.Lock()\n\t\t\t\tdefer mtx.Unlock()\n\t\t\t\tearliest := time.Time{}\n\t\t\t\tany := false\n\t\t\t\tfor _, e := range current {\n\t\t\t\t\tif _, ok := e.hangupTime.Load().(time.Time); ok {\n\t\t\t\t\t\t// Don't count abandoned requests here\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif !any || e.startTime.Before(earliest) {\n\t\t\t\t\t\tany = true\n\t\t\t\t\t\tearliest = e.startTime\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !any {\n\t\t\t\t\treturn 0\n\t\t\t\t}\n\t\t\t\treturn float64(time.Since(earliest).Seconds())\n\t\t\t},\n\t\t))\n\t\tregistry.MustRegister(prometheus.NewGaugeFunc(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tNamespace: \"arvados\",\n\t\t\t\tName:      \"max_abandoned_request_age_seconds\",\n\t\t\t\tHelp:      \"Maximum time since client hung up on a request whose processing thread is still running\",\n\t\t\t},\n\t\t\tfunc() float64 {\n\t\t\t\tmtx.Lock()\n\t\t\t\tdefer mtx.Unlock()\n\t\t\t\tearliest := time.Time{}\n\t\t\t\tany := false\n\t\t\t\tfor _, e := range current {\n\t\t\t\t\tif hangupTime, ok := e.hangupTime.Load().(time.Time); ok {\n\t\t\t\t\t\tif !any || hangupTime.Before(earliest) {\n\t\t\t\t\t\t\tany = true\n\t\t\t\t\t\t\tearliest = hangupTime\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !any {\n\t\t\t\t\treturn 0\n\t\t\t\t}\n\t\t\t\treturn float64(time.Since(earliest).Seconds())\n\t\t\t},\n\t\t))\n\t}\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tif req.Method == \"GET\" && req.URL.Path == \"/_inspect/requests\" {\n\t\t\tif authToken == \"\" || req.Header.Get(\"Authorization\") != \"Bearer \"+authToken {\n\t\t\t\tError(w, \"unauthorized\", http.StatusUnauthorized)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmtx.Lock()\n\t\t\tdefer mtx.Unlock()\n\t\t\ttype outrec struct {\n\t\t\t\tRequestID     string\n\t\t\t\tMethod        string\n\t\t\t\tHost          string\n\t\t\t\tURL           string\n\t\t\t\tRemoteAddr    string\n\t\t\t\tXForwardedFor string\n\t\t\t\tElapsed       float64\n\t\t\t}\n\t\t\tnow := time.Now()\n\t\t\toutrecs := []outrec{}\n\t\t\tfor req, e := range current {\n\t\t\t\toutrecs = append(outrecs, outrec{\n\t\t\t\t\tRequestID:     req.Header.Get(HeaderRequestID),\n\t\t\t\t\tMethod:        req.Method,\n\t\t\t\t\tHost:          req.Host,\n\t\t\t\t\tURL:           req.URL.String(),\n\t\t\t\t\tRemoteAddr:    req.RemoteAddr,\n\t\t\t\t\tXForwardedFor: req.Header.Get(\"X-Forwarded-For\"),\n\t\t\t\t\tElapsed:       
now.Sub(e.startTime).Seconds(),\n\t\t\t\t})\n\t\t\t}\n\t\t\tsort.Slice(outrecs, func(i, j int) bool { return outrecs[i].Elapsed < outrecs[j].Elapsed })\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tjson.NewEncoder(w).Encode(outrecs)\n\t\t} else {\n\t\t\te := ent{startTime: time.Now()}\n\t\t\tmtx.Lock()\n\t\t\tcurrent[req] = &e\n\t\t\tmtx.Unlock()\n\t\t\tgo func() {\n\t\t\t\t<-req.Context().Done()\n\t\t\t\te.hangupTime.Store(time.Now())\n\t\t\t}()\n\t\t\tdefer func() {\n\t\t\t\tmtx.Lock()\n\t\t\t\tdefer mtx.Unlock()\n\t\t\t\tdelete(current, req)\n\t\t\t}()\n\t\t\tnext.ServeHTTP(w, req)\n\t\t}\n\t})\n}\n"
  },
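  {
    "path": "doc/examples/go-httpserver/inspect/main.go",
    "content": "// Illustrative sketch (editor-added; not part of the Arvados source\n// tree). It wires the Inspect middleware from\n// sdk/go/httpserver/inspect.go in front of a deliberately slow\n// handler, assuming only the exported API shown there. While a\n// request is in flight, it can be observed with:\n//\n//\tcurl -H \"Authorization: Bearer exampletoken\" \\\n//\t\thttp://localhost:8080/_inspect/requests\n//\n// \"exampletoken\" is a placeholder for a real management token.\npackage main\n\nimport (\n\t\"net/http\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc main() {\n\tslow := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\ttime.Sleep(10 * time.Second) // long enough to show up in the report\n\t\tw.Write([]byte(\"done\\n\"))\n\t})\n\t// The registry argument may be nil if the two request-age gauge\n\t// metrics are not wanted.\n\treg := prometheus.NewRegistry()\n\thttp.ListenAndServe(\":8080\", httpserver.Inspect(reg, \"exampletoken\", slow))\n}\n"
  },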
  {
    "path": "sdk/go/httpserver/inspect_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage httpserver\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nfunc (s *Suite) TestInspect(c *check.C) {\n\treg := prometheus.NewRegistry()\n\th := newTestHandler()\n\tmh := Inspect(reg, \"abcd\", h)\n\thandlerReturned := make(chan struct{})\n\treqctx, reqcancel := context.WithCancel(context.Background())\n\tlongreq := httptest.NewRequest(\"GET\", \"/test\", nil).WithContext(reqctx)\n\tlongreq.Header.Set(\"X-Forwarded-For\", \"192.168.0.123, 172.16.0.123\")\n\tgo func() {\n\t\tmh.ServeHTTP(httptest.NewRecorder(), longreq)\n\t\tclose(handlerReturned)\n\t}()\n\t<-h.inHandler\n\n\tresp := httptest.NewRecorder()\n\treq := httptest.NewRequest(\"GET\", \"/_inspect/requests\", nil)\n\tmh.ServeHTTP(resp, req)\n\tc.Check(resp.Code, check.Equals, http.StatusUnauthorized)\n\tc.Check(resp.Body.String(), check.Equals, `{\"errors\":[\"unauthorized\"]}`+\"\\n\")\n\n\tresp = httptest.NewRecorder()\n\treq.Header.Set(\"Authorization\", \"Bearer abcde\")\n\tmh.ServeHTTP(resp, req)\n\tc.Check(resp.Code, check.Equals, http.StatusUnauthorized)\n\n\tresp = httptest.NewRecorder()\n\treq.Header.Set(\"Authorization\", \"Bearer abcd\")\n\tmh.ServeHTTP(resp, req)\n\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\treqs := []map[string]interface{}{}\n\terr := json.NewDecoder(resp.Body).Decode(&reqs)\n\tc.Check(err, check.IsNil)\n\tc.Check(reqs, check.HasLen, 1)\n\tc.Check(reqs[0][\"URL\"], check.Equals, \"/test\")\n\tc.Check(reqs[0][\"XForwardedFor\"], check.Equals, \"192.168.0.123, 172.16.0.123\")\n\n\t// Request is active, so we should see active request age > 0\n\tresp = httptest.NewRecorder()\n\tmreq := httptest.NewRequest(\"GET\", \"/metrics\", nil)\n\tpromhttp.HandlerFor(reg, promhttp.HandlerOpts{}).ServeHTTP(resp, mreq)\n\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*\\narvados_max_active_request_age_seconds [0\\.]*[1-9][-\\d\\.e]*\\n.*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*\\narvados_max_abandoned_request_age_seconds 0\\n.*`)\n\n\treqcancel()\n\n\t// Request context is canceled but handler hasn't returned, so\n\t// we should see max abandoned request age > 0 and active ==\n\t// 0. 
We might need to wait a short time for the cancel to\n\t// propagate.\n\tfor deadline := time.Now().Add(time.Second); time.Now().Before(deadline); time.Sleep(time.Second / 100) {\n\t\tresp = httptest.NewRecorder()\n\t\tpromhttp.HandlerFor(reg, promhttp.HandlerOpts{}).ServeHTTP(resp, mreq)\n\t\tc.Assert(resp.Code, check.Equals, http.StatusOK)\n\t\tif strings.Contains(resp.Body.String(), \"\\narvados_max_active_request_age_seconds 0\\n\") {\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*\\narvados_max_active_request_age_seconds 0\\n.*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*\\narvados_max_abandoned_request_age_seconds [0\\.]*[1-9][-\\d\\.e]*\\n.*`)\n\n\th.okToProceed <- struct{}{}\n\t<-handlerReturned\n\n\t// Handler has returned, so we should see max abandoned\n\t// request age == max active request age == 0\n\tresp = httptest.NewRecorder()\n\tpromhttp.HandlerFor(reg, promhttp.HandlerOpts{}).ServeHTTP(resp, mreq)\n\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*\\narvados_max_active_request_age_seconds 0\\n.*`)\n\tc.Check(resp.Body.String(), check.Matches, `(?ms).*\\narvados_max_abandoned_request_age_seconds 0\\n.*`)\n\n\t// ...and no active requests at the /_inspect/requests endpoint\n\tresp = httptest.NewRecorder()\n\tmh.ServeHTTP(resp, req)\n\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\treqs = nil\n\terr = json.NewDecoder(resp.Body).Decode(&reqs)\n\tc.Check(err, check.IsNil)\n\tc.Assert(reqs, check.HasLen, 0)\n}\n"
  },
  {
    "path": "sdk/go/httpserver/log.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage httpserver\n\nimport (\n\t\"fmt\"\n\t\"log\"\n)\n\n// Log calls log.Println but first transforms strings so they are\n// safer to write in logs (e.g., 'foo\"bar' becomes\n// '\"foo\\\"bar\"'). Arguments that aren't strings and don't have a\n// (String() string) method are left alone.\nfunc Log(args ...interface{}) {\n\tnewargs := make([]interface{}, len(args))\n\tfor i, arg := range args {\n\t\tif s, ok := arg.(string); ok {\n\t\t\tnewargs[i] = fmt.Sprintf(\"%+q\", s)\n\t\t} else if s, ok := arg.(fmt.Stringer); ok {\n\t\t\tnewargs[i] = fmt.Sprintf(\"%+q\", s.String())\n\t\t} else {\n\t\t\tnewargs[i] = arg\n\t\t}\n\t}\n\tlog.Println(newargs...)\n}\n"
  },
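  {
    "path": "doc/examples/go-httpserver/logquote/main.go",
    "content": "// Illustrative sketch (editor-added; not part of the Arvados source\n// tree). It demonstrates Log from sdk/go/httpserver/log.go: string\n// (and fmt.Stringer) arguments are printed in %+q form, so embedded\n// quotes and newlines cannot split or spoof a log line; other values\n// pass through unchanged.\npackage main\n\nimport \"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\nfunc main() {\n\t// Both strings are logged in quoted, escaped form on one line;\n\t// the integer is logged as-is.\n\thttpserver.Log(\"foo\\\"bar\", \"evil\\nvalue\", 42)\n}\n"
  },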
  {
    "path": "sdk/go/httpserver/logger.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage httpserver\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/stats\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype contextKey struct {\n\tname string\n}\n\nvar (\n\trequestTimeContextKey       = contextKey{\"requestTime\"}\n\tresponseLogFieldsContextKey = contextKey{\"responseLogFields\"}\n\tmutexContextKey             = contextKey{\"mutex\"}\n\tstopDeadlineTimerContextKey = contextKey{\"stopDeadlineTimer\"}\n)\n\n// HandlerWithDeadline cancels the request context if the request\n// takes longer than the specified timeout without having its\n// connection hijacked.\n//\n// If timeout is 0, there is no deadline: HandlerWithDeadline is a\n// no-op.\nfunc HandlerWithDeadline(timeout time.Duration, next http.Handler) http.Handler {\n\tif timeout == 0 {\n\t\treturn next\n\t}\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tctx, cancel := context.WithCancel(r.Context())\n\t\tdefer cancel()\n\t\ttimer := time.AfterFunc(timeout, cancel)\n\t\tctx = context.WithValue(ctx, stopDeadlineTimerContextKey, timer.Stop)\n\t\tnext.ServeHTTP(w, r.WithContext(ctx))\n\t\ttimer.Stop()\n\t})\n}\n\n// ExemptFromDeadline exempts the given request from the timeout set\n// by HandlerWithDeadline.\n//\n// It is a no-op if the deadline has already passed, or none was set.\nfunc ExemptFromDeadline(r *http.Request) {\n\tif stop, ok := r.Context().Value(stopDeadlineTimerContextKey).(func() bool); ok {\n\t\tstop()\n\t}\n}\n\nfunc SetResponseLogFields(ctx context.Context, fields logrus.Fields) {\n\tm, _ := ctx.Value(&mutexContextKey).(*sync.Mutex)\n\tc, _ := ctx.Value(&responseLogFieldsContextKey).(logrus.Fields)\n\tif m == nil || c == nil {\n\t\treturn\n\t}\n\tm.Lock()\n\tdefer m.Unlock()\n\tfor k, v := range fields {\n\t\tc[k] = v\n\t}\n}\n\n// LogRequests wraps an http.Handler, logging each request and\n// response.\nfunc LogRequests(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(wrapped http.ResponseWriter, req *http.Request) {\n\t\tw := &responseTimer{ResponseWriter: WrapResponseWriter(wrapped)}\n\t\tlgr := ctxlog.FromContext(req.Context()).WithFields(logrus.Fields{\n\t\t\t\"RequestID\":       req.Header.Get(\"X-Request-Id\"),\n\t\t\t\"remoteAddr\":      req.RemoteAddr,\n\t\t\t\"reqForwardedFor\": req.Header.Get(\"X-Forwarded-For\"),\n\t\t\t\"reqMethod\":       req.Method,\n\t\t\t\"reqHost\":         req.Host,\n\t\t\t\"reqPath\":         req.URL.Path[1:],\n\t\t\t\"reqQuery\":        req.URL.RawQuery,\n\t\t\t\"reqBytes\":        req.ContentLength,\n\t\t})\n\t\tctx := req.Context()\n\t\tctx = context.WithValue(ctx, &requestTimeContextKey, time.Now())\n\t\tctx = context.WithValue(ctx, &responseLogFieldsContextKey, logrus.Fields{})\n\t\tctx = context.WithValue(ctx, &mutexContextKey, &sync.Mutex{})\n\t\tctx = ctxlog.Context(ctx, lgr)\n\t\treq = req.WithContext(ctx)\n\n\t\tlogRequest(w, req, lgr)\n\t\tdefer logResponse(w, req, lgr)\n\t\th.ServeHTTP(w, req)\n\t})\n}\n\nfunc Logger(req *http.Request) logrus.FieldLogger {\n\treturn ctxlog.FromContext(req.Context())\n}\n\nfunc logRequest(w *responseTimer, req *http.Request, lgr *logrus.Entry) {\n\tlgr.Info(\"request\")\n}\n\nfunc logResponse(w *responseTimer, req *http.Request, lgr *logrus.Entry) {\n\tif tStart, ok := req.Context().Value(&requestTimeContextKey).(time.Time); ok {\n\t\ttDone := 
time.Now()\n\t\twriteTime := w.writeTime\n\t\tif !w.wrote {\n\t\t\t// Empty response body. Header was sent when\n\t\t\t// handler exited.\n\t\t\twriteTime = tDone\n\t\t}\n\t\tlgr = lgr.WithFields(logrus.Fields{\n\t\t\t\"timeTotal\":     stats.Duration(tDone.Sub(tStart)),\n\t\t\t\"timeToStatus\":  stats.Duration(writeTime.Sub(tStart)),\n\t\t\t\"timeWriteBody\": stats.Duration(tDone.Sub(writeTime)),\n\t\t})\n\t}\n\tif responseLogFields, ok := req.Context().Value(&responseLogFieldsContextKey).(logrus.Fields); ok {\n\t\tlgr = lgr.WithFields(responseLogFields)\n\t}\n\trespCode := w.WroteStatus()\n\tif respCode == 0 {\n\t\trespCode = http.StatusOK\n\t}\n\tfields := logrus.Fields{\n\t\t\"respStatusCode\": respCode,\n\t\t\"respStatus\":     http.StatusText(respCode),\n\t\t\"respBytes\":      w.WroteBodyBytes(),\n\t}\n\tif respCode >= 400 {\n\t\tfields[\"respBody\"] = string(w.Sniffed())\n\t}\n\tlgr.WithFields(fields).Info(\"response\")\n}\n\ntype responseTimer struct {\n\tResponseWriter\n\twrote     bool\n\twriteTime time.Time\n}\n\nfunc (rt *responseTimer) Unwrap() http.ResponseWriter {\n\treturn rt.ResponseWriter\n}\n\nfunc (rt *responseTimer) WriteHeader(code int) {\n\tif !rt.wrote {\n\t\trt.wrote = true\n\t\trt.writeTime = time.Now()\n\t}\n\trt.ResponseWriter.WriteHeader(code)\n}\n\nfunc (rt *responseTimer) Write(p []byte) (int, error) {\n\tif !rt.wrote {\n\t\trt.wrote = true\n\t\trt.writeTime = time.Now()\n\t}\n\treturn rt.ResponseWriter.Write(p)\n}\n"
  },
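  {
    "path": "doc/examples/go-httpserver/logging/main.go",
    "content": "// Illustrative sketch (editor-added; not part of the Arvados source\n// tree). It composes the middleware from sdk/go/httpserver/logger.go:\n// HandlerWithDeadline cancels the request context after a timeout,\n// LogRequests emits one \"request\" and one \"response\" log line per\n// request, and SetResponseLogFields attaches extra fields to the\n// response line. Only the exported API shown above is assumed.\npackage main\n\nimport (\n\t\"net/http\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc main() {\n\th := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t// \"exampleField\" appears on this request's \"response\" log line.\n\t\thttpserver.SetResponseLogFields(req.Context(), logrus.Fields{\"exampleField\": \"hello\"})\n\t\tw.Write([]byte(\"ok\\n\"))\n\t})\n\t// LogRequests must sit above the app handler so that\n\t// SetResponseLogFields finds the fields map in the request\n\t// context; its position relative to HandlerWithDeadline is a\n\t// choice made for this example.\n\tstack := httpserver.LogRequests(httpserver.HandlerWithDeadline(30*time.Second, h))\n\thttp.ListenAndServe(\":8080\", stack)\n}\n"
  },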
  {
    "path": "sdk/go/httpserver/logger_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage httpserver\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/sirupsen/logrus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&Suite{})\n\ntype Suite struct {\n\tctx     context.Context\n\tlog     *logrus.Logger\n\tlogdata *bytes.Buffer\n}\n\nfunc (s *Suite) SetUpTest(c *check.C) {\n\ts.logdata = bytes.NewBuffer(nil)\n\ts.log = logrus.New()\n\ts.log.Out = s.logdata\n\ts.log.Formatter = &logrus.JSONFormatter{\n\t\tTimestampFormat: time.RFC3339Nano,\n\t}\n\ts.ctx = ctxlog.Context(context.Background(), s.log)\n}\n\nfunc (s *Suite) TestWithDeadline(c *check.C) {\n\treq, err := http.NewRequest(\"GET\", \"https://foo.example/bar\", nil)\n\tc.Assert(err, check.IsNil)\n\n\t// Short timeout cancels context in <1s\n\tresp := httptest.NewRecorder()\n\tHandlerWithDeadline(time.Millisecond, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tselect {\n\t\tcase <-req.Context().Done():\n\t\t\tw.Write([]byte(\"ok\"))\n\t\tcase <-time.After(time.Second):\n\t\t\tc.Error(\"timed out\")\n\t\t}\n\t})).ServeHTTP(resp, req.WithContext(s.ctx))\n\tc.Check(resp.Body.String(), check.Equals, \"ok\")\n\n\t// Long timeout does not cancel context in <1ms\n\tresp = httptest.NewRecorder()\n\tHandlerWithDeadline(time.Second, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tselect {\n\t\tcase <-req.Context().Done():\n\t\t\tc.Error(\"request context done too soon\")\n\t\tcase <-time.After(time.Millisecond):\n\t\t\tw.Write([]byte(\"ok\"))\n\t\t}\n\t})).ServeHTTP(resp, req.WithContext(s.ctx))\n\tc.Check(resp.Body.String(), check.Equals, \"ok\")\n}\n\nfunc (s *Suite) TestExemptFromDeadline(c *check.C) {\n\tsrv := Server{\n\t\tAddr: \":\",\n\t\tServer: http.Server{\n\t\t\tHandler: HandlerWithDeadline(time.Millisecond, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\t\tif req.URL.Path == \"/exempt\" {\n\t\t\t\t\tExemptFromDeadline(req)\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Second / 10)\n\t\t\t\tif req.Context().Err() != nil {\n\t\t\t\t\tw.WriteHeader(499)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfmt.Fprint(w, \"ok\")\n\t\t\t})),\n\t\t\tBaseContext: func(net.Listener) context.Context { return s.ctx },\n\t\t},\n\t}\n\tsrv.Start()\n\tdefer srv.Close()\n\n\tresp, err := http.Get(\"http://\" + srv.Addr + \"/normal\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(resp.StatusCode, check.Equals, 499)\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(body), check.Equals, \"\")\n\n\tresp, err = http.Get(\"http://\" + srv.Addr + \"/exempt\")\n\tc.Assert(err, check.IsNil)\n\tc.Check(resp.StatusCode, check.Equals, 200)\n\tbody, err = ioutil.ReadAll(resp.Body)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(body), check.Equals, \"ok\")\n}\n\nfunc (s *Suite) TestLogRequests(c *check.C) {\n\th := AddRequestIDs(LogRequests(\n\t\thttp.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\ttime.Sleep(time.Microsecond) // ensure timeWriteBody > 0\n\t\t\tw.Write([]byte(\"hello world\"))\n\t\t})))\n\n\treq, err := http.NewRequest(\"GET\", \"https://foo.example/bar\", nil)\n\treq.Header.Set(\"X-Forwarded-For\", 
\"1.2.3.4:12345\")\n\tc.Assert(err, check.IsNil)\n\tresp := httptest.NewRecorder()\n\n\th.ServeHTTP(resp, req.WithContext(s.ctx))\n\n\tdec := json.NewDecoder(s.logdata)\n\n\tgotReq := make(map[string]interface{})\n\terr = dec.Decode(&gotReq)\n\tc.Check(err, check.IsNil)\n\tc.Logf(\"%#v\", gotReq)\n\tc.Check(gotReq[\"RequestID\"], check.Matches, \"req-[a-z0-9]{20}\")\n\tc.Check(gotReq[\"reqForwardedFor\"], check.Equals, \"1.2.3.4:12345\")\n\tc.Check(gotReq[\"msg\"], check.Equals, \"request\")\n\n\tgotResp := make(map[string]interface{})\n\terr = dec.Decode(&gotResp)\n\tc.Check(err, check.IsNil)\n\tc.Logf(\"%#v\", gotResp)\n\tc.Check(gotResp[\"RequestID\"], check.Equals, gotReq[\"RequestID\"])\n\tc.Check(gotResp[\"reqForwardedFor\"], check.Equals, \"1.2.3.4:12345\")\n\tc.Check(gotResp[\"msg\"], check.Equals, \"response\")\n\n\tc.Assert(gotResp[\"time\"], check.FitsTypeOf, \"\")\n\t_, err = time.Parse(time.RFC3339Nano, gotResp[\"time\"].(string))\n\tc.Check(err, check.IsNil)\n\n\tfor _, key := range []string{\"timeToStatus\", \"timeWriteBody\", \"timeTotal\"} {\n\t\tc.Assert(gotResp[key], check.FitsTypeOf, float64(0))\n\t\tc.Check(gotResp[key].(float64), check.Not(check.Equals), float64(0))\n\t}\n}\n\nfunc (s *Suite) TestLogErrorBody(c *check.C) {\n\tdec := json.NewDecoder(s.logdata)\n\n\tfor _, trial := range []struct {\n\t\tlabel      string\n\t\tstatusCode int\n\t\tsentBody   string\n\t\texpectLog  bool\n\t\texpectBody string\n\t}{\n\t\t{\"ok\", 200, \"hello world\", false, \"\"},\n\t\t{\"redir\", 302, \"<a href='http://foo.example/baz'>redir</a>\", false, \"\"},\n\t\t{\"4xx short body\", 400, \"oops\", true, \"oops\"},\n\t\t{\"4xx long body\", 400, fmt.Sprintf(\"%0*d\", sniffBytes*2, 1), true, fmt.Sprintf(\"%0*d\", sniffBytes, 0)},\n\t\t{\"5xx empty body\", 500, \"\", true, \"\"},\n\t} {\n\t\tcomment := check.Commentf(\"in trial: %q\", trial.label)\n\n\t\treq, err := http.NewRequest(\"GET\", \"https://foo.example/bar\", nil)\n\t\tc.Assert(err, check.IsNil)\n\t\tresp := httptest.NewRecorder()\n\n\t\tLogRequests(\n\t\t\thttp.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\t\t\tw.WriteHeader(trial.statusCode)\n\t\t\t\tw.Write([]byte(trial.sentBody))\n\t\t\t}),\n\t\t).ServeHTTP(resp, req.WithContext(s.ctx))\n\n\t\tgotReq := make(map[string]interface{})\n\t\terr = dec.Decode(&gotReq)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Logf(\"%#v\", gotReq)\n\t\tgotResp := make(map[string]interface{})\n\t\terr = dec.Decode(&gotResp)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Logf(\"%#v\", gotResp)\n\t\tif trial.expectLog {\n\t\t\tc.Check(gotResp[\"respBody\"], check.Equals, trial.expectBody, comment)\n\t\t} else {\n\t\t\tc.Check(gotResp[\"respBody\"], check.IsNil, comment)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "sdk/go/httpserver/metrics.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage httpserver\n\nimport (\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/auth\"\n\t\"git.arvados.org/arvados.git/sdk/go/stats\"\n\t\"github.com/gogo/protobuf/jsonpb\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype Handler interface {\n\thttp.Handler\n\n\t// Returns an http.Handler that serves the Handler's metrics\n\t// data at /metrics and /metrics.json, and passes other\n\t// requests through to next.\n\tServeAPI(token string, next http.Handler) http.Handler\n}\n\ntype metrics struct {\n\tnext         http.Handler\n\tlogger       *logrus.Logger\n\tregistry     *prometheus.Registry\n\treqDuration  *prometheus.SummaryVec\n\ttimeToStatus *prometheus.SummaryVec\n\texportProm   http.Handler\n}\n\nfunc (*metrics) Levels() []logrus.Level {\n\treturn logrus.AllLevels\n}\n\n// Fire implements logrus.Hook in order to collect data points from\n// request logs.\nfunc (m *metrics) Fire(ent *logrus.Entry) error {\n\tif tts, ok := ent.Data[\"timeToStatus\"].(stats.Duration); !ok {\n\t} else if method, ok := ent.Data[\"reqMethod\"].(string); !ok {\n\t} else if code, ok := ent.Data[\"respStatusCode\"].(int); !ok {\n\t} else {\n\t\tm.timeToStatus.WithLabelValues(strconv.Itoa(code), strings.ToLower(method)).Observe(time.Duration(tts).Seconds())\n\t}\n\treturn nil\n}\n\nfunc (m *metrics) exportJSON(w http.ResponseWriter, req *http.Request) {\n\tjm := jsonpb.Marshaler{Indent: \"  \"}\n\tmfs, _ := m.registry.Gather()\n\tw.Write([]byte{'['})\n\tfor i, mf := range mfs {\n\t\tif i > 0 {\n\t\t\tw.Write([]byte{','})\n\t\t}\n\t\tjm.Marshal(w, mf)\n\t}\n\tw.Write([]byte{']'})\n}\n\n// ServeHTTP implements http.Handler.\nfunc (m *metrics) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tm.next.ServeHTTP(w, req)\n}\n\n// ServeAPI returns a new http.Handler that serves current data at\n// metrics API endpoints (currently \"GET /metrics(.json)?\") and passes\n// other requests through to next.\n//\n// If the given token is not empty, that token must be supplied by a\n// client in order to access the metrics endpoints.\n//\n// Typical example:\n//\n//\tm := Instrument(...)\n//\tsrv := http.Server{Handler: m.ServeAPI(\"secrettoken\", m)}\nfunc (m *metrics) ServeAPI(token string, next http.Handler) http.Handler {\n\tjsonMetrics := auth.RequireLiteralToken(token, http.HandlerFunc(m.exportJSON))\n\tplainMetrics := auth.RequireLiteralToken(token, m.exportProm)\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tswitch {\n\t\tcase req.Method != \"GET\" && req.Method != \"HEAD\":\n\t\t\tnext.ServeHTTP(w, req)\n\t\tcase req.URL.Path == \"/metrics.json\":\n\t\t\tjsonMetrics.ServeHTTP(w, req)\n\t\tcase req.URL.Path == \"/metrics\":\n\t\t\tplainMetrics.ServeHTTP(w, req)\n\t\tdefault:\n\t\t\tnext.ServeHTTP(w, req)\n\t\t}\n\t})\n}\n\n// Instrument returns a new Handler that passes requests through to\n// the next handler in the stack, and tracks metrics of those\n// requests.\n//\n// For the metrics to be accurate, the caller must ensure every\n// request passed to the Handler also passes through\n// LogRequests(...), and vice versa.\n//\n// If registry is nil, a new registry is created.\n//\n// If logger is nil, logrus.StandardLogger() is used.\nfunc Instrument(registry *prometheus.Registry, logger 
*logrus.Logger, next http.Handler) Handler {\n\tif logger == nil {\n\t\tlogger = logrus.StandardLogger()\n\t}\n\tif registry == nil {\n\t\tregistry = prometheus.NewRegistry()\n\t}\n\treqDuration := prometheus.NewSummaryVec(prometheus.SummaryOpts{\n\t\tName: \"request_duration_seconds\",\n\t\tHelp: \"Summary of request duration.\",\n\t}, []string{\"code\", \"method\"})\n\ttimeToStatus := prometheus.NewSummaryVec(prometheus.SummaryOpts{\n\t\tName: \"time_to_status_seconds\",\n\t\tHelp: \"Summary of request TTFB.\",\n\t}, []string{\"code\", \"method\"})\n\tregistry.MustRegister(timeToStatus)\n\tregistry.MustRegister(reqDuration)\n\tm := &metrics{\n\t\tnext:         promhttp.InstrumentHandlerDuration(reqDuration, next),\n\t\tlogger:       logger,\n\t\tregistry:     registry,\n\t\treqDuration:  reqDuration,\n\t\ttimeToStatus: timeToStatus,\n\t\texportProm: promhttp.HandlerFor(registry, promhttp.HandlerOpts{\n\t\t\tErrorLog: logger,\n\t\t}),\n\t}\n\tm.logger.AddHook(m)\n\treturn m\n}\n"
  },
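  {
    "path": "doc/examples/go-httpserver/metrics/main.go",
    "content": "// Illustrative sketch (editor-added; not part of the Arvados source\n// tree). It follows the pattern in the Instrument doc comment in\n// sdk/go/httpserver/metrics.go: wrap the application handler with\n// LogRequests (which Instrument's documentation requires for accurate\n// metrics) and Instrument, then serve the metrics endpoints with\n// ServeAPI. \"secrettoken\" is a placeholder management token.\npackage main\n\nimport (\n\t\"net/http\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\nfunc main() {\n\tapp := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tw.Write([]byte(\"ok\\n\"))\n\t})\n\t// nil registry and logger: Instrument creates a new registry and\n\t// uses logrus.StandardLogger().\n\tm := httpserver.Instrument(nil, nil, httpserver.LogRequests(app))\n\t// GET /metrics and /metrics.json require the token; every other\n\t// request falls through to app.\n\thttp.ListenAndServe(\":8080\", m.ServeAPI(\"secrettoken\", m))\n}\n"
  },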
  {
    "path": "sdk/go/httpserver/request_limiter.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage httpserver\n\nimport (\n\t\"container/heap\"\n\t\"math\"\n\t\"net/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst MinPriority = math.MinInt64\n\n// Prometheus typically polls every 10 seconds, but it doesn't cost us\n// much to also accommodate higher frequency collection by updating\n// internal stats more frequently. (This limits time resolution only\n// for the metrics that aren't generated on the fly.)\nconst metricsUpdateInterval = time.Second\n\n// RequestLimiter wraps http.Handler, limiting the number of\n// concurrent requests being handled by the wrapped Handler. Requests\n// that arrive when the handler is already at the specified\n// concurrency limit are queued and handled in the order indicated by\n// the Priority function.\n//\n// Caller must not modify any RequestLimiter fields after calling its\n// methods.\ntype RequestLimiter struct {\n\tHandler http.Handler\n\n\t// Queue determines which queue a request is assigned to.\n\tQueue func(req *http.Request) *RequestQueue\n\n\t// Priority determines queue ordering. Requests with higher\n\t// priority are handled first. Requests with equal priority\n\t// are handled FIFO. If Priority is nil, all requests are\n\t// handled FIFO.\n\tPriority func(req *http.Request, queued time.Time) int64\n\n\t// \"concurrent_requests\", \"max_concurrent_requests\",\n\t// \"queued_requests\", and \"max_queued_requests\" metrics are\n\t// registered with Registry, if it is not nil.\n\tRegistry *prometheus.Registry\n\n\tsetupOnce     sync.Once\n\tmQueueDelay   *prometheus.SummaryVec\n\tmQueueTimeout *prometheus.SummaryVec\n\tmQueueUsage   *prometheus.GaugeVec\n\tmtx           sync.Mutex\n\trqs           map[*RequestQueue]bool // all RequestQueues in use\n}\n\ntype RequestQueue struct {\n\t// Label for metrics. No two queues should have the same label.\n\tLabel string\n\n\t// Maximum number of requests being handled at once. Beyond\n\t// this limit, requests will be queued.\n\tMaxConcurrent int\n\n\t// Maximum number of requests in the queue. 
Beyond this limit,\n\t// the lowest priority requests will return 503.\n\tMaxQueue int\n\n\t// Return 503 for any request for which Priority() returns\n\t// MinPriority if it spends longer than this in the queue\n\t// before starting processing.\n\tMaxQueueTimeForMinPriority time.Duration\n\n\tqueue    queue\n\thandling int\n}\n\ntype qent struct {\n\trq       *RequestQueue\n\tqueued   time.Time\n\tpriority int64\n\theappos  int\n\tready    chan bool // true = handle now; false = return 503 now\n}\n\ntype queue []*qent\n\nfunc (h queue) Swap(i, j int) {\n\th[i], h[j] = h[j], h[i]\n\th[i].heappos, h[j].heappos = i, j\n}\n\nfunc (h queue) Less(i, j int) bool {\n\tpi, pj := h[i].priority, h[j].priority\n\treturn pi > pj || (pi == pj && h[i].queued.Before(h[j].queued))\n}\n\nfunc (h queue) Len() int {\n\treturn len(h)\n}\n\nfunc (h *queue) Push(x interface{}) {\n\tn := len(*h)\n\tent := x.(*qent)\n\tent.heappos = n\n\t*h = append(*h, ent)\n}\n\nfunc (h *queue) Pop() interface{} {\n\tn := len(*h)\n\tent := (*h)[n-1]\n\tent.heappos = -1\n\t(*h)[n-1] = nil\n\t*h = (*h)[0 : n-1]\n\treturn ent\n}\n\nfunc (h *queue) add(ent *qent) {\n\tent.heappos = h.Len()\n\th.Push(ent)\n}\n\nfunc (h *queue) removeMax() *qent {\n\treturn heap.Pop(h).(*qent)\n}\n\nfunc (h *queue) remove(i int) {\n\theap.Remove(h, i)\n}\n\nfunc (rl *RequestLimiter) setup() {\n\tif rl.Registry != nil {\n\t\tmCurrentReqs := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: \"arvados\",\n\t\t\tName:      \"concurrent_requests\",\n\t\t\tHelp:      \"Number of requests in progress\",\n\t\t}, []string{\"queue\"})\n\t\trl.Registry.MustRegister(mCurrentReqs)\n\t\tmMaxReqs := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: \"arvados\",\n\t\t\tName:      \"max_concurrent_requests\",\n\t\t\tHelp:      \"Maximum number of concurrent requests\",\n\t\t}, []string{\"queue\"})\n\t\trl.Registry.MustRegister(mMaxReqs)\n\t\tmMaxQueue := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: \"arvados\",\n\t\t\tName:      \"max_queued_requests\",\n\t\t\tHelp:      \"Maximum number of queued requests\",\n\t\t}, []string{\"queue\"})\n\t\trl.Registry.MustRegister(mMaxQueue)\n\t\trl.mQueueUsage = prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\tNamespace: \"arvados\",\n\t\t\tName:      \"queued_requests\",\n\t\t\tHelp:      \"Number of requests in queue\",\n\t\t}, []string{\"queue\", \"priority\"})\n\t\trl.Registry.MustRegister(rl.mQueueUsage)\n\t\trl.mQueueDelay = prometheus.NewSummaryVec(prometheus.SummaryOpts{\n\t\t\tNamespace:  \"arvados\",\n\t\t\tName:       \"queue_delay_seconds\",\n\t\t\tHelp:       \"Time spent in the incoming request queue before start of processing\",\n\t\t\tObjectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},\n\t\t}, []string{\"queue\", \"priority\"})\n\t\trl.Registry.MustRegister(rl.mQueueDelay)\n\t\trl.mQueueTimeout = prometheus.NewSummaryVec(prometheus.SummaryOpts{\n\t\t\tNamespace:  \"arvados\",\n\t\t\tName:       \"queue_timeout_seconds\",\n\t\t\tHelp:       \"Time spent in the incoming request queue before client timed out or disconnected\",\n\t\t\tObjectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.005, 0.99: 0.001},\n\t\t}, []string{\"queue\", \"priority\"})\n\t\trl.Registry.MustRegister(rl.mQueueTimeout)\n\t\tgo func() {\n\t\t\tfor range time.NewTicker(metricsUpdateInterval).C {\n\t\t\t\trl.mtx.Lock()\n\t\t\t\tfor rq := range rl.rqs {\n\t\t\t\t\tvar low, normal, high int\n\t\t\t\t\tfor _, ent := range rq.queue {\n\t\t\t\t\t\tswitch 
{\n\t\t\t\t\t\tcase ent.priority < 0:\n\t\t\t\t\t\t\tlow++\n\t\t\t\t\t\tcase ent.priority > 0:\n\t\t\t\t\t\t\thigh++\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tnormal++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tmCurrentReqs.WithLabelValues(rq.Label).Set(float64(rq.handling))\n\t\t\t\t\tmMaxReqs.WithLabelValues(rq.Label).Set(float64(rq.MaxConcurrent))\n\t\t\t\t\tmMaxQueue.WithLabelValues(rq.Label).Set(float64(rq.MaxQueue))\n\t\t\t\t\trl.mQueueUsage.WithLabelValues(rq.Label, \"low\").Set(float64(low))\n\t\t\t\t\trl.mQueueUsage.WithLabelValues(rq.Label, \"normal\").Set(float64(normal))\n\t\t\t\t\trl.mQueueUsage.WithLabelValues(rq.Label, \"high\").Set(float64(high))\n\t\t\t\t}\n\t\t\t\trl.mtx.Unlock()\n\t\t\t}\n\t\t}()\n\t}\n}\n\n// caller must have lock\nfunc (rq *RequestQueue) runqueue() {\n\t// Handle entries from the queue as capacity permits\n\tfor len(rq.queue) > 0 && (rq.MaxConcurrent == 0 || rq.handling < rq.MaxConcurrent) {\n\t\trq.handling++\n\t\tent := rq.queue.removeMax()\n\t\tent.ready <- true\n\t}\n}\n\n// If the queue is too full, fail and remove the lowest-priority\n// entry. Caller must have lock. Queue must not be empty.\nfunc (rq *RequestQueue) trimqueue() {\n\tif len(rq.queue) <= rq.MaxQueue {\n\t\treturn\n\t}\n\tmin := 0\n\tfor i := range rq.queue {\n\t\tif i == 0 || rq.queue.Less(min, i) {\n\t\t\tmin = i\n\t\t}\n\t}\n\trq.queue[min].ready <- false\n\trq.queue.remove(min)\n}\n\nfunc (rl *RequestLimiter) enqueue(req *http.Request) *qent {\n\trl.mtx.Lock()\n\tdefer rl.mtx.Unlock()\n\tqtime := time.Now()\n\tvar priority int64\n\tif rl.Priority != nil {\n\t\tpriority = rl.Priority(req, qtime)\n\t}\n\tent := &qent{\n\t\trq:       rl.Queue(req),\n\t\tqueued:   qtime,\n\t\tpriority: priority,\n\t\tready:    make(chan bool, 1),\n\t\theappos:  -1,\n\t}\n\tif rl.rqs == nil {\n\t\trl.rqs = map[*RequestQueue]bool{}\n\t}\n\trl.rqs[ent.rq] = true\n\tif ent.rq.MaxConcurrent == 0 || ent.rq.MaxConcurrent > ent.rq.handling {\n\t\t// fast path, skip the queue\n\t\tent.rq.handling++\n\t\tent.ready <- true\n\t\treturn ent\n\t}\n\tent.rq.queue.add(ent)\n\tent.rq.trimqueue()\n\treturn ent\n}\n\nfunc (rl *RequestLimiter) remove(ent *qent) {\n\trl.mtx.Lock()\n\tdefer rl.mtx.Unlock()\n\tif ent.heappos >= 0 {\n\t\tent.rq.queue.remove(ent.heappos)\n\t\tent.ready <- false\n\t}\n}\n\nfunc (rl *RequestLimiter) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\trl.setupOnce.Do(rl.setup)\n\tent := rl.enqueue(req)\n\tSetResponseLogFields(req.Context(), logrus.Fields{\"priority\": ent.priority, \"queue\": ent.rq.Label})\n\tif ent.priority == MinPriority {\n\t\t// Note that MaxQueueTime==0 does not cancel a req\n\t\t// that skips the queue, because in that case\n\t\t// rl.enqueue() has already fired ready<-true and\n\t\t// rl.remove() is a no-op.\n\t\tgo func() {\n\t\t\ttime.Sleep(ent.rq.MaxQueueTimeForMinPriority)\n\t\t\trl.remove(ent)\n\t\t}()\n\t}\n\tvar ok bool\n\tselect {\n\tcase <-req.Context().Done():\n\t\trl.remove(ent)\n\t\t// we still need to wait for ent.ready, because\n\t\t// sometimes runqueue() will have already decided to\n\t\t// send true before our rl.remove() call, and in that\n\t\t// case we'll need to decrement ent.rq.handling below.\n\t\tok = <-ent.ready\n\tcase ok = <-ent.ready:\n\t}\n\n\t// Report time spent in queue in the appropriate bucket:\n\t// mQueueDelay if the request actually got processed,\n\t// mQueueTimeout if it was abandoned or cancelled before\n\t// getting a processing slot.\n\tvar series *prometheus.SummaryVec\n\tif ok {\n\t\tseries = rl.mQueueDelay\n\t} else 
{\n\t\tseries = rl.mQueueTimeout\n\t}\n\tif series != nil {\n\t\tvar qlabel string\n\t\tswitch {\n\t\tcase ent.priority < 0:\n\t\t\tqlabel = \"low\"\n\t\tcase ent.priority > 0:\n\t\t\tqlabel = \"high\"\n\t\tdefault:\n\t\t\tqlabel = \"normal\"\n\t\t}\n\t\tseries.WithLabelValues(ent.rq.Label, qlabel).Observe(time.Now().Sub(ent.queued).Seconds())\n\t}\n\n\tif !ok {\n\t\tresp.WriteHeader(http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\tdefer func() {\n\t\trl.mtx.Lock()\n\t\tdefer rl.mtx.Unlock()\n\t\tent.rq.handling--\n\t\t// unblock the next waiting request\n\t\tent.rq.runqueue()\n\t}()\n\trl.Handler.ServeHTTP(resp, req)\n}\n"
  },
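  {
    "path": "doc/examples/go-httpserver/limiter/main.go",
    "content": "// Illustrative sketch (editor-added; not part of the Arvados source\n// tree). It configures the RequestLimiter from\n// sdk/go/httpserver/request_limiter.go with one queue: up to 4\n// requests are handled concurrently, up to 16 more wait in the queue,\n// and a \"Priority\" request header (an assumption made for this\n// example, mirroring the package's own tests) decides queue order.\npackage main\n\nimport (\n\t\"net/http\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\nfunc main() {\n\tapp := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tw.Write([]byte(\"ok\\n\"))\n\t})\n\trq := &httpserver.RequestQueue{\n\t\tLabel:         \"api\",\n\t\tMaxConcurrent: 4,\n\t\tMaxQueue:      16,\n\t}\n\trl := &httpserver.RequestLimiter{\n\t\tHandler: app,\n\t\tQueue:   func(*http.Request) *httpserver.RequestQueue { return rq },\n\t\tPriority: func(req *http.Request, _ time.Time) int64 {\n\t\t\t// When the queue overflows, the lowest-priority\n\t\t\t// entries are rejected with 503 first.\n\t\t\tp, _ := strconv.ParseInt(req.Header.Get(\"Priority\"), 10, 64)\n\t\t\treturn p\n\t\t},\n\t}\n\thttp.ListenAndServe(\":8080\", rl)\n}\n"
  },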
  {
    "path": "sdk/go/httpserver/request_limiter_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage httpserver\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\ntype testHandler struct {\n\tinHandler   chan struct{}\n\tokToProceed chan struct{}\n}\n\nfunc (h *testHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\th.inHandler <- struct{}{}\n\t<-h.okToProceed\n}\n\nfunc newTestHandler() *testHandler {\n\treturn &testHandler{\n\t\tinHandler:   make(chan struct{}),\n\t\tokToProceed: make(chan struct{}),\n\t}\n}\n\nfunc (s *Suite) TestRequestLimiter1(c *check.C) {\n\th := newTestHandler()\n\trq := &RequestQueue{\n\t\tMaxConcurrent: 1}\n\tl := RequestLimiter{\n\t\tQueue:   func(*http.Request) *RequestQueue { return rq },\n\t\tHandler: h}\n\tvar wg sync.WaitGroup\n\tresps := make([]*httptest.ResponseRecorder, 10)\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tresps[i] = httptest.NewRecorder()\n\t\tgo func(i int) {\n\t\t\tl.ServeHTTP(resps[i], &http.Request{})\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\tdone := make(chan struct{})\n\tgo func() {\n\t\t// Make sure one request has entered the handler\n\t\t<-h.inHandler\n\t\t// Make sure all unsuccessful requests finish (but don't wait\n\t\t// for the one that's still waiting for okToProceed)\n\t\twg.Add(-1)\n\t\twg.Wait()\n\t\t// Wait for the last goroutine\n\t\twg.Add(1)\n\t\th.okToProceed <- struct{}{}\n\t\twg.Wait()\n\t\tdone <- struct{}{}\n\t}()\n\tselect {\n\tcase <-done:\n\tcase <-time.After(10 * time.Second):\n\t\tc.Fatal(\"test timed out, probably deadlocked\")\n\t}\n\tn200 := 0\n\tn503 := 0\n\tfor i := 0; i < 10; i++ {\n\t\tswitch resps[i].Code {\n\t\tcase 200:\n\t\t\tn200++\n\t\tcase 503:\n\t\t\tn503++\n\t\tdefault:\n\t\t\tc.Fatalf(\"Unexpected response code %d\", resps[i].Code)\n\t\t}\n\t}\n\tif n200 != 1 || n503 != 9 {\n\t\tc.Fatalf(\"Got %d 200 responses, %d 503 responses (expected 1, 9)\", n200, n503)\n\t}\n\t// Now that all 10 are finished, an 11th request should\n\t// succeed.\n\tgo func() {\n\t\t<-h.inHandler\n\t\th.okToProceed <- struct{}{}\n\t}()\n\tresp := httptest.NewRecorder()\n\tl.ServeHTTP(resp, &http.Request{})\n\tif resp.Code != 200 {\n\t\tc.Errorf(\"Got status %d on 11th request, want 200\", resp.Code)\n\t}\n}\n\nfunc (*Suite) TestRequestLimiter10(c *check.C) {\n\th := newTestHandler()\n\trq := &RequestQueue{\n\t\tMaxConcurrent: 10}\n\tl := RequestLimiter{\n\t\tQueue:   func(*http.Request) *RequestQueue { return rq },\n\t\tHandler: h}\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tl.ServeHTTP(httptest.NewRecorder(), &http.Request{})\n\t\t\twg.Done()\n\t\t}()\n\t\t// Make sure the handler starts before we initiate the\n\t\t// next request, but don't let it finish yet.\n\t\t<-h.inHandler\n\t}\n\tfor i := 0; i < 10; i++ {\n\t\th.okToProceed <- struct{}{}\n\t}\n\twg.Wait()\n}\n\nfunc (*Suite) TestRequestLimiterQueuePriority(c *check.C) {\n\th := newTestHandler()\n\trq := &RequestQueue{\n\t\tMaxConcurrent: 1000,\n\t\tMaxQueue:      200,\n\t}\n\trl := RequestLimiter{\n\t\tHandler: h,\n\t\tQueue:   func(*http.Request) *RequestQueue { return rq },\n\t\tPriority: func(r *http.Request, _ time.Time) int64 {\n\t\t\tp, _ := strconv.ParseInt(r.Header.Get(\"Priority\"), 10, 64)\n\t\t\treturn p\n\t\t}}\n\n\tc.Logf(\"starting initial requests\")\n\tfor i := 0; i < rq.MaxConcurrent; i++ {\n\t\tgo func() {\n\t\t\trl.ServeHTTP(httptest.NewRecorder(), 
&http.Request{Header: http.Header{\"No-Priority\": {\"x\"}}})\n\t\t}()\n\t}\n\tc.Logf(\"waiting for initial requests to consume all MaxConcurrent slots\")\n\tfor i := 0; i < rq.MaxConcurrent; i++ {\n\t\t<-h.inHandler\n\t}\n\n\tc.Logf(\"starting %d priority=MinPriority requests (should respond 503 immediately)\", rq.MaxQueue)\n\tvar wgX sync.WaitGroup\n\tfor i := 0; i < rq.MaxQueue; i++ {\n\t\twgX.Add(1)\n\t\tgo func() {\n\t\t\tdefer wgX.Done()\n\t\t\tresp := httptest.NewRecorder()\n\t\t\trl.ServeHTTP(resp, &http.Request{Header: http.Header{\"Priority\": {fmt.Sprintf(\"%d\", MinPriority)}}})\n\t\t\tc.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)\n\t\t}()\n\t}\n\twgX.Wait()\n\n\tc.Logf(\"starting %d priority=MinPriority requests (should respond 503 after 100 ms)\", rq.MaxQueue)\n\t// Usage docs say the caller isn't allowed to change fields\n\t// after first use, but we secretly know it's OK to change\n\t// this field on the fly as long as no requests are arriving\n\t// concurrently.\n\trq.MaxQueueTimeForMinPriority = time.Millisecond * 100\n\tfor i := 0; i < rq.MaxQueue; i++ {\n\t\twgX.Add(1)\n\t\tgo func() {\n\t\t\tdefer wgX.Done()\n\t\t\tresp := httptest.NewRecorder()\n\t\t\tt0 := time.Now()\n\t\t\trl.ServeHTTP(resp, &http.Request{Header: http.Header{\"Priority\": {fmt.Sprintf(\"%d\", MinPriority)}}})\n\t\t\tc.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)\n\t\t\telapsed := time.Since(t0)\n\t\t\tc.Check(elapsed > rq.MaxQueueTimeForMinPriority, check.Equals, true)\n\t\t\tc.Check(elapsed < rq.MaxQueueTimeForMinPriority*10, check.Equals, true)\n\t\t}()\n\t}\n\twgX.Wait()\n\n\tc.Logf(\"starting %d priority=1 and %d priority=2 requests\", rq.MaxQueue, rq.MaxQueue)\n\tvar wg1, wg2 sync.WaitGroup\n\twg1.Add(rq.MaxQueue)\n\twg2.Add(rq.MaxQueue)\n\tfor i := 0; i < rq.MaxQueue*2; i++ {\n\t\ti := i\n\t\tgo func() {\n\t\t\tpri := (i & 1) + 1\n\t\t\tresp := httptest.NewRecorder()\n\t\t\trl.ServeHTTP(resp, &http.Request{Header: http.Header{\"Priority\": {fmt.Sprintf(\"%d\", pri)}}})\n\t\t\tif pri == 1 {\n\t\t\t\tc.Check(resp.Code, check.Equals, http.StatusServiceUnavailable)\n\t\t\t\twg1.Done()\n\t\t\t} else {\n\t\t\t\tc.Check(resp.Code, check.Equals, http.StatusOK)\n\t\t\t\twg2.Done()\n\t\t\t}\n\t\t}()\n\t}\n\n\tc.Logf(\"waiting for queued priority=1 requests to fail\")\n\twg1.Wait()\n\n\tc.Logf(\"allowing initial requests to proceed\")\n\tfor i := 0; i < rq.MaxConcurrent; i++ {\n\t\th.okToProceed <- struct{}{}\n\t}\n\n\tc.Logf(\"allowing queued priority=2 requests to proceed\")\n\tfor i := 0; i < rq.MaxQueue; i++ {\n\t\t<-h.inHandler\n\t\th.okToProceed <- struct{}{}\n\t}\n\tc.Logf(\"waiting for queued priority=2 requests to succeed\")\n\twg2.Wait()\n}\n"
  },
  {
    "path": "sdk/go/httpserver/responsewriter.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage httpserver\n\nimport (\n\t\"bufio\"\n\t\"net\"\n\t\"net/http\"\n\t\"time\"\n)\n\nconst sniffBytes = 1024\n\ntype ResponseWriter interface {\n\thttp.ResponseWriter\n\tWroteStatus() int\n\tWroteBodyBytes() int\n\tSniffed() []byte\n}\n\n// responseWriter wraps http.ResponseWriter and exposes the status\n// sent, the number of bytes sent to the client, and the last write\n// error.\ntype responseWriter struct {\n\thttp.ResponseWriter\n\twroteStatus    int   // First status given to WriteHeader()\n\twroteBodyBytes int   // Bytes successfully written\n\terr            error // Last error returned from Write()\n\tsniffed        []byte\n}\n\nfunc WrapResponseWriter(orig http.ResponseWriter) ResponseWriter {\n\treturn &responseWriter{ResponseWriter: orig}\n}\n\nfunc (w *responseWriter) WriteHeader(s int) {\n\tif w.wroteStatus == 0 {\n\t\tw.wroteStatus = s\n\t}\n\t// ...else it's too late to change the status seen by the\n\t// client -- but we call the wrapped WriteHeader() anyway so\n\t// it can log a warning.\n\tw.ResponseWriter.WriteHeader(s)\n}\n\nfunc (w *responseWriter) Write(data []byte) (n int, err error) {\n\tif w.wroteStatus == 0 {\n\t\tw.WriteHeader(http.StatusOK)\n\t} else if w.wroteStatus >= 400 {\n\t\tw.sniff(data)\n\t}\n\tn, err = w.ResponseWriter.Write(data)\n\tw.wroteBodyBytes += n\n\tw.err = err\n\treturn\n}\n\nfunc (w *responseWriter) WroteStatus() int {\n\treturn w.wroteStatus\n}\n\nfunc (w *responseWriter) WroteBodyBytes() int {\n\treturn w.wroteBodyBytes\n}\n\nfunc (w *responseWriter) Err() error {\n\treturn w.err\n}\n\nfunc (w *responseWriter) sniff(data []byte) {\n\tmax := sniffBytes - len(w.sniffed)\n\tif max <= 0 {\n\t\treturn\n\t} else if max < len(data) {\n\t\tdata = data[:max]\n\t}\n\tw.sniffed = append(w.sniffed, data...)\n}\n\nfunc (w *responseWriter) Sniffed() []byte {\n\treturn w.sniffed\n}\n\nfunc (w *responseWriter) Unwrap() http.ResponseWriter {\n\treturn w.ResponseWriter\n}\n\n// ResponseControllerShim uses a ResponseController to re-add the\n// optional interface methods to a ResponseWriter that has lost them\n// via wrapping by middleware.\n//\n// This allows us to combine old code (like x/net/websocket) with\n// middleware that doesn't explicitly support the optional interfaces\n// (like responseTimer and responseWriter here).\ntype ResponseControllerShim struct{ http.ResponseWriter }\n\nfunc (s ResponseControllerShim) EnableFullDuplex() error {\n\treturn http.NewResponseController(s.ResponseWriter).EnableFullDuplex()\n}\n\nfunc (s ResponseControllerShim) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn http.NewResponseController(s.ResponseWriter).Hijack()\n}\n\nfunc (s ResponseControllerShim) SetReadDeadline(d time.Time) error {\n\treturn http.NewResponseController(s.ResponseWriter).SetReadDeadline(d)\n}\n\nfunc (s ResponseControllerShim) SetWriteDeadline(d time.Time) error {\n\treturn http.NewResponseController(s.ResponseWriter).SetWriteDeadline(d)\n}\n\nfunc (s ResponseControllerShim) Flush() error {\n\treturn http.NewResponseController(s.ResponseWriter).Flush()\n}\n"
  },
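  {
    "path": "doc/examples/go-httpserver/responsewriter/main.go",
    "content": "// Illustrative sketch (editor-added; not part of the Arvados source\n// tree). It shows what WrapResponseWriter from\n// sdk/go/httpserver/responsewriter.go adds to http.ResponseWriter:\n// after the inner handler returns, the wrapper reports the first\n// status code written, the body size, and (for status >= 400) a\n// sniffed copy of the first part of the body, up to 1024 bytes.\npackage main\n\nimport (\n\t\"log\"\n\t\"net/http\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n)\n\nfunc withStats(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tww := httpserver.WrapResponseWriter(w)\n\t\tnext.ServeHTTP(ww, req)\n\t\tlog.Printf(\"status=%d bytes=%d errbody=%q\",\n\t\t\tww.WroteStatus(), ww.WroteBodyBytes(), ww.Sniffed())\n\t})\n}\n\nfunc main() {\n\th := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\thttp.Error(w, \"not found\", http.StatusNotFound)\n\t})\n\thttp.ListenAndServe(\":8080\", withStats(h))\n}\n"
  },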
  {
    "path": "sdk/go/keepclient/collectionreader.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\nimport (\n\t\"errors\"\n\t\"os\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// ErrNoManifest indicates the given collection has no manifest\n// information (e.g., manifest_text was excluded by a \"select\"\n// parameter when retrieving the collection record).\nvar ErrNoManifest = errors.New(\"Collection has no manifest\")\n\n// CollectionFileReader returns a Reader that reads content from a single file\n// in the collection. The filename must be relative to the root of the\n// collection.  A leading prefix of \"/\" or \"./\" in the filename is ignored.\nfunc (kc *KeepClient) CollectionFileReader(collection map[string]interface{}, filename string) (arvados.File, error) {\n\tmText, ok := collection[\"manifest_text\"].(string)\n\tif !ok {\n\t\treturn nil, ErrNoManifest\n\t}\n\tfs, err := (&arvados.Collection{ManifestText: mText}).FileSystem(nil, kc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn fs.OpenFile(filename, os.O_RDONLY, 0)\n}\n"
  },
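  {
    "path": "doc/examples/go-keepclient/readfile/main.go",
    "content": "// Illustrative sketch (editor-added; not part of the Arvados source\n// tree). It reads one file from a collection using\n// CollectionFileReader from sdk/go/keepclient/collectionreader.go.\n// The collection UUID is taken from the command line, \"ok.txt\" is a\n// hypothetical filename, and standard client setup from\n// ARVADOS_API_HOST/ARVADOS_API_TOKEN is assumed.\npackage main\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n)\n\nfunc main() {\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tkc, err := keepclient.MakeKeepClient(arv)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// The record must include manifest_text; otherwise\n\t// CollectionFileReader returns ErrNoManifest.\n\tvar coll map[string]interface{}\n\tif err := arv.Get(\"collections\", os.Args[1], nil, &coll); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\trdr, err := kc.CollectionFileReader(coll, \"ok.txt\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer rdr.Close()\n\tio.Copy(os.Stdout, rdr)\n}\n"
  },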
  {
    "path": "sdk/go/keepclient/collectionreader_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\nimport (\n\t\"crypto/md5\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"net/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&CollectionReaderUnit{})\n\ntype CollectionReaderUnit struct {\n\tarv     *arvadosclient.ArvadosClient\n\tkc      *KeepClient\n\thandler SuccessHandler\n}\n\nfunc (s *CollectionReaderUnit) SetUpTest(c *check.C) {\n\tvar err error\n\ts.arv, err = arvadosclient.MakeArvadosClient()\n\tc.Assert(err, check.IsNil)\n\ts.arv.ApiToken = arvadostest.ActiveToken\n\n\ts.kc, err = MakeKeepClient(s.arv)\n\tc.Assert(err, check.IsNil)\n\n\ts.handler = SuccessHandler{\n\t\tdisk: make(map[string][]byte),\n\t\tlock: make(chan struct{}, 1),\n\t\tops:  new(int),\n\t}\n\tlocalRoots := make(map[string]string)\n\tfor i, k := range RunSomeFakeKeepServers(s.handler, 4) {\n\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t}\n\ts.kc.SetServiceRoots(localRoots, localRoots, nil)\n}\n\ntype SuccessHandler struct {\n\tdisk map[string][]byte\n\tlock chan struct{} // channel with buffer==1: full when an operation is in progress.\n\tops  *int          // number of operations completed\n}\n\nfunc (h SuccessHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tswitch req.Method {\n\tcase \"PUT\":\n\t\tbuf, err := ioutil.ReadAll(req.Body)\n\t\tif err != nil {\n\t\t\tresp.WriteHeader(500)\n\t\t\treturn\n\t\t}\n\t\tpdh := fmt.Sprintf(\"%x+%d\", md5.Sum(buf), len(buf))\n\t\th.lock <- struct{}{}\n\t\th.disk[pdh] = buf\n\t\tif h.ops != nil {\n\t\t\t(*h.ops)++\n\t\t}\n\t\t<-h.lock\n\t\tresp.Write([]byte(pdh))\n\tcase \"GET\":\n\t\tpdh := req.URL.Path[1:]\n\t\th.lock <- struct{}{}\n\t\tbuf, ok := h.disk[pdh]\n\t\tif h.ops != nil {\n\t\t\t(*h.ops)++\n\t\t}\n\t\t<-h.lock\n\t\tif !ok {\n\t\t\tresp.WriteHeader(http.StatusNotFound)\n\t\t} else {\n\t\t\tresp.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", len(buf)))\n\t\t\tresp.Write(buf)\n\t\t}\n\tdefault:\n\t\tresp.WriteHeader(http.StatusMethodNotAllowed)\n\t}\n}\n\ntype rdrTest struct {\n\tmt   string      // manifest text\n\tf    string      // filename\n\twant interface{} // error or string to expect\n}\n\nfunc (s *CollectionReaderUnit) TestCollectionReaderContent(c *check.C) {\n\ts.kc.PutB([]byte(\"foo\"))\n\ts.kc.PutB([]byte(\"bar\"))\n\ts.kc.PutB([]byte(\"Hello world\\n\"))\n\ts.kc.PutB([]byte(\"\"))\n\n\tmt := arvadostest.PathologicalManifest\n\n\tfor _, testCase := range []rdrTest{\n\t\t{mt: mt, f: \"zzzz\", want: os.ErrNotExist},\n\t\t{mt: mt, f: \"frob\", want: os.ErrNotExist},\n\t\t{mt: mt, f: \"/segmented/frob\", want: \"frob\"},\n\t\t{mt: mt, f: \"./segmented/frob\", want: \"frob\"},\n\t\t{mt: mt, f: \"/f\", want: \"f\"},\n\t\t{mt: mt, f: \"./f\", want: \"f\"},\n\t\t{mt: mt, f: \"foo bar//baz\", want: \"foo\"},\n\t\t{mt: mt, f: \"foo/zero\", want: \"\"},\n\t\t{mt: mt, f: \"zero@0\", want: \"\"},\n\t\t{mt: mt, f: \"zero@1\", want: \"\"},\n\t\t{mt: mt, f: \"zero@4\", want: \"\"},\n\t\t{mt: mt, f: \"zero@9\", want: \"\"},\n\t\t{mt: mt, f: \"f\", want: \"f\"},\n\t\t{mt: mt, f: \"ooba\", want: \"ooba\"},\n\t\t{mt: mt, f: \"overlapReverse/o\", want: \"o\"},\n\t\t{mt: mt, f: \"overlapReverse/oo\", want: \"oo\"},\n\t\t{mt: mt, f: \"overlapReverse/ofoo\", want: \"ofoo\"},\n\t\t{mt: mt, f: \"foo 
bar/baz\", want: \"foo\"},\n\t\t{mt: mt, f: \"segmented/frob\", want: \"frob\"},\n\t\t{mt: mt, f: \"segmented/oof\", want: \"oof\"},\n\t} {\n\t\tc.Logf(\"%#v\", testCase)\n\t\trdr, err := s.kc.CollectionFileReader(map[string]interface{}{\"manifest_text\": testCase.mt}, testCase.f)\n\t\tswitch want := testCase.want.(type) {\n\t\tcase error:\n\t\t\tc.Check(rdr, check.IsNil)\n\t\t\tc.Check(err, check.Equals, want)\n\t\tcase string:\n\t\t\tbuf := make([]byte, len(want))\n\t\t\t_, err := io.ReadFull(rdr, buf)\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tfor i := 0; i < 4; i++ {\n\t\t\t\tc.Check(string(buf), check.Equals, want)\n\t\t\t\tn, err := rdr.Read(buf)\n\t\t\t\tc.Check(n, check.Equals, 0)\n\t\t\t\tc.Check(err, check.Equals, io.EOF)\n\t\t\t}\n\n\t\t\tfor a := len(want) - 2; a >= 0; a-- {\n\t\t\t\tfor b := a + 1; b <= len(want); b++ {\n\t\t\t\t\toffset, err := rdr.Seek(int64(a), io.SeekStart)\n\t\t\t\t\tc.Logf(\"...a=%d, b=%d\", a, b)\n\t\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\t\tc.Check(offset, check.Equals, int64(a))\n\t\t\t\t\tbuf := make([]byte, b-a)\n\t\t\t\t\tn, err := io.ReadFull(rdr, buf)\n\t\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\t\tc.Check(n, check.Equals, b-a)\n\t\t\t\t\tc.Check(string(buf), check.Equals, want[a:b])\n\t\t\t\t}\n\t\t\t}\n\t\t\toffset, err := rdr.Seek(-1, io.SeekStart)\n\t\t\tc.Check(err, check.NotNil)\n\t\t\tc.Check(offset, check.Equals, int64(len(want)))\n\n\t\t\tc.Check(rdr.Close(), check.Equals, nil)\n\t\t}\n\t}\n}\n\nfunc (s *CollectionReaderUnit) TestCollectionReaderManyBlocks(c *check.C) {\n\th := md5.New()\n\tbuf := make([]byte, 4096)\n\tlocs := make([]string, len(buf))\n\ttestdata := make([]byte, 0, len(buf)*len(buf))\n\tfilesize := 0\n\tfor i := range locs {\n\t\t_, err := rand.Read(buf[:i])\n\t\tc.Assert(err, check.IsNil)\n\t\th.Write(buf[:i])\n\t\tlocs[i], _, err = s.kc.PutB(buf[:i])\n\t\tc.Assert(err, check.IsNil)\n\t\tfilesize += i\n\t\ttestdata = append(testdata, buf[:i]...)\n\t}\n\tmanifest := \"./random \" + strings.Join(locs, \" \") + \" 0:\" + strconv.Itoa(filesize) + \":bytes.bin\\n\"\n\tdataMD5 := h.Sum(nil)\n\n\tcheckMD5 := md5.New()\n\trdr, err := s.kc.CollectionFileReader(map[string]interface{}{\"manifest_text\": manifest}, \"random/bytes.bin\")\n\tc.Assert(err, check.IsNil)\n\tdefer rdr.Close()\n\n\t_, err = io.Copy(checkMD5, rdr)\n\tc.Check(err, check.IsNil)\n\t_, err = rdr.Read(make([]byte, 1))\n\tc.Check(err, check.Equals, io.EOF)\n\tc.Check(checkMD5.Sum(nil), check.DeepEquals, dataMD5)\n\n\tsize, err := rdr.Seek(0, io.SeekEnd)\n\tc.Check(err, check.IsNil)\n\tbuf = make([]byte, len(testdata))\n\tcopy(buf, testdata)\n\tcurPos := size\n\tfor i := 0; i < 16; i++ {\n\t\toffset := rand.Intn(len(buf) - 1)\n\t\tcount := rand.Intn(len(buf) - offset)\n\t\tif rand.Intn(2) == 0 {\n\t\t\tcurPos, _ = rdr.Seek(int64(offset)-curPos, io.SeekCurrent)\n\t\t} else {\n\t\t\tcurPos, _ = rdr.Seek(int64(offset), io.SeekStart)\n\t\t}\n\t\tc.Check(curPos, check.Equals, int64(offset))\n\t\tfor count > 0 {\n\t\t\tn, err := rdr.Read(buf[offset : offset+count])\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(n > 0, check.Equals, true)\n\t\t\toffset += n\n\t\t\tcount -= n\n\t\t}\n\t\tcurPos, err = rdr.Seek(0, io.SeekCurrent)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(curPos, check.Equals, int64(offset))\n\t}\n\tc.Check(md5.Sum(buf), check.DeepEquals, md5.Sum(testdata))\n\tc.Check(buf[:1000], check.DeepEquals, testdata[:1000])\n\n\texpectPos := curPos + size + 12345\n\tcurPos, err = rdr.Seek(size+12345, io.SeekCurrent)\n\tc.Check(err, 
check.IsNil)\n\tc.Check(curPos, check.Equals, expectPos)\n\n\tcurPos, err = rdr.Seek(8-curPos, io.SeekCurrent)\n\tc.Check(err, check.IsNil)\n\tc.Check(curPos, check.Equals, int64(8))\n\n\tcurPos, err = rdr.Seek(-9, io.SeekCurrent)\n\tc.Check(err, check.NotNil)\n\tc.Check(curPos, check.Equals, int64(8))\n}\n\nfunc (s *CollectionReaderUnit) TestCollectionReaderCloseEarly(c *check.C) {\n\t// Disable cache\n\ts.kc.gatewayStack = &keepViaHTTP{s.kc}\n\n\ts.kc.PutB([]byte(\"foo\"))\n\ts.kc.PutB([]byte(\"bar\"))\n\ts.kc.PutB([]byte(\"baz\"))\n\n\tmt := \". \"\n\tfor i := 0; i < 300; i++ {\n\t\tmt += \"acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 73feffa4b7f6bb68e44cf984c85f6e88+3 \"\n\t}\n\tmt += \"0:2700:foo900.txt\\n\"\n\n\t// Grab the stub server's lock, ensuring our cfReader doesn't\n\t// get anything back from its first call to kc.Get() before we\n\t// have a chance to call Close().\n\ts.handler.lock <- struct{}{}\n\topsBeforeRead := *s.handler.ops\n\n\trdr, err := s.kc.CollectionFileReader(map[string]interface{}{\"manifest_text\": mt}, \"foo900.txt\")\n\tc.Assert(err, check.IsNil)\n\n\tfirstReadDone := make(chan struct{})\n\tgo func() {\n\t\tn, err := rdr.Read(make([]byte, 3))\n\t\tc.Check(n, check.Equals, 3)\n\t\tc.Check(err, check.IsNil)\n\t\tclose(firstReadDone)\n\t}()\n\n\t// Release the stub server's lock. The first GET operation will proceed.\n\t<-s.handler.lock\n\n\t// Make sure our first read operation consumes the data\n\t// received from the first GET.\n\t<-firstReadDone\n\n\terr = rdr.Close()\n\tc.Check(err, check.IsNil)\n\n\t// Stub should have handled exactly one GET request.\n\tc.Check(*s.handler.ops, check.Equals, opsBeforeRead+1)\n}\n\nfunc (s *CollectionReaderUnit) TestCollectionReaderDataError(c *check.C) {\n\tmanifest := \". ffffffffffffffffffffffffffffffff+1 0:1:notfound.txt\\n\"\n\tbuf := make([]byte, 1)\n\trdr, err := s.kc.CollectionFileReader(map[string]interface{}{\"manifest_text\": manifest}, \"notfound.txt\")\n\tc.Check(err, check.IsNil)\n\tfor i := 0; i < 2; i++ {\n\t\t_, err = io.ReadFull(rdr, buf)\n\t\tc.Check(err, check.NotNil)\n\t\tc.Check(err, check.Not(check.Equals), io.EOF)\n\t}\n\tc.Check(rdr.Close(), check.IsNil)\n}\n"
  },
  {
    "path": "sdk/go/keepclient/discover.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\nimport (\n\t\"crypto/md5\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os/signal\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n)\n\n// RefreshServiceDiscovery clears the Keep service discovery cache.\nfunc RefreshServiceDiscovery() {\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\tsvcListCacheMtx.Lock()\n\tdefer svcListCacheMtx.Unlock()\n\tfor _, ent := range svcListCache {\n\t\twg.Add(1)\n\t\tclear := ent.clear\n\t\tgo func() {\n\t\t\tclear <- struct{}{}\n\t\t\twg.Done()\n\t\t}()\n\t}\n}\n\n// RefreshServiceDiscoveryOnSIGHUP installs a signal handler that calls\n// RefreshServiceDiscovery when SIGHUP is received.\nfunc RefreshServiceDiscoveryOnSIGHUP() {\n\tsvcListCacheMtx.Lock()\n\tdefer svcListCacheMtx.Unlock()\n\tif svcListCacheSignal != nil {\n\t\treturn\n\t}\n\tsvcListCacheSignal = make(chan os.Signal, 1)\n\tsignal.Notify(svcListCacheSignal, syscall.SIGHUP)\n\tgo func() {\n\t\tfor range svcListCacheSignal {\n\t\t\tRefreshServiceDiscovery()\n\t\t}\n\t}()\n}\n\nvar (\n\tsvcListCache       = map[string]cachedSvcList{}\n\tsvcListCacheSignal chan os.Signal\n\tsvcListCacheMtx    sync.Mutex\n)\n\ntype cachedSvcList struct {\n\tarv    *arvadosclient.ArvadosClient\n\tlatest chan svcList\n\tclear  chan struct{}\n}\n\n// Check for new services list every few minutes. Send the latest list\n// to the \"latest\" channel as needed.\nfunc (ent *cachedSvcList) poll() {\n\twakeup := make(chan struct{})\n\n\treplace := make(chan svcList)\n\tgo func() {\n\t\twakeup <- struct{}{}\n\t\tcurrent := <-replace\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ent.clear:\n\t\t\t\twakeup <- struct{}{}\n\t\t\t\t// Wait here for the next success, in\n\t\t\t\t// order to avoid returning stale\n\t\t\t\t// results on the \"latest\" channel.\n\t\t\t\tcurrent = <-replace\n\t\t\tcase current = <-replace:\n\t\t\tcase ent.latest <- current:\n\t\t\t}\n\t\t}\n\t}()\n\n\tokDelay := 5 * time.Minute\n\terrDelay := 3 * time.Second\n\ttimer := time.NewTimer(okDelay)\n\tfor {\n\t\tselect {\n\t\tcase <-timer.C:\n\t\tcase <-wakeup:\n\t\t\tif !timer.Stop() {\n\t\t\t\t// Lost race stopping timer; skip extra firing\n\t\t\t\t<-timer.C\n\t\t\t}\n\t\t}\n\t\tvar next svcList\n\t\terr := ent.arv.Call(\"GET\", \"keep_services\", \"\", \"accessible\", nil, &next)\n\t\tif err != nil {\n\t\t\tif ent.arv.Logger != nil {\n\t\t\t\tent.arv.Logger.WithError(err).Warnf(\"error retrieving services list (retrying in %v)\", errDelay)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"WARNING: Error retrieving services list: %s (retrying in %v)\", err, errDelay)\n\t\t\t}\n\t\t\ttimer.Reset(errDelay)\n\t\t\tcontinue\n\t\t}\n\t\treplace <- next\n\t\ttimer.Reset(okDelay)\n\t}\n}\n\n// discoverServices gets the list of available keep services from\n// the API server.\n//\n// If a list of services is provided in the arvadosclient (e.g., from\n// an environment variable or local config), that list is used\n// instead.\n//\n// If an API call is made, the result is cached for 5 minutes or until\n// ClearCache() is called, and during this interval it is reused by\n// other KeepClients that use the same API server host.\nfunc (kc *KeepClient) discoverServices() error {\n\tif kc.disableDiscovery {\n\t\treturn nil\n\t}\n\n\tif kc.Arvados.KeepServiceURIs != nil {\n\t\tkc.disableDiscovery = true\n\t\tkc.foundNonDiskSvc = true\n\t\tkc.replicasPerService 
= 0\n\t\troots := make(map[string]string)\n\t\tfor i, uri := range kc.Arvados.KeepServiceURIs {\n\t\t\troots[fmt.Sprintf(\"00000-bi6l4-%015d\", i)] = strings.TrimSuffix(uri, \"/\")\n\t\t}\n\t\tkc.setServiceRoots(roots, roots, roots)\n\t\treturn nil\n\t}\n\n\tif kc.Arvados.Cluster != nil && os.Getenv(\"ARVADOS_USE_KEEP_ACCESSIBLE_API\") == \"\" {\n\t\tkc.disableDiscovery = true\n\t\troots := make(map[string]string)\n\t\tfor url, info := range kc.Arvados.Cluster.Services.Keepstore.InternalURLs {\n\t\t\trvz := info.Rendezvous\n\t\t\tif rvz == \"\" {\n\t\t\t\trvz = url.String()\n\t\t\t}\n\t\t\t// If info.Rendezvous is 15 ascii alphanums,\n\t\t\t// we use it verbatim as the last 15 chars of\n\t\t\t// the UUID. Otherwise, we hash\n\t\t\t// info.Rendezvous (or, if empty, the URL) and\n\t\t\t// use the first 15 chars of the hash as the\n\t\t\t// last 15 chars of the UUID. This matches the\n\t\t\t// behavior of\n\t\t\t// services/api/app/models/keep_service.rb.\n\t\t\trvzhash := len(rvz) != 15\n\t\t\tfor i := 0; i < len(rvz) && !rvzhash; i++ {\n\t\t\t\trvzhash = !(rvz[i] >= '0' && rvz[i] <= '9' ||\n\t\t\t\t\trvz[i] >= 'a' && rvz[i] <= 'z' ||\n\t\t\t\t\trvz[i] >= 'A' && rvz[i] <= 'Z')\n\t\t\t}\n\t\t\tif rvzhash {\n\t\t\t\trvz = fmt.Sprintf(\"%x\", md5.Sum([]byte(rvz)))[:15]\n\t\t\t}\n\t\t\tuuid := kc.Arvados.Cluster.ClusterID + \"-bi6l4-\" + rvz\n\t\t\troots[uuid] = strings.TrimSuffix(url.String(), \"/\")\n\t\t}\n\t\tkc.setServiceRoots(roots, roots, nil)\n\t\treturn nil\n\t}\n\n\tif kc.Arvados.ApiServer == \"\" {\n\t\treturn fmt.Errorf(\"Arvados client is not configured (target API host is not set). Maybe env var ARVADOS_API_HOST should be set first?\")\n\t}\n\n\tsvcListCacheMtx.Lock()\n\tcacheEnt, ok := svcListCache[kc.Arvados.ApiServer]\n\tif !ok {\n\t\tarv := *kc.Arvados\n\t\tcacheEnt = cachedSvcList{\n\t\t\tlatest: make(chan svcList),\n\t\t\tclear:  make(chan struct{}),\n\t\t\tarv:    &arv,\n\t\t}\n\t\tgo cacheEnt.poll()\n\t\tsvcListCache[kc.Arvados.ApiServer] = cacheEnt\n\t}\n\tsvcListCacheMtx.Unlock()\n\n\tselect {\n\tcase <-time.After(time.Minute):\n\t\treturn errors.New(\"timed out while getting initial list of keep services\")\n\tcase sl := <-cacheEnt.latest:\n\t\treturn kc.loadKeepServers(sl)\n\t}\n}\n\nfunc (kc *KeepClient) RefreshServiceDiscovery() {\n\tsvcListCacheMtx.Lock()\n\tent, ok := svcListCache[kc.Arvados.ApiServer]\n\tsvcListCacheMtx.Unlock()\n\tif !ok || kc.Arvados.KeepServiceURIs != nil || kc.disableDiscovery {\n\t\treturn\n\t}\n\tent.clear <- struct{}{}\n}\n\n// LoadKeepServicesFromJSON gets list of available keep services from\n// given JSON and disables automatic service discovery.\nfunc (kc *KeepClient) LoadKeepServicesFromJSON(services string) error {\n\tkc.disableDiscovery = true\n\n\tvar list svcList\n\tdec := json.NewDecoder(strings.NewReader(services))\n\tif err := dec.Decode(&list); err != nil {\n\t\treturn err\n\t}\n\n\treturn kc.loadKeepServers(list)\n}\n\nfunc (kc *KeepClient) loadKeepServers(list svcList) error {\n\tlisted := make(map[string]bool)\n\tlocalRoots := make(map[string]string)\n\tgatewayRoots := make(map[string]string)\n\twritableLocalRoots := make(map[string]string)\n\n\t// replicasPerService is 1 for disks; unknown or unlimited otherwise\n\tkc.replicasPerService = 1\n\n\tfor _, service := range list.Items {\n\t\tscheme := \"http\"\n\t\tif service.SSL {\n\t\t\tscheme = \"https\"\n\t\t}\n\t\turl := fmt.Sprintf(\"%s://%s:%d\", scheme, service.Hostname, service.Port)\n\n\t\t// Skip duplicates\n\t\tif listed[url] {\n\t\t\tcontinue\n\t\t}\n\t\tlisted[url] 
= true\n\n\t\tlocalRoots[service.Uuid] = url\n\t\tif !service.ReadOnly {\n\t\t\twritableLocalRoots[service.Uuid] = url\n\t\t\tif service.SvcType != \"disk\" {\n\t\t\t\tkc.replicasPerService = 0\n\t\t\t}\n\t\t}\n\n\t\tif service.SvcType != \"disk\" {\n\t\t\tkc.foundNonDiskSvc = true\n\t\t}\n\n\t\t// Gateway services are only used when specified by\n\t\t// UUID, so there's nothing to gain by filtering them\n\t\t// by service type. Including all accessible services\n\t\t// (gateway and otherwise) merely accommodates more\n\t\t// service configurations.\n\t\tgatewayRoots[service.Uuid] = url\n\t}\n\n\tkc.setServiceRoots(localRoots, writableLocalRoots, gatewayRoots)\n\treturn nil\n}\n"
  },
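  {
    "path": "sdk/go/keepclient/examples/rendezvous/main.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical illustration (not part of the Arvados source tree): a\n// minimal sketch of the rendezvous-suffix rule that discoverServices()\n// in discover.go uses to derive keepstore UUIDs from cluster config.\n// If Rendezvous is exactly 15 ASCII alphanumerics it is used verbatim\n// as the last 15 characters of the UUID; otherwise the md5 hex digest\n// of Rendezvous (or, if Rendezvous is empty, of the URL) is truncated\n// to 15 characters. The function name rendezvousSuffix is ours, not\n// part of the SDK.\npackage main\n\nimport (\n\t\"crypto/md5\"\n\t\"fmt\"\n)\n\nfunc rendezvousSuffix(rendezvous, url string) string {\n\trvz := rendezvous\n\tif rvz == \"\" {\n\t\trvz = url\n\t}\n\thash := len(rvz) != 15\n\tfor i := 0; i < len(rvz) && !hash; i++ {\n\t\tc := rvz[i]\n\t\thash = !(c >= '0' && c <= '9' ||\n\t\t\tc >= 'a' && c <= 'z' ||\n\t\t\tc >= 'A' && c <= 'Z')\n\t}\n\tif hash {\n\t\trvz = fmt.Sprintf(\"%x\", md5.Sum([]byte(rvz)))[:15]\n\t}\n\treturn rvz\n}\n\nfunc main() {\n\t// 15 alphanumerics: used verbatim.\n\tfmt.Println(\"zzzzz-bi6l4-\" + rendezvousSuffix(\"abcdefghijklmno\", \"https://[::1]:12345/\"))\n\t// Wrong length: md5(\"xyz\") truncated to 15 hex digits.\n\tfmt.Println(\"zzzzz-bi6l4-\" + rendezvousSuffix(\"xyz\", \"https://[::1]:54321/\"))\n\t// Empty Rendezvous: the URL (with its trailing slash, matching\n\t// the config key) is hashed instead.\n\tfmt.Println(\"zzzzz-bi6l4-\" + rendezvousSuffix(\"\", \"http://0.0.0.0:54321/\"))\n}\n"
  },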
  {
    "path": "sdk/go/keepclient/discover_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\nimport (\n\t\"bytes\"\n\t\"crypto/md5\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"os\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"gopkg.in/check.v1\"\n)\n\nfunc (s *ServerRequiredSuite) TestOverrideDiscovery(c *check.C) {\n\tdefer os.Unsetenv(\"ARVADOS_KEEP_SERVICES\")\n\n\tdata := []byte(\"TestOverrideDiscovery\")\n\thash := fmt.Sprintf(\"%x+%d\", md5.Sum(data), len(data))\n\tst := StubGetHandler{\n\t\tc,\n\t\thash,\n\t\tarvadostest.ActiveToken,\n\t\thttp.StatusOK,\n\t\tdata}\n\tks := RunSomeFakeKeepServers(st, 2)\n\n\tos.Setenv(\"ARVADOS_KEEP_SERVICES\", \"\")\n\tarv1, err := arvadosclient.MakeArvadosClient()\n\tc.Assert(err, check.IsNil)\n\tarv1.ApiToken = arvadostest.ActiveToken\n\n\tos.Setenv(\"ARVADOS_KEEP_SERVICES\", ks[0].url+\"  \"+ks[1].url+\" \")\n\tarv2, err := arvadosclient.MakeArvadosClient()\n\tc.Assert(err, check.IsNil)\n\tarv2.ApiToken = arvadostest.ActiveToken\n\n\t// ARVADOS_KEEP_SERVICES was empty when we created arv1, but\n\t// it pointed to our stub servers when we created\n\t// arv2. Regardless of what it's set to now, a keepclient for\n\t// arv2 should use our stub servers, but one created for arv1\n\t// should not.\n\n\tkc1, err := MakeKeepClient(arv1)\n\tc.Assert(err, check.IsNil)\n\tkc2, err := MakeKeepClient(arv2)\n\tc.Assert(err, check.IsNil)\n\n\t_, _, _, err = kc1.Get(hash)\n\tc.Check(err, check.NotNil)\n\t_, _, _, err = kc2.Get(hash)\n\tc.Check(err, check.IsNil)\n}\n\nfunc (s *ServerRequiredSuite) TestDoubleSlash(c *check.C) {\n\tdefer os.Unsetenv(\"ARVADOS_KEEP_SERVICES\")\n\n\tdata := []byte(\"TestDoubleSlash\")\n\thash := fmt.Sprintf(\"%x+%d\", md5.Sum(data), len(data))\n\n\tos.Setenv(\"ARVADOS_KEEP_SERVICES\", \"\")\n\tarv1, err := arvadosclient.MakeArvadosClient()\n\tc.Assert(err, check.IsNil)\n\tarv1.ApiToken = arvadostest.ActiveToken\n\tkc1, err := MakeKeepClient(arv1)\n\tc.Assert(err, check.IsNil)\n\n\t// Use kc1's config to set up a new client kc2, but add an\n\t// extra trailing slash to each URL.\n\tvar svcs string\n\tfor _, url := range kc1.LocalRoots() {\n\t\tsvcs += url + \"/ \"\n\t}\n\tc.Assert(svcs, check.Not(check.HasLen), 0)\n\tos.Setenv(\"ARVADOS_KEEP_SERVICES\", svcs)\n\n\tarv2, err := arvadosclient.MakeArvadosClient()\n\tc.Assert(err, check.IsNil)\n\tarv2.ApiToken = arvadostest.ActiveToken\n\tkc2, err := MakeKeepClient(arv2)\n\tc.Assert(err, check.IsNil)\n\n\t// Check that trailing slashes were trimmed.\n\tfor _, url := range kc2.LocalRoots() {\n\t\tc.Assert(url, check.Not(check.Matches), `.*/$`)\n\t}\n\n\t_, _, err = kc2.PutB(data)\n\tc.Assert(err, check.IsNil)\n\t_, _, _, err = kc2.Get(hash)\n\tc.Check(err, check.IsNil)\n}\n\nfunc (s *StandaloneSuite) TestKeepServicesFromClusterConfig(c *check.C) {\n\t// This behavior is disabled via env var in the test\n\t// environment. 
Clear the env var to test the default\n\t// production behavior.\n\tv := \"ARVADOS_USE_KEEP_ACCESSIBLE_API\"\n\tdefer os.Setenv(v, os.Getenv(v))\n\tos.Unsetenv(v)\n\n\trdr := bytes.NewReader([]byte(`\nClusters:\n zzzzz:\n  Services:\n   Keepstore:\n    InternalURLs:\n     \"https://[::1]:12345/\":\n      Rendezvous: abcdefghijklmno\n     \"https://[::1]:54321/\":\n      Rendezvous: xyz\n     \"http://0.0.0.0:54321/\":\n      {}\n   Keepproxy:\n    InternalURLs:\n     \"https://[::1]:55555/\":\n      {}\n`))\n\tldr := config.NewLoader(rdr, ctxlog.TestLogger(c))\n\tldr.Path = \"-\"\n\tcfg, err := ldr.Load()\n\tc.Assert(err, check.IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\tc.Assert(cluster.ClusterID, check.Equals, \"zzzzz\")\n\tac, err := arvados.NewClientFromConfig(cluster)\n\tc.Assert(err, check.IsNil)\n\tarv1, err := arvadosclient.New(ac)\n\tc.Assert(err, check.IsNil)\n\tc.Check(arv1.Cluster, check.NotNil)\n\tkc, err := MakeKeepClient(arv1)\n\tc.Assert(err, check.IsNil)\n\t// Note the default rendezvous string is generated based on\n\t// the MD5 of the keepstore URL and that URL *must* have a\n\t// trailing slash in order to match the RailsAPI behavior --\n\t// meanwhile, the keepstore URL given in the localRoots map\n\t// *must not* have a trailing slash.\n\tc.Check(kc.localRoots, check.DeepEquals, map[string]string{\n\t\t\"zzzzz-bi6l4-abcdefghijklmno\":                                                \"https://[::1]:12345\",\n\t\tfmt.Sprintf(\"zzzzz-bi6l4-%x\", md5.Sum([]byte(\"xyz\")))[:27]:                   \"https://[::1]:54321\",\n\t\tfmt.Sprintf(\"zzzzz-bi6l4-%x\", md5.Sum([]byte(\"http://0.0.0.0:54321/\")))[:27]: \"http://0.0.0.0:54321\",\n\t})\n}\n"
  },
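  {
    "path": "sdk/go/keepclient/examples/envoverride/main.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical illustration (not part of the Arvados source tree):\n// pointing a KeepClient at a fixed set of keep services via the\n// ARVADOS_KEEP_SERVICES environment variable, as exercised by\n// TestOverrideDiscovery in discover_test.go. The value is a\n// space-separated list of base URLs; it is read when the\n// ArvadosClient is created, and it disables API-based service\n// discovery. Assumes ARVADOS_API_HOST and ARVADOS_API_TOKEN are\n// already set; the URLs below are placeholders.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n)\n\nfunc main() {\n\tos.Setenv(\"ARVADOS_KEEP_SERVICES\", \"http://10.0.0.1:25107 http://10.0.0.2:25107\")\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tkc, err := keepclient.MakeKeepClient(arv)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t// Each URL appears as a local root under a synthesized\n\t// 00000-bi6l4-... UUID, with any trailing slash trimmed (see\n\t// discoverServices in discover.go).\n\tfor uuid, root := range kc.LocalRoots() {\n\t\tfmt.Println(uuid, root)\n\t}\n}\n"
  },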
  {
    "path": "sdk/go/keepclient/gateway_shim.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// keepViaHTTP implements arvados.KeepGateway by using a KeepClient to\n// do upstream requests to keepstore and keepproxy.\n//\n// This enables KeepClient to use KeepGateway wrappers (like\n// arvados.DiskCache) to wrap its own HTTP client back-end methods\n// (getOrHead, httpBlockWrite).\n//\n// See (*KeepClient)upstreamGateway() for the relevant glue.\ntype keepViaHTTP struct {\n\t*KeepClient\n}\n\nfunc (kvh *keepViaHTTP) ReadAt(locator string, dst []byte, offset int) (int, error) {\n\trdr, _, _, _, err := kvh.getOrHead(\"GET\", locator, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer rdr.Close()\n\t_, err = io.CopyN(io.Discard, rdr, int64(offset))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tn, err := rdr.Read(dst)\n\treturn int(n), err\n}\n\nfunc (kvh *keepViaHTTP) BlockRead(ctx context.Context, opts arvados.BlockReadOptions) (int, error) {\n\tif opts.CheckCacheOnly {\n\t\treturn 0, arvados.ErrNotCached\n\t}\n\trdr, _, _, _, err := kvh.getOrHead(\"GET\", opts.Locator, nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tn, err := io.Copy(opts.WriteTo, rdr)\n\terrClose := rdr.Close()\n\tif err == nil {\n\t\terr = errClose\n\t}\n\treturn int(n), err\n}\n\nfunc (kvh *keepViaHTTP) BlockWrite(ctx context.Context, req arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {\n\treturn kvh.httpBlockWrite(ctx, req)\n}\n\nfunc (kvh *keepViaHTTP) LocalLocator(locator string) (string, error) {\n\tif !strings.Contains(locator, \"+R\") {\n\t\t// Either it has +A, or it's unsigned and we assume\n\t\t// it's a local locator on a site with signatures\n\t\t// disabled.\n\t\treturn locator, nil\n\t}\n\tsighdr := fmt.Sprintf(\"local, time=%s\", time.Now().UTC().Format(time.RFC3339))\n\t_, _, url, hdr, err := kvh.KeepClient.getOrHead(\"HEAD\", locator, http.Header{\"X-Keep-Signature\": []string{sighdr}})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tloc := hdr.Get(\"X-Keep-Locator\")\n\tif loc == \"\" {\n\t\treturn \"\", fmt.Errorf(\"missing X-Keep-Locator header in HEAD response from %s\", url)\n\t}\n\treturn loc, nil\n}\n"
  },
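  {
    "path": "sdk/go/keepclient/examples/locallocator/main.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical illustration (not part of the Arvados source tree):\n// using LocalLocator to exchange a remote-signed locator (one\n// carrying a +R hint) for a locally signed one, per the keepViaHTTP\n// glue in gateway_shim.go. Locators without +R are returned\n// unchanged, so calling it unconditionally is safe. The locator below\n// is made-up data, and a reachable cluster (ARVADOS_API_HOST,\n// ARVADOS_API_TOKEN) is assumed.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n)\n\nfunc main() {\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tkc, err := keepclient.MakeKeepClient(arv)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tremote := \"acbd18db4cc2f85cedef654fccc4a4d8+3+Rzzzzz-fakefakefakefa\"\n\t// Under the hood this issues a HEAD request with an\n\t// X-Keep-Signature header and returns the X-Keep-Locator value\n\t// from the response.\n\tlocal, err := kc.LocalLocator(remote)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(local)\n}\n"
  },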
  {
    "path": "sdk/go/keepclient/hashcheck.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"hash\"\n\t\"io\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar BadChecksum = errors.New(\"Reader failed checksum\")\n\n// HashCheckingReader is an io.ReadCloser that checks the contents\n// read from the underlying io.Reader against the provided hash.\ntype HashCheckingReader struct {\n\t// The underlying data source\n\tio.Reader\n\n\t// The hash function to use\n\thash.Hash\n\n\t// The hash value to check against.  Must be a hex-encoded lowercase string.\n\tCheck string\n\n\t// If non-nil, count the number of bytes read.\n\tcounter prometheus.Counter\n}\n\n// Reads from the underlying reader, update the hashing function, and\n// pass the results through. Returns BadChecksum (instead of EOF) on\n// the last read if the checksum doesn't match.\nfunc (hcr HashCheckingReader) Read(p []byte) (n int, err error) {\n\tn, err = hcr.Reader.Read(p)\n\tif n > 0 {\n\t\thcr.Hash.Write(p[:n])\n\t}\n\tif err == io.EOF {\n\t\tsum := hcr.Hash.Sum(nil)\n\t\tif fmt.Sprintf(\"%x\", sum) != hcr.Check {\n\t\t\terr = BadChecksum\n\t\t}\n\t}\n\tif hcr.counter != nil {\n\t\thcr.counter.Add(float64(n))\n\t}\n\treturn n, err\n}\n\n// WriteTo writes the entire contents of hcr.Reader to dest. Returns\n// BadChecksum if writing is successful but the checksum doesn't\n// match.\nfunc (hcr HashCheckingReader) WriteTo(dest io.Writer) (written int64, err error) {\n\twritten, err = io.Copy(io.MultiWriter(dest, hcr.Hash), hcr.Reader)\n\tif hcr.counter != nil {\n\t\thcr.counter.Add(float64(written))\n\t}\n\tif err != nil {\n\t\treturn written, err\n\t}\n\n\tsum := hcr.Hash.Sum(nil)\n\tif fmt.Sprintf(\"%x\", sum) != hcr.Check {\n\t\treturn written, BadChecksum\n\t}\n\n\treturn written, nil\n}\n\n// Close reads all remaining data from the underlying Reader and\n// returns BadChecksum if the checksum doesn't match. It also closes\n// the underlying Reader if it implements io.ReadCloser.\nfunc (hcr HashCheckingReader) Close() (err error) {\n\t_, err = io.Copy(hcr.Hash, hcr.Reader)\n\n\tif closer, ok := hcr.Reader.(io.Closer); ok {\n\t\tcloseErr := closer.Close()\n\t\tif err == nil {\n\t\t\terr = closeErr\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tif fmt.Sprintf(\"%x\", hcr.Hash.Sum(nil)) != hcr.Check {\n\t\treturn BadChecksum\n\t}\n\treturn nil\n}\n"
  },
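  {
    "path": "sdk/go/keepclient/examples/hashcheck/main.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical illustration (not part of the Arvados source tree): a\n// minimal use of HashCheckingReader from outside the package. Data\n// passes through while being hashed, and the final read reports\n// BadChecksum instead of EOF if the md5 of everything read does not\n// match Check (a hex-encoded lowercase digest).\npackage main\n\nimport (\n\t\"crypto/md5\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n)\n\nfunc main() {\n\tdata := \"foo\"\n\thcr := keepclient.HashCheckingReader{\n\t\tReader: strings.NewReader(data),\n\t\tHash:   md5.New(),\n\t\tCheck:  fmt.Sprintf(\"%x\", md5.Sum([]byte(data))),\n\t}\n\t// io.Copy uses hcr.WriteTo, which verifies the checksum after\n\t// the last byte is written.\n\tn, err := io.Copy(io.Discard, hcr)\n\tfmt.Println(n, err) // 3 <nil>; a corrupted stream yields BadChecksum\n}\n"
  },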
  {
    "path": "sdk/go/keepclient/hashcheck_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\nimport (\n\t\"bytes\"\n\t\"crypto/md5\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\n\t. \"gopkg.in/check.v1\"\n)\n\ntype HashcheckSuiteSuite struct{}\n\n// Gocheck boilerplate\nvar _ = Suite(&HashcheckSuiteSuite{})\n\nfunc (h *HashcheckSuiteSuite) TestRead(c *C) {\n\thash := fmt.Sprintf(\"%x\", md5.Sum([]byte(\"foo\")))\n\n\t{\n\t\tr, w := io.Pipe()\n\t\thcr := HashCheckingReader{r, md5.New(), hash, nil}\n\t\tgo func() {\n\t\t\tw.Write([]byte(\"foo\"))\n\t\t\tw.Close()\n\t\t}()\n\t\tp, err := ioutil.ReadAll(hcr)\n\t\tc.Check(len(p), Equals, 3)\n\t\tc.Check(err, Equals, nil)\n\t}\n\n\t{\n\t\tr, w := io.Pipe()\n\t\thcr := HashCheckingReader{r, md5.New(), hash, nil}\n\t\tgo func() {\n\t\t\tw.Write([]byte(\"bar\"))\n\t\t\tw.Close()\n\t\t}()\n\t\tp, err := ioutil.ReadAll(hcr)\n\t\tc.Check(len(p), Equals, 3)\n\t\tc.Check(err, Equals, BadChecksum)\n\t}\n}\n\nfunc (h *HashcheckSuiteSuite) TestWriteTo(c *C) {\n\thash := fmt.Sprintf(\"%x\", md5.Sum([]byte(\"foo\")))\n\n\t{\n\t\tbb := bytes.NewBufferString(\"foo\")\n\t\thcr := HashCheckingReader{bb, md5.New(), hash, nil}\n\t\tr, w := io.Pipe()\n\t\tdone := make(chan bool)\n\t\tgo func() {\n\t\t\tp, err := ioutil.ReadAll(r)\n\t\t\tc.Check(len(p), Equals, 3)\n\t\t\tc.Check(err, Equals, nil)\n\t\t\tdone <- true\n\t\t}()\n\n\t\tn, err := hcr.WriteTo(w)\n\t\tw.Close()\n\t\tc.Check(n, Equals, int64(3))\n\t\tc.Check(err, Equals, nil)\n\t\t<-done\n\t}\n\n\t{\n\t\tbb := bytes.NewBufferString(\"bar\")\n\t\thcr := HashCheckingReader{bb, md5.New(), hash, nil}\n\t\tr, w := io.Pipe()\n\t\tdone := make(chan bool)\n\t\tgo func() {\n\t\t\tp, err := ioutil.ReadAll(r)\n\t\t\tc.Check(len(p), Equals, 3)\n\t\t\tc.Check(err, Equals, nil)\n\t\t\tdone <- true\n\t\t}()\n\n\t\tn, err := hcr.WriteTo(w)\n\t\tw.Close()\n\t\tc.Check(n, Equals, int64(3))\n\t\tc.Check(err, Equals, BadChecksum)\n\t\t<-done\n\t}\n\n\t// If WriteTo stops early due to a write error, return the\n\t// write error (not \"bad checksum\").\n\t{\n\t\tinput := bytes.NewBuffer(make([]byte, 1<<26))\n\t\thcr := HashCheckingReader{input, md5.New(), hash, nil}\n\t\tr, w := io.Pipe()\n\t\tr.Close()\n\t\tn, err := hcr.WriteTo(w)\n\t\tc.Check(n, Equals, int64(0))\n\t\tc.Check(err, NotNil)\n\t\tc.Check(err, Not(Equals), BadChecksum)\n\t}\n}\n"
  },
  {
    "path": "sdk/go/keepclient/keepclient.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Package keepclient provides low-level Get/Put primitives for accessing\n// Arvados Keep blocks.\npackage keepclient\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/md5\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\n// BLOCKSIZE defines the length of a Keep \"block\", which is 64MB.\nconst BLOCKSIZE = 64 * 1024 * 1024\n\nvar (\n\tDefaultRequestTimeout      = 20 * time.Second\n\tDefaultConnectTimeout      = 2 * time.Second\n\tDefaultTLSHandshakeTimeout = 4 * time.Second\n\tDefaultKeepAlive           = 180 * time.Second\n\n\tDefaultProxyRequestTimeout      = 300 * time.Second\n\tDefaultProxyConnectTimeout      = 30 * time.Second\n\tDefaultProxyTLSHandshakeTimeout = 10 * time.Second\n\tDefaultProxyKeepAlive           = 120 * time.Second\n\n\tDefaultRetryDelay = 2 * time.Second // see KeepClient.RetryDelay\n\tMinimumRetryDelay = time.Millisecond\n\n\trootCacheDir = \"/var/cache/arvados/keep\"\n\tuserCacheDir = \".cache/arvados/keep\" // relative to HOME\n)\n\n// Error interface with an error and boolean indicating whether the error is temporary\ntype Error interface {\n\terror\n\tTemporary() bool\n}\n\n// multipleResponseError is of type Error\ntype multipleResponseError struct {\n\terror\n\tisTemp bool\n}\n\nfunc (e *multipleResponseError) Temporary() bool {\n\treturn e.isTemp\n}\n\n// BlockNotFound is a multipleResponseError where isTemp is false\nvar BlockNotFound = &ErrNotFound{multipleResponseError{\n\terror:  errors.New(\"Block not found\"),\n\tisTemp: false,\n}}\n\n// ErrNotFound is a multipleResponseError where isTemp can be true or false\ntype ErrNotFound struct {\n\tmultipleResponseError\n}\n\nfunc (*ErrNotFound) HTTPStatus() int { return http.StatusNotFound }\n\ntype InsufficientReplicasError struct{ error }\n\ntype OversizeBlockError struct{ error }\n\nvar ErrOversizeBlock = OversizeBlockError{error: errors.New(\"Exceeded maximum block size (\" + strconv.Itoa(BLOCKSIZE) + \")\")}\nvar MissingArvadosApiHost = errors.New(\"Missing required environment variable ARVADOS_API_HOST\")\nvar MissingArvadosApiToken = errors.New(\"Missing required environment variable ARVADOS_API_TOKEN\")\nvar InvalidLocatorError = errors.New(\"Invalid locator\")\n\n// ErrNoSuchKeepServer is returned when GetIndex is invoked with a UUID with no matching keep server\nvar ErrNoSuchKeepServer = errors.New(\"No keep server matching the given UUID is found\")\n\n// ErrIncompleteIndex is returned when the Index response does not end with a new empty line\nvar ErrIncompleteIndex = errors.New(\"Got incomplete index\")\n\nconst (\n\tXKeepDesiredReplicas         = \"X-Keep-Desired-Replicas\"\n\tXKeepReplicasStored          = \"X-Keep-Replicas-Stored\"\n\tXKeepStorageClasses          = \"X-Keep-Storage-Classes\"\n\tXKeepStorageClassesConfirmed = \"X-Keep-Storage-Classes-Confirmed\"\n\tXKeepSignature               = \"X-Keep-Signature\"\n\tXKeepLocator                 = \"X-Keep-Locator\"\n)\n\ntype HTTPClient interface {\n\tDo(*http.Request) (*http.Response, error)\n}\n\nconst DiskCacheDisabled = arvados.ByteSizeOrPercent(1)\n\n// 
KeepClient holds information about Arvados and Keep servers.\ntype KeepClient struct {\n\tArvados            *arvadosclient.ArvadosClient\n\tWant_replicas      int\n\tlocalRoots         map[string]string\n\twritableLocalRoots map[string]string\n\tgatewayRoots       map[string]string\n\tlock               sync.RWMutex\n\tHTTPClient         HTTPClient\n\n\t// Number of times to automatically retry a read/write\n\t// operation after a transient failure.\n\tRetries int\n\n\t// Initial maximum delay for automatic retry. If zero,\n\t// DefaultRetryDelay is used.  The delay after attempt N\n\t// (0-based) will be a random duration between\n\t// MinimumRetryDelay and RetryDelay * 2^N, not to exceed a cap\n\t// of RetryDelay * 10.\n\tRetryDelay time.Duration\n\n\tRequestID             string\n\tStorageClasses        []string\n\tDefaultStorageClasses []string                  // Set by cluster's exported config\n\tDiskCacheSize         arvados.ByteSizeOrPercent // See also DiskCacheDisabled\n\n\t// set to 1 if all writable services are of disk type, otherwise 0\n\treplicasPerService int\n\n\t// Any non-disk typed services found in the list of keepservers?\n\tfoundNonDiskSvc bool\n\n\t// Disable automatic discovery of keep services\n\tdisableDiscovery bool\n\n\tgatewayStack arvados.KeepGateway\n\n\tsetupMetricsOnce sync.Once\n\tmetrics          arvados.KeepClientMetrics\n}\n\nfunc (kc *KeepClient) Clone() *KeepClient {\n\tkc.lock.Lock()\n\tdefer kc.lock.Unlock()\n\tkc.setupMetrics()\n\tclone := &KeepClient{\n\t\tArvados:               kc.Arvados,\n\t\tWant_replicas:         kc.Want_replicas,\n\t\tlocalRoots:            kc.localRoots,\n\t\twritableLocalRoots:    kc.writableLocalRoots,\n\t\tgatewayRoots:          kc.gatewayRoots,\n\t\tHTTPClient:            kc.HTTPClient,\n\t\tRetries:               kc.Retries,\n\t\tRetryDelay:            kc.RetryDelay,\n\t\tRequestID:             kc.RequestID,\n\t\tStorageClasses:        kc.StorageClasses,\n\t\tDefaultStorageClasses: kc.DefaultStorageClasses,\n\t\tDiskCacheSize:         kc.DiskCacheSize,\n\t\treplicasPerService:    kc.replicasPerService,\n\t\tfoundNonDiskSvc:       kc.foundNonDiskSvc,\n\t\tdisableDiscovery:      kc.disableDiscovery,\n\t\tmetrics:               kc.metrics,\n\t}\n\tclone.setupMetricsOnce.Do(func() {})\n\treturn clone\n}\n\nfunc (kc *KeepClient) loadDefaultClasses() error {\n\tscData, err := kc.Arvados.ClusterConfig(\"StorageClasses\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tclasses := scData.(map[string]interface{})\n\tfor scName := range classes {\n\t\tscConf, _ := classes[scName].(map[string]interface{})\n\t\tisDefault, ok := scConf[\"Default\"].(bool)\n\t\tif ok && isDefault {\n\t\t\tkc.DefaultStorageClasses = append(kc.DefaultStorageClasses, scName)\n\t\t}\n\t}\n\treturn nil\n}\n\n// MakeKeepClient creates a new KeepClient, loads default storage\n// classes, calls discoverServices(), and returns when the client is\n// ready to use.\nfunc MakeKeepClient(arv *arvadosclient.ArvadosClient) (*KeepClient, error) {\n\tkc := New(arv)\n\treturn kc, kc.discoverServices()\n}\n\n// New creates a new KeepClient. 
Service discovery will occur on the\n// next read/write operation.\nfunc New(arv *arvadosclient.ArvadosClient) *KeepClient {\n\tdefaultReplicationLevel := 2\n\tvalue, err := arv.Discovery(\"defaultCollectionReplication\")\n\tif err == nil {\n\t\tv, ok := value.(float64)\n\t\tif ok && v > 0 {\n\t\t\tdefaultReplicationLevel = int(v)\n\t\t}\n\t}\n\tkc := &KeepClient{\n\t\tArvados:       arv,\n\t\tWant_replicas: defaultReplicationLevel,\n\t\tRetries:       2,\n\t}\n\terr = kc.loadDefaultClasses()\n\tif err != nil && arv.Logger != nil {\n\t\tarv.Logger.WithError(err).Debug(\"unable to load the default storage classes cluster config\")\n\t}\n\treturn kc\n}\n\n// PutHR puts a block given the block hash, a reader, and the number of bytes\n// to read from the reader (which must be between 0 and BLOCKSIZE).\n//\n// Returns the locator for the written block, the number of replicas\n// written, and an error.\n//\n// Returns an InsufficientReplicasError if 0 <= replicas <\n// kc.Want_replicas.\nfunc (kc *KeepClient) PutHR(hash string, r io.Reader, dataBytes int64) (string, int, error) {\n\tresp, err := kc.BlockWrite(context.Background(), arvados.BlockWriteOptions{\n\t\tHash:     hash,\n\t\tReader:   r,\n\t\tDataSize: int(dataBytes),\n\t})\n\treturn resp.Locator, resp.Replicas, err\n}\n\n// PutHB writes a block to Keep. The hash of the bytes is given in\n// hash, and the data is given in buf.\n//\n// Return values are the same as for PutHR.\nfunc (kc *KeepClient) PutHB(hash string, buf []byte) (string, int, error) {\n\tresp, err := kc.BlockWrite(context.Background(), arvados.BlockWriteOptions{\n\t\tHash: hash,\n\t\tData: buf,\n\t})\n\treturn resp.Locator, resp.Replicas, err\n}\n\n// PutB writes a block to Keep. It computes the hash itself.\n//\n// Return values are the same as for PutHR.\nfunc (kc *KeepClient) PutB(buffer []byte) (string, int, error) {\n\tresp, err := kc.BlockWrite(context.Background(), arvados.BlockWriteOptions{\n\t\tData: buffer,\n\t})\n\treturn resp.Locator, resp.Replicas, err\n}\n\n// PutR writes a block to Keep. 
It first reads all data from r into a buffer\n// in order to compute the hash.\n//\n// Return values are the same as for PutHR.\n//\n// If the block hash and data size are known, PutHR is more efficient.\nfunc (kc *KeepClient) PutR(r io.Reader) (locator string, replicas int, err error) {\n\tbuffer, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\treturn kc.PutB(buffer)\n}\n\nfunc (kc *KeepClient) getOrHead(method string, locator string, header http.Header) (io.ReadCloser, int64, string, http.Header, error) {\n\tif strings.HasPrefix(locator, \"d41d8cd98f00b204e9800998ecf8427e+0\") {\n\t\treturn ioutil.NopCloser(bytes.NewReader(nil)), 0, \"\", nil, nil\n\t}\n\n\treqid := kc.getRequestID()\n\n\tvar expectLength int64\n\tif parts := strings.SplitN(locator, \"+\", 3); len(parts) < 2 {\n\t\texpectLength = -1\n\t} else if n, err := strconv.ParseInt(parts[1], 10, 64); err != nil {\n\t\texpectLength = -1\n\t} else {\n\t\texpectLength = n\n\t}\n\n\tvar errs []string\n\n\tdelay := delayCalculator{InitialMaxDelay: kc.RetryDelay}\n\ttriesRemaining := 1 + kc.Retries\n\n\tserversToTry := kc.getSortedRoots(locator)\n\n\tnumServers := len(serversToTry)\n\tcount404 := 0\n\n\tvar retryList []string\n\n\tfor triesRemaining > 0 {\n\t\ttriesRemaining--\n\t\tretryList = nil\n\n\t\tfor _, host := range serversToTry {\n\t\t\turl := host + \"/\" + locator\n\n\t\t\treq, err := http.NewRequest(method, url, nil)\n\t\t\tif err != nil {\n\t\t\t\terrs = append(errs, fmt.Sprintf(\"%s: %v\", url, err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor k, v := range header {\n\t\t\t\treq.Header[k] = append([]string(nil), v...)\n\t\t\t}\n\t\t\tif req.Header.Get(\"Authorization\") == \"\" {\n\t\t\t\treq.Header.Set(\"Authorization\", \"Bearer \"+kc.Arvados.ApiToken)\n\t\t\t}\n\t\t\tif req.Header.Get(\"X-Request-Id\") == \"\" {\n\t\t\t\treq.Header.Set(\"X-Request-Id\", reqid)\n\t\t\t}\n\t\t\tresp, err := kc.httpClient().Do(req)\n\t\t\tif err != nil {\n\t\t\t\t// Probably a network error, may be transient,\n\t\t\t\t// can try again.\n\t\t\t\terrs = append(errs, fmt.Sprintf(\"%s: %v\", url, err))\n\t\t\t\tretryList = append(retryList, host)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\tvar respbody []byte\n\t\t\t\trespbody, _ = ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: 4096})\n\t\t\t\tresp.Body.Close()\n\t\t\t\terrs = append(errs, fmt.Sprintf(\"%s: HTTP %d %q\",\n\t\t\t\t\turl, resp.StatusCode, bytes.TrimSpace(respbody)))\n\n\t\t\t\tif resp.StatusCode == 408 ||\n\t\t\t\t\tresp.StatusCode == 429 ||\n\t\t\t\t\tresp.StatusCode >= 500 {\n\t\t\t\t\t// Timeout, too many requests, or other\n\t\t\t\t\t// server side failure, transient\n\t\t\t\t\t// error, can try again.\n\t\t\t\t\tretryList = append(retryList, host)\n\t\t\t\t} else if resp.StatusCode == 404 {\n\t\t\t\t\tcount404++\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif expectLength < 0 {\n\t\t\t\tif resp.ContentLength < 0 {\n\t\t\t\t\tresp.Body.Close()\n\t\t\t\t\treturn nil, 0, \"\", nil, fmt.Errorf(\"error reading %q: no size hint, no Content-Length header in response\", locator)\n\t\t\t\t}\n\t\t\t\texpectLength = resp.ContentLength\n\t\t\t} else if resp.ContentLength >= 0 && expectLength != resp.ContentLength {\n\t\t\t\tresp.Body.Close()\n\t\t\t\treturn nil, 0, \"\", nil, fmt.Errorf(\"error reading %q: size hint %d != Content-Length %d\", locator, expectLength, resp.ContentLength)\n\t\t\t}\n\t\t\t// Success\n\t\t\tif method == \"GET\" {\n\t\t\t\tkc.setupMetrics()\n\t\t\t\treturn HashCheckingReader{\n\t\t\t\t\tReader:  
resp.Body,\n\t\t\t\t\tHash:    md5.New(),\n\t\t\t\t\tCheck:   locator[0:32],\n\t\t\t\t\tcounter: kc.metrics.BackendBytesIn,\n\t\t\t\t}, expectLength, url, resp.Header, nil\n\t\t\t}\n\t\t\tresp.Body.Close()\n\t\t\treturn nil, expectLength, url, resp.Header, nil\n\t\t}\n\t\tserversToTry = retryList\n\t\tif len(serversToTry) > 0 && triesRemaining > 0 {\n\t\t\ttime.Sleep(delay.Next())\n\t\t}\n\t}\n\tif kc.Arvados.Logger != nil {\n\t\tkc.Arvados.Logger.Debugf(\"DEBUG: %s %s failed: %v\", method, locator, errs)\n\t}\n\n\tvar err error\n\tif count404 == numServers {\n\t\terr = BlockNotFound\n\t} else {\n\t\terr = &ErrNotFound{multipleResponseError{\n\t\t\terror:  fmt.Errorf(\"%s %s failed: %v\", method, locator, errs),\n\t\t\tisTemp: len(serversToTry) > 0,\n\t\t}}\n\t}\n\treturn nil, 0, \"\", nil, err\n}\n\n// attempt to create dir/subdir/ and its parents, up to but not\n// including dir itself, using mode 0700.\nfunc makedirs(dir, subdir string) {\n\tfor _, part := range strings.Split(subdir, string(os.PathSeparator)) {\n\t\tdir = filepath.Join(dir, part)\n\t\tos.Mkdir(dir, 0700)\n\t}\n}\n\n// upstreamGateway creates/returns the KeepGateway stack used to read\n// and write data: a disk-backed cache on top of an http backend.\nfunc (kc *KeepClient) upstreamGateway() arvados.KeepGateway {\n\tkc.lock.Lock()\n\tdefer kc.lock.Unlock()\n\tif kc.gatewayStack != nil {\n\t\treturn kc.gatewayStack\n\t}\n\tvar cachedir string\n\tif os.Geteuid() == 0 {\n\t\tcachedir = rootCacheDir\n\t\tmakedirs(\"/\", cachedir)\n\t} else {\n\t\thome := \"/\" + os.Getenv(\"HOME\")\n\t\tmakedirs(home, userCacheDir)\n\t\tcachedir = filepath.Join(home, userCacheDir)\n\t}\n\tbackend := &keepViaHTTP{kc}\n\tif kc.DiskCacheSize == DiskCacheDisabled {\n\t\tkc.gatewayStack = backend\n\t} else {\n\t\tkc.setupMetrics()\n\t\tkc.gatewayStack = &arvados.DiskCache{\n\t\t\tDir:         cachedir,\n\t\t\tMaxSize:     kc.DiskCacheSize,\n\t\t\tKeepGateway: backend,\n\t\t\tLogger:      kc.Arvados.Logger,\n\t\t\tMetrics:     kc.metrics,\n\t\t}\n\t}\n\treturn kc.gatewayStack\n}\n\n// LocalLocator returns a locator equivalent to the one supplied, but\n// with a valid signature from the local cluster. If the given locator\n// already has a local signature, it is returned unchanged.\nfunc (kc *KeepClient) LocalLocator(locator string) (string, error) {\n\treturn kc.upstreamGateway().LocalLocator(locator)\n}\n\n// Get retrieves the specified block from the local cache or a backend\n// server. 
Returns a reader, the expected data length (or -1 if not\n// known), and an error.\n//\n// The third return value (formerly a source URL in previous versions)\n// is an empty string.\n//\n// If the block checksum does not match, the final Read() on the\n// reader returned by this method will return a BadChecksum error\n// instead of EOF.\n//\n// New code should use BlockRead and/or ReadAt instead of Get.\nfunc (kc *KeepClient) Get(locator string) (io.ReadCloser, int64, string, error) {\n\tloc, err := MakeLocator(locator)\n\tif err != nil {\n\t\treturn nil, 0, \"\", err\n\t}\n\tpr, pw := io.Pipe()\n\tgo func() {\n\t\tn, err := kc.BlockRead(context.Background(), arvados.BlockReadOptions{\n\t\t\tLocator: locator,\n\t\t\tWriteTo: pw,\n\t\t})\n\t\tif err != nil {\n\t\t\tpw.CloseWithError(err)\n\t\t} else if loc.Size >= 0 && n != loc.Size {\n\t\t\tpw.CloseWithError(fmt.Errorf(\"expected block size %d but read %d bytes\", loc.Size, n))\n\t\t} else {\n\t\t\tpw.Close()\n\t\t}\n\t}()\n\t// Wait for the first byte to arrive, so that, if there's an\n\t// error before we receive any data, we can return the error\n\t// directly, instead of indirectly via a reader that returns\n\t// an error.\n\tbufr := bufio.NewReader(pr)\n\t_, err = bufr.Peek(1)\n\tif err != nil && err != io.EOF {\n\t\tpr.CloseWithError(err)\n\t\treturn nil, 0, \"\", err\n\t}\n\tif err == io.EOF && (loc.Size == 0 || loc.Hash == \"d41d8cd98f00b204e9800998ecf8427e\") {\n\t\t// In the special case of the zero-length block, EOF\n\t\t// error from Peek() is normal.\n\t\treturn pr, 0, \"\", nil\n\t}\n\treturn struct {\n\t\tio.Reader\n\t\tio.Closer\n\t}{\n\t\tReader: bufr,\n\t\tCloser: pr,\n\t}, int64(loc.Size), \"\", err\n}\n\n// BlockRead retrieves a block from the cache if it's present, otherwise\n// from the network.\nfunc (kc *KeepClient) BlockRead(ctx context.Context, opts arvados.BlockReadOptions) (int, error) {\n\tkc.setupMetrics()\n\tkc.metrics.ClientOpsGet.Add(1)\n\treturn kc.upstreamGateway().BlockRead(ctx, opts)\n}\n\n// ReadAt retrieves a portion of block from the cache if it's\n// present, otherwise from the network.\nfunc (kc *KeepClient) ReadAt(locator string, p []byte, off int) (int, error) {\n\tif off == 0 {\n\t\t// Python SDK's get_counter metric (reported in\n\t\t// arv-mount crunchstat as \"keepcalls ... get\") counts\n\t\t// blocks reads initiated by the application.  But\n\t\t// here, applications might not do any full block\n\t\t// reads at all, only partial reads.  We count partial\n\t\t// reads with offset==0 as a full block read, so the\n\t\t// resulting statistics are similar to what the Python\n\t\t// SDK would report for similar activity.\n\t\tkc.setupMetrics()\n\t\tkc.metrics.ClientOpsGet.Add(1)\n\t}\n\treturn kc.upstreamGateway().ReadAt(locator, p, off)\n}\n\n// BlockWrite writes a full block to upstream servers and saves a copy\n// in the local cache.\nfunc (kc *KeepClient) BlockWrite(ctx context.Context, req arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {\n\tkc.setupMetrics()\n\tkc.metrics.ClientOpsPut.Add(1)\n\treturn kc.upstreamGateway().BlockWrite(ctx, req)\n}\n\n// Ask verifies that a block with the given hash is available and\n// readable, according to at least one Keep service. 
Unlike Get, it\n// does not retrieve the data or verify that the data content matches\n// the hash specified by the locator.\n//\n// Returns the data size (content length) reported by the Keep service\n// and the URI reporting the data size.\nfunc (kc *KeepClient) Ask(locator string) (int64, string, error) {\n\t_, size, url, _, err := kc.getOrHead(\"HEAD\", locator, nil)\n\treturn size, url, err\n}\n\n// GetIndex retrieves a list of blocks stored on the given server whose hashes\n// begin with the given prefix. The returned reader will return an error (other\n// than EOF) if the complete index cannot be retrieved.\n//\n// This is meant to be used only by system components and admin tools.\n// It will return an error unless the client is using a \"data manager token\"\n// recognized by the Keep services.\nfunc (kc *KeepClient) GetIndex(keepServiceUUID, prefix string) (io.Reader, error) {\n\turl := kc.LocalRoots()[keepServiceUUID]\n\tif url == \"\" {\n\t\treturn nil, ErrNoSuchKeepServer\n\t}\n\n\turl += \"/index\"\n\tif prefix != \"\" {\n\t\turl += \"/\" + prefix\n\t}\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Authorization\", \"Bearer \"+kc.Arvados.ApiToken)\n\treq.Header.Set(\"X-Request-Id\", kc.getRequestID())\n\tresp, err := kc.httpClient().Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Got http status code: %d\", resp.StatusCode)\n\t}\n\n\tvar respBody []byte\n\trespBody, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Got index; verify that it is complete\n\t// The response should be \"\\n\" if no locators matched the prefix\n\t// Else, it should be a list of locators followed by a blank line\n\tif !bytes.Equal(respBody, []byte(\"\\n\")) && !bytes.HasSuffix(respBody, []byte(\"\\n\\n\")) {\n\t\treturn nil, ErrIncompleteIndex\n\t}\n\n\t// Got complete index; strip the trailing newline and send\n\treturn bytes.NewReader(respBody[0 : len(respBody)-1]), nil\n}\n\n// LocalRoots returns the map of local (i.e., disk and proxy) Keep\n// services: uuid -> baseURI.\nfunc (kc *KeepClient) LocalRoots() map[string]string {\n\tkc.discoverServices()\n\tkc.lock.RLock()\n\tdefer kc.lock.RUnlock()\n\treturn kc.localRoots\n}\n\n// GatewayRoots returns the map of Keep remote gateway services:\n// uuid -> baseURI.\nfunc (kc *KeepClient) GatewayRoots() map[string]string {\n\tkc.discoverServices()\n\tkc.lock.RLock()\n\tdefer kc.lock.RUnlock()\n\treturn kc.gatewayRoots\n}\n\n// WritableLocalRoots returns the map of writable local Keep services:\n// uuid -> baseURI.\nfunc (kc *KeepClient) WritableLocalRoots() map[string]string {\n\tkc.discoverServices()\n\tkc.lock.RLock()\n\tdefer kc.lock.RUnlock()\n\treturn kc.writableLocalRoots\n}\n\n// SetServiceRoots disables service discovery and updates the\n// localRoots and gatewayRoots maps, without disrupting operations\n// that are already in progress.\n//\n// The supplied maps must not be modified after calling\n// SetServiceRoots.\nfunc (kc *KeepClient) SetServiceRoots(locals, writables, gateways map[string]string) {\n\tkc.disableDiscovery = true\n\tkc.setServiceRoots(locals, writables, gateways)\n}\n\nfunc (kc *KeepClient) setServiceRoots(locals, writables, gateways map[string]string) {\n\tkc.lock.Lock()\n\tdefer kc.lock.Unlock()\n\tkc.localRoots = locals\n\tkc.writableLocalRoots = writables\n\tkc.gatewayRoots = gateways\n}\n\n// getSortedRoots 
returns a list of base URIs of Keep services, in the\n// order they should be attempted in order to retrieve content for the\n// given locator.\nfunc (kc *KeepClient) getSortedRoots(locator string) []string {\n\tvar found []string\n\tfor _, hint := range strings.Split(locator, \"+\") {\n\t\tif len(hint) < 7 || hint[0:2] != \"K@\" {\n\t\t\t// Not a service hint.\n\t\t\tcontinue\n\t\t}\n\t\tif len(hint) == 7 {\n\t\t\t// +K@abcde means fetch from proxy at\n\t\t\t// keep.abcde.arvadosapi.com\n\t\t\tfound = append(found, \"https://keep.\"+hint[2:]+\".arvadosapi.com\")\n\t\t} else if len(hint) == 29 {\n\t\t\t// +K@abcde-abcde-abcdeabcdeabcde means fetch\n\t\t\t// from gateway with given uuid\n\t\t\tif gwURI, ok := kc.GatewayRoots()[hint[2:]]; ok {\n\t\t\t\tfound = append(found, gwURI)\n\t\t\t}\n\t\t\t// else this hint is no use to us; carry on.\n\t\t}\n\t}\n\t// After trying all usable service hints, fall back to local roots.\n\tfound = append(found, NewRootSorter(kc.LocalRoots(), locator[0:32]).GetSortedRoots()...)\n\treturn found\n}\n\nfunc (kc *KeepClient) SetStorageClasses(sc []string) {\n\t// make a copy so the caller can't mess with it.\n\tkc.StorageClasses = append([]string{}, sc...)\n}\n\nfunc (kc *KeepClient) setupMetrics() {\n\tkc.setupMetricsOnce.Do(func() {\n\t\tkc.metrics = arvados.NewKeepClientMetrics()\n\t})\n}\n\n// RegisterMetrics registers keepclient's metrics with the given\n// registry.\n//\n// If a KeepClient is cloned, metrics are combined.  RegisterMetrics\n// should only be called once, either on the original or one of the\n// clones.\nfunc (kc *KeepClient) RegisterMetrics(reg *prometheus.Registry) error {\n\tkc.setupMetrics()\n\treturn kc.metrics.Register(reg)\n}\n\nvar (\n\t// There are four global http.Client objects for the four\n\t// possible permutations of TLS behavior (verify/skip-verify)\n\t// and timeout settings (proxy/non-proxy).\n\tdefaultClient = map[bool]map[bool]HTTPClient{\n\t\t// defaultClient[false] is used for verified TLS reqs\n\t\tfalse: {},\n\t\t// defaultClient[true] is used for unverified\n\t\t// (insecure) TLS reqs\n\t\ttrue: {},\n\t}\n\tdefaultClientMtx sync.Mutex\n)\n\n// httpClient returns the HTTPClient field if it's not nil, otherwise\n// whichever of the four global http.Client objects is suitable for\n// the current environment (i.e., TLS verification on/off, keep\n// services are/aren't proxies).\nfunc (kc *KeepClient) httpClient() HTTPClient {\n\tif kc.HTTPClient != nil {\n\t\treturn kc.HTTPClient\n\t}\n\tdefaultClientMtx.Lock()\n\tdefer defaultClientMtx.Unlock()\n\tif c, ok := defaultClient[kc.Arvados.ApiInsecure][kc.foundNonDiskSvc]; ok {\n\t\treturn c\n\t}\n\n\tvar requestTimeout, connectTimeout, keepAlive, tlsTimeout time.Duration\n\tif kc.foundNonDiskSvc {\n\t\t// Use longer timeouts when connecting to a proxy,\n\t\t// because this usually means the intervening network\n\t\t// is slower.\n\t\trequestTimeout = DefaultProxyRequestTimeout\n\t\tconnectTimeout = DefaultProxyConnectTimeout\n\t\ttlsTimeout = DefaultProxyTLSHandshakeTimeout\n\t\tkeepAlive = DefaultProxyKeepAlive\n\t} else {\n\t\trequestTimeout = DefaultRequestTimeout\n\t\tconnectTimeout = DefaultConnectTimeout\n\t\ttlsTimeout = DefaultTLSHandshakeTimeout\n\t\tkeepAlive = DefaultKeepAlive\n\t}\n\n\tc := &http.Client{\n\t\tTimeout: requestTimeout,\n\t\t// It's not safe to copy *http.DefaultTransport\n\t\t// because it has a mutex (which might be locked)\n\t\t// protecting a private map (which might not be nil).\n\t\t// So we build our own, using the Go 1.12 default\n\t\t// 
values, ignoring any changes the application has\n\t\t// made to http.DefaultTransport.\n\t\tTransport: &http.Transport{\n\t\t\tDialContext: (&net.Dialer{\n\t\t\t\tTimeout:   connectTimeout,\n\t\t\t\tKeepAlive: keepAlive,\n\t\t\t\tDualStack: true,\n\t\t\t}).DialContext,\n\t\t\tMaxIdleConns:          100,\n\t\t\tIdleConnTimeout:       90 * time.Second,\n\t\t\tTLSHandshakeTimeout:   tlsTimeout,\n\t\t\tExpectContinueTimeout: 1 * time.Second,\n\t\t\tTLSClientConfig:       arvadosclient.MakeTLSConfig(kc.Arvados.ApiInsecure),\n\t\t},\n\t}\n\tdefaultClient[kc.Arvados.ApiInsecure][kc.foundNonDiskSvc] = c\n\treturn c\n}\n\nvar reqIDGen = httpserver.IDGenerator{Prefix: \"req-\"}\n\nfunc (kc *KeepClient) getRequestID() string {\n\tif kc.RequestID != \"\" {\n\t\treturn kc.RequestID\n\t}\n\treturn reqIDGen.Next()\n}\n\nfunc (kc *KeepClient) debugf(format string, args ...interface{}) {\n\tif kc.Arvados.Logger == nil {\n\t\treturn\n\t}\n\tkc.Arvados.Logger.Debugf(format, args...)\n}\n\ntype Locator struct {\n\tHash  string\n\tSize  int      // -1 if data size is not known\n\tHints []string // Including the size hint, if any\n}\n\nfunc (loc *Locator) String() string {\n\ts := loc.Hash\n\tif len(loc.Hints) > 0 {\n\t\ts = s + \"+\" + strings.Join(loc.Hints, \"+\")\n\t}\n\treturn s\n}\n\nvar locatorMatcher = regexp.MustCompile(\"^([0-9a-f]{32})([+](.*))?$\")\n\nfunc MakeLocator(path string) (*Locator, error) {\n\tsm := locatorMatcher.FindStringSubmatch(path)\n\tif sm == nil {\n\t\treturn nil, InvalidLocatorError\n\t}\n\tloc := Locator{Hash: sm[1], Size: -1}\n\tif sm[2] != \"\" {\n\t\tloc.Hints = strings.Split(sm[3], \"+\")\n\t} else {\n\t\tloc.Hints = []string{}\n\t}\n\tif len(loc.Hints) > 0 {\n\t\tif size, err := strconv.Atoi(loc.Hints[0]); err == nil {\n\t\t\tloc.Size = size\n\t\t}\n\t}\n\treturn &loc, nil\n}\n"
  },
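  {
    "path": "sdk/go/keepclient/examples/roundtrip/main.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\n// Hypothetical illustration (not part of the Arvados source tree): a\n// write/read round trip with the high-level helpers documented in\n// keepclient.go, plus locator parsing with MakeLocator. Assumes\n// ARVADOS_API_HOST and ARVADOS_API_TOKEN point at a reachable\n// cluster.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n)\n\nfunc main() {\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tkc, err := keepclient.MakeKeepClient(arv)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// PutB hashes the data itself and writes Want_replicas copies.\n\tlocator, replicas, err := kc.PutB([]byte(\"foo\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(locator, replicas)\n\n\t// MakeLocator parses the hash, the size hint (if the first hint\n\t// is numeric), and any remaining hints; Size is -1 when no size\n\t// hint is present.\n\tloc, err := keepclient.MakeLocator(locator)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(loc.Hash, loc.Size, loc.Hints)\n\n\t// Get returns a reader whose final Read reports BadChecksum\n\t// (rather than EOF) if the content does not match the hash.\n\trdr, size, _, err := kc.Get(locator)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer rdr.Close()\n\tbuf, err := io.ReadAll(rdr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(size, string(buf))\n}\n"
  },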
  {
    "path": "sdk/go/keepclient/keepclient_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/md5\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t. \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tDefaultRetryDelay = 50 * time.Millisecond\n\tTestingT(t)\n}\n\n// Gocheck boilerplate\nvar _ = Suite(&ServerRequiredSuite{})\nvar _ = Suite(&StandaloneSuite{})\n\n// Tests that require the Keep server running\ntype ServerRequiredSuite struct{}\n\n// Standalone tests\ntype StandaloneSuite struct {\n\torigDefaultRetryDelay time.Duration\n\torigMinimumRetryDelay time.Duration\n}\n\nvar origHOME = os.Getenv(\"HOME\")\n\nfunc (s *StandaloneSuite) SetUpTest(c *C) {\n\tRefreshServiceDiscovery()\n\t// Prevent cache state from leaking between test cases\n\tos.Setenv(\"HOME\", c.MkDir())\n\ts.origDefaultRetryDelay = DefaultRetryDelay\n\ts.origMinimumRetryDelay = MinimumRetryDelay\n}\n\nfunc (s *StandaloneSuite) TearDownTest(c *C) {\n\tos.Setenv(\"HOME\", origHOME)\n\tDefaultRetryDelay = s.origDefaultRetryDelay\n\tMinimumRetryDelay = s.origMinimumRetryDelay\n}\n\nfunc pythonDir() string {\n\tcwd, _ := os.Getwd()\n\treturn fmt.Sprintf(\"%s/../../python/tests\", cwd)\n}\n\nfunc (s *ServerRequiredSuite) SetUpSuite(c *C) {\n\tarvadostest.StartKeep(2, false)\n}\n\nfunc (s *ServerRequiredSuite) TearDownSuite(c *C) {\n\tarvadostest.StopKeep(2)\n\tos.Setenv(\"HOME\", origHOME)\n}\n\nfunc (s *ServerRequiredSuite) SetUpTest(c *C) {\n\tRefreshServiceDiscovery()\n\t// Prevent cache state from leaking between test cases\n\tos.Setenv(\"HOME\", c.MkDir())\n}\n\nfunc (s *ServerRequiredSuite) TestMakeKeepClient(c *C) {\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\n\tkc, err := MakeKeepClient(arv)\n\n\tc.Assert(err, IsNil)\n\tc.Check(len(kc.LocalRoots()), Equals, 2)\n\tfor _, root := range kc.LocalRoots() {\n\t\tc.Check(root, Matches, \"http://localhost:\\\\d+\")\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestDefaultStorageClasses(c *C) {\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\n\tcc, err := arv.ClusterConfig(\"StorageClasses\")\n\tc.Assert(err, IsNil)\n\tc.Assert(cc, NotNil)\n\tc.Assert(cc.(map[string]interface{})[\"default\"], NotNil)\n\n\tkc := New(arv)\n\tc.Assert(kc.DefaultStorageClasses, DeepEquals, []string{\"default\"})\n}\n\nfunc (s *ServerRequiredSuite) TestDefaultReplications(c *C) {\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\n\tkc, err := MakeKeepClient(arv)\n\tc.Check(err, IsNil)\n\tc.Assert(kc.Want_replicas, Equals, 2)\n\n\tarv.DiscoveryDoc[\"defaultCollectionReplication\"] = 3.0\n\tkc, err = MakeKeepClient(arv)\n\tc.Check(err, IsNil)\n\tc.Assert(kc.Want_replicas, Equals, 3)\n\n\tarv.DiscoveryDoc[\"defaultCollectionReplication\"] = 1.0\n\tkc, err = MakeKeepClient(arv)\n\tc.Check(err, IsNil)\n\tc.Assert(kc.Want_replicas, Equals, 1)\n}\n\ntype StubPutHandler struct {\n\tc                    *C\n\texpectPath           string\n\texpectAPIToken       string\n\texpectBody           string\n\texpectStorageClass   string\n\treturnStorageClasses string\n\thandled              chan string\n\trequests         
    []*http.Request\n\tmtx                  sync.Mutex\n}\n\nfunc (sph *StubPutHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tsph.mtx.Lock()\n\tsph.requests = append(sph.requests, req)\n\tsph.mtx.Unlock()\n\tsph.c.Check(req.URL.Path, Equals, \"/\"+sph.expectPath)\n\tsph.c.Check(req.Header.Get(\"Authorization\"), Equals, fmt.Sprintf(\"Bearer %s\", sph.expectAPIToken))\n\tif sph.expectStorageClass != \"*\" {\n\t\tsph.c.Check(req.Header.Get(\"X-Keep-Storage-Classes\"), Equals, sph.expectStorageClass)\n\t}\n\tbody, err := ioutil.ReadAll(req.Body)\n\tsph.c.Check(err, IsNil)\n\tsph.c.Check(body, DeepEquals, []byte(sph.expectBody))\n\tresp.Header().Set(\"X-Keep-Replicas-Stored\", \"1\")\n\tif sph.returnStorageClasses != \"\" {\n\t\tresp.Header().Set(\"X-Keep-Storage-Classes-Confirmed\", sph.returnStorageClasses)\n\t}\n\tresp.WriteHeader(200)\n\tsph.handled <- fmt.Sprintf(\"http://%s\", req.Host)\n}\n\nfunc RunFakeKeepServer(st http.Handler) (ks KeepServer) {\n\tvar err error\n\t// If we don't explicitly bind it to localhost, ks.listener.Addr() will\n\t// bind to 0.0.0.0 or [::] which is not a valid address for Dial()\n\tks.listener, err = net.ListenTCP(\"tcp\", &net.TCPAddr{IP: []byte{127, 0, 0, 1}, Port: 0})\n\tif err != nil {\n\t\tpanic(\"Could not listen on any port\")\n\t}\n\tks.url = fmt.Sprintf(\"http://%s\", ks.listener.Addr().String())\n\tgo http.Serve(ks.listener, st)\n\treturn\n}\n\nfunc UploadToStubHelper(c *C, st http.Handler, f func(*KeepClient, string,\n\tio.ReadCloser, io.WriteCloser, chan uploadStatus)) {\n\n\tks := RunFakeKeepServer(st)\n\tdefer ks.listener.Close()\n\n\tarv, _ := arvadosclient.MakeArvadosClient()\n\tarv.ApiToken = \"abc123\"\n\n\tkc, _ := MakeKeepClient(arv)\n\n\treader, writer := io.Pipe()\n\tuploadStatusChan := make(chan uploadStatus)\n\n\tf(kc, ks.url, reader, writer, uploadStatusChan)\n}\n\nfunc (s *StandaloneSuite) TestUploadToStubKeepServer(c *C) {\n\tst := &StubPutHandler{\n\t\tc:                    c,\n\t\texpectPath:           \"acbd18db4cc2f85cedef654fccc4a4d8\",\n\t\texpectAPIToken:       \"abc123\",\n\t\texpectBody:           \"foo\",\n\t\texpectStorageClass:   \"\",\n\t\treturnStorageClasses: \"default=1\",\n\t\thandled:              make(chan string),\n\t}\n\n\tUploadToStubHelper(c, st,\n\t\tfunc(kc *KeepClient, url string, reader io.ReadCloser, writer io.WriteCloser, uploadStatusChan chan uploadStatus) {\n\t\t\tgo kc.uploadToKeepServer(url, st.expectPath, nil, reader, uploadStatusChan, len(\"foo\"), kc.getRequestID())\n\n\t\t\twriter.Write([]byte(\"foo\"))\n\t\t\twriter.Close()\n\n\t\t\t<-st.handled\n\t\t\tstatus := <-uploadStatusChan\n\t\t\tc.Check(status, DeepEquals, uploadStatus{nil, fmt.Sprintf(\"%s/%s\", url, st.expectPath), 200, 1, map[string]int{\"default\": 1}, \"\"})\n\t\t})\n}\n\nfunc (s *StandaloneSuite) TestUploadToStubKeepServerBufferReader(c *C) {\n\tst := &StubPutHandler{\n\t\tc:                    c,\n\t\texpectPath:           \"acbd18db4cc2f85cedef654fccc4a4d8\",\n\t\texpectAPIToken:       \"abc123\",\n\t\texpectBody:           \"foo\",\n\t\texpectStorageClass:   \"\",\n\t\treturnStorageClasses: \"default=1\",\n\t\thandled:              make(chan string),\n\t}\n\n\tUploadToStubHelper(c, st,\n\t\tfunc(kc *KeepClient, url string, _ io.ReadCloser, _ io.WriteCloser, uploadStatusChan chan uploadStatus) {\n\t\t\tgo kc.uploadToKeepServer(url, st.expectPath, nil, bytes.NewBuffer([]byte(\"foo\")), uploadStatusChan, 3, kc.getRequestID())\n\n\t\t\t<-st.handled\n\n\t\t\tstatus := <-uploadStatusChan\n\t\t\tc.Check(status, 
DeepEquals, uploadStatus{nil, fmt.Sprintf(\"%s/%s\", url, st.expectPath), 200, 1, map[string]int{\"default\": 1}, \"\"})\n\t\t})\n}\n\nfunc (s *StandaloneSuite) TestUploadWithStorageClasses(c *C) {\n\tfor _, trial := range []struct {\n\t\trespHeader string\n\t\texpectMap  map[string]int\n\t}{\n\t\t{\"\", nil},\n\t\t{\"foo=1\", map[string]int{\"foo\": 1}},\n\t\t{\" foo=1 , bar=2 \", map[string]int{\"foo\": 1, \"bar\": 2}},\n\t\t{\" =foo=1 \", nil},\n\t\t{\"foo\", nil},\n\t} {\n\t\tst := &StubPutHandler{\n\t\t\tc:                    c,\n\t\t\texpectPath:           \"acbd18db4cc2f85cedef654fccc4a4d8\",\n\t\t\texpectAPIToken:       \"abc123\",\n\t\t\texpectBody:           \"foo\",\n\t\t\texpectStorageClass:   \"\",\n\t\t\treturnStorageClasses: trial.respHeader,\n\t\t\thandled:              make(chan string),\n\t\t}\n\n\t\tUploadToStubHelper(c, st,\n\t\t\tfunc(kc *KeepClient, url string, reader io.ReadCloser, writer io.WriteCloser, uploadStatusChan chan uploadStatus) {\n\t\t\t\tgo kc.uploadToKeepServer(url, st.expectPath, nil, reader, uploadStatusChan, len(\"foo\"), kc.getRequestID())\n\n\t\t\t\twriter.Write([]byte(\"foo\"))\n\t\t\t\twriter.Close()\n\n\t\t\t\t<-st.handled\n\t\t\t\tstatus := <-uploadStatusChan\n\t\t\t\tc.Check(status, DeepEquals, uploadStatus{nil, fmt.Sprintf(\"%s/%s\", url, st.expectPath), 200, 1, trial.expectMap, \"\"})\n\t\t\t})\n\t}\n}\n\nfunc (s *StandaloneSuite) TestPutWithoutStorageClassesClusterSupport(c *C) {\n\tnServers := 5\n\tfor _, trial := range []struct {\n\t\treplicas      int\n\t\tclientClasses []string\n\t\tputClasses    []string\n\t\tminRequests   int\n\t\tmaxRequests   int\n\t\tsuccess       bool\n\t}{\n\t\t// Talking to an older cluster (no default storage classes exported\n\t\t// config) and no other additional storage classes requirements.\n\t\t{1, nil, nil, 1, 1, true},\n\t\t{2, nil, nil, 2, 2, true},\n\t\t{3, nil, nil, 3, 3, true},\n\t\t{nServers*2 + 1, nil, nil, nServers, nServers, false},\n\n\t\t{1, []string{\"class1\"}, nil, 1, 1, true},\n\t\t{2, []string{\"class1\"}, nil, 2, 2, true},\n\t\t{3, []string{\"class1\"}, nil, 3, 3, true},\n\t\t{1, []string{\"class1\", \"class2\"}, nil, 1, 1, true},\n\t\t{nServers*2 + 1, []string{\"class1\"}, nil, nServers, nServers, false},\n\n\t\t{1, nil, []string{\"class1\"}, 1, 1, true},\n\t\t{2, nil, []string{\"class1\"}, 2, 2, true},\n\t\t{3, nil, []string{\"class1\"}, 3, 3, true},\n\t\t{1, nil, []string{\"class1\", \"class2\"}, 1, 1, true},\n\t\t{nServers*2 + 1, nil, []string{\"class1\"}, nServers, nServers, false},\n\t} {\n\t\tc.Logf(\"%+v\", trial)\n\t\tst := &StubPutHandler{\n\t\t\tc:                    c,\n\t\t\texpectPath:           \"acbd18db4cc2f85cedef654fccc4a4d8\",\n\t\t\texpectAPIToken:       \"abc123\",\n\t\t\texpectBody:           \"foo\",\n\t\t\texpectStorageClass:   \"*\",\n\t\t\treturnStorageClasses: \"\", // Simulate old cluster without SC keep support\n\t\t\thandled:              make(chan string, 100),\n\t\t}\n\t\tks := RunSomeFakeKeepServers(st, nServers)\n\t\tarv, _ := arvadosclient.MakeArvadosClient()\n\t\tkc, _ := MakeKeepClient(arv)\n\t\tkc.Want_replicas = trial.replicas\n\t\tkc.StorageClasses = trial.clientClasses\n\t\tkc.DefaultStorageClasses = nil // Simulate an old cluster without SC defaults\n\t\tarv.ApiToken = \"abc123\"\n\t\tlocalRoots := make(map[string]string)\n\t\twritableLocalRoots := make(map[string]string)\n\t\tfor i, k := range ks {\n\t\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", 
i)] = k.url\n\t\t\tdefer k.listener.Close()\n\t\t}\n\t\tkc.SetServiceRoots(localRoots, writableLocalRoots, nil)\n\n\t\t_, err := kc.BlockWrite(context.Background(), arvados.BlockWriteOptions{\n\t\t\tData:           []byte(\"foo\"),\n\t\t\tStorageClasses: trial.putClasses,\n\t\t})\n\t\tif trial.success {\n\t\t\tc.Check(err, IsNil)\n\t\t} else {\n\t\t\tc.Check(err, NotNil)\n\t\t}\n\t\tc.Check(len(st.handled) >= trial.minRequests, Equals, true, Commentf(\"len(st.handled)==%d, trial.minRequests==%d\", len(st.handled), trial.minRequests))\n\t\tc.Check(len(st.handled) <= trial.maxRequests, Equals, true, Commentf(\"len(st.handled)==%d, trial.maxRequests==%d\", len(st.handled), trial.maxRequests))\n\t\tif trial.clientClasses == nil && trial.putClasses == nil {\n\t\t\tc.Check(st.requests[0].Header.Get(\"X-Keep-Storage-Classes\"), Equals, \"\")\n\t\t}\n\t}\n}\n\nfunc (s *StandaloneSuite) TestPutWithStorageClasses(c *C) {\n\tnServers := 5\n\tfor _, trial := range []struct {\n\t\treplicas       int\n\t\tdefaultClasses []string\n\t\tclientClasses  []string // clientClasses takes precedence over defaultClasses\n\t\tputClasses     []string // putClasses takes precedence over clientClasses\n\t\tminRequests    int\n\t\tmaxRequests    int\n\t\tsuccess        bool\n\t}{\n\t\t{1, []string{\"class1\"}, nil, nil, 1, 1, true},\n\t\t{2, []string{\"class1\"}, nil, nil, 1, 2, true},\n\t\t{3, []string{\"class1\"}, nil, nil, 2, 3, true},\n\t\t{1, []string{\"class1\", \"class2\"}, nil, nil, 1, 1, true},\n\n\t\t// defaultClasses doesn't matter when any of the others is specified.\n\t\t{1, []string{\"class1\"}, []string{\"class1\"}, nil, 1, 1, true},\n\t\t{2, []string{\"class1\"}, []string{\"class1\"}, nil, 1, 2, true},\n\t\t{3, []string{\"class1\"}, []string{\"class1\"}, nil, 2, 3, true},\n\t\t{1, []string{\"class1\"}, []string{\"class1\", \"class2\"}, nil, 1, 1, true},\n\t\t{3, []string{\"class1\"}, nil, []string{\"class1\"}, 2, 3, true},\n\t\t{1, []string{\"class1\"}, nil, []string{\"class1\", \"class2\"}, 1, 1, true},\n\t\t{1, []string{\"class1\"}, []string{\"class404\"}, []string{\"class1\", \"class2\"}, 1, 1, true},\n\t\t{1, []string{\"class1\"}, []string{\"class1\"}, []string{\"class404\", \"class2\"}, nServers, nServers, false},\n\t\t{nServers*2 + 1, []string{}, []string{\"class1\"}, nil, nServers, nServers, false},\n\t\t{1, []string{\"class1\"}, []string{\"class404\"}, nil, nServers, nServers, false},\n\t\t{1, []string{\"class1\"}, []string{\"class1\", \"class404\"}, nil, nServers, nServers, false},\n\t\t{1, []string{\"class1\"}, nil, []string{\"class1\", \"class404\"}, nServers, nServers, false},\n\t} {\n\t\tc.Logf(\"%+v\", trial)\n\t\tst := &StubPutHandler{\n\t\t\tc:                    c,\n\t\t\texpectPath:           \"acbd18db4cc2f85cedef654fccc4a4d8\",\n\t\t\texpectAPIToken:       \"abc123\",\n\t\t\texpectBody:           \"foo\",\n\t\t\texpectStorageClass:   \"*\",\n\t\t\treturnStorageClasses: \"class1=2, class2=2\",\n\t\t\thandled:              make(chan string, 100),\n\t\t}\n\t\tks := RunSomeFakeKeepServers(st, nServers)\n\t\tarv, _ := arvadosclient.MakeArvadosClient()\n\t\tkc, _ := MakeKeepClient(arv)\n\t\tkc.Want_replicas = trial.replicas\n\t\tkc.StorageClasses = trial.clientClasses\n\t\tkc.DefaultStorageClasses = trial.defaultClasses\n\t\tarv.ApiToken = \"abc123\"\n\t\tlocalRoots := make(map[string]string)\n\t\twritableLocalRoots := make(map[string]string)\n\t\tfor i, k := range ks {\n\t\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = 
k.url\n\t\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\t\tdefer k.listener.Close()\n\t\t}\n\t\tkc.SetServiceRoots(localRoots, writableLocalRoots, nil)\n\n\t\t_, err := kc.BlockWrite(context.Background(), arvados.BlockWriteOptions{\n\t\t\tData:           []byte(\"foo\"),\n\t\t\tStorageClasses: trial.putClasses,\n\t\t})\n\t\tif trial.success {\n\t\t\tc.Check(err, IsNil)\n\t\t} else {\n\t\t\tc.Check(err, NotNil)\n\t\t}\n\t\tc.Check(len(st.handled) >= trial.minRequests, Equals, true, Commentf(\"len(st.handled)==%d, trial.minRequests==%d\", len(st.handled), trial.minRequests))\n\t\tc.Check(len(st.handled) <= trial.maxRequests, Equals, true, Commentf(\"len(st.handled)==%d, trial.maxRequests==%d\", len(st.handled), trial.maxRequests))\n\t\tif !trial.success && trial.replicas == 1 && c.Check(len(st.requests) >= 2, Equals, true) {\n\t\t\t// Max concurrency should be 1. First request\n\t\t\t// should have succeeded for class1. Second\n\t\t\t// request should only ask for class404.\n\t\t\tc.Check(st.requests[1].Header.Get(\"X-Keep-Storage-Classes\"), Equals, \"class404\")\n\t\t}\n\t}\n}\n\ntype FailHandler struct {\n\thandled chan string\n}\n\nfunc (fh FailHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tresp.WriteHeader(500)\n\tfh.handled <- fmt.Sprintf(\"http://%s\", req.Host)\n}\n\ntype FailThenSucceedHandler struct {\n\tmorefails      int // fail 1 + this many times before succeeding\n\thandled        chan string\n\tcount          atomic.Int64\n\tsuccesshandler http.Handler\n\treqIDs         []string\n}\n\nfunc (fh *FailThenSucceedHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tfh.reqIDs = append(fh.reqIDs, req.Header.Get(\"X-Request-Id\"))\n\tif int(fh.count.Add(1)) <= fh.morefails+1 {\n\t\tresp.WriteHeader(500)\n\t\tfh.handled <- fmt.Sprintf(\"http://%s\", req.Host)\n\t} else {\n\t\tfh.successhandler.ServeHTTP(resp, req)\n\t}\n}\n\ntype Error404Handler struct {\n\thandled chan string\n}\n\nfunc (fh Error404Handler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tresp.WriteHeader(404)\n\tfh.handled <- fmt.Sprintf(\"http://%s\", req.Host)\n}\n\nfunc (s *StandaloneSuite) TestFailedUploadToStubKeepServer(c *C) {\n\tst := FailHandler{\n\t\tmake(chan string)}\n\n\thash := \"acbd18db4cc2f85cedef654fccc4a4d8\"\n\n\tUploadToStubHelper(c, st,\n\t\tfunc(kc *KeepClient, url string, reader io.ReadCloser,\n\t\t\twriter io.WriteCloser, uploadStatusChan chan uploadStatus) {\n\n\t\t\tgo kc.uploadToKeepServer(url, hash, nil, reader, uploadStatusChan, 3, kc.getRequestID())\n\n\t\t\twriter.Write([]byte(\"foo\"))\n\t\t\twriter.Close()\n\n\t\t\t<-st.handled\n\n\t\t\tstatus := <-uploadStatusChan\n\t\t\tc.Check(status.url, Equals, fmt.Sprintf(\"%s/%s\", url, hash))\n\t\t\tc.Check(status.statusCode, Equals, 500)\n\t\t})\n}\n\ntype KeepServer struct {\n\tlistener net.Listener\n\turl      string\n}\n\nfunc RunSomeFakeKeepServers(st http.Handler, n int) (ks []KeepServer) {\n\tks = make([]KeepServer, n)\n\n\tfor i := 0; i < n; i++ {\n\t\tks[i] = RunFakeKeepServer(st)\n\t}\n\n\treturn ks\n}\n\nfunc (s *StandaloneSuite) TestPutB(c *C) {\n\thash := Md5String(\"foo\")\n\n\tst := &StubPutHandler{\n\t\tc:                    c,\n\t\texpectPath:           hash,\n\t\texpectAPIToken:       \"abc123\",\n\t\texpectBody:           \"foo\",\n\t\texpectStorageClass:   \"default\",\n\t\treturnStorageClasses: \"\",\n\t\thandled:              make(chan string, 5),\n\t}\n\n\tarv, _ := arvadosclient.MakeArvadosClient()\n\tkc, _ := 
MakeKeepClient(arv)\n\n\tkc.Want_replicas = 2\n\tarv.ApiToken = \"abc123\"\n\tlocalRoots := make(map[string]string)\n\twritableLocalRoots := make(map[string]string)\n\n\tks := RunSomeFakeKeepServers(st, 5)\n\n\tfor i, k := range ks {\n\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\tdefer k.listener.Close()\n\t}\n\n\tkc.SetServiceRoots(localRoots, writableLocalRoots, nil)\n\n\tkc.PutB([]byte(\"foo\"))\n\n\tshuff := NewRootSorter(\n\t\tkc.LocalRoots(), Md5String(\"foo\")).GetSortedRoots()\n\n\ts1 := <-st.handled\n\ts2 := <-st.handled\n\tc.Check((s1 == shuff[0] && s2 == shuff[1]) ||\n\t\t(s1 == shuff[1] && s2 == shuff[0]),\n\t\tEquals,\n\t\ttrue)\n}\n\nfunc (s *StandaloneSuite) TestPutHR(c *C) {\n\thash := fmt.Sprintf(\"%x\", md5.Sum([]byte(\"foo\")))\n\n\tst := &StubPutHandler{\n\t\tc:                    c,\n\t\texpectPath:           hash,\n\t\texpectAPIToken:       \"abc123\",\n\t\texpectBody:           \"foo\",\n\t\texpectStorageClass:   \"default\",\n\t\treturnStorageClasses: \"\",\n\t\thandled:              make(chan string, 5),\n\t}\n\n\tarv, _ := arvadosclient.MakeArvadosClient()\n\tkc, _ := MakeKeepClient(arv)\n\n\tkc.Want_replicas = 2\n\tarv.ApiToken = \"abc123\"\n\tlocalRoots := make(map[string]string)\n\twritableLocalRoots := make(map[string]string)\n\n\tks := RunSomeFakeKeepServers(st, 5)\n\n\tfor i, k := range ks {\n\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\tdefer k.listener.Close()\n\t}\n\n\tkc.SetServiceRoots(localRoots, writableLocalRoots, nil)\n\n\tkc.PutHR(hash, bytes.NewBuffer([]byte(\"foo\")), 3)\n\n\tshuff := NewRootSorter(kc.LocalRoots(), hash).GetSortedRoots()\n\n\ts1 := <-st.handled\n\ts2 := <-st.handled\n\n\tc.Check((s1 == shuff[0] && s2 == shuff[1]) ||\n\t\t(s1 == shuff[1] && s2 == shuff[0]),\n\t\tEquals,\n\t\ttrue)\n}\n\nfunc (s *StandaloneSuite) TestPutWithFail(c *C) {\n\thash := fmt.Sprintf(\"%x\", md5.Sum([]byte(\"foo\")))\n\n\tst := &StubPutHandler{\n\t\tc:                    c,\n\t\texpectPath:           hash,\n\t\texpectAPIToken:       \"abc123\",\n\t\texpectBody:           \"foo\",\n\t\texpectStorageClass:   \"default\",\n\t\treturnStorageClasses: \"\",\n\t\thandled:              make(chan string, 4),\n\t}\n\n\tfh := FailHandler{\n\t\tmake(chan string, 1)}\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\n\tkc.Want_replicas = 2\n\tarv.ApiToken = \"abc123\"\n\tlocalRoots := make(map[string]string)\n\twritableLocalRoots := make(map[string]string)\n\n\tks1 := RunSomeFakeKeepServers(st, 4)\n\tks2 := RunSomeFakeKeepServers(fh, 1)\n\n\tfor i, k := range ks1 {\n\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\tdefer k.listener.Close()\n\t}\n\tfor i, k := range ks2 {\n\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i+len(ks1))] = k.url\n\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i+len(ks1))] = k.url\n\t\tdefer k.listener.Close()\n\t}\n\n\tkc.SetServiceRoots(localRoots, writableLocalRoots, nil)\n\n\tshuff := NewRootSorter(\n\t\tkc.LocalRoots(), Md5String(\"foo\")).GetSortedRoots()\n\tc.Logf(\"%+v\", shuff)\n\n\tphash, replicas, err := kc.PutB([]byte(\"foo\"))\n\n\t<-fh.handled\n\n\tc.Check(err, IsNil)\n\tc.Check(phash, Equals, 
\"\")\n\tc.Check(replicas, Equals, 2)\n\n\ts1 := <-st.handled\n\ts2 := <-st.handled\n\n\tc.Check((s1 == shuff[1] && s2 == shuff[2]) ||\n\t\t(s1 == shuff[2] && s2 == shuff[1]),\n\t\tEquals,\n\t\ttrue)\n}\n\nfunc (s *StandaloneSuite) TestPutWithTooManyFail(c *C) {\n\thash := fmt.Sprintf(\"%x\", md5.Sum([]byte(\"foo\")))\n\n\tst := &StubPutHandler{\n\t\tc:                    c,\n\t\texpectPath:           hash,\n\t\texpectAPIToken:       \"abc123\",\n\t\texpectBody:           \"foo\",\n\t\texpectStorageClass:   \"default\",\n\t\treturnStorageClasses: \"\",\n\t\thandled:              make(chan string, 1),\n\t}\n\n\tfh := FailHandler{\n\t\tmake(chan string, 4)}\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\n\tkc.Want_replicas = 2\n\tkc.Retries = 0\n\tarv.ApiToken = \"abc123\"\n\tlocalRoots := make(map[string]string)\n\twritableLocalRoots := make(map[string]string)\n\n\tks1 := RunSomeFakeKeepServers(st, 1)\n\tks2 := RunSomeFakeKeepServers(fh, 4)\n\n\tfor i, k := range ks1 {\n\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\tdefer k.listener.Close()\n\t}\n\tfor i, k := range ks2 {\n\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i+len(ks1))] = k.url\n\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i+len(ks1))] = k.url\n\t\tdefer k.listener.Close()\n\t}\n\n\tkc.SetServiceRoots(localRoots, writableLocalRoots, nil)\n\n\t_, replicas, err := kc.PutB([]byte(\"foo\"))\n\n\tc.Check(err, FitsTypeOf, InsufficientReplicasError{})\n\tc.Check(replicas, Equals, 1)\n\tc.Check(<-st.handled, Equals, ks1[0].url)\n}\n\ntype StubGetHandler struct {\n\tc              *C\n\texpectPath     string\n\texpectAPIToken string\n\thttpStatus     int\n\tbody           []byte\n}\n\nfunc (sgh StubGetHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tsgh.c.Check(req.URL.Path, Equals, \"/\"+sgh.expectPath)\n\tsgh.c.Check(req.Header.Get(\"Authorization\"), Equals, fmt.Sprintf(\"Bearer %s\", sgh.expectAPIToken))\n\tresp.WriteHeader(sgh.httpStatus)\n\tresp.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", len(sgh.body)))\n\tresp.Write(sgh.body)\n}\n\nfunc (s *StandaloneSuite) TestGet(c *C) {\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"foo\")))\n\n\tst := StubGetHandler{\n\t\tc,\n\t\thash,\n\t\t\"abc123\",\n\t\thttp.StatusOK,\n\t\t[]byte(\"foo\")}\n\n\tks := RunFakeKeepServer(st)\n\tdefer ks.listener.Close()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = \"abc123\"\n\tkc.SetServiceRoots(map[string]string{\"x\": ks.url}, nil, nil)\n\n\tr, n, _, err := kc.Get(hash)\n\tc.Assert(err, IsNil)\n\tc.Check(n, Equals, int64(3))\n\n\tcontent, err2 := ioutil.ReadAll(r)\n\tc.Check(err2, IsNil)\n\tc.Check(content, DeepEquals, []byte(\"foo\"))\n\tc.Check(r.Close(), IsNil)\n}\n\nfunc (s *StandaloneSuite) TestGet404(c *C) {\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"foo\")))\n\n\tst := Error404Handler{make(chan string, 1)}\n\n\tks := RunFakeKeepServer(st)\n\tdefer ks.listener.Close()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = \"abc123\"\n\tkc.SetServiceRoots(map[string]string{\"x\": ks.url}, nil, nil)\n\n\tr, n, _, err := kc.Get(hash)\n\tc.Check(err, Equals, BlockNotFound)\n\tc.Check(n, Equals, int64(0))\n\tc.Check(r, IsNil)\n}\n\nfunc (s *StandaloneSuite) 
TestGetEmptyBlock(c *C) {\n\tst := Error404Handler{make(chan string, 1)}\n\n\tks := RunFakeKeepServer(st)\n\tdefer ks.listener.Close()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = \"abc123\"\n\tkc.SetServiceRoots(map[string]string{\"x\": ks.url}, nil, nil)\n\n\tr, n, _, err := kc.Get(\"d41d8cd98f00b204e9800998ecf8427e+0\")\n\tc.Check(err, IsNil)\n\tc.Check(n, Equals, int64(0))\n\tc.Assert(r, NotNil)\n\tbuf, err := ioutil.ReadAll(r)\n\tc.Check(err, IsNil)\n\tc.Check(buf, DeepEquals, []byte{})\n\tc.Check(r.Close(), IsNil)\n}\n\nfunc (s *StandaloneSuite) TestGetFail(c *C) {\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"foo\")))\n\n\tst := FailHandler{make(chan string, 1)}\n\n\tks := RunFakeKeepServer(st)\n\tdefer ks.listener.Close()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = \"abc123\"\n\tkc.SetServiceRoots(map[string]string{\"x\": ks.url}, nil, nil)\n\tkc.Retries = 0\n\n\tr, n, _, err := kc.Get(hash)\n\terrNotFound, _ := err.(*ErrNotFound)\n\tif c.Check(errNotFound, NotNil) {\n\t\tc.Check(strings.Contains(errNotFound.Error(), \"HTTP 500\"), Equals, true)\n\t\tc.Check(errNotFound.Temporary(), Equals, true)\n\t}\n\tc.Check(n, Equals, int64(0))\n\tc.Check(r, IsNil)\n}\n\nfunc (s *StandaloneSuite) TestGetFailRetry(c *C) {\n\tdefer func(origDefault, origMinimum time.Duration) {\n\t\tDefaultRetryDelay = origDefault\n\t\tMinimumRetryDelay = origMinimum\n\t}(DefaultRetryDelay, MinimumRetryDelay)\n\tDefaultRetryDelay = time.Second / 8\n\tMinimumRetryDelay = time.Millisecond\n\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"foo\")))\n\n\tfor _, delay := range []time.Duration{0, time.Nanosecond, time.Second / 8, time.Second / 16} {\n\t\tc.Logf(\"=== initial delay %v\", delay)\n\n\t\tst := &FailThenSucceedHandler{\n\t\t\tmorefails: 2,\n\t\t\thandled:   make(chan string, 4),\n\t\t\tsuccesshandler: StubGetHandler{\n\t\t\t\tc,\n\t\t\t\thash,\n\t\t\t\t\"abc123\",\n\t\t\t\thttp.StatusOK,\n\t\t\t\t[]byte(\"foo\")}}\n\n\t\tks := RunFakeKeepServer(st)\n\t\tdefer ks.listener.Close()\n\n\t\tarv, err := arvadosclient.MakeArvadosClient()\n\t\tc.Check(err, IsNil)\n\t\tkc, _ := MakeKeepClient(arv)\n\t\tarv.ApiToken = \"abc123\"\n\t\tkc.SetServiceRoots(map[string]string{\"x\": ks.url}, nil, nil)\n\t\tkc.Retries = 3\n\t\tkc.RetryDelay = delay\n\t\tkc.DiskCacheSize = DiskCacheDisabled\n\n\t\tt0 := time.Now()\n\t\tr, n, _, err := kc.Get(hash)\n\t\tc.Assert(err, IsNil)\n\t\tc.Check(n, Equals, int64(3))\n\t\telapsed := time.Since(t0)\n\n\t\tnonsleeptime := time.Second / 10\n\t\texpect := kc.RetryDelay\n\t\tif expect == 0 {\n\t\t\texpect = DefaultRetryDelay\n\t\t}\n\t\tmin := MinimumRetryDelay * 3\n\t\tmax := expect + expect*2 + expect*2*2 + nonsleeptime\n\t\tc.Check(elapsed >= min, Equals, true, Commentf(\"elapsed %v / expect min %v\", elapsed, min))\n\t\tc.Check(elapsed <= max, Equals, true, Commentf(\"elapsed %v / expect max %v\", elapsed, max))\n\n\t\tcontent, err := ioutil.ReadAll(r)\n\t\tc.Check(err, IsNil)\n\t\tc.Check(content, DeepEquals, []byte(\"foo\"))\n\t\tc.Check(r.Close(), IsNil)\n\n\t\tc.Logf(\"%q\", st.reqIDs)\n\t\tif c.Check(st.reqIDs, Not(HasLen), 0) {\n\t\t\tfor _, reqid := range st.reqIDs {\n\t\t\t\tc.Check(reqid, Not(Equals), \"\")\n\t\t\t\tc.Check(reqid, Equals, st.reqIDs[0])\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *StandaloneSuite) TestGetNetError(c *C) {\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"foo\")))\n\n\tarv, err := 
arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = \"abc123\"\n\tkc.SetServiceRoots(map[string]string{\"x\": \"http://localhost:62222\"}, nil, nil)\n\n\tr, n, _, err := kc.Get(hash)\n\terrNotFound, _ := err.(*ErrNotFound)\n\tif c.Check(errNotFound, NotNil) {\n\t\tc.Check(strings.Contains(errNotFound.Error(), \"connection refused\"), Equals, true)\n\t\tc.Check(errNotFound.Temporary(), Equals, true)\n\t}\n\tc.Check(n, Equals, int64(0))\n\tc.Check(r, IsNil)\n}\n\nfunc (s *StandaloneSuite) TestGetWithServiceHint(c *C) {\n\tuuid := \"zzzzz-bi6l4-123451234512345\"\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"foo\")))\n\n\t// This one shouldn't be used:\n\tks0 := RunFakeKeepServer(StubGetHandler{\n\t\tc,\n\t\t\"error if used\",\n\t\t\"abc123\",\n\t\thttp.StatusOK,\n\t\t[]byte(\"foo\")})\n\tdefer ks0.listener.Close()\n\t// This one should be used:\n\tks := RunFakeKeepServer(StubGetHandler{\n\t\tc,\n\t\thash + \"+K@\" + uuid,\n\t\t\"abc123\",\n\t\thttp.StatusOK,\n\t\t[]byte(\"foo\")})\n\tdefer ks.listener.Close()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = \"abc123\"\n\tkc.SetServiceRoots(\n\t\tmap[string]string{\"x\": ks0.url},\n\t\tnil,\n\t\tmap[string]string{uuid: ks.url})\n\n\tr, n, _, err := kc.Get(hash + \"+K@\" + uuid)\n\tc.Assert(err, IsNil)\n\tc.Check(n, Equals, int64(3))\n\n\tcontent, err := ioutil.ReadAll(r)\n\tc.Check(err, IsNil)\n\tc.Check(content, DeepEquals, []byte(\"foo\"))\n\tc.Check(r.Close(), IsNil)\n}\n\n// Use a service hint to fetch from a local disk service, overriding\n// rendezvous probe order.\nfunc (s *StandaloneSuite) TestGetWithLocalServiceHint(c *C) {\n\tuuid := \"zzzzz-bi6l4-zzzzzzzzzzzzzzz\"\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"foo\")))\n\n\t// This one shouldn't be used, although it appears first in\n\t// rendezvous probe order:\n\tks0 := RunFakeKeepServer(StubGetHandler{\n\t\tc,\n\t\t\"error if used\",\n\t\t\"abc123\",\n\t\thttp.StatusBadGateway,\n\t\tnil})\n\tdefer ks0.listener.Close()\n\t// This one should be used:\n\tks := RunFakeKeepServer(StubGetHandler{\n\t\tc,\n\t\thash + \"+K@\" + uuid,\n\t\t\"abc123\",\n\t\thttp.StatusOK,\n\t\t[]byte(\"foo\")})\n\tdefer ks.listener.Close()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = \"abc123\"\n\tkc.SetServiceRoots(\n\t\tmap[string]string{\n\t\t\t\"zzzzz-bi6l4-yyyyyyyyyyyyyyy\": ks0.url,\n\t\t\t\"zzzzz-bi6l4-xxxxxxxxxxxxxxx\": ks0.url,\n\t\t\t\"zzzzz-bi6l4-wwwwwwwwwwwwwww\": ks0.url,\n\t\t\tuuid:                          ks.url},\n\t\tnil,\n\t\tmap[string]string{\n\t\t\t\"zzzzz-bi6l4-yyyyyyyyyyyyyyy\": ks0.url,\n\t\t\t\"zzzzz-bi6l4-xxxxxxxxxxxxxxx\": ks0.url,\n\t\t\t\"zzzzz-bi6l4-wwwwwwwwwwwwwww\": ks0.url,\n\t\t\tuuid:                          ks.url},\n\t)\n\n\tr, n, _, err := kc.Get(hash + \"+K@\" + uuid)\n\tc.Assert(err, IsNil)\n\tc.Check(n, Equals, int64(3))\n\n\tcontent, err := ioutil.ReadAll(r)\n\tc.Check(err, IsNil)\n\tc.Check(content, DeepEquals, []byte(\"foo\"))\n\tc.Check(r.Close(), IsNil)\n}\n\nfunc (s *StandaloneSuite) TestGetWithServiceHintFailoverToLocals(c *C) {\n\tuuid := \"zzzzz-bi6l4-123451234512345\"\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"foo\")))\n\n\tksLocal := RunFakeKeepServer(StubGetHandler{\n\t\tc,\n\t\thash + \"+K@\" + uuid,\n\t\t\"abc123\",\n\t\thttp.StatusOK,\n\t\t[]byte(\"foo\")})\n\tdefer ksLocal.listener.Close()\n\tksGateway := 
RunFakeKeepServer(StubGetHandler{\n\t\tc,\n\t\thash + \"+K@\" + uuid,\n\t\t\"abc123\",\n\t\thttp.StatusInternalServerError,\n\t\t[]byte(\"Error\")})\n\tdefer ksGateway.listener.Close()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = \"abc123\"\n\tkc.SetServiceRoots(\n\t\tmap[string]string{\"zzzzz-bi6l4-keepdisk0000000\": ksLocal.url},\n\t\tnil,\n\t\tmap[string]string{uuid: ksGateway.url})\n\n\tr, n, _, err := kc.Get(hash + \"+K@\" + uuid)\n\tc.Assert(err, IsNil)\n\tc.Check(n, Equals, int64(3))\n\n\tcontent, err := ioutil.ReadAll(r)\n\tc.Check(err, IsNil)\n\tc.Check(content, DeepEquals, []byte(\"foo\"))\n\tc.Check(r.Close(), IsNil)\n}\n\ntype BarHandler struct {\n\thandled chan string\n}\n\nfunc (h BarHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tresp.Write([]byte(\"bar\"))\n\th.handled <- fmt.Sprintf(\"http://%s\", req.Host)\n}\n\nfunc (s *StandaloneSuite) TestChecksum(c *C) {\n\tfoohash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"foo\")))\n\tbarhash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"bar\")))\n\n\tst := BarHandler{make(chan string, 1)}\n\n\tks := RunFakeKeepServer(st)\n\tdefer ks.listener.Close()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = \"abc123\"\n\tkc.SetServiceRoots(map[string]string{\"x\": ks.url}, nil, nil)\n\n\tr, n, _, err := kc.Get(barhash)\n\tif c.Check(err, IsNil) {\n\t\t_, err = ioutil.ReadAll(r)\n\t\tc.Check(n, Equals, int64(3))\n\t\tc.Check(err, IsNil)\n\t}\n\n\tselect {\n\tcase <-st.handled:\n\tcase <-time.After(time.Second):\n\t\tc.Fatal(\"timed out\")\n\t}\n\n\tr, n, _, err = kc.Get(foohash)\n\tif err == nil {\n\t\tbuf, readerr := ioutil.ReadAll(r)\n\t\tc.Logf(\"%q\", buf)\n\t\terr = readerr\n\t}\n\tc.Check(err, Equals, BadChecksum)\n\n\tselect {\n\tcase <-st.handled:\n\tcase <-time.After(time.Second):\n\t\tc.Fatal(\"timed out\")\n\t}\n}\n\nfunc (s *StandaloneSuite) TestGetWithFailures(c *C) {\n\tcontent := []byte(\"waz\")\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum(content))\n\n\tfh := Error404Handler{\n\t\tmake(chan string, 4)}\n\n\tst := StubGetHandler{\n\t\tc,\n\t\thash,\n\t\t\"abc123\",\n\t\thttp.StatusOK,\n\t\tcontent}\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = \"abc123\"\n\tlocalRoots := make(map[string]string)\n\twritableLocalRoots := make(map[string]string)\n\n\tks1 := RunSomeFakeKeepServers(st, 1)\n\tks2 := RunSomeFakeKeepServers(fh, 4)\n\n\tfor i, k := range ks1 {\n\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\tdefer k.listener.Close()\n\t}\n\tfor i, k := range ks2 {\n\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i+len(ks1))] = k.url\n\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i+len(ks1))] = k.url\n\t\tdefer k.listener.Close()\n\t}\n\n\tkc.SetServiceRoots(localRoots, writableLocalRoots, nil)\n\tkc.Retries = 0\n\n\t// This test works only if one of the failing services is\n\t// attempted before the succeeding service. Otherwise,\n\t// <-fh.handled below will just hang! 
(Probe order depends on\n\t// the choice of block content \"waz\" and the UUIDs of the fake\n\t// servers, so we just tried different strings until we found\n\t// an example that passes this Assert.)\n\tc.Assert(NewRootSorter(localRoots, hash).GetSortedRoots()[0], Not(Equals), ks1[0].url)\n\n\tr, n, _, err := kc.Get(hash)\n\n\tselect {\n\tcase <-fh.handled:\n\tcase <-time.After(time.Second):\n\t\tc.Fatal(\"timed out\")\n\t}\n\tc.Assert(err, IsNil)\n\tc.Check(n, Equals, int64(3))\n\n\treadContent, err2 := ioutil.ReadAll(r)\n\tc.Check(err2, IsNil)\n\tc.Check(readContent, DeepEquals, content)\n\tc.Check(r.Close(), IsNil)\n}\n\nfunc (s *ServerRequiredSuite) TestPutGetHead(c *C) {\n\tcontent := []byte(\"TestPutGetHead\")\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, err := MakeKeepClient(arv)\n\tc.Assert(err, IsNil)\n\n\thash := fmt.Sprintf(\"%x+%d\", md5.Sum(content), len(content))\n\n\t{\n\t\tn, _, err := kc.Ask(hash)\n\t\tc.Check(err, Equals, BlockNotFound)\n\t\tc.Check(n, Equals, int64(0))\n\t}\n\t{\n\t\thash2, replicas, err := kc.PutB(content)\n\t\tc.Check(err, IsNil)\n\t\tc.Check(hash2, Matches, `\\Q`+hash+`\\E\\b.*`)\n\t\tc.Check(replicas, Equals, 2)\n\t}\n\t{\n\t\tr, n, _, err := kc.Get(hash)\n\t\tc.Check(err, IsNil)\n\t\tc.Check(n, Equals, int64(len(content)))\n\t\tif c.Check(r, NotNil) {\n\t\t\treadContent, err := ioutil.ReadAll(r)\n\t\t\tc.Check(err, IsNil)\n\t\t\tif c.Check(len(readContent), Equals, len(content)) {\n\t\t\t\tc.Check(readContent, DeepEquals, content)\n\t\t\t}\n\t\t\tc.Check(r.Close(), IsNil)\n\t\t}\n\t}\n\t{\n\t\tn, url2, err := kc.Ask(hash)\n\t\tc.Check(err, IsNil)\n\t\tc.Check(n, Equals, int64(len(content)))\n\t\tc.Check(url2, Matches, \"http://localhost:\\\\d+/\\\\Q\"+hash+\"\\\\E\")\n\t}\n\t{\n\t\tloc, err := kc.LocalLocator(hash)\n\t\tc.Check(err, IsNil)\n\t\tc.Assert(len(loc) >= 32, Equals, true)\n\t\tc.Check(loc[:32], Equals, hash[:32])\n\t}\n\t{\n\t\tcontent := []byte(\"the perth county conspiracy\")\n\t\tloc, err := kc.LocalLocator(fmt.Sprintf(\"%x+%d+Rzaaaa-abcde@12345\", md5.Sum(content), len(content)))\n\t\tc.Check(loc, Equals, \"\")\n\t\tc.Check(err, ErrorMatches, `.*HEAD .*\\+R.*`)\n\t\tc.Check(err, ErrorMatches, `.*HTTP 400.*`)\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestCollectMetrics(c *C) {\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\tkc1, err := MakeKeepClient(arv)\n\tkc1.Want_replicas = 1\n\tc.Assert(err, IsNil)\n\tkc2 := kc1.Clone()\n\tkc2.Want_replicas = 2\n\tkcNoCache := kc1.Clone()\n\tkcNoCache.DiskCacheSize = DiskCacheDisabled\n\tkcNoCache.Want_replicas = 1\n\n\treg := prometheus.NewRegistry()\n\tkc1.RegisterMetrics(reg)\n\n\twrite := func(kc *KeepClient, len int) string {\n\t\thash, _, err := kc.PutB(make([]byte, len))\n\t\tc.Assert(err, IsNil)\n\t\treturn hash\n\t}\n\tread := func(kc *KeepClient, hash string) {\n\t\tr, _, _, err := kc.Get(hash)\n\t\tc.Assert(err, IsNil)\n\t\t_, err = ioutil.ReadAll(r)\n\t\tc.Assert(err, IsNil)\n\t}\n\ttype metrics struct {\n\t\tbytesIn   int\n\t\tbytesOut  int\n\t\tcacheHit  int\n\t\tcacheMiss int\n\t\topsGet    int\n\t\topsPut    int\n\t}\n\tgetMetrics := func() (m metrics) {\n\t\tm.bytesIn = int(arvadostest.GetMetricValue(c, reg, \"arvados_keepclient_backend_bytes\", \"direction\", \"in\"))\n\t\tm.bytesOut = int(arvadostest.GetMetricValue(c, reg, \"arvados_keepclient_backend_bytes\", \"direction\", \"out\"))\n\t\tm.cacheHit = int(arvadostest.GetMetricValue(c, reg, \"arvados_keepclient_cache\", \"event\", \"hit\"))\n\t\tm.cacheMiss = 
int(arvadostest.GetMetricValue(c, reg, \"arvados_keepclient_cache\", \"event\", \"miss\"))\n\t\tm.opsGet = int(arvadostest.GetMetricValue(c, reg, \"arvados_keepclient_ops\", \"op\", \"get\"))\n\t\tm.opsPut = int(arvadostest.GetMetricValue(c, reg, \"arvados_keepclient_ops\", \"op\", \"put\"))\n\t\treturn\n\t}\n\n\tm0 := getMetrics()\n\tc.Check(m0, Equals, metrics{})\n\thash1 := write(kc1, 1111) // 1111 bytes out\n\thash2 := write(kc2, 2222) // 4444 bytes out\n\tm := getMetrics()\n\tc.Check(m.bytesIn, Equals, m0.bytesIn)\n\tc.Check(m.bytesOut, Equals, m0.bytesOut+5555)\n\tc.Check(m.opsPut, Equals, m0.opsPut+2)\n\n\t// Read blocks using different clients, and check metrics:\n\t// bytes in, cache hits, and Get ops.\n\t{\n\t\tm0 = getMetrics()\n\n\t\tread(kc1, hash2) // cached\n\t\tread(kc2, hash1) // cached\n\t\tm = getMetrics()\n\t\tc.Check(m.bytesIn, Equals, m0.bytesIn)\n\t\tc.Check(m.cacheHit, Equals, m0.cacheHit+2)\n\t\tc.Check(m.cacheMiss, Equals, m0.cacheMiss)\n\t\tc.Check(m.opsGet, Equals, m0.opsGet+2)\n\t}\n\n\t// Use a client with DiskCacheDisabled to write a block, so we\n\t// can read it back without hitting the cache, and check the\n\t// cache hits metric.\n\t{\n\t\tm0 = getMetrics()\n\n\t\thash3 := write(kcNoCache, 3210)\n\t\tm = getMetrics()\n\t\tc.Check(m.opsGet, Equals, m0.opsGet)\n\t\tc.Check(m.opsPut, Equals, m0.opsPut+1)\n\t\tc.Check(m.cacheMiss, Equals, m0.cacheMiss)\n\t\tc.Check(m.bytesIn, Equals, m0.bytesIn)\n\t\tc.Check(m.bytesOut, Equals, m0.bytesOut+3210)\n\n\t\tread(kc1, hash3)\n\t\tm = getMetrics()\n\t\tc.Check(m.cacheHit, Equals, m0.cacheHit)\n\t\tc.Check(m.cacheMiss, Equals, m0.cacheMiss+1)\n\t\tc.Check(m.opsGet, Equals, m0.opsGet+1)\n\t\tc.Check(m.bytesIn, Equals, m0.bytesIn+3210)\n\t\tc.Check(m.bytesOut, Equals, m0.bytesOut+3210)\n\t}\n\n\tc.Logf(\"### Metrics after %s ###\\n%s\", c.TestName(), arvadostest.GatherMetricsAsString(reg))\n}\n\ntype StubProxyHandler struct {\n\thandled chan string\n}\n\nfunc (h StubProxyHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\tresp.Header().Set(\"X-Keep-Replicas-Stored\", \"2\")\n\th.handled <- fmt.Sprintf(\"http://%s\", req.Host)\n}\n\nfunc (s *StandaloneSuite) TestPutProxy(c *C) {\n\tst := StubProxyHandler{make(chan string, 1)}\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\n\tkc.Want_replicas = 2\n\tarv.ApiToken = \"abc123\"\n\tlocalRoots := make(map[string]string)\n\twritableLocalRoots := make(map[string]string)\n\n\tks1 := RunSomeFakeKeepServers(st, 1)\n\n\tfor i, k := range ks1 {\n\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\tdefer k.listener.Close()\n\t}\n\n\tkc.SetServiceRoots(localRoots, writableLocalRoots, nil)\n\n\t_, replicas, err := kc.PutB([]byte(\"foo\"))\n\t<-st.handled\n\n\tc.Check(err, IsNil)\n\tc.Check(replicas, Equals, 2)\n}\n\nfunc (s *StandaloneSuite) TestPutProxyInsufficientReplicas(c *C) {\n\tst := StubProxyHandler{make(chan string, 1)}\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\n\tkc.Want_replicas = 3\n\tarv.ApiToken = \"abc123\"\n\tlocalRoots := make(map[string]string)\n\twritableLocalRoots := make(map[string]string)\n\n\tks1 := RunSomeFakeKeepServers(st, 1)\n\n\tfor i, k := range ks1 {\n\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = 
k.url\n\t\tdefer k.listener.Close()\n\t}\n\tkc.SetServiceRoots(localRoots, writableLocalRoots, nil)\n\n\t_, replicas, err := kc.PutB([]byte(\"foo\"))\n\t<-st.handled\n\n\tc.Check(err, FitsTypeOf, InsufficientReplicasError{})\n\tc.Check(replicas, Equals, 2)\n}\n\nfunc (s *StandaloneSuite) TestMakeLocator(c *C) {\n\tl, err := MakeLocator(\"91f372a266fe2bf2823cb8ec7fda31ce+3+Aabcde@12345678\")\n\tc.Check(err, IsNil)\n\tc.Check(l.Hash, Equals, \"91f372a266fe2bf2823cb8ec7fda31ce\")\n\tc.Check(l.Size, Equals, 3)\n\tc.Check(l.Hints, DeepEquals, []string{\"3\", \"Aabcde@12345678\"})\n}\n\nfunc (s *StandaloneSuite) TestMakeLocatorNoHints(c *C) {\n\tl, err := MakeLocator(\"91f372a266fe2bf2823cb8ec7fda31ce\")\n\tc.Check(err, IsNil)\n\tc.Check(l.Hash, Equals, \"91f372a266fe2bf2823cb8ec7fda31ce\")\n\tc.Check(l.Size, Equals, -1)\n\tc.Check(l.Hints, DeepEquals, []string{})\n}\n\nfunc (s *StandaloneSuite) TestMakeLocatorNoSizeHint(c *C) {\n\tl, err := MakeLocator(\"91f372a266fe2bf2823cb8ec7fda31ce+Aabcde@12345678\")\n\tc.Check(err, IsNil)\n\tc.Check(l.Hash, Equals, \"91f372a266fe2bf2823cb8ec7fda31ce\")\n\tc.Check(l.Size, Equals, -1)\n\tc.Check(l.Hints, DeepEquals, []string{\"Aabcde@12345678\"})\n}\n\nfunc (s *StandaloneSuite) TestMakeLocatorPreservesUnrecognizedHints(c *C) {\n\tstr := \"91f372a266fe2bf2823cb8ec7fda31ce+3+Unknown+Kzzzzz+Afoobar\"\n\tl, err := MakeLocator(str)\n\tc.Check(err, IsNil)\n\tc.Check(l.Hash, Equals, \"91f372a266fe2bf2823cb8ec7fda31ce\")\n\tc.Check(l.Size, Equals, 3)\n\tc.Check(l.Hints, DeepEquals, []string{\"3\", \"Unknown\", \"Kzzzzz\", \"Afoobar\"})\n\tc.Check(l.String(), Equals, str)\n}\n\nfunc (s *StandaloneSuite) TestMakeLocatorInvalidInput(c *C) {\n\t_, err := MakeLocator(\"91f372a266fe2bf2823cb8ec7fda31c\")\n\tc.Check(err, Equals, InvalidLocatorError)\n}\n\nfunc (s *StandaloneSuite) TestPutBWant2ReplicasWithOnlyOneWritableLocalRoot(c *C) {\n\thash := Md5String(\"foo\")\n\n\tst := &StubPutHandler{\n\t\tc:                    c,\n\t\texpectPath:           hash,\n\t\texpectAPIToken:       \"abc123\",\n\t\texpectBody:           \"foo\",\n\t\texpectStorageClass:   \"default\",\n\t\treturnStorageClasses: \"\",\n\t\thandled:              make(chan string, 5),\n\t}\n\n\tarv, _ := arvadosclient.MakeArvadosClient()\n\tkc, _ := MakeKeepClient(arv)\n\n\tkc.Want_replicas = 2\n\tarv.ApiToken = \"abc123\"\n\tlocalRoots := make(map[string]string)\n\twritableLocalRoots := make(map[string]string)\n\n\tks := RunSomeFakeKeepServers(st, 5)\n\n\tfor i, k := range ks {\n\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\tif i == 0 {\n\t\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\t}\n\t\tdefer k.listener.Close()\n\t}\n\n\tkc.SetServiceRoots(localRoots, writableLocalRoots, nil)\n\n\t_, replicas, err := kc.PutB([]byte(\"foo\"))\n\n\tc.Check(err, FitsTypeOf, InsufficientReplicasError{})\n\tc.Check(replicas, Equals, 1)\n\n\tc.Check(<-st.handled, Equals, localRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", 0)])\n}\n\nfunc (s *StandaloneSuite) TestPutBWithNoWritableLocalRoots(c *C) {\n\thash := Md5String(\"foo\")\n\n\tst := &StubPutHandler{\n\t\tc:                    c,\n\t\texpectPath:           hash,\n\t\texpectAPIToken:       \"abc123\",\n\t\texpectBody:           \"foo\",\n\t\texpectStorageClass:   \"\",\n\t\treturnStorageClasses: \"\",\n\t\thandled:              make(chan string, 5),\n\t}\n\n\tarv, _ := arvadosclient.MakeArvadosClient()\n\tkc, _ := MakeKeepClient(arv)\n\n\tkc.Want_replicas = 2\n\tarv.ApiToken = 
\"abc123\"\n\tlocalRoots := make(map[string]string)\n\twritableLocalRoots := make(map[string]string)\n\n\tks := RunSomeFakeKeepServers(st, 5)\n\n\tfor i, k := range ks {\n\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\tdefer k.listener.Close()\n\t}\n\n\tkc.SetServiceRoots(localRoots, writableLocalRoots, nil)\n\n\t_, replicas, err := kc.PutB([]byte(\"foo\"))\n\n\tc.Check(err, FitsTypeOf, InsufficientReplicasError{})\n\tc.Check(replicas, Equals, 0)\n}\n\ntype StubGetIndexHandler struct {\n\tc              *C\n\texpectPath     string\n\texpectAPIToken string\n\thttpStatus     int\n\tbody           []byte\n}\n\nfunc (h StubGetIndexHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\th.c.Check(req.URL.Path, Equals, h.expectPath)\n\th.c.Check(req.Header.Get(\"Authorization\"), Equals, fmt.Sprintf(\"Bearer %s\", h.expectAPIToken))\n\tresp.WriteHeader(h.httpStatus)\n\tresp.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", len(h.body)))\n\tresp.Write(h.body)\n}\n\nfunc (s *StandaloneSuite) TestGetIndexWithNoPrefix(c *C) {\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"foo\")))\n\n\tst := StubGetIndexHandler{\n\t\tc,\n\t\t\"/index\",\n\t\t\"abc123\",\n\t\thttp.StatusOK,\n\t\t[]byte(hash + \" 1443559274\\n\\n\")}\n\n\tks := RunFakeKeepServer(st)\n\tdefer ks.listener.Close()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\tkc, err := MakeKeepClient(arv)\n\tc.Assert(err, IsNil)\n\tarv.ApiToken = \"abc123\"\n\tkc.SetServiceRoots(map[string]string{\"x\": ks.url}, nil, nil)\n\n\tr, err := kc.GetIndex(\"x\", \"\")\n\tc.Check(err, IsNil)\n\n\tcontent, err2 := ioutil.ReadAll(r)\n\tc.Check(err2, IsNil)\n\tc.Check(content, DeepEquals, st.body[0:len(st.body)-1])\n}\n\nfunc (s *StandaloneSuite) TestGetIndexWithPrefix(c *C) {\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"foo\")))\n\n\tst := StubGetIndexHandler{\n\t\tc,\n\t\t\"/index/\" + hash[0:3],\n\t\t\"abc123\",\n\t\thttp.StatusOK,\n\t\t[]byte(hash + \" 1443559274\\n\\n\")}\n\n\tks := RunFakeKeepServer(st)\n\tdefer ks.listener.Close()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = \"abc123\"\n\tkc.SetServiceRoots(map[string]string{\"x\": ks.url}, nil, nil)\n\n\tr, err := kc.GetIndex(\"x\", hash[0:3])\n\tc.Assert(err, IsNil)\n\n\tcontent, err2 := ioutil.ReadAll(r)\n\tc.Check(err2, IsNil)\n\tc.Check(content, DeepEquals, st.body[0:len(st.body)-1])\n}\n\nfunc (s *StandaloneSuite) TestGetIndexIncomplete(c *C) {\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"foo\")))\n\n\tst := StubGetIndexHandler{\n\t\tc,\n\t\t\"/index/\" + hash[0:3],\n\t\t\"abc123\",\n\t\thttp.StatusOK,\n\t\t[]byte(hash)}\n\n\tks := RunFakeKeepServer(st)\n\tdefer ks.listener.Close()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = \"abc123\"\n\tkc.SetServiceRoots(map[string]string{\"x\": ks.url}, nil, nil)\n\n\t_, err = kc.GetIndex(\"x\", hash[0:3])\n\tc.Check(err, Equals, ErrIncompleteIndex)\n}\n\nfunc (s *StandaloneSuite) TestGetIndexWithNoSuchServer(c *C) {\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"foo\")))\n\n\tst := StubGetIndexHandler{\n\t\tc,\n\t\t\"/index/\" + hash[0:3],\n\t\t\"abc123\",\n\t\thttp.StatusOK,\n\t\t[]byte(hash)}\n\n\tks := RunFakeKeepServer(st)\n\tdefer ks.listener.Close()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = 
\"abc123\"\n\tkc.SetServiceRoots(map[string]string{\"x\": ks.url}, nil, nil)\n\n\t_, err = kc.GetIndex(\"y\", hash[0:3])\n\tc.Check(err, Equals, ErrNoSuchKeepServer)\n}\n\nfunc (s *StandaloneSuite) TestGetIndexWithNoSuchPrefix(c *C) {\n\tst := StubGetIndexHandler{\n\t\tc,\n\t\t\"/index/abcd\",\n\t\t\"abc123\",\n\t\thttp.StatusOK,\n\t\t[]byte(\"\\n\")}\n\n\tks := RunFakeKeepServer(st)\n\tdefer ks.listener.Close()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Check(err, IsNil)\n\tkc, _ := MakeKeepClient(arv)\n\tarv.ApiToken = \"abc123\"\n\tkc.SetServiceRoots(map[string]string{\"x\": ks.url}, nil, nil)\n\n\tr, err := kc.GetIndex(\"x\", \"abcd\")\n\tc.Check(err, IsNil)\n\n\tcontent, err2 := ioutil.ReadAll(r)\n\tc.Check(err2, IsNil)\n\tc.Check(content, DeepEquals, st.body[0:len(st.body)-1])\n}\n\nfunc (s *StandaloneSuite) TestPutBRetry(c *C) {\n\tDefaultRetryDelay = time.Second / 8\n\tMinimumRetryDelay = time.Millisecond\n\n\tfor _, delay := range []time.Duration{0, time.Nanosecond, time.Second / 8, time.Second / 16} {\n\t\tc.Logf(\"=== initial delay %v\", delay)\n\n\t\tst := &FailThenSucceedHandler{\n\t\t\tmorefails: 5, // handler will fail 6x in total, 3 for each server\n\t\t\thandled:   make(chan string, 10),\n\t\t\tsuccesshandler: &StubPutHandler{\n\t\t\t\tc:                    c,\n\t\t\t\texpectPath:           Md5String(\"foo\"),\n\t\t\t\texpectAPIToken:       \"abc123\",\n\t\t\t\texpectBody:           \"foo\",\n\t\t\t\texpectStorageClass:   \"default\",\n\t\t\t\treturnStorageClasses: \"\",\n\t\t\t\thandled:              make(chan string, 5),\n\t\t\t},\n\t\t}\n\n\t\tarv, _ := arvadosclient.MakeArvadosClient()\n\t\tkc, _ := MakeKeepClient(arv)\n\t\tkc.Retries = 3\n\t\tkc.RetryDelay = delay\n\t\tkc.DiskCacheSize = DiskCacheDisabled\n\t\tkc.Want_replicas = 2\n\n\t\tarv.ApiToken = \"abc123\"\n\t\tlocalRoots := make(map[string]string)\n\t\twritableLocalRoots := make(map[string]string)\n\n\t\tks := RunSomeFakeKeepServers(st, 2)\n\n\t\tfor i, k := range ks {\n\t\t\tlocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\t\twritableLocalRoots[fmt.Sprintf(\"zzzzz-bi6l4-fakefakefake%03d\", i)] = k.url\n\t\t\tdefer k.listener.Close()\n\t\t}\n\n\t\tkc.SetServiceRoots(localRoots, writableLocalRoots, nil)\n\n\t\tt0 := time.Now()\n\t\thash, replicas, err := kc.PutB([]byte(\"foo\"))\n\n\t\tc.Check(err, IsNil)\n\t\tc.Check(hash, Equals, \"\")\n\t\tc.Check(replicas, Equals, 2)\n\t\telapsed := time.Since(t0)\n\n\t\tnonsleeptime := time.Second / 10\n\t\texpect := kc.RetryDelay\n\t\tif expect == 0 {\n\t\t\texpect = DefaultRetryDelay\n\t\t}\n\t\tmin := MinimumRetryDelay * 3\n\t\tmax := expect + expect*2 + expect*2*2\n\t\tmax += nonsleeptime\n\t\tcheckInterval(c, elapsed, min, max)\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestMakeKeepClientWithNonDiskTypeService(c *C) {\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\n\t// Add an additional \"testblobstore\" keepservice\n\tblobKeepService := make(arvadosclient.Dict)\n\terr = arv.Create(\"keep_services\",\n\t\tarvadosclient.Dict{\"keep_service\": arvadosclient.Dict{\n\t\t\t\"service_host\": \"localhost\",\n\t\t\t\"service_port\": \"21321\",\n\t\t\t\"service_type\": \"testblobstore\"}},\n\t\t&blobKeepService)\n\tc.Assert(err, IsNil)\n\tdefer func() { arv.Delete(\"keep_services\", blobKeepService[\"uuid\"].(string), nil, nil) }()\n\tRefreshServiceDiscovery()\n\n\t// Make a keepclient and ensure that the testblobstore is included\n\tkc, err := MakeKeepClient(arv)\n\tc.Assert(err, IsNil)\n\n\t// verify 
kc.LocalRoots\n\tc.Check(len(kc.LocalRoots()), Equals, 3)\n\tfor _, root := range kc.LocalRoots() {\n\t\tc.Check(root, Matches, \"http://localhost:\\\\d+\")\n\t}\n\tc.Assert(kc.LocalRoots()[blobKeepService[\"uuid\"].(string)], Not(Equals), \"\")\n\n\t// verify kc.GatewayRoots\n\tc.Check(len(kc.GatewayRoots()), Equals, 3)\n\tfor _, root := range kc.GatewayRoots() {\n\t\tc.Check(root, Matches, \"http://localhost:\\\\d+\")\n\t}\n\tc.Assert(kc.GatewayRoots()[blobKeepService[\"uuid\"].(string)], Not(Equals), \"\")\n\n\t// verify kc.WritableLocalRoots\n\tc.Check(len(kc.WritableLocalRoots()), Equals, 3)\n\tfor _, root := range kc.WritableLocalRoots() {\n\t\tc.Check(root, Matches, \"http://localhost:\\\\d+\")\n\t}\n\tc.Assert(kc.WritableLocalRoots()[blobKeepService[\"uuid\"].(string)], Not(Equals), \"\")\n\n\tc.Assert(kc.replicasPerService, Equals, 0)\n\tc.Assert(kc.foundNonDiskSvc, Equals, true)\n\tc.Assert(kc.httpClient().(*http.Client).Timeout, Equals, 300*time.Second)\n}\n\nfunc (s *StandaloneSuite) TestDelayCalculator_Default(c *C) {\n\tMinimumRetryDelay = time.Second / 2\n\tDefaultRetryDelay = time.Second\n\n\tdc := delayCalculator{InitialMaxDelay: 0}\n\tcheckInterval(c, dc.Next(), time.Second/2, time.Second)\n\tcheckInterval(c, dc.Next(), time.Second/2, time.Second*2)\n\tcheckInterval(c, dc.Next(), time.Second/2, time.Second*4)\n\tcheckInterval(c, dc.Next(), time.Second/2, time.Second*8)\n\tcheckInterval(c, dc.Next(), time.Second/2, time.Second*10)\n\tcheckInterval(c, dc.Next(), time.Second/2, time.Second*10)\n}\n\nfunc (s *StandaloneSuite) TestDelayCalculator_SetInitial(c *C) {\n\tMinimumRetryDelay = time.Second / 2\n\tDefaultRetryDelay = time.Second\n\n\tdc := delayCalculator{InitialMaxDelay: time.Second * 2}\n\tcheckInterval(c, dc.Next(), time.Second/2, time.Second*2)\n\tcheckInterval(c, dc.Next(), time.Second/2, time.Second*4)\n\tcheckInterval(c, dc.Next(), time.Second/2, time.Second*8)\n\tcheckInterval(c, dc.Next(), time.Second/2, time.Second*16)\n\tcheckInterval(c, dc.Next(), time.Second/2, time.Second*20)\n\tcheckInterval(c, dc.Next(), time.Second/2, time.Second*20)\n\tcheckInterval(c, dc.Next(), time.Second/2, time.Second*20)\n}\n\nfunc (s *StandaloneSuite) TestDelayCalculator_EnsureSomeLongDelays(c *C) {\n\tdc := delayCalculator{InitialMaxDelay: time.Second * 5}\n\tvar d time.Duration\n\tn := 4000\n\tfor i := 0; i < n; i++ {\n\t\tif i < 20 || i%10 == 0 {\n\t\t\tc.Logf(\"i=%d, delay=%v\", i, d)\n\t\t}\n\t\tif d = dc.Next(); d > dc.InitialMaxDelay*9 {\n\t\t\treturn\n\t\t}\n\t}\n\tc.Errorf(\"after %d trials, never got a delay more than 90%% of expected max %d; last was %v\", n, dc.InitialMaxDelay*10, d)\n}\n\n// If InitialMaxDelay is less than MinimumRetryDelay/10, then delay is\n// always MinimumRetryDelay.\nfunc (s *StandaloneSuite) TestDelayCalculator_InitialLessThanMinimum(c *C) {\n\tMinimumRetryDelay = time.Second / 2\n\tdc := delayCalculator{InitialMaxDelay: time.Millisecond}\n\tfor i := 0; i < 20; i++ {\n\t\tc.Check(dc.Next(), Equals, time.Second/2)\n\t}\n}\n\nfunc checkInterval(c *C, t, min, max time.Duration) {\n\tc.Check(t >= min, Equals, true, Commentf(\"got %v which is below expected min %v\", t, min))\n\tc.Check(t <= max, Equals, true, Commentf(\"got %v which is above expected max %v\", t, max))\n}\n"
  },
  {
    "path": "sdk/go/keepclient/perms.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\nimport \"git.arvados.org/arvados.git/sdk/go/arvados\"\n\nvar (\n\tErrSignatureExpired = arvados.ErrSignatureExpired\n\tErrSignatureInvalid = arvados.ErrSignatureInvalid\n\tErrSignatureMissing = arvados.ErrSignatureMissing\n\tSignLocator         = arvados.SignLocator\n\tSignedLocatorRe     = arvados.SignedLocatorRe\n\tVerifySignature     = arvados.VerifySignature\n)\n"
  },
  {
    "path": "sdk/go/keepclient/root_sorter.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\nimport (\n\t\"sort\"\n)\n\ntype RootSorter struct {\n\troot   []string\n\tweight []string\n\torder  []int\n}\n\nfunc NewRootSorter(serviceRoots map[string]string, hash string) *RootSorter {\n\trs := new(RootSorter)\n\trs.root = make([]string, len(serviceRoots))\n\trs.weight = make([]string, len(serviceRoots))\n\trs.order = make([]int, len(serviceRoots))\n\ti := 0\n\tfor uuid, root := range serviceRoots {\n\t\trs.root[i] = root\n\t\trs.weight[i] = rs.getWeight(hash, uuid)\n\t\trs.order[i] = i\n\t\ti++\n\t}\n\tsort.Sort(rs)\n\treturn rs\n}\n\nfunc (rs RootSorter) getWeight(hash string, uuid string) string {\n\tif len(uuid) == 27 {\n\t\treturn Md5String(hash + uuid[12:])\n\t}\n\t// Only useful for testing, a set of one service root, etc.\n\treturn Md5String(hash + uuid)\n}\n\nfunc (rs RootSorter) GetSortedRoots() []string {\n\tsorted := make([]string, len(rs.order))\n\tfor i := range rs.order {\n\t\tsorted[i] = rs.root[rs.order[i]]\n\t}\n\treturn sorted\n}\n\n// Less is really More here: the heaviest root will be at the front of the list.\nfunc (rs RootSorter) Less(i, j int) bool {\n\treturn rs.weight[rs.order[j]] < rs.weight[rs.order[i]]\n}\n\nfunc (rs RootSorter) Len() int {\n\treturn len(rs.order)\n}\n\nfunc (rs RootSorter) Swap(i, j int) {\n\tsort.IntSlice(rs.order).Swap(i, j)\n}\n"
  },
  {
    "path": "sdk/go/keepclient/root_sorter_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\nimport (\n\t\"fmt\"\n\t. \"gopkg.in/check.v1\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype RootSorterSuite struct{}\n\nvar _ = Suite(&RootSorterSuite{})\n\nfunc FakeSvcRoot(i uint64) string {\n\treturn fmt.Sprintf(\"https://%x.svc/\", i)\n}\n\nfunc FakeSvcUUID(i uint64) string {\n\treturn fmt.Sprintf(\"zzzzz-bi6l4-%015x\", i)\n}\n\nfunc FakeServiceRoots(n uint64) map[string]string {\n\tsr := map[string]string{}\n\tfor i := uint64(0); i < n; i++ {\n\t\tsr[FakeSvcUUID(i)] = FakeSvcRoot(i)\n\t}\n\treturn sr\n}\n\nfunc (*RootSorterSuite) EmptyRoots(c *C) {\n\trs := NewRootSorter(map[string]string{}, Md5String(\"foo\"))\n\tc.Check(rs.GetSortedRoots(), Equals, []string{})\n}\n\nfunc (*RootSorterSuite) JustOneRoot(c *C) {\n\trs := NewRootSorter(FakeServiceRoots(1), Md5String(\"foo\"))\n\tc.Check(rs.GetSortedRoots(), Equals, []string{FakeSvcRoot(0)})\n}\n\nfunc (*RootSorterSuite) ReferenceSet(c *C) {\n\tfakeroots := FakeServiceRoots(16)\n\t// These reference probe orders are explained further in\n\t// ../../python/tests/test_keep_client.py:\n\texpectedOrders := []string{\n\t\t\"3eab2d5fc9681074\",\n\t\t\"097dba52e648f1c3\",\n\t\t\"c5b4e023f8a7d691\",\n\t\t\"9d81c02e76a3bf54\",\n\t}\n\tfor h, expectedOrder := range expectedOrders {\n\t\thash := Md5String(fmt.Sprintf(\"%064x\", h))\n\t\troots := NewRootSorter(fakeroots, hash).GetSortedRoots()\n\t\tfor i, svcIDs := range strings.Split(expectedOrder, \"\") {\n\t\t\tsvcID, err := strconv.ParseUint(svcIDs, 16, 64)\n\t\t\tc.Assert(err, Equals, nil)\n\t\t\tc.Check(roots[i], Equals, FakeSvcRoot(svcID))\n\t\t}\n\t}\n}\n"
  },
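  {
    "path": "sdk/go/keepclient/root_sorter_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\n// Illustrative sketch, not part of upstream Arvados: a hypothetical\n// test spelling out the property that makes the rendezvous hashing in\n// root_sorter.go useful -- each root's weight depends only on the\n// (hash, uuid) pair, so removing one service never reorders the\n// remaining probe sequence.\n\nimport (\n\t. \"gopkg.in/check.v1\"\n)\n\nfunc (*RootSorterSuite) TestProbeOrderStableAfterRemoval(c *C) {\n\troots := FakeServiceRoots(8)\n\thash := Md5String(\"example block\")\n\tbefore := NewRootSorter(roots, hash).GetSortedRoots()\n\n\t// Drop the first-probed service and sort the rest again.\n\tfor uuid, root := range roots {\n\t\tif root == before[0] {\n\t\t\tdelete(roots, uuid)\n\t\t}\n\t}\n\tafter := NewRootSorter(roots, hash).GetSortedRoots()\n\n\t// The surviving services keep their relative order.\n\tc.Check(after, DeepEquals, before[1:])\n}\n"
  },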
  {
    "path": "sdk/go/keepclient/support.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/md5\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/asyncbuf\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\ntype keepService struct {\n\tUuid     string `json:\"uuid\"`\n\tHostname string `json:\"service_host\"`\n\tPort     int    `json:\"service_port\"`\n\tSSL      bool   `json:\"service_ssl_flag\"`\n\tSvcType  string `json:\"service_type\"`\n\tReadOnly bool   `json:\"read_only\"`\n}\n\n// Md5String returns md5 hash for the bytes in the given string\nfunc Md5String(s string) string {\n\treturn fmt.Sprintf(\"%x\", md5.Sum([]byte(s)))\n}\n\ntype svcList struct {\n\tItems []keepService `json:\"items\"`\n}\n\ntype uploadStatus struct {\n\terr            error\n\turl            string\n\tstatusCode     int\n\treplicasStored int\n\tclassesStored  map[string]int\n\tresponse       string\n}\n\ntype instrumentedReader struct {\n\tio.Reader\n\tprometheus.Counter\n}\n\nfunc (r instrumentedReader) Read(p []byte) (int, error) {\n\tn, err := r.Reader.Read(p)\n\tr.Counter.Add(float64(n))\n\treturn n, err\n}\n\nfunc (kc *KeepClient) uploadToKeepServer(host string, hash string, classesTodo []string, body io.Reader,\n\tuploadStatusChan chan<- uploadStatus, expectedLength int, reqid string) {\n\n\tvar req *http.Request\n\tvar err error\n\tvar url = fmt.Sprintf(\"%s/%s\", host, hash)\n\tif req, err = http.NewRequest(\"PUT\", url, nil); err != nil {\n\t\tkc.debugf(\"[%s] Error creating request: PUT %s error: %s\", reqid, url, err)\n\t\tuploadStatusChan <- uploadStatus{err, url, 0, 0, nil, \"\"}\n\t\treturn\n\t}\n\n\treq.ContentLength = int64(expectedLength)\n\tif expectedLength > 0 {\n\t\tkc.setupMetrics()\n\t\treq.Body = ioutil.NopCloser(instrumentedReader{body, kc.metrics.BackendBytesOut})\n\t} else {\n\t\t// \"For client requests, a value of 0 means unknown if\n\t\t// Body is not nil.\"  In this case we do want the body\n\t\t// to be empty, so don't set req.Body.\n\t}\n\n\treq.Header.Add(\"X-Request-Id\", reqid)\n\treq.Header.Add(\"Authorization\", \"Bearer \"+kc.Arvados.ApiToken)\n\treq.Header.Add(\"Content-Type\", \"application/octet-stream\")\n\treq.Header.Add(XKeepDesiredReplicas, fmt.Sprint(kc.Want_replicas))\n\tif len(classesTodo) > 0 {\n\t\treq.Header.Add(XKeepStorageClasses, strings.Join(classesTodo, \", \"))\n\t}\n\n\tvar resp *http.Response\n\tif resp, err = kc.httpClient().Do(req); err != nil {\n\t\tkc.debugf(\"[%s] Upload failed: %s error: %s\", reqid, url, err)\n\t\tuploadStatusChan <- uploadStatus{err, url, 0, 0, nil, err.Error()}\n\t\treturn\n\t}\n\n\trep := 1\n\tif xr := resp.Header.Get(XKeepReplicasStored); xr != \"\" {\n\t\tfmt.Sscanf(xr, \"%d\", &rep)\n\t}\n\tscc := resp.Header.Get(XKeepStorageClassesConfirmed)\n\tclassesStored, err := parseStorageClassesConfirmedHeader(scc)\n\tif err != nil {\n\t\tkc.debugf(\"[%s] Ignoring invalid %s header %q: %s\", reqid, XKeepStorageClassesConfirmed, scc, err)\n\t}\n\n\tdefer resp.Body.Close()\n\tdefer io.Copy(ioutil.Discard, resp.Body)\n\n\trespbody, err2 := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: 4096})\n\tresponse := strings.TrimSpace(string(respbody))\n\tif err2 != nil && err2 != io.EOF {\n\t\tkc.debugf(\"[%s] Upload %s error: %s response: %s\", reqid, url, err2, 
response)\n\t\tuploadStatusChan <- uploadStatus{err2, url, resp.StatusCode, rep, classesStored, response}\n\t} else if resp.StatusCode == http.StatusOK {\n\t\tkc.debugf(\"[%s] Upload %s success\", reqid, url)\n\t\tuploadStatusChan <- uploadStatus{nil, url, resp.StatusCode, rep, classesStored, response}\n\t} else {\n\t\tif resp.StatusCode >= 300 && response == \"\" {\n\t\t\tresponse = resp.Status\n\t\t}\n\t\tkc.debugf(\"[%s] Upload %s status: %d %s\", reqid, url, resp.StatusCode, response)\n\t\tuploadStatusChan <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, classesStored, response}\n\t}\n}\n\nfunc (kc *KeepClient) httpBlockWrite(ctx context.Context, req arvados.BlockWriteOptions) (arvados.BlockWriteResponse, error) {\n\tvar resp arvados.BlockWriteResponse\n\tvar getReader func() io.Reader\n\tif req.Data == nil && req.Reader == nil {\n\t\treturn resp, errors.New(\"invalid BlockWriteOptions: Data and Reader are both nil\")\n\t}\n\tif req.DataSize < 0 {\n\t\treturn resp, fmt.Errorf(\"invalid BlockWriteOptions: negative DataSize %d\", req.DataSize)\n\t}\n\tif req.DataSize > BLOCKSIZE || len(req.Data) > BLOCKSIZE {\n\t\treturn resp, ErrOversizeBlock\n\t}\n\tif req.Data != nil {\n\t\tif req.DataSize > len(req.Data) {\n\t\t\treturn resp, errors.New(\"invalid BlockWriteOptions: DataSize > len(Data)\")\n\t\t}\n\t\tif req.DataSize == 0 {\n\t\t\treq.DataSize = len(req.Data)\n\t\t}\n\t\tgetReader = func() io.Reader { return bytes.NewReader(req.Data[:req.DataSize]) }\n\t} else {\n\t\tbuf := asyncbuf.NewBuffer(make([]byte, 0, req.DataSize))\n\t\treader := req.Reader\n\t\tif req.Hash != \"\" {\n\t\t\treader = HashCheckingReader{req.Reader, md5.New(), req.Hash, nil}\n\t\t}\n\t\tgo func() {\n\t\t\t_, err := io.Copy(buf, reader)\n\t\t\tbuf.CloseWithError(err)\n\t\t}()\n\t\tgetReader = buf.NewReader\n\t}\n\tif req.Hash == \"\" {\n\t\tm := md5.New()\n\t\t_, err := io.Copy(m, getReader())\n\t\tif err != nil {\n\t\t\treturn resp, err\n\t\t}\n\t\treq.Hash = fmt.Sprintf(\"%x\", m.Sum(nil))\n\t}\n\tif req.StorageClasses == nil {\n\t\tif len(kc.StorageClasses) > 0 {\n\t\t\treq.StorageClasses = kc.StorageClasses\n\t\t} else {\n\t\t\treq.StorageClasses = kc.DefaultStorageClasses\n\t\t}\n\t}\n\tif req.Replicas == 0 {\n\t\treq.Replicas = kc.Want_replicas\n\t}\n\tif req.RequestID == \"\" {\n\t\treq.RequestID = kc.getRequestID()\n\t}\n\tif req.Attempts == 0 {\n\t\treq.Attempts = 1 + kc.Retries\n\t}\n\n\t// Calculate the ordering for uploading to servers\n\tsv := NewRootSorter(kc.WritableLocalRoots(), req.Hash).GetSortedRoots()\n\n\t// The next server to try contacting\n\tnextServer := 0\n\n\t// The number of active writers\n\tactive := 0\n\n\t// Used to communicate status from the upload goroutines\n\tuploadStatusChan := make(chan uploadStatus)\n\tdefer func() {\n\t\t// Wait for any abandoned uploads (e.g., we started\n\t\t// two uploads and the first replied with replicas=2)\n\t\t// to finish before closing the status channel.\n\t\tgo func() {\n\t\t\tfor active > 0 {\n\t\t\t\t<-uploadStatusChan\n\t\t\t}\n\t\t\tclose(uploadStatusChan)\n\t\t}()\n\t}()\n\n\treplicasTodo := map[string]int{}\n\tfor _, c := range req.StorageClasses {\n\t\treplicasTodo[c] = req.Replicas\n\t}\n\n\treplicasPerThread := kc.replicasPerService\n\tif replicasPerThread < 1 {\n\t\t// unlimited or unknown\n\t\treplicasPerThread = req.Replicas\n\t}\n\n\tdelay := delayCalculator{InitialMaxDelay: kc.RetryDelay}\n\tretriesRemaining := req.Attempts\n\tvar retryServers []string\n\n\tlastError := make(map[string]string)\n\ttrackingClasses 
:= len(replicasTodo) > 0\n\n\tfor retriesRemaining > 0 {\n\t\tretriesRemaining--\n\t\tnextServer = 0\n\t\tretryServers = []string{}\n\t\tfor {\n\t\t\tvar classesTodo []string\n\t\t\tvar maxConcurrency int\n\t\t\tfor sc, r := range replicasTodo {\n\t\t\t\tclassesTodo = append(classesTodo, sc)\n\t\t\t\tif maxConcurrency == 0 || maxConcurrency > r {\n\t\t\t\t\t// Having more than r\n\t\t\t\t\t// writes in flight\n\t\t\t\t\t// would overreplicate\n\t\t\t\t\t// class sc.\n\t\t\t\t\tmaxConcurrency = r\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !trackingClasses {\n\t\t\t\tmaxConcurrency = req.Replicas - resp.Replicas\n\t\t\t}\n\t\t\tif maxConcurrency < 1 {\n\t\t\t\t// If there are no non-zero entries in\n\t\t\t\t// replicasTodo, we're done.\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfor active*replicasPerThread < maxConcurrency {\n\t\t\t\t// Start some upload requests\n\t\t\t\tif nextServer < len(sv) {\n\t\t\t\t\tkc.debugf(\"[%s] Begin upload %s to %s\", req.RequestID, req.Hash, sv[nextServer])\n\t\t\t\t\tgo kc.uploadToKeepServer(sv[nextServer], req.Hash, classesTodo, getReader(), uploadStatusChan, req.DataSize, req.RequestID)\n\t\t\t\t\tnextServer++\n\t\t\t\t\tactive++\n\t\t\t\t} else {\n\t\t\t\t\tif active == 0 && retriesRemaining == 0 {\n\t\t\t\t\t\tmsg := \"Could not write sufficient replicas: \"\n\t\t\t\t\t\tfor _, resp := range lastError {\n\t\t\t\t\t\t\tmsg += resp + \"; \"\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmsg = msg[:len(msg)-2]\n\t\t\t\t\t\treturn resp, InsufficientReplicasError{error: errors.New(msg)}\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tkc.debugf(\"[%s] Replicas remaining to write: %d active uploads: %d\", req.RequestID, replicasTodo, active)\n\t\t\tif active < 1 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// Wait for something to happen.\n\t\t\tstatus := <-uploadStatusChan\n\t\t\tactive--\n\n\t\t\tif status.statusCode == http.StatusOK {\n\t\t\t\tdelete(lastError, status.url)\n\t\t\t\tresp.Replicas += status.replicasStored\n\t\t\t\tif len(status.classesStored) == 0 {\n\t\t\t\t\t// Server doesn't report\n\t\t\t\t\t// storage classes. 
Give up\n\t\t\t\t\t// trying to track which ones\n\t\t\t\t\t// are satisfied; just rely on\n\t\t\t\t\t// total # replicas.\n\t\t\t\t\ttrackingClasses = false\n\t\t\t\t}\n\t\t\t\tfor className, replicas := range status.classesStored {\n\t\t\t\t\tif replicasTodo[className] > replicas {\n\t\t\t\t\t\treplicasTodo[className] -= replicas\n\t\t\t\t\t} else {\n\t\t\t\t\t\tdelete(replicasTodo, className)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tresp.Locator = status.response\n\t\t\t} else {\n\t\t\t\tmsg := fmt.Sprintf(\"[%d] %s\", status.statusCode, status.response)\n\t\t\t\tif len(msg) > 100 {\n\t\t\t\t\tmsg = msg[:100]\n\t\t\t\t}\n\t\t\t\tlastError[status.url] = msg\n\t\t\t}\n\n\t\t\tif status.statusCode == 0 || status.statusCode == 408 || status.statusCode == 429 ||\n\t\t\t\t(status.statusCode >= 500 && status.statusCode != http.StatusInsufficientStorage) {\n\t\t\t\t// Timeout, too many requests, or other server side failure\n\t\t\t\t// (do not auto-retry status 507 \"full\")\n\t\t\t\tretryServers = append(retryServers, status.url[0:strings.LastIndex(status.url, \"/\")])\n\t\t\t}\n\t\t}\n\n\t\tsv = retryServers\n\t\tif len(sv) > 0 {\n\t\t\ttime.Sleep(delay.Next())\n\t\t}\n\t}\n\n\treturn resp, nil\n}\n\nfunc parseStorageClassesConfirmedHeader(hdr string) (map[string]int, error) {\n\tif hdr == \"\" {\n\t\treturn nil, nil\n\t}\n\tclassesStored := map[string]int{}\n\tfor _, cr := range strings.Split(hdr, \",\") {\n\t\tcr = strings.TrimSpace(cr)\n\t\tif cr == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tfields := strings.SplitN(cr, \"=\", 2)\n\t\tif len(fields) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"expected exactly one '=' char in entry %q\", cr)\n\t\t}\n\t\tclassName := fields[0]\n\t\tif className == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"empty class name in entry %q\", cr)\n\t\t}\n\t\treplicas, err := strconv.Atoi(fields[1])\n\t\tif err != nil || replicas < 1 {\n\t\t\treturn nil, fmt.Errorf(\"invalid replica count %q\", fields[1])\n\t\t}\n\t\tclassesStored[className] = replicas\n\t}\n\treturn classesStored, nil\n}\n\n// delayCalculator calculates a series of delays for implementing\n// exponential backoff with jitter.  The first call to Next() returns\n// a random duration between MinimumRetryDelay and the specified\n// InitialMaxDelay (or DefaultRetryDelay if 0).  The max delay is\n// doubled on each subsequent call to Next(), up to 10x the initial\n// max delay.\ntype delayCalculator struct {\n\tInitialMaxDelay time.Duration\n\tn               int // number of delays returned so far\n\tnextmax         time.Duration\n\tlimit           time.Duration\n}\n\nfunc (dc *delayCalculator) Next() time.Duration {\n\tif dc.nextmax <= MinimumRetryDelay {\n\t\t// initialize\n\t\tif dc.InitialMaxDelay > 0 {\n\t\t\tdc.nextmax = dc.InitialMaxDelay\n\t\t} else {\n\t\t\tdc.nextmax = DefaultRetryDelay\n\t\t}\n\t\tdc.limit = 10 * dc.nextmax\n\t}\n\td := time.Duration(rand.Float64() * float64(dc.nextmax))\n\tif d < MinimumRetryDelay {\n\t\td = MinimumRetryDelay\n\t}\n\tdc.nextmax *= 2\n\tif dc.nextmax > dc.limit {\n\t\tdc.nextmax = dc.limit\n\t}\n\treturn d\n}\n"
  },
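  {
    "path": "sdk/go/keepclient/support_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage keepclient\n\n// Illustrative sketches, not part of upstream Arvados: hypothetical\n// tests restating the behavior documented in support.go for\n// parseStorageClassesConfirmedHeader and delayCalculator.\n\nimport (\n\t\"time\"\n\n\t. \"gopkg.in/check.v1\"\n)\n\n// A keepstore header like \"class1=2, class2=1\" parses into a map from\n// class name to confirmed replica count; an empty header means \"no\n// information\" rather than an error, and replica counts below 1 are\n// rejected.\nfunc (s *StandaloneSuite) TestParseStorageClassesConfirmedExamples(c *C) {\n\tm, err := parseStorageClassesConfirmedHeader(\"class1=2, class2=1\")\n\tc.Check(err, IsNil)\n\tc.Check(m, DeepEquals, map[string]int{\"class1\": 2, \"class2\": 1})\n\n\tm, err = parseStorageClassesConfirmedHeader(\"\")\n\tc.Check(err, IsNil)\n\tc.Check(m, IsNil)\n\n\t_, err = parseStorageClassesConfirmedHeader(\"class1=0\")\n\tc.Check(err, NotNil)\n}\n\n// Every delay from delayCalculator falls between MinimumRetryDelay and\n// a cap that starts at InitialMaxDelay, doubles on each call, and\n// saturates at 10x the initial max.\nfunc (s *StandaloneSuite) TestDelayCalculatorEnvelope(c *C) {\n\tdefer func(orig time.Duration) { MinimumRetryDelay = orig }(MinimumRetryDelay)\n\tMinimumRetryDelay = time.Millisecond\n\n\tdc := delayCalculator{InitialMaxDelay: time.Second}\n\tmax := time.Second\n\tfor i := 0; i < 8; i++ {\n\t\tcheckInterval(c, dc.Next(), MinimumRetryDelay, max)\n\t\tif max *= 2; max > 10*time.Second {\n\t\t\tmax = 10 * time.Second\n\t\t}\n\t}\n}\n"
  },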
  {
    "path": "sdk/go/stats/duration.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage stats\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n)\n\n// Duration is a duration that is displayed as a number of seconds in\n// fixed-point notation.\ntype Duration time.Duration\n\n// MarshalJSON implements json.Marshaler.\nfunc (d Duration) MarshalJSON() ([]byte, error) {\n\treturn []byte(d.String()), nil\n}\n\n// String implements fmt.Stringer.\nfunc (d Duration) String() string {\n\treturn fmt.Sprintf(\"%.6f\", time.Duration(d).Seconds())\n}\n\n// UnmarshalJSON implements json.Unmarshaler\nfunc (d *Duration) UnmarshalJSON(data []byte) error {\n\treturn d.Set(string(data))\n}\n\n// Set implements flag.Value\nfunc (d *Duration) Set(s string) error {\n\tsec, err := strconv.ParseFloat(s, 64)\n\tif err == nil {\n\t\t*d = Duration(sec * float64(time.Second))\n\t}\n\treturn err\n}\n"
  },
  {
    "path": "sdk/go/stats/duration_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage stats\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestString(t *testing.T) {\n\td := Duration(123123123123 * time.Nanosecond)\n\tif s, expect := d.String(), \"123.123123\"; s != expect {\n\t\tt.Errorf(\"got %s, expect %s\", s, expect)\n\t}\n}\n\nfunc TestSet(t *testing.T) {\n\tvar d Duration\n\tif err := d.Set(\"123.456\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, expect := time.Duration(d).Nanoseconds(), int64(123456000000); got != expect {\n\t\tt.Errorf(\"got %d, expect %d\", got, expect)\n\t}\n}\n"
  },
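  {
    "path": "sdk/go/stats/duration_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: Apache-2.0\n\npackage stats\n\n// Illustrative sketch, not part of upstream Arvados: a hypothetical\n// test showing that Duration marshals to a bare fixed-point number of\n// seconds (which is itself valid JSON) and unmarshals back to the same\n// value.\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestJSONRoundTrip(t *testing.T) {\n\td := Duration(1500 * time.Millisecond)\n\tbuf, err := d.MarshalJSON()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, expect := string(buf), \"1.500000\"; got != expect {\n\t\tt.Errorf(\"got %q, expect %q\", got, expect)\n\t}\n\tvar d2 Duration\n\tif err := d2.UnmarshalJSON(buf); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif d2 != d {\n\t\tt.Errorf(\"round trip changed value: %v -> %v\", time.Duration(d), time.Duration(d2))\n\t}\n}\n"
  },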
  {
    "path": "sdk/python/LICENSE-2.0.txt",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "sdk/python/MANIFEST.in",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ninclude LICENSE-2.0.txt\ninclude README.rst\ninclude arvados-v1-discovery.json\ninclude arvados_version.py\ninclude discovery2pydoc.py\n"
  },
  {
    "path": "sdk/python/README.rst",
    "content": ".. Copyright (C) The Arvados Authors. All rights reserved.\n..\n.. SPDX-License-Identifier: Apache-2.0\n\n=====================\nArvados Python Client\n=====================\n\nOverview\n--------\n\nThis package provides the ``arvados`` module, an API client for\nArvados_.  It also includes higher-level functions to help you write\nCrunch scripts, and command-line tools to store and retrieve data in\nthe Keep storage server.\n\n.. _Arvados: https://arvados.org/\n\nInstallation\n------------\n\nInstalling under your user account\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThis method lets you install the package without root access.  However,\nother users on the same system will need to reconfigure their shell in order\nto be able to use it. Run the following to install the package in an\nenvironment at ``~/arvclients``::\n\n  python3 -m venv ~/arvclients\n  ~/arvclients/bin/pip install arvados-python-client\n\nCommand line tools will be installed under ``~/arvclients/bin``. You can\ntest one by running::\n\n  ~/arvclients/bin/arv-get --version\n\nYou can run these tools by specifying the full path every time, or you can\nadd the directory to your shell's search path by running::\n\n  export PATH=\"$PATH:$HOME/arvclients/bin\"\n\nYou can make this search path change permanent by adding this command to\nyour shell's configuration, for example ``~/.bashrc`` if you're using bash.\nYou can test the change by running::\n\n  arv-get --version\n\nInstalling on Debian and Ubuntu systems\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nArvados publishes packages for Debian 12 \"bookworm,\" Ubuntu 22.04 \"jammy,\" and Ubuntu 24.04 \"noble.\" You can install the Python SDK package on any of these distributions by running the following commands::\n\n  sudo install -d /etc/apt/keyrings\n  sudo curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg\n  sudo tee /etc/apt/sources.list.d/arvados.sources >/dev/null <<EOF\n  Types: deb\n  URIs: https://apt.arvados.org/$(lsb_release -cs)\n  Suites: $(lsb_release -cs)\n  Components: main\n  Signed-by: /etc/apt/keyrings/arvados.asc\n  EOF\n  sudo apt update\n  sudo apt install python3-arvados-python-client\n\nInstalling on Red Hat, AlmaLinux, and Rocky Linux\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nArvados publishes packages for RHEL 8 and 9, as well as distributions based on those. Note that these packages depend on, and will automatically enable, the Python 3.11 module. You can install the Python SDK package on any of these distributions by running the following commands::\n\n  sudo tee /etc/yum.repos.d/arvados.repo >/dev/null <<'EOF'\n  [arvados]\n  name=Arvados\n  baseurl=https://rpm.arvados.org/RHEL/$releasever/os/$basearch/\n  gpgcheck=1\n  gpgkey=https://rpm.arvados.org/RHEL/$releasever/RPM-GPG-KEY-arvados\n  EOF\n  sudo dnf install python3-arvados-python-client\n\nConfiguration\n-------------\n\nThis client software needs two pieces of information to connect to\nArvados: the DNS name of the API server, and an API authorization\ntoken. `The Arvados user\ndocumentation\n<http://doc.arvados.org/user/reference/api-tokens.html>`_ describes\nhow to find this information in the Arvados Workbench, and install it\non your system.\n\nTesting and Development\n-----------------------\n\nThis package is one part of the Arvados source package, and it has\nintegration tests to check interoperability with other Arvados\ncomponents.  
Our `hacking guide\n<https://dev.arvados.org/projects/arvados/wiki/Hacking_Python_SDK>`_\ndescribes how to set up a development environment and run tests.\n"
  },
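Once the host name and token are configured as the README describes, a quick way to confirm the SDK can reach the cluster is a one-call round trip; a minimal sketch, assuming the standard environment variables are set:

```python
# Assumes ARVADOS_API_HOST and ARVADOS_API_TOKEN are set in the environment.
import arvados

arv = arvados.api('v1')                  # build a REST client from the environment
me = arv.users().current().execute()     # round trip to verify connectivity and credentials
print(me['uuid'])
```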
  {
    "path": "sdk/python/arvados/__init__.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Arvados Python SDK\n\nThis module provides the entire Python SDK for Arvados. The most useful modules\ninclude:\n\n* arvados.api - This module provides the `arvados.api.api` function to\n  construct an Arvados REST API client, as well as other classes and functions\n  that support it. You can call the `arvados.api` module just like a function\n  as a shortcut for calling `arvados.api.api`.\n\n* arvados.api_resources - The methods on an Arvados REST API client are\n  generated dynamically at runtime. This module documents those methods and\n  return values for the current version of Arvados. This module does not\n  implement anything so you don't need to import it, but it's a helpful\n  reference to understand how to use the Arvados REST API client.\n\n* arvados.collection - The `arvados.collection.Collection` class provides a\n  high-level interface to read and write collections. It coordinates sending\n  data to and from Keep, and synchronizing updates with the collection object.\n\n* arvados.util - Utility functions to use mostly in conjunction with the API\n  client object and the results it returns.\n\nOther submodules provide lower-level functionality.\n\"\"\"\n\nimport logging as stdliblog\nimport os\nimport sys\nimport types\n\nfrom collections import UserDict\n\nfrom . import api, errors, util\nfrom .api import api_from_config, http_cache\nfrom .collection import CollectionReader\nfrom arvados.keep import *\nfrom .logging import log_format, log_date_format, log_handler\nfrom .retry import RetryLoop\n\n# Backwards compatibility shims: these modules used to get pulled in after\n# `import arvados` with previous versions of the SDK. We must keep the names\n# accessible even though there's no longer any functional need for them.\nfrom . import cache\nfrom . import safeapi\n\n# Previous versions of the PySDK used to say `from .api import api`.  This\n# made it convenient to call the API client constructor, but difficult to\n# access the rest of the `arvados.api` module. The magic below fixes that\n# bug while retaining backwards compatibility: `arvados.api` is now the\n# module and you can import it normally, but we make that module callable so\n# all the existing code that says `arvados.api('v1', ...)` still works.\nclass _CallableAPIModule(api.__class__):\n    __call__ = staticmethod(api.api)\napi.__class__ = _CallableAPIModule\n\n# Override logging module pulled in via `from ... import *`\n# so users can `import arvados.logging`.\nlogging = sys.modules['arvados.logging']\n\n# Set up Arvados logging based on the user's configuration.\n# All Arvados code should log under the arvados hierarchy.\nlogger = stdliblog.getLogger('arvados')\nlogger.addHandler(log_handler)\nlogger.setLevel(stdliblog.DEBUG if config.get('ARVADOS_DEBUG')\n                else stdliblog.WARNING)\n"
  },
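The `__class__` swap above is the standard trick for making a module callable. A self-contained sketch of the same pattern (the demo module and function names are hypothetical):

```python
import sys
import types

def greet(name):
    return f'hello, {name}'

# Build a throwaway module, then give it a ModuleType subclass whose __call__
# dispatches to a chosen function -- attribute access still works as usual.
mod = types.ModuleType('callable_demo')
mod.greet = greet

class _CallableModule(types.ModuleType):
    __call__ = staticmethod(greet)

mod.__class__ = _CallableModule
sys.modules['callable_demo'] = mod

import callable_demo
print(callable_demo('world'))        # callable like a function
print(callable_demo.greet('again'))  # still usable as a module
```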
  {
    "path": "sdk/python/arvados/_internal/__init__.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Arvados internal utilities\n\nEverything in `arvados._internal` is support code for the Arvados Python SDK\nand tools. Nothing in this module is intended to be part of the public-facing\nSDK API. Classes and functions in this module may be changed or removed at any\ntime.\n\"\"\"\n\nimport functools\nimport operator\nimport re\nimport time\nimport warnings\n\nimport typing as t\n\nHT = t.TypeVar('HT', bound=t.Hashable)\n\nclass Timer:\n    def __init__(self, verbose=False):\n        self.verbose = verbose\n\n    def __enter__(self):\n        self.start = time.time()\n        return self\n\n    def __exit__(self, *args):\n        self.end = time.time()\n        self.secs = self.end - self.start\n        self.msecs = self.secs * 1000  # millisecs\n        if self.verbose:\n            print('elapsed time: %f ms' % self.msecs)\n\n\ndef deprecated(version=None, preferred=None):\n    \"\"\"Mark a callable as deprecated in the SDK\n\n    This will wrap the callable to emit as a DeprecationWarning\n    and add a deprecation notice to its docstring.\n\n    If the following arguments are given, they'll be included in the\n    notices:\n\n    * preferred: str | None --- The name of an alternative that users should\n      use instead.\n\n    * version: str | None --- The version of Arvados when the callable is\n      scheduled to be removed.\n    \"\"\"\n    if version is None:\n        version = ''\n    else:\n        version = f' and scheduled to be removed in Arvados {version}'\n    if preferred is None:\n        preferred = ''\n    else:\n        preferred = f' Prefer {preferred} instead.'\n    def deprecated_decorator(func):\n        fullname = f'{func.__module__}.{func.__qualname__}'\n        parent, _, name = fullname.rpartition('.')\n        if name == '__init__':\n            fullname = parent\n        warning_msg = f'{fullname} is deprecated{version}.{preferred}'\n        @functools.wraps(func)\n        def deprecated_wrapper(*args, **kwargs):\n            warnings.warn(warning_msg, DeprecationWarning, 2)\n            return func(*args, **kwargs)\n        # Get func's docstring without any trailing newline or empty lines.\n        func_doc = re.sub(r'\\n\\s*$', '', func.__doc__ or '')\n        match = re.search(r'\\n([ \\t]+)\\S', func_doc)\n        indent = '' if match is None else match.group(1)\n        warning_doc = f'\\n\\n{indent}.. WARNING:: Deprecated\\n{indent}   {warning_msg}'\n        # Make the deprecation notice the second \"paragraph\" of the\n        # docstring if possible. 
Otherwise append it.\n        docstring, count = re.subn(\n            rf'\\n[ \\t]*\\n{indent}',\n            f'{warning_doc}\\n\\n{indent}',\n            func_doc,\n            count=1,\n        )\n        if not count:\n            docstring = f'{func_doc.lstrip()}{warning_doc}'\n        deprecated_wrapper.__doc__ = docstring\n        return deprecated_wrapper\n    return deprecated_decorator\n\n\ndef parse_seq(\n        s: str,\n        split: t.Callable[[str], t.Iterable[str]]=operator.methodcaller('split', ','),\n        clean: t.Callable[[str], str]=operator.methodcaller('strip'),\n        check: t.Callable[[str], bool]=bool,\n) -> t.Iterator[str]:\n    \"\"\"Split, clean, and filter a string into multiple items\n\n    The default arguments split on commas, strip substrings, and skip empty\n    items.\n    \"\"\"\n    return (word for substr in split(s) if check(word := clean(substr)))\n\n\ndef uniq(it: t.Iterable[HT]) -> t.Iterator[HT]:\n    \"\"\"Yield only unique items from an iterable\n\n    The items must be hashable.\n    \"\"\"\n    seen = set()\n    for item in it:\n        if item not in seen:\n            seen.add(item)\n            yield item\n"
  },
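For reference, here is how the helpers above behave with their defaults (the decorated function names are hypothetical; expected output shown in comments):

```python
from arvados._internal import deprecated, parse_seq, uniq

print(list(parse_seq('a, b,, c ')))      # ['a', 'b', 'c'] -- split on commas, strip, drop empties
print(list(uniq(['x', 'y', 'x', 'z'])))  # ['x', 'y', 'z'] -- first occurrence wins

@deprecated(version='4.0', preferred='new_thing')   # hypothetical names
def old_thing():
    """Do the old thing."""

old_thing()   # warns: "...old_thing is deprecated and scheduled to be removed
              #         in Arvados 4.0. Prefer new_thing instead."
```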
  {
    "path": "sdk/python/arvados/_internal/basedirs.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Base directories utility module\n\nThis module provides a set of classes useful to search and manipulate base\ndirectory defined by systemd and the XDG specification. Most users will just\ninstantiate and use `BaseDirectories`.\n\"\"\"\n\nimport dataclasses\nimport enum\nimport itertools\nimport logging\nimport os\nimport shlex\nimport stat\n\nfrom pathlib import Path, PurePath\nfrom typing import (\n    Iterator,\n    Mapping,\n    Optional,\n    Union,\n)\n\nlogger = logging.getLogger('arvados')\n\n@dataclasses.dataclass\nclass BaseDirectorySpec:\n    \"\"\"Parse base directories\n\n    A BaseDirectorySpec defines all the environment variable keys and defaults\n    related to a set of base directories (cache, config, state, etc.). It\n    provides pure methods to parse environment settings into valid paths.\n    \"\"\"\n    systemd_key: str\n    xdg_home_key: str\n    xdg_home_default: PurePath\n    xdg_dirs_key: Optional[str] = None\n    xdg_dirs_default: str = ''\n\n    @staticmethod\n    def _abspath_from_env(env: Mapping[str, str], key: str) -> Optional[Path]:\n        try:\n            path = Path(env[key])\n        except (KeyError, ValueError):\n            ok = False\n        else:\n            ok = path.is_absolute()\n        return path if ok else None\n\n    @staticmethod\n    def _iter_abspaths(value: str) -> Iterator[Path]:\n        for path_s in value.split(':'):\n            path = Path(path_s)\n            if path.is_absolute():\n                yield path\n\n    def iter_systemd(self, env: Mapping[str, str]) -> Iterator[Path]:\n        return self._iter_abspaths(env.get(self.systemd_key, ''))\n\n    def iter_xdg(self, env: Mapping[str, str], subdir: PurePath) -> Iterator[Path]:\n        yield self.xdg_home(env, subdir)\n        if self.xdg_dirs_key is not None:\n            for path in self._iter_abspaths(env.get(self.xdg_dirs_key) or self.xdg_dirs_default):\n                yield path / subdir\n\n    def xdg_home(self, env: Mapping[str, str], subdir: PurePath) -> Path:\n        return (\n            self._abspath_from_env(env, self.xdg_home_key)\n            or self.xdg_home_default_path(env)\n        ) / subdir\n\n    def xdg_home_default_path(self, env: Mapping[str, str]) -> Path:\n        return (self._abspath_from_env(env, 'HOME') or Path.home()) / self.xdg_home_default\n\n    def xdg_home_is_customized(self, env: Mapping[str, str]) -> bool:\n        xdg_home = self._abspath_from_env(env, self.xdg_home_key)\n        return xdg_home is not None and xdg_home != self.xdg_home_default_path(env)\n\n\nclass BaseDirectorySpecs(enum.Enum):\n    \"\"\"Base directory specifications\n\n    This enum provides easy access to the standard base directory settings.\n    \"\"\"\n    CACHE = BaseDirectorySpec(\n        'CACHE_DIRECTORY',\n        'XDG_CACHE_HOME',\n        PurePath('.cache'),\n    )\n    CONFIG = BaseDirectorySpec(\n        'CONFIGURATION_DIRECTORY',\n        'XDG_CONFIG_HOME',\n        PurePath('.config'),\n        'XDG_CONFIG_DIRS',\n        '/etc/xdg',\n    )\n    STATE = BaseDirectorySpec(\n        'STATE_DIRECTORY',\n        'XDG_STATE_HOME',\n        PurePath('.local', 'state'),\n    )\n\n\nclass BaseDirectories:\n    \"\"\"Resolve paths from a base directory spec\n\n    Given a BaseDirectorySpec, this class provides stateful methods to find\n    existing files and return the most-preferred directory for writing.\n    \"\"\"\n    
_STORE_MODE = stat.S_IFDIR | stat.S_IWUSR\n\n    def __init__(\n            self,\n            spec: Union[BaseDirectorySpec, BaseDirectorySpecs, str],\n            env: Mapping[str, str]=os.environ,\n            xdg_subdir: Union[os.PathLike, str]='arvados',\n    ) -> None:\n        if isinstance(spec, str):\n            spec = BaseDirectorySpecs[spec].value\n        elif isinstance(spec, BaseDirectorySpecs):\n            spec = spec.value\n        self._spec = spec\n        self._env = env\n        self._xdg_subdir = PurePath(xdg_subdir)\n\n    def search_paths(self) -> Iterator[Path]:\n        return itertools.chain(\n                self._spec.iter_systemd(self._env),\n                self._spec.iter_xdg(self._env, self._xdg_subdir))\n\n    def search(self, name: str) -> Iterator[Path]:\n        any_found = False\n        for search_path in self.search_paths():\n            path = search_path / name\n            if path.exists():\n                yield path\n                any_found = True\n        # The rest of this function is dedicated to warning the user if they\n        # have a custom XDG_*_HOME value that prevented the search from\n        # succeeding. This should be rare.\n        if any_found or not self._spec.xdg_home_is_customized(self._env):\n            return\n        default_home = self._spec.xdg_home_default_path(self._env)\n        default_path = Path(self._xdg_subdir / name)\n        if not (default_home / default_path).exists():\n            return\n        if self._spec.xdg_dirs_key is None:\n            suggest_key = self._spec.xdg_home_key\n            suggest_value = default_home\n        else:\n            suggest_key = self._spec.xdg_dirs_key\n            cur_value = self._env.get(suggest_key, '')\n            value_sep = ':' if cur_value else ''\n            suggest_value = f'{cur_value}{value_sep}{default_home}'\n        logger.warning(\n            \"\\\n%s was not found under your configured $%s (%s), \\\nbut does exist at the default location (%s) - \\\nconsider running this program with the environment setting %s=%s\\\n\",\n            default_path,\n            self._spec.xdg_home_key,\n            self._spec.xdg_home(self._env, ''),\n            default_home,\n            suggest_key,\n            shlex.quote(suggest_value),\n        )\n\n    def storage_path(\n            self,\n            subdir: Union[str, os.PathLike]=PurePath(),\n            mode: int=0o700,\n    ) -> Path:\n        for path in self._spec.iter_systemd(self._env):\n            try:\n                mode = path.stat().st_mode\n            except OSError:\n                continue\n            if (mode & self._STORE_MODE) == self._STORE_MODE:\n                break\n        else:\n            path = self._spec.xdg_home(self._env, self._xdg_subdir)\n        path /= subdir\n        path.mkdir(parents=True, exist_ok=True, mode=mode)\n        return path\n"
  },
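Hypothetical usage, resolving Arvados cache locations with the systemd-over-XDG precedence implemented above (`'keep'` is just an example subdirectory name):

```python
from arvados._internal.basedirs import BaseDirectories

dirs = BaseDirectories('CACHE')     # $CACHE_DIRECTORY, else $XDG_CACHE_HOME, else ~/.cache
for hit in dirs.search('keep'):     # existing entries named 'keep', most preferred first
    print(hit)
print(dirs.storage_path('keep'))    # preferred writable directory, created if needed
```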
  {
    "path": "sdk/python/arvados/_internal/diskcache.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport threading\nimport mmap\nimport os\nimport traceback\nimport stat\nimport tempfile\nimport fcntl\nimport time\nimport errno\nimport logging\nimport weakref\nimport collections\n\n_logger = logging.getLogger('arvados.keep')\n\ncacheblock_suffix = \".keepcacheblock\"\n\nclass DiskCacheSlot(object):\n    __slots__ = (\"locator\", \"ready\", \"content\", \"cachedir\", \"filehandle\", \"linger\")\n\n    def __init__(self, locator, cachedir):\n        self.locator = locator\n        self.ready = threading.Event()\n        self.content = None\n        self.cachedir = cachedir\n        self.filehandle = None\n        self.linger = None\n\n    def get(self):\n        self.ready.wait()\n        # 'content' can None, an empty byte string, or a nonempty mmap\n        # region.  If it is an mmap region, we want to advise the\n        # kernel we're going to use it.  This nudges the kernel to\n        # re-read most or all of the block if necessary (instead of\n        # just a few pages at a time), reducing the number of page\n        # faults and improving performance by 4x compared to not\n        # calling madvise.\n        if self.content:\n            self.content.madvise(mmap.MADV_WILLNEED)\n        return self.content\n\n    def set(self, value):\n        tmpfile = None\n        try:\n            if value is None:\n                self.content = None\n                self.ready.set()\n                return False\n\n            if len(value) == 0:\n                # Can't mmap a 0 length file\n                self.content = b''\n                self.ready.set()\n                return True\n\n            if self.content is not None:\n                # Has been set already\n                self.ready.set()\n                return False\n\n            blockdir = os.path.join(self.cachedir, self.locator[0:3])\n            os.makedirs(blockdir, mode=0o700, exist_ok=True)\n\n            final = os.path.join(blockdir, self.locator) + cacheblock_suffix\n\n            self.filehandle = tempfile.NamedTemporaryFile(dir=blockdir, delete=False, prefix=\"tmp\", suffix=cacheblock_suffix)\n            tmpfile = self.filehandle.name\n            os.chmod(tmpfile, stat.S_IRUSR | stat.S_IWUSR)\n\n            # aquire a shared lock, this tells other processes that\n            # we're using this block and to please not delete it.\n            fcntl.flock(self.filehandle, fcntl.LOCK_SH)\n\n            self.filehandle.write(value)\n            self.filehandle.flush()\n            os.rename(tmpfile, final)\n            tmpfile = None\n\n            self.content = mmap.mmap(self.filehandle.fileno(), 0, access=mmap.ACCESS_READ)\n            # only set the event when mmap is successful\n            self.ready.set()\n            return True\n        finally:\n            if tmpfile is not None:\n                # If the tempfile hasn't been renamed on disk yet, try to delete it.\n                try:\n                    os.remove(tmpfile)\n                except:\n                    pass\n\n    def size(self):\n        if self.content is None:\n            if self.linger is not None:\n                # If it is still lingering (object is still accessible\n                # through the weak reference) it is still taking up\n                # space.\n                content = self.linger()\n                if content is not None:\n                    return len(content)\n            return 0\n      
  else:\n            return len(self.content)\n\n    def evict(self):\n        if not self.content:\n            return\n\n        # The mmap region might be in use when we decided to evict\n        # it.  This can happen if the cache is too small.\n        #\n        # If we call close() now, it'll throw an error if\n        # something tries to access it.\n        #\n        # However, we don't need to explicitly call mmap.close()\n        #\n        # I confirmed in mmapmodule.c that that both close\n        # and deallocate do the same thing:\n        #\n        # a) close the file descriptor\n        # b) unmap the memory range\n        #\n        # So we can forget it in the cache and delete the file on\n        # disk, and it will tear it down after any other\n        # lingering Python references to the mapped memory are\n        # gone.\n\n        blockdir = os.path.join(self.cachedir, self.locator[0:3])\n        final = os.path.join(blockdir, self.locator) + cacheblock_suffix\n        try:\n            fcntl.flock(self.filehandle, fcntl.LOCK_UN)\n\n            # try to get an exclusive lock, this ensures other\n            # processes are not using the block.  It is\n            # nonblocking and will throw an exception if we\n            # can't get it, which is fine because that means\n            # we just won't try to delete it.\n            #\n            # I should note here, the file locking is not\n            # strictly necessary, we could just remove it and\n            # the kernel would ensure that the underlying\n            # inode remains available as long as other\n            # processes still have the file open.  However, if\n            # you have multiple processes sharing the cache\n            # and deleting each other's files, you'll end up\n            # with a bunch of ghost files that don't show up\n            # in the file system but are still taking up\n            # space, which isn't particularly user friendly.\n            # The locking strategy ensures that cache blocks\n            # in use remain visible.\n            #\n            fcntl.flock(self.filehandle, fcntl.LOCK_EX | fcntl.LOCK_NB)\n\n            os.remove(final)\n            return True\n        except OSError:\n            pass\n        finally:\n            self.filehandle = None\n            self.content = None\n\n    @staticmethod\n    def get_from_disk(locator, cachedir):\n        blockdir = os.path.join(cachedir, locator[0:3])\n        final = os.path.join(blockdir, locator) + cacheblock_suffix\n\n        try:\n            filehandle = open(final, \"rb\")\n\n            # aquire a shared lock, this tells other processes that\n            # we're using this block and to please not delete it.\n            fcntl.flock(filehandle, fcntl.LOCK_SH)\n\n            content = mmap.mmap(filehandle.fileno(), 0, access=mmap.ACCESS_READ)\n            dc = DiskCacheSlot(locator, cachedir)\n            dc.filehandle = filehandle\n            dc.content = content\n            dc.ready.set()\n            return dc\n        except FileNotFoundError:\n            pass\n        except Exception as e:\n            traceback.print_exc()\n\n        return None\n\n    @staticmethod\n    def cache_usage(cachedir):\n        usage = 0\n        for root, dirs, files in os.walk(cachedir):\n            for name in files:\n                if not name.endswith(cacheblock_suffix):\n                    continue\n\n                blockpath = os.path.join(root, name)\n                res = os.stat(blockpath)\n     
           usage += res.st_size\n        return usage\n\n\n    @staticmethod\n    def init_cache(cachedir, maxslots):\n        #\n        # First check the disk cache works at all by creating a 1 byte cache entry\n        #\n        checkexists = DiskCacheSlot.get_from_disk('0cc175b9c0f1b6a831c399e269772661', cachedir)\n        ds = DiskCacheSlot('0cc175b9c0f1b6a831c399e269772661', cachedir)\n        ds.set(b'a')\n        if checkexists is None:\n            # Don't keep the test entry around unless it existed beforehand.\n            ds.evict()\n\n        # map in all the files in the cache directory, up to max slots.\n        # after max slots, try to delete the excess blocks.\n        #\n        # this gives the calling process ownership of all the blocks\n\n        blocks = []\n        for root, dirs, files in os.walk(cachedir):\n            for name in files:\n                if not name.endswith(cacheblock_suffix):\n                    continue\n\n                blockpath = os.path.join(root, name)\n                res = os.stat(blockpath)\n\n                if len(name) == (32+len(cacheblock_suffix)) and not name.startswith(\"tmp\"):\n                    blocks.append((name[0:32], res.st_atime))\n                elif name.startswith(\"tmp\") and ((time.time() - res.st_mtime) > 60):\n                    # found a temporary file more than 1 minute old,\n                    # try to delete it.\n                    try:\n                        os.remove(blockpath)\n                    except:\n                        pass\n\n        # sort by access time (atime), going from most recently\n        # accessed (highest timestamp) to least recently accessed\n        # (lowest timestamp).\n        blocks.sort(key=lambda x: x[1], reverse=True)\n\n        # Map in all the files we found, up to maxslots, if we exceed\n        # maxslots, start throwing things out.\n        cachelist: collections.OrderedDict = collections.OrderedDict()\n        for b in blocks:\n            got = DiskCacheSlot.get_from_disk(b[0], cachedir)\n            if got is None:\n                continue\n            if len(cachelist) < maxslots:\n                cachelist[got.locator] = got\n            else:\n                # we found more blocks than maxslots, try to\n                # throw it out of the cache.\n                got.evict()\n\n        return cachelist\n"
  },
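The eviction comments above describe a flock protocol: every reader holds a shared lock on a cache file, and a block may only be unlinked after winning a non-blocking exclusive lock. A standalone sketch of that protocol:

```python
import fcntl
import os

def try_evict(path: str) -> bool:
    """Unlink path only if no other process holds a shared lock on it."""
    with open(path, 'rb') as f:
        try:
            # Fails immediately with EWOULDBLOCK while any shared lock exists.
            fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            return False     # still in use somewhere; leave the block alone
        os.remove(path)      # we hold the only lock, so nobody is reading it
        return True
```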
  {
    "path": "sdk/python/arvados/_internal/downloaderbase.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport abc\n\nclass DownloaderBase(abc.ABC):\n    def __init__(self):\n        self.collection = None\n        self.target = None\n        self.name = None\n\n    @abc.abstractmethod\n    def head(self, url):\n        ...\n\n    @abc.abstractmethod\n    def download(self, url, headers):\n        ...\n"
  },
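A hypothetical minimal subclass, just to make the contract concrete (the real implementations below return a `to_keep_util.Response` and populate `collection`/`target` as they stream data):

```python
from arvados._internal.downloaderbase import DownloaderBase

class StubDownloader(DownloaderBase):
    def head(self, url):
        # Probe the URL and return response status/headers without a body.
        return (200, {})

    def download(self, url, headers):
        # Fetch the URL, setting self.collection/self.target/self.name as it goes.
        self.name = url.rsplit('/', 1)[-1]
        return (200, {})

d = StubDownloader()
print(d.head('https://example.com/data.bin'))
```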
  {
    "path": "sdk/python/arvados/_internal/http_to_keep.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport datetime\nimport logging\nimport re\nimport time\nimport urllib.parse\n\nimport pycurl\n\nimport arvados\nimport arvados.collection\nimport arvados._internal\n\nfrom .downloaderbase import DownloaderBase\nfrom .pycurl import PyCurlHelper\nfrom .to_keep_util import (Response, url_to_keep, generic_check_cached_url)\n\nlogger = logging.getLogger('arvados.http_import')\n\nclass _Downloader(DownloaderBase, PyCurlHelper):\n    # Wait up to 60 seconds for connection\n    # How long it can be in \"low bandwidth\" state before it gives up\n    # Low bandwidth threshold is 32 KiB/s\n    DOWNLOADER_TIMEOUT = (60, 300, 32768)\n\n    def __init__(self, apiclient):\n        DownloaderBase.__init__(self)\n        PyCurlHelper.__init__(self, title_case_headers=True)\n        self.curl = pycurl.Curl()\n        self.curl.setopt(pycurl.NOSIGNAL, 1)\n        self.curl.setopt(pycurl.OPENSOCKETFUNCTION,\n                    lambda *args, **kwargs: self._socket_open(*args, **kwargs))\n        self.apiclient = apiclient\n\n    def head(self, url):\n        get_headers = {'Accept': 'application/octet-stream'}\n        self._headers = {}\n\n        self.curl.setopt(pycurl.URL, url.encode('utf-8'))\n        self.curl.setopt(pycurl.HTTPHEADER, [\n            '{}: {}'.format(k,v) for k,v in get_headers.items()])\n\n        self.curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)\n        self.curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path())\n        self.curl.setopt(pycurl.NOBODY, True)\n        self.curl.setopt(pycurl.FOLLOWLOCATION, True)\n\n        self._setcurltimeouts(self.curl, self.DOWNLOADER_TIMEOUT, True)\n\n        try:\n            self.curl.perform()\n        except Exception as e:\n            raise arvados.errors.HttpError(0, str(e))\n        finally:\n            if self._socket:\n                self._socket.close()\n                self._socket = None\n\n        return Response(self.curl.getinfo(pycurl.RESPONSE_CODE), self._headers)\n\n    def download(self, url, headers):\n        self.count = 0\n        self.start = time.time()\n        self.checkpoint = self.start\n        self._headers = {}\n        self._first_chunk = True\n        self.collection = None\n        self.parsedurl = urllib.parse.urlparse(url)\n\n        get_headers = {'Accept': 'application/octet-stream'}\n        get_headers.update(headers)\n\n        self.curl.setopt(pycurl.URL, url.encode('utf-8'))\n        self.curl.setopt(pycurl.HTTPHEADER, [\n            '{}: {}'.format(k,v) for k,v in get_headers.items()])\n\n        self.curl.setopt(pycurl.WRITEFUNCTION, self.body_write)\n        self.curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)\n\n        self.curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path())\n        self.curl.setopt(pycurl.HTTPGET, True)\n        self.curl.setopt(pycurl.FOLLOWLOCATION, True)\n\n        self._setcurltimeouts(self.curl, self.DOWNLOADER_TIMEOUT, False)\n\n        try:\n            self.curl.perform()\n        except Exception as e:\n            raise arvados.errors.HttpError(0, str(e))\n        finally:\n            if self._socket:\n                self._socket.close()\n                self._socket = None\n\n        return Response(self.curl.getinfo(pycurl.RESPONSE_CODE), self._headers)\n\n    def headers_received(self):\n        self.collection = arvados.collection.Collection(api_client=self.apiclient)\n\n        if \"Content-Length\" in 
self._headers:\n            self.contentlength = int(self._headers[\"Content-Length\"])\n            logger.info(\"File size is %s bytes\", self.contentlength)\n        else:\n            self.contentlength = None\n\n        if self._headers.get(\"Content-Disposition\"):\n            grp = re.search(r'filename=(\"((\\\"|[^\"])+)\"|([^][()<>@,;:\\\"/?={} ]+))',\n                            self._headers[\"Content-Disposition\"])\n            if grp.group(2):\n                self.name = grp.group(2)\n            else:\n                self.name = grp.group(4)\n        else:\n            self.name = self.parsedurl.path.split(\"/\")[-1]\n\n        # Can't call curl.getinfo(pycurl.RESPONSE_CODE) until\n        # perform() is done but we need to know the status before that\n        # so we have to parse the status line ourselves.\n        mt = re.match(r'^HTTP\\/(\\d(\\.\\d)?) ([1-5]\\d\\d) ([^\\r\\n\\x00-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]*)\\r\\n$', self._headers[\"x-status-line\"])\n        code = int(mt.group(3))\n\n        if not self.name:\n            logger.error(\"Cannot determine filename from URL or headers\")\n            return\n\n        if code == 200:\n            self.target = self.collection.open(self.name, \"wb\")\n\n    def body_write(self, chunk):\n        if self._first_chunk:\n            self.headers_received()\n            self._first_chunk = False\n\n        self.count += len(chunk)\n\n        if self.target is None:\n            # \"If this number is not equal to the size of the byte\n            # string, this signifies an error and libcurl will abort\n            # the request.\"\n            return 0\n\n        self.target.write(chunk)\n        loopnow = time.time()\n        if (loopnow - self.checkpoint) < 20:\n            return\n\n        bps = self.count / (loopnow - self.start)\n        if self.contentlength is not None:\n            logger.info(\"%2.1f%% complete, %6.2f MiB/s, %1.0f seconds left\",\n                        ((self.count * 100) / self.contentlength),\n                        (bps / (1024.0*1024.0)),\n                        ((self.contentlength-self.count) // bps))\n        else:\n            logger.info(\"%d downloaded, %6.2f MiB/s\", self.count, (bps / (1024.0*1024.0)))\n        self.checkpoint = loopnow\n\n\ndef check_cached_url(api, project_uuid, url, etags,\n                     utcnow=datetime.datetime.utcnow,\n                     varying_url_params=\"\",\n                     prefer_cached_downloads=False):\n    return generic_check_cached_url(api, _Downloader(api),\n                            project_uuid, url, etags,\n                            utcnow=utcnow,\n                            varying_url_params=varying_url_params,\n                            prefer_cached_downloads=prefer_cached_downloads)\n\n\ndef http_to_keep(api, project_uuid, url,\n                 utcnow=datetime.datetime.utcnow, varying_url_params=\"\",\n                 prefer_cached_downloads=False):\n    \"\"\"Download a file over HTTP and upload it to keep, with HTTP headers as metadata.\n\n    Before downloading the URL, checks to see if the URL already\n    exists in Keep and applies HTTP caching policy, the\n    varying_url_params and prefer_cached_downloads flags in order to\n    decide whether to use the version in Keep or re-download it.\n    \"\"\"\n\n    return url_to_keep(api, _Downloader(api),\n                       project_uuid, url,\n                       utcnow,\n                       varying_url_params,\n                       
prefer_cached_downloads)\n"
  },
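The throughput report in `body_write` is a simple average-rate estimate; the same arithmetic in isolation, with hypothetical numbers:

```python
def progress(count: int, contentlength: int, start: float, now: float):
    bps = count / (now - start)               # average bytes/sec since the transfer began
    pct = count * 100 / contentlength         # percent complete
    eta = (contentlength - count) // bps      # whole seconds left at the average rate
    return pct, bps / (1024.0 * 1024.0), eta  # (%, MiB/s, seconds)

print(progress(50 * 1024**2, 100 * 1024**2, 0.0, 10.0))   # (50.0, 5.0, 10.0)
```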
  {
    "path": "sdk/python/arvados/_internal/pycurl.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport collections\nimport socket\nimport pycurl\nimport math\n\nclass PyCurlHelper:\n    # Default Keep server connection timeout:  2 seconds\n    # Default Keep server read timeout:       256 seconds\n    # Default Keep server bandwidth minimum:  32768 bytes per second\n    # Default Keep proxy connection timeout:  20 seconds\n    # Default Keep proxy read timeout:        256 seconds\n    # Default Keep proxy bandwidth minimum:   32768 bytes per second\n    DEFAULT_TIMEOUT = (2, 256, 32768)\n    DEFAULT_PROXY_TIMEOUT = (20, 256, 32768)\n\n    def __init__(self, title_case_headers=False):\n        self._socket = None\n        self.title_case_headers = title_case_headers\n\n    def _socket_open(self, *args, **kwargs):\n        if len(args) + len(kwargs) == 2:\n            return self._socket_open_pycurl_7_21_5(*args, **kwargs)\n        else:\n            return self._socket_open_pycurl_7_19_3(*args, **kwargs)\n\n    def _socket_open_pycurl_7_19_3(self, family, socktype, protocol, address=None):\n        return self._socket_open_pycurl_7_21_5(\n            purpose=None,\n            address=collections.namedtuple(\n                'Address', ['family', 'socktype', 'protocol', 'addr'],\n            )(family, socktype, protocol, address))\n\n    def _socket_open_pycurl_7_21_5(self, purpose, address):\n        \"\"\"Because pycurl doesn't have CURLOPT_TCP_KEEPALIVE\"\"\"\n        s = socket.socket(address.family, address.socktype, address.protocol)\n        s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n        # Will throw invalid protocol error on mac. This test prevents that.\n        if hasattr(socket, 'TCP_KEEPIDLE'):\n            s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 75)\n        s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 75)\n        self._socket = s\n        return s\n\n    def _setcurltimeouts(self, curl, timeouts, ignore_bandwidth=False):\n        if not timeouts:\n            return\n        elif isinstance(timeouts, tuple):\n            if len(timeouts) == 2:\n                conn_t, xfer_t = timeouts\n                bandwidth_bps = self.DEFAULT_TIMEOUT[2]\n            else:\n                conn_t, xfer_t, bandwidth_bps = timeouts\n        else:\n            conn_t, xfer_t = (timeouts, timeouts)\n            bandwidth_bps = self.DEFAULT_TIMEOUT[2]\n        curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(conn_t*1000))\n        if not ignore_bandwidth:\n            curl.setopt(pycurl.LOW_SPEED_TIME, int(math.ceil(xfer_t)))\n            curl.setopt(pycurl.LOW_SPEED_LIMIT, int(math.ceil(bandwidth_bps)))\n\n    def _headerfunction(self, header_line):\n        if isinstance(header_line, bytes):\n            header_line = header_line.decode('iso-8859-1')\n        if ':' in header_line:\n            name, value = header_line.split(':', 1)\n            if self.title_case_headers:\n                name = name.strip().title()\n            else:\n                name = name.strip().lower()\n            value = value.strip()\n        elif self._headers:\n            name = self._lastheadername\n            value = self._headers[name] + ' ' + header_line.strip()\n        elif header_line.startswith('HTTP/'):\n            name = 'x-status-line'\n            value = header_line\n        else:\n            _logger.error(\"Unexpected header line: %s\", header_line)\n            return\n        self._lastheadername = name\n        self._headers[name] 
= value\n        # Returning None implies all bytes were written\n"
  },
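To make the timeout tuples concrete: `(conn_t, xfer_t, bandwidth_bps)` maps onto three curl options, so the default `(2, 256, 32768)` means fail the TCP connect after 2 seconds and abort any transfer that stays under 32768 bytes/s for 256 consecutive seconds. The same mapping outside the helper:

```python
import pycurl

curl = pycurl.Curl()
conn_t, xfer_t, bandwidth_bps = (2, 256, 32768)
curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(conn_t * 1000))  # connection timeout
curl.setopt(pycurl.LOW_SPEED_TIME, xfer_t)                 # seconds allowed below the floor
curl.setopt(pycurl.LOW_SPEED_LIMIT, bandwidth_bps)         # bytes/sec floor
```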
  {
    "path": "sdk/python/arvados/_internal/report_template.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ntry:\n    from html import escape\nexcept ImportError:\n    from cgi import escape\n\nimport json\nfrom typing import ItemsView\n\nclass ReportTemplate(object):\n    \"\"\"Base class for HTML reports produced by Arvados reporting tools.\n\n    Used by crunchstat-summary and cluster-activity.\n\n    \"\"\"\n\n    STYLE = '''\n    <style>\n        body {\n          background: #fafafa;\n          font-family: \"Roboto\", \"Helvetica\", \"Arial\", sans-serif;\n          font-size: 0.875rem;\n          color: rgba(0, 0, 0, 0.87);\n          font-weight: 400;\n        }\n        .card {\n          background: #ffffff;\n          box-shadow: 0px 1px 5px 0px rgba(0,0,0,0.2),0px 2px 2px 0px rgba(0,0,0,0.14),0px 3px 1px -2px rgba(0,0,0,0.12);\n          border-radius: 4px;\n          margin: 20px;\n        }\n        .content {\n          padding: 2px 16px 8px 16px;\n        }\n        table {\n          border-spacing: 0px;\n        }\n        tr {\n          height: 36px;\n          text-align: left;\n        }\n        th {\n          padding-right: 4em;\n          border-top: 1px solid rgba(224, 224, 224, 1);\n        }\n        td {\n          padding-right: 2em;\n          border-top: 1px solid rgba(224, 224, 224, 1);\n        }\n        #chart {\n          margin-left: -20px;\n        }\n    </style>\n    '''\n\n    def __init__(self, label):\n        self.label = label\n        self.cards = []\n\n    def cardlist(self, items):\n        if not isinstance(items, list):\n            items = [items]\n\n        return \"\\n\".join(\n                \"\"\"\n                <div class=\"card\">\n                  <div class=\"content\">\n{}\n                  </div>\n                </div>\"\"\".format(i) for i in items)\n\n    def html(self):\n        return '''<!doctype html>\n<html>\n  <head>\n    <title>{label}</title>\n\n{js}\n\n{style}\n\n{header}\n\n  </head>\n\n  <body>\n  <div class=\"card\">\n    <div class=\"content\">\n      <h1>{label}</h1>\n    </div>\n  </div>\n\n{cards}\n\n  </body>\n</html>\n        '''.format(label=escape(self.label),\n                   js=self.js(),\n                   style=self.style(),\n                   header=self.headHTML(),\n                   cards=self.cardlist(self.cards))\n\n    def js(self):\n        return ''\n\n    def style(self):\n        return self.STYLE\n\n    def headHTML(self):\n        \"\"\"Return extra HTML text to include in HEAD.\"\"\"\n        return ''\n"
  },
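Hypothetical usage of the class above: `cards` holds raw HTML fragments, and `html()` wraps them in the styled page; tools like crunchstat-summary subclass it to override `js()` and `headHTML()`.

```python
from arvados._internal.report_template import ReportTemplate

report = ReportTemplate('Demo report')                       # page title and <h1>
report.cards.append('<h2>Totals</h2><p>42 containers</p>')   # one card per fragment
with open('report.html', 'w') as f:
    f.write(report.html())
```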
  {
    "path": "sdk/python/arvados/_internal/s3_to_keep.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport datetime\nimport logging\nimport time\nimport urllib.parse\n\nimport arvados\nimport arvados.collection\n\nimport boto3\nimport boto3.s3.transfer\n\nfrom .downloaderbase import DownloaderBase\nfrom .to_keep_util import (Response, url_to_keep, generic_check_cached_url)\n\nlogger = logging.getLogger('arvados.s3_import')\n\n\nclass _Downloader(DownloaderBase):\n    def __init__(self, apiclient, botoclient):\n        super().__init__()\n        self.apiclient = apiclient\n        self.botoclient = botoclient\n        self.headresult = None\n\n    def head(self, url):\n        self.parsedurl = urllib.parse.urlparse(url)\n\n        extraArgs = {}\n        versionId = urllib.parse.parse_qs(self.parsedurl.query).get(\"versionId\", [False])[0]\n        if versionId:\n            extraArgs[\"VersionId\"] = versionId\n            extraArgs[\"ResponseCacheControl\"] = \"immutable\"\n        response = self.botoclient.head_object(\n            Bucket=self.parsedurl.netloc,\n            Key=self.parsedurl.path.lstrip('/'),\n            **extraArgs\n        )\n        return Response(response['ResponseMetadata']['HTTPStatusCode'],\n                        {k.title(): v for k,v in response['ResponseMetadata']['HTTPHeaders'].items()})\n\n    def download(self, url, headers):\n        self.collection = arvados.collection.Collection(api_client=self.apiclient)\n\n        self.count = 0\n        self.start = time.time()\n        self.checkpoint = self.start\n        self.contentlength = None\n        self.target = None\n\n        self.parsedurl = urllib.parse.urlparse(url)\n        extraArgs = {}\n        versionId = urllib.parse.parse_qs(self.parsedurl.query).get(\"versionId\", [None])[0]\n        if versionId:\n            extraArgs[\"VersionId\"] = versionId\n\n        self.name = self.parsedurl.path.split(\"/\")[-1]\n        self.target = self.collection.open(self.name, \"wb\")\n\n        objectMeta = self.head(url)\n        self.contentlength = int(objectMeta.headers[\"Content-Length\"])\n\n        self.botoclient.download_fileobj(\n            Bucket=self.parsedurl.netloc,\n            Key=self.parsedurl.path.lstrip('/'),\n            Fileobj=self.target,\n            ExtraArgs=extraArgs,\n            Callback=self.data_received,\n            Config=boto3.s3.transfer.TransferConfig(\n                multipart_threshold=64*1024*1024,\n                multipart_chunksize=64*1024*1024,\n                use_threads=False,\n            ))\n\n        return objectMeta\n\n    def data_received(self, count):\n        self.count += count\n\n        loopnow = time.time()\n        if (loopnow - self.checkpoint) < 20:\n            return\n\n        bps = self.count / (loopnow - self.start)\n        if self.contentlength is not None:\n            logger.info(\"%2.1f%% complete, %6.2f MiB/s, %1.0f seconds left\",\n                        ((self.count * 100) / self.contentlength),\n                        (bps / (1024.0*1024.0)),\n                        ((self.contentlength-self.count) // bps))\n        else:\n            logger.info(\"%d downloaded, %6.2f MiB/s\", self.count, (bps / (1024.0*1024.0)))\n        self.checkpoint = loopnow\n\ndef get_botoclient(botosession, unsigned_requests):\n    if unsigned_requests:\n        from botocore import UNSIGNED\n        from botocore.config import Config\n        return botosession.client('s3', config=Config(signature_version=UNSIGNED))\n    else:\n   
     return botosession.client('s3')\n\n\ndef check_cached_url(api, botosession, project_uuid, url, etags,\n                     utcnow=datetime.datetime.utcnow,\n                     prefer_cached_downloads=False,\n                     unsigned_requests=False):\n\n    return generic_check_cached_url(api, _Downloader(api, get_botoclient(botosession, unsigned_requests)),\n                            project_uuid, url, etags,\n                            utcnow=utcnow,\n                            prefer_cached_downloads=prefer_cached_downloads)\n\ndef s3_to_keep(api, botosession, project_uuid, url,\n               utcnow=datetime.datetime.utcnow,\n               prefer_cached_downloads=False,\n               unsigned_requests=False):\n    \"\"\"Download a file over S3 and upload it to keep, with HTTP headers as metadata.\n\n    Because simple S3 object fetches are just HTTP underneath, we can\n    reuse most of the HTTP downloading infrastructure.\n    \"\"\"\n\n    return url_to_keep(api, _Downloader(api, get_botoclient(botosession, unsigned_requests)),\n                       project_uuid, url,\n                       utcnow=utcnow,\n                       prefer_cached_downloads=prefer_cached_downloads)\n"
  },
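How an `s3://` URL decomposes for the boto3 calls above: the netloc is the bucket, the path (minus its leading slash) is the key, and an optional `versionId` rides in the query string. For example, with a made-up URL:

```python
import urllib.parse

u = urllib.parse.urlparse('s3://example-bucket/path/to/data.bin?versionId=abc123')
bucket = u.netloc                                                     # 'example-bucket'
key = u.path.lstrip('/')                                              # 'path/to/data.bin'
version = urllib.parse.parse_qs(u.query).get('versionId', [None])[0]  # 'abc123'
print(bucket, key, version)
```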
  {
    "path": "sdk/python/arvados/_internal/streams.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport logging\nimport re\n\nfrom .. import config\n\n_logger = logging.getLogger('arvados.streams')\n\n# Log level below 'debug' !\nRANGES_SPAM = 9\n\nclass Range:\n    __slots__ = (\"locator\", \"range_start\", \"range_size\", \"segment_offset\")\n\n    def __init__(self, locator, range_start, range_size, segment_offset=0):\n        self.locator = locator\n        self.range_start = range_start\n        self.range_size = range_size\n        self.segment_offset = segment_offset\n\n    def __repr__(self):\n        return \"Range(%r, %r, %r, %r)\" % (self.locator, self.range_start, self.range_size, self.segment_offset)\n\n    def __eq__(self, other):\n        return (self.locator == other.locator and\n                self.range_start == other.range_start and\n                self.range_size == other.range_size and\n                self.segment_offset == other.segment_offset)\n\n\nclass LocatorAndRange:\n    __slots__ = (\"locator\", \"block_size\", \"segment_offset\", \"segment_size\")\n\n    def __init__(self, locator, block_size, segment_offset, segment_size):\n        self.locator = locator\n        self.block_size = block_size\n        self.segment_offset = segment_offset\n        self.segment_size = segment_size\n\n    def __eq__(self, other):\n        return  (self.locator == other.locator and\n                 self.block_size == other.block_size and\n                 self.segment_offset == other.segment_offset and\n                 self.segment_size == other.segment_size)\n\n    def __repr__(self):\n        return \"LocatorAndRange(%r, %r, %r, %r)\" % (self.locator, self.block_size, self.segment_offset, self.segment_size)\n\n\ndef first_block(data_locators, range_start):\n    block_start = 0\n\n    # range_start/block_start is the inclusive lower bound\n    # range_end/block_end is the exclusive upper bound\n\n    hi = len(data_locators)\n    lo = 0\n    i = (hi + lo) // 2\n    block_size = data_locators[i].range_size\n    block_start = data_locators[i].range_start\n    block_end = block_start + block_size\n\n    # perform a binary search for the first block\n    # assumes that all of the blocks are contiguous, so range_start is guaranteed\n    # to either fall into the range of a block or be outside the block range entirely\n    while not (range_start >= block_start and range_start < block_end):\n        if lo == i:\n            # must be out of range, fail\n            return None\n        if range_start > block_start:\n            lo = i\n        else:\n            hi = i\n        i = (hi + lo) // 2\n        block_size = data_locators[i].range_size\n        block_start = data_locators[i].range_start\n        block_end = block_start + block_size\n\n    return i\n\ndef locators_and_ranges(data_locators, range_start, range_size, limit=None):\n    \"\"\"Get blocks that are covered by a range.\n\n    Returns a list of LocatorAndRange objects.\n\n    :data_locators:\n      list of Range objects, assumes that blocks are in order and contiguous\n\n    :range_start:\n      start of range\n\n    :range_size:\n      size of range\n\n    :limit:\n      Maximum segments to return, default None (unlimited).  
Will truncate the\n      result if there are more segments needed to cover the range than the\n      limit.\n\n    \"\"\"\n    if range_size == 0:\n        return []\n    resp = []\n    range_end = range_start + range_size\n\n    i = first_block(data_locators, range_start)\n    if i is None:\n        return []\n\n    # We should always start at the first segment due to the binary\n    # search.\n    while i < len(data_locators) and len(resp) != limit:\n        dl = data_locators[i]\n        block_start = dl.range_start\n        block_size = dl.range_size\n        block_end = block_start + block_size\n        _logger.log(RANGES_SPAM,\n            \"L&R %s range_start %s block_start %s range_end %s block_end %s\",\n            dl.locator, range_start, block_start, range_end, block_end)\n        if range_end <= block_start:\n            # range ends before this block starts, so don't look at any more locators\n            break\n\n        if range_start >= block_start and range_end <= block_end:\n            # range starts and ends in this block\n            resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset + (range_start - block_start), range_size))\n        elif range_start >= block_start and range_end > block_end:\n            # range starts in this block\n            resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset + (range_start - block_start), block_end - range_start))\n        elif range_start < block_start and range_end > block_end:\n            # range starts in a previous block and extends to further blocks\n            resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset, block_size))\n        elif range_start < block_start and range_end <= block_end:\n            # range starts in a previous block and ends in this block\n            resp.append(LocatorAndRange(dl.locator, block_size, dl.segment_offset, range_end - block_start))\n        block_start = block_end\n        i += 1\n    return resp\n\ndef replace_range(data_locators, new_range_start, new_range_size, new_locator, new_segment_offset):\n    \"\"\"\n    Replace a file segment range with a new segment.\n\n    NOTE::\n    data_locators will be updated in place\n\n    :data_locators:\n      list of Range objects, assumes that segments are in order and contiguous\n\n    :new_range_start:\n      start of range to replace in data_locators\n\n    :new_range_size:\n      size of range to replace in data_locators\n\n    :new_locator:\n      locator for new segment to be inserted\n\n    :new_segment_offset:\n      segment offset within the locator\n\n    \"\"\"\n    if new_range_size == 0:\n        return\n\n    new_range_end = new_range_start + new_range_size\n\n    if len(data_locators) == 0:\n        data_locators.append(Range(new_locator, new_range_start, new_range_size, new_segment_offset))\n        return\n\n    last = data_locators[-1]\n    if (last.range_start+last.range_size) == new_range_start:\n        if last.locator == new_locator and (last.segment_offset+last.range_size) == new_segment_offset:\n            # extend last segment\n            last.range_size += new_range_size\n        else:\n            data_locators.append(Range(new_locator, new_range_start, new_range_size, new_segment_offset))\n        return\n\n    i = first_block(data_locators, new_range_start)\n    if i is None:\n        return\n\n    # We should always start at the first segment due to the binary\n    # search.\n    while i < len(data_locators):\n        dl = data_locators[i]\n        
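# Compare the new range against this existing segment; each\n        # branch below handles one way the two ranges can overlap.\n        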
old_segment_start = dl.range_start\n        old_segment_end = old_segment_start + dl.range_size\n        _logger.log(RANGES_SPAM,\n            \"RR %s range_start %s segment_start %s range_end %s segment_end %s\",\n            dl, new_range_start, old_segment_start, new_range_end,\n            old_segment_end)\n        if new_range_end <= old_segment_start:\n            # range ends before this segment starts, so don't look at any more locators\n            break\n\n        if old_segment_start <= new_range_start and new_range_end <= old_segment_end:\n            # new range starts and ends in old segment\n            # split segment into up to 3 pieces\n            if (new_range_start-old_segment_start) > 0:\n                data_locators[i] = Range(dl.locator, old_segment_start, (new_range_start-old_segment_start), dl.segment_offset)\n                data_locators.insert(i+1, Range(new_locator, new_range_start, new_range_size, new_segment_offset))\n            else:\n                data_locators[i] = Range(new_locator, new_range_start, new_range_size, new_segment_offset)\n                i -= 1\n            if (old_segment_end-new_range_end) > 0:\n                data_locators.insert(i+2, Range(dl.locator, new_range_end, (old_segment_end-new_range_end), dl.segment_offset + (new_range_start-old_segment_start) + new_range_size))\n            return\n        elif old_segment_start <= new_range_start and new_range_end > old_segment_end:\n            # range starts in this segment\n            # split segment into 2 pieces\n            data_locators[i] = Range(dl.locator, old_segment_start, (new_range_start-old_segment_start), dl.segment_offset)\n            data_locators.insert(i+1, Range(new_locator, new_range_start, new_range_size, new_segment_offset))\n            i += 1\n        elif new_range_start < old_segment_start and new_range_end >= old_segment_end:\n            # range starts in a previous segment and extends to further segments\n            # delete this segment\n            del data_locators[i]\n            i -= 1\n        elif new_range_start < old_segment_start and new_range_end < old_segment_end:\n            # range starts in a previous segment and ends in this segment\n            # move the starting point of this segment up, and shrink it.\n            data_locators[i] = Range(dl.locator, new_range_end, (old_segment_end-new_range_end), dl.segment_offset + (new_range_end-old_segment_start))\n            return\n        i += 1\n\ndef escape(path):\n    return re.sub(r'[\\\\:\\000-\\040]', lambda m: \"\\\\%03o\" % ord(m.group(0)), path)\n\ndef normalize_stream(stream_name, stream):\n    \"\"\"Take a manifest stream and return a list of tokens in normalized format.\n\n    :stream_name:\n      The name of the stream.\n\n    :stream:\n      A dict mapping each filename to a list of `LocatorAndRange` objects.\n\n    \"\"\"\n\n    stream_name = escape(stream_name)\n    stream_tokens = [stream_name]\n    sortedfiles = list(stream.keys())\n    sortedfiles.sort()\n\n    blocks = {}\n    streamoffset = 0\n    # Go through each file and add each referenced block exactly once.\n    for streamfile in sortedfiles:\n        for segment in stream[streamfile]:\n            if segment.locator not in blocks:\n                stream_tokens.append(segment.locator)\n                blocks[segment.locator] = streamoffset\n                streamoffset += segment.block_size\n\n    # Add the empty block if the stream is otherwise empty.\n    if len(stream_tokens) == 1:\n        
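# config.EMPTY_BLOCK_LOCATOR is the well-known locator of the\n        # zero-length block; a stream must list at least one block, even\n        # when it contains no file data.\n        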
stream_tokens.append(config.EMPTY_BLOCK_LOCATOR)\n\n    for streamfile in sortedfiles:\n        # Add in file segments\n        current_span = None\n        fout = escape(streamfile)\n        for segment in stream[streamfile]:\n            # Collapse adjacent segments\n            streamoffset = blocks[segment.locator] + segment.segment_offset\n            if current_span is None:\n                current_span = [streamoffset, streamoffset + segment.segment_size]\n            else:\n                if streamoffset == current_span[1]:\n                    current_span[1] += segment.segment_size\n                else:\n                    stream_tokens.append(u\"{0}:{1}:{2}\".format(current_span[0], current_span[1] - current_span[0], fout))\n                    current_span = [streamoffset, streamoffset + segment.segment_size]\n\n        if current_span is not None:\n            stream_tokens.append(u\"{0}:{1}:{2}\".format(current_span[0], current_span[1] - current_span[0], fout))\n\n        if not stream[streamfile]:\n            stream_tokens.append(u\"0:0:{0}\".format(fout))\n\n    return stream_tokens\n"
  },
  {
    "path": "sdk/python/arvados/_internal/to_keep_util.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport collections\nimport dataclasses\nimport typing\nimport logging\nimport email.utils\nimport calendar\nimport datetime\nimport re\nimport urllib.parse\nimport arvados\nimport arvados.collection\nimport arvados._internal\n\nlogger = logging.getLogger('arvados.file_import')\n\nCheckCacheResult = collections.namedtuple('CheckCacheResult',\n                                          ['portable_data_hash', 'file_name',\n                                           'uuid', 'clean_url', 'now'])\n\n@dataclasses.dataclass\nclass Response:\n    status_code: int\n    headers: typing.Mapping[str, str]\n\ndef _my_formatdate(dt):\n    return email.utils.formatdate(timeval=calendar.timegm(dt.timetuple()),\n                                  localtime=False, usegmt=True)\n\ndef _my_parsedate(text):\n    parsed = email.utils.parsedate_tz(text)\n    if parsed:\n        if parsed[9]:\n            # Adjust to UTC\n            return datetime.datetime(*parsed[:6]) + datetime.timedelta(seconds=parsed[9])\n        else:\n            # TZ is zero or missing, assume UTC.\n            return datetime.datetime(*parsed[:6])\n    else:\n        return datetime.datetime(1970, 1, 1)\n\ndef _fresh_cache(url, properties, now):\n    pr = properties[url]\n    expires = None\n\n    logger.debug(\"Checking cache freshness for %s using %s\", url, pr)\n\n    if \"Cache-Control\" in pr:\n        if re.match(r\"immutable\", pr[\"Cache-Control\"]):\n            return True\n\n        g = re.match(r\"(s-maxage|max-age)=(\\d+)\", pr[\"Cache-Control\"])\n        if g:\n            expires = _my_parsedate(pr[\"Date\"]) + datetime.timedelta(seconds=int(g.group(2)))\n\n    if expires is None and \"Expires\" in pr:\n        expires = _my_parsedate(pr[\"Expires\"])\n\n    if expires is None:\n        # Use a default cache time of 24 hours if upstream didn't set\n        # any cache headers, to reduce redundant downloads.\n        expires = _my_parsedate(pr[\"Date\"]) + datetime.timedelta(hours=24)\n\n    if not expires:\n        return False\n\n    return (now < expires)\n\ndef remember_headers(url, properties, headers, now):\n    properties.setdefault(url, {})\n    for h in (\"Cache-Control\", \"Etag\", \"Expires\", \"Date\", \"Content-Length\"):\n        if h in headers:\n            properties[url][h] = headers[h]\n    if \"Date\" not in headers:\n        properties[url][\"Date\"] = _my_formatdate(now)\n\ndef _changed(url, clean_url, properties, now, downloader):\n    req = downloader.head(url)\n\n    if req.status_code != 200:\n        # Sometimes endpoints are misconfigured and will deny HEAD but\n        # allow GET so instead of failing here, we'll try GET If-None-Match\n        return True\n\n    # previous version of this code used \"ETag\", now we are\n    # normalizing to \"Etag\", check for both.\n    etag = properties[url].get(\"Etag\") or properties[url].get(\"ETag\")\n\n    if url in properties:\n        del properties[url]\n    remember_headers(clean_url, properties, req.headers, now)\n\n    if \"Etag\" in req.headers and etag == req.headers[\"Etag\"]:\n        # Didn't change\n        return False\n\n    return True\n\n\ndef generic_check_cached_url(api, downloader, project_uuid, url, etags,\n                     utcnow=datetime.datetime.utcnow,\n                     varying_url_params=\"\",\n                     prefer_cached_downloads=False):\n\n    logger.info(\"Checking Keep for %s\", url)\n\n    
varying_params = set(arvados._internal.parse_seq(varying_url_params))\n\n    parsed = urllib.parse.urlparse(url)\n    query = [q for q in urllib.parse.parse_qsl(parsed.query)\n             if q[0] not in varying_params]\n\n    clean_url = urllib.parse.urlunparse((parsed.scheme, parsed.netloc, parsed.path, parsed.params,\n                                         urllib.parse.urlencode(query, safe=\"/\"), parsed.fragment))\n\n    r1 = api.collections().list(filters=[[\"properties\", \"exists\", url]]).execute()\n\n    if clean_url == url:\n        items = r1[\"items\"]\n    else:\n        r2 = api.collections().list(filters=[[\"properties\", \"exists\", clean_url]]).execute()\n        items = r1[\"items\"] + r2[\"items\"]\n\n    now = utcnow()\n\n    for item in items:\n        properties = item[\"properties\"]\n\n        if clean_url in properties:\n            cache_url = clean_url\n        elif url in properties:\n            cache_url = url\n        else:\n            raise Exception(\"Shouldn't happen, got an API result for %s that doesn't have the URL in properties\" % item[\"uuid\"])\n\n        if prefer_cached_downloads or _fresh_cache(cache_url, properties, now):\n            # HTTP caching rules say we should use the cache\n            cr = arvados.collection.CollectionReader(item[\"portable_data_hash\"], api_client=api)\n            return CheckCacheResult(item[\"portable_data_hash\"], next(iter(cr.keys())),\n                                    item[\"uuid\"], clean_url, now)\n\n        if not _changed(cache_url, clean_url, properties, now, downloader):\n            # Etag didn't change, same content, just update headers\n            api.collections().update(uuid=item[\"uuid\"], body={\"collection\":{\"properties\": properties}}).execute()\n            cr = arvados.collection.CollectionReader(item[\"portable_data_hash\"], api_client=api)\n            return CheckCacheResult(item[\"portable_data_hash\"], next(iter(cr.keys())),\n                                    item[\"uuid\"], clean_url, now)\n\n        for etagstr in (\"Etag\", \"ETag\"):\n            if etagstr in properties[cache_url] and len(properties[cache_url][etagstr]) > 2:\n                etags[properties[cache_url][etagstr]] = item\n\n    logger.debug(\"Found ETag values %s\", etags)\n\n    return CheckCacheResult(None, None, None, clean_url, now)\n\ndef etag_quote(etag):\n    # if it already has leading and trailing quotes, do nothing\n    if etag[0] == '\"' and etag[-1] == '\"':\n        return etag\n    else:\n        # Add quotes.\n        return '\"' + etag + '\"'\n\ndef url_to_keep(api, downloader, project_uuid, url,\n                 utcnow=datetime.datetime.utcnow, varying_url_params=\"\",\n                 prefer_cached_downloads=False):\n    \"\"\"Download a file from an HTTP-like protocol and upload it to Keep, with HTTP headers as metadata.\n\n    Before downloading the URL, checks to see if the URL already\n    exists in Keep and applies HTTP caching policy, the\n    varying_url_params and prefer_cached_downloads flags in order to\n    decide whether to use the version in Keep or re-download it.\n\n    Returns a CheckCacheResult describing the collection that was\n    found or created.\n    \"\"\"\n\n    etags = {}\n    cache_result = generic_check_cached_url(api, downloader,\n                                    project_uuid, url, etags,\n                                    utcnow, varying_url_params,\n                                    prefer_cached_downloads)\n\n    if cache_result.portable_data_hash is not None:\n        return cache_result\n\n    clean_url = cache_result.clean_url\n   
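 # Nothing fresh in Keep: download the URL, sending any known\n    # Etags so the server can reply 304 Not Modified instead of\n    # resending the content.\n    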
now = cache_result.now\n\n    properties = {}\n    headers = {}\n    if etags:\n        headers['If-None-Match'] = ', '.join([etag_quote(k) for k,v in etags.items()])\n    logger.debug(\"Sending GET request with headers %s\", headers)\n\n    logger.info(\"Beginning download of %s\", url)\n\n    req = downloader.download(url, headers)\n\n    c = downloader.collection\n\n    if req.status_code not in (200, 304):\n        raise Exception(\"Failed to download '%s', got status %s\" % (url, req.status_code))\n\n    if downloader.target is not None:\n        downloader.target.close()\n\n    remember_headers(clean_url, properties, req.headers, now)\n\n    if req.status_code == 304 and \"Etag\" in req.headers and req.headers[\"Etag\"] in etags:\n        item = etags[req.headers[\"Etag\"]]\n        item[\"properties\"].update(properties)\n        api.collections().update(uuid=item[\"uuid\"], body={\"collection\":{\"properties\": item[\"properties\"]}}).execute()\n        cr = arvados.collection.CollectionReader(item[\"portable_data_hash\"], api_client=api)\n        return CheckCacheResult(item[\"portable_data_hash\"], list(cr.keys())[0],\n                                item[\"uuid\"], clean_url, now)\n\n    logger.info(\"Download complete\")\n\n    collectionname = \"Downloaded from %s\" % urllib.parse.quote(clean_url, safe='')\n\n    # max length - space to add a timestamp used by ensure_unique_name\n    max_name_len = 254 - 28\n\n    if len(collectionname) > max_name_len:\n        over = len(collectionname) - max_name_len\n        split = int(max_name_len/2)\n        collectionname = collectionname[0:split] + \"…\" + collectionname[split+over:]\n\n    c.save_new(name=collectionname, owner_uuid=project_uuid, ensure_unique_name=True)\n\n    api.collections().update(uuid=c.manifest_locator(), body={\"collection\":{\"properties\": properties}}).execute()\n\n    return CheckCacheResult(c.portable_data_hash(), downloader.name,\n                            c.manifest_locator(), clean_url, now)\n"
  },
  {
    "path": "sdk/python/arvados/api.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Arvados REST API client\n\nThis module provides classes and functions to construct an Arvados REST API\nclient. Most users will want to use one of these constructor functions, in\norder of preference:\n\n* `arvados.api.api` provides a high-level interface to construct a client from\n  either arguments or user configuration. You can call this module just like\n  a function as a shortcut for calling `arvados.api.api`.\n\n* `arvados.api.api_from_config` constructs a client from user configuration in\n  a dictionary.\n\n* `arvados.api.api_client` provides a lower-level interface to construct a\n  simpler client object that is not threadsafe.\n\nOther classes and functions is this module support creating and customizing\nthe client for specialized use-cases.\n\nThe methods on an Arvados REST API client are generated dynamically at\nruntime. The `arvados.api_resources` module documents those methods and\nreturn values for the current version of Arvados. It does not\nimplement anything so you don't need to import it, but it's a helpful\nreference to understand how to use the Arvados REST API client.\n\"\"\"\n\nimport collections\nimport errno\nimport hashlib\nimport httplib2\nimport json\nimport logging\nimport os\nimport pathlib\nimport re\nimport socket\nimport ssl\nimport sys\nimport tempfile\nimport threading\nimport time\nimport types\n\nfrom typing import (\n    Any,\n    Dict,\n    List,\n    Mapping,\n    Optional,\n)\n\nimport apiclient\nimport apiclient.http\nfrom apiclient import discovery as apiclient_discovery\nfrom apiclient import errors as apiclient_errors\nfrom . import config\nfrom . import errors\nfrom . import keep\nfrom . import retry\nfrom . import util\nfrom ._internal import basedirs\nfrom .logging import GoogleHTTPClientFilter, log_handler\n\n_logger = logging.getLogger('arvados.api')\n_googleapiclient_log_lock = threading.Lock()\n\nMAX_IDLE_CONNECTION_DURATION = 30\n\"\"\"\nNumber of seconds that API client HTTP connections should be allowed to idle\nin keepalive state before they are forced closed. Client code can adjust this\nconstant, and it will be used for all Arvados API clients constructed after\nthat point.\n\"\"\"\n\n# An unused HTTP 5xx status code to request a retry internally.\n# See _intercept_http_request. This should not be user-visible.\n_RETRY_4XX_STATUS = 545\n\nif sys.version_info >= (3,):\n    httplib2.SSLHandshakeError = None\n\ndef _reset_googleapiclient_logging() -> None:\n    \"\"\"Make the next API client constructor log googleapiclient retries\n\n    This function is for main-level CLI tools like arv-copy to ensure retries\n    are logged when constructing different API clients.\n    \"\"\"\n    try:\n        _googleapiclient_log_lock.release()\n    except RuntimeError:\n        pass\n\n_orig_retry_request = apiclient.http._retry_request\ndef _retry_request(http, num_retries, *args, **kwargs):\n    try:\n        num_retries = max(num_retries, http.num_retries)\n    except AttributeError:\n        # `http` client object does not have a `num_retries` attribute.\n        # It apparently hasn't gone through _patch_http_request, possibly\n        # because this isn't an Arvados API client. 
Pass through to\n        # avoid interfering with other Google API clients.\n        return _orig_retry_request(http, num_retries, *args, **kwargs)\n    response, body = _orig_retry_request(http, num_retries, *args, **kwargs)\n    # If _intercept_http_request ran out of retries for a 4xx response,\n    # restore the original status code.\n    if response.status == _RETRY_4XX_STATUS:\n        response.status = int(response['status'])\n    return (response, body)\napiclient.http._retry_request = _retry_request\n\ndef _intercept_http_request(self, uri, method=\"GET\", headers={}, **kwargs):\n    if not headers.get('X-Request-Id'):\n        headers['X-Request-Id'] = self._request_id()\n    try:\n        if (self.max_request_size and\n            kwargs.get('body') and\n            self.max_request_size < len(kwargs['body'])):\n            raise apiclient_errors.MediaUploadSizeError(\"Request size %i bytes exceeds published limit of %i bytes\" % (len(kwargs['body']), self.max_request_size))\n\n        headers['Authorization'] = 'Bearer %s' % self.arvados_api_token\n\n        if (time.time() - self._last_request_time) > self._max_keepalive_idle:\n            # High probability of failure due to connection atrophy. Make\n            # sure this request [re]opens a new connection by closing and\n            # forgetting all cached connections first.\n            for conn in self.connections.values():\n                conn.close()\n            self.connections.clear()\n\n        self._last_request_time = time.time()\n        try:\n            response, body = self.orig_http_request(uri, method, headers=headers, **kwargs)\n        except ssl.CertificateError as e:\n            raise ssl.CertificateError(e.args[0], \"Could not connect to %s\\n%s\\nPossible causes: remote SSL/TLS certificate expired, or was issued by an untrusted certificate authority.\" % (uri, e)) from None\n        # googleapiclient only retries 403, 429, and 5xx status codes.\n        # If we got another 4xx status that we want to retry, convert it into\n        # 5xx so googleapiclient handles it the way we want.\n        if response.status in retry._HTTP_CAN_RETRY and response.status < 500:\n            response.status = _RETRY_4XX_STATUS\n        return (response, body)\n    except Exception as e:\n        # Prepend \"[request_id] \" to the error message, which we\n        # assume is the first string argument passed to the exception\n        # constructor.\n        for i in range(len(e.args or ())):\n            if type(e.args[i]) == type(\"\"):\n                e.args = e.args[:i] + (\"[{}] {}\".format(headers['X-Request-Id'], e.args[i]),) + e.args[i+1:]\n                raise type(e)(*e.args)\n        raise\n\ndef _patch_http_request(http, api_token, num_retries):\n    http.arvados_api_token = api_token\n    http.max_request_size = 0\n    http.num_retries = num_retries\n    http.orig_http_request = http.request\n    http.request = types.MethodType(_intercept_http_request, http)\n    http._last_request_time = 0\n    http._max_keepalive_idle = MAX_IDLE_CONNECTION_DURATION\n    http._request_id = util.new_request_id\n    return http\n\ndef _close_connections(self):\n    for conn in self._http.connections.values():\n        conn.close()\n\n# Monkey patch discovery._cast() so objects and arrays get serialized\n# with json.dumps() instead of str().\n_cast_orig = apiclient_discovery._cast\ndef _cast_objects_too(value, schema_type):\n    global _cast_orig\n    if (type(value) != type('') and\n        type(value) != type(b'') and\n   
     (schema_type == 'object' or schema_type == 'array')):\n        return json.dumps(value)\n    else:\n        return _cast_orig(value, schema_type)\napiclient_discovery._cast = _cast_objects_too\n\n# Convert apiclient's HttpErrors into our own API error subclass for better\n# error reporting.\n# Reassigning apiclient_errors.HttpError is not sufficient because most of the\n# apiclient submodules import the class into their own namespace.\ndef _new_http_error(cls, *args, **kwargs):\n    return super(apiclient_errors.HttpError, cls).__new__(\n        errors.ApiError, *args, **kwargs)\napiclient_errors.HttpError.__new__ = staticmethod(_new_http_error)\n\nclass ThreadSafeHTTPCache:\n    \"\"\"Thread-safe replacement for `httplib2.FileCache`\n\n    `arvados.api.http_cache` is the preferred way to construct this object.\n    Refer to that function's docstring for details.\n    \"\"\"\n\n    def __init__(self, path=None, max_age=None):\n        self._dir = path\n        if max_age is not None:\n            try:\n                self._clean(threshold=time.time() - max_age)\n            except:\n                pass\n\n    def _clean(self, threshold=0):\n        for ent in os.listdir(self._dir):\n            fnm = os.path.join(self._dir, ent)\n            if os.path.isdir(fnm) or not fnm.endswith('.tmp'):\n                continue\n            stat = os.lstat(fnm)\n            if stat.st_mtime < threshold:\n                try:\n                    os.unlink(fnm)\n                except OSError as err:\n                    if err.errno != errno.ENOENT:\n                        raise\n\n    def __str__(self):\n        return self._dir\n\n    def _filename(self, url):\n        return os.path.join(self._dir, hashlib.md5(url.encode('utf-8')).hexdigest()+'.tmp')\n\n    def get(self, url):\n        filename = self._filename(url)\n        try:\n            with open(filename, 'rb') as f:\n                return f.read()\n        except (IOError, OSError):\n            return None\n\n    def set(self, url, content):\n        try:\n            fd, tempname = tempfile.mkstemp(dir=self._dir)\n        except:\n            return None\n        try:\n            try:\n                f = os.fdopen(fd, 'wb')\n            except:\n                os.close(fd)\n                raise\n            try:\n                f.write(content)\n            finally:\n                f.close()\n            os.rename(tempname, self._filename(url))\n            tempname = None\n        finally:\n            if tempname:\n                os.unlink(tempname)\n\n    def delete(self, url):\n        try:\n            os.unlink(self._filename(url))\n        except OSError as err:\n            if err.errno != errno.ENOENT:\n                raise\n\n\nclass ThreadSafeAPIClient(object):\n    \"\"\"Thread-safe wrapper for an Arvados API client\n\n    This class takes all the arguments necessary to build a lower-level\n    Arvados API client `googleapiclient.discovery.Resource`, then\n    transparently builds and wraps a unique object per thread. This works\n    around the fact that the client's underlying HTTP client object is not\n    thread-safe.\n\n    Arguments:\n\n    * apiconfig: Mapping[str, str] | None --- A mapping with entries for\n      `ARVADOS_API_HOST`, `ARVADOS_API_TOKEN`, and optionally\n      `ARVADOS_API_HOST_INSECURE`. If not provided, uses\n      `arvados.config.settings` to get these parameters from user\n      configuration.  
You can pass an empty mapping to build the client\n      solely from `api_params`.\n\n    * keep_params: Mapping[str, Any] --- Keyword arguments used to construct\n      an associated `arvados.keep.KeepClient`.\n\n    * api_params: Mapping[str, Any] --- Keyword arguments used to construct\n      each thread's API client. These have the same meaning as in the\n      `arvados.api.api` function.\n\n    * version: str | None --- A string naming the version of the Arvados API\n      to use. If not specified, the code will log a warning and fall back to\n      `'v1'`.\n    \"\"\"\n    def __init__(\n            self,\n            apiconfig: Optional[Mapping[str, str]]=None,\n            keep_params: Optional[Mapping[str, Any]]={},\n            api_params: Optional[Mapping[str, Any]]={},\n            version: Optional[str]=None,\n    ) -> None:\n        if apiconfig or apiconfig is None:\n            self._api_kwargs = api_kwargs_from_config(version, apiconfig, **api_params)\n        else:\n            self._api_kwargs = normalize_api_kwargs(version, **api_params)\n        self.api_token = self._api_kwargs['token']\n        self.request_id = self._api_kwargs.get('request_id')\n        self.local = threading.local()\n        self.keep = keep.KeepClient(api_client=self, **keep_params)\n\n    def localapi(self) -> 'googleapiclient.discovery.Resource':\n        try:\n            client = self.local.api\n        except AttributeError:\n            client = api_client(**self._api_kwargs)\n            client._http._request_id = lambda: self.request_id or util.new_request_id()\n            self.local.api = client\n        return client\n\n    def __getattr__(self, name: str) -> Any:\n        # Proxy nonexistent attributes to the thread-local API client.\n        return getattr(self.localapi(), name)\n\n\ndef http_cache(data_type: str) -> Optional[ThreadSafeHTTPCache]:\n    \"\"\"Set up an HTTP file cache\n\n    This function constructs and returns an `arvados.api.ThreadSafeHTTPCache`\n    backed by the filesystem under a cache directory from the environment, or\n    `None` if the directory cannot be set up. The return value can be passed to\n    `httplib2.Http` as the `cache` argument.\n\n    Arguments:\n\n    * data_type: str --- The name of the subdirectory\n      where data is cached.\n    \"\"\"\n    try:\n        path = basedirs.BaseDirectories('CACHE').storage_path(data_type)\n    except (OSError, RuntimeError):\n        return None\n    else:\n        return ThreadSafeHTTPCache(str(path), max_age=60*60*24*2)\n\ndef api_client(\n        version: str,\n        discoveryServiceUrl: str,\n        token: str,\n        *,\n        cache: bool=True,\n        http: Optional[httplib2.Http]=None,\n        insecure: bool=False,\n        num_retries: int=10,\n        request_id: Optional[str]=None,\n        timeout: int=5*60,\n        **kwargs: Any,\n) -> apiclient_discovery.Resource:\n    \"\"\"Build an Arvados API client\n\n    This function returns a `googleapiclient.discovery.Resource` object\n    constructed from the given arguments. This is a relatively low-level\n    interface that requires all the necessary inputs as arguments. 
Most\n    users will prefer to use `api` which can accept more flexible inputs.\n\n    Arguments:\n\n    * version: str --- A string naming the version of the Arvados API to use.\n\n    * discoveryServiceUrl: str --- The URL used to discover APIs passed\n      directly to `googleapiclient.discovery.build`.\n\n    * token: str --- The authentication token to send with each API call.\n\n    Keyword-only arguments:\n\n    * cache: bool --- If true, loads the API discovery document from, or\n      saves it to, a cache on disk.\n\n    * http: httplib2.Http | None --- The HTTP client object the API client\n      object will use to make requests.  If not provided, this function will\n      build its own to use. Either way, the object will be patched as part\n      of the build process.\n\n    * insecure: bool --- If true, ignore SSL certificate validation\n      errors. Default `False`.\n\n    * num_retries: int --- The number of times to retry each API request if\n      it encounters a temporary failure. Default 10.\n\n    * request_id: str | None --- Default `X-Request-Id` header value for\n      outgoing requests that don't already provide one. If `None` or\n      omitted, generate a random ID. When retrying failed requests, the same\n      ID is used on all attempts.\n\n    * timeout: int --- A timeout value for HTTP requests in seconds. Default\n      300 (5 minutes).\n\n    Additional keyword arguments will be passed directly to\n    `googleapiclient.discovery.build`.\n    \"\"\"\n    if http is None:\n        http = httplib2.Http(\n            ca_certs=util.ca_certs_path(),\n            cache=http_cache('discovery') if cache else None,\n            disable_ssl_certificate_validation=bool(insecure),\n        )\n    if http.timeout is None:\n        http.timeout = timeout\n    http = _patch_http_request(http, token, num_retries)\n\n    # The first time a client is instantiated, temporarily route\n    # googleapiclient.http retry logs if they're not already. These are\n    # important because temporary problems fetching the discovery document\n    # can cause clients to appear to hang early. 
This can be removed after\n    # we have a more general story for handling googleapiclient logs (#20521).\n    client_logger = logging.getLogger('googleapiclient.http')\n    # \"first time a client is instantiated\" = thread that acquires this lock\n    # It is only released explicitly by calling _reset_googleapiclient_logging.\n    # googleapiclient sets up its own NullHandler so we detect if logging is\n    # configured by looking for a real handler anywhere in the hierarchy.\n    client_logger_unconfigured = _googleapiclient_log_lock.acquire(blocking=False) and all(\n        isinstance(handler, logging.NullHandler)\n        for logger_name in ['', 'googleapiclient', 'googleapiclient.http']\n        for handler in logging.getLogger(logger_name).handlers\n    )\n    if client_logger_unconfigured:\n        client_level = client_logger.level\n        client_filter = GoogleHTTPClientFilter()\n        client_logger.addFilter(client_filter)\n        client_logger.addHandler(log_handler)\n        if logging.NOTSET < client_level < client_filter.retry_levelno:\n            client_logger.setLevel(client_level)\n        else:\n            client_logger.setLevel(client_filter.retry_levelno)\n    try:\n        svc = apiclient_discovery.build(\n            'arvados', version,\n            cache_discovery=False,\n            discoveryServiceUrl=discoveryServiceUrl,\n            http=http,\n            num_retries=num_retries,\n            **kwargs,\n        )\n    finally:\n        if client_logger_unconfigured:\n            client_logger.removeHandler(log_handler)\n            client_logger.removeFilter(client_filter)\n            client_logger.setLevel(client_level)\n    svc.api_token = token\n    svc.insecure = insecure\n    svc.request_id = request_id\n    svc.config = lambda: util.get_config_once(svc)\n    svc.vocabulary = lambda: util.get_vocabulary_once(svc)\n    svc.close_connections = types.MethodType(_close_connections, svc)\n    http.max_request_size = svc._rootDesc.get('maxRequestSize', 0)\n    http.cache = None\n    http._request_id = lambda: svc.request_id or util.new_request_id()\n    return svc\n\ndef normalize_api_kwargs(\n        version: Optional[str]=None,\n        discoveryServiceUrl: Optional[str]=None,\n        host: Optional[str]=None,\n        token: Optional[str]=None,\n        **kwargs: Any,\n) -> Dict[str, Any]:\n    \"\"\"Validate kwargs from `api` and build kwargs for `api_client`\n\n    This method takes high-level keyword arguments passed to the `api`\n    constructor and normalizes them into a new dictionary that can be passed\n    as keyword arguments to `api_client`. It raises `ValueError` if required\n    arguments are missing or conflict.\n\n    Arguments:\n\n    * version: str | None --- A string naming the version of the Arvados API\n      to use. If not specified, the code will log a warning and fall back to\n      'v1'.\n\n    * discoveryServiceUrl: str | None --- The URL used to discover APIs\n      passed directly to `googleapiclient.discovery.build`. It is an error\n      to pass both `discoveryServiceUrl` and `host`.\n\n    * host: str | None --- The hostname and optional port number of the\n      Arvados API server. Used to build `discoveryServiceUrl`. 
It is an\n      error to pass both `discoveryServiceUrl` and `host`.\n\n    * token: str --- The authentication token to send with each API call.\n\n    Additional keyword arguments will be included in the return value.\n    \"\"\"\n    if discoveryServiceUrl and host:\n        raise ValueError(\"both discoveryServiceUrl and host provided\")\n    elif discoveryServiceUrl:\n        url_src = \"discoveryServiceUrl\"\n    elif host:\n        url_src = \"host argument\"\n        discoveryServiceUrl = 'https://%s/discovery/v1/apis/{api}/{apiVersion}/rest' % (host,)\n    elif token:\n        # This specific error message gets priority for backwards compatibility.\n        raise ValueError(\"token argument provided, but host missing.\")\n    else:\n        raise ValueError(\"neither discoveryServiceUrl nor host provided\")\n    if not token:\n        raise ValueError(\"%s provided, but token missing\" % (url_src,))\n    if not version:\n        version = 'v1'\n        _logger.info(\n            \"Using default API version. Call arvados.api(%r) instead.\",\n            version,\n        )\n    return {\n        'discoveryServiceUrl': discoveryServiceUrl,\n        'token': token,\n        'version': version,\n        **kwargs,\n    }\n\ndef api_kwargs_from_config(\n        version: Optional[str]=None,\n        apiconfig: Optional[Mapping[str, str]]=None,\n        **kwargs: Any\n) -> Dict[str, Any]:\n    \"\"\"Build `api_client` keyword arguments from configuration\n\n    This function accepts a mapping with Arvados configuration settings like\n    `ARVADOS_API_HOST` and converts them into a mapping of keyword arguments\n    that can be passed to `api_client`. If `ARVADOS_API_HOST` or\n    `ARVADOS_API_TOKEN` are not configured, it raises `ValueError`.\n\n    Arguments:\n\n    * version: str | None --- A string naming the version of the Arvados API\n      to use. If not specified, the code will log a warning and fall back to\n      'v1'.\n\n    * apiconfig: Mapping[str, str] | None --- A mapping with entries for\n      `ARVADOS_API_HOST`, `ARVADOS_API_TOKEN`, and optionally\n      `ARVADOS_API_HOST_INSECURE`. If not provided, calls\n      `arvados.config.settings` to get these parameters from user\n      configuration.\n\n    Additional keyword arguments will be included in the return value.\n    \"\"\"\n    if apiconfig is None:\n        apiconfig = config.settings()\n    missing = \" and \".join(\n        key\n        for key in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']\n        if key not in apiconfig\n    )\n    if missing:\n        raise ValueError(\n            \"%s not set.\\nPlease set in %s or export environment variable.\" %\n            (missing, config.default_config_file),\n        )\n    return normalize_api_kwargs(\n        version,\n        None,\n        apiconfig['ARVADOS_API_HOST'],\n        apiconfig['ARVADOS_API_TOKEN'],\n        insecure=config.flag_is_true('ARVADOS_API_HOST_INSECURE', apiconfig),\n        **kwargs,\n    )\n\ndef api(\n        version: Optional[str]=None,\n        cache: bool=True,\n        host: Optional[str]=None,\n        token: Optional[str]=None,\n        insecure: bool=False,\n        request_id: Optional[str]=None,\n        timeout: int=5*60,\n        *,\n        discoveryServiceUrl: Optional[str]=None,\n        **kwargs: Any,\n) -> ThreadSafeAPIClient:\n    \"\"\"Dynamically build an Arvados API client\n\n    This function provides a high-level \"do what I mean\" interface to build an\n    Arvados API client object. 
You can call it with no arguments to build a\n    client from user configuration; pass `host` and `token` arguments just\n    like you would write in user configuration; or pass additional arguments\n    for lower-level control over the client.\n\n    This function returns an `arvados.api.ThreadSafeAPIClient`, an\n    API-compatible wrapper around `googleapiclient.discovery.Resource`. If\n    you're handling concurrency yourself and/or your application is very\n    performance-sensitive, consider calling `api_client` directly.\n\n    Arguments:\n\n    * version: str | None --- A string naming the version of the Arvados API\n      to use. If not specified, the code will log a warning and fall back to\n      'v1'.\n\n    * host: str | None --- The hostname and optional port number of the\n      Arvados API server.\n\n    * token: str | None --- The authentication token to send with each API\n      call.\n\n    * discoveryServiceUrl: str | None --- The URL used to discover APIs\n      passed directly to `googleapiclient.discovery.build`.\n\n    If `host`, `token`, and `discoveryServiceUrl` are all omitted, `host` and\n    `token` will be loaded from the user's configuration. Otherwise, you must\n    pass `token` and one of `host` or `discoveryServiceUrl`. It is an error to\n    pass both `host` and `discoveryServiceUrl`.\n\n    Other arguments are passed directly to `api_client`. See that function's\n    docstring for more information about their meaning.\n    \"\"\"\n    kwargs.update(\n        cache=cache,\n        insecure=insecure,\n        request_id=request_id,\n        timeout=timeout,\n    )\n    if discoveryServiceUrl or host or token:\n        kwargs.update(normalize_api_kwargs(version, discoveryServiceUrl, host, token))\n    else:\n        kwargs.update(api_kwargs_from_config(version))\n    version = kwargs.pop('version')\n    return ThreadSafeAPIClient({}, {}, kwargs, version)\n\ndef api_from_config(\n        version: Optional[str]=None,\n        apiconfig: Optional[Mapping[str, str]]=None,\n        **kwargs: Any\n) -> ThreadSafeAPIClient:\n    \"\"\"Build an Arvados API client from a configuration mapping\n\n    This function builds an Arvados API client from a mapping with user\n    configuration. It accepts that mapping as an argument, so you can use a\n    configuration that's different from what the user has set up.\n\n    This function returns an `arvados.api.ThreadSafeAPIClient`, an\n    API-compatible wrapper around `googleapiclient.discovery.Resource`. If\n    you're handling concurrency yourself and/or your application is very\n    performance-sensitive, consider calling `api_client` directly.\n\n    Arguments:\n\n    * version: str | None --- A string naming the version of the Arvados API\n      to use. If not specified, the code will log a warning and fall back to\n      'v1'.\n\n    * apiconfig: Mapping[str, str] | None --- A mapping with entries for\n      `ARVADOS_API_HOST`, `ARVADOS_API_TOKEN`, and optionally\n      `ARVADOS_API_HOST_INSECURE`. If not provided, calls\n      `arvados.config.settings` to get these parameters from user\n      configuration.\n\n    Other arguments are passed directly to `api_client`. See that function's\n    docstring for more information about their meaning.\n    \"\"\"\n    return api(**api_kwargs_from_config(version, apiconfig, **kwargs))\n"
  },
  {
    "path": "sdk/python/arvados/arvfile.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport bz2\nimport collections\nimport copy\nimport errno\nimport functools\nimport hashlib\nimport logging\nimport os\nimport queue\nimport re\nimport sys\nimport threading\nimport uuid\nimport zlib\n\nfrom . import config\nfrom ._internal import streams\nfrom .errors import KeepWriteError, AssertionError, ArgumentError\nfrom .keep import KeepLocator\nfrom .retry import retry_method\n\nADD = \"add\"\n\"\"\"Argument value for `Collection` methods to represent an added item\"\"\"\nDEL = \"del\"\n\"\"\"Argument value for `Collection` methods to represent a removed item\"\"\"\nMOD = \"mod\"\n\"\"\"Argument value for `Collection` methods to represent a modified item\"\"\"\nTOK = \"tok\"\n\"\"\"Argument value for `Collection` methods to represent an item with token differences\"\"\"\nWRITE = \"write\"\n\"\"\"Argument value for `Collection` methods to represent that a file was written to\"\"\"\n\n_logger = logging.getLogger('arvados.arvfile')\n\ndef split(path):\n    \"\"\"split(path) -> streamname, filename\n\n    Separate the stream name and file name in a /-separated stream path and\n    return a tuple (stream_name, file_name).  If no stream name is available,\n    assume '.'.\n\n    \"\"\"\n    try:\n        stream_name, file_name = path.rsplit('/', 1)\n    except ValueError:  # No / in string\n        stream_name, file_name = '.', path\n    return stream_name, file_name\n\n\nclass UnownedBlockError(Exception):\n    \"\"\"Raised when there's an writable block without an owner on the BlockManager.\"\"\"\n    pass\n\n\nclass _FileLikeObjectBase(object):\n    def __init__(self, name, mode):\n        self.name = name\n        self.mode = mode\n        self.closed = False\n\n    @staticmethod\n    def _before_close(orig_func):\n        @functools.wraps(orig_func)\n        def before_close_wrapper(self, *args, **kwargs):\n            if self.closed:\n                raise ValueError(\"I/O operation on closed stream file\")\n            return orig_func(self, *args, **kwargs)\n        return before_close_wrapper\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        try:\n            self.close()\n        except Exception:\n            if exc_type is None:\n                raise\n\n    def close(self):\n        self.closed = True\n\n\nclass ArvadosFileReaderBase(_FileLikeObjectBase):\n    def __init__(self, name, mode, num_retries=None):\n        super(ArvadosFileReaderBase, self).__init__(name, mode)\n        self._filepos = 0\n        self.num_retries = num_retries\n        self._readline_cache = (None, None)\n\n    def __iter__(self):\n        while True:\n            data = self.readline()\n            if not data:\n                break\n            yield data\n\n    def decompressed_name(self):\n        return re.sub(r'\\.(bz2|gz)$', '', self.name)\n\n    @_FileLikeObjectBase._before_close\n    def seek(self, pos, whence=os.SEEK_SET):\n        if whence == os.SEEK_CUR:\n            pos += self._filepos\n        elif whence == os.SEEK_END:\n            pos += self.size()\n        if pos < 0:\n            raise IOError(errno.EINVAL, \"Tried to seek to negative file offset.\")\n        self._filepos = pos\n        return self._filepos\n\n    def tell(self):\n        return self._filepos\n\n    def readable(self):\n        return True\n\n    def writable(self):\n        return False\n\n    def seekable(self):\n        
return True\n\n    @_FileLikeObjectBase._before_close\n    @retry_method\n    def readall(self, size=2**20, num_retries=None):\n        while True:\n            data = self.read(size, num_retries=num_retries)\n            if len(data) == 0:\n                break\n            yield data\n\n    @_FileLikeObjectBase._before_close\n    @retry_method\n    def readline(self, size=float('inf'), num_retries=None):\n        cache_pos, cache_data = self._readline_cache\n        if self.tell() == cache_pos:\n            data = [cache_data]\n            self._filepos += len(cache_data)\n        else:\n            data = [b'']\n        data_size = len(data[-1])\n        while (data_size < size) and (b'\\n' not in data[-1]):\n            next_read = self.read(2 ** 20, num_retries=num_retries)\n            if not next_read:\n                break\n            data.append(next_read)\n            data_size += len(next_read)\n        data = b''.join(data)\n        try:\n            nextline_index = data.index(b'\\n') + 1\n        except ValueError:\n            nextline_index = len(data)\n        nextline_index = min(nextline_index, size)\n        self._filepos -= len(data) - nextline_index\n        self._readline_cache = (self.tell(), data[nextline_index:])\n        return data[:nextline_index].decode()\n\n    @_FileLikeObjectBase._before_close\n    @retry_method\n    def decompress(self, decompress, size, num_retries=None):\n        for segment in self.readall(size, num_retries=num_retries):\n            data = decompress(segment)\n            if data:\n                yield data\n\n    @_FileLikeObjectBase._before_close\n    @retry_method\n    def readall_decompressed(self, size=2**20, num_retries=None):\n        self.seek(0)\n        if self.name.endswith('.bz2'):\n            dc = bz2.BZ2Decompressor()\n            return self.decompress(dc.decompress, size,\n                                   num_retries=num_retries)\n        elif self.name.endswith('.gz'):\n            dc = zlib.decompressobj(16+zlib.MAX_WBITS)\n            return self.decompress(lambda segment: dc.decompress(dc.unconsumed_tail + segment),\n                                   size, num_retries=num_retries)\n        else:\n            return self.readall(size, num_retries=num_retries)\n\n    @_FileLikeObjectBase._before_close\n    @retry_method\n    def readlines(self, sizehint=float('inf'), num_retries=None):\n        data = []\n        data_size = 0\n        for s in self.readall(num_retries=num_retries):\n            data.append(s)\n            data_size += len(s)\n            if data_size >= sizehint:\n                break\n        return b''.join(data).decode().splitlines(True)\n\n    def size(self):\n        raise IOError(errno.ENOSYS, \"Not implemented\")\n\n    def read(self, size, num_retries=None):\n        raise IOError(errno.ENOSYS, \"Not implemented\")\n\n    def readfrom(self, start, size, num_retries=None):\n        raise IOError(errno.ENOSYS, \"Not implemented\")\n\n\ndef synchronized(orig_func):\n    @functools.wraps(orig_func)\n    def synchronized_wrapper(self, *args, **kwargs):\n        with self.lock:\n            return orig_func(self, *args, **kwargs)\n    return synchronized_wrapper\n\n\nclass StateChangeError(Exception):\n    def __init__(self, message, state, nextstate):\n        super(StateChangeError, self).__init__(message)\n        self.state = state\n        self.nextstate = nextstate\n\nclass _BufferBlock(object):\n    \"\"\"A stand-in for a Keep block that is in the process of being written.\n\n    
Writers can append to it, get the size, and compute the Keep locator.\n    There are three valid states:\n\n    WRITABLE\n      Can append to block.\n\n    PENDING\n      Block is in the process of being uploaded to Keep, append is an error.\n\n    COMMITTED\n      The block has been written to Keep, its internal buffer has been\n      released, fetching the block will fetch it via keep client (since we\n      discarded the internal copy), and identifiers referring to the BufferBlock\n      can be replaced with the block locator.\n\n    \"\"\"\n\n    WRITABLE = 0\n    PENDING = 1\n    COMMITTED = 2\n    ERROR = 3\n    DELETED = 4\n\n    def __init__(self, blockid, starting_capacity, owner):\n        \"\"\"\n        :blockid:\n          the identifier for this block\n\n        :starting_capacity:\n          the initial buffer capacity\n\n        :owner:\n          ArvadosFile that owns this block\n\n        \"\"\"\n        self.blockid = blockid\n        self.buffer_block = bytearray(starting_capacity)\n        self.buffer_view = memoryview(self.buffer_block)\n        self.write_pointer = 0\n        self._state = _BufferBlock.WRITABLE\n        self._locator = None\n        self.owner = owner\n        self.lock = threading.Lock()\n        self.wait_for_commit = threading.Event()\n        self.error = None\n\n    @synchronized\n    def append(self, data):\n        \"\"\"Append some data to the buffer.\n\n        Only valid if the block is in WRITABLE state.  Implements an expanding\n        buffer, doubling capacity as needed to accommodate all the data.\n\n        \"\"\"\n        if self._state == _BufferBlock.WRITABLE:\n            if not isinstance(data, bytes) and not isinstance(data, memoryview):\n                data = data.encode()\n            while (self.write_pointer+len(data)) > len(self.buffer_block):\n                new_buffer_block = bytearray(len(self.buffer_block) * 2)\n                new_buffer_block[0:self.write_pointer] = self.buffer_block[0:self.write_pointer]\n                self.buffer_block = new_buffer_block\n                self.buffer_view = memoryview(self.buffer_block)\n            self.buffer_view[self.write_pointer:self.write_pointer+len(data)] = data\n            self.write_pointer += len(data)\n            self._locator = None\n        else:\n            raise AssertionError(\"Buffer block is not writable\")\n\n    STATE_TRANSITIONS = frozenset([\n            (WRITABLE, PENDING),\n            (PENDING, COMMITTED),\n            (PENDING, ERROR),\n            (ERROR, PENDING)])\n\n    @synchronized\n    def set_state(self, nextstate, val=None):\n        if (self._state, nextstate) not in self.STATE_TRANSITIONS:\n            raise StateChangeError(\"Invalid state change from %s to %s\" % (self._state, nextstate), self._state, nextstate)\n        self._state = nextstate\n\n        if self._state == _BufferBlock.PENDING:\n            self.wait_for_commit.clear()\n\n        if self._state == _BufferBlock.COMMITTED:\n            self._locator = val\n            self.buffer_view = None\n            self.buffer_block = None\n            self.wait_for_commit.set()\n\n        if self._state == _BufferBlock.ERROR:\n            self.error = val\n            self.wait_for_commit.set()\n\n    @synchronized\n    def state(self):\n        return self._state\n\n    def size(self):\n        \"\"\"The amount of data written to the buffer.\"\"\"\n        return self.write_pointer\n\n    @synchronized\n    def locator(self):\n        \"\"\"The Keep locator for this buffer's 
contents.\"\"\"\n        if self._locator is None:\n            self._locator = \"%s+%i\" % (hashlib.md5(self.buffer_view[0:self.write_pointer]).hexdigest(), self.size())\n        return self._locator\n\n    @synchronized\n    def clone(self, new_blockid, owner):\n        if self._state == _BufferBlock.COMMITTED:\n            raise AssertionError(\"Cannot duplicate committed buffer block\")\n        bufferblock = _BufferBlock(new_blockid, self.size(), owner)\n        bufferblock.append(self.buffer_view[0:self.size()])\n        return bufferblock\n\n    @synchronized\n    def clear(self):\n        self._state = _BufferBlock.DELETED\n        self.owner = None\n        self.buffer_block = None\n        self.buffer_view = None\n\n    @synchronized\n    def repack_writes(self):\n        \"\"\"Optimize buffer block by repacking segments in file sequence.\n\n        When the client makes random writes, they appear in the buffer block in\n        the sequence they were written rather than the sequence they appear in\n        the file.  This makes for inefficient, fragmented manifests.  Attempt\n        to optimize by repacking writes in file sequence.\n\n        \"\"\"\n        if self._state != _BufferBlock.WRITABLE:\n            raise AssertionError(\"Cannot repack non-writable block\")\n\n        segs = self.owner.segments()\n\n        # Collect the segments that reference the buffer block.\n        bufferblock_segs = [s for s in segs if s.locator == self.blockid]\n\n        # Collect total data referenced by segments (could be smaller than\n        # bufferblock size if a portion of the file was written and\n        # then overwritten).\n        write_total = sum([s.range_size for s in bufferblock_segs])\n\n        if write_total < self.size() or len(bufferblock_segs) > 1:\n            # If there's more than one segment referencing this block, it is\n            # due to out-of-order writes and will produce a fragmented\n            # manifest, so try to optimize by re-packing into a new buffer.\n            contents = self.buffer_view[0:self.write_pointer].tobytes()\n            new_bb = _BufferBlock(None, write_total, None)\n            for t in bufferblock_segs:\n                new_bb.append(contents[t.segment_offset:t.segment_offset+t.range_size])\n                t.segment_offset = new_bb.size() - t.range_size\n\n            self.buffer_block = new_bb.buffer_block\n            self.buffer_view = new_bb.buffer_view\n            self.write_pointer = new_bb.write_pointer\n            self._locator = None\n            new_bb.clear()\n            self.owner.set_segments(segs)\n\n    def __repr__(self):\n        return \"<BufferBlock %s>\" % (self.blockid)\n\n\nclass NoopLock(object):\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        pass\n\n    def acquire(self, blocking=False):\n        pass\n\n    def release(self):\n        pass\n\n\ndef must_be_writable(orig_func):\n    @functools.wraps(orig_func)\n    def must_be_writable_wrapper(self, *args, **kwargs):\n        if not self.writable():\n            raise IOError(errno.EROFS, \"Collection is read-only.\")\n        return orig_func(self, *args, **kwargs)\n    return must_be_writable_wrapper\n\n\nclass _BlockManager(object):\n    \"\"\"BlockManager handles buffer blocks.\n\n    Also handles background block uploads, and background block prefetch for a\n    Collection of ArvadosFiles.\n\n    \"\"\"\n\n    DEFAULT_PUT_THREADS = 2\n\n    def __init__(self, keep,\n                 
copies=None,\n                 put_threads=None,\n                 num_retries=None,\n                 storage_classes_func=None):\n        \"\"\"keep: KeepClient object to use\"\"\"\n        self._keep = keep\n        self._bufferblocks = collections.OrderedDict()\n        self._put_queue = None\n        self._put_threads = None\n        self.lock = threading.Lock()\n        self.prefetch_lookahead = self._keep.num_prefetch_threads\n        self.num_put_threads = put_threads or _BlockManager.DEFAULT_PUT_THREADS\n        self.copies = copies\n        self.storage_classes = storage_classes_func or (lambda: [])\n        self._pending_write_size = 0\n        self.threads_lock = threading.Lock()\n        self.padding_block = None\n        self.num_retries = num_retries\n\n    @synchronized\n    def alloc_bufferblock(self, blockid=None, starting_capacity=2**14, owner=None):\n        \"\"\"Allocate a new, empty bufferblock in WRITABLE state and return it.\n\n        :blockid:\n          optional block identifier, otherwise one will be automatically assigned\n\n        :starting_capacity:\n          optional capacity, otherwise will use default capacity\n\n        :owner:\n          ArvadosFile that owns this block\n\n        \"\"\"\n        return self._alloc_bufferblock(blockid, starting_capacity, owner)\n\n    def _alloc_bufferblock(self, blockid=None, starting_capacity=2**14, owner=None):\n        if blockid is None:\n            blockid = str(uuid.uuid4())\n        bufferblock = _BufferBlock(blockid, starting_capacity=starting_capacity, owner=owner)\n        self._bufferblocks[bufferblock.blockid] = bufferblock\n        return bufferblock\n\n    @synchronized\n    def dup_block(self, block, owner):\n        \"\"\"Create a new bufferblock initialized with the content of an existing bufferblock.\n\n        :block:\n          the buffer block to copy.\n\n        :owner:\n          ArvadosFile that owns the new block\n\n        \"\"\"\n        new_blockid = str(uuid.uuid4())\n        bufferblock = block.clone(new_blockid, owner)\n        self._bufferblocks[bufferblock.blockid] = bufferblock\n        return bufferblock\n\n    @synchronized\n    def is_bufferblock(self, locator):\n        return locator in self._bufferblocks\n\n    def _commit_bufferblock_worker(self):\n        \"\"\"Background uploader thread.\"\"\"\n\n        while True:\n            try:\n                bufferblock = self._put_queue.get()\n                if bufferblock is None:\n                    return\n\n                if self.copies is None:\n                    loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes(), num_retries=self.num_retries, classes=self.storage_classes())\n                else:\n                    loc = self._keep.put(bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes(), num_retries=self.num_retries, copies=self.copies, classes=self.storage_classes())\n                bufferblock.set_state(_BufferBlock.COMMITTED, loc)\n            except Exception as e:\n                bufferblock.set_state(_BufferBlock.ERROR, e)\n            finally:\n                if self._put_queue is not None:\n                    self._put_queue.task_done()\n\n    def start_put_threads(self):\n        with self.threads_lock:\n            if self._put_threads is None:\n                # Start uploader threads.\n\n                # If we don't limit the Queue size, the upload queue can quickly\n                # grow to take up gigabytes of RAM if the writing process is\n               
 # generating data more quickly than it can be sent to the Keep\n                # servers.\n                #\n                # With two upload threads and a queue size of 2, this means up to 4\n                # blocks pending.  If they are full 64 MiB blocks, that means up to\n                # 256 MiB of internal buffering, which is the same size as the\n                # default download block cache in KeepClient.\n                self._put_queue = queue.Queue(maxsize=2)\n\n                self._put_threads = []\n                for i in range(0, self.num_put_threads):\n                    thread = threading.Thread(target=self._commit_bufferblock_worker)\n                    self._put_threads.append(thread)\n                    thread.daemon = True\n                    thread.start()\n\n    @synchronized\n    def stop_threads(self):\n        \"\"\"Shut down and wait for background upload threads to finish.\"\"\"\n\n        if self._put_threads is not None:\n            for t in self._put_threads:\n                self._put_queue.put(None)\n            for t in self._put_threads:\n                t.join()\n        self._put_threads = None\n        self._put_queue = None\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        self.stop_threads()\n\n    @synchronized\n    def repack_small_blocks(self, force=False, sync=False, closed_file_size=0):\n        \"\"\"Pack small blocks together before uploading.\"\"\"\n\n        self._pending_write_size += closed_file_size\n\n        # Check whether there are enough small blocks to fill up one full-size block\n        if not (force or (self._pending_write_size >= config.KEEP_BLOCK_SIZE)):\n            return\n\n        # Collect blocks that are ready to be packed together before being\n        # committed to Keep.\n        # A WRITABLE block always has an owner.\n        # A WRITABLE block with its owner.closed() implies that its\n        # size is <= KEEP_BLOCK_SIZE/2.\n        try:\n            small_blocks = [b for b in self._bufferblocks.values()\n                            if b.state() == _BufferBlock.WRITABLE and b.owner.closed()]\n        except AttributeError:\n            # Writable blocks without owner shouldn't exist.\n            raise UnownedBlockError()\n\n        if len(small_blocks) <= 1:\n            # Not enough small blocks for repacking\n            return\n\n        for bb in small_blocks:\n            bb.repack_writes()\n\n        # Update the pending write size count with its true value, just in case\n        # some small file was opened, written and closed several times.\n        self._pending_write_size = sum([b.size() for b in small_blocks])\n\n        if self._pending_write_size < config.KEEP_BLOCK_SIZE and not force:\n            return\n\n        new_bb = self._alloc_bufferblock()\n        new_bb.owner = []\n        files = []\n        while len(small_blocks) > 0 and (new_bb.write_pointer + small_blocks[0].size()) <= config.KEEP_BLOCK_SIZE:\n            bb = small_blocks.pop(0)\n            new_bb.owner.append(bb.owner)\n            self._pending_write_size -= bb.size()\n            new_bb.append(bb.buffer_view[0:bb.write_pointer].tobytes())\n            files.append((bb, new_bb.write_pointer - bb.size()))\n\n        self.commit_bufferblock(new_bb, sync=sync)\n\n        for bb, new_bb_segment_offset in files:\n            newsegs = bb.owner.segments()\n            for s in newsegs:\n                if s.locator == bb.blockid:\n                    
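# Repoint this segment at the packed block: this small block's\n                    # data now starts at new_bb_segment_offset within new_bb,\n                    # so shift the segment offset by that amount.\n                    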
s.locator = new_bb.blockid\n                    s.segment_offset = new_bb_segment_offset+s.segment_offset\n            bb.owner.set_segments(newsegs)\n            self._delete_bufferblock(bb.blockid)\n\n    def commit_bufferblock(self, block, sync):\n        \"\"\"Initiate a background upload of a bufferblock.\n\n        :block:\n          The block object to upload\n\n        :sync:\n          If `sync` is True, upload the block synchronously.\n          If `sync` is False, upload the block asynchronously.  This will\n          return immediately unless the upload queue is at capacity, in\n          which case it will wait on an upload queue slot.\n\n        \"\"\"\n        try:\n            # Mark the block as PENDING to disallow any more appends.\n            block.set_state(_BufferBlock.PENDING)\n        except StateChangeError as e:\n            if e.state == _BufferBlock.PENDING:\n                if sync:\n                    block.wait_for_commit.wait()\n                else:\n                    return\n            if block.state() == _BufferBlock.COMMITTED:\n                return\n            elif block.state() == _BufferBlock.ERROR:\n                raise block.error\n            else:\n                raise\n\n        if sync:\n            try:\n                if self.copies is None:\n                    loc = self._keep.put(block.buffer_view[0:block.write_pointer].tobytes(), num_retries=self.num_retries, classes=self.storage_classes())\n                else:\n                    loc = self._keep.put(block.buffer_view[0:block.write_pointer].tobytes(), num_retries=self.num_retries, copies=self.copies, classes=self.storage_classes())\n                block.set_state(_BufferBlock.COMMITTED, loc)\n            except Exception as e:\n                block.set_state(_BufferBlock.ERROR, e)\n                raise\n        else:\n            self.start_put_threads()\n            self._put_queue.put(block)\n\n    @synchronized\n    def get_bufferblock(self, locator):\n        return self._bufferblocks.get(locator)\n\n    @synchronized\n    def get_padding_block(self):\n        \"\"\"Get a bufferblock 64 MiB in size consisting of all zeros, used as padding\n        when using truncate() to extend the size of a file.\n\n        For reference (and possible future optimization), the md5sum of the\n        padding block is: 7f614da9329cd3aebf59b91aadc30bf0+67108864\n\n        \"\"\"\n\n        if self.padding_block is None:\n            self.padding_block = self._alloc_bufferblock(starting_capacity=config.KEEP_BLOCK_SIZE)\n            self.padding_block.write_pointer = config.KEEP_BLOCK_SIZE\n            self.commit_bufferblock(self.padding_block, False)\n        return self.padding_block\n\n    @synchronized\n    def delete_bufferblock(self, locator):\n        self._delete_bufferblock(locator)\n\n    def _delete_bufferblock(self, locator):\n        if locator in self._bufferblocks:\n            bb = self._bufferblocks[locator]\n            bb.clear()\n            del self._bufferblocks[locator]\n\n    def get_block_contents(self, locator, num_retries, cache_only=False):\n        \"\"\"Fetch a block.\n\n        First checks to see if the locator is a BufferBlock and returns that;\n        if not, passes the request through to KeepClient.get().\n\n        \"\"\"\n        with self.lock:\n            if locator in self._bufferblocks:\n                bufferblock = self._bufferblocks[locator]\n                if bufferblock.state() != _BufferBlock.COMMITTED:\n                    return 
bufferblock.buffer_view[0:bufferblock.write_pointer].tobytes()\n                else:\n                    locator = bufferblock._locator\n        if cache_only:\n            return self._keep.get_from_cache(locator)\n        else:\n            return self._keep.get(locator, num_retries=num_retries)\n\n    def commit_all(self):\n        \"\"\"Commit all outstanding buffer blocks.\n\n        This is a synchronous call, and will not return until all buffer blocks\n        are uploaded.  Raises KeepWriteError() if any blocks failed to upload.\n\n        \"\"\"\n        self.repack_small_blocks(force=True, sync=True)\n\n        with self.lock:\n            items = list(self._bufferblocks.items())\n\n        for k,v in items:\n            if v.state() != _BufferBlock.COMMITTED and v.owner:\n                # Skip blocks with a list of owners: if they're not in COMMITTED\n                # state, they're already being committed asynchronously.\n                if isinstance(v.owner, ArvadosFile):\n                    v.owner.flush(sync=False)\n\n        with self.lock:\n            if self._put_queue is not None:\n                self._put_queue.join()\n\n                err = []\n                for k,v in items:\n                    if v.state() == _BufferBlock.ERROR:\n                        err.append((v.locator(), v.error))\n                if err:\n                    raise KeepWriteError(\"Error writing some blocks\", err, label=\"block\")\n\n        for k,v in items:\n            # flush again with sync=True to remove committed bufferblocks from\n            # the segments.\n            if v.owner:\n                if isinstance(v.owner, ArvadosFile):\n                    v.owner.flush(sync=True)\n                elif isinstance(v.owner, list) and len(v.owner) > 0:\n                    # This bufferblock is referenced by many files as a result\n                    # of repacking small blocks, so don't delete it when flushing\n                    # its owners, just do it after flushing them all.\n                    for owner in v.owner:\n                        owner.flush(sync=True)\n                    self.delete_bufferblock(k)\n\n        self.stop_threads()\n\n    def block_prefetch(self, locator):\n        \"\"\"Initiate a background download of a block.\"\"\"\n\n        if not self.prefetch_lookahead:\n            return\n\n        with self.lock:\n            if locator in self._bufferblocks:\n                return\n\n        self._keep.block_prefetch(locator)\n\n\nclass ArvadosFile(object):\n    \"\"\"Represent a file in a Collection.\n\n    ArvadosFile manages the underlying representation of a file in Keep as a\n    sequence of segments spanning a set of blocks, and implements random\n    read/write access.\n\n    This object may be accessed from multiple threads.\n\n    \"\"\"\n\n    __slots__ = ('parent', 'name', '_writers', '_committed',\n                 '_segments', 'lock', '_current_bblock', 'fuse_entry', '_read_counter')\n\n    def __init__(self, parent, name, stream=[], segments=[]):\n        \"\"\"\n        ArvadosFile constructor.\n\n        :stream:\n          a list of Range objects representing a block stream\n\n        :segments:\n          a list of Range objects representing segments\n        \"\"\"\n        self.parent = parent\n        self.name = name\n        self._writers = set()\n        self._committed = False\n        self._segments = []\n        self.lock = parent.root_collection().lock\n        for s in segments:\n            
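# Each Range in `segments` describes a span of the block stream\n            # (position in s.locator, length in s.range_size); translate it\n            # into this file's own segment list.\n            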
self._add_segment(stream, s.locator, s.range_size)\n        self._current_bblock = None\n        self._read_counter = 0\n\n    def writable(self):\n        return self.parent.writable()\n\n    @synchronized\n    def permission_expired(self, as_of_dt=None):\n        \"\"\"Returns True if any of the segments' locators has expired\"\"\"\n        for r in self._segments:\n            if KeepLocator(r.locator).permission_expired(as_of_dt):\n                return True\n        return False\n\n    @synchronized\n    def has_remote_blocks(self):\n        \"\"\"Returns True if any of the segments' locators has a +R signature\"\"\"\n\n        for s in self._segments:\n            if '+R' in s.locator:\n                return True\n        return False\n\n    @synchronized\n    def _copy_remote_blocks(self, remote_blocks={}):\n        \"\"\"Ask Keep to copy remote blocks and point to their local copies.\n\n        This is called from the parent Collection.\n\n        :remote_blocks:\n            Shared cache of remote to local block mappings. This is used to avoid\n            doing extra work when blocks are shared by more than one file in\n            different subdirectories.\n        \"\"\"\n\n        for s in self._segments:\n            if '+R' in s.locator:\n                try:\n                    loc = remote_blocks[s.locator]\n                except KeyError:\n                    loc = self.parent._my_keep().refresh_signature(s.locator)\n                    remote_blocks[s.locator] = loc\n                s.locator = loc\n                self.parent.set_committed(False)\n        return remote_blocks\n\n    @synchronized\n    def segments(self):\n        return copy.copy(self._segments)\n\n    @synchronized\n    def clone(self, new_parent, new_name):\n        \"\"\"Make a copy of this file.\"\"\"\n        cp = ArvadosFile(new_parent, new_name)\n        cp.replace_contents(self)\n        return cp\n\n    @must_be_writable\n    @synchronized\n    def replace_contents(self, other):\n        \"\"\"Replace segments of this file with segments from another `ArvadosFile` object.\"\"\"\n\n        eventtype = TOK if self == other else MOD\n\n        map_loc = {}\n        self._segments = []\n        for other_segment in other.segments():\n            new_loc = other_segment.locator\n            if other.parent._my_block_manager().is_bufferblock(other_segment.locator):\n                if other_segment.locator not in map_loc:\n                    bufferblock = other.parent._my_block_manager().get_bufferblock(other_segment.locator)\n                    if bufferblock.state() != _BufferBlock.WRITABLE:\n                        map_loc[other_segment.locator] = bufferblock.locator()\n                    else:\n                        map_loc[other_segment.locator] = self.parent._my_block_manager().dup_block(bufferblock, self).blockid\n                new_loc = map_loc[other_segment.locator]\n\n            self._segments.append(streams.Range(new_loc, other_segment.range_start, other_segment.range_size, other_segment.segment_offset))\n\n        self.set_committed(False)\n        self.parent.notify(eventtype, self.parent, self.name, (self, self))\n\n    def __eq__(self, other):\n        if other is self:\n            return True\n        if not isinstance(other, ArvadosFile):\n            return False\n\n        othersegs = other.segments()\n        with self.lock:\n            if len(self._segments) != len(othersegs):\n                return False\n            for i in range(0, len(othersegs)):\n              
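  # Bufferblock IDs are transient, so resolve them to content-based\n                # locators before comparing the two segment lists.\n              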
  seg1 = self._segments[i]\n                seg2 = othersegs[i]\n                loc1 = seg1.locator\n                loc2 = seg2.locator\n\n                if self.parent._my_block_manager().is_bufferblock(loc1):\n                    loc1 = self.parent._my_block_manager().get_bufferblock(loc1).locator()\n\n                if other.parent._my_block_manager().is_bufferblock(loc2):\n                    loc2 = other.parent._my_block_manager().get_bufferblock(loc2).locator()\n\n                if (KeepLocator(loc1).stripped() != KeepLocator(loc2).stripped() or\n                    seg1.range_start != seg2.range_start or\n                    seg1.range_size != seg2.range_size or\n                    seg1.segment_offset != seg2.segment_offset):\n                    return False\n\n        return True\n\n    def __ne__(self, other):\n        return not self.__eq__(other)\n\n    @synchronized\n    def set_segments(self, segs):\n        self._segments = segs\n\n    @synchronized\n    def set_committed(self, value=True):\n        \"\"\"Set committed flag.\n\n        If value is True, set committed to be True.\n\n        If value is False, set committed to be False for this and all parents.\n        \"\"\"\n        if value == self._committed:\n            return\n        self._committed = value\n        if self._committed is False and self.parent is not None:\n            self.parent.set_committed(False)\n\n    @synchronized\n    def committed(self):\n        \"\"\"Get whether this is committed or not.\"\"\"\n        return self._committed\n\n    @synchronized\n    def add_writer(self, writer):\n        \"\"\"Add an ArvadosFileWriter reference to the list of writers\"\"\"\n        if isinstance(writer, ArvadosFileWriter):\n            self._writers.add(writer)\n\n    @synchronized\n    def remove_writer(self, writer, flush):\n        \"\"\"\n        Called from ArvadosFileWriter.close(). Remove a writer reference from the list\n        and do some block maintenance tasks.\n        \"\"\"\n        self._writers.remove(writer)\n\n        if flush or self.size() > config.KEEP_BLOCK_SIZE // 2:\n            # Flush requested, or file is too large for repacking\n            self.flush()\n        elif self.closed():\n            # All writers closed and size is adequate for repacking\n            self.parent._my_block_manager().repack_small_blocks(closed_file_size=self.size())\n\n    def closed(self):\n        \"\"\"\n        Get whether this is closed or not. When the writers list is empty, the file\n        is considered closed.\n        \"\"\"\n        return len(self._writers) == 0\n\n    @must_be_writable\n    @synchronized\n    def truncate(self, size):\n        \"\"\"Shrink or expand the size of the file.\n\n        If `size` is less than the size of the file, the file contents after\n        `size` will be discarded.  
If `size` is greater than the current size\n        of the file, it will be filled with zero bytes.\n\n        \"\"\"\n        if size < self.size():\n            new_segs = []\n            for r in self._segments:\n                range_end = r.range_start+r.range_size\n                if r.range_start >= size:\n                    # segment is past the truncate size, all done\n                    break\n                elif size < range_end:\n                    nr = streams.Range(r.locator, r.range_start, size - r.range_start, 0)\n                    nr.segment_offset = r.segment_offset\n                    new_segs.append(nr)\n                    break\n                else:\n                    new_segs.append(r)\n\n            self._segments = new_segs\n            self.set_committed(False)\n        elif size > self.size():\n            padding = self.parent._my_block_manager().get_padding_block()\n            diff = size - self.size()\n            while diff > config.KEEP_BLOCK_SIZE:\n                self._segments.append(streams.Range(padding.blockid, self.size(), config.KEEP_BLOCK_SIZE, 0))\n                diff -= config.KEEP_BLOCK_SIZE\n            if diff > 0:\n                self._segments.append(streams.Range(padding.blockid, self.size(), diff, 0))\n            self.set_committed(False)\n        else:\n            # size == self.size()\n            pass\n\n    def readfrom(self, offset, size, num_retries, exact=False, return_memoryview=False):\n        \"\"\"Read up to `size` bytes from the file starting at `offset`.\n\n        Arguments:\n\n        * exact: bool --- If False (default), return less data than\n          requested if the read crosses a block boundary and the next\n          block isn't cached.  If True, only return less data than\n          requested when hitting EOF.\n\n        * return_memoryview: bool --- If False (default) return a\n          `bytes` object, which may entail making a copy in some\n          situations.  If True, return a `memoryview` object which may\n          avoid making a copy, but may be incompatible with code\n          expecting a `bytes` object.\n\n        \"\"\"\n\n        with self.lock:\n            if size == 0 or offset >= self.size():\n                return memoryview(b'') if return_memoryview else b''\n            readsegs = streams.locators_and_ranges(self._segments, offset, size)\n\n            prefetch = None\n            prefetch_lookahead = self.parent._my_block_manager().prefetch_lookahead\n            if prefetch_lookahead:\n                # Doing prefetch on every read() call is surprisingly expensive\n                # when we're trying to deliver data at 600+ MiBps and want\n                # the read() fast path to be as lightweight as possible.\n                #\n                # Prefetching only once every 128 read operations\n                # dramatically reduces the overhead while still\n                # getting the benefit of prefetching (e.g. 
when\n                # reading 128 KiB at a time, it checks for prefetch\n                # every 16 MiB).\n                self._read_counter = (self._read_counter+1) % 128\n                if self._read_counter == 1:\n                    prefetch = streams.locators_and_ranges(\n                        self._segments,\n                        offset + size,\n                        config.KEEP_BLOCK_SIZE * prefetch_lookahead,\n                        limit=(1+prefetch_lookahead),\n                    )\n\n        locs = set()\n        data = []\n        for lr in readsegs:\n            block = self.parent._my_block_manager().get_block_contents(lr.locator, num_retries=num_retries, cache_only=(bool(data) and not exact))\n            if block:\n                blockview = memoryview(block)\n                data.append(blockview[lr.segment_offset:lr.segment_offset+lr.segment_size])\n                locs.add(lr.locator)\n            else:\n                break\n\n        if prefetch:\n            for lr in prefetch:\n                if lr.locator not in locs:\n                    self.parent._my_block_manager().block_prefetch(lr.locator)\n                    locs.add(lr.locator)\n\n        if len(data) == 1:\n            return data[0] if return_memoryview else data[0].tobytes()\n        else:\n            return memoryview(b''.join(data)) if return_memoryview else b''.join(data)\n\n\n    @must_be_writable\n    @synchronized\n    def writeto(self, offset, data, num_retries):\n        \"\"\"Write `data` to the file starting at `offset`.\n\n        This will update existing bytes and/or extend the size of the file as\n        necessary.\n\n        \"\"\"\n        if not isinstance(data, bytes) and not isinstance(data, memoryview):\n            data = data.encode()\n        if len(data) == 0:\n            return\n\n        if offset > self.size():\n            self.truncate(offset)\n\n        if len(data) > config.KEEP_BLOCK_SIZE:\n            # Chunk it up into smaller writes\n            n = 0\n            dataview = memoryview(data)\n            while n < len(data):\n                self.writeto(offset+n, dataview[n:n + config.KEEP_BLOCK_SIZE].tobytes(), num_retries)\n                n += config.KEEP_BLOCK_SIZE\n            return\n\n        self.set_committed(False)\n\n        if self._current_bblock is None or self._current_bblock.state() != _BufferBlock.WRITABLE:\n            self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)\n\n        if (self._current_bblock.size() + len(data)) > config.KEEP_BLOCK_SIZE:\n            self._current_bblock.repack_writes()\n            if (self._current_bblock.size() + len(data)) > config.KEEP_BLOCK_SIZE:\n                self.parent._my_block_manager().commit_bufferblock(self._current_bblock, sync=False)\n                self._current_bblock = self.parent._my_block_manager().alloc_bufferblock(owner=self)\n\n        self._current_bblock.append(data)\n        streams.replace_range(\n            self._segments,\n            offset,\n            len(data),\n            self._current_bblock.blockid,\n            self._current_bblock.write_pointer - len(data),\n        )\n        self.parent.notify(WRITE, self.parent, self.name, (self, self))\n        return len(data)\n\n    @synchronized\n    def flush(self, sync=True, num_retries=0):\n        \"\"\"Flush the current bufferblock to Keep.\n\n        :sync:\n          If True, commit block synchronously, wait until buffer block has been written.\n          If False, commit block 
asynchronously, return immediately after putting block into\n          the Keep put queue.\n        \"\"\"\n        if self.committed():\n            return\n\n        if self._current_bblock and self._current_bblock.state() != _BufferBlock.COMMITTED:\n            if self._current_bblock.state() == _BufferBlock.WRITABLE:\n                self._current_bblock.repack_writes()\n            if self._current_bblock.state() != _BufferBlock.DELETED:\n                self.parent._my_block_manager().commit_bufferblock(self._current_bblock, sync=sync)\n\n        if sync:\n            to_delete = set()\n            for s in self._segments:\n                bb = self.parent._my_block_manager().get_bufferblock(s.locator)\n                if bb:\n                    if bb.state() != _BufferBlock.COMMITTED:\n                        self.parent._my_block_manager().commit_bufferblock(bb, sync=True)\n                    to_delete.add(s.locator)\n                    s.locator = bb.locator()\n            for s in to_delete:\n                # Don't delete the bufferblock if it's owned by many files. It'll be\n                # deleted after all of its owners are flush()ed.\n                if self.parent._my_block_manager().get_bufferblock(s).owner is self:\n                    self.parent._my_block_manager().delete_bufferblock(s)\n\n        self.parent.notify(MOD, self.parent, self.name, (self, self))\n\n    @must_be_writable\n    @synchronized\n    def add_segment(self, blocks, pos, size):\n        \"\"\"Add a segment to the end of the file.\n\n        `pos` and `size` reference a section of the stream described by\n        `blocks` (a list of Range objects).\n\n        \"\"\"\n        self._add_segment(blocks, pos, size)\n\n    def _add_segment(self, blocks, pos, size):\n        \"\"\"Internal implementation of add_segment.\"\"\"\n        self.set_committed(False)\n        for lr in streams.locators_and_ranges(blocks, pos, size):\n            last = self._segments[-1] if self._segments else streams.Range(0, 0, 0, 0)\n            r = streams.Range(lr.locator, last.range_start+last.range_size, lr.segment_size, lr.segment_offset)\n            self._segments.append(r)\n\n    @synchronized\n    def size(self):\n        \"\"\"Get the file size.\"\"\"\n        if self._segments:\n            n = self._segments[-1]\n            return n.range_start + n.range_size\n        else:\n            return 0\n\n    @synchronized\n    def manifest_text(self, stream_name=\".\", portable_locators=False,\n                      normalize=False, only_committed=False):\n        buf = \"\"\n        filestream = []\n        for segment in self._segments:\n            loc = segment.locator\n            if self.parent._my_block_manager().is_bufferblock(loc):\n                if only_committed:\n                    continue\n                loc = self.parent._my_block_manager().get_bufferblock(loc).locator()\n            if portable_locators:\n                loc = KeepLocator(loc).stripped()\n            filestream.append(streams.LocatorAndRange(\n                loc,\n                KeepLocator(loc).size,\n                segment.segment_offset,\n                segment.range_size,\n            ))\n        buf += ' '.join(streams.normalize_stream(stream_name, {self.name: filestream}))\n        buf += \"\\n\"\n        return buf\n\n    @must_be_writable\n    @synchronized\n    def _reparent(self, newparent, newname):\n        self.set_committed(False)\n        self.flush(sync=True)\n        self.parent.remove(self.name)\n        
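# Detach from the old parent before adopting the new parent, name,\n        # and lock.\n        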
self.parent = newparent\n        self.name = newname\n        self.lock = self.parent.root_collection().lock\n\n\nclass ArvadosFileReader(ArvadosFileReaderBase):\n    \"\"\"Wraps ArvadosFile in a file-like object supporting reading only.\n\n    Be aware that this class is NOT thread safe as there is no locking around\n    updating the file pointer.\n\n    \"\"\"\n\n    def __init__(self, arvadosfile, mode=\"r\", num_retries=None):\n        super(ArvadosFileReader, self).__init__(arvadosfile.name, mode=mode, num_retries=num_retries)\n        self.arvadosfile = arvadosfile\n\n    def size(self):\n        return self.arvadosfile.size()\n\n    def stream_name(self):\n        return self.arvadosfile.parent.stream_name()\n\n    def readinto(self, b):\n        data = self.read(len(b))\n        b[:len(data)] = data\n        return len(data)\n\n    @_FileLikeObjectBase._before_close\n    @retry_method\n    def read(self, size=-1, num_retries=None, return_memoryview=False):\n        \"\"\"Read up to `size` bytes from the file and return the result.\n\n        Starts at the current file position.  If `size` is negative or None,\n        read the entire remainder of the file.\n\n        Returns an empty `bytes` object (or empty memoryview) once the file\n        pointer is at the end of the file.\n\n        Returns a `bytes` object, unless `return_memoryview` is True,\n        in which case it returns a memory view, which may avoid an\n        unnecessary data copy in some situations.\n\n        \"\"\"\n        if size is None or size < 0:\n            data = []\n            #\n            # specify exact=False, return_memoryview=True here so that we\n            # only copy data once into the final buffer.\n            #\n            rd = self.arvadosfile.readfrom(self._filepos, config.KEEP_BLOCK_SIZE, num_retries, exact=False, return_memoryview=True)\n            while rd:\n                data.append(rd)\n                self._filepos += len(rd)\n                rd = self.arvadosfile.readfrom(self._filepos, config.KEEP_BLOCK_SIZE, num_retries, exact=False, return_memoryview=True)\n            return memoryview(b''.join(data)) if return_memoryview else b''.join(data)\n        else:\n            data = self.arvadosfile.readfrom(self._filepos, size, num_retries, exact=True, return_memoryview=return_memoryview)\n            self._filepos += len(data)\n            return data\n\n    @_FileLikeObjectBase._before_close\n    @retry_method\n    def readfrom(self, offset, size, num_retries=None, return_memoryview=False):\n        \"\"\"Read up to `size` bytes from the stream, starting at the specified file offset.\n\n        This method does not change the file position.\n\n        Returns a `bytes` object, unless `return_memoryview` is True,\n        in which case it returns a memory view, which may avoid an\n        unnecessary data copy in some situations.\n\n        \"\"\"\n        return self.arvadosfile.readfrom(offset, size, num_retries, exact=True, return_memoryview=return_memoryview)\n\n    def flush(self):\n        pass\n\n\nclass ArvadosFileWriter(ArvadosFileReader):\n    \"\"\"Wraps ArvadosFile in a file-like object supporting both reading and writing.\n\n    Be aware that this class is NOT thread safe as there is no locking around\n    updating the file pointer.\n\n    \"\"\"\n\n    def __init__(self, arvadosfile, mode, num_retries=None):\n        super(ArvadosFileWriter, self).__init__(arvadosfile, mode=mode, num_retries=num_retries)\n        self.arvadosfile.add_writer(self)\n\n    def writable(self):\n        return True\n\n    
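# A minimal usage sketch (illustrative only; assumes `coll` is an\n    # existing writable Collection object):\n    #\n    #     with coll.open(\"notes.txt\", \"a\") as f:\n    #         f.write(\"hello\")   # \"a\" mode always appends at EOF\n\n    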
@_FileLikeObjectBase._before_close\n    @retry_method\n    def write(self, data, num_retries=None):\n        if self.mode[0] == \"a\":\n            self._filepos = self.size()\n        self.arvadosfile.writeto(self._filepos, data, num_retries)\n        self._filepos += len(data)\n        return len(data)\n\n    @_FileLikeObjectBase._before_close\n    @retry_method\n    def writelines(self, seq, num_retries=None):\n        for s in seq:\n            self.write(s, num_retries=num_retries)\n\n    @_FileLikeObjectBase._before_close\n    def truncate(self, size=None):\n        if size is None:\n            size = self._filepos\n        self.arvadosfile.truncate(size)\n\n    @_FileLikeObjectBase._before_close\n    def flush(self):\n        self.arvadosfile.flush()\n\n    def close(self, flush=True):\n        if not self.closed:\n            self.arvadosfile.remove_writer(self, flush)\n            super(ArvadosFileWriter, self).close()\n\n\nclass WrappableFile(object):\n    \"\"\"An interface to an Arvados file that's compatible with io wrappers.\n\n    \"\"\"\n    def __init__(self, f):\n        self.f = f\n        self.closed = False\n    def close(self):\n        self.closed = True\n        return self.f.close()\n    def flush(self):\n        return self.f.flush()\n    def read(self, *args, **kwargs):\n        return self.f.read(*args, **kwargs)\n    def readable(self):\n        return self.f.readable()\n    def readinto(self, *args, **kwargs):\n        return self.f.readinto(*args, **kwargs)\n    def seek(self, *args, **kwargs):\n        return self.f.seek(*args, **kwargs)\n    def seekable(self):\n        return self.f.seekable()\n    def tell(self):\n        return self.f.tell()\n    def writable(self):\n        return self.f.writable()\n    def write(self, *args, **kwargs):\n        return self.f.write(*args, **kwargs)\n"
  },
  {
    "path": "sdk/python/arvados/cache.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"arvados.cache - Shim compatibility module\n\nThis module used to define `arvados.cache.SafeHTTPCache`. Now it only exists\nto provide backwards compatible imports. New code should prefer to import\n`arvados.api.ThreadSafeHTTPCache`.\n\n@private\n\"\"\"\n\nfrom .api import ThreadSafeHTTPCache as SafeHTTPCache\n"
  },
  {
    "path": "sdk/python/arvados/collection.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Tools to work with Arvados collections\n\nThis module provides high-level interfaces to create, read, and update\nArvados collections. Most users will want to instantiate `Collection`\nobjects, and use methods like `Collection.open` and `Collection.mkdirs` to\nread and write data in the collection. Refer to the Arvados Python SDK\ncookbook for [an introduction to using the Collection class][cookbook].\n\n[cookbook]: https://doc.arvados.org/sdk/python/cookbook.html#working-with-collections\n\"\"\"\n\nimport ciso8601\nimport datetime\nimport errno\nimport functools\nimport hashlib\nimport io\nimport logging\nimport os\nimport re\nimport sys\nimport threading\nimport time\n\nfrom collections import deque\nfrom stat import *\n\nfrom ._internal import streams\nfrom .api import ThreadSafeAPIClient\nfrom .arvfile import split, _FileLikeObjectBase, ArvadosFile, ArvadosFileWriter, ArvadosFileReader, WrappableFile, _BlockManager, synchronized, must_be_writable, NoopLock, ADD, DEL, MOD, TOK, WRITE\nfrom .keep import KeepLocator, KeepClient\nimport arvados.config as config\nimport arvados.errors as errors\nimport arvados.util\nimport arvados.events as events\nfrom arvados.retry import retry_method\n\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    IO,\n    Iterator,\n    List,\n    Mapping,\n    Optional,\n    Tuple,\n    Union,\n)\n\nif sys.version_info < (3, 8):\n    from typing_extensions import Literal\nelse:\n    from typing import Literal\n\n_logger = logging.getLogger('arvados.collection')\n\n\nFILE = \"file\"\n\"\"\"`create_type` value for `Collection.find_or_create`\"\"\"\nCOLLECTION = \"collection\"\n\"\"\"`create_type` value for `Collection.find_or_create`\"\"\"\n\nChangeList = List[Union[\n    Tuple[Literal[ADD, DEL], str, 'Collection'],\n    Tuple[Literal[MOD, TOK], str, 'Collection', 'Collection'],\n]]\nChangeType = Literal[ADD, DEL, MOD, TOK]\nCollectionItem = Union[ArvadosFile, 'Collection']\nChangeCallback = Callable[[ChangeType, 'Collection', str, CollectionItem], object]\nCreateType = Literal[COLLECTION, FILE]\nProperties = Dict[str, Any]\nStorageClasses = List[str]\n\nclass CollectionBase(object):\n    \"\"\"Abstract base class for Collection classes\n\n    .. ATTENTION:: Internal\n       This class is meant to be used by other parts of the SDK. User code\n       should instantiate or subclass `Collection` or one of its subclasses\n       directly.\n    \"\"\"\n\n    def __enter__(self):\n        \"\"\"Enter a context block with this collection instance\"\"\"\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        \"\"\"Exit a context block with this collection instance\"\"\"\n        pass\n\n    def _my_keep(self):\n        if self._keep_client is None:\n            self._keep_client = KeepClient(api_client=self._api_client,\n                                           num_retries=self.num_retries)\n        return self._keep_client\n\n    def stripped_manifest(self) -> str:\n        \"\"\"Create a copy of the collection manifest with only size hints\n\n        This method returns a string with the current collection's manifest\n        text with all non-portable locator hints like permission hints and\n        remote cluster hints removed. 
The only hints in the returned manifest\n        will be size hints.\n        \"\"\"\n        raw = self.manifest_text()\n        clean = []\n        for line in raw.split(\"\\n\"):\n            fields = line.split()\n            if fields:\n                clean_fields = fields[:1] + [\n                    (re.sub(r'\\+[^\\d][^\\+]*', '', x)\n                     if re.match(arvados.util.keep_locator_pattern, x)\n                     else x)\n                    for x in fields[1:]]\n                clean += [' '.join(clean_fields), \"\\n\"]\n        return ''.join(clean)\n\n\nclass _WriterFile(_FileLikeObjectBase):\n    def __init__(self, coll_writer, name):\n        super(_WriterFile, self).__init__(name, 'wb')\n        self.dest = coll_writer\n\n    def close(self):\n        super(_WriterFile, self).close()\n        self.dest.finish_current_file()\n\n    @_FileLikeObjectBase._before_close\n    def write(self, data):\n        self.dest.write(data)\n\n    @_FileLikeObjectBase._before_close\n    def writelines(self, seq):\n        for data in seq:\n            self.write(data)\n\n    @_FileLikeObjectBase._before_close\n    def flush(self):\n        self.dest.flush_data()\n\n\nclass RichCollectionBase(CollectionBase):\n    \"\"\"Base class for Collection classes\n\n    .. ATTENTION:: Internal\n       This class is meant to be used by other parts of the SDK. User code\n       should instantiate or subclass `Collection` or one of its subclasses\n       directly.\n    \"\"\"\n\n    def __init__(self, parent=None):\n        self.parent = parent\n        self._committed = False\n        self._has_remote_blocks = False\n        self._callback = None\n        self._items = {}\n\n    def _my_api(self):\n        raise NotImplementedError()\n\n    def _my_keep(self):\n        raise NotImplementedError()\n\n    def _my_block_manager(self):\n        raise NotImplementedError()\n\n    def writable(self) -> bool:\n        \"\"\"Indicate whether this collection object can be modified\n\n        This method returns `False` if this object is a `CollectionReader`,\n        else `True`.\n        \"\"\"\n        raise NotImplementedError()\n\n    def root_collection(self) -> 'Collection':\n        \"\"\"Get this collection's root collection object\n\n        If you open a subcollection with `Collection.find`, calling this method\n        on that subcollection returns the source Collection object.\n        \"\"\"\n        raise NotImplementedError()\n\n    def stream_name(self) -> str:\n        \"\"\"Get the name of the manifest stream represented by this collection\n\n        If you open a subcollection with `Collection.find`, calling this method\n        on that subcollection returns the name of the stream you opened.\n        \"\"\"\n        raise NotImplementedError()\n\n    @synchronized\n    def has_remote_blocks(self) -> bool:\n        \"\"\"Indicate whether the collection refers to remote data\n\n        Returns `True` if the collection manifest includes any Keep locators\n        with a remote hint (`+R`), else `False`.\n        \"\"\"\n        if self._has_remote_blocks:\n            return True\n        for item in self:\n            if self[item].has_remote_blocks():\n                return True\n        return False\n\n    @synchronized\n    def set_has_remote_blocks(self, val: bool) -> None:\n        \"\"\"Cache whether this collection refers to remote blocks\n\n        .. 
ATTENTION:: Internal\n           This method is only meant to be used by other Collection methods.\n\n        Set this collection's cached \"has remote blocks\" flag to the given\n        value.\n        \"\"\"\n        self._has_remote_blocks = val\n        if self.parent:\n            self.parent.set_has_remote_blocks(val)\n\n    @must_be_writable\n    @synchronized\n    def find_or_create(\n            self,\n            path: str,\n            create_type: CreateType,\n    ) -> CollectionItem:\n        \"\"\"Get the item at the given path, creating it if necessary\n\n        If `path` refers to a stream in this collection, returns a\n        corresponding `Subcollection` object. If `path` refers to a file in\n        this collection, returns a corresponding\n        `arvados.arvfile.ArvadosFile` object. If `path` does not exist in\n        this collection, then this method creates a new object and returns\n        it, creating parent streams as needed. The type of object created is\n        determined by the value of `create_type`.\n\n        Arguments:\n\n        * path: str --- The path to find or create within this collection.\n\n        * create_type: Literal[COLLECTION, FILE] --- The type of object to\n          create at `path` if one does not exist. Passing `COLLECTION`\n          creates a stream and returns the corresponding\n          `Subcollection`. Passing `FILE` creates a new file and returns the\n          corresponding `arvados.arvfile.ArvadosFile`.\n        \"\"\"\n        pathcomponents = path.split(\"/\", 1)\n        if pathcomponents[0]:\n            item = self._items.get(pathcomponents[0])\n            if len(pathcomponents) == 1:\n                if item is None:\n                    # create a new item of the requested type\n                    if create_type == COLLECTION:\n                        item = Subcollection(self, pathcomponents[0])\n                    else:\n                        item = ArvadosFile(self, pathcomponents[0])\n                    self._items[pathcomponents[0]] = item\n                    self.set_committed(False)\n                    self.notify(ADD, self, pathcomponents[0], item)\n                return item\n            else:\n                if item is None:\n                    # create an intermediate subcollection\n                    item = Subcollection(self, pathcomponents[0])\n                    self._items[pathcomponents[0]] = item\n                    self.set_committed(False)\n                    self.notify(ADD, self, pathcomponents[0], item)\n                if isinstance(item, RichCollectionBase):\n                    return item.find_or_create(pathcomponents[1], create_type)\n                else:\n                    raise IOError(errno.ENOTDIR, \"Not a directory\", pathcomponents[0])\n        else:\n            return self\n\n    @synchronized\n    def find(self, path: str) -> CollectionItem:\n        \"\"\"Get the item at the given path\n\n        If `path` refers to a stream in this collection, returns a\n        corresponding `Subcollection` object. If `path` refers to a file in\n        this collection, returns a corresponding\n        `arvados.arvfile.ArvadosFile` object. 
If `path` does not exist in\n        this collection, this method returns `None`. If a non-final path\n        component refers to a file, this method raises `NotADirectoryError`.\n\n        Arguments:\n\n        * path: str --- The path to find within this collection.\n        \"\"\"\n        if not path:\n            raise errors.ArgumentError(\"Parameter 'path' is empty.\")\n\n        pathcomponents = path.split(\"/\", 1)\n        if pathcomponents[0] == '':\n            raise IOError(errno.ENOTDIR, \"Not a directory\", pathcomponents[0])\n\n        item = self._items.get(pathcomponents[0])\n        if item is None:\n            return None\n        elif len(pathcomponents) == 1:\n            return item\n        else:\n            if isinstance(item, RichCollectionBase):\n                if pathcomponents[1]:\n                    return item.find(pathcomponents[1])\n                else:\n                    return item\n            else:\n                raise IOError(errno.ENOTDIR, \"Not a directory\", pathcomponents[0])\n\n    @synchronized\n    def mkdirs(self, path: str) -> 'Subcollection':\n        \"\"\"Create and return a subcollection at `path`\n\n        If `path` exists within this collection, raises `FileExistsError`.\n        Otherwise, creates a stream at that path and returns the\n        corresponding `Subcollection`.\n        \"\"\"\n        if self.find(path) is not None:\n            raise IOError(errno.EEXIST, \"Directory or file exists\", path)\n\n        return self.find_or_create(path, COLLECTION)\n\n    def open(\n            self,\n            path: str,\n            mode: str=\"r\",\n            encoding: Optional[str]=None\n    ) -> IO:\n        \"\"\"Open a file-like object within the collection\n\n        This method returns a file-like object that can read and/or write the\n        file located at `path` within the collection. If you attempt to write\n        a `path` that does not exist, the file is created with `find_or_create`.\n        If the file cannot be opened for any other reason, this method raises\n        `OSError` with an appropriate errno.\n\n        Arguments:\n\n        * path: str --- The path of the file to open within this collection\n\n        * mode: str --- The mode to open this file. Supports all the same\n          values as `builtins.open`.\n\n        * encoding: str | None --- The text encoding of the file. Only used\n          when the file is opened in text mode. 
The default is\n          platform-dependent.\n\n        \"\"\"\n        if not re.search(r'^[rwa][bt]?\\+?$', mode):\n            raise errors.ArgumentError(\"Invalid mode {!r}\".format(mode))\n\n        if mode[0] == 'r' and '+' not in mode:\n            fclass = ArvadosFileReader\n            arvfile = self.find(path)\n        elif not self.writable():\n            raise IOError(errno.EROFS, \"Collection is read only\")\n        else:\n            fclass = ArvadosFileWriter\n            arvfile = self.find_or_create(path, FILE)\n\n        if arvfile is None:\n            raise IOError(errno.ENOENT, \"File not found\", path)\n        if not isinstance(arvfile, ArvadosFile):\n            raise IOError(errno.EISDIR, \"Is a directory\", path)\n\n        if mode[0] == 'w':\n            arvfile.truncate(0)\n\n        binmode = mode[0] + 'b' + re.sub('[bt]', '', mode[1:])\n        f = fclass(arvfile, mode=binmode, num_retries=self.num_retries)\n        if 'b' not in mode:\n            bufferclass = io.BufferedRandom if f.writable() else io.BufferedReader\n            f = io.TextIOWrapper(bufferclass(WrappableFile(f)), encoding=encoding)\n        return f\n\n    def modified(self) -> bool:\n        \"\"\"Indicate whether this collection has uncommitted changes\n\n        Returns `False` if this collection corresponds to a record loaded from\n        the API server, `True` otherwise.\n        \"\"\"\n        return not self.committed()\n\n    @synchronized\n    def committed(self):\n        \"\"\"Indicate whether this collection has an API server record\n\n        Returns `True` if this collection corresponds to a record loaded from\n        the API server, `False` otherwise.\n        \"\"\"\n        return self._committed\n\n    @synchronized\n    def set_committed(self, value: bool=True):\n        \"\"\"Cache whether this collection has an API server record\n\n        .. ATTENTION:: Internal\n           This method is only meant to be used by other Collection methods.\n\n        Set this collection's cached \"committed\" flag to the given\n        value and propagate it as needed.\n        \"\"\"\n        if value == self._committed:\n            return\n        if value:\n            for k,v in self._items.items():\n                v.set_committed(True)\n            self._committed = True\n        else:\n            self._committed = False\n            if self.parent is not None:\n                self.parent.set_committed(False)\n\n    @synchronized\n    def __iter__(self) -> Iterator[str]:\n        \"\"\"Iterate names of streams and files in this collection\n\n        This method does not recurse. It only iterates the contents of this\n        collection's corresponding stream.\n        \"\"\"\n        return iter(self._items)\n\n    @synchronized\n    def __getitem__(self, k: str) -> CollectionItem:\n        \"\"\"Get an `arvados.arvfile.ArvadosFile` or `Subcollection` in this collection\n\n        This method does not recurse. If you want to search a path, use\n        `RichCollectionBase.find` instead.\n        \"\"\"\n        return self._items[k]\n\n    @synchronized\n    def __contains__(self, k: str) -> bool:\n        \"\"\"Indicate whether this collection has an item with this name\n\n        This method does not recurse. 
If you want to check a path, use\n        `RichCollectionBase.exists` instead.\n        \"\"\"\n        return k in self._items\n\n    @synchronized\n    def __len__(self):\n        \"\"\"Get the number of items directly contained in this collection\n\n        This method does not recurse. It only counts the streams and files\n        in this collection's corresponding stream.\n        \"\"\"\n        return len(self._items)\n\n    @must_be_writable\n    @synchronized\n    def __delitem__(self, p: str) -> None:\n        \"\"\"Delete an item from this collection's stream\n\n        This method does not recurse. If you want to remove an item by a\n        path, use `RichCollectionBase.remove` instead.\n        \"\"\"\n        del self._items[p]\n        self.set_committed(False)\n        self.notify(DEL, self, p, None)\n\n    @synchronized\n    def keys(self) -> Iterator[str]:\n        \"\"\"Iterate names of streams and files in this collection\n\n        This method does not recurse. It only iterates the contents of this\n        collection's corresponding stream.\n        \"\"\"\n        return self._items.keys()\n\n    @synchronized\n    def values(self) -> List[CollectionItem]:\n        \"\"\"Get a list of objects in this collection's stream\n\n        The return value includes a `Subcollection` for every stream, and an\n        `arvados.arvfile.ArvadosFile` for every file, directly within this\n        collection's stream.  This method does not recurse.\n        \"\"\"\n        return list(self._items.values())\n\n    @synchronized\n    def items(self) -> List[Tuple[str, CollectionItem]]:\n        \"\"\"Get a list of `(name, object)` tuples from this collection's stream\n\n        The return value includes a `Subcollection` for every stream, and an\n        `arvados.arvfile.ArvadosFile` for every file, directly within this\n        collection's stream.  This method does not recurse.\n        \"\"\"\n        return list(self._items.items())\n\n    def exists(self, path: str) -> bool:\n        \"\"\"Indicate whether this collection includes an item at `path`\n\n        This method returns `True` if `path` refers to a stream or file within\n        this collection, else `False`.\n\n        Arguments:\n\n        * path: str --- The path to check for existence within this collection\n        \"\"\"\n        return self.find(path) is not None\n\n    @must_be_writable\n    @synchronized\n    def remove(self, path: str, recursive: bool=False) -> None:\n        \"\"\"Remove the file or stream at `path`\n\n        Arguments:\n\n        * path: str --- The path of the item to remove from the collection\n\n        * recursive: bool --- Controls the method's behavior if `path` refers\n          to a nonempty stream. If `False` (the default), this method raises\n          `OSError` with errno `ENOTEMPTY`. 
If `True`, this method removes all\n          items under the stream.\n        \"\"\"\n        if not path:\n            raise errors.ArgumentError(\"Parameter 'path' is empty.\")\n\n        pathcomponents = path.split(\"/\", 1)\n        item = self._items.get(pathcomponents[0])\n        if item is None:\n            raise IOError(errno.ENOENT, \"File not found\", path)\n        if len(pathcomponents) == 1:\n            if isinstance(self._items[pathcomponents[0]], RichCollectionBase) and len(self._items[pathcomponents[0]]) > 0 and not recursive:\n                raise IOError(errno.ENOTEMPTY, \"Directory not empty\", path)\n            deleteditem = self._items[pathcomponents[0]]\n            del self._items[pathcomponents[0]]\n            self.set_committed(False)\n            self.notify(DEL, self, pathcomponents[0], deleteditem)\n        else:\n            item.remove(pathcomponents[1], recursive=recursive)\n\n    def _clonefrom(self, source):\n        for k,v in source.items():\n            self._items[k] = v.clone(self, k)\n\n    def clone(self):\n        raise NotImplementedError()\n\n    @must_be_writable\n    @synchronized\n    def add(\n            self,\n            source_obj: CollectionItem,\n            target_name: str,\n            overwrite: bool=False,\n            reparent: bool=False,\n    ) -> None:\n        \"\"\"Copy or move a file or subcollection object to this collection\n\n        Arguments:\n\n        * source_obj: arvados.arvfile.ArvadosFile | Subcollection --- The file or subcollection\n          to add to this collection\n\n        * target_name: str --- The path inside this collection where\n          `source_obj` should be added.\n\n        * overwrite: bool --- Controls the behavior of this method when the\n          collection already contains an object at `target_name`. If `False`\n          (the default), this method will raise `FileExistsError`. If `True`,\n          the object at `target_name` will be replaced with `source_obj`.\n\n        * reparent: bool --- Controls whether this method copies or moves\n          `source_obj`. If `False` (the default), `source_obj` is copied into\n          this collection. 
If `True`, `source_obj` is moved into this\n          collection.\n        \"\"\"\n        if target_name in self and not overwrite:\n            raise IOError(errno.EEXIST, \"File already exists\", target_name)\n\n        modified_from = None\n        if target_name in self:\n            modified_from = self[target_name]\n\n        # Actually make the move or copy.\n        if reparent:\n            source_obj._reparent(self, target_name)\n            item = source_obj\n        else:\n            item = source_obj.clone(self, target_name)\n\n        self._items[target_name] = item\n        self.set_committed(False)\n        if not self._has_remote_blocks and source_obj.has_remote_blocks():\n            self.set_has_remote_blocks(True)\n\n        if modified_from:\n            self.notify(MOD, self, target_name, (modified_from, item))\n        else:\n            self.notify(ADD, self, target_name, item)\n\n    def _get_src_target(self, source, target_path, source_collection, create_dest):\n        if source_collection is None:\n            source_collection = self\n\n        # Find the object\n        if isinstance(source, str):\n            source_obj = source_collection.find(source)\n            if source_obj is None:\n                raise IOError(errno.ENOENT, \"File not found\", source)\n            sourcecomponents = source.split(\"/\")\n        else:\n            source_obj = source\n            sourcecomponents = None\n\n        # Find the parent collection for the target path\n        targetcomponents = target_path.split(\"/\")\n\n        # Determine the name to use.\n        target_name = targetcomponents[-1] if targetcomponents[-1] else sourcecomponents[-1]\n\n        if not target_name:\n            raise errors.ArgumentError(\"Target path is empty and source is an object.  Cannot determine destination filename to use.\")\n\n        if create_dest:\n            target_dir = self.find_or_create(\"/\".join(targetcomponents[0:-1]), COLLECTION)\n        else:\n            if len(targetcomponents) > 1:\n                target_dir = self.find(\"/\".join(targetcomponents[0:-1]))\n            else:\n                target_dir = self\n\n        if target_dir is None:\n            raise IOError(errno.ENOENT, \"Target directory not found\", target_name)\n\n        if target_name in target_dir and isinstance(target_dir[target_name], RichCollectionBase) and sourcecomponents:\n            target_dir = target_dir[target_name]\n            target_name = sourcecomponents[-1]\n\n        return (source_obj, target_dir, target_name)\n\n    @must_be_writable\n    @synchronized\n    def copy(\n            self,\n            source: Union[str, CollectionItem],\n            target_path: str,\n            source_collection: Optional['RichCollectionBase']=None,\n            overwrite: bool=False,\n    ) -> None:\n        \"\"\"Copy a file or subcollection object to this collection\n\n        Arguments:\n\n        * source: str | arvados.arvfile.ArvadosFile |\n          arvados.collection.Subcollection --- The file or subcollection to\n          add to this collection. If `source` is a str, the object will be\n          found by looking up this path from `source_collection` (see\n          below).\n\n        * target_path: str --- The path inside this collection where the\n          source object should be added.\n\n        * source_collection: arvados.collection.Collection | None --- The\n          collection to find the source object from when `source` is a\n          path. 
Defaults to the current collection (`self`).\n\n        * overwrite: bool --- Controls the behavior of this method when the\n          collection already contains an object at `target_path`. If `False`\n          (the default), this method will raise `FileExistsError`. If `True`,\n          the object at `target_path` will be replaced with the source object.\n
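\n        A minimal usage sketch (paths are illustrative; `src_coll` and\n        `dst_coll` stand for existing `Collection` objects):\n\n            dst_coll.copy('inputs/a.txt', 'backup/a.txt', source_collection=src_coll)\n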
        \"\"\"\n        source_obj, target_dir, target_name = self._get_src_target(source, target_path, source_collection, True)\n        target_dir.add(source_obj, target_name, overwrite, False)\n\n    @must_be_writable\n    @synchronized\n    def rename(\n            self,\n            source: Union[str, CollectionItem],\n            target_path: str,\n            source_collection: Optional['RichCollectionBase']=None,\n            overwrite: bool=False,\n    ) -> None:\n        \"\"\"Move a file or subcollection object to this collection\n\n        Arguments:\n\n        * source: str | arvados.arvfile.ArvadosFile |\n          arvados.collection.Subcollection --- The file or subcollection to\n          move into this collection. If `source` is a str, the object will be\n          found by looking up this path from `source_collection` (see\n          below).\n\n        * target_path: str --- The path inside this collection where the\n          source object should be added.\n\n        * source_collection: arvados.collection.Collection | None --- The\n          collection to find the source object from when `source` is a\n          path. Defaults to the current collection (`self`).\n\n        * overwrite: bool --- Controls the behavior of this method when the\n          collection already contains an object at `target_path`. If `False`\n          (the default), this method will raise `FileExistsError`. If `True`,\n          the object at `target_path` will be replaced with the source object.\n        \"\"\"\n        source_obj, target_dir, target_name = self._get_src_target(source, target_path, source_collection, False)\n        if not source_obj.writable():\n            raise IOError(errno.EROFS, \"Source collection is read only\", source)\n        target_dir.add(source_obj, target_name, overwrite, True)\n\n    def portable_manifest_text(self, stream_name: str=\".\") -> str:\n        \"\"\"Get the portable manifest text for this collection\n\n        The portable manifest text is normalized, and does not include access\n        tokens. This method does not flush outstanding blocks to Keep.\n\n        Arguments:\n\n        * stream_name: str --- The name to use for this collection's stream in\n          the generated manifest. Default `'.'`.\n        \"\"\"\n        return self._get_manifest_text(stream_name, True, True)\n\n    @synchronized\n    def manifest_text(\n            self,\n            stream_name: str=\".\",\n            strip: bool=False,\n            normalize: bool=False,\n            only_committed: bool=False,\n    ) -> str:\n        \"\"\"Get the manifest text for this collection\n\n        Arguments:\n\n        * stream_name: str --- The name to use for this collection's stream in\n          the generated manifest. Default `'.'`.\n\n        * strip: bool --- Controls whether or not the returned manifest text\n          includes access tokens. If `False` (the default), the manifest text\n          will include access tokens. If `True`, the manifest text will not\n          include access tokens.\n\n        * normalize: bool --- Controls whether or not the returned manifest\n          text is normalized. Default `False`.\n\n        * only_committed: bool --- Controls whether or not this method uploads\n          pending data to Keep before building and returning the manifest text.\n          If `False` (the default), this method will finish uploading all data\n          to Keep, then return the final manifest. If `True`, this method will\n          build and return a manifest that only refers to the data that has\n          finished uploading at the time this method was called.\n        \"\"\"\n        if not only_committed:\n            self._my_block_manager().commit_all()\n        return self._get_manifest_text(stream_name, strip, normalize,\n                                       only_committed=only_committed)\n\n    @synchronized\n    def _get_manifest_text(self, stream_name, strip, normalize, only_committed=False):\n        \"\"\"Get the manifest text for this collection, its subcollections, and files.\n\n        :stream_name:\n          Name to use for this stream (directory)\n\n        :strip:\n          If True, remove signing tokens from block locators if present.\n          If False (default), block locators are left unchanged.\n\n        :normalize:\n          If True, always export the manifest text in normalized form\n          even if the Collection is not modified.  If False (default) and the collection\n          is not modified, return the original manifest text even if it is not\n          in normalized form.\n\n        :only_committed:\n          If True, only include blocks that were already committed to Keep.\n\n        \"\"\"\n\n        if not self.committed() or self._manifest_text is None or normalize:\n            stream = {}\n            buf = []\n            sorted_keys = sorted(self.keys())\n            for filename in [s for s in sorted_keys if isinstance(self[s], ArvadosFile)]:\n                # Build the segment list for each file in this stream\n                arvfile = self[filename]\n                filestream = []\n                for segment in arvfile.segments():\n                    loc = segment.locator\n                    if arvfile.parent._my_block_manager().is_bufferblock(loc):\n                        if only_committed:\n                            continue\n                        loc = arvfile.parent._my_block_manager().get_bufferblock(loc).locator()\n                    if strip:\n                        loc = KeepLocator(loc).stripped()\n                    filestream.append(streams.LocatorAndRange(\n                        loc,\n                        KeepLocator(loc).size,\n                        segment.segment_offset,\n                        segment.range_size,\n                    ))\n                stream[filename] = filestream\n            if stream:\n                buf.append(\" \".join(streams.normalize_stream(stream_name, stream)) + \"\\n\")\n            for dirname in [s for s in sorted_keys if isinstance(self[s], RichCollectionBase)]:\n                buf.append(self[dirname].manifest_text(\n                    stream_name=os.path.join(stream_name, dirname),\n                    strip=strip, normalize=True, only_committed=only_committed))\n            return \"\".join(buf)\n        else:\n            if strip:\n                return self.stripped_manifest()\n            else:\n                return self._manifest_text\n\n    @synchronized\n    def _copy_remote_blocks(self, remote_blocks={}):\n        \"\"\"Scan through the entire collection and ask Keep to copy remote blocks.\n\n        When accessing a remote collection, blocks will have a remote 
signature\n        (+R instead of +A). Collect these signatures and request Keep to copy the\n        blocks to the local cluster, returning local (+A) signatures.\n\n        :remote_blocks:\n          Shared cache of remote to local block mappings. This is used to avoid\n          doing extra work when blocks are shared by more than one file in\n          different subdirectories.\n\n        \"\"\"\n        for item in self:\n            remote_blocks = self[item]._copy_remote_blocks(remote_blocks)\n        return remote_blocks\n\n    @synchronized\n    def diff(\n            self,\n            end_collection: 'RichCollectionBase',\n            prefix: str=\".\",\n            holding_collection: Optional['Collection']=None,\n    ) -> ChangeList:\n        \"\"\"Build a list of differences between this collection and another\n\n        Arguments:\n\n        * end_collection: arvados.collection.RichCollectionBase --- A\n          collection object with the desired end state. The returned diff\n          list will describe how to go from the current collection object\n          `self` to `end_collection`.\n\n        * prefix: str --- The name to use for this collection's stream in\n          the diff list. Default `'.'`.\n\n        * holding_collection: arvados.collection.Collection | None --- A\n          collection object used to hold objects for the returned diff\n          list. By default, a new empty collection is created.\n        \"\"\"\n        changes = []\n        if holding_collection is None:\n            holding_collection = Collection(api_client=self._my_api(), keep_client=self._my_keep())\n        for k in self:\n            if k not in end_collection:\n                changes.append((DEL, os.path.join(prefix, k), self[k].clone(holding_collection, \"\")))\n        for k in end_collection:\n            if k in self:\n                if isinstance(end_collection[k], Subcollection) and isinstance(self[k], Subcollection):\n                    changes.extend(self[k].diff(end_collection[k], os.path.join(prefix, k), holding_collection))\n                elif end_collection[k] != self[k]:\n                    changes.append((MOD, os.path.join(prefix, k), self[k].clone(holding_collection, \"\"), end_collection[k].clone(holding_collection, \"\")))\n                else:\n                    changes.append((TOK, os.path.join(prefix, k), self[k].clone(holding_collection, \"\"), end_collection[k].clone(holding_collection, \"\")))\n            else:\n                changes.append((ADD, os.path.join(prefix, k), end_collection[k].clone(holding_collection, \"\")))\n        return changes\n\n    @must_be_writable\n    @synchronized\n    def apply(self, changes: ChangeList) -> None:\n        \"\"\"Apply a list of changes to this collection\n\n        This method takes a list of changes generated by\n        `RichCollectionBase.diff` and applies it to this\n        collection. Afterward, the state of this collection object will\n        match the state of `end_collection` passed to `diff`. 
If a change\n        conflicts with a local change, it will be saved to an alternate path\n        indicating the conflict.\n\n        Arguments:\n\n        * changes: arvados.collection.ChangeList --- The list of differences\n          generated by `RichCollectionBase.diff`.\n
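\n        A minimal usage sketch (assumes `mine` and `theirs` are `Collection`\n        objects loaded from the same collection):\n\n            # Make `mine` match `theirs`, flagging conflicting local changes.\n            mine.apply(mine.diff(theirs))\n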
        \"\"\"\n        if changes:\n            self.set_committed(False)\n        for change in changes:\n            event_type = change[0]\n            path = change[1]\n            initial = change[2]\n            local = self.find(path)\n            conflictpath = \"%s~%s~conflict~\" % (path, time.strftime(\"%Y%m%d-%H%M%S\",\n                                                                    time.gmtime()))\n            if event_type == ADD:\n                if local is None:\n                    # No local file at path, safe to copy over new file\n                    self.copy(initial, path)\n                elif local is not None and local != initial:\n                    # There is already a local file and it is different:\n                    # save change to conflict file.\n                    self.copy(initial, conflictpath)\n            elif event_type == MOD or event_type == TOK:\n                final = change[3]\n                if local == initial:\n                    # Local matches the \"initial\" item so it has not\n                    # changed locally and is safe to update.\n                    if isinstance(local, ArvadosFile) and isinstance(final, ArvadosFile):\n                        # Replace contents of local file with new contents\n                        local.replace_contents(final)\n                    else:\n                        # Overwrite path with new item; this can happen if\n                        # path was a file and is now a collection or vice versa\n                        self.copy(final, path, overwrite=True)\n                elif event_type == MOD:\n                    # Local doesn't match the \"initial\" value or local\n                    # is missing (presumably deleted) so save change\n                    # to conflict file.  Don't do this for TOK events,\n                    # which mean the file didn't change but only had\n                    # tokens updated.\n                    self.copy(final, conflictpath)\n            elif event_type == DEL:\n                if local == initial:\n                    # Local item matches \"initial\" value, so it is safe to remove.\n                    self.remove(path, recursive=True)\n                # else, the file is modified or already removed, in either\n                # case we don't want to try to remove it.\n\n    def portable_data_hash(self) -> str:\n        \"\"\"Get the portable data hash for this collection's manifest\"\"\"\n        if self._manifest_locator and self.committed():\n            # If the collection is already saved on the API server and it's\n            # committed, then return the API server's PDH response.\n            return self._portable_data_hash\n        else:\n            stripped = self.portable_manifest_text().encode()\n            return '{}+{}'.format(hashlib.md5(stripped).hexdigest(), len(stripped))\n\n    @synchronized\n    def subscribe(self, callback: ChangeCallback) -> None:\n        \"\"\"Set a notify callback for changes to this collection\n\n        Arguments:\n\n        * callback: arvados.collection.ChangeCallback --- The callable to\n          call each time the collection is changed.\n        \"\"\"\n        if self._callback is None:\n            self._callback = callback\n        else:\n            raise errors.ArgumentError(\"A callback is already set on this collection.\")\n\n    @synchronized\n    def unsubscribe(self) -> None:\n        \"\"\"Remove any notify callback set for changes to this collection\"\"\"\n        if self._callback is not None:\n            self._callback = None\n\n    @synchronized\n    def notify(\n            self,\n            event: ChangeType,\n            collection: 'RichCollectionBase',\n            name: str,\n            item: CollectionItem,\n    ) -> None:\n        \"\"\"Notify any subscribed callback about a change to this collection\n\n        .. ATTENTION:: Internal\n           This method is only meant to be used by other Collection methods.\n\n        If a callback has been registered with `RichCollectionBase.subscribe`,\n        it will be called with information about a change to this collection.\n        Then this notification will be propagated to this collection's root.\n\n        Arguments:\n\n        * event: Literal[ADD, DEL, MOD, TOK] --- The type of modification to\n          the collection.\n\n        * collection: arvados.collection.RichCollectionBase --- The\n          collection that was modified.\n\n        * name: str --- The name of the file or stream within `collection` that\n          was modified.\n\n        * item: arvados.arvfile.ArvadosFile |\n          arvados.collection.Subcollection --- For ADD events, the new\n          contents at `name` within `collection`; for DEL events, the\n          item that was removed.  
For MOD and TOK events, a 2-tuple of\n          the previous item and the new item (may be the same object\n          or different, depending on whether the action involved it\n          being modified in place or replaced).\n\n        \"\"\"\n        if self._callback:\n            self._callback(event, collection, name, item)\n        self.root_collection().notify(event, collection, name, item)\n\n    @synchronized\n    def __eq__(self, other: Any) -> bool:\n        \"\"\"Indicate whether this collection object is equal to another\"\"\"\n        if other is self:\n            return True\n        if not isinstance(other, RichCollectionBase):\n            return False\n        if len(self._items) != len(other):\n            return False\n        for k in self._items:\n            if k not in other:\n                return False\n            if self._items[k] != other[k]:\n                return False\n        return True\n\n    def __ne__(self, other: Any) -> bool:\n        \"\"\"Indicate whether this collection object is not equal to another\"\"\"\n        return not self.__eq__(other)\n\n    @synchronized\n    def flush(self) -> None:\n        \"\"\"Upload any pending data to Keep\"\"\"\n        for e in self.values():\n            e.flush()\n\n\nclass Collection(RichCollectionBase):\n    \"\"\"Read and manipulate an Arvados collection\n\n    This class provides a high-level interface to create, read, and update\n    Arvados collections and their contents. Refer to the Arvados Python SDK\n    cookbook for [an introduction to using the Collection class][cookbook].\n\n    [cookbook]: https://doc.arvados.org/sdk/python/cookbook.html#working-with-collections\n
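\n    A minimal usage sketch (the UUID is a made-up placeholder, and the\n    `open` method is assumed to be available as elsewhere in this SDK):\n\n        import arvados.collection\n        coll = arvados.collection.Collection('zzzzz-4zz18-0123456789abcde')\n        with coll.open('foo.txt') as my_file:\n            print(my_file.read())\n    \"\"\"\n\n    def __init__(self, manifest_locator_or_text: Optional[str]=None,\n                 api_client: Optional['arvados.api_resources.ArvadosAPIClient']=None,\n                 keep_client: Optional['arvados.keep.KeepClient']=None,\n                 num_retries: int=10,\n                 parent: Optional['Collection']=None,\n                 apiconfig: Optional[Mapping[str, str]]=None,\n                 block_manager: Optional['arvados.arvfile._BlockManager']=None,\n                 replication_desired: Optional[int]=None,\n                 storage_classes_desired: Optional[List[str]]=None,\n                 put_threads: Optional[int]=None):\n        \"\"\"Initialize a Collection object\n\n        Arguments:\n\n        * manifest_locator_or_text: str | None --- This string can contain a\n          collection manifest text, portable data hash, or UUID. When given a\n          portable data hash or UUID, this instance will load a collection\n          record from the API server. Otherwise, this instance will represent a\n          new collection without an API server record. The default value `None`\n          instantiates a new collection with an empty manifest.\n\n        * api_client: arvados.api_resources.ArvadosAPIClient | None --- The\n          Arvados API client object this instance uses to make requests. If\n          none is given, this instance creates its own client using the\n          settings from `apiconfig` (see below). 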
If your client instantiates\n          many Collection objects, you can help limit memory utilization by\n          calling `arvados.api.api` to construct an\n          `arvados.api.ThreadSafeAPIClient`, and use that as the `api_client`\n          for every Collection.\n\n        * keep_client: arvados.keep.KeepClient | None --- The Keep client\n          object this instance uses to make requests. If none is given, this\n          instance creates its own client using its `api_client`.\n\n        * num_retries: int --- The number of times that client requests are\n          retried. Default 10.\n\n        * parent: arvados.collection.Collection | None --- The parent Collection\n          object of this instance, if any. This argument is primarily used by\n          other Collection methods; user client code shouldn't need to use it.\n\n        * apiconfig: Mapping[str, str] | None --- A mapping with entries for\n          `ARVADOS_API_HOST`, `ARVADOS_API_TOKEN`, and optionally\n          `ARVADOS_API_HOST_INSECURE`. When no `api_client` is provided, the\n          Collection object constructs one from these settings. If no\n          mapping is provided, calls `arvados.config.settings` to get these\n          parameters from user configuration.\n\n        * block_manager: arvados.arvfile._BlockManager | None --- The\n          _BlockManager object used by this instance to coordinate reading\n          and writing Keep data blocks. If none is given, this instance\n          constructs its own. This argument is primarily used by other\n          Collection methods; user client code shouldn't need to use it.\n\n        * replication_desired: int | None --- This controls both the value of\n          the `replication_desired` field on API collection records saved by\n          this class, as well as the number of Keep services that the object\n          writes new data blocks to. If none is given, uses the default value\n          configured for the cluster.\n\n        * storage_classes_desired: list[str] | None --- This controls both\n          the value of the `storage_classes_desired` field on API collection\n          records saved by this class, as well as selecting which specific\n          Keep services the object writes new data blocks to. If none is\n          given, defaults to an empty list.\n\n        * put_threads: int | None --- The number of threads to run\n          simultaneously to upload data blocks to Keep. This value is used when\n          building a new `block_manager`. 
It is unused when a `block_manager`\n          is provided.\n        \"\"\"\n\n        if storage_classes_desired and type(storage_classes_desired) is not list:\n            raise errors.ArgumentError(\"storage_classes_desired must be list type.\")\n\n        super(Collection, self).__init__(parent)\n        self._api_client = api_client\n        self._keep_client = keep_client\n\n        # Use the keep client from ThreadSafeAPIClient\n        if self._keep_client is None and isinstance(self._api_client, ThreadSafeAPIClient):\n            self._keep_client = self._api_client.keep\n\n        self._block_manager = block_manager\n        self.replication_desired = replication_desired\n        self._storage_classes_desired = storage_classes_desired\n        self.put_threads = put_threads\n\n        if apiconfig:\n            self._config = apiconfig\n        else:\n            self._config = config.settings()\n\n        self.num_retries = num_retries\n        self._manifest_locator = None\n        self._manifest_text = None\n        self._portable_data_hash = None\n        self._api_response = None\n        self._token_refresh_timestamp = 0\n\n        self.lock = threading.RLock()\n        self.events = None\n\n        if manifest_locator_or_text:\n            if re.match(arvados.util.keep_locator_pattern, manifest_locator_or_text):\n                self._manifest_locator = manifest_locator_or_text\n            elif re.match(arvados.util.collection_uuid_pattern, manifest_locator_or_text):\n                self._manifest_locator = manifest_locator_or_text\n                if not self._has_local_collection_uuid():\n                    self._has_remote_blocks = True\n            elif re.match(arvados.util.manifest_pattern, manifest_locator_or_text):\n                self._manifest_text = manifest_locator_or_text\n                if '+R' in self._manifest_text:\n                    self._has_remote_blocks = True\n            else:\n                raise errors.ArgumentError(\n                    \"Argument to CollectionReader is not a manifest or a collection UUID\")\n\n            try:\n                self._populate()\n            except errors.SyntaxError as e:\n                raise errors.ArgumentError(\"Error processing manifest text: %s\", str(e)) from None\n\n    def storage_classes_desired(self) -> List[str]:\n        \"\"\"Get this collection's `storage_classes_desired` value\"\"\"\n        return self._storage_classes_desired or []\n\n    def root_collection(self) -> 'Collection':\n        return self\n\n    def get_properties(self) -> Properties:\n        \"\"\"Get this collection's properties\n\n        This method always returns a dict. If this collection object does not\n        have an associated API record, or that record does not have any\n        properties set, this method returns an empty dict.\n        \"\"\"\n        if self._api_response and self._api_response[\"properties\"]:\n            return self._api_response[\"properties\"]\n        else:\n            return {}\n\n    def get_trash_at(self) -> Optional[datetime.datetime]:\n        \"\"\"Get this collection's `trash_at` field\n\n        This method parses the `trash_at` field of the collection's API\n        record and returns a datetime from it. 
If that field is not set, or\n        this collection object does not have an associated API record,\n        returns None.\n        \"\"\"\n        if self._api_response and self._api_response[\"trash_at\"]:\n            try:\n                return ciso8601.parse_datetime(self._api_response[\"trash_at\"])\n            except ValueError:\n                return None\n        else:\n            return None\n\n    def stream_name(self) -> str:\n        return \".\"\n\n    def writable(self) -> bool:\n        return True\n\n    @synchronized\n    @retry_method\n    def update(\n            self,\n            other: Optional['Collection']=None,\n            num_retries: Optional[int]=None,\n    ) -> None:\n        \"\"\"Merge another collection's contents into this one\n\n        This method compares the manifest of this collection instance with\n        another, then updates this instance's manifest with changes from the\n        other, renaming files to flag conflicts where necessary.\n\n        When called without any arguments, this method reloads the collection's\n        API record, and updates this instance with any changes that have\n        appeared server-side. If this instance does not have a corresponding\n        API record, this method raises `arvados.errors.ArgumentError`.\n\n        Arguments:\n\n        * other: arvados.collection.Collection | None --- The collection\n          whose contents should be merged into this instance. When not\n          provided, this method reloads this collection's API record and\n          constructs a Collection object from it.  If this instance does not\n          have a corresponding API record, this method raises\n          `arvados.errors.ArgumentError`.\n\n        * num_retries: int | None --- The number of times to retry reloading\n          the collection's API record from the API server. If not specified,\n          uses the `num_retries` provided when this instance was constructed.\n
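\n        A minimal usage sketch (assumes `coll` was loaded by UUID from the\n        API server):\n\n            # Merge any changes that have been saved server-side since this\n            # object was loaded or last updated.\n            coll.update()\n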
        \"\"\"\n\n        token_refresh_period = 60*60\n        time_since_last_token_refresh = (time.time() - self._token_refresh_timestamp)\n        upstream_response = None\n\n        if other is None:\n            if self._manifest_locator is None:\n                raise errors.ArgumentError(\"`other` is None but collection does not have a manifest_locator uuid\")\n\n            if re.match(arvados.util.portable_data_hash_pattern, self._manifest_locator) and time_since_last_token_refresh < token_refresh_period:\n                return\n\n            upstream_response = self._my_api().collections().get(uuid=self._manifest_locator).execute(num_retries=num_retries)\n            other = CollectionReader(upstream_response[\"manifest_text\"], self._my_api())\n\n        if self.committed():\n            # 1st case, no local changes, content is the same\n            if self.portable_data_hash() == other.portable_data_hash() and time_since_last_token_refresh < token_refresh_period:\n                # No difference in content.  Remember the API record\n                # (metadata such as name or properties may have changed)\n                # but don't update the token refresh timestamp.\n                if upstream_response is not None:\n                    self._remember_api_response(upstream_response)\n                return\n\n            # 2nd case, no local changes, but either upstream changed\n            # or we want to refresh tokens.\n\n            self.apply(self.diff(other))\n            if upstream_response is not None:\n                self._remember_api_response(upstream_response)\n            self._update_token_timestamp()\n            self.set_committed(True)\n            return\n\n        # 3rd case, upstream changed, but we also have uncommitted\n        # changes that we want to incorporate so they don't get lost.\n\n        # _manifest_text stores the text from last time we received a\n        # record from the API server.  This is the state of the\n        # collection before our uncommitted changes.\n        baseline = Collection(self._manifest_text, self._my_api())\n\n        # Get the set of changes between our baseline and the other\n        # collection and apply them to self.\n        #\n        # If a file was modified in both 'self' and 'other', the\n        # 'apply' method keeps the contents of 'self' and creates a\n        # conflict file with the contents of 'other'.\n        self.apply(baseline.diff(other))\n\n        # Remember the new baseline manifest from the API server.\n        if upstream_response is not None:\n            self._remember_api_response(upstream_response)\n\n\n    @synchronized\n    def _my_api(self):\n        if self._api_client is None:\n            self._api_client = ThreadSafeAPIClient(self._config, version='v1')\n            if self._keep_client is None:\n                self._keep_client = self._api_client.keep\n        return self._api_client\n\n    @synchronized\n    def _my_keep(self):\n        if self._keep_client is None:\n            if self._api_client is None:\n                self._my_api()\n            else:\n                self._keep_client = KeepClient(api_client=self._api_client)\n        return self._keep_client\n\n    @synchronized\n    def _my_block_manager(self):\n        if self._block_manager is None:\n            copies = (self.replication_desired or\n                      self._my_api()._rootDesc.get('defaultCollectionReplication',\n                                                   2))\n            self._block_manager = _BlockManager(self._my_keep(),\n                                                copies=copies,\n                                                put_threads=self.put_threads,\n                                                num_retries=self.num_retries,\n                                                storage_classes_func=self.storage_classes_desired)\n        return self._block_manager\n\n    def _remember_api_response(self, response):\n        self._api_response = response\n        self._manifest_text = self._api_response['manifest_text']\n        self._portable_data_hash = self._api_response['portable_data_hash']\n\n    def _update_token_timestamp(self):\n        self._token_refresh_timestamp = time.time()\n\n    def _populate_from_api_server(self):\n        # As in KeepClient itself, we must wait until the last\n        # possible moment to instantiate an API client, in order to\n        # avoid tripping up clients that don't have access to an API\n        # server.  
If we do build one, make sure our Keep client uses\n        # it.  If instantiation fails, we'll fall back to the except\n        # clause, just like any other Collection lookup\n        # failure. This raises an exception on error, or returns None on success.\n        self._remember_api_response(self._my_api().collections().get(\n            uuid=self._manifest_locator).execute(\n                num_retries=self.num_retries))\n\n        # If not overridden via kwargs, we should try to load the\n        # replication_desired and storage_classes_desired from the API server\n        if self.replication_desired is None:\n            self.replication_desired = self._api_response.get('replication_desired', None)\n        if self._storage_classes_desired is None:\n            self._storage_classes_desired = self._api_response.get('storage_classes_desired', None)\n\n    def _populate(self):\n        if self._manifest_text is None:\n            if self._manifest_locator is None:\n                return\n            else:\n                self._populate_from_api_server()\n        self._baseline_manifest = self._manifest_text\n        self._import_manifest(self._manifest_text)\n\n    def _has_collection_uuid(self):\n        return self._manifest_locator is not None and re.match(arvados.util.collection_uuid_pattern, self._manifest_locator)\n\n    def _has_local_collection_uuid(self):\n        return self._has_collection_uuid() and \\\n            self._my_api()._rootDesc['uuidPrefix'] == self._manifest_locator.split('-')[0]\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        \"\"\"Exit a context with this collection instance\n\n        If no exception was raised inside the context block, and this\n        collection is writable and has a corresponding API record, that\n        record will be updated to match the state of this instance at the end\n        of the block.\n        \"\"\"\n        if exc_type is None:\n            if self.writable() and self._has_collection_uuid():\n                self.save()\n        self.stop_threads()\n\n    def stop_threads(self) -> None:\n        \"\"\"Stop background Keep upload/download threads\"\"\"\n        if self._block_manager is not None:\n            self._block_manager.stop_threads()\n\n    @synchronized\n    def manifest_locator(self) -> Optional[str]:\n        \"\"\"Get this collection's manifest locator, if any\n\n        * If this collection instance is associated with an API record with a\n          UUID, return that.\n        * Otherwise, if this collection instance was loaded from an API record\n          by portable data hash, return that.\n        * Otherwise, return `None`.\n        \"\"\"\n        return self._manifest_locator\n\n    @synchronized\n    def clone(\n            self,\n            new_parent: Optional['Collection']=None,\n            new_name: Optional[str]=None,\n            readonly: bool=False,\n            new_config: Optional[Mapping[str, str]]=None,\n    ) -> 'Collection':\n        \"\"\"Create a Collection object with the same contents as this instance\n\n        This method creates a new Collection object with contents that match\n        this instance's. 
The new collection will not be associated with any API\n        record.\n\n        Arguments:\n\n        * new_parent: arvados.collection.Collection | None --- This value is\n          passed to the new Collection's constructor as the `parent`\n          argument.\n\n        * new_name: str | None --- This value is unused.\n\n        * readonly: bool --- If this value is true, this method constructs and\n          returns a `CollectionReader`. Otherwise, it returns a mutable\n          `Collection`. Default `False`.\n\n        * new_config: Mapping[str, str] | None --- This value is passed to the\n          new Collection's constructor as `apiconfig`. If no value is provided,\n          defaults to the configuration passed to this instance's constructor.\n        \"\"\"\n        if new_config is None:\n            new_config = self._config\n        if readonly:\n            newcollection = CollectionReader(parent=new_parent, apiconfig=new_config)\n        else:\n            newcollection = Collection(parent=new_parent, apiconfig=new_config)\n\n        newcollection._clonefrom(self)\n        return newcollection\n\n    @synchronized\n    def api_response(self) -> Optional[Dict[str, Any]]:\n        \"\"\"Get this instance's associated API record\n\n        If this Collection instance has an associated API record, return it.\n        Otherwise, return `None`.\n        \"\"\"\n        return self._api_response\n\n    def find_or_create(\n            self,\n            path: str,\n            create_type: CreateType,\n    ) -> CollectionItem:\n        if path == \".\":\n            return self\n        else:\n            return super(Collection, self).find_or_create(path[2:] if path.startswith(\"./\") else path, create_type)\n\n    def find(self, path: str) -> CollectionItem:\n        if path == \".\":\n            return self\n        else:\n            return super(Collection, self).find(path[2:] if path.startswith(\"./\") else path)\n\n    def remove(self, path: str, recursive: bool=False) -> None:\n        if path == \".\":\n            raise errors.ArgumentError(\"Cannot remove '.'\")\n        else:\n            return super(Collection, self).remove(path[2:] if path.startswith(\"./\") else path, recursive)\n\n    @must_be_writable\n    @synchronized\n    @retry_method\n    def save(\n            self,\n            properties: Optional[Properties]=None,\n            storage_classes: Optional[StorageClasses]=None,\n            trash_at: Optional[datetime.datetime]=None,\n            merge: bool=True,\n            num_retries: Optional[int]=None,\n            preserve_version: bool=False,\n    ) -> str:\n        \"\"\"Save collection to an existing API record\n\n        This method updates the instance's corresponding API record to match\n        the instance's state. If this instance does not have a corresponding API\n        record yet, raises `AssertionError`. (To create a new API record, use\n        `Collection.save_new`.) This method returns the saved collection\n        manifest.\n\n        Arguments:\n\n        * properties: dict[str, Any] | None --- If provided, the API record will\n          be updated with these properties. 
Note this will completely replace\n          any existing properties.\n\n        * storage_classes: list[str] | None --- If provided, the API record will\n          be updated with this value in the `storage_classes_desired` field.\n          This value will also be saved on the instance and used for any\n          changes that follow.\n\n        * trash_at: datetime.datetime | None --- If provided, the API record\n          will be updated with this value in the `trash_at` field.\n\n        * merge: bool --- If `True` (the default), this method will first\n          reload this collection's API record, and merge any new contents into\n          this instance before saving changes. See `Collection.update` for\n          details.\n\n        * num_retries: int | None --- The number of times to retry reloading\n          the collection's API record from the API server. If not specified,\n          uses the `num_retries` provided when this instance was constructed.\n\n        * preserve_version: bool --- This value will be passed directly\n          to the underlying API call. If `True`, the Arvados API will\n          preserve the versions of this collection both immediately before\n          and after the update. If `True` when the API server is not\n          configured with collection versioning, this method raises\n          `arvados.errors.ArgumentError`.\n
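\n        A minimal usage sketch (assumes `coll` was loaded from an existing\n        API record and modified through this object):\n\n            updated_manifest = coll.save()\n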
        \"\"\"\n        if properties and type(properties) is not dict:\n            raise errors.ArgumentError(\"properties must be dictionary type.\")\n\n        if storage_classes and type(storage_classes) is not list:\n            raise errors.ArgumentError(\"storage_classes must be list type.\")\n        if storage_classes:\n            self._storage_classes_desired = storage_classes\n\n        if trash_at and type(trash_at) is not datetime.datetime:\n            raise errors.ArgumentError(\"trash_at must be datetime type.\")\n\n        if preserve_version and not self._my_api().config()['Collections'].get('CollectionVersioning', False):\n            raise errors.ArgumentError(\"preserve_version is not supported when CollectionVersioning is not enabled.\")\n\n        body={}\n        if properties:\n            body[\"properties\"] = properties\n        if self.storage_classes_desired():\n            body[\"storage_classes_desired\"] = self.storage_classes_desired()\n        if trash_at:\n            t = trash_at.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n            body[\"trash_at\"] = t\n        if preserve_version:\n            body[\"preserve_version\"] = preserve_version\n\n        if not self.committed():\n            if self._has_remote_blocks:\n                # Copy any remote blocks to the local cluster.\n                self._copy_remote_blocks(remote_blocks={})\n                self._has_remote_blocks = False\n            if not self._has_collection_uuid():\n                raise AssertionError(\"Collection manifest_locator is not a collection uuid.  Use save_new() for new collections.\")\n            elif not self._has_local_collection_uuid():\n                raise AssertionError(\"Collection manifest_locator is from a remote cluster. Use save_new() to save it on the local cluster.\")\n\n            self._my_block_manager().commit_all()\n\n            if merge:\n                self.update()\n\n            text = self.manifest_text(strip=False)\n            body['manifest_text'] = text\n\n            self._remember_api_response(self._my_api().collections().update(\n                uuid=self._manifest_locator,\n                body=body\n                ).execute(num_retries=num_retries))\n            self.set_committed(True)\n        elif body:\n            self._remember_api_response(self._my_api().collections().update(\n                uuid=self._manifest_locator,\n                body=body\n                ).execute(num_retries=num_retries))\n\n        return self._manifest_text\n\n\n    @must_be_writable\n    @synchronized\n    @retry_method\n    def save_new(\n            self,\n            name: Optional[str]=None,\n            create_collection_record: bool=True,\n            owner_uuid: Optional[str]=None,\n            properties: Optional[Properties]=None,\n            storage_classes: Optional[StorageClasses]=None,\n            trash_at: Optional[datetime.datetime]=None,\n            ensure_unique_name: bool=False,\n            num_retries: Optional[int]=None,\n            preserve_version: bool=False,\n    ):\n        \"\"\"Save collection to a new API record\n\n        This method finishes uploading new data blocks and (optionally)\n        creates a new API collection record with the provided data. If a new\n        record is created, this instance becomes associated with that record\n        for future updates like `save()`. This method returns the saved\n        collection manifest.\n\n        Arguments:\n\n        * name: str | None --- The `name` field to use on the new collection\n          record. If not specified, a generic default name is generated.\n\n        * create_collection_record: bool --- If `True` (the default), creates a\n          collection record on the API server. If `False`, the method finishes\n          all data uploads and only returns the resulting collection manifest\n          without sending it to the API server.\n\n        * owner_uuid: str | None --- The `owner_uuid` field to use on the\n          new collection record.\n\n        * properties: dict[str, Any] | None --- The `properties` field to use on\n          the new collection record.\n\n        * storage_classes: list[str] | None --- The\n          `storage_classes_desired` field to use on the new collection record.\n\n        * trash_at: datetime.datetime | None --- The `trash_at` field to use\n          on the new collection record.\n\n        * ensure_unique_name: bool --- This value is passed directly to the\n          Arvados API when creating the collection record. If `True`, the API\n          server may modify the submitted `name` to ensure the collection's\n          `name`+`owner_uuid` combination is unique. If `False` (the default),\n          if a collection already exists with this same `name`+`owner_uuid`\n          combination, creating a collection record will raise a validation\n          error.\n\n        * num_retries: int | None --- The number of times to retry reloading\n          the collection's API record from the API server. If not specified,\n          uses the `num_retries` provided when this instance was constructed.\n\n        * preserve_version: bool --- This value will be passed directly\n          to the underlying API call. If `True`, the Arvados API will\n          preserve the versions of this collection both immediately before\n          and after the update. If `True` when the API server is not\n          configured with collection versioning, this method raises\n          `arvados.errors.ArgumentError`.\n
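\n        A minimal usage sketch (the owner UUID is a made-up placeholder):\n\n            coll.save_new(\n                name='my analysis outputs',\n                owner_uuid='zzzzz-j7d0g-0123456789abcde',\n            )\n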
        \"\"\"\n        if properties and type(properties) is not dict:\n            raise errors.ArgumentError(\"properties must be dictionary type.\")\n\n        if storage_classes and type(storage_classes) is not list:\n            raise errors.ArgumentError(\"storage_classes must be list type.\")\n\n        if trash_at and type(trash_at) is not datetime.datetime:\n            raise errors.ArgumentError(\"trash_at must be datetime type.\")\n\n        if preserve_version and not self._my_api().config()['Collections'].get('CollectionVersioning', False):\n            raise errors.ArgumentError(\"preserve_version is not supported when CollectionVersioning is not enabled.\")\n\n        if self._has_remote_blocks:\n            # Copy any remote blocks to the local cluster.\n            self._copy_remote_blocks(remote_blocks={})\n            self._has_remote_blocks = False\n\n        if storage_classes:\n            self._storage_classes_desired = storage_classes\n\n        self._my_block_manager().commit_all()\n        text = self.manifest_text(strip=False)\n\n        if create_collection_record:\n            if name is None:\n                name = \"New collection\"\n                ensure_unique_name = True\n\n            body = {\"manifest_text\": text,\n                    \"name\": name,\n                    \"replication_desired\": self.replication_desired}\n            if owner_uuid:\n                body[\"owner_uuid\"] = owner_uuid\n            if properties:\n                body[\"properties\"] = properties\n            if self.storage_classes_desired():\n                body[\"storage_classes_desired\"] = self.storage_classes_desired()\n            if trash_at:\n                t = trash_at.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n                body[\"trash_at\"] = t\n            if preserve_version:\n                body[\"preserve_version\"] = preserve_version\n\n            self._remember_api_response(self._my_api().collections().create(ensure_unique_name=ensure_unique_name, body=body).execute(num_retries=num_retries))\n            self._manifest_locator = self._api_response[\"uuid\"]\n            self.set_committed(True)\n\n        return text\n\n    _token_re = re.compile(r'(\\S+)(\\s+|$)')\n    _block_re = re.compile(r'[0-9a-f]{32}\\+(\\d+)(\\+\\S+)*')\n    _segment_re = re.compile(r'(\\d+):(\\d+):(\\S+)')\n\n    def _unescape_manifest_path(self, path):\n        return re.sub(r'\\\\([0-3][0-7][0-7])', lambda m: chr(int(m.group(1), 8)), path)\n\n    @synchronized\n    def _import_manifest(self, manifest_text):\n        \"\"\"Import a manifest into a `Collection`.\n\n        :manifest_text:\n          The manifest text to import from.\n\n        \"\"\"\n        if len(self) > 0:\n            raise errors.ArgumentError(\"Can only import manifest into an empty collection\")\n\n        STREAM_NAME = 0\n        BLOCKS = 1\n        SEGMENTS = 2\n\n        stream_name = None\n        state = STREAM_NAME\n\n        for token_and_separator in self._token_re.finditer(manifest_text):\n            tok = token_and_separator.group(1)\n            sep = token_and_separator.group(2)\n\n            if state == STREAM_NAME:\n                # starting a new stream\n                stream_name = 
self._unescape_manifest_path(tok)\n                blocks = []\n                segments = []\n                streamoffset = 0\n                state = BLOCKS\n                self.find_or_create(stream_name, COLLECTION)\n                continue\n\n            if state == BLOCKS:\n                block_locator = self._block_re.match(tok)\n                if block_locator:\n                    blocksize = int(block_locator.group(1))\n                    blocks.append(streams.Range(tok, streamoffset, blocksize, 0))\n                    streamoffset += blocksize\n                else:\n                    state = SEGMENTS\n\n            if state == SEGMENTS:\n                file_segment = self._segment_re.match(tok)\n                if file_segment:\n                    pos = int(file_segment.group(1))\n                    size = int(file_segment.group(2))\n                    name = self._unescape_manifest_path(file_segment.group(3))\n                    if name.split('/')[-1] == '.':\n                        # placeholder for persisting an empty directory, not a real file\n                        if len(name) > 2:\n                            self.find_or_create(os.path.join(stream_name, name[:-2]), COLLECTION)\n                    else:\n                        filepath = os.path.join(stream_name, name)\n                        try:\n                            afile = self.find_or_create(filepath, FILE)\n                        except IOError as e:\n                            if e.errno == errno.ENOTDIR:\n                                raise errors.SyntaxError(\"Dir part of %s conflicts with file of the same name.\", filepath) from None\n                            else:\n                                raise e from None\n                        if isinstance(afile, ArvadosFile):\n                            afile.add_segment(blocks, pos, size)\n                        else:\n                            raise errors.SyntaxError(\"File %s conflicts with stream of the same name.\", filepath)\n                else:\n                    # error!\n                    raise errors.SyntaxError(\"Invalid manifest format, expected file segment but did not match format: '%s'\" % tok)\n\n            if sep == \"\\n\":\n                stream_name = None\n                state = STREAM_NAME\n\n        self._update_token_timestamp()\n        self.set_committed(True)\n\n    @synchronized\n    def notify(\n            self,\n            event: ChangeType,\n            collection: 'RichCollectionBase',\n            name: str,\n            item: CollectionItem,\n    ) -> None:\n        if self._callback:\n            self._callback(event, collection, name, item)\n\n\nclass Subcollection(RichCollectionBase):\n    \"\"\"Read and manipulate a stream/directory within an Arvados collection\n\n    This class represents a single stream (like a directory) within an Arvados\n    `Collection`. 
It is returned by `Collection.find` and provides the same API.\n    Operations that work on the API collection record propagate to the parent\n    `Collection` object.\n    \"\"\"\n\n    def __init__(self, parent, name):\n        super(Subcollection, self).__init__(parent)\n        self.lock = self.root_collection().lock\n        self._manifest_text = None\n        self.name = name\n        self.num_retries = parent.num_retries\n\n    def root_collection(self) -> 'Collection':\n        return self.parent.root_collection()\n\n    def writable(self) -> bool:\n        return self.root_collection().writable()\n\n    def _my_api(self):\n        return self.root_collection()._my_api()\n\n    def _my_keep(self):\n        return self.root_collection()._my_keep()\n\n    def _my_block_manager(self):\n        return self.root_collection()._my_block_manager()\n\n    def stream_name(self) -> str:\n        return os.path.join(self.parent.stream_name(), self.name)\n\n    @synchronized\n    def clone(\n            self,\n            new_parent: Optional['Collection']=None,\n            new_name: Optional[str]=None,\n    ) -> 'Subcollection':\n        c = Subcollection(new_parent, new_name)\n        c._clonefrom(self)\n        return c\n\n    @must_be_writable\n    @synchronized\n    def _reparent(self, newparent, newname):\n        self.set_committed(False)\n        self.flush()\n        self.parent.remove(self.name, recursive=True)\n        self.parent = newparent\n        self.name = newname\n        self.lock = self.parent.root_collection().lock\n\n    @synchronized\n    def _get_manifest_text(self, stream_name, strip, normalize, only_committed=False):\n        \"\"\"Encode empty directories by using an \\056-named (\".\") empty file\"\"\"\n        if len(self._items) == 0:\n            return \"%s %s 0:0:\\\\056\\n\" % (\n                streams.escape(stream_name), config.EMPTY_BLOCK_LOCATOR)\n        return super(Subcollection, self)._get_manifest_text(stream_name,\n                                                             strip, normalize,\n                                                             only_committed)\n\n\nclass CollectionReader(Collection):\n    \"\"\"Read-only `Collection` subclass\n\n    This class will never create or update any API collection records. You can\n    use this class for additional code safety when you only need to read\n    existing collections.\n    \"\"\"\n    def __init__(self, manifest_locator_or_text, *args, **kwargs):\n        self._in_init = True\n        super(CollectionReader, self).__init__(manifest_locator_or_text, *args, **kwargs)\n        self._in_init = False\n\n        # Forgo any locking since it should never change once initialized.\n        self.lock = NoopLock()\n\n        # Backwards compatibility with old CollectionReader\n        # all_streams() and all_files()\n        self._streams = None\n\n    def writable(self) -> bool:\n        return self._in_init\n"
  },
  {
    "path": "sdk/python/arvados/commands/__init__.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Arvados CLI commands\n\nThis module implements the CLI tools that are shipped with the Arvados Python\nSDK. Nothing in this module is intended to be part of the public-facing\nSDK API. Classes and functions in this module may be changed or removed at any\ntime.\n\n@private\n\"\"\"\n"
  },
  {
    "path": "sdk/python/arvados/commands/_util.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport dataclasses\nimport errno\nimport json\nimport logging\nimport operator\nimport os\nimport re\nimport signal\nimport sys\nimport functools\n\nimport typing as t\n\nfrom .. import _internal\n\nFILTER_STR_RE = re.compile(r'''\n^\\(\n\\ *(\\w+)\n\\ *(<|<=|=|>=|>)\n\\ *(\\w+)\n\\ *\\)$\n''', re.ASCII | re.VERBOSE)\n\nT = t.TypeVar('T')\n\n@dataclasses.dataclass(unsafe_hash=True)\nclass RangedValue(t.Generic[T]):\n    \"\"\"Validate that an argument string is within a valid range of values\"\"\"\n    parse_func: t.Callable[[str], T]\n    valid_range: t.Container[T]\n\n    def __call__(self, s: str) -> T:\n        value = self.parse_func(s)\n        if value in self.valid_range:\n            return value\n        else:\n            raise ValueError(f\"{value!r} is not a valid value\")\n\n\n@dataclasses.dataclass(unsafe_hash=True)\nclass UniqueSplit(t.Generic[T]):\n    \"\"\"Parse a string into a list of unique values\"\"\"\n    split: t.Callable[[str], t.Iterable[str]]=operator.methodcaller('split', ',')\n    clean: t.Callable[[str], str]=operator.methodcaller('strip')\n    check: t.Callable[[str], bool]=bool\n\n    def __call__(self, s: str) -> T:\n        return list(_internal.uniq(_internal.parse_seq(s, self.split, self.clean, self.check)))\n\n\nretry_opt = argparse.ArgumentParser(add_help=False)\nretry_opt.add_argument(\n    '--retries',\n    type=RangedValue(int, range(0, sys.maxsize)),\n    default=10,\n    help=\"\"\"Maximum number of times to retry server requests that encounter\ntemporary failures (e.g., server down).  Default %(default)r.\n\"\"\")\n\ndef _ignore_error(error):\n    return None\n\ndef _raise_error(error):\n    raise error\n\nCAUGHT_SIGNALS = [signal.SIGINT, signal.SIGQUIT, signal.SIGTERM]\n\ndef exit_signal_handler(sigcode, frame):\n    logging.getLogger('arvados').error(\"Caught signal {}, exiting.\".format(sigcode))\n    sys.exit(-sigcode)\n\ndef install_signal_handlers():\n    global orig_signal_handlers\n    orig_signal_handlers = {sigcode: signal.signal(sigcode, exit_signal_handler)\n                            for sigcode in CAUGHT_SIGNALS}\n\ndef restore_signal_handlers():\n    for sigcode, orig_handler in orig_signal_handlers.items():\n        signal.signal(sigcode, orig_handler)\n\ndef validate_filters(filters):\n    \"\"\"Validate user-provided filters\n\n    This function validates that a user-defined object represents valid\n    Arvados filters that can be passed to an API client: that it's a list of\n    3-element lists with the field name and operator given as strings. If any\n    of these conditions are not true, it raises a ValueError with details about\n    the problem.\n\n    It returns validated filters. Currently the provided filters are returned\n    unmodified. 
Future versions of this function may clean up the filters with\n    \"obvious\" type conversions, so callers SHOULD use the returned value for\n    Arvados API calls.\n    \"\"\"\n    if not isinstance(filters, list):\n        raise ValueError(f\"filters are not a list: {filters!r}\")\n    for index, f in enumerate(filters):\n        if isinstance(f, str):\n            match = FILTER_STR_RE.fullmatch(f)\n            if match is None:\n                raise ValueError(f\"filter at index {index} has invalid syntax: {f!r}\")\n            s, op, o = match.groups()\n            if s[0].isdigit():\n                raise ValueError(f\"filter at index {index} has invalid syntax: bad field name {s!r}\")\n            continue\n        elif not isinstance(f, list):\n            raise ValueError(f\"filter at index {index} is not a string or list: {f!r}\")\n        try:\n            s, op, o = f\n        except ValueError:\n            raise ValueError(\n                f\"filter at index {index} does not have three items (field name, operator, operand): {f!r}\",\n            ) from None\n        if not isinstance(s, str):\n            raise ValueError(f\"filter at index {index} field name is not a string: {s!r}\")\n        if not isinstance(op, str):\n            raise ValueError(f\"filter at index {index} operator is not a string: {op!r}\")\n    return filters\n\n\nclass JSONStringArgument:\n    \"\"\"Callable JSON input parser with post-parsing validation function.\n\n    This is designed to be used as an argparse argument type. Typical usage\n    looks like:\n\n        parser = argparse.ArgumentParser()\n        parser.add_argument('--object', type=JSONStringArgument(), ...)\n\n    When called on one string value, returns the result of parsing the value as\n    JSON.\n\n    If the parsing fails, or if the parsing succeeds but the result fails the\n    further validation (if any), raises argparse.ArgumentTypeError with a\n    suitable error message that will be printed to stderr by argparse.\n\n    The behavior may be further customized by providing the \"validator\" or\n    \"loader\" callback functions; see the __init__ method documentation for\n    details.\n\n    By default, when initialized without any keyword arguments, it functions as\n    a simple JSON loader.\n    \"\"\"\n    def __init__(\n        self,\n        validator: t.Optional[t.Callable[[t.Any], t.Any]] = None,\n        loader: t.Callable[[str], t.Any] = json.loads,\n        pretty_name: str = \"JSON\"\n    ):\n        \"\"\"Keyword arguments:\n\n        * validator: callable --- optional callable that takes the JSON-parsing\n          result (Python object) as input and performs additional validation\n          after JSON-parsing. It should raise TypeError or ValueError to signal\n          validation failure, and return the validated object (possibly\n          modified) when validation succeeds. Its return value will become the\n          return value of __call__() (i.e. type conversion for the input\n          argument value). In addition, it may raise argparse.ArgumentTypeError\n          directly for finer-grained control of messaging.\n\n        * loader: callable --- optional callable that is used to load the\n          value passed to __call__(). By default, json.loads is used, but you\n          may supply your own loader to handle exceptions. 
The loader should\n          raise ValueError (of which json.JSONDecodeError is a subtype) to\n          signal failure to handle the input value, or raise\n          argparse.ArgumentTypeError directly for finer-grained control of\n          messaging.\n\n        * pretty_name: str --- used by argparse to pretty-print the error\n          message when the input fails validation. It should be a brief\n          human-readable name for the kind of value that the argument takes.\n          Default: \"JSON\".\n        \"\"\"\n        self.loader = loader\n        self.post_validator = validator\n        self.pretty_name = pretty_name or \"JSON\"\n\n    def __call__(self, value: str):\n        failure = None\n        try:\n            retval = self.loader(value)\n        except ValueError as err:  # This covers json.JSONDecodeError too.\n            failure = err\n        else:\n            if self.post_validator is not None:\n                try:\n                    retval = self.post_validator(retval)\n                except (ValueError, TypeError) as err:\n                    failure = err\n        if failure is not None:\n            msg = f\"{value!r} is not valid {self.pretty_name}\"\n            if str(failure):\n                msg += f\": {failure!s}\"\n            raise argparse.ArgumentTypeError(msg) from None\n        return retval\n\n\ndef json_or_file_loader(value: str):\n    \"\"\"Loader function that accepts either a JSON string, or a file whose\n    content can be read and parsed as JSON (including \"-\" which represents the\n    standard input). This is intended to be used as a custom loader function\n    for JSONStringArgument.\n    \"\"\"\n    fh = None\n    no_file_error = None\n    if value == \"-\":\n        fh = sys.stdin\n    else:\n        try:\n            fh = open(value, \"rb\")\n        except FileNotFoundError as err:\n            no_file_error = err\n        except ValueError:\n            # Input contains character that cannot appear in path; the\n            # reasonable explanation is that it's not intended as a path.\n            pass\n        except OSError as err:\n            # Terminate early; str(err) will contain path.\n            raise argparse.ArgumentTypeError(str(err)) from None\n\n    # Handle as path.\n    if fh is not None:\n        # Corner case first.\n        try:\n            retval = json.loads(value)\n        except json.JSONDecodeError:\n            pass\n        else:\n            assert value != \"-\"\n            fh.close()\n            raise argparse.ArgumentTypeError(\n                f\"{value!r} is both valid JSON and a readable file.\"\n                \" Please consider renaming the file.\"\n            ) from None\n        # Main handling logic.\n        try:\n            retval = json.load(fh)\n        except json.JSONDecodeError as json_error:\n            if value == \"-\":\n                msg = \"Content of standard input is not valid JSON\"\n            else:\n                msg = f\"Content of file {value!r} is not valid JSON\"\n            msg += f\": {json_error!s}\"\n            raise argparse.ArgumentTypeError(msg) from None\n        else:\n            return retval\n        finally:\n            if value != \"-\":\n                fh.close()\n\n    # Handle as string.\n    try:\n        retval = json.loads(value)\n    except json.JSONDecodeError as json_error:\n        if no_file_error is not None:\n            # In user-facing error message, mention the fact that we've tried\n            # but failed to find it 
as a file (which may help discover a typo or a\n            # moved/deleted file when a path was intended).\n            msg = (\n                f\"{value!r} is not a readable file or valid JSON\"\n                f\" [JSON decoding error: {json_error!s}]\"\n            )\n        else:\n            msg = f\"{value!r} is not valid JSON: {json_error!s}\"\n        raise argparse.ArgumentTypeError(msg) from None\n    else:\n        return retval\n\n\nJSONArgument = functools.partial(\n    JSONStringArgument, loader=json_or_file_loader\n)\nJSONArgument.__doc__ = \"\"\"\nParse a JSON file from a command-line argument string or path\n\nJSONArgument objects can be called with a string and return an arbitrary\nobject. First it will try to decode the string as JSON. If that fails, it will\ntry to open a file at the path named by the string, and decode its content as\nJSON. Or, if the input is the string \"-\" (a single dash), it will read the\nstandard input and try to decode the content as JSON.\n\nYou can construct JSONArgument with an optional validation function. If given,\nit is called with the Python object decoded from the input JSON string. The\nreturn value of the validation function replaces the original JSON-decoded\nobject. The validation function should raise ValueError or TypeError,\npreferably with a suitable message, if the object fails validation.\nAlternatively, it can directly raise argparse.ArgumentTypeError for\nfiner-grained error message control.\n\nTypical usage with argparse looks like:\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        '--object',\n        type=JSONArgument(/...optional validation function.../),\n        ...\n    )\n\nPlease see the documentation for JSONStringArgument for more details about the\noptional validation function.\n\"\"\"\n"
  },
  {
    "path": "sdk/python/arvados/commands/arv_copy.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# arv-copy [--recursive] [--no-recursive] object-uuid\n#\n# Copies an object from Arvados instance src to instance dst.\n#\n# By default, arv-copy recursively copies any dependent objects\n# necessary to make the object functional in the new instance\n# (e.g. for a workflow, arv-copy copies the workflow,\n# input collections, and docker images). If\n# --no-recursive is given, arv-copy copies only the single record\n# identified by object-uuid.\n#\n# The user must have configuration files {src}.conf and\n# {dst}.conf in a standard configuration directory with valid login credentials\n# for instances src and dst.  If either of these files is not found,\n# arv-copy will issue an error.\n\nimport argparse\nimport contextlib\nimport getpass\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport logging\nimport tempfile\nimport urllib.parse\nimport io\nimport json\nimport queue\nimport threading\nimport errno\n\nimport httplib2.error\nimport googleapiclient\n\nimport arvados\nimport arvados.api\nimport arvados.config\nimport arvados.keep\nimport arvados.util\nimport arvados.commands._util as arv_cmd\nimport arvados.commands.keepdocker\nfrom arvados.logging import log_handler\n\nfrom arvados._internal import basedirs, http_to_keep, s3_to_keep, to_keep_util\nfrom arvados._version import __version__\n\nCOMMIT_HASH_RE = re.compile(r'^[0-9a-f]{1,40}$')\n\narvlogger = logging.getLogger('arvados')\nkeeplogger = logging.getLogger('arvados.keep')\nlogger = logging.getLogger('arvados.arv-copy')\n\n# Set this up so connection errors get logged.\ngoogleapi_logger = logging.getLogger('googleapiclient.http')\n\n# List of collections that have been copied in this session, and their\n# destination collection UUIDs.\ncollections_copied = {}\n\n# Set of (repository, script_version) two-tuples of commits copied in git.\nscripts_copied = set()\n\n# The owner_uuid of the object being copied\nsrc_owner_uuid = None\n\ndef main(arguments=None):\n    copy_opts = argparse.ArgumentParser(arguments, add_help=False)\n\n    copy_opts.add_argument(\n        '--version', action='version', version=\"%s %s\" % (sys.argv[0], __version__),\n        help='Print version and exit.')\n    copy_opts.add_argument(\n        '-v', '--verbose', dest='verbose', action='store_true',\n        help='Verbose output.')\n    copy_opts.add_argument(\n        '--progress', dest='progress', action='store_true',\n        help='Report progress on copying collections. 
(default)')\n    copy_opts.add_argument(\n        '--no-progress', dest='progress', action='store_false',\n        help='Do not report progress on copying collections.')\n    copy_opts.add_argument(\n        '-f', '--force', dest='force', action='store_true',\n        help='Perform copy even if the object appears to exist at the remote destination.')\n    copy_opts.add_argument(\n        '--src', dest='source_arvados',\n        help=\"\"\"\nClient configuration location for the source Arvados cluster.\nMay be either a configuration file path, or a plain identifier like `foo`\nto search for a configuration file `foo.conf` under a systemd or XDG configuration directory.\nIf not provided, will search for a configuration file named after the cluster ID of the source object UUID.\n\"\"\",\n    )\n    copy_opts.add_argument(\n        '--dst', dest='destination_arvados',\n        help=\"\"\"\nClient configuration location for the destination Arvados cluster.\nMay be either a configuration file path, or a plain identifier like `foo`\nto search for a configuration file `foo.conf` under a systemd or XDG configuration directory.\nIf not provided, will use the default client configuration from the environment or `settings.conf`.\n\"\"\",\n    )\n    copy_opts.add_argument(\n        '--recursive', dest='recursive', action='store_true',\n        help='Recursively copy any dependencies for this object, and subprojects. (default)')\n    copy_opts.add_argument(\n        '--no-recursive', dest='recursive', action='store_false',\n        help='Do not copy any dependencies or subprojects.')\n    copy_opts.add_argument(\n        '--project-uuid', dest='project_uuid',\n        help='The UUID of the project at the destination to which the collection or workflow should be copied.')\n    copy_opts.add_argument(\n        '--replication',\n        type=arv_cmd.RangedValue(int, range(1, sys.maxsize)),\n        metavar='N',\n        help=\"\"\"\nNumber of replicas per storage class for the copied collections at the destination.\nIf not provided (or if provided with an invalid value),\nuse the destination's default replication-level setting (if found),\nor the fallback value 2.\n\"\"\")\n    copy_opts.add_argument(\n        '--storage-classes',\n        type=arv_cmd.UniqueSplit(),\n        help='Comma-separated list of storage classes to be used when saving data to the destination Arvados instance.')\n    copy_opts.add_argument(\n        '--block-copy',\n        dest='keep_block_copy',\n        action='store_true',\n        help=\"\"\"Copy Keep blocks when copying collections (default).\"\"\",\n    )\n    copy_opts.add_argument(\n        '--no-block-copy',\n        dest='keep_block_copy',\n        action='store_false',\n        help=\"\"\"Do not copy Keep blocks when copying collections. 
Must have\nadministrator privileges on the destination cluster to create collections.\n\"\"\")\n\n    copy_opts.add_argument(\"--varying-url-params\", type=str, default=\"\",\n                        help=\"A comma-separated list of URL query parameters that should be ignored when storing HTTP URLs in Keep.\")\n\n    copy_opts.add_argument(\"--prefer-cached-downloads\", action=\"store_true\", default=False,\n                        help=\"If an HTTP URL is found in Keep, skip upstream URL freshness check (will not notice if the upstream has changed, but also not error if upstream is unavailable).\")\n    copy_opts.add_argument(\n        'object_uuid',\n        help='The UUID of the object to be copied.')\n\n    copy_opts.set_defaults(\n        # export_all_fields is used by external tools to make complete copies\n        # of Arvados records.\n        export_all_fields=False,\n        keep_block_copy=True,\n        progress=True,\n        recursive=True,\n    )\n\n    parser = argparse.ArgumentParser(\n        description='Copy a workflow, collection or project from one Arvados instance to another.  On success, the uuid of the copied object is printed to stdout.',\n        parents=[copy_opts, arv_cmd.retry_opt])\n    args = parser.parse_args(arguments)\n\n    if args.verbose:\n        arvlogger.setLevel(logging.DEBUG)\n    else:\n        arvlogger.setLevel(logging.INFO)\n        keeplogger.setLevel(logging.WARNING)\n\n    if not args.source_arvados and arvados.util.uuid_pattern.match(args.object_uuid):\n        args.source_arvados = args.object_uuid[:5]\n\n    if not args.destination_arvados and args.project_uuid:\n        args.destination_arvados = args.project_uuid[:5]\n\n    # Make sure errors trying to connect to clusters get logged.\n    googleapi_logger.setLevel(logging.WARN)\n    googleapi_logger.addHandler(log_handler)\n\n    # Create API clients for the source and destination instances\n    src_arv = api_for_instance(args.source_arvados, args.retries)\n    dst_arv = api_for_instance(args.destination_arvados, args.retries)\n\n    # Once we've successfully contacted the clusters, we probably\n    # don't want to see logging about retries (unless the user asked\n    # for verbose output).\n    if not args.verbose:\n        googleapi_logger.setLevel(logging.ERROR)\n\n    if src_arv.config()[\"ClusterID\"] == dst_arv.config()[\"ClusterID\"]:\n        logger.info(\"Copying within cluster %s\", src_arv.config()[\"ClusterID\"])\n    else:\n        logger.info(\"Source cluster is %s\", src_arv.config()[\"ClusterID\"])\n        logger.info(\"Destination cluster is %s\", dst_arv.config()[\"ClusterID\"])\n\n    if not args.project_uuid:\n        args.project_uuid = dst_arv.users().current().execute(num_retries=args.retries)[\"uuid\"]\n\n    # Identify the kind of object we have been given, and begin copying.\n    t = uuid_type(src_arv, args.object_uuid)\n\n    try:\n        if t == 'Collection':\n            set_src_owner_uuid(src_arv.collections(), args.object_uuid, args)\n            result = copy_collection(args.object_uuid,\n                                     src_arv, dst_arv,\n                                     args)\n        elif t == 'Workflow':\n            set_src_owner_uuid(src_arv.workflows(), args.object_uuid, args)\n            result = copy_workflow(args.object_uuid, src_arv, dst_arv, args)\n        elif t == 'Group':\n            set_src_owner_uuid(src_arv.groups(), args.object_uuid, args)\n            result = copy_project(args.object_uuid, src_arv, dst_arv, args.project_uuid, 
args)\n        elif t == 'httpURL' or t == 's3URL':\n            result = copy_from_url(args.object_uuid, src_arv, dst_arv, args)\n        else:\n            abort(\"cannot copy object {} of type {}\".format(args.object_uuid, t))\n    except Exception as e:\n        logger.error(\"%s\", e, exc_info=args.verbose)\n        exit(1)\n\n    if not result:\n        exit(1)\n\n    # If no exception was thrown and the response does not have an\n    # error_token field, presume success\n    if result is None or 'error_token' in result or 'uuid' not in result:\n        if result:\n            logger.error(\"API server returned an error result: {}\".format(result))\n        exit(1)\n\n    print(result['uuid'])\n\n    if result.get('partial_error'):\n        logger.warning(\"Warning: created copy with uuid {} but failed to copy some items: {}\".format(result['uuid'], result['partial_error']))\n        exit(1)\n\n    logger.info(\"Success: created copy with uuid {}\".format(result['uuid']))\n    exit(0)\n\ndef set_src_owner_uuid(resource, uuid, args):\n    global src_owner_uuid\n    c = resource.get(uuid=uuid).execute(num_retries=args.retries)\n    src_owner_uuid = c.get(\"owner_uuid\")\n\n# api_for_instance(instance_name, num_retries)\n#\n#     Creates an API client for the Arvados instance identified by\n#     instance_name.\n#\n#     If instance_name contains a slash, it is presumed to be a path\n#     (either relative or absolute) to a file with Arvados configuration\n#     settings.\n#\n#     Otherwise, it is presumed to be the name of a file in a standard\n#     configuration directory.\n#\ndef api_for_instance(instance_name, num_retries):\n    msg = []\n    if instance_name:\n        if '/' in instance_name:\n            config_file = instance_name\n        else:\n            dirs = basedirs.BaseDirectories('CONFIG')\n            config_file = next(dirs.search(f'{instance_name}.conf'), '')\n\n        arvados.api._reset_googleapiclient_logging()\n        try:\n            cfg = arvados.config.load(config_file)\n\n            if 'ARVADOS_API_HOST' in cfg and 'ARVADOS_API_TOKEN' in cfg:\n                api_is_insecure = (\n                    cfg.get('ARVADOS_API_HOST_INSECURE', '').lower() in set(\n                        ['1', 't', 'true', 'y', 'yes']))\n                return arvados.api('v1',\n                                     host=cfg['ARVADOS_API_HOST'],\n                                     token=cfg['ARVADOS_API_TOKEN'],\n                                     insecure=api_is_insecure,\n                                     num_retries=num_retries,\n                                     )\n            else:\n                msg.append('missing ARVADOS_API_HOST or ARVADOS_API_TOKEN for {} in config file {}'.format(instance_name, config_file))\n        except OSError as e:\n            if e.errno in (errno.EHOSTUNREACH, errno.ECONNREFUSED, errno.ECONNRESET, errno.ENETUNREACH):\n                verb = 'connect to instance from'\n            elif config_file:\n                verb = 'open'\n            else:\n                verb = 'find'\n                searchlist = \":\".join(str(p) for p in dirs.search_paths())\n                config_file = f'{instance_name}.conf in path {searchlist}'\n            msg.append((\"Could not {} config file {}: {}\").format(\n                       verb, config_file, e.strerror))\n        except (httplib2.error.HttpLib2Error, googleapiclient.errors.Error) as e:\n            msg.append(\"Failed to connect to instance {} at {}, error was {}\".format(instance_name, 
cfg['ARVADOS_API_HOST'], e))\n\n    arvados.api._reset_googleapiclient_logging()\n    default_api = None\n    default_instance = None\n    try:\n        default_api = arvados.api('v1', num_retries=num_retries)\n        default_instance = default_api.config()[\"ClusterID\"]\n    except ValueError:\n        pass\n    except (httplib2.error.HttpLib2Error, googleapiclient.errors.Error, OSError) as e:\n        msg.append(\"Failed to connect to default instance, error was {}\".format(e))\n\n    if default_api is not None and (not instance_name or instance_name == default_instance):\n        # Use default settings\n        return default_api\n\n    if instance_name and default_instance and instance_name != default_instance:\n        msg.append(\"Default credentials are for {} but need to connect to {}\".format(default_instance, instance_name))\n\n    for m in msg:\n        logger.error(m)\n\n    abort('Unable to find usable ARVADOS_API_HOST and ARVADOS_API_TOKEN')\n\n# Check if git is available\ndef check_git_availability():\n    try:\n        subprocess.run(\n            ['git', '--version'],\n            check=True,\n            stdout=subprocess.DEVNULL,\n        )\n    except FileNotFoundError:\n        abort('git command is not available. Please ensure git is installed.')\n\n\ndef filter_iter(arg):\n    \"\"\"Iterate a filter string-or-list.\n\n    Pass in a filter field that can either be a string or list.\n    This will iterate elements as if the field had been written as a list.\n    \"\"\"\n    if isinstance(arg, str):\n        yield arg\n    else:\n        yield from arg\n\ndef migrate_repository_filter(repo_filter, src_repository, dst_repository):\n    \"\"\"Update a single repository filter in-place for the destination.\n\n    If the filter checks that the repository is src_repository, it is\n    updated to check that the repository is dst_repository.  If it does\n    anything else, this function raises ValueError.\n    \"\"\"\n    if src_repository is None:\n        raise ValueError(\"component does not specify a source repository\")\n    elif dst_repository is None:\n        raise ValueError(\"no destination repository specified to update repository filter\")\n    elif repo_filter[1:] == ['=', src_repository]:\n        repo_filter[2] = dst_repository\n    elif repo_filter[1:] == ['in', [src_repository]]:\n        repo_filter[2] = [dst_repository]\n    else:\n        raise ValueError(\"repository filter is not a simple source match\")\n\ndef migrate_script_version_filter(version_filter):\n    \"\"\"Update a single script_version filter in-place for the destination.\n\n    Currently this function checks that all the filter operands are Git\n    commit hashes.  If they're not, it raises ValueError to indicate that\n    the filter is not portable.  
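For example (the hash\n    below is made up), ['script_version', 'in', ['1a2b3c4d']] passes, while\n    ['script_version', '=', 'main'] raises ValueError, since a branch name\n    does not identify the same commit on every cluster.  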
It could be extended to make other\n    transformations in the future.\n    \"\"\"\n    if not all(COMMIT_HASH_RE.match(v) for v in filter_iter(version_filter[2])):\n        raise ValueError(\"script_version filter is not limited to commit hashes\")\n\ndef attr_filtered(filter_, *attr_names):\n    \"\"\"Return True if filter_ applies to any of attr_names, else False.\"\"\"\n    return any((name == 'any') or (name in attr_names)\n               for name in filter_iter(filter_[0]))\n\n@contextlib.contextmanager\ndef exception_handler(handler, *exc_types):\n    \"\"\"If any exc_types are raised in the block, call handler on the exception.\"\"\"\n    try:\n        yield\n    except exc_types as error:\n        handler(error)\n\n\n# copy_workflow(wf_uuid, src, dst, args)\n#\n#    Copies a workflow identified by wf_uuid from src to dst.\n#\n#    If args.recursive is True, also copy any collections\n#      referenced in the workflow definition yaml.\n#\n#    The owner_uuid of the new workflow is set to any given\n#      project_uuid or the user who copied the template.\n#\n#    Returns the copied workflow object.\n#\ndef copy_workflow(wf_uuid, src, dst, args):\n    # fetch the workflow from the source instance\n    wf = src.workflows().get(uuid=wf_uuid).execute(num_retries=args.retries)\n\n    if not wf[\"definition\"]:\n        logger.warning(\"Workflow object {} has an empty or null definition, it won't do anything.\".format(wf_uuid))\n\n    # copy collections and docker images\n    if args.recursive and wf[\"definition\"]:\n        env = {\"ARVADOS_API_HOST\": urllib.parse.urlparse(src._rootDesc[\"rootUrl\"]).netloc,\n               \"ARVADOS_API_TOKEN\": src.api_token,\n               \"PATH\": os.environ[\"PATH\"]}\n        try:\n            result = subprocess.run(\n                [\"arvados-cwl-runner\", \"--quiet\", \"--print-keep-deps\", \"arvwf:\"+wf_uuid],\n                env=env,\n                stdout=subprocess.PIPE,\n                universal_newlines=True,\n            )\n        except FileNotFoundError:\n            no_arv_copy = True\n        else:\n            no_arv_copy = result.returncode == 2\n\n        if no_arv_copy:\n            raise Exception('Copying workflows requires arvados-cwl-runner 2.7.1 or later to be installed in PATH.')\n        elif result.returncode != 0:\n            raise Exception('There was an error getting Keep dependencies from workflow using arvados-cwl-runner --print-keep-deps')\n\n        locations = json.loads(result.stdout)\n\n        if locations:\n            copy_collections(locations, src, dst, args)\n\n    # copy the workflow itself\n    del wf['uuid']\n    wf['owner_uuid'] = args.project_uuid\n\n    existing = dst.workflows().list(filters=[[\"owner_uuid\", \"=\", args.project_uuid],\n                                             [\"name\", \"=\", wf[\"name\"]]]).execute()\n    if len(existing[\"items\"]) == 0:\n        return dst.workflows().create(body=wf).execute(num_retries=args.retries)\n    else:\n        return dst.workflows().update(uuid=existing[\"items\"][0][\"uuid\"], body=wf).execute(num_retries=args.retries)\n\n\ndef workflow_collections(obj, locations, docker_images):\n    if isinstance(obj, dict):\n        loc = obj.get('location', None)\n        if loc is not None:\n            if loc.startswith(\"keep:\"):\n                locations.append(loc[5:])\n\n        docker_image = obj.get('dockerImageId', None) or obj.get('dockerPull', None) or obj.get('acrContainerImage', None)\n        if docker_image is not None:\n            
ds = docker_image.split(\":\", 1)\n            tag = ds[1] if len(ds)==2 else 'latest'\n            docker_images[ds[0]] = tag\n\n        for x in obj:\n            workflow_collections(obj[x], locations, docker_images)\n    elif isinstance(obj, list):\n        for x in obj:\n            workflow_collections(x, locations, docker_images)\n\n# copy_collections(obj, src, dst, args)\n#\n#    Recursively copies all collections referenced by 'obj' from src\n#    to dst.  obj may be a dict or a list, in which case we run\n#    copy_collections on every value it contains. If it is a string,\n#    search it for any substring that matches a collection hash or uuid\n#    (this will find hidden references to collections like\n#      \"input0\": \"$(file 3229739b505d2b878b62aed09895a55a+142/HWI-ST1027_129_D0THKACXX.1_1.fastq)\")\n#\n#    Returns a copy of obj with any old collection uuids replaced by\n#    the new ones.\n#\ndef copy_collections(obj, src, dst, args):\n\n    def copy_collection_fn(collection_match):\n        \"\"\"Helper function for regex substitution: copies a single collection,\n        identified by the collection_match MatchObject, to the\n        destination.  Returns the destination collection uuid (or the\n        portable data hash if that's what src_id is).\n\n        \"\"\"\n        src_id = collection_match.group(0)\n        if src_id not in collections_copied:\n            dst_col = copy_collection(src_id, src, dst, args)\n            if src_id in [dst_col['uuid'], dst_col['portable_data_hash']]:\n                collections_copied[src_id] = src_id\n            else:\n                collections_copied[src_id] = dst_col['uuid']\n        return collections_copied[src_id]\n\n    if isinstance(obj, str):\n        # Copy any collections identified in this string to dst, replacing\n        # them with the dst uuids as necessary.\n        obj = arvados.util.portable_data_hash_pattern.sub(copy_collection_fn, obj)\n        obj = arvados.util.collection_uuid_pattern.sub(copy_collection_fn, obj)\n        return obj\n    elif isinstance(obj, dict):\n        return type(obj)((v, copy_collections(obj[v], src, dst, args))\n                         for v in obj)\n    elif isinstance(obj, list):\n        return type(obj)(copy_collections(v, src, dst, args) for v in obj)\n    return obj\n\n\ndef total_collection_size(manifest_text):\n    \"\"\"Return the total number of bytes in this collection (excluding\n    duplicate blocks).\"\"\"\n\n    total_bytes = 0\n    locators_seen = {}\n    for line in manifest_text.splitlines():\n        words = line.split()\n        for word in words[1:]:\n            try:\n                loc = arvados.KeepLocator(word)\n            except ValueError:\n                continue  # this word isn't a locator, skip it\n            if loc.md5sum not in locators_seen:\n                locators_seen[loc.md5sum] = True\n                total_bytes += loc.size\n\n    return total_bytes\n\ndef create_collection_from(c, src, dst, args):\n    \"\"\"Create a new collection record on dst, and copy Docker metadata if\n    available.\"\"\"\n\n    collection_uuid = c['uuid']\n    if args.export_all_fields:\n        body = c.copy()\n    else:\n        body = {key: c[key] for key in [\n            'description',\n            'manifest_text',\n            'name',\n            'portable_data_hash',\n            'properties',\n        ]}\n        if not body['name']:\n            body['name'] = f\"copied from {collection_uuid}\"\n        if args.storage_classes:\n            
body['storage_classes_desired'] = args.storage_classes\n        body['owner_uuid'] = args.project_uuid\n\n    dst_collection = dst.collections().create(body=body, ensure_unique_name=True).execute(num_retries=args.retries)\n\n    # Create docker_image_repo+tag and docker_image_hash links\n    # at the destination.\n    for link_class in (\"docker_image_repo+tag\", \"docker_image_hash\"):\n        docker_links = src.links().list(filters=[[\"head_uuid\", \"=\", collection_uuid], [\"link_class\", \"=\", link_class]]).execute(num_retries=args.retries)['items']\n\n        for src_link in docker_links:\n            body = {key: src_link[key]\n                    for key in ['link_class', 'name', 'properties']}\n            body['head_uuid'] = dst_collection['uuid']\n            body['owner_uuid'] = args.project_uuid\n\n            lk = dst.links().create(body=body).execute(num_retries=args.retries)\n            logger.debug('created dst link {}'.format(lk))\n\n    return dst_collection\n\n# copy_collection(obj_uuid, src, dst, args)\n#\n#    Copies the collection identified by obj_uuid from src to dst.\n#    Returns the collection object created at dst.\n#\n#    If args.progress is True, produce a human-friendly progress\n#    report.\n#\n#    If a collection with the desired portable_data_hash already\n#    exists at dst, and args.force is False, copy_collection returns\n#    the existing collection without copying any blocks.  Otherwise\n#    (if no collection exists or if args.force is True)\n#    copy_collection copies all of the collection data blocks from src\n#    to dst.\n#\n#    For this application, it is critical to preserve the\n#    collection's manifest hash, which is not guaranteed with the\n#    arvados.CollectionReader and arvados.CollectionWriter classes.\n#    Copying each block in the collection manually, followed by\n#    the manifest block, ensures that the collection's manifest\n#    hash will not change.\n#\ndef copy_collection(obj_uuid, src, dst, args):\n    if arvados.util.keep_locator_pattern.match(obj_uuid):\n        # If the obj_uuid is a portable data hash, it might not be\n        # uniquely identified with a particular collection.  
As a\n        # result, it is ambiguous as to what name to use for the copy.\n        # Apply some heuristics to pick which collection to get the\n        # name from.\n        srccol = src.collections().list(\n            filters=[['portable_data_hash', '=', obj_uuid]],\n            order=\"created_at asc\"\n            ).execute(num_retries=args.retries)\n\n        items = srccol.get(\"items\")\n\n        if not items:\n            logger.warning(\"Could not find collection with portable data hash %s\", obj_uuid)\n            return\n\n        c = None\n\n        if len(items) == 1:\n            # There's only one collection with the PDH, so use that.\n            c = items[0]\n        if not c:\n            # See if there is a collection that's in the same project\n            # as the root item (usually a workflow) being copied.\n            for i in items:\n                if i.get(\"owner_uuid\") == src_owner_uuid and i.get(\"name\"):\n                    c = i\n                    break\n        if not c:\n            # Didn't find any collections located in the same project, so\n            # pick the oldest collection that has a name assigned to it.\n            for i in items:\n                if i.get(\"name\"):\n                    c = i\n                    break\n        if not c:\n            # None of the collections have names (?!), so just pick the\n            # first one.\n            c = items[0]\n\n        # list() doesn't return manifest text (and we don't want it to,\n        # because we don't need the same manifest text sent to us 50\n        # times) so go and retrieve the collection object directly\n        # which will include the manifest text.\n        c = src.collections().get(uuid=c[\"uuid\"]).execute(num_retries=args.retries)\n    else:\n        # Assume this is an actual collection uuid, so fetch it directly.\n        c = src.collections().get(uuid=obj_uuid).execute(num_retries=args.retries)\n\n    # If a collection with this hash already exists at the\n    # destination, and 'force' is not true, just return that\n    # collection.\n    if not args.force:\n        if 'portable_data_hash' in c:\n            colhash = c['portable_data_hash']\n        else:\n            colhash = c['uuid']\n        dstcol = dst.collections().list(\n            filters=[['portable_data_hash', '=', colhash]]\n        ).execute(num_retries=args.retries)\n        if dstcol['items_available'] > 0:\n            for d in dstcol['items']:\n                if ((args.project_uuid == d['owner_uuid']) and\n                    (c.get('name') == d['name']) and\n                    (c['portable_data_hash'] == d['portable_data_hash'])):\n                    return d\n            c['manifest_text'] = dst.collections().get(\n                uuid=dstcol['items'][0]['uuid']\n            ).execute(num_retries=args.retries)['manifest_text']\n            return create_collection_from(c, src, dst, args)\n\n    if args.replication is None:\n        # Obtain default or fallback collection replication setting on the\n        # destination\n        try:\n            args.replication = int(dst.config()[\"Collections\"][\"DefaultReplication\"])\n        except (KeyError, TypeError, ValueError):\n            args.replication = 2\n\n    # Fetch the collection's manifest.\n    manifest = c['manifest_text']\n    logger.debug(\"Copying collection %s with manifest: <%s>\", obj_uuid, manifest)\n\n    # Copy each block from src_keep to dst_keep.\n    # Use the newly signed locators returned from dst_keep to 
build\n    # a new manifest as we go.\n    src_keep = src.keep\n    dst_keep = dst.keep\n    dst_manifest = io.StringIO()\n    dst_locators = {}\n    bytes_written = 0\n    bytes_expected = total_collection_size(manifest)\n    if args.progress:\n        progress_writer = ProgressWriter(human_progress)\n    else:\n        progress_writer = None\n\n    # go through the words\n    # put each block loc into 'get' queue\n    # 'get' threads get block and put it into 'put' queue\n    # 'put' threads put block and then update dst_locators\n    #\n    # after going through the whole manifest we go back through it\n    # again and build dst_manifest\n\n    lock = threading.Lock()\n\n    # the get queue should be unbounded because we'll add all the\n    # block hashes we want to get, but these are small\n    get_queue = queue.Queue()\n\n    threadcount = 4\n\n    # the put queue contains full data blocks\n    # and if 'get' is faster than 'put' we could end up consuming\n    # a great deal of RAM if it isn't bounded.\n    put_queue = queue.Queue(threadcount)\n    transfer_error = []\n\n    def get_thread():\n        while True:\n            word = get_queue.get()\n            if word is None:\n                put_queue.put(None)\n                get_queue.task_done()\n                return\n\n            blockhash = arvados.KeepLocator(word).md5sum\n            with lock:\n                if blockhash in dst_locators:\n                    # Already uploaded\n                    get_queue.task_done()\n                    continue\n\n            try:\n                logger.debug(\"Getting block %s\", word)\n                data = src_keep.get(word)\n                put_queue.put((word, data))\n            except Exception as e:\n                logger.error(\"Error getting block %s: %s\", word, e)\n                transfer_error.append(e)\n                try:\n                    # Drain the 'get' queue so we end early\n                    while True:\n                        get_queue.get(False)\n                        get_queue.task_done()\n                except queue.Empty:\n                    pass\n            finally:\n                get_queue.task_done()\n\n    def put_thread():\n        nonlocal bytes_written\n        while True:\n            item = put_queue.get()\n            if item is None:\n                put_queue.task_done()\n                return\n\n            word, data = item\n            loc = arvados.KeepLocator(word)\n            blockhash = loc.md5sum\n            with lock:\n                if blockhash in dst_locators:\n                    # Already uploaded\n                    put_queue.task_done()\n                    continue\n\n            try:\n                logger.debug(\"Putting block %s (%s bytes)\", blockhash, loc.size)\n                dst_locator = dst_keep.put(data, copies=args.replication, classes=(args.storage_classes or []))\n                with lock:\n                    dst_locators[blockhash] = dst_locator\n                    bytes_written += loc.size\n                    if progress_writer:\n                        progress_writer.report(obj_uuid, bytes_written, bytes_expected)\n            except Exception as e:\n                logger.error(\"Error putting block %s (%s bytes): %s\", blockhash, loc.size, e)\n                try:\n                    # Drain the 'get' queue so we end early\n                    while True:\n                        get_queue.get(False)\n                        get_queue.task_done()\n                except 
queue.Empty:\n                    pass\n                transfer_error.append(e)\n            finally:\n                put_queue.task_done()\n\n    if args.keep_block_copy:\n        for line in manifest.splitlines():\n            words = line.split()\n            for word in words[1:]:\n                try:\n                    loc = arvados.KeepLocator(word)\n                except ValueError:\n                    # If 'word' can't be parsed as a locator,\n                    # presume it's a filename.\n                    continue\n\n                get_queue.put(word)\n\n        for i in range(0, threadcount):\n            get_queue.put(None)\n\n        for i in range(0, threadcount):\n            threading.Thread(target=get_thread, daemon=True).start()\n\n        for i in range(0, threadcount):\n            threading.Thread(target=put_thread, daemon=True).start()\n\n        get_queue.join()\n        put_queue.join()\n\n        if len(transfer_error) > 0:\n            return {\"error_token\": \"Failed to transfer blocks\"}\n\n    for line in manifest.splitlines():\n        words = iter(line.split())\n        out_words = [next(words)]\n        for word in words:\n            try:\n                loc = arvados.KeepLocator(word)\n            except ValueError:\n                # If 'word' can't be parsed as a locator,\n                # presume it's a filename.\n                out_words.append(word)\n            else:\n                if args.keep_block_copy:\n                    out_words.append(dst_locators[loc.md5sum])\n                else:\n                    out_words.append(loc.stripped())\n        dst_manifest.write(' '.join(out_words))\n        dst_manifest.write(\"\\n\")\n\n    if progress_writer:\n        progress_writer.report(obj_uuid, bytes_written, bytes_expected)\n        progress_writer.finish()\n\n    # Copy the manifest and save the collection.\n    logger.debug('saving %s with manifest: <%s>', obj_uuid, dst_manifest.getvalue())\n\n    c['manifest_text'] = dst_manifest.getvalue()\n    return create_collection_from(c, src, dst, args)\n\ndef copy_docker_image(docker_image, docker_image_tag, src, dst, args):\n    \"\"\"Copy the docker image identified by docker_image and\n    docker_image_tag from src to dst. 
Create appropriate\n    docker_image_repo+tag and docker_image_hash links at dst.\n\n    \"\"\"\n\n    logger.debug('copying docker image {}:{}'.format(docker_image, docker_image_tag))\n\n    # Find the link identifying this docker image.\n    docker_image_list = arvados.commands.keepdocker.list_images_in_arv(\n        src, args.retries, docker_image, docker_image_tag)\n    if docker_image_list:\n        image_uuid, image_info = docker_image_list[0]\n        logger.debug('copying collection {} {}'.format(image_uuid, image_info))\n\n        # Copy the collection it refers to.\n        dst_image_col = copy_collection(image_uuid, src, dst, args)\n    elif arvados.util.keep_locator_pattern.match(docker_image):\n        dst_image_col = copy_collection(docker_image, src, dst, args)\n    else:\n        logger.warning('Could not find docker image {}:{}'.format(docker_image, docker_image_tag))\n\ndef copy_project(obj_uuid, src, dst, owner_uuid, args):\n\n    src_project_record = src.groups().get(uuid=obj_uuid).execute(num_retries=args.retries)\n\n    # Create/update the destination project\n    existing = dst.groups().list(filters=[[\"owner_uuid\", \"=\", owner_uuid],\n                                          [\"name\", \"=\", src_project_record[\"name\"]]]).execute(num_retries=args.retries)\n    try:\n        existing_uuid = existing['items'][0]['uuid']\n    except IndexError:\n        body = src_project_record if args.export_all_fields else {'group': {\n            'description': src_project_record['description'],\n            'group_class': 'project',\n            'name': src_project_record['name'],\n            'owner_uuid': owner_uuid,\n        }}\n        project_req = dst.groups().create(body=body)\n    else:\n        project_req = dst.groups().update(\n            uuid=existing_uuid,\n            body={'group': {\n                'description': src_project_record['description'],\n            }},\n        )\n\n    project_record = project_req.execute(num_retries=args.retries)\n    args.project_uuid = project_record[\"uuid\"]\n    logger.debug('Copying %s to %s', obj_uuid, project_record[\"uuid\"])\n\n    partial_error = \"\"\n    # Copy collections\n    try:\n        copy_collections([col[\"uuid\"] for col in arvados.util.keyset_list_all(src.collections().list, filters=[[\"owner_uuid\", \"=\", obj_uuid]])],\n                         src, dst, args)\n    except Exception as e:\n        partial_error += \"\\n\" + str(e)\n\n    # Copy workflows\n    for w in arvados.util.keyset_list_all(src.workflows().list, filters=[[\"owner_uuid\", \"=\", obj_uuid]]):\n        try:\n            copy_workflow(w[\"uuid\"], src, dst, args)\n        except Exception as e:\n            partial_error += \"\\n\" + \"Error while copying %s: %s\" % (w[\"uuid\"], e)\n\n    if args.recursive:\n        for g in arvados.util.keyset_list_all(src.groups().list, filters=[[\"owner_uuid\", \"=\", obj_uuid]]):\n            try:\n                copy_project(g[\"uuid\"], src, dst, project_record[\"uuid\"], args)\n            except Exception as e:\n                partial_error += \"\\n\" + \"Error while copying %s: %s\" % (g[\"uuid\"], e)\n\n    project_record[\"partial_error\"] = partial_error\n\n    return project_record\n\n# git_rev_parse(rev, repo)\n#\n#    Returns the 40-character commit hash corresponding to 'rev' in\n#    git repository 'repo' (which must be the path of a local git\n#    repository)\n#\ndef git_rev_parse(rev, repo):\n    proc = subprocess.run(\n        ['git', 'rev-parse', rev],\n        check=True,\n   
     cwd=repo,\n        stdout=subprocess.PIPE,\n        text=True,\n    )\n    return proc.stdout.strip()\n\n# uuid_type(api, object_uuid)\n#\n#    Returns the name of the class that object_uuid belongs to, based on\n#    the second field of the uuid.  This function consults the api's\n#    schema to identify the object class.\n#\n#    It returns a string such as 'Collection', 'Workflow', etc.\n#\n#    Special case: if handed a Keep locator hash, return 'Collection'.\n#\ndef uuid_type(api, object_uuid):\n    if re.match(arvados.util.keep_locator_pattern, object_uuid):\n        return 'Collection'\n\n    if object_uuid.startswith(\"http:\") or object_uuid.startswith(\"https:\"):\n        return 'httpURL'\n\n    if object_uuid.startswith(\"s3:\"):\n        return 's3URL'\n\n    p = object_uuid.split('-')\n    if len(p) == 3:\n        type_prefix = p[1]\n        for k in api._schema.schemas:\n            obj_class = api._schema.schemas[k].get('uuidPrefix', None)\n            if type_prefix == obj_class:\n                return k\n    return None\n\n\ndef copy_from_url(url, src, dst, args):\n\n    project_uuid = args.project_uuid\n    # Ensure string of varying parameters is well-formed\n    prefer_cached_downloads = args.prefer_cached_downloads\n\n    cached = to_keep_util.CheckCacheResult(None, None, None, None, None)\n\n    if url.startswith(\"http:\") or url.startswith(\"https:\"):\n        cached = http_to_keep.check_cached_url(src, project_uuid, url, {},\n                                               varying_url_params=args.varying_url_params,\n                                               prefer_cached_downloads=prefer_cached_downloads)\n    elif url.startswith(\"s3:\"):\n        import boto3.session\n        botosession = boto3.session.Session()\n        cached = s3_to_keep.check_cached_url(src, botosession, project_uuid, url, {},\n                                             prefer_cached_downloads=prefer_cached_downloads)\n\n    if cached[2] is not None:\n        return copy_collection(cached[2], src, dst, args)\n\n    if url.startswith(\"http:\") or url.startswith(\"https:\"):\n        cached = http_to_keep.http_to_keep(dst, project_uuid, url,\n                                           varying_url_params=args.varying_url_params,\n                                           prefer_cached_downloads=prefer_cached_downloads)\n    elif url.startswith(\"s3:\"):\n        cached = s3_to_keep.s3_to_keep(dst, botosession, project_uuid, url,\n                                       prefer_cached_downloads=prefer_cached_downloads)\n\n    if cached is not None:\n        return {\"uuid\": cached[2]}\n\n\ndef abort(msg, code=1):\n    logger.info(\"arv-copy: %s\", msg)\n    exit(code)\n\n\n# Code for reporting on the progress of a collection upload.\n# Stolen from arvados.commands.put.ArvPutCollectionWriter\n# TODO(twp): figure out how to refactor into a shared library\n# (may involve refactoring some arvados.commands.arv_copy.copy_collection\n# code)\n\ndef machine_progress(obj_uuid, bytes_written, bytes_expected):\n    return \"{} {}: {} {} written {} total\\n\".format(\n        sys.argv[0],\n        os.getpid(),\n        obj_uuid,\n        bytes_written,\n        -1 if (bytes_expected is None) else bytes_expected)\n\ndef human_progress(obj_uuid, bytes_written, bytes_expected):\n    if bytes_expected:\n        return \"\\r{}: {}M / {}M {:.1%} \".format(\n            obj_uuid,\n            bytes_written >> 20, bytes_expected >> 20,\n            float(bytes_written) / bytes_expected)\n    
else:\n        return \"\\r{}: {} \".format(obj_uuid, bytes_written)\n\nclass ProgressWriter(object):\n    _progress_func = None\n    outfile = sys.stderr\n\n    def __init__(self, progress_func):\n        self._progress_func = progress_func\n\n    def report(self, obj_uuid, bytes_written, bytes_expected):\n        if self._progress_func is not None:\n            self.outfile.write(\n                self._progress_func(obj_uuid, bytes_written, bytes_expected))\n\n    def finish(self):\n        self.outfile.write(\"\\n\")\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "sdk/python/arvados/commands/arvcli.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Main executable for Arvados CLI SDK, the `arv` command.\n\nThis script implements the `arv` command's argument parser. The `arv` command\nis meant to be invoked in the following manner:\n\n$ arv [--flags] subcommand|resource [...options]\n\nwhere `--flags` are common CLI options for the various subcommands.\n\nThe `ArvCLIArgumentParser` class, specializing the standard Python\n`argparse.ArgumentParser`, provides the support for this CLI usage.\n\"\"\"\n\n\nimport argparse\nimport functools\nimport importlib\nimport json\nimport re\nimport sys\nfrom typing import NoReturn\nimport arvados\nimport arvados.commands._util as cmd_util\n\n\nclass _ArgTypes:\n    \"\"\"Private namespace class for JSON-related CLI argument types.\"\"\"\n    @staticmethod\n    def _validate_type(obj_type, obj):\n        if isinstance(obj, obj_type):\n            return obj\n        # No details to raise; caller handles error messaging with pretty_name.\n        raise ValueError()\n\n    json_array = cmd_util.JSONStringArgument(\n        validator=functools.partial(_validate_type, list),\n        pretty_name=\"JSON array\"\n    )\n\n    json_object = cmd_util.JSONStringArgument(\n        validator=functools.partial(_validate_type, dict),\n        pretty_name=\"JSON object\"\n    )\n\n    json_filter = cmd_util.JSONArgument(\n        validator=cmd_util.validate_filters,\n        pretty_name=\"Arvados API filter\"\n    )\n\n    json_body = cmd_util.JSONArgument(\n        validator=json_object.post_validator,\n        pretty_name=\"JSON request body object\"\n    )\n\n\nclass _ArgUtil:\n    \"\"\"Private namespace class for helpful functions (static methods) that\n    processes the discovery document for the purpose of CLI parser generation.\n    \"\"\"\n    @staticmethod\n    def singularize_resource(plural: str) -> str:\n        \"\"\"Returns the singular form of a resource term in the original\n        plural.\n        \"\"\"\n        match plural:\n            case \"vocabularies\":\n                return \"vocabulary\"\n            case \"sys\":\n                return \"sys\"\n            case _:\n                return plural.removesuffix(\"s\")\n\n    @staticmethod\n    def parameter_key_to_argument_name(parameter_key: str) -> str:\n        \"\"\"Convert a parameter key in the discovery document to CLI parameter\n        form, for example, `--foo-bar`.\n\n        Arguments:\n\n        * parameter_key: str -- Parameter key in the form as they appear in the\n          discovery document, typically like `foo_bar`.\n        \"\"\"\n        return \"--\" + parameter_key.replace(\"_\", \"-\")\n\n    @staticmethod\n    def get_method_options(method_schema):\n        \"\"\"Generate command-line options, in the form of \"-f/--foo\", from the\n        parameters as defined by the API method schema in the discovery\n        document.\n\n        For each key \"foo_bar\" in the \"parameters\" field of the method schema,\n        command-line options are created according to its definition as\n        follows.\n\n        If the parameter type is \"boolean\", a pair of options \"--no-foo-bar\"\n        and \"--foo-bar\" are created, with opposite meaning.\n\n        If the parameter type is \"integer\", the CLI input will be interpreted\n        as a Python int.\n\n        All other parameter types are parsed as Python str.\n\n        The short form of each option will also be created, by taking the 
first\n        letter of the long form, except when that letter is already used, in\n        which case the second letter will be used, and so on. For example,\n        \"--foo-bar\" will have short form \"-f\", unless \"-f\" is already used for\n        another option, in which case \"-o\" will be used, etc.\n\n        The \"negative\" form of boolean options (\"--no-foo-bar\") will not have\n        separate short forms of their own.\n\n        This  generator yields tuples in the form of `(names, kwargs)`, where\n        `names` is a one- or two-element tuple and `kwargs` is a dict, suitable\n        to be passed as\n        `argparse.ArgumentParser.add_argument(*names, **kwargs)`.\n\n        Arguments:\n\n        * method_schema: dict --- Dict object from the parsed discover document\n          that defines a method.\n        \"\"\"\n        parameters_schema = method_schema.get(\"parameters\", {}).copy()\n        # If the method comes with the \"request\" field, add another parameter\n        # based on the sole key in the \"properties\" dict of that field\n        request_schema = method_schema.get(\"request\")\n        if request_schema is not None and request_schema.get(\"properties\"):\n            for parameter_key in request_schema[\"properties\"].keys():\n                parameters_schema[parameter_key] = {\n                    \"type\": \"request\",  # special value for request parameter\n                    \"required\": request_schema.get(\"required\"),\n                    \"description\": (\n                        f\"Either a string representing {parameter_key} as JSON\"\n                        f\" or a filename from which to read {parameter_key}\"\n                        \" JSON (use '-' to read from stdin).\"\n                    )\n                }\n        argument_key_abbrevs = set(\"h\")  # prevent conflict with \"help\"\n        for parameter_key, parameter_dict in parameters_schema.items():\n            parameter_kwargs = {\n                \"required\": parameter_dict.get(\"required\", False)\n            }\n            parameter_kwargs[\"help\"] = parameter_dict.get(\"description\", \"\")\n            if parameter_kwargs[\"required\"]:\n                parameter_kwargs[\"help\"] += \" This option must be specified.\"\n            # The \"type\" member refers to one of the JSON values types, out of\n            # string/integer/array/object/boolean.\n            # NOTE: Currently, enum-like value choices are not implemented, as\n            # the enum values cannot be directly inferred from the discover\n            # doc.\n            argument_key = _ArgUtil.parameter_key_to_argument_name(\n                parameter_key\n            )\n            for argument_short_key in argument_key:\n                if (\n                    argument_short_key.isalpha()\n                    and argument_short_key not in argument_key_abbrevs\n                ):\n                    argument_key_abbrevs.add(argument_short_key)\n                    break\n            else:\n                # If the letters of the full argument name are exhausted, fall\n                # back to not using a short argument, indicated by the special\n                # value None:\n                argument_short_key = None\n            default = parameter_dict.get(\"default\")\n            if default is not None and parameter_dict.get(\"type\") != \"boolean\":\n                parameter_kwargs[\"help\"] += f\" Default: {default}.\"\n            match parameter_dict.get(\"type\"):\n               
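                # Each branch below maps a discovery-document parameter type to\n                # argparse settings; \"request\" is the synthetic type assigned\n                # above to request-body parameters.\n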
 case \"boolean\":\n                    # Using the 'action=\"store_true\" (or \"store_false\")'\n                    # mechanism results in flag-like action rather than an\n                    # option that takes a true or false value. For each bool\n                    # flag \"--foo\", also generate an additional \"negative\"\n                    # version \"--no-foo\".\n                    neg_argument_key = _ArgUtil.parameter_key_to_argument_name(\n                        f\"no_{parameter_key}\"\n                    )\n                    neg_parameter_kwargs = {}\n                    neg_parameter_kwargs[\"action\"] = \"store_false\"\n                    neg_parameter_kwargs[\"required\"] = False\n                    neg_parameter_kwargs[\"dest\"] = parameter_key\n                    neg_parameter_kwargs[\"default\"] = json.loads(\n                        default if default is not None else \"null\"\n                    )\n                    yield (neg_argument_key,), neg_parameter_kwargs\n\n                    parameter_kwargs[\"action\"] = \"store_true\"\n                    parameter_kwargs[\"dest\"] = parameter_key\n                    parameter_kwargs[\"default\"] = (\n                        neg_parameter_kwargs[\"default\"]\n                    )\n                case \"integer\":\n                    parameter_kwargs[\"type\"] = int\n                    parameter_kwargs[\"metavar\"] = \"N\"\n                case \"array\":\n                    # The filters parameter is only used with \"getter\" methods\n                    # that doesn't send a request body (which is exclusive to\n                    # \"creator\"/\"updater\" methods). This means it's generally\n                    # safe to use the \"json_filter\" type converter which can\n                    # read from the stdin; it wouldn't conflict with the\n                    # request body parameter which can also read the stdin.\n                    if parameter_key == \"filters\":\n                        parameter_kwargs[\"type\"] = _ArgTypes.json_filter\n                        parameter_kwargs[\"metavar\"] = \"{JSON,FILE,-}\"\n                        parameter_kwargs[\"help\"] += (\n                            \" This can be a filename from which to read\"\n                            \" JSON (use '-' to read from stdin).\"\n                        )\n                    else:\n                        parameter_kwargs[\"type\"] = _ArgTypes.json_array\n                        parameter_kwargs[\"metavar\"] = \"JSON_ARRAY\"\n                case \"object\":\n                    parameter_kwargs[\"type\"] = _ArgTypes.json_object\n                    parameter_kwargs[\"metavar\"] = \"JSON_OBJECT\"\n                case \"request\":\n                    parameter_kwargs[\"dest\"] = \"body\"\n                    parameter_kwargs[\"type\"] = _ArgTypes.json_body\n                    parameter_kwargs[\"metavar\"] = \"{JSON,FILE,-}\"\n                case _:\n                    parameter_kwargs[\"type\"] = str\n                    parameter_kwargs[\"metavar\"] = \"STR\"\n            if argument_short_key is None:\n                yield (argument_key,), parameter_kwargs\n            else:\n                yield (\n                    (f\"-{argument_short_key}\", argument_key), parameter_kwargs\n                )\n\n\nclass ArvCLIArgumentParser(argparse.ArgumentParser):\n    \"\"\"Argument parser for `arv` commands.\n    \"\"\"\n    global_args = frozenset((\n        \"dry_run\",\n        \"verbose\",\n        
\"format\",\n        \"subcommand\",\n        \"method\"\n    ))\n    external_command_modules = {\n        \"keep ls\": \"arvados.commands.ls\",\n        \"keep get\": \"arvados.commands.get\",\n        \"keep put\": \"arvados.commands.put\",\n        \"keep docker\": \"arvados.commands.keepdocker\",\n        \"ws\": \"arvados.commands.ws\",\n        \"copy\": \"arvados.commands.arv_copy\"\n    }\n\n    def __init__(self, resource_dictionary, **kwargs):\n        \"\"\"Arguments:\n\n        * resource dictionary: dict --- Dict containing the resources defined\n          in the discovery document; can be obtained as the\n          `_resourceDesc[\"resources\"]` attribute of an Arvados API client\n          object.\n        \"\"\"\n        super().__init__(description=\"Arvados command line client\", **kwargs)\n        # Common flags to the main command.\n        self.add_argument(\"-n\", \"--dry-run\", action=\"store_true\",\n                          help=\"Don't actually do anything\")\n        self.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\n                          help=\"Print some things on stderr\")\n        # Default output format is JSON, while \"-s\" or \"--short\" can be\n        # used as a shorthand for \"--format=uuid\". If both are specified, the\n        # last one takes effect.\n        self.add_argument(\n            \"-f\", \"--format\",\n            choices=[\"json\", \"yaml\", \"uuid\"],\n            default=\"json\",\n            help=\"Set output format\"\n        )\n        self.add_argument(\n            \"-s\", \"--short\",\n            dest=\"format\",\n            action=\"store_const\", const=\"uuid\",\n            help=\"Return only UUIDs (equivalent to --format=uuid)\"\n        )\n\n        subparsers = self.add_subparsers(\n            dest=\"subcommand\",\n            help=\"Subcommands\",\n            required=True,\n            parser_class=functools.partial(\n                argparse.ArgumentParser,\n                add_help=False,\n            )\n        )\n\n        keep_parser = subparsers.add_parser(\"keep\")\n        keep_parser.add_argument(\n            \"method\",\n            choices=[\"ls\", \"get\", \"put\", \"docker\"]\n        )\n\n        ws_parser = subparsers.add_parser(\"ws\")\n        copy_parser = subparsers.add_parser(\"copy\")\n\n        self.subparsers = subparsers\n        self.resource_dictionary = resource_dictionary\n        self._subparser_index = {}\n        self._subcommand_to_resource = {}\n\n        self.add_resource_subcommands()\n\n        if \"sys\" in self._subparser_index:\n            self._subparser_index[\"sy\"] = self._subparser_index[\"sys\"]\n        if \"sys\" in self._subcommand_to_resource:\n            self._subcommand_to_resource[\"sy\"] = (\n                self._subcommand_to_resource[\"sys\"]\n            )\n\n    def add_resource_subcommands(self):\n        \"\"\"Add resources as subcommands, their associated methods as\n        sub-subcommands, and the parameters associated with each method.\n        \"\"\"\n        for resource, resource_schema in self.resource_dictionary.items():\n            subcommand = _ArgUtil.singularize_resource(resource)\n            self._subcommand_to_resource[subcommand] = resource\n            resource_subparser = self.subparsers.add_parser(\n                subcommand,\n                # For backward compatibility with legacy Ruby CLI client.\n                aliases=[\"sy\"] if subcommand == \"sys\" else []\n            )\n            
            self._subparser_index[subcommand] = resource_subparser\n            methods_dict = resource_schema.get(\"methods\")\n            if methods_dict:\n                # Create a collection of \"sub-subparsers\" under the resource\n                # subparser for the methods.\n                method_subparsers = resource_subparser.add_subparsers(\n                    title=\"Methods\",\n                    dest=\"method\",\n                    parser_class=argparse.ArgumentParser,\n                    help=f\"Methods for subcommand {subcommand}\"\n                )\n                for method, method_schema in methods_dict.items():\n                    # Add each specific method as a (sub-)subparser with its\n                    # associated parameters.\n                    method_parser = method_subparsers.add_parser(\n                        method,\n                        help=method_schema.get(\"description\")\n                    )\n                    for parameter_names, kwargs in _ArgUtil.get_method_options(\n                            method_schema\n                    ):\n                        method_parser.add_argument(*parameter_names, **kwargs)\n\n\ndef _handle_external_command(module_name: str, args: list[str]) -> NoReturn:\n    \"\"\"Import the external module for the subcommand, call the module's\n    `main()` function with the given arguments, and exit with the main\n    function's return value as the exit status code.\n    \"\"\"\n    external_mod = importlib.import_module(module_name)\n    sys.exit(external_mod.main(args))\n\n\ndef _handle_resource_method(api_client, resource, args) -> NoReturn:\n    \"\"\"Prepare an API request from the resource name and the already-parsed\n    arguments, send the request, and analyze & print out the result.\n    \"\"\"\n    arv_resource = getattr(api_client, resource)()\n    arv_method = getattr(arv_resource, args.method)\n    method_call = arv_method(**{\n        k: v\n        for k, v in vars(args).items()\n        if k not in ArvCLIArgumentParser.global_args\n    })\n\n    try:\n        result = method_call.execute()\n    except arvados.errors.ApiError as err:\n        # NOTE: This is not exactly the same output as that generated by\n        # the Ruby 'arv' command upon error.\n        msg = str(err)\n        request_id = method_call.headers.get(\"X-Request-Id\")\n        if request_id and not re.search(\n            rf\"\\b{re.escape(request_id)}\\b\", msg\n        ):\n            msg += f\" ({request_id})\"\n        print(f\"Error: {msg}\", file=sys.stderr)\n        sys.exit(1)\n\n    match args.format:\n        case \"json\":\n            json.dump(result, sys.stdout, indent=1)\n            print()\n        case \"yaml\":\n            from ruamel.yaml import YAML\n            yaml = YAML(typ=\"safe\", pure=True)\n            yaml.default_flow_style = False\n            yaml.dump(result, sys.stdout)\n        case \"uuid\":\n            if (\n                    result.get(\"kind\", \"\").endswith(\"List\")\n                    and result.get(\"items\")\n            ):\n                for item in result[\"items\"]:\n                    # The received items may have the \"uuid\" field filtered out\n                    # by the \"--select\" parameter. The Ruby \"arv\" command\n                    # simply outputs blank lines, which is not desirable.\n                    obj_uuid = item.get(\"uuid\")\n                    if obj_uuid is None:\n                        print(\n                            (\n                                \"Error: at least one item in response did not\"\n                                \" include a uuid. The full response was:\"\n                            ),\n                            json.dumps(result, indent=1),\n                            sep=\"\\n\",\n                            file=sys.stderr\n                        )\n                        sys.exit(1)\n                    else:\n                        print(item[\"uuid\"])\n            else:\n                obj_uuid = result.get(\"uuid\")\n                if obj_uuid is None:\n                    print(\n                        \"Error: response did not include a uuid:\",\n                        json.dumps(result, indent=1),\n                        sep=\"\\n\",\n                        file=sys.stderr\n                    )\n                    sys.exit(1)\n                print(obj_uuid)\n    sys.exit(0)\n\n\ndef dispatch(arguments=None):\n    api_client = arvados.api(\"v1\")\n    cmd_parser = ArvCLIArgumentParser(api_client._resourceDesc[\"resources\"])\n    args, remaining_args = cmd_parser.parse_known_args(arguments)\n\n    # There's always args.subcommand if we reach here, because \"subcommand\" is\n    # required by the parser. But \"method\" may be absent, as is the case for\n    # external commands like \"ws\" or \"copy\".\n    method = getattr(args, \"method\", \"\")\n\n    # Are we calling an external command?\n    ext_module = cmd_parser.external_command_modules.get(\n        f\"{args.subcommand} {method}\" if method else args.subcommand\n    )\n    if ext_module is not None:\n        _handle_external_command(ext_module, remaining_args)  # Exits.\n\n    # Are we doing an API resource call?\n    resource = cmd_parser._subcommand_to_resource.get(args.subcommand)\n    if resource is not None:\n        # This is to work around an issue with nested subparsers being unable\n        # to show subcommand-level help (while help generation for the\n        # leafmost, method-level subparser works as expected). For example,\n        # \"arvcli.py resource method -h\" will be handled by the leafmost parser\n        # first and the code will not reach here if that is the CLI given.\n        # However, \"arvcli.py resource -h\" is handled manually here.\n        help_wanted = \"-h\" in remaining_args or \"--help\" in remaining_args\n        if not method or help_wanted:\n            subparser = cmd_parser._subparser_index[args.subcommand]\n            subparser.print_help(\n                file=(sys.stdout if help_wanted else sys.stderr)\n            )\n            sys.exit(0 if help_wanted else 2)\n        # Any further remaining args indicate either malformed or unrecognized\n        # global args (e.g. \"arvcli.py --bad-arg resource method\") or undefined\n        # parameters to a valid resource-method combination.\n        elif remaining_args:\n            print(\n                \"Error: unrecognized command-line arguments:\",\n                \", \".join(remaining_args),\n                file=sys.stderr\n            )\n            print(\n                f\"Try: {sys.argv[0]} --help\",\n                f\"     {sys.argv[0]} {args.subcommand} {method} --help\",\n                sep=\"\\n\",\n                file=sys.stderr\n            )\n            sys.exit(2)\n        else:\n            _handle_resource_method(api_client, resource, args)  # Exits.\n\n    # TODO: Other types of commands are not yet implemented (\"create\" and\n    # \"edit\"). The code immediately below is not reachable.\n    raise RuntimeError(f\"Unexpected arguments: {arguments!r}\")\n\n\nif __name__ == \"__main__\":\n    dispatch()\n"
  },
  {
    "path": "sdk/python/arvados/commands/get.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport hashlib\nimport os\nimport pathlib\nimport re\nimport string\nimport sys\nimport logging\n\nimport arvados\nimport arvados.commands._util as arv_cmd\nimport arvados.util as util\n\nfrom arvados._version import __version__\n\nlogger = logging.getLogger('arvados.arv-get')\n\nparser = argparse.ArgumentParser(\n    description='Copy data from Keep to a local file or pipe.',\n    parents=[arv_cmd.retry_opt])\nparser.add_argument('--version', action='version',\n                    version=\"%s %s\" % (sys.argv[0], __version__),\n                    help='Print version and exit.')\nparser.add_argument('locator', type=str,\n                    help=\"\"\"\nCollection locator, optionally with a file path or prefix.\n\"\"\")\nparser.add_argument('destination', type=str, nargs='?', default='-',\n                    help=\"\"\"\nLocal file or directory where the data is to be written. Default: stdout.\n\"\"\")\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('--progress', action='store_true',\n                   help=\"\"\"\nDisplay human-readable progress on stderr (bytes and, if possible,\npercentage of total data size). This is the default behavior when it\nis not expected to interfere with the output: specifically, stderr is\na tty _and_ either stdout is not a tty, or output is being written to\nnamed files rather than stdout.\n\"\"\")\ngroup.add_argument('--no-progress', action='store_true',\n                   help=\"\"\"\nDo not display human-readable progress on stderr.\n\"\"\")\ngroup.add_argument('--batch-progress', action='store_true',\n                   help=\"\"\"\nDisplay machine-readable progress on stderr (bytes and, if known,\ntotal data size).\n\"\"\")\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('--hash',\n                    help=\"\"\"\nDisplay the hash of each file as it is read from Keep, using the given\nhash algorithm. Supported algorithms include md5, sha1, sha224,\nsha256, sha384, and sha512.\n\"\"\")\ngroup.add_argument('--md5sum', action='store_const',\n                    dest='hash', const='md5',\n                    help=\"\"\"\nDisplay the MD5 hash of each file as it is read from Keep.\n\"\"\")\nparser.add_argument('-n', action='store_true',\n                    help=\"\"\"\nDo not write any data -- just read from Keep, and report md5sums if\nrequested.\n\"\"\")\nparser.add_argument('-r', action='store_true',\n                    help=\"\"\"\nRetrieve all files in the specified collection/prefix. This is the\ndefault behavior if the \"locator\" argument ends with a forward slash.\n\"\"\")\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-f', action='store_true',\n                   help=\"\"\"\nOverwrite existing files while writing. The default behavior is to\nrefuse to write *anything* if any of the output files already\nexist. As a special case, -f is not needed to write to stdout.\n\"\"\")\ngroup.add_argument('-v', action='count', default=0,\n                    help=\"\"\"\nOnce for verbose mode, twice for debug mode.\n\"\"\")\ngroup.add_argument('--skip-existing', action='store_true',\n                   help=\"\"\"\nSkip files that already exist. The default behavior is to refuse to\nwrite *anything* if any files exist that would have to be\noverwritten. 
This option causes even devices, sockets, and fifos to be\nskipped.\n\"\"\")\ngroup.add_argument('--strip-manifest', action='store_true', default=False,\n                   help=\"\"\"\nWhen getting a collection manifest, strip its access tokens before writing\nit.\n\"\"\")\n\nparser.add_argument('--threads', type=int, metavar='N', default=4,\n                    help=\"\"\"\nSet the number of download threads to be used. Take into account that\nusing lots of threads will increase the RAM requirements. Default is\nto use 4 threads.\nOn high latency installations, using a greater number will improve\noverall throughput.\n\"\"\")\n\ndef parse_arguments(arguments, stdout, stderr):\n    args = parser.parse_args(arguments)\n\n    if args.locator[-1] == os.sep:\n        args.r = True\n    if (args.r and\n        not args.n and\n        not (args.destination and\n             os.path.isdir(args.destination))):\n        parser.error('Destination is not a directory.')\n    if not args.r and (os.path.isdir(args.destination) or\n                       args.destination[-1] == os.path.sep):\n        args.destination = os.path.join(args.destination,\n                                        os.path.basename(args.locator))\n        logger.debug(\"Appended source file name to destination directory: %s\",\n                     args.destination)\n\n    if args.destination == '/dev/stdout':\n        args.destination = \"-\"\n\n    if args.destination == '-':\n        # Normally you have to use -f to write to a file (or device) that\n        # already exists, but \"-\" and \"/dev/stdout\" are common enough to\n        # merit a special exception.\n        args.f = True\n    else:\n        args.destination = args.destination.rstrip(os.sep)\n\n    # Turn on --progress by default if stderr is a tty and output is\n    # either going to a named file, or going (via stdout) to something\n    # that isn't a tty.\n    if (not (args.batch_progress or args.no_progress)\n        and stderr.isatty()\n        and (args.destination != '-'\n             or not stdout.isatty())):\n        args.progress = True\n    return args\n\ndef main(arguments=None, stdout=sys.stdout, stderr=sys.stderr):\n    if stdout is sys.stdout and hasattr(stdout, 'buffer'):\n        # in Python 3, write to stdout as binary\n        stdout = stdout.buffer\n\n    args = parse_arguments(arguments, stdout, stderr)\n    logger.setLevel(logging.WARNING - 10 * args.v)\n\n    request_id = arvados.util.new_request_id()\n    logger.info('X-Request-Id: '+request_id)\n\n    api_client = arvados.api('v1', request_id=request_id, num_retries=args.retries)\n\n    r = re.search(r'^(.*?)(/.*)?$', args.locator)\n    col_loc = r.group(1)\n    get_prefix = r.group(2)\n    if args.r and not get_prefix:\n        get_prefix = os.sep\n\n    # User asked to download the collection's manifest\n    if not get_prefix:\n        if not args.n:\n            open_flags = os.O_CREAT | os.O_WRONLY\n            if not args.f:\n                open_flags |= os.O_EXCL\n            try:\n                if args.destination == \"-\":\n                    write_block_or_manifest(\n                        dest=stdout, src=col_loc,\n                        api_client=api_client, args=args)\n                else:\n                    out_fd = os.open(args.destination, open_flags)\n                    with os.fdopen(out_fd, 'wb') as out_file:\n                        write_block_or_manifest(\n                            dest=out_file, src=col_loc,\n                            
api_client=api_client, args=args)\n            except (IOError, OSError) as error:\n                logger.error(\"can't write to '{}': {}\".format(args.destination, error))\n                return 1\n            except (arvados.errors.ApiError, arvados.errors.KeepReadError) as error:\n                logger.error(\"failed to download '{}': {}\".format(col_loc, error))\n                return 1\n            except arvados.errors.ArgumentError as error:\n                if 'Argument to CollectionReader' in str(error):\n                    logger.error(\"error reading collection: {}\".format(error))\n                    return 1\n                else:\n                    raise\n        return 0\n\n    try:\n        reader = arvados.CollectionReader(\n            col_loc, api_client=api_client, num_retries=args.retries,\n            keep_client=arvados.keep.KeepClient(block_cache=arvados.keep.KeepBlockCache((args.threads+1)*64 * 1024 * 1024), num_prefetch_threads=args.threads))\n    except Exception as error:\n        logger.error(\"failed to read collection: {}\".format(error))\n        return 1\n\n    # Scan the collection. Make an array of (stream, file, local\n    # destination filename) tuples, and add up total size to extract.\n    todo = []\n    todo_bytes = 0\n    try:\n        if get_prefix == os.sep:\n            item = reader\n        else:\n            item = reader.find('.' + get_prefix)\n\n        if isinstance(item, arvados.collection.Subcollection) or isinstance(item, arvados.collection.CollectionReader):\n            # If the user asked for a file and we got a subcollection, error out.\n            if get_prefix[-1] != os.sep:\n                logger.error(\"requested file '{}' is in fact a subcollection. Append a trailing '/' to download it.\".format('.' + get_prefix))\n                return 1\n            # If the user asked for stdout as a destination, error out.\n            elif args.destination == '-':\n                logger.error(\"cannot use 'stdout' as destination when downloading multiple files.\")\n                return 1\n            # User asked for a subcollection, and that's what was found. Add up total size\n            # to download.\n            for s, f in files_in_collection(item):\n                dest_path = os.path.join(\n                    args.destination,\n                    os.path.join(s.stream_name(), f.name)[len(get_prefix)+1:])\n                if (not (args.n or args.f or args.skip_existing) and\n                    os.path.exists(dest_path)):\n                    logger.error('Local file %s already exists.' % (dest_path,))\n                    return 1\n                todo += [(s, f, dest_path)]\n                todo_bytes += f.size()\n        elif isinstance(item, arvados.arvfile.ArvadosFile):\n            todo += [(item.parent, item, args.destination)]\n            todo_bytes += item.size()\n        else:\n            logger.error(\"'{}' not found.\".format('.' + get_prefix))\n            return 1\n    except (IOError, arvados.errors.NotFoundError) as e:\n        logger.error(e)\n        return 1\n\n    out_bytes = 0\n    for s, f, outfilename in todo:\n        outfile = None\n        digestor = None\n        if not args.n:\n            if outfilename == \"-\":\n                outfile = stdout\n            else:\n                if args.skip_existing and os.path.exists(outfilename):\n                    logger.debug('Local file %s exists. Skipping.', outfilename)\n                    continue\n                elif not args.f and (os.path.isfile(outfilename) or\n                                     os.path.isdir(outfilename)):\n                    # Good thing we looked again: apparently this file wasn't\n                    # here yet when we checked earlier.\n                    logger.error('Local file %s already exists.' % (outfilename,))\n                    return 1\n                if args.r:\n                    pathlib.Path(outfilename).parent.mkdir(parents=True, exist_ok=True)\n                try:\n                    outfile = open(outfilename, 'wb')\n                except Exception as error:\n                    logger.error('Open(%s) failed: %s' % (outfilename, error))\n                    return 1\n        if args.hash:\n            digestor = hashlib.new(args.hash)\n        try:\n            with s.open(f.name, 'rb') as file_reader:\n                for data in file_reader.readall():\n                    if outfile:\n                        outfile.write(data)\n                    if digestor:\n                        digestor.update(data)\n                    out_bytes += len(data)\n                    if args.progress:\n                        stderr.write('\\r%d MiB / %d MiB %.1f%%' %\n                                     (out_bytes >> 20,\n                                      todo_bytes >> 20,\n                                      (100\n                                       if todo_bytes==0\n                                       else 100.0*out_bytes/todo_bytes)))\n                    elif args.batch_progress:\n                        stderr.write('%s %d read %d total %d\\n' %\n                                     (sys.argv[0], os.getpid(),\n                                      out_bytes, todo_bytes))\n            if digestor:\n                stderr.write(\"%s  %s/%s\\n\"\n                             % (digestor.hexdigest(), s.stream_name(), f.name))\n        except KeyboardInterrupt:\n            if outfile and (outfile.fileno() > 2) and not outfile.closed:\n                os.unlink(outfile.name)\n            break\n        finally:\n            if outfile is not None and outfile is not stdout:\n                outfile.close()\n\n    if args.progress:\n        stderr.write('\\n')\n    return 0\n\ndef files_in_collection(c):\n    # Sort first by file type, then alphabetically by file path.\n    for i in sorted(list(c.keys()),\n                    key=lambda k: (\n                        isinstance(c[k], arvados.collection.Subcollection),\n                        k.upper())):\n        if isinstance(c[i], arvados.arvfile.ArvadosFile):\n            yield (c, c[i])\n        elif isinstance(c[i], arvados.collection.Subcollection):\n            for s, f in files_in_collection(c[i]):\n                yield (s, f)\n\ndef write_block_or_manifest(dest, src, api_client, args):\n    if '+A' in src:\n        # block locator\n        kc = arvados.keep.KeepClient(api_client=api_client)\n        dest.write(kc.get(src, num_retries=args.retries))\n    else:\n        # collection UUID or portable data hash\n        reader = arvados.CollectionReader(\n            src, api_client=api_client, num_retries=args.retries)\n        dest.write(reader.manifest_text(strip=args.strip_manifest).encode())\n"
  },
  {
    "path": "sdk/python/arvados/commands/keepdocker.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport collections\nimport datetime\nimport errno\nimport fcntl\nimport json\nimport logging\nimport os\nimport re\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\n\nimport ciso8601\nfrom operator import itemgetter\nfrom pathlib import Path\nfrom stat import *\n\nimport arvados\nimport arvados.config\nimport arvados.util\nimport arvados.commands._util as arv_cmd\nimport arvados.commands.put as arv_put\n\nfrom arvados._internal import basedirs\nfrom arvados._version import __version__\nfrom typing import (\n    Callable,\n)\n\nlogger = logging.getLogger('arvados.keepdocker')\nlogger.setLevel(logging.DEBUG if arvados.config.get('ARVADOS_DEBUG')\n                else logging.INFO)\n\nEARLIEST_DATETIME = datetime.datetime(datetime.MINYEAR, 1, 1, 0, 0, 0)\nSTAT_CACHE_ERRORS = (IOError, OSError, ValueError)\n\nDockerImage = collections.namedtuple(\n    'DockerImage', ['repo', 'tag', 'hash', 'created', 'vsize'])\n\nkeepdocker_parser = argparse.ArgumentParser(add_help=False)\nkeepdocker_parser.add_argument(\n    '--version', action='version', version=\"%s %s\" % (sys.argv[0], __version__),\n    help='Print version and exit.')\nkeepdocker_parser.add_argument(\n    '-f', '--force', action='store_true', default=False,\n    help=\"Re-upload the image even if it already exists on the server\")\nkeepdocker_parser.add_argument(\n    '--force-image-format', action='store_true', default=False,\n    help=\"Proceed even if the image format is not supported by the server\")\n\n_group = keepdocker_parser.add_mutually_exclusive_group()\n_group.add_argument(\n    '--pull', action='store_true', default=False,\n    help=\"Try to pull the latest image from Docker registry\")\n_group.add_argument(\n    '--no-pull', action='store_false', dest='pull',\n    help=\"Use locally installed image only, don't pull image from Docker registry (default)\")\n\n# Combine keepdocker options listed above with run_opts options of arv-put.\n# The options inherited from arv-put include --name, --project-uuid,\n# --progress/--no-progress/--batch-progress and --resume/--no-resume.\narg_parser = argparse.ArgumentParser(\n        description=\"Upload or list Docker images in Arvados\",\n        parents=[keepdocker_parser, arv_put.run_opts, arv_cmd.retry_opt])\n\narg_parser.add_argument(\n    'image', nargs='?',\n    help=\"Docker image to upload: repo, repo:tag, or hash\")\narg_parser.add_argument(\n    'tag', nargs='?',\n    help=\"Tag of the Docker image to upload (default 'latest'), if image is given as an untagged repo name\")\n\nclass DockerError(Exception):\n    pass\n\n\ndef popen_docker(cmd, *args, **kwargs):\n    manage_stdin = ('stdin' not in kwargs)\n    kwargs.setdefault('stdin', subprocess.PIPE)\n    kwargs.setdefault('stdout', subprocess.PIPE)\n    kwargs.setdefault('stderr', subprocess.PIPE)\n    try:\n        docker_proc = subprocess.Popen(['docker'] + cmd, *args, **kwargs)\n    except OSError:  # No docker in $PATH, try docker.io\n        docker_proc = subprocess.Popen(['docker.io'] + cmd, *args, **kwargs)\n    if manage_stdin:\n        docker_proc.stdin.close()\n    return docker_proc\n\ndef check_docker(proc, description):\n    proc.wait()\n    if proc.returncode != 0:\n        raise DockerError(\"docker {} returned status code {}\".\n                          format(description, proc.returncode))\n\ndef docker_image_format(image_hash):\n    \"\"\"Return the registry 
format ('v1' or 'v2') of the given image.\"\"\"\n    cmd = popen_docker(['inspect', '--format={{.Id}}', image_hash],\n                        stdout=subprocess.PIPE)\n    try:\n        image_id = next(cmd.stdout).decode('utf-8').strip()\n        if image_id.startswith('sha256:'):\n            return 'v2'\n        elif ':' not in image_id:\n            return 'v1'\n        else:\n            return 'unknown'\n    finally:\n        check_docker(cmd, \"inspect\")\n\ndef docker_image_compatible(api, image_hash):\n    supported = api._rootDesc.get('dockerImageFormats', [])\n    if not supported:\n        logger.warning(\"server does not specify supported image formats (see docker_image_formats in server config).\")\n        return False\n\n    fmt = docker_image_format(image_hash)\n    if fmt in supported:\n        return True\n    else:\n        logger.error(\"image format is {!r} \" \\\n            \"but server supports only {!r}\".format(fmt, supported))\n        return False\n\ndef docker_images():\n    # Yield a DockerImage tuple for each installed image.\n    list_proc = popen_docker(['images', '--no-trunc'], stdout=subprocess.PIPE)\n    list_output = iter(list_proc.stdout)\n    next(list_output)  # Ignore the header line\n    for line in list_output:\n        words = line.split()\n        words = [word.decode('utf-8') for word in words]\n        size_index = len(words) - 2\n        repo, tag, imageid = words[:3]\n        ctime = ' '.join(words[3:size_index])\n        vsize = ' '.join(words[size_index:])\n        yield DockerImage(repo, tag, imageid, ctime, vsize)\n    list_proc.stdout.close()\n    check_docker(list_proc, \"images\")\n\ndef find_image_hashes(image_search, image_tag=None):\n    # Query for Docker images with the repository and tag and return\n    # the image ids in a list.  Returns an empty list if no match is\n    # found.\n\n    list_proc = popen_docker(['inspect', \"%s%s\" % (image_search, \":\"+image_tag if image_tag else \"\")], stdout=subprocess.PIPE)\n\n    inspect = list_proc.stdout.read()\n    list_proc.stdout.close()\n\n    imageinfo = json.loads(inspect)\n\n    return [i[\"Id\"] for i in imageinfo]\n\ndef find_one_image_hash(image_search, image_tag=None):\n    hashes = find_image_hashes(image_search, image_tag)\n    hash_count = len(hashes)\n    if hash_count == 1:\n        return hashes.pop()\n    elif hash_count == 0:\n        raise DockerError(\"no matching image found\")\n    else:\n        raise DockerError(\"{} images match {}\".format(hash_count, image_search))\n\ndef stat_cache_name(image_file):\n    return getattr(image_file, 'name', image_file) + '.stat'\n\ndef pull_image(image_name, image_tag):\n    check_docker(popen_docker(['pull', '{}:{}'.format(image_name, image_tag)]),\n                 \"pull\")\n\ndef save_image(image_hash, image_file):\n    # Save the specified Docker image to image_file, then try to save its\n    # stats so we can try to resume after interruption.\n    check_docker(popen_docker(['save', image_hash], stdout=image_file),\n                 \"save\")\n    image_file.flush()\n    try:\n        with open(stat_cache_name(image_file), 'w') as statfile:\n            json.dump(tuple(os.fstat(image_file.fileno())), statfile)\n    except STAT_CACHE_ERRORS:\n        pass  # We won't resume from this cache.  
No big deal.\n\ndef get_cache_dir(\n        mkparent: Callable[[], Path]=basedirs.BaseDirectories('CACHE').storage_path,\n) -> str:\n    path = mkparent() / 'docker'\n    path.mkdir(mode=0o700, exist_ok=True)\n    return str(path)\n\ndef prep_image_file(filename):\n    # Return a file object ready to save a Docker image,\n    # and a boolean indicating whether or not we need to actually save the\n    # image (False if a cached save is available).\n    cache_dir = get_cache_dir()\n    if cache_dir is None:\n        image_file = tempfile.NamedTemporaryFile(suffix='.tar')\n        need_save = True\n    else:\n        file_path = os.path.join(cache_dir, filename)\n        try:\n            with open(stat_cache_name(file_path)) as statfile:\n                prev_stat = json.load(statfile)\n            now_stat = os.stat(file_path)\n            need_save = any(prev_stat[field] != now_stat[field]\n                            for field in [ST_MTIME, ST_SIZE])\n        except STAT_CACHE_ERRORS + (AttributeError, IndexError):\n            need_save = True  # We couldn't compare against old stats\n        image_file = open(file_path, 'w+b' if need_save else 'rb')\n    return image_file, need_save\n\ndef make_link(api_client, num_retries, link_class, link_name, **link_attrs):\n    link_attrs.update({'link_class': link_class, 'name': link_name})\n    return api_client.links().create(body=link_attrs).execute(\n        num_retries=num_retries)\n\ndef docker_link_sort_key(link):\n    \"\"\"Build a sort key to find the latest available Docker image.\n\n    To find one source collection for a Docker image referenced by\n    name or image id, the API server looks for a link with the most\n    recent `image_timestamp` property; then the most recent\n    `created_at` timestamp.  This method generates a sort key for\n    Docker metadata links to sort them from least to most preferred.\n    \"\"\"\n    try:\n        image_timestamp = ciso8601.parse_datetime_as_naive(\n            link['properties']['image_timestamp'])\n    except (KeyError, ValueError):\n        image_timestamp = EARLIEST_DATETIME\n    try:\n        created_timestamp = ciso8601.parse_datetime_as_naive(link['created_at'])\n    except ValueError:\n        created_timestamp = None\n    return (image_timestamp, created_timestamp)\n\ndef _get_docker_links(api_client, num_retries, **kwargs):\n    links = list(arvados.util.keyset_list_all(\n        api_client.links().list, num_retries=num_retries, **kwargs,\n    ))\n    for link in links:\n        link['_sort_key'] = docker_link_sort_key(link)\n    links.sort(key=itemgetter('_sort_key'), reverse=True)\n    return links\n\ndef _new_image_listing(link, dockerhash, repo='<none>', tag='<none>'):\n    timestamp_index = 1 if (link['_sort_key'][0] is EARLIEST_DATETIME) else 0\n    return {\n        '_sort_key': link['_sort_key'],\n        'timestamp': link['_sort_key'][timestamp_index],\n        'collection': link['head_uuid'],\n        'dockerhash': dockerhash,\n        'repo': repo,\n        'tag': tag,\n        }\n\ndef list_images_in_arv(api_client, num_retries, image_name=None, image_tag=None, project_uuid=None):\n    \"\"\"List all Docker images known to the api_client with image_name and\n    image_tag.  If no image_name is given, defaults to listing all\n    Docker images.\n\n    Returns a list of tuples representing matching Docker images,\n    sorted in preference order (i.e. the first collection in the list\n    is the one that the API server would use). 
Each tuple is a\n    (collection_uuid, collection_info) pair, where collection_info is\n    a dict with fields \"dockerhash\", \"repo\", \"tag\", and \"timestamp\".\n\n    \"\"\"\n    search_filters = []\n    repo_links = None\n    hash_links = None\n\n    project_filter = []\n    if project_uuid is not None:\n        project_filter = [[\"owner_uuid\", \"=\", project_uuid]]\n\n    if image_name:\n        # Find images with the name the user specified.\n        search_links = _get_docker_links(\n            api_client, num_retries,\n            filters=[['link_class', '=', 'docker_image_repo+tag'],\n                     ['name', '=',\n                      '{}:{}'.format(image_name, image_tag or 'latest')]]+project_filter)\n        if search_links:\n            repo_links = search_links\n        else:\n            # Fall back to finding images with the specified image hash.\n            search_links = _get_docker_links(\n                api_client, num_retries,\n                filters=[['link_class', '=', 'docker_image_hash'],\n                         ['name', 'ilike', image_name + '%']]+project_filter)\n            hash_links = search_links\n        # Only list information about images that were found in the search.\n        search_filters.append(['head_uuid', 'in',\n                               [link['head_uuid'] for link in search_links]])\n\n    # It should be reasonable to expect that each collection only has one\n    # image hash (though there may be many links specifying this).  Find\n    # the API server's most preferred image hash link for each collection.\n    if hash_links is None:\n        hash_links = _get_docker_links(\n            api_client, num_retries,\n            filters=search_filters + [['link_class', '=', 'docker_image_hash']]+project_filter)\n    hash_link_map = {link['head_uuid']: link for link in reversed(hash_links)}\n\n    # Each collection may have more than one name (though again, one name\n    # may be specified more than once).  
Build an image listing from name\n    # tags, sorted by API server preference.\n    if repo_links is None:\n        repo_links = _get_docker_links(\n            api_client, num_retries,\n            filters=search_filters + [['link_class', '=',\n                                       'docker_image_repo+tag']]+project_filter)\n    seen_image_names = collections.defaultdict(set)\n    images = []\n    for link in repo_links:\n        collection_uuid = link['head_uuid']\n        if link['name'] in seen_image_names[collection_uuid]:\n            continue\n        seen_image_names[collection_uuid].add(link['name'])\n        try:\n            dockerhash = hash_link_map[collection_uuid]['name']\n        except KeyError:\n            dockerhash = '<unknown>'\n        name_parts = link['name'].rsplit(':', 1)\n        images.append(_new_image_listing(link, dockerhash, *name_parts))\n\n    # Find any image hash links that did not have a corresponding name link,\n    # and add image listings for them, retaining the API server preference\n    # sorting.\n    images_start_size = len(images)\n    for collection_uuid, link in hash_link_map.items():\n        if not seen_image_names[collection_uuid]:\n            images.append(_new_image_listing(link, link['name']))\n    if len(images) > images_start_size:\n        images.sort(key=itemgetter('_sort_key'), reverse=True)\n\n    # Remove any image listings that refer to unknown collections.\n    existing_coll_uuids = {coll['uuid'] for coll in arvados.util.keyset_list_all(\n        api_client.collections().list,\n        num_retries=num_retries,\n        filters=[['uuid', 'in', [im['collection'] for im in images]]]+project_filter,\n        select=['uuid'],\n    )}\n    return [(image['collection'], image) for image in images\n            if image['collection'] in existing_coll_uuids]\n\ndef items_owned_by(owner_uuid, arv_items):\n    return (item for item in arv_items if item['owner_uuid'] == owner_uuid)\n\ndef _uuid2pdh(api, uuid):\n    return api.collections().list(\n        filters=[['uuid', '=', uuid]],\n        select=['portable_data_hash'],\n    ).execute()['items'][0]['portable_data_hash']\n\ndef load_image_metadata(image_file):\n    \"\"\"Load an image manifest and config from an archive\n\n    Given an image archive as an open binary file object, this function loads\n    the image manifest and configuration, deserializing each from JSON and\n    returning them in a 2-tuple of dicts.\n    \"\"\"\n    image_file.seek(0)\n    with tarfile.open(fileobj=image_file) as image_tar:\n        with image_tar.extractfile('manifest.json') as manifest_file:\n            image_manifest_list = json.load(manifest_file)\n        # Because arv-keepdocker only saves one image, there should only be\n        # one manifest.  
This extracts that from the list and raises\n        # ValueError if there's not exactly one.\n        image_manifest, = image_manifest_list\n        with image_tar.extractfile(image_manifest['Config']) as config_file:\n            image_config = json.load(config_file)\n    return image_manifest, image_config\n\ndef main(arguments=None, stdout=sys.stdout, stderr=sys.stderr, install_sig_handlers=True, api=None):\n    args = arg_parser.parse_args(arguments)\n    if api is None:\n        api = arvados.api('v1', num_retries=args.retries)\n\n    if args.image is None or args.image == 'images':\n        fmt = \"{:30}  {:10}  {:12}  {:29}  {:20}\\n\"\n        stdout.write(fmt.format(\"REPOSITORY\", \"TAG\", \"IMAGE ID\", \"COLLECTION\", \"CREATED\"))\n        try:\n            for i, j in list_images_in_arv(api, args.retries):\n                stdout.write(fmt.format(j[\"repo\"], j[\"tag\"], j[\"dockerhash\"][0:12], i, j[\"timestamp\"].strftime(\"%c\")))\n        except IOError as e:\n            if e.errno == errno.EPIPE:\n                pass\n            else:\n                raise\n        sys.exit(0)\n\n    if re.search(r':\\w[-.\\w]{0,127}$', args.image):\n        # image ends with :valid-tag\n        if args.tag is not None:\n            logger.error(\n                \"image %r already includes a tag, cannot add tag argument %r\",\n                args.image, args.tag)\n            sys.exit(1)\n        # rsplit() accommodates \"myrepo.example:8888/repo/image:tag\"\n        args.image, args.tag = args.image.rsplit(':', 1)\n    elif args.tag is None:\n        args.tag = 'latest'\n\n    if '/' in args.image:\n        hostport, path = args.image.split('/', 1)\n        if hostport.endswith(':443'):\n            # \"docker pull host:443/asdf\" transparently removes the\n            # :443 (which is redundant because https is implied) and\n            # after it succeeds \"docker images\" will list \"host/asdf\",\n            # not \"host:443/asdf\".  
If we strip the :443 then the name\n            # doesn't change underneath us.\n            args.image = '/'.join([hostport[:-4], path])\n\n    # Pull the image if requested, unless the image is specified as a hash\n    # that we already have.\n    if args.pull and not find_image_hashes(args.image):\n        pull_image(args.image, args.tag)\n\n    images_in_arv = list_images_in_arv(api, args.retries, args.image, args.tag)\n\n    image_hash = None\n    try:\n        image_hash = find_one_image_hash(args.image, args.tag)\n        if not docker_image_compatible(api, image_hash):\n            if args.force_image_format:\n                logger.warning(\"forcing incompatible image\")\n            else:\n                logger.error(\"refusing to store \" \\\n                    \"incompatible format (use --force-image-format to override)\")\n                sys.exit(1)\n    except DockerError as error:\n        if images_in_arv:\n            # We don't have Docker / we don't have the image locally,\n            # use image that's already uploaded to Arvados\n            image_hash = images_in_arv[0][1]['dockerhash']\n        else:\n            logger.error(str(error))\n            sys.exit(1)\n\n    image_repo_tag = '{}:{}'.format(args.image, args.tag) if not image_hash.startswith(args.image.lower()) else None\n\n    if args.name is None:\n        if image_repo_tag:\n            collection_name = 'Docker image {} {}'.format(image_repo_tag.replace(\"/\", \" \"), image_hash[0:12])\n        else:\n            collection_name = 'Docker image {}'.format(image_hash[0:12])\n    else:\n        collection_name = args.name\n\n    # Acquire a lock so that only one arv-keepdocker process will\n    # dump/upload a particular docker image at a time.  Do this before\n    # checking if the image already exists in Arvados so that if there\n    # is an upload already underway, when that upload completes and\n    # this process gets a turn, it will discover the Docker image is\n    # already available and exit quickly.\n    outfile_name = '{}.tar'.format(image_hash)\n    lockfile_name = '{}.lock'.format(outfile_name)\n    lockfile = None\n    cache_dir = get_cache_dir()\n    if cache_dir:\n        lockfile = open(os.path.join(cache_dir, lockfile_name), 'w+')\n        fcntl.flock(lockfile, fcntl.LOCK_EX)\n\n    try:\n        if not args.force:\n            # Check if this image is already in Arvados.\n\n            # Project where everything should be owned\n            parent_project_uuid = args.project_uuid or api.users().current().execute(\n                num_retries=args.retries)['uuid']\n\n            # Find image hash tags\n            existing_links = _get_docker_links(\n                api, args.retries,\n                filters=[['link_class', '=', 'docker_image_hash'],\n                         ['name', '=', image_hash]])\n            if existing_links:\n                # get readable collections\n                collections = api.collections().list(\n                    filters=[['uuid', 'in', [link['head_uuid'] for link in existing_links]]],\n                    select=[\"uuid\", \"owner_uuid\", \"name\", \"manifest_text\"]\n                    ).execute(num_retries=args.retries)['items']\n\n                if collections:\n                    # check for repo+tag links on these collections\n                    if image_repo_tag:\n                        existing_repo_tag = _get_docker_links(\n                            api, args.retries,\n                            filters=[['link_class', '=', 
'docker_image_repo+tag'],\n                                     ['name', '=', image_repo_tag],\n                                     ['head_uuid', 'in', [c[\"uuid\"] for c in collections]]])\n                    else:\n                        existing_repo_tag = []\n\n                    try:\n                        coll_uuid = next(items_owned_by(parent_project_uuid, collections))['uuid']\n                    except StopIteration:\n                        # create new collection owned by the project\n                        coll_uuid = api.collections().create(\n                            body={\"manifest_text\": collections[0]['manifest_text'],\n                                  \"name\": collection_name,\n                                  \"owner_uuid\": parent_project_uuid,\n                                  \"properties\": {\"docker-image-repo-tag\": image_repo_tag}},\n                            ensure_unique_name=True\n                            ).execute(num_retries=args.retries)['uuid']\n\n                    link_base = {'owner_uuid': parent_project_uuid,\n                                 'head_uuid':  coll_uuid,\n                                 'properties': existing_links[0]['properties']}\n\n                    if not any(items_owned_by(parent_project_uuid, existing_links)):\n                        # create image link owned by the project\n                        make_link(api, args.retries,\n                                  'docker_image_hash', image_hash, **link_base)\n\n                    if image_repo_tag and not any(items_owned_by(parent_project_uuid, existing_repo_tag)):\n                        # create repo+tag link owned by the project\n                        make_link(api, args.retries, 'docker_image_repo+tag',\n                                  image_repo_tag, **link_base)\n\n                    stdout.write(coll_uuid + \"\\n\")\n\n                    sys.exit(0)\n\n        # Open a file for the saved image, and write it if needed.\n        image_file, need_save = prep_image_file(outfile_name)\n        if need_save:\n            save_image(image_hash, image_file)\n\n        # Call arv-put with switches we inherited from it\n        # (a.k.a., switches that aren't our own).\n        if arguments is None:\n            arguments = sys.argv[1:]\n        arguments = [i for i in arguments if i not in (args.image, args.tag, image_repo_tag)]\n        put_args = keepdocker_parser.parse_known_args(arguments)[1]\n\n        # Don't fail when cached manifest is invalid, just ignore the cache.\n        put_args += ['--batch']\n\n        if args.name is None:\n            put_args += ['--name', collection_name]\n\n        coll_uuid = arv_put.main(\n            put_args + ['--filename', outfile_name, image_file.name], stdout=stdout,\n            install_sig_handlers=install_sig_handlers).strip()\n\n        # Managed properties could be already set\n        coll_properties = api.collections().get(uuid=coll_uuid).execute(num_retries=args.retries).get('properties', {})\n        coll_properties.update({\"docker-image-repo-tag\": image_repo_tag})\n        api.collections().update(uuid=coll_uuid, body={\"properties\": coll_properties}).execute(num_retries=args.retries)\n\n        _, image_metadata = load_image_metadata(image_file)\n        link_base = {'head_uuid': coll_uuid, 'properties': {}}\n        if 'created' in image_metadata:\n            link_base['properties']['image_timestamp'] = image_metadata['created']\n        if args.project_uuid is not None:\n            
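# Store the docker_image_hash and repo+tag metadata links in the\n            # requested project instead of the uploading user's home project.\n            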
link_base['owner_uuid'] = args.project_uuid\n\n        make_link(api, args.retries, 'docker_image_hash', image_hash, **link_base)\n        if image_repo_tag:\n            make_link(api, args.retries,\n                      'docker_image_repo+tag', image_repo_tag, **link_base)\n\n        # Clean up.\n        image_file.close()\n        for filename in [stat_cache_name(image_file), image_file.name]:\n            try:\n                os.unlink(filename)\n            except OSError as error:\n                if error.errno != errno.ENOENT:\n                    raise\n    finally:\n        if lockfile is not None:\n            # Closing the lockfile unlocks it.\n            lockfile.close()\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "sdk/python/arvados/commands/ls.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport collections\nimport logging\nimport re\nimport sys\n\nimport arvados\nimport arvados.commands._util as arv_cmd\n\nfrom arvados._version import __version__\n\nFileInfo = collections.namedtuple('FileInfo', ['stream_name', 'name', 'size'])\n\ndef parse_args(args):\n    parser = argparse.ArgumentParser(\n        description='List contents of a manifest',\n        parents=[arv_cmd.retry_opt])\n\n    parser.add_argument('locator', type=str,\n                        help=\"\"\"Collection UUID or locator, optionally with a subdir path.\"\"\")\n    parser.add_argument('-s', action='store_true',\n                        help=\"\"\"List file sizes, in KiB.\"\"\")\n    parser.add_argument('--version', action='version',\n                        version=\"%s %s\" % (sys.argv[0], __version__),\n                        help='Print version and exit.')\n\n    return parser.parse_args(args)\n\ndef size_formatter(coll_file):\n    return \"{:>10}\".format((coll_file.size + 1023) // 1024)\n\ndef name_formatter(coll_file):\n    return \"{}/{}\".format(coll_file.stream_name, coll_file.name)\n\ndef main(arguments=None, stdout=sys.stdout, stderr=sys.stderr, api_client=None, logger=None):\n    args = parse_args(arguments)\n\n    if api_client is None:\n        api_client = arvados.api('v1', num_retries=args.retries)\n\n    if logger is None:\n        logger = logging.getLogger('arvados.arv-ls')\n\n    try:\n        r = re.search(r'^(.*?)(/.*)?$', args.locator)\n        collection = r.group(1)\n        get_prefix = r.group(2)\n\n        cr = arvados.CollectionReader(collection, api_client=api_client,\n                                      num_retries=args.retries)\n        if get_prefix:\n            if get_prefix[-1] == '/':\n                get_prefix = get_prefix[:-1]\n            stream_name = '.' + get_prefix\n            reader = cr.find(stream_name)\n            if not (isinstance(reader, arvados.CollectionReader) or\n                    isinstance(reader, arvados.collection.Subcollection)):\n                logger.error(\"'{}' is not a subdirectory\".format(get_prefix))\n                return 1\n        else:\n            stream_name = '.'\n            reader = cr\n    except (arvados.errors.ApiError,\n            arvados.errors.ArgumentError,\n            arvados.errors.NotFoundError) as error:\n        logger.error(\"error fetching collection: {}\".format(error))\n        return 1\n\n    formatters = []\n    if args.s:\n        formatters.append(size_formatter)\n    formatters.append(name_formatter)\n\n    for f in files_in_collection(reader, stream_name):\n        print(*(info_func(f) for info_func in formatters), file=stdout)\n\n    return 0\n\ndef files_in_collection(c, stream_name='.'):\n    # Sort first by file type, then alphabetically by file path.\n    for i in sorted(c.keys(),\n                    key=lambda k: (\n                        isinstance(c[k], arvados.collection.Subcollection),\n                        k.upper())):\n        if isinstance(c[i], arvados.arvfile.ArvadosFile):\n            yield FileInfo(stream_name=stream_name,\n                           name=i,\n                           size=c[i].size())\n        elif isinstance(c[i], arvados.collection.Subcollection):\n            for f in files_in_collection(c[i], \"{}/{}\".format(stream_name, i)):\n                yield f\n"
  },
  {
    "path": "sdk/python/arvados/commands/put.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport arvados\nimport arvados.collection\nimport base64\nimport ciso8601\nimport copy\nimport datetime\nimport errno\nimport fcntl\nimport fnmatch\nimport hashlib\nimport json\nimport logging\nimport os\nimport pwd\nimport re\nimport signal\nimport socket\nimport sys\nimport tempfile\nimport threading\nimport time\nimport traceback\n\nfrom pathlib import Path\n\nimport arvados.util\nimport arvados.commands._util as arv_cmd\n\nfrom apiclient import errors as apiclient_errors\nfrom arvados._internal import basedirs\nfrom arvados._version import __version__\n\napi_client = None\n\nupload_opts = argparse.ArgumentParser(add_help=False)\n\nupload_opts.add_argument('--version', action='version',\n                         version=\"%s %s\" % (sys.argv[0], __version__),\n                         help='Print version and exit.')\nupload_opts.add_argument('paths', metavar='path', type=str, nargs='*',\n                         help=\"\"\"\nLocal file or directory. If path is a directory reference with a trailing\nslash, then just upload the directory's contents; otherwise upload the\ndirectory itself. Default: read from standard input.\n\"\"\")\n\n_group = upload_opts.add_mutually_exclusive_group()\n\n_group.add_argument('--max-manifest-depth', type=int, metavar='N',\n                    default=-1, help=argparse.SUPPRESS)\n\n_group.add_argument('--normalize', action='store_true',\n                    help=\"\"\"\nNormalize the manifest by re-ordering files and streams after writing\ndata.\n\"\"\")\n\n_group.add_argument('--dry-run', action='store_true', default=False,\n                    help=\"\"\"\nDon't actually upload files, but only check if any file should be\nuploaded. Exit with code=2 when files are pending for upload.\n\"\"\")\n\n_group = upload_opts.add_mutually_exclusive_group()\n\n_group.add_argument('--as-stream', action='store_true', dest='stream',\n                    help=\"\"\"\nSynonym for --stream.\n\"\"\")\n\n_group.add_argument('--stream', action='store_true',\n                    help=\"\"\"\nStore the file content and display the resulting manifest on\nstdout. Do not save a Collection object in Arvados.\n\"\"\")\n\n_group.add_argument('--as-manifest', action='store_true', dest='manifest',\n                    help=\"\"\"\nSynonym for --manifest.\n\"\"\")\n\n_group.add_argument('--in-manifest', action='store_true', dest='manifest',\n                    help=\"\"\"\nSynonym for --manifest.\n\"\"\")\n\n_group.add_argument('--manifest', action='store_true',\n                    help=\"\"\"\nStore the file data and resulting manifest in Keep, save a Collection\nobject in Arvados, and display the manifest locator (Collection uuid)\non stdout. This is the default behavior.\n\"\"\")\n\n_group.add_argument('--as-raw', action='store_true', dest='raw',\n                    help=\"\"\"\nSynonym for --raw.\n\"\"\")\n\n_group.add_argument('--raw', action='store_true',\n                    help=\"\"\"\nStore the file content and display the data block locators on stdout,\nseparated by commas, with a trailing newline. Do not store a\nmanifest.\n\"\"\")\n\nupload_opts.add_argument('--update-collection', type=str, default=None,\n                         dest='update_collection', metavar=\"UUID\", help=\"\"\"\nUpdate an existing collection identified by the given Arvados collection\nUUID. 
All new local files will be uploaded.\n\"\"\")\n\nupload_opts.add_argument('--use-filename', type=str, default=None,\n                         dest='filename', help=\"\"\"\nSynonym for --filename.\n\"\"\")\n\nupload_opts.add_argument('--filename', type=str, default=None,\n                         help=\"\"\"\nUse the given filename in the manifest, instead of the name of the\nlocal file. This is useful when \"-\" or \"/dev/stdin\" is given as an\ninput file. It can be used only if there is exactly one path given and\nit is not a directory. Implies --manifest.\n\"\"\")\n\nupload_opts.add_argument('--portable-data-hash', action='store_true',\n                         help=\"\"\"\nPrint the portable data hash instead of the Arvados UUID for the collection\ncreated by the upload.\n\"\"\")\n\nupload_opts.add_argument('--replication', type=int, metavar='N', default=None,\n                         help=\"\"\"\nSet the replication level for the new collection: how many different\nphysical storage devices (e.g., disks) should have a copy of each data\nblock. Default is to use the server-provided default (if any) or 2.\n\"\"\")\n\nupload_opts.add_argument(\n    '--storage-classes',\n    type=arv_cmd.UniqueSplit(),\n    help=\"\"\"\nSpecify a comma-separated list of storage classes to be used when saving data to Keep.\n\"\"\")\n\nupload_opts.add_argument('--threads', type=int, metavar='N', default=None,\n                         help=\"\"\"\nSet the number of upload threads to be used. Take into account that\nusing lots of threads will increase the RAM requirements. Default is\nto use 2 threads.\nOn high-latency installations, using a greater number will improve\noverall throughput.\n\"\"\")\n\nupload_opts.add_argument('--exclude', metavar='PATTERN', default=[],\n                      action='append', help=\"\"\"\nExclude files and directories whose names match the given glob pattern. When\nusing a path-like pattern like 'subdir/*.txt', all text files inside the 'subdir'\ndirectory (relative to the provided input dirs) will be excluded.\nWhen using a filename pattern like '*.txt', any text file will be excluded\nno matter where it is placed.\nFor the special case of needing to exclude only files or dirs directly below\nthe given input directory, you can use a pattern like './exclude_this.gif'.\nYou can specify multiple patterns by using this argument more than once.\n\"\"\")\n\n_group = upload_opts.add_mutually_exclusive_group()\n_group.add_argument('--follow-links', action='store_true', default=True,\n                    dest='follow_links', help=\"\"\"\nFollow file and directory symlinks (default).\n\"\"\")\n_group.add_argument('--no-follow-links', action='store_false', dest='follow_links',\n                    help=\"\"\"\nIgnore file and directory symlinks. Even paths given explicitly on the\ncommand line will be skipped if they are symlinks.\n\"\"\")\n\n\nrun_opts = argparse.ArgumentParser(add_help=False)\n\nrun_opts.add_argument('--project-uuid', metavar='UUID', help=\"\"\"\nStore the collection in the specified project, instead of your Home\nproject.\n\"\"\")\n\nrun_opts.add_argument('--name', help=\"\"\"\nSave the collection with the specified name.\n\"\"\")\n\n_group = run_opts.add_mutually_exclusive_group()\n_group.add_argument('--progress', action='store_true',\n                    help=\"\"\"\nDisplay human-readable progress on stderr (bytes and, if possible,\npercentage of total data size). 
This is the default behavior when\nstderr is a tty.\n\"\"\")\n\n_group.add_argument('--no-progress', action='store_true',\n                    help=\"\"\"\nDo not display human-readable progress on stderr, even if stderr is a\ntty.\n\"\"\")\n\n_group.add_argument('--batch-progress', action='store_true',\n                    help=\"\"\"\nDisplay machine-readable progress on stderr (bytes and, if known,\ntotal data size).\n\"\"\")\n\nrun_opts.add_argument('--silent', action='store_true',\n                      help=\"\"\"\nDo not print any debug messages to console. (Any error messages will\nstill be displayed.)\n\"\"\")\n\nrun_opts.add_argument('--batch', action='store_true', default=False,\n                      help=\"\"\"\nRetries with '--no-resume --no-cache' if cached state contains invalid/expired\nblock signatures.\n\"\"\")\n\n_group = run_opts.add_mutually_exclusive_group()\n_group.add_argument('--resume', action='store_true', default=True,\n                    help=\"\"\"\nContinue interrupted uploads from cached state (default).\n\"\"\")\n_group.add_argument('--no-resume', action='store_false', dest='resume',\n                    help=\"\"\"\nDo not continue interrupted uploads from cached state.\n\"\"\")\n\n_group = run_opts.add_mutually_exclusive_group()\n_group.add_argument('--cache', action='store_true', dest='use_cache', default=True,\n                    help=\"\"\"\nSave upload state in a cache file for resuming (default).\n\"\"\")\n_group.add_argument('--no-cache', action='store_false', dest='use_cache',\n                    help=\"\"\"\nDo not save upload state in a cache file for resuming.\n\"\"\")\n\n_group = upload_opts.add_mutually_exclusive_group()\n_group.add_argument('--trash-at', metavar='YYYY-MM-DDTHH:MM', default=None,\n                    help=\"\"\"\nSet the trash date of the resulting collection to an absolute date in the future.\nThe accepted format is defined by the ISO 8601 standard. Examples: 20090103, 2009-01-03, 20090103T181505, 2009-01-03T18:15:05.\\n\nTimezone information can be added. 
If not, the provided date/time is assumed to be in the local system's timezone.\n\"\"\")\n_group.add_argument('--trash-after', type=int, metavar='DAYS', default=None,\n                    help=\"\"\"\nSet the trash date of the resulting collection to a number of days from the\ndate/time that the upload process finishes.\n\"\"\")\n\narg_parser = argparse.ArgumentParser(\n    description='Copy data from the local filesystem to Keep.',\n    parents=[upload_opts, run_opts, arv_cmd.retry_opt])\n\ndef parse_arguments(arguments):\n    args = arg_parser.parse_args(arguments)\n\n    if len(args.paths) == 0:\n        args.paths = ['-']\n\n    args.paths = [\"-\" if x == \"/dev/stdin\" else x for x in args.paths]\n\n    if args.filename and (len(args.paths) != 1 or os.path.isdir(args.paths[0])):\n        arg_parser.error(\"\"\"\n    --filename argument cannot be used when storing a directory or\n    multiple files.\n    \"\"\")\n\n    # Turn on --progress by default if stderr is a tty.\n    if (not (args.batch_progress or args.no_progress or args.silent)\n        and sys.stderr.isatty()):\n        args.progress = True\n\n    # Turn off --resume (default) if --no-cache is used.\n    if not args.use_cache:\n        args.resume = False\n\n    if args.paths == ['-']:\n        if args.update_collection:\n            arg_parser.error(\"\"\"\n    --update-collection cannot be used when reading from stdin.\n    \"\"\")\n        args.resume = False\n        args.use_cache = False\n        if not args.filename:\n            args.filename = 'stdin'\n\n    # Remove possible duplicated patterns\n    if len(args.exclude) > 0:\n        args.exclude = list(set(args.exclude))\n\n    return args\n\n\nclass PathDoesNotExistError(Exception):\n    pass\n\n\nclass CollectionUpdateError(Exception):\n    pass\n\n\nclass ResumeCacheConflict(Exception):\n    pass\n\n\nclass ResumeCacheInvalidError(Exception):\n    pass\n\nclass ArvPutArgumentConflict(Exception):\n    pass\n\n\nclass ArvPutUploadIsPending(Exception):\n    pass\n\n\nclass ArvPutUploadNotPending(Exception):\n    pass\n\n\nclass FileUploadList(list):\n    def __init__(self, dry_run=False):\n        list.__init__(self)\n        self.dry_run = dry_run\n\n    def append(self, other):\n        if self.dry_run:\n            raise ArvPutUploadIsPending()\n        super(FileUploadList, self).append(other)\n\n
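\n# Illustrative sketch (editor's example, not part of the upstream module; the\n# tuple below is a hypothetical entry with the same (source, resume_offset,\n# filename) shape that ArvPutUploadJob appends): with dry_run=True the first\n# append raises ArvPutUploadIsPending, which main() turns into the documented\n# --dry-run exit code 2 (\"files are pending for upload\"):\n#\n#     files = FileUploadList(dry_run=True)\n#     try:\n#         files.append(('src.txt', 0, 'src.txt'))\n#     except ArvPutUploadIsPending:\n#         pass  # at least one file is pending upload\n\n\n# Appends the X-Request-Id to the log message when log level is ERROR or DEBUG\nclass ArvPutLogFormatter(logging.Formatter):\n    std_fmtr = logging.Formatter(arvados.log_format, arvados.log_date_format)\n    err_fmtr = None\n    request_id_informed = False\n\n    def __init__(self, request_id):\n        self.err_fmtr = logging.Formatter(\n            arvados.log_format+' (X-Request-Id: {})'.format(request_id),\n            arvados.log_date_format)\n\n    def format(self, record):\n        if (not self.request_id_informed) and (record.levelno in (logging.DEBUG, logging.ERROR)):\n            self.request_id_informed = True\n            return self.err_fmtr.format(record)\n        return self.std_fmtr.format(record)\n\n\nclass ResumeCache(object):\n    CACHE_DIR = 'arv-put'\n\n    def __init__(self, file_spec):\n        self.cache_file = open(file_spec, 'a+')\n        self._lock_file(self.cache_file)\n        self.filename = self.cache_file.name\n\n    @classmethod\n    def make_path(cls, args):\n        md5 = hashlib.md5()\n        md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost').encode())\n        realpaths = sorted(os.path.realpath(path) for path 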
in args.paths)\n        md5.update(b'\\0'.join([p.encode() for p in realpaths]))\n        if any(os.path.isdir(path) for path in realpaths):\n            md5.update(b'-1')\n        elif args.filename:\n            md5.update(args.filename.encode())\n        cache_path = Path(cls.CACHE_DIR)\n        if len(cache_path.parts) == 1:\n            cache_path = basedirs.BaseDirectories('CACHE').storage_path(cache_path)\n        else:\n            # Note this is a noop if cache_path is absolute, which is what we want.\n            cache_path = Path.home() / cache_path\n            cache_path.mkdir(parents=True, exist_ok=True, mode=0o700)\n        return str(cache_path / md5.hexdigest())\n\n    def _lock_file(self, fileobj):\n        try:\n            fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)\n        except IOError:\n            raise ResumeCacheConflict(u\"{} locked\".format(fileobj.name))\n\n    def load(self):\n        self.cache_file.seek(0)\n        return json.load(self.cache_file)\n\n    def check_cache(self, api_client=None, num_retries=0):\n        try:\n            state = self.load()\n            locator = None\n            try:\n                if \"_finished_streams\" in state and len(state[\"_finished_streams\"]) > 0:\n                    locator = state[\"_finished_streams\"][0][1][0]\n                elif \"_current_stream_locators\" in state and len(state[\"_current_stream_locators\"]) > 0:\n                    locator = state[\"_current_stream_locators\"][0]\n                if locator is not None:\n                    kc = arvados.keep.KeepClient(api_client=api_client)\n                    kc.head(locator, num_retries=num_retries)\n            except Exception as e:\n                self.restart()\n        except (ValueError):\n            pass\n\n    def save(self, data):\n        try:\n            new_cache_fd, new_cache_name = tempfile.mkstemp(\n                dir=os.path.dirname(self.filename))\n            self._lock_file(new_cache_fd)\n            new_cache = os.fdopen(new_cache_fd, 'r+')\n            json.dump(data, new_cache)\n            os.rename(new_cache_name, self.filename)\n        except (IOError, OSError, ResumeCacheConflict):\n            try:\n                os.unlink(new_cache_name)\n            except NameError:  # mkstemp failed.\n                pass\n        else:\n            self.cache_file.close()\n            self.cache_file = new_cache\n\n    def close(self):\n        self.cache_file.close()\n\n    def destroy(self):\n        try:\n            os.unlink(self.filename)\n        except OSError as error:\n            if error.errno != errno.ENOENT:  # That's what we wanted anyway.\n                raise\n        self.close()\n\n    def restart(self):\n        self.destroy()\n        self.__init__(self.filename)\n\n\nclass ArvPutUploadJob(object):\n    CACHE_DIR = 'arv-put'\n    EMPTY_STATE = {\n        'manifest' : None, # Last saved manifest checkpoint\n        'files' : {} # Previous run file list: {path : {size, mtime}}\n    }\n\n    def __init__(self, paths, resume=True, use_cache=True, reporter=None,\n                 name=None, owner_uuid=None, api_client=None, batch_mode=False,\n                 ensure_unique_name=False, num_retries=None,\n                 put_threads=None, replication_desired=None, filename=None,\n                 update_time=60.0, update_collection=None, storage_classes=None,\n                 logger=logging.getLogger('arvados.arv_put'), dry_run=False,\n                 follow_links=True, exclude_paths=[], 
exclude_names=None,\n                 trash_at=None):\n        self.paths = paths\n        self.resume = resume\n        self.use_cache = use_cache\n        self.batch_mode = batch_mode\n        self.update = False\n        self.reporter = reporter\n        # This will be set to 0 before counting starts, if no special files are\n        # going to be read.\n        self.bytes_expected = None\n        self.bytes_written = 0\n        self.bytes_skipped = 0\n        self.name = name\n        self.owner_uuid = owner_uuid\n        self.ensure_unique_name = ensure_unique_name\n        self.num_retries = num_retries\n        self.replication_desired = replication_desired\n        self.put_threads = put_threads\n        self.filename = filename\n        self.storage_classes = storage_classes\n        self._api_client = api_client\n        self._state_lock = threading.Lock()\n        self._state = None # Previous run state (file list & manifest)\n        self._current_files = [] # Current run file list\n        self._cache_file = None\n        self._collection_lock = threading.Lock()\n        self._remote_collection = None # Collection being updated (if asked)\n        self._local_collection = None # Collection from previous run manifest\n        self._file_paths = set() # Files to be updated in remote collection\n        self._stop_checkpointer = threading.Event()\n        self._checkpointer = threading.Thread(target=self._update_task)\n        self._checkpointer.daemon = True\n        self._update_task_time = update_time  # How many seconds to wait between update runs\n        self._files_to_upload = FileUploadList(dry_run=dry_run)\n        self._upload_started = False\n        self.logger = logger\n        self.dry_run = dry_run\n        self._checkpoint_before_quit = True\n        self.follow_links = follow_links\n        self.exclude_paths = exclude_paths\n        self.exclude_names = exclude_names\n        self._trash_at = trash_at\n\n        if self._trash_at is not None:\n            if type(self._trash_at) not in [datetime.datetime, datetime.timedelta]:\n                raise TypeError('trash_at should be None, timezone-naive datetime or timedelta')\n            if type(self._trash_at) == datetime.datetime and self._trash_at.tzinfo is not None:\n                raise TypeError('provided trash_at datetime should be timezone-naive')\n\n        if not self.use_cache and self.resume:\n            raise ArvPutArgumentConflict('resume cannot be True when use_cache is False')\n\n        # Check for obvious dry-run responses\n        if self.dry_run and (not self.use_cache or not self.resume):\n            raise ArvPutUploadIsPending()\n\n        # Load cached data if any and if needed\n        self._setup_state(update_collection)\n\n        # Build the upload file list, excluding requested files and counting the\n        # bytes expected to be uploaded.\n        self._build_upload_list()\n\n    def _build_upload_list(self):\n        \"\"\"\n        Scan the requested paths to count file sizes, excluding requested files\n        and dirs and building the upload file list.\n        \"\"\"\n        # If there aren't special files to be read, reset total bytes count to zero\n        # to start counting.\n        if not any([p for p in self.paths\n                    if not (os.path.isfile(p) or os.path.isdir(p))]):\n            self.bytes_expected = 0\n\n        for path in self.paths:\n            # Test for stdin first, in case some file named '-' exists\n            if path == '-':\n                if 
self.dry_run:\n                    raise ArvPutUploadIsPending()\n                self._write_stdin(self.filename or 'stdin')\n            elif not os.path.exists(path):\n                raise PathDoesNotExistError(u\"file or directory '{}' does not exist.\".format(path))\n            elif (not self.follow_links) and os.path.islink(path):\n                self.logger.warning(\"Skipping symlink '{}'\".format(path))\n                continue\n            elif os.path.isdir(path):\n                # Use absolute paths on cache index so CWD doesn't interfere\n                # with the caching logic.\n                orig_path = path\n                path = os.path.abspath(path)\n                if orig_path[-1:] == os.sep:\n                    # When passing a directory reference with a trailing slash,\n                    # its contents should be uploaded directly to the\n                    # collection's root.\n                    prefixdir = path\n                else:\n                    # When passing a directory reference with no trailing slash,\n                    # upload the directory itself (as a subdirectory) to the\n                    # collection's root.\n                    prefixdir = os.path.dirname(path)\n                prefixdir += os.sep\n
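                # Example (editor's illustration; '/data/dir' and 'a.txt' are\n                # made-up names): if /data/dir contains a.txt, then\n                #   arv-put /data/dir   -> prefixdir='/data/',     stores 'dir/a.txt'\n                #   arv-put /data/dir/  -> prefixdir='/data/dir/', stores 'a.txt'\n                for root, dirs, files in os.walk(path,\n                                                 followlinks=self.follow_links):\n                    root_relpath = os.path.relpath(root, path)\n                    if root_relpath == '.':\n                        root_relpath = ''\n                    # Exclude files/dirs by full path matching pattern\n                    if self.exclude_paths:\n                        dirs[:] = [d for d in dirs\n                                   if not any(pathname_match(\n                                           os.path.join(root_relpath, d), pat)\n                                              for pat in self.exclude_paths)]\n                        files = [f for f in files\n                                 if not any(pathname_match(\n                                         os.path.join(root_relpath, f), pat)\n                                            for pat in self.exclude_paths)]\n                    # Exclude files/dirs by name matching pattern\n                    if self.exclude_names is not None:\n                        dirs[:] = [d for d in dirs\n                                   if not self.exclude_names.match(d)]\n                        files = [f for f in files\n                                 if not self.exclude_names.match(f)]\n                    # Make os.walk()'s dir traversing order deterministic\n                    dirs.sort()\n                    files.sort()\n                    for f in files:\n                        filepath = os.path.join(root, f)\n                        if not os.path.isfile(filepath):\n                            self.logger.warning(\"Skipping non-regular file '{}'\".format(filepath))\n                            continue\n                        # Add its size to the total bytes count (if applicable)\n                        if self.follow_links or (not os.path.islink(filepath)):\n                            if self.bytes_expected is not None:\n                                self.bytes_expected += os.path.getsize(filepath)\n                        self._check_file(filepath,\n                                         os.path.join(root[len(prefixdir):], f))\n            else:\n                filepath = os.path.abspath(path)\n                # Add its size to the total bytes count (if 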
applicable)\n                if self.follow_links or (not os.path.islink(filepath)):\n                    if self.bytes_expected is not None:\n                        self.bytes_expected += os.path.getsize(filepath)\n                self._check_file(filepath,\n                                 self.filename or os.path.basename(path))\n        # If dry-run mode is on and we got this far, there are no files pending\n        # upload, so report that.\n        if self.dry_run:\n            raise ArvPutUploadNotPending()\n        # Remove local_collection's files that don't exist locally anymore, so the\n        # bytes_written count is correct.\n        for f in self.collection_file_paths(self._local_collection,\n                                            path_prefix=\"\"):\n            if f != 'stdin' and f != self.filename and f not in self._file_paths:\n                self._local_collection.remove(f)\n\n    def start(self, save_collection):\n        \"\"\"\n        Start supporting thread & file uploading\n        \"\"\"\n        self._checkpointer.start()\n        try:\n            # Update bytes_written from current local collection and\n            # report initial progress.\n            self._update()\n            # Actual file upload\n            self._upload_started = True # Used by the update thread to start checkpointing\n            self._upload_files()\n        except (SystemExit, Exception) as e:\n            self._checkpoint_before_quit = False\n            # Log stack trace only when Ctrl-C isn't pressed (SIGINT)\n            # Note: We're expecting SystemExit instead of\n            # KeyboardInterrupt because we have a custom signal\n            # handler in place that raises SystemExit with the caught\n            # signal's code.\n            if isinstance(e, PathDoesNotExistError):\n                # We aren't interested in the traceback for this case\n                pass\n            elif not isinstance(e, SystemExit) or e.code != -2:\n                self.logger.warning(\"Abnormal termination:\\n{}\".format(\n                    traceback.format_exc()))\n            raise\n        finally:\n            if not self.dry_run:\n                # Stop the thread before doing anything else\n                self._stop_checkpointer.set()\n                self._checkpointer.join()\n                if self._checkpoint_before_quit:\n                    # Commit all pending blocks & one last _update()\n                    self._local_collection.manifest_text()\n                    self._update(final=True)\n                    if save_collection:\n                        self.save_collection()\n            if self.use_cache:\n                self._cache_file.close()\n\n    def _collection_trash_at(self):\n        \"\"\"\n        Returns the trash date that the collection should use at save time.\n        Takes into account absolute/relative trash_at values requested\n        by the user.\n        \"\"\"\n        if type(self._trash_at) == datetime.timedelta:\n            # Get an absolute datetime for trash_at\n            return datetime.datetime.utcnow() + self._trash_at\n        return self._trash_at\n\n    def save_collection(self):\n        if self.update:\n            # Check if files should be updated on the remote collection.\n            for fp in self._file_paths:\n                remote_file = self._remote_collection.find(fp)\n                if not remote_file:\n                    # File doesn't exist on the remote collection, copy it.\n                    
self._remote_collection.copy(fp, fp, self._local_collection)\n                elif remote_file != self._local_collection.find(fp):\n                    # A different file exists on the remote collection, overwrite it.\n                    self._remote_collection.copy(fp, fp, self._local_collection, overwrite=True)\n                else:\n                    # The file already exists on the remote collection, skip it.\n                    pass\n            self._remote_collection.save(num_retries=self.num_retries,\n                                         trash_at=self._collection_trash_at())\n        else:\n            if len(self._local_collection) == 0:\n                self.logger.warning(\"No files were uploaded, skipping collection creation.\")\n                return\n            self._local_collection.save_new(\n                name=self.name, owner_uuid=self.owner_uuid,\n                ensure_unique_name=self.ensure_unique_name,\n                num_retries=self.num_retries,\n                trash_at=self._collection_trash_at())\n\n    def destroy_cache(self):\n        if self.use_cache:\n            try:\n                os.unlink(self._cache_filename)\n            except OSError as error:\n                # ENOENT means the file was already gone, which is what we wanted.\n                if error.errno != errno.ENOENT:\n                    raise\n            self._cache_file.close()\n\n    def _collection_size(self, collection):\n        \"\"\"\n        Recursively get the total size of the collection\n        \"\"\"\n        size = 0\n        for item in collection.values():\n            if isinstance(item, arvados.collection.Collection) or isinstance(item, arvados.collection.Subcollection):\n                size += self._collection_size(item)\n            else:\n                size += item.size()\n        return size\n\n    def _update_task(self):\n        \"\"\"\n        Periodically called support task. 
File uploading is\n        asynchronous so we poll status from the collection.\n        \"\"\"\n        while not self._stop_checkpointer.wait(1 if not self._upload_started else self._update_task_time):\n            self._update()\n\n    def _update(self, final=False):\n        \"\"\"\n        Update cached manifest text and report progress.\n        \"\"\"\n        if self._upload_started:\n            with self._collection_lock:\n                self.bytes_written = self._collection_size(self._local_collection)\n                if self.use_cache:\n                    if final:\n                        manifest = self._local_collection.manifest_text()\n                    else:\n                        # Get the manifest text without committing pending blocks\n                        manifest = self._local_collection.manifest_text(strip=False,\n                                                                        normalize=False,\n                                                                        only_committed=True)\n                    # Update cache\n                    with self._state_lock:\n                        self._state['manifest'] = manifest\n            if self.use_cache:\n                try:\n                    self._save_state()\n                except Exception as e:\n                    self.logger.error(\"Unexpected error trying to save cache file: {}\".format(e))\n            # Keep remote collection's trash_at attribute synced when using relative expire dates\n            if self._remote_collection is not None and type(self._trash_at) == datetime.timedelta:\n                try:\n                    self._api_client.collections().update(\n                        uuid=self._remote_collection.manifest_locator(),\n                        body={'trash_at': self._collection_trash_at().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")}\n                    ).execute(num_retries=self.num_retries)\n                except Exception as e:\n                    self.logger.error(\"Unexpected error trying to update remote collection's expire date: {}\".format(e))\n        else:\n            self.bytes_written = self.bytes_skipped\n        # Call the reporter, if any\n        self.report_progress()\n\n    def report_progress(self):\n        if self.reporter is not None:\n            self.reporter(self.bytes_written, self.bytes_expected)\n\n    def _write_stdin(self, filename):\n        output = self._local_collection.open(filename, 'wb')\n        self._write(sys.stdin.buffer, output)\n        output.close()\n\n    def _check_file(self, source, filename):\n        \"\"\"\n        Check if this file needs to be uploaded\n        \"\"\"\n        # Ignore symlinks when requested\n        if (not self.follow_links) and os.path.islink(source):\n            return\n        resume_offset = 0\n        should_upload = False\n        new_file_in_cache = False\n        # Record file path for updating the remote collection before exiting\n        self._file_paths.add(filename)\n\n        with self._state_lock:\n            # If no previous cached data on this file, store it for an eventual\n            # repeated run.\n            if source not in self._state['files']:\n                self._state['files'][source] = {\n                    'mtime': os.path.getmtime(source),\n                    'size' : os.path.getsize(source)\n                }\n                new_file_in_cache = True\n            cached_file_data = self._state['files'][source]\n\n        # Check if file was already uploaded (at 
least partially)\n        file_in_local_collection = self._local_collection.find(filename)\n\n        # If not resuming, upload the full file.\n        if not self.resume:\n            should_upload = True\n        # New file detected from last run, upload it.\n        elif new_file_in_cache:\n            should_upload = True\n        # Local file didn't change from last run.\n        elif cached_file_data['mtime'] == os.path.getmtime(source) and cached_file_data['size'] == os.path.getsize(source):\n            if not file_in_local_collection:\n                # File not uploaded yet, upload it completely\n                should_upload = True\n            elif file_in_local_collection.permission_expired():\n                # Permission token expired, re-upload file. This will change whenever\n                # we have an API for refreshing tokens.\n                self.logger.warning(u\"Uploaded file '{}' access token expired, will re-upload it from scratch\".format(filename))\n                should_upload = True\n                self._local_collection.remove(filename)\n            elif cached_file_data['size'] == file_in_local_collection.size():\n                # File already there, skip it.\n                self.bytes_skipped += cached_file_data['size']\n            elif cached_file_data['size'] > file_in_local_collection.size():\n                # File partially uploaded, resume!\n                resume_offset = file_in_local_collection.size()\n                self.bytes_skipped += resume_offset\n                should_upload = True\n            else:\n                # Inconsistent cache, re-upload the file\n                should_upload = True\n                self._local_collection.remove(filename)\n                self.logger.warning(u\"Uploaded version of file '{}' is bigger than local version, will re-upload it from scratch.\".format(source))\n        # Local file differs from cached data, re-upload it.\n        else:\n            if file_in_local_collection:\n                self._local_collection.remove(filename)\n            should_upload = True\n\n        if should_upload:\n            try:\n                self._files_to_upload.append((source, resume_offset, filename))\n            except ArvPutUploadIsPending:\n                # This could happen when running in dry-run mode, close cache file to\n                # avoid locking issues.\n                self._cache_file.close()\n                raise\n\n    def _upload_files(self):\n        for source, resume_offset, filename in self._files_to_upload:\n            with open(source, 'rb') as source_fd:\n                with self._state_lock:\n                    self._state['files'][source]['mtime'] = os.path.getmtime(source)\n                    self._state['files'][source]['size'] = os.path.getsize(source)\n                if resume_offset > 0:\n                    # Start upload where we left off\n                    output = self._local_collection.open(filename, 'ab')\n                    source_fd.seek(resume_offset)\n                else:\n                    # Start from scratch\n                    output = self._local_collection.open(filename, 'wb')\n                self._write(source_fd, output)\n                output.close(flush=False)\n\n    def _write(self, source_fd, output):\n        while True:\n            data = source_fd.read(arvados.config.KEEP_BLOCK_SIZE)\n            if not data:\n                break\n            output.write(data)\n\n    def _my_collection(self):\n        return self._remote_collection 
if self.update else self._local_collection\n\n    def _get_cache_filepath(self):\n        # Set up cache file name from input paths.\n        md5 = hashlib.md5()\n        md5.update(arvados.config.get('ARVADOS_API_HOST', '!nohost').encode())\n        realpaths = sorted(os.path.realpath(path) for path in self.paths)\n        md5.update(b'\\0'.join([p.encode() for p in realpaths]))\n        if self.filename:\n            md5.update(self.filename.encode())\n        cache_path = Path(self.CACHE_DIR)\n        if len(cache_path.parts) == 1:\n            cache_path = basedirs.BaseDirectories('CACHE').storage_path(cache_path)\n        else:\n            # Note this is a noop if cache_path is absolute, which is what we want.\n            cache_path = Path.home() / cache_path\n            cache_path.mkdir(parents=True, exist_ok=True, mode=0o700)\n        return str(cache_path / md5.hexdigest())\n\n    def _setup_state(self, update_collection):\n        \"\"\"\n        Create a new cache file or load a previously existing one.\n        \"\"\"\n        # Load an already existing collection for update\n        if update_collection and re.match(arvados.util.collection_uuid_pattern,\n                                          update_collection):\n            try:\n                self._remote_collection = arvados.collection.Collection(\n                    update_collection,\n                    api_client=self._api_client,\n                    storage_classes_desired=self.storage_classes,\n                    num_retries=self.num_retries)\n            except arvados.errors.ApiError as error:\n                raise CollectionUpdateError(\"Cannot read collection {} ({})\".format(update_collection, error))\n            else:\n                self.update = True\n        elif update_collection:\n            # Collection locator provided, but unknown format\n            raise CollectionUpdateError(\"Collection locator unknown: '{}'\".format(update_collection))\n\n        if self.use_cache:\n            cache_filepath = self._get_cache_filepath()\n            if self.resume and os.path.exists(cache_filepath):\n                self.logger.info(u\"Resuming upload from cache file {}\".format(cache_filepath))\n                self._cache_file = open(cache_filepath, 'a+')\n            else:\n                # --no-resume means start with an empty cache file.\n                self.logger.info(u\"Creating new cache file at {}\".format(cache_filepath))\n                self._cache_file = open(cache_filepath, 'w+')\n            self._cache_filename = self._cache_file.name\n            self._lock_file(self._cache_file)\n            self._cache_file.seek(0)\n\n        with self._state_lock:\n            if self.use_cache:\n                try:\n                    self._state = json.load(self._cache_file)\n                    if not set(['manifest', 'files']).issubset(set(self._state.keys())):\n                        # Cache at least partially incomplete, set up new cache\n                        self._state = copy.deepcopy(self.EMPTY_STATE)\n                except ValueError:\n                    # Cache file empty, set up new cache\n                    self._state = copy.deepcopy(self.EMPTY_STATE)\n            else:\n                self.logger.info(\"No cache usage requested for this run.\")\n                # No cache file, set empty state\n                self._state = copy.deepcopy(self.EMPTY_STATE)\n            if not self._cached_manifest_valid():\n                if not self.batch_mode:\n                    raise 
ResumeCacheInvalidError()\n                else:\n                    self.logger.info(\"Invalid signatures on cache file '{}' while being run in 'batch mode' -- continuing anyway.\".format(self._cache_file.name))\n                    self.use_cache = False # Don't overwrite preexisting cache file.\n                    self._state = copy.deepcopy(self.EMPTY_STATE)\n            # Load the previous manifest so we can check if files were modified remotely.\n            self._local_collection = arvados.collection.Collection(\n                self._state['manifest'],\n                replication_desired=self.replication_desired,\n                storage_classes_desired=self.storage_classes,\n                put_threads=self.put_threads,\n                api_client=self._api_client,\n                num_retries=self.num_retries)\n\n    def _cached_manifest_valid(self):\n        \"\"\"\n        Validate the oldest non-expired block signature to check whether the cached\n        manifest is usable, i.e. whether it was created with the same Arvados\n        account.\n        \"\"\"\n        if self._state.get('manifest', None) is None:\n            # No cached manifest yet, all good.\n            return True\n        now = datetime.datetime.utcnow()\n        oldest_exp = None\n        oldest_loc = None\n        block_found = False\n        for m in arvados.util.keep_locator_pattern.finditer(self._state['manifest']):\n            loc = m.group(0)\n            try:\n                exp = datetime.datetime.utcfromtimestamp(int(loc.split('@')[1], 16))\n            except IndexError:\n                # Locator without signature\n                continue\n            block_found = True\n            if exp > now and (oldest_exp is None or exp < oldest_exp):\n                oldest_exp = exp\n                oldest_loc = loc\n        if not block_found:\n            # No block signatures found => no invalid block signatures.\n            return True\n        if oldest_loc is None:\n            # Locator signatures found, but all have expired.\n            # Reset the cache and move on.\n            self.logger.info('Cache expired, starting from scratch.')\n            self._state['manifest'] = ''\n            return True\n        kc = arvados.KeepClient(api_client=self._api_client,\n                                num_retries=self.num_retries)\n        try:\n            kc.head(oldest_loc)\n        except arvados.errors.KeepRequestError:\n            # Something is wrong, cached manifest is not valid.\n            return False\n        return True\n\n    def collection_file_paths(self, col, path_prefix='.'):\n        \"\"\"Return a list of file paths by recursively going through the entire collection `col`\"\"\"\n        file_paths = []\n        for name, item in col.items():\n            if isinstance(item, arvados.arvfile.ArvadosFile):\n                file_paths.append(os.path.join(path_prefix, name))\n            elif isinstance(item, arvados.collection.Subcollection):\n                new_prefix = os.path.join(path_prefix, name)\n                file_paths += self.collection_file_paths(item, path_prefix=new_prefix)\n        return file_paths\n\n    def _lock_file(self, fileobj):\n        try:\n            fcntl.flock(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)\n        except IOError:\n            raise ResumeCacheConflict(u\"{} locked\".format(fileobj.name))\n\n    def _save_state(self):\n        \"\"\"\n        Atomically save current state into cache.\n        \"\"\"\n        with 
self._state_lock:\n            # We're not using copy.deepcopy() here because it's a lot slower\n            # than json.dumps(), and we already need the JSON format to be\n            # saved on disk.\n            state = json.dumps(self._state)\n        try:\n            new_cache = tempfile.NamedTemporaryFile(\n                mode='w+',\n                dir=os.path.dirname(self._cache_filename), delete=False)\n            self._lock_file(new_cache)\n            new_cache.write(state)\n            new_cache.flush()\n            os.fsync(new_cache)\n            os.rename(new_cache.name, self._cache_filename)\n        except (IOError, OSError, ResumeCacheConflict) as error:\n            self.logger.error(\"There was a problem while saving the cache file: {}\".format(error))\n            try:\n                os.unlink(new_cache.name)\n            except NameError:  # NamedTemporaryFile() itself failed, nothing to clean up.\n                pass\n        else:\n            self._cache_file.close()\n            self._cache_file = new_cache\n\n    def collection_name(self):\n        return self._my_collection().api_response()['name'] if self._my_collection().api_response() else None\n\n    def collection_trash_at(self):\n        return self._my_collection().get_trash_at()\n\n    def manifest_locator(self):\n        return self._my_collection().manifest_locator()\n\n    def portable_data_hash(self):\n        pdh = self._my_collection().portable_data_hash()\n        m = self._my_collection().stripped_manifest().encode()\n        local_pdh = '{}+{}'.format(hashlib.md5(m).hexdigest(), len(m))\n        if pdh != local_pdh:\n            self.logger.warning(\"\\n\".join([\n                \"arv-put: API server provided PDH differs from local manifest.\",\n                \"         This should not happen; showing API server version.\"]))\n        return pdh\n\n    def manifest_text(self, stream_name=\".\", strip=False, normalize=False):\n        return self._my_collection().manifest_text(stream_name, strip, normalize)\n\n    def _datablocks_on_item(self, item):\n        \"\"\"\n        Return a list of datablock locators, recursively navigating\n        through subcollections\n        \"\"\"\n        if isinstance(item, arvados.arvfile.ArvadosFile):\n            if item.size() == 0:\n                # Empty file locator\n                return [\"d41d8cd98f00b204e9800998ecf8427e+0\"]\n            else:\n                locators = []\n                for segment in item.segments():\n                    loc = segment.locator\n                    locators.append(loc)\n                return locators\n        elif isinstance(item, arvados.collection.Collection):\n            l = [self._datablocks_on_item(x) for x in item.values()]\n            # Fast list flattener method taken from:\n            # http://stackoverflow.com/questions/952914/making-a-flat-list-out-of-list-of-lists-in-python\n            return [loc for sublist in l for loc in sublist]\n        else:\n            return None\n\n    def data_locators(self):\n        with self._collection_lock:\n            # Make sure all datablocks are flushed before getting the locators\n            self._my_collection().manifest_text()\n            datablocks = self._datablocks_on_item(self._my_collection())\n        return datablocks\n\n_machine_format = \"{} {}: {{}} written {{}} total\\n\".format(sys.argv[0],\n                                                            os.getpid())\n\n# Simulate glob.glob() matching behavior without the need to scan the filesystem\n# Note: fnmatch() 
doesn't work correctly when used with pathnames. For example, the\n# pattern 'tests/*.py' will match 'tests/run_test.py' and also 'tests/subdir/run_test.py',\n# so instead we're using it on every path component.\ndef pathname_match(pathname, pattern):\n    name = pathname.split(os.sep)\n    # Fix patterns like 'some/subdir/' or 'some//subdir'\n    pat = [x for x in pattern.split(os.sep) if x != '' and x != '.']\n    if len(name) != len(pat):\n        return False\n    for i in range(len(name)):\n        if not fnmatch.fnmatch(name[i], pat[i]):\n            return False\n    return True\n
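\n# Illustrative examples (editor's sketch; these inputs are made up, not from\n# the test suite):\n#\n#     pathname_match('tests/run_test.py', 'tests/*.py')          # True\n#     pathname_match('tests/subdir/run_test.py', 'tests/*.py')   # False: extra path component\n#     pathname_match('tests/run_test.py', './tests//*.py')       # True: '.' and '' pattern components are dropped\n\ndef machine_progress(bytes_written, bytes_expected):\n    return _machine_format.format(\n        bytes_written, -1 if (bytes_expected is None) else bytes_expected)\n\ndef human_progress(bytes_written, bytes_expected):\n    if bytes_expected:\n        return \"\\r{}M / {}M {:.1%} \".format(\n            bytes_written >> 20, bytes_expected >> 20,\n            float(bytes_written) / bytes_expected)\n    else:\n        return \"\\r{} \".format(bytes_written)\n\ndef progress_writer(progress_func, outfile=sys.stderr):\n    def write_progress(bytes_written, bytes_expected):\n        outfile.write(progress_func(bytes_written, bytes_expected))\n    return write_progress\n\ndef desired_project_uuid(api_client, project_uuid, num_retries):\n    if not project_uuid:\n        query = api_client.users().current()\n    elif arvados.util.user_uuid_pattern.match(project_uuid):\n        query = api_client.users().get(uuid=project_uuid)\n    elif arvados.util.group_uuid_pattern.match(project_uuid):\n        query = api_client.groups().get(uuid=project_uuid)\n    else:\n        raise ValueError(\"Not a valid project UUID: {}\".format(project_uuid))\n    return query.execute(num_retries=num_retries)['uuid']\n\ndef main(arguments=None, stdout=sys.stdout, stderr=sys.stderr,\n         install_sig_handlers=True):\n    global api_client\n\n    args = parse_arguments(arguments)\n    logger = logging.getLogger('arvados.arv_put')\n    if args.silent:\n        logger.setLevel(logging.WARNING)\n    else:\n        logger.setLevel(logging.INFO)\n    status = 0\n\n    request_id = arvados.util.new_request_id()\n\n    formatter = ArvPutLogFormatter(request_id)\n    logging.getLogger('arvados').handlers[0].setFormatter(formatter)\n\n    if api_client is None:\n        api_client = arvados.api('v1', request_id=request_id, num_retries=args.retries)\n\n    if install_sig_handlers:\n        arv_cmd.install_signal_handlers()\n\n    # Trash arguments validation\n    trash_at = None\n    if args.trash_at is not None:\n        # ciso8601 considers YYYYMM as invalid but YYYY-MM as valid, so here we\n        # make sure the user provides a complete YYYY-MM-DD date.\n        if not re.match(r'^\\d{4}(?P<dash>-?)\\d{2}?(?P=dash)\\d{2}', args.trash_at):\n            logger.error(\"--trash-at argument format invalid, use --help to see examples.\")\n            sys.exit(1)\n        # Check if no time information was provided. 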
In that case, assume end-of-day.\n        if re.match(r'^\\d{4}(?P<dash>-?)\\d{2}?(?P=dash)\\d{2}$', args.trash_at):\n            args.trash_at += 'T23:59:59'\n        try:\n            trash_at = ciso8601.parse_datetime(args.trash_at)\n        except ValueError:\n            logger.error(\"--trash-at argument format invalid, use --help to see examples.\")\n            sys.exit(1)\n        else:\n            if trash_at.tzinfo is not None:\n                # Timezone aware datetime provided.\n                utcoffset = -trash_at.utcoffset()\n            else:\n                # Timezone naive datetime provided. Assume it is local.\n                if time.daylight:\n                    utcoffset = datetime.timedelta(seconds=time.altzone)\n                else:\n                    utcoffset = datetime.timedelta(seconds=time.timezone)\n            # Convert to UTC timezone naive datetime.\n            trash_at = trash_at.replace(tzinfo=None) + utcoffset\n\n        if trash_at <= datetime.datetime.utcnow():\n            logger.error(\"--trash-at argument must be set in the future\")\n            sys.exit(1)\n    if args.trash_after is not None:\n        if args.trash_after < 1:\n            logger.error(\"--trash-after argument must be >= 1\")\n            sys.exit(1)\n        trash_at = datetime.timedelta(seconds=(args.trash_after * 24 * 60 * 60))\n\n    # Determine the name to use\n    if args.name:\n        if args.stream or args.raw:\n            logger.error(\"Cannot use --name with --stream or --raw\")\n            sys.exit(1)\n        elif args.update_collection:\n            logger.error(\"Cannot use --name with --update-collection\")\n            sys.exit(1)\n        collection_name = args.name\n    else:\n        collection_name = \"Saved at {} by {}@{}\".format(\n            datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S UTC\"),\n            pwd.getpwuid(os.getuid()).pw_name,\n            socket.gethostname())\n\n    if args.project_uuid and (args.stream or args.raw):\n        logger.error(\"Cannot use --project-uuid with --stream or --raw\")\n        sys.exit(1)\n\n    # Determine the parent project\n    try:\n        project_uuid = desired_project_uuid(api_client, args.project_uuid,\n                                            args.retries)\n    except (apiclient_errors.Error, ValueError) as error:\n        logger.error(error)\n        sys.exit(1)\n\n    if args.progress:\n        reporter = progress_writer(human_progress)\n    elif args.batch_progress:\n        reporter = progress_writer(machine_progress)\n    else:\n        reporter = None\n\n    # Setup exclude regex from all the --exclude arguments provided\n    name_patterns = []\n    exclude_paths = []\n    exclude_names = None\n    if len(args.exclude) > 0:\n        # We're supporting 2 kinds of exclusion patterns:\n        # 1)   --exclude '*.jpg'    (file/dir name patterns, will only match\n        #                            the name, wherever the file is on the tree)\n        # 2.1) --exclude 'foo/bar'  (file/dir path patterns, will match the\n        #                            entire path, and should be relative to\n        #                            any input dir argument)\n        # 2.2) --exclude './*.jpg'  (Special case for excluding files/dirs\n        #                            placed directly underneath the input dir)\n        for p in args.exclude:\n            # Only relative path patterns are allowed\n            if p.startswith(os.sep):\n                logger.error(\"Cannot use absolute paths with 
--exclude\")\n                sys.exit(1)\n            if os.path.dirname(p):\n                # We don't support of path patterns with '..'\n                p_parts = p.split(os.sep)\n                if '..' in p_parts:\n                    logger.error(\n                        \"Cannot use path patterns that include or '..'\")\n                    sys.exit(1)\n                # Path search pattern\n                exclude_paths.append(p)\n            else:\n                # Name-only search pattern\n                name_patterns.append(p)\n        # For name only matching, we can combine all patterns into a single\n        # regexp, for better performance.\n        exclude_names = re.compile('|'.join(\n            [fnmatch.translate(p) for p in name_patterns]\n        )) if len(name_patterns) > 0 else None\n        # Show the user the patterns to be used, just in case they weren't\n        # specified inside quotes and got changed by the shell expansion.\n        logger.info(\"Exclude patterns: {}\".format(args.exclude))\n\n    # If this is used by a human, and there's at least one directory to be\n    # uploaded, the expected bytes calculation can take a moment.\n    if args.progress and any([os.path.isdir(f) for f in args.paths]):\n        logger.info(\"Calculating upload size, this could take some time...\")\n    try:\n        writer = ArvPutUploadJob(paths = args.paths,\n                                 resume = args.resume,\n                                 use_cache = args.use_cache,\n                                 batch_mode= args.batch,\n                                 filename = args.filename,\n                                 reporter = reporter,\n                                 api_client = api_client,\n                                 num_retries = args.retries,\n                                 replication_desired = args.replication,\n                                 put_threads = args.threads,\n                                 name = collection_name,\n                                 owner_uuid = project_uuid,\n                                 ensure_unique_name = True,\n                                 update_collection = args.update_collection,\n                                 storage_classes=args.storage_classes,\n                                 logger=logger,\n                                 dry_run=args.dry_run,\n                                 follow_links=args.follow_links,\n                                 exclude_paths=exclude_paths,\n                                 exclude_names=exclude_names,\n                                 trash_at=trash_at)\n    except ResumeCacheConflict:\n        logger.error(\"\\n\".join([\n            \"arv-put: Another process is already uploading this data.\",\n            \"         Use --no-cache if this is really what you want.\"]))\n        sys.exit(1)\n    except ResumeCacheInvalidError:\n        logger.error(\"\\n\".join([\n            \"arv-put: Resume cache contains invalid signature: it may have expired\",\n            \"         or been created with another Arvados user's credentials.\",\n            \"         Switch user or use one of the following options to restart upload:\",\n            \"         --no-resume to start a new resume cache.\",\n            \"         --no-cache to disable resume cache.\",\n            \"         --batch to ignore the resume cache if invalid.\"]))\n        sys.exit(1)\n    except (CollectionUpdateError, PathDoesNotExistError) as error:\n        logger.error(\"\\n\".join([\n        
    \"arv-put: %s\" % str(error)]))\n        sys.exit(1)\n    except ArvPutUploadIsPending:\n        # Dry run check successful, return proper exit code.\n        sys.exit(2)\n    except ArvPutUploadNotPending:\n        # No files pending for upload\n        sys.exit(0)\n\n    if not args.dry_run and not args.update_collection and args.resume and writer.bytes_written > 0:\n        logger.warning(\"\\n\".join([\n            \"arv-put: Resuming previous upload from last checkpoint.\",\n            \"         Use the --no-resume option to start over.\"]))\n\n    if not args.dry_run:\n        writer.report_progress()\n    output = None\n    try:\n        writer.start(save_collection=not(args.stream or args.raw))\n    except (arvados.errors.ApiError, arvados.errors.KeepWriteError) as error:\n        logger.error(\"\\n\".join([\n            \"arv-put: %s\" % str(error)]))\n        sys.exit(1)\n\n    if args.progress:  # Print newline to split stderr from stdout for humans.\n        logger.info(\"\\n\")\n\n    if args.stream:\n        if args.normalize:\n            output = writer.manifest_text(normalize=True)\n        else:\n            output = writer.manifest_text()\n    elif args.raw:\n        output = ','.join(writer.data_locators())\n    elif writer.manifest_locator() is not None:\n        try:\n            expiration_notice = \"\"\n            if writer.collection_trash_at() is not None:\n                # Get the local timezone-naive version, and log it with timezone information.\n                if time.daylight:\n                    local_trash_at = writer.collection_trash_at().replace(tzinfo=None) - datetime.timedelta(seconds=time.altzone)\n                else:\n                    local_trash_at = writer.collection_trash_at().replace(tzinfo=None) - datetime.timedelta(seconds=time.timezone)\n                expiration_notice = \". It will expire on {} {}.\".format(\n                    local_trash_at.strftime(\"%Y-%m-%d %H:%M:%S\"), time.strftime(\"%z\"))\n            if args.update_collection:\n                logger.info(u\"Collection updated: '{}'{}\".format(\n                    writer.collection_name(), expiration_notice))\n            else:\n                logger.info(u\"Collection saved as '{}'{}\".format(\n                    writer.collection_name(), expiration_notice))\n            if args.portable_data_hash:\n                output = writer.portable_data_hash()\n            else:\n                output = writer.manifest_locator()\n        except apiclient_errors.Error as error:\n            logger.error(\n                \"arv-put: Error creating Collection on project: {}.\".format(\n                    error))\n            status = 1\n    else:\n        status = 1\n\n    # Print the locator (uuid) of the new collection.\n    if output is None:\n        status = status or 1\n    elif not args.silent:\n        stdout.write(output)\n        if not output.endswith('\\n'):\n            stdout.write('\\n')\n\n    if install_sig_handlers:\n        arv_cmd.restore_signal_handlers()\n\n    if status != 0:\n        sys.exit(status)\n\n    # Success!\n    return output\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "sdk/python/arvados/commands/run.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n# Copyright (C) 2018 Genome Research Ltd.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport arvados\nimport arvados.commands.ws as ws\nimport argparse\nimport json\nimport re\nimport os\nimport stat\nfrom . import put\nimport time\nimport subprocess\nimport logging\nimport sys\nimport errno\nimport arvados.commands._util as arv_cmd\nimport arvados.collection\nimport arvados.config as config\n\nfrom arvados._version import __version__\n\nlogger = logging.getLogger('arvados.arv-run')\nlogger.setLevel(logging.INFO)\n\nclass ArvFile(object):\n    def __init__(self, prefix, fn):\n        self.prefix = prefix\n        self.fn = fn\n\n    def __hash__(self):\n        return (self.prefix+self.fn).__hash__()\n\n    def __eq__(self, other):\n        return (self.prefix == other.prefix) and (self.fn == other.fn)\n\nclass UploadFile(ArvFile):\n    pass\n\n# Determine if a file is in a collection, and return a tuple consisting of the\n# portable data hash and the path relative to the root of the collection.\n# Return None if the path isn't within an arv-mount collection or there is an\n# error.\ndef is_in_collection(root, branch):\n    try:\n        if root == \"/\":\n            return (None, None)\n        fn = os.path.join(root, \".arvados#collection\")\n        if os.path.exists(fn):\n            with open(fn, 'r') as f:\n                c = json.load(f)\n            return (c[\"portable_data_hash\"], branch)\n        else:\n            sp = os.path.split(root)\n            return is_in_collection(sp[0], os.path.join(sp[1], branch))\n    except (IOError, OSError):\n        return (None, None)\n\n# Determine the project to place the output of this command by searching upward\n# for an arv-mount pseudofile indicating the project.  If the cwd isn't within\n# an arv-mount project or there is an error, return current_user.\ndef determine_project(root, current_user):\n    try:\n        if root == \"/\":\n            return current_user\n        fn = os.path.join(root, \".arvados#project\")\n        if os.path.exists(fn):\n            with open(fn, 'r') as f:\n                c = json.load(f)\n            if 'writable_by' in c and current_user in c['writable_by']:\n                return c[\"uuid\"]\n            else:\n                return current_user\n        else:\n            sp = os.path.split(root)\n            return determine_project(sp[0], current_user)\n    except (IOError, OSError):\n        return current_user\n\n# Determine if a string corresponds to a file, and if that file is part of an\n# arv-mounted collection or only local to the machine.  
Returns one of\n# ArvFile() (file already exists in a collection), UploadFile() (file needs to\n# be uploaded to a collection), or simply returns prefix+fn (which yields the\n# original parameter string).\ndef statfile(prefix, fn, fnPattern=\"$(file %s/%s)\", dirPattern=\"$(dir %s/%s/)\", raiseOSError=False):\n    absfn = os.path.abspath(fn)\n    try:\n        st = os.stat(absfn)\n        sp = os.path.split(absfn)\n        (pdh, branch) = is_in_collection(sp[0], sp[1])\n        if pdh:\n            if stat.S_ISREG(st.st_mode):\n                return ArvFile(prefix, fnPattern % (pdh, branch))\n            elif stat.S_ISDIR(st.st_mode):\n                return ArvFile(prefix, dirPattern % (pdh, branch))\n            else:\n                raise Exception(\"%s is not a regular file or directory\" % absfn)\n        else:\n            # trim leading '/' for path prefix test later\n            return UploadFile(prefix, absfn[1:])\n    except OSError as e:\n        if e.errno == errno.ENOENT and not raiseOSError:\n            pass\n        else:\n            raise\n\n    return prefix+fn\n\ndef write_file(collection, pathprefix, fn, flush=False):\n    with open(os.path.join(pathprefix, fn), \"rb\") as src:\n        dst = collection.open(fn, \"wb\")\n        r = src.read(1024*128)\n        while r:\n            dst.write(r)\n            r = src.read(1024*128)\n        dst.close(flush=flush)\n\ndef uploadfiles(files, api, dry_run=False, num_retries=0,\n                project=None,\n                fnPattern=\"$(file %s/%s)\",\n                name=None,\n                collection=None,\n                packed=True):\n    # Find the smallest path prefix that includes all the files that need to be uploaded.\n    # This starts at the root and iteratively removes common parent directory prefixes\n    # until all file paths no longer have a common parent.\n    if files:\n        n = True\n        pathprefix = \"/\"\n        while n:\n            pathstep = None\n            for c in files:\n                if pathstep is None:\n                    sp = c.fn.split('/')\n                    if len(sp) < 2:\n                        # no parent directories left\n                        n = False\n                        break\n                    # path step takes next directory\n                    pathstep = sp[0] + \"/\"\n                else:\n                    # check if pathstep is common prefix for all files\n                    if not c.fn.startswith(pathstep):\n                        n = False\n                        break\n            if n:\n                # pathstep is common parent directory for all files, so remove the prefix\n                # from each path\n                pathprefix += pathstep\n                for c in files:\n                    c.fn = c.fn[len(pathstep):]\n\n        logger.info(\"Upload local files: \\\"%s\\\"\", '\" \"'.join([c.fn for c in files]))\n\n    if dry_run:\n        logger.info(\"$(input) is %s\", pathprefix.rstrip('/'))\n        pdh = \"$(input)\"\n    else:\n        files = sorted(files, key=lambda x: x.fn)\n        if collection is None:\n            collection = arvados.collection.Collection(api_client=api, num_retries=num_retries)\n        prev = \"\"\n        for f in files:\n            localpath = os.path.join(pathprefix, f.fn)\n            if prev and localpath.startswith(prev+\"/\"):\n                # If this path is inside an already uploaded subdirectory,\n                # don't redundantly re-upload it.\n                # e.g. 
we uploaded /tmp/foo and the next file is /tmp/foo/bar\n                # skip it because it starts with \"/tmp/foo/\"\n                continue\n            prev = localpath\n            if os.path.isfile(localpath):\n                write_file(collection, pathprefix, f.fn, not packed)\n            elif os.path.isdir(localpath):\n                for root, dirs, iterfiles in os.walk(localpath):\n                    root = root[len(pathprefix):]\n                    for src in iterfiles:\n                        write_file(collection, pathprefix, os.path.join(root, src), not packed)\n\n        pdh = None\n        if len(collection) > 0:\n            # non-empty collection\n            filters = [[\"portable_data_hash\", \"=\", collection.portable_data_hash()]]\n            name_pdh = \"%s (%s)\" % (name, collection.portable_data_hash())\n            if name:\n                filters.append([\"name\", \"=\", name_pdh])\n            if project:\n                filters.append([\"owner_uuid\", \"=\", project])\n\n            # do the list / create in a loop with up to 2 tries as we are using `ensure_unique_name=False`\n            # and there is a potential race with other workflows that may have created the collection\n            # between when we list it and find it does not exist and when we attempt to create it.\n            tries = 2\n            while pdh is None and tries > 0:\n                exists = api.collections().list(filters=filters, limit=1).execute(num_retries=num_retries)\n\n                if exists[\"items\"]:\n                    item = exists[\"items\"][0]\n                    pdh = item[\"portable_data_hash\"]\n                    logger.info(\"Using collection %s (%s)\", pdh, item[\"uuid\"])\n                else:\n                    try:\n                        collection.save_new(name=name_pdh, owner_uuid=project, ensure_unique_name=False)\n                        pdh = collection.portable_data_hash()\n                        logger.info(\"Uploaded to %s (%s)\", pdh, collection.manifest_locator())\n                    except arvados.errors.ApiError:\n                        tries -= 1\n            if pdh is None:\n                # Something weird is going on here: probably a collection\n                # with a conflicting name but the wrong PDH.  We won't be\n                # able to reuse it, but we still need to save our\n                # collection, so save it with a unique name.\n                logger.info(\"Name conflict on '%s', existing collection has an unexpected portable data hash\", name_pdh)\n                collection.save_new(name=name_pdh, owner_uuid=project, ensure_unique_name=True)\n                pdh = collection.portable_data_hash()\n                logger.info(\"Uploaded to %s (%s)\", pdh, collection.manifest_locator())\n        else:\n            # empty collection\n            pdh = collection.portable_data_hash()\n            assert (pdh == config.EMPTY_BLOCK_LOCATOR), \"Empty collection portable_data_hash did not have expected locator, was %s\" % pdh\n            logger.debug(\"Using empty collection %s\", pdh)\n\n    for c in files:\n        c.keepref = \"%s/%s\" % (pdh, c.fn)\n        c.fn = fnPattern % (pdh, c.fn)\n\n\ndef main(arguments=None):\n    raise Exception(\"Legacy arv-run removed.\")\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "sdk/python/arvados/commands/ws.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport json\nimport logging\nimport signal\nimport sys\n\nimport arvados\nfrom arvados.events import subscribe\nfrom arvados._version import __version__\nfrom . import _util as arv_cmd\n\ndef main(arguments=None):\n    logger = logging.getLogger('arvados.arv-ws')\n\n    parser = argparse.ArgumentParser(parents=[arv_cmd.retry_opt])\n    parser.add_argument('--version', action='version',\n                        version=\"%s %s\" % (sys.argv[0], __version__),\n                        help='Print version and exit.')\n    parser.add_argument('-u', '--uuid', type=str, default=\"\", help=\"Filter events on object_uuid\")\n    parser.add_argument('-f', '--filters', type=str, default=\"\", help=\"Arvados query filter to apply to log events (JSON encoded)\")\n    parser.add_argument('-s', '--start-time', type=str, default=\"\", help=\"Arvados query filter to fetch log events created at or after this time. This will be server time in UTC. Allowed format: YYYY-MM-DD or YYYY-MM-DD hh:mm:ss\")\n    parser.add_argument('-i', '--id', type=int, default=None, help=\"Start from given log id.\")\n\n    group = parser.add_mutually_exclusive_group()\n    group.add_argument('--poll-interval', default=15, type=int, help=\"If websockets are not available, specify the polling interval; default is every 15 seconds\")\n    group.add_argument('--no-poll', action='store_false', dest='poll_interval', help=\"Do not poll if websockets are not available, just fail\")\n\n    group = parser.add_mutually_exclusive_group()\n    group.add_argument('-p', '--pipeline', type=str, default=\"\", help=\"Supply pipeline uuid, print log output from pipeline and its jobs\")\n    group.add_argument('-j', '--job', type=str, default=\"\", help=\"Supply job uuid, print log output from jobs\")\n\n    args = parser.parse_args(arguments)\n\n    global filters\n    global known_component_jobs\n    global ws\n\n    filters = []\n    known_component_jobs = set()\n    ws = None\n\n    def update_subscribed_components(components):\n        global known_component_jobs\n        global filters\n        pipeline_jobs = set()\n        for c in components:\n            if \"job\" in components[c]:\n                pipeline_jobs.add(components[c][\"job\"][\"uuid\"])\n        if known_component_jobs != pipeline_jobs:\n            new_filters = [['object_uuid', 'in', [args.pipeline] + list(pipeline_jobs)]]\n            ws.subscribe(new_filters)\n            ws.unsubscribe(filters)\n            filters = new_filters\n            known_component_jobs = pipeline_jobs\n\n    api = arvados.api('v1', num_retries=args.retries)\n\n    if args.uuid:\n        filters += [ ['object_uuid', '=', args.uuid] ]\n\n    if args.filters:\n        filters += json.loads(args.filters)\n\n    if args.job:\n        filters += [ ['object_uuid', '=', args.job] ]\n\n    if args.pipeline:\n        filters += [ ['object_uuid', '=', args.pipeline] ]\n\n    if args.start_time:\n        last_log_id = 1\n        filters += [ ['created_at', '>=', args.start_time] ]\n    else:\n        last_log_id = None\n\n    if args.id:\n        last_log_id = args.id-1\n\n    def on_message(ev):\n        global filters\n        global ws\n\n        logger.debug(ev)\n        if 'event_type' in ev and (args.pipeline or args.job):\n            if ev['event_type'] in ('stderr', 'stdout'):\n                sys.stdout.write(ev[\"properties\"][\"text\"])\n            elif 
ev[\"event_type\"] in (\"create\", \"update\"):\n                if ev[\"object_kind\"] == \"arvados#pipelineInstance\":\n                    c = api.pipeline_instances().get(uuid=ev[\"object_uuid\"]).execute()\n                    update_subscribed_components(c[\"components\"])\n\n                if ev[\"object_kind\"] == \"arvados#pipelineInstance\" and args.pipeline:\n                    if ev[\"properties\"][\"new_attributes\"][\"state\"] in (\"Complete\", \"Failed\", \"Paused\"):\n                        ws.close()\n\n                if ev[\"object_kind\"] == \"arvados#job\" and args.job:\n                    if ev[\"properties\"][\"new_attributes\"][\"state\"] in (\"Complete\", \"Failed\", \"Cancelled\"):\n                        ws.close()\n        elif 'status' in ev and ev['status'] == 200:\n            pass\n        else:\n            print(json.dumps(ev))\n\n    try:\n        ws = subscribe(arvados.api('v1'), filters, on_message, poll_fallback=args.poll_interval, last_log_id=last_log_id)\n        if ws:\n            if args.pipeline:\n                c = api.pipeline_instances().get(uuid=args.pipeline).execute()\n                update_subscribed_components(c[\"components\"])\n                if c[\"state\"] in (\"Complete\", \"Failed\", \"Paused\"):\n                    ws.close()\n            ws.run_forever()\n    except KeyboardInterrupt:\n        pass\n    except Exception as e:\n        logger.error(e)\n    finally:\n        if ws:\n            ws.close()\n"
  },
  {
    "path": "sdk/python/arvados/config.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# config.py - configuration settings and global variables for Arvados clients\n#\n# Arvados configuration settings are taken from $HOME/.config/arvados.\n# Environment variables override settings in the config file.\n\nimport os\nimport re\n\nfrom typing import (\n    Callable,\n    Iterable,\n    Union,\n)\n\nfrom . import util\nfrom ._internal import basedirs\n\n_settings = None\ndefault_config_file = ''\n\"\"\"\n.. WARNING:: Deprecated\n   Default configuration initialization now searches for the \"default\"\n   configuration in several places. This value no longer has any effect.\n\"\"\"\n\nKEEP_BLOCK_SIZE = 2**26\nEMPTY_BLOCK_LOCATOR = 'd41d8cd98f00b204e9800998ecf8427e+0'\n\ndef initialize(\n        config_file: Union[\n            str,\n            os.PathLike,\n            Callable[[str], Iterable[os.PathLike]],\n        ]=basedirs.BaseDirectories('CONFIG').search,\n) -> None:\n    global _settings\n    _settings = {}\n\n    if callable(config_file):\n        search_paths = iter(config_file('settings.conf'))\n        config_file = next(search_paths, '')\n\n    # load the specified config file if available\n    try:\n        _settings = load(config_file)\n    except IOError:\n        pass\n\n    # override any settings with environment vars\n    for var in os.environ:\n        if var.startswith('ARVADOS_'):\n            _settings[var] = os.environ[var]\n\ndef load(config_file):\n    cfg = {}\n    with open(config_file, \"r\") as f:\n        for config_line in f:\n            if re.match(r'^\\s*(?:#|$)', config_line):\n                continue\n            var, val = config_line.rstrip().split('=', 1)\n            cfg[var] = val\n    return cfg\n\ndef flag_is_true(key, d=None):\n    if d is None:\n        d = settings()\n    return d.get(key, '').lower() in set(['1', 't', 'true', 'y', 'yes'])\n\ndef get(key, default_val=None):\n    return settings().get(key, default_val)\n\ndef settings():\n    if _settings is None:\n        initialize()\n    return _settings\n"
  },
  {
    "path": "sdk/python/arvados/errors.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# errors.py - Arvados-specific exceptions.\n\nimport json\n\nfrom apiclient import errors as apiclient_errors\nfrom collections import OrderedDict\n\nclass ApiError(apiclient_errors.HttpError):\n    def _get_reason(self):\n        try:\n            return '; '.join(json.loads(self.content.decode('utf-8'))['errors'])\n        except (KeyError, TypeError, ValueError):\n            return super(ApiError, self)._get_reason()\n\n\nclass KeepRequestError(Exception):\n    \"\"\"Base class for errors accessing Keep services.\"\"\"\n    def __init__(self, message='', request_errors=(), label=\"\"):\n        \"\"\"KeepRequestError(message='', request_errors=(), label=\"\")\n\n        :message:\n          A human-readable message describing what Keep operation\n          failed.\n\n        :request_errors:\n          An iterable of 2-tuples mapping a key (which identifies the\n          operation or service that was attempted) to the error encountered\n          when talking to it--either an exception, or an HTTP response\n          object.  These will be packed into an OrderedDict, available\n          through the request_errors() method.\n\n        :label:\n          A label indicating the type of value in the 'key' position of request_errors.\n\n        \"\"\"\n        self.label = label\n        self._request_errors = OrderedDict(request_errors)\n        if self._request_errors:\n            exc_reports = [self._format_error(*err_pair)\n                           for err_pair in self._request_errors.items()]\n            base_msg = \"{}: {}\".format(message, \"; \".join(exc_reports))\n        else:\n            base_msg = message\n        super(KeepRequestError, self).__init__(base_msg)\n        self.message = message\n\n    def _format_error(self, key, error):\n        if isinstance(error, HttpError):\n            err_fmt = \"{} {} responded with {e.status_code} {e.reason}\"\n        else:\n            err_fmt = \"{} {} raised {e.__class__.__name__} ({e})\"\n        return err_fmt.format(self.label, key, e=error)\n\n    def request_errors(self):\n        \"\"\"request_errors() -> OrderedDict\n\n        The keys of the dictionary are described by `self.label`.\n        The corresponding value is the exception raised when sending the\n        request to it.\"\"\"\n        return self._request_errors\n\n\nclass HttpError(Exception):\n    def __init__(self, status_code, reason):\n        self.status_code = status_code\n        self.reason = reason\n\n\nclass ArgumentError(Exception):\n    pass\nclass SyntaxError(Exception):\n    pass\nclass AssertionError(Exception):\n    pass\nclass CommandFailedError(Exception):\n    pass\nclass KeepReadError(KeepRequestError):\n    pass\nclass KeepWriteError(KeepRequestError):\n    pass\nclass KeepCacheError(KeepRequestError):\n    pass\nclass NotFoundError(KeepReadError):\n    pass\nclass NotImplementedError(Exception):\n    pass\nclass NoKeepServersError(Exception):\n    pass\nclass StaleWriterStateError(Exception):\n    pass\nclass FeatureNotEnabledError(Exception):\n    pass\n"
  },
  {
    "path": "sdk/python/arvados/events.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Follow events on an Arvados cluster\n\nThis module provides different ways to get notified about events that happen\non an Arvados cluster. You indicate which events you want updates about, and\nprovide a function that is called any time one of those events is received\nfrom the server.\n\n`subscribe` is the main entry point. It helps you construct one of the two\nAPI-compatible client classes: `EventClient` (which uses WebSockets) or\n`PollClient` (which periodically queries the logs list method).\n\"\"\"\n\nimport enum\nimport json\nimport logging\nimport os\nimport re\nimport ssl\nimport sys\nimport _thread\nimport threading\nimport time\n\nimport websockets.exceptions as ws_exc\nimport websockets.sync.client as ws_client\n\nfrom . import config\nfrom . import errors\nfrom . import util\nfrom .retry import RetryLoop\nfrom ._version import __version__\n\nfrom typing import (\n    Any,\n    Callable,\n    Dict,\n    Iterable,\n    List,\n    Optional,\n    Union,\n)\n\nEventCallback = Callable[[Dict[str, Any]], object]\n\"\"\"Type signature for an event handler callback\"\"\"\nFilterCondition = List[Union[None, str, 'Filter']]\n\"\"\"Type signature for a single filter condition\"\"\"\nFilter = List[FilterCondition]\n\"\"\"Type signature for an entire filter\"\"\"\n\n_logger = logging.getLogger('arvados.events')\n\nclass WSMethod(enum.Enum):\n    \"\"\"Arvados WebSocket methods\n\n    This enum represents valid values for the `method` field in messages\n    sent to an Arvados WebSocket server.\n    \"\"\"\n    SUBSCRIBE = 'subscribe'\n    SUB = SUBSCRIBE\n    UNSUBSCRIBE = 'unsubscribe'\n    UNSUB = UNSUBSCRIBE\n\n\nclass EventClient(threading.Thread):\n    \"\"\"Follow Arvados events via WebSocket\n\n    EventClient follows events on an Arvados cluster published by the WebSocket\n    server. Users can select the events they want to follow and run their own\n    callback function on each.\n    \"\"\"\n    _USER_AGENT = 'Python/{}.{}.{} arvados.events/{}'.format(\n        *sys.version_info[:3],\n        __version__,\n    )\n\n    def __init__(\n            self,\n            url: str,\n            filters: Optional[Filter],\n            on_event_cb: EventCallback,\n            last_log_id: Optional[int]=None,\n            *,\n            insecure: Optional[bool]=None,\n    ) -> None:\n        \"\"\"Initialize a WebSocket client\n\n        Constructor arguments:\n\n        * url: str --- The `wss` URL for an Arvados WebSocket server.\n\n        * filters: arvados.events.Filter | None --- One event filter to\n          subscribe to after connecting to the WebSocket server. If not\n          specified, the client will subscribe to all events.\n\n        * on_event_cb: arvados.events.EventCallback --- When the client\n          receives an event from the WebSocket server, it calls this\n          function with the event object.\n\n        * last_log_id: int | None --- If specified, this will be used as the\n          value for the `last_log_id` field in subscribe messages sent by\n          the client.\n\n        Constructor keyword arguments:\n\n        * insecure: bool | None --- If `True`, the client will not check the\n          validity of the server's TLS certificate. 
If not specified, uses\n          the value from the user's `ARVADOS_API_HOST_INSECURE` setting.\n        \"\"\"\n        self.url = url\n        self.filters = [filters or []]\n        self.on_event_cb = on_event_cb\n        self.last_log_id = last_log_id\n        self.is_closed = threading.Event()\n        self._ssl_ctx = ssl.create_default_context(\n            purpose=ssl.Purpose.SERVER_AUTH,\n            cafile=util.ca_certs_path(),\n        )\n        if insecure is None:\n            insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE')\n        if insecure:\n            self._ssl_ctx.check_hostname = False\n            self._ssl_ctx.verify_mode = ssl.CERT_NONE\n        self._subscribe_lock = threading.Lock()\n        self._connect()\n        super().__init__(daemon=True)\n        self.start()\n\n    def _connect(self) -> None:\n        # There are no locks protecting this method. After the thread starts,\n        # it should only be called from inside.\n        self._client = ws_client.connect(\n            self.url,\n            logger=_logger,\n            ssl_context=self._ssl_ctx,\n            user_agent_header=self._USER_AGENT,\n        )\n        self._client_ok = True\n\n    def _subscribe(self, f: Filter, last_log_id: Optional[int]) -> None:\n        extra = {}\n        if last_log_id is not None:\n            extra['last_log_id'] = last_log_id\n        return self._update_sub(WSMethod.SUBSCRIBE, f, **extra)\n\n    def _update_sub(self, method: WSMethod, f: Filter, **extra: Any) -> None:\n        msg = json.dumps({\n            'method': method.value,\n            'filters': f,\n            **extra,\n        })\n        self._client.send(msg)\n\n    def close(self, code: int=1000, reason: str='', timeout: float=0) -> None:\n        \"\"\"Close the WebSocket connection and stop processing events\n\n        Arguments:\n\n        * code: int --- The WebSocket close code sent to the server when\n          disconnecting. Default 1000.\n\n        * reason: str --- The WebSocket close reason sent to the server when\n          disconnecting. Default is an empty string.\n\n        * timeout: float --- How long to wait for the WebSocket server to\n          acknowledge the disconnection, in seconds. Default 0, which means\n          no timeout.\n        \"\"\"\n        self.is_closed.set()\n        self._client.close_timeout = timeout or None\n        self._client.close(code, reason)\n\n    def run_forever(self) -> None:\n        \"\"\"Run the WebSocket client indefinitely\n\n        This method blocks until the `close` method is called (e.g., from\n        another thread) or the client permanently loses its connection.\n        \"\"\"\n        # Have to poll here to let KeyboardInterrupt get raised.\n        while not self.is_closed.wait(1):\n            pass\n\n    def subscribe(self, f: Filter, last_log_id: Optional[int]=None) -> None:\n        \"\"\"Subscribe to another set of events from the server\n\n        Arguments:\n\n        * f: arvados.events.Filter | None --- One filter to subscribe to\n          events for.\n\n        * last_log_id: int | None --- If specified, request events starting\n          from this id. 
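For example, passing the id of the\n          last event processed before a disconnect asks the server to\n          replay anything missed in the meantime. 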
If not specified, the server will only send events\n          that occur after processing the subscription.\n        \"\"\"\n        with self._subscribe_lock:\n            self._subscribe(f, last_log_id)\n            self.filters.append(f)\n\n    def unsubscribe(self, f: Filter) -> None:\n        \"\"\"Unsubscribe from an event stream\n\n        Arguments:\n\n        * f: arvados.events.Filter | None --- One event filter to stop\n        receiving events for.\n        \"\"\"\n        with self._subscribe_lock:\n            try:\n                index = self.filters.index(f)\n            except ValueError:\n                raise ValueError(f\"filter not subscribed: {f!r}\") from None\n            self._update_sub(WSMethod.UNSUBSCRIBE, f)\n            del self.filters[index]\n\n    def on_closed(self) -> None:\n        \"\"\"Handle disconnection from the WebSocket server\n\n        This method is called when the client loses its connection while\n        receiving events. This implementation tries to establish a new\n        connection if it was not closed client-side.\n        \"\"\"\n        if self.is_closed.is_set():\n            return\n        _logger.warning(\"Unexpected close. Reconnecting.\")\n        for _ in RetryLoop(num_retries=25, backoff_start=.1, max_wait=15):\n            try:\n                self._connect()\n            except Exception as e:\n                _logger.warning(\"Error '%s' during websocket reconnect.\", e)\n            else:\n                _logger.warning(\"Reconnect successful.\")\n                break\n        else:\n            _logger.error(\"EventClient thread could not contact websocket server.\")\n            self.is_closed.set()\n            _thread.interrupt_main()\n\n    def on_event(self, m: Dict[str, Any]) -> None:\n        \"\"\"Handle an event from the WebSocket server\n\n        This method is called whenever the client receives an event from the\n        server. This implementation records the `id` field internally, then\n        calls the callback function provided at initialization time.\n\n        Arguments:\n\n        * m: Dict[str, Any] --- The event object, deserialized from JSON.\n        \"\"\"\n        try:\n            self.last_log_id = m['id']\n        except KeyError:\n            pass\n        try:\n            self.on_event_cb(m)\n        except Exception:\n            _logger.exception(\"Unexpected exception from event callback.\")\n            _thread.interrupt_main()\n\n    def run(self) -> None:\n        \"\"\"Run the client loop\n\n        This method runs in a separate thread to receive and process events\n        from the server.\n        \"\"\"\n        self.name = f'ArvadosWebsockets-{self.ident}'\n        while self._client_ok and not self.is_closed.is_set():\n            try:\n                with self._subscribe_lock:\n                    for f in self.filters:\n                        self._subscribe(f, self.last_log_id)\n                for msg_s in self._client:\n                    if not self.is_closed.is_set():\n                        msg = json.loads(msg_s)\n                        self.on_event(msg)\n            except ws_exc.ConnectionClosed:\n                self._client_ok = False\n                self.on_closed()\n\n\nclass PollClient(threading.Thread):\n    \"\"\"Follow Arvados events via polling logs\n\n    PollClient follows events on an Arvados cluster by periodically running\n    logs list API calls. 
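Each poll only asks for log records newer than the\n    last record already seen, so events are not duplicated between polls. 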
Users can select the events they want to follow and\n    run their own callback function on each.\n    \"\"\"\n    def __init__(\n            self,\n            api: 'arvados.api_resources.ArvadosAPIClient',\n            filters: Optional[Filter],\n            on_event: EventCallback,\n            poll_time: float=15,\n            last_log_id: Optional[int]=None,\n    ) -> None:\n        \"\"\"Initialize a polling client\n\n        Constructor arguments:\n\n        * api: arvados.api_resources.ArvadosAPIClient --- The Arvados API\n          client used to query logs. It will be used in a separate thread,\n          so if it is not an instance of `arvados.api.ThreadSafeAPIClient`\n          it should not be reused after the thread is started.\n\n        * filters: arvados.events.Filter | None --- One event filter to\n          subscribe to after the client starts. If not\n          specified, the client will subscribe to all events.\n\n        * on_event: arvados.events.EventCallback --- When the client\n          receives an event from polling logs, it calls this\n          function with the event object.\n\n        * poll_time: float --- The number of seconds to wait between querying\n          logs. Default 15.\n\n        * last_log_id: int | None --- If specified, queries will include a\n          filter for logs with an `id` at least this value.\n        \"\"\"\n        super(PollClient, self).__init__()\n        self.api = api\n        if filters:\n            self.filters = [filters]\n        else:\n            self.filters = [[]]\n        self.on_event = on_event\n        self.poll_time = poll_time\n        self.daemon = True\n        self.last_log_id = last_log_id\n        self._closing = threading.Event()\n        self._closing_lock = threading.RLock()\n\n        if self.last_log_id is not None:\n            # Caller supplied the last-seen event ID from a previous\n            # connection.\n            self._skip_old_events = [[\"id\", \">\", str(self.last_log_id)]]\n        else:\n            # We need to do a reverse-order query to find the most\n            # recent event ID (see \"if not self._skip_old_events\"\n            # in run()).\n            self._skip_old_events = False\n\n    def run(self):\n        \"\"\"Run the client loop\n\n        This method runs in a separate thread to poll and process events\n        from the server.\n        \"\"\"\n        self.on_event({'status': 200})\n\n        while not self._closing.is_set():\n            moreitems = False\n            for f in self.filters:\n                for tries_left in RetryLoop(num_retries=25, backoff_start=.1, max_wait=self.poll_time):\n                    try:\n                        if not self._skip_old_events:\n                            # If the caller didn't provide a known\n                            # recent ID, our first request will ask\n                            # for the single most recent event from\n                            # the last 2 hours (the time restriction\n                            # avoids doing an expensive database\n                            # query, and leaves a big enough margin to\n                            # account for clock skew). 
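That first request is\n                            # essentially the logs().list(order=\"id desc\",\n                            # limit=1, filters=[[\"created_at\", \">=\", ...]])\n                            # call made just below. 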
If we do find a\n                            # recent event, we remember its ID but\n                            # then discard it (we are supposed to be\n                            # returning new/current events, not old\n                            # ones).\n                            #\n                            # Subsequent requests will get multiple\n                            # events in chronological order, and\n                            # filter on that same cutoff time, or\n                            # (once we see our first matching event)\n                            # the ID of the last-seen event.\n                            #\n                            # Note: self._skip_old_events must not be\n                            # set until the threshold is decided.\n                            # Otherwise, tests will be unreliable.\n                            filter_by_time = [[\n                                \"created_at\", \">=\",\n                                time.strftime(\n                                    \"%Y-%m-%dT%H:%M:%SZ\",\n                                    time.gmtime(time.time()-7200))]]\n                            items = self.api.logs().list(\n                                order=\"id desc\",\n                                limit=1,\n                                filters=f+filter_by_time).execute()\n                            if items[\"items\"]:\n                                self._skip_old_events = [\n                                    [\"id\", \">\", str(items[\"items\"][0][\"id\"])]]\n                                items = {\n                                    \"items\": [],\n                                    \"items_available\": 0,\n                                }\n                            else:\n                                # No recent events. We can keep using\n                                # the same timestamp threshold until\n                                # we receive our first new event.\n                                self._skip_old_events = filter_by_time\n                        else:\n                            # In this case, either we know the most\n                            # recent matching ID, or we know there\n                            # were no matching events in the 2-hour\n                            # window before subscribing. 
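(self._skip_old_events now\n                            # holds either an id cutoff or the timestamp\n                            # cutoff.) 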
Either way we\n                            # can safely ask for events in ascending\n                            # order.\n                            items = self.api.logs().list(\n                                order=\"id asc\",\n                                filters=f+self._skip_old_events).execute()\n                        break\n                    except errors.ApiError:\n                        pass\n                if tries_left == 0:\n                    _logger.exception(\"PollClient thread could not contact API server.\")\n                    with self._closing_lock:\n                        self._closing.set()\n                    _thread.interrupt_main()\n                    return\n                for i in items[\"items\"]:\n                    self._skip_old_events = [[\"id\", \">\", str(i[\"id\"])]]\n                    with self._closing_lock:\n                        if self._closing.is_set():\n                            return\n                        try:\n                            self.on_event(i)\n                        except Exception:\n                            _logger.exception(\"Unexpected exception from event callback.\")\n                            _thread.interrupt_main()\n                if items[\"items_available\"] > len(items[\"items\"]):\n                    moreitems = True\n            if not moreitems:\n                self._closing.wait(self.poll_time)\n\n    def run_forever(self):\n        \"\"\"Run the polling client indefinitely\n\n        This method blocks until the `close` method is called (e.g., from\n        another thread) or the client permanently loses its connection.\n        \"\"\"\n        # Have to poll here, otherwise KeyboardInterrupt will never get processed.\n        while not self._closing.is_set():\n            self._closing.wait(1)\n\n    def close(self, code: Optional[int]=None, reason: Optional[str]=None, timeout: float=0) -> None:\n        \"\"\"Stop polling and processing events\n\n        Arguments:\n\n        * code: Optional[int] --- Ignored; this argument exists for API\n          compatibility with `EventClient.close`.\n\n        * reason: Optional[str] --- Ignored; this argument exists for API\n          compatibility with `EventClient.close`.\n\n        * timeout: float --- How long to wait for the client thread to finish\n          processing events. Default 0, which means no timeout.\n        \"\"\"\n        with self._closing_lock:\n            self._closing.set()\n        try:\n            self.join(timeout=timeout)\n        except RuntimeError:\n            # \"join() raises a RuntimeError if an attempt is made to join the\n            # current thread as that would cause a deadlock. 
It is also an\n            # error to join() a thread before it has been started and attempts\n            # to do so raises the same exception.\"\n            pass\n\n    def subscribe(self, f: Filter, last_log_id: Optional[int]=None) -> None:\n        \"\"\"Subscribe to another set of events from the server\n\n        Arguments:\n\n        * f: arvados.events.Filter | None --- One filter to subscribe to.\n\n        * last_log_id: Optional[int] --- Ignored; this argument exists for\n          API compatibility with `EventClient.subscribe`.\n        \"\"\"\n        self.on_event({'status': 200})\n        self.filters.append(f)\n\n    def unsubscribe(self, f):\n        \"\"\"Unsubscribe from an event stream\n\n        Arguments:\n\n        * f: arvados.events.Filter | None --- One event filter to stop\n        receiving events for.\n        \"\"\"\n        del self.filters[self.filters.index(f)]\n\n\ndef _subscribe_websocket(api, filters, on_event, last_log_id=None):\n    endpoint = api._rootDesc.get('websocketUrl', None)\n    if not endpoint:\n        raise errors.FeatureNotEnabledError(\n            \"Server does not advertise a websocket endpoint\")\n    uri_with_token = \"{}?api_token={}\".format(endpoint, api.api_token)\n    try:\n        client = EventClient(uri_with_token, filters, on_event, last_log_id)\n    except Exception:\n        _logger.warning(\"Failed to connect to websockets on %s\", endpoint)\n        raise\n    else:\n        return client\n\ndef subscribe(\n        api: 'arvados.api_resources.ArvadosAPIClient',\n        filters: Optional[Filter],\n        on_event: EventCallback,\n        poll_fallback: float=15,\n        last_log_id: Optional[int]=None,\n) -> Union[EventClient, PollClient]:\n    \"\"\"Start a thread to monitor events\n\n    This method tries to construct an `EventClient` to process Arvados\n    events via WebSockets. If that fails, or the\n    `ARVADOS_DISABLE_WEBSOCKETS` flag is set in user configuration, it falls\n    back to constructing a `PollClient` to process the events via API\n    polling.\n\n    Arguments:\n\n    * api: arvados.api_resources.ArvadosAPIClient --- The Arvados API\n      client used to query logs. It may be used in a separate thread,\n      so if it is not an instance of `arvados.api.ThreadSafeAPIClient`\n      it should not be reused after this method returns.\n\n    * filters: arvados.events.Filter | None --- One event filter to\n      subscribe to after initializing the client. If not specified, the\n      client will subscribe to all events.\n\n    * on_event: arvados.events.EventCallback --- When the client receives an\n      event, it calls this function with the event object.\n\n    * poll_fallback: float --- The number of seconds to wait between querying\n      logs. If 0, this function will refuse to construct a `PollClient`.\n      Default 15.\n\n    * last_log_id: int | None --- If specified, start processing events with\n      at least this `id` value.\n
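\n    A minimal usage sketch (the filter and the callback body here are\n    illustrative, not part of the API):\n\n        import arvados\n        import arvados.events\n\n        def on_event(ev):\n            print(ev)\n\n        client = arvados.events.subscribe(\n            arvados.api('v1'),\n            [['event_type', '=', 'update']],\n            on_event)\n        client.run_forever()\n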
    \"\"\"\n    if not poll_fallback:\n        return _subscribe_websocket(api, filters, on_event, last_log_id)\n\n    try:\n        if not config.flag_is_true('ARVADOS_DISABLE_WEBSOCKETS'):\n            return _subscribe_websocket(api, filters, on_event, last_log_id)\n        else:\n            _logger.info(\"Using polling because ARVADOS_DISABLE_WEBSOCKETS is true\")\n    except Exception as e:\n        _logger.warning(\"Falling back to polling after websocket error: %s\", e)\n    p = PollClient(api, filters, on_event, poll_fallback, last_log_id)\n    p.start()\n    return p\n"
  },
  {
    "path": "sdk/python/arvados/keep.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport copy\nimport collections\nimport datetime\nimport hashlib\nimport errno\nimport io\nimport logging\nimport math\nimport os\nimport pycurl\nimport queue\nimport re\nimport socket\nimport ssl\nimport sys\nimport threading\nimport resource\nimport urllib.parse\nimport traceback\nimport weakref\n\nfrom io import BytesIO\n\nimport arvados\nimport arvados.config as config\nimport arvados.errors\nimport arvados.retry as retry\nimport arvados.util\n\nfrom ._internal import basedirs, diskcache, Timer, parse_seq\nfrom ._internal.pycurl import PyCurlHelper\n\n_logger = logging.getLogger('arvados.keep')\nglobal_client_object = None\n\n# Monkey patch TCP constants when not available (apple). Values sourced from:\n# http://www.opensource.apple.com/source/xnu/xnu-2422.115.4/bsd/netinet/tcp.h\nif sys.platform == 'darwin':\n    if not hasattr(socket, 'TCP_KEEPALIVE'):\n        socket.TCP_KEEPALIVE = 0x010\n    if not hasattr(socket, 'TCP_KEEPINTVL'):\n        socket.TCP_KEEPINTVL = 0x101\n    if not hasattr(socket, 'TCP_KEEPCNT'):\n        socket.TCP_KEEPCNT = 0x102\n\nclass KeepLocator(object):\n    EPOCH_DATETIME = datetime.datetime.utcfromtimestamp(0)\n    HINT_RE = re.compile(r'^[A-Z][A-Za-z0-9@_-]+$')\n\n    def __init__(self, locator_str):\n        self.hints = []\n        self._perm_sig = None\n        self._perm_expiry = None\n        pieces = iter(locator_str.split('+'))\n        self.md5sum = next(pieces)\n        try:\n            self.size = int(next(pieces))\n        except StopIteration:\n            self.size = None\n        for hint in pieces:\n            if self.HINT_RE.match(hint) is None:\n                raise ValueError(\"invalid hint format: {}\".format(hint))\n            elif hint.startswith('A'):\n                self.parse_permission_hint(hint)\n            else:\n                self.hints.append(hint)\n\n    def __str__(self):\n        return '+'.join(\n            str(s)\n            for s in [self.md5sum, self.size,\n                      self.permission_hint()] + self.hints\n            if s is not None)\n\n    def stripped(self):\n        if self.size is not None:\n            return \"%s+%i\" % (self.md5sum, self.size)\n        else:\n            return self.md5sum\n\n    def _make_hex_prop(name, length):\n        # Build and return a new property with the given name that\n        # must be a hex string of the given length.\n        data_name = '_{}'.format(name)\n        def getter(self):\n            return getattr(self, data_name)\n        def setter(self, hex_str):\n            if not arvados.util.is_hex(hex_str, length):\n                raise ValueError(\"{} is not a {}-digit hex string: {!r}\".\n                                 format(name, length, hex_str))\n            setattr(self, data_name, hex_str)\n        return property(getter, setter)\n\n    md5sum = _make_hex_prop('md5sum', 32)\n    perm_sig = _make_hex_prop('perm_sig', 40)\n\n    @property\n    def perm_expiry(self):\n        return self._perm_expiry\n\n    @perm_expiry.setter\n    def perm_expiry(self, value):\n        if not arvados.util.is_hex(value, 1, 8):\n            raise ValueError(\n                \"permission timestamp must be a hex Unix timestamp: {}\".\n                format(value))\n        self._perm_expiry = datetime.datetime.utcfromtimestamp(int(value, 16))\n\n    def permission_hint(self):\n        data = [self.perm_sig, self.perm_expiry]\n        if None in 
data:\n            return None\n        data[1] = int((data[1] - self.EPOCH_DATETIME).total_seconds())\n        return \"A{}@{:08x}\".format(*data)\n\n    def parse_permission_hint(self, s):\n        try:\n            self.perm_sig, self.perm_expiry = s[1:].split('@', 1)\n        except ValueError:\n            raise ValueError(\"bad permission hint {}\".format(s))\n\n    def permission_expired(self, as_of_dt=None):\n        if self.perm_expiry is None:\n            return False\n        elif as_of_dt is None:\n            as_of_dt = datetime.datetime.utcnow()\n        return self.perm_expiry <= as_of_dt\n\n\nclass KeepBlockCache(object):\n    def __init__(self, cache_max=0, max_slots=0, disk_cache=False, disk_cache_dir=None):\n        self.cache_max = cache_max\n        self._cache = collections.OrderedDict()\n        self._cache_lock = threading.Lock()\n        self._max_slots = max_slots\n        self._disk_cache = disk_cache\n        self._disk_cache_dir = disk_cache_dir\n        self._cache_updating = threading.Condition(self._cache_lock)\n\n        if self._disk_cache and self._disk_cache_dir is None:\n            self._disk_cache_dir = str(basedirs.BaseDirectories('CACHE').storage_path('keep'))\n\n        if self._max_slots == 0:\n            if self._disk_cache:\n                # Each block uses two file descriptors, one used to\n                # open it initially and hold the flock(), and a second\n                # hidden one used by mmap().\n                #\n                # Set max slots to 1/8 of maximum file handles.  This\n                # means we'll use at most 1/4 of total file handles.\n                #\n                # NOFILE typically defaults to 1024 on Linux so this\n                # is 128 slots (256 file handles), which means we can\n                # cache up to 8 GiB of 64 MiB blocks.  This leaves\n                # 768 file handles for sockets and other stuff.\n                #\n                # When we want the ability to have more cache (e.g. 
in\n                # arv-mount) we'll increase rlimit before calling\n                # this.\n                self._max_slots = int(resource.getrlimit(resource.RLIMIT_NOFILE)[0] / 8)\n            else:\n                # RAM cache slots\n                self._max_slots = 512\n\n        if self.cache_max == 0:\n            if self._disk_cache:\n                fs = os.statvfs(self._disk_cache_dir)\n                # Calculation of available space incorporates existing cache usage\n                existing_usage = diskcache.DiskCacheSlot.cache_usage(self._disk_cache_dir)\n                avail = (fs.f_bavail * fs.f_bsize + existing_usage) / 4\n                maxdisk = int((fs.f_blocks * fs.f_bsize) * 0.10)\n                # pick smallest of:\n                # 10% of total disk size\n                # 25% of available space\n                # max_slots * 64 MiB\n                self.cache_max = min(min(maxdisk, avail), (self._max_slots * 64 * 1024 * 1024))\n            else:\n                # 256 MiB in RAM\n                self.cache_max = (256 * 1024 * 1024)\n\n        self.cache_max = max(self.cache_max, 64 * 1024 * 1024)\n\n        self.cache_total = 0\n        if self._disk_cache:\n            self._cache = diskcache.DiskCacheSlot.init_cache(self._disk_cache_dir, self._max_slots)\n            for slot in self._cache.values():\n                self.cache_total += slot.size()\n            self.cap_cache()\n\n    class _CacheSlot:\n        __slots__ = (\"locator\", \"ready\", \"content\")\n\n        def __init__(self, locator):\n            self.locator = locator\n            self.ready = threading.Event()\n            self.content = None\n\n        def get(self):\n            self.ready.wait()\n            return self.content\n\n        def set(self, value):\n            if self.content is not None:\n                return False\n            self.content = value\n            self.ready.set()\n            return True\n\n        def size(self):\n            if self.content is None:\n                return 0\n            else:\n                return len(self.content)\n\n        def evict(self):\n            self.content = None\n\n\n    def _resize_cache(self, cache_max, max_slots):\n        # Try to make sure the contents of the cache do not exceed\n        # the supplied maximums.\n\n        if self.cache_total <= cache_max and len(self._cache) <= max_slots:\n            return\n\n        _evict_candidates = collections.deque(self._cache.values())\n        while _evict_candidates and (self.cache_total > cache_max or len(self._cache) > max_slots):\n            slot = _evict_candidates.popleft()\n            if not slot.ready.is_set():\n                continue\n\n            sz = slot.size()\n            slot.evict()\n            self.cache_total -= sz\n            del self._cache[slot.locator]\n\n\n    def cap_cache(self):\n        '''Cap the cache size to self.cache_max'''\n        with self._cache_updating:\n            self._resize_cache(self.cache_max, self._max_slots)\n            self._cache_updating.notify_all()\n\n    def _get(self, locator):\n        # Test if the locator is already in the cache\n        if locator in self._cache:\n            n = self._cache[locator]\n            if n.ready.is_set() and n.content is None:\n                del self._cache[n.locator]\n                return None\n            self._cache.move_to_end(locator)\n            return n\n        if self._disk_cache:\n            # see if it exists on disk\n            n = 
diskcache.DiskCacheSlot.get_from_disk(locator, self._disk_cache_dir)\n            if n is not None:\n                self._cache[n.locator] = n\n                self.cache_total += n.size()\n                return n\n        return None\n\n    def get(self, locator):\n        with self._cache_lock:\n            return self._get(locator)\n\n    def reserve_cache(self, locator):\n        '''Reserve a cache slot for the specified locator,\n        or return the existing slot.'''\n        with self._cache_updating:\n            n = self._get(locator)\n            if n:\n                return n, False\n            else:\n                # Add a new cache slot for the locator\n                self._resize_cache(self.cache_max, self._max_slots-1)\n                while len(self._cache) >= self._max_slots:\n                    # If there isn't a slot available, need to wait\n                    # for something to happen that releases one of the\n                    # cache slots.  Idle for 200 ms or woken up by\n                    # another thread\n                    self._cache_updating.wait(timeout=0.2)\n                    self._resize_cache(self.cache_max, self._max_slots-1)\n\n                if self._disk_cache:\n                    n = diskcache.DiskCacheSlot(locator, self._disk_cache_dir)\n                else:\n                    n = KeepBlockCache._CacheSlot(locator)\n                self._cache[n.locator] = n\n                return n, True\n\n    def set(self, slot, blob):\n        try:\n            if slot.set(blob):\n                self.cache_total += slot.size()\n            return\n        except OSError as e:\n            if e.errno == errno.ENOMEM:\n                # Reduce max slots to current - 4, cap cache and retry\n                with self._cache_lock:\n                    self._max_slots = max(4, len(self._cache) - 4)\n            elif e.errno == errno.ENOSPC:\n                # Reduce disk max space to current - 256 MiB, cap cache and retry\n                with self._cache_lock:\n                    sm = sum(st.size() for st in self._cache.values())\n                    self.cache_max = max((256 * 1024 * 1024), sm - (256 * 1024 * 1024))\n            elif e.errno == errno.ENODEV:\n                _logger.error(\"Unable to use disk cache: The underlying filesystem does not support memory mapping.\")\n        except Exception as e:\n            pass\n        finally:\n            # Check if we should evict things from the cache.  Either\n            # because we added a new thing or there was an error and\n            # we possibly adjusted the limits down, so we might need\n            # to push something out.\n            self.cap_cache()\n\n        try:\n            # Only gets here if there was an error the first time. The\n            # exception handler adjusts limits downward in some cases\n            # to free up resources, which would make the operation\n            # succeed.\n            if slot.set(blob):\n                self.cache_total += slot.size()\n        except Exception as e:\n            # It failed again.  
Give up.\n            slot.set(None)\n            raise arvados.errors.KeepCacheError(\"Unable to save block %s to disk cache: %s\" % (slot.locator, e))\n\n        self.cap_cache()\n\n    def clear(self):\n        with self._cache_lock:\n            self._cache.clear()\n            self.cache_total = 0\n\nclass _Counter:\n    def __init__(self, v=0):\n        self._lk = threading.Lock()\n        self._val = v\n\n    def add(self, v):\n        with self._lk:\n            self._val += v\n\n    def get(self):\n        with self._lk:\n            return self._val\n\n\nclass KeepClient(object):\n    DEFAULT_TIMEOUT = PyCurlHelper.DEFAULT_TIMEOUT\n    DEFAULT_PROXY_TIMEOUT = PyCurlHelper.DEFAULT_PROXY_TIMEOUT\n\n    class _KeepService(PyCurlHelper):\n        \"\"\"Make requests to a single Keep service, and track results.\n\n        A _KeepService is intended to last long enough to perform one\n        transaction (GET or PUT) against one Keep service. This can\n        involve calling either get() or put() multiple times in order\n        to retry after transient failures. However, calling both get()\n        and put() on a single instance -- or using the same instance\n        to access two different Keep services -- will not produce\n        sensible behavior.\n        \"\"\"\n\n        HTTP_ERRORS = (\n            socket.error,\n            ssl.SSLError,\n            arvados.errors.HttpError,\n        )\n\n        def __init__(self, root, user_agent_pool=queue.LifoQueue(),\n                     upload_counter=None,\n                     download_counter=None,\n                     headers={},\n                     insecure=False):\n            super().__init__()\n            self.root = root\n            self._user_agent_pool = user_agent_pool\n            self._result = {'error': None}\n            self._usable = True\n            self._session = None\n            self._socket = None\n            self.get_headers = {'Accept': 'application/octet-stream'}\n            self.get_headers.update(headers)\n            self.put_headers = headers\n            self.upload_counter = upload_counter\n            self.download_counter = download_counter\n            self.insecure = insecure\n\n        def usable(self):\n            \"\"\"Is it worth attempting a request?\"\"\"\n            return self._usable\n\n        def finished(self):\n            \"\"\"Did the request succeed or encounter permanent failure?\"\"\"\n            return self._result['error'] == False or not self._usable\n\n        def last_result(self):\n            return self._result\n\n        def _get_user_agent(self):\n            try:\n                return self._user_agent_pool.get(block=False)\n            except queue.Empty:\n                return pycurl.Curl()\n\n        def _put_user_agent(self, ua):\n            try:\n                ua.reset()\n                self._user_agent_pool.put(ua, block=False)\n            except:\n                ua.close()\n\n        def get(self, locator, method=\"GET\", timeout=None):\n            # locator is a KeepLocator object.\n            url = self.root + str(locator)\n            _logger.debug(\"Request: %s %s\", method, url)\n            curl = self._get_user_agent()\n            ok = None\n            try:\n                with Timer() as t:\n                    self._headers = {}\n                    response_body = BytesIO()\n                    curl.setopt(pycurl.NOSIGNAL, 1)\n                    curl.setopt(pycurl.OPENSOCKETFUNCTION,\n                                lambda 
*args, **kwargs: self._socket_open(*args, **kwargs))\n                    curl.setopt(pycurl.URL, url.encode('utf-8'))\n                    curl.setopt(pycurl.HTTPHEADER, [\n                        '{}: {}'.format(k,v) for k,v in self.get_headers.items()])\n                    curl.setopt(pycurl.WRITEFUNCTION, response_body.write)\n                    curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)\n                    if self.insecure:\n                        curl.setopt(pycurl.SSL_VERIFYPEER, 0)\n                        curl.setopt(pycurl.SSL_VERIFYHOST, 0)\n                    else:\n                        curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path())\n                    if method == \"HEAD\":\n                        curl.setopt(pycurl.NOBODY, True)\n                    else:\n                        curl.setopt(pycurl.HTTPGET, True)\n                    self._setcurltimeouts(curl, timeout, method==\"HEAD\")\n\n                    try:\n                        curl.perform()\n                    except Exception as e:\n                        raise arvados.errors.HttpError(0, str(e))\n                    finally:\n                        if self._socket:\n                            self._socket.close()\n                            self._socket = None\n                    self._result = {\n                        'status_code': curl.getinfo(pycurl.RESPONSE_CODE),\n                        'body': response_body.getvalue(),\n                        'headers': self._headers,\n                        'error': False,\n                    }\n\n                ok = retry.check_http_response_success(self._result['status_code'])\n                if not ok:\n                    self._result['error'] = arvados.errors.HttpError(\n                        self._result['status_code'],\n                        self._headers.get('x-status-line', 'Error'))\n            except self.HTTP_ERRORS as e:\n                self._result = {\n                    'error': e,\n                }\n            self._usable = ok != False # still usable if ok is True or None\n            if self._result.get('status_code', None):\n                # The client worked well enough to get an HTTP status\n                # code, so presumably any problems are just on the\n                # server side and it's OK to reuse the client.\n                self._put_user_agent(curl)\n            else:\n                # Don't return this client to the pool, in case it's\n                # broken.\n                curl.close()\n            if not ok:\n                _logger.debug(\"Request fail: GET %s => %s: %s\",\n                              url, type(self._result['error']), str(self._result['error']))\n                return None\n            if method == \"HEAD\":\n                _logger.info(\"HEAD %s: %s bytes\",\n                         self._result['status_code'],\n                         self._result['headers'].get('content-length'))\n                if self._result['headers'].get('x-keep-locator'):\n                    # This is a response to a remote block copy request, return\n                    # the local copy block locator.\n                    return self._result['headers'].get('x-keep-locator')\n                return True\n\n            _logger.info(\"GET %s: %s bytes in %s msec (%.3f MiB/sec)\",\n                         self._result['status_code'],\n                         len(self._result['body']),\n                         t.msecs,\n                         1.0*len(self._result['body'])/2**20/t.secs if t.secs > 0 else 0)\n\n     
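       # Count the downloaded bytes, then verify the body against the\n            # MD5 portion of the locator before handing it to the caller.\n     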
       if self.download_counter:\n                self.download_counter.add(len(self._result['body']))\n            resp_md5 = hashlib.md5(self._result['body']).hexdigest()\n            if resp_md5 != locator.md5sum:\n                _logger.warning(\"Checksum fail: md5(%s) = %s\",\n                                url, resp_md5)\n                self._result['error'] = arvados.errors.HttpError(\n                    0, 'Checksum fail')\n                return None\n            return self._result['body']\n\n        def put(self, hash_s, body, timeout=None, headers={}):\n            put_headers = copy.copy(self.put_headers)\n            put_headers.update(headers)\n            url = self.root + hash_s\n            _logger.debug(\"Request: PUT %s\", url)\n            curl = self._get_user_agent()\n            ok = None\n            try:\n                with Timer() as t:\n                    self._headers = {}\n                    body_reader = BytesIO(body)\n                    response_body = BytesIO()\n                    curl.setopt(pycurl.NOSIGNAL, 1)\n                    curl.setopt(pycurl.OPENSOCKETFUNCTION,\n                                lambda *args, **kwargs: self._socket_open(*args, **kwargs))\n                    curl.setopt(pycurl.URL, url.encode('utf-8'))\n                    # Using UPLOAD tells cURL to wait for a \"go ahead\" from the\n                    # Keep server (in the form of an HTTP/1.1 \"100 Continue\"\n                    # response) instead of sending the request body immediately.\n                    # This allows the server to reject the request if the request\n                    # is invalid or the server is read-only, without waiting for\n                    # the client to send the entire block.\n                    curl.setopt(pycurl.UPLOAD, True)\n                    curl.setopt(pycurl.INFILESIZE, len(body))\n                    curl.setopt(pycurl.READFUNCTION, body_reader.read)\n                    curl.setopt(pycurl.HTTPHEADER, [\n                        '{}: {}'.format(k,v) for k,v in put_headers.items()])\n                    curl.setopt(pycurl.WRITEFUNCTION, response_body.write)\n                    curl.setopt(pycurl.HEADERFUNCTION, self._headerfunction)\n                    if self.insecure:\n                        curl.setopt(pycurl.SSL_VERIFYPEER, 0)\n                        curl.setopt(pycurl.SSL_VERIFYHOST, 0)\n                    else:\n                        curl.setopt(pycurl.CAINFO, arvados.util.ca_certs_path())\n                    self._setcurltimeouts(curl, timeout)\n                    try:\n                        curl.perform()\n                    except Exception as e:\n                        raise arvados.errors.HttpError(0, str(e))\n                    finally:\n                        if self._socket:\n                            self._socket.close()\n                            self._socket = None\n                    self._result = {\n                        'status_code': curl.getinfo(pycurl.RESPONSE_CODE),\n                        'body': response_body.getvalue().decode('utf-8'),\n                        'headers': self._headers,\n                        'error': False,\n                    }\n                ok = retry.check_http_response_success(self._result['status_code'])\n                if not ok:\n                    self._result['error'] = arvados.errors.HttpError(\n                        self._result['status_code'],\n                        self._headers.get('x-status-line', 'Error'))\n            except 
self.HTTP_ERRORS as e:\n                self._result = {\n                    'error': e,\n                }\n            self._usable = ok != False # still usable if ok is True or None\n            if self._result.get('status_code', None):\n                # Client is functional. See comment in get().\n                self._put_user_agent(curl)\n            else:\n                curl.close()\n            if not ok:\n                _logger.debug(\"Request fail: PUT %s => %s: %s\",\n                              url, type(self._result['error']), str(self._result['error']))\n                return False\n            _logger.info(\"PUT %s: %s bytes in %s msec (%.3f MiB/sec)\",\n                         self._result['status_code'],\n                         len(body),\n                         t.msecs,\n                         1.0*len(body)/2**20/t.secs if t.secs > 0 else 0)\n            if self.upload_counter:\n                self.upload_counter.add(len(body))\n            return True\n\n\n    class _KeepWriterQueue(queue.Queue):\n        def __init__(self, copies, classes=[]):\n            queue.Queue.__init__(self) # Old-style superclass\n            self.wanted_copies = copies\n            self.wanted_storage_classes = classes\n            self.successful_copies = 0\n            self.confirmed_storage_classes = {}\n            self.response = None\n            self.storage_classes_tracking = True\n            self.queue_data_lock = threading.RLock()\n            self.pending_tries = max(copies, len(classes))\n            self.pending_tries_notification = threading.Condition()\n\n        def write_success(self, response, replicas_nr, classes_confirmed):\n            with self.queue_data_lock:\n                self.successful_copies += replicas_nr\n                if classes_confirmed is None:\n                    self.storage_classes_tracking = False\n                elif self.storage_classes_tracking:\n                    for st_class, st_copies in classes_confirmed.items():\n                        try:\n                            self.confirmed_storage_classes[st_class] += st_copies\n                        except KeyError:\n                            self.confirmed_storage_classes[st_class] = st_copies\n                    self.pending_tries = max(self.wanted_copies - self.successful_copies, len(self.pending_classes()))\n                self.response = response\n            with self.pending_tries_notification:\n                self.pending_tries_notification.notify_all()\n\n        def write_fail(self, ks):\n            with self.pending_tries_notification:\n                self.pending_tries += 1\n                self.pending_tries_notification.notify()\n\n        def pending_copies(self):\n            with self.queue_data_lock:\n                return self.wanted_copies - self.successful_copies\n\n        def satisfied_classes(self):\n            with self.queue_data_lock:\n                if not self.storage_classes_tracking:\n                    # Tell the outer loop that storage class tracking is\n                    # disabled.\n                    return None\n            return list(set(self.wanted_storage_classes) - set(self.pending_classes()))\n\n        def pending_classes(self):\n            with self.queue_data_lock:\n                if (not self.storage_classes_tracking) or (self.wanted_storage_classes is None):\n                    return []\n                unsatisfied_classes = copy.copy(self.wanted_storage_classes)\n                for st_class, st_copies in 
self.confirmed_storage_classes.items():\n                    if st_class in unsatisfied_classes and st_copies >= self.wanted_copies:\n                        unsatisfied_classes.remove(st_class)\n                return unsatisfied_classes\n\n        def get_next_task(self):\n            with self.pending_tries_notification:\n                while True:\n                    if self.pending_copies() < 1 and len(self.pending_classes()) == 0:\n                        # This notify_all() is unnecessary --\n                        # write_success() already called notify_all()\n                        # when pending<1 became true, so it's not\n                        # possible for any other thread to be in\n                        # wait() now -- but it's cheap insurance\n                        # against deadlock so we do it anyway:\n                        self.pending_tries_notification.notify_all()\n                        # Drain the queue and then raise Queue.Empty\n                        while True:\n                            self.get_nowait()\n                            self.task_done()\n                    elif self.pending_tries > 0:\n                        service, service_root = self.get_nowait()\n                        if service.finished():\n                            self.task_done()\n                            continue\n                        self.pending_tries -= 1\n                        return service, service_root\n                    elif self.empty():\n                        self.pending_tries_notification.notify_all()\n                        raise queue.Empty\n                    else:\n                        self.pending_tries_notification.wait()\n\n\n    class _KeepWriterThreadPool:\n        def __init__(self, data, data_hash, copies, max_service_replicas, timeout=None, classes=[]):\n            self.total_task_nr = 0\n            if (not max_service_replicas) or (max_service_replicas >= copies):\n                num_threads = 1\n            else:\n                num_threads = int(math.ceil(1.0*copies/max_service_replicas))\n            _logger.debug(\"Pool max threads is %d\", num_threads)\n            self.workers = []\n            self.queue = KeepClient._KeepWriterQueue(copies, classes)\n            # Create workers\n            for _ in range(num_threads):\n                w = KeepClient._KeepWriterThread(self.queue, data, data_hash, timeout)\n                self.workers.append(w)\n\n        def add_task(self, ks, service_root):\n            self.queue.put((ks, service_root))\n            self.total_task_nr += 1\n\n        def done(self):\n            return self.queue.successful_copies, self.queue.satisfied_classes()\n\n        def join(self):\n            # Start workers\n            for worker in self.workers:\n                worker.start()\n            # Wait for finished work\n            self.queue.join()\n\n        def response(self):\n            return self.queue.response\n\n\n    class _KeepWriterThread(threading.Thread):\n        class TaskFailed(RuntimeError):\n            \"\"\"Exception for failed Keep writes\n\n            TODO: Move this class to the module top level and document it\n\n            @private\n            \"\"\"\n\n\n        def __init__(self, queue, data, data_hash, timeout=None):\n            super().__init__()\n            self.timeout = timeout\n            self.queue = queue\n            self.data = data\n            self.data_hash = data_hash\n            self.daemon = True\n\n        def run(self):\n            
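# Worker loop: pull (service, service_root) tasks until\n            # get_next_task() drains the queue and raises queue.Empty,\n            # reporting each attempt's outcome back to the queue.\n            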
while True:\n                try:\n                    service, service_root = self.queue.get_next_task()\n                except queue.Empty:\n                    return\n                try:\n                    locator, copies, classes = self.do_task(service, service_root)\n                except Exception as e:\n                    if not isinstance(e, self.TaskFailed):\n                        _logger.exception(\"Exception in _KeepWriterThread\")\n                    self.queue.write_fail(service)\n                else:\n                    self.queue.write_success(locator, copies, classes)\n                finally:\n                    self.queue.task_done()\n\n        def do_task(self, service, service_root):\n            classes = self.queue.pending_classes()\n            headers = {}\n            if len(classes) > 0:\n                classes.sort()\n                headers['X-Keep-Storage-Classes'] = ', '.join(classes)\n            success = bool(service.put(self.data_hash,\n                                        self.data,\n                                        timeout=self.timeout,\n                                        headers=headers))\n            result = service.last_result()\n\n            if not success:\n                if result.get('status_code'):\n                    _logger.debug(\"Request fail: PUT %s => %s %s\",\n                                  self.data_hash,\n                                  result.get('status_code'),\n                                  result.get('body'))\n                raise self.TaskFailed()\n\n            _logger.debug(\"_KeepWriterThread %s succeeded %s+%i %s\",\n                          str(threading.current_thread()),\n                          self.data_hash,\n                          len(self.data),\n                          service_root)\n            try:\n                replicas_stored = int(result['headers']['x-keep-replicas-stored'])\n            except (KeyError, ValueError):\n                replicas_stored = 1\n\n            classes_confirmed = collections.defaultdict(int)\n            try:\n                scch = result['headers']['x-keep-storage-classes-confirmed']\n                for confirmation in parse_seq(scch):\n                    stored_class, _, stored_copies = confirmation.partition('=')\n                    if stored_copies:\n                        classes_confirmed[stored_class] += int(stored_copies)\n            except (KeyError, ValueError):\n                # Storage classes confirmed header missing or corrupt\n                classes_confirmed = None\n\n            return result['body'].strip(), replicas_stored, classes_confirmed\n\n\n    def __init__(self, api_client=None, proxy=None,\n                 timeout=DEFAULT_TIMEOUT, proxy_timeout=DEFAULT_PROXY_TIMEOUT,\n                 api_token=None, local_store=None, block_cache=None,\n                 num_retries=10, session=None, num_prefetch_threads=None):\n        \"\"\"Initialize a new KeepClient.\n\n        Arguments:\n        :api_client:\n          The API client to use to find Keep services.  If not\n          provided, KeepClient will build one from available Arvados\n          configuration.\n\n        :proxy:\n          If specified, this KeepClient will send requests to this Keep\n          proxy.  
Otherwise, KeepClient will fall back to the\n          ARVADOS_KEEP_SERVICES or ARVADOS_KEEP_PROXY configuration settings.\n          If you want to ensure KeepClient does not use a proxy, pass in an\n          empty string.\n\n        :timeout:\n          The initial timeout (in seconds) for HTTP requests to Keep\n          non-proxy servers.  A tuple of three floats is interpreted as\n          (connection_timeout, read_timeout, minimum_bandwidth). A connection\n          will be aborted if the average traffic rate falls below\n          minimum_bandwidth bytes per second over an interval of read_timeout\n          seconds. Because timeouts are often a result of transient server\n          load, the actual connection timeout will be increased by a factor\n          of two on each retry.\n          Default: (2, 256, 32768).\n\n        :proxy_timeout:\n          The initial timeout (in seconds) for HTTP requests to\n          Keep proxies. A tuple of three floats is interpreted as\n          (connection_timeout, read_timeout, minimum_bandwidth). The behavior\n          described above for adjusting connection timeouts on retry also\n          applies.\n          Default: (20, 256, 32768).\n\n        :api_token:\n          If you're not using an API client, but only talking\n          directly to a Keep proxy, this parameter specifies an API token\n          to authenticate Keep requests.  It is an error to specify both\n          api_client and api_token.  If you specify neither, KeepClient\n          will use one available from the Arvados configuration.\n\n        :local_store:\n          If specified, this KeepClient will bypass Keep\n          services, and save data to the named directory.  If unspecified,\n          KeepClient will fall back to the setting of the $KEEP_LOCAL_STORE\n          environment variable.  If you want to ensure KeepClient does not\n          use local storage, pass in an empty string.  This is primarily\n          intended to mock a server for testing.\n\n        :num_retries:\n          The default number of times to retry failed requests.\n          This will be used as the default num_retries value when get() and\n          put() are called.  
Default 10.\n        \"\"\"\n        self.lock = threading.Lock()\n        if proxy is None:\n            if config.get('ARVADOS_KEEP_SERVICES'):\n                proxy = config.get('ARVADOS_KEEP_SERVICES')\n            else:\n                proxy = config.get('ARVADOS_KEEP_PROXY')\n        if api_token is None:\n            if api_client is None:\n                api_token = config.get('ARVADOS_API_TOKEN')\n            else:\n                api_token = api_client.api_token\n        elif api_client is not None:\n            raise ValueError(\n                \"can't build KeepClient with both API client and token\")\n        if local_store is None:\n            local_store = os.environ.get('KEEP_LOCAL_STORE')\n\n        if api_client is None:\n            self.insecure = config.flag_is_true('ARVADOS_API_HOST_INSECURE')\n        else:\n            self.insecure = api_client.insecure\n\n        self.block_cache = block_cache if block_cache else KeepBlockCache()\n        self.timeout = timeout\n        self.proxy_timeout = proxy_timeout\n        self._user_agent_pool = queue.LifoQueue()\n        self.upload_counter = _Counter()\n        self.download_counter = _Counter()\n        self.put_counter = _Counter()\n        self.get_counter = _Counter()\n        self.hits_counter = _Counter()\n        self.misses_counter = _Counter()\n        self._storage_classes_unsupported_warning = False\n        self._default_classes = []\n        if num_prefetch_threads is not None:\n            self.num_prefetch_threads = num_prefetch_threads\n        else:\n            self.num_prefetch_threads = 2\n        self._prefetch_queue = None\n        self._prefetch_threads = None\n\n        if local_store:\n            self.local_store = local_store\n            self.head = self.local_store_head\n            self.get = self.local_store_get\n            self.put = self.local_store_put\n        else:\n            self.num_retries = num_retries\n            self.max_replicas_per_service = None\n            if proxy:\n                proxy_uris = proxy.split()\n                for i in range(len(proxy_uris)):\n                    if not proxy_uris[i].endswith('/'):\n                        proxy_uris[i] += '/'\n                    # URL validation\n                    url = urllib.parse.urlparse(proxy_uris[i])\n                    if not (url.scheme and url.netloc):\n                        raise arvados.errors.ArgumentError(\"Invalid proxy URI: {}\".format(proxy_uris[i]))\n                self.api_token = api_token\n                self._gateway_services = {}\n                self._keep_services = [{\n                    'uuid': \"00000-bi6l4-%015d\" % idx,\n                    'service_type': 'proxy',\n                    '_service_root': uri,\n                    } for idx, uri in enumerate(proxy_uris)]\n                self._writable_services = self._keep_services\n                self.using_proxy = True\n                self._static_services_list = True\n            else:\n                # It's important to avoid instantiating an API client\n                # unless we actually need one, for testing's sake.\n                if api_client is None:\n                    api_client = arvados.api('v1')\n                self.api_client = api_client\n                self.api_token = api_client.api_token\n                self._gateway_services = {}\n                self._keep_services = None\n                self._writable_services = None\n                self.using_proxy = None\n                
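# The service list is fetched lazily from the API server the\n                # first time it is needed; see build_services_list().\n                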
self._static_services_list = False\n                try:\n                    self._default_classes = [\n                        k for k, v in self.api_client.config()['StorageClasses'].items() if v['Default']]\n                except KeyError:\n                    # We're talking to an old cluster\n                    pass\n\n    def current_timeout(self, attempt_number):\n        \"\"\"Return the appropriate timeout to use for this client.\n\n        The proxy timeout setting if the backend service is currently a proxy,\n        the regular timeout setting otherwise.  The `attempt_number` indicates\n        how many times the operation has been tried already (starting from 0\n        for the first try), and scales the connection timeout portion of the\n        return value accordingly.\n\n        \"\"\"\n        # TODO(twp): the timeout should be a property of a\n        # _KeepService, not a KeepClient. See #4488.\n        t = self.proxy_timeout if self.using_proxy else self.timeout\n        if len(t) == 2:\n            return (t[0] * (1 << attempt_number), t[1])\n        else:\n            return (t[0] * (1 << attempt_number), t[1], t[2])\n\n    def _any_nondisk_services(self, service_list):\n        return any(ks.get('service_type', 'disk') != 'disk'\n                   for ks in service_list)\n\n    def build_services_list(self, force_rebuild=False):\n        if (self._static_services_list or\n              (self._keep_services and not force_rebuild)):\n            return\n        with self.lock:\n            try:\n                keep_services = self.api_client.keep_services().accessible()\n            except Exception:  # API server predates Keep services.\n                keep_services = self.api_client.keep_disks().list()\n\n            # Gateway services are only used when specified by UUID,\n            # so there's nothing to gain by filtering them by\n            # service_type.\n            self._gateway_services = {ks['uuid']: ks for ks in\n                                      keep_services.execute()['items']}\n            if not self._gateway_services:\n                raise arvados.errors.NoKeepServersError()\n\n            # Precompute the base URI for each service.\n            for r in self._gateway_services.values():\n                host = r['service_host']\n                if not host.startswith('[') and host.find(':') >= 0:\n                    # IPv6 URIs must be formatted like http://[::1]:80/...\n                    host = '[' + host + ']'\n                r['_service_root'] = \"{}://{}:{:d}/\".format(\n                    'https' if r['service_ssl_flag'] else 'http',\n                    host,\n                    r['service_port'])\n\n            _logger.debug(str(self._gateway_services))\n            self._keep_services = [\n                ks for ks in self._gateway_services.values()\n                if not ks.get('service_type', '').startswith('gateway:')]\n            self._writable_services = [ks for ks in self._keep_services\n                                       if not ks.get('read_only')]\n\n            # For disk type services, max_replicas_per_service is 1.\n            # It is unknown (unlimited) for other service types.\n            if self._any_nondisk_services(self._writable_services):\n                self.max_replicas_per_service = None\n            else:\n                self.max_replicas_per_service = 1\n\n    def _service_weight(self, data_hash, service_uuid):\n        \"\"\"Compute the weight of a Keep service endpoint for a data\n        
block with a known hash.\n\n        The weight is md5(h + u), where h is the block's MD5 hash and u\n        is the last 15 characters of the service endpoint's UUID.\n        \"\"\"\n        return hashlib.md5((data_hash + service_uuid[-15:]).encode()).hexdigest()\n\n    def weighted_service_roots(self, locator, force_rebuild=False, need_writable=False):\n        \"\"\"Return an array of Keep service endpoints, in the order in\n        which they should be probed when reading or writing data with\n        the given hash+hints.\n        \"\"\"\n        self.build_services_list(force_rebuild)\n\n        sorted_roots = []\n        # Use the services indicated by the given +K@... remote\n        # service hints, if any are present and can be resolved to a\n        # URI.\n        for hint in locator.hints:\n            if hint.startswith('K@'):\n                if len(hint) == 7:\n                    sorted_roots.append(\n                        \"https://keep.{}.arvadosapi.com/\".format(hint[2:]))\n                elif len(hint) == 29:\n                    svc = self._gateway_services.get(hint[2:])\n                    if svc:\n                        sorted_roots.append(svc['_service_root'])\n\n        # Sort the available local services by weight (heaviest first)\n        # for this locator, and return their service_roots (base URIs)\n        # in that order.\n        use_services = self._keep_services\n        if need_writable:\n            use_services = self._writable_services\n        self.using_proxy = self._any_nondisk_services(use_services)\n        sorted_roots.extend([\n            svc['_service_root'] for svc in sorted(\n                use_services,\n                reverse=True,\n                key=lambda svc: self._service_weight(locator.md5sum, svc['uuid']))])\n        _logger.debug(\"{}: {}\".format(locator, sorted_roots))\n        return sorted_roots\n\n    def map_new_services(self, roots_map, locator, force_rebuild, need_writable, headers):\n        # roots_map is a dictionary, mapping Keep service root strings\n        # to _KeepService objects.  Poll for Keep services, and add any\n        # new ones to roots_map.  
Return the current list of local\n        # root strings.\n        headers.setdefault('Authorization', \"Bearer %s\" % (self.api_token,))\n        local_roots = self.weighted_service_roots(locator, force_rebuild, need_writable)\n        for root in local_roots:\n            if root not in roots_map:\n                roots_map[root] = self._KeepService(\n                    root, self._user_agent_pool,\n                    upload_counter=self.upload_counter,\n                    download_counter=self.download_counter,\n                    headers=headers,\n                    insecure=self.insecure)\n        return local_roots\n\n    @staticmethod\n    def _check_loop_result(result):\n        # KeepClient RetryLoops should save results as a 2-tuple: the\n        # actual result of the request, and the number of servers available\n        # to receive the request this round.\n        # This method returns True if there's a real result, False if\n        # there are no more servers available, otherwise None.\n        if isinstance(result, Exception):\n            return None\n        result, tried_server_count = result\n        if (result is not None) and (result is not False):\n            return True\n        elif tried_server_count < 1:\n            _logger.info(\"No more Keep services to try; giving up\")\n            return False\n        else:\n            return None\n\n    def get_from_cache(self, loc_s):\n        \"\"\"Fetch a block only if it is in the cache, otherwise return None.\"\"\"\n        locator = KeepLocator(loc_s)\n        slot = self.block_cache.get(locator.md5sum)\n        if slot is not None and slot.ready.is_set():\n            return slot.get()\n        else:\n            return None\n\n    def refresh_signature(self, loc):\n        \"\"\"Ask Keep to get the remote block and return its local signature\"\"\"\n        now = datetime.datetime.utcnow().isoformat(\"T\") + 'Z'\n        return self.head(loc, headers={'X-Keep-Signature': 'local, {}'.format(now)})\n\n    @retry.retry_method\n    def head(self, loc_s, **kwargs):\n        return self._get_or_head(loc_s, method=\"HEAD\", **kwargs)\n\n    @retry.retry_method\n    def get(self, loc_s, **kwargs):\n        return self._get_or_head(loc_s, method=\"GET\", **kwargs)\n\n    def _get_or_head(self, loc_s, method=\"GET\", num_retries=None, request_id=None, headers=None, prefetch=False):\n        \"\"\"Get data from Keep.\n\n        This method fetches one or more blocks of data from Keep.  It\n        sends a request to each Keep service registered with the API\n        server (or the proxy provided when this client was\n        instantiated), then each service named in location hints, in\n        sequence.  As soon as one service provides the data, it's\n        returned.\n\n        Arguments:\n        * loc_s: A string of one or more comma-separated locators to fetch.\n          This method returns the concatenation of these blocks.\n        * num_retries: The number of times to retry GET requests to\n          *each* Keep server if it returns temporary failures, with\n          exponential backoff.  Note that, in each loop, the method may try\n          to fetch data from every available Keep service, along with any\n          that are named in location hints in the locator.  
The default value\n          is set when the KeepClient is initialized.\n        \"\"\"\n        if ',' in loc_s:\n            return ''.join(self.get(x) for x in loc_s.split(','))\n\n        self.get_counter.add(1)\n\n        request_id = (request_id or\n                      (hasattr(self, 'api_client') and self.api_client.request_id) or\n                      arvados.util.new_request_id())\n        if headers is None:\n            headers = {}\n        headers['X-Request-Id'] = request_id\n\n        slot = None\n        blob = None\n        try:\n            locator = KeepLocator(loc_s)\n            if method == \"GET\":\n                while slot is None:\n                    slot, first = self.block_cache.reserve_cache(locator.md5sum)\n                    if first:\n                        # Fresh and empty \"first time it is used\" slot\n                        break\n                    if prefetch:\n                        # This is a prefetch request to fill the cache; we\n                        # don't need to wait for the result, so if the fetch\n                        # is already in flight, return immediately.  Clear\n                        # 'slot' to prevent the finally block from calling\n                        # slot.set().\n                        if slot.ready.is_set():\n                            slot.get()\n                        slot = None\n                        return None\n\n                    blob = slot.get()\n                    if blob is not None:\n                        self.hits_counter.add(1)\n                        return blob\n\n                    # If blob is None, this means either\n                    #\n                    # (a) another thread was fetching this block and\n                    # failed with an error or\n                    #\n                    # (b) cache thrashing caused the slot to be\n                    # evicted (content set to None) by another thread\n                    # between the call to reserve_cache() and get().\n                    #\n                    # We'll handle these cases by reserving a new slot\n                    # and then doing a full GET request.\n                    slot = None\n\n            self.misses_counter.add(1)\n\n            # If the locator has hints specifying a prefix (indicating a\n            # remote keepproxy) or the UUID of a local gateway service,\n            # read data from the indicated service(s) instead of the usual\n            # list of local disk services.\n            hint_roots = ['http://keep.{}.arvadosapi.com/'.format(hint[2:])\n                          for hint in locator.hints if hint.startswith('K@') and len(hint) == 7]\n            hint_roots.extend([self._gateway_services[hint[2:]]['_service_root']\n                               for hint in locator.hints if (\n                                       hint.startswith('K@') and\n                                       len(hint) == 29 and\n                                       self._gateway_services.get(hint[2:])\n                                       )])\n            # Map root URLs to their _KeepService objects.\n            roots_map = {\n                root: self._KeepService(root, self._user_agent_pool,\n                                       upload_counter=self.upload_counter,\n                                       download_counter=self.download_counter,\n                                       headers=headers,\n                                       insecure=self.insecure)\n                for root in 
hint_roots\n            }\n\n            # See #3147 for a discussion of the loop implementation.  Highlights:\n            # * Refresh the list of Keep services after each failure, in case\n            #   it's being updated.\n            # * Retry until we succeed, we're out of retries, or every available\n            #   service has returned permanent failure.\n            sorted_roots = []\n            roots_map = {}\n            loop = retry.RetryLoop(num_retries, self._check_loop_result,\n                                   backoff_start=2)\n            for tries_left in loop:\n                try:\n                    sorted_roots = self.map_new_services(\n                        roots_map, locator,\n                        force_rebuild=(tries_left < num_retries),\n                        need_writable=False,\n                        headers=headers)\n                except Exception as error:\n                    loop.save_result(error)\n                    continue\n\n                # Query _KeepService objects that haven't returned\n                # permanent failure, in our specified shuffle order.\n                services_to_try = [roots_map[root]\n                                   for root in sorted_roots\n                                   if roots_map[root].usable()]\n                for keep_service in services_to_try:\n                    blob = keep_service.get(locator, method=method, timeout=self.current_timeout(num_retries-tries_left))\n                    if blob is not None:\n                        break\n                loop.save_result((blob, len(services_to_try)))\n\n            # Always cache the result, then return it if we succeeded.\n            if loop.success():\n                return blob\n        finally:\n            if slot is not None:\n                self.block_cache.set(slot, blob)\n\n        # Q: Including 403 is necessary for the Keep tests to continue\n        # passing, but maybe they should expect KeepReadError instead?\n        not_founds = sum(1 for key in sorted_roots\n                         if roots_map[key].last_result().get('status_code', None) in {403, 404, 410})\n        service_errors = ((key, roots_map[key].last_result()['error'])\n                          for key in sorted_roots)\n        if not roots_map:\n            raise arvados.errors.KeepReadError(\n                \"[{}] failed to read {}: no Keep services available ({})\".format(\n                    request_id, loc_s, loop.last_result()))\n        elif not_founds == len(sorted_roots):\n            raise arvados.errors.NotFoundError(\n                \"[{}] {} not found\".format(request_id, loc_s), service_errors)\n        else:\n            raise arvados.errors.KeepReadError(\n                \"[{}] failed to read {} after {}\".format(request_id, loc_s, loop.attempts_str()), service_errors, label=\"service\")\n\n    @retry.retry_method\n    def put(self, data, copies=2, num_retries=None, request_id=None, classes=None):\n        \"\"\"Save data in Keep.\n\n        This method will get a list of Keep services from the API server, and\n        send the data to each one simultaneously in a new thread.  Once the\n        uploads are finished, if enough copies are saved, this method returns\n        the most recent HTTP response body.  
If requests fail to upload\n        enough copies, this method raises KeepWriteError.\n\n        Arguments:\n        * data: The string of data to upload.\n        * copies: The number of copies that the user requires be saved.\n          Default 2.\n        * num_retries: The number of times to retry PUT requests to\n          *each* Keep server if it returns temporary failures, with\n          exponential backoff.  The default value is set when the\n          KeepClient is initialized.\n        * classes: An optional list of storage class names where copies should\n          be written.\n        \"\"\"\n\n        classes = classes or self._default_classes\n\n        if not isinstance(data, bytes):\n            data = data.encode()\n\n        self.put_counter.add(1)\n\n        data_hash = hashlib.md5(data).hexdigest()\n        loc_s = data_hash + '+' + str(len(data))\n        if copies < 1:\n            return loc_s\n        locator = KeepLocator(loc_s)\n\n        request_id = (request_id or\n                      (hasattr(self, 'api_client') and self.api_client.request_id) or\n                      arvados.util.new_request_id())\n        headers = {\n            'X-Request-Id': request_id,\n            'X-Keep-Desired-Replicas': str(copies),\n        }\n        roots_map = {}\n        loop = retry.RetryLoop(num_retries, self._check_loop_result,\n                               backoff_start=2)\n        done_copies = 0\n        done_classes = []\n        for tries_left in loop:\n            try:\n                sorted_roots = self.map_new_services(\n                    roots_map, locator,\n                    force_rebuild=(tries_left < num_retries),\n                    need_writable=True,\n                    headers=headers)\n            except Exception as error:\n                loop.save_result(error)\n                continue\n\n            pending_classes = []\n            if done_classes is not None:\n                pending_classes = list(set(classes) - set(done_classes))\n            writer_pool = KeepClient._KeepWriterThreadPool(\n                data=data,\n                data_hash=data_hash,\n                copies=copies - done_copies,\n                max_service_replicas=self.max_replicas_per_service,\n                timeout=self.current_timeout(num_retries - tries_left),\n                classes=pending_classes,\n            )\n            for service_root, ks in [(root, roots_map[root])\n                                     for root in sorted_roots]:\n                if ks.finished():\n                    continue\n                writer_pool.add_task(ks, service_root)\n            writer_pool.join()\n            pool_copies, pool_classes = writer_pool.done()\n            done_copies += pool_copies\n            if (done_classes is not None) and (pool_classes is not None):\n                done_classes += pool_classes\n                loop.save_result(\n                    (done_copies >= copies and set(done_classes) == set(classes),\n                    writer_pool.total_task_nr))\n            else:\n                # Old keepstore contacted without storage classes support:\n                # success is determined only by successful copies.\n                #\n                # Disable storage classes tracking from this point forward.\n                if not self._storage_classes_unsupported_warning:\n                    self._storage_classes_unsupported_warning = True\n                    _logger.warning(\"X-Keep-Storage-Classes header not supported by the 
cluster\")\n                done_classes = None\n                loop.save_result(\n                    (done_copies >= copies, writer_pool.total_task_nr))\n\n        if loop.success():\n            return writer_pool.response()\n        if not roots_map:\n            raise arvados.errors.KeepWriteError(\n                \"[{}] failed to write {}: no Keep services available ({})\".format(\n                    request_id, data_hash, loop.last_result()))\n        else:\n            service_errors = ((key, roots_map[key].last_result()['error'])\n                              for key in sorted_roots\n                              if roots_map[key].last_result()['error'])\n            raise arvados.errors.KeepWriteError(\n                \"[{}] failed to write {} after {} (wanted {} copies but wrote {})\".format(\n                    request_id, data_hash, loop.attempts_str(), (copies, classes), writer_pool.done()), service_errors, label=\"service\")\n\n    def _block_prefetch_worker(self):\n        \"\"\"The background downloader thread.\"\"\"\n        while True:\n            try:\n                b = self._prefetch_queue.get()\n                if b is None:\n                    return\n                self.get(b, prefetch=True)\n            except Exception:\n                _logger.exception(\"Exception doing block prefetch\")\n\n    def _start_prefetch_threads(self):\n        if self._prefetch_threads is None:\n            with self.lock:\n                if self._prefetch_threads is not None:\n                    return\n                self._prefetch_queue = queue.Queue()\n                self._prefetch_threads = []\n                for i in range(0, self.num_prefetch_threads):\n                    thread = threading.Thread(target=self._block_prefetch_worker)\n                    self._prefetch_threads.append(thread)\n                    thread.daemon = True\n                    thread.start()\n\n    def block_prefetch(self, locator):\n        \"\"\"\n        This relies on the fact that KeepClient implements a block cache,\n        so repeated requests for the same block will not result in repeated\n        downloads (unless the block is evicted from the cache.)  
This method\n        does not block.\n        \"\"\"\n\n        if self.block_cache.get(locator) is not None:\n            return\n\n        self._start_prefetch_threads()\n        self._prefetch_queue.put(locator)\n\n    def stop_prefetch_threads(self):\n        with self.lock:\n            if self._prefetch_threads is not None:\n                for t in self._prefetch_threads:\n                    self._prefetch_queue.put(None)\n                for t in self._prefetch_threads:\n                    t.join()\n            self._prefetch_threads = None\n            self._prefetch_queue = None\n\n    def local_store_put(self, data, copies=1, num_retries=None, classes=[]):\n        \"\"\"A stub for put().\n\n        This method is used in place of the real put() method when\n        using local storage (see constructor's local_store argument).\n\n        copies and num_retries arguments are ignored: they are here\n        only for the sake of offering the same call signature as\n        put().\n\n        Data stored this way can be retrieved via local_store_get().\n        \"\"\"\n        md5 = hashlib.md5(data).hexdigest()\n        locator = '%s+%d' % (md5, len(data))\n        with open(os.path.join(self.local_store, md5 + '.tmp'), 'wb') as f:\n            f.write(data)\n        os.rename(os.path.join(self.local_store, md5 + '.tmp'),\n                  os.path.join(self.local_store, md5))\n        return locator\n\n    def local_store_get(self, loc_s, num_retries=None):\n        \"\"\"Companion to local_store_put().\"\"\"\n        try:\n            locator = KeepLocator(loc_s)\n        except ValueError:\n            raise arvados.errors.NotFoundError(\n                \"Invalid data locator: '%s'\" % loc_s)\n        if locator.md5sum == config.EMPTY_BLOCK_LOCATOR.split('+')[0]:\n            return b''\n        with open(os.path.join(self.local_store, locator.md5sum), 'rb') as f:\n            return f.read()\n\n    def local_store_head(self, loc_s, num_retries=None):\n        \"\"\"Companion to local_store_put().\"\"\"\n        try:\n            locator = KeepLocator(loc_s)\n        except ValueError:\n            raise arvados.errors.NotFoundError(\n                \"Invalid data locator: '%s'\" % loc_s)\n        if locator.md5sum == config.EMPTY_BLOCK_LOCATOR.split('+')[0]:\n            return True\n        if os.path.exists(os.path.join(self.local_store, locator.md5sum)):\n            return True\n"
  },
  {
    "path": "sdk/python/arvados/logging.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Logging utilities for Arvados clients\"\"\"\n\nimport logging\n\nlog_format = '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s'\nlog_date_format = '%Y-%m-%d %H:%M:%S'\nlog_handler = logging.StreamHandler()\nlog_handler.setFormatter(logging.Formatter(log_format, log_date_format))\n\nclass GoogleHTTPClientFilter:\n    \"\"\"Common googleapiclient.http log filters for Arvados clients\n\n    This filter makes `googleapiclient.http` log messages more useful for\n    typical Arvados applications. Currently it only changes the level of\n    retry messages (to INFO by default), but its functionality may be\n    extended in the future. Typical usage looks like:\n\n        logging.getLogger('googleapiclient.http').addFilter(GoogleHTTPClientFilter())\n    \"\"\"\n    def __init__(self, *, retry_level='INFO'):\n        self.retry_levelname = retry_level\n        self.retry_levelno = getattr(logging, retry_level)\n\n    def filter(self, record):\n        if record.msg.startswith(('Sleeping ', 'Retry ')):\n            record.levelname = self.retry_levelname\n            record.levelno = self.retry_levelno\n        return True\n"
  },
  {
    "path": "sdk/python/arvados/retry.py",
    "content": "\"\"\"Utilities to retry operations.\n\nThe core of this module is `RetryLoop`, a utility class to retry operations\nthat might fail. It can distinguish between temporary and permanent failures;\nprovide exponential backoff; and save a series of results.\n\nIt also provides utility functions for common operations with `RetryLoop`:\n\n* `check_http_response_success` can be used as a `RetryLoop` `success_check`\n  for HTTP response codes from the Arvados API server.\n* `retry_method` can decorate methods to provide a default `num_retries`\n  keyword argument.\n\"\"\"\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport functools\nimport inspect\nimport pycurl\nimport time\n\nfrom collections import deque\nfrom typing import (\n    Callable,\n    Generic,\n    Optional,\n    TypeVar,\n)\n\nimport arvados.errors\n\n_HTTP_SUCCESSES = set(range(200, 300))\n_HTTP_CAN_RETRY = set([408, 409, 423, 500, 502, 503, 504])\n\nCT = TypeVar('CT', bound=Callable)\nT = TypeVar('T')\n\nclass RetryLoop(Generic[T]):\n    \"\"\"Coordinate limited retries of code.\n\n    `RetryLoop` coordinates a loop that runs until it records a\n    successful result or tries too many times, whichever comes first.\n    Typical use looks like:\n\n        loop = RetryLoop(num_retries=2)\n        for tries_left in loop:\n            try:\n                result = do_something()\n            except TemporaryError as error:\n                log(\"error: {} ({} tries left)\".format(error, tries_left))\n            else:\n                loop.save_result(result)\n        if loop.success():\n            return loop.last_result()\n\n    Arguments:\n\n    * num_retries: int --- The maximum number of times to retry the loop if\n      it doesn't succeed.  This means the loop body could run at most\n      `num_retries + 1` times.\n\n    * success_check: Callable[[T], bool | None] --- This is a function that\n      will be called each time the loop saves a result.  The function should\n      return `True` if the result indicates the code succeeded, `False` if\n      it represents a permanent failure, and `None` if it represents a\n      temporary failure.  If no function is provided, the loop will end\n      after any result is saved.\n\n    * backoff_start: float --- The number of seconds that must pass before\n      the loop's second iteration.  Default 0, which disables all waiting.\n\n    * backoff_growth: float --- The wait time multiplier after each\n      iteration.  Default 2 (i.e., double the wait time each time).\n\n    * save_results: int --- Specify a number to store that many saved\n      results from the loop.  These are available through the `results`\n      attribute, oldest first.  Default 1.\n\n    * max_wait: float --- Maximum number of seconds to wait between\n      retries. 
Default 60.\n    \"\"\"\n    def __init__(\n            self,\n            num_retries: int,\n            success_check: Callable[[T], Optional[bool]]=lambda r: True,\n            backoff_start: float=0,\n            backoff_growth: float=2,\n            save_results: int=1,\n            max_wait: float=60\n    ) -> None:\n        self.tries_left = num_retries + 1\n        self.check_result = success_check\n        self.backoff_wait = backoff_start\n        self.backoff_growth = backoff_growth\n        self.max_wait = max_wait\n        self.next_start_time = 0\n        self.results = deque(maxlen=save_results)\n        self._attempts = 0\n        self._running = None\n        self._success = None\n\n    def __iter__(self) -> 'RetryLoop':\n        \"\"\"Return an iterator of retries.\"\"\"\n        return self\n\n    def running(self) -> Optional[bool]:\n        \"\"\"Return whether this loop is running.\n\n        Returns `None` if the loop has never run, `True` if it is still running,\n        or `False` if it has stopped—whether that's because it has saved a\n        successful result, a permanent failure, or has run out of retries.\n        \"\"\"\n        return self._running and (self._success is None)\n\n    def __next__(self) -> int:\n        \"\"\"Record a loop attempt.\n\n        If the loop is still running, decrements the number of tries left and\n        returns it. Otherwise, raises `StopIteration`.\n        \"\"\"\n        if self._running is None:\n            self._running = True\n        if (self.tries_left < 1) or not self.running():\n            self._running = False\n            raise StopIteration\n        else:\n            wait_time = max(0, self.next_start_time - time.time())\n            time.sleep(wait_time)\n            self.backoff_wait *= self.backoff_growth\n            if self.backoff_wait > self.max_wait:\n                self.backoff_wait = self.max_wait\n        self.next_start_time = time.time() + self.backoff_wait\n        self.tries_left -= 1\n        return self.tries_left\n\n    def save_result(self, result: T) -> None:\n        \"\"\"Record a loop result.\n\n        Save the given result, and end the loop if it indicates\n        success or permanent failure. 
See documentation for the `__init__`\n        `success_check` argument to learn how that's indicated.\n\n        Raises `arvados.errors.AssertionError` if called after the loop has\n        already ended.\n\n        Arguments:\n\n        * result: T --- The result from this loop attempt to check and save.\n        \"\"\"\n        if not self.running():\n            raise arvados.errors.AssertionError(\n                \"recorded a loop result after the loop finished\")\n        self.results.append(result)\n        self._success = self.check_result(result)\n        self._attempts += 1\n\n    def success(self) -> Optional[bool]:\n        \"\"\"Return the loop's end state.\n\n        Returns `True` if the loop recorded a successful result, `False` if it\n        recorded permanent failure, or else `None`.\n        \"\"\"\n        return self._success\n\n    def last_result(self) -> T:\n        \"\"\"Return the most recent result the loop saved.\n\n        Raises `arvados.errors.AssertionError` if called before any result has\n        been saved.\n        \"\"\"\n        try:\n            return self.results[-1]\n        except IndexError:\n            raise arvados.errors.AssertionError(\n                \"queried loop results before any were recorded\")\n\n    def attempts(self) -> int:\n        \"\"\"Return the number of results that have been saved.\n\n        This count includes all kinds of results: success, permanent failure,\n        and temporary failure.\n        \"\"\"\n        return self._attempts\n\n    def attempts_str(self) -> str:\n        \"\"\"Return a human-friendly string counting saved results.\n\n        This method returns '1 attempt' or 'N attempts', where the number\n        in the string is the number of saved results.\n        \"\"\"\n        if self._attempts == 1:\n            return '1 attempt'\n        else:\n            return '{} attempts'.format(self._attempts)\n\n\ndef check_http_response_success(status_code: int) -> Optional[bool]:\n    \"\"\"Convert a numeric HTTP status code to a loop control flag.\n\n    This method takes a numeric HTTP status code and returns `True` if\n    the code indicates success, `None` if it indicates temporary\n    failure, and `False` otherwise.  You can use this as the\n    `success_check` for a `RetryLoop` that queries the Arvados API server.\n    Specifically:\n\n    * Any 2xx result returns `True`.\n\n    * A select few status codes, or any malformed responses, return `None`.\n\n    * Everything else returns `False`.  Note that this includes 1xx and\n      3xx status codes.  They don't indicate success, and you can't\n      retry those requests verbatim.\n\n    Arguments:\n\n    * status_code: int --- A numeric HTTP response code\n    \"\"\"\n    if status_code in _HTTP_SUCCESSES:\n        return True\n    elif status_code in _HTTP_CAN_RETRY:\n        return None\n    elif 100 <= status_code < 600:\n        return False\n    else:\n        return None  # Get well soon, server.\n\ndef retry_method(orig_func: CT) -> CT:\n    \"\"\"Provide a default value for a method's num_retries argument.\n\n    This is a decorator for instance and class methods that accept a\n    `num_retries` keyword argument, with a `None` default.  
When the method\n    is called without a value for `num_retries`, this decorator will set it\n    from the `num_retries` attribute of the underlying instance or class.\n\n    Arguments:\n\n    * orig_func: Callable --- A class or instance method that accepts a\n    `num_retries` keyword argument\n    \"\"\"\n    @functools.wraps(orig_func)\n    def num_retries_setter(self, *args, **kwargs):\n        if kwargs.get('num_retries') is None:\n            kwargs['num_retries'] = self.num_retries\n        return orig_func(self, *args, **kwargs)\n    return num_retries_setter\n"
  },
  {
    "path": "sdk/python/arvados/safeapi.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"arvados.safeapi - Shim compatibility module\n\nThis module used to define `arvados.safeapi.ThreadSafeApiCache`. Now it only\nexists to provide backwards compatible imports. New code should prefer to\nimport `arvados.api`.\n\n@private\n\"\"\"\n\nfrom .api import ThreadSafeAPIClient as ThreadSafeApiCache\n"
  },
  {
    "path": "sdk/python/arvados/util.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Arvados utilities\n\nThis module provides functions and constants that are useful across a variety\nof Arvados resource types, or extend the Arvados API client (see `arvados.api`).\n\"\"\"\n\nimport errno\nimport fcntl\nimport hashlib\nimport httplib2\nimport operator\nimport os\nimport random\nimport re\nimport subprocess\nimport sys\n\nimport arvados.errors\n\nfrom typing import (\n    Any,\n    Callable,\n    Container,\n    Dict,\n    Iterator,\n    List,\n    TypeVar,\n    Union,\n)\n\nT = TypeVar('T')\n\nHEX_RE = re.compile(r'^[0-9a-fA-F]+$')\n\"\"\"Regular expression to match a hexadecimal string (case-insensitive)\"\"\"\nCR_UNCOMMITTED = 'Uncommitted'\n\"\"\"Constant `state` value for uncommitted container requests\"\"\"\nCR_COMMITTED = 'Committed'\n\"\"\"Constant `state` value for committed container requests\"\"\"\nCR_FINAL = 'Final'\n\"\"\"Constant `state` value for finalized container requests\"\"\"\n\nkeep_locator_pattern = re.compile(r'[0-9a-f]{32}\\+[0-9]+(\\+\\S+)*')\n\"\"\"Regular expression to match any Keep block locator\"\"\"\nsigned_locator_pattern = re.compile(r'[0-9a-f]{32}\\+[0-9]+(\\+\\S+)*\\+A\\S+(\\+\\S+)*')\n\"\"\"Regular expression to match any Keep block locator with an access token hint\"\"\"\nportable_data_hash_pattern = re.compile(r'[0-9a-f]{32}\\+[0-9]+')\n\"\"\"Regular expression to match any collection portable data hash\"\"\"\nmanifest_pattern = re.compile(r'((\\S+)( +[a-f0-9]{32}(\\+[0-9]+)(\\+\\S+)*)+( +[0-9]+:[0-9]+:\\S+)+$)+', flags=re.MULTILINE)\n\"\"\"Regular expression to match an Arvados collection manifest text\"\"\"\nkeep_file_locator_pattern = re.compile(r'([0-9a-f]{32}\\+[0-9]+)/(.*)')\n\"\"\"Regular expression to match a file path from a collection identified by portable data hash\"\"\"\nkeepuri_pattern = re.compile(r'keep:([0-9a-f]{32}\\+[0-9]+)/(.*)')\n\"\"\"Regular expression to match a `keep:` URI with a collection identified by portable data hash\"\"\"\n\nuuid_pattern = re.compile(r'[a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}')\n\"\"\"Regular expression to match any Arvados object UUID\"\"\"\ncollection_uuid_pattern = re.compile(r'[a-z0-9]{5}-4zz18-[a-z0-9]{15}')\n\"\"\"Regular expression to match any Arvados collection UUID\"\"\"\ncontainer_uuid_pattern = re.compile(r'[a-z0-9]{5}-dz642-[a-z0-9]{15}')\n\"\"\"Regular expression to match any Arvados container UUID\"\"\"\ngroup_uuid_pattern = re.compile(r'[a-z0-9]{5}-j7d0g-[a-z0-9]{15}')\n\"\"\"Regular expression to match any Arvados group UUID\"\"\"\nlink_uuid_pattern = re.compile(r'[a-z0-9]{5}-o0j2j-[a-z0-9]{15}')\n\"\"\"Regular expression to match any Arvados link UUID\"\"\"\nuser_uuid_pattern = re.compile(r'[a-z0-9]{5}-tpzed-[a-z0-9]{15}')\n\"\"\"Regular expression to match any Arvados user UUID\"\"\"\n\ndef is_hex(s: str, *length_args: int) -> bool:\n    \"\"\"Indicate whether a string is a hexadecimal number\n\n    This method returns True if all characters in the string are hexadecimal\n    digits. It is case-insensitive.\n\n    You can also pass optional length arguments to check that the string has\n    the expected number of digits. If you pass one integer, the string must\n    have that length exactly, otherwise the method returns False. 
If you\n    pass two integers, the string's length must fall within that minimum and\n    maximum (inclusive), otherwise the method returns False.\n\n    Arguments:\n\n    * s: str --- The string to check\n\n    * length_args: int --- Optional length limit(s) for the string to check\n    \"\"\"\n    num_length_args = len(length_args)\n    if num_length_args > 2:\n        raise arvados.errors.ArgumentError(\n            \"is_hex accepts up to 3 arguments ({} given)\".format(1 + num_length_args))\n    elif num_length_args == 2:\n        good_len = (length_args[0] <= len(s) <= length_args[1])\n    elif num_length_args == 1:\n        good_len = (len(s) == length_args[0])\n    else:\n        good_len = True\n    return bool(good_len and HEX_RE.match(s))\n\n\ndef keyset_list_all(\n        fn: Callable[..., 'arvados.api_resources.ArvadosAPIRequest'],\n        order_key: str=\"created_at\",\n        num_retries: int=0,\n        ascending: bool=True,\n        key_fields: Container[str]=('uuid',),\n        **kwargs: Any,\n) -> Iterator[Dict[str, Any]]:\n    \"\"\"Iterate all Arvados resources from an API list call\n\n    This method takes a method that represents an Arvados API list call, and\n    iterates the objects returned by the API server. It can make multiple API\n    calls to retrieve and iterate all objects available from the API server.\n\n    Arguments:\n\n    * fn: Callable[..., arvados.api_resources.ArvadosAPIRequest] --- A\n      function that wraps an Arvados API method that returns a list of\n      objects. If you have an Arvados API client named `arv`, examples\n      include `arv.collections().list` and `arv.groups().contents`. Note\n      that you should pass the function *without* calling it.\n\n    * order_key: str --- The name of the primary object field that objects\n      should be sorted by. This name is used to build an `order` argument\n      for `fn`. Default `'created_at'`.\n\n    * num_retries: int --- This argument is passed through to\n      `arvados.api_resources.ArvadosAPIRequest.execute` for each API call. See\n      that method's docstring for details. Default 0 (meaning API calls will\n      use the `num_retries` value set when the Arvados API client was\n      constructed).\n\n    * ascending: bool --- Used to build an `order` argument for `fn`. If True,\n      all fields will be sorted in `'asc'` (ascending) order. Otherwise, all\n      fields will be sorted in `'desc'` (descending) order.\n\n    * key_fields: Container[str] --- One or two fields that constitute\n      a unique key for returned items.  Normally this should be the\n      default value `('uuid',)`, unless `fn` returns\n      computed_permissions records, in which case it should be\n      `('user_uuid', 'target_uuid')`.  If two fields are given, one of\n      them must be equal to `order_key`.\n\n    Additional keyword arguments will be passed directly to `fn` for each API\n    call. 
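For example, a minimal sketch that pages through every collection visible to the current user, assuming `arv` is a client object built with `arvados.api`:\n\n        # arv is assumed to come from arvados.api('v1'); adjust to your setup.\n        for coll in keyset_list_all(arv.collections().list, select=['uuid', 'name']):\n            print(coll['uuid'], coll['name'])\n\n    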
Note that this function sets `count`, `limit`, and `order` as part of\n    its work.\n\n    \"\"\"\n    tiebreak_keys = set(key_fields) - {order_key}\n    if len(tiebreak_keys) == 0:\n        tiebreak_key = 'uuid'\n    elif len(tiebreak_keys) == 1:\n        tiebreak_key = tiebreak_keys.pop()\n    else:\n        raise arvados.errors.ArgumentError(\n            \"key_fields can have at most one entry that is not order_key\")\n\n    pagesize = 1000\n    kwargs[\"limit\"] = pagesize\n    kwargs[\"count\"] = 'none'\n    asc = \"asc\" if ascending else \"desc\"\n    kwargs[\"order\"] = [f\"{order_key} {asc}\", f\"{tiebreak_key} {asc}\"]\n    other_filters = kwargs.get(\"filters\", [])\n\n    if 'select' in kwargs:\n        kwargs['select'] = list({*kwargs['select'], *key_fields, order_key})\n\n    nextpage = []\n    tot = 0\n    expect_full_page = True\n    key_getter = operator.itemgetter(*key_fields)\n    seen_prevpage = set()\n    seen_thispage = set()\n    lastitem = None\n    prev_page_all_same_order_key = False\n\n    while True:\n        kwargs[\"filters\"] = nextpage+other_filters\n        items = fn(**kwargs).execute(num_retries=num_retries)\n\n        if len(items[\"items\"]) == 0:\n            if prev_page_all_same_order_key:\n                nextpage = [[order_key, \">\" if ascending else \"<\", lastitem[order_key]]]\n                prev_page_all_same_order_key = False\n                continue\n            else:\n                return\n\n        seen_prevpage = seen_thispage\n        seen_thispage = set()\n\n        for i in items[\"items\"]:\n            # In cases where there's more than one record with the\n            # same order key, the result could include records we\n            # already saw in the last page.  Skip them.\n            seen_key = key_getter(i)\n            if seen_key in seen_prevpage:\n                continue\n            seen_thispage.add(seen_key)\n            yield i\n\n        firstitem = items[\"items\"][0]\n        lastitem = items[\"items\"][-1]\n\n        if firstitem[order_key] == lastitem[order_key]:\n            # Got a page where every item has the same order key.\n            # Switch to using tiebreak key for paging.\n            nextpage = [[order_key, \"=\", lastitem[order_key]], [tiebreak_key, \">\" if ascending else \"<\", lastitem[tiebreak_key]]]\n            prev_page_all_same_order_key = True\n        else:\n            # Start from the last order key seen, but skip the last\n            # known uuid to avoid retrieving the same row twice.  
If\n            # there are multiple rows with the same order key it is\n            # still likely we'll end up retrieving duplicate rows.\n            # That's handled by tracking the \"seen\" rows for each page\n            # so they can be skipped if they show up on the next page.\n            nextpage = [[order_key, \">=\" if ascending else \"<=\", lastitem[order_key]]]\n            if tiebreak_key == \"uuid\":\n                nextpage += [[tiebreak_key, \"!=\", lastitem[tiebreak_key]]]\n            prev_page_all_same_order_key = False\n\n\ndef iter_computed_permissions(\n        fn: Callable[..., 'arvados.api_resources.ArvadosAPIRequest'],\n        order_key: str='user_uuid',\n        num_retries: int=0,\n        ascending: bool=True,\n        key_fields: Container[str]=('user_uuid', 'target_uuid'),\n        **kwargs: Any,\n) -> Iterator[Dict[str, Any]]:\n    \"\"\"Iterate all `computed_permission` resources\n\n    This method is the same as `keyset_list_all`, except that its\n    default arguments are suitable for the computed_permissions API.\n\n    Arguments:\n\n    * fn: Callable[..., arvados.api_resources.ArvadosAPIRequest] ---\n      see `keyset_list_all`.  Typically this is an instance of\n      `arvados.api_resources.ComputedPermissions.list`.  Given an\n      Arvados API client named `arv`, typical usage is\n      `iter_computed_permissions(arv.computed_permissions().list)`.\n\n    * order_key: str --- see `keyset_list_all`.  Default\n      `'user_uuid'`.\n\n    * num_retries: int --- see `keyset_list_all`.\n\n    * ascending: bool --- see `keyset_list_all`.\n\n    * key_fields: Container[str] --- see `keyset_list_all`. Default\n      `('user_uuid', 'target_uuid')`.\n\n    \"\"\"\n    return keyset_list_all(\n        fn=fn,\n        order_key=order_key,\n        num_retries=num_retries,\n        ascending=ascending,\n        key_fields=key_fields,\n        **kwargs)\n\n\ndef ca_certs_path(fallback: T=httplib2.CA_CERTS) -> Union[str, T]:\n    \"\"\"Return the path of the best available source of CA certificates\n\n    This function checks various known paths that provide trusted CA\n    certificates, and returns the first one that exists. 
It checks:\n\n    * the path in the `SSL_CERT_FILE` environment variable (used by OpenSSL)\n    * `/etc/arvados/ca-certificates.crt`, respected by all Arvados software\n    * `/etc/ssl/certs/ca-certificates.crt`, the default store on Debian-based\n      distributions\n    * `/etc/pki/tls/certs/ca-bundle.crt`, the default store on Red Hat-based\n      distributions\n\n    If none of these paths exist, this function returns the value of `fallback`.\n\n    Arguments:\n\n    * fallback: T --- The value to return if none of the known paths exist.\n      The default value is the certificate store of Mozilla's trusted CAs\n      included with the Python [certifi][] package.\n\n    [certifi]: https://pypi.org/project/certifi/\n    \"\"\"\n    for ca_certs_path in [\n        # SSL_CERT_FILE and SSL_CERT_DIR are openssl overrides - note\n        # that httplib2 itself also supports HTTPLIB2_CA_CERTS.\n        os.environ.get('SSL_CERT_FILE'),\n        # Arvados specific:\n        '/etc/arvados/ca-certificates.crt',\n        # Debian:\n        '/etc/ssl/certs/ca-certificates.crt',\n        # Red Hat:\n        '/etc/pki/tls/certs/ca-bundle.crt',\n        ]:\n        if ca_certs_path and os.path.exists(ca_certs_path):\n            return ca_certs_path\n    return fallback\n\n\ndef new_request_id() -> str:\n    \"\"\"Return a random request ID\n\n    This function generates and returns a random string suitable for use as a\n    `X-Request-Id` header value in the Arvados API.\n    \"\"\"\n    rid = \"req-\"\n    # 2**104 > 36**20 > 2**103\n    n = random.getrandbits(104)\n    for _ in range(20):\n        c = n % 36\n        if c < 10:\n            rid += chr(c+ord('0'))\n        else:\n            rid += chr(c+ord('a')-10)\n        n = n // 36\n    return rid\n\n\ndef get_config_once(svc: 'arvados.api_resources.ArvadosAPIClient') -> Dict[str, Any]:\n    \"\"\"Return an Arvados cluster's configuration, with caching\n\n    This function gets and returns the Arvados configuration from the API\n    server. It caches the result on the client object and reuses it on any\n    future calls.\n\n    Arguments:\n\n    * svc: arvados.api_resources.ArvadosAPIClient --- The Arvados API client\n      object to use to retrieve and cache the Arvados cluster configuration.\n    \"\"\"\n    if not svc._rootDesc.get('resources').get('configs', False):\n        # Old API server version, no config export endpoint\n        return {}\n    if not hasattr(svc, '_cached_config'):\n        svc._cached_config = svc.configs().get().execute()\n    return svc._cached_config\n\n\ndef get_vocabulary_once(svc: 'arvados.api_resources.ArvadosAPIClient') -> Dict[str, Any]:\n    \"\"\"Return an Arvados cluster's vocabulary, with caching\n\n    This function gets and returns the Arvados vocabulary from the API\n    server. It caches the result on the client object and reuses it on any\n    future calls.\n\n    .. HINT:: Low-level method\n       This is a relatively low-level wrapper around the Arvados API. 
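For example, a minimal sketch, assuming `arv` is a client object built with `arvados.api`:\n\n           # arv is assumed to come from arvados.api('v1').\n           vocab = get_vocabulary_once(arv)\n           print(sorted(vocab.get('tags', {})))\n\n       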
Most\n       users will prefer to use `arvados.vocabulary.load_vocabulary`.\n\n    Arguments:\n\n    * svc: arvados.api_resources.ArvadosAPIClient --- The Arvados API client\n      object to use to retrieve and cache the Arvados cluster vocabulary.\n    \"\"\"\n    if not svc._rootDesc.get('resources').get('vocabularies', False):\n        # Old API server version, no vocabulary export endpoint\n        return {}\n    if not hasattr(svc, '_cached_vocabulary'):\n        svc._cached_vocabulary = svc.vocabularies().get().execute()\n    return svc._cached_vocabulary\n\n\ndef trim_name(collectionname: str) -> str:\n    \"\"\"Limit the length of a name to fit within Arvados API limits\n\n    This function ensures that a string is short enough to use as an object\n    name in the Arvados API, leaving room for text that may be added by the\n    `ensure_unique_name` argument. If the source name is short enough, it is\n    returned unchanged. Otherwise, this function returns a string with excess\n    characters removed from the middle of the source string and replaced with\n    an ellipsis.\n\n    Arguments:\n\n    * collectionname: str --- The desired source name\n    \"\"\"\n    max_name_len = 254 - 28\n\n    if len(collectionname) > max_name_len:\n        over = len(collectionname) - max_name_len\n        split = int(max_name_len/2)\n        collectionname = collectionname[0:split] + \"…\" + collectionname[split+over:]\n\n    return collectionname\n\n\ndef iter_storage_classes(\n        config: Dict[str, Any],\n        check: Callable[[Dict[str, Any]], bool]=operator.methodcaller('get', 'Default'),\n        fallback: str=\"default\",\n) -> Iterator[str]:\n    \"\"\"Read storage classes from the API client config\n\n    This function iterates storage class names for classes in `config` that\n    pass `check`. If no matches are found but `fallback` is given, it is\n    yielded.\n    \"\"\"\n    any_found = False\n    for key, value in config.get(\"StorageClasses\", {}).items():\n        if check(value):\n            any_found = True\n            yield key\n    if fallback and not any_found:\n        yield fallback\n"
  },
  {
    "path": "sdk/python/arvados/vocabulary.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport logging\n\nfrom . import api\n\n_logger = logging.getLogger('arvados.vocabulary')\n\ndef load_vocabulary(api_client=None):\n    \"\"\"Load the Arvados vocabulary from the API.\n    \"\"\"\n    if api_client is None:\n        api_client = api('v1')\n    return Vocabulary(api_client.vocabulary())\n\nclass VocabularyError(Exception):\n    \"\"\"Base class for all vocabulary errors.\n    \"\"\"\n    pass\n\nclass VocabularyKeyError(VocabularyError):\n    pass\n\nclass VocabularyValueError(VocabularyError):\n    pass\n\nclass Vocabulary(object):\n    def __init__(self, voc_definition={}):\n        self.strict_keys = voc_definition.get('strict_tags', False)\n        self.key_aliases = {}\n\n        for key_id, val in (voc_definition.get('tags') or {}).items():\n            strict = val.get('strict', False)\n            key_labels = [l['label'] for l in val.get('labels', [])]\n            values = {}\n            for v_id, v_val in (val.get('values') or {}).items():\n                labels = [l['label'] for l in v_val.get('labels', [])]\n                values[v_id] = VocabularyValue(v_id, labels)\n            vk = VocabularyKey(key_id, key_labels, values, strict)\n            self.key_aliases[key_id.lower()] = vk\n            for alias in vk.aliases:\n                self.key_aliases[alias.lower()] = vk\n\n    def __getitem__(self, key):\n        return self.key_aliases[key.lower()]\n\n    def convert_to_identifiers(self, obj={}):\n        \"\"\"Translate key/value pairs to machine readable identifiers.\n        \"\"\"\n        return self._convert_to_what(obj, 'identifier')\n\n    def convert_to_labels(self, obj={}):\n        \"\"\"Translate key/value pairs to human readable labels.\n        \"\"\"\n        return self._convert_to_what(obj, 'preferred_label')\n\n    def _convert_to_what(self, obj={}, what=None):\n        if not isinstance(obj, dict):\n            raise ValueError(\"obj must be a dict\")\n        if what not in ['preferred_label', 'identifier']:\n            raise ValueError(\"what attr must be 'preferred_label' or 'identifier'\")\n        r = {}\n        for k, v in obj.items():\n            # Key validation & lookup\n            key_found = False\n            if not isinstance(k, str):\n                raise VocabularyKeyError(\"key '{}' must be a string\".format(k))\n            k_what, v_what = k, v\n            try:\n                k_what = getattr(self[k], what)\n                key_found = True\n            except KeyError:\n                if self.strict_keys:\n                    raise VocabularyKeyError(\"key '{}' not found in vocabulary\".format(k))\n\n            # Value validation & lookup\n            if isinstance(v, list):\n                v_what = []\n                for x in v:\n                    if not isinstance(x, str):\n                        raise VocabularyValueError(\"value '{}' for key '{}' must be a string\".format(x, k))\n                    try:\n                        v_what.append(getattr(self[k][x], what))\n                    except KeyError:\n                        if self[k].strict:\n                            raise VocabularyValueError(\"value '{}' not found for key '{}'\".format(x, k))\n                        v_what.append(x)\n            else:\n                if not isinstance(v, str):\n                    raise VocabularyValueError(\"{} value '{}' for key '{}' must be a string\".format(type(v).__name__, v, 
k))\n                try:\n                    v_what = getattr(self[k][v], what)\n                except KeyError:\n                    if key_found and self[k].strict:\n                        raise VocabularyValueError(\"value '{}' not found for key '{}'\".format(v, k))\n\n            r[k_what] = v_what\n        return r\n\nclass VocabularyData(object):\n    def __init__(self, identifier, aliases=[]):\n        self.identifier = identifier\n        self.aliases = aliases\n\n    def __getattribute__(self, name):\n        if name == 'preferred_label':\n            return self.aliases[0]\n        return super(VocabularyData, self).__getattribute__(name)\n\nclass VocabularyValue(VocabularyData):\n    def __init__(self, identifier, aliases=[]):\n        super(VocabularyValue, self).__init__(identifier, aliases)\n\nclass VocabularyKey(VocabularyData):\n    def __init__(self, identifier, aliases=[], values={}, strict=False):\n        super(VocabularyKey, self).__init__(identifier, aliases)\n        self.strict = strict\n        self.value_aliases = {}\n        for v_id, v_val in values.items():\n            self.value_aliases[v_id.lower()] = v_val\n            for v_alias in v_val.aliases:\n                self.value_aliases[v_alias.lower()] = v_val\n\n    def __getitem__(self, key):\n        return self.value_aliases[key.lower()]"
  },
  {
    "path": "sdk/python/arvados-v1-discovery.json",
    "content": "{\n  \"auth\": {\n    \"oauth2\": {\n      \"scopes\": {\n        \"https://api.arvados.org/auth/arvados\": {\n          \"description\": \"View and manage objects\"\n        },\n        \"https://api.arvados.org/auth/arvados.readonly\": {\n          \"description\": \"View objects\"\n        }\n      }\n    }\n  },\n  \"basePath\": \"/arvados/v1/\",\n  \"batchPath\": \"batch\",\n  \"description\": \"The API to interact with Arvados.\",\n  \"discoveryVersion\": \"v1\",\n  \"documentationLink\": \"http://doc.arvados.org/api/index.html\",\n  \"id\": \"arvados:v1\",\n  \"kind\": \"discovery#restDescription\",\n  \"name\": \"arvados\",\n  \"parameters\": {\n    \"alt\": {\n      \"type\": \"string\",\n      \"description\": \"Data format for the response.\",\n      \"default\": \"json\",\n      \"enum\": [\n        \"json\"\n      ],\n      \"enumDescriptions\": [\n        \"Responses with Content-Type of application/json\"\n      ],\n      \"location\": \"query\"\n    },\n    \"fields\": {\n      \"type\": \"string\",\n      \"description\": \"Selector specifying which fields to include in a partial response.\",\n      \"location\": \"query\"\n    },\n    \"key\": {\n      \"type\": \"string\",\n      \"description\": \"API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.\",\n      \"location\": \"query\"\n    },\n    \"oauth_token\": {\n      \"type\": \"string\",\n      \"description\": \"OAuth 2.0 token for the current user.\",\n      \"location\": \"query\"\n    }\n  },\n  \"protocol\": \"rest\",\n  \"resources\": {\n    \"api_client_authorizations\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.api_client_authorizations.get\",\n          \"path\": \"api_client_authorizations/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a ApiClientAuthorization record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the ApiClientAuthorization to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.api_client_authorizations.list\",\n          \"path\": \"api_client_authorizations\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a ApiClientAuthorizationList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n       
       \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorizationList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.api_client_authorizations.create\",\n          \"path\": \"api_client_authorizations\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new ApiClientAuthorization.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"api_client_authorization\": {\n                \"$ref\": \"ApiClientAuthorization\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.api_client_authorizations.update\",\n          \"path\": \"api_client_authorizations/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing ApiClientAuthorization.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the ApiClientAuthorization to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n        
    \"required\": true,\n            \"properties\": {\n              \"api_client_authorization\": {\n                \"$ref\": \"ApiClientAuthorization\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.api_client_authorizations.delete\",\n          \"path\": \"api_client_authorizations/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing ApiClientAuthorization.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the ApiClientAuthorization to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"create_system_auth\": {\n          \"id\": \"arvados.api_client_authorizations.create_system_auth\",\n          \"path\": \"api_client_authorizations/create_system_auth\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a token for the system (\\\"root\\\") user.\",\n          \"parameters\": {\n            \"scopes\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"default\": \"[\\\"all\\\"]\",\n              \"description\": \"An array of strings defining the scope of resources this token will be allowed to access. Refer to the [scopes reference][] for details.\\n\\n[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"current\": {\n          \"id\": \"arvados.api_client_authorizations.current\",\n          \"path\": \"api_client_authorizations/current\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Return all metadata for the token used to authorize this request.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"authorized_keys\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.authorized_keys.get\",\n          \"path\": \"authorized_keys/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a AuthorizedKey record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the AuthorizedKey to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n         
   \"$ref\": \"AuthorizedKey\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.authorized_keys.list\",\n          \"path\": \"authorized_keys\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a AuthorizedKeyList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. 
Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"AuthorizedKeyList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.authorized_keys.create\",\n          \"path\": \"authorized_keys\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new AuthorizedKey.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"authorized_key\": {\n                \"$ref\": \"AuthorizedKey\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"AuthorizedKey\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.authorized_keys.update\",\n          \"path\": \"authorized_keys/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing AuthorizedKey.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the AuthorizedKey to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              
\"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"authorized_key\": {\n                \"$ref\": \"AuthorizedKey\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"AuthorizedKey\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.authorized_keys.delete\",\n          \"path\": \"authorized_keys/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing AuthorizedKey.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the AuthorizedKey to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"AuthorizedKey\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"collections\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.collections.get\",\n          \"path\": \"collections/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Collection record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Show collection even if its `is_trashed` attribute is true.\",\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.collections.list\",\n          \"path\": \"collections\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a CollectionList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys 
of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Include collections whose `is_trashed` attribute is true.\",\n              \"location\": \"query\"\n            },\n            \"include_old_versions\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Include past collection versions.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"CollectionList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.collections.create\",\n          \"path\": \"collections\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Collection.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"replace_files\": {\n              \"type\": \"object\",\n              \"description\": \"Add, delete, and replace files and directories with new content\\nand/or content from other collections. 
Refer to the\\n[replace_files reference][] for details.\\n\\n[replace_files reference]: https://doc.arvados.org/api/methods/collections.html#replace_files\\n\\n\",\n              \"required\": false,\n              \"location\": \"query\",\n              \"properties\": {},\n              \"additionalProperties\": {\n                \"type\": \"string\"\n              }\n            },\n            \"replace_segments\": {\n              \"type\": \"object\",\n              \"description\": \"Replace existing block segments in the collection with new segments.\\nRefer to the [replace_segments reference][] for details.\\n\\n[replace_segments reference]: https://doc.arvados.org/api/methods/collections.html#replace_segments\\n\\n\",\n              \"required\": false,\n              \"location\": \"query\",\n              \"properties\": {},\n              \"additionalProperties\": {\n                \"type\": \"string\"\n              }\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"collection\": {\n                \"$ref\": \"Collection\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.collections.update\",\n          \"path\": \"collections/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Collection.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"replace_files\": {\n              \"type\": \"object\",\n              \"description\": \"Add, delete, and replace files and directories with new content\\nand/or content from other collections. 
Refer to the\\n[replace_files reference][] for details.\\n\\n[replace_files reference]: https://doc.arvados.org/api/methods/collections.html#replace_files\\n\\n\",\n              \"required\": false,\n              \"location\": \"query\",\n              \"properties\": {},\n              \"additionalProperties\": {\n                \"type\": \"string\"\n              }\n            },\n            \"replace_segments\": {\n              \"type\": \"object\",\n              \"description\": \"Replace existing block segments in the collection with new segments.\\nRefer to the [replace_segments reference][] for details.\\n\\n[replace_segments reference]: https://doc.arvados.org/api/methods/collections.html#replace_segments\\n\\n\",\n              \"required\": false,\n              \"location\": \"query\",\n              \"properties\": {},\n              \"additionalProperties\": {\n                \"type\": \"string\"\n              }\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"collection\": {\n                \"$ref\": \"Collection\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.collections.delete\",\n          \"path\": \"collections/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Collection.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"provenance\": {\n          \"id\": \"arvados.collections.provenance\",\n          \"path\": \"collections/{uuid}/provenance\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Detail the provenance of a given collection.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"used_by\": {\n          \"id\": \"arvados.collections.used_by\",\n          \"path\": \"collections/{uuid}/used_by\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Detail where a given collection has been used.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"trash\": {\n          \"id\": \"arvados.collections.trash\",\n          \"path\": 
\"collections/{uuid}/trash\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Trash a collection.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"untrash\": {\n          \"id\": \"arvados.collections.untrash\",\n          \"path\": \"collections/{uuid}/untrash\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Untrash a collection.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Collection to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Collection\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"computed_permissions\": {\n      \"methods\": {\n        \"list\": {\n          \"id\": \"arvados.computed_permissions.list\",\n          \"path\": \"computed_permissions\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a ComputedPermissionList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n       
     },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ComputedPermissionList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        }\n      }\n    },\n    \"containers\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.containers.get\",\n          \"path\": \"containers/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Container record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.containers.list\",\n          \"path\": \"containers\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a ContainerList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and 
preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_avaliable`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ContainerList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.containers.create\",\n          \"path\": \"containers\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Container.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"container\": {\n                \"$ref\": \"Container\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.containers.update\",\n          \"path\": \"containers/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Container.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"container\": {\n                \"$ref\": \"Container\"\n              }\n       
     }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.containers.delete\",\n          \"path\": \"containers/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Container.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"auth\": {\n          \"id\": \"arvados.containers.auth\",\n          \"path\": \"containers/{uuid}/auth\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get the API client authorization token associated with this container.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"lock\": {\n          \"id\": \"arvados.containers.lock\",\n          \"path\": \"containers/{uuid}/lock\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Lock a container (for a dispatcher to begin running it).\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to lock.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"unlock\": {\n          \"id\": \"arvados.containers.unlock\",\n          \"path\": \"containers/{uuid}/unlock\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Unlock a container (for a dispatcher to stop running it).\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to unlock.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update_priority\": {\n          \"id\": \"arvados.containers.update_priority\",\n          \"path\": \"containers/{uuid}/update_priority\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Recalculate and return the priority of a given container.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n         
   \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"secret_mounts\": {\n          \"id\": \"arvados.containers.secret_mounts\",\n          \"path\": \"containers/{uuid}/secret_mounts\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Return secret mount information for the container associated with the API token authorizing this request.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Container to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"current\": {\n          \"id\": \"arvados.containers.current\",\n          \"path\": \"containers/current\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Return the container record associated with the API token authorizing this request.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"Container\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"container_requests\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.container_requests.get\",\n          \"path\": \"container_requests/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a ContainerRequest record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the ContainerRequest to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Show container request even if its owner project is trashed.\",\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"ContainerRequest\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.container_requests.list\",\n          \"path\": \"container_requests\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a ContainerRequestList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n     
         \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_avaliable`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Include container requests whose owner project is trashed.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ContainerRequestList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.container_requests.create\",\n          \"path\": \"container_requests\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new ContainerRequest.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"container_request\": {\n                \"$ref\": \"ContainerRequest\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ContainerRequest\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.container_requests.update\",\n          \"path\": \"container_requests/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing ContainerRequest.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the ContainerRequest to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n       
       \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"container_request\": {\n                \"$ref\": \"ContainerRequest\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ContainerRequest\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.container_requests.delete\",\n          \"path\": \"container_requests/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing ContainerRequest.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the ContainerRequest to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ContainerRequest\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"container_status\": {\n          \"id\": \"arvados.container_requests.container_status\",\n          \"path\": \"container_requests/{uuid}/container_status\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Return scheduling details for a container request.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"required\": true,\n              \"description\": \"The UUID of the container request to query.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"ContainerRequest\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"credentials\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.credentials.get\",\n          \"path\": \"credentials/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Credential record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Credential to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Credential\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.credentials.list\",\n          \"path\": \"credentials\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a CredentialList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to 
limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"CredentialList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.credentials.create\",\n          \"path\": \"credentials\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Credential.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"credential\": {\n                \"$ref\": \"Credential\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Credential\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.credentials.update\",\n          \"path\": \"credentials/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Credential.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Credential to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"credential\": {\n                \"$ref\": \"Credential\"\n           
   }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Credential\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.credentials.delete\",\n          \"path\": \"credentials/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Credential.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Credential to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Credential\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"secret\": {\n          \"id\": \"arvados.credentials.secret\",\n          \"path\": \"credentials/{uuid}/secret\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Fetch the secret part of the credential (can only be invoked by running containers).\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Credential to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Credential\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"groups\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.groups.get\",\n          \"path\": \"groups/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Group record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Group to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Return group/project even if its `is_trashed` attribute is true.\",\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.groups.list\",\n          \"path\": \"groups\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a GroupList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: 
https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Include items whose `is_trashed` attribute is true.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"GroupList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.groups.create\",\n          \"path\": \"groups\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Group.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"async\": {\n              \"required\": false,\n              \"type\": \"boolean\",\n              \"location\": \"query\",\n              \"default\": \"false\",\n              \"description\": \"If true, cluster permission will not be updated immediately, but instead at the next configured update interval.\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"group\": {\n                \"$ref\": \"Group\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.groups.update\",\n          \"path\": \"groups/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Group.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              
\"description\": \"The UUID of the Group to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"async\": {\n              \"required\": false,\n              \"type\": \"boolean\",\n              \"location\": \"query\",\n              \"default\": \"false\",\n              \"description\": \"If true, cluster permission will not be updated immediately, but instead at the next configured update interval.\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"group\": {\n                \"$ref\": \"Group\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.groups.delete\",\n          \"path\": \"groups/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Group.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Group to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"contents\": {\n          \"id\": \"arvados.groups.contents\",\n          \"path\": \"groups/contents\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List objects that belong to a group.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": 
\"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_avaliable`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Include items whose `is_trashed` attribute is true.\",\n              \"location\": \"query\"\n            },\n            \"uuid\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"\",\n              \"description\": \"If given, limit the listing to objects owned by the\\nuser or group with this UUID.\",\n              \"location\": \"query\"\n            },\n            \"recursive\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, include contents from child groups recursively.\",\n              \"location\": \"query\"\n            },\n            \"include\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of referenced objects to include in the `included` field of the response. 
Supported values in the array are:\\n\\n  * `\\\"container_uuid\\\"`\\n  * `\\\"owner_uuid\\\"`\\n  * `\\\"collection_uuid\\\"`\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"include_old_versions\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, include past versions of collections in the listing.\",\n              \"location\": \"query\"\n            },\n            \"exclude_home_project\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, exclude contents of the user's home project from the listing.\\nCalling this method with this flag set is how clients enumerate objects shared\\nwith the current user.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"shared\": {\n          \"id\": \"arvados.groups.shared\",\n          \"path\": \"groups/shared\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List groups that the current user can access via permission links.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The 
maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            },\n            \"include_trash\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"Include items whose `is_trashed` attribute is true.\",\n              \"location\": \"query\"\n            },\n            \"include\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"A string naming referenced objects to include in the `included` field of the response. 
Supported values are:\\n\\n  * `\\\"owner_uuid\\\"`\\n\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"trash\": {\n          \"id\": \"arvados.groups.trash\",\n          \"path\": \"groups/{uuid}/trash\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Trash a group.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Group to trash.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"untrash\": {\n          \"id\": \"arvados.groups.untrash\",\n          \"path\": \"groups/{uuid}/untrash\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Untrash a group.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Group to untrash.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Group\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"keep_services\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.keep_services.get\",\n          \"path\": \"keep_services/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a KeepService record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the KeepService to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"KeepService\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.keep_services.list\",\n          \"path\": \"keep_services\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a KeepServiceList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by 
their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"KeepServiceList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.keep_services.create\",\n          \"path\": \"keep_services\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new KeepService.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"keep_service\": {\n                \"$ref\": \"KeepService\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"KeepService\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.keep_services.update\",\n          \"path\": \"keep_services/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing KeepService.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the KeepService to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"keep_service\": {\n                \"$ref\": 
\"KeepService\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"KeepService\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.keep_services.delete\",\n          \"path\": \"keep_services/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing KeepService.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the KeepService to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"KeepService\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"accessible\": {\n          \"id\": \"arvados.keep_services.accessible\",\n          \"path\": \"keep_services/accessible\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List Keep services that the current client can access.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"KeepService\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"links\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.links.get\",\n          \"path\": \"links/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Link record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Link to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Link\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.links.list\",\n          \"path\": \"links\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a LinkList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": 
\"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_avaliable`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"LinkList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.links.create\",\n          \"path\": \"links\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Link.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"link\": {\n                \"$ref\": \"Link\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Link\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.links.update\",\n          \"path\": \"links/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Link.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Link to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"link\": {\n                \"$ref\": \"Link\"\n              }\n            }\n          },\n          \"response\": {\n            
\"$ref\": \"Link\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.links.delete\",\n          \"path\": \"links/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Link.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Link to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Link\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"get_permissions\": {\n          \"id\": \"arvados.links.get_permissions\",\n          \"path\": \"permissions/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List permissions granted on an Arvados object.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Link to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Link\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"logs\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.logs.get\",\n          \"path\": \"logs/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Log record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Log to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Log\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.logs.list\",\n          \"path\": \"logs\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a LogList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible 
and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"LogList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.logs.create\",\n          \"path\": \"logs\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Log.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"log\": {\n                \"$ref\": \"Log\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Log\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.logs.update\",\n          \"path\": \"logs/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Log.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Log to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"log\": {\n                \"$ref\": \"Log\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": 
\"Log\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.logs.delete\",\n          \"path\": \"logs/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Log.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Log to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Log\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"users\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.users.get\",\n          \"path\": \"users/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a User record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the User to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.users.list\",\n          \"path\": \"users\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a UserList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n     
         \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_avaliable`\\n    field. 
This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"UserList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.users.create\",\n          \"path\": \"users\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new User.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"user\": {\n                \"$ref\": \"User\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.users.update\",\n          \"path\": \"users/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing User.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the User to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not try to update the user on any other clusters in the 
federation,\\nonly the cluster that received the request.\\nYou must be an administrator to use this flag.\",\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"user\": {\n                \"$ref\": \"User\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.users.delete\",\n          \"path\": \"users/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing User.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the User to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"current\": {\n          \"id\": \"arvados.users.current\",\n          \"path\": \"users/current\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Return the user record associated with the API token authorizing this request.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"system\": {\n          \"id\": \"arvados.users.system\",\n          \"path\": \"users/system\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Return this cluster's system (\\\"root\\\") user record.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"activate\": {\n          \"id\": \"arvados.users.activate\",\n          \"path\": \"users/{uuid}/activate\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Set the `is_active` flag on a user record.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the User to activate.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"setup\": {\n          \"id\": \"arvados.users.setup\",\n          \"path\": \"users/setup\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Convenience method to \\\"fully\\\" set up a user record with a virtual machine login and notification email.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"UUID of an existing user record to set up.\",\n              \"location\": \"query\"\n            },\n            \"user\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"Attributes of a new user record to set up.\",\n              \"location\": \"query\"\n            
},\n            \"repo_name\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"This parameter is obsolete and ignored.\",\n              \"location\": \"query\"\n            },\n            \"vm_uuid\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"If given, setup creates a login link to allow this user to access the Arvados virtual machine with this UUID.\",\n              \"location\": \"query\"\n            },\n            \"send_notification_email\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, send an email to the user notifying them they can now access this Arvados cluster.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"unsetup\": {\n          \"id\": \"arvados.users.unsetup\",\n          \"path\": \"users/{uuid}/unsetup\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Unset a user's active flag and delete associated records.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the User to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"merge\": {\n          \"id\": \"arvados.users.merge\",\n          \"path\": \"users/merge\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Transfer ownership of one user's data to another.\",\n          \"parameters\": {\n            \"new_owner_uuid\": {\n              \"type\": \"string\",\n              \"required\": true,\n              \"description\": \"UUID of the user or group that will take ownership of data owned by the old user.\",\n              \"location\": \"query\"\n            },\n            \"new_user_token\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"Valid API token for the user receiving ownership. If you use this option, it takes ownership of data owned by the user making the request.\",\n              \"location\": \"query\"\n            },\n            \"redirect_to_new_user\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, authorization attempts for the old user will be redirected to the new user.\",\n              \"location\": \"query\"\n            },\n            \"old_user_uuid\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"UUID of the user whose ownership is being transferred to `new_owner_uuid`. You must be an admin to use this option.\",\n              \"location\": \"query\"\n            },\n            \"new_user_uuid\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"description\": \"UUID of the user receiving ownership. 
You must be an admin to use this option.\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"User\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"user_agreements\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.user_agreements.get\",\n          \"path\": \"user_agreements/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a UserAgreement record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the UserAgreement to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"UserAgreement\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.user_agreements.list\",\n          \"path\": \"user_agreements\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a UserAgreementList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will 
only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"UserAgreementList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.user_agreements.create\",\n          \"path\": \"user_agreements\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new UserAgreement.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be 
created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"user_agreement\": {\n                \"$ref\": \"UserAgreement\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"UserAgreement\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.user_agreements.update\",\n          \"path\": \"user_agreements/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing UserAgreement.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the UserAgreement to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"user_agreement\": {\n                \"$ref\": \"UserAgreement\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"UserAgreement\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.user_agreements.delete\",\n          \"path\": \"user_agreements/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing UserAgreement.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the UserAgreement to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"UserAgreement\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"signatures\": {\n          \"id\": \"arvados.user_agreements.signatures\",\n          \"path\": \"user_agreements/signatures\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List all user agreement signature links from a user.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"UserAgreement\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"sign\": {\n          \"id\": \"arvados.user_agreements.sign\",\n          \"path\": \"user_agreements/sign\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a signature link from the current user for a given user agreement.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"UserAgreement\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"virtual_machines\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.virtual_machines.get\",\n          \"path\": \"virtual_machines/{uuid}\",\n          
\"httpMethod\": \"GET\",\n          \"description\": \"Get a VirtualMachine record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the VirtualMachine to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"VirtualMachine\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.virtual_machines.list\",\n          \"path\": \"virtual_machines\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a VirtualMachineList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": 
\"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched this search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_avaliable`\\n    field. This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"VirtualMachineList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.virtual_machines.create\",\n          \"path\": \"virtual_machines\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new VirtualMachine.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"virtual_machine\": {\n                \"$ref\": \"VirtualMachine\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"VirtualMachine\"\n          },\n          \"scopes\": [\n            
\"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.virtual_machines.update\",\n          \"path\": \"virtual_machines/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing VirtualMachine.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the VirtualMachine to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"virtual_machine\": {\n                \"$ref\": \"VirtualMachine\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"VirtualMachine\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.virtual_machines.delete\",\n          \"path\": \"virtual_machines/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing VirtualMachine.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the VirtualMachine to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"VirtualMachine\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"logins\": {\n          \"id\": \"arvados.virtual_machines.logins\",\n          \"path\": \"virtual_machines/{uuid}/logins\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List login permission links for a given virtual machine.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the VirtualMachine to query.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"VirtualMachine\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"get_all_logins\": {\n          \"id\": \"arvados.virtual_machines.get_all_logins\",\n          \"path\": \"virtual_machines/get_all_logins\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"List login permission links for all virtual machines.\",\n          \"parameters\": {},\n          \"response\": {\n            \"$ref\": \"VirtualMachine\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"workflows\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.workflows.get\",\n          \"path\": \"workflows/{uuid}\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get a Workflow record by UUID.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID 
of the Workflow to return.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"parameterOrder\": [\n            \"uuid\"\n          ],\n          \"response\": {\n            \"$ref\": \"Workflow\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"list\": {\n          \"id\": \"arvados.workflows.list\",\n          \"path\": \"workflows\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Retrieve a WorkflowList.\",\n          \"parameters\": {\n            \"filters\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"Filters to limit which objects are returned by their attributes.\\nRefer to the [filters reference][] for more information about how to write filters.\\n\\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\\n\",\n              \"location\": \"query\"\n            },\n            \"where\": {\n              \"type\": \"object\",\n              \"required\": false,\n              \"description\": \"An object to limit which objects are returned by their attributes.\\nThe keys of this object are attribute names.\\nEach value is either a single matching value or an array of matching values for that attribute.\\nThe `filters` parameter is more flexible and preferred.\\n\",\n              \"location\": \"query\"\n            },\n            \"order\": {\n              \"type\": \"array\",\n              \"required\": false,\n              \"description\": \"An array of strings to set the order in which matching objects are returned.\\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\\n\",\n              \"location\": \"query\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return from each matching object.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"distinct\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If this is true, and multiple objects have the same values\\nfor the attributes that you specify in the `select` parameter, then each unique\\nset of values will only be returned once in the result set.\\n\",\n              \"location\": \"query\"\n            },\n            \"limit\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"100\",\n              \"description\": \"The maximum number of objects to return in the result.\\nNote that the API may return fewer results than this if your request hits other\\nlimits set by the administrator.\\n\",\n              \"location\": \"query\"\n            },\n            \"offset\": {\n              \"type\": \"integer\",\n              \"required\": false,\n              \"default\": \"0\",\n              \"description\": \"Return matching objects starting from this index.\\nNote that 
result indexes may change if objects are modified in between a series\\nof list calls.\\n\",\n              \"location\": \"query\"\n            },\n            \"count\": {\n              \"type\": \"string\",\n              \"required\": false,\n              \"default\": \"exact\",\n              \"description\": \"A string to determine result counting behavior. Supported values are:\\n\\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\\n    counts the number of objects that matched these search criteria,\\n    including ones not included in `items`.\\n\\n  * `\\\"none\\\"`: The response will not include an `items_available`\\n    field. This improves performance by returning a result as soon as enough\\n    `items` have been loaded for this result.\\n\\n\",\n              \"location\": \"query\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster to return objects from.\",\n              \"location\": \"query\",\n              \"required\": false\n            },\n            \"bypass_federation\": {\n              \"type\": \"boolean\",\n              \"required\": false,\n              \"default\": \"false\",\n              \"description\": \"If true, do not return results from other clusters in the\\nfederation, only the cluster that received the request.\\nYou must be an administrator to use this flag.\\n\",\n              \"location\": \"query\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"WorkflowList\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n        \"create\": {\n          \"id\": \"arvados.workflows.create\",\n          \"path\": \"workflows\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Create a new Workflow.\",\n          \"parameters\": {\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n              \"type\": \"boolean\",\n              \"description\": \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n              \"location\": \"query\",\n              \"required\": false,\n              \"default\": \"false\"\n            },\n            \"cluster_id\": {\n              \"type\": \"string\",\n              \"description\": \"Cluster ID of a federated cluster where this object should be created.\",\n              \"location\": \"query\",\n              \"required\": false\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"workflow\": {\n                \"$ref\": \"Workflow\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Workflow\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"update\": {\n          \"id\": \"arvados.workflows.update\",\n          \"path\": \"workflows/{uuid}\",\n          \"httpMethod\": \"PUT\",\n          \"description\": \"Update attributes of an existing Workflow.\",\n          \"parameters\": {\n      
      \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Workflow to update.\",\n              \"required\": true,\n              \"location\": \"path\"\n            },\n            \"select\": {\n              \"type\": \"array\",\n              \"description\": \"An array of names of attributes to return in the response.\",\n              \"required\": false,\n              \"location\": \"query\"\n            }\n          },\n          \"request\": {\n            \"required\": true,\n            \"properties\": {\n              \"workflow\": {\n                \"$ref\": \"Workflow\"\n              }\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Workflow\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        },\n        \"delete\": {\n          \"id\": \"arvados.workflows.delete\",\n          \"path\": \"workflows/{uuid}\",\n          \"httpMethod\": \"DELETE\",\n          \"description\": \"Delete an existing Workflow.\",\n          \"parameters\": {\n            \"uuid\": {\n              \"type\": \"string\",\n              \"description\": \"The UUID of the Workflow to delete.\",\n              \"required\": true,\n              \"location\": \"path\"\n            }\n          },\n          \"response\": {\n            \"$ref\": \"Workflow\"\n          },\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\"\n          ]\n        }\n      }\n    },\n    \"configs\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.configs.get\",\n          \"path\": \"config\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get this cluster's public configuration settings.\",\n          \"parameters\": {},\n          \"parameterOrder\": [],\n          \"response\": {},\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        }\n      }\n    },\n    \"vocabularies\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.vocabularies.get\",\n          \"path\": \"vocabulary\",\n          \"httpMethod\": \"GET\",\n          \"description\": \"Get this cluster's configured vocabulary definition.\\n\\nRefer to [metadata vocabulary documentation][] for details.\\n\\n[metadata vocabulary documentation]: https://doc.arvados.org/admin/metadata-vocabulary.html\\n\\n\",\n          \"parameters\": {},\n          \"parameterOrder\": [],\n          \"response\": {},\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        }\n      }\n    },\n    \"sys\": {\n      \"methods\": {\n        \"get\": {\n          \"id\": \"arvados.sys.trash_sweep\",\n          \"path\": \"sys/trash_sweep\",\n          \"httpMethod\": \"POST\",\n          \"description\": \"Run scheduled data trash and sweep operations across this cluster's Keep services.\",\n          \"parameters\": {},\n          \"parameterOrder\": [],\n          \"response\": {},\n          \"scopes\": [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        }\n      }\n    }\n  },\n  \"revision\": \"20250402\",\n  \"schemas\": {\n    \"ApiClientAuthorizationList\": {\n      \"id\": \"ApiClientAuthorizationList\",\n      \"description\": \"A 
list of ApiClientAuthorization objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#apiClientAuthorizationList.\",\n          \"default\": \"arvados#apiClientAuthorizationList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching ApiClientAuthorization objects.\",\n          \"items\": {\n            \"$ref\": \"ApiClientAuthorization\"\n          }\n        }\n      }\n    },\n    \"ApiClientAuthorization\": {\n      \"id\": \"ApiClientAuthorization\",\n      \"description\": \"Arvados API client authorization token\\n\\nThis resource represents an API token a user may use to authenticate an\\nArvados API request.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"gj3su\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"api_token\": {\n          \"description\": \"The secret token that can be used to authorize Arvados API requests.\",\n          \"type\": \"string\"\n        },\n        \"created_by_ip_address\": {\n          \"description\": \"The IP address of the client that created this token.\",\n          \"type\": \"string\"\n        },\n        \"last_used_by_ip_address\": {\n          \"description\": \"The IP address of the client that last used this token.\",\n          \"type\": \"string\"\n        },\n        \"last_used_at\": {\n          \"description\": \"The last time this token was used to authorize a request. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"expires_at\": {\n          \"description\": \"The time after which this token is no longer valid for authorization. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this API client authorization was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"scopes\": {\n          \"description\": \"An array of strings identifying HTTP methods and API paths this token is\\nauthorized to use. Refer to the [scopes reference][] for details.\\n\\n[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes\\n\\n\",\n          \"type\": \"Array\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This API client authorization's Arvados UUID, like `zzzzz-gj3su-12345abcde67890`.\"\n        }\n      }\n    },\n    \"AuthorizedKeyList\": {\n      \"id\": \"AuthorizedKeyList\",\n      \"description\": \"A list of AuthorizedKey objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. 
Always arvados#authorizedKeyList.\",\n          \"default\": \"arvados#authorizedKeyList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching AuthorizedKey objects.\",\n          \"items\": {\n            \"$ref\": \"AuthorizedKey\"\n          }\n        }\n      }\n    },\n    \"AuthorizedKey\": {\n      \"id\": \"AuthorizedKey\",\n      \"description\": \"Arvados authorized public key\\n\\nThis resource represents a public key a user may use to authenticate themselves\\nto services on the cluster. Its primary use today is to store SSH keys for\\nvirtual machines (\\\"shell nodes\\\"). It may be extended to store other keys in\\nthe future.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"fngyi\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This authorized key's Arvados UUID, like `zzzzz-fngyi-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this authorized key.\",\n          \"type\": \"string\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this authorized key.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this authorized key was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"name\": {\n          \"description\": \"The name of this authorized key assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"key_type\": {\n          \"description\": \"A string identifying what type of service uses this key. Supported values are:\\n\\n  * `\\\"SSH\\\"`\\n\\n\",\n          \"type\": \"string\"\n        },\n        \"authorized_user_uuid\": {\n          \"description\": \"The UUID of the Arvados user that is authorized by this key.\",\n          \"type\": \"string\"\n        },\n        \"public_key\": {\n          \"description\": \"The full public key, in the format referenced by `key_type`.\",\n          \"type\": \"text\"\n        },\n        \"expires_at\": {\n          \"description\": \"The time after which this key is no longer valid for authorization. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this authorized key was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        }\n      }\n    },\n    \"CollectionList\": {\n      \"id\": \"CollectionList\",\n      \"description\": \"A list of Collection objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. 
Always arvados#collectionList.\",\n          \"default\": \"arvados#collectionList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Collection objects.\",\n          \"items\": {\n            \"$ref\": \"Collection\"\n          }\n        }\n      }\n    },\n    \"Collection\": {\n      \"id\": \"Collection\",\n      \"description\": \"Arvados data collection\\n\\nA collection describes how a set of files is stored in data blocks in Keep,\\nalong with associated metadata.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"4zz18\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this collection.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this collection was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this collection.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this collection was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"portable_data_hash\": {\n          \"description\": \"The portable data hash of this collection. This string provides a unique\\nand stable reference to these contents.\",\n          \"type\": \"string\"\n        },\n        \"replication_desired\": {\n          \"description\": \"The number of copies that should be made for data in this collection.\",\n          \"type\": \"integer\"\n        },\n        \"replication_confirmed_at\": {\n          \"description\": \"The last time the cluster confirmed that it met `replication_confirmed`\\nfor this collection. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"replication_confirmed\": {\n          \"description\": \"The number of copies of data in this collection that the cluster has confirmed\\nexist in storage.\",\n          \"type\": \"integer\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This collection's Arvados UUID, like `zzzzz-4zz18-12345abcde67890`.\"\n        },\n        \"manifest_text\": {\n          \"description\": \"The manifest text that describes how files are constructed from data blocks\\nin this collection. 
Refer to the [manifest format][] reference for details.\\n\\n[manifest format]: https://doc.arvados.org/architecture/manifest-format.html\\n\\n\",\n          \"type\": \"text\"\n        },\n        \"name\": {\n          \"description\": \"The name of this collection assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"description\": {\n          \"description\": \"A longer HTML description of this collection assigned by a user.\\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.\",\n          \"type\": \"string\"\n        },\n        \"properties\": {\n          \"description\": \"A hash of arbitrary metadata for this collection.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"delete_at\": {\n          \"description\": \"The time this collection will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"trash_at\": {\n          \"description\": \"The time this collection will be trashed. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"is_trashed\": {\n          \"description\": \"A boolean flag to indicate whether or not this collection is trashed.\",\n          \"type\": \"boolean\"\n        },\n        \"storage_classes_desired\": {\n          \"description\": \"An array of strings identifying the storage class(es) that should be used\\nfor data in this collection. Storage classes are configured by the cluster administrator.\",\n          \"type\": \"Array\"\n        },\n        \"storage_classes_confirmed\": {\n          \"description\": \"An array of strings identifying the storage class(es) the cluster has\\nconfirmed have a copy of this collection's data.\",\n          \"type\": \"Array\"\n        },\n        \"storage_classes_confirmed_at\": {\n          \"description\": \"The last time the cluster confirmed that data was stored on the storage\\nclass(es) in `storage_classes_confirmed`. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"current_version_uuid\": {\n          \"description\": \"The UUID of the current version of this collection.\",\n          \"type\": \"string\"\n        },\n        \"version\": {\n          \"description\": \"An integer that counts which version of a collection this record\\nrepresents. Refer to [collection versioning][] for details. 
This attribute is\\nread-only.\\n\\n[collection versioning]: https://doc.arvados.org/user/topics/collection-versioning.html\\n\\n\",\n          \"type\": \"integer\"\n        },\n        \"preserve_version\": {\n          \"description\": \"A boolean flag to indicate whether this specific version of this collection\\nshould be persisted in cluster storage.\",\n          \"type\": \"boolean\"\n        },\n        \"file_count\": {\n          \"description\": \"The number of files represented in this collection's `manifest_text`.\\nThis attribute is read-only.\",\n          \"type\": \"integer\"\n        },\n        \"file_size_total\": {\n          \"description\": \"The total size in bytes of files represented in this collection's `manifest_text`.\\nThis attribute is read-only.\",\n          \"type\": \"integer\"\n        }\n      }\n    },\n    \"ComputedPermissionList\": {\n      \"id\": \"ComputedPermissionList\",\n      \"description\": \"A list of ComputedPermission objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#computedPermissionList.\",\n          \"default\": \"arvados#computedPermissionList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching ComputedPermission objects.\",\n          \"items\": {\n            \"$ref\": \"ComputedPermission\"\n          }\n        }\n      }\n    },\n    \"ComputedPermission\": {\n      \"id\": \"ComputedPermission\",\n      \"description\": \"Arvados computed permission\\n\\nComputed permissions do not correspond directly to any Arvados resource, but\\nprovide a simple way to query the entire graph of permissions granted to\\nusers and groups.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"user_uuid\": {\n          \"description\": \"The UUID of the Arvados user who has this permission.\",\n          \"type\": \"string\"\n        },\n        \"target_uuid\": {\n          \"description\": \"The UUID of the Arvados object the user has access to.\",\n          \"type\": \"string\"\n        },\n        \"perm_level\": {\n          \"description\": \"A string representing the user's level of access to the target object.\\nPossible values are:\\n\\n  * `\\\"can_read\\\"`\\n  * `\\\"can_write\\\"`\\n  * `\\\"can_manage\\\"`\\n\\n\",\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"ContainerList\": {\n      \"id\": \"ContainerList\",\n      \"description\": \"A list of Container objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#containerList.\",\n          \"default\": \"arvados#containerList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Container objects.\",\n          \"items\": {\n            \"$ref\": \"Container\"\n          }\n        }\n      }\n    },\n    \"Container\": {\n      \"id\": \"Container\",\n      \"description\": \"Arvados container record\\n\\nA container represents compute work that has been or should be dispatched,\\nalong with its results. 
A container can satisfy one or more container requests.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"dz642\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This container's Arvados UUID, like `zzzzz-dz642-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this container.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this container was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this container was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this container.\",\n          \"type\": \"string\"\n        },\n        \"state\": {\n          \"description\": \"A string representing the container's current execution status. Possible\\nvalues are:\\n\\n  * `\\\"Queued\\\"` --- This container has not been dispatched yet.\\n  * `\\\"Locked\\\"` --- A dispatcher has claimed this container in preparation to run it.\\n  * `\\\"Running\\\"` --- A dispatcher is running this container.\\n  * `\\\"Cancelled\\\"` --- Container execution has been cancelled by user request.\\n  * `\\\"Complete\\\"` --- A dispatcher ran this container to completion and recorded the results.\\n\\n\",\n          \"type\": \"string\"\n        },\n        \"started_at\": {\n          \"description\": \"The time this container started running. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"finished_at\": {\n          \"description\": \"The time this container finished running. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"log\": {\n          \"description\": \"The portable data hash of the Arvados collection that contains this\\ncontainer's logs.\",\n          \"type\": \"string\"\n        },\n        \"environment\": {\n          \"description\": \"A hash of string keys and values that defines the environment variables\\nfor the dispatcher to set when it executes this container.\",\n          \"type\": \"Hash\"\n        },\n        \"cwd\": {\n          \"description\": \"A string that defines the working directory that the dispatcher should\\nuse when it executes the command inside this container.\",\n          \"type\": \"string\"\n        },\n        \"command\": {\n          \"description\": \"An array of strings that defines the command that the dispatcher should\\nexecute inside this container.\",\n          \"type\": \"Array\"\n        },\n        \"output_path\": {\n          \"description\": \"A string that defines the file or directory path where the command\\nwrites output that should be saved from this container.\",\n          \"type\": \"string\"\n        },\n        \"mounts\": {\n          \"description\": \"A hash where each key names a directory inside this container, and its\\nvalue is an object that defines the mount source for that directory. 
Refer\\nto the [mount types reference][] for details.\\n\\n[mount types reference]: https://doc.arvados.org/api/methods/containers.html#mount_types\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"runtime_constraints\": {\n          \"description\": \"A hash that identifies compute resources this container requires to run\\nsuccessfully. See the [runtime constraints reference][] for details.\\n\\n[runtime constraints reference]: https://doc.arvados.org/api/methods/containers.html#runtime_constraints\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"output\": {\n          \"description\": \"The portable data hash of the Arvados collection that contains this\\ncontainer's output file(s).\",\n          \"type\": \"string\"\n        },\n        \"container_image\": {\n          \"description\": \"The portable data hash of the Arvados collection that contains the image\\nto use for this container.\",\n          \"type\": \"string\"\n        },\n        \"progress\": {\n          \"description\": \"A float between 0.0 and 1.0 (inclusive) that represents the container's\\nexecution progress. This attribute is not implemented yet.\",\n          \"type\": \"float\"\n        },\n        \"priority\": {\n          \"description\": \"An integer between 0 and 1000 (inclusive) that represents this container's\\nscheduling priority. 0 represents a request to be cancelled. Higher\\nvalues represent higher priority. Refer to the [priority reference][] for details.\\n\\n[priority reference]: https://doc.arvados.org/api/methods/container_requests.html#priority\\n\\n\",\n          \"type\": \"integer\"\n        },\n        \"exit_code\": {\n          \"description\": \"An integer that records the Unix exit code of the `command` from a\\nfinished container.\",\n          \"type\": \"integer\"\n        },\n        \"auth_uuid\": {\n          \"description\": \"The UUID of the Arvados API client authorization token that a dispatcher\\nshould use to set up this container. 
This token is automatically created by\\nArvados and this attribute automatically assigned unless a container is\\ncreated with `runtime_token`.\",\n          \"type\": \"string\"\n        },\n        \"locked_by_uuid\": {\n          \"description\": \"The UUID of the Arvados API client authorization token that successfully\\nlocked this container in preparation to execute it.\",\n          \"type\": \"string\"\n        },\n        \"scheduling_parameters\": {\n          \"description\": \"A hash of scheduling parameters that should be passed to the underlying\\ndispatcher when this container is run.\\nSee the [scheduling parameters reference][] for details.\\n\\n[scheduling parameters reference]: https://doc.arvados.org/api/methods/containers.html#scheduling_parameters\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"runtime_status\": {\n          \"description\": \"A hash with status updates from a running container.\\nRefer to the [runtime status reference][] for details.\\n\\n[runtime status reference]: https://doc.arvados.org/api/methods/containers.html#runtime_status\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"runtime_user_uuid\": {\n          \"description\": \"The UUID of the Arvados user associated with the API client authorization\\ntoken used to run this container.\",\n          \"type\": \"text\"\n        },\n        \"runtime_auth_scopes\": {\n          \"description\": \"The `scopes` from the API client authorization token used to run this container.\",\n          \"type\": \"Array\"\n        },\n        \"lock_count\": {\n          \"description\": \"The number of times this container has been locked by a dispatcher. This\\nmay be greater than 1 if a dispatcher locks a container but then execution is\\ninterrupted for any reason.\",\n          \"type\": \"integer\"\n        },\n        \"gateway_address\": {\n          \"description\": \"A string with the address of the Arvados gateway server, in `HOST:PORT`\\nformat. This is for internal use only.\",\n          \"type\": \"string\"\n        },\n        \"interactive_session_started\": {\n          \"description\": \"This flag is set true if any user starts an interactive shell inside the\\nrunning container.\",\n          \"type\": \"boolean\"\n        },\n        \"output_storage_classes\": {\n          \"description\": \"An array of strings identifying the storage class(es) that should be set\\non the output collection of this container. Storage classes are configured by\\nthe cluster administrator.\",\n          \"type\": \"Array\"\n        },\n        \"output_properties\": {\n          \"description\": \"A hash of arbitrary metadata to set on the output collection of this container.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"cost\": {\n          \"description\": \"A float with the estimated cost of the cloud instance used to run this\\ncontainer. The value is `0` if cost estimation is not available on this cluster.\",\n          \"type\": \"float\"\n        },\n        \"subrequests_cost\": {\n          \"description\": \"A float with the estimated cost of all cloud instances used to run this\\ncontainer and all its subrequests. 
The value is `0` if cost estimation is not\\navailable on this cluster.\",\n          \"type\": \"float\"\n        },\n        \"output_glob\": {\n          \"description\": \"An array of strings of shell-style glob patterns that define which file(s)\\nand subdirectory(ies) under the `output_path` directory should be recorded in\\nthe container's final output. Refer to the [glob patterns reference][] for details.\\n\\n[glob patterns reference]: https://doc.arvados.org/api/methods/containers.html#glob_patterns\\n\\n\",\n          \"type\": \"Array\"\n        },\n        \"service\": {\n          \"description\": \"A boolean flag. If set, it informs the system that this is a long-running container\\nthat functions as a system service or web app, rather than a once-through batch operation.\",\n          \"type\": \"boolean\"\n        },\n        \"published_ports\": {\n          \"description\": \"A hash where keys are numeric TCP ports on the container which expose HTTP services.  Arvados\\nwill proxy HTTP requests to these ports.  Values are hashes with the following keys:\\n\\n  * `\\\"access\\\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.\\n  * `\\\"label\\\"` --- A human readable label describing the service, for display in Workbench.\\n  * `\\\"initial_path\\\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.\",\n          \"type\": \"jsonb\"\n        }\n      }\n    },\n    \"ContainerRequestList\": {\n      \"id\": \"ContainerRequestList\",\n      \"description\": \"A list of ContainerRequest objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#containerRequestList.\",\n          \"default\": \"arvados#containerRequestList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching ContainerRequest objects.\",\n          \"items\": {\n            \"$ref\": \"ContainerRequest\"\n          }\n        }\n      }\n    },\n    \"ContainerRequest\": {\n      \"id\": \"ContainerRequest\",\n      \"description\": \"Arvados container request\\n\\nA container request represents a user's request that Arvados do some compute\\nwork, along with full details about what work should be done. Arvados will\\nattempt to fulfill the request by mapping it to a matching container record,\\nrunning the work on demand if necessary.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"xvhdp\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This container request's Arvados UUID, like `zzzzz-xvhdp-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this container request.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this container request was created. 
The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this container request was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this container request.\",\n          \"type\": \"string\"\n        },\n        \"name\": {\n          \"description\": \"The name of this container request assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"description\": {\n          \"description\": \"A longer HTML description of this container request assigned by a user.\\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.\",\n          \"type\": \"text\"\n        },\n        \"properties\": {\n          \"description\": \"A hash of arbitrary metadata for this container request.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"state\": {\n          \"description\": \"A string indicating where this container request is in its lifecycle.\\nPossible values are:\\n\\n  * `\\\"Uncommitted\\\"` --- The container request has not been finalized and can still be edited.\\n  * `\\\"Committed\\\"` --- The container request is ready to be fulfilled.\\n  * `\\\"Final\\\"` --- The container request has been fulfilled or cancelled.\\n\\n\",\n          \"type\": \"string\"\n        },\n        \"requesting_container_uuid\": {\n          \"description\": \"The UUID of the container that created this container request, if any.\",\n          \"type\": \"string\"\n        },\n        \"container_uuid\": {\n          \"description\": \"The UUID of the container that fulfills this container request, if any.\",\n          \"type\": \"string\"\n        },\n        \"container_count_max\": {\n          \"description\": \"An integer that defines the maximum number of times Arvados should attempt\\nto dispatch a container to fulfill this container request.\",\n          \"type\": \"integer\"\n        },\n        \"mounts\": {\n          \"description\": \"A hash where each key names a directory inside this container, and its\\nvalue is an object that defines the mount source for that directory. Refer\\nto the [mount types reference][] for details.\\n\\n[mount types reference]: https://doc.arvados.org/api/methods/containers.html#mount_types\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"runtime_constraints\": {\n          \"description\": \"A hash that identifies compute resources this container requires to run\\nsuccessfully. 
See the [runtime constraints reference][] for details.\\n\\n[runtime constraints reference]: https://doc.arvados.org/api/methods/containers.html#runtime_constraints\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"container_image\": {\n          \"description\": \"The portable data hash of the Arvados collection that contains the image\\nto use for this container.\",\n          \"type\": \"string\"\n        },\n        \"environment\": {\n          \"description\": \"A hash of string keys and values that defines the environment variables\\nfor the dispatcher to set when it executes this container.\",\n          \"type\": \"Hash\"\n        },\n        \"cwd\": {\n          \"description\": \"A string that defines the working directory that the dispatcher should\\nuse when it executes the command inside this container.\",\n          \"type\": \"string\"\n        },\n        \"command\": {\n          \"description\": \"An array of strings that defines the command that the dispatcher should\\nexecute inside this container.\",\n          \"type\": \"Array\"\n        },\n        \"output_path\": {\n          \"description\": \"A string that defines the file or directory path where the command\\nwrites output that should be saved from this container.\",\n          \"type\": \"string\"\n        },\n        \"priority\": {\n          \"description\": \"An integer between 0 and 1000 (inclusive) that represents this container request's\\nscheduling priority. 0 represents a request to be cancelled. Higher\\nvalues represent higher priority. Refer to the [priority reference][] for details.\\n\\n[priority reference]: https://doc.arvados.org/api/methods/container_requests.html#priority\\n\\n\",\n          \"type\": \"integer\"\n        },\n        \"expires_at\": {\n          \"description\": \"The time after which this container request will no longer be fulfilled. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"filters\": {\n          \"description\": \"Filters that limit which existing containers are eligible to satisfy this\\ncontainer request. This attribute is not implemented yet and should be null.\",\n          \"type\": \"text\"\n        },\n        \"container_count\": {\n          \"description\": \"An integer that records how many times Arvados has attempted to dispatch\\na container to fulfill this container request.\",\n          \"type\": \"integer\"\n        },\n        \"use_existing\": {\n          \"description\": \"A boolean flag. If set, Arvados may choose to satisfy this container\\nrequest with an eligible container that already exists. 
Otherwise, Arvados will\\nsatisfy this container request with a newer container, which will usually result\\nin the container running again.\",\n          \"type\": \"boolean\"\n        },\n        \"scheduling_parameters\": {\n          \"description\": \"A hash of scheduling parameters that should be passed to the underlying\\ndispatcher when this container is run.\\nSee the [scheduling parameters reference][] for details.\\n\\n[scheduling parameters reference]: https://doc.arvados.org/api/methods/containers.html#scheduling_parameters\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"output_uuid\": {\n          \"description\": \"The UUID of the Arvados collection that contains output for all the\\ncontainer(s) that were dispatched to fulfill this container request.\",\n          \"type\": \"string\"\n        },\n        \"log_uuid\": {\n          \"description\": \"The UUID of the Arvados collection that contains logs for all the\\ncontainer(s) that were dispatched to fulfill this container request.\",\n          \"type\": \"string\"\n        },\n        \"output_name\": {\n          \"description\": \"The name to set on the output collection of this container request.\",\n          \"type\": \"string\"\n        },\n        \"output_ttl\": {\n          \"description\": \"An integer in seconds. If greater than zero, when an output collection is\\ncreated for this container request, its `expires_at` attribute will be set this\\nfar in the future.\",\n          \"type\": \"integer\"\n        },\n        \"output_storage_classes\": {\n          \"description\": \"An array of strings identifying the storage class(es) that should be set\\non the output collection of this container request. Storage classes are configured by\\nthe cluster administrator.\",\n          \"type\": \"Array\"\n        },\n        \"output_properties\": {\n          \"description\": \"A hash of arbitrary metadata to set on the output collection of this container request.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"cumulative_cost\": {\n          \"description\": \"A float with the estimated cost of all cloud instances used to run\\ncontainer(s) to fulfill this container request and their subrequests.\\nThe value is `0` if cost estimation is not available on this cluster.\",\n          \"type\": \"float\"\n        },\n        \"output_glob\": {\n          \"description\": \"An array of strings of shell-style glob patterns that define which file(s)\\nand subdirectory(ies) under the `output_path` directory should be recorded in\\nthe container's final output. Refer to the [glob patterns reference][] for details.\\n\\n[glob patterns reference]: https://doc.arvados.org/api/methods/containers.html#glob_patterns\\n\\n\",\n          \"type\": \"Array\"\n        },\n        \"service\": {\n          \"description\": \"A boolean flag. If set, it informs the system that this request is for a long-running container\\nthat functions as a system service or web app, rather than a once-through batch operation.\",\n          \"type\": \"boolean\"\n        },\n        \"published_ports\": {\n          \"description\": \"A hash where keys are numeric TCP ports on the container which expose HTTP services.  Arvados\\nwill proxy HTTP requests to these ports.  
Values are hashes with the following keys:\\n\\n  * `\\\"access\\\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.\\n  * `\\\"label\\\"` --- A human readable label describing the service, for display in Workbench.\\n  * `\\\"initial_path\\\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.\",\n          \"type\": \"Hash\"\n        }\n      }\n    },\n    \"CredentialList\": {\n      \"id\": \"CredentialList\",\n      \"description\": \"A list of Credential objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#credentialList.\",\n          \"default\": \"arvados#credentialList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Credential objects.\",\n          \"items\": {\n            \"$ref\": \"Credential\"\n          }\n        }\n      }\n    },\n    \"Credential\": {\n      \"id\": \"Credential\",\n      \"description\": \"Arvados credential.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"oss07\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This credential's Arvados UUID, like `zzzzz-oss07-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this credential.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this credential was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this credential was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this credential.\",\n          \"type\": \"string\"\n        },\n        \"name\": {\n          \"description\": \"The name of this credential assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"description\": {\n          \"description\": \"A longer HTML description of this credential assigned by a user.\\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.\",\n          \"type\": \"text\"\n        },\n        \"credential_class\": {\n          \"description\": \"The type of credential being stored.\",\n          \"type\": \"string\"\n        },\n        \"scopes\": {\n          \"description\": \"The resources the credential applies to or should be used with.\",\n          \"type\": \"Array\"\n        },\n        \"external_id\": {\n          \"description\": \"The non-secret external identifier associated with a credential, e.g. 
a username.\",\n          \"type\": \"string\"\n        },\n        \"expires_at\": {\n          \"description\": \"Date after which the credential_secret field is no longer valid. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        }\n      }\n    },\n    \"GroupList\": {\n      \"id\": \"GroupList\",\n      \"description\": \"A list of Group objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#groupList.\",\n          \"default\": \"arvados#groupList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Group objects.\",\n          \"items\": {\n            \"$ref\": \"Group\"\n          }\n        }\n      }\n    },\n    \"Group\": {\n      \"id\": \"Group\",\n      \"description\": \"Arvados group\\n\\nGroups provide a way to organize users or data together, depending on their\\n`group_class`.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"j7d0g\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This group's Arvados UUID, like `zzzzz-j7d0g-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this group.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this group was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this group.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this group was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"name\": {\n          \"description\": \"The name of this group assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"description\": {\n          \"description\": \"A longer HTML description of this group assigned by a user.\\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.\",\n          \"type\": \"string\"\n        },\n        \"group_class\": {\n          \"description\": \"A string representing which type of group this is. One of:\\n\\n  * `\\\"filter\\\"` --- A virtual project whose contents are selected dynamically by filters.\\n  * `\\\"project\\\"` --- An Arvados project that can contain collections,\\n    container records, workflows, and subprojects.\\n  * `\\\"role\\\"` --- A group of users that can be granted permissions in Arvados.\\n\\n\",\n          \"type\": \"string\"\n        },\n        \"trash_at\": {\n          \"description\": \"The time this group will be trashed. 
The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"is_trashed\": {\n          \"description\": \"A boolean flag to indicate whether or not this group is trashed.\",\n          \"type\": \"boolean\"\n        },\n        \"delete_at\": {\n          \"description\": \"The time this group will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"properties\": {\n          \"description\": \"A hash of arbitrary metadata for this group.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"frozen_by_uuid\": {\n          \"description\": \"The UUID of the user that has frozen this group, if any. Frozen projects\\ncannot have their contents or metadata changed, even by admins.\",\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"KeepServiceList\": {\n      \"id\": \"KeepServiceList\",\n      \"description\": \"A list of KeepService objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#keepServiceList.\",\n          \"default\": \"arvados#keepServiceList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching KeepService objects.\",\n          \"items\": {\n            \"$ref\": \"KeepService\"\n          }\n        }\n      }\n    },\n    \"KeepService\": {\n      \"id\": \"KeepService\",\n      \"description\": \"Arvados Keep service\\n\\nThis resource stores information about a single Keep service in this Arvados\\ncluster that clients can contact to retrieve and store data.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"bi6l4\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This Keep service's Arvados UUID, like `zzzzz-bi6l4-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this Keep service.\",\n          \"type\": \"string\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this Keep service.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this Keep service was last updated. 
The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"service_host\": {\n          \"description\": \"The DNS hostname of this Keep service.\",\n          \"type\": \"string\"\n        },\n        \"service_port\": {\n          \"description\": \"The TCP port where this Keep service listens.\",\n          \"type\": \"integer\"\n        },\n        \"service_ssl_flag\": {\n          \"description\": \"A boolean flag that indicates whether or not this Keep service uses TLS/SSL.\",\n          \"type\": \"boolean\"\n        },\n        \"service_type\": {\n          \"description\": \"A string that describes which type of Keep service this is. One of:\\n\\n  * `\\\"disk\\\"` --- A service that stores blocks on a local filesystem.\\n  * `\\\"blob\\\"` --- A service that stores blocks in a cloud object store.\\n  * `\\\"proxy\\\"` --- A keepproxy service.\\n\\n\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this Keep service was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"read_only\": {\n          \"description\": \"A boolean flag. If set, this Keep service does not accept requests to write data\\nblocks; it only serves blocks it already has.\",\n          \"type\": \"boolean\"\n        }\n      }\n    },\n    \"LinkList\": {\n      \"id\": \"LinkList\",\n      \"description\": \"A list of Link objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#linkList.\",\n          \"default\": \"arvados#linkList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Link objects.\",\n          \"items\": {\n            \"$ref\": \"Link\"\n          }\n        }\n      }\n    },\n    \"Link\": {\n      \"id\": \"Link\",\n      \"description\": \"Arvados object link\\n\\nA link provides a way to define relationships between Arvados objects,\\ndepending on their `link_class`.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"o0j2j\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This link's Arvados UUID, like `zzzzz-o0j2j-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this link.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this link was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this link.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this link was last updated. 
The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"tail_uuid\": {\n          \"description\": \"The UUID of the Arvados object that is the originator or actor in this\\nrelationship. May be null.\",\n          \"type\": \"string\"\n        },\n        \"link_class\": {\n          \"description\": \"A string that defines which kind of link this is. One of:\\n\\n  * `\\\"permission\\\"` --- This link grants a permission to the user or group\\n    referenced by `tail_uuid` to the object referenced by `head_uuid`. The\\n    access level is set by `name`.\\n  * `\\\"star\\\"` --- This link represents a \\\"favorite.\\\" The user referenced\\n    by `tail_uuid` wants quick access to the object referenced by `head_uuid`.\\n  * `\\\"tag\\\"` --- This link represents an unstructured metadata tag. The object\\n    referenced by `head_uuid` has the tag defined by `name`.\\n\\n\",\n          \"type\": \"string\"\n        },\n        \"name\": {\n          \"description\": \"The primary value of this link. For `\\\"permission\\\"` links, this is one of\\n`\\\"can_read\\\"`, `\\\"can_write\\\"`, or `\\\"can_manage\\\"`.\",\n          \"type\": \"string\"\n        },\n        \"head_uuid\": {\n          \"description\": \"The UUID of the Arvados object that is the target of this relationship.\",\n          \"type\": \"string\"\n        },\n        \"properties\": {\n          \"description\": \"A hash of arbitrary metadata for this link.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        }\n      }\n    },\n    \"LogList\": {\n      \"id\": \"LogList\",\n      \"description\": \"A list of Log objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#logList.\",\n          \"default\": \"arvados#logList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Log objects.\",\n          \"items\": {\n            \"$ref\": \"Log\"\n          }\n        }\n      }\n    },\n    \"Log\": {\n      \"id\": \"Log\",\n      \"description\": \"Arvados log record\\n\\nThis resource represents a single log record about an event in this Arvados\\ncluster. Some individual Arvados services create log records. Users can also\\ncreate custom logs.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"57u5n\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"id\": {\n          \"description\": \"The serial number of this log. 
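Arvados assigns these in increasing order as new log records are created. 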
You can use this in filters to query logs\\nthat were created before/after another.\",\n          \"type\": \"integer\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This log's Arvados UUID, like `zzzzz-57u5n-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this log.\",\n          \"type\": \"string\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this log.\",\n          \"type\": \"string\"\n        },\n        \"object_uuid\": {\n          \"description\": \"The UUID of the Arvados object that this log pertains to, such as a user\\nor container.\",\n          \"type\": \"string\"\n        },\n        \"event_at\": {\n          \"description\": \"The time this logged event occurred. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"event_type\": {\n          \"description\": \"An arbitrary short string that classifies what type of log this is.\",\n          \"type\": \"string\"\n        },\n        \"summary\": {\n          \"description\": \"A text string that describes the logged event. This is the primary\\nattribute for simple logs.\",\n          \"type\": \"text\"\n        },\n        \"properties\": {\n          \"description\": \"A hash of arbitrary metadata for this log.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this log was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this log was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"object_owner_uuid\": {\n          \"description\": \"The `owner_uuid` of the object referenced by `object_uuid` at the time\\nthis log was created.\",\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"UserList\": {\n      \"id\": \"UserList\",\n      \"description\": \"A list of User objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. 
Always arvados#userList.\",\n          \"default\": \"arvados#userList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching User objects.\",\n          \"items\": {\n            \"$ref\": \"User\"\n          }\n        }\n      }\n    },\n    \"User\": {\n      \"id\": \"User\",\n      \"description\": \"Arvados user\\n\\nA user represents a single individual or role who may be authorized to access\\nthis Arvados cluster.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"tpzed\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This user's Arvados UUID, like `zzzzz-tpzed-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this user.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this user was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this user.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this user was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"email\": {\n          \"description\": \"This user's email address.\",\n          \"type\": \"string\"\n        },\n        \"first_name\": {\n          \"description\": \"This user's first name.\",\n          \"type\": \"string\"\n        },\n        \"last_name\": {\n          \"description\": \"This user's last name.\",\n          \"type\": \"string\"\n        },\n        \"identity_url\": {\n          \"description\": \"A URL that represents this user with the cluster's identity provider.\",\n          \"type\": \"string\"\n        },\n        \"is_admin\": {\n          \"description\": \"A boolean flag. If set, this user is an administrator of the Arvados\\ncluster, and automatically passes most permissions checks.\",\n          \"type\": \"boolean\"\n        },\n        \"prefs\": {\n          \"description\": \"A hash that stores cluster-wide user preferences.\",\n          \"type\": \"Hash\"\n        },\n        \"is_active\": {\n          \"description\": \"A boolean flag. If unset, this user is not permitted to make any Arvados\\nAPI requests.\",\n          \"type\": \"boolean\"\n        },\n        \"username\": {\n          \"description\": \"This user's Unix username on virtual machines.\",\n          \"type\": \"string\"\n        }\n      }\n    },\n    \"UserAgreementList\": {\n      \"id\": \"UserAgreementList\",\n      \"description\": \"A list of UserAgreement objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. 
Always arvados#userAgreementList.\",\n          \"default\": \"arvados#userAgreementList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching UserAgreement objects.\",\n          \"items\": {\n            \"$ref\": \"UserAgreement\"\n          }\n        }\n      }\n    },\n    \"UserAgreement\": {\n      \"id\": \"UserAgreement\",\n      \"description\": \"Arvados user agreement\\n\\nA user agreement is a collection with terms that users must agree to before\\nthey can use this Arvados cluster.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"gv0sa\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this user agreement.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this user agreement was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this user agreement.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this user agreement was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"portable_data_hash\": {\n          \"description\": \"The portable data hash of this user agreement. This string provides a unique\\nand stable reference to these contents.\",\n          \"type\": \"string\"\n        },\n        \"replication_desired\": {\n          \"description\": \"The number of copies that should be made for data in this user agreement.\",\n          \"type\": \"integer\"\n        },\n        \"replication_confirmed_at\": {\n          \"description\": \"The last time the cluster confirmed that it met `replication_confirmed`\\nfor this user agreement. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"replication_confirmed\": {\n          \"description\": \"The number of copies of data in this user agreement that the cluster has confirmed\\nexist in storage.\",\n          \"type\": \"integer\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This user agreement's Arvados UUID, like `zzzzz-gv0sa-12345abcde67890`.\"\n        },\n        \"manifest_text\": {\n          \"description\": \"The manifest text that describes how files are constructed from data blocks\\nin this user agreement. 
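For example, a single-stream manifest line such as `. d41d8cd98f00b204e9800998ecf8427e+0 0:0:agreement.txt` (an illustrative manifest using the empty-block locator) lists a stream name, its block locators, and the files assembled from those blocks. 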
Refer to the [manifest format][] reference for details.\\n\\n[manifest format]: https://doc.arvados.org/architecture/manifest-format.html\\n\\n\",\n          \"type\": \"text\"\n        },\n        \"name\": {\n          \"description\": \"The name of this user agreement assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"description\": {\n          \"description\": \"A longer HTML description of this user agreement assigned by a user.\\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.\",\n          \"type\": \"string\"\n        },\n        \"properties\": {\n          \"description\": \"A hash of arbitrary metadata for this user agreement.\\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\\nRefer to the [metadata properties reference][] for details.\\n\\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\\n\\n\",\n          \"type\": \"Hash\"\n        },\n        \"delete_at\": {\n          \"description\": \"The time this user agreement will be permanently deleted. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"trash_at\": {\n          \"description\": \"The time this user agreement will be trashed. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"is_trashed\": {\n          \"description\": \"A boolean flag to indicate whether or not this user agreement is trashed.\",\n          \"type\": \"boolean\"\n        },\n        \"storage_classes_desired\": {\n          \"description\": \"An array of strings identifying the storage class(es) that should be used\\nfor data in this user agreement. Storage classes are configured by the cluster administrator.\",\n          \"type\": \"Array\"\n        },\n        \"storage_classes_confirmed\": {\n          \"description\": \"An array of strings identifying the storage class(es) the cluster has\\nconfirmed have a copy of this user agreement's data.\",\n          \"type\": \"Array\"\n        },\n        \"storage_classes_confirmed_at\": {\n          \"description\": \"The last time the cluster confirmed that data was stored on the storage\\nclass(es) in `storage_classes_confirmed`. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"current_version_uuid\": {\n          \"description\": \"The UUID of the current version of this user agreement.\",\n          \"type\": \"string\"\n        },\n        \"version\": {\n          \"description\": \"An integer that counts which version of a user agreement this record\\nrepresents. Refer to [collection versioning][] for details. 
This attribute is\\nread-only.\\n\\n[collection versioning]: https://doc.arvados.org/user/topics/collection-versioning.html\\n\\n\",\n          \"type\": \"integer\"\n        },\n        \"preserve_version\": {\n          \"description\": \"A boolean flag to indicate whether this specific version of this user agreement\\nshould be persisted in cluster storage.\",\n          \"type\": \"boolean\"\n        },\n        \"file_count\": {\n          \"description\": \"The number of files represented in this user agreement's `manifest_text`.\\nThis attribute is read-only.\",\n          \"type\": \"integer\"\n        },\n        \"file_size_total\": {\n          \"description\": \"The total size in bytes of files represented in this user agreement's `manifest_text`.\\nThis attribute is read-only.\",\n          \"type\": \"integer\"\n        }\n      }\n    },\n    \"VirtualMachineList\": {\n      \"id\": \"VirtualMachineList\",\n      \"description\": \"A list of VirtualMachine objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. Always arvados#virtualMachineList.\",\n          \"default\": \"arvados#virtualMachineList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching VirtualMachine objects.\",\n          \"items\": {\n            \"$ref\": \"VirtualMachine\"\n          }\n        }\n      }\n    },\n    \"VirtualMachine\": {\n      \"id\": \"VirtualMachine\",\n      \"description\": \"Arvados virtual machine (\\\"shell node\\\")\\n\\nThis resource stores information about a virtual machine or \\\"shell node\\\"\\nhosted on this Arvados cluster where users can log in and use preconfigured\\nArvados client tools.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"2x53u\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This virtual machine's Arvados UUID, like `zzzzz-2x53u-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this virtual machine.\",\n          \"type\": \"string\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this virtual machine.\",\n          \"type\": \"string\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this virtual machine was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"hostname\": {\n          \"description\": \"The DNS hostname where users should access this virtual machine.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this virtual machine was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        }\n      }\n    },\n    \"WorkflowList\": {\n      \"id\": \"WorkflowList\",\n      \"description\": \"A list of Workflow objects.\",\n      \"type\": \"object\",\n      \"properties\": {\n        \"kind\": {\n          \"type\": \"string\",\n          \"description\": \"Object type. 
Always arvados#workflowList.\",\n          \"default\": \"arvados#workflowList\"\n        },\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"List cache version.\"\n        },\n        \"items\": {\n          \"type\": \"array\",\n          \"description\": \"An array of matching Workflow objects.\",\n          \"items\": {\n            \"$ref\": \"Workflow\"\n          }\n        }\n      }\n    },\n    \"Workflow\": {\n      \"id\": \"Workflow\",\n      \"description\": \"Arvados workflow\\n\\nA workflow contains workflow definition source code that Arvados can execute\\nalong with associated metadata for users.\",\n      \"type\": \"object\",\n      \"uuidPrefix\": \"7fd4e\",\n      \"properties\": {\n        \"etag\": {\n          \"type\": \"string\",\n          \"description\": \"Object cache version.\"\n        },\n        \"uuid\": {\n          \"type\": \"string\",\n          \"description\": \"This workflow's Arvados UUID, like `zzzzz-7fd4e-12345abcde67890`.\"\n        },\n        \"owner_uuid\": {\n          \"description\": \"The UUID of the user or group that owns this workflow.\",\n          \"type\": \"string\"\n        },\n        \"created_at\": {\n          \"description\": \"The time this workflow was created. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_at\": {\n          \"description\": \"The time this workflow was last updated. The string encodes a UTC date and time in ISO 8601 format.\",\n          \"type\": \"datetime\"\n        },\n        \"modified_by_user_uuid\": {\n          \"description\": \"The UUID of the user that last updated this workflow.\",\n          \"type\": \"string\"\n        },\n        \"name\": {\n          \"description\": \"The name of this workflow assigned by a user.\",\n          \"type\": \"string\"\n        },\n        \"description\": {\n          \"description\": \"A longer HTML description of this workflow assigned by a user.\\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.\",\n          \"type\": \"text\"\n        },\n        \"definition\": {\n          \"description\": \"A string with the CWL source of this workflow.\",\n          \"type\": \"text\"\n        },\n        \"collection_uuid\": {\n          \"description\": \"The collection this workflow is linked to, containing the definition of the workflow.\",\n          \"type\": \"string\"\n        }\n      }\n    }\n  },\n  \"servicePath\": \"arvados/v1/\",\n  \"title\": \"Arvados API\",\n  \"version\": \"v1\"\n}"
  },
  {
    "path": "sdk/python/arvados_version.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport dataclasses\nimport os\nimport re\nimport runpy\nimport subprocess\nimport typing as t\n\nfrom pathlib import Path, PurePath, PurePosixPath\n\nimport setuptools\nimport setuptools.command.build\n\nSETUP_DIR = Path(__file__).absolute().parent\nVERSION_SCRIPT_PATH = PurePath('build', 'version-at-commit.sh')\n# Built by ArvadosPythonPackage.register\nARVADOS_PYTHON_MODULES: dict[str, 'ArvadosPythonPackage'] = {}\n\n### Metadata generation\n\n@dataclasses.dataclass\nclass ArvadosPythonPackage:\n    package_name: str\n    module_name: str\n    src_path: PurePath\n    dependencies: t.Sequence['ArvadosPythonPackage']\n\n    _VERSION_SUBS = {\n        'development-': '',\n        '~dev': '.dev',\n        '~rc': 'rc',\n    }\n\n    @classmethod\n    def register(\n            cls,\n            package_name: str,\n            module_name: str,\n            src_path: PurePath | str,\n            *dependencies: str,\n    ) -> 'ArvadosPythonPackage':\n        if not isinstance(src_path, PurePath):\n            src_path = PurePosixPath(src_path)\n        deps = [ARVADOS_PYTHON_MODULES[key] for key in dependencies]\n        this_pkg = cls(package_name, module_name, src_path, deps)\n        ARVADOS_PYTHON_MODULES[package_name] = this_pkg\n        return this_pkg\n\n    def version_file_path(self):\n        return PurePath(self.module_name, '_version.py')\n\n    def _workspace_path(self, workdir):\n        try:\n            workspace = Path(os.environ['WORKSPACE'])\n            # This will raise ValueError if they're not related,\n            # in which case we don't want to use this $WORKSPACE.\n            workdir.relative_to(workspace)\n        except KeyError:\n            # $WORKSPACE isn't set. 
Fall back to the Git worktree toplevel.\n            try:\n                git_proc = subprocess.run(\n                    ['git', 'rev-parse', '--show-toplevel'],\n                    capture_output=True,\n                    check=True,\n                    cwd=workdir,\n                    text=True,\n                )\n                workspace = Path(git_proc.stdout.removesuffix('\\n'))\n            except (subprocess.CalledProcessError, FileNotFoundError, ValueError):\n                return None\n        except ValueError:\n            return None\n        if (workspace / VERSION_SCRIPT_PATH).exists():\n            return workspace\n        else:\n            return None\n\n    def _git_version(self, workdir):\n        workspace = self._workspace_path(workdir)\n        if workspace is None:\n            return None\n        git_log_cmd = [\n            'git', 'log', '-n1', '--format=%H', '--',\n            str(VERSION_SCRIPT_PATH), str(self.src_path),\n        ]\n        git_log_cmd.extend(str(dep.src_path) for dep in self.dependencies)\n        git_log_proc = subprocess.run(\n            git_log_cmd,\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        version_proc = subprocess.run(\n            [str(VERSION_SCRIPT_PATH), git_log_proc.stdout.rstrip('\\n')],\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        return version_proc.stdout.rstrip('\\n')\n\n    def _sdist_version(self, workdir):\n        try:\n            pkg_info = (workdir / 'PKG-INFO').open()\n        except FileNotFoundError:\n            return None\n        with pkg_info:\n            for line in pkg_info:\n                key, _, val = line.partition(': ')\n                if key == 'Version':\n                    return val.rstrip('\\n')\n        raise Exception(\"found PKG-INFO file but not Version metadata in it\")\n\n    def get_version(self, workdir=SETUP_DIR):\n        version = (\n            # If we're building out of a distribution, we should pass that\n            # version through unchanged.\n            self._sdist_version(workdir)\n            # Otherwise follow the usual Arvados versioning rules.\n            or os.environ.get('ARVADOS_BUILDING_VERSION')\n            or self._git_version(workdir)\n        )\n        if not version:\n            raise Exception(f\"no version information available for {self.package_name}\")\n        else:\n            return re.sub(\n                r'(^development-|~dev|~rc)',\n                lambda match: self._VERSION_SUBS[match.group(0)],\n                version,\n            )\n\n    def get_dependencies_version(self, workdir=SETUP_DIR, version=None):\n        if version is None:\n            version = self.get_version(workdir)\n        # A packaged development release should be installed with other\n        # development packages built from the same source, but those\n        # dependencies may have earlier \"dev\" versions (read: less recent\n        # Git commit timestamps). This compatible version dependency\n        # expresses that as closely as possible. 
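For example (hypothetical\n        # version), a build versioned 3.2.0.dev20240101 yields the specifier\n        # ('~=', '3.2.0.dev0'), so any 3.2.0 development build satisfies it. 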
Allowing versions\n        # compatible with .dev0 allows any development release.\n        # Regular expression borrowed partially from\n        # <https://packaging.python.org/en/latest/specifications/version-specifiers/#version-specifiers-regex>\n        dep_ver, match_count = re.subn(r'\\.dev(0|[1-9][0-9]*)$', '.dev0', version, 1)\n        return ('~=' if match_count else '==', dep_ver)\n\n    def iter_dependencies(self, workdir=SETUP_DIR, version=None, extras=None):\n        if extras is None:\n            extras = {}\n        dep_op, dep_ver = self.get_dependencies_version(workdir, version)\n        for dep in self.dependencies:\n            try:\n                dep_extras = f'[{\",\".join(extras[dep.package_name])}]'\n            except KeyError:\n                dep_extras = ''\n            yield f'{dep.package_name}{dep_extras} {dep_op} {dep_ver}'\n\n\n### Package database\n\nArvadosPythonPackage.register(\n    'arvados-python-client',\n    'arvados',\n    'sdk/python',\n)\nArvadosPythonPackage.register(\n    'crunchstat_summary',\n    'crunchstat_summary',\n    'tools/crunchstat-summary',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cluster-activity',\n    'arvados_cluster_activity',\n    'tools/cluster-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cwl-runner',\n    'arvados_cwl',\n    'sdk/cwl',\n    'arvados-python-client',\n    'crunchstat_summary',\n)\nArvadosPythonPackage.register(\n    'arvados_fuse',\n    'arvados_fuse',\n    'services/fuse',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-user-activity',\n    'arvados_user_activity',\n    'tools/user-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-tools',\n    'NO SRCDIR',\n    'tools/python-metapackage',\n    *ARVADOS_PYTHON_MODULES,\n)\nArvadosPythonPackage.register(\n    'arvados-docker-cleaner',\n    'arvados_docker',\n    'services/dockercleaner',\n)\n\n### setuptools integration\n\nclass BuildArvadosVersion(setuptools.Command):\n    \"\"\"Write _version.py for an Arvados module\"\"\"\n    def initialize_options(self):\n        self.build_lib = None\n\n    def finalize_options(self):\n        self.set_undefined_options(\"build_py\", (\"build_lib\", \"build_lib\"))\n        arv_mod = ARVADOS_PYTHON_MODULES[self.distribution.get_name()]\n        self.out_path = Path(self.build_lib, arv_mod.version_file_path())\n\n    def run(self):\n        with self.out_path.open('w') as out_file:\n            print(f'__version__ = {self.distribution.get_version()!r}', file=out_file)\n\n    def get_outputs(self):\n        return [str(self.out_path)]\n\n    def get_source_files(self):\n        return []\n\n    def get_output_mapping(self):\n        return {}\n\n\nclass ArvadosBuildCommand(setuptools.command.build.build):\n    sub_commands = [\n        *setuptools.command.build.build.sub_commands,\n        ('build_arvados_version', None),\n    ]\n\n\nCMDCLASS = {\n    'build': ArvadosBuildCommand,\n    'build_arvados_version': BuildArvadosVersion,\n}\n"
  },
  {
    "path": "sdk/python/bin/arv-copy",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom arvados.commands.arv_copy import main\nmain()\n"
  },
  {
    "path": "sdk/python/bin/arv-get",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport sys\n\nfrom arvados.commands.get import main\n\nsys.exit(main(sys.argv[1:]))\n"
  },
  {
    "path": "sdk/python/bin/arv-keepdocker",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom arvados.commands.keepdocker import main\nmain()\n"
  },
  {
    "path": "sdk/python/bin/arv-ls",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport sys\n\nfrom arvados.commands.ls import main\n\nsys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))\n"
  },
  {
    "path": "sdk/python/bin/arv-normalize",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport hashlib\nimport os\nimport re\nimport string\nimport sys\n\nimport arvados\nfrom arvados._version import __version__\n\nparser = argparse.ArgumentParser(\n    description='Read manifest on standard input and put normalized manifest on standard output.')\n\nparser.add_argument('--extract', type=str,\n                    help=\"The file to extract from the input manifest\")\nparser.add_argument('--strip', action='store_true',\n                    help=\"Strip authorization tokens\")\nparser.add_argument('--version', action='version',\n                    version=\"%s %s\" % (sys.argv[0], __version__),\n                    help='Print version and exit.')\n\nargs = parser.parse_args()\n\nr = sys.stdin.read()\n\ncr = arvados.CollectionReader(r)\n\nif args.extract:\n    i = args.extract.rfind('/')\n    if i == -1:\n        stream = '.'\n        fn = args.extract\n    else:\n        stream = args.extract[:i]\n        fn = args.extract[(i+1):]\n    for s in cr.all_streams():\n        if s.name() == stream:\n            if fn in s.files():\n                sys.stdout.write(s.files()[fn].as_manifest())\nelse:\n    sys.stdout.write(cr.manifest_text(strip=args.strip, normalize=True))\n"
  },
  {
    "path": "sdk/python/bin/arv-put",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom arvados.commands.put import main\nmain()\n"
  },
  {
    "path": "sdk/python/bin/arv-ws",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom arvados.commands.ws import main\nmain()\n"
  },
  {
    "path": "sdk/python/discovery2pydoc.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"discovery2pydoc - Build skeleton Python from the Arvados discovery document\n\nThis tool reads the Arvados discovery document and writes a Python source file\nwith classes and methods that correspond to the resources that\ngoogle-api-python-client builds dynamically. This source does not include any\nimplementation, but it does include real method signatures and documentation\nstrings, so it's useful as documentation for tools that read Python source,\nincluding pydoc and pdoc.\n\nIf you run this tool with the path to a discovery document, it uses no\ndependencies outside the Python standard library. If it needs to read\nconfiguration to find the discovery document dynamically, it'll load the\n`arvados` module to do that.\n\"\"\"\n\nimport argparse\nimport inspect\nimport json\nimport keyword\nimport operator\nimport os\nimport pathlib\nimport re\nimport sys\nimport urllib.parse\nimport urllib.request\n\nfrom typing import (\n    Any,\n    Callable,\n    Iterator,\n    Mapping,\n    Optional,\n    Sequence,\n)\n\nRESOURCE_SCHEMA_MAP = {\n    # Special cases for iter_resource_schemas that can't be generated\n    # automatically. Note these schemas may not actually be defined.\n    'sys': 'Sys',\n    'vocabularies': 'Vocabulary',\n}\n\ndef iter_resource_schemas(name: str) -> Iterator[str]:\n    try:\n        schema_name = RESOURCE_SCHEMA_MAP[name]\n    except KeyError:\n        # Remove trailing 's'\n        schema_name = name[:-1]\n    schema_name = re.sub(\n        r'(^|_)(\\w)',\n        lambda match: match.group(2).capitalize(),\n        schema_name,\n    )\n    yield schema_name\n    yield f'{schema_name}List'\n\nLOWERCASE = operator.methodcaller('lower')\nNAME_KEY = operator.attrgetter('name')\nSTDSTREAM_PATH = pathlib.Path('-')\nTITLECASE = operator.methodcaller('title')\n\n_ALIASED_METHODS = frozenset([\n    'destroy',\n    'index',\n    'show',\n])\n_DEPRECATED_NOTICE = '''\n\n.. WARNING:: Deprecated\n   This resource is deprecated in the Arvados API.\n'''\n# _DEPRECATED_RESOURCES contains string keys of resources in the discovery\n# document that are currently deprecated.\n_DEPRECATED_RESOURCES = frozenset()\n_DEPRECATED_SCHEMAS = frozenset(\n    schema_name\n    for resource_name in _DEPRECATED_RESOURCES\n    for schema_name in iter_resource_schemas(resource_name)\n)\n\n_LIST_UTIL_METHODS = {\n    'ComputedPermissionList': 'arvados.util.iter_computed_permissions',\n    'ComputedPermissions': 'arvados.util.iter_computed_permissions',\n}\n_LIST_METHOD_PYDOC = '''\nThis method returns a single page of `{cls_name}` objects that match your search\ncriteria. If you just want to iterate all objects that match your search\ncriteria, consider using `{list_util_func}`.\n'''\n_LIST_SCHEMA_PYDOC = '''\n\nThis is the dictionary object returned when you call `{cls_name}s.list`.\nIf you just want to iterate all objects that match your search criteria,\nconsider using `{list_util_func}`.\nIf you work with this raw object, the keys of the dictionary are documented\nbelow, along with their types. The `items` key maps to a list of matching\n`{cls_name}` objects.\n'''\n_MODULE_PYDOC = '''Arvados API client reference documentation\n\nThis module provides reference documentation for the interface of the\nArvados API client, including method signatures and type information for\nreturned objects. 
However, the functions in `arvados.api` will return\ndifferent classes at runtime that are generated dynamically from the Arvados\nAPI discovery document. The classes in this module do not have any\nimplementation, and you should not instantiate them in your code.\n\nIf you're just starting out, `ArvadosAPIClient` documents the methods\navailable from the client object. From there, you can follow the trail into\nresource methods, request objects, and finally the data dictionaries returned\nby the API server.\n'''\n_SCHEMA_PYDOC = '''\n\nThis is the dictionary object that represents a single {cls_name} in Arvados\nand is returned by most `{cls_name}s` methods.\nThe keys of the dictionary are documented below, along with their types.\nNot every key may appear in every dictionary returned by an API call.\nWhen a method doesn't return all the data, you can use its `select` parameter\nto list the specific keys you need. Refer to the API documentation for details.\n'''\n\n_MODULE_PRELUDE = '''\nimport googleapiclient.discovery\nimport googleapiclient.http\nimport httplib2\nimport sys\nfrom typing import Any, Dict, Generic, List, Literal, Optional, TypedDict, TypeVar\n\n# ST represents an API response type\nST = TypeVar('ST', bound=TypedDict)\n'''\n_REQUEST_CLASS = '''\nclass ArvadosAPIRequest(googleapiclient.http.HttpRequest, Generic[ST]):\n    \"\"\"Generic API request object\n\n    When you call an API method in the Arvados Python SDK, it returns a\n    request object. You usually call `execute()` on this object to submit the\n    request to your Arvados API server and retrieve the response. `execute()`\n    will return the type of object annotated in the subscript of\n    `ArvadosAPIRequest`.\n    \"\"\"\n\n    def execute(self, http: Optional[httplib2.Http]=None, num_retries: int=0) -> ST:\n        \"\"\"Execute this request and return the response\n\n        Arguments:\n\n        * http: httplib2.Http | None --- The HTTP client object to use to\n          execute the request. If not specified, uses the HTTP client object\n          created with the API client object.\n\n        * num_retries: int --- The maximum number of times to retry this\n          request if the server returns a retryable failure. The API client\n          object also has a maximum number of retries specified when it is\n          instantiated (see `arvados.api.api_client`). This request is run\n          with the larger of that number and this argument. Default 0.\n        \"\"\"\n\n'''\n\n# Annotation represents a valid Python type annotation. 
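Values here are plain\n# strings such as 'str' or 'Dict[str, Any]' (see _TYPE_MAP below). 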
Future development\n# could expand this to include other valid types like `type`.\nAnnotation = str\n_TYPE_MAP: Mapping[str, Annotation] = {\n    # Map the API's JavaScript-based type names to Python annotations.\n    # Some of these may disappear after Arvados issue #19795 is fixed.\n    'Array': 'List',\n    'array': 'List',\n    'boolean': 'bool',\n    # datetime fields are strings in ISO 8601 format.\n    'datetime': 'str',\n    'Hash': 'Dict[str, Any]',\n    'integer': 'int',\n    'object': 'Dict[str, Any]',\n    'string': 'str',\n    'text': 'str',\n}\n\ndef get_type_annotation(name: str) -> str:\n    return _TYPE_MAP.get(name, name)\n\n\ndef to_docstring(s: str, indent: int) -> str:\n    prefix = ' ' * indent\n    s = s.replace('\"\"\"', '\"\"\\\"')\n    s = re.sub(r'(\\n+)', r'\\1' + prefix, s)\n    s = s.strip()\n    if '\\n' in s:\n        return f'{prefix}\"\"\"{s}\\n{prefix}\"\"\"'\n    else:\n        return f'{prefix}\"\"\"{s}\"\"\"'\n\n\ndef transform_name(s: str, sep: str, fix_part: Callable[[str], str]) -> str:\n    return sep.join(fix_part(part) for part in s.split('_'))\n\n\ndef classify_name(s: str) -> str:\n    return transform_name(s, '', TITLECASE)\n\n\ndef humanize_name(s: str) -> str:\n    return transform_name(s, ' ', LOWERCASE)\n\n\nclass Parameter(inspect.Parameter):\n    def __init__(self, name: str, spec: Mapping[str, Any]) -> None:\n        self.api_name = name\n        self._spec = spec\n        if keyword.iskeyword(name):\n            name += '_'\n        annotation = get_type_annotation(self._spec['type'])\n        if self.is_required():\n            default = inspect.Parameter.empty\n        else:\n            default = self.default_value()\n            if default is None:\n                annotation = f'Optional[{annotation}]'\n        super().__init__(\n            name,\n            inspect.Parameter.KEYWORD_ONLY,\n            annotation=annotation,\n            default=default,\n        )\n\n    @classmethod\n    def from_request(cls, spec: Mapping[str, Any]) -> 'Parameter':\n        try:\n            # Unpack the single key and value out of properties\n            (key, val_spec), = spec['properties'].items()\n        except (KeyError, ValueError):\n            # ValueError if there was not exactly one property\n            raise NotImplementedError(\n                \"only exactly one request parameter is currently supported\",\n            ) from None\n        val_type = get_type_annotation(val_spec['$ref'])\n        return cls('body', {\n            'description': f\"\"\"A dictionary with a single item `{key!r}`.\nIts value is a `{val_type}` dictionary defining the attributes to set.\"\"\",\n            'required': spec['required'],\n            'type': f'Dict[Literal[{key!r}], {val_type}]',\n        })\n\n    def default_value(self) -> object:\n        try:\n            src_value: str = self._spec['default']\n        except KeyError:\n            return None\n        try:\n            return json.loads(src_value)\n        except ValueError:\n            return src_value\n\n    def is_required(self) -> bool:\n        return self._spec['required']\n\n    def doc(self) -> str:\n        if self.default is None or self.default is inspect.Parameter.empty:\n            default_doc = ''\n        else:\n            default_doc = f\"Default `{self.default!r}`.\"\n        description = self._spec['description'].rstrip()\n        # Does the description contain multiple paragraphs of real text\n        # (excluding, e.g., hyperlink targets)?\n        if 
re.search(r'\\n\\s*\\n\\s*[\\w*]', description):\n            # Yes: append the default doc as a separate paragraph.\n            description += f'\\n\\n{default_doc}'\n        else:\n            # No: append the default doc to the first (and only) paragraph.\n            description = re.sub(\n                r'(\\n\\s*\\n|\\s*$)',\n                rf' {default_doc}\\1',\n                description,\n                count=1,\n            )\n        # Align all lines with the list bullet we're formatting it in.\n        description = re.sub(r'\\n(\\S)', r'\\n  \\1', description)\n        return f'''\n* {self.api_name}: {self.annotation} --- {description}\n'''\n\n\nclass Method:\n    def __init__(\n            self,\n            name: str,\n            spec: Mapping[str, Any],\n            cls_name: Optional[str]=None,\n            annotate: Callable[[Annotation], Annotation]=str,\n    ) -> None:\n        self.name = name\n        self._spec = spec\n        self.cls_name = cls_name\n        self._annotate = annotate\n        self._required_params = []\n        self._optional_params = []\n        for param in self._iter_parameters():\n            if param.is_required():\n                param_list = self._required_params\n            else:\n                param_list = self._optional_params\n            param_list.append(param)\n        self._required_params.sort(key=NAME_KEY)\n        self._optional_params.sort(key=NAME_KEY)\n\n    def _iter_parameters(self) -> Iterator[Parameter]:\n        try:\n            body = self._spec['request']\n        except KeyError:\n            pass\n        else:\n            yield Parameter.from_request(body)\n        for name, spec in self._spec['parameters'].items():\n            yield Parameter(name, spec)\n\n    def signature(self) -> inspect.Signature:\n        parameters = [\n            inspect.Parameter('self', inspect.Parameter.POSITIONAL_OR_KEYWORD),\n            *self._required_params,\n            *self._optional_params,\n        ]\n        try:\n            returns = get_type_annotation(self._spec['response']['$ref'])\n        except KeyError:\n            returns = 'Dict[str, Any]'\n        returns = self._annotate(returns)\n        return inspect.Signature(parameters, return_annotation=returns)\n\n    def doc(self, doc_slice: slice=slice(None)) -> str:\n        doc_lines = self._spec['description'].splitlines(keepends=True)[doc_slice]\n        if not doc_lines[-1].endswith('\\n'):\n            doc_lines.append('\\n')\n        try:\n            returns_list = self._spec['response']['$ref'].endswith('List')\n        except KeyError:\n            returns_list = False\n        if returns_list and self.cls_name is not None:\n            doc_lines.append(_LIST_METHOD_PYDOC.format(\n                cls_name=self.cls_name[:-1],\n                list_util_func=_LIST_UTIL_METHODS.get(self.cls_name, 'arvados.util.keyset_list_all'),\n            ))\n        if self._required_params:\n            doc_lines.append(\"\\nRequired parameters:\\n\")\n            doc_lines.extend(param.doc() for param in self._required_params)\n        if self._optional_params:\n            doc_lines.append(\"\\nOptional parameters:\\n\")\n            doc_lines.extend(param.doc() for param in self._optional_params)\n        return f'''\n    def {self.name}{self.signature()}:\n{to_docstring(''.join(doc_lines), 8)}\n'''\n\n\ndef document_schema(name: str, spec: Mapping[str, Any]) -> str:\n    description = spec['description']\n    if name in _DEPRECATED_SCHEMAS:\n        
description += _DEPRECATED_NOTICE\n    if name.endswith('List'):\n        description += _LIST_SCHEMA_PYDOC.format(\n            cls_name=name[:-4],\n            list_util_func=_LIST_UTIL_METHODS.get(name, 'arvados.util.keyset_list_all'),\n        )\n    else:\n        description += _SCHEMA_PYDOC.format(cls_name=name)\n    lines = [\n        f\"class {name}(TypedDict, total=False):\",\n        to_docstring(description, 4),\n    ]\n    for field_name, field_spec in spec['properties'].items():\n        field_type = get_type_annotation(field_spec['type'])\n        try:\n            subtype = field_spec['items']['$ref']\n        except KeyError:\n            pass\n        else:\n            field_type += f\"[{get_type_annotation(subtype)}]\"\n\n        field_line = f\"    {field_name}: {field_type!r}\"\n        try:\n            field_line += f\" = {field_spec['default']!r}\"\n        except KeyError:\n            pass\n        lines.append(field_line)\n\n        field_doc: str = field_spec.get('description', '')\n        if field_spec['type'] == 'datetime':\n            field_doc += \" Pass this to `ciso8601.parse_datetime` to build a `datetime.datetime`.\"\n        if field_doc:\n            lines.append(to_docstring(field_doc, 4))\n    lines.append('\\n')\n    return '\\n'.join(lines)\n\ndef document_resource(name: str, spec: Mapping[str, Any]) -> str:\n    class_name = classify_name(name)\n    docstring = f\"Methods to query and manipulate Arvados {humanize_name(name)}\"\n    if class_name in _DEPRECATED_RESOURCES:\n        docstring += _DEPRECATED_NOTICE\n    methods = [\n        Method(key, meth_spec, class_name, 'ArvadosAPIRequest[{}]'.format)\n        for key, meth_spec in spec['methods'].items()\n        if key not in _ALIASED_METHODS\n    ]\n    return f'''class {class_name}:\n{to_docstring(docstring, 4)}\n{''.join(method.doc() for method in sorted(methods, key=NAME_KEY))}\n'''\n\ndef parse_arguments(arglist: Optional[Sequence[str]]) -> argparse.Namespace:\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        '--output-file', '-O',\n        type=pathlib.Path,\n        metavar='PATH',\n        default=STDSTREAM_PATH,\n        help=\"\"\"Path to write output. 
Specify `-` to use stdout (the default)\n\"\"\")\n    parser.add_argument(\n        'discovery_url',\n        nargs=argparse.OPTIONAL,\n        metavar='URL',\n        help=\"\"\"URL or file path of a discovery document to load.\nSpecify `-` to use stdin.\nIf not provided, retrieved dynamically from Arvados client configuration.\n\"\"\")\n    args = parser.parse_args(arglist)\n    if args.discovery_url is None:\n        from arvados.api import api_kwargs_from_config\n        discovery_fmt = api_kwargs_from_config('v1')['discoveryServiceUrl']\n        args.discovery_url = discovery_fmt.format(api='arvados', apiVersion='v1')\n    elif args.discovery_url == '-':\n        args.discovery_url = 'file:///dev/stdin'\n    else:\n        parts = urllib.parse.urlsplit(args.discovery_url)\n        if not (parts.scheme or parts.netloc):\n            args.discovery_url = pathlib.Path(args.discovery_url).resolve().as_uri()\n    # Our output is Python source, so it should be UTF-8 regardless of locale.\n    if args.output_file == STDSTREAM_PATH:\n        args.out_file = open(sys.stdout.fileno(), 'w', encoding='utf-8', closefd=False)\n    else:\n        args.out_file = args.output_file.open('w', encoding='utf-8')\n    return args\n\ndef main(arglist: Optional[Sequence[str]]=None) -> int:\n    args = parse_arguments(arglist)\n    with urllib.request.urlopen(args.discovery_url) as discovery_file:\n        status = discovery_file.getcode()\n        if not (status is None or 200 <= status < 300):\n            print(\n                f\"error getting {args.discovery_url}: server returned {status}\",\n                file=sys.stderr,\n            )\n            return os.EX_IOERR\n        discovery_document = json.load(discovery_file)\n    print(\n        to_docstring(_MODULE_PYDOC, indent=0),\n        _MODULE_PRELUDE,\n        _REQUEST_CLASS,\n        sep='\\n', file=args.out_file,\n    )\n\n    schemas = dict(discovery_document['schemas'])\n    resources = sorted(discovery_document['resources'].items())\n    for name, resource_spec in resources:\n        for schema_name in iter_resource_schemas(name):\n            try:\n                schema_spec = schemas.pop(schema_name)\n            except KeyError:\n                pass\n            else:\n                print(document_schema(schema_name, schema_spec), file=args.out_file)\n        print(document_resource(name, resource_spec), file=args.out_file)\n    for name, schema_spec in sorted(schemas.items()):\n        print(document_schema(name, schema_spec), file=args.out_file)\n\n    print(\n        '''class ArvadosAPIClient(googleapiclient.discovery.Resource):''',\n        sep='\\n', file=args.out_file,\n    )\n    for name, _ in resources:\n        class_name = classify_name(name)\n        docstring = f\"Return an instance of `{class_name}` to call methods via this client\"\n        if class_name in _DEPRECATED_RESOURCES:\n            docstring += _DEPRECATED_NOTICE\n        method_spec = {\n            'description': docstring,\n            'parameters': {},\n            'response': {\n                '$ref': class_name,\n            },\n        }\n        print(Method(name, method_spec).doc(), end='', file=args.out_file)\n\n    args.out_file.close()\n    return os.EX_OK\n\nif __name__ == '__main__':\n    sys.exit(main())\n
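\n# Example invocation, for illustration (these paths mirror the defaults that\n# setup.py's build_discovery_pydoc step passes; run from sdk/python):\n#\n#     python discovery2pydoc.py --output-file arvados/api_resources.py arvados-v1-discovery.json\n"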
  },
  {
    "path": "sdk/python/fpm-info.sh",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncase \"$TARGET\" in\n    debian12 | ubuntu2204 )\n        fpm_depends+=(libcurl4)\n        ;;\n\n    debian* | ubuntu* )\n        fpm_depends+=(libcurl4t64)\n        ;;\nesac\n"
  },
  {
    "path": "sdk/python/pyproject.toml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n[build-system]\nrequires = [\"setuptools ~= 80.9\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\ndynamic = [\"dependencies\", \"version\"]\nname = \"arvados-python-client\"\ndescription = \"Arvados client library\"\nauthors = [\n  {name = \"Arvados\", email = \"info@arvados.org\"},\n]\nclassifiers = [\n  \"Development Status :: 5 - Production/Stable\",\n  \"Environment :: Console\",\n  \"Intended Audience :: Science/Research\",\n  \"Operating System :: POSIX\",\n  \"Programming Language :: Python :: 3\",\n  \"Programming Language :: Python :: 3.10\",\n  \"Programming Language :: Python :: 3.11\",\n  \"Programming Language :: Python :: 3.12\",\n  \"Programming Language :: Python :: 3.13\",\n  \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n]\nlicense = \"Apache-2.0\"\nlicense-files = [\n  \"LICENSE-2.0.txt\",\n]\nreadme = \"README.rst\"\nrequires-python = \"~= 3.10\"\n\n[project.urls]\nHomepage = \"https://arvados.org\"\nDocumentation = \"https://doc.arvados.org\"\nRepository = \"https://github.com/arvados/arvados\"\nIssues = \"https://github.com/arvados/arvados/issues\"\nChangelog = \"https://arvados.org/releases/\"\n\n[tool.setuptools]\nscript-files = [\n  \"bin/arv-copy\",\n  \"bin/arv-get\",\n  \"bin/arv-keepdocker\",\n  \"bin/arv-ls\",\n  \"bin/arv-normalize\",\n  \"bin/arv-put\",\n  \"bin/arv-ws\",\n]\n\n[tool.setuptools.data-files]\n\"share/doc/arvados-python-client\" = [\n  \"LICENSE-2.0.txt\",\n  \"README.rst\",\n]\n\n[tool.setuptools.packages.find]\nexclude = [\"tests*\"]\n"
  },
  {
    "path": "sdk/python/pytest.ini",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n[pytest]\naddopts =\n  --import-mode=append\n  --strict-markers\n\nmarkers =\n  cwl_conformance: CWL conformance test suite\n  integration: Test that depends on external Arvados services\n\ntestpaths =\n  tests\n"
  },
  {
    "path": "sdk/python/setup.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport setuptools\nimport runpy\n\nfrom pathlib import Path\n\narvados_version = runpy.run_path(Path(__file__).with_name('arvados_version.py'))\n\nclass BuildDiscoveryPydoc(setuptools.Command):\n    \"\"\"Generate Arvados API documentation\n\n    This class implements a setuptools subcommand, so it follows\n    [the SubCommand protocol][1]. Most of these methods are required by that\n    protocol, except `should_run`, which we register as the subcommand\n    predicate.\n\n    [1]: https://setuptools.pypa.io/en/latest/userguide/extension.html#setuptools.command.build.SubCommand\n    \"\"\"\n    user_options = [\n        ('discovery-json=', 'J', 'JSON discovery document used to build pydoc'),\n        ('discovery-output=', 'O', 'relative path to write discovery document pydoc'),\n    ]\n\n    def initialize_options(self):\n        self.build_lib = None\n        self.discovery_json = 'arvados-v1-discovery.json'\n        self.discovery_output = str(Path('arvados', 'api_resources.py'))\n\n    def _relative_path(self, src, optname):\n        retval = Path(src)\n        if retval.is_absolute():\n            raise Exception(f\"--{optname} should be a relative path\")\n        else:\n            return retval\n\n    def finalize_options(self):\n        self.set_undefined_options(\"build_py\", (\"build_lib\", \"build_lib\"))\n        self.json_path = self._relative_path(self.discovery_json, 'discovery-json')\n        self.out_path = Path(\n            self.build_lib,\n            self._relative_path(self.discovery_output, 'discovery-output'),\n        )\n\n    def run(self):\n        discovery2pydoc = runpy.run_path(Path(__file__).with_name('discovery2pydoc.py'))\n        arglist = ['--output-file', str(self.out_path), str(self.json_path)]\n        returncode = discovery2pydoc['main'](arglist)\n        if returncode != 0:\n            raise Exception(f\"discovery2pydoc exited {returncode}\")\n\n    def get_outputs(self):\n        return [str(self.out_path)]\n\n    def get_source_files(self):\n        return [self.discovery_json]\n\n    def get_output_mapping(self):\n        return {\n            str(self.out_path): self.discovery_json,\n        }\n\n\nclass ArvadosBuild(arvados_version['ArvadosBuildCommand']):\n    sub_commands = [\n        *arvados_version['ArvadosBuildCommand'].sub_commands,\n        ('build_discovery_pydoc', None),\n    ]\n\n\narv_mod = arvados_version['ARVADOS_PYTHON_MODULES']['arvados-python-client']\nversion = arv_mod.get_version()\nsetuptools.setup(\n    version=version,\n    cmdclass={\n        'build': ArvadosBuild,\n        'build_arvados_version': arvados_version['BuildArvadosVersion'],\n        'build_discovery_pydoc': BuildDiscoveryPydoc,\n    },\n    install_requires=[\n        *arv_mod.iter_dependencies(version=version),\n        'boto3',\n        'ciso8601 >= 2.0.0',\n        'google-api-python-client >= 2.1.0',\n        'google-auth',\n        'httplib2 >= 0.9.2',\n        'pycurl >= 7.19.5.1',\n        # As of 636a597c, sdk/cwl depends on cwltool == 3.1.20240508115724,\n        # which also has the following ruamel.yaml dependency; see\n        # https://github.com/common-workflow-language/cwltool/blob/3.1.20240508115724/setup.py#L127\n        'ruamel.yaml >= 0.16, < 0.19',\n        'websockets >= 11.0',\n    ],\n)\n"
  },
  {
    "path": "sdk/python/tests/__init__.py",
    "content": ""
  },
  {
    "path": "sdk/python/tests/arvados_testutil.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport arvados\nimport contextlib\nimport errno\nimport hashlib\nimport http.client\nimport httplib2\nimport io\nimport os\nimport pycurl\nimport queue\nimport shutil\nimport sys\nimport tempfile\nimport unittest\n\nfrom io import StringIO, BytesIO\nfrom unittest import mock\n\n# Use this hostname when you want to make sure the traffic will be\n# instantly refused.  100::/64 is a dedicated black hole.\nTEST_HOST = '100::'\n\nskip_sleep = mock.patch('time.sleep', lambda n: None)  # clown'll eat me\n\ndef queue_with(items):\n    \"\"\"Return a thread-safe iterator that yields the given items.\n\n    +items+ can be given as an array or an iterator. If an iterator is\n    given, it will be consumed to fill the queue before queue_with()\n    returns.\n    \"\"\"\n    q = queue.Queue()\n    for val in items:\n        q.put(val)\n    return lambda *args, **kwargs: q.get(block=False)\n\n# fake_httplib2_response and mock_responses\n# mock calls to httplib2.Http.request()\ndef fake_httplib2_response(code, **headers):\n    headers.update(status=str(code),\n                   reason=http.client.responses.get(code, \"Unknown Response\"))\n    return httplib2.Response(headers)\n\ndef mock_responses(body, *codes, **headers):\n    if not isinstance(body, bytes) and hasattr(body, 'encode'):\n        body = body.encode()\n    return mock.patch('httplib2.Http.request', side_effect=queue_with((\n        (fake_httplib2_response(code, **headers), body) for code in codes)))\n\ndef mock_api_responses(api_client, body, codes, headers={}, method='request'):\n    if not isinstance(body, bytes) and hasattr(body, 'encode'):\n        body = body.encode()\n    return mock.patch.object(api_client._http, method, side_effect=queue_with((\n        (fake_httplib2_response(code, **headers), body) for code in codes)))\n\ndef str_keep_locator(s):\n    return '{}+{}'.format(hashlib.md5(s if isinstance(s, bytes) else s.encode()).hexdigest(), len(s))\n\n@contextlib.contextmanager\ndef redirected_streams(stdout=None, stderr=None):\n    if stdout == StringIO:\n        stdout = StringIO()\n    if stderr == StringIO:\n        stderr = StringIO()\n    orig_stdout, sys.stdout = sys.stdout, stdout or sys.stdout\n    orig_stderr, sys.stderr = sys.stderr, stderr or sys.stderr\n    try:\n        yield (stdout, stderr)\n    finally:\n        sys.stdout = orig_stdout\n        sys.stderr = orig_stderr\n\n\nclass VersionChecker(object):\n    def assertVersionOutput(self, out, err):\n        self.assertEqual(err.getvalue(), '')\n        v = out.getvalue()\n        self.assertRegex(v, r\"[0-9]+\\.[0-9]+\\.[0-9]+(\\.dev[0-9]+)?$\\n\")\n\n\nclass FakeCurl(object):\n    @classmethod\n    def make(cls, code, body=b'', headers={}):\n        if not isinstance(body, bytes) and hasattr(body, 'encode'):\n            body = body.encode()\n        return mock.Mock(spec=cls, wraps=cls(code, body, headers))\n\n    def __init__(self, code=200, body=b'', headers={}):\n        self._opt = {}\n        self._got_url = None\n        self._writer = None\n        self._headerfunction = None\n        self._resp_code = code\n        self._resp_body = body\n        self._resp_headers = headers\n\n    def getopt(self, opt):\n        return self._opt.get(str(opt), None)\n\n    def setopt(self, opt, val):\n        self._opt[str(opt)] = val\n        if opt == pycurl.WRITEFUNCTION:\n            self._writer = val\n        elif opt == pycurl.HEADERFUNCTION:\n 
           self._headerfunction = val\n\n    def perform(self):\n        if not isinstance(self._resp_code, int):\n            raise self._resp_code\n        if self.getopt(pycurl.URL) is None:\n            raise ValueError\n        if self._writer is None:\n            raise ValueError\n        if self._headerfunction:\n            self._headerfunction(\"HTTP/1.1 {} Status\".format(self._resp_code))\n            for k, v in self._resp_headers.items():\n                self._headerfunction(k + ': ' + str(v))\n        if type(self._resp_body) is not bool:\n            self._writer(self._resp_body)\n\n    def close(self):\n        pass\n\n    def reset(self):\n        \"\"\"Prevent fake UAs from going back into the user agent pool.\"\"\"\n        raise Exception\n\n    def getinfo(self, opt):\n        if opt == pycurl.RESPONSE_CODE:\n            return self._resp_code\n        raise Exception\n\n\ndef mock_keep_responses(body, *codes, **headers):\n    \"\"\"Patch pycurl to return fake responses and raise exceptions.\n\n    body can be a string to return as the response body; an exception\n    to raise when perform() is called; or an iterable that returns a\n    sequence of such values.\n    \"\"\"\n    cm = mock.MagicMock()\n    if isinstance(body, tuple):\n        codes = list(codes)\n        codes.insert(0, body)\n        responses = [\n            FakeCurl.make(code=code, body=b, headers=headers)\n            for b, code in codes\n        ]\n    else:\n        responses = [\n            FakeCurl.make(code=code, body=body, headers=headers)\n            for code in codes\n        ]\n    cm.side_effect = queue_with(responses)\n    cm.responses = responses\n    return mock.patch('pycurl.Curl', cm)\n\n\nclass ApiClientMock(object):\n    def api_client_mock(self):\n        api_mock = mock.MagicMock(name='api_client_mock')\n        api_mock.config.return_value = {\n            'StorageClasses': {\n                'default': {'Default': True}\n            }\n        }\n        return api_mock\n\n    def mock_keep_services(self, api_mock=None, status=200, count=12,\n                           service_type='disk',\n                           service_host=None,\n                           service_port=None,\n                           service_ssl_flag=False,\n                           additional_services=[],\n                           read_only=False):\n        if api_mock is None:\n            api_mock = self.api_client_mock()\n        body = {\n            'items_available': count,\n            'items': [{\n                'uuid': 'zzzzz-bi6l4-{:015x}'.format(i),\n                'owner_uuid': 'zzzzz-tpzed-000000000000000',\n                'service_host': service_host or 'keep0x{:x}'.format(i),\n                'service_port': service_port or 65535-i,\n                'service_ssl_flag': service_ssl_flag,\n                'service_type': service_type,\n                'read_only': read_only,\n            } for i in range(0, count)] + additional_services\n        }\n        self._mock_api_call(api_mock.keep_services().accessible, status, body)\n        return api_mock\n\n    def _mock_api_call(self, mock_method, code, body):\n        mock_method = mock_method().execute\n        if code == 200:\n            mock_method.return_value = body\n        else:\n            mock_method.side_effect = arvados.errors.ApiError(\n                fake_httplib2_response(code), b\"{}\")\n\n\nclass ArvadosBaseTestCase(unittest.TestCase):\n    # This class provides common utility functions for our tests.\n\n    
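# Subclasses can call self.make_tmpdir() to get scratch space that\n    # tearDown() removes automatically.\n    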
def setUp(self):\n        self._tempdirs = []\n\n    def tearDown(self):\n        for workdir in self._tempdirs:\n            shutil.rmtree(workdir, ignore_errors=True)\n\n    def make_tmpdir(self):\n        self._tempdirs.append(tempfile.mkdtemp())\n        return self._tempdirs[-1]\n\n    def data_file(self, filename):\n        try:\n            basedir = os.path.dirname(__file__)\n        except NameError:\n            basedir = '.'\n        return open(os.path.join(basedir, 'data', filename))\n\n    def build_directory_tree(self, tree):\n        tree_root = self.make_tmpdir()\n        for leaf in tree:\n            path = os.path.join(tree_root, leaf)\n            try:\n                os.makedirs(os.path.dirname(path))\n            except OSError as error:\n                if error.errno != errno.EEXIST:\n                    raise\n            with open(path, 'w') as tmpfile:\n                tmpfile.write(leaf)\n        return tree_root\n\n    def make_test_file(self, text=b\"test\"):\n        testfile = tempfile.NamedTemporaryFile()\n        testfile.write(text)\n        testfile.flush()\n        return testfile\n\ndef binary_compare(a, b):\n    if len(a) != len(b):\n        return False\n    for i in range(0, len(a)):\n        if a[i] != b[i]:\n            return False\n    return True\n\nclass DiskCacheBase:\n    def make_block_cache(self, disk_cache):\n        self.disk_cache_dir = tempfile.mkdtemp() if disk_cache else None\n        block_cache = arvados.keep.KeepBlockCache(disk_cache=disk_cache,\n                                                  disk_cache_dir=self.disk_cache_dir)\n        return block_cache\n\n    def tearDown(self):\n        if self.disk_cache_dir:\n            shutil.rmtree(self.disk_cache_dir)\n"
  },
  {
    "path": "sdk/python/tests/conftest.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport pytest\n\nfrom . import run_test_server\n\n@pytest.fixture\ndef reset_test_server_db():\n    \"\"\"pytest fixture wrapper for run_test_server.reset()\"\"\"\n    try:\n        yield\n    finally:\n        run_test_server.reset()\n"
  },
  {
    "path": "sdk/python/tests/data/1000G_ref_manifest",
    "content": ". 231e69ef8840dcdb883b934a008f0eeb+67108864+K@qr1hi e14a59b578206d2d32dd858715645e0b+67108864+K@qr1hi 27a4b87e4cf1f85dc3fd917d2c388641+67108864+K@qr1hi 06b3ff80cf45bda52aca0711059a0bd6+67108864+K@qr1hi b036f1120ca429d0a148a5e8312663d9+67108864+K@qr1hi 83dc6b43bf27ce28da50967cd7dd23c3+67108864+K@qr1hi 3f6a4512b125bca64e1fa3d82f1e638d+67108864+K@qr1hi c0a8af66954841dae178e9417a82b710+67108864+K@qr1hi b3b4fb7120fae8b8f804849e36de9b55+67108864+K@qr1hi 2323ea3c93cc9664f35b6f90493ad01e+67108864+K@qr1hi e0b0f131d6f4669d0eaafbd4d72e0268+67108864+K@qr1hi 4274ff53c12dd5821c9ff6b12c4678f2+67108864+K@qr1hi 5d7af6348037a8161b1f932edcf32fae+67108864+K@qr1hi b7d88946691cc0d0f3c22dc3619b2ef3+67108864+K@qr1hi 799dd7f25556ad3a90604e094b538149+67108864+K@qr1hi 41b4d1c38afbcc48c0d463ad37adbdb7+67108864+K@qr1hi 6adcba8494cb0a6f6563e4c92a5c002a+67108864+K@qr1hi d0b7417a3872a5889cdc66daff4da326+67108864+K@qr1hi 7a5aeeb69132524c3e35cf454683fabe+67108864+K@qr1hi 9b09a732903086533e58e1acefc5df1f+67108864+K@qr1hi cdd07f6573f9e1239ef83ba8a02d6bf4+67108864+K@qr1hi 3d802b3e5b532210397b6992d9d9caff+67108864+K@qr1hi 5ce57221bdb69beb1376479a00dc839c+67108864+K@qr1hi 9173b38ec40c457fa3bd36ef89e562fc+67108864+K@qr1hi 05c712dee07f2115b657bb83d18f77dc+67108864+K@qr1hi 5764176e6aa0e8dd1195eb37c10ff921+67108864+K@qr1hi ce5d4f465c761cdfce6a7075e75e4c8d+67108864+K@qr1hi 2f594f1a5028e5954b14aba3cc7edc5d+67108864+K@qr1hi 9cb165ce899a80dc0aba79290d054f2e+67108864+K@qr1hi 9c5c8f5ad6dd0a23dbdd7bae47d9b77c+67108864+K@qr1hi b7afd688ca053e6cdd44f7c7add74c88+67108864+K@qr1hi 0b6c92b166993dbf4ebe65b130a18531+67108864+K@qr1hi 9c0e13bc1825573446cdb2e0d2a13057+67108864+K@qr1hi a64e372054ceb89494e7da42e1869a19+67108864+K@qr1hi 04b9acc199d01058c413c0b61474fa42+67108864+K@qr1hi 4d3f26dd05c50bbc5dac61ffc45f4a36+67108864+K@qr1hi f7f4400a463b1950a9422d3d798ae4e4+67108864+K@qr1hi 88814bec594bc5207ed70b52da08c964+67108864+K@qr1hi e645deb6e2cb633d175636327432b547+67108864+K@qr1hi 82249761efba1a759a94533a0f0225ee+67108864+K@qr1hi ff981edca6999dddb04e2b3528be376a+67108864+K@qr1hi d6e5f14509a26363492aab3775b070a7+67108864+K@qr1hi 20395032fda158a8fddd68281559b706+67108864+K@qr1hi f8dce2bf8d30bfa2306715f355e14702+67108864+K@qr1hi 937e9736f130276bd99c5a3cfd419f01+67108864+K@qr1hi c5ce8d646b2e1f8ceda6a88bdaa42354+67108864+K@qr1hi e82f42c6458660ba3e4aba0bf9f2fa83+67108864+K@qr1hi d2940d4c7155a94dfb31caf6951d9a59+67108864+K@qr1hi 165ffca3eae18d12aebfc395f4565547+67108864+K@qr1hi 8500ebaa55b2b377414c1d627e9a1982+67108864+K@qr1hi 594501b0e794179d7a04170d668358a8+67108864+K@qr1hi cd353db66ea7bba1ce4aa9fcd78fbf47+67108864+K@qr1hi 63b0e03e3a11c00f2b36de1384325756+67108864+K@qr1hi ac811ad87a8a5bc4e23130d2cc8ab588+67108864+K@qr1hi bb2c282f29b1ff680e1ecf82ba2902a9+67108864+K@qr1hi 5d3dbfa93769c1cba4406e616cc15d92+67108864+K@qr1hi 4f70517fc34def9e912ca98c127e8568+67108864+K@qr1hi 7da6aeb8a23b001ef8324ac28996f79d+67108864+K@qr1hi bcaba7f5eb10668c62c366527c5b7225+67108864+K@qr1hi 6fe50b949e8ace7e7bbe4a443b15cd89+67108864+K@qr1hi 7f104154192f4f15ef6760cd8ef836b4+67108864+K@qr1hi 9e528cc6e61ac04ab4806b1eca14473b+67108864+K@qr1hi b9af9ac127033b9639a32b4b4d21a033+67108864+K@qr1hi 724d08ad32a6de2dea78c357367f4de8+67108864+K@qr1hi 8afc63f0435da749a1a58544c290a792+67108864+K@qr1hi 7b6ea56cbf9bfce245fab717b02bc5c7+67108864+K@qr1hi c5903ce244515bb870dbad04acd7b482+67108864+K@qr1hi 491f4228b75e5d9af5c10eb4c7960d63+67108864+K@qr1hi 252bcf6db697723c6bcea11f601be4b0+67108864+K@qr1hi 81244819a14415d52b26a340d0b9a367+67108864+K@qr1hi 
40336ab12de8a3c192dc4275c65e0b16+67108864+K@qr1hi 991ffe3960f9510b26352f5f925c23ba+67108864+K@qr1hi 0331ed4cfbc01ce07053691a31568c55+67108864+K@qr1hi b7d63f0ae6507c7f828c376ef2861e1c+67108864+K@qr1hi 548c0041481a1795913f62321944a81b+67108864+K@qr1hi b804bd2af77b7a3a32ac9512380d94e2+67108864+K@qr1hi a1702dad1c2354c9c85b02191800d5df+67108864+K@qr1hi 8e314c6cbda431e328e2de30ed0bacf0+67108864+K@qr1hi 8930eda1c1c868067fe4514c86cb0006+67108864+K@qr1hi 8ab6885d8e7a65fdff1a64fae78f83cb+67108864+K@qr1hi bb9ca8c63316097110e34d49f9b1a551+67108864+K@qr1hi 280e1ceb75a5db7a72d23b0705e2de94+67108864+K@qr1hi 47172b0b71e03d88fe560c68c3ca5b13+67108864+K@qr1hi 3f985d99b0929ab8869c2d6ee78ebe06+67108864+K@qr1hi f99ce1f7543bf5d6b92bbf1d74bf341b+67108864+K@qr1hi 2d1073b508cde7fdb47d3e0045f12ba1+67108864+K@qr1hi 66d40bfdb5d04f954745e47bd958c959+67108864+K@qr1hi 423753509d9fb12a3b99eb463a8e6441+67108864+K@qr1hi 5d7676bf15ff2c73c84da0d6a1b21ec9+67108864+K@qr1hi 17dd674093f3ec22acb79db4d3c958e8+67108864+K@qr1hi 792f960e48650f1ff4397400d61e4868+67108864+K@qr1hi e44a4987cc0df77331131a4b7ee5408e+67108864+K@qr1hi 1d5b68a793bde89b35afd0abfffa1e91+67108864+K@qr1hi 93474190338acbc799a96e095c1ce6e1+67108864+K@qr1hi 19ad3d61a0729bd779608cefcefd3f17+67108864+K@qr1hi 4effea732567c4def12cb91616fcadab+67108864+K@qr1hi 00313b354ef436c75e89b7d9abb83d4f+67108864+K@qr1hi df5e63c2a4f060d462436f512c54858a+67108864+K@qr1hi ce8b016b1c46ebc3cf3872b26a799ec1+67108864+K@qr1hi 87163cb8078652223da89aa3d2f902f3+67108864+K@qr1hi f85e64b456e64bbb679dfaa2c462a033+67108864+K@qr1hi de8050929d7d89303e809d8f27c422dc+67108864+K@qr1hi adf643ec5085bf757fcc59a47813288a+67108864+K@qr1hi 7d2b01ba0f9be07644927cca7c459e43+67108864+K@qr1hi a5dc9a1d620b49be2da2098bef54ab18+67108864+K@qr1hi d24da6679ba48e4cbe29af40b5f36968+67108864+K@qr1hi 7d9f73079ff9aeeda056e21e64cddadc+67108864+K@qr1hi 47e45f31917f41385dd79c9316e1cefc+67108864+K@qr1hi a8f9a8ca17809b7c2f64aba9a0c13b1f+67108864+K@qr1hi d8d534661a3af310b085323d496f6d44+67108864+K@qr1hi 899756d962d283156f681a461c0588ce+67108864+K@qr1hi 0f28d102f6e05dca547d5a019fe85b61+67108864+K@qr1hi ddd31b0c369f60219e0a65a071772018+67108864+K@qr1hi 7ff925b478d6a51806982f1a32ecdb5a+67108864+K@qr1hi b7afe2d94145776bc7fecb7a8385527d+67108864+K@qr1hi c77b5ee8577c6c89533114803b5efc9b+67108864+K@qr1hi 884661029ffb441e16ca87a248bcf394+67108864+K@qr1hi cd22c4e3fd707f54818545004da24a1c+67108864+K@qr1hi a90b6f132d31540d6a1c9a5e426997f7+67108864+K@qr1hi c9f699cdb3ab822dd0bd9c345926c86d+67108864+K@qr1hi 4b0abce1b72d97eebb652fb438c1b3be+67108864+K@qr1hi d4568db71967719cda57374a9963a6dc+67108864+K@qr1hi cfb15250fb5eb9ac69a18f64eaeb3e31+67108864+K@qr1hi 4d3ac3e4c5a278b91cda0d890db84e3a+67108864+K@qr1hi 40b137b57f8d83e4d508a1a4b95ac134+67108864+K@qr1hi c86b47517a22f1303bb519a7a244e57d+67108864+K@qr1hi 712681dd526dd49add64af4f168254d0+67108864+K@qr1hi b5abb03c2b4fbd01e8997f8c755bf347+67108864+K@qr1hi 50245663fc28f000c536a9b85ae05d52+67108864+K@qr1hi 89e2b1ece4b8a34700a48bd5625c1ddc+67108864+K@qr1hi a06799afa99163b7ba2471f4bc2da4a2+67108864+K@qr1hi 4cecb752f0015ca1b70a95c1b4497c7b+67108864+K@qr1hi e0b110c9e392c0126787a7b1635f4923+67108864+K@qr1hi f32788adf3c77d67b09abc43f2922012+67108864+K@qr1hi fb6e48fa0feb6ee3d6356beb765a98a4+67108864+K@qr1hi 52a464c2209f6e262dc72e485b5df16c+67108864+K@qr1hi 50ed10e5d3dfedbd18f1fbff62048487+67108864+K@qr1hi b4d649a55f537c628bc337729857a2cd+67108864+K@qr1hi d8d5fa84e7199021ed7f3daa9e1d3c6a+67108864+K@qr1hi 7ea980aec5a6d9bc7ae8353b34145daa+67108864+K@qr1hi f9b989d982bbc5b3587a2a06df2f7bdc+67108864+K@qr1hi 
27c8ba6cc8208fa1c220657768e8780e+67108864+K@qr1hi 3ec1187853bc27c62ecc02c7a27f0587+67108864+K@qr1hi 9b34e5a81de59c416255d6e23de498fc+67108864+K@qr1hi 469f6398ee90d5dd398c21ad06b05fe3+67108864+K@qr1hi d0969eae2340c1e1d34d311cc4815fde+67108864+K@qr1hi 2e3e6b8823e39e08d5e697adc2120339+67108864+K@qr1hi 224fc36688f7adb2ec5cc088b09e8463+42902639+K@qr1hi 0:51549666:1000G_omni2.5.b37.vcf.gz 51549666:95:1000G_omni2.5.b37.vcf.gz.md5 51549761:475087:1000G_omni2.5.b37.vcf.idx.gz 52024848:99:1000G_omni2.5.b37.vcf.idx.gz.md5 52024947:45036197:1000G_phase1.indels.b37.vcf.gz 97061144:101:1000G_phase1.indels.b37.vcf.gz.md5 97061245:333605:1000G_phase1.indels.b37.vcf.idx.gz 97394850:105:1000G_phase1.indels.b37.vcf.idx.gz.md5 97394955:550102132:CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.gz 647497087:124:CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.gz.md5 647497211:3555843:CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.idx.gz 651053054:128:CEUTrio.HiSeq.WGS.b37.bestPractices.phased.b37.vcf.idx.gz.md5 651053182:19868212:Mills_and_1000G_gold_standard.indels.b37.vcf.gz 670921394:118:Mills_and_1000G_gold_standard.indels.b37.vcf.gz.md5 670921512:547962:Mills_and_1000G_gold_standard.indels.b37.vcf.idx.gz 671469474:122:Mills_and_1000G_gold_standard.indels.b37.vcf.idx.gz.md5 671469596:29993649:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.gz 701463245:128:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.gz.md5 701463373:578447:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.idx.gz 702041820:132:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.sites.vcf.idx.gz.md5 702041952:38839441:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.gz 740881393:122:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.gz.md5 740881515:605289:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.idx.gz 741486804:126:NA12878.HiSeq.WGS.bwa.cleaned.raw.subset.b37.vcf.idx.gz.md5 741486930:6040047539:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam 6781534469:113749:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.bai.gz 6781648218:124:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.bai.gz.md5 6781648342:117:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.bam.md5 6781648459:3928395:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.gz 6785576854:120:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.gz.md5 6785576974:66113:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.idx.gz 6785643087:124:NA12878.HiSeq.WGS.bwa.cleaned.recal.b37.20.vcf.idx.gz.md5 6785643211:282374229:dbsnp_137.b37.excluding_sites_after_129.vcf.gz 7068017440:117:dbsnp_137.b37.excluding_sites_after_129.vcf.gz.md5 7068017557:3824375:dbsnp_137.b37.excluding_sites_after_129.vcf.idx.gz 7071841932:121:dbsnp_137.b37.excluding_sites_after_129.vcf.idx.gz.md5 7071842053:1022107667:dbsnp_137.b37.vcf.gz 8093949720:91:dbsnp_137.b37.vcf.gz.md5 8093949811:3982568:dbsnp_137.b37.vcf.idx.gz 8097932379:95:dbsnp_137.b37.vcf.idx.gz.md5 8097932474:59819710:hapmap_3.3.b37.vcf.gz 8157752184:92:hapmap_3.3.b37.vcf.gz.md5 8157752276:1022297:hapmap_3.3.b37.vcf.idx.gz 8158774573:96:hapmap_3.3.b37.vcf.idx.gz.md5 8158774669:2597:human_g1k_v37.dict.gz 8158777266:92:human_g1k_v37.dict.gz.md5 8158777358:1044:human_g1k_v37.fasta.fai.gz 8158778402:97:human_g1k_v37.fasta.fai.gz.md5 8158778499:869925027:human_g1k_v37.fasta.gz 9028703526:93:human_g1k_v37.fasta.gz.md5 9028703619:85:human_g1k_v37.stats.gz 9028703704:93:human_g1k_v37.stats.gz.md5 9028703797:2689:human_g1k_v37_decoy.dict.gz 9028706486:98:human_g1k_v37_decoy.dict.gz.md5 9028706584:1095:human_g1k_v37_decoy.fasta.fai.gz 
9028707679:103:human_g1k_v37_decoy.fasta.fai.gz.md5 9028707782:879197576:human_g1k_v37_decoy.fasta.gz 9907905358:99:human_g1k_v37_decoy.fasta.gz.md5 9907905457:91:human_g1k_v37_decoy.stats.gz 9907905548:99:human_g1k_v37_decoy.stats.gz.md5\n"
  },
  {
    "path": "sdk/python/tests/data/hello-world-README.txt",
    "content": "The hello-world-*.tar files are archived from the official Docker\nhello-world:latest image available on 2024-02-01,\nsha256:d2c94e258dcb3c5ac2798d32e1249e42ef01cba4841c2234249495f87264ac5a.\n<https://github.com/docker-library/hello-world/tree/a2269bdb107d086851a5e3d448cf47770b50bff7>\n\nCopyright (c) 2014 Docker, Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be included\nin all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\nTORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\nSOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "sdk/python/tests/data/jlake_manifest",
    "content": "./PG0002577-DNA-jlake-germline ec9ae2be7620af8b6efd96809e5e75e0+1917833 0:6148:.DS_Store 6148:4096:._.DS_Store 10244:1904161:PG0002577-DNA.pdf 1914405:76:PersonalGenome_Mac.command 1914481:704:PersonalGenome_Windows.bat 1915185:2648:md5sum.txt\n./PG0002577-DNA-jlake-germline/Assembly 42a43977e2303a4337129331577d122a+10244 0:6148:.DS_Store 6148:4096:._.DS_Store\n./PG0002577-DNA-jlake-germline/Assembly/conf 95747f01cc916acc1f385ea2d018841a+165976 0:61440:dirs.tar 61440:11245:project.conf 72685:61440:project.dirs.tar 134125:31851:run.conf.xml\n./PG0002577-DNA-jlake-germline/Assembly/genome 6762e2e508fc8eb451f7ec0ea0faee33+10244 0:6148:.DS_Store 6148:4096:._.DS_Store\n./PG0002577-DNA-jlake-germline/Assembly/genome/bam 5a854b86da65e1ff3572f17e088633fe+67108864 9c6c77dd7511dada2e631a3b038b5fa1+67108864 54c6025d7dd4cd8ecccfe573499a2176+67108864 c9cfd86fe5df4b83c6ad8dddd901becc+67108864 c9d7be11850c860e2d5cb0c9e1a44915+67108864 3fb42e18373c8710aed8620fa81db3c1+67108864 f419d215df8195df08394865f719fbca+67108864 d263423f327da62e24867f22fc490706+67108864 40ef57add0dfbee66836365ae0fd7d06+67108864 0fea4b759d870ab6decbb632176fbeb5+67108864 c109c9e484cdafee14c53198c0d6b610+67108864 103f902681b7d40dcde1dce0b79b17c8+67108864 b6b93a2b846620c39e38302f1f7ffe10+67108864 e38be01100ab8d220f06b800b7cd62dd+67108864 f5812ce719898a4d2cb06eebcafdacc0+67108864 6f5fc3c25e8abeb9c079ef801d0f9450+67108864 d9ad4e7e17a79487817ae2ad8e534498+67108864 e5a15e06712a2090e74f78e678b490f6+67108864 69d57fee8eb8dd67906c9a7e9e3e5cac+67108864 68395295a64f0d0b75117a2400a4c1ee+67108864 497dbca0f9dd89eeb2e8dd8aed9796fa+67108864 d14e96347585f714ed527fb0e2e78be5+67108864 03bcad873029c5778e59dd4c4109f5d7+67108864 0bb736900002c0c2b2ab31a6f8009fdb+67108864 22509a92aca7813b2363661ab233772f+67108864 c23528b87c5581e6d6ad538ff007e3a8+67108864 784f055bea23e35078c9437eaa233e0e+67108864 76a724595ce0b01bb51c6a948fa2f5a4+67108864 c52e5205c23728d439e90378267ee382+67108864 e4f2400071c39af9661de0662018e984+67108864 66b8a86b349902d359dc6ac9788e0246+67108864 98353a4694dbc5eae4c3960d2cd7baf9+67108864 130d8c29af7d0a5468b854bb1758dd4d+67108864 9741fb84b07c6282bd6b6a002ecdc15e+67108864 7eaa6674cf09efc2adfd14a7b169e357+67108864 4071ca2915995af71dd9da03e8c6c4c1+67108864 888673ec3ca2787511dca770f090c138+67108864 21d21d390663b32c3738bebdd97c32e8+67108864 f575e7b15bc194bf98e346bb4b2788a4+67108864 b5d983d92f990f90622be2232ced743b+67108864 0e6bd01b5b9e49b1969b5d3817499476+67108864 481144b5d5ab8a1ab7235fe64bdc872b+67108864 6c8f5fe6d45e5719fc6270748015db12+67108864 68966f5d47a96aa8731c5950fd1f467d+67108864 4d48317b33738dfa96174bd5a1dcc841+67108864 24f75fafbe546fe366f7a6d465240f8d+67108864 8711ad611384bd7f0e89c400c90cfb7e+67108864 2ae54914c5bf92a8cbdad755e8135b1b+67108864 e11116820a72cfed92d64bced7172187+67108864 4cf0a16d830cb8c91837b3f24093ca2a+67108864 196a74cb26cd1bd8e540d22e46fa4302+67108864 92982486ba3b6d370cc31ccfddfd8110+67108864 6c5e160bb75c99ed338bdbbe9c968955+67108864 d0a48c19f2a00ad5b3709326c428b3d9+67108864 877890001e22c2d88375892890e634fa+67108864 f3cad932b80e8d20c8f67f4239a5c279+67108864 15d980c196e5d7690de660a3c3400154+67108864 964bb3d46fe32c7b3ff9077c96a32a0d+67108864 60107013728b9a04d9822588613cc1bb+67108864 e265d4a367e48f740f6fc1c881fcb053+67108864 4808ac7dbb6393908ebf47f453e659c2+67108864 1442fbee68d6d39576d7de798d293859+67108864 c9a16c84c579e83b7423358b7141d66f+67108864 36874b0c65f95e43a0656adad0734d8f+67108864 b174dadbcc463d64b80980a4cdd3fe41+67108864 518c5f4c27b2d5d2b42863e2132e0320+67108864 ab658f08b58b199860913a4b55d68d1d+67108864 
92d9f01091907ec183981cedf2f26b37+67108864 539a6fe604549facefb68c4cfc7b204f+67108864 31e641b640af914fe6a2e4bcc31c1948+67108864 ba1e9349e65c068170b62f655a300d3b+67108864 131ff6d51b65974c9d366e81cd608d57+67108864 4ff4d286591293acdd3610373bce6f11+67108864 07cf4b86c65559b68689849c85d8cd64+67108864 051cf1cbf95a08df5c18fc2e98f705a5+67108864 2737c7e7ea93ff9a2c5b2cfd76c06022+67108864 60c00403a73e9b0c40d4b137870abe53+67108864 64068170004d472feb843e4448d8d121+67108864 e955ab9f87979be9d09753e843d36b10+67108864 180692a3f95576c265370680a2a2e052+67108864 cd7a1d16bdef3fa9474ca0a5b21264a2+67108864 4ac9f4d038b4be59a9a2808fef0fea43+67108864 c4947ab9755e190b5ebc9ee9bdc609da+67108864 96ff83a8ceb80097fa25b893b88104f0+67108864 e893caeea6fc8b2f39695b48bbc16362+67108864 254628d570c39863ee1c7fd1a0750196+67108864 2e24dd5c0c0696e79e674e85a3f12934+67108864 1990250dc54ed2e708dca7b4f9a41c18+67108864 8e6e40e53a663018974749ceebb1b98a+67108864 d84c21e61a9cb82dc9c2a7674a51657f+67108864 95f61628504580fa779eeeb96c0577cd+67108864 19f4baba83495e69c94ad52d22ccc2e7+67108864 0b9a91c910d6bee96f8c1caf129a10c9+67108864 0fb342be2b7e50536485fad5fd98280d+67108864 482ccfbf6dc6d3cec1c3a58982f40db4+67108864 b51612ee241fc06eeac1d823abd70858+67108864 14ff4c77a9a151b0062338c1d5b30242+67108864 9c37ab2ca3179d71544b2793cab38918+67108864 3947527126983bccbcc795c2a922dbb3+67108864 df6e54c39b918a1a26a3f557fc61c69c+67108864 94ae725934b1aeee78db89dec7e629b9+67108864 b08756a57d798709bcbb60d448cfe703+67108864 50de90bf9d8cf7fdc8ff54a5a6ff6f5b+67108864 24c451aa9481b72c0f989ed264198730+67108864 b1f79cc8b5729ae7523bd5d0d7c840b0+67108864 9b5f5392fb4d814ae13d5106bec28696+67108864 55d49c99f24cd778295f3637daddbd4f+67108864 e18e2c5dac882f932e1e4e4cf557b572+67108864 453f3aa0d1a9832e2dd99c44b30cd3ed+67108864 57f19b58f8da911138af4351c6aea5a9+67108864 5e3843ce5bfaf6170a16f26b4e4b1aac+67108864 597919fdf8ff48500c264d0c9f7439f5+67108864 b51bb4c6be8618ab377787f180560048+67108864 ff6a60a658e75febf565ca3cecdd90fb+67108864 9c993d598ac6a696591249ca115169b0+67108864 283ecc93e81dbae4a4cc8523e0260fbf+67108864 ad8ad32aec1b8df99ae91a3580908f96+67108864 d3c0d337118c727705fc5de57e05c235+67108864 7fc7f73d3ccd5c6205c80f845aae7ed3+67108864 4e555e6ce17d792bb5982232fcf70405+67108864 320bd68a5462576ad726c129f6941844+67108864 7ffbde2e853e1e9b0fbfdae63be37dab+67108864 ae25a4cc941178b62929aae15831ef8b+67108864 ba24657691267443b2e7bf6b6964e051+67108864 1e9e12b68f26db8c8f7c7cc788945df0+67108864 edf4acc2527cddf0e20d009f4486d7d2+67108864 baefb0589e1516ed42fced78370f8710+67108864 c037629c703f4a2c1df194202cecff0a+67108864 944ceef59fc6dc1e2dac2d8bc71ef6fb+67108864 d955e2eee34d9873d36065870fe6904b+67108864 eb4b9530be56e0bb33b9827279990dd6+67108864 559d40ebfeb35b1d88e797a3e9f8fdd7+67108864 623e4c4f5a5ae8c466a7b52bcf6e830c+67108864 5790327fc52b0d57357ec948331ca387+67108864 232a548bccb18ba351c681ddb0f84d4a+67108864 e4797b5eea62b9980478d01e7092a05c+67108864 6b888368142e02359ae8241daa0ba99c+67108864 57969f6dd6f8f56064c57ed4929d6c09+67108864 10f07bd7b6c3b8bd55e990cc6b65de37+67108864 ce126a25374dfe6e4cd91d27239e3606+67108864 536c1ff25f7334b3eee12fd61cd4c88c+67108864 07f03c33b0a0ca3d76d31e65aec87a05+67108864 156cc2e702d5706c6dcbfc65937e0ced+67108864 ed19220354dd45366dda185aa6fcdda9+67108864 f8f3c1650d6d9fd1fb81defcef4f5e37+67108864 157836f6e84cb522a520b8e00cbdddb1+67108864 ea34080f4a2c67c8eac2d204adae0214+67108864 ceff9152067f3579158d38f67ccfe3ee+67108864 d42fd7b0f9afa30622f19cdad886e510+67108864 ca252dacf27733bcd1bbb0fee73dbcf1+67108864 74bd8949c0a3ad016dd2fe0dff389217+67108864 
c198a863ca7030e39d8876e0f84ac8fe+67108864 6c63203260faceabf16fed782bee1e6a+67108864 d5d3f52e5615f06c301d8ffd43bdcaf1+67108864 921c874804339001b4122f0302cef8cc+67108864 e0c59c1f377fe5cd9d6ba345edb4f519+67108864 fced978955c7d8526a6dbc344eeff8ed+67108864 4da3bdacd3c86355934b49b053f09650+67108864 973880f2f3c56727b95dedcaa4d0a60e+67108864 caf2b7a8550df7b241e371efa2fe692d+67108864 d7cddaecf10aa58a54daa2b7b2a02e11+67108864 2bc6e177da10b99fcd6d091fd23fb155+67108864 de817551ab1a9cd5dd51c6e81e0e44bf+67108864 f137d5fffb9fd4cfcf7aa4926f3e99c0+67108864 9d17da7cf8b872e2a826fff2c5374142+67108864 5830325f09fd13cd12bce3f691aad968+67108864 8215a5983c95e23a63c687b642f13083+67108864 11f76f54f3d0552c10299367a10cdb22+67108864 6efb7fad52edf9859561e9499cf510e0+67108864 a088c12c4f3927e74ce4a4c5f8760621+67108864 e0f969e633f92e756b1094e28839bd5e+67108864 ab0b88f96c9ee1916797ef424b683167+67108864 dbb6ddb82e96ffecd55bf7b7f7a512ed+67108864 9a055029ec6bd836c85dc954fdf7bbc9+67108864 c5f10798cc176c312c605fff373b3a17+67108864 05ed65cc2bb638c886bcf4ffce6dfeac+67108864 c4a3be0b33972b566228dbd5f47d7e5b+67108864 57ef58cc15ea5d8b8ebbe8420cd06b55+67108864 274c1514d474e64f3de02c179a82956f+67108864 810a77d1d7ba6a20e82c8910f5e70763+67108864 42a4f5e30a2f86bf11fe622f7a618ae6+67108864 9ff7eeca6c1879bd22dd86ce89349276+67108864 913bb4a677d4b4c7c131fd3b1cbebf06+67108864 9ba24586a78d824ace0337f321d2c100+67108864 2f892d650527a819ed7d499a7e09a3f3+67108864 b25bda0cfe2c4344d1703fcf3a6d4b8a+67108864 2784662ce216490c73fe67fbd52130fa+67108864 e79a6367f486e2fae9e54d0020b0db17+67108864 2f24bfab56da9e223c37b44e1f1ce9b2+67108864 b50f659dd903cb6f9c707f699d21af72+67108864 b94162503a41517f5a419ee397340005+67108864 6231562ab94c3be051ea9087fb8401c9+67108864 ef51cf3cd05f7810b6f322adc187ba6d+67108864 5d621ca36e6f35cb4f057b117d372e50+67108864 8b8239bd23a8adc2cee1f8a156d85e85+67108864 66676e37602149cc7d37ccebe484fc03+67108864 f91cb726e6ac09145c242752c4404e1f+67108864 ce73c7682c4fe26b98ee72a730010746+67108864 8f1642675b8f47e6cef4ce6a65e7dddb+67108864 a0c772eee3f139b1831cca60ce49b347+67108864 547699429dc8c3cfcb1e77e7618f2f17+67108864 02df0efc955879c3f2501fa8b0bcc801+67108864 a701fcf88e400f11220ddc2cbd83a73e+67108864 4c1721a042b1a9437c3eef71a58754dd+67108864 0a5fe95394f9f497e66217c117bfeb42+67108864 f23e04ce1d188c78e33f258b45c5ac8d+67108864 f3c7badf1ec40e33b375e62971320278+67108864 d2242648880ab86690ccea9f98275e43+67108864 8b397dfb9224fdfed3859ce86d74ad0e+67108864 09153d492860fc9d412c01785340798a+67108864 c3350ac90cf4d559a7295a730f18d20a+67108864 baebf81afe44cb6b42f5f6e6290ec0fe+67108864 96cd500b282c3113007c697612444603+67108864 a73c14020805ae3c1925ce55eb2e5971+67108864 433ae5411010c6213478f28ad6cab58a+67108864 3e042ee7b88f2d28bfe5b98b44f3be6a+67108864 fd9dde59a76b04cef85e85d61d02e6ad+67108864 d16f4bbcf2a0e8fdf932b54e35695844+67108864 efe1099652662006f509b99df819f1b7+67108864 613fef2dfc96ad3305006cdb8dea0728+67108864 b3eedbfe69eb52fb6bf7d21ea1c3bd5e+67108864 2f1784343b9d2cc4682ee293d051500d+67108864 62ac15c401cabc308f91639667229e0d+67108864 2afc3e34c284f72faded649c0ec3b72c+67108864 1399e4bff224e69402319a61c6db3dd2+67108864 533d737e352255f8f1b65622d92fe9c7+67108864 203eeb4e4410b7a59230eeac6d8e6e71+67108864 c8bacf1552def30bf0a3525bb90d4ee3+67108864 de0d669dd7c9cc7c394eade1f4a11100+67108864 b995714c0553fb41ae5ab417579381f6+67108864 5f6be59b124ec208b9e4d461fe0e5321+67108864 fd8f0a64895e2d28f52b94e980d4a6e3+67108864 776afe2ccc11fc9b9b9bcd1637ef346d+67108864 95b8cdfe70f64a49bc570565107ecf5f+67108864 26479dc9fe565e63bfff3f5850d77998+67108864 
8da85395f1b08cd52869d77a7364249f+67108864 66f76a05e19e0580251ede71e2958e92+67108864 5ff92a30a1aa13ab0a42320b77cfea45+67108864 6f3f8613d13412c9530801a9a3f35c1b+67108864 50dc0b3a14dbeffd71b69058275b2d37+67108864 cf19857fbd6aa6f6c3c258431674a106+67108864 e0b084b9493be695c18f5d3cb42fbaf6+67108864 3aef149ddbecaa8ade6fa0afbc9de9e9+67108864 3854fb0bc82e15b33cb2966f5be46239+67108864 06b43ce1c20a4bd948750f7214e40f4e+67108864 2ee5b68ddb703c975e18319a67621c7d+67108864 2306d047cf9aa09caa1b2a9a0c541896+67108864 af2c70d38e589ebd8c7649a90cb065fa+67108864 71bc4c0a4c2d00cc34988fb0ecd1285c+67108864 0c2c0cbfe580ec70c9cdf1b8ef31d4f9+67108864 cb15da81b8aab0167e184f2ee2b78b2e+67108864 003e15c44f959ec8e46327b05e79d599+67108864 6ff440ea12517ebcc7636b07f840b7ab+67108864 7a40e62a496ee3bb3b5fdadf6d2af3ae+67108864 db1e814bd3ae2f39083a62aff4381f90+67108864 7c68aa83cb27f585291a835b4ac5ced3+67108864 c7a7efb80d3347d4f7a13618ba5aa3f0+67108864 1411ce7884539214a1e92568231f235b+67108864 02a2b2122889b53f45667af2a120d01f+67108864 ad22b2a76908ccde5aa8adc29df538c0+67108864 280e52e831b65105f06a38a4f67b6402+67108864 a5f9439149211f3a7d25fa11de178fd1+67108864 0bb88f24287ad72b8d58c536c2e34663+67108864 417d3b1862cb8df75a7e8a210f15e3a0+67108864 a441aa0f23faba7c3cf66def56e66004+67108864 f55322a307d173bb797924fc5f4afbc3+67108864 5a9a3ad6dd8ae415a0e696dcd40a2b7f+67108864 0bab46dbc9501d08a4625629f4d5b860+67108864 67893ccbe30016c1eabe3d2c8a80692f+67108864 cc8357e24fe32c5cb15b5bc8057b9f4a+67108864 9e9673069303440d7f9fbfbbd4092044+67108864 f0c3f8cddd5792cf5eebe8b79dcc1bbe+67108864 21ce8204bbdb6f016f11fe9ae5f6020a+67108864 0f8a1598ee8c418b9ce7c34c175bf9c1+67108864 d7c5a77e5397635ce1430ed537183d5f+67108864 56b7099cd423ebba5cbbf10d5e0d2e7c+67108864 1c9804af0c110daa81ce966a7fbf41ea+67108864 163197488a7121c0e9a33bc0fde5ec51+67108864 374b8b0e3949983e1ac9fba65a3e4024+67108864 ced30f4e0c46e5659f364248955c1667+67108864 5988fa40843d6a1ca07b957e77c8ec12+67108864 fe2111482b8ba2c58e2accfb309b3b72+67108864 87a7d5a127d9273d55f5dd0ff4760a13+67108864 a64150d31725405d33e689ab39377efe+67108864 c8abd310e6150bd4afadfe16e7d256ac+67108864 027fe6eb0b09a4cb755d1d62b0a1423f+67108864 a9b655be082bfc1c61e21e0510153776+67108864 aa2a094dca5481091ce7ba08852b0903+67108864 eab0206180b5c06bdc9202aa6d7e0eaf+67108864 d84e88e3bf76cb3a4a44bf98eee1d262+67108864 9d39e5294e994cda2d993e06770d9b7f+67108864 427b9a850025c42f01274fd92bc4ad51+67108864 6155de11d88a0a5e81c04d75e240bf37+67108864 b700a6e431b551630b28368317194aa7+67108864 2d5a90b6e65546928056048116c45dd9+67108864 c9ed6f6d6dbf42ffb3b377973ca7dba7+67108864 08302c1433b0fb80ca8dfff9e6f74681+67108864 92db4d53c6888a14602556bbb79f835c+67108864 c21fba4899004a955ef88e4ab3d38941+67108864 de1b9dbb2f511d2ece9d9837323101f1+67108864 54180d64a3cb966eca0356ffaea49f26+67108864 381354a3022c66550fb4e44d8034a141+67108864 597876a4a4464de636f8a5fbf3e543f5+67108864 c29963efcbdeee5b08f5f1575a1d6092+67108864 fce9d2b920f580ea8779f72fbea8d964+67108864 2effefe2d35c5ba29c9e2e6acc03ed4c+67108864 e04da28dc87c175eb01704e4b51fb4c7+67108864 f4adcd9d6ecdbdb8c2b91ac2ad591d59+67108864 53f8e4d2781414ad30e4479605b589c3+67108864 301c286433798a684f2d721c7d1f5c03+67108864 4f5e4d9c261ec572be8c117135ccd8fb+67108864 d9feb7b8981f846190b4e4f8c3075164+67108864 ff9343ec031079a66cd0a5560e4ae9ac+67108864 c4abd8ab5a655d3bdded0516ee442db1+67108864 db082ef8f2e3ee42677b34ccca86b5e3+67108864 6fc83a81ea15c177c41b2a56a736a098+67108864 c09ed63c14f57b8218470f6b0ba1435b+67108864 3c87cc66c90c9f0d995408df69305209+67108864 9c9e9b96e4d2970f55ff3f5930495d44+67108864 
7572a36b461e6c3fee3512d8582aeb61+67108864 948d1d1e7c8ddf237fa910623c963568+67108864 84fea0e98e7718e039bc9d6c1632586c+67108864 9eb6eee60f28e1d075f71d5cc0447bae+67108864 3f36fc2738992c1135a4e80a28b28b75+67108864 db81cde123e9bcd6c291209d385129c4+67108864 e14d1a2b49e62d23b81fc9383322dd19+67108864 c79fa6a3288e27bcedee730a1f0d54f3+67108864 80679d686e1a55b5b22a9d250023c2f0+67108864 114434b3a95875165370385cd5adfcb3+67108864 83c5ee9bf0da6e2419913d4954d28e35+67108864 8c26ea89fb32a840068c2b049380ac15+67108864 afd1f950225dc30b68260e0a85297e72+67108864 35ca9657c4d2ad957b3d5f9f56a3f2c1+67108864 6bbffec1991d0308e6fefaa7a260097f+67108864 0e9f68eab1ff4f688863e192a374846d+67108864 eb672833bd531bfe24f1246628a3dd8e+67108864 7a3eb4e715458a189579184ebdec2215+67108864 1f34c4808c3503e1993ccd80ce850ed1+67108864 041b3c92294ed8be56bad1009ebde213+67108864 a88c8e6b793f189af93f8c2bae915bc8+67108864 af77c7d4cadc116f4c78f46f9f648edc+67108864 4890bb9d239182d6b11b12f4322b3097+67108864 4707a2b5d2951423b9cb5cdf32141783+67108864 35ba9cc88e7ad5b5c98e43a921f7ff56+67108864 2eb75b8978046345cde58d2176af7170+67108864 add1039d192c6e42526cf2b6afa9bd42+67108864 9a477df1480fd7592fd2ce32c39f7406+67108864 6fcace189ae0cf89e39c98ae8738a85d+67108864 5bf37955e0e45794d0390850401964a0+67108864 446e566fd032785f676ebdd3ba1dfafa+67108864 fdf9c3e975893f7c3df285eaaef9d69c+67108864 9befa5fbe2f43cbcd728b81508369f97+67108864 d862dfb30026a97111e96504a3058e04+67108864 78f0cd0b88f301624b5f104fa30858f5+67108864 4ab8f486d99a3b4fe66c9c348d6fd907+67108864 580601b7db8d4a0c9adbb2e4fbfdd486+67108864 9cff8f97cc34a751dd58feb19e416e37+67108864 63ddfe988e3fc9c4b88365d69996f4e1+67108864 b56ecf4f9b696c9453ee8dc948fe17ad+67108864 81c2eebdfa8ea6950cf57b863175e2a9+67108864 da2e52c5fa244bff3d5c86f7b702bb25+67108864 462e9f5d6031574b2a22ad60f8cab54d+67108864 c445d18f7531e24f0a160a7630b1b8ff+67108864 1974531544580a294bc8ed247b5786a4+67108864 21e93b14a1f1e35264a62d1ad97a3501+67108864 5c634b78ab66b25b93a6c019dd10307e+67108864 40c7314f60f524b551a121a98a3b89e5+67108864 1677007ea98f9147329aa04fd618fe2b+67108864 a79c00d0291a392abe73c0c162649ac3+67108864 15bf1499acf1c24c8b66ddb5c2f7588c+67108864 2eaea836bff8cde4519db6cf2e69b05e+67108864 1e5ec5554dce9eede58caf8933ce3596+67108864 ac1078460e08af47d6c5d2ae1dd3b746+67108864 a6dea03259087ab7207a5f5a1ef679d0+67108864 78ed9f9db2e9643daf166658b952c127+67108864 c5d12b1cff4bca0c8d15e1bd73dae1d0+67108864 c883939a7cf896bcd2f85d850e71b07d+67108864 fc16e276e1588daaee99de8018df0571+67108864 7a21a92e38106e93ee8ae7a7381329a7+67108864 8eb5b51ffdc62129fe0eb28e9e38b489+67108864 d9c70a717133085d230e29c83b495676+67108864 2eedcd7fe3d79c6a9f2b4aaeaad04164+67108864 da5e919680b6ea8d41b8758e7086133e+67108864 d1cc0d339784a72a4f0d263464e071d8+67108864 124967dc391185cd924e86e2d9ff3745+67108864 e6b68f6b97dc9e2426b699b3140564a9+67108864 6cc20ee8b272deff8980d2b3a4477e90+67108864 dad069a6ba6c47b35483cc9fa3885d53+67108864 b0eda0c60528e29f5b216012d4616b1f+67108864 eb5e3a335f47c063adfef05de7b4667c+67108864 b545b5ad590984a3f478998f5696a72b+67108864 f73ddc8d5914626ddd50bcaae6abdeef+67108864 62793b6bdb743f9448ccd5740b33680f+67108864 1a84b04f18a2bf3cbf38ee71592361c6+67108864 518bbaeeb370f12d449c8d63946966e2+67108864 5457694f602380c630b15f36ec30981b+67108864 b8b72eacb6589d8103d1bd9c05759d9c+67108864 122d43c6dd8f63428580de4e2a3b7e31+67108864 5f5bf06a26037d6e683b088559a7853e+67108864 487e0c8983c7aa7a9660dced20fd75a4+67108864 0c4bab937d54d2b41d64874d05266304+67108864 e585812acfbdcef8b863cc1c55d3f070+67108864 a636fd1e57ba72c9710a20b0615c9c36+67108864 
7ec1d0e5a83afbfe0848b1d11a192977+67108864 6206fde59d76ea51aa58dea598f35df7+67108864 5541b1f4b2ea70f386f99fe2bec7ca8e+67108864 2765d0875069650b3ded5bcf1057748b+67108864 76fc47f77e38cd9e600e0eeaf9e30b6b+67108864 c158c77c833392aa67e8499812e5f83e+67108864 b180e70bae15ed646c3f53e3e741651c+67108864 c9270bff3ed8f8b6676299d470ca10b5+67108864 50e72e004bc6fafcc55c95af20b38cb9+67108864 c4b5047e53e0330b9312be395cae7de8+67108864 8471c29a2e2a64144cfcf81102567306+67108864 edce7e124a06247a4eeaa5961def2fc8+67108864 83623c9a99c45b228a04d4099be110ff+67108864 98c5f2deab5676fc460e553c792fd5d5+67108864 583fd68a679773af986ddea8c2bb2323+67108864 2bcdc93ccfb5601210573e985f54dfb1+67108864 b36651e5cd042a6d221dbbb370fdf595+67108864 e815d8da6a24f58b6079500dcb6dde92+67108864 3ecbd8481a063f405a5b87ada486e229+67108864 559159e2bbe19ec78551d4f04ca71219+67108864 207965435d3dfa7dd6b872205ecee670+67108864 09c39df8dd0ed895f2f06f5fdf331f44+67108864 222acaaf41033f052ace00e4f633f8de+67108864 3bfca4b0dddb38adf015657390aa84b7+67108864 ca141e093060662c7fda108ce8467b9f+67108864 b72c6eda282461590d16f05cc5b6b6be+67108864 ee2d50ded50608024eeb6e2a375775c4+67108864 97aa77b8d241de99d3ca107876e59577+67108864 5fbe7fb61babb4c6ea78353db046577e+67108864 22eeffd26c93bc1832c08664e567f2c5+67108864 ee46575fb609641bfc79f45d0a552263+67108864 af222b7594ef2a21968e8f5a355f602f+67108864 a3196fe7b6d2a992ca31d59eb86a4884+67108864 f86726c23cb938537bda8f12bed90a0b+67108864 92f65da548b38c97fb87baeb414b985a+67108864 6a379105ad86d53af712cf270cf201c2+67108864 a6342eb8f300adce14b3dceb65c48042+67108864 68c0b82d38f8f55f234c8b864f3124aa+67108864 9f836e76ec05bc98614f88104805ce94+67108864 9698568c75af9181b50fb1aaa248a17d+67108864 ba20012679f7e77e55d5c0c8d4c9795e+67108864 ca7637fe43241795a070542007cc0fb1+67108864 73b586d5f610791131e73f5e5e2bbb33+67108864 ebaa2386120e44e5c3b003ed9afb1a1a+67108864 fe94d8e9d03089abdc49b5a91bd0a5f4+67108864 549a252c97994224d3f1878bdb2d3bd2+67108864 b4f2c8c6c2a363005dd59ed295f7a372+67108864 4d3df1f5848bad821d9aa9ba4b906e5d+67108864 29214f6eba324b577f3e57eeed313d8a+67108864 c2b2b768109ee7a185389bb2c03c9463+67108864 430763efd090fd67b9d34648ec769ff3+67108864 0b45e8e8635ce07dbe95511a0f73a58c+67108864 9672ae65f691a9906a30eaaf1f412456+67108864 2790ff8d6a65162c64a8020737c056ca+67108864 71deb0da8ab7b2cb8ac31bbbd2153b7a+67108864 0176831f5ced03159f23d1605f09c0b5+67108864 a214e08c45eb9659412f7e1da66f8393+67108864 082f6320725c4353e1713e5543896ceb+67108864 2ea08c52cc0a421cdb787af6093d6a8f+67108864 293b457808839b5fb1c96b4ad5307f64+67108864 a8bd1439b2bf08516f48867f2649c92b+67108864 7fd29dc64c3dd3945eaff1cb9159fbbb+67108864 e9de19748265de7ae96d9b7d9bc4f021+67108864 2990043a6a02592178b3d515cebbeabb+67108864 3e22d44e8668c64b812e6a69e3fc9ba1+67108864 17a9aedc84771d8b0b2a3a8004f1b5af+67108864 58015b5a67f8bd5924732d5821c0cf6c+67108864 00b48587ac18be30d2ee3be476f753c7+67108864 8c6e61d5713cbe261f9bda849810b113+67108864 6687b061aedcac8b3c0397ca06cd7454+67108864 a680a002f3165cea16d65e66ae764e6f+67108864 6a5f349fc52033d3e94f98904ab425a7+67108864 51eb722ed0402ac5da6cca70a5ec5c17+67108864 0b71fc77e9ac56ad924716419c4b14b9+67108864 cfd35aa4526e8ace42fe58ae778310ba+67108864 c189e3b91cf3fdedd4f16420da6edada+67108864 90414e8a86c002ad919e3fca22698f24+67108864 4548a56cc0285c88b12cbd149a8c33b9+67108864 57ad0742333b640abaf99d2d76251bbb+67108864 c96c7bfb6e4e5e91eaa95ef4a7f0063b+67108864 0a2432298689a7930310d505a8b72b25+67108864 68bf60dbfd64b9e42a4ffe985ec0ed1c+67108864 82af1dfa30b23d296c8c492ff49d363d+67108864 52012e0f2502a53d1a7e6bd0b53c1f7f+67108864 
b7a38a5eb68c133166608d6894c40323+67108864 99a474347fe2aa2f885d3e03b41c5a13+67108864 3a7812b49e18c60e8ffc7e06f3f611c1+67108864 bbf3832ed3621c86917153a27bcd6c96+67108864 f7c80aa5a5c8912c7b374d17e53f8812+67108864 31d27c0506420d4dae49bf3500171123+67108864 5bf3a9a223b785637a976d8a2314d15b+67108864 0e2e4d0f02b414d814bd8f142a63c611+67108864 e0cd118bff0722657e8af9d6f7ab9387+67108864 aafbd2e68aaddc80b1b2c9a58d34d912+67108864 31bf19492fca47885d25ff1f03458eed+67108864 22ec120fd0738b9f641e45b0062eba27+67108864 218c385c10060cb8ce8d1af4df3199f9+67108864 1662e09426794ba9017f0b2ee3bbe056+67108864 62814cac58ffa10896b0e1efe404ab07+67108864 aaf6128b4e32d506c09fc2ff789e4659+67108864 fa9132261ef770571d448da6e820f05c+67108864 7a155a0ecd60a1a3802f9a3dbbdac312+67108864 b5c9b5769ba7f38b516dbad74e4b85f5+67108864 7738f2fa92dbe1dfdb7baf21db826cbd+67108864 07d0c63d6265000dbb3900229e27ff72+67108864 ad08ac30caaa64b0b1c6ae78ef1eeb70+67108864 14373867dd0ea7c9c7ef2f5c8f6ab326+67108864 6bdce1b8e03734102e07a68a1cedafe6+67108864 582c8263766cceff15f0e1aff84998b1+67108864 e71f57598751ea906c68ff46c478c76b+67108864 a4804871f087cd8fe93b888e274ee024+67108864 144967b90aacf70d4265a83656223e35+67108864 4da6c6e79f38ceca986d250f6e932096+67108864 9bae8b1ff4292019c53888841f72a982+67108864 f0ee2e68deba6b63c248003cb82f71c7+67108864 a5ec67f66515b948dfa49ebf4c85222f+67108864 04c73a8657409844f66619031721cf93+67108864 1da9010012af695023de6802e1f4cd96+67108864 d7e4035ba647ca2aeb4f218a5fba7f65+67108864 ea989512fa8076eb0fd44a028d460924+67108864 c8e3fe48f7b331d0b264f302275818b4+67108864 a1f498f4b71bf600f5416509cfb3e65a+67108864 24b0c787a8d21be9435748465ec26515+67108864 d11ca8b28ae1b8bdba9ea71a78ff73e7+67108864 0017d2c46a2e0250eb5c6d75a9045f92+67108864 bc156095174bc4814efae2ed49367fe8+67108864 7f127376f0cf418c605c5950a4bfec42+67108864 bb9e9b067b0363c6f1502b444c0b4472+67108864 2f6b3a746f5e8cc2254ed9cc08450a3d+67108864 196f05634db6db0b069ba240e6a646cf+67108864 5493c7d92b9710993ae12ae788904d3a+67108864 845f7edc40ff2731224713ec3deb2575+67108864 f050395dfec775d38db5b0d57ea0a581+67108864 4e5b768d18d85067d93a6a519ffd10ef+67108864 5cf888ffe5474f587318b36460ce0ad7+67108864 c3be66ebe0fd039a13cf0fae05774afe+67108864 303023470389e4048aae95d8bb34fe7e+67108864 af06698e1893e2704e03f722959d7831+67108864 57e7124fdd21f6c580c4ac374861a026+67108864 7dbb565e8fbae60e3597eb711921acdd+67108864 3e6550d9f748d441dca47757cf98ef22+67108864 178423073bb92104feabc8f7708164f0+67108864 98eeab6311f4d210e230573b9d5d43aa+67108864 8e3feb1831e7ad494dab2ba877831944+67108864 32ad56f602dcee3ecd0b26b33f439705+67108864 13fc3c90af586e0171cff2c1ced46af5+67108864 15cab69619aca7efbd9dc84dda97c2be+67108864 f734bc52b02ff5e7d00ff35469495084+67108864 2eae852de4af35923e335c11ab9a707f+67108864 fcfcdae7f7368cd3f4cbdf5bbe4c9218+67108864 da45807d889e3a9c7fff8d5184101bfd+67108864 9b877fb32436c4b8b207244e01e1874a+67108864 fb1c79d9d336df91ee9e8f16acab2866+67108864 cc45d4bc6c390fcc65ac41cb478414ea+67108864 abe9501379c619480d0894455896c4bf+67108864 8272118879a5ba38ade3942a7678dfbb+67108864 e94ed5c5ae2b7b30088b5b0f15f7b070+67108864 56586f0d6388bf0a191685aea2884b17+67108864 2f78d776307e364cac16d82b7ce20c3f+67108864 819cbd489af5fa54f388dc255e156012+67108864 023ea9e60186ddf1cf51b39abd8d9b89+67108864 52aa46153ceef085ac1c81b22f8fc7d6+67108864 e79234af9de91a7a55c5c96097fde9a2+67108864 eaeb9ddcac5b8cb42cd5c311995e5923+67108864 3c98cf812311fbd27ca8562640969aee+67108864 b8b1496bfe192d3863aed28169a1a5a1+67108864 7c0055922279d093c461459d93be5d6c+67108864 89109de3468e0c004a5df2e6ab4aaa5f+67108864 
9fdcec9a7189a195b82946f477e8bcf6+67108864 2e924831c2241f3f8a9deeb44013e8d3+67108864 1cf6e8b97738a95881063cff3c1d11d5+67108864 7cc135fab1e7d88c70034bef501ff218+67108864 b1e2ed6915c6103dd5b854cc064271d7+67108864 7734fa966c13cea05fc1d67fec540a83+67108864 983e5f5a2f2632e9ac307f343ab18362+67108864 7d0fc77105fc9c1326e81baedadc2611+67108864 8e623fe4072c4e9ebd146d836c725076+67108864 06494a14aa546919573fde57a86ba94c+67108864 ba0b2cba826fae4c98c807b96fda418e+67108864 ed01c980866649a1a8dee6d43017aac6+67108864 f65de1fbbaa627a0e8388905bb9c2272+67108864 ce6ce58654540a18de19258cccf39b0d+67108864 a496b4869c4b258a443a9ef49073838f+67108864 d5522d98d48cc32f511b54efc4b85076+67108864 d82c6c2cfcdcacbf396f782a66f5a0b5+67108864 e16c8092f0a4ea3b0810f3d428581d36+67108864 a3882f1484c0817d39c5cf4a9e5f5cb6+67108864 c047df254090031ac754fd1518dda335+67108864 adf67c2a1461a1f3d31825563e9f591a+67108864 490c76dad1f70cd18fb91d4ae73227f3+67108864 6b5df83749324e88e4201030f97c203b+67108864 428d3fc9733f6833c773d207bc48b3bb+67108864 ecfcfcd10eeadeb5d551475d6ca99d8d+67108864 c79936bfbe83976274d8841a55edc0a0+67108864 3690834378f8713ddb8b209ca5dbdc60+67108864 7995163ac59ad78033956399183c6f80+67108864 19a11565a6e351a70b1f081490adee4f+67108864 0e629545f3e092533656aae73161ef7a+67108864 1c89e4d1ead0d3962c6a5a476ab7259d+67108864 94bb9ba880fc950bf008fdfe31926d18+67108864 ef92797ef7501d6748c0565527b78e13+67108864 1ceae0b2ee74bfd514655fbff18f185a+67108864 08fb4aa7c2e95ecf51a7b9edb6ca8abf+67108864 0005f0aae91d1b8aafa7cdf811aba9ad+67108864 821b28d84dd2717999becb924400b2a1+67108864 62fc209aa8daf7ed5e7d3e368a959715+67108864 a51feb570bc08c90109f072a00a86e6a+67108864 3a43bf7c8329e803d5a5b946f3d2eeb2+67108864 5fbaefefd0e33c0561d2d1ad02243cc2+67108864 70125926487b5e14434f4ee652e57f5c+67108864 266db112d5741f7cc0033a9a5d892a02+67108864 6402d4e9e089275626ada0a304569b08+67108864 ac89f489e384afdeca6f675efc065d03+67108864 7d712d4ce185e805bdc1f4824a2c2013+67108864 5365fec63b04a937ce25921cc74fe5f4+67108864 6ad1ceacaac14aa728bc5a0dbbb1cb8e+67108864 34435c0dedd729486c3629f8cf9f577d+67108864 79bdaa7e0686a43759a8bc554afe288f+67108864 976edc9593c14a6bd89b17d64b960e39+67108864 64b9bf319d1625a1ab43bcf702beb1a9+67108864 10f927dec0765488bc0c4e4ad5864fa4+67108864 f49bef52b8d0fb6b617da9c2e7f9670c+67108864 455129b3c82cc5883065979d29eee373+67108864 021ab29d986daf4d3eac5016202f7133+67108864 7a6b5fa82b5504061c240832980b5fb3+67108864 66ef0cabcdb0a0597c1d0cda9ea80ccd+67108864 7162f8592aaef6ba81eaec040513748b+67108864 616a218103d3bb29469d3a114a161b28+67108864 943d9da1352428c411e9dc7481a048bb+67108864 63afb9a885c0eeca44dc27b09bd5e08d+67108864 c4a6f3d00323af98bb03f4036d534f66+67108864 0caf3534294ff0737818091a223b39c1+67108864 8ff5a74aa3014ff730a4b2cc7fa19f8d+67108864 ea60fb9aadfd2b7dd2c2ec8311a78917+67108864 664f1859521ffa626e2180080a86da49+67108864 39c2b35af70b0cbbd185e525b6811d12+67108864 9cd080c53f594249856240fd8fe3befd+67108864 46c00423ce315e452ef072f1fd2f446c+67108864 791ec75ea6e41fb6569b906340c8359a+67108864 d6171b1a4a7cd7e7151181da6e355389+67108864 db9124b010b6d85517bf6c19df1fbcea+67108864 9d342295e2fa66cab7a9bf525e423cad+67108864 ebfb6d5d8da2c885ab89ccaab22addee+67108864 edc6f46d9e98132e8a70a5dba6d0a524+67108864 c3a3a1a69c7d06724eb506231ebaa4ad+67108864 a5efeed9314f0ed8d8cd098d27899fdc+67108864 6357216e884f403729f9ed85a1e156ff+67108864 4e2e7af2debd0ac7334125eb148736ea+67108864 ee9885bbe6fabb1d20ecf5075c3ef1ce+67108864 2e082c8cee844f71be6d3a8dc63a3275+67108864 8204308ba14a9b7cf1cd14e26ae231c9+67108864 49656b0e77291783b56dff27f69054f3+67108864 
bce59a39ca6e7171f413c737a80771dc+67108864 90818e71eec72d6ba69f87a2ae552885+67108864 713752842990e7e98f4c6aaf4174847b+67108864 c9c4f210937340bc0741740dc0bfed2c+67108864 37abdd6893d4b1104ed0aab3031e28d8+67108864 29da5ad57f46c5105f97b87db4e080b4+67108864 e482af928333956a09b440dbddf00088+67108864 08b9e84d1f6841abd8a994210db3c69a+67108864 213b94ad5bbc43168711864b441349ad+67108864 c703506d76acb781ec09c3285fb0381f+67108864 31c7fb70b7eb1832377c81509aac937a+67108864 6bfc148caa4b734b423d889d3b0602b0+67108864 110e00453a504570e90799b4daa042a0+67108864 6bc9e6edf15a1168249ed54795464f41+67108864 0bfe1332f9773d4056009cb1f1a8eab7+67108864 ba552c6d88ca5c6005143c6a7e85e7c0+67108864 11f93451e28ff65ce40de89091b62f8f+67108864 6acdc268644d8ad70e90e68210e8852d+67108864 7eabb04f5237edf30c0152c632ccdbcf+67108864 a038c58f9dda274d06736df758a29436+67108864 9aa9974c9fbc581261e09d59e9224680+67108864 6e5398224bfc952fe566de3ea89076d7+67108864 265eb4465984d2f68fc639657fdde4db+67108864 0b2b8a7e74cc58cb8594cfbf75e5cf7a+67108864 7d19acd58426a5b4d7cd2a8cd4e4ce55+67108864 7ac5458db9dc2cff8f961850d4959771+67108864 eedd1feb836fce4707d7f44ece26b086+67108864 82e86b19ac612bbcc31206c40fa9b883+67108864 41d7129bb2145abcdeaa257b3b31312d+67108864 6e1240f3fda156181fe0a1d52ca5fd05+67108864 3180a5269907a15aa42c4e448724f816+67108864 ddeb2b08aeda71f0655c6cc786f2a85a+67108864 7db290b6db7f6d60386f08bf00518e68+67108864 4379e303cb12df9a9f28a0f86756737c+67108864 5621082de6125cae13913cad7cd546e3+67108864 be1af8242fce966ba091fdcdf5d3d769+67108864 1e714951ba26e3b7a010ba78ac5dc9ca+67108864 68921acf53a6b458cd6e5b8e0ed1a374+67108864 7c6d8b960e16168a32805ee8373f8eb4+67108864 5a3b518f9b1529a7c144d569521b7731+67108864 a0bae5564daf95e14cce3b8842b15535+67108864 4406c2de1a9787f1b589789ce2f4e768+67108864 118f08b4245eb3857a46276e45bab1e9+67108864 3d8d6effccf3b0342af688187dc4f3e6+67108864 d192c8d6741a6adee036f5aba554edef+67108864 965ae2d62cada40f496ac05b403e827b+67108864 81258bfeaf7a9fc15666ae10af305953+67108864 ae23e489f9d9eaaf7d2945855e972c75+67108864 297187be9f6fa75f3445720cf961ac77+67108864 2bcb1194581501e8c20f00264256ed58+67108864 16bafb9d0cc7730239d180101520734c+67108864 976cc5ddaee712bede5542e7b003b0bd+67108864 bcd02f6a3b00f4550730440a5aabfa9a+67108864 a455a0dd014194ae08007ea6e8ffc203+67108864 6495f63085f1eda582bb0475a44bb34b+67108864 76f39563689b46c281fc5af603fe744c+67108864 3c730aa83b0c01bc36b5cd8d666a35c4+67108864 637a2ec9a7840d248259466fd9025b05+67108864 0fc5ed24a204865033809c785cb7c5cf+67108864 4a3908945ac82bff0c9c133a42a60035+67108864 a2bd743f3304c65fedbdb7904f76c6db+67108864 b4b548164626fb0fb5bd4e1027c65634+67108864 b8ada412683bdaded98f4d772bfc0b72+67108864 b6230517ca29374241efe1fe500f3bf4+67108864 c2219a2c649a50b03e63447d7cf76290+67108864 7d5c44bcd5631650e79c979faa3ff1f5+67108864 e8589cfe03cac2b0958c8d8182005944+67108864 2e2260880ffdf589dd731679669d4162+67108864 ce30ca4d2ee870a4b645641fe46f4bb4+67108864 222aa06cfae2d5fdc4c54e31e0a2f7cf+67108864 7a5e6c5b9e962ad698ef50a3546e50f7+67108864 cbaa5957827447ed2f2548c044c98c2e+67108864 1316d790c4a0c56c96042c27c774d417+67108864 539b1ff5b0cf43d24364c8a2ddada430+67108864 d297a6c4ce2c3b5738afb24bfaf4112c+67108864 9c28f3fe301bb8359609a38272b99166+67108864 b14881938bd2985ee9780c543d79cd0a+67108864 b9131f8ccc3d3fb4d7879d3f6ca6ba8c+67108864 7b9bd91dff75a3da7898c54341a84797+67108864 d729676cb8f2032aa5068c0b6f3edded+67108864 36fd3979f82c521e0ac49591c38a1399+67108864 621f831d2103670dd086fbf74a2baeeb+67108864 434b0edbde1d7c8ccc524754ca511336+67108864 6191761726b291039971013e456860b2+67108864 
81f56aa9beee03bf95e2d96e10331e6a+67108864 478af52460b6c572c4586067e621ecb7+67108864 d1b46bdc141e6beb00f573c7aa2eb76a+67108864 95e9acc5d676161bd4011c915d35cebb+67108864 31fb0b0f97de60f84cbbb9c6bae275fd+67108864 68383b28acd96e9a23087138fd3f67c9+67108864 06e7e4d9dd82b4dd307805302f59b6a8+67108864 f01d7a68a50b47c25636a61615fb2dcb+67108864 3d92fe7f228b5db0eb83a955ac765284+67108864 22543080bfb2cafc6276f067344b6c95+67108864 c777be42a3b970dbbfae050a59db63c0+67108864 523ed14e70faffe938d9562bda81b006+67108864 52e0dd73df65a5c28727088a336ca9f8+67108864 5d3735e054e694a998cd7d656afc498b+67108864 1a8a3f179210a12402a6308381e78a1f+67108864 8406bc8043e9a249e9575354b90420ec+67108864 a8aafb1fc4c208d712afd441c10ce110+67108864 7ef66e1c5a9aa8ddf12e37881a3fad5b+67108864 1cc5c7bebb389cb5d7cb2768aaae7a8d+67108864 de67652071ea4c8bd9fca0485d687406+67108864 bccb14bacc9d2bf516903dafeba159ce+67108864 208b6c5356a84735b523bdf1ee8c3352+67108864 cc6678ac9df8cfd452e5cf84e7382119+67108864 d05d3afe72f85020cbf1c47619e08137+67108864 811a2caffe5295fda0c20cfe07eb9e7c+67108864 91b6e3aad7844631e03a4b4691b7467e+67108864 3b36c286695fd13293960956a1d9ec79+67108864 e7cf08e2fa2b68acefe9c7ed126b1924+67108864 01756f89511027e9f7afdc51155f9c68+67108864 b63bc9636a6bf211183727f9e124888b+67108864 b36a38a8b9a5b2bc0c42ed1f58e75175+67108864 059efb6daa914416834e244e34b2f7ad+67108864 30cdfb63916ff1e609bf2e53895b77d1+67108864 9ccff5d27dbbdcc68dd1805afb4a0015+67108864 9bdbbec05d44b0c1ef1198a474dc0ef2+67108864 e98c4216d9976c63f8a7237d8aac1732+67108864 a8d8205ea2e1356fad587c2e7bc8c8b2+67108864 49d5ecb84c5327966e083984c43d218f+67108864 702343649c578c8961b92373eb5e6324+67108864 81b31e5e42d1e0dc2499686be84a5a0f+67108864 92e94d7be66176d50ef2c7b75df8388b+67108864 e052099368f8175e3cfd319a69b88e5f+67108864 50e2cacd47f698549ee368fe0b86af2f+67108864 067a888f2fcf0e334a59b76efb988f16+67108864 ea973e82d757f6027d79169dec882267+67108864 d6ba6ba9cd7ed076c6ca75a666c1cde0+67108864 f970793e593c3b7b273dd64f3278b06b+67108864 181e4e39f6fe1eff305c7ca124de8df9+67108864 81d3e93e5fed970958ba33baf6514018+67108864 d94d8d656d73e7c22a9fc21d61814021+67108864 cba3f4ddd207d8d6550929d3d6f59e6b+67108864 ec09778d02b57020f1df80e19fc43df7+67108864 31f7f1ac9de231b22b0756f9f90cd1c2+67108864 f7f69f67fc00841c81539220938061ec+67108864 9a4e943bbc8002c7973b34877d3a8832+67108864 8b2dbd591511b111aade4ac6dbe7e9ba+67108864 3f048fe65ab2388790762965f79018a6+67108864 7295219eaa859a1050df9fd769a109a2+67108864 d414303c3465d3c676cbd88bf62751de+67108864 e2cc7f0c2a6f8f2be16566448187fb9f+67108864 67d498ecf74a5b2bffef378249de6986+67108864 3dc7cde461783f6603ae130c886747b7+67108864 20bb90dab534b0fe6740d0f9a91b69ef+67108864 c341fcf8c17f3f0bc8fb77b1c38a87dc+67108864 4f6b22463d3bbbcd1ec223769a991c35+67108864 49b7d5714200d8fc8065cb7867feedc2+67108864 3fe374cf036f57b052f5bfac5e777536+67108864 849a12126e6e77ef89c6a6fd40b3b50e+67108864 d6597ebb50acf6026937d7e34e0fe5f5+67108864 1db60c0d626a7dd6931ad9a8f012a7a3+67108864 c541492c1dd39a9652a12929de6c132d+67108864 133456ad3ef89c02c15255894c8b4c38+67108864 ac762257261e25124d0f3bff34d87e5a+67108864 902a5877d0ca144be4e64e49ddbc6a84+67108864 4bed9dae329bb5a139487c26f77beadf+67108864 cd57fdb99ce8f02a528e55adf394c3c9+67108864 ddded9688f0f1d644ffdd0713a418434+67108864 c78069eef75782c7ca73f2cd4b222e5b+67108864 770652d2d600ea9467c19cd6b25318f0+67108864 e15caa477a7f2919ac7f50421fd4add1+67108864 4313b9b47b2af2652e84b57faff175cc+67108864 c7d7e55181b1614e5db644302b89d7b2+67108864 be4e2c5abe2b975abbfbbe05481205c9+67108864 0eebb9ad647ca490eecc8fd85f9e3130+67108864 
b81284f74640253d18110baf27b2edb1+67108864 1bf994344e46cefef0ef528a65b16616+67108864 5b4e51780ab0d2c620dd4a09607c914e+67108864 a369023a52d53a580c9ac2e5b6414298+67108864 1ca1cc6b116631ef9042b7fcc1a71069+67108864 4c238ef73d518fe846db3da59e7f3371+67108864 9cff0ccf69f6718db3e2a2c814d5e93d+67108864 7c08baa884bceb5a721a2d1c3631303a+67108864 3c4632c93c3d96608d76a1835fb7c53f+67108864 21a1e400a720f29ad5ef0c398447ab96+67108864 e03a0738f567dd01ead1cdf5195482c2+67108864 31b21120078f7cfa2ed8f365e7d4ae67+67108864 72b9f9dfd2dcf67316d397d592515465+67108864 ec2acd07100379e405ee88c69baf8700+67108864 ed0e5cf5845d7b4d029579396edaa93f+67108864 2d6e8325ae138e53a69daf270a6a7143+67108864 165b7b0a9b59a0779078b237a0f43859+67108864 5426931979acf526c7dac829940b4744+67108864 a19f2f3722ba8b743f37ad4fc1de7efd+67108864 7899a1f6dba4a7561613ac8c346be233+67108864 d4af28e803d3acf8baf02343704771e9+67108864 c0acd238a240fe5d887796ebf2c59928+67108864 81c9046e203b9ea21a3854a08be3b6c7+67108864 19be9dc41ce37336f95cdfdcb9bcb1bc+67108864 592955756e6e716301b545670dee9519+67108864 664791aa59678b7f35d91490ef0e98e9+67108864 871796aec9cfe0957e02a973e6929aae+67108864 a29cbb99c16228863e2b94aeb52a4d9b+67108864 21529a807f3ca8c79de6985f65c229e6+67108864 65d73e789e7e2afb2398350501c94582+67108864 ab0f11f1bbf28776c8a942cc66a9fa24+67108864 f26f4e283ff6b79edb5b518483eb5698+67108864 eac4a53e663ffe1ba44350304e4f80eb+67108864 4bc565e82bb421fb178737d8b35e4de7+67108864 d6c94182c341a2cefb3c471fbca1aa5a+67108864 93053012095770ee0ec3efbbe5250dfa+67108864 861d66ba010c5138d68feb386a8929a6+67108864 c7ef775c4324deba52152a63d56e22ba+67108864 120c437c1605010561ae054991e0f06d+67108864 62fcc121f247bddeb2e4690e8794ce04+67108864 b219871e84e96e5e15caf19df374730e+67108864 7a3b52d2d071fe20ec7e4d1ebc3181c7+67108864 462271a7aace014a37e2278bad547c57+67108864 cac6ec7d98ff4b14bc19090ad1e7b5b8+67108864 dda917590a670289dba64ebf4ec81e7c+67108864 d8ed3cb70550899546ade41043264a91+67108864 5476c343b293ceb581ff79577c026993+67108864 bdd564f80a3ccf32c745be5e1b379712+67108864 674c08db645cb5081699beb789581124+67108864 de7b66c8448497df3b29bd147a6a372e+67108864 ff6d3e917e61990074f704fd680c8795+67108864 188c19e8e02c3d94e78a8e87ffc3d808+67108864 84c8d082ff79b3358a206846466a5bc3+67108864 3908b9e0a2e7cccf8d84e417e9742311+67108864 b03f0a17946803d7ac479ed9e24de52e+67108864 bad1eefd25bde16ef8234917d2200934+67108864 05b1547d6a2c9ccbc8ca098ceb9cd56e+67108864 227b71d3bc344620f24ca2bdd096d1f9+67108864 4e3a911661430007959d4d3c8195683b+67108864 e003b58fda6183184169d6cc75ddce25+67108864 7fad6f2dbdccee6cbbdb08cf1baec185+67108864 5b7acabe0b10aef1faf11f84606ee771+67108864 9fd97f1a0b485c0fec8b1b3719e7c077+67108864 31032fec03d16b41492a4f4be3dd436c+67108864 084d081193ca6c8ffecca056ea8f49c1+67108864 dae3aab6f390dc7e5356e53c9fd96e4d+67108864 f3f9b46cb5fe5960dc39ae0aff326805+67108864 c1c90111b2966da38021e23ef7624d2d+67108864 58d9d76b23af01b0b8a79ed6c8adaeb0+67108864 1b7ae7ec0e2a8eecf3b104c6a69b4557+67108864 0a32a1d91c8ce9e4139f1175eaaecdc4+67108864 31ca474df7cc3539f1811768bc88b1ab+67108864 1096ff9961700c784e2d8602f32be34d+67108864 85baad3a44ea3d1e052f9f065cbae2ef+67108864 e1fbe64ae53e81697580c7fe706dd637+67108864 00af57013678b23cd6701d2462c7cb45+67108864 66ed4630ae4c7f5650b77019778501d7+67108864 c53eba1250783154d4d6c796519b0743+67108864 2509db5b0048a95a99c8a05141d25a08+67108864 7bca0e2f4ed3f7975da1eb96e5e991b7+67108864 6dbc441cf6a53df1e0ccb2c458147187+67108864 9e68be13f859009d018bc89f013e2a5d+67108864 0b3260c834bcb8b24957ec1bdd834828+67108864 8fc3818ed7233f087dff6035d0d0e68c+67108864 
af889dedf0f936ab5cc7751a3d8e273c+67108864 9619e43d50be73c63668dbe0143d7848+67108864 b172946e97fe8377517616b10aa6f5cd+67108864 483a1c541268bda2ae26f5babd1a3e4e+67108864 b849c2251770ba0a27b8cde071a7a8ea+67108864 21ad98835da14f125e0ddf3e5f641d1b+67108864 e746bc97b5689c1f05824139e6268be3+67108864 4bc2175232d8e03caf985f0de1d57ef5+67108864 0770fd6f7d5be477b1cd57357c65f2f3+67108864 215aeaa9a9883a9608751c8426340bfb+67108864 c30527b55a4841c52206b2ac388fd09c+67108864 109da8a7a4f5d899cfc2aec78f10417d+67108864 cc6b5a96b78c9f72186ab904b53bf6cb+67108864 45d85dd5079605ea72b2956e178936b7+67108864 511290d203b744c2f205f8e391565aee+67108864 292c5c76e08184652dc6b016cdf7bd69+67108864 b6b9f3aa332b32172b12227452fb9a0d+67108864 3faf30b6e440a7e63ec5b33026c4e8f2+67108864 287c54f2ededd394a348db4a51f31d73+67108864 86ea3ab570a70fcfb97cad610f30b2ad+67108864 b0ca221dd6800789a2b0e3ea3b3735ce+67108864 bf170401d62007080905447557163bd0+67108864 845e02d0645ac0bcc74f12573af46cbf+67108864 a7074ebcbd3bc6e477d74789565cb2b6+67108864 dfed9499e9f1aabe61841af5db7e32d4+67108864 aac55aad3070c0f1decd3648bb8dc75d+67108864 1e430b86330b7c4f20235ad50934e29d+67108864 8e6952bca44fa90f55d7df7efa35c24b+67108864 c9bd6e4d596358098d46e9c9852ba08a+67108864 f0481b0838851da2d672fecb8bed0ed1+67108864 39351d86071a678a156bd6685830852f+67108864 6f405976b4fd4b9f0d0fc482f11cdbb2+67108864 b99d1829308454f6a6d4c1f7fa67fc22+67108864 a65479a2abf49e006fff628f524c64d8+67108864 384fc792211710c36090828945f633b3+67108864 1b0ef2edf16035aa787a2234a2773678+67108864 2997683ebe6be9bbdf19777e6dcb7f85+67108864 a97176ce1a49dcead6f98c5d21e21f4f+67108864 fd8b3633b4da408c5348c901ee991c5f+67108864 05846a8b6bac781a0d5f0ae93da95c31+67108864 60bd664d28ab876ab2e4561f45a81413+67108864 4b2333ef5f5d87b933c943f993baf855+67108864 6b6cb6b3c49d63ac5b8c27a7287ffaa5+67108864 5c4c18633f8ca72ce9e4762b8e39c5ed+67108864 cda8a7e76d682bf2724dc36dfd628fd0+67108864 251af4c3c4c0297371c9cbc18f5391b7+67108864 3d9cb1ee432726b6704f4f062242a15f+67108864 d9be281b494fda2b2e097ecee73951ae+67108864 64097a37bcb45de638b1d1f3d02525e8+67108864 ad7c99315b582d3eee02a188986bbb06+67108864 7dbbcd083f126383c604d25a36318240+67108864 953dd0cc0f65581a304a69d1d8c6c074+67108864 6a4717fd23889c9e57e2fc16702b2ddc+67108864 ce2cc9afe4e799d38ac872ada6dd1c76+67108864 4874f67fe3f584f4bceb296e08e902fe+67108864 2eb3f1c36065e4d1356e30f849ef3887+67108864 8224b179ede992888735c59abe61208d+67108864 3bfb71ec8ce2fe06abdc1c072d2109f9+67108864 95b077361de7df58fac636a8c63f084d+67108864 34ce1eabb9b94098ffdde1971958457b+67108864 f6dcbab5c3c046737e2603deab056786+67108864 97381d8b85c75d95e4060e90ed0429cb+67108864 8009035222650ae111fc04889f6a91a0+67108864 7de4cff45fb41d2ab43da4ca4787a1b6+67108864 56e158d2d96c87c61ab07aec79a3f83d+67108864 80446cb5b295a6f943c345d60ca60fc4+67108864 014e252601f01f531ccca2232756c299+67108864 4521c10e47a426b0c8837cc808e20877+67108864 acc6f83179ba69237c125d8fbb85f5fb+67108864 b3c6f417342f1b43da8d644c10d2728f+67108864 f7d991cf5cfb8923681dd841fec2c3a6+67108864 81a497293d206956365b5e5994acd53f+67108864 6bbec259f46456e0ecdbd4cb4b3e38a8+67108864 6920db353f03e0b89a17c6eba721024d+67108864 11e1346974f9c5c99ab4e116c8b6e721+67108864 dc85c5e79ca12b3ce241634db0887a63+67108864 02ba2753b14df08c6065fe3d8067f520+67108864 512295f97d8196f4a3ee96fa7fbf28a8+67108864 167e357fef386e6fba3be79338c6b242+67108864 be1203699a729599cb62682276d7912f+67108864 57af384e85fbf8f3794f7187dd66da47+67108864 47551bb1052fd5456d8146be45ed0553+67108864 b96a537acec586386129a748be6210f0+67108864 5d9ebd409950ea6daacbf4fc81f0b8d7+67108864 
efde5b728f3dcf9f88d791b2796d155f+67108864 2d0b282d815567c51d601a70fcddd68d+67108864 c4ceff02ed6b0513cafc9e3246d1457c+67108864 d84624307b9c2ff7948a79ab9f98646b+67108864 c1a0198f44e5bb6317988e9bdd613650+67108864 41e517c6cfc94a2f01f4f3d8391a9c66+67108864 c88cb112ac786c60539be3892adfa4e8+67108864 5ccbe64ef42d17e7d9a5efb4e555bcbd+67108864 98d2f0a55f84aeb228eac5d2f1bfb880+67108864 c065beef0ef2df2a3e046c13d2f13c21+67108864 6d029e69045be3737979e2711bd50ea7+67108864 384231d48e990757125f8ed75de83b6b+67108864 0a12b9a19ca1cfdb25ffee4cc3b79f33+67108864 5b8cb8cc4670684db3308db01c15e426+67108864 de0a828daef9037a9c79f2c631c1e0c5+67108864 02673dc624408ab64b89325ed9758ccf+67108864 7e3379b52437c0063d65dec9c0d425e4+67108864 fb6874cbe8aad09ac938aec059f8a64e+67108864 6f7e3055a1d27198d59b9db66e7f9fdf+67108864 592c577ce395168ab1e2b7f169f2810d+67108864 8dc6e1551248f7597693d128f0099708+67108864 8e59539a8b9c76e300824cb4c6e77400+67108864 e06cfaba57b978a4a60ed0d92830019c+67108864 fdc786f66bbe0ed72d008a9a93507238+67108864 df834d931b666e9892346816d1e023dd+67108864 5abd6568f3f49075aec50be5deb08b24+67108864 51c3db0547419829724af016a34376f4+67108864 5532f28919c3e73e4b311532f5173885+67108864 9f35d38d94c833e4bbecd64e2a5d820f+67108864 b9a010eb74aaea97ec508e46d2724635+67108864 96c729f98c1cc39b094e65e9912bd47b+67108864 37e46f23c8deef3b89bf4003034d3d0d+67108864 9bdfb8ab69c3f939c089658364e408b7+67108864 36e9ee64d69e5e766673cc0ff3568784+67108864 141e5f5a866f40a332481f8f0b5a02c4+67108864 13759de2988ae2c61bdc0646bd3f20c9+67108864 cbaffeea577a3e3022dd528979dd66a5+67108864 47d71f445f8a913639c2de35748ca1ac+67108864 c96f556870b01298a200c984c1b6a485+67108864 9101f4c9077a4999b25a816c5c7d4577+67108864 ac67846ccbaf979a06869dbe9b339b67+67108864 ae04dd6dc18bccc506d4a6c154ab0984+67108864 a7b1d2031b7790d6285ec6ceb9bc7caa+67108864 51cc682f1e4ca25e919b805449c99db0+67108864 dd675b1ce02a4cdb5010c23b8b8effc8+67108864 5402750a6d0d6b745bfb62cbc7a0ef41+67108864 abb173de47c177a5513b6eafc8e067fd+67108864 821c634696374641e713d263226f585f+67108864 312745c720c1dc832b990494ec21d328+67108864 ec41d509395490b699cbb5371748dc0e+67108864 571af59f889abd5666ebddc4acbf26e1+67108864 d13ec87ec18d2fa7c8594a7a683b2b87+67108864 c27783a42ed1b39611c5b4e56b898f89+67108864 256e0bd60301b630d4d9ae492fdac4ca+67108864 25e79caf8013afb12230195662f86195+67108864 06a2e7c753f64011e34974e7c2190b41+67108864 534e4134093ef3418c0546c6e4365783+67108864 4f973a9f34db9004c06c99a3e74b3586+67108864 37f3d57d8b0d46fd66be22d20504c3e4+67108864 f36e3a3343c70e8f6754d037b029ceef+67108864 8998b57cb2f3a4d4478b18b1a915ef22+67108864 e9a0de38f943bbf35ee736689b88ca2d+67108864 9f4b3c74f27a057b41ea1409c4da74e3+67108864 06b20eaa64f9d3dab4c529a8272248f8+67108864 6aea7ee9338fd608ce1ca71c8138dd16+67108864 add48912237a5d2310566a14e713fb5f+67108864 58471508500ae0331ebed69b053b3c1f+67108864 0f9c0aedb6cac897b66f462066aad408+67108864 7ae5b8003a518e6e73e0789744698564+67108864 f4997614ed3b49007c5ab44d55f9b2db+67108864 dee630fde59d34a16f13e06d696ff961+67108864 b24b9f7e6ef3ce54460e0c642229d8d5+67108864 cb98440ebce12bccf65c6b0d03dda757+67108864 1657ddc3534304afa69ecf8aba171de8+67108864 8136646433203d0f92305094371a044a+67108864 86980ec972ce5b94fd523b0005ae531e+67108864 25dc54c2c9ef76bf73baef782e9426ef+67108864 426c837fdc0e955984bd4abd2af4bf49+67108864 41fa3f238068fbebfeb9b5158ac624b0+67108864 c8d5838a82ffe2b1d160d9ecdac6dddf+67108864 cb8d6dd2800967ce59054dd528a34523+67108864 9d217fcc11e3c1cc0db31410b9725c2b+67108864 11fa34b137060862b6066b0c38583c64+67108864 47bc8a59acaac90e96cd61fa2c0f7dae+67108864 
26c46447138d5f96e00b9a3501c1fdf7+67108864 3d28f5a3dfbdc118eaf758d5ff3e4d06+67108864 ff609548cd6699e0c060fedff5bc0ed4+67108864 9a6590c4f9bc00bbf1da8e0f06a5e152+67108864 7ebaf211e7365e86f04e9a8580c64ed6+67108864 0ff0d02833920ca107acb6f023f561bf+67108864 ba4f70a5ac053173f79d60ce3b03930f+67108864 055da2c4cee6215dea4d53f147b23004+67108864 b5e1a6ec785aba278cef39e0a6448a44+67108864 5512c19387b1dbf55b0966fea0996859+67108864 8a7d3fe39533cc44993f6f7021b101b4+67108864 b4b285ea1691d70835d477003691f40d+67108864 fb143f5264c383b1f9bb0aecd311040a+67108864 d990e38d43a13dbd04583b2509464048+67108864 66589506c3fdb72c89b4d57d2010f82d+67108864 fd266e7a1f8a3075f258c1884e8aa28c+67108864 148463ea6fed5dda988f4dd6cca2386e+67108864 a486f29b29d8b941513c8c0fb1c6e228+67108864 8cae319912b2b63bac4ca1ff0293cdd6+67108864 c2175b5c46aad00cae0dc874fa78d735+67108864 94fe5a4d987ae23d32ddd942654dbf34+67108864 366fee4ebcc1c4436e09886307d594a7+67108864 145819f02f913ab57b95aa6e687c7b65+67108864 021b8921b8a0be01fcfaf6a8f432ed1e+67108864 44aa8530fcda59a7386ab753d852af90+67108864 84c95c93dba1c2335c5132aea1bfbde9+67108864 ccad6eed58375535d5888a94dfacf051+67108864 8c70c1572583289f4f886b3e7e36f4ac+67108864 1f282e0018a1ad18505c191cddea2cab+67108864 c95e3457eeeda4ebe2db198892985d53+67108864 0cadf623f5acbd4c9bd62125de6a7a84+67108864 88bbe2bd406b21002e577196b7dec646+67108864 01a85e867d8f17994c267ae9f28802de+67108864 254a198d49a7cd9abc7ae6f9472ff938+67108864 765c620ae8b180b36e1dd15b9e46415f+67108864 b3bf18d1866f90046c14b21e1a49aeb6+67108864 64586178bfbf5919d7331c5417f343d5+67108864 b9780d2f2273ff02a535f9aa1d3ebd85+67108864 3e91ea1b91d943edcdc02ec2d2748f41+67108864 9b98df65c1d3b5b33e49b97177ec1b00+67108864 06c841928cb336b39a3fbef2ec27a411+67108864 f8d137eaf5bda678bf7822fa07e1d5f9+67108864 69843b1ca92cf826a802693ef2b2ae44+67108864 992a200bb822df303b43a2351bf302a0+67108864 3d20c7b9643d676eb2d2618d34c98270+67108864 31a88741befd885c16bcadebfd5a8cee+67108864 49ad328a8d37103e54a16dbae4c3055c+67108864 215e0e9cd05d16694b8cccd48dfa5abb+67108864 987fa48895459fc6f33853ab57fb6dc1+67108864 a65ac062423a006d76fcad051d72a65d+67108864 a7f805ac6a34145fc3b8d667ec43d576+67108864 dadabefe4c732ddf56e988a0ce034093+67108864 00e73b019e926773bd8b44e55ebfe89b+67108864 87869dda7d5df3ccda8050b44416b584+67108864 76c6be6d066a24b1b9b0aba1e3673f70+67108864 2c754bd71ef5bd4f72ade61d4434a7d4+67108864 7550d3c5d929e2433f167307593e468c+67108864 38113aec6f56566d7e9567c3a17f28f7+67108864 f7be1e07c0d71923a78e80fe90404bd3+67108864 7cd4833b1898db8aa453b9004e25c4ba+67108864 c68a382c523a60f0fbb0ab6073ea910e+67108864 0b771a04b9796e9f96c8da6c0e6b0fd9+67108864 8693722335dbf3c7b959c615e0fe6924+67108864 5cb47ebfba5a7db4b42dcc7fca2bb870+67108864 636be78a2c685b4da74e97cbcc124105+67108864 551b759b58ac10312ec353b7b0a267e0+67108864 ceeffeb5315ea9e54ee2050526b23bb8+67108864 c4e5252e8b0c6089c22ff9da672467dc+67108864 ebbf8c423715faf70b3af0077088bc6e+67108864 991c7c2448f265e70b0ac16ca43472e2+67108864 7c47a1a33a8d8e9588fa45f42518cc20+67108864 245c2168cde621737996c3fbd6c470ba+67108864 79047629ccbfef51995de48da7f3725e+67108864 e4bc4d51564c761c1f0fcdf1c80c6699+67108864 f14f733fe67b971cd66fd8b36564e29a+67108864 7eb7a28632a62a963e209bf8bd38f332+67108864 32ac8dcc12935fdd2d9171bcdfb015de+67108864 618f64f3ab4219f2b2923086be90d457+67108864 c2ce069535fe3beca8f628f18f73e653+67108864 39f9feacd8ee8a9165529930e6fa944d+67108864 6b97fba9adeab93c0021133ba08b2b47+67108864 5fad45965eea86e0f32f7631f852ced7+67108864 5c2fd36a54b250d15c4a04d6f4c9ef1a+67108864 46e18f793a73586043daafe0e013799b+67108864 
1f5d09b11cd22a8f39d80e6162754274+67108864 a7333383f35cfe90ea57639ecd5e0795+67108864 2092efd510100ab4a93d08b240d4e551+67108864 6c8aa179f44bfd647c47eb5b5e04face+67108864 848379844fc6af50f86090526fe222e0+67108864 aff62043ce5de2aedcb03544e6e5db57+67108864 cd791e50bb80cefe40b2ef1b2ca85526+67108864 2ccde8a4341f0c8775c3539877e53fd8+67108864 7e2c4ca905113146e57920b996f8938b+67108864 c1ab3da1fccde2e04b990b3f54e7e7e0+67108864 2f152fe6cd43ff4bc5f9870189c5fd91+67108864 362763218d47cb2ad1be8827c7b51aea+67108864 9525519796bea462fd67a883c547d60f+67108864 13689b46f56b81757265379951f4bbb3+67108864 b9a0e158c2ff00011baff7d0db7e0e35+67108864 ba1a9edd255ca7957fa64b6c578f0de4+67108864 9b63e5f3b4d733d33de31dc2a6a79b48+67108864 5ef12f13166b39bed94977b68310fcd5+67108864 a515cd79b3361536326bcf2abe9b48f7+67108864 cf2eb5da8e95bda57de0836808c3b40b+67108864 65570f99a823ad70b69fb603c400f355+67108864 02c5238434e15f45094ca29aeda3c35c+67108864 596b6c24fd2f6414fb2d29b4bea5b104+67108864 3c017435093274bcb039d83568a7131a+67108864 47c1b455a5604fea60e6b906fa91488d+67108864 e074b3d5b5c478986b821cbc88f9dd77+67108864 4408bb38d2012b2cc8d856a83f8d9dc3+67108864 7d5a0460cc1d38fe20c194a673d6461d+67108864 3e79d38393f59cf7eaf7eeeb4b5ee82d+67108864 452a7aefccfad5289e42a3a3f8709ef4+67108864 6d0398bd5641472117545815d7906eb4+67108864 63f43499c4fdd70f9ca016a997777d63+67108864 ea130df56228d6d0e99397a1e5213ba3+67108864 da9b41f4c338e33f0a913e925f5ceaab+67108864 e1c79b8efd129398d6e8dc84daf0c65f+67108864 727b14298f8be46bae6553c7705eaef2+67108864 d7d9955f80c1b8f7f22d75ab64599aa9+67108864 fec64254c1e06da83c769861a48e0abf+67108864 2cc3c9076ca84d0b3d1424ff23f5e18d+67108864 be0315b12077acb758fb6d607ec0d9db+67108864 609a5aed43714ac588ceef16be5cf0bd+67108864 e8528687518544546f15d104e42ab36c+67108864 7590add63fc192d47386d025f96a8bf4+67108864 010654a6ae811cf3c911c34a7c306282+67108864 ff72a1acc0072ed5dfdd967da0830b6b+67108864 df6f44a67ada6387036e07449b2def8c+67108864 14405c4605e26f7114cfee29afc12e8e+67108864 e3f76e3d0906afeb8e3491e7890386ef+67108864 04df6b259de0b2cab861456dcff4707c+67108864 e9c9c7dfa8627d8b69a3d37361965a8b+67108864 a8f2caecea074252e7d7268f161c3ad6+67108864 a3f0b7cf9f5285990bd09a991e9046fb+67108864 5f08577621f017b31fcb40955fc95fdb+67108864 e4f5c8e2127f0e6c3387acb41b84f331+67108864 eaab7103ab17f9ef849287e24857b9c6+67108864 01dfbcc8bc5d7689f5f9b0969b1f50d7+67108864 dd0e627ba4e7cd97cfd07a89f34ab2d3+67108864 628e500b4a1e5739827242184dcc55f0+67108864 c1bd7e3cd8f6cb893c6691a1c132e41d+67108864 a8ce57e72e494f6ff16c01416a715388+67108864 e5e4d21216c2be92faead68dd4a7cbb8+67108864 fba18a980ef743d02623bdac1fdc11bf+67108864 fd35ea347e664d2bba5a06cc5b5859ae+67108864 c7c3772463ffddc941f864cb561196cf+67108864 e3d4d8d4c0582df9c4d9d7df34eb7254+67108864 0e96910a0a2a49a887f7c4afd7c6111f+67108864 d7d0ad947a996de69cf6d20f34dd0a75+67108864 7513ee25f082164fc1e2dd9303fd6dfa+67108864 ba35f2c1e66448e18ee94a7567e901ca+67108864 ab894212dd52e4048db5db39267af038+67108864 0fe5d7c56c2cf526a0fefff4361cc41b+67108864 6a72fbbf322b4efaffec4c17e16f0337+67108864 7d7ec2f367837d0a99c45f13e2a72ec4+67108864 e531e5a0f983b625b97dbeb6c8c75e39+67108864 141cca3d2ef79428c45e31b8196680ef+67108864 f5797709703765504615673f3a7798a1+67108864 42a7d894d9bb750583981d721fc0b43a+67108864 3538a8dbec2f3bfe2752d45a71e70b89+67108864 c80e07e5d0ca589894bf774edc001f37+67108864 47337a94882b8e782b9f52f5cbe47df7+67108864 989387d2308d63dfa14f1984764daa1d+67108864 ec0611f2664f96a0a146e82efe340723+67108864 fdfe2ea11dcc789a16d09e7f57bd724e+67108864 ed1c5b025df4101ce2d0453c52bcf44d+67108864 
c871836dba44d682c2e9bd5068184083+67108864 da3e03f6d48247957ddf1138cffcb069+67108864 f0a4baf3d0c7b390005017316d5f2466+67108864 6c4a4f37c8d1c2e96ebed82c2112f2a5+67108864 b16a1edfe99ff8fac55049dd1838e3a9+67108864 830da62d52b0970e8486e09146d3c380+67108864 4f8285c1c54f0bb0e2a3548a709ca801+67108864 715993ca1791faa1c2318927f99e7cde+67108864 4620f01f02827a655dc211e6d19da234+67108864 ff6e868275e6ad24c573e4c158b5d36f+67108864 6cc04d5bd2dbf95a8bf51c3b4514e756+67108864 c81fcfdc4df6edb054225b043a3d0006+67108864 7eb19468c9690ff9f5ed0606e76f1ecb+67108864 df142d2444521375a0b3419e4ddf289c+67108864 85839ad5cd690b60fc3c9ffde0b16a48+67108864 a48acdc808f3f6871c5b1ddb61b856e7+67108864 0702af55adf78844d8447ae2edc612b8+67108864 6df29acca1f5512741144462c9965066+67108864 2772528da2379d504ee0a2f2f8f6d654+67108864 9a48c99615d21cc3803bab1109d01b74+67108864 c43285f8335705ff5db5c273649c1abc+67108864 95f810aee86d91c03e830b0b07c35802+67108864 fe441d6f0df7600e82f39bb8b783d689+67108864 a5fb5cd4c8bafefe7295d832d54d153f+67108864 fc3d46569b39762465fe0a52a4794e53+67108864 604abe47fe48c7ee596be8cd8593a48b+67108864 b363fd001c92188e289b26d54a1b2ff6+67108864 fe882c56c35ff21dcc34f96531254c5b+67108864 76ef911b4fdd4f71d93b74bdcbc9c090+67108864 e2ea51150b31146e112e909f64164267+67108864 d2c5b720e097815e664f6c87f0cf4ed2+67108864 97e7507df7dd00de336a42dabfc2c2a3+67108864 4491090d96143ccd1901779e9f00aeab+67108864 931d4a0940ec2eb34fef01cfb8dd9595+67108864 b7b81ce4af050cd0a01a0dc86a794805+67108864 77b4e0d34dbc3684fc6cca5120bc3456+67108864 ae2f71e229dc701bb379bcbfd00fed30+67108864 03e3daacfd7829c88c098033ccab1fa6+67108864 a6304b94717ba7d063489a01fec40e1b+67108864 cd4755b79ef2912a216fcac641af5635+67108864 3add1f551929e2bb8465bf103566afe2+67108864 575f0da37b5e9d2041faa60d5949af03+67108864 ab5a61475c7255b70370fcee5dd9d94f+67108864 30634d01c7f400d31d611cc7a7673d92+67108864 e31a7d521389ce6891f698cf268437b5+67108864 846f9a6630755b7a8b882effb86d2020+67108864 52e1eaf4662a1bed7e63d5310a893a43+67108864 8d09fac511f0037c94037b818268a2b6+67108864 774e3872523f78fdfd6b6ebce2b68c2c+67108864 ea5f1d513ec4f40d70c8f8e0a8726002+67108864 ac296ba33f17bdb4e9159024f8e4cdd1+67108864 c9a195ffbfd1259b22153695b36a2bbd+67108864 38b4931a017a78fcb6fc16b0e72f3ce0+67108864 702d41530330c10a587400342185ed59+67108864 05ab5fde413857fed335ff0deb326a30+67108864 2651b1e0e66b3014f752bede5e011723+67108864 a12615fc13b38ebdca36487bb3591724+67108864 e2992d9c680a9f7e11270af1b133e529+67108864 fae3470df006aae3f2bb2cfbb0953ed3+67108864 701c0290354a64872ad219a9244fb8c2+67108864 b93e0aee1e4f38b7f7fb4e16ff8d1ffc+67108864 8964e1e6e989bcafe111522b00d47713+67108864 51e9e5dab456ab0eb7b59279c4ec06ca+67108864 725c44c44536b8ac3fa0a9ae25d9baa1+67108864 4f39315c0645812f34179bde2133fa19+67108864 7bd82c0b2754702ec71a0634a9dc1d17+67108864 5de934e6e85aca1a204a89c3e99824a2+67108864 7bfe74cb7feff036a0ee21f370712821+67108864 0fc5fcddb3c53f5b53808cd958295800+67108864 d3f20cd29545736bbd57bd50f07b1579+67108864 0c64435faf1ab3845fcbb0e32d82e999+67108864 ea441e811b3520c2822c6a953144413a+67108864 d2b394a3ab92e0856a230fc11ce9065e+67108864 aa0c5f125034b3dba21fdfa3695d6a01+67108864 a8f0b4118ce8b8cbd6b7774679032512+67108864 339ee569c3993742cf8f7be8d874045d+67108864 1b38e6bebb07ff094362ee79fcbb6989+67108864 7c0e35f1ecceaf8a7588dfc8d3aa076c+67108864 fe15358161d85528453800ca110a449f+67108864 e13a94ecade9c1334056f51001161f0e+67108864 5ca9d755a0db3f62206b87f8489fa84c+67108864 1005fd30a8e26c4c04f5b39911722e7e+67108864 21d7603eb5492c507fca121f14b8d132+67108864 b1c8f98dee262a8a2084e1382454942a+67108864 
1be9511623dfe333fe39bd2677811406+67108864 e6439cbf2bd93d45b11141be27fa80fd+67108864 12e41768e400ded7a86e78ed33446b65+67108864 3d99f810b556b5eac4247c3317b8616d+67108864 23632aa22a1a08cfe7a899f568511e87+67108864 42b10f75f25df8d105919717d081a850+67108864 aed75190ef2bd0c0fb2fad3e8a943934+67108864 fc57fc90c814367b5b34429fd52b4ec1+67108864 53da1e5576aacde23c1c86e1624d7b18+67108864 ebaf7a0946e3fc47a636389faed47d09+67108864 7066d5399a64e1dd669506972b53bdf9+67108864 077a6247a557f0f648fb492308f4cd74+67108864 0e0a5887a578b38577464d267e2b6df9+67108864 577478f28f10be523db58f30a56c13cd+67108864 df2840943cf215aea832ffe6252eda34+67108864 da6f29900f7677fe60efb9729da88c66+67108864 fcb41e115ba071a96b0a3b49e8bf0103+67108864 8a753ce795051334874fdae207855829+67108864 b13343e612361dd8e39ae29e41176e5b+67108864 704361fc939d60722e7bf633ae91d53d+67108864 a1cc9ac3b719fd41cfc2bc0d94c1bd2a+67108864 cd40ded00769b3b4bd845841c20c9b34+67108864 7c72ce73e8a5ad438ba8cdf99a65f9e9+67108864 d932108c4d3a8476491092cd581a905f+67108864 910a317116b2dcba43584187fdf33ad2+67108864 71178cdee4f382d77af80021d908457d+67108864 847b3fbb23de4510d6a10f216b1a210a+67108864 3950c35ae2852796a55eeb319f22f6f3+67108864 bfb8801bef84493b831ae34a5f73ca4f+67108864 955a59ef5011ab222a40de7cf3b507a7+67108864 e519cb7c85edabb54261110d8060de80+67108864 da074887680e187ef1db4ba128997cf1+67108864 b18567fc4c64b4aa0c0c0f204a01ff97+67108864 a41418794f481bf2b8409ec6e4267ffe+67108864 eb9b344f48720cefc217da3cce124e8a+67108864 ff92e78105ed0cb6cc6cdc3270faabf2+67108864 a2d0285e1207083dd58bf9002abc7fc2+67108864 147a9ed33d92f99fb89b685140a732d4+67108864 3f0138f29d10d2a73cdfa2a83d5653e4+67108864 4247f786d38761001e7fcdaa703e9b6b+67108864 e2974a5009436d37873f17608e406605+67108864 d491febe0a56b11ce2278deec0f617b0+67108864 8d9439a2a051307fd164906bd5c6bda2+67108864 8c5cfbbbfb6c3666eb4459a98921c954+67108864 d5ce682bd4f98463fdb3d773f518acae+67108864 e9dd69a37fc45ee1db12d0162fc7e165+67108864 52eeca7caec19ca02a391ce6bfa9be07+67108864 d64c37f9d21f14bf9487d5628a30eca1+67108864 f033519daee269196bfe92d70af67b39+67108864 c1282ca6aceba771a171f726d6a6ac16+67108864 1f2ca1e52852d72e5a57a1b16cbc3f74+67108864 54636fff02ff0270fa05e4d8cf29d7d2+67108864 0d39d0910ead42e48d1961bddf7ff5b2+67108864 44da2d272ba8acd54826bf012eb40e59+67108864 d80fe9f849963e7eeae297a546fcb845+67108864 0b9047663ab22c2a7dead55b79a368ea+67108864 0e0fd7eb8ae183106e9983da4f46532e+67108864 aa6d7435fdb5b00a690e3582f82cd6df+67108864 59b2fd0c4db35ecb13fa5e4dd3160df6+67108864 d1c103ecd75bdae037a7e4697a41333e+67108864 1d8679726c26461224e0a2e5cb0acc5f+67108864 f61e8db26fd4758f7cc84569ab2b028a+67108864 a2cb1b000b5332b21f725178bba5c4b5+67108864 0251aba53ffd3f3ba6da0fe53e80f315+67108864 0adc72a031deea5b21caeb4d49dbfd1a+67108864 da64eff1540d79841d656ca7191aad04+67108864 fb2b14ddb10660379ce55f727527ff2b+67108864 d4338af18b258bb8ce4931ca7ed05064+67108864 b59bccdac8c48e1a690fec35b6f8a8a4+67108864 e44379d4229ff8d166628aa4ad441afc+67108864 a2edd9b7190d04989e9ace2fad760116+67108864 efb094f97d5e1e44ea6534fabc8bc67d+67108864 8cfbbe4f5451f1a9a7667413b67abb00+67108864 8801376aba9e6efec6549b0c3e29c978+67108864 f8fcc193b53dd209a896d007327bee7d+67108864 7de9ca296a062cf33faf9ea8f312ccb3+67108864 8d69bae5e32e7a4ae1f6dd57e0d4bc27+67108864 09cb36b4aac4a4230b0f1933ff032e99+67108864 1b69f061dd6daa99762976d61c707490+67108864 b54b276356bf5e28514a9224be56eb30+67108864 2d95083444f68bd2384ab2482c545a93+67108864 8169dcd201cdabc17a0c651d69ddb5b9+67108864 8fea21ea4fde472cf5b9222bd2966fb4+67108864 bbf9e95ef3f9cd4eb43f143091ea7d31+67108864 
081f91f057e44a5703a96560bb310e23+67108864 b696d01611c9215f4a93fc7684638ef9+67108864 498b72c742dc776c2b0e1525dbe16c2b+67108864 56b958bd8492a7d864ce60438db57311+67108864 333bf76155e916a6e4d72531c12b3b61+67108864 512c2e49f16514c68dd06a242c3006f4+67108864 0fb48df3a292cd784d3a4e1df2efea3b+67108864 b43562e4baf12fda2ef2f88e655d24ca+67108864 13b293acaea889c5672a10096d0b70b6+67108864 87232993894f0684d3071118528acb30+67108864 f7ebc10f8e47b12167c3b18a9b40af4a+67108864 7aba76f27f35b5c0c1ca8750c5895f45+67108864 889d9bb6f270501204b48e052c763e3f+67108864 8b574864a6d5ce579793d4d232bff6b3+67108864 e19cf0756dc6b54f2ad8bf750415772e+67108864 88e85afb7736347d28df444a048af72a+67108864 06bda3da34e66eb63f1699fef8c70047+67108864 a06ad6b51927d11c35f8cfa86980a713+67108864 68b3204eba15d00312166c99fa33eeab+67108864 98b12cada97a8843dae7448ff380298c+67108864 5ce5c167e9f03631d6d30cb6e45a52a2+67108864 348633f0b38db8610b435ce4f53fd049+67108864 8651641cdca73876e3ee249973888a4a+67108864 ba2e2453070570ecee21e549cdba4c4c+67108864 4c14365d045edf5f7db25a6077cd18d1+67108864 199d38530de4d2bbdf644082a5cbfaa3+67108864 ea8e9092d88d7a2ded26dddaea3e35bb+67108864 a981d1b9cc878eb2825e6ac397204ced+67108864 6c8a6a15f249927de8c391bfa9a13887+67108864 5fb6f06f70251fab1474bd5cf827c4fb+67108864 51336a8ac8e66b4cf183a87f83184a58+67108864 5e7324e999bfc640e1e4b8e193c2704f+67108864 ed0372472af294f0a70ec98f4b12eac5+67108864 97dc3c2bf973b650dcc1b14e4e307049+67108864 ac2dbf604592e32443f698243cf21b43+67108864 4f6b487a03217b6e2db48fd609ac3fdf+67108864 19ce146e7d279ace45a056b40c8b0e2c+67108864 7d22fb7947fc9a8f389ae5aa2681b767+67108864 9dcebdba72dfd50659747073415dce98+67108864 10982d30d47b5909b6ae40d073e2b7b8+67108864 bae58060b6dd999976b61149d05b2d9f+67108864 0fd4d4d81968a7e683c1e736af26ca08+67108864 ef25f3edcc83b407d40b406187c7e73a+67108864 befaf13aae2e584e9e3ad1c7a5e98f64+67108864 6880080067e109394df71b712d201f4c+67108864 485c60ad6add65d4eac035768e43db0f+67108864 902c2519a1d327098a8936486f237c3b+67108864 e0a1ca44d4a9b71efcb95a8690283a77+67108864 5cb09dac3177616dea86b0683c63d379+67108864 5a08f945265debf4ec6cc61e51efa702+67108864 f0179759fd17199bc13902de79374753+67108864 835e93df3ad2cfcc3b6b0b0ba26ecd52+67108864 7ffd15a96469d15bb821573bcb406900+67108864 282105da606f63b2bae3bf91b8703ad1+67108864 0e67f64306d1949d07c301077279a257+67108864 1ff5dc38a6e08ac8737f7b048883ba57+67108864 0c48efc7f081f704b5e81b37d2fcf78d+67108864 48cb575c08d65b849e336a528fd1fdef+67108864 2f89ed035978f3eda7402d3a352f172a+67108864 ee96f8e4c159fecd3167f2d5f89cd1f2+67108864 44c196f7b06576ca183baba37c07ed0b+67108864 c394702750ec25144bbd50864817f412+67108864 ecb5f5dfaa3ecbeb44f850c3f39e75ee+67108864 9189dae667bd9bb7efc65a468657e108+67108864 cf32ab8783731037450e3b3bb1a7e24c+67108864 970130f22be6713fe29ab1b4654508ef+67108864 3eec0cba1a8e599c776331fce72eb135+67108864 40f024255335221fc9b5e35147956717+67108864 0a23f0e47c7aff96458df2d8a4337b43+67108864 aa4dbb73141da37741ef87f25908de08+67108864 b4374046f4aa89c1bc56ab4ff243712a+67108864 5b3b59037994aa980b394d984a107364+67108864 1856eed7f374fef9a6a2d54855646dc0+67108864 c58269b0c2137b24b4ab73d017382af2+67108864 3ef84af204b89170bde51ff5b2251580+67108864 47eca65588f21d57d6826ab968ce0ee1+67108864 c1c8a064e8c5cb4998db2f63fbe38aa8+67108864 7989eaff82ce0c9ce44b07d30004f736+67108864 5c550ab03a78a8e95591466c98e73c35+67108864 a9bbfb746766c189c72e01f9c5f06893+67108864 df809e32f246382bd14dfaba10f796f7+67108864 c08575ebfe83afdb9d99c8b0a20f204d+67108864 342a1fff271f3b35fb476136ed1046aa+67108864 be53838e3da15290c9450dbf615a23a8+67108864 
3d15a3114e259ce18e60f7d162b23b49+67108864 3fe7e7a6840c16fe922ee285073a5e34+67108864 0a98281ef6382ece1136ed68f1276e4a+67108864 d8242a4c2212e41de5d3871407813a04+67108864 89161d238c95e16e74ff7e98df4dae3d+67108864 2cd9e980b0c040903d723bcc097873fd+67108864 58c233f19a14fcf85763483343f68ed5+67108864 cd9ea5cb906ebbfe11989f6b2178b1e5+67108864 1ed5d50de85e8b1aa8a9fe351c17ffb3+67108864 b207e05f95076c307944bcf66e8271b5+67108864 1e16d3914e27d93a30a1fe14900d5bd2+67108864 3dd41cb336b5dba6ce1ec12a12d70838+67108864 0553a8a876bcd08a9e59b672dea3cadf+67108864 3270098cdd3a653802b64b97e2c013d3+67108864 74a4d14216f2e95c3c762fa899810245+67108864 c8f6bfd2f49cb68d2c12bfcddb7a3a14+67108864 d57828eafb6113e1378635fb7d811ef7+67108864 4ea5da07cc79e707cfbd46a05e71f626+67108864 fcf7ed7fd390b222f75879caeddc10bf+67108864 a9b52d652eb5b057507664e55d8c2065+67108864 1952492c2a5e553538b01b8c68754fba+67108864 1876ea6728d7f5135308bfbb2f907f7c+67108864 637eb85d012094cdce43454ac707b186+67108864 6146cbfbbde640b0ba81721ad730518d+67108864 339994a175ee6af23dbbadffabf5abaf+67108864 2512f64ca51feb66de4943bd63de1389+67108864 d45850194dffeeace32b87161df8b637+67108864 70e746b2c7a2785abd140bcefb359d5d+67108864 a675ca70dfc82b1295b8e934777c2963+67108864 89b608bcd8e1ae97b1b54a4ce6534f25+67108864 93fafcf8ad42d73962773d45aa703e48+67108864 b2ee61842cb6035d392cb39a9f9bf3d1+67108864 65201c355aec6c098ecbc096d807a94e+67108864 efc47cc3b58120afaa5b12f9847bd9b9+67108864 815b18871706defdbb32f52f79888b6b+67108864 cb212280920fd14d7c083e244fae8f5a+67108864 0563576707787553605de7c2c3b43f69+67108864 706b06048a3c162df3eb8c76fa4b9a1c+67108864 39ce8aefa48b3e1ed7387d9374ebe607+67108864 b68c296491338234d53ee8d7b80e20fd+67108864 6a604b76699c7b5d1f28c783ba7b785f+67108864 29ef63b9fb1236065d93d649c14a7696+67108864 3c42c2f540d3961241f8ca697ce4a15e+67108864 0f2212e11f9abffbc05b49c7085e6692+67108864 66bdf3d4ba4a002e86e81a0ef235c6e3+67108864 84dc9f05a6292ae7a1a368b39ec5ba61+67108864 3a9889f10bf0f526c8de4d9ad875cd25+67108864 8953d1e682dd25c8df3cd5b659db5d4f+67108864 ab7be59c61570efa5dc2b78cf83676bd+67108864 dba9ac0eb05c49a6cb308074a19206d8+67108864 f06d174cb6ec6a6ce10d0149c62d1a0f+67108864 11d2f482b141766b0ba4f53e59c2188d+67108864 9fb4a1a3ddf95e2c7135947af3114e2e+67108864 275bfb636251eda218715c1aff06e503+67108864 a0c89242b3edf53737070abf6ad87afd+67108864 8b4763a22fa4a3a9f045d22e4b11270a+67108864 2cca0708700a877794c8590619f2968e+67108864 b9d7eb594832c2b65a93d83ac35a19cd+67108864 d9f5beeed66607e179827b0fc6185139+67108864 f97887998347fd8619bd35cb03da2a4f+67108864 52c088791a443a992cba0ea3fbf12e1c+67108864 78f71f4bb45320072b2fbad6e5725055+67108864 652e98cbe0e04a9dd5d2b6ec2fc7c04e+67108864 550cf40ee7d2487066f44d7925c7b709+67108864 00d1a4f4532f950c85668a7748d38cad+67108864 f085dda612a3afa91c9582b0c5d4aaaa+67108864 fe1b1abfdc6bb203c38909ed0d124709+67108864 7316d6a08a14e2d840d524ec3700e837+67108864 e00f6b79c66e8e1e23f52c97467fb161+67108864 a3a1206eac6c1439baece870f922c446+67108864 fc677aeb5ae37d160ce7f658672d0352+67108864 b306ac3b86b18e3df67c12af3432f0f8+67108864 de6bcace6f2bbafbadab0830f970c581+67108864 30fb4d875646e3ee8b9f284e9761de9f+67108864 c2c9e0be88875d7fe8b93ea622f9b44e+67108864 dec0fac2abc6f1eaa399e5640819a000+67108864 2aaf7d4f5cfec013454efc6b1e702d3b+67108864 5a9d28e7cbf92495289d4719810580bf+67108864 5f618fe336664eb82b35dcd1a3527305+67108864 52b997bc9b934183e5b69029d3c90dc6+67108864 fdd35a04ec9d5938f7ba040422f09b50+67108864 b8eec7e11bfee019bede22b20fe0b59e+67108864 fd03ab91847277757e25c4214545378d+67108864 0cebfabcbdc53c4366f9fdb3c58da369+67108864 
37fc9e297586cbe133823b0dcd74151b+67108864 5413d6e80ab9b9a4995238f56387f920+67108864 b65fbffd1cd35708767b822d354fb6c5+67108864 369d2566eabb30c55223261f52561e05+67108864 f6d5f3b3c0a22b93e1dda7a569813098+67108864 2097a5b95515dcab7170d6e45f15644f+67108864 651a39b728d858e73a97317f28f01570+67108864 1fffdd51f56f1df30310ed957e7b6940+67108864 1f3ebeb7f6da888bbc10daefbb389d86+67108864 0446df2210a9bb8aa78424ea8890dbfd+67108864 79ba0e4df90bd1aeb12f1521fb29150d+67108864 5eef6de0afe26994aa2c7cfd31c97c85+67108864 250e64306df679e7a1abca0302c796ec+67108864 64e0ef4c14f7c913b48a2a2ee2610077+67108864 8207b3d6919395a87fd4705d83033dad+67108864 673ff19dd35b2c519c4bcfb158976f56+67108864 ff2885cbdb66c872885e44c43bd8e68d+67108864 5a2e771e4b0338c1dd7647d693da555b+67108864 ee9f602d74e0fb02bf26d60025517511+67108864 b53d146b9ad8d8470a9ae1f8982f693e+67108864 0ec4753c965090fb02b2d4881cfded65+67108864 258255645bdc327b3f5aff2f5f308b86+67108864 eca1fb054e55cbbb106ec7b77987e4f9+67108864 67ff2f4aa9246afeee983520baa9f28f+67108864 9088128cd4b27cd3e49f2cd16605cf3b+67108864 eef9ef8b7d292d53b512844eeb1f8c14+67108864 82fb940c018616328ad5b1f40573d0d0+67108864 d91badac0cf1090d6b9e0c4835095d41+67108864 8cf02691ad8e84eab3f166cbf21fe456+67108864 d046af96ff4ea58d232fc61a0cec06c7+67108864 90fc539b57ac863cfbaa510a6d7a8831+67108864 c56a89de61ab11e03b57fe0652a9f824+67108864 ac049420424f3a7320ffd25124e73097+67108864 b331df4098dd950ed2127110a77d69cb+67108864 4169576a334d9d6d0ad1acf785de4beb+67108864 8417499360a366a19e803eb4bad782cb+67108864 ce9c12f73a782f7d8c4adb1955f5041b+67108864 67ac94f8d9252daa60fd83fdd485719d+67108864 b6745ef3760ed93c751c2a6ad463c453+67108864 3e8ca95376b731465cfdad3137debd5b+67108864 3597ed23a677c8510948fcffcaa32174+67108864 e183a236e71b541e936697989c953fdf+67108864 adb1b761eab093ccf4381e483767ad87+67108864 61fe4674dea00f7dfffb572981da4eb6+67108864 59d5521497d7004b1734f8f12d50995c+67108864 708ab80ea89529c7a828b27240bae2c9+67108864 d5546b0d36c4618401eb534f8ef9ae67+67108864 146b5cfb47f585add7fb91b3eb792b3f+67108864 98af89fbfc592389f4ee90001b73b7c0+67108864 f1f7e7a38ae792b1676958c38a079302+67108864 dee5b4e934ad4d820084abe03ec82bb0+67108864 233ac5529b4bddd3357ba114cecb8265+67108864 2ea7c26b43543828173d265429e5e9a3+67108864 554279d68a2ba24f37960716668f57c0+67108864 a652774854ea89b90addf1e5558b03db+67108864 2f32839c01a3178754b207ccf2cf774f+67108864 19e1085e4a9428f54516d2c9763fc69b+67108864 b08e52aa55ce36adddc8c827a7b56a90+67108864 ca5a2cfa551dcc4171a8548b2e75737c+67108864 07d0c5d3b9da6f020c49a09ddab88c11+67108864 297c14087d08799e3cc758bebb35a998+67108864 36745bcc5ab5e3b6db6d73d685486632+67108864 145570316045be126fb77dce478c9e7b+67108864 7ae40af44f48b1940ae5f1eaf7149d8e+67108864 a040da7c89a88d383f6351a09881e5c1+67108864 347090b7962725c04375d35b1426a922+67108864 969ae2ca582911e37549ab48999596f2+67108864 b3d6aff2b8fcca4b97080ec1a393d4ae+67108864 f2cfa7ddb99bb01b7fe0e380bbbeb743+67108864 459a7ea1a6c0b502ae06f87bc8a22d85+67108864 d6c18cc1df3b7973d1fc1dc994e90e7f+67108864 7585f0be7bce0bde7246e93f26d9663f+67108864 e75e85ea0ca03510ac43f793a012f71b+67108864 69b99706e27164ddd6bbebcc34704e2f+67108864 6d54e0c179d902173ec3550561baf014+67108864 6b8d7b1fc0b2db4a2eb4a62026df6f07+67108864 454dcf5308ef0d3cb8d50963536b47cf+67108864 00934415280fbe98d2f428ba54694504+67108864 8ce63e908997f169795175c2aecc2800+67108864 d2adddeec7da9fb3b9eb973b7dd62301+67108864 df334feb04844764f8d6deb418c8809c+67108864 c400cefe791d3885f256df23f156b790+67108864 3fe67e7bc24ff07ee05f1d5010f65e59+67108864 52dea4a8d0b2076971c0d8662f4012c0+67108864 
4a361fd451f8f4f645f300880d6fbea1+67108864 a47ed8eebf37a796eaf28fd5b8a448f3+67108864 6333c635f5f8efd608837c9c43a54809+67108864 c5218502430171c40ff8329ea81d6142+67108864 e40b9b1935f2f8242ecd722c87876905+67108864 0fa2bd707cf80dd38937420c88ad07e3+67108864 68a7f964eb4f4f32f42d2fc0e4b4c168+67108864 d7031b0d93a9e6ae52fefb38ea359d0b+67108864 a8cb2892fdaa1ab06152ba266271bf6a+67108864 dde2ae6293a78d523de73934580adb98+67108864 64b31b76d0b3f567cabdb19ec8a6b5bb+67108864 5722cd27250a7b4f387f689eccf1387b+67108864 9a5a56f5ad46ae4cc425b52f5f66b0e9+67108864 2c26af8c2644c3130ce2630c11b55f16+67108864 b15cc2d99d1814ea3269caefe5aeed1f+67108864 845936a2571203663e760fe969cb7cd4+67108864 cfea2e5fd2d21b39a0279c0ad8166b0a+67108864 012754e502a0a7ec6ffe98299592e496+67108864 ca04a3214136dca30208e625288d50c0+67108864 fb015bda0345f425717e8e2da41515e1+67108864 6375587f9cd8b52f461e457d0ba5e6a5+67108864 18ac65bc4f38174709f34a54cb0bfdc3+67108864 fd3afea501ea6dc765eba676c7668d1c+67108864 9e576d0ff9d0f7ef7c55a9142627f48c+67108864 aafb3340fb59fd4e1ecd15b71486cf71+67108864 f8579bc6ec704e334984c5ed7626061d+67108864 67798d6de9a3a161763560a25cd5fdad+67108864 0895eb15394b8f3eb87630ced9c54374+67108864 e65992f5b348cf08f7f94c83abdbce9b+67108864 6e3a01b14054b64f85bee2b089c82183+67108864 5a02dd377136f74a88f37478d1465e4d+67108864 6d6cecaaf9e0446e484448dd62ee963e+67108864 4e1cbc7a34dc967a034ed3e9cba67b64+67108864 a7cc7e1328300bd8f32504136b084c80+67108864 648ce1fecf63126e094fdfc123f549be+67108864 164e1083eabbad57b7b4f72810a655db+67108864 452a5be2713d4931248299a6c563cef1+67108864 0ef85624c448f6e4ffbf5c69f0df6a63+67108864 636f3cf22e32d186b16700b34b93f34b+67108864 1005aeb33da4e9fc72bb3a0fafd0c3f4+67108864 6ce50685ad27cfbf0b83cbde29adf870+67108864 f8079b077ed19ab55860c83f0a5891d1+67108864 1e4471af55b217dbb89bcbebd476dbc1+67108864 0941ab2b20017001185b8f7d96b3da45+67108864 218c586c39817c072148fc1c9323fccd+67108864 c6ae2f238bec21d75c463888eb9eb34e+67108864 17e43a11af424ed19ec3e37208ccd7c2+67108864 088975b838d40639b140cacb48f24582+67108864 d3499a99641de103b50eec6391c8bb85+67108864 3a3925e9ce09b308b7d686bd35ded3ad+67108864 4be279b24000df6b90f3b21346a95ec0+67108864 dff78a508fe2d378362073a634fc8568+67108864 7e030eed13b4d92570bf27cea056de2b+67108864 e575167c80666518e1a1fcc37e5be977+67108864 59bf806d56b3f1da60d7e1ea1e326732+67108864 7ab1600fd8e0ea33d4a1522ccb4b16ec+67108864 b486802203e725f884e67662d60a4bc9+67108864 e2747727cd64c67213008cb997a182af+67108864 bbcd31beb88d98d982fae985212cd467+67108864 63e43c3dbf8b07f8823d6d6d8dca4e5a+67108864 1aca07c1a84f7370026399e36371059a+67108864 59da7470ed1691daa40159076f00aacc+67108864 2c67c1687441dfe2dd335aeace7960a3+67108864 148352f31cd89f167ebecb37709763e9+67108864 b15ebf827a05848de50be331fb5480ff+67108864 5a5e03fb89bf5dc0e4b2ea691944f772+67108864 8625d91e288b99dccb7b56561ee74876+67108864 3656bca4544924b720b82283a91cb76b+67108864 5fd2f6f1fad7d20c0e973703ac6e3669+67108864 b33f494062f3cb679c36337ffd306507+67108864 707dba27750617a509a96a2000893ef3+67108864 c9db9063eefa8c0c352ef3deb05af1f4+67108864 77ad729701812bfe2cc39ba1750c2051+67108864 632b1bef7f64c435bb783d1ebf397196+67108864 b9137b8556e1d9d3c47c79d756647182+67108864 146b878dac4ca3bb0a728d395b42a74e+67108864 0e65fdf451b17e75be0f91786077f848+67108864 2926796ae841754b2dd1815cbb707043+67108864 c0cae36e934007991f9d56a0caca8158+67108864 b8ec664a45a543ff21447729cc4799ee+67108864 4d3ea3381baa8a14c71628a95c4b2d82+67108864 a1ee68e9f6381a4c1723d2b221d182fc+67108864 51383ce68788f81c011a8b658f425b8d+67108864 39738b40415353cf36bcaebbddae28b0+67108864 
49a9a08e0f163045fb4bdb87d94c7470+67108864 bf896038315fcb8c40a628ea135cdc31+67108864 3e5ca3e8307931fed9854d8bc8d6b04c+67108864 1c7a2e1225976dd6ad6b6c57d6089da9+67108864 8d970c8f25369d3ddee7897a801b3600+67108864 4099f0a3b81e8f24097fd8778a230cd6+67108864 1190633b75127dc6dcf564a81d903afc+67108864 aabcc49889ba529bbdde745f7a4e2a06+67108864 4006b0b18f997427d6b01d7488875b0a+67108864 6c02ecaf633dc323c688d0ac7c6a42cc+67108864 341c3efd284477d4540fd53938af4a40+67108864 070d5b5a0274690de3b95bc09ffb78dd+67108864 ebdc0b5a4395cebc98e0716965c47a3f+67108864 2ef29fc6ddb8118b08ee3960029506bd+67108864 5958bffe4d6506019c1596d8ed1ba6a1+67108864 9c15577caab6a3ebed7ea299e32e4dce+67108864 6cb435f99aaa250079c37629628d5520+67108864 4b3c90f76918b88372745d89eeb9e084+67108864 837c2eb79a671d35aa881aab520bbf4f+67108864 92f0463f8c9dae280f9a162870679502+67108864 95f70fcce64cfabc5b8707b62e6d245f+67108864 64ea34ea306f87277008613ffc46e4f7+67108864 de302fcd65dafb52be9e0488631b0de1+67108864 feb4e1556edf524f8020b14e432fb73d+67108864 f7cd9b1aa21cede7078a6dde4a9a6d91+67108864 061f4e1dc36630d27d4ce11239aba66f+44772532 0:6148:.DS_Store 6148:4096:._.DS_Store 10244:117550693231:PG0002577-DNA.bam 117550703475:8805224:PG0002577-DNA.bam.bai 117559508699:936266194:PG0002577-DNA.bam.fa.gz 118495774893:465904647:PG0002577-DNA.bam.tdf\n./PG0002577-DNA-jlake-germline/Assembly/genome/bam/realigned e3d7a51e4c0ead03e3e3896342f471e0+67108864 42ba06408eb1234fae80cec40e58427c+67108864 1af5af879cc2eb0a208852e0211e7625+67108864 30a07562440868426251ca573f2d9ec1+67108864 e9e0de7a6665939596b778ac72043926+67108864 e0ef7fc66317baaf856a3f9145731d98+67108864 b2ca4708a9098cdafcb7416705031238+67108864 1bbbafc913e7312f709f2cbe02037bef+67108864 1d4e3218c9bf91ee9b6de69f0e5559fa+67108864 8a089574e5a9a8ce2882b06bab082314+67108864 7efed976b32544fa933c38b1c810521a+67108864 cbf8af44acde3ecc1cca2bd98777832f+67108864 19d0c9bccdf28f7cae5dd51cb6f3883b+67108864 3220af4613dd7dde52fbf72208dedf65+67108864 41447742354119644391fa6e8ea27a92+67108864 1b003e733ed594760ede9079ab5f0997+67108864 5283d84d614113b2732c931d01f47ddd+67108864 db29a5faa61e4301c6bf71012ad1d4d5+67108864 ac0387a2bc339dca235c1f0eb1d6e3d9+24178596 0:1226675356:PG0002577-DNA.realigned.bam 1226675356:5462792:PG0002577-DNA.realigned.bam.bai\n./PG0002577-DNA-jlake-germline/Assembly/stats a3d360c03f346f830fe8c247fb6b6969+37754 0:17936:Reads.idx 17936:17936:Reads.idx.bu 35872:1249:coverage.summary.txt 37121:633:dupCount.summary.txt\n./PG0002577-DNA-jlake-germline/Docs 5b614c0ac2f1b7be407433860ee2a553+5625667 0:277564:1_IGS_Deliverable_Layout_gVCF.pdf 277564:304833:2_gVCF_Conventions_CASAVA_Release.pdf 582397:332651:3_Illumina_Annotation_Pipeline_Documentation.pdf 915048:3765366:S1_CASAVA_UG_15011196B.PDF 4680414:903872:S2_CASAVA_QRG_15011197B.PDF 5584286:41381:S3_bam2fastq.pdf\n./PG0002577-DNA-jlake-germline/Genotyping c73ad23d5e2b04a552ba4172a5efb1af+67108864 6bfe5a7cb20aceadc14486a51eaa81b8+67108864 6c7822f824a85de9c6e5a2e1e222777c+4019621 0:138237349:FinalReport_HumanOmni2.5-8v1_PG0002577.txt\n./PG0002577-DNA-jlake-germline/IGV 3ab66c4b117c21a43e1b3e921d32cf49+15934904 0:2898:.igv_session.xml 2898:2898:.mac_igv_session.xml 5796:2526:GID_session.xml 8322:171972:batik-codec.jar 180294:202:igv.bat 180496:43:igv.command 180539:15725768:igv.jar 15906307:42:igv.sh 15906349:1150:illumina.ico 15907499:26167:license_LGPL-2.1.txt 15933666:1238:readme.txt\n./PG0002577-DNA-jlake-germline/Variations c8be77db5bd5c21d538743aca8977c14+67108864 c7fe6cd59645d31b4de811ff627ef4b5+67108864 014529a882403f6d7f425015980848c6+67108864 
dd7715f352e78bf2da032fa1ce2b1377+67108864 7967b6e51b1331f996358898955a0baa+67108864 f7491669a7747705bf63780078a8da60+67108864 ae300af114c7a1adbf251db77ef188b8+67108864 a5d6beb9c88ba5abf7003d34ff44bbe2+67108864 f1cc90f7192ffd09197c429d57400f07+67108864 13051c9b1052878f8a49992355c48860+67108864 eb964b9fc0515b5f6248fcc83a6af015+67108864 30bef0e73685998399da830c54ac64b4+67108864 0b3c289822fda2d7837bdc140ef18b62+67108864 ec49a958f1ec1bc94fd40b99d3baa0f9+67108864 39bf6e6bb4bbac8e98e46eee0751cb56+67108864 f6d35577522c66c11d96b0bce2359b87+67108864 3ce124d6ce052da22b79aa82277879a9+67108864 a6aa4f7b73c9bebc05b9f2d71a118dcb+67108864 de2132d64ee96ca4fa5c8f0bcf29529a+67108864 2305a45a0a9a219b0b992612f1954808+67108864 2eb2714435221611c91295d4b4036cc9+3233741 0:1209008791:PG0002577-DNA.genome.block.anno.vcf.gz 1209008791:3563086:PG0002577-DNA.genome.block.anno.vcf.gz.tbi 1212571877:131236358:PG0002577-DNA.snps.vcf.gz 1343808235:1602786:PG0002577-DNA.snps.vcf.gz.tbi\n./PG0002578-DNA-jlake-tumor 70ff27bf1044858b30152546a861370d+1848377 0:6148:.DS_Store 6148:4096:._.DS_Store 10244:1834705:PG0002578-DNA.pdf 1844949:76:PersonalGenome_Mac.command 1845025:704:PersonalGenome_Windows.bat 1845729:2648:md5sum.txt\n./PG0002578-DNA-jlake-tumor/Assembly/conf d6de8e335f14bb99df74e40d37649876+159230 0:61440:dirs.tar 61440:11017:project.conf 72457:61440:project.dirs.tar 133897:25333:run.conf.xml\n./PG0002578-DNA-jlake-tumor/Assembly/genome/bam b27667638ac1b82166eb4158a276203d+67108864 58eeb2e10cd0859bb96886a205bd4aaa+67108864 667e5de545718d41403ace0a1e52572b+67108864 1c5a59fcef82d558aea7f8402652c97f+67108864 f89963d6b7d2dda6c5859ec38fbd39ac+67108864 8446e283f77664263ba79f6c3d84b965+67108864 d3b974ed13fd0e3f4daa88611e9cf97a+67108864 cd6f8cd41438efa36f836963cabc14ee+67108864 dc4c3d32095842b4542cdbf678f8638e+67108864 e4163f2f75001d911c55291c6557fee9+67108864 a152baede8c988a2b79edc26d92859e5+67108864 203df769bf775c7a903427b47cd50058+67108864 2e1fd6d98da402f7057d9e42f31a6855+67108864 15694d0809b5cb0b9b62e278030fb9ba+67108864 c5659c15bf0a35bdd7c9ce9521caaf17+67108864 4b331f97efb322bab7823f60dc33cbaa+67108864 ef5df5e9be8cebe67752c1ec7b699360+67108864 195e68b05f0175fe26abfe17a52133c4+67108864 d1df9508f48008dd0897e986d5e0a095+67108864 cb9bb9701109b42927d21dc0c441c106+67108864 3c544db7889c09ad04a683ab65dbf905+67108864 b3059eeaa6c4be575ef6442d7d686044+67108864 6f7d0657e6dcacb680e2c18c7ea969f4+67108864 fd19754ef54a96294fa0be3c235190d3+67108864 d403cf9deb5d300471cf491270728769+67108864 c11e3a5138e7e73a8d462a817284a578+67108864 31a4eb0282b6a20b1db03ab537f27d90+67108864 d2925bfcd512c54f774b28371321e80a+67108864 b10985d7e9eb98adc7bcd47fd51d74c7+67108864 cd6611da61290e6320c85aa2e64b573c+67108864 790643150532ca3b87fc87b398463bda+67108864 d73db36a115a9d62d3dc624a68263299+67108864 428b2cb5bf196058426fa5cfb0ab1f55+67108864 0882d387d810173e5466f1659f8ac150+67108864 c5602579109a9527c299cf23ff8e90c0+67108864 1e51d370499018e399e9da092061c13c+67108864 e49696b1edd093737fc543f594f55f6e+67108864 19b700f5e309bf97a4bfe5cfbea39768+67108864 bf0f28dc79bce5d2c5f1562558f27447+67108864 4be9d66602af08697c4a1bfc3c31841f+67108864 b6ab46ebedea1f2232120c1d390bf39c+67108864 ce12df5c0931d07646ba92f9c97ddbf5+67108864 d4afb665643b3b448998e68bbc9a91a1+67108864 92ebdbd5ffe74cfc842bd6ae44d11bd5+67108864 57faf1813558e0a6c25dc13bea541fe9+67108864 20851e6a10cfeda7062427e52035dd8c+67108864 25a70e81b78029f0be4fd632f57aa7be+67108864 419283b83cbd3202499bd4cefb0a1cdb+67108864 1a4a73a0778b404a2436684ef85a97bd+67108864 917f420880916197600f18d403b8173d+67108864 
bd83a677cfc43cb03a6abfbc30b43d8a+67108864 f76cf33b00cddbd5e19d18bbf0fbaa2e+67108864 8d580e6c30d80cedf72fd3b60fca4a64+67108864 ba25a6f251eeb30321f807c9429f51c9+67108864 d003d7b82f5778e576a4a6367b5117d0+67108864 f37d3c41b2fdba068359b21666c4c81a+67108864 ae93098d87574a5f0b86413e8cf5bf40+67108864 7c1d4fc584f7adc3b778852776eb00f9+67108864 e1db975356914650ba3e29d13cd125ef+67108864 853f89dad1549ae9b0ba44899d4ccc8e+67108864 8a182ad3a2444ae6d6da7f6f7f4df167+67108864 a473f2377653cf34cc216528c883a12d+67108864 f0614b5c5954fefc1b2e6e2c4eeb5114+67108864 095fab3d4eb93eaf3662e3d307bdca46+67108864 cc5ef7122b2566b67fc5576a8f5f1f21+67108864 5e495a56cc1dd12d2349a71a066d392b+67108864 ac7834fe596d03c10adf8b689f6f151b+67108864 ee09ce2a05fb11431d2682631b702e26+67108864 e8893b94fdcafbee3257c27255512899+67108864 ef566dc982a25f7723106d859971ae15+67108864 bef22febb5bb8d15b8760795bcc70dd6+67108864 736d1b506a8e928e1911c58eeb5de21b+67108864 8e21577e579ed20b42f0feea41d8bade+67108864 51ae0fbd5bec7e4cf8ffef8a32c1ce40+67108864 66c69c19a031d37c5ce3cc256dfc702d+67108864 503d43e6803675b8387234ee64564ac9+67108864 a29454d0d616e51caa2b352a968d7dd9+67108864 f8c68ced290518c6e754b0e2c45c64f2+67108864 1bbcedb7c16ee806a82bd8b4a80ce339+67108864 7742487b63e051606b7f15765025e69f+67108864 ce33f57c6e8405d27c611991f55aea65+67108864 97e7ac624da63ef827ab9622ca1f5013+67108864 25ff51e2abedd04c71e7337e49ddd2cc+67108864 d92bcb904e10595be0dc35838bfd01b3+67108864 b5107a1d8064062dfc20cf915aac6691+67108864 1a35c6f5f345d085c5b1ce13733b6c3a+67108864 aaf44b9e1927563d13da3e9667624594+67108864 eccece5552f7a4aaf887fcf1a9445fab+67108864 e842b55a948fa0b492b69fd2c029f3d1+67108864 67c57be4efda877f9b44f4d1a2069a1f+67108864 6ab8cdae41408d27e4da3682450177a7+67108864 7aa6e963e26b2b987ffcdf223d42be5b+67108864 999b6b6ce0fcea94a4261395eff99038+67108864 acf4418cf27bd8ab5e40e2d6004ee9dc+67108864 ae3f390d34b4a92c23053c44a39f0980+67108864 b2df89979a416c6703222503a8722b7d+67108864 325a9362025c52c3a93daf3a411ff54e+67108864 b6b885513511313a706768655b770470+67108864 7924347c70e23e71392ac3be1a8dd202+67108864 81b852cfa07cdedbc467204835f346ec+67108864 c14a909737d44c5ddbb31d8bbc2301aa+67108864 8eaaa6ccd27b0433a16379fbd46a46b1+67108864 7b515b1799a09df1c53ba8a42afaaa44+67108864 da78adc26bc2739f4778ec04cf4eb545+67108864 f89e9a83a12f6c20c99e2a8128b24480+67108864 32cba62bd7e4e0d6c669059d9e9e8187+67108864 9c98cde5507452e6e66a92a47f0b1177+67108864 3ec80b900ddc0aa5351ed295c2519506+67108864 3e440a25f00805e345904f712459d05e+67108864 c6d5b9c92034e7a3ae676db0f9440410+67108864 a459a2ef3c7a523d03641d4eb5b19b9a+67108864 38af85e2bfdd36f39e871d8ec77e99f1+67108864 3ef6789bfbe65b73ea91aabaf2814e51+67108864 71d11439333ded47932b26a0c1291ee6+67108864 ace062e48a19df1816c912cf14ec73e3+67108864 96c0bd7cd898216e1de2de81ba431fe5+67108864 b7b0bb102bb512b191d57dbac648a9ac+67108864 1beb107a743ddb783f1df684a770d3b3+67108864 dbb7ccff44d8a1fc3e2c2f8b56feef69+67108864 376856dc7222a5d06fba46edb8d421af+67108864 936cde3ac43bcc6fabc0dcccbd8c0bb7+67108864 fa0064d0ba4b197ea1e043b39214ecf8+67108864 faff642c6445bbff6766b2173caf52d8+67108864 329226588c99b28febbfafcd465a0506+67108864 b1053c66901f8c9ababa599a215f5865+67108864 52a1e0723389874dbe26cda89a7ba074+67108864 651fc0d719d0ecf654fb68fcead40fa7+67108864 11a5aae83b1855f665d1fb649ca77551+67108864 49fe9f4b9374b20cb104ce019597458c+67108864 ee63c03171c5ae71845f9ec36b5d7810+67108864 dd0eaced2856c38ca14f4b4e7cd1283a+67108864 031b5092c185f12485b6fb60f12f1bd9+67108864 52624c84bd220136bd295b728fb39ee7+67108864 619ce6edd5b950b97b0144fd8cc0d846+67108864 
2a326f8467fcc97e00feb58a2d282e60+67108864 afe28a3567c3d8e22357e54767a66284+67108864 43ddb4ef932cd779efa7803a8c682ff3+67108864 d714bd4ea327fdc41bd030ba0dacfcb7+67108864 cd0b60019265606af1630fda58d7fe76+67108864 245bf3c257e85ecc61c94b21f199c3ba+67108864 cd57ef218d5dab7230df2ed458400f4a+67108864 05c0056dc807fae33dec61b84c21e72e+67108864 a5dbee91918ee2fe8ca60b3935a36f66+67108864 1763cd31e4ce10548f0cfefcdd389b27+67108864 f27c37a2d77439dfb851a797df640be1+67108864 5c2afc1352ac67ca338a2bea4daacb9b+67108864 26b65a719765253a2248f1cbce10ae42+67108864 ccd5dc52fb8f92f467e99bba643e962b+67108864 d6f46c28ec827a571d03d44c1b3f0be6+67108864 835810e6e8acfe67f9cc53f12058f3f5+67108864 fc04ac6ac1bc8f85696cbf34d47e067c+67108864 180ef12c3f768a9347368a3d68f83a76+67108864 6e2cd549271844576032f654ac3cade1+67108864 b0063ee8160312761b2d95ecbe61add1+67108864 5601decb3b13f98a1246f4841254d949+67108864 c2ebf8433eaa439e8a7101e8ce735b30+67108864 acb6486284c8ac5ddc3a5cabe10f6ec2+67108864 f2d534128b117a03e6a1b528fd7739bf+67108864 3aebf3617fcf25db123b20011a96e9d6+67108864 dcef10edb820661ba7e708c61789d9ef+67108864 115ec18ea7aa397236ab039332c60ea5+67108864 8fe654a590ccc870c5aefcd848768480+67108864 d343ec7f640ce4a8b1e9bb49d9c16f09+67108864 3a24e80c488cee7f138a7c9357236997+67108864 3f42b92c722326b05303aac018ccf3dd+67108864 b91bc0cc81c1a2886034555e0de77cb9+67108864 b04373680126dcd17847013f0a1cdc5e+67108864 771305894ea402b6275fb079baf1e176+67108864 83b902682a699d2e509f6148be44cc81+67108864 2f2dba45cbc0d0a3b039bbb15e642806+67108864 28c35daa91968426f743e02c1df09ecb+67108864 ae9c053be0004a45a04bd7c684816eeb+67108864 b779e49cfe032ea858f04a174d5f7915+67108864 1cb18ea4dfffb211cf2e1b3b89684cd0+67108864 700a140d8be71a3a4111efa7676ce751+67108864 0e39fc71ea3aef669e46273e879a9cb5+67108864 eb075d86d17bbe570daee899e531a0d6+67108864 1209317403b766b79db6e691aa51d807+67108864 06511508a5f7af30d2b1a25355709ddd+67108864 23415542c3528187dcaa667ab27e910a+67108864 b21c149732221ddbb710146b6e0dc641+67108864 d6cb43cea2c25237ff24c957ed305c4d+67108864 130240a70807018d8353a49ecd9a114a+67108864 02ee615b4caabd6b65be38fecde69e36+67108864 a44590e6dbeee583ec4e2e556fb4ebc1+67108864 6690720a79e990e60bb53330f2381b60+67108864 938b829c29b361dd9df3345183ad8a2e+67108864 c0979fc234519c48ff714b533b0ac84f+67108864 2925fd6e9204dd1e120fa0b1f81b3d79+67108864 824dacbcbab1e6e4bb3bf19f0998bd07+67108864 31f33dd52e63b4d1915be40521805ac7+67108864 75721f7a0f736b2e89c3660dcdebd669+67108864 8b5ec2f053d27d497a02bbd4430fd680+67108864 ef45bf06c76c11c1e018508ddfbea33e+67108864 46ec11a6412019631b342baf9bd8cb1b+67108864 a936e7b58f6453dea8eeb240f78436f7+67108864 cdc0790f7b0e0fb948d2fd72eadcc1b4+67108864 e4e68a5f78554687704ed9a6e02c587e+67108864 92be6241e5980eeed5c52922e5fb506e+67108864 cbadb73e5d13adc78f1cf5128cf2f071+67108864 a7d0e1b8cfdd33fb776e040a6d6c95b3+67108864 22c7264616a56cb9051f42d601d59e6c+67108864 47518cfef61f5870fc537240738134cc+67108864 57d873bc31e939b64fbde571a81807b4+67108864 e39e5c68e7527446b4569cc06d4173c1+67108864 445a9a6abb15410f43093da9ef20d14a+67108864 c03b8fa6fcfe2a4952410468e684052b+67108864 fee487e4e6e506f97fd349b827a973a2+67108864 2b13ead8d886deaf68c310893dace3d6+67108864 ad5e842788cd9cacaf6380cce4666e4b+67108864 818508d6056786e1705ae6b9c5135752+67108864 53f0b9bc1be7cb1b7a329c22f2ac4b92+67108864 61e1042f92891deb1a3ca198ed66111d+67108864 be5addb257cfa18b00ca9b2e394cb289+67108864 b527844cfeee11e254b667885581f483+67108864 67b373734db9d69ec644c1c7db2bf5f0+67108864 8b23c1262fd32884dcfee4a801af3e5d+67108864 000bbb9236cc0ceb98913aa92fff5e99+67108864 
7fc2ffc8c1b6009d078595b74a62cb85+67108864 5de77a68570a727b13666ef7aca9366a+67108864 54bb5594fb272171844bd33cb42ad9b1+67108864 590f8787314bf797bbbcb2787f613835+67108864 fde9b2191cbf07223459ad210a5c428f+67108864 503e56d97295d6e0a12045c0e12002ee+67108864 03a5f9b183dc9569721ed453644ddc18+67108864 2be6fc27fc31381276862d2abe1ac5dd+67108864 5947b0d663c0a52c05875ec5ce61bd84+67108864 4ef80e9e4dae16a5aae51a9501c616ec+67108864 153df94cd79dbb2dc40303907ee33b80+67108864 09104bb5a00744a104ef3400b21f20f0+67108864 2c124e205a229a9e99c2b304be89294c+67108864 854ba0a363dabefa28e8d360250b2ba7+67108864 95ff4f4b90489ddbc14aa33c7c867e06+67108864 f9373ff3a5039c6a5d2257ebef59dc3a+67108864 7f1abc805d8f6324c34b912af0ed0493+67108864 86ed37c6ba43db6e1b73df095d2767a5+67108864 09b0a46851e882230861ffc4177b4ed2+67108864 1092b56cc1a2172b7084a0d99ec6d0b7+67108864 f70c26b2a62dd5a914208db67d341064+67108864 830ea49cee6d701200b3c08b39be989b+67108864 4255d52ed73e6e02f8bd4c431831adca+67108864 b43115fe604e7741234d9d816a29e89b+67108864 e5ac0b97870c3cadc72b85097df78157+67108864 db9645ed90daada987e1d19ff8ec56ae+67108864 ad29235faaf4367473e2af9fda3d5080+67108864 191eb8c2cd7957b63474c53121e79e70+67108864 8503282417733089b18da8530ccedbb7+67108864 5e0889c82ed4c2a16d34c7ae9b87a5c2+67108864 d45d6abe6f8dee6cd021619e017fd63b+67108864 2892b58d187bcab6e8878b849f43445d+67108864 dc541d6a0af76dfb86a6770bab90e408+67108864 d1d42ce6c4d0499eb502f2b413b84e8f+67108864 b14d93850fc3eeca8d6daab0b577a408+67108864 593d932d2839b798fe3dbdbbf8c3a7ea+67108864 f384a78c5361f354cff3d6abbc08de67+67108864 6cf3d9317a0c58fcaa96a06edbb4c34d+67108864 c2ac341f1ffe67c5f729e0f3318ed272+67108864 08b4299b0bb7afbe68cf2af3aebe1a58+67108864 7dacdb347bcc696d50683b5947de2941+67108864 bf115a0e40b82478001cc3337d05f4a5+67108864 9772c379171a80eaf9167a5229eadb18+67108864 b2999f960a78a00649e7775949e7348f+67108864 3d99cb6768c58bc98dc55785440cebd0+67108864 b41f747b1d5f7428faead1391f9aaca4+67108864 1287df2ffb45e0415a9a368d52f8d508+67108864 7d6e7553dfe6ba49963519eb068713af+67108864 14a8e108d5fdfd7eef34851c695bcba7+67108864 8eb7f420eb13a536f0392adb709885ef+67108864 559e61efc2da01fdb0986679bb1dbc37+67108864 2cec25bda3ad1d892fbee59af1163685+67108864 315616c4097c448b9cd9945744cf6cc7+67108864 6c7fcb4dae7790a43a4cc22204bb6a84+67108864 ed2551b5c8662c3a57dcc0c712e4fcc7+67108864 15b54667bae409ea2d760f93ae26bbd4+67108864 f95a0fa90b01a5ce5e6ade63c331305e+67108864 1cdd6995b63579938be9aec26195d995+67108864 23215d1a26ac2696e27f03418ce138a5+67108864 7b611d9a0f689a1544df8132347a56f6+67108864 8dd426c07fe5fb724dcba9c2a15b2931+67108864 2ce2ba723ce6ebe056066a234bac46ba+67108864 c4e8594a8b7ce23b3da167825c8de7d1+67108864 4e7e54bc389c33c968c46f1a124610e3+67108864 f4f9aceea1e08862d46dd717d8b4f8a8+67108864 a2aa43ceba29057bc66cdb9a361fb419+67108864 5e886dc968de49b8e6abe6d151d53fd0+67108864 99d00caf3c14fd72f776c383c90ab631+67108864 08544e774a050e67f03e202514d1ac42+67108864 b9d6faa9b8f2811ccb451d2910a6a589+67108864 f977340bde7d733ba35fc998963fa8b4+67108864 135ac1eee41a279e3d000a146adb1fce+67108864 b30fa80fdd9ff4691d796fdac74f0093+67108864 a41417fec9fee6f189d834402a9ebc8a+67108864 4ab7b6f850183e8d1af3dc0f94a1c83f+67108864 6c1635db222313655d1a61cd9264299e+67108864 348a8d7f292e600ee53dc9c0cf48358d+67108864 2c2101c9815e64d46a38a5c24720b233+67108864 be8acf03fe71d9d43c3cf59e77ef3f92+67108864 a82c39cf815336c4d230429c9e53ea30+67108864 7c3c4b4dc62efd8c2f3597e2ca1b4c4b+67108864 15f7175e532a484f4be80d6f7e15b808+67108864 d44d3e0dfa084fc5e888e1503fb2d28f+67108864 3fbea3c0a4c7876ce9358de20795b751+67108864 
735b8c3b26ea942e166321f4e0bc5127+67108864 286e2637c88ccf5187dde287a4ca9990+67108864 bbf981a586017a3a8c323d9d0fdf1a43+67108864 5a809f908b75792c8589eb2b23a9b8c3+67108864 d6e897e6dd3ee464553d3c835cafae94+67108864 6ee1aef6e62e295bc7dd79e2ea6e0ccf+67108864 f3baa399c5e8bb8b3db494b0107e5af1+67108864 99e401e71cd20430f26df9cabd414658+67108864 f1ba18255163e40e1a7738d5107fa214+67108864 9be7a2b18f11ada848d5586e353cc2ff+67108864 4f453ae5dfe7744f1f355c8c360cb7e9+67108864 70348d7e74ba8dc70519172645d98b51+67108864 1149a22ebe1be33590c5d531a974e79c+67108864 379397789b87b9c0e44d46dd9a7b0927+67108864 0bc9781da20de560b00f9492bf9d26b1+67108864 965ef8040f95c779b567a3c2c9319e7e+67108864 d86b8775c719b63188321560cca2b076+67108864 6ce9dfda8310aefd648aedf750c23900+67108864 68b06472825519dfb5f743d759708c8a+67108864 9617ea0542a1ebf7496efd4477bb67bc+67108864 6335e90273f14ca152e88a986ec40935+67108864 35f1d34a24d889cd1d86b10b1b233c42+67108864 49ef382f65fe1ec46cb2a8a4f8a960cf+67108864 a3f0fec2599be5ecd19b360b203e5876+67108864 743dc90628f546541daf0cf89710fdde+67108864 d167ec2708343b4b13fba5c2f12bf9b0+67108864 bd0bb42bd6a7bf267b00e81479ce3de0+67108864 c92c45f21773d3fa66afe741fc40c79b+67108864 b31bcb75bf11f2a25a7829cf0d8050bc+67108864 63b72753cfd53c2cd9ab94bcb5902969+67108864 3ba309561299c6547c52ee967fb315df+67108864 aab873f87359a7dfc85b0fe51f01b5a9+67108864 a150926f927cc3e60d45b8b18f3f57a3+67108864 f195bee4c6bfba67434e6204ba7c082f+67108864 96c83f753f9b3ff62b1d097d7ab5161d+67108864 85ae48853801003271b8b7a113e3c439+67108864 7279992bc5df537682a25710e83cb6e0+67108864 9799fdb7db707cebfc2fb23279d1328b+67108864 28b4a1cb9b7d06db17482190b37eb014+67108864 bc8887be40fda9cec1cd4e405dee6784+67108864 67b3c19c0cfc1f897d082fc32558c0a2+67108864 66b5d0f197a5d622cc8a46ebfb91c831+67108864 aaa8c95f2e065adb74a630951644b69a+67108864 1df2ba81a4082700248ed31b49a78d3c+67108864 c641804541401992e665b6d71b08676e+67108864 43a751e4dd67528826b1c8b48a208899+67108864 12e4cfcbdac91438cae64b2cd7c9780f+67108864 0d3aae2988c4714fa91537613c5e616b+67108864 2f95606f37db2656ced4b009d6905ca0+67108864 a3053b1d8868bcdd108d9bbdf8987667+67108864 f4c6f20e3e6f4e9fbd11325b33c81f28+67108864 079ae4f891032ca44ccad706c3210fc5+67108864 aeb92132131d06d085946802bc63379f+67108864 0c51a101a9015707f28be913c549ea2d+67108864 a3adbdde43e893554b73304849163d31+67108864 1124b485aa2be68ec06067db378caadf+67108864 105a90949ef8a77743739cf6912ddecf+67108864 2d6eb744f7babcbd30e17f709cefc92d+67108864 c6e8581198ad5ba878697e95b4c55dc7+67108864 6633b9c706e53b649cc5d0eb5d7474bf+67108864 a0a8055971c9c2698af9cae2dcfc1de5+67108864 7afa27ead5036742aff637977742bce2+67108864 2a4689ec1a63cf5ccb4ae3754ad143a3+67108864 c50953205baa8180b24c33b3430e1895+67108864 43fa314ee5b342bc7aed72517cdff282+67108864 a3df0148323a0004acfe8387c67cc7d5+67108864 1c29892044dc4854651c73296d0e0c41+67108864 9d75a69bb257970ffaeb4ffbdb890728+67108864 b1b22a2463ee4106c480505247037743+67108864 7072621df84141b650e222049e3c3d4c+67108864 638e4160832bf12ac9eed8dc2d1aa57d+67108864 b88afc09886457810ba20ed4bc9725a3+67108864 45bce56d25f3612a8fa4d15adad97539+67108864 5421b2945c952a99b69ef6a6a70cf453+67108864 1f6f2dfb6224182059688f5d46e61ff9+67108864 359ac6d0f79d5e29b465fa12547d45f9+67108864 7b7c538226d537326ae3c8a334494425+67108864 86cffb84184aab85f109d155f45a6289+67108864 1913f3176b9441fa5adb2b5feb112a36+67108864 6020e46282904da6b914d6a37756c9ec+67108864 e7fb9206f7d4eafb469b3619a37f64cf+67108864 5c4eb1a7bc507b0c4fd9147bd6fe5b3d+67108864 39511f0f76cd426f1a8b55563a393090+67108864 228589eaa12f741ae3d63964e552ceb9+67108864 
e71fd43848d544eea1440595225383bc+67108864 5d9d7af54c2b43e70331e74346de7be6+67108864 11f889041c34528b4f32b717fcef37f4+67108864 41f9c5578d0b11e7d4e96d951407a495+67108864 3164503cb8eed5eda0f929e89b4ba41f+67108864 e8232889c0ebeb8b95efda8a0be0fed5+67108864 f831bb088817b20fc665cb152ffcb7f7+67108864 c317567be4a749a38cee367bcb4650be+67108864 535ff483eea4aae8db6693c6fcbf4c96+67108864 4c3667b429aeab2cbd1c3764808beadc+67108864 f2d7e4f9220a5b144ab935441ee5a4c4+67108864 46376eaaac4261635cc8d4c421ad989c+67108864 8ad8c6ba6994354296efe3b271c83d68+67108864 f3ccadadfcc75bb032d33af3f218136d+67108864 fb7f377672c6ab9a6d7ee640de0708da+67108864 455e5b21427a1e0c1166a77888e3db84+67108864 56c1bd2a46e9c510c0f9df14a5e35005+67108864 729f094efa399f560639698946c924a5+67108864 f3597659df39a5489bf09e9fee9e5f18+67108864 6eb0404e6575741c52f564ed127a9e58+67108864 15b2142e9955e2a7f2597942e891d800+67108864 24a2e5ad926d978f93f1a055bac2ae82+67108864 6da3be627ed06f3302422a3571912798+67108864 faf50ff859c11e516516a396fa90e370+67108864 f30c246988782a97f8fb045f8562cd66+67108864 3dcfac0f2b47e7b54dd42b7593c11b3c+67108864 8c0cca153ed7b0298c272a38659dcd35+67108864 24d100860a2522d686b11e841e6bb3fc+67108864 a0fa91ad649ba8bfbd39edc032164aa4+67108864 4873a584c9bb9b86df2dda047977223e+67108864 71af7580293bbe70550235da201bc5b7+67108864 95246229f010e909205f973c336ef913+67108864 760718000bfa43f87faced458fef8a81+67108864 b2bad6b04dc56bf6d486a389c3a89f69+67108864 f131bbb77c16d5e8bb37241b38aed448+67108864 e93e069d21f6b5c65ad59f54e10954fa+67108864 8edd8526ceeae5a2f73898ea94a2637d+67108864 89998b502934966809d3c3c5e9fcae09+67108864 2fce7d4eb38c05370d1695cb5fe2b283+67108864 19d601c0232c10ba5dbfd1ad65e26dd2+67108864 9c7b6580cb0a2b603d05117f559453c8+67108864 72726a8bee94ed66806cce11af1e2371+67108864 2b5db38f1eab185a91777d57ee86d2c6+67108864 ec6db4e314d2fcd9740bb0fb6a05204b+67108864 94edf1746c8aa716099d6cb69c065a40+67108864 4836f61b8d29f86f2cc8b7f449587ae9+67108864 436d54ff31569849e9b3b861a8d7a2c2+67108864 7ab27b51efccbb501ec8b0524d862005+67108864 0c23eb2a65db550177f8a03356b7e726+67108864 fae904ea4bded822adba6a676481440a+67108864 07f9c8f81344f0cd051bde9c3bb20398+67108864 57f5fabdd15e3deefe7e8670c015ab49+67108864 27812f4f1bf76cac4467e94bc517e2b0+67108864 eeb1e8dc2b9efb3ebab313e848ba415d+67108864 f2827412ad0b5ca07628a44c28fce28d+67108864 27127550ebc9c8be6cd30f08c20d3e77+67108864 827a8e19e46b575e698a03f2f159929a+67108864 d6da49faa10324de4f345e0ba7eb963f+67108864 48ee9edb8e9be2824651378cd71bcf60+67108864 d42fd051025373f0b395f9ee413e133b+67108864 8f72b8ed9aacb48b4d008fba4da38055+67108864 4c382522d1fb5e5e70a7641b11cbbc5e+67108864 dc40ff7e5dc3419dbf06ce5d89db77b6+67108864 b178799b404d1a0f2405c45f853bb87e+67108864 aad7711a0c588e389e1d898837c77975+67108864 dc92a289df1ee20c2a50c5cb34b9ee9f+67108864 c0b506a6540b190c8de760845d89a20d+67108864 95d57277bef8851c91053f8930557783+67108864 f1fbd42cb8a45b97fd3e1befbebbd141+67108864 343af058b01d8c9cb6deab3c7041be9e+67108864 ff56bd3a487164a446760ba4691ec93e+67108864 4ff72bbf0fbd4429341cd6c0633adfac+67108864 9b08953e5766d8cc5665c73f3066b9d4+67108864 c26eb39cffb14c5b29464552158f6f9e+67108864 2597fd02083990464f66d77950854dad+67108864 da1df4d16834f62ee21f8f414b63264a+67108864 9ee6fbfd042c057d1d700b51a3d6620b+67108864 1728c736a8598ede8333ef2359d3c9bd+67108864 a1a10371aa32905f2ec8ddbcba5fb065+67108864 ba2ad4e1729d11bdce0129d87f2a999b+67108864 c41113602add81f97ffdc93b5c3ee306+67108864 ee449a445b76a3f2590fe5ccc313cd55+67108864 7190f9a48c2b16ad0b93eccd0ea07e5f+67108864 d825ce686c183120f818411877161caf+67108864 
8696a1e8f5542fef24a8e7a1b4eb667d+67108864 eac9cc77117da7eca4e691815d440570+67108864 4eb5252425e2342053e27721ef419033+67108864 bb6ecfd5c6c3404d9a4c1f44e75397cd+67108864 7dd9c32e758ac7a2a6adbcce675ea9b8+67108864 711f558f7ccf54b12910e7f4646d3ed4+67108864 7867a303bc4095bcde227661553682af+67108864 9370f355a3193b46bdecdafa6cbb9c8a+67108864 016b4584b44296de38f1d102937c59d5+67108864 07261c25f8ca01b3a384f590673a3380+67108864 e6994e3638f2e12ed52a104c14d88b1d+67108864 16e57ee081f408f8dc74e909efb63879+67108864 235866ac3b7d83e27bf8f4a6bdb56c02+67108864 25efb32940871da66f4ea5465334825d+67108864 f79d26bb161e83bd2a26b8c35b1d42b4+67108864 2783ea5a5721654486aaa5efbc83fb74+67108864 461881362fa60250adae60d3c49dc131+67108864 f4b68164f31c286249f1e5d2d7237c35+67108864 659eac0ad8a3e7e597458709bb795680+67108864 034ebd61fd1a2c7b8a283fffbdbb6ca4+67108864 50d93209850a3d6bbfe509ef813ecafe+67108864 42a97fd6584b9ca3cbc7dcb74cd405a3+67108864 3d0aa7902822df7f39feae96b5344f1a+67108864 f5dec5ba880a6d6392e4cb7605ffac0d+67108864 567809b2e7769647651efca42fef2e8d+67108864 6b12c06ff734e6e58b9eccf64bf1f911+67108864 7bc4c900e5d59443b0096866a80a700e+67108864 b8add59db3b248dafa75beb2700136cc+67108864 6ce79cc7c998349ff12c651f464b4d42+67108864 6cbe5e5e6b19de4a0b715e0d845d5719+67108864 e59a47b51498f2ce7f276a9a0d63fa66+67108864 4f5ba032de58d21a0cbd20ff0c5ca2cb+67108864 9d5fdbdf3cf4cc4fe2c32e5678417bcb+67108864 606d871a7812d8d61717bb0d6de1d3bb+67108864 a7f59b847684061ae94b5e0d9bf70cb0+67108864 7704eacf857396f10a46a5141067f26a+67108864 bee69de8466c8b7ed1d8729658a40027+67108864 3c3c55ff9c9cd05f1fa4104140fcc901+67108864 4456b7aef7afd6689d3570b66ef0182e+67108864 50ba9fd74ef8d07e23dd96b20e1c1067+67108864 ff2c79e2bcae5c9204cd64cdf372d3b1+67108864 2ce25d963ba37dd5b24c2df65dd1295f+67108864 20b81f528f70c9e5f920e7534afc2bb2+67108864 a0b0e42685869944d3768dc337580428+67108864 fb0d0bc80635bda7b81030998c5bdec3+67108864 bfe86f22ef97ee20388699c8d4e824fc+67108864 5041aa0dc3a9b4b86f5aca145314477d+67108864 68e608344390a442be79c53395223e36+67108864 86bce8f646770b66dff7cf43751b77fc+67108864 de0b67d42b4b6a7825a3bef956737e5f+67108864 4b88b81edeb09740420ec66d79249c6a+67108864 755132bfea966ea29dec0b5703174ff2+67108864 c80f48e88aae10238b1c28647c7ed739+67108864 e001ea8aa95caab94f82df29fdf27e4f+67108864 8fccfa89d4af86d60e9f97af9936ecf2+67108864 bb01dafd921f67f6cd859d329eaa5463+67108864 931a95676fd8ba998400abeb98ad293a+67108864 3e0b9148a86dc17ee1ed7b5d44f394bd+67108864 63b29d2a9bee8cf79d63ce337feee7a9+67108864 bdc851ade507ac37df5fc3d3d869a996+67108864 fe054d07167887c82d439c841ae352c1+67108864 0107f8ff057ec92321c25060cd4d865d+67108864 ea6bb18766a302faa044cc2357ad1f3e+67108864 487fcc9b756d31eeb5ca6c2006bebf91+67108864 233f7ea448da931b0385a63aa0b3c1a3+67108864 fe1d37c64f7e48c69e4565428bc78857+67108864 4990ba572fc9afa7f325052b51d0a47a+67108864 423266abfb6cd3571e8e66c2b30d972c+67108864 8a83c116090deefd716b983e63ecd5af+67108864 8e9b6bb307f89902d414223e9cbd7d5d+67108864 e18c8fd868d41face78005a8b255bd3b+67108864 4a60796fad1c71dc21c23a74ee658421+67108864 2d27fff66b21eb557946c09e9b930deb+67108864 524dd7eb012fd8b3b3f64b31f43115ce+67108864 512765c1f1685768a0d8bb47ae5225fe+67108864 1e11f5b0124b427d1bf462e7632296b4+67108864 41faf74a022e8b3a583cee0410be3c32+67108864 0b4a883486509fa81c8c19bcc51532e2+67108864 f708c01017b33cdc5f4d373da7fc90ff+67108864 602de9b6714a2d9b55b81670256bdfb3+67108864 87346a79929afc2fb5029dac7a2279ae+67108864 e1251b9b403eba79863fc82fdfbd943b+67108864 189117239220e41f0d02e8d7620a04a6+67108864 736705421734ba02be110da2808cfa81+67108864 
295f181993c67d894145c2c071b4301a+67108864 ff84adf713707bb22c1eee95e4e27dce+67108864 d3c2aa31c1d8d790741e828df484fe17+67108864 7aa19cd3f187fa5b7dd623c2c09e4236+67108864 779595e6c757024f5fb2fef69d9b048b+67108864 eb3fc60541f45f8ed8983af5c2bf3b95+67108864 b8fe527fc513cd3f60237c6aa87b0573+67108864 1c9d80f46710684355577738e5a07a61+67108864 98a2d14f30435bd37e68e18fd13879a5+67108864 dc464b8e7cc454900bd1af2f73167ed1+67108864 3fc4b913ade156db548d8db91e03389c+67108864 677c1763fe8b931e01b5d76c66119051+67108864 33bb1e8bc7548970a4b3f7055fed6cec+67108864 43857b257e194d9a0ba6b68e16d81dbc+67108864 4662626ec479033f2c67f00c10f4f3cc+67108864 82b053bffc39afd1835da5593026c5ed+67108864 f090aa4a714e943af26fbcabfc5a8be8+67108864 6a655ff89d9a9e1f034837de7291fc2c+67108864 f74ae1321c93c6d3d0ac64cee951518f+67108864 d4c8f4b673100f15076763b856e6261e+67108864 fc516957b488877fd52fb5723aaf1d9f+67108864 6729a81638a2518956a3eb5a6a18fd91+67108864 f13eb8762ad4935d6cdf6c872a5afc0e+67108864 8f0d2b097917b4f052e5f9533a55fe58+67108864 fdc3ecfd8eb73392e1fe1aa8edc916bd+67108864 337387bd132aaa183ed5843f272a49f4+67108864 6a9db804b6f2087c186fbc8ff77c79f0+67108864 5fe678e3a9e49f4d8debcc56a4d04dc8+67108864 89f2ad15d704ded3af481430d26c00f3+67108864 f803716b4143c361087ab2a034e9abe6+67108864 4edf94e4dd6c5c6bfb72da385c2b981e+67108864 c5f51291f77b31cf93fff9e294ac9659+67108864 d938cf9124ca3f39eff966a98668b3cb+67108864 aff6467b5d36539c8c6444107f08e959+67108864 da8a0ad7cd02d4f78df8ef8c8b61f8f7+67108864 9d33faec81bf171b5dae27fb380dfd8f+67108864 e4093751a7136d8d17ccb5034aced419+67108864 94fc879d3bcb98f726b2cfe018f3be3b+67108864 474ab1d4763a090f4a361d660544d8a6+67108864 8af4c1580f8c0bed92d3b860238a37f9+67108864 63b45b164b25c4457f672ea2095bb234+67108864 3ebf649e0b24f32487778caa41f24442+67108864 9719bd418db412587e6d66c75c3a6205+67108864 346d35f73e586a9e5e2573b336f194a4+67108864 efc845c053c037d89d9bcc059a025db6+67108864 186c5aa2db96fc6b9393d7d675a11e7f+67108864 631b5eced91f74bbfc206cf5856e812e+67108864 916386facb14f03f9f3251ca7b07476c+67108864 12844c228114136ab850dd20e11ace4e+67108864 ed3255e55123b2ef21f4a7d2f2bdd644+67108864 0e31381b92d55f99ced79c14281ab52a+67108864 fed7f83c927dc4e5b2ba63e21b1736b5+67108864 22ab11bc0606f0d041e34f4c1139a209+67108864 20bbbc8375b015e4a89df74a1566ef47+67108864 7d7752212e5226ae570e03c9b2700fdc+67108864 cd22f0b787d25927062cbee33bbe3371+67108864 40baadfd1426510073508dc0628f0794+67108864 181e4bf30a8f77da23c8959e1be70fe8+67108864 622a02d725eb682b81dda117da700ed1+67108864 8d9f0c71149e9c1199a951d452ea0d02+67108864 1814f3c34ea4c42495a79205816f169d+67108864 4f4298fbaf1dcc46ed7c3f410a065d45+67108864 960e592059027311a32d1c1e97bd7260+67108864 de9ffdb227add673b59b10bc19796360+67108864 2736cf83c1bdfbbfa742472b5af5e651+67108864 1269c0373263126cf938de481aa9b897+67108864 fe3fbc87e7a23f0160a69c0ed626082d+67108864 e276c8357f973f36b45f51f0c71d5aed+67108864 1fc4414ab833f2b0cb43c5c067aeb39a+67108864 652af913139e3e10555bdfc56ae16b9a+67108864 c3119097a1400ec0897abf917ef8a108+67108864 69a8272c1d1f14078e1d550b611bc985+67108864 23675ce9f0120b2d89289e253e92ba8f+67108864 f959c9a7b0272c49ed1f084208694276+67108864 b7e9bebb22287722cd96cadcc4eea2fe+67108864 15acb83d077045ffc1b8458a48a6398c+67108864 c4f0640ca93090b7a06df55c66110547+67108864 275e73ff5e9266d5d07fd4c9f0b6256c+67108864 2532625da1f8a0a444ef53dd0d196db2+67108864 7f05e653e318b82ff410b83e6bb0b556+67108864 d6e133ca3f00f63184ce80a53fec3107+67108864 b8daa3bb4165f81d53aae24183eb6608+67108864 0a8d711d220316af33b0ec0c1818f403+67108864 f9c34cdb6b97d2c5dd58430d324e7f05+67108864 
ddcc459d76f637cec850257ab5fd711f+67108864 a532803027b7545065855b827ca760ba+67108864 c5109e5fbf248ae1145b45ff2e137a86+67108864 088bba73713732e628ed664b45a80878+67108864 720af32dd389f70a969d6e75c8442d05+67108864 fdb232e6af0bd7d28e2b5b07c62e1aa5+67108864 7a50af59afbed238c6bb61dc3ac22675+67108864 e3831c594aff61c21fbdca24228c70f5+67108864 b64ee908abe5dc9b59db0e928833f81e+67108864 7cd627c229001bfe2a2771412125ad9c+67108864 2eb73d895bbaa6e6f704712c5dfca62f+67108864 5fbe4f7adcb52ef8afe2770c3f444d57+67108864 cb00a9a59fe9adae4c19c55d0f67cb23+67108864 21309b59a02bf3956b350c33f5ce016c+67108864 fdc90f4032fbf373b66fd1a1d16cda0a+67108864 1fb14dbb013e05b7df0ec7275dac673e+67108864 a287d100a50408ba94ad3c1956445e44+67108864 bbf5b0e3c7d1f855d43c1b3ae5713cc7+67108864 66364beb6181a393943f61726e06a63f+67108864 71a199035090172c2c48dfca851fb77b+67108864 c85f3d46426e932c3ae5f20929f1d412+67108864 413053233729159c37ca20b2d325729e+67108864 16ea95b5143fac3d4894db0f6761bc07+67108864 65e3ea0a4c2192ba1b3dde341beb8b8e+67108864 9be1589739885d435b8208ec3a5608e1+67108864 19a2d0b16817c6e11cbfa36f0fa17cc5+67108864 e5b38b84741d4def69309c9c66532c92+67108864 1cef5fb113965f16ff73f0d74ac5f6f2+67108864 57a91e408fc820b8cfec6d2733f5b573+67108864 bbadd120a6bce9d182009445d90bf403+67108864 d498136e6a6b6821df6241c945245a72+67108864 a3a7f8dbc36741da81350f2fdc6f33c6+67108864 0459e23c9c447e5d6cf4a733a04de961+67108864 2dda2d49a05aa1fbc58c3ab040281bfa+67108864 30b7695c33688f894a83f3cce0b67da7+67108864 8689b86276aa67441da2602d13484a16+67108864 a77f3e13c2744bd912ecbc94ad5aade4+67108864 10da7c2337eb393cfd600368862ba662+67108864 ce7e2fde61772e393ed0d7c31e855be3+67108864 096b521bab8c247f74789d070e771807+67108864 1019c2b793990d6aaf7d81bbb2176ee9+67108864 54ed19ddec9c9c553f59ebab3a7d5dbf+67108864 094fdf5c30c3d805c38e769d5ee928cb+67108864 1d7ebd59cc5d0cc9400d6f2fb9e29594+67108864 d5cab35aba572e070cd870a19c0beec1+67108864 e418e25ea0e2d1dda947ff716d916529+67108864 a568b248c881c740a444dabe4a8cba1b+67108864 25688b79ead2563dbaa5d21e512c1aa7+67108864 4fa44475f1dfb2fab6dfa8347f43abd5+67108864 c37860c180fafb91feb9187acd355c2d+67108864 585dfedd5202b1084484ee85dbaa5ed1+67108864 ac00a4785238f69fb5005977d36d02a0+67108864 a97c8edd59cea65c679a80966ca03482+67108864 2b1207bbb44508f0738dfc60881c2e99+67108864 f0f698122fcff73e939f6b6bf22f9cf9+67108864 792a5130188e5504e23bb19dcd748fb5+67108864 8a50dd1f26086f2eba93a7b6e7035b11+67108864 7fba73104b7d27b8ca512746161aa95c+67108864 52f549dd074baf3fef610d2a81e8d1e2+67108864 e8ba8562c268fcd5b2edf5bdad76d319+67108864 6fae9fa89a358ce5c80c339e6b576d78+67108864 48f1f2fc7d52618658bb8f652fddf9a9+67108864 5c6a4bdc74ef2581d3fbf391a8d587ae+67108864 411afdc89fc1435d7ecd1569e474d6a8+67108864 a546bb78c4913f26cc4b1c8ab91ad277+67108864 fa45ef65dc70d14160f1efaa0a8e2687+67108864 481dfbeb675e469846f0f69cb7d36d73+67108864 1ceb17f51950b94673950f088842f33d+67108864 d1e4769eca2b7eb98a84a87cf6803cf2+67108864 f13d8deca0cbccb51fcd1a4ab40ab0da+67108864 20873c5e390a5552511f8ed98e2d49de+67108864 767eadf955a415988e7e1f83c74f7a9b+67108864 85562d1acff2915f0ff6d45ad87fdef1+67108864 12547b8f30465624e8883e25e870892a+67108864 f2dd149abc688678a5132be30af317b6+67108864 a5190593f6608e3747e929105fd601c8+67108864 101835f2b47c37fa0740525295019838+67108864 107c3a97bcc74c54b565f025068ee9c9+67108864 bd04bae6fa6e5a8fbfa9dccd2e467952+67108864 cf907435f4f5c0446fe211049e211984+67108864 16d183f23f46c6cb4e492c2a496901d1+67108864 c831967b607bcc15f73a8334ea4f1501+67108864 edee1bbe2a9f0126e45526d9bb421861+67108864 011ffbc947b2aeca6a75939ef16e4b1b+67108864 
68d847961bed44f8d89bb26f1bd0af35+67108864 b6d56c74fe7d25dcf0c28df056090306+67108864 24da1fd3c8d4b91e3ae907a25d156a54+67108864 4a61fdabde6a07f7e5b2a4cdcc26d013+67108864 b861eeaa168860e759d5c8f24712f7ee+67108864 a266e80c9a70f7d4937384aaeb5ee360+67108864 f8a5c5d06023f4bf75eeaa1eb5bb3374+67108864 f228202d9f58ccdc24c1dca57fa5d77d+67108864 4e61e6dba9ff5a6a4051d48b907be4ba+67108864 2be5bf4cee45ab1c4400a680df60f81b+67108864 dd66f99ff5054cbc8cf54d3c8fefc5e6+67108864 dff0cfbd2ff138d4f9fea60a96fb2df6+67108864 500ca6e6c15d85070900b6fdf3979b76+67108864 6f36698535293c13016b1abd0ff46139+67108864 e9dcd509836c212d2f6d8274891c3c41+67108864 70d0934888c6c587addcc3927bad38a2+67108864 0c50fe89bb20e3e9efffd1a0cab6d084+67108864 d4e349a2a674497462202d061932508a+67108864 6bc71e5e68fc5f83370173073ab4ae1e+67108864 a00a2da32f91785cf75e6482e59892e7+67108864 397510c84c36e9c234e4c280b06862ab+67108864 e0c00e4cab678098571417c9fda1cf87+67108864 124092481a8bcc63fd71fcc8cfa84795+67108864 0e87753f045eeb278f2b257fa53ab510+67108864 8168fbb0188889addbb3e9b68d3b3593+67108864 f6a005c9723b4ddf4cd5bc1811279fc6+67108864 beca840b56f1ee8eab55366750fde36a+67108864 944d4724fdf35af10976624297ff3343+67108864 d82f71afcdf80d43ac70d43d18d039cd+67108864 7068a4db9fbe9a06e59350f634fd8855+67108864 bda72fa7ed6c0f07f149deae16120901+67108864 b134a0ad3152051241bb63ddb0ffa0ee+67108864 afd1344cd6247583b2e08b42a5b495c0+67108864 6d71dcd93c01a8bdedb1a384cc1b47d9+67108864 525f86b84d3f9602f1308d2c82206019+67108864 3c5edb580b5d56b8fc92b49681e22db6+67108864 8fb6f50d1873a1974b7c5947ad8ffd5b+67108864 fd510dc5ad3116a7b4424a405c9e0333+67108864 a5e463f2679cb6f4bab936e0e372b8ad+67108864 7fa2c65ea82c0bac249684bbe4ee3b33+67108864 d3c23cb68b91c0d1852e133c82881eeb+67108864 0d23ac035da7e96d139f6b6724fe6d94+67108864 e4bdc1e7327eaa9457860179765b1a2a+67108864 3838edbbd807bbe5c75379e6f7ef53c2+67108864 ee09b2bb5466e520d98348f0b4e5a272+67108864 4f5015d61f01512653964ae3d58cf2a3+67108864 c05fd85f363942029b17fabca47b21fc+67108864 93ba1a2da4ef91a9460fd65895b867c4+67108864 d6430bc6ff93632f3c571c57ced084ba+67108864 a59c6864154616b9704d769f4437fe48+67108864 29f7d541b7fd363162189ff4be354ec3+67108864 142b095b33338c13fe6a0cba04ab8982+67108864 9e4fff3f8f5a272c4e55d79e899acfed+67108864 5d782433b0615d0c81f40d45b5ecf4ac+67108864 a1ce4f9cf8db339593655aaf432dc372+67108864 814f4ba561bd3459a7da97a7fca5f3a2+67108864 9a663484b49fd5a5465906760f4677b1+67108864 b4fe2e513982a4efb17bf442ae8abf30+67108864 36ab4fbd80eb1525f5f078c0bad27961+67108864 888389ff68d5b71d3a38d44d2a5f5321+67108864 af94eb7d687c5d973c3ef6826986a377+67108864 7ff861173bc6125ac2729f82fb4f76cf+67108864 f04b119f090e7dae55e982c94dfa5197+67108864 67b095174cd6d4d5a97c4c10926488c6+67108864 54963c6c726ae113457ac8628493bb36+67108864 94b063d6778371dbdf74c81ec6e00705+67108864 efd856b38a4127578ef3b30cf2f8111b+67108864 e9a78aaf8290e786d31b52e3d280f25b+67108864 acf39db50c314e81d77063e1363b0d44+67108864 f5ed13b763e78ee4d4ef45c754ddf2c6+67108864 f0a522930f01ccd16a21c4c836d81486+67108864 638503d44d5f70646c90cd9c1e5ddad4+67108864 32375ece74a773563332b435121f1f69+67108864 206a739575b9f0864abd4f01c57e7a6c+67108864 4b8ce29ba1e51c563b67abf72ba2559b+67108864 5ae2edd2acd049b2bb6d2bf398e0ea9b+67108864 f142e90252904e409bf460290d3ef7de+67108864 c168023fec9fc0d340f647a31b49d68a+67108864 d46018096ee511db7264de70de2de130+67108864 a4ec756f5ec69838074d7c51e3ce9366+67108864 657b7662f9fa7ce790eb6b6788bf4fdb+67108864 88b268dbcd8cfc68e322a6593a3f85a9+67108864 f6abb1603254a9c704c214208e45ecd8+67108864 53ba009b6a5ccd6e5001c099c97fc3e2+67108864 
5304881bcd7ae73781a53a4fdb8f013c+67108864 fcf742e76f89d28953997eb339c4d2ae+67108864 e732f6fc4718c7ab9d6832641ed9850c+67108864 de4c892de00874bc9cd07918393ef6e2+67108864 9abadd8f9c1dcd464f17c45d65e6c4ab+67108864 7f1971a1fd92794421c07d50771b404a+67108864 349595e945b11955f64fd8899611d58e+67108864 d58073ab66e3f676a560748bf9898ba5+67108864 8f8b26f5d44408c19d0a3cab03ef67cc+67108864 38bc3e0c4925c47fef039917090f69a0+67108864 0ca5058c7c829ff871ca3868332b662c+67108864 5ae9155c4b1a25c37ea00d671324c6c4+67108864 7028834108fafc13240ffe2021cf7027+67108864 f2e194ea64bc8a0dbd5a86f085f1e934+67108864 298bffc3352973f28307d993a8973c1d+67108864 5e43691e50baacabf4a3c295649f2b2e+67108864 727520dafcd59f806f52ffaeaff02bc1+67108864 706789ae77dc03640546c8e83e129528+67108864 b5eeaf5cd6a94607ff7247eac7eb6455+67108864 7f8b151b8ffe117beea3b1934f9f419b+67108864 bab5b9d0cfb54f33e79227643fbf0b65+67108864 a59d082d7207bef94b4991215b134a10+67108864 785056e6a4426076b734e608cae86737+67108864 47117a5988bb128528b528e4d541eb1a+67108864 1b7085a666ecc988bd37eccf3e93c4ed+67108864 bfcbe4d5f5d7e4678f198d60996f13b9+67108864 3f4f1303a528c16d0f930da14a16b230+67108864 9e9140f33f9ee9b5fc3907b58eb051c8+67108864 4fcb1c3af4d85fcf0ac1095e14a42bd4+67108864 a6be2e79bff044c13ce21c2aa9c95e08+67108864 9e03214d420e916b21e2324a432dbbe6+67108864 1f0b58dc5d2b1497cd83fb9b1a2176f1+67108864 44c835860f76c96f9e0837e5bae8dd54+67108864 f9d33a3817dc95252c3385a9b620de96+67108864 c9fb1942eb1ca6f2df777a537bd717f4+67108864 e56f8e279cf628f2575b9f0b8df32df6+67108864 febf0b9a281b32a2a20b9611264f93e9+67108864 e6d5ad53ed0acd1ff102f22fe495a7ac+67108864 75435e66e302aba8c7da88b547a2517d+67108864 3bae3dca82efdf59e588345138b08498+67108864 e2e2c0f5e15558e7dce55f20fb52e254+67108864 f4c5394e402d38d8d4c5725ba7eef920+67108864 2580fe3b582a0b4c17a4ff4bb9ebd673+67108864 ed5992cc4a9cf4daabd5cfcbfeb4b896+67108864 488d4c76a978e3265bdddfcde78fcc82+67108864 04fe93c825d53638d60d38953b372557+67108864 ba6e44ea4f79f61f907e36253c0bf958+67108864 41213b819b4bfff9f17581950193ebe0+67108864 269b6e49b5593ef54b3d49032e4edfd4+67108864 e267c0d9870a736ac13c5a2a01cb23fe+67108864 804a37500b781cfa66441090c323c677+67108864 33195f727565738c7f2e4a70c6920d7c+67108864 e7c94488ea6b2408b8a81d404bab5408+67108864 48b336d69d1188f5ef6523151afee6f3+67108864 6cad6bf178aeb3f6279911a1153b0bce+67108864 9a674b1c710c52a4994a0670610cbf4c+67108864 f6a38265f9473e02057ac4be6bee7170+67108864 833b1f028e71ab42dbecc4d183da4b07+67108864 6839ceaa235277adae36d5587640df0f+67108864 c4835d24adb63f08ca4311469d82587f+67108864 040e2f87dedfc549528ce3de53688dda+67108864 2984c84d1d32b7f52ae49bd838b8c016+67108864 33fce73dee1e203326ffff8fece4816a+67108864 f59f8f10cfa7548df73184ff86acd661+67108864 fa82b6721c15118eee3ee95b41ebc19f+67108864 7b6db4c134dcc09f5a851d81d6fe2fcd+67108864 178ba3074b954c0aca5910dfa78e341c+67108864 f44afa76eca4fac8b8bfaca8b14401fb+67108864 90d717e686fc0cfecc8b92bff5e3dc3a+67108864 4c497db5abb4e6c08762c66b51be2864+67108864 3f5f220f3749bec9664f2a5c22714f0c+67108864 36165d8271554ff80e9ad204d6f41c86+67108864 0cde3c478fe6d14dfc7238f017af453d+67108864 d22407672d47ebf442725a0365a4f9f1+67108864 9ee04e14feb275234673409d2efa97ee+67108864 1431699e15fa4cef7dd54e6ed69d9cd5+67108864 a91b69896d9cfe2be169f4dd1e585b24+67108864 8c35d02a5b3961f7717828239b49d21d+67108864 c4d11536015b05bf84926e0cef77dcbe+67108864 b14f3f000f174c9b63de76d6b7f7e2c3+67108864 2732612254c0b8fec0c533017bf946bb+67108864 87b760ef4b34ac9d5ecf0c32a2d07aea+67108864 7566c2a133f0f8c5dd31d49a6ba8d5f5+67108864 28de06e61335558872188129073eca90+67108864 
384ec6e77e4581804ef5ebed60c49806+67108864 460f0d01ee80f6936a36202c52d1367a+67108864 f475c729d470dba96ad5a355223fa3f8+67108864 75951867b767478687a0ed04031005dd+67108864 988e9032fc26f255d5e65f896f97c386+67108864 c72d6fe3a565167215c4b8725c71d20c+67108864 03f2a74dc6d301e7eb1440e7af0e8aab+67108864 0cad7f4c65c691932c7490ec31f32a2c+67108864 b86eaab28570a18618fcc5551589535a+67108864 5c2e755aa09e7eb494fe2f380cfc18bb+67108864 7b0314633fd0276ba4a7cfb18faf2a63+67108864 805596626907ffea2d90c29d1a8bd183+67108864 bbe42cc13151e29901f2ff81cd16ac10+67108864 96f514038c76fbc50b3325ce0a50e260+67108864 73ec6c69f79cb39a8c6813d6f27082db+67108864 9ab13bcf77c6874f6843b2ce9bb18cb0+67108864 4570482366c5811d199f47a2e92ec727+67108864 dc1c567a96316d293feb2fae90a7b8ee+67108864 7b2bfabe105b5c19e70bc18cf7c62320+67108864 513593f9a7b1b1465c8e20a4daaa8e2f+67108864 6442521c08fd3b7b6772e71404c09d9d+67108864 601eec17c302eef232cdc707534a6c61+67108864 2a7822d5f641502312296d507301ebca+67108864 0805d7819623d6af391ea9e072ce9bc4+67108864 152c643d2ccad0674c38a23368a4a1f9+67108864 b1a4f45eba2a13984d9b095ab5a16e73+67108864 9cf64a5302f8b9c21c01dc5f068bdcd0+67108864 0e82e1e3f2f1390be5ca4560b18bd96e+67108864 6213cb25efa2a9bb524be9a4f273355b+67108864 a04a71578d23bf26517ae21d101f9ce7+67108864 c875cbbd27c30d3e2be026fd11b28011+67108864 1375d33fc6949a826a321ba2c2d21d05+67108864 aec41b74054aac99980f2047d462757d+67108864 e672ff1a260ac97238a25a95747a346e+67108864 1cf87c401a5084c740466f8a724132b0+67108864 771c370c42eb026842a56f533af4b8e5+67108864 5139242530c8feb094f7effef56e7305+67108864 1d4bd13959bb21b28b48363fd367fbbf+67108864 e920e35f2fb67d9b34b012b95d8e2c89+67108864 8facf133a1ded0c098fa41c953f63b89+67108864 6c32393c950f9062a02ca4c13bd858ae+67108864 6b2e28a4fd767ef70b56589f3d2f29f5+67108864 f2b0bc69c431be32a87fc6e1c097e456+67108864 adf88e147e6089df2e1f44472d4523fe+67108864 74aded23685105b0a360e006946a403a+67108864 62b19d7d4c3342d653f780bdeba26e90+67108864 0ec5b45cd3a722baf10c683c3be0f1e9+67108864 9da6829a21b6b75cef6b5c6f17f5e07b+67108864 826ffa05850817239f4fe38d60c01ea5+67108864 adb1cfacdf09d7041a55433f32d32c4f+67108864 29e8f3c27f97a7ee5f00a15196000f29+67108864 7a5753093a96c622e7bc5bb762bf65c4+67108864 1307b7902955bc0588261c48d684dafe+67108864 88d27c9d828382625103e1b1b75ef6e5+67108864 7345011181afb92c1990ed7656a0eb9e+67108864 22ab24fe279e19a3773a70f42a4b37d2+67108864 27446b06a710072e7439388ef138749d+67108864 27353af04e039140bf5560bfb12b49f0+67108864 2cf867ed85d88a901025580c0d42c163+67108864 a2cc7481a86b8577cd9b1c36d81f9c54+67108864 63fc71a9df52da41f1879db40ae4683c+67108864 cba5ff71d3b6c3499e8c9f9ef99484d7+67108864 621818664122058078b2669d0efb60d9+67108864 544590a66aeff05521be3c0c51009803+67108864 a56212f4d9e0072dd8b2c6eadf7b2d9a+67108864 2b19824bd9c1f9bac0c87163f671c56f+67108864 c039c43ad2190e1be0cf68376bef9e76+67108864 6c616beba20069803ac30b1be56aeae6+67108864 28c4d7d0e0f45cbc9bfac0bf72beca95+67108864 a3b74f9d7d729e0443b2acac559aebe2+67108864 67d7c31bb19b81d190cdf58c92a0762d+67108864 6ba1fa534fb5ebd4382d2f37ab9bc16c+67108864 5c8e4e4404b987be63f37942f38b4d17+67108864 b3fdd9a8d97720199e9fc8bdf5464ce8+67108864 9e41a0725995633c4199fb5c79705146+67108864 d6ed794aaa78d7e338c9b88980798d57+67108864 bec7ca93e898b711f8705ed27815bfed+67108864 73d9523c774903cce44f37b36e350b33+67108864 3bb892044528096fb01358fbcb5689d2+67108864 ba6a6d4afd22b52213df85f79a30d251+67108864 b1b5fddd4d4dfc8de7b6e9edd888f24e+67108864 ad1cfd8c575b1f4df0d9481bb25bfe10+67108864 eb42bc382bad6c31248953175c57fbc3+67108864 f7ff3fbcddfdeff776851bf7f0b91708+67108864 
096d0a35baf62c6267a588ec20b0b295+67108864 213f9667d41a694c24edeb6723817425+67108864 a1a3102d7223910a50f343bfa9f567fd+67108864 cb20b98b9cfa88ab1cf0cde7e92a963c+67108864 cfd396b2c4fdb101d220385445bdcc6f+67108864 feec74cb51b17c68e03caeaf520a013d+67108864 3b0eb66cd74ca88137ca5f3ba474c653+67108864 4e564d3d903811897e5970f31e20cff2+67108864 4b811ae252ba5070d60c6dfe5f6374ed+67108864 139ab8e8f46ac3bf236c23de49925413+67108864 e8fe58e8624815a50badebfa4d6bec2a+67108864 3015e8601d6eb6e42e9cdbc64664f32b+67108864 d77de37630cf3560ad50d02352d0c514+67108864 abc01fbe76ef45bdf2835ef06e4541cc+67108864 4dfb8d4243e6049028dd1fe43fd2180d+67108864 3220ab3a86613f63e0e5d4bc5a02afde+67108864 fd77865c46669253284d781a553b8a57+67108864 851ca8a64dca3edf8eb645a0007aa9c1+67108864 16b554ca3194d8b58cd9be8df8b7a321+67108864 9d0b9cfe67898e7c9f58050a5231e4b0+67108864 757a90985b5b92b16c02642f7c36fd74+67108864 3857a258764c94340920f45da85541fd+67108864 40326fff73456df9b895e6ff137fdccc+67108864 63f1d9a514d48754916bd33722911176+67108864 4820a4a74d6d4be30f4cca1eea2fb507+67108864 636d058d10bf9791c3092a8443915c9a+67108864 916f4f6dd7635b871325c288fbde0475+67108864 e7b2a6379ba0d49e7017bae5d4e5469e+67108864 c17c35e312dba5bd13ffd23cc2c7d939+67108864 7d80a58afa6735d113ffee5228a87775+67108864 99ecaa8b85b99e85427fa76657681647+67108864 602ae0f4abd8be52e76e9749ab8d8766+67108864 546e66b992c13e1f42fb03ebbe21d20e+67108864 46be78022b380d3b1ad9a7d6d48d4ffd+67108864 632596cde5b56aa0ad4e5a364669bf30+67108864 df8e7efd77d29fc70afcdfb355bb7002+67108864 ea0b3c954abe83b54c8c2b82b4b29217+67108864 1b7ff88f8491f39efd6bf74caa1b8ca2+67108864 208469377ac08802354de7a56d216c99+67108864 b467a3def115f956b617a4b5c066bd38+67108864 1208f0cf2e1953ff2d218525a0e5267f+67108864 44d7cc36d50d20860983f5a0869248b5+67108864 ac5c5552be8df8b790863e15bb50f3d5+67108864 0eee3089f9e662f5593a5ebc27b00532+67108864 5debd7be66b6480762a64e42f03aba81+67108864 e6a2bf3e08fd8ae2532b5dfc07e61f4c+67108864 64cb0b5d826b614aef5d95f17da8303b+67108864 3df707ea75c92e036c1185f046d5a2a6+67108864 50a7f1474a09c58d74d64605bddea6ae+67108864 874d107937f315b0b8169346b0630074+67108864 8e67dea16e96a62bc9902954d98a0f66+67108864 8d97c8703fac17ff05d2493233a67db9+67108864 0b21830cbcf73b7742837500b4c815c4+67108864 f2d055d36a926d74b87235c598cb4bf8+67108864 8aaf0b8589579bdc8da894fe167df948+67108864 fb64252c12e1fe1b64762c7f2c476dbf+67108864 3e23aaebfe46253e265f1cdbe55e9ca7+67108864 3ec7f774ce71e3d5b05a1b428357879f+67108864 870c74fbba8c1c5ca855374e34227770+67108864 c847245ea26999c205496a2edadd1f0e+67108864 c9c01dbd3df4a60e85d49a09c7a78cd6+67108864 1c5409de5ac0dc6c56cbcadc39de2c88+67108864 73081aa5140b4be500081ff8097478ef+67108864 edd831590a9ea992557dfae2ced26ec3+67108864 45d83cbc0c27d34f54e8ab01dac78acd+67108864 29d0340872b053b07c17781af001d700+67108864 6c707bdab47d7f86926f3ca83d46a5b3+67108864 c83a0779a209c7b6d36008b3b5bcb094+67108864 022481f50f42cf63f4931425ce841e6c+67108864 3727c014ebdfa8fd044c11fa7e5fcdc4+67108864 2cc1df6a91333b4b9b1a1752d298d7f5+67108864 6455c34e8cb08007b5b538aa96335cd4+67108864 c611c113c44dbc38f2b7dc0e2c8e789e+67108864 72d91fa8c3fd9ca40fe9ce4abd6aef3d+67108864 269ba993ba237bf6fb38573d129e29be+67108864 c2647496d2d5e294a4f59d0b321e597c+67108864 8652c87dd888540694383e6c37fe61e6+67108864 ba173508a36e70695d67f82bd002ee75+67108864 6b24483f7cba1ee77e92965c96eee803+67108864 7dcc165fac7975c3e390ab595ea84215+67108864 34de3797f59cb379bd7bc54eb07c2159+67108864 2adc45907715d755fd4f422b841609ef+67108864 7a46f799c381c18cb2ff2812b5b845fa+67108864 1986cd180cd8e948cfff091ccbb656ad+67108864 
622cd1910b7f81f0c7170da1a3362bca+67108864 3aa879b74c7dc6bb0858636638d2b5e5+67108864 0dbf410b5be606ff739a480dec8161ae+67108864 f4086a5dac293be83bd889b04b7fd312+67108864 1f63c6f777a762cbb96169b2278ada67+67108864 96b5c1defe59dabbe1a380a8bc6056d6+67108864 84b13f87fa6c3ab2d04eb93f3505c98d+67108864 dfdc2aae1c1f49fbc36b5ab9573cb64e+67108864 318d865d048de4b484291b88bf36af44+67108864 651188cc20eb0df1c8945c533fa2892b+67108864 a8256ef0104dad714359c01c8059499f+67108864 5534cf3170d3cca00e8926e811ed31a9+67108864 35097b20dae2e31750cebc3a4046ff7a+67108864 08146f4dbf346719da9351d806363c3a+67108864 dda60404102211f6f8958f1f044e2a4a+67108864 f0353b389211bb2f3787147643e0293e+67108864 42a778507d32f3d16563d23b17c836e3+67108864 770af5bcc4f42e26bfd94f11b2de1111+67108864 f1d6738a1979688177fb392dbc3ea8c8+67108864 40be76d3cc1e1f0700ae7bee5b084b88+67108864 8560b693afb6ec7ab75032819e1a0737+67108864 9ff467396156130788b0de8b3abbc835+67108864 fb81044f46ee376b4ea7067c0e9474ad+67108864 5aa3aa18a8eb9fb635b218b5ba85713c+67108864 4bf897273b900fc96efaec8b27266257+67108864 601cb544933cb27f9b8322d609c24379+67108864 0f845c76f2cebf936abd4a8acee42076+67108864 771a5507849272544dd28835aee1baa0+67108864 82ad48d9641075f08789f7e9dc4fba26+67108864 30e0fbcc1fd402d6d4ee077dbbed9985+67108864 9c3f4cff8a0292cabccfe492cc978c6f+67108864 6413966c9aa54f96e2426600936d72d3+67108864 cf903ebc0656b705c26d869d455405ee+67108864 0ed47f82adc7a5e5787715e6a43d9fcc+67108864 2a92d60161816494b4ccbe4d727e021f+67108864 6d4c65315c56f53ad4955b2cd402286d+67108864 3c6d8a7ccd14d9aa75d4ad2e74b7bb28+67108864 982dc1ddfea2915a10dde69ec06dfa76+67108864 03b9c546519fb29aaac3a4bee52414b1+67108864 7803ff4e38dfa625745189e39876c155+67108864 844ea317c0f0095e2f51182702c393f5+67108864 2ea4a37141cf743d79fd884b70f4de05+67108864 c7e2a60485c19ffd6ae4cd5b61b1cf48+67108864 791b84800ed2cc0869bf33b7d85cee77+67108864 6f2b1d1f71f26c1f5bb606034db357a7+67108864 ac185d4ac6b4a85d2320662aef29f060+67108864 2c30efc1fb13aa0043decfb8bc1d1463+67108864 b421b08e5745d876953098e2f2baacd4+67108864 307e458977fd32d61445babb4310030b+67108864 3a6d5736dcc1ef9db6c694dd36e6cfa8+67108864 7ee43e9000accce3462428668a696cde+67108864 a15a30673558f6070996b77e5f975285+67108864 d358ddad7d06cf521cf9d6657b51cec2+67108864 eac6f5c1a8e2cb363c89305c7bfdbd6b+67108864 e5a87afb73e69adc46d79f57b192b7da+67108864 bc7cf84f4d954131ac5e2df31b31144a+67108864 5a9e7bcc8bbf16f3d64545f77e446642+67108864 b1bca9126d855cff7680d960f7a67a45+67108864 15eefaf5212822bc3e0e06916681fe4f+67108864 2ec224ffa681bb8ebaf65b984fe5ac2d+67108864 c5a94822c9c89f4f6627d288747f9b66+67108864 52aaeee57527a75a3bbbe911100c0361+67108864 80fa3e2b96c7823d8e9979d67f41e98c+67108864 b361c3601325ae469be51356e5804d72+67108864 78a933d60f6ca62c1df56b9b11a19160+67108864 98c26a9f728291a50055330fa4880f93+67108864 4f5030e434da715a0e5e2b6f85f0fd46+67108864 06f2acf241fc5269f94d85533b7417fa+67108864 8e39b0432f784ab407735d0d9667db20+67108864 3d6d01636a230edc77fe2d2cfa8c822b+67108864 af54747b06874508412e92815d05fd54+67108864 5f9beb261ee7800d96cf0068707b1419+67108864 8f2cfef40aceb50cd10e37d3693d340e+67108864 e8871ce0905050da9fdd140f96219b34+67108864 1c907eabee3b1eb051aa5bd16ea8c40c+67108864 aa50715b07f837cc3f9e0047ea08ba4d+67108864 d4f89f368239b077b28902df2291ec7d+67108864 513c7bc368665fbbe551708bce375f54+67108864 b5ec7a608fdf46f63b2aace86b22ecbe+67108864 37180c46fdbb4da4ebf7fc85604cb6dc+67108864 5594f89127ce8cd5b5b65b15076d0017+67108864 85ea487408db514720f43e8e61013a9e+67108864 a58bf72af3f393a3bca9f78502f39f09+67108864 3d63ead9a1ca876fcf472a36e437d5d3+67108864 
e437dcbb269983af98f515fdff1b8c7d+67108864 76e1b803f60d796a15457fd006290246+67108864 eb4473b76001e5c65ac412099965d200+67108864 18e85dad25031b80d870089342d3fb30+67108864 35a7541d53a64feb7eddeeef2535586f+67108864 c2fbf1f0f1fa1359db0899683894f33d+67108864 6794ee809f5e58111792320303f1a8b2+67108864 d848092b6abefd75c4af817805da1cfc+67108864 2f7471f842739e00be1bdee2c3af96de+67108864 33d3bf343d5703851cf571cc0169acf4+67108864 73d934696c7ef7d873c13a61c6e93610+67108864 eec4ab22ea8289a12fede9936d06c47f+67108864 f280a1f0d2afe437f560e2d37b3d6f98+67108864 d636a57ea0e9b1018424a2acaa6d6a6d+67108864 4025879d6bf913a95dd289efbbc1a289+67108864 d7a116cdc7c52a7e05658d3ed9e24124+67108864 e3221ca9702dc74d06f168840663f73f+67108864 4165ee4fd1dbdca4c8879aed8a2b47a8+67108864 81c0cbde0e9cb9b1b141b48ea1a308cd+67108864 bff9166a75515e5f10af5538dc303212+67108864 45b4ca425e17296da803a51e0fed70bf+67108864 60e983493191b0df26b65c8e7446e9f0+67108864 93590b8affca8daf37b17844b5bf9040+67108864 8621fb9f9c1023d9faaa6eb74fc54a4e+67108864 23275e63a4acd8c7af597fcb76df373e+67108864 f6c082b0018e2c7cb81b97dbc76d5276+67108864 c1de2367ebbb8f821924cc9829caf433+67108864 28d7fe0b01343515c98152458e7469ba+67108864 d30ecd76efca9a45fb6541bf982bb043+67108864 c19fc0b7c6131911ec86f395b32942a1+67108864 fee25c10fa170ef91d6716546c325503+67108864 ba852a7069532ec2b50d30a2b8680369+67108864 eb6f12e52f9742085af5b111263a8a31+67108864 a3c5a938fe579707f06bbe71a445f57f+67108864 103fef8dfcc65c29b0c4c348d6b7cd5c+67108864 891dce541cc4c9fc0cff89231b7081ad+67108864 63434747d0b4b7677fc028fc842a01f8+67108864 4debf4aae27725e624d95e8d8f092790+67108864 b8b34bd97e512895a509222db622ed34+67108864 37e2b5b9f31dc0ed14f45af72827b850+67108864 fd1093368c508a9ccb7faa058087080d+67108864 3ec967bb33e9b55fcf32b63986399481+67108864 608325c1df26b43099ff3f539eaa8534+67108864 b3bb50a8d7a2af13df953ccd64770037+67108864 85debc01db61ef231b17d5adac609899+67108864 8e088c719257d21ca6c9c14c437ef72c+67108864 fd1c96ccde6743a911fb60a4433fecb7+67108864 a8f426581b54a0591d2bded643fde0f0+67108864 19ceaaaacbd811496d178ab8be3d721a+67108864 603b2fcacaaa2903771c5c0a7be15c6a+67108864 ee44b1f92ff9204befa52460d6ac6dda+67108864 366c305dd4378452ab3e9c7ed7b75e5e+67108864 ee826c5bf9d9d25c12d74cca0cde7cde+67108864 cf9ac2e024028adaed51a54e6dc70cd6+67108864 eca814155c1024562187292140170356+67108864 ccfcc357515eb46b9fbadbe80e8ff20b+67108864 3699ec34d183a068422d10b40aae0631+67108864 8daacf3c17a7273ce86e75fc555ecf70+67108864 b23cdd04b9fa1e61b972c2caef1577a2+67108864 990eeff6f56cbb12ecd6df721cac3115+67108864 2de9439dbf73f13efcdad3bb1909d5d3+67108864 e2cb816e92cff445a13fa9754c2bd2d4+67108864 892fec0f10624ae2eab5e4a833a53e3c+67108864 838ae0ca9da00f5c2621af49ec562448+67108864 7278188b1c82dd51460dc4b8c4557cd5+67108864 3c9d0798f3827f8363ae677bafbe9a69+67108864 910df3804a999d1414eb4806bd3b91fe+67108864 5ec3deace8be01db561690bf2ad6fcd9+67108864 42245551b2225e0185aee98a3bbc96e5+67108864 024d1b37f61012e8b11bce86417c5b66+67108864 bf9d48c28504eae8ad723c1e723e6c1e+67108864 daae1d560067bf2c461707a8063fd284+67108864 f9663b2d732c9e954a7cb728978f15fa+67108864 fa95a4fdcbd2128496b72c4ad304065a+67108864 bede18218d8c3c019b3bd2c873b94543+67108864 5ff434025926d30ae45c2114dc816257+67108864 b2cde4f1fffd89abdeb9f585108ed9e7+67108864 d54e21fbd16a3e8c38055a1c10ff6c1a+67108864 d49e4c6e77ee68fe23d34a679c792535+67108864 c30788084c79acf5bdfbf2a44a6486c7+67108864 142696943bd0289004de141776a4ebf3+67108864 23253a95877919a9e15b9496d171a990+67108864 a9fc4555af8829d2720a2b308772ff19+67108864 4d97246ede727b2dfdb72568397a127b+67108864 
55b1cc3eb96ef940a8872d2a828abf64+67108864 70fa925c5553dd08873444c7d0ef1b8e+67108864 76c3db8722b92364f03f87eeed8fe53d+67108864 43ee24bc7c125362797acd869881fdb2+67108864 6c28ceb03969b041c1c189068f12bab4+67108864 7078ef25b761e30d51aee9116c5c59fd+67108864 88cf2cb45ee2711c1477e290a6cda42b+67108864 6cbf85a79fdeb004196684e63e45de3f+67108864 0dee67d87e5628caad9c3cd19e8bb38c+67108864 5313f75103edb9ab1eef9d0d9e987609+67108864 3d8454608337215314df099fcde636d9+67108864 2864e53673cadb6e1ed4fed72ef6360d+67108864 ef77259b68a64e9db567fcedf9709612+67108864 8bd0df0b2999c64d2866152d868dd4a6+67108864 3853cdaf6af0a85b0cfec54cd0d2e7bf+67108864 26d95f4758780a9eadfb0fa4b9fa9e85+67108864 bfaba46d337bfc45ae3dff8a5eed0d2d+67108864 52098bd03d910f2947747f744a7e7cf6+67108864 4cbb3854d9975b3e3d3105257aba7ef3+67108864 e06499f3a15a3dd2699f6942c2c0620d+67108864 efc4c80437412a4d5b0baf65a3b5550a+67108864 c574766c117b409c08bd12d82229f8c1+67108864 52af621352517bc7b5f7a37bba1e9a7f+67108864 f2258eae9b00f0837441c0e9ca024308+67108864 e57077ada195464b45117e44ce43292f+67108864 50d87f4c461824c081eeea80f9405783+67108864 2b538fca02bbe2e7d32bd0b58921e82f+67108864 2db2d0e983e1e3507de4cd9976a31017+67108864 7fb8c5435dd3eb83ce94e88278123613+67108864 67e61af3278ebd337d0155843024df19+67108864 9b65d127cf41e54ca6eb215806c3c78a+67108864 76b9faf2c24bc448817c1ff94f7e0aad+67108864 c04f7e671684c411d736a4f217e8c285+67108864 c14a4e1dbef1112fe60bbc8515e0d6d6+67108864 851819a0f279d5b0958e5d718c5865f0+67108864 d1986a6720da6e1abe012637b5547d57+67108864 11b0f93f0f5373729978dbc17362ef97+67108864 8c1f042f1c7f465019182de726f02350+67108864 2a455b07fbb041db3cbf54ade540752e+67108864 647e3a6dc6b99ac5866d0d1c4e2e7352+67108864 7cafb10d4d750b6a70b6de5f064eeb0c+67108864 d6ae66882a0ca9bba70d847e66797896+67108864 0d01a0eee71ef4bee88f9453f632134b+67108864 ae2140209ba681364275a26e8eac217e+67108864 40b675b2b11e70de68407f0f70f1f03f+67108864 130d9d5d51e8cc5096ba667aa9364c97+67108864 380a783cf03f5a3cfeebef66aee5b28d+67108864 e56360b5cda00f91cc3ba4b51633fdb4+67108864 24d4b59098f119b5b9fc5818f16d6aab+67108864 0b593d049f80991ae59cd330123d42be+67108864 30826cd5f49acf23070a1ded8022808b+67108864 cff3def8b5cbd55e5a1679f9c8934923+67108864 2d4a11a94554b1040cf8904396b0680a+67108864 eb9a072fa8e648ff5ef10b288af5fb6c+67108864 8046144d1a921d88683205b893c9aebc+67108864 7730f957a9be36db7abd4962afb3054f+67108864 742a9f4fcde793b644c13fbb4ee3e975+67108864 a35832757441d24aa04836af07b4c3b5+67108864 0ba07a50ae988c3506b7fa07e509f5bb+67108864 b39743548f0a682c65f277764ff9b097+67108864 21098bc8efe18127c66082e6222bf7ba+67108864 6f21d4c116fdd221692ee09036c8e985+67108864 4f3e46fe795367a58ddb917439850237+67108864 3531ce816ce7ff198f668c7119d7e339+67108864 799778902558e13feecdb35008f64335+67108864 773a7ea46671db445dcff7cdb3641fa6+67108864 97e489ed1177fc6db932af81478b9a6d+67108864 987e799850a5a5027ec095af93e53250+67108864 3a531df05dbe8224cec6fec6f411baef+67108864 d2ee610b5ebc5e705a6029c74d86710a+67108864 672a73b64ae5009c06438b755549241a+67108864 1e7185a9cf5ba378826474bccc51370b+67108864 e489af9d0c3ffef982125ac3e016391e+67108864 5dc7dc12e9cf09150aa39798f16dda0c+67108864 764fae2a1a5c53bc9b7725b8d48e17a9+67108864 4fcbea54f6867700008205dee1a55ee1+67108864 2d7af75fae4b8fafc9198432a127d3e7+67108864 71c3f297de8c416025af0cde00cefa6d+67108864 cad383c3f7341c192398820fbe86e665+67108864 d3ee90d3f0e49c76fe7d5ded5fd8a13d+67108864 52b2a629ea9a32e9b3cd3bd153989b6f+67108864 267fdcb1dcfb1b721352462501444d53+67108864 283ac9aded198745e16d3e2172fe59f7+67108864 57025c826ecb06f0321ce3e0ed7b2e4b+67108864 
7cbe3a01c92f13807b15ce870a160fec+67108864 de7f59f08ea07551bdf2eac2b5b346ae+67108864 e70e70e3dd37ec0e59e1109ac6f1b3b8+67108864 a64486d04c1bf082e27e323f3edbb6b2+67108864 3af2e24eddb3f079a43222f2c521a4b7+67108864 4a3ec05b28d6c7540660400f0b5e2132+67108864 d34ae3feefb83c1921cba43e1902a624+67108864 aa0b774639159abcdfd4e87cdbb22501+67108864 bb06b89f2760bae1e0205fa5030827af+67108864 e5f240b2ddec9bf3f9b685bcfe095dfb+67108864 120b67b713bdb4092424f299143ee075+67108864 f9ffd0f5696ed790a2cb0a3bfe7eac57+67108864 6c8d86f09658c53d0b0b30ee42482271+67108864 d9288b957174ccb7fba72fc4ea3d761c+67108864 ca4aa05a80f2a62000d32b9b3bec7e77+67108864 ce9809f906a6c05a0ab0fd87238d083b+67108864 ab0c00cda4e5e7e5ab426c40dff7b705+67108864 ec73f7e70dc8a41524c25bb3a48c34a7+67108864 1f073a660524c25e2613feb6f05eb72f+67108864 09542461e2d26915c2d6ad6d2ce37c22+67108864 2ae249fa2dadd829eb68f9706c6fdd5a+67108864 34a60d677721a4f6106aeda9486731c7+67108864 d5c3016a253edd4be3df2a2d9e205fa6+67108864 aefca039b38116199e66f3ca685ff711+67108864 a952a7a8e82db93aa5ece90dbfeb558c+67108864 df3e8d76a91b95534365ad17c7fd80ca+67108864 683acf860d1b972852697e75e82d4377+67108864 4b48aa46659ccc66948b1948920c8bb8+67108864 75921520ff8976da048e4f48b65713fc+67108864 b1f9b9541ca3df1ff6d644527853a56e+67108864 f482ed10f9bcc8839226c13374dc24fd+67108864 89f6a2018a94e0dc75efebbfb3298ea2+67108864 1116d391c6aa0168db01ae84e4a6974e+67108864 3638536201513086e8a6ab97f6b28013+67108864 e0ae416fbd2edbea670fd4ab33f0dd53+67108864 cab05291139f2f9695f80fd4f56eb9a7+67108864 2074dc1e5b43560a7f0dba097f798599+67108864 499e50cc59a0282465d7f5d7ab00ca3c+67108864 c8d5ea8ac6383e69951ae03b92f4ef6a+67108864 028684dd6cc229bc52bc421784696473+67108864 9a05421a96b3657e72306ac3cc91a14c+67108864 c28927241aa6dbad0408cad3384d4887+67108864 37e3154fa87dbfdfc1508a17d9a539d8+67108864 bd57b88617da02c1c547a534d4cea6ac+67108864 cced51fa202f8410d4b0c0cdbd839e43+67108864 b2a3991e08ed41183e4e3693b3de7871+67108864 6442a997170bbf2f64c2f423c3e58a02+67108864 afd77bcbb63d84d2a59a6ca20921187b+67108864 54eab8b24e4e9fa88256f15c302a77d7+67108864 d3affc2d9a34a998d7d189b9e5a7055d+67108864 c457931914fa11b139fb745f900bcc5f+67108864 51ad6a806b0eb57d4f9e43d713a8eb23+67108864 42a1e629c0fb3f26eabfd15560f97b32+67108864 f878ee0ae55ae5e0f57611ddcef39b1e+67108864 fcb4593f27accd8efdf42afe593a29a8+67108864 d61ab1521fec675ce644ec3426f49833+67108864 7844dcc6c4a53d645d8d6be11fdde7db+67108864 cab883dc96650743eea243f830ddfa99+67108864 e412e3496b8e288b0f84f3915f78682b+67108864 3c9f0bbfa22aea749ad1003759f90871+67108864 6476e0c1c60207f0c5f82179cbb1eb55+67108864 39803ea71680efa53d93ecf071da128a+67108864 746567896b262ea67fb15702772272ae+67108864 3ebba6eaf8d320520c1ae04c69c51c54+67108864 60ec5ed8a833afb4065e39dbf649ce3b+67108864 378ed3ee18558e18b02e9d864c740192+67108864 496ee9cfb30dae1cb7bb7d7be99b4f9d+67108864 32c5ca216b69a9024cd785f3c6f5727a+67108864 101aaf4e6cc6fea9372116cbc91295dc+67108864 26da7649915f412dc9e353ae77439e16+67108864 81f8dff12b4aeabe610806080b56cdf4+67108864 b305bf18afbd40bd57fdcbfe80a1f6b0+67108864 34dcac449fd85a37e468b5b747d860ec+67108864 dd7296825e900f1a5c48cc77887b6d26+67108864 778bec1f5aec275d7a0747fc2f1edb69+67108864 a1c96dd0819cf0eac95edd0ab5c05a58+67108864 2bd7d0ca76c8f4d0efda3935575cc45c+67108864 1aabfb4e8d4143022443335ff4326f1e+67108864 ab939e65a4d32b305261829f2abb8b2a+67108864 d336dba0e326ae1e24000c00c10a3e3e+67108864 91da82d2cc82f55b859e2233c76e60f3+67108864 5e8665e03aab339a9f7da3c529a2b612+67108864 d13a86b4688790e57a32e87bcc209ec4+67108864 05712bd5603728d3f9016791d46f18a0+67108864 
0b9a1a50d053843bcaa8c2c92d2738d3+67108864 a155912e0e45b54521429e739096d7ed+67108864 dedad0d2a05e7f7786e90370e2f4aae7+67108864 bb3b9f9b905ea40b6aa7360ff26c251b+67108864 f1c1dbf5e0ca7bc398ace6d215eb1b0f+67108864 56ce951826892637bcf276bd7e234587+67108864 b1b20e8b062f0a22375754b6f3170876+67108864 942532b6c96f7b51859d39ed123b2509+67108864 c4ead5a64baa6a6fd16c6d9d639ab84c+67108864 d7d4b85f843e2f4a5576fc7d9b6f8c4a+67108864 7eff16af07bdf8470b0b19f843c22ec5+67108864 1dd070d54d64b07d27fb7625a94b47bb+67108864 b33c17ab40dc191636dbbff51c1b0aab+67108864 d8cdd7ca5fdc00505f405c9ae54947d3+67108864 a2a68c3d5ffa4d9660e1644fc784c595+67108864 6419d072f83937007be14ade52127892+67108864 a21e2c355fc15c5aed0ec8bbdd72ce57+67108864 5a39145dd181812527c28ef26515ec05+67108864 fce2f7b28fac75292ad68c9e158a13e5+67108864 ad8a8fcea664c1f0e73e68f1747a926c+67108864 6a0475795138c21b49a7f64a83830786+67108864 fdb58610320a6c1ac6b7a65621d0e1cf+67108864 7b00d44d90b7e2c303442017f4c90669+67108864 05d5186afda38885eb02558047b4de5a+67108864 1fd3dadea7b7325a1679a3bd64a0677b+67108864 02873b880b7c81807450431f38578adf+67108864 92f94a574cf8f897b0d5cdcf81a36757+67108864 ab23dc595b61727226997bbfc81d0ab5+67108864 d6641c1132d04737d040216305a9dcbc+67108864 0ef5d9a5a279b482fa6b97aa2039e423+67108864 aae63d13b1d9d8405d55c79f38a562d2+67108864 c676127582c2f5324575e7044e8e8e13+67108864 f4828a91ed7679c403f0711910adb8e9+67108864 9dc446c9842db3d526567b1b07a55c4d+67108864 c4192c0f0cb5a42c1bdb49530e3560bd+67108864 98cbdb65458086ad86164661fe3113fd+67108864 4d4c5ad721ba1ec22aa044fb5cec743c+67108864 7ea9aa9309936c10635dd5a5d1b986f3+67108864 7dc4a6d69b3ae0704a25fe6d0fd75272+67108864 f749b92411e4f602907a4c2e0853d914+67108864 89384497e0c6b5ae900d504e6f4e7348+67108864 01a97fdc4c69bb61d00bb0e0f175ee9c+67108864 f2920e0ad57f64b1c2eb48af5387321b+67108864 a8fa1e0e5aaafb37766bd135c818a3a4+67108864 7d7bec841a2c0c4214727cc83787733c+67108864 73d6603ce67a74b1b4d7346ce5442669+67108864 43e728dd53e31a4c6e24ae69e38d5482+67108864 427dd6b12aec18c38c5b8a8276160cf0+67108864 313574f4f22e7dc8b4d3f6d1b5594146+67108864 d0720964a956512960d5c147bf8dd280+67108864 8a35b2b23f9cf7fd81c6739a13c26add+36834981 0:95527142234:PG0002578-DNA.bam 95527142234:8761328:PG0002578-DNA.bam.bai 95535903562:936266194:PG0002578-DNA.bam.fa.gz 96472169756:469864841:PG0002578-DNA.bam.tdf\n./PG0002578-DNA-jlake-tumor/Assembly/genome/bam/realigned 39f862840d200c3dbc166ad29dea6807+67108864 34c3d868d8bd1134dbbde930b5a0161a+67108864 0c44e83c212416b1845cc0b292bafa9c+67108864 3f9bfd80e8eccd4fe3d26f3a74f3de35+67108864 9021a4781b43910f1952000e72100689+67108864 48083a918c1e46c64b9387a334c1faa2+67108864 1a3e290d82c15d770ab030a806d4166b+67108864 16aacdcffb8fb09724e517e76984463a+67108864 c4b69566ee2213c9da499588e27f9b37+67108864 77616a26e1711f0f0af2afe361f07559+67108864 23afc19d408c9e2b918eef3254899dff+67108864 ef820a07ec1446e36dcfb54f4b1bb4d9+67108864 85804989c1a609229f2100aca2a5164b+67108864 056c41fdc0e02e32e6a480ce2c401e78+67108864 d79df4b7582af576371a42cb17de617c+62074987 0:996151435:PG0002578-DNA.realigned.bam 996151435:5447648:PG0002578-DNA.realigned.bam.bai\n./PG0002578-DNA-jlake-tumor/Assembly/stats 6798533a7dfcc926ab0d03ca4b1fe9bb+30924 0:14525:Reads.idx 14525:14525:Reads.idx.bu 29050:1247:coverage.summary.txt 30297:627:dupCount.summary.txt\n./PG0002578-DNA-jlake-tumor/Docs 5b614c0ac2f1b7be407433860ee2a553+5625667 0:277564:1_IGS_Deliverable_Layout_gVCF.pdf 277564:304833:2_gVCF_Conventions_CASAVA_Release.pdf 582397:332651:3_Illumina_Annotation_Pipeline_Documentation.pdf 915048:3765366:S1_CASAVA_UG_15011196B.PDF 
4680414:903872:S2_CASAVA_QRG_15011197B.PDF 5584286:41381:S3_bam2fastq.pdf\n./PG0002578-DNA-jlake-tumor/Genotyping 74a29b8642eafbee843c51f46fb526c0+67108864 c659bb8049be3ac13768227a5ac78228+67108864 388f348c50d59fc16bfb566f088e85aa+4019621 0:138237349:FinalReport_HumanOmni2.5-8v1_PG0002578.txt\n./PG0002578-DNA-jlake-tumor/IGV 1ae944d18638508fece1b0a8bfcf8c9e+15934904 0:2898:.igv_session.xml 2898:2898:.mac_igv_session.xml 5796:2526:GID_session.xml 8322:171972:batik-codec.jar 180294:202:igv.bat 180496:43:igv.command 180539:15725768:igv.jar 15906307:42:igv.sh 15906349:1150:illumina.ico 15907499:26167:license_LGPL-2.1.txt 15933666:1238:readme.txt\n./PG0002578-DNA-jlake-tumor/Variations c16493faff63b09072395f9131e38152+67108864 f8f61d884faf2a66f162af422c2749ee+67108864 75b165151b45b7aee32b4690f0644b38+67108864 a33a13617e75d300715e46c57162fc33+67108864 ca46c7a4c725d0efe0568dce26bc5213+67108864 18f8e870d93a271f8a894fc4c4351e60+67108864 adb2c4f3e1e99be24e3ab19cf5db7631+67108864 fefc2d2bfafc240776c7b06ddc8d475b+67108864 adf6ee298b575c59143eda731f83c1cf+67108864 78cf4992604945f97209402eabb2eb7d+67108864 8bf9394ad0435a51e2b2e57a841300c1+67108864 a6f8c5582335c30d06d111ecd79d85bf+67108864 7765bf35e97576b071639d17d9bdd090+67108864 8b9b746b76b18e32ba8047176e9f74d2+67108864 56590e49063b1f814005d9c02000ddfd+67108864 10a639f273764b31451b5e8d95a8e98d+67108864 1b1c8235e66633bb95d2a2a2bede6f3f+67108864 4cc650efa04c38fd0502287a92786737+67108864 4433bac63122a8f9058334633904e988+67108864 3b470c2e8bc8d0ed981ea14353f08eaa+67108864 13de261edeba63d0c330f85bf3544656+67108864 7aee17fb67ed1a622f2bed2b3a50ff78+67108864 8c7ecacdbd99ec2cb3cbd276984bb361+67108864 4bee2d90998217971ef72b2f94054966+12779495 0:1432964600:PG0002578-DNA.genome.block.anno.vcf.gz 1432964600:3650551:PG0002578-DNA.genome.block.anno.vcf.gz.tbi 1436615151:118084986:PG0002578-DNA.snps.vcf.gz 1554700137:1583230:PG0002578-DNA.snps.vcf.gz.tbi\n"
  },
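  {
    "path": "sdk/python/tests/example_parse_manifest.py",
    "content": "# Hypothetical illustration only -- this file and its path are not part\n# of the Arvados source tree. A minimal sketch of how to read Keep\n# manifest text like the fixture above: each line names one stream,\n# followed by block locators of the form <md5>+<size>[+hints], followed\n# by file tokens of the form <offset>:<length>:<filename>.\nimport re\n\ndef parse_manifest_line(line):\n    # Split one manifest line into (stream_name, locators, file_tokens).\n    tokens = line.split()\n    stream_name = tokens[0]\n    locators = [t for t in tokens[1:] if re.match(r'^[0-9a-f]{32}\\+\\d+', t)]\n    file_tokens = [t for t in tokens[1:] if ':' in t]\n    return stream_name, locators, file_tokens\n\nif __name__ == '__main__':\n    stream, locs, files = parse_manifest_line(\n        './dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:empty.txt')\n    assert stream == './dir'\n    assert locs == ['d41d8cd98f00b204e9800998ecf8427e+0']\n    assert files == ['0:0:empty.txt']\n"
  },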
  {
    "path": "sdk/python/tests/keepstub.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport http.server\nimport hashlib\nimport os\nimport re\nimport socket\nimport socketserver\nimport sys\nimport threading\nimport time\n\nfrom . import arvados_testutil as tutil\n\n_debug = os.environ.get('ARVADOS_DEBUG', None)\n\nclass StubKeepServers(tutil.ApiClientMock):\n    def setUp(self):\n        super(StubKeepServers, self).setUp()\n        sock = socket.socket()\n        sock.bind(('0.0.0.0', 0))\n        self.port = sock.getsockname()[1]\n        sock.close()\n        self.server = Server(('0.0.0.0', self.port), Handler)\n        self.thread = threading.Thread(target=self.server.serve_forever)\n        self.thread.daemon = True # Exit thread if main proc exits\n        self.thread.start()\n        self.api_client = self.mock_keep_services(\n            count=1,\n            service_host='localhost',\n            service_port=self.port,\n        )\n\n    def tearDown(self):\n        self.server.shutdown()\n        super(StubKeepServers, self).tearDown()\n\n\nclass Server(socketserver.ThreadingMixIn, http.server.HTTPServer, object):\n\n    allow_reuse_address = 1\n\n    def __init__(self, *args, **kwargs):\n        self.store = {}\n        self.delays = {\n            # before reading request headers\n            'request': 0,\n            # before reading request body\n            'request_body': 0,\n            # before setting response status and headers\n            'response': 0,\n            # before sending response body\n            'response_body': 0,\n            # before returning from handler (thus setting response EOF)\n            'response_close': 0,\n            # after writing over 1s worth of data at self.bandwidth\n            'mid_write': 0,\n            # after reading over 1s worth of data at self.bandwidth\n            'mid_read': 0,\n        }\n        self.bandwidth = None\n        super(Server, self).__init__(*args, **kwargs)\n\n    def setdelays(self, **kwargs):\n        \"\"\"In future requests, induce delays at the given checkpoints.\"\"\"\n        for (k, v) in kwargs.items():\n            self.delays.get(k)  # NameError if unknown key\n            self.delays[k] = v\n\n    def setbandwidth(self, bandwidth):\n        \"\"\"For future requests, set the maximum bandwidth (number of bytes per\n        second) to operate at. 
If setbandwidth is never called, operate at the\n        maximum bandwidth possible.\"\"\"\n        self.bandwidth = float(bandwidth)\n\n    def _sleep_at_least(self, seconds):\n        \"\"\"Sleep for given time, even if signals are received.\"\"\"\n        wake = time.time() + seconds\n        todo = seconds\n        while todo > 0:\n            time.sleep(todo)\n            todo = wake - time.time()\n\n    def _do_delay(self, k):\n        self._sleep_at_least(self.delays[k])\n\n\nclass Handler(http.server.BaseHTTPRequestHandler, object):\n\n    protocol_version = 'HTTP/1.1'\n\n    def wfile_bandwidth_write(self, data_to_write):\n        if self.server.bandwidth is None and self.server.delays['mid_write'] == 0:\n            self.wfile.write(data_to_write)\n        else:\n            # Write in ~0.25s chunks; default to 32 KiB when unlimited.\n            BYTES_PER_WRITE = int((self.server.bandwidth or 0)/4) or 32768\n            outage_happened = False\n            num_bytes = len(data_to_write)\n            num_sent_bytes = 0\n            target_time = time.time()\n            while num_sent_bytes < num_bytes:\n                # Trigger the one-time mid-transfer outage after ~1s of\n                # data at self.bandwidth (immediately if unlimited).\n                if not outage_happened and (\n                        self.server.bandwidth is None\n                        or num_sent_bytes > self.server.bandwidth):\n                    self.server._do_delay('mid_write')\n                    target_time += self.server.delays['mid_write']\n                    outage_happened = True\n                num_write_bytes = min(BYTES_PER_WRITE,\n                    num_bytes - num_sent_bytes)\n                self.wfile.write(data_to_write[\n                    num_sent_bytes:num_sent_bytes+num_write_bytes])\n                num_sent_bytes += num_write_bytes\n                if self.server.bandwidth is not None:\n                    target_time += num_write_bytes / self.server.bandwidth\n                    self.server._sleep_at_least(target_time - time.time())\n        return None\n\n    def rfile_bandwidth_read(self, bytes_to_read):\n        if self.server.bandwidth is None and self.server.delays['mid_read'] == 0:\n            return self.rfile.read(bytes_to_read)\n        else:\n            # Read in ~0.25s chunks; default to 32 KiB when unlimited.\n            BYTES_PER_READ = int((self.server.bandwidth or 0)/4) or 32768\n            data = b''\n            outage_happened = False\n            bytes_read = 0\n            target_time = time.time()\n            while bytes_to_read > bytes_read:\n                if not outage_happened and (\n                        self.server.bandwidth is None\n                        or bytes_read > self.server.bandwidth):\n                    self.server._do_delay('mid_read')\n                    target_time += self.server.delays['mid_read']\n                    outage_happened = True\n                next_bytes_to_read = min(BYTES_PER_READ,\n                    bytes_to_read - bytes_read)\n                data += self.rfile.read(next_bytes_to_read)\n                bytes_read += next_bytes_to_read\n                if self.server.bandwidth is not None:\n                    target_time += next_bytes_to_read / self.server.bandwidth\n                    self.server._sleep_at_least(target_time - time.time())\n        return data\n\n    def finish(self, *args, **kwargs):\n        try:\n            return super(Handler, self).finish(*args, **kwargs)\n        except Exception:\n            if _debug:\n                raise\n\n    def handle(self, *args, **kwargs):\n        try:\n            return super(Handler, self).handle(*args, **kwargs)\n        except Exception:\n            if _debug:\n                raise\n\n    def handle_one_request(self, *args, **kwargs):\n        self._sent_continue = False\n        self.server._do_delay('request')\n        return super(Handler, self).handle_one_request(*args, **kwargs)\n\n    def 
handle_expect_100(self):\n        self.server._do_delay('request_body')\n        self._sent_continue = True\n        return super(Handler, self).handle_expect_100()\n\n    def do_GET(self):\n        self.server._do_delay('response')\n        r = re.search(r'[0-9a-f]{32}', self.path)\n        if not r:\n            return self.send_response(422)\n        datahash = r.group(0)\n        if datahash not in self.server.store:\n            return self.send_response(404)\n        self.send_response(200)\n        self.send_header('Connection', 'close')\n        self.send_header('Content-type', 'application/octet-stream')\n        self.end_headers()\n        self.server._do_delay('response_body')\n        self.wfile_bandwidth_write(self.server.store[datahash])\n        self.server._do_delay('response_close')\n\n    def do_HEAD(self):\n        self.server._do_delay('response')\n        r = re.search(r'[0-9a-f]{32}', self.path)\n        if not r:\n            return self.send_response(422)\n        datahash = r.group(0)\n        if datahash not in self.server.store:\n            return self.send_response(404)\n        self.send_response(200)\n        self.send_header('Connection', 'close')\n        self.send_header('Content-type', 'application/octet-stream')\n        self.send_header('Content-length', str(len(self.server.store[datahash])))\n        self.end_headers()\n        self.server._do_delay('response_close')\n        self.close_connection = True\n\n    def do_PUT(self):\n        if not self._sent_continue and self.headers.get('expect') == '100-continue':\n            # The comments at https://bugs.python.org/issue1491\n            # imply that Python 2.7's BaseHTTPRequestHandler was\n            # patched to support 100 Continue, but the actual code\n            # that ships in Debian clearly was not, so we need to\n            # send the response on the socket directly.\n            self.server._do_delay('request_body')\n            self.wfile.write(\"{} {} {}\\r\\n\\r\\n\".format(\n                self.protocol_version, 100, \"Continue\").encode())\n        data = self.rfile_bandwidth_read(\n            int(self.headers.get('content-length')))\n        datahash = hashlib.md5(data).hexdigest()\n        self.server.store[datahash] = data\n        resp = '{}+{}\\n'.format(datahash, len(data)).encode()\n        self.server._do_delay('response')\n        self.send_response(200)\n        self.send_header('Connection', 'close')\n        self.send_header('Content-type', 'text/plain')\n        self.send_header('Content-length', len(resp))\n        self.end_headers()\n        self.server._do_delay('response_body')\n        self.wfile_bandwidth_write(resp)\n        self.server._do_delay('response_close')\n        self.close_connection = True\n\n    def log_request(self, *args, **kwargs):\n        if _debug:\n            super(Handler, self).log_request(*args, **kwargs)\n"
  },
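  {
    "path": "sdk/python/tests/example_keepstub_usage.py",
    "content": "# Hypothetical usage sketch only -- this file and its path are not part\n# of the Arvados source tree. It shows how keepstub.StubKeepServers is\n# meant to be combined with Server.setdelays() and Server.setbandwidth()\n# to simulate a slow or stalling keepstore in a test case.\nimport unittest\n\nfrom . import keepstub\n\nclass SlowKeepExample(keepstub.StubKeepServers, unittest.TestCase):\n    def test_throttled_responses(self):\n        # Limit the stub to ~64 KiB/s and stall 0.5s before sending each\n        # response body. Checkpoint names must be keys of Server.delays.\n        self.server.setbandwidth(65536)\n        self.server.setdelays(response_body=0.5)\n        # A Keep client built from self.api_client would now observe\n        # the induced latency on its GET and PUT requests.\n"
  },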
  {
    "path": "sdk/python/tests/manifest_examples.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport arvados\n\nfrom . import arvados_testutil as tutil\n\nclass ManifestExamples(object):\n    def make_manifest(self,\n                      bytes_per_block=1,\n                      blocks_per_file=1,\n                      files_per_stream=1,\n                      streams=1):\n        datablip = b'x' * bytes_per_block\n        data_loc = tutil.str_keep_locator(datablip)\n        with tutil.mock_keep_responses(data_loc, 200):\n            coll = arvados.collection.Collection()\n            for si in range(0, streams):\n                for fi in range(0, files_per_stream):\n                    with coll.open(\"stream{}/file{}.txt\".format(si, fi), 'wb') as f:\n                        for bi in range(0, blocks_per_file):\n                            f.write(datablip)\n            return coll.manifest_text()\n"
  },
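  {
    "path": "sdk/python/tests/example_manifest_examples_usage.py",
    "content": "# Hypothetical usage sketch only -- this file and its path are not part\n# of the Arvados source tree. ManifestExamples.make_manifest synthesizes\n# a manifest of a chosen shape (block size, blocks per file, files per\n# stream, stream count); manifest text contains one line per stream.\nimport unittest\n\nfrom .manifest_examples import ManifestExamples\n\nclass MakeManifestExample(ManifestExamples, unittest.TestCase):\n    def test_two_streams(self):\n        text = self.make_manifest(\n            bytes_per_block=4,\n            blocks_per_file=2,\n            files_per_stream=3,\n            streams=2)\n        self.assertEqual(len(text.splitlines()), 2)\n"
  },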
  {
    "path": "sdk/python/tests/nginx.conf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndaemon off;\nevents {\n}\nhttp {\n  log_format customlog\n    '[$time_local] \"$http_x_request_id\" $server_name $status $body_bytes_sent $request_time $request_method \"$scheme://$http_host$request_uri\" $remote_addr:$remote_port '\n    '\"$http_referer\" \"$http_user_agent\"';\n  access_log \"{{ACCESSLOG}}\" customlog;\n  client_body_temp_path \"{{TMPDIR}}\";\n  proxy_temp_path \"{{TMPDIR}}\";\n  fastcgi_temp_path \"{{TMPDIR}}\";\n  uwsgi_temp_path \"{{TMPDIR}}\";\n  scgi_temp_path \"{{TMPDIR}}\";\n  geo $external_client {\n    default 1;\n    127.0.0.0/8 0;\n    ::1 0;\n    fd00::/8 0;\n    {{INTERNALSUBNETS}}\n  }\n  upstream controller {\n    server {{UPSTREAMHOST}}:{{CONTROLLERPORT}};\n  }\n  server {\n    listen {{LISTENHOST}}:{{CONTROLLERSSLPORT}} ssl;\n    {{CONTROLLERLISTENEXTRA}}\n    server_name controller ~\\.containers\\. ~.*;\n    ssl_certificate \"{{SSLCERT}}\";\n    ssl_certificate_key \"{{SSLKEY}}\";\n    client_max_body_size 0;\n    location  / {\n      proxy_pass http://controller;\n      proxy_set_header Upgrade $http_upgrade;\n      proxy_set_header Connection \"upgrade\";\n      proxy_set_header Host $http_host;\n      proxy_set_header X-External-Client $external_client;\n      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n      proxy_set_header X-Forwarded-Proto https;\n      proxy_redirect off;\n      proxy_max_temp_file_size 0;\n      proxy_request_buffering off;\n      proxy_buffering off;\n      proxy_http_version 1.1;\n    }\n  }\n  upstream keepproxy {\n    server {{UPSTREAMHOST}}:{{KEEPPROXYPORT}};\n  }\n  server {\n    listen {{LISTENHOST}}:{{KEEPPROXYSSLPORT}} ssl;\n    server_name keepproxy keep.*;\n    ssl_certificate \"{{SSLCERT}}\";\n    ssl_certificate_key \"{{SSLKEY}}\";\n    location  / {\n      proxy_pass http://keepproxy;\n      proxy_set_header Host $http_host;\n      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n      proxy_set_header X-Forwarded-Proto https;\n      proxy_redirect off;\n\n      client_max_body_size 67108864;\n      proxy_http_version 1.1;\n      proxy_request_buffering off;\n    }\n  }\n  upstream keep-web {\n    server {{UPSTREAMHOST}}:{{KEEPWEBPORT}};\n  }\n  server {\n    listen {{LISTENHOST}}:{{KEEPWEBSSLPORT}} ssl;\n    server_name keep-web collections.* ~\\.collections\\.;\n    ssl_certificate \"{{SSLCERT}}\";\n    ssl_certificate_key \"{{SSLKEY}}\";\n    location  / {\n      proxy_pass http://keep-web;\n      proxy_set_header Host $http_host;\n      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n      proxy_set_header X-Forwarded-Proto https;\n      proxy_redirect off;\n\n      client_max_body_size 0;\n      proxy_http_version 1.1;\n      proxy_request_buffering off;\n    }\n  }\n  upstream health {\n    server {{UPSTREAMHOST}}:{{HEALTHPORT}};\n  }\n  server {\n    listen {{LISTENHOST}}:{{HEALTHSSLPORT}} ssl;\n    server_name health health.*;\n    ssl_certificate \"{{SSLCERT}}\";\n    ssl_certificate_key \"{{SSLKEY}}\";\n    location  / {\n      proxy_pass http://health;\n      proxy_set_header Host $http_host;\n      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n      proxy_set_header X-Forwarded-Proto https;\n      proxy_redirect off;\n\n      proxy_http_version 1.1;\n      proxy_request_buffering off;\n    }\n  }\n  server {\n    listen {{LISTENHOST}}:{{KEEPWEBDLSSLPORT}} ssl;\n    server_name keep-web-dl download.* ~.*;\n    ssl_certificate 
\"{{SSLCERT}}\";\n    ssl_certificate_key \"{{SSLKEY}}\";\n    location  / {\n      proxy_pass http://keep-web;\n      proxy_set_header Host $http_host;\n      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n      proxy_set_header X-Forwarded-Proto https;\n      proxy_redirect off;\n\n      client_max_body_size 0;\n      proxy_http_version 1.1;\n      proxy_request_buffering off;\n    }\n  }\n  upstream ws {\n    server {{UPSTREAMHOST}}:{{WSPORT}};\n  }\n  server {\n    listen {{LISTENHOST}}:{{WSSSLPORT}} ssl;\n    server_name websocket ws.*;\n    ssl_certificate \"{{SSLCERT}}\";\n    ssl_certificate_key \"{{SSLKEY}}\";\n    location  / {\n      proxy_pass http://ws;\n      proxy_set_header Upgrade $http_upgrade;\n      proxy_set_header Connection \"upgrade\";\n      proxy_set_header Host $http_host;\n      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n      proxy_set_header X-Forwarded-Proto https;\n      proxy_redirect off;\n\n      client_max_body_size 0;\n      proxy_http_version 1.1;\n      proxy_request_buffering off;\n    }\n  }\n  # wb1->wb2 redirects copied from\n  # /tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench_configuration.sls\n  map $request_uri $wb1_redirect {\n    default                        0;\n\n    ~^/actions\\?uuid=(.*-4zz18-.*) /collections/$1;\n    ~^/actions\\?uuid=(.*-j7d0g-.*) /projects/$1;\n    ~^/actions\\?uuid=(.*-tpzed-.*) /projects/$1;\n    ~^/actions\\?uuid=(.*-7fd4e-.*) /workflows/$1;\n    ~^/actions\\?uuid=(.*-xvhdp-.*) /processes/$1;\n    ~^/actions\\?uuid=(.*)          /;\n\n    ^/work_units/(.*)              /processes/$1;\n    ^/container_requests/(.*)      /processes/$1;\n    ^/users/(.*)                   /user/$1;\n    ^/groups/(.*)                  /group/$1;\n\n    ^/virtual_machines.*           /virtual-machines-admin;\n    ^/users/.*/virtual_machines    /virtual-machines-user;\n    ^/authorized_keys.*            /ssh-keys-admin;\n    ^/users/.*/ssh_keys            /ssh-keys-user;\n    ^/containers.*                 /all_processes;\n    ^/container_requests           /all_processes;\n    ^/job.*                        /all_processes;\n    ^/users/link_account           /link_account;\n    ^/keep_services.*              /keep-services;\n    ^/trash_items.*                /trash;\n\n    ^/themes.*                     /;\n    ^/keep_disks.*                 /;\n    ^/user_agreements.*            /;\n    ^/nodes.*                      /;\n    ^/humans.*                     /;\n    ^/traits.*                     /;\n    ^/sessions.*                   /;\n    ^/logout.*                     /;\n    ^/logged_out.*                 /;\n    ^/current_token                /;\n    ^/logs.*                       /;\n    ^/factory_jobs.*               /;\n    ^/uploaded_datasets.*          /;\n    ^/specimens.*                  /;\n    ^/pipeline_templates.*         /;\n    ^/pipeline_instances.*         /;\n  }\n  upstream workbench2 {\n    server {{UPSTREAMHOST}}:{{WORKBENCH2PORT}};\n  }\n  server {\n    listen {{LISTENHOST}}:{{WORKBENCH2SSLPORT}} ssl;\n    listen {{LISTENHOST}}:{{WORKBENCH1SSLPORT}} ssl;\n    server_name workbench2 workbench2.* workbench1 workbench1.* workbench workbench.*;\n    ssl_certificate \"{{SSLCERT}}\";\n    ssl_certificate_key \"{{SSLKEY}}\";\n\n    if ($wb1_redirect) {\n      return 301 $wb1_redirect;\n    }\n\n    # file download redirects\n    if ($arg_disposition = attachment) {\n      rewrite ^/collections/([^/]*)/(.*) /?redirectToDownload=/c=$1/$2? 
redirect;\n    }\n    if ($arg_disposition = inline) {\n      rewrite ^/collections/([^/]*)/(.*) /?redirectToPreview=/c=$1/$2? redirect;\n    }\n\n    location / {\n      proxy_pass http://workbench2;\n      proxy_set_header Host $http_host;\n      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n      proxy_set_header X-Forwarded-Proto https;\n      proxy_redirect off;\n    }\n  }\n}\n"
  },
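  {
    "path": "sdk/python/tests/example_nginx_template.py",
    "content": "# Hypothetical illustration only -- this file and its path are not part\n# of the Arvados source tree. The {{NAME}} placeholders in nginx.conf\n# above are filled in by run_test_server.run_nginx() with a plain regex\n# substitution over the whole template, along these lines:\nimport re\n\ndef render(template, values):\n    # Replace each {{UPPERCASE}} token with its configured value.\n    return re.sub(\n        r'{{([A-Z]+[A-Z0-9]+)}}',\n        lambda match: str(values.get(match.group(1))),\n        template)\n\nif __name__ == '__main__':\n    print(render(\n        'listen {{LISTENHOST}}:{{CONTROLLERSSLPORT}} ssl;',\n        {'LISTENHOST': '127.0.0.1', 'CONTROLLERSSLPORT': 4430}))\n    # -> listen 127.0.0.1:4430 ssl;\n"
  },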
  {
    "path": "sdk/python/tests/performance/__init__.py",
    "content": ""
  },
  {
    "path": "sdk/python/tests/performance/performance_profiler.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Use the \"profiled\" decorator on a test to get profiling data.\n#\n# Usage:\n#   from performance_profiler import profiled\n#\n#   # See report in tmp/profile/foobar\n#   @profiled\n#   def foobar():\n#       baz = 1\n#\n#   See \"test_a_sample.py\" for a working example.\n#\n# Performance tests run as part of regular test suite.\n# You can also run only the performance tests using one of the following:\n#     python -m unittest discover tests.performance\n#     ./run-tests.sh WORKSPACE=~/arvados --only sdk/python sdk/python_test=\"--test-suite=tests.performance\"\n\nimport functools\nimport os\nimport pstats\nimport sys\nimport unittest\ntry:\n    import cProfile as profile\nexcept ImportError:\n    import profile\n\noutput_dir = os.path.abspath(os.path.join('tmp', 'profile'))\nif not os.path.exists(output_dir):\n    os.makedirs(output_dir)\n\ndef profiled(function):\n    @functools.wraps(function)\n    def profiled_function(*args, **kwargs):\n        outfile = open(os.path.join(output_dir, function.__name__), \"w\")\n        caught = None\n        pr = profile.Profile()\n        pr.enable()\n        try:\n            return function(*args, **kwargs)\n        finally:\n            pr.disable()\n            ps = pstats.Stats(pr, stream=outfile)\n            ps.sort_stats('time').print_stats()\n    return profiled_function\n"
  },
  {
    "path": "sdk/python/tests/performance/test_a_sample.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport unittest\n\nfrom .performance_profiler import profiled\n\nclass PerformanceTestSample(unittest.TestCase):\n    def foo(self):\n        bar = 64\n\n    @profiled\n    def test_profiled_decorator(self):\n        j = 0\n        for i in range(0,2**20):\n            j += i\n        self.foo()\n        print('Hello')\n"
  },
  {
    "path": "sdk/python/tests/run_test_server.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport atexit\nimport errno\nimport glob\nimport httplib2\nimport os\nimport random\nimport re\nimport shlex\nimport shutil\nimport signal\nimport socket\nimport subprocess\nimport sys\nimport tempfile\nimport time\nimport unittest\nimport yaml\n\nfrom urllib.parse import urlparse\n\nMY_DIRNAME = os.path.dirname(os.path.realpath(__file__))\n\nimport arvados\nimport arvados.config\n\n# This module starts subprocesses and records them in pidfiles so they\n# can be managed by other processes (incl. after this process\n# exits). But if we don't keep a reference to each subprocess object\n# somewhere, the subprocess destructor runs, and we get a lot of\n# ResourceWarning noise in test logs. This is our bucket of subprocess\n# objects whose destructors we don't want to run but are otherwise\n# unneeded.\n_detachedSubprocesses = []\n\nARVADOS_DIR = os.path.realpath(os.path.join(MY_DIRNAME, '../../..'))\nSERVICES_SRC_DIR = os.path.join(ARVADOS_DIR, 'services')\n\n# Work around https://bugs.python.org/issue27805, should be no longer\n# necessary from sometime in Python 3.8.x\nif not os.environ.get('ARVADOS_DEBUG', ''):\n    WRITE_MODE = 'a'\nelse:\n    WRITE_MODE = 'w'\n\nif 'GOPATH' in os.environ:\n    # Add all GOPATH bin dirs to PATH -- but insert them after the\n    # ruby gems bin dir, to ensure \"bundle\" runs the Ruby bundler\n    # command, not the golang.org/x/tools/cmd/bundle command.\n    gopaths = os.environ['GOPATH'].split(':')\n    addbins = [os.path.join(path, 'bin') for path in gopaths]\n    newbins = []\n    for path in os.environ['PATH'].split(':'):\n        newbins.append(path)\n        if os.path.exists(os.path.join(path, 'bundle')):\n            newbins += addbins\n            addbins = []\n    newbins += addbins\n    os.environ['PATH'] = ':'.join(newbins)\n\nTEST_TMPDIR = os.path.join(ARVADOS_DIR, 'tmp')\nif not os.path.exists(TEST_TMPDIR):\n    os.mkdir(TEST_TMPDIR)\n\nmy_api_host = None\n_cached_config = {}\n_cached_db_config = {}\n_already_used_port = {}\n\ndef find_server_pid(PID_PATH, wait=10):\n    now = time.time()\n    timeout = now + wait\n    good_pid = False\n    while (not good_pid) and (now <= timeout):\n        time.sleep(0.2)\n        try:\n            with open(PID_PATH, 'r') as f:\n                server_pid = int(f.read())\n            good_pid = (os.kill(server_pid, 0) is None)\n        except EnvironmentError:\n            good_pid = False\n        now = time.time()\n\n    if not good_pid:\n        return None\n\n    return server_pid\n\ndef kill_server_pid(pidfile, wait=10, passenger_root=False):\n    # Must re-import modules in order to work during atexit\n    import os\n    import signal\n    import subprocess\n    import time\n\n    now = time.time()\n    startTERM = now\n    deadline = now + wait\n\n    if passenger_root:\n        # First try to shut down nicely\n        restore_cwd = os.getcwd()\n        os.chdir(passenger_root)\n        subprocess.call([\n            'bundle', 'exec', 'passenger', 'stop', '--pid-file', pidfile])\n        os.chdir(restore_cwd)\n        # Use up to half of the +wait+ period waiting for \"passenger\n        # stop\" to work. 
If the process hasn't exited by then, start\n        # sending TERM signals.\n        startTERM += wait//2\n\n    server_pid = None\n    while now <= deadline and server_pid is None:\n        try:\n            with open(pidfile, 'r') as f:\n                server_pid = int(f.read())\n        except IOError:\n            # No pidfile = nothing to kill.\n            return\n        except ValueError as error:\n            # Pidfile exists, but we can't parse it. Perhaps the\n            # server has created the file but hasn't written its PID\n            # yet?\n            print(\"Parse error reading pidfile {}: {}\".format(pidfile, error),\n                  file=sys.stderr)\n            time.sleep(0.1)\n            now = time.time()\n\n    while now <= deadline:\n        try:\n            exited, _ = os.waitpid(server_pid, os.WNOHANG)\n            if exited > 0:\n                _remove_pidfile(pidfile)\n                return\n        except OSError:\n            # already exited, or isn't our child process\n            pass\n        try:\n            if now >= startTERM:\n                os.kill(server_pid, signal.SIGTERM)\n                print(\"Sent SIGTERM to {} ({})\".format(server_pid, pidfile),\n                      file=sys.stderr)\n        except OSError as error:\n            if error.errno == errno.ESRCH:\n                # Thrown by os.getpgid() or os.kill() if the process\n                # does not exist, i.e., our work here is done.\n                _remove_pidfile(pidfile)\n                return\n            raise\n        time.sleep(0.1)\n        now = time.time()\n\n    print(\"Server PID {} ({}) did not exit, giving up after {}s\".\n          format(server_pid, pidfile, wait),\n          file=sys.stderr)\n\ndef _remove_pidfile(pidfile):\n    try:\n        os.unlink(pidfile)\n    except:\n        if os.path.lexists(pidfile):\n            raise\n\ndef find_available_port():\n    \"\"\"Return an IPv4 port number that is not in use right now.\n\n    We assume whoever needs to use the returned port is able to reuse\n    a recently used port without waiting for TIME_WAIT (see\n    SO_REUSEADDR / SO_REUSEPORT).\n\n    Some opportunity for races here, but it's better than choosing\n    something at random and not checking at all. If all of our servers\n    (hey Passenger) knew that listening on port 0 was a thing, the OS\n    would take care of the races, and this wouldn't be needed at all.\n    \"\"\"\n\n    global _already_used_port\n    while True:\n        sock = socket.socket()\n        sock.bind(('0.0.0.0', 0))\n        port = sock.getsockname()[1]\n        sock.close()\n        if port not in _already_used_port:\n            _already_used_port[port] = True\n            return port\n\ndef _wait_until_port_listens(port, *, timeout=300, pid=None,\n                             listener_must_be_pid=True):\n    \"\"\"Wait for a process to start listening on the given port.\n\n    If nothing listens on the port within the specified timeout (given\n    in seconds), raise an exception.\n\n    If the `pid` argument is given and `listener_must_be_pid` is True,\n    wait for that specific process to listen on the port, not just any\n    process.\n\n    If the `pid` argument is given, give up early if that process\n    exits; also, terminate that process if timing out.\n\n    \"\"\"\n    try:\n        subprocess.check_output(['which', 'netstat'])\n    except subprocess.CalledProcessError:\n        print(\"WARNING: No `netstat` -- cannot wait for port to listen. 
\"+\n              \"Sleeping 0.5 and hoping for the best.\",\n              file=sys.stderr)\n        time.sleep(0.5)\n        return\n    if pid and listener_must_be_pid:\n        matchpid = str(pid)\n    else:\n        matchpid = r'\\d+'\n    deadline = time.time() + timeout\n    logged = False\n    slept = 0\n    while time.time() < deadline:\n        if re.search(r'\\ntcp.*:'+str(port)+' .* LISTEN +'+matchpid+'/',\n                     subprocess.check_output(\n                         ['netstat', '-Wlnp'],\n                         stderr=subprocess.DEVNULL,\n                     ).decode()):\n            return True\n        if pid and not os.path.exists('/proc/{}/stat'.format(pid)):\n            raise Exception(\"process {} does not exist\"\n                            \" -- giving up on port {}\".format(\n                                pid or '', port))\n        if slept > 5 and not logged:\n            print(\"waiting for port {}...\".format(port), file=sys.stderr)\n            logged = True\n        time.sleep(0.1)\n        slept += 1\n    if pid:\n        try:\n            os.kill(pid, signal.SIGTERM)\n        except ProcessLookupError:\n            pass\n    raise Exception(\"process {} never listened on port {}\".format(\n        pid or '', port))\n\ndef _logfilename(label):\n    \"\"\"Set up a labelled log file, and return a path to write logs to.\n\n    Normally, the returned path is {tmpdir}/{label}.log.\n\n    In debug mode, logs are also written to stderr, with [label]\n    prepended to each line. The returned path is a FIFO.\n\n    +label+ should contain only alphanumerics: it is also used as part\n    of the FIFO filename.\n\n    \"\"\"\n    logfilename = os.path.join(TEST_TMPDIR, label+'.log')\n    if not os.environ.get('ARVADOS_DEBUG', ''):\n        return logfilename\n    fifo = os.path.join(TEST_TMPDIR, label+'.fifo')\n    try:\n        os.remove(fifo)\n    except OSError as error:\n        if error.errno != errno.ENOENT:\n            raise\n    os.mkfifo(fifo, 0o700)\n    stdbuf = ['stdbuf', '-i0', '-oL', '-eL']\n    # open(fifo, 'r') would block waiting for someone to open the fifo\n    # for writing, so we need a separate cat process to open it for\n    # us.\n    cat = subprocess.Popen(\n        stdbuf+['cat', fifo],\n        stdin=subprocess.DEVNULL,\n        stdout=subprocess.PIPE)\n    _detachedSubprocesses.append(cat)\n    tee = subprocess.Popen(\n        stdbuf+['tee', '-a', logfilename],\n        stdin=cat.stdout,\n        stdout=subprocess.PIPE)\n    _detachedSubprocesses.append(tee)\n    sed = subprocess.Popen(\n        stdbuf+['sed', '-e', 's/^/['+label+'] /'],\n        stdin=tee.stdout,\n        stdout=sys.stderr)\n    _detachedSubprocesses.append(sed)\n    return fifo\n\ndef _service_environ():\n    \"\"\"Return an environment mapping suitable for running an arvados\n    service process.\"\"\"\n    env = dict(os.environ)\n    env['ARVADOS_USE_KEEP_ACCESSIBLE_API'] = 'true'\n    return env\n\ndef run(leave_running_atexit=False):\n    \"\"\"Ensure an API server is running, and ARVADOS_API_* env vars have\n    admin credentials for it.\n\n    If ARVADOS_TEST_API_HOST is set, a parent process has started a\n    test server for us to use: we just need to reset() it using the\n    admin token fixture.\n\n    If a previous call to run() started a new server process, and it\n    is still running, we just need to reset() it to fixture state and\n    return.\n\n    If neither of those options work out, we'll really start a new\n    server.\n    \"\"\"\n    
global my_api_host\n\n    # Delete cached discovery documents.\n    #\n    # This will clear cached docs that belong to other processes (like\n    # concurrent test suites) even if they're still running. They should\n    # be able to tolerate that.\n    for fn in glob.glob(os.path.join(\n            str(arvados.http_cache('discovery')),\n            '*,arvados,v1,rest,*')):\n        os.unlink(fn)\n\n    pid_file = _pidfile('api')\n    pid_file_ok = find_server_pid(pid_file, 0)\n\n    existing_api_host = os.environ.get('ARVADOS_TEST_API_HOST', my_api_host)\n    if existing_api_host and pid_file_ok:\n        if existing_api_host == my_api_host:\n            try:\n                return reset()\n            except Exception:\n                # Fall through to shutdown-and-start case.\n                pass\n        else:\n            # Server was provided by parent. Can't recover if it's\n            # unresettable.\n            return reset()\n\n    # Before trying to start up our own server, call stop() to avoid\n    # \"Phusion Passenger Standalone is already running on PID 12345\".\n    # (If we've gotten this far, ARVADOS_TEST_API_HOST isn't set, so\n    # we know the server is ours to kill.)\n    stop(force=True)\n\n    restore_cwd = os.getcwd()\n    api_src_dir = os.path.join(SERVICES_SRC_DIR, 'api')\n    os.chdir(api_src_dir)\n\n    # Either we haven't started a server of our own yet, or it has\n    # died, or we have lost our credentials, or something else is\n    # preventing us from calling reset(). Start a new one.\n\n    if not os.path.exists('tmp'):\n        os.makedirs('tmp')\n\n    if not os.path.exists('tmp/api'):\n        os.makedirs('tmp/api')\n\n    if not os.path.exists('tmp/logs'):\n        os.makedirs('tmp/logs')\n\n    # Customizing the passenger config template is the only documented\n    # way to override the default passenger_stat_throttle_rate (10 s).\n    # In the testing environment, we want restart.txt to take effect\n    # immediately.\n    resdir = subprocess.check_output(['bundle', 'exec', 'passenger-config', 'about', 'resourcesdir']).decode().rstrip()\n    with open(resdir + '/templates/standalone/config.erb') as f:\n        template = f.read()\n    newtemplate = re.sub(r'http \\{', 'http {\\n        passenger_stat_throttle_rate 0;', template)\n    if newtemplate == template:\n        raise Exception(\"template edit failed\")\n    with open('tmp/passenger-nginx.conf.erb', 'w') as f:\n        f.write(newtemplate)\n\n    port = internal_port_from_config(\"RailsAPI\")\n    env = _service_environ()\n    env['RAILS_ENV'] = 'test'\n    env['ARVADOS_RAILS_LOG_TO_STDOUT'] = '1'\n    env.pop('ARVADOS_WEBSOCKETS', None)\n    env.pop('ARVADOS_TEST_API_HOST', None)\n    env.pop('ARVADOS_API_HOST', None)\n    env.pop('ARVADOS_API_HOST_INSECURE', None)\n    env.pop('ARVADOS_API_TOKEN', None)\n    logf = open(_logfilename('railsapi'), WRITE_MODE)\n    railsapi = subprocess.Popen(\n        ['bundle', 'exec',\n         'passenger', 'start', '-p{}'.format(port),\n         '--nginx-config-template', 'tmp/passenger-nginx.conf.erb',\n         '--no-friendly-error-pages',\n         '--disable-anonymous-telemetry',\n         '--disable-security-update-check',\n         '--pid-file', pid_file,\n         '--log-file', '/dev/stdout',\n         '--ssl',\n         '--ssl-certificate', 'tmp/self-signed.pem',\n         '--ssl-certificate-key', 'tmp/self-signed.key'],\n        env=env,\n        stdin=subprocess.DEVNULL,\n        stdout=logf,\n        stderr=logf)\n    _detachedSubprocesses.append(railsapi)\n\n    if 
not leave_running_atexit:\n        atexit.register(kill_server_pid, pid_file, passenger_root=api_src_dir)\n\n    my_api_host = \"127.0.0.1:\"+str(port)\n    os.environ['ARVADOS_API_HOST'] = my_api_host\n\n    # Make sure the server is listening on its TCP port.\n    _wait_until_port_listens(port, pid=railsapi.pid, listener_must_be_pid=False)\n    # Make sure the server has written its pid file.\n    find_server_pid(pid_file)\n\n    reset()\n    os.chdir(restore_cwd)\n\ndef reset():\n    \"\"\"Reset the test server to fixture state.\n\n    This resets the ARVADOS_TEST_API_HOST provided by a parent process\n    if any, otherwise the server started by run().\n\n    It also resets ARVADOS_* environment vars to point to the test\n    server with admin credentials.\n    \"\"\"\n    existing_api_host = os.environ.get('ARVADOS_TEST_API_HOST', my_api_host)\n    token = auth_token('admin')\n    httpclient = httplib2.Http(ca_certs=os.path.join(\n        SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.pem'))\n    httpclient.request(\n        'https://{}/database/reset'.format(existing_api_host),\n        'POST',\n        headers={'Authorization': 'Bearer {}'.format(token), 'Connection':'close'})\n\n    os.environ['ARVADOS_API_HOST_INSECURE'] = 'true'\n    os.environ['ARVADOS_API_TOKEN'] = token\n    os.environ['ARVADOS_API_HOST'] = existing_api_host\n\ndef stop(force=False):\n    \"\"\"Stop the API server, if one is running.\n\n    If force==False, kill it only if we started it ourselves. (This\n    supports the use case where a Python test suite calls run(), but\n    run() just uses the ARVADOS_TEST_API_HOST provided by the parent\n    process, and the test suite cleans up after itself by calling\n    stop(). In this case the test server provided by the parent\n    process should be left alone.)\n\n    If force==True, kill it even if we didn't start it\n    ourselves. 
(This supports the use case in __main__, where \"run\"\n    and \"stop\" happen in different processes.)\n    \"\"\"\n    global my_api_host\n    if force or my_api_host is not None:\n        kill_server_pid(_pidfile('api'))\n        my_api_host = None\n\ndef get_config():\n    with open(os.environ[\"ARVADOS_CONFIG\"]) as f:\n        return yaml.safe_load(f)\n\ndef internal_port_from_config(service, idx=0):\n    return int(urlparse(\n        sorted(list(get_config()[\"Clusters\"][\"zzzzz\"][\"Services\"][service][\"InternalURLs\"].keys()))[idx]).\n               netloc.split(\":\")[1])\n\ndef external_port_from_config(service):\n    return int(urlparse(get_config()[\"Clusters\"][\"zzzzz\"][\"Services\"][service][\"ExternalURL\"]).netloc.split(\":\")[1])\n\ndef run_controller():\n    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:\n        return\n    stop_controller()\n    logf = open(_logfilename('controller'), WRITE_MODE)\n    port = internal_port_from_config(\"Controller\")\n    controller = subprocess.Popen(\n        [\"arvados-server\", \"controller\"],\n        env=_service_environ(),\n        stdin=subprocess.DEVNULL,\n        stdout=logf,\n        stderr=logf,\n        close_fds=True)\n    _detachedSubprocesses.append(controller)\n    with open(_pidfile('controller'), 'w') as f:\n        f.write(str(controller.pid))\n    _wait_until_port_listens(port, pid=controller.pid)\n    return port\n\ndef stop_controller():\n    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:\n        return\n    kill_server_pid(_pidfile('controller'))\n\ndef run_dispatch():\n    stop_dispatch()\n    arvados_server_bin = os.path.join(os.environ[\"GOPATH\"], \"bin\", \"arvados-server\")\n    crbin = os.path.join(TEST_TMPDIR, \"crunch-run\")\n    # Symlinking crunch-run -> arvados-server allows us to use it in a\n    # -crunch-run-command=\"/path/to/crunch-run\" argument -- because\n    # -crunch-run-command=\"/path/to/arvados-server crunch-run\" doesn't\n    # do what we want.\n    try:\n        os.remove(crbin)\n    except FileNotFoundError:\n        pass\n    os.symlink(arvados_server_bin, crbin)\n    cdlbin = os.path.join(os.environ[\"GOPATH\"], \"bin\", \"crunch-dispatch-local\")\n    print('starting crunch-dispatch-local ...', file=sys.stderr)\n    logf = open(_logfilename('dispatch'), WRITE_MODE)\n    debugport = find_available_port()\n    dispatch = subprocess.Popen(\n        [\n            cdlbin,\n            \"-crunch-run-command\", crbin,\n            \"-pprof\", \"localhost:{}\".format(debugport),\n        ],\n        cwd=TEST_TMPDIR,\n        env=_service_environ(),\n        stdin=subprocess.DEVNULL,\n        stdout=logf,\n        stderr=logf,\n        close_fds=True)\n    _detachedSubprocesses.append(dispatch)\n    with open(_pidfile('dispatch'), 'w') as f:\n        f.write(str(dispatch.pid))\n    _wait_until_port_listens(debugport, pid=dispatch.pid)\n    print('dispatch pid is {}'.format(dispatch.pid), file=sys.stderr)\n\ndef stop_dispatch():\n    kill_server_pid(_pidfile('dispatch'))\n\ndef run_ws():\n    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:\n        return\n    stop_ws()\n    port = internal_port_from_config(\"Websocket\")\n    logf = open(_logfilename('ws'), WRITE_MODE)\n    ws = subprocess.Popen(\n        [\"arvados-server\", \"ws\"],\n        env=_service_environ(),\n        stdin=subprocess.DEVNULL,\n        stdout=logf,\n        stderr=logf,\n        close_fds=True)\n    _detachedSubprocesses.append(ws)\n    with open(_pidfile('ws'), 'w') as f:\n        f.write(str(ws.pid))\n   
 _wait_until_port_listens(port, pid=ws.pid)\n    return port\n\ndef stop_ws():\n    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:\n        return\n    kill_server_pid(_pidfile('ws'))\n\ndef _start_keep(n, blob_signing=False):\n    datadir = os.path.join(TEST_TMPDIR, \"keep%d.data\"%n)\n    if os.path.exists(datadir):\n        shutil.rmtree(datadir)\n    os.mkdir(datadir)\n    port = internal_port_from_config(\"Keepstore\", idx=n)\n\n    # Make a copy of the config file with BlobSigning set to the\n    # requested value.\n    conf = os.path.join(TEST_TMPDIR, \"keep%d.yaml\"%n)\n    confdata = get_config()\n    confdata['Clusters']['zzzzz']['Collections']['BlobSigning'] = blob_signing\n    with open(conf, 'w') as f:\n        yaml.safe_dump(confdata, f)\n    keep_cmd = [\"arvados-server\", \"keepstore\", \"-config\", conf]\n\n    # Tell keepstore which of the InternalURLs it's supposed to listen\n    # on.\n    env = _service_environ()\n    env['ARVADOS_SERVICE_INTERNAL_URL'] = \"http://127.0.0.1:%d\"%port\n\n    with open(_logfilename('keep{}'.format(n)), WRITE_MODE) as logf:\n        child = subprocess.Popen(\n            keep_cmd,\n            env=env,\n            stdin=subprocess.DEVNULL,\n            stdout=logf,\n            stderr=logf,\n            close_fds=True)\n        _detachedSubprocesses.append(child)\n\n    print('keep{}.pid is {}'.format(n, child.pid), file=sys.stderr)\n    with open(_pidfile('keep{}'.format(n)), 'w') as f:\n        f.write(str(child.pid))\n\n    _wait_until_port_listens(port, pid=child.pid)\n\n    return port\n\ndef run_keep(num_servers=2, **kwargs):\n    stop_keep(num_servers)\n\n    api = arvados.api(\n        version='v1',\n        host=os.environ['ARVADOS_API_HOST'],\n        token=os.environ['ARVADOS_API_TOKEN'],\n        insecure=True)\n\n    for d in api.keep_services().list(filters=[['service_type','=','disk']]).execute()['items']:\n        api.keep_services().delete(uuid=d['uuid']).execute()\n\n    for d in range(0, num_servers):\n        port = _start_keep(d, **kwargs)\n        svc = api.keep_services().create(body={'keep_service': {\n            'uuid': 'zzzzz-bi6l4-keepdisk{:07d}'.format(d),\n            'service_host': 'localhost',\n            'service_port': port,\n            'service_type': 'disk',\n            'service_ssl_flag': False,\n        }}).execute()\n\n    # If keepproxy and/or keep-web is running, send SIGHUP to make\n    # them discover the new keepstore services.\n    for svc in ('keepproxy', 'keep-web'):\n        pidfile = _pidfile(svc)\n        if os.path.exists(pidfile):\n            try:\n                with open(pidfile) as pid:\n                    os.kill(int(pid.read()), signal.SIGHUP)\n            except OSError:\n                os.remove(pidfile)\n\ndef _stop_keep(n):\n    kill_server_pid(_pidfile('keep{}'.format(n)))\n\ndef stop_keep(num_servers=2):\n    for n in range(0, num_servers):\n        _stop_keep(n)\n\ndef run_keep_proxy():\n    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:\n        os.environ[\"ARVADOS_KEEP_SERVICES\"] = \"http://localhost:{}\".format(internal_port_from_config('Keepproxy'))\n        return\n    stop_keep_proxy()\n\n    port = internal_port_from_config(\"Keepproxy\")\n    env = _service_environ()\n    env['ARVADOS_API_TOKEN'] = auth_token('anonymous')\n    logf = open(_logfilename('keepproxy'), WRITE_MODE)\n    kp = subprocess.Popen(\n        ['arvados-server', 'keepproxy'],\n        env=env,\n        stdin=subprocess.DEVNULL,\n        stdout=logf,\n        stderr=logf,\n        
close_fds=True)\n    _detachedSubprocesses.append(kp)\n\n    with open(_pidfile('keepproxy'), 'w') as f:\n        f.write(str(kp.pid))\n    _wait_until_port_listens(port, pid=kp.pid)\n\n    print(\"Using API %s token %s\" % (os.environ['ARVADOS_API_HOST'], auth_token('admin')), file=sys.stdout)\n    api = arvados.api(\n        version='v1',\n        host=os.environ['ARVADOS_API_HOST'],\n        token=auth_token('admin'),\n        insecure=True)\n    for d in api.keep_services().list(\n            filters=[['service_type','=','proxy']]).execute()['items']:\n        api.keep_services().delete(uuid=d['uuid']).execute()\n    api.keep_services().create(body={'keep_service': {\n        'service_host': 'localhost',\n        'service_port': port,\n        'service_type': 'proxy',\n        'service_ssl_flag': False,\n    }}).execute()\n    os.environ[\"ARVADOS_KEEP_SERVICES\"] = \"http://localhost:{}\".format(port)\n\ndef stop_keep_proxy():\n    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:\n        return\n    kill_server_pid(_pidfile('keepproxy'))\n\ndef run_keep_web():\n    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:\n        return\n    stop_keep_web()\n\n    keepwebport = internal_port_from_config(\"WebDAV\")\n    logf = open(_logfilename('keep-web'), WRITE_MODE)\n    keepweb = subprocess.Popen(\n        ['arvados-server', 'keep-web'],\n        env=_service_environ(),\n        stdin=subprocess.DEVNULL,\n        stdout=logf,\n        stderr=logf)\n    _detachedSubprocesses.append(keepweb)\n    with open(_pidfile('keep-web'), 'w') as f:\n        f.write(str(keepweb.pid))\n    _wait_until_port_listens(keepwebport, pid=keepweb.pid)\n\ndef stop_keep_web():\n    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:\n        return\n    kill_server_pid(_pidfile('keep-web'))\n\ndef run_nginx():\n    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:\n        return\n    stop_nginx()\n    nginxconf = {}\n    nginxconf['UPSTREAMHOST'] = '127.0.0.1'\n    nginxconf['LISTENHOST'] = '127.0.0.1'\n    nginxconf['CONTROLLERPORT'] = internal_port_from_config(\"Controller\")\n    nginxconf['CONTROLLERLISTENEXTRA'] = ''\n    nginxconf['ARVADOS_API_HOST'] = \"0.0.0.0:\" + str(external_port_from_config(\"Controller\"))\n    nginxconf['CONTROLLERSSLPORT'] = external_port_from_config(\"Controller\")\n    nginxconf['KEEPWEBPORT'] = internal_port_from_config(\"WebDAV\")\n    nginxconf['KEEPWEBDLSSLPORT'] = external_port_from_config(\"WebDAVDownload\")\n    nginxconf['KEEPWEBSSLPORT'] = external_port_from_config(\"WebDAV\")\n    nginxconf['KEEPPROXYPORT'] = internal_port_from_config(\"Keepproxy\")\n    nginxconf['KEEPPROXYSSLPORT'] = external_port_from_config(\"Keepproxy\")\n    nginxconf['HEALTHPORT'] = internal_port_from_config(\"Health\")\n    nginxconf['HEALTHSSLPORT'] = external_port_from_config(\"Health\")\n    nginxconf['WSPORT'] = internal_port_from_config(\"Websocket\")\n    nginxconf['WSSSLPORT'] = external_port_from_config(\"Websocket\")\n    nginxconf['WORKBENCH1SSLPORT'] = external_port_from_config(\"Workbench1\")\n    nginxconf['WORKBENCH2PORT'] = internal_port_from_config(\"Workbench2\")\n    nginxconf['WORKBENCH2SSLPORT'] = external_port_from_config(\"Workbench2\")\n    nginxconf['SSLCERT'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.pem')\n    nginxconf['SSLKEY'] = os.path.join(SERVICES_SRC_DIR, 'api', 'tmp', 'self-signed.key')\n    nginxconf['ACCESSLOG'] = _logfilename('nginx_access')\n    nginxconf['ERRORLOG'] = _logfilename('nginx_error')\n    nginxconf['TMPDIR'] = TEST_TMPDIR + 
'/nginx'\n    nginxconf['INTERNALSUBNETS'] = '169.254.0.0/16 0;'\n\n    conftemplatefile = os.path.join(MY_DIRNAME, 'nginx.conf')\n    conffile = os.path.join(TEST_TMPDIR, 'nginx.conf')\n    with open(conffile, 'w') as f:\n        f.write(re.sub(\n            r'{{([A-Z]+[A-Z0-9]+)}}',\n            lambda match: str(nginxconf.get(match.group(1))),\n            open(conftemplatefile).read()))\n\n    env = os.environ.copy()\n    env['PATH'] = env['PATH']+':/sbin:/usr/sbin:/usr/local/sbin'\n\n    nginx = subprocess.Popen(\n        ['nginx',\n         '-g', 'error_log stderr notice; pid '+_pidfile('nginx')+';',\n         '-c', conffile],\n        env=env,\n        stdin=subprocess.DEVNULL,\n        stdout=sys.stderr)\n    _detachedSubprocesses.append(nginx)\n    _wait_until_port_listens(nginxconf['CONTROLLERSSLPORT'], pid=nginx.pid)\n\ndef setup_config():\n    rails_api_port = find_available_port()\n    controller_port = find_available_port()\n    controller_external_port = find_available_port()\n    websocket_port = find_available_port()\n    websocket_external_port = find_available_port()\n    workbench1_external_port = find_available_port()\n    workbench2_port = find_available_port()\n    workbench2_external_port = find_available_port()\n    health_httpd_port = find_available_port()\n    health_httpd_external_port = find_available_port()\n    keepproxy_port = find_available_port()\n    keepproxy_external_port = find_available_port()\n    keepstore_ports = sorted([str(find_available_port()) for _ in range(0,4)])\n    keep_web_port = find_available_port()\n    keep_web_external_port = find_available_port()\n    keep_web_dl_external_port = find_available_port()\n\n    configsrc = os.environ.get(\"CONFIGSRC\", None)\n    if configsrc:\n        clusterconf = os.path.join(configsrc, \"config.yml\")\n        print(\"Getting config from %s\" % clusterconf, file=sys.stderr)\n        pgconnection = yaml.safe_load(open(clusterconf))[\"Clusters\"][\"zzzzz\"][\"PostgreSQL\"][\"Connection\"]\n    else:\n        # assume the conventional db credentials\n        pgconnection = {\n            \"client_encoding\": \"utf8\",\n            \"host\": \"localhost\",\n            \"port\": \"5432\",\n            \"dbname\": \"arvados_test\",\n            \"user\": \"arvados\",\n            \"password\": \"insecure_arvados_test\",\n        }\n\n    localhost = \"127.0.0.1\"\n    services = {\n        \"RailsAPI\": {\n            \"InternalURLs\": {\n                \"https://%s:%s\"%(localhost, rails_api_port): {},\n            },\n        },\n        \"Controller\": {\n            \"ExternalURL\": \"https://%s:%s\" % (localhost, controller_external_port),\n            \"InternalURLs\": {\n                \"http://%s:%s\"%(localhost, controller_port): {},\n            },\n        },\n        \"Websocket\": {\n            \"ExternalURL\": \"wss://%s:%s/websocket\" % (localhost, websocket_external_port),\n            \"InternalURLs\": {\n                \"http://%s:%s\"%(localhost, websocket_port): {},\n            },\n        },\n        \"Workbench1\": {\n            \"ExternalURL\": \"https://%s:%s/\" % (localhost, workbench1_external_port),\n        },\n        \"Workbench2\": {\n            \"ExternalURL\": \"https://%s:%s/\" % (localhost, workbench2_external_port),\n            \"InternalURLs\": {\n                \"http://%s:%s\"%(localhost, workbench2_port): {},\n            },\n        },\n        \"Health\": {\n            \"ExternalURL\": \"https://%s:%s\" % (localhost, health_httpd_external_port),\n            \"InternalURLs\": {\n          
      \"http://%s:%s\"%(localhost, health_httpd_port): {}\n            },\n        },\n        \"Keepstore\": {\n            \"InternalURLs\": {\n                \"http://%s:%s\"%(localhost, port): {} for port in keepstore_ports\n            },\n        },\n        \"Keepproxy\": {\n            \"ExternalURL\": \"https://%s:%s\" % (localhost, keepproxy_external_port),\n            \"InternalURLs\": {\n                \"http://%s:%s\"%(localhost, keepproxy_port): {},\n            },\n        },\n        \"WebDAV\": {\n            \"ExternalURL\": \"https://%s:%s\" % (localhost, keep_web_external_port),\n            \"InternalURLs\": {\n                \"http://%s:%s\"%(localhost, keep_web_port): {},\n            },\n        },\n        \"WebDAVDownload\": {\n            \"ExternalURL\": \"https://%s:%s\" % (localhost, keep_web_dl_external_port),\n            \"InternalURLs\": {\n                \"http://%s:%s\"%(localhost, keep_web_port): {},\n            },\n        },\n        \"ContainerWebServices\": {\n            \"ExternalURL\": \"https://*.containers.zzzzz.example.com\",\n            \"InternalURLs\": {\n                \"http://%s:%s\"%(localhost, controller_port): {},\n            },\n        },\n    }\n\n    config = {\n        \"Clusters\": {\n            \"zzzzz\": {\n                \"ManagementToken\": \"e687950a23c3a9bceec28c6223a06c79\",\n                \"SystemRootToken\": auth_token('system_user'),\n                \"API\": {\n                    \"RequestTimeout\": \"30s\",\n                    \"LockBeforeUpdate\": True,\n                },\n                \"Login\": {\n                    \"Test\": {\n                        \"Enable\": True,\n                        \"Users\": {\n                            \"alice\": {\n                                \"Email\": \"alice@example.com\",\n                                \"Password\": \"xyzzy\"\n                            }\n                        }\n                    },\n                    \"LDAP\": {\n                        \"Enable\": False,\n                        # Hostname used by lib/controller/localdb/login_docker_test\n                        # Other settings are the defaults for the\n                        # bitnamilegacy/openldap Docker image it uses\n                        \"URL\": \"ldap://arvados-test-openldap:1389/\",\n                        \"StartTLS\": False,\n                        \"SearchBase\": \"dc=example,dc=org\",\n                        \"SearchBindUser\": \"cn=admin,dc=example,dc=org\",\n                        \"SearchBindPassword\": \"adminpassword\",\n                    },\n                    \"PAM\": {\n                        \"Enable\": False,\n                        # Without this specific DefaultEmailDomain, inserted users\n                        # would prevent subsequent database/reset from working (see\n                        # database_controller.rb).\n                        \"DefaultEmailDomain\": \"example.com\",\n                    },\n                },\n                \"SystemLogs\": {\n                    \"LogLevel\": ('info' if os.environ.get('ARVADOS_DEBUG', '') in ['','0'] else 'debug'),\n                },\n                \"PostgreSQL\": {\n                    \"Connection\": pgconnection,\n                },\n                \"TLS\": {\n                    \"Insecure\": True,\n                },\n                \"Services\": services,\n                \"Users\": {\n                    \"AnonymousUserToken\": auth_token('anonymous'),\n              
      \"UserProfileNotificationAddress\": \"arvados@example.com\",\n                },\n                \"Collections\": {\n                    \"CollectionVersioning\": True,\n                    \"BlobSigningKey\": \"zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc\",\n                    \"TrustAllContent\": False,\n                    \"ForwardSlashNameSubstitution\": \"/\",\n                    \"TrashSweepInterval\": \"-1s\", # disable, otherwise test cases can't acquire dblock\n                },\n                \"Containers\": {\n                    # sdk/cwl integration tests need containers to\n                    # connect to controller, which listens on\n                    # 127.0.0.1. This is only possible with \"host\"\n                    # networking mode.\n                    \"CrunchRunArgumentsList\": [\"--container-network-mode\", \"host\"],\n                    \"LocalKeepBlobBuffersPerVCPU\": 0,\n                    \"SupportedDockerImageFormats\": {\"v1\": {}},\n                    \"ShellAccess\": {\n                        \"Admin\": True,\n                        \"User\": True,\n                    },\n                },\n                \"Volumes\": {\n                    \"zzzzz-nyw5e-%015d\"%n: {\n                        \"AccessViaHosts\": {\n                            \"http://%s:%s\" % (localhost, keepstore_ports[n]): {},\n                        },\n                        \"Driver\": \"Directory\",\n                        \"DriverParameters\": {\n                            \"Root\": os.path.join(TEST_TMPDIR, \"keep%d.data\"%n),\n                        },\n                    } for n in range(len(keepstore_ports))\n                },\n            },\n        },\n    }\n\n    conf = os.path.join(TEST_TMPDIR, 'arvados.yml')\n    with open(conf, 'w') as f:\n        yaml.safe_dump(config, f)\n\n    ex = \"export ARVADOS_CONFIG=\"+conf\n    print(ex)\n\n\ndef stop_nginx():\n    if 'ARVADOS_TEST_PROXY_SERVICES' in os.environ:\n        return\n    kill_server_pid(_pidfile('nginx'))\n\ndef _pidfile(program):\n    return os.path.join(TEST_TMPDIR, program + '.pid')\n\ndef fixture(fix):\n    '''load a fixture yaml file'''\n    with open(os.path.join(SERVICES_SRC_DIR, 'api', \"test\", \"fixtures\",\n                           fix + \".yml\")) as f:\n        yaml_file = f.read()\n        try:\n          trim_index = yaml_file.index(\"# Test Helper trims the rest of the file\")\n          yaml_file = yaml_file[0:trim_index]\n        except ValueError:\n          pass\n        return yaml.safe_load(yaml_file)\n\ndef auth_token(token_name):\n    return fixture(\"api_client_authorizations\")[token_name][\"api_token\"]\n\ndef authorize_with(token_name):\n    '''token_name is the symbolic name of the token from the api_client_authorizations fixture'''\n    arvados.config.settings()[\"ARVADOS_API_TOKEN\"] = auth_token(token_name)\n    arvados.config.settings()[\"ARVADOS_API_HOST\"] = os.environ.get(\"ARVADOS_API_HOST\")\n    arvados.config.settings()[\"ARVADOS_API_HOST_INSECURE\"] = \"true\"\n\nclass TestCaseWithServers(unittest.TestCase):\n    \"\"\"TestCase to start and stop supporting Arvados servers.\n\n    Define any of MAIN_SERVER, KEEP_SERVER, and/or KEEP_PROXY_SERVER\n    class variables as a dictionary of keyword arguments.  If you do,\n    setUpClass will start the corresponding servers by passing these\n    keyword arguments to the run, run_keep, and/or run_keep_server\n    functions, respectively.  
It will also set Arvados environment\n    variables to point to these servers appropriately.  If you don't\n    run a Keep or Keep proxy server, setUpClass will set up a\n    temporary directory for Keep local storage, and set it as\n    KEEP_LOCAL_STORE.\n\n    tearDownClass will stop any servers started, and restore the\n    original environment.\n    \"\"\"\n    MAIN_SERVER = None\n    WS_SERVER = None\n    KEEP_SERVER = None\n    KEEP_PROXY_SERVER = None\n    KEEP_WEB_SERVER = None\n\n    @staticmethod\n    def _restore_dict(src, dest):\n        for key in list(dest.keys()):\n            if key not in src:\n                del dest[key]\n        dest.update(src)\n\n    @classmethod\n    def setUpClass(cls):\n        cls._orig_environ = os.environ.copy()\n        cls._orig_config = arvados.config.settings().copy()\n        cls._cleanup_funcs = []\n        os.environ.pop('ARVADOS_KEEP_SERVICES', None)\n        for server_kwargs, start_func, stop_func in (\n                (cls.MAIN_SERVER, run, reset),\n                (cls.WS_SERVER, run_ws, stop_ws),\n                (cls.KEEP_SERVER, run_keep, stop_keep),\n                (cls.KEEP_PROXY_SERVER, run_keep_proxy, stop_keep_proxy),\n                (cls.KEEP_WEB_SERVER, run_keep_web, stop_keep_web)):\n            if server_kwargs is not None:\n                start_func(**server_kwargs)\n                cls._cleanup_funcs.append(stop_func)\n        if (cls.KEEP_SERVER is None) and (cls.KEEP_PROXY_SERVER is None):\n            cls.local_store = tempfile.mkdtemp()\n            os.environ['KEEP_LOCAL_STORE'] = cls.local_store\n            cls._cleanup_funcs.append(\n                lambda: shutil.rmtree(cls.local_store, ignore_errors=True))\n        else:\n            os.environ.pop('KEEP_LOCAL_STORE', None)\n        arvados.config.initialize()\n\n    @classmethod\n    def tearDownClass(cls):\n        for clean_func in cls._cleanup_funcs:\n            clean_func()\n        cls._restore_dict(cls._orig_environ, os.environ)\n        cls._restore_dict(cls._orig_config, arvados.config.settings())\n\n\nif __name__ == \"__main__\":\n    actions = [\n        'database_reset',\n        'start', 'stop',\n        'start_ws', 'stop_ws',\n        'start_controller', 'stop_controller',\n        'start_dispatch', 'stop_dispatch',\n        'start_keep', 'stop_keep',\n        'start_keep_proxy', 'stop_keep_proxy',\n        'start_keep-web', 'stop_keep-web',\n        'start_nginx', 'stop_nginx', 'setup_config',\n    ]\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        'action',\n        metavar='ACTION',\n        choices=actions,\n        help=\"one of %(choices)s\",\n    )\n    parser.add_argument('--auth', type=str, metavar='FIXTURE_NAME', help='Print authorization info for given api_client_authorizations fixture')\n    parser.add_argument('--num-keep-servers', metavar='int', type=int, default=2, help=\"Number of keep servers desired\")\n    parser.add_argument('--keep-blob-signing', action=\"store_true\", help=\"Enable blob signing for keepstore servers\")\n\n    args = parser.parse_args()\n    # Create a new process group so our child processes don't exit on\n    # ^C in run-tests.sh interactive mode.\n    os.setpgid(0, 0)\n    if args.action == 'database_reset':\n        reset()\n    elif args.action == 'start':\n        stop(force=('ARVADOS_TEST_API_HOST' not in os.environ))\n        run(leave_running_atexit=True)\n        host = os.environ['ARVADOS_API_HOST']\n        if args.auth is not None:\n            token = 
auth_token(args.auth)\n            print(\"export ARVADOS_API_TOKEN={}\".format(shlex.quote(token)))\n            print(\"export ARVADOS_API_HOST={}\".format(shlex.quote(host)))\n            print(\"export ARVADOS_API_HOST_INSECURE=true\")\n            print(\"export ARVADOS_USE_KEEP_ACCESSIBLE_API=true\")\n        else:\n            print(host)\n    elif args.action == 'stop':\n        stop(force=('ARVADOS_TEST_API_HOST' not in os.environ))\n    elif args.action == 'start_ws':\n        run_ws()\n    elif args.action == 'stop_ws':\n        stop_ws()\n    elif args.action == 'start_controller':\n        run_controller()\n    elif args.action == 'stop_controller':\n        stop_controller()\n    elif args.action == 'start_dispatch':\n        run_dispatch()\n    elif args.action == 'stop_dispatch':\n        stop_dispatch()\n    elif args.action == 'start_keep':\n        run_keep(blob_signing=args.keep_blob_signing, num_servers=args.num_keep_servers)\n    elif args.action == 'stop_keep':\n        stop_keep(num_servers=args.num_keep_servers)\n    elif args.action == 'start_keep_proxy':\n        run_keep_proxy()\n    elif args.action == 'stop_keep_proxy':\n        stop_keep_proxy()\n    elif args.action == 'start_keep-web':\n        run_keep_web()\n    elif args.action == 'stop_keep-web':\n        stop_keep_web()\n    elif args.action == 'start_nginx':\n        run_nginx()\n        print(\"export ARVADOS_API_HOST=0.0.0.0:{}\".format(external_port_from_config('Controller')))\n    elif args.action == 'stop_nginx':\n        stop_nginx()\n    elif args.action == 'setup_config':\n        setup_config()\n    else:\n        raise Exception(\"action recognized but not implemented!?\")\n"
  },
  {
    "path": "sdk/python/tests/test_api.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport arvados\nimport collections\nimport contextlib\nimport httplib2\nimport itertools\nimport json\nimport logging\nimport mimetypes\nimport os\nimport socket\nimport string\nimport sys\nimport unittest\nimport urllib.parse as urlparse\n\nfrom unittest import mock\nfrom . import run_test_server\n\nfrom apiclient import errors as apiclient_errors\nfrom apiclient import http as apiclient_http\nfrom arvados.api import (\n    ThreadSafeAPIClient,\n    api_client,\n    normalize_api_kwargs,\n    api_kwargs_from_config,\n    _googleapiclient_log_lock,\n)\nfrom .arvados_testutil import fake_httplib2_response, mock_api_responses, queue_with\n\nimport googleapiclient\nimport httplib2.error\n\nif not mimetypes.inited:\n    mimetypes.init()\n\nclass ArvadosApiTest(run_test_server.TestCaseWithServers):\n    MAIN_SERVER = {}\n    ERROR_HEADERS = {'Content-Type': mimetypes.types_map['.json']}\n    RETRIED_4XX = frozenset([408, 409, 423])\n\n    def api_error_response(self, code, *errors):\n        return (fake_httplib2_response(code, **self.ERROR_HEADERS),\n                json.dumps({'errors': errors,\n                            'error_token': '1234567890+12345678'}).encode())\n\n    def _config_from_environ(self):\n        return {\n            key: value\n            for key, value in os.environ.items()\n            if key.startswith('ARVADOS_API_')\n        }\n\n    def _discoveryServiceUrl(\n            self,\n            host=None,\n            path='/discovery/v1/apis/{api}/{apiVersion}/rest',\n            scheme='https',\n    ):\n        if host is None:\n            host = os.environ['ARVADOS_API_HOST']\n        return urlparse.urlunsplit((scheme, host, path, None, None))\n\n    def test_new_api_objects_with_cache(self):\n        clients = [arvados.api('v1', cache=True) for index in [0, 1]]\n        self.assertIsNot(*clients)\n\n    def test_empty_list(self):\n        answer = arvados.api('v1').collections().list(\n            filters=[['uuid', '=', 'abcdef']]).execute()\n        self.assertEqual(answer['items_available'], len(answer['items']))\n\n    def test_nonempty_list(self):\n        answer = arvados.api('v1').collections().list().execute()\n        self.assertNotEqual(0, answer['items_available'])\n        self.assertNotEqual(0, len(answer['items']))\n\n    def test_timestamp_inequality_filter(self):\n        api = arvados.api('v1')\n        new_item = api.collections().create(body={}).execute()\n        for operator, should_include in [\n                ['<', False], ['>', False],\n                ['<=', True], ['>=', True], ['=', True]]:\n            response = api.collections().list(filters=[\n                ['created_at', operator, new_item['created_at']],\n                # Also filter by uuid to ensure (if it matches) it's on page 0\n                ['uuid', '=', new_item['uuid']]]).execute()\n            uuids = [item['uuid'] for item in response['items']]\n            did_include = new_item['uuid'] in uuids\n            self.assertEqual(\n                did_include, should_include,\n                \"'%s %s' filter should%s have matched '%s'\" % (\n                    operator, new_item['created_at'],\n                    ('' if should_include else ' not'),\n                    new_item['created_at']))\n\n    def test_exceptions_include_errors(self):\n        mock_responses = {\n            'arvados.collections.get': self.api_error_response(\n               
 422, \"Bad UUID format\", \"Bad output format\"),\n            }\n        req_builder = apiclient_http.RequestMockBuilder(mock_responses)\n        api = arvados.api('v1', requestBuilder=req_builder)\n        with self.assertRaises(apiclient_errors.HttpError) as err_ctx:\n            api.collections().get(uuid='xyz-xyz-abcdef').execute()\n        err_s = str(err_ctx.exception)\n        for msg in [\"Bad UUID format\", \"Bad output format\"]:\n            self.assertIn(msg, err_s)\n\n    @mock.patch('time.sleep')\n    def test_exceptions_include_request_id(self, sleep):\n        api = arvados.api('v1')\n        api.request_id='fake-request-id'\n        api._http.orig_http_request = mock.MagicMock()\n        api._http.orig_http_request.side_effect = socket.error('mock error')\n        caught = None\n        try:\n            api.users().current().execute()\n        except Exception as e:\n            caught = e\n        self.assertRegex(str(caught), r'fake-request-id')\n\n    def test_exceptions_without_errors_have_basic_info(self):\n        mock_responses = {\n            'arvados.collections.delete': (\n                fake_httplib2_response(500, **self.ERROR_HEADERS),\n                b\"\")\n            }\n        req_builder = apiclient_http.RequestMockBuilder(mock_responses)\n        api = arvados.api('v1', requestBuilder=req_builder)\n        with self.assertRaises(apiclient_errors.HttpError) as err_ctx:\n            api.collections().delete(uuid='xyz-xyz-abcdef').execute()\n        self.assertIn(\"500\", str(err_ctx.exception))\n\n    def test_request_too_large(self):\n        api = arvados.api('v1')\n        maxsize = api._rootDesc.get('maxRequestSize', 0)\n        with self.assertRaises(apiclient_errors.MediaUploadSizeError):\n            text = \"X\" * maxsize\n            arvados.api('v1').collections().create(body={\"manifest_text\": text}).execute()\n\n    def test_default_request_timeout(self):\n        api = arvados.api('v1')\n        self.assertEqual(api._http.timeout, 300,\n            \"Default timeout value should be 300\")\n\n    def test_custom_request_timeout(self):\n        api = arvados.api('v1', timeout=1234)\n        self.assertEqual(api._http.timeout, 1234,\n            \"Requested timeout value was 1234\")\n\n    def test_4xx_retried(self):\n        client = arvados.api('v1')\n        for code in self.RETRIED_4XX:\n            name = f'retried #{code}'\n            with self.subTest(name), mock.patch('time.sleep'):\n                expected = {'username': name}\n                with mock_api_responses(\n                        client,\n                        json.dumps(expected),\n                        [code, code, 200],\n                        self.ERROR_HEADERS,\n                        'orig_http_request',\n                ):\n                    actual = client.users().current().execute()\n                self.assertEqual(actual, expected)\n\n    def test_4xx_not_retried(self):\n        client = arvados.api('v1', num_retries=3)\n        # Note that googleapiclient does retry 403 *if* the response JSON\n        # includes flags that say the request was denied by rate limiting.\n        # An empty JSON response like we use here should not be retried.\n        for code in [400, 401, 403, 404, 422]:\n            with self.subTest(f'error {code}'), mock.patch('time.sleep'):\n                with mock_api_responses(\n                        client,\n                        b'{}',\n                        [code, 200],\n                        
self.ERROR_HEADERS,\n                        'orig_http_request',\n                ), self.assertRaises(arvados.errors.ApiError) as exc_check:\n                    client.users().current().execute()\n                response = exc_check.exception.args[0]\n                self.assertEqual(response.status, code)\n                self.assertEqual(response.get('status'), str(code))\n\n    def test_4xx_raised_after_retry_exhaustion(self):\n        client = arvados.api('v1', num_retries=1)\n        for code in self.RETRIED_4XX:\n            with self.subTest(f'failed {code}'), mock.patch('time.sleep'):\n                with mock_api_responses(\n                        client,\n                        b'{}',\n                        [code, code, code, 200],\n                        self.ERROR_HEADERS,\n                        'orig_http_request',\n                ), self.assertRaises(arvados.errors.ApiError) as exc_check:\n                    client.users().current().execute()\n                response = exc_check.exception.args[0]\n                self.assertEqual(response.status, code)\n                self.assertEqual(response.get('status'), str(code))\n\n    def test_api_is_threadsafe(self):\n        api_kwargs = {\n            'host': os.environ['ARVADOS_API_HOST'],\n            'token': os.environ['ARVADOS_API_TOKEN'],\n            'insecure': True,\n        }\n        config_kwargs = {'apiconfig': os.environ}\n        for api_constructor, kwargs in [\n                (arvados.api, {}),\n                (arvados.api, api_kwargs),\n                (arvados.api_from_config, {}),\n                (arvados.api_from_config, config_kwargs),\n        ]:\n            sub_kwargs = \"kwargs\" if kwargs else \"no kwargs\"\n            with self.subTest(f\"{api_constructor.__name__} with {sub_kwargs}\"):\n                api_client = api_constructor('v1', **kwargs)\n                self.assertTrue(hasattr(api_client, 'localapi'),\n                                f\"client missing localapi method\")\n                self.assertTrue(hasattr(api_client, 'keep'),\n                                f\"client missing keep attribute\")\n\n    def test_api_host_constructor(self):\n        cache = True\n        insecure = True\n        client = arvados.api(\n            'v1',\n            cache,\n            os.environ['ARVADOS_API_HOST'],\n            os.environ['ARVADOS_API_TOKEN'],\n            insecure,\n        )\n        self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'],\n                         \"client constructed with incorrect token\")\n\n    def test_api_url_constructor(self):\n        client = arvados.api(\n            'v1',\n            discoveryServiceUrl=self._discoveryServiceUrl(),\n            token=os.environ['ARVADOS_API_TOKEN'],\n            insecure=True,\n        )\n        self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'],\n                         \"client constructed with incorrect token\")\n\n    def test_api_bad_args(self):\n        all_kwargs = {\n            'host': os.environ['ARVADOS_API_HOST'],\n            'token': os.environ['ARVADOS_API_TOKEN'],\n            'discoveryServiceUrl': self._discoveryServiceUrl(),\n        }\n        for use_keys in [\n                # Passing only a single key is missing required info\n                *([key] for key in all_kwargs.keys()),\n                # Passing all keys is a conflict\n                list(all_kwargs.keys()),\n        ]:\n            kwargs = {key: all_kwargs[key] for key in use_keys}\n        
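    # Name the subtest after the keyword arguments being passed.\n        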
    kwargs_list = ', '.join(use_keys)\n            with self.subTest(f\"calling arvados.api with {kwargs_list} fails\"), \\\n                 self.assertRaises(ValueError):\n                arvados.api('v1', insecure=True, **kwargs)\n\n    def test_api_bad_url(self):\n        for bad_kwargs in [\n                {'discoveryServiceUrl': self._discoveryServiceUrl() + '/BadTestURL'},\n                {'version': 'BadTestVersion', 'host': os.environ['ARVADOS_API_HOST']},\n        ]:\n            bad_key = next(iter(bad_kwargs))\n            with self.subTest(f\"api fails with bad {bad_key}\"), \\\n                 self.assertRaises(apiclient_errors.UnknownApiNameOrVersion):\n                arvados.api(**bad_kwargs, token='test_api_bad_url', insecure=True)\n\n    def test_normalize_api_good_args(self):\n        for version, discoveryServiceUrl, host in [\n                ('Test1', None, os.environ['ARVADOS_API_HOST']),\n                (None, self._discoveryServiceUrl(), None)\n        ]:\n            argname = 'discoveryServiceUrl' if host is None else 'host'\n            with self.subTest(f\"normalize_api_kwargs with {argname}\"):\n                actual = normalize_api_kwargs(\n                    version,\n                    discoveryServiceUrl,\n                    host,\n                    os.environ['ARVADOS_API_TOKEN'],\n                    insecure=True,\n                )\n                self.assertEqual(actual['discoveryServiceUrl'], self._discoveryServiceUrl())\n                self.assertEqual(actual['token'], os.environ['ARVADOS_API_TOKEN'])\n                self.assertEqual(actual['version'], version or 'v1')\n                self.assertTrue(actual['insecure'])\n                self.assertNotIn('host', actual)\n\n    def test_normalize_api_bad_args(self):\n        all_args = (\n            self._discoveryServiceUrl(),\n            os.environ['ARVADOS_API_HOST'],\n            os.environ['ARVADOS_API_TOKEN'],\n        )\n        for arg_index, arg_value in enumerate(all_args):\n            args = [None] * len(all_args)\n            args[arg_index] = arg_value\n            with self.subTest(f\"normalize_api_kwargs with only arg #{arg_index + 1}\"), \\\n                 self.assertRaises(ValueError):\n                normalize_api_kwargs('v1', *args)\n        with self.subTest(\"normalize_api_kwargs with discoveryServiceUrl and host\"), \\\n             self.assertRaises(ValueError):\n            normalize_api_kwargs('v1', *all_args)\n\n    def test_api_from_config_default(self):\n        client = arvados.api_from_config('v1')\n        self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'],\n                         \"client constructed with incorrect token\")\n\n    def test_api_from_config_explicit(self):\n        config = self._config_from_environ()\n        client = arvados.api_from_config('v1', config)\n        self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'],\n                         \"client constructed with incorrect token\")\n\n    def test_api_from_bad_config(self):\n        base_config = self._config_from_environ()\n        for del_key in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']:\n            with self.subTest(f\"api_from_config without {del_key} fails\"), \\\n                 self.assertRaises(ValueError):\n                config = dict(base_config)\n                del config[del_key]\n                arvados.api_from_config('v1', config)\n\n    def test_api_kwargs_from_good_config(self):\n        for config in [None, 
self._config_from_environ()]:\n            conf_type = 'default' if config is None else 'passed'\n            with self.subTest(f\"api_kwargs_from_config with {conf_type} config\"):\n                version = 'Test1' if config else None\n                actual = api_kwargs_from_config(version, config)\n                self.assertEqual(actual['discoveryServiceUrl'], self._discoveryServiceUrl())\n                self.assertEqual(actual['token'], os.environ['ARVADOS_API_TOKEN'])\n                self.assertEqual(actual['version'], version or 'v1')\n                self.assertTrue(actual['insecure'])\n                self.assertNotIn('host', actual)\n\n    def test_api_kwargs_from_bad_config(self):\n        base_config = self._config_from_environ()\n        for del_key in ['ARVADOS_API_HOST', 'ARVADOS_API_TOKEN']:\n            with self.subTest(f\"api_kwargs_from_config without {del_key} fails\"), \\\n                 self.assertRaises(ValueError):\n                config = dict(base_config)\n                del config[del_key]\n                api_kwargs_from_config('v1', config)\n\n    def test_api_client_constructor(self):\n        client = api_client(\n            'v1',\n            self._discoveryServiceUrl(),\n            os.environ['ARVADOS_API_TOKEN'],\n            insecure=True,\n        )\n        self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'],\n                         \"client constructed with incorrect token\")\n        self.assertFalse(\n            hasattr(client, 'localapi'),\n            \"client has localapi method when it should not be thread-safe\",\n        )\n\n    def test_api_client_bad_url(self):\n        all_args = ('v1', self._discoveryServiceUrl(), 'test_api_client_bad_url')\n        for arg_index, arg_value in [\n                (0, 'BadTestVersion'),\n                (1, all_args[1] + '/BadTestURL'),\n        ]:\n            with self.subTest(f\"api_client fails with {arg_index}={arg_value!r}\"), \\\n                 self.assertRaises(apiclient_errors.UnknownApiNameOrVersion):\n                args = list(all_args)\n                args[arg_index] = arg_value\n                api_client(*args, insecure=True)\n\n    def test_initial_retry_logs(self):\n        try:\n            _googleapiclient_log_lock.release()\n        except RuntimeError:\n            # Lock was never acquired - that's the state we want anyway\n            pass\n        real_logger = logging.getLogger('googleapiclient.http')\n        mock_logger = mock.Mock(wraps=real_logger)\n        mock_logger.handlers = logging.getLogger('googleapiclient').handlers\n        mock_logger.level = logging.NOTSET\n        with mock.patch('logging.getLogger', return_value=mock_logger), \\\n             mock.patch('time.sleep'), \\\n             self.assertLogs(real_logger, 'INFO') as actual_logs:\n            try:\n                api_client('v1', 'https://test.invalid/', 'NoToken', num_retries=1)\n            except httplib2.error.ServerNotFoundError:\n                pass\n        mock_logger.addFilter.assert_called()\n        mock_logger.addHandler.assert_called()\n        mock_logger.setLevel.assert_called()\n        mock_logger.removeHandler.assert_called()\n        mock_logger.removeFilter.assert_called()\n        self.assertRegex(actual_logs.output[0], r'^INFO:googleapiclient\\.http:Sleeping \\d')\n\n    def test_configured_logger_untouched(self):\n        real_logger = logging.getLogger('googleapiclient.http')\n        mock_logger = mock.Mock(wraps=real_logger)\n        
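# Borrowing the root logger's handlers makes this logger look already\n        # configured, so the client's retry-logging setup should not touch it.\n        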
mock_logger.handlers = logging.getLogger().handlers\n        with mock.patch('logging.getLogger', return_value=mock_logger), \\\n             mock.patch('time.sleep'):\n            try:\n                api_client('v1', 'https://test.invalid/', 'NoToken', num_retries=1)\n            except httplib2.error.ServerNotFoundError:\n                pass\n        mock_logger.addFilter.assert_not_called()\n        mock_logger.addHandler.assert_not_called()\n        mock_logger.setLevel.assert_not_called()\n        mock_logger.removeHandler.assert_not_called()\n        mock_logger.removeFilter.assert_not_called()\n\n\nclass ConstructNumRetriesTestCase(unittest.TestCase):\n    @staticmethod\n    def _fake_retry_request(http, num_retries, req_type, sleep, rand, uri, method, *args, **kwargs):\n        return http.request(uri, method, *args, **kwargs)\n\n    @contextlib.contextmanager\n    def patch_retry(self):\n        # We have this dedicated context manager that goes through `sys.modules`\n        # instead of just using `mock.patch` because of the unfortunate\n        # `arvados.api` name collision.\n        orig_func = sys.modules['arvados.api']._orig_retry_request\n        expect_name = 'googleapiclient.http._retry_request'\n        self.assertEqual(\n            '{0.__module__}.{0.__name__}'.format(orig_func), expect_name,\n            f\"test setup problem: {expect_name} not at arvados.api._orig_retry_request\",\n        )\n        retry_mock = mock.Mock(wraps=self._fake_retry_request)\n        sys.modules['arvados.api']._orig_retry_request = retry_mock\n        try:\n            yield retry_mock\n        finally:\n            sys.modules['arvados.api']._orig_retry_request = orig_func\n\n    def _iter_num_retries(self, retry_mock):\n        for call in retry_mock.call_args_list:\n            try:\n                yield call.args[1]\n            except IndexError:\n                yield call.kwargs['num_retries']\n\n    def test_default_num_retries(self):\n        with self.patch_retry() as retry_mock:\n            client = arvados.api('v1')\n        actual = set(self._iter_num_retries(retry_mock))\n        self.assertEqual(len(actual), 1)\n        self.assertTrue(actual.pop() > 6, \"num_retries lower than expected\")\n\n    def _test_calls(self, init_arg, call_args, expected):\n        with self.patch_retry() as retry_mock:\n            client = arvados.api('v1', num_retries=init_arg)\n            for num_retries in call_args:\n                client.users().current().execute(num_retries=num_retries)\n        actual = self._iter_num_retries(retry_mock)\n        # The constructor makes two requests with its num_retries argument:\n        # one for the discovery document, and one for the config.\n        self.assertEqual(next(actual, None), init_arg)\n        self.assertEqual(next(actual, None), init_arg)\n        self.assertEqual(list(actual), expected)\n\n    def test_discovery_num_retries(self):\n        for num_retries in [0, 5, 55]:\n            with self.subTest(f\"num_retries={num_retries}\"):\n                self._test_calls(num_retries, [], [])\n\n    def test_num_retries_called_le_init(self):\n        for n in [6, 10]:\n            with self.subTest(f\"init_arg={n}\"):\n                call_args = [n - 4, n - 2, n]\n                expected = [n] * 3\n                self._test_calls(n, call_args, expected)\n\n    def test_num_retries_called_ge_init(self):\n        for n in [0, 10]:\n            with self.subTest(f\"init_arg={n}\"):\n                call_args = [n, n + 4, n + 8]\n      
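          # Per-call num_retries values at or above the constructor's are used as-is.\n      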
          self._test_calls(n, call_args, call_args)\n\n    def test_num_retries_called_mixed(self):\n        self._test_calls(5, [2, 6, 4, 8], [5, 6, 5, 8])\n\n\nclass PreCloseSocketTestCase(unittest.TestCase):\n    def setUp(self):\n        self.api = arvados.api('v1')\n        self.assertTrue(hasattr(self.api._http, 'orig_http_request'),\n                        \"test doesn't know how to intercept HTTP requests\")\n        self.mock_response = {'user': 'person'}\n        self.request_success = (fake_httplib2_response(200),\n                                json.dumps(self.mock_response))\n        self.api._http.orig_http_request = mock.MagicMock()\n        # All requests succeed by default. Tests override as needed.\n        self.api._http.orig_http_request.return_value = self.request_success\n\n    @mock.patch('time.time', side_effect=[i*2**20 for i in range(99)])\n    def test_close_old_connections_non_retryable(self, sleep):\n        self._test_connection_close(expect=1)\n\n    @mock.patch('time.time', side_effect=itertools.count())\n    def test_no_close_fresh_connections_non_retryable(self, sleep):\n        self._test_connection_close(expect=0)\n\n    @mock.patch('time.time', side_effect=itertools.count())\n    def test_override_max_idle_time(self, sleep):\n        self.api._http._max_keepalive_idle = 0\n        self._test_connection_close(expect=1)\n\n    def _test_connection_close(self, expect=0):\n        # Do two POST requests. The second one must close all\n        # connections +expect+ times.\n        self.api.users().create(body={}).execute()\n        mock_conns = {str(i): mock.MagicMock() for i in range(2)}\n        self.api._http.connections = mock_conns.copy()\n        self.api.users().create(body={}).execute()\n        for c in mock_conns.values():\n            self.assertEqual(c.close.call_count, expect)\n\n\nclass ThreadSafeAPIClientTestCase(run_test_server.TestCaseWithServers):\n    MAIN_SERVER = {}\n\n    def test_constructor(self):\n        env_mapping = {\n            key: value\n            for key, value in os.environ.items()\n            if key.startswith('ARVADOS_API_')\n        }\n        extra_params = {\n            'timeout': 299,\n        }\n        base_params = {\n            key[12:].lower(): value\n            for key, value in env_mapping.items()\n        }\n        try:\n            base_params['insecure'] = base_params.pop('host_insecure')\n        except KeyError:\n            pass\n        expected_keep_params = {}\n        for config, params, subtest in [\n                (None, {}, \"default arguments\"),\n                (None, extra_params, \"extra params\"),\n                (env_mapping, {}, \"explicit config\"),\n                (env_mapping, extra_params, \"explicit config and params\"),\n                ({}, base_params, \"params only\"),\n        ]:\n            with self.subTest(f\"test constructor with {subtest}\"):\n                expected_timeout = params.get('timeout', 300)\n                expected_params = dict(params)\n                keep_params = dict(expected_keep_params)\n                client = ThreadSafeAPIClient(config, keep_params, params, 'v1')\n                self.assertTrue(hasattr(client, 'localapi'), \"client missing localapi method\")\n                self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'])\n                self.assertEqual(client._http.timeout, expected_timeout)\n                self.assertEqual(params, expected_params,\n                                 \"api_params was modified 
in-place\")\n                self.assertEqual(keep_params, expected_keep_params,\n                                 \"keep_params was modified in-place\")\n\n    def test_constructor_no_args(self):\n        client = ThreadSafeAPIClient()\n        self.assertTrue(hasattr(client, 'localapi'), \"client missing localapi method\")\n        self.assertEqual(client.api_token, os.environ['ARVADOS_API_TOKEN'])\n        self.assertTrue(client.insecure)\n\n    def test_constructor_bad_version(self):\n        with self.assertRaises(googleapiclient.errors.UnknownApiNameOrVersion):\n            ThreadSafeAPIClient(version='BadTestVersion')\n\n    def test_pre_v3_0_name(self):\n        from arvados.safeapi import ThreadSafeApiCache\n        self.assertIs(ThreadSafeApiCache, ThreadSafeAPIClient)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "sdk/python/tests/test_arv_copy.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport itertools\nimport os\nimport sys\nimport tempfile\nimport unittest\nimport shutil\nimport arvados.api\nimport arvados.util\nfrom arvados.collection import Collection, CollectionReader\n\nimport pytest\n\nimport arvados.commands.arv_copy as arv_copy\nfrom arvados._internal import basedirs\nfrom . import arvados_testutil as tutil\nfrom . import run_test_server\n\nclass ArvCopyVersionTestCase(run_test_server.TestCaseWithServers, tutil.VersionChecker):\n    MAIN_SERVER = {}\n    KEEP_SERVER = {}\n\n    def run_copy(self, args):\n        sys.argv = ['arv-copy'] + args\n        return arv_copy.main()\n\n    def test_unsupported_arg(self):\n        with self.assertRaises(SystemExit):\n            self.run_copy(['-x=unknown'])\n\n    def test_version_argument(self):\n        with tutil.redirected_streams(\n                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):\n            with self.assertRaises(SystemExit):\n                self.run_copy(['--version'])\n        self.assertVersionOutput(out, err)\n\n    def test_copy_project(self):\n        api = arvados.api()\n        src_proj = api.groups().create(body={\"group\": {\"name\": \"arv-copy project\", \"group_class\": \"project\"}}).execute()[\"uuid\"]\n\n        c = Collection()\n        with c.open('foo', 'wt') as f:\n            f.write('foo')\n        c.save_new(\"arv-copy foo collection\", owner_uuid=src_proj)\n        coll_record = api.collections().get(uuid=c.manifest_locator()).execute()\n        assert coll_record['storage_classes_desired'] == ['default']\n\n        dest_proj = api.groups().create(body={\"group\": {\"name\": \"arv-copy dest project\", \"group_class\": \"project\"}}).execute()[\"uuid\"]\n\n        tmphome = tempfile.mkdtemp()\n        home_was = os.environ['HOME']\n        os.environ['HOME'] = tmphome\n        try:\n            cfgdir = os.path.join(tmphome, \".config\", \"arvados\")\n            os.makedirs(cfgdir)\n            with open(os.path.join(cfgdir, \"zzzzz.conf\"), \"wt\") as f:\n                f.write(\"ARVADOS_API_HOST=%s\\n\" % os.environ[\"ARVADOS_API_HOST\"])\n                f.write(\"ARVADOS_API_TOKEN=%s\\n\" % os.environ[\"ARVADOS_API_TOKEN\"])\n                f.write(\"ARVADOS_API_HOST_INSECURE=1\\n\")\n\n            contents = api.groups().list(filters=[[\"owner_uuid\", \"=\", dest_proj]]).execute()\n            assert len(contents[\"items\"]) == 0\n\n            with tutil.redirected_streams(\n                    stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):\n                try:\n                    self.run_copy([\"--project-uuid\", dest_proj, \"--storage-classes\", \"foo\", src_proj])\n                except SystemExit as e:\n                    assert e.code == 0\n                copy_uuid_from_stdout = out.getvalue().strip()\n\n            contents = api.groups().list(filters=[[\"owner_uuid\", \"=\", dest_proj]]).execute()\n            assert len(contents[\"items\"]) == 1\n\n            assert contents[\"items\"][0][\"name\"] == \"arv-copy project\"\n            copied_project = contents[\"items\"][0][\"uuid\"]\n\n            assert copied_project == copy_uuid_from_stdout\n\n            contents = api.collections().list(filters=[[\"owner_uuid\", \"=\", copied_project]]).execute()\n            assert len(contents[\"items\"]) == 1\n\n            assert contents[\"items\"][0][\"uuid\"] != c.manifest_locator()\n            assert 
contents[\"items\"][0][\"name\"] == \"arv-copy foo collection\"\n            assert contents[\"items\"][0][\"portable_data_hash\"] == c.portable_data_hash()\n            assert contents[\"items\"][0][\"storage_classes_desired\"] == [\"foo\"]\n\n        finally:\n            os.environ['HOME'] = home_was\n            shutil.rmtree(tmphome)\n\n\nclass TestApiForInstance:\n    _token_counter = itertools.count(1)\n\n    class ApiObject:\n        def __init__(self, **kwargs):\n            self.kwargs = kwargs\n\n        def config(self):\n            return {\"ClusterID\": \"zzzzz\"}\n\n    @staticmethod\n    def api_config(version, **kwargs):\n        assert version == 'v1'\n        return TestApiForInstance.ApiObject(**kwargs)\n\n    @pytest.fixture\n    def patch_api(self, monkeypatch):\n        monkeypatch.setattr(arvados, 'api', self.api_config)\n        monkeypatch.setattr(\n            arvados.api,\n            '_reset_googleapiclient_logging',\n            lambda: None,\n            raising=False,\n        )\n\n    @pytest.fixture\n    def config_file(self, tmp_path):\n        count = next(self._token_counter)\n        path = tmp_path / f'config{count}.conf'\n        with path.open('w') as config_file:\n            print(\n                \"ARVADOS_API_HOST=localhost\",\n                f\"ARVADOS_API_TOKEN={self.expected_token(path)}\",\n                sep=\"\\n\", file=config_file,\n            )\n        return path\n\n    @pytest.fixture\n    def patch_search(self, tmp_path, monkeypatch):\n        def search(self, name):\n            path = tmp_path / name\n            if path.exists():\n                yield path\n        monkeypatch.setattr(basedirs.BaseDirectories, 'search', search)\n\n    def expected_token(self, path):\n        return f\"v2/zzzzz-gj3su-{path.stem:>015s}/{path.stem:>050s}\"\n\n    def test_from_environ(self, patch_api):\n        actual = arv_copy.api_for_instance('', 0)\n        assert actual.kwargs == {\"num_retries\": 0}\n\n    def test_instance_matches_environ(self, patch_api):\n        actual = arv_copy.api_for_instance('zzzzz', 0)\n        assert actual.kwargs == {\"num_retries\": 0}\n\n    def test_relative_path(self, patch_api, config_file, monkeypatch):\n        monkeypatch.chdir(config_file.parent)\n        actual = arv_copy.api_for_instance(f'./{config_file.name}', 0)\n        assert actual.kwargs['host'] == 'localhost'\n        assert actual.kwargs['token'] == self.expected_token(config_file)\n\n    def test_absolute_path(self, patch_api, config_file):\n        actual = arv_copy.api_for_instance(str(config_file), 0)\n        assert actual.kwargs['host'] == 'localhost'\n        assert actual.kwargs['token'] == self.expected_token(config_file)\n\n    def test_search_path(self, patch_api, patch_search, config_file):\n        actual = arv_copy.api_for_instance(config_file.stem, 0)\n        assert actual.kwargs['host'] == 'localhost'\n        assert actual.kwargs['token'] == self.expected_token(config_file)\n\n    def test_search_failed(self, patch_api, patch_search):\n        with pytest.raises(SystemExit) as exc_info:\n            arv_copy.api_for_instance('NotFound', 0)\n        assert exc_info.value.code > 0\n\n    def test_path_unreadable(self, patch_api, tmp_path):\n        with pytest.raises(SystemExit) as exc_info:\n            arv_copy.api_for_instance(str(tmp_path / 'nonexistent.conf'), 0)\n        assert exc_info.value.code > 0\n"
  },
  {
    "path": "sdk/python/tests/test_arv_get.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport io\nimport logging\nimport os\nimport re\nimport shutil\nimport tempfile\n\nfrom unittest import mock\n\nimport arvados\nimport arvados.collection as collection\nimport arvados.commands.get as arv_get\nfrom . import run_test_server\n\nfrom . import arvados_testutil as tutil\nfrom .arvados_testutil import ArvadosBaseTestCase\n\nclass ArvadosGetTestCase(run_test_server.TestCaseWithServers,\n                         tutil.VersionChecker,\n                         ArvadosBaseTestCase):\n    MAIN_SERVER = {}\n    KEEP_SERVER = {}\n\n    def setUp(self):\n        super(ArvadosGetTestCase, self).setUp()\n        self.tempdir = tempfile.mkdtemp()\n        self.col_loc, self.col_pdh, self.col_manifest = self.write_test_collection()\n\n        self.stdout = tutil.BytesIO()\n        self.stderr = tutil.StringIO()\n        self.loggingHandler = logging.StreamHandler(self.stderr)\n        self.loggingHandler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n        logging.getLogger().addHandler(self.loggingHandler)\n\n    def tearDown(self):\n        logging.getLogger().removeHandler(self.loggingHandler)\n        super(ArvadosGetTestCase, self).tearDown()\n        shutil.rmtree(self.tempdir)\n\n    def write_test_collection(self,\n                              strip_manifest=False,\n                              contents = {\n                                  'foo.txt' : 'foo',\n                                  'bar.txt' : 'bar',\n                                  'subdir/baz.txt' : 'baz',\n                              }):\n        api = arvados.api()\n        c = collection.Collection(api_client=api)\n        for path, data in contents.items():\n            with c.open(path, 'wb') as f:\n                f.write(data)\n        c.save_new()\n\n        api.close_connections()\n\n        return (c.manifest_locator(),\n                c.portable_data_hash(),\n                c.manifest_text(strip=strip_manifest))\n\n    def run_get(self, args):\n        self.stdout.seek(0, 0)\n        self.stdout.truncate(0)\n        self.stderr.seek(0, 0)\n        self.stderr.truncate(0)\n        return arv_get.main(args, self.stdout, self.stderr)\n\n    def test_version_argument(self):\n        with tutil.redirected_streams(\n                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):\n            with self.assertRaises(SystemExit):\n                self.run_get(['--version'])\n        self.assertVersionOutput(out, err)\n\n    def test_get_single_file(self):\n        # Get the file using the collection's locator\n        r = self.run_get([\"{}/subdir/baz.txt\".format(self.col_loc), '-'])\n        self.assertEqual(0, r)\n        self.assertEqual(b'baz', self.stdout.getvalue())\n        # Then, try by PDH\n        r = self.run_get([\"{}/subdir/baz.txt\".format(self.col_pdh), '-'])\n        self.assertEqual(0, r)\n        self.assertEqual(b'baz', self.stdout.getvalue())\n\n    def test_get_block(self):\n        # Get raw data using a block locator\n        blk = re.search(r' (acbd18\\S+\\+A\\S+) ', self.col_manifest).group(1)\n        r = self.run_get([blk, '-'])\n        self.assertEqual(0, r)\n        self.assertEqual(b'foo', self.stdout.getvalue())\n\n    def test_get_multiple_files(self):\n        # Download the entire collection to the temp directory\n        r = self.run_get([\"{}/\".format(self.col_loc), self.tempdir])\n        self.assertEqual(0, r)\n        
with open(os.path.join(self.tempdir, \"foo.txt\"), \"r\") as f:\n            self.assertEqual(\"foo\", f.read())\n        with open(os.path.join(self.tempdir, \"bar.txt\"), \"r\") as f:\n            self.assertEqual(\"bar\", f.read())\n        with open(os.path.join(self.tempdir, \"subdir\", \"baz.txt\"), \"r\") as f:\n            self.assertEqual(\"baz\", f.read())\n\n    def test_get_collection_unstripped_manifest(self):\n        dummy_token = \"+Axxxxxxx\"\n        # Get the collection manifest by UUID\n        r = self.run_get([self.col_loc, self.tempdir])\n        self.assertEqual(0, r)\n        m_from_collection = re.sub(r\"\\+A[0-9a-f@]+\", dummy_token, self.col_manifest)\n        with open(os.path.join(self.tempdir, self.col_loc), \"r\") as f:\n            # Replace manifest tokens before comparison to avoid races\n            m_from_file = re.sub(r\"\\+A[0-9a-f@]+\", dummy_token, f.read())\n            self.assertEqual(m_from_collection, m_from_file)\n        # Get the collection manifest by PDH\n        r = self.run_get([self.col_pdh, self.tempdir])\n        self.assertEqual(0, r)\n        with open(os.path.join(self.tempdir, self.col_pdh), \"r\") as f:\n            # Replace manifest tokens before comparison to avoid races\n            m_from_file = re.sub(r\"\\+A[0-9a-f@]+\", dummy_token, f.read())\n            self.assertEqual(m_from_collection, m_from_file)\n\n    def test_get_collection_stripped_manifest(self):\n        col_loc, col_pdh, col_manifest = self.write_test_collection(\n            strip_manifest=True)\n        # Get the collection manifest by UUID\n        r = self.run_get(['--strip-manifest', col_loc, self.tempdir])\n        self.assertEqual(0, r)\n        with open(os.path.join(self.tempdir, col_loc), \"r\") as f:\n            self.assertEqual(col_manifest, f.read())\n        # Get the collection manifest by PDH\n        r = self.run_get(['--strip-manifest', col_pdh, self.tempdir])\n        self.assertEqual(0, r)\n        with open(os.path.join(self.tempdir, col_pdh), \"r\") as f:\n            self.assertEqual(col_manifest, f.read())\n\n    def test_invalid_collection(self):\n        # Asking for an invalid collection should generate an error.\n        r = self.run_get(['this-uuid-seems-to-be-fake', self.tempdir])\n        self.assertNotEqual(0, r)\n\n    def test_invalid_file_request(self):\n        # Asking for a nonexistent file within a collection should generate an error.\n        r = self.run_get([\"{}/im-not-here.txt\".format(self.col_loc), self.tempdir])\n        self.assertNotEqual(0, r)\n\n    def test_invalid_destination(self):\n        # Asking to place the collection's files in a nonexistent directory\n        # should generate an error.\n        r = self.run_get([self.col_loc, \"/fake/subdir/\"])\n        self.assertNotEqual(0, r)\n\n    def test_preexistent_destination(self):\n        # Asking to place a file with the same path as a local one should\n        # generate an error and avoid overwrites.\n        with open(os.path.join(self.tempdir, \"foo.txt\"), \"w\") as f:\n            f.write(\"another foo\")\n        r = self.run_get([\"{}/foo.txt\".format(self.col_loc), self.tempdir])\n        self.assertNotEqual(0, r)\n        with open(os.path.join(self.tempdir, \"foo.txt\"), \"r\") as f:\n            self.assertEqual(\"another foo\", f.read())\n\n    def test_no_progress_when_stderr_not_a_tty(self):\n        # Create a collection with a big file (>64MB) to force the progress\n        # to be printed\n        c = collection.Collection()\n     
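   # Sixty-five 1 MiB writes put the file just over the 64 MB threshold.\n     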
   with c.open('bigfile.txt', 'wb') as f:\n            for _ in range(65):\n                f.write(\"x\" * 1024 * 1024)\n        c.save_new()\n        tmpdir = self.make_tmpdir()\n        # Simulate a TTY stderr\n        stderr = mock.MagicMock()\n        stdout = tutil.BytesIO()\n\n        # Confirm that progress is written to stderr when it is a tty\n        stderr.isatty.return_value = True\n        r = arv_get.main(['{}/bigfile.txt'.format(c.manifest_locator()),\n                          '{}/bigfile.txt'.format(tmpdir)],\n                         stdout, stderr)\n        self.assertEqual(0, r)\n        self.assertEqual(b'', stdout.getvalue())\n        self.assertTrue(stderr.write.called)\n\n        # Clean up and reset stderr mock\n        os.remove('{}/bigfile.txt'.format(tmpdir))\n        stderr = mock.MagicMock()\n        stdout = tutil.BytesIO()\n\n        # Confirm that progress is not written to stderr when it isn't a tty\n        stderr.isatty.return_value = False\n        r = arv_get.main(['{}/bigfile.txt'.format(c.manifest_locator()),\n                          '{}/bigfile.txt'.format(tmpdir)],\n                         stdout, stderr)\n        self.assertEqual(0, r)\n        self.assertEqual(b'', stdout.getvalue())\n        self.assertFalse(stderr.write.called)\n\n    request_id_regex = r'INFO: X-Request-Id: req-[a-z0-9]{20}\\n'\n\n    def test_request_id_logging_on(self):\n        r = self.run_get([\"-v\", \"{}/\".format(self.col_loc), self.tempdir])\n        self.assertEqual(0, r)\n        self.assertRegex(self.stderr.getvalue(), self.request_id_regex)\n\n    def test_request_id_logging_off(self):\n        r = self.run_get([\"{}/\".format(self.col_loc), self.tempdir])\n        self.assertEqual(0, r)\n        self.assertNotRegex(self.stderr.getvalue(), self.request_id_regex)\n"
  },
  {
    "path": "sdk/python/tests/test_arv_keepdocker.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport arvados\nimport collections\nimport collections.abc\nimport copy\nimport hashlib\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\nimport unittest\n\nfrom pathlib import Path\nfrom unittest import mock\n\nimport parameterized\nimport pytest\n\nimport arvados.commands.keepdocker as arv_keepdocker\nfrom . import arvados_testutil as tutil\n\nclass StopTest(Exception):\n    pass\n\n\nclass ArvKeepdockerTestCase(unittest.TestCase, tutil.VersionChecker):\n    def run_arv_keepdocker(self, args, err, **kwargs):\n        sys.argv = ['arv-keepdocker'] + args\n        log_handler = logging.StreamHandler(err)\n        arv_keepdocker.logger.addHandler(log_handler)\n        try:\n            return arv_keepdocker.main(**kwargs)\n        finally:\n            arv_keepdocker.logger.removeHandler(log_handler)\n\n    def test_unsupported_arg(self):\n        out = tutil.StringIO()\n        with tutil.redirected_streams(stdout=out, stderr=out), \\\n             self.assertRaises(SystemExit):\n            self.run_arv_keepdocker(['-x=unknown'], sys.stderr)\n        self.assertRegex(out.getvalue(), r'unrecognized arguments')\n\n    def test_version_argument(self):\n        with tutil.redirected_streams(\n                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):\n            with self.assertRaises(SystemExit):\n                self.run_arv_keepdocker(['--version'], sys.stderr)\n        self.assertVersionOutput(out, err)\n\n    @mock.patch('arvados.commands.keepdocker.list_images_in_arv',\n                return_value=[])\n    @mock.patch('arvados.commands.keepdocker.find_image_hashes',\n                return_value=['abc123'])\n    @mock.patch('arvados.commands.keepdocker.find_one_image_hash',\n                return_value='abc123')\n    def test_image_format_compatibility(self, _1, _2, _3):\n        old_id = hashlib.sha256(b'old').hexdigest()\n        new_id = 'sha256:'+hashlib.sha256(b'new').hexdigest()\n        for supported, img_id, expect_ok in [\n                (['v1'], old_id, True),\n                (['v1'], new_id, False),\n                (None, old_id, False),\n                ([], old_id, False),\n                ([], new_id, False),\n                (['v1', 'v2'], new_id, True),\n                (['v1'], new_id, False),\n                (['v2'], new_id, True)]:\n\n            fakeDD = arvados.api('v1')._rootDesc\n            if supported is None:\n                del fakeDD['dockerImageFormats']\n            else:\n                fakeDD['dockerImageFormats'] = supported\n\n            err = tutil.StringIO()\n            out = tutil.StringIO()\n\n            with tutil.redirected_streams(stdout=out), \\\n                 mock.patch('arvados.api') as api, \\\n                 mock.patch('arvados.commands.keepdocker.popen_docker',\n                            return_value=subprocess.Popen(\n                                ['echo', img_id],\n                                stdout=subprocess.PIPE)), \\\n                 mock.patch('arvados.commands.keepdocker.prep_image_file',\n                            side_effect=StopTest), \\\n                 self.assertRaises(StopTest if expect_ok else SystemExit):\n\n                api()._rootDesc = fakeDD\n                self.run_arv_keepdocker(['--force', 'testimage'], err)\n\n            self.assertEqual(out.getvalue(), '')\n            if expect_ok:\n                
self.assertNotRegex(\n                    err.getvalue(), r\"refusing to store\",\n                    msg=repr((supported, img_id)))\n            else:\n                self.assertRegex(\n                    err.getvalue(), r\"refusing to store\",\n                    msg=repr((supported, img_id)))\n            if not supported:\n                self.assertRegex(\n                    err.getvalue(),\n                    r\"server does not specify supported image formats\",\n                    msg=repr((supported, img_id)))\n\n        fakeDD = arvados.api('v1')._rootDesc\n        fakeDD['dockerImageFormats'] = ['v1']\n        err = tutil.StringIO()\n        out = tutil.StringIO()\n        with tutil.redirected_streams(stdout=out), \\\n             mock.patch('arvados.api') as api, \\\n             mock.patch('arvados.commands.keepdocker.popen_docker',\n                        return_value=subprocess.Popen(\n                            ['echo', new_id],\n                            stdout=subprocess.PIPE)), \\\n             mock.patch('arvados.commands.keepdocker.prep_image_file',\n                        side_effect=StopTest), \\\n             self.assertRaises(StopTest):\n            api()._rootDesc = fakeDD\n            self.run_arv_keepdocker(\n                ['--force', '--force-image-format', 'testimage'], err)\n        self.assertRegex(err.getvalue(), r\"forcing incompatible image\")\n\n    def test_tag_given_twice(self):\n        with tutil.redirected_streams(stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):\n            with self.assertRaises(SystemExit):\n                self.run_arv_keepdocker(['myrepo:mytag', 'extratag'], sys.stderr)\n            self.assertRegex(err.getvalue(), r\"cannot add tag argument 'extratag'\")\n\n    def test_image_given_as_repo_colon_tag(self):\n        with self.assertRaises(StopTest), \\\n             mock.patch('arvados.commands.keepdocker.find_one_image_hash',\n                        side_effect=StopTest) as find_image_mock:\n            self.run_arv_keepdocker(['repo:tag'], sys.stderr)\n        find_image_mock.assert_called_with('repo', 'tag')\n\n    def test_image_given_as_registry_repo_colon_tag(self):\n        with self.assertRaises(StopTest), \\\n             mock.patch('arvados.commands.keepdocker.find_one_image_hash',\n                        side_effect=StopTest) as find_image_mock:\n            self.run_arv_keepdocker(['myreg.example:8888/repo/img:tag'], sys.stderr)\n        find_image_mock.assert_called_with('myreg.example:8888/repo/img', 'tag')\n\n        with self.assertRaises(StopTest), \\\n             mock.patch('arvados.commands.keepdocker.find_one_image_hash',\n                        side_effect=StopTest) as find_image_mock:\n            self.run_arv_keepdocker(['registry.hub.docker.com:443/library/debian:bullseye-slim'], sys.stderr)\n        find_image_mock.assert_called_with('registry.hub.docker.com/library/debian', 'bullseye-slim')\n\n    def test_image_has_colons(self):\n        with self.assertRaises(StopTest), \\\n             mock.patch('arvados.commands.keepdocker.find_one_image_hash',\n                        side_effect=StopTest) as find_image_mock:\n            self.run_arv_keepdocker(['[::1]:8888/repo/img'], sys.stderr)\n        find_image_mock.assert_called_with('[::1]:8888/repo/img', 'latest')\n\n        with self.assertRaises(StopTest), \\\n             mock.patch('arvados.commands.keepdocker.find_one_image_hash',\n                        side_effect=StopTest) as find_image_mock:\n            
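# An IPv6 address without a port should pass through with the default tag.\n            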
self.run_arv_keepdocker(['[::1]/repo/img'], sys.stderr)\n        find_image_mock.assert_called_with('[::1]/repo/img', 'latest')\n\n        with self.assertRaises(StopTest), \\\n             mock.patch('arvados.commands.keepdocker.find_one_image_hash',\n                        side_effect=StopTest) as find_image_mock:\n            self.run_arv_keepdocker(['[::1]:8888/repo/img:tag'], sys.stderr)\n        find_image_mock.assert_called_with('[::1]:8888/repo/img', 'tag')\n\n    def test_list_images_with_host_and_port(self):\n        api = arvados.api('v1')\n        taglink = api.links().create(body={'link': {\n            'link_class': 'docker_image_repo+tag',\n            'name': 'registry.example:1234/repo:latest',\n            'head_uuid': 'zzzzz-4zz18-1v45jub259sjjgb',\n        }}).execute()\n        try:\n            out = tutil.StringIO()\n            with self.assertRaises(SystemExit):\n                self.run_arv_keepdocker([], sys.stderr, stdout=out)\n            self.assertRegex(out.getvalue(), r'\\nregistry.example:1234/repo +latest ')\n        finally:\n            api.links().delete(uuid=taglink['uuid']).execute()\n\n    @mock.patch('arvados.commands.keepdocker.list_images_in_arv',\n                return_value=[])\n    @mock.patch('arvados.commands.keepdocker.find_image_hashes',\n                return_value=['abc123'])\n    @mock.patch('arvados.commands.keepdocker.find_one_image_hash',\n                return_value='abc123')\n    def test_collection_property_update(self, _1, _2, _3):\n        image_id = 'sha256:'+hashlib.sha256(b'image').hexdigest()\n        fakeDD = arvados.api('v1')._rootDesc\n        fakeDD['dockerImageFormats'] = ['v2']\n\n        err = tutil.StringIO()\n        out = tutil.StringIO()\n        File = collections.namedtuple('File', ['name'])\n        mocked_file = File(name='docker_image')\n        mocked_collection = {\n            'uuid': 'new-collection-uuid',\n            'properties': {\n                'responsible_person_uuid': 'person_uuid',\n            }\n        }\n\n        with tutil.redirected_streams(stdout=out), \\\n             mock.patch('arvados.api') as api, \\\n             mock.patch('arvados.commands.keepdocker.popen_docker',\n                        return_value=subprocess.Popen(\n                            ['echo', image_id],\n                            stdout=subprocess.PIPE)), \\\n             mock.patch('arvados.commands.keepdocker.prep_image_file',\n                        return_value=(mocked_file, False)), \\\n             mock.patch('arvados.commands.put.main',\n                        return_value='new-collection-uuid'), \\\n             self.assertRaises(StopTest):\n\n            api()._rootDesc = fakeDD\n            api().collections().get().execute.return_value = copy.deepcopy(mocked_collection)\n            api().collections().update().execute.side_effect = StopTest\n            self.run_arv_keepdocker(['--force', 'testimage'], err)\n\n        updated_properties = mocked_collection['properties']\n        updated_properties.update({'docker-image-repo-tag': 'testimage:latest'})\n        api().collections().update.assert_called_with(\n            uuid=mocked_collection['uuid'],\n            body={'properties': updated_properties})\n\n\n@parameterized.parameterized_class(('filename',), [\n    ('hello-world-ManifestV2.tar',),\n    ('hello-world-ManifestV2-OCILayout.tar',),\n])\nclass ImageMetadataTestCase(unittest.TestCase):\n    DATA_PATH = Path(__file__).parent / 'data'\n\n    @classmethod\n    def setUpClass(cls):\n        
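# Open the parameterized image tarball once and share it across tests.\n        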
cls.image_file = (cls.DATA_PATH / cls.filename).open('rb')\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.image_file.close()\n\n    def setUp(self):\n        self.manifest, self.config = arv_keepdocker.load_image_metadata(self.image_file)\n\n    def test_image_manifest(self):\n        self.assertIsInstance(self.manifest, collections.abc.Mapping)\n        self.assertEqual(self.manifest.get('RepoTags'), ['hello-world:latest'])\n\n    def test_image_config(self):\n        self.assertIsInstance(self.config, collections.abc.Mapping)\n        self.assertEqual(self.config.get('created'), '2023-05-02T16:49:27Z')\n\n\ndef test_get_cache_dir(tmp_path):\n    actual = arv_keepdocker.get_cache_dir(lambda: tmp_path)\n    assert isinstance(actual, str)\n    actual = Path(actual)\n    assert actual.is_dir()\n    assert actual.name == 'docker'\n    assert actual.parent == tmp_path\n"
  },
  {
    "path": "sdk/python/tests/test_arv_ls.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport random\nimport sys\nimport tempfile\n\nfrom unittest import mock\n\nimport arvados.errors as arv_error\nimport arvados.commands.ls as arv_ls\nfrom . import run_test_server\n\nfrom . import arvados_testutil as tutil\nfrom .arvados_testutil import str_keep_locator, redirected_streams, StringIO\n\nclass ArvLsTestCase(run_test_server.TestCaseWithServers, tutil.VersionChecker):\n    FAKE_UUID = 'zzzzz-4zz18-12345abcde12345'\n\n    def newline_join(self, seq):\n        return '\\n'.join(seq) + '\\n'\n\n    def random_blocks(self, *sizes):\n        return ' '.join('{:032x}+{:d}'.format(\n                  random.randint(0, (16 ** 32) - 1), size\n                ) for size in sizes)\n\n    def mock_api_for_manifest(self, manifest_lines, uuid=FAKE_UUID):\n        manifest_text = self.newline_join(manifest_lines)\n        pdh = str_keep_locator(manifest_text)\n        coll_info = {'uuid': uuid,\n                     'portable_data_hash': pdh,\n                     'manifest_text': manifest_text}\n        api_client = mock.MagicMock(name='mock_api_client')\n        api_client.collections().get().execute.return_value = coll_info\n        return coll_info, api_client\n\n    def run_ls(self, args, api_client, logger=None):\n        self.stdout = StringIO()\n        self.stderr = StringIO()\n        return arv_ls.main(args, self.stdout, self.stderr, api_client, logger)\n\n    def test_plain_listing(self):\n        collection, api_client = self.mock_api_for_manifest(\n            ['. {} 0:3:one.txt 3:4:two.txt'.format(self.random_blocks(5, 2)),\n             './dir {} 1:5:sub.txt'.format(self.random_blocks(8))])\n        self.assertEqual(0, self.run_ls([collection['uuid']], api_client))\n        self.assertEqual(\n            self.newline_join(['./one.txt', './two.txt', './dir/sub.txt']),\n            self.stdout.getvalue())\n        self.assertEqual('', self.stderr.getvalue())\n\n    def test_size_listing(self):\n        collection, api_client = self.mock_api_for_manifest(\n            ['. {} 0:0:0.txt 0:1000:1.txt 1000:2000:2.txt'.format(\n                    self.random_blocks(3000))])\n        self.assertEqual(0, self.run_ls(['-s', collection['uuid']], api_client))\n        self.stdout.seek(0, 0)\n        for expected in range(3):\n            actual_size, actual_name = self.stdout.readline().split()\n            # But she seems much bigger to me...\n            self.assertEqual(str(expected), actual_size)\n            self.assertEqual('./{}.txt'.format(expected), actual_name)\n        self.assertEqual('', self.stdout.read(-1))\n        self.assertEqual('', self.stderr.getvalue())\n\n    def test_nonnormalized_manifest(self):\n        collection, api_client = self.mock_api_for_manifest(\n            ['. {} 0:1010:non.txt'.format(self.random_blocks(1010)),\n             '. 
{} 0:2020:non.txt'.format(self.random_blocks(2020))])\n        self.assertEqual(0, self.run_ls(['-s', collection['uuid']], api_client))\n        self.stdout.seek(0, 0)\n        self.assertEqual(['3', './non.txt'], self.stdout.readline().split())\n        self.assertEqual('', self.stdout.read(-1))\n        self.assertEqual('', self.stderr.getvalue())\n\n    def test_locator_failure(self):\n        api_client = mock.MagicMock(name='mock_api_client')\n        error_mock = mock.MagicMock()\n        logger = mock.MagicMock()\n        logger.error = error_mock\n        api_client.collections().get().execute.side_effect = (\n            arv_error.NotFoundError)\n        self.assertNotEqual(0, self.run_ls([self.FAKE_UUID], api_client, logger))\n        self.assertEqual(1, error_mock.call_count)\n\n    def test_version_argument(self):\n        import warnings\n        warnings.simplefilter(\"ignore\")\n        with redirected_streams(stdout=StringIO, stderr=StringIO) as (out, err):\n            with self.assertRaises(SystemExit):\n                self.run_ls(['--version'], None)\n        self.assertVersionOutput(out, err)\n"
  },
  {
    "path": "sdk/python/tests/test_arv_normalize.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport subprocess\nimport sys\nimport tempfile\nimport unittest\n\nfrom . import arvados_testutil as tutil\n\n\nclass ArvNormalizeTestCase(unittest.TestCase, tutil.VersionChecker):\n    def run_arv_normalize(self, args=[]):\n        p = subprocess.Popen([sys.executable, 'bin/arv-normalize'] + args,\n                             stdout=subprocess.PIPE,\n                             stderr=subprocess.PIPE)\n        out, err = p.communicate()\n        sys.stdout.write(out.decode())\n        sys.stderr.write(err.decode())\n        return p.returncode\n\n    def test_unsupported_arg(self):\n        with tutil.redirected_streams(\n                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):\n            returncode = self.run_arv_normalize(['-x=unknown'])\n        self.assertNotEqual(0, returncode)\n\n    def test_version_argument(self):\n        with tutil.redirected_streams(\n                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):\n            returncode = self.run_arv_normalize(['--version'])\n        self.assertVersionOutput(out, err)\n        self.assertEqual(0, returncode)\n"
  },
  {
    "path": "sdk/python/tests/test_arv_put.py",
    "content": "# -*- coding: utf-8 -*-\n\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport apiclient\nimport ciso8601\nimport copy\nimport datetime\nimport json\nimport logging\nimport multiprocessing\nimport os\nimport pwd\nimport random\nimport re\nimport select\nimport shutil\nimport signal\nimport subprocess\nimport sys\nimport tempfile\nimport time\nimport unittest\nimport uuid\n\nimport pytest\nfrom functools import partial\nfrom pathlib import Path\nfrom unittest import mock\n\nimport arvados\nimport arvados.commands.put as arv_put\nimport arvados.util\nfrom arvados._internal import basedirs\n\nfrom . import arvados_testutil as tutil\nfrom .arvados_testutil import ArvadosBaseTestCase, fake_httplib2_response\nfrom . import run_test_server\n\nclass ArvadosPutResumeCacheTest(ArvadosBaseTestCase):\n    CACHE_ARGSET = [\n        [],\n        ['/dev/null'],\n        ['/dev/null', '--filename', 'empty'],\n        ['/tmp']\n        ]\n\n    def tearDown(self):\n        super(ArvadosPutResumeCacheTest, self).tearDown()\n        try:\n            self.last_cache.destroy()\n        except AttributeError:\n            pass\n\n    def cache_path_from_arglist(self, arglist):\n        return arv_put.ResumeCache.make_path(arv_put.parse_arguments(arglist))\n\n    def test_cache_names_stable(self):\n        for argset in self.CACHE_ARGSET:\n            self.assertEqual(self.cache_path_from_arglist(argset),\n                              self.cache_path_from_arglist(argset),\n                              \"cache name changed for {}\".format(argset))\n\n    def test_cache_names_unique(self):\n        results = []\n        for argset in self.CACHE_ARGSET:\n            path = self.cache_path_from_arglist(argset)\n            self.assertNotIn(path, results)\n            results.append(path)\n\n    def test_cache_names_simple(self):\n        # The goal here is to make sure the filename doesn't use characters\n        # reserved by the filesystem.  
Feel free to adjust this regexp as\n        # long as it still does that.\n        bad_chars = re.compile(r'[^-\\.\\w]')\n        for argset in self.CACHE_ARGSET:\n            path = self.cache_path_from_arglist(argset)\n            self.assertFalse(bad_chars.search(os.path.basename(path)),\n                             \"path too exotic: {}\".format(path))\n\n    def test_cache_names_ignore_argument_order(self):\n        self.assertEqual(\n            self.cache_path_from_arglist(['a', 'b', 'c']),\n            self.cache_path_from_arglist(['c', 'a', 'b']))\n        self.assertEqual(\n            self.cache_path_from_arglist(['-', '--filename', 'stdin']),\n            self.cache_path_from_arglist(['--filename', 'stdin', '-']))\n\n    def test_cache_names_differ_for_similar_paths(self):\n        # This test needs names at / that don't exist on the real filesystem.\n        self.assertNotEqual(\n            self.cache_path_from_arglist(['/_arvputtest1', '/_arvputtest2']),\n            self.cache_path_from_arglist(['/_arvputtest1/_arvputtest2']))\n\n    def test_cache_names_ignore_irrelevant_arguments(self):\n        # Workaround: parse_arguments bails on --filename with a directory.\n        path1 = self.cache_path_from_arglist(['/tmp'])\n        args = arv_put.parse_arguments(['/tmp'])\n        args.filename = 'tmp'\n        path2 = arv_put.ResumeCache.make_path(args)\n        self.assertEqual(path1, path2,\n                         \"cache path considered --filename for directory\")\n        self.assertEqual(\n            self.cache_path_from_arglist(['-']),\n            self.cache_path_from_arglist(['-', '--max-manifest-depth', '1']),\n            \"cache path considered --max-manifest-depth for file\")\n\n    def test_cache_names_treat_negative_manifest_depths_identically(self):\n        base_args = ['/tmp', '--max-manifest-depth']\n        self.assertEqual(\n            self.cache_path_from_arglist(base_args + ['-1']),\n            self.cache_path_from_arglist(base_args + ['-2']))\n\n    def test_cache_names_treat_stdin_consistently(self):\n        self.assertEqual(\n            self.cache_path_from_arglist(['-', '--filename', 'test']),\n            self.cache_path_from_arglist(['/dev/stdin', '--filename', 'test']))\n\n    def test_cache_names_identical_for_synonymous_names(self):\n        self.assertEqual(\n            self.cache_path_from_arglist(['.']),\n            self.cache_path_from_arglist([os.path.realpath('.')]))\n        testdir = self.make_tmpdir()\n        looplink = os.path.join(testdir, 'loop')\n        os.symlink(testdir, looplink)\n        self.assertEqual(\n            self.cache_path_from_arglist([testdir]),\n            self.cache_path_from_arglist([looplink]))\n\n    def test_cache_names_different_by_api_host(self):\n        config = arvados.config.settings()\n        orig_host = config.get('ARVADOS_API_HOST')\n        try:\n            name1 = self.cache_path_from_arglist(['.'])\n            config['ARVADOS_API_HOST'] = 'x' + (orig_host or 'localhost')\n            self.assertNotEqual(name1, self.cache_path_from_arglist(['.']))\n        finally:\n            if orig_host is None:\n                del config['ARVADOS_API_HOST']\n            else:\n                config['ARVADOS_API_HOST'] = orig_host\n\n    @mock.patch('arvados.keep.KeepClient.head')\n    def test_resume_cache_with_current_stream_locators(self, keep_client_head):\n        keep_client_head.side_effect = [True]\n        thing = {}\n        thing['_current_stream_locators'] = 
['098f6bcd4621d373cade4e832627b4f6+4', '1f253c60a2306e0ee12fb6ce0c587904+6']\n        with tempfile.NamedTemporaryFile() as cachefile:\n            self.last_cache = arv_put.ResumeCache(cachefile.name)\n        self.last_cache.save(thing)\n        self.last_cache.close()\n        resume_cache = arv_put.ResumeCache(self.last_cache.filename)\n        self.assertNotEqual(None, resume_cache)\n\n    @mock.patch('arvados.keep.KeepClient.head')\n    def test_resume_cache_with_finished_streams(self, keep_client_head):\n        keep_client_head.side_effect = [True]\n        thing = {}\n        thing['_finished_streams'] = [['.', ['098f6bcd4621d373cade4e832627b4f6+4', '1f253c60a2306e0ee12fb6ce0c587904+6']]]\n        with tempfile.NamedTemporaryFile() as cachefile:\n            self.last_cache = arv_put.ResumeCache(cachefile.name)\n        self.last_cache.save(thing)\n        self.last_cache.close()\n        resume_cache = arv_put.ResumeCache(self.last_cache.filename)\n        self.assertNotEqual(None, resume_cache)\n\n    @mock.patch('arvados.keep.KeepClient.head')\n    def test_resume_cache_with_finished_streams_error_on_head(self, keep_client_head):\n        keep_client_head.side_effect = Exception('Locator not found')\n        thing = {}\n        thing['_finished_streams'] = [['.', ['098f6bcd4621d373cade4e832627b4f6+4', '1f253c60a2306e0ee12fb6ce0c587904+6']]]\n        with tempfile.NamedTemporaryFile() as cachefile:\n            self.last_cache = arv_put.ResumeCache(cachefile.name)\n        self.last_cache.save(thing)\n        self.last_cache.close()\n        resume_cache = arv_put.ResumeCache(self.last_cache.filename)\n        self.assertNotEqual(None, resume_cache)\n        resume_cache.check_cache()\n\n    def test_basic_cache_storage(self):\n        thing = ['test', 'list']\n        with tempfile.NamedTemporaryFile() as cachefile:\n            self.last_cache = arv_put.ResumeCache(cachefile.name)\n        self.last_cache.save(thing)\n        self.assertEqual(thing, self.last_cache.load())\n\n    def test_empty_cache(self):\n        with tempfile.NamedTemporaryFile() as cachefile:\n            cache = arv_put.ResumeCache(cachefile.name)\n        self.assertRaises(ValueError, cache.load)\n\n    def test_cache_persistent(self):\n        thing = ['test', 'list']\n        path = os.path.join(self.make_tmpdir(), 'cache')\n        cache = arv_put.ResumeCache(path)\n        cache.save(thing)\n        cache.close()\n        self.last_cache = arv_put.ResumeCache(path)\n        self.assertEqual(thing, self.last_cache.load())\n\n    def test_multiple_cache_writes(self):\n        thing = ['short', 'list']\n        with tempfile.NamedTemporaryFile() as cachefile:\n            self.last_cache = arv_put.ResumeCache(cachefile.name)\n        # Start writing an object longer than the one we test, to make\n        # sure the cache file gets truncated.\n        self.last_cache.save(['long', 'long', 'list'])\n        self.last_cache.save(thing)\n        self.assertEqual(thing, self.last_cache.load())\n\n    def test_cache_is_locked(self):\n        with tempfile.NamedTemporaryFile() as cachefile:\n            _ = arv_put.ResumeCache(cachefile.name)\n            self.assertRaises(arv_put.ResumeCacheConflict,\n                              arv_put.ResumeCache, cachefile.name)\n\n    def test_cache_stays_locked(self):\n        with tempfile.NamedTemporaryFile() as cachefile:\n            self.last_cache = arv_put.ResumeCache(cachefile.name)\n            path = cachefile.name\n        self.last_cache.save('test')\n     
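   # The original ResumeCache object still holds its lock on the cache file,\n        # so opening the same path again must raise ResumeCacheConflict.\n     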
   self.assertRaises(arv_put.ResumeCacheConflict,\n                          arv_put.ResumeCache, path)\n\n    def test_destroy_cache(self):\n        cachefile = tempfile.NamedTemporaryFile(delete=False)\n        try:\n            cache = arv_put.ResumeCache(cachefile.name)\n            cache.save('test')\n            cache.destroy()\n            try:\n                arv_put.ResumeCache(cachefile.name)\n            except arv_put.ResumeCacheConflict:\n                self.fail(\"could not load cache after destroying it\")\n            self.assertRaises(ValueError, cache.load)\n        finally:\n            if os.path.exists(cachefile.name):\n                os.unlink(cachefile.name)\n\n    def test_restart_cache(self):\n        path = os.path.join(self.make_tmpdir(), 'cache')\n        cache = arv_put.ResumeCache(path)\n        cache.save('test')\n        cache.restart()\n        self.assertRaises(ValueError, cache.load)\n        self.assertRaises(arv_put.ResumeCacheConflict,\n                          arv_put.ResumeCache, path)\n\n\nclass TestArvadosPutResumeCacheDir:\n    @pytest.fixture\n    def args(self, tmp_path):\n        return arv_put.parse_arguments([str(tmp_path)])\n\n    @pytest.mark.parametrize('cache_dir', [None, 'test-put'])\n    def test_cache_subdir(self, tmp_path, monkeypatch, cache_dir, args):\n        if cache_dir is None:\n            cache_dir = arv_put.ResumeCache.CACHE_DIR\n        else:\n            monkeypatch.setattr(arv_put.ResumeCache, 'CACHE_DIR', cache_dir)\n        monkeypatch.setattr(basedirs.BaseDirectories, 'storage_path', tmp_path.__truediv__)\n        actual = arv_put.ResumeCache.make_path(args)\n        assert isinstance(actual, str)\n        assert Path(actual).parent == (tmp_path / cache_dir)\n\n    def test_cache_relative_dir(self, tmp_path, monkeypatch, args):\n        expected = Path('rel', 'dir')\n        monkeypatch.setattr(Path, 'home', lambda: tmp_path)\n        monkeypatch.setattr(arv_put.ResumeCache, 'CACHE_DIR', str(expected))\n        actual = arv_put.ResumeCache.make_path(args)\n        assert isinstance(actual, str)\n        parent = Path(actual).parent\n        assert parent == (tmp_path / expected)\n        assert parent.is_dir()\n\n    def test_cache_absolute_dir(self, tmp_path, monkeypatch, args):\n        expected = tmp_path / 'arv-put'\n        monkeypatch.setattr(Path, 'home', lambda: tmp_path / 'home')\n        monkeypatch.setattr(arv_put.ResumeCache, 'CACHE_DIR', str(expected))\n        actual = arv_put.ResumeCache.make_path(args)\n        assert isinstance(actual, str)\n        parent = Path(actual).parent\n        assert parent == expected\n        assert parent.is_dir()\n\n\nclass TestArvadosPutUploadJobCacheDir:\n    @pytest.mark.parametrize('cache_dir', [None, 'test-put'])\n    def test_cache_subdir(self, tmp_path, monkeypatch, cache_dir):\n        def storage_path(self, subdir='.', mode=0o700):\n            path = tmp_path / subdir\n            path.mkdir(mode=mode)\n            return path\n        if cache_dir is None:\n            cache_dir = arv_put.ArvPutUploadJob.CACHE_DIR\n        else:\n            monkeypatch.setattr(arv_put.ArvPutUploadJob, 'CACHE_DIR', cache_dir)\n        monkeypatch.setattr(basedirs.BaseDirectories, 'storage_path', storage_path)\n        job = arv_put.ArvPutUploadJob([str(tmp_path)], use_cache=True)\n        job.destroy_cache()\n        assert Path(job._cache_filename).parent == (tmp_path / cache_dir)\n\n    def test_cache_relative_dir(self, tmp_path, monkeypatch):\n        expected = 
Path('rel', 'dir')\n        monkeypatch.setattr(Path, 'home', lambda: tmp_path)\n        monkeypatch.setattr(arv_put.ArvPutUploadJob, 'CACHE_DIR', str(expected))\n        job = arv_put.ArvPutUploadJob([str(tmp_path)], use_cache=True)\n        job.destroy_cache()\n        assert Path(job._cache_filename).parent == (tmp_path / expected)\n\n    def test_cache_absolute_dir(self, tmp_path, monkeypatch):\n        expected = tmp_path / 'arv-put'\n        monkeypatch.setattr(Path, 'home', lambda: tmp_path / 'home')\n        monkeypatch.setattr(arv_put.ArvPutUploadJob, 'CACHE_DIR', str(expected))\n        job = arv_put.ArvPutUploadJob([str(tmp_path)], use_cache=True)\n        job.destroy_cache()\n        assert Path(job._cache_filename).parent == expected\n\n\nclass ArvPutUploadJobTest(run_test_server.TestCaseWithServers,\n                          ArvadosBaseTestCase):\n\n    def setUp(self):\n        super(ArvPutUploadJobTest, self).setUp()\n        run_test_server.authorize_with('active')\n        # Temp files creation\n        self.tempdir = tempfile.mkdtemp()\n        subdir = os.path.join(self.tempdir, 'subdir')\n        os.mkdir(subdir)\n        data = \"x\" * 1024 # 1 KB\n        for i in range(1, 5):\n            with open(os.path.join(self.tempdir, str(i)), 'w') as f:\n                f.write(data * i)\n        with open(os.path.join(subdir, 'otherfile'), 'w') as f:\n            f.write(data * 5)\n        # Large temp file for resume test\n        _, self.large_file_name = tempfile.mkstemp()\n        fileobj = open(self.large_file_name, 'w')\n        # Make sure to write just a little more than one block\n        for _ in range((arvados.config.KEEP_BLOCK_SIZE>>20)+1):\n            data = random.choice(['x', 'y', 'z']) * 1024 * 1024 # 1 MiB\n            fileobj.write(data)\n        fileobj.close()\n        # Temp dir containing small files to be repacked\n        self.small_files_dir = tempfile.mkdtemp()\n        data = 'y' * 1024 * 1024 # 1 MB\n        for i in range(1, 70):\n            with open(os.path.join(self.small_files_dir, str(i)), 'w') as f:\n                f.write(data + str(i))\n        self.arvfile_write = getattr(arvados.arvfile.ArvadosFileWriter, 'write')\n        # Temp dir to hold a symlink to other temp dir\n        self.tempdir_with_symlink = tempfile.mkdtemp()\n        os.symlink(self.tempdir, os.path.join(self.tempdir_with_symlink, 'linkeddir'))\n        os.symlink(os.path.join(self.tempdir, '1'),\n                   os.path.join(self.tempdir_with_symlink, 'linkedfile'))\n\n    def tearDown(self):\n        super(ArvPutUploadJobTest, self).tearDown()\n        shutil.rmtree(self.tempdir)\n        os.unlink(self.large_file_name)\n        shutil.rmtree(self.small_files_dir)\n        shutil.rmtree(self.tempdir_with_symlink)\n\n    def test_non_regular_files_are_ignored_except_symlinks_to_dirs(self):\n        def pfunc(x):\n            with open(x, 'w') as f:\n                f.write('test')\n        fifo_filename = 'fifo-file'\n        fifo_path = os.path.join(self.tempdir_with_symlink, fifo_filename)\n        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))\n        os.mkfifo(fifo_path)\n        producer = multiprocessing.Process(target=pfunc, args=(fifo_path,))\n        producer.start()\n        cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink])\n        cwriter.start(save_collection=False)\n        if producer.exitcode is None:\n            # If the producer is still running, kill it. 
This should always be\n            # before any assertion that may fail.\n            producer.terminate()\n            producer.join(1)\n        self.assertIn('linkeddir', cwriter.manifest_text())\n        self.assertNotIn(fifo_filename, cwriter.manifest_text())\n\n    def test_symlinks_are_followed_by_default(self):\n        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))\n        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkedfile')))\n        cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink])\n        cwriter.start(save_collection=False)\n        self.assertIn('linkeddir', cwriter.manifest_text())\n        self.assertIn('linkedfile', cwriter.manifest_text())\n        cwriter.destroy_cache()\n\n    def test_symlinks_are_not_followed_when_requested(self):\n        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))\n        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkedfile')))\n        cwriter = arv_put.ArvPutUploadJob([self.tempdir_with_symlink],\n                                          follow_links=False)\n        cwriter.start(save_collection=False)\n        self.assertNotIn('linkeddir', cwriter.manifest_text())\n        self.assertNotIn('linkedfile', cwriter.manifest_text())\n        cwriter.destroy_cache()\n        # Check for bug #17800: passed symlinks should also be ignored.\n        linked_dir = os.path.join(self.tempdir_with_symlink, 'linkeddir')\n        cwriter = arv_put.ArvPutUploadJob([linked_dir], follow_links=False)\n        cwriter.start(save_collection=False)\n        self.assertNotIn('linkeddir', cwriter.manifest_text())\n        cwriter.destroy_cache()\n\n    def test_no_empty_collection_saved(self):\n        self.assertTrue(os.path.islink(os.path.join(self.tempdir_with_symlink, 'linkeddir')))\n        linked_dir = os.path.join(self.tempdir_with_symlink, 'linkeddir')\n        cwriter = arv_put.ArvPutUploadJob([linked_dir], follow_links=False)\n        cwriter.start(save_collection=True)\n        self.assertIsNone(cwriter.manifest_locator())\n        self.assertEqual('', cwriter.manifest_text())\n        cwriter.destroy_cache()\n\n    def test_passing_nonexistent_path_raises_exception(self):\n        uuid_str = str(uuid.uuid4())\n        with self.assertRaises(arv_put.PathDoesNotExistError):\n            arv_put.ArvPutUploadJob([\"/this/path/does/not/exist/{}\".format(uuid_str)])\n\n    def test_writer_works_without_cache(self):\n        cwriter = arv_put.ArvPutUploadJob(['/dev/null'], resume=False)\n        cwriter.start(save_collection=False)\n        self.assertEqual(\". d41d8cd98f00b204e9800998ecf8427e+0 0:0:null\\n\", cwriter.manifest_text())\n\n    def test_writer_works_with_cache(self):\n        with tempfile.NamedTemporaryFile() as f:\n            f.write(b'foo')\n            f.flush()\n            cwriter = arv_put.ArvPutUploadJob([f.name])\n            cwriter.start(save_collection=False)\n            self.assertEqual(0, cwriter.bytes_skipped)\n            self.assertEqual(3, cwriter.bytes_written)\n            # Don't destroy the cache, and start another upload\n            cwriter_new = arv_put.ArvPutUploadJob([f.name])\n            cwriter_new.start(save_collection=False)\n            cwriter_new.destroy_cache()\n            self.assertEqual(3, cwriter_new.bytes_skipped)\n            self.assertEqual(3, cwriter_new.bytes_written)\n\n    def make_progress_tester(self):\n        progression = []\n        def record_func(written, expected):\n            progression.append((written, expected))\n        return progression, record_func\n\n    def test_progress_reporting(self):\n        with tempfile.NamedTemporaryFile() as f:\n            f.write(b'foo')\n            f.flush()\n            for expect_count in (None, 8):\n                progression, reporter = self.make_progress_tester()\n                cwriter = arv_put.ArvPutUploadJob([f.name],\n                                                  reporter=reporter)\n                cwriter.bytes_expected = expect_count\n                cwriter.start(save_collection=False)\n                cwriter.destroy_cache()\n                self.assertIn((3, expect_count), progression)\n\n    def test_writer_upload_directory(self):\n        cwriter = arv_put.ArvPutUploadJob([self.tempdir])\n        cwriter.start(save_collection=False)\n        cwriter.destroy_cache()\n        self.assertEqual(1024*(1+2+3+4+5), cwriter.bytes_written)\n\n    def test_resume_large_file_upload(self):\n        def wrapped_write(*args, **kwargs):\n            data = args[1]\n            # Exit only on last block\n            if len(data) < arvados.config.KEEP_BLOCK_SIZE:\n                # Simulate a checkpoint before quitting. 
Ensure block commit.\n                self.writer._update(final=True)\n                raise SystemExit(\"Simulated error\")\n            return self.arvfile_write(*args, **kwargs)\n\n        with mock.patch('arvados.arvfile.ArvadosFileWriter.write',\n                        autospec=True) as mocked_write:\n            mocked_write.side_effect = wrapped_write\n            writer = arv_put.ArvPutUploadJob([self.large_file_name],\n                                             replication_desired=1)\n            # We'll be accessing from inside the wrapper\n            self.writer = writer\n            with self.assertRaises(SystemExit):\n                writer.start(save_collection=False)\n            # Confirm that the file was partially uploaded\n            self.assertGreater(writer.bytes_written, 0)\n            self.assertLess(writer.bytes_written,\n                            os.path.getsize(self.large_file_name))\n        # Retry the upload\n        writer2 = arv_put.ArvPutUploadJob([self.large_file_name],\n                                          replication_desired=1)\n        writer2.start(save_collection=False)\n        self.assertEqual(writer.bytes_written + writer2.bytes_written - writer2.bytes_skipped,\n                         os.path.getsize(self.large_file_name))\n        writer2.destroy_cache()\n        del(self.writer)\n\n    # Test for bug #11002\n    def test_graceful_exit_while_repacking_small_blocks(self):\n        def wrapped_commit(*args, **kwargs):\n            raise SystemExit(\"Simulated error\")\n\n        with mock.patch('arvados.arvfile._BlockManager.commit_bufferblock',\n                        autospec=True) as mocked_commit:\n            mocked_commit.side_effect = wrapped_commit\n            # Upload a little more than 1 block; wrapped_commit makes the first\n            # block commit fail.\n            # arv-put should not exit with an exception from trying to commit the\n            # collection while it's in an inconsistent state.\n            writer = arv_put.ArvPutUploadJob([self.small_files_dir],\n                                             replication_desired=1)\n            try:\n                with self.assertRaises(SystemExit):\n                    writer.start(save_collection=False)\n            except arvados.arvfile.UnownedBlockError:\n                self.fail(\"arv-put command is trying to use a corrupted BlockManager. 
See https://dev.arvados.org/issues/11002\")\n        writer.destroy_cache()\n\n    def test_no_resume_when_asked(self):\n        def wrapped_write(*args, **kwargs):\n            data = args[1]\n            # Exit only on last block\n            if len(data) < arvados.config.KEEP_BLOCK_SIZE:\n                # Simulate a checkpoint before quitting.\n                self.writer._update()\n                raise SystemExit(\"Simulated error\")\n            return self.arvfile_write(*args, **kwargs)\n\n        with mock.patch('arvados.arvfile.ArvadosFileWriter.write',\n                        autospec=True) as mocked_write:\n            mocked_write.side_effect = wrapped_write\n            writer = arv_put.ArvPutUploadJob([self.large_file_name],\n                                             replication_desired=1)\n            # We'll be accessing from inside the wrapper\n            self.writer = writer\n            with self.assertRaises(SystemExit):\n                writer.start(save_collection=False)\n            # Confirm that the file was partially uploaded\n            self.assertGreater(writer.bytes_written, 0)\n            self.assertLess(writer.bytes_written,\n                            os.path.getsize(self.large_file_name))\n        # Retry the upload, this time without resume\n        writer2 = arv_put.ArvPutUploadJob([self.large_file_name],\n                                          replication_desired=1,\n                                          resume=False)\n        writer2.start(save_collection=False)\n        self.assertEqual(writer2.bytes_skipped, 0)\n        self.assertEqual(writer2.bytes_written,\n                         os.path.getsize(self.large_file_name))\n        writer2.destroy_cache()\n        del(self.writer)\n\n    def test_no_resume_when_no_cache(self):\n        def wrapped_write(*args, **kwargs):\n            data = args[1]\n            # Exit only on last block\n            if len(data) < arvados.config.KEEP_BLOCK_SIZE:\n                # Simulate a checkpoint before quitting.\n                self.writer._update()\n                raise SystemExit(\"Simulated error\")\n            return self.arvfile_write(*args, **kwargs)\n\n        with mock.patch('arvados.arvfile.ArvadosFileWriter.write',\n                        autospec=True) as mocked_write:\n            mocked_write.side_effect = wrapped_write\n            writer = arv_put.ArvPutUploadJob([self.large_file_name],\n                                             replication_desired=1)\n            # We'll be accessing from inside the wrapper\n            self.writer = writer\n            with self.assertRaises(SystemExit):\n                writer.start(save_collection=False)\n            # Confirm that the file was partially uploaded\n            self.assertGreater(writer.bytes_written, 0)\n            self.assertLess(writer.bytes_written,\n                            os.path.getsize(self.large_file_name))\n        # Retry the upload, this time without cache usage\n        writer2 = arv_put.ArvPutUploadJob([self.large_file_name],\n                                          replication_desired=1,\n                                          resume=False,\n                                          use_cache=False)\n        writer2.start(save_collection=False)\n        self.assertEqual(writer2.bytes_skipped, 0)\n        self.assertEqual(writer2.bytes_written,\n                         os.path.getsize(self.large_file_name))\n        writer2.destroy_cache()\n        del(self.writer)\n\n    def 
test_dry_run_feature(self):\n        def wrapped_write(*args, **kwargs):\n            data = args[1]\n            # Exit only on last block\n            if len(data) < arvados.config.KEEP_BLOCK_SIZE:\n                # Simulate a checkpoint before quitting.\n                self.writer._update()\n                raise SystemExit(\"Simulated error\")\n            return self.arvfile_write(*args, **kwargs)\n\n        with mock.patch('arvados.arvfile.ArvadosFileWriter.write',\n                        autospec=True) as mocked_write:\n            mocked_write.side_effect = wrapped_write\n            writer = arv_put.ArvPutUploadJob([self.large_file_name],\n                                             replication_desired=1)\n            # We'll be accessing from inside the wrapper\n            self.writer = writer\n            with self.assertRaises(SystemExit):\n                writer.start(save_collection=False)\n            # Confirm that the file was partially uploaded\n            self.assertGreater(writer.bytes_written, 0)\n            self.assertLess(writer.bytes_written,\n                            os.path.getsize(self.large_file_name))\n        with self.assertRaises(arv_put.ArvPutUploadIsPending):\n            # Retry the upload using dry_run to check if there is a pending upload\n            writer2 = arv_put.ArvPutUploadJob([self.large_file_name],\n                                              replication_desired=1,\n                                              dry_run=True)\n        # Complete the pending upload\n        writer3 = arv_put.ArvPutUploadJob([self.large_file_name],\n                                          replication_desired=1)\n        writer3.start(save_collection=False)\n        with self.assertRaises(arv_put.ArvPutUploadNotPending):\n            # Confirm there's no pending upload with dry_run=True\n            writer4 = arv_put.ArvPutUploadJob([self.large_file_name],\n                                              replication_desired=1,\n                                              dry_run=True)\n        # Test obvious cases\n        with self.assertRaises(arv_put.ArvPutUploadIsPending):\n            arv_put.ArvPutUploadJob([self.large_file_name],\n                                    replication_desired=1,\n                                    dry_run=True,\n                                    resume=False,\n                                    use_cache=False)\n        with self.assertRaises(arv_put.ArvPutUploadIsPending):\n            arv_put.ArvPutUploadJob([self.large_file_name],\n                                    replication_desired=1,\n                                    dry_run=True,\n                                    resume=False)\n        del(self.writer)\n\nclass CachedManifestValidationTest(ArvadosBaseTestCase):\n    class MockedPut(arv_put.ArvPutUploadJob):\n        def __init__(self, cached_manifest=None):\n            self._state = copy.deepcopy(arv_put.ArvPutUploadJob.EMPTY_STATE)\n            self._state['manifest'] = cached_manifest\n            self._api_client = mock.MagicMock()\n            self.logger = mock.MagicMock()\n            self.num_retries = 1\n\n    def datetime_to_hex(self, dt):\n        return hex(int(time.mktime(dt.timetuple())))[2:]\n\n    def setUp(self):\n        super(CachedManifestValidationTest, self).setUp()\n        self.block1 = \"fdba98970961edb29f88241b9d99d890\" # foo\n        self.block2 = \"37b51d194a7513e45b56f6524f2d51f2\" # bar\n        self.template = \". 
\"+self.block1+\"+3+Asignature@%s \"+self.block2+\"+3+Anothersignature@%s 0:3:foofile.txt 3:6:barfile.txt\\n\"\n\n    def test_empty_cached_manifest_is_valid(self):\n        put_mock = self.MockedPut()\n        self.assertEqual(None, put_mock._state.get('manifest'))\n        self.assertTrue(put_mock._cached_manifest_valid())\n        put_mock._state['manifest'] = ''\n        self.assertTrue(put_mock._cached_manifest_valid())\n\n    def test_signature_cases(self):\n        now = datetime.datetime.utcnow()\n        yesterday = now - datetime.timedelta(days=1)\n        lastweek = now - datetime.timedelta(days=7)\n        tomorrow = now + datetime.timedelta(days=1)\n        nextweek = now + datetime.timedelta(days=7)\n\n        def mocked_head(blocks={}, loc=None):\n            blk = loc.split('+', 1)[0]\n            if blocks.get(blk):\n                return True\n            raise arvados.errors.KeepRequestError(\"mocked error - block invalid\")\n\n        # Block1_expiration, Block2_expiration, Block1_HEAD, Block2_HEAD, Expectation\n        cases = [\n            # All expired, reset cache - OK\n            (yesterday, lastweek, False, False, True),\n            (lastweek, yesterday, False, False, True),\n            # All non-expired valid blocks - OK\n            (tomorrow, nextweek, True, True, True),\n            (nextweek, tomorrow, True, True, True),\n            # All non-expired invalid blocks - Not OK\n            (tomorrow, nextweek, False, False, False),\n            (nextweek, tomorrow, False, False, False),\n            # One non-expired valid block - OK\n            (tomorrow, yesterday, True, False, True),\n            (yesterday, tomorrow, False, True, True),\n            # One non-expired invalid block - Not OK\n            (tomorrow, yesterday, False, False, False),\n            (yesterday, tomorrow, False, False, False),\n        ]\n        for case in cases:\n            b1_expiration, b2_expiration, b1_valid, b2_valid, outcome = case\n            head_responses = {\n                self.block1: b1_valid,\n                self.block2: b2_valid,\n            }\n            cached_manifest = self.template % (\n                self.datetime_to_hex(b1_expiration),\n                self.datetime_to_hex(b2_expiration),\n            )\n            arvput = self.MockedPut(cached_manifest)\n            with mock.patch('arvados.collection.KeepClient.head') as head_mock:\n                head_mock.side_effect = partial(mocked_head, head_responses)\n                self.assertEqual(outcome, arvput._cached_manifest_valid(),\n                    \"Case '%s' should have produced outcome '%s'\" % (case, outcome)\n                )\n                if b1_expiration > now or b2_expiration > now:\n                    # A HEAD request should have been done\n                    head_mock.assert_called_once()\n                else:\n                    head_mock.assert_not_called()\n\n\nclass ArvadosExpectedBytesTest(ArvadosBaseTestCase):\n    TEST_SIZE = os.path.getsize(__file__)\n\n    def test_expected_bytes_for_file(self):\n        writer = arv_put.ArvPutUploadJob([__file__])\n        self.assertEqual(self.TEST_SIZE,\n                         writer.bytes_expected)\n\n    def test_expected_bytes_for_tree(self):\n        tree = self.make_tmpdir()\n        shutil.copyfile(__file__, os.path.join(tree, 'one'))\n        shutil.copyfile(__file__, os.path.join(tree, 'two'))\n\n        writer = arv_put.ArvPutUploadJob([tree])\n        self.assertEqual(self.TEST_SIZE * 2,\n                        
 writer.bytes_expected)\n        writer = arv_put.ArvPutUploadJob([tree, __file__])\n        self.assertEqual(self.TEST_SIZE * 3,\n                         writer.bytes_expected)\n\n    def test_expected_bytes_for_device(self):\n        writer = arv_put.ArvPutUploadJob(['/dev/null'], use_cache=False, resume=False)\n        self.assertIsNone(writer.bytes_expected)\n        writer = arv_put.ArvPutUploadJob([__file__, '/dev/null'])\n        self.assertIsNone(writer.bytes_expected)\n\n\nclass ArvadosPutReportTest(ArvadosBaseTestCase):\n    def test_machine_progress(self):\n        for count, total in [(0, 1), (0, None), (1, None), (235, 9283)]:\n            expect = \": {} written {} total\\n\".format(\n                count, -1 if (total is None) else total)\n            self.assertTrue(\n                arv_put.machine_progress(count, total).endswith(expect))\n\n    def test_known_human_progress(self):\n        for count, total in [(0, 1), (2, 4), (45, 60)]:\n            expect = '{:.1%}'.format(1.0*count/total)\n            actual = arv_put.human_progress(count, total)\n            self.assertTrue(actual.startswith('\\r'))\n            self.assertIn(expect, actual)\n\n    def test_unknown_human_progress(self):\n        for count in [1, 20, 300, 4000, 50000]:\n            self.assertTrue(re.search(r'\\b{}\\b'.format(count),\n                                      arv_put.human_progress(count, None)))\n\n\nclass ArvPutLogFormatterTest(ArvadosBaseTestCase):\n    matcher = r'\\(X-Request-Id: req-[a-z0-9]{20}\\)'\n\n    def setUp(self):\n        super(ArvPutLogFormatterTest, self).setUp()\n        self.stderr = tutil.StringIO()\n        self.loggingHandler = logging.StreamHandler(self.stderr)\n        self.loggingHandler.setFormatter(\n            arv_put.ArvPutLogFormatter(arvados.util.new_request_id()))\n        self.logger = logging.getLogger()\n        self.logger.addHandler(self.loggingHandler)\n        self.logger.setLevel(logging.DEBUG)\n\n    def tearDown(self):\n        self.logger.removeHandler(self.loggingHandler)\n        self.stderr.close()\n        self.stderr = None\n        super(ArvPutLogFormatterTest, self).tearDown()\n\n    def test_request_id_logged_only_once_on_error(self):\n        self.logger.error('Ooops, something bad happened.')\n        self.logger.error('Another bad thing just happened.')\n        log_lines = self.stderr.getvalue().split('\\n')[:-1]\n        self.assertEqual(2, len(log_lines))\n        self.assertRegex(log_lines[0], self.matcher)\n        self.assertNotRegex(log_lines[1], self.matcher)\n\n    def test_request_id_logged_only_once_on_debug(self):\n        self.logger.debug('This is just a debug message.')\n        self.logger.debug('Another message, move along.')\n        log_lines = self.stderr.getvalue().split('\\n')[:-1]\n        self.assertEqual(2, len(log_lines))\n        self.assertRegex(log_lines[0], self.matcher)\n        self.assertNotRegex(log_lines[1], self.matcher)\n\n    def test_request_id_not_logged_on_info(self):\n        self.logger.info('This should be a useful message')\n        log_lines = self.stderr.getvalue().split('\\n')[:-1]\n        self.assertEqual(1, len(log_lines))\n        self.assertNotRegex(log_lines[0], self.matcher)\n\nclass ArvadosPutTest(run_test_server.TestCaseWithServers,\n                     ArvadosBaseTestCase,\n                     tutil.VersionChecker):\n    MAIN_SERVER = {}\n    Z_UUID = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'\n\n    def call_main_with_args(self, args):\n        self.main_stdout.seek(0, 0)\n        
self.main_stdout.truncate(0)\n        self.main_stderr.seek(0, 0)\n        self.main_stderr.truncate(0)\n        return arv_put.main(args, self.main_stdout, self.main_stderr)\n\n    def call_main_on_test_file(self, args=[]):\n        with self.make_test_file() as testfile:\n            path = testfile.name\n            self.call_main_with_args(['--stream', '--no-progress'] + args + [path])\n        self.assertTrue(\n            os.path.exists(os.path.join(os.environ['KEEP_LOCAL_STORE'],\n                                        '098f6bcd4621d373cade4e832627b4f6')),\n            \"did not find file stream in Keep store\")\n\n    def setUp(self):\n        super(ArvadosPutTest, self).setUp()\n        run_test_server.authorize_with('active')\n        arv_put.api_client = None\n        self.main_stdout = tutil.StringIO()\n        self.main_stderr = tutil.StringIO()\n        self.loggingHandler = logging.StreamHandler(self.main_stderr)\n        self.loggingHandler.setFormatter(\n            arv_put.ArvPutLogFormatter(arvados.util.new_request_id()))\n        logging.getLogger().addHandler(self.loggingHandler)\n\n    def tearDown(self):\n        logging.getLogger().removeHandler(self.loggingHandler)\n        for outbuf in ['main_stdout', 'main_stderr']:\n            if hasattr(self, outbuf):\n                getattr(self, outbuf).close()\n                delattr(self, outbuf)\n        super(ArvadosPutTest, self).tearDown()\n\n    def test_version_argument(self):\n        with tutil.redirected_streams(\n                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):\n            with self.assertRaises(SystemExit):\n                self.call_main_with_args(['--version'])\n        self.assertVersionOutput(out, err)\n\n    def test_simple_file_put(self):\n        self.call_main_on_test_file()\n\n    def test_put_with_unwriteable_cache_dir(self):\n        orig_cachedir = arv_put.ResumeCache.CACHE_DIR\n        cachedir = self.make_tmpdir()\n        os.chmod(cachedir, 0o0)\n        arv_put.ResumeCache.CACHE_DIR = cachedir\n        try:\n            self.call_main_on_test_file()\n        finally:\n            arv_put.ResumeCache.CACHE_DIR = orig_cachedir\n            os.chmod(cachedir, 0o700)\n\n    def test_put_with_unwritable_cache_subdir(self):\n        orig_cachedir = arv_put.ResumeCache.CACHE_DIR\n        cachedir = self.make_tmpdir()\n        os.chmod(cachedir, 0o0)\n        arv_put.ResumeCache.CACHE_DIR = os.path.join(cachedir, 'cachedir')\n        try:\n            self.call_main_on_test_file()\n        finally:\n            arv_put.ResumeCache.CACHE_DIR = orig_cachedir\n            os.chmod(cachedir, 0o700)\n\n    def test_put_block_replication(self):\n        self.call_main_on_test_file()\n        arv_put.api_client = None\n        with mock.patch('arvados.collection.KeepClient.local_store_put') as put_mock:\n            put_mock.return_value = 'acbd18db4cc2f85cedef654fccc4a4d8+3'\n            self.call_main_on_test_file(['--replication', '1'])\n            self.call_main_on_test_file(['--replication', '4'])\n            self.call_main_on_test_file(['--replication', '5'])\n            self.assertEqual(\n                [x[-1].get('copies') for x in put_mock.call_args_list],\n                [1, 4, 5])\n\n    def test_normalize(self):\n        testfile1 = self.make_test_file()\n        testfile2 = self.make_test_file()\n        test_paths = [testfile1.name, testfile2.name]\n        # Reverse-sort the paths, so normalization must change their order.\n        
test_paths.sort(reverse=True)\n        self.call_main_with_args(['--stream', '--no-progress', '--normalize'] +\n                                 test_paths)\n        manifest = self.main_stdout.getvalue()\n        # Assert the second file we specified appears first in the manifest.\n        file_indices = [manifest.find(':' + os.path.basename(path))\n                        for path in test_paths]\n        self.assertGreater(*file_indices)\n\n    def test_error_name_without_collection(self):\n        self.assertRaises(SystemExit, self.call_main_with_args,\n                          ['--name', 'test without Collection',\n                           '--stream', '/dev/null'])\n\n    def test_error_when_project_not_found(self):\n        self.assertRaises(SystemExit,\n                          self.call_main_with_args,\n                          ['--project-uuid', self.Z_UUID])\n\n    def test_error_bad_project_uuid(self):\n        self.assertRaises(SystemExit,\n                          self.call_main_with_args,\n                          ['--project-uuid', self.Z_UUID, '--stream'])\n\n    def test_error_when_excluding_absolute_path(self):\n        tmpdir = self.make_tmpdir()\n        self.assertRaises(SystemExit,\n                          self.call_main_with_args,\n                          ['--exclude', '/some/absolute/path/*',\n                           tmpdir])\n\n    def test_api_error_handling(self):\n        coll_save_mock = mock.Mock(name='arv.collection.Collection().save_new()')\n        coll_save_mock.side_effect = arvados.errors.ApiError(\n            fake_httplib2_response(403), b'{}')\n        with mock.patch('arvados.collection.Collection.save_new',\n                        new=coll_save_mock):\n            with self.assertRaises(SystemExit) as exc_test:\n                self.call_main_with_args(['/dev/null'])\n            self.assertLess(0, exc_test.exception.args[0])\n            self.assertLess(0, coll_save_mock.call_count)\n            self.assertEqual(\"\", self.main_stdout.getvalue())\n\n    def test_request_id_logging_on_error(self):\n        matcher = r'\\(X-Request-Id: req-[a-z0-9]{20}\\)\\n'\n        coll_save_mock = mock.Mock(name='arv.collection.Collection().save_new()')\n        coll_save_mock.side_effect = arvados.errors.ApiError(\n            fake_httplib2_response(403), b'{}')\n        with mock.patch('arvados.collection.Collection.save_new',\n                        new=coll_save_mock):\n            with self.assertRaises(SystemExit):\n                self.call_main_with_args(['/dev/null'])\n            self.assertRegex(\n                self.main_stderr.getvalue(), matcher)\n\n\nclass ArvPutIntegrationTest(run_test_server.TestCaseWithServers,\n                            ArvadosBaseTestCase):\n    MAIN_SERVER = {}\n    KEEP_SERVER = {'blob_signing': True}\n    PROJECT_UUID = run_test_server.fixture('groups')['aproject']['uuid']\n\n    @classmethod\n    def setUpClass(cls):\n        super(ArvPutIntegrationTest, cls).setUpClass()\n        cls.ENVIRON = os.environ.copy()\n        cls.ENVIRON['PYTHONPATH'] = ':'.join(sys.path)\n\n    def datetime_to_hex(self, dt):\n        return hex(int(time.mktime(dt.timetuple())))[2:]\n\n    def setUp(self):\n        super(ArvPutIntegrationTest, self).setUp()\n        arv_put.api_client = None\n\n    def authorize_with(self, token_name):\n        run_test_server.authorize_with(token_name)\n        for v in [\"ARVADOS_API_HOST\",\n                  \"ARVADOS_API_HOST_INSECURE\",\n                  \"ARVADOS_API_TOKEN\"]:\n        
    self.ENVIRON[v] = arvados.config.settings()[v]\n        arv_put.api_client = arvados.api('v1')\n\n    def current_user(self):\n        return arv_put.api_client.users().current().execute()\n\n    def test_check_real_project_found(self):\n        self.authorize_with('active')\n        self.assertTrue(arv_put.desired_project_uuid(arv_put.api_client, self.PROJECT_UUID, 0),\n                        \"did not correctly find test fixture project\")\n\n    def test_check_error_finding_nonexistent_uuid(self):\n        BAD_UUID = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'\n        self.authorize_with('active')\n        try:\n            result = arv_put.desired_project_uuid(arv_put.api_client, BAD_UUID,\n                                                  0)\n        except ValueError as error:\n            self.assertIn(BAD_UUID, str(error))\n        else:\n            self.assertFalse(result, \"incorrectly found nonexistent project\")\n\n    def test_check_error_finding_nonexistent_project(self):\n        BAD_UUID = 'zzzzz-tpzed-zzzzzzzzzzzzzzz'\n        self.authorize_with('active')\n        with self.assertRaises(apiclient.errors.HttpError):\n            arv_put.desired_project_uuid(arv_put.api_client, BAD_UUID,\n                                                  0)\n\n    def test_short_put_from_stdin(self):\n        # Have to run this as an integration test since arv-put can't\n        # read from the tests' stdin.\n        # arv-put usually can't stat(os.path.realpath('/dev/stdin')) in this\n        # case, because the /proc entry is already gone by the time it tries.\n        pipe = subprocess.Popen(\n            [sys.executable, arv_put.__file__, '--stream'],\n            stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n            stderr=subprocess.STDOUT, env=self.ENVIRON)\n        pipe.stdin.write(b'stdin test\\xa6\\n')\n        pipe.stdin.close()\n        deadline = time.time() + 5\n        while (pipe.poll() is None) and (time.time() < deadline):\n            time.sleep(.1)\n        returncode = pipe.poll()\n        if returncode is None:\n            pipe.terminate()\n            self.fail(\"arv-put did not PUT from stdin within 5 seconds\")\n        elif returncode != 0:\n            sys.stdout.write(pipe.stdout.read())\n            self.fail(\"arv-put returned exit code {}\".format(returncode))\n        self.assertIn('1cb671b355a0c23d5d1c61d59cdb1b2b+12',\n                      pipe.stdout.read().decode())\n\n    def test_sigint_logs_request_id(self):\n        # Start arv-put, give it a chance to start up, send SIGINT,\n        # and check that its output includes the X-Request-Id.\n        input_stream = subprocess.Popen(\n            ['sleep', '10'],\n            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n        pipe = subprocess.Popen(\n            [sys.executable, arv_put.__file__, '--stream'],\n            stdin=input_stream.stdout, stdout=subprocess.PIPE,\n            stderr=subprocess.STDOUT, env=self.ENVIRON)\n        # Wait for arv-put child process to print something (i.e., a\n        # log message) so we know its signal handler is installed.\n        select.select([pipe.stdout], [], [], 10)\n        pipe.send_signal(signal.SIGINT)\n        deadline = time.time() + 5\n        while (pipe.poll() is None) and (time.time() < deadline):\n            time.sleep(.1)\n        returncode = pipe.poll()\n        input_stream.terminate()\n        if returncode is None:\n            pipe.terminate()\n            self.fail(\"arv-put did not exit within 5 seconds\")\n        
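# The SIGINT handler is expected to log a line that includes the\n        # X-Request-Id before arv-put exits.\n        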
self.assertRegex(pipe.stdout.read().decode(), r'\\(X-Request-Id: req-[a-z0-9]{20}\\)')\n\n    def test_ArvPutSignedManifest(self):\n        # ArvPutSignedManifest runs \"arv-put foo\" and then attempts to get\n        # the newly created manifest from the API server, testing to confirm\n        # that the block locators in the returned manifest are signed.\n        self.authorize_with('active')\n\n        # Before doing anything, demonstrate that the collection\n        # we're about to create is not present in our test fixture.\n        manifest_uuid = \"00b4e9f40ac4dd432ef89749f1c01e74+47\"\n        with self.assertRaises(apiclient.errors.HttpError):\n            arv_put.api_client.collections().get(\n                uuid=manifest_uuid).execute()\n\n        datadir = self.make_tmpdir()\n        with open(os.path.join(datadir, \"foo\"), \"w\") as f:\n            f.write(\"The quick brown fox jumped over the lazy dog\")\n        p = subprocess.Popen([sys.executable, arv_put.__file__,\n                              os.path.join(datadir, 'foo')],\n                             stdout=subprocess.PIPE,\n                             stderr=subprocess.PIPE,\n                             env=self.ENVIRON)\n        (_, err) = p.communicate()\n        self.assertRegex(err.decode(), r'INFO: Collection saved as ')\n        self.assertEqual(p.returncode, 0)\n\n        # The manifest text stored in the API server under the same\n        # manifest UUID must use signed locators.\n        c = arv_put.api_client.collections().get(uuid=manifest_uuid).execute()\n        self.assertRegex(\n            c['manifest_text'],\n            r'^\\. 08a008a01d498c404b0c30852b39d3b8\\+44\\+A[0-9a-f]+@[0-9a-f]+ 0:44:foo\\n')\n\n        os.remove(os.path.join(datadir, \"foo\"))\n        os.rmdir(datadir)\n\n    def run_and_find_collection(self, text, extra_args=[]):\n        self.authorize_with('active')\n        pipe = subprocess.Popen(\n            [sys.executable, arv_put.__file__] + extra_args,\n            stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE, env=self.ENVIRON)\n        stdout, stderr = pipe.communicate(text.encode())\n        self.assertRegex(stderr.decode(), r'INFO: Collection (updated:|saved as)')\n        search_key = ('portable_data_hash'\n                      if '--portable-data-hash' in extra_args else 'uuid')\n        collection_list = arvados.api('v1').collections().list(\n            filters=[[search_key, '=', stdout.decode().strip()]]\n        ).execute().get('items', [])\n        self.assertEqual(1, len(collection_list))\n        return collection_list[0]\n\n    def test_all_expired_signatures_invalidates_cache(self):\n        self.authorize_with('active')\n        tmpdir = self.make_tmpdir()\n        with open(os.path.join(tmpdir, 'somefile.txt'), 'w') as f:\n            f.write('foo')\n        # Upload a directory and get the cache file name\n        p = subprocess.Popen([sys.executable, arv_put.__file__, tmpdir],\n                             stdout=subprocess.PIPE,\n                             stderr=subprocess.PIPE,\n                             env=self.ENVIRON)\n        (_, err) = p.communicate()\n        self.assertRegex(err.decode(), r'INFO: Creating new cache file at ')\n        self.assertEqual(p.returncode, 0)\n        cache_filepath = re.search(r'INFO: Creating new cache file at (.*)',\n                                   err.decode()).groups()[0]\n        self.assertTrue(os.path.isfile(cache_filepath))\n        # Load the cache file contents 
and modify the manifest to simulate\n        # an expired access token\n        with open(cache_filepath, 'r') as c:\n            cache = json.load(c)\n        self.assertRegex(cache['manifest'], r'\\+A\\S+\\@')\n        a_month_ago = datetime.datetime.now() - datetime.timedelta(days=30)\n        cache['manifest'] = re.sub(\n            r'\\@.*? ',\n            \"@{} \".format(self.datetime_to_hex(a_month_ago)),\n            cache['manifest'])\n        with open(cache_filepath, 'w') as c:\n            c.write(json.dumps(cache))\n        # Re-run the upload and expect to get an invalid cache message\n        p = subprocess.Popen([sys.executable, arv_put.__file__, tmpdir],\n                             stdout=subprocess.PIPE,\n                             stderr=subprocess.PIPE,\n                             env=self.ENVIRON)\n        (_, err) = p.communicate()\n        self.assertRegex(\n            err.decode(),\n            r'INFO: Cache expired, starting from scratch.*')\n        self.assertEqual(p.returncode, 0)\n\n    def test_invalid_signature_in_cache(self):\n        for batch_mode in [False, True]:\n            self.authorize_with('active')\n            tmpdir = self.make_tmpdir()\n            with open(os.path.join(tmpdir, 'somefile.txt'), 'w') as f:\n                f.write('foo')\n            # Upload a directory and get the cache file name\n            arv_put_args = [tmpdir]\n            if batch_mode:\n                arv_put_args = ['--batch'] + arv_put_args\n            p = subprocess.Popen([sys.executable, arv_put.__file__] + arv_put_args,\n                                stdout=subprocess.PIPE,\n                                stderr=subprocess.PIPE,\n                                env=self.ENVIRON)\n            (_, err) = p.communicate()\n            self.assertRegex(err.decode(), r'INFO: Creating new cache file at ')\n            self.assertEqual(p.returncode, 0)\n            cache_filepath = re.search(r'INFO: Creating new cache file at (.*)',\n                                    err.decode()).groups()[0]\n            self.assertTrue(os.path.isfile(cache_filepath))\n            # Load the cache file contents and modify the manifest to simulate\n            # an invalid access token\n            with open(cache_filepath, 'r') as c:\n                cache = json.load(c)\n            self.assertRegex(cache['manifest'], r'\\+A\\S+\\@')\n            cache['manifest'] = re.sub(\n                r'\\+A.*\\@',\n                \"+Aabcdef0123456789abcdef0123456789abcdef01@\",\n                cache['manifest'])\n            with open(cache_filepath, 'w') as c:\n                c.write(json.dumps(cache))\n            # Re-run the upload and expect to get an invalid cache message\n            p = subprocess.Popen([sys.executable, arv_put.__file__] + arv_put_args,\n                                stdout=subprocess.PIPE,\n                                stderr=subprocess.PIPE,\n                                env=self.ENVIRON)\n            (_, err) = p.communicate()\n            if not batch_mode:\n                self.assertRegex(\n                    err.decode(),\n                    r'ERROR: arv-put: Resume cache contains invalid signature.*')\n                self.assertEqual(p.returncode, 1)\n            else:\n                self.assertRegex(\n                    err.decode(),\n                    r'Invalid signatures on cache file \\'.*\\' while being run in \\'batch mode\\' -- continuing anyways.*')\n                self.assertEqual(p.returncode, 0)\n\n    def 
test_single_expired_signature_reuploads_file(self):\n        self.authorize_with('active')\n        tmpdir = self.make_tmpdir()\n        with open(os.path.join(tmpdir, 'foofile.txt'), 'w') as f:\n            f.write('foo')\n        # Write a second file in its own subdir to force a new stream\n        os.mkdir(os.path.join(tmpdir, 'bar'))\n        with open(os.path.join(tmpdir, 'bar', 'barfile.txt'), 'w') as f:\n            f.write('bar')\n        # Upload a directory and get the cache file name\n        p = subprocess.Popen([sys.executable, arv_put.__file__, tmpdir],\n                             stdout=subprocess.PIPE,\n                             stderr=subprocess.PIPE,\n                             env=self.ENVIRON)\n        (_, err) = p.communicate()\n        self.assertRegex(err.decode(), r'INFO: Creating new cache file at ')\n        self.assertEqual(p.returncode, 0)\n        cache_filepath = re.search(r'INFO: Creating new cache file at (.*)',\n                                   err.decode()).groups()[0]\n        self.assertTrue(os.path.isfile(cache_filepath))\n        # Load the cache file contents and modify the manifest to simulate\n        # an expired access token\n        with open(cache_filepath, 'r') as c:\n            cache = json.load(c)\n        self.assertRegex(cache['manifest'], r'\\+A\\S+\\@')\n        a_month_ago = datetime.datetime.now() - datetime.timedelta(days=30)\n        # Make one of the signatures appear to have expired\n        cache['manifest'] = re.sub(\n            r'\\@.*? 3:3:barfile.txt',\n            \"@{} 3:3:barfile.txt\".format(self.datetime_to_hex(a_month_ago)),\n            cache['manifest'])\n        with open(cache_filepath, 'w') as c:\n            c.write(json.dumps(cache))\n        # Re-run the upload and expect a warning that the file with the\n        # expired token will be re-uploaded\n        p = subprocess.Popen([sys.executable, arv_put.__file__, tmpdir],\n                             stdout=subprocess.PIPE,\n                             stderr=subprocess.PIPE,\n                             env=self.ENVIRON)\n        (_, err) = p.communicate()\n        self.assertRegex(\n            err.decode(),\n            r'WARNING: Uploaded file \\'.*barfile.txt\\' access token expired, will re-upload it from scratch')\n        self.assertEqual(p.returncode, 0)\n        # Confirm that the resulting cache is different from the last run.\n        with open(cache_filepath, 'r') as c2:\n            new_cache = json.load(c2)\n        self.assertNotEqual(cache['manifest'], new_cache['manifest'])\n\n    def test_put_collection_with_later_update(self):\n        tmpdir = self.make_tmpdir()\n        with open(os.path.join(tmpdir, 'file1'), 'w') as f:\n            f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')\n        col = self.run_and_find_collection(\"\", ['--no-progress', tmpdir])\n        self.assertNotEqual(None, col['uuid'])\n        # Add a new file to the directory\n        with open(os.path.join(tmpdir, 'file2'), 'w') as f:\n            f.write('The quick brown fox jumped over the lazy dog')\n        updated_col = self.run_and_find_collection(\"\", ['--no-progress', '--update-collection', col['uuid'], tmpdir])\n        self.assertEqual(col['uuid'], updated_col['uuid'])\n        # Get the manifest and check that the new file is included\n        c = arv_put.api_client.collections().get(uuid=updated_col['uuid']).execute()\n        self.assertRegex(c['manifest_text'], r'^\\..* .*:44:file2\\n')\n\n    def 
test_put_collection_with_utc_expiring_datetime(self):\n        tmpdir = self.make_tmpdir()\n        trash_at = (datetime.datetime.utcnow() + datetime.timedelta(days=90)).strftime('%Y%m%dT%H%MZ')\n        with open(os.path.join(tmpdir, 'file1'), 'w') as f:\n            f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')\n        col = self.run_and_find_collection(\n            \"\",\n            ['--no-progress', '--trash-at', trash_at, tmpdir])\n        self.assertNotEqual(None, col['uuid'])\n        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()\n        self.assertEqual(ciso8601.parse_datetime(trash_at),\n            ciso8601.parse_datetime(c['trash_at']))\n\n    def test_put_collection_with_timezone_aware_expiring_datetime(self):\n        tmpdir = self.make_tmpdir()\n        trash_at = (datetime.datetime.utcnow() + datetime.timedelta(days=90)).strftime('%Y%m%dT%H%M-0300')\n        with open(os.path.join(tmpdir, 'file1'), 'w') as f:\n            f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')\n        col = self.run_and_find_collection(\n            \"\",\n            ['--no-progress', '--trash-at', trash_at, tmpdir])\n        self.assertNotEqual(None, col['uuid'])\n        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()\n        self.assertEqual(\n            ciso8601.parse_datetime(trash_at).replace(tzinfo=None) + datetime.timedelta(hours=3),\n            ciso8601.parse_datetime(c['trash_at']).replace(tzinfo=None))\n\n    def test_put_collection_with_timezone_naive_expiring_datetime(self):\n        tmpdir = self.make_tmpdir()\n        trash_at = (datetime.datetime.utcnow() + datetime.timedelta(days=90)).strftime('%Y%m%dT%H%M')\n        with open(os.path.join(tmpdir, 'file1'), 'w') as f:\n            f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')\n        col = self.run_and_find_collection(\n            \"\",\n            ['--no-progress', '--trash-at', trash_at, tmpdir])\n        self.assertNotEqual(None, col['uuid'])\n        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()\n        if time.daylight:\n            offset = datetime.timedelta(seconds=time.altzone)\n        else:\n            offset = datetime.timedelta(seconds=time.timezone)\n        self.assertEqual(\n            ciso8601.parse_datetime(trash_at) + offset,\n            ciso8601.parse_datetime(c['trash_at']).replace(tzinfo=None))\n\n    def test_put_collection_with_expiring_date_only(self):\n        tmpdir = self.make_tmpdir()\n        trash_at = '2140-01-01'\n        end_of_day = datetime.timedelta(hours=23, minutes=59, seconds=59)\n        with open(os.path.join(tmpdir, 'file1'), 'w') as f:\n            f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')\n        col = self.run_and_find_collection(\n            \"\",\n            ['--no-progress', '--trash-at', trash_at, tmpdir])\n        self.assertNotEqual(None, col['uuid'])\n        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()\n        if time.daylight:\n            offset = datetime.timedelta(seconds=time.altzone)\n        else:\n            offset = datetime.timedelta(seconds=time.timezone)\n        self.assertEqual(\n            ciso8601.parse_datetime(trash_at) + end_of_day + offset,\n            ciso8601.parse_datetime(c['trash_at']).replace(tzinfo=None))\n\n    def 
test_put_collection_with_invalid_absolute_expiring_datetimes(self):\n        cases = ['2100', '210010','2100-10', '2100-Oct']\n        tmpdir = self.make_tmpdir()\n        with open(os.path.join(tmpdir, 'file1'), 'w') as f:\n            f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')\n        for test_datetime in cases:\n            with self.assertRaises(AssertionError):\n                self.run_and_find_collection(\n                    \"\",\n                    ['--no-progress', '--trash-at', test_datetime, tmpdir])\n\n    def test_put_collection_with_relative_expiring_datetime(self):\n        expire_after = 7\n        dt_before = datetime.datetime.utcnow() + datetime.timedelta(days=expire_after)\n        tmpdir = self.make_tmpdir()\n        with open(os.path.join(tmpdir, 'file1'), 'w') as f:\n            f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')\n        col = self.run_and_find_collection(\n            \"\",\n            ['--no-progress', '--trash-after', str(expire_after), tmpdir])\n        self.assertNotEqual(None, col['uuid'])\n        dt_after = datetime.datetime.utcnow() + datetime.timedelta(days=expire_after)\n        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()\n        trash_at = ciso8601.parse_datetime(c['trash_at']).replace(tzinfo=None)\n        self.assertTrue(dt_before < trash_at)\n        self.assertTrue(dt_after > trash_at)\n\n    def test_put_collection_with_invalid_relative_expiring_datetime(self):\n        expire_after = 0 # Must be >= 1\n        tmpdir = self.make_tmpdir()\n        with open(os.path.join(tmpdir, 'file1'), 'w') as f:\n            f.write('Relaxing in basins at the end of inlets terminates the endless tests from the box')\n        with self.assertRaises(AssertionError):\n            self.run_and_find_collection(\n                \"\",\n                ['--no-progress', '--trash-after', str(expire_after), tmpdir])\n\n    def test_upload_directory_reference_without_trailing_slash(self):\n        tmpdir1 = self.make_tmpdir()\n        tmpdir2 = self.make_tmpdir()\n        with open(os.path.join(tmpdir1, 'foo'), 'w') as f:\n            f.write('This is foo')\n        with open(os.path.join(tmpdir2, 'bar'), 'w') as f:\n            f.write('This is not foo')\n        # Upload one directory and one file\n        col = self.run_and_find_collection(\"\", ['--no-progress',\n                                                tmpdir1,\n                                                os.path.join(tmpdir2, 'bar')])\n        self.assertNotEqual(None, col['uuid'])\n        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()\n        # Check that 'foo' was written inside a subcollection\n        # OTOH, 'bar' should have been directly uploaded on the root collection\n        self.assertRegex(c['manifest_text'], r'^\\. 
.*:15:bar\\n\\./.+ .*:11:foo\\n')\n\n    def test_upload_directory_reference_with_trailing_slash(self):\n        tmpdir1 = self.make_tmpdir()\n        tmpdir2 = self.make_tmpdir()\n        with open(os.path.join(tmpdir1, 'foo'), 'w') as f:\n            f.write('This is foo')\n        with open(os.path.join(tmpdir2, 'bar'), 'w') as f:\n            f.write('This is not foo')\n        # Upload one directory (with trailing slash) and one file\n        col = self.run_and_find_collection(\"\", ['--no-progress',\n                                                tmpdir1 + os.sep,\n                                                os.path.join(tmpdir2, 'bar')])\n        self.assertNotEqual(None, col['uuid'])\n        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()\n        # Check that 'foo' and 'bar' were written at the same level\n        self.assertRegex(c['manifest_text'], r'^\\. .*:15:bar .*:11:foo\\n')\n\n    def test_put_collection_with_high_redundancy(self):\n        # Write empty data: we're not testing CollectionWriter, just\n        # making sure collections.create tells the API server what our\n        # desired replication level is.\n        collection = self.run_and_find_collection(\"\", ['--replication', '4'])\n        self.assertEqual(4, collection['replication_desired'])\n\n    def test_put_collection_with_default_redundancy(self):\n        collection = self.run_and_find_collection(\"\")\n        self.assertEqual(None, collection['replication_desired'])\n\n    def test_put_collection_with_unnamed_project_link(self):\n        link = self.run_and_find_collection(\n            \"Test unnamed collection\",\n            ['--portable-data-hash', '--project-uuid', self.PROJECT_UUID])\n        username = pwd.getpwuid(os.getuid()).pw_name\n        self.assertRegex(\n            link['name'],\n            r'^Saved at .* by {}@'.format(re.escape(username)))\n\n    def test_put_collection_with_name_and_no_project(self):\n        link_name = 'Test Collection Link in home project'\n        collection = self.run_and_find_collection(\n            \"Test named collection in home project\",\n            ['--portable-data-hash', '--name', link_name])\n        self.assertEqual(link_name, collection['name'])\n        my_user_uuid = self.current_user()['uuid']\n        self.assertEqual(my_user_uuid, collection['owner_uuid'])\n\n    def test_put_collection_with_named_project_link(self):\n        link_name = 'Test auto Collection Link'\n        collection = self.run_and_find_collection(\"Test named collection\",\n                                      ['--portable-data-hash',\n                                       '--name', link_name,\n                                       '--project-uuid', self.PROJECT_UUID])\n        self.assertEqual(link_name, collection['name'])\n\n    def test_put_collection_with_storage_classes_specified(self):\n        collection = self.run_and_find_collection(\"\", ['--storage-classes', 'hot'])\n        self.assertEqual(len(collection['storage_classes_desired']), 1)\n        self.assertEqual(collection['storage_classes_desired'][0], 'hot')\n\n    def test_put_collection_with_multiple_storage_classes_specified(self):\n        collection = self.run_and_find_collection(\"\", ['--storage-classes', ' foo, bar  ,baz'])\n        self.assertEqual(len(collection['storage_classes_desired']), 3)\n        self.assertEqual(collection['storage_classes_desired'], ['foo', 'bar', 'baz'])\n\n    def test_put_collection_without_storage_classes_specified(self):\n        collection 
= self.run_and_find_collection(\"\")\n        self.assertEqual(len(collection['storage_classes_desired']), 1)\n        self.assertEqual(collection['storage_classes_desired'][0], 'default')\n\n    def test_put_collection_with_duplicate_and_malformed_storage_classes_specified(self):\n        collection = self.run_and_find_collection(\"\", ['--storage-classes', ' foo, bar  ,baz,,  bar, foo, , ,'])\n        self.assertEqual(len(collection['storage_classes_desired']), 3)\n        self.assertEqual(collection['storage_classes_desired'], ['foo', 'bar', 'baz'])\n\n    def test_exclude_filename_pattern(self):\n        tmpdir = self.make_tmpdir()\n        tmpsubdir = os.path.join(tmpdir, 'subdir')\n        os.mkdir(tmpsubdir)\n        for fname in ['file1', 'file2', 'file3']:\n            with open(os.path.join(tmpdir, \"%s.txt\" % fname), 'w') as f:\n                f.write(\"This is %s\" % fname)\n            with open(os.path.join(tmpsubdir, \"%s.txt\" % fname), 'w') as f:\n                f.write(\"This is %s\" % fname)\n        col = self.run_and_find_collection(\"\", ['--no-progress',\n                                                '--exclude', '*2.txt',\n                                                '--exclude', 'file3.*',\n                                                 tmpdir])\n        self.assertNotEqual(None, col['uuid'])\n        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()\n        # None of the file2.txt & file3.txt should have been uploaded\n        self.assertRegex(c['manifest_text'], r'^.*:file1.txt')\n        self.assertNotRegex(c['manifest_text'], r'^.*:file2.txt')\n        self.assertNotRegex(c['manifest_text'], r'^.*:file3.txt')\n\n    def test_exclude_filepath_pattern(self):\n        tmpdir = self.make_tmpdir()\n        tmpsubdir = os.path.join(tmpdir, 'subdir')\n        os.mkdir(tmpsubdir)\n        for fname in ['file1', 'file2', 'file3']:\n            with open(os.path.join(tmpdir, \"%s.txt\" % fname), 'w') as f:\n                f.write(\"This is %s\" % fname)\n            with open(os.path.join(tmpsubdir, \"%s.txt\" % fname), 'w') as f:\n                f.write(\"This is %s\" % fname)\n        col = self.run_and_find_collection(\"\", ['--no-progress',\n                                                '--exclude', 'subdir/*2.txt',\n                                                '--exclude', './file1.*',\n                                                 tmpdir])\n        self.assertNotEqual(None, col['uuid'])\n        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()\n        # Only tmpdir/file1.txt & tmpdir/subdir/file2.txt should have been excluded\n        self.assertNotRegex(c['manifest_text'],\n                            r'^\\./%s.*:file1.txt' % os.path.basename(tmpdir))\n        self.assertNotRegex(c['manifest_text'],\n                            r'^\\./%s/subdir.*:file2.txt' % os.path.basename(tmpdir))\n        self.assertRegex(c['manifest_text'],\n                         r'^\\./%s.*:file2.txt' % os.path.basename(tmpdir))\n        self.assertRegex(c['manifest_text'], r'^.*:file3.txt')\n\n    def test_unicode_on_filename(self):\n        tmpdir = self.make_tmpdir()\n        fname = u\"i❤arvados.txt\"\n        with open(os.path.join(tmpdir, fname), 'w') as f:\n            f.write(\"This is a unicode named file\")\n        col = self.run_and_find_collection(\"\", ['--no-progress', tmpdir])\n        self.assertNotEqual(None, col['uuid'])\n        c = arv_put.api_client.collections().get(uuid=col['uuid']).execute()\n        
\n        self.assertTrue(fname in c['manifest_text'], u\"{} does not include {}\".format(c['manifest_text'], fname))\n\n    def test_silent_mode_no_errors(self):\n        self.authorize_with('active')\n        tmpdir = self.make_tmpdir()\n        with open(os.path.join(tmpdir, 'test.txt'), 'w') as f:\n            f.write('hello world')\n        pipe = subprocess.Popen(\n            [sys.executable, arv_put.__file__] + ['--silent', tmpdir],\n            stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE, env=self.ENVIRON)\n        stdout, stderr = pipe.communicate()\n        # No console output should occur during normal operation\n        self.assertNotRegex(stderr.decode(), r'.+')\n        self.assertNotRegex(stdout.decode(), r'.+')\n\n    def test_silent_mode_does_not_avoid_error_messages(self):\n        self.authorize_with('active')\n        pipe = subprocess.Popen(\n            [sys.executable, arv_put.__file__] + ['--silent',\n                                                  '/path/not/existent'],\n            stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE, env=self.ENVIRON)\n        stdout, stderr = pipe.communicate()\n        # Error messages should be displayed even in silent mode\n        self.assertRegex(stderr.decode(), r'.*ERROR:.*')\n        self.assertNotRegex(stdout.decode(), r'.+')\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "sdk/python/tests/test_arv_ws.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport sys\nimport tempfile\nimport unittest\n\nimport arvados.errors as arv_error\nimport arvados.commands.ws as arv_ws\nfrom . import arvados_testutil as tutil\n\nclass ArvWsTestCase(unittest.TestCase, tutil.VersionChecker):\n    def run_ws(self, args):\n        return arv_ws.main(args)\n\n    def test_unsupported_arg(self):\n        with self.assertRaises(SystemExit):\n            self.run_ws(['-x=unknown'])\n\n    def test_version_argument(self):\n        with tutil.redirected_streams(\n                stdout=tutil.StringIO, stderr=tutil.StringIO) as (out, err):\n            with self.assertRaises(SystemExit):\n                self.run_ws(['--version'])\n        self.assertVersionOutput(out, err)\n"
  },
  {
    "path": "sdk/python/tests/test_arvcli.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\nimport re\nimport io\nimport json\nfrom unittest import mock\nimport ciso8601\nimport pytest\nfrom ruamel.yaml import YAML\nyaml = YAML(typ=\"safe\", pure=True)\n\nimport arvados\nfrom arvados.commands import arvcli\nfrom . import run_test_server\n\n\nCOLLECTION_UUID_PATTERN = re.compile(r\"^[0-9a-z]{5}-4zz18-[0-9a-z]{15}$\")\n\n\ndef test_global_option_help_followed_by_subcommand():\n    \"\"\"When called as arvcli.py -h [subcommand], the subcommand is ignored,\n    the -h option is consumed by the parser, and the help message is printed,\n    followed by normal exit.\n    \"\"\"\n    parser = arvcli.ArvCLIArgumentParser({})\n    with pytest.raises(SystemExit) as exit_status:\n        parser.parse_known_args([\"-h\", \"foo\"])\n    assert exit_status.value.code == 0\n\n\ndef test_no_subcommand():\n    parser = arvcli.ArvCLIArgumentParser({})\n    with pytest.raises(SystemExit) as exit_status:\n        parser.parse_known_args([\"-s\"])\n    assert exit_status.value.code == 2\n\n\ndef test_invalid_subcommand():\n    parser = arvcli.ArvCLIArgumentParser({})\n    with pytest.raises(SystemExit) as exit_status:\n        parser.parse_known_args([\"foo\"])\n    assert exit_status.value.code == 2\n\n\n# Pass-through (sub)commands and their corresponding 'entry point' functions.\nPASSTHROUGH_CMD_FUNCS = [\n    (\"keep ls\", \"arvados.commands.ls.main\"),\n    (\"keep get\", \"arvados.commands.get.main\"),\n    (\"keep put\", \"arvados.commands.put.main\"),\n    (\"keep docker\", \"arvados.commands.keepdocker.main\"),\n    (\"ws\", \"arvados.commands.ws.main\"),\n    (\"copy\", \"arvados.commands.arv_copy.main\")\n]\n\n\n@pytest.mark.parametrize(\"subcommand,main_fcn_name\", PASSTHROUGH_CMD_FUNCS)\ndef test_passthrough_commands_args(subcommand, main_fcn_name):\n    \"\"\"Test that arbitrary argv ('[...] arvcli.py subcommand --foo bar') to\n    arvcli.py gets passed to the underlying subcommand; i.e. 
the passed-through\n    subcommand's entry function gets called with [\"--foo\", \"bar\"].\n    \"\"\"\n    with mock.patch(main_fcn_name) as s:\n        with pytest.raises(SystemExit):\n            arvcli.dispatch([*subcommand.split(), \"--foo\", \"bar\"])\n        s.assert_called_with([\"--foo\", \"bar\"])\n\n\n@pytest.mark.parametrize(\"subcommand,main_fcn_name\", PASSTHROUGH_CMD_FUNCS)\ndef test_passthrough_commands_help(subcommand, main_fcn_name):\n    \"\"\"Test that the -h flag to a subcommand (as opposed to the main command)\n    gets passed to the underlying script rather than consumed by the main arg\n    parser.\n    \"\"\"\n    with mock.patch(main_fcn_name) as s:\n        with pytest.raises(SystemExit):\n            arvcli.dispatch([*subcommand.split(), \"-h\"])\n        s.assert_called_with([\"-h\"])\n\n\n@pytest.mark.parametrize(\"plural,singular\", (\n    (\"container_requests\", \"container_request\"),\n    (\"vocabularies\", \"vocabulary\"),\n    (\"sys\", \"sys\"),\n    (\"Foos\", \"Foo\"),  # generic nonce word that ends in \"-s\"\n    (\"foo\", \"foo\")  # already singular in form\n))\ndef test_singularizer(plural, singular):\n    assert arvcli._ArgUtil.singularize_resource(plural) == singular\n\n\ndef test_cli_parser_has_singular_plural_mapping():\n    api_client = arvados.api(\"v1\")\n    cmd_parser = arvcli.ArvCLIArgumentParser(\n        api_client._resourceDesc[\"resources\"]\n    )\n    for resource in cmd_parser.resource_dictionary.keys():\n        k = arvcli._ArgUtil.singularize_resource(resource)\n        assert cmd_parser._subcommand_to_resource[k] == resource\n    assert cmd_parser._subcommand_to_resource[\"sy\"] == cmd_parser._subcommand_to_resource[\"sys\"]\n\n\n@pytest.mark.parametrize(\"key,argument_name\", (\n    (\"ensure_unique_name\", \"--ensure-unique-name\"),\n    (\"filters\", \"--filters\"),\n))\ndef test_parameter_key_to_argument_name(key, argument_name):\n    assert arvcli._ArgUtil.parameter_key_to_argument_name(key) == argument_name\n\n\ndef test_get_method_options():\n    # Largely based on arvados.container_requests.create, but with a fictitious\n    # parameter entry for integer type, another one for required=True, and\n    # also with parameter descriptions replaced by brief strings.\n    input_method_schema = {\n        \"parameters\": {\n            \"select\": {\n                \"type\": \"array\",\n                \"description\": \"help-select.\",\n                \"required\": False,\n                \"location\": \"query\"\n            },\n            \"ensure_unique_name\": {\n                \"type\": \"boolean\",\n                \"description\": \"help-ensure-unique-name.\",\n                \"location\": \"query\",\n                \"required\": False,\n                \"default\": \"false\"\n            },\n            \"cluster_id\": {\n                \"type\": \"string\",\n                \"description\": \"help-cluster-id.\",\n                \"location\": \"query\",\n                \"required\": False\n            },\n            # Fictitious parameters\n            \"uuid\": {\n                \"type\": \"string\",\n                \"description\": \"help-uuid.\",\n                \"required\": True,\n                \"location\": \"path\"\n\n            },\n            \"limit\": {\n                \"type\": \"integer\",\n                \"required\": False,\n                \"default\": \"100\",\n                \"description\": \"help-limit.\",\n                \"location\": \"query\"\n            },\n            
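# Like the fictitious parameters above, this entry mainly exercises the\n            # schema-to-argparser conversion (see the NOTE on --filters below).\n            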
\"filters\": {\n                \"type\": \"array\",\n                \"required\": False,\n                \"description\": \"help-filters.\",\n                \"location\": \"query\"\n            }\n        },\n        \"request\": {\n            \"required\": True,\n            \"properties\": {\n                \"container_request\": {\n                    \"$ref\": \"ContainerRequest\"\n                }\n            }\n        }\n    }\n    output = [\n        (\n            (\"-s\", \"--select\"),\n            {\n                \"type\": arvcli._ArgTypes.json_array,\n                \"metavar\": \"JSON_ARRAY\",\n                \"help\": \"help-select.\",\n                \"required\": False\n            }\n        ),\n        (\n            (\"--no-ensure-unique-name\",),\n            {\n                \"dest\": \"ensure_unique_name\",\n                \"action\": \"store_false\",\n                \"default\": False,\n                \"required\": False\n            }\n        ),\n        (\n            (\"-e\", \"--ensure-unique-name\"),\n            {\n                \"dest\": \"ensure_unique_name\",\n                \"action\": \"store_true\",\n                \"help\": \"help-ensure-unique-name.\",\n                \"required\": False,\n                \"default\": False\n            }\n        ),\n        (\n            (\"-c\", \"--cluster-id\"),\n            {\n                \"type\": str,\n                \"metavar\": \"STR\",\n                \"help\": \"help-cluster-id.\",\n                \"required\": False\n            }\n        ),\n        # Fictitious parameters\n        (\n            (\"-u\", \"--uuid\"),\n            {\n                \"type\": str,\n                \"metavar\": \"STR\",\n                \"help\": \"help-uuid. This option must be specified.\",\n                \"required\": True,\n            }\n        ),\n        (\n            (\"-l\", \"--limit\"),\n            {\n                \"type\": int,\n                \"metavar\": \"N\",\n                \"help\": \"help-limit. Default: 100.\",\n                \"required\": False\n            }\n        ),\n        (\n            # NOTE: IRL, --filters parameter doesn't appear for methods that\n            # have the request parameter. This is purely used for testing\n            # schema-to-argparser conversion.\n            (\"-f\", \"--filters\"),\n            {\n                \"type\": arvcli._ArgTypes.json_filter,\n                \"metavar\": \"{JSON,FILE,-}\",\n                \"help\": \"help-filters. This can be a filename from which to read JSON (use '-' to read from stdin).\",\n                \"required\": False\n            }\n        ),\n        # Request parameter\n        (\n            (\"-o\", \"--container-request\"),\n            {\n                \"dest\": \"body\",\n                \"type\": arvcli._ArgTypes.json_body,\n                \"metavar\": \"{JSON,FILE,-}\",\n                \"help\": \"Either a string representing container_request as JSON or a filename from which to read container_request JSON (use '-' to read from stdin). 
This option must be specified.\",\n                \"required\": True\n            }\n        )\n    ]\n    assert list(\n        arvcli._ArgUtil.get_method_options(input_method_schema)\n    ) == output\n\n\nclass TestArgTypes:\n    \"\"\"Test the private type converter-validators under the arvcli._ArgTypes\n    namespace.\n    \"\"\"\n    def test_json_array_makes_list(self):\n        assert arvcli._ArgTypes.json_array(\"[]\") == []\n\n    def test_json_object_makes_dict(self):\n        assert arvcli._ArgTypes.json_object(\"{}\") == {}\n\n    @pytest.mark.parametrize(\"invalid_input\", (\"{}\", '\"\"', \"0\", \"null\"))\n    def test_json_array_rejects_non_array(self, invalid_input):\n        with pytest.raises(argparse.ArgumentTypeError):\n            arvcli._ArgTypes.json_array(invalid_input)\n\n    @pytest.mark.parametrize(\"invalid_input\", (\"[]\", '\"\"', \"0\", \"null\"))\n    def test_json_object_rejects_non_object(self, invalid_input):\n        with pytest.raises(argparse.ArgumentTypeError):\n            arvcli._ArgTypes.json_object(invalid_input)\n\n\n@pytest.fixture\ndef run_arvcli(capsys):\n\n    def the_run(cli_args):\n        with pytest.raises(SystemExit) as exc:\n            arvcli.dispatch(cli_args)\n        captured = capsys.readouterr()\n        return exc.value.code, captured.out, captured.err\n\n    return the_run\n\n\n@pytest.mark.parametrize(\n    \"invalid_value\",\n    (\"foo\", '\"foo\"', '{\"foo\": null}', '1.0', 'false', 'true', 'null')\n)\ndef test_cli_can_intercept_invalid_json_subtype(invalid_value, run_arvcli):\n    # --scopes takes JSON array\n    cli = [\"api_client_authorization\", \"create_system_auth\", \"--scopes\"]\n    cli.append(invalid_value)\n    exit_code, out, err = run_arvcli(cli)\n    assert exit_code == 2\n    assert \"not valid JSON array\" in err\n\n\nclass TestSameFlagInTwoPlaces:\n    def test_s_flag(self, run_arvcli):\n        # As \"global\" parameter, \"-s\" is for \"--short\" (display UUID[s] in\n        # output only).  As parameter to the resource method, the second \"-s\"\n        # is for \"--select\", which limits the output attributes.\n        # For a counterexample, see the function\n        # test_uuid_output_with_list_items_having_no_uuid()\n        exit_code, out, err = run_arvcli(\n            [\"-s\", \"collection\", \"list\", \"-s\", '[\"uuid\"]']\n        )\n        assert exit_code == 0\n        lines = out.splitlines()\n        assert any(lines)\n        assert all(COLLECTION_UUID_PATTERN.match(line) for line in lines)\n\n    def test_f_flag(self, run_arvcli):\n        # As \"global\" parameter, \"-f\" is for \"--format\", which takes one arg\n        # value. As parameter to the resource method, \"-f\" is for \"--filters\"\n        active_user = run_test_server.fixture(\"users\")[\"active\"][\"uuid\"]\n        exit_code, out, err = run_arvcli([\n            \"-f\", \"uuid\",\n            \"collection\", \"list\",\n            \"-f\", json.dumps([[\"owner_uuid\", \"=\", active_user]])\n        ])\n        assert exit_code == 0\n        assert not err\n        lines = out.splitlines()\n        assert any(lines)\n        assert all(COLLECTION_UUID_PATTERN.match(line) for line in lines)\n\n\nclass TestCommonMethods:\n    \"\"\"Basic tests that sample the common methods -- get, list, create, update,\n    delete -- with different resources and global CLI options. 
Basic sanity\n    checks are performed from the results of these calls.\n    \"\"\"\n\n    def test_container_request_get_yaml(self, run_arvcli):\n        fix = run_test_server.fixture(\"container_requests\")[\"queued\"]\n        exit_code, out, err = run_arvcli([\n            \"--format\", \"yaml\",\n            \"container_request\", \"get\",\n            \"--uuid\", fix[\"uuid\"]\n        ])\n        assert exit_code == 0\n        result = yaml.load(out)\n        attrs = (\n            \"name\", \"container_image\", \"owner_uuid\", \"command\", \"output_path\"\n        )\n        for attr in attrs:\n            assert result[attr] == fix[attr]\n\n    def test_group_list_format_json_common_args(self, run_arvcli):\n        exit_code, out, err = run_arvcli([\n            \"--format\", \"json\",\n            \"group\", \"list\",\n            \"--offset\", \"1\",\n            \"--limit\", \"10\",\n            \"--filters\", json.dumps([[\"group_class\", \"=\", \"project\"]]),\n            \"--count=none\",\n            \"--order\", '[\"modified_at desc\"]',\n            \"--select\", '[\"uuid\", \"name\", \"modified_at\"]'\n        ])\n        assert exit_code == 0\n        result = json.loads(out)\n        assert result[\"kind\"] == \"arvados#groupList\"\n\n    @pytest.mark.usefixtures(\"reset_test_server_db\")\n    def test_link_create_format_uuid(self, run_arvcli):\n        me = run_test_server.fixture(\"users\")[\"active\"]\n        project = run_test_server.fixture(\"groups\")[\"private\"]\n        exit_code, out, err = run_arvcli([\n            \"--format\", \"uuid\",\n            \"link\", \"create\",\n            \"--link\", json.dumps({\n                \"link_class\": \"star\",\n                \"owner_uuid\": me[\"uuid\"],\n                \"tail_uuid\": me[\"uuid\"],\n                \"head_uuid\": project[\"uuid\"]\n            })\n        ])\n        assert exit_code == 0\n        assert re.match(r\"^[0-9a-z]{5}-o0j2j-[0-9a-z]{15}$\", out)\n\n    @pytest.mark.usefixtures(\"reset_test_server_db\")\n    def test_user_update(self, run_arvcli):\n        me = run_test_server.fixture(\"users\")[\"active\"]\n        my_email = \"no-reply@test.example\"\n        exit_code, out, err = run_arvcli([\n            \"user\", \"update\",\n            \"--uuid\", me[\"uuid\"],\n            \"--user\", json.dumps({\"email\": my_email})\n        ])\n        assert exit_code == 0\n        result = json.loads(out)\n        assert result[\"uuid\"] == me[\"uuid\"]\n        assert result[\"email\"] == my_email\n\n    @pytest.mark.usefixtures(\"reset_test_server_db\")\n    def test_authorized_key_delete(self, run_arvcli):\n        key = run_test_server.fixture(\"authorized_keys\")[\"active\"]\n        exit_code, out, err = run_arvcli([\n            \"authorized_key\", \"delete\",\n            \"--uuid\", key[\"uuid\"]\n        ])\n        assert exit_code == 0\n        # Same key is gone.\n        exit_code, out, err = run_arvcli([\n            \"authorized_key\", \"get\",\n            \"--uuid\", key[\"uuid\"]\n        ])\n        assert exit_code == 1\n        assert \"404 not found\" in err.lower()\n\n\ndef _no_extra_spaces_at_end(text: str) -> bool:\n    # Text ends in newline but without extraneous whitespace characters.\n    return re.search(r\"(\\A|\\S)\\n\\Z\", text) is not None\n\n\nclass TestRequestBodyWithCollectionCreateCMD:\n    md5_empty = \"d41d8cd98f00b204e9800998ecf8427e\"\n    collection_test_name = \"empty-test\"\n    manifest_data = {\n        \"name\": collection_test_name,\n      
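  # md5_empty is the MD5 of zero bytes, so this manifest describes a single empty file.\n      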
  \"manifest_text\": f\". {md5_empty}+0 0:0:empty\\n\"\n    }\n    cli = [\"collection\", \"create\", \"--collection\"]\n\n    def setup_method(self):\n        run_test_server.reset()\n\n    @classmethod\n    def teardown_class(self):\n        run_test_server.reset()\n\n    def test_request_body_missing(self, run_arvcli):\n        exit_code, out, err = run_arvcli(self.cli)\n        assert exit_code == 2\n        assert err\n        assert not out\n\n    @mock.patch(\"sys.stdin\", new_callable=io.StringIO)\n    def test_request_body_stdin_valid_json(self, mock_stdin, run_arvcli):\n        json.dump(self.manifest_data, mock_stdin)\n        mock_stdin.seek(0)\n        exit_code, out, err = run_arvcli(self.cli + [\"-\"])\n        assert exit_code == 0\n        assert not err\n        actual = json.loads(out)\n        assert actual[\"kind\"] == \"arvados#collection\"\n        assert actual[\"name\"] == self.manifest_data[\"name\"]\n        assert COLLECTION_UUID_PATTERN.match(actual[\"uuid\"])\n        assert _no_extra_spaces_at_end(out)\n\n    def test_request_body_file_valid_json_out_yaml(self, tmp_path, run_arvcli):\n        f = tmp_path / \"body.json\"\n        f.write_text(json.dumps(self.manifest_data))\n        exit_code, out, err = run_arvcli(\n            [\"--format\", \"yaml\"] + self.cli + [f\"{f!s}\"]\n        )\n        assert exit_code == 0\n        assert not err\n        actual = yaml.load(out)\n        assert actual[\"kind\"] == \"arvados#collection\"\n        assert actual[\"name\"] == self.manifest_data[\"name\"]\n        assert COLLECTION_UUID_PATTERN.match(actual[\"uuid\"])\n        assert _no_extra_spaces_at_end(out)\n\n    def test_request_body_file_valid_json_out_short(self, tmp_path, run_arvcli):\n        f = tmp_path / \"body.json\"\n        f.write_text(json.dumps(self.manifest_data))\n        exit_code, out, err = run_arvcli([\"-s\"] + self.cli + [f\"{f!s}\"])\n        assert exit_code == 0\n        assert not err\n        assert _no_extra_spaces_at_end(out)\n        assert COLLECTION_UUID_PATTERN.match(out.rstrip())\n\n    @mock.patch(\"sys.stdin\", new_callable=io.StringIO)\n    def test_replace_files(self, mock_stdin, run_arvcli):\n        json.dump(self.manifest_data, mock_stdin)\n        mock_stdin.seek(0)\n        replace_files = json.dumps({\n            \"/foo/bar.txt\": \"manifest_text/empty\"\n        })\n        exit_code, out, err = run_arvcli(\n            self.cli + [\"-\", \"--replace-files\", replace_files]\n        )\n        assert exit_code == 0\n        assert not err\n        actual = json.loads(out)\n        assert re.match(\n            fr\"^\\./foo {self.md5_empty}\\+0\\+A[0-9a-f]{{40}}@[0-9a-f]{{8}} 0:0:bar\\.txt\\n$\",\n            actual[\"manifest_text\"]\n        )\n\n    def test_invalid_request(self, tmp_path, run_arvcli):\n        f = tmp_path / \"body.json\"\n        f.write_text(json.dumps(self.manifest_data))\n        # request will be invalid because replace_files does not reference\n        # manifest data in body.\n        replace_files = json.dumps({\"/foo\": \"current/bar\"})\n        exit_code, out, err = run_arvcli([\n            \"collection\",\n            \"create\",\n            \"--collection\",\n            f\"{f!s}\",\n            \"--replace-files\",\n            replace_files\n        ])\n        assert exit_code == 1\n        assert not out\n        assert re.search(r\"\\breq-[0-9a-z]{20}\\b\", err)\n        assert _no_extra_spaces_at_end(err)\n\n\ndef _parse_simple_stream(manifest: str) -> dict[str, str]:\n    
\"\"\"Extract the digest, size, and path from a simple, one-file stream from\n    a manifest text, following the format used in the API service test fixture\n    data (see services/api/test/fixtures/collections.yml; e.g.\n    `manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"`).\n    \"\"\"\n    stream_pattern = re.compile(\n        r\"(?a)\\A(\\.(/[^/\\s]+)*)\"  # stream-name\n        r\" (?P<digest>[0-9a-f]{32})\\+(?P<size>[0-9]+)\"  # locator as digest+size\n        r\" [0-9]+:(?P=size):(?P<filename>[^\\s]+)\\n\\Z\"\n    )\n    m = stream_pattern.match(manifest)\n    return m.groupdict() if m is not None else {}\n\n\n@pytest.mark.usefixtures(\"reset_test_server_db\")\ndef test_collection_update_with_replace_files(run_arvcli):\n    foo_uuid = run_test_server.fixture(\"collections\")[\"foo_file\"][\"uuid\"]\n    bar_pdh = run_test_server.fixture(\"collections\")[\"bar_file\"][\"portable_data_hash\"]\n    bar_manifest = run_test_server.fixture(\"collections\")[\"bar_file\"][\"manifest_text\"]\n    replace = json.dumps({\"/bar\": f\"{bar_pdh}/bar\"})\n\n    exit_code, out, err = run_arvcli([\n        \"collection\", \"update\",\n        \"--uuid\", foo_uuid,\n        \"--collection\", \"{}\",\n        \"--replace-files\", replace\n    ])\n    assert exit_code == 0\n    result = json.loads(out)\n    # Quick and dirty check that the file \"bar\" is now in the manifest.\n    bar_elements = _parse_simple_stream(bar_manifest)\n    bar_locator_part = f\"{bar_elements['digest']}+{bar_elements['size']}\"\n    assert bar_locator_part in result[\"manifest_text\"]\n    bar_file_part = f\":{bar_elements['size']}:{bar_elements['filename']}\"\n    assert bar_file_part in result[\"manifest_text\"]\n\n\ndef test_uuid_output_with_list_items_having_no_uuid(run_arvcli):\n    exit_code, out, err = run_arvcli([\n        \"--format\", \"uuid\", \"collection\", \"list\", \"--select\", '[\"name\"]',\n    ])\n    assert exit_code == 1\n    assert not out\n    assert \"did not include a uuid\" in err\n\n\nclass TestDefaultValuesForAPICalls:\n    resources = arvados.api(\"v1\")._resourceDesc[\"resources\"]\n\n    @classmethod\n    def get_default(cls, resource, method, parameter):\n        default = cls.resources[resource][\"methods\"][method][\"parameters\"][parameter].get(\"default\", \"null\")\n        return json.loads(default)\n\n    def test_no_override_default_parameter_value(self, run_arvcli):\n        exit_code, out, err = run_arvcli([\"user\", \"list\"])\n        assert exit_code == 0\n        assert not err\n        result = json.loads(out)\n        assert result[\"limit\"] == self.get_default(\"users\", \"list\", \"limit\")\n\n    def test_override_default_parameter_value(self, run_arvcli):\n        limit = 1\n        exit_code, out, err = run_arvcli(\n            [\"user\", \"list\", \"--limit\", str(limit)]\n        )\n        assert exit_code == 0\n        assert not err\n        result = json.loads(out)\n        assert result[\"limit\"] == limit\n\n\n# The \"config get\" command doesn't take any parameter.\nclass TestConfigGet:\n    def test_config_get(self, run_arvcli):\n        exit_code, out, err = run_arvcli([\"config\", \"get\"])\n        assert exit_code == 0\n\n    def test_config_get_uuid(self, run_arvcli):\n        exit_code, out, err = run_arvcli([\"--format\", \"uuid\", \"config\", \"get\"])\n        assert exit_code == 1\n        assert not out\n        err = io.StringIO(err)\n        assert err.readline().rstrip() == \"Error: response did not include a uuid:\"\n        
# The rest of stderr should be the JSON-formatted response body.\n        assert json.load(err)\n\n\nclass TestApiClientAuthorizationsResource:\n    users = run_test_server.fixture(\"users\")\n    auths = run_test_server.fixture(\"api_client_authorizations\")\n\n    @classmethod\n    def teardown_class(self):\n        run_test_server.reset()\n\n    def assert_same_api_auth(self, fix: dict, result: dict):\n        \"\"\"Compare an API auth fixture as loaded by run_test_server.fixture()\n        to a result returned by the API.\n        \"\"\"\n        assert fix[\"uuid\"] == result[\"uuid\"]\n        assert fix[\"api_token\"] == result[\"api_token\"]\n        # Resolve user name in fixture to owner_uuid. \"Cheat\" by looking up the\n        # users fixtures directly.\n        owner_uuid = self.users[fix[\"user\"]][\"uuid\"]\n        assert owner_uuid == result[\"owner_uuid\"]\n        # Resolve date. The date field in the fixture is timezone-naïve, so we\n        # have to coerce away the timezone information for comparability.\n        result_expires_at = ciso8601.parse_datetime_as_naive(\n            result[\"expires_at\"]\n        )\n        assert fix[\"expires_at\"] == result_expires_at\n        assert fix.get(\"scopes\", [\"all\"]) == result[\"scopes\"]\n\n    def test_current(self, run_arvcli):\n        me = \"active\"\n        run_test_server.authorize_with(me)\n        fix = self.auths[me]\n\n        exit_code, out, err = run_arvcli(\n            [\"api_client_authorization\", \"current\"]\n        )\n\n        assert exit_code == 0\n        result = json.loads(out)\n        self.assert_same_api_auth(fix, result)\n\n    # TODO: investigate possible authorization issue with testing\n    # the create_system_auth method.\n"
  },
  {
    "path": "sdk/python/tests/test_arvfile.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport datetime\nimport os\nimport time\nimport unittest\n\nfrom unittest import mock\n\nimport arvados\n\nfrom arvados._internal.streams import Range\nfrom arvados.arvfile import ArvadosFile, ArvadosFileReader\nfrom arvados.collection import Collection\nfrom arvados.keep import KeepLocator\n\nfrom . import arvados_testutil as tutil\nfrom .test_stream import StreamFileReaderTestMixin, StreamRetryTestMixin\n\nclass ArvadosFileWriterTestCase(unittest.TestCase):\n    class MockKeep(object):\n        def __init__(self, blocks):\n            self.blocks = blocks\n            self.requests = []\n            self.num_prefetch_threads = 1\n        def get(self, locator, num_retries=0, prefetch=False):\n            self.requests.append(locator)\n            return self.blocks.get(locator)\n        def get_from_cache(self, locator):\n            self.requests.append(locator)\n            return self.blocks.get(locator)\n        def put(self, data, num_retries=None, copies=None, classes=[]):\n            pdh = tutil.str_keep_locator(data)\n            self.blocks[pdh] = bytes(data)\n            return pdh\n        def block_prefetch(self, loc):\n            self.requests.append(loc)\n\n    class MockApi(object):\n        def __init__(self, b, r):\n            self.body = b\n            self.response = r\n            self._schema = ArvadosFileWriterTestCase.MockApi.MockSchema()\n            self._rootDesc = {}\n        class MockSchema(object):\n            def __init__(self):\n                self.schemas = {'Collection': {'properties': {'replication_desired': {'type':'integer'}}}}\n        class MockCollections(object):\n            def __init__(self, b, r):\n                self.body = b\n                self.response = r\n            class Execute(object):\n                def __init__(self, r):\n                    self.response = r\n                def execute(self, num_retries=None):\n                    return self.response\n            def create(self, ensure_unique_name=False, body=None):\n                if body != self.body:\n                    raise Exception(\"Body %s does not match expectation %s\" % (body, self.body))\n                return ArvadosFileWriterTestCase.MockApi.MockCollections.Execute(self.response)\n            def update(self, uuid=None, body=None):\n                return ArvadosFileWriterTestCase.MockApi.MockCollections.Execute(self.response)\n        def collections(self):\n            return ArvadosFileWriterTestCase.MockApi.MockCollections(self.body, self.response)\n\n\n    def test_truncate(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\n            \"781e5e245d69b566979b86e28d23f2c7+10\": b\"0123456789\",\n        })\n        api = ArvadosFileWriterTestCase.MockApi({\n            \"name\": \"test_truncate\",\n            \"manifest_text\": \". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\\n\",\n            \"replication_desired\": None,\n        }, {\n            \"uuid\": \"zzzzz-4zz18-mockcollection0\",\n            \"manifest_text\": \". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\\n\",\n            \"portable_data_hash\":\"7fcd0eaac3aad4c31a6a0e756475da92+52\",\n        })\n        with Collection('. 
781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n',\n                        api_client=api, keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            self.assertEqual(writer.size(), 10)\n            self.assertEqual(b\"0123456789\", writer.read(12))\n\n            writer.truncate(8)\n\n            # Make sure reading off the end doesn't break\n            self.assertEqual(b\"\", writer.read(12))\n\n            self.assertEqual(writer.size(), 8)\n            writer.seek(0, os.SEEK_SET)\n            self.assertEqual(b\"01234567\", writer.read(12))\n\n            self.assertIsNone(c.manifest_locator())\n            self.assertTrue(c.modified())\n            c.save_new(\"test_truncate\")\n            self.assertEqual(\"zzzzz-4zz18-mockcollection0\", c.manifest_locator())\n            self.assertFalse(c.modified())\n\n\n    def test_truncate2(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\n            \"781e5e245d69b566979b86e28d23f2c7+10\": b\"0123456789\",\n        })\n        api = ArvadosFileWriterTestCase.MockApi({\n            \"name\": \"test_truncate2\",\n            \"manifest_text\": \". 781e5e245d69b566979b86e28d23f2c7+10 7f614da9329cd3aebf59b91aadc30bf0+67108864 0:12:count.txt\\n\",\n            \"replication_desired\": None,\n        }, {\n            \"uuid\": \"zzzzz-4zz18-mockcollection0\",\n            \"manifest_text\": \". 781e5e245d69b566979b86e28d23f2c7+10 7f614da9329cd3aebf59b91aadc30bf0+67108864 0:12:count.txt\\n\",\n            \"portable_data_hash\": \"272da898abdf86ddc71994835e3155f8+95\",\n        })\n        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n',\n                        api_client=api, keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            self.assertEqual(writer.size(), 10)\n            self.assertEqual(b\"0123456789\", writer.read(12))\n\n            # extend file size\n            writer.truncate(12)\n\n            self.assertEqual(writer.size(), 12)\n            writer.seek(0, os.SEEK_SET)\n            self.assertEqual(b\"0123456789\\x00\\x00\", writer.read(12))\n\n            self.assertIsNone(c.manifest_locator())\n            self.assertTrue(c.modified())\n            c.save_new(\"test_truncate2\")\n            self.assertEqual(\"zzzzz-4zz18-mockcollection0\", c.manifest_locator())\n            self.assertFalse(c.modified())\n\n    def test_truncate3(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\n            \"781e5e245d69b566979b86e28d23f2c7+10\": b\"0123456789\",\n            \"a925576942e94b2ef57a066101b48876+10\": b\"abcdefghij\",\n        })\n        api = ArvadosFileWriterTestCase.MockApi({\n            \"name\": \"test_truncate\",\n            \"manifest_text\": \". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\\n\",\n            \"replication_desired\": None,\n        }, {\n            \"uuid\": \"zzzzz-4zz18-mockcollection0\",\n            \"manifest_text\": \". 781e5e245d69b566979b86e28d23f2c7+10 0:8:count.txt\\n\",\n            \"portable_data_hash\": \"7fcd0eaac3aad4c31a6a0e756475da92+52\",\n        })\n        with Collection('. 
781e5e245d69b566979b86e28d23f2c7+10 a925576942e94b2ef57a066101b48876+10 0:20:count.txt\\n',\n                        api_client=api, keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            self.assertEqual(writer.size(), 20)\n            self.assertEqual(b\"0123456789ab\", writer.read(12))\n            self.assertEqual(12, writer.tell())\n\n            writer.truncate(8)\n\n            # Make sure reading off the end doesn't break\n            self.assertEqual(12, writer.tell())\n            self.assertEqual(b\"\", writer.read(12))\n\n            self.assertEqual(writer.size(), 8)\n            self.assertEqual(2, writer.seek(-10, os.SEEK_CUR))\n            self.assertEqual(b\"234567\", writer.read(12))\n\n            self.assertIsNone(c.manifest_locator())\n            self.assertTrue(c.modified())\n            c.save_new(\"test_truncate\")\n            self.assertEqual(\"zzzzz-4zz18-mockcollection0\", c.manifest_locator())\n            self.assertFalse(c.modified())\n\n    def test_write_to_end(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\n            \"781e5e245d69b566979b86e28d23f2c7+10\": b\"0123456789\",\n        })\n        api = ArvadosFileWriterTestCase.MockApi({\n            \"name\": \"test_append\",\n            \"manifest_text\": \". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:13:count.txt\\n\",\n            \"replication_desired\": None,\n        }, {\n            \"uuid\": \"zzzzz-4zz18-mockcollection0\",\n            \"manifest_text\": \". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:13:count.txt\\n\",\n            \"portable_data_hash\": \"c5c3af76565c8efb6a806546bcf073f3+88\",\n        })\n        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n',\n                             api_client=api, keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            self.assertEqual(writer.size(), 10)\n\n            self.assertEqual(5, writer.seek(5, os.SEEK_SET))\n            self.assertEqual(b\"56789\", writer.read(8))\n\n            writer.seek(10, os.SEEK_SET)\n            writer.write(\"foo\")\n            self.assertEqual(writer.size(), 13)\n\n            writer.seek(5, os.SEEK_SET)\n            self.assertEqual(b\"56789foo\", writer.read(8))\n\n            self.assertIsNone(c.manifest_locator())\n            self.assertTrue(c.modified())\n            self.assertIsNone(keep.get(\"acbd18db4cc2f85cedef654fccc4a4d8+3\"))\n\n            c.save_new(\"test_append\")\n            self.assertEqual(\"zzzzz-4zz18-mockcollection0\", c.manifest_locator())\n            self.assertFalse(c.modified())\n            self.assertEqual(b\"foo\", keep.get(\"acbd18db4cc2f85cedef654fccc4a4d8+3\"))\n\n\n    def test_append(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\n            \"781e5e245d69b566979b86e28d23f2c7+10\": b\"0123456789\",\n        })\n        for (mode, convert) in (\n                ('a+', lambda data: data.decode(encoding='utf-8')),\n                ('at+', lambda data: data.decode(encoding='utf-8')),\n                ('ab+', lambda data: data)):\n            c = Collection('. 
781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n', keep_client=keep)\n            writer = c.open(\"count.txt\", mode)\n            self.assertEqual(writer.read(20), convert(b\"0123456789\"))\n\n            writer.seek(0, os.SEEK_SET)\n            writer.write(convert(b\"hello\"))\n            self.assertEqual(writer.read(), convert(b\"\"))\n            if 'b' in mode:\n                writer.seek(-5, os.SEEK_CUR)\n                self.assertEqual(writer.read(3), convert(b\"hel\"))\n                self.assertEqual(writer.read(), convert(b\"lo\"))\n            else:\n                with self.assertRaises(IOError):\n                    writer.seek(-5, os.SEEK_CUR)\n                with self.assertRaises(IOError):\n                    writer.seek(-3, os.SEEK_END)\n            writer.seek(0, os.SEEK_SET)\n            writer.read(7)\n            self.assertEqual(7, writer.tell())\n            self.assertEqual(7, writer.seek(7, os.SEEK_SET))\n\n            writer.seek(0, os.SEEK_SET)\n            self.assertEqual(writer.read(), convert(b\"0123456789hello\"))\n\n            writer.seek(0)\n            writer.write(convert(b\"world\"))\n            self.assertEqual(writer.read(), convert(b\"\"))\n            writer.seek(0)\n            self.assertEqual(writer.read(), convert(b\"0123456789helloworld\"))\n\n            self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 fc5e038d38a57032085441e7fe7010b0+10 0:20:count.txt\\n\", c.portable_manifest_text())\n\n    def test_write_at_beginning(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\n            \"781e5e245d69b566979b86e28d23f2c7+10\": b\"0123456789\",\n        })\n        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n',\n                             keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            self.assertEqual(b\"0123456789\", writer.readfrom(0, 13))\n            writer.seek(0, os.SEEK_SET)\n            writer.write(\"foo\")\n            self.assertEqual(writer.size(), 10)\n            self.assertEqual(b\"foo3456789\", writer.readfrom(0, 13))\n            self.assertEqual(\". acbd18db4cc2f85cedef654fccc4a4d8+3 781e5e245d69b566979b86e28d23f2c7+10 0:3:count.txt 6:7:count.txt\\n\", c.portable_manifest_text())\n\n    def test_write_empty(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        with Collection(keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"wb\")\n            self.assertEqual(writer.size(), 0)\n            self.assertEqual(\". d41d8cd98f00b204e9800998ecf8427e+0 0:0:count.txt\\n\", c.portable_manifest_text())\n\n    def test_save_manifest_text(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        with Collection(keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"wb\")\n            writer.write(b\"0123456789\")\n            self.assertEqual('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n', c.portable_manifest_text())\n            self.assertNotIn('781e5e245d69b566979b86e28d23f2c7+10', keep.blocks)\n\n            self.assertEqual('. 
781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n', c.save_new(create_collection_record=False))\n            self.assertIn('781e5e245d69b566979b86e28d23f2c7+10', keep.blocks)\n\n    def test_get_manifest_text_commits(self):\n         keep = ArvadosFileWriterTestCase.MockKeep({})\n         with Collection(keep_client=keep) as c:\n             writer = c.open(\"count.txt\", \"wb\")\n             writer.write(\"0123456789\")\n             self.assertEqual('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n', c.portable_manifest_text())\n             self.assertNotIn('781e5e245d69b566979b86e28d23f2c7+10', keep.blocks)\n             self.assertEqual('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n', c.manifest_text())\n             self.assertIn('781e5e245d69b566979b86e28d23f2c7+10', keep.blocks)\n\n\n    def test_write_in_middle(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\"781e5e245d69b566979b86e28d23f2c7+10\": b\"0123456789\"})\n        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n',\n                             keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            self.assertEqual(b\"0123456789\", writer.readfrom(0, 13))\n            writer.seek(3, os.SEEK_SET)\n            writer.write(\"foo\")\n            self.assertEqual(writer.size(), 10)\n            self.assertEqual(b\"012foo6789\", writer.readfrom(0, 13))\n            self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:count.txt 10:3:count.txt 6:4:count.txt\\n\", c.portable_manifest_text())\n\n    def test_write_at_end(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\"781e5e245d69b566979b86e28d23f2c7+10\": b\"0123456789\"})\n        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n',\n                             keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            self.assertEqual(b\"0123456789\", writer.readfrom(0, 13))\n            writer.seek(7, os.SEEK_SET)\n            writer.write(\"foo\")\n            self.assertEqual(writer.size(), 10)\n            self.assertEqual(b\"0123456foo\", writer.readfrom(0, 13))\n            self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:7:count.txt 10:3:count.txt\\n\", c.portable_manifest_text())\n\n    def test_write_across_segment_boundary(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\"781e5e245d69b566979b86e28d23f2c7+10\": b\"0123456789\"})\n        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt 0:10:count.txt\\n',\n                             keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            self.assertEqual(b\"012345678901234\", writer.readfrom(0, 15))\n            writer.seek(7, os.SEEK_SET)\n            writer.write(\"foobar\")\n            self.assertEqual(writer.size(), 20)\n            self.assertEqual(b\"0123456foobar34\", writer.readfrom(0, 15))\n            self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 3858f62230ac3c915f300c664312c63f+6 0:7:count.txt 10:6:count.txt 3:7:count.txt\\n\", c.portable_manifest_text())\n\n    def test_write_across_several_segments(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\"781e5e245d69b566979b86e28d23f2c7+10\": b\"0123456789\"})\n        with Collection('. 
781e5e245d69b566979b86e28d23f2c7+10 0:4:count.txt 0:4:count.txt 0:4:count.txt',\n                             keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            self.assertEqual(b\"012301230123\", writer.readfrom(0, 15))\n            writer.seek(2, os.SEEK_SET)\n            writer.write(\"abcdefg\")\n            self.assertEqual(writer.size(), 12)\n            self.assertEqual(b\"01abcdefg123\", writer.readfrom(0, 15))\n            self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 7ac66c0f148de9519b8bd264312c4d64+7 0:2:count.txt 10:7:count.txt 1:3:count.txt\\n\", c.portable_manifest_text())\n\n    def test_write_large(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        api = ArvadosFileWriterTestCase.MockApi({\"name\":\"test_write_large\",\n                                                 \"manifest_text\": \". a5de24f4417cfba9d5825eadc2f4ca49+67108000 598cc1a4ccaef8ab6e4724d87e675d78+32892000 0:100000000:count.txt\\n\",\n                                                 \"replication_desired\":None},\n                                                {\"uuid\":\"zzzzz-4zz18-mockcollection0\",\n                                                 \"manifest_text\": \". a5de24f4417cfba9d5825eadc2f4ca49+67108000 598cc1a4ccaef8ab6e4724d87e675d78+32892000 0:100000000:count.txt\\n\",\n                                                 \"portable_data_hash\":\"9132ca8e3f671c76103a38f5bc24328c+108\"})\n        with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',\n                             api_client=api, keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            text = \"0123456789\" * 100\n            for b in range(0, 100000):\n                writer.write(text)\n            self.assertEqual(writer.size(), 100000000)\n\n            self.assertIsNone(c.manifest_locator())\n            self.assertTrue(c.modified())\n            c.save_new(\"test_write_large\")\n            self.assertEqual(\"zzzzz-4zz18-mockcollection0\", c.manifest_locator())\n            self.assertFalse(c.modified())\n\n    def test_large_write(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        api = ArvadosFileWriterTestCase.MockApi({}, {})\n        with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',\n                             api_client=api, keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            self.assertEqual(writer.size(), 0)\n\n            text = \"0123456789\"\n            writer.write(text)\n            text = \"0123456789\" * 9999999\n            writer.write(text)\n            self.assertEqual(writer.size(), 100000000)\n\n            self.assertEqual(c.manifest_text(), \". 781e5e245d69b566979b86e28d23f2c7+10 48dd23ea1645fd47d789804d71b5bb8e+67108864 77c57dc6ac5a10bb2205caaa73187994+32891126 0:100000000:count.txt\\n\")\n\n    def test_sparse_write(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        api = ArvadosFileWriterTestCase.MockApi({}, {})\n        with Collection('. 
' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',\n                             api_client=api, keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            self.assertEqual(writer.size(), 0)\n\n            text = b\"0123456789\"\n            writer.seek(2)\n            writer.write(text)\n            self.assertEqual(writer.size(), 12)\n            writer.seek(0, os.SEEK_SET)\n            self.assertEqual(writer.read(), b\"\\x00\\x00\"+text)\n\n            self.assertEqual(c.manifest_text(), \". 7f614da9329cd3aebf59b91aadc30bf0+67108864 781e5e245d69b566979b86e28d23f2c7+10 0:2:count.txt 67108864:10:count.txt\\n\")\n\n    def test_sparse_write2(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        api = ArvadosFileWriterTestCase.MockApi({}, {})\n        with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',\n                             api_client=api, keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            self.assertEqual(writer.size(), 0)\n\n            text = \"0123456789\"\n            writer.seek((arvados.config.KEEP_BLOCK_SIZE*2) + 2)\n            writer.write(text)\n            self.assertEqual(writer.size(), (arvados.config.KEEP_BLOCK_SIZE*2) + 12)\n            writer.seek(0, os.SEEK_SET)\n\n            self.assertEqual(c.manifest_text(), \". 7f614da9329cd3aebf59b91aadc30bf0+67108864 781e5e245d69b566979b86e28d23f2c7+10 0:67108864:count.txt 0:67108864:count.txt 0:2:count.txt 67108864:10:count.txt\\n\")\n\n    def test_sparse_write3(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        api = ArvadosFileWriterTestCase.MockApi({}, {})\n        for r in [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0], [3, 2, 0, 4, 1]]:\n            with Collection(api_client=api, keep_client=keep) as c:\n                writer = c.open(\"count.txt\", \"rb+\")\n                self.assertEqual(writer.size(), 0)\n\n                for i in r:\n                    w = (\"%s\" % i) * 10\n                    writer.seek(i*10)\n                    writer.write(w.encode())\n                writer.seek(0)\n                self.assertEqual(writer.read(), b\"00000000001111111111222222222233333333334444444444\")\n\n    def test_sparse_write4(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        api = ArvadosFileWriterTestCase.MockApi({}, {})\n        for r in [[0, 1, 2, 4], [4, 2, 1, 0], [2, 0, 4, 1]]:\n            with Collection(api_client=api, keep_client=keep) as c:\n                writer = c.open(\"count.txt\", \"rb+\")\n                self.assertEqual(writer.size(), 0)\n\n                for i in r:\n                    w = (\"%s\" % i) * 10\n                    writer.seek(i*10)\n                    writer.write(w.encode())\n                writer.seek(0)\n                self.assertEqual(writer.read(), b\"000000000011111111112222222222\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x004444444444\")\n\n    def test_rewrite_on_empty_file(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',\n                             keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            for b in range(0, 10):\n                writer.seek(0, os.SEEK_SET)\n                writer.write(\"0123456789\")\n\n            self.assertEqual(writer.size(), 10)\n            self.assertEqual(b\"0123456789\", writer.readfrom(0, 20))\n            self.assertEqual(\". 
7a08b07e84641703e5f2c836aa59a170+100 90:10:count.txt\\n\", c.portable_manifest_text())\n            writer.flush()\n            self.assertEqual(writer.size(), 10)\n            self.assertEqual(b\"0123456789\", writer.readfrom(0, 20))\n            self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n\", c.portable_manifest_text())\n\n    def test_rewrite_append_existing_file(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\n            \"781e5e245d69b566979b86e28d23f2c7+10\": b\"0123456789\",\n        })\n        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt',\n                             keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            for b in range(0, 10):\n                writer.seek(10, os.SEEK_SET)\n                writer.write(\"abcdefghij\")\n\n            self.assertEqual(writer.size(), 20)\n            self.assertEqual(b\"0123456789abcdefghij\", writer.readfrom(0, 20))\n            self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 ae5f43bab79cf0be33f025fa97ae7398+100 0:10:count.txt 100:10:count.txt\\n\", c.portable_manifest_text())\n\n            writer.arvadosfile.flush()\n            self.assertEqual(writer.size(), 20)\n            self.assertEqual(b\"0123456789abcdefghij\", writer.readfrom(0, 20))\n            self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 a925576942e94b2ef57a066101b48876+10 0:20:count.txt\\n\", c.portable_manifest_text())\n\n    def test_rewrite_over_existing_file(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\n            \"781e5e245d69b566979b86e28d23f2c7+10\": b\"0123456789\",\n        })\n        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt',\n                             keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            for b in range(0, 10):\n                writer.seek(5, os.SEEK_SET)\n                writer.write(\"abcdefghij\")\n\n            self.assertEqual(writer.size(), 15)\n            self.assertEqual(b\"01234abcdefghij\", writer.readfrom(0, 20))\n            self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 ae5f43bab79cf0be33f025fa97ae7398+100 0:5:count.txt 100:10:count.txt\\n\", c.portable_manifest_text())\n\n            writer.arvadosfile.flush()\n\n            self.assertEqual(writer.size(), 15)\n            self.assertEqual(b\"01234abcdefghij\", writer.readfrom(0, 20))\n            self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 a925576942e94b2ef57a066101b48876+10 0:5:count.txt 10:10:count.txt\\n\", c.portable_manifest_text())\n\n    def test_write_large_rewrite(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        api = ArvadosFileWriterTestCase.MockApi({\"name\":\"test_write_large\",\n                                                 \"manifest_text\": \". 3dc0d4bc21f48060bedcb2c91af4f906+32892003 a5de24f4417cfba9d5825eadc2f4ca49+67108000 0:3:count.txt 32892006:67107997:count.txt 3:32892000:count.txt\\n\",\n                                                 \"replication_desired\":None},\n                                                {\"uuid\":\"zzzzz-4zz18-mockcollection0\",\n                                                 \"manifest_text\": \". 
3dc0d4bc21f48060bedcb2c91af4f906+32892003 a5de24f4417cfba9d5825eadc2f4ca49+67108000 0:3:count.txt 32892006:67107997:count.txt 3:32892000:count.txt\\n\",\n                                                 \"portable_data_hash\":\"217665c6b713e1b78dfba7ebd42344db+156\"})\n        with Collection('. ' + arvados.config.EMPTY_BLOCK_LOCATOR + ' 0:0:count.txt',\n                             api_client=api, keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"rb+\")\n            text = b''.join([b\"0123456789\" for a in range(0, 100)])\n            for b in range(0, 100000):\n                writer.write(text)\n            writer.seek(0, os.SEEK_SET)\n            writer.write(\"foo\")\n            self.assertEqual(writer.size(), 100000000)\n\n            self.assertIsNone(c.manifest_locator())\n            self.assertTrue(c.modified())\n            c.save_new(\"test_write_large\")\n            self.assertEqual(\"zzzzz-4zz18-mockcollection0\", c.manifest_locator())\n            self.assertFalse(c.modified())\n\n    def test_create(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        api = ArvadosFileWriterTestCase.MockApi({\n            \"name\":\"test_create\",\n            \"manifest_text\":\". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\\n\",\n            \"replication_desired\":None,\n        }, {\n            \"uuid\":\"zzzzz-4zz18-mockcollection0\",\n            \"manifest_text\":\". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\\n\",\n            \"portable_data_hash\":\"7a461a8c58601798f690f8b368ac4423+51\",\n        })\n        with Collection(api_client=api, keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"wb+\")\n            self.assertEqual(writer.size(), 0)\n            writer.write(\"01234567\")\n            self.assertEqual(writer.size(), 8)\n\n            self.assertIsNone(c.manifest_locator())\n            self.assertTrue(c.modified())\n            self.assertIsNone(keep.get(\"2e9ec317e197819358fbc43afca7d837+8\"))\n            c.save_new(\"test_create\")\n            self.assertEqual(\"zzzzz-4zz18-mockcollection0\", c.manifest_locator())\n            self.assertFalse(c.modified())\n            self.assertEqual(b\"01234567\", keep.get(\"2e9ec317e197819358fbc43afca7d837+8\"))\n\n\n    def test_create_subdir(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        api = ArvadosFileWriterTestCase.MockApi({\"name\":\"test_create\",\n                                                 \"manifest_text\":\"./foo/bar 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\\n\",\n                                                 \"replication_desired\":None},\n                                                {\"uuid\":\"zzzzz-4zz18-mockcollection0\",\n                                                 \"manifest_text\":\"./foo/bar 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\\n\",\n                                                 \"portable_data_hash\":\"1b02aaa62528d28a5be41651cbb9d7c7+59\"})\n        with Collection(api_client=api, keep_client=keep) as c:\n            self.assertIsNone(c.api_response())\n            writer = c.open(\"foo/bar/count.txt\", \"wb+\")\n            writer.write(\"01234567\")\n            self.assertFalse(c.committed())\n            c.save_new(\"test_create\")\n            self.assertTrue(c.committed())\n            self.assertEqual(c.api_response(), api.response)\n\n    def test_overwrite(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\"781e5e245d69b566979b86e28d23f2c7+10\": 
\"0123456789\"})\n        api = ArvadosFileWriterTestCase.MockApi({\"name\":\"test_overwrite\",\n                                                 \"manifest_text\":\". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\\n\",\n                                                 \"replication_desired\":None},\n                                                {\"uuid\":\"zzzzz-4zz18-mockcollection0\",\n                                                 \"manifest_text\":\". 2e9ec317e197819358fbc43afca7d837+8 0:8:count.txt\\n\",\n                                                 \"portable_data_hash\":\"7a461a8c58601798f690f8b368ac4423+51\"})\n        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n',\n                             api_client=api, keep_client=keep) as c:\n            writer = c.open(\"count.txt\", \"wb+\")\n            self.assertEqual(writer.size(), 0)\n            writer.write(\"01234567\")\n            self.assertEqual(writer.size(), 8)\n\n            self.assertIsNone(c.manifest_locator())\n            self.assertTrue(c.modified())\n            c.save_new(\"test_overwrite\")\n            self.assertEqual(\"zzzzz-4zz18-mockcollection0\", c.manifest_locator())\n            self.assertFalse(c.modified())\n\n    def test_file_not_found(self):\n        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n') as c:\n            with self.assertRaises(IOError):\n                writer = c.open(\"nocount.txt\", \"rb\")\n\n    def test_cannot_open_directory(self):\n        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n') as c:\n            with self.assertRaises(IOError):\n                writer = c.open(\".\", \"rb\")\n\n    def test_create_multiple(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        api = ArvadosFileWriterTestCase.MockApi({\"name\":\"test_create_multiple\",\n                                                 \"manifest_text\":\". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:8:count1.txt 8:8:count2.txt\\n\",\n                                                 \"replication_desired\":None},\n                                                {\"uuid\":\"zzzzz-4zz18-mockcollection0\",\n                                                 \"manifest_text\":\". 
2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:8:count1.txt 8:8:count2.txt\\n\",\n                                                 \"portable_data_hash\":\"71e7bb6c00d31fc2b4364199fd97be08+102\"})\n        with Collection(api_client=api, keep_client=keep) as c:\n            w1 = c.open(\"count1.txt\", \"wb\")\n            w2 = c.open(\"count2.txt\", \"wb\")\n            w1.write(\"01234567\")\n            w2.write(\"abcdefgh\")\n            self.assertEqual(w1.size(), 8)\n            self.assertEqual(w2.size(), 8)\n\n            self.assertIsNone(c.manifest_locator())\n            self.assertTrue(c.modified())\n            self.assertIsNone(keep.get(\"2e9ec317e197819358fbc43afca7d837+8\"))\n            c.save_new(\"test_create_multiple\")\n            self.assertEqual(\"zzzzz-4zz18-mockcollection0\", c.manifest_locator())\n            self.assertFalse(c.modified())\n            self.assertEqual(b\"01234567\", keep.get(\"2e9ec317e197819358fbc43afca7d837+8\"))\n\n\nclass ArvadosFileReaderTestCase(unittest.TestCase, StreamFileReaderTestMixin):\n    class MockParent(object):\n        class MockBlockMgr(object):\n            def __init__(self, blocks, nocache):\n                self.blocks = blocks\n                self.nocache = nocache\n                self._keep = ArvadosFileWriterTestCase.MockKeep({})\n                self.prefetch_lookahead = 0\n\n            def block_prefetch(self, loc):\n                pass\n\n            def get_block_contents(self, loc, num_retries=0, cache_only=False):\n                if self.nocache and cache_only:\n                    return None\n                return self.blocks[loc]\n\n        def __init__(self, blocks, nocache):\n            self.blocks = blocks\n            self.nocache = nocache\n            self.lock = arvados.arvfile.NoopLock()\n\n        def root_collection(self):\n            return self\n\n        def _my_block_manager(self):\n            return ArvadosFileReaderTestCase.MockParent.MockBlockMgr(self.blocks, self.nocache)\n\n\n    def make_file_reader(self, name='emptyfile', data='', nocache=False):\n        loc = tutil.str_keep_locator(data)\n        af = ArvadosFile(ArvadosFileReaderTestCase.MockParent({loc: data}, nocache=nocache), name, stream=[Range(loc, 0, len(data))], segments=[Range(0, 0, len(data))])\n        return ArvadosFileReader(af, mode='rb')\n\n    def make_count_reader(self, nocache=False):\n        stream = []\n        n = 0\n        blocks = {}\n        for d in [b'01234', b'34567', b'67890']:\n            loc = tutil.str_keep_locator(d)\n            blocks[loc] = d\n            stream.append(Range(loc, n, len(d)))\n            n += len(d)\n        af = ArvadosFile(ArvadosFileReaderTestCase.MockParent(blocks, nocache=nocache), \"count.txt\", stream=stream, segments=[Range(1, 0, 3), Range(6, 3, 3), Range(11, 6, 3)])\n        return ArvadosFileReader(af, mode=\"rb\")\n\n    def make_newlines_reader(self, nocache=False):\n        stream = []\n        segments = []\n        n = 0\n        blocks = {}\n        for d in [b'one\\ntwo\\n\\nth', b'ree\\nfour\\n\\n']:\n            loc = tutil.str_keep_locator(d)\n            blocks[loc] = d\n            stream.append(Range(loc, n, len(d)))\n            segments.append(Range(n, n, len(d)))\n            n += len(d)\n        af = ArvadosFile(ArvadosFileReaderTestCase.MockParent(blocks, nocache=nocache), \"count.txt\", stream=stream, segments=segments)\n        return ArvadosFileReader(af, mode=\"rb\")\n\n    def 
test_read_block_crossing_behavior(self):\n        # read() needs to return all the data requested if possible, even if it\n        # crosses uncached blocks: https://arvados.org/issues/5856\n        sfile = self.make_count_reader(nocache=True)\n        self.assertEqual(b'12345678', sfile.read(8))\n\n    def test_tell_after_block_read(self):\n        sfile = self.make_count_reader(nocache=True)\n        self.assertEqual(b'12345678', sfile.read(8))\n        self.assertEqual(8, sfile.tell())\n\n    def test_prefetch(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\n            \"2e9ec317e197819358fbc43afca7d837+8\": b\"01234567\",\n            \"e8dc4081b13434b45189a720b77b6818+8\": b\"abcdefgh\",\n        })\n        with Collection(\". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:16:count.txt\\n\", keep_client=keep) as c:\n            r = c.open(\"count.txt\", \"rb\")\n            self.assertEqual(b\"0123\", r.read(4))\n        self.assertEqual([\"2e9ec317e197819358fbc43afca7d837+8\",\n                          \"e8dc4081b13434b45189a720b77b6818+8\"], keep.requests)\n\n    def test_prefetch_disabled(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\n            \"2e9ec317e197819358fbc43afca7d837+8\": b\"01234567\",\n            \"e8dc4081b13434b45189a720b77b6818+8\": b\"abcdefgh\",\n        })\n        keep.num_prefetch_threads = 0\n        with Collection(\". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:16:count.txt\\n\", keep_client=keep) as c:\n            r = c.open(\"count.txt\", \"rb\")\n            self.assertEqual(b\"0123\", r.read(4))\n\n        self.assertEqual([\"2e9ec317e197819358fbc43afca7d837+8\"], keep.requests)\n\n    def test_prefetch_first_read_only(self):\n        # test behavior that prefetch only happens every 128 reads\n        # check that it doesn't make a prefetch request on the second read\n        keep = ArvadosFileWriterTestCase.MockKeep({\n            \"2e9ec317e197819358fbc43afca7d837+8\": b\"01234567\",\n            \"e8dc4081b13434b45189a720b77b6818+8\": b\"abcdefgh\",\n        })\n        with Collection(\". 2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:16:count.txt\\n\", keep_client=keep) as c:\n            r = c.open(\"count.txt\", \"rb\")\n            self.assertEqual(b\"0123\", r.read(4))\n            self.assertEqual(b\"45\", r.read(2))\n        self.assertEqual([\"2e9ec317e197819358fbc43afca7d837+8\",\n                          \"e8dc4081b13434b45189a720b77b6818+8\",\n                          \"2e9ec317e197819358fbc43afca7d837+8\"], keep.requests)\n        self.assertEqual(3, len(keep.requests))\n\n    def test_prefetch_again(self):\n        # test behavior that prefetch only happens every 128 reads\n        # check that it does make another prefetch request after 128 reads\n        keep = ArvadosFileWriterTestCase.MockKeep({\n            \"2e9ec317e197819358fbc43afca7d837+8\": b\"01234567\",\n            \"e8dc4081b13434b45189a720b77b6818+8\": b\"abcdefgh\",\n        })\n        with Collection(\". 
2e9ec317e197819358fbc43afca7d837+8 e8dc4081b13434b45189a720b77b6818+8 0:16:count.txt\\n\", keep_client=keep) as c:\n            r = c.open(\"count.txt\", \"rb\")\n            for i in range(0, 129):\n                r.seek(0)\n                self.assertEqual(b\"0123\", r.read(4))\n        self.assertEqual([\"2e9ec317e197819358fbc43afca7d837+8\",\n                          \"e8dc4081b13434b45189a720b77b6818+8\",\n                          \"2e9ec317e197819358fbc43afca7d837+8\",\n                          \"2e9ec317e197819358fbc43afca7d837+8\"], keep.requests[0:4])\n        self.assertEqual([\"2e9ec317e197819358fbc43afca7d837+8\",\n                          \"2e9ec317e197819358fbc43afca7d837+8\",\n                          \"2e9ec317e197819358fbc43afca7d837+8\",\n                          \"e8dc4081b13434b45189a720b77b6818+8\"], keep.requests[127:131])\n        # gets the 1st block 129 times from keep (cache),\n        # and the 2nd block twice to get 131 requests\n        self.assertEqual(131, len(keep.requests))\n\n    def test__eq__from_manifest(self):\n        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c1:\n            with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c2:\n                self.assertTrue(c1[\"count1.txt\"] == c2[\"count1.txt\"])\n                self.assertFalse(c1[\"count1.txt\"] != c2[\"count1.txt\"])\n\n    def test__eq__from_writes(self):\n        with Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c1:\n            with Collection() as c2:\n                f = c2.open(\"count1.txt\", \"wb\")\n                f.write(\"0123456789\")\n\n                self.assertTrue(c1[\"count1.txt\"] == c2[\"count1.txt\"])\n                self.assertFalse(c1[\"count1.txt\"] != c2[\"count1.txt\"])\n\n    def test__ne__(self):\n        with Collection('. 
781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt') as c1:\n            with Collection() as c2:\n                f = c2.open(\"count1.txt\", \"wb\")\n                f.write(\"1234567890\")\n\n                self.assertTrue(c1[\"count1.txt\"] != c2[\"count1.txt\"])\n                self.assertFalse(c1[\"count1.txt\"] == c2[\"count1.txt\"])\n\n\nclass ArvadosFileReadTestCase(unittest.TestCase, StreamRetryTestMixin):\n    def reader_for(self, coll_name, **kwargs):\n        stream = []\n        segments = []\n        n = 0\n        for d in self.manifest_for(coll_name).split():\n            try:\n                k = KeepLocator(d)\n                segments.append(Range(n, n, k.size))\n                stream.append(Range(d, n, k.size))\n                n += k.size\n            except ValueError:\n                pass\n\n        blockmanager = arvados.arvfile._BlockManager(self.keep_client())\n        blockmanager.prefetch_enabled = False\n        col = Collection(keep_client=self.keep_client(), block_manager=blockmanager)\n        af = ArvadosFile(col, \"test\",\n                         stream=stream,\n                         segments=segments)\n        kwargs.setdefault('mode', 'rb')\n        return ArvadosFileReader(af, **kwargs)\n\n    def read_for_test(self, reader, byte_count, **kwargs):\n        return reader.read(byte_count, **kwargs)\n\n\nclass ArvadosFileReadFromTestCase(ArvadosFileReadTestCase):\n    def read_for_test(self, reader, byte_count, **kwargs):\n        return reader.readfrom(0, byte_count, **kwargs)\n\n\nclass ArvadosFileReadAllTestCase(ArvadosFileReadTestCase):\n    def read_for_test(self, reader, byte_count, **kwargs):\n        return b''.join(reader.readall(**kwargs))\n\n\nclass ArvadosFileReadAllDecompressedTestCase(ArvadosFileReadTestCase):\n    def read_for_test(self, reader, byte_count, **kwargs):\n        return b''.join(reader.readall_decompressed(**kwargs))\n\n\nclass ArvadosFileReadlinesTestCase(ArvadosFileReadTestCase):\n    def read_for_test(self, reader, byte_count, **kwargs):\n        return ''.join(reader.readlines(**kwargs)).encode()\n\n\nclass ArvadosFileTestCase(unittest.TestCase):\n    def datetime_to_hex(self, dt):\n        return hex(int(time.mktime(dt.timetuple())))[2:]\n\n    def test_permission_expired(self):\n        base_manifest = \". 
781e5e245d69b566979b86e28d23f2c7+10+A715fd31f8111894f717eb1003c1b0216799dd9ec@{} 0:10:count.txt\\n\"\n        now = datetime.datetime.now()\n        a_week_ago = now - datetime.timedelta(days=7)\n        a_month_ago = now - datetime.timedelta(days=30)\n        a_week_from_now = now + datetime.timedelta(days=7)\n        with Collection(base_manifest.format(self.datetime_to_hex(a_week_from_now))) as c:\n            self.assertFalse(c.find('count.txt').permission_expired())\n        with Collection(base_manifest.format(self.datetime_to_hex(a_week_ago))) as c:\n            f = c.find('count.txt')\n            self.assertTrue(f.permission_expired())\n            self.assertTrue(f.permission_expired(a_week_from_now))\n            self.assertFalse(f.permission_expired(a_month_ago))\n\n\nclass BlockManagerTest(unittest.TestCase):\n    def test_bufferblock_append(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        with arvados.arvfile._BlockManager(keep) as blockmanager:\n            bufferblock = blockmanager.alloc_bufferblock()\n            bufferblock.append(\"foo\")\n\n            self.assertEqual(bufferblock.size(), 3)\n            self.assertEqual(bufferblock.buffer_view[0:3], b\"foo\")\n            self.assertEqual(bufferblock.locator(), \"acbd18db4cc2f85cedef654fccc4a4d8+3\")\n\n            bufferblock.append(\"bar\")\n\n            self.assertEqual(bufferblock.size(), 6)\n            self.assertEqual(bufferblock.buffer_view[0:6], b\"foobar\")\n            self.assertEqual(bufferblock.locator(), \"3858f62230ac3c915f300c664312c63f+6\")\n\n            bufferblock.set_state(arvados.arvfile._BufferBlock.PENDING)\n            with self.assertRaises(arvados.errors.AssertionError):\n                bufferblock.append(\"bar\")\n\n    def test_bufferblock_dup(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({})\n        with arvados.arvfile._BlockManager(keep) as blockmanager:\n            bufferblock = blockmanager.alloc_bufferblock()\n            bufferblock.append(\"foo\")\n\n            self.assertEqual(bufferblock.size(), 3)\n            self.assertEqual(bufferblock.buffer_view[0:3], b\"foo\")\n            self.assertEqual(bufferblock.locator(), \"acbd18db4cc2f85cedef654fccc4a4d8+3\")\n            bufferblock.set_state(arvados.arvfile._BufferBlock.PENDING)\n\n            bufferblock2 = blockmanager.dup_block(bufferblock, None)\n            self.assertNotEqual(bufferblock.blockid, bufferblock2.blockid)\n\n            bufferblock2.append(\"bar\")\n\n            self.assertEqual(bufferblock2.size(), 6)\n            self.assertEqual(bufferblock2.buffer_view[0:6], b\"foobar\")\n            self.assertEqual(bufferblock2.locator(), \"3858f62230ac3c915f300c664312c63f+6\")\n\n            self.assertEqual(bufferblock.size(), 3)\n            self.assertEqual(bufferblock.buffer_view[0:3], b\"foo\")\n            self.assertEqual(bufferblock.locator(), \"acbd18db4cc2f85cedef654fccc4a4d8+3\")\n\n    def test_bufferblock_get(self):\n        keep = ArvadosFileWriterTestCase.MockKeep({\n            \"781e5e245d69b566979b86e28d23f2c7+10\": b\"0123456789\",\n        })\n        with arvados.arvfile._BlockManager(keep) as blockmanager:\n            bufferblock = blockmanager.alloc_bufferblock()\n            bufferblock.append(\"foo\")\n\n            self.assertEqual(blockmanager.get_block_contents(\"781e5e245d69b566979b86e28d23f2c7+10\", 1), b\"0123456789\")\n            self.assertEqual(blockmanager.get_block_contents(bufferblock.blockid, 1), b\"foo\")\n\n    def 
test_bufferblock_commit(self):\n        mockkeep = mock.MagicMock()\n        with arvados.arvfile._BlockManager(mockkeep) as blockmanager:\n            bufferblock = blockmanager.alloc_bufferblock()\n            bufferblock.owner = mock.MagicMock(spec=arvados.arvfile.ArvadosFile)\n            def flush(sync=None):\n                blockmanager.commit_bufferblock(bufferblock, sync)\n            bufferblock.owner.flush.side_effect = flush\n            bufferblock.append(\"foo\")\n            blockmanager.commit_all()\n            self.assertTrue(bufferblock.owner.flush.called)\n            self.assertTrue(mockkeep.put.called)\n            self.assertEqual(bufferblock.state(), arvados.arvfile._BufferBlock.COMMITTED)\n            self.assertIsNone(bufferblock.buffer_view)\n\n    def test_bufferblock_commit_pending(self):\n        # Test for bug #7225\n        mockkeep = mock.MagicMock()\n        mockkeep.put.side_effect = lambda *args, **kwargs: time.sleep(1)\n        with arvados.arvfile._BlockManager(mockkeep) as blockmanager:\n            bufferblock = blockmanager.alloc_bufferblock()\n            bufferblock.append(\"foo\")\n\n            blockmanager.commit_bufferblock(bufferblock, False)\n            self.assertEqual(bufferblock.state(), arvados.arvfile._BufferBlock.PENDING)\n\n            blockmanager.commit_bufferblock(bufferblock, True)\n            self.assertEqual(bufferblock.state(), arvados.arvfile._BufferBlock.COMMITTED)\n\n    def test_bufferblock_commit_with_error(self):\n        mockkeep = mock.MagicMock()\n        mockkeep.put.side_effect = arvados.errors.KeepWriteError(\"fail\")\n        with arvados.arvfile._BlockManager(mockkeep) as blockmanager:\n            bufferblock = blockmanager.alloc_bufferblock()\n            bufferblock.owner = mock.MagicMock(spec=arvados.arvfile.ArvadosFile)\n            def flush(sync=None):\n                blockmanager.commit_bufferblock(bufferblock, sync)\n            bufferblock.owner.flush.side_effect = flush\n            bufferblock.append(\"foo\")\n            with self.assertRaises(arvados.errors.KeepWriteError) as err:\n                blockmanager.commit_all()\n            self.assertTrue(bufferblock.owner.flush.called)\n            self.assertEqual(str(err.exception), \"Error writing some blocks: block acbd18db4cc2f85cedef654fccc4a4d8+3 raised KeepWriteError (fail)\")\n            self.assertEqual(bufferblock.state(), arvados.arvfile._BufferBlock.ERROR)\n"
  },
  {
    "path": "sdk/python/tests/test_basedirs.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport stat\n\nimport pytest\n\nfrom pathlib import Path\n\nfrom arvados._internal import basedirs\n\nclass TestBaseDirectories:\n    SELF_PATH = Path(__file__)\n\n    @pytest.fixture\n    def dir_spec(self, tmp_path):\n        return basedirs.BaseDirectorySpec(\n            'TEST_DIRECTORY',\n            'XDG_TEST_HOME',\n            Path('.test'),\n            'XDG_TEST_DIRS',\n            f\"{tmp_path / '.test1'}:{tmp_path / '.test2'}\",\n        )\n\n    @pytest.fixture\n    def env(self, tmp_path):\n        return {'HOME': str(tmp_path)}\n\n    @pytest.fixture\n    def umask(self):\n        orig_umask = os.umask(0o002)\n        try:\n            yield\n        finally:\n            os.umask(orig_umask)\n\n    def test_search_systemd_dirs(self, dir_spec, env, tmp_path):\n        env['TEST_DIRECTORY'] = f'{tmp_path}:{self.SELF_PATH.parent}'\n        dirs = basedirs.BaseDirectories(dir_spec, env, 'tests')\n        actual = list(dirs.search(self.SELF_PATH.name))\n        assert actual == [self.SELF_PATH]\n\n    def test_search_xdg_home(self, dir_spec, env, tmp_path):\n        env['XDG_TEST_HOME'] = str(self.SELF_PATH.parent.parent)\n        dirs = basedirs.BaseDirectories(dir_spec, env, 'tests')\n        actual = list(dirs.search(self.SELF_PATH.name))\n        assert actual == [self.SELF_PATH]\n\n    def test_search_xdg_dirs(self, dir_spec, env, tmp_path):\n        env['XDG_TEST_DIRS'] = f'{tmp_path}:{self.SELF_PATH.parent.parent}'\n        dirs = basedirs.BaseDirectories(dir_spec, env, 'tests')\n        actual = list(dirs.search(self.SELF_PATH.name))\n        assert actual == [self.SELF_PATH]\n\n    def test_search_all_dirs(self, dir_spec, env, tmp_path):\n        env['TEST_DIRECTORY'] = f'{tmp_path}:{self.SELF_PATH.parent}'\n        env['XDG_TEST_HOME'] = str(self.SELF_PATH.parent.parent)\n        env['XDG_TEST_DIRS'] = f'{tmp_path}:{self.SELF_PATH.parent.parent}'\n        dirs = basedirs.BaseDirectories(dir_spec, env, 'tests')\n        actual = list(dirs.search(self.SELF_PATH.name))\n        assert actual == [self.SELF_PATH, self.SELF_PATH, self.SELF_PATH]\n\n    def test_search_paths(self, dir_spec, env, tmp_path):\n        env['TEST_DIRECTORY'] = f'{tmp_path}:{self.SELF_PATH.parent}'\n        env['XDG_TEST_HOME'] = str(self.SELF_PATH.parent.parent)\n        env['XDG_TEST_DIRS'] = f'{tmp_path}:{self.SELF_PATH.parent.parent}'\n        dirs = basedirs.BaseDirectories(dir_spec, env, 'tests')\n        actual = list(dirs.search_paths())\n        assert actual == [tmp_path, self.SELF_PATH.parent, self.SELF_PATH.parent, tmp_path / 'tests', self.SELF_PATH.parent.parent / 'tests']\n\n    def test_search_default_home(self, dir_spec, env, tmp_path):\n        expected = tmp_path / dir_spec.xdg_home_default / 'default_home'\n        expected.parent.mkdir()\n        expected.touch()\n        dirs = basedirs.BaseDirectories(dir_spec, env, '.')\n        actual = list(dirs.search(expected.name))\n        assert actual == [expected]\n\n    def test_search_default_dirs(self, dir_spec, env, tmp_path):\n        _, _, default_dir = dir_spec.xdg_dirs_default.rpartition(':')\n        expected = Path(default_dir, 'default_dirs')\n        expected.parent.mkdir()\n        expected.touch()\n        dirs = basedirs.BaseDirectories(dir_spec, env, '.')\n        actual = list(dirs.search(expected.name))\n        assert actual == [expected]\n\n    def test_search_no_default_dirs(self, 
dir_spec, env, tmp_path):\n        dir_spec.xdg_dirs_key = None\n        dir_spec.xdg_dirs_default = None\n        for subdir in ['.test1', '.test2', dir_spec.xdg_home_default]:\n            expected = tmp_path / subdir / 'no_dirs'\n            expected.parent.mkdir()\n            expected.touch()\n        dirs = basedirs.BaseDirectories(dir_spec, env, '.')\n        actual = list(dirs.search(expected.name))\n        assert actual == [expected]\n\n    def test_ignore_relative_directories(self, dir_spec, env, tmp_path):\n        test_path = Path(*self.SELF_PATH.parts[-2:])\n        assert test_path.exists(), \"test setup problem: need an existing file in a subdirectory of .\"\n        parent_path = str(test_path.parent)\n        env['TEST_DIRECTORY'] = '.'\n        env['XDG_TEST_HOME'] = parent_path\n        env['XDG_TEST_DIRS'] = parent_path\n        dirs = basedirs.BaseDirectories(dir_spec, env, parent_path)\n        assert not list(dirs.search(test_path.name))\n\n    def test_search_warns_nondefault_home(self, dir_spec, env, tmp_path, caplog):\n        search_path = tmp_path / dir_spec.xdg_home_default / 'Search' / 'SearchConfig'\n        search_path.parent.mkdir(parents=True)\n        search_path.touch()\n        env[dir_spec.xdg_home_key] = str(tmp_path / '.nonexistent')\n        dirs = basedirs.BaseDirectories(dir_spec, env, search_path.parent.name)\n        results = list(dirs.search(search_path.name))\n        expect_msg = \"{} was not found under your configured ${} ({}), but does exist at the default location ({})\".format(\n            Path(*search_path.parts[-2:]),\n            dir_spec.xdg_home_key,\n            env[dir_spec.xdg_home_key],\n            Path(*search_path.parts[:-2]),\n        )\n        assert caplog.messages\n        assert any(msg.startswith(expect_msg) for msg in caplog.messages)\n        assert not results\n\n    def test_storage_path_systemd(self, dir_spec, env, tmp_path):\n        expected = tmp_path / 'rwsystemd'\n        expected.mkdir(0o700)\n        env['TEST_DIRECTORY'] = str(expected)\n        dirs = basedirs.BaseDirectories(dir_spec, env)\n        assert dirs.storage_path() == expected\n\n    def test_storage_path_systemd_mixed_modes(self, dir_spec, env, tmp_path):\n        rodir = tmp_path / 'rodir'\n        rodir.mkdir(0o500)\n        expected = tmp_path / 'rwdir'\n        expected.mkdir(0o700)\n        env['TEST_DIRECTORY'] = f'{rodir}:{expected}'\n        dirs = basedirs.BaseDirectories(dir_spec, env)\n        assert dirs.storage_path() == expected\n\n    def test_storage_path_xdg_home(self, dir_spec, env, tmp_path):\n        expected = tmp_path / '.xdghome' / 'arvados'\n        env['XDG_TEST_HOME'] = str(expected.parent)\n        dirs = basedirs.BaseDirectories(dir_spec, env)\n        assert dirs.storage_path() == expected\n        exp_mode = stat.S_IFDIR | stat.S_IWUSR\n        assert (expected.stat().st_mode & exp_mode) == exp_mode\n\n    def test_storage_path_default(self, dir_spec, env, tmp_path):\n        expected = tmp_path / dir_spec.xdg_home_default / 'arvados'\n        dirs = basedirs.BaseDirectories(dir_spec, env)\n        assert dirs.storage_path() == expected\n        exp_mode = stat.S_IFDIR | stat.S_IWUSR\n        assert (expected.stat().st_mode & exp_mode) == exp_mode\n\n    @pytest.mark.parametrize('subdir,mode', [\n        ('str/dir', 0o750),\n        (Path('sub', 'path'), 0o770),\n    ])\n    def test_storage_path_subdir(self, dir_spec, env, umask, tmp_path, subdir, mode):\n        expected = tmp_path / dir_spec.xdg_home_default 
/ 'arvados' / subdir\n        dirs = basedirs.BaseDirectories(dir_spec, env)\n        actual = dirs.storage_path(subdir, mode)\n        assert actual == expected\n        expect_mode = mode | stat.S_IFDIR\n        actual_mode = actual.stat().st_mode\n        assert (actual_mode & expect_mode) == expect_mode\n        assert not (actual_mode & stat.S_IRWXO)\n\n    def test_empty_xdg_home(self, dir_spec, env, tmp_path):\n        env['XDG_TEST_HOME'] = ''\n        expected = tmp_path / dir_spec.xdg_home_default / 'emptyhome'\n        dirs = basedirs.BaseDirectories(dir_spec, env, expected.name)\n        assert dirs.storage_path() == expected\n\n    def test_empty_xdg_dirs(self, dir_spec, env, tmp_path):\n        env['XDG_TEST_DIRS'] = ''\n        _, _, default_dir = dir_spec.xdg_dirs_default.rpartition(':')\n        expected = Path(default_dir, 'empty_dirs')\n        expected.parent.mkdir()\n        expected.touch()\n        dirs = basedirs.BaseDirectories(dir_spec, env, '.')\n        actual = list(dirs.search(expected.name))\n        assert actual == [expected]\n\n    def test_spec_key_lookup(self):\n        dirs = basedirs.BaseDirectories('CACHE')\n        assert dirs._spec.systemd_key == 'CACHE_DIRECTORY'\n        assert dirs._spec.xdg_dirs_key is None\n\n    def test_spec_enum_lookup(self):\n        dirs = basedirs.BaseDirectories(basedirs.BaseDirectorySpecs.CONFIG)\n        assert dirs._spec.systemd_key == 'CONFIGURATION_DIRECTORY'\n"
  },
  {
    "path": "sdk/python/tests/test_benchmark_collections.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport arvados\nimport sys\n\nfrom . import run_test_server\nfrom . import arvados_testutil as tutil\nfrom . import manifest_examples\nfrom .performance.performance_profiler import profiled\n\nclass CollectionBenchmark(run_test_server.TestCaseWithServers,\n                          tutil.ArvadosBaseTestCase,\n                          manifest_examples.ManifestExamples):\n    MAIN_SERVER = {}\n    TEST_BLOCK_SIZE = 0\n\n    @classmethod\n    def list_recursive(cls, coll, parent_name=None):\n        if parent_name is None:\n            current_name = coll.stream_name()\n        else:\n            current_name = '{}/{}'.format(parent_name, coll.name)\n        try:\n            for name in coll:\n                for item in cls.list_recursive(coll[name], current_name):\n                    yield item\n        except TypeError:\n            yield current_name\n\n    @classmethod\n    def setUpClass(cls):\n        super(CollectionBenchmark, cls).setUpClass()\n        run_test_server.authorize_with('active')\n        cls.api_client = arvados.api('v1')\n        cls.keep_client = arvados.KeepClient(api_client=cls.api_client,\n                                             local_store=cls.local_store)\n\n    @profiled\n    def profile_new_collection_from_manifest(self, manifest_text):\n        return arvados.collection.Collection(manifest_text)\n\n    @profiled\n    def profile_new_collection_from_server(self, uuid):\n        return arvados.collection.Collection(uuid)\n\n    @profiled\n    def profile_new_collection_copying_bytes_from_collection(self, src):\n        dst = arvados.collection.Collection()\n        with tutil.mock_keep_responses('x'*self.TEST_BLOCK_SIZE, 200):\n            for name in self.list_recursive(src):\n                with src.open(name, 'rb') as srcfile, dst.open(name, 'wb') as dstfile:\n                    dstfile.write(srcfile.read())\n            dst.save_new()\n\n    @profiled\n    def profile_new_collection_copying_files_from_collection(self, src):\n        dst = arvados.collection.Collection()\n        with tutil.mock_keep_responses('x'*self.TEST_BLOCK_SIZE, 200):\n            for name in self.list_recursive(src):\n                dst.copy(name, name, src)\n            dst.save_new()\n\n    @profiled\n    def profile_collection_list_files(self, coll):\n        return sum(1 for name in self.list_recursive(coll))\n\n    def test_medium_sized_manifest(self):\n        \"\"\"Exercise manifest-handling code.\n\n        Currently, this test puts undue emphasis on some code paths\n        that don't reflect typical use because the contrived example\n        manifest has some unusual characteristics:\n\n        * Block size is zero.\n\n        * Every block is identical, so block caching patterns are\n          unrealistic.\n\n        * Every file begins and ends at a block boundary.\n        \"\"\"\n        specs = {\n            'streams': 100,\n            'files_per_stream': 100,\n            'blocks_per_file': 20,\n            'bytes_per_block': self.TEST_BLOCK_SIZE,\n        }\n        my_manifest = self.make_manifest(**specs)\n\n        coll = self.profile_new_collection_from_manifest(my_manifest)\n\n        coll.save_new()\n        self.profile_new_collection_from_server(coll.manifest_locator())\n\n        num_items = self.profile_collection_list_files(coll)\n        self.assertEqual(num_items, specs['streams'] * specs['files_per_stream'])\n\n        
self.profile_new_collection_copying_bytes_from_collection(coll)\n\n        self.profile_new_collection_copying_files_from_collection(coll)\n"
  },
  {
    "path": "sdk/python/tests/test_cmd_util.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport contextlib\nimport re\nimport copy\nimport itertools\nimport functools\nimport json\nimport os\nimport io\nimport tempfile\nimport unittest\nimport argparse\n\nfrom pathlib import Path\n\nimport pytest\nfrom parameterized import parameterized\n\nimport arvados.commands._util as cmd_util\n\nFILE_PATH = Path(__file__)\n\nclass ValidateFiltersTestCase(unittest.TestCase):\n    NON_FIELD_TYPES = [\n        None,\n        123,\n        ('name', '=', 'tuple'),\n        {'filters': ['name', '=', 'object']},\n    ]\n    NON_FILTER_TYPES = NON_FIELD_TYPES + ['string']\n    VALID_FILTERS = [\n        ['owner_uuid', '=', 'zzzzz-tpzed-12345abcde67890'],\n        ['name', 'in', ['foo', 'bar']],\n        '(replication_desired > replication_confirmed)',\n        '(replication_confirmed>=replication_desired)',\n    ]\n\n    @parameterized.expand(itertools.combinations(VALID_FILTERS, 2))\n    def test_valid_filters(self, f1, f2):\n        expected = [f1, f2]\n        actual = cmd_util.validate_filters(copy.deepcopy(expected))\n        self.assertEqual(actual, expected)\n\n    @parameterized.expand([(t,) for t in NON_FILTER_TYPES])\n    def test_filters_wrong_type(self, value):\n        with self.assertRaisesRegex(ValueError, r'^filters are not a list\\b'):\n            cmd_util.validate_filters(value)\n\n    @parameterized.expand([(t,) for t in NON_FIELD_TYPES])\n    def test_single_filter_wrong_type(self, value):\n        with self.assertRaisesRegex(ValueError, r'^filter at index 0 is not a string or list\\b'):\n            cmd_util.validate_filters([value])\n\n    @parameterized.expand([\n        ([],),\n        (['owner_uuid'],),\n        (['owner_uuid', 'zzzzz-tpzed-12345abcde67890'],),\n        (['name', 'not in', 'foo', 'bar'],),\n        (['name', 'in', 'foo', 'bar', 'baz'],),\n    ])\n    def test_filters_wrong_arity(self, value):\n        with self.assertRaisesRegex(ValueError, r'^filter at index 0 does not have three items\\b'):\n            cmd_util.validate_filters([value])\n\n    @parameterized.expand(itertools.product(\n        [0, 1],\n        NON_FIELD_TYPES,\n    ))\n    def test_filter_definition_wrong_type(self, index, bad_value):\n        value = ['owner_uuid', '=', 'zzzzz-tpzed-12345abcde67890']\n        value[index] = bad_value\n        name = ('field name', 'operator')[index]\n        with self.assertRaisesRegex(ValueError, rf'^filter at index 0 {name} is not a string\\b'):\n            cmd_util.validate_filters([value])\n\n    @parameterized.expand([\n        # Not enclosed in parentheses\n        'foo = bar',\n        '(foo) < bar',\n        'foo > (bar)',\n        # Not exactly one operator\n        '(a >= b >= c)',\n        '(foo)',\n        '(file_count version)',\n        # Invalid field identifiers\n        '(version = 1)',\n        '(2 = file_count)',\n        '(replication.desired <= replication.confirmed)',\n        # Invalid whitespace\n        '(file_count\\t=\\tversion)',\n        '(file_count >= version\\n)',\n    ])\n    def test_invalid_string_filter(self, value):\n        with self.assertRaisesRegex(ValueError, r'^filter at index 0 has invalid syntax\\b'):\n            cmd_util.validate_filters([value])\n\n\n# Used for matching verbatim error messages.\ndef verbatim(text: str) -> str:\n    return \"^\" + re.escape(text) + \"$\"\n\n\ndef _get_json_decode_error(text: str) -> str:\n    try:\n        json.loads(text)\n    except json.JSONDecodeError 
as err:\n        return str(err)\n\n\nJSON_OBJECTS = (\n    None,\n    123,\n    -456.789,\n    'string',\n    ['list', 1],\n    {'object': True, 'yaml': False},\n)\nINVALID_JSON = (\"\", \"\\n\", \"\\0\", \"foo\", \"[0, 1,]\", \"{\", \"'foo'\")\n\n\nclass TestJSONStringArgument:\n\n    def test_init_loader_not_callable(self):\n        bad_arg_type = cmd_util.JSONStringArgument(loader=1)\n        with pytest.raises(TypeError, match='is not callable'):\n            bad_arg_type('\"foo\"')\n\n    def test_init_validator_not_callable(self):\n        value = '\"foo\"'\n        bad_fcn = 1\n        bad_fcn_type_name = type(bad_fcn).__name__\n        arg_type_name = \"test widget\"\n        msg_match = re.escape(\n            f\"{value!r} is not valid {arg_type_name}:\"\n            f\" {bad_fcn_type_name!r} object is not callable\"\n        )\n        bad_arg_type = cmd_util.JSONStringArgument(\n            validator=bad_fcn, pretty_name=arg_type_name)\n        with pytest.raises(argparse.ArgumentTypeError, match=msg_match):\n            bad_arg_type(value)\n\n    def test_init_pretty_name_false(self):\n        parser = cmd_util.JSONStringArgument(pretty_name=False)\n        assert parser.pretty_name == \"JSON\"\n\n    @pytest.mark.parametrize(\"expected\", JSON_OBJECTS)\n    def test_plain_valid(self, expected):\n        value = json.dumps(expected)\n        parser = cmd_util.JSONStringArgument()\n        assert parser(value) == expected\n\n    @pytest.mark.parametrize(\"value\", INVALID_JSON)\n    def test_plain_invalid(self, value):\n        parser = cmd_util.JSONStringArgument()\n        details = _get_json_decode_error(value)\n        with pytest.raises(\n            argparse.ArgumentTypeError,\n            match=verbatim(f\"{value!r} is not valid JSON: {details}\")\n        ):\n            parser(value)\n\n    def test_custom_loader(self):\n        def reject(text):\n            raise json.JSONDecodeError(\n                f\"invalid float constant: {text!r}\",\n                text,\n                0\n            )\n        loader = functools.partial(json.loads, parse_constant=reject)\n        parser = cmd_util.JSONStringArgument(loader=loader)\n        value = \"NaN\"\n        # Obtain detailed error message produced by the callback.\n        try:\n            loader(value)\n        except json.JSONDecodeError as err:\n            loader_err_msg = str(err)\n\n        with pytest.raises(\n            argparse.ArgumentTypeError,\n            match=verbatim(f\"{value!r} is not valid JSON: {loader_err_msg}\")\n        ):\n            parser(value)\n\n    @pytest.mark.parametrize(\"expected_valid,value\", (\n        (False, \"0\"), (True, \"1\")\n    ))\n    def test_custom_validator_pretty_name(self, expected_valid, value):\n        further_msg = \"{0!s} is small\"\n        name = \"big JSON number\"\n\n        def is_big(number):\n            if number < 1:\n                raise ValueError(further_msg.format(number))\n            return number\n\n        parser = cmd_util.JSONStringArgument(\n            validator=is_big, pretty_name=name\n        )\n        if expected_valid:\n            assert parser(value) == json.loads(value)\n        else:\n            with pytest.raises(\n                argparse.ArgumentTypeError,\n                match=verbatim(\n                    f\"{value!r} is not valid {name}: \"\n                    + further_msg.format(json.loads(value))\n                )\n            ):\n                parser(value)\n\n\nclass 
_CountOpenFDs(contextlib.AbstractContextManager):\n    \"\"\"Rudimentary context manager that checks for possible file descriptor\n    leaks, by opening /dev/null and noting its numeric value before entering\n    and after exiting.\n    \"\"\"\n    def __init__(self):\n        self.before = -1\n        self.after = -1\n\n    def __enter__(self):\n        null_fd = os.open(os.devnull, os.O_RDONLY)\n        os.close(null_fd)\n        self.before = null_fd\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        null_fd = os.open(os.devnull, os.O_RDONLY)\n        os.close(null_fd)\n        self.after = null_fd\n\n    def assert_no_leak(self):\n        assert self.before >= 0\n        assert self.after >= 0\n        assert self.before == self.after\n\n\n# Private context manager for cleanly and temporarily switching the working\n# directory.\n@contextlib.contextmanager\ndef _pushd(target):\n    oldpwd = os.getcwd()\n    try:\n        os.chdir(target)\n        yield\n    finally:\n        os.chdir(oldpwd)\n\n\n@pytest.mark.usefixtures(\"tmp_path\")\nclass TestJsonOrFileLoader:\n    \"\"\"Lower-level tests for the json_or_file_loader function that is plugged\n    as a loader callback to JSONStringArgument to make JSONArgument.\n    \"\"\"\n    @pytest.mark.parametrize(\n        \"test_id,content\", enumerate((\"invalid json\", '[\"valid json\"]'))\n    )\n    def test_no_file_descriptor_leak(self, tmp_path, test_id, content):\n        f = tmp_path / f\"test{test_id!s}.json\"\n        f.write_text(content)\n        check_fd = _CountOpenFDs()\n        with check_fd:\n            try:\n                cmd_util.json_or_file_loader(str(f))\n            except argparse.ArgumentTypeError:\n                pass\n        check_fd.assert_no_leak()\n\n    @pytest.mark.parametrize(\"expected\", JSON_OBJECTS)\n    def test_read_stdin(self, expected):\n        with unittest.mock.patch(\n            \"sys.stdin\", new_callable=io.StringIO\n        ) as mock_stdin:\n            mock_stdin.write(json.dumps(expected))\n            mock_stdin.seek(0)\n            actual = cmd_util.json_or_file_loader(\"-\")\n            assert not mock_stdin.closed\n            assert actual == expected\n\n    @pytest.mark.parametrize(\"input_value\", INVALID_JSON)\n    def test_reject_invalid_stdin_content(self, input_value):\n        details = _get_json_decode_error(input_value)\n        err_notes = verbatim(\n            f\"Content of standard input is not valid JSON: {details}\"\n        )\n        with unittest.mock.patch(\n            \"sys.stdin\", new_callable=io.StringIO\n        ) as mock_stdin:\n            mock_stdin.write(input_value)\n            mock_stdin.seek(0)\n            with pytest.raises(argparse.ArgumentTypeError, match=err_notes):\n                cmd_util.json_or_file_loader(\"-\")\n\n    @pytest.mark.parametrize(\"value\", INVALID_JSON)\n    def test_reject_file_with_invalid_json(self, tmp_path, value):\n        f = tmp_path / \"test.not-json\"\n        f.write_text(value)\n        details = _get_json_decode_error(value)\n        err_notes = verbatim(\n            f\"Content of file {str(f)!r} is not valid JSON: {details}\"\n        )\n        with pytest.raises(argparse.ArgumentTypeError, match=err_notes):\n            cmd_util.json_or_file_loader(str(f))\n\n    def test_reject_file_name_resembling_json(self, tmp_path):\n        crafted_basename = '\"foo\"'  # basename is valid JSON\n        tmp_file = tmp_path / crafted_basename\n        tmp_file.write_text(\" \")  # ensure file exists; content 
doesn't matter.\n        err_notes = verbatim(\n            f\"{crafted_basename!r} is both valid JSON and a readable file.\"\n            \" Please consider renaming the file.\"\n        )\n        check_fd = _CountOpenFDs()\n        # cd into the temp directory so that we can refer to the file with its\n        # basename (which is valid JSON)\n        with pytest.raises(\n            argparse.ArgumentTypeError, match=err_notes\n        ), check_fd, _pushd(tmp_path):\n            cmd_util.json_or_file_loader(crafted_basename)\n        check_fd.assert_no_leak()\n\n    def test_path_resembles_json_and_is_not_readable_file(self, tmp_path):\n        # Input is both valid JSON string and existing directory (not readable\n        # file). The resemblance of file name to JSON should not matter; it is\n        # treated as just another OSError case, and we expect that the\n        # offending path appears in the exception details.\n        crafted_name = '\"bar\"'\n        tmp_dir = tmp_path / crafted_name\n        os.mkdir(tmp_dir)\n        # cd into the temp directory so that we can refer to the subdir with\n        # its name (which is valid JSON)\n        with pytest.raises(\n            argparse.ArgumentTypeError,\n            match=f\"^.*: {re.escape(repr(crafted_name))}\"\n        ), _pushd(tmp_path):\n            cmd_util.json_or_file_loader(crafted_name)\n\n    def test_not_json_and_is_directory(self, tmp_path):\n        path = tmp_path / \"subdir\"\n        os.mkdir(path)\n        with pytest.raises(\n            argparse.ArgumentTypeError,\n            match=f\"^.*: {re.escape(repr(str(path)))}\"\n        ):\n            cmd_util.json_or_file_loader(str(path))\n\n    def test_not_json_and_file_unreadable(self):\n        bad_file = tempfile.NamedTemporaryFile()\n        os.chmod(bad_file.fileno(), 0o000)\n        path = bad_file.name\n\n        @contextlib.contextmanager\n        def ctx():  # restore mode\n            try:\n                yield\n            finally:\n                os.chmod(bad_file.fileno(), 0o600)\n\n        with pytest.raises(\n            argparse.ArgumentTypeError,\n            match=f\"^.*: {re.escape(repr(path))}\"\n        ), ctx():\n            cmd_util.json_or_file_loader(path)\n\n    def test_not_json_and_not_path(self):\n        # This is a simple \"file not found\" case (open() raises\n        # FileNotFoundError), and the error message should not contain the path\n        # in the trailing part.\n        with tempfile.NamedTemporaryFile() as gone_file:\n            path = gone_file.name\n        details = _get_json_decode_error(path)\n        err_notes = verbatim(\n            f\"{path!r} is not a readable file or valid JSON\"\n            f\" [JSON decoding error: {details}]\"\n        )\n        with pytest.raises(argparse.ArgumentTypeError, match=err_notes):\n            cmd_util.json_or_file_loader(path)\n\n    def test_not_json_and_illegal_path(self):\n        # Null byte in path, illegal on almost all platforms.\n        path = \"\\0\"\n        details = _get_json_decode_error(path)\n        err_notes = verbatim(f\"{path!r} is not valid JSON: {details}\")\n        with pytest.raises(argparse.ArgumentTypeError, match=err_notes):\n            cmd_util.json_or_file_loader(path)\n\n\nclass TestJSONArgument:\n\n    @classmethod\n    def setup_class(cls):\n        cls.json_file = tempfile.NamedTemporaryFile(\n            'w+',\n            encoding='utf-8',\n            prefix='argtest',\n            suffix='.json',\n        )\n        cls.parser = 
cmd_util.JSONArgument()\n\n    @classmethod\n    def teardown_class(cls):\n        cls.json_file.close()\n\n    def setup_method(self):\n        self.json_file.seek(0)\n        self.json_file.truncate()\n\n    @pytest.mark.parametrize(\"obj\", JSON_OBJECTS)\n    def test_valid_argument_string(self, obj):\n        actual = self.parser(json.dumps(obj))\n        assert actual == obj\n\n    @pytest.mark.parametrize(\"obj\", JSON_OBJECTS)\n    def test_valid_argument_path(self, obj):\n        json.dump(obj, self.json_file)\n        self.json_file.flush()\n        actual = self.parser(self.json_file.name)\n        assert actual == obj\n\n    @pytest.mark.parametrize(\"path\", [FILE_PATH, None])\n    def test_argument_path_not_json(self, path):\n        if path is None:\n            path = self.json_file.name\n        details = _get_json_decode_error(str(path))\n        err_notes = verbatim(\n            f\"Content of file {str(path)!r} is not valid JSON: {details}\"\n        )\n        with pytest.raises(argparse.ArgumentTypeError, match=err_notes):\n            self.parser(str(path))\n\n\nclass TestJSONArgumentValidation:\n    @pytest.mark.parametrize(\"value\", JSON_OBJECTS)\n    def test_value_returned_from_validator(self, value):\n        # This validator fakes validation by discarding the result of actual\n        # JSON parsing (of the JSON string '{}') and replacing it with the\n        # arbitrary object \"value\".\n        parser = cmd_util.JSONArgument(lambda _: copy.deepcopy(value))\n        assert parser('{}') == value\n\n    @pytest.mark.parametrize(\"value\", JSON_OBJECTS)\n    def test_exception_raised_from_validator(self, value):\n        pretty_name = \"type for testing\"\n        json_value = json.dumps(value)\n        err_detail = f\"{json_value} fails validation\"\n\n        def raise_func(_):\n            raise ValueError(err_detail)\n\n        parser = cmd_util.JSONArgument(\n            validator=raise_func, pretty_name=pretty_name\n        )\n        err_notes = verbatim(\n            f\"{json_value!r} is not valid {pretty_name}: {err_detail}\"\n        )\n        with pytest.raises(argparse.ArgumentTypeError, match=err_notes):\n            parser(json_value)\n\n    @pytest.mark.parametrize(\n        \"filter_input\",\n        itertools.combinations(\n            ValidateFiltersTestCase.VALID_FILTERS, 2\n        )\n    )\n    def test_with_filter_validator_valid_filter(self, filter_input):\n        parser = cmd_util.JSONArgument(\n            validator=cmd_util.validate_filters,\n            pretty_name=\"filter\"\n        )\n        expected = list(filter_input)\n        input_str = json.dumps(expected)\n        assert parser(input_str) == expected\n\n    def test_with_filter_validator_invalid_filter(self):\n        parser = cmd_util.JSONArgument(\n            validator=cmd_util.validate_filters,\n            pretty_name=\"filter\"\n        )\n        input_str = '[1]'\n        # Obtain a copy of the detailed validation error message from the\n        # lower-level function.\n        try:\n            cmd_util.validate_filters(json.loads(input_str))\n        except ValueError as exc:\n            validation_err = str(exc)\n        # Check that the detailed validation error message is attached to the\n        # argparse-generated message.\n        err_notes = verbatim(\n            f\"{input_str!r} is not valid filter: {validation_err}\"\n        )\n        with pytest.raises(argparse.ArgumentTypeError, match=err_notes):\n            parser(input_str)\n\n\nclass 
TestRangedValue:\n    @pytest.fixture(scope='class')\n    def cmpint(self):\n        return cmd_util.RangedValue(int, range(-1, 2))\n\n    @pytest.mark.parametrize('s', ['-1', '0', '1'])\n    def test_valid_values(self, cmpint, s):\n        assert cmpint(s) == int(s)\n\n    @pytest.mark.parametrize('s', ['foo', '-2', '2', '0.2', '', ' '])\n    def test_invalid_values(self, cmpint, s):\n        with pytest.raises(ValueError):\n            cmpint(s)\n\n\nclass TestUniqueSplit:\n    @pytest.fixture(scope='class')\n    def argtype(self):\n        return cmd_util.UniqueSplit()\n\n    @pytest.mark.parametrize('arg', [\n        'foo',\n        'foo,bar',\n        'foo, bar, baz',\n        'foo , bar , baz , quux',\n    ])\n    def test_basic_parse(self, arg, argtype):\n        expected = ['foo', 'bar', 'baz', 'quux'][:arg.count(',') + 1]\n        assert argtype(arg) == expected\n\n    @pytest.mark.parametrize('arg', [\n        'foo, foo, bar',\n        'foo, bar, foo',\n        'foo, bar, bar',\n    ])\n    def test_uniqueness(self, arg, argtype):\n        assert argtype(arg) == ['foo', 'bar']\n"
  },
  {
    "path": "sdk/python/tests/test_collections.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport ciso8601\nimport copy\nimport datetime\nimport os\nimport random\nimport re\nimport shutil\nimport sys\nimport tempfile\nimport time\nimport unittest\n\nimport arvados\nimport arvados.keep\nimport parameterized\nimport pytest\n\nfrom arvados._internal.streams import Range, LocatorAndRange, locators_and_ranges\nfrom arvados.collection import Collection, CollectionReader\n\nfrom . import arvados_testutil as tutil\nfrom . import run_test_server\nfrom unittest import mock\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass ArvadosCollectionsTest(run_test_server.TestCaseWithServers,\n                             tutil.ArvadosBaseTestCase):\n    disk_cache = False\n    MAIN_SERVER = {}\n\n    @classmethod\n    def setUpClass(cls):\n        super(ArvadosCollectionsTest, cls).setUpClass()\n        # need admin privileges to make collections with unsigned blocks\n        run_test_server.authorize_with('admin')\n        if cls.disk_cache:\n            cls._disk_cache_dir = tempfile.mkdtemp(prefix='CollectionsTest-')\n        else:\n            cls._disk_cache_dir = None\n        block_cache = arvados.keep.KeepBlockCache(\n            disk_cache=cls.disk_cache,\n            disk_cache_dir=cls._disk_cache_dir,\n        )\n        cls.api_client = arvados.api('v1')\n        cls.keep_client = arvados.KeepClient(api_client=cls.api_client,\n                                             local_store=cls.local_store,\n                                             block_cache=block_cache)\n\n    @classmethod\n    def tearDownClass(cls):\n        if cls._disk_cache_dir:\n            shutil.rmtree(cls._disk_cache_dir)\n\n    def write_foo_bar_baz(self):\n        with arvados.collection.Collection(api_client=self.api_client).open('zzz', 'wb') as f:\n            f.write(b'foobar')\n            f.flush()\n            f.write(b'baz')\n        cw = arvados.collection.Collection(\n            api_client=self.api_client,\n            manifest_locator_or_text=\n            \". 3858f62230ac3c915f300c664312c63f+6 0:3:foo.txt 3:3:bar.txt\\n\" +\n            \"./baz 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz.txt\\n\")\n        cw.save_new()\n        return cw.portable_data_hash()\n\n    def test_pdh_is_native_str(self):\n        pdh = self.write_foo_bar_baz()\n        self.assertEqual(type(''), type(pdh))\n\n    def test_keep_local_store(self):\n        self.assertEqual(self.keep_client.put(b'foo'), 'acbd18db4cc2f85cedef654fccc4a4d8+3', 'wrong md5 hash from Keep.put')\n        self.assertEqual(self.keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3'), b'foo', 'wrong data from Keep.get')\n\n    def test_local_collection_writer(self):\n        self.assertEqual(self.write_foo_bar_baz(),\n                         '23ca013983d6239e98931cc779e68426+114',\n                         'wrong locator hash: ' + self.write_foo_bar_baz())\n\n    def test_collection_empty_file(self):\n        cw = arvados.collection.Collection(api_client=self.api_client)\n        with cw.open('zero.txt', 'wb') as f:\n            pass\n\n        self.assertEqual(cw.manifest_text(), \". 
d41d8cd98f00b204e9800998ecf8427e+0 0:0:zero.txt\\n\")\n        self.check_manifest_file_sizes(cw.manifest_text(), [0])\n\n        cw = arvados.collection.Collection(api_client=self.api_client)\n        with cw.open('zero.txt', 'wb') as f:\n            pass\n        with cw.open('one.txt', 'wb') as f:\n            f.write(b'1')\n        with cw.open('foo/zero.txt', 'wb') as f:\n            pass\n        # sorted, that's: [./one.txt, ./zero.txt, foo/zero.txt]\n        self.check_manifest_file_sizes(cw.manifest_text(), [1,0,0])\n\n    def check_manifest_file_sizes(self, manifest_text, expect_sizes):\n        got_sizes = []\n        def walk(subdir):\n            for fnm in subdir:\n                if isinstance(subdir[fnm], arvados.arvfile.ArvadosFile):\n                    got_sizes.append(subdir[fnm].size())\n                else:\n                    walk(subdir[fnm])\n        cr = arvados.CollectionReader(manifest_text, self.api_client)\n        walk(cr)\n        self.assertEqual(got_sizes, expect_sizes, \"got wrong file sizes %s, expected %s\" % (got_sizes, expect_sizes))\n\n    def test_normalized_collection(self):\n        m1 = \"\"\". 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt\n. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt\n. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt\n\"\"\"\n        self.assertEqual(arvados.CollectionReader(m1, self.api_client).manifest_text(normalize=True),\n                         \"\"\". 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:127:md5sum.txt\n\"\"\")\n\n        m2 = \"\"\". 204e43b8a1185621ca55a94839582e6f+67108864 b9677abbac956bd3e86b1deb28dfac03+67108864 fc15aff2a762b13f521baf042140acec+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:227212247:var-GS000016015-ASM.tsv.bz2\n\"\"\"\n        self.assertEqual(arvados.CollectionReader(m2, self.api_client).manifest_text(normalize=True), m2)\n\n        m3 = \"\"\". 5348b82a029fd9e971a811ce1f71360b+43 3:40:md5sum.txt\n. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt\n. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt\n\"\"\"\n        self.assertEqual(arvados.CollectionReader(m3, self.api_client).manifest_text(normalize=True),\n                         \"\"\". 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 3:124:md5sum.txt\n\"\"\")\n\n        m4 = \"\"\". 204e43b8a1185621ca55a94839582e6f+67108864 0:3:foo/bar\n./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz\n./foo 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar\n\"\"\"\n        self.assertEqual(arvados.CollectionReader(m4, self.api_client).manifest_text(normalize=True),\n                         \"\"\"./foo 204e43b8a1185621ca55a94839582e6f+67108864 323d2a3ce20370c4ca1d3462a344f8fd+25885655 0:3:bar 67108864:3:bar\n./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz\n\"\"\")\n\n        m5 = \"\"\". 
204e43b8a1185621ca55a94839582e6f+67108864 0:3:foo/bar\n./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz\n./foo 204e43b8a1185621ca55a94839582e6f+67108864 3:3:bar\n\"\"\"\n        self.assertEqual(arvados.CollectionReader(m5, self.api_client).manifest_text(normalize=True),\n                         \"\"\"./foo 204e43b8a1185621ca55a94839582e6f+67108864 0:6:bar\n./zzz 204e43b8a1185621ca55a94839582e6f+67108864 0:999:zzz\n\"\"\")\n\n        with self.data_file('1000G_ref_manifest') as f6:\n            m6 = f6.read()\n            self.assertEqual(arvados.CollectionReader(m6, self.api_client).manifest_text(normalize=True), m6)\n\n        with self.data_file('jlake_manifest') as f7:\n            m7 = f7.read()\n            self.assertEqual(arvados.CollectionReader(m7, self.api_client).manifest_text(normalize=True), m7)\n\n        m8 = \"\"\"./a\\\\040b\\\\040c 59ca0efa9f5633cb0371bbc0355478d8+13 0:13:hello\\\\040world.txt\n\"\"\"\n        self.assertEqual(arvados.CollectionReader(m8, self.api_client).manifest_text(normalize=True), m8)\n\n    def test_locators_and_ranges(self):\n        blocks2 = [Range('a', 0, 10),\n                   Range('b', 10, 10),\n                   Range('c', 20, 10),\n                   Range('d', 30, 10),\n                   Range('e', 40, 10),\n                   Range('f', 50, 10)]\n\n        self.assertEqual(locators_and_ranges(blocks2,  2,  2), [LocatorAndRange('a', 10, 2, 2)])\n        self.assertEqual(locators_and_ranges(blocks2, 12, 2), [LocatorAndRange('b', 10, 2, 2)])\n        self.assertEqual(locators_and_ranges(blocks2, 22, 2), [LocatorAndRange('c', 10, 2, 2)])\n        self.assertEqual(locators_and_ranges(blocks2, 32, 2), [LocatorAndRange('d', 10, 2, 2)])\n        self.assertEqual(locators_and_ranges(blocks2, 42, 2), [LocatorAndRange('e', 10, 2, 2)])\n        self.assertEqual(locators_and_ranges(blocks2, 52, 2), [LocatorAndRange('f', 10, 2, 2)])\n        self.assertEqual(locators_and_ranges(blocks2, 62, 2), [])\n        self.assertEqual(locators_and_ranges(blocks2, -2, 2), [])\n\n        self.assertEqual(locators_and_ranges(blocks2,  0,  2), [LocatorAndRange('a', 10, 0, 2)])\n        self.assertEqual(locators_and_ranges(blocks2, 10, 2), [LocatorAndRange('b', 10, 0, 2)])\n        self.assertEqual(locators_and_ranges(blocks2, 20, 2), [LocatorAndRange('c', 10, 0, 2)])\n        self.assertEqual(locators_and_ranges(blocks2, 30, 2), [LocatorAndRange('d', 10, 0, 2)])\n        self.assertEqual(locators_and_ranges(blocks2, 40, 2), [LocatorAndRange('e', 10, 0, 2)])\n        self.assertEqual(locators_and_ranges(blocks2, 50, 2), [LocatorAndRange('f', 10, 0, 2)])\n        self.assertEqual(locators_and_ranges(blocks2, 60, 2), [])\n        self.assertEqual(locators_and_ranges(blocks2, -2, 2), [])\n\n        self.assertEqual(locators_and_ranges(blocks2,  9,  2), [LocatorAndRange('a', 10, 9, 1), LocatorAndRange('b', 10, 0, 1)])\n        self.assertEqual(locators_and_ranges(blocks2, 19, 2), [LocatorAndRange('b', 10, 9, 1), LocatorAndRange('c', 10, 0, 1)])\n        self.assertEqual(locators_and_ranges(blocks2, 29, 2), [LocatorAndRange('c', 10, 9, 1), LocatorAndRange('d', 10, 0, 1)])\n        self.assertEqual(locators_and_ranges(blocks2, 39, 2), [LocatorAndRange('d', 10, 9, 1), LocatorAndRange('e', 10, 0, 1)])\n        self.assertEqual(locators_and_ranges(blocks2, 49, 2), [LocatorAndRange('e', 10, 9, 1), LocatorAndRange('f', 10, 0, 1)])\n        self.assertEqual(locators_and_ranges(blocks2, 59, 2), [LocatorAndRange('f', 10, 9, 1)])\n\n\n        blocks3 = [Range('a', 0, 
10),\n                  Range('b', 10, 10),\n                  Range('c', 20, 10),\n                  Range('d', 30, 10),\n                  Range('e', 40, 10),\n                  Range('f', 50, 10),\n                   Range('g', 60, 10)]\n\n        self.assertEqual(locators_and_ranges(blocks3,  2,  2), [LocatorAndRange('a', 10, 2, 2)])\n        self.assertEqual(locators_and_ranges(blocks3, 12, 2), [LocatorAndRange('b', 10, 2, 2)])\n        self.assertEqual(locators_and_ranges(blocks3, 22, 2), [LocatorAndRange('c', 10, 2, 2)])\n        self.assertEqual(locators_and_ranges(blocks3, 32, 2), [LocatorAndRange('d', 10, 2, 2)])\n        self.assertEqual(locators_and_ranges(blocks3, 42, 2), [LocatorAndRange('e', 10, 2, 2)])\n        self.assertEqual(locators_and_ranges(blocks3, 52, 2), [LocatorAndRange('f', 10, 2, 2)])\n        self.assertEqual(locators_and_ranges(blocks3, 62, 2), [LocatorAndRange('g', 10, 2, 2)])\n\n\n        blocks = [Range('a', 0, 10),\n                  Range('b', 10, 15),\n                  Range('c', 25, 5)]\n        self.assertEqual(locators_and_ranges(blocks, 1, 0), [])\n        self.assertEqual(locators_and_ranges(blocks, 0, 5), [LocatorAndRange('a', 10, 0, 5)])\n        self.assertEqual(locators_and_ranges(blocks, 3, 5), [LocatorAndRange('a', 10, 3, 5)])\n        self.assertEqual(locators_and_ranges(blocks, 0, 10), [LocatorAndRange('a', 10, 0, 10)])\n\n        self.assertEqual(locators_and_ranges(blocks, 0, 11), [LocatorAndRange('a', 10, 0, 10),\n                                                              LocatorAndRange('b', 15, 0, 1)])\n        self.assertEqual(locators_and_ranges(blocks, 1, 11), [LocatorAndRange('a', 10, 1, 9),\n                                                              LocatorAndRange('b', 15, 0, 2)])\n        self.assertEqual(locators_and_ranges(blocks, 0, 25), [LocatorAndRange('a', 10, 0, 10),\n                                                              LocatorAndRange('b', 15, 0, 15)])\n\n        self.assertEqual(locators_and_ranges(blocks, 0, 30), [LocatorAndRange('a', 10, 0, 10),\n                                                              LocatorAndRange('b', 15, 0, 15),\n                                                              LocatorAndRange('c', 5, 0, 5)])\n        self.assertEqual(locators_and_ranges(blocks, 1, 30), [LocatorAndRange('a', 10, 1, 9),\n                                                              LocatorAndRange('b', 15, 0, 15),\n                                                              LocatorAndRange('c', 5, 0, 5)])\n        self.assertEqual(locators_and_ranges(blocks, 0, 31), [LocatorAndRange('a', 10, 0, 10),\n                                                              LocatorAndRange('b', 15, 0, 15),\n                                                              LocatorAndRange('c', 5, 0, 5)])\n\n        self.assertEqual(locators_and_ranges(blocks, 15, 5), [LocatorAndRange('b', 15, 5, 5)])\n\n        self.assertEqual(locators_and_ranges(blocks, 8, 17), [LocatorAndRange('a', 10, 8, 2),\n                                                              LocatorAndRange('b', 15, 0, 15)])\n\n        self.assertEqual(locators_and_ranges(blocks, 8, 20), [LocatorAndRange('a', 10, 8, 2),\n                                                              LocatorAndRange('b', 15, 0, 15),\n                                                              LocatorAndRange('c', 5, 0, 3)])\n\n        self.assertEqual(locators_and_ranges(blocks, 26, 2), [LocatorAndRange('c', 5, 1, 2)])\n\n        
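# Reads that straddle a block boundary come back as one\n        # LocatorAndRange per underlying block, with each offset made\n        # relative to the start of its own block:\n        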
self.assertEqual(locators_and_ranges(blocks, 9, 15), [LocatorAndRange('a', 10, 9, 1),\n                                                              LocatorAndRange('b', 15, 0, 14)])\n        self.assertEqual(locators_and_ranges(blocks, 10, 15), [LocatorAndRange('b', 15, 0, 15)])\n        self.assertEqual(locators_and_ranges(blocks, 11, 15), [LocatorAndRange('b', 15, 1, 14),\n                                                               LocatorAndRange('c', 5, 0, 1)])\n\n    class MockKeep(object):\n        def __init__(self, content, num_retries=0):\n            self.content = content\n            self.num_prefetch_threads = 1\n\n        def get(self, locator, num_retries=0, prefetch=False):\n            return self.content[locator]\n\n    def test_extract_file(self):\n        m1 = \"\"\". 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt\n. 085c37f02916da1cad16f93c54d899b7+41 0:41:md6sum.txt\n. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md7sum.txt\n. 085c37f02916da1cad16f93c54d899b7+41 5348b82a029fd9e971a811ce1f71360b+43 8b22da26f9f433dea0a10e5ec66d73ba+43 47:80:md8sum.txt\n. 085c37f02916da1cad16f93c54d899b7+41 5348b82a029fd9e971a811ce1f71360b+43 8b22da26f9f433dea0a10e5ec66d73ba+43 40:80:md9sum.txt\n\"\"\"\n        coll = arvados.CollectionReader(m1, self.api_client)\n        m2 = coll.manifest_text(normalize=True)\n        self.assertEqual(m2,\n                         \". 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt 43:41:md6sum.txt 84:43:md7sum.txt 6:37:md8sum.txt 84:43:md8sum.txt 83:1:md9sum.txt 0:43:md9sum.txt 84:36:md9sum.txt\\n\")\n        self.assertEqual(coll['md5sum.txt'].manifest_text(),\n                         \". 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt\\n\")\n        self.assertEqual(coll['md6sum.txt'].manifest_text(),\n                         \". 085c37f02916da1cad16f93c54d899b7+41 0:41:md6sum.txt\\n\")\n        self.assertEqual(coll['md7sum.txt'].manifest_text(),\n                         \". 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md7sum.txt\\n\")\n        self.assertEqual(coll['md9sum.txt'].manifest_text(),\n                         \". 
085c37f02916da1cad16f93c54d899b7+41 5348b82a029fd9e971a811ce1f71360b+43 8b22da26f9f433dea0a10e5ec66d73ba+43 40:80:md9sum.txt\\n\")\n\n\nclass CollectionTestMixin(tutil.ApiClientMock):\n    API_COLLECTIONS = run_test_server.fixture('collections')\n    DEFAULT_COLLECTION = API_COLLECTIONS['foo_file']\n    DEFAULT_DATA_HASH = DEFAULT_COLLECTION['portable_data_hash']\n    DEFAULT_MANIFEST = DEFAULT_COLLECTION['manifest_text']\n    DEFAULT_UUID = DEFAULT_COLLECTION['uuid']\n    ALT_COLLECTION = API_COLLECTIONS['bar_file']\n    ALT_DATA_HASH = ALT_COLLECTION['portable_data_hash']\n    ALT_MANIFEST = ALT_COLLECTION['manifest_text']\n\n    def api_client_mock(self, status=200):\n        client = super(CollectionTestMixin, self).api_client_mock()\n        self.mock_keep_services(client, status=status, service_type='proxy', count=1)\n        return client\n\n\n@tutil.skip_sleep\nclass CollectionReaderTestCase(unittest.TestCase, CollectionTestMixin):\n    def mock_get_collection(self, api_mock, code, fixturename):\n        body = self.API_COLLECTIONS.get(fixturename)\n        self._mock_api_call(api_mock.collections().get, code, body)\n\n    def api_client_mock(self, status=200):\n        client = super(CollectionReaderTestCase, self).api_client_mock()\n        self.mock_get_collection(client, status, 'foo_file')\n        return client\n\n    def test_init_default_retries(self):\n        client = self.api_client_mock(200)\n        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)\n        reader.manifest_text()\n        client.collections().get().execute.assert_called_with(num_retries=10)\n\n    def test_uuid_init_success(self):\n        client = self.api_client_mock(200)\n        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client,\n                                          num_retries=3)\n        self.assertEqual(self.DEFAULT_COLLECTION['manifest_text'],\n                         reader.manifest_text())\n        client.collections().get().execute.assert_called_with(num_retries=3)\n\n    def test_uuid_init_failure_raises_api_error(self):\n        client = self.api_client_mock(500)\n        with self.assertRaises(arvados.errors.ApiError):\n            reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)\n\n    def test_locator_init(self):\n        client = self.api_client_mock(200)\n        # Ensure Keep will not return anything if asked.\n        with tutil.mock_keep_responses(None, 404):\n            reader = arvados.CollectionReader(self.DEFAULT_DATA_HASH,\n                                              api_client=client)\n            self.assertEqual(self.DEFAULT_MANIFEST, reader.manifest_text())\n\n    def test_init_no_fallback_to_keep(self):\n        # Do not look up a collection UUID or PDH in Keep.\n        for key in [self.DEFAULT_UUID, self.DEFAULT_DATA_HASH]:\n            client = self.api_client_mock(404)\n            with tutil.mock_keep_responses(self.DEFAULT_MANIFEST, 200):\n                with self.assertRaises(arvados.errors.ApiError):\n                    reader = arvados.CollectionReader(key, api_client=client)\n\n    def test_init_num_retries_propagated(self):\n        # More of an integration test...\n        client = self.api_client_mock(200)\n        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client,\n                                          num_retries=3)\n        with tutil.mock_keep_responses('foo', 500, 500, 200):\n            self.assertEqual('foo', reader.open('foo', 'r').read())\n\n    def 
test_read_nonnormalized_manifest_with_collection_reader(self):\n        # client should be able to use CollectionReader on a manifest without normalizing it\n        client = self.api_client_mock(500)\n        nonnormal = \". acbd18db4cc2f85cedef654fccc4a4d8+3+Aabadbadbee@abeebdee 0:3:foo.txt 1:0:bar.txt 0:3:foo.txt\\n\"\n        reader = arvados.CollectionReader(\n            nonnormal,\n            api_client=client, num_retries=0)\n        # Ensure stripped_manifest() doesn't mangle our manifest in\n        # any way other than stripping hints.\n        self.assertEqual(\n            re.sub(r'\\+[^\\d\\s\\+]+', '', nonnormal),\n            reader.stripped_manifest())\n        # Ensure stripped_manifest() didn't mutate our reader.\n        self.assertEqual(nonnormal, reader.manifest_text())\n\n    def test_read_empty_collection(self):\n        client = self.api_client_mock(200)\n        self.mock_get_collection(client, 200, 'empty')\n        reader = arvados.CollectionReader('d41d8cd98f00b204e9800998ecf8427e+0',\n                                          api_client=client)\n        self.assertEqual('', reader.manifest_text())\n        self.assertEqual(0, len(reader))\n        self.assertFalse(reader)\n\n    def test_api_response(self):\n        client = self.api_client_mock()\n        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)\n        self.assertEqual(self.DEFAULT_COLLECTION, reader.api_response())\n\n    def check_open_file(self, coll_file, stream_name, file_name, file_size):\n        self.assertFalse(coll_file.closed, \"returned file is not open\")\n        self.assertEqual(stream_name, coll_file.stream_name())\n        self.assertEqual(file_name, coll_file.name)\n        self.assertEqual(file_size, coll_file.size())\n\n    def test_open_collection_file_one_argument(self):\n        client = self.api_client_mock(200)\n        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)\n        cfile = reader.open('./foo', 'rb')\n        self.check_open_file(cfile, '.', 'foo', 3)\n\n    def test_open_deep_file(self):\n        coll_name = 'collection_with_files_in_subdir'\n        client = self.api_client_mock(200)\n        self.mock_get_collection(client, 200, coll_name)\n        reader = arvados.CollectionReader(\n            self.API_COLLECTIONS[coll_name]['uuid'], api_client=client)\n        cfile = reader.open('./subdir2/subdir3/file2_in_subdir3.txt', 'rb')\n        self.check_open_file(cfile, './subdir2/subdir3', 'file2_in_subdir3.txt',\n                             32)\n\n    def test_open_nonexistent_stream(self):\n        client = self.api_client_mock(200)\n        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)\n        self.assertRaises(IOError, reader.open, './nonexistent/foo')\n\n    def test_open_nonexistent_file(self):\n        client = self.api_client_mock(200)\n        reader = arvados.CollectionReader(self.DEFAULT_UUID, api_client=client)\n        self.assertRaises(IOError, reader.open, 'nonexistent')\n\n\nclass CollectionMethods(run_test_server.TestCaseWithServers):\n\n    def test_keys_values_items_support_indexing(self):\n        c = Collection()\n        with c.open('foo', 'wb') as f:\n            f.write(b'foo')\n        with c.open('bar', 'wb') as f:\n            f.write(b'bar')\n        self.assertEqual(2, len(c.keys()))\n        fn0, fn1 = c.keys()\n        self.assertEqual(2, len(c.values()))\n        f0 = c.values()[0]\n        f1 = c.values()[1]\n        self.assertEqual(2, len(c.items()))\n     
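   # items() pairs each name from keys() with its entry, in the same order:\n     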
   self.assertEqual(fn0, c.items()[0][0])\n        self.assertEqual(fn1, c.items()[1][0])\n\n    def test_get_properties(self):\n        c = Collection()\n        self.assertEqual(c.get_properties(), {})\n        c.save_new(properties={\"foo\":\"bar\"})\n        self.assertEqual(c.get_properties(), {\"foo\":\"bar\"})\n\n    def test_get_trash_at(self):\n        c = Collection()\n        self.assertEqual(c.get_trash_at(), None)\n        c.save_new(trash_at=datetime.datetime(2111, 1, 1, 11, 11, 11, 111111))\n        self.assertEqual(c.get_trash_at(), ciso8601.parse_datetime('2111-01-01T11:11:11.111111000Z'))\n\n\nclass CollectionOpenModes(run_test_server.TestCaseWithServers):\n\n    def test_open_binary_modes(self):\n        c = Collection()\n        for mode in ['wb', 'wb+', 'ab', 'ab+']:\n            with c.open('foo', mode) as f:\n                f.write(b'foo')\n\n    def test_open_invalid_modes(self):\n        c = Collection()\n        for mode in ['+r', 'aa', '++', 'r+b', 'beer', '', None]:\n            with self.assertRaises(Exception):\n                c.open('foo', mode)\n\n    def test_open_text_modes(self):\n        c = Collection()\n        with c.open('foo', 'wb') as f:\n            f.write('foo')\n        for mode in ['r', 'rt', 'r+', 'rt+', 'w', 'wt', 'a', 'at']:\n            with c.open('foo', mode) as f:\n                if mode[0] == 'r' and '+' not in mode:\n                    self.assertEqual('foo', f.read(3))\n                else:\n                    f.write('bar')\n                    f.seek(0, os.SEEK_SET)\n                    self.assertEqual('bar', f.read(3))\n\n\nclass TextModes(run_test_server.TestCaseWithServers):\n\n    def setUp(self):\n        arvados.config.KEEP_BLOCK_SIZE = 4\n        self.sailboat = '\\N{SAILBOAT}'\n        self.snowman = '\\N{SNOWMAN}'\n\n    def tearDown(self):\n        arvados.config.KEEP_BLOCK_SIZE = 2 ** 26\n\n    def test_read_sailboat_across_block_boundary(self):\n        c = Collection()\n        f = c.open('sailboats', 'wb')\n        data = self.sailboat.encode('utf-8')\n        f.write(data)\n        f.write(data[:1])\n        f.write(data[1:])\n        f.write(b'\\n')\n        f.close()\n        self.assertRegex(c.portable_manifest_text(), r'\\+4 .*\\+3 ')\n\n        f = c.open('sailboats', 'r')\n        string = f.readline()\n        self.assertEqual(string, self.sailboat+self.sailboat+'\\n')\n        f.close()\n\n    def test_write_snowman_across_block_boundary(self):\n        c = Collection()\n        f = c.open('snowmany', 'w')\n        data = self.snowman\n        f.write(data+data+'\\n'+data+'\\n')\n        f.close()\n        self.assertRegex(c.portable_manifest_text(), r'\\+4 .*\\+4 .*\\+3 ')\n\n        f = c.open('snowmany', 'r')\n        self.assertEqual(f.readline(), self.snowman+self.snowman+'\\n')\n        self.assertEqual(f.readline(), self.snowman+'\\n')\n        f.close()\n\n\nclass NewCollectionTestCase(unittest.TestCase, CollectionTestMixin):\n\n    def test_replication_desired_kept_on_load(self):\n        m = '. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\\n'\n        c1 = Collection(m, replication_desired=1)\n        c1.save_new()\n        loc = c1.manifest_locator()\n        c2 = Collection(loc)\n        self.assertEqual(c1.manifest_text(strip=True), c2.manifest_text(strip=True))\n        self.assertEqual(c1.replication_desired, c2.replication_desired)\n\n    def test_replication_desired_not_loaded_if_provided(self):\n        m = '. 
781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\\n'\n        c1 = Collection(m, replication_desired=1)\n        c1.save_new()\n        loc = c1.manifest_locator()\n        c2 = Collection(loc, replication_desired=2)\n        self.assertEqual(c1.manifest_text(strip=True), c2.manifest_text(strip=True))\n        self.assertNotEqual(c1.replication_desired, c2.replication_desired)\n\n    def test_storage_classes_desired_kept_on_load(self):\n        m = '. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\\n'\n        c1 = Collection(m, storage_classes_desired=['archival'])\n        c1.save_new()\n        loc = c1.manifest_locator()\n        c2 = Collection(loc)\n        self.assertEqual(c1.manifest_text(strip=True), c2.manifest_text(strip=True))\n        self.assertEqual(c1.storage_classes_desired(), c2.storage_classes_desired())\n\n    def test_storage_classes_change_after_save(self):\n        m = '. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\\n'\n        c1 = Collection(m, storage_classes_desired=['archival'])\n        c1.save_new()\n        loc = c1.manifest_locator()\n        c2 = Collection(loc)\n        self.assertEqual(['archival'], c2.storage_classes_desired())\n        c2.save(storage_classes=['highIO'])\n        self.assertEqual(['highIO'], c2.storage_classes_desired())\n        c3 = Collection(loc)\n        self.assertEqual(c1.manifest_text(strip=True), c3.manifest_text(strip=True))\n        self.assertEqual(['highIO'], c3.storage_classes_desired())\n\n    def test_storage_classes_desired_not_loaded_if_provided(self):\n        m = '. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\\n'\n        c1 = Collection(m, storage_classes_desired=['archival'])\n        c1.save_new()\n        loc = c1.manifest_locator()\n        c2 = Collection(loc, storage_classes_desired=['default'])\n        self.assertEqual(c1.manifest_text(strip=True), c2.manifest_text(strip=True))\n        self.assertNotEqual(c1.storage_classes_desired(), c2.storage_classes_desired())\n\n    def test_init_manifest(self):\n        m1 = \"\"\". 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt\n. 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt\n. 8b22da26f9f433dea0a10e5ec66d73ba+43 0:43:md5sum.txt\n\"\"\"\n        self.assertEqual(m1, CollectionReader(m1).manifest_text(normalize=False))\n        self.assertEqual(\". 5348b82a029fd9e971a811ce1f71360b+43 085c37f02916da1cad16f93c54d899b7+41 8b22da26f9f433dea0a10e5ec66d73ba+43 0:127:md5sum.txt\\n\", CollectionReader(m1).manifest_text(normalize=True))\n\n    def test_init_manifest_with_collision(self):\n        m1 = \"\"\". 5348b82a029fd9e971a811ce1f71360b+43 0:43:md5sum.txt\n./md5sum.txt 085c37f02916da1cad16f93c54d899b7+41 0:41:md5sum.txt\n\"\"\"\n        with self.assertRaises(arvados.errors.ArgumentError):\n            self.assertEqual(m1, CollectionReader(m1))\n\n    def test_init_manifest_with_error(self):\n        m1 = \"\"\". 0:43:md5sum.txt\"\"\"\n        with self.assertRaises(arvados.errors.ArgumentError):\n            self.assertEqual(m1, CollectionReader(m1))\n\n    def test_remove(self):\n        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\\n')\n        self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\\n\", c.portable_manifest_text())\n        self.assertIn(\"count1.txt\", c)\n        c.remove(\"count1.txt\")\n        self.assertNotIn(\"count1.txt\", c)\n        self.assertEqual(\". 
781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\\n\", c.portable_manifest_text())\n        with self.assertRaises(arvados.errors.ArgumentError):\n            c.remove(\"\")\n\n    def test_remove_recursive(self):\n        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:a/b/c/d/efg.txt 0:10:xyz.txt\\n')\n        self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:xyz.txt\\n./a/b/c/d 781e5e245d69b566979b86e28d23f2c7+10 0:10:efg.txt\\n\", c.portable_manifest_text())\n        self.assertIn(\"a\", c)\n        self.assertEqual(1, len(c[\"a\"].keys()))\n        # cannot remove non-empty directory with default recursive=False\n        with self.assertRaises(OSError):\n            c.remove(\"a/b\")\n        with self.assertRaises(OSError):\n            c.remove(\"a/b/c/d\")\n        c.remove(\"a/b\", recursive=True)\n        self.assertEqual(0, len(c[\"a\"].keys()))\n        self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:xyz.txt\\n./a d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\056\\n\", c.portable_manifest_text())\n\n    def test_find(self):\n        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\\n')\n        self.assertIs(c.find(\".\"), c)\n        self.assertIs(c.find(\"./count1.txt\"), c[\"count1.txt\"])\n        self.assertIs(c.find(\"count1.txt\"), c[\"count1.txt\"])\n        with self.assertRaises(IOError):\n            c.find(\"/.\")\n        with self.assertRaises(arvados.errors.ArgumentError):\n            c.find(\"\")\n        self.assertIs(c.find(\"./nonexistant.txt\"), None)\n        self.assertIs(c.find(\"./nonexistantsubdir/nonexistant.txt\"), None)\n\n    def test_escaped_paths_dont_get_unescaped_on_manifest(self):\n        # Dir & file names are literally '\\056' (escaped form: \\134056)\n        manifest = './\\\\134056\\\\040Test d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\134056\\n'\n        c = Collection(manifest)\n        self.assertEqual(c.portable_manifest_text(), manifest)\n\n    def test_other_special_chars_on_file_token(self):\n        cases = [\n            ('\\\\000', '\\0'),\n            ('\\\\011', '\\t'),\n            ('\\\\012', '\\n'),\n            ('\\\\072', ':'),\n            ('\\\\134400', '\\\\400'),\n        ]\n        for encoded, decoded in cases:\n            manifest = '. 
d41d8cd98f00b204e9800998ecf8427e+0 0:0:some%sfile.txt\\n' % encoded\n            c = Collection(manifest)\n            self.assertEqual(c.portable_manifest_text(), manifest)\n            self.assertIn('some%sfile.txt' % decoded, c.keys())\n\n    def test_escaped_paths_do_get_unescaped_on_listing(self):\n        # Dir & file names are literally '\\056' (escaped form: \\134056)\n        manifest = './\\\\134056\\\\040Test d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\134056\\n'\n        c = Collection(manifest)\n        self.assertIn('\\\\056 Test', c.keys())\n        self.assertIn('\\\\056', c['\\\\056 Test'].keys())\n\n    def test_make_empty_dir_with_escaped_chars(self):\n        c = Collection()\n        c.mkdirs('./Empty\\\\056Dir')\n        self.assertEqual(c.portable_manifest_text(),\n                         './Empty\\\\134056Dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\056\\n')\n\n    def test_make_empty_dir_with_spaces(self):\n        c = Collection()\n        c.mkdirs('./foo bar/baz waz')\n        self.assertEqual(c.portable_manifest_text(),\n                         './foo\\\\040bar/baz\\\\040waz d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\056\\n')\n\n    def test_remove_in_subdir(self):\n        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\\n')\n        c.remove(\"foo/count2.txt\")\n        self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\056\\n\", c.portable_manifest_text())\n\n    def test_remove_empty_subdir(self):\n        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\\n')\n        c.remove(\"foo/count2.txt\")\n        c.remove(\"foo\")\n        self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n\", c.portable_manifest_text())\n\n    def test_remove_nonempty_subdir(self):\n        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\\n')\n        with self.assertRaises(IOError):\n            c.remove(\"foo\")\n        c.remove(\"foo\", recursive=True)\n        self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n\", c.portable_manifest_text())\n\n    def test_copy_to_file_in_dir(self):\n        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n')\n        c.copy(\"count1.txt\", \"foo/count2.txt\")\n        self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\\n\", c.portable_manifest_text())\n\n    def test_copy_file(self):\n        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n')\n        c.copy(\"count1.txt\", \"count2.txt\")\n        self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\\n\", c.portable_manifest_text())\n\n    def test_copy_to_existing_dir(self):\n        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\\n')\n        c.copy(\"count1.txt\", \"foo\")\n        self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:10:count2.txt\\n\", c.portable_manifest_text())\n\n    def test_copy_to_new_dir(self):\n        c = Collection('. 
781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n')\n        c.copy(\"count1.txt\", \"foo/\")\n        self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n\", c.portable_manifest_text())\n\n    def test_rename_file(self):\n        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n')\n        c.rename(\"count1.txt\", \"count2.txt\")\n        self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\\n\", c.manifest_text())\n\n    def test_move_file_to_dir(self):\n        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n')\n        c.mkdirs(\"foo\")\n        c.rename(\"count1.txt\", \"foo/count2.txt\")\n        self.assertEqual(\"./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\\n\", c.manifest_text())\n\n    def test_move_file_to_other(self):\n        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n')\n        c2 = Collection()\n        c2.rename(\"count1.txt\", \"count2.txt\", source_collection=c1)\n        self.assertEqual(\"\", c1.manifest_text())\n        self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\\n\", c2.manifest_text())\n\n    def test_clone(self):\n        c = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\\n')\n        cl = c.clone()\n        self.assertEqual(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\\n\", cl.portable_manifest_text())\n\n    def test_diff_del_add(self):\n        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n')\n        c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\\n')\n        d = c2.diff(c1)\n        self.assertEqual(sorted(d), [\n            ('add', './count1.txt', c1[\"count1.txt\"]),\n            ('del', './count2.txt', c2[\"count2.txt\"]),\n        ])\n        d = c1.diff(c2)\n        self.assertEqual(sorted(d), [\n            ('add', './count2.txt', c2[\"count2.txt\"]),\n            ('del', './count1.txt', c1[\"count1.txt\"]),\n        ])\n        self.assertNotEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n        c1.apply(d)\n        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n\n    def test_diff_same(self):\n        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n')\n        c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n')\n        d = c2.diff(c1)\n        self.assertEqual(d, [('tok', './count1.txt', c2[\"count1.txt\"], c1[\"count1.txt\"])])\n        d = c1.diff(c2)\n        self.assertEqual(d, [('tok', './count1.txt', c2[\"count1.txt\"], c1[\"count1.txt\"])])\n\n        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n        c1.apply(d)\n        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n\n    def test_diff_mod(self):\n        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n')\n        c2 = Collection('. 
5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt\\n')\n        d = c2.diff(c1)\n        self.assertEqual(d, [('mod', './count1.txt', c2[\"count1.txt\"], c1[\"count1.txt\"])])\n        d = c1.diff(c2)\n        self.assertEqual(d, [('mod', './count1.txt', c1[\"count1.txt\"], c2[\"count1.txt\"])])\n\n        self.assertNotEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n        c1.apply(d)\n        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n\n    def test_diff_add(self):\n        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n')\n        c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt 10:20:count2.txt\\n')\n        d = c2.diff(c1)\n        self.assertEqual(sorted(d), [\n            ('del', './count2.txt', c2[\"count2.txt\"]),\n            ('tok', './count1.txt', c2[\"count1.txt\"], c1[\"count1.txt\"]),\n        ])\n        d = c1.diff(c2)\n        self.assertEqual(sorted(d), [\n            ('add', './count2.txt', c2[\"count2.txt\"]),\n            ('tok', './count1.txt', c2[\"count1.txt\"], c1[\"count1.txt\"]),\n        ])\n\n        self.assertNotEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n        c1.apply(d)\n        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n\n    def test_diff_add_in_subcollection(self):\n        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n')\n        c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\\n')\n        d = c2.diff(c1)\n        self.assertEqual(sorted(d), [\n            ('del', './foo', c2[\"foo\"]),\n            ('tok', './count1.txt', c2[\"count1.txt\"], c1[\"count1.txt\"]),\n        ])\n        d = c1.diff(c2)\n        self.assertEqual(sorted(d), [\n            ('add', './foo', c2[\"foo\"]),\n            ('tok', './count1.txt', c2[\"count1.txt\"], c1[\"count1.txt\"]),\n        ])\n        self.assertNotEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n        c1.apply(d)\n        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n\n    def test_diff_del_add_in_subcollection(self):\n        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\\n')\n        c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:3:count3.txt\\n')\n        d = c2.diff(c1)\n        self.assertEqual(sorted(d), [\n            ('add', './foo/count2.txt', c1.find(\"foo/count2.txt\")),\n            ('del', './foo/count3.txt', c2.find(\"foo/count3.txt\")),\n            ('tok', './count1.txt', c2[\"count1.txt\"], c1[\"count1.txt\"]),\n        ])\n        d = c1.diff(c2)\n        self.assertEqual(sorted(d), [\n            ('add', './foo/count3.txt', c2.find(\"foo/count3.txt\")),\n            ('del', './foo/count2.txt', c1.find(\"foo/count2.txt\")),\n            ('tok', './count1.txt', c2[\"count1.txt\"], c1[\"count1.txt\"]),\n        ])\n\n        self.assertNotEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n        c1.apply(d)\n        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n\n    def test_diff_mod_in_subcollection(self):\n        c1 = Collection('. 
781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n./foo 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\\n')\n        c2 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt 0:3:foo\\n')\n        d = c2.diff(c1)\n        self.assertEqual(sorted(d), [\n            ('mod', './foo', c2[\"foo\"], c1[\"foo\"]),\n            ('tok', './count1.txt', c2[\"count1.txt\"], c1[\"count1.txt\"]),\n        ])\n        d = c1.diff(c2)\n        self.assertEqual(sorted(d), [\n            ('mod', './foo', c1[\"foo\"], c2[\"foo\"]),\n            ('tok', './count1.txt', c2[\"count1.txt\"], c1[\"count1.txt\"]),\n        ])\n\n        self.assertNotEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n        c1.apply(d)\n        self.assertEqual(c1.portable_manifest_text(), c2.portable_manifest_text())\n\n    def test_conflict_keep_local_change(self):\n        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n')\n        c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count2.txt\\n')\n        d = c1.diff(c2)\n        self.assertEqual(sorted(d), [\n            ('add', './count2.txt', c2[\"count2.txt\"]),\n            ('del', './count1.txt', c1[\"count1.txt\"]),\n        ])\n        f = c1.open(\"count1.txt\", \"wb\")\n        f.write(b\"zzzzz\")\n\n        # c1 changed, so it should not be deleted.\n        c1.apply(d)\n        self.assertEqual(c1.portable_manifest_text(), \". 95ebc3c7b3b9f1d2c40fec14415d3cb8+5 5348b82a029fd9e971a811ce1f71360b+43 0:5:count1.txt 5:10:count2.txt\\n\")\n\n    def test_conflict_mod(self):\n        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt')\n        c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt')\n        d = c1.diff(c2)\n        self.assertEqual(d, [('mod', './count1.txt', c1[\"count1.txt\"], c2[\"count1.txt\"])])\n        f = c1.open(\"count1.txt\", \"wb\")\n        f.write(b\"zzzzz\")\n\n        # c1 changed, so c2 mod will go to a conflict file\n        c1.apply(d)\n        self.assertRegex(\n            c1.portable_manifest_text(),\n            r\"\\. 95ebc3c7b3b9f1d2c40fec14415d3cb8\\+5 5348b82a029fd9e971a811ce1f71360b\\+43 0:5:count1\\.txt 5:10:count1\\.txt~\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d~conflict~$\")\n\n    def test_conflict_add(self):\n        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count2.txt\\n')\n        c2 = Collection('. 5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt\\n')\n        d = c1.diff(c2)\n        self.assertEqual(sorted(d), [\n            ('add', './count1.txt', c2[\"count1.txt\"]),\n            ('del', './count2.txt', c1[\"count2.txt\"]),\n        ])\n        f = c1.open(\"count1.txt\", \"wb\")\n        f.write(b\"zzzzz\")\n\n        # c1 added count1.txt, so c2 add will go to a conflict file\n        c1.apply(d)\n        self.assertRegex(\n            c1.portable_manifest_text(),\n            r\"\\. 95ebc3c7b3b9f1d2c40fec14415d3cb8\\+5 5348b82a029fd9e971a811ce1f71360b\\+43 0:5:count1\\.txt 5:10:count1\\.txt~\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d~conflict~$\")\n\n    def test_conflict_del(self):\n        c1 = Collection('. 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt')\n        c2 = Collection('. 
5348b82a029fd9e971a811ce1f71360b+43 0:10:count1.txt')\n        d = c1.diff(c2)\n        self.assertEqual(d, [('mod', './count1.txt', c1[\"count1.txt\"], c2[\"count1.txt\"])])\n        c1.remove(\"count1.txt\")\n\n        # c1 deleted, so c2 mod will go to a conflict file\n        c1.apply(d)\n        self.assertRegex(\n            c1.portable_manifest_text(),\n            r\"\\. 5348b82a029fd9e971a811ce1f71360b\\+43 0:10:count1\\.txt~\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d~conflict~$\")\n\n    def test_notify(self):\n        c1 = Collection()\n        events = []\n        c1.subscribe(lambda event, collection, name, item: events.append((event, collection, name, item)))\n        f = c1.open(\"foo.txt\", \"wb\")\n        self.assertEqual(events[0], (arvados.collection.ADD, c1, \"foo.txt\", f.arvadosfile))\n\n    def test_open_w(self):\n        c1 = Collection(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count1.txt\\n\")\n        self.assertEqual(c1[\"count1.txt\"].size(), 10)\n        c1.open(\"count1.txt\", \"wb\").close()\n        self.assertEqual(c1[\"count1.txt\"].size(), 0)\n\n\nclass NewCollectionTestCaseWithServersAndTokens(run_test_server.TestCaseWithServers):\n    MAIN_SERVER = {}\n    KEEP_SERVER = {}\n    local_locator_re = r\"[0-9a-f]{32}\\+\\d+\\+A[a-f0-9]{40}@[a-f0-9]{8}\"\n    remote_locator_re = r\"[0-9a-f]{32}\\+\\d+\\+R[a-z]{5}-[a-f0-9]{40}@[a-f0-9]{8}\"\n\n    def setUp(self):\n        self.keep_put = getattr(arvados.keep.KeepClient, 'put')\n\n    @mock.patch('arvados.keep.KeepClient.put', autospec=True)\n    def test_storage_classes_desired(self, put_mock):\n        put_mock.side_effect = self.keep_put\n        c = Collection(storage_classes_desired=['default'])\n        with c.open(\"file.txt\", 'wb') as f:\n            f.write('content')\n        c.save_new()\n        _, kwargs = put_mock.call_args\n        self.assertEqual(['default'], kwargs['classes'])\n\n    @mock.patch('arvados.keep.KeepClient.put', autospec=True)\n    def test_repacked_block_submission_get_permission_token(self, mocked_put):\n        '''\n        Make sure that blocks committed after repacking small ones get their\n        permission tokens assigned on the collection manifest.\n        '''\n        def wrapped_keep_put(*args, **kwargs):\n            # Simulate slow put operations\n            time.sleep(1)\n            return self.keep_put(*args, **kwargs)\n\n        mocked_put.side_effect = wrapped_keep_put\n        c = Collection()\n        # Write 70 files of ~1MiB each to force producing one big block by\n        # repacking small ones before finishing the upload.\n        for i in range(70):\n            f = c.open(\"file_{}.txt\".format(i), 'wb')\n            f.write(random.choice('abcdefghijklmnopqrstuvwxyz') * (2**20+i))\n            f.close(flush=False)\n        # We should get 2 blocks with their tokens\n        self.assertEqual(len(re.findall(self.local_locator_re, c.manifest_text())), 2)\n\n    @mock.patch('arvados.keep.KeepClient.refresh_signature')\n    def test_copy_remote_blocks_on_save_new(self, rs_mock):\n        remote_block_loc = \"acbd18db4cc2f85cedef654fccc4a4d8+3+Remote-\" + \"a\" * 40 + \"@abcdef01\"\n        local_block_loc = \"acbd18db4cc2f85cedef654fccc4a4d8+3+A\" + \"b\" * 40 + \"@abcdef01\"\n        rs_mock.return_value = local_block_loc\n        c = Collection(\". 
\" + remote_block_loc + \" 0:3:foofile.txt\\n\")\n        self.assertEqual(\n            len(re.findall(self.remote_locator_re, c.manifest_text())), 1)\n        self.assertEqual(\n            len(re.findall(self.local_locator_re, c.manifest_text())), 0)\n        c.save_new()\n        rs_mock.assert_called()\n        self.assertEqual(\n            len(re.findall(self.remote_locator_re, c.manifest_text())), 0)\n        self.assertEqual(\n            len(re.findall(self.local_locator_re, c.manifest_text())), 1)\n\n    @mock.patch('arvados.keep.KeepClient.refresh_signature')\n    def test_copy_remote_blocks_on_save(self, rs_mock):\n        remote_block_loc = \"acbd18db4cc2f85cedef654fccc4a4d8+3+Remote-\" + \"a\" * 40 + \"@abcdef01\"\n        local_block_loc = \"acbd18db4cc2f85cedef654fccc4a4d8+3+A\" + \"b\" * 40 + \"@abcdef01\"\n        rs_mock.return_value = local_block_loc\n        # Remote collection\n        remote_c = Collection(\". \" + remote_block_loc + \" 0:3:foofile.txt\\n\")\n        self.assertEqual(\n            len(re.findall(self.remote_locator_re, remote_c.manifest_text())), 1)\n        # Local collection\n        local_c = Collection()\n        with local_c.open('barfile.txt', 'wb') as f:\n            f.write('bar')\n        local_c.save_new()\n        self.assertEqual(\n            len(re.findall(self.local_locator_re, local_c.manifest_text())), 1)\n        self.assertEqual(\n            len(re.findall(self.remote_locator_re, local_c.manifest_text())), 0)\n        # Copy remote file to local collection\n        local_c.copy('./foofile.txt', './copied/foofile.txt', remote_c)\n        self.assertEqual(\n            len(re.findall(self.local_locator_re, local_c.manifest_text())), 1)\n        self.assertEqual(\n            len(re.findall(self.remote_locator_re, local_c.manifest_text())), 1)\n        # Save local collection: remote block should be copied\n        local_c.save()\n        rs_mock.assert_called()\n        self.assertEqual(\n            len(re.findall(self.local_locator_re, local_c.manifest_text())), 2)\n        self.assertEqual(\n            len(re.findall(self.remote_locator_re, local_c.manifest_text())), 0)\n\n\nclass NewCollectionTestCaseWithServers(run_test_server.TestCaseWithServers):\n    def test_preserve_version_on_save(self):\n        c = Collection()\n        c.save_new(preserve_version=True)\n        coll_record = arvados.api().collections().get(uuid=c.manifest_locator()).execute()\n        self.assertEqual(coll_record['version'], 1)\n        self.assertEqual(coll_record['preserve_version'], True)\n        with c.open(\"foo.txt\", \"wb\") as foo:\n            foo.write(b\"foo\")\n        c.save(preserve_version=True)\n        coll_record = arvados.api().collections().get(uuid=c.manifest_locator()).execute()\n        self.assertEqual(coll_record['version'], 2)\n        self.assertEqual(coll_record['preserve_version'], True)\n        with c.open(\"bar.txt\", \"wb\") as foo:\n            foo.write(b\"bar\")\n        c.save(preserve_version=False)\n        coll_record = arvados.api().collections().get(uuid=c.manifest_locator()).execute()\n        self.assertEqual(coll_record['version'], 3)\n        self.assertEqual(coll_record['preserve_version'], False)\n\n    def test_get_manifest_text_only_committed(self):\n        c = Collection()\n        with c.open(\"count.txt\", \"wb\") as f:\n            # One file committed\n            with c.open(\"foo.txt\", \"wb\") as foo:\n                foo.write(b\"foo\")\n                foo.flush() # Force block commit\n    
        f.write(b\"0123456789\")\n            # Other file not committed. Block not written to keep yet.\n            self.assertEqual(\n                c._get_manifest_text(\".\",\n                                     strip=False,\n                                     normalize=False,\n                                     only_committed=True),\n                '. acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:count.txt 0:3:foo.txt\\n')\n            # And now with the file closed...\n            f.flush() # Force block commit\n        self.assertEqual(\n            c._get_manifest_text(\".\",\n                                 strip=False,\n                                 normalize=False,\n                                 only_committed=True),\n            \". 781e5e245d69b566979b86e28d23f2c7+10 acbd18db4cc2f85cedef654fccc4a4d8+3 0:10:count.txt 10:3:foo.txt\\n\")\n\n    def test_only_small_blocks_are_packed_together(self):\n        c = Collection()\n        # Write a couple of small files,\n        f = c.open(\"count.txt\", \"wb\")\n        f.write(b\"0123456789\")\n        f.close(flush=False)\n        foo = c.open(\"foo.txt\", \"wb\")\n        foo.write(b\"foo\")\n        foo.close(flush=False)\n        # Then, write a big file, it shouldn't be packed with the ones above\n        big = c.open(\"bigfile.txt\", \"wb\")\n        big.write(b\"x\" * 1024 * 1024 * 33) # 33 MB > KEEP_BLOCK_SIZE/2\n        big.close(flush=False)\n        self.assertEqual(\n            c.manifest_text(\".\"),\n            '. 2d303c138c118af809f39319e5d507e9+34603008 a8430a058b8fbf408e1931b794dbd6fb+13 0:34603008:bigfile.txt 34603008:10:count.txt 34603018:3:foo.txt\\n')\n\n    def test_flush_after_small_block_packing(self):\n        c = Collection()\n        # Write a couple of small files,\n        f = c.open(\"count.txt\", \"wb\")\n        f.write(b\"0123456789\")\n        f.close(flush=False)\n        foo = c.open(\"foo.txt\", \"wb\")\n        foo.write(b\"foo\")\n        foo.close(flush=False)\n\n        self.assertEqual(\n            c.manifest_text(),\n            '. a8430a058b8fbf408e1931b794dbd6fb+13 0:10:count.txt 10:3:foo.txt\\n')\n\n        f = c.open(\"count.txt\", \"rb+\")\n        f.close(flush=True)\n\n        self.assertEqual(\n            c.manifest_text(),\n            '. a8430a058b8fbf408e1931b794dbd6fb+13 0:10:count.txt 10:3:foo.txt\\n')\n\n    def test_write_after_small_block_packing2(self):\n        c = Collection()\n        # Write a couple of small files,\n        f = c.open(\"count.txt\", \"wb\")\n        f.write(b\"0123456789\")\n        f.close(flush=False)\n        foo = c.open(\"foo.txt\", \"wb\")\n        foo.write(b\"foo\")\n        foo.close(flush=False)\n\n        self.assertEqual(\n            c.manifest_text(),\n            '. a8430a058b8fbf408e1931b794dbd6fb+13 0:10:count.txt 10:3:foo.txt\\n')\n\n        f = c.open(\"count.txt\", \"rb+\")\n        f.write(b\"abc\")\n        f.close(flush=False)\n\n        self.assertEqual(\n            c.manifest_text(),\n            '. 900150983cd24fb0d6963f7d28e17f72+3 a8430a058b8fbf408e1931b794dbd6fb+13 0:3:count.txt 6:7:count.txt 13:3:foo.txt\\n')\n\n\n    def test_small_block_packing_with_overwrite(self):\n        c = Collection()\n        c.open(\"b1\", \"wb\").close()\n        c[\"b1\"].writeto(0, b\"b1\", 0)\n\n        c.open(\"b2\", \"wb\").close()\n        c[\"b2\"].writeto(0, b\"b2\", 0)\n\n        c[\"b1\"].writeto(0, b\"1b\", 0)\n\n        self.assertEqual(c.manifest_text(), \". 
ed4f3f67c70b02b29c50ce1ea26666bd+4 0:2:b1 2:2:b2\\n\")\n        self.assertEqual(c[\"b1\"].manifest_text(), \". ed4f3f67c70b02b29c50ce1ea26666bd+4 0:2:b1\\n\")\n        self.assertEqual(c[\"b2\"].manifest_text(), \". ed4f3f67c70b02b29c50ce1ea26666bd+4 2:2:b2\\n\")\n\n\nclass CollectionCreateUpdateTest(run_test_server.TestCaseWithServers):\n    MAIN_SERVER = {}\n    KEEP_SERVER = {}\n\n    def create_count_txt(self):\n        # Create an empty collection, save it to the API server, then write a\n        # file, but don't save it.\n\n        c = Collection()\n        c.save_new(\"CollectionCreateUpdateTest\", ensure_unique_name=True)\n        self.assertEqual(c.portable_data_hash(), \"d41d8cd98f00b204e9800998ecf8427e+0\")\n        self.assertEqual(c.api_response()[\"portable_data_hash\"], \"d41d8cd98f00b204e9800998ecf8427e+0\" )\n\n        with c.open(\"count.txt\", \"wb\") as f:\n            f.write(b\"0123456789\")\n\n        self.assertEqual(c.portable_manifest_text(), \". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n\")\n\n        return c\n\n    def test_create_and_save(self):\n        c = self.create_count_txt()\n        c.save(properties={'type' : 'Intermediate'},\n               storage_classes=['archive'],\n               trash_at=datetime.datetime(2111, 1, 1, 11, 11, 11, 111111))\n\n        self.assertRegex(\n            c.manifest_text(),\n            r\"^\\. 781e5e245d69b566979b86e28d23f2c7\\+10\\+A[a-f0-9]{40}@[a-f0-9]{8} 0:10:count\\.txt$\",)\n        self.assertEqual(c.api_response()[\"storage_classes_desired\"], ['archive'])\n        self.assertEqual(c.api_response()[\"properties\"], {'type' : 'Intermediate'})\n        self.assertEqual(c.api_response()[\"trash_at\"], '2111-01-01T11:11:11.111111000Z')\n\n\n    def test_create_and_save_new(self):\n        c = self.create_count_txt()\n        c.save_new(properties={'type' : 'Intermediate'},\n                   storage_classes=['archive'],\n                   trash_at=datetime.datetime(2111, 1, 1, 11, 11, 11, 111111))\n\n        self.assertRegex(\n            c.manifest_text(),\n            r\"^\\. 
781e5e245d69b566979b86e28d23f2c7\\+10\\+A[a-f0-9]{40}@[a-f0-9]{8} 0:10:count\\.txt$\",)\n        self.assertEqual(c.api_response()[\"storage_classes_desired\"], ['archive'])\n        self.assertEqual(c.api_response()[\"properties\"], {'type' : 'Intermediate'})\n        self.assertEqual(c.api_response()[\"trash_at\"], '2111-01-01T11:11:11.111111000Z')\n\n    def test_create_and_save_after_committing(self):\n        c = self.create_count_txt()\n        c.save(properties={'type' : 'Intermediate'},\n               storage_classes=['hot'],\n               trash_at=datetime.datetime(2111, 1, 1, 11, 11, 11, 111111))\n        c.save(properties={'type' : 'Output'},\n               storage_classes=['cold'],\n               trash_at=datetime.datetime(2222, 2, 2, 22, 22, 22, 222222))\n\n        self.assertEqual(c.api_response()[\"storage_classes_desired\"], ['cold'])\n        self.assertEqual(c.api_response()[\"properties\"], {'type' : 'Output'})\n        self.assertEqual(c.api_response()[\"trash_at\"], '2222-02-02T22:22:22.222222000Z')\n\n    def test_create_diff_apply(self):\n        c1 = self.create_count_txt()\n        c1.save()\n\n        c2 = Collection(c1.manifest_locator())\n        with c2.open(\"count.txt\", \"wb\") as f:\n            f.write(b\"abcdefg\")\n\n        diff = c1.diff(c2)\n\n        self.assertEqual(diff[0], (arvados.collection.MOD, u'./count.txt', c1[\"count.txt\"], c2[\"count.txt\"]))\n\n        c1.apply(diff)\n        self.assertEqual(c1.portable_data_hash(), c2.portable_data_hash())\n\n    def test_diff_apply_with_token(self):\n        baseline = CollectionReader(\". 781e5e245d69b566979b86e28d23f2c7+10+A715fd31f8111894f717eb1003c1b0216799dd9ec@54f5dd1a 0:10:count.txt\\n\")\n        c = Collection(\". 781e5e245d69b566979b86e28d23f2c7+10 0:10:count.txt\\n\")\n        other = CollectionReader(\". 7ac66c0f148de9519b8bd264312c4d64+7+A715fd31f8111894f717eb1003c1b0216799dd9ec@54f5dd1a 0:7:count.txt\\n\")\n\n        diff = baseline.diff(other)\n        self.assertEqual(diff, [('mod', u'./count.txt', c[\"count.txt\"], other[\"count.txt\"])])\n\n        c.apply(diff)\n\n        self.assertEqual(c.manifest_text(), \". 7ac66c0f148de9519b8bd264312c4d64+7+A715fd31f8111894f717eb1003c1b0216799dd9ec@54f5dd1a 0:7:count.txt\\n\")\n\n\n    def test_create_and_update(self):\n        c1 = self.create_count_txt()\n        c1.save()\n\n        c2 = arvados.collection.Collection(c1.manifest_locator())\n        with c2.open(\"count.txt\", \"wb\") as f:\n            f.write(b\"abcdefg\")\n\n        c2.save()\n\n        self.assertNotEqual(c1.portable_data_hash(), c2.portable_data_hash())\n        c1.update()\n        self.assertEqual(c1.portable_data_hash(), c2.portable_data_hash())\n\n\n    def test_create_and_update_with_conflict(self):\n        c1 = self.create_count_txt()\n        c1.save()\n\n        with c1.open(\"count.txt\", \"wb\") as f:\n            f.write(b\"XYZ\")\n\n        c2 = arvados.collection.Collection(c1.manifest_locator())\n        with c2.open(\"count.txt\", \"wb\") as f:\n            f.write(b\"abcdefg\")\n\n        c2.save()\n\n        c1.update()\n        self.assertRegex(\n            c1.manifest_text(),\n            r\"\\. 
e65075d550f9b5bf9992fa1d71a131be\\+3\\S* 7ac66c0f148de9519b8bd264312c4d64\\+7\\S* 0:3:count\\.txt 3:7:count\\.txt~\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d~conflict~$\")\n\n    def test_pdh_is_native_str(self):\n        c1 = self.create_count_txt()\n        pdh = c1.portable_data_hash()\n        self.assertEqual(type(''), type(pdh))\n\n\nclass TestCollectionAPIPassthrough:\n    _API_HOST = os.environ['ARVADOS_API_HOST']\n\n    @pytest.fixture\n    def no_arvados_config(self, monkeypatch):\n        \"\"\"Prevent loading Arvados client configuration\n\n        This fixture rigs things up so that Arvados API client objects can\n        only be constructed with explicit arguments.\n        \"\"\"\n        # In principle, this is all we need:\n        monkeypatch.setattr(arvados.config, 'settings', lambda: {})\n        # As a hedge against future development, cover more bases:\n        monkeypatch.setattr(arvados.config, '_settings', {})\n        monkeypatch.setattr(arvados.config, 'load', lambda: {})\n        monkeypatch.delenv('ARVADOS_API_HOST', raising=False)\n        monkeypatch.delenv('ARVADOS_API_TOKEN', raising=False)\n        monkeypatch.delenv('ARVADOS_API_HOST_INSECURE', raising=False)\n\n    @pytest.fixture\n    def arv_client(self, no_arvados_config):\n        return arvados.api(\n            'v1',\n            host=self._API_HOST,\n            token=run_test_server.auth_token('active'),\n            insecure=True,\n        )\n\n    @pytest.fixture\n    def coll_fixture(self):\n        return run_test_server.fixture('collections')['collection_owned_by_active']\n\n    def new_coll(self, arv_client, coll_fixture):\n        coll = Collection(\n            api_client=arv_client,\n            manifest_locator_or_text=coll_fixture['portable_data_hash'],\n        )\n        coll.save_new()\n        return coll\n\n    def expect_manifest(self, coll_fixture, *copied_filenames):\n        assert (match := re.search(r' 0:\\d+:\\S', coll_fixture['manifest_text']))\n        prefix = match.group(0)[:-1]\n        manifest_ext = ''.join(prefix + name for name in copied_filenames)\n        return coll_fixture['manifest_text'].replace('\\n', f'{manifest_ext}\\n', 1)\n\n    def test_update_with_no_changes(self, arv_client, coll_fixture):\n        coll = self.new_coll(arv_client, coll_fixture)\n        assert coll.update() is None\n        assert coll.portable_manifest_text() == coll_fixture['manifest_text']\n\n    def test_update_with_self_changes(self, arv_client, coll_fixture):\n        coll = self.new_coll(arv_client, coll_fixture)\n        coll.copy('bar', 'baz')\n        assert coll.update() is None\n        assert coll.portable_manifest_text() == self.expect_manifest(coll_fixture, 'baz')\n\n    def test_update_with_other_changes(self, arv_client, coll_fixture):\n        coll1 = self.new_coll(arv_client, coll_fixture)\n        coll2 = Collection(coll1.manifest_locator(), arv_client)\n        coll2.copy('bar', 'baz')\n        coll2.save()\n        assert coll1.update() is None\n        assert coll1.portable_manifest_text() == self.expect_manifest(coll_fixture, 'baz')\n\n    def test_update_with_both_changes(self, arv_client, coll_fixture):\n        coll1 = self.new_coll(arv_client, coll_fixture)\n        coll2 = Collection(coll1.manifest_locator(), arv_client)\n        coll2.copy('bar', 'baz')\n        coll2.save()\n        coll1.copy('bar', 'foo')\n        assert coll1.update() is None\n        assert coll1.portable_manifest_text() == self.expect_manifest(coll_fixture, 'baz', 'foo')\n\n\nif 
__name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "sdk/python/tests/test_computed_permissions.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport arvados\nimport arvados.util\nfrom . import run_test_server\nfrom .test_util import KeysetTestHelper\n\nclass ComputedPermissionTest(run_test_server.TestCaseWithServers):\n    def test_computed_permission(self):\n        run_test_server.authorize_with('admin')\n        api_client = arvados.api('v1')\n        active_user_uuid = run_test_server.fixture('users')['active']['uuid']\n        resp = api_client.computed_permissions().list(\n            filters=[['user_uuid', '=', active_user_uuid]],\n        ).execute()\n        assert len(resp['items']) > 0\n        for item in resp['items']:\n            assert item['user_uuid'] == active_user_uuid\n\n    def test_keyset_list_all(self):\n        run_test_server.authorize_with('admin')\n        api_client = arvados.api('v1')\n        seen = {}\n        for item in arvados.util.keyset_list_all(api_client.computed_permissions().list, order_key='user_uuid', key_fields=('user_uuid', 'target_uuid')):\n            assert (item['user_uuid'], item['target_uuid']) not in seen\n            seen[(item['user_uuid'], item['target_uuid'])] = True\n\n    def test_iter_computed_permissions(self):\n        run_test_server.authorize_with('admin')\n        api_client = arvados.api('v1')\n        seen = {}\n        for item in arvados.util.iter_computed_permissions(api_client.computed_permissions().list):\n            assert item['perm_level']\n            assert (item['user_uuid'], item['target_uuid']) not in seen\n            seen[(item['user_uuid'], item['target_uuid'])] = True\n\n    def test_iter_computed_permissions_defaults(self):\n        ks = KeysetTestHelper([[\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"user_uuid asc\", \"target_uuid asc\"], \"filters\": []},\n            {\"items\": [{\"user_uuid\": \"u\", \"target_uuid\": \"t\"}]}\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"user_uuid asc\", \"target_uuid asc\"], \"filters\": [['user_uuid', '=', 'u'], ['target_uuid', '>', 't']]},\n            {\"items\": []},\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"user_uuid asc\", \"target_uuid asc\"], \"filters\": [['user_uuid', '>', 'u']]},\n            {\"items\": []},\n        ]])\n        ls = list(arvados.util.iter_computed_permissions(ks.fn))\n        assert ls == ks.expect[0][1]['items']\n\n    def test_iter_computed_permissions_order_key(self):\n        ks = KeysetTestHelper([[\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"target_uuid desc\", \"user_uuid desc\"], \"filters\": []},\n            {\"items\": [{\"user_uuid\": \"u\", \"target_uuid\": \"t\"}]}\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"target_uuid desc\", \"user_uuid desc\"], \"filters\": [['target_uuid', '=', 't'], ['user_uuid', '<', 'u']]},\n            {\"items\": []},\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"target_uuid desc\", \"user_uuid desc\"], \"filters\": [['target_uuid', '<', 't']]},\n            {\"items\": []},\n        ]])\n        ls = list(arvados.util.iter_computed_permissions(ks.fn, order_key='target_uuid', ascending=False))\n        assert ls == ks.expect[0][1]['items']\n\n    def test_iter_computed_permissions_num_retries(self):\n        ks = KeysetTestHelper([[\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"user_uuid asc\", \"target_uuid asc\"], 
\"filters\": []},\n            {\"items\": []}\n        ]], expect_num_retries=33)\n        assert list(arvados.util.iter_computed_permissions(ks.fn, num_retries=33)) == []\n\n    def test_iter_computed_permissions_invalid_key_fields(self):\n        ks = KeysetTestHelper([])\n        with self.assertRaises(arvados.errors.ArgumentError) as exc:\n            _ = list(arvados.util.iter_computed_permissions(ks.fn, key_fields=['target_uuid', 'perm_level']))\n        assert exc.exception.args[0] == 'key_fields can have at most one entry that is not order_key'\n"
  },
  {
    "path": "sdk/python/tests/test_config.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\n\nimport pytest\n\nfrom arvados import config as arv_config\n\nclass TestInitialize:\n    @pytest.fixture(autouse=True)\n    def setup(self, monkeypatch):\n        arv_config._settings = None\n        monkeypatch.delenv('ARVADOS_API_HOST', raising=False)\n        monkeypatch.delenv('ARVADOS_API_TOKEN', raising=False)\n        try:\n            yield\n        finally:\n            arv_config._settings = None\n\n    @pytest.fixture\n    def tmp_settings(self, tmp_path):\n        path = tmp_path / 'settings.conf'\n        with path.open('w') as settings_file:\n            print(\"ARVADOS_API_HOST=localhost\", file=settings_file)\n            print(\"ARVADOS_API_TOKEN=TestInitialize\", file=settings_file)\n        return path\n\n    def test_static_path(self, tmp_settings):\n        arv_config.initialize(tmp_settings)\n        actual = arv_config.settings()\n        assert actual['ARVADOS_API_HOST'] == 'localhost'\n        assert actual['ARVADOS_API_TOKEN'] == 'TestInitialize'\n\n    def test_search_path(self, tmp_settings):\n        def search(filename):\n            assert filename == tmp_settings.name\n            yield tmp_settings\n        arv_config.initialize(search)\n        actual = arv_config.settings()\n        assert actual['ARVADOS_API_HOST'] == 'localhost'\n        assert actual['ARVADOS_API_TOKEN'] == 'TestInitialize'\n\n    def test_default_search(self, tmp_settings, monkeypatch):\n        monkeypatch.setenv('CONFIGURATION_DIRECTORY', str(tmp_settings.parent))\n        monkeypatch.setenv('XDG_CONFIG_HOME', str(tmp_settings.parent))\n        monkeypatch.delenv('XDG_CONFIG_DIRS', raising=False)\n        actual = arv_config.settings()\n        assert actual['ARVADOS_API_HOST'] == 'localhost'\n        assert actual['ARVADOS_API_TOKEN'] == 'TestInitialize'\n\n    def test_environ_override(self, monkeypatch):\n        monkeypatch.setenv('ARVADOS_API_TOKEN', 'test_environ_override')\n        arv_config.initialize('')\n        actual = arv_config.settings()\n        assert actual.get('ARVADOS_API_HOST') is None\n        assert actual['ARVADOS_API_TOKEN'] == 'test_environ_override'\n"
  },
  {
    "path": "sdk/python/tests/test_errors.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport traceback\nimport unittest\n\nimport arvados.errors as arv_error\nfrom . import arvados_testutil as tutil\n\nclass KeepRequestErrorTestCase(unittest.TestCase):\n    REQUEST_ERRORS = [\n        ('http://keep1.zzzzz.example.org/', IOError(\"test IOError\")),\n        ('http://keep3.zzzzz.example.org/', MemoryError(\"test MemoryError\")),\n        ('http://keep5.zzzzz.example.org/',\n         arv_error.HttpError(500, \"Internal Server Error\")),\n        ('http://keep7.zzzzz.example.org/', IOError(\"second test IOError\")),\n        ]\n\n    def check_get_message(self, *args):\n        test_exc = arv_error.KeepRequestError(\"test message\", *args)\n        self.assertEqual(\"test message\", test_exc.message)\n\n    def test_get_message_with_request_errors(self):\n        self.check_get_message(self.REQUEST_ERRORS[:])\n\n    def test_get_message_without_request_errors(self):\n        self.check_get_message()\n\n    def check_get_request_errors(self, *args):\n        expected = dict(args[0]) if args else {}\n        test_exc = arv_error.KeepRequestError(\"test service exceptions\", *args)\n        self.assertEqual(expected, test_exc.request_errors())\n\n    def test_get_request_errors(self):\n        self.check_get_request_errors(self.REQUEST_ERRORS[:])\n\n    def test_get_request_errors_none(self):\n        self.check_get_request_errors({})\n\n    def test_empty_exception(self):\n        test_exc = arv_error.KeepRequestError()\n        self.assertFalse(test_exc.message)\n        self.assertEqual({}, test_exc.request_errors())\n\n    def traceback_str(self, exc):\n        return traceback.format_exception_only(type(exc), exc)[-1]\n\n    def test_traceback_str_without_request_errors(self):\n        message = \"test plain traceback string\"\n        test_exc = arv_error.KeepRequestError(message)\n        exc_report = self.traceback_str(test_exc)\n        self.assertRegex(exc_report, r\"^(arvados\\.errors\\.)?KeepRequestError: \")\n        self.assertIn(message, exc_report)\n\n    def test_traceback_str_with_request_errors(self):\n        message = \"test traceback shows Keep services\"\n        test_exc = arv_error.KeepRequestError(message, self.REQUEST_ERRORS[:])\n        exc_report = self.traceback_str(test_exc)\n        self.assertRegex(exc_report, r\"^(arvados\\.errors\\.)?KeepRequestError: \")\n        self.assertIn(message, exc_report)\n        for expect_re in [\n                r\"raised (IOError|OSError)\", # IOError in Python2, OSError in Python3\n                r\"raised MemoryError\",\n                r\"test MemoryError\",\n                r\"second test IOError\",\n                r\"responded with 500 Internal Server Error\"]:\n            self.assertRegex(exc_report, expect_re)\n        # Assert the report maintains order of listed services.\n        last_index = -1\n        for service_key, _ in self.REQUEST_ERRORS:\n            service_index = exc_report.find(service_key)\n            self.assertGreater(service_index, last_index)\n            last_index = service_index\n"
  },
  {
    "path": "sdk/python/tests/test_events.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport json\nimport logging\nimport queue\nimport sys\nimport threading\nimport time\nimport unittest\n\nfrom unittest import mock\n\nimport websockets.exceptions as ws_exc\n\nimport arvados\nfrom . import arvados_testutil as tutil\nfrom . import run_test_server\n\nclass FakeWebsocketClient:\n    \"\"\"Fake self-contained version of websockets.sync.client.ClientConnection\n\n    This provides enough of the API to test EventClient. It loosely mimics\n    the Arvados WebSocket API by acknowledging subscribe messages. You can use\n    `mock_wrapper` to test calls. You can set `_check_lock` to test that the\n    given lock is acquired before `send` is called.\n    \"\"\"\n\n    def __init__(self):\n        self._check_lock = None\n        self._closed = threading.Event()\n        self._messages = queue.Queue()\n\n    def mock_wrapper(self):\n        wrapper = mock.Mock(wraps=self)\n        wrapper.__iter__ = lambda _: self.__iter__()\n        return wrapper\n\n    def __iter__(self):\n        while True:\n            msg = self._messages.get()\n            self._messages.task_done()\n            if isinstance(msg, Exception):\n                raise msg\n            else:\n                yield msg\n\n    def close(self, code=1000, reason=''):\n        if not self._closed.is_set():\n            self._closed.set()\n            self.force_disconnect()\n\n    def force_disconnect(self):\n        self._messages.put(ws_exc.ConnectionClosed(None, None))\n\n    def send(self, msg):\n        if self._check_lock is not None and self._check_lock.acquire(blocking=False):\n            self._check_lock.release()\n            raise AssertionError(f\"called ws_client.send() without lock\")\n        elif self._closed.is_set():\n            raise ws_exc.ConnectionClosed(None, None)\n        try:\n            msg = json.loads(msg)\n        except ValueError:\n            status = 400\n        else:\n            status = 200\n        self._messages.put(json.dumps({'status': status}))\n\n\nclass WebsocketTest(run_test_server.TestCaseWithServers):\n    MAIN_SERVER = {}\n\n    TIME_PAST = time.time()-3600\n    TIME_FUTURE = time.time()+3600\n    MOCK_WS_URL = 'wss://[{}]/'.format(tutil.TEST_HOST)\n\n    TEST_TIMEOUT = 10.0\n\n    def setUp(self):\n        self.ws = None\n\n    def tearDown(self):\n        try:\n            if self.ws:\n                self.ws.close()\n        except Exception as e:\n            print(\"Error in teardown: \", e)\n        super(WebsocketTest, self).tearDown()\n        run_test_server.reset()\n\n    def _test_subscribe(self, poll_fallback, expect_type, start_time=None, expected=1):\n        run_test_server.authorize_with('active')\n        events = queue.Queue(100)\n\n        # Create ancestor before subscribing.\n        # When listening with start_time in the past, this should also be retrieved.\n        # However, when start_time is omitted in subscribe, this should not be fetched.\n        ancestor = arvados.api('v1').collections().create(body={}).execute()\n\n        filters = [['object_uuid', 'is_a', 'arvados#collection']]\n        if start_time:\n            filters.append(['created_at', '>=', start_time])\n\n        self.ws = arvados.events.subscribe(\n            arvados.api('v1'), filters,\n            events.put_nowait,\n            poll_fallback=poll_fallback,\n            last_log_id=(1 if start_time else None))\n        self.assertIsInstance(self.ws, 
expect_type)\n        self.assertEqual(200, events.get(True, 5)['status'])\n\n        if hasattr(self.ws, '_skip_old_events'):\n            # Avoid race by waiting for the first \"find ID threshold\"\n            # poll to finish.\n            deadline = time.time() + 10\n            while not self.ws._skip_old_events:\n                self.assertLess(time.time(), deadline)\n                time.sleep(0.1)\n        collection = arvados.api('v1').collections().create(body={}).execute()\n\n        want_uuids = []\n        if expected > 0:\n            want_uuids.append(collection['uuid'])\n        if expected > 1:\n            want_uuids.append(ancestor['uuid'])\n        log_object_uuids = []\n        while set(want_uuids) - set(log_object_uuids):\n            log_object_uuids.append(events.get(True, 5)['object_uuid'])\n\n        if expected < 2:\n            with self.assertRaises(queue.Empty):\n                # assertEqual just serves to show us what unexpected\n                # thing comes out of the queue when the assertRaises\n                # fails; when the test passes, this assertEqual\n                # doesn't get called.\n                self.assertEqual(events.get(True, 2), None)\n\n    def test_subscribe_websocket(self):\n        self._test_subscribe(\n            poll_fallback=False, expect_type=arvados.events.EventClient, expected=1)\n\n    @mock.patch('arvados.events.EventClient.__init__')\n    def test_subscribe_poll(self, event_client_constr):\n        event_client_constr.side_effect = Exception('All is well')\n        self._test_subscribe(\n            poll_fallback=0.25, expect_type=arvados.events.PollClient, expected=1)\n\n    def test_subscribe_poll_retry(self):\n        api_mock = mock.MagicMock()\n        n = []\n        def on_ev(ev):\n            n.append(ev)\n\n        error_mock = mock.MagicMock()\n        error_mock.resp.status = 0\n        error_mock._get_reason.return_value = \"testing\"\n        api_mock.logs().list().execute.side_effect = (\n            arvados.errors.ApiError(error_mock, b\"\"),\n            {\"items\": [{\"id\": 1}], \"items_available\": 1},\n            arvados.errors.ApiError(error_mock, b\"\"),\n            {\"items\": [{\"id\": 1}], \"items_available\": 1},\n        )\n        pc = arvados.events.PollClient(api_mock, [], on_ev, 15, None)\n        pc.start()\n        while len(n) < 2:\n            time.sleep(.1)\n        pc.close()\n\n    def test_subscribe_websocket_with_start_time_past(self):\n        self._test_subscribe(\n            poll_fallback=False, expect_type=arvados.events.EventClient,\n            start_time=self.localiso(self.TIME_PAST),\n            expected=2)\n\n    @mock.patch('arvados.events.EventClient.__init__')\n    def test_subscribe_poll_with_start_time_past(self, event_client_constr):\n        event_client_constr.side_effect = Exception('All is well')\n        self._test_subscribe(\n            poll_fallback=0.25, expect_type=arvados.events.PollClient,\n            start_time=self.localiso(self.TIME_PAST),\n            expected=2)\n\n    def test_subscribe_websocket_with_start_time_future(self):\n        self._test_subscribe(\n            poll_fallback=False, expect_type=arvados.events.EventClient,\n            start_time=self.localiso(self.TIME_FUTURE),\n            expected=0)\n\n    @mock.patch('arvados.events.EventClient.__init__')\n    def test_subscribe_poll_with_start_time_future(self, event_client_constr):\n        event_client_constr.side_effect = Exception('All is well')\n        
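# Making EventClient's constructor raise forces subscribe() to fall\n        # back to the PollClient code path exercised here.\n        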
self._test_subscribe(\n            poll_fallback=0.25, expect_type=arvados.events.PollClient,\n            start_time=self.localiso(self.TIME_FUTURE),\n            expected=0)\n\n    def test_subscribe_websocket_with_start_time_past_utc(self):\n        self._test_subscribe(\n            poll_fallback=False, expect_type=arvados.events.EventClient,\n            start_time=self.utciso(self.TIME_PAST),\n            expected=2)\n\n    def test_subscribe_websocket_with_start_time_future_utc(self):\n        self._test_subscribe(\n            poll_fallback=False, expect_type=arvados.events.EventClient,\n            start_time=self.utciso(self.TIME_FUTURE),\n            expected=0)\n\n    def utciso(self, t):\n        return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(t))\n\n    def localiso(self, t):\n        return time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(t)) + self.isotz(-time.timezone//60)\n\n    def isotz(self, offset):\n        \"\"\"Convert minutes-east-of-UTC to RFC3339- and ISO-compatible time zone designator\"\"\"\n        return '{:+03d}:{:02d}'.format(offset//60, offset%60)\n\n    # Test websocket reconnection on (un)expected close\n    def _test_websocket_reconnect(self, close_unexpected):\n        run_test_server.authorize_with('active')\n        events = queue.Queue(100)\n\n        logstream = tutil.StringIO()\n        rootLogger = logging.getLogger()\n        streamHandler = logging.StreamHandler(logstream)\n        rootLogger.addHandler(streamHandler)\n\n        filters = [['object_uuid', 'is_a', 'arvados#collection']]\n        filters.append(['created_at', '>=', self.localiso(self.TIME_PAST)])\n        self.ws = arvados.events.subscribe(\n            arvados.api('v1'), filters,\n            events.put_nowait,\n            poll_fallback=False,\n            last_log_id=None)\n        self.assertIsInstance(self.ws, arvados.events.EventClient)\n        self.assertEqual(200, events.get(True, 5)['status'])\n\n        # create obj\n        collection = arvados.api('v1').collections().create(body={}).execute()\n\n        # expect an event\n        self.assertIn(collection['uuid'], events.get(True, 5)['object_uuid'])\n        with self.assertRaises(queue.Empty):\n            self.assertEqual(events.get(True, 2), None)\n\n        # close (im)properly\n        if close_unexpected:\n            self.ws._client.close()\n        else:\n            self.ws.close()\n\n        # create one more obj\n        collection2 = arvados.api('v1').collections().create(body={}).execute()\n\n        # (un)expect the object creation event\n        if close_unexpected:\n            log_object_uuids = []\n            for i in range(0, 2):\n                event = events.get(True, 5)\n                if event.get('object_uuid') is not None:\n                    log_object_uuids.append(event['object_uuid'])\n            with self.assertRaises(queue.Empty):\n                self.assertEqual(events.get(True, 2), None)\n            self.assertNotIn(collection['uuid'], log_object_uuids)\n            self.assertIn(collection2['uuid'], log_object_uuids)\n        else:\n            with self.assertRaises(queue.Empty):\n                self.assertEqual(events.get(True, 2), None)\n\n        # check the captured log to verify the close was handled as expected or unexpected\n        log_messages = logstream.getvalue()\n        closeLogFound = log_messages.find(\"Unexpected close. Reconnecting.\")\n        retryLogFound = log_messages.find(\"Error during websocket reconnect. 
Will retry\")\n        if close_unexpected:\n            self.assertNotEqual(closeLogFound, -1)\n        else:\n            self.assertEqual(closeLogFound, -1)\n        rootLogger.removeHandler(streamHandler)\n\n    def test_websocket_reconnect_on_unexpected_close(self):\n        self._test_websocket_reconnect(True)\n\n    def test_websocket_no_reconnect_on_close_by_user(self):\n        self._test_websocket_reconnect(False)\n\n    # Test websocket reconnection retry\n    @mock.patch('arvados.events.ws_client.connect')\n    def test_websocket_reconnect_retry(self, ws_conn):\n        logstream = tutil.StringIO()\n        rootLogger = logging.getLogger()\n        streamHandler = logging.StreamHandler(logstream)\n        rootLogger.addHandler(streamHandler)\n        try:\n            msg_event, wss_client, self.ws = self.fake_client(ws_conn)\n            self.assertTrue(msg_event.wait(timeout=1), \"timed out waiting for setup callback\")\n            msg_event.clear()\n            ws_conn.side_effect = [Exception('EventClient.connect error'), wss_client]\n            wss_client.force_disconnect()\n            self.assertTrue(msg_event.wait(timeout=1), \"timed out waiting for reconnect callback\")\n            # verify log messages to ensure retry happened\n            self.assertIn(\"Error 'EventClient.connect error' during websocket reconnect.\", logstream.getvalue())\n            self.assertEqual(ws_conn.call_count, 3)\n        finally:\n            rootLogger.removeHandler(streamHandler)\n\n    @mock.patch('arvados.events.ws_client.connect')\n    def test_run_forever_survives_reconnects(self, websocket_client):\n        client = arvados.events.EventClient(\n            self.MOCK_WS_URL, [], lambda event: None, None)\n        forever_thread = threading.Thread(target=client.run_forever)\n        forever_thread.start()\n        # Simulate an unexpected disconnect, and wait for reconnect.\n        try:\n            client.on_closed()\n            self.assertTrue(forever_thread.is_alive())\n            self.assertEqual(2, websocket_client.call_count)\n        finally:\n            client.close()\n            forever_thread.join()\n\n    @staticmethod\n    def fake_client(conn_patch, filters=None, url=MOCK_WS_URL):\n        \"\"\"Set up EventClient test infrastructure\n\n        Given a patch of `arvados.events.ws_client.connect`,\n        this returns a 3-tuple:\n\n        * `msg_event` is a `threading.Event` that is set as the test client\n          event callback. You can wait for this event to confirm that a\n          sent message has been acknowledged and processed.\n\n        * `mock_client` is a `mock.Mock` wrapper around `FakeWebsocketClient`.\n          Use this to assert `EventClient` calls the right methods. 
It tests\n          that `EventClient` acquires a lock before calling `send`.\n\n        * `client` is the `EventClient` that uses `mock_client` under the hood\n          that you exercise methods of.\n\n        Other arguments are passed to initialize `EventClient`.\n        \"\"\"\n        msg_event = threading.Event()\n        fake_client = FakeWebsocketClient()\n        mock_client = fake_client.mock_wrapper()\n        conn_patch.return_value = mock_client\n        client = arvados.events.EventClient(url, filters, lambda _: msg_event.set())\n        fake_client._check_lock = client._subscribe_lock\n        return msg_event, mock_client, client\n\n    @mock.patch('arvados.events.ws_client.connect')\n    def test_subscribe_locking(self, ws_conn):\n        f = [['created_at', '>=', '2023-12-01T00:00:00.000Z']]\n        msg_event, wss_client, self.ws = self.fake_client(ws_conn)\n        self.assertTrue(msg_event.wait(timeout=1), \"timed out waiting for setup callback\")\n        msg_event.clear()\n        wss_client.send.reset_mock()\n        self.ws.subscribe(f)\n        self.assertTrue(msg_event.wait(timeout=1), \"timed out waiting for subscribe callback\")\n        wss_client.send.assert_called()\n        (msg,), _ = wss_client.send.call_args\n        self.assertEqual(\n            json.loads(msg),\n            {'method': 'subscribe', 'filters': f},\n        )\n\n    @mock.patch('arvados.events.ws_client.connect')\n    def test_unsubscribe_locking(self, ws_conn):\n        f = [['created_at', '>=', '2023-12-01T01:00:00.000Z']]\n        msg_event, wss_client, self.ws = self.fake_client(ws_conn, f)\n        self.assertTrue(msg_event.wait(timeout=1), \"timed out waiting for setup callback\")\n        msg_event.clear()\n        wss_client.send.reset_mock()\n        self.ws.unsubscribe(f)\n        self.assertTrue(msg_event.wait(timeout=1), \"timed out waiting for unsubscribe callback\")\n        wss_client.send.assert_called()\n        (msg,), _ = wss_client.send.call_args\n        self.assertEqual(\n            json.loads(msg),\n            {'method': 'unsubscribe', 'filters': f},\n        )\n\n    @mock.patch('arvados.events.ws_client.connect')\n    def test_resubscribe_locking(self, ws_conn):\n        f = [['created_at', '>=', '2023-12-01T02:00:00.000Z']]\n        msg_event, wss_client, self.ws = self.fake_client(ws_conn, f)\n        self.assertTrue(msg_event.wait(timeout=1), \"timed out waiting for setup callback\")\n        msg_event.clear()\n        wss_client.send.reset_mock()\n        wss_client.force_disconnect()\n        self.assertTrue(msg_event.wait(timeout=1), \"timed out waiting for resubscribe callback\")\n        wss_client.send.assert_called()\n        (msg,), _ = wss_client.send.call_args\n        self.assertEqual(\n            json.loads(msg),\n            {'method': 'subscribe', 'filters': f},\n        )\n\n\nclass PollClientTestCase(unittest.TestCase):\n    TEST_TIMEOUT = 10.0\n\n    class MockLogs(object):\n\n        def __init__(self):\n            self.logs = []\n            self.lock = threading.Lock()\n            self.api_called = threading.Event()\n\n        def add(self, log):\n            with self.lock:\n                self.logs.append(log)\n\n        def return_list(self, num_retries=None):\n            self.api_called.set()\n            args, kwargs = self.list_func.call_args_list[-1]\n            filters = kwargs.get('filters', [])\n            if not any(True for f in filters if f[0] == 'id' and f[1] == '>'):\n                # No 'id' filter was given -- 
this must be the probe\n                # to determine the most recent id.\n                return {'items': [{'id': 1}], 'items_available': 1}\n            with self.lock:\n                retval = self.logs\n                self.logs = []\n            return {'items': retval, 'items_available': len(retval)}\n\n    def setUp(self):\n        self.logs = self.MockLogs()\n        self.arv = mock.MagicMock(name='arvados.api()')\n        self.arv.logs().list().execute.side_effect = self.logs.return_list\n        # our MockLogs object's \"execute\" stub will need to inspect\n        # the call history to determine X in\n        # ....logs().list(filters=X).execute():\n        self.logs.list_func = self.arv.logs().list\n        self.status_ok = threading.Event()\n        self.event_received = threading.Event()\n        self.recv_events = []\n\n    def tearDown(self):\n        if hasattr(self, 'client'):\n            self.client.close(timeout=None)\n\n    def callback(self, event):\n        if event.get('status') == 200:\n            self.status_ok.set()\n        else:\n            self.recv_events.append(event)\n            self.event_received.set()\n\n    def build_client(self, filters=None, callback=None, last_log_id=None, poll_time=99):\n        if filters is None:\n            filters = []\n        if callback is None:\n            callback = self.callback\n        self.client = arvados.events.PollClient(\n            self.arv, filters, callback, poll_time, last_log_id)\n\n    def was_filter_used(self, target):\n        return any(target in call[-1].get('filters', [])\n                   for call in self.arv.logs().list.call_args_list)\n\n    def test_callback(self):\n        test_log = {'id': 12345, 'testkey': 'testtext'}\n        self.logs.add({'id': 123})\n        self.build_client(poll_time=.01)\n        self.client.start()\n        self.assertTrue(self.status_ok.wait(self.TEST_TIMEOUT))\n        self.assertTrue(self.event_received.wait(self.TEST_TIMEOUT))\n        self.event_received.clear()\n        self.logs.add(test_log.copy())\n        self.assertTrue(self.event_received.wait(self.TEST_TIMEOUT))\n        self.assertIn(test_log, self.recv_events)\n\n    def test_subscribe(self):\n        client_filter = ['kind', '=', 'arvados#test']\n        self.build_client()\n        self.client.unsubscribe([])\n        self.client.subscribe([client_filter[:]])\n        self.client.start()\n        self.assertTrue(self.status_ok.wait(self.TEST_TIMEOUT))\n        self.assertTrue(self.logs.api_called.wait(self.TEST_TIMEOUT))\n        self.assertTrue(self.was_filter_used(client_filter))\n\n    def test_unsubscribe(self):\n        should_filter = ['foo', '=', 'foo']\n        should_not_filter = ['foo', '=', 'bar']\n        self.build_client(poll_time=0.01)\n        self.client.unsubscribe([])\n        self.client.subscribe([should_not_filter[:]])\n        self.client.subscribe([should_filter[:]])\n        self.client.unsubscribe([should_not_filter[:]])\n        self.client.start()\n        self.logs.add({'id': 123})\n        self.assertTrue(self.status_ok.wait(self.TEST_TIMEOUT))\n        self.assertTrue(self.event_received.wait(self.TEST_TIMEOUT))\n        self.assertTrue(self.was_filter_used(should_filter))\n        self.assertFalse(self.was_filter_used(should_not_filter))\n\n    def test_run_forever(self):\n        self.build_client()\n        self.client.start()\n        forever_thread = threading.Thread(target=self.client.run_forever)\n        forever_thread.start()\n        
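# run_forever() blocks while the client is open, so the thread\n        # should stay alive until close() is called below.\n        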
self.assertTrue(self.status_ok.wait(self.TEST_TIMEOUT))\n        self.assertTrue(forever_thread.is_alive())\n        self.client.close()\n        forever_thread.join()\n        del self.client\n"
  },
  {
    "path": "sdk/python/tests/test_http_cache.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport hashlib\nimport os\nimport random\nimport shutil\nimport sys\nimport tempfile\nimport threading\nimport unittest\n\nimport pytest\nfrom unittest import mock\n\nimport arvados\nimport arvados.api\nimport arvados.util\nfrom arvados._internal import basedirs\n\nfrom . import run_test_server\n\ndef _random(n):\n    return bytearray(random.getrandbits(8) for _ in range(n))\n\nclass CacheTestThread(threading.Thread):\n    def __init__(self, dir):\n        super(CacheTestThread, self).__init__()\n        self._dir = dir\n\n    def run(self):\n        c = arvados.api.ThreadSafeHTTPCache(self._dir)\n        url = 'http://example.com/foo'\n        self.ok = True\n        for x in range(16):\n            try:\n                data_in = _random(128)\n                data_in = hashlib.md5(data_in).hexdigest().encode() + b\"\\n\" + data_in\n                c.set(url, data_in)\n                data_out = c.get(url)\n                digest, _, content = data_out.partition(b\"\\n\")\n                if digest != hashlib.md5(content).hexdigest().encode():\n                    self.ok = False\n            except Exception as err:\n                self.ok = False\n                print(\"cache failed: {}: {}\".format(type(err), err), file=sys.stderr)\n                raise\n\n\nclass TestAPIHTTPCache:\n    @pytest.mark.parametrize('data_type', ['discovery', 'keep'])\n    def test_good_storage(self, tmp_path, monkeypatch, data_type):\n        def storage_path(self, subdir='.', mode=0o700):\n            path = tmp_path / subdir\n            path.mkdir(mode=mode)\n            return path\n        monkeypatch.setattr(basedirs.BaseDirectories, 'storage_path', storage_path)\n        actual = arvados.http_cache(data_type)\n        assert str(actual) == str(tmp_path / data_type)\n\n    @pytest.mark.parametrize('error', [RuntimeError, FileExistsError, PermissionError])\n    def test_unwritable_storage(self, monkeypatch, error):\n        def fail(self, subdir='.', mode=0o700):\n            raise error()\n        monkeypatch.setattr(basedirs.BaseDirectories, 'storage_path', fail)\n        actual = arvados.http_cache('unwritable')\n        assert actual is None\n\n\nclass CacheTest(unittest.TestCase):\n    def setUp(self):\n        self._dir = tempfile.mkdtemp()\n\n    def tearDown(self):\n        shutil.rmtree(self._dir)\n\n    def test_cache_crud(self):\n        c = arvados.api.ThreadSafeHTTPCache(self._dir, max_age=0)\n        url = 'https://example.com/foo?bar=baz'\n        data1 = _random(256)\n        data2 = _random(128)\n        self.assertEqual(None, c.get(url))\n        c.delete(url)\n        c.set(url, data1)\n        self.assertEqual(data1, c.get(url))\n        c.delete(url)\n        self.assertEqual(None, c.get(url))\n        c.set(url, data1)\n        c.set(url, data2)\n        self.assertEqual(data2, c.get(url))\n\n    def test_cache_threads(self):\n        threads = []\n        for _ in range(64):\n            t = CacheTestThread(dir=self._dir)\n            t.start()\n            threads.append(t)\n        for t in threads:\n            t.join()\n            self.assertTrue(t.ok)\n\n\nclass CacheIntegrationTest(run_test_server.TestCaseWithServers):\n    MAIN_SERVER = {}\n\n    def test_cache_used_by_default_client(self):\n        with mock.patch('arvados.api.ThreadSafeHTTPCache.get') as getter:\n            arvados.api('v1')._rootDesc.get('foobar')\n            
# Instantiating the client above should have consulted the HTTP\n            # cache when fetching the discovery document.\n            getter.assert_called()\n"
  },
  {
    "path": "sdk/python/tests/test_http_to_keep.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport copy\nimport io\nimport functools\nimport hashlib\nimport json\nimport logging\nimport sys\nimport unittest\nimport datetime\n\nfrom unittest import mock\n\nimport arvados\nimport arvados.collection\nimport arvados.keep\nimport pycurl\n\nfrom arvados._internal import http_to_keep\n\n# Turns out there was already \"FakeCurl\" that serves the same purpose, but\n# I wrote this before I knew that.  Whoops.\nclass CurlMock:\n    def __init__(self, headers = {}):\n        self.perform_was_called = False\n        self.headers = headers\n        self.get_response = 200\n        self.head_response = 200\n        self.req_headers = []\n\n    def setopt(self, op, *args):\n        if op == pycurl.URL:\n            self.url = args[0]\n        if op == pycurl.WRITEFUNCTION:\n            self.writefn = args[0]\n        if op == pycurl.HEADERFUNCTION:\n            self.headerfn = args[0]\n        if op == pycurl.NOBODY:\n            self.head = True\n        if op == pycurl.HTTPGET:\n            self.head = False\n        if op == pycurl.HTTPHEADER:\n            self.req_headers = args[0]\n\n    def getinfo(self, op):\n        if op == pycurl.RESPONSE_CODE:\n            if self.head:\n                return self.head_response\n            else:\n                return self.get_response\n\n    def perform(self):\n        self.perform_was_called = True\n\n        if self.head:\n            self.headerfn(\"HTTP/1.1 {} Status\\r\\n\".format(self.head_response))\n        else:\n            self.headerfn(\"HTTP/1.1 {} Status\\r\\n\".format(self.get_response))\n\n        for k,v in self.headers.items():\n            self.headerfn(\"%s: %s\" % (k,v))\n\n        if not self.head and self.get_response == 200:\n            self.writefn(self.chunk)\n\n\nclass TestHttpToKeep(unittest.TestCase):\n\n    @mock.patch(\"pycurl.Curl\")\n    @mock.patch(\"arvados.collection.Collection\")\n    def test_http_get(self, collectionmock, curlmock):\n        api = mock.MagicMock()\n\n        api.collections().list().execute.return_value = {\n            \"items\": []\n        }\n\n        cm = mock.MagicMock()\n        cm.manifest_locator.return_value = \"zzzzz-4zz18-zzzzzzzzzzzzzz3\"\n        cm.portable_data_hash.return_value = \"99999999999999999999999999999998+99\"\n        collectionmock.return_value = cm\n\n        mockobj = CurlMock()\n        mockobj.chunk = b'abc'\n        def init():\n            return mockobj\n        curlmock.side_effect = init\n\n        utcnow = mock.MagicMock()\n        utcnow.return_value = datetime.datetime(2018, 5, 15)\n\n        r = http_to_keep.http_to_keep(api, None, \"http://example.com/file1.txt\", utcnow=utcnow)\n        self.assertEqual(r, (\"99999999999999999999999999999998+99\", \"file1.txt\",\n                             'zzzzz-4zz18-zzzzzzzzzzzzzz3', 'http://example.com/file1.txt',\n                             datetime.datetime(2018, 5, 15, 0, 0)))\n\n        assert mockobj.url == b\"http://example.com/file1.txt\"\n        assert mockobj.perform_was_called is True\n\n        cm.open.assert_called_with(\"file1.txt\", \"wb\")\n        cm.save_new.assert_called_with(name=\"Downloaded from http%3A%2F%2Fexample.com%2Ffile1.txt\",\n                                       owner_uuid=None, ensure_unique_name=True)\n\n        api.collections().update.assert_has_calls([\n            mock.call(uuid=cm.manifest_locator(),\n                      
body={\"collection\":{\"properties\": {'http://example.com/file1.txt': {'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})\n        ])\n\n\n    @mock.patch(\"pycurl.Curl\")\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    def test_http_expires(self, collectionmock, curlmock):\n        api = mock.MagicMock()\n\n        api.collections().list().execute.return_value = {\n            \"items\": [{\n                \"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzz3\",\n                \"portable_data_hash\": \"99999999999999999999999999999998+99\",\n                \"properties\": {\n                    'http://example.com/file1.txt': {\n                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',\n                        'Expires': 'Tue, 17 May 2018 00:00:00 GMT'\n                    }\n                }\n            }]\n        }\n\n        cm = mock.MagicMock()\n        cm.manifest_locator.return_value = \"zzzzz-4zz18-zzzzzzzzzzzzzz3\"\n        cm.portable_data_hash.return_value = \"99999999999999999999999999999998+99\"\n        cm.keys.return_value = [\"file1.txt\"]\n        collectionmock.return_value = cm\n\n        mockobj = CurlMock()\n        mockobj.chunk = b'abc'\n        def init():\n            return mockobj\n        curlmock.side_effect = init\n\n        utcnow = mock.MagicMock()\n        utcnow.return_value = datetime.datetime(2018, 5, 16)\n\n        r = http_to_keep.http_to_keep(api, None, \"http://example.com/file1.txt\", utcnow=utcnow)\n        self.assertEqual(r, (\"99999999999999999999999999999998+99\", \"file1.txt\",\n                             'zzzzz-4zz18-zzzzzzzzzzzzzz3', 'http://example.com/file1.txt',\n                             datetime.datetime(2018, 5, 16, 0, 0)))\n\n        assert mockobj.perform_was_called is False\n\n\n    @mock.patch(\"pycurl.Curl\")\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    def test_http_cache_control(self, collectionmock, curlmock):\n        api = mock.MagicMock()\n\n        api.collections().list().execute.return_value = {\n            \"items\": [{\n                \"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzz3\",\n                \"portable_data_hash\": \"99999999999999999999999999999998+99\",\n                \"properties\": {\n                    'http://example.com/file1.txt': {\n                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',\n                        'Cache-Control': 'max-age=172800'\n                    }\n                }\n            }]\n        }\n\n        cm = mock.MagicMock()\n        cm.manifest_locator.return_value = \"zzzzz-4zz18-zzzzzzzzzzzzzz3\"\n        cm.portable_data_hash.return_value = \"99999999999999999999999999999998+99\"\n        cm.keys.return_value = [\"file1.txt\"]\n        collectionmock.return_value = cm\n\n        mockobj = CurlMock()\n        mockobj.chunk = b'abc'\n        def init():\n            return mockobj\n        curlmock.side_effect = init\n\n        utcnow = mock.MagicMock()\n        utcnow.return_value = datetime.datetime(2018, 5, 16)\n\n        r = http_to_keep.http_to_keep(api, None, \"http://example.com/file1.txt\", utcnow=utcnow)\n        self.assertEqual(r, (\"99999999999999999999999999999998+99\", \"file1.txt\", 'zzzzz-4zz18-zzzzzzzzzzzzzz3',\n                             'http://example.com/file1.txt', datetime.datetime(2018, 5, 16, 0, 0)))\n\n        assert mockobj.perform_was_called is False\n\n\n    @mock.patch(\"pycurl.Curl\")\n    @mock.patch(\"arvados.collection.Collection\")\n    def test_http_expired(self, collectionmock, curlmock):\n        api 
= mock.MagicMock()\n\n        api.collections().list().execute.return_value = {\n            \"items\": [{\n                \"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzz3\",\n                \"portable_data_hash\": \"99999999999999999999999999999998+99\",\n                \"properties\": {\n                    'http://example.com/file1.txt': {\n                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',\n                        'Expires': 'Wed, 16 May 2018 00:00:00 GMT'\n                    }\n                }\n            }]\n        }\n\n        cm = mock.MagicMock()\n        cm.manifest_locator.return_value = \"zzzzz-4zz18-zzzzzzzzzzzzzz4\"\n        cm.portable_data_hash.return_value = \"99999999999999999999999999999997+99\"\n        cm.keys.return_value = [\"file1.txt\"]\n        collectionmock.return_value = cm\n\n        mockobj = CurlMock({'Date': 'Thu, 17 May 2018 00:00:00 GMT'})\n        mockobj.chunk = b'def'\n        def init():\n            return mockobj\n        curlmock.side_effect = init\n\n        utcnow = mock.MagicMock()\n        utcnow.return_value = datetime.datetime(2018, 5, 17)\n\n        r = http_to_keep.http_to_keep(api, None, \"http://example.com/file1.txt\", utcnow=utcnow)\n        self.assertEqual(r, (\"99999999999999999999999999999997+99\", \"file1.txt\",\n                             'zzzzz-4zz18-zzzzzzzzzzzzzz4',\n                             'http://example.com/file1.txt', datetime.datetime(2018, 5, 17, 0, 0)))\n\n\n        assert mockobj.url == b\"http://example.com/file1.txt\"\n        assert mockobj.perform_was_called is True\n\n        cm.open.assert_called_with(\"file1.txt\", \"wb\")\n        cm.save_new.assert_called_with(name=\"Downloaded from http%3A%2F%2Fexample.com%2Ffile1.txt\",\n                                       owner_uuid=None, ensure_unique_name=True)\n\n        api.collections().update.assert_has_calls([\n            mock.call(uuid=cm.manifest_locator(),\n                      body={\"collection\":{\"properties\": {'http://example.com/file1.txt': {'Date': 'Thu, 17 May 2018 00:00:00 GMT'}}}})\n        ])\n\n\n    @mock.patch(\"pycurl.Curl\")\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    def test_http_etag(self, collectionmock, curlmock):\n        api = mock.MagicMock()\n\n        api.collections().list().execute.return_value = {\n            \"items\": [{\n                \"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzz3\",\n                \"portable_data_hash\": \"99999999999999999999999999999998+99\",\n                \"properties\": {\n                    'http://example.com/file1.txt': {\n                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',\n                        'Expires': 'Wed, 16 May 2018 00:00:00 GMT',\n                        'Etag': '\"123456\"'\n                    }\n                }\n            }]\n        }\n\n        cm = mock.MagicMock()\n        cm.manifest_locator.return_value = \"zzzzz-4zz18-zzzzzzzzzzzzzz3\"\n        cm.portable_data_hash.return_value = \"99999999999999999999999999999998+99\"\n        cm.keys.return_value = [\"file1.txt\"]\n        collectionmock.return_value = cm\n\n        mockobj = CurlMock({\n            'Date': 'Thu, 17 May 2018 00:00:00 GMT',\n            'Expires': 'Sat, 19 May 2018 00:00:00 GMT',\n            'Etag': '\"123456\"'\n        })\n        mockobj.chunk = None\n        def init():\n            return mockobj\n        curlmock.side_effect = init\n\n        utcnow = mock.MagicMock()\n        utcnow.return_value = datetime.datetime(2018, 5, 17)\n\n        r = 
http_to_keep.http_to_keep(api, None, \"http://example.com/file1.txt\", utcnow=utcnow)\n        self.assertEqual(r, (\"99999999999999999999999999999998+99\", \"file1.txt\",\n                             'zzzzz-4zz18-zzzzzzzzzzzzzz3', 'http://example.com/file1.txt',\n                             datetime.datetime(2018, 5, 17, 0, 0)))\n\n        cm.open.assert_not_called()\n\n        api.collections().update.assert_has_calls([\n            mock.call(uuid=cm.manifest_locator(),\n                      body={\"collection\":{\"properties\": {'http://example.com/file1.txt': {\n                          'Date': 'Thu, 17 May 2018 00:00:00 GMT',\n                          'Expires': 'Sat, 19 May 2018 00:00:00 GMT',\n                          'Etag': '\"123456\"'\n                      }}}})\n                      ])\n\n    @mock.patch(\"pycurl.Curl\")\n    @mock.patch(\"arvados.collection.Collection\")\n    def test_http_content_disp(self, collectionmock, curlmock):\n        api = mock.MagicMock()\n\n        api.collections().list().execute.return_value = {\n            \"items\": []\n        }\n\n        cm = mock.MagicMock()\n        cm.manifest_locator.return_value = \"zzzzz-4zz18-zzzzzzzzzzzzzz3\"\n        cm.portable_data_hash.return_value = \"99999999999999999999999999999998+99\"\n        collectionmock.return_value = cm\n\n        mockobj = CurlMock({\"Content-Disposition\": \"attachment; filename=file1.txt\"})\n        mockobj.chunk = b'abc'\n        def init():\n            return mockobj\n        curlmock.side_effect = init\n\n        utcnow = mock.MagicMock()\n        utcnow.return_value = datetime.datetime(2018, 5, 15)\n\n        r = http_to_keep.http_to_keep(api, None, \"http://example.com/download?fn=/file1.txt\", utcnow=utcnow)\n        self.assertEqual(r, (\"99999999999999999999999999999998+99\", \"file1.txt\",\n                             'zzzzz-4zz18-zzzzzzzzzzzzzz3',\n                             'http://example.com/download?fn=/file1.txt',\n                             datetime.datetime(2018, 5, 15, 0, 0)))\n\n        assert mockobj.url == b\"http://example.com/download?fn=/file1.txt\"\n\n        cm.open.assert_called_with(\"file1.txt\", \"wb\")\n        cm.save_new.assert_called_with(name=\"Downloaded from http%3A%2F%2Fexample.com%2Fdownload%3Ffn%3D%2Ffile1.txt\",\n                                       owner_uuid=None, ensure_unique_name=True)\n\n        api.collections().update.assert_has_calls([\n            mock.call(uuid=cm.manifest_locator(),\n                      body={\"collection\":{\"properties\": {\"http://example.com/download?fn=/file1.txt\": {'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})\n        ])\n\n    @mock.patch(\"pycurl.Curl\")\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    def test_http_etag_if_none_match(self, collectionmock, curlmock):\n        api = mock.MagicMock()\n\n        api.collections().list().execute.return_value = {\n            \"items\": [{\n                \"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzz3\",\n                \"portable_data_hash\": \"99999999999999999999999999999998+99\",\n                \"properties\": {\n                    'http://example.com/file1.txt': {\n                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',\n                        'Expires': 'Tue, 16 May 2018 00:00:00 GMT',\n                        'Etag': '\"123456\"'\n                    }\n                }\n            }]\n        }\n\n        cm = mock.MagicMock()\n        cm.manifest_locator.return_value = \"zzzzz-4zz18-zzzzzzzzzzzzzz3\"\n      
  cm.portable_data_hash.return_value = \"99999999999999999999999999999998+99\"\n        cm.keys.return_value = [\"file1.txt\"]\n        collectionmock.return_value = cm\n\n        mockobj = CurlMock({\n            'Date': 'Tue, 17 May 2018 00:00:00 GMT',\n            'Expires': 'Tue, 19 May 2018 00:00:00 GMT',\n            'Etag': '\"123456\"'\n        })\n        mockobj.chunk = None\n        mockobj.head_response = 403\n        mockobj.get_response = 304\n        def init():\n            return mockobj\n        curlmock.side_effect = init\n\n        utcnow = mock.MagicMock()\n        utcnow.return_value = datetime.datetime(2018, 5, 17)\n\n        r = http_to_keep.http_to_keep(api, None, \"http://example.com/file1.txt\", utcnow=utcnow)\n        self.assertEqual(r, (\"99999999999999999999999999999998+99\", \"file1.txt\",\n                             'zzzzz-4zz18-zzzzzzzzzzzzzz3', 'http://example.com/file1.txt',\n                             datetime.datetime(2018, 5, 17, 0, 0)))\n\n        print(mockobj.req_headers)\n        assert mockobj.req_headers == [\"Accept: application/octet-stream\", \"If-None-Match: \\\"123456\\\"\"]\n        cm.open.assert_not_called()\n\n        api.collections().update.assert_has_calls([\n            mock.call(uuid=cm.manifest_locator(),\n                      body={\"collection\":{\"properties\": {'http://example.com/file1.txt': {\n                          'Date': 'Tue, 17 May 2018 00:00:00 GMT',\n                          'Expires': 'Tue, 19 May 2018 00:00:00 GMT',\n                          'Etag': '\"123456\"'\n                      }}}})\n                      ])\n\n    @mock.patch(\"pycurl.Curl\")\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    def test_http_prefer_cached_downloads(self, collectionmock, curlmock):\n        api = mock.MagicMock()\n\n        api.collections().list().execute.return_value = {\n            \"items\": [{\n                \"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzz3\",\n                \"portable_data_hash\": \"99999999999999999999999999999998+99\",\n                \"properties\": {\n                    'http://example.com/file1.txt': {\n                        'Date': 'Tue, 15 May 2018 00:00:00 GMT',\n                        'Expires': 'Tue, 16 May 2018 00:00:00 GMT',\n                        'Etag': '\"123456\"'\n                    }\n                }\n            }]\n        }\n\n        cm = mock.MagicMock()\n        cm.manifest_locator.return_value = \"zzzzz-4zz18-zzzzzzzzzzzzzz3\"\n        cm.portable_data_hash.return_value = \"99999999999999999999999999999998+99\"\n        cm.keys.return_value = [\"file1.txt\"]\n        collectionmock.return_value = cm\n\n        mockobj = CurlMock()\n        def init():\n            return mockobj\n        curlmock.side_effect = init\n\n        utcnow = mock.MagicMock()\n        utcnow.return_value = datetime.datetime(2018, 5, 17)\n\n        r = http_to_keep.http_to_keep(api, None, \"http://example.com/file1.txt\", utcnow=utcnow, prefer_cached_downloads=True)\n        self.assertEqual(r, (\"99999999999999999999999999999998+99\", \"file1.txt\", 'zzzzz-4zz18-zzzzzzzzzzzzzz3',\n                             'http://example.com/file1.txt', datetime.datetime(2018, 5, 17, 0, 0)))\n\n        assert mockobj.perform_was_called is False\n        cm.open.assert_not_called()\n        api.collections().update.assert_not_called()\n\n    @mock.patch(\"pycurl.Curl\")\n    @mock.patch(\"arvados.collection.CollectionReader\")\n    def test_http_varying_url_params(self, collectionmock, 
curlmock):\n        for prurl in (\"http://example.com/file1.txt\", \"http://example.com/file1.txt?KeyId=123&Signature=456&Expires=789\"):\n            api = mock.MagicMock()\n\n            api.collections().list().execute.return_value = {\n                \"items\": [{\n                    \"uuid\": \"zzzzz-4zz18-zzzzzzzzzzzzzz3\",\n                    \"portable_data_hash\": \"99999999999999999999999999999998+99\",\n                    \"properties\": {\n                        prurl: {\n                            'Date': 'Tue, 15 May 2018 00:00:00 GMT',\n                            'Expires': 'Tue, 16 May 2018 00:00:00 GMT',\n                            'Etag': '\"123456\"'\n                        }\n                    }\n                }]\n            }\n\n            cm = mock.MagicMock()\n            cm.manifest_locator.return_value = \"zzzzz-4zz18-zzzzzzzzzzzzzz3\"\n            cm.portable_data_hash.return_value = \"99999999999999999999999999999998+99\"\n            cm.keys.return_value = [\"file1.txt\"]\n            collectionmock.return_value = cm\n\n            mockobj = CurlMock({\n                'Date': 'Tue, 17 May 2018 00:00:00 GMT',\n                'Expires': 'Tue, 19 May 2018 00:00:00 GMT',\n                'Etag': '\"123456\"'\n            })\n            mockobj.chunk = None\n            def init():\n                return mockobj\n            curlmock.side_effect = init\n\n            utcnow = mock.MagicMock()\n            utcnow.return_value = datetime.datetime(2018, 5, 17)\n\n            r = http_to_keep.http_to_keep(api, None, \"http://example.com/file1.txt?KeyId=123&Signature=456&Expires=789\",\n                                          utcnow=utcnow, varying_url_params=\"KeyId,Signature,Expires\")\n            self.assertEqual(r, (\"99999999999999999999999999999998+99\", \"file1.txt\", 'zzzzz-4zz18-zzzzzzzzzzzzzz3',\n                                 'http://example.com/file1.txt', datetime.datetime(2018, 5, 17, 0, 0)))\n\n            assert mockobj.perform_was_called is True\n            cm.open.assert_not_called()\n\n            api.collections().update.assert_has_calls([\n                mock.call(uuid=cm.manifest_locator(),\n                          body={\"collection\":{\"properties\": {'http://example.com/file1.txt': {\n                              'Date': 'Tue, 17 May 2018 00:00:00 GMT',\n                              'Expires': 'Tue, 19 May 2018 00:00:00 GMT',\n                              'Etag': '\"123456\"'\n                          }}}})\n                          ])\n"
  },
  {
    "path": "sdk/python/tests/test_internal.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport re\n\nimport pytest\n\nfrom arvados import _internal\n\nclass TestDeprecated:\n    @staticmethod\n    @_internal.deprecated('TestVersion', 'arvados.noop')\n    def noop_func():\n        \"\"\"Do nothing\n\n        This function returns None.\n        \"\"\"\n\n    @pytest.mark.parametrize('pattern', [\n        r'^Do nothing$',\n        r'^ *.. WARNING:: Deprecated$',\n        r' removed in Arvados TestVersion\\.',\n        r' Prefer arvados\\.noop\\b',\n        r'^ *This function returns None\\.$',\n    ])\n    def test_docstring(self, pattern):\n        assert re.search(pattern, self.noop_func.__doc__, re.MULTILINE) is not None\n\n    def test_deprecation_warning(self):\n        with pytest.warns(DeprecationWarning) as check:\n            self.noop_func()\n        actual = str(check[0].message)\n        assert ' removed in Arvados TestVersion.' in actual\n        assert ' Prefer arvados.noop ' in actual\n\n\nclass TestParseSeq:\n    @pytest.mark.parametrize('s', [\n        'foo,bar',\n        'foo, bar',\n        'foo , bar',\n    ])\n    def test_default_split(self, s):\n        assert list(_internal.parse_seq(s)) == ['foo', 'bar']\n\n    @pytest.mark.parametrize('s', [\n        'foo',\n        ',foo',\n        'foo ,',\n        ' foo ',\n        ',foo,',\n        ', foo ,',\n    ])\n    def test_empty_filtering(self, s):\n        assert list(_internal.parse_seq(s)) == ['foo']\n\n    @pytest.mark.parametrize('s', [\n        '',\n        ' ',\n        ',',\n        ' , ',\n    ])\n    def test_empty_list(self, s):\n        assert list(_internal.parse_seq(s)) == []\n\n\nclass TestUniq:\n    @pytest.mark.parametrize('arg', [\n        'abcde',\n        'aabbccddee',\n        'abcdeabcde',\n        'ababcbabcdcbabcdedcbae',\n    ])\n    def test_uniq(self, arg):\n        assert list(_internal.uniq(iter(arg))) == list('abcde')\n"
  },
  {
    "path": "sdk/python/tests/test_keep_client.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport errno\nimport hashlib\nimport mmap\nimport os\nimport random\nimport re\nimport shutil\nimport socket\nimport stat\nimport sys\nimport tempfile\nimport time\nimport unittest\nimport urllib.parse\n\nfrom pathlib import Path\nfrom unittest import mock\nfrom unittest.mock import patch\n\nimport parameterized\nimport pycurl\n\nimport arvados\nimport arvados.retry\nimport arvados.util\n\nfrom . import arvados_testutil as tutil\nfrom . import keepstub\nfrom . import run_test_server\n\nfrom .arvados_testutil import DiskCacheBase\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):\n    disk_cache = False\n    MAIN_SERVER = {}\n    KEEP_SERVER = {}\n    block_cache_test = None\n\n    @classmethod\n    def setUpClass(cls):\n        super(KeepTestCase, cls).setUpClass()\n        run_test_server.authorize_with(\"admin\")\n        cls.api_client = arvados.api('v1')\n        cls.block_cache_test = DiskCacheBase()\n        cls.keep_client = arvados.KeepClient(api_client=cls.api_client,\n                                             proxy='', local_store='',\n                                             block_cache=cls.block_cache_test.make_block_cache(cls.disk_cache))\n\n    @classmethod\n    def tearDownClass(cls):\n        super(KeepTestCase, cls).setUpClass()\n        cls.block_cache_test.tearDown()\n\n    def test_KeepBasicRWTest(self):\n        self.assertEqual(0, self.keep_client.upload_counter.get())\n        foo_locator = self.keep_client.put('foo')\n        self.assertRegex(\n            foo_locator,\n            r'^acbd18db4cc2f85cedef654fccc4a4d8\\+3',\n            'wrong md5 hash from Keep.put(\"foo\"): ' + foo_locator)\n\n        # 6 bytes because uploaded 2 copies\n        self.assertEqual(6, self.keep_client.upload_counter.get())\n\n        self.assertEqual(0, self.keep_client.download_counter.get())\n        self.assertTrue(tutil.binary_compare(self.keep_client.get(foo_locator),\n                         b'foo'),\n                         'wrong content from Keep.get(md5(\"foo\"))')\n        self.assertEqual(3, self.keep_client.download_counter.get())\n\n    def test_KeepBinaryRWTest(self):\n        blob_str = b'\\xff\\xfe\\xf7\\x00\\x01\\x02'\n        blob_locator = self.keep_client.put(blob_str)\n        self.assertRegex(\n            blob_locator,\n            r'^7fc7c53b45e53926ba52821140fef396\\+6',\n            ('wrong locator from Keep.put(<binarydata>):' + blob_locator))\n        self.assertEqual(self.keep_client.get(blob_locator),\n                         blob_str,\n                         'wrong content from Keep.get(md5(<binarydata>))')\n\n    def test_KeepLongBinaryRWTest(self):\n        blob_data = b'\\xff\\xfe\\xfd\\xfc\\x00\\x01\\x02\\x03'\n        for i in range(0, 23):\n            blob_data = blob_data + blob_data\n        blob_locator = self.keep_client.put(blob_data)\n        self.assertRegex(\n            blob_locator,\n            r'^84d90fc0d8175dd5dcfab04b999bc956\\+67108864',\n            ('wrong locator from Keep.put(<binarydata>): ' + blob_locator))\n        self.assertEqual(self.keep_client.get(blob_locator),\n                         blob_data,\n                         'wrong content from Keep.get(md5(<binarydata>))')\n\n    @unittest.skip(\"unreliable test - please fix and close #8752\")\n    def 
test_KeepSingleCopyRWTest(self):\n        blob_data = b'\\xff\\xfe\\xfd\\xfc\\x00\\x01\\x02\\x03'\n        blob_locator = self.keep_client.put(blob_data, copies=1)\n        self.assertRegex(\n            blob_locator,\n            r'^c902006bc98a3eb4a3663b65ab4a6fab\\+8',\n            ('wrong locator from Keep.put(<binarydata>): ' + blob_locator))\n        self.assertEqual(self.keep_client.get(blob_locator),\n                         blob_data,\n                         'wrong content from Keep.get(md5(<binarydata>))')\n\n    def test_KeepEmptyCollectionTest(self):\n        blob_locator = self.keep_client.put('', copies=1)\n        self.assertRegex(\n            blob_locator,\n            r'^d41d8cd98f00b204e9800998ecf8427e\\+0',\n            ('wrong locator from Keep.put(\"\"): ' + blob_locator))\n\n    def test_KeepPutDataType(self):\n        with self.assertRaises(AttributeError):\n            # Must be bytes or have an encode() method\n            self.keep_client.put({})\n\n    def test_KeepHeadTest(self):\n        locator = self.keep_client.put('test_head')\n        self.assertRegex(\n            locator,\n            r'^b9a772c7049325feb7130fff1f8333e9\\+9',\n            'wrong md5 hash from Keep.put for \"test_head\": ' + locator)\n        self.assertEqual(True, self.keep_client.head(locator))\n        self.assertEqual(self.keep_client.get(locator),\n                         b'test_head',\n                         'wrong content from Keep.get for \"test_head\"')\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepPermissionTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):\n    disk_cache = False\n    MAIN_SERVER = {}\n    KEEP_SERVER = {'blob_signing': True}\n\n    def tearDown(self):\n        DiskCacheBase.tearDown(self)\n\n    def test_KeepBasicRWTest(self):\n        run_test_server.authorize_with('active')\n        keep_client = arvados.KeepClient(block_cache=self.make_block_cache(self.disk_cache))\n        foo_locator = keep_client.put('foo')\n        self.assertRegex(\n            foo_locator,\n            r'^acbd18db4cc2f85cedef654fccc4a4d8\\+3\\+A[a-f0-9]+@[a-f0-9]+$',\n            'invalid locator from Keep.put(\"foo\"): ' + foo_locator)\n        self.assertEqual(keep_client.get(foo_locator),\n                         b'foo',\n                         'wrong content from Keep.get(md5(\"foo\"))')\n\n        # GET with an unsigned locator => bad request\n        bar_locator = keep_client.put('bar')\n        unsigned_bar_locator = \"37b51d194a7513e45b56f6524f2d51f2+3\"\n        self.assertRegex(\n            bar_locator,\n            r'^37b51d194a7513e45b56f6524f2d51f2\\+3\\+A[a-f0-9]+@[a-f0-9]+$',\n            'invalid locator from Keep.put(\"bar\"): ' + bar_locator)\n        self.assertRaises(arvados.errors.KeepReadError,\n                          keep_client.get,\n                          unsigned_bar_locator)\n\n        # GET from a different user => bad request\n        run_test_server.authorize_with('spectator')\n        keep_client2 = arvados.KeepClient(block_cache=self.make_block_cache(self.disk_cache))\n        self.assertRaises(arvados.errors.KeepReadError,\n                          keep_client2.get,\n                          bar_locator)\n\n        # Unauthenticated GET for a signed locator => bad request\n        # Unauthenticated GET for an unsigned locator => bad request\n        keep_client.api_token = ''\n        self.assertRaises(arvados.errors.KeepReadError,\n                          
keep_client.get,\n                          bar_locator)\n        self.assertRaises(arvados.errors.KeepReadError,\n                          keep_client.get,\n                          unsigned_bar_locator)\n\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepProxyTestCase(run_test_server.TestCaseWithServers, DiskCacheBase):\n    disk_cache = False\n    MAIN_SERVER = {}\n    KEEP_SERVER = {}\n    KEEP_PROXY_SERVER = {}\n\n    @classmethod\n    def setUpClass(cls):\n        super(KeepProxyTestCase, cls).setUpClass()\n        run_test_server.authorize_with('active')\n        cls.api_client = arvados.api('v1')\n\n    def tearDown(self):\n        super(KeepProxyTestCase, self).tearDown()\n        DiskCacheBase.tearDown(self)\n\n    def test_KeepProxyTest1(self):\n        # Will use ARVADOS_KEEP_SERVICES environment variable that\n        # is set by setUpClass().\n        keep_client = arvados.KeepClient(api_client=self.api_client,\n                                         local_store='', block_cache=self.make_block_cache(self.disk_cache))\n        baz_locator = keep_client.put('baz')\n        self.assertRegex(\n            baz_locator,\n            r'^73feffa4b7f6bb68e44cf984c85f6e88\\+3',\n            'wrong md5 hash from Keep.put(\"baz\"): ' + baz_locator)\n        self.assertEqual(keep_client.get(baz_locator),\n                         b'baz',\n                         'wrong content from Keep.get(md5(\"baz\"))')\n        self.assertTrue(keep_client.using_proxy)\n\n    def test_KeepProxyTestMultipleURIs(self):\n        # Test using ARVADOS_KEEP_SERVICES env var overriding any\n        # existing proxy setting and setting multiple proxies\n        arvados.config.settings()['ARVADOS_KEEP_SERVICES'] = 'http://10.0.0.1 https://foo.example.org:1234/'\n        keep_client = arvados.KeepClient(api_client=self.api_client,\n                                         local_store='',\n                                         block_cache=self.make_block_cache(self.disk_cache))\n        uris = [x['_service_root'] for x in keep_client._keep_services]\n        self.assertEqual(uris, ['http://10.0.0.1/',\n                                'https://foo.example.org:1234/'])\n\n    def test_KeepProxyTestInvalidURI(self):\n        arvados.config.settings()['ARVADOS_KEEP_SERVICES'] = 'bad.uri.org'\n        with self.assertRaises(arvados.errors.ArgumentError):\n            keep_client = arvados.KeepClient(api_client=self.api_client,\n                                             local_store='',\n                                             block_cache=self.make_block_cache(self.disk_cache))\n\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepClientServiceTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):\n    disk_cache = False\n\n    def tearDown(self):\n        DiskCacheBase.tearDown(self)\n\n    def get_service_roots(self, api_client):\n        keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))\n        services = keep_client.weighted_service_roots(arvados.KeepLocator('0'*32))\n        return [urllib.parse.urlparse(url) for url in sorted(services)]\n\n    def test_ssl_flag_respected_in_roots(self):\n        for ssl_flag in [False, True]:\n            services = self.get_service_roots(self.mock_keep_services(\n                service_ssl_flag=ssl_flag))\n            self.assertEqual(\n                ('https' if ssl_flag else 'http'), 
services[0].scheme)\n\n    def test_correct_ports_with_ipv6_addresses(self):\n        service = self.get_service_roots(self.mock_keep_services(\n            service_type='proxy', service_host='100::1', service_port=10, count=1))[0]\n        self.assertEqual('100::1', service.hostname)\n        self.assertEqual(10, service.port)\n\n    def test_recognize_proxy_services_in_controller_response(self):\n        keep_client = arvados.KeepClient(api_client=self.mock_keep_services(\n            service_type='proxy', service_host='localhost', service_port=9, count=1),\n                                         block_cache=self.make_block_cache(self.disk_cache))\n        try:\n            # this will fail, but it ensures we get the service\n            # discovery response\n            keep_client.put('baz2', num_retries=0)\n        except:\n            pass\n        self.assertTrue(keep_client.using_proxy)\n\n    def test_insecure_disables_tls_verify(self):\n        api_client = self.mock_keep_services(count=1)\n        force_timeout = socket.timeout(\"timed out\")\n\n        api_client.insecure = True\n        with tutil.mock_keep_responses(b'foo', 200) as mock:\n            keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))\n            keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3')\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.SSL_VERIFYPEER),\n                0)\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.SSL_VERIFYHOST),\n                0)\n\n        api_client.insecure = False\n        with tutil.mock_keep_responses(b'foo', 200) as mock:\n            keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))\n            keep_client.get('acbd18db4cc2f85cedef654fccc4a4d8+3')\n            # getopt()==None here means we didn't change the\n            # default. 
If we were using real pycurl instead of a mock,\n            # it would return the default value 1.\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.SSL_VERIFYPEER),\n                None)\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.SSL_VERIFYHOST),\n                None)\n\n    def test_refresh_signature(self):\n        blk_digest = '6f5902ac237024bdd0c176cb93063dc4+11'\n        blk_sig = 'da39a3ee5e6b4b0d3255bfef95601890afd80709@53bed294'\n        local_loc = blk_digest+'+A'+blk_sig\n        remote_loc = blk_digest+'+R'+blk_sig\n        api_client = self.mock_keep_services(count=1)\n        headers = {'X-Keep-Locator':local_loc}\n        with tutil.mock_keep_responses('', 200, **headers):\n            # Check that the translated locator gets returned\n            keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))\n            self.assertEqual(local_loc, keep_client.refresh_signature(remote_loc))\n            # Check that refresh_signature() uses the correct method and headers\n            keep_client._get_or_head = mock.MagicMock()\n            keep_client.refresh_signature(remote_loc)\n            args, kwargs = keep_client._get_or_head.call_args_list[0]\n            self.assertIn(remote_loc, args)\n            self.assertEqual(\"HEAD\", kwargs['method'])\n            self.assertIn('X-Keep-Signature', kwargs['headers'])\n\n    # test_*_timeout verify that KeepClient instructs pycurl to use\n    # the appropriate connection and read timeouts. They don't care\n    # whether pycurl actually exhibits the expected timeout behavior\n    # -- those tests are in the KeepClientTimeout test class.\n\n    def test_get_timeout(self):\n        api_client = self.mock_keep_services(count=1)\n        force_timeout = socket.timeout(\"timed out\")\n        with tutil.mock_keep_responses(force_timeout, 0) as mock:\n            keep_client = arvados.KeepClient(\n                api_client=api_client,\n                block_cache=self.make_block_cache(self.disk_cache),\n                num_retries=0,\n            )\n            with self.assertRaises(arvados.errors.KeepReadError):\n                keep_client.get('ffffffffffffffffffffffffffffffff')\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),\n                int(arvados.KeepClient.DEFAULT_TIMEOUT[0]*1000))\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),\n                int(arvados.KeepClient.DEFAULT_TIMEOUT[1]))\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),\n                int(arvados.KeepClient.DEFAULT_TIMEOUT[2]))\n\n    def test_put_timeout(self):\n        api_client = self.mock_keep_services(count=1)\n        force_timeout = socket.timeout(\"timed out\")\n        with tutil.mock_keep_responses(force_timeout, 0) as mock:\n            keep_client = arvados.KeepClient(\n                api_client=api_client,\n                block_cache=self.make_block_cache(self.disk_cache),\n                num_retries=0,\n            )\n            with self.assertRaises(arvados.errors.KeepWriteError):\n                keep_client.put(b'foo')\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),\n                int(arvados.KeepClient.DEFAULT_TIMEOUT[0]*1000))\n            self.assertEqual(\n                
mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),\n                int(arvados.KeepClient.DEFAULT_TIMEOUT[1]))\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),\n                int(arvados.KeepClient.DEFAULT_TIMEOUT[2]))\n\n    def test_head_timeout(self):\n        api_client = self.mock_keep_services(count=1)\n        force_timeout = socket.timeout(\"timed out\")\n        with tutil.mock_keep_responses(force_timeout, 0) as mock:\n            keep_client = arvados.KeepClient(\n                api_client=api_client,\n                block_cache=self.make_block_cache(self.disk_cache),\n                num_retries=0,\n            )\n            with self.assertRaises(arvados.errors.KeepReadError):\n                keep_client.head('ffffffffffffffffffffffffffffffff')\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),\n                int(arvados.KeepClient.DEFAULT_TIMEOUT[0]*1000))\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),\n                None)\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),\n                None)\n\n    def test_proxy_get_timeout(self):\n        api_client = self.mock_keep_services(service_type='proxy', count=1)\n        force_timeout = socket.timeout(\"timed out\")\n        with tutil.mock_keep_responses(force_timeout, 0) as mock:\n            keep_client = arvados.KeepClient(\n                api_client=api_client,\n                block_cache=self.make_block_cache(self.disk_cache),\n                num_retries=0,\n            )\n            with self.assertRaises(arvados.errors.KeepReadError):\n                keep_client.get('ffffffffffffffffffffffffffffffff')\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),\n                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[0]*1000))\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),\n                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[1]))\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),\n                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[2]))\n\n    def test_proxy_head_timeout(self):\n        api_client = self.mock_keep_services(service_type='proxy', count=1)\n        force_timeout = socket.timeout(\"timed out\")\n        with tutil.mock_keep_responses(force_timeout, 0) as mock:\n            keep_client = arvados.KeepClient(\n                api_client=api_client,\n                block_cache=self.make_block_cache(self.disk_cache),\n                num_retries=0,\n            )\n            with self.assertRaises(arvados.errors.KeepReadError):\n                keep_client.head('ffffffffffffffffffffffffffffffff')\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),\n                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[0]*1000))\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),\n                None)\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),\n                None)\n\n    def test_proxy_put_timeout(self):\n        self.disk_cache_dir = None\n        api_client = self.mock_keep_services(service_type='proxy', count=1)\n        force_timeout = socket.timeout(\"timed out\")\n        with 
tutil.mock_keep_responses(force_timeout, 0) as mock:\n            keep_client = arvados.KeepClient(\n                api_client=api_client,\n                num_retries=0,\n            )\n            with self.assertRaises(arvados.errors.KeepWriteError):\n                keep_client.put('foo')\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.CONNECTTIMEOUT_MS),\n                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[0]*1000))\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.LOW_SPEED_TIME),\n                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[1]))\n            self.assertEqual(\n                mock.responses[0].getopt(pycurl.LOW_SPEED_LIMIT),\n                int(arvados.KeepClient.DEFAULT_PROXY_TIMEOUT[2]))\n\n    def check_no_services_error(self, verb, exc_class):\n        api_client = mock.MagicMock(name='api_client')\n        api_client.keep_services().accessible().execute.side_effect = (\n            arvados.errors.ApiError)\n        keep_client = arvados.KeepClient(\n            api_client=api_client,\n            block_cache=self.make_block_cache(self.disk_cache),\n            num_retries=0,\n        )\n        with self.assertRaises(exc_class) as err_check:\n            getattr(keep_client, verb)('d41d8cd98f00b204e9800998ecf8427e+0')\n        self.assertEqual(0, len(err_check.exception.request_errors()))\n\n    def test_get_error_with_no_services(self):\n        self.check_no_services_error('get', arvados.errors.KeepReadError)\n\n    def test_head_error_with_no_services(self):\n        self.check_no_services_error('head', arvados.errors.KeepReadError)\n\n    def test_put_error_with_no_services(self):\n        self.check_no_services_error('put', arvados.errors.KeepWriteError)\n\n    def check_errors_from_last_retry(self, verb, exc_class):\n        api_client = self.mock_keep_services(count=2)\n        req_mock = tutil.mock_keep_responses(\n            \"retry error reporting test\", 500, 500, 500, 500, 500, 500, 502, 502)\n        with req_mock, tutil.skip_sleep, \\\n                self.assertRaises(exc_class) as err_check:\n            keep_client = arvados.KeepClient(\n                api_client=api_client,\n                block_cache=self.make_block_cache(self.disk_cache),\n                num_retries=0,\n            )\n            getattr(keep_client, verb)('d41d8cd98f00b204e9800998ecf8427e+0',\n                                       num_retries=3)\n        self.assertEqual([502, 502], [\n                getattr(error, 'status_code', None)\n                for error in err_check.exception.request_errors().values()])\n        self.assertRegex(str(err_check.exception), r'failed to (read|write) .* after 4 attempts')\n\n    def test_get_error_reflects_last_retry(self):\n        self.check_errors_from_last_retry('get', arvados.errors.KeepReadError)\n\n    def test_head_error_reflects_last_retry(self):\n        self.check_errors_from_last_retry('head', arvados.errors.KeepReadError)\n\n    def test_put_error_reflects_last_retry(self):\n        self.check_errors_from_last_retry('put', arvados.errors.KeepWriteError)\n\n    def test_put_error_does_not_include_successful_puts(self):\n        data = 'partial failure test'\n        data_loc = tutil.str_keep_locator(data)\n        api_client = self.mock_keep_services(count=3)\n        with tutil.mock_keep_responses(data_loc, 200, 500, 500) as req_mock, \\\n                self.assertRaises(arvados.errors.KeepWriteError) as exc_check:\n            keep_client = 
arvados.KeepClient(\n                api_client=api_client,\n                block_cache=self.make_block_cache(self.disk_cache),\n                num_retries=0,\n            )\n            keep_client.put(data)\n        self.assertEqual(2, len(exc_check.exception.request_errors()))\n\n    def test_proxy_put_with_no_writable_services(self):\n        data = 'test with no writable services'\n        data_loc = tutil.str_keep_locator(data)\n        api_client = self.mock_keep_services(service_type='proxy', read_only=True, count=1)\n        with tutil.mock_keep_responses(data_loc, 200, 500, 500) as req_mock, \\\n                self.assertRaises(arvados.errors.KeepWriteError) as exc_check:\n            keep_client = arvados.KeepClient(\n                api_client=api_client,\n                block_cache=self.make_block_cache(self.disk_cache),\n                num_retries=0,\n            )\n            keep_client.put(data)\n        self.assertEqual(True, (\"no Keep services available\" in str(exc_check.exception)))\n        self.assertEqual(0, len(exc_check.exception.request_errors()))\n\n    def test_oddball_service_get(self):\n        body = b'oddball service get'\n        api_client = self.mock_keep_services(service_type='fancynewblobstore')\n        with tutil.mock_keep_responses(body, 200):\n            keep_client = arvados.KeepClient(\n                api_client=api_client,\n                block_cache=self.make_block_cache(self.disk_cache),\n                num_retries=0,\n            )\n            actual = keep_client.get(tutil.str_keep_locator(body))\n        self.assertEqual(body, actual)\n\n    def test_oddball_service_put(self):\n        body = b'oddball service put'\n        pdh = tutil.str_keep_locator(body)\n        api_client = self.mock_keep_services(service_type='fancynewblobstore')\n        with tutil.mock_keep_responses(pdh, 200):\n            keep_client = arvados.KeepClient(\n                api_client=api_client,\n                block_cache=self.make_block_cache(self.disk_cache),\n                num_retries=0,\n            )\n            actual = keep_client.put(body, copies=1)\n        self.assertEqual(pdh, actual)\n\n    def test_oddball_service_writer_count(self):\n        body = b'oddball service writer count'\n        pdh = tutil.str_keep_locator(body)\n        api_client = self.mock_keep_services(service_type='fancynewblobstore',\n                                             count=4)\n        headers = {'x-keep-replicas-stored': 3}\n        with tutil.mock_keep_responses(pdh, 200, 418, 418, 418,\n                                       **headers) as req_mock:\n            keep_client = arvados.KeepClient(\n                api_client=api_client,\n                block_cache=self.make_block_cache(self.disk_cache),\n                num_retries=0,\n            )\n            actual = keep_client.put(body, copies=2)\n        self.assertEqual(pdh, actual)\n        self.assertEqual(1, req_mock.call_count)\n\n\n@tutil.skip_sleep\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepClientCacheTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):\n    disk_cache = False\n\n    def setUp(self):\n        self.api_client = self.mock_keep_services(count=2)\n        self.keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))\n        self.data = b'xyzzy'\n        self.locator = '1271ed5ef305aadabc605b1609e24c52'\n\n    def tearDown(self):\n        
DiskCacheBase.tearDown(self)\n\n    @mock.patch('arvados.KeepClient._KeepService.get')\n    def test_get_request_cache(self, get_mock):\n        with tutil.mock_keep_responses(self.data, 200, 200):\n            self.keep_client.get(self.locator)\n            self.keep_client.get(self.locator)\n        # Request already cached, don't require more than one request\n        get_mock.assert_called_once()\n\n    @mock.patch('arvados.KeepClient._KeepService.get')\n    def test_head_request_cache(self, get_mock):\n        with tutil.mock_keep_responses(self.data, 200, 200):\n            self.keep_client.head(self.locator)\n            self.keep_client.head(self.locator)\n        # Don't cache HEAD requests so that they're not confused with GET reqs\n        self.assertEqual(2, get_mock.call_count)\n\n    @mock.patch('arvados.KeepClient._KeepService.get')\n    def test_head_and_then_get_return_different_responses(self, get_mock):\n        head_resp = None\n        get_resp = None\n        get_mock.side_effect = [b'first response', b'second response']\n        with tutil.mock_keep_responses(self.data, 200, 200):\n            head_resp = self.keep_client.head(self.locator)\n            get_resp = self.keep_client.get(self.locator)\n        self.assertEqual(b'first response', head_resp)\n        # First response was not cached because it was from a HEAD request.\n        self.assertNotEqual(head_resp, get_resp)\n\n\n@tutil.skip_sleep\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepXRequestIdTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):\n    disk_cache = False\n\n    def setUp(self):\n        self.api_client = self.mock_keep_services(count=2)\n        self.keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))\n        self.data = b'xyzzy'\n        self.locator = '1271ed5ef305aadabc605b1609e24c52'\n        self.test_id = arvados.util.new_request_id()\n        self.assertRegex(self.test_id, r'^req-[a-z0-9]{20}$')\n        # If we don't set request_id to None explicitly here, it will\n        # return <MagicMock name='api_client_mock.request_id'\n        # id='123456789'>:\n        self.api_client.request_id = None\n\n    def tearDown(self):\n        DiskCacheBase.tearDown(self)\n\n    def test_default_to_api_client_request_id(self):\n        self.api_client.request_id = self.test_id\n        with tutil.mock_keep_responses(self.locator, 200, 200) as mock:\n            self.keep_client.put(self.data)\n        self.assertEqual(2, len(mock.responses))\n        for resp in mock.responses:\n            self.assertProvidedRequestId(resp)\n\n        with tutil.mock_keep_responses(self.data, 200) as mock:\n            self.keep_client.get(self.locator)\n        self.assertProvidedRequestId(mock.responses[0])\n\n        with tutil.mock_keep_responses(b'', 200) as mock:\n            self.keep_client.head(self.locator)\n        self.assertProvidedRequestId(mock.responses[0])\n\n    def test_explicit_request_id(self):\n        with tutil.mock_keep_responses(self.locator, 200, 200) as mock:\n            self.keep_client.put(self.data, request_id=self.test_id)\n        self.assertEqual(2, len(mock.responses))\n        for resp in mock.responses:\n            self.assertProvidedRequestId(resp)\n\n        with tutil.mock_keep_responses(self.data, 200) as mock:\n            self.keep_client.get(self.locator, request_id=self.test_id)\n        self.assertProvidedRequestId(mock.responses[0])\n\n     
   with tutil.mock_keep_responses(b'', 200) as mock:\n            self.keep_client.head(self.locator, request_id=self.test_id)\n        self.assertProvidedRequestId(mock.responses[0])\n\n    def test_automatic_request_id(self):\n        with tutil.mock_keep_responses(self.locator, 200, 200) as mock:\n            self.keep_client.put(self.data)\n        self.assertEqual(2, len(mock.responses))\n        for resp in mock.responses:\n            self.assertAutomaticRequestId(resp)\n\n        with tutil.mock_keep_responses(self.data, 200) as mock:\n            self.keep_client.get(self.locator)\n        self.assertAutomaticRequestId(mock.responses[0])\n\n        with tutil.mock_keep_responses(b'', 200) as mock:\n            self.keep_client.head(self.locator)\n        self.assertAutomaticRequestId(mock.responses[0])\n\n    def test_request_id_in_exception(self):\n        with tutil.mock_keep_responses(b'', 400, 400, 400) as mock:\n            with self.assertRaisesRegex(arvados.errors.KeepReadError, self.test_id):\n                self.keep_client.head(self.locator, request_id=self.test_id)\n\n        with tutil.mock_keep_responses(b'', 400, 400, 400) as mock:\n            with self.assertRaisesRegex(arvados.errors.KeepReadError, r'req-[a-z0-9]{20}'):\n                self.keep_client.get(self.locator)\n\n        with tutil.mock_keep_responses(b'', 400, 400, 400) as mock:\n            with self.assertRaisesRegex(arvados.errors.KeepWriteError, self.test_id):\n                self.keep_client.put(self.data, request_id=self.test_id)\n\n        with tutil.mock_keep_responses(b'', 400, 400, 400) as mock:\n            with self.assertRaisesRegex(arvados.errors.KeepWriteError, r'req-[a-z0-9]{20}'):\n                self.keep_client.put(self.data)\n\n    def assertAutomaticRequestId(self, resp):\n        hdr = [x for x in resp.getopt(pycurl.HTTPHEADER)\n               if x.startswith('X-Request-Id: ')][0]\n        self.assertNotEqual(hdr, 'X-Request-Id: '+self.test_id)\n        self.assertRegex(hdr, r'^X-Request-Id: req-[a-z0-9]{20}$')\n\n    def assertProvidedRequestId(self, resp):\n        self.assertIn('X-Request-Id: '+self.test_id,\n                      resp.getopt(pycurl.HTTPHEADER))\n\n\n@tutil.skip_sleep\n#@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepClientRendezvousTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):\n    disk_cache = False\n\n    def setUp(self):\n        # expected_order[i] is the probe order for\n        # hash=md5(sprintf(\"%064x\",i)) where there are 16 services\n        # with uuid sprintf(\"anything-%015x\",j) with j in 0..15. 
E.g.,\n        # the first probe for the block consisting of 64 \"0\"\n        # characters is the service whose uuid is\n        # \"zzzzz-bi6l4-000000000000003\", so expected_order[0][0]=='3'.\n        self.services = 16\n        self.expected_order = [\n            list('3eab2d5fc9681074'),\n            list('097dba52e648f1c3'),\n            list('c5b4e023f8a7d691'),\n            list('9d81c02e76a3bf54'),\n            ]\n        self.blocks = [\n            \"{:064x}\".format(x).encode()\n            for x in range(len(self.expected_order))]\n        self.hashes = [\n            hashlib.md5(self.blocks[x]).hexdigest()\n            for x in range(len(self.expected_order))]\n        self.api_client = self.mock_keep_services(count=self.services)\n        self.keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))\n\n    def tearDown(self):\n        DiskCacheBase.tearDown(self)\n\n    def test_weighted_service_roots_against_reference_set(self):\n        # Confirm weighted_service_roots() returns the correct order\n        for i, hash in enumerate(self.hashes):\n            roots = self.keep_client.weighted_service_roots(arvados.KeepLocator(hash))\n            got_order = [\n                re.search(r'//\\[?keep0x([0-9a-f]+)', root).group(1)\n                for root in roots]\n            self.assertEqual(self.expected_order[i], got_order)\n\n    def test_get_probe_order_against_reference_set(self):\n        self._test_probe_order_against_reference_set(\n            lambda i: self.keep_client.get(self.hashes[i], num_retries=1))\n\n    def test_head_probe_order_against_reference_set(self):\n        self._test_probe_order_against_reference_set(\n            lambda i: self.keep_client.head(self.hashes[i], num_retries=1))\n\n    def test_put_probe_order_against_reference_set(self):\n        # copies=1 prevents the test from being sensitive to races\n        # between writer threads.\n        self._test_probe_order_against_reference_set(\n            lambda i: self.keep_client.put(self.blocks[i], num_retries=1, copies=1))\n\n    def _test_probe_order_against_reference_set(self, op):\n        for i in range(len(self.blocks)):\n            with tutil.mock_keep_responses('', *[500 for _ in range(self.services*2)]) as mock, \\\n                 self.assertRaises(arvados.errors.KeepRequestError):\n                op(i)\n            got_order = [\n                re.search(r'//\\[?keep0x([0-9a-f]+)', resp.getopt(pycurl.URL).decode()).group(1)\n                for resp in mock.responses]\n            self.assertEqual(self.expected_order[i]*2, got_order)\n\n    def test_put_probe_order_multiple_copies(self):\n        for copies in range(2, 4):\n            for i in range(len(self.blocks)):\n                with tutil.mock_keep_responses('', *[500 for _ in range(self.services*3)]) as mock, \\\n                     self.assertRaises(arvados.errors.KeepWriteError):\n                    self.keep_client.put(self.blocks[i], num_retries=2, copies=copies)\n                got_order = [\n                    re.search(r'//\\[?keep0x([0-9a-f]+)', resp.getopt(pycurl.URL).decode()).group(1)\n                    for resp in mock.responses]\n                # With T threads racing to make requests, the position\n                # of a given server in the sequence of HTTP requests\n                # (got_order) cannot be more than T-1 positions\n                # earlier than that server's position in the reference\n                # probe sequence 
(expected_order).\n                #\n                # Loop invariant: we have accounted for +pos+ expected\n                # probes, either by seeing them in +got_order+ or by\n                # putting them in +pending+ in the hope of seeing them\n                # later. As long as +len(pending)<T+, we haven't\n                # started a request too early.\n                pending = []\n                for pos, expected in enumerate(self.expected_order[i]*3):\n                    got = got_order[pos-len(pending)]\n                    while got in pending:\n                        del pending[pending.index(got)]\n                        got = got_order[pos-len(pending)]\n                    if got != expected:\n                        pending.append(expected)\n                        self.assertLess(\n                            len(pending), copies,\n                            \"pending={}, with copies={}, got {}, expected {}\".format(\n                                pending, copies, repr(got_order), repr(self.expected_order[i]*3)))\n\n    def test_probe_waste_adding_one_server(self):\n        hashes = [\n            hashlib.md5(\"{:064x}\".format(x).encode()).hexdigest() for x in range(100)]\n        initial_services = 12\n        self.api_client = self.mock_keep_services(count=initial_services)\n        self.keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))\n        probes_before = [\n            self.keep_client.weighted_service_roots(arvados.KeepLocator(hash)) for hash in hashes]\n        for added_services in range(1, 12):\n            api_client = self.mock_keep_services(count=initial_services+added_services)\n            keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))\n            total_penalty = 0\n            for hash_index in range(len(hashes)):\n                probe_after = keep_client.weighted_service_roots(\n                    arvados.KeepLocator(hashes[hash_index]))\n                penalty = probe_after.index(probes_before[hash_index][0])\n                self.assertLessEqual(penalty, added_services)\n                total_penalty += penalty\n            # Average penalty per block should not exceed\n            # N(added)/N(orig) by more than 20%, and should get closer\n            # to the ideal as we add data points.\n            expect_penalty = (\n                added_services *\n                len(hashes) / initial_services)\n            max_penalty = (\n                expect_penalty *\n                (120 - added_services)/100)\n            min_penalty = (\n                expect_penalty * 8/10)\n            self.assertTrue(\n                min_penalty <= total_penalty <= max_penalty,\n                \"With {}+{} services, {} blocks, penalty {} but expected {}..{}\".format(\n                    initial_services,\n                    added_services,\n                    len(hashes),\n                    total_penalty,\n                    min_penalty,\n                    max_penalty))\n\n    def check_64_zeros_error_order(self, verb, exc_class):\n        data = b'0' * 64\n        if verb == 'get':\n            data = tutil.str_keep_locator(data)\n        # Arbitrary port number:\n        aport = random.randint(1024,65535)\n        api_client = self.mock_keep_services(service_port=aport, count=self.services)\n        keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))\n        
with mock.patch('pycurl.Curl') as curl_mock, \\\n             self.assertRaises(exc_class) as err_check:\n            curl_mock.return_value = tutil.FakeCurl.make(code=500, body=b'')\n            getattr(keep_client, verb)(data)\n        urls = [urllib.parse.urlparse(url)\n                for url in err_check.exception.request_errors()]\n        self.assertEqual([('keep0x' + c, aport) for c in '3eab2d5fc9681074'],\n                         [(url.hostname, url.port) for url in urls])\n\n    def test_get_error_shows_probe_order(self):\n        self.check_64_zeros_error_order('get', arvados.errors.KeepReadError)\n\n    def test_put_error_shows_probe_order(self):\n        self.check_64_zeros_error_order('put', arvados.errors.KeepWriteError)\n\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepClientTimeout(keepstub.StubKeepServers, unittest.TestCase, DiskCacheBase):\n    disk_cache = False\n\n    # BANDWIDTH_LOW_LIM must be less than len(DATA) so we can transfer\n    # 1s worth of data and then trigger bandwidth errors before running\n    # out of data.\n    DATA = b'x'*2**11\n    BANDWIDTH_LOW_LIM = 1024\n    TIMEOUT_TIME = 1.0\n\n    def tearDown(self):\n        DiskCacheBase.tearDown(self)\n\n    class assertTakesBetween(unittest.TestCase):\n        def __init__(self, tmin, tmax):\n            self.tmin = tmin\n            self.tmax = tmax\n\n        def __enter__(self):\n            self.t0 = time.time()\n\n        def __exit__(self, *args, **kwargs):\n            # Round times to milliseconds, like CURL. Otherwise, we\n            # fail when CURL reaches a 1s timeout at 0.9998s.\n            delta = round(time.time() - self.t0, 3)\n            self.assertGreaterEqual(delta, self.tmin)\n            self.assertLessEqual(delta, self.tmax)\n\n    class assertTakesGreater(unittest.TestCase):\n        def __init__(self, tmin):\n            self.tmin = tmin\n\n        def __enter__(self):\n            self.t0 = time.time()\n\n        def __exit__(self, *args, **kwargs):\n            delta = round(time.time() - self.t0, 3)\n            self.assertGreaterEqual(delta, self.tmin)\n\n    def keepClient(self, timeouts=(0.1, TIMEOUT_TIME, BANDWIDTH_LOW_LIM)):\n        return arvados.KeepClient(\n            api_client=self.api_client,\n            timeout=timeouts, block_cache=self.make_block_cache(self.disk_cache))\n\n    def test_timeout_slow_connect(self):\n        # Can't simulate TCP delays with our own socket. 
Leave our\n        # stub server running uselessly, and try to connect to an\n        # unroutable IP address instead.\n        self.api_client = self.mock_keep_services(\n            count=1,\n            service_host='240.0.0.0',\n        )\n        with self.assertTakesBetween(0.1, 0.5):\n            with self.assertRaises(arvados.errors.KeepWriteError):\n                self.keepClient().put(self.DATA, copies=1, num_retries=0)\n\n    def test_low_bandwidth_no_delays_success(self):\n        self.server.setbandwidth(2*self.BANDWIDTH_LOW_LIM)\n        kc = self.keepClient()\n        loc = kc.put(self.DATA, copies=1, num_retries=0)\n        self.assertEqual(self.DATA, kc.get(loc, num_retries=0))\n\n    def test_too_low_bandwidth_no_delays_failure(self):\n        # Check that lessening bandwidth corresponds to failing\n        kc = self.keepClient()\n        loc = kc.put(self.DATA, copies=1, num_retries=0)\n        self.server.setbandwidth(0.5*self.BANDWIDTH_LOW_LIM)\n        with self.assertTakesGreater(self.TIMEOUT_TIME):\n            with self.assertRaises(arvados.errors.KeepReadError):\n                kc.get(loc, num_retries=0)\n        with self.assertTakesGreater(self.TIMEOUT_TIME):\n            with self.assertRaises(arvados.errors.KeepWriteError):\n                kc.put(self.DATA, copies=1, num_retries=0)\n\n    def test_low_bandwidth_with_server_response_delay_failure(self):\n        kc = self.keepClient()\n        loc = kc.put(self.DATA, copies=1, num_retries=0)\n        self.server.setbandwidth(self.BANDWIDTH_LOW_LIM)\n        # Note the actual delay must be 1s longer than the low speed\n        # limit interval in order for curl to detect it reliably.\n        self.server.setdelays(response=self.TIMEOUT_TIME+1)\n        with self.assertTakesGreater(self.TIMEOUT_TIME):\n            with self.assertRaises(arvados.errors.KeepReadError):\n                kc.get(loc, num_retries=0)\n        with self.assertTakesGreater(self.TIMEOUT_TIME):\n            with self.assertRaises(arvados.errors.KeepWriteError):\n                kc.put(self.DATA, copies=1, num_retries=0)\n        with self.assertTakesGreater(self.TIMEOUT_TIME):\n            kc.head(loc, num_retries=0)\n\n    def test_low_bandwidth_with_server_mid_delay_failure(self):\n        kc = self.keepClient()\n        loc = kc.put(self.DATA, copies=1, num_retries=0)\n        self.server.setbandwidth(self.BANDWIDTH_LOW_LIM)\n        # Note the actual delay must be 1s longer than the low speed\n        # limit interval in order for curl to detect it reliably.\n        self.server.setdelays(mid_write=self.TIMEOUT_TIME+1, mid_read=self.TIMEOUT_TIME+1)\n        with self.assertTakesGreater(self.TIMEOUT_TIME):\n            with self.assertRaises(arvados.errors.KeepReadError) as e:\n                kc.get(loc, num_retries=0)\n        with self.assertTakesGreater(self.TIMEOUT_TIME):\n            with self.assertRaises(arvados.errors.KeepWriteError):\n                kc.put(self.DATA, copies=1, num_retries=0)\n\n    def test_timeout_slow_request(self):\n        loc = self.keepClient().put(self.DATA, copies=1, num_retries=0)\n        self.server.setdelays(request=.2)\n        self._test_connect_timeout_under_200ms(loc)\n        self.server.setdelays(request=2)\n        self._test_response_timeout_under_2s(loc)\n\n    def test_timeout_slow_response(self):\n        loc = self.keepClient().put(self.DATA, copies=1, num_retries=0)\n        self.server.setdelays(response=.2)\n        self._test_connect_timeout_under_200ms(loc)\n        
self.server.setdelays(response=2)\n        self._test_response_timeout_under_2s(loc)\n\n    def test_timeout_slow_response_body(self):\n        loc = self.keepClient().put(self.DATA, copies=1, num_retries=0)\n        self.server.setdelays(response_body=.2)\n        self._test_connect_timeout_under_200ms(loc)\n        self.server.setdelays(response_body=2)\n        self._test_response_timeout_under_2s(loc)\n\n    def _test_connect_timeout_under_200ms(self, loc):\n        # Allow 100ms to connect, then 1s for response. Everything\n        # should work, and everything should take at least 200ms to\n        # return.\n        kc = self.keepClient(timeouts=(.1, 1))\n        with self.assertTakesBetween(.2, .3):\n            kc.put(self.DATA, copies=1, num_retries=0)\n        with self.assertTakesBetween(.2, .3):\n            self.assertEqual(self.DATA, kc.get(loc, num_retries=0))\n\n    def _test_response_timeout_under_2s(self, loc):\n        # Allow 10s to connect, then 1s for response. Nothing should\n        # work, and everything should take at least 1s to return.\n        kc = self.keepClient(timeouts=(10, 1))\n        with self.assertTakesBetween(1, 9):\n            with self.assertRaises(arvados.errors.KeepReadError):\n                kc.get(loc, num_retries=0)\n        with self.assertTakesBetween(1, 9):\n            with self.assertRaises(arvados.errors.KeepWriteError):\n                kc.put(self.DATA, copies=1, num_retries=0)\n\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepClientGatewayTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):\n    disk_cache = False\n\n    def tearDown(self):\n        DiskCacheBase.tearDown(self)\n\n    def mock_disks_and_gateways(self, disks=3, gateways=1):\n        self.gateways = [{\n                'uuid': 'zzzzz-bi6l4-gateway{:08d}'.format(i),\n                'owner_uuid': 'zzzzz-tpzed-000000000000000',\n                'service_host': 'gatewayhost{}'.format(i),\n                'service_port': 12345,\n                'service_ssl_flag': True,\n                'service_type': 'gateway:test',\n        } for i in range(gateways)]\n        self.gateway_roots = [\n            \"https://{service_host}:{service_port}/\".format(**gw)\n            for gw in self.gateways]\n        self.api_client = self.mock_keep_services(\n            count=disks, additional_services=self.gateways)\n        self.keepClient = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))\n\n    @mock.patch('pycurl.Curl')\n    def test_get_with_gateway_hint_first(self, MockCurl):\n        MockCurl.return_value = tutil.FakeCurl.make(\n            code=200, body='foo', headers={'Content-Length': 3})\n        self.mock_disks_and_gateways()\n        locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3+K@' + self.gateways[0]['uuid']\n        self.assertEqual(b'foo', self.keepClient.get(locator))\n        self.assertEqual(self.gateway_roots[0]+locator,\n                         MockCurl.return_value.getopt(pycurl.URL).decode())\n        self.assertEqual(True, self.keepClient.head(locator))\n\n    @mock.patch('pycurl.Curl')\n    def test_get_with_gateway_hints_in_order(self, MockCurl):\n        gateways = 4\n        disks = 3\n        mocks = [\n            tutil.FakeCurl.make(code=404, body='')\n            for _ in range(gateways+disks)\n        ]\n        MockCurl.side_effect = tutil.queue_with(mocks)\n        self.mock_disks_and_gateways(gateways=gateways, disks=disks)\n        
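# Build a locator with one +K@<gateway-uuid> hint per gateway; the\n        # client should try those gateways, in hint order, before any disks.\n        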
locator = '+'.join(['acbd18db4cc2f85cedef654fccc4a4d8+3'] +\n                           ['K@'+gw['uuid'] for gw in self.gateways])\n        with self.assertRaises(arvados.errors.NotFoundError):\n            self.keepClient.get(locator)\n        # Gateways are tried first, in the order given.\n        for i, root in enumerate(self.gateway_roots):\n            self.assertEqual(root+locator,\n                             mocks[i].getopt(pycurl.URL).decode())\n        # Disk services are tried next.\n        for i in range(gateways, gateways+disks):\n            self.assertRegex(\n                mocks[i].getopt(pycurl.URL).decode(),\n                r'keep0x')\n\n    @mock.patch('pycurl.Curl')\n    def test_head_with_gateway_hints_in_order(self, MockCurl):\n        gateways = 4\n        disks = 3\n        mocks = [\n            tutil.FakeCurl.make(code=404, body=b'')\n            for _ in range(gateways+disks)\n        ]\n        MockCurl.side_effect = tutil.queue_with(mocks)\n        self.mock_disks_and_gateways(gateways=gateways, disks=disks)\n        locator = '+'.join(['acbd18db4cc2f85cedef654fccc4a4d8+3'] +\n                           ['K@'+gw['uuid'] for gw in self.gateways])\n        with self.assertRaises(arvados.errors.NotFoundError):\n            self.keepClient.head(locator)\n        # Gateways are tried first, in the order given.\n        for i, root in enumerate(self.gateway_roots):\n            self.assertEqual(root+locator,\n                             mocks[i].getopt(pycurl.URL).decode())\n        # Disk services are tried next.\n        for i in range(gateways, gateways+disks):\n            self.assertRegex(\n                mocks[i].getopt(pycurl.URL).decode(),\n                r'keep0x')\n\n    @mock.patch('pycurl.Curl')\n    def test_get_with_remote_proxy_hint(self, MockCurl):\n        MockCurl.return_value = tutil.FakeCurl.make(\n            code=200, body=b'foo', headers={'Content-Length': 3})\n        self.mock_disks_and_gateways()\n        locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3+K@xyzzy'\n        self.assertEqual(b'foo', self.keepClient.get(locator))\n        self.assertEqual('https://keep.xyzzy.arvadosapi.com/'+locator,\n                         MockCurl.return_value.getopt(pycurl.URL).decode())\n\n    @mock.patch('pycurl.Curl')\n    def test_head_with_remote_proxy_hint(self, MockCurl):\n        MockCurl.return_value = tutil.FakeCurl.make(\n            code=200, body=b'foo', headers={'Content-Length': 3})\n        self.mock_disks_and_gateways()\n        locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3+K@xyzzy'\n        self.assertEqual(True, self.keepClient.head(locator))\n        self.assertEqual('https://keep.xyzzy.arvadosapi.com/'+locator,\n                         MockCurl.return_value.getopt(pycurl.URL).decode())\n\n\nclass KeepClientRetryTestMixin(object):\n    disk_cache = False\n\n    # Testing with a local Keep store won't exercise the retry behavior.\n    # Instead, our strategy is:\n    # * Create a client with one proxy specified (pointed at a black\n    #   hole), so there's no need to instantiate an API client, and\n    #   all HTTP requests come from one place.\n    # * Mock pycurl.Curl to provide simulated responses.\n    # This lets us test the retry logic extensively without relying on any\n    # supporting servers, and prevents side effects in case something hiccups.\n    # To use this mixin, define DEFAULT_EXPECT, DEFAULT_EXCEPTION, and\n    # run_method().\n    #\n    # Test classes must set TEST_PATCHER to a method that 
mocks\n    # out appropriate methods in the client.\n\n    PROXY_ADDR = 'http://[%s]:65535/' % (tutil.TEST_HOST,)\n    TEST_DATA = b'testdata'\n    TEST_LOCATOR = 'ef654c40ab4f1747fc699915d4f70902+8'\n\n    def setUp(self):\n        self.client_kwargs = {'proxy': self.PROXY_ADDR, 'local_store': ''}\n\n    def new_client(self, **caller_kwargs):\n        kwargs = self.client_kwargs.copy()\n        kwargs.update(caller_kwargs)\n        kwargs['block_cache'] = self.make_block_cache(self.disk_cache)\n        return arvados.KeepClient(**kwargs)\n\n    def run_method(self, *args, **kwargs):\n        raise NotImplementedError(\"test subclasses must define run_method\")\n\n    def check_success(self, expected=None, *args, **kwargs):\n        if expected is None:\n            expected = self.DEFAULT_EXPECT\n        self.assertEqual(expected, self.run_method(*args, **kwargs))\n\n    def check_exception(self, error_class=None, *args, **kwargs):\n        if error_class is None:\n            error_class = self.DEFAULT_EXCEPTION\n        with self.assertRaises(error_class) as err:\n            self.run_method(*args, **kwargs)\n        return err\n\n    def test_immediate_success(self):\n        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 200):\n            self.check_success()\n\n    def test_retry_then_success(self):\n        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 200):\n            self.check_success(num_retries=3)\n\n    def test_exception_then_success(self):\n        with self.TEST_PATCHER(self.DEFAULT_EXPECT, Exception('mock err'), 200):\n            self.check_success(num_retries=3)\n\n    def test_no_retry_after_permanent_error(self):\n        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 403, 200):\n            self.check_exception(num_retries=3)\n\n    def test_error_after_retries_exhausted(self):\n        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 500, 200):\n            err = self.check_exception(num_retries=1)\n        self.assertRegex(str(err.exception), r'failed to .* after 2 attempts')\n\n    def test_num_retries_instance_fallback(self):\n        self.client_kwargs['num_retries'] = 3\n        with self.TEST_PATCHER(self.DEFAULT_EXPECT, 500, 200):\n            self.check_success()\n\n\n@tutil.skip_sleep\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepClientRetryGetTestCase(KeepClientRetryTestMixin, unittest.TestCase, DiskCacheBase):\n    DEFAULT_EXPECT = KeepClientRetryTestMixin.TEST_DATA\n    DEFAULT_EXCEPTION = arvados.errors.KeepReadError\n    HINTED_LOCATOR = KeepClientRetryTestMixin.TEST_LOCATOR + '+K@xyzzy'\n    TEST_PATCHER = staticmethod(tutil.mock_keep_responses)\n\n    def tearDown(self):\n        DiskCacheBase.tearDown(self)\n\n    def run_method(self, locator=KeepClientRetryTestMixin.TEST_LOCATOR,\n                   *args, **kwargs):\n        return self.new_client().get(locator, *args, **kwargs)\n\n    def test_specific_exception_when_not_found(self):\n        with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 200):\n            self.check_exception(arvados.errors.NotFoundError, num_retries=3)\n\n    def test_general_exception_with_mixed_errors(self):\n        # get should raise a NotFoundError if no server returns the block,\n        # and a high threshold of servers report that it's not found.\n        # This test rigs up 50/50 disagreement between two servers, and\n        # checks that it does not become a NotFoundError.\n        client = self.new_client(num_retries=0)\n        with 
tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 500):\n            with self.assertRaises(arvados.errors.KeepReadError) as exc_check:\n                client.get(self.HINTED_LOCATOR)\n            self.assertNotIsInstance(\n                exc_check.exception, arvados.errors.NotFoundError,\n                \"mixed errors raised NotFoundError\")\n\n    def test_hint_server_can_succeed_without_retries(self):\n        with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 200, 500):\n            self.check_success(locator=self.HINTED_LOCATOR)\n\n    def test_try_next_server_after_timeout(self):\n        with tutil.mock_keep_responses(\n                (socket.timeout(\"timed out\"), 200),\n                (self.DEFAULT_EXPECT, 200)):\n            self.check_success(locator=self.HINTED_LOCATOR)\n\n    def test_retry_data_with_wrong_checksum(self):\n        with tutil.mock_keep_responses(\n                ('baddata', 200),\n                (self.DEFAULT_EXPECT, 200)):\n            self.check_success(locator=self.HINTED_LOCATOR)\n\n\n@tutil.skip_sleep\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepClientRetryHeadTestCase(KeepClientRetryTestMixin, unittest.TestCase, DiskCacheBase):\n    DEFAULT_EXPECT = True\n    DEFAULT_EXCEPTION = arvados.errors.KeepReadError\n    HINTED_LOCATOR = KeepClientRetryTestMixin.TEST_LOCATOR + '+K@xyzzy'\n    TEST_PATCHER = staticmethod(tutil.mock_keep_responses)\n\n    def tearDown(self):\n        DiskCacheBase.tearDown(self)\n\n    def run_method(self, locator=KeepClientRetryTestMixin.TEST_LOCATOR,\n                   *args, **kwargs):\n        return self.new_client().head(locator, *args, **kwargs)\n\n    def test_specific_exception_when_not_found(self):\n        with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 200):\n            self.check_exception(arvados.errors.NotFoundError, num_retries=3)\n\n    def test_general_exception_with_mixed_errors(self):\n        # head should raise a NotFoundError if no server returns the block,\n        # and a high threshold of servers report that it's not found.\n        # This test rigs up 50/50 disagreement between two servers, and\n        # checks that it does not become a NotFoundError.\n        client = self.new_client(num_retries=0)\n        with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 500):\n            with self.assertRaises(arvados.errors.KeepReadError) as exc_check:\n                client.head(self.HINTED_LOCATOR)\n            self.assertNotIsInstance(\n                exc_check.exception, arvados.errors.NotFoundError,\n                \"mixed errors raised NotFoundError\")\n\n    def test_hint_server_can_succeed_without_retries(self):\n        with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 404, 200, 500):\n            self.check_success(locator=self.HINTED_LOCATOR)\n\n    def test_try_next_server_after_timeout(self):\n        with tutil.mock_keep_responses(\n                (socket.timeout(\"timed out\"), 200),\n                (self.DEFAULT_EXPECT, 200)):\n            self.check_success(locator=self.HINTED_LOCATOR)\n\n\n@tutil.skip_sleep\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepClientRetryPutTestCase(KeepClientRetryTestMixin, unittest.TestCase, DiskCacheBase):\n    DEFAULT_EXPECT = KeepClientRetryTestMixin.TEST_LOCATOR\n    DEFAULT_EXCEPTION = arvados.errors.KeepWriteError\n    TEST_PATCHER = staticmethod(tutil.mock_keep_responses)\n\n    def tearDown(self):\n        
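# Presumably DiskCacheBase.tearDown removes the temporary disk\n        # cache directory, if make_block_cache created one.\n        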
DiskCacheBase.tearDown(self)\n\n    def run_method(self, data=KeepClientRetryTestMixin.TEST_DATA,\n                   copies=1, *args, **kwargs):\n        return self.new_client().put(data, copies, *args, **kwargs)\n\n    def test_do_not_send_multiple_copies_to_same_server(self):\n        with tutil.mock_keep_responses(self.DEFAULT_EXPECT, 200):\n            self.check_exception(copies=2, num_retries=3)\n\n\nclass AvoidOverreplication(unittest.TestCase, tutil.ApiClientMock):\n    class FakeKeepService(object):\n        def __init__(self, delay, will_succeed=False, will_raise=None, replicas=1):\n            self.delay = delay\n            self.will_succeed = will_succeed\n            self.will_raise = will_raise\n            self._result = {}\n            self._result['headers'] = {}\n            self._result['headers']['x-keep-replicas-stored'] = str(replicas)\n            self._result['headers']['x-keep-storage-classes-confirmed'] = 'default={}'.format(replicas)\n            self._result['body'] = 'foobar'\n\n        def put(self, data_hash, data, timeout, headers):\n            time.sleep(self.delay)\n            if self.will_raise is not None:\n                raise self.will_raise\n            return self.will_succeed\n\n        def last_result(self):\n            if self.will_succeed:\n                return self._result\n            else:\n                return {\"status_code\": 500, \"body\": \"didn't succeed\"}\n\n        def finished(self):\n            return False\n\n\n    def setUp(self):\n        self.copies = 3\n        self.pool = arvados.KeepClient._KeepWriterThreadPool(\n            data = 'foo',\n            data_hash = 'acbd18db4cc2f85cedef654fccc4a4d8+3',\n            max_service_replicas = self.copies,\n            copies = self.copies\n        )\n\n    def test_only_write_enough_on_success(self):\n        for i in range(10):\n            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)\n            self.pool.add_task(ks, None)\n        self.pool.join()\n        self.assertEqual(self.pool.done(), (self.copies, []))\n\n    def test_only_write_enough_on_partial_success(self):\n        for i in range(5):\n            ks = self.FakeKeepService(delay=i/10.0, will_succeed=False)\n            self.pool.add_task(ks, None)\n            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)\n            self.pool.add_task(ks, None)\n        self.pool.join()\n        self.assertEqual(self.pool.done(), (self.copies, []))\n\n    def test_only_write_enough_when_some_crash(self):\n        for i in range(5):\n            ks = self.FakeKeepService(delay=i/10.0, will_raise=Exception())\n            self.pool.add_task(ks, None)\n            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)\n            self.pool.add_task(ks, None)\n        self.pool.join()\n        self.assertEqual(self.pool.done(), (self.copies, []))\n\n    def test_fail_when_too_many_crash(self):\n        for i in range(self.copies+1):\n            ks = self.FakeKeepService(delay=i/10.0, will_raise=Exception())\n            self.pool.add_task(ks, None)\n        for i in range(self.copies-1):\n            ks = self.FakeKeepService(delay=i/10.0, will_succeed=True)\n            self.pool.add_task(ks, None)\n        self.pool.join()\n        self.assertEqual(self.pool.done(), (self.copies-1, []))\n\n\n@tutil.skip_sleep\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass RetryNeedsMultipleServices(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):\n    
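# Default only; the parameterized_class decorator above overrides\n    # disk_cache for each generated test variant.\n    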
disk_cache = False\n\n    # Test put()s that need two distinct servers to succeed, possibly\n    # requiring multiple passes through the retry loop.\n\n    def setUp(self):\n        self.api_client = self.mock_keep_services(count=2)\n        self.keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))\n\n    def tearDown(self):\n        DiskCacheBase.tearDown(self)\n\n    def test_success_after_exception(self):\n        with tutil.mock_keep_responses(\n                'acbd18db4cc2f85cedef654fccc4a4d8+3',\n                Exception('mock err'), 200, 200) as req_mock:\n            self.keep_client.put('foo', num_retries=1, copies=2)\n        self.assertEqual(3, req_mock.call_count)\n\n    def test_success_after_retryable_error(self):\n        with tutil.mock_keep_responses(\n                'acbd18db4cc2f85cedef654fccc4a4d8+3',\n                500, 200, 200) as req_mock:\n            self.keep_client.put('foo', num_retries=1, copies=2)\n        self.assertEqual(3, req_mock.call_count)\n\n    def test_fail_after_final_error(self):\n        # First retry loop gets a 200 (can't achieve replication by\n        # storing again on that server) and a 400 (can't retry that\n        # server at all), so we shouldn't try a third request.\n        with tutil.mock_keep_responses(\n                'acbd18db4cc2f85cedef654fccc4a4d8+3',\n                200, 400, 200) as req_mock:\n            with self.assertRaises(arvados.errors.KeepWriteError):\n                self.keep_client.put('foo', num_retries=1, copies=2)\n        self.assertEqual(2, req_mock.call_count)\n\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepClientAPIErrorTest(unittest.TestCase, DiskCacheBase):\n    disk_cache = False\n\n    def tearDown(self):\n        DiskCacheBase.tearDown(self)\n\n    def test_api_fail(self):\n        class ApiMock(object):\n            def __getattr__(self, r):\n                if r == \"api_token\":\n                    return \"abc\"\n                elif r == \"insecure\":\n                    return False\n                elif r == \"config\":\n                    return lambda: {}\n                else:\n                    raise arvados.errors.KeepReadError()\n        keep_client = arvados.KeepClient(api_client=ApiMock(),\n                                         proxy='', local_store='',\n                                         block_cache=self.make_block_cache(self.disk_cache))\n\n        # The bug this is testing for is that if an API (not\n        # keepstore) exception is thrown as part of a get(), the next\n        # attempt to get that same block will result in a deadlock.\n        # This is why there are two get()s in a row.  
Unfortunately,\n        # the failure mode for this test is that the test suite\n        # deadlocks; there isn't a good way to avoid that without\n        # adding a special case that has no use except for this test.\n\n        with self.assertRaises(arvados.errors.KeepReadError):\n            keep_client.get(\"acbd18db4cc2f85cedef654fccc4a4d8+3\")\n        with self.assertRaises(arvados.errors.KeepReadError):\n            keep_client.get(\"acbd18db4cc2f85cedef654fccc4a4d8+3\")\n\n\nclass KeepDiskCacheTestCase(unittest.TestCase, tutil.ApiClientMock):\n    def setUp(self):\n        self.api_client = self.mock_keep_services(count=2)\n        self.data = b'xyzzy'\n        self.locator = '1271ed5ef305aadabc605b1609e24c52'\n        self.disk_cache_dir = tempfile.mkdtemp()\n\n    def tearDown(self):\n        shutil.rmtree(self.disk_cache_dir)\n\n    @mock.patch('arvados._internal.basedirs.BaseDirectories.storage_path')\n    def test_default_disk_cache_dir(self, storage_path):\n        expected = Path(self.disk_cache_dir)\n        storage_path.return_value = expected\n        cache = arvados.keep.KeepBlockCache(disk_cache=True)\n        storage_path.assert_called_with('keep')\n        self.assertEqual(cache._disk_cache_dir, str(expected))\n\n    @mock.patch('arvados.KeepClient._KeepService.get')\n    def test_disk_cache_read(self, get_mock):\n        # confirm it finds an existing cache block when the cache is\n        # initialized.\n\n        os.makedirs(os.path.join(self.disk_cache_dir, self.locator[0:3]))\n        with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+\".keepcacheblock\"), \"wb\") as f:\n            f.write(self.data)\n\n        # block cache should have found the existing block\n        block_cache = arvados.keep.KeepBlockCache(disk_cache=True,\n                                                  disk_cache_dir=self.disk_cache_dir)\n        keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=block_cache)\n\n        self.assertTrue(tutil.binary_compare(keep_client.get(self.locator), self.data))\n\n        get_mock.assert_not_called()\n\n    @mock.patch('arvados.KeepClient._KeepService.get')\n    def test_disk_cache_read_new_block(self, get_mock):\n        # confirm it finds a cache block written after the disk cache\n        # was initialized.\n\n        block_cache = arvados.keep.KeepBlockCache(disk_cache=True,\n                                                  disk_cache_dir=self.disk_cache_dir)\n        keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=block_cache)\n\n        os.makedirs(os.path.join(self.disk_cache_dir, self.locator[0:3]))\n        with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+\".keepcacheblock\"), \"wb\") as f:\n            f.write(self.data)\n\n        # when we try to get the block, it'll check the disk and find it.\n        self.assertTrue(tutil.binary_compare(keep_client.get(self.locator), self.data))\n\n        get_mock.assert_not_called()\n\n    def test_disk_cache_write(self):\n        # confirm the cache block was created\n\n        block_cache = arvados.keep.KeepBlockCache(disk_cache=True,\n                                                  disk_cache_dir=self.disk_cache_dir)\n        keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=block_cache)\n\n        with tutil.mock_keep_responses(self.data, 200) as mock:\n            self.assertTrue(tutil.binary_compare(keep_client.get(self.locator), self.data))\n\n        
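# The block fetched above should now be served from the local\n        # cache without contacting any Keep service.\n        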
self.assertIsNotNone(keep_client.get_from_cache(self.locator))\n\n        with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+\".keepcacheblock\"), \"rb\") as f:\n            self.assertTrue(tutil.binary_compare(f.read(), self.data))\n\n    def test_disk_cache_clean(self):\n        # confirm that a tmp file in the cache is cleaned up\n\n        os.makedirs(os.path.join(self.disk_cache_dir, self.locator[0:3]))\n        with open(os.path.join(self.disk_cache_dir, self.locator[0:3], \"tmpXYZABC.keepcacheblock\"), \"wb\") as f:\n            f.write(b\"abc1\")\n\n        with open(os.path.join(self.disk_cache_dir, self.locator[0:3], \"tmpXYZABC\"), \"wb\") as f:\n            f.write(b\"abc2\")\n\n        with open(os.path.join(self.disk_cache_dir, self.locator[0:3], \"XYZABC\"), \"wb\") as f:\n            f.write(b\"abc3\")\n\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], \"tmpXYZABC.keepcacheblock\")))\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], \"tmpXYZABC\")))\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], \"XYZABC\")))\n\n        block_cache = arvados.keep.KeepBlockCache(disk_cache=True,\n                                                  disk_cache_dir=self.disk_cache_dir)\n\n        # The tmp still hasn't been deleted because it was created in the last 60 seconds\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], \"tmpXYZABC.keepcacheblock\")))\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], \"tmpXYZABC\")))\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], \"XYZABC\")))\n\n        # Set the mtime to 61s in the past\n        os.utime(os.path.join(self.disk_cache_dir, self.locator[0:3], \"tmpXYZABC.keepcacheblock\"), times=(time.time()-61, time.time()-61))\n        os.utime(os.path.join(self.disk_cache_dir, self.locator[0:3], \"tmpXYZABC\"), times=(time.time()-61, time.time()-61))\n        os.utime(os.path.join(self.disk_cache_dir, self.locator[0:3], \"XYZABC\"), times=(time.time()-61, time.time()-61))\n\n        block_cache2 = arvados.keep.KeepBlockCache(disk_cache=True,\n                                                   disk_cache_dir=self.disk_cache_dir)\n\n        # Tmp should be gone but the other ones are safe.\n        self.assertFalse(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], \"tmpXYZABC.keepcacheblock\")))\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], \"tmpXYZABC\")))\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], \"XYZABC\")))\n\n    @mock.patch('arvados.KeepClient._KeepService.get')\n    def test_disk_cache_cap(self, get_mock):\n        # confirm that the cache is kept to the desired limit\n\n        os.makedirs(os.path.join(self.disk_cache_dir, self.locator[0:3]))\n        with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+\".keepcacheblock\"), \"wb\") as f:\n            f.write(self.data)\n            # We want KeepBlockCache to consider this file older than the\n            # next file we write. 
Date it well in the past (a little over a\n            # day) to ensure that happens regardless of filesystem settings.\n            old_mtime = time.time() - 90000\n            os.utime(f.fileno(), (old_mtime, old_mtime))\n\n        os.makedirs(os.path.join(self.disk_cache_dir, \"acb\"))\n        with open(os.path.join(self.disk_cache_dir, \"acb\", \"acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock\"), \"wb\") as f:\n            f.write(b\"foo\")\n\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+\".keepcacheblock\")))\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, \"acb\", \"acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock\")))\n\n        block_cache = arvados.keep.KeepBlockCache(disk_cache=True,\n                                                   disk_cache_dir=self.disk_cache_dir,\n                                                   max_slots=1)\n\n        self.assertFalse(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+\".keepcacheblock\")))\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, \"acb\", \"acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock\")))\n\n    @mock.patch('arvados.KeepClient._KeepService.get')\n    def test_disk_cache_share(self, get_mock):\n        # confirm that a second cache doesn't delete files that belong to the first cache.\n\n        os.makedirs(os.path.join(self.disk_cache_dir, self.locator[0:3]))\n        with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+\".keepcacheblock\"), \"wb\") as f:\n            f.write(self.data)\n\n        os.makedirs(os.path.join(self.disk_cache_dir, \"acb\"))\n        with open(os.path.join(self.disk_cache_dir, \"acb\", \"acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock\"), \"wb\") as f:\n            f.write(b\"foo\")\n\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+\".keepcacheblock\")))\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, \"acb\", \"acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock\")))\n\n        block_cache = arvados.keep.KeepBlockCache(disk_cache=True,\n                                                   disk_cache_dir=self.disk_cache_dir,\n                                                   max_slots=2)\n\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+\".keepcacheblock\")))\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, \"acb\", \"acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock\")))\n\n        block_cache2 = arvados.keep.KeepBlockCache(disk_cache=True,\n                                                   disk_cache_dir=self.disk_cache_dir,\n                                                   max_slots=1)\n\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+\".keepcacheblock\")))\n        self.assertTrue(os.path.exists(os.path.join(self.disk_cache_dir, \"acb\", \"acbd18db4cc2f85cedef654fccc4a4d8.keepcacheblock\")))\n\n    def test_disk_cache_error(self):\n        os.chmod(self.disk_cache_dir, stat.S_IRUSR)\n\n        # Fail during cache initialization.\n        with self.assertRaises(OSError):\n            block_cache = arvados.keep.KeepBlockCache(disk_cache=True,\n                                                      disk_cache_dir=self.disk_cache_dir)\n\n    def test_disk_cache_write_error(self):\n        block_cache = 
arvados.keep.KeepBlockCache(disk_cache=True,\n                                                  disk_cache_dir=self.disk_cache_dir)\n\n        keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=block_cache)\n\n        # Make the cache dir read-only\n        os.makedirs(os.path.join(self.disk_cache_dir, self.locator[0:3]))\n        os.chmod(os.path.join(self.disk_cache_dir, self.locator[0:3]), stat.S_IRUSR)\n\n        # Cache fails\n        with self.assertRaises(arvados.errors.KeepCacheError):\n            with tutil.mock_keep_responses(self.data, 200) as mock:\n                keep_client.get(self.locator)\n\n    def test_disk_cache_retry_write_error(self):\n        cache_max_before = 512 * 1024 * 1024\n        block_cache = arvados.keep.KeepBlockCache(\n            cache_max=cache_max_before,\n            disk_cache=True,\n            disk_cache_dir=self.disk_cache_dir,\n        )\n        keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=block_cache)\n\n        called = False\n        realmmap = mmap.mmap\n        def sideeffect_mmap(*args, **kwargs):\n            nonlocal called\n            if not called:\n                called = True\n                raise OSError(errno.ENOSPC, \"no space\")\n            else:\n                return realmmap(*args, **kwargs)\n\n        with patch('mmap.mmap', autospec=True, side_effect=sideeffect_mmap) as mockmmap:\n            with tutil.mock_keep_responses(self.data, 200) as mock:\n                self.assertTrue(tutil.binary_compare(keep_client.get(self.locator), self.data))\n\n            self.assertIsNotNone(keep_client.get_from_cache(self.locator))\n\n        with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+\".keepcacheblock\"), \"rb\") as f:\n            self.assertTrue(tutil.binary_compare(f.read(), self.data))\n\n        # shrank the cache in response to ENOSPC\n        self.assertGreater(cache_max_before, block_cache.cache_max)\n\n    def test_disk_cache_retry_write_error2(self):\n        block_cache = arvados.keep.KeepBlockCache(disk_cache=True,\n                                                  disk_cache_dir=self.disk_cache_dir)\n\n        keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=block_cache)\n\n        called = False\n        realmmap = mmap.mmap\n        def sideeffect_mmap(*args, **kwargs):\n            nonlocal called\n            if not called:\n                called = True\n                raise OSError(errno.ENOMEM, \"no memory\")\n            else:\n                return realmmap(*args, **kwargs)\n\n        with patch('mmap.mmap', autospec=True, side_effect=sideeffect_mmap) as mockmmap:\n            slots_before = block_cache._max_slots\n\n            with tutil.mock_keep_responses(self.data, 200) as mock:\n                self.assertTrue(tutil.binary_compare(keep_client.get(self.locator), self.data))\n\n            self.assertIsNotNone(keep_client.get_from_cache(self.locator))\n\n        with open(os.path.join(self.disk_cache_dir, self.locator[0:3], self.locator+\".keepcacheblock\"), \"rb\") as f:\n            self.assertTrue(tutil.binary_compare(f.read(), self.data))\n\n        # shrank the cache in response to ENOMEM\n        self.assertGreater(slots_before, block_cache._max_slots)\n"
  },
  {
    "path": "sdk/python/tests/test_keep_locator.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport datetime\nimport itertools\nimport random\nimport unittest\n\nfrom arvados.keep import KeepLocator\n\nclass ArvadosKeepLocatorTest(unittest.TestCase):\n    DEFAULT_TEST_COUNT = 10\n\n    def numstrs(fmtstr, base, exponent):\n        def genstrs(self, count=None):\n            return (fmtstr.format(random.randint(0, base ** exponent))\n                    for c in range(count or self.DEFAULT_TEST_COUNT))\n        return genstrs\n\n    checksums = numstrs('{:032x}', 16, 32)\n    sizes = numstrs('{:d}', 2, 26)\n    signatures = numstrs('{:040x}', 16, 40)\n    timestamps = numstrs('{:08x}', 16, 8)\n\n    def base_locators(self, count=DEFAULT_TEST_COUNT):\n        return ('+'.join(pair) for pair in\n                zip(self.checksums(count), self.sizes(count)))\n\n    def perm_hints(self, count=DEFAULT_TEST_COUNT):\n        for sig, ts in zip(self.signatures(count),\n                                      self.timestamps(count)):\n            yield 'A{}@{}'.format(sig, ts)\n\n    def test_good_locators_returned(self):\n        for hint_gens in [(), (self.sizes(),),\n                          (self.sizes(), self.perm_hints())]:\n            for loc_data in zip(self.checksums(), *hint_gens):\n                locator = '+'.join(loc_data)\n                self.assertEqual(locator, str(KeepLocator(locator)))\n\n    def test_nonchecksum_rejected(self):\n        for badstr in ['', 'badbadbad', '8f9e68d957b504a29ba76c526c3145dj',\n                       '+8f9e68d957b504a29ba76c526c3145d9',\n                       '3+8f9e68d957b504a29ba76c526c3145d9']:\n            self.assertRaises(ValueError, KeepLocator, badstr)\n\n    def test_unknown_hints_accepted(self):\n        base = next(self.base_locators(1))\n        for weirdhint in ['Zfoo', 'Ybar234', 'Xa@b_c-372', 'W99']:\n            locator = '+'.join([base, weirdhint])\n            self.assertEqual(locator, str(KeepLocator(locator)))\n\n    def test_bad_hints_rejected(self):\n        base = next(self.base_locators(1))\n        for badhint in ['', 'A', 'lowercase', '+32']:\n            self.assertRaises(ValueError, KeepLocator,\n                              '+'.join([base, badhint]))\n\n    def test_multiple_locator_hints_accepted(self):\n        base = next(self.base_locators(1))\n        for loc_hints in itertools.permutations(['Kab1cd', 'Kef2gh', 'Kij3kl']):\n            locator = '+'.join((base,) + loc_hints)\n            self.assertEqual(locator, str(KeepLocator(locator)))\n\n    def test_str_type(self):\n        base = next(self.base_locators(1))\n        locator = KeepLocator(base)\n        self.assertEqual(type(''), type(locator.__str__()))\n\n    def test_expiry_passed(self):\n        base = next(self.base_locators(1))\n        signature = next(self.signatures(1))\n        dt1980 = datetime.datetime(1980, 1, 1)\n        dt2000 = datetime.datetime(2000, 2, 2)\n        dt2080 = datetime.datetime(2080, 3, 3)\n        locator = KeepLocator(base)\n        self.assertFalse(locator.permission_expired())\n        self.assertFalse(locator.permission_expired(dt1980))\n        self.assertFalse(locator.permission_expired(dt2080))\n        # Timestamped to 1987-01-05 18:48:32.\n        locator = KeepLocator('{}+A{}@20000000'.format(base, signature))\n        self.assertTrue(locator.permission_expired())\n        self.assertTrue(locator.permission_expired(dt2000))\n        self.assertFalse(locator.permission_expired(dt1980))\n\n\nif 
__name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "sdk/python/tests/test_retry.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport itertools\nimport unittest\n\nfrom unittest import mock\n\nimport arvados.errors as arv_error\nimport arvados.retry as arv_retry\n\nclass RetryLoopTestMixin(object):\n    @staticmethod\n    def loop_success(result):\n        # During the tests, we use integers that look like HTTP status\n        # codes as loop results.  Then we define simplified HTTP\n        # heuristics here to decide whether the result is success (True),\n        # permanent failure (False), or temporary failure (None).\n        if result < 400:\n            return True\n        elif result < 500:\n            return False\n        else:\n            return None\n\n    def run_loop(self, num_retries, *results, **kwargs):\n        responses = itertools.chain(results, itertools.repeat(None))\n        retrier = arv_retry.RetryLoop(num_retries, self.loop_success,\n                                      **kwargs)\n        for tries_left, response in zip(retrier, responses):\n            retrier.save_result(response)\n        return retrier\n\n    def check_result(self, retrier, expect_success, last_code):\n        self.assertIs(retrier.success(), expect_success,\n                      \"loop success flag is incorrect\")\n        self.assertEqual(last_code, retrier.last_result())\n\n\nclass RetryLoopTestCase(unittest.TestCase, RetryLoopTestMixin):\n    def test_zero_retries_and_success(self):\n        retrier = self.run_loop(0, 200)\n        self.check_result(retrier, True, 200)\n\n    def test_zero_retries_and_tempfail(self):\n        retrier = self.run_loop(0, 500, 501)\n        self.check_result(retrier, None, 500)\n\n    def test_zero_retries_and_permfail(self):\n        retrier = self.run_loop(0, 400, 201)\n        self.check_result(retrier, False, 400)\n\n    def test_one_retry_with_immediate_success(self):\n        retrier = self.run_loop(1, 200, 201)\n        self.check_result(retrier, True, 200)\n\n    def test_one_retry_with_delayed_success(self):\n        retrier = self.run_loop(1, 500, 201)\n        self.check_result(retrier, True, 201)\n\n    def test_one_retry_with_no_success(self):\n        retrier = self.run_loop(1, 500, 501, 502)\n        self.check_result(retrier, None, 501)\n\n    def test_one_retry_but_permfail(self):\n        retrier = self.run_loop(1, 400, 201)\n        self.check_result(retrier, False, 400)\n\n    def test_two_retries_with_immediate_success(self):\n        retrier = self.run_loop(2, 200, 201, 202)\n        self.check_result(retrier, True, 200)\n\n    def test_two_retries_with_success_after_one(self):\n        retrier = self.run_loop(2, 500, 201, 502)\n        self.check_result(retrier, True, 201)\n\n    def test_two_retries_with_success_after_two(self):\n        retrier = self.run_loop(2, 500, 501, 202, 503)\n        self.check_result(retrier, True, 202)\n\n    def test_two_retries_with_no_success(self):\n        retrier = self.run_loop(2, 500, 501, 502, 503)\n        self.check_result(retrier, None, 502)\n\n    def test_two_retries_with_permfail(self):\n        retrier = self.run_loop(2, 500, 401, 202)\n        self.check_result(retrier, False, 401)\n\n    def test_save_result_before_start_is_error(self):\n        retrier = arv_retry.RetryLoop(0)\n        self.assertRaises(arv_error.AssertionError, retrier.save_result, 1)\n\n    def test_save_result_after_end_is_error(self):\n        retrier = arv_retry.RetryLoop(0)\n        for count in retrier:\n            
pass\n        self.assertRaises(arv_error.AssertionError, retrier.save_result, 1)\n\n\n@mock.patch('time.time', side_effect=itertools.count())\n@mock.patch('time.sleep')\nclass RetryLoopBackoffTestCase(unittest.TestCase, RetryLoopTestMixin):\n    def run_loop(self, num_retries, *results, **kwargs):\n        kwargs.setdefault('backoff_start', 8)\n        return super(RetryLoopBackoffTestCase, self).run_loop(\n            num_retries, *results, **kwargs)\n\n    def check_backoff(self, sleep_mock, sleep_count, multiplier=1):\n        # Figure out how much time we actually spent sleeping.\n        sleep_times = [arglist[0][0] for arglist in sleep_mock.call_args_list\n                       if arglist[0][0] > 0]\n        self.assertEqual(sleep_count, len(sleep_times),\n                         \"loop did not back off correctly\")\n        last_wait = 0\n        for this_wait in sleep_times:\n            self.assertGreater(this_wait, last_wait * multiplier,\n                               \"loop did not grow backoff times correctly\")\n            last_wait = this_wait\n\n    def test_no_backoff_with_no_retries(self, sleep_mock, time_mock):\n        self.run_loop(0, 500, 201)\n        self.check_backoff(sleep_mock, 0)\n\n    def test_no_backoff_after_success(self, sleep_mock, time_mock):\n        self.run_loop(1, 200, 501)\n        self.check_backoff(sleep_mock, 0)\n\n    def test_no_backoff_after_permfail(self, sleep_mock, time_mock):\n        self.run_loop(1, 400, 201)\n        self.check_backoff(sleep_mock, 0)\n\n    def test_backoff_before_success(self, sleep_mock, time_mock):\n        self.run_loop(5, 500, 501, 502, 203, 504)\n        self.check_backoff(sleep_mock, 3)\n\n    def test_backoff_before_permfail(self, sleep_mock, time_mock):\n        self.run_loop(5, 500, 501, 502, 403, 504)\n        self.check_backoff(sleep_mock, 3)\n\n    def test_backoff_all_tempfail(self, sleep_mock, time_mock):\n        self.run_loop(3, 500, 501, 502, 503, 504)\n        self.check_backoff(sleep_mock, 3)\n\n    def test_backoff_multiplier(self, sleep_mock, time_mock):\n        self.run_loop(5, 500, 501, 502, 503, 504, 505,\n                      backoff_start=5, backoff_growth=10, max_wait=1000000000)\n        self.check_backoff(sleep_mock, 5, 9)\n\n\nclass CheckHTTPResponseSuccessTestCase(unittest.TestCase):\n    def results_map(self, *codes):\n        for code in codes:\n            yield code, arv_retry.check_http_response_success(code)\n\n    def check(assert_name):\n        def check_method(self, expected, *codes):\n            assert_func = getattr(self, assert_name)\n            for code, actual in self.results_map(*codes):\n                assert_func(expected, actual,\n                            \"{} status flagged {}\".format(code, actual))\n                if assert_name != 'assertIs':\n                    self.assertTrue(\n                        actual is True or actual is False or actual is None,\n                        \"{} status returned {}\".format(code, actual))\n        return check_method\n\n    check_is = check('assertIs')\n    check_is_not = check('assertIsNot')\n\n    def test_obvious_successes(self):\n        self.check_is(True, *list(range(200, 207)))\n\n    def test_obvious_stops(self):\n        self.check_is(False, 422, 424, 426, 428, 431,\n                      *list(range(400, 408)) + list(range(410, 420)))\n\n    def test_obvious_retries(self):\n        self.check_is(None, 500, 502, 503, 504)\n\n    def test_4xx_retries(self):\n        self.check_is(None, 408, 409, 423)\n\n   
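 # Of the 5xx range, only 500/502/503/504 are treated as retryable;\n    # other 5xx codes are permanent failures.\n   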
 def test_5xx_failures(self):\n        self.check_is(False, 501, *list(range(505, 512)))\n\n    def test_1xx_not_retried(self):\n        self.check_is_not(None, 100, 101)\n\n    def test_redirects_not_retried(self):\n        self.check_is_not(None, *list(range(300, 309)))\n\n    def test_wacky_code_retries(self):\n        self.check_is(None, 0, 99, 600, -200)\n\n\nclass RetryMethodTestCase(unittest.TestCase):\n    class Tester(object):\n        def __init__(self):\n            self.num_retries = 1\n\n        @arv_retry.retry_method\n        def check(self, a, num_retries=None, z=0):\n            return (a, num_retries, z)\n\n\n    def test_positional_arg_raises(self):\n        # unsupported use -- make sure we raise rather than ignore\n        with self.assertRaises(TypeError):\n            self.assertEqual((3, 2, 0), self.Tester().check(3, 2))\n\n    def test_keyword_arg_passed(self):\n        self.assertEqual((4, 3, 0), self.Tester().check(num_retries=3, a=4))\n\n    def test_not_specified(self):\n        self.assertEqual((0, 1, 0), self.Tester().check(0))\n\n    def test_not_specified_with_other_kwargs(self):\n        self.assertEqual((1, 1, 1), self.Tester().check(1, z=1))\n\n    def test_bad_call(self):\n        with self.assertRaises(TypeError):\n            self.Tester().check(num_retries=2)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "sdk/python/tests/test_retry_job_helpers.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport hashlib\nimport json\nimport os\nimport unittest\n\nfrom apiclient import http as apiclient_http\nfrom unittest import mock\n\nimport arvados\nfrom . import run_test_server\nfrom . import arvados_testutil as tutil\n\n@tutil.skip_sleep\nclass ApiClientRetryTestMixin(object):\n\n    TEST_UUID = 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'\n    TEST_LOCATOR = 'd41d8cd98f00b204e9800998ecf8427e+0'\n\n    @classmethod\n    def setUpClass(cls):\n        run_test_server.run()\n\n    def setUp(self):\n        # Patch arvados.api() to return our mock API, so we can mock\n        # its http requests.\n        self.api_client = arvados.api('v1', cache=False, num_retries=0)\n        self.api_patch = mock.patch('arvados.api', return_value=self.api_client)\n        self.api_patch.start()\n\n    def tearDown(self):\n        self.api_patch.stop()\n\n    def run_method(self):\n        raise NotImplementedError(\"test subclasses must define run_method\")\n\n    def test_immediate_success(self):\n        with tutil.mock_api_responses(self.api_client, '{}', [200]):\n            self.run_method()\n\n    def test_immediate_failure(self):\n        with tutil.mock_api_responses(self.api_client, '{}', [400]), self.assertRaises(self.DEFAULT_EXCEPTION):\n            self.run_method()\n\n    def test_retry_then_success(self):\n        with tutil.mock_api_responses(self.api_client, '{}', [500, 200]):\n            self.run_method()\n\n    def test_error_after_default_retries_exhausted(self):\n        with tutil.mock_api_responses(self.api_client, '{}', [500, 500, 500, 500, 500, 500, 200]), self.assertRaises(self.DEFAULT_EXCEPTION):\n            self.run_method()\n\n    def test_no_retry_after_immediate_success(self):\n        with tutil.mock_api_responses(self.api_client, '{}', [200, 400]):\n            self.run_method()\n"
  },
  {
    "path": "sdk/python/tests/test_s3_to_keep.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport copy\nimport io\nimport functools\nimport hashlib\nimport json\nimport logging\nimport sys\nimport unittest\nimport datetime\n\nfrom unittest import mock\n\nimport arvados\nimport arvados.collection\nimport arvados.keep\n\nfrom arvados._internal import s3_to_keep\nimport boto3.s3.transfer\n\nclass TestS3ToKeep(unittest.TestCase):\n\n    @mock.patch(\"arvados.collection.Collection\")\n    def test_s3_get(self, collectionmock):\n        api = mock.MagicMock()\n\n        api.collections().list().execute.return_value = {\n            \"items\": []\n        }\n\n        cm = mock.MagicMock()\n        cm.manifest_locator.return_value = \"zzzzz-4zz18-zzzzzzzzzzzzzz3\"\n        cm.portable_data_hash.return_value = \"99999999999999999999999999999998+99\"\n        collectionmock.return_value = cm\n\n        mockfile = mock.MagicMock()\n        cm.open.return_value = mockfile\n\n        mockboto = mock.MagicMock()\n        mockbotoclient = mock.MagicMock()\n        mockboto.client.return_value = mockbotoclient\n\n        mockbotoclient.head_object.return_value = {\n            'ResponseMetadata': {\n                'HTTPStatusCode': 200,\n                'HTTPHeaders': {\n                    \"Content-Length\": 123\n                }\n            }\n        }\n\n        utcnow = mock.MagicMock()\n        utcnow.return_value = datetime.datetime(2018, 5, 15)\n\n        r = s3_to_keep.s3_to_keep(api, mockboto, None, \"s3://examplebucket/file1.txt\", utcnow=utcnow)\n        self.assertEqual(r, (\"99999999999999999999999999999998+99\", \"file1.txt\",\n                             'zzzzz-4zz18-zzzzzzzzzzzzzz3', 's3://examplebucket/file1.txt',\n                             datetime.datetime(2018, 5, 15, 0, 0)))\n\n        cm.open.assert_called_with(\"file1.txt\", \"wb\")\n        cm.save_new.assert_called_with(name=\"Downloaded from s3%3A%2F%2Fexamplebucket%2Ffile1.txt\",\n                                       owner_uuid=None, ensure_unique_name=True)\n\n        api.collections().update.assert_has_calls([\n            mock.call(uuid=cm.manifest_locator(),\n                      body={\"collection\":{\"properties\": {'s3://examplebucket/file1.txt': {'Content-Length': 123, 'Date': 'Tue, 15 May 2018 00:00:00 GMT'}}}})\n        ])\n\n        kall = mockbotoclient.download_fileobj.call_args\n        assert kall.kwargs['Bucket'] == 'examplebucket'\n        assert kall.kwargs['Key'] == 'file1.txt'\n        assert kall.kwargs['Fileobj'] is mockfile\n"
  },
  {
    "path": "sdk/python/tests/test_storage_classes.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport arvados\nimport pycurl\n\nimport unittest\nimport parameterized\nfrom . import arvados_testutil as tutil\nfrom .arvados_testutil import DiskCacheBase\n\n@tutil.skip_sleep\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass KeepStorageClassesTestCase(unittest.TestCase, tutil.ApiClientMock, DiskCacheBase):\n    disk_cache = False\n\n    def setUp(self):\n        self.api_client = self.mock_keep_services(count=2)\n        self.keep_client = arvados.KeepClient(api_client=self.api_client, block_cache=self.make_block_cache(self.disk_cache))\n        self.data = b'xyzzy'\n        self.locator = '1271ed5ef305aadabc605b1609e24c52'\n\n    def tearDown(self):\n        DiskCacheBase.tearDown(self)\n\n    def test_multiple_default_storage_classes_req_header(self):\n        api_mock = self.api_client_mock()\n        api_mock.config.return_value = {\n            'StorageClasses': {\n                'foo': { 'Default': True },\n                'bar': { 'Default': True },\n                'baz': { 'Default': False }\n            }\n        }\n        api_client = self.mock_keep_services(api_mock=api_mock, count=2)\n        keep_client = arvados.KeepClient(api_client=api_client, block_cache=self.make_block_cache(self.disk_cache))\n        resp_hdr = {\n            'x-keep-storage-classes-confirmed': 'foo=1, bar=1',\n            'x-keep-replicas-stored': 1\n        }\n        with tutil.mock_keep_responses(self.locator, 200, **resp_hdr) as mock:\n            keep_client.put(self.data, copies=1)\n            req_hdr = mock.responses[0]\n            self.assertIn(\n                'X-Keep-Storage-Classes: bar, foo', req_hdr.getopt(pycurl.HTTPHEADER))\n\n    def test_storage_classes_req_header(self):\n        self.assertEqual(\n            self.api_client.config()['StorageClasses'],\n            {'default': {'Default': True}})\n        cases = [\n            # requested, expected\n            [['foo'], 'X-Keep-Storage-Classes: foo'],\n            [['bar', 'foo'], 'X-Keep-Storage-Classes: bar, foo'],\n            [[], 'X-Keep-Storage-Classes: default'],\n            [None, 'X-Keep-Storage-Classes: default'],\n        ]\n        for req_classes, expected_header in cases:\n            headers = {'x-keep-replicas-stored': 1}\n            if req_classes is None or len(req_classes) == 0:\n                confirmed_hdr = 'default=1'\n            elif len(req_classes) > 0:\n                confirmed_hdr = ', '.join([\"{}=1\".format(cls) for cls in req_classes])\n            headers.update({'x-keep-storage-classes-confirmed': confirmed_hdr})\n            with tutil.mock_keep_responses(self.locator, 200, **headers) as mock:\n                self.keep_client.put(self.data, copies=1, classes=req_classes)\n                req_hdr = mock.responses[0]\n                self.assertIn(expected_header, req_hdr.getopt(pycurl.HTTPHEADER))\n\n    def test_partial_storage_classes_put(self):\n        headers = {\n            'x-keep-replicas-stored': 1,\n            'x-keep-storage-classes-confirmed': 'foo=1'}\n        with tutil.mock_keep_responses(self.locator, 200, 503, **headers) as mock:\n            with self.assertRaises(arvados.errors.KeepWriteError):\n                self.keep_client.put(self.data, copies=1, classes=['foo', 'bar'], num_retries=0)\n            # 1st request, both classes pending\n            req1_headers = 
mock.responses[0].getopt(pycurl.HTTPHEADER)\n            self.assertIn('X-Keep-Storage-Classes: bar, foo', req1_headers)\n            # 2nd try, 'foo' class already satisfied\n            req2_headers = mock.responses[1].getopt(pycurl.HTTPHEADER)\n            self.assertIn('X-Keep-Storage-Classes: bar', req2_headers)\n\n    def test_successful_storage_classes_put_requests(self):\n        cases = [\n            # wanted_copies, wanted_classes, confirmed_copies, confirmed_classes, expected_requests\n            [ 1, ['foo'], 1, 'foo=1', 1],\n            [ 1, ['foo'], 2, 'foo=2', 1],\n            [ 2, ['foo'], 2, 'foo=2', 1],\n            [ 2, ['foo'], 1, 'foo=1', 2],\n            [ 1, ['foo', 'bar'], 1, 'foo=1, bar=1', 1],\n            [ 1, ['foo', 'bar'], 2, 'foo=2, bar=2', 1],\n            [ 2, ['foo', 'bar'], 2, 'foo=2, bar=2', 1],\n            [ 2, ['foo', 'bar'], 1, 'foo=1, bar=1', 2],\n            [ 1, ['foo', 'bar'], 1, None, 1],\n            [ 1, ['foo'], 1, None, 1],\n            [ 2, ['foo'], 2, None, 1],\n            [ 2, ['foo'], 1, None, 2],\n        ]\n        for w_copies, w_classes, c_copies, c_classes, e_reqs in cases:\n            headers = {'x-keep-replicas-stored': c_copies}\n            if c_classes is not None:\n                headers.update({'x-keep-storage-classes-confirmed': c_classes})\n            with tutil.mock_keep_responses(self.locator, 200, 200, **headers) as mock:\n                case_desc = 'wanted_copies={}, wanted_classes=\"{}\", confirmed_copies={}, confirmed_classes=\"{}\", expected_requests={}'.format(w_copies, ', '.join(w_classes), c_copies, c_classes, e_reqs)\n                self.assertEqual(self.locator,\n                    self.keep_client.put(self.data, copies=w_copies, classes=w_classes),\n                    case_desc)\n                self.assertEqual(e_reqs, mock.call_count, case_desc)\n\n    def test_failed_storage_classes_put_requests(self):\n        cases = [\n            # wanted_copies, wanted_classes, confirmed_copies, confirmed_classes, return_code\n            [ 1, ['foo'], 1, 'bar=1', 200],\n            [ 1, ['foo'], 1, None, 503],\n            [ 2, ['foo'], 1, 'bar=1, foo=0', 200],\n            [ 3, ['foo'], 1, 'bar=1, foo=1', 200],\n            [ 3, ['foo', 'bar'], 1, 'bar=2, foo=1', 200],\n        ]\n        for w_copies, w_classes, c_copies, c_classes, return_code in cases:\n            headers = {'x-keep-replicas-stored': c_copies}\n            if c_classes is not None:\n                headers.update({'x-keep-storage-classes-confirmed': c_classes})\n            with tutil.mock_keep_responses(self.locator, return_code, return_code, **headers):\n                case_desc = 'wanted_copies={}, wanted_classes=\"{}\", confirmed_copies={}, confirmed_classes=\"{}\"'.format(w_copies, ', '.join(w_classes), c_copies, c_classes)\n                with self.assertRaises(arvados.errors.KeepWriteError, msg=case_desc):\n                    self.keep_client.put(self.data, copies=w_copies, classes=w_classes, num_retries=0)\n"
  },
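The storage-class cases above all revolve around one small wire format: the client names the classes it wants in the X-Keep-Storage-Classes request header, and each Keep server reports what it actually stored in the x-keep-storage-classes-confirmed response header, e.g. 'foo=1, bar=1'. A minimal sketch of parsing that header and deciding which classes still need another PUT, assuming only the header shape shown in the mocks (both helpers here are hypothetical illustrations, not the KeepClient API):

def parse_confirmed_classes(header_value):
    # 'foo=1, bar=2' -> {'foo': 1, 'bar': 2}
    confirmed = {}
    for item in header_value.split(','):
        name, _, copies = item.strip().partition('=')
        if name:
            confirmed[name] = int(copies)
    return confirmed

def unconfirmed_classes(wanted, header_value, copies=1):
    # Classes that have not yet reached the desired copy count.
    confirmed = parse_confirmed_classes(header_value)
    return [cls for cls in wanted if confirmed.get(cls, 0) < copies]

# Mirrors test_partial_storage_classes_put: after the first response
# confirms 'foo=1', only 'bar' is requested again on the retry.
assert unconfirmed_classes(['foo', 'bar'], 'foo=1') == ['bar']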
  {
    "path": "sdk/python/tests/test_stream.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport bz2\nimport gzip\nimport io\nimport os\nimport unittest\nimport hashlib\n\nfrom unittest import mock\n\nimport arvados\n\nfrom . import arvados_testutil as tutil\nfrom . import run_test_server\n\nclass StreamFileReaderTestMixin(object):\n    def test_read_block_crossing_behavior(self):\n        # read() calls will be aligned on block boundaries - see #3663.\n        sfile = self.make_count_reader()\n        self.assertEqual(b'123', sfile.read(10))\n\n    def test_small_read(self):\n        sfile = self.make_count_reader()\n        self.assertEqual(b'12', sfile.read(2))\n\n    def test_successive_reads(self):\n        sfile = self.make_count_reader()\n        for expect in [b'1234', b'5678', b'9', b'']:\n            self.assertEqual(expect, sfile.read(4))\n\n    def test_readfrom_spans_blocks(self):\n        sfile = self.make_count_reader()\n        self.assertEqual(b'6789', sfile.readfrom(5, 12))\n\n    def test_small_readfrom_spanning_blocks(self):\n        sfile = self.make_count_reader()\n        self.assertEqual(b'2345', sfile.readfrom(1, 4))\n\n    def test_readall(self):\n        sfile = self.make_count_reader()\n        self.assertEqual(b'123456789', b''.join(sfile.readall()))\n\n    def test_one_arg_seek(self):\n        self.test_absolute_seek([])\n\n    def test_absolute_seek(self, args=[os.SEEK_SET]):\n        sfile = self.make_count_reader()\n        sfile.seek(6, *args)\n        self.assertEqual(b'78', sfile.read(2))\n        sfile.seek(4, *args)\n        self.assertEqual(b'56', sfile.read(2))\n\n    def test_relative_seek(self, args=[os.SEEK_CUR]):\n        sfile = self.make_count_reader()\n        self.assertEqual(b'12', sfile.read(2))\n        sfile.seek(2, *args)\n        self.assertEqual(b'56', sfile.read(2))\n\n    def test_end_seek(self):\n        sfile = self.make_count_reader()\n        sfile.seek(-6, os.SEEK_END)\n        self.assertEqual(b'45', sfile.read(2))\n\n    def test_seek_min_zero(self):\n        sfile = self.make_count_reader()\n        self.assertEqual(0, sfile.tell())\n        with self.assertRaises(IOError):\n            sfile.seek(-2, os.SEEK_SET)\n        self.assertEqual(0, sfile.tell())\n\n    def test_seek_max_size(self):\n        sfile = self.make_count_reader()\n        sfile.seek(2, os.SEEK_END)\n        # POSIX permits seeking past end of file.\n        self.assertEqual(11, sfile.tell())\n\n    def test_size(self):\n        self.assertEqual(9, self.make_count_reader().size())\n\n    def test_tell_after_small_read(self):\n        sfile = self.make_count_reader()\n        sfile.read(1)\n        self.assertEqual(1, sfile.tell())\n\n    def test_no_read_after_close(self):\n        sfile = self.make_count_reader()\n        sfile.close()\n        self.assertRaises(ValueError, sfile.read, 2)\n\n    def test_context(self):\n        with self.make_count_reader() as sfile:\n            self.assertFalse(sfile.closed, \"reader is closed inside context\")\n            self.assertEqual(b'12', sfile.read(2))\n        self.assertTrue(sfile.closed, \"reader is open after context\")\n\n    def check_lines(self, actual):\n        self.assertEqual(['one\\n', 'two\\n', '\\n', 'three\\n', 'four\\n', '\\n'],\n                         actual)\n\n    def test_readline(self):\n        reader = self.make_newlines_reader()\n        actual = []\n        while True:\n            data = reader.readline()\n            if not data:\n                
break\n            actual.append(data)\n        self.check_lines(actual)\n\n    def test_readlines(self):\n        self.check_lines(self.make_newlines_reader().readlines())\n\n    def test_iteration(self):\n        self.check_lines(list(iter(self.make_newlines_reader())))\n\n    def test_readline_size(self):\n        reader = self.make_newlines_reader()\n        self.assertEqual('on', reader.readline(2))\n        self.assertEqual('e\\n', reader.readline(4))\n        self.assertEqual('two\\n', reader.readline(6))\n        self.assertEqual('\\n', reader.readline(8))\n        self.assertEqual('thre', reader.readline(4))\n\n    def test_readlines_sizehint(self):\n        result = self.make_newlines_reader().readlines(8)\n        self.assertEqual(['one\\n', 'two\\n', '\\n', 'three\\n', 'four\\n', '\\n'], result)\n\n    def test_name_attribute(self):\n        sfile = self.make_file_reader(name='nametest')\n        self.assertEqual('nametest', sfile.name)\n\n    def check_decompressed_name(self, filename, expect):\n        reader = self.make_file_reader(name=filename)\n        self.assertEqual(expect, reader.decompressed_name())\n\n    def test_decompressed_name_uncompressed_file(self):\n        self.check_decompressed_name('test.log', 'test.log')\n\n    def test_decompressed_name_gzip_file(self):\n        self.check_decompressed_name('test.log.gz', 'test.log')\n\n    def test_decompressed_name_bz2_file(self):\n        self.check_decompressed_name('test.log.bz2', 'test.log')\n\n    def check_decompression(self, compress_ext, compress_func):\n        test_text = b'decompression\\ntest\\n'\n        test_data = compress_func(test_text)\n        reader = self.make_file_reader(name='test.'+compress_ext, data=test_data)\n        self.assertEqual(test_text, b''.join(reader.readall_decompressed()))\n\n    @staticmethod\n    def gzip_compress(data):\n        compressed_data = io.BytesIO()\n        with gzip.GzipFile(fileobj=compressed_data, mode='wb') as gzip_file:\n            gzip_file.write(data)\n        return compressed_data.getvalue()\n\n    def test_no_decompression(self):\n        self.check_decompression('log', lambda s: s)\n\n    def test_gzip_decompression(self):\n        self.check_decompression('gz', self.gzip_compress)\n\n    def test_bz2_decompression(self):\n        self.check_decompression('bz2', bz2.compress)\n\n    def test_readline_then_readlines(self):\n        reader = self.make_newlines_reader()\n        data = reader.readline()\n        self.assertEqual('one\\n', data)\n        data = reader.readlines()\n        self.assertEqual(['two\\n', '\\n', 'three\\n', 'four\\n', '\\n'], data)\n\n    def test_readline_then_readall(self):\n        reader = self.make_newlines_reader()\n        data = reader.readline()\n        self.assertEqual('one\\n', data)\n        self.assertEqual(b''.join([b'two\\n', b'\\n', b'three\\n', b'four\\n', b'\\n']), b''.join(reader.readall()))\n\n\nclass StreamRetryTestMixin(object):\n    # Define reader_for(coll_name, **kwargs)\n    # and read_for_test(reader, size, **kwargs).\n    API_COLLECTIONS = run_test_server.fixture('collections')\n\n    def keep_client(self):\n        return arvados.KeepClient(proxy='http://[%s]:1' % (tutil.TEST_HOST,),\n                                  local_store='')\n\n    def manifest_for(self, coll_name):\n        return self.API_COLLECTIONS[coll_name]['manifest_text']\n\n    @tutil.skip_sleep\n    def test_success_without_retries(self):\n        with tutil.mock_keep_responses('bar', 200):\n            reader = 
self.reader_for('bar_file')\n            self.assertEqual(b'bar', self.read_for_test(reader, 3))\n\n    @tutil.skip_sleep\n    def test_read_with_instance_retries(self):\n        with tutil.mock_keep_responses('foo', 500, 200):\n            reader = self.reader_for('foo_file', num_retries=3)\n            self.assertEqual(b'foo', self.read_for_test(reader, 3))\n\n    @tutil.skip_sleep\n    def test_read_with_method_retries(self):\n        with tutil.mock_keep_responses('foo', 500, 200):\n            reader = self.reader_for('foo_file')\n            self.assertEqual(b'foo',\n                             self.read_for_test(reader, 3, num_retries=3))\n\n    @tutil.skip_sleep\n    def test_read_instance_retries_exhausted(self):\n        with tutil.mock_keep_responses('bar', 500, 500, 500, 500, 200):\n            reader = self.reader_for('bar_file', num_retries=3)\n            with self.assertRaises(arvados.errors.KeepReadError):\n                self.read_for_test(reader, 3)\n\n    @tutil.skip_sleep\n    def test_read_method_retries_exhausted(self):\n        with tutil.mock_keep_responses('bar', 500, 500, 500, 500, 200):\n            reader = self.reader_for('bar_file')\n            with self.assertRaises(arvados.errors.KeepReadError):\n                self.read_for_test(reader, 3, num_retries=3)\n\n    @tutil.skip_sleep\n    def test_method_retries_take_precedence(self):\n        with tutil.mock_keep_responses('', 500, 500, 500, 200):\n            reader = self.reader_for('user_agreement', num_retries=10)\n            with self.assertRaises(arvados.errors.KeepReadError):\n                self.read_for_test(reader, 10, num_retries=1)\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
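One detail worth calling out from the mixin above: check_decompressed_name pins the naming rule to "strip a trailing .gz or .bz2, leave everything else alone". A standalone sketch of that rule (a plain function for illustration; in the SDK the equivalent logic is the reader's decompressed_name method):

def decompressed_name(filename):
    # Drop a recognized compression suffix; other names pass through.
    for ext in ('.gz', '.bz2'):
        if filename.endswith(ext):
            return filename[:-len(ext)]
    return filename

assert decompressed_name('test.log.gz') == 'test.log'
assert decompressed_name('test.log.bz2') == 'test.log'
assert decompressed_name('test.log') == 'test.log'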
  {
    "path": "sdk/python/tests/test_util.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport itertools\nimport os\nimport subprocess\nimport unittest\n\nimport parameterized\nimport pytest\nfrom unittest import mock\n\nimport arvados\nimport arvados.util\n\nclass KeysetTestHelper:\n    def __init__(self, expect, expect_num_retries=0):\n        self.n = 0\n        self.expect = expect\n        self.expect_num_retries = expect_num_retries\n\n    def fn(self, **kwargs):\n        assert kwargs == self.expect[self.n][0]\n        return self\n\n    def execute(self, num_retries):\n        assert num_retries == self.expect_num_retries\n        self.n += 1\n        return self.expect[self.n-1][1]\n\n_SELECT_FAKE_ITEM = {\n    'uuid': 'zzzzz-zyyyz-zzzzzyyyyywwwww',\n    'name': 'KeysetListAllTestCase.test_select mock',\n    'created_at': '2023-08-28T12:34:56.123456Z',\n}\n\n_FAKE_COMPUTED_PERMISSIONS_ITEM = {\n    'user_uuid': 'zzzzz-zyyyz-zzzzzyyyyywwwww',\n    'target_uuid': 'zzzzz-ttttt-xxxxxyyyyyzzzzz',\n    'perm_level': 'can_write',\n}\n\nclass KeysetListAllTestCase(unittest.TestCase):\n    def test_empty(self):\n        ks = KeysetTestHelper([[\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": []},\n            {\"items\": []}\n        ]])\n\n        ls = list(arvados.util.keyset_list_all(ks.fn))\n        self.assertEqual(ls, [])\n\n    def test_oneitem(self):\n        ks = KeysetTestHelper([[\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": []},\n            {\"items\": [{\"created_at\": \"1\", \"uuid\": \"1\"}]}\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": [[\"created_at\", \"=\", \"1\"], [\"uuid\", \">\", \"1\"]]},\n            {\"items\": []}\n        ],[\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": [[\"created_at\", \">\", \"1\"]]},\n            {\"items\": []}\n        ]])\n\n        ls = list(arvados.util.keyset_list_all(ks.fn))\n        self.assertEqual(ls, [{\"created_at\": \"1\", \"uuid\": \"1\"}])\n\n    def test_onepage2(self):\n        ks = KeysetTestHelper([[\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": []},\n            {\"items\": [{\"created_at\": \"1\", \"uuid\": \"1\"}, {\"created_at\": \"2\", \"uuid\": \"2\"}]}\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": [[\"created_at\", \">=\", \"2\"], [\"uuid\", \"!=\", \"2\"]]},\n            {\"items\": []}\n        ]])\n\n        ls = list(arvados.util.keyset_list_all(ks.fn))\n        self.assertEqual(ls, [{\"created_at\": \"1\", \"uuid\": \"1\"}, {\"created_at\": \"2\", \"uuid\": \"2\"}])\n\n    def test_onepage3(self):\n        ks = KeysetTestHelper([[\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": []},\n            {\"items\": [{\"created_at\": \"1\", \"uuid\": \"1\"}, {\"created_at\": \"2\", \"uuid\": \"2\"}, {\"created_at\": \"3\", \"uuid\": \"3\"}]}\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": [[\"created_at\", \">=\", \"3\"], [\"uuid\", \"!=\", \"3\"]]},\n            {\"items\": []}\n        ]])\n\n        ls = 
list(arvados.util.keyset_list_all(ks.fn))\n        self.assertEqual(ls, [{\"created_at\": \"1\", \"uuid\": \"1\"}, {\"created_at\": \"2\", \"uuid\": \"2\"}, {\"created_at\": \"3\", \"uuid\": \"3\"}])\n\n\n    def test_twopage(self):\n        ks = KeysetTestHelper([[\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": []},\n            {\"items\": [{\"created_at\": \"1\", \"uuid\": \"1\"}, {\"created_at\": \"2\", \"uuid\": \"2\"}]}\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": [[\"created_at\", \">=\", \"2\"], [\"uuid\", \"!=\", \"2\"]]},\n            {\"items\": [{\"created_at\": \"3\", \"uuid\": \"3\"}, {\"created_at\": \"4\", \"uuid\": \"4\"}]}\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": [[\"created_at\", \">=\", \"4\"], [\"uuid\", \"!=\", \"4\"]]},\n            {\"items\": []}\n        ]])\n\n        ls = list(arvados.util.keyset_list_all(ks.fn))\n        self.assertEqual(ls, [{\"created_at\": \"1\", \"uuid\": \"1\"},\n                              {\"created_at\": \"2\", \"uuid\": \"2\"},\n                              {\"created_at\": \"3\", \"uuid\": \"3\"},\n                              {\"created_at\": \"4\", \"uuid\": \"4\"}\n        ])\n\n    def test_repeated_key(self):\n        ks = KeysetTestHelper([[\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": []},\n            {\"items\": [{\"created_at\": \"1\", \"uuid\": \"1\"}, {\"created_at\": \"2\", \"uuid\": \"2\"}, {\"created_at\": \"2\", \"uuid\": \"3\"}]}\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": [[\"created_at\", \">=\", \"2\"], [\"uuid\", \"!=\", \"3\"]]},\n            {\"items\": [{\"created_at\": \"2\", \"uuid\": \"2\"}, {\"created_at\": \"2\", \"uuid\": \"4\"}]}\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": [[\"created_at\", \"=\", \"2\"], [\"uuid\", \">\", \"4\"]]},\n            {\"items\": []}\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": [[\"created_at\", \">\", \"2\"]]},\n            {\"items\": [{\"created_at\": \"3\", \"uuid\": \"5\"}, {\"created_at\": \"4\", \"uuid\": \"6\"}]}\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": [[\"created_at\", \">=\", \"4\"], [\"uuid\", \"!=\", \"6\"]]},\n            {\"items\": []}\n        ],\n        ])\n\n        ls = list(arvados.util.keyset_list_all(ks.fn))\n        self.assertEqual(ls, [{\"created_at\": \"1\", \"uuid\": \"1\"},\n                              {\"created_at\": \"2\", \"uuid\": \"2\"},\n                              {\"created_at\": \"2\", \"uuid\": \"3\"},\n                              {\"created_at\": \"2\", \"uuid\": \"4\"},\n                              {\"created_at\": \"3\", \"uuid\": \"5\"},\n                              {\"created_at\": \"4\", \"uuid\": \"6\"}\n        ])\n\n    def test_onepage_withfilter(self):\n        ks = KeysetTestHelper([[\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": [[\"foo\", \">\", \"bar\"]]},\n            {\"items\": [{\"created_at\": \"1\", \"uuid\": \"1\"}, 
{\"created_at\": \"2\", \"uuid\": \"2\"}]}\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at asc\", \"uuid asc\"], \"filters\": [[\"created_at\", \">=\", \"2\"], [\"uuid\", \"!=\", \"2\"], [\"foo\", \">\", \"bar\"]]},\n            {\"items\": []}\n        ]])\n\n        ls = list(arvados.util.keyset_list_all(ks.fn, filters=[[\"foo\", \">\", \"bar\"]]))\n        self.assertEqual(ls, [{\"created_at\": \"1\", \"uuid\": \"1\"}, {\"created_at\": \"2\", \"uuid\": \"2\"}])\n\n    def test_onepage_desc(self):\n        ks = KeysetTestHelper([[\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at desc\", \"uuid desc\"], \"filters\": []},\n            {\"items\": [{\"created_at\": \"2\", \"uuid\": \"2\"}, {\"created_at\": \"1\", \"uuid\": \"1\"}]}\n        ], [\n            {\"limit\": 1000, \"count\": \"none\", \"order\": [\"created_at desc\", \"uuid desc\"], \"filters\": [[\"created_at\", \"<=\", \"1\"], [\"uuid\", \"!=\", \"1\"]]},\n            {\"items\": []}\n        ]])\n\n        ls = list(arvados.util.keyset_list_all(ks.fn, ascending=False))\n        self.assertEqual(ls, [{\"created_at\": \"2\", \"uuid\": \"2\"}, {\"created_at\": \"1\", \"uuid\": \"1\"}])\n\n    @parameterized.parameterized.expand(\n        (fake_item, key_fields, order_key, select)\n        for (fake_item, key_fields) in [\n            (_SELECT_FAKE_ITEM, ('uuid',)),\n            (_FAKE_COMPUTED_PERMISSIONS_ITEM, ('user_uuid', 'target_uuid')),\n        ]\n        for order_key in fake_item\n        if order_key != 'perm_level'\n        for count in range(len(fake_item) + 1)\n        for select in itertools.combinations(fake_item, count)\n    )\n    def test_select(self, fake_item, key_fields, order_key, select):\n        # keyset_list_all must have both uuid and order_key to function.\n        # Test that it selects those fields along with user-specified ones.\n        expect_select = {*key_fields, order_key, *select}\n        item = {\n            key: value\n            for key, value in fake_item.items()\n            if key in expect_select\n        }\n        list_func = mock.Mock()\n        list_func().execute = mock.Mock(\n            side_effect=[\n                {'items': [item]},\n                {'items': []},\n                {'items': []},\n            ],\n        )\n        list_func.reset_mock()\n        actual = list(arvados.util.keyset_list_all(list_func, order_key, select=list(select), key_fields=key_fields))\n        self.assertEqual(actual, [item])\n        calls = list_func.call_args_list\n        self.assertTrue(len(calls) >= 2, \"list_func() not called enough to exhaust items\")\n        for args, kwargs in calls:\n            self.assertEqual(set(kwargs.get('select', ())), expect_select)\n\n\nclass TestIterStorageClasses:\n    @pytest.fixture\n    def mixed_config(self):\n        return {'StorageClasses': {\n            'foo': {'Default': False},\n            'bar': {'Default': True},\n            'baz': {'Default': True},\n        }}\n\n    @pytest.fixture\n    def nodef_config(self):\n        return {'StorageClasses': {\n            'foo': {'Default': False},\n            'bar': {'Default': False},\n        }}\n\n    def test_defaults(self, mixed_config):\n        assert list(arvados.util.iter_storage_classes(mixed_config)) == ['bar', 'baz']\n\n    def test_custom_check(self, mixed_config):\n        assert list(arvados.util.iter_storage_classes(mixed_config, bool)) == ['foo', 'bar', 'baz']\n\n    def test_default_fallback(self, 
nodef_config):\n        assert list(arvados.util.iter_storage_classes(nodef_config)) == ['default']\n\n    def test_custom_fallback(self, nodef_config):\n        assert list(arvados.util.iter_storage_classes(nodef_config, fallback='fb')) == ['fb']\n\n    def test_no_fallback(self, nodef_config):\n        assert list(arvados.util.iter_storage_classes(nodef_config, fallback='')) == []\n\n"
  },
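The KeysetTestHelper expectations spell out the paging contract for arvados.util.keyset_list_all: results are ordered by (created_at, uuid), the first request carries no extra filters, and each later request resumes past the last item seen via [['created_at', '>=', last], ['uuid', '!=', last_uuid]]. A simplified sketch of that loop, assuming a list_func/execute interface like the helper above; the real function additionally drains pages of repeated created_at values (see test_repeated_key) and supports descending order:

def keyset_pages(list_func, num_retries=0):
    # Simplified ascending keyset pagination over (created_at, uuid).
    filters = []
    while True:
        page = list_func(
            limit=1000,
            count='none',
            order=['created_at asc', 'uuid asc'],
            filters=filters,
        ).execute(num_retries=num_retries)
        items = page['items']
        if not items:
            return
        yield from items
        last = items[-1]
        # '>=' resumes at the last key; 'uuid !=' skips the item itself
        # when several rows share the same created_at.
        filters = [['created_at', '>=', last['created_at']],
                   ['uuid', '!=', last['uuid']]]

Run against the helper from test_twopage, this sketch reproduces the expected call sequence and yields the same four items in order.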
  {
    "path": "sdk/python/tests/test_vocabulary.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport arvados\nimport unittest\n\nfrom unittest import mock\n\nfrom arvados import api, vocabulary\n\nclass VocabularyTest(unittest.TestCase):\n    EXAMPLE_VOC = {\n        'tags': {\n            'IDTAGANIMALS': {\n                'strict': False,\n                'labels': [\n                    {'label': 'Animal'},\n                    {'label': 'Creature'},\n                ],\n                'values': {\n                    'IDVALANIMAL1': {\n                        'labels': [\n                            {'label': 'Human'},\n                            {'label': 'Homo sapiens'},\n                        ],\n                    },\n                    'IDVALANIMAL2': {\n                        'labels': [\n                            {'label': 'Elephant'},\n                            {'label': 'Loxodonta'},\n                        ],\n                    },\n                },\n            },\n            'IDTAGIMPORTANCES': {\n                'strict': True,\n                'labels': [\n                    {'label': 'Importance'},\n                    {'label': 'Priority'},\n                ],\n                'values': {\n                    'IDVALIMPORTANCE1': {\n                        'labels': [\n                            {'label': 'High'},\n                            {'label': 'High priority'},\n                        ],\n                    },\n                    'IDVALIMPORTANCE2': {\n                        'labels': [\n                            {'label': 'Medium'},\n                            {'label': 'Medium priority'},\n                        ],\n                    },\n                    'IDVALIMPORTANCE3': {\n                        'labels': [\n                            {'label': 'Low'},\n                            {'label': 'Low priority'},\n                        ],\n                    },\n                },\n            },\n            'IDTAGCOMMENTS': {\n                'strict': False,\n                'labels': [\n                    {'label': 'Comment'},\n                    {'label': 'Notes'},\n                ],\n                'values': None,\n            },\n        },\n    }\n\n    def setUp(self):\n        self.api = arvados.api('v1')\n        self.voc = vocabulary.Vocabulary(self.EXAMPLE_VOC)\n        self.api.vocabulary = mock.MagicMock(return_value=self.EXAMPLE_VOC)\n\n    def test_vocabulary_keys(self):\n        self.assertEqual(self.voc.strict_keys, False)\n        self.assertEqual(\n            self.voc.key_aliases.keys(),\n            set(['idtaganimals', 'creature', 'animal',\n                'idtagimportances', 'importance', 'priority',\n                'idtagcomments', 'comment', 'notes'])\n        )\n\n        vk = self.voc.key_aliases['creature']\n        self.assertEqual(vk.strict, False)\n        self.assertEqual(vk.identifier, 'IDTAGANIMALS')\n        self.assertEqual(vk.aliases, ['Animal', 'Creature'])\n        self.assertEqual(vk.preferred_label, 'Animal')\n        self.assertEqual(\n            vk.value_aliases.keys(),\n            set(['idvalanimal1', 'human', 'homo sapiens',\n                'idvalanimal2', 'elephant', 'loxodonta'])\n        )\n\n    def test_vocabulary_values(self):\n        vk = self.voc.key_aliases['creature']\n        vv = vk.value_aliases['human']\n        self.assertEqual(vv.identifier, 'IDVALANIMAL1')\n        self.assertEqual(vv.aliases, ['Human', 'Homo sapiens'])\n        
self.assertEqual(vv.preferred_label, 'Human')\n\n    def test_vocabulary_indexing(self):\n        self.assertEqual(self.voc['creature']['human'].identifier, 'IDVALANIMAL1')\n        self.assertEqual(self.voc['Creature']['Human'].identifier, 'IDVALANIMAL1')\n        self.assertEqual(self.voc['CREATURE']['HUMAN'].identifier, 'IDVALANIMAL1')\n        with self.assertRaises(KeyError):\n            inexistant = self.voc['foo']\n\n    def test_empty_vocabulary(self):\n        voc = vocabulary.Vocabulary({})\n        self.assertEqual(voc.strict_keys, False)\n        self.assertEqual(voc.key_aliases, {})\n\n    def test_load_vocabulary_with_api(self):\n        voc = vocabulary.load_vocabulary(self.api)\n        self.assertEqual(voc['creature']['human'].identifier, 'IDVALANIMAL1')\n        self.assertEqual(voc['Creature']['Human'].identifier, 'IDVALANIMAL1')\n        self.assertEqual(voc['CREATURE']['HUMAN'].identifier, 'IDVALANIMAL1')\n\n    def test_convert_to_identifiers(self):\n        cases = [\n            {'IDTAGIMPORTANCES': 'IDVALIMPORTANCE1'},\n            {'IDTAGIMPORTANCES': 'High'},\n            {'importance': 'IDVALIMPORTANCE1'},\n            {'priority': 'high priority'},\n        ]\n        for case in cases:\n            self.assertEqual(\n                self.voc.convert_to_identifiers(case),\n                {'IDTAGIMPORTANCES': 'IDVALIMPORTANCE1'},\n                \"failing test case: {}\".format(case)\n            )\n\n    def test_convert_to_identifiers_multiple_pairs(self):\n        cases = [\n            {'IDTAGIMPORTANCES': 'IDVALIMPORTANCE1', 'IDTAGANIMALS': 'IDVALANIMAL1', 'IDTAGCOMMENTS': 'Very important person'},\n            {'IDTAGIMPORTANCES': 'High', 'IDTAGANIMALS': 'IDVALANIMAL1', 'comment': 'Very important person'},\n            {'importance': 'IDVALIMPORTANCE1', 'animal': 'IDVALANIMAL1', 'notes': 'Very important person'},\n            {'priority': 'high priority', 'animal': 'IDVALANIMAL1', 'NOTES': 'Very important person'},\n        ]\n        for case in cases:\n            self.assertEqual(\n                self.voc.convert_to_identifiers(case),\n                {'IDTAGIMPORTANCES': 'IDVALIMPORTANCE1', 'IDTAGANIMALS': 'IDVALANIMAL1', 'IDTAGCOMMENTS': 'Very important person'},\n                \"failing test case: {}\".format(case)\n            )\n\n    def test_convert_to_identifiers_value_lists(self):\n        cases = [\n            {'IDTAGIMPORTANCES': ['IDVALIMPORTANCE1', 'IDVALIMPORTANCE2']},\n            {'IDTAGIMPORTANCES': ['High', 'Medium']},\n            {'importance': ['IDVALIMPORTANCE1', 'IDVALIMPORTANCE2']},\n            {'priority': ['high', 'medium']},\n        ]\n        for case in cases:\n            self.assertEqual(\n                self.voc.convert_to_identifiers(case),\n                {'IDTAGIMPORTANCES': ['IDVALIMPORTANCE1', 'IDVALIMPORTANCE2']},\n                \"failing test case: {}\".format(case)\n            )\n\n    def test_convert_to_identifiers_unknown_key(self):\n        # Non-strict vocabulary\n        self.assertEqual(self.voc.strict_keys, False)\n        self.assertEqual(self.voc.convert_to_identifiers({'foo': 'bar'}), {'foo': 'bar'})\n        # Strict vocabulary\n        strict_voc = arvados.vocabulary.Vocabulary(self.EXAMPLE_VOC)\n        strict_voc.strict_keys = True\n        with self.assertRaises(vocabulary.VocabularyKeyError):\n            strict_voc.convert_to_identifiers({'foo': 'bar'})\n\n    def test_convert_to_identifiers_invalid_key(self):\n        with self.assertRaises(vocabulary.VocabularyKeyError):\n         
   self.voc.convert_to_identifiers({42: 'bar'})\n        with self.assertRaises(vocabulary.VocabularyKeyError):\n            self.voc.convert_to_identifiers({None: 'bar'})\n        with self.assertRaises(vocabulary.VocabularyKeyError):\n            self.voc.convert_to_identifiers({('f', 'o', 'o'): 'bar'})\n\n    def test_convert_to_identifiers_unknown_value(self):\n        # Non-strict key\n        self.assertEqual(self.voc['animal'].strict, False)\n        self.assertEqual(self.voc.convert_to_identifiers({'Animal': 'foo'}), {'IDTAGANIMALS': 'foo'})\n        # Strict key\n        self.assertEqual(self.voc['priority'].strict, True)\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_identifiers({'Priority': 'foo'})\n\n    def test_convert_to_identifiers_invalid_value(self):\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_identifiers({'Animal': 42})\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_identifiers({'Animal': None})\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_identifiers({'Animal': {'hello': 'world'}})\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_identifiers({'Animal': [42]})\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_identifiers({'Animal': [None]})\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_identifiers({'Animal': [{'hello': 'world'}]})\n\n    def test_convert_to_identifiers_unknown_value_list(self):\n        # Non-strict key\n        self.assertEqual(self.voc['animal'].strict, False)\n        self.assertEqual(\n            self.voc.convert_to_identifiers({'Animal': ['foo', 'loxodonta']}),\n            {'IDTAGANIMALS': ['foo', 'IDVALANIMAL2']}\n        )\n        # Strict key\n        self.assertEqual(self.voc['priority'].strict, True)\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_identifiers({'Priority': ['foo', 'bar']})\n\n    def test_convert_to_labels(self):\n        cases = [\n            {'IDTAGIMPORTANCES': 'IDVALIMPORTANCE1'},\n            {'IDTAGIMPORTANCES': 'High'},\n            {'importance': 'IDVALIMPORTANCE1'},\n            {'priority': 'high priority'},\n        ]\n        for case in cases:\n            self.assertEqual(\n                self.voc.convert_to_labels(case),\n                {'Importance': 'High'},\n                \"failing test case: {}\".format(case)\n            )\n\n    def test_convert_to_labels_multiple_pairs(self):\n        cases = [\n            {'IDTAGIMPORTANCES': 'IDVALIMPORTANCE1', 'IDTAGANIMALS': 'IDVALANIMAL1', 'IDTAGCOMMENTS': 'Very important person'},\n            {'IDTAGIMPORTANCES': 'High', 'IDTAGANIMALS': 'IDVALANIMAL1', 'comment': 'Very important person'},\n            {'importance': 'IDVALIMPORTANCE1', 'animal': 'IDVALANIMAL1', 'notes': 'Very important person'},\n            {'priority': 'high priority', 'animal': 'IDVALANIMAL1', 'NOTES': 'Very important person'},\n        ]\n        for case in cases:\n            self.assertEqual(\n                self.voc.convert_to_labels(case),\n                {'Importance': 'High', 'Animal': 'Human', 'Comment': 'Very important person'},\n                \"failing test case: {}\".format(case)\n            )\n\n    def test_convert_to_labels_value_lists(self):\n        cases = 
[\n            {'IDTAGIMPORTANCES': ['IDVALIMPORTANCE1', 'IDVALIMPORTANCE2']},\n            {'IDTAGIMPORTANCES': ['High', 'Medium']},\n            {'importance': ['IDVALIMPORTANCE1', 'IDVALIMPORTANCE2']},\n            {'priority': ['high', 'medium']},\n        ]\n        for case in cases:\n            self.assertEqual(\n                self.voc.convert_to_labels(case),\n                {'Importance': ['High', 'Medium']},\n                \"failing test case: {}\".format(case)\n            )\n\n    def test_convert_to_labels_unknown_key(self):\n        # Non-strict vocabulary\n        self.assertEqual(self.voc.strict_keys, False)\n        self.assertEqual(self.voc.convert_to_labels({'foo': 'bar'}), {'foo': 'bar'})\n        # Strict vocabulary\n        strict_voc = arvados.vocabulary.Vocabulary(self.EXAMPLE_VOC)\n        strict_voc.strict_keys = True\n        with self.assertRaises(vocabulary.VocabularyKeyError):\n            strict_voc.convert_to_labels({'foo': 'bar'})\n\n    def test_convert_to_labels_invalid_key(self):\n        with self.assertRaises(vocabulary.VocabularyKeyError):\n            self.voc.convert_to_labels({42: 'bar'})\n        with self.assertRaises(vocabulary.VocabularyKeyError):\n            self.voc.convert_to_labels({None: 'bar'})\n        with self.assertRaises(vocabulary.VocabularyKeyError):\n            self.voc.convert_to_labels({('f', 'o', 'o'): 'bar'})\n\n    def test_convert_to_labels_unknown_value(self):\n        # Non-strict key\n        self.assertEqual(self.voc['animal'].strict, False)\n        self.assertEqual(self.voc.convert_to_labels({'IDTAGANIMALS': 'foo'}), {'Animal': 'foo'})\n        # Strict key\n        self.assertEqual(self.voc['priority'].strict, True)\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_labels({'IDTAGIMPORTANCES': 'foo'})\n\n    def test_convert_to_labels_invalid_value(self):\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_labels({'IDTAGIMPORTANCES': {'high': True}})\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_labels({'IDTAGIMPORTANCES': None})\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_labels({'IDTAGIMPORTANCES': 42})\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_labels({'IDTAGIMPORTANCES': False})\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_labels({'IDTAGIMPORTANCES': [42]})\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_labels({'IDTAGIMPORTANCES': [None]})\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_labels({'IDTAGIMPORTANCES': [{'high': True}]})\n\n    def test_convert_to_labels_unknown_value_list(self):\n        # Non-strict key\n        self.assertEqual(self.voc['animal'].strict, False)\n        self.assertEqual(\n            self.voc.convert_to_labels({'IDTAGANIMALS': ['foo', 'IDVALANIMAL1']}),\n            {'Animal': ['foo', 'Human']}\n        )\n        # Strict key\n        self.assertEqual(self.voc['priority'].strict, True)\n        with self.assertRaises(vocabulary.VocabularyValueError):\n            self.voc.convert_to_labels({'IDTAGIMPORTANCES': ['foo', 'bar']})\n\n    def test_convert_roundtrip(self):\n        initial = {'IDTAGIMPORTANCES': 'IDVALIMPORTANCE1', 'IDTAGANIMALS': 'IDVALANIMAL1', 
'IDTAGCOMMENTS': 'Very important person'}\n        converted = self.voc.convert_to_labels(initial)\n        self.assertNotEqual(converted, initial)\n        self.assertEqual(self.voc.convert_to_identifiers(converted), initial)\n"
  },
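The conversion tests reduce to one invariant: for known keys and values, convert_to_labels and convert_to_identifiers are inverses, with the first listed label serving as the preferred form. A compact round-trip against a pared-down vocabulary in the same shape as EXAMPLE_VOC:

from arvados import vocabulary

voc = vocabulary.Vocabulary({'tags': {
    'IDTAGANIMALS': {
        'strict': False,
        'labels': [{'label': 'Animal'}],
        'values': {'IDVALANIMAL1': {'labels': [{'label': 'Human'}]}},
    },
}})
labels = voc.convert_to_labels({'IDTAGANIMALS': 'IDVALANIMAL1'})
assert labels == {'Animal': 'Human'}
assert voc.convert_to_identifiers(labels) == {'IDTAGANIMALS': 'IDVALANIMAL1'}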
  {
    "path": "sdk/ruby/.gitignore",
    "content": "Gemfile.lock\narvados*gem\n"
  },
  {
    "path": "sdk/ruby/Gemfile",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nsource 'https://rubygems.org'\ngemspec\ngem 'rake'\ngem 'minitest', '~> 5.0'\ngem 'mocha', '~> 2.1', require: false\n"
  },
  {
    "path": "sdk/ruby/LICENSE-2.0.txt",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "sdk/ruby/README",
    "content": "\nThis directory contains contains the Ruby SDK.\n\n## Installation instructions\n\nYou can build the gem with the following command:\n\n  gem build arvados.gemspec\n\nand install it like this:\n\n  gem install ./arvados-0.1.0.gem\n\n## Code example\n\n#!/usr/bin/env ruby\n\nENV['ARVADOS_API_HOST'] = 'arvados.local'\nENV['ARVADOS_API_TOKEN'] = 'qwertyuiopasdfghjklzxcvbnm1234567890abcdefghijklmn'\n\nrequire 'arvados'\narv = Arvados.new( { :suppress_ssl_warnings => false } )\n\ncr_list = arv.container_request.list(where:{})\nputs cr_list[:items].first.inspect\n\ncr = arv.container_request.get(uuid:\"zzzzz-xvhdp-fkkbrl98u3pk87m\")\nputs pt.inspect\n"
  },
  {
    "path": "sdk/ruby/Rakefile",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire 'rake/testtask'\n\nRake::TestTask.new do |t|\n  t.libs << 'test'\nend\n\ndesc 'Run tests'\ntask default: :test\n"
  },
  {
    "path": "sdk/ruby/arvados.gemspec",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nbegin\n  git_root = \"#{__dir__}/../..\"\n  git_timestamp, git_hash = IO.popen(\n    [\"git\", \"-C\", git_root,\n     \"log\", \"-n1\", \"--first-parent\", \"--format=%ct:%H\",\n     \"--\", \"build/version-at-commit.sh\", \"sdk/ruby\"],\n  ) do |git_log|\n    git_log.readline.chomp.split(\":\")\n  end\nrescue Errno::ENOENT\n  $stderr.puts(\"failed to get version information: 'git' not found\")\n  exit 69  # EX_UNAVAILABLE\nend\n\nif $? != 0\n  $stderr.puts(\"failed to get version information: 'git log' exited #{$?}\")\n  exit 65  # EX_DATAERR\nend\ngit_timestamp = Time.at(git_timestamp.to_i).utc\nversion = ENV[\"ARVADOS_BUILDING_VERSION\"] || IO.popen(\n            [\"#{git_root}/build/version-at-commit.sh\", git_hash],\n          ) do |ver_out|\n  ver_out.readline.chomp.encode(\"utf-8\")\nend\nversion = version.sub(\"~dev\", \".dev\").sub(\"~rc\", \".rc\")\n\nGem::Specification.new do |s|\n  s.name        = 'arvados'\n  s.version     = version\n  s.date        = git_timestamp.strftime(\"%Y-%m-%d\")\n  s.summary     = \"Arvados client library\"\n  s.description = \"Arvados client library, git commit #{git_hash}\"\n  s.authors     = [\"Arvados Authors\"]\n  s.email       = 'packaging@arvados.org'\n  s.licenses    = ['Apache-2.0']\n  s.files       = [\"lib/arvados.rb\", \"lib/arvados/google_api_client.rb\",\n                   \"lib/arvados/collection.rb\", \"lib/arvados/keep.rb\",\n                   \"README\", \"LICENSE-2.0.txt\"]\n  s.required_ruby_version = '>= 3.0.0'\n  # activesupport 7.2.0 dropped Ruby 3.0.\n  s.add_dependency('activesupport', '~> 7.1.3', '>= 7.1.3.4')\n  s.add_dependency('andand', '~> 1.3', '>= 1.3.3')\n  # arvados fork of google-api-client gem with old API and new\n  # compatibility fixes, built from ../ruby-google-api-client/\n  s.add_dependency('arvados-google-api-client', '~> 0.8.7.5')\n  s.add_dependency('json', '>= 1.7.7', '<3')\n  s.add_runtime_dependency('jwt', '<2', '>= 0.1.5')\n  s.homepage    =\n    'https://arvados.org'\nend\n"
  },
  {
    "path": "sdk/ruby/lib/arvados/collection.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire \"arvados/keep\"\n\nmodule Arv\n  class Collection\n    def initialize(manifest_text=\"\")\n      @manifest_text = manifest_text\n      @modified = false\n      @root = CollectionRoot.new\n      manifest = Keep::Manifest.new(manifest_text)\n      manifest.each_line do |stream_root, locators, file_specs|\n        if stream_root.empty? or locators.empty? or file_specs.empty?\n          raise ArgumentError.new(\"manifest text includes malformed line\")\n        end\n        loc_list = LocatorList.new(locators)\n        file_specs.map { |s| manifest.split_file_token(s) }.\n            each do |file_start, file_len, file_path|\n          begin\n            @root.file_at(normalize_path(stream_root, file_path)).\n              add_segment(loc_list.segment(file_start, file_len))\n          rescue Errno::ENOTDIR, Errno::EISDIR => error\n            raise ArgumentError.new(\"%p is both a stream and file\" %\n                                    error.to_s.partition(\" - \").last)\n          end\n        end\n      end\n    end\n\n    def manifest_text\n      @manifest_text ||= @root.manifest_text\n    end\n\n    def modified?\n      @modified\n    end\n\n    def unmodified\n      @modified = false\n      self\n    end\n\n    def normalize\n      @manifest_text = @root.manifest_text\n      self\n    end\n\n    def cp_r(source, target, source_collection=nil)\n      opts = {:descend_target => !source.end_with?(\"/\")}\n      copy(:merge, source.chomp(\"/\"), target, source_collection, opts)\n    end\n\n    def each_file_path(&block)\n      @root.each_file_path(&block)\n    end\n\n    def exist?(path)\n      begin\n        substream, item = find(path)\n        not (substream.leaf? or substream[item].nil?)\n      rescue Errno::ENOENT, Errno::ENOTDIR\n        false\n      end\n    end\n\n    def rename(source, target)\n      copy(:add_copy, source, target) { rm_r(source) }\n    end\n\n    def rm(source)\n      remove(source)\n    end\n\n    def rm_r(source)\n      remove(source, :recursive => true)\n    end\n\n    protected\n\n    def find(*parts)\n      @root.find(normalize_path(*parts))\n    end\n\n    private\n\n    def modified\n      @manifest_text = nil\n      @modified = true\n      self\n    end\n\n    def normalize_path(*parts)\n      path = File.join(*parts)\n      if path.empty?\n        raise ArgumentError.new(\"empty path\")\n      elsif (path == \".\") or path.start_with?(\"./\")\n        path\n      else\n        \"./#{path}\"\n      end\n    end\n\n    def copy(copy_method, source, target, source_collection=nil, opts={})\n      # Find the item at path `source` in `source_collection`, find the\n      # destination stream at path `target`, and use `copy_method` to copy\n      # the found object there.  
If a block is passed in, it will be called\n      # right before we do the actual copy, after we confirm that everything\n      # is found and can be copied.\n      source_collection = self if source_collection.nil?\n      src_stream, src_tail = source_collection.find(source)\n      dst_stream_path, _, dst_tail = normalize_path(target).rpartition(\"/\")\n      if dst_stream_path.empty?\n        dst_stream, dst_tail = @root.find(dst_tail)\n        dst_tail ||= src_tail\n      else\n        dst_stream = @root.stream_at(dst_stream_path)\n        dst_tail = src_tail if dst_tail.empty?\n      end\n      if (source_collection.equal?(self) and\n          (src_stream.path == dst_stream.path) and (src_tail == dst_tail))\n        return self\n      end\n      src_item = src_stream[src_tail]\n      check_method = \"check_can_#{copy_method}\".to_sym\n      target_name = nil\n      if opts.fetch(:descend_target, true)\n        begin\n          # Find out if `target` refers to a stream we should copy into.\n          tail_stream = dst_stream[dst_tail]\n          tail_stream.send(check_method, src_item, src_tail)\n          # Yes it does.  Copy the item at `source` into it with the same name.\n          dst_stream = tail_stream\n          target_name = src_tail\n        rescue Errno::ENOENT, Errno::ENOTDIR\n          # It does not.  We'll fall back to writing to `target` below.\n        end\n      end\n      if target_name.nil?\n        dst_stream.send(check_method, src_item, dst_tail)\n        target_name = dst_tail\n      end\n      # At this point, we know the operation will work.  Call any block as\n      # a pre-copy hook.\n      if block_given?\n        yield\n        # Re-find the destination stream, in case the block removed\n        # the original (that's how rename is implemented).\n        dst_stream = @root.stream_at(dst_stream.path)\n      end\n      dst_stream.send(copy_method, src_item, target_name)\n      modified\n    end\n\n    def remove(path, opts={})\n      stream, name = find(path)\n      stream.delete(name, opts)\n      modified\n    end\n\n    Struct.new(\"LocatorSegment\", :locators, :start_pos, :length)\n\n    class LocatorRange < Range\n      attr_reader :locator\n\n      def initialize(loc_s, start)\n        @locator = loc_s\n        range_end = start + Keep::Locator.parse(loc_s).size.to_i\n        super(start, range_end, false)\n      end\n    end\n\n    class LocatorList\n      # LocatorList efficiently builds LocatorSegments from a stream manifest.\n      def initialize(locators)\n        next_start = 0\n        @ranges = locators.map do |loc_s|\n          new_range = LocatorRange.new(loc_s, next_start)\n          next_start = new_range.end\n          new_range\n        end\n      end\n\n      def segment(start_pos, length)\n        # Return a LocatorSegment that captures `length` bytes from `start_pos`.\n        start_index = search_for_byte(start_pos)\n        if length == 0\n          end_index = start_index\n        else\n          end_index = search_for_byte(start_pos + length - 1, start_index)\n        end\n        seg_ranges = @ranges[start_index..end_index]\n        Struct::LocatorSegment.new(seg_ranges.map(&:locator),\n                                   start_pos - seg_ranges.first.begin,\n                                   length)\n      end\n\n      private\n\n      def search_for_byte(target, start_index=0)\n        # Do a binary search for byte `target` in the list of locators,\n        # starting from `start_index`.  
Return the index of the range in\n        # @ranges that contains the byte.\n        lo = start_index\n        hi = @ranges.size\n        loop do\n          ii = (lo + hi) / 2\n          range = @ranges[ii]\n          if range.include?(target) && (target < range.end || ii == hi-1)\n            return ii\n          elsif ii == lo\n            raise RangeError.new(\"%i not in segment\" % target)\n          elsif target < range.begin\n            hi = ii\n          else\n            lo = ii\n          end\n        end\n      end\n    end\n\n    class CollectionItem\n      attr_reader :path, :name\n\n      def initialize(path)\n        @path = path\n        @name = File.basename(path)\n      end\n    end\n\n    class CollectionFile < CollectionItem\n      def initialize(path)\n        super\n        @segments = []\n      end\n\n      def self.human_name\n        \"file\"\n      end\n\n      def file?\n        true\n      end\n\n      def leaf?\n        true\n      end\n\n      def add_segment(segment)\n        @segments << segment\n      end\n\n      def each_segment(&block)\n        @segments.each(&block)\n      end\n\n      def check_can_add_copy(src_item, name)\n        raise Errno::ENOTDIR.new(path)\n      end\n\n      alias_method :check_can_merge, :check_can_add_copy\n\n      def copy_named(copy_path)\n        copy = self.class.new(copy_path)\n        each_segment { |segment| copy.add_segment(segment) }\n        copy\n      end\n    end\n\n    class CollectionStream < CollectionItem\n      def initialize(path)\n        super\n        @items = {}\n      end\n\n      def self.human_name\n        \"stream\"\n      end\n\n      def file?\n        false\n      end\n\n      def leaf?\n        items.empty?\n      end\n\n      def [](key)\n        items[key] or\n          raise Errno::ENOENT.new(\"%p not found in %p\" % [key, path])\n      end\n\n      def delete(name, opts={})\n        item = self[name]\n        if item.file? 
or opts[:recursive]\n          items.delete(name)\n        else\n          raise Errno::EISDIR.new(path)\n        end\n      end\n\n      def each_file_path\n        return to_enum(__method__) unless block_given?\n        items.each_value do |item|\n          if item.file?\n            yield item.path\n          else\n            item.each_file_path { |path| yield path }\n          end\n        end\n      end\n\n      def find(find_path)\n        # Given a POSIX-style path, return the CollectionStream that\n        # contains the object at that path, and the name of the object\n        # inside it.\n        components = find_path.split(\"/\")\n        tail = components.pop\n        [components.reduce(self, :[]), tail]\n      end\n\n      def stream_at(find_path)\n        key, rest = find_path.split(\"/\", 2)\n        next_stream = get_or_new(key, CollectionStream, Errno::ENOTDIR)\n        if rest.nil?\n          next_stream\n        else\n          next_stream.stream_at(rest)\n        end\n      end\n\n      def file_at(find_path)\n        stream_path, _, file_name = find_path.rpartition(\"/\")\n        if stream_path.empty?\n          get_or_new(file_name, CollectionFile, Errno::EISDIR)\n        else\n          stream_at(stream_path).file_at(file_name)\n        end\n      end\n\n      def manifest_text\n        # Return a string with the normalized manifest text for this stream,\n        # including all substreams.\n        file_keys, stream_keys = items.keys.sort.partition do |key|\n          items[key].file?\n        end\n        my_line = StreamManifest.new(path)\n        file_keys.each do |file_name|\n          my_line.add_file(items[file_name])\n        end\n        sub_lines = stream_keys.map do |sub_name|\n          items[sub_name].manifest_text\n        end\n        my_line.to_s + sub_lines.join(\"\")\n      end\n\n      def check_can_add_copy(src_item, key)\n        if existing = check_can_merge(src_item, key) and not existing.leaf?\n          raise Errno::ENOTEMPTY.new(existing.path)\n        end\n      end\n\n      def check_can_merge(src_item, key)\n        if existing = items[key] and (existing.class != src_item.class)\n          raise Errno::ENOTDIR.new(existing.path)\n        end\n        existing\n      end\n\n      def add_copy(src_item, key)\n        if key == \".\"\n          self[key] = src_item.copy_named(\"#{path}\")\n        else\n          self[key] = src_item.copy_named(\"#{path}/#{key}\")\n        end\n      end\n\n      def merge(src_item, key)\n        # Do a recursive copy of the collection item `src_item` to destination\n        # `key`.  If a simple copy is safe, do that; otherwise, recursively\n        # merge the contents of the stream `src_item` into the stream at\n        # `key`.\n        begin\n          check_can_add_copy(src_item, key)\n          add_copy(src_item, key)\n        rescue Errno::ENOTEMPTY\n          dest = self[key]\n          error = nil\n          # Copy as much as possible, then raise any error encountered.\n          # Start with streams for a depth-first merge.\n          src_items = src_item.items.each_pair.sort_by do |_, sub_item|\n            (sub_item.file?) ? 
1 : 0\n          end\n          src_items.each do |sub_key, sub_item|\n            begin\n              dest.merge(sub_item, sub_key)\n            rescue Errno::ENOTDIR => error\n            end\n          end\n          raise error unless error.nil?\n        end\n      end\n\n      def copy_named(copy_path)\n        copy = self.class.new(copy_path)\n        items.each_pair do |key, item|\n          copy.add_copy(item, key)\n        end\n        copy\n      end\n\n      protected\n\n      attr_reader :items\n\n      private\n\n      def []=(key, item)\n        items[key] = item\n      end\n\n      def get_or_new(key, klass, err_class)\n        # Return the collection item at `key` and ensure that it's a `klass`.\n        # If `key` does not exist, create a new `klass` there.\n        # If the value for `key` is not a `klass`, raise an `err_class`.\n        item = items[key]\n        if item.nil?\n          self[key] = klass.new(\"#{path}/#{key}\")\n        elsif not item.is_a?(klass)\n          raise err_class.new(item.path)\n        else\n          item\n        end\n      end\n    end\n\n    class CollectionRoot < CollectionStream\n      def initialize\n        super(\"\")\n        setup\n      end\n\n      def delete(name, opts={})\n        super\n        # If that didn't fail, it deleted the . stream.  Recreate it.\n        setup\n      end\n\n      def check_can_merge(src_item, key)\n        if items.include?(key)\n          super\n        else\n          raise_root_write_error(key)\n        end\n      end\n\n      private\n\n      def setup\n        items[\".\"] = CollectionStream.new(\".\")\n      end\n\n      def add_copy(src_item, key)\n        items[\".\"].add_copy(src_item, key)\n      end\n\n      def raise_root_write_error(key)\n        raise ArgumentError.new(\"can't write to %p at collection root\" % key)\n      end\n\n      def []=(key, item)\n        raise_root_write_error(key)\n      end\n    end\n\n    class StreamManifest\n      # Build a manifest text for a single stream, without substreams.\n      # The manifest includes files in the order they're added.  If you want\n      # a normalized manifest, add files in lexical order by name.\n\n      def initialize(name)\n        @name = name\n        @loc_ranges = []\n        @loc_range_start = 0\n        @file_specs = []\n      end\n\n      def add_file(coll_file)\n        coll_file.each_segment do |segment|\n          extend_file_specs(coll_file.name, segment)\n        end\n      end\n\n      def to_s\n        if @file_specs.empty?\n          \"\"\n        else\n          \"%s %s %s\\n\" % [escape_name(@name),\n                          @loc_ranges.collect(&:locator).join(\" \"),\n                          @file_specs.join(\" \")]\n        end\n      end\n\n      private\n\n      def extend_file_specs(filename, segment)\n        found_overlap = false\n        # Find the longest prefix of segment.locators that's a suffix\n        # of the existing @loc_ranges. 
If we find one, drop those\n        # locators (they'll be added back below, when we're handling\n        # the normal/no-overlap case).\n        (1..segment.locators.length).each do |overlap|\n          if @loc_ranges.length >= overlap && @loc_ranges[-overlap..-1].collect(&:locator) == segment.locators[0..overlap-1]\n            (1..overlap).each do\n              discarded = @loc_ranges.pop\n              @loc_range_start -= (discarded.end - discarded.begin)\n            end\n            found_overlap = true\n            break\n          end\n        end\n\n        # If there was no overlap at the end of our existing\n        # @loc_ranges, check whether the full set of segment.locators\n        # appears earlier in @loc_ranges. If so, use those instead of\n        # appending the same locators again.\n        if !found_overlap && segment.locators.length < @loc_ranges.length\n          segment_start = 0\n          (0..@loc_ranges.length-1).each do |ri|\n            if @loc_ranges[ri..ri+segment.locators.length-1].collect(&:locator) == segment.locators\n              @file_specs << \"#{segment.start_pos + @loc_ranges[ri].begin}:#{segment.length}:#{escape_name(filename)}\"\n              return\n            end\n          end\n        end\n\n        segment_start = @loc_range_start\n        segment.locators.each do |loc_s|\n          r = LocatorRange.new(loc_s, @loc_range_start)\n          @loc_ranges << r\n          @loc_range_start = r.end\n        end\n        @file_specs << \"#{segment.start_pos + segment_start}:#{segment.length}:#{escape_name(filename)}\"\n      end\n\n      def escape_name(name)\n        name.gsub(/\\\\/, \"\\\\\\\\\\\\\\\\\").gsub(/\\s/) do |s|\n          s.each_byte.map { |c| \"\\\\%03o\" % c }.join(\"\")\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby/lib/arvados/google_api_client.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire 'google/api_client'\nrequire 'json'\nrequire 'tempfile'\n\nclass Google::APIClient\n  def discovery_document(api, version)\n    api = api.to_s\n    discovery_uri = self.discovery_uri(api, version)\n    discovery_uri_hash = Digest::MD5.hexdigest(discovery_uri)\n    discovery_cache_path =\n      File.expand_path(\"~/.cache/arvados/discovery-#{discovery_uri_hash}.json\")\n    @discovery_documents[discovery_uri_hash] ||=\n      disk_cached_discovery_document(discovery_cache_path) or\n      fetched_discovery_document(discovery_uri, discovery_cache_path)\n  end\n\n  private\n\n  def disk_cached_discovery_document(cache_path)\n    begin\n      if (Time.now - File.mtime(cache_path)) < 86400\n        open(cache_path) do |cache_file|\n          return JSON.load(cache_file)\n        end\n      end\n    rescue IOError, SystemCallError, JSON::JSONError\n      # Error reading the cache.  Act like it doesn't exist.\n    end\n    nil\n  end\n\n  def write_cached_discovery_document(cache_path, body)\n    cache_dir = File.dirname(cache_path)\n    cache_file = nil\n    begin\n      FileUtils.makedirs(cache_dir)\n      cache_file = Tempfile.new(\"discovery\", cache_dir)\n      cache_file.write(body)\n      cache_file.flush\n      File.rename(cache_file.path, cache_path)\n    rescue IOError, SystemCallError\n      # Failure to write the cache is non-fatal.  Do nothing.\n    ensure\n      cache_file.close! unless cache_file.nil?\n    end\n  end\n\n  def fetched_discovery_document(uri, cache_path)\n    response = self.execute!(:http_method => :get,\n                             :uri => uri,\n                             :authenticated => false)\n    write_cached_discovery_document(cache_path, response.body)\n    JSON.load(response.body)\n  end\nend\n"
  },
  {
    "path": "sdk/ruby/lib/arvados/keep.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nmodule Keep\n  class Locator\n    # A Locator is used to parse and manipulate Keep locator strings.\n    #\n    # Locators obey the following syntax:\n    #\n    #   locator      ::= address hint*\n    #   address      ::= digest size-hint\n    #   digest       ::= <32 hexadecimal digits>\n    #   size-hint    ::= \"+\" [0-9]+\n    #   hint         ::= \"+\" hint-type hint-content\n    #   hint-type    ::= [A-Z]\n    #   hint-content ::= [A-Za-z0-9@_-]+\n    #\n    # Individual hints may have their own required format:\n    #\n    #   sign-hint      ::= \"+A\" <40 lowercase hex digits> \"@\" sign-timestamp\n    #   sign-timestamp ::= <8 lowercase hex digits>\n    attr_reader :hash, :hints, :size\n\n    LOCATOR_REGEXP = /^([[:xdigit:]]{32})(\\+([[:digit:]]+))?((\\+([[:upper:]][[:alnum:]@_-]*))+)?\\z/\n\n    def initialize(hasharg, sizearg, hintarg)\n      @hash = hasharg\n      @size = sizearg\n      @hints = hintarg\n    end\n\n    def self.valid? tok\n      !!(LOCATOR_REGEXP.match tok)\n    end\n\n    # Locator.parse returns a Locator object parsed from the string tok.\n    # Returns nil if tok could not be parsed as a valid locator.\n    def self.parse(tok)\n      begin\n        Locator.parse!(tok)\n      rescue ArgumentError\n        nil\n      end\n    end\n\n    # Locator.parse! returns a Locator object parsed from the string tok,\n    # raising an ArgumentError if tok cannot be parsed.\n    def self.parse!(tok)\n      if tok.nil? or tok.empty?\n        raise ArgumentError.new \"locator is nil or empty\"\n      end\n\n      m = LOCATOR_REGEXP.match(tok)\n      unless m\n        raise ArgumentError.new \"not a valid locator #{tok}\"\n      end\n\n      tokhash, _, toksize, _, _, trailer = m[1..6]\n      tokhints = []\n      if trailer\n        trailer.split('+').each do |hint|\n          if hint =~ /^[[:upper:]][[:alnum:]@_-]*$/\n            tokhints.push(hint)\n          else\n            raise ArgumentError.new \"invalid hint #{hint}\"\n          end\n        end\n      end\n\n      Locator.new(tokhash, toksize, tokhints)\n    end\n\n    # Returns the signature hint supplied with this locator,\n    # or nil if the locator was not signed.\n    def signature\n      @hints.grep(/^A/).first\n    end\n\n    # Returns an unsigned Locator.\n    def without_signature\n      Locator.new(@hash, @size, @hints.reject { |o| o.start_with?(\"A\") })\n    end\n\n    def strip_hints\n      Locator.new(@hash, @size, [])\n    end\n\n    def strip_hints!\n      @hints = []\n      self\n    end\n\n    def to_s\n      if @size\n        [ @hash, @size, *@hints ].join('+')\n      else\n        [ @hash, *@hints ].join('+')\n      end\n    end\n  end\n\n  class Manifest\n    STREAM_TOKEN_REGEXP = /^([^\\000-\\040\\\\]|\\\\[0-3][0-7][0-7])+$/\n    STREAM_NAME_REGEXP = /^(\\.)(\\/[^\\/]+)*$/\n\n    EMPTY_DIR_TOKEN_REGEXP = /^0:0:\\.$/ # The exception when a file can have '.' 
as a name\n    FILE_TOKEN_REGEXP = /^[[:digit:]]+:[[:digit:]]+:([^\\000-\\040\\\\]|\\\\[0-3][0-7][0-7])+$/\n    FILE_NAME_REGEXP = /^[[:digit:]]+:[[:digit:]]+:([^\\/]+(\\/[^\\/]+)*)$/\n\n    NON_8BIT_ENCODED_CHAR = /[^\\\\]\\\\[4-7][0-7][0-7]/\n\n    # Class to parse a manifest text and provide common views of that data.\n    def initialize(manifest_text)\n      @text = manifest_text\n      @files = nil\n    end\n\n    def each_line\n      return to_enum(__method__) unless block_given?\n      @text.each_line do |line|\n        stream_name = nil\n        block_tokens = []\n        file_tokens = []\n        line.scan(/\\S+/) do |token|\n          if stream_name.nil?\n            stream_name = unescape token\n          elsif file_tokens.empty? and Locator.valid? token\n            block_tokens << token\n          else\n            file_tokens << unescape(token)\n          end\n        end\n        # Ignore blank lines\n        next if stream_name.nil?\n        yield [stream_name, block_tokens, file_tokens]\n      end\n    end\n\n    def self.unescape(s)\n      return nil if s.nil?\n\n      # Parse backslash escapes in a Keep manifest stream or file name.\n      s.gsub(/\\\\(\\\\|[0-7]{3})/) do |_|\n        case $1\n        when '\\\\'\n          '\\\\'\n        else\n          $1.to_i(8).chr\n        end\n      end\n    end\n\n    def unescape(s)\n      self.class.unescape(s)\n    end\n\n    def split_file_token token\n      start_pos, filesize, filename = token.split(':', 3)\n      if filename.nil?\n        raise ArgumentError.new \"Invalid file token '#{token}'\"\n      end\n      [start_pos.to_i, filesize.to_i, unescape(filename)]\n    end\n\n    def each_file_spec\n      return to_enum(__method__) unless block_given?\n      @text.each_line do |line|\n        stream_name = nil\n        in_file_tokens = false\n        line.scan(/\\S+/) do |token|\n          if stream_name.nil?\n            stream_name = unescape token\n          elsif in_file_tokens or not Locator.valid? token\n            in_file_tokens = true\n\n            start_pos, file_size, file_name = split_file_token(token)\n            stream_name_adjuster = ''\n            if file_name.include?('/')                # '/' in filename\n              dirname, sep, basename = file_name.rpartition('/')\n              stream_name_adjuster = sep + dirname   # /dir_parts\n              file_name = basename\n            end\n\n            yield [stream_name + stream_name_adjuster, start_pos, file_size, file_name]\n          end\n        end\n      end\n      true\n    end\n\n    def files\n      if @files.nil?\n        file_sizes = Hash.new(0)\n        each_file_spec do |streamname, _, filesize, filename|\n          file_sizes[[streamname, filename]] += filesize\n        end\n        @files = file_sizes.each_pair.map do |(streamname, filename), size|\n          [streamname, filename, size]\n        end\n      end\n      @files\n    end\n\n    def files_count(stop_after=nil)\n      # Return the number of files represented in this manifest.\n      # If stop_after is provided, files_count will read the manifest\n      # incrementally, and return immediately when it counts that number of\n      # files.  This can help you avoid parsing the entire manifest if you\n      # just want to check if a small number of files are specified.\n      if stop_after.nil? or not @files.nil?\n        # Avoid counting empty dir placeholders\n        return files.reject{|_, name, size| name == '.' 
and size == 0}.size\n      end\n      seen_files = {}\n      each_file_spec do |streamname, _, filesize, filename|\n        # Avoid counting empty dir placeholders\n        next if filename == \".\" and filesize == 0\n        seen_files[[streamname, filename]] = true\n        return stop_after if (seen_files.size >= stop_after)\n      end\n      seen_files.size\n    end\n\n    def files_size\n      # Return the total size of all files in this manifest.\n      files.reduce(0) { |total, (_, _, size)| total + size }\n    end\n\n    def exact_file_count?(want_count)\n      files_count(want_count + 1) == want_count\n    end\n\n    def minimum_file_count?(want_count)\n      files_count(want_count) >= want_count\n    end\n\n    def has_file?(want_stream, want_file=nil)\n      if want_file.nil?\n        want_stream, want_file = File.split(want_stream)\n      end\n      each_file_spec do |streamname, _, _, name|\n        if streamname == want_stream and name == want_file\n          return true\n        end\n      end\n      false\n    end\n\n    # Verify that a given manifest is valid according to\n    # https://dev.arvados.org/projects/arvados/wiki/Keep_manifest_format\n    def self.validate! manifest\n      raise ArgumentError.new \"No manifest found\" if !manifest\n\n      return true if manifest.empty?\n\n      raise ArgumentError.new \"Invalid manifest: does not end with newline\" if !manifest.end_with?(\"\\n\")\n      line_count = 0\n      manifest.each_line do |line|\n        line_count += 1\n\n        words = line[0..-2].split(/ /)\n        raise ArgumentError.new \"Manifest invalid for stream #{line_count}: missing stream name\" if words.empty?\n\n        count = 0\n\n        word = words.shift\n        raise ArgumentError.new \"Manifest invalid for stream #{line_count}: >8-bit encoded chars not allowed on stream token #{word.inspect}\" if word =~ NON_8BIT_ENCODED_CHAR\n        unescaped_word = unescape(word)\n        count += 1 if word =~ STREAM_TOKEN_REGEXP and unescaped_word =~ STREAM_NAME_REGEXP and unescaped_word !~ /\\/\\.\\.?(\\/|$)/\n        raise ArgumentError.new \"Manifest invalid for stream #{line_count}: missing or invalid stream name #{word.inspect if word}\" if count != 1\n\n        count = 0\n        word = words.shift\n        while word =~ Locator::LOCATOR_REGEXP\n          word = words.shift\n          count += 1\n        end\n        raise ArgumentError.new \"Manifest invalid for stream #{line_count}: missing or invalid locator #{word.inspect if word}\" if count == 0\n\n        count = 0\n        raise ArgumentError.new \"Manifest invalid for stream #{line_count}: >8-bit encoded chars not allowed on file token #{word.inspect}\" if word =~ NON_8BIT_ENCODED_CHAR\n        while unescape(word) =~ EMPTY_DIR_TOKEN_REGEXP or\n          (word =~ FILE_TOKEN_REGEXP and unescape(word) =~ FILE_NAME_REGEXP and ($~[1].split('/') & ['..', '.']).empty?)\n          word = words.shift\n          count += 1\n        end\n\n        if word\n          raise ArgumentError.new \"Manifest invalid for stream #{line_count}: invalid file token #{word.inspect}\"\n        elsif count == 0\n          raise ArgumentError.new \"Manifest invalid for stream #{line_count}: no file tokens\"\n        end\n\n        # Ruby's split() method silently drops trailing empty tokens\n        # (which are not allowed by the manifest format) so we have to\n        # check trailing spaces manually.\n        raise ArgumentError.new \"Manifest invalid for stream #{line_count}: trailing space\" if line.end_with? 
\" \\n\"\n      end\n      true\n    end\n\n    def self.valid? manifest\n      begin\n        validate! manifest\n        true\n      rescue ArgumentError\n        false\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby/lib/arvados.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire 'rubygems'\nrequire 'active_support/inflector'\nrequire 'json'\nrequire 'fileutils'\nrequire 'andand'\nrequire 'net/http'\n\nrequire 'arvados/google_api_client'\n\nActiveSupport::Inflector.inflections do |inflect|\n  inflect.irregular 'specimen', 'specimens'\n  inflect.irregular 'human', 'humans'\nend\n\nclass Arvados\n  class ArvadosClient < Google::APIClient\n    attr_reader :request_id\n\n    def execute(*args)\n      @request_id = \"req-\" + Random.new.rand(2**128).to_s(36)[0..19]\n      if args.last.is_a? Hash\n        args.last[:headers] ||= {}\n        args.last[:headers]['X-Request-Id'] = @request_id\n      end\n      begin\n        super(*args)\n      rescue => e\n        if !e.message.match(/.*req-[0-9a-zA-Z]{20}.*/)\n          raise $!, \"#{$!} (Request ID: #{@request_id})\", $!.backtrace\n        end\n        raise e\n      end\n    end\n  end\n\n  class TransactionFailedError < StandardError\n  end\n\n  @@debuglevel = 0\n  class << self\n    attr_accessor :debuglevel\n  end\n\n  def initialize(opts={})\n    @application_version ||= 0.0\n    @application_name ||= File.split($0).last\n\n    @arvados_api_version = opts[:api_version] || 'v1'\n\n    @config = nil\n    [[:api_host, 'ARVADOS_API_HOST'],\n     [:api_token, 'ARVADOS_API_TOKEN']].each do |op, en|\n      if opts[op]\n        config[en] = opts[op]\n      end\n      if !config[en]\n        raise \"#{$0}: no :#{op} or ENV[#{en}] provided.\"\n      end\n    end\n\n    if (opts[:suppress_ssl_warnings] or\n        %w(1 true yes).index(config['ARVADOS_API_HOST_INSECURE'].\n                             andand.downcase))\n      suppress_warnings do\n        OpenSSL::SSL.const_set 'VERIFY_PEER', OpenSSL::SSL::VERIFY_NONE\n      end\n    end\n\n    # Define a class and an Arvados instance method for each Arvados\n    # resource. After this, self.job will return Arvados::Job;\n    # self.job.new() and self.job.find() will do what you want.\n    _arvados = self\n    namespace_class = Arvados.const_set \"A#{self.object_id}\", Class.new\n    self.arvados_api.schemas.each do |classname, schema|\n      next if classname.match(/List$/)\n      klass = Class.new(Arvados::Model) do\n        def self.arvados\n          @arvados\n        end\n        def self.api_models_sym\n          @api_models_sym\n        end\n        def self.api_model_sym\n          @api_model_sym\n        end\n      end\n\n      # Define the resource methods (create, get, update, delete, ...)\n      self.\n        arvados_api.\n        send(classname.underscore.split('/').last.pluralize.to_sym).\n        discovered_methods.\n        each do |method|\n        class << klass; self; end.class_eval do\n          define_method method.name do |*params|\n            self.api_exec method, *params\n          end\n        end\n      end\n\n      # Give the new class access to the API\n      klass.instance_eval do\n        @arvados = _arvados\n        # TODO: Pull these from the discovery document instead.\n        @api_models_sym = classname.underscore.split('/').last.pluralize.to_sym\n        @api_model_sym = classname.underscore.split('/').last.to_sym\n      end\n\n      # Create the new class in namespace_class so it doesn't\n      # interfere with classes created by other Arvados objects. 
The\n      # result looks like Arvados::A26949680::Job.\n      namespace_class.const_set classname, klass\n\n      self.define_singleton_method classname.underscore do\n        klass\n      end\n    end\n  end\n\n  def client\n    @client ||= ArvadosClient.\n      new(:host => config[\"ARVADOS_API_HOST\"],\n          :application_name => @application_name,\n          :application_version => @application_version.to_s)\n  end\n\n  def arvados_api\n    @arvados_api ||= self.client.discovered_api('arvados', @arvados_api_version)\n  end\n\n  def self.debuglog(message, verbosity=1)\n    $stderr.puts \"#{File.split($0).last} #{$$}: #{message}\" if @@debuglevel >= verbosity\n  end\n\n  def debuglog *args\n    self.class.debuglog(*args)\n  end\n\n  def config(config_file_path=\"~/.config/arvados/settings.conf\")\n    return @config if @config\n\n    # Initialize config settings with environment variables.\n    config = {}\n    config['ARVADOS_API_HOST']          = ENV['ARVADOS_API_HOST']\n    config['ARVADOS_API_TOKEN']         = ENV['ARVADOS_API_TOKEN']\n    config['ARVADOS_API_HOST_INSECURE'] = ENV['ARVADOS_API_HOST_INSECURE']\n\n    if config['ARVADOS_API_HOST'] and config['ARVADOS_API_TOKEN']\n      # Environment variables take precedence over the config file, so\n      # there is no point reading the config file. If the environment\n      # specifies a _HOST without asking for _INSECURE, we certainly\n      # shouldn't give the config file a chance to create a\n      # system-wide _INSECURE state for this user.\n      #\n      # Note: If we start using additional configuration settings from\n      # this file in the future, we might have to read the file anyway\n      # instead of returning here.\n      return (@config = config)\n    end\n\n    begin\n      expanded_path = File.expand_path config_file_path\n      if File.exist? expanded_path\n        # Load settings from the config file.\n        lineno = 0\n        File.open(expanded_path).each do |line|\n          lineno = lineno + 1\n          # skip comments and blank lines\n          next if line.match('^\\s*#') or not line.match('\\S')\n          var, val = line.chomp.split('=', 2)\n          var.strip!\n          val.strip!\n          # allow environment settings to override config files.\n          if !var.empty? and val\n            config[var] ||= val\n          else\n            debuglog \"#{expanded_path}: #{lineno}: could not parse `#{line}'\", 0\n          end\n        end\n      end\n    rescue StandardError => e\n      debuglog \"Ignoring error reading #{config_file_path}: #{e}\", 0\n    end\n\n    @config = config\n  end\n\n  def cluster_config\n    return @cluster_config if @cluster_config\n\n    uri = URI(\"https://#{config()[\"ARVADOS_API_HOST\"]}/arvados/v1/config\")\n    cc = JSON.parse(Net::HTTP.get(uri))\n\n    @cluster_config = cc\n  end\n\n  class Model\n    def self.arvados_api\n      arvados.arvados_api\n    end\n    def self.client\n      arvados.client\n    end\n    def self.debuglog(*args)\n      arvados.class.debuglog(*args)\n    end\n    def debuglog(*args)\n      self.class.arvados.class.debuglog(*args)\n    end\n    def self.api_exec(method, parameters={})\n      api_method = arvados_api.send(api_models_sym).send(method.name.to_sym)\n      parameters.each do |k,v|\n        parameters[k] = v.to_json if v.is_a? Array or v.is_a? 
Hash\n      end\n      # Look for objects expected by request.properties.(key).$ref and\n      # move them from parameters (query string) to request body.\n      body = nil\n      method.discovery_document['request'].\n        andand['properties'].\n        andand.each do |k,v|\n        if v.is_a? Hash and v['$ref']\n          body ||= {}\n          body[k] = parameters.delete k.to_sym\n        end\n      end\n      result = client.\n        execute(:api_method => api_method,\n                :authenticated => false,\n                :parameters => parameters,\n                :body_object => body,\n                :headers => {\n                  :authorization => 'Bearer '+arvados.config['ARVADOS_API_TOKEN']\n                })\n      resp = JSON.parse result.body, :symbolize_names => true\n      if resp[:errors]\n        if !resp[:errors][0].match(/.*req-[0-9a-zA-Z]{20}.*/)\n          resp[:errors][0] += \" (#{result.headers['X-Request-Id'] or client.request_id})\"\n        end\n        raise Arvados::TransactionFailedError.new(resp[:errors])\n      elsif resp[:uuid] and resp[:etag]\n        self.new(resp)\n      elsif resp[:items].is_a? Array\n        resp.merge(:items => resp[:items].collect do |i|\n                     self.new(i)\n                   end)\n      else\n        resp\n      end\n    end\n\n    def []=(x,y)\n      @attributes_to_update[x] = y\n      @attributes[x] = y\n    end\n    def [](x)\n      if @attributes[x].is_a? Hash or @attributes[x].is_a? Array\n        # We won't be notified via []= if these change, so we'll just\n        # assume they are going to get changed, and submit them if\n        # save() is called.\n        @attributes_to_update[x] = @attributes[x]\n      end\n      @attributes[x]\n    end\n    def save\n      @attributes_to_update.keys.each do |k|\n        @attributes_to_update[k] = @attributes[k]\n      end\n      j = self.class.api_exec :update, {\n        :uuid => @attributes[:uuid],\n        self.class.api_model_sym => @attributes_to_update.to_json\n      }\n      unless j.respond_to? :[] and j[:uuid]\n        debuglog \"Failed to save #{self.to_s}: #{j[:errors] rescue nil}\", 0\n        nil\n      else\n        @attributes_to_update = {}\n        @attributes = j\n      end\n    end\n\n    protected\n\n    def initialize(j)\n      @attributes_to_update = {}\n      @attributes = j\n    end\n  end\n\n  protected\n\n  def suppress_warnings\n    original_verbosity = $VERBOSE\n    begin\n      $VERBOSE = nil\n      yield\n    ensure\n      $VERBOSE = original_verbosity\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby/test/sdk_fixtures.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire \"yaml\"\n\nmodule SDKFixtures\n  module StaticMethods\n    # SDKFixtures will use these as class methods, and install them as\n    # instance methods on the test classes.\n    def random_block(size=nil)\n      sprintf(\"%032x+%d\", rand(16 ** 32), size || rand(64 * 1024 * 1024))\n    end\n\n    def random_blocks(count, size=nil)\n      (0...count).map { |_| random_block(size) }\n    end\n  end\n\n  extend StaticMethods\n\n  def self.included(base)\n    base.include(StaticMethods)\n  end\n\n  @@fixtures = {}\n  def fixtures name\n    @@fixtures[name] ||=\n      begin\n        path = File.\n          expand_path(\"../../../../services/api/test/fixtures/#{name}.yml\",\n                      __FILE__)\n        file = IO.read(path)\n        trim_index = file.index('# Test Helper trims the rest of the file')\n        file = file[0, trim_index] if trim_index\n        YAML.safe_load(file, permitted_classes: [Time])\n      end\n  end\n\n  ### Valid manifests\n  SIMPLEST_MANIFEST = \". #{random_block(9)} 0:9:simple.txt\\n\"\n  MULTIBLOCK_FILE_MANIFEST =\n    [\". #{random_block(8)} 0:4:repfile 4:4:uniqfile\",\n     \"./s1 #{random_block(6)} 0:3:repfile 3:3:uniqfile\",\n     \". #{random_block(8)} 0:7:uniqfile2 7:1:repfile\\n\"].join(\"\\n\")\n  MULTILEVEL_MANIFEST =\n    [\". #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\\n\",\n     \"./dir0 #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\\n\",\n     \"./dir0/subdir #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\\n\",\n     \"./dir1 #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\\n\",\n     \"./dir1/subdir #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\\n\",\n     \"./dir2 #{random_block(9)} 0:3:file1 3:3:file2 6:3:file3\\n\"].join(\"\")\n  COLON_FILENAME_MANIFEST = \". #{random_block(9)} 0:9:file:test.txt\\n\"\n  # Filename is `a a.txt`.\n  ESCAPED_FILENAME_MANIFEST = \". #{random_block(9)} 0:9:a\\\\040\\\\141.txt\\n\"\n  MANY_ESCAPES_MANIFEST =\n    \"./dir\\\\040name #{random_block(9)} 0:9:file\\\\\\\\name\\\\011\\\\here.txt\\n\"\n  NONNORMALIZED_MANIFEST =\n    [\"./dir2 #{random_block} 0:0:z 0:0:y 0:0:x\",\n     \"./dir1 #{random_block} 0:0:p 0:0:o 0:0:n\\n\"].join(\"\\n\")\n  MANIFEST_WITH_DIRS_IN_FILENAMES =\n    [\". #{random_block(10)} 0:3:file1 3:3:dir1/file1 6:3:dir1/dir2/file1\\n\"].join(\"\")\n  MULTILEVEL_MANIFEST_WITH_DIRS_IN_FILENAMES =\n    [\". #{random_block(10)} 0:3:file1 3:3:dir1/file1 6:4:dir1/dir2/file1\\n\",\n     \"./dir1 #{random_block(10)} 0:3:file1 3:7:dir2/file1\\n\"].join(\"\")\n\n  ### Non-tree manifests\n  # These manifests follow the spec, but they express a structure that can't\n  # can't be represented by a POSIX filesystem tree.  For example, there's a\n  # name conflict between a stream and a filename.\n  NAME_CONFLICT_MANIFEST =\n    [\". #{random_block(9)} 0:9:conflict\",\n     \"./conflict #{random_block} 0:0:name\\n\"].join(\"\\n\")\nend\n"
  },
  {
    "path": "sdk/ruby/test/test_big_request.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire 'minitest/autorun'\nrequire 'arvados'\nrequire 'digest/md5'\n\nclass TestBigRequest < Minitest::Test\n  def boring_manifest nblocks\n    x = '.'\n    (0..nblocks).each do |z|\n      x += ' d41d8cd98f00b204e9800998ecf8427e+0'\n    end\n    x += \" 0:0:foo.txt\\n\"\n    x\n  end\n\n  def test_create_manifest nblocks=1\n    skip \"Test needs an API server to run against\"\n    manifest_text = boring_manifest nblocks\n    uuid = Digest::MD5.hexdigest(manifest_text) + '+' + manifest_text.size.to_s\n    c = Arvados.new.collection.create(collection: {\n                                        uuid: uuid,\n                                        manifest_text: manifest_text,\n                                      })\n    assert_equal uuid, c[:portable_data_hash]\n  end\n\n  def test_create_big_manifest\n    # This ensures that manifest_text is passed in the request body:\n    # it's too large to fit in the query string.\n    test_create_manifest 9999\n  end\nend\n"
  },
  {
    "path": "sdk/ruby/test/test_collection.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire \"arvados/collection\"\nrequire \"minitest/autorun\"\nrequire \"sdk_fixtures\"\n\nclass CollectionTest < Minitest::Test\n  include SDKFixtures\n\n  TWO_BY_TWO_BLOCKS = SDKFixtures.random_blocks(2, 9)\n  TWO_BY_TWO_MANIFEST_A =\n    [\". #{TWO_BY_TWO_BLOCKS.first} 0:5:f1 5:4:f2\\n\",\n     \"./s1 #{TWO_BY_TWO_BLOCKS.last} 0:5:f1 5:4:f3\\n\"]\n  TWO_BY_TWO_MANIFEST_S = TWO_BY_TWO_MANIFEST_A.join(\"\")\n\n  def abcde_blocks\n    [\"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa+9\", \"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb+9\", \"cccccccccccccccccccccccccccccccc+9\", \"dddddddddddddddddddddddddddddddd+9\", \"eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee+9\"]\n  end\n\n  ### .new\n\n  def test_empty_construction\n    coll = Arv::Collection.new\n    assert_equal(\"\", coll.manifest_text)\n  end\n\n  def test_successful_construction\n    [:SIMPLEST_MANIFEST, :MULTIBLOCK_FILE_MANIFEST, :MULTILEVEL_MANIFEST].\n        each do |manifest_name|\n      manifest_text = SDKFixtures.const_get(manifest_name)\n      coll = Arv::Collection.new(manifest_text)\n      assert_equal(manifest_text, coll.manifest_text,\n                   \"did not get same manifest back out from #{manifest_name}\")\n    end\n  end\n\n  def test_range_edge_cases\n    [\n      \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1\\n\",\n      \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\\n\",\n      \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file1\\n\",\n      \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file1\\n\",\n      \". 0cc175b9c0f1b6a831c399e269772661+1 0:0:file1 1:0:file2 1:0:file1\\n\",\n    ].each do |txt|\n      coll = Arv::Collection.new(txt)\n      coll.normalize\n      assert_match(/ 0:0:file1/, coll.manifest_text)\n    end\n    [\n      \". d41d8cd98f00b204e9800998ecf8427e+0 1:0:file1\\n\",\n      \". 0cc175b9c0f1b6a831c399e269772661+1 0:0:file1 2:0:file2 1:0:file1\\n\",\n    ].each do |txt|\n      assert_raises(RangeError) do\n        coll = Arv::Collection.new(txt)\n        coll.normalize\n      end\n    end\n  end\n\n  def test_non_manifest_construction_error\n    [\"word\", \". abc def\", \". #{random_block} 0:\", \". / !\"].each do |m_text|\n      assert_raises(ArgumentError,\n                    \"built collection from manifest #{m_text.inspect}\") do\n        Arv::Collection.new(m_text)\n      end\n    end\n  end\n\n  def test_file_directory_conflict_construction_error\n    assert_raises(ArgumentError) do\n      Arv::Collection.new(NAME_CONFLICT_MANIFEST)\n    end\n  end\n\n  def test_no_implicit_normalization\n    coll = Arv::Collection.new(NONNORMALIZED_MANIFEST)\n    assert_equal(NONNORMALIZED_MANIFEST, coll.manifest_text)\n  end\n\n  ### .normalize\n\n  def test_non_posix_path_handling\n    m_text = \"./.. #{random_block(9)} 0:5:. 
5:4:..\\n\"\n    coll = Arv::Collection.new(m_text.dup)\n    coll.normalize\n    assert_equal(m_text, coll.manifest_text)\n  end\n\n  def test_escaping_through_normalization\n    coll = Arv::Collection.new(MANY_ESCAPES_MANIFEST)\n    coll.normalize\n    # The result should simply duplicate the file spec.\n    # The source file spec has an unescaped backslash in it.\n    # It's OK for the Collection class to properly escape that.\n    expect_text = MANY_ESCAPES_MANIFEST.sub(/ \\d+:\\d+:\\S+/) do |file_spec|\n      file_spec.gsub(/([^\\\\])(\\\\[^\\\\\\d])/, '\\1\\\\\\\\\\2')\n    end\n    assert_equal(expect_text, coll.manifest_text)\n  end\n\n  def test_concatenation_with_locator_overlap(over_index=0)\n    blocks = random_blocks(4, 2)\n    blocks_s = blocks.join(\" \")\n    coll = Arv::Collection.new(\". %s 0:8:file\\n. %s 0:4:file\\n\" %\n                               [blocks_s, blocks[over_index, 2].join(\" \")])\n    coll.normalize\n    assert_equal(\". #{blocks_s} 0:8:file #{over_index * 2}:4:file\\n\",\n                 coll.manifest_text)\n  end\n\n  def test_concatenation_with_middle_locator_overlap\n    test_concatenation_with_locator_overlap(1)\n  end\n\n  def test_concatenation_with_end_locator_overlap\n    test_concatenation_with_locator_overlap(2)\n  end\n\n  def test_concatenation_with_partial_locator_overlap\n    blocks = random_blocks(3, 3)\n    coll = Arv::Collection\n      .new(\". %s 0:6:overlap\\n. %s 0:6:overlap\\n\" %\n           [blocks[0, 2].join(\" \"), blocks[1, 2].join(\" \")])\n    coll.normalize\n    assert_equal(\". #{blocks.join(' ')} 0:6:overlap 3:6:overlap\\n\",\n                 coll.manifest_text)\n  end\n\n  def test_normalize\n    block = random_block\n    coll = Arv::Collection.new(\". #{block} 0:0:f2 0:0:f1\\n\")\n    coll.normalize\n    assert_equal(\". #{block} 0:0:f1 0:0:f2\\n\", coll.manifest_text)\n  end\n\n  def test_normalization_file_spans_two_whole_blocks(file_specs=\"0:10:f1\",\n                                                     num_blocks=2)\n    blocks = random_blocks(num_blocks, 5)\n    m_text = \". #{blocks.join(' ')} #{file_specs}\\n\"\n    coll = Arv::Collection.new(m_text.dup)\n    coll.normalize\n    assert_equal(m_text, coll.manifest_text)\n  end\n\n  def test_normalization_file_fits_beginning_block\n    test_normalization_file_spans_two_whole_blocks(\"0:7:f1\")\n  end\n\n  def test_normalization_file_fits_end_block\n    test_normalization_file_spans_two_whole_blocks(\"3:7:f1\")\n  end\n\n  def test_normalization_file_spans_middle\n    test_normalization_file_spans_two_whole_blocks(\"3:5:f1\")\n  end\n\n  def test_normalization_file_spans_three_whole_blocks\n    test_normalization_file_spans_two_whole_blocks(\"0:15:f1\", 3)\n  end\n\n  def test_normalization_file_skips_bytes\n    test_normalization_file_spans_two_whole_blocks(\"0:3:f1 5:5:f1\")\n  end\n\n  def test_normalization_file_inserts_bytes\n    test_normalization_file_spans_two_whole_blocks(\"0:3:f1 5:3:f1 3:2:f1\")\n  end\n\n  def test_normalization_file_duplicates_bytes\n    test_normalization_file_spans_two_whole_blocks(\"2:3:f1 2:3:f1\", 1)\n  end\n\n  def test_normalization_handles_duplicate_locator\n    blocks = random_blocks(2, 5)\n    coll = Arv::Collection.new(\". %s %s 1:8:f1 11:8:f1\\n\" %\n                               [blocks.join(\" \"), blocks.reverse.join(\" \")])\n    coll.normalize\n    assert_equal(\". 
#{blocks.join(' ')} #{blocks[0]} 1:8:f1 6:8:f1\\n\",\n                 coll.manifest_text)\n  end\n\n  ### .cp_r\n\n  def test_simple_file_copy\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    coll.cp_r(\"./simple.txt\", \"./new\")\n    assert_equal(SIMPLEST_MANIFEST.sub(\" 0:9:\", \" 0:9:new 0:9:\"),\n                 coll.manifest_text)\n  end\n\n  def test_copy_file_into_other_stream(target=\"./s1/f2\", basename=\"f2\")\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    coll.cp_r(\"./f2\", target)\n    expected = \"%s./s1 %s 0:5:f1 14:4:%s 5:4:f3\\n\" %\n      [TWO_BY_TWO_MANIFEST_A.first,\n       TWO_BY_TWO_BLOCKS.reverse.join(\" \"), basename]\n    assert_equal(expected, coll.manifest_text)\n  end\n\n  def test_implicit_copy_file_into_other_stream\n    test_copy_file_into_other_stream(\"./s1\")\n  end\n\n  def test_copy_file_into_other_stream_with_new_name\n    test_copy_file_into_other_stream(\"./s1/f2a\", \"f2a\")\n  end\n\n  def test_copy_file_over_in_other_stream(target=\"./s1/f1\")\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    coll.cp_r(\"./f1\", target)\n    expected = \"%s./s1 %s 0:5:f1 14:4:f3\\n\" %\n      [TWO_BY_TWO_MANIFEST_A.first, TWO_BY_TWO_BLOCKS.join(\" \")]\n    assert_equal(expected, coll.manifest_text)\n  end\n\n  def test_implicit_copy_file_over_in_other_stream\n    test_copy_file_over_in_other_stream(\"./s1\")\n  end\n\n  def test_simple_stream_copy\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    coll.cp_r(\"./s1\", \"./sNew\")\n    new_line = TWO_BY_TWO_MANIFEST_A.last.sub(\"./s1 \", \"./sNew \")\n    assert_equal(TWO_BY_TWO_MANIFEST_S + new_line, coll.manifest_text)\n  end\n\n  def test_copy_stream_into_other_stream(target=\"./dir2/subdir\",\n                                         basename=\"subdir\")\n    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)\n    coll.cp_r(\"./dir1/subdir\", target)\n    new_line = MULTILEVEL_MANIFEST.lines[4].sub(\"./dir1/subdir \",\n                                                \"./dir2/#{basename} \")\n    assert_equal(MULTILEVEL_MANIFEST + new_line, coll.manifest_text)\n  end\n\n  def test_implicit_copy_stream_into_other_stream\n    test_copy_stream_into_other_stream(\"./dir2\")\n  end\n\n  def test_copy_stream_into_other_stream_with_new_name\n    test_copy_stream_into_other_stream(\"./dir2/newsub\", \"newsub\")\n  end\n\n  def test_copy_stream_over_empty_stream\n    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)\n    (1..3).each do |file_num|\n      coll.rm(\"./dir0/subdir/file#{file_num}\")\n    end\n    coll.cp_r(\"./dir1/subdir\", \"./dir0\")\n    expected = MULTILEVEL_MANIFEST.lines\n    expected[2] = expected[4].sub(\"./dir1/\", \"./dir0/\")\n    assert_equal(expected.join(\"\"), coll.manifest_text)\n  end\n\n  def test_copy_stream_over_file_raises_ENOTDIR(source=\"./s1\", target=\"./f2\")\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    assert_raises(Errno::ENOTDIR) do\n      coll.cp_r(source, target)\n    end\n  end\n\n  def test_copy_file_under_file_raises_ENOTDIR\n    test_copy_stream_over_file_raises_ENOTDIR(\"./f1\", \"./f2/newfile\")\n  end\n\n  def test_copy_stream_over_nonempty_stream_merges_and_overwrites\n    blocks = random_blocks(3, 9)\n    manifest_a =\n      [\"./subdir #{blocks[0]} 0:1:s1 1:2:zero\\n\",\n       \"./zdir #{blocks[1]} 0:9:zfile\\n\",\n       \"./zdir/subdir #{blocks[2]} 0:1:s2 1:2:zero\\n\"]\n    coll = Arv::Collection.new(manifest_a.join(\"\"))\n    coll.cp_r(\"./subdir\", \"./zdir\")\n    manifest_a[2] = \"./zdir/subdir %s %s 0:1:s1 
9:1:s2 1:2:zero\\n\" %\n      [blocks[0], blocks[2]]\n    assert_equal(manifest_a.join(\"\"), coll.manifest_text)\n  end\n\n  def test_copy_stream_into_substream(source=\"./dir1\",\n                                      target=\"./dir1/subdir/dir1\")\n    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)\n    coll.cp_r(source, target)\n    expected = MULTILEVEL_MANIFEST.lines.flat_map do |line|\n      [line, line.gsub(/^#{Regexp.escape(source)}([\\/ ])/, \"#{target}\\\\1\")].uniq\n    end\n    assert_equal(expected.sort.join(\"\"), coll.manifest_text)\n  end\n\n  def test_copy_root\n    test_copy_stream_into_substream(\".\", \"./root\")\n  end\n\n  def test_adding_to_root_after_copy\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    coll.cp_r(\".\", \"./root\")\n    src_coll = Arv::Collection.new(COLON_FILENAME_MANIFEST)\n    coll.cp_r(\"./file:test.txt\", \".\", src_coll)\n    got_lines = coll.manifest_text.lines\n    assert_equal(2, got_lines.size)\n    assert_match(/^\\. \\S{33,} \\S{33,} 0:9:file:test\\.txt 9:9:simple\\.txt\\n/,\n                 got_lines.first)\n    assert_equal(SIMPLEST_MANIFEST.sub(\". \", \"./root \"), got_lines.last)\n  end\n\n  def test_copy_chaining\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    coll.cp_r(\"./simple.txt\", \"./a\").cp_r(\"./a\", \"./b\")\n    assert_equal(SIMPLEST_MANIFEST.sub(\" 0:9:\", \" 0:9:a 0:9:b 0:9:\"),\n                 coll.manifest_text)\n  end\n\n  def prep_two_collections_for_copy(src_stream, dst_stream)\n    blocks = random_blocks(2, 8)\n    src_text = \"#{src_stream} #{blocks.first} 0:8:f1\\n\"\n    dst_text = \"#{dst_stream} #{blocks.last} 0:8:f2\\n\"\n    return [blocks, src_text, dst_text,\n            Arv::Collection.new(src_text.dup),\n            Arv::Collection.new(dst_text.dup)]\n  end\n\n  def test_copy_file_from_other_collection(src_stream=\".\", dst_stream=\"./s1\")\n    blocks, src_text, dst_text, src_coll, dst_coll =\n      prep_two_collections_for_copy(src_stream, dst_stream)\n    dst_coll.cp_r(\"#{src_stream}/f1\", dst_stream, src_coll)\n    assert_equal(\"#{dst_stream} #{blocks.join(' ')} 0:8:f1 8:8:f2\\n\",\n                 dst_coll.manifest_text)\n    assert_equal(src_text, src_coll.manifest_text)\n  end\n\n  def test_copy_file_from_other_collection_to_root\n    test_copy_file_from_other_collection(\"./s1\", \".\")\n  end\n\n  def test_copy_stream_from_other_collection\n    blocks, src_text, dst_text, src_coll, dst_coll =\n      prep_two_collections_for_copy(\"./s2\", \"./s1\")\n    dst_coll.cp_r(\"./s2\", \"./s1\", src_coll)\n    assert_equal(dst_text + src_text.sub(\"./s2 \", \"./s1/s2 \"),\n                 dst_coll.manifest_text)\n    assert_equal(src_text, src_coll.manifest_text)\n  end\n\n  def test_copy_stream_from_other_collection_to_root\n    blocks, src_text, dst_text, src_coll, dst_coll =\n      prep_two_collections_for_copy(\"./s1\", \".\")\n    dst_coll.cp_r(\"./s1\", \".\", src_coll)\n    assert_equal(dst_text + src_text, dst_coll.manifest_text)\n    assert_equal(src_text, src_coll.manifest_text)\n  end\n\n  def test_copy_stream_contents\n    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)\n    coll.cp_r(\"./dir0/subdir/\", \"./dir1/subdir\")\n    expect_lines = MULTILEVEL_MANIFEST.lines\n    expect_lines[4] = expect_lines[2].sub(\"./dir0/\", \"./dir1/\")\n    assert_equal(expect_lines.join(\"\"), coll.manifest_text)\n  end\n\n  def test_copy_file_into_new_stream_with_implicit_filename\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    coll.cp_r(\"./simple.txt\", \"./new/\")\n    
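# The trailing slash makes \"./new/\" resolve as a stream target, so cp_r\n    # keeps the source basename and the copy lands at \"./new/simple.txt\".\n    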
assert_equal(SIMPLEST_MANIFEST + SIMPLEST_MANIFEST.sub(\". \", \"./new \"),\n                 coll.manifest_text)\n  end\n\n  def test_copy_file_into_new_stream_with_explicit_filename\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    coll.cp_r(\"./simple.txt\", \"./new/newfile.txt\")\n    new_line = SIMPLEST_MANIFEST.sub(\". \", \"./new \").sub(\":simple\", \":newfile\")\n    assert_equal(SIMPLEST_MANIFEST + new_line, coll.manifest_text)\n  end\n\n  def test_copy_stream_contents_into_root\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    coll.cp_r(\"./s1/\", \".\")\n    assert_equal(\". %s 0:5:f1 14:4:f2 5:4:f3\\n%s\" %\n                 [TWO_BY_TWO_BLOCKS.reverse.join(\" \"),\n                  TWO_BY_TWO_MANIFEST_A.last],\n                 coll.manifest_text)\n  end\n\n  def test_copy_root_contents_into_stream\n    # This is especially fun, because we're copying a parent into its child.\n    # Make sure that happens depth-first.\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    coll.cp_r(\"./\", \"./s1\")\n    assert_equal(\"%s./s1 %s 0:5:f1 5:4:f2 14:4:f3\\n%s\" %\n                 [TWO_BY_TWO_MANIFEST_A.first, TWO_BY_TWO_BLOCKS.join(\" \"),\n                  TWO_BY_TWO_MANIFEST_A.last.sub(\"./s1 \", \"./s1/s1 \")],\n                 coll.manifest_text)\n  end\n\n  def test_copy_stream_contents_across_collections\n    block = random_block(8)\n    src_coll = Arv::Collection.new(\"./s1 #{block} 0:8:f1\\n\")\n    dst_coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    dst_coll.cp_r(\"./s1/\", \"./s1\", src_coll)\n    assert_equal(\"%s./s1 %s %s 0:8:f1 13:4:f3\\n\" %\n                 [TWO_BY_TWO_MANIFEST_A.first, block, TWO_BY_TWO_BLOCKS.last],\n                 dst_coll.manifest_text)\n  end\n\n  def test_copy_root_contents_across_collections\n    block = random_block(8)\n    src_coll = Arv::Collection.new(\". #{block} 0:8:f1\\n\")\n    dst_coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    dst_coll.cp_r(\"./\", \".\", src_coll)\n    assert_equal(\". %s %s 0:8:f1 13:4:f2\\n%s\" %\n                 [block, TWO_BY_TWO_BLOCKS.first, TWO_BY_TWO_MANIFEST_A.last],\n                 dst_coll.manifest_text)\n  end\n\n  def test_copy_root_into_empty_collection\n    block = random_block(8)\n    src_coll = Arv::Collection.new(\". #{block} 0:8:f1\\n\")\n    dst_coll = Arv::Collection.new()\n    dst_coll.cp_r(\"./\", \".\", src_coll)\n    assert_equal(\". %s 0:8:f1\\n\" %\n                 [block],\n                 dst_coll.manifest_text)\n  end\n\n  def test_copy_with_repeated_blocks\n    blocks = abcde_blocks\n    src_coll = Arv::Collection.new(\". #{blocks[0]} #{blocks[1]} #{blocks[2]} #{blocks[0]} #{blocks[1]} #{blocks[2]} #{blocks[3]} #{blocks[4]} 27:27:f1\\n\")\n    dst_coll = Arv::Collection.new()\n    dst_coll.cp_r(\"f1\", \"./\", src_coll)\n    assert_equal(\". #{blocks[0]} #{blocks[1]} #{blocks[2]} 0:27:f1\\n\", dst_coll.manifest_text, \"mangled by cp_r\")\n  end\n\n  def test_copy_with_repeated_split_blocks\n    blocks = abcde_blocks\n    src_coll = Arv::Collection.new(\". #{blocks[0]} #{blocks[1]} #{blocks[2]} #{blocks[0]} #{blocks[1]} #{blocks[2]} #{blocks[3]} #{blocks[4]} 20:27:f1\\n\")\n    dst_coll = Arv::Collection.new()\n    src_coll.normalize\n    assert_equal(\". #{blocks[2]} #{blocks[0]} #{blocks[1]} #{blocks[2]} 2:27:f1\\n\", src_coll.manifest_text, \"mangled by normalize()\")\n    dst_coll.cp_r(\"f1\", \"./\", src_coll)\n    assert_equal(\". 
#{blocks[2]} #{blocks[0]} #{blocks[1]} #{blocks[2]} 2:27:f1\\n\", dst_coll.manifest_text, \"mangled by cp_r\")\n  end\n\n  def test_copy_empty_source_path_raises_ArgumentError(src=\"\", dst=\"./s1\")\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    assert_raises(ArgumentError) do\n      coll.cp_r(src, dst)\n    end\n  end\n\n  def test_copy_empty_destination_path_raises_ArgumentError\n    test_copy_empty_source_path_raises_ArgumentError(\".\", \"\")\n  end\n\n  ### .each_file_path\n\n  def test_each_file_path\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    if block_given?\n      result = yield(coll)\n    else\n      result = []\n      coll.each_file_path { |path| result << path }\n    end\n    assert_equal([\"./f1\", \"./f2\", \"./s1/f1\", \"./s1/f3\"], result.sort)\n  end\n\n  def test_each_file_path_without_block\n    test_each_file_path { |coll| coll.each_file_path.to_a }\n  end\n\n  def test_each_file_path_empty_collection\n    assert_empty(Arv::Collection.new.each_file_path.to_a)\n  end\n\n  def test_each_file_path_after_collection_emptied\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    coll.rm(\"simple.txt\")\n    assert_empty(coll.each_file_path.to_a)\n  end\n\n  def test_each_file_path_deduplicates_manifest_listings\n    coll = Arv::Collection.new(MULTIBLOCK_FILE_MANIFEST)\n    assert_equal([\"./repfile\", \"./s1/repfile\", \"./s1/uniqfile\",\n                  \"./uniqfile\", \"./uniqfile2\"],\n                 coll.each_file_path.to_a.sort)\n  end\n\n  ### .exist?\n\n  def test_exist(test_method=:assert, path=\"f2\")\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    send(test_method, coll.exist?(path))\n  end\n\n  def test_file_not_exist\n    test_exist(:refute, \"f3\")\n  end\n\n  def test_stream_exist\n    test_exist(:assert, \"s1\")\n  end\n\n  def test_file_inside_stream_exist\n    test_exist(:assert, \"s1/f1\")\n  end\n\n  def test_path_inside_stream_not_exist\n    test_exist(:refute, \"s1/f2\")\n  end\n\n  def test_path_under_file_not_exist\n    test_exist(:refute, \"f2/nonexistent\")\n  end\n\n  def test_deep_substreams_not_exist\n    test_exist(:refute, \"a/b/c/d/e/f/g\")\n  end\n\n  ### .rename\n\n  def test_simple_file_rename\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    coll.rename(\"./simple.txt\", \"./new\")\n    assert_equal(SIMPLEST_MANIFEST.sub(\":simple.txt\", \":new\"),\n                 coll.manifest_text)\n  end\n\n  def test_rename_file_into_other_stream(target=\"./s1/f2\", basename=\"f2\")\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    coll.rename(\"./f2\", target)\n    expected = \". %s 0:5:f1\\n./s1 %s 0:5:f1 14:4:%s 5:4:f3\\n\" %\n      [TWO_BY_TWO_BLOCKS.first,\n       TWO_BY_TWO_BLOCKS.reverse.join(\" \"), basename]\n    assert_equal(expected, coll.manifest_text)\n  end\n\n  def test_implicit_rename_file_into_other_stream\n    test_rename_file_into_other_stream(\"./s1\")\n  end\n\n  def test_rename_file_into_other_stream_with_new_name\n    test_rename_file_into_other_stream(\"./s1/f2a\", \"f2a\")\n  end\n\n  def test_rename_file_over_in_other_stream(target=\"./s1/f1\")\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    coll.rename(\"./f1\", target)\n    expected = \". 
%s 5:4:f2\\n./s1 %s 0:5:f1 14:4:f3\\n\" %\n      [TWO_BY_TWO_BLOCKS.first, TWO_BY_TWO_BLOCKS.join(\" \")]\n    assert_equal(expected, coll.manifest_text)\n  end\n\n  def test_implicit_rename_file_over_in_other_stream\n    test_rename_file_over_in_other_stream(\"./s1\")\n  end\n\n  def test_simple_stream_rename\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    coll.rename(\"./s1\", \"./newS\")\n    assert_equal(TWO_BY_TWO_MANIFEST_S.sub(\"\\n./s1 \", \"\\n./newS \"),\n                 coll.manifest_text)\n  end\n\n  def test_rename_stream_into_other_stream(target=\"./dir2/subdir\",\n                                           basename=\"subdir\")\n    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)\n    coll.rename(\"./dir1/subdir\", target)\n    expected = MULTILEVEL_MANIFEST.lines\n    replaced_line = expected.delete_at(4)\n    expected << replaced_line.sub(\"./dir1/subdir \", \"./dir2/#{basename} \")\n    assert_equal(expected.join(\"\"), coll.manifest_text)\n  end\n\n  def test_implicit_rename_stream_into_other_stream\n    test_rename_stream_into_other_stream(\"./dir2\")\n  end\n\n  def test_rename_stream_into_other_stream_with_new_name\n    test_rename_stream_into_other_stream(\"./dir2/newsub\", \"newsub\")\n  end\n\n  def test_rename_stream_over_empty_stream\n    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)\n    (1..3).each do |file_num|\n      coll.rm(\"./dir0/subdir/file#{file_num}\")\n    end\n    coll.rename(\"./dir1/subdir\", \"./dir0\")\n    expected = MULTILEVEL_MANIFEST.lines\n    expected[2] = expected.delete_at(4).sub(\"./dir1/\", \"./dir0/\")\n    assert_equal(expected.sort.join(\"\"), coll.manifest_text)\n  end\n\n  def test_rename_stream_over_file_raises_ENOTDIR\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    assert_raises(Errno::ENOTDIR) do\n      coll.rename(\"./s1\", \"./f2\")\n    end\n  end\n\n  def test_rename_stream_over_nonempty_stream_raises_ENOTEMPTY\n    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)\n    assert_raises(Errno::ENOTEMPTY) do\n      coll.rename(\"./dir1/subdir\", \"./dir0\")\n    end\n  end\n\n  def test_rename_stream_into_substream(source=\"./dir1\",\n                                        target=\"./dir1/subdir/dir1\")\n    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)\n    coll.rename(source, target)\n    assert_equal(MULTILEVEL_MANIFEST.gsub(/^#{Regexp.escape(source)}([\\/ ])/m,\n                                          \"#{target}\\\\1\"),\n                 coll.manifest_text)\n  end\n\n  def test_rename_root\n    test_rename_stream_into_substream(\".\", \"./root\")\n  end\n\n  def test_adding_to_root_after_rename\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    coll.rename(\".\", \"./root\")\n    src_coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    coll.cp_r(\"./simple.txt\", \".\", src_coll)\n    assert_equal(SIMPLEST_MANIFEST + SIMPLEST_MANIFEST.sub(\". 
\", \"./root \"),\n                 coll.manifest_text)\n  end\n\n  def test_rename_chaining\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    coll.rename(\"./simple.txt\", \"./x\").rename(\"./x\", \"./simple.txt\")\n    assert_equal(SIMPLEST_MANIFEST, coll.manifest_text)\n  end\n\n  ### .rm\n\n  def test_simple_remove\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S.dup)\n    coll.rm(\"./f2\")\n    assert_equal(TWO_BY_TWO_MANIFEST_S.sub(\" 5:4:f2\", \"\"), coll.manifest_text)\n  end\n\n  def empty_stream_and_assert(expect_index=0)\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    yield coll\n    assert_equal(TWO_BY_TWO_MANIFEST_A[expect_index], coll.manifest_text)\n  end\n\n  def test_remove_all_files_in_substream\n    empty_stream_and_assert do |coll|\n      coll.rm(\"./s1/f1\")\n      coll.rm(\"./s1/f3\")\n    end\n  end\n\n  def test_remove_all_files_in_root_stream\n    empty_stream_and_assert(1) do |coll|\n      coll.rm(\"./f1\")\n      coll.rm(\"./f2\")\n    end\n  end\n\n  def test_chaining_removes\n    empty_stream_and_assert do |coll|\n      coll.rm(\"./s1/f1\").rm(\"./s1/f3\")\n    end\n  end\n\n  def test_remove_last_file\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    coll.rm(\"./simple.txt\")\n    assert_equal(\"\", coll.manifest_text)\n  end\n\n  def test_remove_nonexistent_file_raises_ENOENT(path=\"./NoSuchFile\",\n                                                 method=:rm)\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    assert_raises(Errno::ENOENT) do\n      coll.send(method, path)\n    end\n  end\n\n  def test_remove_from_nonexistent_stream_raises_ENOENT\n    test_remove_nonexistent_file_raises_ENOENT(\"./NoSuchStream/simple.txt\")\n  end\n\n  def test_remove_stream_raises_EISDIR(path=\"./s1\")\n    coll = Arv::Collection.new(TWO_BY_TWO_MANIFEST_S)\n    assert_raises(Errno::EISDIR) do\n      coll.rm(path)\n    end\n  end\n\n  def test_remove_root_raises_EISDIR\n    test_remove_stream_raises_EISDIR(\".\")\n  end\n\n  def test_remove_empty_string_raises_ArgumentError\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    assert_raises(ArgumentError) do\n      coll.rm(\"\")\n    end\n  end\n\n  ### rm_r\n\n  def test_recursive_remove\n    empty_stream_and_assert do |coll|\n      coll.rm_r(\"./s1\")\n    end\n  end\n\n  def test_recursive_remove_on_files\n    empty_stream_and_assert do |coll|\n      coll.rm_r(\"./s1/f1\")\n      coll.rm_r(\"./s1/f3\")\n    end\n  end\n\n  def test_recursive_remove_root\n    coll = Arv::Collection.new(MULTILEVEL_MANIFEST)\n    coll.rm_r(\".\")\n    assert_equal(\"\", coll.manifest_text)\n  end\n\n  def test_rm_r_nonexistent_file_raises_ENOENT(path=\"./NoSuchFile\")\n    test_remove_nonexistent_file_raises_ENOENT(\"./NoSuchFile\", :rm_r)\n  end\n\n  def test_rm_r_from_nonexistent_stream_raises_ENOENT\n    test_remove_nonexistent_file_raises_ENOENT(\"./NoSuchStream/file\", :rm_r)\n  end\n\n  def test_rm_r_empty_string_raises_ArgumentError\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    assert_raises(ArgumentError) do\n      coll.rm_r(\"\")\n    end\n  end\n\n  ### .modified?\n\n  def test_new_collection_unmodified(*args)\n    coll = Arv::Collection.new(*args)\n    yield coll if block_given?\n    refute(coll.modified?)\n  end\n\n  def test_collection_unmodified_after_instantiation\n    test_new_collection_unmodified(SIMPLEST_MANIFEST)\n  end\n\n  def test_collection_unmodified_after_mark\n    test_new_collection_unmodified(SIMPLEST_MANIFEST) do |coll|\n      coll.cp_r(\"./simple.txt\", \"./copy\")\n   
   coll.unmodified\n    end\n  end\n\n  def check_collection_modified\n    coll = Arv::Collection.new(SIMPLEST_MANIFEST)\n    yield coll\n    assert(coll.modified?)\n  end\n\n  def test_collection_modified_after_copy\n    check_collection_modified do |coll|\n      coll.cp_r(\"./simple.txt\", \"./copy\")\n    end\n  end\n\n  def test_collection_modified_after_remove\n    check_collection_modified do |coll|\n      coll.rm(\"./simple.txt\")\n    end\n  end\n\n  def test_collection_modified_after_rename\n    check_collection_modified do |coll|\n      coll.rename(\"./simple.txt\", \"./newname\")\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby/test/test_keep_manifest.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire \"arvados/keep\"\nrequire \"minitest/autorun\"\nrequire \"sdk_fixtures\"\n\nclass ManifestTest < Minitest::Test\n  include SDKFixtures\n\n  def check_stream(stream, exp_name, exp_blocks, exp_files)\n    assert_equal(exp_name, stream.first)\n    assert_equal(exp_blocks, stream[1].map(&:to_s))\n    assert_equal(exp_files, stream.last)\n  end\n\n  def test_simple_each_line_array\n    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)\n    stream_name, block_s, file = SIMPLEST_MANIFEST.strip.split\n    stream_a = manifest.each_line.to_a\n    assert_equal(1, stream_a.size, \"wrong number of streams\")\n    check_stream(stream_a.first, stream_name, [block_s], [file])\n  end\n\n  def test_simple_each_line_block\n    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)\n    result = []\n    manifest.each_line do |stream, blocks, files|\n      result << files\n    end\n    assert_equal([[SIMPLEST_MANIFEST.split.last]], result,\n                 \"wrong result from each_line block\")\n  end\n\n  def test_multilevel_each_line\n    manifest = Keep::Manifest.new(MULTILEVEL_MANIFEST)\n    seen = []\n    manifest.each_line do |stream, blocks, files|\n      refute(seen.include?(stream),\n             \"each_line already yielded stream #{stream}\")\n      seen << stream\n      assert_equal(3, files.size, \"wrong file count for stream #{stream}\")\n    end\n    assert_equal(MULTILEVEL_MANIFEST.count(\"\\n\"), seen.size,\n                 \"wrong number of streams\")\n  end\n\n  def test_empty_each_line\n    assert_empty(Keep::Manifest.new(\"\").each_line.to_a)\n  end\n\n  def test_empty_each_file_spec\n    assert_empty(Keep::Manifest.new(\"\").each_file_spec.to_a)\n  end\n\n  def test_empty_files\n    assert_empty(Keep::Manifest.new(\"\").files)\n  end\n\n  def test_empty_files_count\n    assert_equal(0, Keep::Manifest.new(\"\").files_count)\n  end\n\n  def test_empty_dir_files_count\n    assert_equal(0,\n      Keep::Manifest.new(\"./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\056\\n\").files_count)\n  end\n\n  def test_empty_files_size\n    assert_equal(0, Keep::Manifest.new(\"\").files_size)\n  end\n\n  def test_empty_has_file?\n    refute(Keep::Manifest.new(\"\").has_file?(\"\"))\n  end\n\n  def test_empty_line_within_manifest\n    block_s = random_block\n    manifest = Keep::Manifest.\n      new([\". #{block_s} 0:1:file1 1:2:file2\\n\",\n           \"\\n\",\n           \". 
#{block_s} 3:3:file3 6:4:file4\\n\"].join(\"\"))\n    streams = manifest.each_line.to_a\n    assert_equal(2, streams.size)\n    check_stream(streams[0], \".\", [block_s], [\"0:1:file1\", \"1:2:file2\"])\n    check_stream(streams[1], \".\", [block_s], [\"3:3:file3\", \"6:4:file4\"])\n  end\n\n  def test_backslash_escape_parsing\n    manifest = Keep::Manifest.new(MANY_ESCAPES_MANIFEST)\n    streams = manifest.each_line.to_a\n    assert_equal(1, streams.size, \"wrong number of streams with whitespace\")\n    assert_equal(\"./dir name\", streams.first.first,\n                 \"wrong stream name with whitespace\")\n    assert_equal([\"0:9:file\\\\name\\t\\\\here.txt\"], streams.first.last,\n                 \"wrong filename(s) with whitespace\")\n  end\n\n  def test_simple_files\n    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)\n    assert_equal([[\".\", \"simple.txt\", 9]], manifest.files)\n  end\n\n  def test_multilevel_files\n    manifest = Keep::Manifest.new(MULTILEVEL_MANIFEST)\n    seen = Hash.new { |this, key| this[key] = [] }\n    manifest.files.each do |stream, basename, size|\n      refute(seen[stream].include?(basename),\n             \"each_file repeated #{stream}/#{basename}\")\n      seen[stream] << basename\n      assert_equal(3, size, \"wrong size for #{stream}/#{basename}\")\n    end\n    seen.each_pair do |stream, basenames|\n      assert_equal(%w(file1 file2 file3), basenames.sort,\n                   \"wrong file list for #{stream}\")\n    end\n  end\n\n  def test_files_with_colons_in_names\n    manifest = Keep::Manifest.new(COLON_FILENAME_MANIFEST)\n    assert_equal([[\".\", \"file:test.txt\", 9]], manifest.files)\n  end\n\n  def test_files_with_escape_sequence_in_filename\n    manifest = Keep::Manifest.new(ESCAPED_FILENAME_MANIFEST)\n    assert_equal([[\".\", \"a a.txt\", 9]], manifest.files)\n  end\n\n  def test_files_spanning_multiple_blocks\n    manifest = Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST)\n    assert_equal([[\".\", \"repfile\", 5],\n                  [\".\", \"uniqfile\", 4],\n                  [\".\", \"uniqfile2\", 7],\n                  [\"./s1\", \"repfile\", 3],\n                  [\"./s1\", \"uniqfile\", 3]],\n                 manifest.files.sort)\n  end\n\n  def test_minimum_file_count_simple\n    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)\n    assert(manifest.minimum_file_count?(1), \"real minimum file count false\")\n    refute(manifest.minimum_file_count?(2), \"fake minimum file count true\")\n  end\n\n  def test_minimum_file_count_multiblock\n    manifest = Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST)\n    assert(manifest.minimum_file_count?(2), \"low minimum file count false\")\n    assert(manifest.minimum_file_count?(5), \"real minimum file count false\")\n    refute(manifest.minimum_file_count?(6), \"fake minimum file count true\")\n  end\n\n  def test_exact_file_count_simple\n    manifest = Keep::Manifest.new(SIMPLEST_MANIFEST)\n    assert(manifest.exact_file_count?(1), \"exact file count false\")\n    refute(manifest.exact_file_count?(0), \"-1 file count true\")\n    refute(manifest.exact_file_count?(2), \"+1 file count true\")\n  end\n\n  def test_exact_file_count_multiblock\n    manifest = Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST)\n    assert(manifest.exact_file_count?(5), \"exact file count false\")\n    refute(manifest.exact_file_count?(4), \"-1 file count true\")\n    refute(manifest.exact_file_count?(6), \"+1 file count true\")\n  end\n\n  def test_files_size_multiblock\n    assert_equal(22, 
Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST).files_size)\n  end\n\n  def test_files_size_with_skipped_overlapping_data\n    manifest = Keep::Manifest.new(\". #{random_block(9)} 3:3:f1 5:3:f2\\n\")\n    assert_equal(6, manifest.files_size)\n  end\n\n  def test_has_file\n    manifest = Keep::Manifest.new(MULTIBLOCK_FILE_MANIFEST)\n    assert(manifest.has_file?(\"./repfile\"), \"one-arg repfile not found\")\n    assert(manifest.has_file?(\".\", \"repfile\"), \"two-arg repfile not found\")\n    assert(manifest.has_file?(\"./s1/repfile\"), \"one-arg s1/repfile not found\")\n    assert(manifest.has_file?(\"./s1\", \"repfile\"), \"two-arg s1/repfile not found\")\n    refute(manifest.has_file?(\"./s1/uniqfile2\"), \"one-arg missing file found\")\n    refute(manifest.has_file?(\"./s1\", \"uniqfile2\"), \"two-arg missing file found\")\n    refute(manifest.has_file?(\"./s2/repfile\"), \"one-arg missing stream found\")\n    refute(manifest.has_file?(\"./s2\", \"repfile\"), \"two-arg missing stream found\")\n  end\n\n  def test_has_file_with_spaces\n    manifest = Keep::Manifest.new(ESCAPED_FILENAME_MANIFEST)\n    assert(manifest.has_file?(\"./a a.txt\"), \"one-arg path not found\")\n    assert(manifest.has_file?(\".\", \"a a.txt\"), \"two-arg path not found\")\n    refute(manifest.has_file?(\"a\\\\040\\\\141\"), \"one-arg unescaped found\")\n    refute(manifest.has_file?(\".\", \"a\\\\040\\\\141\"), \"two-arg unescaped found\")\n  end\n\n  def test_parse_all_fixtures\n    fixtures('collections').each do |name, collection|\n      parse_collection_manifest name, collection\n    end\n  end\n\n  def test_raise_on_bogus_fixture\n    assert_raises ArgumentError do\n      parse_collection_manifest('bogus collection',\n                                {'manifest_text' => \". zzz 0:\\n\"})\n    end\n  end\n\n  def parse_collection_manifest name, collection\n    manifest = Keep::Manifest.new(collection['manifest_text'])\n    manifest.each_file_spec do |stream_name, start_pos, file_size, file_name|\n      assert_kind_of String, stream_name\n      assert_kind_of Integer, start_pos\n      assert_kind_of Integer, file_size\n      assert_kind_of String, file_name\n      assert !stream_name.empty?, \"empty stream_name in #{name} fixture\"\n      assert !file_name.empty?, \"empty file_name in #{name} fixture\"\n    end\n  end\n\n  def test_collection_with_dirs_in_filenames\n    manifest = Keep::Manifest.new(MANIFEST_WITH_DIRS_IN_FILENAMES)\n\n    seen = Hash.new { |this, key| this[key] = [] }\n\n    manifest.files.each do |stream, basename, size|\n      refute(seen[stream].include?(basename), \"each_file repeated #{stream}/#{basename}\")\n      assert_equal(3, size, \"wrong size for #{stream}/#{basename}\")\n      seen[stream] << basename\n    end\n\n    assert_equal(%w(. ./dir1 ./dir1/dir2), seen.keys)\n\n    seen.each_pair do |stream, basenames|\n      assert_equal(%w(file1), basenames.sort, \"wrong file list for #{stream}\")\n    end\n  end\n\n  def test_multilevel_collection_with_dirs_in_filenames\n    manifest = Keep::Manifest.new(MULTILEVEL_MANIFEST_WITH_DIRS_IN_FILENAMES)\n\n    seen = Hash.new { |this, key| this[key] = [] }\n    expected_sizes = {'.' => 3, './dir1' => 6, './dir1/dir2' => 11}\n\n    manifest.files.each do |stream, basename, size|\n      refute(seen[stream].include?(basename), \"each_file repeated #{stream}/#{basename}\")\n      assert_equal(expected_sizes[stream], size, \"wrong size for #{stream}/#{basename}\")\n      seen[stream] << basename\n    end\n\n    assert_equal(%w(. 
./dir1 ./dir1/dir2), seen.keys)\n\n    seen.each_pair do |stream, basenames|\n      assert_equal(%w(file1), basenames.sort, \"wrong file list for #{stream}\")\n    end\n  end\n\n  [[false, nil],\n   [false, '+0'],\n   [false, 'd41d8cd98f00b204e9800998ecf8427'],\n   [false, 'd41d8cd98f00b204e9800998ecf8427+0'],\n   [false, 'd41d8cd98f00b204e9800998ecf8427e0'],\n   [false, 'd41d8cd98f00b204e9800998ecf8427e0+0'],\n   [false, 'd41d8cd98f00b204e9800998ecf8427e+0 '],\n   [false, \"d41d8cd98f00b204e9800998ecf8427e+0\\n\"],\n   [false, ' d41d8cd98f00b204e9800998ecf8427e+0'],\n   [false, 'd41d8cd98f00b204e9800998ecf8427e+K+0'],\n   [false, 'd41d8cd98f00b204e9800998ecf8427e+0+0'],\n   [false, 'd41d8cd98f00b204e9800998ecf8427e++'],\n   [false, 'd41d8cd98f00b204e9800998ecf8427e+0+K+'],\n   [false, 'd41d8cd98f00b204e9800998ecf8427e+0++K'],\n   [false, 'd41d8cd98f00b204e9800998ecf8427e+0+K++'],\n   [false, 'd41d8cd98f00b204e9800998ecf8427e+0+K++Z'],\n   [true, 'd41d8cd98f00b204e9800998ecf8427e', nil,nil,nil],\n   [true, 'd41d8cd98f00b204e9800998ecf8427e+0', '+0','0',nil],\n   [true, 'd41d8cd98f00b204e9800998ecf8427e+0+Fizz+Buzz','+0','0','+Fizz+Buzz'],\n   [true, 'd41d8cd98f00b204e9800998ecf8427e+Fizz+Buzz', nil,nil,'+Fizz+Buzz'],\n   [true, 'd41d8cd98f00b204e9800998ecf8427e+0+Ad41d8cd98f00b204e9800998ecf8427e00000000+Foo', '+0','0','+Ad41d8cd98f00b204e9800998ecf8427e00000000+Foo'],\n   [true, 'd41d8cd98f00b204e9800998ecf8427e+Ad41d8cd98f00b204e9800998ecf8427e00000000+Foo', nil,nil,'+Ad41d8cd98f00b204e9800998ecf8427e00000000+Foo'],\n   [true, 'd41d8cd98f00b204e9800998ecf8427e+0+Z', '+0','0','+Z'],\n   [true, 'd41d8cd98f00b204e9800998ecf8427e+Z', nil,nil,'+Z'],\n  ].each do |ok, locator, match2, match3, match4|\n    define_method \"test_LOCATOR_REGEXP_on_#{locator.inspect}\" do\n      match = Keep::Locator::LOCATOR_REGEXP.match locator\n      assert_equal ok, !!match\n      if ok\n        assert_equal match2, match[2]\n        assert_equal match3, match[3]\n        assert_equal match4, match[4]\n      end\n    end\n    define_method \"test_parse_method_on_#{locator.inspect}\" do\n      loc = Keep::Locator.parse locator\n      if !ok\n        assert_nil loc\n      else\n        refute_nil loc\n        assert loc.is_a?(Keep::Locator)\n        #assert loc.hash\n        #assert loc.size\n        #assert loc.hints.is_a?(Array)\n      end\n    end\n  end\n\n  [\n    [false, nil, \"No manifest found\"],\n    [true, \"\"],\n    [false, \" \", \"Invalid manifest: does not end with newline\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e a41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\\n\"], # 2 locators\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/bar.txt\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:.foo.txt\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:.foo\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:...\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:.../.foo./.../bar\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/...\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/.../bar\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/.bar/baz.txt\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/bar./baz.txt\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 000000000000000000000000000000:0777:foo.txt\\n\"],\n    [true, \". 
d41d8cd98f00b204e9800998ecf8427e+0 0:0:0:0\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\040\\n\"],\n    [true, \". 00000000000000000000000000000000+0 0:0:0\\n\"],\n    [true, \". 00000000000000000000000000000000+0 0:0:d41d8cd98f00b204e9800998ecf8427e+0+Ad41d8cd98f00b204e9800998ecf8427e00000000@ffffffff\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0+Ad41d8cd98f00b204e9800998ecf8427e00000000@ffffffff 0:0:empty.txt\\n\"],\n    [true, \"./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:.\\n\"],\n    [false, '. d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt',\n      \"Invalid manifest: does not end with newline\"],\n    [false, \"abc d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\\n\",\n      \"invalid stream name \\\"abc\\\"\"],\n    [false, \"abc/./foo d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\\n\",\n      \"invalid stream name \\\"abc/./foo\\\"\"],\n    [false, \"./abc/../foo d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\\n\",\n      \"invalid stream name \\\"./abc/../foo\\\"\"],\n    [false, \"./abc/. d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\\n\",\n      \"invalid stream name \\\"./abc/.\\\"\"],\n    [false, \"./abc/.. d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\\n\",\n      \"invalid stream name \\\"./abc/..\\\"\"],\n    [false, \"./abc/./foo d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt\\n\",\n      \"invalid stream name \\\"./abc/./foo\\\"\"],\n    # non-empty '.'-named file tokens aren't acceptable. Empty ones are used as empty dir placeholders.\n    [false, \". 8cf8463b34caa8ac871a52d5dd7ad1ef+1 0:1:.\\n\",\n      \"invalid file token \\\"0:1:.\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e 0:0:..\\n\",\n      \"invalid file token \\\"0:0:..\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e 0:0:./abc.txt\\n\",\n      \"invalid file token \\\"0:0:./abc.txt\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e 0:0:../abc.txt\\n\",\n      \"invalid file token \\\"0:0:../abc.txt\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt/.\\n\",\n      \"invalid file token \\\"0:0:abc.txt/.\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e 0:0:abc.txt/..\\n\",\n      \"invalid file token \\\"0:0:abc.txt/..\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e 0:0:a/./bc.txt\\n\",\n      \"invalid file token \\\"0:0:a/./bc.txt\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e 0:0:a/../bc.txt\\n\",\n      \"invalid file token \\\"0:0:a/../bc.txt\\\"\"],\n    [false, \"d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\\n\",\n      \"invalid stream name \\\"d41d8cd98f00b204e9800998ecf8427e+0\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427 0:0:abc.txt\\n\",\n      \"invalid locator \\\"d41d8cd98f00b204e9800998ecf8427\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e\\n\",\n      \"Manifest invalid for stream 1: no file tokens\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\\n/dir1 d41d8cd98f00b204e9800998ecf842 0:0:abc.txt\\n\",\n      \"Manifest invalid for stream 2: missing or invalid stream name \\\"/dir1\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\\n./dir1 d41d8cd98f00b204e9800998ecf842 0:0:abc.txt\\n\",\n      \"Manifest invalid for stream 2: missing or invalid locator \\\"d41d8cd98f00b204e9800998ecf842\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\\n./dir1 a41d8cd98f00b204e9800998ecf8427e+0 abc.txt\\n\",\n      \"Manifest invalid for stream 2: invalid file token \\\"abc.txt\\\"\"],\n    [false, \". 
d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\\n./dir1 a41d8cd98f00b204e9800998ecf8427e+0 0:abc.txt\\n\",\n      \"Manifest invalid for stream 2: invalid file token \\\"0:abc.txt\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt\\n./dir1 a41d8cd98f00b204e9800998ecf8427e+0 0:0:abc.txt xyz.txt\\n\",\n      \"Manifest invalid for stream 2: invalid file token \\\"xyz.txt\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt d41d8cd98f00b204e9800998ecf8427e+0\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"d41d8cd98f00b204e9800998ecf8427e+0\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"0:0:\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0\\n\",\n      \"Manifest invalid for stream 1: no file tokens\"],\n    [false, \". 0:0:foo.txt d41d8cd98f00b204e9800998ecf8427e+0\\n\",\n      \"Manifest invalid for stream 1: missing or invalid locator \\\"0:0:foo.txt\\\"\"],\n    [false, \". 0:0:foo.txt\\n\",\n      \"Manifest invalid for stream 1: missing or invalid locator \\\"0:0:foo.txt\\\"\"],\n    [false, \".\\n\", \"Manifest invalid for stream 1: missing or invalid locator\"],\n    [false, \".\", \"Invalid manifest: does not end with newline\"],\n    [false, \". \\n\", \"Manifest invalid for stream 1: missing or invalid locator\"],\n    [false, \".  \\n\", \"Manifest invalid for stream 1: missing or invalid locator\"],\n    [false, \" . d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt \\n\",\n      \"stream 1: trailing space\"],\n   # TAB and other tricky whitespace characters:\n    [false, \"\\v. d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\"\\\\v.\"],\n    [false, \"./foo\\vbar d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\"./foo\\\\vbar\"],\n    [false, \"\\t. d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\"\\\\t\"],\n    [false, \".\\td41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\".\\\\t\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\t\\n\",\n      \"stream 1: invalid file token \\\"0:0:foo.txt\\\\t\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0\\t 0:0:foo.txt\\n\",\n      \"stream 1: missing or invalid locator \\\"d41d8cd98f00b204e9800998ecf8427e+0\\\\t\\\"\"],\n    [false, \"./foo\\tbar d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\",\n      \"stream 1: missing or invalid stream name \\\"./foo\\\\tbar\\\"\"],\n    # other whitespace errors:\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0  0:0:foo.txt\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n \\n\",\n      \"Manifest invalid for stream 2: missing stream name\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\\n\",\n      \"Manifest invalid for stream 2: missing stream name\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n \",\n      \"Invalid manifest: does not end with newline\"],\n    [false, \"\\n. 
d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\",\n      \"Manifest invalid for stream 1: missing stream name\"],\n    [false, \" \\n. d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\",\n      \"Manifest invalid for stream 1: missing stream name\"],\n    # empty file and stream name components:\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:/foo.txt\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"0:0:/foo.txt\\\"\"],\n    [false, \"./ d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\"./\\\"\"],\n    [false, \".//foo d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\".//foo\\\"\"],\n    [false, \"./foo/ d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\"./foo/\\\"\"],\n    [false, \"./foo//bar d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\"./foo//bar\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo//bar.txt\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"0:0:foo//bar.txt\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo/\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"0:0:foo/\\\"\"],\n    # escaped chars\n    [true, \"./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\056\\n\"],\n    [false, \"./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\056\\\\056\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"0:0:\\\\\\\\056\\\\\\\\056\\\"\"],\n    [false, \"./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\056\\\\056\\\\057foo\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"0:0:\\\\\\\\056\\\\\\\\056\\\\\\\\057foo\\\"\"],\n    [false, \"./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 0\\\\0720\\\\072foo\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"0\\\\\\\\0720\\\\\\\\072foo\\\"\"],\n    [false, \"./empty_dir d41d8cd98f00b204e9800998ecf8427e+0 \\\\060:\\\\060:foo\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"\\\\\\\\060:\\\\\\\\060:foo\\\"\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\\\057bar\\n\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\072\\n\"],\n    [true, \".\\\\057Data d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\"],\n    [true, \"\\\\056\\\\057Data d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\"],\n    [true, \"./\\\\134444 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\"],\n    [false, \"./\\\\\\\\444 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\"./\\\\\\\\\\\\\\\\444\\\"\"],\n    [true, \"./\\\\011foo d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\"],\n    [false, \"./\\\\011/.. 
d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\"./\\\\\\\\011/..\\\"\"],\n    [false, \".\\\\056\\\\057 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\".\\\\\\\\056\\\\\\\\057\\\"\"],\n    [false, \".\\\\057\\\\056 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\".\\\\\\\\057\\\\\\\\056\\\"\"],\n    [false, \".\\\\057Data d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\\\444\\n\",\n      \"Manifest invalid for stream 1: >8-bit encoded chars not allowed on file token \\\"0:0:foo\\\\\\\\444\\\"\"],\n    [false, \"./\\\\444 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\",\n      \"Manifest invalid for stream 1: >8-bit encoded chars not allowed on stream token \\\"./\\\\\\\\444\\\"\"],\n    [false, \"./\\tfoo d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\"./\\\\tfoo\\\"\"],\n    [false, \"./foo\\\\ d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\"./foo\\\\\\\\\\\"\"],\n    [false, \"./foo\\\\r d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\"./foo\\\\\\\\r\\\"\"],\n    [false, \"./foo\\\\444 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\",\n      \"Manifest invalid for stream 1: >8-bit encoded chars not allowed on stream token \\\"./foo\\\\\\\\444\\\"\"],\n    [false, \"./foo\\\\888 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\"./foo\\\\\\\\888\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\\\\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"0:0:foo\\\\\\\\\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\\\r\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"0:0:foo\\\\\\\\r\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\\\444\\n\",\n      \"Manifest invalid for stream 1: >8-bit encoded chars not allowed on file token \\\"0:0:foo\\\\\\\\444\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\\\888\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"0:0:foo\\\\\\\\888\\\"\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\\\057/bar\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"0:0:foo\\\\\\\\057/bar\\\"\"],\n    [false, \".\\\\057/Data d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\",\n      \"Manifest invalid for stream 1: missing or invalid stream name \\\".\\\\\\\\057/Data\\\"\"],\n    [true, \"./Data\\\\040Folder d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\"],\n    [false, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\057foo/bar\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"0:0:\\\\\\\\057foo/bar\\\"\"],\n    [true, \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\134057foo/bar\\n\"],\n    [false, \". 
d41d8cd98f00b204e9800998ecf8427e+0 \\\\040:\\\\040:foo.txt\\n\",\n      \"Manifest invalid for stream 1: invalid file token \\\"\\\\\\\\040:\\\\\\\\040:foo.txt\\\"\"],\n  ].each do |ok, manifest, expected_error=nil|\n    define_method \"test_validate manifest #{manifest.inspect}\" do\n      assert_equal ok, Keep::Manifest.valid?(manifest)\n      if ok\n        assert Keep::Manifest.validate! manifest\n      else\n        begin\n          Keep::Manifest.validate! manifest\n        rescue ArgumentError => e\n          msg = e.message\n        end\n        refute_nil msg, \"Expected ArgumentError\"\n        assert msg.include?(expected_error), \"Did not find expected error message. Expected: #{expected_error}; Actual: #{msg}\"\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby/test/test_request_id.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nrequire \"arvados\"\nrequire \"mocha/minitest\"\n\nclass FakeError < StandardError; end\nclass RequestIdTest < Minitest::Test\n    def test_raise_exception_with_request_id\n        arv = Arvados.new\n        clnt = arv.client\n        assert_nil clnt.request_id\n\n        Google::APIClient.any_instance.stubs(:execute).raises(FakeError.new(\"Uh-oh...\"))\n        err = assert_raises(FakeError) do\n            arv.collection.get(uuid: \"zzzzz-4zz18-zzzzzzzzzzzzzzz\")\n        end\n        assert clnt.request_id != nil\n        assert_match(/Uh-oh.*\\(Request ID: req-[0-9a-zA-Z]{20}\\)/, err.message)\n    end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/.gitignore",
    "content": "._*\n.DS_Store\n.yardoc\n.bundle\n.rvmrc\nGemfile.lock\ncoverage\ndoc\nheckling\npkg\nspecdoc\nwiki\n.google-api.yaml\n*.log\n\n#IntelliJ\n.idea\n*.iml\natlassian*\n\n"
  },
  {
    "path": "sdk/ruby-google-api-client/.rspec",
    "content": "--colour\n--format documentation\n"
  },
  {
    "path": "sdk/ruby-google-api-client/.travis.yml",
    "content": "language: ruby\nrvm:\n  - 2.2\n  - 2.0.0\n  - 2.1\n  - 1.9.3\n  - rbx-2\n  - jruby\nenv:\n  - RAILS_VERSION=\"~>3.2\"\n  - RAILS_VERSION=\"~>4.0.0\"\n  - RAILS_VERSION=\"~>4.1.0\"\n  - RAILS_VERSION=\"~>4.2.0\"\nscript: \"bundle exec rake spec:all\"\nbefore_install:\n - sudo apt-get update\n - sudo apt-get install idn\nnotifications:\n  email:\n    recipients:\n      - sbazyl@google.com\n    on_success: change\n    on_failure: change\n"
  },
  {
    "path": "sdk/ruby-google-api-client/.yardopts",
    "content": "--markup markdown\nlib/**/*.rb\next/**/*.c\n-\nREADME.md\nCHANGELOG.md\nLICENSE\n"
  },
  {
    "path": "sdk/ruby-google-api-client/CHANGELOG.md",
    "content": "# 0.8.8\n* Do not put CR/LF in http headers\n\n# 0.8.7\n* Lock activesupport version to < 5.0\n\n# 0.8.6\n* Use discovered 'rootUrl' as base URI for services\n* Respect discovered methods with colons in path\n\n# 0.8.5\n* Corrects the regression Rails 4 support in the 0.8.4 release.\n\n# 0.8.4\n* Fixes a file permission issues with the 0.8.3 release\n* Fixes warnings when the library is used\n\n# 0.8.3\n* Adds support for authorization via Application Default Credentials.\n# Adds support for tracking coverage on coveralls.io\n\n# 0.8.2\n* Fixes for file storage and missing cacerts file\n\n# 0.8.1\n* Fix logger in rails\n\n# 0.8.0\n* Refactored credential storage, added support for redis\n* Update gem depdendencies\n* Fixed retry logic to allow for auth retries independent of the overall number of retries\n* Added `:force_encoding` option to set body content encoding based on the Content-Type header\n* Batch requests with the service interface now inherit the service's connection\n* `register_discover_document` now returns the API instance\n* Added `:proxy` option to set Faraday's HTTP proxy setting\n* Added `:faraday_options` option to allow passthrough settings to Faraday connection\n* Drop 1.8.x support\n* This will be the last release with 1.9.x support\n\n# 0.7.1\n* Minor fix to update gem dependencies\n\n# 0.7.0\n* Remove CLI\n* Support for automatic retires & backoff. Off by default, enable by setting `retries` on `APIClient`\n* Experimental new interface (see `Google::APIClient::Service`)\n* Fix warnings when using Faraday separately\n* Support Google Compute Engine service accounts\n* Enable gzip compression for responses\n* Upgrade to Faraday 0.9.0. Resolves multiple issues with query parameter encodings.\n* Use bundled root certificates for verifying SSL certificates\n* Rewind media when retrying uploads\n\n# 0.6.4\n* Pin signet version to 0.4.x\n\n# 0.6.3\n\n* Update autoparse to 0.3.3 to fix cases where results aren't correctly parsed.\n* Fix railtie loading for compatibility with rails < 3.0\n* Fix refresh of access token when passing credentials as parameter to execute\n* Fix URI processing in batch requests to allow query parameters\n\n# 0.6.2\n\n* Update signet to 0.4.6 to support server side continuation of postmessage\n  auth flows.\n\n# 0.6.1\n\n* Fix impersonation with service accounts\n\n# 0.6\n\n* Apps strongly encouraged to set :application_name & :application_version when\n  initializing a client\n* JWT/service accounts moved to signet\n* Added helper class for installed app OAuth flows, updated samples & CLI\n* Initial logging support for client\n* Fix PKCS12 loading on windows\n* Allow disabling auto-refresh of OAuth 2 access tokens\n* Compatibility with MultiJson >= 1.0.0 & Rails 3.2.8\n* Fix for body serialization when body doesn't respond to to_json\n* Remove OAuth 1.0 logins from CLI\n\n\n# 0.5.0\n\n* Beta candidate, potential incompatible changes with how requests are processed.\n    * All requests should be made using execute() or execute!()\n    * :api_method in request can no longer be a string\n    * Deprecated ResumableUpload.send_* methods.\n* Reduce memory utilization when uploading large files\n* Automatic refresh of OAuth 2 credentials & retry of request when 401 errors\n  are returned\n* Simplify internal request processing.\n\n# 0.4.7\n\n* Added the ability to convert client secrets to an authorization object\n\n# 0.4.6\n\n* Backwards compatibility for MultiJson\n\n# 0.4.5\n\n* Updated Launchy dependency\n* Updated Faraday 
dependency\n* Updated Addressable dependency\n* Updated Autoparse dependency\n* Removed Sinatra development dependency\n\n# 0.4.4\n\n* Added batch execution\n* Added service accounts\n* Can now supply authorization on a per-request basis.\n\n# 0.4.3\n\n* Added media upload capabilities\n* Support serializing OAuth credentials to client_secrets.json\n* Fixed OS name/version string on JRuby\n\n# 0.4.2\n\n* Fixed incompatibility with Ruby 1.8.7\n\n# 0.4.1\n\n* Fixed ancestor checking issue when assigning Autoparse identifiers\n* Renamed discovery methods to avoid collisions with some APIs\n* Updated autoparse dependency to avoid JSON bug\n\n# 0.4.0\n\n* Replaced httpadapter gem dependency with faraday\n* Replaced json gem dependency with multi_json\n* Fixed /dev/null issues on Windows\n* Repeated parameters now work\n\n# 0.3.0\n\n* Updated to use v1 of the discovery API\n* Updated to use httpadapter 1.0.0\n* Added OAuth 2 support to the command line tool\n* Renamed some switches in the command line tool\n* Added additional configuration capabilities\n* Fixed a few deprecation warnings from dependencies\n* Added gemspec to source control\n\n# 0.2.0\n\n* Updated to use v1 of the discovery API\n* Updated to use httpadapter 1.0.0\n* Added OAuth 2 support to the command line tool\n* Renamed some switches in the command line tool\n* Added additional configuration capabilities\n\n# 0.1.3\n\n* Added support for manual overrides of the discovery URI\n* Added support for manual overrides of the API base\n* Added support for xoauth_requestor_id\n\n# 0.1.2\n\n* Added support for two-legged OAuth\n* Moved some development dependencies into runtime\n\n# 0.1.1\n\n* Substantial improvements to the command line interface\n\n# 0.1.0\n\n* Initial release\n"
  },
  {
    "path": "sdk/ruby-google-api-client/CONTRIBUTING.md",
    "content": "# How to become a contributor and submit your own code\n\n## Contributor License Agreements\n\nWe'd love to accept your sample apps and patches! Before we can take them, we \nhave to jump a couple of legal hurdles.\n\nPlease fill out either the individual or corporate Contributor License Agreement\n(CLA).\n\n  * If you are an individual writing original source code and you're sure you\n    own the intellectual property, then you'll need to sign an [individual CLA]\n    (http://code.google.com/legal/individual-cla-v1.0.html).\n  * If you work for a company that wants to allow you to contribute your work,\n    then you'll need to sign a [corporate CLA]\n    (http://code.google.com/legal/corporate-cla-v1.0.html).\n\nFollow either of the two links above to access the appropriate CLA and\ninstructions for how to sign and return it. Once we receive it, we'll be able to\naccept your pull requests.\n\n## Contributing A Patch\n\n1. Submit an issue describing your proposed change to the repo in question.\n1. The repo owner will respond to your issue promptly.\n1. If your proposed change is accepted, and you haven't already done so, sign a\n   Contributor License Agreement (see details above).\n1. Fork the desired repo, develop and test your code changes.\n1. Ensure that your code is clear and comprehensible.\n1. Ensure that your code has an appropriate set of unit tests which all pass.\n1. Submit a pull request.\n\n"
  },
  {
    "path": "sdk/ruby-google-api-client/Gemfile",
    "content": "source 'https://rubygems.org'\n\ngemspec\n\ngem 'jruby-openssl', :platforms => :jruby\n\nif ENV['RAILS_VERSION']\n  gem 'rails', ENV['RAILS_VERSION']\nend"
  },
  {
    "path": "sdk/ruby-google-api-client/LICENSE",
    "content": "\n                              Apache License\n                        Version 2.0, January 2004\n                     http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n   \"License\" shall mean the terms and conditions for use, reproduction,\n   and distribution as defined by Sections 1 through 9 of this document.\n\n   \"Licensor\" shall mean the copyright owner or entity authorized by\n   the copyright owner that is granting the License.\n\n   \"Legal Entity\" shall mean the union of the acting entity and all\n   other entities that control, are controlled by, or are under common\n   control with that entity. For the purposes of this definition,\n   \"control\" means (i) the power, direct or indirect, to cause the\n   direction or management of such entity, whether by contract or\n   otherwise, or (ii) ownership of fifty percent (50%) or more of the\n   outstanding shares, or (iii) beneficial ownership of such entity.\n\n   \"You\" (or \"Your\") shall mean an individual or Legal Entity\n   exercising permissions granted by this License.\n\n   \"Source\" form shall mean the preferred form for making modifications,\n   including but not limited to software source code, documentation\n   source, and configuration files.\n\n   \"Object\" form shall mean any form resulting from mechanical\n   transformation or translation of a Source form, including but\n   not limited to compiled object code, generated documentation,\n   and conversions to other media types.\n\n   \"Work\" shall mean the work of authorship, whether in Source or\n   Object form, made available under the License, as indicated by a\n   copyright notice that is included in or attached to the work\n   (an example is provided in the Appendix below).\n\n   \"Derivative Works\" shall mean any work, whether in Source or Object\n   form, that is based on (or derived from) the Work and for which the\n   editorial revisions, annotations, elaborations, or other modifications\n   represent, as a whole, an original work of authorship. For the purposes\n   of this License, Derivative Works shall not include works that remain\n   separable from, or merely link (or bind by name) to the interfaces of,\n   the Work and Derivative Works thereof.\n\n   \"Contribution\" shall mean any work of authorship, including\n   the original version of the Work and any modifications or additions\n   to that Work or Derivative Works thereof, that is intentionally\n   submitted to Licensor for inclusion in the Work by the copyright owner\n   or by an individual or Legal Entity authorized to submit on behalf of\n   the copyright owner. For the purposes of this definition, \"submitted\"\n   means any form of electronic, verbal, or written communication sent\n   to the Licensor or its representatives, including but not limited to\n   communication on electronic mailing lists, source code control systems,\n   and issue tracking systems that are managed by, or on behalf of, the\n   Licensor for the purpose of discussing and improving the Work, but\n   excluding communication that is conspicuously marked or otherwise\n   designated in writing by the copyright owner as \"Not a Contribution.\"\n\n   \"Contributor\" shall mean Licensor and any individual or Legal Entity\n   on behalf of whom a Contribution has been received by Licensor and\n   subsequently incorporated within the Work.\n\n2. Grant of Copyright License. 
Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   copyright license to reproduce, prepare Derivative Works of,\n   publicly display, publicly perform, sublicense, and distribute the\n   Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   (except as stated in this section) patent license to make, have made,\n   use, offer to sell, sell, import, and otherwise transfer the Work,\n   where such license applies only to those patent claims licensable\n   by such Contributor that are necessarily infringed by their\n   Contribution(s) alone or by combination of their Contribution(s)\n   with the Work to which such Contribution(s) was submitted. If You\n   institute patent litigation against any entity (including a\n   cross-claim or counterclaim in a lawsuit) alleging that the Work\n   or a Contribution incorporated within the Work constitutes direct\n   or contributory patent infringement, then any patent licenses\n   granted to You under this License for that Work shall terminate\n   as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n   Work or Derivative Works thereof in any medium, with or without\n   modifications, and in Source or Object form, provided that You\n   meet the following conditions:\n\n   (a) You must give any other recipients of the Work or\n       Derivative Works a copy of this License; and\n\n   (b) You must cause any modified files to carry prominent notices\n       stating that You changed the files; and\n\n   (c) You must retain, in the Source form of any Derivative Works\n       that You distribute, all copyright, patent, trademark, and\n       attribution notices from the Source form of the Work,\n       excluding those notices that do not pertain to any part of\n       the Derivative Works; and\n\n   (d) If the Work includes a \"NOTICE\" text file as part of its\n       distribution, then any Derivative Works that You distribute must\n       include a readable copy of the attribution notices contained\n       within such NOTICE file, excluding those notices that do not\n       pertain to any part of the Derivative Works, in at least one\n       of the following places: within a NOTICE text file distributed\n       as part of the Derivative Works; within the Source form or\n       documentation, if provided along with the Derivative Works; or,\n       within a display generated by the Derivative Works, if and\n       wherever such third-party notices normally appear. The contents\n       of the NOTICE file are for informational purposes only and\n       do not modify the License. 
You may add Your own attribution\n       notices within Derivative Works that You distribute, alongside\n       or as an addendum to the NOTICE text from the Work, provided\n       that such additional attribution notices cannot be construed\n       as modifying the License.\n\n   You may add Your own copyright statement to Your modifications and\n   may provide additional or different license terms and conditions\n   for use, reproduction, or distribution of Your modifications, or\n   for any such Derivative Works as a whole, provided Your use,\n   reproduction, and distribution of the Work otherwise complies with\n   the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n   any Contribution intentionally submitted for inclusion in the Work\n   by You to the Licensor shall be under the terms and conditions of\n   this License, without any additional terms or conditions.\n   Notwithstanding the above, nothing herein shall supersede or modify\n   the terms of any separate license agreement you may have executed\n   with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n   names, trademarks, service marks, or product names of the Licensor,\n   except as required for reasonable and customary use in describing the\n   origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n   agreed to in writing, Licensor provides the Work (and each\n   Contributor provides its Contributions) on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n   implied, including, without limitation, any warranties or conditions\n   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n   PARTICULAR PURPOSE. You are solely responsible for determining the\n   appropriateness of using or redistributing the Work and assume any\n   risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n   whether in tort (including negligence), contract, or otherwise,\n   unless required by applicable law (such as deliberate and grossly\n   negligent acts) or agreed to in writing, shall any Contributor be\n   liable to You for damages, including any direct, indirect, special,\n   incidental, or consequential damages of any character arising as a\n   result of this License or out of the use or inability to use the\n   Work (including but not limited to damages for loss of goodwill,\n   work stoppage, computer failure or malfunction, or any and all\n   other commercial damages or losses), even if such Contributor\n   has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n   the Work or Derivative Works thereof, You may choose to offer,\n   and charge a fee for, acceptance of support, warranty, indemnity,\n   or other liability obligations and/or rights consistent with this\n   License. 
However, in accepting such obligations, You may act only\n   on Your own behalf and on Your sole responsibility, not on behalf\n   of any other Contributor, and only if You agree to indemnify,\n   defend, and hold each Contributor harmless for any liability\n   incurred by, or claims asserted against, such Contributor by reason\n   of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n   To apply the Apache License to your work, attach the following\n   boilerplate notice, with the fields enclosed by brackets \"[]\"\n   replaced with your own identifying information. (Don't include\n   the brackets!)  The text should be enclosed in the appropriate\n   comment syntax for the file format. We also recommend that a\n   file or class name and description of purpose be included on the\n   same \"printed page\" as the copyright notice for easier\n   identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n"
  },
  {
    "path": "sdk/ruby-google-api-client/README.md",
    "content": "# Arvados Google API Client\n\nThis is a fork of the google-api-client gem, based on https://github.com/google/google-api-ruby-client version 0.8.6.\n\nIt adds compatibility fixes for newer versions of dependencies (Ruby, faraday, etc.) while avoiding the breaking API changes that have been made in the upstream project.\n\nIt is entirely focused on the use cases needed by the Arvados Ruby SDK and is not intended or expected to work elsewhere.\n"
  },
  {
    "path": "sdk/ruby-google-api-client/Rakefile",
    "content": "# -*- ruby -*-\nlib_dir = File.expand_path('../lib', __FILE__)\n$LOAD_PATH.unshift(lib_dir)\n$LOAD_PATH.uniq!\n\nrequire 'bundler/gem_tasks'\nrequire 'rubygems'\nrequire 'rake'\n\nrequire File.join(File.dirname(__FILE__), 'lib/google/api_client', 'version')\n\nPKG_DISPLAY_NAME   = 'Google API Client'\nPKG_NAME           = PKG_DISPLAY_NAME.downcase.gsub(/\\s/, '-')\nPKG_VERSION        = Google::APIClient::VERSION::STRING\nPKG_FILE_NAME      = \"#{PKG_NAME}-#{PKG_VERSION}\"\nPKG_HOMEPAGE       = 'https://github.com/google/google-api-ruby-client'\n\nRELEASE_NAME       = \"REL #{PKG_VERSION}\"\n\nPKG_AUTHOR         = [\"Bob Aman\", \"Steve Bazyl\"]\nPKG_AUTHOR_EMAIL   = \"sbazyl@google.com\"\nPKG_SUMMARY        = 'Package Summary'\nPKG_DESCRIPTION    = <<-TEXT\nThe Google API Ruby Client makes it trivial to discover and access supported\nAPIs.\nTEXT\n\nlist = FileList[\n    'lib/**/*', 'spec/**/*', 'vendor/**/*',\n    'tasks/**/*', 'website/**/*',\n    '[A-Z]*', 'Rakefile'\n].exclude(/[_\\.]git$/)\n(open(\".gitignore\") { |file| file.read }).split(\"\\n\").each do |pattern|\n  list.exclude(pattern)\nend\nPKG_FILES = list\n\ntask :default => 'spec'\n\nWINDOWS = (RUBY_PLATFORM =~ /mswin|win32|mingw|bccwin|cygwin/) rescue false\nSUDO = WINDOWS ? '' : ('sudo' unless ENV['SUDOLESS'])\n"
  },
  {
    "path": "sdk/ruby-google-api-client/arvados-google-api-client.gemspec",
    "content": "# -*- encoding: utf-8 -*-\nrequire File.join(File.dirname(__FILE__), 'lib/google/api_client', 'version')\n\nGem::Specification.new do |s|\n  s.name = \"arvados-google-api-client\"\n  s.version = Google::APIClient::VERSION::STRING\n\n  s.required_ruby_version = '>= 3.0.0'\n  s.required_rubygems_version = \">= 1.3.5\"\n  s.require_paths = [\"lib\"]\n  s.authors = [\"Bob Aman\", \"Steven Bazyl\"]\n  s.license = \"Apache-2.0\"\n  s.description = \"Fork of google-api-client used by Ruby-based Arvados components.\"\n  s.email = \"dev@arvados.org\"\n  s.extra_rdoc_files = [\"README.md\"]\n  s.files = %w(arvados-google-api-client.gemspec Rakefile LICENSE CHANGELOG.md README.md Gemfile)\n  s.files += Dir.glob(\"lib/**/*.rb\")\n  s.files += Dir.glob(\"lib/cacerts.pem\")\n  s.files += Dir.glob(\"spec/**/*.{rb,opts}\")\n  s.files += Dir.glob(\"vendor/**/*.rb\")\n  s.files += Dir.glob(\"tasks/**/*\")\n  s.files += Dir.glob(\"website/**/*\")\n  s.homepage = \"https://github.com/arvados/arvados/tree/main/sdk/ruby-google-api-client\"\n  s.rdoc_options = [\"--main\", \"README.md\"]\n  s.summary = \"Fork of google-api-client used by Ruby-based Arvados components.\"\n\n  # Dependencies below are pinned to a minor version in cases where\n  # the currently support Ruby 3.0 but they have previously dropped\n  # support for a Ruby version without incrementing the major version.\n\n  # addressable 2.8.0 dropped Ruby 2.1.\n  # addressable 2.8.1 updated metadata to require Ruby >= 2.2.\n  # addressable 2.8 is the highest minor version we've tested.\n  s.add_runtime_dependency 'addressable', '>= 2.3', '< 2.9'\n  # signet 0.20.0 dropped Ruby 3.0.\n  s.add_runtime_dependency 'signet', '~> 0.19.0'\n  # faraday 2.9.0 dropped Ruby 2.7.\n  # faraday 2.14.1 is the highest minor version we've tested.\n  s.add_runtime_dependency 'faraday', '~> 2.14.1'\n  # faraday-multipart 1.0.1 dropped Ruby 2.4, but 1.0.2 added it back.\n  s.add_runtime_dependency 'faraday-multipart', '~> 1.0'\n  # faraday-gzip 3.0.0 dropped Ruby 2.\n  s.add_runtime_dependency 'faraday-gzip', '~> 3.0'\n  # googleauth 1.14.0 dropped Ruby 2.\n  # googleauth 1.15 is the highest minor version we've tested.\n  s.add_runtime_dependency 'googleauth', '~> 1.15.0'\n  # multi_json 2.0 will drop Ruby 3.0.\n  # https://github.com/sferik/multi_json/pull/16#issue-3237521157\n  # multi_json 1 is the highest major version we've tested.\n  s.add_runtime_dependency 'multi_json', '~> 1.15'\n  # autoparse was archived 2022-07-27 at 0.3.3.\n  s.add_runtime_dependency 'autoparse', '~> 0.3'\n  # extlib had no release between 2014 and 2025.\n  s.add_runtime_dependency 'extlib', '~> 0.9'\n  # launchy 3.0.0 dropped Ruby 2.\n  # launchy 3.1.0 stopped testing against Ruby 3.0.\n  # launchy 3.0 is the higest minor version we've tested.\n  s.add_runtime_dependency 'launchy', '~> 3.0.1'\n  # retriable 2 is not API compatible with retriable 1.\n  s.add_runtime_dependency 'retriable', '~> 1.4'\n  # activesupport 7.2.0 dropped Ruby 3.0.\n  s.add_dependency('activesupport', '~> 7.1.3', '>= 7.1.3.4')\n\n  # These are indirect dependencies of the above where we force a resolution\n  # that supports all our Rubies.\n  # google-cloud-env 2.3.0 dropped Ruby 3.0.\n  s.add_runtime_dependency 'google-cloud-env', '~> 2.2.0'\n  # public_suffix 6.0.0 dropped Ruby 2.7.\n  s.add_runtime_dependency 'public_suffix', '~> 6.0'\n  # securerandom 0.4.0 dropped Ruby 3.0 (and 2.6 and 2.7) without\n  # mentioning anything in the changelog / release notes.\n  s.add_runtime_dependency 
'securerandom', '~> 0.3.2'\n\n  # rake 12.3.0 dropped Ruby 1.\n  # rake 12 is the highest major version we've tested.\n  s.add_development_dependency 'rake', '>= 10', '< 13'\n  # yard 0.9.37 (2024) metadata still claims to support Ruby 1.\n  s.add_development_dependency 'yard', '>= 0.8', '< 0.10'\n  # rspec 3.13.1 (2025) metadata still claims to support Ruby 1.\n  # rspec 3.0 is the highest minor version we've tested.\n  s.add_development_dependency 'rspec', '~> 3.1'\n  # kramdown 2.5.0 dropped Ruby 2.4.\n  # kramdown 2.5 is the highest minor version we've tested.\n  s.add_development_dependency 'kramdown', '>= 1.5', '< 2.6'\n  # simplecov 0.19.0 (2020) dropped Ruby 2.4.\n  # simplecov 0.21 is the highest minor version we've tested.\n  s.add_development_dependency 'simplecov', '>= 0.9.2', '< 0.22.0'\n  # coveralls hasn't had a release since 0.8.23 (2019) whose metadata\n  # claims to support Ruby 1.8.7.\n  # coveralls 0.8 is the highest minor version we've tested.\n  s.add_development_dependency 'coveralls', '>= 0.7.11', '< 0.9'\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/cacerts.pem",
    "content": "# Issuer: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc.\n# Subject: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc.\n# Label: \"GTE CyberTrust Global Root\"\n# Serial: 421\n# MD5 Fingerprint: ca:3d:d3:68:f1:03:5c:d0:32:fa:b8:2b:59:e8:5a:db\n# SHA1 Fingerprint: 97:81:79:50:d8:1c:96:70:cc:34:d8:09:cf:79:44:31:36:7e:f4:74\n# SHA256 Fingerprint: a5:31:25:18:8d:21:10:aa:96:4b:02:c7:b7:c6:da:32:03:17:08:94:e5:fb:71:ff:fb:66:67:d5:e6:81:0a:36\n-----BEGIN CERTIFICATE-----\nMIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYD\nVQQKEw9HVEUgQ29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNv\nbHV0aW9ucywgSW5jLjEjMCEGA1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJv\nb3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEzMjM1OTAwWjB1MQswCQYDVQQGEwJV\nUzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU\ncnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds\nb2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrH\niM3dFw4usJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTS\nr41tiGeA5u2ylc9yMcqlHHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X4\n04Wqk2kmhXBIgD8SFcd5tB8FLztimQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAG3r\nGwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMWM4ETCJ57NE7fQMh017l9\n3PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OFNMQkpw0P\nlZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/\n-----END CERTIFICATE-----\n\n# Issuer: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division\n# Subject: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division\n# Label: \"Thawte Server CA\"\n# Serial: 1\n# MD5 Fingerprint: c5:70:c4:a2:ed:53:78:0c:c8:10:53:81:64:cb:d0:1d\n# SHA1 Fingerprint: 23:e5:94:94:51:95:f2:41:48:03:b4:d5:64:d2:a3:a3:f5:d8:8b:8c\n# SHA256 Fingerprint: b4:41:0b:73:e2:e6:ea:ca:47:fb:c4:2f:8f:a4:01:8a:f4:38:1d:c5:4c:fa:a8:44:50:46:1e:ed:09:45:4d:e9\n-----BEGIN CERTIFICATE-----\nMIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkEx\nFTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD\nVQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv\nbiBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEm\nMCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wHhcNOTYwODAx\nMDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT\nDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3\ndGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNl\ncyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3\nDQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQAD\ngY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl/Kj0R1HahbUgdJSGHg91\nyekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg71CcEJRCX\nL+eQbcAoQpnXTEPew/UhbVSfXcNY4cDk2VuwuNy0e982OsK1ZiIS1ocNAgMBAAGj\nEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEEBQADgYEAB/pMaVz7lcxG\n7oWDTSEwjsrZqG9JGubaUeNgcGyEYRGhGshIPllDfU+VPaGLtwtimHp1it2ITk6e\nQNuozDJ0uW8NxuOzRAvZim+aKZuZGCg70eNAKJpaPNW15yAbi8qkq43pUdniTCxZ\nqdq5snUb9kLy78fyGPmJvKP/iiMucEc=\n-----END CERTIFICATE-----\n\n# Issuer: CN=Thawte Premium Server CA O=Thawte Consulting cc OU=Certification Services Division\n# Subject: CN=Thawte Premium Server CA O=Thawte Consulting cc OU=Certification Services Division\n# Label: \"Thawte Premium Server CA\"\n# Serial: 1\n# MD5 Fingerprint: 06:9f:69:79:16:66:90:02:1b:8c:8c:a2:c3:07:6f:3a\n# SHA1 Fingerprint: 62:7f:8d:78:27:65:63:99:d2:7d:7f:90:44:c9:fe:b3:f3:3e:fa:9a\n# SHA256 Fingerprint: 
ab:70:36:36:5c:71:54:aa:29:c2:c2:9f:5d:41:91:16:3b:16:2a:22:25:01:13:57:d5:6d:07:ff:a7:bc:1f:72\n-----BEGIN CERTIFICATE-----\nMIIDJzCCApCgAwIBAgIBATANBgkqhkiG9w0BAQQFADCBzjELMAkGA1UEBhMCWkEx\nFTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD\nVQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv\nbiBTZXJ2aWNlcyBEaXZpc2lvbjEhMB8GA1UEAxMYVGhhd3RlIFByZW1pdW0gU2Vy\ndmVyIENBMSgwJgYJKoZIhvcNAQkBFhlwcmVtaXVtLXNlcnZlckB0aGF3dGUuY29t\nMB4XDTk2MDgwMTAwMDAwMFoXDTIwMTIzMTIzNTk1OVowgc4xCzAJBgNVBAYTAlpB\nMRUwEwYDVQQIEwxXZXN0ZXJuIENhcGUxEjAQBgNVBAcTCUNhcGUgVG93bjEdMBsG\nA1UEChMUVGhhd3RlIENvbnN1bHRpbmcgY2MxKDAmBgNVBAsTH0NlcnRpZmljYXRp\nb24gU2VydmljZXMgRGl2aXNpb24xITAfBgNVBAMTGFRoYXd0ZSBQcmVtaXVtIFNl\ncnZlciBDQTEoMCYGCSqGSIb3DQEJARYZcHJlbWl1bS1zZXJ2ZXJAdGhhd3RlLmNv\nbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA0jY2aovXwlue2oFBYo847kkE\nVdbQ7xwblRZH7xhINTpS9CtqBo87L+pW46+GjZ4X9560ZXUCTe/LCaIhUdib0GfQ\nug2SBhRz1JPLlyoAnFxODLz6FVL88kRu2hFKbgifLy3j+ao6hnO2RlNYyIkFvYMR\nuHM/qgeN9EJN50CdHDcCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG\n9w0BAQQFAAOBgQAmSCwWwlj66BZ0DKqqX1Q/8tfJeGBeXm43YyJ3Nn6yF8Q0ufUI\nhfzJATj/Tb7yFkJD57taRvvBxhEf8UqwKEbJw8RCfbz6q1lu1bdRiBHjpIUZa4JM\npAwSremkrj/xw0llmozFyD4lt5SZu5IycQfwhl7tUCemDaYj+bvLpgcUQg==\n-----END CERTIFICATE-----\n\n# Issuer: O=Equifax OU=Equifax Secure Certificate Authority\n# Subject: O=Equifax OU=Equifax Secure Certificate Authority\n# Label: \"Equifax Secure CA\"\n# Serial: 903804111\n# MD5 Fingerprint: 67:cb:9d:c0:13:24:8a:82:9b:b2:17:1e:d1:1b:ec:d4\n# SHA1 Fingerprint: d2:32:09:ad:23:d3:14:23:21:74:e4:0d:7f:9d:62:13:97:86:63:3a\n# SHA256 Fingerprint: 08:29:7a:40:47:db:a2:36:80:c7:31:db:6e:31:76:53:ca:78:48:e1:be:bd:3a:0b:01:79:a7:07:f9:2c:f1:78\n-----BEGIN CERTIFICATE-----\nMIIDIDCCAomgAwIBAgIENd70zzANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV\nUzEQMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2Vy\ndGlmaWNhdGUgQXV0aG9yaXR5MB4XDTk4MDgyMjE2NDE1MVoXDTE4MDgyMjE2NDE1\nMVowTjELMAkGA1UEBhMCVVMxEDAOBgNVBAoTB0VxdWlmYXgxLTArBgNVBAsTJEVx\ndWlmYXggU2VjdXJlIENlcnRpZmljYXRlIEF1dGhvcml0eTCBnzANBgkqhkiG9w0B\nAQEFAAOBjQAwgYkCgYEAwV2xWGcIYu6gmi0fCG2RFGiYCh7+2gRvE4RiIcPRfM6f\nBeC4AfBONOziipUEZKzxa1NfBbPLZ4C/QgKO/t0BCezhABRP/PvwDN1Dulsr4R+A\ncJkVV5MW8Q+XarfCaCMczE1ZMKxRHjuvK9buY0V7xdlfUNLjUA86iOe/FP3gx7kC\nAwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEQ\nMA4GA1UEChMHRXF1aWZheDEtMCsGA1UECxMkRXF1aWZheCBTZWN1cmUgQ2VydGlm\naWNhdGUgQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTgw\nODIyMTY0MTUxWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUSOZo+SvSspXXR9gj\nIBBPM5iQn9QwHQYDVR0OBBYEFEjmaPkr0rKV10fYIyAQTzOYkJ/UMAwGA1UdEwQF\nMAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA\nA4GBAFjOKer89961zgK5F7WF0bnj4JXMJTENAKaSbn+2kmOeUJXRmm/kEd5jhW6Y\n7qj/WsjTVbJmcVfewCHrPSqnI0kBBIZCe/zuf6IWUrVnZ9NA2zsmWLIodz2uFHdh\n1voqZiegDfqnc1zqcPGUIWVEX/r87yloqaKHee9570+sB3c4\n-----END CERTIFICATE-----\n\n# Issuer: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority\n# Subject: O=VeriSign, Inc. 
OU=Class 3 Public Primary Certification Authority\n# Label: \"Verisign Class 3 Public Primary Certification Authority\"\n# Serial: 149843929435818692848040365716851702463\n# MD5 Fingerprint: 10:fc:63:5d:f6:26:3e:0d:f3:25:be:5f:79:cd:67:67\n# SHA1 Fingerprint: 74:2c:31:92:e6:07:e4:24:eb:45:49:54:2b:e1:bb:c5:3e:61:74:e2\n# SHA256 Fingerprint: e7:68:56:34:ef:ac:f6:9a:ce:93:9a:6b:25:5b:7b:4f:ab:ef:42:93:5b:50:a2:65:ac:b5:cb:60:27:e4:4e:70\n-----BEGIN CERTIFICATE-----\nMIICPDCCAaUCEHC65B0Q2Sk0tjjKewPMur8wDQYJKoZIhvcNAQECBQAwXzELMAkG\nA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFz\ncyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2\nMDEyOTAwMDAwMFoXDTI4MDgwMTIzNTk1OVowXzELMAkGA1UEBhMCVVMxFzAVBgNV\nBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmlt\nYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GN\nADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhE\nBarsAx94f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/is\nI19wKTakyYbnsZogy1Olhec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0G\nCSqGSIb3DQEBAgUAA4GBALtMEivPLCYATxQT3ab7/AoRhIzzKBxnki98tsX63/Do\nlbwdj2wsqFHMc9ikwFPwTtYmwHYBV4GSXiHx0bH/59AhWM1pF+NEHJwZRDmJXNyc\nAA9WjQKZ7aKQRUzkuxCkPfAyAw7xzvjoyVGM5mKf5p/AfbdynMk2OmufTqj/ZA1k\n-----END CERTIFICATE-----\n\n# Issuer: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority - G2/(c) 1998 VeriSign, Inc. - For authorized use only/VeriSign Trust Network\n# Subject: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority - G2/(c) 1998 VeriSign, Inc. - For authorized use only/VeriSign Trust Network\n# Label: \"Verisign Class 3 Public Primary Certification Authority - G2\"\n# Serial: 167285380242319648451154478808036881606\n# MD5 Fingerprint: a2:33:9b:4c:74:78:73:d4:6c:e7:c1:f3:8d:cb:5c:e9\n# SHA1 Fingerprint: 85:37:1c:a6:e5:50:14:3d:ce:28:03:47:1b:de:3a:09:e8:f8:77:0f\n# SHA256 Fingerprint: 83:ce:3c:12:29:68:8a:59:3d:48:5f:81:97:3c:0f:91:95:43:1e:da:37:cc:5e:36:43:0e:79:c7:a8:88:63:8b\n-----BEGIN CERTIFICATE-----\nMIIDAjCCAmsCEH3Z/gfPqB63EHln+6eJNMYwDQYJKoZIhvcNAQEFBQAwgcExCzAJ\nBgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xh\nc3MgMyBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcy\nMTowOAYDVQQLEzEoYykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3Jp\nemVkIHVzZSBvbmx5MR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMB4X\nDTk4MDUxODAwMDAwMFoXDTI4MDgwMTIzNTk1OVowgcExCzAJBgNVBAYTAlVTMRcw\nFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjE8MDoGA1UECxMzQ2xhc3MgMyBQdWJsaWMg\nUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMTowOAYDVQQLEzEo\nYykgMTk5OCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5\nMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMIGfMA0GCSqGSIb3DQEB\nAQUAA4GNADCBiQKBgQDMXtERXVxp0KvTuWpMmR9ZmDCOFoUgRm1HP9SFIIThbbP4\npO0M8RcPO/mn+SXXwc+EY/J8Y8+iR/LGWzOOZEAEaMGAuWQcRXfH2G71lSk8UOg0\n13gfqLptQ5GVj0VXXn7F+8qkBOvqlzdUMG+7AUcyM83cV5tkaWH4mx0ciU9cZwID\nAQABMA0GCSqGSIb3DQEBBQUAA4GBAFFNzb5cy5gZnBWyATl4Lk0PZ3BwmcYQWpSk\nU01UbSuvDV1Ai2TT1+7eVmGSX6bEHRBhNtMsJzzoKQm5EWR0zLVznxxIqbxhAe7i\nF6YM40AIOw7n60RzKprxaZLvcRTDOaxxp5EJb+RxBrO6WVcmeQD2+A2iMzAo1KpY\noJ2daZH9\n-----END CERTIFICATE-----\n\n# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA\n# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA\n# Label: \"GlobalSign Root CA\"\n# Serial: 4835703278459707669005204\n# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a\n# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c\n# SHA256 Fingerprint: 
eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99\n-----BEGIN CERTIFICATE-----\nMIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG\nA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv\nb3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw\nMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i\nYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT\naWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ\njc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp\nxy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp\n1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG\nsnUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ\nU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8\n9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E\nBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B\nAQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz\nyj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE\n38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP\nAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad\nDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME\nHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==\n-----END CERTIFICATE-----\n\n# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2\n# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R2\n# Label: \"GlobalSign Root CA - R2\"\n# Serial: 4835703278459682885658125\n# MD5 Fingerprint: 94:14:77:7e:3e:5e:fd:8f:30:bd:41:b0:cf:e7:d0:30\n# SHA1 Fingerprint: 75:e0:ab:b6:13:85:12:27:1c:04:f8:5f:dd:de:38:e4:b7:24:2e:fe\n# SHA256 Fingerprint: ca:42:dd:41:74:5f:d0:b8:1e:b9:02:36:2c:f9:d8:bf:71:9d:a1:bd:1b:1e:fc:94:6f:5b:4c:99:f4:2c:1b:9e\n-----BEGIN CERTIFICATE-----\nMIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G\nA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp\nZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1\nMDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG\nA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI\nhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL\nv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8\neoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq\ntTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd\nC9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa\nzq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB\nmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH\nV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n\nbG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG\n3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs\nJ0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO\n291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS\not+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd\nAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7\nTBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg==\n-----END CERTIFICATE-----\n\n# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 1 Policy Validation Authority\n# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. 
OU=ValiCert Class 1 Policy Validation Authority\n# Label: \"ValiCert Class 1 VA\"\n# Serial: 1\n# MD5 Fingerprint: 65:58:ab:15:ad:57:6c:1e:a8:a7:b5:69:ac:bf:ff:eb\n# SHA1 Fingerprint: e5:df:74:3c:b6:01:c4:9b:98:43:dc:ab:8c:e8:6a:81:10:9f:e4:8e\n# SHA256 Fingerprint: f4:c1:49:55:1a:30:13:a3:5b:c7:bf:fe:17:a7:f3:44:9b:c1:ab:5b:5a:0a:e7:4b:06:c2:3b:90:00:4c:01:04\n-----BEGIN CERTIFICATE-----\nMIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0\nIFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz\nBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y\naXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG\n9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNTIyMjM0OFoXDTE5MDYy\nNTIyMjM0OFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y\nazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs\nYXNzIDEgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw\nOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl\ncnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDYWYJ6ibiWuqYvaG9Y\nLqdUHAZu9OqNSLwxlBfw8068srg1knaw0KWlAdcAAxIiGQj4/xEjm84H9b9pGib+\nTunRf50sQB1ZaG6m+FiwnRqP0z/x3BkGgagO4DrdyFNFCQbmD3DD+kCmDuJWBQ8Y\nTfwggtFzVXSNdnKgHZ0dwN0/cQIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFBoPUn0\nLBwGlN+VYH+Wexf+T3GtZMjdd9LvWVXoP+iOBSoh8gfStadS/pyxtuJbdxdA6nLW\nI8sogTLDAHkY7FkXicnGah5xyf23dKUlRWnFSKsZ4UWKJWsZ7uW7EvV/96aNUcPw\nnXS3qT6gpf+2SQMT2iLM7XGCK5nPOrf1LXLI\n-----END CERTIFICATE-----\n\n# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 2 Policy Validation Authority\n# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 2 Policy Validation Authority\n# Label: \"ValiCert Class 2 VA\"\n# Serial: 1\n# MD5 Fingerprint: a9:23:75:9b:ba:49:36:6e:31:c2:db:f2:e7:66:ba:87\n# SHA1 Fingerprint: 31:7a:2a:d0:7f:2b:33:5e:f5:a1:c3:4e:4b:57:e8:b7:d8:f1:fc:a6\n# SHA256 Fingerprint: 58:d0:17:27:9c:d4:dc:63:ab:dd:b1:96:a6:c9:90:6c:30:c4:e0:87:83:ea:e8:c1:60:99:54:d6:93:55:59:6b\n-----BEGIN CERTIFICATE-----\nMIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0\nIFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz\nBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y\naXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG\n9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMTk1NFoXDTE5MDYy\nNjAwMTk1NFowgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y\nazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs\nYXNzIDIgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw\nOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl\ncnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDOOnHK5avIWZJV16vY\ndA757tn2VUdZZUcOBVXc65g2PFxTXdMwzzjsvUGJ7SVCCSRrCl6zfN1SLUzm1NZ9\nWlmpZdRJEy0kTRxQb7XBhVQ7/nHk01xC+YDgkRoKWzk2Z/M/VXwbP7RfZHM047QS\nv4dk+NoS/zcnwbNDu+97bi5p9wIDAQABMA0GCSqGSIb3DQEBBQUAA4GBADt/UG9v\nUJSZSWI4OB9L+KXIPqeCgfYrx+jFzug6EILLGACOTb2oWH+heQC1u+mNr0HZDzTu\nIYEZoDJJKPTEjlbVUjP9UNV+mWwD5MlM/Mtsq2azSiGM5bUMMj4QssxsodyamEwC\nW/POuZ6lcg5Ktz885hZo+L7tdEy8W9ViH0Pd\n-----END CERTIFICATE-----\n\n# Issuer: CN=http://www.valicert.com/ O=ValiCert, Inc. OU=ValiCert Class 3 Policy Validation Authority\n# Subject: CN=http://www.valicert.com/ O=ValiCert, Inc. 
OU=ValiCert Class 3 Policy Validation Authority\n# Label: \"RSA Root Certificate 1\"\n# Serial: 1\n# MD5 Fingerprint: a2:6f:53:b7:ee:40:db:4a:68:e7:fa:18:d9:10:4b:72\n# SHA1 Fingerprint: 69:bd:8c:f4:9c:d3:00:fb:59:2e:17:93:ca:55:6a:f3:ec:aa:35:fb\n# SHA256 Fingerprint: bc:23:f9:8a:31:3c:b9:2d:e3:bb:fc:3a:5a:9f:44:61:ac:39:49:4c:4a:e1:5a:9e:9d:f1:31:e9:9b:73:01:9a\n-----BEGIN CERTIFICATE-----\nMIIC5zCCAlACAQEwDQYJKoZIhvcNAQEFBQAwgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0\nIFZhbGlkYXRpb24gTmV0d29yazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAz\nBgNVBAsTLFZhbGlDZXJ0IENsYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9y\naXR5MSEwHwYDVQQDExhodHRwOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG\n9w0BCQEWEWluZm9AdmFsaWNlcnQuY29tMB4XDTk5MDYyNjAwMjIzM1oXDTE5MDYy\nNjAwMjIzM1owgbsxJDAiBgNVBAcTG1ZhbGlDZXJ0IFZhbGlkYXRpb24gTmV0d29y\nazEXMBUGA1UEChMOVmFsaUNlcnQsIEluYy4xNTAzBgNVBAsTLFZhbGlDZXJ0IENs\nYXNzIDMgUG9saWN5IFZhbGlkYXRpb24gQXV0aG9yaXR5MSEwHwYDVQQDExhodHRw\nOi8vd3d3LnZhbGljZXJ0LmNvbS8xIDAeBgkqhkiG9w0BCQEWEWluZm9AdmFsaWNl\ncnQuY29tMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjmFGWHOjVsQaBalfD\ncnWTq8+epvzzFlLWLU2fNUSoLgRNB0mKOCn1dzfnt6td3zZxFJmP3MKS8edgkpfs\n2Ejcv8ECIMYkpChMMFp2bbFc893enhBxoYjHW5tBbcqwuI4V7q0zK89HBFx1cQqY\nJJgpp0lZpd34t0NiYfPT4tBVPwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBAFa7AliE\nZwgs3x/be0kz9dNnnfS0ChCzycUs4pJqcXgn8nCDQtM+z6lU9PHYkhaM0QTLS6vJ\nn0WuPIqpsHEzXcjFV9+vqDWzf4mH6eglkrh/hXqu1rweN1gqZ8mRzyqBPu3GOd/A\nPhmcGcwTTYJBtYze4D1gCCAPRX5ron+jjBXu\n-----END CERTIFICATE-----\n\n# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only\n# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only\n# Label: \"Verisign Class 3 Public Primary Certification Authority - G3\"\n# Serial: 206684696279472310254277870180966723415\n# MD5 Fingerprint: cd:68:b6:a7:c7:c4:ce:75:e0:1d:4f:57:44:61:92:09\n# SHA1 Fingerprint: 13:2d:0d:45:53:4b:69:97:cd:b2:d5:c3:39:e2:55:76:60:9b:5c:c6\n# SHA256 Fingerprint: eb:04:cf:5e:b1:f3:9a:fa:76:2f:2b:b1:20:f2:96:cb:a5:20:c1:b9:7d:b1:58:95:65:b8:1c:b9:a1:7b:72:44\n-----BEGIN 
CERTIFICATE-----\nMIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw\nCQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl\ncmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu\nLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT\naWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp\ndHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD\nVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT\naWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ\nbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu\nIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg\nLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b\nN3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t\nKmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu\nkxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm\nCC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ\nXwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu\nimi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te\n2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe\nDGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC\n/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p\nF4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt\nTxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ==\n-----END CERTIFICATE-----\n\n# Issuer: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. - For authorized use only\n# Subject: CN=VeriSign Class 4 Public Primary Certification Authority - G3 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 1999 VeriSign, Inc. 
- For authorized use only\n# Label: \"Verisign Class 4 Public Primary Certification Authority - G3\"\n# Serial: 314531972711909413743075096039378935511\n# MD5 Fingerprint: db:c8:f2:27:2e:b1:ea:6a:29:23:5d:fe:56:3e:33:df\n# SHA1 Fingerprint: c8:ec:8c:87:92:69:cb:4b:ab:39:e9:8d:7e:57:67:f3:14:95:73:9d\n# SHA256 Fingerprint: e3:89:36:0d:0f:db:ae:b3:d2:50:58:4b:47:30:31:4e:22:2f:39:c1:56:a0:20:14:4e:8d:96:05:61:79:15:06\n-----BEGIN CERTIFICATE-----\nMIIEGjCCAwICEQDsoKeLbnVqAc/EfMwvlF7XMA0GCSqGSIb3DQEBBQUAMIHKMQsw\nCQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl\ncmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu\nLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT\naWduIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp\ndHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD\nVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT\naWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ\nbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu\nIENsYXNzIDQgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg\nLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK3LpRFpxlmr8Y+1\nGQ9Wzsy1HyDkniYlS+BzZYlZ3tCD5PUPtbut8XzoIfzk6AzufEUiGXaStBO3IFsJ\n+mGuqPKljYXCKtbeZjbSmwL0qJJgfJxptI8kHtCGUvYynEFYHiK9zUVilQhu0Gbd\nU6LM8BDcVHOLBKFGMzNcF0C5nk3T875Vg+ixiY5afJqWIpA7iCXy0lOIAgwLePLm\nNxdLMEYH5IBtptiWLugs+BGzOA1mppvqySNb247i8xOOGlktqgLw7KSHZtzBP/XY\nufTsgsbSPZUd5cBPhMnZo0QoBmrXRazwa2rvTl/4EYIeOGM0ZlDUPpNz+jDDZq3/\nky2X7wMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAj/ola09b5KROJ1WrIhVZPMq1\nCtRK26vdoV9TxaBXOcLORyu+OshWv8LZJxA6sQU8wHcxuzrTBXttmhwwjIDLk5Mq\ng6sFUYICABFna/OIYUdfA5PVWw3g8dShMjWFsjrbsIKr0csKvE+MW8VLADsfKoKm\nfjaF3H48ZwC15DtS4KjrXRX5xm3wrR0OhbepmnMUWluPQSjA1egtTaRezarZ7c7c\n2NU8Qh0XwRJdRTjDOPP8hS6DRkiy1yBfkjaP53kPmF6Z6PDQpLv1U70qzlmwr25/\nbLvSHgCwIe34QWKCudiyxLtGUPMxxY8BqHTr9Xgn2uf3ZkPznoM+IKrDNWCRzg==\n-----END CERTIFICATE-----\n\n# Issuer: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited\n# Subject: CN=Entrust.net Secure Server Certification Authority O=Entrust.net OU=www.entrust.net/CPS incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited\n# Label: \"Entrust.net Secure Server CA\"\n# Serial: 927650371\n# MD5 Fingerprint: df:f2:80:73:cc:f1:e6:61:73:fc:f5:42:e9:c5:7c:ee\n# SHA1 Fingerprint: 99:a6:9b:e6:1a:fe:88:6b:4d:2b:82:00:7c:b8:54:fc:31:7e:15:39\n# SHA256 Fingerprint: 62:f2:40:27:8c:56:4c:4d:d8:bf:7d:9d:4f:6f:36:6e:a8:94:d2:2f:5f:34:d9:89:a9:83:ac:ec:2f:ff:ed:50\n-----BEGIN CERTIFICATE-----\nMIIE2DCCBEGgAwIBAgIEN0rSQzANBgkqhkiG9w0BAQUFADCBwzELMAkGA1UEBhMC\nVVMxFDASBgNVBAoTC0VudHJ1c3QubmV0MTswOQYDVQQLEzJ3d3cuZW50cnVzdC5u\nZXQvQ1BTIGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTElMCMGA1UECxMc\nKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDE6MDgGA1UEAxMxRW50cnVzdC5u\nZXQgU2VjdXJlIFNlcnZlciBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw05OTA1\nMjUxNjA5NDBaFw0xOTA1MjUxNjM5NDBaMIHDMQswCQYDVQQGEwJVUzEUMBIGA1UE\nChMLRW50cnVzdC5uZXQxOzA5BgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5j\nb3JwLiBieSByZWYuIChsaW1pdHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBF\nbnRydXN0Lm5ldCBMaW1pdGVkMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUg\nU2VydmVyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGdMA0GCSqGSIb3DQEBAQUA\nA4GLADCBhwKBgQDNKIM0VBuJ8w+vN5Ex/68xYMmo6LIQaO2f55M28Qpku0f1BBc/\nI0dNxScZgSYMVHINiC3ZH5oSn7yzcdOAGT9HZnuMNSjSuQrfJNqc1lB5gXpa0zf3\nwkrYKZImZNHkmGw6AIr1NJtl+O3jEP/9uElY3KDegjlrgbEWGWG5VLbmQwIBA6OC\nAdcwggHTMBEGCWCGSAGG+EIBAQQEAwIABzCCARkGA1UdHwSCARAwggEMMIHeoIHb\noIHYpIHVMIHSMQswCQYDVQQGEwJVUzEUMBIGA1UEChMLRW50cnVzdC5uZXQxOzA5\nBgNVBAsTMnd3dy5lbnRydXN0Lm5ldC9DUFMgaW5jb3JwLiBieSByZWYuIChsaW1p\ndHMgbGlhYi4pMSUwIwYDVQQLExwoYykgMTk5OSBFbnRydXN0Lm5ldCBMaW1pdGVk\nMTowOAYDVQQDEzFFbnRydXN0Lm5ldCBTZWN1cmUgU2VydmVyIENlcnRpZmljYXRp\nb24gQXV0aG9yaXR5MQ0wCwYDVQQDEwRDUkwxMCmgJ6AlhiNodHRwOi8vd3d3LmVu\ndHJ1c3QubmV0L0NSTC9uZXQxLmNybDArBgNVHRAEJDAigA8xOTk5MDUyNTE2MDk0\nMFqBDzIwMTkwNTI1MTYwOTQwWjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAU8Bdi\nE1U9s/8KAGv7UISX8+1i0BowHQYDVR0OBBYEFPAXYhNVPbP/CgBr+1CEl/PtYtAa\nMAwGA1UdEwQFMAMBAf8wGQYJKoZIhvZ9B0EABAwwChsEVjQuMAMCBJAwDQYJKoZI\nhvcNAQEFBQADgYEAkNwwAvpkdMKnCqV8IY00F6j7Rw7/JXyNEwr75Ji174z4xRAN\n95K+8cPV1ZVqBLssziY2ZcgxxufuP+NXdYR6Ee9GTxj005i7qIcyunL2POI9n9cd\n2cNgQ4xYDiKWL2KjLB+6rQXvqzJ4h6BUcxm1XAX5Uj5tLUUL9wqT6u0G+bI=\n-----END CERTIFICATE-----\n\n# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited\n# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. 
(limits liab.)/(c) 1999 Entrust.net Limited\n# Label: \"Entrust.net Premium 2048 Secure Server CA\"\n# Serial: 946059622\n# MD5 Fingerprint: ba:21:ea:20:d6:dd:db:8f:c1:57:8b:40:ad:a1:fc:fc\n# SHA1 Fingerprint: 80:1d:62:d0:7b:44:9d:5c:5c:03:5c:98:ea:61:fa:44:3c:2a:58:fe\n# SHA256 Fingerprint: d1:c3:39:ea:27:84:eb:87:0f:93:4f:c5:63:4e:4a:a9:ad:55:05:01:64:01:f2:64:65:d3:7a:57:46:63:35:9f\n-----BEGIN CERTIFICATE-----\nMIIEXDCCA0SgAwIBAgIEOGO5ZjANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML\nRW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp\nbmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5\nIEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp\nZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0xOTEy\nMjQxODIwNTFaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3\nLmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp\nYWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG\nA1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq\nK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe\nsYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX\nMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT\nXTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/\nHoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH\n4QIDAQABo3QwcjARBglghkgBhvhCAQEEBAMCAAcwHwYDVR0jBBgwFoAUVeSB0RGA\nvtiJuQijMfmhJAkWuXAwHQYDVR0OBBYEFFXkgdERgL7YibkIozH5oSQJFrlwMB0G\nCSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0BAQUFAAOCAQEA\nWUesIYSKF8mciVMeuoCFGsY8Tj6xnLZ8xpJdGGQC49MGCBFhfGPjK50xA3B20qMo\noPS7mmNz7W3lKtvtFKkrxjYR0CvrB4ul2p5cGZ1WEvVUKcgF7bISKo30Axv/55IQ\nh7A6tcOdBTcSo8f0FbnVpDkWm1M6I5HxqIKiaohowXkCIryqptau37AUX7iH0N18\nf3v/rxzP5tsHrV7bhZ3QKw0z2wTR5klAEyt2+z7pnIkPFc4YsIV4IU9rTw76NmfN\nB/L/CNDi3tm/Kq+4h4YhPATKt5Rof8886ZjXOP/swNlQ8C5LWK5Gb9Auw2DaclVy\nvUxFnmG6v4SBkgPR0ml8xQ==\n-----END CERTIFICATE-----\n\n# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust\n# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust\n# Label: \"Baltimore CyberTrust Root\"\n# Serial: 33554617\n# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4\n# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74\n# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb\n-----BEGIN 
CERTIFICATE-----\nMIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ\nRTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD\nVQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX\nDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y\nZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy\nVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr\nmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr\nIZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK\nmpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu\nXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy\ndc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye\njl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1\nBE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3\nDQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92\n9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx\njkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0\nEpn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz\nksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS\nR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp\n-----END CERTIFICATE-----\n\n# Issuer: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc.\n# Subject: CN=Equifax Secure Global eBusiness CA-1 O=Equifax Secure Inc.\n# Label: \"Equifax Secure Global eBusiness CA\"\n# Serial: 1\n# MD5 Fingerprint: 8f:5d:77:06:27:c4:98:3c:5b:93:78:e7:d7:7d:9b:cc\n# SHA1 Fingerprint: 7e:78:4a:10:1c:82:65:cc:2d:e1:f1:6d:47:b4:40:ca:d9:0a:19:45\n# SHA256 Fingerprint: 5f:0b:62:ea:b5:e3:53:ea:65:21:65:16:58:fb:b6:53:59:f4:43:28:0a:4a:fb:d1:04:d7:7d:10:f9:f0:4c:07\n-----BEGIN CERTIFICATE-----\nMIICkDCCAfmgAwIBAgIBATANBgkqhkiG9w0BAQQFADBaMQswCQYDVQQGEwJVUzEc\nMBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEtMCsGA1UEAxMkRXF1aWZheCBT\nZWN1cmUgR2xvYmFsIGVCdXNpbmVzcyBDQS0xMB4XDTk5MDYyMTA0MDAwMFoXDTIw\nMDYyMTA0MDAwMFowWjELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0VxdWlmYXggU2Vj\ndXJlIEluYy4xLTArBgNVBAMTJEVxdWlmYXggU2VjdXJlIEdsb2JhbCBlQnVzaW5l\nc3MgQ0EtMTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAuucXkAJlsTRVPEnC\nUdXfp9E3j9HngXNBUmCbnaEXJnitx7HoJpQytd4zjTov2/KaelpzmKNc6fuKcxtc\n58O/gGzNqfTWK8D3+ZmqY6KxRwIP1ORROhI8bIpaVIRw28HFkM9yRcuoWcDNM50/\no5brhTMhHD4ePmBudpxnhcXIw2ECAwEAAaNmMGQwEQYJYIZIAYb4QgEBBAQDAgAH\nMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUvqigdHJQa0S3ySPY+6j/s1dr\naGwwHQYDVR0OBBYEFL6ooHRyUGtEt8kj2Puo/7NXa2hsMA0GCSqGSIb3DQEBBAUA\nA4GBADDiAVGqx+pf2rnQZQ8w1j7aDRRJbpGTJxQx78T3LUX47Me/okENI7SS+RkA\nZ70Br83gcfxaz2TE4JaY0KNA4gGK7ycH8WUBikQtBmV1UsCGECAhX2xrD2yuCRyv\n8qIYNMR1pHMc8Y3c7635s3a0kr/clRAevsvIO1qEYBlWlKlV\n-----END CERTIFICATE-----\n\n# Issuer: CN=Equifax Secure eBusiness CA-1 O=Equifax Secure Inc.\n# Subject: CN=Equifax Secure eBusiness CA-1 O=Equifax Secure Inc.\n# Label: \"Equifax Secure eBusiness CA 1\"\n# Serial: 4\n# MD5 Fingerprint: 64:9c:ef:2e:44:fc:c6:8f:52:07:d0:51:73:8f:cb:3d\n# SHA1 Fingerprint: da:40:18:8b:91:89:a3:ed:ee:ae:da:97:fe:2f:9d:f5:b7:d1:8a:41\n# SHA256 Fingerprint: cf:56:ff:46:a4:a1:86:10:9d:d9:65:84:b5:ee:b5:8a:51:0c:42:75:b0:e5:f9:4f:40:bb:ae:86:5e:19:f6:73\n-----BEGIN 
CERTIFICATE-----\nMIICgjCCAeugAwIBAgIBBDANBgkqhkiG9w0BAQQFADBTMQswCQYDVQQGEwJVUzEc\nMBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5jLjEmMCQGA1UEAxMdRXF1aWZheCBT\nZWN1cmUgZUJ1c2luZXNzIENBLTEwHhcNOTkwNjIxMDQwMDAwWhcNMjAwNjIxMDQw\nMDAwWjBTMQswCQYDVQQGEwJVUzEcMBoGA1UEChMTRXF1aWZheCBTZWN1cmUgSW5j\nLjEmMCQGA1UEAxMdRXF1aWZheCBTZWN1cmUgZUJ1c2luZXNzIENBLTEwgZ8wDQYJ\nKoZIhvcNAQEBBQADgY0AMIGJAoGBAM4vGbwXt3fek6lfWg0XTzQaDJj0ItlZ1MRo\nRvC0NcWFAyDGr0WlIVFFQesWWDYyb+JQYmT5/VGcqiTZ9J2DKocKIdMSODRsjQBu\nWqDZQu4aIZX5UkxVWsUPOE9G+m34LjXWHXzr4vCwdYDIqROsvojvOm6rXyo4YgKw\nEnv+j6YDAgMBAAGjZjBkMBEGCWCGSAGG+EIBAQQEAwIABzAPBgNVHRMBAf8EBTAD\nAQH/MB8GA1UdIwQYMBaAFEp4MlIR21kWNl7fwRQ2QGpHfEyhMB0GA1UdDgQWBBRK\neDJSEdtZFjZe38EUNkBqR3xMoTANBgkqhkiG9w0BAQQFAAOBgQB1W6ibAxHm6VZM\nzfmpTMANmvPMZWnmJXbMWbfWVMMdzZmsGd20hdXgPfxiIKeES1hl8eL5lSE/9dR+\nWB5Hh1Q+WKG1tfgq73HnvMP2sUlG4tega+VWeponmHxGYhTnyfxuAxJ5gDgdSIKN\n/Bf+KpYrtWKmpj29f5JZzVoqgrI3eQ==\n-----END CERTIFICATE-----\n\n# Issuer: O=Equifax Secure OU=Equifax Secure eBusiness CA-2\n# Subject: O=Equifax Secure OU=Equifax Secure eBusiness CA-2\n# Label: \"Equifax Secure eBusiness CA 2\"\n# Serial: 930140085\n# MD5 Fingerprint: aa:bf:bf:64:97:da:98:1d:6f:c6:08:3a:95:70:33:ca\n# SHA1 Fingerprint: 39:4f:f6:85:0b:06:be:52:e5:18:56:cc:10:e1:80:e8:82:b3:85:cc\n# SHA256 Fingerprint: 2f:27:4e:48:ab:a4:ac:7b:76:59:33:10:17:75:50:6d:c3:0e:e3:8e:f6:ac:d5:c0:49:32:cf:e0:41:23:42:20\n-----BEGIN CERTIFICATE-----\nMIIDIDCCAomgAwIBAgIEN3DPtTANBgkqhkiG9w0BAQUFADBOMQswCQYDVQQGEwJV\nUzEXMBUGA1UEChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2Vj\ndXJlIGVCdXNpbmVzcyBDQS0yMB4XDTk5MDYyMzEyMTQ0NVoXDTE5MDYyMzEyMTQ0\nNVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkVxdWlmYXggU2VjdXJlMSYwJAYD\nVQQLEx1FcXVpZmF4IFNlY3VyZSBlQnVzaW5lc3MgQ0EtMjCBnzANBgkqhkiG9w0B\nAQEFAAOBjQAwgYkCgYEA5Dk5kx5SBhsoNviyoynF7Y6yEb3+6+e0dMKP/wXn2Z0G\nvxLIPw7y1tEkshHe0XMJitSxLJgJDR5QRrKDpkWNYmi7hRsgcDKqQM2mll/EcTc/\nBPO3QSQ5BxoeLmFYoBIL5aXfxavqN3HMHMg3OrmXUqesxWoklE6ce8/AatbfIb0C\nAwEAAaOCAQkwggEFMHAGA1UdHwRpMGcwZaBjoGGkXzBdMQswCQYDVQQGEwJVUzEX\nMBUGA1UEChMORXF1aWZheCBTZWN1cmUxJjAkBgNVBAsTHUVxdWlmYXggU2VjdXJl\nIGVCdXNpbmVzcyBDQS0yMQ0wCwYDVQQDEwRDUkwxMBoGA1UdEAQTMBGBDzIwMTkw\nNjIzMTIxNDQ1WjALBgNVHQ8EBAMCAQYwHwYDVR0jBBgwFoAUUJ4L6q9euSBIplBq\ny/3YIHqngnYwHQYDVR0OBBYEFFCeC+qvXrkgSKZQasv92CB6p4J2MAwGA1UdEwQF\nMAMBAf8wGgYJKoZIhvZ9B0EABA0wCxsFVjMuMGMDAgbAMA0GCSqGSIb3DQEBBQUA\nA4GBAAyGgq3oThr1jokn4jVYPSm0B482UJW/bsGe68SQsoWou7dC4A8HOd/7npCy\n0cE+U58DRLB+S/Rv5Hwf5+Kx5Lia78O9zt4LMjTZ3ijtM2vE1Nc9ElirfQkty3D1\nE4qUoSek1nDFbZS1yX2doNLGCEnZZpum0/QL3MUmV+GRMOrN\n-----END CERTIFICATE-----\n\n# Issuer: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network\n# Subject: CN=AddTrust Class 1 CA Root O=AddTrust AB OU=AddTrust TTP Network\n# Label: \"AddTrust Low-Value Services Root\"\n# Serial: 1\n# MD5 Fingerprint: 1e:42:95:02:33:92:6b:b9:5f:c0:7f:da:d6:b2:4b:fc\n# SHA1 Fingerprint: cc:ab:0e:a0:4c:23:01:d6:69:7b:dd:37:9f:cd:12:eb:24:e3:94:9d\n# SHA256 Fingerprint: 8c:72:09:27:9a:c0:4e:27:5e:16:d0:7f:d3:b7:75:e8:01:54:b5:96:80:46:e3:1f:52:dd:25:76:63:24:e9:a7\n-----BEGIN 
CERTIFICATE-----\nMIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEU\nMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3\nb3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMw\nMTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML\nQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYD\nVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUA\nA4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ul\nCDtbKRY654eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6n\ntGO0/7Gcrjyvd7ZWxbWroulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyl\ndI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1Zmne3yzxbrww2ywkEtvrNTVokMsAsJch\nPXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJuiGMx1I4S+6+JNM3GOGvDC\n+Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8wHQYDVR0O\nBBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8E\nBTADAQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBl\nMQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFk\nZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENB\nIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxtZBsfzQ3duQH6lmM0MkhHma6X\n7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0PhiVYrqW9yTkkz\n43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY\neDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJl\npz/+0WatC7xrmYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOA\nWiFeIc9TVPC6b4nbqKqVz4vjccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk=\n-----END CERTIFICATE-----\n\n# Issuer: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network\n# Subject: CN=AddTrust External CA Root O=AddTrust AB OU=AddTrust External TTP Network\n# Label: \"AddTrust External Root\"\n# Serial: 1\n# MD5 Fingerprint: 1d:35:54:04:85:78:b0:3f:42:42:4d:bf:20:73:0a:3f\n# SHA1 Fingerprint: 02:fa:f3:e2:91:43:54:68:60:78:57:69:4d:f5:e4:5b:68:85:18:68\n# SHA256 Fingerprint: 68:7f:a4:51:38:22:78:ff:f0:c8:b1:1f:8d:43:d5:76:67:1c:6e:b2:bc:ea:b4:13:fb:83:d9:65:d0:6d:2f:f2\n-----BEGIN CERTIFICATE-----\nMIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU\nMBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs\nIFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290\nMB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux\nFDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h\nbCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v\ndDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt\nH7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9\nuMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX\nmk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX\na0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN\nE0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0\nWicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD\nVR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0\nJvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU\ncnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx\nIjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN\nAQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH\nYINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5\n6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC\nNr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX\nc4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a\nmnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ=\n-----END CERTIFICATE-----\n\n# Issuer: 
CN=AddTrust Public CA Root O=AddTrust AB OU=AddTrust TTP Network\n# Subject: CN=AddTrust Public CA Root O=AddTrust AB OU=AddTrust TTP Network\n# Label: \"AddTrust Public Services Root\"\n# Serial: 1\n# MD5 Fingerprint: c1:62:3e:23:c5:82:73:9c:03:59:4b:2b:e9:77:49:7f\n# SHA1 Fingerprint: 2a:b6:28:48:5e:78:fb:f3:ad:9e:79:10:dd:6b:df:99:72:2c:96:e5\n# SHA256 Fingerprint: 07:91:ca:07:49:b2:07:82:aa:d3:c7:d7:bd:0c:df:c9:48:58:35:84:3e:b2:d7:99:60:09:ce:43:ab:6c:69:27\n-----BEGIN CERTIFICATE-----\nMIIEFTCCAv2gAwIBAgIBATANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJTRTEU\nMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3\nb3JrMSAwHgYDVQQDExdBZGRUcnVzdCBQdWJsaWMgQ0EgUm9vdDAeFw0wMDA1MzAx\nMDQxNTBaFw0yMDA1MzAxMDQxNTBaMGQxCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtB\nZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIDAeBgNV\nBAMTF0FkZFRydXN0IFB1YmxpYyBDQSBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOC\nAQ8AMIIBCgKCAQEA6Rowj4OIFMEg2Dybjxt+A3S72mnTRqX4jsIMEZBRpS9mVEBV\n6tsfSlbunyNu9DnLoblv8n75XYcmYZ4c+OLspoH4IcUkzBEMP9smcnrHAZcHF/nX\nGCwwfQ56HmIexkvA/X1id9NEHif2P0tEs7c42TkfYNVRknMDtABp4/MUTu7R3AnP\ndzRGULD4EfL+OHn3Bzn+UZKXC1sIXzSGAa2Il+tmzV7R/9x98oTaunet3IAIx6eH\n1lWfl2royBFkuucZKT8Rs3iQhCBSWxHveNCD9tVIkNAwHM+A+WD+eeSI8t0A65RF\n62WUaUC6wNW0uLp9BBGo6zEFlpROWCGOn9Bg/QIDAQABo4HRMIHOMB0GA1UdDgQW\nBBSBPjfYkrAfd59ctKtzquf2NGAv+jALBgNVHQ8EBAMCAQYwDwYDVR0TAQH/BAUw\nAwEB/zCBjgYDVR0jBIGGMIGDgBSBPjfYkrAfd59ctKtzquf2NGAv+qFopGYwZDEL\nMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQLExRBZGRU\ncnVzdCBUVFAgTmV0d29yazEgMB4GA1UEAxMXQWRkVHJ1c3QgUHVibGljIENBIFJv\nb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBAAP3FUr4JNojVhaTdt02KLmuG7jD8WS6\nIBh4lSknVwW8fCr0uVFV2ocC3g8WFzH4qnkuCRO7r7IgGRLlk/lL+YPoRNWyQSW/\niHVv/xD8SlTQX/D67zZzfRs2RcYhbbQVuE7PnFylPVoAjgbjPGsye/Kf8Lb93/Ao\nGEjwxrzQvzSAlsJKsW2Ox5BF3i9nrEUEo3rcVZLJR2bYGozH7ZxOmuASu7VqTITh\n4SINhwBk/ox9Yjllpu9CtoAlEmEBqCQTcAARJl/6NVDFSMwGR+gn2HCNX2TmoUQm\nXiLsks3/QppEIW1cxeMiHV9HEufOX1362KqxMy3ZdvJOOjMMK7MtkAY=\n-----END CERTIFICATE-----\n\n# Issuer: CN=AddTrust Qualified CA Root O=AddTrust AB OU=AddTrust TTP Network\n# Subject: CN=AddTrust Qualified CA Root O=AddTrust AB OU=AddTrust TTP Network\n# Label: \"AddTrust Qualified Certificates Root\"\n# Serial: 1\n# MD5 Fingerprint: 27:ec:39:47:cd:da:5a:af:e2:9a:01:65:21:a9:4c:bb\n# SHA1 Fingerprint: 4d:23:78:ec:91:95:39:b5:00:7f:75:8f:03:3b:21:1e:c5:4d:8b:cf\n# SHA256 Fingerprint: 80:95:21:08:05:db:4b:bc:35:5e:44:28:d8:fd:6e:c2:cd:e3:ab:5f:b9:7a:99:42:98:8e:b8:f4:dc:d0:60:16\n-----BEGIN 
CERTIFICATE-----\nMIIEHjCCAwagAwIBAgIBATANBgkqhkiG9w0BAQUFADBnMQswCQYDVQQGEwJTRTEU\nMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3\nb3JrMSMwIQYDVQQDExpBZGRUcnVzdCBRdWFsaWZpZWQgQ0EgUm9vdDAeFw0wMDA1\nMzAxMDQ0NTBaFw0yMDA1MzAxMDQ0NTBaMGcxCzAJBgNVBAYTAlNFMRQwEgYDVQQK\nEwtBZGRUcnVzdCBBQjEdMBsGA1UECxMUQWRkVHJ1c3QgVFRQIE5ldHdvcmsxIzAh\nBgNVBAMTGkFkZFRydXN0IFF1YWxpZmllZCBDQSBSb290MIIBIjANBgkqhkiG9w0B\nAQEFAAOCAQ8AMIIBCgKCAQEA5B6a/twJWoekn0e+EV+vhDTbYjx5eLfpMLXsDBwq\nxBb/4Oxx64r1EW7tTw2R0hIYLUkVAcKkIhPHEWT/IhKauY5cLwjPcWqzZwFZ8V1G\n87B4pfYOQnrjfxvM0PC3KP0q6p6zsLkEqv32x7SxuCqg+1jxGaBvcCV+PmlKfw8i\n2O+tCBGaKZnhqkRFmhJePp1tUvznoD1oL/BLcHwTOK28FSXx1s6rosAx1i+f4P8U\nWfyEk9mHfExUE+uf0S0R+Bg6Ot4l2ffTQO2kBhLEO+GRwVY18BTcZTYJbqukB8c1\n0cIDMzZbdSZtQvESa0NvS3GU+jQd7RNuyoB/mC9suWXY6QIDAQABo4HUMIHRMB0G\nA1UdDgQWBBQ5lYtii1zJ1IC6WA+XPxUIQ8yYpzALBgNVHQ8EBAMCAQYwDwYDVR0T\nAQH/BAUwAwEB/zCBkQYDVR0jBIGJMIGGgBQ5lYtii1zJ1IC6WA+XPxUIQ8yYp6Fr\npGkwZzELMAkGA1UEBhMCU0UxFDASBgNVBAoTC0FkZFRydXN0IEFCMR0wGwYDVQQL\nExRBZGRUcnVzdCBUVFAgTmV0d29yazEjMCEGA1UEAxMaQWRkVHJ1c3QgUXVhbGlm\naWVkIENBIFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBABmrder4i2VhlRO6aQTv\nhsoToMeqT2QbPxj2qC0sVY8FtzDqQmodwCVRLae/DLPt7wh/bDxGGuoYQ992zPlm\nhpwsaPXpF/gxsxjE1kh9I0xowX67ARRvxdlu3rsEQmr49lx95dr6h+sNNVJn0J6X\ndgWTP5XHAeZpVTh/EGGZyeNfpso+gmNIquIISD6q8rKFYqa0p9m9N5xotS1WfbC3\nP6CxB9bpT9zeRXEwMn8bLgn5v1Kh7sKAPgZcLlVAwRv1cEWw3F369nJad9Jjzc9Y\niQBCYz95OdBEsIJuQRno3eDBiFrRHnGTHyQwdOUeqN48Jzd/g66ed8/wMLH/S5no\nxqE=\n-----END CERTIFICATE-----\n\n# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.\n# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.\n# Label: \"Entrust Root Certification Authority\"\n# Serial: 1164660820\n# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4\n# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9\n# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c\n-----BEGIN 
CERTIFICATE-----\nMIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC\nVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0\nLm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW\nKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl\ncnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw\nNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw\nNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy\nZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV\nBAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ\nKoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo\nNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4\n4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9\nKlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI\nrb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi\n94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB\nsDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi\ngA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo\nkORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE\nvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA\nA4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t\nO1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua\nAGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP\n9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/\neu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m\n0vdXcDazv/wor3ElhVsT/h5/WrQ8\n-----END CERTIFICATE-----\n\n# Issuer: CN=GeoTrust Global CA O=GeoTrust Inc.\n# Subject: CN=GeoTrust Global CA O=GeoTrust Inc.\n# Label: \"GeoTrust Global CA\"\n# Serial: 144470\n# MD5 Fingerprint: f7:75:ab:29:fb:51:4e:b7:77:5e:ff:05:3c:99:8e:f5\n# SHA1 Fingerprint: de:28:f4:a4:ff:e5:b9:2f:a3:c5:03:d1:a3:49:a7:f9:96:2a:82:12\n# SHA256 Fingerprint: ff:85:6a:2d:25:1d:cd:88:d3:66:56:f4:50:12:67:98:cf:ab:aa:de:40:79:9c:72:2d:e4:d2:b5:db:36:a7:3a\n-----BEGIN CERTIFICATE-----\nMIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT\nMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i\nYWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG\nEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg\nR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9\n9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq\nfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv\niS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU\n1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+\nbw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW\nMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA\nephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l\nuMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn\nZ57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS\ntQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF\nPseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un\nhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV\n5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw==\n-----END CERTIFICATE-----\n\n# Issuer: CN=GeoTrust Global CA 2 O=GeoTrust Inc.\n# Subject: CN=GeoTrust Global CA 2 O=GeoTrust Inc.\n# Label: \"GeoTrust Global CA 2\"\n# Serial: 1\n# MD5 Fingerprint: 0e:40:a7:6c:de:03:5d:8f:d1:0f:e4:d1:8d:f9:6c:a9\n# SHA1 
Fingerprint: a9:e9:78:08:14:37:58:88:f2:05:19:b0:6d:2b:0d:2b:60:16:90:7d\n# SHA256 Fingerprint: ca:2d:82:a0:86:77:07:2f:8a:b6:76:4f:f0:35:67:6c:fe:3e:5e:32:5e:01:21:72:df:3f:92:09:6d:b7:9b:85\n-----BEGIN CERTIFICATE-----\nMIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBEMQswCQYDVQQGEwJVUzEW\nMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3QgR2xvYmFs\nIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMTkwMzA0MDUwMDAwWjBEMQswCQYDVQQG\nEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEdMBsGA1UEAxMUR2VvVHJ1c3Qg\nR2xvYmFsIENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDvPE1A\nPRDfO1MA4Wf+lGAVPoWI8YkNkMgoI5kF6CsgncbzYEbYwbLVjDHZ3CB5JIG/NTL8\nY2nbsSpr7iFY8gjpeMtvy/wWUsiRxP89c96xPqfCfWbB9X5SJBri1WeR0IIQ13hL\nTytCOb1kLUCgsBDTOEhGiKEMuzozKmKY+wCdE1l/bztyqu6mD4b5BWHqZ38MN5aL\n5mkWRxHCJ1kDs6ZgwiFAVvqgx306E+PsV8ez1q6diYD3Aecs9pYrEw15LNnA5IZ7\nS4wMcoKK+xfNAGw6EzywhIdLFnopsk/bHdQL82Y3vdj2V7teJHq4PIu5+pIaGoSe\n2HSPqht/XvT+RSIhAgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE\nFHE4NvICMVNHK266ZUapEBVYIAUJMB8GA1UdIwQYMBaAFHE4NvICMVNHK266ZUap\nEBVYIAUJMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQUFAAOCAQEAA/e1K6td\nEPx7srJerJsOflN4WT5CBP51o62sgU7XAotexC3IUnbHLB/8gTKY0UvGkpMzNTEv\n/NgdRN3ggX+d6YvhZJFiCzkIjKx0nVnZellSlxG5FntvRdOW2TF9AjYPnDtuzywN\nA0ZF66D0f0hExghAzN4bcLUprbqLOzRldRtxIR0sFAqwlpW41uryZfspuk/qkZN0\nabby/+Ea0AzRdoXLiiW9l14sbxWZJue2Kf8i7MkCx1YAzUm5s2x7UwQa4qjJqhIF\nI8LO57sEAszAR6LkxCkvW0VXiVHuPOtSCP8HNR6fNWpHSlaY0VqFH4z1Ir+rzoPz\n4iIprn2DQKi6bA==\n-----END CERTIFICATE-----\n\n# Issuer: CN=GeoTrust Universal CA O=GeoTrust Inc.\n# Subject: CN=GeoTrust Universal CA O=GeoTrust Inc.\n# Label: \"GeoTrust Universal CA\"\n# Serial: 1\n# MD5 Fingerprint: 92:65:58:8b:a2:1a:31:72:73:68:5c:b4:a5:7a:07:48\n# SHA1 Fingerprint: e6:21:f3:35:43:79:05:9a:4b:68:30:9d:8a:2f:74:22:15:87:ec:79\n# SHA256 Fingerprint: a0:45:9b:9f:63:b2:25:59:f5:fa:5d:4c:6d:b3:f9:f7:2f:f1:93:42:03:35:78:f0:73:bf:1d:1b:46:cb:b9:12\n-----BEGIN 
CERTIFICATE-----\nMIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEW\nMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVy\nc2FsIENBMB4XDTA0MDMwNDA1MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UE\nBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xHjAcBgNVBAMTFUdlb1RydXN0\nIFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKYV\nVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9tJPi8\ncQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTT\nQjOgNB0eRXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFh\nF7em6fgemdtzbvQKoiFs7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2v\nc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d8Lsrlh/eezJS/R27tQahsiFepdaVaH/w\nmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7VqnJNk22CDtucvc+081xd\nVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3CgaRr0BHdCX\nteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZ\nf9hBZ3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfRe\nBi9Fi1jUIxaS5BZuKGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+\nnhutxx9z3SxPGWX9f5NAEC7S8O08ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB\n/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0XG0D08DYj3rWMB8GA1UdIwQY\nMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG\n9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc\naanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fX\nIwjhmF7DWgh2qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzyn\nANXH/KttgCJwpQzgXQQpAvvLoJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0z\nuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsKxr2EoyNB3tZ3b4XUhRxQ4K5RirqN\nPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxFKyDuSN/n3QmOGKja\nQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2DFKW\nkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9\nER/frslKxfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQt\nDF4JbAiXfKM9fJP/P6EUp8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/Sfuvm\nbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vIP/rmMuGNG2+k5o7Y+SlIis5z/iw=\n-----END CERTIFICATE-----\n\n# Issuer: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.\n# Subject: CN=GeoTrust Universal CA 2 O=GeoTrust Inc.\n# Label: \"GeoTrust Universal CA 2\"\n# Serial: 1\n# MD5 Fingerprint: 34:fc:b8:d0:36:db:9e:14:b3:c2:f2:db:8f:e4:94:c7\n# SHA1 Fingerprint: 37:9a:19:7b:41:85:45:35:0c:a6:03:69:f3:3c:2e:af:47:4f:20:79\n# SHA256 Fingerprint: a0:23:4f:3b:c8:52:7c:a5:62:8e:ec:81:ad:5d:69:89:5d:a5:68:0d:c9:1d:1c:b8:47:7f:33:f8:78:b9:5b:0b\n-----BEGIN 
CERTIFICATE-----\nMIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEW\nMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVy\nc2FsIENBIDIwHhcNMDQwMzA0MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYD\nVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1\nc3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC\nAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0DE81\nWzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUG\nFF+3Qs17j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdq\nXbboW0W63MOhBW9Wjo8QJqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxL\nse4YuU6W3Nx2/zu+z18DwPw76L5GG//aQMJS9/7jOvdqdzXQ2o3rXhhqMcceujwb\nKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2WP0+GfPtDCapkzj4T8Fd\nIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP20gaXT73\ny/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRt\nhAAnZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgoc\nQIgfksILAAX/8sgCSqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4\nLt1ZrtmhN79UNdxzMk+MBB4zsslG8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNV\nHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAfBgNV\nHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8EBAMCAYYwDQYJ\nKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z\ndXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQ\nL1EuxBRa3ugZ4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgr\nFg5fNuH8KrUwJM/gYwx7WBr+mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSo\nag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpqA1Ihn0CoZ1Dy81of398j9tx4TuaY\nT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpgY+RdM4kX2TGq2tbz\nGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiPpm8m\n1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJV\nOCiNUW7dFGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH\n6aLcr34YEoP9VhdBLtUpgn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwX\nQMAJKOSLakhT2+zNVVXxxvjpoixMptEmX36vWkzaH6byHCx+rgIW0lbQL1dTR+iS\n-----END CERTIFICATE-----\n\n# Issuer: CN=America Online Root Certification Authority 1 O=America Online Inc.\n# Subject: CN=America Online Root Certification Authority 1 O=America Online Inc.\n# Label: \"America Online Root Certification Authority 1\"\n# Serial: 1\n# MD5 Fingerprint: 14:f1:08:ad:9d:fa:64:e2:89:e7:1c:cf:a8:ad:7d:5e\n# SHA1 Fingerprint: 39:21:c1:15:c1:5d:0e:ca:5c:cb:5b:c4:f0:7d:21:d8:05:0b:56:6a\n# SHA256 Fingerprint: 77:40:73:12:c6:3a:15:3d:5b:c0:0b:4e:51:75:9c:df:da:c2:37:dc:2a:33:b6:79:46:e9:8e:9b:fa:68:0a:e3\n-----BEGIN 
CERTIFICATE-----\nMIIDpDCCAoygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEc\nMBoGA1UEChMTQW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBP\nbmxpbmUgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAxMB4XDTAyMDUyODA2\nMDAwMFoXDTM3MTExOTIwNDMwMFowYzELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0Ft\nZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2EgT25saW5lIFJvb3Qg\nQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMTCCASIwDQYJKoZIhvcNAQEBBQADggEP\nADCCAQoCggEBAKgv6KRpBgNHw+kqmP8ZonCaxlCyfqXfaE0bfA+2l2h9LaaLl+lk\nhsmj76CGv2BlnEtUiMJIxUo5vxTjWVXlGbR0yLQFOVwWpeKVBeASrlmLojNoWBym\n1BW32J/X3HGrfpq/m44zDyL9Hy7nBzbvYjnF3cu6JRQj3gzGPTzOggjmZj7aUTsW\nOqMFf6Dch9Wc/HKpoH145LcxVR5lu9RhsCFg7RAycsWSJR74kEoYeEfffjA3PlAb\n2xzTa5qGUwew76wGePiEmf4hjUyAtgyC9mZweRrTT6PP8c9GsEsPPt2IYriMqQko\nO3rHl+Ee5fSfwMCuJKDIodkP1nsmgmkyPacCAwEAAaNjMGEwDwYDVR0TAQH/BAUw\nAwEB/zAdBgNVHQ4EFgQUAK3Zo/Z59m50qX8zPYEX10zPM94wHwYDVR0jBBgwFoAU\nAK3Zo/Z59m50qX8zPYEX10zPM94wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB\nBQUAA4IBAQB8itEfGDeC4Liwo+1WlchiYZwFos3CYiZhzRAW18y0ZTTQEYqtqKkF\nZu90821fnZmv9ov761KyBZiibyrFVL0lvV+uyIbqRizBs73B6UlwGBaXCBOMIOAb\nLjpHyx7kADCVW/RFo8AasAFOq73AI25jP4BKxQft3OJvx8Fi8eNy1gTIdGcL+oir\noQHIb/AUr9KZzVGTfu0uOMe9zkZQPXLjeSWdm4grECDdpbgyn43gKd8hdIaC2y+C\nMMbHNYaz+ZZfRtsMRf3zUMNvxsNIrUam4SdHCh0Om7bCd39j8uB9Gr784N/Xx6ds\nsPmuujz9dLQR6FgNgLzTqIA6me11zEZ7\n-----END CERTIFICATE-----\n\n# Issuer: CN=America Online Root Certification Authority 2 O=America Online Inc.\n# Subject: CN=America Online Root Certification Authority 2 O=America Online Inc.\n# Label: \"America Online Root Certification Authority 2\"\n# Serial: 1\n# MD5 Fingerprint: d6:ed:3c:ca:e2:66:0f:af:10:43:0d:77:9b:04:09:bf\n# SHA1 Fingerprint: 85:b5:ff:67:9b:0c:79:96:1f:c8:6e:44:22:00:46:13:db:17:92:84\n# SHA256 Fingerprint: 7d:3b:46:5a:60:14:e5:26:c0:af:fc:ee:21:27:d2:31:17:27:ad:81:1c:26:84:2d:00:6a:f3:73:06:cc:80:bd\n-----BEGIN 
CERTIFICATE-----\nMIIFpDCCA4ygAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEc\nMBoGA1UEChMTQW1lcmljYSBPbmxpbmUgSW5jLjE2MDQGA1UEAxMtQW1lcmljYSBP\nbmxpbmUgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAyMB4XDTAyMDUyODA2\nMDAwMFoXDTM3MDkyOTE0MDgwMFowYzELMAkGA1UEBhMCVVMxHDAaBgNVBAoTE0Ft\nZXJpY2EgT25saW5lIEluYy4xNjA0BgNVBAMTLUFtZXJpY2EgT25saW5lIFJvb3Qg\nQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIP\nADCCAgoCggIBAMxBRR3pPU0Q9oyxQcngXssNt79Hc9PwVU3dxgz6sWYFas14tNwC\n206B89enfHG8dWOgXeMHDEjsJcQDIPT/DjsS/5uN4cbVG7RtIuOx238hZK+GvFci\nKtZHgVdEglZTvYYUAQv8f3SkWq7xuhG1m1hagLQ3eAkzfDJHA1zEpYNI9FdWboE2\nJxhP7JsowtS013wMPgwr38oE18aO6lhOqKSlGBxsRZijQdEt0sdtjRnxrXm3gT+9\nBoInLRBYBbV4Bbkv2wxrkJB+FFk4u5QkE+XRnRTf04JNRvCAOVIyD+OEsnpD8l7e\nXz8d3eOyG6ChKiMDbi4BFYdcpnV1x5dhvt6G3NRI270qv0pV2uh9UPu0gBe4lL8B\nPeraunzgWGcXuVjgiIZGZ2ydEEdYMtA1fHkqkKJaEBEjNa0vzORKW6fIJ/KD3l67\nXnfn6KVuY8INXWHQjNJsWiEOyiijzirplcdIz5ZvHZIlyMbGwcEMBawmxNJ10uEq\nZ8A9W6Wa6897GqidFEXlD6CaZd4vKL3Ob5Rmg0gp2OpljK+T2WSfVVcmv2/LNzGZ\no2C7HK2JNDJiuEMhBnIMoVxtRsX6Kc8w3onccVvdtjc+31D1uAclJuW8tf48ArO3\n+L5DwYcRlJ4jbBeKuIonDFRH8KmzwICMoCfrHRnjB453cMor9H124HhnAgMBAAGj\nYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFE1FwWg4u3OpaaEg5+31IqEj\nFNeeMB8GA1UdIwQYMBaAFE1FwWg4u3OpaaEg5+31IqEjFNeeMA4GA1UdDwEB/wQE\nAwIBhjANBgkqhkiG9w0BAQUFAAOCAgEAZ2sGuV9FOypLM7PmG2tZTiLMubekJcmn\nxPBUlgtk87FYT15R/LKXeydlwuXK5w0MJXti4/qftIe3RUavg6WXSIylvfEWK5t2\nLHo1YGwRgJfMqZJS5ivmae2p+DYtLHe/YUjRYwu5W1LtGLBDQiKmsXeu3mnFzccc\nobGlHBD7GL4acN3Bkku+KVqdPzW+5X1R+FXgJXUjhx5c3LqdsKyzadsXg8n33gy8\nCNyRnqjQ1xU3c6U1uPx+xURABsPr+CKAXEfOAuMRn0T//ZoyzH1kUQ7rVyZ2OuMe\nIjzCpjbdGe+n/BLzJsBZMYVMnNjP36TMzCmT/5RtdlwTCJfy7aULTd3oyWgOZtMA\nDjMSW7yV5TKQqLPGbIOtd+6Lfn6xqavT4fG2wLHqiMDn05DpKJKUe2h7lyoKZy2F\nAjgQ5ANh1NolNscIWC2hp1GvMApJ9aZphwctREZ2jirlmjvXGKL8nDgQzMY70rUX\nOm/9riW99XJZZLF0KjhfGEzfz3EEWjbUvy+ZnOjZurGV5gJLIaFb1cFPj65pbVPb\nAZO1XB4Y3WRayhgoPmMEEf0cjQAPuDffZ4qdZqkCapH/E8ovXYO8h5Ns3CRRFgQl\nZvqz2cK6Kb6aSDiCmfS/O0oxGfm/jiEzFMpPVF/7zvuPcX/9XhmgD0uRuMRUvAaw\nRY8mkaKO/qk=\n-----END CERTIFICATE-----\n\n# Issuer: CN=AAA Certificate Services O=Comodo CA Limited\n# Subject: CN=AAA Certificate Services O=Comodo CA Limited\n# Label: \"Comodo AAA Services root\"\n# Serial: 1\n# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0\n# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49\n# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4\n-----BEGIN 
CERTIFICATE-----\nMIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb\nMBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow\nGAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj\nYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL\nMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE\nBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM\nGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP\nADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua\nBtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe\n3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4\nYgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR\nrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm\nez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU\noBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF\nMAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v\nQUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t\nb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF\nAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q\nGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz\nRt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2\nG9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi\nl2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3\nsmPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==\n-----END CERTIFICATE-----\n\n# Issuer: CN=Secure Certificate Services O=Comodo CA Limited\n# Subject: CN=Secure Certificate Services O=Comodo CA Limited\n# Label: \"Comodo Secure Services root\"\n# Serial: 1\n# MD5 Fingerprint: d3:d9:bd:ae:9f:ac:67:24:b3:c8:1b:52:e1:b9:a9:bd\n# SHA1 Fingerprint: 4a:65:d5:f4:1d:ef:39:b8:b8:90:4a:4a:d3:64:81:33:cf:c7:a1:d1\n# SHA256 Fingerprint: bd:81:ce:3b:4f:65:91:d1:1a:67:b5:fc:7a:47:fd:ef:25:52:1b:f9:aa:4e:18:b9:e3:df:2e:34:a7:80:3b:e8\n-----BEGIN CERTIFICATE-----\nMIIEPzCCAyegAwIBAgIBATANBgkqhkiG9w0BAQUFADB+MQswCQYDVQQGEwJHQjEb\nMBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow\nGAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEkMCIGA1UEAwwbU2VjdXJlIENlcnRp\nZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVow\nfjELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G\nA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxJDAiBgNV\nBAMMG1NlY3VyZSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEB\nBQADggEPADCCAQoCggEBAMBxM4KK0HDrc4eCQNUd5MvJDkKQ+d40uaG6EfQlhfPM\ncm3ye5drswfxdySRXyWP9nQ95IDC+DwN879A6vfIUtFyb+/Iq0G4bi4XKpVpDM3S\nHpR7LZQdqnXXs5jLrLxkU0C8j6ysNstcrbvd4JQX7NFc0L/vpZXJkMWwrPsbQ996\nCF23uPJAGysnnlDOXmWCiIxe004MeuoIkbY2qitC++rCoznl2yY4rYsK7hljxxwk\n3wN42ubqwUcaCwtGCd0C/N7Lh1/XMGNooa7cMqG6vv5Eq2i2pRcV/b3Vp6ea5EQz\n6YiO/O1R65NxTq0B50SOqy3LqP4BSUjwwN3HaNiS/j0CAwEAAaOBxzCBxDAdBgNV\nHQ4EFgQUPNiTiMLAggnMAZkGkyDpnnAJY08wDgYDVR0PAQH/BAQDAgEGMA8GA1Ud\nEwEB/wQFMAMBAf8wgYEGA1UdHwR6MHgwO6A5oDeGNWh0dHA6Ly9jcmwuY29tb2Rv\nY2EuY29tL1NlY3VyZUNlcnRpZmljYXRlU2VydmljZXMuY3JsMDmgN6A1hjNodHRw\nOi8vY3JsLmNvbW9kby5uZXQvU2VjdXJlQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmww\nDQYJKoZIhvcNAQEFBQADggEBAIcBbSMdflsXfcFhMs+P5/OKlFlm4J4oqF7Tt/Q0\n5qo5spcWxYJvMqTpjOev/e/C6LlLqqP05tqNZSH7uoDrJiiFGv45jN5bBAS0VPmj\nZ55B+glSzAVIqMk/IQQezkhr/IXownuvf7fM+F86/TXGDe+X3EyrEeFryzHRbPtI\ngKvcnDe4IRRLDXE97IMzbtFuMhbsmMcWi1mmNKsFVy2T96oTy9IT4rcuO81rUBcJ\naD61JlfutuC23bkpgHl9j6PwpCikFcSF9CfUa7/lXORlAnZUtOM3ZiTTGWHIUhDl\nizeauan5Hb/qmZJhlv8BzaFfDbxxvA6sCx1HRR3B7Hzs/Sk=\n-----END CERTIFICATE-----\n\n# Issuer: 
CN=Trusted Certificate Services O=Comodo CA Limited\n# Subject: CN=Trusted Certificate Services O=Comodo CA Limited\n# Label: \"Comodo Trusted Services root\"\n# Serial: 1\n# MD5 Fingerprint: 91:1b:3f:6e:cd:9e:ab:ee:07:fe:1f:71:d2:b3:61:27\n# SHA1 Fingerprint: e1:9f:e3:0e:8b:84:60:9e:80:9b:17:0d:72:a8:c5:ba:6e:14:09:bd\n# SHA256 Fingerprint: 3f:06:e5:56:81:d4:96:f5:be:16:9e:b5:38:9f:9f:2b:8f:f6:1e:17:08:df:68:81:72:48:49:cd:5d:27:cb:69\n-----BEGIN CERTIFICATE-----\nMIIEQzCCAyugAwIBAgIBATANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJHQjEb\nMBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow\nGAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDElMCMGA1UEAwwcVHJ1c3RlZCBDZXJ0\naWZpY2F0ZSBTZXJ2aWNlczAeFw0wNDAxMDEwMDAwMDBaFw0yODEyMzEyMzU5NTla\nMH8xCzAJBgNVBAYTAkdCMRswGQYDVQQIDBJHcmVhdGVyIE1hbmNoZXN0ZXIxEDAO\nBgNVBAcMB1NhbGZvcmQxGjAYBgNVBAoMEUNvbW9kbyBDQSBMaW1pdGVkMSUwIwYD\nVQQDDBxUcnVzdGVkIENlcnRpZmljYXRlIFNlcnZpY2VzMIIBIjANBgkqhkiG9w0B\nAQEFAAOCAQ8AMIIBCgKCAQEA33FvNlhTWvI2VFeAxHQIIO0Yfyod5jWaHiWsnOWW\nfnJSoBVC21ndZHoa0Lh73TkVvFVIxO06AOoxEbrycXQaZ7jPM8yoMa+j49d/vzMt\nTGo87IvDktJTdyR0nAducPy9C1t2ul/y/9c3S0pgePfw+spwtOpZqqPOSC+pw7IL\nfhdyFgymBwwbOM/JYrc/oJOlh0Hyt3BAd9i+FHzjqMB6juljatEPmsbS9Is6FARW\n1O24zG71++IsWL1/T2sr92AkWCTOJu80kTrV44HQsvAEAtdbtz6SrGsSivnkBbA7\nkUlcsutT6vifR4buv5XAwAaf0lteERv0xwQ1KdJVXOTt6wIDAQABo4HJMIHGMB0G\nA1UdDgQWBBTFe1i97doladL3WRaoszLAeydb9DAOBgNVHQ8BAf8EBAMCAQYwDwYD\nVR0TAQH/BAUwAwEB/zCBgwYDVR0fBHwwejA8oDqgOIY2aHR0cDovL2NybC5jb21v\nZG9jYS5jb20vVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMuY3JsMDqgOKA2hjRo\ndHRwOi8vY3JsLmNvbW9kby5uZXQvVHJ1c3RlZENlcnRpZmljYXRlU2VydmljZXMu\nY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQDIk4E7ibSvuIQSTI3S8NtwuleGFTQQuS9/\nHrCoiWChisJ3DFBKmwCL2Iv0QeLQg4pKHBQGsKNoBXAxMKdTmw7pSqBYaWcOrp32\npSxBvzwGa+RZzG0Q8ZZvH9/0BAKkn0U+yNj6NkZEUD+Cl5EfKNsYEYwq5GWDVxIS\njBc/lDb+XbDABHcTuPQV1T84zJQ6VdCsmPW6AF/ghhmBeC8owH7TzEIK9a5QoNE+\nxqFx7D+gIIxmOom0jtTYsU0lR+4viMi14QVFwL4Ucd56/Y57fU0IlqUSc/Atyjcn\ndBInTMu2l+nZrghtWjlA3QVHdWpaIbOjGM9O9y5Xt5hwXsjEeLBi\n-----END CERTIFICATE-----\n\n# Issuer: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com\n# Subject: CN=UTN - DATACorp SGC O=The USERTRUST Network OU=http://www.usertrust.com\n# Label: \"UTN DATACorp SGC Root CA\"\n# Serial: 91374294542884689855167577680241077609\n# MD5 Fingerprint: b3:a5:3e:77:21:6d:ac:4a:c0:c9:fb:d5:41:3d:ca:06\n# SHA1 Fingerprint: 58:11:9f:0e:12:82:87:ea:50:fd:d9:87:45:6f:4f:78:dc:fa:d6:d4\n# SHA256 Fingerprint: 85:fb:2f:91:dd:12:27:5a:01:45:b6:36:53:4f:84:02:4a:d6:8b:69:b8:ee:88:68:4f:f7:11:37:58:05:b3:48\n-----BEGIN 
CERTIFICATE-----\nMIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB\nkzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug\nQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho\ndHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw\nIFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG\nEwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD\nVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu\ndXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN\nBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6\nE5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ\nD0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK\n4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq\nlXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW\nbfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB\no4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT\nMtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js\nLnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr\nBgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB\nAQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft\nGzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj\nj98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH\nKWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv\n2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3\nmfnGV/TJVTl4uix5yaaIK/QI\n-----END CERTIFICATE-----\n\n# Issuer: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com\n# Subject: CN=UTN-USERFirst-Hardware O=The USERTRUST Network OU=http://www.usertrust.com\n# Label: \"UTN USERFirst Hardware Root CA\"\n# Serial: 91374294542884704022267039221184531197\n# MD5 Fingerprint: 4c:56:41:e5:0d:bb:2b:e8:ca:a3:ed:18:08:ad:43:39\n# SHA1 Fingerprint: 04:83:ed:33:99:ac:36:08:05:87:22:ed:bc:5e:46:00:e3:be:f9:d7\n# SHA256 Fingerprint: 6e:a5:47:41:d0:04:66:7e:ed:1b:48:16:63:4a:a3:a7:9e:6e:4b:96:95:0f:82:79:da:fc:8d:9b:d8:81:21:37\n-----BEGIN 
CERTIFICATE-----\nMIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB\nlzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug\nQ2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho\ndHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt\nSGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG\nA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe\nMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v\nd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh\ncmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn\n0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ\nM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a\nMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd\noI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI\nDsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy\noUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD\nVR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0\ndHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy\nbDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF\nBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM\n//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli\nCE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE\nCJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t\n3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS\nKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA==\n-----END CERTIFICATE-----\n\n# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com\n# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com\n# Label: \"XRamp Global CA Root\"\n# Serial: 107108908803651509692980124233745014957\n# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1\n# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6\n# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2\n-----BEGIN 
CERTIFICATE-----\nMIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB\ngjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk\nMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY\nUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx\nNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3\ndy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy\ndmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB\ndXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6\n38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP\nKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q\nDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4\nqEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa\nJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi\nPvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P\nBAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs\njVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0\neS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD\nggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR\nvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt\nqZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa\nIR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy\ni6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ\nO+7ETPTsJ3xCwnR8gooJybQDJbw=\n-----END CERTIFICATE-----\n\n# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority\n# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority\n# Label: \"Go Daddy Class 2 CA\"\n# Serial: 0\n# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67\n# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4\n# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4\n-----BEGIN CERTIFICATE-----\nMIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh\nMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE\nYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3\nMDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo\nZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg\nMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN\nADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA\nPVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w\nwdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi\nEqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY\navx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+\nYihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE\nsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h\n/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5\nIEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj\nYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD\nggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy\nOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P\nTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ\nHmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER\ndEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf\nReYNnyicsbkqWletNw+vHX/bvZ8=\n-----END CERTIFICATE-----\n\n# Issuer: O=Starfield Technologies, Inc. 
OU=Starfield Class 2 Certification Authority\n# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority\n# Label: \"Starfield Class 2 CA\"\n# Serial: 0\n# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24\n# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a\n# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58\n-----BEGIN CERTIFICATE-----\nMIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl\nMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp\nU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw\nNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE\nChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp\nZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3\nDQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf\n8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN\n+lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0\nX9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa\nK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA\n1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G\nA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR\nzt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0\nYXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD\nbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w\nDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3\nL7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D\neruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl\nxy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp\nVSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY\nWQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=\n-----END CERTIFICATE-----\n\n# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing\n# Subject: CN=StartCom Certification Authority O=StartCom Ltd. 
OU=Secure Digital Certificate Signing\n# Label: \"StartCom Certification Authority\"\n# Serial: 1\n# MD5 Fingerprint: 22:4d:8f:8a:fc:f7:35:c2:bb:57:34:90:7b:8b:22:16\n# SHA1 Fingerprint: 3e:2b:f7:f2:03:1b:96:f3:8c:e6:c4:d8:a8:5d:3e:2d:58:47:6a:0f\n# SHA256 Fingerprint: c7:66:a9:be:f2:d4:07:1c:86:3a:31:aa:49:20:e8:13:b2:d1:98:60:8c:b7:b7:cf:e2:11:43:b8:36:df:09:ea\n-----BEGIN CERTIFICATE-----\nMIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW\nMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg\nQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh\ndGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9\nMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi\nU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh\ncnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA\nA4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk\npMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf\nOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C\nJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT\nKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi\nHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM\nAv+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w\n+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+\nGkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3\nZzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B\n26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID\nAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE\nFE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j\nZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js\nLnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM\nBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0\nY29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy\ndGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh\ncnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh\nYmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg\ndGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp\nbGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ\nYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT\nTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ\n9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8\njhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW\nFjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz\newT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1\nny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L\nEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu\nL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq\nyvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC\nO3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V\num0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh\nNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14=\n-----END CERTIFICATE-----\n\n# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com\n# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com\n# Label: \"DigiCert Assured ID Root CA\"\n# Serial: 17154717934120587862167794914071425081\n# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72\n# SHA1 Fingerprint: 
05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43\n# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c\n-----BEGIN CERTIFICATE-----\nMIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv\nb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG\nEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl\ncnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi\nMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c\nJpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP\nmDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+\nwRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4\nVYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/\nAUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB\nAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW\nBBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun\npyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC\ndWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf\nfwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm\nNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx\nH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe\n+o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==\n-----END CERTIFICATE-----\n\n# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com\n# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com\n# Label: \"DigiCert Global Root CA\"\n# Serial: 10944719598952040374951832963794454346\n# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e\n# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36\n# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61\n-----BEGIN CERTIFICATE-----\nMIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD\nQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT\nMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j\nb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG\n9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB\nCSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97\nnh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt\n43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P\nT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4\ngdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO\nBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR\nTLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw\nDQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr\nhMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg\n06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF\nPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls\nYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk\nCAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=\n-----END CERTIFICATE-----\n\n# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com\n# Subject: CN=DigiCert High Assurance EV Root CA 
O=DigiCert Inc OU=www.digicert.com\n# Label: \"DigiCert High Assurance EV Root CA\"\n# Serial: 3553400076410547919724730734378100087\n# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a\n# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25\n# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf\n-----BEGIN CERTIFICATE-----\nMIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs\nMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3\nd3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j\nZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3\nLmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug\nRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm\n+9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW\nPNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM\nxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB\nIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3\nhzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg\nEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF\nMAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA\nFLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec\nnzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z\neM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF\nhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2\nYzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe\nvEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep\n+OkuE6N36B9K\n-----END CERTIFICATE-----\n\n# Issuer: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.\n# Subject: CN=GeoTrust Primary Certification Authority O=GeoTrust Inc.\n# Label: \"GeoTrust Primary Certification Authority\"\n# Serial: 32798226551256963324313806436981982369\n# MD5 Fingerprint: 02:26:c3:01:5e:08:30:37:43:a9:d0:7d:cf:37:e6:bf\n# SHA1 Fingerprint: 32:3c:11:8e:1b:f7:b8:b6:52:54:e2:e2:10:0d:d6:02:90:37:f0:96\n# SHA256 Fingerprint: 37:d5:10:06:c5:12:ea:ab:62:64:21:f1:ec:8c:92:01:3f:c5:f8:2a:e9:8e:e5:33:eb:46:19:b8:de:b4:d0:6c\n-----BEGIN 
CERTIFICATE-----\nMIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY\nMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo\nR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx\nMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK\nEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp\nZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC\nAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9\nAWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA\nZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0\n7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W\nkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI\nmO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G\nA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ\nKoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1\n6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl\n4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K\noKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj\nUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU\nAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk=\n-----END CERTIFICATE-----\n\n# Issuer: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only\n# Subject: CN=thawte Primary Root CA O=thawte, Inc. OU=Certification Services Division/(c) 2006 thawte, Inc. - For authorized use only\n# Label: \"thawte Primary Root CA\"\n# Serial: 69529181992039203566298953787712940909\n# MD5 Fingerprint: 8c:ca:dc:0b:22:ce:f5:be:72:ac:41:1a:11:a8:d8:12\n# SHA1 Fingerprint: 91:c6:d6:ee:3e:8a:c8:63:84:e5:48:c2:99:29:5c:75:6c:81:7b:81\n# SHA256 Fingerprint: 8d:72:2f:81:a9:c1:13:c0:79:1d:f1:36:a2:96:6d:b2:6c:95:0a:97:1d:b4:6b:41:99:f4:ea:54:b7:8b:fb:9f\n-----BEGIN CERTIFICATE-----\nMIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB\nqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf\nQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw\nMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV\nBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw\nNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j\nLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG\nA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl\nIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs\nW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta\n3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk\n6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6\nSk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J\nNqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA\nMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP\nr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU\nDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz\nYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX\nxPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2\n/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/\nLHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7\njVaMaA==\n-----END CERTIFICATE-----\n\n# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. 
OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only\n# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G5 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2006 VeriSign, Inc. - For authorized use only\n# Label: \"VeriSign Class 3 Public Primary Certification Authority - G5\"\n# Serial: 33037644167568058970164719475676101450\n# MD5 Fingerprint: cb:17:e4:31:67:3e:e2:09:fe:45:57:93:f3:0a:fa:1c\n# SHA1 Fingerprint: 4e:b6:d5:78:49:9b:1c:cf:5f:58:1e:ad:56:be:3d:9b:67:44:a5:e5\n# SHA256 Fingerprint: 9a:cf:ab:7e:43:c8:d8:80:d0:6b:26:2a:94:de:ee:e4:b4:65:99:89:c3:d0:ca:f1:9b:af:64:05:e4:1a:b7:df\n-----BEGIN CERTIFICATE-----\nMIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB\nyjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL\nExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp\nU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW\nZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0\naG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL\nMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW\nZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln\nbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp\nU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y\naXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1\nnmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex\nt0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz\nSdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG\nBO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+\nrCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/\nNIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E\nBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH\nBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy\naXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv\nMzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE\np6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y\n5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK\nWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ\n4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N\nhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq\n-----END CERTIFICATE-----\n\n# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited\n# Subject: CN=COMODO Certification Authority O=COMODO CA Limited\n# Label: \"COMODO Certification Authority\"\n# Serial: 104350513648249232941998508985834464573\n# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75\n# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b\n# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66\n-----BEGIN 
CERTIFICATE-----\nMIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB\ngTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G\nA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV\nBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw\nMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl\nYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P\nRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0\naG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3\nUcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI\n2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8\nQ5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp\n+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+\nDT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O\nnKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW\n/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g\nPKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u\nQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY\nSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv\nIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/\nRxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4\nzJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd\nBA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB\nZQ==\n-----END CERTIFICATE-----\n\n# Issuer: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.\n# Subject: CN=Network Solutions Certificate Authority O=Network Solutions L.L.C.\n# Label: \"Network Solutions Certificate Authority\"\n# Serial: 116697915152937497490437556386812487904\n# MD5 Fingerprint: d3:f3:a6:16:c0:fa:6b:1d:59:b1:2d:96:4d:0e:11:2e\n# SHA1 Fingerprint: 74:f8:a3:c3:ef:e7:b3:90:06:4b:83:90:3c:21:64:60:20:e5:df:ce\n# SHA256 Fingerprint: 15:f0:ba:00:a3:ac:7a:f3:ac:88:4c:07:2b:10:11:a0:77:bd:77:c0:97:f4:01:64:b2:f8:59:8a:bd:83:86:0c\n-----BEGIN CERTIFICATE-----\nMIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi\nMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu\nMTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp\ndHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV\nUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO\nZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz\nc7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP\nOCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl\nmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF\nBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4\nqY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw\ngZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB\nBjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu\nbmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp\ndHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8\n6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/\nh1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH\n/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv\nwKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN\npGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey\n-----END CERTIFICATE-----\n\n# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited\n# 
Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited\n# Label: \"COMODO ECC Certification Authority\"\n# Serial: 41578283867086692638256921589707938090\n# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23\n# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11\n# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7\n-----BEGIN CERTIFICATE-----\nMIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL\nMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE\nBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT\nIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw\nMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy\nZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N\nT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv\nbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR\nFtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J\ncfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW\nBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/\nBAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm\nfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv\nGDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=\n-----END CERTIFICATE-----\n\n# Issuer: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA\n# Subject: CN=TC TrustCenter Class 2 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 2 CA\n# Label: \"TC TrustCenter Class 2 CA II\"\n# Serial: 941389028203453866782103406992443\n# MD5 Fingerprint: ce:78:33:5c:59:78:01:6e:18:ea:b9:36:a0:b9:2e:23\n# SHA1 Fingerprint: ae:50:83:ed:7c:f4:5c:bc:8f:61:c6:21:fe:68:5d:79:42:21:15:6e\n# SHA256 Fingerprint: e6:b8:f8:76:64:85:f8:07:ae:7f:8d:ac:16:70:46:1f:07:c0:a1:3e:ef:3a:1f:f7:17:53:8d:7a:ba:d3:91:b4\n-----BEGIN 
CERTIFICATE-----\nMIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjEL\nMAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV\nBAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0\nQ2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYwMTEyMTQzODQzWhcNMjUxMjMxMjI1\nOTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i\nSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UEAxMc\nVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD\nggEPADCCAQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jf\ntMjWQ+nEdVl//OEd+DFwIxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKg\nuNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2J\nXjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQXa7pIXSSTYtZgo+U4+lK\n8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7uSNQZu+99\n5OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1Ud\nEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3\nkUrL84J6E1wIqzCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy\ndXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6\nLy93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz\nJTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290\nY2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u\nTGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iS\nGNn3Bzn1LL4GdXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprt\nZjluS5TmVfwLG4t3wVMTZonZKNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8\nau0WOB9/WIFaGusyiC2y8zl3gK9etmF1KdsjTYjKUCjLhdLTEKJZbtOTVAB6okaV\nhgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kPJOzHdiEoZa5X6AeI\ndUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfkvQ==\n-----END CERTIFICATE-----\n\n# Issuer: CN=TC TrustCenter Class 3 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 3 CA\n# Subject: CN=TC TrustCenter Class 3 CA II O=TC TrustCenter GmbH OU=TC TrustCenter Class 3 CA\n# Label: \"TC TrustCenter Class 3 CA II\"\n# Serial: 1506523511417715638772220530020799\n# MD5 Fingerprint: 56:5f:aa:80:61:12:17:f6:67:21:e6:2b:6d:61:56:8e\n# SHA1 Fingerprint: 80:25:ef:f4:6e:70:c8:d4:72:24:65:84:fe:40:3b:8a:8d:6a:db:f5\n# SHA256 Fingerprint: 8d:a0:84:fc:f9:9c:e0:77:22:f8:9b:32:05:93:98:06:fa:5c:b8:11:e1:c8:13:f6:a1:08:c7:d3:36:b3:40:8e\n-----BEGIN 
CERTIFICATE-----\nMIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjEL\nMAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNV\nBAsTGVRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0\nQ2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYwMTEyMTQ0MTU3WhcNMjUxMjMxMjI1\nOTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIgR21i\nSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UEAxMc\nVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQAD\nggEPADCCAQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJW\nHt4bNwcwIi9v8Qbxq63WyKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+Q\nVl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo6SI7dYnWRBpl8huXJh0obazovVkdKyT2\n1oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZuV3bOx4a+9P/FRQI2Alq\nukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk2ZyqBwi1\nRb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1Ud\nEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NX\nXAek0CSnwPIA1DCB7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRy\ndXN0Y2VudGVyLmRlL2NybC92Mi90Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6\nLy93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBUcnVzdENlbnRlciUyMENsYXNz\nJTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21iSCxPVT1yb290\nY2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u\nTGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlN\nirTzwppVMXzEO2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8\nTtXqluJucsG7Kv5sbviRmEb8yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6\ng0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9IJqDnxrcOfHFcqMRA/07QlIp2+gB\n95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal092Y+tTmBvTwtiBj\nS+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc5A==\n-----END CERTIFICATE-----\n\n# Issuer: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA\n# Subject: CN=TC TrustCenter Universal CA I O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA\n# Label: \"TC TrustCenter Universal CA I\"\n# Serial: 601024842042189035295619584734726\n# MD5 Fingerprint: 45:e1:a5:72:c5:a9:36:64:40:9e:f5:e4:58:84:67:8c\n# SHA1 Fingerprint: 6b:2f:34:ad:89:58:be:62:fd:b0:6b:5c:ce:bb:9d:d9:4f:4e:39:f3\n# SHA256 Fingerprint: eb:f3:c0:2a:87:89:b1:fb:7d:51:19:95:d6:63:b7:29:06:d9:13:ce:0d:5e:10:56:8a:8a:77:e2:58:61:67:e7\n-----BEGIN 
CERTIFICATE-----\nMIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTEL\nMAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNV\nBAsTG1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1\nc3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcNMDYwMzIyMTU1NDI4WhcNMjUxMjMx\nMjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1c3RDZW50ZXIg\nR21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYwJAYD\nVQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSR\nJJZ4Hgmgm5qVSkr1YnwCqMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3T\nfCZdzHd55yx4Oagmcw6iXSVphU9VDprvxrlE4Vc93x9UIuVvZaozhDrzznq+VZeu\njRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtwag+1m7Z3W0hZneTvWq3z\nwZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9OgdwZu5GQ\nfezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYD\nVR0jBBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAO\nBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0G\nCSqGSIb3DQEBBQUAA4IBAQAo0uCG1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X1\n7caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/CyvwbZ71q+s2IhtNerNXxTPqYn\n8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3ghUJGooWMNjs\nydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT\nujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/\n2TYcuiUaUj0a7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY\n-----END CERTIFICATE-----\n\n# Issuer: CN=Cybertrust Global Root O=Cybertrust, Inc\n# Subject: CN=Cybertrust Global Root O=Cybertrust, Inc\n# Label: \"Cybertrust Global Root\"\n# Serial: 4835703278459682877484360\n# MD5 Fingerprint: 72:e4:4a:87:e3:69:40:80:77:ea:bc:e3:f4:ff:f0:e1\n# SHA1 Fingerprint: 5f:43:e5:b1:bf:f8:78:8c:ac:1c:c7:ca:4a:9a:c6:22:2b:cc:34:c6\n# SHA256 Fingerprint: 96:0a:df:00:63:e9:63:56:75:0c:29:65:dd:0a:08:67:da:0b:9c:bd:6e:77:71:4a:ea:fb:23:49:ab:39:3d:a3\n-----BEGIN CERTIFICATE-----\nMIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYG\nA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2Jh\nbCBSb290MB4XDTA2MTIxNTA4MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UE\nChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBS\nb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+Mi8vRRQZhP/8NN5\n7CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW0ozS\nJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2y\nHLtgwEZLAfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iP\nt3sMpTjr3kfb1V05/Iin89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNz\nFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAY\nXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/\nMB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2MDSgMqAw\nhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3Js\nMB8GA1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUA\nA4IBAQBW7wojoFROlZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMj\nWqd8BfP9IjsO0QbE2zZMcwSO5bAi5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUx\nXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2hO0j9n0Hq0V+09+zv+mKts2o\nomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+TX3EJIrduPuoc\nA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW\nWL1WMRJOEcgh4LMRkWXbtKaIOM5V\n-----END CERTIFICATE-----\n\n# Issuer: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. - For authorized use only\n# Subject: CN=GeoTrust Primary Certification Authority - G3 O=GeoTrust Inc. OU=(c) 2008 GeoTrust Inc. 
- For authorized use only\n# Label: \"GeoTrust Primary Certification Authority - G3\"\n# Serial: 28809105769928564313984085209975885599\n# MD5 Fingerprint: b5:e8:34:36:c9:10:44:58:48:70:6d:2e:83:d4:b8:05\n# SHA1 Fingerprint: 03:9e:ed:b8:0b:e7:a0:3c:69:53:89:3b:20:d2:d9:32:3a:4c:2a:fd\n# SHA256 Fingerprint: b4:78:b8:12:25:0d:f8:78:63:5c:2a:a7:ec:7d:15:5e:aa:62:5e:e8:29:16:e2:cd:29:43:61:88:6c:d1:fb:d4\n-----BEGIN CERTIFICATE-----\nMIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB\nmDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT\nMChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s\neTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv\ncml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ\nBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg\nMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0\nBgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg\nLSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz\n+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm\nhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn\n5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W\nJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL\nDmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC\nhuOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw\nHQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB\nAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB\nzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN\nkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD\nAWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH\nSJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G\nspki4cErx5z481+oghLrGREt\n-----END CERTIFICATE-----\n\n# Issuer: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only\n# Subject: CN=thawte Primary Root CA - G2 O=thawte, Inc. OU=(c) 2007 thawte, Inc. - For authorized use only\n# Label: \"thawte Primary Root CA - G2\"\n# Serial: 71758320672825410020661621085256472406\n# MD5 Fingerprint: 74:9d:ea:60:24:c4:fd:22:53:3e:cc:3a:72:d9:29:4f\n# SHA1 Fingerprint: aa:db:bc:22:23:8f:c4:01:a1:27:bb:38:dd:f4:1d:db:08:9e:f0:12\n# SHA256 Fingerprint: a4:31:0d:50:af:18:a6:44:71:90:37:2a:86:af:af:8b:95:1f:fb:43:1d:83:7f:1e:56:88:b4:59:71:ed:15:57\n-----BEGIN CERTIFICATE-----\nMIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL\nMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp\nIDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi\nBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw\nMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh\nd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig\nYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v\ndCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/\nBebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6\npapu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E\nBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K\nDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3\nKMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox\nXZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg==\n-----END CERTIFICATE-----\n\n# Issuer: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. 
- For authorized use only\n# Subject: CN=thawte Primary Root CA - G3 O=thawte, Inc. OU=Certification Services Division/(c) 2008 thawte, Inc. - For authorized use only\n# Label: \"thawte Primary Root CA - G3\"\n# Serial: 127614157056681299805556476275995414779\n# MD5 Fingerprint: fb:1b:5d:43:8a:94:cd:44:c6:76:f2:43:4b:47:e7:31\n# SHA1 Fingerprint: f1:8b:53:8d:1b:e9:03:b6:a6:f0:56:43:5b:17:15:89:ca:f3:6b:f2\n# SHA256 Fingerprint: 4b:03:f4:58:07:ad:70:f2:1b:fc:2c:ae:71:c9:fd:e4:60:4c:06:4c:f5:ff:b6:86:ba:e5:db:aa:d7:fd:d3:4c\n-----BEGIN CERTIFICATE-----\nMIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB\nrjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf\nQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw\nMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV\nBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa\nFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl\nLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u\nMTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl\nZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm\ngcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8\nYZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf\nb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9\n9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S\nzhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk\nOQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV\nHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA\n2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW\noCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu\nt8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c\nKUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM\nm7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu\nMdRAGmI0Nj81Aa6sY6A=\n-----END CERTIFICATE-----\n\n# Issuer: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. - For authorized use only\n# Subject: CN=GeoTrust Primary Certification Authority - G2 O=GeoTrust Inc. OU=(c) 2007 GeoTrust Inc. 
- For authorized use only\n# Label: \"GeoTrust Primary Certification Authority - G2\"\n# Serial: 80682863203381065782177908751794619243\n# MD5 Fingerprint: 01:5e:d8:6b:bd:6f:3d:8e:a1:31:f8:12:e0:98:73:6a\n# SHA1 Fingerprint: 8d:17:84:d5:37:f3:03:7d:ec:70:fe:57:8b:51:9a:99:e6:10:d7:b0\n# SHA256 Fingerprint: 5e:db:7a:c4:3b:82:a0:6a:87:61:e8:d7:be:49:79:eb:f2:61:1f:7d:d7:9b:f9:1c:1c:6b:56:6a:21:9e:d7:66\n-----BEGIN CERTIFICATE-----\nMIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL\nMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj\nKSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2\nMDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0\neSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV\nBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw\nNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV\nBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH\nMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL\nSo17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal\ntJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO\nBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG\nCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT\nqQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz\nrD6ogRLQy7rQkgu2npaqBA+K\n-----END CERTIFICATE-----\n\n# Issuer: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only\n# Subject: CN=VeriSign Universal Root Certification Authority O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2008 VeriSign, Inc. - For authorized use only\n# Label: \"VeriSign Universal Root Certification Authority\"\n# Serial: 85209574734084581917763752644031726877\n# MD5 Fingerprint: 8e:ad:b5:01:aa:4d:81:e4:8c:1d:d1:e1:14:00:95:19\n# SHA1 Fingerprint: 36:79:ca:35:66:87:72:30:4d:30:a5:fb:87:3b:0f:a7:7b:b7:0d:54\n# SHA256 Fingerprint: 23:99:56:11:27:a5:71:25:de:8c:ef:ea:61:0d:df:2f:a0:78:b5:c8:06:7f:4e:82:82:90:bf:b8:60:e8:4b:3c\n-----BEGIN 
CERTIFICATE-----\nMIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB\nvTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL\nExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp\nU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W\nZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe\nFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX\nMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0\nIE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y\nIGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh\nbCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF\nAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF\n9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH\nH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H\nLL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN\n/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT\nrJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud\nEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw\nWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs\nexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud\nDgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4\nsAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+\nseQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz\n4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+\nBxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR\nlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3\n7M2CYfE45k+XmCpajQ==\n-----END CERTIFICATE-----\n\n# Issuer: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. - For authorized use only\n# Subject: CN=VeriSign Class 3 Public Primary Certification Authority - G4 O=VeriSign, Inc. OU=VeriSign Trust Network/(c) 2007 VeriSign, Inc. 
- For authorized use only\n# Label: \"VeriSign Class 3 Public Primary Certification Authority - G4\"\n# Serial: 63143484348153506665311985501458640051\n# MD5 Fingerprint: 3a:52:e1:e7:fd:6f:3a:e3:6f:f3:6f:99:1b:f9:22:41\n# SHA1 Fingerprint: 22:d5:d8:df:8f:02:31:d1:8d:f7:9d:b7:cf:8a:2d:64:c9:3f:6c:3a\n# SHA256 Fingerprint: 69:dd:d7:ea:90:bb:57:c9:3e:13:5d:c8:5e:a6:fc:d5:48:0b:60:32:39:bd:c4:54:fc:75:8b:2a:26:cf:7f:79\n-----BEGIN CERTIFICATE-----\nMIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL\nMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW\nZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln\nbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp\nU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y\naXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG\nA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp\nU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg\nSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln\nbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5\nIC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm\nGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve\nfLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw\nAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ\naW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj\naHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW\nkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC\n4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga\nFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA==\n-----END CERTIFICATE-----\n\n# Issuer: O=VeriSign, Inc. OU=Class 3 Public Primary Certification Authority\n# Subject: O=VeriSign, Inc. 
OU=Class 3 Public Primary Certification Authority\n# Label: \"Verisign Class 3 Public Primary Certification Authority\"\n# Serial: 80507572722862485515306429940691309246\n# MD5 Fingerprint: ef:5a:f1:33:ef:f1:cd:bb:51:02:ee:12:14:4b:96:c4\n# SHA1 Fingerprint: a1:db:63:93:91:6f:17:e4:18:55:09:40:04:15:c7:02:40:b0:ae:6b\n# SHA256 Fingerprint: a4:b6:b3:99:6f:c2:f3:06:b3:fd:86:81:bd:63:41:3d:8c:50:09:cc:4f:a3:29:c2:cc:f0:e2:fa:1b:14:03:05\n-----BEGIN CERTIFICATE-----\nMIICPDCCAaUCEDyRMcsf9tAbDpq40ES/Er4wDQYJKoZIhvcNAQEFBQAwXzELMAkG\nA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFz\ncyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2\nMDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVowXzELMAkGA1UEBhMCVVMxFzAVBgNV\nBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmlt\nYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUAA4GN\nADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhE\nBarsAx94f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/is\nI19wKTakyYbnsZogy1Olhec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0G\nCSqGSIb3DQEBBQUAA4GBABByUqkFFBkyCEHwxWsKzH4PIRnN5GfcX6kb5sroc50i\n2JhucwNhkcV8sEVAbkSdjbCxlnRhLQ2pRdKkkirWmnWXbj9T/UWZYB2oK0z5XqcJ\n2HUw19JlYD1n1khVdWk/kfVIC0dpImmClr7JyDiGSnoscxlIaU5rfGW/D/xwzoiQ\n-----END CERTIFICATE-----\n\n# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3\n# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3\n# Label: \"GlobalSign Root CA - R3\"\n# Serial: 4835703278459759426209954\n# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28\n# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad\n# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b\n-----BEGIN CERTIFICATE-----\nMIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G\nA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp\nZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4\nMTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG\nA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI\nhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8\nRgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT\ngHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm\nKPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd\nQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ\nXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw\nDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o\nLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU\nRUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp\njjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK\n6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX\nmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs\nMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH\nWD9f\n-----END CERTIFICATE-----\n\n# Issuer: CN=TC TrustCenter Universal CA III O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA\n# Subject: CN=TC TrustCenter Universal CA III O=TC TrustCenter GmbH OU=TC TrustCenter Universal CA\n# Label: \"TC TrustCenter Universal CA III\"\n# Serial: 2010889993983507346460533407902964\n# MD5 Fingerprint: 9f:dd:db:ab:ff:8e:ff:45:21:5f:f0:6c:9d:8f:fe:2b\n# SHA1 Fingerprint: 96:56:cd:7b:57:96:98:95:d0:e1:41:46:68:06:fb:b8:c6:11:06:87\n# SHA256 Fingerprint: 
30:9b:4a:87:f6:ca:56:c9:31:69:aa:a9:9c:6d:98:88:54:d7:89:2b:d5:43:7e:2d:07:b2:9c:be:da:55:d3:5d\n-----BEGIN CERTIFICATE-----\nMIID4TCCAsmgAwIBAgIOYyUAAQACFI0zFQLkbPQwDQYJKoZIhvcNAQEFBQAwezEL\nMAkGA1UEBhMCREUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNV\nBAsTG1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQTEoMCYGA1UEAxMfVEMgVHJ1\nc3RDZW50ZXIgVW5pdmVyc2FsIENBIElJSTAeFw0wOTA5MDkwODE1MjdaFw0yOTEy\nMzEyMzU5NTlaMHsxCzAJBgNVBAYTAkRFMRwwGgYDVQQKExNUQyBUcnVzdENlbnRl\nciBHbWJIMSQwIgYDVQQLExtUQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0ExKDAm\nBgNVBAMTH1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQSBJSUkwggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDC2pxisLlxErALyBpXsq6DFJmzNEubkKLF\n5+cvAqBNLaT6hdqbJYUtQCggbergvbFIgyIpRJ9Og+41URNzdNW88jBmlFPAQDYv\nDIRlzg9uwliT6CwLOunBjvvya8o84pxOjuT5fdMnnxvVZ3iHLX8LR7PH6MlIfK8v\nzArZQe+f/prhsq75U7Xl6UafYOPfjdN/+5Z+s7Vy+EutCHnNaYlAJ/Uqwa1D7KRT\nyGG299J5KmcYdkhtWyUB0SbFt1dpIxVbYYqt8Bst2a9c8SaQaanVDED1M4BDj5yj\ndipFtK+/fz6HP3bFzSreIMUWWMv5G/UPyw0RUmS40nZid4PxWJ//AgMBAAGjYzBh\nMB8GA1UdIwQYMBaAFFbn4VslQ4Dg9ozhcbyO5YAvxEjiMA8GA1UdEwEB/wQFMAMB\nAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRW5+FbJUOA4PaM4XG8juWAL8RI\n4jANBgkqhkiG9w0BAQUFAAOCAQEAg8ev6n9NCjw5sWi+e22JLumzCecYV42Fmhfz\ndkJQEw/HkG8zrcVJYCtsSVgZ1OK+t7+rSbyUyKu+KGwWaODIl0YgoGhnYIg5IFHY\naAERzqf2EQf27OysGh+yZm5WZ2B6dF7AbZc2rrUNXWZzwCUyRdhKBgePxLcHsU0G\nDeGl6/R1yrqc0L2z0zIkTO5+4nYES0lT2PLpVDP85XEfPRRclkvxOvIAu2y0+pZV\nCIgJwcyRGSmwIC3/yzikQOEXvnlhgP8HA4ZMTnsGnxGGjYnuJ8Tb4rwZjgvDwxPH\nLQNjO9Po5KIqwoIIlBZU8O8fJ5AluA0OKBtHd0e9HKgl8ZS0Zg==\n-----END CERTIFICATE-----\n\n# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.\n# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.\n# Label: \"Go Daddy Root Certificate Authority - G2\"\n# Serial: 0\n# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01\n# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b\n# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da\n-----BEGIN CERTIFICATE-----\nMIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx\nEDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT\nEUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp\nZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz\nNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH\nEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE\nAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw\nDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD\nE6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH\n/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy\nDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh\nGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR\ntDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA\nAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE\nFDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX\nWWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu\n9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr\ngIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo\n2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO\nLPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI\n4uJEvlz36hz1\n-----END CERTIFICATE-----\n\n# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.\n# Subject: CN=Starfield Root Certificate Authority - G2 
O=Starfield Technologies, Inc.\n# Label: \"Starfield Root Certificate Authority - G2\"\n# Serial: 0\n# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96\n# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e\n# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5\n-----BEGIN CERTIFICATE-----\nMIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx\nEDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT\nHFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs\nZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw\nMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6\nb25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj\naG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp\nY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC\nggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg\nnLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1\nHOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N\nHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN\ndloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0\nHZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO\nBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G\nCSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU\nsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3\n4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg\n8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K\npL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1\nmMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0\n-----END CERTIFICATE-----\n\n# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.\n# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.\n# Label: \"Starfield Services Root Certificate Authority - G2\"\n# Serial: 0\n# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2\n# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f\n# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5\n-----BEGIN 
CERTIFICATE-----\nMIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx\nEDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT\nHFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs\nZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5\nMDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD\nVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy\nZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy\ndmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI\nhvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p\nOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2\n8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K\nTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe\nhRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk\n6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw\nDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q\nAdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI\nbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB\nve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z\nqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd\niEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn\n0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN\nsSi6\n-----END CERTIFICATE-----\n\n# Issuer: CN=AffirmTrust Commercial O=AffirmTrust\n# Subject: CN=AffirmTrust Commercial O=AffirmTrust\n# Label: \"AffirmTrust Commercial\"\n# Serial: 8608355977964138876\n# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7\n# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7\n# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7\n-----BEGIN CERTIFICATE-----\nMIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE\nBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz\ndCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL\nMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp\ncm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC\nAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP\nHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr\nba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL\nMeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1\nyHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr\nVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/\nnx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ\nKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG\nXUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj\nvbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt\nZ8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g\nN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC\nnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=\n-----END CERTIFICATE-----\n\n# Issuer: CN=AffirmTrust Networking O=AffirmTrust\n# Subject: CN=AffirmTrust Networking O=AffirmTrust\n# Label: \"AffirmTrust Networking\"\n# Serial: 8957382827206547757\n# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f\n# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f\n# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b\n-----BEGIN 
CERTIFICATE-----\nMIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE\nBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz\ndCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL\nMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp\ncm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC\nAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y\nYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua\nkCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL\nQESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp\n6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG\nyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i\nQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ\nKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO\ntDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu\nQY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ\nLgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u\nolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48\nx3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=\n-----END CERTIFICATE-----\n\n# Issuer: CN=AffirmTrust Premium O=AffirmTrust\n# Subject: CN=AffirmTrust Premium O=AffirmTrust\n# Label: \"AffirmTrust Premium\"\n# Serial: 7893706540734352110\n# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57\n# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27\n# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a\n-----BEGIN CERTIFICATE-----\nMIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE\nBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz\ndCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG\nA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U\ncnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf\nqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ\nJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ\n+jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS\ns8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5\nHMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7\n70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG\nV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S\nqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S\n5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia\nC1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX\nOwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE\nFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/\nBAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2\nKI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg\nNt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B\n8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ\nMKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc\n0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ\nu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF\nu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH\nYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8\nGKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO\nRtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e\nKeC2uAloGRwYQw==\n-----END 
CERTIFICATE-----\n\n# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust\n# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust\n# Label: \"AffirmTrust Premium ECC\"\n# Serial: 8401224907861490260\n# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d\n# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb\n# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23\n-----BEGIN CERTIFICATE-----\nMIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC\nVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ\ncmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ\nBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt\nVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D\n0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9\nss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G\nA1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G\nA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs\naobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I\nflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==\n-----END CERTIFICATE-----\n\n# Issuer: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing\n# Subject: CN=StartCom Certification Authority O=StartCom Ltd. OU=Secure Digital Certificate Signing\n# Label: \"StartCom Certification Authority\"\n# Serial: 45\n# MD5 Fingerprint: c9:3b:0d:84:41:fc:a4:76:79:23:08:57:de:10:19:16\n# SHA1 Fingerprint: a3:f1:33:3f:e2:42:bf:cf:c5:d1:4e:8f:39:42:98:40:68:10:d1:a0\n# SHA256 Fingerprint: e1:78:90:ee:09:a3:fb:f4:f4:8b:9c:41:4a:17:d6:37:b7:a5:06:47:e9:bc:75:23:22:72:7f:cc:17:42:a9:11\n-----BEGIN CERTIFICATE-----\nMIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW\nMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg\nQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh\ndGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9\nMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi\nU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh\ncnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA\nA4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk\npMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf\nOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C\nJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT\nKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi\nHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM\nAv+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w\n+2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+\nGkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3\nZzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B\n26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID\nAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD\nVR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul\nF2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC\nATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w\nZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk\naWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0\nYXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg\nc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0\naWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxl
IGF0IGh0dHA6Ly93\nd3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG\nCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1\ndGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF\nwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS\nTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst\n0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc\npRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl\nCcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF\nP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK\n1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm\nKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE\nJnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ\n8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm\nfyWl8kgAwKQB2j8=\n-----END CERTIFICATE-----\n\n# Issuer: CN=StartCom Certification Authority G2 O=StartCom Ltd.\n# Subject: CN=StartCom Certification Authority G2 O=StartCom Ltd.\n# Label: \"StartCom Certification Authority G2\"\n# Serial: 59\n# MD5 Fingerprint: 78:4b:fb:9e:64:82:0a:d3:b8:4c:62:f3:64:f2:90:64\n# SHA1 Fingerprint: 31:f1:fd:68:22:63:20:ee:c6:3b:3f:9d:ea:4a:3e:53:7c:7c:39:17\n# SHA256 Fingerprint: c7:ba:65:67:de:93:a7:98:ae:1f:aa:79:1e:71:2d:37:8f:ae:1f:93:c4:39:7f:ea:44:1b:b7:cb:e6:fd:59:95\n-----BEGIN CERTIFICATE-----\nMIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW\nMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm\naWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1\nOTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG\nA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G\nCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ\nJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD\nvfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo\nD/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/\nQ0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW\nRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK\nHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN\nnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM\n0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i\nUUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9\nHa90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg\nTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE\nAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL\nBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K\n2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX\nUfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl\n6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK\n9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ\nHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI\nwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY\nXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l\nIxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo\nhdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr\nso8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/compat/multi_json.rb",
    "content": "require 'multi_json'\n\nif !MultiJson.respond_to?(:load) || [\n  Kernel,\n  defined?(ActiveSupport::Dependencies::Loadable) && ActiveSupport::Dependencies::Loadable\n].compact.include?(MultiJson.method(:load).owner)\n  module MultiJson\n    class <<self\n      alias :load :decode\n    end\n  end\nend\nif !MultiJson.respond_to?(:dump)\n  module MultiJson\n    class <<self\n      alias :dump :encode\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/auth/compute_service_account.rb",
    "content": "# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'faraday'\nrequire 'signet/oauth_2/client'\n\nmodule Google\n  class APIClient\n    class ComputeServiceAccount < Signet::OAuth2::Client\n      def fetch_access_token(options={})\n        connection = options[:connection] || Faraday.default_connection\n        response = connection.get 'http://metadata/computeMetadata/v1beta1/instance/service-accounts/default/token'\n        Signet::OAuth2.parse_credentials(response.body, response.headers['content-type'])\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/auth/file_storage.rb",
    "content": "# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'signet/oauth_2/client'\nrequire_relative 'storage'\nrequire_relative 'storages/file_store'\n\nmodule Google\n  class APIClient\n\n    ##\n    # Represents cached OAuth 2 tokens stored on local disk in a\n    # JSON serialized file. Meant to resemble the serialized format\n    # http://google-api-python-client.googlecode.com/hg/docs/epy/oauth2client.file.Storage-class.html\n    #\n    # @deprecated\n    #  Use {Google::APIClient::Storage} and {Google::APIClient::FileStore} instead\n    #\n    class FileStorage\n\n      attr_accessor :storage\n\n      def initialize(path)\n        store = Google::APIClient::FileStore.new(path)\n        @storage = Google::APIClient::Storage.new(store)\n        @storage.authorize\n      end\n\n      def load_credentials\n        storage.authorize\n      end\n\n      def authorization\n        storage.authorization\n      end\n\n      ##\n      # Write the credentials to the specified file.\n      #\n      # @param [Signet::OAuth2::Client] authorization\n      #    Optional authorization instance. If not provided, the authorization\n      #    already associated with this instance will be written.\n      def write_credentials(auth=nil)\n        storage.write_credentials(auth)\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/auth/installed_app.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'webrick'\nrequire 'launchy'\n\nmodule Google\n  class APIClient\n\n    # Small helper for the sample apps for performing OAuth 2.0 flows from the command\n    # line or in any other installed app environment.\n    #\n    # @example\n    #\n    #    client = Google::APIClient.new\n    #    flow = Google::APIClient::InstalledAppFlow.new(\n    #      :client_id => '691380668085.apps.googleusercontent.com',\n    #      :client_secret => '...',\n    #      :scope => 'https://www.googleapis.com/auth/drive'\n    #    )\n    #    client.authorization = flow.authorize\n    #\n    class InstalledAppFlow\n\n      RESPONSE_BODY = <<-HTML\n        <html>\n          <head>\n            <script>\n              function closeWindow() {\n                window.open('', '_self', '');\n                window.close();\n              }\n              setTimeout(closeWindow, 10);\n            </script>\n          </head>\n          <body>You may close this window.</body>\n        </html>\n      HTML\n\n      ##\n      # Configure the flow\n      #\n      # @param [Hash] options The configuration parameters for the client.\n      # @option options [Fixnum] :port\n      #   Port to run the embedded server on. Defaults to 9292\n      # @option options [String] :client_id\n      #   A unique identifier issued to the client to identify itself to the\n      #   authorization server.\n      # @option options [String] :client_secret\n      #   A shared symmetric secret issued by the authorization server,\n      #   which is used to authenticate the client.\n      # @option options [String] :scope\n      #   The scope of the access request, expressed either as an Array\n      #   or as a space-delimited String.\n      #\n      # @see Signet::OAuth2::Client\n      def initialize(options)\n        @port = options[:port] || 9292\n        @authorization = Signet::OAuth2::Client.new({\n          :authorization_uri => 'https://accounts.google.com/o/oauth2/auth',\n          :token_credential_uri => 'https://accounts.google.com/o/oauth2/token',\n          :redirect_uri => \"http://localhost:#{@port}/\"}.update(options)\n        )\n      end\n\n      ##\n      # Request authorization. 
Opens a browser and waits for response.\n      #\n      # @param [Google::APIClient::Storage] storage\n      #  Optional object that responds to :write_credentials, used to serialize\n      #  the OAuth 2 credentials after completing the flow.\n      #\n      # @return [Signet::OAuth2::Client]\n      #  Authorization instance, nil if user cancelled.\n      def authorize(storage=nil)\n        auth = @authorization\n\n        server = WEBrick::HTTPServer.new(\n          :Port => @port,\n          :BindAddress =>\"localhost\",\n          :Logger => WEBrick::Log.new(STDOUT, 0),\n          :AccessLog => []\n        )\n        begin\n          trap(\"INT\") { server.shutdown }\n\n          server.mount_proc '/' do |req, res|\n            auth.code = req.query['code']\n            if auth.code\n              auth.fetch_access_token!\n            end\n            res.status = WEBrick::HTTPStatus::RC_ACCEPTED\n            res.body = RESPONSE_BODY\n            server.stop\n          end\n\n          Launchy.open(auth.authorization_uri.to_s)\n          server.start\n        ensure\n          server.shutdown\n        end\n        if @authorization.access_token\n          if storage.respond_to?(:write_credentials)\n            storage.write_credentials(@authorization)\n          end\n          return @authorization\n        else\n          return nil\n        end\n      end\n    end\n\n  end\nend\n\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/auth/jwt_asserter.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'jwt'\nrequire 'signet/oauth_2/client'\nrequire 'delegate'\n\nmodule Google\n  class APIClient\n    ##\n    # Generates access tokens using the JWT assertion profile. Requires a\n    # service account & access to the private key.\n    #\n    # @example Using Signet\n    #\n    #   key = Google::APIClient::KeyUtils.load_from_pkcs12('client.p12', 'notasecret')\n    #   client.authorization = Signet::OAuth2::Client.new(\n    #     :token_credential_uri => 'https://accounts.google.com/o/oauth2/token',\n    #     :audience => 'https://accounts.google.com/o/oauth2/token',\n    #     :scope => 'https://www.googleapis.com/auth/prediction',\n    #     :issuer => '123456-abcdef@developer.gserviceaccount.com',\n    #     :signing_key => key)\n    #   client.authorization.fetch_access_token!\n    #   client.execute(...)\n    #\n    # @deprecated\n    #  Service accounts are now supported directly in Signet\n    # @see https://developers.google.com/accounts/docs/OAuth2ServiceAccount\n    class JWTAsserter\n      # @return [String] ID/email of the issuing party\n      attr_accessor :issuer\n      # @return [Fixnum] How long, in seconds, the assertion is valid for\n      attr_accessor :expiry\n      # @return [Fixnum] Seconds to expand the issued at/expiry window to account for clock skew\n      attr_accessor :skew\n      # @return [String] Scopes to authorize\n      attr_reader :scope\n      # @return [String,OpenSSL::PKey] key for signing assertions\n      attr_writer :key\n      # @return [String] Algorithm used for signing\n      attr_accessor :algorithm\n      \n      ##\n      # Initializes the asserter for a service account.\n      #\n      # @param [String] issuer\n      #    Name/ID of the client issuing the assertion\n      # @param [String, Array] scope\n      #   Scopes to authorize. May be a space delimited string or array of strings\n      # @param [String,OpenSSL::PKey] key\n      #   Key for signing assertions\n      # @param [String] algorithm\n      #   Algorithm to use, either 'RS256' for RSA with SHA-256 \n      #   or 'HS256' for HMAC with SHA-256\n      def initialize(issuer, scope, key, algorithm = \"RS256\")\n        self.issuer = issuer\n        self.scope = scope\n        self.expiry = 60 # 1 min default \n        self.skew = 60      \n        self.key = key\n        self.algorithm = algorithm\n      end\n\n      ##\n      # Set the scopes to authorize\n      #\n      # @param [String, Array] new_scope\n      #   Scopes to authorize. 
May be a space delimited string or array of strings\n      def scope=(new_scope)\n        case new_scope\n        when Array\n          @scope = new_scope.join(' ')\n        when String\n          @scope = new_scope\n        when nil\n          @scope = ''\n        else\n          raise TypeError, \"Expected Array or String, got #{new_scope.class}\"\n        end\n      end\n      \n      ##\n      # Request a new access token.\n      # \n      # @param [String] person\n      #   Email address of a user, if requesting a token to act on their behalf\n      # @param [Hash] options\n      #   Pass through to Signet::OAuth2::Client.fetch_access_token\n      # @return [Signet::OAuth2::Client] Access token \n      #\n      # @see Signet::OAuth2::Client.fetch_access_token!\n      def authorize(person = nil, options={})\n        authorization = self.to_authorization(person)\n        authorization.fetch_access_token!(options)\n        return authorization\n      end\n      \n      ##\n      # Builds a Signet OAuth2 client\n      #\n      # @return [Signet::OAuth2::Client] Access token \n      def to_authorization(person = nil)\n        return Signet::OAuth2::Client.new(\n          :token_credential_uri => 'https://accounts.google.com/o/oauth2/token',\n          :audience => 'https://accounts.google.com/o/oauth2/token',\n          :scope => self.scope,\n          :issuer => @issuer,\n          :signing_key => @key,\n          :signing_algorithm => @algorithm,\n          :person => person\n        )\n      end      \n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/auth/key_utils.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nmodule Google\n  class APIClient\n    ##\n    # Helper for loading keys from the PKCS12 files downloaded when\n    # setting up service accounts at the APIs Console.\n    #\n    module KeyUtils\n      ##\n      # Loads a key from PKCS12 file, assuming a single private key\n      # is present.\n      #\n      # @param [String] keyfile\n      #    Path of the PKCS12 file to load. If not a path to an actual file,\n      #    assumes the string is the content of the file itself.\n      # @param [String] passphrase\n      #   Passphrase for unlocking the private key\n      #\n      # @return [OpenSSL::PKey] The private key for signing assertions.\n      def self.load_from_pkcs12(keyfile, passphrase)\n        load_key(keyfile, passphrase) do |content, pass_phrase|\n          OpenSSL::PKCS12.new(content, pass_phrase).key\n        end\n      end\n\n\n      ##\n      # Loads a key from a PEM file.\n      #\n      # @param [String] keyfile\n      #    Path of the PEM file to load. If not a path to an actual file,\n      #    assumes the string is the content of the file itself.\n      # @param [String] passphrase\n      #   Passphrase for unlocking the private key\n      #\n      # @return [OpenSSL::PKey] The private key for signing assertions.\n      #\n      def self.load_from_pem(keyfile, passphrase)\n        load_key(keyfile, passphrase) do | content, pass_phrase|\n          OpenSSL::PKey::RSA.new(content, pass_phrase)\n        end\n      end\n\n      private\n\n      ##\n      # Helper for loading keys from file or memory. Accepts a block\n      # to handle the specific file format.\n      #\n      # @param [String] keyfile\n      #    Path of thefile to load. If not a path to an actual file,\n      #    assumes the string is the content of the file itself.\n      # @param [String] passphrase\n      #   Passphrase for unlocking the private key\n      #\n      # @yield [String, String]\n      #   Key file & passphrase to extract key from\n      # @yieldparam [String] keyfile\n      #   Contents of the file\n      # @yieldparam [String] passphrase\n      #   Passphrase to unlock key\n      # @yieldreturn [OpenSSL::PKey]\n      #   Private key\n      #\n      # @return [OpenSSL::PKey] The private key for signing assertions.\n      def self.load_key(keyfile, passphrase, &block)\n        begin\n          begin\n            content = File.open(keyfile, 'rb') { |io| io.read }\n          rescue\n            content = keyfile\n          end\n          block.call(content, passphrase)\n        rescue OpenSSL::OpenSSLError\n          raise ArgumentError.new(\"Invalid keyfile or passphrase\")\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/auth/pkcs12.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'google/api_client/auth/key_utils'\nmodule Google\n  class APIClient\n    ##\n    # Helper for loading keys from the PKCS12 files downloaded when\n    # setting up service accounts at the APIs Console.\n    #\n    module PKCS12\n      ##\n      # Loads a key from PKCS12 file, assuming a single private key\n      # is present.\n      #\n      # @param [String] keyfile\n      #    Path of the PKCS12 file to load. If not a path to an actual file,\n      #    assumes the string is the content of the file itself. \n      # @param [String] passphrase\n      #   Passphrase for unlocking the private key\n      #\n      # @return [OpenSSL::PKey] The private key for signing assertions.\n      # @deprecated \n      #  Use {Google::APIClient::KeyUtils} instead\n      def self.load_key(keyfile, passphrase)\n        KeyUtils.load_from_pkcs12(keyfile, passphrase)\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/auth/storage.rb",
    "content": "# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'signet/oauth_2/client'\n\nmodule Google\n  class APIClient\n    ##\n    # Represents cached OAuth 2 tokens stored on local disk in a\n    # JSON serialized file. Meant to resemble the serialized format\n    # http://google-api-python-client.googlecode.com/hg/docs/epy/oauth2client.file.Storage-class.html\n    #\n    class Storage\n\n      AUTHORIZATION_URI = 'https://accounts.google.com/o/oauth2/auth'\n      TOKEN_CREDENTIAL_URI = 'https://accounts.google.com/o/oauth2/token'\n\n      # @return [Object] Storage object.\n      attr_accessor :store\n\n      # @return [Signet::OAuth2::Client]\n      attr_reader :authorization\n\n      ##\n      # Initializes the Storage object.\n      #\n      # @params [Object] Storage object\n      def initialize(store)\n        @store= store\n        @authorization = nil\n      end\n\n      ##\n      # Write the credentials to the specified store.\n      #\n      # @params [Signet::OAuth2::Client] authorization\n      #    Optional authorization instance. If not provided, the authorization\n      #    already associated with this instance will be written.\n      def write_credentials(authorization=nil)\n        @authorization = authorization if authorization\n        if @authorization.respond_to?(:refresh_token) && @authorization.refresh_token\n          store.write_credentials(credentials_hash)\n        end\n      end\n\n      ##\n      # Loads credentials and authorizes an client.\n      # @return [Object] Signet::OAuth2::Client or NIL\n      def authorize\n        @authorization = nil\n        cached_credentials = load_credentials\n        if cached_credentials && cached_credentials.size > 0\n          @authorization = Signet::OAuth2::Client.new(cached_credentials)\n          @authorization.issued_at = Time.at(cached_credentials['issued_at'].to_i)\n          self.refresh_authorization if @authorization.expired?\n        end\n        return @authorization\n      end\n\n      ##\n      # refresh credentials and save them to store\n      def refresh_authorization\n        authorization.refresh!\n        self.write_credentials\n      end\n\n      private\n\n      ##\n      # Attempt to read in credentials from the specified store.\n      def load_credentials\n        store.load_credentials\n      end\n\n      ##\n      # @return [Hash] with credentials\n      def credentials_hash\n        {\n          :access_token          => authorization.access_token,\n          :authorization_uri     => AUTHORIZATION_URI,\n          :client_id             => authorization.client_id,\n          :client_secret         => authorization.client_secret,\n          :expires_in            => authorization.expires_in,\n          :refresh_token         => authorization.refresh_token,\n          :token_credential_uri  => TOKEN_CREDENTIAL_URI,\n          :issued_at             => authorization.issued_at.to_i\n        }\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/auth/storages/file_store.rb",
    "content": "# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'json'\n\nmodule Google\n  class APIClient\n    ##\n    # Represents cached OAuth 2 tokens stored on local disk in a\n    # JSON serialized file. Meant to resemble the serialized format\n    # http://google-api-python-client.googlecode.com/hg/docs/epy/oauth2client.file.Storage-class.html\n    #\n    class FileStore\n\n      attr_accessor :path\n\n      ##\n      # Initializes the FileStorage object.\n      #\n      # @param [String] path\n      #    Path to the credentials file.\n      def initialize(path)\n        @path= path\n      end\n\n      ##\n      # Attempt to read in credentials from the specified file.\n      def load_credentials\n        open(path, 'r') { |f| JSON.parse(f.read) }\n      rescue\n        nil\n      end\n\n      ##\n      # Write the credentials to the specified file.\n      #\n      # @param [Signet::OAuth2::Client] authorization\n      #    Optional authorization instance. If not provided, the authorization\n      #    already associated with this instance will be written.\n      def write_credentials(credentials_hash)\n        open(self.path, 'w+') do |f|\n          f.write(credentials_hash.to_json)\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/auth/storages/redis_store.rb",
    "content": "# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'json'\n\nmodule Google\n  class APIClient\n    class RedisStore\n\n      DEFAULT_REDIS_CREDENTIALS_KEY = \"google_api_credentials\"\n\n      attr_accessor :redis\n\n      ##\n      # Initializes the RedisStore object.\n      #\n      # @params [Object] Redis instance\n      def initialize(redis, key = nil)\n        @redis= redis\n        @redis_credentials_key = key\n      end\n\n      ##\n      # Attempt to read in credentials from redis.\n      def load_credentials\n        credentials = redis.get redis_credentials_key\n        JSON.parse(credentials) if credentials\n      end\n\n      def redis_credentials_key\n        @redis_credentials_key || DEFAULT_REDIS_CREDENTIALS_KEY\n      end\n\n      ##\n      # Write the credentials to redis.\n      #\n      # @params [Hash] credentials\n      def write_credentials(credentials_hash)\n        redis.set(redis_credentials_key, credentials_hash.to_json)\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/batch.rb",
    "content": "# Copyright 2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'addressable/uri'\nrequire 'google/api_client/reference'\nrequire 'securerandom'\n\nmodule Google\n  class APIClient\n\n    ##\n    # Helper class to contain a response to an individual batched call.\n    #\n    # @api private\n    class BatchedCallResponse\n      # @return [String] UUID of the call\n      attr_reader :call_id\n      # @return [Fixnum] HTTP status code\n      attr_accessor :status\n      # @return [Hash] HTTP response headers\n      attr_accessor :headers\n      # @return [String] HTTP response body\n      attr_accessor :body\n\n      ##\n      # Initialize the call response\n      #\n      # @param [String] call_id\n      #   UUID of the original call\n      # @param [Fixnum] status\n      #   HTTP status\n      # @param [Hash] headers\n      #   HTTP response headers\n      # @param [#read, #to_str] body\n      #   Response body\n      def initialize(call_id, status = nil, headers = nil, body = nil)\n        @call_id, @status, @headers, @body = call_id, status, headers, body\n      end\n    end\n\n    # Wraps multiple API calls into a single over-the-wire HTTP request.\n    #\n    # @example\n    #\n    #     client = Google::APIClient.new\n    #     urlshortener = client.discovered_api('urlshortener')\n    #     batch = Google::APIClient::BatchRequest.new do |result|\n    #        puts result.data\n    #     end\n    #\n    #     batch.add(:api_method => urlshortener.url.insert, :body_object => { 'longUrl' => 'http://example.com/foo' })\n    #     batch.add(:api_method => urlshortener.url.insert, :body_object => { 'longUrl' => 'http://example.com/bar' })\n    #\n    #     client.execute(batch)\n    #\n    class BatchRequest < Request\n      BATCH_BOUNDARY = \"-----------RubyApiBatchRequest\".freeze\n\n      # @api private\n      # @return [Array<(String,Google::APIClient::Request,Proc)] List of API calls in the batch\n      attr_reader :calls\n\n      ##\n      # Creates a new batch request.\n      #\n      # @param [Hash] options\n      #   Set of options for this request.\n      # @param [Proc] block\n      #   Callback for every call's response. Won't be called if a call defined\n      #   a callback of its own.\n      #\n      # @return [Google::APIClient::BatchRequest]\n      #   The constructed object.\n      #\n      # @yield [Google::APIClient::Result]\n      #   block to be called when result ready\n      def initialize(options = {}, &block)\n        @calls = []\n        @global_callback = nil\n        @global_callback = block if block_given?\n        @last_auto_id = 0\n\n        @base_id = SecureRandom.uuid\n\n        options[:uri] ||= 'https://www.googleapis.com/batch'\n        options[:http_method] ||= 'POST'\n\n        super options\n      end\n\n      ##\n      # Add a new call to the batch request.\n      # Each call must have its own call ID; if not provided, one will\n      # automatically be generated, avoiding collisions. 
If duplicate call IDs\n      # are provided, an error will be thrown.\n      #\n      # @param [Hash, Google::APIClient::Request] call\n      #   the call to be added.\n      # @param [String] call_id\n      #   the ID to be used for this call. Must be unique\n      # @param [Proc] block\n      #   callback for this call's response.\n      #\n      # @return [Google::APIClient::BatchRequest]\n      #   the BatchRequest, for chaining\n      #\n      # @yield [Google::APIClient::Result]\n      #   block to be called when result ready\n      def add(call, call_id = nil, &block)\n        unless call.kind_of?(Google::APIClient::Reference)\n          call = Google::APIClient::Reference.new(call)\n        end\n        call_id ||= new_id\n        if @calls.assoc(call_id)\n          raise BatchError,\n              'A call with this ID already exists: %s' % call_id\n        end\n        callback = block_given? ? block : @global_callback\n        @calls << [call_id, call, callback]\n        return self\n      end\n\n      ##\n      # Processes the HTTP response to the batch request, issuing callbacks.\n      #\n      # @api private\n      #\n      # @param [Faraday::Response] response\n      #   the HTTP response.\n      def process_http_response(response)\n        content_type = find_header('Content-Type', response.headers)\n        m = /.*boundary=(.+)/.match(content_type)\n        if m\n          boundary = m[1]\n          parts = response.body.split(/--#{Regexp.escape(boundary)}/)\n          parts = parts[1...-1]\n          parts.each do |part|\n            call_response = deserialize_call_response(part)\n            _, call, callback = @calls.assoc(call_response.call_id)\n            result = Google::APIClient::Result.new(call, call_response)\n            callback.call(result) if callback\n          end\n        end\n        Google::APIClient::Result.new(self, response)\n      end\n\n      ##\n      # Return the request body for the BatchRequest's HTTP request.\n      #\n      # @api private\n      #\n      # @return [String]\n      #   the request body.\n      def to_http_request\n        if @calls.nil? || @calls.empty?\n          raise BatchError, 'Cannot make an empty batch request'\n        end\n        parts = @calls.map {|(call_id, call, _callback)| serialize_call(call_id, call)}\n        build_multipart(parts, 'multipart/mixed', BATCH_BOUNDARY)\n        super\n      end\n\n\n      protected\n\n      ##\n      # Helper method to find a header from its name, regardless of case.\n      #\n      # @api private\n      #\n      # @param [String] name\n      #   the name of the header to find.\n      # @param [Hash] headers\n      #   the hash of headers and their values.\n      #\n      # @return [String]\n      #   the value of the desired header.\n      def find_header(name, headers)\n        _, header = headers.detect do |h, v|\n          h.downcase == name.downcase\n        end\n        return header\n      end\n\n      ##\n      # Create a new call ID. Uses an auto-incrementing, conflict-avoiding ID.\n      #\n      # @api private\n      #\n      # @return [String]\n      #  the new, unique ID.\n      def new_id\n        @last_auto_id += 1\n        while @calls.assoc(@last_auto_id)\n          @last_auto_id += 1\n        end\n        return @last_auto_id.to_s\n      end\n\n      ##\n      # Convert a Content-ID header value to an id. 
Presumes the Content-ID\n      # header conforms to the format that id_to_header() returns.\n      #\n      # @api private\n      #\n      # @param [String] header\n      #   Content-ID header value.\n      #\n      # @return [String]\n      #   The extracted ID value.\n      def header_to_id(header)\n        if !header.start_with?('<') || !header.end_with?('>') ||\n            !header.include?('+')\n          raise BatchError, 'Invalid value for Content-ID: \"%s\"' % header\n        end\n\n        _base, call_id = header[1...-1].split('+')\n        return Addressable::URI.unencode(call_id)\n      end\n\n      ##\n      # Auxiliary method to split the headers from the body in an HTTP response.\n      #\n      # @api private\n      #\n      # @param [String] response\n      #   the response to parse.\n      #\n      # @return [Array<Hash>, String]\n      #   the headers and the body, separately.\n      def split_headers_and_body(response)\n        headers = {}\n        payload = response.lstrip\n        while payload\n          line, payload = payload.split(\"\\n\", 2)\n          line.sub!(/\\s+\\z/, '')\n          break if line.empty?\n          match = /\\A([^:]+):\\s*/.match(line)\n          if match\n            headers[match[1]] = match.post_match\n          else\n            raise BatchError, 'Invalid header line in response: %s' % line\n          end\n        end\n        return headers, payload\n      end\n\n      ##\n      # Convert a single batched response into a BatchedCallResponse object.\n      #\n      # @api private\n      #\n      # @param [String] call_response\n      #   the request to deserialize.\n      #\n      # @return [Google::APIClient::BatchedCallResponse]\n      #   the parsed and converted response.\n      def deserialize_call_response(call_response)\n        outer_headers, outer_body = split_headers_and_body(call_response)\n        status_line, payload = outer_body.split(\"\\n\", 2)\n        _protocol, status, _reason = status_line.split(' ', 3)\n\n        headers, body = split_headers_and_body(payload)\n        content_id = find_header('Content-ID', outer_headers)\n        call_id = header_to_id(content_id)\n        return BatchedCallResponse.new(call_id, status.to_i, headers, body)\n      end\n\n      ##\n      # Serialize a single batched call for assembling the multipart message\n      #\n      # @api private\n      #\n      # @param [Google::APIClient::Request] call\n      #   the call to serialize.\n      #\n      # @return [Faraday::UploadIO]\n      #   the serialized request\n      def serialize_call(call_id, call)\n        method, uri, headers, body = call.to_http_request\n        request = \"#{method.to_s.upcase} #{Addressable::URI.parse(uri).request_uri} HTTP/1.1\"\n        headers.each do |header, value|\n          request << \"\\r\\n%s: %s\" % [header, value]\n        end\n        if body\n          # TODO - CompositeIO if body is a stream\n          request << \"\\r\\n\\r\\n\"\n          if body.respond_to?(:read)\n            request << body.read\n          else\n            request << body.to_s\n          end\n        end\n        Faraday::UploadIO.new(StringIO.new(request), 'application/http', 'ruby-api-request', 'Content-ID' => id_to_header(call_id))\n      end\n\n      ##\n      # Convert an id to a Content-ID header value.\n      #\n      # @api private\n      #\n      # @param [String] call_id\n      #   identifier of individual call.\n      #\n      # @return [String]\n      #   A Content-ID header with the call_id encoded into it. 
A UUID is\n      #   prepended to the value because Content-ID headers are supposed to be\n      #   universally unique.\n      def id_to_header(call_id)\n        return '<%s+%s>' % [@base_id, Addressable::URI.encode(call_id)]\n      end\n\n    end\n  end\nend"
  },
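  {
    "path": "sdk/ruby-google-api-client/examples/batch_request_example.rb",
    "content": "# Illustrative usage sketch for Google::APIClient::BatchRequest. This example\n# file is an editorial addition, not part of the original library; the\n# 'urlshortener' API and URLs are placeholders. It exercises the add/execute\n# flow implemented in batch.rb: each call gets a unique call ID, and the\n# per-call block (or the global callback) fires when process_http_response\n# splits the multipart response.\nrequire 'google/api_client'\n\nclient = Google::APIClient.new(\n  :application_name => 'batch-example',\n  :application_version => '0.0.1'\n)\nurlshortener = client.discovered_api('urlshortener', 'v1')\n\nbatch = Google::APIClient::BatchRequest.new do |result|\n  # Global callback, used for calls added without a block of their own.\n  puts \"global callback saw status #{result.status}\"\nend\n\nbatch.add(:api_method => urlshortener.url.insert,\n          :body_object => {'longUrl' => 'http://example.com/foo'})\nbatch.add(:api_method => urlshortener.url.insert,\n          :body_object => {'longUrl' => 'http://example.com/bar'}) do |result|\n  # Per-call callback; overrides the global callback for this call only.\n  puts \"per-call callback saw status #{result.status}\"\nend\n\nclient.execute(batch)\n"
  },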
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/charset.rb",
    "content": "require 'faraday'\nrequire 'zlib'\n \nmodule Google\n  class APIClient\n    class Charset < Faraday::Middleware\n      include Google::APIClient::Logging\n\n      def charset_for_content_type(type)\n        if type\n          m = type.match(/(?:charset|encoding)=\"?([a-z0-9-]+)\"?/i)\n          if m\n            return Encoding.find(m[1])\n          end\n        end\n        nil\n      end\n\n      def adjust_encoding(env)\n        charset = charset_for_content_type(env[:response_headers]['content-type'])\n        if charset && env[:body].encoding != charset\n          env[:body].force_encoding(charset)\n        end\n      end\n      \n      def on_complete(env)\n        adjust_encoding(env)\n      end\n    end\n  end\nend\n \nFaraday::Response.register_middleware :charset => Google::APIClient::Charset\n"
  },
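  {
    "path": "sdk/ruby-google-api-client/examples/charset_example.rb",
    "content": "# Illustrative sketch (an editorial addition, not part of the original\n# library): charset.rb registers a :charset response middleware with Faraday,\n# so a connection opts into it like any other response middleware. The URL is\n# a placeholder.\nrequire 'faraday'\nrequire 'google/api_client'\nrequire 'google/api_client/charset'\n\nconn = Faraday.new(:url => 'https://www.googleapis.com') do |faraday|\n  # Re-encode response bodies according to the Content-Type charset.\n  faraday.response :charset\n  faraday.adapter Faraday.default_adapter\nend\n"
  },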
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/client_secrets.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nrequire 'compat/multi_json'\n\n\nmodule Google\n  class APIClient\n    ##\n    # Manages the persistence of client configuration data and secrets. Format\n    # inspired by the Google API Python client.\n    #\n    # @see https://developers.google.com/api-client-library/python/guide/aaa_client_secrets\n    #\n    # @example\n    #   {\n    #     \"web\": {\n    #       \"client_id\": \"asdfjasdljfasdkjf\",\n    #       \"client_secret\": \"1912308409123890\",\n    #       \"redirect_uris\": [\"https://www.example.com/oauth2callback\"],\n    #       \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n    #       \"token_uri\": \"https://accounts.google.com/o/oauth2/token\"\n    #     }\n    #   }\n    #\n    # @example\n    #   {\n    #     \"installed\": {\n    #       \"client_id\": \"837647042410-75ifg...usercontent.com\",\n    #       \"client_secret\":\"asdlkfjaskd\",\n    #       \"redirect_uris\": [\"http://localhost\", \"urn:ietf:oauth:2.0:oob\"],\n    #       \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n    #       \"token_uri\": \"https://accounts.google.com/o/oauth2/token\"\n    #     }\n    #   }\n    class ClientSecrets\n      \n      ##\n      # Reads client configuration from a file\n      #\n      # @param [String] filename\n      #   Path to file to load\n      #\n      # @return [Google::APIClient::ClientSecrets]\n      #   OAuth client settings\n      def self.load(filename=nil)\n        if filename && File.directory?(filename)\n          search_path = File.expand_path(filename)\n          filename = nil\n        end\n        while filename == nil\n          search_path ||= File.expand_path('.')\n          if File.exists?(File.join(search_path, 'client_secrets.json'))\n            filename = File.join(search_path, 'client_secrets.json')\n          elsif search_path == '/' || search_path =~ /[a-zA-Z]:[\\/\\\\]/\n            raise ArgumentError,\n              'No client_secrets.json filename supplied ' +\n              'and/or could not be found in search path.'\n          else\n            search_path = File.expand_path(File.join(search_path, '..'))\n          end\n        end\n        data = File.open(filename, 'r') { |file| MultiJson.load(file.read) }\n        return self.new(data)\n      end\n\n      ##\n      # Intialize OAuth client settings.\n      #\n      # @param [Hash] options\n      #   Parsed client secrets files\n      def initialize(options={})\n        # Client auth configuration\n        @flow = options[:flow] || options.keys.first.to_s || 'web'\n        fdata = options[@flow]\n        @client_id = fdata[:client_id] || fdata[\"client_id\"]\n        @client_secret = fdata[:client_secret] || fdata[\"client_secret\"]\n        @redirect_uris = fdata[:redirect_uris] || fdata[\"redirect_uris\"]\n        @redirect_uris ||= [fdata[:redirect_uri] || fdata[\"redirect_uri\"]].compact\n        @javascript_origins = (\n          
fdata[:javascript_origins] ||\n          fdata[\"javascript_origins\"]\n        )\n        @javascript_origins ||= [fdata[:javascript_origin] || fdata[\"javascript_origin\"]].compact\n        @authorization_uri = fdata[:auth_uri] || fdata[\"auth_uri\"]\n        @authorization_uri ||= fdata[:authorization_uri]\n        @token_credential_uri = fdata[:token_uri] || fdata[\"token_uri\"]\n        @token_credential_uri ||= fdata[:token_credential_uri]\n\n        # Associated token info\n        @access_token = fdata[:access_token] || fdata[\"access_token\"]\n        @refresh_token = fdata[:refresh_token] || fdata[\"refresh_token\"]\n        @id_token = fdata[:id_token] || fdata[\"id_token\"]\n        @expires_in = fdata[:expires_in] || fdata[\"expires_in\"]\n        @expires_at = fdata[:expires_at] || fdata[\"expires_at\"]\n        @issued_at = fdata[:issued_at] || fdata[\"issued_at\"]\n      end\n\n      attr_reader(\n        :flow, :client_id, :client_secret, :redirect_uris, :javascript_origins,\n        :authorization_uri, :token_credential_uri, :access_token,\n        :refresh_token, :id_token, :expires_in, :expires_at, :issued_at\n      )\n\n      ##\n      # Serialize back to the original JSON form\n      #\n      # @return [String]\n      #   JSON\n      def to_json\n        return MultiJson.dump(to_hash)\n      end\n      \n      def to_hash\n        {\n          self.flow => ({\n            'client_id' => self.client_id,\n            'client_secret' => self.client_secret,\n            'redirect_uris' => self.redirect_uris,\n            'javascript_origins' => self.javascript_origins,\n            'auth_uri' => self.authorization_uri,\n            'token_uri' => self.token_credential_uri,\n            'access_token' => self.access_token,\n            'refresh_token' => self.refresh_token,\n            'id_token' => self.id_token,\n            'expires_in' => self.expires_in,\n            'expires_at' => self.expires_at,\n            'issued_at' => self.issued_at\n          }).inject({}) do |accu, (k, v)|\n            # Prunes empty values from JSON output.\n            unless v == nil || (v.respond_to?(:empty?) && v.empty?)\n              accu[k] = v\n            end\n            accu\n          end\n        }\n      end\n      \n      def to_authorization\n        gem 'signet', '>= 0.4.0'\n        require 'signet/oauth_2/client'\n        # NOTE: Do not rely on this default value, as it may change\n        new_authorization = Signet::OAuth2::Client.new\n        new_authorization.client_id = self.client_id\n        new_authorization.client_secret = self.client_secret\n        new_authorization.authorization_uri = (\n          self.authorization_uri ||\n          'https://accounts.google.com/o/oauth2/auth'\n        )\n        new_authorization.token_credential_uri = (\n          self.token_credential_uri ||\n          'https://accounts.google.com/o/oauth2/token'\n        )\n        new_authorization.redirect_uri = self.redirect_uris.first\n\n        # These are supported, but unlikely.\n        new_authorization.access_token = self.access_token\n        new_authorization.refresh_token = self.refresh_token\n        new_authorization.id_token = self.id_token\n        new_authorization.expires_in = self.expires_in\n        new_authorization.issued_at = self.issued_at if self.issued_at\n        new_authorization.expires_at = self.expires_at if self.expires_at\n        return new_authorization\n      end\n    end\n  end\nend\n"
  },
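  {
    "path": "sdk/ruby-google-api-client/examples/client_secrets_example.rb",
    "content": "# Illustrative sketch (an editorial addition, not part of the original\n# library): load client_secrets.json (searched for upward from the working\n# directory, as ClientSecrets.load implements) and convert it into a Signet\n# OAuth 2 client via #to_authorization. The scope is a placeholder.\nrequire 'google/api_client'\nrequire 'google/api_client/client_secrets'\n\nsecrets = Google::APIClient::ClientSecrets.load\nauth = secrets.to_authorization\nauth.scope = 'https://www.googleapis.com/auth/drive'\n\n# Send the user to auth.authorization_uri, then exchange the returned code:\n#   auth.code = params[:code]\n#   auth.fetch_access_token!\nputs auth.authorization_uri\n"
  },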
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/discovery/api.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nrequire 'addressable/uri'\nrequire 'multi_json'\nrequire 'active_support/inflector'\nrequire 'google/api_client/discovery/resource'\nrequire 'google/api_client/discovery/method'\nrequire 'google/api_client/discovery/media'\n\nmodule Google\n  class APIClient\n    ##\n    # A service that has been described by a discovery document.\n    class API\n\n      ##\n      # Creates a description of a particular version of a service.\n      #\n      # @param [String] document_base\n      #   Base URI for the discovery document.\n      # @param [Hash] discovery_document\n      #   The section of the discovery document that applies to this service\n      #   version.\n      #\n      # @return [Google::APIClient::API] The constructed service object.\n      def initialize(document_base, discovery_document)\n        @document_base = Addressable::URI.parse(document_base)\n        @discovery_document = discovery_document\n        metaclass = (class << self; self; end)\n        self.discovered_resources.each do |resource|\n          method_name = ActiveSupport::Inflector.underscore(resource.name).to_sym\n          if !self.respond_to?(method_name)\n            metaclass.send(:define_method, method_name) { resource }\n          end\n        end\n        self.discovered_methods.each do |method|\n          method_name = ActiveSupport::Inflector.underscore(method.name).to_sym\n          if !self.respond_to?(method_name)\n            metaclass.send(:define_method, method_name) { method }\n          end\n        end\n      end\n      \n      # @return [String] unparsed discovery document for the API\n      attr_reader :discovery_document\n\n      ##\n      # Returns the id of the service.\n      #\n      # @return [String] The service id.\n      def id\n        return (\n          @discovery_document['id'] ||\n          \"#{self.name}:#{self.version}\"\n        )\n      end\n\n      ##\n      # Returns the identifier for the service.\n      #\n      # @return [String] The service identifier.\n      def name\n        return @discovery_document['name']\n      end\n\n      ##\n      # Returns the version of the service.\n      #\n      # @return [String] The service version.\n      def version\n        return @discovery_document['version']\n      end\n\n      ##\n      # Returns a human-readable title for the API.\n      #\n      # @return [Hash] The API title.\n      def title\n        return @discovery_document['title']\n      end\n\n      ##\n      # Returns a human-readable description of the API.\n      #\n      # @return [Hash] The API description.\n      def description\n        return @discovery_document['description']\n      end\n\n      ##\n      # Returns a URI for the API documentation.\n      #\n      # @return [Hash] The API documentation.\n      def documentation\n        return Addressable::URI.parse(@discovery_document['documentationLink'])\n      end\n\n      ##\n      # Returns true if this is 
the preferred version of this API.\n      #\n      # @return [TrueClass, FalseClass]\n      #   Whether or not this is the preferred version of this API.\n      def preferred\n        return !!@discovery_document['preferred']\n      end\n\n      ##\n      # Returns the list of API features.\n      #\n      # @return [Array]\n      #   The features supported by this API.\n      def features\n        return @discovery_document['features'] || []\n      end\n\n      ##\n      # Returns the root URI for this service.\n      #\n      # @return [Addressable::URI] The root URI.\n      def root_uri\n        return @root_uri ||= (\n          Addressable::URI.parse(self.discovery_document['rootUrl'])\n        )\n      end\n\n      ##\n      # Returns true if this API uses a data wrapper.\n      #\n      # @return [TrueClass, FalseClass]\n      #   Whether or not this API uses a data wrapper.\n      def data_wrapper?\n        return self.features.include?('dataWrapper')\n      end\n\n      ##\n      # Returns the base URI for the discovery document.\n      #\n      # @return [Addressable::URI] The base URI.\n      attr_reader :document_base\n\n      ##\n      # Returns the base URI for this version of the service.\n      #\n      # @return [Addressable::URI] The base URI that methods are joined to.\n      def method_base\n        if @discovery_document['basePath']\n          return @method_base ||= (\n            self.root_uri.join(Addressable::URI.parse(@discovery_document['basePath']))\n          ).normalize\n        else\n          return nil\n        end\n      end\n\n      ##\n      # Updates the hierarchy of resources and methods with the new base.\n      #\n      # @param [Addressable::URI, #to_str, String] new_method_base\n      #   The new base URI to use for the service.\n      def method_base=(new_method_base)\n        @method_base = Addressable::URI.parse(new_method_base)\n        self.discovered_resources.each do |resource|\n          resource.method_base = @method_base\n        end\n        self.discovered_methods.each do |method|\n          method.method_base = @method_base\n        end\n      end\n\n      ##\n      # Returns the base URI for batch calls to this service.\n      #\n      # @return [Addressable::URI] The base URI that methods are joined to.\n      def batch_path\n        if @discovery_document['batchPath']\n          return @batch_path ||= (\n            self.document_base.join(Addressable::URI.parse('/' +\n                @discovery_document['batchPath']))\n          ).normalize\n        else\n          return nil\n        end\n      end\n\n      ##\n      # A list of schemas available for this version of the API.\n      #\n      # @return [Hash] A list of {Google::APIClient::Schema} objects.\n      def schemas\n        return @schemas ||= (\n          (@discovery_document['schemas'] || []).inject({}) do |accu, (k, v)|\n            accu[k] = Google::APIClient::Schema.parse(self, v)\n            accu\n          end\n        )\n      end\n\n      ##\n      # Returns a schema for a kind value.\n      #\n      # @return [Google::APIClient::Schema] The associated Schema object.\n      def schema_for_kind(kind)\n        api_name, schema_name = kind.split('#', 2)\n        if api_name != self.name\n          raise ArgumentError,\n            \"The kind does not match this API. 
\" +\n            \"Expected '#{self.name}', got '#{api_name}'.\"\n        end\n        for k, v in self.schemas\n          return v if k.downcase == schema_name.downcase\n        end\n        return nil\n      end\n\n      ##\n      # A list of resources available at the root level of this version of the\n      # API.\n      #\n      # @return [Array] A list of {Google::APIClient::Resource} objects.\n      def discovered_resources\n        return @discovered_resources ||= (\n          (@discovery_document['resources'] || []).inject([]) do |accu, (k, v)|\n            accu << Google::APIClient::Resource.new(\n              self, self.method_base, k, v\n            )\n            accu\n          end\n        )\n      end\n\n      ##\n      # A list of methods available at the root level of this version of the\n      # API.\n      #\n      # @return [Array] A list of {Google::APIClient::Method} objects.\n      def discovered_methods\n        return @discovered_methods ||= (\n          (@discovery_document['methods'] || []).inject([]) do |accu, (k, v)|\n            accu << Google::APIClient::Method.new(self, self.method_base, k, v)\n            accu\n          end\n        )\n      end\n\n      ##\n      # Allows deep inspection of the discovery document.\n      def [](key)\n        return @discovery_document[key]\n      end\n\n      ##\n      # Converts the service to a flat mapping of RPC names and method objects.\n      #\n      # @return [Hash] All methods available on the service.\n      #\n      # @example\n      #   # Discover available methods\n      #   method_names = client.discovered_api('buzz').to_h.keys\n      def to_h\n        return @hash ||= (begin\n          methods_hash = {}\n          self.discovered_methods.each do |method|\n            methods_hash[method.id] = method\n          end\n          self.discovered_resources.each do |resource|\n            methods_hash.merge!(resource.to_h)\n          end\n          methods_hash\n        end)\n      end\n\n      ##\n      # Returns a <code>String</code> representation of the service's state.\n      #\n      # @return [String] The service's state, as a <code>String</code>.\n      def inspect\n        sprintf(\n          \"#<%s:%#0x ID:%s>\", self.class.to_s, self.object_id, self.id\n        )\n      end\n      \n      ##\n      # Marshalling support - serialize the API to a string (doc base + original \n      # discovery document).\n      def _dump(level)\n        MultiJson.dump([@document_base.to_s, @discovery_document])\n      end\n      \n      ##\n      # Marshalling support - Restore an API instance from serialized form\n      def self._load(obj)\n        new(*MultiJson.load(obj)) \n      end\n\n    end\n  end\nend\n"
  },
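  {
    "path": "sdk/ruby-google-api-client/examples/discovery_api_example.rb",
    "content": "# Illustrative sketch (an editorial addition, not part of the original\n# library): discover an API and use the dynamically defined resource/method\n# readers that API#initialize installs. 'drive', 'v2', and the file ID are\n# placeholders.\nrequire 'google/api_client'\n\nclient = Google::APIClient.new(\n  :application_name => 'discovery-example',\n  :application_version => '0.0.1'\n)\ndrive = client.discovered_api('drive', 'v2')\n\nputs drive.id                          # e.g. \"drive:v2\"\nputs drive.preferred                   # true for the preferred version\nputs drive.to_h.keys.take(3).inspect   # flat RPC-name => Method mapping\n\nresult = client.execute(\n  :api_method => drive.files.get,\n  :parameters => {'fileId' => 'placeholder-file-id'}\n)\nputs result.status\n"
  },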
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/discovery/media.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nrequire 'addressable/uri'\nrequire 'addressable/template'\n\nrequire 'google/api_client/errors'\n\n\nmodule Google\n  class APIClient\n    ##\n    # Media upload elements for discovered methods\n    class MediaUpload\n\n      ##\n      # Creates a description of a particular method.\n      #\n      # @param [Google::APIClient::API] api\n      #    Base discovery document for the API\n      # @param [Addressable::URI] method_base\n      #   The base URI for the service.\n      # @param [Hash] discovery_document\n      #   The media upload section of the discovery document.\n      #\n      # @return [Google::APIClient::Method] The constructed method object.\n      def initialize(api, method_base, discovery_document)\n        @api = api\n        @method_base = method_base\n        @discovery_document = discovery_document\n      end\n\n      ##\n      # List of acceptable mime types\n      #\n      # @return [Array]\n      #   List of acceptable mime types for uploaded content\n      def accepted_types\n        @discovery_document['accept']\n      end\n\n      ##\n      # Maximum size of an uplad\n      # TODO: Parse & convert to numeric value\n      #\n      # @return [String]\n      def max_size\n        @discovery_document['maxSize']\n      end\n\n      ##\n      # Returns the URI template for the method.  A parameter list can be\n      # used to expand this into a URI.\n      #\n      # @return [Addressable::Template] The URI template.\n      def uri_template\n        return @uri_template ||= Addressable::Template.new(\n          @api.method_base.join(Addressable::URI.parse(@discovery_document['protocols']['simple']['path']))\n        )\n      end\n\n    end\n\n  end\nend\n"
  },
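  {
    "path": "sdk/ruby-google-api-client/examples/media_upload_info_example.rb",
    "content": "# Illustrative sketch (an editorial addition, not part of the original\n# library): reading the upload constraints that MediaUpload exposes for a\n# discovered method's 'mediaUpload' section. 'drive' and 'v2' are\n# placeholders.\nrequire 'google/api_client'\n\nclient = Google::APIClient.new(\n  :application_name => 'media-info-example',\n  :application_version => '0.0.1'\n)\ndrive = client.discovered_api('drive', 'v2')\n\nupload_info = drive.files.insert.media_upload\nputs upload_info.accepted_types.inspect  # acceptable MIME types\nputs upload_info.max_size                # unparsed string, e.g. \"5120GB\"\n"
  },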
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/discovery/method.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nrequire 'addressable/uri'\nrequire 'addressable/template'\n\nrequire 'google/api_client/errors'\n\n\nmodule Google\n  class APIClient\n    ##\n    # A method that has been described by a discovery document.\n    class Method\n\n      ##\n      # Creates a description of a particular method.\n      #\n      # @param [Google::APIClient::API] api\n      #   The API this method belongs to.\n      # @param [Addressable::URI] method_base\n      #   The base URI for the service.\n      # @param [String] method_name\n      #   The identifier for the method.\n      # @param [Hash] discovery_document\n      #   The section of the discovery document that applies to this method.\n      #\n      # @return [Google::APIClient::Method] The constructed method object.\n      def initialize(api, method_base, method_name, discovery_document)\n        @api = api\n        @method_base = method_base\n        @name = method_name\n        @discovery_document = discovery_document\n      end\n\n      # @return [String] unparsed discovery document for the method\n      attr_reader :discovery_document\n\n      ##\n      # Returns the API this method belongs to.\n      #\n      # @return [Google::APIClient::API] The API this method belongs to.\n      attr_reader :api\n\n      ##\n      # Returns the identifier for the method.\n      #\n      # @return [String] The method identifier.\n      attr_reader :name\n\n      ##\n      # Returns the base URI for the method.\n      #\n      # @return [Addressable::URI]\n      #   The base URI that this method will be joined to.\n      attr_reader :method_base\n\n      ##\n      # Updates the method with the new base.\n      #\n      # @param [Addressable::URI, #to_str, String] new_method_base\n      #   The new base URI to use for the method.\n      def method_base=(new_method_base)\n        @method_base = Addressable::URI.parse(new_method_base)\n        @uri_template = nil\n      end\n\n      ##\n      # Returns a human-readable description of the method.\n      #\n      # @return [Hash] The API description.\n      def description\n        return @discovery_document['description']\n      end\n      \n      ##\n      # Returns the method ID.\n      #\n      # @return [String] The method identifier.\n      def id\n        return @discovery_document['id']\n      end\n\n      ##\n      # Returns the HTTP method or 'GET' if none is specified.\n      #\n      # @return [String] The HTTP method that will be used in the request.\n      def http_method\n        return @discovery_document['httpMethod'] || 'GET'\n      end\n\n      ##\n      # Returns the URI template for the method.  
A parameter list can be\n      # used to expand this into a URI.\n      #\n      # @return [Addressable::Template] The URI template.\n      def uri_template\n        return @uri_template ||= Addressable::Template.new(\n          self.method_base.join(Addressable::URI.parse(\"./\" + @discovery_document['path']))\n        )\n      end\n\n      ##\n      # Returns media upload information for this method, if supported\n      #\n      # @return [Google::APIClient::MediaUpload] Description of upload endpoints\n      def media_upload\n        if @discovery_document['mediaUpload']\n          return @media_upload ||= Google::APIClient::MediaUpload.new(self, self.method_base, @discovery_document['mediaUpload'])\n        else\n          return nil\n        end\n      end\n\n      ##\n      # Returns the Schema object for the method's request, if any.\n      #\n      # @return [Google::APIClient::Schema] The request schema.\n      def request_schema\n        if @discovery_document['request']\n          schema_name = @discovery_document['request']['$ref']\n          return @api.schemas[schema_name]\n        else\n          return nil\n        end\n      end\n\n      ##\n      # Returns the Schema object for the method's response, if any.\n      #\n      # @return [Google::APIClient::Schema] The response schema.\n      def response_schema\n        if @discovery_document['response']\n          schema_name = @discovery_document['response']['$ref']\n          return @api.schemas[schema_name]\n        else\n          return nil\n        end\n      end\n\n      ##\n      # Normalizes parameters, converting to the appropriate types.\n      #\n      # @param [Hash, Array] parameters\n      #   The parameters to normalize.\n      #\n      # @return [Array] The normalized parameters.\n      def normalize_parameters(parameters={})\n        # Convert keys to Strings when appropriate\n        if parameters.kind_of?(Hash) || parameters.kind_of?(Array)\n          # Returning an array since parameters can be repeated (i.e., the AdSense Management API)\n          parameters = parameters.inject([]) do |accu, (k, v)|\n            k = k.to_s if k.kind_of?(Symbol)\n            k = k.to_str if k.respond_to?(:to_str)\n            unless k.kind_of?(String)\n              raise TypeError, \"Expected String, got #{k.class}.\"\n            end\n            accu << [k, v]\n            accu\n          end\n        else\n          raise TypeError,\n            \"Expected Hash or Array, got #{parameters.class}.\"\n        end\n        return parameters\n      end\n\n      ##\n      # Expands the method's URI template using a parameter list.\n      #\n      # @api private\n      # @param [Hash, Array] parameters\n      #   The parameter list to use.\n      #\n      # @return [Addressable::URI] The URI after expansion.\n      def generate_uri(parameters={})\n        parameters = self.normalize_parameters(parameters)\n\n        self.validate_parameters(parameters)\n        template_variables = self.uri_template.variables\n        upload_type = parameters.assoc('uploadType') || parameters.assoc('upload_type')\n        if upload_type\n          unless self.media_upload\n            raise ArgumentError, \"Media upload not supported for this method\"\n          end\n          case upload_type.last\n          when 'media', 'multipart', 'resumable'\n            uri = self.media_upload.uri_template.expand(parameters)\n          else\n            raise ArgumentError, \"Invalid uploadType '#{upload_type}'\"\n          end\n        
else\n          uri = self.uri_template.expand(parameters)\n        end\n        query_parameters = parameters.reject do |k, v|\n          template_variables.include?(k)\n        end\n        # encode all non-template parameters\n        params = \"\"\n        unless query_parameters.empty?\n          params = \"?\" + Addressable::URI.form_encode(query_parameters.sort)\n        end\n        # Normalization is necessary because of undesirable percent-escaping\n        # during URI template expansion\n        return uri.normalize + params\n      end\n\n      ##\n      # Generates an HTTP request for this method.\n      #\n      # @api private\n      # @param [Hash, Array] parameters\n      #   The parameters to send.\n      # @param [String, StringIO] body The body for the HTTP request.\n      # @param [Hash, Array] headers The HTTP headers for the request.\n      # @option options [Faraday::Connection] :connection\n      #   The HTTP connection to use.\n      #\n      # @return [Array] The generated HTTP request.\n      def generate_request(parameters={}, body='', headers={}, options={})\n        if !headers.kind_of?(Array) && !headers.kind_of?(Hash)\n          raise TypeError, \"Expected Hash or Array, got #{headers.class}.\"\n        end\n        method = self.http_method.to_s.downcase.to_sym\n        uri = self.generate_uri(parameters)\n        headers = Faraday::Utils::Headers.new(headers)\n        return [method, uri, headers, body]\n      end\n\n\n      ##\n      # Returns a <code>Hash</code> of the parameter descriptions for\n      # this method.\n      #\n      # @return [Hash] The parameter descriptions.\n      def parameter_descriptions\n        @parameter_descriptions ||= (\n          @discovery_document['parameters'] || {}\n        ).inject({}) { |h,(k,v)| h[k]=v; h }\n      end\n\n      ##\n      # Returns an <code>Array</code> of the parameters for this method.\n      #\n      # @return [Array] The parameters.\n      def parameters\n        @parameters ||= ((\n          @discovery_document['parameters'] || {}\n        ).inject({}) { |h,(k,v)| h[k]=v; h }).keys\n      end\n\n      ##\n      # Returns an <code>Array</code> of the required parameters for this\n      # method.\n      #\n      # @return [Array] The required parameters.\n      #\n      # @example\n      #   # A list of all required parameters.\n      #   method.required_parameters\n      def required_parameters\n        @required_parameters ||= ((self.parameter_descriptions.select do |k, v|\n          v['required']\n        end).inject({}) { |h,(k,v)| h[k]=v; h }).keys\n      end\n\n      ##\n      # Returns an <code>Array</code> of the optional parameters for this\n      # method.\n      #\n      # @return [Array] The optional parameters.\n      #\n      # @example\n      #   # A list of all optional parameters.\n      #   method.optional_parameters\n      def optional_parameters\n        @optional_parameters ||= ((self.parameter_descriptions.reject do |k, v|\n          v['required']\n        end).inject({}) { |h,(k,v)| h[k]=v; h }).keys\n      end\n\n      ##\n      # Verifies that the parameters are valid for this method.  
Raises an\n      # exception if validation fails.\n      #\n      # @api private\n      # @param [Hash, Array] parameters\n      #   The parameters to verify.\n      #\n      # @return [NilClass] <code>nil</code> if validation passes.\n      def validate_parameters(parameters={})\n        parameters = self.normalize_parameters(parameters)\n        required_variables = ((self.parameter_descriptions.select do |k, v|\n          v['required']\n        end).inject({}) { |h,(k,v)| h[k]=v; h }).keys\n        missing_variables = required_variables - parameters.map { |(k, _)| k }\n        if missing_variables.size > 0\n          raise ArgumentError,\n            \"Missing required parameters: #{missing_variables.join(', ')}.\"\n        end\n        parameters.each do |k, v|\n          # Handle repeated parameters.\n          if self.parameter_descriptions[k] &&\n              self.parameter_descriptions[k]['repeated'] &&\n              v.kind_of?(Array)\n            # If this is a repeated parameter and we've got an array as a\n            # value, just provide the whole array to the loop below.\n            items = v\n          else\n            # If this is not a repeated parameter, or if it is but we're\n            # being given a single value, wrap the value in an array, so that\n            # the loop below still works for the single element.\n            items = [v]\n          end\n\n          items.each do |item|\n            if self.parameter_descriptions[k]\n              enum = self.parameter_descriptions[k]['enum']\n              if enum && !enum.include?(item)\n                raise ArgumentError,\n                  \"Parameter '#{k}' has an invalid value: #{item}. \" +\n                  \"Must be one of #{enum.inspect}.\"\n              end\n              pattern = self.parameter_descriptions[k]['pattern']\n              if pattern\n                regexp = Regexp.new(\"^#{pattern}$\")\n                if item !~ regexp\n                  raise ArgumentError,\n                    \"Parameter '#{k}' has an invalid value: #{item}. \" +\n                    \"Must match: /^#{pattern}$/.\"\n                end\n              end\n            end\n          end\n        end\n        return nil\n      end\n\n      ##\n      # Returns a <code>String</code> representation of the method's state.\n      #\n      # @return [String] The method's state, as a <code>String</code>.\n      def inspect\n        sprintf(\n          \"#<%s:%#0x ID:%s>\",\n          self.class.to_s, self.object_id, self.id\n        )\n      end\n    end\n  end\nend\n"
  },
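  {
    "path": "sdk/ruby-google-api-client/examples/discovery_method_example.rb",
    "content": "# Illustrative sketch (an editorial addition, not part of the original\n# library): inspecting a discovered Method's parameter metadata, which\n# validate_parameters checks (required values, enums, patterns) before\n# generate_uri builds a request. 'drive' and 'v2' are placeholders.\nrequire 'google/api_client'\n\nclient = Google::APIClient.new(\n  :application_name => 'method-example',\n  :application_version => '0.0.1'\n)\ndrive = client.discovered_api('drive', 'v2')\nmethod = drive.to_h['drive.files.get']\n\nputs method.http_method                  # \"GET\"\nputs method.required_parameters.inspect  # [\"fileId\"]\n\nbegin\n  method.validate_parameters({})         # fileId is missing\nrescue ArgumentError => e\n  puts e.message                         # \"Missing required parameters: fileId.\"\nend\n"
  },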
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/discovery/resource.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nrequire 'addressable/uri'\n\nrequire 'active_support/inflector'\nrequire 'google/api_client/discovery/method'\n\n\nmodule Google\n  class APIClient\n    ##\n    # A resource that has been described by a discovery document.\n    class Resource\n\n      ##\n      # Creates a description of a particular version of a resource.\n      #\n      # @param [Google::APIClient::API] api\n      #   The API this resource belongs to.\n      # @param [Addressable::URI] method_base\n      #   The base URI for the service.\n      # @param [String] resource_name\n      #   The identifier for the resource.\n      # @param [Hash] discovery_document\n      #   The section of the discovery document that applies to this resource.\n      #\n      # @return [Google::APIClient::Resource] The constructed resource object.\n      def initialize(api, method_base, resource_name, discovery_document)\n        @api = api\n        @method_base = method_base\n        @name = resource_name\n        @discovery_document = discovery_document\n        metaclass = (class <<self; self; end)\n        self.discovered_resources.each do |resource|\n          method_name = ActiveSupport::Inflector.underscore(resource.name).to_sym\n          if !self.respond_to?(method_name)\n            metaclass.send(:define_method, method_name) { resource }\n          end\n        end\n        self.discovered_methods.each do |method|\n          method_name = ActiveSupport::Inflector.underscore(method.name).to_sym\n          if !self.respond_to?(method_name)\n            metaclass.send(:define_method, method_name) { method }\n          end\n        end\n      end\n\n      # @return [String] unparsed discovery document for the resource\n      attr_reader :discovery_document\n\n      ##\n      # Returns the identifier for the resource.\n      #\n      # @return [String] The resource identifier.\n      attr_reader :name\n\n      ##\n      # Returns the base URI for this resource.\n      #\n      # @return [Addressable::URI] The base URI that methods are joined to.\n      attr_reader :method_base\n\n      ##\n      # Returns a human-readable description of the resource.\n      #\n      # @return [Hash] The API description.\n      def description\n        return @discovery_document['description']\n      end\n\n      ##\n      # Updates the hierarchy of resources and methods with the new base.\n      #\n      # @param [Addressable::URI, #to_str, String] new_method_base\n      #   The new base URI to use for the resource.\n      def method_base=(new_method_base)\n        @method_base = Addressable::URI.parse(new_method_base)\n        self.discovered_resources.each do |resource|\n          resource.method_base = @method_base\n        end\n        self.discovered_methods.each do |method|\n          method.method_base = @method_base\n        end\n      end\n\n      ##\n      # A list of sub-resources available on this resource.\n      #\n      # @return [Array] 
A list of {Google::APIClient::Resource} objects.\n      def discovered_resources\n        return @discovered_resources ||= (\n          (@discovery_document['resources'] || []).inject([]) do |accu, (k, v)|\n            accu << Google::APIClient::Resource.new(\n              @api, self.method_base, k, v\n            )\n            accu\n          end\n        )\n      end\n\n      ##\n      # A list of methods available on this resource.\n      #\n      # @return [Array] A list of {Google::APIClient::Method} objects.\n      def discovered_methods\n        return @discovered_methods ||= (\n          (@discovery_document['methods'] || []).inject([]) do |accu, (k, v)|\n            accu << Google::APIClient::Method.new(@api, self.method_base, k, v)\n            accu\n          end\n        )\n      end\n\n      ##\n      # Converts the resource to a flat mapping of RPC names and method\n      # objects.\n      #\n      # @return [Hash] All methods available on the resource.\n      def to_h\n        return @hash ||= (begin\n          methods_hash = {}\n          self.discovered_methods.each do |method|\n            methods_hash[method.id] = method\n          end\n          self.discovered_resources.each do |resource|\n            methods_hash.merge!(resource.to_h)\n          end\n          methods_hash\n        end)\n      end\n\n      ##\n      # Returns a <code>String</code> representation of the resource's state.\n      #\n      # @return [String] The resource's state, as a <code>String</code>.\n      def inspect\n        sprintf(\n          \"#<%s:%#0x NAME:%s>\", self.class.to_s, self.object_id, self.name\n        )\n      end\n    end\n  end\nend\n"
  },
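  {
    "path": "sdk/ruby-google-api-client/examples/discovery_resource_example.rb",
    "content": "# Illustrative sketch (an editorial addition, not part of the original\n# library): walking the resource tree that Resource#discovered_resources and\n# Resource#discovered_methods expose. 'drive' and 'v2' are placeholders.\nrequire 'google/api_client'\n\nclient = Google::APIClient.new(\n  :application_name => 'resource-example',\n  :application_version => '0.0.1'\n)\ndrive = client.discovered_api('drive', 'v2')\n\ndrive.discovered_resources.each do |resource|\n  puts resource.name\n  resource.discovered_methods.each do |method|\n    puts \"  #{method.id}\"\n  end\nend\n"
  },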
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/discovery/schema.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nrequire 'time'\nrequire 'multi_json'\nrequire 'compat/multi_json'\nrequire 'base64'\nrequire 'autoparse'\nrequire 'addressable/uri'\nrequire 'addressable/template'\n\nrequire 'active_support/inflector'\nrequire 'google/api_client/errors'\n\n\nmodule Google\n  class APIClient\n    ##\n    # @api private\n    module Schema\n      def self.parse(api, schema_data)\n        # This method is super-long, but hard to break up due to the\n        # unavoidable dependence on closures and execution context.\n        schema_name = schema_data['id']\n\n        # Due to an oversight, schema IDs may not be URI references.\n        # TODO(bobaman): Remove this code once this has been resolved.\n        schema_uri = (\n          api.document_base +\n          (schema_name[0..0] != '#' ? '#' + schema_name : schema_name)\n        )\n\n        # Due to an oversight, schema IDs may not be URI references.\n        # TODO(bobaman): Remove this whole lambda once this has been resolved.\n        reformat_references = lambda do |data|\n          # This code is not particularly efficient due to recursive traversal\n          # and excess object creation, but this hopefully shouldn't be an\n          # issue since it should only be called only once per schema per\n          # process.\n          if data.kind_of?(Hash) &&\n              data['$ref'] && !data['$ref'].kind_of?(Hash)\n            if data['$ref'].respond_to?(:to_str)\n              reference = data['$ref'].to_str\n            else\n              raise TypeError, \"Expected String, got #{data['$ref'].class}\"\n            end\n            reference = '#' + reference if reference[0..0] != '#'\n            data.merge({\n              '$ref' => reference\n            })\n          elsif data.kind_of?(Hash)\n            data.inject({}) do |accu, (key, value)|\n              if value.kind_of?(Hash)\n                accu[key] = reformat_references.call(value)\n              else\n                accu[key] = value\n              end\n              accu\n            end\n          else\n            data\n          end\n        end\n        schema_data = reformat_references.call(schema_data)\n\n        if schema_name\n          api_name_string = ActiveSupport::Inflector.camelize(api.name)\n          api_version_string = ActiveSupport::Inflector.camelize(api.version).gsub('.', '_')\n          # This is for compatibility with Ruby 1.8.7.\n          # TODO(bobaman) Remove this when we eventually stop supporting 1.8.7.\n          args = []\n          args << false if Class.method(:const_defined?).arity != 1\n          if Google::APIClient::Schema.const_defined?(api_name_string, *args)\n            api_name = Google::APIClient::Schema.const_get(\n              api_name_string, *args\n            )\n          else\n            api_name = Google::APIClient::Schema.const_set(\n              api_name_string, Module.new\n            )\n          end\n          if 
api_name.const_defined?(api_version_string, *args)\n            api_version = api_name.const_get(api_version_string, *args)\n          else\n            api_version = api_name.const_set(api_version_string, Module.new)\n          end\n          if api_version.const_defined?(schema_name, *args)\n            schema_class = api_version.const_get(schema_name, *args)\n          end\n        end\n\n        # It's possible the schema has already been defined. If so, don't\n        # redefine it. This means that reloading a schema which has already\n        # been loaded into memory is not possible.\n        unless schema_class\n          schema_class = AutoParse.generate(schema_data, :uri => schema_uri)\n          if schema_name\n            api_version.const_set(schema_name, schema_class)\n          end\n        end\n        return schema_class\n      end\n    end\n  end\nend\n"
  },
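  {
    "path": "sdk/ruby-google-api-client/examples/schema_example.rb",
    "content": "# Illustrative sketch (an editorial addition, not part of the original\n# library): looking up the schema classes that Schema.parse generates, via\n# API#schemas and API#schema_for_kind. 'drive', 'v2', 'File', and the kind\n# string are placeholders.\nrequire 'google/api_client'\n\nclient = Google::APIClient.new(\n  :application_name => 'schema-example',\n  :application_version => '0.0.1'\n)\ndrive = client.discovered_api('drive', 'v2')\n\nfile_schema = drive.schemas['File']       # AutoParse-generated class\nputs file_schema.inspect\nputs drive.schema_for_kind('drive#file')  # case-insensitive kind lookup\n"
  },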
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/discovery.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nrequire 'google/api_client/discovery/api'\nrequire 'google/api_client/discovery/resource'\nrequire 'google/api_client/discovery/method'\nrequire 'google/api_client/discovery/schema'\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/environment.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nmodule Google\n  class APIClient\n    module ENV\n      OS_VERSION = begin\n        if RUBY_PLATFORM =~ /mswin|win32|mingw|bccwin|cygwin/\n          # TODO(bobaman)\n          # Confirm that all of these Windows environments actually have access\n          # to the `ver` command.\n          `ver`.sub(/\\s*\\[Version\\s*/, '/').sub(']', '').strip\n        elsif RUBY_PLATFORM =~ /darwin/i\n          \"Mac OS X/#{`sw_vers -productVersion`}\"\n        elsif RUBY_PLATFORM == 'java'\n          # Get the information from java system properties to avoid spawning a\n          # sub-process, which is not friendly in some contexts (web servers).\n          require 'java'\n          name = java.lang.System.getProperty('os.name')\n          version = java.lang.System.getProperty('os.version')\n          \"#{name}/#{version}\"\n        else\n          `uname -sr`.sub(' ', '/')\n        end\n      rescue Exception\n        RUBY_PLATFORM\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/errors.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nmodule Google\n  class APIClient\n    ##\n    # An error which is raised when there is an unexpected response or other\n    # transport error that prevents an operation from succeeding.\n    class TransmissionError < StandardError\n      attr_reader :result\n      def initialize(message = nil, result = nil)\n        super(message)\n        @result = result\n      end\n    end\n\n    ##\n    # An exception that is raised if a redirect is required\n    #\n    class RedirectError < TransmissionError\n    end\n\n    ##\n    # An exception that is raised if a method is called with missing or\n    # invalid parameter values.\n    class ValidationError < StandardError\n    end\n\n    ##\n    # A 4xx class HTTP error occurred.\n    class ClientError < TransmissionError\n    end\n\n    ##\n    # A 401 HTTP error occurred.\n    class AuthorizationError < ClientError\n    end\n\n    ##\n    # A 5xx class HTTP error occurred.\n    class ServerError < TransmissionError\n    end\n\n    ##\n    # An exception that is raised if an ID token could not be validated.\n    class InvalidIDTokenError < StandardError\n    end\n\n    # Error class for problems in batch requests.\n    class BatchError < StandardError\n    end\n  end\nend\n"
  },
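  {
    "path": "sdk/ruby-google-api-client/examples/error_handling_example.rb",
    "content": "# Illustrative sketch (an editorial addition, not part of the original\n# library): rescuing the error hierarchy from errors.rb around\n# client.execute!, which raises on failure. The helper name is an\n# illustrative assumption.\nrequire 'google/api_client'\n\ndef execute_with_handling(client, request_options)\n  client.execute!(request_options)\nrescue Google::APIClient::AuthorizationError\n  # 401: credentials missing or expired; refresh and retry in real code.\n  nil\nrescue Google::APIClient::ClientError => e\n  # Other 4xx responses; e.result carries the processed API response.\n  warn e.message\n  nil\nrescue Google::APIClient::ServerError\n  # 5xx responses are often transient; retry with backoff in real code.\n  nil\nend\n"
  },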
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/logging.rb",
    "content": "require 'logger'\n\nmodule Google\n  class APIClient\n    \n    class << self\n      ##\n      # Logger for the API client\n      #\n      # @return [Logger] logger instance.\n      attr_accessor :logger\n    end\n\n    self.logger = Logger.new(STDOUT)\n    self.logger.level = Logger::WARN  \n\n    ##\n    # Module to make accessing the logger simpler\n    module Logging\n      ##\n      # Logger for the API client\n      #\n      # @return [Logger] logger instance.\n      def logger\n        Google::APIClient.logger\n      end\n    end\n\n  end\n  \n  \nend"
  },
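  {
    "path": "sdk/ruby-google-api-client/examples/logging_example.rb",
    "content": "# Illustrative sketch (an editorial addition, not part of the original\n# library): replacing the default STDOUT/WARN logger that logging.rb\n# configures. The log file name is a placeholder.\nrequire 'logger'\nrequire 'google/api_client'\n\nGoogle::APIClient.logger = Logger.new('google-api-client.log')\nGoogle::APIClient.logger.level = Logger::DEBUG\n"
  },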
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/media.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nrequire 'google/api_client/reference'\nrequire 'faraday/multipart'\n\nmodule Google\n  class APIClient\n    ##\n    # Uploadable media support.  Holds an IO stream & content type.\n    #\n    # @see Faraday::UploadIO\n    # @example\n    #   media = Google::APIClient::UploadIO.new('mymovie.m4v', 'video/mp4')\n    class UploadIO < Faraday::Multipart::FilePart\n      \n      # @return [Fixnum] Size of chunks to upload. Default is nil, meaning upload the entire file in a single request\n      attr_accessor :chunk_size\n            \n      ##\n      # Get the length of the stream\n      #\n      # @return [Fixnum]\n      #   Length of stream, in bytes\n      def length\n        io.respond_to?(:length) ? io.length : File.size(local_path)\n      end\n    end\n    \n    ##\n    # Wraps an input stream and limits data to a given range\n    #\n    # @example\n    #   chunk = Google::APIClient::RangedIO.new(io, 0, 1000)\n    class RangedIO \n      ##\n      # Bind an input stream to a specific range.\n      #\n      # @param [IO] io\n      #   Source input stream\n      # @param [Fixnum] offset\n      #   Starting offset of the range\n      # @param [Fixnum] length\n      #   Length of range\n      def initialize(io, offset, length)\n        @io = io\n        @offset = offset\n        @length = length\n        self.rewind\n      end\n      \n      ##\n      # @see IO#read\n      def read(amount = nil, buf = nil)\n        buffer = buf || ''\n        if amount.nil?\n          size = @length - @pos\n          done = ''\n        elsif amount == 0\n          size = 0\n          done = ''\n        else \n          size = [@length - @pos, amount].min\n          done = nil\n        end\n\n        if size > 0\n          result = @io.read(size)\n          result.force_encoding(\"BINARY\") if result.respond_to?(:force_encoding)\n          buffer << result if result\n          @pos = @pos + size\n        end\n\n        if buffer.length > 0\n          buffer\n        else\n          done\n        end\n      end\n\n      ##\n      # @see IO#rewind\n      def rewind\n        self.pos = 0\n      end\n\n      ##\n      # @see IO#pos\n      def pos\n        @pos\n      end\n\n      ##\n      # @see IO#pos=\n      def pos=(pos)\n        @pos = pos\n        @io.pos = @offset + pos\n      end\n    end\n    \n    ##\n    # Resumable uploader.\n    #\n    class ResumableUpload < Request\n      # @return [Fixnum] Max bytes to send in a single request\n      attr_accessor :chunk_size\n  \n      ##\n      # Creates a new uploader.\n      #\n      # @param [Hash] options\n      #   Request options\n      def initialize(options={})\n        super options\n        self.uri = options[:uri]\n        self.http_method = :put\n        @offset = options[:offset] || 0\n        @complete = false\n        @expired = false\n      end\n      \n      ##\n      # Sends all remaining chunks to the server\n      #\n      # @deprecated Pass the 
instance to {Google::APIClient#execute} instead\n      #\n      # @param [Google::APIClient] api_client\n      #   API Client instance to use for sending\n      def send_all(api_client)\n        result = nil\n        until complete?\n          result = send_chunk(api_client)\n          break unless result.status == 308\n        end\n        return result\n      end\n\n      ##\n      # Sends the next chunk to the server\n      #\n      # @deprecated Pass the instance to {Google::APIClient#execute} instead\n      #\n      # @param [Google::APIClient] api_client\n      #   API Client instance to use for sending\n      def send_chunk(api_client)\n        return api_client.execute(self)\n      end\n\n      ##\n      # Check if upload is complete\n      #\n      # @return [TrueClass, FalseClass]\n      #   Whether or not the upload completed successfully\n      def complete?\n        return @complete\n      end\n\n      ##\n      # Check if the upload URL expired (upload not completed in the allotted time).\n      # Expired uploads must be restarted from the beginning\n      #\n      # @return [TrueClass, FalseClass]\n      #   Whether or not the upload has expired and cannot be resumed\n      def expired?\n        return @expired\n      end\n\n      ##\n      # Check if upload is resumable. That is, neither complete nor expired\n      #\n      # @return [TrueClass, FalseClass] True if upload can be resumed\n      def resumable?\n        return !(self.complete? or self.expired?)\n      end\n\n      ##\n      # Convert to an HTTP request. Returns components in order of method, URI,\n      # request headers, and body\n      #\n      # @api private\n      #\n      # @return [Array<(Symbol, Addressable::URI, Hash, [#read,#to_str])>]\n      def to_http_request\n        if @complete\n          raise Google::APIClient::ClientError, \"Upload already complete\"\n        elsif @offset.nil?\n          self.headers.update({\n            'Content-Length' => \"0\",\n            'Content-Range' => \"bytes */#{media.length}\" })\n        else\n          start_offset = @offset\n          remaining = self.media.length - start_offset\n          chunk_size = self.media.chunk_size || self.chunk_size || self.media.length\n          content_length = [remaining, chunk_size].min\n          chunk = RangedIO.new(self.media.io, start_offset, content_length)\n          end_offset = start_offset + content_length - 1\n          self.headers.update({\n            'Content-Length' => \"#{content_length}\",\n            'Content-Type' => self.media.content_type,\n            'Content-Range' => \"bytes #{start_offset}-#{end_offset}/#{media.length}\" })\n          self.body = chunk\n        end\n        super\n      end\n\n      ##\n      # Check the result from the server, updating the offset and/or location\n      # if available.\n      #\n      # @api private\n      #\n      # @param [Faraday::Response] response\n      #   HTTP response\n      #\n      # @return [Google::APIClient::Result]\n      #   Processed API response\n      def process_http_response(response)\n        case response.status\n        when 200...299\n          @complete = true\n        when 308\n          range = response.headers['range']\n          if range\n            @offset = range.scan(/\\d+/).collect{|x| Integer(x)}.last + 1\n          end\n          if response.headers['location']\n            self.uri = response.headers['location']\n          end\n        when 400...499\n          @expired = true\n
        when 500...599\n          # Invalidate the offset to mark that it needs to be queried on the\n          # next request\n          @offset = nil\n        end\n        return Google::APIClient::Result.new(self, response)\n      end\n\n      ##\n      # Hashified version of the API request\n      #\n      # @return [Hash]\n      def to_hash\n        super.merge(:offset => @offset)\n      end\n\n    end\n  end\nend\n"
  },
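  {
    "path": "sdk/ruby-google-api-client/examples/resumable_upload_example.rb",
    "content": "# Illustrative sketch (an editorial addition, not part of the original\n# library): a resumable upload driven chunk by chunk, per media.rb.\n# 'mymovie.m4v' comes from the UploadIO doc example; result.resumable_upload\n# is assumed from the library's Result class; 'drive' and 'v2' are\n# placeholders.\nrequire 'google/api_client'\n\nclient = Google::APIClient.new(\n  :application_name => 'upload-example',\n  :application_version => '0.0.1'\n)\ndrive = client.discovered_api('drive', 'v2')\n\nmedia = Google::APIClient::UploadIO.new('mymovie.m4v', 'video/mp4')\nmedia.chunk_size = 1 << 20  # send 1 MiB per request instead of one shot\n\nresult = client.execute(\n  :api_method => drive.files.insert,\n  :media => media,\n  :parameters => {'uploadType' => 'resumable'},\n  :body_object => {'title' => 'mymovie.m4v'}\n)\n\nupload = result.resumable_upload\n# Each execute sends one chunk; process_http_response advances the offset on\n# HTTP 308 until the upload reports complete.\nclient.execute(upload) while upload.resumable?\n"
  },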
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/railtie.rb",
    "content": "require 'rails/railtie'\nrequire 'google/api_client/logging'\n\nmodule Google\n  class APIClient\n    \n    ##\n    # Optional support class for Rails. Currently replaces the built-in logger\n    # with Rails' application log.\n    #\n    class Railtie < Rails::Railtie\n      initializer 'google-api-client' do |app|\n        logger = app.config.logger || Rails.logger\n        Google::APIClient.logger = logger unless logger.nil?\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/reference.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'google/api_client/request'\n\nmodule Google\n  class APIClient\n    ##\n    # Subclass of Request for backwards compatibility with pre-0.5.0 versions of the library\n    # \n    # @deprecated\n    #   use {Google::APIClient::Request} instead\n    class Reference < Request\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/request.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'faraday'\nrequire 'compat/multi_json'\nrequire 'addressable/uri'\nrequire 'stringio'\nrequire 'google/api_client/discovery'\nrequire 'google/api_client/logging'\n\nmodule Google\n  class APIClient\n\n    ##\n    # Represents an API request.\n    class Request\n      include Google::APIClient::Logging\n\n      MULTIPART_BOUNDARY = \"-----------RubyApiMultipartPost\".freeze\n\n      # @return [Hash] Request parameters\n      attr_reader :parameters\n      # @return [Hash] Additional HTTP headers\n      attr_reader :headers\n      # @return [Google::APIClient::Method] API method to invoke\n      attr_reader :api_method\n      # @return [Google::APIClient::UploadIO] File to upload\n      attr_accessor :media\n      # @return [#generated_authenticated_request] User credentials\n      attr_accessor :authorization\n      # @return [TrueClass,FalseClass] True if request should include credentials\n      attr_accessor :authenticated\n      # @return [#read, #to_str] Request body\n      attr_accessor :body\n\n      ##\n      # Build a request\n      #\n      # @param [Hash] options\n      # @option options [Hash, Array] :parameters\n      #   Request parameters for the API method.\n      # @option options [Google::APIClient::Method] :api_method\n      #   API method to invoke. Either :api_method or :uri must be specified\n      # @option options [TrueClass, FalseClass] :authenticated\n      #   True if request should include credentials. Implicitly true if\n      #   unspecified and :authorization present\n      # @option options [#generate_signed_request] :authorization\n      #   OAuth credentials\n      # @option options [Google::APIClient::UploadIO] :media\n      #   File to upload, if media upload request\n      # @option options [#to_json, #to_hash] :body_object\n      #   Main body of the API request. Typically hash or object that can\n      #   be serialized to JSON\n      # @option options [#read, #to_str] :body\n      #   Raw body to send in POST/PUT requests\n      # @option options [String, Addressable::URI] :uri\n      #   URI to request. 
Either :api_method or :uri must be specified\n      # @option options [String, Symbol] :http_method\n      #   HTTP method when requesting a URI\n      def initialize(options={})\n        @parameters = Faraday::Utils::ParamsHash.new\n        @headers = Faraday::Utils::Headers.new\n\n        self.parameters.merge!(options[:parameters]) unless options[:parameters].nil?\n        self.headers.merge!(options[:headers]) unless options[:headers].nil?\n        self.api_method = options[:api_method]\n        self.authenticated = options[:authenticated]\n        self.authorization = options[:authorization]\n\n        # These parameters are handled differently because they're not\n        # parameters to the API method, but rather to the API system.\n        self.parameters['key'] ||= options[:key] if options[:key]\n        self.parameters['userIp'] ||= options[:user_ip] if options[:user_ip]\n\n        if options[:media]\n          self.initialize_media_upload(options)\n        elsif options[:body]\n          self.body = options[:body]\n        elsif options[:body_object]\n          self.headers['Content-Type'] ||= 'application/json'\n          self.body = serialize_body(options[:body_object])\n        else\n          self.body = ''\n        end\n\n        unless self.api_method\n          self.http_method = options[:http_method] || 'GET'\n          self.uri = options[:uri]\n        end\n      end\n\n      # @!attribute [r] upload_type\n      # @return [String] protocol used for upload\n      def upload_type\n        return self.parameters['uploadType'] || self.parameters['upload_type']\n      end\n\n      # @!attribute http_method\n      # @return [Symbol] HTTP method if invoking a URI\n      def http_method\n        return @http_method ||= self.api_method.http_method.to_s.downcase.to_sym\n      end\n\n      def http_method=(new_http_method)\n        if new_http_method.kind_of?(Symbol)\n          @http_method = new_http_method.to_s.downcase.to_sym\n        elsif new_http_method.respond_to?(:to_str)\n          @http_method = new_http_method.to_s.downcase.to_sym\n        else\n          raise TypeError,\n            \"Expected String or Symbol, got #{new_http_method.class}.\"\n        end\n      end\n\n      def api_method=(new_api_method)\n        if new_api_method.nil? 
|| new_api_method.kind_of?(Google::APIClient::Method)\n          @api_method = new_api_method\n        else\n          raise TypeError,\n            \"Expected Google::APIClient::Method, got #{new_api_method.class}.\"\n        end\n      end\n\n      # @!attribute uri\n      # @return [Addressable::URI] URI to send request\n      def uri\n        return @uri ||= self.api_method.generate_uri(self.parameters)\n      end\n\n      def uri=(new_uri)\n        @uri = Addressable::URI.parse(new_uri)\n        @parameters.update(@uri.query_values) unless @uri.query_values.nil?\n      end\n\n\n      # Transmits the request with the given connection\n      #\n      # @api private\n      #\n      # @param [Faraday::Connection] connection\n      #   the connection to transmit with\n      # @param [TrueClass, FalseClass] is_retry\n      #   True if request has been previously sent\n      #\n      # @return [Google::APIClient::Result]\n      #   result of API request\n      def send(connection, is_retry = false)\n        self.body.rewind if is_retry && self.body.respond_to?(:rewind)\n        env = self.to_env(connection)\n        logger.debug { \"#{self.class} Sending API request #{env[:method]} #{env[:url].to_s} #{env[:request_headers]}\" }\n        http_response = connection.app.call(env)\n        result = self.process_http_response(http_response)\n\n        logger.debug { \"#{self.class} Result: #{result.status} #{result.headers}\" }\n\n        # Resumable uploads are slightly different from other upload protocols\n        # in that they require at least 2 requests.\n        if result.status == 200 && self.upload_type == 'resumable' && self.media\n          upload = result.resumable_upload\n          unless upload.complete?\n            logger.debug { \"#{self.class} Sending upload body\" }\n            result = upload.send(connection)\n          end\n        end\n        return result\n      end\n\n      
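# Illustrative sketch (editorial addition, not from the upstream docs):\n      # requests are normally built and transmitted via\n      # Google::APIClient#execute, e.g. with a hypothetical discovered +api+\n      # and a configured +client+:\n      #\n      #   request = Google::APIClient::Request.new(\n      #     :api_method => api.things.list,\n      #     :parameters => { 'limit' => '10' })\n      #   result = client.execute(request)\n\n      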
# Convert to an HTTP request. Returns components in order of method, URI,\n      # request headers, and body\n      #\n      # @api private\n      #\n      # @return [Array<(Symbol, Addressable::URI, Hash, [#read,#to_str])>]\n      def to_http_request\n        request = (\n          if self.api_method\n            self.api_method.generate_request(self.parameters, self.body, self.headers)\n          elsif self.uri\n            unless self.parameters.empty?\n              self.uri.query = Addressable::URI.form_encode(self.parameters)\n            end\n            [self.http_method, self.uri.to_s, self.headers, self.body]\n          end)\n        return request\n      end\n\n      ##\n      # Hashified version of the API request\n      #\n      # @return [Hash]\n      def to_hash\n        options = {}\n        if self.api_method\n          options[:api_method] = self.api_method\n          options[:parameters] = self.parameters\n        else\n          options[:http_method] = self.http_method\n          options[:uri] = self.uri\n        end\n        options[:headers] = self.headers\n        options[:body] = self.body\n        options[:media] = self.media\n        unless self.authorization.nil?\n          options[:authorization] = self.authorization\n        end\n        return options\n      end\n\n      ##\n      # Prepares the request for execution, building a hash of parts\n      # suitable for sending to Faraday::Connection.\n      #\n      # @api private\n      #\n      # @param [Faraday::Connection] connection\n      #   Connection for building the request\n      #\n      # @return [Hash]\n      #   Encoded request\n      def to_env(connection)\n        method, uri, headers, body = self.to_http_request\n        http_request = connection.build_request(method) do |req|\n          req.url(uri.to_s)\n          req.headers.update(headers)\n          req.body = body\n        end\n\n        if self.authorization.respond_to?(:generate_authenticated_request)\n          http_request = self.authorization.generate_authenticated_request(\n            :request => http_request,\n            :connection => connection\n          )\n        end\n\n        http_request.to_env(connection)\n      end\n\n      ##\n      # Convert HTTP response to an API Result\n      #\n      # @api private\n      #\n      # @param [Faraday::Response] response\n      #   HTTP response\n      #\n      # @return [Google::APIClient::Result]\n      #   Processed API response\n      def process_http_response(response)\n        Result.new(self, response)\n      end\n\n      protected\n\n      ##\n      # Adjust headers & body for media uploads\n      #\n      # @api private\n      #\n      # @param [Hash] options\n      # @option options [Hash, Array] :parameters\n      #   Request parameters for the API method.\n      # @option options [Google::APIClient::UploadIO] :media\n      #   File to upload, if media upload request\n      # @option options [#to_json, #to_hash] :body_object\n      #   Main body of the API request. 
Typically hash or object that can\n      #   be serialized to JSON\n      # @option options [#read, #to_str] :body\n      #   Raw body to send in POST/PUT requests\n      def initialize_media_upload(options)\n        raise \"media upload not supported by arvados-google-api-client\"\n      end\n\n      ##\n      # Assemble a multipart message from a set of parts\n      #\n      # @api private\n      #\n      # @param [Array<[#read,#to_str]>] parts\n      #   Array of parts to encode.\n      # @param [String] mime_type\n      #   MIME type of the message\n      # @param [String] boundary\n      #   Boundary for separating each part of the message\n      def build_multipart(parts, mime_type = 'multipart/related', boundary = MULTIPART_BOUNDARY)\n        raise \"multipart upload not supported by arvados-google-api-client\"\n      end\n\n      ##\n      # Serialize body object to JSON\n      #\n      # @api private\n      #\n      # @param [#to_json,#to_hash] body\n      #   object to serialize\n      #\n      # @return [String]\n      #   JSON\n      def serialize_body(body)\n        return body.to_json if body.respond_to?(:to_json)\n        return MultiJson.dump(body.to_hash) if body.respond_to?(:to_hash)\n        raise TypeError, 'Could not convert body object to JSON. ' +\n                         'Must respond to :to_json or :to_hash.'\n      end\n\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/result.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nmodule Google\n  class APIClient\n    ##\n    # This class wraps a result returned by an API call.\n    class Result\n      extend Forwardable\n      \n      ##\n      # Init the result\n      #\n      # @param [Google::APIClient::Request] request\n      #   The original request\n      # @param [Faraday::Response] response\n      #   Raw HTTP Response\n      def initialize(request, response)\n        @request = request\n        @response = response\n        @media_upload = reference if reference.kind_of?(ResumableUpload)\n      end\n\n      # @return [Google::APIClient::Request] Original request object\n      attr_reader :request\n      # @return [Faraday::Response] HTTP response\n      attr_reader :response\n      # @!attribute [r] reference\n      #   @return [Google::APIClient::Request] Original request object\n      #   @deprecated See {#request}\n      alias_method :reference, :request # For compatibility with pre-beta clients\n\n      # @!attribute [r] status\n      #   @return [Fixnum] HTTP status code\n      # @!attribute [r] headers\n      #   @return [Hash] HTTP response headers\n      # @!attribute [r] body\n      #   @return [String] HTTP response body\n      def_delegators :@response, :status, :headers, :body\n\n      # @!attribute [r] resumable_upload\n      # @return [Google::APIClient::ResumableUpload] For resuming media uploads\n      def resumable_upload        \n        @media_upload ||= (\n          options = self.reference.to_hash.merge(\n            :uri => self.headers['location'],\n            :media => self.reference.media\n          )\n          Google::APIClient::ResumableUpload.new(options)\n        )\n      end\n      \n      ##\n      # Get the content type of the response\n      # @!attribute [r] media_type\n      # @return [String]\n      #  Value of content-type header\n      def media_type\n        _, content_type = self.headers.detect do |h, v|\n          h.downcase == 'Content-Type'.downcase\n        end\n        if content_type\n          return content_type[/^([^;]*);?.*$/, 1].strip.downcase\n        else\n          return nil\n        end\n      end\n      \n      ##\n      # Check if request failed\n      #\n      # @!attribute [r] error?\n      # @return [TrueClass, FalseClass]\n      #   true if result of operation is an error\n      def error?\n        return self.response.status >= 400\n      end\n\n      ##\n      # Check if request was successful\n      #\n      # @!attribute [r] success?\n      # @return [TrueClass, FalseClass]\n      #   true if result of operation was successful\n      def success?\n        return !self.error?\n      end\n      \n      ##\n      # Extracts error messages from the response body\n      #\n      # @!attribute [r] error_message\n      # @return [String]\n      #   error message, if available\n      def error_message\n        if self.data?\n          if self.data.respond_to?(:error) &&\n             
self.data.error.respond_to?(:message)\n            # You're going to get a terrible error message if the response isn't\n            # parsed successfully as an error.\n            return self.data.error.message\n          elsif self.data['error'] && self.data['error']['message']\n            return self.data['error']['message']\n          end\n        end\n        return self.body\n      end\n\n      ##\n      # Check for parsable data in response\n      #\n      # @!attribute [r] data?\n      # @return [TrueClass, FalseClass]\n      #   true if body can be parsed\n      def data?\n        !(self.body.nil? || self.body.empty? || self.media_type != 'application/json')\n      end\n      \n      ##\n      # Return parsed version of the response body.\n      #\n      # @!attribute [r] data\n      # @return [Object, Hash, String]\n      #   Object if body parsable from API schema, Hash if JSON, raw body if unable to parse\n      def data\n        return @data ||= (begin\n          if self.data?\n            media_type = self.media_type\n            data = self.body\n            case media_type\n            when 'application/json'\n              data = MultiJson.load(data)\n              # Strip data wrapper, if present\n              data = data['data'] if data.has_key?('data')\n            else\n              raise ArgumentError,\n                \"Content-Type not supported for parsing: #{media_type}\"\n            end\n            if @request.api_method && @request.api_method.response_schema\n              # Automatically parse using the schema designated for the\n              # response of this API method.\n              data = @request.api_method.response_schema.new(data)\n              data\n            else\n              # Otherwise, return the raw unparsed value.\n              # This value must be indexable like a Hash.\n              data\n            end\n          end\n        end)\n      end\n\n      ##\n      # Get the token used for requesting the next page of data\n      #\n      # @!attribute [r] next_page_token\n      # @return [String]\n      #   next page token\n      def next_page_token\n        if self.data.respond_to?(:next_page_token)\n          return self.data.next_page_token\n        elsif self.data.respond_to?(:[])\n          return self.data[\"nextPageToken\"]\n        else\n          raise TypeError, \"Data object did not respond to #next_page_token.\"\n        end\n      end\n\n      ##\n      # Build a request for fetching the next page of data\n      #\n      # @return [Google::APIClient::Request]\n      #   API request for retrieving next page, nil if no page token available\n      def next_page\n        return nil unless self.next_page_token\n        merged_parameters = Hash[self.reference.parameters].merge({\n          self.page_token_param => self.next_page_token\n        })\n        # Because Requests can be coerced to Hashes, we can merge them,\n        # preserving all context except the API method parameters that we're\n        # using for pagination.\n        return Google::APIClient::Request.new(\n          Hash[self.reference].merge(:parameters => merged_parameters)\n        )\n      end\n\n      
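# Illustrative sketch (editorial addition, not from the upstream docs):\n      # walking every page of a list result, assuming +client+ is the\n      # Google::APIClient that produced +result+ (the 'items' field name\n      # varies by API):\n      #\n      #   while result\n      #     (result.data['items'] || []).each { |item| p item }\n      #     request = result.next_page\n      #     result = request && client.execute(request)\n      #   end\n\n      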
##\n      # Get the token used for requesting the previous page of data\n      #\n      # @!attribute [r] prev_page_token\n      # @return [String]\n      #   previous page token\n      def prev_page_token\n        if self.data.respond_to?(:prev_page_token)\n          return self.data.prev_page_token\n        elsif self.data.respond_to?(:[])\n          return self.data[\"prevPageToken\"]\n        else\n          raise TypeError, \"Data object did not respond to #prev_page_token.\"\n        end\n      end\n\n      ##\n      # Build a request for fetching the previous page of data\n      #\n      # @return [Google::APIClient::Request]\n      #   API request for retrieving previous page, nil if no page token available\n      def prev_page\n        return nil unless self.prev_page_token\n        merged_parameters = Hash[self.reference.parameters].merge({\n          self.page_token_param => self.prev_page_token\n        })\n        # Because Requests can be coerced to Hashes, we can merge them,\n        # preserving all context except the API method parameters that we're\n        # using for pagination.\n        return Google::APIClient::Request.new(\n          Hash[self.reference].merge(:parameters => merged_parameters)\n        )\n      end\n      \n      ##\n      # Pagination scheme used by this request/response\n      #\n      # @!attribute [r] pagination_type\n      # @return [Symbol]\n      #  currently always :token\n      def pagination_type\n        return :token\n      end\n\n      ##\n      # Name of the field that contains the pagination token\n      #\n      # @!attribute [r] page_token_param\n      # @return [String]\n      #  currently always 'pageToken'\n      def page_token_param\n        return \"pageToken\"\n      end\n\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/service/batch.rb",
    "content": "# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'google/api_client/service/result'\nrequire 'google/api_client/batch'\n\nmodule Google\n  class APIClient\n    class Service\n\n      ##\n      # Helper class to contain the result of an individual batched call.\n      #\n      class BatchedCallResult < Result\n        # @return [Fixnum] Index of the call\n        def call_index\n          return @base_result.response.call_id.to_i - 1\n        end\n      end\n\n      ##\n      #\n      #\n      class BatchRequest\n        ##\n        # Creates a new batch request.\n        # This class shouldn't be instantiated directly, but rather through\n        # Service.batch.\n        #\n        # @param [Array] calls\n        #   List of Google::APIClient::Service::Request to be made.\n        # @param [Proc] block\n        #   Callback for every call's response. Won't be called if a call\n        #   defined a callback of its own.\n        #\n        # @yield [Google::APIClient::Service::Result]\n        #   block to be called when result ready\n        def initialize(service, calls, &block)\n          @service = service\n          @base_batch = Google::APIClient::BatchRequest.new\n          @global_callback = block if block_given?\n\n          if calls && calls.length > 0\n            calls.each do |call|\n              add(call)\n            end\n          end\n        end\n\n        ##\n        # Add a new call to the batch request.\n        #\n        # @param [Google::APIClient::Service::Request] call\n        #   the call to be added.\n        # @param [Proc] block\n        #   callback for this call's response.\n        #\n        # @return [Google::APIClient::Service::BatchRequest]\n        #   the BatchRequest, for chaining\n        #\n        # @yield [Google::APIClient::Service::Result]\n        #   block to be called when result ready\n        def add(call, &block)\n          if !block_given? && @global_callback.nil?\n            raise BatchError, 'Request needs a block'\n          end\n          callback = block || @global_callback\n          base_call = {\n            :api_method => call.method,\n            :parameters => call.parameters\n          }\n          if call.respond_to? :body\n            if call.body.respond_to? :to_hash\n              base_call[:body_object] = call.body\n            else\n              base_call[:body] = call.body\n            end\n          end\n          @base_batch.add(base_call) do |base_result|\n            result = Google::APIClient::Service::BatchedCallResult.new(\n                call, base_result)\n            callback.call(result)\n          end\n          return self\n        end\n\n        ##\n        # Executes the batch request.\n        def execute\n          @service.execute(self)\n        end\n\n        attr_reader :base_batch\n\n      end\n\n    end\n  end\nend\n"
##\n        # Executes the batch request.\n        def execute\n          @service.execute(self)\n        end\n\n        attr_reader :base_batch\n\n      end\n\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/service/request.rb",
    "content": "# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nmodule Google\n  class APIClient\n    class Service\n      ##\n      # Handles an API request.\n      # This contains a full definition of the request to be made (including\n      # method name, parameters, body and media). The remote API call can be\n      # invoked with execute().\n      class Request\n        ##\n        # Build a request.\n        # This class should not be directly instantiated in user code;\n        # instantiation is handled by the stub methods created on Service and\n        # Resource objects.\n        #\n        # @param [Google::APIClient::Service] service\n        #   The parent Service instance that will execute the request.\n        # @param [Google::APIClient::Method] method\n        #   The Method instance that describes the API method invoked by the\n        #   request.\n        # @param [Hash] parameters\n        #   A Hash of parameter names and values to be sent in the API call.\n        def initialize(service, method, parameters)\n          @service = service\n          @method = method\n          @parameters = parameters\n          @body = nil\n          @media = nil\n\n          metaclass = (class << self; self; end)\n\n          # If applicable, add \"body\", \"body=\" and resource-named methods for\n          # retrieving and setting the HTTP body for this request.\n          # Examples of setting the body for files.insert in the Drive API:\n          #   request.body = object\n          #   request.execute\n          #  OR\n          #   request.file = object\n          #   request.execute\n          #  OR\n          #   request.body(object).execute\n          #  OR\n          #   request.file(object).execute\n          # Examples of retrieving the body for files.insert in the Drive API:\n          #   object = request.body\n          #  OR\n          #   object = request.file\n          if method.request_schema\n            body_name = method.request_schema.data['id'].dup\n            body_name[0] = body_name[0].chr.downcase\n            body_name_equals = (body_name + '=').to_sym\n            body_name = body_name.to_sym\n\n            metaclass.send(:define_method, :body) do |*args|\n              if args.length == 1\n                @body = args.first\n                return self\n              elsif args.length == 0\n                return @body\n              else\n                raise ArgumentError,\n                  \"wrong number of arguments (#{args.length}; expecting 0 or 1)\"\n              end\n            end\n\n            metaclass.send(:define_method, :body=) do |body|\n              @body = body\n            end\n\n            metaclass.send(:alias_method, body_name, :body)\n            metaclass.send(:alias_method, body_name_equals, :body=)\n          end\n\n          # If applicable, add \"media\" and \"media=\" for retrieving and setting\n          # the media object for this request.\n          # Examples of setting the 
media object:\n          #   request.media = object\n          #   request.execute\n          #  OR\n          #   request.media(object).execute\n          # Example of retrieving the media object:\n          #   object = request.media\n          if method.media_upload\n            metaclass.send(:define_method, :media) do |*args|\n              if args.length == 1\n                @media = args.first\n                return self\n              elsif args.length == 0\n                return @media\n              else\n                raise ArgumentError,\n                  \"wrong number of arguments (#{args.length}; expecting 0 or 1)\"\n              end\n            end\n\n            metaclass.send(:define_method, :media=) do |media|\n              @media = media\n            end\n          end\n        end\n\n        ##\n        # Returns the parent service capable of executing this request.\n        #\n        # @return [Google::APIClient::Service] The parent service.\n        attr_reader :service\n\n        ##\n        # Returns the Method instance that describes the API method invoked by\n        # the request.\n        #\n        # @return [Google::APIClient::Method] The API method description.\n        attr_reader :method\n\n        ##\n        # Contains the Hash of parameter names and values to be sent as the\n        # parameters for the API call.\n        #\n        # @return [Hash] The request parameters.\n        attr_accessor :parameters\n\n        ##\n        # Executes the request.\n        def execute\n          @service.execute(self)\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/service/resource.rb",
    "content": "# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nmodule Google\n  class APIClient\n    class Service\n      ##\n      # Handles an API resource.\n      # Simple class that contains API methods and/or child resources.\n      class Resource\n        include Google::APIClient::Service::StubGenerator\n\n        ##\n        # Build a resource.\n        # This class should not be directly instantiated in user code; resources\n        # are instantiated by the stub generation mechanism on Service creation.\n        #\n        # @param [Google::APIClient::Service] service\n        #   The Service instance this resource belongs to.\n        # @param [Google::APIClient::API, Google::APIClient::Resource] root\n        #   The node corresponding to this resource.\n        def initialize(service, root)\n          @service = service\n          generate_call_stubs(service, root)\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/service/result.rb",
    "content": "# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nmodule Google\n  class APIClient\n    class Service\n      ##\n      # Handles an API result.\n      # Wraps around the Google::APIClient::Result class, making it easier to\n      # handle the result (e.g. pagination) and keeping it in line with the rest\n      # of the Service programming interface.\n      class Result\n        extend Forwardable\n\n        ##\n        # Init the result.\n        #\n        # @param [Google::APIClient::Service::Request] request\n        #   The original request\n        # @param [Google::APIClient::Result] base_result\n        #   The base result to be wrapped\n        def initialize(request, base_result)\n          @request = request\n          @base_result = base_result\n        end\n\n        # @!attribute [r] status\n        #   @return [Fixnum] HTTP status code\n        # @!attribute [r] headers\n        #   @return [Hash] HTTP response headers\n        # @!attribute [r] body\n        #   @return [String] HTTP response body\n        def_delegators :@base_result, :status, :headers, :body\n\n        # @return [Google::APIClient::Service::Request] Original request object\n        attr_reader :request\n\n        ##\n        # Get the content type of the response\n        # @!attribute [r] media_type\n        # @return [String]\n        #  Value of content-type header\n        def_delegators :@base_result, :media_type\n\n        ##\n        # Check if request failed\n        #\n        # @!attribute [r] error?\n        # @return [TrueClass, FalseClass]\n        #   true if result of operation is an error\n        def_delegators :@base_result, :error?\n\n        ##\n        # Check if request was successful\n        #\n        # @!attribute [r] success?\n        # @return [TrueClass, FalseClass]\n        #   true if result of operation was successful\n        def_delegators :@base_result, :success?\n\n        ##\n        # Extracts error messages from the response body\n        #\n        # @!attribute [r] error_message\n        # @return [String]\n        #   error message, if available\n        def_delegators :@base_result, :error_message\n\n        ##\n        # Check for parsable data in response\n        #\n        # @!attribute [r] data?\n        # @return [TrueClass, FalseClass]\n        #   true if body can be parsed\n        def_delegators :@base_result, :data?\n\n        ##\n        # Return parsed version of the response body.\n        #\n        # @!attribute [r] data\n        # @return [Object, Hash, String]\n        #   Object if body parsable from API schema, Hash if JSON, raw body if unable to parse\n        def_delegators :@base_result, :data\n\n        ##\n        # Pagination scheme used by this request/response\n        #\n        # @!attribute [r] pagination_type\n        # @return [Symbol]\n        #  currently always :token\n        def_delegators :@base_result, :pagination_type\n\n        ##\n        # Name of the field that contains 
the pagination token\n        #\n        # @!attribute [r] page_token_param\n        # @return [String]\n        #  currently always 'pageToken'\n        def_delegators :@base_result, :page_token_param\n\n        ##\n        # Get the token used for requesting the next page of data\n        #\n        # @!attribute [r] next_page_token\n        # @return [String]\n        #   next page token\n        def_delegators :@base_result, :next_page_token\n\n        ##\n        # Get the token used for requesting the previous page of data\n        #\n        # @!attribute [r] prev_page_token\n        # @return [String]\n        #   previous page token\n        def_delegators :@base_result, :prev_page_token\n\n        # @!attribute [r] resumable_upload\n        def resumable_upload\n          # TODO(sgomes): implement resumable_upload for Service::Result\n          raise NotImplementedError\n        end\n\n        ##\n        # Build a request for fetching the next page of data\n        #\n        # @return [Google::APIClient::Service::Request]\n        #   API request for retrieving next page\n        def next_page\n          request = @request.clone\n          # Make a deep copy of the parameters.\n          request.parameters = Marshal.load(Marshal.dump(request.parameters))\n          request.parameters[page_token_param] = self.next_page_token\n          return request\n        end\n\n        ##\n        # Build a request for fetching the previous page of data\n        #\n        # @return [Google::APIClient::Service::Request]\n        #   API request for retrieving previous page\n        def prev_page\n          request = @request.clone\n          # Make a deep copy of the parameters.\n          request.parameters = Marshal.load(Marshal.dump(request.parameters))\n          request.parameters[page_token_param] = self.prev_page_token\n          return request\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/service/simple_file_store.rb",
    "content": "# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nmodule Google\n  class APIClient\n    class Service\n\n      # Simple file store to be used in the event no ActiveSupport cache store\n      # is provided. This is not thread-safe, and does not support a number of\n      # features (such as expiration), but it's useful for the simple purpose of\n      # caching discovery documents to disk.\n      # Implements the basic cache methods of ActiveSupport::Cache::Store in a\n      # limited fashion.\n      class SimpleFileStore\n\n        # Creates a new SimpleFileStore.\n        #\n        # @param [String] file_path\n        #   The path to the cache file on disk.\n        # @param [Object] options\n        #   The options to be used with this SimpleFileStore. Not implemented.\n        def initialize(file_path, options = nil)\n          @file_path = file_path.to_s\n        end\n\n        # Returns true if a key exists in the cache.\n        #\n        # @param [String] name\n        #   The name of the key. Will always be converted to a string.\n        # @param [Object] options\n        #   The options to be used with this query. Not implemented.\n        def exist?(name, options = nil)\n          read_file\n          @cache.nil? ? nil : @cache.include?(name.to_s)\n        end\n\n        # Fetches data from the cache and returns it, using the given key.\n        # If the key is missing and no block is passed, returns nil.\n        # If the key is missing and a block is passed, executes the block, sets\n        # the key to its value, and returns it.\n        #\n        # @param [String] name\n        #   The name of the key. Will always be converted to a string.\n        # @param [Object] options\n        #   The options to be used with this query. Not implemented.\n        # @yield [String]\n        #   optional block with the default value if the key is missing\n        def fetch(name, options = nil)\n          read_file\n          if block_given?\n            entry = read(name.to_s, options)\n            if entry.nil?\n              value = yield name.to_s\n              write(name.to_s, value)\n              return value\n            else\n              return entry\n            end\n          else\n            return read(name.to_s, options)\n          end\n        end\n\n        # Fetches data from the cache, using the given key.\n        # Returns nil if the key is missing.\n        #\n        # @param [String] name\n        #   The name of the key. Will always be converted to a string.\n        # @param [Object] options\n        #   The options to be used with this query. Not implemented.\n        def read(name, options = nil)\n          read_file\n          @cache.nil? ? nil : @cache[name.to_s]\n        end\n\n        # Writes the value to the cache, with the key.\n        #\n        # @param [String] name\n        #   The name of the key. 
# Writes the value to the cache, with the key.\n        #\n        # @param [String] name\n        #   The name of the key. Will always be converted to a string.\n        # @param [Object] value\n        #   The value to be written.\n        # @param [Object] options\n        #   The options to be used with this query. Not implemented.\n        def write(name, value, options = nil)\n          read_file\n          @cache = {} if @cache.nil?\n          @cache[name.to_s] = value\n          write_file\n          return nil\n        end\n\n        # Deletes an entry in the cache.\n        # Returns true if an entry is deleted.\n        #\n        # @param [String] name\n        #   The name of the key. Will always be converted to a string.\n        # @param [Object] options\n        #   The options to be used with this query. Not implemented.\n        def delete(name, options = nil)\n          read_file\n          return nil if @cache.nil?\n          if @cache.include? name.to_s\n            @cache.delete name.to_s\n            write_file\n            return true\n          else\n            return nil\n          end\n        end\n\n        protected\n\n        # Read the entire cache file from disk.\n        # Will avoid reading if there have been no changes.\n        def read_file\n          if !File.exist? @file_path\n            @cache = nil\n          else\n            # Check for changes after our last read or write.\n            if @last_change.nil? || File.mtime(@file_path) > @last_change\n              File.open(@file_path) do |file|\n                @cache = Marshal.load(file)\n                @last_change = file.mtime\n              end\n            end\n          end\n          return @cache\n        end\n\n        # Write the entire cache contents to disk.\n        def write_file\n          File.open(@file_path, 'w') do |file|\n            Marshal.dump(@cache, file)\n          end\n          @last_change = File.mtime(@file_path)\n        end\n      end\n    end\n  end\nend"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/service/stub_generator.rb",
    "content": "# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'active_support/inflector'\n\nmodule Google\n  class APIClient\n    class Service\n      ##\n      # Auxiliary mixin to generate resource and method stubs.\n      # Used by the Service and Service::Resource classes to generate both\n      # top-level and nested resources and methods.\n      module StubGenerator\n        def generate_call_stubs(service, root)\n          metaclass = (class << self; self; end)\n\n          # Handle resources.\n          root.discovered_resources.each do |resource|\n            method_name = ActiveSupport::Inflector.underscore(resource.name).to_sym\n            if !self.respond_to?(method_name)\n              metaclass.send(:define_method, method_name) do\n                Google::APIClient::Service::Resource.new(service, resource)\n              end\n            end\n          end\n\n          # Handle methods.\n          root.discovered_methods.each do |method|\n            method_name = ActiveSupport::Inflector.underscore(method.name).to_sym\n            if !self.respond_to?(method_name)\n              metaclass.send(:define_method, method_name) do |*args|\n                if args.length > 1\n                  raise ArgumentError,\n                    \"wrong number of arguments (#{args.length} for 1)\"\n                elsif !args.first.respond_to?(:to_hash) && !args.first.nil?\n                  raise ArgumentError,\n                    \"expected parameter Hash, got #{args.first.class}\"\n                else\n                  return Google::APIClient::Service::Request.new(\n                    service, method, args.first\n                  )\n                end\n              end\n            end\n          end\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/service.rb",
    "content": "# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'google/api_client'\nrequire 'google/api_client/service/stub_generator'\nrequire 'google/api_client/service/resource'\nrequire 'google/api_client/service/request'\nrequire 'google/api_client/service/result'\nrequire 'google/api_client/service/batch'\nrequire 'google/api_client/service/simple_file_store'\n\nmodule Google\n  class APIClient\n\n    ##\n    # Experimental new programming interface at the API level.\n    # Hides Google::APIClient. Designed to be easier to use, with less code.\n    #\n    # @example\n    #   calendar = Google::APIClient::Service.new('calendar', 'v3')\n    #   result = calendar.events.list('calendarId' => 'primary').execute()\n    class Service\n      include Google::APIClient::Service::StubGenerator\n      extend Forwardable\n\n      DEFAULT_CACHE_FILE = 'discovery.cache'\n\n      # Cache for discovered APIs.\n      @@discovered = {}\n\n      ##\n      # Creates a new Service.\n      #\n      # @param [String, Symbol] api_name\n      #   The name of the API this service will access.\n      # @param [String, Symbol] api_version\n      #   The version of the API this service will access.\n      # @param [Hash] options\n      #   The configuration parameters for the service.\n      # @option options [Symbol, #generate_authenticated_request] :authorization\n      #   (:oauth_1)\n      #   The authorization mechanism used by the client.  The following\n      #   mechanisms are supported out-of-the-box:\n      #   <ul>\n      #     <li><code>:two_legged_oauth_1</code></li>\n      #     <li><code>:oauth_1</code></li>\n      #     <li><code>:oauth_2</code></li>\n      #   </ul>\n      # @option options [Boolean] :auto_refresh_token (true)\n      #   The setting that controls whether or not the api client attempts to\n      #   refresh authorization when a 401 is hit in #execute. If the token does\n      #   not support it, this option is ignored.\n      # @option options [String] :application_name\n      #   The name of the application using the client.\n      # @option options [String] :application_version\n      #   The version number of the application using the client.\n      # @option options [String] :host (\"www.googleapis.com\")\n      #   The API hostname used by the client. This rarely needs to be changed.\n      # @option options [String] :port (443)\n      #   The port number used by the client. This rarely needs to be changed.\n      # @option options [String] :discovery_path (\"/discovery/v1\")\n      #   The discovery base path. This rarely needs to be changed.\n      # @option options [String] :ca_file\n      #   Optional set of root certificates to use when validating SSL connections.\n      #   By default, a bundled set of trusted roots will be used.\n      # @option options [#generate_authenticated_request] :authorization\n      #   The authorization mechanism for requests. 
# @option options [TrueClass, FalseClass] :authenticated (default: true)\n      #   `true` if requests must be signed or somehow\n      #   authenticated, `false` otherwise.\n      # @option options [TrueClass, FalseClass] :gzip (default: true)\n      #   `true` if gzip enabled, `false` otherwise.\n      # @option options [Faraday::Connection] :connection\n      #   A custom connection to be used for all requests.\n      # @option options [ActiveSupport::Cache::Store] :cache_store\n      #   A cache store for the discovery documents of loaded APIs, avoiding\n      #   unnecessary roundtrips to the discovery service. When omitted, a\n      #   local SimpleFileStore is used if the working directory is writable.\n      def initialize(api_name, api_version, options = {})\n        @api_name = api_name.to_s\n        if api_version.nil?\n          raise ArgumentError,\n            \"API version must be set\"\n        end\n        @api_version = api_version.to_s\n        if options && !options.respond_to?(:to_hash)\n          raise ArgumentError,\n            \"expected options Hash, got #{options.class}\"\n        end\n\n        params = {}\n        [:application_name, :application_version, :authorization, :host, :port,\n         :discovery_path, :auto_refresh_token, :key, :user_ip,\n         :ca_file].each do |option|\n          if options.include? option\n            params[option] = options[option]\n          end\n        end\n\n        @client = Google::APIClient.new(params)\n\n        @connection = options[:connection] || @client.connection\n\n        @options = options\n\n        # Initialize cache store. Default to SimpleFileStore if :cache_store\n        # is not provided and we have write permissions.\n        if options.include? 
:cache_store\n          @cache_store = options[:cache_store]\n        else\n          cache_exists = File.exist?(DEFAULT_CACHE_FILE)\n          if (cache_exists && File.writable?(DEFAULT_CACHE_FILE)) ||\n             (!cache_exists && File.writable?(Dir.pwd))\n            @cache_store = Google::APIClient::Service::SimpleFileStore.new(\n              DEFAULT_CACHE_FILE)\n          end\n        end\n\n        # Attempt to read API definition from memory cache.\n        # Not thread-safe, but the worst that can happen is a cache miss.\n        unless @api = @@discovered[[api_name, api_version]]\n          # Attempt to read API definition from cache store, if there is one.\n          # If there's a miss or no cache store, call discovery service.\n          if !@cache_store.nil?\n            @api = @cache_store.fetch(\"%s/%s\" % [api_name, api_version]) do\n              @client.discovered_api(api_name, api_version)\n            end\n          else\n            @api = @client.discovered_api(api_name, api_version)\n          end\n          @@discovered[[api_name, api_version]] = @api\n        end\n\n        generate_call_stubs(self, @api)\n      end\n\n      ##\n      # Returns the authorization mechanism used by the service.\n      #\n      # @return [#generate_authenticated_request] The authorization mechanism.\n      def_delegators :@client, :authorization, :authorization=\n\n      ##\n      # The setting that controls whether or not the service attempts to\n      # refresh authorization when a 401 is hit during an API call.\n      #\n      # @return [Boolean]\n      def_delegators :@client, :auto_refresh_token, :auto_refresh_token=\n\n      ##\n      # The application's API key issued by the API console.\n      #\n      # @return [String] The API key.\n      def_delegators :@client, :key, :key=\n\n      ##\n      # The Faraday/HTTP connection used by this service.\n      #\n      # @return [Faraday::Connection]\n      attr_accessor :connection\n\n      ##\n      # The cache store used for storing discovery documents.\n      #\n      # @return [ActiveSupport::Cache::Store,\n      #          Google::APIClient::Service::SimpleFileStore,\n      #          nil]\n      attr_reader :cache_store\n\n      ##\n      # Prepares a Google::APIClient::Service::BatchRequest object for making\n      # batched calls.\n      # @param [Array] calls\n      #   Optional array of Google::APIClient::Service::Request to initialize\n      #   the batch request with.\n      # @param [Proc] block\n      #   Callback for every call's response. Won't be called if a call defines\n      #   a callback of its own.\n      #\n      # @yield [Google::APIClient::Service::Result]\n      #   block to be called when result ready\n      def batch(calls = nil, &block)\n        Google::APIClient::Service::BatchRequest.new(self, calls, &block)\n      end\n\n      
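# Illustrative sketch (editorial addition, not from the upstream docs):\n      # batching with a single global callback, reusing the calendar service\n      # from the class example above:\n      #\n      #   batch = calendar.batch do |result|\n      #     puts result.status\n      #   end\n      #   batch.add(calendar.events.list('calendarId' => 'primary'))\n      #   batch.execute\n\n      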
##\n      # Executes an API request.\n      # Do not call directly; this method is only used by Request objects when\n      # executing.\n      #\n      # @param [Google::APIClient::Service::Request,\n      #         Google::APIClient::Service::BatchRequest] request\n      #   The request to be executed.\n      def execute(request)\n        if request.instance_of? Google::APIClient::Service::Request\n          params = {:api_method => request.method,\n            :parameters => request.parameters,\n            :connection => @connection}\n          if request.respond_to? :body\n            if request.body.respond_to? :to_hash\n              params[:body_object] = request.body\n            else\n              params[:body] = request.body\n            end\n          end\n          if request.respond_to? :media\n            params[:media] = request.media\n          end\n          [:authenticated, :gzip].each do |option|\n            if @options.include? option\n              params[option] = @options[option]\n            end\n          end\n          result = @client.execute(params)\n          return Google::APIClient::Service::Result.new(request, result)\n        elsif request.instance_of? Google::APIClient::Service::BatchRequest\n          @client.execute(request.base_batch, {:connection => @connection})\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/service_account.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'google/api_client/auth/pkcs12'\nrequire 'google/api_client/auth/jwt_asserter'\nrequire 'google/api_client/auth/key_utils'\nrequire 'google/api_client/auth/compute_service_account'\nrequire 'google/api_client/auth/storage'\nrequire 'google/api_client/auth/storages/redis_store'\nrequire 'google/api_client/auth/storages/file_store'\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client/version.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nmodule Google\n  class APIClient\n    module VERSION\n      MAJOR = 0\n      MINOR = 8\n      TINY  = 7\n      PATCH = 17\n      STRING = [MAJOR, MINOR, TINY, PATCH].compact.join('.')\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/lib/google/api_client.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nrequire 'faraday'\nrequire 'faraday/gzip'\nrequire 'multi_json'\nrequire 'compat/multi_json'\nrequire 'stringio'\nrequire 'retriable'\n\nrequire 'google/api_client/version'\nrequire 'google/api_client/logging'\nrequire 'google/api_client/errors'\nrequire 'google/api_client/environment'\nrequire 'google/api_client/discovery'\nrequire 'google/api_client/request'\nrequire 'google/api_client/reference'\nrequire 'google/api_client/result'\nrequire 'google/api_client/media'\nrequire 'google/api_client/service_account'\nrequire 'google/api_client/batch'\nrequire 'google/api_client/charset'\nrequire 'google/api_client/client_secrets'\nrequire 'google/api_client/railtie' if defined?(Rails)\n\nmodule Google\n\n  ##\n  # This class manages APIs communication.\n  class APIClient\n    include Google::APIClient::Logging\n\n    ##\n    # Creates a new Google API client.\n    #\n    # @param [Hash] options The configuration parameters for the client.\n    # @option options [Symbol, #generate_authenticated_request] :authorization\n    #   (:oauth_1)\n    #   The authorization mechanism used by the client.  The following\n    #   mechanisms are supported out-of-the-box:\n    #   <ul>\n    #     <li><code>:two_legged_oauth_1</code></li>\n    #     <li><code>:oauth_1</code></li>\n    #     <li><code>:oauth_2</code></li>\n    #     <li><code>:google_app_default</code></li>\n    #   </ul>\n    # @option options [Boolean] :auto_refresh_token (true)\n    #   The setting that controls whether or not the api client attempts to\n    #   refresh authorization when a 401 is hit in #execute. If the token does\n    #   not support it, this option is ignored.\n    # @option options [String] :application_name\n    #   The name of the application using the client.\n    # @option options [String | Array | nil] :scope\n    #   The scope(s) used when using google application default credentials\n    # @option options [String] :application_version\n    #   The version number of the application using the client.\n    # @option options [String] :user_agent\n    #   (\"{app_name} google-api-ruby-client/{version} {os_name}/{os_version}\")\n    #   The user agent used by the client.  Most developers will want to\n    #   leave this value alone and use the `:application_name` option instead.\n    # @option options [String] :host (\"www.googleapis.com\")\n    #   The API hostname used by the client. This rarely needs to be changed.\n    # @option options [String] :port (443)\n    #   The port number used by the client. This rarely needs to be changed.\n    # @option options [String] :discovery_path (\"/discovery/v1\")\n    #   The discovery base path. 
This rarely needs to be changed.\n    # @option options [String] :ca_file\n    #   Optional set of root certificates to use when validating SSL connections.\n    #   By default, a bundled set of trusted roots will be used.\n    # @options options[Hash] :force_encoding\n    #   Experimental option. True if response body should be force encoded into the charset\n    #   specified in the Content-Type header. Mostly intended for compressed content.\n    # @options options[Hash] :faraday_options\n    #   Pass through of options to set on the Faraday connection\n    def initialize(options={})\n      logger.debug { \"#{self.class} - Initializing client with options #{options}\" }\n\n      # Normalize key to String to allow indifferent access.\n      options = options.inject({}) do |accu, (key, value)|\n        accu[key.to_sym] = value\n        accu\n      end\n      # Almost all API usage will have a host of 'www.googleapis.com'.\n      self.host = options[:host] || 'www.googleapis.com'\n      self.port = options[:port] || 443\n      self.discovery_path = options[:discovery_path] || '/discovery/v1'\n\n      # Most developers will want to leave this value alone and use the\n      # application_name option.\n      if options[:application_name]\n        app_name = options[:application_name]\n        app_version = options[:application_version]\n        application_string = \"#{app_name}/#{app_version || '0.0.0'}\"\n      else\n        logger.warn { \"#{self.class} - Please provide :application_name and :application_version when initializing the client\" }\n      end\n\n      proxy = options[:proxy] || Object::ENV[\"http_proxy\"]\n\n      self.user_agent = options[:user_agent] || (\n        \"#{application_string} \" +\n        \"google-api-ruby-client/#{Google::APIClient::VERSION::STRING} #{ENV::OS_VERSION}\".strip + \" (gzip)\"\n      ).strip\n      # The writer method understands a few Symbols and will generate useful\n      # default authentication mechanisms.\n      self.authorization =\n        options.key?(:authorization) ? options[:authorization] : :oauth_2\n      if !options['scope'].nil? 
and self.authorization.respond_to?(:scope=)\n        self.authorization.scope = options['scope']\n      end\n      self.auto_refresh_token = options.fetch(:auto_refresh_token) { true }\n      self.key = options[:key]\n      self.user_ip = options[:user_ip]\n      self.retries = options.fetch(:retries) { 0 }\n      self.expired_auth_retry = options.fetch(:expired_auth_retry) { true }\n      @discovery_uris = {}\n      @discovery_documents = {}\n      @discovered_apis = {}\n      ca_file = options[:ca_file] || File.expand_path('../../cacerts.pem', __FILE__)\n      self.connection = Faraday.new do |faraday|\n        faraday.request :gzip\n        faraday.response :charset if options[:force_encoding]\n        faraday.options.params_encoder = Faraday::FlatParamsEncoder\n        faraday.ssl.ca_file = ca_file\n        faraday.ssl.verify = true\n        if faraday.respond_to?(:proxy=)\n          # faraday >= 0.6.2\n          faraday.proxy = proxy\n        else\n          # older versions of faraday\n          faraday.proxy proxy\n        end\n        faraday.adapter Faraday.default_adapter\n        if options[:faraday_option].is_a?(Hash)\n          options[:faraday_option].each_pair do |option, value|\n            faraday.options.send(\"#{option}=\", value)\n          end\n        end\n      end\n      return self\n    end\n\n    ##\n    # Returns the authorization mechanism used by the client.\n    #\n    # @return [#generate_authenticated_request] The authorization mechanism.\n    attr_reader :authorization\n\n    ##\n    # Sets the authorization mechanism used by the client.\n    #\n    # @param [#generate_authenticated_request] new_authorization\n    #   The new authorization mechanism.\n    def authorization=(new_authorization)\n      case new_authorization\n      when :oauth_1, :oauth\n        require 'signet/oauth_1/client'\n        # NOTE: Do not rely on this default value, as it may change\n        new_authorization = Signet::OAuth1::Client.new(\n          :temporary_credential_uri =>\n            'https://www.google.com/accounts/OAuthGetRequestToken',\n          :authorization_uri =>\n            'https://www.google.com/accounts/OAuthAuthorizeToken',\n          :token_credential_uri =>\n            'https://www.google.com/accounts/OAuthGetAccessToken',\n          :client_credential_key => 'anonymous',\n          :client_credential_secret => 'anonymous'\n        )\n      when :two_legged_oauth_1, :two_legged_oauth\n        require 'signet/oauth_1/client'\n        # NOTE: Do not rely on this default value, as it may change\n        new_authorization = Signet::OAuth1::Client.new(\n          :client_credential_key => nil,\n          :client_credential_secret => nil,\n          :two_legged => true\n        )\n      when :google_app_default\n        require 'googleauth'\n        new_authorization = Google::Auth.get_application_default\n\n      when :oauth_2\n        require 'signet/oauth_2/client'\n        # NOTE: Do not rely on this default value, as it may change\n        new_authorization = Signet::OAuth2::Client.new(\n          :authorization_uri =>\n            'https://accounts.google.com/o/oauth2/auth',\n          :token_credential_uri =>\n            'https://accounts.google.com/o/oauth2/token'\n        )\n      when nil\n        # No authorization mechanism\n      else\n        if !new_authorization.respond_to?(:generate_authenticated_request)\n          raise TypeError,\n            'Expected authorization mechanism to respond to ' +\n            '#generate_authenticated_request.'\n  
      end\n      end\n      @authorization = new_authorization\n      return @authorization\n    end\n\n    ##\n    # Default Faraday/HTTP connection.\n    #\n    # @return [Faraday::Connection]\n    attr_accessor :connection\n\n    ##\n    # The setting that controls whether or not the api client attempts to\n    # refresh authorization when a 401 is hit in #execute.\n    #\n    # @return [Boolean]\n    attr_accessor :auto_refresh_token\n\n    ##\n    # The application's API key issued by the API console.\n    #\n    # @return [String] The API key.\n    attr_accessor :key\n\n    ##\n    # The IP address of the user this request is being performed on behalf of.\n    #\n    # @return [String] The user's IP address.\n    attr_accessor :user_ip\n\n    ##\n    # The user agent used by the client.\n    #\n    # @return [String]\n    #   The user agent string used in the User-Agent header.\n    attr_accessor :user_agent\n\n    ##\n    # The API hostname used by the client.\n    #\n    # @return [String]\n    #   The API hostname. Should almost always be 'www.googleapis.com'.\n    attr_accessor :host\n\n    ##\n    # The port number used by the client.\n    #\n    # @return [String]\n    #   The port number. Should almost always be 443.\n    attr_accessor :port\n\n    ##\n    # The base path used by the client for discovery.\n    #\n    # @return [String]\n    #   The base path. Should almost always be '/discovery/v1'.\n    attr_accessor :discovery_path\n\n    ##\n    # Number of times to retry on recoverable errors\n    #\n    # @return [FixNum]\n    #  Number of retries\n    attr_accessor :retries\n\n    ##\n    # Whether or not an expired auth token should be re-acquired\n    # (and the operation retried) regardless of retries setting\n    # @return [Boolean]\n    #  Auto retry on auth expiry\n    attr_accessor :expired_auth_retry\n\n    ##\n    # Returns the URI for the directory document.\n    #\n    # @return [Addressable::URI] The URI of the directory document.\n    def directory_uri\n      return resolve_uri(self.discovery_path + '/apis')\n    end\n\n    ##\n    # Manually registers a URI as a discovery document for a specific version\n    # of an API.\n    #\n    # @param [String, Symbol] api The API name.\n    # @param [String] version The desired version of the API.\n    # @param [Addressable::URI] uri The URI of the discovery document.\n    # @return [Google::APIClient::API] The service object.\n    def register_discovery_uri(api, version, uri)\n      api = api.to_s\n      version = version || 'v1'\n      @discovery_uris[\"#{api}:#{version}\"] = uri\n      discovered_api(api, version)\n    end\n\n    ##\n    # Returns the URI for the discovery document.\n    #\n    # @param [String, Symbol] api The API name.\n    # @param [String] version The desired version of the API.\n    # @return [Addressable::URI] The URI of the discovery document.\n    def discovery_uri(api, version=nil)\n      api = api.to_s\n      version = version || 'v1'\n      return @discovery_uris[\"#{api}:#{version}\"] ||= (\n        resolve_uri(\n          self.discovery_path + '/apis/{api}/{version}/rest',\n          'api' => api,\n          'version' => version\n        )\n      )\n    end\n\n    ##\n    # Manually registers a pre-loaded discovery document for a specific version\n    # of an API.\n    #\n    # @param [String, Symbol] api The API name.\n    # @param [String] version The desired version of the API.\n    # @param [String, StringIO] discovery_document\n    #   The contents of the discovery document.\n    
# @return [Google::APIClient::API] The service object.\n    def register_discovery_document(api, version, discovery_document)\n      api = api.to_s\n      version = version || 'v1'\n      if discovery_document.kind_of?(StringIO)\n        discovery_document.rewind\n        discovery_document = discovery_document.string\n      elsif discovery_document.respond_to?(:to_str)\n        discovery_document = discovery_document.to_str\n      else\n        raise TypeError,\n          \"Expected String or StringIO, got #{discovery_document.class}.\"\n      end\n      @discovery_documents[\"#{api}:#{version}\"] =\n        MultiJson.load(discovery_document)\n      discovered_api(api, version)\n    end\n\n    ##\n    # Returns the parsed directory document.\n    #\n    # @return [Hash] The parsed JSON from the directory document.\n    def directory_document\n      return @directory_document ||= (begin\n        response = self.execute!(\n          :http_method => :get,\n          :uri => self.directory_uri,\n          :authenticated => false\n        )\n        response.data\n      end)\n    end\n\n    ##\n    # Returns the parsed discovery document.\n    #\n    # @param [String, Symbol] api The API name.\n    # @param [String] version The desired version of the API.\n    # @return [Hash] The parsed JSON from the discovery document.\n    def discovery_document(api, version=nil)\n      api = api.to_s\n      version = version || 'v1'\n      return @discovery_documents[\"#{api}:#{version}\"] ||= (begin\n        response = self.execute!(\n          :http_method => :get,\n          :uri => self.discovery_uri(api, version),\n          :authenticated => false\n        )\n        response.data\n      end)\n    end\n\n    ##\n    # Returns all APIs published in the directory document.\n    #\n    # @return [Array] The list of available APIs.\n    def discovered_apis\n      @directory_apis ||= (begin\n        document_base = self.directory_uri\n        if self.directory_document && self.directory_document['items']\n          self.directory_document['items'].map do |discovery_document|\n            Google::APIClient::API.new(\n              document_base,\n              discovery_document\n            )\n          end\n        else\n          []\n        end\n      end)\n    end\n\n    ##\n    # Returns the service object for a given service name and service version.\n    #\n    # @param [String, Symbol] api The API name.\n    # @param [String] version The desired version of the API.\n    #\n    # @return [Google::APIClient::API] The service object.\n    def discovered_api(api, version=nil)\n      if !api.kind_of?(String) && !api.kind_of?(Symbol)\n        raise TypeError,\n          \"Expected String or Symbol, got #{api.class}.\"\n      end\n      api = api.to_s\n      version = version || 'v1'\n      return @discovered_apis[\"#{api}:#{version}\"] ||= begin\n        document_base = self.discovery_uri(api, version)\n        discovery_document = self.discovery_document(api, version)\n        if document_base && discovery_document\n          Google::APIClient::API.new(\n            document_base,\n            discovery_document\n          )\n        else\n          nil\n        end\n      end\n    end\n\n    ##\n    # Returns the method object for a given RPC name and service version.\n    #\n    # @param [String, Symbol] rpc_name The RPC name of the desired method.\n    # @param [String, Symbol] api The API the method is within.\n    # @param [String] version The desired version of the API.\n    #\n    # @return 
[Google::APIClient::Method] The method object.\n    def discovered_method(rpc_name, api, version=nil)\n      if !rpc_name.kind_of?(String) && !rpc_name.kind_of?(Symbol)\n        raise TypeError,\n          \"Expected String or Symbol, got #{rpc_name.class}.\"\n      end\n      rpc_name = rpc_name.to_s\n      api = api.to_s\n      version = version || 'v1'\n      service = self.discovered_api(api, version)\n      if service.to_h[rpc_name]\n        return service.to_h[rpc_name]\n      else\n        return nil\n      end\n    end\n\n    ##\n    # Returns the service object with the highest version number.\n    #\n    # @note <em>Warning</em>: This method should be used with great care.\n    # As APIs are updated, minor differences between versions may cause\n    # incompatibilities. Requesting a specific version will avoid this issue.\n    #\n    # @param [String, Symbol] api The name of the service.\n    #\n    # @return [Google::APIClient::API] The service object.\n    def preferred_version(api)\n      if !api.kind_of?(String) && !api.kind_of?(Symbol)\n        raise TypeError,\n          \"Expected String or Symbol, got #{api.class}.\"\n      end\n      api = api.to_s\n      return self.discovered_apis.detect do |a|\n        a.name == api && a.preferred == true\n      end\n    end\n\n    ##\n    # Verifies an ID token against a server certificate. Used to ensure that\n    # an ID token supplied by an untrusted client-side mechanism is valid.\n    # Raises an error if the token is invalid or missing.\n    #\n    # @deprecated Use the google-id-token gem for verifying JWTs\n    def verify_id_token!\n      require 'jwt'\n      require 'openssl'\n      @certificates ||= {}\n      if !self.authorization.respond_to?(:id_token)\n        raise ArgumentError, (\n          \"Current authorization mechanism does not support ID tokens: \" +\n          \"#{self.authorization.class.to_s}\"\n        )\n      elsif !self.authorization.id_token\n        raise ArgumentError, (\n          \"Could not verify ID token, ID token missing. \" +\n          \"Scopes were: #{self.authorization.scope.inspect}\"\n        )\n      else\n        check_cached_certs = lambda do\n          valid = false\n          for _key, cert in @certificates\n            begin\n              self.authorization.decoded_id_token(cert.public_key)\n              valid = true\n            rescue JWT::DecodeError, Signet::UnsafeOperationError\n              # Expected exception. 
Ignore, ID token has not been validated.\n            end\n          end\n          valid\n        end\n        if check_cached_certs.call()\n          return true\n        end\n        response = self.execute!(\n          :http_method => :get,\n          :uri => 'https://www.googleapis.com/oauth2/v1/certs',\n          :authenticated => false\n        )\n        @certificates.merge!(\n          Hash[MultiJson.load(response.body).map do |key, cert|\n            [key, OpenSSL::X509::Certificate.new(cert)]\n          end]\n        )\n        if check_cached_certs.call()\n          return true\n        else\n          raise InvalidIDTokenError,\n            \"Could not verify ID token against any available certificate.\"\n        end\n      end\n      return nil\n    end\n\n    ##\n    # Generates a request.\n    #\n    # @option options [Google::APIClient::Method] :api_method\n    #   The method object or the RPC name of the method being executed.\n    # @option options [Hash, Array] :parameters\n    #   The parameters to send to the method.\n    # @option options [Hash, Array] :headers The HTTP headers for the request.\n    # @option options [String] :body The body of the request.\n    # @option options [String] :version (\"v1\")\n    #   The service version. Only used if `api_method` is a `String`.\n    # @option options [#generate_authenticated_request] :authorization\n    #   The authorization mechanism for the response. Used only if\n    #   `:authenticated` is `true`.\n    # @option options [TrueClass, FalseClass] :authenticated (true)\n    #   `true` if the request must be signed or somehow\n    #   authenticated, `false` otherwise.\n    #\n    # @return [Google::APIClient::Reference] The generated request.\n    #\n    # @example\n    #   request = client.generate_request(\n    #     :api_method => 'plus.activities.list',\n    #     :parameters =>\n    #       {'collection' => 'public', 'userId' => 'me'}\n    #   )\n    def generate_request(options={})\n      options = {\n        :api_client => self\n      }.merge(options)\n      return Google::APIClient::Request.new(options)\n    end\n\n    ##\n    # Executes a request, wrapping it in a Result object.\n    #\n    # @param [Google::APIClient::Request, Hash, Array] params\n    #   Either a Google::APIClient::Request, a Hash, or an Array.\n    #\n    #   If a Google::APIClient::Request, no other parameters are expected.\n    #\n    #   If a Hash, the below parameters are handled. If an Array, the\n    #   parameters are assumed to be in the below order:\n    #\n    #   - (Google::APIClient::Method) api_method:\n    #     The method object or the RPC name of the method being executed.\n    #   - (Hash, Array) parameters:\n    #     The parameters to send to the method.\n    #   - (String) body: The body of the request.\n    #   - (Hash, Array) headers: The HTTP headers for the request.\n    #   - (Hash) options: A set of options for the request, of which:\n    #     - (#generate_authenticated_request) :authorization (default: true) -\n    #       The authorization mechanism for the response. 
Used only if\n    #       `:authenticated` is `true`.\n    #     - (TrueClass, FalseClass) :authenticated (default: true) -\n    #       `true` if the request must be signed or somehow\n    #       authenticated, `false` otherwise.\n    #     - (TrueClass, FalseClass) :gzip (default: true) -\n    #       `true` if gzip enabled, `false` otherwise.\n    #     - (Fixnum) :retries -\n    #       Number of times to retry on recoverable errors\n    #\n    # @return [Google::APIClient::Result] The result from the API, nil if batch.\n    #\n    # @example\n    #   result = client.execute(batch_request)\n    #\n    # @example\n    #   plus = client.discovered_api('plus')\n    #   result = client.execute(\n    #     :api_method => plus.activities.list,\n    #     :parameters => {'collection' => 'public', 'userId' => 'me'}\n    #   )\n    #\n    # @see Google::APIClient#generate_request\n    def execute!(*params)\n      if params.first.kind_of?(Google::APIClient::Request)\n        request = params.shift\n        options = params.shift || {}\n      else\n        # This block of code allows us to accept multiple parameter passing\n        # styles while maintaining some backwards compatibility.\n        #\n        # Note: I'm extremely tempted to deprecate this style of execute call.\n        if params.last.respond_to?(:to_hash) && params.size == 1\n          options = params.pop\n        else\n          options = {}\n        end\n\n        options[:api_method] = params.shift if params.size > 0\n        options[:parameters] = params.shift if params.size > 0\n        options[:body] = params.shift if params.size > 0\n        options[:headers] = params.shift if params.size > 0\n        options.update(params.shift) if params.size > 0\n        request = self.generate_request(options)\n      end\n\n      request.headers['User-Agent'] ||= '' + self.user_agent unless self.user_agent.nil?\n      request.headers['Accept-Encoding'] ||= 'gzip' unless options[:gzip] == false\n      request.headers['Content-Type'] ||= ''\n      request.parameters['key'] ||= self.key unless self.key.nil?\n      request.parameters['userIp'] ||= self.user_ip unless self.user_ip.nil?\n\n      connection = options[:connection] || self.connection\n      request.authorization = options[:authorization] || self.authorization unless options[:authenticated] == false\n\n      tries = 1 + (options[:retries] || self.retries)\n      attempt = 0\n\n      Retriable.retriable :tries => tries,\n                          :on => [TransmissionError],\n                          :on_retry => client_error_handler,\n                          :interval => lambda {|attempts| (2 ** attempts) + rand} do\n        attempt += 1\n\n        # This 2nd level retriable only catches auth errors, and supports 1 retry, which allows\n        # auth to be re-attempted without having to retry all sorts of other failures like\n        # NotFound, etc\n        Retriable.retriable :tries => ((expired_auth_retry || tries > 1) && attempt == 1) ? 2 : 1,\n                            :on => [AuthorizationError],\n                            :on_retry => authorization_error_handler(request.authorization) do\n          result = request.send(connection, true)\n\n          case result.status\n            when 200...300\n              result\n            when 301, 302, 303, 307\n              request = generate_request(request.to_hash.merge({\n                :uri => result.headers['location'],\n                :api_method => nil\n              }))\n              raise RedirectError.new(result.headers['location'], result)\n            when 401\n              raise AuthorizationError.new(result.error_message || 'Invalid/Expired Authentication', result)\n            when 400, 402...500\n              raise ClientError.new(result.error_message || \"A client error has occurred\", result)\n            when 500...600\n              raise ServerError.new(result.error_message || \"A server error has occurred\", result)\n            else\n              raise TransmissionError.new(result.error_message || \"A transmission error has occurred\", result)\n          end\n        end\n      end\n    end\n\n    ##\n    # Same as Google::APIClient#execute!, but does not raise an exception for\n    # normal API errors.\n    #\n    # @see Google::APIClient#execute\n    def execute(*params)\n      begin\n        return self.execute!(*params)\n      rescue TransmissionError => e\n        return e.result\n      end\n    end\n\n    protected\n\n    ##\n    # Resolves a URI template against the client's configured base.\n    #\n    # @api private\n    # @param [String, Addressable::URI, Addressable::Template] template\n    #   The template to resolve.\n    # @param [Hash] mapping The mapping that corresponds to the template.\n    # @return [Addressable::URI] The expanded URI.\n    def resolve_uri(template, mapping={})\n      @base_uri ||= Addressable::URI.new(\n        :scheme => 'https',\n        :host => self.host,\n        :port => self.port\n      ).normalize\n      template = if template.kind_of?(Addressable::Template)\n        template.pattern\n      elsif template.respond_to?(:to_str)\n        template.to_str\n      else\n        raise TypeError,\n          \"Expected String, Addressable::URI, or Addressable::Template, \" +\n          \"got #{template.class}.\"\n      end\n      return Addressable::Template.new(@base_uri + template).expand(mapping)\n    end\n\n\n    ##\n    # Returns a proc for special processing of retries for authorization errors\n    # Only 401s should be retried and only if the credentials are refreshable\n    #\n    # @param [#fetch_access_token!] authorization\n    #   OAuth 2 credentials\n    # @return [Proc]\n    def authorization_error_handler(authorization)\n      can_refresh = authorization.respond_to?(:refresh_token) && auto_refresh_token\n      Proc.new do |exception, tries|\n        next unless exception.kind_of?(AuthorizationError)\n        if can_refresh\n          begin\n            logger.debug(\"Attempting refresh of access token & retry of request\")\n            authorization.fetch_access_token!\n            next\n          rescue Signet::AuthorizationError\n          end\n        end\n        raise exception\n      end\n    end\n\n    ##\n    # Returns a proc for special processing of retries as not all client errors\n    # are recoverable. Only 401s should be retried (via authorization_error_handler)\n    #\n    # @return [Proc]\n    def client_error_handler\n      Proc.new do |exception, tries|\n        raise exception if exception.kind_of?(ClientError)\n      end\n    end\n\n  end\n\nend\n"
  },
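  {
    "path": "sdk/ruby-google-api-client/examples/api_client_usage_sketch.rb",
    "content": "# Illustrative sketch (editor's addition, not part of the vendored gem).\n# It restates the usage documented in lib/google/api_client.rb above to show\n# the initialize/discover/execute flow end to end; the 'plus' API and the\n# access token value are placeholders, and the discovery call needs network\n# access to fetch the Discovery document.\nrequire 'google/api_client'\n\n# :application_name/:application_version feed the User-Agent header;\n# :authorization defaults to :oauth_2, i.e. a Signet::OAuth2::Client.\nclient = Google::APIClient.new(\n  :application_name    => 'Example App',\n  :application_version => '1.0.0'\n)\nclient.authorization.access_token = 'placeholder-access-token'\n\n# discovered_api fetches and caches the API's Discovery document.\nplus = client.discovered_api('plus')\n\n# execute wraps the HTTP round trip in a Google::APIClient::Result.\nresult = client.execute(\n  :api_method => plus.activities.list,\n  :parameters => {'collection' => 'public', 'userId' => 'me'}\n)\nputs result.status\n"
  },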
  {
    "path": "sdk/ruby-google-api-client/rakelib/gem.rake",
    "content": "require \"rubygems/package_task\"\n\nnamespace :gem do\n\n  desc \"Build the gem\"\n  task :build do\n    system \"gem build signet.gemspec\"\n  end\n\n  desc \"Install the gem\"\n  task :install => [\"clobber\", \"gem:package\"] do\n    sh \"#{SUDO} gem install --local pkg/#{GEM_SPEC.full_name}\"\n  end\n\n  desc \"Uninstall the gem\"\n  task :uninstall do\n    installed_list = Gem.source_index.find_name(PKG_NAME)\n    if installed_list &&\n        (installed_list.collect { |s| s.version.to_s}.include?(PKG_VERSION))\n      sh(\n        \"#{SUDO} gem uninstall --version '#{PKG_VERSION}' \" +\n        \"--ignore-dependencies --executables #{PKG_NAME}\"\n      )\n    end\n  end\n\n  desc \"Reinstall the gem\"\n  task :reinstall => [:uninstall, :install]\nend\n\ndesc \"Alias to gem:package\"\ntask \"gem\" => \"gem:package\"\n\ntask \"clobber\" => [\"gem:clobber_package\"]"
  },
  {
    "path": "sdk/ruby-google-api-client/rakelib/git.rake",
    "content": "namespace :git do\n  namespace :tag do\n    desc 'List tags from the Git repository'\n    task :list do\n      tags = `git tag -l`\n      tags.gsub!(\"\\r\", '')\n      tags = tags.split(\"\\n\").sort {|a, b| b <=> a }\n      puts tags.join(\"\\n\")\n    end\n\n    desc 'Create a new tag in the Git repository'\n    task :create do\n      changelog = File.open('CHANGELOG.md', 'r') { |file| file.read }\n      puts '-' * 80\n      puts changelog\n      puts '-' * 80\n      puts\n\n      v = ENV['VERSION'] or abort 'Must supply VERSION=x.y.z'\n      abort \"Versions don't match #{v} vs #{PKG_VERSION}\" if v != PKG_VERSION\n\n      git_status = `git status`\n      if git_status !~ /nothing to commit \\(working directory clean\\)/\n        abort \"Working directory isn't clean.\"\n      end\n\n      tag = \"#{PKG_NAME}-#{PKG_VERSION}\"\n      msg = \"Release #{PKG_NAME}-#{PKG_VERSION}\"\n\n      existing_tags = `git tag -l #{PKG_NAME}-*`.split('\\n')\n      if existing_tags.include?(tag)\n        warn('Tag already exists, deleting...')\n        unless system \"git tag -d #{tag}\"\n          abort 'Tag deletion failed.'\n        end\n      end\n      puts \"Creating git tag '#{tag}'...\"\n      unless system \"git tag -a -m \\\"#{msg}\\\" #{tag}\"\n        abort 'Tag creation failed.'\n      end\n    end\n  end\nend\n\ntask 'gem:release' => 'git:tag:create'"
  },
  {
    "path": "sdk/ruby-google-api-client/rakelib/metrics.rake",
    "content": "namespace :metrics do\n  task :lines do\n    lines, codelines, total_lines, total_codelines = 0, 0, 0, 0\n    for file_name in FileList['lib/**/*.rb']\n      f = File.open(file_name)\n      while line = f.gets\n        lines += 1\n        next if line =~ /^\\s*$/\n        next if line =~ /^\\s*#/\n        codelines += 1\n      end\n      puts \"L: #{sprintf('%4d', lines)}, \" +\n        \"LOC #{sprintf('%4d', codelines)} | #{file_name}\"\n      total_lines     += lines\n      total_codelines += codelines\n\n      lines, codelines = 0, 0\n    end\n\n    puts \"Total: Lines #{total_lines}, LOC #{total_codelines}\"\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/rakelib/spec.rake",
    "content": "require 'rake/clean'\nrequire 'rspec/core/rake_task'\n\nCLOBBER.include('coverage', 'specdoc')\n\nnamespace :spec do\n  RSpec::Core::RakeTask.new(:all) do |t|\n    t.pattern = FileList['spec/**/*_spec.rb']\n    t.rspec_opts = ['--color', '--format', 'documentation']\n  end\n\n  desc 'Generate HTML Specdocs for all specs.'\n  RSpec::Core::RakeTask.new(:specdoc) do |t|\n    specdoc_path = File.expand_path('../../specdoc', __FILE__)\n\n    t.rspec_opts = %W( --format html --out #{File.join(specdoc_path, 'index.html')} )\n    t.fail_on_error = false\n  end\nend\n\ndesc 'Alias to spec:all'\ntask 'spec' => 'spec:all'\n"
  },
  {
    "path": "sdk/ruby-google-api-client/rakelib/wiki.rake",
    "content": "require 'rake'\nrequire 'rake/clean'\n\nCLOBBER.include('wiki')\n\nCACHE_PREFIX =\n  \"http://www.gmodules.com/gadgets/proxy/container=default&debug=0&nocache=0/\"\n\nnamespace :wiki do\n  desc 'Autogenerate wiki pages'\n  task :supported_apis do\n    output = <<-WIKI\n#summary The list of supported APIs\n\nThe Google API Client for Ruby is a small flexible client library for accessing\nthe following Google APIs.\n\nWIKI\n    preferred_apis = {}\n    require 'google/api_client'\n    client = Google::APIClient.new\n    for api in client.discovered_apis\n      if !preferred_apis.has_key?(api.name)\n        preferred_apis[api.name] = api\n      elsif api.preferred\n        preferred_apis[api.name] = api\n      end\n    end\n    for api_name, api in preferred_apis\n      if api.documentation.to_s != \"\" && api.title != \"\"\n        output += (\n          \"||#{CACHE_PREFIX}#{api['icons']['x16']}||\" +\n          \"[#{api.documentation} #{api.title}]||\" +\n          \"#{api.description}||\\n\"\n        )\n      end\n    end\n    output.gsub!(/-32\\./, \"-16.\")\n    wiki_path = File.expand_path(\n      File.join(File.dirname(__FILE__), '../wiki/'))\n    Dir.mkdir(wiki_path) unless File.exists?(wiki_path)\n    File.open(File.join(wiki_path, 'SupportedAPIs.wiki'), 'w') do |file|\n      file.write(output)\n    end\n  end\n\n  task 'generate' => ['wiki:supported_apis']\nend\n\nbegin\n  $LOAD_PATH.unshift(\n    File.expand_path(File.join(File.dirname(__FILE__), '../yard/lib'))\n  )\n  $LOAD_PATH.unshift(File.expand_path('.'))\n  $LOAD_PATH.uniq!\n\n  require 'yard'\n  require 'yard/rake/wikidoc_task'\n\n  namespace :wiki do\n    desc 'Generate Wiki Documentation with YARD'\n    YARD::Rake::WikidocTask.new do |yardoc|\n      yardoc.name = 'reference'\n      yardoc.options = [\n        '--verbose',\n        '--markup', 'markdown',\n        '-e', 'yard/lib/yard-google-code.rb',\n        '-p', 'yard/templates',\n        '-f', 'wiki',\n        '-o', 'wiki'\n      ]\n      yardoc.files = [\n        'lib/**/*.rb', 'ext/**/*.c', '-', 'README.md', 'CHANGELOG.md'\n      ]\n    end\n\n    task 'generate' => ['wiki:reference', 'wiki:supported_apis']\n  end\nrescue LoadError\n  # If yard isn't available, it's not the end of the world\n  warn('YARD unavailable. Cannot fully generate wiki.')\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/rakelib/yard.rake",
    "content": "require 'rake'\nrequire 'rake/clean'\n\nCLOBBER.include('doc', '.yardoc')\nCLOBBER.uniq!\n\nbegin\n  require 'yard'\n  require 'yard/rake/yardoc_task'\n\n  namespace :doc do\n    desc 'Generate Yardoc documentation'\n    YARD::Rake::YardocTask.new do |yardoc|\n      yardoc.name = 'yard'\n      yardoc.options = ['--verbose', '--markup', 'markdown']\n      yardoc.files = [\n        'lib/**/*.rb', 'ext/**/*.c', '-',\n        'README.md', 'CONTRIB.md', 'CHANGELOG.md', 'LICENSE'\n      ]\n    end\n  end\n\n  desc 'Alias to doc:yard'\n  task 'doc' => 'doc:yard'\nrescue LoadError\n  # If yard isn't available, it's not the end of the world\n  desc 'Alias to doc:rdoc'\n  task 'doc' => 'doc:rdoc'\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/script/package",
    "content": "#!/usr/bin/env bash\n# Usage: script/gem\n# Updates the gemspec and builds a new gem in the pkg directory.\n\nmkdir -p pkg\ngem build *.gemspec\nmv *.gem pkg\n\n"
  },
  {
    "path": "sdk/ruby-google-api-client/script/release",
    "content": "age: script/release\n# Build the package, tag a commit, push it to origin, and then release the\n# package publicly.\n\nset -e\n\nversion=\"$(script/package | grep Version: | awk '{print $2}')\"\n[ -n \"$version\" ] || exit 1\n\ngit commit --allow-empty -a -m \"Release $version\"\ngit tag \"$version\"\ngit push --tags origin\ngem push pkg/*-${version}.gem\n\n"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/fixtures/files/auth_stored_credentials.json",
    "content": "{   \"access_token\":\"access_token_123456789\",\n    \"authorization_uri\":\"https://accounts.google.com/o/oauth2/auth\",\n    \"client_id\":\"123456789p.apps.googleusercontent.com\",\n    \"client_secret\":\"very_secret\",\n    \"expires_in\":3600,\n    \"refresh_token\":\"refresh_token_12345679\",\n    \"token_credential_uri\":\"https://accounts.google.com/o/oauth2/token\",\n    \"issued_at\":1386053761}"
  },
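  {
    "path": "sdk/ruby-google-api-client/examples/stored_credentials_sketch.rb",
    "content": "# Illustrative sketch (editor's addition, not part of the vendored gem).\n# It shows how the fields in the auth_stored_credentials.json fixture above\n# map onto a Signet::OAuth2::Client, which is roughly what\n# Google::APIClient::Storage reconstructs when it loads saved credentials.\nrequire 'multi_json'\nrequire 'signet/oauth_2/client'\n\nraw = File.read('spec/fixtures/files/auth_stored_credentials.json')\ncredentials = MultiJson.load(raw)\n\n# Symbolize the keys; the fixture's field names match the option names that\n# Signet::OAuth2::Client.new accepts.\nopts = credentials.each_with_object({}) { |(k, v), h| h[k.to_sym] = v }\nclient = Signet::OAuth2::Client.new(opts)\n\n# issued_at is stored as a Unix timestamp; setting it after construction\n# matters because expires_in= resets issued_at to the current time.\nclient.issued_at = Time.at(credentials['issued_at'].to_i)\nputs client.expired?\n"
  },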
  {
    "path": "sdk/ruby-google-api-client/spec/fixtures/files/client_secrets.json",
    "content": "{\"installed\":{\"auth_uri\":\"https://accounts.google.com/o/oauth2/auth\",\"client_secret\":\"i8YaXdGgiQ4_KrTVNGsB7QP1\",\"token_uri\":\"https://accounts.google.com/o/oauth2/token\",\"client_email\":\"\",\"client_x509_cert_url\":\"\",\"client_id\":\"898243283568.apps.googleusercontent.com\",\"auth_provider_x509_cert_url\":\"https://www.googleapis.com/oauth2/v1/certs\"}}"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/fixtures/files/sample.txt",
    "content": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus posuere urna bibendum diam vulputate fringilla. Fusce elementum fermentum justo id aliquam. Integer vel felis ut arcu elementum lacinia. Duis congue urna eget nisl dapibus tristique molestie turpis sollicitudin. Vivamus in justo quam. Proin condimentum mollis tortor at molestie. Cras luctus, nunc a convallis iaculis, est risus consequat nisi, sit amet sollicitudin metus mi a urna. Aliquam accumsan, massa quis condimentum varius, sapien massa faucibus nibh, a dignissim magna nibh a lacus. Nunc aliquet, nunc ac pulvinar consectetur, sapien lacus hendrerit enim, nec dapibus lorem mi eget risus. Praesent vitae justo eget dolor blandit ullamcorper. Duis id nibh vitae sem aliquam vehicula et ac massa. In neque elit, molestie pulvinar viverra at, vestibulum quis velit.\n\nMauris sit amet placerat enim. Duis vel tellus ac dui auctor tincidunt id nec augue. Donec ut blandit turpis. Mauris dictum urna id urna vestibulum accumsan. Maecenas sagittis urna vitae erat facilisis gravida. Phasellus tellus augue, commodo ut iaculis vitae, interdum ut dolor. Proin at dictum lorem. Quisque pellentesque neque ante, vitae rutrum elit. Pellentesque sit amet erat orci. Praesent justo diam, tristique eu tempus ut, vestibulum eget dui. Maecenas et elementum justo. Cras a augue a elit porttitor placerat eget ut magna.\n\nClass aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nam adipiscing tellus in arcu bibendum volutpat. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Sed laoreet faucibus tristique. Duis metus eros, molestie eget dignissim in, imperdiet fermentum nulla. Vestibulum laoreet lorem eu justo vestibulum lobortis. Praesent pharetra leo vel mauris rhoncus commodo sollicitudin ante auctor. Ut sagittis, tortor nec placerat rutrum, neque ipsum cursus nisl, ut lacinia magna risus ac risus. Sed volutpat commodo orci, sodales fermentum dui accumsan eu. Donec egestas ullamcorper elit at condimentum. In euismod sodales posuere. Nullam lacinia tempus molestie. Etiam vitae ullamcorper dui. Fusce congue suscipit arcu, at consectetur diam gravida id. Quisque augue urna, commodo eleifend volutpat vitae, tincidunt ac ligula. Curabitur eget orci nisl, vel placerat ipsum.\n\nCurabitur rutrum euismod nisi, consectetur varius tortor condimentum non. Pellentesque rhoncus nisi eu purus ultricies suscipit. Morbi ante nisi, varius nec molestie bibendum, pharetra quis enim. Proin eget nunc ante. Cras aliquam enim vel nunc laoreet ut facilisis nunc interdum. Fusce libero ipsum, posuere eget blandit quis, bibendum vitae quam. Integer dictum faucibus lacus eget facilisis. Duis adipiscing tortor magna, vel tincidunt risus. In non augue eu nisl sodales cursus vel eget nisi. Maecenas dignissim lectus elementum eros fermentum gravida et eget leo. Aenean quis cursus arcu. Mauris posuere purus non diam mattis vehicula. Integer nec orci velit.\n\nInteger ac justo ac magna adipiscing condimentum vitae tincidunt dui. Morbi augue arcu, blandit nec interdum sit amet, condimentum vel nisl. Nulla vehicula tincidunt laoreet. Aliquam ornare elementum urna, sed vehicula magna porta id. Vestibulum dictum ultrices tortor sit amet tincidunt. Praesent bibendum, metus vel volutpat interdum, nisl nunc cursus libero, vel congue ligula mi et felis. Nulla mollis elementum nulla, in accumsan risus consequat at. Suspendisse potenti. Vestibulum enim lorem, dignissim ut porta vestibulum, porta eget mi. 
Fusce a elit ac dui sodales gravida. Pellentesque sed elit at dui dapibus mattis a non arcu.\n\nCum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. In nec posuere augue. Praesent non suscipit arcu. Sed nibh risus, lacinia ut molestie vitae, tristique eget turpis. Sed pretium volutpat arcu, non rutrum leo volutpat sed. Maecenas quis neque nisl, sit amet ornare dolor. Nulla pharetra pulvinar tellus sed eleifend. Aliquam eget mattis nulla. Nulla dictum vehicula velit, non facilisis lorem volutpat id. Fusce scelerisque sem vitae purus dapibus lobortis. Mauris ac turpis nec nibh consequat porttitor. Ut sit amet iaculis lorem. Vivamus blandit erat ac odio venenatis fringilla a sit amet ante. Quisque ut urna sed augue laoreet sagittis.\n\nInteger nisl urna, bibendum id lobortis in, tempor non velit. Fusce sed volutpat quam. Suspendisse eu placerat purus. Maecenas quis feugiat lectus. Sed accumsan malesuada dui, a pretium purus facilisis quis. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Nunc ac purus id lacus malesuada placerat et in nunc. Ut imperdiet tincidunt est, at consectetur augue egestas hendrerit. Pellentesque eu erat a dui dignissim adipiscing. Integer quis leo non felis placerat eleifend. Fusce luctus mi a lorem mattis eget accumsan libero posuere. Sed pellentesque, odio id pharetra tempus, enim quam placerat metus, auctor aliquam elit mi facilisis quam. Nam at velit et eros rhoncus accumsan.\n\nDonec tellus diam, fringilla ac viverra fringilla, rhoncus sit amet purus. Cras et ligula sed nibh tempor gravida. Aliquam id tempus mauris. Ut convallis quam sed arcu varius eget mattis magna tincidunt. Aliquam et suscipit est. Sed metus augue, tristique sed accumsan eget, euismod et augue. Nam augue sapien, placerat vel facilisis eu, tempor id risus. Aliquam mollis egestas mi. Fusce scelerisque convallis mauris quis blandit. Mauris nec ante id lacus sagittis tincidunt ornare vehicula dui. Curabitur tristique mattis nunc, vel cursus libero viverra feugiat. Suspendisse at sapien velit, a lacinia dolor. Vivamus in est non odio feugiat lacinia sodales ut magna.\n\nDonec interdum ligula id ipsum dapibus consectetur. Pellentesque vitae posuere ligula. Morbi rhoncus bibendum eleifend. Suspendisse fringilla nunc at elit malesuada vitae ullamcorper lorem laoreet. Suspendisse a ante at ipsum iaculis cursus. Duis accumsan ligula quis nibh luctus pretium. Duis ultrices scelerisque dolor, et vulputate lectus commodo ut.\n\nVestibulum ac tincidunt lorem. Vestibulum lorem massa, dictum a scelerisque ut, convallis vitae eros. Morbi ipsum nisl, lacinia non tempor nec, lobortis id diam. Fusce quis magna nunc. Proin ultricies congue justo sed mattis. Vestibulum sit amet arcu tellus. Quisque ultricies porta massa iaculis vehicula. Vestibulum sollicitudin tempor urna vel sodales. Pellentesque ultricies tellus vel metus porta nec iaculis sapien mollis. Maecenas ullamcorper, metus eget imperdiet sagittis, odio orci dapibus neque, in vulputate nunc nibh non libero. Donec velit quam, lobortis quis tempus a, hendrerit id arcu.\n\nDonec nec ante at tortor dignissim mattis. Curabitur vehicula tincidunt magna id sagittis. Proin euismod dignissim porta. Curabitur non turpis purus, in rutrum nulla. Nam turpis nulla, tincidunt et hendrerit non, posuere nec enim. Curabitur leo enim, lobortis ut placerat id, condimentum nec massa. 
In bibendum, lectus sit amet molestie commodo, felis massa rutrum nisl, ac fermentum ligula lacus in ipsum.\n\nPellentesque mi nulla, scelerisque vitae tempus id, consequat a augue. Quisque vel nisi sit amet ipsum faucibus laoreet sed vitae lorem. Praesent nunc tortor, volutpat ac commodo non, pharetra sed neque. Curabitur nec felis at mi blandit aliquet eu ornare justo. Mauris dignissim purus quis nisl porttitor interdum. Aenean id ipsum enim, blandit commodo justo. Quisque facilisis elit quis velit commodo scelerisque lobortis sapien condimentum. Cras sit amet porttitor velit. Praesent nec tempor arcu.\n\nDonec varius mi adipiscing elit semper vel feugiat ipsum dictum. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Donec non quam nisl, ac mattis justo. Vestibulum sed massa eget velit tristique auctor ut ac sapien. Curabitur aliquet ligula eget dui ornare at scelerisque mauris faucibus. Vestibulum id mauris metus, sed vestibulum nibh. Nulla egestas dictum blandit. Mauris vitae nibh at dui mollis lobortis. Phasellus sem leo, euismod at fringilla quis, mollis in nibh. Aenean vel lacus et elit pharetra elementum. Aliquam at ligula id sem bibendum volutpat. Pellentesque quis elit a massa dapibus viverra ut et lorem. Donec nulla eros, iaculis nec commodo vel, suscipit sit amet tortor. Integer tempor, elit at viverra imperdiet, velit sapien laoreet nunc, id laoreet ligula risus vel risus. Nullam sed tortor metus.\n\nIn nunc orci, tempor vulputate pretium vel, suscipit quis risus. Suspendisse accumsan facilisis felis eget posuere. Donec a faucibus felis. Proin nibh erat, sollicitudin quis vestibulum id, tincidunt quis justo. In sed purus eu nisi dignissim condimentum. Sed mattis dapibus lorem id vulputate. Suspendisse nec elit a augue interdum consequat quis id magna. In eleifend aliquam tempor. In in lacus augue.\n\nUt euismod sollicitudin lorem, id aliquam magna dictum sed. Nunc fringilla lobortis nisi sed consectetur. Nulla facilisi. Aenean nec lobortis augue. Curabitur ullamcorper dapibus libero, vel pellentesque arcu sollicitudin non. Praesent varius, turpis nec sollicitudin bibendum, elit tortor rhoncus lacus, gravida luctus leo nisi in felis. Ut metus eros, molestie non faucibus vel, condimentum ac elit.\n\nSuspendisse nisl justo, lacinia sit amet interdum nec, tincidunt placerat urna. Suspendisse potenti. In et odio sed purus malesuada cursus sed nec lectus. Cras commodo, orci sit amet hendrerit iaculis, nunc urna facilisis tellus, vel laoreet odio nulla quis nibh. Maecenas ut justo ut lacus posuere sodales. Vestibulum facilisis fringilla diam at volutpat. Proin a hendrerit urna. Aenean placerat pulvinar arcu, sit amet lobortis neque eleifend in. Aenean risus nulla, facilisis ut tincidunt vitae, fringilla at ligula. Praesent eleifend est at sem lacinia auctor. Nulla ornare nunc in erat laoreet blandit.\n\nSuspendisse pharetra leo ac est porta consequat. Nunc sem nibh, gravida vel aliquam a, ornare in tortor. Nulla vel sapien et felis placerat pellentesque id scelerisque nisl. Praesent et posuere."
  },
  {
    "path": "sdk/ruby-google-api-client/spec/fixtures/files/secret.pem",
    "content": "Bag Attributes\n    friendlyName: privatekey\n    localKeyID: 54 69 6D 65 20 31 33 35 31 38 38 38 31 37 38 36 39 36 \nKey Attributes: <No Attributes>\n-----BEGIN RSA PRIVATE KEY-----\nMIICXAIBAAKBgQDYDyPb3GhyFx5i/wxS/jFsO6wSLys1ehAk6QZoBXGlg7ETVrIJ\nHYh9gXQUno4tJiQoaO8wOvleIRrqI0LkiftCXKWVSrzOiV+O9GkKx1byw1yAIZus\nQdwMT7X0O9hrZLZwhICWC9s6cGhnlCVxLIP/+JkVK7hxEq/LxoSszNV77wIDAQAB\nAoGAa2G69L7quil7VMBmI6lqbtyJfNAsrXtpIq8eG/z4qsZ076ObAKTI/XeldcoH\n57CZL+xXVKU64umZMt0rleJuGXdlauEUbsSx+biGewRfGTgC4rUSjmE539rBvmRW\ngaKliorepPMp/+B9CcG/2YfDPRvG/2cgTXJHVvneo+xHL4ECQQD2Jx5Mvs8z7s2E\njY1mkpRKqh4Z7rlitkAwe1NXcVC8hz5ASu7ORyTl8EPpKAfRMYl1ofK/ozT1URXf\nkL5nChPfAkEA4LPUJ6cqrY4xrrtdGaM4iGIxzen5aZlKz/YNlq5LuQKbnLLHMuXU\nohp/ynpqNWbcAFbmtGSMayxGKW5+fJgZ8QJAUBOZv82zCmn9YcnK3juBEmkVMcp/\ndKVlbGAyVJgAc9RrY+78kQ6D6mmnLgpfwKYk2ae9mKo3aDbgrsIfrtWQcQJAfFGi\nCEpJp3orbLQG319ZsMM7MOTJdC42oPZOMFbAWFzkAX88DKHx0bn9h+XQizkccSej\nPpz+v3DgZJ3YZ1Cz0QJBALiqIokZ+oa3AY6oT0aiec6txrGvNPPbwOsrBpFqGNbu\nAByzWWBoBi40eKMSIR30LqN9H8YnJ91Aoy1njGYyQaw=\n-----END RSA PRIVATE KEY-----\n"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/fixtures/files/zoo.json",
    "content": "{\n \"kind\": \"discovery#describeItem\",\n \"name\": \"zoo\",\n \"version\": \"v1\",\n \"description\": \"Zoo API used for testing\",\n \"basePath\": \"/zoo/\",\n \"rootUrl\": \"https://www.googleapis.com/\",\n \"servicePath\": \"zoo/v1/\",\n \"rpcPath\": \"/rpc\",\n \"parameters\": {\n  \"alt\": {\n   \"type\": \"string\",\n   \"description\": \"Data format for the response.\",\n   \"default\": \"json\",\n   \"enum\": [\n    \"json\"\n   ],\n   \"enumDescriptions\": [\n    \"Responses with Content-Type of application/json\"\n   ],\n   \"location\": \"query\"\n  },\n  \"fields\": {\n   \"type\": \"string\",\n   \"description\": \"Selector specifying which fields to include in a partial response.\",\n   \"location\": \"query\"\n  },\n  \"key\": {\n   \"type\": \"string\",\n   \"description\": \"API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.\",\n   \"location\": \"query\"\n  },\n  \"oauth_token\": {\n   \"type\": \"string\",\n   \"description\": \"OAuth 2.0 token for the current user.\",\n   \"location\": \"query\"\n  },\n  \"prettyPrint\": {\n   \"type\": \"boolean\",\n   \"description\": \"Returns response with indentations and line breaks.\",\n   \"default\": \"true\",\n   \"location\": \"query\"\n  },\n  \"quotaUser\": {\n   \"type\": \"string\",\n   \"description\": \"Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.\",\n   \"location\": \"query\"\n  },\n  \"userIp\": {\n   \"type\": \"string\",\n   \"description\": \"IP address of the site where the request originates. Use this if you want to enforce per-user limits.\",\n   \"location\": \"query\"\n  }\n },\n \"features\": [\n  \"dataWrapper\"\n ],\n \"schemas\": {\n  \"Animal\": {\n   \"id\": \"Animal\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"etag\": {\n     \"type\": \"string\"\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"default\": \"zoo#animal\"\n    },\n    \"name\": {\n     \"type\": \"string\"\n    },\n    \"photo\": {\n     \"type\": \"object\",\n     \"properties\": {\n      \"filename\": {\n       \"type\": \"string\"\n      },\n      \"hash\": {\n       \"type\": \"string\"\n      },\n      \"hashAlgorithm\": {\n       \"type\": \"string\"\n      },\n      \"size\": {\n       \"type\": \"integer\"\n      },\n      \"type\": {\n       \"type\": \"string\"\n      }\n     }\n    }\n   }\n  },\n  \"Animal2\": {\n   \"id\": \"Animal2\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"kind\": {\n     \"type\": \"string\",\n     \"default\": \"zoo#animal\"\n    },\n    \"name\": {\n     \"type\": \"string\"\n    }\n   }\n  },\n  \"AnimalFeed\": {\n   \"id\": \"AnimalFeed\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"etag\": {\n     \"type\": \"string\"\n    },\n    \"items\": {\n     \"type\": \"array\",\n     \"items\": {\n      \"$ref\": \"Animal\"\n     }\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"default\": \"zoo#animalFeed\"\n    }\n   }\n  },\n  \"AnimalMap\": {\n   \"id\": \"AnimalMap\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"etag\": {\n     \"type\": \"string\"\n    },\n    \"animals\": {\n     \"type\": \"object\",\n     \"description\": \"Map of animal id to animal data\",\n     \"additionalProperties\": {\n      \"$ref\": \"Animal\"\n     }\n    },\n    \"kind\": {\n     \"type\": 
\"string\",\n     \"default\": \"zoo#animalMap\"\n    }\n   }\n  },\n  \"LoadFeed\": {\n   \"id\": \"LoadFeed\",\n   \"type\": \"object\",\n   \"properties\": {\n    \"items\": {\n     \"type\": \"array\",\n     \"items\": {\n      \"type\": \"object\",\n      \"properties\": {\n       \"doubleVal\": {\n        \"type\": \"number\"\n       },\n       \"nullVal\": {\n        \"type\": \"null\"\n       },\n       \"booleanVal\": {\n        \"type\": \"boolean\",\n        \"description\": \"True or False.\"\n       },\n       \"anyVal\": {\n        \"type\": \"any\",\n        \"description\": \"Anything will do.\"\n       },\n       \"enumVal\": {\n        \"type\": \"string\"\n       },\n       \"kind\": {\n        \"type\": \"string\",\n        \"default\": \"zoo#loadValue\"\n       },\n       \"longVal\": {\n        \"type\": \"integer\"\n       },\n       \"stringVal\": {\n        \"type\": \"string\"\n       }\n      }\n     }\n    },\n    \"kind\": {\n     \"type\": \"string\",\n     \"default\": \"zoo#loadFeed\"\n    }\n   }\n  }\n },\n \"methods\": {\n  \"query\": {\n   \"path\": \"query\",\n   \"id\": \"bigquery.query\",\n   \"httpMethod\": \"GET\",\n   \"parameters\": {\n    \"q\": {\n     \"type\": \"string\",\n     \"location\": \"query\",\n     \"required\": false,\n     \"repeated\": false\n    },\n    \"i\": {\n     \"type\": \"integer\",\n     \"location\": \"query\",\n     \"required\": false,\n     \"repeated\": false,\n     \"minimum\": \"0\",\n     \"maximum\": \"4294967295\",\n     \"default\": \"20\"\n    },\n    \"n\": {\n     \"type\": \"number\",\n     \"location\": \"query\",\n     \"required\": false,\n     \"repeated\": false\n    },\n    \"b\": {\n     \"type\": \"boolean\",\n     \"location\": \"query\",\n     \"required\": false,\n     \"repeated\": false\n    },\n    \"a\": {\n     \"type\": \"any\",\n     \"location\": \"query\",\n     \"required\": false,\n     \"repeated\": false\n    },\n    \"o\": {\n     \"type\": \"object\",\n     \"location\": \"query\",\n     \"required\": false,\n     \"repeated\": false\n    },\n    \"e\": {\n     \"type\": \"string\",\n     \"location\": \"query\",\n     \"required\": false,\n     \"repeated\": false,\n     \"enum\": [\n       \"foo\",\n       \"bar\"\n     ]\n    },\n    \"er\": {\n      \"type\": \"string\",\n      \"location\": \"query\",\n      \"required\": false,\n      \"repeated\": true,\n      \"enum\": [\n        \"one\",\n        \"two\",\n        \"three\"\n      ]\n    },\n    \"rr\": {\n     \"type\": \"string\",\n     \"location\": \"query\",\n     \"required\": false,\n     \"repeated\": true,\n     \"pattern\": \"[a-z]+\"\n    }\n   }\n  }\n },\n \"resources\": {\n  \"my\": {\n   \"resources\": {\n    \"favorites\": {\n     \"methods\": {\n      \"list\": {\n       \"path\": \"favorites/@me/mine\",\n       \"id\": \"zoo.animals.mine\",\n       \"httpMethod\": \"GET\",\n       \"parameters\": {\n        \"max-results\": {\n          \"location\": \"query\",\n          \"required\": false\n        }\n       }\n      }\n     }\n    }\n   }\n  },\n  \"global\": {\n   \"resources\": {\n    \"print\": {\n     \"methods\": {\n      \"assert\": {\n       \"path\": \"global/print/assert\",\n       \"id\": \"zoo.animals.mine\",\n       \"httpMethod\": \"GET\",\n       \"parameters\": {\n        \"max-results\": {\n          \"location\": \"query\",\n          \"required\": false\n        }\n       }\n      }\n     }\n    }\n   }\n  },\n  \"animals\": {\n   \"methods\": {\n    \"crossbreed\": {\n     \"path\": 
\"animals/crossbreed\",\n     \"id\": \"zoo.animals.crossbreed\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Cross-breed animals\",\n     \"response\": {\n      \"$ref\": \"Animal2\"\n     },\n     \"mediaUpload\": {\n      \"accept\": [\n       \"image/png\"\n      ],\n      \"protocols\": {\n       \"simple\": {\n        \"multipart\": true,\n        \"path\": \"upload/activities/{userId}/@self\"\n       },\n       \"resumable\": {\n        \"multipart\": true,\n        \"path\": \"upload/activities/{userId}/@self\"\n       }\n      }\n     }\n    },\n    \"delete\": {\n     \"path\": \"animals/{name}\",\n     \"id\": \"zoo.animals.delete\",\n     \"httpMethod\": \"DELETE\",\n     \"description\": \"Delete animals\",\n     \"parameters\": {\n      \"name\": {\n       \"location\": \"path\",\n       \"required\": true,\n       \"description\": \"Name of the animal to delete\",\n       \"type\": \"string\"\n      }\n     },\n     \"parameterOrder\": [\n      \"name\"\n     ]\n    },\n    \"get\": {\n     \"path\": \"animals/{name}\",\n     \"id\": \"zoo.animals.get\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Get animals\",\n     \"supportsMediaDownload\": true,\n     \"parameters\": {\n      \"name\": {\n       \"location\": \"path\",\n       \"required\": true,\n       \"description\": \"Name of the animal to load\",\n       \"type\": \"string\"\n      },\n      \"projection\": {\n       \"location\": \"query\",\n       \"type\": \"string\",\n       \"enum\": [\n        \"full\"\n       ],\n       \"enumDescriptions\": [\n        \"Include everything\"\n       ]\n      }\n     },\n     \"parameterOrder\": [\n      \"name\"\n     ],\n     \"response\": {\n      \"$ref\": \"Animal\"\n     }\n    },\n    \"getmedia\": {\n     \"path\": \"animals/{name}\",\n     \"id\": \"zoo.animals.get\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"Get animals\",\n     \"parameters\": {\n      \"name\": {\n       \"location\": \"path\",\n       \"required\": true,\n       \"description\": \"Name of the animal to load\",\n       \"type\": \"string\"\n      },\n      \"projection\": {\n       \"location\": \"query\",\n       \"type\": \"string\",\n       \"enum\": [\n        \"full\"\n       ],\n       \"enumDescriptions\": [\n        \"Include everything\"\n       ]\n      }\n     },\n     \"parameterOrder\": [\n      \"name\"\n     ]\n    },\n    \"insert\": {\n     \"path\": \"animals\",\n     \"id\": \"zoo.animals.insert\",\n     \"httpMethod\": \"POST\",\n     \"description\": \"Insert animals\",\n     \"request\": {\n      \"$ref\": \"Animal\"\n     },\n     \"response\": {\n      \"$ref\": \"Animal\"\n     },\n     \"mediaUpload\": {\n      \"accept\": [\n       \"image/png\"\n      ],\n      \"maxSize\": \"1KB\",\n      \"protocols\": {\n       \"simple\": {\n        \"multipart\": true,\n        \"path\": \"upload/activities/{userId}/@self\"\n       },\n       \"resumable\": {\n        \"multipart\": true,\n        \"path\": \"upload/activities/{userId}/@self\"\n       }\n      }\n     }\n    },\n    \"list\": {\n     \"path\": \"animals\",\n     \"id\": \"zoo.animals.list\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"List animals\",\n     \"parameters\": {\n      \"max-results\": {\n       \"location\": \"query\",\n       \"description\": \"Maximum number of results to return\",\n       \"type\": \"integer\",\n       \"minimum\": \"0\"\n      },\n      \"name\": {\n       \"location\": \"query\",\n       \"description\": \"Restrict result to 
animals with this name\",\n       \"type\": \"string\"\n      },\n      \"projection\": {\n       \"location\": \"query\",\n       \"type\": \"string\",\n       \"enum\": [\n        \"full\"\n       ],\n       \"enumDescriptions\": [\n        \"Include absolutely everything\"\n       ]\n      },\n      \"start-token\": {\n       \"location\": \"query\",\n       \"description\": \"Pagination token\",\n       \"type\": \"string\"\n      }\n     },\n     \"response\": {\n      \"$ref\": \"AnimalFeed\"\n     }\n    },\n    \"patch\": {\n     \"path\": \"animals/{name}\",\n     \"id\": \"zoo.animals.patch\",\n     \"httpMethod\": \"PATCH\",\n     \"description\": \"Update animals\",\n     \"parameters\": {\n      \"name\": {\n       \"location\": \"path\",\n       \"required\": true,\n       \"description\": \"Name of the animal to update\",\n       \"type\": \"string\"\n      }\n     },\n     \"parameterOrder\": [\n      \"name\"\n     ],\n     \"request\": {\n      \"$ref\": \"Animal\"\n     },\n     \"response\": {\n      \"$ref\": \"Animal\"\n     }\n    },\n    \"update\": {\n     \"path\": \"animals/{name}\",\n     \"id\": \"zoo.animals.update\",\n     \"httpMethod\": \"PUT\",\n     \"description\": \"Update animals\",\n     \"parameters\": {\n      \"name\": {\n       \"location\": \"path\",\n       \"description\": \"Name of the animal to update\",\n       \"type\": \"string\"\n      }\n     },\n     \"parameterOrder\": [\n      \"name\"\n     ],\n     \"request\": {\n      \"$ref\": \"Animal\"\n     },\n     \"response\": {\n      \"$ref\": \"Animal\"\n     }\n    }\n   }\n  },\n  \"load\": {\n   \"methods\": {\n    \"list\": {\n     \"path\": \"load\",\n     \"id\": \"zoo.load.list\",\n     \"httpMethod\": \"GET\",\n     \"response\": {\n      \"$ref\": \"LoadFeed\"\n     }\n    }\n   }\n  },\n  \"loadNoTemplate\": {\n   \"methods\": {\n    \"list\": {\n     \"path\": \"loadNoTemplate\",\n     \"id\": \"zoo.loadNoTemplate.list\",\n     \"httpMethod\": \"GET\"\n    }\n   }\n  },\n  \"scopedAnimals\": {\n   \"methods\": {\n    \"list\": {\n     \"path\": \"scopedanimals\",\n     \"id\": \"zoo.scopedAnimals.list\",\n     \"httpMethod\": \"GET\",\n     \"description\": \"List animals (scoped)\",\n     \"parameters\": {\n      \"max-results\": {\n       \"location\": \"query\",\n       \"description\": \"Maximum number of results to return\",\n       \"type\": \"integer\",\n       \"minimum\": \"0\"\n      },\n      \"name\": {\n       \"location\": \"query\",\n       \"description\": \"Restrict result to animals with this name\",\n       \"type\": \"string\"\n      },\n      \"projection\": {\n       \"location\": \"query\",\n       \"type\": \"string\",\n       \"enum\": [\n        \"full\"\n       ],\n       \"enumDescriptions\": [\n        \"Include absolutely everything\"\n       ]\n      },\n      \"start-token\": {\n       \"location\": \"query\",\n       \"description\": \"Pagination token\",\n       \"type\": \"string\"\n      }\n     },\n     \"response\": {\n      \"$ref\": \"AnimalFeed\"\n     }\n    }\n   }\n  }\n }\n}"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client/auth/storage_spec.rb",
    "content": "require 'spec_helper'\n\nrequire 'google/api_client'\nrequire 'google/api_client/version'\n\ndescribe Google::APIClient::Storage do\n  let(:client) { Google::APIClient.new(:application_name => 'API Client Tests') }\n  let(:root_path) { File.expand_path(File.join(__FILE__, '..', '..', '..')) }\n  let(:json_file) { File.expand_path(File.join(root_path, 'fixtures', 'files', 'auth_stored_credentials.json')) }\n\n  let(:store) { double }\n  let(:client_stub) { double }\n  subject { Google::APIClient::Storage.new(store) }\n\n  describe 'authorize' do\n    it 'should authorize' do\n      expect(subject).to respond_to(:authorization)\n      expect(subject.store).to be == store\n    end\n  end\n\n  describe 'authorize' do\n    describe 'with credentials' do\n\n      it 'should initialize a new OAuth Client' do\n        expect(subject).to receive(:load_credentials).and_return({:first => 'a dummy'})\n        expect(client_stub).to receive(:issued_at=)\n        expect(client_stub).to receive(:expired?).and_return(false)\n        expect(Signet::OAuth2::Client).to receive(:new).and_return(client_stub)\n        expect(subject).not_to receive(:refresh_authorization)\n        subject.authorize\n      end\n\n      it 'should refresh authorization' do\n        expect(subject).to receive(:load_credentials).and_return({:first => 'a dummy'})\n        expect(client_stub).to receive(:issued_at=)\n        expect(client_stub).to receive(:expired?).and_return(true)\n        expect(Signet::OAuth2::Client).to receive(:new).and_return(client_stub)\n        expect(subject).to receive(:refresh_authorization)\n        auth = subject.authorize\n        expect(auth).to be == subject.authorization\n        expect(auth).not_to be_nil\n      end\n    end\n\n    describe 'without credentials' do\n\n      it 'should return nil' do\n        expect(subject.authorization).to be_nil\n        expect(subject).to receive(:load_credentials).and_return({})\n        expect(subject.authorize).to be_nil\n        expect(subject.authorization).to be_nil\n      end\n    end\n  end\n\n  describe 'write_credentials' do\n    it 'should call store to write credentials' do\n      authorization_stub = double\n      expect(authorization_stub).to receive(:refresh_token).and_return(true)\n      expect(subject).to receive(:credentials_hash)\n      expect(subject.store).to receive(:write_credentials)\n      subject.write_credentials(authorization_stub)\n      expect(subject.authorization).to be == authorization_stub\n    end\n\n    it 'should not call store to write credentials' do\n      expect(subject).not_to receive(:credentials_hash)\n      expect(subject.store).not_to receive(:write_credentials)\n      expect {\n        subject.write_credentials()\n      }.not_to raise_error\n    end\n    it 'should not call store to write credentials' do\n      expect(subject).not_to receive(:credentials_hash)\n      expect(subject.store).not_to receive(:write_credentials)\n      expect {\n        subject.write_credentials('something')\n      }.not_to raise_error\n    end\n\n  end\n\n  describe 'refresh_authorization' do\n    it 'should call refresh and write credentials' do\n      expect(subject).to receive(:write_credentials)\n      authorization_stub = double\n      expect(subject).to receive(:authorization).and_return(authorization_stub)\n      expect(authorization_stub).to receive(:refresh!).and_return(true)\n      subject.refresh_authorization\n    end\n  end\n\n  describe 'load_credentials' do\n    it 'should call store to load credentials' 
do\n      expect(subject.store).to receive(:load_credentials)\n      subject.send(:load_credentials)\n    end\n  end\n\n  describe 'credentials_hash' do\n    it 'should return an hash' do\n      authorization_stub = double\n      expect(authorization_stub).to receive(:access_token)\n      expect(authorization_stub).to receive(:client_id)\n      expect(authorization_stub).to receive(:client_secret)\n      expect(authorization_stub).to receive(:expires_in)\n      expect(authorization_stub).to receive(:refresh_token)\n      expect(authorization_stub).to receive(:issued_at).and_return('100')\n      allow(subject).to receive(:authorization).and_return(authorization_stub)\n      credentials = subject.send(:credentials_hash)\n      expect(credentials).to include(:access_token)\n      expect(credentials).to include(:authorization_uri)\n      expect(credentials).to include(:client_id)\n      expect(credentials).to include(:client_secret)\n      expect(credentials).to include(:expires_in)\n      expect(credentials).to include(:refresh_token)\n      expect(credentials).to include(:token_credential_uri)\n      expect(credentials).to include(:issued_at)\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client/auth/storages/file_store_spec.rb",
    "content": "require 'spec_helper'\n\nrequire 'google/api_client'\nrequire 'google/api_client/version'\n\ndescribe Google::APIClient::FileStore do\n  let(:root_path) { File.expand_path(File.join(__FILE__, '..','..','..', '..','..')) }\n  let(:json_file) { File.expand_path(File.join(root_path, 'fixtures', 'files', 'auth_stored_credentials.json')) }\n\n  let(:credentials_hash) {{\n      \"access_token\"=>\"my_access_token\",\n      \"authorization_uri\"=>\"https://accounts.google.com/o/oauth2/auth\",\n      \"client_id\"=>\"123456_test_client_id@.apps.googleusercontent.com\",\n      \"client_secret\"=>\"123456_client_secret\",\n      \"expires_in\"=>3600,\n      \"refresh_token\"=>\"my_refresh_token\",\n      \"token_credential_uri\"=>\"https://accounts.google.com/o/oauth2/token\",\n      \"issued_at\"=>1384440275\n  }}\n\n  subject{Google::APIClient::FileStore.new('a file path')}\n\n  it 'should have a path' do\n    expect(subject.path).to be == 'a file path'\n    subject.path = 'an other file path'\n    expect(subject.path).to be == 'an other file path'\n  end\n\n  it 'should load credentials' do\n    subject.path = json_file\n    credentials = subject.load_credentials\n    expect(credentials).to include('access_token', 'authorization_uri', 'refresh_token')\n  end\n\n  it 'should write credentials' do\n    io_stub = StringIO.new\n    expect(subject).to receive(:open).and_return(io_stub)\n    subject.write_credentials(credentials_hash)\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client/auth/storages/redis_store_spec.rb",
    "content": "require 'spec_helper'\n\nrequire 'google/api_client'\nrequire 'google/api_client/version'\n\n\ndescribe Google::APIClient::RedisStore do\n  let(:root_path) { File.expand_path(File.join(__FILE__, '..', '..', '..', '..', '..')) }\n  let(:json_file) { File.expand_path(File.join(root_path, 'fixtures', 'files', 'auth_stored_credentials.json')) }\n  let(:redis) {double}\n\n  let(:credentials_hash) { {\n      \"access_token\" => \"my_access_token\",\n      \"authorization_uri\" => \"https://accounts.google.com/o/oauth2/auth\",\n      \"client_id\" => \"123456_test_client_id@.apps.googleusercontent.com\",\n      \"client_secret\" => \"123456_client_secret\",\n      \"expires_in\" => 3600,\n      \"refresh_token\" => \"my_refresh_token\",\n      \"token_credential_uri\" => \"https://accounts.google.com/o/oauth2/token\",\n      \"issued_at\" => 1384440275\n  } }\n\n  subject { Google::APIClient::RedisStore.new('a redis instance') }\n\n  it 'should have a redis instance' do\n    expect(subject.redis).to be == 'a redis instance'\n    subject.redis = 'an other redis instance'\n    expect(subject.redis).to be == 'an other redis instance'\n  end\n\n  describe 'load_credentials' do\n\n    it 'should load credentials' do\n      subject.redis= redis\n      expect(redis).to receive(:get).and_return(credentials_hash.to_json)\n      expect(subject.load_credentials).to be == credentials_hash\n    end\n\n    it 'should return nil' do\n      subject.redis= redis\n      expect(redis).to receive(:get).and_return(nil)\n      expect(subject.load_credentials).to be_nil\n    end\n  end\n\n  describe 'redis_credentials_key' do\n    context 'without given key' do\n      it 'should return default key' do\n        expect(subject.redis_credentials_key).to be == \"google_api_credentials\"\n      end\n    end\n    context 'with given key' do\n      let(:redis_store) { Google::APIClient::RedisStore.new('a redis instance', 'another_google_api_credentials') }\n      it 'should use given key' do\n        expect(redis_store.redis_credentials_key).to be == \"another_google_api_credentials\"\n      end\n    end\n\n  end\n\n  describe 'write credentials' do\n\n    it 'should write credentials' do\n      subject.redis= redis\n      expect(redis).to receive(:set).and_return('ok')\n      expect(subject.write_credentials(credentials_hash)).to be_truthy\n    end\n  end\n\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client/batch_spec.rb",
    "content": "# Copyright 2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'spec_helper'\nrequire 'google/api_client'\n\nRSpec.describe Google::APIClient::BatchRequest do\n  CLIENT = Google::APIClient.new(:application_name => 'API Client Tests') unless defined?(CLIENT)\n\n  after do\n    # Reset client to not-quite-pristine state\n    CLIENT.key = nil\n    CLIENT.user_ip = nil\n  end\n\n  it 'should raise an error if making an empty batch request' do\n    batch = Google::APIClient::BatchRequest.new\n\n    expect(lambda do\n      CLIENT.execute(batch)\n    end).to raise_error(Google::APIClient::BatchError)\n  end\n\n  it 'should allow query parameters in batch requests' do\n    batch = Google::APIClient::BatchRequest.new\n    batch.add(:uri => 'https://example.com', :parameters => {\n      'a' => '12345'\n    })\n    method, uri, headers, body = batch.to_http_request\n    expect(body.read).to include(\"/?a=12345\")\n  end\n\n  describe 'with the discovery API' do\n    before do\n      CLIENT.authorization = nil\n      @discovery = CLIENT.discovered_api('discovery', 'v1')\n    end\n\n    describe 'with two valid requests' do\n      before do\n        @call1 = {\n          :api_method => @discovery.apis.get_rest,\n          :parameters => {\n            'api' => 'plus',\n            'version' => 'v1'\n          }\n        }\n\n        @call2 = {\n          :api_method => @discovery.apis.get_rest,\n          :parameters => {\n            'api' => 'discovery',\n            'version' => 'v1'\n          }\n        }\n      end\n\n      it 'should execute both when using a global callback' do\n        block_called = 0\n        ids = ['first_call', 'second_call']\n        expected_ids = ids.clone\n        batch = Google::APIClient::BatchRequest.new do |result|\n          block_called += 1\n          expect(result.status).to eq(200)\n          expect(expected_ids).to include(result.response.call_id)\n          expected_ids.delete(result.response.call_id)\n        end\n\n        batch.add(@call1, ids[0])\n        batch.add(@call2, ids[1])\n\n        CLIENT.execute(batch)\n        expect(block_called).to eq(2)\n      end\n\n      it 'should execute both when using individual callbacks' do\n        batch = Google::APIClient::BatchRequest.new\n\n        call1_returned, call2_returned = false, false\n        batch.add(@call1) do |result|\n          call1_returned = true\n          expect(result.status).to eq(200)\n        end\n        batch.add(@call2) do |result|\n          call2_returned = true\n          expect(result.status).to eq(200)\n        end\n\n        CLIENT.execute(batch)\n        expect(call1_returned).to be_truthy\n        expect(call2_returned).to be_truthy\n      end\n\n      it 'should raise an error if using the same call ID more than once' do\n        batch = Google::APIClient::BatchRequest.new\n\n        expect(lambda do\n          batch.add(@call1, 'my_id')\n          batch.add(@call2, 'my_id')\n        end).to 
raise_error(Google::APIClient::BatchError)\n      end\n    end\n\n    describe 'with a valid request and an invalid one' do\n      before do\n        @call1 = {\n          :api_method => @discovery.apis.get_rest,\n          :parameters => {\n            'api' => 'plus',\n            'version' => 'v1'\n          }\n        }\n\n        @call2 = {\n          :api_method => @discovery.apis.get_rest,\n          :parameters => {\n            'api' => 0,\n            'version' => 1\n          }\n        }\n      end\n\n      it 'should execute both when using a global callback' do\n        block_called = 0\n        ids = ['first_call', 'second_call']\n        expected_ids = ids.clone\n        batch = Google::APIClient::BatchRequest.new do |result|\n          block_called += 1\n          expect(expected_ids).to include(result.response.call_id)\n          expected_ids.delete(result.response.call_id)\n          if result.response.call_id == ids[0]\n            expect(result.status).to eq(200)\n          else\n            expect(result.status).to be >= 400\n            expect(result.status).to be < 500\n          end\n        end\n\n        batch.add(@call1, ids[0])\n        batch.add(@call2, ids[1])\n\n        CLIENT.execute(batch)\n        expect(block_called).to eq(2)\n      end\n\n      it 'should execute both when using individual callbacks' do\n        batch = Google::APIClient::BatchRequest.new\n\n        call1_returned, call2_returned = false, false\n        batch.add(@call1) do |result|\n          call1_returned = true\n          expect(result.status).to eq(200)\n        end\n        batch.add(@call2) do |result|\n          call2_returned = true\n          expect(result.status).to be >= 400\n          expect(result.status).to be < 500\n        end\n\n        CLIENT.execute(batch)\n        expect(call1_returned).to be_truthy\n        expect(call2_returned).to be_truthy\n      end\n    end\n  end\n\n  describe 'with the calendar API' do\n    before do\n      CLIENT.authorization = nil\n      @calendar = CLIENT.discovered_api('calendar', 'v3')\n    end\n\n    describe 'with two valid requests' do\n      before do\n        event1 = {\n          'summary' => 'Appointment 1',\n          'location' => 'Somewhere',\n          'start' => {\n            'dateTime' => '2011-01-01T10:00:00.000-07:00'\n          },\n          'end' => {\n            'dateTime' => '2011-01-01T10:25:00.000-07:00'\n          },\n          'attendees' => [\n            {\n              'email' => 'myemail@mydomain.tld'\n            }\n          ]\n        }\n\n        event2 = {\n          'summary' => 'Appointment 2',\n          'location' => 'Somewhere as well',\n          'start' => {\n            'dateTime' => '2011-01-02T10:00:00.000-07:00'\n          },\n          'end' => {\n            'dateTime' => '2011-01-02T10:25:00.000-07:00'\n          },\n          'attendees' => [\n            {\n              'email' => 'myemail@mydomain.tld'\n            }\n          ]\n        }\n\n        @call1 = {\n          :api_method => @calendar.events.insert,\n          :parameters => {'calendarId' => 'myemail@mydomain.tld'},\n          :body => MultiJson.dump(event1),\n          :headers => {'Content-Type' => 'application/json'}\n        }\n\n        @call2 = {\n          :api_method => @calendar.events.insert,\n          :parameters => {'calendarId' => 'myemail@mydomain.tld'},\n          :body => MultiJson.dump(event2),\n          :headers => {'Content-Type' => 'application/json'}\n        }\n      end\n\n      it 'should 
convert to a correct HTTP request' do\n        batch = Google::APIClient::BatchRequest.new { |result| }\n        batch.add(@call1, '1').add(@call2, '2')\n        request = batch.to_env(CLIENT.connection)\n        boundary = Google::APIClient::BatchRequest::BATCH_BOUNDARY\n        expect(request[:method].to_s.downcase).to eq('post')\n        expect(request[:url].to_s).to eq('https://www.googleapis.com/batch')\n        expect(request[:request_headers]['Content-Type']).to eq(\"multipart/mixed;boundary=#{boundary}\")\n        body = request[:body].read\n        expect(body).to include(@call1[:body])\n        expect(body).to include(@call2[:body])\n      end\n    end\n\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client/client_secrets_spec.rb",
    "content": "# encoding:utf-8\n\n# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'spec_helper'\n\nrequire 'google/api_client/client_secrets'\n\nFIXTURES_PATH = File.expand_path('../../../fixtures', __FILE__)\n\nRSpec.describe Google::APIClient::ClientSecrets do\n  \n  context 'with JSON file' do\n    let(:file) { File.join(FIXTURES_PATH, 'files', 'client_secrets.json') }\n    subject(:secrets) { Google::APIClient::ClientSecrets.load(file)}\n  \n    it 'should load the correct client ID' do\n      expect(secrets.client_id).to be == '898243283568.apps.googleusercontent.com'\n    end\n\n    it 'should load the correct client secret' do\n      expect(secrets.client_secret).to be == 'i8YaXdGgiQ4_KrTVNGsB7QP1'\n    end\n    \n    context 'serialzed to hash' do\n      subject(:hash) { secrets.to_hash }\n      it 'should contain the flow as the first key' do\n        expect(hash).to have_key \"installed\"\n      end\n\n      it 'should contain the client ID' do\n        expect(hash[\"installed\"][\"client_id\"]).to be == '898243283568.apps.googleusercontent.com'\n      end\n\n      it 'should contain the client secret' do\n        expect(hash[\"installed\"][\"client_secret\"]).to be == 'i8YaXdGgiQ4_KrTVNGsB7QP1'\n      end\n\n    end\n  end\nend"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client/discovery_spec.rb",
    "content": "# encoding:utf-8\n\n# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nrequire 'spec_helper'\n\nrequire 'faraday'\nrequire 'multi_json'\nrequire 'compat/multi_json'\nrequire 'signet/oauth_1/client'\nrequire 'google/api_client'\n\nfixtures_path = File.expand_path('../../../fixtures', __FILE__)\n\nRSpec.describe Google::APIClient do\n  include ConnectionHelpers\n  CLIENT = Google::APIClient.new(:application_name => 'API Client Tests') unless defined?(CLIENT)\n\n  after do\n    # Reset client to not-quite-pristine state\n    CLIENT.key = nil\n    CLIENT.user_ip = nil\n  end\n\n  it 'should raise a type error for bogus authorization' do\n    expect(lambda do\n      Google::APIClient.new(:application_name => 'API Client Tests', :authorization => 42)\n    end).to raise_error(TypeError)\n  end\n\n  it 'should not be able to retrieve the discovery document for a bogus API' do\n    expect(lambda do\n      CLIENT.discovery_document('bogus')\n    end).to raise_error(Google::APIClient::TransmissionError)\n    expect(lambda do\n      CLIENT.discovered_api('bogus')\n    end).to raise_error(Google::APIClient::TransmissionError)\n  end\n\n  it 'should raise an error for bogus services' do\n    expect(lambda do\n      CLIENT.discovered_api(42)\n    end).to raise_error(TypeError)\n  end\n\n  it 'should raise an error for bogus services' do\n    expect(lambda do\n      CLIENT.preferred_version(42)\n    end).to raise_error(TypeError)\n  end\n\n  it 'should raise an error for bogus methods' do\n    expect(lambda do\n      CLIENT.execute(42)\n    end).to raise_error(TypeError)\n  end\n\n  it 'should not return a preferred version for bogus service names' do\n    expect(CLIENT.preferred_version('bogus')).to eq(nil)\n  end\n\n  describe 'with zoo API' do\n    it 'should return API instance registered from file' do\n      zoo_json = File.join(fixtures_path, 'files', 'zoo.json')\n      contents = File.open(zoo_json, 'rb') { |io| io.read }\n      api = CLIENT.register_discovery_document('zoo', 'v1', contents)\n      expect(api).to be_kind_of(Google::APIClient::API)\n    end\n  end\n  \n  describe 'with the prediction API' do\n    before do\n      CLIENT.authorization = nil\n      # The prediction API no longer exposes a v1, so we have to be\n      # careful about looking up the wrong API version.\n      @prediction = CLIENT.discovered_api('prediction', 'v1.2')\n    end\n\n    it 'should correctly determine the discovery URI' do\n      expect(CLIENT.discovery_uri('prediction')).to be ===\n        'https://www.googleapis.com/discovery/v1/apis/prediction/v1/rest'\n    end\n\n    it 'should correctly determine the discovery URI if :user_ip is set' do\n      CLIENT.user_ip = '127.0.0.1'\n\n      conn = stub_connection do |stub|\n        stub.get('/discovery/v1/apis/prediction/v1.2/rest?userIp=127.0.0.1') do |env|\n          [200, {}, '{}']\n        end\n      end\n      CLIENT.execute(\n        :http_method => 'GET',\n        :uri => CLIENT.discovery_uri('prediction', 
'v1.2'),\n        :authenticated => false,\n        :connection => conn\n      )\n      conn.verify\n    end\n\n    it 'should correctly determine the discovery URI if :key is set' do\n      CLIENT.key = 'qwerty'\n      conn = stub_connection do |stub|\n        stub.get('/discovery/v1/apis/prediction/v1.2/rest?key=qwerty') do |env|\n          [200, {}, '{}']\n        end\n      end\n      request = CLIENT.execute(\n        :http_method => 'GET',\n        :uri => CLIENT.discovery_uri('prediction', 'v1.2'),\n        :authenticated => false,\n        :connection => conn\n        )\n        conn.verify\n    end\n\n    it 'should correctly determine the discovery URI if both are set' do\n      CLIENT.key = 'qwerty'\n      CLIENT.user_ip = '127.0.0.1'\n      conn = stub_connection do |stub|\n        stub.get('/discovery/v1/apis/prediction/v1.2/rest?key=qwerty&userIp=127.0.0.1') do |env|\n          [200, {}, '{}']\n        end\n      end\n      request = CLIENT.execute(\n        :http_method => 'GET',\n        :uri => CLIENT.discovery_uri('prediction', 'v1.2'),\n        :authenticated => false,\n        :connection => conn\n        )\n        conn.verify\n    end\n\n    it 'should correctly generate API objects' do\n      expect(CLIENT.discovered_api('prediction', 'v1.2').name).to eq('prediction')\n      expect(CLIENT.discovered_api('prediction', 'v1.2').version).to eq('v1.2')\n      expect(CLIENT.discovered_api(:prediction, 'v1.2').name).to eq('prediction')\n      expect(CLIENT.discovered_api(:prediction, 'v1.2').version).to eq('v1.2')\n    end\n\n    it 'should discover methods' do\n      expect(CLIENT.discovered_method(\n        'prediction.training.insert', 'prediction', 'v1.2'\n      ).name).to eq('insert')\n      expect(CLIENT.discovered_method(\n        :'prediction.training.insert', :prediction, 'v1.2'\n      ).name).to eq('insert')\n      expect(CLIENT.discovered_method(\n        'prediction.training.delete', 'prediction', 'v1.2'\n      ).name).to eq('delete')\n    end\n\n    it 'should define the origin API in discovered methods' do\n      expect(CLIENT.discovered_method(\n        'prediction.training.insert', 'prediction', 'v1.2'\n      ).api.name).to eq('prediction')\n    end\n\n    it 'should not find methods that are not in the discovery document' do\n      expect(CLIENT.discovered_method(\n        'prediction.bogus', 'prediction', 'v1.2'\n      )).to eq(nil)\n    end\n\n    it 'should raise an error for bogus methods' do\n      expect(lambda do\n        CLIENT.discovered_method(42, 'prediction', 'v1.2')\n      end).to raise_error(TypeError)\n    end\n\n    it 'should raise an error for bogus methods' do\n      expect(lambda do\n        CLIENT.execute(:api_method => CLIENT.discovered_api('prediction', 'v1.2'))\n      end).to raise_error(TypeError)\n    end\n\n    it 'should correctly determine the preferred version' do\n      expect(CLIENT.preferred_version('prediction').version).not_to eq('v1')\n      expect(CLIENT.preferred_version(:prediction).version).not_to eq('v1')\n    end\n\n    it 'should return a batch path' do\n      expect(CLIENT.discovered_api('prediction', 'v1.2').batch_path).not_to be_nil\n    end\n\n    it 'should generate valid requests' do\n      conn = stub_connection do |stub|\n        stub.post('/prediction/v1.2/training?data=12345') do |env|\n          expect(env[:body]).to eq('')\n          [200, {}, '{}']\n        end\n      end\n      request = CLIENT.execute(\n        :api_method => @prediction.training.insert,\n        :parameters => {'data' => '12345'},\n 
       :connection => conn\n      )\n      conn.verify\n    end\n\n    it 'should generate valid requests when parameter value includes semicolon' do\n      conn = stub_connection do |stub|\n        # semicolon (;) in parameter value was being converted to\n        # bare ampersand (&) in 0.4.7. ensure that it gets converted\n        # to a CGI-escaped semicolon (%3B) instead.\n        stub.post('/prediction/v1.2/training?data=12345%3B67890') do |env|\n          expect(env[:body]).to eq('')\n          [200, {}, '{}']\n        end\n      end\n      request = CLIENT.execute(\n        :api_method => @prediction.training.insert,\n        :parameters => {'data' => '12345;67890'},\n        :connection => conn\n      )\n      conn.verify\n    end\n\n    it 'should generate valid requests when multivalued parameters are passed' do\n      conn = stub_connection do |stub|\n        stub.post('/prediction/v1.2/training?data=1&data=2') do |env|\n          expect(env.params['data']).to include('1', '2')\n          [200, {}, '{}']\n        end\n      end\n      request = CLIENT.execute(\n        :api_method => @prediction.training.insert,\n        :parameters => {'data' => ['1', '2']},\n        :connection => conn\n      )\n      conn.verify\n    end\n\n    it 'should generate requests against the correct URIs' do\n      conn = stub_connection do |stub|\n        stub.post('/prediction/v1.2/training?data=12345') do |env|\n          [200, {}, '{}']\n        end\n      end\n      request = CLIENT.execute(\n        :api_method => @prediction.training.insert,\n        :parameters => {'data' => '12345'},\n        :connection => conn\n      )\n      conn.verify\n    end\n\n    it 'should allow modification to the base URIs for testing purposes' do\n      # Using a new client instance here to avoid caching rebased discovery doc\n      prediction_rebase =\n        Google::APIClient.new(:application_name => 'API Client Tests').discovered_api('prediction', 'v1.2')\n      prediction_rebase.method_base =\n        'https://testing-domain.example.com/prediction/v1.2/'\n\n      conn = stub_connection do |stub|\n        stub.post('/prediction/v1.2/training') do |env|\n          expect(env[:url].host).to eq('testing-domain.example.com')\n          [200, {}, '{}']\n        end\n      end\n\n      request = CLIENT.execute(\n        :api_method => prediction_rebase.training.insert,\n        :parameters => {'data' => '123'},\n        :connection => conn\n      )\n      conn.verify\n    end\n\n    it 'should generate OAuth 1 requests' do\n      CLIENT.authorization = :oauth_1\n      CLIENT.authorization.token_credential_key = '12345'\n      CLIENT.authorization.token_credential_secret = '12345'\n\n      conn = stub_connection do |stub|\n        stub.post('/prediction/v1.2/training?data=12345') do |env|\n          expect(env[:request_headers]).to have_key('Authorization')\n          expect(env[:request_headers]['Authorization']).to match(/^OAuth/)\n          [200, {}, '{}']\n        end\n      end\n\n      request = CLIENT.execute(\n        :api_method => @prediction.training.insert,\n        
:parameters => {'data' => '12345'},\n        :connection => conn\n      )\n      conn.verify\n    end\n\n    it 'should generate OAuth 2 requests' do\n      CLIENT.authorization = :oauth_2\n      CLIENT.authorization.access_token = '12345'\n\n      conn = stub_connection do |stub|\n        stub.post('/prediction/v1.2/training?data=12345') do |env|\n          expect(env[:request_headers]).to have_key('Authorization')\n          expect(env[:request_headers]['Authorization']).to match(/^Bearer/)\n          [200, {}, '{}']\n        end\n      end\n\n      request = CLIENT.execute(\n        :api_method => @prediction.training.insert,\n        :parameters => {'data' => '12345'},\n        :connection => conn\n      )\n      conn.verify\n    end\n\n    it 'should not be able to execute improperly authorized OAuth 1 requests' do\n      CLIENT.authorization = :oauth_1\n      CLIENT.authorization.token_credential_key = '12345'\n      CLIENT.authorization.token_credential_secret = '12345'\n      result = CLIENT.execute(\n        @prediction.training.insert,\n        {'data' => '12345'}\n      )\n      expect(result.response.status).to eq(401)\n    end\n\n    it 'should not be able to execute improperly authorized OAuth 2 requests' do\n      CLIENT.authorization = :oauth_2\n      CLIENT.authorization.access_token = '12345'\n      result = CLIENT.execute(\n        @prediction.training.insert,\n        {'data' => '12345'}\n      )\n      expect(result.response.status).to eq(401)\n    end\n\n    it 'should raise an error when executing improperly authorized OAuth 1 requests with execute!' do\n      expect(lambda do\n        CLIENT.authorization = :oauth_1\n        CLIENT.authorization.token_credential_key = '12345'\n        CLIENT.authorization.token_credential_secret = '12345'\n        result = CLIENT.execute!(\n          @prediction.training.insert,\n          {'data' => '12345'}\n        )\n      end).to raise_error(Google::APIClient::ClientError)\n    end\n\n    it 'should raise an error when executing improperly authorized OAuth 2 requests with execute!' do\n      expect(lambda do\n        CLIENT.authorization = :oauth_2\n        CLIENT.authorization.access_token = '12345'\n        result = CLIENT.execute!(\n          @prediction.training.insert,\n          {'data' => '12345'}\n        )\n      end).to raise_error(Google::APIClient::ClientError)\n    end\n\n    it 'should correctly handle unnamed parameters' do\n      conn = stub_connection do |stub|\n        stub.post('/prediction/v1.2/training') do |env|\n          expect(env[:request_headers]).to have_key('Content-Type')\n          expect(env[:request_headers]['Content-Type']).to eq('application/json')\n          [200, {}, '{}']\n        end\n      end\n      CLIENT.authorization = :oauth_2\n      CLIENT.authorization.access_token = '12345'\n      CLIENT.execute(\n        :api_method => @prediction.training.insert,\n        :body => MultiJson.dump({\"id\" => \"bucket/object\"}),\n        :headers => {'Content-Type' => 'application/json'},\n        :connection => conn\n      )\n      conn.verify\n    end\n  end\n\n  describe 'with the plus API' do\n    before do\n      CLIENT.authorization = nil\n      @plus = CLIENT.discovered_api('plus')\n    end\n\n    it 'should correctly determine the discovery URI' do\n      expect(CLIENT.discovery_uri('plus')).to be ===\n        'https://www.googleapis.com/discovery/v1/apis/plus/v1/rest'\n    end\n\n    it 'should find APIs that are in the discovery document' do\n      expect(CLIENT.discovered_api('plus').name).to eq('plus')\n      
expect(CLIENT.discovered_api('plus').version).to eq('v1')\n      expect(CLIENT.discovered_api(:plus).name).to eq('plus')\n      expect(CLIENT.discovered_api(:plus).version).to eq('v1')\n    end\n\n    it 'should find methods that are in the discovery document' do\n      # TODO(bobaman) Fix this when the RPC names are correct\n      expect(CLIENT.discovered_method(\n        'plus.activities.list', 'plus'\n      ).name).to eq('list')\n    end\n\n    it 'should define the origin API in discovered methods' do\n      expect(CLIENT.discovered_method(\n        'plus.activities.list', 'plus'\n      ).api.name).to eq('plus')\n    end\n\n    it 'should not find methods that are not in the discovery document' do\n      expect(CLIENT.discovered_method('plus.bogus', 'plus')).to eq(nil)\n    end\n\n    it 'should generate requests against the correct URIs' do\n      conn = stub_connection do |stub|\n        stub.get('/plus/v1/people/107807692475771887386/activities/public') do |env|\n          [200, {}, '{}']\n        end\n      end\n\n      request = CLIENT.execute(\n        :api_method => @plus.activities.list,\n        :parameters => {\n          'userId' => '107807692475771887386', 'collection' => 'public'\n        },\n        :authenticated => false,\n        :connection => conn\n      )\n      conn.verify\n    end\n\n    it 'should correctly validate parameters' do\n      expect(lambda do\n        CLIENT.execute(\n          :api_method => @plus.activities.list,\n          :parameters => {'alt' => 'json'},\n          :authenticated => false\n        )\n      end).to raise_error(ArgumentError)\n    end\n\n    it 'should correctly validate parameters' do\n      expect(lambda do\n        CLIENT.execute(\n          :api_method => @plus.activities.list,\n          :parameters => {\n            'userId' => '107807692475771887386', 'collection' => 'bogus'\n          },\n          :authenticated => false\n        ).to_env(CLIENT.connection)\n      end).to raise_error(ArgumentError)\n    end\n\n    it 'should correctly determine the service root_uri' do\n      expect(@plus.root_uri.to_s).to eq('https://www.googleapis.com/')\n    end\n  end\n\n  describe 'with the adsense API' do\n    before do\n      CLIENT.authorization = nil\n      @adsense = CLIENT.discovered_api('adsense', 'v1.3')\n    end\n\n    it 'should correctly determine the discovery URI' do\n      expect(CLIENT.discovery_uri('adsense', 'v1.3').to_s).to be ===\n        'https://www.googleapis.com/discovery/v1/apis/adsense/v1.3/rest'\n    end\n\n    it 'should find APIs that are in the discovery document' do\n      expect(CLIENT.discovered_api('adsense', 'v1.3').name).to eq('adsense')\n      expect(CLIENT.discovered_api('adsense', 'v1.3').version).to eq('v1.3')\n    end\n\n    it 'should return a batch path' do\n      expect(CLIENT.discovered_api('adsense', 'v1.3').batch_path).not_to be_nil\n    end\n\n    it 'should find methods that are in the discovery document' do\n      expect(CLIENT.discovered_method(\n        'adsense.reports.generate', 'adsense', 'v1.3'\n      ).name).to eq('generate')\n    end\n\n    it 'should not find methods that are not in the discovery document' do\n      expect(CLIENT.discovered_method('adsense.bogus', 'adsense', 'v1.3')).to eq(nil)\n    end\n\n    it 'should generate requests against the correct URIs' do\n      conn = stub_connection do |stub|\n        stub.get('/adsense/v1.3/adclients') do |env|\n          [200, {}, '{}']\n        end\n      end\n      request = CLIENT.execute(\n        :api_method => 
@adsense.adclients.list,\n        :authenticated => false,\n        :connection => conn\n      )\n      conn.verify\n    end\n\n    it 'should not be able to execute requests without authorization' do\n      result = CLIENT.execute(\n        :api_method => @adsense.adclients.list,\n        :authenticated => false\n      )\n      expect(result.response.status).to eq(401)\n    end\n\n    it 'should fail when validating missing required parameters' do\n      expect(lambda do\n        CLIENT.execute(\n          :api_method => @adsense.reports.generate,\n          :authenticated => false\n        )\n      end).to raise_error(ArgumentError)\n    end\n\n    it 'should succeed when validating parameters in a correct call' do\n      conn = stub_connection do |stub|\n        stub.get('/adsense/v1.3/reports?dimension=DATE&endDate=2010-01-01&metric=PAGE_VIEWS&startDate=2000-01-01') do |env|\n          [200, {}, '{}']\n        end\n      end\n      expect(lambda do\n        CLIENT.execute(\n          :api_method => @adsense.reports.generate,\n          :parameters => {\n            'startDate' => '2000-01-01',\n            'endDate' => '2010-01-01',\n            'dimension' => 'DATE',\n            'metric' => 'PAGE_VIEWS'\n          },\n          :authenticated => false,\n          :connection => conn\n        )\n      end).not_to raise_error\n      conn.verify\n    end\n\n    it 'should fail when validating parameters with invalid values' do\n      expect(lambda do\n        CLIENT.execute(\n          :api_method => @adsense.reports.generate,\n          :parameters => {\n            'startDate' => '2000-01-01',\n            'endDate' => '2010-01-01',\n            'dimension' => 'BAD_CHARACTERS=-&*(£&',\n            'metric' => 'PAGE_VIEWS'\n          },\n          :authenticated => false\n        )\n      end).to raise_error(ArgumentError)\n    end\n\n    it 'should succeed when validating repeated parameters in a correct call' do\n      conn = stub_connection do |stub|\n        stub.get('/adsense/v1.3/reports?dimension=DATE&dimension=PRODUCT_CODE'+\n                 '&endDate=2010-01-01&metric=CLICKS&metric=PAGE_VIEWS&'+\n                 'startDate=2000-01-01') do |env|\n          [200, {}, '{}']\n        end\n      end\n      expect(lambda do\n        CLIENT.execute(\n          :api_method => @adsense.reports.generate,\n          :parameters => {\n            'startDate' => '2000-01-01',\n            'endDate' => '2010-01-01',\n            'dimension' => ['DATE', 'PRODUCT_CODE'],\n            'metric' => ['PAGE_VIEWS', 'CLICKS']\n          },\n          :authenticated => false,\n          :connection => conn\n        )\n      end).not_to raise_error\n      conn.verify\n    end\n\n    it 'should fail when validating incorrect repeated parameters' do\n      expect(lambda do\n        CLIENT.execute(\n          :api_method => @adsense.reports.generate,\n          :parameters => {\n            'startDate' => '2000-01-01',\n            'endDate' => '2010-01-01',\n            'dimension' => ['DATE', 'BAD_CHARACTERS=-&*(£&'],\n            'metric' => ['PAGE_VIEWS', 'CLICKS']\n          },\n          :authenticated => false\n        )\n      end).to raise_error(ArgumentError)\n    end\n\n    it 'should generate valid requests when multivalued parameters are passed' do\n      conn = stub_connection do |stub|\n         stub.get('/adsense/v1.3/reports?dimension=DATE&dimension=PRODUCT_CODE'+\n                 '&endDate=2010-01-01&metric=CLICKS&metric=PAGE_VIEWS&'+\n                 'startDate=2000-01-01') do 
|env|\n           expect(env.params['dimension']).to include('DATE', 'PRODUCT_CODE')\n           expect(env.params['metric']).to include('CLICKS', 'PAGE_VIEWS')\n          [200, {}, '{}']\n         end\n       end\n      request = CLIENT.execute(\n        :api_method => @adsense.reports.generate,\n          :parameters => {\n            'startDate' => '2000-01-01',\n            'endDate' => '2010-01-01',\n            'dimension' => ['DATE', 'PRODUCT_CODE'],\n            'metric' => ['PAGE_VIEWS', 'CLICKS']\n          },\n          :authenticated => false,\n          :connection => conn\n      )\n      conn.verify\n    end\n  end\n\n  describe 'with the Drive API' do\n    before do\n      CLIENT.authorization = nil\n      @drive = CLIENT.discovered_api('drive', 'v2')\n    end\n\n    it 'should include media upload info methods' do\n      expect(@drive.files.insert.media_upload).not_to eq(nil)\n    end\n\n    it 'should include accepted media types' do\n      expect(@drive.files.insert.media_upload.accepted_types).not_to be_empty\n    end\n\n    it 'should have an upload path' do\n      expect(@drive.files.insert.media_upload.uri_template).not_to eq(nil)\n    end\n\n    it 'should have a max file size' do\n      expect(@drive.files.insert.media_upload.max_size).not_to eq(nil)\n    end\n  end\n\n  describe 'with the Pub/Sub API' do\n    before do\n      CLIENT.authorization = nil\n      @pubsub = CLIENT.discovered_api('pubsub', 'v1beta2')\n    end\n\n    it 'should generate requests against the correct URIs' do\n      conn = stub_connection do |stub|\n        stub.get('/v1beta2/projects/12345/topics') do |env|\n          expect(env[:url].host).to eq('pubsub.googleapis.com')\n          [200, {}, '{}']\n        end\n      end\n      request = CLIENT.execute(\n        :api_method => @pubsub.projects.topics.list,\n        :parameters => {'project' => 'projects/12345'},\n        :connection => conn\n      )\n      conn.verify\n    end\n\n    it 'should correctly determine the service root_uri' do\n      expect(@pubsub.root_uri.to_s).to eq('https://pubsub.googleapis.com/')\n    end\n\n    it 'should discover correct method URIs' do\n      list = CLIENT.discovered_method(\n        \"pubsub.projects.topics.list\", \"pubsub\", \"v1beta2\"\n      )\n      expect(list.uri_template.pattern).to eq(\n        \"https://pubsub.googleapis.com/v1beta2/{+project}/topics\"\n      )\n\n      publish = CLIENT.discovered_method(\n        \"pubsub.projects.topics.publish\", \"pubsub\", \"v1beta2\"\n      )\n      expect(publish.uri_template.pattern).to eq(\n        \"https://pubsub.googleapis.com/v1beta2/{+topic}:publish\"\n      )\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client/gzip_spec.rb",
    "content": "# Encoding: utf-8\n# Copyright 2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'spec_helper'\n\nrequire 'google/api_client'\n\nRSpec.describe Google::APIClient::Gzip do\n\n  def create_connection(&block)\n    Faraday.new do |b|\n      b.response :charset\n      b.response :gzip\n      b.adapter :test do |stub|\n        stub.get '/', &block\n      end\n    end\n  end  \n\n  it 'should ignore non-zipped content' do\n    conn = create_connection do |env|\n      [200, {}, 'Hello world']\n    end\n    result = conn.get('/')\n    expect(result.body).to eq(\"Hello world\")\n  end\n\n  it 'should decompress gziped content' do\n    conn = create_connection do |env|\n      [200, { 'Content-Encoding' => 'gzip'}, Base64.decode64('H4sICLVGwlEAA3RtcADzSM3JyVcozy/KSeECANXgObcMAAAA')]\n    end\n    result = conn.get('/')\n    expect(result.body).to eq(\"Hello world\\n\")\n  end\n  \n  it 'should inflate with the correct charset encoding' do\n    conn = create_connection do |env|\n      [200, \n        { 'Content-Encoding' => 'deflate', 'Content-Type' => 'application/json;charset=BIG5'}, \n        Base64.decode64('eJxb8nLp7t2VAA8fBCI=')]\n    end\n    result = conn.get('/')\n    expect(result.body.encoding).to eq(Encoding::BIG5)\n    expect(result.body).to eq('日本語'.encode(\"BIG5\"))\n  end\n\n  describe 'with API Client' do\n\n    before do\n      @client = Google::APIClient.new(:application_name => 'test')\n      @client.authorization = nil\n    end\n    \n    \n    it 'should send gzip in user agent' do\n      conn = create_connection do |env|\n        agent = env[:request_headers]['User-Agent']\n        expect(agent).not_to be_nil\n        expect(agent).to include 'gzip'\n        [200, {}, 'Hello world']\n      end\n      @client.execute(:uri => 'http://www.example.com/', :connection => conn)\n    end\n\n    it 'should send gzip in accept-encoding' do\n      conn = create_connection do |env|\n        encoding = env[:request_headers]['Accept-Encoding']\n        expect(encoding).not_to be_nil\n        expect(encoding).to include 'gzip'\n        [200, {}, 'Hello world']\n      end\n      @client.execute(:uri => 'http://www.example.com/', :connection => conn)\n    end\n    \n    it 'should not send gzip in accept-encoding if disabled for request' do\n      conn = create_connection do |env|\n        encoding = env[:request_headers]['Accept-Encoding']\n        expect(encoding).not_to include('gzip') unless encoding.nil?\n        [200, {}, 'Hello world']\n      end\n      response = @client.execute(:uri => 'http://www.example.com/', :gzip => false, :connection => conn)\n      puts response.status\n    end\n    \n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client/media_spec.rb",
    "content": "# Copyright 2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'spec_helper'\n\nrequire 'google/api_client'\n\nfixtures_path = File.expand_path('../../../fixtures', __FILE__)\n\nRSpec.describe Google::APIClient::UploadIO do\n  it 'should reject invalid file paths' do\n    expect(lambda do\n      media = Google::APIClient::UploadIO.new('doesnotexist', 'text/plain')\n    end).to raise_error\n  end\n\n  describe 'with a file' do\n    before do\n      @file = File.expand_path('files/sample.txt', fixtures_path)\n      @media = Google::APIClient::UploadIO.new(@file, 'text/plain')\n    end\n\n    it 'should report the correct file length' do\n      expect(@media.length).to eq(File.size(@file))\n    end\n\n    it 'should have a mime type' do\n      expect(@media.content_type).to eq('text/plain')\n    end\n  end\n\n  describe 'with StringIO' do\n    before do\n      @content = \"hello world\"\n      @media = Google::APIClient::UploadIO.new(StringIO.new(@content), 'text/plain', 'test.txt')\n    end\n\n    it 'should report the correct file length' do\n      expect(@media.length).to eq(@content.length)\n    end\n\n    it 'should have a mime type' do\n      expect(@media.content_type).to eq('text/plain')\n    end\n  end\nend\n\nRSpec.describe Google::APIClient::RangedIO do\n  before do\n    @source = StringIO.new(\"1234567890abcdef\")\n    @io = Google::APIClient::RangedIO.new(@source, 1, 5)\n  end\n  \n  it 'should return the correct range when read entirely' do\n    expect(@io.read).to eq(\"23456\")\n  end\n  \n  it 'should maintain position' do\n    expect(@io.read(1)).to eq('2')\n    expect(@io.read(2)).to eq('34')\n    expect(@io.read(2)).to eq('56')\n  end\n  \n  it 'should allow rewinds' do\n    expect(@io.read(2)).to eq('23')\n    @io.rewind()\n    expect(@io.read(2)).to eq('23')\n  end\n  \n  it 'should allow setting position' do\n    @io.pos = 3\n    expect(@io.read).to eq('56')\n  end\n  \n  it 'should not allow position to be set beyond range' do\n    @io.pos = 10\n    expect(@io.read).to eq('')\n  end\n  \n  it 'should return empty string when read amount is zero' do\n    expect(@io.read(0)).to eq('')\n  end\n  \n  it 'should return empty string at EOF if amount is nil' do\n    @io.read\n    expect(@io.read).to eq('')\n  end\n  \n  it 'should return nil at EOF if amount is positive int' do\n    @io.read\n    expect(@io.read(1)).to eq(nil)\n  end\n    \nend\n\nRSpec.describe Google::APIClient::ResumableUpload do\n  CLIENT = Google::APIClient.new(:application_name => 'API Client Tests') unless defined?(CLIENT)\n\n  after do\n    # Reset client to not-quite-pristine state\n    CLIENT.key = nil\n    CLIENT.user_ip = nil\n  end\n\n  before do\n    @drive = CLIENT.discovered_api('drive', 'v2')\n    @file = File.expand_path('files/sample.txt', fixtures_path)\n    @media = Google::APIClient::UploadIO.new(@file, 'text/plain')\n    @uploader = Google::APIClient::ResumableUpload.new(\n      :media => @media,\n      :api_method => @drive.files.insert,\n   
   :uri => 'https://www.googleapis.com/upload/drive/v1/files/12345')\n  end\n\n  it 'should consider 20x status as complete' do\n    request = @uploader.to_http_request\n    @uploader.process_http_response(mock_result(200))\n    expect(@uploader.complete?).to eq(true)\n  end\n\n  it 'should consider 30x status as incomplete' do\n    request = @uploader.to_http_request\n    @uploader.process_http_response(mock_result(308))\n    expect(@uploader.complete?).to eq(false)\n    expect(@uploader.expired?).to eq(false)\n  end\n\n  it 'should consider 40x status as fatal' do\n    request = @uploader.to_http_request\n    @uploader.process_http_response(mock_result(404))\n    expect(@uploader.expired?).to eq(true)\n  end\n\n  it 'should detect changes to location' do\n    request = @uploader.to_http_request\n    @uploader.process_http_response(mock_result(308, 'location' => 'https://www.googleapis.com/upload/drive/v1/files/abcdef'))\n    expect(@uploader.uri.to_s).to eq('https://www.googleapis.com/upload/drive/v1/files/abcdef')\n  end\n\n  it 'should resume from the saved range reported by the server' do    \n    @uploader.chunk_size = 200\n    @uploader.to_http_request # Send bytes 0-199, only 0-99 saved\n    @uploader.process_http_response(mock_result(308, 'range' => '0-99'))\n    method, url, headers, body = @uploader.to_http_request # Send bytes 100-299\n    expect(headers['Content-Range']).to eq(\"bytes 100-299/#{@media.length}\")\n    expect(headers['Content-length']).to eq(\"200\")\n  end\n\n  it 'should resync the offset after 5xx errors' do\n    @uploader.chunk_size = 200\n    @uploader.to_http_request\n    @uploader.process_http_response(mock_result(500)) # Invalidates range\n    method, url, headers, body = @uploader.to_http_request # Resync\n    expect(headers['Content-Range']).to eq(\"bytes */#{@media.length}\")\n    expect(headers['Content-length']).to eq(\"0\")\n    @uploader.process_http_response(mock_result(308, 'range' => '0-99'))\n    method, url, headers, body = @uploader.to_http_request # Send next chunk at correct range\n    expect(headers['Content-Range']).to eq(\"bytes 100-299/#{@media.length}\")\n    expect(headers['Content-length']).to eq(\"200\")\n  end\n\n  def mock_result(status, headers = {})\n    reference = Google::APIClient::Reference.new(:api_method => @drive.files.insert)\n    double('result', :status => status, :headers => headers, :reference => reference)\n  end\n\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client/request_spec.rb",
    "content": "# Copyright 2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'spec_helper'\n\nrequire 'google/api_client'\n\nRSpec.describe Google::APIClient::Request do\n  CLIENT = Google::APIClient.new(:application_name => 'API Client Tests') unless defined?(CLIENT)\n\n  it 'should normalize parameter names to strings' do\n    request = Google::APIClient::Request.new(:uri => 'https://www.google.com', :parameters => {\n      :a => '1', 'b' => '2'\n    })\n    expect(request.parameters['a']).to eq('1')\n    expect(request.parameters['b']).to eq('2')\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client/result_spec.rb",
    "content": "# Copyright 2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'spec_helper'\n\nrequire 'google/api_client'\n\nRSpec.describe Google::APIClient::Result do\n  CLIENT = Google::APIClient.new(:application_name => 'API Client Tests') unless defined?(CLIENT)\n\n  describe 'with the plus API' do\n    before do\n      CLIENT.authorization = nil\n      @plus = CLIENT.discovered_api('plus', 'v1')\n      @reference = Google::APIClient::Reference.new({\n        :api_method => @plus.activities.list,\n        :parameters => {\n          'userId' => 'me',\n          'collection' => 'public',\n          'maxResults' => 20\n        }\n      })\n      @request = @reference.to_http_request\n\n      # Response double\n      @response = double(\"response\")\n      allow(@response).to receive(:status).and_return(200)\n      allow(@response).to receive(:headers).and_return({\n        'etag' => '12345',\n        'x-google-apiary-auth-scopes' =>\n          'https://www.googleapis.com/auth/plus.me',\n        'content-type' => 'application/json; charset=UTF-8',\n        'date' => 'Mon, 23 Apr 2012 00:00:00 GMT',\n        'cache-control' => 'private, max-age=0, must-revalidate, no-transform',\n        'server' => 'GSE',\n        'connection' => 'close'\n      })\n    end\n\n    describe 'with a next page token' do\n      before do\n        allow(@response).to receive(:body).and_return(\n          <<-END_OF_STRING\n          {\n            \"kind\": \"plus#activityFeed\",\n            \"etag\": \"FOO\",\n            \"nextPageToken\": \"NEXT+PAGE+TOKEN\",\n            \"selfLink\": \"https://www.googleapis.com/plus/v1/people/foo/activities/public?\",\n            \"nextLink\": \"https://www.googleapis.com/plus/v1/people/foo/activities/public?maxResults=20&pageToken=NEXT%2BPAGE%2BTOKEN\",\n            \"title\": \"Plus Public Activity Feed for \",\n            \"updated\": \"2012-04-23T00:00:00.000Z\",\n            \"id\": \"123456790\",\n            \"items\": []\n          }\n          END_OF_STRING\n        )\n        @result = Google::APIClient::Result.new(@reference, @response)\n      end\n\n      it 'should indicate a successful response' do\n        expect(@result.error?).to be_falsey\n      end\n\n      it 'should return the correct next page token' do\n        expect(@result.next_page_token).to eq('NEXT+PAGE+TOKEN')\n      end\n\n      it 'should escape the next page token when calling next_page' do\n        reference = @result.next_page\n        expect(Hash[reference.parameters]).to include('pageToken')\n        expect(Hash[reference.parameters]['pageToken']).to eq('NEXT+PAGE+TOKEN')\n        url = reference.to_env(CLIENT.connection)[:url]\n        expect(url.to_s).to include('pageToken=NEXT%2BPAGE%2BTOKEN')\n      end\n\n      it 'should return content type correctly' do\n        expect(@result.media_type).to eq('application/json')\n      end\n\n      it 'should return the result data correctly' do\n        expect(@result.data?).to be_truthy\n        
expect(@result.data.class.to_s).to eq(\n            'Google::APIClient::Schema::Plus::V1::ActivityFeed'\n        )\n        expect(@result.data.kind).to eq('plus#activityFeed')\n        expect(@result.data.etag).to eq('FOO')\n        expect(@result.data.nextPageToken).to eq('NEXT+PAGE+TOKEN')\n        expect(@result.data.selfLink).to eq(\n            'https://www.googleapis.com/plus/v1/people/foo/activities/public?'\n        )\n        expect(@result.data.nextLink).to eq(\n            'https://www.googleapis.com/plus/v1/people/foo/activities/public?' +\n            'maxResults=20&pageToken=NEXT%2BPAGE%2BTOKEN'\n        )\n        expect(@result.data.title).to eq('Plus Public Activity Feed for ')\n        expect(@result.data.id).to eq(\"123456790\")\n        expect(@result.data.items).to be_empty\n      end\n    end\n\n    describe 'without a next page token' do\n      before do\n        allow(@response).to receive(:body).and_return(\n          <<-END_OF_STRING\n          {\n            \"kind\": \"plus#activityFeed\",\n            \"etag\": \"FOO\",\n            \"selfLink\": \"https://www.googleapis.com/plus/v1/people/foo/activities/public?\",\n            \"title\": \"Plus Public Activity Feed for \",\n            \"updated\": \"2012-04-23T00:00:00.000Z\",\n            \"id\": \"123456790\",\n            \"items\": []\n          }\n          END_OF_STRING\n        )\n        @result = Google::APIClient::Result.new(@reference, @response)\n      end\n\n      it 'should not return a next page token' do\n        expect(@result.next_page_token).to eq(nil)\n      end\n\n      it 'should return content type correctly' do\n        expect(@result.media_type).to eq('application/json')\n      end\n\n      it 'should return the result data correctly' do\n        expect(@result.data?).to be_truthy\n        expect(@result.data.class.to_s).to eq(\n            'Google::APIClient::Schema::Plus::V1::ActivityFeed'\n        )\n        expect(@result.data.kind).to eq('plus#activityFeed')\n        expect(@result.data.etag).to eq('FOO')\n        expect(@result.data.selfLink).to eq(\n            'https://www.googleapis.com/plus/v1/people/foo/activities/public?'\n        )\n        expect(@result.data.title).to eq('Plus Public Activity Feed for ')\n        expect(@result.data.id).to eq(\"123456790\")\n        expect(@result.data.items).to be_empty\n      end\n    end\n\n    describe 'with JSON error response' do\n      before do\n        allow(@response).to receive(:body).and_return(\n         <<-END_OF_STRING\n         {\n          \"error\": {\n           \"errors\": [\n            {\n             \"domain\": \"global\",\n             \"reason\": \"parseError\",\n             \"message\": \"Parse Error\"\n            }\n           ],\n           \"code\": 400,\n           \"message\": \"Parse Error\"\n          }\n         }\n         END_OF_STRING\n        )\n        allow(@response).to receive(:status).and_return(400)\n        @result = Google::APIClient::Result.new(@reference, @response)\n      end\n\n      it 'should return error status correctly' do\n        expect(@result.error?).to be_truthy\n      end\n\n      it 'should return the correct error message' do\n        expect(@result.error_message).to eq('Parse Error')\n      end\n    end\n\n    describe 'with 204 No Content response' do\n      before do\n        allow(@response).to receive(:body).and_return('')\n        allow(@response).to receive(:status).and_return(204)\n        allow(@response).to receive(:headers).and_return({})\n        @result = 
Google::APIClient::Result.new(@reference, @response)\n      end\n\n      it 'should indicate no data is available' do\n        expect(@result.data?).to be_falsey\n      end\n\n      it 'should return nil for data' do\n        expect(@result.data).to eq(nil)\n      end\n\n      it 'should return nil for media_type' do\n        expect(@result.media_type).to eq(nil)\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client/service_account_spec.rb",
    "content": "# Copyright 2012 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'spec_helper'\n\nrequire 'google/api_client'\n\nfixtures_path = File.expand_path('../../../fixtures', __FILE__)\n\nRSpec.describe Google::APIClient::KeyUtils do\n  it 'should read PKCS12 files from the filesystem' do\n    if RUBY_PLATFORM == 'java' && RUBY_VERSION.start_with?('1.8')\n      pending \"Reading from PKCS12 not supported on jruby 1.8.x\"\n    end\n    path =  File.expand_path('files/privatekey.p12', fixtures_path)\n    key = Google::APIClient::KeyUtils.load_from_pkcs12(path, 'notasecret')\n    expect(key).not_to eq(nil)\n  end\n\n  it 'should read PKCS12 files from loaded files' do\n    if RUBY_PLATFORM == 'java' && RUBY_VERSION.start_with?('1.8')\n      pending \"Reading from PKCS12 not supported on jruby 1.8.x\"\n    end\n    path =  File.expand_path('files/privatekey.p12', fixtures_path)\n    content = File.read(path)\n    key = Google::APIClient::KeyUtils.load_from_pkcs12(content, 'notasecret')\n    expect(key).not_to eq(nil)\n  end\n\n  it 'should read PEM files from the filesystem' do\n    path =  File.expand_path('files/secret.pem', fixtures_path)\n    key = Google::APIClient::KeyUtils.load_from_pem(path, 'notasecret')\n    expect(key).not_to eq(nil)\n  end\n\n  it 'should read PEM files from loaded files' do\n    path =  File.expand_path('files/secret.pem', fixtures_path)\n    content = File.read(path)\n    key = Google::APIClient::KeyUtils.load_from_pem(content, 'notasecret')\n    expect(key).not_to eq(nil)\n  end\n\nend\n\nRSpec.describe Google::APIClient::JWTAsserter do\n  include ConnectionHelpers\n\n  before do\n    @key = OpenSSL::PKey::RSA.new 2048\n  end\n\n  it 'should generate valid JWTs' do\n    asserter = Google::APIClient::JWTAsserter.new('client1', 'scope1 scope2', @key)\n    jwt = asserter.to_authorization.to_jwt\n    expect(jwt).not_to eq(nil)\n\n    claim = JWT.decode(jwt, @key.public_key, true)\n    claim = claim[0] if claim[0]\n    expect(claim[\"iss\"]).to eq('client1')\n    expect(claim[\"scope\"]).to eq('scope1 scope2')\n  end\n\n  it 'should allow impersonation' do\n    conn = stub_connection do |stub|\n      stub.post('/o/oauth2/token') do |env|\n        params = Addressable::URI.form_unencode(env[:body])\n        JWT.decode(params.assoc(\"assertion\").last, @key.public_key)\n        expect(params.assoc(\"grant_type\")).to eq(['grant_type','urn:ietf:params:oauth:grant-type:jwt-bearer'])\n        [200, {'content-type' => 'application/json'}, '{\n          \"access_token\" : \"1/abcdef1234567890\",\n          \"token_type\" : \"Bearer\",\n          \"expires_in\" : 3600\n        }']\n      end\n    end\n    asserter = Google::APIClient::JWTAsserter.new('client1', 'scope1 scope2', @key)\n    auth = asserter.authorize('user1@email.com', { :connection => conn })\n    expect(auth).not_to eq(nil?)\n    expect(auth.person).to eq('user1@email.com')\n    conn.verify\n  end\n\n  it 'should send valid access token request' do\n    conn = 
stub_connection do |stub|\n      stub.post('/o/oauth2/token') do |env|\n        params = Addressable::URI.form_unencode(env[:body])\n        JWT.decode(params.assoc(\"assertion\").last, @key.public_key)\n        expect(params.assoc(\"grant_type\")).to eq(['grant_type','urn:ietf:params:oauth:grant-type:jwt-bearer'])\n        [200, {'content-type' => 'application/json'}, '{\n          \"access_token\" : \"1/abcdef1234567890\",\n          \"token_type\" : \"Bearer\",\n          \"expires_in\" : 3600\n        }']\n      end\n    end\n    asserter = Google::APIClient::JWTAsserter.new('client1', 'scope1 scope2', @key)\n    auth = asserter.authorize(nil, { :connection => conn })\n    expect(auth).not_to eq(nil?)\n    expect(auth.access_token).to eq(\"1/abcdef1234567890\")\n    conn.verify\n  end\n\n  it 'should be refreshable' do\n    conn = stub_connection do |stub|\n      stub.post('/o/oauth2/token') do |env|\n        params = Addressable::URI.form_unencode(env[:body])\n        JWT.decode(params.assoc(\"assertion\").last, @key.public_key)\n        expect(params.assoc(\"grant_type\")).to eq(['grant_type','urn:ietf:params:oauth:grant-type:jwt-bearer'])\n        [200, {'content-type' => 'application/json'}, '{\n          \"access_token\" : \"1/abcdef1234567890\",\n          \"token_type\" : \"Bearer\",\n          \"expires_in\" : 3600\n        }']\n      end\n      stub.post('/o/oauth2/token') do |env|\n        params = Addressable::URI.form_unencode(env[:body])\n        JWT.decode(params.assoc(\"assertion\").last, @key.public_key)\n        expect(params.assoc(\"grant_type\")).to eq(['grant_type','urn:ietf:params:oauth:grant-type:jwt-bearer'])\n        [200, {'content-type' => 'application/json'}, '{\n          \"access_token\" : \"1/0987654321fedcba\",\n          \"token_type\" : \"Bearer\",\n          \"expires_in\" : 3600\n        }']\n      end\n    end\n    asserter = Google::APIClient::JWTAsserter.new('client1', 'scope1 scope2', @key)\n    auth = asserter.authorize(nil, { :connection => conn })\n    expect(auth).not_to eq(nil?)\n    expect(auth.access_token).to eq(\"1/abcdef1234567890\")\n\n    auth.fetch_access_token!(:connection => conn)\n    expect(auth.access_token).to eq(\"1/0987654321fedcba\")\n\n    conn.verify\n  end\nend\n\nRSpec.describe Google::APIClient::ComputeServiceAccount do\n  include ConnectionHelpers\n\n  it 'should query metadata server' do\n    conn = stub_connection do |stub|\n      stub.get('/computeMetadata/v1beta1/instance/service-accounts/default/token') do |env|\n        expect(env.url.host).to eq('metadata')\n        [200, {'content-type' => 'application/json'}, '{\n          \"access_token\" : \"1/abcdef1234567890\",\n          \"token_type\" : \"Bearer\",\n          \"expires_in\" : 3600\n        }']\n      end\n    end\n    service_account = Google::APIClient::ComputeServiceAccount.new\n    auth = service_account.fetch_access_token!({ :connection => conn })\n    expect(auth).not_to eq(nil?)\n    expect(auth[\"access_token\"]).to eq(\"1/abcdef1234567890\")\n    conn.verify\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client/service_spec.rb",
    "content": "# encoding:utf-8\n\n# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'spec_helper'\n\nrequire 'google/api_client'\nrequire 'google/api_client/service'\n\nfixtures_path = File.expand_path('../../../fixtures', __FILE__)\n\nRSpec.describe Google::APIClient::Service do\n  include ConnectionHelpers\n\n  APPLICATION_NAME = 'API Client Tests'\n\n  it 'should error out when called without an API name or version' do\n    expect(lambda do\n      Google::APIClient::Service.new\n    end).to raise_error(ArgumentError)\n  end\n\n  it 'should error out when called without an API version' do\n    expect(lambda do\n      Google::APIClient::Service.new('foo')\n    end).to raise_error(ArgumentError)\n  end\n\n  it 'should error out when the options hash is not a hash' do\n    expect(lambda do\n      Google::APIClient::Service.new('foo', 'v1', 42)\n    end).to raise_error(ArgumentError)\n  end\n\n  describe 'with the AdSense Management API' do\n\n    it 'should make a valid call for a method with no parameters' do\n      conn = stub_connection do |stub|\n        stub.get('/adsense/v1.3/adclients') do |env|\n          [200, {}, '{}']\n        end\n      end\n      adsense = Google::APIClient::Service.new(\n        'adsense',\n        'v1.3',\n        {\n          :application_name => APPLICATION_NAME,\n          :authenticated => false,\n          :connection => conn,\n          :cache_store => nil\n        }\n      )\n\n      req = adsense.adclients.list.execute()\n      conn.verify\n    end\n\n    it 'should make a valid call for a method with parameters' do\n      conn = stub_connection do |stub|\n        stub.get('/adsense/v1.3/adclients/1/adunits') do |env|\n          [200, {}, '{}']\n        end\n      end\n      adsense = Google::APIClient::Service.new(\n        'adsense',\n        'v1.3',\n        {\n          :application_name => APPLICATION_NAME,\n          :authenticated => false,\n          :connection => conn,\n          :cache_store => nil\n        }\n      )\n      req = adsense.adunits.list(:adClientId => '1').execute()\n    end\n\n    it 'should make a valid call for a deep method' do\n      conn = stub_connection do |stub|\n        stub.get('/adsense/v1.3/accounts/1/adclients') do |env|\n          [200, {}, '{}']\n        end\n      end\n      adsense = Google::APIClient::Service.new(\n        'adsense',\n        'v1.3',\n        {\n          :application_name => APPLICATION_NAME,\n          :authenticated => false,\n          :connection => conn,\n          :cache_store => nil\n        }\n      )\n      req = adsense.accounts.adclients.list(:accountId => '1').execute()\n    end\n\n    describe 'with no connection' do\n      before do\n        @adsense = Google::APIClient::Service.new('adsense', 'v1.3',\n          {:application_name => APPLICATION_NAME, :cache_store => nil})\n      end\n\n      it 'should return a resource when using a valid resource name' do\n        expect(@adsense.accounts).to 
be_a(Google::APIClient::Service::Resource)\n      end\n\n      it 'should throw an error when using an invalid resource name' do\n        expect(lambda do\n           @adsense.invalid_resource\n        end).to raise_error\n      end\n\n      it 'should return a request when using a valid method name' do\n        req = @adsense.adclients.list\n        expect(req).to be_a(Google::APIClient::Service::Request)\n        expect(req.method.id).to eq('adsense.adclients.list')\n        expect(req.parameters).to be_nil\n      end\n\n      it 'should throw an error when using an invalid method name' do\n        expect(lambda do\n           @adsense.adclients.invalid_method\n        end).to raise_error\n      end\n\n      it 'should return a valid request with parameters' do\n        req = @adsense.adunits.list(:adClientId => '1')\n        expect(req).to be_a(Google::APIClient::Service::Request)\n        expect(req.method.id).to eq('adsense.adunits.list')\n        expect(req.parameters).not_to be_nil\n        expect(req.parameters[:adClientId]).to eq('1')\n      end\n    end\n  end\n\n  describe 'with the Prediction API' do\n\n    it 'should make a valid call with an object body' do\n      conn = stub_connection do |stub|\n        stub.post('/prediction/v1.5/trainedmodels?project=1') do |env|\n          expect(env.body).to eq('{\"id\":\"1\"}')\n          [200, {}, '{}']\n        end\n      end\n      prediction = Google::APIClient::Service.new(\n        'prediction',\n        'v1.5',\n        {\n          :application_name => APPLICATION_NAME,\n          :authenticated => false,\n          :connection => conn,\n          :cache_store => nil\n        }\n      )\n      req = prediction.trainedmodels.insert(:project => '1').body({'id' => '1'}).execute()\n      conn.verify\n    end\n\n    it 'should make a valid call with a text body' do\n      conn = stub_connection do |stub|\n        stub.post('/prediction/v1.5/trainedmodels?project=1') do |env|\n          expect(env.body).to eq('{\"id\":\"1\"}')\n          [200, {}, '{}']\n        end\n      end\n      prediction = Google::APIClient::Service.new(\n        'prediction',\n        'v1.5',\n        {\n          :application_name => APPLICATION_NAME,\n          :authenticated => false,\n          :connection => conn,\n          :cache_store => nil\n        }\n      )\n      req = prediction.trainedmodels.insert(:project => '1').body('{\"id\":\"1\"}').execute()\n      conn.verify\n    end\n\n    describe 'with no connection' do\n      before do\n        @prediction = Google::APIClient::Service.new('prediction', 'v1.5',\n          {:application_name => APPLICATION_NAME, :cache_store => nil})\n      end\n\n      it 'should return a valid request with a body' do\n        req = @prediction.trainedmodels.insert(:project => '1').body({'id' => '1'})\n        expect(req).to be_a(Google::APIClient::Service::Request)\n        expect(req.method.id).to eq('prediction.trainedmodels.insert')\n        expect(req.body).to eq({'id' => '1'})\n        expect(req.parameters).not_to be_nil\n        expect(req.parameters[:project]).to eq('1')\n      end\n\n      it 'should return a valid request with a body when using resource name' do\n        req = @prediction.trainedmodels.insert(:project => '1').training({'id' => '1'})\n        expect(req).to be_a(Google::APIClient::Service::Request)\n        expect(req.method.id).to eq('prediction.trainedmodels.insert')\n        expect(req.training).to eq({'id' => '1'})\n        expect(req.parameters).not_to be_nil\n        
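# Parameters given in the fluent call chain are preserved on the returned request:\n        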
expect(req.parameters[:project]).to eq('1')\n      end\n    end\n  end\n\n  describe 'with the Drive API' do\n\n    before do\n      @metadata = {\n        'title' => 'My movie',\n        'description' => 'The best home movie ever made'\n      }\n      @file = File.expand_path('files/sample.txt', fixtures_path)\n      @media = Google::APIClient::UploadIO.new(@file, 'text/plain')\n    end\n\n    it 'should make a valid call with an object body and media upload' do\n      conn = stub_connection do |stub|\n        stub.post('/upload/drive/v2/files?uploadType=multipart') do |env|\n          expect(env.body).to be_a Faraday::CompositeReadIO\n          [200, {}, '{}']\n        end\n      end\n      drive = Google::APIClient::Service.new(\n        'drive',\n        'v2',\n        {\n          :application_name => APPLICATION_NAME,\n          :authenticated => false,\n          :connection => conn,\n          :cache_store => nil\n        }\n      )\n      req = drive.files.insert(:uploadType => 'multipart').body(@metadata).media(@media).execute()\n      conn.verify\n    end\n\n    describe 'with no connection' do\n      before do\n        @drive = Google::APIClient::Service.new('drive', 'v2',\n          {:application_name => APPLICATION_NAME, :cache_store => nil})\n      end\n\n      it 'should return a valid request with a body and media upload' do\n        req = @drive.files.insert(:uploadType => 'multipart').body(@metadata).media(@media)\n        expect(req).to be_a(Google::APIClient::Service::Request)\n        expect(req.method.id).to eq('drive.files.insert')\n        expect(req.body).to eq(@metadata)\n        expect(req.media).to eq(@media)\n        expect(req.parameters).not_to be_nil\n        expect(req.parameters[:uploadType]).to eq('multipart')\n      end\n\n      it 'should return a valid request with a body and media upload when using resource name' do\n        req = @drive.files.insert(:uploadType => 'multipart').file(@metadata).media(@media)\n        expect(req).to be_a(Google::APIClient::Service::Request)\n        expect(req.method.id).to eq('drive.files.insert')\n        expect(req.file).to eq(@metadata)\n        expect(req.media).to eq(@media)\n        expect(req.parameters).not_to be_nil\n        expect(req.parameters[:uploadType]).to eq('multipart')\n      end\n    end\n  end\n\n  describe 'with the Discovery API' do\n    it 'should make a valid end-to-end request' do\n      discovery = Google::APIClient::Service.new('discovery', 'v1',\n          {:application_name => APPLICATION_NAME, :authenticated => false,\n           :cache_store => nil})\n      result = discovery.apis.get_rest(:api => 'discovery', :version => 'v1').execute\n      expect(result).not_to be_nil\n      expect(result.data.name).to eq('discovery')\n      expect(result.data.version).to eq('v1')\n    end\n  end\nend\n\n\nRSpec.describe Google::APIClient::Service::Result do\n\n  describe 'with the plus API' do\n    before do\n      @plus = Google::APIClient::Service.new('plus', 'v1',\n          {:application_name => APPLICATION_NAME, :cache_store => nil})\n      @reference = Google::APIClient::Reference.new({\n        :api_method => @plus.activities.list.method,\n        :parameters => {\n          'userId' => 'me',\n          'collection' => 'public',\n          'maxResults' => 20\n        }\n      })\n      @request = @plus.activities.list(:userId => 'me', :collection => 'public',\n        :maxResults => 20)\n\n      # Response double\n      @response = double(\"response\")\n      allow(@response).to 
receive(:status).and_return(200)\n      allow(@response).to receive(:headers).and_return({\n        'etag' => '12345',\n        'x-google-apiary-auth-scopes' =>\n          'https://www.googleapis.com/auth/plus.me',\n        'content-type' => 'application/json; charset=UTF-8',\n        'date' => 'Mon, 23 Apr 2012 00:00:00 GMT',\n        'cache-control' => 'private, max-age=0, must-revalidate, no-transform',\n        'server' => 'GSE',\n        'connection' => 'close'\n      })\n    end\n\n    describe 'with a next page token' do\n      before do\n        @body = <<-END_OF_STRING\n          {\n            \"kind\": \"plus#activityFeed\",\n            \"etag\": \"FOO\",\n            \"nextPageToken\": \"NEXT+PAGE+TOKEN\",\n            \"selfLink\": \"https://www.googleapis.com/plus/v1/people/foo/activities/public?\",\n            \"nextLink\": \"https://www.googleapis.com/plus/v1/people/foo/activities/public?maxResults=20&pageToken=NEXT%2BPAGE%2BTOKEN\",\n            \"title\": \"Plus Public Activity Feed for \",\n            \"updated\": \"2012-04-23T00:00:00.000Z\",\n            \"id\": \"123456790\",\n            \"items\": []\n          }\n          END_OF_STRING\n        allow(@response).to receive(:body).and_return(@body)\n        base_result = Google::APIClient::Result.new(@reference, @response)\n        @result = Google::APIClient::Service::Result.new(@request, base_result)\n      end\n\n      it 'should indicate a successful response' do\n        expect(@result.error?).to be_falsey\n      end\n\n      it 'should return the correct next page token' do\n        expect(@result.next_page_token).to eq('NEXT+PAGE+TOKEN')\n      end\n\n      it 'should generate a correct request when calling next_page' do\n        next_page_request = @result.next_page\n        expect(next_page_request.parameters).to include('pageToken')\n        expect(next_page_request.parameters['pageToken']).to eq('NEXT+PAGE+TOKEN')\n        @request.parameters.each_pair do |param, value|\n          expect(next_page_request.parameters[param]).to eq(value)\n        end\n      end\n\n      it 'should return content type correctly' do\n        expect(@result.media_type).to eq('application/json')\n      end\n\n      it 'should return the body correctly' do\n        expect(@result.body).to eq(@body)\n      end\n\n      it 'should return the result data correctly' do\n        expect(@result.data?).to be_truthy\n        expect(@result.data.class.to_s).to eq(\n            'Google::APIClient::Schema::Plus::V1::ActivityFeed'\n        )\n        expect(@result.data.kind).to eq('plus#activityFeed')\n        expect(@result.data.etag).to eq('FOO')\n        expect(@result.data.nextPageToken).to eq('NEXT+PAGE+TOKEN')\n        expect(@result.data.selfLink).to eq(\n            'https://www.googleapis.com/plus/v1/people/foo/activities/public?'\n        )\n        expect(@result.data.nextLink).to eq(\n            'https://www.googleapis.com/plus/v1/people/foo/activities/public?' 
+\n            'maxResults=20&pageToken=NEXT%2BPAGE%2BTOKEN'\n        )\n        expect(@result.data.title).to eq('Plus Public Activity Feed for ')\n        expect(@result.data.id).to eq(\"123456790\")\n        expect(@result.data.items).to be_empty\n      end\n    end\n\n    describe 'without a next page token' do\n      before do\n        @body = <<-END_OF_STRING\n          {\n            \"kind\": \"plus#activityFeed\",\n            \"etag\": \"FOO\",\n            \"selfLink\": \"https://www.googleapis.com/plus/v1/people/foo/activities/public?\",\n            \"title\": \"Plus Public Activity Feed for \",\n            \"updated\": \"2012-04-23T00:00:00.000Z\",\n            \"id\": \"123456790\",\n            \"items\": []\n          }\n          END_OF_STRING\n        allow(@response).to receive(:body).and_return(@body)\n        base_result = Google::APIClient::Result.new(@reference, @response)\n        @result = Google::APIClient::Service::Result.new(@request, base_result)\n      end\n\n      it 'should not return a next page token' do\n        expect(@result.next_page_token).to eq(nil)\n      end\n\n      it 'should return content type correctly' do\n        expect(@result.media_type).to eq('application/json')\n      end\n\n      it 'should return the body correctly' do\n        expect(@result.body).to eq(@body)\n      end\n\n      it 'should return the result data correctly' do\n        expect(@result.data?).to be_truthy\n        expect(@result.data.class.to_s).to eq(\n            'Google::APIClient::Schema::Plus::V1::ActivityFeed'\n        )\n        expect(@result.data.kind).to eq('plus#activityFeed')\n        expect(@result.data.etag).to eq('FOO')\n        expect(@result.data.selfLink).to eq(\n            'https://www.googleapis.com/plus/v1/people/foo/activities/public?'\n        )\n        expect(@result.data.title).to eq('Plus Public Activity Feed for ')\n        expect(@result.data.id).to eq(\"123456790\")\n        expect(@result.data.items).to be_empty\n      end\n    end\n\n    describe 'with JSON error response' do\n      before do\n        @body = <<-END_OF_STRING\n         {\n          \"error\": {\n           \"errors\": [\n            {\n             \"domain\": \"global\",\n             \"reason\": \"parseError\",\n             \"message\": \"Parse Error\"\n            }\n           ],\n           \"code\": 400,\n           \"message\": \"Parse Error\"\n          }\n         }\n         END_OF_STRING\n        allow(@response).to receive(:body).and_return(@body)\n        allow(@response).to receive(:status).and_return(400)\n        base_result = Google::APIClient::Result.new(@reference, @response)\n        @result = Google::APIClient::Service::Result.new(@request, base_result)\n      end\n\n      it 'should return error status correctly' do\n        expect(@result.error?).to be_truthy\n      end\n\n      it 'should return the correct error message' do\n        expect(@result.error_message).to eq('Parse Error')\n      end\n\n      it 'should return the body correctly' do\n        expect(@result.body).to eq(@body)\n      end\n    end\n\n    describe 'with 204 No Content response' do\n      before do\n        allow(@response).to receive(:body).and_return('')\n        allow(@response).to receive(:status).and_return(204)\n        allow(@response).to receive(:headers).and_return({})\n        base_result = Google::APIClient::Result.new(@reference, @response)\n        @result = Google::APIClient::Service::Result.new(@request, base_result)\n      end\n\n      it 'should indicate 
no data is available' do\n        expect(@result.data?).to be_falsey\n      end\n\n      it 'should return nil for data' do\n        expect(@result.data).to eq(nil)\n      end\n\n      it 'should return nil for media_type' do\n        expect(@result.media_type).to eq(nil)\n      end\n    end\n  end\nend\n\nRSpec.describe Google::APIClient::Service::BatchRequest do\n  \n  include ConnectionHelpers\n  \n  context 'with a service connection' do\n    before do\n      @conn = stub_connection do |stub|\n        stub.post('/batch') do |env|\n          [500, {'Content-Type' => 'application/json'}, '{}']\n        end\n      end\n      @discovery = Google::APIClient::Service.new('discovery', 'v1',\n          {:application_name => APPLICATION_NAME, :authorization => nil,\n           :cache_store => nil, :connection => @conn})\n      @calls = [\n        @discovery.apis.get_rest(:api => 'plus', :version => 'v1'),\n        @discovery.apis.get_rest(:api => 'discovery', :version => 'v1')\n      ]\n    end\n\n    it 'should use the service connection' do\n      batch = @discovery.batch(@calls) do\n      end\n      batch.execute\n      @conn.verify\n    end  \n  end\n  \n  describe 'with the discovery API' do\n    before do\n      @discovery = Google::APIClient::Service.new('discovery', 'v1',\n          {:application_name => APPLICATION_NAME, :authorization => nil,\n           :cache_store => nil})\n    end\n\n    describe 'with two valid requests' do\n      before do\n        @calls = [\n          @discovery.apis.get_rest(:api => 'plus', :version => 'v1'),\n          @discovery.apis.get_rest(:api => 'discovery', :version => 'v1')\n        ]\n      end\n\n      it 'should execute both when using a global callback' do\n        block_called = 0\n        batch = @discovery.batch(@calls) do |result|\n          block_called += 1\n          expect(result.status).to eq(200)\n        end\n\n        batch.execute\n        expect(block_called).to eq(2)\n      end\n\n      it 'should execute both when using individual callbacks' do\n        call1_returned, call2_returned = false, false\n        batch = @discovery.batch\n\n        batch.add(@calls[0]) do |result|\n          call1_returned = true\n          expect(result.status).to eq(200)\n          expect(result.call_index).to eq(0)\n        end\n\n        batch.add(@calls[1]) do |result|\n          call2_returned = true\n          expect(result.status).to eq(200)\n          expect(result.call_index).to eq(1)\n        end\n\n        batch.execute\n        expect(call1_returned).to eq(true)\n        expect(call2_returned).to eq(true)\n      end\n    end\n\n    describe 'with a valid request and an invalid one' do\n      before do\n        @calls = [\n          @discovery.apis.get_rest(:api => 'plus', :version => 'v1'),\n          @discovery.apis.get_rest(:api => 'invalid', :version => 'invalid')\n        ]\n      end\n\n      it 'should execute both when using a global callback' do\n        block_called = 0\n        batch = @discovery.batch(@calls) do |result|\n          block_called += 1\n          if result.call_index == 0\n            expect(result.status).to eq(200)\n          else\n            expect(result.status).to be >= 400\n            expect(result.status).to be < 500\n          end\n        end\n\n        batch.execute\n        expect(block_called).to eq(2)\n      end\n\n      it 'should execute both when using individual callbacks' do\n        call1_returned, call2_returned = false, false\n        batch = @discovery.batch\n\n        batch.add(@calls[0]) do 
|result|\n          call1_returned = true\n          expect(result.status).to eq(200)\n          expect(result.call_index).to eq(0)\n        end\n\n        batch.add(@calls[1]) do |result|\n          call2_returned = true\n          expect(result.status).to be >= 400\n          expect(result.status).to be < 500\n          expect(result.call_index).to eq(1)\n        end\n\n        batch.execute\n        expect(call1_returned).to eq(true)\n        expect(call2_returned).to eq(true)\n      end      \n    end\n  end\nend"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client/simple_file_store_spec.rb",
    "content": "# encoding:utf-8\n\n# Copyright 2013 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'spec_helper'\n\nrequire 'google/api_client/service/simple_file_store'\n\nRSpec.describe Google::APIClient::Service::SimpleFileStore do\n\n  FILE_NAME = 'test.cache'\n\n  describe 'with no cache file' do\n    before(:each) do\n      File.delete(FILE_NAME) if File.exists?(FILE_NAME)\n      @cache = Google::APIClient::Service::SimpleFileStore.new(FILE_NAME)\n    end\n\n    it 'should return nil when asked if a key exists' do\n      expect(@cache.exist?('invalid')).to be_nil\n      expect(File.exists?(FILE_NAME)).to be_falsey\n    end\n\n    it 'should return nil when asked to read a key' do\n      expect(@cache.read('invalid')).to be_nil\n      expect(File.exists?(FILE_NAME)).to be_falsey\n    end\n\n    it 'should return nil when asked to fetch a key' do\n      expect(@cache.fetch('invalid')).to be_nil\n      expect(File.exists?(FILE_NAME)).to be_falsey\n    end\n\n    it 'should create a cache file when asked to fetch a key with a default' do\n      expect(@cache.fetch('new_key') do\n        'value'\n      end).to eq('value')\n      expect(File.exists?(FILE_NAME)).to be_truthy\n    end\n\n    it 'should create a cache file when asked to write a key' do\n      @cache.write('new_key', 'value')\n      expect(File.exists?(FILE_NAME)).to be_truthy\n    end\n\n    it 'should return nil when asked to delete a key' do\n      expect(@cache.delete('invalid')).to be_nil\n      expect(File.exists?(FILE_NAME)).to be_falsey\n    end\n  end\n\n  describe 'with an existing cache' do\n    before(:each) do\n      File.delete(FILE_NAME) if File.exists?(FILE_NAME)\n      @cache = Google::APIClient::Service::SimpleFileStore.new(FILE_NAME)\n      @cache.write('existing_key', 'existing_value')\n    end\n\n    it 'should return true when asked if an existing key exists' do\n      expect(@cache.exist?('existing_key')).to be_truthy\n    end\n\n    it 'should return false when asked if a nonexistent key exists' do\n      expect(@cache.exist?('invalid')).to be_falsey\n    end\n\n    it 'should return the value for an existing key when asked to read it' do\n      expect(@cache.read('existing_key')).to eq('existing_value')\n    end\n\n    it 'should return nil for a nonexistent key when asked to read it' do\n      expect(@cache.read('invalid')).to be_nil\n    end\n\n    it 'should return the value for an existing key when asked to read it' do\n      expect(@cache.read('existing_key')).to eq('existing_value')\n    end\n\n    it 'should return nil for a nonexistent key when asked to fetch it' do\n      expect(@cache.fetch('invalid')).to be_nil\n    end\n\n    it 'should return and save the default value for a nonexistent key when asked to fetch it with a default' do\n      expect(@cache.fetch('new_key') do\n        'value'\n      end).to eq('value')\n      expect(@cache.read('new_key')).to eq('value')\n    end\n\n    it 'should remove an existing value and return true when asked to delete it' 
do\n      expect(@cache.delete('existing_key')).to be_truthy\n      expect(@cache.read('existing_key')).to be_nil\n    end\n\n    it 'should return false when asked to delete a nonexistent key' do\n      expect(@cache.delete('invalid')).to be_falsey\n    end\n\n    it 'should convert keys to strings when storing them' do\n      @cache.write(:symbol_key, 'value')\n      expect(@cache.read('symbol_key')).to eq('value')\n    end\n\n    it 'should convert keys to strings when reading them' do\n      expect(@cache.read(:existing_key)).to eq('existing_value')\n    end\n\n    it 'should convert keys to strings when fetching them' do\n      expect(@cache.fetch(:existing_key)).to eq('existing_value')\n    end\n\n    it 'should convert keys to strings when deleting them' do\n      expect(@cache.delete(:existing_key)).to be_truthy\n      expect(@cache.read('existing_key')).to be_nil\n    end\n  end\n\n  after(:all) do\n    File.delete(FILE_NAME) if File.exists?(FILE_NAME)\n  end\nend"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/google/api_client_spec.rb",
    "content": "# Copyright 2010 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nrequire 'spec_helper'\n\nrequire 'faraday'\nrequire 'signet/oauth_1/client'\nrequire 'google/api_client'\n\nshared_examples_for 'configurable user agent' do\n  include ConnectionHelpers\n\n  it 'should allow the user agent to be modified' do\n    client.user_agent = 'Custom User Agent/1.2.3'\n    expect(client.user_agent).to eq('Custom User Agent/1.2.3')\n  end\n\n  it 'should allow the user agent to be set to nil' do\n    client.user_agent = nil\n    expect(client.user_agent).to eq(nil)\n  end\n\n  it 'should not allow the user agent to be used with bogus values' do\n    expect(lambda do\n      client.user_agent = 42\n      client.execute(:uri=>'https://www.google.com/')\n    end).to raise_error(TypeError)\n  end\n\n  it 'should transmit a User-Agent header when sending requests' do\n    client.user_agent = 'Custom User Agent/1.2.3'\n\n    conn = stub_connection do |stub|\n      stub.get('/') do |env|\n        headers = env[:request_headers]\n        expect(headers).to have_key('User-Agent')\n        expect(headers['User-Agent']).to eq(client.user_agent)\n        [200, {}, ['']]\n      end\n    end\n    client.execute(:uri=>'https://www.google.com/', :connection => conn)\n    conn.verify\n  end\nend\n\nRSpec.describe Google::APIClient do\n  include ConnectionHelpers\n\n  let(:client) { Google::APIClient.new(:application_name => 'API Client Tests') }\n\n  it \"should pass the faraday options provided on initialization to FaraDay configuration block\" do\n    client = Google::APIClient.new(faraday_option: {timeout: 999})\n    expect(client.connection.options.timeout).to be == 999\n  end\n\n  it 'should make its version number available' do\n    expect(Google::APIClient::VERSION::STRING).to be_instance_of(String)\n  end\n\n  it 'should default to OAuth 2' do\n    expect(Signet::OAuth2::Client).to be === client.authorization\n  end\n\n  describe 'configure for no authentication' do\n    before do\n      client.authorization = nil\n    end\n    it_should_behave_like 'configurable user agent'\n  end\n\n  describe 'configured for OAuth 1' do\n    before do\n      client.authorization = :oauth_1\n      client.authorization.token_credential_key = 'abc'\n      client.authorization.token_credential_secret = '123'\n    end\n\n    it 'should use the default OAuth1 client configuration' do\n      expect(client.authorization.temporary_credential_uri.to_s).to eq(\n        'https://www.google.com/accounts/OAuthGetRequestToken'\n      )\n      expect(client.authorization.authorization_uri.to_s).to include(\n        'https://www.google.com/accounts/OAuthAuthorizeToken'\n      )\n      expect(client.authorization.token_credential_uri.to_s).to eq(\n        'https://www.google.com/accounts/OAuthGetAccessToken'\n      )\n      expect(client.authorization.client_credential_key).to eq('anonymous')\n      expect(client.authorization.client_credential_secret).to eq('anonymous')\n    end\n\n    it_should_behave_like 
'configurable user agent'\n  end\n\n  describe 'configured for OAuth 2' do\n    before do\n      client.authorization = :oauth_2\n      client.authorization.access_token = '12345'\n    end\n\n    # TODO\n    it_should_behave_like 'configurable user agent'\n  end\n\n  describe 'when executing requests' do\n    before do\n      @prediction = client.discovered_api('prediction', 'v1.2')\n      client.authorization = :oauth_2\n      @connection = stub_connection do |stub|\n        stub.post('/prediction/v1.2/training?data=12345') do |env|\n          expect(env[:request_headers]['Authorization']).to eq('Bearer 12345')\n          [200, {}, '{}']\n        end\n      end\n    end\n\n    after do\n      @connection.verify\n    end\n\n    it 'should use default authorization' do\n      client.authorization.access_token = \"12345\"\n      client.execute(\n        :api_method => @prediction.training.insert,\n        :parameters => {'data' => '12345'},\n        :connection => @connection\n      )\n    end\n\n    it 'should use request scoped authorization when provided' do\n      client.authorization.access_token = \"abcdef\"\n      new_auth = Signet::OAuth2::Client.new(:access_token => '12345')\n      client.execute(\n        :api_method => @prediction.training.insert,\n        :parameters => {'data' => '12345'},\n        :authorization => new_auth,\n        :connection => @connection\n      )\n    end\n\n    it 'should accept options with batch/request style execute' do\n      client.authorization.access_token = \"abcdef\"\n      new_auth = Signet::OAuth2::Client.new(:access_token => '12345')\n      request = client.generate_request(\n        :api_method => @prediction.training.insert,\n        :parameters => {'data' => '12345'}\n      )\n      client.execute(\n        request,\n        :authorization => new_auth,\n        :connection => @connection\n      )\n    end\n\n\n    it 'should accept options in array style execute' do\n       client.authorization.access_token = \"abcdef\"\n       new_auth = Signet::OAuth2::Client.new(:access_token => '12345')\n       client.execute(\n         @prediction.training.insert, {'data' => '12345'}, '', {},\n         { :authorization => new_auth, :connection => @connection }\n       )\n     end\n  end\n\n  describe 'when retries enabled' do\n    before do\n      client.retries = 2\n    end\n\n    after do\n      @connection.verify\n    end\n\n    it 'should follow redirects' do\n      client.authorization = nil\n      @connection = stub_connection do |stub|\n        stub.get('/foo') do |env|\n          [302, {'location' => 'https://www.google.com/bar'}, '{}']\n        end\n        stub.get('/bar') do |env|\n          [200, {}, '{}']\n        end\n      end\n\n      client.execute(  \n        :uri => 'https://www.google.com/foo',\n        :connection => @connection\n      )\n    end\n\n    it 'should refresh tokens on 401 errors' do\n      client.authorization.access_token = '12345'\n      expect(client.authorization).to receive(:fetch_access_token!)\n\n      @connection = stub_connection do |stub|\n        stub.get('/foo') do |env|\n          [401, {}, '{}']\n        end\n        stub.get('/foo') do |env|\n          [200, {}, '{}']\n        end\n      end\n\n      client.execute(  \n        :uri => 'https://www.google.com/foo',\n        :connection => @connection\n      )\n    end\n\n\n    it 'should not attempt multiple token refreshes' do\n      client.authorization.access_token = '12345'\n      expect(client.authorization).to 
receive(:fetch_access_token!).once\n\n      @connection = stub_connection do |stub|\n        stub.get('/foo') do |env|\n          [401, {}, '{}']\n        end\n      end\n\n      client.execute(  \n        :uri => 'https://www.google.com/foo',\n        :connection => @connection\n      )\n    end\n\n    it 'should not retry on client errors' do\n      count = 0\n      @connection = stub_connection do |stub|\n        stub.get('/foo') do |env|\n          expect(count).to eq(0)\n          count += 1\n          [403, {}, '{}']\n        end\n      end\n\n      client.execute(  \n        :uri => 'https://www.google.com/foo',\n        :connection => @connection,\n        :authenticated => false\n      )\n    end\n\n    it 'should retry on 500 errors' do\n      client.authorization = nil\n\n      @connection = stub_connection do |stub|\n        stub.get('/foo') do |env|\n          [500, {}, '{}']\n        end\n        stub.get('/foo') do |env|\n          [200, {}, '{}']\n        end\n      end\n\n      expect(client.execute(  \n        :uri => 'https://www.google.com/foo',\n        :connection => @connection\n      ).status).to eq(200)\n\n    end\n\n    it 'should fail after max retries' do\n      client.authorization = nil\n      count = 0\n      @connection = stub_connection do |stub|\n        stub.get('/foo') do |env|\n          count += 1\n          [500, {}, '{}']\n        end\n      end\n\n      expect(client.execute(  \n        :uri => 'https://www.google.com/foo',\n        :connection => @connection\n      ).status).to eq(500)\n      expect(count).to eq(3)\n    end\n\n  end\n\n  describe 'when retries disabled and expired_auth_retry on (default)' do\n    before do\n      client.retries = 0\n    end\n\n    after do\n      @connection.verify\n    end\n\n    it 'should refresh tokens on 401 errors' do\n      client.authorization.access_token = '12345'\n      expect(client.authorization).to receive(:fetch_access_token!)\n\n      @connection = stub_connection do |stub|\n        stub.get('/foo') do |env|\n          [401, {}, '{}']\n        end\n        stub.get('/foo') do |env|\n          [200, {}, '{}']\n        end\n      end\n\n      client.execute(\n        :uri => 'https://www.google.com/foo',\n        :connection => @connection\n      )\n    end\n\n  end\n\n  describe 'when retries disabled and expired_auth_retry off' do\n    before do\n      client.retries = 0\n      client.expired_auth_retry = false\n    end\n\n    it 'should not refresh tokens on 401 errors' do\n      client.authorization.access_token = '12345'\n      expect(client.authorization).not_to receive(:fetch_access_token!)\n\n      @connection = stub_connection do |stub|\n        stub.get('/foo') do |env|\n          [401, {}, '{}']\n        end\n        stub.get('/foo') do |env|\n          [200, {}, '{}']\n        end\n      end\n\n      resp = client.execute(\n        :uri => 'https://www.google.com/foo',\n        :connection => @connection\n      )\n\n      expect(resp.response.status).to be == 401\n    end\n\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/spec/spec_helper.rb",
    "content": "$LOAD_PATH.unshift(File.expand_path('../../lib', __FILE__))\n$LOAD_PATH.uniq!\n\nrequire 'rspec'\nrequire 'faraday'\n\nbegin\n  require 'simplecov'\n  require 'coveralls'\n\n  SimpleCov.formatter = Coveralls::SimpleCov::Formatter\n  SimpleCov.start\nrescue LoadError\n  # SimpleCov missing, so just run specs with no coverage.\nend\n\nFaraday::Adapter.load_middleware(:test)\n\nmodule Faraday\n  class Connection\n    def verify\n      if app.kind_of?(Faraday::Adapter::Test)\n        app.stubs.verify_stubbed_calls\n      else\n        raise TypeError, \"Expected test adapter\"\n      end\n    end\n  end\nend\n\nmodule ConnectionHelpers\n  def stub_connection(&block)\n    stubs = Faraday::Adapter::Test::Stubs.new do |stub|\n      block.call(stub)\n    end\n    connection = Faraday.new do |builder|\n      builder.options.params_encoder = Faraday::FlatParamsEncoder\n      builder.adapter(:test, stubs)\n    end\n  end\nend\n\nmodule JSONMatchers\n  class EqualsJson\n    def initialize(expected)\n      @expected = JSON.parse(expected)\n    end\n    def matches?(target)\n      @target = JSON.parse(target)\n      @target.eql?(@expected)\n    end\n    def failure_message\n      \"expected #{@target.inspect} to be #{@expected}\"\n    end\n    def negative_failure_message\n      \"expected #{@target.inspect} not to be #{@expected}\"\n    end\n  end\n\n  def be_json(expected)\n    EqualsJson.new(expected)\n  end\nend\n\nRSpec.configure do |config|\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/yard/bin/yard-wiki",
    "content": "#!/usr/bin/env ruby\n$LOAD_PATH.unshift(\n  File.expand_path(File.join(File.dirname(__FILE__), '../lib'))\n)\n$LOAD_PATH.uniq!\n\nrequire 'yard/cli/wiki'\n\nYARD::CLI::Wiki.run(*ARGV)\n"
  },
  {
    "path": "sdk/ruby-google-api-client/yard/lib/yard/cli/wiki.rb",
    "content": "require 'yard'\nrequire 'yard/serializers/wiki_serializer'\nrequire 'yard/cli/yardoc'\n\nmodule YARD\n  module CLI\n    class Wiki < Yardoc\n      # Creates a new instance of the commandline utility\n      def initialize\n        super\n        @options = SymbolHash.new(false)\n        @options.update(\n          :format => :html,\n          :template => :default,\n          :markup => :rdoc, # default is :rdoc but falls back on :none\n          :serializer => YARD::Serializers::WikiSerializer.new, # Sigh. :-(\n          :default_return => \"Object\",\n          :hide_void_return => false,\n          :no_highlight => false,\n          :files => [],\n          :verifier => Verifier.new\n        )\n        @visibilities = [:public]\n        @assets = {}\n        @excluded = []\n        @files = []\n        @hidden_tags = []\n        @use_cache = false\n        @use_yardopts_file = true\n        @use_document_file = true\n        @generate = true\n        @options_file = DEFAULT_YARDOPTS_FILE\n        @statistics = true\n        @list = false\n        @save_yardoc = true\n        @has_markup = false\n\n        if defined?(::Encoding) && ::Encoding.respond_to?(:default_external=)\n          ::Encoding.default_external, ::Encoding.default_internal = 'utf-8', 'utf-8'\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/yard/lib/yard/rake/wikidoc_task.rb",
    "content": "require 'rake'\nrequire 'rake/tasklib'\nrequire 'yard/rake/yardoc_task'\nrequire 'yard/cli/wiki'\n\nmodule YARD\n  module Rake\n    # The rake task to run {CLI::Yardoc} and generate documentation.\n    class WikidocTask < YardocTask\n      protected\n\n      # Defines the rake task\n      # @return [void]\n      def define\n        desc \"Generate Wiki Documentation with YARD\"\n        task(name) do\n          before.call if before.is_a?(Proc)\n          yardoc = YARD::CLI::Wiki.new\n          yardoc.parse_arguments *(options + files)\n          yardoc.options[:verifier] = verifier if verifier\n          yardoc.run\n          after.call if after.is_a?(Proc)\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/yard/lib/yard/serializers/wiki_serializer.rb",
    "content": "# encoding: utf-8\n\nrequire 'yard/serializers/file_system_serializer'\n\nmodule YARD\n  module Serializers\n    ##\n    # Subclass required to get correct filename for the top level namespace.\n    # :-(\n    class WikiSerializer < FileSystemSerializer\n      # Post-process the data before serializing.\n      # Strip unnecessary whitespace.\n      # Convert stuff into more wiki-friendly stuff.\n      # FULL OF HACKS!\n      def serialize(object, data)\n        data = data.encode(\"UTF-8\")\n        if object == \"Sidebar.wiki\"\n          data = data.gsub(/^#sidebar Sidebar\\n/, \"\")\n        end\n        data = data.gsub(/\\n\\s*\\n/, \"\\n\")\n        # ASCII/UTF-8 erb error work-around.\n        data = data.gsub(/--/, \"—\")\n        data = data.gsub(/——/, \"----\")\n        data = data.gsub(/----\\n----/, \"----\")\n        # HACK! Google Code Wiki treats <code> blocks like <pre> blocks.\n        data = data.gsub(/\\<code\\>(.+)\\<\\/code\\>/, \"`\\\\1`\")\n        super(object, data)\n      end\n\n      def serialized_path(object)\n        return object if object.is_a?(String)\n\n        if object.is_a?(CodeObjects::ExtraFileObject)\n          fspath = ['file.' + object.name + (extension.empty? ? '' : \".#{extension}\")]\n        else\n          # This line is the only change of significance.\n          # Changed from 'top-level-namespace' to 'TopLevelNamespace' to\n          # conform to wiki word page naming convention.\n          objname = object != YARD::Registry.root ? object.name.to_s : \"TopLevelNamespace\"\n          objname += '_' + object.scope.to_s[0,1] if object.is_a?(CodeObjects::MethodObject)\n          fspath = [objname + (extension.empty? ? '' : \".#{extension}\")]\n          if object.namespace && object.namespace.path != \"\"\n            fspath.unshift(*object.namespace.path.split(CodeObjects::NSEP))\n          end\n        end\n\n        # Don't change the filenames, it just makes it more complicated\n        # to figure out the original name.\n        #fspath.map! do |p|\n        #  p.gsub(/([a-z])([A-Z])/, '\\1_\\2').downcase\n        #end\n\n        # Remove special chars from filenames.\n        # Windows disallows \\ / : * ? \" < > | but we will just remove any\n        # non alphanumeric (plus period, underscore and dash).\n        fspath.map! do |p|\n          p.gsub(/[^\\w\\.-]/) do |x|\n            encoded = '_'\n\n            x.each_byte { |b| encoded << (\"%X\" % b) }\n            encoded\n          end\n        end\n        fspath.join(\"\")\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/yard/lib/yard/templates/helpers/wiki_helper.rb",
    "content": "require 'cgi'\nrequire 'rdiscount'\n\nmodule YARD\n  module Templates::Helpers\n    # The helper module for HTML templates.\n    module WikiHelper\n      include MarkupHelper\n\n      # @return [String] escapes text\n      def h(text)\n        out = \"\"\n        text = text.split(/\\n/)\n        text.each_with_index do |line, i|\n          out <<\n          case line\n          when /^\\s*$/; \"\\n\\n\"\n          when /^\\s+\\S/, /^=/; line + \"\\n\"\n          else; line + (text[i + 1] =~ /^\\s+\\S/ ? \"\\n\" : \" \")\n          end\n        end\n        out.strip\n      end\n\n      # @return [String] wraps text at +col+ columns.\n      def wrap(text, col = 72)\n        text.strip.gsub(/(.{1,#{col}})( +|$\\n?)|(.{1,#{col}})/, \"\\\\1\\\\3\\n\")\n      end\n\n      # Escapes a URL\n      # \n      # @param [String] text the URL\n      # @return [String] the escaped URL\n      def urlencode(text)\n        CGI.escape(text.to_s)\n      end\n\n      def indent(text, len = 2)\n        text.gsub(/^/, ' ' * len)\n      end\n\n      def unindent(text)\n        lines = text.split(\"\\n\", -1)\n        min_indent_size = text.size\n        for line in lines\n          indent_size = (line.gsub(\"\\t\", \"  \") =~ /[^\\s]/) || text.size\n          min_indent_size = indent_size if indent_size < min_indent_size\n        end\n        text.gsub(\"\\t\", \"  \").gsub(Regexp.new(\"^\" + \" \" * min_indent_size), '')\n      end\n\n      # @group Converting Markup to HTML\n\n      # Turns text into HTML using +markup+ style formatting.\n      #\n      # @param [String] text the text to format\n      # @param [Symbol] markup examples are +:markdown+, +:textile+, +:rdoc+.\n      #   To add a custom markup type, see {MarkupHelper}\n      # @return [String] the HTML\n      def htmlify(text, markup = options[:markup])\n        markup_meth = \"html_markup_#{markup}\"\n        return text unless respond_to?(markup_meth)\n        return \"\" unless text\n        return text unless markup\n        load_markup_provider(markup)\n        html = send(markup_meth, text)\n        if html.respond_to?(:encode)\n          html = html.force_encoding(text.encoding) # for libs that mess with encoding\n          html = html.encode(:invalid => :replace, :replace => '?')\n        end\n        html = resolve_links(html)\n        html = html.gsub(/<pre>(?:\\s*<code>)?(.+?)(?:<\\/code>\\s*)?<\\/pre>/m) do\n          str = unindent($1).strip\n          str = html_syntax_highlight(CGI.unescapeHTML(str)) unless options[:no_highlight]\n          str\n        end unless markup == :text\n        html\n      end\n\n      # Converts Markdown to HTML\n      # @param [String] text input Markdown text\n      # @return [String] output HTML\n      # @since 0.6.0\n      def html_markup_markdown(text)\n        Markdown.new(text).to_html\n      end\n\n      # Converts Textile to HTML\n      # @param [String] text the input Textile text\n      # @return [String] output HTML\n      # @since 0.6.0\n      def html_markup_textile(text)\n        doc = markup_class(:textile).new(text)\n        doc.hard_breaks = false if doc.respond_to?(:hard_breaks=)\n        doc.to_html\n      end\n\n      # Converts plaintext to HTML\n      # @param [String] text the input text\n      # @return [String] the output HTML\n      # @since 0.6.0\n      def html_markup_text(text)\n        \"<pre>\" + text + \"</pre>\"\n      end\n\n      # Converts HTML to HTML\n      # @param [String] text input html\n      # @return [String] output HTML\n      # @since 
0.6.0\n      def html_markup_html(text)\n        text\n      end\n\n      # @return [String] HTMLified text as a single line (paragraphs removed)\n      def htmlify_line(*args)\n        htmlify(*args)\n      end\n\n      # Fixes RDoc behaviour with ++ only supporting alphanumeric text.\n      #\n      # @todo Refactor into own SimpleMarkup subclass\n      def fix_typewriter(text)\n        text.gsub(/\\+(?! )([^\\n\\+]{1,900})(?! )\\+/) do\n          type_text, pre_text, no_match = $1, $`, $&\n          pre_match = pre_text.scan(%r(</?(?:pre|tt|code).*?>))\n          if pre_match.last.nil? || pre_match.last.include?('/')\n            '`' + h(type_text) + '`'\n          else\n            no_match\n          end\n        end\n      end\n\n      # Don't allow -- to turn into &#8212; element. The chances of this being\n      # some --option is far more likely than the typographical meaning.\n      #\n      # @todo Refactor into own SimpleMarkup subclass\n      def fix_dash_dash(text)\n        text.gsub(/&#8212;(?=\\S)/, '--')\n      end\n\n      # @group Syntax Highlighting Source Code\n\n      # Syntax highlights +source+ in language +type+.\n      #\n      # @note To support a specific language +type+, implement the method\n      #   +html_syntax_highlight_TYPE+ in this class.\n      #\n      # @param [String] source the source code to highlight\n      # @param [Symbol] type the language type (:ruby, :plain, etc). Use\n      #   :plain for no syntax highlighting.\n      # @return [String] the highlighted source\n      def html_syntax_highlight(source, type = nil)\n        return \"\" unless source\n        return \"{{{\\n#{source}\\n}}}\"\n      end\n\n      # @return [String] unhighlighted source\n      def html_syntax_highlight_plain(source)\n        return \"\" unless source\n        return \"{{{\\n#{source}\\n}}}\"\n      end\n\n      # @group Linking Objects and URLs\n\n      # Resolves any text in the form of +{Name}+ to the object specified by\n      # Name. Also supports link titles in the form +{Name title}+.\n      #\n      # @example Linking to an instance method\n      #   resolve_links(\"{MyClass#method}\") # => \"<a href='...'>MyClass#method</a>\"\n      # @example Linking to a class with a title\n      #   resolve_links(\"{A::B::C the C class}\") # => \"<a href='...'>the c class</a>\"\n      # @param [String] text the text to resolve links in\n      # @return [String] HTML with linkified references\n      def resolve_links(text)\n        code_tags = 0\n        text.gsub(/<(\\/)?(pre|code|tt)|\\{(\\S+?)(?:\\s(.*?\\S))?\\}(?=[\\W<]|.+<\\/|$)/) do |str|\n          closed, tag, name, title, match = $1, $2, $3, $4, $&\n          if tag\n            code_tags += (closed ? -1 : 1)\n            next str\n          end\n          next str unless code_tags == 0\n\n          next(match) if name[0,1] == '|'\n          if object.is_a?(String)\n            object\n          else\n            link = linkify(name, title)\n            if link == name || link == title\n              match = /(.+)?(\\{#{Regexp.quote name}(?:\\s.*?)?\\})(.+)?/.match(text)\n              file = (@file ? @file : object.file) || '(unknown)'\n              line = (@file ? 1 : (object.docstring.line_range ? object.docstring.line_range.first : 1)) + (match ? $`.count(\"\\n\") : 0)\n              log.warn \"In file `#{file}':#{line}: Cannot resolve link to #{name} from text\" + (match ? \":\" : \".\")\n              log.warn((match[1] ? '...' : '') + match[2].gsub(\"\\n\",\"\") + (match[3] ? '...' 
: '')) if match\n            end\n\n            link\n          end\n        end\n      end\n\n      def unlink(value)\n        value.gsub(/\\b(([A-Z][a-z]+){2,99})\\b/, \"!\\\\1\")\n      end\n\n      # (see BaseHelper#link_file)\n      def link_file(filename, title = nil, anchor = nil)\n        link_url(url_for_file(filename, anchor), title)\n      end\n\n      # (see BaseHelper#link_include_object)\n      def link_include_object(obj)\n        htmlify(obj.docstring)\n      end\n\n      # (see BaseHelper#link_object)\n      def link_object(obj, otitle = nil, anchor = nil, relative = true)\n        return otitle if obj.nil?\n        obj = Registry.resolve(object, obj, true, true) if obj.is_a?(String)\n        if !otitle && obj.root?\n          title = \"Top Level Namespace\"\n        elsif otitle\n          # title = \"`\" + otitle.to_s + \"`\"\n          title = otitle.to_s\n        elsif object.is_a?(CodeObjects::Base)\n          # title = \"`\" + h(object.relative_path(obj)) + \"`\"\n          title = h(object.relative_path(obj))\n        else\n          # title = \"`\" + h(obj.to_s) + \"`\"\n          title = h(obj.to_s)\n        end\n        unless serializer\n          return unlink(title)\n        end\n        return unlink(title) if obj.is_a?(CodeObjects::Proxy)\n\n        link = url_for(obj, anchor, relative)\n        if link\n          link_url(link, title, :formatted => false)\n        else\n          unlink(title)\n        end\n      end\n\n      # (see BaseHelper#link_url)\n      def link_url(url, title = nil, params = {})\n        title ||= url\n        if url.to_s == \"\"\n          title\n        else\n          if params[:formatted]\n            \"<a href=\\\"#{url}\\\">#{title}</a>\"\n          else\n            \"[#{url} #{title}]\"\n          end\n        end\n      end\n\n      # @group URL Helpers\n\n      # @param [CodeObjects::Base] object the object to get an anchor for\n      # @return [String] the anchor for a specific object\n      def anchor_for(object)\n        # Method:_Google::APIClient#execute!\n        case object\n        when CodeObjects::MethodObject\n          if object.scope == :instance\n            \"Method:_#{object.path}\"\n          elsif object.scope == :class\n            \"Method:_#{object.path}\"\n          end\n        when CodeObjects::ClassVariableObject\n          \"#{object.name.to_s.gsub('@@', '')}-#{object.type}\"\n        when CodeObjects::Base\n          \"#{object.name}-#{object.type}\"\n        when CodeObjects::Proxy\n          object.path\n        else\n          object.to_s\n        end\n      end\n\n      # Returns the URL for an object.\n      #\n      # @param [String, CodeObjects::Base] obj the object (or object path) to link to\n      # @param [String] anchor the anchor to link to\n      # @param [Boolean] relative use a relative or absolute link\n      # @return [String] the URL location of the object\n      def url_for(obj, anchor = nil, relative = true)\n        link = nil\n        return link unless serializer\n        if obj.kind_of?(CodeObjects::Base) && obj.root?\n          return 'TopLevelNamespace'\n        end\n\n        if obj.is_a?(CodeObjects::Base) && !obj.is_a?(CodeObjects::NamespaceObject)\n          # If the obj is not a namespace obj make it the anchor.\n          anchor, obj = obj, obj.namespace\n        end\n\n        objpath = serializer.serialized_path(obj)\n        return link unless objpath\n\n        if relative\n          fromobj = object\n          if object.is_a?(CodeObjects::Base) &&\n           
   !object.is_a?(CodeObjects::NamespaceObject)\n            fromobj = fromobj.namespace\n          end\n\n          from = serializer.serialized_path(fromobj)\n          link = File.relative_path(from, objpath)\n        else\n          link = objpath\n        end\n\n        return (\n          link.gsub(/\\.html$/, '').gsub(/\\.wiki$/, '') +\n          (anchor ? '#' + urlencode(anchor_for(anchor)) : '')\n        )\n      end\n\n      # Returns the URL for a specific file\n      #\n      # @param [String] filename the filename to link to\n      # @param [String] anchor optional anchor\n      # @return [String] the URL pointing to the file\n      def url_for_file(filename, anchor = nil)\n        fromobj = object\n        if CodeObjects::Base === fromobj && !fromobj.is_a?(CodeObjects::NamespaceObject)\n          fromobj = fromobj.namespace\n        end\n        from = serializer.serialized_path(fromobj)\n        if filename == options[:readme]\n          filename = 'Documentation'\n        else\n          filename = File.basename(filename).gsub(/\\.[^.]+$/, '').capitalize\n        end\n        link = File.relative_path(from, filename)\n        return (\n          link.gsub(/\\.html$/, '').gsub(/\\.wiki$/, '') +\n          (anchor ? '#' + urlencode(anchor) : '')\n        )\n      end\n\n      # @group Formatting Objects and Attributes\n\n      # Formats a list of objects and links them\n      # @return [String] a formatted list of objects\n      def format_object_name_list(objects)\n        objects.sort_by {|o| o.name.to_s.downcase }.map do |o|\n          \"<span class='name'>\" + linkify(o, o.name) + \"</span>\"\n        end.join(\", \")\n      end\n\n      # Formats a list of types from a tag.\n      #\n      # @param [Array<String>, FalseClass] typelist\n      #   the list of types to be formatted.\n      #\n      # @param [Boolean] brackets omits the surrounding\n      #   brackets if +brackets+ is set to +false+.\n      #\n      # @return [String] the list of types formatted\n      #   as [Type1, Type2, ...] with the types linked\n      #   to their respective descriptions.\n      #\n      def format_types(typelist, brackets = true)\n        return unless typelist.is_a?(Array)\n        list = typelist.map do |type|\n          type = type.gsub(/([<>])/) { h($1) }\n          type = type.gsub(/([\\w:]+)/) do\n            $1 == \"lt\" || $1 == \"gt\" ? \"`#{$1}`\" : linkify($1, $1)\n          end\n        end\n        list.empty? ? \"\" : (brackets ? \"(#{list.join(\", \")})\" : list.join(\", \"))\n      end\n\n      # Get the return types for a method signature.\n      #\n      # @param [CodeObjects::MethodObject] meth the method object\n      # @param [Boolean] link whether to link the types\n      # @return [String] the signature types\n      # @since 0.5.3\n      def signature_types(meth, link = true)\n        meth = convert_method_to_overload(meth)\n\n        type = options[:default_return] || \"\"\n        if meth.tag(:return) && meth.tag(:return).types\n          types = meth.tags(:return).map {|t| t.types ? t.types : [] }.flatten.uniq\n          first = link ? 
h(types.first) : format_types([types.first], false)\n          if types.size == 2 && types.last == 'nil'\n            type = first + '<sup>?</sup>'\n          elsif types.size == 2 && types.last =~ /^(Array)?<#{Regexp.quote types.first}>$/\n            type = first + '<sup>+</sup>'\n          elsif types.size > 2\n            type = [first, '...'].join(', ')\n          elsif types == ['void'] && options[:hide_void_return]\n            type = \"\"\n          else\n            type = link ? h(types.join(\", \")) : format_types(types, false)\n          end\n        elsif !type.empty?\n          type = link ? h(type) : format_types([type], false)\n        end\n        type = \"(#{type.to_s.strip}) \" unless type.empty?\n        type\n      end\n\n      # Formats the signature of method +meth+.\n      #\n      # @param [CodeObjects::MethodObject] meth the method object to list\n      #   the signature of\n      # @param [Boolean] link whether to link the method signature to the details view\n      # @param [Boolean] show_extras whether to show extra meta-data (visibility, attribute info)\n      # @param [Boolean] full_attr_name whether to show the full attribute name\n      #   (\"name=\" instead of \"name\")\n      # @return [String] the formatted method signature\n      def signature(meth, link = true, show_extras = true, full_attr_name = true)\n        meth = convert_method_to_overload(meth)\n\n        type = signature_types(meth, link)\n        name = full_attr_name ? meth.name : meth.name.to_s.gsub(/^(\\w+)=$/, '\\1')\n        blk = format_block(meth)\n        args = !full_attr_name && meth.writer? ? \"\" : format_args(meth)\n        extras = []\n        extras_text = ''\n        if show_extras\n          if rw = meth.attr_info\n            attname = [rw[:read] ? 'read' : nil, rw[:write] ? 'write' : nil].compact\n            attname = attname.size == 1 ? attname.join('') + 'only' : nil\n            extras << attname if attname\n          end\n          extras << meth.visibility if meth.visibility != :public\n          extras_text = ' <span class=\"extras\">(' + extras.join(\", \") + ')</span>' unless extras.empty?\n        end\n        title = \"%s *`%s`* `%s` `%s`\" % [type, h(name.to_s).strip, args, blk]\n        title.gsub!(/<tt>/, \"\")\n        title.gsub!(/<\\/tt>/, \"\")\n        title.gsub!(/`\\s*`/, \"\")\n        title.strip!\n        if link\n          if meth.is_a?(YARD::CodeObjects::MethodObject)\n            link_title =\n              \"#{h meth.name(true)} (#{meth.scope} #{meth.type})\"\n          else\n            link_title = \"#{h name} (#{meth.type})\"\n          end\n          # This has to be raw HTML, can't wiki-format a link title otherwise.\n          \"<a href=\\\"#{url_for(meth)}\\\">#{title}</a>#{extras_text}\"\n        else\n          title + extras_text\n        end\n      end\n\n      # @group Getting the Character Encoding\n\n      # Returns the current character set. The default value can be overridden\n      # by setting the +LANG+ environment variable or by overriding this\n      # method. 
In Ruby 1.9 you can also modify this value by setting\n      # +Encoding.default_external+.\n      #\n      # @return [String] the current character set\n      # @since 0.5.4\n      def charset\n        return 'utf-8' unless RUBY19 || lang = ENV['LANG']\n        if RUBY19\n          lang = Encoding.default_external.name.downcase\n        else\n          lang = lang.downcase.split('.').last\n        end\n        case lang\n        when \"ascii-8bit\", \"us-ascii\", \"ascii-7bit\"; 'iso-8859-1'\n        else; lang\n        end\n      end\n\n      # @endgroup\n\n      private\n\n      # Converts a set of hash options into HTML attributes for a tag\n      #\n      # @param [Hash{String => String}] opts the tag options\n      # @return [String] the tag attributes of an HTML tag\n      def tag_attrs(opts = {})\n        opts.sort_by {|k, v| k.to_s }.map {|k,v| \"#{k}=#{v.to_s.inspect}\" if v }.join(\" \")\n      end\n\n      # Converts a {CodeObjects::MethodObject} into an overload object\n      # @since 0.5.3\n      def convert_method_to_overload(meth)\n        # use first overload tag if it has a return type and method itself does not\n        if !meth.tag(:return) && meth.tags(:overload).size == 1 && meth.tag(:overload).tag(:return)\n          return meth.tag(:overload)\n        end\n        meth\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/yard/lib/yard-google-code.rb",
    "content": "$LOAD_PATH.unshift(File.expand_path(File.dirname(__FILE__)))\n$LOAD_PATH.uniq!\n\nYARD::Templates::Engine.register_template_path File.dirname(__FILE__) + '/../templates'\nrequire 'yard/templates/template'\nrequire 'yard/templates/helpers/wiki_helper'\n\n::YARD::Templates::Template.extra_includes |= [\n  YARD::Templates::Helpers::WikiHelper\n]\n\nrequire 'yard/serializers/wiki_serializer'\n"
  },
  {
    "path": "sdk/ruby-google-api-client/yard/templates/default/class/setup.rb",
    "content": "lib_dir = File.expand_path(File.join(File.dirname(__FILE__), '../../../lib'))\n$LOAD_PATH.unshift(lib_dir)\n$LOAD_PATH.uniq!\nrequire 'yard-google-code'\n\ninclude T('default/module')\n\ndef init\n  super\n  sections.place(:subclasses).before(:children)\n  sections.place(:constructor_details, [T('method_details')]).before(:methodmissing)\n  # Weird bug w/ doubled sections\n  sections.uniq!\nend\n\ndef constructor_details\n  ctors = object.meths(:inherited => true, :included => true)\n  return unless @ctor = ctors.find {|o| o.name == :initialize }\n  return if prune_method_listing([@ctor]).empty?\n  erb(:constructor_details)\nend\n\ndef subclasses\n  return if object.path == \"Object\" # don't show subclasses for Object\n  unless globals.subclasses\n    globals.subclasses = {}\n    list = run_verifier Registry.all(:class)\n    list.each do |o|\n      (globals.subclasses[o.superclass.path] ||= []) << o if o.superclass\n    end\n  end\n\n  @subclasses = globals.subclasses[object.path]\n  return if @subclasses.nil? || @subclasses.empty?\n  @subclasses = @subclasses.sort_by {|o| o.path }.map do |child|\n    name = child.path\n    if object.namespace\n      name = object.relative_path(child)\n    end\n    [name, child]\n  end\n  erb(:subclasses)\nend"
  },
  {
    "path": "sdk/ruby-google-api-client/yard/templates/default/docstring/setup.rb",
    "content": "lib_dir = File.expand_path(File.join(File.dirname(__FILE__), '../../../lib'))\n$LOAD_PATH.unshift(lib_dir)\n$LOAD_PATH.uniq!\nrequire 'yard-google-code'\n\ndef init\n  return if object.docstring.blank? && !object.has_tag?(:api)\n  sections :index, [:private, :deprecated, :abstract, :todo, :note, :returns_void, :text], T('tags')\nend\n\ndef private\n  return unless object.has_tag?(:api) && object.tag(:api).text == 'private'\n  erb(:private)\nend\n\ndef abstract\n  return unless object.has_tag?(:abstract)\n  erb(:abstract)\nend\n\ndef deprecated\n  return unless object.has_tag?(:deprecated)\n  erb(:deprecated)\nend\n\ndef todo\n  return unless object.has_tag?(:todo)\n  erb(:todo)\nend\n\ndef note\n  return unless object.has_tag?(:note)\n  erb(:note)\nend\n\ndef returns_void\n  return unless object.type == :method\n  return if object.name == :initialize && object.scope == :instance\n  return unless object.tags(:return).size == 1 && object.tag(:return).types == ['void']\n  erb(:returns_void)\nend\n\ndef docstring_text\n  text = \"\"\n  unless object.tags(:overload).size == 1 && object.docstring.empty?\n    text = object.docstring\n  end\n\n  if text.strip.empty? && object.tags(:return).size == 1 && object.tag(:return).text\n    text = object.tag(:return).text.gsub(/\\A([a-z])/) {|x| x.upcase }\n  end\n\n  text.strip\nend"
  },
  {
    "path": "sdk/ruby-google-api-client/yard/templates/default/method/setup.rb",
    "content": "lib_dir = File.expand_path(File.join(File.dirname(__FILE__), '../../../lib'))\n$LOAD_PATH.unshift(lib_dir)\n$LOAD_PATH.uniq!\nrequire 'yard-google-code'\n\ndef init\n  sections :header, [T('method_details')]\nend"
  },
  {
    "path": "sdk/ruby-google-api-client/yard/templates/default/method_details/setup.rb",
    "content": "lib_dir = File.expand_path(File.join(File.dirname(__FILE__), '../../../lib'))\n$LOAD_PATH.unshift(lib_dir)\n$LOAD_PATH.uniq!\nrequire 'yard-google-code'\n\ndef init\n  sections :header, [:method_signature, T('docstring')]\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/yard/templates/default/module/setup.rb",
    "content": "lib_dir = File.expand_path(File.join(File.dirname(__FILE__), '../../../lib'))\n$LOAD_PATH.unshift(lib_dir)\n$LOAD_PATH.uniq!\nrequire 'yard-google-code'\n\ninclude Helpers::ModuleHelper\n\ndef init\n  sections :header, :box_info, :pre_docstring, T('docstring'), :children,\n    :constant_summary, [T('docstring')], :inherited_constants,\n    :inherited_methods,\n    :methodmissing, [T('method_details')],\n    :attribute_details, [T('method_details')],\n    :method_details_list, [T('method_details')]\nend\n\ndef pre_docstring\n  return if object.docstring.blank?\n  erb(:pre_docstring)\nend\n\ndef children\n  @inner = [[:modules, []], [:classes, []]]\n  object.children.each do |child|\n    @inner[0][1] << child if child.type == :module\n    @inner[1][1] << child if child.type == :class\n  end\n  @inner.map! {|v| [v[0], run_verifier(v[1].sort_by {|o| o.name.to_s })] }\n  return if (@inner[0][1].size + @inner[1][1].size) == 0\n  erb(:children)\nend\n\ndef methodmissing\n  mms = object.meths(:inherited => true, :included => true)\n  return unless @mm = mms.find {|o| o.name == :method_missing && o.scope == :instance }\n  erb(:methodmissing)\nend\n\ndef method_listing(include_specials = true)\n  return @smeths ||= method_listing.reject {|o| special_method?(o) } unless include_specials\n  return @meths if @meths\n  @meths = object.meths(:inherited => false, :included => false)\n  @meths = sort_listing(prune_method_listing(@meths))\n  @meths\nend\n\ndef special_method?(meth)\n  return true if meth.name(true) == '#method_missing'\n  return true if meth.constructor?\n  false\nend\n\ndef attr_listing\n  return @attrs if @attrs\n  @attrs = []\n  [:class, :instance].each do |scope|\n    object.attributes[scope].each do |name, rw|\n      @attrs << (rw[:read] || rw[:write])\n    end\n  end\n  @attrs = sort_listing(prune_method_listing(@attrs, false))\nend\n\ndef constant_listing\n  return @constants if @constants\n  @constants = object.constants(:included => false, :inherited => false)\n  @constants += object.cvars\n  @constants = run_verifier(@constants)\n  @constants\nend\n\ndef sort_listing(list)\n  list.sort_by {|o| [o.scope.to_s, o.name.to_s.downcase] }\nend\n\ndef docstring_full(obj)\n  docstring = \"\"\n  if obj.tags(:overload).size == 1 && obj.docstring.empty?\n    docstring = obj.tag(:overload).docstring\n  else\n    docstring = obj.docstring\n  end\n\n  if docstring.summary.empty? && obj.tags(:return).size == 1 && obj.tag(:return).text\n    docstring = Docstring.new(obj.tag(:return).text.gsub(/\\A([a-z])/) {|x| x.upcase }.strip)\n  end\n\n  docstring\nend\n\ndef docstring_summary(obj)\n  docstring_full(obj).summary\nend\n\ndef groups(list, type = \"Method\")\n  if groups_data = object.groups\n    others = list.select {|m| !m.group }\n    groups_data.each do |name|\n      items = list.select {|m| m.group == name }\n      yield(items, name) unless items.empty?\n    end\n  else\n    others = []\n    group_data = {}\n    list.each do |meth|\n      if meth.group\n        (group_data[meth.group] ||= []) << meth\n      else\n        others << meth\n      end\n    end\n    group_data.each {|group, items| yield(items, group) unless items.empty? 
}\n  end\n\n  scopes(others) {|items, scope| yield(items, \"#{scope.to_s.capitalize} #{type} Summary\") }\nend\n\ndef scopes(list)\n  [:class, :instance].each do |scope|\n    items = list.select {|m| m.scope == scope }\n    yield(items, scope) unless items.empty?\n  end\nend\n\ndef mixed_into(object)\n  unless globals.mixed_into\n    globals.mixed_into = {}\n    list = run_verifier Registry.all(:class, :module)\n    list.each {|o| o.mixins.each {|m| (globals.mixed_into[m.path] ||= []) << o } }\n  end\n\n  globals.mixed_into[object.path] || []\nend\n"
  },
  {
    "path": "sdk/ruby-google-api-client/yard/templates/default/tags/setup.rb",
    "content": "lib_dir = File.expand_path(File.join(File.dirname(__FILE__), '../../../lib'))\n$LOAD_PATH.unshift(lib_dir)\n$LOAD_PATH.uniq!\nrequire 'yard-google-code'\n\ndef init\n  tags = Tags::Library.visible_tags - [:abstract, :deprecated, :note, :todo]\n  create_tag_methods(tags - [:example, :option, :overload, :see])\n  sections :index, tags\n  sections.any(:overload).push(T('docstring'))\nend\n\ndef return\n  if object.type == :method\n    return if object.name == :initialize && object.scope == :instance\n    return if object.tags(:return).size == 1 && object.tag(:return).types == ['void']\n  end\n  tag(:return)\nend\n\nprivate\n\ndef tag(name, opts = nil)\n  return unless object.has_tag?(name)\n  opts ||= options_for_tag(name)\n  @no_names = true if opts[:no_names]\n  @no_types = true if opts[:no_types]\n  @name = name\n  out = erb('tag')\n  @no_names, @no_types = nil, nil\n  out\nend\n\ndef create_tag_methods(tags)\n  tags.each do |tag|\n    next if respond_to?(tag)\n    instance_eval(<<-eof, __FILE__, __LINE__ + 1)\n      def #{tag}; tag(#{tag.inspect}) end\n    eof\n  end\nend\n\ndef options_for_tag(tag)\n  opts = {:no_types => true, :no_names => true}\n  case Tags::Library.factory_method_for(tag)\n  when :with_types\n    opts[:no_types] = false\n  when :with_types_and_name\n    opts[:no_types] = false\n    opts[:no_names] = false\n  when :with_name\n    opts[:no_names] = false\n  end\n  opts\nend\n"
  },
  {
    "path": "services/api/.gitignore",
    "content": "# Ignore the default SQLite database.\n/db/*.sqlite3\n\n# Ignore all logfiles and tempfiles.\n/log\n/tmp\n\n# Sensitive files and local configuration\n/config/database.yml\n/config/application.yml\n\n# asset cache\n/public/assets/\n\n/config/environments/development.rb\n/config/environments/production.rb\n/config/environments/test.rb\n\n# Capistrano files are coming from another repo\n/Capfile*\n/config/deploy*\n\n# SimpleCov reports\n/coverage\n\n# Dev/test SSL certificates\n/self-signed.key\n/self-signed.pem\n\n# Generated git-commit.version file\n/git-commit.version\n\n# Generated when building distribution packages\n/package-build.version\n\n# Debugger history\n.byebug_history\n"
  },
  {
    "path": "services/api/Gemfile",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nsource 'https://rubygems.org'\n\n# Rails 7.2.0 dropped Ruby 3.0.\ngem 'rails', '~> 7.1.5'\ngem 'responders'\ngem 'i18n'\ngem 'sprockets-rails'\n\ngroup :test, :development do\n  # factory_bot 6.4.6 dropped Ruby 2.7.  Ruby 3.0 compatibility might\n  # also be dropped at a patchlevel release.\n  # factory_bot 6.5.5 is the latest version we've tested.\n  gem 'factory_bot', '6.5.5'\n  # Similarly, factory_bot_rails 6.5.1 dropped Ruby 3.0.\n  gem 'factory_bot_rails', '6.5.0'\n  # ruby-prof 1.7.0 dropped Ruby 2.7.\n  # ruby-prof 1.7 is the latest minor version we've tested.\n  gem 'ruby-prof', '~> 1.7.0'\n  # Note: \"require: false\" here tells bunder not to automatically\n  # 'require' the packages during application startup. Installation is\n  # still mandatory.\n  gem 'test-unit', require: false\n  gem 'simplecov', require: false\n  gem 'simplecov-rcov', require: false\n  gem 'mocha', require: false\n  # byebug 12.0 dropped Ruby 3.0.\n  gem 'byebug', '~> 11.1'\n  gem 'listen'\nend\n\n# pg 1.6 prebuilt gems require newer glibc than RHEL 8 has.\ngem 'pg', '~> 1.0', force_ruby_platform: true\n\ngem 'oj'\n\ngem 'jquery-rails'\n\ngem 'acts_as_api'\n\ngem 'passenger', '~> 6.0.26'\n\n# minitest 5.26.2 dropped Ruby 3.0.\ngem 'minitest', '5.26.1'\n\ngem 'andand'\n\ngem 'optimist'\n\ngem 'arvados', '~> 3.2.0'\ngem 'httpclient'\n\ngem 'lograge'\ngem 'logstash-event'\n\ngem 'rails-observers'\n\ngem 'rails-perftest'\ngem 'rails-controller-testing'\n\ngem 'webrick'\n\ngem 'mini_portile2', '~> 2.8', '>= 2.8.1'\n\n# Enforce Ruby 3.0 compatibility for indirect dependencies.\n\n# nokogiri 1.18 dropped Ruby 3.0.\n# https://nokogiri.org/CHANGELOG.html#v1180-2024-12-25\ngem 'nokogiri', '~> 1.17.0'\n# net-imap 0.5.0 dropped Ruby 3.0.\n# https://github.com/ruby/net-imap/releases/tag/v0.5.0\ngem 'net-imap', '~> 0.4.0'\n# securerandom 0.4.0 dropped Ruby 2.7 and Ruby 3.0.\ngem 'securerandom', '~> 0.3.2'\n# multi_json 2.0 will drop Ruby 3.0.\n# https://github.com/sferik/multi_json/pull/16#issue-3237521157\ngem 'multi_json', '~> 1.0'\n# hash_validator 2.0 dropped Ruby 2.7.\ngem 'hash_validator', '~> 2.0'\n\n# Install any plugin gems\nDir.glob(File.join(File.dirname(__FILE__), 'lib', '**', \"Gemfile\")) do |f|\n    eval(IO.read(f), binding)\nend\n"
  },
  {
    "path": "services/api/Passengerfile.json",
    "content": "{\n    \"auto\": true,\n    \"envvars\": {\n        \"RUBYOPT\": \"--disable-did_you_mean --disable-error_highlight --disable-syntax_suggest\"\n    },\n    \"preload_bundler\": true\n}\n"
  },
  {
    "path": "services/api/README",
    "content": "== Welcome to Rails\n\nRails is a web-application framework that includes everything needed to create\ndatabase-backed web applications according to the Model-View-Control pattern.\n\nThis pattern splits the view (also called the presentation) into \"dumb\"\ntemplates that are primarily responsible for inserting pre-built data in between\nHTML tags. The model contains the \"smart\" domain objects (such as Account,\nProduct, Person, Post) that holds all the business logic and knows how to\npersist themselves to a database. The controller handles the incoming requests\n(such as Save New Account, Update Product, Show Post) by manipulating the model\nand directing data to the view.\n\nIn Rails, the model is handled by what's called an object-relational mapping\nlayer entitled Active Record. This layer allows you to present the data from\ndatabase rows as objects and embellish these data objects with business logic\nmethods. You can read more about Active Record in\nlink:files/vendor/rails/activerecord/README.html.\n\nThe controller and view are handled by the Action Pack, which handles both\nlayers by its two parts: Action View and Action Controller. These two layers\nare bundled in a single package due to their heavy interdependence. This is\nunlike the relationship between the Active Record and Action Pack that is much\nmore separate. Each of these packages can be used independently outside of\nRails. You can read more about Action Pack in\nlink:files/vendor/rails/actionpack/README.html.\n\n\n== Getting Started\n\n1. At the command prompt, create a new Rails application:\n       <tt>rails new myapp</tt> (where <tt>myapp</tt> is the application name)\n\n2. Change directory to <tt>myapp</tt> and start the web server:\n       <tt>cd myapp; rails server</tt> (run with --help for options)\n\n3. Go to http://localhost:3000/ and you'll see:\n       \"Welcome aboard: You're riding Ruby on Rails!\"\n\n4. Follow the guidelines to start developing your application. You can find\nthe following resources handy:\n\n* The Getting Started Guide: http://guides.rubyonrails.org/getting_started.html\n* Ruby on Rails Tutorial Book: http://www.railstutorial.org/\n\n\n== Debugging Rails\n\nSometimes your application goes wrong. Fortunately there are a lot of tools that\nwill help you debug it and get it back on the rails.\n\nFirst area to check is the application log files. Have \"tail -f\" commands\nrunning on the server.log and development.log. Rails will automatically display\ndebugging and runtime information to these files. Debugging info will also be\nshown in the browser on requests from 127.0.0.1.\n\nYou can also log your own messages directly into the log file from your code\nusing the Ruby logger class from inside your controllers. Example:\n\n  class WeblogController < ActionController::Base\n    def destroy\n      @weblog = Weblog.find(params[:id])\n      @weblog.destroy\n      logger.info(\"#{Time.now} Destroyed Weblog ID ##{@weblog.id}!\")\n    end\n  end\n\nThe result will be a message in your log file along the lines of:\n\n  Mon Oct 08 14:22:29 +1000 2007 Destroyed Weblog ID #1!\n\nMore information on how to use the logger is at http://www.ruby-doc.org/core/\n\nAlso, Ruby documentation can be found at http://www.ruby-lang.org/. 
There are\nseveral books available online as well:\n\n* Programming Ruby: http://www.ruby-doc.org/docs/ProgrammingRuby/ (Pickaxe)\n* Learn to Program: http://pine.fm/LearnToProgram/ (a beginners guide)\n\nThese two books will bring you up to speed on the Ruby language and also on\nprogramming in general.\n\n\n== Debugger\n\nDebugger support is available through the debugger command when you start your\nMongrel or WEBrick server with --debugger. This means that you can break out of\nexecution at any point in the code, investigate and change the model, and then,\nresume execution! You need to install ruby-debug to run the server in debugging\nmode. With gems, use <tt>sudo gem install ruby-debug</tt>. Example:\n\n  class WeblogController < ActionController::Base\n    def index\n      @posts = Post.all\n      debugger\n    end\n  end\n\nSo the controller will accept the action, run the first line, then present you\nwith a IRB prompt in the server window. Here you can do things like:\n\n  >> @posts.inspect\n  => \"[#<Post:0x14a6be8\n          @attributes={\"title\"=>nil, \"body\"=>nil, \"id\"=>\"1\"}>,\n       #<Post:0x14a6620\n          @attributes={\"title\"=>\"Rails\", \"body\"=>\"Only ten..\", \"id\"=>\"2\"}>]\"\n  >> @posts.first.title = \"hello from a debugger\"\n  => \"hello from a debugger\"\n\n...and even better, you can examine how your runtime objects actually work:\n\n  >> f = @posts.first\n  => #<Post:0x13630c4 @attributes={\"title\"=>nil, \"body\"=>nil, \"id\"=>\"1\"}>\n  >> f.\n  Display all 152 possibilities? (y or n)\n\nFinally, when you're ready to resume execution, you can enter \"cont\".\n\n\n== Console\n\nThe console is a Ruby shell, which allows you to interact with your\napplication's domain model. Here you'll have all parts of the application\nconfigured, just like it is when the application is running. You can inspect\ndomain models, change values, and save to the database. Starting the script\nwithout arguments will launch it in the development environment.\n\nTo start the console, run <tt>rails console</tt> from the application\ndirectory.\n\nOptions:\n\n* Passing the <tt>-s, --sandbox</tt> argument will rollback any modifications\n  made to the database.\n* Passing an environment name as an argument will load the corresponding\n  environment. Example: <tt>rails console production</tt>.\n\nTo reload your controllers and models after launching the console run\n<tt>reload!</tt>\n\nMore information about irb can be found at:\nlink:http://www.rubycentral.org/pickaxe/irb.html\n\n\n== dbconsole\n\nYou can go to the command line of your database directly through <tt>rails\ndbconsole</tt>. You would be connected to the database with the credentials\ndefined in database.yml. Starting the script without arguments will connect you\nto the development database. Passing an argument will connect you to a different\ndatabase, like <tt>rails dbconsole production</tt>. 
Currently works for MySQL,\nPostgreSQL and SQLite 3.\n\n== Description of Contents\n\nThe default directory structure of a generated Ruby on Rails application:\n\n  |-- app\n  |   |-- assets\n  |       |-- images\n  |       |-- javascripts\n  |       `-- stylesheets\n  |   |-- controllers\n  |   |-- helpers\n  |   |-- mailers\n  |   |-- models\n  |   `-- views\n  |       `-- layouts\n  |-- config\n  |   |-- environments\n  |   |-- initializers\n  |   `-- locales\n  |-- db\n  |-- doc\n  |-- lib\n  |   `-- tasks\n  |-- log\n  |-- public\n  |-- script\n  |-- test\n  |   |-- fixtures\n  |   |-- functional\n  |   |-- integration\n  |   |-- performance\n  |   `-- unit\n  |-- tmp\n  |   |-- cache\n  |   |-- pids\n  |   |-- sessions\n  |   `-- sockets\n  `-- vendor\n      |-- assets\n          `-- stylesheets\n      `-- plugins\n\napp\n  Holds all the code that's specific to this particular application.\n\napp/assets\n  Contains subdirectories for images, stylesheets, and JavaScript files.\n\napp/controllers\n  Holds controllers that should be named like weblogs_controller.rb for\n  automated URL mapping. All controllers should descend from\n  ApplicationController which itself descends from ActionController::Base.\n\napp/models\n  Holds models that should be named like post.rb. Models descend from\n  ActiveRecord::Base by default.\n\napp/views\n  Holds the template files for the view that should be named like\n  weblogs/index.html.erb for the WeblogsController#index action. All views use\n  eRuby syntax by default.\n\napp/views/layouts\n  Holds the template files for layouts to be used with views. This models the\n  common header/footer method of wrapping views. In your views, define a layout\n  using the <tt>layout :default</tt> and create a file named default.html.erb.\n  Inside default.html.erb, call <% yield %> to render the view using this\n  layout.\n\napp/helpers\n  Holds view helpers that should be named like weblogs_helper.rb. These are\n  generated for you automatically when using generators for controllers.\n  Helpers can be used to wrap functionality for your views into methods.\n\nconfig\n  Configuration files for the Rails environment, the routing map, the database,\n  and other dependencies.\n\ndb\n  Contains the database schema in schema.rb. db/migrate contains all the\n  sequence of Migrations for your schema.\n\ndoc\n  This directory is where your application documentation will be stored when\n  generated using <tt>rake doc:app</tt>\n\nlib\n  Application specific libraries. Basically, any kind of custom code that\n  doesn't belong under controllers, models, or helpers. This directory is in\n  the load path.\n\npublic\n  The directory available for the web server. Also contains the dispatchers and the\n  default HTML files. This should be set as the DOCUMENT_ROOT of your web\n  server.\n\nscript\n  Helper scripts for automation and generation.\n\ntest\n  Unit and functional tests along with fixtures. When using the rails generate\n  command, template test files will be generated for you and placed in this\n  directory.\n\nvendor\n  External libraries that the application depends on. Also includes the plugins\n  subdirectory. If the app has frozen rails, those gems also go here, under\n  vendor/rails/. This directory is in the load path.\n"
  },
  {
    "path": "services/api/Rakefile",
    "content": "#!/usr/bin/env rake\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Add your own tasks in files placed in lib/tasks ending in .rake,\n# for example lib/tasks/capistrano.rake, and they will automatically be available to Rake.\n\nrequire File.expand_path('../config/application', __FILE__)\nrequire 'rake/testtask'\n\nServer::Application.load_tasks\n\nnamespace :test do\n  task(:run).clear\n  # Copied from the definition in Rails 3.2.\n  # This may need to be updated if we upgrade Rails.\n  task :run do\n    errors = %w(test:units test:functionals test:integration test:tasks).collect do |task|\n      begin\n        Rake::Task[task].invoke\n        nil\n      rescue => e\n        { :task => task, :exception => e }\n      end\n    end.compact\n\n    if errors.any?\n      puts errors.map { |e| \"Errors running #{e[:task]}! #{e[:exception].inspect}\" }.join(\"\\n\")\n      abort\n    end\n  end\nend\n\nnamespace :db do\n  namespace :schema do\n    task :dump do\n      require 'tempfile'\n      origfnm = File.expand_path('../db/structure.sql', __FILE__)\n      tmpfnm = Tempfile.new 'structure.sql', File.expand_path('..', origfnm)\n      copyright_done = false\n      started = false\n      begin\n        tmpfile = File.new tmpfnm, 'w+'\n        origfile = File.new origfnm\n        origfile.each_line do |line|\n          if !copyright_done\n            if !/Copyright .* Arvados/.match(line)\n               tmpfile.write \"-- Copyright (C) The Arvados Authors. All rights reserved.\\n--\\n-- SPDX-License-Identifier: AGPL-3.0\\n\\n\"\n            end\n            copyright_done = true\n          end\n\n          if !started && /^[^-\\n]/ !~ line\n            # Ignore the \"PostgreSQL database dump\" comment block,\n            # which varies from one client version to the next.\n            next\n          end\n          started = true\n\n          if /^SET (lock_timeout|idle_in_transaction_session_timeout|row_security) = / =~ line\n            # Avoid edit wars between versions that do/don't write (and can/can't execute) this line.\n            next\n          elsif /^COMMENT ON EXTENSION/ =~ line\n            # Avoid warning message when loading:\n            # \"structure.sql:22: ERROR:  must be owner of extension plpgsql\"\n            tmpfile.write \"-- \"\n          end\n          tmpfile.write line\n        end\n        origfile.close\n\n        # Remove trailing blank lines by stripping all trailing \\n and\n        # then adding one back.\n        tmpfile.seek(-1, :END)\n        while tmpfile.read == \"\\n\"\n          tmpfile.truncate(tmpfile.tell - 1)\n          tmpfile.seek(-1, :END)\n        end\n        tmpfile.write \"\\n\"\n\n        tmpfile.close\n        File.rename tmpfnm, origfnm\n        tmpfnm = false\n      ensure\n        File.unlink tmpfnm if tmpfnm\n      end\n    end\n  end\nend\n\n# Work around Rails3+PostgreSQL9.5 incompatibility (pg_dump used to\n# accept -i as a no-op, but now it's not accepted at all).\nmodule Kernel\n  alias_method :orig_backtick, :`\n  def `(*args) #`#` sorry, parsers\n    args[0].sub!(/\\Apg_dump -i /, 'pg_dump ') rescue nil\n    orig_backtick(*args)\n  end\nend\n"
  },
  {
    "path": "services/api/app/assets/config/manifest.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n//= link_tree ../images\n//= link_directory ../javascripts .js\n//= link_directory ../stylesheets .css\n"
  },
  {
    "path": "services/api/app/assets/stylesheets/application.css",
    "content": "/* Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 */\n\n/*\n * This is a manifest file that'll automatically include all the stylesheets available in this directory\n * and any sub-directories. You're free to add application-wide styles to this file and they'll appear at\n * the top of the compiled file, but it's generally better to create a new file per style scope.\n *= require_self\n *= require_tree .\n*/\n\n.contain-align-left {\n    text-align: left;\n}\n\nbody {\n    margin: 0;\n}\nbody > div {\n    margin: 2px;\n}\ndiv#footer {\n    font-family: Verdana,Arial,sans-serif;\n    font-size: 12px;\n    margin-top: 24px;\n    border-top: 1px solid #ccc;\n}\ndiv#footer, div#footer a {\n    color: #777;\n}\ndiv#header {\n    margin: 0;\n    padding: .5em 1em;\n    background: #000;\n    font-weight: bold;\n    font-size: 18px;\n    font-family: Verdana,Arial,sans-serif;\n    vertical-align: middle;\n    color: #ddd;\n}\ndiv#header > div {\n    display: inline-block;\n    font-size: 12px;\n    line-height: 18px;\n}\ndiv#header > .apptitle {\n    font-size: 18px;\n}\ndiv#header a.logout {\n    color: #fff;\n    font-weight: normal;\n}\ndiv#header button {\n    font-size: 12px;\n}\ndiv#header span.beta {\n    opacity: 0.5;\n}\ndiv#header span.beta > span {\n    border-top: 1px solid #fff;\n    border-bottom: 1px solid #fff;\n    font-size: 0.8em;\n}\nimg.arvados-logo {\n    height: 66px;\n}\n#intropage {\n    font-family: Verdana,Arial,sans-serif;\n}\n#errorpage {\n    font-family: Verdana,Arial,sans-serif;\n}\n\ndiv.full-page-tab-set > ul > li {\n    font-size: 14px;\n}\n.titlebanner p {\n    font-size: 16px;\n}\np {\n    font-size: 12px;\n}\n.small-text {\n    font-size: 12px;\n}\n.autoui-icon-float-left {\n    float: left;\n    margin-right: .3em;\n}\n.autoui-pad {\n    padding: 0 1em;\n}\ntable.datatablesme {\n    border: 0;\n    border-collapse: collapse;\n    width: 100%;\n}\n.loadinggif {\n    background: #fff url(/images/ajax-loader-16-fff-aad.gif) no-repeat;\n}\n.clientprogressgif {\n    /* warning: depends on 24px outer container. 
*/\n    position: absolute;\n    left: 4px;\n    top: 4px;\n    width: 16px;\n    height: 16px;\n}\n.counttable {\n    width: 100%;\n    display: table;\n    border-collapse: collapse;\n    margin-bottom: 0.5em;\n}\n.counttable > div {\n    display: table-row;\n}\n.counttable > div > div {\n    display: table-cell;\n    text-align: center;\n    background: #ccf;\n    padding: 0 2px;\n    font-size: 0.8em;\n}\n.counttable > div > div.counter {\n    font-size: 2em;\n    padding: 4px 2px 0 2px;\n}\ntable.admin_table {\n    border-collapse: collapse;\n}\ntable.admin_table tbody tr {\n    height: 2.5em;\n}\ntable.admin_table th,table.admin_table td {\n    text-align: left;\n    border: 1px solid #bbb;\n    padding: 3px;\n}\ntable.admin_table tbody tr:hover {\n    background: #ff8;\n}\ntable.admin_table tbody tr:hover td {\n    background: transparent;\n}\n\ndiv.helptopics {\n    position: fixed;\n}\ndiv.helptopics ul {\n    padding: 0;\n    margin-left: 1em;\n    list-style-type: none;\n}\ndiv.helptopics ul li {\n    margin: 0 0 1em 0;\n}\ndiv.helpcontent li {\n    margin-bottom: .5em;\n}\n\ndiv.preview {\n    color: red;\n    font-weight: bold;\n    text-align: center;\n}\n\n.sudo-warning {\n    padding: 4px 10px;\n    background: #ffdd00;\n    color: red;\n    -webkit-border-radius: 3px;\n    -moz-border-radius: 3px;\n    border-radius: 3px\n}\n\ndiv#header a.sudo-logout {\n    color: #000;\n    font-weight: bold;\n}\n"
  },
  {
    "path": "services/api/app/assets/stylesheets/scaffolds.css.scss",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nbody {\n  background-color: #fff;\n  color: #333;\n  font-family: verdana, arial, helvetica, sans-serif;\n  font-size: 13px;\n  line-height: 18px;\n}\n\np, ol, ul, td {\n  font-family: verdana, arial, helvetica, sans-serif;\n  font-size: 13px;\n  line-height: 18px;\n}\n\npre {\n  background-color: #eee;\n  padding: 10px;\n  font-size: 11px;\n}\n\na {\n  color: #000;\n  &:visited {\n    color: #666;\n  }\n  &:hover {\n    color: #fff;\n    background-color: #000;\n  }\n}\n\ndiv {\n  &.field, &.actions {\n    margin-bottom: 10px;\n  }\n}\n\n#notice {\n  color: green;\n}\n\n.field_with_errors {\n  padding: 2px;\n  background-color: red;\n  display: table;\n}\n\n#error_explanation {\n  width: 450px;\n  border: 2px solid red;\n  padding: 7px;\n  padding-bottom: 0;\n  margin-bottom: 20px;\n  background-color: #f0f0f0;\n  h2 {\n    text-align: left;\n    font-weight: bold;\n    padding: 5px 5px 5px 15px;\n    font-size: 12px;\n    margin: -7px;\n    margin-bottom: 0px;\n    background-color: #c00;\n    color: #fff;\n  }\n  ul li {\n    font-size: 12px;\n    list-style: square;\n  }\n}\n"
  },
  {
    "path": "services/api/app/controllers/application_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'safe_json'\nrequire 'request_error'\n\nmodule ApiTemplateOverride\n  def allowed_to_render?(fieldset, field, model, options)\n    return false if !super\n    if options[:select]\n      options[:select].include? field.to_s\n    else\n      true\n    end\n  end\nend\n\nclass ActsAsApi::ApiTemplate\n  prepend ApiTemplateOverride\nend\n\nrequire 'load_param'\n\nclass ApplicationController < ActionController::Base\n  include CurrentApiClient\n  include LoadParam\n  include DbCurrentTime\n\n  respond_to :json\n\n  # Although CSRF protection is already enabled by default, this is\n  # still needed to reposition CSRF checks later in callback order.\n  protect_from_forgery\n\n  ERROR_ACTIONS = [:render_error, :render_not_found]\n\n  around_action :set_current_request_id\n  before_action :disable_api_methods\n  before_action :set_cors_headers\n  before_action :respond_with_json_by_default\n  before_action :remote_ip\n  before_action :load_read_auths\n  before_action :require_auth_scope, except: ERROR_ACTIONS\n\n  before_action :catch_redirect_hint\n  before_action :load_required_parameters\n  before_action :load_limit_offset_order_params, only: [:index, :contents]\n  before_action :load_select_param\n  before_action(:find_object_by_uuid,\n                except: [:index, :create, :update] + ERROR_ACTIONS)\n  before_action :find_object_for_update, only: [:update]\n  before_action :load_where_param, only: [:index, :contents]\n  before_action :load_filters_param, only: [:index, :contents]\n  before_action :find_objects_for_index, :only => :index\n  before_action(:set_nullable_attrs_to_null, only: [:update, :create])\n  before_action :reload_object_before_update, :only => :update\n  before_action(:render_404_if_no_object,\n                except: [:index, :create] + ERROR_ACTIONS)\n  before_action :only_admin_can_bypass_federation\n\n  attr_writer :resource_attrs\n\n  begin\n    rescue_from(Exception,\n                ArvadosModel::PermissionDeniedError,\n                :with => :render_error)\n    rescue_from(ActiveRecord::RecordNotFound,\n                ActionController::RoutingError,\n                AbstractController::ActionNotFound,\n                :with => :render_not_found)\n  end\n\n  def initialize *args\n    super\n    @object = nil\n    @objects = nil\n    @offset = nil\n    @limit = nil\n    @select = nil\n    @distinct = nil\n    @response_resource_name = nil\n    @attrs = nil\n    @extra_included = nil\n  end\n\n  def default_url_options\n    options = {}\n    if Rails.configuration.Services.Controller.ExternalURL != URI(\"\")\n      exturl = Rails.configuration.Services.Controller.ExternalURL\n      options[:host] = exturl.host\n      options[:port] = exturl.port\n      options[:protocol] = exturl.scheme\n    end\n    options\n  end\n\n  def index\n    if params[:eager] and params[:eager] != '0' and params[:eager] != 0 and params[:eager] != ''\n      @objects.each(&:eager_load_associations)\n    end\n    render_list\n  end\n\n  def show\n    send_json @object.as_api_response(nil, select: select_for_klass(@select, model_class))\n  end\n\n  def create\n    @object = model_class.new resource_attrs\n\n    if @object.respond_to?(:name) && params[:ensure_unique_name]\n      @object.save_with_unique_name!\n    else\n      @object.save!\n    end\n\n    show\n  end\n\n  def update\n    attrs_to_update = resource_attrs.reject { |k,v|\n      [:kind, :etag, :href].index 
k\n    }\n    @object.update! attrs_to_update\n    show\n  end\n\n  def destroy\n    @object.destroy\n    show\n  end\n\n  def catch_redirect_hint\n    if !current_user\n      if params.has_key?('redirect_to') then\n        session[:redirect_to] = params[:redirect_to]\n      end\n    end\n  end\n\n  def render_404_if_no_object\n    render_not_found \"Object not found\" if !@object\n  end\n\n  def only_admin_can_bypass_federation\n    unless !params[:bypass_federation] || current_user.andand.is_admin\n      send_error(\"The bypass_federation parameter is only permitted when current user is admin\", status: 403)\n    end\n  end\n\n  def render_error(e)\n    logger.error e.inspect\n    if e.respond_to? :backtrace and e.backtrace\n      # This will be cleared by lograge after adding it to the log.\n      # Usually lograge would get the exceptions, but in our case we're catching\n      # all of them with exception handlers that cannot re-raise them because they\n      # don't get propagated.\n      Thread.current[:exception] = e.inspect\n      Thread.current[:backtrace] = e.backtrace.collect { |x| x + \"\\n\" }.join('')\n    end\n    if (@object.respond_to? :errors and\n        @object.errors.andand.full_messages.andand.any?)\n      errors = @object.errors.full_messages\n      logger.error errors.inspect\n    else\n      errors = [e.inspect]\n    end\n\n    case e\n    when ActiveRecord::Deadlocked,\n         ActiveRecord::ConnectionNotEstablished,\n         ActiveRecord::LockWaitTimeout,\n         ActiveRecord::QueryAborted\n      status = 500\n    else\n      status = e.respond_to?(:http_status) ? e.http_status : 422\n    end\n\n    send_error(*errors, status: status)\n  end\n\n  def render_not_found(e=ActionController::RoutingError.new(\"Path not found\"))\n    logger.error e.inspect\n    send_error(\"Path not found\", status: 404)\n  end\n\n  def render_accepted\n    send_json ({accepted: true}), status: 202\n  end\n\n  protected\n\n  def bool_param(pname)\n    if params.include?(pname)\n      if params[pname].is_a?(Boolean)\n        return params[pname]\n      else\n        logger.warn \"Warning: received non-boolean value #{params[pname].inspect} for boolean parameter #{pname} on #{self.class.inspect}, treating as false.\"\n      end\n    end\n    false\n  end\n\n  def send_error(*args)\n    if args.last.is_a? Hash\n      err = args.pop\n    else\n      err = {}\n    end\n    err[:errors] ||= args\n    err[:errors].map! do |err|\n      err += \" (#{request.request_id})\"\n    end\n    err[:error_token] = [Time.now.utc.to_i, \"%08x\" % rand(16 ** 8)].join(\"+\")\n    status = err.delete(:status) || 422\n    logger.error \"Error #{err[:error_token]}: #{status}\"\n    send_json err, status: status\n  end\n\n  def send_json response, opts={}\n    # The obvious render(json: ...) forces a slow JSON encoder. See\n    # #3021 and commit logs. 
Might be fixed in Rails 4.1.\n    render({\n             plain: SafeJSON.dump(response).html_safe,\n             content_type: 'application/json'\n           }.merge opts)\n  end\n\n  def find_objects_for_index\n    @objects ||= model_class.readable_by(*@read_users, {\n      :include_trash => ((self.class._index_requires_parameters[:include_trash] && bool_param(:include_trash)) || 'untrash' == action_name),\n      :include_old_versions => self.class._index_requires_parameters[:include_old_versions] && bool_param(:include_old_versions)\n    })\n    apply_where_limit_order_params\n  end\n\n  def apply_filters model_class=nil\n    model_class ||= self.model_class\n    @objects = model_class.apply_filters(@objects, @filters)\n  end\n\n  def select_for_klass sel, model_class, raise_unknown=true\n    return nil if sel.nil?\n    # Filter the select fields to only the ones that apply to the\n    # given class.\n    sel.map do |column|\n      sp = column.split(\".\")\n      if sp.length == 2 && sp[0] == model_class.table_name && model_class.selectable_attributes.include?(sp[1])\n        sp[1]\n      elsif model_class.selectable_attributes.include? column\n        column\n      elsif raise_unknown\n        raise ArgumentError.new(\"Invalid attribute '#{column}' of #{model_class.name} in select parameter\")\n      else\n        nil\n      end\n    end.compact\n  end\n\n  def apply_where_limit_order_params model_class=nil\n    model_class ||= self.model_class\n    apply_filters model_class\n\n    ar_table_name = @objects.table_name\n    if @where.is_a? Hash and @where.any?\n      conditions = ['1=1']\n      @where.each do |attr,value|\n        if attr.to_s == 'any'\n          if value.is_a?(Array) and\n              value.length == 2 and\n              value[0] == 'contains' then\n            ilikes = []\n            model_class.any_searchable_columns('ilike').each do |column|\n              # Including owner_uuid in an \"any column\" search will\n              # probably just return a lot of false positives.\n              next if column == 'owner_uuid'\n              ilikes << \"#{ar_table_name}.#{column} ilike ?\"\n              conditions << \"%#{value[1]}%\"\n            end\n            if ilikes.any?\n              conditions[0] << ' and (' + ilikes.join(' or ') + ')'\n            end\n          else\n            equals = []\n            model_class.any_searchable_columns('=').each do |column|\n              equals << \"#{ar_table_name}.#{column} = ?\"\n              conditions << value\n            end\n            conditions[0] << ' and (' + equals.join(' or ') + ')'\n          end\n        elsif attr.to_s.match(/^[a-z][_a-z0-9]+$/) and\n            model_class.columns.collect(&:name).index(attr.to_s)\n          if value.nil?\n            conditions[0] << \" and #{ar_table_name}.#{attr} is ?\"\n            conditions << nil\n          elsif value.is_a? Array\n            if value[0] == 'contains' and value.length == 2\n              conditions[0] << \" and #{ar_table_name}.#{attr} like ?\"\n              conditions << \"%#{value[1]}%\"\n            else\n              conditions[0] << \" and #{ar_table_name}.#{attr} in (?)\"\n              conditions << value\n            end\n          elsif value.is_a? String or value.is_a? Integer or value == true or value == false\n            conditions[0] << \" and #{ar_table_name}.#{attr}=?\"\n            conditions << value\n          elsif value.is_a? 
Hash\n            # Not quite the same thing as \"equal?\" but better than nothing?\n            value.each do |k,v|\n              if v.is_a? String\n                conditions[0] << \" and #{ar_table_name}.#{attr} ilike ?\"\n                conditions << \"%#{k}%#{v}%\"\n              end\n            end\n          end\n        end\n      end\n      if conditions.length > 1\n        conditions[0].sub!(/^1=1 and /, '')\n        @objects = @objects.\n          where(*conditions)\n      end\n    end\n\n    if @select\n      unless action_name.in? %w(create update destroy)\n        # Map attribute names in @select to real column names, resolve\n        # those to fully-qualified SQL column names, and pass the\n        # resulting string to the select method.\n        columns_list = model_class.columns_for_attributes(select_for_klass @select, model_class).\n          map { |s| \"#{ar_table_name}.#{ActiveRecord::Base.connection.quote_column_name s}\" }\n        @objects = @objects.select(columns_list.join(\", \"))\n      end\n\n      # This information helps clients understand what they're seeing\n      # (Workbench always expects it), but they can't select it explicitly\n      # because it's not an SQL column.  Always add it.\n      # (This is harmless, given that clients can deduce what they're\n      # looking at by the returned UUID anyway.)\n      @select |= [\"kind\"]\n    end\n    @objects = @objects.order(@orders.join \", \") if @orders.any?\n    @objects = @objects.limit(@limit)\n    @objects = @objects.offset(@offset)\n    @objects = @objects.distinct() if @distinct\n  end\n\n  # limit_database_read ensures @objects (which must be an\n  # ActiveRelation) does not return too many results to fit in memory,\n  # by previewing the results and calling @objects.limit() if\n  # necessary.\n  def limit_database_read(model_class:)\n    return if @limit == 0 || @limit == 1\n    model_class ||= self.model_class\n    limit_columns = model_class.limit_index_columns_read\n    limit_columns &= model_class.columns_for_attributes(select_for_klass @select, model_class) if @select\n    return if limit_columns.empty?\n    model_class.transaction do\n      # This query does not use `pg_column_size()` because the returned value\n      # can be smaller than the apparent length thanks to compression.\n      # `octet_length(::text)` better reflects how expensive it will be for\n      # Rails to process the data.\n      limit_query = @objects.\n        except(:select, :distinct).\n        select(\"(%s) as read_length\" %\n               limit_columns.map { |s| \"coalesce(octet_length(#{model_class.table_name}.#{s}::text),0)\" }.join(\" + \"))\n      new_limit = 0\n      read_total = 0\n      limit_query.each do |record|\n        new_limit += 1\n        read_total += record.read_length.to_i\n        if read_total >= Rails.configuration.API.MaxIndexDatabaseRead\n          new_limit -= 1 if new_limit > 1\n          @limit = new_limit\n          break\n        elsif new_limit >= @limit\n          break\n        end\n      end\n      @objects = @objects.limit(@limit)\n      # Force @objects to run its query inside this transaction.\n      @objects.each { |_| break }\n    end\n  end\n\n  def resource_attrs\n    return @attrs if @attrs\n    @attrs = params[resource_name]\n    if @attrs.nil?\n      @attrs = {}\n    elsif @attrs.is_a? String\n      @attrs = Oj.strict_load @attrs, symbol_keys: true\n    end\n    unless [Hash, ActionController::Parameters].include? 
@attrs.class\n      message = \"No #{resource_name}\"\n      if resource_name.index('_')\n        message << \" (or #{resource_name.camelcase(:lower)})\"\n      end\n      message << \" hash provided with request\"\n      raise ArgumentError.new(message)\n    end\n    %w(created_at modified_by_client_uuid modified_by_user_uuid modified_at).each do |x|\n      @attrs.delete x.to_sym\n    end\n    @attrs = @attrs.symbolize_keys if @attrs.is_a? ActiveSupport::HashWithIndifferentAccess\n    @attrs\n  end\n\n  # Authentication\n  def load_read_auths\n    @read_auths = []\n    if current_api_client_authorization\n      @read_auths << current_api_client_authorization\n    end\n    # Load reader tokens if this is a read request.\n    # If there are too many reader tokens, assume the request is malicious\n    # and ignore it.\n    if request.get? and params[:reader_tokens] and\n      params[:reader_tokens].size < 100\n      secrets = params[:reader_tokens].map { |t|\n        if t.is_a? String and t.starts_with? \"v2/\"\n          t.split(\"/\")[2]\n        else\n          t\n        end\n      }\n      @read_auths += ApiClientAuthorization\n        .includes(:user)\n        .where('api_token IN (?) AND\n                (least(expires_at, refreshes_at) IS NULL\n                 OR least(expires_at, refreshes_at) > CURRENT_TIMESTAMP)',\n               secrets)\n        .to_a\n    end\n    @read_auths.select! { |auth| auth.scopes_allow_request? request }\n    @read_users = @read_auths.map(&:user).uniq\n  end\n\n  def require_login\n    if not current_user\n      respond_to do |format|\n        format.json { send_error(\"Not logged in\", status: 401) }\n        format.html { redirect_to '/login' }\n      end\n      false\n    end\n  end\n\n  def admin_required\n    unless current_user and current_user.is_admin\n      send_error(\"Forbidden\", status: 403)\n    end\n  end\n\n  def require_auth_scope\n    unless current_user && @read_auths.any? { |auth| auth.user.andand.uuid == current_user.uuid }\n      if require_login != false\n        send_error(\"Forbidden\", status: 403)\n      end\n      false\n    end\n  end\n\n  def set_current_request_id\n    Rails.logger.tagged(request.request_id) do\n      yield\n    end\n  end\n\n  def append_info_to_payload(payload)\n    super\n    payload[:request_id] = request.request_id\n    payload[:client_ipaddr] = @remote_ip\n    payload[:client_auth] = current_api_client_authorization.andand.uuid || nil\n  end\n\n  def disable_api_methods\n    if Rails.configuration.API.DisabledAPIs[controller_name + \".\" + action_name]\n      send_error(\"Disabled\", status: 404)\n    end\n  end\n\n  def set_cors_headers\n    response.headers['Access-Control-Allow-Origin'] = '*'\n    response.headers['Access-Control-Allow-Methods'] = 'GET, HEAD, PUT, POST, DELETE'\n    response.headers['Access-Control-Allow-Headers'] = 'Authorization, Content-Type'\n    response.headers['Access-Control-Max-Age'] = '86486400'\n  end\n\n  def respond_with_json_by_default\n    html_index = request.accepts.index(Mime[:html])\n    if html_index.nil? 
or request.accepts[0...html_index].include?(Mime[:json])\n      request.format = :json\n    end\n  end\n\n  def model_class\n    controller_name.classify.constantize\n  end\n\n  def resource_name             # params[] key used by client\n    controller_name.singularize\n  end\n\n  def table_name\n    controller_name\n  end\n\n  def find_object_for_update\n    find_object_by_uuid(with_lock: true)\n  end\n\n  def find_object_by_uuid(with_lock: false)\n    if params[:id] and params[:id].match(/\\D/)\n      params[:uuid] = params.delete :id\n    end\n    @where = {}\n    # Some APIs (at least groups/contents) take an optional uuid argument.\n    # They go through this method to handle it when present but we cannot\n    # assume it is always set.\n    @where[:uuid] = params[:uuid] if params[:uuid]\n    @offset = 0\n    @limit = 1\n    @orders = []\n    @filters = []\n    @objects = nil\n\n    # This is a little hacky but sometimes the fields the user wants\n    # to select on are unrelated to the object being loaded here,\n    # for example groups#contents, so filter the fields that will be\n    # used in find_objects_for_index and then reset afterwards.  In\n    # some cases, code that modifies the @select list needs to set\n    # @preserve_select.\n    @preserve_select = @select\n    @select = select_for_klass(@select, self.model_class, false)\n\n    find_objects_for_index\n    if with_lock && Rails.configuration.API.LockBeforeUpdate\n      @object = @objects.lock.first\n    else\n      @object = @objects.first\n    end\n    @select = @preserve_select\n  end\n\n  def nullable_attributes\n    []\n  end\n\n  # Go code may send empty values (i.e. an empty string instead of NULL) that\n  # should be translated to NULL in the database.\n  def set_nullable_attrs_to_null\n    nullify_attrs(resource_attrs.to_hash).each do |k, v|\n      resource_attrs[k] = v\n    end\n  end\n\n  def nullify_attrs(a = {})\n    new_attrs = a.to_hash.symbolize_keys\n    (new_attrs.keys & nullable_attributes).each do |attr|\n      val = new_attrs[attr]\n      if (val.class == Integer && val == 0) || (val.class == String && val == \"\")\n        new_attrs[attr] = nil\n      end\n    end\n    return new_attrs\n  end\n\n  def reload_object_before_update\n    # This is necessary to prevent an ActiveRecord::ReadOnlyRecord\n    # error when updating an object which was retrieved using a join.\n    if @object.andand.readonly?\n      @object = model_class.find_by_uuid(@objects.first.uuid)\n    end\n  end\n\n  def load_json_value(hash, key, must_be_class=nil)\n    return if hash[key].nil?\n\n    val = hash[key]\n    if val.is_a? ActionController::Parameters\n      val = val.to_unsafe_hash\n    elsif val.is_a? String\n      val = SafeJSON.load(val)\n      hash[key] = val\n    end\n    # When assigning a Hash to an ActionController::Parameters and then\n    # retrieving it, we get another ActionController::Parameters instead of\n    # a Hash. This doesn't happen with other types. This is why 'val' is\n    # being used to do type checking below.\n    if must_be_class and !val.is_a? 
must_be_class\n      raise TypeError.new(\"parameter #{key.to_s} must be a #{must_be_class.to_s}\")\n    end\n  end\n\n  def self.accept_attribute_as_json(attr, must_be_class=nil)\n    before_action lambda { accept_attribute_as_json attr, must_be_class }\n  end\n  accept_attribute_as_json :properties, Hash\n  accept_attribute_as_json :info, Hash\n  def accept_attribute_as_json(attr, must_be_class)\n    if params[resource_name] and [Hash, ActionController::Parameters].include?(resource_attrs.class)\n      if resource_attrs[attr].is_a? Hash\n        # Convert symbol keys to strings (in hashes provided by\n        # resource_attrs)\n        resource_attrs[attr] = resource_attrs[attr].\n          with_indifferent_access.to_hash\n      else\n        load_json_value(resource_attrs, attr, must_be_class)\n      end\n    end\n  end\n\n  def self.accept_param_as_json(key, must_be_class=nil)\n    prepend_before_action lambda { load_json_value(params, key, must_be_class) }\n  end\n  accept_param_as_json :reader_tokens, Array\n\n  def object_list(model_class:)\n    if @objects.respond_to?(:except)\n      limit_database_read(model_class: model_class)\n    end\n    list = {\n      :kind  => \"arvados##{(@response_resource_name || resource_name).camelize(:lower)}List\",\n      :etag => \"\",\n      :self_link => \"\",\n      :offset => @offset,\n      :limit => @limit,\n      :items => @objects.as_api_response(nil, {select: @select})\n    }\n    if @extra_included\n      list[:included] = @extra_included.as_api_response(nil, {select: @select})\n    end\n    case params[:count]\n    when nil, '', 'exact'\n      if @objects.respond_to? :except\n        list[:items_available] = @objects.\n          except(:limit).except(:offset).\n          count(@distinct ? :id : '*')\n      end\n    when 'none'\n    else\n      raise ArgumentError.new(\"count parameter must be 'exact' or 'none'\")\n    end\n    list\n  end\n\n  def render_list\n    send_json object_list(model_class: self.model_class)\n  end\n\n  def remote_ip\n    # Caveat: this is highly dependent on the proxy setup. YMMV.\n    if request.headers.key?('HTTP_X_REAL_IP') then\n      # We're behind a reverse proxy\n      @remote_ip = request.headers['HTTP_X_REAL_IP']\n    else\n      # Hopefully, we are not!\n      @remote_ip = request.env['REMOTE_ADDR']\n    end\n  end\n\n  def load_required_parameters\n    (self.class.send \"_#{params[:action]}_requires_parameters\" rescue {}).\n      each do |key, info|\n      if info[:required] and not params.include?(key)\n        raise ArgumentError.new(\"#{key} parameter is required\")\n      elsif info[:type] == 'boolean'\n        # Make sure params[key] is either true or false -- not a\n        # string, not nil, etc.\n        if not params.include?(key)\n          params[key] = info[:default] || false\n        elsif [false, 'false', '0', 0].include? params[key]\n          params[key] = false\n        elsif [true, 'true', '1', 1].include? 
params[key]\n          params[key] = true\n        else\n          raise TypeError.new(\"#{key} parameter must be a boolean, true or false\")\n        end\n      end\n    end\n    true\n  end\n\n  def self._create_requires_parameters\n    {\n      select: {\n        type: 'array',\n        description: \"An array of names of attributes to return in the response.\",\n        required: false,\n      },\n      ensure_unique_name: {\n        type: \"boolean\",\n        description: \"If the given name is already used by this owner, adjust the name to ensure uniqueness instead of returning an error.\",\n        location: \"query\",\n        required: false,\n        default: false\n      },\n      cluster_id: {\n        type: 'string',\n        description: \"Cluster ID of a federated cluster where this object should be created.\",\n        location: \"query\",\n        required: false,\n      },\n    }\n  end\n\n  def self._update_requires_parameters\n    {\n      select: {\n        type: 'array',\n        description: \"An array of names of attributes to return in the response.\",\n        required: false,\n      },\n    }\n  end\n\n  def self._show_requires_parameters\n    {\n      select: {\n        type: 'array',\n        description: \"An array of names of attributes to return in the response.\",\n        required: false,\n      },\n    }\n  end\n\n  def self._index_requires_parameters\n    {\n      filters: {\n        type: 'array',\n        required: false,\n        description: \"Filters to limit which objects are returned by their attributes.\nRefer to the [filters reference][] for more information about how to write filters.\n\n[filters reference]: https://doc.arvados.org/api/methods.html#filters\n\",\n      },\n      where: {\n        type: 'object',\n        required: false,\n        description: \"An object to limit which objects are returned by their attributes.\nThe keys of this object are attribute names.\nEach value is either a single matching value or an array of matching values for that attribute.\nThe `filters` parameter is more flexible and preferred.\n\",\n      },\n      order: {\n        type: 'array',\n        required: false,\n        description: \"An array of strings to set the order in which matching objects are returned.\nEach string has the format `<ATTRIBUTE> <DIRECTION>`.\n`DIRECTION` can be `asc` or omitted for ascending, or `desc` for descending.\n\",\n      },\n      select: {\n        type: 'array',\n        description: \"An array of names of attributes to return from each matching object.\",\n        required: false,\n      },\n      distinct: {\n        type: 'boolean',\n        required: false,\n        default: false,\n        description: \"If this is true, and multiple objects have the same values\nfor the attributes that you specify in the `select` parameter, then each unique\nset of values will only be returned once in the result set.\n\",\n      },\n      limit: {\n        type: 'integer',\n        required: false,\n        default: DEFAULT_LIMIT,\n        description: \"The maximum number of objects to return in the result.\nNote that the API may return fewer results than this if your request hits other\nlimits set by the administrator.\n\",\n      },\n      offset: {\n        type: 'integer',\n        required: false,\n        default: 0,\n        description: \"Return matching objects starting from this index.\nNote that result indexes may change if objects are modified in between a series\nof list calls.\n\",\n      },\n      count: {\n        
type: 'string',\n        required: false,\n        default: 'exact',\n        description: \"A string to determine result counting behavior. Supported values are:\n\n  * `\\\"exact\\\"`: The response will include an `items_available` field that\n    counts the number of objects that matched these search criteria,\n    including ones not included in `items`.\n\n  * `\\\"none\\\"`: The response will not include an `items_available`\n    field. This improves performance by returning a result as soon as enough\n    `items` have been loaded for this result.\n\n\",\n      },\n      cluster_id: {\n        type: 'string',\n        description: \"Cluster ID of a federated cluster to return objects from.\",\n        location: \"query\",\n        required: false,\n      },\n      bypass_federation: {\n        type: 'boolean',\n        required: false,\n        default: false,\n        description: \"If true, do not return results from other clusters in the\nfederation, only the cluster that received the request.\nYou must be an administrator to use this flag.\n\",\n      }\n    }\n  end\n\n  def render *opts\n    if opts.first\n      response = opts.first[:json]\n      if response.is_a?(Hash) &&\n          params[:_profile] &&\n          Thread.current[:request_starttime]\n        response[:_profile] = {\n          request_time: Time.now - Thread.current[:request_starttime]\n        }\n      end\n    end\n    super(*opts)\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/api_client_authorizations_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'safe_json'\n\nclass Arvados::V1::ApiClientAuthorizationsController < ApplicationController\n  accept_attribute_as_json :scopes, Array\n  before_action :check_issue_trusted_tokens, :except => [:current]\n  before_action :admin_required, :only => :create_system_auth\n  skip_before_action :render_404_if_no_object, :only => [:create_system_auth, :current]\n  skip_before_action :find_object_by_uuid, :only => [:create_system_auth, :current]\n\n  def self._create_system_auth_method_description\n    \"Create a token for the system (\\\"root\\\") user.\"\n  end\n\n  def self._create_system_auth_requires_parameters\n    {\n      scopes: {\n        type: 'array',\n        required: false,\n        default: [\"all\"],\n        description: \"An array of strings defining the scope of resources this token will be allowed to access. Refer to the [scopes reference][] for details.\n\n[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes\n\",\n      }\n    }\n  end\n\n  def self._current_method_description\n    \"Return all metadata for the token used to authorize this request.\"\n  end\n\n  def create_system_auth\n    @object = ApiClientAuthorization.\n      new(user_id: system_user.id,\n          created_by_ip_address: remote_ip,\n          scopes: SafeJSON.load(params[:scopes] || '[\"all\"]'))\n    @object.save!\n    show\n  end\n\n  def create\n    # Note: the user could specify a owner_uuid for a different user, which on\n    # the surface appears to be a security hole.  However, the record will be\n    # rejected before being saved to the database by the ApiClientAuthorization\n    # model which enforces that user_id == current user or the user is an\n    # admin.\n\n    if resource_attrs[:owner_uuid]\n      # The model has an owner_id attribute instead of owner_uuid, but\n      # we can't expect the client to know the local numeric ID. We\n      # translate UUID to numeric ID here.\n      resource_attrs[:user_id] =\n        User.where(uuid: resource_attrs.delete(:owner_uuid)).first.andand.id\n    else\n      resource_attrs[:user_id] = current_user.id\n    end\n    super\n  end\n\n  def current\n    @object = Thread.current[:api_client_authorization].dup\n    if params[:remote]\n      # Client is validating a salted token. Don't return the unsalted\n      # secret!\n      @object.api_token = nil\n    end\n    show\n  end\n\n  protected\n\n  def default_orders\n    [\"#{table_name}.created_at desc\"]\n  end\n\n  def find_objects_for_index\n    # Here we are deliberately less helpful about searching for client\n    # authorizations.  We look up tokens belonging to the current user\n    # and filter by exact matches on uuid, api_token, and scopes.\n    wanted_scopes = []\n    if @filters\n      wanted_scopes.concat(@filters.map { |attr, operator, operand|\n        ((attr == 'scopes') and (operator == '=')) ? operand : nil\n      })\n      @filters.select! { |attr, operator, operand|\n        operator == '=' && (attr == 'uuid' || attr == 'api_token')\n      }\n    end\n    if @where\n      wanted_scopes << @where['scopes']\n      @where.select! 
{ |attr, val|\n        # \"where\":{\"uuid\":\"zzzzz-zzzzz-zzzzzzzzzzzzzzz\"} is OK but\n        # \"where\":{\"uuid\":[\"contains\",\"-\"]} is not supported\n        # \"where\":{\"uuid\":[\"uuid1\",\"uuid2\",\"uuid3\"]} is not supported\n        val.is_a?(String) && (attr == 'uuid' || attr == 'api_token')\n      }\n    end\n    if current_api_client_authorization.andand.api_token != Rails.configuration.SystemRootToken\n      @objects = model_class.where('user_id=?', current_user.id)\n    end\n    if wanted_scopes.compact.any?\n      # We can't filter on scopes effectively using AR/postgres.\n      # Instead we get the entire result set, do our own filtering on\n      # scopes to get a list of UUIDs, then start a new query\n      # (restricted to the selected UUIDs) so super can apply the\n      # offset/limit/order params in the usual way.\n      @request_limit = @limit\n      @request_offset = @offset\n      @limit = @objects.count\n      @offset = 0\n      super\n      wanted_scopes.compact.each do |scope_list|\n        if @objects.respond_to?(:where) && scope_list.length < 2\n          @objects = @objects.\n                     where('scopes in (?)',\n                           [scope_list.to_yaml, SafeJSON.dump(scope_list)])\n        else\n          if @objects.respond_to?(:where)\n            # Eliminate rows with scopes=['all'] before doing the\n            # expensive filter. They are typically the majority of\n            # rows, and they obviously won't match given\n            # scope_list.length>=2, so loading them all into\n            # ActiveRecord objects is a huge waste of time.\n            @objects = @objects.\n                       where('scopes not in (?)',\n                             [['all'].to_yaml, SafeJSON.dump(['all'])])\n          end\n          sorted_scopes = scope_list.sort\n          @objects = @objects.select { |auth| auth.scopes.sort == sorted_scopes }\n        end\n      end\n      @limit = @request_limit\n      @offset = @request_offset\n      @objects = model_class.where('uuid in (?)', @objects.collect(&:uuid))\n    end\n    super\n  end\n\n  def find_object_by_uuid(with_lock: false)\n    uuid_param = params[:uuid] || params[:id]\n    if (uuid_param != current_api_client_authorization.andand.uuid &&\n        !Rails.configuration.Login.IssueTrustedTokens)\n      return forbidden\n    end\n    @limit = 1\n    @offset = 0\n    @orders = []\n    @where = {}\n    @filters = [['uuid', '=', uuid_param]]\n    find_objects_for_index\n    query = @objects\n    if with_lock && Rails.configuration.API.LockBeforeUpdate\n      query = query.lock\n    end\n    @object = query.first\n  end\n\n  def check_issue_trusted_tokens\n    return true if current_api_client_authorization.andand.api_token == Rails.configuration.SystemRootToken\n    return forbidden if !Rails.configuration.Login.IssueTrustedTokens\n  end\n\n  def forbidden\n    send_error('Action prohibited by IssueTrustedTokens configuration.',\n               status: 403)\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/authorized_keys_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Arvados::V1::AuthorizedKeysController < ApplicationController\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/collections_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire \"arvados/keep\"\nrequire \"trashable\"\n\nclass Arvados::V1::CollectionsController < ApplicationController\n  include DbCurrentTime\n  include TrashableController\n\n  def self._index_requires_parameters\n    (super rescue {}).\n      merge({\n        include_trash: {\n          type: 'boolean',\n          required: false,\n          default: false,\n          description: \"Include collections whose `is_trashed` attribute is true.\",\n        },\n        include_old_versions: {\n          type: 'boolean',\n          required: false,\n          default: false,\n          description: \"Include past collection versions.\",\n        },\n      })\n  end\n\n  def self._show_requires_parameters\n    (super rescue {}).\n      merge({\n        include_trash: {\n          type: 'boolean',\n          required: false,\n          default: false,\n          description: \"Show collection even if its `is_trashed` attribute is true.\",\n        },\n      })\n  end\n\n  def create\n    if resource_attrs[:uuid] and (loc = Keep::Locator.parse(resource_attrs[:uuid]))\n      resource_attrs[:portable_data_hash] = loc.to_s\n      resource_attrs.delete :uuid\n    end\n    resource_attrs.delete :version\n    resource_attrs.delete :current_version_uuid\n    super\n  end\n\n  def update\n    # preserve_version should be disabled unless explicitly asked otherwise.\n    if !resource_attrs[:preserve_version]\n      resource_attrs[:preserve_version] = false\n    end\n    super\n  end\n\n  def find_objects_for_index\n    opts = {\n      include_trash: params[:include_trash] || ['destroy', 'trash', 'untrash'].include?(action_name),\n      include_old_versions: params[:include_old_versions] || false,\n    }\n    @objects = Collection.readable_by(*@read_users, opts) if !opts.empty?\n    super\n  end\n\n  def find_object_by_uuid(with_lock: false)\n    # We are always willing to return an old version by UUID.\n    # We set the parameter so it gets used correctly by super methods.\n    params[:include_old_versions] = true\n    if loc = Keep::Locator.parse(params[:id])\n      loc.strip_hints!\n\n      opts = {\n        include_trash: params[:include_trash],\n        include_old_versions: params[:include_old_versions],\n      }\n\n      # It matters which Collection object we pick because blob\n      # signatures depend on the value of trash_at.\n      #\n      # From postgres doc: \"By default, null values sort as if larger\n      # than any non-null value; that is, NULLS FIRST is the default\n      # for DESC order, and NULLS LAST otherwise.\"\n      #\n      # \"trash_at desc\" sorts null first, then latest to earliest, so\n      # it will select the Collection object with the longest\n      # available lifetime.\n\n      select_attrs = (@select || [\"manifest_text\"]) | [\"portable_data_hash\", \"trash_at\"]\n      model = Collection\n      if with_lock && Rails.configuration.API.LockBeforeUpdate\n        model = model.lock\n      end\n      if c = model.\n               readable_by(*@read_users, opts).\n               where({ portable_data_hash: loc.to_s }).\n               order(\"trash_at desc\").\n               select(select_attrs.join(\", \")).\n               limit(1).\n               first\n        @object = {\n          uuid: c.portable_data_hash,\n          portable_data_hash: c.portable_data_hash,\n          trash_at: c.trash_at,\n        }\n        if 
select_attrs.index(\"manifest_text\")\n          @object[:manifest_text] = c.manifest_text\n        end\n      end\n    else\n      super(with_lock: with_lock)\n    end\n  end\n\n  def show\n    if @object.is_a? Collection\n      # Omit unsigned_manifest_text\n      @select ||= model_class.selectable_attributes - [\"unsigned_manifest_text\"]\n      super\n    else\n      send_json @object\n    end\n  end\n\n\n  def find_collections(visited, sp, ignore_columns=[], &b)\n    case sp\n    when ArvadosModel\n      sp.class.columns.each do |c|\n        find_collections(visited, sp[c.name.to_sym], &b) if !ignore_columns.include?(c.name)\n      end\n    when Hash\n      sp.each do |k, v|\n        find_collections(visited, v, &b)\n      end\n    when Array\n      sp.each do |v|\n        find_collections(visited, v, &b)\n      end\n    when String\n      if m = /[a-f0-9]{32}\\+\\d+/.match(sp)\n        yield m[0], nil\n      elsif m = Collection.uuid_regex.match(sp)\n        yield nil, m[0]\n      end\n    end\n  end\n\n  def search_edges(visited, uuid, direction)\n    if uuid.nil? or uuid.empty? or visited[uuid]\n      return\n    end\n\n    if loc = Keep::Locator.parse(uuid)\n      loc.strip_hints!\n      return if visited[loc.to_s]\n    end\n\n    if loc\n      # uuid is a portable_data_hash\n      collections = Collection.readable_by(*@read_users).where(portable_data_hash: loc.to_s)\n      c = collections.limit(2).all\n      if c.size == 1\n        visited[loc.to_s] = c[0]\n      elsif c.size > 1\n        name = collections.limit(1).where(\"name <> ''\").first\n        if name\n          visited[loc.to_s] = {\n            portable_data_hash: c[0].portable_data_hash,\n            name: \"#{name.name} + #{collections.count-1} more\"\n          }\n        else\n          visited[loc.to_s] = {\n            portable_data_hash: c[0].portable_data_hash,\n            name: loc.to_s\n          }\n        end\n      end\n\n      if direction == :search_up\n        # Search upstream for jobs where this locator is the output of some container\n        Container.readable_by(*@read_users).where(output: loc.to_s).pluck(:uuid).each do |c_uuid|\n          search_edges(visited, c_uuid, :search_up)\n        end\n\n        Container.readable_by(*@read_users).where(log: loc.to_s).pluck(:uuid).each do |c_uuid|\n          search_edges(visited, c_uuid, :search_up)\n        end\n      elsif direction == :search_down\n        if loc.to_s == \"d41d8cd98f00b204e9800998ecf8427e+0\"\n          # Special case, don't follow the empty collection.\n          return\n        end\n\n        # Search downstream for jobs where this locator is in mounts\n        Container.readable_by(*@read_users).where([Container.full_text_trgm + \" like ?\", \"%#{loc.to_s}%\"]).select(\"output, log, uuid\").each do |c|\n          if c.output != loc.to_s && c.log != loc.to_s\n            search_edges(visited, c.uuid, :search_down)\n          end\n        end\n      end\n    else\n      # uuid is a regular Arvados UUID\n      rsc = ArvadosModel::resource_class_for_uuid uuid\n      if rsc == Container\n        c = Container.readable_by(*@read_users).where(uuid: uuid).limit(1).first\n        if c\n          visited[uuid] = c.as_api_response\n          if direction == :search_up\n            # Follow upstream collections referenced in the script parameters\n            find_collections(visited, c, ignore_columns=[\"log\", \"output\"]) do |hash, col_uuid|\n              search_edges(visited, hash, :search_up) if hash\n              search_edges(visited, 
col_uuid, :search_up) if col_uuid\n            end\n          elsif direction == :search_down\n            # Follow downstream job output\n            search_edges(visited, c.output, :search_down)\n          end\n        end\n      elsif rsc == ContainerRequest\n        c = ContainerRequest.readable_by(*@read_users).where(uuid: uuid).limit(1).first\n        if c\n          visited[uuid] = c.as_api_response\n          if direction == :search_up\n            # Follow upstream collections\n            find_collections(visited, c, ignore_columns=[\"log_uuid\", \"output_uuid\"]) do |hash, col_uuid|\n              search_edges(visited, hash, :search_up) if hash\n              search_edges(visited, col_uuid, :search_up) if col_uuid\n            end\n          elsif direction == :search_down\n            # Follow downstream job output\n            search_edges(visited, c.output_uuid, :search_down)\n          end\n        end\n      elsif rsc == Collection\n        c = Collection.readable_by(*@read_users).where(uuid: uuid).limit(1).first\n        if c\n          if direction == :search_up\n            visited[c.uuid] = c.as_api_response\n\n            ContainerRequest.readable_by(*@read_users).where(output_uuid: uuid).pluck(:uuid).each do |cr_uuid|\n              search_edges(visited, cr_uuid, :search_up)\n            end\n\n            ContainerRequest.readable_by(*@read_users).where(log_uuid: uuid).pluck(:uuid).each do |cr_uuid|\n              search_edges(visited, cr_uuid, :search_up)\n            end\n          elsif direction == :search_down\n            search_edges(visited, c.portable_data_hash, :search_down)\n          end\n        end\n      elsif rsc != nil\n        rsc.where(uuid: uuid).each do |r|\n          visited[uuid] = r.as_api_response\n        end\n      end\n    end\n\n    if direction == :search_up\n      # Search for provenance links pointing to the current uuid\n      Link.readable_by(*@read_users).\n        where(head_uuid: uuid, link_class: \"provenance\").\n        each do |link|\n        visited[link.uuid] = link.as_api_response\n        search_edges(visited, link.tail_uuid, direction)\n      end\n    elsif direction == :search_down\n      # Search for provenance links emanating from the current uuid\n      Link.readable_by(current_user).\n        where(tail_uuid: uuid, link_class: \"provenance\").\n        each do |link|\n        visited[link.uuid] = link.as_api_response\n        search_edges(visited, link.head_uuid, direction)\n      end\n    end\n  end\n\n  def self._provenance_method_description\n    \"Detail the provenance of a given collection.\"\n  end\n\n  def provenance\n    visited = {}\n    if @object[:uuid]\n      search_edges(visited, @object[:uuid], :search_up)\n    else\n      search_edges(visited, @object[:portable_data_hash], :search_up)\n    end\n    send_json visited\n  end\n\n  def self._used_by_method_description\n    \"Detail where a given collection has been used.\"\n  end\n\n  def used_by\n    visited = {}\n    if @object[:uuid]\n      search_edges(visited, @object[:uuid], :search_down)\n    else\n      search_edges(visited, @object[:portable_data_hash], :search_down)\n    end\n    send_json visited\n  end\n\n  protected\n\n  def load_select_param *args\n    super\n    if action_name == 'index'\n      # Omit manifest_text and unsigned_manifest_text from index results unless expressly selected.\n      @select ||= model_class.selectable_attributes - [\"manifest_text\", \"unsigned_manifest_text\"]\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/computed_permissions_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Arvados::V1::ComputedPermissionsController < ApplicationController\n  before_action :admin_required\n\n  def object_list(**args)\n    if !['none', '', nil].include?(params[:count])\n      raise ArgumentError.new(\"count parameter must be 'none'\")\n    end\n    params[:count] = 'none'\n\n    if !['0', 0, nil].include?(params[:offset])\n      raise ArgumentError.new(\"non-zero offset parameter #{params[:offset].inspect} is not supported\")\n    end\n\n    super\n  end\n\n  def limit_database_read(**args)\n    # This is counterproductive for this table, and the default\n    # implementation doesn't work because it relies on some\n    # real-model-like behavior that ComputedPermission does not offer.\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/container_requests_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'update_priorities'\n\nclass Arvados::V1::ContainerRequestsController < ApplicationController\n  accept_attribute_as_json :environment, Hash\n  accept_attribute_as_json :mounts, Hash\n  accept_attribute_as_json :runtime_constraints, Hash\n  accept_attribute_as_json :command, Array\n  accept_attribute_as_json :filters, Array\n  accept_attribute_as_json :scheduling_parameters, Hash\n  accept_attribute_as_json :secret_mounts, Hash\n\n  def self._index_requires_parameters\n    (super rescue {}).\n      merge({\n        include_trash: {\n          type: 'boolean', required: false, default: false, description: \"Include container requests whose owner project is trashed.\",\n        },\n      })\n  end\n\n  def self._show_requires_parameters\n    (super rescue {}).\n      merge({\n        include_trash: {\n          type: 'boolean', required: false, default: false, description: \"Show container request even if its owner project is trashed.\",\n        },\n      })\n  end\n\n  def self._container_status_requires_parameters\n    (super rescue {}).\n      merge({\n        uuid: {\n          type: 'string', required: true, description: \"The UUID of the container request to query.\",\n        },\n      })\n  end\n\n  # This API is handled entirely by controller, so this method is\n  # never called -- it's only here for the sake of adding the API to\n  # the generated discovery document.\n  def self._container_status_method_description\n    \"Return scheduling details for a container request.\"\n  end\n  \n  def container_status\n    send_json({\"errors\" => \"controller-only API, not handled by rails\"}, status: 400)\n  end\n\n  def update\n    if (resource_attrs.keys.map(&:to_sym) - [:owner_uuid, :name, :description, :properties]).empty? or @object.container_uuid.nil?\n      # If no attributes are being updated besides these, there are no\n      # cascading changes to other rows/tables, the only lock will be\n      # the single row lock on SQL UPDATE.\n      super\n    elsif @object.container.andand.final?\n      # If the assigned container is already finalized,\n      # Container#update_priority will skip the cascading priority\n      # update, so there's no need to preemptively acquire row locks\n      # here.\n      super\n    else\n      # Get locks ahead of time to avoid deadlock in cascading priority\n      # update\n      Container.transaction do\n        row_lock_for_priority_update @object.container_uuid\n        super\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/containers_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'update_priorities'\n\nclass Arvados::V1::ContainersController < ApplicationController\n  accept_attribute_as_json :environment, Hash\n  accept_attribute_as_json :mounts, Hash\n  accept_attribute_as_json :runtime_constraints, Hash\n  accept_attribute_as_json :runtime_status, Hash\n  accept_attribute_as_json :command, Array\n  accept_attribute_as_json :scheduling_parameters, Hash\n\n  skip_before_action :find_object_by_uuid, only: [:current]\n  skip_before_action :render_404_if_no_object, only: [:current]\n\n  def self._auth_method_description\n    \"Get the API client authorization token associated with this container.\"\n  end\n\n  def auth\n    if @object.locked_by_uuid != Thread.current[:api_client_authorization].uuid\n      raise ArvadosModel::PermissionDeniedError.new(\"Not locked by your token\")\n    end\n    if @object.runtime_token.nil?\n      @object = @object.auth\n    else\n      @object = ApiClientAuthorization.validate(token: @object.runtime_token)\n      if @object.nil?\n        raise ArvadosModel::PermissionDeniedError.new(\"Invalid runtime_token\")\n      end\n    end\n    show\n  end\n\n  def update\n    if (resource_attrs.keys.map(&:to_sym) - [:cost, :gateway_address, :output_properties, :progress, :runtime_status]).empty?\n      # If no attributes are being updated besides these, there are no\n      # cascading changes to other rows/tables, the only lock will the\n      # single row lock on SQL UPDATE.\n      super\n    else\n      Container.transaction do\n        # Get locks ahead of time to avoid deadlock in cascading priority\n        # update\n        row_lock_for_priority_update @object.uuid\n        super\n      end\n    end\n  end\n\n  def find_objects_for_index\n    super\n    if action_name == 'lock' || action_name == 'unlock'\n      # Avoid loading more fields than we need\n      @objects = @objects.select(:id, :uuid, :state, :priority, :auth_uuid, :locked_by_uuid, :lock_count)\n      # This gets called from within find_object_by_uuid.\n      # find_object_by_uuid stores the original value of @select in\n      # @preserve_select, edits the value of @select, calls\n      # find_objects_for_index, then restores @select from the value\n      # of @preserve_select.  
So if we want our updated value of\n      # @select here to stick, we have to set @preserve_select.\n      @select = @preserve_select = %w(uuid state priority auth_uuid locked_by_uuid)\n    elsif action_name == 'update_priority'\n      # We're going to reload in update_priority!, which will select\n      # all attributes, but will fail if we don't select :id now.\n      @objects = @objects.select(:id, :uuid)\n    end\n  end\n\n  def self._lock_method_description\n    \"Lock a container (for a dispatcher to begin running it).\"\n  end\n\n  def lock\n    @object.lock\n    show\n  end\n\n  def self._unlock_method_description\n    \"Unlock a container (for a dispatcher to stop running it).\"\n  end\n\n  def unlock\n    @object.unlock\n    show\n  end\n\n  def self._update_priority_method_description\n    \"Recalculate and return the priority of a given container.\"\n  end\n\n  def update_priority\n    @object.update_priority!\n    show\n  end\n\n  def self._current_method_description\n    \"Return the container record associated with the API token authorizing this request.\"\n  end\n\n  def current\n    if Thread.current[:api_client_authorization].nil?\n      send_error(\"Not logged in\", status: 401)\n    else\n      @object = Container.for_current_token\n      if @object.nil?\n        send_error(\"Token is not associated with a container.\", status: 404)\n      else\n        show\n      end\n    end\n  end\n\n  def self._secret_mounts_method_description\n    \"Return secret mount information for the container associated with the API token authorizing this request.\"\n  end\n\n  def secret_mounts\n    c = Container.for_current_token\n    if @object && c && @object.uuid == c.uuid\n      send_json({\"secret_mounts\" => @object.secret_mounts})\n    else\n      send_error(\"Token is not associated with this container.\", status: 403)\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/credentials_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Arvados::V1::CredentialsController < ApplicationController\n\n  # \"secret\" is not returned in API calls, but we also want\n  # to disallow its use in queries in general.\n\n  def load_where_param\n    super\n    if @where[:secret]\n      raise ArvadosModel::PermissionDeniedError.new \"Cannot use 'secret' in where clause\"\n    end\n  end\n\n  def load_filters_param\n    super\n    @filters.map do |k|\n      if k[0] =~ /secret/\n        raise ArvadosModel::PermissionDeniedError.new \"Cannot filter on 'secret'\"\n      end\n    end\n  end\n\n  def load_limit_offset_order_params\n    super\n    @orders.each do |ord|\n      if ord =~ /secret/\n        raise ArvadosModel::PermissionDeniedError.new \"Cannot order by 'secret'\"\n      end\n    end\n  end\n\n  def self._secret_method_description\n    \"Fetch the secret part of the credential (can only be invoked by running containers).\"\n  end\n\n  def secret\n    # Should have previously determined the user can read the credential in @object\n    c = Container.for_current_token\n    if !@object || !c || c.state != Container::Running\n      send_error(\"Token is not associated with a running container.\", status: 403)\n      return\n    end\n\n    if Time.now >= @object.expires_at\n      send_error(\"Credential has expired.\", status: 403)\n      return\n    end\n\n    lg = Log.new(event_type: \"secret_access\")\n    lg.object_uuid = @object.uuid\n    lg.object_owner_uuid = @object.owner_uuid\n    lg.properties = {\n      \"name\": @object.name,\n      \"credential_class\": @object.credential_class,\n      \"external_id\": @object.external_id,\n    }\n    lg.save!\n    send_json({\"external_id\" => @object.external_id, \"secret\" => @object.secret})\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/groups_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire \"trashable\"\n\nclass Arvados::V1::GroupsController < ApplicationController\n  include TrashableController\n\n  before_action :load_include_param, only: [:shared, :contents]\n  skip_before_action :find_object_by_uuid, only: :shared\n  skip_before_action :render_404_if_no_object, only: :shared\n\n  def self._index_requires_parameters\n    (super rescue {}).\n      merge({\n        include_trash: {\n          type: 'boolean',\n          required: false,\n          default: false,\n          description: \"Include items whose `is_trashed` attribute is true.\",\n        },\n      })\n  end\n\n  def self._show_requires_parameters\n    (super rescue {}).\n      merge({\n        include_trash: {\n          type: 'boolean',\n          required: false,\n          default: false,\n          description: \"Return group/project even if its `is_trashed` attribute is true.\",\n        },\n      })\n  end\n\n  def self._contents_requires_parameters\n    _index_requires_parameters.merge(\n      {\n        uuid: {\n          type: 'string',\n          required: false,\n          default: '',\n          description: \"If given, limit the listing to objects owned by the\nuser or group with this UUID.\",\n        },\n        recursive: {\n          type: 'boolean',\n          required: false,\n          default: false,\n          description: 'If true, include contents from child groups recursively.',\n        },\n        include: {\n          type: 'array',\n          required: false,\n          description: \"An array of referenced objects to include in the `included` field of the response. Supported values in the array are:\n\n  * `\\\"container_uuid\\\"`\n  * `\\\"owner_uuid\\\"`\n  * `\\\"collection_uuid\\\"`\n\n\",\n        },\n        include_old_versions: {\n          type: 'boolean',\n          required: false,\n          default: false,\n          description: 'If true, include past versions of collections in the listing.',\n        },\n        exclude_home_project: {\n          type: \"boolean\",\n          required: false,\n          default: false,\n          description: \"If true, exclude contents of the user's home project from the listing.\nCalling this method with this flag set is how clients enumerate objects shared\nwith the current user.\",\n        },\n      }\n    )\n  end\n\n  def self._create_requires_parameters\n    super.merge(\n      {\n        async: {\n          required: false,\n          type: 'boolean',\n          location: 'query',\n          default: false,\n          description: 'If true, cluster permission will not be updated immediately, but instead at the next configured update interval.',\n        }\n      }\n    )\n  end\n\n  def self._update_requires_parameters\n    super.merge(\n      {\n        async: {\n          required: false,\n          type: 'boolean',\n          location: 'query',\n          default: false,\n          description: 'If true, cluster permission will not be updated immediately, but instead at the next configured update interval.',\n        }\n      }\n    )\n  end\n\n  def create\n    if params[:async]\n      @object = model_class.new(resource_attrs.merge({async_permissions_update: true}))\n      @object.save!\n      render_accepted\n    else\n      super\n    end\n  end\n\n  def update\n    if params[:async]\n      attrs_to_update = resource_attrs.reject { |k, v|\n        [:kind, :etag, :href].index k\n      
}.merge({async_permissions_update: true})\n      @object.update!(attrs_to_update)\n      @object.save!\n      render_accepted\n    else\n      super\n    end\n  end\n\n  def render_404_if_no_object\n    if params[:action] == 'contents'\n      if !params[:uuid]\n        # OK!\n        @object = nil\n        true\n      elsif @object\n        # Project group\n        true\n      elsif (@object = User.where(uuid: params[:uuid]).first)\n        # \"Home\" pseudo-project\n        true\n      else\n        super\n      end\n    else\n      super\n    end\n  end\n\n  def self._contents_method_description\n    \"List objects that belong to a group.\"\n  end\n\n  def contents\n    @orig_select = @select\n    load_searchable_objects\n    list = {\n      :kind => \"arvados#objectList\",\n      :etag => \"\",\n      :self_link => \"\",\n      :offset => @offset,\n      :limit => @limit,\n      :items => @objects.as_api_response(nil)\n    }\n    if params[:count] != 'none'\n      list[:items_available] = @items_available\n    end\n    if @extra_included\n      if @orig_select.nil?\n        @orig_select = User.selectable_attributes.concat(\n          Group.selectable_attributes,\n          Container.selectable_attributes,\n          Collection.selectable_attributes - [\"unsigned_manifest_text\"])\n      end\n      @orig_select = @orig_select - [\"manifest_text\"]\n      list[:included] = @extra_included.as_api_response(nil, {select: @orig_select})\n    end\n    send_json(list)\n  end\n\n  def self._shared_method_description\n    \"List groups that the current user can access via permission links.\"\n  end\n\n  def shared\n    # The purpose of this endpoint is to return the toplevel set of\n    # groups which are *not* reachable through a direct ownership\n    # chain of projects starting from the current user account.  In\n    # other words, groups to which access was granted via a\n    # permission link or chain of links.\n    #\n    # This also returns (in the \"included\" field) the objects that own\n    # those projects (users or non-project groups).\n    #\n    # The intended use of this endpoint is to support clients which\n    # wish to browse those projects which are visible to the user but\n    # are not part of the \"home\" project.\n\n    load_limit_offset_order_params\n    load_filters_param\n\n    @objects = exclude_home Group.readable_by(*@read_users), Group\n\n    apply_where_limit_order_params\n\n    if @include.include?(\"owner_uuid\")\n      owners = @objects.map(&:owner_uuid).to_set\n      @extra_included ||= []\n      [Group, User].each do |klass|\n        @extra_included += klass.readable_by(*@read_users).where(uuid: owners.to_a).to_a\n      end\n    end\n\n    if @include.include?(\"container_uuid\")\n      @extra_included ||= []\n      container_uuids = @objects.map { |o|\n        o.respond_to?(:container_uuid) ? o.container_uuid : nil\n      }.compact.to_set.to_a\n      @extra_included += Container.where(uuid: container_uuids).to_a\n    end\n\n    if @include.include?(\"collection_uuid\")\n      @extra_included ||= []\n      collection_uuids = @objects.map { |o|\n        o.respond_to?(:collection_uuid) ? 
o.collection_uuid : nil\n      }.compact.to_set.to_a\n      @extra_included += Collection.where(uuid: collection_uuids).to_a\n    end\n\n    index\n  end\n\n  def self._shared_requires_parameters\n    self._index_requires_parameters.merge(\n      {\n        include: {\n          type: 'string',\n          required: false,\n          description: \"A string naming referenced objects to include in the `included` field of the response. Supported values are:\n\n  * `\\\"owner_uuid\\\"`\n\n\",\n        },\n      }\n    )\n  end\n\n  protected\n\n  def load_include_param\n    @include = params[:include]\n    if @include.nil? || @include == \"\"\n      @include = Set[]\n    elsif @include.is_a?(String) && @include.start_with?('[')\n      @include = SafeJSON.load(@include).to_set\n    elsif @include.is_a?(String)\n      @include = Set[@include]\n    else\n      return send_error(\"'include' parameter must be a string or array\", status: 422)\n    end\n  end\n\n  def load_searchable_objects\n    all_objects = []\n    @items_available = 0\n\n    # Reload the orders param, this time without prefixing unqualified\n    # columns (\"name\" => \"groups.name\"). Here, unqualified orders\n    # apply to each table being searched, not just \"groups\", as\n    # fill_table_names would assume. Instead, table names are added\n    # inside the klasses loop below (see request_order).\n    load_limit_offset_order_params(fill_table_names: false)\n\n    # Trick apply_where_limit_order_params into applying suitable\n    # per-table values. *_all are the real ones we'll apply to the\n    # aggregate set.\n    limit_all = @limit\n    offset_all = @offset\n    # save the orders from the current request as determined by load_param,\n    # but otherwise discard them because we're going to be getting objects\n    # from many models\n    request_orders = @orders.clone\n    @orders = []\n\n    request_filters = @filters\n\n    klasses = [Group, ContainerRequest, Workflow, Collection]\n\n    table_names = Hash[klasses.collect { |k| [k, k.table_name] }]\n\n    disabled_methods = Rails.configuration.API.DisabledAPIs\n    avail_klasses = table_names.select{|k, t| !disabled_methods[t+'.index']}\n    klasses = avail_klasses.keys\n\n    request_filters.each do |col, op, val|\n      if col.index('.')\n        filter_table = col.split('.', 2)[0]\n        # singular \"container\" is valid as a special case for\n        # filtering container requests by their associated\n        # container_uuid, similarly singular \"collection\" for\n        # workflows.\n        if filter_table != \"container\" && filter_table != \"collection\" && !table_names.values.include?(filter_table)\n          raise ArgumentError.new(\"Invalid attribute '#{col}' in filter\")\n        end\n      end\n    end\n\n    wanted_klasses = []\n    request_filters.each do |col,op,val|\n      if op == 'is_a'\n        (val.is_a?(Array) ? 
val : [val]).each do |type|\n          type = type.split('#')[-1]\n          type[0] = type[0].capitalize\n          wanted_klasses << type\n        end\n      end\n    end\n\n    filter_by_owner = {}\n    if @object\n      if params['recursive']\n        filter_by_owner[:owner_uuid] = [@object.uuid] + @object.descendant_project_uuids\n      else\n        filter_by_owner[:owner_uuid] = @object.uuid\n      end\n\n      if params['exclude_home_project']\n        raise ArgumentError.new \"Cannot use 'exclude_home_project' with a parent object\"\n      end\n    end\n\n    # Check that any fields in @select are valid for at least one class\n    if @select\n      all_attributes = []\n      klasses.each do |klass|\n        all_attributes.concat klass.selectable_attributes\n      end\n      if klasses.include?(ContainerRequest) && @include.include?(\"container_uuid\")\n        all_attributes.concat Container.selectable_attributes\n      end\n      if klasses.include?(Workflow) && @include.include?(\"collection_uuid\")\n        all_attributes.concat Collection.selectable_attributes\n      end\n      @select.each do |check|\n        if !all_attributes.include? check\n          raise ArgumentError.new \"Invalid attribute '#{check}' in select\"\n        end\n      end\n    end\n    any_selections = @select\n\n    included_by_uuid = {}\n\n    error_by_class = {}\n    any_success = false\n\n    klasses.each do |klass|\n      # if klasses are specified, skip all other klass types\n      next if wanted_klasses.any? and !wanted_klasses.include?(klass.to_s)\n\n      # don't process the rest of the object types if we already have the needed number of objects\n      break if params['count'] == 'none' and all_objects.size >= limit_all\n\n      # If the currently requested orders specifically match the\n      # table_name for the current klass, apply that order.\n      # Otherwise, order by recency.\n      request_order = request_orders.andand.map do |r|\n        if r =~ /^#{klass.table_name}\\./i\n          r\n        elsif r !~ /\\./\n          # The caller may have provided an unqualified column like\n          # \"created_by desc\", but we might be joining another table\n          # that also has that column, so we need to specify that we\n          # mean this table.\n          klass.table_name + '.' 
+ r\n        else\n          # Only applies to a different table / object type.\n          nil\n        end\n      end.compact\n      request_order = optimize_orders(request_order, model_class: klass)\n\n      @select = select_for_klass any_selections, klass, false\n\n      where_conds = filter_by_owner\n      if klass == Collection && @select.nil?\n        @select = klass.selectable_attributes - [\"manifest_text\", \"unsigned_manifest_text\"]\n      elsif klass == Group\n        where_conds = where_conds.merge(group_class: [\"project\",\"filter\"])\n      end\n\n      # Make signed manifest_text not selectable because controller\n      # currently doesn't know to sign it.\n      if @select\n        @select = @select - [\"manifest_text\"]\n      end\n\n      @filters = request_filters.map do |col, op, val|\n        if !col.index('.')\n          [col, op, val]\n        elsif (colsp = col.split('.', 2))[0] == klass.table_name\n          [colsp[1], op, val]\n        elsif klass == ContainerRequest && colsp[0] == \"container\"\n          [col, op, val]\n        elsif klass == Workflow && colsp[0] == \"collection\"\n          [col, op, val]\n        else\n          nil\n        end\n      end.compact\n\n      @objects = klass.readable_by(*@read_users, {\n          :include_trash => params[:include_trash],\n          :include_old_versions => params[:include_old_versions]\n        }).order(request_order).where(where_conds)\n\n      if params['exclude_home_project']\n        @objects = exclude_home @objects, klass\n      end\n\n      # Adjust the limit based on number of objects fetched so far\n      klass_limit = limit_all - all_objects.count\n      @limit = klass_limit\n\n      begin\n        apply_where_limit_order_params klass\n      rescue ArgumentError => e\n        if e.inspect =~ /Invalid attribute '.+' for operator '.+' in filter/ or\n          e.inspect =~ /Invalid attribute '.+' for subproperty filter/\n          error_by_class[klass.name] = e\n          next\n        end\n        raise\n      else\n        any_success = true\n      end\n\n      # This actually fetches the objects\n      klass_object_list = object_list(model_class: klass)\n\n      # The appropriate @offset for querying the next table depends on\n      # how many matching rows in this table were skipped due to the\n      # current @offset. If we retrieved any items (or @offset is\n      # already zero), then clearly exactly @offset rows were skipped,\n      # and the correct @offset for the next table is zero.\n      # Otherwise, we need to count all matching rows in the current\n      # table, and subtract that from @offset. If our previous query\n      # used count=none, we will need an additional query to get that\n      # count.\n      if params['count'] == 'none' and @offset > 0 and klass_object_list[:items].length == 0\n        # Just get the count.\n        klass_object_list[:items_available] = @objects.\n                                                except(:limit).except(:offset).\n                                                count(@distinct ? :id : '*')\n      end\n\n      klass_items_available = klass_object_list[:items_available]\n      if klass_items_available.nil?\n        # items_available may be nil if count=none and a non-zero\n        # number of items were returned.  
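For example, with\n        # count=none, if this table yielded 3 of the 10 requested\n        # items, object_list skips its COUNT query entirely and\n        # items_available comes back nil.  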
One of these cases must be true:\n        #\n        # items returned >= limit, so we won't go to the next table, offset doesn't matter\n        # items returned < limit, so we want to start at the beginning of the next table, offset = 0\n        #\n        @offset = 0\n      else\n        # We have the exact count, so update the running totals.\n        @items_available += klass_items_available\n        @offset = [@offset - klass_items_available, 0].max\n      end\n\n      # Add objects to the list of objects to be returned.\n      all_objects += klass_object_list[:items]\n\n      if klass_object_list[:limit] < klass_limit\n        # object_list() had to reduce @limit to comply with\n        # max_index_database_read. From now on, we'll do all queries\n        # with limit=0 and just accumulate items_available.\n        limit_all = all_objects.count\n      end\n\n      if @include.include?(\"owner_uuid\")\n        owners = klass_object_list[:items].map {|i| i[:owner_uuid]}.to_set\n        [Group, User].each do |ownerklass|\n          ownerklass.readable_by(*@read_users).where(uuid: owners.to_a).each do |ow|\n            included_by_uuid[ow.uuid] = ow\n          end\n        end\n      end\n\n      if @include.include?(\"container_uuid\") && klass == ContainerRequest\n        containers = klass_object_list[:items].collect { |cr| cr[:container_uuid] }.to_set\n        Container.where(uuid: containers.to_a).each do |c|\n          included_by_uuid[c.uuid] = c\n        end\n      end\n\n      if @include.include?(\"collection_uuid\") && klass == Workflow\n        collections = klass_object_list[:items].collect { |wf| wf[:collection_uuid] }.to_set\n        Collection.where(uuid: collections.to_a).each do |c|\n          included_by_uuid[c.uuid] = c\n        end\n      end\n    end\n\n    # Only error out when every searchable object type errored out\n    if !any_success && error_by_class.size > 0\n      error_msg = error_by_class.collect do |klass, err|\n        \"#{err} on object type #{klass}\"\n      end.join(\"\\n\")\n      raise ArgumentError.new(error_msg)\n    end\n\n    if !@include.empty?\n      @extra_included = included_by_uuid.values\n    end\n\n    @objects = all_objects\n    @limit = limit_all\n    @offset = offset_all\n  end\n\n  def exclude_home objectlist, klass\n    # select records that are readable by current user AND\n    #   the owner_uuid is a user (but not the current user) OR\n    #   the owner_uuid is not readable by the current user OR\n    #   the owner_uuid is a group but group_class is not a project\n\n    read_parent_check = if current_user.is_admin\n                          \"\"\n                        else\n                          \"NOT EXISTS(SELECT 1 FROM #{PERMISSION_VIEW} WHERE \"+\n                            \"user_uuid=(:user_uuid) AND target_uuid=#{klass.table_name}.owner_uuid AND perm_level >= 1) OR \"\n                        end\n\n    objectlist.where(\"#{klass.table_name}.owner_uuid IN (SELECT users.uuid FROM users WHERE users.uuid != (:user_uuid)) OR \"+\n                     read_parent_check+\n                     \"EXISTS(SELECT 1 FROM groups as gp where gp.uuid=#{klass.table_name}.owner_uuid and gp.group_class != 'project')\",\n                     user_uuid: current_user.uuid)\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/keep_services_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Arvados::V1::KeepServicesController < ApplicationController\n\n  skip_before_action :find_object_by_uuid, only: :accessible\n  skip_before_action :render_404_if_no_object, only: :accessible\n  skip_before_action :require_auth_scope, only: :accessible\n\n  def find_objects_for_index\n    # all users can list all keep services\n    @objects = KeepService.all\n    super\n  end\n\n  def self._accessible_method_description\n    \"List Keep services that the current client can access.\"\n  end\n\n  def accessible\n    if request.headers['X-External-Client'] == '1'\n      @objects = KeepService.where('service_type=?', 'proxy')\n    else\n      @objects = KeepService.where('service_type<>?', 'proxy')\n    end\n    render_list\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/links_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Arvados::V1::LinksController < ApplicationController\n\n  def check_uuid_kind uuid, kind\n    if kind and ArvadosModel::resource_class_for_uuid(uuid).andand.kind != kind\n      send_error(\"'#{kind}' does not match uuid '#{uuid}', expected '#{ArvadosModel::resource_class_for_uuid(uuid).andand.kind}'\",\n                 status: 422)\n      nil\n    else\n      true\n    end\n  end\n\n  def create\n    return if ! check_uuid_kind resource_attrs[:head_uuid], resource_attrs[:head_kind]\n    return if ! check_uuid_kind resource_attrs[:tail_uuid], resource_attrs[:tail_kind]\n\n    resource_attrs.delete :head_kind\n    resource_attrs.delete :tail_kind\n\n    if resource_attrs[:link_class] == 'permission' && Link::PermLevel[resource_attrs[:name]]\n      existing = Link.\n                   lock. # select ... for update\n                   where(link_class: 'permission',\n                         tail_uuid: resource_attrs[:tail_uuid],\n                         head_uuid: resource_attrs[:head_uuid],\n                         name: Link::PermLevel.keys).first\n      if existing\n        @object = existing\n        if Link::PermLevel[resource_attrs[:name]] > Link::PermLevel[existing.name]\n          # upgrade existing permission link to the requested level.\n          return update\n        else\n          # no-op: existing permission is already greater or equal to\n          # the newly requested permission.\n          return show\n        end\n      end\n    elsif resource_attrs[:link_class] == 'permission' &&\n          resource_attrs[:name] == 'can_login' &&\n          resource_attrs[:properties].respond_to?(:has_key?) &&\n          resource_attrs[:properties].has_key?(:username)\n      existing = Link.\n                   lock. # select ... for update\n                   where(link_class: 'permission',\n                         tail_uuid: resource_attrs[:tail_uuid],\n                         head_uuid: resource_attrs[:head_uuid]).\n                   where('properties @> ?', SafeJSON.dump({'username' => resource_attrs[:properties][:username]})).\n                   first\n      if existing\n        @object = existing\n        return show\n      end\n    end\n\n    super\n  end\n\n  def self._get_permissions_method_description\n    \"List permissions granted on an Arvados object.\"\n  end\n\n  def get_permissions\n    if current_user.andand.can?(manage: @object)\n      # find all links and return them\n      @objects = Link.unscoped.where(link_class: \"permission\",\n                                     head_uuid: params[:uuid])\n      @offset = 0\n      @limit = @objects.count\n      render_list\n    else\n      render :json => { errors: ['Forbidden'] }.to_json, status: 403\n    end\n  end\n\n  protected\n\n  def find_object_by_uuid(with_lock: false)\n    if params[:id] && params[:id].match(/\\D/)\n      params[:uuid] = params.delete :id\n    end\n    if action_name == 'get_permissions'\n      # get_permissions accepts a UUID for any kind of object.\n      @object = ArvadosModel::resource_class_for_uuid(params[:uuid])\n        .readable_by(*@read_users)\n        .where(uuid: params[:uuid])\n        .first\n    elsif !current_user\n      super(with_lock: with_lock)\n    else\n      # The usual permission-filtering index query is unnecessarily\n      # inefficient, and doesn't match all permission links that\n      # should be visible (see #18865).  
Instead, we look up the link\n      # by UUID, then check whether (a) its tail_uuid is the current\n      # user or (b) its head_uuid is an object the current_user\n      # can_manage.\n      model = Link\n      if with_lock && Rails.configuration.API.LockBeforeUpdate\n        model = model.lock\n      end\n      link = model.unscoped.where(uuid: params[:uuid]).first\n      if link && link.link_class != 'permission'\n        # Not a permission link. Re-fetch using generic\n        # permission-filtering query.\n        super(with_lock: with_lock)\n      elsif link && (current_user.uuid == link.tail_uuid ||\n                     current_user.can?(manage: link.head_uuid))\n        # Permission granted.\n        @object = link\n      else\n        # Permission denied, i.e., link is invisible => 404.\n        @object = nil\n      end\n    end\n  end\n\n  # Overrides ApplicationController load_where_param\n  def load_where_param\n    super\n\n    # head_kind and tail_kind columns are now virtual,\n    # equivalent functionality is now provided by\n    # 'is_a', so fix up any old-style 'where' clauses.\n    if @where\n      @filters ||= []\n      if @where[:head_kind]\n        @filters << ['head_uuid', 'is_a', @where[:head_kind]]\n        @where.delete :head_kind\n      end\n      if @where[:tail_kind]\n        @filters << ['tail_uuid', 'is_a', @where[:tail_kind]]\n        @where.delete :tail_kind\n      end\n    end\n  end\n\n  # Overrides ApplicationController load_filters_param\n  def load_filters_param\n    super\n\n    # head_kind and tail_kind columns are now virtual,\n    # equivalent functionality is now provided by\n    # 'is_a', so fix up any old-style 'filter' clauses.\n    @filters = @filters.map do |k|\n      if k[0] == 'head_kind' and k[1] == '='\n        ['head_uuid', 'is_a', k[2]]\n      elsif k[0] == 'tail_kind' and k[1] == '='\n        ['tail_uuid', 'is_a', k[2]]\n      else\n        k\n      end\n    end\n\n    # If the provided filters are enough to limit the results to\n    # permission links with specific head_uuids or\n    # tail_uuid=current_user, bypass the normal readable_by query\n    # (which doesn't match all can_manage-able items, see #18865) --\n    # just ensure the current user actually has can_manage permission\n    # for the provided head_uuids, removing any that don't. At that\n    # point the caller's filters are an effective permission filter.\n    if @filters.include?(['link_class', '=', 'permission'])\n      @filters.map do |k|\n        if k[0] == 'tail_uuid' && k[1] == '=' && k[2] == current_user.uuid\n          @objects = Link.unscoped\n        elsif k[0] == 'head_uuid'\n          if k[1] == '=' && current_user.can?(manage: k[2])\n            @objects = Link.unscoped\n          elsif k[1] == 'in'\n            # Modify the filter operand element (k[2]) in place,\n            # removing any non-permitted UUIDs.\n            k[2].select! do |head_uuid|\n              current_user.can?(manage: head_uuid)\n            end\n            @objects = Link.unscoped\n          end\n        end\n      end\n    end\n  end\n\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/logs_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Arvados::V1::LogsController < ApplicationController\n  # Overrides ApplicationController load_where_param\n  def load_where_param\n    super\n\n    # object_kind and column is now virtual,\n    # equivilent functionality is now provided by\n    # 'is_a', so fix up any old-style 'where' clauses.\n    if @where\n      @filters ||= []\n      if @where[:object_kind]\n        @filters << ['object_uuid', 'is_a', @where[:object_kind]]\n        @where.delete :object_kind\n      end\n    end\n  end\n\n  # Overrides ApplicationController load_filters_param\n  def load_filters_param\n    super\n\n    # object_kind and column is now virtual,\n    # equivilent functionality is now provided by\n    # 'is_a', so fix up any old-style 'filter' clauses.\n    @filters = @filters.map do |k|\n      if k[0] == 'object_kind' and k[1] == '='\n        ['object_uuid', 'is_a', k[2]]\n      else\n        k\n      end\n    end\n  end\n\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/management_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Arvados::V1::ManagementController < ApplicationController\n  skip_before_action :catch_redirect_hint\n  skip_before_action :find_objects_for_index\n  skip_before_action :find_object_by_uuid\n  skip_before_action :load_filters_param\n  skip_before_action :load_limit_offset_order_params\n  skip_before_action :load_select_param\n  skip_before_action :load_read_auths\n  skip_before_action :load_where_param\n  skip_before_action :render_404_if_no_object\n  skip_before_action :require_auth_scope\n\n  before_action :check_auth_header\n\n  def check_auth_header\n    mgmt_token = Rails.configuration.ManagementToken\n    auth_header = request.headers['Authorization']\n\n    if mgmt_token == \"\"\n      send_json ({\"errors\" => \"disabled\"}), status: 404\n    elsif !auth_header\n      send_json ({\"errors\" => \"authorization required\"}), status: 401\n    elsif auth_header != 'Bearer '+mgmt_token\n      send_json ({\"errors\" => \"authorization error\"}), status: 403\n    end\n  end\n\n  def metrics\n    render content_type: 'text/plain', plain: <<~EOF\n# HELP arvados_config_load_timestamp_seconds Time when config file was loaded.\n# TYPE arvados_config_load_timestamp_seconds gauge\narvados_config_load_timestamp_seconds{sha256=\"#{Rails.configuration.SourceSHA256}\"} #{Rails.configuration.LoadTimestamp.to_f}\n# HELP arvados_config_source_timestamp_seconds Timestamp of config file when it was loaded.\n# TYPE arvados_config_source_timestamp_seconds gauge\narvados_config_source_timestamp_seconds{sha256=\"#{Rails.configuration.SourceSHA256}\"} #{Rails.configuration.SourceTimestamp.to_f}\n# HELP arvados_version_running Indicated version is running.\n# TYPE arvados_version_running gauge\narvados_version_running{version=\"#{AppVersion.package_version}\"} 1\nEOF\n  end\n\n  def health\n    case params[:check]\n    when 'ping'\n      resp = {\"health\" => \"OK\"}\n      send_json resp\n    else\n      send_json ({\"errors\" => \"not found\"}), status: 404\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/schema_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Arvados::V1::SchemaController < ApplicationController\n  skip_before_action :catch_redirect_hint\n  skip_before_action :find_objects_for_index\n  skip_before_action :find_object_by_uuid\n  skip_before_action :load_filters_param\n  skip_before_action :load_limit_offset_order_params\n  skip_before_action :load_select_param\n  skip_before_action :load_read_auths\n  skip_before_action :load_where_param\n  skip_before_action :render_404_if_no_object\n  skip_before_action :require_auth_scope\n\n  include DbCurrentTime\n\n  def index\n    expires_in 24.hours, public: true\n    send_json discovery_doc\n  end\n\n  protected\n\n  ActionNameMap = {\n    'destroy' => 'delete',\n    'index' => 'list',\n    'show' => 'get',\n  }\n\n  HttpMethodDescriptionMap = {\n    \"DELETE\" => \"delete\",\n    \"GET\" => \"query\",\n    \"POST\" => \"update\",\n    \"PUT\" => \"create\",\n  }\n\n  ModelHumanNameMap = {\n    # The discovery document has code to humanize most model names.\n    # These are exceptions that require some capitalization.\n    \"ApiClientAuthorization\" => \"API client authorization\",\n    \"KeepService\" => \"Keep service\",\n  }\n\n  SchemaDescriptionMap = {\n    # This hash contains descriptions for everything in the schema.\n    # Schemas are looked up by their model name.\n    # Schema properties are looked up by \"{model_name}.{property_name}\"\n    # and fall back to just the property name if that doesn't exist.\n    \"ApiClientAuthorization\" => \"Arvados API client authorization token\n\nThis resource represents an API token a user may use to authenticate an\nArvados API request.\",\n    \"AuthorizedKey\" => \"Arvados authorized public key\n\nThis resource represents a public key a user may use to authenticate themselves\nto services on the cluster. Its primary use today is to store SSH keys for\nvirtual machines (\\\"shell nodes\\\"). It may be extended to store other keys in\nthe future.\",\n    \"Collection\" => \"Arvados data collection\n\nA collection describes how a set of files is stored in data blocks in Keep,\nalong with associated metadata.\",\n    \"ComputedPermission\" => \"Arvados computed permission\n\nComputed permissions do not correspond directly to any Arvados resource, but\nprovide a simple way to query the entire graph of permissions granted to\nusers and groups.\",\n    \"ContainerRequest\" => \"Arvados container request\n\nA container request represents a user's request that Arvados do some compute\nwork, along with full details about what work should be done. Arvados will\nattempt to fulfill the request by mapping it to a matching container record,\nrunning the work on demand if necessary.\",\n    \"Container\" => \"Arvados container record\n\nA container represents compute work that has been or should be dispatched,\nalong with its results. 
A container can satisfy one or more container requests.\",\n    \"Group\" => \"Arvados group\n\nGroups provide a way to organize users or data together, depending on their\n`group_class`.\",\n    \"KeepService\" => \"Arvados Keep service\n\nThis resource stores information about a single Keep service in this Arvados\ncluster that clients can contact to retrieve and store data.\",\n    \"Link\" => \"Arvados object link\n\nA link provides a way to define relationships between Arvados objects,\ndepending on their `link_class`.\",\n    \"Log\" => \"Arvados log record\n\nThis resource represents a single log record about an event in this Arvados\ncluster. Some individual Arvados services create log records. Users can also\ncreate custom logs.\",\n    \"UserAgreement\" => \"Arvados user agreement\n\nA user agreement is a collection with terms that users must agree to before\nthey can use this Arvados cluster.\",\n    \"User\" => \"Arvados user\n\nA user represents a single individual or role who may be authorized to access\nthis Arvados cluster.\",\n    \"VirtualMachine\" => \"Arvados virtual machine (\\\"shell node\\\")\n\nThis resource stores information about a virtual machine or \\\"shell node\\\"\nhosted on this Arvados cluster where users can log in and use preconfigured\nArvados client tools.\",\n    \"Workflow\" => \"Arvados workflow\n\nA workflow contains workflow definition source code that Arvados can execute\nalong with associated metadata for users.\",\n\n    # This section contains:\n    # * attributes shared across most resources\n    # * attributes shared across Collections and UserAgreements\n    # * attributes shared across Containers and ContainerRequests\n    \"command\" =>\n    \"An array of strings that defines the command that the dispatcher should\nexecute inside this container.\",\n    \"container_image\" =>\n    \"The portable data hash of the Arvados collection that contains the image\nto use for this container.\",\n    \"created_at\" => \"The time this %s was created.\",\n    \"current_version_uuid\" => \"The UUID of the current version of this %s.\",\n    \"cwd\" =>\n    \"A string that defines the working directory that the dispatcher should\nuse when it executes the command inside this container.\",\n    \"delete_at\" => \"The time this %s will be permanently deleted.\",\n    \"description\" =>\n    \"A longer HTML description of this %s assigned by a user.\nAllowed HTML tags are `a`, `b`, `blockquote`, `br`, `code`,\n`del`, `dd`, `dl`, `dt`, `em`, `h1`, `h2`, `h3`, `h4`, `h5`, `h6`, `hr`,\n`i`, `img`, `kbd`, `li`, `ol`, `p`, `pre`,\n`s`, `section`, `span`, `strong`, `sub`, `sup`, and `ul`.\",\n    \"environment\" =>\n    \"A hash of string keys and values that defines the environment variables\nfor the dispatcher to set when it executes this container.\",\n    \"file_count\" =>\n    \"The number of files represented in this %s's `manifest_text`.\nThis attribute is read-only.\",\n    \"file_size_total\" =>\n    \"The total size in bytes of files represented in this %s's `manifest_text`.\nThis attribute is read-only.\",\n    \"is_trashed\" => \"A boolean flag to indicate whether or not this %s is trashed.\",\n    \"manifest_text\" =>\n    \"The manifest text that describes how files are constructed from data blocks\nin this %s. 
Refer to the [manifest format][] reference for details.\n\n[manifest format]: https://doc.arvados.org/architecture/manifest-format.html\n\n\",\n    \"modified_at\" => \"The time this %s was last updated.\",\n    \"modified_by_user_uuid\" => \"The UUID of the user that last updated this %s.\",\n    \"mounts\" =>\n    \"A hash where each key names a directory inside this container, and its\nvalue is an object that defines the mount source for that directory. Refer\nto the [mount types reference][] for details.\n\n[mount types reference]: https://doc.arvados.org/api/methods/containers.html#mount_types\n\n\",\n    \"name\" => \"The name of this %s assigned by a user.\",\n    \"output_glob\" =>\n    \"An array of strings of shell-style glob patterns that define which file(s)\nand subdirectory(ies) under the `output_path` directory should be recorded in\nthe container's final output. Refer to the [glob patterns reference][] for details.\n\n[glob patterns reference]: https://doc.arvados.org/api/methods/containers.html#glob_patterns\n\n\",\n    \"output_path\" =>\n    \"A string that defines the file or directory path where the command\nwrites output that should be saved from this container.\",\n    \"output_properties\" =>\n\"A hash of arbitrary metadata to set on the output collection of this %s.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n\",\n    \"output_storage_classes\" =>\n    \"An array of strings identifying the storage class(es) that should be set\non the output collection of this %s. Storage classes are configured by\nthe cluster administrator.\",\n    \"owner_uuid\" => \"The UUID of the user or group that owns this %s.\",\n    \"portable_data_hash\" =>\n    \"The portable data hash of this %s. This string provides a unique\nand stable reference to these contents.\",\n    \"preserve_version\" =>\n    \"A boolean flag to indicate whether this specific version of this %s\nshould be persisted in cluster storage.\",\n    \"priority\" =>\n    \"An integer between 0 and 1000 (inclusive) that represents this %s's\nscheduling priority. 0 represents a request to be cancelled. Higher\nvalues represent higher priority. Refer to the [priority reference][] for details.\n\n[priority reference]: https://doc.arvados.org/api/methods/container_requests.html#priority\n\n\",\n    \"properties\" =>\n    \"A hash of arbitrary metadata for this %s.\nSome keys may be reserved by Arvados or defined by a configured vocabulary.\nRefer to the [metadata properties reference][] for details.\n\n[metadata properties reference]: https://doc.arvados.org/api/properties.html\n\n\",\n    \"replication_confirmed\" =>\n    \"The number of copies of data in this %s that the cluster has confirmed\nexist in storage.\",\n    \"replication_confirmed_at\" =>\n    \"The last time the cluster confirmed that it met `replication_confirmed`\nfor this %s.\",\n    \"replication_desired\" =>\n    \"The number of copies that should be made for data in this %s.\",\n    \"runtime_auth_scopes\" =>\n    \"The `scopes` from the API client authorization token used to run this %s.\",\n    \"runtime_constraints\" =>\n    \"A hash that identifies compute resources this container requires to run\nsuccessfully. 
See the [runtime constraints reference][] for details.\n\n[runtime constraints reference]: https://doc.arvados.org/api/methods/containers.html#runtime_constraints\n\n\",\n    \"runtime_token\" =>\n    \"The `api_token` from an Arvados API client authorization token that a\ndispatcher should use to set up this container.\",\n    \"runtime_user_uuid\" =>\n    \"The UUID of the Arvados user associated with the API client authorization\ntoken used to run this container.\",\n    \"secret_mounts\" =>\n    \"A hash like `mounts`, but this attribute is only available through a\ndedicated API before the container is run.\",\n    \"scheduling_parameters\" =>\n    \"A hash of scheduling parameters that should be passed to the underlying\ndispatcher when this container is run.\nSee the [scheduling parameters reference][] for details.\n\n[scheduling parameters reference]: https://doc.arvados.org/api/methods/containers.html#scheduling_parameters\n\n\",\n    \"storage_classes_desired\" =>\n    \"An array of strings identifying the storage class(es) that should be used\nfor data in this %s. Storage classes are configured by the cluster administrator.\",\n    \"storage_classes_confirmed\" =>\n    \"An array of strings identifying the storage class(es) the cluster has\nconfirmed have a copy of this %s's data.\",\n    \"storage_classes_confirmed_at\" =>\n    \"The last time the cluster confirmed that data was stored on the storage\nclass(es) in `storage_classes_confirmed`.\",\n    \"trash_at\" => \"The time this %s will be trashed.\",\n\n    \"ApiClientAuthorization.api_token\" =>\n    \"The secret token that can be used to authorize Arvados API requests.\",\n    \"ApiClientAuthorization.created_by_ip_address\" =>\n    \"The IP address of the client that created this token.\",\n    \"ApiClientAuthorization.expires_at\" =>\n    \"The time after which this token is no longer valid for authorization.\",\n    \"ApiClientAuthorization.last_used_at\" =>\n    \"The last time this token was used to authorize a request.\",\n    \"ApiClientAuthorization.last_used_by_ip_address\" =>\n    \"The IP address of the client that last used this token.\",\n    \"ApiClientAuthorization.refreshes_at\" =>\n    \"The time at which the token will be revalidated if it is a cached token issued by a remote cluster, otherwise null.\",\n    \"ApiClientAuthorization.scopes\" =>\n    \"An array of strings identifying HTTP methods and API paths this token is\nauthorized to use. Refer to the [scopes reference][] for details.\n\n[scopes reference]: https://doc.arvados.org/api/tokens.html#scopes\n\n\",\n    \"version\" =>\n    \"An integer that counts which version of a %s this record\nrepresents. Refer to [collection versioning][] for details. This attribute is\nread-only.\n\n[collection versioning]: https://doc.arvados.org/user/topics/collection-versioning.html\n\n\",\n\n    \"AuthorizedKey.authorized_user_uuid\" =>\n    \"The UUID of the Arvados user that is authorized by this key.\",\n    \"AuthorizedKey.expires_at\" =>\n    \"The time after which this key is no longer valid for authorization.\",\n    \"AuthorizedKey.key_type\" =>\n    \"A string identifying what type of service uses this key. 
Supported values are:\n\n  * `\\\"SSH\\\"`\n\n\",\n    \"AuthorizedKey.public_key\" =>\n    \"The full public key, in the format referenced by `key_type`.\",\n\n    \"ComputedPermission.user_uuid\" =>\n    \"The UUID of the Arvados user who has this permission.\",\n    \"ComputedPermission.target_uuid\" =>\n    \"The UUID of the Arvados object the user has access to.\",\n    \"ComputedPermission.perm_level\" =>\n    \"A string representing the user's level of access to the target object.\nPossible values are:\n\n  * `\\\"can_read\\\"`\n  * `\\\"can_write\\\"`\n  * `\\\"can_manage\\\"`\n\n\",\n\n    \"Container.auth_uuid\" =>\n    \"The UUID of the Arvados API client authorization token that a dispatcher\nshould use to set up this container. This token is automatically created by\nArvados and this attribute automatically assigned unless a container is\ncreated with `runtime_token`.\",\n    \"Container.cost\" =>\n    \"A float with the estimated cost of the cloud instance used to run this\ncontainer. The value is `0` if cost estimation is not available on this cluster.\",\n    \"Container.exit_code\" =>\n    \"An integer that records the Unix exit code of the `command` from a\nfinished container.\",\n    \"Container.gateway_address\" =>\n    \"A string with the address of the Arvados gateway server, in `HOST:PORT`\nformat. This is for internal use only.\",\n    \"Container.interactive_session_started\" =>\n    \"This flag is set true if any user starts an interactive shell inside the\nrunning container.\",\n    \"Container.lock_count\" =>\n    \"The number of times this container has been locked by a dispatcher. This\nmay be greater than 1 if a dispatcher locks a container but then execution is\ninterrupted for any reason.\",\n    \"Container.locked_by_uuid\" =>\n    \"The UUID of the Arvados API client authorization token that successfully\nlocked this container in preparation to execute it.\",\n    \"Container.log\" =>\n    \"The portable data hash of the Arvados collection that contains this\ncontainer's logs.\",\n    \"Container.output\" =>\n    \"The portable data hash of the Arvados collection that contains this\ncontainer's output file(s).\",\n    \"Container.progress\" =>\n    \"A float between 0.0 and 1.0 (inclusive) that represents the container's\nexecution progress. This attribute is not implemented yet.\",\n    \"Container.runtime_status\" =>\n    \"A hash with status updates from a running container.\nRefer to the [runtime status reference][] for details.\n\n[runtime status reference]: https://doc.arvados.org/api/methods/containers.html#runtime_status\n\n\",\n    \"Container.subrequests_cost\" =>\n    \"A float with the estimated cost of all cloud instances used to run this\ncontainer and all its subrequests. The value is `0` if cost estimation is not\navailable on this cluster.\",\n    \"Container.state\" =>\n    \"A string representing the container's current execution status. Possible\nvalues are:\n\n  * `\\\"Queued\\\"` --- This container has not been dispatched yet.\n  * `\\\"Locked\\\"` --- A dispatcher has claimed this container in preparation to run it.\n  * `\\\"Running\\\"` --- A dispatcher is running this container.\n  * `\\\"Cancelled\\\"` --- Container execution has been cancelled by user request.\n  * `\\\"Complete\\\"` --- A dispatcher ran this container to completion and recorded the results.\n\n\",\n    \"Container.service\" =>\n    \"A boolean flag. 
If set, it informs the system that this is a long-running container\nthat functions as a system service or web app, rather than a once-through batch operation.\",\n    \"Container.published_ports\" =>\n    \"A hash where keys are numeric TCP ports on the container which expose HTTP services.  Arvados\nwill proxy HTTP requests to these ports.  Values are hashes with the following keys:\n\n  * `\\\"access\\\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.\n  * `\\\"label\\\"` --- A human readable label describing the service, for display in Workbench.\n  * `\\\"initial_path\\\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.\",\n\n    \"ContainerRequest.auth_uuid\" =>\n    \"The UUID of the Arvados API client authorization token that a\ndispatcher should use to set up a corresponding container. This token is\nautomatically created by Arvados and this attribute automatically assigned\nunless a container request is created with `runtime_token`.\",\n    \"ContainerRequest.container_count\" =>\n    \"An integer that records how many times Arvados has attempted to dispatch\na container to fulfill this container request.\",\n    \"ContainerRequest.container_count_max\" =>\n    \"An integer that defines the maximum number of times Arvados should attempt\nto dispatch a container to fulfill this container request.\",\n    \"ContainerRequest.container_uuid\" =>\n    \"The UUID of the container that fulfills this container request, if any.\",\n    \"ContainerRequest.cumulative_cost\" =>\n    \"A float with the estimated cost of all cloud instances used to run\ncontainer(s) to fulfill this container request and their subrequests.\nThe value is `0` if cost estimation is not available on this cluster.\",\n    \"ContainerRequest.expires_at\" =>\n    \"The time after which this %s will no longer be fulfilled.\",\n    \"ContainerRequest.filters\" =>\n    \"Filters that limit which existing containers are eligible to satisfy this\ncontainer request. This attribute is not implemented yet and should be null.\",\n    \"ContainerRequest.log_uuid\" =>\n    \"The UUID of the Arvados collection that contains logs for all the\ncontainer(s) that were dispatched to fulfill this container request.\",\n    \"ContainerRequest.output_name\" =>\n    \"The name to set on the output collection of this container request.\",\n    \"ContainerRequest.output_ttl\" =>\n    \"An integer in seconds. If greater than zero, when an output collection is\ncreated for this container request, its `expires_at` attribute will be set this\nfar in the future.\",\n    \"ContainerRequest.output_uuid\" =>\n    \"The UUID of the Arvados collection that contains output for all the\ncontainer(s) that were dispatched to fulfill this container request.\",\n    \"ContainerRequest.requesting_container_uuid\" =>\n    \"The UUID of the container that created this container request, if any.\",\n    \"ContainerRequest.state\" =>\n    \"A string indicating where this container request is in its lifecycle.\nPossible values are:\n\n  * `\\\"Uncommitted\\\"` --- The container request has not been finalized and can still be edited.\n  * `\\\"Committed\\\"` --- The container request is ready to be fulfilled.\n  * `\\\"Final\\\"` --- The container request has been fulfilled or cancelled.\n\n\",\n    \"ContainerRequest.use_existing\" =>\n    \"A boolean flag. 
If set, Arvados may choose to satisfy this container\nrequest with an eligible container that already exists. Otherwise, Arvados will\nsatisfy this container request with a newer container, which will usually result\nin the container running again.\",\n    \"ContainerRequest.service\" =>\n    \"A boolean flag. If set, it informs the system that this request is for a long-running container\nthat functions as a system service or web app, rather than a once-through batch operation.\",\n    \"ContainerRequest.published_ports\" =>\n    \"A hash where keys are numeric TCP ports on the container which expose HTTP services.  Arvados\nwill proxy HTTP requests to these ports.  Values are hashes with the following keys:\n\n  * `\\\"access\\\"` --- One of 'private' or 'public' indicating if an Arvados API token is required to access the endpoint.\n  * `\\\"label\\\"` --- A human readable label describing the service, for display in Workbench.\n  * `\\\"initial_path\\\"` --- The relative path that should be included when constructing the URL that will be presented to the user in Workbench.\",\n\n    \"Group.group_class\" =>\n    \"A string representing which type of group this is. One of:\n\n  * `\\\"filter\\\"` --- A virtual project whose contents are selected dynamically by filters.\n  * `\\\"project\\\"` --- An Arvados project that can contain collections,\n    container records, workflows, and subprojects.\n  * `\\\"role\\\"` --- A group of users that can be granted permissions in Arvados.\n\n\",\n    \"Group.frozen_by_uuid\" =>\n    \"The UUID of the user that has frozen this group, if any. Frozen projects\ncannot have their contents or metadata changed, even by admins.\",\n\n    \"KeepService.service_host\" => \"The DNS hostname of this %s.\",\n    \"KeepService.service_port\" => \"The TCP port where this %s listens.\",\n    \"KeepService.service_ssl_flag\" =>\n    \"A boolean flag that indicates whether or not this %s uses TLS/SSL.\",\n    \"KeepService.service_type\" =>\n    \"A string that describes which type of %s this is. One of:\n\n  * `\\\"disk\\\"` --- A service that stores blocks on a local filesystem.\n  * `\\\"blob\\\"` --- A service that stores blocks in a cloud object store.\n  * `\\\"proxy\\\"` --- A keepproxy service.\n\n\",\n    \"KeepService.read_only\" =>\n    \"A boolean flag. If set, this %s does not accept requests to write data\nblocks; it only serves blocks it already has.\",\n\n    \"Link.head_uuid\" =>\n    \"The UUID of the Arvados object that is the target of this relationship.\",\n    \"Link.link_class\" =>\n    \"A string that defines which kind of link this is. One of:\n\n  * `\\\"permission\\\"` --- This link grants a permission to the user or group\n    referenced by `tail_uuid` to the object referenced by `head_uuid`. The\n    access level is set by `name`.\n  * `\\\"star\\\"` --- This link represents a \\\"favorite.\\\" The user referenced\n    by `tail_uuid` wants quick access to the object referenced by `head_uuid`.\n  * `\\\"tag\\\"` --- This link represents an unstructured metadata tag. The object\n    referenced by `head_uuid` has the tag defined by `name`.\n\n\",\n    \"Link.name\" =>\n    \"The primary value of this link. For `\\\"permission\\\"` links, this is one of\n`\\\"can_read\\\"`, `\\\"can_write\\\"`, or `\\\"can_manage\\\"`.\",\n    \"Link.tail_uuid\" =>\n    \"The UUID of the Arvados object that is the originator or actor in this\nrelationship. May be null.\",\n\n    \"Log.id\" =>\n    \"The serial number of this log. 
You can use this in filters to query logs\nthat were created before/after another.\",\n    \"Log.event_type\" =>\n    \"An arbitrary short string that classifies what type of log this is.\",\n    \"Log.object_owner_uuid\" =>\n    \"The `owner_uuid` of the object referenced by `object_uuid` at the time\nthis log was created.\",\n    \"Log.object_uuid\" =>\n    \"The UUID of the Arvados object that this log pertains to, such as a user\nor container.\",\n    \"Log.summary\" =>\n    \"A text string that describes the logged event. This is the primary\nattribute for simple logs.\",\n\n    \"User.email\" => \"This user's email address.\",\n    \"User.first_name\" => \"This user's first name.\",\n    \"User.identity_url\" =>\n    \"A URL that represents this user with the cluster's identity provider.\",\n    \"User.is_active\" =>\n    \"A boolean flag. If unset, this user is not permitted to make any Arvados\nAPI requests.\",\n    \"User.is_admin\" =>\n    \"A boolean flag. If set, this user is an administrator of the Arvados\ncluster, and automatically passes most permissions checks.\",\n    \"User.last_name\" => \"This user's last name.\",\n    \"User.prefs\" => \"A hash that stores cluster-wide user preferences.\",\n    \"User.username\" => \"This user's Unix username on virtual machines.\",\n\n    \"VirtualMachine.hostname\" =>\n    \"The DNS hostname where users should access this %s.\",\n\n    \"Workflow.definition\" => \"A string with the CWL source of this %s.\",\n    \"Workflow.collection_uuid\" => \"The collection this workflow is linked to, containing the definition of the workflow.\",\n\n    \"Credential.credential_class\" => \"The type of credential being stored.\",\n    \"Credential.scopes\" => \"The resources the credential applies to or should be used with.\",\n    \"Credential.external_id\" => \"The non-secret external identifier associated with a credential, e.g. a username.\",\n    \"Credential.secret\" => \"The secret part of the credential, e.g. 
a password.\",\n    \"Credential.expires_at\" => \"Date after which the credential_secret field is no longer valid.\",\n  }\n\n  def discovery_doc\n    Rails.application.eager_load!\n    remoteHosts = {}\n    Rails.configuration.RemoteClusters.each {|k,v| if k != :\"*\" then remoteHosts[k] = v[\"Host\"] end }\n    discovery = {\n      kind: \"discovery#restDescription\",\n      discoveryVersion: \"v1\",\n      id: \"arvados:v1\",\n      name: \"arvados\",\n      version: \"v1\",\n      # format is YYYYMMDD, must be fixed width (needs to be lexically\n      # sortable), updated manually, may be used by clients to\n      # determine availability of API server features.\n      revision: \"20250402\",\n      source_version: AppVersion.hash,\n      sourceVersion: AppVersion.hash, # source_version should be deprecated in the future\n      packageVersion: AppVersion.package_version,\n      generatedAt: db_current_time.iso8601,\n      title: \"Arvados API\",\n      description: \"The API to interact with Arvados.\",\n      documentationLink: \"http://doc.arvados.org/api/index.html\",\n      defaultCollectionReplication: Rails.configuration.Collections.DefaultReplication,\n      protocol: \"rest\",\n      baseUrl: root_url + \"arvados/v1/\",\n      basePath: \"/arvados/v1/\",\n      rootUrl: root_url,\n      servicePath: \"arvados/v1/\",\n      batchPath: \"batch\",\n      uuidPrefix: Rails.configuration.ClusterID,\n      defaultTrashLifetime: Rails.configuration.Collections.DefaultTrashLifetime,\n      blobSignatureTtl: Rails.configuration.Collections.BlobSigningTTL,\n      maxRequestSize: Rails.configuration.API.MaxRequestSize,\n      maxItemsPerResponse: Rails.configuration.API.MaxItemsPerResponse,\n      dockerImageFormats: Rails.configuration.Containers.SupportedDockerImageFormats.keys,\n      crunchLogUpdatePeriod: Rails.configuration.Containers.Logging.LogUpdatePeriod,\n      crunchLogUpdateSize: Rails.configuration.Containers.Logging.LogUpdateSize,\n      remoteHosts: remoteHosts,\n      remoteHostsViaDNS: Rails.configuration.RemoteClusters[\"*\"].Proxy,\n      websocketUrl: Rails.configuration.Services.Websocket.ExternalURL.to_s,\n      workbenchUrl: Rails.configuration.Services.Workbench1.ExternalURL.to_s,\n      workbench2Url: Rails.configuration.Services.Workbench2.ExternalURL.to_s,\n      keepWebServiceUrl: Rails.configuration.Services.WebDAV.ExternalURL.to_s,\n      parameters: {\n        alt: {\n          type: \"string\",\n          description: \"Data format for the response.\",\n          default: \"json\",\n          enum: [\n            \"json\"\n          ],\n          enumDescriptions: [\n            \"Responses with Content-Type of application/json\"\n          ],\n          location: \"query\"\n        },\n        fields: {\n          type: \"string\",\n          description: \"Selector specifying which fields to include in a partial response.\",\n          location: \"query\"\n        },\n        key: {\n          type: \"string\",\n          description: \"API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.\",\n          location: \"query\"\n        },\n        oauth_token: {\n          type: \"string\",\n          description: \"OAuth 2.0 token for the current user.\",\n          location: \"query\"\n        }\n      },\n      auth: {\n        oauth2: {\n          scopes: {\n            \"https://api.arvados.org/auth/arvados\" => {\n              description: \"View and manage objects\"\n            },\n            \"https://api.arvados.org/auth/arvados.readonly\" => {\n              description: \"View objects\"\n            }\n          }\n        }\n      },\n      schemas: {},\n      resources: {}\n    }\n\n    ActiveRecord::Base.descendants.reject(&:abstract_class?).sort_by(&:to_s).each do |k|\n      begin\n        ctl_class = \"Arvados::V1::#{k.to_s.pluralize}Controller\".constantize\n      rescue\n        # No controller -> no discovery.\n        next\n      end\n      human_name = ModelHumanNameMap[k.to_s] || k.to_s.underscore.humanize.downcase\n      object_properties = {}\n      k.columns.\n        select { |col| k.selectable_attributes.include? col.name }.\n        collect do |col|\n        if k.serialized_attributes.has_key? col.name\n          col_type = k.serialized_attributes[col.name].object_class.to_s\n        elsif k.attribute_types[col.name].is_a? JsonbType::Hash\n          col_type = Hash.to_s\n        elsif k.attribute_types[col.name].is_a? JsonbType::Array\n          col_type = Array.to_s\n        else\n          col_type = col.type\n        end\n        desc_fmt =\n          SchemaDescriptionMap[\"#{k}.#{col.name}\"] ||\n          SchemaDescriptionMap[col.name] ||\n          \"\"\n        if k.attribute_types[col.name].type == :datetime\n          desc_fmt += \" The string encodes a UTC date and time in ISO 8601 format.\"\n        end\n        object_properties[col.name] = {\n          description: desc_fmt % human_name,\n          type: col_type,\n        }\n      end\n      discovery[:schemas][k.to_s + 'List'] = {\n        id: k.to_s + 'List',\n        description: \"A list of #{k} objects.\",\n        type: \"object\",\n        properties: {\n          kind: {\n            type: \"string\",\n            description: \"Object type. Always arvados##{k.to_s.camelcase(:lower)}List.\",\n            default: \"arvados##{k.to_s.camelcase(:lower)}List\"\n          },\n          etag: {\n            type: \"string\",\n            description: \"List cache version.\"\n          },\n          items: {\n            type: \"array\",\n            description: \"An array of matching #{k} objects.\",\n            items: {\n              \"$ref\" => k.to_s\n            }\n          },\n        }\n      }\n      discovery[:schemas][k.to_s] = {\n        id: k.to_s,\n        description: SchemaDescriptionMap[k.to_s] || \"Arvados #{human_name}.\",\n        type: \"object\",\n        uuidPrefix: nil,\n        properties: {\n          etag: {\n            type: \"string\",\n            description: \"Object cache version.\"\n          }\n        }.merge(object_properties)\n      }\n      if k.respond_to? 
:uuid_prefix\n        discovery[:schemas][k.to_s][:uuidPrefix] ||= k.uuid_prefix\n        discovery[:schemas][k.to_s][:properties][:uuid] ||= {\n          type: \"string\",\n          description: \"This #{human_name}'s Arvados UUID, like `zzzzz-#{k.uuid_prefix}-12345abcde67890`.\"\n        }\n      end\n      discovery[:resources][k.to_s.underscore.pluralize] = {\n        methods: {\n          get: {\n            id: \"arvados.#{k.to_s.underscore.pluralize}.get\",\n            path: \"#{k.to_s.underscore.pluralize}/{uuid}\",\n            httpMethod: \"GET\",\n            description: \"Get a #{k.to_s} record by UUID.\",\n            parameters: {\n              uuid: {\n                type: \"string\",\n                description: \"The UUID of the #{k.to_s} to return.\",\n                required: true,\n                location: \"path\"\n              }\n            },\n            parameterOrder: [\n              \"uuid\"\n            ],\n            response: {\n              \"$ref\" => k.to_s\n            },\n            scopes: [\n              \"https://api.arvados.org/auth/arvados\",\n              \"https://api.arvados.org/auth/arvados.readonly\"\n            ]\n          },\n          list: {\n            id: \"arvados.#{k.to_s.underscore.pluralize}.list\",\n            path: k.to_s.underscore.pluralize,\n            httpMethod: \"GET\",\n            description: \"Retrieve a #{k.to_s}List.\",\n            parameters: {\n            },\n            response: {\n              \"$ref\" => \"#{k.to_s}List\"\n            },\n            scopes: [\n              \"https://api.arvados.org/auth/arvados\",\n              \"https://api.arvados.org/auth/arvados.readonly\"\n            ]\n          },\n          create: {\n            id: \"arvados.#{k.to_s.underscore.pluralize}.create\",\n            path: \"#{k.to_s.underscore.pluralize}\",\n            httpMethod: \"POST\",\n            description: \"Create a new #{k.to_s}.\",\n            parameters: {},\n            request: {\n              required: true,\n              properties: {\n                k.to_s.underscore => {\n                  \"$ref\" => k.to_s\n                }\n              }\n            },\n            response: {\n              \"$ref\" => k.to_s\n            },\n            scopes: [\n              \"https://api.arvados.org/auth/arvados\"\n            ]\n          },\n          update: {\n            id: \"arvados.#{k.to_s.underscore.pluralize}.update\",\n            path: \"#{k.to_s.underscore.pluralize}/{uuid}\",\n            httpMethod: \"PUT\",\n            description: \"Update attributes of an existing #{k.to_s}.\",\n            parameters: {\n              uuid: {\n                type: \"string\",\n                description: \"The UUID of the #{k.to_s} to update.\",\n                required: true,\n                location: \"path\"\n              }\n            },\n            request: {\n              required: true,\n              properties: {\n                k.to_s.underscore => {\n                  \"$ref\" => k.to_s\n                }\n              }\n            },\n            response: {\n              \"$ref\" => k.to_s\n            },\n            scopes: [\n              \"https://api.arvados.org/auth/arvados\"\n            ]\n          },\n          delete: {\n            id: \"arvados.#{k.to_s.underscore.pluralize}.delete\",\n            path: \"#{k.to_s.underscore.pluralize}/{uuid}\",\n            httpMethod: \"DELETE\",\n            description: \"Delete an existing 
#{k.to_s}.\",\n            parameters: {\n              uuid: {\n                type: \"string\",\n                description: \"The UUID of the #{k.to_s} to delete.\",\n                required: true,\n                location: \"path\"\n              }\n            },\n            response: {\n              \"$ref\" => k.to_s\n            },\n            scopes: [\n              \"https://api.arvados.org/auth/arvados\"\n            ]\n          }\n        }\n      }\n      # Check for Rails routes that don't match the usual actions\n      # listed above\n      d_methods = discovery[:resources][k.to_s.underscore.pluralize][:methods]\n      Rails.application.routes.routes.each do |route|\n        action = route.defaults[:action]\n        httpMethod = ['GET', 'POST', 'PUT', 'DELETE'].map { |method|\n          method if route.verb.match(method)\n        }.compact.first\n        if httpMethod &&\n          route.defaults[:controller] == 'arvados/v1/' + k.to_s.underscore.pluralize &&\n          ctl_class.action_methods.include?(action)\n          method_name = ActionNameMap[action] || action\n          method_key = method_name.to_sym\n          if !d_methods[method_key]\n            method = {\n              id: \"arvados.#{k.to_s.underscore.pluralize}.#{method_name}\",\n              path: route.path.spec.to_s.sub('/arvados/v1/','').sub('(.:format)','').sub(/:(uu)?id/,'{uuid}'),\n              httpMethod: httpMethod,\n              description: ctl_class.send(\"_#{method_name}_method_description\".to_sym),\n              parameters: {},\n              response: {\n                \"$ref\" => (method_name == 'list' ? \"#{k.to_s}List\" : k.to_s)\n              },\n              scopes: [\n                \"https://api.arvados.org/auth/arvados\"\n              ]\n            }\n            route.segment_keys.each do |key|\n              case key\n              when :format\n                next\n              when :id, :uuid\n                key = :uuid\n                description = \"The UUID of the #{k} to #{HttpMethodDescriptionMap[httpMethod]}.\"\n              else\n                description = \"\"\n              end\n              method[:parameters][key] = {\n                type: \"string\",\n                description: description,\n                required: true,\n                location: \"path\",\n              }\n            end\n          else\n            # We already built a generic method description, but we\n            # might find some more required parameters through\n            # introspection.\n            method = d_methods[method_key]\n          end\n          if ctl_class.respond_to? \"_#{action}_requires_parameters\".to_sym\n            ctl_class.send(\"_#{action}_requires_parameters\".to_sym).each do |l, v|\n              if v.is_a? Hash\n                method[:parameters][l] = v\n              else\n                method[:parameters][l] = {}\n              end\n              if !method[:parameters][l][:default].nil?\n                # The JAVA SDK is sensitive to all values being strings\n                method[:parameters][l][:default] = method[:parameters][l][:default].to_s\n              end\n              method[:parameters][l][:type] ||= 'string'\n              method[:parameters][l][:description] ||= ''\n              method[:parameters][l][:location] = (route.segment_keys.include?(l) ? 
'path' : 'query')\n              if method[:parameters][l][:required].nil?\n                method[:parameters][l][:required] = v != false\n              end\n            end\n          end\n          d_methods[method_key] = method\n        end\n      end\n    end\n\n    # The computed_permissions controller does not offer all of the\n    # usual methods and attributes.  Modify discovery doc accordingly.\n    discovery[:resources]['computed_permissions'][:methods].select! do |method|\n      method == :list\n    end\n    discovery[:resources]['computed_permissions'][:methods][:list][:parameters].reject! do |param|\n      [:cluster_id, :bypass_federation, :offset].include?(param)\n    end\n    discovery[:schemas]['ComputedPermission'].delete(:uuidPrefix)\n    discovery[:schemas]['ComputedPermission'][:properties].reject! do |prop|\n      [:uuid, :etag].include?(prop)\n    end\n    discovery[:schemas]['ComputedPermission'][:properties]['perm_level'][:type] = 'string'\n\n    # The 'replace_files' and 'replace_segments' options are\n    # implemented in lib/controller, not Rails -- we just need to add\n    # them here so discovery-aware clients know how to validate them.\n    [:create, :update].each do |action|\n      discovery[:resources]['collections'][:methods][action][:parameters]['replace_files'] = {\n        type: 'object',\n        description:\n          \"Add, delete, and replace files and directories with new content\nand/or content from other collections. Refer to the\n[replace_files reference][] for details.\n\n[replace_files reference]: https://doc.arvados.org/api/methods/collections.html#replace_files\n\n\",\n        required: false,\n        location: 'query',\n        properties: {},\n        additionalProperties: {type: 'string'},\n      }\n      discovery[:resources]['collections'][:methods][action][:parameters]['replace_segments'] = {\n        type: 'object',\n        description:\n          \"Replace existing block segments in the collection with new segments.\nRefer to the [replace_segments reference][] for details.\n\n[replace_segments reference]: https://doc.arvados.org/api/methods/collections.html#replace_segments\n\n\",\n        required: false,\n        location: 'query',\n        properties: {},\n        additionalProperties: {type: 'string'},\n      }\n    end\n\n    discovery[:resources]['configs'] = {\n      methods: {\n        get: {\n          id: \"arvados.configs.get\",\n          path: \"config\",\n          httpMethod: \"GET\",\n          description: \"Get this cluster's public configuration settings.\",\n          parameters: {\n          },\n          parameterOrder: [\n          ],\n          response: {\n          },\n          scopes: [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n      }\n    }\n\n    discovery[:resources]['vocabularies'] = {\n      methods: {\n        get: {\n          id: \"arvados.vocabularies.get\",\n          path: \"vocabulary\",\n          httpMethod: \"GET\",\n          description: \"Get this cluster's configured vocabulary definition.\n\nRefer to [metadata vocabulary documentation][] for details.\n\n[metadata vocabulary documentation]: https://doc.arvados.org/admin/metadata-vocabulary.html\n\n\",\n          parameters: {\n          },\n          parameterOrder: [\n          ],\n          response: {\n          },\n          scopes: [\n            \"https://api.arvados.org/auth/arvados\",\n            
\"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n      }\n    }\n\n    discovery[:resources]['sys'] = {\n      methods: {\n        get: {\n          id: \"arvados.sys.trash_sweep\",\n          path: \"sys/trash_sweep\",\n          httpMethod: \"POST\",\n          description:\n            \"Run scheduled data trash and sweep operations across this cluster's Keep services.\",\n          parameters: {\n          },\n          parameterOrder: [\n          ],\n          response: {\n          },\n          scopes: [\n            \"https://api.arvados.org/auth/arvados\",\n            \"https://api.arvados.org/auth/arvados.readonly\"\n          ]\n        },\n      }\n    }\n\n    Rails.configuration.API.DisabledAPIs.each do |method, _|\n      ctrl, action = method.to_s.split('.', 2)\n      next if ctrl.in?(['api_clients', 'job_tasks', 'jobs', 'keep_disks', 'nodes', 'pipeline_instances', 'pipeline_templates', 'repositories'])\n      discovery[:resources][ctrl][:methods].delete(action.to_sym)\n    end\n    discovery\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/user_agreements_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Arvados::V1::UserAgreementsController < ApplicationController\n  before_action :admin_required, except: [:index, :sign, :signatures]\n  skip_before_action :find_object_by_uuid, only: [:sign, :signatures]\n  skip_before_action :render_404_if_no_object, only: [:sign, :signatures]\n\n  def model_class\n    Link\n  end\n\n  def table_name\n    'links'\n  end\n\n  def limit_database_read(model_class:)\n    # Because we implement a custom index method that takes no arguments,\n    # there's nothing to limit. Explicitly override; the superclass isn't\n    # prepared for the case where model_class is not the type of @objects.\n    # This should be safe since administrators are expected to select a (very)\n    # limited number of agreements.\n    return\n  end\n\n  def index\n    if not current_user.is_invited\n      # New users cannot see user agreements until/unless invited to\n      # use this installation.\n      @objects = []\n    else\n      act_as_system_user do\n        uuids = Link.where(\"owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?\",\n                           system_user_uuid,\n                           'signature',\n                           'require',\n                           system_user_uuid,\n                           Collection.uuid_like_pattern).\n          collect(&:head_uuid)\n        @objects = Collection.where('uuid in (?)', uuids)\n      end\n    end\n    @response_resource_name = 'collection'\n    super\n  end\n\n  def self._signatures_method_description\n    \"List all user agreement signature links from a user.\"\n  end\n\n  def signatures\n    current_user_uuid = (current_user.andand.is_admin && params[:uuid]) ||\n      current_user.uuid\n    act_as_system_user do\n      @objects = Link.where(\"owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?\",\n                            system_user_uuid,\n                            'signature',\n                            'click',\n                            current_user_uuid,\n                            Collection.uuid_like_pattern)\n    end\n    @response_resource_name = 'link'\n    render_list\n  end\n\n  def self._sign_method_description\n    \"Create a signature link from the current user for a given user agreement.\"\n  end\n\n  def sign\n    current_user_uuid = current_user.uuid\n    act_as_system_user do\n      @object = Link.create(link_class: 'signature',\n                            name: 'click',\n                            tail_uuid: current_user_uuid,\n                            head_uuid: params[:uuid])\n    end\n    show\n  end\n\n  def create\n    usage_error\n  end\n\n  def update\n    usage_error\n  end\n\n  def destroy\n    usage_error\n  end\n\n  protected\n  def usage_error\n    raise ArgumentError.new \\\n    \"Manage user agreements via Collections and Links instead.\"\n  end\n  \nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/users_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Arvados::V1::UsersController < ApplicationController\n  accept_attribute_as_json :prefs, Hash\n  accept_param_as_json :updates\n\n  skip_before_action :find_object_by_uuid, only:\n    [:activate, :current, :system, :setup, :merge, :batch_update]\n  skip_before_action :render_404_if_no_object, only:\n    [:activate, :current, :system, :setup, :merge, :batch_update]\n  before_action :admin_required, only: [:setup, :unsetup, :batch_update]\n\n  # Internal API used by controller to update local cache of user\n  # records from LoginCluster.\n  def batch_update\n    @objects = []\n    # update_remote_user takes a row lock on the User record, so sort\n    # the keys so we always lock them in the same order.\n    sorted = params[:updates].keys.sort\n    sorted.each do |uuid|\n      attrs = params[:updates][uuid]\n      attrs[:uuid] = uuid\n      u = User.update_remote_user nullify_attrs(attrs)\n      @objects << u\n    end\n    @offset = 0\n    @limit = -1\n    render_list\n  end\n\n  def self._current_method_description\n    \"Return the user record associated with the API token authorizing this request.\"\n  end\n\n  def current\n    if current_user\n      @object = current_user\n      show\n    else\n      send_error(\"Not logged in\", status: 401)\n    end\n  end\n\n  def self._system_method_description\n    \"Return this cluster's system (\\\"root\\\") user record.\"\n  end\n\n  def system\n    @object = system_user\n    show\n  end\n\n  def self._activate_method_description\n    \"Set the `is_active` flag on a user record.\"\n  end\n\n  def activate\n    if params[:id] and params[:id].match(/\\D/)\n      params[:uuid] = params.delete :id\n    end\n    if current_user.andand.is_admin && params[:uuid]\n      @object = User.find_by_uuid params[:uuid]\n    else\n      @object = current_user\n    end\n    if not @object.is_active\n      if @object.uuid[0..4] == Rails.configuration.Login.LoginCluster &&\n         @object.uuid[0..4] != Rails.configuration.ClusterID\n        logger.warn \"Local user #{@object.uuid} called users#activate but only LoginCluster can do that\"\n        raise ArgumentError.new \"cannot activate user #{@object.uuid} here, only the #{@object.uuid[0..4]} cluster can do that\"\n      elsif not (current_user.is_admin or @object.is_invited)\n        logger.warn \"User #{@object.uuid} called users.activate \" +\n          \"but is not invited\"\n        raise ArgumentError.new \"Cannot activate without being invited.\"\n      end\n      act_as_system_user do\n        required_uuids = Link.where(\"owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? 
and head_uuid like ?\",\n                                    system_user_uuid,\n                                    'signature',\n                                    'require',\n                                    system_user_uuid,\n                                    Collection.uuid_like_pattern).\n          collect(&:head_uuid)\n        signed_uuids = Link.where(owner_uuid: system_user_uuid,\n                                  link_class: 'signature',\n                                  name: 'click',\n                                  tail_uuid: @object.uuid,\n                                  head_uuid: required_uuids).\n          collect(&:head_uuid)\n        todo_uuids = required_uuids - signed_uuids\n        if todo_uuids.empty?\n          @object.update is_active: true\n          logger.info \"User #{@object.uuid} activated\"\n        else\n          logger.warn \"User #{@object.uuid} called users.activate \" +\n            \"before signing agreements #{todo_uuids.inspect}\"\n          raise ArvadosModel::PermissionDeniedError.new \\\n          \"Cannot activate without user agreements #{todo_uuids.inspect}.\"\n        end\n      end\n    end\n    show\n  end\n\n  def self._setup_method_description\n    \"Convenience method to \\\"fully\\\" set up a user record with a virtual machine login and notification email.\"\n  end\n\n  # create user object and all the needed links\n  def setup\n    if params[:uuid]\n      @object = User.find_by_uuid(params[:uuid])\n      if !@object\n        return render_404_if_no_object\n      end\n    elsif !params[:user] || params[:user].empty?\n      raise ArgumentError.new \"Required uuid or user\"\n    elsif !params[:user]['email']\n      raise ArgumentError.new \"Require user email\"\n    else\n      @object = model_class.create! 
resource_attrs\n    end\n\n    @response = @object.setup(vm_uuid: params[:vm_uuid],\n                              send_notification_email: params[:send_notification_email])\n\n    send_json kind: \"arvados#HashList\", items: @response.as_api_response(nil)\n  end\n\n  def self._unsetup_method_description\n    \"Unset a user's active flag and delete associated records.\"\n  end\n\n  # delete user agreements, vm, repository, login links; set state to inactive\n  def unsetup\n    reload_object_before_update\n    @object.unsetup\n    show\n  end\n\n  def self._merge_method_description\n    \"Transfer ownership of one user's data to another.\"\n  end\n\n  def merge\n    if (params[:old_user_uuid] || params[:new_user_uuid])\n      if !current_user.andand.is_admin\n        return send_error(\"Must be admin to use old_user_uuid/new_user_uuid\", status: 403)\n      end\n      if !params[:old_user_uuid] || !params[:new_user_uuid]\n        return send_error(\"Must supply both old_user_uuid and new_user_uuid\", status: 422)\n      end\n      new_user = User.find_by_uuid(params[:new_user_uuid])\n      if !new_user\n        return send_error(\"User in new_user_uuid not found\", status: 422)\n      end\n      @object = User.find_by_uuid(params[:old_user_uuid])\n      if !@object\n        return send_error(\"User in old_user_uuid not found\", status: 422)\n      end\n    else\n      if Thread.current[:api_client_authorization].scopes != ['all']\n        return send_error(\"cannot merge with a scoped token\", status: 403)\n      end\n\n      new_auth = ApiClientAuthorization.validate(token: params[:new_user_token])\n      if !new_auth\n        return send_error(\"invalid new_user_token\", status: 401)\n      end\n\n      if new_auth.user.uuid[0..4] == Rails.configuration.ClusterID\n        if new_auth.scopes != ['all']\n          return send_error(\"supplied new_user_token has restricted scope\", status: 403)\n        end\n      end\n      new_user = new_auth.user\n      @object = current_user\n    end\n\n    if @object.uuid == new_user.uuid\n      return send_error(\"cannot merge user to self\", status: 422)\n    end\n\n    if !params[:new_owner_uuid]\n      return send_error(\"missing new_owner_uuid\", status: 422)\n    end\n\n    if !new_user.can?(write: params[:new_owner_uuid])\n      return send_error(\"cannot move objects into supplied new_owner_uuid: new user does not have write permission\", status: 403)\n    end\n\n    act_as_system_user do\n      @object.merge(new_owner_uuid: params[:new_owner_uuid],\n                    new_user_uuid: new_user.uuid,\n                    redirect_to_new_user: params[:redirect_to_new_user])\n    end\n    show\n  end\n\n  protected\n\n  def self._merge_requires_parameters\n    {\n      new_owner_uuid: {\n        type: 'string',\n        required: true,\n        description: \"UUID of the user or group that will take ownership of data owned by the old user.\",\n      },\n      new_user_token: {\n        type: 'string',\n        required: false,\n        description: \"Valid API token for the user receiving ownership. 
If you use this option, it takes ownership of data owned by the user making the request.\",\n      },\n      redirect_to_new_user: {\n        type: 'boolean',\n        required: false,\n        default: false,\n        description: \"If true, authorization attempts for the old user will be redirected to the new user.\",\n      },\n      old_user_uuid: {\n        type: 'string',\n        required: false,\n        description: \"UUID of the user whose ownership is being transferred to `new_owner_uuid`. You must be an admin to use this option.\",\n      },\n      new_user_uuid: {\n        type: 'string',\n        required: false,\n        description: \"UUID of the user receiving ownership. You must be an admin to use this option.\",\n      }\n    }\n  end\n\n  def self._setup_requires_parameters\n    {\n      uuid: {\n        type: 'string',\n        required: false,\n        description: \"UUID of an existing user record to set up.\"\n      },\n      user: {\n        type: 'object',\n        required: false,\n        description: \"Attributes of a new user record to set up.\",\n      },\n      repo_name: {\n        type: 'string',\n        required: false,\n        description: \"This parameter is obsolete and ignored.\",\n      },\n      vm_uuid: {\n        type: 'string',\n        required: false,\n        description: \"If given, setup creates a login link to allow this user to access the Arvados virtual machine with this UUID.\",\n      },\n      send_notification_email: {\n        type: 'boolean',\n        required: false,\n        default: false,\n        description: \"If true, send an email to the user notifying them they can now access this Arvados cluster.\",\n      },\n    }\n  end\n\n  def self._update_requires_parameters\n    super.merge({\n      bypass_federation: {\n        type: 'boolean',\n        required: false,\n        default: false,\n        description: \"If true, do not try to update the user on any other clusters in the federation,\nonly the cluster that received the request.\nYou must be an administrator to use this flag.\",\n      },\n    })\n  end\n\n  def apply_filters(model_class=nil)\n    return super if @read_users.any?(&:is_admin)\n    if params[:uuid] != current_user.andand.uuid\n      # Non-admin index/show returns very basic information about readable users.\n      safe_attrs = [\"uuid\", \"is_active\", \"is_admin\", \"is_invited\", \"email\", \"first_name\", \"last_name\", \"username\", \"can_write\", \"can_manage\", \"kind\"]\n      if @select\n        @select = @select & safe_attrs\n      else\n        @select = safe_attrs\n      end\n      @filters += [['is_active', '=', true]]\n    end\n    # This gets called from within find_object_by_uuid.\n    # find_object_by_uuid stores the original value of @select in\n    # @preserve_select, edits the value of @select, calls\n    # find_objects_for_index, then restores @select from the value\n    # of @preserve_select.  So if we want our updated value of\n    # @select here to stick, we have to set @preserve_select.\n    @preserve_select = @select\n    super\n  end\n\n  def nullable_attributes\n    super + [:email, :first_name, :last_name, :username]\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/virtual_machines_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Arvados::V1::VirtualMachinesController < ApplicationController\n  skip_before_action :find_object_by_uuid, :only => :get_all_logins\n  skip_before_action :render_404_if_no_object, :only => :get_all_logins\n  before_action(:admin_required,\n                :only => [:logins, :get_all_logins])\n\n  # Get all login permissons (user uuid, login account, SSH key) for a\n  # single VM\n  def self._logins_method_description\n    \"List login permission links for a given virtual machine.\"\n  end\n  \n  def logins\n    render_logins_for VirtualMachine.where(uuid: @object.uuid)\n  end\n\n  def self._get_all_logins_method_description\n    \"List login permission links for all virtual machines.\"\n  end\n\n  # Get all login permissons for all VMs\n  def get_all_logins\n    render_logins_for VirtualMachine\n  end\n\n  protected\n\n  def render_logins_for vm_query\n    @response = []\n    @vms = vm_query.eager_load :login_permissions\n    @users = {}\n    User.eager_load(:authorized_keys).\n      where('users.uuid in (?)',\n            @vms.map { |vm| vm.login_permissions.map(&:tail_uuid) }.flatten.uniq).\n      each do |u|\n      @users[u.uuid] = u\n    end\n    @vms.each do |vm|\n      vm.login_permissions.each do |perm|\n        user_uuid = perm.tail_uuid\n        next if not @users[user_uuid]\n        next if perm.properties['username'].blank?\n        aks = @users[user_uuid].authorized_keys\n        if aks.empty?\n          # We'll emit one entry, with no public key.\n          aks = [nil]\n        end\n        aks.each do |ak|\n          @response << {\n            username: perm.properties['username'],\n            hostname: vm.hostname,\n            groups: (perm.properties['groups'].to_a rescue []),\n            public_key: ak ? ak.public_key : nil,\n            user_uuid: user_uuid,\n            virtual_machine_uuid: vm.uuid,\n            authorized_key_uuid: ak ? ak.uuid : nil,\n          }\n        end\n      end\n    end\n    send_json kind: \"arvados#HashList\", items: @response\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/arvados/v1/workflows_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Arvados::V1::WorkflowsController < ApplicationController\n  def update\n    if @object.collection_uuid.nil?\n      # Only allowed to update directly when collection_uuid is nil (legacy behavior)\n      super\n    else\n      raise ArvadosModel::PermissionDeniedError.new(\"Cannot directly update Workflow records that have collection_uuid set, must update the linked collection (#{@object.collection_uuid})\")\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/database_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass DatabaseController < ApplicationController\n  skip_before_action :find_object_by_uuid\n  skip_before_action :render_404_if_no_object\n  before_action :admin_required\n  around_action :silence_logs, only: [:reset]\n\n  def reset\n    raise ArvadosModel::PermissionDeniedError unless Rails.env == 'test'\n\n    # Sanity check: If someone has actually logged in here, this might\n    # not really be a throwaway database. Client test suites should\n    # use @example.com email addresses when creating user records, so\n    # we can tell they're not valuable.\n    user_uuids = User.\n      where('email is null or (email not like ? and email not like ?)', '%@example.com', '%.example.com').\n      collect(&:uuid)\n    fnm = File.expand_path('../../../test/fixtures/users.yml', __FILE__)\n    fixture_uuids = File.open(fnm) do |f|\n      YAML.safe_load(f, filename: fnm, permitted_classes: [Time]).values.collect { |u| u['uuid'] }\n    end\n    unexpected_uuids = user_uuids - fixture_uuids\n    if unexpected_uuids.any?\n      logger.error(\"Running in test environment, but non-fixture users exist: \" +\n                   \"#{unexpected_uuids}\" + \"\\nMaybe test users without @example.com email addresses were created?\")\n      raise ArvadosModel::PermissionDeniedError\n    end\n\n    require 'active_record/fixtures'\n\n    # What kinds of fixtures do we have?\n    fixturesets = Dir.glob(Rails.root.join('test', 'fixtures', '*.yml')).\n      collect { |yml| yml.match(/([^\\/]*)\\.yml$/)[1] }\n\n    # Don't reset keep_services: clients need to discover our\n    # integration-testing keepstores, not test fixtures.\n    fixturesets -= %w[keep_services]\n\n    table_names = '\"' + ActiveRecord::Base.connection.tables.join('\",\"') + '\"'\n\n    attempts_left = 20\n    begin\n      ActiveRecord::Base.transaction do\n        # Avoid deadlock by locking all tables before doing anything\n        # drastic.\n        ActiveRecord::Base.connection.execute \\\n        \"LOCK TABLE #{table_names} IN ACCESS EXCLUSIVE MODE\"\n\n        # Delete existing fixtures (and everything else) from fixture\n        # tables\n        fixturesets.each do |x|\n          x.classify.constantize.unscoped.delete_all\n        end\n\n        # create_fixtures() is a no-op for cached fixture sets, so\n        # uncache them all.\n        ActiveRecord::FixtureSet.reset_cache\n        ActiveRecord::FixtureSet.\n          create_fixtures(Rails.root.join('test', 'fixtures'), fixturesets)\n\n        # Reset cache and global state\n        Rails.cache.clear\n        ActiveRecord::Base.connection.clear_query_cache\n\n        # Reload database seeds\n        DatabaseSeeds.install\n      end\n    rescue ActiveRecord::StatementInvalid => e\n      if \"#{e.inspect}\" =~ /deadlock detected/i and (attempts_left -= 1) > 0\n        logger.info \"Waiting for lock -- #{e.inspect}\"\n        sleep 0.5\n        retry\n      end\n      raise\n    end\n\n    require 'update_permissions'\n\n    refresh_permissions\n    refresh_trashed\n\n    # Done.\n    send_json success: true\n  end\n\n  protected\n\n  def silence_logs\n    Rails.logger.info(\"(logging level temporarily raised to :error, see #{__FILE__})\")\n    orig = ActiveRecord::Base.logger.level\n    ActiveRecord::Base.logger.level = :error\n    begin\n      yield\n    ensure\n      ActiveRecord::Base.logger.level = orig\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/static_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass StaticController < ApplicationController\n  respond_to :json, :html\n\n  skip_before_action :find_object_by_uuid\n  skip_before_action :render_404_if_no_object\n  skip_before_action :require_auth_scope, only: [:home, :empty, :login_failure]\n\n  def home\n    respond_to do |f|\n      f.html do\n        if !Rails.configuration.Services.Workbench1.ExternalURL.to_s.empty?\n          redirect_to Rails.configuration.Services.Workbench1.ExternalURL.to_s, allow_other_host: true\n        else\n          render_not_found \"Oops, this is an API endpoint. You probably want to point your browser to an Arvados Workbench site instead.\"\n        end\n      end\n      f.json do\n        render_not_found \"Path not found.\"\n      end\n    end\n  end\n\n  def empty\n    render plain: \"\"\n  end\n\nend\n"
  },
  {
    "path": "services/api/app/controllers/sys_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass SysController < ApplicationController\n  skip_before_action :find_object_by_uuid\n  skip_before_action :render_404_if_no_object\n  before_action :admin_required\n\n  def trash_sweep\n    act_as_system_user do\n      # Sweep trashed collections\n      Collection.\n        where('delete_at is not null and delete_at < statement_timestamp()').\n        in_batches(of: 15).\n        destroy_all\n      Collection.\n        where('is_trashed = false and trash_at < statement_timestamp()').\n        in_batches(of: 15).\n        update_all('is_trashed = true')\n\n      # Want to make sure the #update_trash hook on the Group class\n      # runs.  It does a couple of important things:\n      #\n      # - For projects, puts all the subprojects in the trashed_groups table.\n      #\n      # - For role groups, outbound permissions are deleted.\n      Group.\n        where(\"is_trashed = false and trash_at < statement_timestamp()\").each do |grp|\n        grp.is_trashed = true\n        grp.save\n      end\n\n      # Sweep groups and their contents that are ready to be deleted\n      Group.\n        where('delete_at is not null and delete_at < statement_timestamp()').each do |group|\n          delete_project_and_contents(group.uuid)\n      end\n\n      # Sweep expired tokens\n      ActiveRecord::Base.connection.execute(\"DELETE from api_client_authorizations where expires_at <= statement_timestamp()\")\n      ActiveRecord::Base.connection.execute(\"DELETE from api_client_authorizations where refreshes_at <= statement_timestamp()\")\n\n      # Sweep unused uuid_locks entries\n      ActiveRecord::Base.connection.execute(\"DELETE FROM uuid_locks WHERE uuid IN (SELECT uuid FROM uuid_locks FOR UPDATE SKIP LOCKED)\")\n\n      # forget expired credential secrets\n      Credential.\n        where(\"expires_at < statement_timestamp() and secret != ''\").\n        update_all(\"secret = ''\")\n    end\n    head :no_content\n  end\n\n  protected\n\n  def delete_project_and_contents(p_uuid)\n    p = Group.find_by_uuid(p_uuid)\n    if !p\n      raise \"can't sweep group '#{p_uuid}', it may not exist\"\n    end\n    if p.group_class == 'project'\n      # First delete sub projects and owned filter groups\n      Group.where({owner_uuid: p_uuid}).each do |sub_project|\n        delete_project_and_contents(sub_project.uuid)\n      end\n      # Next, iterate over all tables which have owner_uuid fields, with some\n      # exceptions, and delete records owned by this project\n      skipped_classes = ['Group', 'User']\n      ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|\n        if !skipped_classes.include?(klass.name) && klass.columns.collect(&:name).include?('owner_uuid')\n          klass.where({owner_uuid: p_uuid}).in_batches(of: 15).destroy_all\n        end\n      end\n    end\n    # Finally delete the group itself\n    p.destroy\n  end\nend\n"
  },
  {
    "path": "services/api/app/controllers/user_sessions_controller.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass UserSessionsController < ApplicationController\n  before_action :require_auth_scope, :only => [ :destroy ]\n\n  skip_before_action :set_cors_headers\n  skip_before_action :find_object_by_uuid\n  skip_before_action :render_404_if_no_object\n\n  respond_to :html\n\n  def login\n    return send_error \"Legacy code path no longer supported\", status: 404\n  end\n\n  def logout\n    return send_error \"Legacy code path no longer supported\", status: 404\n  end\n\n  # create a new session\n  def create\n    remote, return_to_url = params[:return_to].split(',', 2)\n    if params[:provider] != 'controller' ||\n       return_to_url != 'https://controller.api.client.invalid'\n      return send_error \"Legacy code path no longer supported\", status: 404\n    end\n    if request.headers['Authorization'] != 'Bearer ' + Rails.configuration.SystemRootToken\n      return send_error('Invalid authorization header', status: 401)\n    end\n    if remote == ''\n      remote = nil\n    elsif remote !~ /^[0-9a-z]{5}$/\n      return send_error 'Invalid remote cluster id', status: 400\n    end\n    # arvados-controller verified the user and is passing auth_info\n    # in request params.\n    authinfo = SafeJSON.load(params[:auth_info])\n    max_expires_at = authinfo[\"expires_at\"]\n\n    if !authinfo['user_uuid'].blank?\n      user = User.find_by_uuid(authinfo['user_uuid'])\n      if !user\n        Rails.logger.warn \"Nonexistent user_uuid in authinfo #{authinfo.inspect}\"\n        return redirect_to login_failure_url\n      end\n    else\n      begin\n        user = User.register(authinfo)\n      rescue => e\n        Rails.logger.warn \"User.register error #{e}\"\n        Rails.logger.warn \"authinfo was #{authinfo.inspect}\"\n        return redirect_to login_failure_url\n      end\n    end\n\n    # For the benefit of functional and integration tests:\n    @user = user\n\n    # prevent ArvadosModel#before_create and _update from throwing\n    # \"unauthorized\":\n    Thread.current[:user] = user\n\n    user.save or raise Exception.new(user.errors.messages)\n\n    return send_api_token_to(return_to_url, user, remote, max_expires_at)\n  end\n\n  # Omniauth failure callback\n  def failure\n    flash[:notice] = params[:message]\n  end\n\n  def send_api_token_to(callback_url, user, remote=nil, token_expiration=nil)\n    # Give the API client a token for making API calls on behalf of\n    # the authenticated user\n\n    if Rails.configuration.Login.TokenLifetime > 0\n      if token_expiration == nil\n        token_expiration = db_current_time + Rails.configuration.Login.TokenLifetime\n      else\n        token_expiration = [token_expiration, db_current_time + Rails.configuration.Login.TokenLifetime].min\n      end\n    end\n\n    @api_client_auth = ApiClientAuthorization.\n      new(user: user,\n          created_by_ip_address: remote_ip,\n          expires_at: token_expiration,\n          scopes: [\"all\"])\n    @api_client_auth.save!\n\n    if callback_url.index('?')\n      callback_url += '&'\n    else\n      callback_url += '?'\n    end\n    if remote.nil?\n      token = @api_client_auth.token\n    else\n      token = @api_client_auth.salted_token(remote: remote)\n    end\n    callback_url += 'api_token=' + token\n    redirect_to callback_url, allow_other_host: true\n  end\n\n  def cross_origin_forbidden\n    send_error 'Forbidden', status: 403\n  end\nend\n"
  },
  {
    "path": "services/api/app/helpers/application_helper.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule ApplicationHelper\n  include CurrentApiClient\nend\n"
  },
  {
    "path": "services/api/app/mailers/.gitkeep",
    "content": ""
  },
  {
    "path": "services/api/app/mailers/admin_notifier.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AdminNotifier < ActionMailer::Base\n  include AbstractController::Callbacks\n\n  default from: Rails.configuration.Users.AdminNotifierEmailFrom\n\n  def new_user(user)\n    @user = user\n    if not Rails.configuration.Users.NewUserNotificationRecipients.empty? then\n      @recipients = Rails.configuration.Users.NewUserNotificationRecipients.keys\n      logger.info \"Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)\"\n\n      add_to_subject = ''\n      if Rails.configuration.Users.AutoSetupNewUsers\n        add_to_subject = @user.is_invited ? ' and setup' : ', but not setup'\n      end\n\n      mail(to: @recipients,\n           subject: \"#{Rails.configuration.Users.EmailSubjectPrefix}New user created#{add_to_subject} notification\"\n          )\n    end\n  end\n\n  def new_inactive_user(user)\n    @user = user\n    if not Rails.configuration.Users.NewInactiveUserNotificationRecipients.empty? then\n      @recipients = Rails.configuration.Users.NewInactiveUserNotificationRecipients.keys\n      logger.info \"Sending mail to #{@recipients} about new user #{@user.uuid} (#{@user.full_name} <#{@user.email}>)\"\n      mail(to: @recipients,\n           subject: \"#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification\"\n          )\n    end\n  end\n\nend\n"
  },
  {
    "path": "services/api/app/mailers/profile_notifier.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass ProfileNotifier < ActionMailer::Base\n  default from: Rails.configuration.Users.AdminNotifierEmailFrom\n\n  def profile_created(user, address)\n    @user = user\n    mail(to: address, subject: \"Profile created by #{@user.email}\")\n  end\nend\n"
  },
  {
    "path": "services/api/app/mailers/user_notifier.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass UserNotifier < ActionMailer::Base\n  include AbstractController::Callbacks\n\n  default from: Rails.configuration.Users.UserNotifierEmailFrom\n\n  def account_is_setup(user)\n    @user = user\n    if not Rails.configuration.Users.UserNotifierEmailBcc.empty? then\n      @bcc = Rails.configuration.Users.UserNotifierEmailBcc.keys\n      mail(to: user.email, subject: 'Welcome to Arvados - account enabled', bcc: @bcc)\n    else\n      mail(to: user.email, subject: 'Welcome to Arvados - account enabled')\n    end\n  end\n\nend\n"
  },
  {
    "path": "services/api/app/middlewares/arvados_api_token.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Perform api_token checking very early in the request process.  We want to do\n# this in the Rack stack instead of in ApplicationController because\n# websockets needs access to authentication but doesn't use any of the rails\n# active dispatch infrastructure.\nclass ArvadosApiToken\n\n  # Create a new ArvadosApiToken handler\n  # +app+  The next layer of the Rack stack.\n  def initialize(app = nil, options = nil)\n    @app = app.respond_to?(:call) ? app : nil\n  end\n\n  def call env\n    request = Rack::Request.new(env)\n    params = request.params\n    remote_ip = env[\"action_dispatch.remote_ip\"]\n\n    Thread.current[:request_starttime] = Time.now\n\n    remote = false\n    reader_tokens = nil\n    if params[\"remote\"] && request.get? && (\n         request.path.start_with?('/arvados/v1/groups') ||\n         request.path.start_with?('/arvados/v1/api_client_authorizations/current') ||\n         request.path.start_with?('/arvados/v1/users/current'))\n      # Request from a remote API server, asking to validate a salted\n      # token.\n      remote = params[\"remote\"]\n    elsif request.get? || params[\"_method\"] == 'GET'\n      reader_tokens = params[\"reader_tokens\"]\n      if reader_tokens.is_a? String\n        reader_tokens = SafeJSON.load(reader_tokens)\n      end\n    end\n\n    # Set current_user etc. based on the primary session token if a\n    # valid one is present. Otherwise, use the first valid token in\n    # reader_tokens.\n    accepted = false\n    auth = nil\n    remote_errcodes = []\n    remote_errmsgs = []\n    [params[\"api_token\"],\n     params[\"oauth_token\"],\n     env[\"HTTP_AUTHORIZATION\"].andand.match(/(OAuth2|Bearer) ([!-~]+)/).andand[2],\n     *reader_tokens,\n    ].each do |supplied|\n      next if !supplied\n      begin\n        try_auth = ApiClientAuthorization.validate(token: supplied, remote: remote)\n      rescue => e\n        begin\n          remote_errcodes.append(e.http_status)\n        rescue NoMethodError\n          # The exception is an internal validation problem, not a remote error.\n          next\n        end\n        begin\n          errors = SafeJSON.load(e.res.content)[\"errors\"]\n        rescue\n          errors = nil\n        end\n        remote_errmsgs += errors if errors.is_a?(Array)\n      else\n        if try_auth.andand.user\n          auth = try_auth\n          accepted = supplied\n          break\n        end\n      end\n    end\n\n    Thread.current[:api_client_ip_address] = remote_ip\n    Thread.current[:api_client_authorization] = auth\n    Thread.current[:token] = accepted\n    Thread.current[:user] = auth.andand.user\n\n    if auth.nil? and not remote_errcodes.empty?\n      # If we failed to validate any tokens because of remote validation\n      # errors, pass those on to the client. This code is functionally very\n      # similar to ApplicationController#render_error, but the implementation\n      # is very different because we're a Rack middleware, not in\n      # ActionDispatch land yet.\n      remote_errmsgs.prepend(\"failed to validate remote token\")\n      error_content = {\n        error_token: \"%d+%08x\" % [Time.now.utc.to_i, rand(16 ** 8)],\n        errors: remote_errmsgs,\n      }\n      [\n        remote_errcodes.max,\n        {\"Content-Type\": \"application/json\"},\n        SafeJSON.dump(error_content).html_safe,\n      ]\n    else\n      @app.call env if @app\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/app/models/.gitkeep",
    "content": ""
  },
  {
    "path": "services/api/app/models/api_client_authorization.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass ApiClientAuthorization < ArvadosModel\n  include HasUuid\n  include KindAndEtag\n  include CommonApiTemplate\n  include Rails.application.routes.url_helpers\n  extend CurrentApiClient\n  extend DbCurrentTime\n\n  belongs_to :user, optional: true\n  after_initialize :assign_random_api_token\n  serialize :scopes, Array\n\n  before_validation :clamp_token_expiration\n\n  api_accessible :user, extend: :common do |t|\n    t.add :owner_uuid\n    # NB the \"api_token\" db column is a misnomer in that it's only the\n    # \"secret\" part of a token: a v1 token is just the secret, but a\n    # v2 token is \"v2/uuid/secret\".\n    t.add :api_token\n    t.add :created_by_ip_address\n    t.add :expires_at\n    t.add :last_used_at\n    t.add :last_used_by_ip_address\n    t.add :scopes\n  end\n\n  UNLOGGED_CHANGES = ['last_used_at', 'last_used_by_ip_address', 'updated_at']\n\n  def assign_random_api_token\n    begin\n      self.api_token ||= rand(2**256).to_s(36)\n    rescue ActiveModel::MissingAttributeError\n      # Ignore the case where self.api_token doesn't exist, which happens when\n      # the select=[...] is used.\n    end\n  end\n\n  def owner_uuid\n    self.user.andand.uuid\n  end\n  def owner_uuid_was\n    self.user_id_changed? ? User.where(id: self.user_id_was).first.andand.uuid : self.user.andand.uuid\n  end\n  def owner_uuid_changed?\n    self.user_id_changed?\n  end\n\n  def modified_by_user_uuid\n    nil\n  end\n  def modified_by_user_uuid=(x) end\n\n  def modified_at\n    nil\n  end\n  def modified_at=(x) end\n\n  def scopes_allow?(req_s)\n    scopes.each do |scope|\n      return true if (scope == 'all') or (scope == req_s) or\n        ((scope.end_with? '/') and (req_s.start_with? 
scope))\n    end\n    false\n  end\n\n  def scopes_allow_request?(request)\n    method = request.request_method\n    if method == 'GET' and request.path == url_for(controller: 'arvados/v1/api_client_authorizations', action: 'current', only_path: true)\n      true\n    elsif method == 'HEAD'\n      (scopes_allow?(['HEAD', request.path].join(' ')) ||\n       scopes_allow?(['GET', request.path].join(' ')))\n    else\n      scopes_allow?([method, request.path].join(' '))\n    end\n  end\n\n  def logged_attributes\n    super.except 'api_token'\n  end\n\n  def self.default_orders\n    [\"#{table_name}.id desc\"]\n  end\n\n  def self.remote_host(uuid_prefix:)\n    (Rails.configuration.RemoteClusters[uuid_prefix].andand[\"Host\"]) ||\n      (Rails.configuration.RemoteClusters[\"*\"][\"Proxy\"] &&\n       uuid_prefix+\".arvadosapi.com\")\n  end\n\n  def self.make_http_client(uuid_prefix:)\n    clnt = HTTPClient.new\n\n    if uuid_prefix && (Rails.configuration.RemoteClusters[uuid_prefix].andand.Insecure ||\n                       Rails.configuration.RemoteClusters['*'].andand.Insecure)\n      clnt.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE\n    else\n      # Use system CA certificates\n      [\"/etc/ssl/certs/ca-certificates.crt\",\n       \"/etc/pki/tls/certs/ca-bundle.crt\"]\n        .select { |ca_path| File.readable?(ca_path) }\n        .each { |ca_path| clnt.ssl_config.add_trust_ca(ca_path) }\n    end\n    clnt\n  end\n\n  def self.check_anonymous_user_token(token:, remote:)\n    case token[0..2]\n    when 'v2/'\n      _, token_uuid, secret, optional = token.split('/')\n      unless token_uuid.andand.length == 27 && secret.andand.length.andand > 0 &&\n             token_uuid == Rails.configuration.ClusterID+\"-gj3su-anonymouspublic\"\n        # invalid v2 token, or v2 token for another user\n        return nil\n      end\n    else\n      # v1 token\n      secret = token\n    end\n\n    # Usually, the secret is salted\n    salted_secret = OpenSSL::HMAC.hexdigest('sha1', Rails.configuration.Users.AnonymousUserToken, remote)\n\n    # The anonymous token could be specified as a full v2 token in the config,\n    # but the config loader strips it down to the secret part.\n    # The anonymous token content and minimum length is verified in lib/config\n    if secret.length >= 0 && (secret == Rails.configuration.Users.AnonymousUserToken || secret == salted_secret)\n      return ApiClientAuthorization.new(user: User.find_by_uuid(anonymous_user_uuid),\n                                        uuid: Rails.configuration.ClusterID+\"-gj3su-anonymouspublic\",\n                                        api_token: secret,\n                                        scopes: ['GET /'])\n    else\n      return nil\n    end\n  end\n\n  def self.check_system_root_token token\n    if token == Rails.configuration.SystemRootToken\n      return ApiClientAuthorization.new(user: User.find_by_uuid(system_user_uuid),\n                                        uuid: Rails.configuration.ClusterID+\"-gj3su-000000000000000\",\n                                        api_token: token)\n    else\n      return nil\n    end\n  end\n\n  def self.validate(token:, remote: nil)\n    return nil if token.nil? 
or token.empty?\n    remote ||= Rails.configuration.ClusterID\n\n    auth = self.check_anonymous_user_token(token: token, remote: remote)\n    if !auth.nil?\n      return auth\n    end\n\n    auth = self.check_system_root_token(token)\n    if !auth.nil?\n      return auth\n    end\n\n    token_uuid = ''\n    secret = token\n    stored_secret = nil         # ...if different from secret\n    optional = nil\n\n    case token[0..2]\n    when 'v2/'\n      _, token_uuid, secret, optional = token.split('/')\n      unless token_uuid.andand.length == 27 && secret.andand.length.andand > 0\n        return nil\n      end\n\n      if !optional.nil?\n        # if \"optional\" is a container uuid, check that it\n        # matches expectations.\n        c = Container.where(uuid: optional).first\n        if !c.nil?\n          if !c.auth_uuid.nil? and c.auth_uuid != token_uuid\n            # token doesn't match the container's token\n            return nil\n          end\n          if !c.runtime_token.nil? and \"v2/#{token_uuid}/#{secret}\" != c.runtime_token\n            # token doesn't match the container's token\n            return nil\n          end\n          if ![Container::Locked, Container::Running].include?(c.state)\n            # container isn't locked or running, token shouldn't be used\n            return nil\n          end\n        end\n      end\n\n      # fast path: look up the token in the local database\n      auth = ApiClientAuthorization.\n             includes(:user).\n             where('uuid=?', token_uuid).\n             where('expires_at is null or expires_at > CURRENT_TIMESTAMP').\n             where('refreshes_at is null or refreshes_at > CURRENT_TIMESTAMP').\n             first\n      if auth && auth.user &&\n         (secret == auth.api_token ||\n          secret == OpenSSL::HMAC.hexdigest('sha1', auth.api_token, remote))\n        # found it\n        if token_uuid[0..4] != Rails.configuration.ClusterID\n          Rails.logger.debug \"found cached remote token #{token_uuid} with secret #{secret} in local db\"\n        end\n        return auth\n      end\n\n      upstream_cluster_id = token_uuid[0..4]\n      if upstream_cluster_id == Rails.configuration.ClusterID\n        # Token is supposedly issued by local cluster, but if the\n        # token were valid, it would have been found in the database\n        # by the above query.\n        return nil\n      elsif upstream_cluster_id.length != 5\n        # malformed\n        return nil\n      end\n\n    else\n      # token is not a 'v2' token. It could be just the secret part\n      # (\"v1 token\") -- or it could be an OpenIDConnect access token,\n      # in which case either (a) the controller will have inserted a\n      # row with api_token = hmac(systemroottoken,oidctoken) before\n      # forwarding it, or (b) we'll have done that ourselves, or (c)\n      # we'll need to ask LoginCluster to validate it for us below,\n      # and then insert a local row for a faster lookup next time.\n      hmac = OpenSSL::HMAC.hexdigest('sha256', Rails.configuration.SystemRootToken, token)\n      auth = ApiClientAuthorization.\n               includes(:user).\n               where('api_token in (?, ?)', token, hmac).\n               where('expires_at is null or expires_at > CURRENT_TIMESTAMP').\n               where('refreshes_at is null or refreshes_at > CURRENT_TIMESTAMP').\n               first\n      if auth && auth.user\n        return auth\n      elsif !Rails.configuration.Login.LoginCluster.blank? 
&& Rails.configuration.Login.LoginCluster != Rails.configuration.ClusterID\n        # An unrecognized non-v2 token might be an OIDC Access Token\n        # that can be verified by our login cluster in the code\n        # below. If so, we'll stuff the database with hmac instead of\n        # the real OIDC token.\n        upstream_cluster_id = Rails.configuration.Login.LoginCluster\n        stored_secret = hmac\n      else\n        return nil\n      end\n    end\n\n    # Invariant: upstream_cluster_id != Rails.configuration.ClusterID\n    #\n    # In other words the remaining code in this method decides\n    # whether to accept a token that was issued by a remote cluster\n    # when the token is absent or expired in our database.  To\n    # begin, we need to ask the cluster that issued the token to\n    # [re]validate it.\n    clnt = ApiClientAuthorization.make_http_client(uuid_prefix: upstream_cluster_id)\n\n    host = remote_host(uuid_prefix: upstream_cluster_id)\n    if !host\n      Rails.logger.warn \"remote authentication rejected: no host for #{upstream_cluster_id.inspect}\"\n      return nil\n    end\n    remote_url = URI::parse(\"https://#{host}/\")\n    remote_query = {\"remote\" => Rails.configuration.ClusterID}\n    remote_headers = {\"Authorization\" => \"Bearer #{token}\"}\n\n    # First get the current token. This query is not limited by token scopes,\n    # and tells us the user's UUID via owner_uuid, so this gives us enough\n    # information to load a local user record from the database if one exists.\n    remote_token = nil\n    begin\n      remote_token = SafeJSON.load(\n        clnt.get_content(\n          remote_url.merge(\"arvados/v1/api_client_authorizations/current\"),\n          remote_query, remote_headers,\n        ))\n      Rails.logger.debug \"retrieved remote token #{remote_token.inspect}\"\n      token_uuid = remote_token['uuid']\n      if !token_uuid.match(HasUuid::UUID_REGEX) || token_uuid[0..4] != upstream_cluster_id\n        raise \"remote cluster #{upstream_cluster_id} returned invalid token uuid #{token_uuid.inspect}\"\n      end\n    rescue HTTPClient::BadResponseError => e\n      if e.res.status_code >= 400 && e.res.status_code < 500\n        # Remote cluster does not accept this token.\n        return nil\n      end\n      # CurrentApiToken#call and ApplicationController#render_error will\n      # propagate the status code from the #http_status method, so define\n      # that here.\n      def e.http_status\n        self.res.status_code\n      end\n      raise\n    # TODO #20927: Catch network exceptions and assign a 5xx status to them so\n    # the client knows they're a temporary problem.\n    rescue => e\n      Rails.logger.warn \"error getting remote token details for #{token.inspect}: #{e}\"\n      return nil\n    end\n\n    # Next, load the token's user record from the database (might be nil).\n    remote_user_prefix, remote_user_suffix = remote_token['owner_uuid'].split('-', 2)\n    if anonymous_user_uuid.end_with?(remote_user_suffix)\n      # Special case: map the remote anonymous user to local anonymous user\n      remote_user_uuid = anonymous_user_uuid\n    else\n      remote_user_uuid = remote_token['owner_uuid']\n    end\n    user = User.find_by_uuid(remote_user_uuid)\n\n    # Next, try to load the remote user. 
If this succeeds, we'll use this\n    # information to update/create the local database record as necessary.\n    # If this fails for any reason, but we successfully loaded a user record\n    # from the database, we'll just rely on that information.\n    remote_user = nil\n    begin\n      remote_user = SafeJSON.load(\n        clnt.get_content(\n          remote_url.merge(\"arvados/v1/users/current\"),\n          remote_query, remote_headers,\n        ))\n    rescue HTTPClient::BadResponseError => e\n      # If user is defined, we will use that alone for auth, see below.\n      if user.nil?\n        # See rationale in the previous BadResponseError rescue.\n        def e.http_status\n          self.res.status_code\n        end\n        raise\n      end\n    # TODO #20927: Catch network exceptions and assign a 5xx status to them so\n    # the client knows they're a temporary problem.\n    rescue => e\n      Rails.logger.warn \"getting remote user with token #{token.inspect} failed: #{e}\"\n    else\n      # Check the response is well formed.\n      if !remote_user.is_a?(Hash) || !remote_user['uuid'].is_a?(String)\n        Rails.logger.warn \"malformed remote user=#{remote_user.inspect}\"\n        remote_user = nil\n      # Clusters can only authenticate for their own users.\n      elsif remote_user_prefix != upstream_cluster_id\n        Rails.logger.warn \"remote user rejected: claimed remote user #{remote_user_prefix} but token was issued by #{upstream_cluster_id}\"\n        remote_user = nil\n      # Force our local copy of a remote root to have a static name\n      elsif system_user_uuid.end_with?(remote_user_suffix)\n        remote_user.update(\n          \"first_name\" => \"root\",\n          \"last_name\" => \"from cluster #{remote_user_prefix}\",\n        )\n      end\n    end\n\n    if user.nil? and remote_user.nil?\n      Rails.logger.warn \"remote token #{token.inspect} rejected: cannot get owner #{remote_user_uuid} from database or remote cluster\"\n      return nil\n    end\n\n    # Invariant:    remote_user_prefix == upstream_cluster_id\n    # therefore:    remote_user_prefix != Rails.configuration.ClusterID\n    # Add or update user and token in local database so we can\n    # validate subsequent requests faster.\n\n    act_as_system_user do\n      if remote_user && remote_user_uuid != anonymous_user_uuid\n        # Sync user record if we loaded a remote user.\n        user = User.update_remote_user remote_user\n      end\n\n      # If stored_secret is set, we save stored_secret in the database\n      # but return the real secret to the caller. This way, if we end\n      # up returning the auth record to the client, they see the same\n      # secret they supplied, instead of the HMAC we saved in the\n      # database.\n      stored_secret = stored_secret || secret\n\n      # We will accept this token (and avoid reloading the user\n      # record) for at most 'RemoteTokenRefresh' (default 5 minutes).\n      cache_expires = remote_token.andand['expires_at']\n      cache_refreshes = db_current_time + Rails.configuration.Login.RemoteTokenRefresh\n      scopes = remote_token.andand['scopes'] || ['all']\n      retries = 0\n      begin\n        # In older versions of Rails, `find_or_create_by` did not try to\n        # address race conditions, and the rescue logic below expects that\n        # behavior.  
This block reimplements the old method so we can handle\n        # races ourselves.\n        if auth = ApiClientAuthorization.find_by(uuid: token_uuid)\n          auth.update!(\n            user: user,\n            api_token: stored_secret,\n            scopes: scopes,\n            expires_at: cache_expires,\n            refreshes_at: cache_refreshes,\n          )\n        else\n          auth = ApiClientAuthorization.create(uuid: token_uuid) do |auth|\n            auth.user = user\n            auth.api_token = stored_secret\n            auth.scopes = scopes\n            auth.expires_at = cache_expires\n            auth.refreshes_at = cache_refreshes\n          end\n        end\n      rescue ActiveRecord::RecordNotUnique\n        Rails.logger.debug(\"cached remote token #{token_uuid} already exists, retrying...\")\n        # Another request won the race (trying to find_or_create the\n        # same token UUID) ...and/or... there is an expired entry with\n        # the same secret but a different UUID (e.g., the token is an\n        # OIDC access token and [a] our database has an expired cached\n        # row that was not used above, and [b] the remote cluster had\n        # deleted its expired cached row so it assigned a new UUID).\n        #\n        # Delete the conflicting row, if any. Retry twice (in case we\n        # hit both of those situations at once), then give up.\n        if (retries += 1) <= 2\n          ApiClientAuthorization.where('api_token=? and uuid<>?', stored_secret, token_uuid).delete_all\n          retry\n        else\n          Rails.logger.warn(\"cannot find or create cached remote token #{token_uuid}\")\n          return nil\n        end\n      end\n      Rails.logger.debug \"cached remote token #{token_uuid} with secret #{stored_secret} and scopes #{scopes} in local db\"\n      auth.api_token = secret\n      return auth\n    end\n\n    return nil\n  end\n\n  def token\n    v2token\n  end\n\n  def v1token\n    api_token\n  end\n\n  def v2token\n    'v2/' + uuid + '/' + api_token\n  end\n\n  def salted_token(remote:)\n    if remote.nil?\n      return token\n    end\n    'v2/' + uuid + '/' + OpenSSL::HMAC.hexdigest('sha1', api_token, remote)\n  end\n\n  protected\n\n  def clamp_token_expiration\n    if Rails.configuration.API.MaxTokenLifetime > 0\n      max_token_expiration = db_current_time + Rails.configuration.API.MaxTokenLifetime\n      if (self.new_record? || self.expires_at_changed?) && (self.expires_at.nil? || (self.expires_at > max_token_expiration && !current_user.andand.is_admin))\n        self.expires_at = max_token_expiration\n      end\n    end\n  end\n\n  def permission_to_create\n    current_user.andand.is_admin or (current_user.andand.id == self.user_id)\n  end\n\n  def permission_to_update\n    permission_to_create && !uuid_changed? &&\n      (current_user.andand.is_admin || !user_id_changed?)\n  end\n\n  def log_update\n    super unless (saved_changes.keys - UNLOGGED_CHANGES).empty?\n  end\nend\n"
  },
  {
    "path": "services/api/app/models/application_record.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass ApplicationRecord < ActiveRecord::Base\n  self.abstract_class = true\nend"
  },
  {
    "path": "services/api/app/models/arvados_model.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'arvados_model_updates'\nrequire 'has_uuid'\nrequire 'record_filters'\nrequire 'serializers'\nrequire 'request_error'\n\nclass ArvadosModel < ApplicationRecord\n  self.abstract_class = true\n\n  include ArvadosModelUpdates\n  include CurrentApiClient      # current_user, current_api_client_authorization, etc.\n  include DbCurrentTime\n  extend RecordFilters\n\n  after_find :schedule_restoring_changes\n  after_find :type_check_serialized_attributes\n  after_initialize :log_start_state\n  before_save :ensure_permission_to_save\n  before_save :ensure_owner_uuid_is_permitted\n  before_save :ensure_ownership_path_leads_to_user\n  before_destroy :ensure_owner_uuid_is_permitted\n  before_destroy :ensure_permission_to_destroy\n  before_create :update_modified_by_fields\n  before_create :add_uuid_to_name, :if => Proc.new { @_add_uuid_to_name }\n  before_update :maybe_update_modified_by_fields\n  after_create :log_create\n  after_update :log_update\n  after_destroy :log_destroy\n  before_validation :normalize_collection_uuids\n  before_validation :set_default_owner\n  validate :ensure_valid_uuids\n\n  # Note: This only returns permission links. It does not account for\n  # permissions obtained via user.is_admin or\n  # user.uuid==object.owner_uuid.\n  has_many(:permissions,\n           ->{where(link_class: 'permission')},\n           foreign_key: 'head_uuid',\n           class_name: 'Link',\n           primary_key: 'uuid')\n\n  # If async is true at create or update, permission graph\n  # update is deferred allowing making multiple calls without the performance\n  # penalty.\n  attr_accessor :async_permissions_update\n\n  # Ignore listed attributes on mass assignments\n  def self.protected_attributes\n    []\n  end\n\n  class PermissionDeniedError < RequestError\n    def http_status\n      403\n    end\n  end\n\n  class AlreadyLockedError < RequestError\n    def http_status\n      422\n    end\n  end\n\n  class LockFailedError < RequestError\n    def http_status\n      422\n    end\n  end\n\n  class InvalidStateTransitionError < RequestError\n    def http_status\n      422\n    end\n  end\n\n  class UnauthorizedError < RequestError\n    def http_status\n      401\n    end\n  end\n\n  class UnresolvableContainerError < RequestError\n    def http_status\n      422\n    end\n  end\n\n  def self.kind_class(kind)\n    kind.match(/^arvados\\#(.+)$/)[1].classify.safe_constantize rescue nil\n  end\n\n  def self.permit_attribute_params raw_params\n    # strong_parameters does not provide security: permissions are\n    # implemented with before_save hooks.\n    #\n    # The following permit! is necessary even with\n    # \"ActionController::Parameters.permit_all_parameters = true\",\n    # because permit_all does not permit nested attributes.\n    raw_params ||= {}\n\n    if raw_params\n      raw_params = raw_params.to_hash\n      raw_params.delete_if { |k, _| self.protected_attributes.include? 
k }\n      serialized_attributes.each do |colname, coder|\n        param = raw_params[colname.to_sym]\n        if param.nil?\n          # ok\n        elsif !param.is_a?(coder.object_class)\n          raise ArgumentError.new(\"#{colname} parameter must be #{coder.object_class}, not #{param.class}\")\n        elsif has_nonstring_keys?(param)\n          raise ArgumentError.new(\"#{colname} parameter cannot have non-string hash keys\")\n        end\n      end\n      # Check JSONB columns that aren't listed on serialized_attributes\n      columns.select{|c| c.type == :jsonb}.collect{|j| j.name}.each do |colname|\n        if serialized_attributes.include?(colname) || raw_params[colname.to_sym].nil?\n          next\n        end\n        if has_nonstring_keys?(raw_params[colname.to_sym])\n          raise ArgumentError.new(\"#{colname} parameter cannot have non-string hash keys\")\n        end\n      end\n    end\n    ActionController::Parameters.new(raw_params).permit!\n  end\n\n  def initialize raw_params={}, *args\n    super(self.class.permit_attribute_params(raw_params), *args)\n  end\n\n  # Reload \"old attributes\" for logging, too.\n  def reload(*args)\n    super\n    log_start_state\n    self\n  end\n\n  def self.create raw_params={}, *args\n    super(permit_attribute_params(raw_params), *args)\n  end\n\n  def update raw_params={}, *args\n    super(self.class.permit_attribute_params(raw_params), *args)\n  end\n\n  def self.selectable_attributes(template=:user)\n    # Return an array of attribute name strings that can be selected\n    # in the given template.\n    api_accessible_attributes(template).map { |attr_spec| attr_spec.first.to_s }\n  end\n\n  def self.searchable_columns operator\n    textonly_operator = !operator.match(/[<=>]/) && !operator.in?(['in', 'not in'])\n    self.columns.select do |col|\n      case col.type\n      when :string, :text\n        true\n      when :datetime, :integer, :boolean\n        !textonly_operator\n      else\n        false\n      end\n    end.map(&:name)\n  end\n\n  def self.any_searchable_columns operator\n    datetime_columns = self.columns.select { |col| col.type == :datetime }.map(&:name)\n    self.searchable_columns(operator) - datetime_columns\n  end\n\n  def self.attributes_required_columns\n    # This method returns a hash.  Each key is the name of an API attribute,\n    # and it's mapped to a list of database columns that must be fetched\n    # to generate that attribute.\n    # This implementation generates a simple map of attributes to\n    # matching column names.  
Subclasses can override this method\n    # to specify that method-backed API attributes need to fetch\n    # specific columns from the database.\n    all_columns = columns.map(&:name)\n    api_column_map = Hash.new { |hash, key| hash[key] = [] }\n    methods.grep(/^api_accessible_\\w+$/).each do |method_name|\n      next if method_name == :api_accessible_attributes\n      send(method_name).each_pair do |api_attr_name, col_name|\n        col_name = col_name.to_s\n        if all_columns.include?(col_name)\n          api_column_map[api_attr_name.to_s] |= [col_name]\n        end\n      end\n    end\n    api_column_map\n  end\n\n  def self.ignored_select_attributes\n    [\"href\", \"kind\", \"etag\"]\n  end\n\n  def self.columns_for_attributes(select_attributes)\n    if select_attributes.empty?\n      raise ArgumentError.new(\"Attribute selection list cannot be empty\")\n    end\n    api_column_map = attributes_required_columns\n    invalid_attrs = []\n    select_attributes.each do |s|\n      next if ignored_select_attributes.include? s\n      if not s.is_a? String or not api_column_map.include? s\n        invalid_attrs << s\n      end\n    end\n    if not invalid_attrs.empty?\n      raise ArgumentError.new(\"Invalid attribute(s): #{invalid_attrs.inspect}\")\n    end\n    # Given an array of attribute names to select, return an array of column\n    # names that must be fetched from the database to satisfy the request.\n    select_attributes.flat_map { |attr| api_column_map[attr] }.uniq\n  end\n\n  def self.default_orders\n    [\"#{table_name}.modified_at desc\", \"#{table_name}.uuid desc\"]\n  end\n\n  def self.unique_columns\n    [\"id\", \"uuid\"]\n  end\n\n  def self.limit_index_columns_read\n    # This method returns a list of column names.\n    # If an index request reads that column from the database,\n    # APIs that return lists will only fetch objects until reaching\n    # max_index_database_read bytes of data from those columns.\n    # This default implementation returns all columns that aren't \"small\".\n    self.columns.select do |col|\n      col_meta = col.sql_type_metadata\n      case col_meta.type\n      when :boolean, :datetime, :float, :integer\n        false\n      else\n        # 1024 is a semi-arbitrary choice. As of Arvados 3.0.0, \"regular\"\n        # strings are typically 255, and big strings are much larger (512K).\n        col_meta.limit.nil? or (col_meta.limit > 1024)\n      end\n    end.map(&:name)\n  end\n\n  # If current user can manage the object, return an array of uuids of\n  # users and groups that have permission to write the object. 
The\n  # first two elements are always [self.owner_uuid, current user's\n  # uuid].\n  #\n  # If current user can write but not manage the object, return\n  # [self.owner_uuid, current user's uuid].\n  #\n  # If current user cannot write this object, just return\n  # [self.owner_uuid].\n  def writable_by\n    # Return [] if this is a frozen project and the current user can't\n    # unfreeze\n    return [] if respond_to?(:frozen_by_uuid) && frozen_by_uuid &&\n                 (Rails.configuration.API.UnfreezeProjectRequiresAdmin ?\n                    !current_user.andand.is_admin :\n                    !current_user.can?(manage: uuid))\n    # Return [] if nobody can write because this object is inside a\n    # frozen project\n    return [] if FrozenGroup.where(uuid: owner_uuid).any?\n    return [owner_uuid] if not current_user\n    unless (owner_uuid == current_user.uuid or\n            current_user.is_admin or\n            (current_user.groups_i_can(:manage) & [uuid, owner_uuid]).any?)\n      if ((current_user.groups_i_can(:write) + [current_user.uuid]) &\n          [uuid, owner_uuid]).any?\n        return [owner_uuid, current_user.uuid]\n      else\n        return [owner_uuid]\n      end\n    end\n    [owner_uuid, current_user.uuid] + permissions.collect do |p|\n      if ['can_write', 'can_manage'].index p.name\n        p.tail_uuid\n      end\n    end.compact.uniq\n  end\n\n  def can_write\n    if respond_to?(:frozen_by_uuid) && frozen_by_uuid\n      # This special case is needed to return the correct value from a\n      # \"freeze project\" API, during which writable status changes\n      # from true to false.\n      #\n      # current_user.can?(write: self) returns true (which is correct\n      # in the context of permission-checking hooks) but the can_write\n      # value we're returning to the caller here represents the state\n      # _after_ the update, i.e., false.\n      return false\n    else\n      return current_user.can?(write: self)\n    end\n  end\n\n  def can_manage\n    return current_user.can?(manage: self)\n  end\n\n  # Return a query with read permissions restricted to the union of the\n  # permissions of the members of users_list, i.e. if something is readable by\n  # any user in users_list, it will be readable in the query returned by this\n  # function.\n  def self.readable_by(*users_list)\n    # Get rid of troublesome nils\n    users_list.compact!\n\n    # Load optional keyword arguments, if they exist.\n    if users_list.last.is_a? 
Hash\n      kwargs = users_list.pop\n    else\n      kwargs = {}\n    end\n\n    # Collect the UUIDs of the authorized users.\n    sql_table = kwargs.fetch(:table_name, table_name)\n    include_trash = kwargs.fetch(:include_trash, false)\n    include_old_versions = kwargs.fetch(:include_old_versions, false)\n\n    sql_conds = nil\n    user_uuids = users_list.map { |u| u.uuid }\n    all_user_uuids = []\n\n    admin = users_list.select { |u| u.is_admin }.any?\n\n    # For details on how the trashed_groups table is constructed,\n    # see db/migrate/20200501150153_permission_table.rb\n\n    # excluded_trash is a SQL expression that determines whether a row\n    # should be excluded from the results due to being trashed.\n    # Trashed items inside frozen projects are invisible to regular\n    # (non-admin) users even when using include_trash, so we have:\n    #\n    # (item_trashed || item_inside_trashed_project)\n    # &&\n    # (!caller_requests_include_trash ||\n    #  (item_inside_frozen_project && caller_is_not_admin))\n    if (admin && include_trash) || sql_table == \"api_client_authorizations\"\n      excluded_trash = \"false\"\n    else\n      excluded_trash = \"(#{sql_table}.owner_uuid IN (SELECT group_uuid FROM #{TRASHED_GROUPS} \" +\n                       \"WHERE trash_at <= statement_timestamp()))\"\n      if sql_table == \"groups\" || sql_table == \"collections\"\n        excluded_trash = \"(#{excluded_trash} OR #{sql_table}.trash_at <= statement_timestamp() IS TRUE)\"\n      end\n\n      if include_trash\n        # Exclude trash inside frozen projects\n        excluded_trash = \"(#{excluded_trash} AND #{sql_table}.owner_uuid IN (SELECT uuid FROM #{FROZEN_GROUPS}))\"\n      end\n    end\n\n    if admin\n      # Admin skips most permission checks, but we still want to filter\n      # on trashed items.\n      if !include_trash && sql_table != \"api_client_authorizations\"\n        # Only include records where the owner is not trashed\n        sql_conds = \"NOT (#{excluded_trash})\"\n      end\n    else\n      # The core of the permission check is a join against the\n      # materialized_permissions table to determine if the user has at\n      # least read permission to either the object itself or its\n      # direct owner (if traverse_owned is true).  See\n      # db/migrate/20200501150153_permission_table.rb for details on\n      # how the permissions are computed.\n\n      # A user can have can_manage access to another user, which grants\n      # full access to all that user's stuff.  To implement that we\n      # need to include those other users in the permission query.\n\n      # This was previously implemented by embedding the subquery\n      # directly into the query, but it was discovered later that this\n      # causes the Postgres query planner to do silly things because\n      # the query heuristics assumed the subquery would have a lot\n      # more rows than it does, and chooses a bad merge strategy.  
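(The\n      # general pattern, sketched with a hypothetical model:\n      #\n      #   ids = ActiveRecord::Base.connection.exec_query(\n      #     'SELECT id FROM small_set').rows.flatten\n      #   Widget.where(id: ids)  # planner sees a short, concrete list\n      #\n      # instead of Widget.where('id IN (SELECT id FROM small_set)').)  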
By\n      # doing the query here and embedding the result as a constant,\n      # Postgres also knows exactly how many items there are and can\n      # choose the right query strategy.\n      #\n      # (note: you could also do this with a temporary table, but that\n      # would require every request to be wrapped in a transaction,\n      # which is not currently the case).\n\n      all_user_uuids = ActiveRecord::Base.connection.exec_query %{\n#{USER_UUIDS_SUBQUERY_TEMPLATE % {user: \"'#{user_uuids.join \"', '\"}'\", perm_level: 1}}\n},\n                                             'readable_by.user_uuids'\n\n      user_uuids_subquery = \":user_uuids\"\n\n      # Note: it is possible to combine the direct_check and\n      # owner_check into a single IN (SELECT) clause, however it turns\n      # out the query optimizer doesn't like it and forces a sequential\n      # table scan.  Constructing the query with separate IN (SELECT)\n      # clauses enables it to use the index.\n      #\n      # see issue 13208 for details.\n\n      # Match a direct read permission link from the user to the record uuid\n      direct_check = \"#{sql_table}.uuid IN (SELECT target_uuid FROM #{PERMISSION_VIEW} \"+\n                     \"WHERE user_uuid IN (#{user_uuids_subquery}) AND perm_level >= 1)\"\n\n      # Match a read permission for the user to the record's\n      # owner_uuid.  This is so we can have a permissions table that\n      # mostly consists of users and groups (projects are a type of\n      # group) and not have to compute and list user permission to\n      # every single object in the system.\n      #\n      # Don't do this for API keys (special behavior) or groups\n      # (already covered by direct_check).\n      #\n      # The traverse_owned flag indicates whether the permission to\n      # read an object also implies transitive permission to read\n      # things the object owns.  The situation where this is important\n      # is determining if we can read an object owned by another\n      # user.  This makes it possible to have permission to read the\n      # user record without granting permission to read things the\n      # other user owns.\n      owner_check = \"\"\n      if sql_table != \"api_client_authorizations\" and sql_table != \"groups\" then\n        owner_check = \"#{sql_table}.owner_uuid IN (SELECT target_uuid FROM #{PERMISSION_VIEW} \"+\n                      \"WHERE user_uuid IN (#{user_uuids_subquery}) AND perm_level >= 1 AND traverse_owned) \"\n\n        # We want to do owner_check before direct_check in the OR\n        # clause.  The order of the OR clause isn't supposed to\n        # matter, but in practice, it does -- apparently in the\n        # absence of other hints, it uses the ordering from the query.\n        # For certain types of queries (like filtering on owner_uuid),\n        # every item will match the owner_check clause, so then\n        # Postgres will optimize out the direct_check entirely.\n        direct_check = \" OR \" + direct_check\n      end\n\n      if Rails.configuration.Users.RoleGroupsVisibleToAll &&\n         sql_table == \"groups\" &&\n         users_list.select { |u| u.is_active }.any?\n        # All role groups are readable (but we still need the other\n        # direct_check clauses to handle non-role groups).\n        direct_check += \" OR #{sql_table}.group_class = 'role'\"\n      end\n\n      links_cond = \"\"\n      if sql_table == \"links\"\n        # 1) Match permission links incoming or outgoing on the\n        # user, i.e. 
granting permission on the user, or granting\n        # permission to the user.\n        #\n        # 2) Match permission links which grant permission on an\n        # object that this user can_manage.\n        #\n        links_cond = \"OR (#{sql_table}.link_class IN (:permission_link_classes) AND \"+\n                     \"   ((#{sql_table}.head_uuid IN (#{user_uuids_subquery}) OR #{sql_table}.tail_uuid IN (#{user_uuids_subquery})) OR \" +\n                     \"    #{sql_table}.head_uuid IN (SELECT target_uuid FROM #{PERMISSION_VIEW} \"+\n                     \"    WHERE user_uuid IN (#{user_uuids_subquery}) AND perm_level >= 3))) \"\n      end\n\n      sql_conds = \"(#{owner_check} #{direct_check} #{links_cond}) AND NOT (#{excluded_trash})\"\n\n    end\n\n    if !include_old_versions && sql_table == \"collections\"\n      exclude_old_versions = \"#{sql_table}.uuid = #{sql_table}.current_version_uuid\"\n      if sql_conds.nil?\n        sql_conds = exclude_old_versions\n      else\n        sql_conds += \" AND #{exclude_old_versions}\"\n      end\n    end\n\n    return self if sql_conds == nil\n    self.where(sql_conds,\n               user_uuids: all_user_uuids.collect{|c| c[\"target_uuid\"]},\n               permission_link_classes: ['permission'])\n  end\n\n  def save_with_unique_name!\n    max_retries = 2\n    transaction do\n      conn = ActiveRecord::Base.connection\n      conn.exec_query 'SAVEPOINT save_with_unique_name'\n      begin\n        save!\n        conn.exec_query 'RELEASE SAVEPOINT save_with_unique_name'\n      rescue ActiveRecord::RecordNotUnique => rn\n        raise if max_retries == 0\n        max_retries -= 1\n\n        # Dig into the error to determine if it is specifically calling out a\n        # (owner_uuid, name) uniqueness violation.  In this specific case, if\n        # the client requested a unique name with ensure_unique_name==true,\n        # update the name field and try to save again.  Loop as necessary to\n        # discover a unique name.  It is necessary to handle name choosing at\n        # this level (as opposed to the client) to ensure that record creation\n        # never fails due to a race condition.\n        err = rn.cause\n        raise unless err.is_a?(PG::UniqueViolation)\n\n        # Unfortunately ActiveRecord doesn't abstract out any of the\n        # necessary information to figure out if the error is actually\n        # the specific case where we want to apply the ensure_unique_name\n        # behavior, so the following code is specialized to Postgres.\n        detail = err.result.error_field(PG::Result::PG_DIAG_MESSAGE_DETAIL)\n        raise unless /^Key \\(owner_uuid, name\\)=\\([a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}, .*?\\) already exists\\./.match detail\n\n        conn.exec_query 'ROLLBACK TO SAVEPOINT save_with_unique_name'\n\n        if uuid_was.nil?\n          # new record, the uuid caused a name collision (very\n          # unlikely but possible), so generate new uuid\n          self[:uuid] = nil\n          if self.is_a? 
Collection\n            # Also needs to be reset\n            self[:current_version_uuid] = nil\n          end\n          # need to adjust the name after the uuid has been generated\n          add_uuid_to_make_unique_name\n        else\n          # existing record, just update the name directly.\n          add_uuid_to_name\n        end\n        retry\n      end\n    end\n  end\n\n  def user_owner_uuid\n    if self.owner_uuid.nil?\n      return current_user.uuid\n    end\n    owner_class = ArvadosModel.resource_class_for_uuid(self.owner_uuid)\n    if owner_class == User\n      self.owner_uuid\n    else\n      owner_class.find_by_uuid(self.owner_uuid).user_owner_uuid\n    end\n  end\n\n  def logged_attributes\n    attributes.except(*Rails.configuration.AuditLogs.UnloggedAttributes.stringify_keys.keys)\n  end\n\n  def self.full_text_searchable_columns\n    self.columns.select do |col|\n      [:string, :text, :jsonb].include?(col.type) and\n      col.name !~ /(^|_)(^container_image|hash|uuid)$/\n    end.map(&:name)\n  end\n\n  def self.full_text_coalesce\n    full_text_searchable_columns.collect do |column|\n      is_jsonb = self.columns.select{|x|x.name == column}[0].type == :jsonb\n      cast = (is_jsonb || serialized_attributes[column]) ? '::text' : ''\n      \"coalesce(#{column}#{cast},'')\"\n    end\n  end\n\n  def self.full_text_trgm\n    \"(#{full_text_coalesce.join(\" || ' ' || \")})\"\n  end\n\n  def self.full_text_tsvector\n    parts = full_text_searchable_columns.collect do |column|\n      is_jsonb = self.columns.select{|x|x.name == column}[0].type == :jsonb\n      cast = (is_jsonb || serialized_attributes[column]) ? '::text' : ''\n      \"coalesce(#{column}#{cast},'')\"\n    end\n    \"to_tsvector('english', substr(#{parts.join(\" || ' ' || \")}, 0, 8000))\"\n  end\n\n  @_add_uuid_to_name = false\n  def add_uuid_to_make_unique_name\n    @_add_uuid_to_name = true\n  end\n\n  def add_uuid_to_name\n    # Incorporate the random part of the UUID into the name.  This\n    # lets us prevent name collision but the part we add to the name\n    # is still somewhat meaningful (instead of generating a second\n    # random meaningless string).\n    #\n    # Because ArvadosModel is an abstract class and assign_uuid is\n    # part of HasUuid (which is included by the other concrete\n    # classes) the assign_uuid hook gets added (and run) after this\n    # one.  
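(On the arithmetic in\n    # the name assignment below: name[0..236] keeps at most 237\n    # characters, and \" (\" plus the 15 uuid characters plus \")\" adds\n    # 18 more, so the result stays within the typical 255-character\n    # limit of \"regular\" string columns.)  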
So we need to call assign_uuid here to make sure we have a\n    # uuid.\n    assign_uuid\n    self.name = \"#{self.name[0..236]} (#{self.uuid[-15..-1]})\"\n  end\n\n  protected\n\n  # Fail if we loaded some serialized content from the database that\n  # doesn't match the expected type.\n  def type_check_serialized_attributes\n    # `serialized_attributes` lets us find attributes that are\n    # serialized by Rails and stored in text columns, like this one:\n    #\n    # serialize :environment, Hash\n    serialized_attributes.each do |attr, serializer|\n      if attributes.key?(attr) && !attributes[attr].is_a?(serializer.object_class)\n        raise \"invalid serialized data for #{self.class.to_s} #{attr}: #{attributes[attr].to_s[0..5]} is not a #{serializer.object_class}\"\n      end\n    end\n\n    # `type_for_attribute` lets us find attributes that are stored in\n    # jsonb columns, like this one:\n    #\n    # attribute :properties, :jsonbHash, default: {}\n    self.class.columns.each do |col|\n      if attributes.key?(col.name) && col.type == :jsonb\n        coltype = self.class.type_for_attribute(col.name)\n        if coltype.respond_to?(:enforce_type) && !attributes[col.name].is_a?(coltype.enforce_type)\n          raise \"invalid serialized data for #{self.class.to_s} #{col.name}: '#{attributes[col.name]}'[...] is not a #{coltype.enforce_type}\"\n        end\n      end\n    end\n\n    # Somehow, the above code flags the record as changed/dirty, so we\n    # need to clear that flag.\n    clear_changes_information\n  end\n\n  def self.deep_sort_hash(x)\n    if x.is_a? Hash\n      x.sort.collect do |k, v|\n        [k, deep_sort_hash(v)]\n      end.to_h\n    elsif x.is_a? Array\n      x.collect { |v| deep_sort_hash(v) }\n    else\n      x\n    end\n  end\n\n  def ensure_ownership_path_leads_to_user\n    if new_record? or owner_uuid_changed?\n      uuid_in_path = {owner_uuid => true, uuid => true}\n      x = owner_uuid\n      while (owner_class = ArvadosModel::resource_class_for_uuid(x)) != User\n        begin\n          if x == uuid\n            # Test for cycles with the new version, not the DB contents\n            x = owner_uuid\n          elsif !owner_class.respond_to? :find_by_uuid\n            raise ActiveRecord::RecordNotFound.new\n          else\n            x = owner_class.find_by_uuid(x).owner_uuid\n          end\n        rescue ActiveRecord::RecordNotFound => e\n          errors.add :owner_uuid, \"is not owned by any user: #{e}\"\n          throw(:abort)\n        end\n        if uuid_in_path[x]\n          if x == owner_uuid\n            errors.add :owner_uuid, \"would create an ownership cycle\"\n          else\n            errors.add :owner_uuid, \"has an ownership cycle\"\n          end\n          throw(:abort)\n        end\n        uuid_in_path[x] = true\n      end\n    end\n    true\n  end\n\n  def set_default_owner\n    if new_record? and current_user and respond_to? :owner_uuid=\n      self.owner_uuid ||= current_user.uuid\n    end\n  end\n\n  def ensure_owner_uuid_is_permitted\n    raise PermissionDeniedError if !current_user\n\n    if self.owner_uuid.nil?\n      errors.add :owner_uuid, \"cannot be nil\"\n      raise PermissionDeniedError\n    end\n\n    rsc_class = ArvadosModel::resource_class_for_uuid owner_uuid\n    unless rsc_class == User or rsc_class == Group\n      errors.add :owner_uuid, \"must be set to User or Group\"\n      raise PermissionDeniedError\n    end\n\n    if new_record? 
|| owner_uuid_changed?\n      # Permission on owner_uuid_was is needed to move an existing\n      # object away from its previous owner (which implies permission\n      # to modify this object itself, so we don't need to check that\n      # separately). Permission on the new owner_uuid is also needed.\n      [['old', owner_uuid_was],\n       ['new', owner_uuid]\n      ].each do |which, check_uuid|\n        if check_uuid.nil?\n          # old_owner_uuid is nil? New record, no need to check.\n        elsif !current_user.can?(write: check_uuid)\n          if FrozenGroup.where(uuid: check_uuid).any?\n            errors.add :owner_uuid, \"cannot be set or changed because #{which} owner is frozen\"\n          else\n            logger.warn \"User #{current_user.uuid} tried to set ownership of #{self.class.to_s} #{self.uuid} but does not have permission to write #{which} owner_uuid #{check_uuid}\"\n            errors.add :owner_uuid, \"cannot be set or changed without write permission on #{which} owner\"\n          end\n          raise PermissionDeniedError\n        elsif rsc_class == Group && Group.find_by_uuid(owner_uuid).group_class != \"project\"\n          errors.add :owner_uuid, \"must be a project\"\n          raise PermissionDeniedError\n        end\n      end\n    else\n      # If the object already existed and we're not changing\n      # owner_uuid, we only need write permission on the object\n      # itself. (If we're in the act of unfreezing, we only need\n      # :unfreeze permission, which means \"what write permission would\n      # be if target weren't frozen\")\n      unless ((respond_to?(:frozen_by_uuid) && frozen_by_uuid_was && !frozen_by_uuid) ?\n                current_user.can?(unfreeze: uuid) :\n                current_user.can?(write: uuid))\n        logger.warn \"User #{current_user.uuid} tried to modify #{self.class.to_s} #{self.uuid} without write permission\"\n        errors.add :uuid, \" #{uuid} is not writable by #{current_user.uuid}\"\n        raise PermissionDeniedError\n      end\n    end\n\n    true\n  end\n\n  def ensure_permission_to_save\n    unless (new_record? ? permission_to_create : permission_to_update)\n      raise PermissionDeniedError\n    end\n  end\n\n  def permission_to_create\n    return current_user.andand.is_active\n  end\n\n  def permission_to_update\n    if !current_user\n      logger.warn \"Anonymous user tried to update #{self.class.to_s} #{self.uuid_was}\"\n      return false\n    end\n    if !current_user.is_active\n      logger.warn \"Inactive user #{current_user.uuid} tried to update #{self.class.to_s} #{self.uuid_was}\"\n      return false\n    end\n    return true if current_user.is_admin\n    if self.uuid_changed?\n      logger.warn \"User #{current_user.uuid} tried to change uuid of #{self.class.to_s} #{self.uuid_was} to #{self.uuid}\"\n      return false\n    end\n    return true\n  end\n\n  def ensure_permission_to_destroy\n    raise PermissionDeniedError unless permission_to_destroy\n  end\n\n  def permission_to_destroy\n    if [system_user_uuid, system_group_uuid, anonymous_group_uuid,\n        anonymous_user_uuid, public_project_uuid].include? uuid\n      false\n    else\n      permission_to_update\n    end\n  end\n\n  def maybe_update_modified_by_fields\n    update_modified_by_fields if self.changed? 
or self.new_record?\n    true\n  end\n\n  def update_modified_by_fields\n    current_time = db_current_time\n    self.created_at ||= created_at_was || current_time\n    self.updated_at = current_time\n    self.owner_uuid ||= current_user.uuid if current_user && self.respond_to?(:owner_uuid=)\n    if !anonymous_updater\n      self.modified_by_user_uuid = current_user ? current_user.uuid : nil\n    end\n    if !timeless_updater\n      self.modified_at = current_time\n    end\n    true\n  end\n\n  def self.has_nonstring_keys? x\n    if x.is_a? Hash\n      x.each do |k,v|\n        return true if !(k.is_a?(String) || k.is_a?(Symbol)) || has_nonstring_keys?(v)\n      end\n    elsif x.is_a? Array\n      x.each do |v|\n        return true if has_nonstring_keys?(v)\n      end\n    end\n    false\n  end\n\n  def self.where_serialized(colname, value, md5: false, multivalue: false)\n    colsql = colname.to_s\n    if md5\n      colsql = \"md5(#{colsql})\"\n    end\n    if value.empty?\n      # rails4 stores as null, rails3 stored as serialized [] or {}\n      sql = \"#{colsql} is null or #{colsql} IN (?)\"\n      sorted = value\n    else\n      sql = \"#{colsql} IN (?)\"\n      sorted = deep_sort_hash(value)\n    end\n    params = []\n    if multivalue\n      sorted.each do |v|\n        params << v.to_yaml\n        params << SafeJSON.dump(v)\n      end\n    else\n      params << sorted.to_yaml\n      params << SafeJSON.dump(sorted)\n    end\n    if md5\n      params = params.map { |x| Digest::MD5.hexdigest(x) }\n    end\n    where(sql, params)\n  end\n\n  Serializer = {\n    Hash => HashSerializer,\n    Array => ArraySerializer,\n  }\n\n  def self.serialize(colname, type)\n    coder = Serializer[type]\n    @serialized_attributes ||= {}\n    @serialized_attributes[colname.to_s] = coder\n    super(colname, coder: coder)\n  end\n\n  def self.serialized_attributes\n    @serialized_attributes ||= {}\n  end\n\n  def serialized_attributes\n    self.class.serialized_attributes\n  end\n\n  def foreign_key_attributes\n    attributes.keys.select { |a| a.match(/_uuid$/) }\n  end\n\n  def skip_uuid_read_permission_check\n    %w(modified_by_client_uuid)\n  end\n\n  def skip_uuid_existence_check\n    []\n  end\n\n  def normalize_collection_uuids\n    foreign_key_attributes.each do |attr|\n      attr_value = send attr\n      if attr_value.is_a? String and\n          attr_value.match(/^[0-9a-f]{32,}(\\+[@\\w]+)*$/)\n        begin\n          send \"#{attr}=\", Collection.normalize_uuid(attr_value)\n        rescue\n          # TODO: abort instead of silently accepting unnormalizable value?\n        end\n      end\n    end\n  end\n\n  @@prefixes_hash = nil\n  def self.uuid_prefixes\n    unless @@prefixes_hash\n      @@prefixes_hash = {}\n      Rails.application.eager_load!\n      ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |k|\n        if k.respond_to?(:uuid_prefix)\n          @@prefixes_hash[k.uuid_prefix] = k\n        end\n      end\n    end\n    @@prefixes_hash\n  end\n\n  def self.uuid_like_pattern\n    \"#{Rails.configuration.ClusterID}-#{uuid_prefix}-_______________\"\n  end\n\n  def self.uuid_regex\n    %r/[a-z0-9]{5}-#{uuid_prefix}-[a-z0-9]{15}/\n  end\n\n  def check_readable_uuid attr, attr_value\n    return if attr_value.nil?\n    if (r = ArvadosModel::resource_class_for_uuid attr_value)\n      unless skip_uuid_read_permission_check.include? 
attr\n        r = r.readable_by(current_user)\n      end\n      if r.where(uuid: attr_value).count == 0\n        errors.add(attr, \"'#{attr_value}' not found\")\n      end\n    else\n      # Not a valid uuid or PDH, but that (currently) is not an error.\n    end\n  end\n\n  def ensure_valid_uuids\n    specials = [system_user_uuid]\n\n    foreign_key_attributes.each do |attr|\n      if new_record? or send (attr + \"_changed?\")\n        next if skip_uuid_existence_check.include? attr\n        attr_value = send attr\n        next if specials.include? attr_value\n        check_readable_uuid attr, attr_value\n      end\n    end\n  end\n\n  def ensure_filesystem_compatible_name\n    if name == \".\" || name == \"..\"\n      errors.add(:name, \"cannot be '.' or '..'\")\n    elsif Rails.configuration.Collections.ForwardSlashNameSubstitution == \"\" && !name.nil? && name.index('/')\n      errors.add(:name, \"cannot contain a '/' character\")\n    end\n  end\n\n  class Email\n    def self.kind\n      \"email\"\n    end\n\n    def kind\n      self.class.kind\n    end\n\n    def self.readable_by (*u)\n      self\n    end\n\n    def self.where (u)\n      [{:uuid => u[:uuid]}]\n    end\n  end\n\n  def self.resource_class_for_uuid(uuid)\n    if uuid.is_a? ArvadosModel\n      return uuid.class\n    end\n    unless uuid.is_a? String\n      return nil\n    end\n\n    uuid.match HasUuid::UUID_REGEX do |re|\n      return uuid_prefixes[re[1]] if uuid_prefixes[re[1]]\n    end\n\n    if uuid.match(/.+@.+/)\n      return Email\n    end\n\n    nil\n  end\n\n  # Fill in implied zero/false values in database records that were\n  # created before #17014 made them explicit, and reset the Rails\n  # \"changed\" state so the record doesn't appear to have been modified\n  # after loading.\n  #\n  # Invoked by Container and ContainerRequest models as an after_find\n  # hook.\n  def fill_container_defaults_after_find\n    fill_container_defaults\n    clear_changes_information\n  end\n\n  # Fill in implied zero/false values. Invoked by ContainerRequest as\n  # a before_validation hook in order to (a) ensure every key has a\n  # value in the updated database record and (b) ensure the attribute\n  # whitelist doesn't reject a change from an explicit zero/false\n  # value in the database to an implicit zero/false value in an update\n  # request.\n  def fill_container_defaults\n    # Make sure these hashes are correctly sorted by key.  
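(Hash#merge keeps the\n    # receiver's key order while taking the argument's values, e.g.\n    #\n    #   {'API' => false, 'ram' => 0}.merge('ram' => 512)\n    #   # => {\"API\"=>false, \"ram\"=>512}\n    #\n    # so merging the stored hash into the sorted defaults below also\n    # normalizes key order.)  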
We merge in\n    # whatever is in the database on top of them; this will be the\n    # order that gets used downstream rather than the order the keys\n    # appear in the database.\n    rc = attributes['runtime_constraints']\n    if rc.is_a?(Hash)\n      # If it's not loaded, do nothing.\n      # If it's not a hash, leave it alone so it can fail validation.\n      self.runtime_constraints = {\n        'API' => false,\n        'gpu' => {},\n        'keep_cache_disk' => 0,\n        'keep_cache_ram' => 0,\n        'ram' => 0,\n        'vcpus' => 0,\n      }.merge(rc)\n      gpu = self.runtime_constraints['gpu']\n      if gpu.is_a?(Hash)\n        # If it's not a hash, leave it alone so it can fail validation.\n        self.runtime_constraints['gpu'] = {\n          'device_count' => 0,\n          'driver_version' => '',\n          'hardware_target' => [],\n          'stack' => '',\n          'vram' => 0,\n        }.merge(gpu)\n      end\n    end\n    sp = attributes['scheduling_parameters']\n    if sp.is_a?(Hash)\n      # If it's not loaded, do nothing.\n      # If it's not a hash, leave it alone so it can fail validation.\n      self.scheduling_parameters = {\n        'max_run_time' => 0,\n        'partitions' => [],\n        'preemptible' => false,\n        'supervisor' => false,\n      }.merge(sp)\n    end\n  end\n\n  # ArvadosModel.find_by_uuid needs extra magic to allow it to return\n  # an object in any class.\n  def self.find_by_uuid uuid\n    if self == ArvadosModel\n      # If called directly as ArvadosModel.find_by_uuid rather than via subclass,\n      # delegate to the appropriate subclass based on the given uuid.\n      self.resource_class_for_uuid(uuid).find_by_uuid(uuid)\n    else\n      super\n    end\n  end\n\n  def is_audit_logging_enabled?\n    return !(Rails.configuration.AuditLogs.MaxAge.to_i == 0 &&\n             Rails.configuration.AuditLogs.MaxDeleteBatch.to_i > 0)\n  end\n\n  def schedule_restoring_changes\n    # This will be checked at log_start_state, to reset any (virtual) changes\n    # produced by the act of reading a serialized attribute.\n    @fresh_from_database = true\n  end\n\n  def log_start_state\n    if is_audit_logging_enabled?\n      @old_attributes = Marshal.load(Marshal.dump(attributes))\n      @old_logged_attributes = Marshal.load(Marshal.dump(logged_attributes))\n      if @fresh_from_database\n        # This instance was created from reading a database record. 
Attributes\n        # haven't been changed, but those serialized attributes will be reported\n        # as unpersisted, so we restore them to avoid issues with lock!() and\n        # with_lock().\n        restore_attributes\n        @fresh_from_database = nil\n      end\n    end\n  end\n\n  def log_change(event_type)\n    if is_audit_logging_enabled?\n      log = Log.new(event_type: event_type).fill_object(self)\n      yield log\n      log.save!\n      log_start_state\n    end\n  end\n\n  def log_create\n    if is_audit_logging_enabled?\n      log_change('create') do |log|\n        log.fill_properties('old', nil, nil)\n        log.update_to self\n      end\n    end\n  end\n\n  def log_update\n    if is_audit_logging_enabled?\n      log_change('update') do |log|\n        log.fill_properties('old', etag(@old_attributes), @old_logged_attributes)\n        log.update_to self\n      end\n    end\n  end\n\n  def log_destroy\n    if is_audit_logging_enabled?\n      log_change('delete') do |log|\n        log.fill_properties('old', etag(@old_attributes), @old_logged_attributes)\n        log.update_to nil\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/app/models/authorized_key.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AuthorizedKey < ArvadosModel\n  include HasUuid\n  include KindAndEtag\n  include CommonApiTemplate\n  before_create :permission_to_set_authorized_user_uuid\n  before_update :permission_to_set_authorized_user_uuid\n\n  belongs_to :authorized_user,\n             foreign_key: 'authorized_user_uuid',\n             class_name: 'User',\n             primary_key: 'uuid',\n             optional: true\n\n  validate :public_key_must_be_unique\n\n  api_accessible :user, extend: :common do |t|\n    t.add :name\n    t.add :key_type\n    t.add :authorized_user_uuid\n    t.add :public_key\n    t.add :expires_at\n  end\n\n  def permission_to_set_authorized_user_uuid\n    # Anonymous users cannot do anything here\n    return false if !current_user\n\n    # Administrators can attach a key to any user account\n    return true if current_user.is_admin\n\n    # All users can attach keys to their own accounts\n    return true if current_user.uuid == authorized_user_uuid\n\n    # Default = deny.\n    false\n  end\n\n  def public_key_must_be_unique\n    if self.public_key\n      # Valid if no other rows have this public key\n      if self.class.where('uuid != ? and public_key like ?',\n                          uuid || '', \"%#{self.public_key}%\").any?\n        errors.add(:public_key, \"already exists in the database, use a different key.\")\n        return false\n      end\n    end\n    return true\n  end\nend\n"
  },
  {
    "path": "services/api/app/models/blob.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'request_error'\n\nclass Blob\n  extend DbCurrentTime\n\n  def initialize locator\n    @locator = locator\n  end\n\n  def empty?\n    !!@locator.match(/^d41d8cd98f00b204e9800998ecf8427e(\\+.*)?$/)\n  end\n\n  # In order to get a Blob from Keep, you have to prove either\n  # [a] you have recently written it to Keep yourself, or\n  # [b] apiserver has recently decided that you should be able to read it\n  #\n  # To ensure that the requestor of a blob is authorized to read it,\n  # Keep requires clients to timestamp the blob locator with an expiry\n  # time, and to sign the timestamped locator with their API token.\n  #\n  # A signed blob locator has the form:\n  #     locator_hash +A blob_signature @ timestamp\n  # where the timestamp is a Unix time expressed as a hexadecimal value,\n  # and the blob_signature is the signed locator_hash + API token + timestamp.\n  #\n  class InvalidSignatureError < RequestError\n  end\n\n  # Blob.sign_locator: return a signed and timestamped blob locator.\n  #\n  # The 'opts' argument should include:\n  #   [required] :api_token - API token (signatures only work for this token)\n  #   [optional] :key       - the Arvados server-side blobstore key\n  #   [optional] :ttl       - number of seconds before signature should expire\n  #   [optional] :expire    - unix timestamp when signature should expire\n  #\n  def self.sign_locator blob_locator, opts\n    # We only use the hash portion for signatures.\n    blob_hash = blob_locator.split('+').first\n\n    # Generate an expiry timestamp (seconds after epoch, base 16)\n    if opts[:expire]\n      if opts[:ttl]\n        raise \"Cannot specify both :ttl and :expire options\"\n      end\n      timestamp = opts[:expire]\n    else\n      timestamp = db_current_time.to_i +\n        (opts[:ttl] || Rails.configuration.Collections.BlobSigningTTL.to_i)\n    end\n    timestamp_hex = timestamp.to_s(16)\n    # => \"53163cb4\"\n    blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_i.to_s(16)\n\n    # Generate a signature.\n    signature =\n      generate_signature((opts[:key] or Rails.configuration.Collections.BlobSigningKey),\n                         blob_hash, opts[:api_token], timestamp_hex, blob_signature_ttl)\n\n    blob_locator + '+A' + signature + '@' + timestamp_hex\n  end\n\n  # Blob.verify_signature\n  #   Safely verify the signature on a blob locator.\n  #   Return value: true if the locator has a valid signature, false otherwise\n  #   Arguments: signed_blob_locator, opts\n  #\n  def self.verify_signature(*args)\n    begin\n      self.verify_signature!(*args)\n      true\n    rescue Blob::InvalidSignatureError\n      false\n    end\n  end\n\n  # Blob.verify_signature!\n  #   Verify the signature on a blob locator.\n  #   Return value: true if the locator has a valid signature\n  #   Arguments: signed_blob_locator, opts\n  #   Exceptions:\n  #     Blob::InvalidSignatureError if the blob locator does not include a\n  #     valid signature\n  #\n  def self.verify_signature! 
signed_blob_locator, opts\n    blob_hash = signed_blob_locator.split('+').first\n    given_signature, timestamp = signed_blob_locator.\n      split('+A').last.\n      split('+').first.\n      split('@')\n\n    if !timestamp\n      raise Blob::InvalidSignatureError.new 'No signature provided.'\n    end\n    unless timestamp =~ /^[\\da-f]+$/\n      raise Blob::InvalidSignatureError.new 'Timestamp is not a base16 number.'\n    end\n    if timestamp.to_i(16) < (opts[:now] or db_current_time.to_i)\n      raise Blob::InvalidSignatureError.new 'Signature expiry time has passed.'\n    end\n    blob_signature_ttl = Rails.configuration.Collections.BlobSigningTTL.to_i.to_s(16)\n\n    my_signature =\n      generate_signature((opts[:key] or Rails.configuration.Collections.BlobSigningKey),\n                         blob_hash, opts[:api_token], timestamp, blob_signature_ttl)\n\n    if my_signature != given_signature\n      raise Blob::InvalidSignatureError.new 'Signature is invalid.'\n    end\n\n    true\n  end\n\n  def self.generate_signature key, blob_hash, api_token, timestamp, blob_signature_ttl\n    OpenSSL::HMAC.hexdigest('sha1', key,\n                            [blob_hash,\n                             api_token,\n                             timestamp,\n                             blob_signature_ttl].join('@'))\n  end\nend\n"
  },
  {
    "path": "services/api/app/models/collection.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'arvados/keep'\nrequire 'trashable'\nrequire 'validate_serialized'\n\nclass Collection < ArvadosModel\n  extend CurrentApiClient\n  extend DbCurrentTime\n  include HasUuid\n  include KindAndEtag\n  include CommonApiTemplate\n  include Trashable\n\n  # Posgresql JSONB columns should NOT be declared as serialized, Rails 5\n  # already know how to properly treat them.\n  attribute :properties, :jsonbHash, default: {}\n  attribute :storage_classes_desired, :jsonbArray, default: lambda { Rails.configuration.DefaultStorageClasses }\n  attribute :storage_classes_confirmed, :jsonbArray, default: []\n\n  before_validation :default_empty_manifest\n  before_validation :default_storage_classes, on: :create\n  before_validation :managed_properties, on: :create\n  before_validation :check_encoding\n  before_validation :check_manifest_validity\n  before_validation :check_signatures\n  before_validation :strip_signatures_and_update_replication_confirmed\n  before_validation :name_null_if_empty\n  validates :properties, hash_attr: true\n  validates :storage_classes_desired, array_of_strings: true\n  validates :storage_classes_confirmed, array_of_strings: true\n  validate :ensure_filesystem_compatible_name\n  validate :ensure_pdh_matches_manifest_text\n  validate :ensure_storage_classes_desired_is_not_empty\n  validate :versioning_metadata_updates, on: :update\n  validate :past_versions_cannot_be_updated, on: :update\n  validate :protected_managed_properties_updates, on: :update\n  validate :workflow_type_property, on: :update\n  after_validation :set_file_count_and_total_size\n  before_save :set_file_names\n  after_save :check_linked_workflows\n  around_update :manage_versioning, unless: :is_past_version?\n\n  has_many :workflows,\n           class_name: 'Workflow',\n           foreign_key: 'collection_uuid',\n           primary_key: 'uuid',\n           dependent: :destroy\n\n  api_accessible :user, extend: :common do |t|\n    t.add lambda { |x| x.name || \"\" }, as: :name\n    t.add :description\n    t.add :properties\n    t.add :portable_data_hash\n    t.add :manifest_text, as: :unsigned_manifest_text\n    t.add :manifest_text, as: :manifest_text\n    t.add :replication_desired\n    t.add :replication_confirmed\n    t.add :replication_confirmed_at\n    t.add :storage_classes_desired\n    t.add :storage_classes_confirmed\n    t.add :storage_classes_confirmed_at\n    t.add :delete_at\n    t.add :trash_at\n    t.add :is_trashed\n    t.add :version\n    t.add :current_version_uuid\n    t.add :preserve_version\n    t.add :file_count\n    t.add :file_size_total\n  end\n\n  UNLOGGED_CHANGES = ['preserve_version', 'updated_at']\n\n  after_initialize do\n    @signatures_checked = false\n    @computed_pdh_for_manifest_text = false\n  end\n\n  def self.attributes_required_columns\n    super.merge(\n                # If we don't list unsigned_manifest_text explicitly,\n                # the params[:select] code gets confused by the way we\n                # expose manifest_text as unsigned_manifest_text in\n                # the API response, and never let clients select the\n                # unsigned_manifest_text column.\n                'unsigned_manifest_text' => ['manifest_text'],\n                'name' => ['name'],\n                )\n  end\n\n  def self.ignored_select_attributes\n    super + [\"updated_at\", \"file_names\"]\n  end\n\n  FILE_TOKEN = /^[[:digit:]]+:[[:digit:]]+:/\n  def 
check_signatures\n    throw(:abort) if self.manifest_text.nil?\n\n    return true if current_user.andand.is_admin\n\n    # Provided the manifest_text hasn't changed materially since an\n    # earlier validation, it's safe to pass this validation on\n    # subsequent passes without checking any signatures. This is\n    # important because the signatures have probably been stripped off\n    # by the time we get to a second validation pass!\n    if @signatures_checked && @signatures_checked == computed_pdh\n      return true\n    end\n\n    if self.manifest_text_changed?\n      # Check permissions on the collection manifest.\n      # If any signature cannot be verified, raise PermissionDeniedError\n      # which will return 403 Permission denied to the client.\n      api_token = Thread.current[:token]\n      signing_opts = {\n        api_token: api_token,\n        now: @validation_timestamp.to_i,\n      }\n      self.manifest_text.each_line do |entry|\n        entry.split.each do |tok|\n          if tok == '.' or tok.starts_with? './'\n            # Stream name token.\n          elsif tok =~ FILE_TOKEN\n            # This is a filename token, not a blob locator. Note that we\n            # keep checking tokens after this, even though manifest\n            # format dictates that all subsequent tokens will also be\n            # filenames. Safety first!\n          elsif Blob.verify_signature tok, signing_opts\n            # OK.\n          elsif Keep::Locator.parse(tok).andand.signature\n            # Signature provided, but verify_signature did not like it.\n            logger.warn \"Invalid signature on locator #{tok}\"\n            raise ArvadosModel::PermissionDeniedError\n          elsif !Rails.configuration.Collections.BlobSigning\n            # No signature provided, but we are running in insecure mode.\n            logger.debug \"Missing signature on locator #{tok} ignored\"\n          elsif Blob.new(tok).empty?\n            # No signature provided -- but no data to protect, either.\n          else\n            logger.warn \"Missing signature on locator #{tok}\"\n            raise ArvadosModel::PermissionDeniedError\n          end\n        end\n      end\n    end\n    @signatures_checked = computed_pdh\n  end\n\n  def strip_signatures_and_update_replication_confirmed\n    if self.manifest_text_changed?\n      in_old_manifest = {}\n      # manifest_text_was could be nil when dealing with a freshly created snapshot,\n      # so we skip this case because there was no real manifest change. (Bug #18005)\n      if (not self.replication_confirmed.nil?) and (not self.manifest_text_was.nil?)\n        self.class.each_manifest_locator(manifest_text_was) do |match|\n          in_old_manifest[match[1]] = true\n        end\n      end\n\n      stripped_manifest = self.class.munge_manifest_locators(manifest_text) do |match|\n        if not self.replication_confirmed.nil? 
and not in_old_manifest[match[1]]\n          # If the new manifest_text contains locators whose hashes\n          # weren't in the old manifest_text, storage replication is no\n          # longer confirmed.\n          self.replication_confirmed_at = nil\n          self.replication_confirmed = nil\n        end\n\n        # Return the locator with all permission signatures removed,\n        # but otherwise intact.\n        match[0].gsub(/\\+A[^+]*/, '')\n      end\n\n      if @computed_pdh_for_manifest_text == manifest_text\n        # If the cached PDH was valid before stripping, it is still\n        # valid after stripping.\n        @computed_pdh_for_manifest_text = stripped_manifest.dup\n      end\n\n      self[:manifest_text] = stripped_manifest\n    end\n    true\n  end\n\n  def ensure_pdh_matches_manifest_text\n    if not manifest_text_changed? and not portable_data_hash_changed?\n      true\n    elsif portable_data_hash.nil? or not portable_data_hash_changed?\n      self.portable_data_hash = computed_pdh\n    elsif portable_data_hash !~ Keep::Locator::LOCATOR_REGEXP\n      errors.add(:portable_data_hash, \"is not a valid locator\")\n      false\n    elsif portable_data_hash[0..31] != computed_pdh[0..31]\n      errors.add(:portable_data_hash,\n                 \"'#{portable_data_hash}' does not match computed hash '#{computed_pdh}'\")\n      false\n    else\n      # Ignore the client-provided size part: always store\n      # computed_pdh in the database.\n      self.portable_data_hash = computed_pdh\n    end\n  end\n\n  def name_null_if_empty\n    if name == \"\"\n      self.name = nil\n    end\n  end\n\n  def set_file_names\n    if self.manifest_text_changed?\n      self.file_names = manifest_files\n    end\n    true\n  end\n\n  def set_file_count_and_total_size\n    # Only update the file stats if the manifest changed\n    if self.manifest_text_changed?\n      m = Keep::Manifest.new(self.manifest_text)\n      self.file_size_total = m.files_size\n      self.file_count = m.files_count\n    # If the manifest didn't change but the attributes did, ignore the changes\n    elsif self.file_count_changed? || self.file_size_total_changed?\n      self.file_count = self.file_count_was\n      self.file_size_total = self.file_size_total_was\n    end\n    true\n  end\n\n  def manifest_files\n    return '' if !self.manifest_text\n\n    done = {}\n    names = ''\n    self.manifest_text.scan(/ \\d+:\\d+:(\\S+)/) do |name|\n      next if done[name]\n      done[name] = true\n      names << name.first.gsub('\\040',' ') + \"\\n\"\n    end\n    self.manifest_text.scan(/^\\.\\/(\\S+)/m) do |stream_name|\n      next if done[stream_name]\n      done[stream_name] = true\n      names << stream_name.first.gsub('\\040',' ') + \"\\n\"\n    end\n    names\n  end\n\n  def default_empty_manifest\n    self.manifest_text ||= ''\n  end\n\n  def skip_uuid_existence_check\n    # Avoid checking the existence of current_version_uuid, as it's\n    # assigned on creation of a new 'current version' collection, so\n    # the collection's UUID only lives on memory when the validation check\n    # is performed.\n    ['current_version_uuid']\n  end\n\n  def manage_versioning\n    should_preserve_version = should_preserve_version? 
# Time sensitive, cache value\n    return(yield) unless (should_preserve_version || syncable_updates.any?)\n\n    # Put aside the changes because with_lock does a record reload\n    changes = self.changes\n    snapshot = nil\n    restore_attributes\n    with_lock do\n      # Copy the original state to save it as old version\n      if should_preserve_version\n        snapshot = self.dup\n        snapshot.uuid = nil # Reset UUID so it's created as a new record\n        snapshot.created_at = self.created_at\n        snapshot.modified_at = self.modified_at_was\n      end\n\n      # Restore requested changes on the current version\n      changes.keys.each do |attr|\n        if attr == 'preserve_version' && changes[attr].last == false && !should_preserve_version\n          next # Ignore false assignment, once true it'll be true until next version\n        end\n        self.attributes = {attr => changes[attr].last}\n        if attr == 'uuid'\n          # Also update the current version reference\n          self.attributes = {'current_version_uuid' => changes[attr].last}\n        end\n      end\n\n      if should_preserve_version\n        self.version += 1\n      end\n\n      yield\n\n      sync_past_versions if syncable_updates.any?\n      if snapshot\n        snapshot.attributes = self.syncable_updates\n        leave_modified_by_user_alone do\n          leave_modified_at_alone do\n            act_as_system_user do\n              snapshot.save\n            end\n          end\n        end\n      end\n    end\n  end\n\n  def maybe_update_modified_by_fields\n    if !(self.changes.keys - ['updated_at', 'preserve_version']).empty?\n      super\n    end\n  end\n\n  def syncable_updates\n    updates = {}\n    if self.changes.any?\n      changes = self.changes\n    else\n      # If called after save...\n      changes = self.saved_changes\n    end\n    (syncable_attrs & changes.keys).each do |attr|\n      if attr == 'uuid'\n        # Point old versions to current version's new UUID\n        updates['current_version_uuid'] = changes[attr].last\n      else\n        updates[attr] = changes[attr].last\n      end\n    end\n    return updates\n  end\n\n  def sync_past_versions\n    Collection.where('current_version_uuid = ? AND uuid != ?', self.uuid_before_last_save, self.uuid_before_last_save).update_all self.syncable_updates\n  end\n\n  def versionable_updates?(attrs)\n    (['manifest_text', 'description', 'properties', 'name'] & attrs).any?\n  end\n\n  def syncable_attrs\n    ['uuid', 'owner_uuid', 'delete_at', 'trash_at', 'is_trashed', 'replication_desired', 'storage_classes_desired']\n  end\n\n  def is_past_version?\n    # Check for the '_was' values just in case the update operation\n    # includes a change on current_version_uuid or uuid.\n    !(new_record? 
|| self.current_version_uuid_was == self.uuid_was)\n  end\n\n  def should_preserve_version?\n    return false unless (Rails.configuration.Collections.CollectionVersioning && versionable_updates?(self.changes.keys))\n\n    return false if self.is_trashed\n\n    idle_threshold = Rails.configuration.Collections.PreserveVersionIfIdle\n    if !self.preserve_version_was &&\n      !self.preserve_version &&\n      (idle_threshold < 0 ||\n        (idle_threshold > 0 && self.modified_at_was > db_current_time-idle_threshold.seconds))\n      return false\n    end\n    return true\n  end\n\n  def check_encoding\n    if !(manifest_text.encoding.name == 'UTF-8' and manifest_text.valid_encoding?)\n      begin\n        # If Ruby thinks the encoding is something else, like 7-bit\n        # ASCII, but its stored bytes are equal to the (valid) UTF-8\n        # encoding of the same string, we declare it to be a UTF-8\n        # string.\n        utf8 = manifest_text\n        utf8.force_encoding Encoding::UTF_8\n        if utf8.valid_encoding? and utf8 == manifest_text.encode(Encoding::UTF_8)\n          self.manifest_text = utf8\n          return true\n        end\n      rescue\n      end\n      errors.add :manifest_text, \"must use UTF-8 encoding\"\n      throw(:abort)\n    end\n  end\n\n  def check_manifest_validity\n    begin\n      Keep::Manifest.validate! manifest_text\n      true\n    rescue ArgumentError => e\n      errors.add :manifest_text, e.message\n      throw(:abort)\n    end\n  end\n\n  def signed_manifest_text_only_for_tests\n    if !has_attribute? :manifest_text\n      return nil\n    elsif is_trashed\n      return manifest_text\n    else\n      token = Thread.current[:token]\n      exp = [db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL.to_i,\n             trash_at].compact.map(&:to_i).min\n      self.class.sign_manifest_only_for_tests manifest_text, token, exp\n    end\n  end\n\n  def self.sign_manifest_only_for_tests manifest, token, exp=nil\n    if exp.nil?\n      exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL.to_i\n    end\n    signing_opts = {\n      api_token: token,\n      expire: exp,\n    }\n    m = munge_manifest_locators(manifest) do |match|\n      Blob.sign_locator(match[0], signing_opts)\n    end\n    return m\n  end\n\n  def self.munge_manifest_locators manifest\n    # Given a manifest text and a block, yield the regexp MatchData\n    # for each locator. 
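(A typical use, as in\n    # sign_manifest_only_for_tests above:\n    #\n    #   munge_manifest_locators(m) { |match| Blob.sign_locator(match[0], opts) }\n    #\n    # where m is a manifest string and opts the signing options.)  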
Return a new manifest in which each locator\n    # has been replaced by the block's return value.\n    return nil if !manifest\n    return '' if manifest == ''\n\n    new_lines = []\n    manifest.each_line do |line|\n      line.rstrip!\n      new_words = []\n      line.split(' ').each do |word|\n        if new_words.empty?\n          new_words << word\n        elsif match = Keep::Locator::LOCATOR_REGEXP.match(word)\n          new_words << yield(match)\n        else\n          new_words << word\n        end\n      end\n      new_lines << new_words.join(' ')\n    end\n    new_lines.join(\"\\n\") + \"\\n\"\n  end\n\n  def self.each_manifest_locator manifest\n    # Given a manifest text and a block, yield the regexp match object\n    # for each locator.\n    manifest.each_line do |line|\n      # line will have a trailing newline, but the last token is never\n      # a locator, so it's harmless here.\n      line.split(' ').each do |word|\n        if match = Keep::Locator::LOCATOR_REGEXP.match(word)\n          yield(match)\n        end\n      end\n    end\n  end\n\n  def self.normalize_uuid uuid\n    hash_part = nil\n    size_part = nil\n    uuid.split('+').each do |token|\n      if token.match(/^[0-9a-f]{32,}$/)\n        raise \"uuid #{uuid} has multiple hash parts\" if hash_part\n        hash_part = token\n      elsif token.match(/^\\d+$/)\n        raise \"uuid #{uuid} has multiple size parts\" if size_part\n        size_part = token\n      end\n    end\n    raise \"uuid #{uuid} has no hash part\" if !hash_part\n    [hash_part, size_part].compact.join '+'\n  end\n\n  def self.get_compatible_images(readers, pattern, collections)\n    if collections.empty?\n      return []\n    end\n\n    migrations = Hash[\n      Link.where('tail_uuid in (?) AND link_class=? AND links.owner_uuid=?',\n                 collections.map(&:portable_data_hash),\n                 'docker_image_migration',\n                 system_user_uuid).\n      order('links.created_at asc').\n      map { |l|\n        [l.tail_uuid, l.head_uuid]\n      }]\n\n    migrated_collections = Hash[\n      Collection.readable_by(*readers).\n      where('portable_data_hash in (?)', migrations.values).\n      map { |c|\n        [c.portable_data_hash, c]\n      }]\n\n    collections.map { |c|\n      # Check if the listed image is compatible first; if not, then try the\n      # migration link.\n      manifest = Keep::Manifest.new(c.manifest_text)\n      if manifest.exact_file_count?(1) and manifest.files[0][1] =~ pattern\n        c\n      elsif m = migrated_collections[migrations[c.portable_data_hash]]\n        manifest = Keep::Manifest.new(m.manifest_text)\n        if manifest.exact_file_count?(1) and manifest.files[0][1] =~ pattern\n          m\n        end\n      end\n    }.compact\n  end\n\n  # Resolve a Docker repo+tag, hash, or collection PDH to an array of\n  # Collection objects, sorted by timestamp starting with the most recent\n  # match.\n  #\n  # If filter_compatible_format is true (the default), only return image\n  # collections which are supported by the installation as indicated by\n  # Rails.configuration.Containers.SupportedDockerImageFormats.  
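(A usage sketch, with a\n  # hypothetical repo and tag:\n  #\n  #   Collection.find_all_for_docker_image('example/image', 'latest')\n  #\n  # returns the matching image collections, most recent first.)  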
Will follow\n  # 'docker_image_migration' links if search_term resolves to an incompatible\n  # image, but an equivalent compatible image is available.\n  def self.find_all_for_docker_image(search_term, search_tag=nil, readers=nil, filter_compatible_format: true)\n    readers ||= [Thread.current[:user]]\n    base_search = Link.\n      readable_by(*readers).\n      readable_by(*readers, table_name: \"collections\").\n      joins(\"JOIN collections ON links.head_uuid = collections.uuid\").\n      order(\"links.created_at DESC\")\n\n    docker_image_formats = Rails.configuration.Containers.SupportedDockerImageFormats.keys.map(&:to_s)\n\n    if (docker_image_formats.include? 'v1' and\n        docker_image_formats.include? 'v2') or filter_compatible_format == false\n      pattern = /^(sha256:)?[0-9A-Fa-f]{64}\\.tar$/\n    elsif docker_image_formats.include? 'v2'\n      pattern = /^(sha256:)[0-9A-Fa-f]{64}\\.tar$/\n    elsif docker_image_formats.include? 'v1'\n      pattern = /^[0-9A-Fa-f]{64}\\.tar$/\n    else\n      raise \"Unrecognized configuration for docker_image_formats #{docker_image_formats}\"\n    end\n\n    # If the search term is a Collection locator that contains one file\n    # that looks like a Docker image, return it.\n    if loc = Keep::Locator.parse(search_term)\n      loc.strip_hints!\n      coll_match = readable_by(*readers).where(portable_data_hash: loc.to_s).limit(1)\n      rc = Rails.configuration.RemoteClusters.select{ |k|\n        k != :\"*\" && k != Rails.configuration.ClusterID}\n      if coll_match.any? or rc.length == 0\n        return get_compatible_images(readers, pattern, coll_match)\n      else\n        # Allow bare pdh that doesn't exist in the local database so\n        # that federated container requests which refer to remotely\n        # stored containers will validate.\n        return [Collection.new(portable_data_hash: loc.to_s)]\n      end\n    end\n\n    if search_tag.nil? and (n = search_term.index(\":\"))\n      search_tag = search_term[n+1..-1]\n      search_term = search_term[0..n-1]\n    end\n\n    # Find Collections with matching Docker image repository+tag pairs.\n    matches = base_search.\n      where(link_class: \"docker_image_repo+tag\",\n            name: \"#{search_term}:#{search_tag || 'latest'}\")\n\n    # If that didn't work, find Collections with matching Docker image hashes.\n    if matches.empty?\n      matches = base_search.\n        where(\"link_class = ? and links.name LIKE ?\",\n              \"docker_image_hash\", \"#{search_term}%\")\n    end\n\n    # Generate an order key for each result.  
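(Both components are\n    # negated so that the ascending sort_by below puts the newest\n    # image first.)  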
We want to order the results\n    # so that anything with an image timestamp is considered more recent than\n    # anything without; then we use the link's created_at as a tiebreaker.\n    uuid_timestamps = {}\n    matches.each do |link|\n      uuid_timestamps[link.head_uuid] = [(-link.properties[\"image_timestamp\"].to_datetime.to_i rescue 0),\n       -link.created_at.to_i]\n    end\n\n    sorted = Collection.where('uuid in (?)', uuid_timestamps.keys).sort_by { |c|\n      uuid_timestamps[c.uuid]\n    }\n    compatible = get_compatible_images(readers, pattern, sorted)\n    if sorted.length > 0 and compatible.empty?\n      raise ArvadosModel::UnresolvableContainerError.new \"Matching Docker image is incompatible with 'docker_image_formats' configuration.\"\n    end\n    compatible\n  end\n\n  def self.for_latest_docker_image(search_term, search_tag=nil, readers=nil)\n    find_all_for_docker_image(search_term, search_tag, readers).first\n  end\n\n  def self.searchable_columns operator\n    super - [\"manifest_text\"]\n  end\n\n  def self.full_text_searchable_columns\n    super - [\"manifest_text\", \"storage_classes_desired\", \"storage_classes_confirmed\", \"current_version_uuid\"]\n  end\n\n  def check_linked_workflows\n    # - can't be linked (yet) if it is a new record.\n    #\n    # - properties[\"type\"]=>\"workflow\" is protected by the\n    #   \"workflow_type_property\" validation and can't be changed or removed as\n    #   long as there are linked workflows\n    #\n    # - \"workflows\" is provided by the ActiveRecord association at the\n    #   top of the file; we only want to do this (including\n    #   enforcement of property contents) if the collection is linked.\n    if !new_record? && properties[\"type\"] == \"workflow\" && workflows.any?\n      update_linked_workflows(workflows, true)\n    end\n  end\n\n  def update_linked_workflows(workflows_to_update, should_save)\n    workflowMain = self.properties[\"arv:workflowMain\"]\n    inputs = self.properties[\"arv:cwl_inputs\"]\n    outputs = self.properties[\"arv:cwl_outputs\"]\n    requirements = self.properties[\"arv:cwl_requirements\"]\n    hints = self.properties[\"arv:cwl_hints\"]\n\n    [['arv:workflowMain', workflowMain, String],\n     ['arv:cwl_inputs', inputs, Array],\n     ['arv:cwl_outputs', outputs, Array],\n     ['arv:cwl_requirements', requirements, Array],\n     ['arv:cwl_hints', hints, Array],\n    ].each do |key, val, type|\n      if val.nil?\n        raise \"missing field '#{key}' in collection properties\"\n      end\n      if !val.is_a?(type)\n        raise \"expected field '#{key}' in collection properties to be a #{type}\"\n      end\n    end\n\n    step = {\n      id: \"#main/\" + workflowMain,\n      in: [],\n      out: [],\n      run: \"keep:#{self.portable_data_hash}/#{workflowMain}\",\n      label: name\n    }\n\n    inputs.each do |i|\n      step[:in].push({id: \"#main/step/#{Collection.cwl_shortname(i['id'])}\",\n                      source: i['id']})\n    end\n\n    outputs.each do |i|\n      outid = \"#main/step/#{Collection.cwl_shortname(i['id'])}\"\n      step[:out].push({\"id\": outid})\n      i['outputSource'] = outid\n    end\n\n    wrapper = {\n      class: \"Workflow\",\n      id: \"#main\",\n      inputs: inputs,\n      outputs: outputs,\n      steps: [step],\n      requirements: requirements + [{\"class\": \"SubworkflowFeatureRequirement\"}],\n      hints: hints,\n    }\n\n    doc = SafeJSON.dump({cwlVersion: \"v1.2\", \"$graph\": [wrapper]})\n\n    workflows_to_update.each do |w|\n      
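# Mirror this collection's metadata onto each linked workflow record,\n      # replacing its definition with the wrapper document built above.\n      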
w.name = self.name\n      w.description = self.description\n      w.definition = doc\n      w.owner_uuid = self.owner_uuid\n      w.save! if should_save\n    end\n\n    true\n  end\n\n  protected\n\n  # Although the defaults for these columns are already set up in the schema,\n  # collection creation from an API client seems to ignore them, making the\n  # validation on empty desired storage classes return an error.\n  def default_storage_classes\n    if self.storage_classes_desired.nil? || self.storage_classes_desired.empty?\n      self.storage_classes_desired = Rails.configuration.DefaultStorageClasses\n    end\n    self.storage_classes_confirmed ||= []\n  end\n\n  # Sets managed properties at creation time\n  def managed_properties\n    managed_props = Rails.configuration.Collections.ManagedProperties.with_indifferent_access\n    if managed_props.empty?\n      return\n    end\n    (managed_props.keys - self.properties.keys).each do |key|\n      if managed_props[key]['Function'] == 'original_owner'\n        self.properties[key] = self.user_owner_uuid\n      elsif managed_props[key]['Value']\n        self.properties[key] = managed_props[key]['Value']\n      else\n        logger.warn \"Unidentified default property definition '#{key}': #{managed_props[key].inspect}\"\n      end\n    end\n  end\n\n  def portable_manifest_text\n    self.class.munge_manifest_locators(manifest_text) do |match|\n      if match[2] # size\n        match[1] + match[2]\n      else\n        match[1]\n      end\n    end\n  end\n\n  def compute_pdh\n    portable_manifest = portable_manifest_text\n    (Digest::MD5.hexdigest(portable_manifest) +\n     '+' +\n     portable_manifest.bytesize.to_s)\n  end\n\n  def computed_pdh\n    if @computed_pdh_for_manifest_text == manifest_text\n      return @computed_pdh\n    end\n    @computed_pdh = compute_pdh\n    @computed_pdh_for_manifest_text = manifest_text.dup\n    @computed_pdh\n  end\n\n  def ensure_permission_to_save\n    if (not current_user.andand.is_admin)\n      if (replication_confirmed_at_changed? or replication_confirmed_changed?) and\n        not (replication_confirmed_at.nil? and replication_confirmed.nil?)\n        raise ArvadosModel::PermissionDeniedError.new(\"replication_confirmed and replication_confirmed_at attributes cannot be changed, except by setting both to nil\")\n      end\n      if (storage_classes_confirmed_changed? or storage_classes_confirmed_at_changed?) and\n        not (storage_classes_confirmed == [] and storage_classes_confirmed_at.nil?)\n        raise ArvadosModel::PermissionDeniedError.new(\"storage_classes_confirmed and storage_classes_confirmed_at attributes cannot be changed, except by setting them to [] and nil respectively\")\n      end\n    end\n    super\n  end\n\n  def ensure_storage_classes_desired_is_not_empty\n    if self.storage_classes_desired.empty?\n      errors.add(:storage_classes_desired, \"must not be empty\")\n    end\n  end\n\n  def past_versions_cannot_be_updated\n    if is_past_version?\n      errors.add(:base, \"past versions cannot be updated\")\n      false\n    end\n  end\n\n  def protected_managed_properties_updates\n    managed_properties = Rails.configuration.Collections.ManagedProperties.with_indifferent_access\n    if managed_properties.empty? || !properties_changed? 
|| current_user.is_admin\n      return true\n    end\n    protected_props = managed_properties.keys.select do |p|\n      Rails.configuration.Collections.ManagedProperties[p]['Protected']\n    end\n    # Pre-existent protected properties can't be updated\n    invalid_updates = properties_was.keys.select{|p| properties_was[p] != properties[p]} & protected_props\n    if !invalid_updates.empty?\n      invalid_updates.each do |p|\n        errors.add(\"protected property cannot be updated:\", p)\n      end\n      raise PermissionDeniedError.new\n    end\n    true\n  end\n\n  def workflow_type_property\n    return if properties[\"type\"] == properties_was[\"type\"] || properties_was[\"type\"] != \"workflow\"\n\n    # properties[\"type\"] changed and the previous value of\n    # properties[\"type\"] was \"workflow\"\n\n    linked_workflows = Workflow.where(collection_uuid: self.uuid)\n    if !linked_workflows.empty?\n      errors.add(:properties, \"cannot change 'type' property when there are linked workflows\")\n      return false\n    end\n  end\n\n  def versioning_metadata_updates\n    valid = true\n    if !is_past_version? && current_version_uuid_changed?\n      errors.add(:current_version_uuid, \"cannot be updated\")\n      valid = false\n    end\n    if version_changed?\n      errors.add(:version, \"cannot be updated\")\n      valid = false\n    end\n    valid\n  end\n\n  def assign_uuid\n    super\n    self.current_version_uuid ||= self.uuid\n    true\n  end\n\n  def log_update\n    super unless (saved_changes.keys - UNLOGGED_CHANGES).empty?\n  end\n\n  def self.cwl_shortname inputid\n    inputid.split(\"/\")[-1]\n  end\nend\n"
  },
  {
    "path": "services/api/app/models/computed_permission.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'record_filters'\n\nclass ComputedPermission < ApplicationRecord\n  self.table_name = 'materialized_permissions'\n  include CurrentApiClient\n  include CommonApiTemplate\n  extend RecordFilters\n\n  PERM_LEVEL_S = ['none', 'can_read', 'can_write', 'can_manage']\n\n  api_accessible :user do |t|\n    t.add :user_uuid\n    t.add :target_uuid\n    t.add :perm_level_s, as: :perm_level\n  end\n\n  protected\n\n  def perm_level_s\n    PERM_LEVEL_S[perm_level]\n  end\n\n  def self.default_orders\n    [\"#{table_name}.user_uuid\", \"#{table_name}.target_uuid\"]\n  end\n\n  def self.readable_by(*args)\n    self\n  end\n\n  def self.searchable_columns(operator)\n    if !operator.match(/[<=>]/) && !operator.in?(['in', 'not in'])\n      []\n    else\n      ['user_uuid', 'target_uuid']\n    end\n  end\n\n  def self.limit_index_columns_read\n    []\n  end\n\n  def self.selectable_attributes\n    %w(user_uuid target_uuid perm_level)\n  end\n\n  def self.columns_for_attributes(select_attributes)\n    select_attributes\n  end\n\n  def self.serialized_attributes\n    {}\n  end\n\n  def self.unique_columns\n    []\n  end\nend\n"
  },
  {
    "path": "services/api/app/models/container.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'log_reuse_info'\nrequire 'whitelist_update'\nrequire 'safe_json'\nrequire 'update_priorities'\nrequire 'validate_serialized'\n\nclass Container < ArvadosModel\n  include ArvadosModelUpdates\n  include HasUuid\n  include KindAndEtag\n  include CommonApiTemplate\n  include WhitelistUpdate\n  extend CurrentApiClient\n  extend DbCurrentTime\n  extend LogReuseInfo\n\n  # Posgresql JSONB columns should NOT be declared as serialized, Rails 5\n  # already know how to properly treat them.\n  attribute :secret_mounts, :jsonbHash, default: {}\n  attribute :runtime_status, :jsonbHash, default: {}\n  attribute :runtime_auth_scopes, :jsonbArray, default: []\n  attribute :output_storage_classes, :jsonbArray, default: lambda { Rails.configuration.DefaultStorageClasses }\n  attribute :output_properties, :jsonbHash, default: {}\n\n  serialize :environment, Hash\n  serialize :mounts, Hash\n  serialize :runtime_constraints, Hash\n  serialize :command, Array\n  serialize :scheduling_parameters, Hash\n  serialize :output_glob, Array\n\n  after_find :fill_container_defaults_after_find\n  before_validation :fill_field_defaults, :if => :new_record?\n  before_validation :set_timestamps\n  before_validation :check_lock\n  before_validation :check_unlock\n  validates :command, :container_image, :output_path, :cwd, :priority, { presence: true }\n  validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0 }\n  validates :command, array_of_strings: {allow_empty_strings: true}\n  validates :environment, hash_attr: true\n  validates :mounts, hash_attr: true\n  validates :output_glob, array_of_strings: true\n  validates :output_properties, hash_attr: true\n  validates :output_storage_classes, array_of_strings: true\n  validates :published_ports, hash_attr: true\n  validates :runtime_auth_scopes, array_of_strings: true\n  validates :runtime_constraints, hash_attr: true\n  validates :runtime_status, hash_attr: true\n  validates :scheduling_parameters, hash_attr: true\n  validates :secret_mounts, hash_attr: true\n  validate :validate_runtime_status\n  validate :validate_state_change\n  validate :validate_change\n  validate :validate_lock\n  validate :validate_output\n  after_validation :assign_auth\n  before_save :sort_serialized_attrs\n  before_save :update_secret_mounts_md5\n  before_save :scrub_secrets\n  before_save :clear_runtime_status_when_queued\n  before_save :assign_external_ports\n  after_save :update_cr_logs\n  after_save :handle_completed\n\n  has_many :container_requests,\n           class_name: 'ContainerRequest',\n           foreign_key: 'container_uuid',\n           primary_key: 'uuid'\n  belongs_to :auth,\n             class_name: 'ApiClientAuthorization',\n             foreign_key: 'auth_uuid',\n             primary_key: 'uuid',\n             optional: true\n\n  api_accessible :user, extend: :common do |t|\n    t.add :command\n    t.add :container_image\n    t.add :cwd\n    t.add :environment\n    t.add :exit_code\n    t.add :finished_at\n    t.add :locked_by_uuid\n    t.add :log\n    t.add :mounts\n    t.add :output\n    t.add :output_path\n    t.add :output_glob\n    t.add :priority\n    t.add :progress\n    t.add :runtime_constraints\n    t.add :runtime_status\n    t.add :started_at\n    t.add :state\n    t.add :auth_uuid\n    t.add :scheduling_parameters\n    t.add :runtime_user_uuid\n    t.add :runtime_auth_scopes\n    t.add :lock_count\n    t.add 
# Supported states for a container\n  States =\n    [\n     (Queued = 'Queued'),\n     (Locked = 'Locked'),\n     (Running = 'Running'),\n     (Complete = 'Complete'),\n     (Cancelled = 'Cancelled')\n    ]\n\n  State_transitions = {\n    nil => [Queued],\n    Queued => [Locked, Cancelled],\n    Locked => [Queued, Running, Cancelled],\n    Running => [Complete, Cancelled],\n    Complete => [Cancelled]\n  }\n\n  def self.full_text_searchable_columns\n    super - [\"secret_mounts\", \"secret_mounts_md5\", \"runtime_token\", \"gateway_address\", \"output_storage_classes\"]\n  end\n\n  def self.searchable_columns *args\n    super - [\"secret_mounts_md5\", \"runtime_token\", \"gateway_address\", \"output_storage_classes\"]\n  end\n\n  def logged_attributes\n    super.except('secret_mounts', 'runtime_token')\n  end\n\n  def state_transitions\n    State_transitions\n  end\n\n  # Container priority is the highest \"computed priority\" of any\n  # matching request. The computed priority of a container-submitted\n  # request is the priority of the submitting container. The computed\n  # priority of a user-submitted request is a function of\n  # user-assigned priority and request creation time.\n  def update_priority!\n    update_priorities uuid\n    reload\n  end\n\n  # Create a new container (or find an existing one) to satisfy the\n  # given container request.\n  def self.resolve(req)\n    if req.runtime_token.nil?\n      runtime_user = if req.modified_by_user_uuid.nil?\n                       current_user\n                     else\n                       User.find_by_uuid(req.modified_by_user_uuid)\n                     end\n      runtime_auth_scopes = [\"all\"]\n    else\n      auth = ApiClientAuthorization.validate(token: req.runtime_token)\n      if auth.nil?\n        raise ArgumentError.new \"Invalid runtime token\"\n      end\n      runtime_user = User.find_by_id(auth.user_id)\n      runtime_auth_scopes = auth.scopes\n    end\n    c_attrs = act_as_user runtime_user do\n      {\n        command: req.command,\n        cwd: req.cwd,\n        environment: req.environment,\n        output_path: req.output_path,\n        output_glob: req.output_glob,\n        container_image: resolve_container_image(req.container_image),\n        mounts: resolve_mounts(req.mounts),\n        runtime_constraints: resolve_runtime_constraints(req.runtime_constraints),\n        scheduling_parameters: req.scheduling_parameters,\n        secret_mounts: req.secret_mounts,\n        runtime_token: req.runtime_token,\n        runtime_user_uuid: runtime_user.uuid,\n        runtime_auth_scopes: runtime_auth_scopes,\n        output_storage_classes: req.output_storage_classes,\n        service: req.service,\n        published_ports: req.published_ports,\n      }\n    end\n    act_as_system_user do\n      if req.use_existing && (reusable = find_reusable(c_attrs))\n        reusable\n      else\n        Container.create!(c_attrs)\n      end\n    end\n  end\n\n  # Return a runtime_constraints hash that complies with the requested\n  # constraints but is suitable for saving in a container record, i.e., has\n  # specific values instead of ranges.\n  #\n  # Doing this as a step separate from other resolutions, like \"git\n  # revision range to commit hash\", makes sense only when there is no\n  # opportunity to reuse an 
existing container (e.g., container reuse\n  # is not implemented yet, or we have already found that no existing\n  # containers are suitable).\n  def self.resolve_runtime_constraints(runtime_constraints)\n    rc = {}\n    runtime_constraints.each do |k, v|\n      if v.is_a? Array\n        rc[k] = v[0]\n      else\n        rc[k] = v\n      end\n    end\n    if rc['keep_cache_ram'] == 0\n      rc['keep_cache_ram'] = Rails.configuration.Containers.DefaultKeepCacheRAM\n    end\n    if rc['keep_cache_disk'] == 0 and rc['keep_cache_ram'] == 0\n      rc['keep_cache_disk'] = bound_keep_cache_disk(rc['ram'])\n    end\n    ContainerRequest.translate_cuda_to_gpu rc\n    self.deep_sort_hash(rc)\n  end\n\n  # Return a mounts hash suitable for a Container, i.e., with every\n  # readonly collection UUID resolved to a PDH.\n  def self.resolve_mounts(mounts)\n    c_mounts = {}\n    mounts.each do |k, mount|\n      mount = mount.dup\n      c_mounts[k] = mount\n      if mount['kind'] != 'collection'\n        next\n      end\n\n      uuid = mount.delete 'uuid'\n\n      if mount['portable_data_hash'].nil? and !uuid.nil?\n        # PDH not supplied, try by UUID\n        c = Collection.\n          readable_by(current_user).\n          where(uuid: uuid).\n          select(:portable_data_hash).\n          first\n        if !c\n          raise ArvadosModel::UnresolvableContainerError.new \"cannot mount collection #{uuid.inspect}: not found\"\n        end\n        mount['portable_data_hash'] = c.portable_data_hash\n      end\n    end\n    return c_mounts\n  end\n\n  # Return a container_image PDH suitable for a Container.\n  def self.resolve_container_image(container_image)\n    coll = Collection.for_latest_docker_image(container_image)\n    if !coll\n      raise ArvadosModel::UnresolvableContainerError.new \"docker image #{container_image.inspect} not found\"\n    end\n    coll.portable_data_hash\n  end\n\n  def self.find_reusable(attrs)\n    log_reuse_info { \"starting with #{Container.all.count} container records in database\" }\n    candidates = Container.where_serialized(:command, attrs[:command], md5: true)\n    log_reuse_info(candidates) { \"after filtering on command #{attrs[:command].inspect}\" }\n\n    candidates = candidates.where('cwd = ?', attrs[:cwd])\n    log_reuse_info(candidates) { \"after filtering on cwd #{attrs[:cwd].inspect}\" }\n\n    candidates = candidates.where_serialized(:environment, attrs[:environment], md5: true)\n    log_reuse_info(candidates) { \"after filtering on environment #{attrs[:environment].inspect}\" }\n\n    candidates = candidates.where('output_path = ?', attrs[:output_path])\n    log_reuse_info(candidates) { \"after filtering on output_path #{attrs[:output_path].inspect}\" }\n\n    candidates = candidates.where_serialized(:output_glob, attrs[:output_glob], md5: true)\n    log_reuse_info(candidates) { \"after filtering on output_glob #{attrs[:output_glob].inspect}\" }\n\n    image = resolve_container_image(attrs[:container_image])\n    candidates = candidates.where('container_image = ?', image)\n    log_reuse_info(candidates) { \"after filtering on container_image #{image.inspect} (resolved from #{attrs[:container_image].inspect})\" }\n\n    candidates = candidates.where_serialized(:mounts, resolve_mounts(attrs[:mounts]), md5: true)\n    log_reuse_info(candidates) { \"after filtering on mounts #{attrs[:mounts].inspect}\" }\n\n    secret_mounts_md5 = Digest::MD5.hexdigest(SafeJSON.dump(self.deep_sort_hash(attrs[:secret_mounts])))\n    candidates = 
candidates.where('secret_mounts_md5 = ?', secret_mounts_md5)\n    log_reuse_info(candidates) { \"after filtering on secret_mounts_md5 #{secret_mounts_md5.inspect}\" }\n\n    resolved_runtime_constraints = resolve_runtime_constraints(attrs[:runtime_constraints])\n    # Ideally we would completely ignore Keep cache constraints when making\n    # reuse considerations, but our database structure makes that impractical.\n    # The best we can do is generate a search that matches on all likely values.\n    runtime_constraint_variations = {\n      keep_cache_disk: [\n        # Check for constraints without keep_cache_disk\n        # (containers that predate the constraint)\n        nil,\n        # Containers that use keep_cache_ram instead\n        0,\n        # The default value\n        bound_keep_cache_disk(resolved_runtime_constraints['ram']),\n        # The minimum default bound\n        bound_keep_cache_disk(0),\n        # The maximum default bound (presumably)\n        bound_keep_cache_disk(1 << 60),\n        # The requested value\n        resolved_runtime_constraints.delete('keep_cache_disk'),\n      ].uniq,\n      keep_cache_ram: [\n        # Containers that use keep_cache_disk instead\n        0,\n        # The default value\n        Rails.configuration.Containers.DefaultKeepCacheRAM,\n        # The requested value\n        resolved_runtime_constraints.delete('keep_cache_ram'),\n      ].uniq,\n    }\n\n    resolved_gpu = resolved_runtime_constraints['gpu']\n    if resolved_gpu.nil? or resolved_gpu['device_count'] == 0\n      runtime_constraint_variations[:gpu] = [\n        # Check for constraints without gpu\n        # (containers that predate the constraint)\n        nil,\n        # The default \"don't need GPUs\" value\n        {\n          'device_count' => 0,\n          'driver_version' => '',\n          'hardware_target' => [],\n          'stack' => '',\n          'vram' => 0,\n        },\n        # The requested value\n        resolved_runtime_constraints.delete('gpu')\n      ].uniq\n    end\n\n    # Note: deprecated in favor of the more general \"GPU\" constraint above.\n    # Kept for backwards compatibility.\n    resolved_cuda = resolved_runtime_constraints['cuda']\n    if resolved_cuda.nil? 
or resolved_cuda['device_count'] == 0\n      runtime_constraint_variations[:cuda] = [\n        # Check for constraints without cuda\n        # (containers that predate the constraint)\n        nil,\n        # The default \"don't need CUDA\" value\n        {\n          'device_count' => 0,\n          'driver_version' => '',\n          'hardware_capability' => '',\n        },\n        # The requested value\n        resolved_runtime_constraints.delete('cuda')\n      ].uniq\n    else\n      # Need to check\n      # a) for legacy containers that only mention CUDA\n      # b) for new containers that were submitted with the old API that\n      # list both CUDA and GPU\n      runtime_constraint_variations[:gpu] = [\n        nil,\n        resolved_runtime_constraints.delete('gpu')\n      ]\n    end\n\n    reusable_runtime_constraints = hash_product(**runtime_constraint_variations)\n                                     .map { |v| resolved_runtime_constraints.merge(v) }\n\n    candidates = candidates.where_serialized(:runtime_constraints, reusable_runtime_constraints, md5: true, multivalue: true)\n    log_reuse_info(candidates) { \"after filtering on runtime_constraints #{attrs[:runtime_constraints].inspect}\" }\n\n    log_reuse_info { \"checking for state=Complete with readable output and log...\" }\n\n    select_readable_pdh = Collection.\n      readable_by(current_user).\n      select(:portable_data_hash).\n      to_sql\n\n    usable = candidates.where(state: Complete, exit_code: 0)\n    log_reuse_info(usable) { \"with state=Complete, exit_code=0\" }\n\n    usable = usable.where(\"log IN (#{select_readable_pdh})\")\n    log_reuse_info(usable) { \"with readable log\" }\n\n    usable = usable.where(\"output IN (#{select_readable_pdh})\")\n    log_reuse_info(usable) { \"with readable output\" }\n\n    usable = usable.order('finished_at ASC').limit(1).first\n    if usable\n      log_reuse_info { \"done, reusing container #{usable.uuid} with state=Complete\" }\n      return usable\n    end\n\n    # Check for non-failing Running candidates and return the most likely to finish sooner.\n    log_reuse_info { \"checking for state=Running...\" }\n    running = candidates.where(state: Running).\n              where(\"(runtime_status->'error') is null and priority > 0\").\n              order('progress desc, started_at asc').\n              limit(1).first\n    if running\n      log_reuse_info { \"done, reusing container #{running.uuid} with state=Running\" }\n      return running\n    else\n      log_reuse_info { \"have no containers in Running state\" }\n    end\n\n    # Check for Locked or Queued ones and return the most likely to start first.\n    locked_or_queued = candidates.\n                       where(\"state IN (?)\", [Locked, Queued]).\n                       order('state asc, priority desc, created_at asc').\n                       limit(1)\n    if !attrs[:scheduling_parameters]['preemptible']\n      locked_or_queued = locked_or_queued.\n                           where(\"not ((scheduling_parameters::jsonb)->>'preemptible')::boolean\")\n    end\n    chosen = locked_or_queued.first\n    if chosen\n      log_reuse_info { \"done, reusing container #{chosen.uuid} with state=#{chosen.state}\" }\n      return chosen\n    else\n      log_reuse_info { \"have no containers in Locked or Queued state\" }\n    end\n\n    log_reuse_info { \"done, no reusable container found\" }\n    nil\n  end\n\n  def lock\n    self.with_lock do\n      if self.state != Queued\n        raise LockFailedError.new(\"cannot lock 
when #{self.state}\")\n      end\n      self.update!(state: Locked)\n    end\n  end\n\n  def check_lock\n    if state_was == Queued and state == Locked\n      if self.priority <= 0\n        raise LockFailedError.new(\"cannot lock when priority<=0\")\n      end\n      self.lock_count = self.lock_count+1\n    end\n  end\n\n  def unlock\n    self.with_lock do\n      if self.state != Locked\n        raise InvalidStateTransitionError.new(\"cannot unlock when #{self.state}\")\n      end\n      self.update!(state: Queued)\n    end\n  end\n\n  def check_unlock\n    if state_was == Locked and state == Queued\n      if self.locked_by_uuid != current_api_client_authorization.uuid\n        raise ArvadosModel::PermissionDeniedError.new(\"locked by a different token\")\n      end\n      if self.lock_count >= Rails.configuration.Containers.MaxDispatchAttempts\n        self.state = Cancelled\n        self.runtime_status = {error: \"Failed to start container.  Cancelled after exceeding 'Containers.MaxDispatchAttempts' (lock_count=#{self.lock_count})\"}\n      end\n    end\n  end\n\n  def self.readable_by(*users_list)\n    return super if users_list.select { |u| u.is_a?(User) && u.is_admin }.any?\n    Container.where(ContainerRequest.readable_by(*users_list).where(\"containers.uuid = container_requests.container_uuid\").arel.exists)\n  end\n\n  def final?\n    [Complete, Cancelled].include?(self.state)\n  end\n\n  def self.for_current_token\n    return if !current_api_client_authorization\n    _, _, _, container_uuid = Thread.current[:token].split('/')\n    if container_uuid.nil?\n      Container.where(auth_uuid: current_api_client_authorization.uuid).first\n    else\n      Container.where('auth_uuid=? or (uuid=? and runtime_token=?)',\n                      current_api_client_authorization.uuid,\n                      container_uuid,\n                      current_api_client_authorization.token).first\n    end\n  end\n\n  protected\n\n  def self.bound_keep_cache_disk(value)\n    value ||= 0\n    min_value = 2 << 30\n    max_value = 32 << 30\n    if value < min_value\n      min_value\n    elsif value > max_value\n      max_value\n    else\n      value\n    end\n  end\n\n  def self.hash_product(**kwargs)\n    # kwargs is a hash that maps parameters to an array of values.\n    # This function enumerates every possible hash where each key has one of\n    # the values from its array.\n    # The output keys are strings since that's what container hash attributes\n    # want.\n    # A nil value yields a hash without that key.\n    [[:_, nil]].product(\n      *kwargs.map { |(key, values)| [key.to_s].product(values) },\n    ).map { |param_pairs| Hash[param_pairs].compact }\n  end\n\n  def fill_field_defaults\n    self.state ||= Queued\n    self.environment ||= {}\n    self.runtime_constraints ||= {}\n    self.mounts ||= {}\n    self.output_glob ||= []\n    self.cwd ||= \".\"\n    self.priority ||= 0\n    self.scheduling_parameters ||= {}\n  end\n\n  def permission_to_create\n    current_user.andand.is_admin\n  end\n\n  def permission_to_destroy\n    current_user.andand.is_admin\n  end\n\n  def ensure_owner_uuid_is_permitted\n    # validate_change ensures owner_uuid can't be changed at all --\n    # except during create, which requires admin privileges. Checking\n    # permission here would be superfluous.\n    true\n  end\n\n  def set_timestamps\n    if self.state_changed? and self.state == Running\n      self.started_at ||= db_current_time\n    end\n\n    if self.state_changed? and [Complete, Cancelled].include? 
self.state\n      self.finished_at ||= db_current_time\n    end\n  end\n\n  # Check that well-known runtime status keys have desired data types\n  def validate_runtime_status\n    [\n      'error', 'errorDetail', 'warning', 'warningDetail', 'activity',\n      'preemptionNotice',\n    ].each do |k|\n      if self.runtime_status.andand.include?(k) && !self.runtime_status[k].is_a?(String)\n        errors.add(:runtime_status, \"'#{k}' value must be a string\")\n      end\n    end\n  end\n\n  def validate_change\n    permitted = [:state]\n    final_attrs = [:finished_at]\n    progress_attrs = [:progress, :runtime_status, :subrequests_cost, :cost,\n                      :log, :output, :output_properties, :exit_code]\n\n    if self.new_record?\n      permitted.push(:owner_uuid, :command, :container_image, :cwd,\n                     :environment, :mounts, :output_path, :output_glob,\n                     :priority, :runtime_constraints,\n                     :scheduling_parameters, :secret_mounts,\n                     :runtime_token, :runtime_user_uuid,\n                     :runtime_auth_scopes, :output_storage_classes,\n                     :service, :published_ports)\n    end\n\n    case self.state\n    when Locked\n      permitted.push :priority, :runtime_status, :log, :lock_count\n\n    when Queued\n      permitted.push :priority\n\n    when Running\n      permitted.push :priority, :output_properties, :gateway_address, *progress_attrs\n      if self.state_changed?\n        permitted.push :started_at\n      end\n      if !self.interactive_session_started_was\n        permitted.push :interactive_session_started\n      end\n\n    when Complete\n      if self.state_was == Running\n        permitted.push *final_attrs, *progress_attrs\n      end\n\n    when Cancelled\n      case self.state_was\n      when Running\n        permitted.push :finished_at, *progress_attrs\n      when Queued, Locked\n        permitted.push :finished_at, :log, :runtime_status, :cost\n      end\n\n    else\n      # The state_transitions check will add an error message for this\n      return false\n    end\n\n    if self.state_was == Running &&\n       !current_api_client_authorization.nil? &&\n       (current_api_client_authorization.uuid == self.auth_uuid ||\n        current_api_client_authorization.token == self.runtime_token)\n      # The contained process itself can write final attrs but can't\n      # change priority or log.\n      permitted.push *final_attrs\n      permitted = permitted - [:log, :priority]\n    elsif !current_user.andand.is_admin\n      raise PermissionDeniedError\n    elsif self.locked_by_uuid && self.locked_by_uuid != current_api_client_authorization.andand.uuid\n      # When locked, progress fields cannot be updated by the wrong\n      # dispatcher, even though it has admin privileges.\n      permitted = permitted - progress_attrs\n    end\n    check_update_whitelist permitted\n  end\n\n  
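# Enforce the locking rules: while a container is Locked or Running,\n  # locked_by_uuid must stay with the token that holds the lock.\n  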
def validate_lock\n    if [Locked, Running].include? self.state\n      # If the Container was already locked, locked_by_uuid must not\n      # change. Otherwise, the current auth gets the lock.\n      need_lock = locked_by_uuid_was || current_api_client_authorization.andand.uuid\n    else\n      need_lock = nil\n    end\n\n    # The caller can provide a new value for locked_by_uuid, but only\n    # if it's exactly what we expect. This allows a caller to perform\n    # an update like {\"state\":\"Unlocked\",\"locked_by_uuid\":null}.\n    if self.locked_by_uuid_changed?\n      if self.locked_by_uuid != need_lock\n        return errors.add :locked_by_uuid, \"can only change to #{need_lock}\"\n      end\n    end\n    self.locked_by_uuid = need_lock\n  end\n\n  def validate_output\n    # Output must exist and be readable by the current user.  This is so\n    # that a container cannot \"claim\" a collection that it doesn't otherwise\n    # have access to just by setting the output field to the collection PDH.\n    if output_changed?\n      c = Collection.\n            readable_by(current_user, {include_trash: true}).\n            where(portable_data_hash: self.output).\n            first\n      if !c\n        errors.add :output, \"collection must exist and be readable by current user.\"\n      end\n    end\n  end\n\n  def update_cr_logs\n    # If self.final?, this update is superfluous: the final log/output\n    # update will be done when handle_completed calls finalize! on\n    # each requesting CR.\n    return if self.final? || !saved_change_to_log?\n    leave_modified_by_user_alone do\n      ContainerRequest.where(container_uuid: self.uuid, state: ContainerRequest::Committed).each do |cr|\n        cr.update_collections(container: self, collections: ['log'])\n        cr.save!\n      end\n    end\n  end\n\n  def assign_auth\n    if self.auth_uuid_changed?\n      return errors.add :auth_uuid, 'is readonly'\n    end\n    if not [Locked, Running].include? self.state\n      # Don't need one. If auth already exists, expire it.\n      #\n      # We use db_transaction_time here (not db_current_time) to\n      # ensure the token doesn't validate later in the same\n      # transaction (e.g., in a test case) by satisfying expires_at >\n      # transaction timestamp.\n      self.auth.andand.update(expires_at: db_transaction_time)\n      self.auth = nil\n      return\n    elsif self.auth\n      # already have one\n      return\n    end\n    if self.runtime_token.nil?\n      if self.runtime_user_uuid.nil?\n        # Legacy behavior: we don't have a runtime_user_uuid, so get\n        # the user from the highest priority container request. This is\n        # needed when performing an upgrade while there are queued\n        # containers, and in some tests.\n        cr = ContainerRequest.\n               where('container_uuid=? and priority>0', self.uuid).\n               order('priority desc').\n               first\n        if !cr\n          return errors.add :auth_uuid, \"cannot be assigned because priority <= 0\"\n        end\n        self.runtime_user_uuid = cr.modified_by_user_uuid\n        self.runtime_auth_scopes = [\"all\"]\n      end\n\n      # Generate a new token. 
This runs with admin credentials as it's done by a\n      # dispatcher user, so expires_at isn't enforced by API.MaxTokenLifetime.\n      self.auth = ApiClientAuthorization.\n                    create!(user_id: User.find_by_uuid(self.runtime_user_uuid).id,\n                            scopes: self.runtime_auth_scopes)\n    end\n  end\n\n  def sort_serialized_attrs\n    if self.environment_changed?\n      self.environment = self.class.deep_sort_hash(self.environment)\n    end\n    if self.mounts_changed?\n      self.mounts = self.class.deep_sort_hash(self.mounts)\n    end\n    if self.runtime_constraints_changed?\n      self.runtime_constraints = self.class.deep_sort_hash(self.runtime_constraints)\n    end\n    if self.scheduling_parameters_changed?\n      self.scheduling_parameters = self.class.deep_sort_hash(self.scheduling_parameters)\n    end\n    if self.runtime_auth_scopes_changed?\n      self.runtime_auth_scopes = self.runtime_auth_scopes.sort\n    end\n  end\n\n  def update_secret_mounts_md5\n    if self.secret_mounts_changed?\n      self.secret_mounts_md5 = Digest::MD5.hexdigest(\n        SafeJSON.dump(self.class.deep_sort_hash(self.secret_mounts)))\n    end\n  end\n\n  def scrub_secrets\n    # this runs after update_secret_mounts_md5, so the\n    # secret_mounts_md5 will still reflect the secrets that are being\n    # scrubbed here.\n    if self.state_changed? && self.final?\n      self.secret_mounts = {}\n      self.runtime_token = nil\n    end\n  end\n\n  def clear_runtime_status_when_queued\n    # Avoid leaking status messages between different dispatch attempts\n    if self.state_was == Locked && self.state == Queued\n      self.runtime_status = {}\n    end\n  end\n\n  def assign_external_ports\n    if state_was == Running && state != Running\n      ActiveRecord::Base.connection.exec_query(\n        'delete from container_ports where container_uuid=$1',\n        'assign_external_ports',\n        [uuid])\n    elsif state_was != Running && state == Running\n      exturl = Rails.configuration.Services.ContainerWebServices.ExternalURL\n      port_min = Rails.configuration.Services.ContainerWebServices.ExternalPortMin\n      port_max = Rails.configuration.Services.ContainerWebServices.ExternalPortMax\n      if port_min.andand > 0 &&\n         port_max.andand > 0 &&\n         !exturl.andand.host.andand.starts_with?(\"*\")\n        ActiveRecord::Base.connection.execute(\n          'lock table container_ports in exclusive mode')\n        published_ports.each do |ppkey, ppvalue|\n          external_port = nil\n          ActiveRecord::Base.connection.exec_query(\n            'select * from generate_series($1::int, $2::int) as port ' +\n            'where port not in (select external_port from container_ports) ' +\n            'limit 1',\n            'assign_external_ports',\n            [port_min, port_max]).each do |row|\n            external_port = row['port']\n          end\n          if !external_port\n            Rails.logger.debug(\"no ports available for #{uuid} port #{ppkey}\")\n            break\n          end\n          ActiveRecord::Base.connection.exec_query(\n            'insert into container_ports ' +\n            '(external_port, container_uuid, container_port) ' +\n            'values ($1, $2, $3)',\n            'assign_external_ports',\n            [external_port, uuid, ppkey.to_i])\n          ppvalue['external_port'] = external_port\n          published_ports[ppkey] = ppvalue\n        end\n      end\n      published_ports.each do |ppkey, ppvalue|\n        baseurl = 
exturl.dup\n        if baseurl.host.starts_with?(\"*\")\n          baseurl.host = \"#{uuid}-#{ppkey}#{baseurl.host[1..]}\"\n        elsif ppvalue['external_port'].andand > 0\n          baseurl.port = ppvalue['external_port'].to_s\n        else\n          next\n        end\n        ppvalue['base_url'] = baseurl.to_s\n        initialurl = baseurl\n        if ppvalue['initial_path'] && ppvalue['initial_path'] != \"\"\n          initialurl.path = \"/\" + ppvalue['initial_path'].delete_prefix(\"/\")\n        end\n        ppvalue['initial_url'] = initialurl.to_s\n        published_ports[ppkey] = ppvalue\n      end\n    end\n  end\n\n  def handle_completed\n    # This container is finished, so finalize any container requests\n    # associated with this container.\n    if saved_change_to_state? and self.final?\n      # These get wiped out by with_lock (which reloads the record),\n      # so record them now in case we need to schedule a retry.\n      prev_secret_mounts = secret_mounts_before_last_save\n      prev_runtime_token = runtime_token_before_last_save\n\n      # Need to take a lock on the container to ensure that any\n      # concurrent container requests that might try to reuse this\n      # container will block until the container completion\n      # transaction finishes.  This ensures that concurrent container\n      # requests that try to reuse this container are finalized (on\n      # Complete) or don't reuse it (on Cancelled).\n      self.with_lock do\n        act_as_system_user do\n          if self.state == Cancelled\n            # Cancelled means the container didn't run to completion.\n            # This happens either because it was cancelled by the user\n            # or because there was an infrastructure failure.  We want\n            # to retry infrastructure failures automatically.\n            #\n            # Search for live container requests to determine if we\n            # should retry the container.\n            retryable_requests = ContainerRequest.\n                                   joins('left outer join containers as requesting_container on container_requests.requesting_container_uuid = requesting_container.uuid').\n                                   where(\"container_requests.container_uuid = ? and \"+\n                                         \"container_requests.priority > 0 and \"+\n                                         \"container_requests.owner_uuid not in (select group_uuid from trashed_groups) and \"+\n                                         \"(requesting_container.priority is null or (requesting_container.state = 'Running' and requesting_container.priority > 0)) and \"+\n                                         \"container_requests.state = 'Committed' and \"+\n                                         \"container_requests.container_count < container_requests.container_count_max\", uuid).\n                                   order('container_requests.uuid asc')\n          else\n            retryable_requests = []\n          end\n\n          if retryable_requests.any?\n            
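# When retrying, merge the scheduling parameters of all live requests\n            # conservatively; e.g., a max_run_time of 3600 merged with 0\n            # (unlimited) yields 0, and partitions are unioned unless any\n            # request leaves them unrestricted.\n            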
scheduling_parameters = {\n              # partitions: empty if any are empty, else the union of all parameters\n              \"partitions\": retryable_requests\n                              .map { |req| req.scheduling_parameters[\"partitions\"] || [] }\n                              .reduce { |cur, new| (cur.empty? or new.empty?) ? [] : (cur | new) },\n\n              # preemptible: true if all are true, else false\n              \"preemptible\": retryable_requests\n                               .map { |req| req.scheduling_parameters[\"preemptible\"] }\n                               .all?,\n\n              # supervisor: true if any are true, else false\n              \"supervisor\": retryable_requests\n                               .map { |req| req.scheduling_parameters[\"supervisor\"] }\n                               .any?,\n\n              # max_run_time: 0 if any are 0 (unlimited), else the maximum\n              \"max_run_time\": retryable_requests\n                                .map { |req| req.scheduling_parameters[\"max_run_time\"] || 0 }\n                                .reduce do |cur, new|\n                if cur == 0 or new == 0\n                  0\n                elsif new > cur\n                  new\n                else\n                  cur\n                end\n              end,\n            }\n\n            c_attrs = {\n              command: self.command,\n              cwd: self.cwd,\n              environment: self.environment,\n              output_path: self.output_path,\n              output_glob: self.output_glob,\n              container_image: self.container_image,\n              mounts: self.mounts,\n              runtime_constraints: self.runtime_constraints,\n              scheduling_parameters: scheduling_parameters,\n              secret_mounts: prev_secret_mounts,\n              runtime_token: prev_runtime_token,\n              runtime_user_uuid: self.runtime_user_uuid,\n              runtime_auth_scopes: self.runtime_auth_scopes\n            }\n            c = Container.create! c_attrs\n            retryable_requests.each do |cr|\n              cr.with_lock do\n                leave_modified_by_user_alone do\n                  # Use row locking because this increments container_count\n                  cr.cumulative_cost += self.cost + self.subrequests_cost\n                  cr.container_uuid = c.uuid\n                  cr.save!\n                end\n              end\n            end\n          end\n\n          # Notify container requests associated with this container\n          ContainerRequest.where(container_uuid: uuid,\n                                 state: ContainerRequest::Committed).each do |cr|\n            leave_modified_by_user_alone do\n              cr.finalize!\n            end\n          end\n\n          # Cancel outstanding container requests made by this container.\n          ContainerRequest.\n            where(requesting_container_uuid: uuid,\n                  state: ContainerRequest::Committed).\n            in_batches(of: 15).each_record do |cr|\n            leave_modified_by_user_alone do\n              cr.set_priority_zero\n              container_state = Container.where(uuid: cr.container_uuid).pluck(:state).first\n              if container_state == Container::Queued || container_state == Container::Locked\n                # If the child container hasn't started yet, finalize the\n                # child CR now instead of leaving it \"on hold\", i.e.,\n                # Queued with priority 0.  (OTOH, if the child is already\n                # running, leave it alone so it can get cancelled the\n                # usual way, get a copy of the log collection, etc.)\n                cr.update!(state: ContainerRequest::Final)\n              end\n            end\n          end\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/app/models/container_port.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass ContainerPort < ApplicationRecord\n  self.table_name = 'container_ports'\nend\n"
  },
  {
    "path": "services/api/app/models/container_request.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'arvados/collection'\nrequire 'validate_serialized'\nrequire 'whitelist_update'\n\nclass ContainerRequest < ArvadosModel\n  include ArvadosModelUpdates\n  include HasUuid\n  include KindAndEtag\n  include CommonApiTemplate\n  include WhitelistUpdate\n\n  belongs_to :container,\n             foreign_key: 'container_uuid',\n             primary_key: 'uuid',\n             optional: true\n  belongs_to :requesting_container,\n             class_name: 'Container',\n             foreign_key: 'requesting_container_uuid',\n             primary_key: 'uuid',\n             optional: true\n\n  # Posgresql JSONB columns should NOT be declared as serialized, Rails 5\n  # already know how to properly treat them.\n  attribute :properties, :jsonbHash, default: {}\n  attribute :secret_mounts, :jsonbHash, default: {}\n  attribute :output_storage_classes, :jsonbArray, default: lambda { Rails.configuration.DefaultStorageClasses }\n  attribute :output_properties, :jsonbHash, default: {}\n  attribute :published_ports, :jsonbHash, default: {}\n\n  serialize :environment, Hash\n  serialize :mounts, Hash\n  serialize :runtime_constraints, Hash\n  serialize :command, Array\n  serialize :scheduling_parameters, Hash\n  serialize :output_glob, Array\n\n  after_find :fill_container_defaults_after_find\n  after_initialize { @state_was_when_initialized = self.state_was } # see finalize_if_needed\n  before_validation :fill_field_defaults, :if => :new_record?\n  before_validation :fill_cuda_to_gpu\n  before_validation :fill_container_defaults\n  validates :command, :container_image, :output_path, :cwd, :presence => true\n  validates :output_ttl, numericality: { only_integer: true, greater_than_or_equal_to: 0 }\n  validates :priority, numericality: { only_integer: true, greater_than_or_equal_to: 0, less_than_or_equal_to: 1000 }\n  validates :command, array_of_strings: {allow_empty_strings: true}\n  validates :environment, hash_attr: true\n  validates :mounts, hash_attr: true\n  validates :output_glob, array_of_strings: true\n  validates :output_properties, hash_attr: true\n  validates :output_storage_classes, array_of_strings: true\n  validates :published_ports, hash_attr: true\n  validates :properties, hash_attr: true\n  validates :runtime_constraints, hash_attr: true\n  validates :scheduling_parameters, hash_attr: true\n  validates :secret_mounts, hash_attr: true\n  validate :validate_datatypes\n  validate :validate_runtime_constraints\n  validate :validate_scheduling_parameters\n  validate :validate_state_change\n  validate :check_update_whitelist\n  validate :secret_mounts_key_conflict\n  validate :validate_runtime_token\n  validate :validate_published_ports\n  after_validation :scrub_secrets\n  after_validation :set_preemptible\n  after_validation :set_container\n  before_create :set_requesting_container_uuid\n  before_destroy :set_priority_zero\n  after_save :update_priority\n  after_save :finalize_if_needed\n\n  api_accessible :user, extend: :common do |t|\n    t.add :command\n    t.add :container_count\n    t.add :container_count_max\n    t.add :container_image\n    t.add :container_uuid\n    t.add :cwd\n    t.add :description\n    t.add :environment\n    t.add :expires_at\n    t.add :filters\n    t.add :log_uuid\n    t.add :mounts\n    t.add :name\n    t.add :output_name\n    t.add :output_path\n    t.add :output_glob\n    t.add :output_uuid\n    t.add :output_ttl\n    t.add :priority\n    
# Supported states for a container request\n  States =\n    [\n     (Uncommitted = 'Uncommitted'),\n     (Committed = 'Committed'),\n     (Final = 'Final'),\n    ]\n\n  State_transitions = {\n    nil => [Uncommitted, Committed],\n    Uncommitted => [Committed],\n    Committed => [Final]\n  }\n\n  AttrsPermittedAlways = [:owner_uuid, :state, :name, :description, :properties]\n  AttrsPermittedBeforeCommit = [:command, :container_count_max,\n  :container_image, :cwd, :environment, :filters, :mounts,\n  :output_path, :output_glob, :priority, :runtime_token,\n  :runtime_constraints, :state, :container_uuid, :use_existing,\n  :scheduling_parameters, :secret_mounts, :output_name, :output_ttl,\n  :output_storage_classes, :output_properties, :service, :published_ports]\n\n  def self.any_preemptible_instances?\n    Rails.configuration.InstanceTypes.any? do |k, v|\n      v[\"Preemptible\"]\n    end\n  end\n\n  def logged_attributes\n    super.except('secret_mounts', 'runtime_token')\n  end\n\n  def state_transitions\n    State_transitions\n  end\n\n  def skip_uuid_read_permission_check\n    # The uuid_read_permission_check prevents users from making\n    # references to objects they can't view.  However, in this case we\n    # don't want to do that check since there's a circular dependency\n    # where the user can't view the container until the user has\n    # constructed the container request that references the container.\n    %w(container_uuid)\n  end\n\n  def finalize_if_needed\n    return if state != Committed\n    while true\n      # get container lock first, then lock current container request\n      # (same order as Container#handle_completed). Locking always\n      # reloads the Container and ContainerRequest records.\n      #\n      # If the container is already finalized, there is no need to\n      # lock or reload it.  Locking the container row unnecessarily\n      # can cause excessive database contention, see #23382.\n      c = Container.find_by_uuid(container_uuid)\n      c.lock! if !c.nil? && !c.final?\n      self.lock!\n\n      if !c.nil? && container_uuid != c.uuid\n        # After locking, we've noticed a race: the container_uuid is\n        # different from the container record we just loaded.  This\n        # can happen if Container#handle_completed scheduled a new\n        # container for retry and set container_uuid while we were\n        # waiting on the container lock.  
Restart the loop and get the\n        # new container.\n        redo\n      end\n\n      if !c.nil?\n        if state == Committed && c.final?\n          # The assigned container is final, so finalize this\n          # container request.\n          act_as_system_user do\n            leave_modified_by_user_alone do\n              finalize!\n            end\n          end\n        end\n      elsif state == Committed\n        # Behave as if the container is cancelled\n        update!(state: Final)\n      end\n      return true\n    end\n  end\n\n  # Finalize the container request after the container has\n  # finished/cancelled.\n  def finalize!\n    container = Container.find_by_uuid(container_uuid)\n    if !container.nil?\n      # We don't want to add the container cost if the container was\n      # already finished when this CR was committed. But we are\n      # running in an after_save hook after a lock/reload, so\n      # state_was has already been updated to Committed regardless.\n      # Hence the need for @state_was_when_initialized.\n      if @state_was_when_initialized == Committed\n        # Add the final container cost to our cumulative cost (which\n        # may already be non-zero from previous attempts if\n        # container_count_max > 1).\n        self.cumulative_cost += container.cost + container.subrequests_cost\n      end\n\n      # Add our cumulative cost to the subrequests_cost of the\n      # requesting container, if any.\n      if self.requesting_container_uuid\n        Container.where(\n          uuid: self.requesting_container_uuid,\n          state: Container::Running,\n        ).each do |c|\n          c.subrequests_cost += self.cumulative_cost\n          c.save!\n        end\n      end\n\n      update_collections(container: container)\n      # update_collections makes a log collection that includes all of the logs\n      # for all of the containers associated with this request. For requests\n      # that are retried, this is the primary way users can get logs for\n      # failed containers.\n      # The code below makes a log collection that is a verbatim copy of the\n      # container's logs. 
This is required for container reuse: a container\n      # will not be reused if the owner cannot read a collection with its logs.\n      # See the \"readable log\" section of Container.find_reusable().\n      if container.state == Container::Complete\n        log_col = Collection.where(portable_data_hash: container.log).first\n        if log_col\n          # Need to save collection\n          completed_coll = Collection.new(\n            owner_uuid: self.owner_uuid,\n            name: \"Container log for container #{container_uuid}\",\n            properties: {\n              'type' => 'log',\n              'container_request' => self.uuid,\n              'container_uuid' => container_uuid,\n            },\n            portable_data_hash: log_col.portable_data_hash,\n            manifest_text: log_col.manifest_text,\n            storage_classes_desired: self.output_storage_classes\n          )\n          completed_coll.save_with_unique_name!\n        end\n      end\n    end\n    update!(state: Final)\n  end\n\n  def update_collections(container:, collections: ['log', 'output'])\n\n    # Check if parent is frozen or trashed, in which case it isn't\n    # valid to create new collections in the project, so return\n    # without creating anything.\n    owner = Group.find_by_uuid(self.owner_uuid)\n    return if owner && !owner.admin_change_permitted\n\n    collections.each do |out_type|\n      pdh = container.send(out_type)\n      next if pdh.nil?\n      c = Collection.where(portable_data_hash: pdh).first\n      next if c.nil?\n      manifest = c.manifest_text\n\n      coll_name = \"Container #{out_type} for request #{uuid}\"\n      trash_at = nil\n      if out_type == 'output'\n        if self.output_name and self.output_name != \"\"\n          coll_name = self.output_name\n        end\n        if self.output_ttl > 0\n          trash_at = db_current_time + self.output_ttl\n        end\n      end\n\n      coll_uuid = self.send(out_type + '_uuid')\n      coll = coll_uuid.nil? ? 
nil : Collection.where(uuid: coll_uuid).first\n      if !coll\n        coll = Collection.new(\n          owner_uuid: self.owner_uuid,\n          name: coll_name,\n          manifest_text: \"\",\n          storage_classes_desired: self.output_storage_classes)\n      end\n\n      if out_type == \"log\"\n        # Copy the log into a merged collection\n        src = Arv::Collection.new(manifest)\n        dst = Arv::Collection.new(coll.manifest_text)\n        dst.cp_r(\"./\", \".\", src)\n        dst.cp_r(\"./\", \"log for container #{container.uuid}\", src)\n        manifest = dst.manifest_text\n      end\n\n      merged_properties = {}\n      merged_properties['container_request'] = uuid\n\n      if out_type == 'output' and !requesting_container_uuid.nil?\n        # output of a child process, give it \"intermediate\" type by\n        # default.\n        merged_properties['type'] = 'intermediate'\n      else\n        merged_properties['type'] = out_type\n      end\n\n      if out_type == \"output\"\n        merged_properties.update(container.output_properties)\n        merged_properties.update(self.output_properties)\n      end\n\n      coll.assign_attributes(\n        portable_data_hash: Digest::MD5.hexdigest(manifest) + '+' + manifest.bytesize.to_s,\n        manifest_text: manifest,\n        trash_at: trash_at,\n        delete_at: trash_at,\n        properties: merged_properties)\n      coll.save_with_unique_name!\n      self.send(out_type + '_uuid=', coll.uuid)\n    end\n  end\n\n  def self.full_text_searchable_columns\n    super - [\"mounts\", \"secret_mounts\", \"secret_mounts_md5\", \"runtime_token\", \"output_storage_classes\", \"output_glob\", \"service\", \"published_ports\"]\n  end\n\n  def set_priority_zero\n    self.update!(priority: 0) if self.priority > 0 && self.state != Final\n  end\n\n  protected\n\n  def fill_field_defaults\n    self.state ||= Uncommitted\n    self.environment ||= {}\n    self.runtime_constraints ||= {}\n    self.mounts ||= {}\n    self.secret_mounts ||= {}\n    self.cwd ||= \".\"\n    self.container_count_max ||= Rails.configuration.Containers.MaxRetryAttempts\n    self.scheduling_parameters ||= {}\n    self.output_ttl ||= 0\n    self.output_glob ||= []\n    self.priority ||= 0\n  end\n\n  def fill_cuda_to_gpu\n    ContainerRequest.translate_cuda_to_gpu attributes['runtime_constraints']\n  end\n\n  def self.translate_cuda_to_gpu rc\n    if rc.is_a?(Hash) && rc['cuda'] && rc['cuda']['device_count'] > 0\n      # Legacy API to request Nvidia GPUs, convert it so downstream\n      # code only has to handle generic GPU requests.\n      rc['gpu'] = {\n          'device_count' => rc['cuda']['device_count'],\n          'driver_version' => rc['cuda']['driver_version'],\n          'hardware_target' => [rc['cuda']['hardware_capability']],\n          'stack' => 'cuda',\n          'vram' => 0,\n      }\n    end\n  end\n\n  def set_container\n    if !errors.empty?\n      # Validation failed, record won't be saved anyway.\n      return false\n    end\n    if (container_uuid_changed? and\n        not current_user.andand.is_admin and\n        not container_uuid.nil?)\n      errors.add :container_uuid, \"can only be updated to nil.\"\n      return false\n    end\n    if self.container_count_changed?\n      errors.add :container_count, \"cannot be updated directly.\"\n      return false\n    end\n    if state_changed? 
and state == Committed and container_uuid.nil?\n      if self.command.length > 0 and self.command[0] == \"arvados-cwl-runner\"\n        # Special case, arvados-cwl-runner processes are always considered \"supervisors\"\n        self.scheduling_parameters['supervisor'] = true\n      end\n      while true\n        c = Container.resolve(self)\n        # If container is not final, we need to lock it to avoid\n        # missing CR finalization in a race with container completion.\n        # If container is final, locking can cause excessive database\n        # contention, see #23382.\n        c.lock! unless c.final?\n        if c.state == Container::Cancelled\n          # Lost a race: we have a lock on the container, but the\n          # container was cancelled in a different request. Restart\n          # the loop and resolve this request to a new container.\n          redo\n        end\n        self.container_uuid = c.uuid\n        break\n      end\n    end\n    if self.container_uuid != self.container_uuid_was\n      self.container_count += 1\n      return if self.container_uuid_was.nil?\n\n      old_container_uuid = self.container_uuid_was\n      old_container_log = Container.where(uuid: old_container_uuid).pluck(:log).first\n      return if old_container_log.nil?\n\n      old_logs = Collection.where(portable_data_hash: old_container_log).first\n      return if old_logs.nil?\n\n      log_coll = self.log_uuid.nil? ? nil : Collection.where(uuid: self.log_uuid).first\n      if self.log_uuid.nil?\n        log_coll = Collection.new(\n          owner_uuid: self.owner_uuid,\n          name: \"Container log for request #{uuid}\",\n          manifest_text: \"\",\n          storage_classes_desired: self.output_storage_classes)\n      end\n\n      # Copy logs from the old container into this request's log collection.\n      src = Arv::Collection.new(old_logs.manifest_text)\n      dst = Arv::Collection.new(log_coll.manifest_text)\n      dst.cp_r(\"./\", \"log for container #{old_container_uuid}\", src)\n      manifest = dst.manifest_text\n\n      log_coll.assign_attributes(\n        portable_data_hash: Digest::MD5.hexdigest(manifest) + '+' + manifest.bytesize.to_s,\n        manifest_text: manifest)\n      log_coll.save_with_unique_name!\n      self.log_uuid = log_coll.uuid\n    end\n  end\n\n  def set_preemptible\n    if (new_record? || state_changed?) 
&&\n       state == Committed &&\n       Rails.configuration.Containers.AlwaysUsePreemptibleInstances &&\n       get_requesting_container_uuid() &&\n       self.class.any_preemptible_instances?\n      self.scheduling_parameters['preemptible'] = true\n    end\n  end\n\n  def validate_runtime_constraints\n    if errors[:runtime_constraints].any?\n      # If runtime_constraints is not even a hash, don't raise other\n      # confusing errors by trying to do more validation.\n      return false\n    end\n    if state != Committed && !new_record?\n      # Avoid running new validations on records that are already in\n      # the database and aren't going to be submitted as a result of\n      # this update.\n      return\n    end\n    case self.state\n    when Committed\n      ['vcpus', 'ram'].each do |k|\n        v = runtime_constraints[k]\n        if !v.is_a?(Integer) || v <= 0\n          errors.add(:runtime_constraints,\n                     \"[#{k}]=#{v.inspect} must be a positive integer\")\n        end\n      end\n      if runtime_constraints['cuda']\n        disallow_extra_keys(\n          :runtime_constraints, runtime_constraints['cuda'],\n          ['device_count', 'driver_version', 'hardware_capability'])\n        ['device_count'].each do |k|\n          v = runtime_constraints['cuda'][k]\n          if !v.is_a?(Integer) || v < 0\n            errors.add(:runtime_constraints,\n                       \"[cuda.#{k}]=#{v.inspect} must be a positive or zero integer\")\n          end\n        end\n        ['driver_version', 'hardware_capability'].each do |k|\n          v = runtime_constraints['cuda'][k]\n          if !v.is_a?(String) || (runtime_constraints['cuda']['device_count'] > 0 && v.to_f == 0.0)\n            errors.add(:runtime_constraints,\n                       \"[cuda.#{k}]=#{v.inspect} must be a string in format 'X.Y'\")\n          end\n        end\n      end\n\n      if runtime_constraints['gpu'] && !runtime_constraints['gpu'].is_a?(Hash)\n        # 'gpu' is optional; if present, it must be a hash.\n        errors.add(:runtime_constraints, \"[gpu] must be a hash\")\n      elsif runtime_constraints['gpu']\n        disallow_extra_keys(\n          :runtime_constraints, runtime_constraints['gpu'],\n          ['device_count', 'driver_version', 'hardware_target', 'stack', 'vram'])\n        k = 'stack'\n        v = runtime_constraints['gpu'][k]\n        if not [nil, '', 'cuda', 'rocm'].include? 
v\n            errors.add(:runtime_constraints,\n                       \"[gpu.#{k}]=#{v.inspect} must be one of 'cuda' or 'rocm' or be empty\")\n        end\n\n        ['device_count', 'vram'].each do |k|\n          v = runtime_constraints['gpu'][k]\n          if !v.is_a?(Integer) || v < 0\n            errors.add(:runtime_constraints,\n                       \"[gpu.#{k}]=#{v.inspect} must be a positive or zero integer\")\n          end\n        end\n\n        if runtime_constraints['gpu']['device_count'] > 0\n          k = 'driver_version'\n          v = runtime_constraints['gpu'][k]\n          if !v.is_a?(String) || v.to_f == 0.0\n            errors.add(:runtime_constraints,\n                       \"[gpu.#{k}]=#{v.inspect} must be a string in format 'X.Y'\")\n          end\n\n          k = 'hardware_target'\n          v = runtime_constraints['gpu'][k]\n          if v.is_a?(Array)\n            v.each do |tgt|\n              if !tgt.is_a?(String)\n                errors.add(:runtime_constraints,\n                           \"[gpu.#{k}]=#{v.inspect} must be an array of strings\")\n              end\n            end\n          else\n            errors.add(:runtime_constraints,\n                       \"[gpu.#{k}]=#{v.inspect} must be an array of strings\")\n          end\n        end\n      end\n    end\n    disallow_extra_keys(\n      :runtime_constraints, runtime_constraints,\n      ['API', 'gpu', 'keep_cache_disk', 'keep_cache_ram', 'ram', 'vcpus',\n       # When 'cuda' is automatically converted to 'gpu', the original\n       # 'cuda' section also remains. See #21926#note-21.\n       'cuda',\n      ])\n  end\n\n  MountKindFields = {\n    \"collection\" => [\"uuid\", \"portable_data_hash\", \"writable\", \"path\", \"exclude_from_output\"],\n    \"tmp\" => [\"capacity\", \"device_type\", \"exclude_from_output\"],\n    \"keep\" => [\"exclude_from_output\"],\n    \"file\" => [\"path\", \"exclude_from_output\"],\n    \"json\" => [\"content\", \"exclude_from_output\"],\n    \"text\" => [\"content\", \"exclude_from_output\"],\n  }\n\n  SecretMountKindFields = {\n    \"json\" => MountKindFields[\"json\"],\n    \"text\" => MountKindFields[\"text\"],\n  }\n\n  MountSchema = {\n    \"kind\" => \"string\",\n    \"uuid\" => \"string\",\n    \"portable_data_hash\" => \"string\",\n    \"writable\" => \"boolean\",\n    \"path\" => \"string\",\n    \"capacity\" => \"integer\",\n    \"device_type\" => [\"ram\", \"ssd\", \"disk\", \"network\", \"\"],\n    \"exclude_from_output\" => \"boolean\",\n  }\n\n  def validate_mount_hash(attr, mountpoint, mountspec)\n    kind = mountspec[\"kind\"]\n    allowed_fields =\n      case attr\n      when :mounts\n        MountKindFields[kind]\n      when :secret_mounts\n        SecretMountKindFields[kind]\n      else\n        raise ArgumentError.new(\"validate_mount_hash called with unexpected attr #{attr}\")\n      end\n    if !allowed_fields\n      errors.add(attr, \"[#{mountpoint}][kind]: unsupported value #{kind.inspect}\")\n      return\n    end\n    # Validate value types for the keys that are present, but don't\n    # complain about keys that are not.\n    schema = MountSchema.select { |k, v| mountspec.has_key?(k) }\n    if kind == \"text\"\n      schema[\"content\"] = \"string\"\n    elsif kind == \"json\"\n      # content can be anything, so no validation\n    end\n    validator = HashValidator.validate(mountspec, schema)\n    if !validator.valid?\n      validator.errors.each do |key, err|\n        errors.add(attr, \"[#{mountpoint}][#{key}]: incompatible value 
type: #{err}\")\n      end\n    end\n    mountspec.each do |key, value|\n      # Keys that do not apply to a given mount kind need to be\n      # accepted as long as they have empty/zero values, because\n      # that's how Go code (e.g., controller) serializes the Mount\n      # struct that's used for all mount kinds.  But inapplicable keys\n      # with non-empty values are not allowed.\n      if key != \"kind\" && !allowed_fields.include?(key) && ![0, \"\", false, nil].include?(value)\n        errors.add(attr, \"[#{mountpoint}][#{key}]: parameter is not supported for a #{mountspec[\"kind\"]} mount\")\n      end\n    end\n  end\n\n  def validate_datatypes\n    if state != Committed && !new_record?\n      # Avoid running new validations on records that are already in\n      # the database and aren't going to be submitted as a result of\n      # this update.\n      return\n    end\n    if !errors[:environment].any?\n      environment.each do |k, v|\n        if k.include?(\"\\0\") || k.include?(\"=\")\n          errors.add(:environment, \"key #{k.inspect} contains an invalid character NUL or '='\")\n        end\n        if k == \"\"\n          errors.add(:environment, \"key cannot be empty\")\n        end\n        if !v.is_a?(String)\n          errors.add(:environment, \"[#{k}]: incompatible value type: string required\")\n        elsif v.include?(\"\\0\")\n          errors.add(:environment, \"value for #{k.inspect} contains invalid character NUL\")\n        end\n      end\n    end\n    stream_targets = {\n      mounts: [\"stdin\", \"stdout\", \"stderr\"],\n      secret_mounts: [\"stdin\"],\n    }\n    [:mounts, :secret_mounts].each do |m|\n      if errors[m].any?\n        # Validation is already failing in a way that could make the\n        # following validations fail (e.g., non-string keys).\n        next\n      end\n      self[m].each do |k, v|\n        if !k.in?(stream_targets[m]) && !k.start_with?(\"/\")\n          errors.add(m, \"[#{k}]: invalid target: must be #{stream_targets[m].join(\", \")} or an absolute path\")\n        end\n        if !v.is_a?(Hash)\n          errors.add(m, \"[#{k}]: invalid mount specification: must be a hash, not a #{v.class.to_s.downcase}\")\n        else\n          validate_mount_hash(m, k, v)\n        end\n      end\n    end\n    if !mounts.has_key?(output_path)\n      errors.add(:output_path, \"must be a mount target\")\n    end\n  end\n\n  def validate_scheduling_parameters\n    if state != Committed && !new_record?\n      # Avoid running new validations on records that are already in\n      # the database and aren't going to be submitted as a result of\n      # this update.\n      return\n    end\n    if scheduling_parameters.include?('partitions') and\n      !scheduling_parameters['partitions'].nil? and\n      (!scheduling_parameters['partitions'].is_a?(Array) ||\n       scheduling_parameters['partitions'].reject{|x| !x.is_a?(String)}.size !=\n       scheduling_parameters['partitions'].size)\n      errors.add :scheduling_parameters, \"partitions must be an array of strings\"\n    end\n    if scheduling_parameters.include? 
'max_run_time' and\n      (!scheduling_parameters['max_run_time'].is_a?(Integer) ||\n       scheduling_parameters['max_run_time'] < 0)\n      errors.add :scheduling_parameters, \"max_run_time must be a non-negative integer\"\n    end\n    disallow_extra_keys(\n      :scheduling_parameters, scheduling_parameters,\n      ['max_run_time', 'partitions', 'preemptible', 'supervisor'])\n\n    # Configuration could change before state changes to Committed, so\n    # this is not flagged as an error for an Uncommitted request.  We also\n    # don't want to prevent finalizing due to a config change.\n    if state == Committed &&\n       scheduling_parameters['preemptible'] &&\n       (new_record? || state_changed?) &&\n       !self.class.any_preemptible_instances?\n      errors.add :scheduling_parameters, \"preemptible instances are not configured in InstanceTypes\"\n    end\n  end\n\n  def disallow_extra_keys(attr, h, allowed_keys)\n    extra_keys = h.keys - allowed_keys\n    if extra_keys.any?\n      errors.add(attr, \"contains unexpected keys #{extra_keys}\")\n    end\n  end\n\n  def check_update_whitelist\n    permitted = AttrsPermittedAlways.dup\n\n    if self.new_record? || self.state_was == Uncommitted\n      # Allow create-and-commit in a single operation.\n      permitted.push(*AttrsPermittedBeforeCommit)\n    elsif mounts_changed? && mounts_was.keys.sort == mounts.keys.sort\n      # Ignore the updated mounts if the only changes are default/zero\n      # values as added by controller, see 17774\n      only_defaults = true\n      mounts.each do |path, mount|\n        (mount.to_a - mounts_was[path].to_a).each do |k, v|\n          if ![0, \"\", false, nil].index(v)\n            only_defaults = false\n          end\n        end\n      end\n      if only_defaults\n        clear_attribute_change(\"mounts\")\n      end\n    end\n\n    case self.state\n    when Committed\n      permitted.push :priority, :container_count_max, :container_uuid, :cumulative_cost\n\n      if self.priority.nil?\n        self.errors.add :priority, \"cannot be nil\"\n      end\n\n      # Allow container count to increment (not by client, only by us\n      # -- see set_container)\n      permitted.push :container_count\n\n      if current_user.andand.is_admin\n        permitted.push :log_uuid\n      end\n\n    when Final\n      if self.state_was == Committed\n        # \"Cancel\" means setting priority=0, state=Committed\n        permitted.push :priority, :cumulative_cost\n\n        if current_user.andand.is_admin\n          permitted.push :output_uuid, :log_uuid\n        end\n      end\n\n    end\n\n    super(permitted)\n  end\n\n  def secret_mounts_key_conflict\n    secret_mounts.each do |k, v|\n      if mounts.has_key?(k)\n        errors.add(:secret_mounts, 'conflict with non-secret mounts')\n        return false\n      end\n    end\n  end\n\n  def validate_runtime_token\n    if !self.runtime_token.nil? 
&& self.runtime_token_changed?\n      if runtime_token[0..2] != \"v2/\"\n        errors.add :runtime_token, \"not a v2 token\"\n        return\n      end\n      if ApiClientAuthorization.validate(token: runtime_token).nil?\n        errors.add :runtime_token, \"failed validation\"\n      end\n    end\n  end\n\n  def validate_published_ports\n    if self.service and self.use_existing\n      errors.add :use_existing, \"cannot be true if 'service' is true\"\n    end\n\n    self.published_ports.each do |k,v|\n      if !/^[0-9]+$/.match?(k)\n        errors.add :published_ports, \"entry #{k} must be a decimal port number in the range 1-65535\"\n        next\n      end\n      i = k.to_i\n      if i < 1 || i > 65535\n        errors.add :published_ports, \"entry #{k} must be a decimal port number in the range 1-65535\"\n        next\n      end\n\n      if v.is_a?(Hash)\n        v.each do |vkey, _|\n          if ![\"access\", \"label\", \"initial_path\"].include? vkey\n            errors.add :published_ports, \"entry #{k} has invalid key: #{vkey.inspect}\"\n          end\n        end\n        if v[\"access\"] != \"private\" && v[\"access\"] != \"public\"\n          errors.add :published_ports, \"entry #{k} 'access' must be one of 'public' or 'private' but was: #{v[\"access\"].inspect}\"\n        end\n        if !v[\"label\"].is_a?(String)\n          errors.add :published_ports, \"entry #{k} 'label' must be a string but was: #{v[\"label\"].inspect}\"\n        elsif v[\"label\"].empty?\n          errors.add :published_ports, \"entry #{k} 'label' cannot be empty\"\n        end\n        if !v[\"initial_path\"].is_a?(String)\n          errors.add :published_ports, \"entry #{k} 'initial_path' must be a string but was: #{v[\"initial_path\"].inspect}\"\n        end\n      else\n        errors.add :published_ports, \"entry #{k} must be a hash: #{v.inspect}\"\n      end\n    end\n  end\n\n  def scrub_secrets\n    if self.state == Final\n      self.secret_mounts = {}\n      self.runtime_token = nil\n    end\n  end\n\n  def update_priority\n    return unless saved_change_to_state? || saved_change_to_priority? || saved_change_to_container_uuid?\n    if container_uuid_before_last_save &&\n       container_uuid_before_last_save != container_uuid &&\n       !Container.find_by_uuid(container_uuid_before_last_save).andand.state.in?([Container::Complete, Container::Cancelled])\n      update_priorities(container_uuid_before_last_save)\n    end\n    if container_uuid && !container.andand.final?\n      update_priorities(container_uuid)\n    end\n  end\n\n  def set_requesting_container_uuid\n    if (self.requesting_container_uuid = get_requesting_container_uuid())\n      # Inherit the priority of the highest-priority container request\n      # for the requesting container.\n      self.priority = ContainerRequest.where(container_uuid: self.requesting_container_uuid).maximum(\"priority\") || 0\n    end\n  end\n\n  def get_requesting_container_uuid\n    return self.requesting_container_uuid || Container.for_current_token.andand.uuid\n  end\nend\n"
  },
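  {
    "path": "doc/examples/translate_cuda_to_gpu_sketch.rb",
    "content": "# Hypothetical example file -- an illustrative sketch, not part of the\n# Arvados source tree. It mirrors the mapping performed by\n# ContainerRequest.translate_cuda_to_gpu (see container_request.rb):\n# a legacy 'cuda' runtime constraint is rewritten as a generic 'gpu'\n# constraint so downstream code only has to handle 'gpu'.\n\ndef translate_cuda_to_gpu(rc)\n  if rc.is_a?(Hash) && rc['cuda'] && rc['cuda']['device_count'] > 0\n    rc['gpu'] = {\n      'device_count' => rc['cuda']['device_count'],\n      'driver_version' => rc['cuda']['driver_version'],\n      'hardware_target' => [rc['cuda']['hardware_capability']],\n      'stack' => 'cuda',\n      'vram' => 0,\n    }\n  end\n  rc\nend\n\nrc = {\n  'vcpus' => 1,\n  'ram' => 1 << 28,\n  'cuda' => {'device_count' => 1, 'driver_version' => '11.0', 'hardware_capability' => '8.0'},\n}\ntranslate_cuda_to_gpu(rc)\n# The original 'cuda' section is kept alongside the new 'gpu' section\n# (see #21926#note-21 in container_request.rb).\nraise 'unexpected translation' unless rc['gpu'] == {\n  'device_count' => 1,\n  'driver_version' => '11.0',\n  'hardware_target' => ['8.0'],\n  'stack' => 'cuda',\n  'vram' => 0,\n}\nputs 'translated gpu constraint: ' + rc['gpu'].inspect\n"
  },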
  {
    "path": "services/api/app/models/credential.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'validate_serialized'\n\nclass Credential < ArvadosModel\n  include HasUuid\n  include KindAndEtag\n  include CommonApiTemplate\n\n  # Validation regexes for scopes, keyed by credential_class.\n  CRED_CLASS_SCOPES_VALIDATION_REGEX = {\n    \"arv:aws_access_key\" => [\n      %r{\\As3://(\\*|[a-z0-9][\\-.a-z0-9]{1,61}[a-z0-9])\\z}\n    ],\n  }.freeze\n\n  validates :name, :credential_class, :external_id, :secret, :expires_at, presence: true\n  validates :name, format: { without: /\\A[ \\t]*\\z/ }\n  validates :scopes, array_of_strings: true\n\n  attribute :scopes, :jsonbArray, default: []\n\n  validate :validate_credential_class_and_scopes\n\n  after_create :add_credential_manage_link\n\n  api_accessible :user, extend: :common do |t|\n    t.add :name\n    t.add :description\n    t.add :credential_class\n    t.add :scopes\n    t.add :external_id\n    t.add :expires_at\n  end\n\n  def updated_at=(v)\n      # no-op\n  end\n\n  def logged_attributes\n    super.except('secret')\n  end\n\n  def self.full_text_searchable_columns\n    super - [\"credential_class\", \"external_id\", \"secret\", \"expires_at\"]\n  end\n\n  def self.searchable_columns *args\n    super - [\"secret\"]\n  end\n\n  def ensure_owner_uuid_is_permitted\n    if new_record?\n      @requested_manager_uuid = owner_uuid\n      self.owner_uuid = system_user_uuid\n      return true\n    end\n\n    if self.owner_uuid != system_user_uuid\n      raise \"Owner uuid for credential must be system user\"\n    end\n  end\n\n  def add_credential_manage_link\n    if @requested_manager_uuid\n      act_as_system_user do\n       Link.create!(tail_uuid: @requested_manager_uuid,\n                    head_uuid: self.uuid,\n                    link_class: \"permission\",\n                    name: \"can_manage\")\n      end\n    end\n  end\n\n  private\n\n  def validate_credential_class_and_scopes\n    return unless credential_class.present?\n\n    if credential_class.start_with?(\"arv:\")\n      check_if_credential_class_is_implemented\n    elsif CRED_CLASS_SCOPES_VALIDATION_REGEX.key?(\"arv:\" + credential_class)\n      errors.add(:credential_class,  \"#{credential_class} conflicts with reserved credential class arv:#{credential_class}\")\n    end\n  end\n\n  def check_if_credential_class_is_implemented\n    if CRED_CLASS_SCOPES_VALIDATION_REGEX.key?(credential_class)\n      validate_scopes_for_implemented_credential_class\n    else\n      errors.add(:credential_class, \"#{credential_class} is not implemented\")\n    end\n  end\n\n  def validate_scopes_for_implemented_credential_class\n    if scopes.blank?\n      errors.add(:scopes, \"cannot be blank for credential class #{credential_class}\")\n      return\n    end\n\n    patterns = CRED_CLASS_SCOPES_VALIDATION_REGEX[credential_class]\n\n    invalid = scopes.reject do |scope|\n      patterns.any? { |re| re.match?(scope) }\n    end\n\n    if invalid.any?\n      errors.add(:scopes, \"not valid for credential class #{credential_class}: #{invalid.join(', ')}\")\n    end\n  end\nend\n"
  },
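  {
    "path": "doc/examples/credential_scopes_sketch.rb",
    "content": "# Hypothetical example file -- an illustrative sketch, not part of the\n# Arvados source tree. It demonstrates the scope check used by\n# Credential#validate_scopes_for_implemented_credential_class: every\n# scope must match at least one regex registered for the\n# credential_class in CRED_CLASS_SCOPES_VALIDATION_REGEX.\n\nSCOPE_PATTERNS = {\n  'arv:aws_access_key' => [\n    %r{\\As3://(\\*|[a-z0-9][\\-.a-z0-9]{1,61}[a-z0-9])\\z}\n  ],\n}.freeze\n\n# Return the subset of scopes that match none of the class's patterns.\ndef invalid_scopes(credential_class, scopes)\n  patterns = SCOPE_PATTERNS.fetch(credential_class, [])\n  scopes.reject { |scope| patterns.any? { |re| re.match?(scope) } }\nend\n\nok = ['s3://my-bucket', 's3://*']\nbad = ['s3://Invalid_Bucket', 'https://example.com']\nraise unless invalid_scopes('arv:aws_access_key', ok).empty?\nraise unless invalid_scopes('arv:aws_access_key', bad) == bad\nputs 'scope validation behaved as expected'\n"
  },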
  {
    "path": "services/api/app/models/database_seeds.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'update_permissions'\n\n# Seed database with default/initial data if needed.\n#\n# This runs before db:migrate in\n# build/rails-package-scripts/postinst.sh so it must only do things\n# that are safe in an in-use/production database.\nclass DatabaseSeeds\n  extend CurrentApiClient\n  def self.install\n    batch_update_permissions do\n      system_user\n      system_group\n      all_users_group\n      anonymous_group\n      anonymous_group_read_permission\n      anonymous_user\n      public_project_group\n      public_project_read_permission\n      empty_collection\n    end\n    refresh_trashed\n  end\nend\n"
  },
  {
    "path": "services/api/app/models/frozen_group.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass FrozenGroup < ApplicationRecord\nend\n"
  },
  {
    "path": "services/api/app/models/group.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'can_be_an_owner'\nrequire 'trashable'\nrequire 'update_priorities'\n\nclass Group < ArvadosModel\n  include HasUuid\n  include KindAndEtag\n  include CommonApiTemplate\n  include CanBeAnOwner\n  include Trashable\n\n  # Posgresql JSONB columns should NOT be declared as serialized, Rails 5\n  # already know how to properly treat them.\n  attribute :properties, :jsonbHash, default: {}\n\n  validate :ensure_filesystem_compatible_name\n  validate :check_group_class\n  validate :check_filter_group_filters\n  validate :check_frozen_state_change_allowed\n  before_create :assign_name\n  after_create :after_ownership_change\n  after_create :update_trash\n  after_create :update_frozen\n\n  before_update :before_ownership_change\n  after_update :after_ownership_change\n\n  after_create :add_role_manage_link\n\n  after_update :update_trash\n  after_update :update_frozen\n  before_destroy :clear_permissions_trash_frozen\n\n  api_accessible :user, extend: :common do |t|\n    t.add :name\n    t.add :group_class\n    t.add :description\n    t.add :writable_by\n    t.add :delete_at\n    t.add :trash_at\n    t.add :is_trashed\n    t.add :properties\n    t.add :frozen_by_uuid\n    t.add :can_write\n    t.add :can_manage\n  end\n\n  def default_delete_after_trash_interval\n    if self.group_class == 'role'\n      ActiveSupport::Duration.build(0)\n    else\n      super\n    end\n  end\n\n  def minimum_delete_after_trash_interval\n    if self.group_class == 'role'\n      ActiveSupport::Duration.build(0)\n    else\n      super\n    end\n  end\n\n  def validate_trash_and_delete_timing\n    if self.group_class == 'role' && delete_at && delete_at != trash_at\n      errors.add :delete_at, \"must be == trash_at for role groups\"\n    else\n      super\n    end\n  end\n\n  # check if admins are allowed to make changes to the project, e.g. it\n  # isn't trashed or frozen.\n  def admin_change_permitted\n    !(FrozenGroup.where(uuid: self.uuid).any? || TrashedGroup.where(group_uuid: self.uuid).any?)\n  end\n\n  protected\n\n  def self.attributes_required_columns\n    super.merge(\n                'can_write' => ['owner_uuid', 'uuid'],\n                'can_manage' => ['owner_uuid', 'uuid'],\n                'writable_by' => ['owner_uuid', 'uuid'],\n                )\n  end\n\n  def ensure_filesystem_compatible_name\n    # project and filter groups need filesystem-compatible names, but others\n    # don't.\n    super if group_class == 'project' || group_class == 'filter'\n  end\n\n  def check_group_class\n    if group_class != 'project' && group_class != 'role' && group_class != 'filter'\n      errors.add :group_class, \"value must be one of 'project', 'role' or 'filter', was '#{group_class}'\"\n    end\n    if group_class_changed? 
&& !group_class_was.nil?\n      errors.add :group_class, \"cannot be modified after record is created\"\n    end\n  end\n\n  def check_filter_group_filters\n    if group_class == 'filter'\n      if !self.properties.key?(\"filters\")\n        errors.add :properties, \"filters property missing, it must be an array of arrays, each with 3 elements\"\n        return\n      end\n      if !self.properties[\"filters\"].is_a?(Array)\n        errors.add :properties, \"filters property must be an array of arrays, each with 3 elements\"\n        return\n      end\n      self.properties[\"filters\"].each do |filter|\n        if !filter.is_a?(Array)\n          errors.add :properties, \"filters property must be an array of arrays, each with 3 elements\"\n          return\n        end\n        if filter.length() != 3\n          errors.add :properties, \"filters property must be an array of arrays, each with 3 elements\"\n          return\n        end\n        if !filter[0].include?(\".\") and filter[0].downcase != \"uuid\"\n          errors.add :properties, \"filter attribute must be 'uuid' or contain a dot (e.g. groups.name)\"\n          return\n        end\n        if (filter[0].downcase != \"uuid\" and filter[1].downcase == \"is_a\")\n          errors.add :properties, \"when filter operator is 'is_a', attribute must be 'uuid'\"\n          return\n        end\n        if ! [\"=\",\"<\",\"<=\",\">\",\">=\",\"!=\",\"like\",\"ilike\",\"in\",\"not in\",\"is_a\",\"exists\",\"contains\"].include?(filter[1].downcase)\n          errors.add :properties, \"filter operator is not valid (must be =,<,<=,>,>=,!=,like,ilike,in,not in,is_a,exists,contains)\"\n          return\n        end\n      end\n    end\n  end\n\n  def check_frozen_state_change_allowed\n    if frozen_by_uuid == \"\"\n      self.frozen_by_uuid = nil\n    end\n    if frozen_by_uuid_changed? || (new_record? && frozen_by_uuid)\n      if group_class != \"project\"\n        errors.add(:frozen_by_uuid, \"cannot be modified on a non-project group\")\n        return\n      end\n      if frozen_by_uuid_was && Rails.configuration.API.UnfreezeProjectRequiresAdmin && !current_user.is_admin\n        errors.add(:frozen_by_uuid, \"can only be changed by an admin user, once set\")\n        return\n      end\n      if frozen_by_uuid && frozen_by_uuid != current_user.uuid\n        errors.add(:frozen_by_uuid, \"can only be set to the current user's UUID\")\n        return\n      end\n      if !new_record? && !current_user.can?(manage: uuid)\n        raise PermissionDeniedError\n      end\n      if trash_at || delete_at || (!new_record? && TrashedGroup.where(group_uuid: uuid).any?)\n        errors.add(:frozen_by_uuid, \"cannot be set on a trashed project\")\n      end\n      if frozen_by_uuid_was.nil?\n        if Rails.configuration.API.FreezeProjectRequiresDescription && !attribute_present?(:description)\n          errors.add(:frozen_by_uuid, \"can only be set if description is non-empty\")\n        end\n        Rails.configuration.API.FreezeProjectRequiresProperties.andand.each do |key, _|\n          key = key.to_s\n          if !properties[key] || properties[key] == \"\"\n            errors.add(:frozen_by_uuid, \"can only be set if properties[#{key}] value is non-empty\")\n          end\n        end\n      end\n    end\n  end\n\n  def update_trash\n    return unless saved_change_to_trash_at? 
|| saved_change_to_owner_uuid?\n\n    # The group was added or removed from the trash.\n    #\n    # Strategy:\n    #   Compute project subtree, propagating trash_at to subprojects\n    #   Ensure none of the newly trashed descendants were frozen (if so, bail out)\n    #   Remove groups that don't belong from trash\n    #   Add/update groups that do belong in the trash\n\n    frozen_descendants = ActiveRecord::Base.connection.exec_query(%{\nwith temptable as (select * from project_subtree_with_trash_at($1, LEAST($2, $3)::timestamp))\n      select uuid from frozen_groups, temptable where uuid = target_uuid\n},\n      \"Group.update_trash.select\",\n      [self.uuid,\n       TrashedGroup.find_by_group_uuid(self.owner_uuid).andand.trash_at,\n       self.trash_at])\n\n    if frozen_descendants.any?\n      raise ArgumentError.new(\"cannot trash project containing frozen project #{frozen_descendants[0][\"uuid\"]}\")\n    end\n\n    if self.trash_at and self.group_class == 'role'\n      # if this is a role group that is now in the trash, it loses all\n      # of its outgoing permissions.\n      Link.where(link_class: 'permission', tail_uuid: self.uuid).destroy_all\n    end\n\n    ActiveRecord::Base.connection.exec_query(%{\nwith temptable as (select * from project_subtree_with_trash_at($1, LEAST($2, $3)::timestamp)),\n\ndelete_rows as (delete from trashed_groups where group_uuid in (select target_uuid from temptable where trash_at is NULL)),\n\ninsert_rows as (insert into trashed_groups (group_uuid, trash_at)\n  select target_uuid as group_uuid, trash_at from temptable where trash_at is not NULL\n  on conflict (group_uuid) do update set trash_at=EXCLUDED.trash_at)\n\nselect container_uuid from container_requests where\n  owner_uuid in (select target_uuid from temptable) and\n  requesting_container_uuid is NULL and state = 'Committed' and container_uuid is not NULL\n},\n      \"Group.update_trash.select\",\n      [self.uuid,\n       TrashedGroup.find_by_group_uuid(self.owner_uuid).andand.trash_at,\n       self.trash_at]).each do |container_uuid|\n      update_priorities container_uuid[\"container_uuid\"]\n    end\n  end\n\n  def update_frozen\n    return unless saved_change_to_frozen_by_uuid? 
|| saved_change_to_owner_uuid?\n\n    if frozen_by_uuid\n      rows = ActiveRecord::Base.connection.exec_query(%{\nwith temptable as (select * from project_subtree_with_is_frozen($1,$2))\n\nselect cr.uuid, cr.state from container_requests cr, temptable frozen\n  where cr.owner_uuid = frozen.uuid and frozen.is_frozen\n  and cr.state not in ($3, $4) limit 1\n},\n                                                      \"Group.update_frozen.check_container_requests\",\n                                                      [self.uuid,\n                                                       !self.frozen_by_uuid.nil?,\n                                                       ContainerRequest::Uncommitted,\n                                                       ContainerRequest::Final])\n      if rows.any?\n        raise ArgumentError.new(\"cannot freeze project containing container request #{rows.first['uuid']} with state = #{rows.first['state']}\")\n      end\n    end\n\nActiveRecord::Base.connection.exec_query(%{\nwith temptable as (select * from project_subtree_with_is_frozen($1,$2)),\n\ndelete_rows as (delete from frozen_groups where uuid in (select uuid from temptable where not is_frozen))\n\ninsert into frozen_groups (uuid) select uuid from temptable where is_frozen on conflict do nothing\n}, \"Group.update_frozen.update\",\n                                         [self.uuid,\n                                          !self.frozen_by_uuid.nil?])\n\n  end\n\n  def before_ownership_change\n    if owner_uuid_changed? and !self.owner_uuid_was.nil?\n      ComputedPermission.where(user_uuid: owner_uuid_was, target_uuid: uuid).delete_all\n      update_permissions self.owner_uuid_was, self.uuid, REVOKE_PERM\n    end\n  end\n\n  def after_ownership_change\n    if saved_change_to_owner_uuid?\n      update_permissions self.owner_uuid, self.uuid, CAN_MANAGE_PERM\n    end\n  end\n\n  def clear_permissions_trash_frozen\n    Link.where(link_class: 'permission', tail_uuid: self.uuid).destroy_all\n    ComputedPermission.where(target_uuid: uuid).delete_all\n    ActiveRecord::Base.connection.exec_delete(\n      \"delete from trashed_groups where group_uuid=$1\",\n      \"Group.clear_permissions_trash_frozen\",\n      [self.uuid])\n    ActiveRecord::Base.connection.exec_delete(\n      \"delete from frozen_groups where uuid=$1\",\n      \"Group.clear_permissions_trash_frozen\",\n      [self.uuid])\n  end\n\n  def assign_name\n    if self.new_record? and (self.name.nil? 
or self.name.empty?)\n      self.name = self.uuid\n    end\n    true\n  end\n\n  def ensure_owner_uuid_is_permitted\n    if group_class == \"role\"\n      @requested_manager_uuid = nil\n      if new_record?\n        @requested_manager_uuid = owner_uuid\n        self.owner_uuid = system_user_uuid\n        return true\n      end\n      if self.owner_uuid != system_user_uuid\n        raise \"Owner uuid for role must be system user\"\n      end\n      raise PermissionDeniedError.new(\"role group cannot be modified without can_manage permission\") unless current_user.can?(manage: uuid)\n      true\n    else\n      super\n    end\n  end\n\n  def add_role_manage_link\n    if group_class == \"role\" && @requested_manager_uuid\n      act_as_system_user do\n       Link.create!(tail_uuid: @requested_manager_uuid,\n                    head_uuid: self.uuid,\n                    link_class: \"permission\",\n                    name: \"can_manage\")\n      end\n    end\n  end\n\n  def permission_to_create\n    if !super\n      return false\n    elsif group_class == \"role\" &&\n       !Rails.configuration.Users.CanCreateRoleGroups &&\n       !current_user.andand.is_admin\n      raise PermissionDeniedError.new(\"this cluster does not allow users to create role groups\")\n    else\n      return true\n    end\n  end\n\n  def permission_to_update\n    if !super\n      return false\n    elsif frozen_by_uuid && frozen_by_uuid_was\n      errors.add :uuid, \"#{uuid} is frozen and cannot be modified\"\n      return false\n    else\n      return true\n    end\n  end\n\n  def self.full_text_searchable_columns\n    super - [\"frozen_by_uuid\"]\n  end\nend\n"
  },
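  {
    "path": "doc/examples/filter_group_filters_sketch.rb",
    "content": "# Hypothetical example file -- an illustrative sketch, not part of the\n# Arvados source tree. It restates the rules enforced by\n# Group#check_filter_group_filters: properties['filters'] must be an\n# array of [attribute, operator, operand] triples, the attribute must\n# be 'uuid' or contain a dot, and 'is_a' may only be applied to 'uuid'.\n\nOPERATORS = ['=', '<', '<=', '>', '>=', '!=', 'like', 'ilike',\n             'in', 'not in', 'is_a', 'exists', 'contains'].freeze\n\n# Return the first validation error, or nil if the filters are valid.\ndef filter_error(filters)\n  return 'filters must be an array of arrays' if !filters.is_a?(Array)\n  filters.each do |f|\n    return 'each filter must be an array with 3 elements' if !f.is_a?(Array) || f.length != 3\n    attr, op, _operand = f\n    return \"filter attribute must be 'uuid' or contain a dot\" if !attr.include?('.') && attr.downcase != 'uuid'\n    return \"when filter operator is 'is_a', attribute must be 'uuid'\" if op.downcase == 'is_a' && attr.downcase != 'uuid'\n    return 'filter operator is not valid' if !OPERATORS.include?(op.downcase)\n  end\n  nil\nend\n\nraise if filter_error([['groups.name', 'like', 'sequencing%'],\n                       ['uuid', 'is_a', 'arvados#group']])\nraise unless filter_error([['name', '=', 'x']])                # no dot, not 'uuid'\nraise unless filter_error([['collections.name', 'is_a', 'x']]) # 'is_a' needs 'uuid'\nputs 'filter validation behaved as expected'\n"
  },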
  {
    "path": "services/api/app/models/jsonb_type.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass JsonbType\n  # Emulate pre-rails5.0 behavior by having a interpreting NULL/nil as\n  # some other default value.\n  class WithDefault < ActiveModel::Type::Value\n    include ActiveModel::Type::Helpers::Mutable\n\n    def default_value\n      nil\n    end\n\n    def changed_in_place?(raw_old_value, value)\n      # Compare deserialized values for correctness, checking serialized values\n      # may include changes in ordering, inline whitespaces, etc.\n      deserialize(raw_old_value) != value\n    end\n\n    def deserialize(value)\n      if value.nil?\n        self.default_value\n      elsif value.is_a?(::String)\n        SafeJSON.load(value) rescue self.default_value\n      else\n        value\n      end\n    end\n\n    def serialize(value)\n      if value.nil?\n        self.default_value\n      else\n        SafeJSON.dump(value)\n      end\n    end\n  end\n\n  class Hash < JsonbType::WithDefault\n    def type\n      :jsonbHash\n    end\n\n    def default_value\n      {}\n    end\n\n    def enforce_type\n      ::Hash\n    end\n  end\n\n  class Array < JsonbType::WithDefault\n    def type\n      :jsonbArray\n    end\n\n    def default_value\n      []\n    end\n\n    def enforce_type\n      ::Array\n    end\n  end\nend\n"
  },
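  {
    "path": "doc/examples/jsonb_default_sketch.rb",
    "content": "# Hypothetical example file -- an illustrative sketch, not part of the\n# Arvados source tree. It demonstrates the JsonbType::WithDefault\n# contract from jsonb_type.rb: nil and unparseable column values\n# deserialize to the subclass default, and changed_in_place? compares\n# deserialized values, so key order and whitespace are not 'changes'.\n# The stdlib JSON module stands in for SafeJSON here.\n\nrequire 'json'\n\nclass SketchJsonbHash\n  def default_value\n    {}\n  end\n\n  def deserialize(value)\n    if value.nil?\n      default_value\n    elsif value.is_a?(String)\n      JSON.parse(value) rescue default_value\n    else\n      value\n    end\n  end\n\n  def changed_in_place?(raw_old_value, value)\n    deserialize(raw_old_value) != value\n  end\nend\n\nt = SketchJsonbHash.new\nraise unless t.deserialize(nil) == {}\nraise unless t.deserialize('not json') == {}\nraise unless t.deserialize('{\"a\":1}') == {'a' => 1}\n# Reordered keys and extra whitespace in the raw column value do not\n# count as an in-place change.\nraise if t.changed_in_place?('{ \"b\": 2, \"a\": 1 }', {'a' => 1, 'b' => 2})\nputs 'jsonb default handling behaved as expected'\n"
  },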
  {
    "path": "services/api/app/models/keep_service.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass KeepService < ArvadosModel\n  include HasUuid\n  include KindAndEtag\n  include CommonApiTemplate\n  extend DbCurrentTime\n  extend CurrentApiClient\n\n  SERVER_START_TIME = db_current_time\n\n  api_accessible :user, extend: :common do |t|\n    t.add  :service_host\n    t.add  :service_port\n    t.add  :service_ssl_flag\n    t.add  :service_type\n    t.add  :read_only\n  end\n  api_accessible :superuser, :extend => :user do |t|\n  end\n\n  # return the set of keep services from the database (if this is an\n  # older installation or test system where entries have been added\n  # manually) or, preferably, the cluster config file.\n  def self.all *args\n    if super.count == 0\n      from_config\n    else\n      super\n    end\n  end\n\n  def self.where *args\n    all.where *args\n  end\n\n  protected\n\n  def permission_to_create\n    current_user.andand.is_admin\n  end\n\n  def permission_to_update\n    current_user.andand.is_admin\n  end\n\n  def self.from_config\n    config_time = connection.quote(SERVER_START_TIME)\n    owner = connection.quote(system_user_uuid)\n    values = []\n    id = 1\n    Rails.configuration.Services.Keepstore.InternalURLs.each do |url, info|\n      values << \"(#{id}, \" + quoted_column_values_from_url(url: url.to_s, rendezvous: info.Rendezvous).join(\", \") + \", 'disk', 'f'::bool, #{config_time}, #{config_time}, #{owner}, #{owner}, null)\"\n      id += 1\n    end\n    url = Rails.configuration.Services.Keepproxy.ExternalURL.to_s\n    if !url.blank?\n      values << \"(#{id}, \" + quoted_column_values_from_url(url: url, rendezvous: \"\").join(\", \") + \", 'proxy', 'f'::bool, #{config_time}, #{config_time}, #{owner}, #{owner}, null)\"\n      id += 1\n    end\n    if values.length == 0\n      # return empty set as AR relation\n      return unscoped.where('1=0')\n    else\n      sql = \"(values #{values.join(\", \")}) as keep_services (id, uuid, service_host, service_port, service_ssl_flag, service_type, read_only, created_at, modified_at, owner_uuid, modified_by_user_uuid, modified_by_client_uuid)\"\n      return unscoped.from(sql)\n    end\n  end\n\n  private\n\n  def self.quoted_column_values_from_url(url:, rendezvous:)\n    rvz = rendezvous\n    rvz = url if rvz.blank?\n    if /^[a-zA-Z0-9]{15}$/ !~ rvz\n      # If rvz is an URL (either the real service URL, or an alternate\n      # one specified in config in order to preserve rendezvous order\n      # when changing hosts/ports), hash it to get 15 alphanums.\n      rvz = Digest::MD5.hexdigest(rvz)[0..15]\n    end\n    uuid = Rails.configuration.ClusterID + \"-bi6l4-\" + rvz\n    uri = URI::parse(url)\n    [uuid, uri.host, uri.port].map { |x| connection.quote(x) } + [(uri.scheme == 'https' ? \"'t'::bool\" : \"'f'::bool\")]\n  end\n\nend\n"
  },
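  {
    "path": "doc/examples/keep_service_uuid_sketch.rb",
    "content": "# Hypothetical example file -- an illustrative sketch, not part of the\n# Arvados source tree. It mirrors how KeepService.from_config derives a\n# stable pseudo-UUID for a configured keepstore (keep_service.rb): the\n# Rendezvous token is used as-is when it is already 15 alphanumerics;\n# otherwise the service URL is MD5-hashed and truncated to 15 hex\n# digits. The cluster id 'zzzzz' is a placeholder.\n\nrequire 'digest/md5'\n\ndef keep_service_uuid(cluster_id, url, rendezvous)\n  rvz = rendezvous\n  rvz = url if rvz.nil? || rvz.empty?\n  if /^[a-zA-Z0-9]{15}$/ !~ rvz\n    rvz = Digest::MD5.hexdigest(rvz)[0..14]\n  end\n  cluster_id + '-bi6l4-' + rvz\nend\n\nu1 = keep_service_uuid('zzzzz', 'http://keep0.example:25107', '')\nu2 = keep_service_uuid('zzzzz', 'http://keep0.example:25107', '')\nraise unless u1 == u2        # deterministic across restarts\nraise unless u1.length == 27 # 5 + 1 + 5 + 1 + 15, the standard UUID shape\nputs u1\n"
  },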
  {
    "path": "services/api/app/models/link.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Link < ArvadosModel\n  include HasUuid\n  include KindAndEtag\n  include CommonApiTemplate\n\n  # Posgresql JSONB columns should NOT be declared as serialized, Rails 5\n  # already know how to properly treat them.\n  attribute :properties, :jsonbHash, default: {}\n\n  validate :name_links_are_obsolete\n  validate :permission_to_attach_to_objects\n  validate :validate_published_port, :if => Proc.new { link_class == 'published_port' }\n  before_update :restrict_alter_permissions\n  before_update :apply_max_overlapping_permissions\n  before_create :apply_max_overlapping_permissions\n  after_update :delete_overlapping_permissions\n  after_update :call_update_permissions, :if => Proc.new { @need_update_permissions }\n  after_create :call_update_permissions, :if => Proc.new { @need_update_permissions }\n  before_destroy :clear_permissions\n  after_destroy :delete_overlapping_permissions\n  after_destroy :check_permissions\n  before_save :check_need_update_permissions\n\n  api_accessible :user, extend: :common do |t|\n    t.add :tail_uuid\n    t.add :link_class\n    t.add :name\n    t.add :head_uuid\n    t.add :head_kind\n    t.add :tail_kind\n    t.add :properties\n  end\n\n  PermLevel = {\n    'can_read' => 0,\n    'can_write' => 1,\n    'can_manage' => 2,\n  }\n\n  def apply_max_overlapping_permissions\n    return if self.link_class != 'permission' || !PermLevel[self.name]\n    Link.\n      lock. # select ... for update\n      where(link_class: 'permission',\n            tail_uuid: self.tail_uuid,\n            head_uuid: self.head_uuid,\n            name: PermLevel.keys).\n      where('uuid <> ?', self.uuid).each do |other|\n      if PermLevel[other.name] > PermLevel[self.name]\n        self.name = other.name\n      end\n    end\n  end\n\n  def delete_overlapping_permissions\n    return if self.link_class != 'permission'\n    redundant = nil\n    if PermLevel[self.name]\n      redundant = Link.\n                    lock. # select ... for update\n                    where(link_class: 'permission',\n                          tail_uuid: self.tail_uuid,\n                          head_uuid: self.head_uuid,\n                          name: PermLevel.keys).\n                    where('uuid <> ?', self.uuid)\n    elsif self.name == 'can_login' &&\n          self.properties.respond_to?(:has_key?) &&\n          self.properties.has_key?('username')\n      redundant = Link.\n                    lock. # select ... for update\n                    where(link_class: 'permission',\n                          tail_uuid: self.tail_uuid,\n                          head_uuid: self.head_uuid,\n                          name: 'can_login').\n                    where('properties @> ?', SafeJSON.dump({'username' => self.properties['username']})).\n                    where('uuid <> ?', self.uuid)\n    end\n    if redundant\n      redundant.each do |link|\n        link.clear_permissions\n      end\n      redundant.delete_all\n    end\n  end\n\n  def head_kind\n    if k = ArvadosModel::resource_class_for_uuid(head_uuid)\n      k.kind\n    end\n  end\n\n  def tail_kind\n    if k = ArvadosModel::resource_class_for_uuid(tail_uuid)\n      k.kind\n    end\n  end\n\n  protected\n\n  def check_readable_uuid attr, attr_value\n    if attr == 'tail_uuid' &&\n       !attr_value.nil? 
&&\n       self.link_class == 'permission' &&\n       attr_value[0..4] != Rails.configuration.ClusterID &&\n       ApiClientAuthorization.remote_host(uuid_prefix: attr_value[0..4]) &&\n       ArvadosModel::resource_class_for_uuid(attr_value) == User\n      # Permission link tail is a remote user (the user permissions\n      # are being granted to), so bypass the standard check that a\n      # referenced object uuid is readable by current user.\n      #\n      # We could do a call to the remote cluster to check if the user\n      # in tail_uuid exists.  This would detect copy-and-paste errors,\n      # but add another way for the request to fail, and I don't think\n      # it would improve security.  It doesn't seem to be worth the\n      # complexity tradeoff.\n      true\n    else\n      super\n    end\n  end\n\n  def permission_to_attach_to_objects\n    # Anonymous users cannot write links\n    return false if !current_user\n\n    # All users can write links that don't affect permissions\n    return true if self.link_class != 'permission'\n\n    if PERM_LEVEL[self.name].nil?\n      errors.add(:name, \"is invalid permission, must be one of 'can_read', 'can_write', 'can_manage', 'can_login'\")\n      return false\n    end\n\n    rsc_class = ArvadosModel::resource_class_for_uuid tail_uuid\n    if rsc_class == Group\n      tail_obj = Group.find_by_uuid(tail_uuid)\n      if tail_obj.nil?\n        errors.add(:tail_uuid, \"does not exist\")\n        return false\n      end\n      if tail_obj.group_class != \"role\"\n        errors.add(:tail_uuid, \"must be a user or role, was group with group_class #{tail_obj.group_class}\")\n        return false\n      end\n    elsif rsc_class != User\n      errors.add(:tail_uuid, \"must be a user or role\")\n      return false\n    end\n\n    # Administrators can grant permissions\n    return true if current_user.is_admin\n\n    head_obj = ArvadosModel.find_by_uuid(head_uuid)\n\n    if head_obj.nil?\n      errors.add(:head_uuid, \"does not exist\")\n      return false\n    end\n\n    # No permission links can be pointed to past collection versions\n    if head_obj.is_a?(Collection) && head_obj.current_version_uuid != head_uuid\n      errors.add(:head_uuid, \"cannot point to a past version of a collection\")\n      return false\n    end\n\n    # All users can grant permissions on objects they own or can manage\n    return true if current_user.can?(manage: head_obj)\n\n    # Default = deny.\n    false\n  end\n\n  def restrict_alter_permissions\n    return true if self.link_class != 'permission' && self.link_class_was != 'permission'\n\n    return true if current_user.andand.uuid == system_user.uuid\n\n    if link_class_changed? || tail_uuid_changed? 
|| head_uuid_changed?\n      raise \"Can only alter permission link level\"\n    end\n  end\n\n  PERM_LEVEL = {\n    'can_read' => 1,\n    'can_login' => 1,\n    'can_write' => 2,\n    'can_manage' => 3,\n  }\n\n  def check_need_update_permissions\n    @need_update_permissions = self.link_class == 'permission' && (name != name_was || new_record?)\n  end\n\n  def call_update_permissions\n      update_permissions tail_uuid, head_uuid, PERM_LEVEL[name], self.uuid\n      current_user.forget_cached_group_perms\n  end\n\n  def clear_permissions\n    if self.link_class == 'permission'\n      update_permissions tail_uuid, head_uuid, REVOKE_PERM, self.uuid\n      current_user.forget_cached_group_perms\n    end\n  end\n\n  def check_permissions\n    if self.link_class == 'permission'\n      check_permissions_against_full_refresh\n    end\n  end\n\n  def name_links_are_obsolete\n    if link_class == 'name'\n      errors.add('name', 'Name links are obsolete')\n      false\n    else\n      true\n    end\n  end\n\n  def validate_published_port\n    if head_uuid.length != 27 || head_uuid[6..10] != ContainerRequest.uuid_prefix\n      errors.add('head_uuid', 'must be a container request UUID')\n    end\n  end\n\n  # A user is permitted to create, update or modify a permission link\n  # if and only if they have \"manage\" permission on the object\n  # indicated by the permission link's head_uuid.\n  #\n  # All other links are treated as regular ArvadosModel objects.\n  #\n  def ensure_owner_uuid_is_permitted\n    if link_class == 'permission'\n      ob = ArvadosModel.find_by_uuid(head_uuid)\n      raise PermissionDeniedError unless current_user.can?(manage: ob)\n      # All permission links should be owned by the system user.\n      self.owner_uuid = system_user_uuid\n      return true\n    else\n      super\n    end\n  end\nend\n"
  },
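  {
    "path": "doc/examples/permission_level_merge_sketch.rb",
    "content": "# Hypothetical example file -- an illustrative sketch, not part of the\n# Arvados source tree. It restates the ordering that\n# Link#apply_max_overlapping_permissions uses (link.rb, PermLevel):\n# when several permission links connect the same tail and head, the\n# surviving link keeps the highest level. The real model does this\n# with row locks inside a transaction.\n\nPERM_RANK = {\n  'can_read' => 0,\n  'can_write' => 1,\n  'can_manage' => 2,\n}.freeze\n\ndef max_permission(names)\n  names.select { |n| PERM_RANK.key?(n) }.max_by { |n| PERM_RANK[n] }\nend\n\nraise unless max_permission(['can_read', 'can_manage', 'can_write']) == 'can_manage'\nraise unless max_permission(['can_read']) == 'can_read'\n# 'can_login' has no rank here; it is deduplicated separately, by\n# matching properties['username'].\nraise unless max_permission(['can_login']).nil?\nputs 'permission merge behaved as expected'\n"
  },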
  {
    "path": "services/api/app/models/log.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'audit_logs'\n\nclass Log < ArvadosModel\n  include HasUuid\n  include KindAndEtag\n  include CommonApiTemplate\n  serialize :properties, Hash\n  before_validation :set_default_event_at\n  after_save :send_notify\n  after_commit { AuditLogs.tidy_in_background }\n\n  api_accessible :user, extend: :common do |t|\n    t.add :id\n    t.add :object_uuid\n    t.add :object_owner_uuid\n    t.add :object_kind\n    t.add :event_at\n    t.add :event_type\n    t.add :summary\n    t.add :properties\n  end\n\n  def object_kind\n    if k = ArvadosModel::resource_class_for_uuid(object_uuid)\n      k.kind\n    end\n  end\n\n  def fill_object(thing)\n    self.object_uuid ||= thing.uuid\n    if respond_to? :object_owner_uuid=\n      # Skip this if the object_owner_uuid migration hasn't happened\n      # yet, i.e., we're in the process of migrating an old database.\n      self.object_owner_uuid = thing.owner_uuid\n    end\n    self.summary ||= \"#{self.event_type} of #{thing.uuid}\"\n    self\n  end\n\n  def fill_properties(age, etag_prop, attrs_prop)\n    self.properties.merge!({\"#{age}_etag\" => etag_prop,\n                             \"#{age}_attributes\" => attrs_prop})\n  end\n\n  def update_to(thing)\n    fill_properties('new', thing.andand.etag, thing.andand.logged_attributes)\n    case event_type\n    when \"create\"\n      self.event_at = thing.created_at\n    when \"update\"\n      self.event_at = thing.modified_at\n    when \"delete\"\n      self.event_at = db_current_time\n    end\n    self\n  end\n\n  def self.readable_by(*users_list)\n    if users_list.last.is_a? Hash\n      users_list.pop\n    end\n    if users_list.select { |u| u.is_admin }.any?\n      return self\n    end\n    user_uuids = users_list.map { |u| u.uuid }\n\n    joins(\"LEFT JOIN container_requests ON container_requests.container_uuid=logs.object_uuid\").\n      where(\"EXISTS(SELECT target_uuid FROM #{PERMISSION_VIEW} \"+\n            \"WHERE user_uuid IN (:user_uuids) AND perm_level >= 1 AND \"+\n            \"target_uuid IN (container_requests.uuid, container_requests.owner_uuid, logs.object_uuid, logs.owner_uuid, logs.object_owner_uuid))\",\n            user_uuids: user_uuids)\n  end\n\n  protected\n\n  def permission_to_create\n    true\n  end\n\n  def permission_to_update\n    current_user.andand.is_admin\n  end\n\n  alias_method :permission_to_delete, :permission_to_update\n\n  def set_default_event_at\n    self.event_at ||= db_current_time\n  end\n\n  def log_start_state\n    # don't log start state on logs\n  end\n\n  def log_change(event_type)\n    # Don't log changes to logs.\n  end\n\n  def ensure_valid_uuids\n    # logs can have references to deleted objects\n  end\n\n  def send_notify\n    ActiveRecord::Base.connection.execute \"NOTIFY logs, '#{self.id}'\"\n  end\nend\n"
  },
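  {
    "path": "doc/examples/audit_log_properties_sketch.rb",
    "content": "# Hypothetical example file -- an illustrative sketch, not part of the\n# Arvados source tree. It shows the shape of the properties hash that\n# Log#fill_properties builds for an audit event (log.rb): old and new\n# snapshots are stored side by side under <age>_etag and\n# <age>_attributes keys. The etags here are made-up strings.\n\ndef fill_properties(props, age, etag, attrs)\n  props.merge!(\"#{age}_etag\" => etag, \"#{age}_attributes\" => attrs)\nend\n\nprops = {}\nfill_properties(props, 'old', 'etag-1', {'name' => 'before'})\nfill_properties(props, 'new', 'etag-2', {'name' => 'after'})\nraise unless props.keys.sort == ['new_attributes', 'new_etag', 'old_attributes', 'old_etag']\nraise unless props['old_attributes']['name'] == 'before'\nputs 'audit log properties: ' + props.inspect\n"
  },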
  {
    "path": "services/api/app/models/trashed_group.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass TrashedGroup < ApplicationRecord\nend\n"
  },
  {
    "path": "services/api/app/models/user.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'can_be_an_owner'\n\nclass User < ArvadosModel\n  include HasUuid\n  include KindAndEtag\n  include CommonApiTemplate\n  include CanBeAnOwner\n  extend CurrentApiClient\n\n  serialize :prefs, Hash\n  has_many :api_client_authorizations\n  validates(:username,\n            format: {\n              with: /\\A[A-Za-z][A-Za-z0-9]*\\z/,\n              message: \"must begin with a letter and contain only alphanumerics\",\n            },\n            uniqueness: true,\n            allow_nil: true)\n  validates :prefs, hash_attr: true\n  validate :must_unsetup_to_deactivate\n  validate :identity_url_nil_if_empty\n  before_update :prevent_privilege_escalation\n  before_update :prevent_inactive_admin\n  before_update :prevent_nonadmin_system_root\n  after_update :setup_on_activate\n\n  before_create :check_auto_admin\n  before_validation :set_initial_username, :if => Proc.new {\n    new_record? && email\n  }\n  before_create :active_is_not_nil\n  after_create :after_ownership_change\n  after_create :setup_on_activate\n  after_create :add_system_group_permission_link\n  after_create :auto_setup_new_user, :if => Proc.new {\n    Rails.configuration.Users.AutoSetupNewUsers and\n    (uuid != system_user_uuid) and\n    (uuid != anonymous_user_uuid) and\n    (uuid[0..4] == Rails.configuration.ClusterID)\n  }\n  after_create :send_admin_notifications\n\n  before_update :before_ownership_change\n  after_update :after_ownership_change\n  after_update :send_profile_created_notification\n  before_destroy :clear_permissions\n  after_destroy :remove_self_from_permissions\n\n  has_many :authorized_keys, foreign_key: 'authorized_user_uuid', primary_key: 'uuid'\n\n  default_scope { where('redirect_to_user_uuid is null') }\n\n  api_accessible :user, extend: :common do |t|\n    t.add :email\n    t.add :username\n    t.add :full_name\n    t.add :first_name\n    t.add :last_name\n    t.add :identity_url\n    t.add :is_active\n    t.add :is_admin\n    t.add :is_invited\n    t.add :prefs\n    t.add :writable_by\n    t.add :can_write\n    t.add :can_manage\n  end\n\n  ALL_PERMISSIONS = {read: true, write: true, manage: true}\n\n  # Map numeric permission levels (see lib/create_permission_view.sql)\n  # back to read/write/manage flags.\n  PERMS_FOR_VAL =\n    [{},\n     {read: true},\n     {read: true, write: true},\n     {read: true, write: true, manage: true}]\n\n  VAL_FOR_PERM =\n    {:read => 1,\n     :write => 2,\n     :unfreeze => 3,\n     :manage => 3}\n\n\n  def full_name\n    \"#{first_name} #{last_name}\".strip\n  end\n\n  def is_invited\n    !!(self.is_active ||\n       Rails.configuration.Users.NewUsersAreActive ||\n       self.groups_i_can(:read).select { |x| x.match(/-f+$/) }.first)\n  end\n\n  def self.ignored_select_attributes\n    super + [\"full_name\", \"is_invited\"]\n  end\n\n  def groups_i_can(verb)\n    my_groups = self.group_permissions(VAL_FOR_PERM[verb]).keys\n    if verb == :read\n      my_groups << anonymous_group_uuid\n    end\n    my_groups\n  end\n\n  def can?(actions)\n    actions.each do |action, target|\n      unless target.nil?\n        if target.respond_to? :uuid\n          target_uuid = target.uuid\n        else\n          target_uuid = target\n          target = ArvadosModel.find_by_uuid(target_uuid)\n        end\n      end\n      next if target_uuid == self.uuid\n\n      if action == :write && target && !target.new_record? 
&&\n         target.respond_to?(:frozen_by_uuid) &&\n         target.frozen_by_uuid_was\n        # Just an optimization to skip the PERMISSION_VIEW and\n        # FrozenGroup queries below\n        return false\n      end\n\n      target_owner_uuid = target.owner_uuid if target.respond_to? :owner_uuid\n\n      user_uuids_subquery = USER_UUIDS_SUBQUERY_TEMPLATE % {user: \"$1\", perm_level: \"$3\"}\n\n      if !is_admin && !ActiveRecord::Base.connection.\n        exec_query(%{\nSELECT 1 FROM #{PERMISSION_VIEW}\n  WHERE user_uuid in (#{user_uuids_subquery}) and\n        ((target_uuid = $2 and perm_level >= $3)\n         or (target_uuid = $4 and perm_level >= $3 and traverse_owned))\n},\n                  # \"name\" arg is a query label that appears in logs:\n                   \"user_can_query\",\n                   [self.uuid,\n                    target_uuid,\n                    VAL_FOR_PERM[action],\n                    target_owner_uuid]\n                  ).any?\n        return false\n      end\n\n      if action == :write\n        if FrozenGroup.where(uuid: [target_uuid, target_owner_uuid]).any?\n          # self or parent is frozen\n          return false\n        end\n      elsif action == :unfreeze\n        # \"unfreeze\" permission means \"can write, but only if\n        # explicitly un-freezing at the same time\" (see\n        # ArvadosModel#ensure_owner_uuid_is_permitted). If the\n        # permission query above passed the permission level of\n        # :unfreeze (which is the same as :manage), and the parent\n        # isn't also frozen, then un-freeze is allowed.\n        if FrozenGroup.where(uuid: target_owner_uuid).any?\n          return false\n        end\n      end\n    end\n    true\n  end\n\n  def before_ownership_change\n    if owner_uuid_changed? and !self.owner_uuid_was.nil?\n      ComputedPermission.where(user_uuid: owner_uuid_was, target_uuid: uuid).delete_all\n      update_permissions self.owner_uuid_was, self.uuid, REVOKE_PERM\n    end\n  end\n\n  def after_ownership_change\n    if saved_change_to_owner_uuid?\n      update_permissions self.owner_uuid, self.uuid, CAN_MANAGE_PERM\n    end\n  end\n\n  def clear_permissions\n    ComputedPermission.where(\"user_uuid = ? 
and target_uuid != ?\", uuid, uuid).delete_all\n  end\n\n  def forget_cached_group_perms\n    @group_perms = nil\n  end\n\n  def remove_self_from_permissions\n    ComputedPermission.where(\"target_uuid = ?\", uuid).delete_all\n    check_permissions_against_full_refresh\n  end\n\n  # Return a hash of {user_uuid: group_perms}\n  #\n  # note: this does not account for permissions that a user gains by\n  # having can_manage on another user.\n  def self.all_group_permissions\n    all_perms = {}\n    ActiveRecord::Base.connection.\n      exec_query(%{\nSELECT user_uuid, target_uuid, perm_level\n                  FROM #{PERMISSION_VIEW}\n                  WHERE traverse_owned\n},\n                  # \"name\" arg is a query label that appears in logs:\n                 \"all_group_permissions\").\n      rows.each do |user_uuid, group_uuid, max_p_val|\n      all_perms[user_uuid] ||= {}\n      all_perms[user_uuid][group_uuid] = PERMS_FOR_VAL[max_p_val.to_i]\n    end\n    all_perms\n  end\n\n  # Return a hash of {group_uuid: perm_hash} where perm_hash[:read]\n  # and perm_hash[:write] are true if this user can read and write\n  # objects owned by group_uuid.\n  def group_permissions(level=1)\n    @group_perms ||= {}\n    if @group_perms.empty?\n      user_uuids_subquery = USER_UUIDS_SUBQUERY_TEMPLATE % {user: \"$1\", perm_level: 1}\n\n      ActiveRecord::Base.connection.\n        exec_query(%{\nSELECT target_uuid, perm_level\n  FROM #{PERMISSION_VIEW}\n  WHERE user_uuid in (#{user_uuids_subquery}) and perm_level >= 1\n},\n                   # \"name\" arg is a query label that appears in logs:\n                   \"User.group_permissions\",\n                   # \"binds\" arg is an array of [col_id, value] for '$1' vars:\n                   [uuid]).\n        rows.each do |group_uuid, max_p_val|\n        @group_perms[group_uuid] = PERMS_FOR_VAL[max_p_val.to_i]\n      end\n    end\n\n    case level\n    when 1\n      @group_perms\n    when 2\n      @group_perms.select {|k,v| v[:write] }\n    when 3\n      @group_perms.select {|k,v| v[:manage] }\n    else\n      raise \"level must be 1, 2 or 3\"\n    end\n  end\n\n  # create links\n  def setup(vm_uuid: nil, send_notification_email: nil)\n    newly_invited = Link.where(tail_uuid: self.uuid,\n                              head_uuid: all_users_group_uuid,\n                              link_class: 'permission').empty?\n\n    # Add can_read link from this user to \"all users\" which makes this\n    # user \"invited\", and (depending on config) a link in the opposite\n    # direction which makes this user visible to other users.\n    group_perms = add_to_all_users_group\n\n    # Add virtual machine\n    if vm_uuid.nil? 
and !Rails.configuration.Users.AutoSetupNewUsersWithVmUUID.empty?\n      vm_uuid = Rails.configuration.Users.AutoSetupNewUsersWithVmUUID\n    end\n\n    vm_login_perm = if vm_uuid && username\n                      create_vm_login_permission_link(vm_uuid, username)\n                    end\n\n    # Send welcome email\n    if send_notification_email.nil?\n      send_notification_email = Rails.configuration.Users.SendUserSetupNotificationEmail\n    end\n\n    if newly_invited and send_notification_email and !Rails.configuration.Users.UserSetupMailText.empty?\n      begin\n        UserNotifier.account_is_setup(self).deliver_now\n      rescue => e\n        logger.warn \"Failed to send email to #{self.email}: #{e}\"\n      end\n    end\n\n    forget_cached_group_perms\n\n    return [vm_login_perm, *group_perms, self].compact\n  end\n\n  # delete user signatures, login, and vm perms, and mark as inactive\n  def unsetup\n    if self.uuid == system_user_uuid\n      raise \"System root user cannot be deactivated\"\n    end\n\n    # delete oid_login_perms for this user\n    #\n    # note: these permission links are obsolete anyway: they have no\n    # effect on anything and they are not created for new users.\n    Link.where(tail_uuid: self.email,\n               link_class: 'permission',\n               name: 'can_login').destroy_all\n\n    # Delete all sharing permissions so (a) the user doesn't\n    # automatically regain access to anything if re-setup in future,\n    # (b) the user doesn't appear in \"currently shared with\" lists\n    # shown to other users.\n    #\n    # Notably this includes the can_read -> \"all users\" group\n    # permission.\n    Link.where(tail_uuid: self.uuid,\n               link_class: 'permission').destroy_all\n\n    # delete any signatures by this user\n    Link.where(link_class: 'signature',\n               tail_uuid: self.uuid).destroy_all\n\n    # delete tokens for this user\n    ApiClientAuthorization.where(user_id: self.id).destroy_all\n    # delete ssh keys for this user\n    AuthorizedKey.where(owner_uuid: self.uuid).destroy_all\n    AuthorizedKey.where(authorized_user_uuid: self.uuid).destroy_all\n\n    # delete user preferences (including profile)\n    self.prefs = {}\n\n    # mark the user as inactive\n    self.is_admin = false  # can't be admin and inactive\n    self.is_active = false\n    forget_cached_group_perms\n    self.save!\n  end\n\n  # Called from ArvadosModel\n  def set_default_owner\n    self.owner_uuid = system_user_uuid\n  end\n\n  def must_unsetup_to_deactivate\n    if !self.new_record? &&\n       self.uuid[0..4] == Rails.configuration.Login.LoginCluster &&\n       self.uuid[0..4] != Rails.configuration.ClusterID\n      # OK to update our local record to whatever the LoginCluster\n      # reports, because self-activate is not allowed.\n      return\n    elsif self.is_active_changed? &&\n       self.is_active_was &&\n       !self.is_active\n\n      # When a user is set up, they are added to the \"All users\"\n      # group.  
A user that is part of the \"All users\" group is\n      # allowed to self-activate.\n      #\n      # It doesn't make sense to deactivate a user (set is_active =\n      # false) without first removing them from the \"All users\" group,\n      # because they would be able to immediately reactivate\n      # themselves.\n      #\n      # The 'unsetup' method removes the user from the \"All users\"\n      # group (and also sets is_active = false) so send a message\n      # explaining the correct way to deactivate a user.\n      #\n      if Link.where(tail_uuid: self.uuid,\n                    head_uuid: all_users_group_uuid,\n                    link_class: 'permission').any?\n        errors.add :is_active, \"cannot be set to false directly, use the 'Deactivate' button on Workbench, or the 'unsetup' API call\"\n      end\n    end\n  end\n\n  def set_initial_username(requested: false)\n    if new_record? and requested == false and self.username != nil and self.username != \"\"\n      requested = self.username\n    end\n\n    if (!requested.is_a?(String) || requested.empty?) and email\n      email_parts = email.partition(\"@\")\n      local_parts = email_parts.first.partition(\"+\")\n      if email_parts.any?(&:empty?)\n        return\n      elsif not local_parts.first.empty?\n        requested = local_parts.first\n      else\n        requested = email_parts.first\n      end\n    end\n    if requested\n      requested.sub!(/^[^A-Za-z]+/, \"\")\n      requested.gsub!(/[^A-Za-z0-9]/, \"\")\n    end\n    unless !requested || requested.empty?\n      self.username = find_usable_username_from(requested)\n    end\n  end\n\n  def active_is_not_nil\n    self.is_active = false if self.is_active.nil?\n    self.is_admin = false if self.is_admin.nil?\n  end\n\n  # Move this user's (i.e., self's) owned items to new_owner_uuid and\n  # new_user_uuid (for things normally owned directly by the user).\n  #\n  # If redirect_to_new_user is true, also reassign auth tokens and ssh\n  # keys, and redirect this account to redirect_to_user_uuid, i.e.,\n  # when a caller authenticates to this account in the future, the\n  # redirect_to_user_uuid account will be used instead.\n  #\n  # current_user must have admin privileges, i.e., the caller is\n  # responsible for checking permission to do this.\n  def merge(new_owner_uuid:, new_user_uuid:, redirect_to_new_user:)\n    raise PermissionDeniedError if !current_user.andand.is_admin\n    raise \"Missing new_owner_uuid\" if !new_owner_uuid\n    raise \"Missing new_user_uuid\" if !new_user_uuid\n    transaction(requires_new: true) do\n      reload\n      raise \"cannot merge an already merged user\" if self.redirect_to_user_uuid\n\n      new_user = User.where(uuid: new_user_uuid).first\n      raise \"user does not exist\" if !new_user\n      raise \"cannot merge to an already merged user\" if new_user.redirect_to_user_uuid\n\n      self.clear_permissions\n      new_user.clear_permissions\n\n      # If 'self' is a remote user, don't transfer authorizations\n      # (i.e. 
ability to access the account) to the new user, because\n      # that gives the remote site the ability to access the 'new'\n      # user account that takes over the 'self' account.\n      #\n      # If 'self' is a local user, it is okay to transfer\n      # authorizations, even if the 'new' user is a remote account,\n      # because the remote site does not gain the ability to access an\n      # account it could not before.\n\n      if redirect_to_new_user and self.uuid[0..4] == Rails.configuration.ClusterID\n        # Existing API tokens and ssh keys are updated to authenticate\n        # to the new user.\n        ApiClientAuthorization.\n          where(user_id: id).\n          update_all(user_id: new_user.id)\n\n        user_updates = [\n          [AuthorizedKey, :owner_uuid],\n          [AuthorizedKey, :authorized_user_uuid],\n          [Link, :owner_uuid],\n          [Link, :tail_uuid],\n          [Link, :head_uuid],\n        ]\n      else\n        # Destroy API tokens and ssh keys associated with the old\n        # user.\n        ApiClientAuthorization.where(user_id: id).destroy_all\n        AuthorizedKey.where(owner_uuid: uuid).destroy_all\n        AuthorizedKey.where(authorized_user_uuid: uuid).destroy_all\n        user_updates = [\n          [Link, :owner_uuid],\n          [Link, :tail_uuid]\n        ]\n      end\n\n      # References to the old user UUID in the context of a user ID\n      # (rather than a \"home project\" in the project hierarchy) are\n      # updated to point to the new user.\n      user_updates.each do |klass, column|\n        klass.where(column => uuid).update_all(column => new_user.uuid)\n      end\n\n      # References to the merged user's \"home project\" are updated to\n      # point to new_owner_uuid.\n      ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|\n        next if [ApiClientAuthorization,\n                 AuthorizedKey,\n                 Link,\n                 Log].include?(klass)\n        next if !klass.columns.collect(&:name).include?('owner_uuid')\n        klass.where(owner_uuid: uuid).update_all(owner_uuid: new_owner_uuid)\n      end\n\n      if redirect_to_new_user\n        update!(redirect_to_user_uuid: new_user.uuid, username: nil)\n      end\n      skip_check_permissions_against_full_refresh do\n        update_permissions self.uuid, self.uuid, CAN_MANAGE_PERM, nil, true\n        update_permissions new_user.uuid, new_user.uuid, CAN_MANAGE_PERM, nil, true\n        update_permissions new_user.owner_uuid, new_user.uuid, CAN_MANAGE_PERM, nil, true\n      end\n      update_permissions self.owner_uuid, self.uuid, CAN_MANAGE_PERM, nil, true\n    end\n  end\n\n  def redirects_to\n    user = self\n    redirects = 0\n    while (uuid = user.redirect_to_user_uuid)\n      break if uuid.empty?\n      nextuser = User.unscoped.find_by_uuid(uuid)\n      if !nextuser\n        raise Exception.new(\"user uuid #{user.uuid} redirects to nonexistent uuid '#{uuid}'\")\n      end\n      user = nextuser\n      redirects += 1\n      if redirects > 15\n        raise \"Starting from #{self.uuid} redirect_to_user_uuid exceeded maximum number of redirects\"\n      end\n    end\n    user\n  end\n\n  def self.register info\n    # login info expected fields, all can be optional but at minimum\n    # must supply either 'identity_url' or 'email'\n    #\n    #   email\n    #   first_name\n    #   last_name\n    #   username\n    #   alternate_emails\n    #   identity_url\n\n    primary_user = nil\n\n    # local database\n    identity_url = 
info['identity_url']\n\n    if identity_url && identity_url.length > 0\n      # Only local users can create sessions, hence uuid_like_pattern\n      # here.\n      user = User.unscoped.where('identity_url = ? and uuid like ?',\n                                 identity_url,\n                                 User.uuid_like_pattern).first\n      primary_user = user.redirects_to if user\n    end\n\n    if !primary_user\n      # identity url is unset or didn't find matching record.\n      emails = [info['email']] + (info['alternate_emails'] || [])\n      emails.select! {|em| !em.nil? && !em.empty?}\n\n      User.unscoped.where('email in (?) and uuid like ?',\n                          emails,\n                          User.uuid_like_pattern).each do |user|\n        if !primary_user\n          primary_user = user.redirects_to\n        elsif primary_user.uuid != user.redirects_to.uuid\n          raise \"Ambiguous email address, directs to both #{primary_user.uuid} and #{user.redirects_to.uuid}\"\n        end\n      end\n    end\n\n    if !primary_user\n      # New user registration\n      primary_user = User.new(:owner_uuid => system_user_uuid,\n                              :is_admin => false,\n                              :is_active => Rails.configuration.Users.NewUsersAreActive)\n\n      primary_user.set_initial_username(requested: info['username']) if info['username'] && !info['username'].blank?\n      primary_user.identity_url = info['identity_url'] if identity_url\n    end\n\n    primary_user.email = info['email'] if info['email']\n    primary_user.first_name = info['first_name'] if info['first_name']\n    primary_user.last_name = info['last_name'] if info['last_name']\n\n    if (!primary_user.email or primary_user.email.empty?) and (!primary_user.identity_url or primary_user.identity_url.empty?)\n      raise \"Must supply at least one of 'email' or 'identity_url' to User.register\"\n    end\n\n    act_as_system_user do\n      primary_user.save!\n    end\n\n    primary_user\n  end\n\n  def self.update_remote_user remote_user\n    remote_user = remote_user.symbolize_keys\n    remote_user_prefix = remote_user[:uuid][0..4]\n\n    # interaction between is_invited and is_active\n    #\n    # either flag can be nil, true or false\n    #\n    # in all cases, we create the user if they don't exist.\n    #\n    # invited nil, active nil: don't call setup or unsetup.\n    #\n    # invited nil, active false: call unsetup\n    #\n    # invited nil, active true: call setup and activate them.\n    #\n    #\n    # invited false, active nil: call unsetup\n    #\n    # invited false, active false: call unsetup\n    #\n    # invited false, active true: call unsetup\n    #\n    #\n    # invited true, active nil: call setup but don't change is_active\n    #\n    # invited true, active false: call setup but don't change is_active\n    #\n    # invited true, active true: call setup and activate them.\n\n    should_setup = (remote_user_prefix == Rails.configuration.Login.LoginCluster or\n                    Rails.configuration.Users.AutoSetupNewUsers or\n                    Rails.configuration.Users.NewUsersAreActive or\n                    Rails.configuration.RemoteClusters[remote_user_prefix].andand[\"ActivateUsers\"])\n\n    should_activate = (remote_user_prefix == Rails.configuration.Login.LoginCluster or\n                       Rails.configuration.Users.NewUsersAreActive or\n                       Rails.configuration.RemoteClusters[remote_user_prefix].andand[\"ActivateUsers\"])\n\n    
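# For example: is_invited=false with is_active=true yields\n    # remote_should_be_unsetup=true and remote_should_be_active=false\n    # below, so the unsetup branch wins, matching the table above.\n    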
remote_should_be_unsetup = (remote_user[:is_invited] == nil && remote_user[:is_active] == false) ||\n                               (remote_user[:is_invited] == false)\n\n    remote_should_be_setup = should_setup && (\n      (remote_user[:is_invited] == nil && remote_user[:is_active] == true) ||\n      (remote_user[:is_invited] == false && remote_user[:is_active] == true) ||\n      (remote_user[:is_invited] == true))\n\n    remote_should_be_active = should_activate && remote_user[:is_invited] != false && remote_user[:is_active] == true\n\n    # Make sure blank username is nil\n    remote_user[:username] = nil if remote_user[:username] == \"\"\n\n    begin\n      user = User.create_with(email: remote_user[:email],\n                              username: remote_user[:username],\n                              first_name: remote_user[:first_name],\n                              last_name: remote_user[:last_name],\n                              is_active: remote_should_be_active,\n                             ).find_or_create_by(uuid: remote_user[:uuid])\n    rescue ActiveRecord::RecordNotUnique\n      retry\n    end\n\n    user.with_lock do\n      needupdate = {}\n      [:email, :username, :first_name, :last_name, :prefs].each do |k|\n        v = remote_user[k]\n        if !v.nil? && user.send(k) != v\n          needupdate[k] = v\n        end\n      end\n\n      user.email = needupdate[:email] if needupdate[:email]\n\n      loginCluster = Rails.configuration.Login.LoginCluster\n      if user.username.nil? || user.username == \"\"\n        # Don't have a username yet, try to set one\n        initial_username = user.set_initial_username(requested: remote_user[:username])\n        needupdate[:username] = initial_username if !initial_username.nil?\n      elsif remote_user_prefix != loginCluster\n        # Upstream is not login cluster, don't try to change the\n        # username once set.\n        needupdate.delete :username\n      end\n\n      if needupdate.length > 0\n        begin\n          user.update!(needupdate)\n        rescue ActiveRecord::RecordInvalid\n          if remote_user_prefix == loginCluster && !needupdate[:username].nil?\n            local_user = User.find_by_username(needupdate[:username])\n            # The username of this record conflicts with an existing,\n            # different user record.  This can happen because the\n            # username changed upstream on the login cluster, or\n            # because we're federated with another cluster with a user\n            # by the same username.  
The login cluster is the source\n            # of truth, so change the username on the conflicting\n            # record and retry the update operation.\n            if local_user.uuid != user.uuid\n              new_username = \"#{needupdate[:username]}#{rand(99999999)}\"\n              Rails.logger.warn(\"cached username '#{needupdate[:username]}' collision with user '#{local_user.uuid}' - renaming to '#{new_username}' before retrying\")\n              local_user.update!({username: new_username})\n              retry\n            end\n          end\n          raise # Not the issue we're handling above\n        end\n      elsif user.new_record?\n        begin\n          user.save!\n        rescue => e\n          Rails.logger.debug \"Error saving user record: #{$!}\"\n          Rails.logger.debug \"Backtrace:\\n\\t#{e.backtrace.join(\"\\n\\t\")}\"\n          raise\n        end\n      end\n\n      if remote_should_be_unsetup\n        # Remote user is not \"invited\" or \"active\" state on their home\n        # cluster, so they should be unsetup, which also makes them\n        # inactive.\n        user.unsetup\n      else\n        if !user.is_invited && remote_should_be_setup\n          user.setup\n        end\n\n        if !user.is_active && remote_should_be_active\n          # remote user is active and invited, we need to activate them\n          user.update!(is_active: true)\n        end\n\n        if remote_user_prefix == Rails.configuration.Login.LoginCluster and\n          user.is_active and\n          !remote_user[:is_admin].nil? and\n          user.is_admin != remote_user[:is_admin]\n          # Remote cluster controls our user database, including the\n          # admin flag.\n          user.update!(is_admin: remote_user[:is_admin])\n        end\n      end\n    end\n    user\n  end\n\n  protected\n\n  def self.attributes_required_columns\n    super.merge(\n                'can_write' => ['owner_uuid', 'uuid'],\n                'can_manage' => ['owner_uuid', 'uuid'],\n                'full_name' => ['first_name', 'last_name'],\n                )\n  end\n\n  def change_all_uuid_refs(old_uuid:, new_uuid:)\n    ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|\n      klass.columns.each do |col|\n        if col.name.end_with?('_uuid')\n          column = col.name.to_sym\n          klass.where(column => old_uuid).update_all(column => new_uuid)\n        end\n      end\n    end\n  end\n\n  def ensure_ownership_path_leads_to_user\n    true\n  end\n\n  def permission_to_update\n    if username_changed? || redirect_to_user_uuid_changed? || email_changed?\n      current_user.andand.is_admin\n    else\n      # users must be able to update themselves (even if they are\n      # inactive) in order to create sessions\n      self == current_user or super\n    end\n  end\n\n  def permission_to_create\n    current_user.andand.is_admin or\n      (self == current_user &&\n       self.redirect_to_user_uuid.nil? &&\n       self.is_active == Rails.configuration.Users.NewUsersAreActive)\n  end\n\n  def check_auto_admin\n    return if self.uuid.end_with?('anonymouspublic')\n    if (User.where(\"email = ?\",self.email).where(:is_admin => true).count == 0 and\n        !Rails.configuration.Users.AutoAdminUserWithEmail.empty? 
and self.email == Rails.configuration.Users[\"AutoAdminUserWithEmail\"]) or\n       (User.where(\"uuid not like '%-000000000000000'\").where(:is_admin => true).count == 0 and\n        Rails.configuration.Users.AutoAdminFirstUser)\n      self.is_admin = true\n      self.is_active = true\n    end\n  end\n\n  def find_usable_username_from(basename)\n    # If \"basename\" is a usable username, return that.\n    # Otherwise, find a unique username \"basenameN\", where N is the\n    # smallest integer greater than 1, and return that.\n    # Return nil if a unique username can't be found after reasonable\n    # searching.\n    quoted_name = self.class.connection.quote_string(basename)\n    next_username = basename\n    next_suffix = 1\n    while Rails.configuration.Users.AutoSetupUsernameBlacklist[next_username]\n      next_suffix += 1\n      next_username = \"%s%i\" % [basename, next_suffix]\n    end\n    0.upto(6).each do |suffix_len|\n      pattern = \"%s%s\" % [quoted_name, \"_\" * suffix_len]\n      self.class.unscoped.\n          where(\"username like '#{pattern}'\").\n          select(:username).\n          order('username asc').\n          each do |other_user|\n        if other_user.username > next_username\n          break\n        elsif other_user.username == next_username\n          next_suffix += 1\n          next_username = \"%s%i\" % [basename, next_suffix]\n        end\n      end\n      return next_username if (next_username.size <= pattern.size)\n    end\n    nil\n  end\n\n  def prevent_privilege_escalation\n    if current_user.andand.is_admin\n      return true\n    end\n    if self.is_active_changed?\n      if self.is_active != self.is_active_was\n        logger.warn \"User #{current_user.uuid} tried to change is_active from #{self.is_active_was} to #{self.is_active} for #{self.uuid}\"\n        self.is_active = self.is_active_was\n      end\n    end\n    if self.is_admin_changed?\n      if self.is_admin != self.is_admin_was\n        logger.warn \"User #{current_user.uuid} tried to change is_admin from #{self.is_admin_was} to #{self.is_admin} for #{self.uuid}\"\n        self.is_admin = self.is_admin_was\n      end\n    end\n    true\n  end\n\n  def prevent_inactive_admin\n    if self.is_admin and not self.is_active\n      # There is no known use case for the strange set of permissions\n      # that would result from this change. It's safest to assume it's\n      # a mistake and disallow it outright.\n      raise \"Admin users cannot be inactive\"\n    end\n    true\n  end\n\n  def prevent_nonadmin_system_root\n    if self.uuid == system_user_uuid and self.is_admin_changed? and !self.is_admin\n      raise \"System root user cannot be non-admin\"\n    end\n    true\n  end\n\n  def search_permissions(start, graph, merged={}, upstream_mask=nil, upstream_path={})\n    nextpaths = graph[start]\n    return merged if !nextpaths\n    return merged if upstream_path.has_key? 
start\n    upstream_path[start] = true\n    upstream_mask ||= ALL_PERMISSIONS\n    nextpaths.each do |head, mask|\n      merged[head] ||= {}\n      mask.each do |k,v|\n        merged[head][k] ||= v if upstream_mask[k]\n      end\n      search_permissions(head, graph, merged, upstream_mask.select { |k,v| v && merged[head][k] }, upstream_path)\n    end\n    upstream_path.delete start\n    merged\n  end\n\n  # create login permission for the given vm_uuid, if it does not already exist\n  def create_vm_login_permission_link(vm_uuid, username)\n    # vm uuid is optional\n    return if vm_uuid == \"\"\n\n    vm = VirtualMachine.where(uuid: vm_uuid).first\n    if !vm\n      logger.warn \"Could not find virtual machine for #{vm_uuid.inspect}\"\n      raise \"No vm found for #{vm_uuid}\"\n    end\n\n    logger.info { \"vm uuid: \" + vm[:uuid] }\n    login_attrs = {\n      tail_uuid: uuid, head_uuid: vm.uuid,\n      link_class: \"permission\", name: \"can_login\",\n    }\n\n    login_perm = Link.\n      where(login_attrs).\n      select { |link| link.properties[\"username\"] == username }.\n      first\n\n    login_perm ||= Link.\n      create(login_attrs.merge(properties: {\"username\" => username}))\n\n    logger.info { \"login permission: \" + login_perm[:uuid] }\n    login_perm\n  end\n\n  def add_to_all_users_group\n    resp = [Link.where(tail_uuid: self.uuid,\n                       head_uuid: all_users_group_uuid,\n                       link_class: 'permission',\n                       name: 'can_write').first ||\n            Link.create(tail_uuid: self.uuid,\n                        head_uuid: all_users_group_uuid,\n                        link_class: 'permission',\n                        name: 'can_write')]\n    if Rails.configuration.Users.ActivatedUsersAreVisibleToOthers\n      resp += [Link.where(tail_uuid: all_users_group_uuid,\n                          head_uuid: self.uuid,\n                          link_class: 'permission',\n                          name: 'can_read').first ||\n               Link.create(tail_uuid: all_users_group_uuid,\n                           head_uuid: self.uuid,\n                           link_class: 'permission',\n                           name: 'can_read')]\n    end\n    return resp\n  end\n\n  # Give the special \"System group\" permission to manage this user and\n  # all of this user's stuff.\n  def add_system_group_permission_link\n    return true if uuid == system_user_uuid\n    act_as_system_user do\n      Link.create(link_class: 'permission',\n                  name: 'can_manage',\n                  tail_uuid: system_group_uuid,\n                  head_uuid: self.uuid)\n    end\n  end\n\n  # Send admin notifications\n  def send_admin_notifications\n    if self.is_invited then\n      AdminNotifier.new_user(self).deliver_now\n    else\n      AdminNotifier.new_inactive_user(self).deliver_now\n    end\n  end\n\n  # Automatically setup if is_active flag turns on\n  def setup_on_activate\n    return if [system_user_uuid, anonymous_user_uuid].include?(self.uuid)\n    if is_active &&\n      (new_record? || saved_change_to_is_active? || will_save_change_to_is_active?)\n      setup\n    end\n  end\n\n  # Automatically setup new user during creation\n  def auto_setup_new_user\n    setup\n  end\n\n  # Send notification if the user saved profile for the first time\n  def send_profile_created_notification\n    if saved_change_to_prefs?\n      if prefs_before_last_save.andand.empty? 
|| !prefs_before_last_save.andand['profile']\n        profile_notification_address = Rails.configuration.Users.UserProfileNotificationAddress\n        ProfileNotifier.profile_created(self, profile_notification_address).deliver_now if profile_notification_address and !profile_notification_address.empty?\n      end\n    end\n  end\n\n  def identity_url_nil_if_empty\n    if identity_url == \"\"\n      self.identity_url = nil\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/app/models/user_agreement.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass UserAgreement < Collection\n  # This class exists so that Arvados::V1::SchemaController includes\n  # UserAgreementsController's methods in the discovery document.\nend\n"
  },
  {
    "path": "services/api/app/models/virtual_machine.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass VirtualMachine < ArvadosModel\n  include HasUuid\n  include KindAndEtag\n  include CommonApiTemplate\n\n  has_many(:login_permissions,\n           -> { where(\"link_class = 'permission' and name = 'can_login'\") },\n           foreign_key: 'head_uuid',\n           class_name: 'Link',\n           primary_key: 'uuid')\n\n  api_accessible :user, extend: :common do |t|\n    t.add :hostname\n  end\n\n  protected\n\n  def permission_to_create\n    current_user and current_user.is_admin\n  end\n  def permission_to_update\n    current_user and current_user.is_admin\n  end\nend\n"
  },
  {
    "path": "services/api/app/models/workflow.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass Workflow < ArvadosModel\n  include HasUuid\n  include KindAndEtag\n  include CommonApiTemplate\n\n  validate :validate_definition\n  validate :validate_collection_uuid\n  before_save :set_name_and_description\n  before_save :link_with_collection\n\n  api_accessible :user, extend: :common do |t|\n    t.add :name\n    t.add :description\n    t.add :definition\n    t.add :collection_uuid\n  end\n\n  def validate_definition\n    begin\n      @definition_yaml = YAML.safe_load self.definition if !definition.nil?\n    rescue => e\n      errors.add :definition, \"is not valid yaml: #{e.message}\"\n    end\n  end\n\n  def validate_collection_uuid\n    return if !collection_uuid_changed?\n\n    # This block, and all references to\n    # $enable_workflow_collection_linking_for_tests, will be removed\n    # in 23057 when workflow->collection linking is enabled\n    # permanently.\n    if !$enable_workflow_collection_linking_for_tests\n      if !collection_uuid.nil?\n        errors.add :collection_uuid, \"must be null\"\n      end\n      return\n    end\n\n    c = Collection.\n          readable_by(current_user).\n          find_by_uuid(collection_uuid)\n    if !c\n      errors.add :collection_uuid, \"does not exist or do not have permission to read.\"\n    end\n\n    if c.properties[\"type\"] != \"workflow\"\n      errors.add :collection_uuid, \"properties does not have type: workflow\"\n    end\n  end\n\n  def set_name_and_description\n    old_wf = {}\n    begin\n      old_wf = YAML.safe_load self.definition_was if !self.definition_was.nil?\n    rescue => e\n      logger.warn \"set_name_and_description error: #{e.message}\"\n      return\n    end\n\n    ['name', 'description'].each do |a|\n      if !self.changes.include?(a)\n        v = self.read_attribute(a)\n        if !v.present? or v == old_wf[a]\n          val = @definition_yaml[a] if self.definition and @definition_yaml\n          self[a] = val\n        end\n      end\n    end\n  end\n\n  def self.full_text_searchable_columns\n    super - [\"definition\", \"collection_uuid\"]\n  end\n\n  def link_with_collection\n    return if collection_uuid.nil? || !collection_uuid_changed?\n    Collection.find_by_uuid(collection_uuid).update_linked_workflows([self], false)\n  end\n\n  def self.readable_by(*users_list)\n    return super if users_list.select { |u| u.is_a?(User) && u.is_admin }.any?\n    super.where(collection_uuid: nil).or(where(Collection.readable_by(*users_list).where(\"collections.uuid = workflows.collection_uuid\").arel.exists))\n  end\n\nend\n"
  },
  {
    "path": "services/api/app/views/admin_notifier/new_inactive_user.text.erb",
    "content": "<%# Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 %>\n\nA new user has been created, but not set up.\n\n  <%= @user.full_name %> <<%= @user.email %>> (<%= @user.username %>)\n\nThey will not be able to use Arvados unless set up by an admin.\n\n<% if Rails.configuration.Services.Workbench1.ExternalURL -%>\nPlease see Workbench for more information:\n\n  <%= URI::join(Rails.configuration.Services.Workbench1.ExternalURL, \"user/#{@user.uuid}\") %>\n\n<% end -%>\nThanks,\nYour friendly Arvados robot.\n"
  },
  {
    "path": "services/api/app/views/admin_notifier/new_user.text.erb",
    "content": "<%# Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 %>\n\nA new user has been created and set up.\n\n  <%= @user.full_name %> <<%= @user.email %>> (<%= @user.username %>)\n\nThey are able to use Arvados.\n\n<% if Rails.configuration.Services.Workbench1.ExternalURL -%>\nPlease see Workbench for more information:\n\n  <%= URI::join(Rails.configuration.Services.Workbench1.ExternalURL, \"user/#{@user.uuid}\") %>\n\n<% end -%>\nThanks,\nYour friendly Arvados robot.\n"
  },
  {
    "path": "services/api/app/views/layouts/application.html.erb",
    "content": "<%# Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 %>\n\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n  <title>Arvados API Server (<%= Rails.configuration.ClusterID %>)</title>\n  <%= stylesheet_link_tag    \"application\" %>\n  <%= javascript_include_tag \"application\" %>\n  <%= csrf_meta_tags %>\n</head>\n<body>\n<div id=\"header\">\n  <div class=\"apptitle\">ARVADOS</div>\n  <div>(<%= Rails.configuration.ClusterID %>)</div>\n  <div style=\"float:right\">\n    <% if current_user %>\n    <%= current_user.full_name %>\n    <% if current_user.is_admin %>\n    &nbsp;&bull;&nbsp;\n    <a class=\"logout\" href=\"/admin/users\">Admin</a>\n    <% end %>\n    &nbsp;&bull;&nbsp;\n    <a class=\"logout\" href=\"/logout\">Log out</a>\n    <% end %>\n\n    <% if current_user and session[:real_uid] and session[:switch_back_to] and User.find(session[:real_uid].to_i).verify_userswitch_cookie(session[:switch_back_to]) %>\n    &nbsp;&bull;&nbsp;\n    <span class=\"sudo-warning\">Logged in as <b><%= current_user.full_name %></b>. <%= link_to \"Back to #{User.find(session[:real_uid]).full_name}\", switch_to_user_path(session[:real_uid]), :method => :post, :class => 'sudo-logout' %></span>\n    <% end %>\n  </div>\n</div>\n\n\n<%= yield %>\n\n<div style=\"clear:both\"></div>\n\n<% if current_user or session['invite_code'] %>\n<div id=\"footer\">\n  <div style=\"clear:both\"></div>\n</div>\n<% end %>\n\n</body>\n</html>\n"
  },
  {
    "path": "services/api/app/views/profile_notifier/profile_created.text.erb",
    "content": "<%# Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 %>\n\nProfile created by user <%=@user.full_name%> <%=@user.email%>\nUser's profile: <%=@user.prefs['profile']%>\n"
  },
  {
    "path": "services/api/app/views/static/intro.html.erb",
    "content": "<%# Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 %>\n\n<% content_for :js do %>\n$(function(){\n  $('button.login').button().click(function(){window.location=$(this).attr('href')});\n});\n<% end %>\n<div id=\"intropage\">\n  <img class=\"arvados-logo\" src=\"<%= asset_path('logo.png') %>\" style=\"display:block; margin:2em auto\" alt=\"arvados logo\"/>\n  <div style=\"width:30em; margin:2em auto 0 auto\">\n    <h1>Welcome</h1>\n    <h4>ARVADOS</h4>\n\n    <% if !current_user and session['invite_code'] %>\n\n    <p>Arvados lets you manage and process biomedical data.</p>\n    <p style=\"float:right;margin-top:1em\">\n      <button class=\"login\" href=\"/login\">Log in and get started</button>\n    </p>\n\n    <% else %>\n\n    <% if !current_user %>\n    <p style=\"float:right;margin-top:1em\">\n      <a href=\"/login\">Log in here.</a>\n    </p>\n    <% end %>\n\n    <% end %>\n\n    <div style=\"clear:both;height:8em\"></div>\n  </div>\n</div>\n"
  },
  {
    "path": "services/api/app/views/static/login_failure.html.erb",
    "content": "<%# Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 %>\n\n<% content_for :js do %>\n$(function(){\n  $('button.login').button().click(function(){window.location=$(this).attr('href')});\n});\n<% end %>\n\n\n<div id=\"intropage\">\n  <img class=\"arvados-logo\" src=\"<%= asset_path('logo.png') rescue '/logo.png' %>\" style=\"display:block; margin:2em auto\" alt=\"arvados logo\" />\n  <div style=\"width:30em; margin:2em auto 0 auto\">\n\n    <h1>Error</h1>\n\n    <p>Sorry, something went wrong logging you in. Please try again.</p>\n\n    <!--<p style=\"float:right;margin-top:1em\">\n      <a href=\"/login\">Log in here.</a>\n    </p>-->\n\n    <div style=\"clear:both;height:8em\"></div>\n  </div>\n</div>\n"
  },
  {
    "path": "services/api/app/views/user_notifier/account_is_setup.text.erb",
    "content": "<%# Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 %>\n\n<%= ERB.new(Rails.configuration.Users.UserSetupMailText, trim_mode: \"-\").result(binding) %>\n"
  },
  {
    "path": "services/api/app/views/user_sessions/create.html.erb",
    "content": "<%# Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 %>\n\n<div style=\"width:40em; margin:2em auto 0 auto\">\n  <h1>Login redirect</h1>\n  <p>This login is linked to federated user <b><%= @user.email %></b> (<b><%= @user.uuid %></b>) on cluster <b><%= @user.uuid[0..4] %></b>.  You need to log in again on that cluster.</p>\n  <p>After logging in, you will be returned to this cluster (<b><%=Rails.configuration.ClusterID%></b>).</p>\n  <div style=\"width: 100%\">\n    <div style=\"float: left\"><a href=\"<%=@remotehomeurl%>\">Click here log in on cluster <%= @user.uuid[0..4] %>.</a></div>\n    <div style=\"float: right\"><a href=\"/logout\">Cancel</a></div>\n  </div>\n</div>\n"
  },
  {
    "path": "services/api/app/views/user_sessions/failure.html.erb",
    "content": "<%# Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 %>\n\n<h1>Fail</h1>\n\n<%= notice %>\n\n<br/>\n<a href=\"/login\">Retry Login</a>\n"
  },
  {
    "path": "services/api/arvados-railsapi.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[Unit]\nDescription=Arvados API server Rails backend\nDocumentation=https://doc.arvados.org/\nAfter=network.target\nAssertPathExists=/etc/arvados/config.yml\nStartLimitIntervalSec=0\n\n[Install]\nWantedBy=multi-user.target\n\n[Service]\n# It would be nicer to write a Type=simple process, but then Passenger\n# duplicates a lot of logs to stdout.\nType=forking\nPIDFile=%t/%N/passenger.pid\nEnvironmentFile=-/etc/arvados/environment\n# Passenger web server settings come from (highest precedence first):\n# 1. Command line options\n# 2. PASSENGER_* environment variables\n# 3. /var/www/arvados-api/current/Passengerfile.json\n# You can change or add settings for this unit by running\n# `systemctl edit arvados-railsapi.service`.\n# Refer to the Passenger standalone configuration reference at\n# <https://www.phusionpassenger.com/library/config/standalone/reference>\n# for more information about options.\nEnvironment=PASSENGER_ADDRESS=localhost\nEnvironment=PASSENGER_ENVIRONMENT=production\nEnvironment=PASSENGER_LOG_FILE=log/production.log\nEnvironment=PASSENGER_PORT=8004\nWorkingDirectory=/var/www/arvados-api/current\nExecStartPre=+/bin/install -d log tmp\nExecStartPre=+/bin/chmod g+srwx log tmp\nExecStartPre=+-/bin/chmod g+rw ${PASSENGER_LOG_FILE}\n# Note that `bundle exec` lines should have overrides from the package that\n# use specific versions of `bundle` and `passenger`.\nExecStart=/usr/bin/bundle exec passenger start --daemonize --pid-file %t/%N/passenger.pid\nExecStop=/usr/bin/bundle exec passenger stop --pid-file %t/%N/passenger.pid\nExecReload=/usr/bin/bundle exec passenger-config reopen-logs\nRestart=always\nRestartSec=1\n\nReadWritePaths=/var/www/arvados-api/current/log\nReadWritePaths=/var/www/arvados-api/current/tmp\nReadWritePaths=/var/www/arvados-api/shared/log\nRuntimeDirectory=%N\n\nDynamicUser=true\nPrivateTmp=true\nProtectControlGroups=true\nProtectHome=true\nProtectSystem=strict\n\nLockPersonality=true\nNoNewPrivileges=true\nMemoryDenyWriteExecute=true\nPrivateDevices=true\nProtectKernelModules=true\nProtectKernelTunables=true\nRestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 AF_NETLINK\nRestrictNamespaces=true\nRestrictRealtime=true\nSystemCallFilter=@system-service\n"
  },
  {
    "path": "services/api/bin/bundle",
    "content": "#!/usr/bin/env ruby\n\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nENV['BUNDLE_GEMFILE'] ||= File.expand_path('../Gemfile', __dir__)\nload Gem.bin_path('bundler', 'bundle')\n"
  },
  {
    "path": "services/api/bin/rails",
    "content": "#!/usr/bin/env ruby\nAPP_PATH = File.expand_path(\"../config/application\", __dir__)\nrequire_relative \"../config/boot\"\nrequire \"rails/commands\"\n"
  },
  {
    "path": "services/api/bin/rake",
    "content": "#!/usr/bin/env ruby\nrequire_relative \"../config/boot\"\nrequire \"rake\"\nRake.application.run\n"
  },
  {
    "path": "services/api/bin/setup",
    "content": "#!/usr/bin/env ruby\nrequire \"fileutils\"\n\n# path to your application root.\nAPP_ROOT = File.expand_path(\"..\", __dir__)\n\ndef system!(*args)\n  system(*args) || abort(\"\\n== Command #{args} failed ==\")\nend\n\nFileUtils.chdir APP_ROOT do\n  # This script is a way to set up or update your development environment automatically.\n  # This script is idempotent, so that you can run it at any time and get an expectable outcome.\n  # Add necessary setup steps to this file.\n\n  puts \"== Installing dependencies ==\"\n  system! \"gem install bundler --conservative\"\n  system(\"bundle check\") || system!(\"bundle install\")\n\n  # puts \"\\n== Copying sample files ==\"\n  # unless File.exist?(\"config/database.yml\")\n  #   FileUtils.cp \"config/database.yml.sample\", \"config/database.yml\"\n  # end\n\n  puts \"\\n== Preparing database ==\"\n  system! \"bin/rails db:prepare\"\n\n  puts \"\\n== Removing old logs and tempfiles ==\"\n  system! \"bin/rails log:clear tmp:clear\"\n\n  puts \"\\n== Restarting application server ==\"\n  system! \"bin/rails restart\"\nend\n"
  },
  {
    "path": "services/api/bin/update",
    "content": "#!/usr/bin/env ruby\n\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'fileutils'\ninclude FileUtils\n\n# path to your application root.\nAPP_ROOT = File.expand_path('..', __dir__)\n\ndef system!(*args)\n  system(*args) || abort(\"\\n== Command #{args} failed ==\")\nend\n\nchdir APP_ROOT do\n  # This script is a way to update your development environment automatically.\n  # Add necessary update steps to this file.\n\n  puts '== Installing dependencies =='\n  system! 'gem install bundler --conservative'\n  system('bundle check') || system!('bundle install')\n\n  puts \"\\n== Updating database ==\"\n  system! 'bin/rails db:migrate'\n\n  puts \"\\n== Removing old logs and tempfiles ==\"\n  system! 'bin/rails log:clear tmp:clear'\n\n  puts \"\\n== Restarting application server ==\"\n  system! 'bin/rails restart'\nend\n"
  },
  {
    "path": "services/api/config/application.default.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Do not use this file for site configuration. Create application.yml\n# instead (see application.yml.example).\n#\n# The order of precedence is:\n# 1. config/environments/{RAILS_ENV}.rb (deprecated)\n# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)\n# 3. Section in application.yml called \"common\"\n# 4. Section in application.default.yml corresponding to RAILS_ENV\n# 5. Section in application.default.yml called \"common\"\n\ncommon:\n\n  ## Set Time.zone default to the specified zone and make Active\n  ## Record auto-convert to this zone.  Run \"rake -D time\" for a list\n  ## of tasks for finding time zone names. Default is UTC.\n  #time_zone: Central Time (US & Canada)\n\n  ## Default encoding used in templates for Ruby 1.9.\n  encoding: utf-8\n\n  # Enable the asset pipeline\n  assets.enabled: true\n\n  # Version of your assets, change this if you want to expire all your assets\n  assets.version: \"1.0\"\n\n  default_openid_prefix: https://www.google.com/accounts/o8/id\n\n  # Override the automatic version string. With the default value of\n  # false, the version string is read from git-commit.version in\n  # Rails.root (included in vendor packages) or determined by invoking\n  # \"git log\".\n  source_version: false\n\n  # Override the automatic package version string. With the default version of\n  # false, the package version is read from package-build.version in Rails.root\n  # (included in vendor packages).\n  package_version: false\n\ndevelopment:\n  force_ssl: false\n  cache_classes: false\n  whiny_nils: true\n  consider_all_requests_local: true\n  action_controller.perform_caching: false\n  action_mailer.raise_delivery_errors: false\n  action_mailer.perform_deliveries: false\n  active_support.deprecation: :log\n  action_dispatch.best_standards_support: :builtin\n  active_record.auto_explain_threshold_in_seconds: 0.5\n  assets.compress: false\n  assets.debug: true\n\nproduction:\n  force_ssl: true\n  cache_classes: true\n  consider_all_requests_local: false\n  action_controller.perform_caching: true\n  public_file_server:\n    enabled: false\n  assets.compress: true\n  assets.compile: false\n  assets.digest: true\n\ntest:\n  force_ssl: false\n  cache_classes: true\n  whiny_nils: true\n  consider_all_requests_local: true\n  action_controller.perform_caching: false\n  action_dispatch.show_exceptions: false\n  action_controller.allow_forgery_protection: false\n  action_mailer.delivery_method: :test\n  active_support.deprecation: :stderr\n"
  },
  {
    "path": "services/api/config/application.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire_relative \"boot\"\n\nrequire \"rails\"\n# Pick the frameworks you want:\nrequire \"active_model/railtie\"\nrequire \"active_job/railtie\"\nrequire \"active_record/railtie\"\n# require \"active_storage/engine\"\nrequire \"action_controller/railtie\"\nrequire \"action_mailer/railtie\"\n# require \"action_mailbox/engine\"\n# require \"action_text/engine\"\nrequire \"action_view/railtie\"\n# require \"action_cable/engine\"\nrequire \"sprockets/railtie\"\nrequire \"rails/test_unit/railtie\"\n\n# Require the gems listed in Gemfile, including any gems\n# you've limited to :test, :development, or :production.\nBundler.require(*Rails.groups)\n\nif ENV[\"ARVADOS_RAILS_LOG_TO_STDOUT\"]\n  Rails.logger = ActiveSupport::TaggedLogging.new(Logger.new(STDOUT))\nend\n\nmodule Server\n  class Application < Rails::Application\n\n    require_relative \"arvados_config.rb\"\n\n    # Initialize configuration defaults for specified Rails version.\n    config.load_defaults 7.0\n\n    # Configuration for the application, engines, and railties goes here.\n    #\n    # These settings can be overridden in specific environments using the files\n    # in config/environments, which are processed later.\n    #\n    # config.time_zone = \"Central Time (US & Canada)\"\n    # config.eager_load_paths << Rails.root.join(\"extras\")\n\n    # We use db/structure.sql instead of db/schema.rb.\n    config.active_record.schema_format = :sql\n\n    config.eager_load = true\n\n    config.active_support.test_order = :sorted\n\n    # container_request records can contain arbitrary data structures\n    # in mounts.*.content, so rails must not munge them.\n    config.action_dispatch.perform_deep_munge = false\n\n    # force_ssl's redirect-to-https feature doesn't work when the\n    # client supplies a port number, and prevents arvados-controller\n    # from connecting to Rails internally via plain http.\n    config.ssl_options = {redirect: false}\n\n    # This will change to 7.0 in a future release when there is no\n    # longer a possibility of rolling back to Arvados 2.7 (Rails 5.2)\n    # which cannot read 7.0-format cache files.\n    config.active_support.cache_format_version = 6.1\n\n    # Before using the filesystem backend for Rails.cache, check\n    # whether we own the relevant directory. If we don't, using it is\n    # likely to either fail or (if we're root) pollute it and cause\n    # other processes to fail later.\n    default_cache_path = Rails.root.join('tmp', 'cache')\n    if not File.owned?(default_cache_path)\n      if File.exist?(default_cache_path)\n        why = \"owner (uid=#{File::Stat.new(default_cache_path).uid}) \" +\n          \"is not me (uid=#{Process.euid})\"\n      else\n        why = \"does not exist\"\n      end\n      STDERR.puts(\"Defaulting to memory cache, \" +\n                  \"because #{default_cache_path} #{why}\")\n      config.cache_store = :memory_store\n    else\n      require Rails.root.join('lib/safer_file_store')\n      config.cache_store = ::SaferFileStore.new(default_cache_path)\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/config/application.yml.example",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Copy this file to application.yml and edit to suit.\n#\n# Consult application.default.yml for the full list of configuration\n# settings.\n#\n# The order of precedence is:\n# 1. config/environments/{RAILS_ENV}.rb (deprecated)\n# 2. Section in application.yml corresponding to RAILS_ENV (e.g., development)\n# 3. Section in application.yml called \"common\"\n# 4. Section in application.default.yml corresponding to RAILS_ENV\n# 5. Section in application.default.yml called \"common\"\n\nproduction:\n  # Mandatory site configuration.  See application.default.yml and\n  # http://http://doc.arvados.org/install/install-api-server.html#configure_application\n  # for more information.\n  uuid_prefix: ~\n  secret_token: ~\n  blob_signing_key: ~\n  sso_app_secret: ~\n  sso_app_id: ~\n  sso_provider_url: ~\n  workbench_address: ~\n  websocket_address: ~\n  #git_repositories_dir: ~\n  #git_internal_dir: ~\n\ndevelopment:\n  # Separate settings for development configuration.\n  uuid_prefix: ~\n  secret_token: ~\n  blob_signing_key: ~\n  sso_app_id: ~\n  sso_app_secret: ~\n  sso_provider_url: ~\n  workbench_address: ~\n  websocket_address: ~\n  #git_repositories_dir: ~\n  #git_internal_dir: ~\n\ntest:\n  # Tests should be able to run without further configuration, but if you do\n  # want to change your local test configuration, this is where to do it.\n\ncommon:\n  # Settings in this section will be used in all environments\n  # (development, production, test) except when overridden in the\n  # environment-specific sections above.\n"
  },
  {
    "path": "services/api/config/arvados_config.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n#\n# Load Arvados configuration from /etc/arvados/config.yml, using defaults\n# from config.default.yml\n#\n# Existing application.yml is migrated into the new config structure.\n# Keys in the legacy application.yml take precedence.\n#\n# Use \"bundle exec config:dump\" to get the complete active configuration\n#\n# Use \"bundle exec config:migrate\" to migrate application.yml and\n# database.yml to config.yml.  After adding the output of\n# config:migrate to /etc/arvados/config.yml, you will be able to\n# delete application.yml and database.yml.\n\nrequire \"cgi\"\nrequire 'config_loader'\nrequire 'open3'\n\nbegin\n  # If secret_token.rb exists here, we need to load it first.\n  require_relative 'secret_token.rb'\nrescue LoadError\n  # Normally secret_token.rb is missing and the secret token is\n  # configured by application.yml (i.e., here!) instead.\nend\n\n# Load the defaults, used by config:migrate and fallback loading\n# legacy application.yml\nload_time = Time.now.utc\ndefaultYAML, stderr, status = Open3.capture3(\"arvados-server\", \"config-dump\", \"-config=-\", \"-skip-legacy\", stdin_data: \"Clusters: {xxxxx: {}}\")\nif !status.success?\n  puts stderr\n  raise \"error loading config: #{status}\"\nend\nconfs = YAML.safe_load(defaultYAML)\nclusterID, clusterConfig = confs[\"Clusters\"].first\n$arvados_config_defaults = clusterConfig\n$arvados_config_defaults[\"ClusterID\"] = clusterID\n$arvados_config_defaults[\"SourceTimestamp\"] = Time.rfc3339(confs[\"SourceTimestamp\"])\n$arvados_config_defaults[\"SourceSHA256\"] = confs[\"SourceSHA256\"]\n\nif ENV[\"ARVADOS_CONFIG\"] == \"none\"\n  # Don't load config. This magic value is set by packaging scripts so\n  # they can run \"rake assets:precompile\" without a real config.\n  $arvados_config_global = $arvados_config_defaults.deep_dup\nelse\n  # Load the global config file\n  Open3.popen2(\"arvados-server\", \"config-dump\", \"-skip-legacy\") do |stdin, stdout, status_thread|\n    confs = YAML.safe_load(stdout)\n    if confs && !confs.empty?\n      # config-dump merges defaults with user configuration, so every\n      # key should be set.\n      clusterID, clusterConfig = confs[\"Clusters\"].first\n      $arvados_config_global = clusterConfig\n      $arvados_config_global[\"ClusterID\"] = clusterID\n      $arvados_config_global[\"SourceTimestamp\"] = Time.rfc3339(confs[\"SourceTimestamp\"])\n      $arvados_config_global[\"SourceSHA256\"] = confs[\"SourceSHA256\"]\n    else\n      # config-dump failed, assume we will be loading from legacy\n      # application.yml, initialize with defaults.\n      $arvados_config_global = $arvados_config_defaults.deep_dup\n    end\n  end\nend\n\n# Now make a copy\n$arvados_config = $arvados_config_global.deep_dup\n$arvados_config[\"LoadTimestamp\"] = load_time\n\ndef arrayToHash cfg, k, v\n  val = {}\n  v.each do |entry|\n    val[entry.to_s] = {}\n  end\n  ConfigLoader.set_cfg cfg, k, val\nend\n\n# Declare all our configuration items.\narvcfg = ConfigLoader.new\narvcfg.declare_config \"ClusterID\", NonemptyString, :uuid_prefix\narvcfg.declare_config \"ManagementToken\", String, :ManagementToken\narvcfg.declare_config \"SystemRootToken\", String\narvcfg.declare_config \"API.DisabledAPIs\", Hash, :disable_api_methods, ->(cfg, k, v) { arrayToHash cfg, \"API.DisabledAPIs\", v }\narvcfg.declare_config \"API.MaxRequestSize\", Integer, :max_request_size\narvcfg.declare_config 
\"API.MaxIndexDatabaseRead\", Integer, :max_index_database_read\narvcfg.declare_config \"API.MaxItemsPerResponse\", Integer, :max_items_per_response\narvcfg.declare_config \"API.MaxTokenLifetime\", ActiveSupport::Duration\narvcfg.declare_config \"API.RequestTimeout\", ActiveSupport::Duration\narvcfg.declare_config \"API.AsyncPermissionsUpdateInterval\", ActiveSupport::Duration, :async_permissions_update_interval\narvcfg.declare_config \"Users.AutoSetupNewUsers\", Boolean, :auto_setup_new_users\narvcfg.declare_config \"Users.AutoSetupNewUsersWithVmUUID\", String, :auto_setup_new_users_with_vm_uuid\narvcfg.declare_config \"Users.AutoSetupUsernameBlacklist\", Hash, :auto_setup_name_blacklist, ->(cfg, k, v) { arrayToHash cfg, \"Users.AutoSetupUsernameBlacklist\", v }\narvcfg.declare_config \"Users.NewUsersAreActive\", Boolean, :new_users_are_active\narvcfg.declare_config \"Users.AutoAdminUserWithEmail\", String, :auto_admin_user\narvcfg.declare_config \"Users.AutoAdminFirstUser\", Boolean, :auto_admin_first_user\narvcfg.declare_config \"Users.UserProfileNotificationAddress\", String, :user_profile_notification_address\narvcfg.declare_config \"Users.AdminNotifierEmailFrom\", String, :admin_notifier_email_from\narvcfg.declare_config \"Users.EmailSubjectPrefix\", String, :email_subject_prefix\narvcfg.declare_config \"Users.UserNotifierEmailFrom\", String, :user_notifier_email_from\narvcfg.declare_config \"Users.UserNotifierEmailBcc\", Hash\narvcfg.declare_config \"Users.NewUserNotificationRecipients\", Hash, :new_user_notification_recipients, ->(cfg, k, v) { arrayToHash cfg, \"Users.NewUserNotificationRecipients\", v }\narvcfg.declare_config \"Users.NewInactiveUserNotificationRecipients\", Hash, :new_inactive_user_notification_recipients, method(:arrayToHash)\narvcfg.declare_config \"Users.CanCreateRoleGroups\", Boolean\narvcfg.declare_config \"Users.RoleGroupsVisibleToAll\", Boolean\narvcfg.declare_config \"Login.LoginCluster\", String\narvcfg.declare_config \"Login.TrustedClients\", Hash\narvcfg.declare_config \"Login.RemoteTokenRefresh\", ActiveSupport::Duration\narvcfg.declare_config \"Login.TokenLifetime\", ActiveSupport::Duration\narvcfg.declare_config \"TLS.Insecure\", Boolean, :sso_insecure\narvcfg.declare_config \"AuditLogs.MaxAge\", ActiveSupport::Duration, :max_audit_log_age\narvcfg.declare_config \"AuditLogs.MaxDeleteBatch\", Integer, :max_audit_log_delete_batch\narvcfg.declare_config \"AuditLogs.UnloggedAttributes\", Hash, :unlogged_attributes, ->(cfg, k, v) { arrayToHash cfg, \"AuditLogs.UnloggedAttributes\", v }\narvcfg.declare_config \"SystemLogs.MaxRequestLogParamsSize\", Integer, :max_request_log_params_size\narvcfg.declare_config \"Collections.DefaultReplication\", Integer, :default_collection_replication\narvcfg.declare_config \"Collections.DefaultTrashLifetime\", ActiveSupport::Duration, :default_trash_lifetime\narvcfg.declare_config \"Collections.CollectionVersioning\", Boolean, :collection_versioning\narvcfg.declare_config \"Collections.PreserveVersionIfIdle\", ActiveSupport::Duration, :preserve_version_if_idle\narvcfg.declare_config \"Collections.TrashSweepInterval\", ActiveSupport::Duration, :trash_sweep_interval\narvcfg.declare_config \"Collections.BlobSigningKey\", String, :blob_signing_key\narvcfg.declare_config \"Collections.BlobSigningTTL\", ActiveSupport::Duration, :blob_signature_ttl\narvcfg.declare_config \"Collections.BlobSigning\", Boolean, :permit_create_collection_with_unsigned_manifest, ->(cfg, k, v) { ConfigLoader.set_cfg cfg, \"Collections.BlobSigning\", !v 
}\narvcfg.declare_config \"Collections.ForwardSlashNameSubstitution\", String\narvcfg.declare_config \"Containers.SupportedDockerImageFormats\", Hash, :docker_image_formats, ->(cfg, k, v) { arrayToHash cfg, \"Containers.SupportedDockerImageFormats\", v }\narvcfg.declare_config \"Containers.LogReuseDecisions\", Boolean, :log_reuse_decisions\narvcfg.declare_config \"Containers.DefaultKeepCacheRAM\", Integer, :container_default_keep_cache_ram\narvcfg.declare_config \"Containers.MaxDispatchAttempts\", Integer, :max_container_dispatch_attempts\narvcfg.declare_config \"Containers.MaxRetryAttempts\", Integer, :container_count_max\narvcfg.declare_config \"Containers.AlwaysUsePreemptibleInstances\", Boolean, :preemptible_instances\narvcfg.declare_config \"Containers.Logging.LogUpdatePeriod\", ActiveSupport::Duration, :crunch_log_update_period\narvcfg.declare_config \"Containers.Logging.LogUpdateSize\", Integer, :crunch_log_update_size\narvcfg.declare_config \"Services.ContainerWebServices.ExternalURL\", URI\narvcfg.declare_config \"Services.ContainerWebServices.ExternalPortMin\", Integer\narvcfg.declare_config \"Services.ContainerWebServices.ExternalPortMax\", Integer\narvcfg.declare_config \"Services.Controller.ExternalURL\", URI\narvcfg.declare_config \"Services.Workbench1.ExternalURL\", URI, :workbench_address\narvcfg.declare_config \"Services.Websocket.ExternalURL\", URI, :websocket_address\narvcfg.declare_config \"Services.WebDAV.ExternalURL\", URI, :keep_web_service_url\narvcfg.declare_config \"RemoteClusters\", Hash, :remote_hosts, ->(cfg, k, v) {\n  h = if cfg[\"RemoteClusters\"] then\n        cfg[\"RemoteClusters\"].deep_dup\n      else\n        {}\n      end\n  v.each do |clusterid, host|\n    if h[clusterid].nil?\n      h[clusterid] = {\n        \"Host\" => host,\n        \"Proxy\" => true,\n        \"Scheme\" => \"https\",\n        \"Insecure\" => false,\n        \"ActivateUsers\" => false\n      }\n    end\n  end\n  ConfigLoader.set_cfg cfg, \"RemoteClusters\", h\n}\narvcfg.declare_config \"RemoteClusters.*.Proxy\", Boolean, :remote_hosts_via_dns\narvcfg.declare_config \"StorageClasses\", Hash\n\ndbcfg = ConfigLoader.new\n\ndbcfg.declare_config \"PostgreSQL.ConnectionPool\", Integer, :pool\ndbcfg.declare_config \"PostgreSQL.Connection.host\", String, :host\ndbcfg.declare_config \"PostgreSQL.Connection.port\", String, :port\ndbcfg.declare_config \"PostgreSQL.Connection.user\", String, :username\ndbcfg.declare_config \"PostgreSQL.Connection.password\", String, :password\ndbcfg.declare_config \"PostgreSQL.Connection.dbname\", String, :database\ndbcfg.declare_config \"PostgreSQL.Connection.template\", String, :template\ndbcfg.declare_config \"PostgreSQL.Connection.encoding\", String, :encoding\ndbcfg.declare_config \"PostgreSQL.Connection.collation\", String, :collation\n\napplication_config = {}\n%w(application.default application).each do |cfgfile|\n  path = \"#{::Rails.root.to_s}/config/#{cfgfile}.yml\"\n  confs = ConfigLoader.load(path, erb: true)\n  # Ignore empty YAML file:\n  next if confs == nil\n  application_config.deep_merge!(confs['common'] || {})\n  application_config.deep_merge!(confs[::Rails.env.to_s] || {})\nend\n\ndb_config = {}\npath = \"#{::Rails.root.to_s}/config/database.yml\"\nif !ENV['ARVADOS_CONFIG_NOLEGACY'] && File.exist?(path)\n  db_config = ConfigLoader.load(path, erb: true)\nend\n\n$remaining_config = arvcfg.migrate_config(application_config, $arvados_config)\ndbcfg.migrate_config(db_config[::Rails.env.to_s] || {}, $arvados_config)\n\nif 
application_config[:auto_activate_users_from]\n  application_config[:auto_activate_users_from].each do |cluster|\n    if $arvados_config[\"RemoteClusters\"][cluster]\n      $arvados_config[\"RemoteClusters\"][cluster][\"ActivateUsers\"] = true\n    end\n  end\nend\n\nif application_config[:host] || application_config[:port] || application_config[:scheme]\n  if !application_config[:host] || application_config[:host].empty?\n    raise \"Must set 'host' when setting 'port' or 'scheme'\"\n  end\n  $arvados_config[\"Services\"][\"Controller\"][\"ExternalURL\"] = URI((application_config[:scheme] || \"https\")+\"://\"+application_config[:host]+\n                                                              (if application_config[:port] then \":#{application_config[:port]}\" else \"\" end))\nend\n\n# Checks for wrongly typed configuration items, coerces properties\n# into correct types (such as Duration), and optionally raises an\n# error for essential configuration that can't be empty.\narvcfg.coercion_and_check $arvados_config_defaults, check_nonempty: false\narvcfg.coercion_and_check $arvados_config_global, check_nonempty: false\narvcfg.coercion_and_check $arvados_config, check_nonempty: true\ndbcfg.coercion_and_check $arvados_config, check_nonempty: true\n\n# * $arvados_config_defaults is the defaults\n# * $arvados_config_global is $arvados_config_defaults merged with the contents of /etc/arvados/config.yml\n# These are used by the rake config: tasks\n#\n# * $arvados_config is $arvados_config_global merged with the migrated contents of application.yml\n# This is what actually gets copied into the Rails configuration object.\n\nif $arvados_config[\"Collections\"][\"DefaultTrashLifetime\"] < 86400.seconds then\n  raise \"Collections.DefaultTrashLifetime is %d seconds, must be at least 86400\" % $arvados_config[\"Collections\"][\"DefaultTrashLifetime\"].to_i\nend\n\ndefault_storage_classes = []\n$arvados_config[\"StorageClasses\"].each do |cls, cfg|\n  if cfg[\"Default\"]\n    default_storage_classes << cls\n  end\nend\nif default_storage_classes.length == 0\n  default_storage_classes = [\"default\"]\nend\n$arvados_config[\"DefaultStorageClasses\"] = default_storage_classes.sort\n\nif ::Rails.env.to_s == \"test\"\n  # Use template0 when creating a new database. 
Avoids\n  # character-encoding/collation problems.\n  $arvados_config[\"PostgreSQL\"][\"Connection\"][\"template\"] = \"template0\"\n  # Some test cases depend on en_US.UTF-8 collation.\n  $arvados_config[\"PostgreSQL\"][\"Connection\"][\"collation\"] = \"en_US.UTF-8\"\nend\n\nif ENV[\"ARVADOS_CONFIG\"] == \"none\"\n  # We need the postgresql connection URI to be valid, even if we\n  # don't use it.\n  $arvados_config[\"PostgreSQL\"][\"Connection\"][\"host\"] = \"localhost\"\n  $arvados_config[\"PostgreSQL\"][\"Connection\"][\"user\"] = \"x\"\n  $arvados_config[\"PostgreSQL\"][\"Connection\"][\"password\"] = \"x\"\n  $arvados_config[\"PostgreSQL\"][\"Connection\"][\"dbname\"] = \"x\"\nend\n\nif $arvados_config[\"PostgreSQL\"][\"Connection\"][\"password\"].empty?\n  raise \"Database password is empty, PostgreSQL section is: #{$arvados_config[\"PostgreSQL\"]}\"\nend\n\ndbhost = $arvados_config[\"PostgreSQL\"][\"Connection\"][\"host\"]\nif $arvados_config[\"PostgreSQL\"][\"Connection\"][\"port\"] != 0\n  dbhost += \":#{$arvados_config[\"PostgreSQL\"][\"Connection\"][\"port\"]}\"\nend\n\n#\n# If DATABASE_URL is set, then ActiveRecord won't error out if database.yml doesn't exist.\n#\n# For config migration, we've previously populated the PostgreSQL\n# section of the config from database.yml\n#\ndatabase_url = \"postgresql://#{CGI.escape $arvados_config[\"PostgreSQL\"][\"Connection\"][\"user\"]}:\"+\n                      \"#{CGI.escape $arvados_config[\"PostgreSQL\"][\"Connection\"][\"password\"]}@\"+\n                      \"#{dbhost}/#{CGI.escape $arvados_config[\"PostgreSQL\"][\"Connection\"][\"dbname\"]}?\"+\n                      \"template=#{CGI.escape $arvados_config[\"PostgreSQL\"][\"Connection\"][\"template\"].to_s}&\"+\n                      \"encoding=#{CGI.escape $arvados_config[\"PostgreSQL\"][\"Connection\"][\"client_encoding\"].to_s}&\"+\n                      \"collation=#{CGI.escape $arvados_config[\"PostgreSQL\"][\"Connection\"][\"collation\"].to_s}&\"+\n                      \"pool=#{$arvados_config[\"PostgreSQL\"][\"ConnectionPool\"]}\"\n\nENV[\"DATABASE_URL\"] = database_url\n\nServer::Application.configure do\n  # Copy into the Rails config object.  This also turns Hash into\n  # OrderedOptions so that application code can use\n  # Rails.configuration.API.Blah instead of\n  # Rails.configuration.API[\"Blah\"]\n  ConfigLoader.copy_into_config $arvados_config, config\n  ConfigLoader.copy_into_config $remaining_config, config\n\n  # We don't rely on cookies for authentication, so instead of\n  # requiring a signing key in config, we assign a new random one at\n  # startup.\n  credentials.secret_key_base = rand(1<<255).to_s(36)\nend\n"
  },
  {
    "path": "services/api/config/boot.rb",
    "content": "ENV[\"BUNDLE_GEMFILE\"] ||= File.expand_path(\"../Gemfile\", __dir__)\n\n# Setting an environment variable before loading rack is the only way\n# to change rack's request size limit for an urlencoded POST body.\n# Rack::QueryParser accepts an initialization argument to override the\n# default, but rack only ever uses its global default_parser, and\n# there is no facility for overriding that at runtime.\n#\n# Our strategy is to rely on the more configurable downstream servers\n# (Nginx and arvados-controller) to reject oversized requests before\n# they hit this server at all.\nENV[\"RACK_QUERY_PARSER_BYTESIZE_LIMIT\"] = (4 << 30).to_s\n\nrequire \"bundler/setup\" # Set up gems listed in the Gemfile.\n"
  },
  {
    "path": "services/api/config/cable.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ndevelopment:\n  adapter: async\n\ntest:\n  adapter: async\n\nproduction:\n  adapter: redis\n  url: redis://localhost:6379/1\n"
  },
  {
    "path": "services/api/config/database.yml.example",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ndevelopment:\n  adapter: postgresql\n  template: template0\n  encoding: utf8\n  database: arvados_development\n  username: arvados\n  password: xxxxxxxx\n  host: localhost\n\ntest:\n  adapter: postgresql\n  template: template0\n  encoding: utf8\n  collation: en_US.utf8\n  database: arvados_test\n  username: arvados\n  password: xxxxxxxx\n  host: localhost\n\nproduction:\n  adapter: postgresql\n  template: template0\n  encoding: utf8\n  database: arvados_production\n  username: arvados\n  password: xxxxxxxx\n  host: localhost\n  pool: 50\n"
  },
  {
    "path": "services/api/config/environment.rb",
    "content": "# Load the Rails application.\nrequire_relative \"application\"\n\n# Initialize the Rails application.\nRails.application.initialize!\n"
  },
  {
    "path": "services/api/config/environments/development.rb.example",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nRails.application.configure do\n  # Settings specified here will take precedence over those in config/application.rb\n\n  # In the development environment your application's code is reloaded on\n  # every request.  This slows down response time but is perfect for development\n  # since you don't have to restart the web server when you make code changes.\n  config.cache_classes = false\n\n  # Log error messages when you accidentally call methods on nil.\n  config.whiny_nils = true\n\n  # Show full error reports and disable caching\n  config.consider_all_requests_local       = true\n  config.action_controller.perform_caching = false\n\n  # Don't care if the mailer can't send\n  config.action_mailer.raise_delivery_errors = false\n  config.action_mailer.perform_deliveries = false\n\n  # Print deprecation notices to the Rails logger\n  config.active_support.deprecation = :log\n\n  # Only use best-standards-support built into browsers\n  config.action_dispatch.best_standards_support = :builtin\n\n  # Log the query plan for queries taking more than this (works\n  # with SQLite, MySQL, and PostgreSQL)\n  config.active_record.auto_explain_threshold_in_seconds = 0.5\n\n  # Do not compress assets\n  config.assets.compress = false\n\n  # Expands the lines which load the assets\n  config.assets.debug = true\n\n  config.force_ssl = false\n\nend\n"
  },
  {
    "path": "services/api/config/environments/production.rb.example",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nRails.application.configure do\n  # Settings specified here will take precedence over those in config/application.rb\n\n  # Code is not reloaded between requests\n  config.cache_classes = true\n\n  # Full error reports are disabled and caching is turned on\n  config.consider_all_requests_local       = false\n  config.action_controller.perform_caching = true\n\n  # Disable Rails's static asset server (Apache or nginx will already do this)\n  config.public_file_server.enabled = false\n\n  # Compress JavaScripts and CSS\n  config.assets.compress = true\n\n  # Don't fallback to assets pipeline if a precompiled asset is missed\n  config.assets.compile = false\n\n  # Generate digests for assets URLs\n  config.assets.digest = true\n\n  # Defaults to Rails.root.join(\"public/assets\")\n  # config.assets.manifest = YOUR_PATH\n\n  # Specifies the header that your server uses for sending files\n  # config.action_dispatch.x_sendfile_header = \"X-Sendfile\" # for apache\n  # config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for nginx\n\n  # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies.\n  # config.force_ssl = true\n\n  # See everything in the log (default is :info)\n  # config.log_level = :debug\n\n  # Use a different logger for distributed setups\n  # config.logger = SyslogLogger.new\n\n  # Use a different cache store in production\n  # config.cache_store = :mem_cache_store\n\n  # Enable serving of images, stylesheets, and JavaScripts from an asset server\n  # config.action_controller.asset_host = \"http://assets.example.com\"\n\n  # Precompile additional assets (application.js, application.css, and all non-JS/CSS are already added)\n  # config.assets.precompile += %w( search.js )\n\n  # Disable delivery errors, bad email addresses will be ignored\n  # config.action_mailer.raise_delivery_errors = false\n  # config.action_mailer.perform_deliveries = true\n\n  # Enable threaded mode\n  # config.threadsafe!\n\n  # Enable locale fallbacks for I18n (makes lookups for any locale fall back to\n  # the I18n.default_locale when a translation can not be found)\n  config.i18n.fallbacks = true\n\n  # Send deprecation notices to registered listeners\n  config.active_support.deprecation = :notify\n\n  config.log_level = :info\nend\n"
  },
  {
    "path": "services/api/config/environments/test.rb.example",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nRails.application.configure do\n  # Settings specified here will take precedence over those in config/application.rb\n\n  # The test environment is used exclusively to run your application's\n  # test suite.  You never need to work with it otherwise.  Remember that\n  # your test database is \"scratch space\" for the test suite and is wiped\n  # and recreated between test runs.  Don't rely on the data there!\n  config.cache_classes = true\n\n  # Configure static asset server for tests with Cache-Control for performance\n  config.public_file_server.enabled = true\n  config.public_file_server.headers = { 'Cache-Control' => 'public, max-age=3600' }\n\n  # Log error messages when you accidentally call methods on nil\n  config.whiny_nils = true\n\n  # Show full error reports and disable caching\n  config.consider_all_requests_local       = true\n  config.action_controller.perform_caching = false\n\n  # Raise exceptions instead of rendering exception templates\n  config.action_dispatch.show_exceptions = false\n\n  # Disable request forgery protection in test environment\n  config.action_controller.allow_forgery_protection    = false\n\n  # Tell Action Mailer not to deliver emails to the real world.\n  # The :test delivery method accumulates sent emails in the\n  # ActionMailer::Base.deliveries array.\n  config.action_mailer.delivery_method = :test\n\n  # Use SQL instead of Active Record's schema dumper when creating the test database.\n  # This is necessary if your schema can't be completely dumped by the schema dumper,\n  # like if you have constraints or database-specific column types\n  # config.active_record.schema_format = :sql\n\n  # Print deprecation notices to the stderr\n  config.active_support.deprecation = :stderr\n\n  # No need for SSL while testing\n  config.force_ssl = false\n\n  # I18n likes to warn when this variable is not set\n  I18n.enforce_available_locales = true\n\nend\n"
  },
  {
    "path": "services/api/config/initializers/andand.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'andand'\n"
  },
  {
    "path": "services/api/config/initializers/app_version.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'app_version'\n"
  },
  {
    "path": "services/api/config/initializers/application_controller_renderer.rb",
    "content": "# Be sure to restart your server when you modify this file.\n\n# ActiveSupport::Reloader.to_prepare do\n#   ApplicationController.renderer.defaults.merge!(\n#     http_host: 'example.org',\n#     https: false\n#   )\n# end\n"
  },
  {
    "path": "services/api/config/initializers/assets.rb",
    "content": "# Be sure to restart your server when you modify this file.\n\n# Version of your assets, change this if you want to expire all your assets.\nRails.application.config.assets.version = \"1.0\"\n\n# Add additional assets to the asset load path.\n# Rails.application.config.assets.paths << Emoji.images_path\n\n# Precompile additional assets.\n# application.js, application.css, and all non-JS/CSS in the app/assets\n# folder are already added.\n# Rails.application.config.assets.precompile += %w( admin.js admin.css )\n"
  },
  {
    "path": "services/api/config/initializers/authorization.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire_relative \"../../app/middlewares/arvados_api_token\"\n\nServer::Application.configure do\n  config.middleware.delete ActionDispatch::RemoteIp\n  config.middleware.insert 0, ActionDispatch::RemoteIp\n  config.middleware.insert 1, ArvadosApiToken\nend\n"
  },
  {
    "path": "services/api/config/initializers/backtrace_silencers.rb",
    "content": "# Be sure to restart your server when you modify this file.\n\n# You can add backtrace silencers for libraries that you're using but don't wish to see in your backtraces.\n# Rails.backtrace_cleaner.add_silencer { |line| /my_noisy_library/.match?(line) }\n\n# You can also remove all the silencers if you're trying to debug a problem that might stem from framework code\n# by setting BACKTRACE=1 before calling your invocation, like \"BACKTRACE=1 ./bin/rails runner 'MyClass.perform'\".\nRails.backtrace_cleaner.remove_silencers! if ENV[\"BACKTRACE\"]\n"
  },
  {
    "path": "services/api/config/initializers/clear_empty_content_type.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Rails handler stack crashes if the request Content-Type header value\n# is \"\", which is sometimes the case in GET requests from\n# ruby-google-api-client (which have no body content anyway).\n#\n# This middleware deletes such headers, so a request with an empty\n# Content-Type value is equivalent to a missing Content-Type header.\nclass ClearEmptyContentType\n  def initialize(app=nil, options=nil)\n    @app = app\n  end\n\n  def call(env)\n    if env[\"CONTENT_TYPE\"] == \"\"\n      env.delete(\"CONTENT_TYPE\")\n    end\n    @app.call(env) if @app.respond_to?(:call)\n  end\nend\n\nServer::Application.configure do\n  config.middleware.use ClearEmptyContentType\nend\n"
  },
  {
    "path": "services/api/config/initializers/common_api_template.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'common_api_template'\n"
  },
  {
    "path": "services/api/config/initializers/content_security_policy.rb",
    "content": "# Be sure to restart your server when you modify this file.\n\n# Define an application-wide content security policy.\n# See the Securing Rails Applications Guide for more information:\n# https://guides.rubyonrails.org/security.html#content-security-policy-header\n\n# Rails.application.configure do\n#   config.content_security_policy do |policy|\n#     policy.default_src :self, :https\n#     policy.font_src    :self, :https, :data\n#     policy.img_src     :self, :https, :data\n#     policy.object_src  :none\n#     policy.script_src  :self, :https\n#     policy.style_src   :self, :https\n#     # Specify URI for violation reports\n#     # policy.report_uri \"/csp-violation-report-endpoint\"\n#   end\n#\n#   # Generate session nonces for permitted importmap and inline scripts\n#   config.content_security_policy_nonce_generator = ->(request) { request.session.id.to_s }\n#   config.content_security_policy_nonce_directives = %w(script-src)\n#\n#   # Report violations without enforcing the policy.\n#   # config.content_security_policy_report_only = true\n# end\n"
  },
  {
    "path": "services/api/config/initializers/cookies_serializer.rb",
    "content": "# Be sure to restart your server when you modify this file.\n\n# Specify a serializer for the signed and encrypted cookie jars.\n# Valid options are :json, :marshal, and :hybrid.\nRails.application.config.action_dispatch.cookies_serializer = :json\n"
  },
  {
    "path": "services/api/config/initializers/current_api_client.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'current_api_client'\n"
  },
  {
    "path": "services/api/config/initializers/custom_types.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire_relative \"../../app/models/jsonb_type\"\n\n# JSONB backed Hash & Array types that default to their empty versions when\n# reading NULL from the database, or get nil passed by parameter.\nActiveRecord::Type.register(:jsonbHash, JsonbType::Hash)\nActiveRecord::Type.register(:jsonbArray, JsonbType::Array)\n"
  },
  {
    "path": "services/api/config/initializers/db_current_time.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'db_current_time'\n"
  },
  {
    "path": "services/api/config/initializers/db_timeout.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nActiveRecord::ConnectionAdapters::AbstractAdapter.set_callback :checkout, :before, ->(conn) do\n  ms = Rails.configuration.API.RequestTimeout.to_i * 1000\n  conn.execute(\"SET statement_timeout = #{ms}\")\n  conn.execute(\"SET lock_timeout = #{ms}\")\nend\n"
  },
  {
    "path": "services/api/config/initializers/filter_parameter_logging.rb",
    "content": "# Be sure to restart your server when you modify this file.\n\n# Configure parameters to be filtered from the log file. Use this to limit dissemination of\n# sensitive information. See the ActiveSupport::ParameterFilter documentation for supported\n# notations and behaviors.\nRails.application.config.filter_parameters += [\n  :passw, :secret, :token, :_key, :crypt, :salt, :certificate, :otp, :ssn\n]\n"
  },
  {
    "path": "services/api/config/initializers/fix_www_decode.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule URI\n  if Gem::Version.new(RUBY_VERSION) < Gem::Version.new('2.2')\n    # Rack uses the standard library method URI.decode_www_form_component to\n    # process parameters.  This method first validates the string with a\n    # regular expression, and then decodes it using another regular expression.\n    # Ruby 2.1 and earlier has a bug is in the validation; the regular\n    # expression that is used generates many backtracking points, which results\n    # in exponential memory growth when matching large strings.  The fix is to\n    # monkey-patch the version of the method from Ruby 2.2 which checks that\n    # the string is not invalid instead of checking it is valid.\n    def self.decode_www_form_component(str, enc=Encoding::UTF_8)\n      raise ArgumentError, \"invalid %-encoding (#{str})\" if /%(?!\\h\\h)/ =~ str\n      str.b.gsub(/\\+|%\\h\\h/, TBLDECWWWCOMP_).force_encoding(enc)\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/config/initializers/inflections.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Be sure to restart your server when you modify this file.\n\n# Add new inflection rules using the following format. Inflections\n# are locale specific, and you may define rules for as many different\n# locales as you wish. All of these examples are active by default:\n# ActiveSupport::Inflector.inflections(:en) do |inflect|\n#   inflect.plural /^(ox)$/i, \"\\\\1en\"\n#   inflect.singular /^(ox)en/i, \"\\\\1\"\n#   inflect.irregular \"person\", \"people\"\n#   inflect.uncountable %w( fish sheep )\n# end\n\n# These inflection rules are supported but not enabled by default:\n# ActiveSupport::Inflector.inflections(:en) do |inflect|\n#   inflect.acronym \"RESTful\"\n# end\n\nActiveSupport::Inflector.inflections do |inflect|\n  inflect.plural(/^([Ss]pecimen)$/i, '\\1s')\n  inflect.singular(/^([Ss]pecimen)s?/i, '\\1')\n  inflect.plural(/^([Hh]uman)$/i, '\\1s')\n  inflect.singular(/^([Hh]uman)s?/i, '\\1')\nend\n"
  },
  {
    "path": "services/api/config/initializers/kind_and_etag.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'kind_and_etag'\n"
  },
  {
    "path": "services/api/config/initializers/lograge.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'safe_json'\n\nServer::Application.configure do\n  config.lograge.enabled = true\n  config.lograge.formatter = Lograge::Formatters::Logstash.new\n  config.lograge.custom_options = lambda do |event|\n    payload = {\n      ClusterID: Rails.configuration.ClusterID,\n      request_id: event.payload[:request_id],\n      client_ipaddr: event.payload[:client_ipaddr],\n      client_auth: event.payload[:client_auth],\n    }\n\n    # Lograge adds exceptions not being rescued to event.payload, but we're\n    # catching all errors on ApplicationController so we look for backtraces\n    # elsewhere.\n    if !Thread.current[:backtrace].nil?\n      payload.merge!(\n        {\n          exception: Thread.current[:exception],\n          exception_backtrace: Thread.current[:backtrace],\n        }\n      )\n      Thread.current[:exception] = nil\n      Thread.current[:backtrace] = nil\n    end\n\n    exceptions = %w(controller action format id)\n    params = event.payload[:params].except(*exceptions)\n\n    # Omit secret_mounts field if supplied in create/update request\n    # body.\n    [\n      ['container', 'secret_mounts'],\n      ['container_request', 'secret_mounts'],\n    ].each do |resource, field|\n      if params[resource].is_a? Hash\n        params[resource] = params[resource].except(field)\n      end\n    end\n\n    # Redact new_user_token param in /arvados/v1/users/merge\n    # request. Log the auth UUID instead, if the token exists.\n    if params['new_user_token'].is_a? String\n      params['new_user_token_uuid'] =\n        ApiClientAuthorization.\n          where('api_token = ?', params['new_user_token']).\n          first.andand.uuid\n      params['new_user_token'] = '[...]'\n    end\n\n    params_s = SafeJSON.dump(params)\n    if params_s.length > Rails.configuration.SystemLogs[\"MaxRequestLogParamsSize\"]\n      payload[:params_truncated] = params_s[0..Rails.configuration.SystemLogs[\"MaxRequestLogParamsSize\"]] + \"[...]\"\n    else\n      payload[:params] = params\n    end\n    payload\n  end\nend\n"
  },
  {
    "path": "services/api/config/initializers/mime_types.rb",
    "content": "# Be sure to restart your server when you modify this file.\n\n# Add new mime types for use in respond_to blocks:\n# Mime::Type.register \"text/richtext\", :rtf\n"
  },
  {
    "path": "services/api/config/initializers/net_http.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'net/http'\n"
  },
  {
    "path": "services/api/config/initializers/oj_mimic_json.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'oj'\n\nOj::Rails.set_encoder()\nOj::Rails.set_decoder()\nOj::Rails.optimize()\nOj::Rails.mimic_JSON()\n\n"
  },
  {
    "path": "services/api/config/initializers/permissions_policy.rb",
    "content": "# Define an application-wide HTTP permissions policy. For further\n# information see https://developers.google.com/web/updates/2018/06/feature-policy\n#\n# Rails.application.config.permissions_policy do |f|\n#   f.camera      :none\n#   f.gyroscope   :none\n#   f.microphone  :none\n#   f.usb         :none\n#   f.fullscreen  :self\n#   f.payment     :self, \"https://secure.example.com\"\n# end\n"
  },
  {
    "path": "services/api/config/initializers/permit_all_parameters.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nActionController::Parameters.permit_all_parameters = true\n"
  },
  {
    "path": "services/api/config/initializers/reload_config.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ndef start_reload_thread\n  Thread.new do\n    lockfile = Rails.root.join('tmp', 'reload_config.lock')\n    File.open(lockfile, File::WRONLY|File::CREAT, 0600) do |f|\n      # Note we don't use LOCK_NB here. If we did, each time passenger\n      # kills the lock-holder process, we would be left with nobody\n      # checking for updates until passenger starts a new worker,\n      # which could be a long time.\n      Rails.logger.debug(\"reload_config: waiting for lock on #{lockfile}\")\n      f.flock(File::LOCK_EX)\n\n      t_lastload = Rails.configuration.SourceTimestamp\n      hash_lastload = Rails.configuration.SourceSHA256\n      conffile = ENV['ARVADOS_CONFIG'] || \"/etc/arvados/config.yml\"\n      Rails.logger.info(\"reload_config: polling for updated mtime on #{conffile} with threshold #{t_lastload}\")\n      while true\n        sleep 1\n        t = File.mtime(conffile)\n        # If the file is newer than 5s, re-read it even if the\n        # timestamp matches the previously loaded file. This enables\n        # us to detect changes even if the filesystem's timestamp\n        # precision cannot represent multiple updates per second.\n        if t.to_f != t_lastload.to_f || Time.now.to_f - t.to_f < 5\n          Open3.popen2(\"arvados-server\", \"config-dump\", \"-skip-legacy\") do |stdin, stdout, status_thread|\n            confs = YAML.safe_load(stdout)\n            hash = confs[\"SourceSHA256\"]\n          rescue => e\n            Rails.logger.info(\"reload_config: config file updated but could not be loaded: #{e}\")\n            t_lastload = t\n            next\n          end\n          if hash == hash_lastload\n            # If we reloaded a new or updated file, but the content is\n            # identical, keep polling instead of restarting.\n            t_lastload = t\n            next\n          end\n\n          restartfile = Rails.root.join('tmp', 'restart.txt')\n          touchtime = Time.now\n          Rails.logger.info(\"reload_config: mtime on #{conffile} changed to #{t}, touching #{restartfile} to #{touchtime}\")\n          begin\n            File.utime(touchtime, touchtime, restartfile)\n          rescue\n            # remove + re-create works even if the existing file is\n            # owned by root, provided the tempdir is writable.\n            File.unlink(restartfile) rescue nil\n            File.open(restartfile, 'w') {}\n          end\n          # Even if passenger doesn't notice that we hit restart.txt\n          # and kill our process, there's no point waiting around to\n          # hit it again.\n          break\n        end\n      end\n    end\n  end\nend\n\nif !File.owned?(Rails.root.join('tmp'))\n  Rails.logger.debug(\"reload_config: not owner of #{Rails.root}/tmp, skipping\")\nelsif ENV[\"ARVADOS_CONFIG\"] == \"none\"\n  Rails.logger.debug(\"reload_config: no config in use, skipping\")\nelsif defined?(PhusionPassenger)\n  PhusionPassenger.on_event(:starting_worker_process) do |forked|\n    start_reload_thread\n  end\nelse\n  start_reload_thread\nend\n"
  },
  {
    "path": "services/api/config/initializers/request_id_middleware.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule CustomRequestId\n  def make_request_id(req_id)\n    if !req_id || req_id.length < 1 || req_id.length > 1024\n      # Client-supplied ID is either missing or too long to be\n      # considered friendly.\n      internal_request_id\n    else\n      req_id\n    end\n  end\n\n  def internal_request_id\n    \"req-\" + Random.new.rand(2**128).to_s(36)[0..19]\n  end\nend\n\nclass ActionDispatch::RequestId\n  # Instead of using the default UUID-like format for X-Request-Id headers,\n  # use our own.\n  prepend CustomRequestId\nend\n"
  },
  {
    "path": "services/api/config/initializers/session_store.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Be sure to restart your server when you modify this file.\n\nRails.application.config.session_store :cookie_store, key: '_server_session'\n\n# Use the database for sessions instead of the cookie-based default,\n# which shouldn't be used to store highly confidential information\n# (create the session table with \"rails generate session_migration\")\n# Server::Application.config.session_store :active_record_store\n"
  },
  {
    "path": "services/api/config/initializers/time_format.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nActiveSupport::JSON::Encoding.time_precision = 9\n\nclass ActiveSupport::TimeWithZone\n  remove_method :as_json\n  def as_json *args\n    strftime \"%Y-%m-%dT%H:%M:%S.%NZ\"\n  end\nend\n\nclass Time\n  remove_method :as_json\n  def as_json *args\n    strftime \"%Y-%m-%dT%H:%M:%S.%NZ\"\n  end\nend\n"
  },
  {
    "path": "services/api/config/initializers/time_zone.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nActiveRecord::ConnectionAdapters::AbstractAdapter.set_callback :checkout, :before, ->(conn) do\n  # If the database connection is in a time zone other than UTC,\n  # \"timestamp\" values don't behave as desired.\n  #\n  # For example, ['select now() > ?', Time.now] returns true in time\n  # zones +0100 and UTC (which makes sense since Time.now is evaluated\n  # before now()), but false in time zone -0100 (now() returns an\n  # earlier clock time, and its time zone is dropped when comparing to\n  # a \"timestamp without time zone\").\n  conn.execute(\"SET TIME ZONE 'UTC'\")\nend\n"
  },
  {
    "path": "services/api/config/initializers/wrap_parameters.rb",
    "content": "# Be sure to restart your server when you modify this file.\n\n# This file contains settings for ActionController::ParamsWrapper which\n# is enabled by default.\n\n# Enable parameter wrapping for JSON. You can disable this by setting :format to an empty array.\nActiveSupport.on_load(:action_controller) do\n  wrap_parameters format: [:json]\nend\n\n# To enable root element in JSON for ActiveRecord objects.\n# ActiveSupport.on_load(:active_record) do\n#   self.include_root_in_json = true\n# end\n"
  },
  {
    "path": "services/api/config/locales/en.yml",
    "content": "# Files in the config/locales directory are used for internationalization\n# and are automatically loaded by Rails. If you want to use locales other\n# than English, add the necessary files in this directory.\n#\n# To use the locales, use `I18n.t`:\n#\n#     I18n.t 'hello'\n#\n# In views, this is aliased to just `t`:\n#\n#     <%= t('hello') %>\n#\n# To use a different locale, set it with `I18n.locale`:\n#\n#     I18n.locale = :es\n#\n# This would use the information in config/locales/es.yml.\n#\n# The following keys must be escaped otherwise they will not be retrieved by\n# the default I18n backend:\n#\n# true, false, on, off, yes, no\n#\n# Instead, surround them with single quotes.\n#\n# en:\n#   'true': 'foo'\n#\n# To learn more, please read the Rails Internationalization guide\n# available at https://guides.rubyonrails.org/i18n.html.\n\nen:\n  hello: \"Hello world\"\n"
  },
  {
    "path": "services/api/config/puma.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Puma can serve each request in a thread from an internal thread pool.\n# The `threads` method setting takes two numbers a minimum and maximum.\n# Any libraries that use thread pools should be configured to match\n# the maximum value specified for Puma. Default is set to 5 threads for minimum\n# and maximum, this matches the default thread size of Active Record.\n#\nthreads_count = ENV.fetch(\"RAILS_MAX_THREADS\") { 5 }.to_i\nthreads threads_count, threads_count\n\n# Specifies the `port` that Puma will listen on to receive requests, default is 3000.\n#\nport        ENV.fetch(\"PORT\") { 3000 }\n\n# Specifies the `environment` that Puma will run in.\n#\nenvironment ENV.fetch(\"RAILS_ENV\") { \"development\" }\n\n# Specifies the number of `workers` to boot in clustered mode.\n# Workers are forked webserver processes. If using threads and workers together\n# the concurrency of the application would be max `threads` * `workers`.\n# Workers do not work on JRuby or Windows (both of which do not support\n# processes).\n#\n# workers ENV.fetch(\"WEB_CONCURRENCY\") { 2 }\n\n# Use the `preload_app!` method when specifying a `workers` number.\n# This directive tells Puma to first boot the application and load code\n# before forking the application. This takes advantage of Copy On Write\n# process behavior so workers use less memory. If you use this option\n# you need to make sure to reconnect any threads in the `on_worker_boot`\n# block.\n#\n# preload_app!\n\n# The code in the `on_worker_boot` will be called if you are using\n# clustered mode by specifying a number of `workers`. After each worker\n# process is booted this block will be run, if you are using `preload_app!`\n# option you will want to use this block to reconnect to any threads\n# or connections that may have been created at application boot, Ruby\n# cannot share connections between processes.\n#\n# on_worker_boot do\n#   ActiveRecord::Base.establish_connection if defined?(ActiveRecord)\n# end\n\n# Allow puma to be restarted by `rails restart` command.\nplugin :tmp_restart\n"
  },
  {
    "path": "services/api/config/routes.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nRails.application.routes.draw do\n  # OPTIONS requests are not allowed at routes that use cookies.\n  ['/auth/*a', '/login', '/logout'].each do |nono|\n    match nono, to: 'user_sessions#cross_origin_forbidden', via: 'OPTIONS'\n  end\n  # OPTIONS at discovery and API paths get an empty response with CORS headers.\n  match '/discovery/v1/*a', to: 'static#empty', via: 'OPTIONS'\n  match '/arvados/v1/*a', to: 'static#empty', via: 'OPTIONS'\n\n  namespace :arvados do\n    namespace :v1 do\n      resources :api_client_authorizations do\n        post 'create_system_auth', on: :collection\n        get 'current', on: :collection\n      end\n      resources :authorized_keys\n      resources :collections do\n        get 'provenance', on: :member\n        get 'used_by', on: :member\n        post 'trash', on: :member\n        post 'untrash', on: :member\n      end\n      resources :groups do\n        get 'contents', on: :collection\n        get 'contents', on: :member\n        get 'shared', on: :collection\n        post 'trash', on: :member\n        post 'untrash', on: :member\n      end\n      resources :containers do\n        get 'auth', on: :member\n        post 'lock', on: :member\n        post 'unlock', on: :member\n        post 'update_priority', on: :member\n        get 'secret_mounts', on: :member\n        get 'current', on: :collection\n      end\n      resources :container_requests do\n        get 'container_status', on: :member\n      end\n      resources :keep_services do\n        get 'accessible', on: :collection\n      end\n      resources :links\n      resources :logs\n      resources :user_agreements do\n        get 'signatures', on: :collection\n        post 'sign', on: :collection\n      end\n      resources :users do\n        get 'current', on: :collection\n        get 'system', on: :collection\n        post 'activate', on: :member\n        post 'setup', on: :collection\n        post 'unsetup', on: :member\n        post 'merge', on: :collection\n        patch 'batch_update', on: :collection\n      end\n      resources :virtual_machines do\n        get 'logins', on: :member\n        get 'get_all_logins', on: :collection\n      end\n      resources :workflows\n      resources :credentials do\n        get 'secret', on: :member\n      end\n      get '/computed_permissions', to: 'computed_permissions#index'\n      get '/permissions/:uuid', to: 'links#get_permissions'\n    end\n  end\n\n  post '/sys/trash_sweep', to: 'sys#trash_sweep'\n\n  if Rails.env == 'test'\n    post '/database/reset', to: 'database#reset'\n  end\n\n  # omniauth\n  match '/auth/:provider/callback', to: 'user_sessions#create', via: [:get, :post]\n  match '/auth/failure', to: 'user_sessions#failure', via: [:get, :post]\n  # not handled by omniauth provider -> 403 with no CORS headers.\n  get '/auth/*a', to: 'user_sessions#cross_origin_forbidden'\n\n  # Custom logout\n  match '/login', to: 'user_sessions#login', via: [:get, :post]\n  match '/logout', to: 'user_sessions#logout', via: [:get, :post]\n\n  match '/discovery/v1/apis/arvados/v1/rest', to: 'arvados/v1/schema#index', via: [:get, :post]\n\n  match '/static/login_failure', to: 'static#login_failure', as: :login_failure, via: [:get, :post]\n\n  match '/_health/:check', to: 'arvados/v1/management#health', via: [:get]\n  match '/metrics', to: 'arvados/v1/management#metrics', via: [:get]\n\n  # Send unroutable requests to an arbitrary controller\n  # 
(ends up at ApplicationController#render_not_found)\n  match '*a', to: 'static#render_not_found', via: [:get, :post, :put, :patch, :delete, :options]\n\n  root to: 'static#home'\nend\n"
  },
  {
    "path": "services/api/config/secrets.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Be sure to restart your server when you modify this file.\n\n# Your secret key is used for verifying the integrity of signed cookies.\n# If you change this key, all old signed cookies will become invalid!\n\n# Make sure the secret is at least 30 characters and all random,\n# no regular words or you'll be exposed to dictionary attacks.\n# You can use `rails secret` to generate a secure secret key.\n\n# NOTE that these get overriden by Arvados' own configuration system.\n\n# shared:\n#   api_key: a1B2c3D4e5F6\n\n# Environmental secrets are only available for that specific environment.\n\n# development:\n#   secret_key_base: <%= rand(1<<255).to_s(36) %>\n\n# test:\n#   secret_key_base: <%= rand(1<<255).to_s(36) %>\n\n# In case this doesn't get overriden for some reason, assign a random key\n# to gracefully degrade by rejecting cookies instead of by opening a\n# vulnerability.\nproduction:\n  secret_key_base: <%= rand(1<<255).to_s(36) %>\n"
  },
  {
    "path": "services/api/config/spring.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n%w(\n  .ruby-version\n  .rbenv-vars\n  tmp/restart.txt\n  tmp/caching-dev.txt\n).each { |path| Spring.watch(path) }\n"
  },
  {
    "path": "services/api/config/unbound.template",
    "content": "  local-data: \"%{hostname} IN A %{ip_address}\"\n  local-data: \"%{hostname}.%{uuid_prefix} IN A %{ip_address}\"\n  local-data: \"%{hostname}.%{uuid_prefix}.arvadosapi.com. IN A %{ip_address}\"\n  local-data: \"%{ptr_domain}. IN PTR %{hostname}.%{uuid_prefix}.arvadosapi.com\"\n"
  },
  {
    "path": "services/api/config.ru",
    "content": "# This file is used by Rack-based servers to start the application.\n\nrequire_relative \"config/environment\"\n\nrun Rails.application\nRails.application.load_server\n"
  },
  {
    "path": "services/api/db/migrate/20121016005009_create_collections.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateCollections < ActiveRecord::Migration[4.2]\n  def change\n    create_table :collections do |t|\n      t.string :locator\n      t.string :create_by_client\n      t.string :created_by_user\n      t.datetime :created_at\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :portable_data_hash\n      t.string :name\n      t.integer :redundancy\n      t.string :redundancy_confirmed_by_client\n      t.datetime :redundancy_confirmed_at\n      t.integer :redundancy_confirmed_as\n\n      t.timestamps\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130105203021_create_metadata.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateMetadata < ActiveRecord::Migration[4.2]\n  def change\n    create_table :metadata do |t|\n      t.string :uuid\n      t.string :created_by_client\n      t.string :created_by_user\n      t.datetime :created_at\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :target_uuid\n      t.string :target_kind\n      t.references :native_target, :polymorphic => true\n      t.string :metadatum_class\n      t.string :key\n      t.string :value\n      t.text :info # \"unlimited length\" in postgresql\n\n      t.timestamps\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130105224358_rename_metadata_class.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameMetadataClass < ActiveRecord::Migration[4.2]\n  def up\n    rename_column :metadata, :metadatum_class, :metadata_class\n  end\n\n  def down\n    rename_column :metadata, :metadata_class, :metadatum_class\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130105224618_rename_collection_created_by_client.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameCollectionCreatedByClient < ActiveRecord::Migration[4.2]\n  def up\n    rename_column :collections, :create_by_client, :created_by_client\n  end\n\n  def down\n    rename_column :collections, :created_by_client, :create_by_client\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130107181109_add_uuid_to_collections.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddUuidToCollections < ActiveRecord::Migration[4.2]\n  def change\n    add_column :collections, :uuid, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130107212832_create_nodes.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateNodes < ActiveRecord::Migration[4.2]\n  def up\n    create_table :nodes do |t|\n      t.string :uuid\n      t.string :created_by_client\n      t.string :created_by_user\n      t.datetime :created_at\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.integer :slot_number\n      t.string :hostname\n      t.string :domain\n      t.string :ip_address\n      t.datetime :first_ping_at\n      t.datetime :last_ping_at\n      t.text :info\n\n      t.timestamps\n    end\n    add_index :nodes, :uuid, :unique => true\n    add_index :nodes, :slot_number, :unique => true\n    add_index :nodes, :hostname, :unique => true\n  end\n  def down\n    drop_table :nodes\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130109175700_create_pipelines.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreatePipelines < ActiveRecord::Migration[4.2]\n  def up\n    create_table :pipelines do |t|\n      t.string :uuid\n      t.string :created_by_client\n      t.string :created_by_user\n      t.datetime :created_at\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :name\n      t.text :components\n\n      t.timestamps\n    end\n    add_index :pipelines, :uuid, :unique => true\n  end\n  def down\n    drop_table :pipelines\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130109220548_create_pipeline_invocations.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreatePipelineInvocations < ActiveRecord::Migration[4.2]\n  def up\n    create_table :pipeline_invocations do |t|\n      t.string :uuid\n      t.string :created_by_client\n      t.string :created_by_user\n      t.datetime :created_at\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :pipeline_uuid\n      t.string :name\n      t.text :components\n      t.boolean :success, :null => true\n      t.boolean :active, :default => false\n\n      t.timestamps\n    end\n    add_index :pipeline_invocations, :uuid, :unique => true\n  end\n  def down\n    drop_table :pipeline_invocations\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130113214204_add_index_to_collections_and_metadata.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddIndexToCollectionsAndMetadata < ActiveRecord::Migration[4.2]\n  def up\n    add_index :collections, :uuid, :unique => true\n    add_index :metadata, :uuid, :unique => true\n  end\n  def down\n    remove_index :metadata, :uuid\n    remove_index :collections, :uuid\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130116024233_create_specimens.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateSpecimens < ActiveRecord::Migration[4.2]\n  def up\n    create_table :specimens do |t|\n      t.string :uuid\n      t.string :created_by_client\n      t.string :created_by_user\n      t.datetime :created_at\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :material\n\n      t.timestamps\n    end\n    add_index :specimens, :uuid, :unique => true\n  end\n  def down\n    drop_table :specimens\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130116215213_create_projects.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateProjects < ActiveRecord::Migration[4.2]\n  def up\n    create_table :projects do |t|\n      t.string :uuid\n      t.string :created_by_client\n      t.string :created_by_user\n      t.datetime :created_at\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :name\n      t.text :description\n\n      t.timestamps\n    end\n    add_index :projects, :uuid, :unique => true\n  end\n  def down\n    drop_table :projects\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130118002239_rename_metadata_attributes.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameMetadataAttributes < ActiveRecord::Migration[4.2]\n  def up\n    rename_column :metadata, :target_kind, :tail_kind\n    rename_column :metadata, :target_uuid, :tail\n    rename_column :metadata, :value, :head\n    rename_column :metadata, :key, :name\n    add_column :metadata, :head_kind, :string\n    add_index :metadata, :head\n    add_index :metadata, :head_kind\n    add_index :metadata, :tail\n    add_index :metadata, :tail_kind\n    begin\n      Metadatum.where('head like ?', 'orvos#%').each do |m|\n        kind_uuid = m.head.match /^(orvos\\#.*)\\#([-0-9a-z]+)$/\n        if kind_uuid\n          m.update(head_kind: kind_uuid[1],\n                              head: kind_uuid[2])\n        end\n      end\n    rescue\n    end\n  end\n\n  def down\n    begin\n      Metadatum.where('head_kind is not null and head_kind <> ? and head is not null', '').each do |m|\n        m.update(head: m.head_kind + '#' + m.head)\n      end\n    rescue\n    end\n    remove_index :metadata, :tail_kind\n    remove_index :metadata, :tail\n    remove_index :metadata, :head_kind\n    remove_index :metadata, :head\n    rename_column :metadata, :name, :key\n    remove_column :metadata, :head_kind\n    rename_column :metadata, :head, :value\n    rename_column :metadata, :tail, :target_uuid\n    rename_column :metadata, :tail_kind, :target_kind\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130122020042_create_users.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateUsers < ActiveRecord::Migration[4.2]\n  def change\n    create_table :users do |t|\n      t.string :uuid\n      t.string :created_by_client\n      t.string :created_by_user\n      t.datetime :created_at\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :email\n      t.string :first_name\n      t.string :last_name\n      t.string :identity_url\n      t.boolean :is_admin\n      t.text :prefs\n\n      t.timestamps\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130122201442_create_logs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateLogs < ActiveRecord::Migration[4.2]\n  def up\n    create_table :logs do |t|\n      t.string :uuid\n      t.string :created_by_client\n      t.string :created_by_user\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.string :object_kind\n      t.string :object_uuid\n      t.datetime :event_at\n      t.string :event_type\n      t.text :summary\n      t.text :info\n\n      t.timestamps\n    end\n    add_index :logs, :uuid, :unique => true\n    add_index :logs, :object_kind\n    add_index :logs, :object_uuid\n    add_index :logs, :event_type\n    add_index :logs, :event_at\n    add_index :logs, :summary\n  end\n\n  def down\n    drop_table :logs  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130122221616_add_modified_at_to_logs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddModifiedAtToLogs < ActiveRecord::Migration[4.2]\n  def change\n    add_column :logs, :modified_at, :datetime\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130123174514_add_uuid_index_to_users.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddUuidIndexToUsers < ActiveRecord::Migration[4.2]\n  def change\n    add_index :users, :uuid, :unique => true\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130123180224_create_api_clients.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateApiClients < ActiveRecord::Migration[4.2]\n  def change\n    create_table :api_clients do |t|\n      t.string :uuid\n      t.string :created_by_client\n      t.string :created_by_user\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :name\n      t.string :url_prefix\n\n      t.timestamps\n    end\n    add_index :api_clients, :uuid, :unique => true\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130123180228_create_api_client_authorizations.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateApiClientAuthorizations < ActiveRecord::Migration[4.2]\n  def change\n    create_table :api_client_authorizations do |t|\n      t.string :api_token, :null => false\n      t.references :api_client, :null => false\n      t.references :user, :null => false\n      t.string :created_by_ip_address\n      t.string :last_used_by_ip_address\n      t.datetime :last_used_at\n      t.datetime :expires_at\n\n      t.timestamps\n    end\n    add_index :api_client_authorizations, :api_token, :unique => true\n    add_index :api_client_authorizations, :api_client_id\n    add_index :api_client_authorizations, :user_id\n    add_index :api_client_authorizations, :expires_at\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130125220425_rename_created_by_to_owner.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameCreatedByToOwner < ActiveRecord::Migration[4.2]\n  def tables\n    %w{api_clients collections logs metadata nodes pipelines pipeline_invocations projects specimens users}\n  end\n\n  def up\n    tables.each do |t|\n      remove_column t.to_sym, :created_by_client\n      rename_column t.to_sym, :created_by_user, :owner\n    end\n  end\n\n  def down\n    tables.reverse.each do |t|\n      rename_column t.to_sym, :owner, :created_by_user\n      add_column t.to_sym, :created_by_client, :string\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130128202518_rename_metadata_to_links.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameMetadataToLinks < ActiveRecord::Migration[4.2]\n  def up\n    rename_table :metadata, :links\n    rename_column :links, :tail, :tail_uuid\n    rename_column :links, :head, :head_uuid\n    rename_column :links, :info, :properties\n    rename_column :links, :metadata_class, :link_class\n    rename_index :links, :index_metadata_on_head_kind, :index_links_on_head_kind\n    rename_index :links, :index_metadata_on_head, :index_links_on_head_uuid\n    rename_index :links, :index_metadata_on_tail_kind, :index_links_on_tail_kind\n    rename_index :links, :index_metadata_on_tail, :index_links_on_tail_uuid\n    rename_index :links, :index_metadata_on_uuid, :index_links_on_uuid\n  end\n\n  def down\n    rename_index :links, :index_links_on_uuid, :index_metadata_on_uuid\n    rename_index :links, :index_links_on_head_kind, :index_metadata_on_head_kind\n    rename_index :links, :index_links_on_head_uuid, :index_metadata_on_head\n    rename_index :links, :index_links_on_tail_kind, :index_metadata_on_tail_kind\n    rename_index :links, :index_links_on_tail_uuid, :index_metadata_on_tail\n    rename_column :links, :link_class, :metadata_class\n    rename_column :links, :properties, :info\n    rename_column :links, :head_uuid, :head\n    rename_column :links, :tail_uuid, :tail\n    rename_table :links, :metadata\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130128231343_add_properties_to_specimen.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddPropertiesToSpecimen < ActiveRecord::Migration[4.2]\n  def change\n    add_column :specimens, :properties, :text\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130130205749_add_manifest_text_to_collection.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddManifestTextToCollection < ActiveRecord::Migration[4.2]\n  def change\n    add_column :collections, :manifest_text, :text\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130203104818_create_jobs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateJobs < ActiveRecord::Migration[4.2]\n  def change\n    create_table :jobs do |t|\n      t.string :uuid\n      t.string :owner\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :submit_id\n      t.string :command\n      t.string :command_version\n      t.text :command_parameters\n      t.string :cancelled_by_client\n      t.string :cancelled_by_user\n      t.datetime :cancelled_at\n      t.datetime :started_at\n      t.datetime :finished_at\n      t.boolean :running\n      t.boolean :success\n      t.string :output\n\n      t.timestamps\n    end\n    add_index :jobs, :uuid, :unique => true\n    add_index :jobs, :submit_id, :unique => true\n    add_index :jobs, :command\n    add_index :jobs, :finished_at\n    add_index :jobs, :started_at\n    add_index :jobs, :output\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130203104824_create_job_steps.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateJobSteps < ActiveRecord::Migration[4.2]\n  def change\n    create_table :job_steps do |t|\n      t.string :uuid\n      t.string :owner\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :job_uuid\n      t.integer :sequence\n      t.text :parameters\n      t.text :output\n      t.float :progress\n      t.boolean :success\n\n      t.timestamps\n    end\n    add_index :job_steps, :uuid, :unique => true\n    add_index :job_steps, :job_uuid\n    add_index :job_steps, :sequence\n    add_index :job_steps, :success\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130203115329_add_priority_to_jobs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddPriorityToJobs < ActiveRecord::Migration[4.2]\n  def change\n    add_column :jobs, :priority, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130207195855_add_index_on_timestamps.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddIndexOnTimestamps < ActiveRecord::Migration[4.2]\n  def tables\n    %w{api_clients collections jobs job_steps links logs nodes pipeline_invocations pipelines projects specimens users}\n  end\n\n  def change\n    tables.each do |t|\n      add_index t.to_sym, :created_at\n      add_index t.to_sym, :modified_at\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130218181504_add_properties_to_pipeline_invocations.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddPropertiesToPipelineInvocations < ActiveRecord::Migration[4.2]\n  def change\n    add_column :pipeline_invocations, :properties, :text\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130226170000_remove_native_target_from_links.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RemoveNativeTargetFromLinks < ActiveRecord::Migration[4.2]\n  def up\n    remove_column :links, :native_target_id\n    remove_column :links, :native_target_type\n  end\n  def down\n    add_column :links, :native_target_id, :integer\n    add_column :links, :native_target_type, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130313175417_rename_projects_to_groups.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameProjectsToGroups < ActiveRecord::Migration[4.2]\n  def up\n    rename_table :projects, :groups\n    rename_index :groups, :index_projects_on_created_at, :index_groups_on_created_at\n    rename_index :groups, :index_projects_on_modified_at, :index_groups_on_modified_at\n    rename_index :groups, :index_projects_on_uuid, :index_groups_on_uuid\n    Link.update_all({head_kind:'orvos#group'}, ['head_kind=?','orvos#project'])\n    Link.update_all({tail_kind:'orvos#group'}, ['tail_kind=?','orvos#project'])\n    Log.update_all({object_kind:'orvos#group'}, ['object_kind=?','orvos#project'])\n  end\n\n  def down\n    Log.update_all({object_kind:'orvos#project'}, ['object_kind=?','orvos#group'])\n    Link.update_all({tail_kind:'orvos#project'}, ['tail_kind=?','orvos#group'])\n    Link.update_all({head_kind:'orvos#project'}, ['head_kind=?','orvos#group'])\n    rename_index :groups, :index_groups_on_created_at, :index_projects_on_created_at\n    rename_index :groups, :index_groups_on_modified_at, :index_projects_on_modified_at\n    rename_index :groups, :index_groups_on_uuid, :index_projects_on_uuid\n    rename_table :groups, :projects\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130315155820_add_is_locked_by_to_jobs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddIsLockedByToJobs < ActiveRecord::Migration[4.2]\n  def change\n    add_column :jobs, :is_locked_by, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130315183626_add_log_to_jobs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddLogToJobs < ActiveRecord::Migration[4.2]\n  def change\n    add_column :jobs, :log, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130315213205_add_tasks_summary_to_jobs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddTasksSummaryToJobs < ActiveRecord::Migration[4.2]\n  def change\n    add_column :jobs, :tasks_summary, :text\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130318002138_add_resource_limits_to_jobs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddResourceLimitsToJobs < ActiveRecord::Migration[4.2]\n  def change\n    add_column :jobs, :resource_limits, :text\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130319165853_rename_job_command_to_script.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameJobCommandToScript < ActiveRecord::Migration[4.2]\n  def up\n    rename_column :jobs, :command, :script\n    rename_column :jobs, :command_parameters, :script_parameters\n    rename_column :jobs, :command_version, :script_version\n    rename_index :jobs, :index_jobs_on_command, :index_jobs_on_script\n  end\n\n  def down\n    rename_index :jobs, :index_jobs_on_script, :index_jobs_on_command\n    rename_column :jobs, :script_version, :command_version\n    rename_column :jobs, :script_parameters, :command_parameters\n    rename_column :jobs, :script, :command\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130319180730_rename_pipeline_invocation_to_pipeline_instance.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenamePipelineInvocationToPipelineInstance < ActiveRecord::Migration[4.2]\n  def up\n    rename_table :pipeline_invocations, :pipeline_instances\n    rename_index :pipeline_instances, :index_pipeline_invocations_on_created_at, :index_pipeline_instances_on_created_at\n    rename_index :pipeline_instances, :index_pipeline_invocations_on_modified_at, :index_pipeline_instances_on_modified_at\n    rename_index :pipeline_instances, :index_pipeline_invocations_on_uuid, :index_pipeline_instances_on_uuid\n    Link.update_all({head_kind:'orvos#pipeline_instance'}, ['head_kind=?','orvos#pipeline_invocation'])\n    Link.update_all({tail_kind:'orvos#pipeline_instance'}, ['tail_kind=?','orvos#pipeline_invocation'])\n    Log.update_all({object_kind:'orvos#pipeline_instance'}, ['object_kind=?','orvos#pipeline_invocation'])\n  end\n\n  def down\n    Link.update_all({head_kind:'orvos#pipeline_invocation'}, ['head_kind=?','orvos#pipeline_instance'])\n    Link.update_all({tail_kind:'orvos#pipeline_invocation'}, ['tail_kind=?','orvos#pipeline_instance'])\n    Log.update_all({object_kind:'orvos#pipeline_invocation'}, ['object_kind=?','orvos#pipeline_instance'])\n    rename_index :pipeline_instances, :index_pipeline_instances_on_created_at, :index_pipeline_invocations_on_created_at\n    rename_index :pipeline_instances, :index_pipeline_instances_on_modified_at, :index_pipeline_invocations_on_modified_at\n    rename_index :pipeline_instances, :index_pipeline_instances_on_uuid, :index_pipeline_invocations_on_uuid\n    rename_table :pipeline_instances, :pipeline_invocations\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130319194637_rename_pipelines_to_pipeline_templates.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenamePipelinesToPipelineTemplates < ActiveRecord::Migration[4.2]\n  def up\n    rename_column :pipeline_instances, :pipeline_uuid, :pipeline_template_uuid\n    rename_table :pipelines, :pipeline_templates\n    rename_index :pipeline_templates, :index_pipelines_on_created_at, :index_pipeline_templates_on_created_at\n    rename_index :pipeline_templates, :index_pipelines_on_modified_at, :index_pipeline_templates_on_modified_at\n    rename_index :pipeline_templates, :index_pipelines_on_uuid, :index_pipeline_templates_on_uuid\n    Link.update_all({head_kind:'orvos#pipeline'}, ['head_kind=?','orvos#pipeline_template'])\n    Link.update_all({tail_kind:'orvos#pipeline'}, ['tail_kind=?','orvos#pipeline_template'])\n    Log.update_all({object_kind:'orvos#pipeline'}, ['object_kind=?','orvos#pipeline_template'])\n  end\n\n  def down\n    Link.update_all({head_kind:'orvos#pipeline_template'}, ['head_kind=?','orvos#pipeline'])\n    Link.update_all({tail_kind:'orvos#pipeline_template'}, ['tail_kind=?','orvos#pipeline'])\n    Log.update_all({object_kind:'orvos#pipeline_template'}, ['object_kind=?','orvos#pipeline'])\n    rename_index :pipeline_templates, :index_pipeline_templates_on_created_at, :index_pipelines_on_created_at\n    rename_index :pipeline_templates, :index_pipeline_templates_on_modified_at, :index_pipelines_on_modified_at\n    rename_index :pipeline_templates, :index_pipeline_templates_on_uuid, :index_pipelines_on_uuid\n    rename_table :pipeline_templates, :pipelines\n    rename_column :pipeline_instances, :pipeline_template_uuid, :pipeline_uuid\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130319201431_rename_job_steps_to_job_tasks.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameJobStepsToJobTasks < ActiveRecord::Migration[4.2]\n  def up\n    rename_table :job_steps, :job_tasks\n    rename_index :job_tasks, :index_job_steps_on_created_at, :index_job_tasks_on_created_at\n    rename_index :job_tasks, :index_job_steps_on_job_uuid, :index_job_tasks_on_job_uuid\n    rename_index :job_tasks, :index_job_steps_on_modified_at, :index_job_tasks_on_modified_at\n    rename_index :job_tasks, :index_job_steps_on_sequence, :index_job_tasks_on_sequence\n    rename_index :job_tasks, :index_job_steps_on_success, :index_job_tasks_on_success\n    rename_index :job_tasks, :index_job_steps_on_uuid, :index_job_tasks_on_uuid\n  end\n\n  def down\n    rename_index :job_steps, :index_job_tasks_on_created_at, :index_job_steps_on_created_at\n    rename_index :job_steps, :index_job_tasks_on_job_uuid, :index_job_steps_on_job_uuid\n    rename_index :job_steps, :index_job_tasks_on_modified_at, :index_job_steps_on_modified_at\n    rename_index :job_steps, :index_job_tasks_on_sequence, :index_job_steps_on_sequence\n    rename_index :job_steps, :index_job_tasks_on_success, :index_job_steps_on_success\n    rename_index :job_steps, :index_job_tasks_on_uuid, :index_job_steps_on_uuid\n    rename_table :job_tasks, :job_steps\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130319235957_add_default_owner_to_users.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddDefaultOwnerToUsers < ActiveRecord::Migration[4.2]\n  def change\n    add_column :users, :default_owner, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130320000107_add_default_owner_to_api_client_authorizations.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddDefaultOwnerToApiClientAuthorizations < ActiveRecord::Migration[4.2]\n  def change\n    add_column :api_client_authorizations, :default_owner, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130326173804_create_commits.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateCommits < ActiveRecord::Migration[4.2]\n  def change\n    create_table :commits do |t|\n      t.string :repository_name\n      t.string :sha1\n      t.string :message\n\n      t.timestamps\n    end\n    add_index :commits, [:repository_name, :sha1], :unique => true\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130326182917_create_commit_ancestors.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateCommitAncestors < ActiveRecord::Migration[4.2]\n  def change\n    create_table :commit_ancestors do |t|\n      t.string :repository_name\n      t.string :descendant, :null => false\n      t.string :ancestor, :null => false\n      t.boolean :is, :default => false, :null => false\n\n      t.timestamps\n    end\n    add_index :commit_ancestors, [:descendant, :ancestor], :unique => true\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130415020241_rename_orvos_to_arvados.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameOrvosToArvados < ActiveRecord::Migration[4.2]\n  def up\n    Link.update_all(\"head_kind=replace(head_kind,'orvos','arvados')\")\n    Link.update_all(\"tail_kind=replace(tail_kind,'orvos','arvados')\")\n    Log.update_all(\"object_kind=replace(object_kind,'orvos','arvados')\")\n  end\n\n  def down\n    Link.update_all(\"head_kind=replace(head_kind,'arvados','orvos')\")\n    Link.update_all(\"tail_kind=replace(tail_kind,'arvados','orvos')\")\n    Log.update_all(\"object_kind=replace(object_kind,'arvados','orvos')\")\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130425024459_create_keep_disks.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateKeepDisks < ActiveRecord::Migration[4.2]\n  def change\n    create_table :keep_disks do |t|\n      t.string :uuid, :null => false\n      t.string :owner, :null => false\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :ping_secret, :null => false\n      t.string :node_uuid\n      t.string :filesystem_uuid\n      t.integer :bytes_total\n      t.integer :bytes_free\n      t.boolean :is_readable, :null => false, :default => true\n      t.boolean :is_writable, :null => false, :default => true\n      t.datetime :last_read_at\n      t.datetime :last_write_at\n      t.datetime :last_ping_at\n\n      t.timestamps\n    end\n    add_index :keep_disks, :uuid, :unique => true\n    add_index :keep_disks, :filesystem_uuid\n    add_index :keep_disks, :node_uuid\n    add_index :keep_disks, :last_ping_at\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130425214427_add_service_host_and_service_port_and_service_ssl_flag_to_keep_disks.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddServiceHostAndServicePortAndServiceSslFlagToKeepDisks < ActiveRecord::Migration[4.2]\n  def change\n    add_column :keep_disks, :service_host, :string\n    add_column :keep_disks, :service_port, :integer\n    add_column :keep_disks, :service_ssl_flag, :boolean\n    add_index :keep_disks, [:service_host, :service_port, :last_ping_at],\n      name: 'keep_disks_service_host_port_ping_at_index'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130523060112_add_created_by_job_task_to_job_tasks.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddCreatedByJobTaskToJobTasks < ActiveRecord::Migration[4.2]\n  def change\n    add_column :job_tasks, :created_by_job_task, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130523060213_add_qsequence_to_job_tasks.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddQsequenceToJobTasks < ActiveRecord::Migration[4.2]\n  def change\n    add_column :job_tasks, :qsequence, :integer\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130524042319_fix_job_task_qsequence_type.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass FixJobTaskQsequenceType < ActiveRecord::Migration[4.2]\n  def up\n    change_column :job_tasks, :qsequence, :integer, :limit => 8\n  end\n\n  def down\n    change_column :job_tasks, :qsequence, :integer\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130528134100_update_nodes_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass UpdateNodesIndex < ActiveRecord::Migration[4.2]\n  def up\n    remove_index :nodes, :hostname\n    add_index :nodes, :hostname\n  end\n  def down\n    remove_index :nodes, :hostname\n    add_index :nodes, :hostname, :unique => true\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130606183519_create_authorized_keys.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateAuthorizedKeys < ActiveRecord::Migration[4.2]\n  def change\n    create_table :authorized_keys do |t|\n      t.string :uuid, :null => false\n      t.string :owner, :null => false\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :name\n      t.string :key_type\n      t.string :authorized_user\n      t.text :public_key\n      t.datetime :expires_at\n\n      t.timestamps\n    end\n    add_index :authorized_keys, :uuid, :unique => true\n    add_index :authorized_keys, [:authorized_user, :expires_at]\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130608053730_create_virtual_machines.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateVirtualMachines < ActiveRecord::Migration[4.2]\n  def change\n    create_table :virtual_machines do |t|\n      t.string :uuid, :null => false\n      t.string :owner, :null => false\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :hostname\n\n      t.timestamps\n    end\n    add_index :virtual_machines, :uuid, :unique => true\n    add_index :virtual_machines, :hostname\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130610202538_create_repositories.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateRepositories < ActiveRecord::Migration[4.2]\n  def change\n    create_table :repositories do |t|\n      t.string :uuid, :null => false\n      t.string :owner, :null => false\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :name\n      t.string :fetch_url\n      t.string :push_url\n\n      t.timestamps\n    end\n    add_index :repositories, :uuid, :unique => true\n    add_index :repositories, :name\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130611163736_rename_authorized_key_authorized_user_to_authorized_user_uuid.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameAuthorizedKeyAuthorizedUserToAuthorizedUserUuid < ActiveRecord::Migration[4.2]\n  def up\n    remove_index :authorized_keys, [:authorized_user, :expires_at]\n    rename_column :authorized_keys, :authorized_user, :authorized_user_uuid\n    add_index :authorized_keys, [:authorized_user_uuid, :expires_at]\n  end\n\n  def down\n    remove_index :authorized_keys, [:authorized_user_uuid, :expires_at]\n    rename_column :authorized_keys, :authorized_user_uuid, :authorized_user\n    add_index :authorized_keys, [:authorized_user, :expires_at]\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130612042554_add_name_unique_index_to_repositories.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddNameUniqueIndexToRepositories < ActiveRecord::Migration[4.2]\n  def up\n    remove_index :repositories, :name\n    add_index :repositories, :name, :unique => true\n  end\n\n  def down\n    remove_index :repositories, :name\n    add_index :repositories, :name\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130617150007_add_is_trusted_to_api_clients.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddIsTrustedToApiClients < ActiveRecord::Migration[4.2]\n  def change\n    add_column :api_clients, :is_trusted, :boolean, :default => false\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130626002829_add_is_active_to_users.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddIsActiveToUsers < ActiveRecord::Migration[4.2]\n  def change\n    add_column :users, :is_active, :boolean, :default => false\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130626022810_activate_all_admins.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass ActivateAllAdmins < ActiveRecord::Migration[4.2]\n  def up\n    User.update_all({is_active: true}, ['is_admin=?', true])\n  end\n\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130627154537_create_traits.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateTraits < ActiveRecord::Migration[4.2]\n  def change\n    create_table :traits do |t|\n      t.string :uuid, :null => false\n      t.string :owner, :null => false\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.string :name\n      t.text :properties\n\n      t.timestamps\n    end\n    add_index :traits, :uuid, :unique => true\n    add_index :traits, :name\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130627184333_create_humans.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateHumans < ActiveRecord::Migration[4.2]\n  def change\n    create_table :humans do |t|\n      t.string :uuid, :null => false\n      t.string :owner, :null => false\n      t.string :modified_by_client\n      t.string :modified_by_user\n      t.datetime :modified_at\n      t.text :properties\n\n      t.timestamps\n    end\n    add_index :humans, :uuid, :unique => true\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130708163414_rename_foreign_uuid_attributes.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameForeignUuidAttributes < ActiveRecord::Migration[4.2]\n  def change\n    rename_column :api_client_authorizations, :default_owner, :default_owner_uuid\n    [:api_clients, :authorized_keys, :collections,\n     :groups, :humans, :job_tasks, :jobs, :keep_disks,\n     :links, :logs, :nodes, :pipeline_instances, :pipeline_templates,\n     :repositories, :specimens, :traits, :users, :virtual_machines].each do |t|\n      rename_column t, :owner, :owner_uuid\n      rename_column t, :modified_by_client, :modified_by_client_uuid\n      rename_column t, :modified_by_user, :modified_by_user_uuid\n    end\n    rename_column :collections, :redundancy_confirmed_by_client, :redundancy_confirmed_by_client_uuid\n    rename_column :jobs, :is_locked_by, :is_locked_by_uuid\n    rename_column :job_tasks, :created_by_job_task, :created_by_job_task_uuid\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130708182912_rename_job_foreign_uuid_attributes.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameJobForeignUuidAttributes < ActiveRecord::Migration[4.2]\n  def change\n    rename_column :jobs, :cancelled_by_client, :cancelled_by_client_uuid\n    rename_column :jobs, :cancelled_by_user, :cancelled_by_user_uuid\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130708185153_rename_user_default_owner.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameUserDefaultOwner < ActiveRecord::Migration[4.2]\n  def change\n    rename_column :users, :default_owner, :default_owner_uuid\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20130724153034_add_scopes_to_api_client_authorizations.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddScopesToApiClientAuthorizations < ActiveRecord::Migration[4.2]\n  def change\n    add_column :api_client_authorizations, :scopes, :text, :null => false, :default => ['all'].to_yaml\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20131007180607_rename_resource_limits_to_runtime_constraints.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameResourceLimitsToRuntimeConstraints < ActiveRecord::Migration[4.2]\n  def change\n    rename_column :jobs, :resource_limits, :runtime_constraints\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140117231056_normalize_collection_uuid.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass NormalizeCollectionUuid < ActiveRecord::Migration[4.2]\n  def count_orphans\n    %w(head tail).each do |ht|\n      results = ActiveRecord::Base.connection.execute(<<-EOS)\nSELECT COUNT(links.*)\n FROM links\n LEFT JOIN collections c\n   ON links.#{ht}_uuid = c.uuid\n WHERE (#{ht}_kind='arvados#collection' or #{ht}_uuid ~ '^[0-9a-f]{32,}')\n   AND #{ht}_uuid IS NOT NULL\n   AND #{ht}_uuid NOT IN (SELECT uuid FROM collections)\nEOS\n      puts \"#{results.first['count'].to_i} links with #{ht}_uuid pointing nowhere.\"\n    end\n  end\n\n  def up\n    # Normalize uuids in the collections table to\n    # {hash}+{size}. Existing uuids might be {hash},\n    # {hash}+{size}+K@{instance-name}, {hash}+K@{instance-name}, etc.\n\n    count_orphans\n    puts \"Normalizing collection UUIDs.\"\n\n    update_sql <<-EOS\nUPDATE collections\n SET uuid = regexp_replace(uuid,'\\\\+.*','') || '+' || length(manifest_text)\n WHERE uuid !~ '^[0-9a-f]{32,}\\\\+[0-9]+$'\n   AND (regexp_replace(uuid,'\\\\+.*','') || '+' || length(manifest_text))\n     NOT IN (SELECT uuid FROM collections)\nEOS\n\n    count_orphans\n    puts \"Updating links by stripping +K@.* from *_uuid attributes.\"\n\n    update_sql <<-EOS\nUPDATE links\n SET head_uuid = regexp_replace(head_uuid,'\\\\+K@.*','')\n WHERE head_uuid like '%+K@%'\nEOS\n    update_sql <<-EOS\nUPDATE links\n SET tail_uuid = regexp_replace(tail_uuid,'\\\\+K@.*','')\n WHERE tail_uuid like '%+K@%'\nEOS\n\n    count_orphans\n    puts \"Updating links by searching bare collection hashes using regexp.\"\n\n    # Next, update {hash} (and any other non-normalized forms) to\n    # {hash}+{size}. This can only work where the corresponding\n    # collection is found in the collections table (otherwise we can't\n    # know the size).\n    %w(head tail).each do |ht|\n      update_sql <<-EOS\nUPDATE links\n SET #{ht}_uuid = c.uuid\n FROM collections c\n WHERE #{ht}_uuid IS NOT NULL\n   AND (#{ht}_kind='arvados#collection' or #{ht}_uuid ~ '^[0-9a-f]{32,}')\n   AND #{ht}_uuid NOT IN (SELECT uuid FROM collections)\n   AND regexp_replace(#{ht}_uuid,'\\\\+.*','') = regexp_replace(c.uuid,'\\\\+.*','')\n   AND c.uuid ~ '^[0-9a-f]{32,}\\\\+[0-9]+$'\nEOS\n    end\n\n    count_orphans\n    puts \"Stripping \\\"+K@.*\\\" from jobs.output, jobs.log, job_tasks.output.\"\n\n    update_sql <<-EOS\nUPDATE jobs\n SET output = regexp_replace(output,'\\\\+K@.*','')\n WHERE output ~ '^[0-9a-f]{32,}\\\\+[0-9]+\\\\+K@\\\\w+$'\nEOS\n    update_sql <<-EOS\nUPDATE jobs\n SET log = regexp_replace(log,'\\\\+K@.*','')\n WHERE log ~ '^[0-9a-f]{32,}\\\\+[0-9]+\\\\+K@\\\\w+$'\nEOS\n    update_sql <<-EOS\nUPDATE job_tasks\n SET output = regexp_replace(output,'\\\\+K@.*','')\n WHERE output ~ '^[0-9a-f]{32,}\\\\+[0-9]+\\\\+K@\\\\w+$'\nEOS\n\n    puts \"Done.\"\n  end\n\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140124222114_fix_link_kind_underscores.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass FixLinkKindUnderscores < ActiveRecord::Migration[4.2]\n  def up\n    update_sql <<-EOS\nUPDATE links\n SET head_kind = 'arvados#virtualMachine'\n WHERE head_kind = 'arvados#virtual_machine'\nEOS\n  end\n\n  def down\n    update_sql <<-EOS\nUPDATE links\n SET head_kind = 'arvados#virtual_machine'\n WHERE head_kind = 'arvados#virtualMachine'\nEOS\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140129184311_normalize_collection_uuids_in_script_parameters.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass NormalizeCollectionUuidsInScriptParameters < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n  def up\n    act_as_system_user do\n      PipelineInstance.all.each do |pi|\n        pi.save! if fix_values_recursively(pi.components)\n      end\n      Job.all.each do |j|\n        changed = false\n        j.script_parameters.each do |p, v|\n          if v.is_a? String and v.match /\\+K/\n            v.gsub! /\\+K\\@\\w+/, ''\n            changed = true\n          end\n        end\n        j.save! if changed\n      end\n    end\n  end\n\n  def down\n  end\n\n  protected\n  def fix_values_recursively fixme\n    changed = false\n    if fixme.is_a? String\n      if fixme.match /\\+K/\n        fixme.gsub! /\\+K\\@\\w+/, ''\n        return true\n      else\n        return false\n      end\n    elsif fixme.is_a? Array\n      fixme.each do |v|\n        changed = fix_values_recursively(v) || changed\n      end\n    elsif fixme.is_a? Hash\n      fixme.each do |p, v|\n        changed = fix_values_recursively(v) || changed\n      end\n    end\n    changed\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140317135600_add_nondeterministic_column_to_job.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddNondeterministicColumnToJob < ActiveRecord::Migration[4.2]\n  def up\n    add_column :jobs, :nondeterministic, :boolean\n  end\n\n  def down\n    remove_column :jobs, :nondeterministic\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140319160547_separate_repository_from_script_version.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass SeparateRepositoryFromScriptVersion < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  def fixup pt\n    c = pt.components\n    c.each do |k, v|\n      commit_ish = v[\"script_version\"]\n      if commit_ish.andand.index(':')\n        want_repo, commit_ish = commit_ish.split(':',2)\n        v[:repository] = want_repo\n        v[:script_version] = commit_ish\n      end\n    end\n    pt.save!\n  end\n\n  def up\n    act_as_system_user do\n      PipelineTemplate.all.each do |pt|\n        fixup pt\n      end\n      PipelineInstance.all.each do |pt|\n        fixup pt\n      end\n    end\n  end\n\n  def down\n    raise ActiveRecord::IrreversibleMigration\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140321191343_add_repository_column_to_job.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddRepositoryColumnToJob < ActiveRecord::Migration[4.2]\n  def up\n    add_column :jobs, :repository, :string\n  end\n\n  def down\n    remove_column :jobs, :repository\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140324024606_add_output_is_persistent_to_job.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddOutputIsPersistentToJob < ActiveRecord::Migration[4.2]\n  def change\n    add_column :jobs, :output_is_persistent, :boolean, null: false, default: false\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140325175653_remove_kind_columns.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RemoveKindColumns < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  def up\n    remove_column :links, :head_kind\n    remove_column :links, :tail_kind\n    remove_column :logs, :object_kind\n  end\n\n  def down\n    add_column :links, :head_kind, :string\n    add_column :links, :tail_kind, :string\n    add_column :logs, :object_kind, :string\n\n    act_as_system_user do\n      Link.all.each do |l|\n        l.head_kind = ArvadosModel::resource_class_for_uuid(l.head_uuid).kind if l.head_uuid\n        l.tail_kind = ArvadosModel::resource_class_for_uuid(l.tail_uuid).kind if l.tail_uuid\n        l.save\n      end\n      Log.all.each do |l|\n        l.object_kind = ArvadosModel::resource_class_for_uuid(l.object_uuid).kind if l.object_uuid\n        l.save\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140402001908_add_system_group.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddSystemGroup < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  def up\n    # Make sure the system group exists.\n    system_group\n  end\n\n  def down\n    act_as_system_user do\n      system_group.destroy\n\n      # Destroy the automatically generated links giving system_group\n      # permission on all users.\n      Link.destroy_all(tail_uuid: system_group_uuid, head_kind: 'arvados#user')\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140407184311_rename_log_info_to_properties.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameLogInfoToProperties < ActiveRecord::Migration[4.2]\n  def change\n    rename_column :logs, :info, :properties\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140421140924_add_group_class_to_groups.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddGroupClassToGroups < ActiveRecord::Migration[4.2]\n  def change\n    add_column :groups, :group_class, :string\n    add_index :groups, :group_class\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140421151939_rename_auth_keys_user_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameAuthKeysUserIndex < ActiveRecord::Migration[4.2]\n  # Rails' default name for this index is so long, Rails can't modify\n  # the index later, because the autogenerated temporary name exceeds\n  # PostgreSQL's 64-character limit.  This migration gives the index\n  # an explicit name to work around that issue.\n  def change\n    rename_index(\"authorized_keys\",\n                 \"index_authorized_keys_on_authorized_user_uuid_and_expires_at\",\n                 \"index_authkeys_on_user_and_expires_at\")\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140421151940_timestamps_not_null.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass TimestampsNotNull < ActiveRecord::Migration[4.2]\n  def up\n    ActiveRecord::Base.connection.tables.each do |t|\n      next if t == 'schema_migrations'\n      change_column t.to_sym, :created_at, :datetime, :null => false\n      change_column t.to_sym, :updated_at, :datetime, :null => false\n    end\n  end\n  def down\n    # There might have been a NULL constraint before this, depending\n    # on the version of Rails used to build the database.\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140422011506_pipeline_instance_state.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass PipelineInstanceState < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  def up\n    add_column :pipeline_instances, :state, :string\n    add_column :pipeline_instances, :components_summary, :text\n\n    PipelineInstance.reset_column_information\n\n    act_as_system_user do\n      PipelineInstance.all.each do |pi|\n        pi.state = PipelineInstance::New\n\n        if !pi.attribute_present? :success   # success is nil\n          if pi[:active] == true\n            pi.state = PipelineInstance::RunningOnServer\n          else\n            if pi.components_look_ready?\n              pi.state = PipelineInstance::Ready\n            else\n              pi.state = PipelineInstance::New\n            end\n          end\n        elsif pi[:success] == true\n          pi.state = PipelineInstance::Complete\n        else\n          pi.state = PipelineInstance::Failed\n        end\n\n        pi.save!\n      end\n    end\n\n# We want to perform addition of state, and removal of active and success in two phases. Hence comment these statements out.\n=begin\n    if column_exists?(:pipeline_instances, :active)\n      remove_column :pipeline_instances, :active\n    end\n\n    if column_exists?(:pipeline_instances, :success)\n      remove_column :pipeline_instances, :success\n    end\n=end\n  end\n\n  def down\n# We want to perform addition of state, and removal of active and success in two phases. Hence comment these statements out.\n=begin\n    add_column :pipeline_instances, :success, :boolean, :null => true\n    add_column :pipeline_instances, :active, :boolean, :default => false\n\n    act_as_system_user do\n      PipelineInstance.all.each do |pi|\n        case pi.state\n        when PipelineInstance::New, PipelineInstance::Ready\n          pi.active = false\n          pi.success = nil\n        when PipelineInstance::RunningOnServer\n          pi.active = true\n          pi.success = nil\n        when PipelineInstance::RunningOnClient\n          pi.active = false\n          pi.success = nil\n        when PipelineInstance::Failed\n          pi.active = false\n          pi.success = false\n        when PipelineInstance::Complete\n          pi.active = false\n          pi.success = true\n        end\n        pi.save!\n      end\n    end\n=end\n\n    if column_exists?(:pipeline_instances, :components_summary)\n      remove_column :pipeline_instances, :components_summary\n    end\n\n    if column_exists?(:pipeline_instances, :state)\n      remove_column :pipeline_instances, :state\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140423132913_add_object_owner_to_logs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddObjectOwnerToLogs < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  def up\n    add_column :logs, :object_owner_uuid, :string\n    act_as_system_user do\n      Log.find_in_batches(:batch_size => 500) do |batch|\n        upd = {}\n        ActiveRecord::Base.transaction do\n          batch.each do |log|\n            if log.properties[\"new_attributes\"]\n              log.object_owner_uuid = log.properties['new_attributes']['owner_uuid']\n              log.save\n            elsif log.properties[\"old_attributes\"]\n              log.object_owner_uuid = log.properties['old_attributes']['owner_uuid']\n              log.save\n            end\n          end\n        end\n      end\n    end\n  end\n\n  def down\n    remove_column :logs, :object_owner_uuid\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140423133559_new_scope_format.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# At the time we introduced scopes everywhere, VirtualMachinesController\n# recognized scopes that gave the URL for a VM to grant access to that VM's\n# login list.  This migration converts those VM-specific scopes to the new\n# general format, and back.\n\nclass NewScopeFormat < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  VM_PATH_REGEX =\n    %r{(/arvados/v1/virtual_machines/[0-9a-z]{5}-[0-9a-z]{5}-[0-9a-z]{15})}\n  OLD_SCOPE_REGEX = %r{^https?://[^/]+#{VM_PATH_REGEX.source}$}\n  NEW_SCOPE_REGEX = %r{^GET #{VM_PATH_REGEX.source}/logins$}\n\n  def fix_scopes_matching(regex)\n    act_as_system_user\n    ApiClientAuthorization.find_each do |auth|\n      auth.scopes = auth.scopes.map do |scope|\n        if match = regex.match(scope)\n          yield match\n        else\n          scope\n        end\n      end\n      auth.save!\n    end\n  end\n\n  def up\n    fix_scopes_matching(OLD_SCOPE_REGEX) do |match|\n      \"GET #{match[1]}/logins\"\n    end\n  end\n\n  def down\n    case Rails.env\n    when 'test'\n      hostname = 'www.example.com'\n    else\n      require 'socket'\n      hostname = Socket.gethostname\n    end\n    fix_scopes_matching(NEW_SCOPE_REGEX) do |match|\n      Rails.application.routes.url_for(controller: 'virtual_machines',\n                                       uuid: match[1].split('/').last,\n                                       host: hostname, protocol: 'https')\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140501165548_add_unique_name_index_to_links.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddUniqueNameIndexToLinks < ActiveRecord::Migration[4.2]\n  def change\n    # Make sure PgPower is here. Otherwise the \"where\" will be ignored\n    # and we'll end up with a far too restrictive unique\n    # constraint. (Rails4 should work without PgPower, but that isn't\n    # tested.)\n    if not PgPower then raise \"No partial column support\" end\n\n    add_index(:links, [:tail_uuid, :name], unique: true,\n              where: \"link_class='name'\",\n              name: 'links_tail_name_unique_if_link_class_name')\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140519205916_create_keep_services.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateKeepServices < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  def change\n    act_as_system_user do\n      create_table :keep_services do |t|\n        t.string :uuid, :null => false\n        t.string :owner_uuid, :null => false\n        t.string :modified_by_client_uuid\n        t.string :modified_by_user_uuid\n        t.datetime :modified_at\n        t.string   :service_host\n        t.integer  :service_port\n        t.boolean  :service_ssl_flag\n        t.string   :service_type\n\n        t.timestamps\n      end\n      add_index :keep_services, :uuid, :unique => true\n\n      add_column :keep_disks, :keep_service_uuid, :string\n\n      KeepDisk.reset_column_information\n\n      services = {}\n\n      KeepDisk.find_each do |k|\n        services[\"#{k[:service_host]}_#{k[:service_port]}_#{k[:service_ssl_flag]}\"] = {\n          service_host: k[:service_host],\n          service_port: k[:service_port],\n          service_ssl_flag: k[:service_ssl_flag],\n          service_type: 'disk',\n          owner_uuid: k[:owner_uuid]\n        }\n      end\n\n      services.each do |k, v|\n        v['uuid'] = KeepService.create(v).uuid\n      end\n\n      KeepDisk.find_each do |k|\n        k.keep_service_uuid = services[\"#{k[:service_host]}_#{k[:service_port]}_#{k[:service_ssl_flag]}\"]['uuid']\n        k.save\n      end\n\n      remove_column :keep_disks, :service_host\n      remove_column :keep_disks, :service_port\n      remove_column :keep_disks, :service_ssl_flag\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140527152921_add_description_to_pipeline_templates.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddDescriptionToPipelineTemplates < ActiveRecord::Migration[4.2]\n  def change\n    add_column :pipeline_templates, :description, :text\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140530200539_add_supplied_script_version.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddSuppliedScriptVersion < ActiveRecord::Migration[4.2]\n  def up\n    add_column :jobs, :supplied_script_version, :string\n  end\n\n  def down\n    remove_column :jobs, :supplied_script_version, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140601022548_remove_name_from_collections.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RemoveNameFromCollections < ActiveRecord::Migration[4.2]\n  def up\n    remove_column :collections, :name\n  end\n\n  def down\n    add_column :collections, :name, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140602143352_remove_active_and_success_from_pipeline_instances.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RemoveActiveAndSuccessFromPipelineInstances < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  def up\n    if column_exists?(:pipeline_instances, :active)\n      remove_column :pipeline_instances, :active\n    end\n\n    if column_exists?(:pipeline_instances, :success)\n      remove_column :pipeline_instances, :success\n    end\n  end\n\n  def down\n    if !column_exists?(:pipeline_instances, :success)\n      add_column :pipeline_instances, :success, :boolean, :null => true\n    end\n    if !column_exists?(:pipeline_instances, :active)\n      add_column :pipeline_instances, :active, :boolean, :default => false\n    end\n\n    act_as_system_user do\n      PipelineInstance.all.each do |pi|\n        case pi.state\n        when PipelineInstance::New, PipelineInstance::Ready, PipelineInstance::Paused, PipelineInstance::RunningOnClient\n          pi.active = nil\n          pi.success = nil\n        when PipelineInstance::RunningOnServer\n          pi.active = true\n          pi.success = nil\n        when PipelineInstance::Failed\n          pi.active = false\n          pi.success = false\n        when PipelineInstance::Complete\n          pi.active = false\n          pi.success = true\n        end\n        pi.save!\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140607150616_rename_folder_to_project.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameFolderToProject < ActiveRecord::Migration[4.2]\n  def up\n    Group.update_all(\"group_class = 'project'\", \"group_class = 'folder'\")\n  end\n\n  def down\n    Group.update_all(\"group_class = 'folder'\", \"group_class = 'project'\")\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140611173003_add_docker_locator_to_jobs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddDockerLocatorToJobs < ActiveRecord::Migration[4.2]\n  def change\n    add_column :jobs, :docker_image_locator, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140627210837_anonymous_group.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AnonymousGroup < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  def up\n    # create the anonymous group and user\n    anonymous_group\n    anonymous_user\n  end\n\n  def down\n    act_as_system_user do\n      anonymous_user.destroy\n      anonymous_group.destroy\n    end\n  end\n\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140709172343_job_task_serial_qsequence.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass JobTaskSerialQsequence < ActiveRecord::Migration[4.2]\n  SEQ_NAME = \"job_tasks_qsequence_seq\"\n\n  def up\n    execute \"CREATE SEQUENCE #{SEQ_NAME} OWNED BY job_tasks.qsequence;\"\n  end\n\n  def down\n    execute \"DROP SEQUENCE #{SEQ_NAME};\"\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140714184006_empty_collection.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass EmptyCollection < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  def up\n    empty_collection\n  end\n\n  def down\n    # do nothing when migrating down (having the empty collection\n    # and a permission link for it is harmless)\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140811184643_collection_use_regular_uuids.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CollectionUseRegularUuids < ActiveRecord::Migration[4.2]\n  def up\n    add_column :collections, :name, :string\n    add_column :collections, :description, :string\n    add_column :collections, :properties, :text\n    add_column :collections, :expires_at, :date\n    remove_column :collections, :locator\n\n    say_with_time \"Step 1. Move manifest hashes into portable_data_hash field\" do\n      ActiveRecord::Base.connection.execute(\"update collections set portable_data_hash=uuid, uuid=null\")\n    end\n\n    say_with_time \"Step 2. Create new collection objects from the name links in the table.\" do\n      from_clause = %{\nfrom links inner join collections on head_uuid=collections.portable_data_hash\nwhere link_class='name' and collections.uuid is null\n}\n      links = ActiveRecord::Base.connection.select_all %{\nselect links.uuid, head_uuid, tail_uuid, links.name,\nmanifest_text, links.created_at, links.modified_at, links.modified_by_client_uuid, links.modified_by_user_uuid\n#{from_clause}\n}\n      links.each do |d|\n        ActiveRecord::Base.connection.execute %{\ninsert into collections (uuid, portable_data_hash, owner_uuid, name, manifest_text, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, updated_at)\nvalues (#{ActiveRecord::Base.connection.quote Collection.generate_uuid},\n#{ActiveRecord::Base.connection.quote d['head_uuid']},\n#{ActiveRecord::Base.connection.quote d['tail_uuid']},\n#{ActiveRecord::Base.connection.quote d['name']},\n#{ActiveRecord::Base.connection.quote d['manifest_text']},\n#{ActiveRecord::Base.connection.quote d['created_at']},\n#{ActiveRecord::Base.connection.quote d['modified_at']},\n#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},\n#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},\n#{ActiveRecord::Base.connection.quote d['modified_at']})\n}\n      end\n      ActiveRecord::Base.connection.execute \"delete from links where links.uuid in (select links.uuid #{from_clause})\"\n    end\n\n    say_with_time \"Step 3. 
Create new collection objects from the can_read links in the table.\" do\n      from_clause = %{\nfrom links inner join collections on head_uuid=collections.portable_data_hash\nwhere link_class='permission' and links.name='can_read' and collections.uuid is null\n}\n      links = ActiveRecord::Base.connection.select_all %{\nselect links.uuid, head_uuid, tail_uuid, manifest_text, links.created_at, links.modified_at\n#{from_clause}\n}\n      links.each do |d|\n        ActiveRecord::Base.connection.execute %{\ninsert into collections (uuid, portable_data_hash, owner_uuid, manifest_text, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, updated_at)\nvalues (#{ActiveRecord::Base.connection.quote Collection.generate_uuid},\n#{ActiveRecord::Base.connection.quote d['head_uuid']},\n#{ActiveRecord::Base.connection.quote d['tail_uuid']},\n#{ActiveRecord::Base.connection.quote d['manifest_text']},\n#{ActiveRecord::Base.connection.quote d['created_at']},\n#{ActiveRecord::Base.connection.quote d['modified_at']},\n#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},\n#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},\n#{ActiveRecord::Base.connection.quote d['modified_at']})\n}\n      end\n      ActiveRecord::Base.connection.execute \"delete from links where links.uuid in (select links.uuid #{from_clause})\"\n    end\n\n    say_with_time \"Step 4. Migrate remaining orphan collection objects\" do\n      links = ActiveRecord::Base.connection.select_all %{\nselect portable_data_hash, owner_uuid, manifest_text, created_at, modified_at\nfrom collections\nwhere uuid is null and portable_data_hash not in (select portable_data_hash from collections where uuid is not null)\n}\n      links.each do |d|\n        ActiveRecord::Base.connection.execute %{\ninsert into collections (uuid, portable_data_hash, owner_uuid, manifest_text, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, updated_at)\nvalues (#{ActiveRecord::Base.connection.quote Collection.generate_uuid},\n#{ActiveRecord::Base.connection.quote d['portable_data_hash']},\n#{ActiveRecord::Base.connection.quote d['owner_uuid']},\n#{ActiveRecord::Base.connection.quote d['manifest_text']},\n#{ActiveRecord::Base.connection.quote d['created_at']},\n#{ActiveRecord::Base.connection.quote d['modified_at']},\n#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},\n#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},\n#{ActiveRecord::Base.connection.quote d['modified_at']})\n}\n      end\n    end\n\n    say_with_time \"Step 5. Delete old collection objects.\" do\n      ActiveRecord::Base.connection.execute(\"delete from collections where uuid is null\")\n    end\n\n    say_with_time \"Step 6. Delete permission links where tail_uuid is a collection (invalid records)\" do\n      ActiveRecord::Base.connection.execute %{\ndelete from links where links.uuid in (select links.uuid\nfrom links\nwhere tail_uuid like '________________________________+%' and link_class='permission' )\n}\n    end\n\n    say_with_time \"Step 7. 
Migrate collection -> collection provenance links to jobs\" do\n      from_clause = %{\nfrom links\nwhere head_uuid like '________________________________+%' and tail_uuid like '________________________________+%' and links.link_class = 'provenance'\n}\n      links = ActiveRecord::Base.connection.select_all %{\nselect links.uuid, head_uuid, tail_uuid, links.created_at, links.modified_at, links.modified_by_client_uuid, links.modified_by_user_uuid, links.owner_uuid\n#{from_clause}\n}\n      links.each do |d|\n        newuuid = Job.generate_uuid\n        ActiveRecord::Base.connection.execute %{\ninsert into jobs (uuid, script_parameters, output, running, success, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, owner_uuid, updated_at)\nvalues (#{ActiveRecord::Base.connection.quote newuuid},\n#{ActiveRecord::Base.connection.quote \"---\\ninput: \"+d['tail_uuid']},\n#{ActiveRecord::Base.connection.quote d['head_uuid']},\n#{ActiveRecord::Base.connection.quote false},\n#{ActiveRecord::Base.connection.quote true},\n#{ActiveRecord::Base.connection.quote d['created_at']},\n#{ActiveRecord::Base.connection.quote d['modified_at']},\n#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},\n#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},\n#{ActiveRecord::Base.connection.quote d['owner_uuid']},\n#{ActiveRecord::Base.connection.quote d['modified_at']})\n}\n      end\n      ActiveRecord::Base.connection.execute \"delete from links where links.uuid in (select links.uuid #{from_clause})\"\n    end\n\n    say_with_time \"Step 8. Migrate remaining links with head_uuid pointing to collections\" do\n      from_clause = %{\nfrom links inner join collections on links.head_uuid=portable_data_hash\nwhere collections.uuid is not null\n}\n      links = ActiveRecord::Base.connection.select_all %{\nselect links.uuid, collections.uuid as collectionuuid, tail_uuid, link_class, links.properties,\nlinks.name, links.created_at, links.modified_at, links.modified_by_client_uuid, links.modified_by_user_uuid, links.owner_uuid\n#{from_clause}\n}\n      links.each do |d|\n        ActiveRecord::Base.connection.execute %{\ninsert into links (uuid, head_uuid, tail_uuid, link_class, name, properties, created_at, modified_at, modified_by_client_uuid, modified_by_user_uuid, owner_uuid, updated_at)\nvalues (#{ActiveRecord::Base.connection.quote Link.generate_uuid},\n#{ActiveRecord::Base.connection.quote d['collectionuuid']},\n#{ActiveRecord::Base.connection.quote d['tail_uuid']},\n#{ActiveRecord::Base.connection.quote d['link_class']},\n#{ActiveRecord::Base.connection.quote d['name']},\n#{ActiveRecord::Base.connection.quote d['properties']},\n#{ActiveRecord::Base.connection.quote d['created_at']},\n#{ActiveRecord::Base.connection.quote d['modified_at']},\n#{ActiveRecord::Base.connection.quote d['modified_by_client_uuid']},\n#{ActiveRecord::Base.connection.quote d['modified_by_user_uuid']},\n#{ActiveRecord::Base.connection.quote d['owner_uuid']},\n#{ActiveRecord::Base.connection.quote d['modified_at']})\n}\n      end\n      ActiveRecord::Base.connection.execute \"delete from links where links.uuid in (select links.uuid #{from_clause})\"\n    end\n\n    say_with_time \"Step 9. Delete any remaining name links\" do\n      ActiveRecord::Base.connection.execute(\"delete from links where link_class='name'\")\n    end\n\n    say_with_time \"Step 10. 
Validate links table\" do\n      links = ActiveRecord::Base.connection.select_all %{\nselect links.uuid, head_uuid, tail_uuid, link_class, name\nfrom links\nwhere head_uuid like '________________________________+%' or tail_uuid like '________________________________+%'\n}\n      links.each do |d|\n        raise \"Bad row #{d}\"\n      end\n    end\n\n  end\n\n  def down\n    raise ActiveRecord::IrreversibleMigration, \"Can't downmigrate changes to collections and links without potentially losing data.\"\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140817035914_add_unique_name_constraints.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddUniqueNameConstraints < ActiveRecord::Migration[4.2]\n  def change\n    # Ensure uniqueness before adding constraints.\n    [\"collections\", \"pipeline_templates\", \"groups\"].each do |table|\n      rows = ActiveRecord::Base.connection.select_all %{\nselect uuid, owner_uuid, name from #{table} order by owner_uuid, name\n}\n      prev = {}\n      n = 1\n      rows.each do |r|\n        if r[\"owner_uuid\"] == prev[\"owner_uuid\"] and !r[\"name\"].nil? and r[\"name\"] == prev[\"name\"]\n          n += 1\n          ActiveRecord::Base.connection.execute %{\nupdate #{table} set name='#{r[\"name\"]} #{n}' where uuid='#{r[\"uuid\"]}'\n}\n        else\n          n = 1\n        end\n        prev = r\n      end\n    end\n\n    add_index(:collections, [:owner_uuid, :name], unique: true,\n              name: 'collection_owner_uuid_name_unique')\n    add_index(:pipeline_templates, [:owner_uuid, :name], unique: true,\n              name: 'pipeline_template_owner_uuid_name_unique')\n    add_index(:groups, [:owner_uuid, :name], unique: true,\n              name: 'groups_owner_uuid_name_unique')\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140818125735_add_not_null_constraint_to_group_name.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddNotNullConstraintToGroupName < ActiveRecord::Migration[4.2]\n  def change\n    ActiveRecord::Base.connection.execute(\"update groups set name=uuid where name is null or name=''\")\n    change_column_null :groups, :name, false\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140826180337_remove_output_is_persistent_column.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RemoveOutputIsPersistentColumn < ActiveRecord::Migration[4.2]\n  def up\n    remove_column :jobs, :output_is_persistent\n  end\n\n  def down\n    add_column :jobs, :output_is_persistent, :boolean, null: false, default: false\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140828141043_job_priority_fixup.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass JobPriorityFixup < ActiveRecord::Migration[4.2]\n  def up\n    remove_column :jobs, :priority\n    add_column :jobs, :priority, :integer, null: false, default: 0\n  end\n\n  def down\n    remove_column :jobs, :priority\n    add_column :jobs, :priority, :string, null: true, default: nil\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140909183946_add_start_finish_time_to_tasks_and_pipelines.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddStartFinishTimeToTasksAndPipelines < ActiveRecord::Migration[4.2]\n  def up\n    add_column :job_tasks, :started_at, :datetime\n    add_column :job_tasks, :finished_at, :datetime\n    add_column :pipeline_instances, :started_at, :datetime\n    add_column :pipeline_instances, :finished_at, :datetime\n  end\n\n  def down\n    remove_column :job_tasks, :started_at\n    remove_column :job_tasks, :finished_at\n    remove_column :pipeline_instances, :started_at\n    remove_column :pipeline_instances, :finished_at\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140911221252_add_description_to_pipeline_instances_and_jobs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddDescriptionToPipelineInstancesAndJobs < ActiveRecord::Migration[4.2]\n  def up\n    add_column :pipeline_instances, :description, :text, null: true\n    add_column :jobs, :description, :text, null: true\n  end\n\n  def down\n    remove_column :jobs, :description\n    remove_column :pipeline_instances, :description\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140918141529_change_user_owner_uuid_not_null.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass ChangeUserOwnerUuidNotNull < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  def up\n    User.update_all({owner_uuid: system_user_uuid}, 'owner_uuid is null')\n    change_column :users, :owner_uuid, :string, :null => false\n  end\n\n  def down\n    change_column :users, :owner_uuid, :string, :null => true\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140918153541_add_properties_to_node.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddPropertiesToNode < ActiveRecord::Migration[4.2]\n  def up\n    add_column :nodes, :properties, :text\n  end\n\n  def down\n    remove_column :nodes, :properties\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140918153705_add_state_to_job.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddStateToJob < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  def up\n    ActiveRecord::Base.transaction do\n      add_column :jobs, :state, :string\n      Job.reset_column_information\n      Job.update_all({state: 'Cancelled'}, ['state is null and cancelled_at is not null'])\n      Job.update_all({state: 'Failed'}, ['state is null and success = ?', false])\n      Job.update_all({state: 'Complete'}, ['state is null and success = ?', true])\n      Job.update_all({state: 'Running'}, ['state is null and running = ?', true])\n      # Locked/started, but not Running/Failed/Complete? Let's assume it failed.\n      Job.update_all({state: 'Failed'}, ['state is null and (is_locked_by_uuid is not null or started_at is not null)'])\n      Job.update_all({state: 'Queued'}, ['state is null'])\n    end\n  end\n\n  def down\n    remove_column :jobs, :state\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20140924091559_add_job_uuid_to_nodes.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddJobUuidToNodes < ActiveRecord::Migration[4.2]\n  def up\n    change_table :nodes do |t|\n      t.column :job_uuid, :string\n    end\n  end\n\n  def down\n    change_table :nodes do |t|\n      t.remove :job_uuid\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20141111133038_add_arvados_sdk_version_to_jobs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddArvadosSdkVersionToJobs < ActiveRecord::Migration[4.2]\n  def up\n    change_table :jobs do |t|\n      t.column :arvados_sdk_version, :string\n    end\n  end\n\n  def down\n    change_table :jobs do |t|\n      t.remove :arvados_sdk_version\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20141208164553_owner_uuid_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass OwnerUuidIndex < ActiveRecord::Migration[4.2]\n  def tables_with_owner_uuid\n    %w{api_clients authorized_keys collections groups humans\n       job_tasks jobs keep_disks keep_services links logs\n       nodes pipeline_instances pipeline_templates repositories\n       specimens traits users virtual_machines}\n  end\n\n  def up\n    tables_with_owner_uuid.each do |table|\n      add_index table.to_sym, :owner_uuid\n    end\n  end\n\n  def down\n    tables_with_owner_uuid.each do |table|\n      indexes = ActiveRecord::Base.connection.indexes(table)\n      owner_uuid_index = indexes.select do |index|\n        index.columns == ['owner_uuid']\n      end\n      if !owner_uuid_index.empty?\n        remove_index table.to_sym, :owner_uuid\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20141208174553_descriptions_are_strings.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass DescriptionsAreStrings < ActiveRecord::Migration[4.2]\n  def tables_with_description_column\n    %w{collections groups jobs pipeline_instances pipeline_templates}\n  end\n\n  def up\n    tables_with_description_column.each do |table|\n      change_column table.to_sym, :description, :string, :limit => 2**19\n    end\n  end\n\n  def down\n    tables_with_description_column.each do |table|\n      if table == 'collections'\n        change_column table.to_sym, :description, :string # implicit limit 255\n      else\n        change_column table.to_sym, :description, :text\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20141208174653_collection_file_names.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CollectionFileNames < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  def up\n    add_column :collections, :file_names, :string, :limit => 2**13\n\n    act_as_system_user do\n      Collection.find_each(batch_size: 20) do |c|\n        file_names = c.manifest_files\n        ActiveRecord::Base.connection.execute \"UPDATE collections\n                    SET file_names = #{ActiveRecord::Base.connection.quote(file_names)}\n                    WHERE uuid = '#{c.uuid}'\"\n      end\n    end\n  end\n\n  def down\n    remove_column :collections, :file_names\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20141208185217_search_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass SearchIndex < ActiveRecord::Migration[4.2]\n  def tables_with_searchable_columns\n    {\n      \"api_client_authorizations\" => [\"api_token\", \"created_by_ip_address\", \"last_used_by_ip_address\", \"default_owner_uuid\"],\n      \"api_clients\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\", \"url_prefix\"],\n      \"authorized_keys\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\", \"key_type\", \"authorized_user_uuid\"],\n      \"collections\" => [\"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"portable_data_hash\", \"redundancy_confirmed_by_client_uuid\", \"uuid\", \"name\", \"file_names\"],\n      \"groups\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\", \"group_class\"],\n      \"humans\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\"],\n      \"job_tasks\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"job_uuid\", \"created_by_job_task_uuid\"],\n      \"jobs\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"submit_id\", \"script\", \"script_version\", \"cancelled_by_client_uuid\", \"cancelled_by_user_uuid\", \"output\", \"is_locked_by_uuid\", \"log\", \"repository\", \"supplied_script_version\", \"docker_image_locator\", \"state\", \"arvados_sdk_version\"],\n      \"keep_disks\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"ping_secret\", \"node_uuid\", \"filesystem_uuid\", \"keep_service_uuid\"],\n      \"keep_services\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"service_host\", \"service_type\"],\n      \"links\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"tail_uuid\", \"link_class\", \"name\", \"head_uuid\"],\n      \"logs\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"object_uuid\", \"event_type\", \"object_owner_uuid\"],\n      \"nodes\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"hostname\", \"domain\", \"ip_address\", \"job_uuid\"],\n      \"pipeline_instances\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"pipeline_template_uuid\", \"name\", \"state\"],\n      \"pipeline_templates\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\"],\n      \"repositories\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\", \"fetch_url\", \"push_url\"],\n      \"specimens\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"material\"],\n      \"traits\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\"],\n      \"users\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"email\", \"first_name\", \"last_name\", \"identity_url\", \"default_owner_uuid\"],\n      \"virtual_machines\" => [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"hostname\"],\n    }\n  end\n\n  def change\n    tables_with_searchable_columns.each do |table, columns|\n      add_index table.to_sym, columns, name: 
\"#{table}_search_index\"\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20150122175935_no_description_in_search_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# If the database reflects an obsolete version of the 20141208185217\n# migration (i.e., before commit:5c1db683), revert it and reapply the\n# current version. (The down-migration is the same in both versions.)\n\nrequire \"./db/migrate/20141208185217_search_index.rb\"\n\nclass NoDescriptionInSearchIndex < ActiveRecord::Migration[4.2]\n  def up\n    all_tables = %w{collections groups jobs pipeline_instances pipeline_templates}\n    all_tables.each do |table|\n      indexes = ActiveRecord::Base.connection.indexes(table)\n      search_index_by_name = indexes.select do |index|\n        index.name == \"#{table}_search_index\"\n      end\n\n      if !search_index_by_name.empty?\n        index_columns = search_index_by_name.first.columns\n        has_description = index_columns.include? 'description'\n        if has_description\n          SearchIndex.new.migrate(:down)\n          SearchIndex.new.migrate(:up)\n          break\n        end\n      end\n    end\n  end\n\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20150123142953_full_text_search.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass FullTextSearch < ActiveRecord::Migration[4.2]\n\n  def up\n    execute \"CREATE INDEX collections_full_text_search_idx ON collections USING gin(#{Collection.full_text_tsvector});\"\n    execute \"CREATE INDEX groups_full_text_search_idx ON groups USING gin(#{Group.full_text_tsvector});\"\n    execute \"CREATE INDEX jobs_full_text_search_idx ON jobs USING gin(#{Job.full_text_tsvector});\"\n    execute \"CREATE INDEX pipeline_instances_full_text_search_idx ON pipeline_instances USING gin(#{PipelineInstance.full_text_tsvector});\"\n    execute \"CREATE INDEX pipeline_templates_full_text_search_idx ON pipeline_templates USING gin(#{PipelineTemplate.full_text_tsvector});\"\n  end\n\n  def down\n    remove_index :pipeline_templates, :name => 'pipeline_templates_full_text_search_idx'\n    remove_index :pipeline_instances, :name => 'pipeline_instances_full_text_search_idx'\n    remove_index :jobs, :name => 'jobs_full_text_search_idx'\n    remove_index :groups, :name => 'groups_full_text_search_idx'\n    remove_index :collections, :name => 'collections_full_text_search_idx'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20150203180223_set_group_class_on_anonymous_group.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass SetGroupClassOnAnonymousGroup < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n  def up\n    act_as_system_user do\n      anonymous_group.update group_class: 'role', name: 'Anonymous users', description: 'Anonymous users'\n    end\n  end\n\n  def down\n    act_as_system_user do\n      anonymous_group.update group_class: nil, name: 'Anonymous group', description: 'Anonymous group'\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20150206210804_all_users_can_read_anonymous_group.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AllUsersCanReadAnonymousGroup < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  def up\n    anonymous_group_read_permission\n  end\n\n  def down\n    # Do nothing - it's too dangerous to try to figure out whether or not\n    # the permission was created by the migration.\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20150206230342_rename_replication_attributes.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameReplicationAttributes < ActiveRecord::Migration[4.2]\n  RENAME = [[:redundancy, :replication_desired],\n            [:redundancy_confirmed_as, :replication_confirmed],\n            [:redundancy_confirmed_at, :replication_confirmed_at]]\n\n  def up\n    RENAME.each do |oldname, newname|\n      rename_column :collections, oldname, newname\n    end\n    remove_column :collections, :redundancy_confirmed_by_client_uuid\n    Collection.reset_column_information\n\n    # Removing that column dropped some search indexes. Let's put them back.\n    add_index :collections, [\"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"portable_data_hash\", \"uuid\", \"name\", \"file_names\"], name: 'collections_search_index'\n    execute \"CREATE INDEX collections_full_text_search_idx ON collections USING gin(#{Collection.full_text_tsvector});\"\n  end\n\n  def down\n    remove_index :collections, name: 'collections_search_index'\n    add_column :collections, :redundancy_confirmed_by_client_uuid, :string\n    RENAME.reverse.each do |oldname, newname|\n      rename_column :collections, newname, oldname\n    end\n    remove_index :collections, :name => 'collections_full_text_search_idx'\n    Collection.reset_column_information\n\n    execute \"CREATE INDEX collections_full_text_search_idx ON collections USING gin(#{Collection.full_text_tsvector});\"\n    add_index :collections, [\"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"portable_data_hash\", \"uuid\", \"name\", \"file_names\", \"redundancy_confirmed_by_client_uuid\"], name: 'collections_search_index'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20150216193428_collection_name_owner_unique_only_non_expired.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CollectionNameOwnerUniqueOnlyNonExpired < ActiveRecord::Migration[4.2]\n  def find_index\n    indexes = ActiveRecord::Base.connection.indexes('collections')\n    name_owner_index = indexes.select do |index|\n      index.name == 'collection_owner_uuid_name_unique'\n    end\n    name_owner_index\n  end\n\n  def up\n    remove_index :collections, :name => 'collection_owner_uuid_name_unique' if !find_index.empty?\n    add_index(:collections, [:owner_uuid, :name], unique: true,\n              where: 'expires_at is null',\n              name: 'collection_owner_uuid_name_unique')\n  end\n\n  def down\n    # it failed during up. is it going to pass now? should we do nothing?\n    remove_index :collections, :name => 'collection_owner_uuid_name_unique' if !find_index.empty?\n    add_index(:collections, [:owner_uuid, :name], unique: true,\n              name: 'collection_owner_uuid_name_unique')\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20150303210106_fix_collection_portable_data_hash_with_hinted_manifest.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'has_uuid'\nrequire 'kind_and_etag'\n\nclass FixCollectionPortableDataHashWithHintedManifest < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  class ArvadosModel < ActiveRecord::Base\n    self.abstract_class = true\n    extend HasUuid::ClassMethods\n    include CurrentApiClient\n    include KindAndEtag\n    before_create do |record|\n      record.uuid ||= record.class.generate_uuid\n      record.owner_uuid ||= system_user_uuid\n    end\n    serialize :properties, Hash\n\n    def self.to_s\n      # Clean up the name of the stub model class so we generate correct UUIDs.\n      super.sub(\"FixCollectionPortableDataHashWithHintedManifest::\", \"\")\n    end\n  end\n\n  class Collection < ArvadosModel\n  end\n\n  class Log < ArvadosModel\n    def self.log_for(thing, age=\"old\")\n      { \"#{age}_etag\" => thing.etag,\n        \"#{age}_attributes\" => thing.attributes,\n      }\n    end\n\n    def self.log_create(thing)\n      new_log(\"create\", thing, log_for(thing, \"new\"))\n    end\n\n    def self.log_update(thing, start_state)\n      new_log(\"update\", thing, start_state.merge(log_for(thing, \"new\")))\n    end\n\n    def self.log_destroy(thing)\n      new_log(\"destroy\", thing, log_for(thing, \"old\"))\n    end\n\n    private\n\n    def self.new_log(event_type, thing, properties)\n      create!(event_type: event_type,\n              event_at: Time.now,\n              object_uuid: thing.uuid,\n              object_owner_uuid: thing.owner_uuid,\n              properties: properties)\n    end\n  end\n\n  def each_bad_collection\n    end_coll = Collection.order(\"id DESC\").first\n    return if end_coll.nil?\n    seen_uuids = []\n    (\"A\"..\"Z\").each do |hint_char|\n      query = Collection.\n        where(\"id <= ? AND manifest_text LIKE '%+#{hint_char}%'\", end_coll.id)\n      unless seen_uuids.empty?\n        query = query.where(\"uuid NOT IN (?)\", seen_uuids)\n      end\n      # It's important to make sure that this line doesn't swap.  The\n      # worst case scenario is that it finds a batch of collections that\n      # all have maximum size manifests (64MiB).  With a batch size of\n      # 50, that's about 3GiB.  Figure it will end up being 4GiB after\n      # other ActiveRecord overhead.  That's a size we're comfortable with.\n      query.find_each(batch_size: 50) do |coll|\n        seen_uuids << coll.uuid\n        stripped_manifest = coll.manifest_text.\n          gsub(/( [0-9a-f]{32}(\\+\\d+)?)\\+\\S+/, '\\1')\n        stripped_pdh = sprintf(\"%s+%i\",\n                               Digest::MD5.hexdigest(stripped_manifest),\n                               stripped_manifest.bytesize)\n        yield [coll, stripped_pdh] if (coll.portable_data_hash != stripped_pdh)\n      end\n    end\n  end\n\n  def up\n    Collection.reset_column_information\n    Log.reset_column_information\n    copied_attr_names =\n      [:owner_uuid, :created_at, :modified_by_client_uuid, :manifest_text,\n       :modified_by_user_uuid, :modified_at, :updated_at, :name,\n       :description, :portable_data_hash, :replication_desired,\n       :replication_confirmed, :replication_confirmed_at, :expires_at]\n    new_expiry = Date.new(2038, 1, 31)\n\n    each_bad_collection do |coll, stripped_pdh|\n      # Create a copy of the collection including bad portable data hash,\n      # with an expiration.  
This makes it possible to resolve the bad\n      # portable data hash, but the expiration can hide the Collection\n      # from more user-friendly interfaces like Workbench.\n      start_log = Log.log_for(coll)\n      attributes = Hash[copied_attr_names.map { |key| [key, coll.send(key)] }]\n      attributes[:expires_at] ||= new_expiry\n      attributes[:properties] = (coll.properties.dup rescue {})\n      attributes[:properties][\"migrated_from\"] ||= coll.uuid\n      coll_copy = Collection.create!(attributes)\n      Log.log_create(coll_copy)\n      coll.update(portable_data_hash: stripped_pdh)\n      Log.log_update(coll, start_log)\n    end\n  end\n\n  def down\n    Collection.reset_column_information\n    Log.reset_column_information\n    each_bad_collection do |coll, stripped_pdh|\n      if ((src_uuid = coll.properties[\"migrated_from\"]) and\n          (src_coll = Collection.where(uuid: src_uuid).first) and\n          (src_coll.portable_data_hash == stripped_pdh))\n        start_log = Log.log_for(src_coll)\n        src_coll.portable_data_hash = coll.portable_data_hash\n        src_coll.save!\n        Log.log_update(src_coll, start_log)\n        coll.destroy or raise Exception.new(\"failed to destroy old collection\")\n        Log.log_destroy(coll)\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20150312151136_change_collection_expires_at_to_datetime.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass ChangeCollectionExpiresAtToDatetime < ActiveRecord::Migration[4.2]\n  def up\n    change_column :collections, :expires_at, :datetime\n  end\n\n  def down\n    change_column :collections, :expires_at, :date\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20150317132720_add_username_to_users.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'has_uuid'\nrequire 'kind_and_etag'\n\nclass AddUsernameToUsers < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  SEARCH_INDEX_COLUMNS =\n    [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\",\n     \"modified_by_user_uuid\", \"email\", \"first_name\", \"last_name\",\n     \"identity_url\", \"default_owner_uuid\"]\n\n  class ArvadosModel < ActiveRecord::Base\n    self.abstract_class = true\n    extend HasUuid::ClassMethods\n    include CurrentApiClient\n    include KindAndEtag\n    before_create do |record|\n      record.uuid ||= record.class.generate_uuid\n      record.owner_uuid ||= system_user_uuid\n    end\n    serialize :properties, Hash\n\n    def self.to_s\n      # Clean up the name of the stub model class so we generate correct UUIDs.\n      super.rpartition(\"::\").last\n    end\n  end\n\n  class Log < ArvadosModel\n    def self.log_for(thing, age=\"old\")\n      { \"#{age}_etag\" => thing.etag,\n        \"#{age}_attributes\" => thing.attributes,\n      }\n    end\n\n    def self.log_create(thing)\n      new_log(\"create\", thing, log_for(thing, \"new\"))\n    end\n\n    def self.log_update(thing, start_state)\n      new_log(\"update\", thing, start_state.merge(log_for(thing, \"new\")))\n    end\n\n    def self.log_destroy(thing)\n      new_log(\"destroy\", thing, log_for(thing, \"old\"))\n    end\n\n    private\n\n    def self.new_log(event_type, thing, properties)\n      create!(event_type: event_type,\n              event_at: Time.now,\n              object_uuid: thing.uuid,\n              object_owner_uuid: thing.owner_uuid,\n              properties: properties)\n    end\n  end\n\n  class Link < ArvadosModel\n  end\n\n  class User < ArvadosModel\n  end\n\n  def sanitize_username(username)\n    username.\n      sub(/^[^A-Za-z]+/, \"\").\n      gsub(/[^A-Za-z0-9]/, \"\")\n  end\n\n  def usernames_wishlist(user)\n    usernames = Hash.new(0)\n    usernames[user.email.split(\"@\", 2).first] += 1\n    Link.\n       where(tail_uuid: user.uuid, link_class: \"permission\", name: \"can_login\").\n       find_each do |login_perm|\n      username = login_perm.properties[\"username\"]\n      usernames[username] += 2 if (username and not username.empty?)\n    end\n    usernames.keys.\n      sort_by { |n| -usernames[n] }.\n      map { |n| sanitize_username(n) }.\n      reject(&:empty?)\n  end\n\n  def increment_username(username)\n    @username_suffixes[username] += 1\n    \"%s%i\" % [username, @username_suffixes[username]]\n  end\n\n  def each_wanted_username(user)\n    usernames = usernames_wishlist(user)\n    usernames.each { |n| yield n }\n    base_username = usernames.first || \"arvadosuser\"\n    loop { yield increment_username(base_username) }\n  end\n\n  def recreate_search_index(columns)\n    remove_index :users, name: \"users_search_index\"\n    add_index :users, columns, name: \"users_search_index\"\n  end\n\n  def up\n    @username_suffixes = Hash.new(1)\n    add_column :users, :username, :string, null: true\n    add_index :users, :username, unique: true\n    recreate_search_index(SEARCH_INDEX_COLUMNS + [\"username\"])\n\n    [Link, Log, User].each { |m| m.reset_column_information }\n    User.validates(:username, uniqueness: true, allow_nil: true)\n    User.where(is_active: true).order(created_at: :asc).find_each do |user|\n      start_log = Log.log_for(user)\n      each_wanted_username(user) do |username|\n        user.username = 
username\n        break if user.valid?\n      end\n      user.save!\n      Log.log_update(user, start_log)\n    end\n  end\n\n  def down\n    remove_index :users, :username\n    recreate_search_index(SEARCH_INDEX_COLUMNS)\n    remove_column :users, :username\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20150324152204_backward_compatibility_for_user_repositories.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'has_uuid'\nrequire 'kind_and_etag'\n\nclass BackwardCompatibilityForUserRepositories < ActiveRecord::Migration[4.2]\n  include CurrentApiClient\n\n  class ArvadosModel < ActiveRecord::Base\n    self.abstract_class = true\n    extend HasUuid::ClassMethods\n    include CurrentApiClient\n    include KindAndEtag\n    before_create do |record|\n      record.uuid ||= record.class.generate_uuid\n      record.owner_uuid ||= system_user_uuid\n    end\n    serialize :properties, Hash\n\n    def self.to_s\n      # Clean up the name of the stub model class so we generate correct UUIDs.\n      super.rpartition(\"::\").last\n    end\n  end\n\n  class Log < ArvadosModel\n    def self.log_for(thing, age=\"old\")\n      { \"#{age}_etag\" => thing.etag,\n        \"#{age}_attributes\" => thing.attributes,\n      }\n    end\n\n    def self.log_create(thing)\n      new_log(\"create\", thing, log_for(thing, \"new\"))\n    end\n\n    def self.log_update(thing, start_state)\n      new_log(\"update\", thing, start_state.merge(log_for(thing, \"new\")))\n    end\n\n    def self.log_destroy(thing)\n      new_log(\"destroy\", thing, log_for(thing, \"old\"))\n    end\n\n    private\n\n    def self.new_log(event_type, thing, properties)\n      create!(event_type: event_type,\n              event_at: Time.now,\n              object_uuid: thing.uuid,\n              object_owner_uuid: thing.owner_uuid,\n              properties: properties)\n    end\n  end\n\n  class Link < ArvadosModel\n  end\n\n  class Repository < ArvadosModel\n  end\n\n  def up\n    remove_index :repositories, name: \"repositories_search_index\"\n    add_index(:repositories, %w(uuid owner_uuid modified_by_client_uuid\n                                modified_by_user_uuid name),\n              name: \"repositories_search_index\")\n    remove_column :repositories, :fetch_url\n    remove_column :repositories, :push_url\n\n    [Link, Log, Repository].each { |m| m.reset_column_information }\n    Repository.where(\"owner_uuid != ?\", system_user_uuid).find_each do |repo|\n      link_attrs = {\n        tail_uuid: repo.owner_uuid,\n        link_class: \"permission\", name: \"can_manage\", head_uuid: repo.uuid,\n      }\n      if Link.where(link_attrs).first.nil?\n        manage_link = Link.create!(link_attrs)\n        Log.log_create(manage_link)\n      end\n      start_log = Log.log_for(repo)\n      repo.owner_uuid = system_user_uuid\n      repo.save!\n      Log.log_update(repo, start_log)\n    end\n  end\n\n  def down\n    raise ActiveRecord::IrreversibleMigration.\n      new(\"can't restore prior fetch and push URLs\")\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20150423145759_no_filenames_in_collection_search_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass NoFilenamesInCollectionSearchIndex < ActiveRecord::Migration[4.2]\n  def up\n    remove_index :collections, :name => 'collections_search_index'\n    add_index :collections, [\"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"portable_data_hash\", \"uuid\", \"name\"], name: 'collections_search_index'\n  end\n\n  def down\n    remove_index :collections, :name => 'collections_search_index'\n    add_index :collections, [\"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"portable_data_hash\", \"uuid\", \"name\", \"file_names\"], name: 'collections_search_index'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20150512193020_read_only_on_keep_services.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass ReadOnlyOnKeepServices < ActiveRecord::Migration[4.2]\n  def change\n    add_column :keep_services, :read_only, :boolean, null: false, default: false\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20150526180251_leading_space_on_full_text_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire \"./db/migrate/20150123142953_full_text_search.rb\"\n\nclass LeadingSpaceOnFullTextIndex < ActiveRecord::Migration[4.2]\n  def up\n    # Inspect one of the full-text indexes (chosen arbitrarily) to\n    # determine whether this migration is needed.\n    ft_index_name = 'jobs_full_text_search_idx'\n    ActiveRecord::Base.connection.indexes('jobs').each do |idx|\n      if idx.name == ft_index_name\n        if idx.columns.first.index \"((((' '\"\n          # Index is already correct. This happens if the source tree\n          # already had the new version of full_text_tsvector by the\n          # time the initial FullTextSearch migration ran.\n          $stderr.puts \"This migration is not needed.\"\n        else\n          # Index was created using the old full_text_tsvector. Drop\n          # and re-create all full text indexes.\n          FullTextSearch.new.migrate(:down)\n          FullTextSearch.new.migrate(:up)\n        end\n        return\n      end\n    end\n    raise \"Did not find index '#{ft_index_name}'. Earlier migration missed??\"\n  end\n\n  def down\n    $stderr.puts <<EOS\nDown-migration is not supported for this change, and might be unnecessary.\n\nIf you run a code base older than 20150526180251 against this\ndatabase, full text search will be slow even on collections where it\nused to work well. If this is a concern, first check out the desired\nolder version of the code base, and then run\n\"rake db:migrate:down VERSION=20150123142953\"\nfollowed by\n\"rake db:migrate:up VERSION=20150123142953\"\n.\nEOS\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20151202151426_create_containers_and_requests.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateContainersAndRequests < ActiveRecord::Migration[4.2]\n  def change\n    create_table :containers do |t|\n      t.string :uuid\n      t.string :owner_uuid\n      t.datetime :created_at\n      t.datetime :modified_at\n      t.string :modified_by_client_uuid\n      t.string :modified_by_user_uuid\n      t.string :state\n      t.datetime :started_at\n      t.datetime :finished_at\n      t.string :log\n      t.text :environment\n      t.string :cwd\n      t.text :command\n      t.string :output_path\n      t.text :mounts\n      t.text :runtime_constraints\n      t.string :output\n      t.string :container_image\n      t.float :progress\n      t.integer :priority\n\n      t.timestamps\n    end\n\n    create_table :container_requests do |t|\n      t.string :uuid\n      t.string :owner_uuid\n      t.datetime :created_at\n      t.datetime :modified_at\n      t.string :modified_by_client_uuid\n      t.string :modified_by_user_uuid\n      t.string :name\n      t.text :description\n      t.text :properties\n      t.string :state\n      t.string :requesting_container_uuid\n      t.string :container_uuid\n      t.integer :container_count_max\n      t.text :mounts\n      t.text :runtime_constraints\n      t.string :container_image\n      t.text :environment\n      t.string :cwd\n      t.text :command\n      t.string :output_path\n      t.integer :priority\n      t.datetime :expires_at\n      t.text :filters\n\n      t.timestamps\n    end\n\n    add_index :containers, :uuid, :unique => true\n    add_index :container_requests, :uuid, :unique => true\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20151215134304_fix_containers_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass FixContainersIndex < ActiveRecord::Migration[4.2]\n  def up\n    execute \"CREATE INDEX container_requests_full_text_search_idx ON container_requests USING gin(#{ContainerRequest.full_text_tsvector});\"\n    add_index :container_requests, [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\", \"state\", \"requesting_container_uuid\", \"container_uuid\", \"container_image\", \"cwd\", \"output_path\"], name: 'container_requests_search_index'\n    add_index :containers, [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"state\", \"log\", \"cwd\", \"output_path\", \"output\", \"container_image\"], name: 'containers_search_index'\n    add_index :container_requests, :owner_uuid\n    add_index :containers, :owner_uuid\n  end\n\n  def down\n    remove_index :container_requests, :name => 'container_requests_full_text_search_idx'\n    remove_index :container_requests, :name => 'container_requests_search_index'\n    remove_index :containers, :name => 'containers_search_index'\n    remove_index :container_requests, :owner_uuid\n    remove_index :containers, :owner_uuid\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20151229214707_add_exit_code_to_containers.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddExitCodeToContainers < ActiveRecord::Migration[4.2]\n  def change\n    add_column :containers, :exit_code, :integer\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20160208210629_add_uuid_to_api_client_authorization.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'has_uuid'\n\nclass AddUuidToApiClientAuthorization < ActiveRecord::Migration[4.2]\n  extend HasUuid::ClassMethods\n\n  def up\n    add_column :api_client_authorizations, :uuid, :string\n    add_index :api_client_authorizations, :uuid, :unique => true\n\n    prefix = Server::Application.config.uuid_prefix + '-' +\n             Digest::MD5.hexdigest('ApiClientAuthorization'.to_s).to_i(16).to_s(36)[-5..-1] + '-'\n\n    update_sql <<-EOS\nupdate api_client_authorizations set uuid = (select concat('#{prefix}',\narray_to_string(ARRAY (SELECT substring(api_token FROM (ceil(random()*36))::int FOR 1) FROM generate_series(1, 15)), '')\n));\nEOS\n\n    change_column_null :api_client_authorizations, :uuid, false\n  end\n\n  def down\n    if column_exists?(:api_client_authorizations, :uuid)\n      remove_index :api_client_authorizations, :uuid\n      remove_column :api_client_authorizations, :uuid\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20160209155729_add_uuid_to_api_token_search_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddUuidToApiTokenSearchIndex < ActiveRecord::Migration[4.2]\n  def up\n    begin\n      remove_index :api_client_authorizations, :name => 'api_client_authorizations_search_index'\n    rescue\n    end\n    add_index :api_client_authorizations,\n              [\"api_token\", \"created_by_ip_address\", \"last_used_by_ip_address\", \"default_owner_uuid\", \"uuid\"],\n              name: \"api_client_authorizations_search_index\"\n  end\n\n  def down\n    begin\n      remove_index :api_client_authorizations, :name => 'api_client_authorizations_search_index'\n    rescue\n    end\n\t  add_index :api_client_authorizations,\n              [\"api_token\", \"created_by_ip_address\", \"last_used_by_ip_address\", \"default_owner_uuid\"],\n              name: \"api_client_authorizations_search_index\"\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20160324144017_add_components_to_job.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddComponentsToJob < ActiveRecord::Migration[4.2]\n  def up\n    add_column :jobs, :components, :text\n  end\n\n  def down\n    if column_exists?(:jobs, :components)\n      remove_column :jobs, :components\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20160506175108_add_auths_to_container.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddAuthsToContainer < ActiveRecord::Migration[4.2]\n  def change\n    add_column :containers, :auth_uuid, :string\n    add_column :containers, :locked_by_uuid, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20160509143250_add_auth_and_lock_to_container_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddAuthAndLockToContainerIndex < ActiveRecord::Migration[4.2]\n  Columns_were = [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"state\", \"log\", \"cwd\", \"output_path\", \"output\", \"container_image\"]\n  Columns = Columns_were + [\"auth_uuid\", \"locked_by_uuid\"]\n  def up\n    begin\n      remove_index :containers, :name => 'containers_search_index'\n    rescue\n    end\n    add_index(:containers, Columns, name: \"containers_search_index\")\n  end\n\n  def down\n    begin\n      remove_index :containers, :name => 'containers_search_index'\n    rescue\n    end\n    add_index(:containers, Columns_were, name: \"containers_search_index\")\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20160808151559_create_workflows.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateWorkflows < ActiveRecord::Migration[4.2]\n  def up\n    create_table :workflows do |t|\n      t.string :uuid\n      t.string :owner_uuid\n      t.datetime :created_at\n      t.datetime :modified_at\n      t.string :modified_by_client_uuid\n      t.string :modified_by_user_uuid\n      t.string :name\n      t.text :description\n      t.text :workflow\n\n      t.timestamps\n    end\n\n    add_index :workflows, :uuid, :unique => true\n    add_index :workflows, :owner_uuid\n    add_index :workflows, [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\"], name: 'workflows_search_idx'\n    execute \"CREATE INDEX workflows_full_text_search_idx ON workflows USING gin(#{Workflow.full_text_tsvector});\"\n  end\n\n  def down\n    remove_index :workflows, :name => 'workflows_full_text_search_idx'\n    remove_index :workflows, :name => 'workflows_search_idx'\n    remove_index :workflows, :owner_uuid\n    remove_index :workflows, :uuid\n    drop_table :workflows\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20160819195557_add_script_parameters_digest_to_jobs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddScriptParametersDigestToJobs < ActiveRecord::Migration[4.2]\n  def change\n    add_column :jobs, :script_parameters_digest, :string\n    add_index :jobs, :script_parameters_digest\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20160819195725_populate_script_parameters_digest.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass PopulateScriptParametersDigest < ActiveRecord::Migration[4.2]\n  def up\n    done = false\n    while !done\n      done = true\n      Job.\n        where('script_parameters_digest is null').\n        select([:id, :script_parameters, :script_parameters_digest]).\n        limit(200).\n        each do |j|\n        done = false\n        Job.\n          where('id=? or script_parameters=?', j.id, j.script_parameters.to_yaml).\n          update_all(script_parameters_digest: j.update_script_parameters_digest)\n      end\n    end\n  end\n\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20160901210110_repair_script_parameters_digest.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RepairScriptParametersDigest < ActiveRecord::Migration[4.2]\n  def up\n    Job.find_each do |j|\n      have = j.script_parameters_digest\n      want = j.update_script_parameters_digest\n      if have != want\n        # where().update_all() skips validations, event logging, and\n        # timestamp updates, and just runs SQL. (This change is\n        # invisible to clients.)\n        Job.where('id=?', j.id).update_all(script_parameters_digest: want)\n      end\n    end\n  end\n\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20160909181442_rename_workflow_to_definition.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RenameWorkflowToDefinition < ActiveRecord::Migration[4.2]\n  def up\n    rename_column :workflows, :workflow, :definition\n  end\n\n  def down\n    rename_column :workflows, :definition, :workflow\n  end\nend\n\n"
  },
  {
    "path": "services/api/db/migrate/20160926194129_add_container_count.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddContainerCount < ActiveRecord::Migration[4.2]\n  def up\n    add_column :container_requests, :container_count, :int, :default => 0\n  end\n\n  def down\n    remove_column :container_requests, :container_count\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20161019171346_add_use_existing_to_container_requests.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddUseExistingToContainerRequests < ActiveRecord::Migration[4.2]\n  def up\n    add_column :container_requests, :use_existing, :boolean, :default => true\n  end\n\n  def down\n    remove_column :container_requests, :use_existing\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20161111143147_add_scheduling_parameters_to_container.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddSchedulingParametersToContainer < ActiveRecord::Migration[4.2]\n  def change\n    add_column :containers, :scheduling_parameters, :text\n    add_column :container_requests, :scheduling_parameters, :text\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20161115171221_add_output_and_log_uuid_to_container_request.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'has_uuid'\n\nclass AddOutputAndLogUuidToContainerRequest < ActiveRecord::Migration[4.2]\n  extend HasUuid::ClassMethods\n\n  def up\n    add_column :container_requests, :output_uuid, :string\n    add_column :container_requests, :log_uuid, :string\n\n    no_such_out_coll = Server::Application.config.uuid_prefix + '-' + '4zz18' + '-xxxxxxxxxxxxxxx'\n    no_such_log_coll = Server::Application.config.uuid_prefix + '-' + '4zz18' + '-yyyyyyyyyyyyyyy'\n\n    update_sql <<-EOS\nupdate container_requests set output_uuid = ('#{no_such_out_coll}'), log_uuid = ('#{no_such_log_coll}');\nEOS\n  end\n\n  def down\n    remove_column :container_requests, :log_uuid\n    remove_column :container_requests, :output_uuid\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20161115174218_add_output_and_log_uuids_to_container_request_search_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddOutputAndLogUuidsToContainerRequestSearchIndex < ActiveRecord::Migration[4.2]\n  def up\n    begin\n      remove_index :container_requests, :name => 'container_requests_search_index'\n    rescue\n    end\n    add_index :container_requests,\n              [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\", \"state\", \"requesting_container_uuid\", \"container_uuid\", \"container_image\", \"cwd\", \"output_path\", \"output_uuid\", \"log_uuid\"],\n              name: \"container_requests_search_index\"\n  end\n\n  def down\n    begin\n      remove_index :container_requests, :name => 'container_requests_search_index'\n    rescue\n    end\n\t  add_index :container_requests,\n              [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\", \"state\", \"requesting_container_uuid\", \"container_uuid\", \"container_image\", \"cwd\", \"output_path\"],\n              name: \"container_requests_search_index\"\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20161213172944_full_text_search_indexes.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass FullTextSearchIndexes < ActiveRecord::Migration[4.2]\n  def fts_indexes\n    {\n      \"collections\" => \"collections_full_text_search_idx\",\n      \"container_requests\" => \"container_requests_full_text_search_idx\",\n      \"groups\" => \"groups_full_text_search_idx\",\n      \"jobs\" => \"jobs_full_text_search_idx\",\n      \"pipeline_instances\" => \"pipeline_instances_full_text_search_idx\",\n      \"pipeline_templates\" => \"pipeline_templates_full_text_search_idx\",\n      \"workflows\" => \"workflows_full_text_search_idx\",\n    }\n  end\n\n  def replace_index(t)\n    i = fts_indexes[t]\n    t.classify.constantize.reset_column_information\n    execute \"DROP INDEX IF EXISTS #{i}\"\n    execute \"CREATE INDEX #{i} ON #{t} USING gin(#{t.classify.constantize.full_text_tsvector})\"\n  end\n\n  def up\n    # remove existing fts indexes and create up to date ones with no\n    # leading space\n    fts_indexes.keys.each do |t|\n      replace_index(t)\n    end\n  end\n\n  def down\n    fts_indexes.each do |t, i|\n      remove_index t.to_sym, :name => i\n    end\n  end\nend\n"
  },
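A note on how these GIN indexes get used, since nothing in the migration shows a query: `full_text_tsvector` is the class method the migration calls to build the index expression, and a search must repeat the same expression for the planner to match the index. A hypothetical lookup (the search term is made up):

```ruby
# Sketch only: assumes Collection.full_text_tsvector returns the SQL
# expression the index above was created on.
tsv = Collection.full_text_tsvector
Collection.where("#{tsv} @@ plainto_tsquery('english', ?)", "exome sequencing")
```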
  {
    "path": "services/api/db/migrate/20161222153434_split_expiry_to_trash_and_delete.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass SplitExpiryToTrashAndDelete < ActiveRecord::Migration[4.2]\n  def up\n    Collection.transaction do\n      add_column(:collections, :trash_at, :datetime)\n      add_index(:collections, :trash_at)\n      add_column(:collections, :is_trashed, :boolean, null: false, default: false)\n      add_index(:collections, :is_trashed)\n      rename_column(:collections, :expires_at, :delete_at)\n      add_index(:collections, :delete_at)\n\n      Collection.reset_column_information\n      Collection.\n        where('delete_at is not null and delete_at <= statement_timestamp()').\n        delete_all\n      Collection.\n        where('delete_at is not null').\n        update_all('is_trashed = true, trash_at = statement_timestamp()')\n      add_index(:collections, [:owner_uuid, :name],\n                unique: true,\n                where: 'is_trashed = false',\n                name: 'index_collections_on_owner_uuid_and_name')\n      remove_index(:collections,\n                   name: 'collection_owner_uuid_name_unique')\n    end\n  end\n\n  def down\n    Collection.transaction do\n      remove_index(:collections, :delete_at)\n      rename_column(:collections, :delete_at, :expires_at)\n      add_index(:collections, [:owner_uuid, :name],\n                unique: true,\n                where: 'expires_at is null',\n                name: 'collection_owner_uuid_name_unique')\n      remove_index(:collections,\n                   name: 'index_collections_on_owner_uuid_and_name')\n      remove_column(:collections, :is_trashed)\n      remove_index(:collections, :trash_at)\n      remove_column(:collections, :trash_at)\n    end\n  end\nend\n"
  },
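The point of the partial unique index above is that uniqueness of (owner_uuid, name) is only enforced over rows with is_trashed = false, so trashing a collection releases its name. A sketch of the resulting behavior, with a placeholder owner uuid and ignoring the permission checks a real Arvados API call would apply:

```ruby
# Trashed rows fall outside the partial index, so at most one untrashed
# collection may hold a given (owner_uuid, name) pair at a time.
owner = "zzzzz-tpzed-xxxxxxxxxxxxxxx"  # placeholder
c1 = Collection.create!(owner_uuid: owner, name: "results")
c1.update!(is_trashed: true, trash_at: Time.now)              # frees the name
c2 = Collection.create!(owner_uuid: owner, name: "results")   # so this succeeds
```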
  {
    "path": "services/api/db/migrate/20161223090712_add_output_name_to_container_requests.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddOutputNameToContainerRequests < ActiveRecord::Migration[4.2]\n  def up\n    add_column :container_requests, :output_name, :string, :default => nil\n  end\n\n  def down\n    remove_column :container_requests, :output_name\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170102153111_add_output_name_to_container_request_search_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddOutputNameToContainerRequestSearchIndex < ActiveRecord::Migration[4.2]\n  def up\n    begin\n      remove_index :container_requests, :name => 'container_requests_search_index'\n    rescue\n    end\n    add_index :container_requests,\n              [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\", \"state\", \"requesting_container_uuid\", \"container_uuid\", \"container_image\", \"cwd\", \"output_path\", \"output_uuid\", \"log_uuid\", \"output_name\"],\n              name: \"container_requests_search_index\"\n  end\n\n  def down\n    begin\n      remove_index :container_requests, :name => 'container_requests_search_index'\n    rescue\n    end\n\t  add_index :container_requests,\n              [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\", \"state\", \"requesting_container_uuid\", \"container_uuid\", \"container_image\", \"cwd\", \"output_path\", \"output_uuid\", \"log_uuid\"],\n              name: \"container_requests_search_index\"\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170105160301_add_output_name_to_cr_fts_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddOutputNameToCrFtsIndex < ActiveRecord::Migration[4.2]\n  def up\n    t = \"container_requests\"\n    i = \"container_requests_full_text_search_idx\"\n    t.classify.constantize.reset_column_information\n    ActiveRecord::Base.connection.indexes(t).each do |idx|\n      if idx.name == i\n        remove_index t.to_sym, :name => i\n        break\n      end\n    end\n    # By now, container_request should have the new column \"output_name\" so full_text_tsvector\n    # would include it on its results\n    execute \"CREATE INDEX #{i} ON #{t} USING gin(#{t.classify.constantize.full_text_tsvector});\"\n  end\n\n  def down\n    t = \"container_requests\"\n    i = \"container_requests_full_text_search_idx\"\n    remove_index t.to_sym, :name => i\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170105160302_set_finished_at_on_finished_pipeline_instances.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass SetFinishedAtOnFinishedPipelineInstances < ActiveRecord::Migration[4.2]\n  def change\n    ActiveRecord::Base.connection.execute(\"update pipeline_instances set finished_at=updated_at where finished_at is null and (state='Failed' or state='Complete')\")\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170216170823_no_cr_mounts_and_workflow_def_in_full_text_search_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass NoCrMountsAndWorkflowDefInFullTextSearchIndex < ActiveRecord::Migration[4.2]\n  def fts_indexes\n    {\n      \"container_requests\" => \"container_requests_full_text_search_idx\",\n      \"workflows\" => \"workflows_full_text_search_idx\",\n    }\n  end\n\n  def up\n    # remove existing fts index and recreate for container_requests and workflows\n    fts_indexes.each do |t, i|\n      t.classify.constantize.reset_column_information\n      ActiveRecord::Base.connection.indexes(t).each do |idx|\n        if idx.name == i\n          remove_index t.to_sym, :name => i\n          break\n        end\n      end\n      execute \"CREATE INDEX #{i} ON #{t} USING gin(#{t.classify.constantize.full_text_tsvector});\"\n    end\n  end\n\n  def down\n    fts_indexes.each do |t, i|\n      t.classify.constantize.reset_column_information\n      ActiveRecord::Base.connection.indexes(t).each do |idx|\n        if idx.name == i\n          remove_index t.to_sym, :name => i\n          break\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170301225558_no_downgrade_after_json.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass NoDowngradeAfterJson < ActiveRecord::Migration[4.2]\n  def up\n  end\n\n  def down\n    raise ActiveRecord::IrreversibleMigration.\n      new(\"cannot downgrade: older versions cannot read JSON from DB tables\")\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170319063406_serialized_columns_accept_null.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass SerializedColumnsAcceptNull < ActiveRecord::Migration[4.2]\n  def change\n    change_column :api_client_authorizations, :scopes, :text, null: true, default: '[\"all\"]'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170328215436_add_portable_data_hash_index_to_collections.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddPortableDataHashIndexToCollections < ActiveRecord::Migration[4.2]\n  def change\n    add_index :collections, :portable_data_hash\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170330012505_add_output_ttl_to_container_requests.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddOutputTtlToContainerRequests < ActiveRecord::Migration[4.2]\n  def change\n    add_column :container_requests, :output_ttl, :integer, default: 0, null: false\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170419173031_add_created_by_job_task_index_to_job_tasks.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddCreatedByJobTaskIndexToJobTasks < ActiveRecord::Migration[4.2]\n  def change\n    add_index :job_tasks, :created_by_job_task_uuid\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170419173712_add_object_owner_index_to_logs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddObjectOwnerIndexToLogs < ActiveRecord::Migration[4.2]\n  def change\n    add_index :logs, :object_owner_uuid\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170419175801_add_requesting_container_index_to_container_requests.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddRequestingContainerIndexToContainerRequests < ActiveRecord::Migration[4.2]\n  def change\n    add_index :container_requests, :requesting_container_uuid\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170628185847_jobs_yaml_to_json.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'migrate_yaml_to_json'\n\nclass JobsYamlToJson < ActiveRecord::Migration[4.2]\n  def up\n    [\n      'components',\n      'script_parameters',\n      'runtime_constraints',\n      'tasks_summary',\n    ].each do |column|\n      MigrateYAMLToJSON.migrate(\"jobs\", column)\n    end\n  end\n\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170704160233_yaml_to_json.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'migrate_yaml_to_json'\n\nclass YamlToJson < ActiveRecord::Migration[4.2]\n  def up\n    [\n      ['collections', 'properties'],\n      ['containers', 'environment'],\n      ['containers', 'mounts'],\n      ['containers', 'runtime_constraints'],\n      ['containers', 'command'],\n      ['containers', 'scheduling_parameters'],\n      ['container_requests', 'properties'],\n      ['container_requests', 'environment'],\n      ['container_requests', 'mounts'],\n      ['container_requests', 'runtime_constraints'],\n      ['container_requests', 'command'],\n      ['container_requests', 'scheduling_parameters'],\n      ['humans', 'properties'],\n      ['job_tasks', 'parameters'],\n      ['links', 'properties'],\n      ['nodes', 'info'],\n      ['nodes', 'properties'],\n      ['pipeline_instances', 'components'],\n      ['pipeline_instances', 'properties'],\n      ['pipeline_instances', 'components_summary'],\n      ['pipeline_templates', 'components'],\n      ['specimens', 'properties'],\n      ['traits', 'properties'],\n      ['users', 'prefs'],\n    ].each do |table, column|\n      MigrateYAMLToJSON.migrate(table, column)\n    end\n  end\n\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170706141334_json_collection_properties.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire './db/migrate/20161213172944_full_text_search_indexes'\n\nclass JsonCollectionProperties < ActiveRecord::Migration[4.2]\n  def up\n    # Drop the FT index before changing column type to avoid\n    # \"PG::DatatypeMismatch: ERROR: COALESCE types jsonb and text\n    # cannot be matched\".\n    ActiveRecord::Base.connection.execute 'DROP INDEX IF EXISTS collections_full_text_search_idx'\n    ActiveRecord::Base.connection.execute 'ALTER TABLE collections ALTER COLUMN properties TYPE jsonb USING properties::jsonb'\n    FullTextSearchIndexes.new.replace_index('collections')\n  end\n\n  def down\n    ActiveRecord::Base.connection.execute 'ALTER TABLE collections ALTER COLUMN properties TYPE text'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170824202826_trashable_groups.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass TrashableGroups < ActiveRecord::Migration[4.2]\n  def up\n    add_column :groups, :trash_at, :datetime\n    add_index(:groups, :trash_at)\n\n    add_column :groups, :is_trashed, :boolean, null: false, default: false\n    add_index(:groups, :is_trashed)\n\n    add_column :groups, :delete_at, :datetime\n    add_index(:groups, :delete_at)\n\n    Group.reset_column_information\n    add_index(:groups, [:owner_uuid, :name],\n              unique: true,\n              where: 'is_trashed = false',\n              name: 'index_groups_on_owner_uuid_and_name')\n    remove_index(:groups,\n                 name: 'groups_owner_uuid_name_unique')\n  end\n\n  def down\n    Group.transaction do\n      add_index(:groups, [:owner_uuid, :name], unique: true,\n                name: 'groups_owner_uuid_name_unique')\n      remove_index(:groups,\n                   name: 'index_groups_on_owner_uuid_and_name')\n\n      remove_index(:groups, :delete_at)\n      remove_column(:groups, :delete_at)\n\n      remove_index(:groups, :is_trashed)\n      remove_column(:groups, :is_trashed)\n\n      remove_index(:groups, :trash_at)\n      remove_column(:groups, :trash_at)\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20170906224040_materialized_permission_view.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass MaterializedPermissionView < ActiveRecord::Migration[4.2]\n\n  @@idxtables = [:collections, :container_requests, :groups, :jobs, :links, :pipeline_instances, :pipeline_templates, :repositories, :users, :virtual_machines, :workflows, :logs]\n\n  def up\n\n    #\n    # Construct a materialized view for permissions.  This is a view which is\n    # derived from querying other tables, but is saved to a static table itself\n    # so that it can be indexed and queried efficiently without rerunning the\n    # query.  The view is updated using \"REFRESH MATERIALIZED VIEW\" which is\n    # executed after an operation invalidates the permission graph.\n    #\n\n    ActiveRecord::Base.connection.execute(\n\"-- constructing perm_edges\n--   1. get the list of all permission links,\n--   2. any can_manage link or permission link to a group means permission should 'follow through'\n--      (as a special case, can_manage links to a user grant access to everything owned by the user,\n--       unlike can_read or can_write which only grant access to the user record)\n--   3. add all owner->owned relationships between groups as can_manage edges\n--\n-- constructing permissions\n--   1. base case: start with set of all users as the working set\n--   2. recursive case:\n--      join with edges where the tail is in the working set and 'follow' is true\n--      produce a new working set with the head (target) of each edge\n--      set permission to the least permission encountered on the path\n--      propagate trashed flag down\n\nCREATE MATERIALIZED VIEW materialized_permission_view AS\nWITH RECURSIVE\nperm_value (name, val) AS (\n     VALUES\n     ('can_read',   1::smallint),\n     ('can_login',  1),\n     ('can_write',  2),\n     ('can_manage', 3)\n     ),\nperm_edges (tail_uuid, head_uuid, val, follow, trashed) AS (\n       SELECT links.tail_uuid,\n              links.head_uuid,\n              pv.val,\n              (pv.val = 3 OR groups.uuid IS NOT NULL) AS follow,\n              0::smallint AS trashed\n              FROM links\n              LEFT JOIN perm_value pv ON pv.name = links.name\n              LEFT JOIN groups ON pv.val<3 AND groups.uuid = links.head_uuid\n              WHERE links.link_class = 'permission'\n       UNION ALL\n       SELECT owner_uuid, uuid, 3, true,\n              CASE WHEN trash_at IS NOT NULL and trash_at < clock_timestamp() THEN 1 ELSE 0 END\n              FROM groups\n       ),\nperm (val, follow, user_uuid, target_uuid, trashed) AS (\n     SELECT 3::smallint             AS val,\n            true                    AS follow,\n            users.uuid::varchar(32) AS user_uuid,\n            users.uuid::varchar(32) AS target_uuid,\n            0::smallint             AS trashed\n            FROM users\n     UNION\n     SELECT LEAST(perm.val, edges.val)::smallint  AS val,\n            edges.follow                          AS follow,\n            perm.user_uuid::varchar(32)           AS user_uuid,\n            edges.head_uuid::varchar(32)          AS target_uuid,\n            GREATEST(perm.trashed, edges.trashed)::smallint AS trashed\n            FROM perm\n            INNER JOIN perm_edges edges\n            ON perm.follow AND edges.tail_uuid = perm.target_uuid\n)\nSELECT user_uuid,\n       target_uuid,\n       MAX(val) AS perm_level,\n       CASE follow WHEN true THEN target_uuid ELSE NULL END AS target_owner_uuid,\n       MAX(trashed) AS trashed\n       
FROM perm\n       GROUP BY user_uuid, target_uuid, target_owner_uuid;\n\")\n    add_index :materialized_permission_view, [:trashed, :target_uuid], name: 'permission_target_trashed'\n    add_index :materialized_permission_view, [:user_uuid, :trashed, :perm_level], name: 'permission_target_user_trashed_level'\n\n    # Indexes on the other tables are essential for the query planner to\n    # construct an efficient join with the materialized permission view.\n    #\n    # Our default query uses \"ORDER BY modified_at desc, uuid\"\n    #\n    # It turns out the existing simple index on modified_at can't be used\n    # because of the additional ordering on \"uuid\".\n    #\n    # To be able to utilize the index, the index ordering has to match the\n    # ORDER BY clause.  For more detail see:\n    #\n    # https://www.postgresql.org/docs/9.3/static/indexes-ordering.html\n    #\n    @@idxtables.each do |table|\n      ActiveRecord::Base.connection.execute(\"CREATE INDEX index_#{table.to_s}_on_modified_at_uuid ON #{table.to_s} USING btree (modified_at desc, uuid asc)\")\n    end\n\n    create_table :permission_refresh_lock\n    ActiveRecord::Base.connection.execute(\"REFRESH MATERIALIZED VIEW materialized_permission_view\")\n  end\n\n  def down\n    drop_table :permission_refresh_lock\n    remove_index :materialized_permission_view, name: 'permission_target_trashed'\n    remove_index :materialized_permission_view, name: 'permission_target_user_trashed_level'\n    @@idxtables.each do |table|\n      ActiveRecord::Base.connection.execute(\"DROP INDEX IF EXISTS index_#{table.to_s}_on_modified_at_uuid\")\n    end\n    ActiveRecord::Base.connection.execute(\"DROP MATERIALIZED VIEW IF EXISTS materialized_permission_view\")\n  end\nend\n"
  },
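For orientation, a sketch of how such a view is typically consulted; the column names come from the CREATE MATERIALIZED VIEW above, while the real lookup logic lives in the models' readable_by scope:

```ruby
# Which objects can this user read? perm_level >= 1 covers can_read and
# above; trashed = 0 excludes items inside trashed projects.
user_uuid = "zzzzz-tpzed-xxxxxxxxxxxxxxx"  # placeholder
quoted = ActiveRecord::Base.connection.quote(user_uuid)
ActiveRecord::Base.connection.exec_query(
  "SELECT target_uuid FROM materialized_permission_view " \
  "WHERE user_uuid = #{quoted} AND perm_level >= 1 AND trashed = 0")
```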
  {
    "path": "services/api/db/migrate/20171027183824_add_index_to_containers.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddIndexToContainers < ActiveRecord::Migration[4.2]\n  def up\n    ActiveRecord::Base.connection.execute(\"CREATE INDEX index_containers_on_modified_at_uuid ON containers USING btree (modified_at desc, uuid asc)\")\n    ActiveRecord::Base.connection.execute(\"CREATE INDEX index_container_requests_on_container_uuid on container_requests (container_uuid)\")\n  end\n\n  def down\n    ActiveRecord::Base.connection.execute(\"DROP INDEX IF EXISTS index_containers_on_modified_at_uuid\")\n    ActiveRecord::Base.connection.execute(\"DROP INDEX IF EXISTS index_container_requests_on_container_uuid\")\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20171208203841_fix_trash_flag_follow.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass FixTrashFlagFollow < ActiveRecord::Migration[4.2]\n  def change\n    ActiveRecord::Base.connection.execute(\"DROP MATERIALIZED VIEW materialized_permission_view\")\n    ActiveRecord::Base.connection.execute(\n\"-- constructing perm_edges\n--   1. get the list of all permission links,\n--   2. any can_manage link or permission link to a group means permission should 'follow through'\n--      (as a special case, can_manage links to a user grant access to everything owned by the user,\n--       unlike can_read or can_write which only grant access to the user record)\n--   3. add all owner->owned relationships between groups as can_manage edges\n--\n-- constructing permissions\n--   1. base case: start with set of all users as the working set\n--   2. recursive case:\n--      join with edges where the tail is in the working set and 'follow' is true\n--      produce a new working set with the head (target) of each edge\n--      set permission to the least permission encountered on the path\n--      propagate trashed flag down\n\nCREATE MATERIALIZED VIEW materialized_permission_view AS\nWITH RECURSIVE\nperm_value (name, val) AS (\n     VALUES\n     ('can_read',   1::smallint),\n     ('can_login',  1),\n     ('can_write',  2),\n     ('can_manage', 3)\n     ),\nperm_edges (tail_uuid, head_uuid, val, follow, trashed) AS (\n       SELECT links.tail_uuid,\n              links.head_uuid,\n              pv.val,\n              (pv.val = 3 OR groups.uuid IS NOT NULL) AS follow,\n              0::smallint AS trashed,\n              0::smallint AS followtrash\n              FROM links\n              LEFT JOIN perm_value pv ON pv.name = links.name\n              LEFT JOIN groups ON pv.val<3 AND groups.uuid = links.head_uuid\n              WHERE links.link_class = 'permission'\n       UNION ALL\n       SELECT owner_uuid, uuid, 3, true,\n              CASE WHEN trash_at IS NOT NULL and trash_at < clock_timestamp() THEN 1 ELSE 0 END,\n              1\n              FROM groups\n       ),\nperm (val, follow, user_uuid, target_uuid, trashed) AS (\n     SELECT 3::smallint             AS val,\n            true                    AS follow,\n            users.uuid::varchar(32) AS user_uuid,\n            users.uuid::varchar(32) AS target_uuid,\n            0::smallint             AS trashed\n            FROM users\n     UNION\n     SELECT LEAST(perm.val, edges.val)::smallint  AS val,\n            edges.follow                          AS follow,\n            perm.user_uuid::varchar(32)           AS user_uuid,\n            edges.head_uuid::varchar(32)          AS target_uuid,\n            (GREATEST(perm.trashed, edges.trashed) * edges.followtrash)::smallint AS trashed\n            FROM perm\n            INNER JOIN perm_edges edges\n            ON perm.follow AND edges.tail_uuid = perm.target_uuid\n)\nSELECT user_uuid,\n       target_uuid,\n       MAX(val) AS perm_level,\n       CASE follow WHEN true THEN target_uuid ELSE NULL END AS target_owner_uuid,\n       MAX(trashed) AS trashed\n       FROM perm\n       GROUP BY user_uuid, target_uuid, target_owner_uuid;\n\")\n    add_index :materialized_permission_view, [:trashed, :target_uuid], name: 'permission_target_trashed'\n    add_index :materialized_permission_view, [:user_uuid, :trashed, :perm_level], name: 'permission_target_user_trashed_level'\n    ActiveRecord::Base.connection.execute(\"REFRESH MATERIALIZED VIEW materialized_permission_view\")\n  
end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20171212153352_add_gin_index_to_collection_properties.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddGinIndexToCollectionProperties < ActiveRecord::Migration[4.2]\n  def up\n    ActiveRecord::Base.connection.execute(\"CREATE INDEX collection_index_on_properties ON collections USING gin (properties);\")\n  end\n  def down\n    ActiveRecord::Base.connection.execute(\"DROP INDEX collection_index_on_properties\")\n  end\nend\n"
  },
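A whole-column GIN index on jsonb serves the containment and existence operators (@>, ?, ?| and ?&). A sketch of a property filter it would accelerate; the property key and value are made up:

```ruby
# The ?::jsonb cast makes the bound string comparable with the jsonb
# column; the @> containment test is what the GIN index accelerates.
Collection.where("properties @> ?::jsonb", {sample_id: "S123"}.to_json)
```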
  {
    "path": "services/api/db/migrate/20180216203422_add_storage_classes_to_collections.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddStorageClassesToCollections < ActiveRecord::Migration[4.2]\n  def up\n    add_column :collections, :storage_classes_desired, :jsonb, :default => [\"default\"]\n    add_column :collections, :storage_classes_confirmed, :jsonb, :default => []\n    add_column :collections, :storage_classes_confirmed_at, :datetime, :default => nil, :null => true\n  end\n\n  def down\n    remove_column :collections, :storage_classes_desired\n    remove_column :collections, :storage_classes_confirmed\n    remove_column :collections, :storage_classes_confirmed_at\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180228220311_add_secret_mounts_to_containers.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddSecretMountsToContainers < ActiveRecord::Migration[4.2]\n  def change\n    add_column :container_requests, :secret_mounts, :jsonb, default: {}\n    add_column :containers, :secret_mounts, :jsonb, default: {}\n    add_column :containers, :secret_mounts_md5, :string, default: \"99914b932bd37a50b983c5e7c90ae93b\"\n    add_index :containers, :secret_mounts_md5\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180313180114_change_container_priority_bigint.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass ChangeContainerPriorityBigint < ActiveRecord::Migration[4.2]\n  def change\n    change_column :containers, :priority, :integer, limit: 8\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180501182859_add_redirect_to_user_uuid_to_users.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddRedirectToUserUuidToUsers < ActiveRecord::Migration[4.2]\n  def up\n    add_column :users, :redirect_to_user_uuid, :string\n    User.reset_column_information\n    remove_index :users, name: 'users_search_index'\n    add_index :users, User.searchable_columns('ilike') - ['prefs'], name: 'users_search_index'\n  end\n\n  def down\n    remove_index :users, name: 'users_search_index'\n    remove_column :users, :redirect_to_user_uuid\n    User.reset_column_information\n    add_index :users, User.searchable_columns('ilike') - ['prefs'], name: 'users_search_index'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180514135529_add_container_auth_uuid_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddContainerAuthUuidIndex < ActiveRecord::Migration[4.2]\n  def change\n    add_index :containers, :auth_uuid\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180607175050_properties_to_jsonb.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire './db/migrate/20161213172944_full_text_search_indexes'\n\nclass PropertiesToJsonb < ActiveRecord::Migration[4.2]\n\n  @@tables_columns = [[\"nodes\", \"properties\"],\n                      [\"nodes\", \"info\"],\n                      [\"container_requests\", \"properties\"],\n                      [\"links\", \"properties\"]]\n\n  def up\n    @@tables_columns.each do |table, column|\n      # Drop the FT index before changing column type to avoid\n      # \"PG::DatatypeMismatch: ERROR: COALESCE types jsonb and text\n      # cannot be matched\".\n      ActiveRecord::Base.connection.execute \"DROP INDEX IF EXISTS #{table}_full_text_search_idx\"\n      ActiveRecord::Base.connection.execute \"ALTER TABLE #{table} ALTER COLUMN #{column} TYPE jsonb USING #{column}::jsonb\"\n      ActiveRecord::Base.connection.execute \"CREATE INDEX #{table}_index_on_#{column} ON #{table} USING gin (#{column})\"\n    end\n    FullTextSearchIndexes.new.replace_index(\"container_requests\")\n  end\n\n  def down\n    @@tables_columns.each do |table, column|\n      ActiveRecord::Base.connection.execute \"DROP INDEX IF EXISTS #{table}_index_on_#{column}\"\n      ActiveRecord::Base.connection.execute \"ALTER TABLE #{table} ALTER COLUMN #{column} TYPE text\"\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180608123145_add_properties_to_groups.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire './db/migrate/20161213172944_full_text_search_indexes'\n\nclass AddPropertiesToGroups < ActiveRecord::Migration[4.2]\n  def up\n    add_column :groups, :properties, :jsonb, default: {}\n    ActiveRecord::Base.connection.execute(\"CREATE INDEX group_index_on_properties ON groups USING gin (properties);\")\n    FullTextSearchIndexes.new.replace_index('groups')\n  end\n\n  def down\n    ActiveRecord::Base.connection.execute(\"DROP INDEX IF EXISTS group_index_on_properties\")\n    remove_column :groups, :properties\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180806133039_index_all_filenames.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass IndexAllFilenames < ActiveRecord::Migration[4.2]\n  def up\n    ActiveRecord::Base.connection.execute 'ALTER TABLE collections ALTER COLUMN file_names TYPE text'\n  end\n  def down\n    ActiveRecord::Base.connection.execute 'ALTER TABLE collections ALTER COLUMN file_names TYPE varchar(8192)'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180820130357_add_pdh_and_trash_index_to_collections.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddPdhAndTrashIndexToCollections < ActiveRecord::Migration[4.2]\n  def change\n    add_index :collections, [:portable_data_hash, :trash_at]\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180820132617_add_lock_index_to_containers.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddLockIndexToContainers < ActiveRecord::Migration[4.2]\n  def change\n    # For the current code in sdk/go/dispatch:\n    add_index :containers, [:locked_by_uuid, :priority]\n    # For future dispatchers that use filters instead of offset for\n    # more predictable paging:\n    add_index :containers, [:locked_by_uuid, :uuid]\n  end\nend\n"
  },
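The comment above anticipates dispatchers that page by filter instead of OFFSET; a sketch of such a query, with placeholder uuids, which the (locked_by_uuid, uuid) index can satisfy for both the filter and the sort:

```ruby
# Page through one dispatcher's locked containers in uuid order.
auth_uuid = "zzzzz-gj3su-xxxxxxxxxxxxxxx"  # placeholder token uuid
last_uuid = "zzzzz-dz642-xxxxxxxxxxxxxxx"  # placeholder: last container seen
Container.where("locked_by_uuid = ? AND uuid > ?", auth_uuid, last_uuid).
  order(:uuid).limit(100)
```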
  {
    "path": "services/api/db/migrate/20180820135808_drop_pdh_index_from_collections.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass DropPdhIndexFromCollections < ActiveRecord::Migration[4.2]\n  def change\n    remove_index :collections, column: :portable_data_hash\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180824152014_add_md5_index_to_containers.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddMd5IndexToContainers < ActiveRecord::Migration[4.2]\n  def up\n    ActiveRecord::Base.connection.execute 'CREATE INDEX index_containers_on_reuse_columns on containers (md5(command), cwd, md5(environment), output_path, container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints))'\n  end\n  def down\n    ActiveRecord::Base.connection.execute 'DROP INDEX index_containers_on_reuse_columns'\n  end\nend\n"
  },
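md5() appears here because several of these columns hold documents too large to index directly with btree. For the planner to use an expression index, a lookup must repeat the indexed expressions verbatim; a hedged sketch of a reuse query with placeholder values:

```ruby
# Placeholder values; in a real reuse lookup these come from the new
# container request. Each md5(...) on the left matches an expression in
# index_containers_on_reuse_columns.
cmd, env, mounts, rc = ['["echo"]', "{}", "{}", "{}"]
Container.where(
  "md5(command) = md5(?) AND cwd = ? AND md5(environment) = md5(?) AND " \
  "output_path = ? AND container_image = ? AND md5(mounts) = md5(?) AND " \
  "secret_mounts_md5 = ? AND md5(runtime_constraints) = md5(?)",
  cmd, "/", env, "/out", "arvados/jobs", mounts,
  "99914b932bd37a50b983c5e7c90ae93b", rc)
```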
  {
    "path": "services/api/db/migrate/20180824155207_add_queue_index_to_containers.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddQueueIndexToContainers < ActiveRecord::Migration[4.2]\n  def up\n    ActiveRecord::Base.connection.execute 'CREATE INDEX index_containers_on_queued_state on containers (state, (priority > 0))'\n  end\n  def down\n    ActiveRecord::Base.connection.execute 'DROP INDEX index_containers_on_queued_state'\n  end\nend\n"
  },
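The second indexed column is the boolean expression (priority > 0) rather than priority itself, so the index stays small and matches the dispatcher's notion of runnable. A query benefits only if it repeats the expression; a sketch:

```ruby
# Matches the (state, (priority > 0)) expression index: runnable
# containers are Queued with a positive priority.
Container.where("state = 'Queued' AND (priority > 0)")
```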
  {
    "path": "services/api/db/migrate/20180904110712_add_runtime_status_to_containers.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddRuntimeStatusToContainers < ActiveRecord::Migration[4.2]\n  def change\n    add_column :containers, :runtime_status, :jsonb, default: {}\n    add_index :containers, :runtime_status, using: :gin\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180913175443_add_version_info_to_collections.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddVersionInfoToCollections < ActiveRecord::Migration[4.2]\n  def change\n    # Do changes in bulk to save time on huge tables\n    change_table :collections, :bulk => true do |t|\n      t.string :current_version_uuid\n      t.integer :version, null: false, default: 1\n      t.index [:current_version_uuid, :version], unique: true\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180915155335_set_current_version_uuid_on_collections.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass SetCurrentVersionUuidOnCollections < ActiveRecord::Migration[4.2]\n  def up\n    # Set the current version uuid as itself\n    Collection.where(current_version_uuid: nil).update_all(\"current_version_uuid=uuid\")\n  end\n\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180917200000_replace_full_text_indexes.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire './db/migrate/20161213172944_full_text_search_indexes'\n\nclass ReplaceFullTextIndexes < ActiveRecord::Migration[4.2]\n  def up\n    FullTextSearchIndexes.new.up\n  end\n\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180917205609_recompute_file_names_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RecomputeFileNamesIndex < ActiveRecord::Migration[4.2]\n  def do_batch(pdhs:)\n    ActiveRecord::Base.connection.exec_query('BEGIN')\n    Collection.select(:portable_data_hash, :manifest_text).where(portable_data_hash: pdhs).distinct(:portable_data_hash).each do |c|\n      ActiveRecord::Base.connection.exec_query(\"update collections set file_names=$1 where portable_data_hash=$2\",\n                                               \"update file_names index\",\n                                               [c.manifest_files, c.portable_data_hash])\n    end\n    ActiveRecord::Base.connection.exec_query('COMMIT')\n  end\n  def up\n    # Process collections in multiple transactions, where the total\n    # size of all manifest_texts processed in a transaction is no more\n    # than batch_size_max.  Collections whose manifest_text is bigger\n    # than batch_size_max are updated in their own individual\n    # transactions.\n    batch_size_max = 1 << 28    # 256 MiB\n    batch_size = 0\n    batch_pdhs = {}\n    last_pdh = '0'\n    total = Collection.distinct.count(:portable_data_hash)\n    done = 0\n    any = true\n    while any\n      any = false\n      Collection.\n        unscoped.\n        select(:portable_data_hash).distinct.\n        order(:portable_data_hash).\n        where('portable_data_hash > ?', last_pdh).\n        limit(1000).each do |c|\n        any = true\n        last_pdh = c.portable_data_hash\n        manifest_size = c.portable_data_hash.split('+')[1].to_i\n        if batch_size > 0 && batch_size + manifest_size > batch_size_max\n          do_batch(pdhs: batch_pdhs.keys)\n          done += batch_pdhs.size\n          Rails.logger.info(\"RecomputeFileNamesIndex: #{done}/#{total}\")\n          batch_pdhs = {}\n          batch_size = 0\n        end\n        batch_pdhs[c.portable_data_hash] = true\n        batch_size += manifest_size\n      end\n    end\n    do_batch(pdhs: batch_pdhs.keys)\n    Rails.logger.info(\"RecomputeFileNamesIndex: finished\")\n  end\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20180919001158_recreate_collection_unique_name_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RecreateCollectionUniqueNameIndex < ActiveRecord::Migration[4.2]\n  def up\n    Collection.transaction do\n      remove_index(:collections,\n                   name: 'index_collections_on_owner_uuid_and_name')\n      add_index(:collections, [:owner_uuid, :name],\n                unique: true,\n                where: 'is_trashed = false AND current_version_uuid = uuid',\n                name: 'index_collections_on_owner_uuid_and_name')\n    end\n  end\n\n  def down\n    Collection.transaction do\n      remove_index(:collections,\n                   name: 'index_collections_on_owner_uuid_and_name')\n      add_index(:collections, [:owner_uuid, :name],\n                unique: true,\n                where: 'is_trashed = false',\n                name: 'index_collections_on_owner_uuid_and_name')\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20181001175023_add_preserve_version_to_collections.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddPreserveVersionToCollections < ActiveRecord::Migration[4.2]\n  def change\n    add_column :collections, :preserve_version, :boolean, default: false\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20181004131141_add_current_version_uuid_to_collection_search_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddCurrentVersionUuidToCollectionSearchIndex < ActiveRecord::Migration[4.2]\n  disable_ddl_transaction!\n\n  def up\n    remove_index :collections, :name => 'collections_search_index'\n    add_index :collections, [\"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"portable_data_hash\", \"uuid\", \"name\", \"current_version_uuid\"], name: 'collections_search_index', algorithm: :concurrently\n  end\n\n  def down\n    remove_index :collections, :name => 'collections_search_index'\n    add_index :collections, [\"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"portable_data_hash\", \"uuid\", \"name\"], name: 'collections_search_index', algorithm: :concurrently\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20181005192222_add_container_runtime_token.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddContainerRuntimeToken < ActiveRecord::Migration[4.2]\n  def change\n    add_column :container_requests, :runtime_token, :text, :null => true\n    add_column :containers, :runtime_user_uuid, :text, :null => true\n    add_column :containers, :runtime_auth_scopes, :jsonb, :null => true\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20181011184200_add_runtime_token_to_container.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddRuntimeTokenToContainer < ActiveRecord::Migration[4.2]\n  def change\n    add_column :containers, :runtime_token, :text, :null => true\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20181213183234_add_expression_index_to_links.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddExpressionIndexToLinks < ActiveRecord::Migration[4.2]\n  def up\n    ActiveRecord::Base.connection.execute 'CREATE INDEX index_links_on_substring_head_uuid on links (substring(head_uuid, 7, 5))'\n    ActiveRecord::Base.connection.execute 'CREATE INDEX index_links_on_substring_tail_uuid on links (substring(tail_uuid, 7, 5))'\n  end\n\n  def down\n    ActiveRecord::Base.connection.execute 'DROP INDEX index_links_on_substring_head_uuid'\n    ActiveRecord::Base.connection.execute 'DROP INDEX index_links_on_substring_tail_uuid'\n  end\nend\n"
  },
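Arvados UUIDs have the form zzzzz-j7d0g-xxxxxxxxxxxxxxx (cluster prefix, object-type infix, random suffix), so substring(uuid, 7, 5) extracts the type infix. These expression indexes serve queries that filter links by the type of the referenced object, provided the query repeats the same expression; a sketch:

```ruby
# Links whose head is a group/project ("j7d0g" is the groups type infix);
# this form can use index_links_on_substring_head_uuid.
Link.where("substring(head_uuid, 7, 5) = ?", "j7d0g")
```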
  {
    "path": "services/api/db/migrate/20190214214814_add_container_lock_count.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddContainerLockCount < ActiveRecord::Migration[4.2]\n  def change\n    add_column :containers, :lock_count, :int, :null => false, :default => 0\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20190322174136_add_file_info_to_collection.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddFileInfoToCollection < ActiveRecord::Migration[4.2]\n  def up\n    add_column :collections, :file_count, :integer, default: 0, null: false\n    add_column :collections, :file_size_total, :integer, limit: 8, default: 0, null: false\n\n    puts \"Collections now have two new columns, file_count and file_size_total.\"\n    puts \"They were initialized with a zero value. If you are upgrading an Arvados\"\n    puts \"installation, please run the populate-file-info-columns-in-collections.rb\"\n    puts \"script to populate the columns. If this is a new installation, that is not\"\n    puts \"necessary.\"\n  end\n\n  def down\n    remove_column :collections, :file_count\n    remove_column :collections, :file_size_total\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20190422144631_fill_missing_modified_at.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass FillMissingModifiedAt < ActiveRecord::Migration[5.0]\n  def up\n    Collection.where('modified_at is null').update_all('modified_at = created_at')\n  end\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20190523180148_add_trigram_index_for_text_search.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddTrigramIndexForTextSearch < ActiveRecord::Migration[5.0]\n  def trgm_indexes\n    {\n      \"collections\" => \"collections_trgm_text_search_idx\",\n      \"container_requests\" => \"container_requests_trgm_text_search_idx\",\n      \"groups\" => \"groups_trgm_text_search_idx\",\n      \"jobs\" => \"jobs_trgm_text_search_idx\",\n      \"pipeline_instances\" => \"pipeline_instances_trgm_text_search_idx\",\n      \"pipeline_templates\" => \"pipeline_templates_trgm_text_search_idx\",\n      \"workflows\" => \"workflows_trgm_text_search_idx\",\n    }\n  end\n\n  def up\n    begin\n      execute \"CREATE EXTENSION IF NOT EXISTS pg_trgm\"\n    rescue ActiveRecord::StatementInvalid => e\n      puts \"Cannot create the pg_trgm extension.\"\n      if e.cause.is_a?(PG::InsufficientPrivilege)\n        puts \"The user must have a SUPERUSER role.\"\n      elsif e.cause.is_a?(PG::UndefinedFile)\n        puts \"The postgresql-contrib package is most likely not installed.\"\n      else\n        puts \"Unknown Error.\"\n      end\n      puts \"Please visit https://doc.arvados.org/admin/upgrading.html for instructions on how to run this migration.\"\n      throw e\n    end\n\n    trgm_indexes.each do |model, indx|\n      execute \"CREATE INDEX #{indx} ON #{model} USING gin((#{model.classify.constantize.full_text_trgm}) gin_trgm_ops)\"\n    end\n  end\n\n  def down\n    trgm_indexes.each do |_, indx|\n      execute \"DROP INDEX IF EXISTS #{indx}\"\n    end\n  end\nend\n"
  },
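Unlike the tsvector indexes earlier in this series, gin_trgm_ops indexes accelerate unanchored LIKE/ILIKE matching. A minimal sketch, assuming the model's full_text_trgm class method (used above) returns the concatenated-text SQL expression the index was built on:

```ruby
# Substring search over a collection's indexed text; the pattern may be
# unanchored because trigram matching does not need a prefix.
Collection.where("(#{Collection.full_text_trgm}) ilike ?", "%fastq%")
```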
  {
    "path": "services/api/db/migrate/20190808145904_drop_commit_ancestors.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass DropCommitAncestors < ActiveRecord::Migration[5.0]\n  def change\n    drop_table :commit_ancestors\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20190809135453_remove_commits_table.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RemoveCommitsTable < ActiveRecord::Migration[5.0]\n  def change\n        drop_table :commits\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20190905151603_enforce_unique_identity_url.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass EnforceUniqueIdentityUrl < ActiveRecord::Migration[5.0]\n  def change\n    add_index :users, [:identity_url], :unique => true\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20200501150153_permission_table.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire '20200501150153_permission_table_constants'\n\nclass PermissionTable < ActiveRecord::Migration[5.0]\n  def up\n    # This is a major migration.  We are replacing the\n    # materialized_permission_view, which is fully recomputed any time\n    # a permission changes (and becomes very expensive as the number\n    # of users/groups becomes large), with a new strategy that only\n    # recomputes permissions for the subset of objects that are\n    # potentially affected by the addition or removal of a permission\n    # relationship (i.e. ownership or a permission link).\n    #\n    # This also disentangles the concept of \"trashed groups\" from the\n    # permissions system.  Updating trashed items follows a similar\n    # (but less complicated) strategy to updating permissions, so it\n    # may be helpful to look at that first.\n\n    ActiveRecord::Base.connection.execute \"DROP MATERIALIZED VIEW IF EXISTS materialized_permission_view;\"\n    drop_table :permission_refresh_lock\n\n    # This table stores the set of trashed groups and their trash_at\n    # time.  Used to exclude trashed projects and their contents when\n    # getting object listings.\n    create_table :trashed_groups, :id => false do |t|\n      t.string :group_uuid\n      t.datetime :trash_at\n    end\n    add_index :trashed_groups, :group_uuid, :unique => true\n\n    ActiveRecord::Base.connection.execute %{\ncreate or replace function project_subtree_with_trash_at (starting_uuid varchar(27), starting_trash_at timestamp)\nreturns table (target_uuid varchar(27), trash_at timestamp)\nSTABLE\nlanguage SQL\nas $$\n/* Starting from a project, recursively traverse all the projects\n  underneath it and return a set of project uuids and trash_at times\n  (may be null).  The initial trash_at can be a timestamp or null.\n  The trash_at time propagates downward to groups it owns, i.e. when a\n  group is trashed, everything underneath it in the ownership\n  hierarchy is also considered trashed.  However, this is fact is\n  recorded in the trashed_groups table, not by updating trash_at field\n  in the groups table.\n*/\nWITH RECURSIVE\n        project_subtree(uuid, trash_at) as (\n        values (starting_uuid, starting_trash_at)\n        union\n        select groups.uuid, LEAST(project_subtree.trash_at, groups.trash_at)\n          from groups join project_subtree on (groups.owner_uuid = project_subtree.uuid)\n        )\n        select uuid, trash_at from project_subtree;\n$$;\n}\n\n    # Now populate the table.  For a non-test databse this is the only\n    # time this ever happens, after this the trash table is updated\n    # incrementally.  See app/models/group.rb#update_trash\n    refresh_trashed\n\n    # The table to store the flattened permissions.  
This is almost\n    # exactly the same as the old materialized_permission_view except\n    # that the target_owner_uuid column in the view is now just a\n    # boolean traverse_owned (the column was only ever tested for null\n    # or non-null).\n    #\n    # For details on how this table is used to apply permissions to\n    # queries, see app/models/arvados_model.rb#readable_by\n    #\n    create_table :materialized_permissions, :id => false do |t|\n      t.string :user_uuid\n      t.string :target_uuid\n      t.integer :perm_level\n      t.boolean :traverse_owned\n    end\n    add_index :materialized_permissions, [:user_uuid, :target_uuid], unique: true, name: 'permission_user_target'\n    add_index :materialized_permissions, [:target_uuid], unique: false, name: 'permission_target'\n\n    ActiveRecord::Base.connection.execute %{\ncreate or replace function should_traverse_owned (starting_uuid varchar(27),\n                                                  starting_perm integer)\n  returns bool\nIMMUTABLE\nlanguage SQL\nas $$\n/* Helper function.  Determines if permission on an object implies\n   transitive permission to things the object owns.  This is always\n   true for groups, but only true for users when the permission level\n   is can_manage.\n*/\nselect starting_uuid like '_____-j7d0g-_______________' or\n       (starting_uuid like '_____-tpzed-_______________' and starting_perm >= 3);\n$$;\n}\n\n    # Merge all permission relationships into a single view.  This\n    # consists of: groups owned by users and projects, users owned\n    # by other users, users' permission on themselves,\n    # and explicit permission links.\n    #\n    # A SQL view gets inlined into the query where it is used as a\n    # subquery.  This enables the query planner to inject constraints,\n    # so it only has to look up edges it plans to traverse and avoid a brute\n    # force query of all edges.\n    ActiveRecord::Base.connection.execute %{\ncreate view permission_graph_edges as\n  select groups.owner_uuid as tail_uuid, groups.uuid as head_uuid,\n         (3) as val, groups.uuid as edge_id from groups\nunion all\n  select users.owner_uuid as tail_uuid, users.uuid as head_uuid,\n         (3) as val, users.uuid as edge_id from users\nunion all\n  select users.uuid as tail_uuid, users.uuid as head_uuid,\n         (3) as val, '' as edge_id from users\nunion all\n  select links.tail_uuid,\n         links.head_uuid,\n         CASE\n           WHEN links.name = 'can_read'   THEN 1\n           WHEN links.name = 'can_login'  THEN 1\n           WHEN links.name = 'can_write'  THEN 2\n           WHEN links.name = 'can_manage' THEN 3\n           ELSE 0\n         END as val,\n         links.uuid as edge_id\n      from links\n      where links.link_class='permission'\n}\n\n    # This is used to ensure that the permission edge passed into\n    # compute_permission_subgraph replaces the existing edge in the\n    # \"edges\" view that is about to be removed.\n    edge_perm = %{\ncase (edges.edge_id = perm_edge_id)\n                               when true then starting_perm\n                               else edges.val\n                            end\n}\n\n    # The primary function to compute permissions for a subgraph.\n    # Comments on how it works are inline.\n    #\n    # Due to performance issues caused by the query optimizer not\n    # working across function and \"with\" expression boundaries, I\n    # had to fall back on using string templates to inline repeated\n    # code.\n\n    
ActiveRecord::Base.connection.execute %{\ncreate or replace function compute_permission_subgraph (perm_origin_uuid varchar(27),\n                                                        starting_uuid varchar(27),\n                                                        starting_perm integer,\n                                                        perm_edge_id varchar(27))\nreturns table (user_uuid varchar(27), target_uuid varchar(27), val integer, traverse_owned bool)\nSTABLE\nlanguage SQL\nas $$\n\n/* The purpose of this function is to compute the permissions for a\n   subgraph of the database, starting from a given edge.  The newly\n   computed permissions are used to add and remove rows from the main\n   permissions table.\n\n   perm_origin_uuid: The object that 'gets' the permission.\n\n   starting_uuid: The starting object the permission applies to.\n\n   starting_perm: The permission that perm_origin_uuid 'has' on\n                  starting_uuid.  One of 1, 2, 3 for can_read,\n                  can_write, can_manage respectively, or 0 to revoke\n                  permissions.\n\n   perm_edge_id: Identifies the permission edge that is being updated.\n                 For changes of ownership, this is starting_uuid.\n                 For links, this is the uuid of the link object.\n                 This is used to override the edge value in the database\n                 with starting_perm.  This is necessary when revoking\n                 permissions because the update happens before the edge is\n                 actually removed.\n*/\nwith\n  /* Starting from starting_uuid, determine the set of objects that\n     could be affected by this permission change.\n\n     Note: We don't traverse users unless it is an \"identity\"\n     permission (permission origin is self).\n  */\n  perm_from_start(perm_origin_uuid, target_uuid, val, traverse_owned) as (\n    #{PERM_QUERY_TEMPLATE % {:base_case => %{\n             values (perm_origin_uuid, starting_uuid, starting_perm,\n                    should_traverse_owned(starting_uuid, starting_perm),\n                    (perm_origin_uuid = starting_uuid or starting_uuid not like '_____-tpzed-_______________'))\n},\n:edge_perm => edge_perm\n} }),\n\n  /* Find other inbound edges that grant permissions to 'targets' in\n     perm_from_start, and compute permissions that originate from\n     those.\n\n     This is necessary for two reasons:\n\n       1) Other users may have access to a subset of the objects\n       through permission links other than the one we started from.\n       If we don't recompute them, their permission will get dropped.\n\n       2) There may be more than one path through which a user gets\n       permission to an object.  For example, a user owns a project\n       and also shares it can_read with a group the user belongs\n       to. 
  /* Find other inbound edges that grant permissions to 'targets' in\n     perm_from_start, and compute permissions that originate from\n     those.\n\n     This is necessary for two reasons:\n\n       1) Other users may have access to a subset of the objects\n       through permission links other than the one we started from.\n       If we don't recompute them, their permission will get dropped.\n\n       2) There may be more than one path through which a user gets\n       permission to an object.  For example, a user owns a project\n       and also shares it can_read with a group the user belongs\n       to.  Adding the can_read link must not overwrite the existing\n       can_manage permission granted by ownership.\n  */\n  additional_perms(perm_origin_uuid, target_uuid, val, traverse_owned) as (\n    #{PERM_QUERY_TEMPLATE % {:base_case => %{\n    select edges.tail_uuid as origin_uuid, edges.head_uuid as target_uuid, edges.val,\n           should_traverse_owned(edges.head_uuid, edges.val),\n           edges.head_uuid like '_____-j7d0g-_______________'\n      from permission_graph_edges as edges\n      where edges.edge_id != perm_edge_id and\n            edges.tail_uuid not in (select target_uuid from perm_from_start where target_uuid like '_____-j7d0g-_______________') and\n            edges.head_uuid in (select target_uuid from perm_from_start)\n},\n:edge_perm => edge_perm\n} }),\n\n  /* Combine the permissions computed in the first two phases. */\n  all_perms(perm_origin_uuid, target_uuid, val, traverse_owned) as (\n      select * from perm_from_start\n    union all\n      select * from additional_perms\n  )\n\n  /* The actual query that produces rows to be added or removed\n     from the materialized_permissions table.  This is the clever\n     bit.\n\n     Key insights:\n\n     * For every group, the materialized_permissions table lists all users\n       that have access to that group.\n\n     * The all_perms subquery has computed permissions on a set of\n       objects for all inbound \"origins\", which are users or groups.\n\n     * Permissions through groups are transitive.\n\n     We can infer:\n\n     1) The materialized_permissions table declares that user X has permission N on group Y\n     2) The all_perms result has determined group Y has permission M on object Z\n     3) Therefore, user X has permission min(N, M) on object Z\n\n     This allows us to efficiently determine the set of users that\n     have permissions on the subset of objects, without having to\n     follow the chain of permission back up to find those users.\n\n     In addition, because users always have permission on themselves, this\n     query also makes sure those permission rows are always\n     returned.\n  */\n  select v.user_uuid, v.target_uuid, max(v.perm_level), bool_or(v.traverse_owned) from\n    (select m.user_uuid,\n         u.target_uuid,\n         least(u.val, m.perm_level) as perm_level,\n         u.traverse_owned\n      from all_perms as u, materialized_permissions as m\n           where u.perm_origin_uuid = m.target_uuid AND m.traverse_owned\n           AND (m.user_uuid = m.target_uuid or m.target_uuid not like '_____-tpzed-_______________')\n    union all\n      select target_uuid as user_uuid, target_uuid, 3, true\n        from all_perms\n        where all_perms.target_uuid like '_____-tpzed-_______________') as v\n    group by v.user_uuid, v.target_uuid\n$$;\n     }\n\n    #\n    # Populate materialized_permissions by traversing permissions\n    # starting at each user.\n    #\n    refresh_permissions\n  end\n\n  def down\n    drop_table :materialized_permissions\n    drop_table :trashed_groups\n\n    ActiveRecord::Base.connection.execute \"DROP function project_subtree_with_trash_at (varchar, timestamp);\"\n    ActiveRecord::Base.connection.execute \"DROP function compute_permission_subgraph (varchar, varchar, integer, varchar);\"\n    ActiveRecord::Base.connection.execute \"DROP function should_traverse_owned(varchar, integer);\"\n    ActiveRecord::Base.connection.execute \"DROP view permission_graph_edges;\"\n\n    ActiveRecord::Base.connection.execute(%{\nCREATE MATERIALIZED VIEW 
materialized_permission_view AS\n WITH RECURSIVE perm_value(name, val) AS (\n         VALUES ('can_read'::text,(1)::smallint), ('can_login'::text,1), ('can_write'::text,2), ('can_manage'::text,3)\n        ), perm_edges(tail_uuid, head_uuid, val, follow, trashed) AS (\n         SELECT links.tail_uuid,\n            links.head_uuid,\n            pv.val,\n            ((pv.val = 3) OR (groups.uuid IS NOT NULL)) AS follow,\n            (0)::smallint AS trashed,\n            (0)::smallint AS followtrash\n           FROM ((public.links\n             LEFT JOIN perm_value pv ON ((pv.name = (links.name)::text)))\n             LEFT JOIN public.groups ON (((pv.val < 3) AND ((groups.uuid)::text = (links.head_uuid)::text))))\n          WHERE ((links.link_class)::text = 'permission'::text)\n        UNION ALL\n         SELECT groups.owner_uuid,\n            groups.uuid,\n            3,\n            true AS bool,\n                CASE\n                    WHEN ((groups.trash_at IS NOT NULL) AND (groups.trash_at < clock_timestamp())) THEN 1\n                    ELSE 0\n                END AS \"case\",\n            1\n           FROM public.groups\n        ), perm(val, follow, user_uuid, target_uuid, trashed) AS (\n         SELECT (3)::smallint AS val,\n            true AS follow,\n            (users.uuid)::character varying(32) AS user_uuid,\n            (users.uuid)::character varying(32) AS target_uuid,\n            (0)::smallint AS trashed\n           FROM public.users\n        UNION\n         SELECT (LEAST((perm_1.val)::integer, edges.val))::smallint AS val,\n            edges.follow,\n            perm_1.user_uuid,\n            (edges.head_uuid)::character varying(32) AS target_uuid,\n            ((GREATEST((perm_1.trashed)::integer, edges.trashed) * edges.followtrash))::smallint AS trashed\n           FROM (perm perm_1\n             JOIN perm_edges edges ON ((perm_1.follow AND ((edges.tail_uuid)::text = (perm_1.target_uuid)::text))))\n        )\n SELECT perm.user_uuid,\n    perm.target_uuid,\n    max(perm.val) AS perm_level,\n        CASE perm.follow\n            WHEN true THEN perm.target_uuid\n            ELSE NULL::character varying\n        END AS target_owner_uuid,\n    max(perm.trashed) AS trashed\n   FROM perm\n  GROUP BY perm.user_uuid, perm.target_uuid,\n        CASE perm.follow\n            WHEN true THEN perm.target_uuid\n            ELSE NULL::character varying\n        END\n  WITH NO DATA;\n}\n    )\n\n    add_index :materialized_permission_view, [:trashed, :target_uuid], name: 'permission_target_trashed'\n    add_index :materialized_permission_view, [:user_uuid, :trashed, :perm_level], name: 'permission_target_user_trashed_level'\n    create_table :permission_refresh_lock\n\n    ActiveRecord::Base.connection.execute 'REFRESH MATERIALIZED VIEW materialized_permission_view;'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20200602141328_fix_roles_projects.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'fix_roles_projects'\n\nclass FixRolesProjects < ActiveRecord::Migration[5.0]\n  def up\n    # defined in a function for easy testing.\n    fix_roles_projects\n  end\n\n  def down\n    # This migration is not reversible.  However, the results are\n    # backwards compatible.\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20200914203202_public_favorites_project.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass PublicFavoritesProject < ActiveRecord::Migration[5.2]\n  include CurrentApiClient\n  def up\n    act_as_system_user do\n      public_project_group\n      public_project_read_permission\n      Link.where(link_class: \"star\",\n                 owner_uuid: system_user_uuid,\n                 tail_uuid: all_users_group_uuid).each do |ln|\n        ln.owner_uuid = public_project_uuid\n        ln.tail_uuid = public_project_uuid\n        ln.save!\n      end\n    end\n  end\n\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20201103170213_refresh_trashed_groups.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire '20200501150153_permission_table_constants'\n\nclass RefreshTrashedGroups < ActiveRecord::Migration[5.2]\n  def change\n    # The original refresh_trashed query had a bug, it would insert\n    # all trashed rows, including those with null trash_at times.\n    # This went unnoticed because null trash_at behaved the same as\n    # not having those rows at all, but it is inefficient to fetch\n    # rows we'll never use.  That bug is fixed in the original query\n    # but we need another migration to make sure it runs.\n    refresh_trashed\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20201105190435_refresh_permissions.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire '20200501150153_permission_table_constants'\n\nclass RefreshPermissions < ActiveRecord::Migration[5.2]\n  def change\n    # There was a report of deadlocks resulting in failing permission\n    # updates.  These failures should not have corrupted permissions\n    # (the failure should have rolled back the entire update) but we\n    # will refresh the permissions out of an abundance of caution.\n    refresh_permissions\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20201202174753_fix_collection_versions_timestamps.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'fix_collection_versions_timestamps'\n\nclass FixCollectionVersionsTimestamps < ActiveRecord::Migration[5.2]\n  def up\n    # Defined in a function for easy testing.\n    fix_collection_versions_timestamps\n  end\n\n  def down\n    # This migration is not reversible.  However, the results are\n    # backwards compatible.\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20210108033940_add_gateway_address_to_containers.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddGatewayAddressToContainers < ActiveRecord::Migration[5.2]\n  def change\n    add_column :containers, :gateway_address, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20210126183521_add_interactive_session_started_to_containers.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddInteractiveSessionStartedToContainers < ActiveRecord::Migration[5.2]\n  def change\n    add_column :containers, :interactive_session_started, :boolean, null: false, default: false\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20210621204455_add_container_output_storage_class.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddContainerOutputStorageClass < ActiveRecord::Migration[5.2]\n  def change\n    add_column :container_requests, :output_storage_classes, :jsonb, :default => [\"default\"]\n    add_column :containers, :output_storage_classes, :jsonb, :default => [\"default\"]\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20210816191509_drop_fts_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass DropFtsIndex < ActiveRecord::Migration[5.2]\n  def fts_indexes\n    {\n      \"collections\" => \"collections_full_text_search_idx\",\n      \"container_requests\" => \"container_requests_full_text_search_idx\",\n      \"groups\" => \"groups_full_text_search_idx\",\n      \"jobs\" => \"jobs_full_text_search_idx\",\n      \"pipeline_instances\" => \"pipeline_instances_full_text_search_idx\",\n      \"pipeline_templates\" => \"pipeline_templates_full_text_search_idx\",\n      \"workflows\" => \"workflows_full_text_search_idx\",\n    }\n  end\n\n  def up\n    fts_indexes.keys.each do |t|\n      i = fts_indexes[t]\n      execute \"DROP INDEX IF EXISTS #{i}\"\n    end\n  end\n\n  def down\n    fts_indexes.keys.each do |t|\n      i = fts_indexes[t]\n      execute \"CREATE INDEX #{i} ON #{t} USING gin(#{t.classify.constantize.full_text_tsvector})\"\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20211027154300_delete_disabled_user_tokens_and_keys.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass DeleteDisabledUserTokensAndKeys < ActiveRecord::Migration[5.2]\n  def up\n    execute \"delete from api_client_authorizations where user_id in (select id from users where is_active ='false' and uuid not like '%-tpzed-anonymouspublic' and uuid not like '%-tpzed-000000000000000')\"\n    execute \"delete from authorized_keys where owner_uuid in (select uuid from users where is_active ='false' and uuid not like '%-tpzed-anonymouspublic' and uuid not like '%-tpzed-000000000000000')\"\n    execute \"delete from authorized_keys where authorized_user_uuid in (select uuid from users where is_active ='false' and uuid not like '%-tpzed-anonymouspublic' and uuid not like '%-tpzed-000000000000000')\"\n  end\n\n  def down\n    # This migration is not reversible.\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20220224203102_add_frozen_by_uuid_to_groups.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddFrozenByUuidToGroups < ActiveRecord::Migration[5.2]\n  def change\n    add_column :groups, :frozen_by_uuid, :string\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20220301155729_frozen_groups.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire '20200501150153_permission_table_constants'\n\nclass FrozenGroups < ActiveRecord::Migration[5.0]\n  def up\n    create_table :frozen_groups, :id => false do |t|\n      t.string :uuid\n    end\n    add_index :frozen_groups, :uuid, :unique => true\n\n    ActiveRecord::Base.connection.execute %{\ncreate or replace function project_subtree_with_is_frozen (starting_uuid varchar(27), starting_is_frozen boolean)\nreturns table (uuid varchar(27), is_frozen boolean)\nSTABLE\nlanguage SQL\nas $$\nWITH RECURSIVE\n  project_subtree(uuid, is_frozen) as (\n    values (starting_uuid, starting_is_frozen)\n    union\n    select groups.uuid, project_subtree.is_frozen or groups.frozen_by_uuid is not null\n      from groups join project_subtree on project_subtree.uuid = groups.owner_uuid\n  )\n  select uuid, is_frozen from project_subtree;\n$$;\n}\n\n    # Initialize the table. After this, it is updated incrementally.\n    # See app/models/group.rb#update_frozen_groups\n    refresh_frozen\n  end\n\n  def down\n    drop_table :frozen_groups\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20220303204419_add_frozen_by_uuid_to_group_search_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddFrozenByUuidToGroupSearchIndex < ActiveRecord::Migration[5.0]\n  disable_ddl_transaction!\n\n  def up\n    remove_index :groups, :name => 'groups_search_index'\n    add_index :groups, [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\", \"group_class\", \"frozen_by_uuid\"], name: 'groups_search_index', algorithm: :concurrently\n  end\n\n  def down\n    remove_index :groups, :name => 'groups_search_index'\n    add_index :groups, [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\", \"group_class\"], name: 'groups_search_index', algorithm: :concurrently\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20220401153101_fix_created_at_indexes.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass FixCreatedAtIndexes < ActiveRecord::Migration[5.2]\n  @@idxtables = [:collections, :container_requests, :groups, :links, :repositories, :users, :virtual_machines, :workflows, :logs]\n\n  def up\n    @@idxtables.each do |table|\n      ActiveRecord::Base.connection.execute(\"DROP INDEX IF EXISTS index_#{table.to_s}_on_created_at\")\n      ActiveRecord::Base.connection.execute(\"DROP INDEX IF EXISTS index_#{table.to_s}_on_created_at_uuid\")\n      ActiveRecord::Base.connection.execute(\"DROP INDEX IF EXISTS index_#{table.to_s}_on_created_at_and_uuid\")\n      ActiveRecord::Base.connection.execute(\"DROP INDEX IF EXISTS index_#{table.to_s}_on_modified_at\")\n      ActiveRecord::Base.connection.execute(\"DROP INDEX IF EXISTS index_#{table.to_s}_on_modified_at_uuid\")\n      ActiveRecord::Base.connection.execute(\"DROP INDEX IF EXISTS index_#{table.to_s}_on_modified_at_and_uuid\")\n\n      ActiveRecord::Base.connection.execute(\"CREATE INDEX IF NOT EXISTS index_#{table.to_s}_on_created_at_and_uuid ON #{table.to_s} USING btree (created_at, uuid)\")\n      ActiveRecord::Base.connection.execute(\"CREATE INDEX IF NOT EXISTS index_#{table.to_s}_on_modified_at_and_uuid ON #{table.to_s} USING btree (modified_at, uuid)\")\n    end\n  end\n\n  def down\n    @@idxtables.each do |table|\n      ActiveRecord::Base.connection.execute(\"DROP INDEX IF EXISTS index_#{table.to_s}_on_modified_at_and_uuid\")\n      ActiveRecord::Base.connection.execute(\"CREATE INDEX IF NOT EXISTS index_#{table.to_s}_on_modified_at_uuid ON #{table.to_s} USING btree (modified_at desc, uuid asc)\")\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20220505112900_add_output_properties.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddOutputProperties < ActiveRecord::Migration[5.2]\n  def trgm_indexes\n    {\n      \"container_requests\" => \"container_requests_trgm_text_search_idx\",\n    }\n  end\n\n  def up\n    add_column :container_requests, :output_properties, :jsonb, default: {}\n    add_column :containers, :output_properties, :jsonb, default: {}\n\n    trgm_indexes.each do |model, indx|\n      execute \"DROP INDEX IF EXISTS #{indx}\"\n      execute \"CREATE INDEX #{indx} ON #{model} USING gin((#{model.classify.constantize.full_text_trgm}) gin_trgm_ops)\"\n    end\n  end\n\n  def down\n    remove_column :container_requests, :output_properties\n    remove_column :containers, :output_properties\n\n    trgm_indexes.each do |model, indx|\n      execute \"DROP INDEX IF EXISTS #{indx}\"\n      execute \"CREATE INDEX #{indx} ON #{model} USING gin((#{model.classify.constantize.full_text_trgm}) gin_trgm_ops)\"\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20220726034131_write_via_all_users.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass WriteViaAllUsers < ActiveRecord::Migration[5.2]\n  include CurrentApiClient\n  def up\n    changelinks(from: \"can_read\", to: \"can_write\")\n  end\n  def down\n    changelinks(from: \"can_write\", to: \"can_read\")\n  end\n  def changelinks(from:, to:)\n    ActiveRecord::Base.connection.exec_query(\n      \"update links set name=$1 where link_class=$2 and name=$3 and tail_uuid like $4 and head_uuid = $5\",\n      \"migrate\", [\n        to,\n        \"permission\",\n        from,\n        \"_____-tpzed-_______________\",\n        all_users_group_uuid,\n      ])\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20220804133317_add_cost_to_containers.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddCostToContainers < ActiveRecord::Migration[5.2]\n  def change\n    add_column :containers, :cost, :float, null: false, default: 0\n    add_column :containers, :subrequests_cost, :float, null: false, default: 0\n    add_column :container_requests, :cumulative_cost, :float, null: false, default: 0\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20221219165512_dedup_permission_links.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'update_permissions'\n\nclass DedupPermissionLinks < ActiveRecord::Migration[5.2]\n  include CurrentApiClient\n  def up\n    act_as_system_user do\n      batch_update_permissions do\n        rows = ActiveRecord::Base.connection.select_all(\"SELECT MIN(uuid) AS uuid, COUNT(uuid) AS n FROM links\n          WHERE tail_uuid IS NOT NULL\n           AND head_uuid IS NOT NULL\n           AND link_class = 'permission'\n           AND name in ('can_read', 'can_write', 'can_manage')\n          GROUP BY (tail_uuid, head_uuid)\n          HAVING COUNT(uuid) > 1\")\n        rows.each do |row|\n          Rails.logger.debug \"DedupPermissionLinks: consolidating #{row['n']} links into #{row['uuid']}\"\n          link = Link.find_by_uuid(row['uuid'])\n          # This no-op update has the side effect that the update hooks\n          # will merge the highest available permission into this one\n          # and then delete the others.\n          link.update!(properties: link.properties.dup)\n        end\n\n        rows = ActiveRecord::Base.connection.select_all(\"SELECT MIN(uuid) AS uuid, COUNT(uuid) AS n FROM links\n          WHERE tail_uuid IS NOT NULL\n           AND head_uuid IS NOT NULL\n           AND link_class = 'permission'\n           AND name = 'can_login'\n          GROUP BY (tail_uuid, head_uuid, properties)\n          HAVING COUNT(uuid) > 1\")\n        rows.each do |row|\n          Rails.logger.debug \"DedupPermissionLinks: consolidating #{row['n']} links into #{row['uuid']}\"\n          link = Link.find_by_uuid(row['uuid'])\n          link.update!(properties: link.properties.dup)\n        end\n      end\n    end\n  end\n  def down\n    # no-op -- restoring redundant records would still be redundant\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20221230155924_bigint_id.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass BigintId < ActiveRecord::Migration[5.2]\n  disable_ddl_transaction!\n  def up\n    change_column :api_client_authorizations, :id, :bigint\n    change_column :api_client_authorizations, :api_client_id, :bigint\n    change_column :api_client_authorizations, :user_id, :bigint\n    change_column :api_clients, :id, :bigint\n    change_column :authorized_keys, :id, :bigint\n    change_column :collections, :id, :bigint\n    change_column :container_requests, :id, :bigint\n    change_column :containers, :id, :bigint\n    change_column :groups, :id, :bigint\n    change_column :humans, :id, :bigint\n    change_column :job_tasks, :id, :bigint\n    change_column :jobs, :id, :bigint\n    change_column :keep_disks, :id, :bigint\n    change_column :keep_services, :id, :bigint\n    change_column :links, :id, :bigint\n    change_column :logs, :id, :bigint\n    change_column :nodes, :id, :bigint\n    change_column :users, :id, :bigint\n    change_column :pipeline_instances, :id, :bigint\n    change_column :pipeline_templates, :id, :bigint\n    change_column :repositories, :id, :bigint\n    change_column :specimens, :id, :bigint\n    change_column :traits, :id, :bigint\n    change_column :virtual_machines, :id, :bigint\n    change_column :workflows, :id, :bigint\n  end\n\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20230421142716_add_name_index_to_collections_and_groups.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddNameIndexToCollectionsAndGroups < ActiveRecord::Migration[5.2]\n  def up\n    ActiveRecord::Base.connection.execute 'CREATE INDEX index_groups_on_name on groups USING gin (name gin_trgm_ops)'\n    ActiveRecord::Base.connection.execute 'CREATE INDEX index_collections_on_name on collections USING gin (name gin_trgm_ops)'\n  end\n  def down\n    ActiveRecord::Base.connection.execute 'DROP INDEX index_collections_on_name'\n    ActiveRecord::Base.connection.execute 'DROP INDEX index_groups_on_name'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20230503224107_priority_update_functions.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass PriorityUpdateFunctions < ActiveRecord::Migration[5.2]\n  def up\n    ActiveRecord::Base.connection.execute %{\nCREATE OR REPLACE FUNCTION container_priority(for_container_uuid character varying, inherited bigint, inherited_from character varying) returns bigint\n    LANGUAGE sql\n    AS $$\n/* Determine the priority of an individual container.\n   The \"inherited\" priority comes from the path we followed from the root, the parent container\n   priority hasn't been updated in the table yet but we need to behave it like it has been.\n*/\nselect coalesce(max(case when containers.uuid = inherited_from then inherited\n                         when containers.priority is not NULL then containers.priority\n                         else container_requests.priority * 1125899906842624::bigint - (extract(epoch from container_requests.created_at)*1000)::bigint\n                    end), 0) from\n    container_requests left outer join containers on container_requests.requesting_container_uuid = containers.uuid\n    where container_requests.container_uuid = for_container_uuid and container_requests.state = 'Committed' and container_requests.priority > 0;\n$$;\n}\n\n    ActiveRecord::Base.connection.execute %{\nCREATE OR REPLACE FUNCTION container_tree_priorities(for_container_uuid character varying) returns table (pri_container_uuid character varying, upd_priority bigint)\n    LANGUAGE sql\n    AS $$\n/* Calculate the priorities of all containers starting from for_container_uuid.\n   This traverses the process tree downward and calls container_priority for each container\n   and returns a table of container uuids and their new priorities.\n*/\nwith recursive tab(upd_container_uuid, upd_priority) as (\n  select for_container_uuid, container_priority(for_container_uuid, 0, '')\nunion\n  select containers.uuid, container_priority(containers.uuid, child_requests.upd_priority, child_requests.upd_container_uuid)\n  from (tab join container_requests on tab.upd_container_uuid = container_requests.requesting_container_uuid) as child_requests\n  join containers on child_requests.container_uuid = containers.uuid\n  where containers.state in ('Queued', 'Locked', 'Running')\n)\nselect upd_container_uuid, upd_priority from tab;\n$$;\n}\n\n    ActiveRecord::Base.connection.execute %{\nCREATE OR REPLACE FUNCTION container_tree(for_container_uuid character varying) returns table (pri_container_uuid character varying)\n    LANGUAGE sql\n    AS $$\n/* A lighter weight version of the update_priorities query that only returns the containers in a tree,\n   used by SELECT FOR UPDATE.\n*/\nwith recursive tab(upd_container_uuid) as (\n  select for_container_uuid\nunion\n  select containers.uuid\n  from (tab join container_requests on tab.upd_container_uuid = container_requests.requesting_container_uuid) as child_requests\n  join containers on child_requests.container_uuid = containers.uuid\n  where containers.state in ('Queued', 'Locked', 'Running')\n)\nselect upd_container_uuid from tab;\n$$;\n}\n  end\n\n  def down\n    ActiveRecord::Base.connection.execute \"DROP FUNCTION container_priority\"\n    ActiveRecord::Base.connection.execute \"DROP FUNCTION container_tree_priorities\"\n    ActiveRecord::Base.connection.execute \"DROP FUNCTION container_tree\"\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20230815160000_jsonb_exists_functions.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass JsonbExistsFunctions < ActiveRecord::Migration[5.2]\n  def up\n\n    # Define functions for the \"?\" and \"?&\" operators.  We can't use\n    # \"?\" and \"?&\" directly in ActiveRecord queries because \"?\" is\n    # used for parameter substitution.\n    #\n    # We used to use jsonb_exists() and jsonb_exists_all() but\n    # apparently Postgres associates indexes with operators but not\n    # with functions, so while a query using an operator can use the\n    # index, the equivalent clause using the function will always\n    # perform a full row scan.\n    #\n    # See ticket https://dev.arvados.org/issues/20858 for examples.\n    #\n    # As a workaround, we can define IMMUTABLE functions, which are\n    # directly inlined into the query, which then uses the index as\n    # intended.\n    #\n    # Huge shout out to this stack overflow post that explained what\n    # is going on and provides the workaround used here.\n    #\n    # https://dba.stackexchange.com/questions/90002/postgresql-operator-uses-index-but-underlying-function-does-not\n\n    ActiveRecord::Base.connection.execute %{\nCREATE OR REPLACE FUNCTION jsonb_exists_inline_op(jsonb, text)\nRETURNS bool\nLANGUAGE sql\nIMMUTABLE\nAS $$SELECT $1 ? $2$$\n}\n\n    ActiveRecord::Base.connection.execute %{\nCREATE OR REPLACE FUNCTION jsonb_exists_all_inline_op(jsonb, text[])\nRETURNS bool\nLANGUAGE sql\nIMMUTABLE\nAS 'SELECT $1 ?& $2'\n}\n  end\n\n  def down\n    ActiveRecord::Base.connection.execute \"DROP FUNCTION jsonb_exists_inline_op\"\n    ActiveRecord::Base.connection.execute \"DROP FUNCTION jsonb_exists_all_inline_op\"\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20230821000000_priority_update_fix.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass PriorityUpdateFix < ActiveRecord::Migration[5.2]\n  def up\n    ActiveRecord::Base.connection.execute %{\nCREATE OR REPLACE FUNCTION container_priority(for_container_uuid character varying, inherited bigint, inherited_from character varying) returns bigint\n    LANGUAGE sql\n    AS $$\n/* Determine the priority of an individual container.\n   The \"inherited\" priority comes from the path we followed from the root, the parent container\n   priority hasn't been updated in the table yet but we need to behave it like it has been.\n*/\nselect coalesce(max(case when containers.uuid = inherited_from then inherited\n                         when containers.priority is not NULL then containers.priority\n                         else container_requests.priority * 1125899906842624::bigint - (extract(epoch from container_requests.created_at)*1000)::bigint\n                    end), 0) from\n    container_requests left outer join containers on container_requests.requesting_container_uuid = containers.uuid\n    where container_requests.container_uuid = for_container_uuid and\n          container_requests.state = 'Committed' and\n          container_requests.priority > 0 and\n          container_requests.owner_uuid not in (select group_uuid from trashed_groups);\n$$;\n}\n  end\n\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20230922000000_add_btree_name_index_to_collections_and_groups.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddBtreeNameIndexToCollectionsAndGroups < ActiveRecord::Migration[5.2]\n  #\n  # We previously added 'index_groups_on_name' and\n  # 'index_collections_on_name' but those are 'gin_trgm_ops' which is\n  # used with 'ilike' searches but despite documentation suggesting\n  # they would be, experience has shown these indexes don't get used\n  # for '=' (and/or they are much slower than the btree for exact\n  # matches).\n  #\n  # So we want to add a regular btree index.\n  #\n  def up\n    ActiveRecord::Base.connection.execute 'CREATE INDEX index_groups_on_name_btree on groups USING btree (name)'\n    ActiveRecord::Base.connection.execute 'CREATE INDEX index_collections_on_name_btree on collections USING btree (name)'\n  end\n  def down\n    ActiveRecord::Base.connection.execute 'DROP INDEX IF EXISTS index_collections_on_name_btree'\n    ActiveRecord::Base.connection.execute 'DROP INDEX IF EXISTS index_groups_on_name_btree'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20231013000000_compute_permission_index.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass ComputePermissionIndex < ActiveRecord::Migration[5.2]\n  def up\n    # The inner part of compute_permission_subgraph has a query clause like this:\n    #\n    #    where u.perm_origin_uuid = m.target_uuid AND m.traverse_owned\n    #         AND (m.user_uuid = m.target_uuid or m.target_uuid not like '_____-tpzed-_______________')\n    #\n    # This will end up doing a sequential scan on\n    # materialized_permissions, which can easily have millions of\n    # rows, unless we fully index the table for this query.  In one test,\n    # this brought the compute_permission_subgraph query from over 6\n    # seconds down to 250ms.\n    #\n    ActiveRecord::Base.connection.execute \"drop index if exists index_materialized_permissions_target_is_not_user\"\n    ActiveRecord::Base.connection.execute %{\ncreate index index_materialized_permissions_target_is_not_user on materialized_permissions (target_uuid, traverse_owned, (user_uuid = target_uuid or target_uuid not like '_____-tpzed-_______________'));\n}\n  end\n\n  def down\n    ActiveRecord::Base.connection.execute \"drop index if exists index_materialized_permissions_target_is_not_user\"\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20240329173437_add_output_glob_to_containers.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddOutputGlobToContainers < ActiveRecord::Migration[7.0]\n  def change\n    add_column :containers, :output_glob, :text, default: '[]'\n    add_column :container_requests, :output_glob, :text, default: '[]'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20240402162733_add_output_glob_index_to_containers.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddOutputGlobIndexToContainers < ActiveRecord::Migration[4.2]\n  def up\n    ActiveRecord::Base.connection.execute 'DROP INDEX index_containers_on_reuse_columns'\n    ActiveRecord::Base.connection.execute 'CREATE INDEX index_containers_on_reuse_columns on containers (md5(command), cwd, md5(environment), output_path, md5(output_glob), container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints))'\n  end\n  def down\n    ActiveRecord::Base.connection.execute 'DROP INDEX index_containers_on_reuse_columns'\n    ActiveRecord::Base.connection.execute 'CREATE INDEX index_containers_on_reuse_columns on containers (md5(command), cwd, md5(environment), output_path, container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints))'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20240604183200_exclude_uuids_and_hashes_from_text_search.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass ExcludeUuidsAndHashesFromTextSearch < ActiveRecord::Migration[7.0]\n  def trgm_indexes\n    [\n      # Table name, index name, pre-migration full_text_trgm\n      [\"collections\", \"collections_trgm_text_search_idx\", \"(coalesce(owner_uuid,'') || ' ' || coalesce(modified_by_client_uuid,'') || ' ' || coalesce(modified_by_user_uuid,'') || ' ' || coalesce(portable_data_hash,'') || ' ' || coalesce(uuid,'') || ' ' || coalesce(name,'') || ' ' || coalesce(description,'') || ' ' || coalesce(properties::text,'') || ' ' || coalesce(file_names,''))\"],\n      # container_requests handled by 20240820202230_exclude_container_image_from_text_search.rb\n      [\"groups\", \"groups_trgm_text_search_idx\", \"(coalesce(uuid,'') || ' ' || coalesce(owner_uuid,'') || ' ' || coalesce(modified_by_client_uuid,'') || ' ' || coalesce(modified_by_user_uuid,'') || ' ' || coalesce(name,'') || ' ' || coalesce(description,'') || ' ' || coalesce(group_class,'') || ' ' || coalesce(properties::text,''))\"],\n      [\"workflows\", \"workflows_trgm_text_search_idx\", \"(coalesce(uuid,'') || ' ' || coalesce(owner_uuid,'') || ' ' || coalesce(modified_by_client_uuid,'') || ' ' || coalesce(modified_by_user_uuid,'') || ' ' || coalesce(name,'') || ' ' || coalesce(description,''))\"],\n    ]\n  end\n\n  def up\n    trgm_indexes.each do |model, indx, _|\n      execute \"DROP INDEX IF EXISTS #{indx}\"\n      execute \"CREATE INDEX #{indx} ON #{model} USING gin((#{model.classify.constantize.full_text_trgm}) gin_trgm_ops)\"\n    end\n  end\n\n  def down\n    trgm_indexes.each do |model, indx, full_text_trgm|\n      execute \"DROP INDEX IF EXISTS #{indx}\"\n      execute \"CREATE INDEX #{indx} ON #{model} USING gin((#{full_text_trgm}) gin_trgm_ops)\"\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20240618121312_create_uuid_locks.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateUuidLocks < ActiveRecord::Migration[7.0]\n  def change\n    create_table :uuid_locks, id: false do |t|\n      t.string :uuid, null: false, index: {unique: true}\n      t.integer :n, null: false, default: 0\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20240627201747_set_default_api_client_id.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass SetDefaultApiClientId < ActiveRecord::Migration[7.0]\n  def change\n    ActiveRecord::Base.connection.execute 'ALTER TABLE api_client_authorizations ALTER COLUMN api_client_id SET DEFAULT 0'\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20240820202230_exclude_container_image_from_text_search.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass ExcludeContainerImageFromTextSearch < ActiveRecord::Migration[7.0]\n  def trgm_indexes\n    [\n      # Table name, index name, pre-migration full_text_trgm\n      [\"container_requests\", \"container_requests_trgm_text_search_idx\", \"(coalesce(uuid,'') || ' ' || coalesce(owner_uuid,'') || ' ' || coalesce(modified_by_client_uuid,'') || ' ' || coalesce(modified_by_user_uuid,'') || ' ' || coalesce(name,'') || ' ' || coalesce(description,'') || ' ' || coalesce(properties::text,'') || ' ' || coalesce(state,'') || ' ' || coalesce(requesting_container_uuid,'') || ' ' || coalesce(container_uuid,'') || ' ' || coalesce(runtime_constraints::text,'') || ' ' || coalesce(container_image,'') || ' ' || coalesce(environment::text,'') || ' ' || coalesce(cwd,'') || ' ' || coalesce(command::text,'') || ' ' || coalesce(output_path,'') || ' ' || coalesce(filters,'') || ' ' || coalesce(scheduling_parameters::text,'') || ' ' || coalesce(output_uuid,'') || ' ' || coalesce(log_uuid,'') || ' ' || coalesce(output_name,'') || ' ' || coalesce(output_properties::text,''))\"],\n    ]\n  end\n\n  def up\n    trgm_indexes.each do |model, indx, _|\n      execute \"DROP INDEX IF EXISTS #{indx}\"\n      execute \"CREATE INDEX #{indx} ON #{model} USING gin((#{model.classify.constantize.full_text_trgm}) gin_trgm_ops)\"\n    end\n  end\n\n  def down\n    trgm_indexes.each do |model, indx, full_text_trgm|\n      execute \"DROP INDEX IF EXISTS #{indx}\"\n      execute \"CREATE INDEX #{indx} ON #{model} USING gin((#{full_text_trgm}) gin_trgm_ops)\"\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20241118110000_index_on_container_request_name.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass IndexOnContainerRequestName < ActiveRecord::Migration[7.0]\n  def up\n    add_index :container_requests, [\"name\", \"owner_uuid\"]\n  end\n\n  def down\n    remove_index :container_requests, [\"name\", \"owner_uuid\"]\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20250115145250_drop_fts_index_again.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Fulltext search indexes were removed in\n# 7f4d69cf43a7a743a491105665b3b878a3cfd11c (#15430), but then for no\n# apparent reason dcdf385b2852acf95f41e2340d07cd68cb34e371 (#12430)\n# re-added the FTS index for container_requests.\nclass DropFtsIndexAgain < ActiveRecord::Migration[7.0]\n  def up\n    execute \"DROP INDEX IF EXISTS container_requests_full_text_search_idx\"\n  end\n\n  def down\n    # No-op because the index was not used by prior versions either.\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20250312141843_add_refreshes_at_to_api_client_authorizations.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddRefreshesAtToApiClientAuthorizations < ActiveRecord::Migration[7.1]\n  def change\n    add_column :api_client_authorizations, :refreshes_at, :timestamp, null: true\n    add_index :api_client_authorizations, :refreshes_at\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20250315222222_add_services_and_published_ports.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddServicesAndPublishedPorts < ActiveRecord::Migration[7.1]\n  def change\n    add_column :containers, :service, :boolean, null: false, :default => false\n    add_column :container_requests, :service, :boolean, null: false, :default => false\n\n    add_column :containers, :published_ports, :jsonb, :default => {}\n    add_column :container_requests, :published_ports, :jsonb, :default => {}\n\n    add_index :links, :name, :where => \"link_class = 'published_port'\", :unique => true\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20250402131700_add_collection_uuid_to_workflows.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddCollectionUuidToWorkflows < ActiveRecord::Migration[7.1]\n\n  def up\n    remove_index :workflows, name: 'workflows_search_idx'\n    add_column :workflows, :collection_uuid, :string, null: true\n    add_index :workflows, [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\", \"collection_uuid\"], name: 'workflows_search_index'\n  end\n\n  def down\n    remove_index :workflows, name: 'workflows_search_index'\n    remove_column :workflows, :collection_uuid\n    add_index :workflows, [\"uuid\", \"owner_uuid\", \"modified_by_client_uuid\", \"modified_by_user_uuid\", \"name\"], name: 'workflows_search_idx'\n  end\n\nend\n"
  },
  {
    "path": "services/api/db/migrate/20250422103000_create_credentials_table.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CreateCredentialsTable < ActiveRecord::Migration[7.1]\n  def change\n    create_table :credentials, :id => :string, :primary_key => :uuid do |t|\n      t.string :owner_uuid, :null => false\n      t.datetime :created_at, :null => false\n      t.datetime :modified_at, :null => false\n      t.string :modified_by_user_uuid\n      t.string :name\n      t.text :description\n      t.string :credential_class\n      t.jsonb :scopes, :default => []\n      t.string :external_id\n      t.text :secret\n      t.datetime :expires_at, :null => false\n    end\n    add_index :credentials, :uuid, unique: true\n    add_index :credentials, :owner_uuid\n    add_index :credentials, [:owner_uuid, :name], unique: true\n    add_index :credentials, [:uuid, :owner_uuid, :modified_by_user_uuid, :name, :credential_class, :external_id]\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20250426201300_priority_update_check_trash_at.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass PriorityUpdateCheckTrashAt < ActiveRecord::Migration[7.1]\n  def up\n    ActiveRecord::Base.connection.execute %{\nCREATE OR REPLACE FUNCTION container_priority(for_container_uuid character varying, inherited bigint, inherited_from character varying) returns bigint\n    LANGUAGE sql\n    AS $$\n/* Determine the priority of an individual container.\n   The \"inherited\" priority comes from the path we followed from the root, the parent container\n   priority hasn't been updated in the table yet but we need to behave it like it has been.\n*/\nselect coalesce(max(case when containers.uuid = inherited_from then inherited\n                         when containers.priority is not NULL then containers.priority\n                         else container_requests.priority * 1125899906842624::bigint - (extract(epoch from container_requests.created_at)*1000)::bigint\n                    end), 0) from\n    container_requests left outer join containers on container_requests.requesting_container_uuid = containers.uuid\n    where container_requests.container_uuid = for_container_uuid and\n          container_requests.state = 'Committed' and\n          container_requests.priority > 0 and\n          container_requests.owner_uuid not in (select group_uuid from trashed_groups WHERE trash_at <= statement_timestamp());\n$$;\n}\n  end\n\n  def down\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20250527181323_add_container_ports.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AddContainerPorts < ActiveRecord::Migration[7.1]\n  def change\n    create_table :container_ports, :id => false do |t|\n      t.integer :external_port, :null => false\n      t.integer :container_port, :null => false\n      t.string :container_uuid, :null => false\n    end\n    add_index :container_ports, :external_port, unique: true\n    add_index :container_ports, :container_uuid\n  end\nend\n"
  },
  {
    "path": "services/api/db/migrate/20251006181234_enforce_required_credential_fields.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass EnforceRequiredCredentialFields < ActiveRecord::Migration[7.0]\n  def up\n    execute <<~SQL\n      UPDATE credentials SET name = '' WHERE name IS NULL;\n      UPDATE credentials SET credential_class = '' WHERE credential_class IS NULL;\n      UPDATE credentials SET external_id = '' WHERE external_id IS NULL;\n      UPDATE credentials SET secret = '' WHERE secret IS NULL;\n      UPDATE credentials SET expires_at = NOW() WHERE expires_at IS NULL;\n    SQL\n\n    change_column_null :credentials, :name, false\n    change_column_null :credentials, :credential_class, false\n    change_column_null :credentials, :external_id, false\n    change_column_null :credentials, :secret, false\n    change_column_null :credentials, :expires_at, false\n  end\n\n  def down\n    execute <<~SQL\n      ALTER TABLE credentials\n      DROP CONSTRAINT IF EXISTS credentials_name_not_null,\n      DROP CONSTRAINT IF EXISTS credentials_credential_class_not_null,\n      DROP CONSTRAINT IF EXISTS credentials_external_id_not_null,\n      DROP CONSTRAINT IF EXISTS credentials_secret_not_null,\n      DROP CONSTRAINT IF EXISTS credentials_expires_at_not_null;\n    SQL\n  end\nend\n\n"
  },
  {
    "path": "services/api/db/seeds.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# This file seeds the database with initial/default values if needed.\n# It is safe to re-run on an existing database.\n#\n# It is invoked by `rake db:seed` and `rake db:setup`.\n\nDatabaseSeeds.install\n"
  },
  {
    "path": "services/api/db/structure.sql",
    "content": "-- Copyright (C) The Arvados Authors. All rights reserved.\n--\n-- SPDX-License-Identifier: AGPL-3.0\n\nSET statement_timeout = 0;\nSET client_encoding = 'UTF8';\nSET standard_conforming_strings = on;\nSELECT pg_catalog.set_config('search_path', '', false);\nSET check_function_bodies = false;\nSET xmloption = content;\nSET client_min_messages = warning;\n\n--\n-- Name: pg_trgm; Type: EXTENSION; Schema: -; Owner: -\n--\n\nCREATE EXTENSION IF NOT EXISTS pg_trgm WITH SCHEMA public;\n\n\n--\n-- Name: EXTENSION pg_trgm; Type: COMMENT; Schema: -; Owner: -\n--\n\n-- COMMENT ON EXTENSION pg_trgm IS 'text similarity measurement and index searching based on trigrams';\n\n\n--\n-- Name: compute_permission_subgraph(character varying, character varying, integer, character varying); Type: FUNCTION; Schema: public; Owner: -\n--\n\nCREATE FUNCTION public.compute_permission_subgraph(perm_origin_uuid character varying, starting_uuid character varying, starting_perm integer, perm_edge_id character varying) RETURNS TABLE(user_uuid character varying, target_uuid character varying, val integer, traverse_owned boolean)\n    LANGUAGE sql STABLE\n    AS $$\n\n/* The purpose of this function is to compute the permissions for a\n   subgraph of the database, starting from a given edge.  The newly\n   computed permissions are used to add and remove rows from the main\n   permissions table.\n\n   perm_origin_uuid: The object that 'gets' the permission.\n\n   starting_uuid: The starting object the permission applies to.\n\n   starting_perm: The permission that perm_origin_uuid 'has' on\n                  starting_uuid One of 1, 2, 3 for can_read,\n                  can_write, can_manage respectively, or 0 to revoke\n                  permissions.\n\n   perm_edge_id: Identifies the permission edge that is being updated.\n                 Changes of ownership, this is starting_uuid.\n                 For links, this is the uuid of the link object.\n                 This is used to override the edge value in the database\n                 with starting_perm.  
This is necessary when revoking\n                 permissions because the update happens before the edge is\n                 actually removed.\n*/\nwith\n  /* Starting from starting_uuid, determine the set of objects that\n     could be affected by this permission change.\n\n     Note: We don't traverse users unless it is an \"identity\"\n     permission (permission origin is self).\n  */\n  perm_from_start(perm_origin_uuid, target_uuid, val, traverse_owned) as (\n\nWITH RECURSIVE\n        traverse_graph(origin_uuid, target_uuid, val, traverse_owned, starting_set) as (\n\n             values (perm_origin_uuid, starting_uuid, starting_perm,\n                    should_traverse_owned(starting_uuid, starting_perm),\n                    (perm_origin_uuid = starting_uuid or starting_uuid not like '_____-tpzed-_______________'))\n\n          union\n            (select traverse_graph.origin_uuid,\n                    edges.head_uuid,\n                      least(\ncase (edges.edge_id = perm_edge_id)\n                               when true then starting_perm\n                               else edges.val\n                            end\n,\n                            traverse_graph.val),\n                    should_traverse_owned(edges.head_uuid, edges.val),\n                    false\n             from permission_graph_edges as edges, traverse_graph\n             where traverse_graph.target_uuid = edges.tail_uuid\n             and (edges.tail_uuid like '_____-j7d0g-_______________' or\n                  traverse_graph.starting_set)))\n        select traverse_graph.origin_uuid, target_uuid, max(val) as val, bool_or(traverse_owned) as traverse_owned from traverse_graph\n        group by (traverse_graph.origin_uuid, target_uuid)\n),\n\n  /* Find other inbound edges that grant permissions to 'targets' in\n     perm_from_start, and compute permissions that originate from\n     those.\n\n     This is necessary for two reasons:\n\n       1) Other users may have access to a subset of the objects\n       through permission links other than the one we started from.\n       If we don't recompute them, their permission will get dropped.\n\n       2) There may be more than one path through which a user gets\n       permission to an object.  For example, a user owns a project\n       and also shares it can_read with a group the user belongs\n       to.  
Adding the can_read link must not overwrite the existing\n       can_manage permission granted by ownership.\n  */\n  additional_perms(perm_origin_uuid, target_uuid, val, traverse_owned) as (\n\nWITH RECURSIVE\n        traverse_graph(origin_uuid, target_uuid, val, traverse_owned, starting_set) as (\n\n    select edges.tail_uuid as origin_uuid, edges.head_uuid as target_uuid, edges.val,\n           should_traverse_owned(edges.head_uuid, edges.val),\n           edges.head_uuid like '_____-j7d0g-_______________'\n      from permission_graph_edges as edges\n      where edges.edge_id != perm_edge_id and\n            edges.tail_uuid not in (select target_uuid from perm_from_start where target_uuid like '_____-j7d0g-_______________') and\n            edges.head_uuid in (select target_uuid from perm_from_start)\n\n          union\n            (select traverse_graph.origin_uuid,\n                    edges.head_uuid,\n                      least(\ncase (edges.edge_id = perm_edge_id)\n                               when true then starting_perm\n                               else edges.val\n                            end\n,\n                            traverse_graph.val),\n                    should_traverse_owned(edges.head_uuid, edges.val),\n                    false\n             from permission_graph_edges as edges, traverse_graph\n             where traverse_graph.target_uuid = edges.tail_uuid\n             and (edges.tail_uuid like '_____-j7d0g-_______________' or\n                  traverse_graph.starting_set)))\n        select traverse_graph.origin_uuid, target_uuid, max(val) as val, bool_or(traverse_owned) as traverse_owned from traverse_graph\n        group by (traverse_graph.origin_uuid, target_uuid)\n),\n\n  /* Combine the permissions computed in the first two phases. */\n  all_perms(perm_origin_uuid, target_uuid, val, traverse_owned) as (\n      select * from perm_from_start\n    union all\n      select * from additional_perms\n  )\n\n  /* The actual query that produces rows to be added or removed\n     from the materialized_permissions table.  
This is the clever\n     bit.\n\n     Key insights:\n\n     * For every group, the materialized_permissions table lists all users\n       that have access to that group.\n\n     * The all_perms subquery has computed permissions on a set of\n       objects for all inbound \"origins\", which are users or groups.\n\n     * Permissions through groups are transitive.\n\n     We can infer:\n\n     1) The materialized_permissions table declares that user X has permission N on group Y\n     2) The all_perms result has determined group Y has permission M on object Z\n     3) Therefore, user X has permission min(N, M) on object Z\n\n     This allows us to efficiently determine the set of users that\n     have permissions on the subset of objects, without having to\n     follow the chain of permission back up to find those users.\n\n     In addition, because users always have permission on themselves, this\n     query also makes sure those permission rows are always\n     returned.\n  */\n  select v.user_uuid, v.target_uuid, max(v.perm_level), bool_or(v.traverse_owned) from\n    (select m.user_uuid,\n         u.target_uuid,\n         least(u.val, m.perm_level) as perm_level,\n         u.traverse_owned\n      from all_perms as u, materialized_permissions as m\n           where u.perm_origin_uuid = m.target_uuid AND m.traverse_owned\n           AND (m.user_uuid = m.target_uuid or m.target_uuid not like '_____-tpzed-_______________')\n    union all\n      select target_uuid as user_uuid, target_uuid, 3, true\n        from all_perms\n        where all_perms.target_uuid like '_____-tpzed-_______________') as v\n    group by v.user_uuid, v.target_uuid\n$$;\n\n\n--\n-- Name: container_priority(character varying, bigint, character varying); Type: FUNCTION; Schema: public; Owner: -\n--\n\nCREATE FUNCTION public.container_priority(for_container_uuid character varying, inherited bigint, inherited_from character varying) RETURNS bigint\n    LANGUAGE sql\n    AS $$\n/* Determine the priority of an individual container.\n   The \"inherited\" priority comes from the path we followed from the root; the parent container's\n   priority hasn't been updated in the table yet, but we need to behave as if it has been.\n*/\nselect coalesce(max(case when containers.uuid = inherited_from then inherited\n                         when containers.priority is not NULL then containers.priority\n                         else container_requests.priority * 1125899906842624::bigint - (extract(epoch from container_requests.created_at)*1000)::bigint\n                    end), 0) from\n    container_requests left outer join containers on container_requests.requesting_container_uuid = containers.uuid\n    where container_requests.container_uuid = for_container_uuid and\n          container_requests.state = 'Committed' and\n          container_requests.priority > 0 and\n          container_requests.owner_uuid not in (select group_uuid from trashed_groups WHERE trash_at <= statement_timestamp());\n$$;\n\n\n--\n-- Name: container_tree(character varying); Type: FUNCTION; Schema: public; Owner: -\n--\n\nCREATE FUNCTION public.container_tree(for_container_uuid character varying) RETURNS TABLE(pri_container_uuid character varying)\n    LANGUAGE sql\n    AS $$\n/* A lighter weight version of the update_priorities query that only returns the containers in a tree,\n   used by SELECT FOR UPDATE.\n*/\nwith recursive tab(upd_container_uuid) as (\n  select for_container_uuid\nunion\n  select containers.uuid\n  from (tab join container_requests on 
tab.upd_container_uuid = container_requests.requesting_container_uuid) as child_requests\n  join containers on child_requests.container_uuid = containers.uuid\n  where containers.state in ('Queued', 'Locked', 'Running')\n)\nselect upd_container_uuid from tab;\n$$;\n\n\n--\n-- Name: container_tree_priorities(character varying); Type: FUNCTION; Schema: public; Owner: -\n--\n\nCREATE FUNCTION public.container_tree_priorities(for_container_uuid character varying) RETURNS TABLE(pri_container_uuid character varying, upd_priority bigint)\n    LANGUAGE sql\n    AS $$\n/* Calculate the priorities of all containers starting from for_container_uuid.\n   This traverses the process tree downward and calls container_priority for each container\n   and returns a table of container uuids and their new priorities.\n*/\nwith recursive tab(upd_container_uuid, upd_priority) as (\n  select for_container_uuid, container_priority(for_container_uuid, 0, '')\nunion\n  select containers.uuid, container_priority(containers.uuid, child_requests.upd_priority, child_requests.upd_container_uuid)\n  from (tab join container_requests on tab.upd_container_uuid = container_requests.requesting_container_uuid) as child_requests\n  join containers on child_requests.container_uuid = containers.uuid\n  where containers.state in ('Queued', 'Locked', 'Running')\n)\nselect upd_container_uuid, upd_priority from tab;\n$$;\n\n\n--\n-- Name: jsonb_exists_all_inline_op(jsonb, text[]); Type: FUNCTION; Schema: public; Owner: -\n--\n\nCREATE FUNCTION public.jsonb_exists_all_inline_op(jsonb, text[]) RETURNS boolean\n    LANGUAGE sql IMMUTABLE\n    AS $_$SELECT $1 ?& $2$_$;\n\n\n--\n-- Name: jsonb_exists_inline_op(jsonb, text); Type: FUNCTION; Schema: public; Owner: -\n--\n\nCREATE FUNCTION public.jsonb_exists_inline_op(jsonb, text) RETURNS boolean\n    LANGUAGE sql IMMUTABLE\n    AS $_$SELECT $1 ? $2$_$;\n\n\n--\n-- Name: project_subtree_with_is_frozen(character varying, boolean); Type: FUNCTION; Schema: public; Owner: -\n--\n\nCREATE FUNCTION public.project_subtree_with_is_frozen(starting_uuid character varying, starting_is_frozen boolean) RETURNS TABLE(uuid character varying, is_frozen boolean)\n    LANGUAGE sql STABLE\n    AS $$\nWITH RECURSIVE\n  project_subtree(uuid, is_frozen) as (\n    values (starting_uuid, starting_is_frozen)\n    union\n    select groups.uuid, project_subtree.is_frozen or groups.frozen_by_uuid is not null\n      from groups join project_subtree on (groups.owner_uuid = project_subtree.uuid)\n  )\n  select uuid, is_frozen from project_subtree;\n$$;\n\n\n--\n-- Name: project_subtree_with_trash_at(character varying, timestamp without time zone); Type: FUNCTION; Schema: public; Owner: -\n--\n\nCREATE FUNCTION public.project_subtree_with_trash_at(starting_uuid character varying, starting_trash_at timestamp without time zone) RETURNS TABLE(target_uuid character varying, trash_at timestamp without time zone)\n    LANGUAGE sql STABLE\n    AS $$\n/* Starting from a project, recursively traverse all the projects\n  underneath it and return a set of project uuids and trash_at times\n  (may be null).  The initial trash_at can be a timestamp or null.\n  The trash_at time propagates downward to groups it owns, i.e. when a\n  group is trashed, everything underneath it in the ownership\n  hierarchy is also considered trashed.  
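Because LEAST() ignores NULL arguments, a project's effective trash_at is the earliest non-null trash_at found along its ownership path.  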
However, this fact is\n  recorded in the trashed_groups table, not by updating the trash_at field\n  in the groups table.\n*/\nWITH RECURSIVE\n        project_subtree(uuid, trash_at) as (\n        values (starting_uuid, starting_trash_at)\n        union\n        select groups.uuid, LEAST(project_subtree.trash_at, groups.trash_at)\n          from groups join project_subtree on (groups.owner_uuid = project_subtree.uuid)\n        )\n        select uuid, trash_at from project_subtree;\n$$;\n\n\n--\n-- Name: should_traverse_owned(character varying, integer); Type: FUNCTION; Schema: public; Owner: -\n--\n\nCREATE FUNCTION public.should_traverse_owned(starting_uuid character varying, starting_perm integer) RETURNS boolean\n    LANGUAGE sql IMMUTABLE\n    AS $$\n/* Helper function.  Determines if permission on an object implies\n   transitive permission to things the object owns.  This is always\n   true for groups, but only true for users when the permission level\n   is can_manage.  (UUIDs with infix 'j7d0g' are groups and 'tpzed'\n   are users; permission level 3 is can_manage.)\n*/\nselect starting_uuid like '_____-j7d0g-_______________' or\n       (starting_uuid like '_____-tpzed-_______________' and starting_perm >= 3);\n$$;\n\n\nSET default_tablespace = '';\n\nSET default_table_access_method = heap;\n\n--\n-- Name: api_client_authorizations; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.api_client_authorizations (\n    id bigint NOT NULL,\n    api_token character varying(255) NOT NULL,\n    api_client_id bigint DEFAULT 0 NOT NULL,\n    user_id bigint NOT NULL,\n    created_by_ip_address character varying(255),\n    last_used_by_ip_address character varying(255),\n    last_used_at timestamp without time zone,\n    expires_at timestamp without time zone,\n    created_at timestamp without time zone NOT NULL,\n    updated_at timestamp without time zone NOT NULL,\n    default_owner_uuid character varying(255),\n    scopes text DEFAULT '[\"all\"]'::text,\n    uuid character varying(255) NOT NULL,\n    refreshes_at timestamp without time zone\n);\n\n\n--\n-- Name: api_client_authorizations_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.api_client_authorizations_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: api_client_authorizations_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.api_client_authorizations_id_seq OWNED BY public.api_client_authorizations.id;\n\n\n--\n-- Name: api_clients; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.api_clients (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    owner_uuid character varying(255),\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    name character varying(255),\n    url_prefix character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    updated_at timestamp without time zone NOT NULL,\n    is_trusted boolean DEFAULT false\n);\n\n\n--\n-- Name: api_clients_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.api_clients_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: api_clients_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.api_clients_id_seq OWNED BY public.api_clients.id;\n\n\n--\n-- Name: ar_internal_metadata; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.ar_internal_metadata (\n    key 
character varying NOT NULL,\n    value character varying,\n    created_at timestamp without time zone NOT NULL,\n    updated_at timestamp without time zone NOT NULL\n);\n\n\n--\n-- Name: authorized_keys; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.authorized_keys (\n    id bigint NOT NULL,\n    uuid character varying(255) NOT NULL,\n    owner_uuid character varying(255) NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    name character varying(255),\n    key_type character varying(255),\n    authorized_user_uuid character varying(255),\n    public_key text,\n    expires_at timestamp without time zone,\n    created_at timestamp without time zone NOT NULL,\n    updated_at timestamp without time zone NOT NULL\n);\n\n\n--\n-- Name: authorized_keys_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.authorized_keys_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: authorized_keys_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.authorized_keys_id_seq OWNED BY public.authorized_keys.id;\n\n\n--\n-- Name: collections; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.collections (\n    id bigint NOT NULL,\n    owner_uuid character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    portable_data_hash character varying(255),\n    replication_desired integer,\n    replication_confirmed_at timestamp without time zone,\n    replication_confirmed integer,\n    updated_at timestamp without time zone NOT NULL,\n    uuid character varying(255),\n    manifest_text text,\n    name character varying(255),\n    description character varying(524288),\n    properties jsonb,\n    delete_at timestamp without time zone,\n    file_names text,\n    trash_at timestamp without time zone,\n    is_trashed boolean DEFAULT false NOT NULL,\n    storage_classes_desired jsonb DEFAULT '[\"default\"]'::jsonb,\n    storage_classes_confirmed jsonb DEFAULT '[]'::jsonb,\n    storage_classes_confirmed_at timestamp without time zone,\n    current_version_uuid character varying,\n    version integer DEFAULT 1 NOT NULL,\n    preserve_version boolean DEFAULT false,\n    file_count integer DEFAULT 0 NOT NULL,\n    file_size_total bigint DEFAULT 0 NOT NULL\n);\n\n\n--\n-- Name: collections_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.collections_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: collections_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.collections_id_seq OWNED BY public.collections.id;\n\n\n--\n-- Name: container_ports; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.container_ports (\n    external_port integer NOT NULL,\n    container_port integer NOT NULL,\n    container_uuid character varying NOT NULL\n);\n\n\n--\n-- Name: container_requests; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.container_requests (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    owner_uuid character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    modified_at timestamp without time zone,\n    
modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    name character varying(255),\n    description text,\n    properties jsonb,\n    state character varying(255),\n    requesting_container_uuid character varying(255),\n    container_uuid character varying(255),\n    container_count_max integer,\n    mounts text,\n    runtime_constraints text,\n    container_image character varying(255),\n    environment text,\n    cwd character varying(255),\n    command text,\n    output_path character varying(255),\n    priority integer,\n    expires_at timestamp without time zone,\n    filters text,\n    updated_at timestamp without time zone NOT NULL,\n    container_count integer DEFAULT 0,\n    use_existing boolean DEFAULT true,\n    scheduling_parameters text,\n    output_uuid character varying(255),\n    log_uuid character varying(255),\n    output_name character varying(255) DEFAULT NULL::character varying,\n    output_ttl integer DEFAULT 0 NOT NULL,\n    secret_mounts jsonb DEFAULT '{}'::jsonb,\n    runtime_token text,\n    output_storage_classes jsonb DEFAULT '[\"default\"]'::jsonb,\n    output_properties jsonb DEFAULT '{}'::jsonb,\n    cumulative_cost double precision DEFAULT 0.0 NOT NULL,\n    output_glob text DEFAULT '[]'::text,\n    service boolean DEFAULT false NOT NULL,\n    published_ports jsonb DEFAULT '{}'::jsonb\n);\n\n\n--\n-- Name: container_requests_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.container_requests_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: container_requests_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.container_requests_id_seq OWNED BY public.container_requests.id;\n\n\n--\n-- Name: containers; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.containers (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    owner_uuid character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    modified_at timestamp without time zone,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    state character varying(255),\n    started_at timestamp without time zone,\n    finished_at timestamp without time zone,\n    log character varying(255),\n    environment text,\n    cwd character varying(255),\n    command text,\n    output_path character varying(255),\n    mounts text,\n    runtime_constraints text,\n    output character varying(255),\n    container_image character varying(255),\n    progress double precision,\n    priority bigint,\n    updated_at timestamp without time zone NOT NULL,\n    exit_code integer,\n    auth_uuid character varying(255),\n    locked_by_uuid character varying(255),\n    scheduling_parameters text,\n    secret_mounts jsonb DEFAULT '{}'::jsonb,\n    secret_mounts_md5 character varying DEFAULT '99914b932bd37a50b983c5e7c90ae93b'::character varying,\n    runtime_status jsonb DEFAULT '{}'::jsonb,\n    runtime_user_uuid text,\n    runtime_auth_scopes jsonb,\n    runtime_token text,\n    lock_count integer DEFAULT 0 NOT NULL,\n    gateway_address character varying,\n    interactive_session_started boolean DEFAULT false NOT NULL,\n    output_storage_classes jsonb DEFAULT '[\"default\"]'::jsonb,\n    output_properties jsonb DEFAULT '{}'::jsonb,\n    cost double precision DEFAULT 0.0 NOT NULL,\n    subrequests_cost double precision DEFAULT 0.0 NOT NULL,\n    output_glob text 
DEFAULT '[]'::text,\n    service boolean DEFAULT false NOT NULL,\n    published_ports jsonb DEFAULT '{}'::jsonb\n);\n\n\n--\n-- Name: containers_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.containers_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: containers_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.containers_id_seq OWNED BY public.containers.id;\n\n\n--\n-- Name: credentials; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.credentials (\n    uuid character varying NOT NULL,\n    owner_uuid character varying NOT NULL,\n    created_at timestamp(6) without time zone NOT NULL,\n    modified_at timestamp(6) without time zone NOT NULL,\n    modified_by_user_uuid character varying,\n    name character varying NOT NULL,\n    description text,\n    credential_class character varying NOT NULL,\n    scopes jsonb DEFAULT '[]'::jsonb,\n    external_id character varying NOT NULL,\n    secret text NOT NULL,\n    expires_at timestamp(6) without time zone NOT NULL\n);\n\n\n--\n-- Name: frozen_groups; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.frozen_groups (\n    uuid character varying\n);\n\n\n--\n-- Name: groups; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.groups (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    owner_uuid character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    name character varying(255) NOT NULL,\n    description character varying(524288),\n    updated_at timestamp without time zone NOT NULL,\n    group_class character varying(255),\n    trash_at timestamp without time zone,\n    is_trashed boolean DEFAULT false NOT NULL,\n    delete_at timestamp without time zone,\n    properties jsonb DEFAULT '{}'::jsonb,\n    frozen_by_uuid character varying\n);\n\n\n--\n-- Name: groups_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.groups_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: groups_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.groups_id_seq OWNED BY public.groups.id;\n\n\n--\n-- Name: humans; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.humans (\n    id bigint NOT NULL,\n    uuid character varying(255) NOT NULL,\n    owner_uuid character varying(255) NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    properties text,\n    created_at timestamp without time zone NOT NULL,\n    updated_at timestamp without time zone NOT NULL\n);\n\n\n--\n-- Name: humans_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.humans_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: humans_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.humans_id_seq OWNED BY public.humans.id;\n\n\n--\n-- Name: job_tasks; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.job_tasks (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    owner_uuid character varying(255),\n    modified_by_client_uuid character varying(255),\n   
 modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    job_uuid character varying(255),\n    sequence integer,\n    parameters text,\n    output text,\n    progress double precision,\n    success boolean,\n    created_at timestamp without time zone NOT NULL,\n    updated_at timestamp without time zone NOT NULL,\n    created_by_job_task_uuid character varying(255),\n    qsequence bigint,\n    started_at timestamp without time zone,\n    finished_at timestamp without time zone\n);\n\n\n--\n-- Name: job_tasks_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.job_tasks_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: job_tasks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.job_tasks_id_seq OWNED BY public.job_tasks.id;\n\n\n--\n-- Name: job_tasks_qsequence_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.job_tasks_qsequence_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: job_tasks_qsequence_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.job_tasks_qsequence_seq OWNED BY public.job_tasks.qsequence;\n\n\n--\n-- Name: jobs; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.jobs (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    owner_uuid character varying(255),\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    submit_id character varying(255),\n    script character varying(255),\n    script_version character varying(255),\n    script_parameters text,\n    cancelled_by_client_uuid character varying(255),\n    cancelled_by_user_uuid character varying(255),\n    cancelled_at timestamp without time zone,\n    started_at timestamp without time zone,\n    finished_at timestamp without time zone,\n    running boolean,\n    success boolean,\n    output character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    updated_at timestamp without time zone NOT NULL,\n    is_locked_by_uuid character varying(255),\n    log character varying(255),\n    tasks_summary text,\n    runtime_constraints text,\n    nondeterministic boolean,\n    repository character varying(255),\n    supplied_script_version character varying(255),\n    docker_image_locator character varying(255),\n    priority integer DEFAULT 0 NOT NULL,\n    description character varying(524288),\n    state character varying(255),\n    arvados_sdk_version character varying(255),\n    components text,\n    script_parameters_digest character varying(255)\n);\n\n\n--\n-- Name: jobs_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.jobs_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: jobs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.jobs_id_seq OWNED BY public.jobs.id;\n\n\n--\n-- Name: keep_disks; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.keep_disks (\n    id bigint NOT NULL,\n    uuid character varying(255) NOT NULL,\n    owner_uuid character varying(255) NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    ping_secret character varying(255) 
NOT NULL,\n    node_uuid character varying(255),\n    filesystem_uuid character varying(255),\n    bytes_total integer,\n    bytes_free integer,\n    is_readable boolean DEFAULT true NOT NULL,\n    is_writable boolean DEFAULT true NOT NULL,\n    last_read_at timestamp without time zone,\n    last_write_at timestamp without time zone,\n    last_ping_at timestamp without time zone,\n    created_at timestamp without time zone NOT NULL,\n    updated_at timestamp without time zone NOT NULL,\n    keep_service_uuid character varying(255)\n);\n\n\n--\n-- Name: keep_disks_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.keep_disks_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: keep_disks_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.keep_disks_id_seq OWNED BY public.keep_disks.id;\n\n\n--\n-- Name: keep_services; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.keep_services (\n    id bigint NOT NULL,\n    uuid character varying(255) NOT NULL,\n    owner_uuid character varying(255) NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    service_host character varying(255),\n    service_port integer,\n    service_ssl_flag boolean,\n    service_type character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    updated_at timestamp without time zone NOT NULL,\n    read_only boolean DEFAULT false NOT NULL\n);\n\n\n--\n-- Name: keep_services_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.keep_services_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: keep_services_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.keep_services_id_seq OWNED BY public.keep_services.id;\n\n\n--\n-- Name: links; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.links (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    owner_uuid character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    tail_uuid character varying(255),\n    link_class character varying(255),\n    name character varying(255),\n    head_uuid character varying(255),\n    properties jsonb,\n    updated_at timestamp without time zone NOT NULL\n);\n\n\n--\n-- Name: links_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.links_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: links_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.links_id_seq OWNED BY public.links.id;\n\n\n--\n-- Name: logs; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.logs (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    owner_uuid character varying(255),\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    object_uuid character varying(255),\n    event_at timestamp without time zone,\n    event_type character varying(255),\n    summary text,\n    properties text,\n    created_at timestamp without time zone NOT NULL,\n    updated_at timestamp without time zone NOT NULL,\n    
modified_at timestamp without time zone,\n    object_owner_uuid character varying(255)\n);\n\n\n--\n-- Name: logs_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.logs_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: logs_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.logs_id_seq OWNED BY public.logs.id;\n\n\n--\n-- Name: materialized_permissions; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.materialized_permissions (\n    user_uuid character varying,\n    target_uuid character varying,\n    perm_level integer,\n    traverse_owned boolean\n);\n\n\n--\n-- Name: nodes; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.nodes (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    owner_uuid character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    slot_number integer,\n    hostname character varying(255),\n    domain character varying(255),\n    ip_address character varying(255),\n    first_ping_at timestamp without time zone,\n    last_ping_at timestamp without time zone,\n    info jsonb,\n    updated_at timestamp without time zone NOT NULL,\n    properties jsonb,\n    job_uuid character varying(255)\n);\n\n\n--\n-- Name: nodes_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.nodes_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: nodes_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.nodes_id_seq OWNED BY public.nodes.id;\n\n\n--\n-- Name: users; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.users (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    owner_uuid character varying(255) NOT NULL,\n    created_at timestamp without time zone NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    email character varying(255),\n    first_name character varying(255),\n    last_name character varying(255),\n    identity_url character varying(255),\n    is_admin boolean,\n    prefs text,\n    updated_at timestamp without time zone NOT NULL,\n    default_owner_uuid character varying(255),\n    is_active boolean DEFAULT false,\n    username character varying(255),\n    redirect_to_user_uuid character varying\n);\n\n\n--\n-- Name: permission_graph_edges; Type: VIEW; Schema: public; Owner: -\n--\n\nCREATE VIEW public.permission_graph_edges AS\n SELECT groups.owner_uuid AS tail_uuid,\n    groups.uuid AS head_uuid,\n    3 AS val,\n    groups.uuid AS edge_id\n   FROM public.groups\nUNION ALL\n SELECT users.owner_uuid AS tail_uuid,\n    users.uuid AS head_uuid,\n    3 AS val,\n    users.uuid AS edge_id\n   FROM public.users\nUNION ALL\n SELECT users.uuid AS tail_uuid,\n    users.uuid AS head_uuid,\n    3 AS val,\n    ''::character varying AS edge_id\n   FROM public.users\nUNION ALL\n SELECT links.tail_uuid,\n    links.head_uuid,\n        CASE\n            WHEN ((links.name)::text = 'can_read'::text) THEN 1\n            WHEN ((links.name)::text = 'can_login'::text) THEN 1\n            WHEN ((links.name)::text = 'can_write'::text) THEN 2\n            WHEN ((links.name)::text = 'can_manage'::text) 
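/* highest permission level; the ownership and self edges selected above are likewise hard-coded to val 3 */ 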
THEN 3\n            ELSE 0\n        END AS val,\n    links.uuid AS edge_id\n   FROM public.links\n  WHERE ((links.link_class)::text = 'permission'::text);\n\n\n--\n-- Name: pipeline_instances; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.pipeline_instances (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    owner_uuid character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    pipeline_template_uuid character varying(255),\n    name character varying(255),\n    components text,\n    updated_at timestamp without time zone NOT NULL,\n    properties text,\n    state character varying(255),\n    components_summary text,\n    started_at timestamp without time zone,\n    finished_at timestamp without time zone,\n    description character varying(524288)\n);\n\n\n--\n-- Name: pipeline_instances_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.pipeline_instances_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: pipeline_instances_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.pipeline_instances_id_seq OWNED BY public.pipeline_instances.id;\n\n\n--\n-- Name: pipeline_templates; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.pipeline_templates (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    owner_uuid character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    name character varying(255),\n    components text,\n    updated_at timestamp without time zone NOT NULL,\n    description character varying(524288)\n);\n\n\n--\n-- Name: pipeline_templates_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.pipeline_templates_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: pipeline_templates_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.pipeline_templates_id_seq OWNED BY public.pipeline_templates.id;\n\n\n--\n-- Name: repositories; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.repositories (\n    id bigint NOT NULL,\n    uuid character varying(255) NOT NULL,\n    owner_uuid character varying(255) NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    name character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    updated_at timestamp without time zone NOT NULL\n);\n\n\n--\n-- Name: repositories_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.repositories_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: repositories_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.repositories_id_seq OWNED BY public.repositories.id;\n\n\n--\n-- Name: schema_migrations; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.schema_migrations (\n    version character varying(255) NOT NULL\n);\n\n\n--\n-- Name: specimens; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE 
TABLE public.specimens (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    owner_uuid character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    material character varying(255),\n    updated_at timestamp without time zone NOT NULL,\n    properties text\n);\n\n\n--\n-- Name: specimens_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.specimens_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: specimens_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.specimens_id_seq OWNED BY public.specimens.id;\n\n\n--\n-- Name: traits; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.traits (\n    id bigint NOT NULL,\n    uuid character varying(255) NOT NULL,\n    owner_uuid character varying(255) NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    name character varying(255),\n    properties text,\n    created_at timestamp without time zone NOT NULL,\n    updated_at timestamp without time zone NOT NULL\n);\n\n\n--\n-- Name: traits_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.traits_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: traits_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.traits_id_seq OWNED BY public.traits.id;\n\n\n--\n-- Name: trashed_groups; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.trashed_groups (\n    group_uuid character varying,\n    trash_at timestamp without time zone\n);\n\n\n--\n-- Name: users_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.users_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: users_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.users_id_seq OWNED BY public.users.id;\n\n\n--\n-- Name: uuid_locks; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.uuid_locks (\n    uuid character varying NOT NULL,\n    n integer DEFAULT 0 NOT NULL\n);\n\n\n--\n-- Name: virtual_machines; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.virtual_machines (\n    id bigint NOT NULL,\n    uuid character varying(255) NOT NULL,\n    owner_uuid character varying(255) NOT NULL,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    modified_at timestamp without time zone,\n    hostname character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    updated_at timestamp without time zone NOT NULL\n);\n\n\n--\n-- Name: virtual_machines_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.virtual_machines_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: virtual_machines_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.virtual_machines_id_seq OWNED BY public.virtual_machines.id;\n\n\n--\n-- Name: workflows; Type: TABLE; Schema: public; Owner: -\n--\n\nCREATE TABLE public.workflows (\n    id bigint NOT NULL,\n    uuid character varying(255),\n    
owner_uuid character varying(255),\n    created_at timestamp without time zone NOT NULL,\n    modified_at timestamp without time zone,\n    modified_by_client_uuid character varying(255),\n    modified_by_user_uuid character varying(255),\n    name character varying(255),\n    description text,\n    definition text,\n    updated_at timestamp without time zone NOT NULL,\n    collection_uuid character varying\n);\n\n\n--\n-- Name: workflows_id_seq; Type: SEQUENCE; Schema: public; Owner: -\n--\n\nCREATE SEQUENCE public.workflows_id_seq\n    START WITH 1\n    INCREMENT BY 1\n    NO MINVALUE\n    NO MAXVALUE\n    CACHE 1;\n\n\n--\n-- Name: workflows_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: -\n--\n\nALTER SEQUENCE public.workflows_id_seq OWNED BY public.workflows.id;\n\n\n--\n-- Name: api_client_authorizations id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.api_client_authorizations ALTER COLUMN id SET DEFAULT nextval('public.api_client_authorizations_id_seq'::regclass);\n\n\n--\n-- Name: api_clients id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.api_clients ALTER COLUMN id SET DEFAULT nextval('public.api_clients_id_seq'::regclass);\n\n\n--\n-- Name: authorized_keys id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.authorized_keys ALTER COLUMN id SET DEFAULT nextval('public.authorized_keys_id_seq'::regclass);\n\n\n--\n-- Name: collections id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.collections ALTER COLUMN id SET DEFAULT nextval('public.collections_id_seq'::regclass);\n\n\n--\n-- Name: container_requests id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.container_requests ALTER COLUMN id SET DEFAULT nextval('public.container_requests_id_seq'::regclass);\n\n\n--\n-- Name: containers id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.containers ALTER COLUMN id SET DEFAULT nextval('public.containers_id_seq'::regclass);\n\n\n--\n-- Name: groups id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.groups ALTER COLUMN id SET DEFAULT nextval('public.groups_id_seq'::regclass);\n\n\n--\n-- Name: humans id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.humans ALTER COLUMN id SET DEFAULT nextval('public.humans_id_seq'::regclass);\n\n\n--\n-- Name: job_tasks id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.job_tasks ALTER COLUMN id SET DEFAULT nextval('public.job_tasks_id_seq'::regclass);\n\n\n--\n-- Name: jobs id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.jobs ALTER COLUMN id SET DEFAULT nextval('public.jobs_id_seq'::regclass);\n\n\n--\n-- Name: keep_disks id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.keep_disks ALTER COLUMN id SET DEFAULT nextval('public.keep_disks_id_seq'::regclass);\n\n\n--\n-- Name: keep_services id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.keep_services ALTER COLUMN id SET DEFAULT nextval('public.keep_services_id_seq'::regclass);\n\n\n--\n-- Name: links id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.links ALTER COLUMN id SET DEFAULT nextval('public.links_id_seq'::regclass);\n\n\n--\n-- Name: logs id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.logs ALTER COLUMN id SET DEFAULT nextval('public.logs_id_seq'::regclass);\n\n\n--\n-- Name: nodes id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE 
ONLY public.nodes ALTER COLUMN id SET DEFAULT nextval('public.nodes_id_seq'::regclass);\n\n\n--\n-- Name: pipeline_instances id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.pipeline_instances ALTER COLUMN id SET DEFAULT nextval('public.pipeline_instances_id_seq'::regclass);\n\n\n--\n-- Name: pipeline_templates id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.pipeline_templates ALTER COLUMN id SET DEFAULT nextval('public.pipeline_templates_id_seq'::regclass);\n\n\n--\n-- Name: repositories id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.repositories ALTER COLUMN id SET DEFAULT nextval('public.repositories_id_seq'::regclass);\n\n\n--\n-- Name: specimens id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.specimens ALTER COLUMN id SET DEFAULT nextval('public.specimens_id_seq'::regclass);\n\n\n--\n-- Name: traits id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.traits ALTER COLUMN id SET DEFAULT nextval('public.traits_id_seq'::regclass);\n\n\n--\n-- Name: users id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.users ALTER COLUMN id SET DEFAULT nextval('public.users_id_seq'::regclass);\n\n\n--\n-- Name: virtual_machines id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.virtual_machines ALTER COLUMN id SET DEFAULT nextval('public.virtual_machines_id_seq'::regclass);\n\n\n--\n-- Name: workflows id; Type: DEFAULT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.workflows ALTER COLUMN id SET DEFAULT nextval('public.workflows_id_seq'::regclass);\n\n\n--\n-- Name: api_client_authorizations api_client_authorizations_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.api_client_authorizations\n    ADD CONSTRAINT api_client_authorizations_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: api_clients api_clients_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.api_clients\n    ADD CONSTRAINT api_clients_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: ar_internal_metadata ar_internal_metadata_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.ar_internal_metadata\n    ADD CONSTRAINT ar_internal_metadata_pkey PRIMARY KEY (key);\n\n\n--\n-- Name: authorized_keys authorized_keys_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.authorized_keys\n    ADD CONSTRAINT authorized_keys_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: collections collections_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.collections\n    ADD CONSTRAINT collections_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: container_requests container_requests_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.container_requests\n    ADD CONSTRAINT container_requests_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: containers containers_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.containers\n    ADD CONSTRAINT containers_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: credentials credentials_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.credentials\n    ADD CONSTRAINT credentials_pkey PRIMARY KEY (uuid);\n\n\n--\n-- Name: groups groups_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.groups\n    ADD CONSTRAINT groups_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: humans humans_pkey; Type: CONSTRAINT; Schema: public; Owner: 
-\n--\n\nALTER TABLE ONLY public.humans\n    ADD CONSTRAINT humans_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: job_tasks job_tasks_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.job_tasks\n    ADD CONSTRAINT job_tasks_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: jobs jobs_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.jobs\n    ADD CONSTRAINT jobs_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: keep_disks keep_disks_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.keep_disks\n    ADD CONSTRAINT keep_disks_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: keep_services keep_services_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.keep_services\n    ADD CONSTRAINT keep_services_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: links links_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.links\n    ADD CONSTRAINT links_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: logs logs_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.logs\n    ADD CONSTRAINT logs_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: nodes nodes_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.nodes\n    ADD CONSTRAINT nodes_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: pipeline_instances pipeline_instances_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.pipeline_instances\n    ADD CONSTRAINT pipeline_instances_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: pipeline_templates pipeline_templates_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.pipeline_templates\n    ADD CONSTRAINT pipeline_templates_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: repositories repositories_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.repositories\n    ADD CONSTRAINT repositories_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: specimens specimens_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.specimens\n    ADD CONSTRAINT specimens_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: traits traits_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.traits\n    ADD CONSTRAINT traits_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: users users_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.users\n    ADD CONSTRAINT users_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: virtual_machines virtual_machines_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.virtual_machines\n    ADD CONSTRAINT virtual_machines_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: workflows workflows_pkey; Type: CONSTRAINT; Schema: public; Owner: -\n--\n\nALTER TABLE ONLY public.workflows\n    ADD CONSTRAINT workflows_pkey PRIMARY KEY (id);\n\n\n--\n-- Name: api_client_authorizations_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX api_client_authorizations_search_index ON public.api_client_authorizations USING btree (api_token, created_by_ip_address, last_used_by_ip_address, default_owner_uuid, uuid);\n\n\n--\n-- Name: api_clients_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX api_clients_search_index ON public.api_clients USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, url_prefix);\n\n\n--\n-- Name: authorized_keys_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX authorized_keys_search_index ON public.authorized_keys USING btree (uuid, 
owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, key_type, authorized_user_uuid);\n\n\n--\n-- Name: collection_index_on_properties; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX collection_index_on_properties ON public.collections USING gin (properties);\n\n\n--\n-- Name: collections_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX collections_search_index ON public.collections USING btree (owner_uuid, modified_by_client_uuid, modified_by_user_uuid, portable_data_hash, uuid, name, current_version_uuid);\n\n\n--\n-- Name: collections_trgm_text_search_idx; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX collections_trgm_text_search_idx ON public.collections USING gin (((((((((COALESCE(name, ''::character varying))::text || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || COALESCE(file_names, ''::text))) public.gin_trgm_ops);\n\n\n--\n-- Name: container_requests_index_on_properties; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX container_requests_index_on_properties ON public.container_requests USING gin (properties);\n\n\n--\n-- Name: container_requests_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX container_requests_search_index ON public.container_requests USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, state, requesting_container_uuid, container_uuid, container_image, cwd, output_path, output_uuid, log_uuid, output_name);\n\n\n--\n-- Name: container_requests_trgm_text_search_idx; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX container_requests_trgm_text_search_idx ON public.container_requests USING gin (((((((((((((((((((((((((((COALESCE(name, ''::character varying))::text || ' '::text) || COALESCE(description, ''::text)) || ' '::text) || COALESCE((properties)::text, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || COALESCE(environment, ''::text)) || ' '::text) || (COALESCE(cwd, ''::character varying))::text) || ' '::text) || COALESCE(command, ''::text)) || ' '::text) || (COALESCE(output_path, ''::character varying))::text) || ' '::text) || COALESCE(filters, ''::text)) || ' '::text) || COALESCE(scheduling_parameters, ''::text)) || ' '::text) || (COALESCE(output_name, ''::character varying))::text) || ' '::text) || COALESCE((output_properties)::text, ''::text))) public.gin_trgm_ops);\n\n\n--\n-- Name: containers_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX containers_search_index ON public.containers USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, state, log, cwd, output_path, output, container_image, auth_uuid, locked_by_uuid);\n\n\n--\n-- Name: group_index_on_properties; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX group_index_on_properties ON public.groups USING gin (properties);\n\n\n--\n-- Name: groups_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX groups_search_index ON public.groups USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, group_class, frozen_by_uuid);\n\n\n--\n-- Name: groups_trgm_text_search_idx; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX groups_trgm_text_search_idx ON public.groups USING gin (((((((((COALESCE(name, ''::character varying))::text || ' '::text) || (COALESCE(description, 
''::character varying))::text) || ' '::text) || (COALESCE(group_class, ''::character varying))::text) || ' '::text) || COALESCE((properties)::text, ''::text))) public.gin_trgm_ops);\n\n\n--\n-- Name: humans_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX humans_search_index ON public.humans USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid);\n\n\n--\n-- Name: idx_on_uuid_owner_uuid_modified_by_user_uuid_name_c_8f8cf5e570; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX idx_on_uuid_owner_uuid_modified_by_user_uuid_name_c_8f8cf5e570 ON public.credentials USING btree (uuid, owner_uuid, modified_by_user_uuid, name, credential_class, external_id);\n\n\n--\n-- Name: index_api_client_authorizations_on_api_client_id; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_api_client_authorizations_on_api_client_id ON public.api_client_authorizations USING btree (api_client_id);\n\n\n--\n-- Name: index_api_client_authorizations_on_api_token; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_api_client_authorizations_on_api_token ON public.api_client_authorizations USING btree (api_token);\n\n\n--\n-- Name: index_api_client_authorizations_on_expires_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_api_client_authorizations_on_expires_at ON public.api_client_authorizations USING btree (expires_at);\n\n\n--\n-- Name: index_api_client_authorizations_on_refreshes_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_api_client_authorizations_on_refreshes_at ON public.api_client_authorizations USING btree (refreshes_at);\n\n\n--\n-- Name: index_api_client_authorizations_on_user_id; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_api_client_authorizations_on_user_id ON public.api_client_authorizations USING btree (user_id);\n\n\n--\n-- Name: index_api_client_authorizations_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_api_client_authorizations_on_uuid ON public.api_client_authorizations USING btree (uuid);\n\n\n--\n-- Name: index_api_clients_on_created_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_api_clients_on_created_at ON public.api_clients USING btree (created_at);\n\n\n--\n-- Name: index_api_clients_on_modified_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_api_clients_on_modified_at ON public.api_clients USING btree (modified_at);\n\n\n--\n-- Name: index_api_clients_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_api_clients_on_owner_uuid ON public.api_clients USING btree (owner_uuid);\n\n\n--\n-- Name: index_api_clients_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_api_clients_on_uuid ON public.api_clients USING btree (uuid);\n\n\n--\n-- Name: index_authkeys_on_user_and_expires_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_authkeys_on_user_and_expires_at ON public.authorized_keys USING btree (authorized_user_uuid, expires_at);\n\n\n--\n-- Name: index_authorized_keys_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_authorized_keys_on_owner_uuid ON public.authorized_keys USING btree (owner_uuid);\n\n\n--\n-- Name: index_authorized_keys_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_authorized_keys_on_uuid ON public.authorized_keys USING btree (uuid);\n\n\n--\n-- Name: index_collections_on_created_at_and_uuid; Type: INDEX; Schema: public; 
Owner: -\n--\n\nCREATE INDEX index_collections_on_created_at_and_uuid ON public.collections USING btree (created_at, uuid);\n\n\n--\n-- Name: index_collections_on_current_version_uuid_and_version; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_collections_on_current_version_uuid_and_version ON public.collections USING btree (current_version_uuid, version);\n\n\n--\n-- Name: index_collections_on_delete_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_collections_on_delete_at ON public.collections USING btree (delete_at);\n\n\n--\n-- Name: index_collections_on_is_trashed; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_collections_on_is_trashed ON public.collections USING btree (is_trashed);\n\n\n--\n-- Name: index_collections_on_modified_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_collections_on_modified_at_and_uuid ON public.collections USING btree (modified_at, uuid);\n\n\n--\n-- Name: index_collections_on_name; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_collections_on_name ON public.collections USING gin (name public.gin_trgm_ops);\n\n\n--\n-- Name: index_collections_on_name_btree; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_collections_on_name_btree ON public.collections USING btree (name);\n\n\n--\n-- Name: index_collections_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_collections_on_owner_uuid ON public.collections USING btree (owner_uuid);\n\n\n--\n-- Name: index_collections_on_owner_uuid_and_name; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_collections_on_owner_uuid_and_name ON public.collections USING btree (owner_uuid, name) WHERE ((is_trashed = false) AND ((current_version_uuid)::text = (uuid)::text));\n\n\n--\n-- Name: index_collections_on_portable_data_hash_and_trash_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_collections_on_portable_data_hash_and_trash_at ON public.collections USING btree (portable_data_hash, trash_at);\n\n\n--\n-- Name: index_collections_on_trash_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_collections_on_trash_at ON public.collections USING btree (trash_at);\n\n\n--\n-- Name: index_collections_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_collections_on_uuid ON public.collections USING btree (uuid);\n\n\n--\n-- Name: index_container_ports_on_container_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_container_ports_on_container_uuid ON public.container_ports USING btree (container_uuid);\n\n\n--\n-- Name: index_container_ports_on_external_port; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_container_ports_on_external_port ON public.container_ports USING btree (external_port);\n\n\n--\n-- Name: index_container_requests_on_container_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_container_requests_on_container_uuid ON public.container_requests USING btree (container_uuid);\n\n\n--\n-- Name: index_container_requests_on_created_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_container_requests_on_created_at_and_uuid ON public.container_requests USING btree (created_at, uuid);\n\n\n--\n-- Name: index_container_requests_on_modified_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_container_requests_on_modified_at_and_uuid ON public.container_requests USING btree 
(modified_at, uuid);\n\n\n--\n-- Name: index_container_requests_on_name_and_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_container_requests_on_name_and_owner_uuid ON public.container_requests USING btree (name, owner_uuid);\n\n\n--\n-- Name: index_container_requests_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_container_requests_on_owner_uuid ON public.container_requests USING btree (owner_uuid);\n\n\n--\n-- Name: index_container_requests_on_requesting_container_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_container_requests_on_requesting_container_uuid ON public.container_requests USING btree (requesting_container_uuid);\n\n\n--\n-- Name: index_container_requests_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_container_requests_on_uuid ON public.container_requests USING btree (uuid);\n\n\n--\n-- Name: index_containers_on_auth_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_containers_on_auth_uuid ON public.containers USING btree (auth_uuid);\n\n\n--\n-- Name: index_containers_on_locked_by_uuid_and_priority; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_containers_on_locked_by_uuid_and_priority ON public.containers USING btree (locked_by_uuid, priority);\n\n\n--\n-- Name: index_containers_on_locked_by_uuid_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_containers_on_locked_by_uuid_and_uuid ON public.containers USING btree (locked_by_uuid, uuid);\n\n\n--\n-- Name: index_containers_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_containers_on_modified_at_uuid ON public.containers USING btree (modified_at DESC, uuid);\n\n\n--\n-- Name: index_containers_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_containers_on_owner_uuid ON public.containers USING btree (owner_uuid);\n\n\n--\n-- Name: index_containers_on_queued_state; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_containers_on_queued_state ON public.containers USING btree (state, ((priority > 0)));\n\n\n--\n-- Name: index_containers_on_reuse_columns; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_containers_on_reuse_columns ON public.containers USING btree (md5(command), cwd, md5(environment), output_path, md5(output_glob), container_image, md5(mounts), secret_mounts_md5, md5(runtime_constraints));\n\n\n--\n-- Name: index_containers_on_runtime_status; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_containers_on_runtime_status ON public.containers USING gin (runtime_status);\n\n\n--\n-- Name: index_containers_on_secret_mounts_md5; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_containers_on_secret_mounts_md5 ON public.containers USING btree (secret_mounts_md5);\n\n\n--\n-- Name: index_containers_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_containers_on_uuid ON public.containers USING btree (uuid);\n\n\n--\n-- Name: index_credentials_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_credentials_on_owner_uuid ON public.credentials USING btree (owner_uuid);\n\n\n--\n-- Name: index_credentials_on_owner_uuid_and_name; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_credentials_on_owner_uuid_and_name ON public.credentials USING btree (owner_uuid, name);\n\n\n--\n-- Name: index_credentials_on_uuid; Type: INDEX; Schema: public; Owner: 
-\n--\n\nCREATE UNIQUE INDEX index_credentials_on_uuid ON public.credentials USING btree (uuid);\n\n\n--\n-- Name: index_frozen_groups_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_frozen_groups_on_uuid ON public.frozen_groups USING btree (uuid);\n\n\n--\n-- Name: index_groups_on_created_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_groups_on_created_at_and_uuid ON public.groups USING btree (created_at, uuid);\n\n\n--\n-- Name: index_groups_on_delete_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_groups_on_delete_at ON public.groups USING btree (delete_at);\n\n\n--\n-- Name: index_groups_on_group_class; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_groups_on_group_class ON public.groups USING btree (group_class);\n\n\n--\n-- Name: index_groups_on_is_trashed; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_groups_on_is_trashed ON public.groups USING btree (is_trashed);\n\n\n--\n-- Name: index_groups_on_modified_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_groups_on_modified_at_and_uuid ON public.groups USING btree (modified_at, uuid);\n\n\n--\n-- Name: index_groups_on_name; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_groups_on_name ON public.groups USING gin (name public.gin_trgm_ops);\n\n\n--\n-- Name: index_groups_on_name_btree; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_groups_on_name_btree ON public.groups USING btree (name);\n\n\n--\n-- Name: index_groups_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_groups_on_owner_uuid ON public.groups USING btree (owner_uuid);\n\n\n--\n-- Name: index_groups_on_owner_uuid_and_name; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_groups_on_owner_uuid_and_name ON public.groups USING btree (owner_uuid, name) WHERE (is_trashed = false);\n\n\n--\n-- Name: index_groups_on_trash_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_groups_on_trash_at ON public.groups USING btree (trash_at);\n\n\n--\n-- Name: index_groups_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_groups_on_uuid ON public.groups USING btree (uuid);\n\n\n--\n-- Name: index_humans_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_humans_on_owner_uuid ON public.humans USING btree (owner_uuid);\n\n\n--\n-- Name: index_humans_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_humans_on_uuid ON public.humans USING btree (uuid);\n\n\n--\n-- Name: index_job_tasks_on_created_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_job_tasks_on_created_at ON public.job_tasks USING btree (created_at);\n\n\n--\n-- Name: index_job_tasks_on_created_by_job_task_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_job_tasks_on_created_by_job_task_uuid ON public.job_tasks USING btree (created_by_job_task_uuid);\n\n\n--\n-- Name: index_job_tasks_on_job_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_job_tasks_on_job_uuid ON public.job_tasks USING btree (job_uuid);\n\n\n--\n-- Name: index_job_tasks_on_modified_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_job_tasks_on_modified_at ON public.job_tasks USING btree (modified_at);\n\n\n--\n-- Name: index_job_tasks_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_job_tasks_on_owner_uuid ON public.job_tasks USING 
btree (owner_uuid);\n\n\n--\n-- Name: index_job_tasks_on_sequence; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_job_tasks_on_sequence ON public.job_tasks USING btree (sequence);\n\n\n--\n-- Name: index_job_tasks_on_success; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_job_tasks_on_success ON public.job_tasks USING btree (success);\n\n\n--\n-- Name: index_job_tasks_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_job_tasks_on_uuid ON public.job_tasks USING btree (uuid);\n\n\n--\n-- Name: index_jobs_on_created_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_jobs_on_created_at ON public.jobs USING btree (created_at);\n\n\n--\n-- Name: index_jobs_on_finished_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_jobs_on_finished_at ON public.jobs USING btree (finished_at);\n\n\n--\n-- Name: index_jobs_on_modified_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_jobs_on_modified_at ON public.jobs USING btree (modified_at);\n\n\n--\n-- Name: index_jobs_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_jobs_on_modified_at_uuid ON public.jobs USING btree (modified_at DESC, uuid);\n\n\n--\n-- Name: index_jobs_on_output; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_jobs_on_output ON public.jobs USING btree (output);\n\n\n--\n-- Name: index_jobs_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_jobs_on_owner_uuid ON public.jobs USING btree (owner_uuid);\n\n\n--\n-- Name: index_jobs_on_script; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_jobs_on_script ON public.jobs USING btree (script);\n\n\n--\n-- Name: index_jobs_on_script_parameters_digest; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_jobs_on_script_parameters_digest ON public.jobs USING btree (script_parameters_digest);\n\n\n--\n-- Name: index_jobs_on_started_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_jobs_on_started_at ON public.jobs USING btree (started_at);\n\n\n--\n-- Name: index_jobs_on_submit_id; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_jobs_on_submit_id ON public.jobs USING btree (submit_id);\n\n\n--\n-- Name: index_jobs_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_jobs_on_uuid ON public.jobs USING btree (uuid);\n\n\n--\n-- Name: index_keep_disks_on_filesystem_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_keep_disks_on_filesystem_uuid ON public.keep_disks USING btree (filesystem_uuid);\n\n\n--\n-- Name: index_keep_disks_on_last_ping_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_keep_disks_on_last_ping_at ON public.keep_disks USING btree (last_ping_at);\n\n\n--\n-- Name: index_keep_disks_on_node_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_keep_disks_on_node_uuid ON public.keep_disks USING btree (node_uuid);\n\n\n--\n-- Name: index_keep_disks_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_keep_disks_on_owner_uuid ON public.keep_disks USING btree (owner_uuid);\n\n\n--\n-- Name: index_keep_disks_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_keep_disks_on_uuid ON public.keep_disks USING btree (uuid);\n\n\n--\n-- Name: index_keep_services_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_keep_services_on_owner_uuid ON public.keep_services USING btree 
(owner_uuid);\n\n\n--\n-- Name: index_keep_services_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_keep_services_on_uuid ON public.keep_services USING btree (uuid);\n\n\n--\n-- Name: index_links_on_created_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_links_on_created_at_and_uuid ON public.links USING btree (created_at, uuid);\n\n\n--\n-- Name: index_links_on_head_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_links_on_head_uuid ON public.links USING btree (head_uuid);\n\n\n--\n-- Name: index_links_on_modified_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_links_on_modified_at_and_uuid ON public.links USING btree (modified_at, uuid);\n\n\n--\n-- Name: index_links_on_name; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_links_on_name ON public.links USING btree (name) WHERE ((link_class)::text = 'published_port'::text);\n\n\n--\n-- Name: index_links_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_links_on_owner_uuid ON public.links USING btree (owner_uuid);\n\n\n--\n-- Name: index_links_on_substring_head_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_links_on_substring_head_uuid ON public.links USING btree (\"substring\"((head_uuid)::text, 7, 5));\n\n\n--\n-- Name: index_links_on_substring_tail_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_links_on_substring_tail_uuid ON public.links USING btree (\"substring\"((tail_uuid)::text, 7, 5));\n\n\n--\n-- Name: index_links_on_tail_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_links_on_tail_uuid ON public.links USING btree (tail_uuid);\n\n\n--\n-- Name: index_links_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_links_on_uuid ON public.links USING btree (uuid);\n\n\n--\n-- Name: index_logs_on_created_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_logs_on_created_at_and_uuid ON public.logs USING btree (created_at, uuid);\n\n\n--\n-- Name: index_logs_on_event_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_logs_on_event_at ON public.logs USING btree (event_at);\n\n\n--\n-- Name: index_logs_on_event_type; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_logs_on_event_type ON public.logs USING btree (event_type);\n\n\n--\n-- Name: index_logs_on_modified_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_logs_on_modified_at_and_uuid ON public.logs USING btree (modified_at, uuid);\n\n\n--\n-- Name: index_logs_on_object_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_logs_on_object_owner_uuid ON public.logs USING btree (object_owner_uuid);\n\n\n--\n-- Name: index_logs_on_object_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_logs_on_object_uuid ON public.logs USING btree (object_uuid);\n\n\n--\n-- Name: index_logs_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_logs_on_owner_uuid ON public.logs USING btree (owner_uuid);\n\n\n--\n-- Name: index_logs_on_summary; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_logs_on_summary ON public.logs USING btree (summary);\n\n\n--\n-- Name: index_logs_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_logs_on_uuid ON public.logs USING btree (uuid);\n\n\n--\n-- Name: index_materialized_permissions_target_is_not_user; Type: INDEX; Schema: 
public; Owner: -\n--\n\nCREATE INDEX index_materialized_permissions_target_is_not_user ON public.materialized_permissions USING btree (target_uuid, traverse_owned, ((((user_uuid)::text = (target_uuid)::text) OR ((target_uuid)::text !~~ '_____-tpzed-_______________'::text))));\n\n\n--\n-- Name: index_nodes_on_created_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_nodes_on_created_at ON public.nodes USING btree (created_at);\n\n\n--\n-- Name: index_nodes_on_hostname; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_nodes_on_hostname ON public.nodes USING btree (hostname);\n\n\n--\n-- Name: index_nodes_on_modified_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_nodes_on_modified_at ON public.nodes USING btree (modified_at);\n\n\n--\n-- Name: index_nodes_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_nodes_on_owner_uuid ON public.nodes USING btree (owner_uuid);\n\n\n--\n-- Name: index_nodes_on_slot_number; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_nodes_on_slot_number ON public.nodes USING btree (slot_number);\n\n\n--\n-- Name: index_nodes_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_nodes_on_uuid ON public.nodes USING btree (uuid);\n\n\n--\n-- Name: index_pipeline_instances_on_created_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_pipeline_instances_on_created_at ON public.pipeline_instances USING btree (created_at);\n\n\n--\n-- Name: index_pipeline_instances_on_modified_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_pipeline_instances_on_modified_at ON public.pipeline_instances USING btree (modified_at);\n\n\n--\n-- Name: index_pipeline_instances_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_pipeline_instances_on_modified_at_uuid ON public.pipeline_instances USING btree (modified_at DESC, uuid);\n\n\n--\n-- Name: index_pipeline_instances_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_pipeline_instances_on_owner_uuid ON public.pipeline_instances USING btree (owner_uuid);\n\n\n--\n-- Name: index_pipeline_instances_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_pipeline_instances_on_uuid ON public.pipeline_instances USING btree (uuid);\n\n\n--\n-- Name: index_pipeline_templates_on_created_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_pipeline_templates_on_created_at ON public.pipeline_templates USING btree (created_at);\n\n\n--\n-- Name: index_pipeline_templates_on_modified_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_pipeline_templates_on_modified_at ON public.pipeline_templates USING btree (modified_at);\n\n\n--\n-- Name: index_pipeline_templates_on_modified_at_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_pipeline_templates_on_modified_at_uuid ON public.pipeline_templates USING btree (modified_at DESC, uuid);\n\n\n--\n-- Name: index_pipeline_templates_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_pipeline_templates_on_owner_uuid ON public.pipeline_templates USING btree (owner_uuid);\n\n\n--\n-- Name: index_pipeline_templates_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_pipeline_templates_on_uuid ON public.pipeline_templates USING btree (uuid);\n\n\n--\n-- Name: index_repositories_on_created_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX 
index_repositories_on_created_at_and_uuid ON public.repositories USING btree (created_at, uuid);\n\n\n--\n-- Name: index_repositories_on_modified_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_repositories_on_modified_at_and_uuid ON public.repositories USING btree (modified_at, uuid);\n\n\n--\n-- Name: index_repositories_on_name; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_repositories_on_name ON public.repositories USING btree (name);\n\n\n--\n-- Name: index_repositories_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_repositories_on_owner_uuid ON public.repositories USING btree (owner_uuid);\n\n\n--\n-- Name: index_repositories_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_repositories_on_uuid ON public.repositories USING btree (uuid);\n\n\n--\n-- Name: index_specimens_on_created_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_specimens_on_created_at ON public.specimens USING btree (created_at);\n\n\n--\n-- Name: index_specimens_on_modified_at; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_specimens_on_modified_at ON public.specimens USING btree (modified_at);\n\n\n--\n-- Name: index_specimens_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_specimens_on_owner_uuid ON public.specimens USING btree (owner_uuid);\n\n\n--\n-- Name: index_specimens_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_specimens_on_uuid ON public.specimens USING btree (uuid);\n\n\n--\n-- Name: index_traits_on_name; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_traits_on_name ON public.traits USING btree (name);\n\n\n--\n-- Name: index_traits_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_traits_on_owner_uuid ON public.traits USING btree (owner_uuid);\n\n\n--\n-- Name: index_traits_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_traits_on_uuid ON public.traits USING btree (uuid);\n\n\n--\n-- Name: index_trashed_groups_on_group_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_trashed_groups_on_group_uuid ON public.trashed_groups USING btree (group_uuid);\n\n\n--\n-- Name: index_users_on_created_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_users_on_created_at_and_uuid ON public.users USING btree (created_at, uuid);\n\n\n--\n-- Name: index_users_on_identity_url; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_users_on_identity_url ON public.users USING btree (identity_url);\n\n\n--\n-- Name: index_users_on_modified_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_users_on_modified_at_and_uuid ON public.users USING btree (modified_at, uuid);\n\n\n--\n-- Name: index_users_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_users_on_owner_uuid ON public.users USING btree (owner_uuid);\n\n\n--\n-- Name: index_users_on_username; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_users_on_username ON public.users USING btree (username);\n\n\n--\n-- Name: index_users_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_users_on_uuid ON public.users USING btree (uuid);\n\n\n--\n-- Name: index_uuid_locks_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_uuid_locks_on_uuid ON public.uuid_locks USING btree 
(uuid);\n\n\n--\n-- Name: index_virtual_machines_on_created_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_virtual_machines_on_created_at_and_uuid ON public.virtual_machines USING btree (created_at, uuid);\n\n\n--\n-- Name: index_virtual_machines_on_hostname; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_virtual_machines_on_hostname ON public.virtual_machines USING btree (hostname);\n\n\n--\n-- Name: index_virtual_machines_on_modified_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_virtual_machines_on_modified_at_and_uuid ON public.virtual_machines USING btree (modified_at, uuid);\n\n\n--\n-- Name: index_virtual_machines_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_virtual_machines_on_owner_uuid ON public.virtual_machines USING btree (owner_uuid);\n\n\n--\n-- Name: index_virtual_machines_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_virtual_machines_on_uuid ON public.virtual_machines USING btree (uuid);\n\n\n--\n-- Name: index_workflows_on_created_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_workflows_on_created_at_and_uuid ON public.workflows USING btree (created_at, uuid);\n\n\n--\n-- Name: index_workflows_on_modified_at_and_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_workflows_on_modified_at_and_uuid ON public.workflows USING btree (modified_at, uuid);\n\n\n--\n-- Name: index_workflows_on_owner_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX index_workflows_on_owner_uuid ON public.workflows USING btree (owner_uuid);\n\n\n--\n-- Name: index_workflows_on_uuid; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX index_workflows_on_uuid ON public.workflows USING btree (uuid);\n\n\n--\n-- Name: job_tasks_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX job_tasks_search_index ON public.job_tasks USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, job_uuid, created_by_job_task_uuid);\n\n\n--\n-- Name: jobs_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX jobs_search_index ON public.jobs USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, submit_id, script, script_version, cancelled_by_client_uuid, cancelled_by_user_uuid, output, is_locked_by_uuid, log, repository, supplied_script_version, docker_image_locator, state, arvados_sdk_version);\n\n\n--\n-- Name: jobs_trgm_text_search_idx; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX jobs_trgm_text_search_idx ON public.jobs USING gin (((((((((((((((((((((((((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(submit_id, ''::character varying))::text) || ' '::text) || (COALESCE(script, ''::character varying))::text) || ' '::text) || (COALESCE(script_version, ''::character varying))::text) || ' '::text) || COALESCE(script_parameters, ''::text)) || ' '::text) || (COALESCE(cancelled_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(cancelled_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(output, ''::character varying))::text) || ' '::text) || (COALESCE(is_locked_by_uuid, ''::character 
varying))::text) || ' '::text) || (COALESCE(log, ''::character varying))::text) || ' '::text) || COALESCE(tasks_summary, ''::text)) || ' '::text) || COALESCE(runtime_constraints, ''::text)) || ' '::text) || (COALESCE(repository, ''::character varying))::text) || ' '::text) || (COALESCE(supplied_script_version, ''::character varying))::text) || ' '::text) || (COALESCE(docker_image_locator, ''::character varying))::text) || ' '::text) || (COALESCE(description, ''::character varying))::text) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || (COALESCE(arvados_sdk_version, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text))) public.gin_trgm_ops);\n\n\n--\n-- Name: keep_disks_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX keep_disks_search_index ON public.keep_disks USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, ping_secret, node_uuid, filesystem_uuid, keep_service_uuid);\n\n\n--\n-- Name: keep_services_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX keep_services_search_index ON public.keep_services USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, service_host, service_type);\n\n\n--\n-- Name: links_index_on_properties; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX links_index_on_properties ON public.links USING gin (properties);\n\n\n--\n-- Name: links_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX links_search_index ON public.links USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, tail_uuid, link_class, name, head_uuid);\n\n\n--\n-- Name: links_tail_name_unique_if_link_class_name; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX links_tail_name_unique_if_link_class_name ON public.links USING btree (tail_uuid, name) WHERE ((link_class)::text = 'name'::text);\n\n\n--\n-- Name: logs_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX logs_search_index ON public.logs USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, object_uuid, event_type, object_owner_uuid);\n\n\n--\n-- Name: nodes_index_on_info; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX nodes_index_on_info ON public.nodes USING gin (info);\n\n\n--\n-- Name: nodes_index_on_properties; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX nodes_index_on_properties ON public.nodes USING gin (properties);\n\n\n--\n-- Name: nodes_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX nodes_search_index ON public.nodes USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname, domain, ip_address, job_uuid);\n\n\n--\n-- Name: permission_target; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX permission_target ON public.materialized_permissions USING btree (target_uuid);\n\n\n--\n-- Name: permission_user_target; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX permission_user_target ON public.materialized_permissions USING btree (user_uuid, target_uuid);\n\n\n--\n-- Name: pipeline_instances_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX pipeline_instances_search_index ON public.pipeline_instances USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, pipeline_template_uuid, name, state);\n\n\n--\n-- Name: pipeline_instances_trgm_text_search_idx; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX 
pipeline_instances_trgm_text_search_idx ON public.pipeline_instances USING gin (((((((((((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(pipeline_template_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || COALESCE(properties, ''::text)) || ' '::text) || (COALESCE(state, ''::character varying))::text) || ' '::text) || COALESCE(components_summary, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)) public.gin_trgm_ops);\n\n\n--\n-- Name: pipeline_template_owner_uuid_name_unique; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX pipeline_template_owner_uuid_name_unique ON public.pipeline_templates USING btree (owner_uuid, name);\n\n\n--\n-- Name: pipeline_templates_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX pipeline_templates_search_index ON public.pipeline_templates USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);\n\n\n--\n-- Name: pipeline_templates_trgm_text_search_idx; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX pipeline_templates_trgm_text_search_idx ON public.pipeline_templates USING gin (((((((((((((((COALESCE(uuid, ''::character varying))::text || ' '::text) || (COALESCE(owner_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_client_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(modified_by_user_uuid, ''::character varying))::text) || ' '::text) || (COALESCE(name, ''::character varying))::text) || ' '::text) || COALESCE(components, ''::text)) || ' '::text) || (COALESCE(description, ''::character varying))::text)) public.gin_trgm_ops);\n\n\n--\n-- Name: repositories_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX repositories_search_index ON public.repositories USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);\n\n\n--\n-- Name: specimens_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX specimens_search_index ON public.specimens USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, material);\n\n\n--\n-- Name: traits_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX traits_search_index ON public.traits USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name);\n\n\n--\n-- Name: unique_schema_migrations; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE UNIQUE INDEX unique_schema_migrations ON public.schema_migrations USING btree (version);\n\n\n--\n-- Name: users_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX users_search_index ON public.users USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, email, first_name, last_name, identity_url, default_owner_uuid, username, redirect_to_user_uuid);\n\n\n--\n-- Name: virtual_machines_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX virtual_machines_search_index ON public.virtual_machines USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, hostname);\n\n\n--\n-- Name: workflows_search_index; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE 
INDEX workflows_search_index ON public.workflows USING btree (uuid, owner_uuid, modified_by_client_uuid, modified_by_user_uuid, name, collection_uuid);\n\n\n--\n-- Name: workflows_trgm_text_search_idx; Type: INDEX; Schema: public; Owner: -\n--\n\nCREATE INDEX workflows_trgm_text_search_idx ON public.workflows USING gin (((((COALESCE(name, ''::character varying))::text || ' '::text) || COALESCE(description, ''::text))) public.gin_trgm_ops);\n\n\n--\n-- PostgreSQL database dump complete\n--\n\nSET search_path TO \"$user\", public;\n\nINSERT INTO \"schema_migrations\" (version) VALUES\n('20251006181234'),\n('20250527181323'),\n('20250426201300'),\n('20250422103000'),\n('20250402131700'),\n('20250315222222'),\n('20250312141843'),\n('20250115145250'),\n('20241118110000'),\n('20240820202230'),\n('20240627201747'),\n('20240618121312'),\n('20240604183200'),\n('20240402162733'),\n('20240329173437'),\n('20231013000000'),\n('20230922000000'),\n('20230821000000'),\n('20230815160000'),\n('20230503224107'),\n('20230421142716'),\n('20221230155924'),\n('20221219165512'),\n('20220804133317'),\n('20220726034131'),\n('20220505112900'),\n('20220401153101'),\n('20220303204419'),\n('20220301155729'),\n('20220224203102'),\n('20211027154300'),\n('20210816191509'),\n('20210621204455'),\n('20210126183521'),\n('20210108033940'),\n('20201202174753'),\n('20201105190435'),\n('20201103170213'),\n('20200914203202'),\n('20200602141328'),\n('20200501150153'),\n('20190905151603'),\n('20190809135453'),\n('20190808145904'),\n('20190523180148'),\n('20190422144631'),\n('20190322174136'),\n('20190214214814'),\n('20181213183234'),\n('20181011184200'),\n('20181005192222'),\n('20181004131141'),\n('20181001175023'),\n('20180919001158'),\n('20180917205609'),\n('20180917200000'),\n('20180915155335'),\n('20180913175443'),\n('20180904110712'),\n('20180824155207'),\n('20180824152014'),\n('20180820135808'),\n('20180820132617'),\n('20180820130357'),\n('20180806133039'),\n('20180608123145'),\n('20180607175050'),\n('20180514135529'),\n('20180501182859'),\n('20180313180114'),\n('20180228220311'),\n('20180216203422'),\n('20171212153352'),\n('20171208203841'),\n('20171027183824'),\n('20170906224040'),\n('20170824202826'),\n('20170706141334'),\n('20170704160233'),\n('20170628185847'),\n('20170419175801'),\n('20170419173712'),\n('20170419173031'),\n('20170330012505'),\n('20170328215436'),\n('20170319063406'),\n('20170301225558'),\n('20170216170823'),\n('20170105160302'),\n('20170105160301'),\n('20170102153111'),\n('20161223090712'),\n('20161222153434'),\n('20161213172944'),\n('20161115174218'),\n('20161115171221'),\n('20161111143147'),\n('20161019171346'),\n('20160926194129'),\n('20160909181442'),\n('20160901210110'),\n('20160819195725'),\n('20160819195557'),\n('20160808151559'),\n('20160509143250'),\n('20160506175108'),\n('20160324144017'),\n('20160209155729'),\n('20160208210629'),\n('20151229214707'),\n('20151215134304'),\n('20151202151426'),\n('20150526180251'),\n('20150512193020'),\n('20150423145759'),\n('20150324152204'),\n('20150317132720'),\n('20150312151136'),\n('20150303210106'),\n('20150216193428'),\n('20150206230342'),\n('20150206210804'),\n('20150203180223'),\n('20150123142953'),\n('20150122175935'),\n('20141208185217'),\n('20141208174653'),\n('20141208174553'),\n('20141208164553'),\n('20141111133038'),\n('20140924091559'),\n('20140918153705'),\n('20140918153541'),\n('20140918141529'),\n('20140911221252'),\n('20140909183946'),\n('20140828141043'),\n('20140826180337'),\n('20140818125735'),\n('20140817035914'),\n('20140811184643'),\n('20
140714184006'),\n('20140709172343'),\n('20140627210837'),\n('20140611173003'),\n('20140607150616'),\n('20140602143352'),\n('20140601022548'),\n('20140530200539'),\n('20140527152921'),\n('20140519205916'),\n('20140501165548'),\n('20140423133559'),\n('20140423132913'),\n('20140422011506'),\n('20140421151940'),\n('20140421151939'),\n('20140421140924'),\n('20140407184311'),\n('20140402001908'),\n('20140325175653'),\n('20140324024606'),\n('20140321191343'),\n('20140319160547'),\n('20140317135600'),\n('20140129184311'),\n('20140124222114'),\n('20140117231056'),\n('20131007180607'),\n('20130724153034'),\n('20130708185153'),\n('20130708182912'),\n('20130708163414'),\n('20130627184333'),\n('20130627154537'),\n('20130626022810'),\n('20130626002829'),\n('20130617150007'),\n('20130612042554'),\n('20130611163736'),\n('20130610202538'),\n('20130608053730'),\n('20130606183519'),\n('20130528134100'),\n('20130524042319'),\n('20130523060213'),\n('20130523060112'),\n('20130425214427'),\n('20130425024459'),\n('20130415020241'),\n('20130326182917'),\n('20130326173804'),\n('20130320000107'),\n('20130319235957'),\n('20130319201431'),\n('20130319194637'),\n('20130319180730'),\n('20130319165853'),\n('20130318002138'),\n('20130315213205'),\n('20130315183626'),\n('20130315155820'),\n('20130313175417'),\n('20130226170000'),\n('20130218181504'),\n('20130207195855'),\n('20130203115329'),\n('20130203104824'),\n('20130203104818'),\n('20130130205749'),\n('20130128231343'),\n('20130128202518'),\n('20130125220425'),\n('20130123180228'),\n('20130123180224'),\n('20130123174514'),\n('20130122221616'),\n('20130122201442'),\n('20130122020042'),\n('20130118002239'),\n('20130116215213'),\n('20130116024233'),\n('20130113214204'),\n('20130109220548'),\n('20130109175700'),\n('20130107212832'),\n('20130107181109'),\n('20130105224618'),\n('20130105224358'),\n('20130105203021'),\n('20121016005009');\n"
  },
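The collections indexes above include a partial unique index, index_collections_on_owner_uuid_and_name, that enforces name uniqueness only among current, non-trashed versions. As a sketch, here is how the same index could be declared from a Rails migration instead of raw SQL; the index name and WHERE clause are copied from the dump, while the migration class name is invented.

```ruby
# Hypothetical migration sketch: the partial unique index from structure.sql,
# declared via ActiveRecord. Only rows that are not trashed and are the
# current version participate in the uniqueness check.
class AddCollectionsOwnerNameUniqueIndex < ActiveRecord::Migration[7.0]
  def up
    add_index :collections, [:owner_uuid, :name],
              unique: true,
              name: "index_collections_on_owner_uuid_and_name",
              where: "(is_trashed = false) AND ((current_version_uuid)::text = (uuid)::text)"
  end

  def down
    remove_index :collections, name: "index_collections_on_owner_uuid_and_name"
  end
end
```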
  {
    "path": "services/api/fpm-info.sh",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nfpm_depends+=(\n    # Dependencies to build gems\n    bison\n    make\n    \"ruby >= 3.0.0\"\n    # Postinst script dependencies\n    diffutils\n    # Passenger dependencies\n    curl\n    procps\n    tar\n    # Dependencies of our API server code\n    \"git >= 1.7.10\"\n    shared-mime-info\n)\n\ncase \"$TARGET\" in\n    rocky8)\n        fpm_depends+=(\n            # Dependencies to build gems\n            automake\n            gcc-c++\n            libcurl-devel\n            libyaml-devel\n            postgresql\n            postgresql-devel\n            redhat-rpm-config\n            \"ruby-devel >= 3.0.0\"\n            zlib-devel\n            # Passenger runtime dependencies\n            libnsl\n            openssl-devel\n            rubygem-rake\n        )\n        ;;\n    rocky*)\n        fpm_depends+=(\n            # Dependencies to build gems\n            automake\n            gcc-c++\n            libcurl-devel\n            libyaml-devel\n            postgresql\n            postgresql-devel\n            redhat-rpm-config\n            \"ruby-devel >= 3.0.0\"\n            zlib-devel\n            # Passenger runtime dependencies\n            libnsl\n            openssl-devel\n            rubygem-rake\n            # nginx compilation dependencies\n            pcre2-devel\n        )\n        ;;\n    debian* | ubuntu*)\n        fpm_depends+=(\n            # Dependencies to build gems\n            g++\n            libcurl-ssl-dev\n            libpq-dev\n            libyaml-dev\n            postgresql-client\n            \"ruby-dev >= 3.0.0\"\n            zlib1g-dev\n            # Passenger runtime dependencies\n            libnsl2\n            libnss-systemd\n            libssl-dev\n            rake\n        )\n        ;;\nesac\n"
  },
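fpm-info.sh only assembles the per-distro fpm_depends array; the actual package build happens in build scripts outside this section. As a rough sketch of how such an array maps onto fpm's command line (using fpm's real -s/-t/-n/--depends flags; the package name and "." source directory are placeholders):

```ruby
# Sketch only: turning an fpm_depends-style list into an fpm invocation.
deps = ["bison", "make", "ruby >= 3.0.0", "git >= 1.7.10", "shared-mime-info"]
argv = ["fpm", "-s", "dir", "-t", "deb", "-n", "arvados-api-server"]
deps.each { |d| argv += ["--depends", d] }  # one --depends flag per entry
argv << "."                                 # package the current directory
system(*argv) or raise "fpm failed"
```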
  {
    "path": "services/api/lib/20200501150153_permission_table_constants.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# These constants are used in both\n# db/migrate/20200501150153_permission_table and update_permissions\n#\n# This file allows them to be easily imported by both to avoid duplication.\n#\n# Don't mess with this!  Any changes will affect both the current\n# update_permissions and the past migration.  If you are tinkering\n# with the permission system and need to change how\n# PERM_QUERY_TEMPLATE, refresh_trashed or refresh_permissions works,\n# you should make a new file with your modified functions and have\n# update_permissions reference that file instead.\n\nPERMISSION_VIEW = \"materialized_permissions\"\nTRASHED_GROUPS = \"trashed_groups\"\nFROZEN_GROUPS = \"frozen_groups\"\n\n# We need to use this parameterized query in a few different places,\n# including as a subquery in a larger query.\n#\n# There's basically two options, the way I did this originally was to\n# put this in a postgres function and do a lateral join over it.\n# However, postgres functions impose an optimization barrier, and\n# possibly have other overhead with temporary tables, so I ended up\n# going with the brute force approach of inlining the whole thing.\n#\n# The two substitutions are \"base_case\" which determines the initial\n# set of permission origins and \"edge_perm\" which is used to ensure\n# that the new permission takes precedence over the one in the edges\n# table (but some queries don't need that.)\n#\nPERM_QUERY_TEMPLATE = %{\nWITH RECURSIVE\n        traverse_graph(origin_uuid, target_uuid, val, traverse_owned, starting_set) as (\n            %{base_case}\n          union\n            (select traverse_graph.origin_uuid,\n                    edges.head_uuid,\n                      least(%{edge_perm},\n                            traverse_graph.val),\n                    should_traverse_owned(edges.head_uuid, edges.val),\n                    false\n             from permission_graph_edges as edges, traverse_graph\n             where traverse_graph.target_uuid = edges.tail_uuid\n             and (edges.tail_uuid like '_____-j7d0g-_______________' or\n                  traverse_graph.starting_set)))\n        select traverse_graph.origin_uuid, target_uuid, max(val) as val, bool_or(traverse_owned) as traverse_owned from traverse_graph\n        group by (traverse_graph.origin_uuid, target_uuid)\n}\n\ndef refresh_trashed\n  ActiveRecord::Base.transaction do\n    ActiveRecord::Base.connection.execute(\"LOCK TABLE #{TRASHED_GROUPS}\")\n    ActiveRecord::Base.connection.execute(\"DELETE FROM #{TRASHED_GROUPS}\")\n\n    # Helper populate trashed_groups table. 
This starts with\n    #   each group owned by a user and computes the subtree under that\n    #   group to find any groups that are trashed.\n    ActiveRecord::Base.connection.execute(%{\nINSERT INTO #{TRASHED_GROUPS}\nselect ps.target_uuid as group_uuid, ps.trash_at from groups,\n  lateral project_subtree_with_trash_at(groups.uuid, groups.trash_at) ps\n  where groups.owner_uuid like '_____-tpzed-_______________' and ps.trash_at is not NULL\n})\n  end\nend\n\ndef refresh_permissions\n  ActiveRecord::Base.transaction do\n    ActiveRecord::Base.connection.execute(\"LOCK TABLE #{PERMISSION_VIEW}\")\n    ActiveRecord::Base.connection.execute(\"DELETE FROM #{PERMISSION_VIEW}\")\n\n    ActiveRecord::Base.connection.execute %{\nINSERT INTO materialized_permissions\n    #{PERM_QUERY_TEMPLATE % {:base_case => %{\n        select uuid, uuid, 3, true, true from users\n},\n:edge_perm => 'edges.val'\n} }\n}, \"refresh_permission_view.do\"\n  end\nend\n\ndef refresh_frozen\n  ActiveRecord::Base.transaction do\n    ActiveRecord::Base.connection.execute(\"LOCK TABLE #{FROZEN_GROUPS}\")\n    ActiveRecord::Base.connection.execute(\"DELETE FROM #{FROZEN_GROUPS}\")\n\n    # Compute entire frozen_groups table, starting with top-level\n    # projects (i.e., project groups owned by a user).\n    ActiveRecord::Base.connection.execute(%{\nINSERT INTO #{FROZEN_GROUPS}\nselect ps.uuid from groups,\n  lateral project_subtree_with_is_frozen(groups.uuid, groups.frozen_by_uuid is not null) ps\n  where groups.owner_uuid like '_____-tpzed-_______________'\n    and group_class = 'project'\n    and ps.is_frozen\n})\n  end\nend\n"
  },
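refresh_permissions instantiates PERM_QUERY_TEMPLATE with a base case that seeds the traversal from every user. Per the comment in the file, the same template can be reused with a narrower base case; here is a hypothetical sketch for a single user. The method name and single-user base case are assumptions; the edge_perm substitution matches the refresh_permissions call above.

```ruby
# Sketch (not in the source tree): reuse PERM_QUERY_TEMPLATE for one user.
def permissions_for_user(user_uuid)
  query = PERM_QUERY_TEMPLATE % {
    # Seed the recursive traversal from a single user instead of all users.
    base_case: ActiveRecord::Base.sanitize_sql(
      ["select uuid, uuid, 3, true, true from users where uuid = ?", user_uuid]),
    edge_perm: 'edges.val',
  }
  ActiveRecord::Base.connection.exec_query(query, "permissions_for_user")
end
```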
  {
    "path": "services/api/lib/app_version.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass AppVersion\n  def self.git(*args, &block)\n    IO.popen([\"git\", \"--git-dir\", \".git\"] + args, \"r\",\n             chdir: Rails.root.join('../..'),\n             err: \"/dev/null\",\n             &block)\n  end\n\n  def self.forget\n    @hash = nil\n    @package_version = nil\n  end\n\n  # Return abbrev commit hash for current code version: \"abc1234\", or\n  # \"abc1234-modified\" if there are uncommitted changes. If present,\n  # return contents of {root}/git-commit.version instead.\n  def self.hash\n    if (cached = Rails.configuration.source_version || @hash)\n      return cached\n    end\n\n    # Read the version from our package's git-commit.version file, if available.\n    begin\n      @hash = IO.read(Rails.root.join(\"git-commit.version\")).strip\n    rescue Errno::ENOENT\n    end\n\n    if @hash.nil? or @hash.empty?\n      begin\n        local_modified = false\n        git(\"status\", \"--porcelain\") do |git_pipe|\n          git_pipe.each_line do |_|\n            local_modified = true\n            # Continue reading the pipe so git doesn't get SIGPIPE.\n          end\n        end\n        if $?.success?\n          git(\"log\", \"-n1\", \"--format=%H\") do |git_pipe|\n            git_pipe.each_line do |line|\n              @hash = line.chomp[0...8] + (local_modified ? '-modified' : '')\n            end\n          end\n        end\n      rescue SystemCallError\n      end\n    end\n\n    @hash || \"unknown\"\n  end\n\n  def self.package_version\n    if (cached = Rails.configuration.package_version || @package_version)\n      return cached\n    end\n\n    begin\n      @package_version = IO.read(Rails.root.join(\"package-build.version\")).strip\n    rescue Errno::ENOENT\n      @package_version = \"unknown\"\n    end\n\n    @package_version\n  end\nend\n"
  },
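AppVersion.hash resolves in order: the source_version config override, the memoized value, the packaged git-commit.version file, and finally live git commands. A brief usage sketch; the return values shown are illustrative.

```ruby
# Usage sketch; outputs are illustrative, not fixed.
AppVersion.hash             # => "abc12345", or "abc12345-modified" with local edits
AppVersion.package_version  # => contents of package-build.version, else "unknown"
AppVersion.forget           # drop memoized values; the next call re-reads file/git
```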
  {
    "path": "services/api/lib/arvados_model_updates.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule ArvadosModelUpdates\n  # ArvadosModel checks this to decide whether it should update the\n  # 'modified_by_user_uuid' field.\n  def anonymous_updater\n    Thread.current[:anonymous_updater] || false\n  end\n\n  def leave_modified_by_user_alone\n    anonymous_updater_was = anonymous_updater\n    begin\n      Thread.current[:anonymous_updater] = true\n      yield\n    ensure\n      Thread.current[:anonymous_updater] = anonymous_updater_was\n    end\n  end\n\n  # ArvadosModel checks this to decide whether it should update the\n  # 'modified_at' field.\n  def timeless_updater\n    Thread.current[:timeless_updater] || false\n  end\n\n  def leave_modified_at_alone\n    timeless_updater_was = timeless_updater\n    begin\n      Thread.current[:timeless_updater] = true\n      yield\n    ensure\n      Thread.current[:timeless_updater] = timeless_updater_was\n    end\n  end\n\nend\n"
  },
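ArvadosModel consults these thread-local flags before stamping modified_by_user_uuid and modified_at, so callers can exempt specific writes. A usage sketch, assuming the caller includes the module and `collection` is an ArvadosModel-style record:

```ruby
# Sketch: suppress the automatic modified_at bump for one write.
include ArvadosModelUpdates

leave_modified_at_alone do
  collection.update!(properties: {"reviewed" => true})  # modified_at preserved
end
# The ensure clause restores the thread-local flag, so later writes stamp
# modified_at as usual.
```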
  {
    "path": "services/api/lib/assets/.gitkeep",
    "content": ""
  },
  {
    "path": "services/api/lib/audit_logs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'current_api_client'\nrequire 'db_current_time'\n\nmodule AuditLogs\n  extend CurrentApiClient\n  extend DbCurrentTime\n\n  def self.delete_old(max_age:, max_batch:)\n    act_as_system_user do\n      if !File.owned?(Rails.root.join('tmp'))\n        Rails.logger.warn(\"AuditLogs: not owner of #{Rails.root}/tmp, skipping\")\n        return\n      end\n      lockfile = Rails.root.join('tmp', 'audit_logs.lock')\n      File.open(lockfile, File::RDWR|File::CREAT, 0600) do |f|\n        return unless f.flock(File::LOCK_NB|File::LOCK_EX)\n\n        sql = \"select clock_timestamp() - interval '#{'%.9f' % max_age} seconds'\"\n        threshold = ActiveRecord::Base.connection.select_value(sql).to_time.utc\n        Rails.logger.info \"AuditLogs: deleting logs older than #{threshold}\"\n\n        did_total = 0\n        loop do\n          sql = Log.unscoped.\n                select(:id).\n                order(:created_at).\n                where('event_type in (?)', ['create', 'update', 'destroy', 'delete']).\n                where('created_at < ?', threshold).\n                limit(max_batch).\n                to_sql\n          did = Log.unscoped.where(\"id in (#{sql})\").delete_all\n          did_total += did\n\n          Rails.logger.info \"AuditLogs: deleted batch of #{did}\"\n          break if did == 0\n        end\n        Rails.logger.info \"AuditLogs: deleted total #{did_total}\"\n      end\n    end\n  end\n\n  def self.tidy_in_background\n    max_age = Rails.configuration.AuditLogs.MaxAge.to_i\n    max_batch = Rails.configuration.AuditLogs.MaxDeleteBatch\n    return if max_age <= 0 || max_batch <= 0\n\n    exp = (max_age/14).seconds\n    need = false\n    Rails.cache.fetch('AuditLogs', expires_in: exp) do\n      need = true\n    end\n    return if !need\n\n    Thread.new do\n      Thread.current.abort_on_exception = false\n      begin\n        delete_old(max_age: max_age, max_batch: max_batch)\n      rescue => e\n        Rails.logger.error \"#{e.class}: #{e}\\n#{e.backtrace.join(\"\\n\\t\")}\"\n      ensure\n        # Rails 5.1+ makes test threads share a database connection, so we can't\n        # close a connection shared with other threads.\n        # https://github.com/rails/rails/commit/deba47799ff905f778e0c98a015789a1327d5087\n        if Rails.env != \"test\"\n          ActiveRecord::Base.connection.close\n        end\n      end\n    end\n  end\nend\n"
  },
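delete_old takes a non-blocking flock so overlapping processes skip the sweep, then deletes in bounded batches until a batch comes back empty; tidy_in_background additionally rate-limits itself via a Rails cache entry that expires every max_age/14 seconds. A console-style sketch (the numbers are example values, not recommended settings):

```ruby
# Manual sweep: delete audit logs older than 14 days, 100_000 rows per batch.
AuditLogs.delete_old(max_age: 14.days.to_i, max_batch: 100_000)

# Or let the rate-limited path decide whether a sweep is due right now:
AuditLogs.tidy_in_background
```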
  {
    "path": "services/api/lib/can_be_an_owner.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Protect referential integrity of owner_uuid columns in other tables\n# that can refer to the uuid column in this table.\n\nmodule CanBeAnOwner\n\n  def self.included(base)\n    base.extend(ClassMethods)\n\n    # Rails' \"has_many\" can prevent us from destroying the owner\n    # record when other objects refer to it.\n    ActiveRecord::Base.connection.tables.each do |t|\n      next if t == base.table_name\n      next if t.in?([\n                      # in-use tables that should be skipped\n                      'ar_internal_metadata',\n                      'permission_refresh_lock',\n                      'schema_migrations',\n                      'uuid_locks',\n                      # obsolete tables from removed APIs\n                      'api_clients',\n                      'commit_ancestors',\n                      'commits',\n                      'humans',\n                      'jobs',\n                      'job_tasks',\n                      'keep_disks',\n                      'materialized_permissions',\n                      'nodes',\n                      'pipeline_instances',\n                      'pipeline_templates',\n                      'repositories',\n                      'specimens',\n                      'traits',\n                    ])\n      klass = t.classify.constantize\n      next unless klass and 'owner_uuid'.in?(klass.columns.collect(&:name))\n      base.has_many(t.to_sym,\n                    foreign_key: 'owner_uuid',\n                    primary_key: 'uuid',\n                    dependent: :restrict_with_exception)\n    end\n    # We need custom protection for changing an owner's primary\n    # key. (Apart from this restriction, admins are allowed to change\n    # UUIDs.)\n    base.validate :restrict_uuid_change_breaking_associations\n  end\n\n  module ClassMethods\n    def install_view(type)\n      conn = ActiveRecord::Base.connection\n      transaction do\n        # Check whether the temporary view has already been created\n        # during this connection. If not, create it.\n        conn.exec_query \"SAVEPOINT check_#{type}_view\"\n        begin\n          conn.exec_query(\"SELECT 1 FROM #{type}_view LIMIT 0\")\n        rescue\n          conn.exec_query \"ROLLBACK TO SAVEPOINT check_#{type}_view\"\n          sql = File.read(Rails.root.join(\"lib\", \"create_#{type}_view.sql\"))\n          conn.exec_query(sql)\n        ensure\n          conn.exec_query \"RELEASE SAVEPOINT check_#{type}_view\"\n        end\n      end\n    end\n  end\n\n  def descendant_project_uuids\n    self.class.install_view('ancestor')\n    ActiveRecord::Base.connection.\n      exec_query('SELECT ancestor_view.uuid\n                  FROM ancestor_view\n                  LEFT JOIN groups ON groups.uuid=ancestor_view.uuid\n                  WHERE ancestor_uuid = $1 AND groups.group_class = $2',\n                  # \"name\" arg is a query label that appears in logs:\n                  \"descendant_project_uuids for #{self.uuid}\",\n                  # \"binds\" arg is an array of [col_id, value] for '$1' vars:\n                  [self.uuid, 'project'],\n                  ).rows.map do |project_uuid,|\n      project_uuid\n    end\n  end\n\n  protected\n\n  def restrict_uuid_change_breaking_associations\n    return true if new_record? 
or not uuid_changed?\n\n    # Check for objects that have my old uuid listed as their owner.\n    self.class.reflect_on_all_associations(:has_many).each do |assoc|\n      next unless assoc.foreign_key == 'owner_uuid'\n      if assoc.klass.where(owner_uuid: uuid_was).any?\n        errors.add(:uuid,\n                   \"cannot be changed on a #{self.class} that owns objects\")\n        return false\n      end\n    end\n\n    # if I owned myself before, I'll just continue to own myself with\n    # my new uuid.\n    if owner_uuid == uuid_was\n      self.owner_uuid = uuid\n    end\n  end\nend\n"
  },
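descendant_project_uuids installs the temporary ancestor_view at most once per connection (the savepoint probe in install_view) and then filters it to project groups. A usage sketch, assuming `project` is a record whose class includes CanBeAnOwner:

```ruby
# Sketch: list every project nested (at any depth) under `project`.
project.descendant_project_uuids.each do |uuid|
  puts uuid  # each row is a group uuid with group_class = 'project'
end
```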
  {
    "path": "services/api/lib/common_api_template.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule CommonApiTemplate\n  def self.included(base)\n    base.acts_as_api\n    base.class_eval do\n      alias_method :as_api_response_orig, :as_api_response\n      include InstanceMethods\n    end\n    base.extend(ClassMethods)\n    base.api_accessible :common do |t|\n      t.add :kind\n      t.add :etag\n      t.add :uuid\n      t.add :owner_uuid\n      t.add :created_at\n      t.add :modified_by_user_uuid\n      t.add :modified_at\n    end\n  end\n\n  module InstanceMethods\n    # choose template based on opts[:for_user]\n    def as_api_response(template=nil, opts={})\n      if template.nil?\n        user = opts[:for_user] || current_user\n        if user.andand.is_admin and self.respond_to? :api_accessible_superuser\n          template = :superuser\n        else\n          template = :user\n        end\n      end\n      self.as_api_response_orig(template, opts)\n    end\n  end\n\n  module ClassMethods\n  end\nend\n"
  },
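A model opts in by including the module and layering its own fields on top of the :common template; as_api_response then picks :superuser for admins (when the model defines one) or :user otherwise. A sketch with an invented Widget model, using acts_as_api's template-extension option as I understand it; admin_user is a placeholder admin User record.

```ruby
# Sketch: the :user template extends :common, so uuid, etag, owner_uuid,
# and the timestamp fields are included automatically.
class Widget < ActiveRecord::Base
  include CommonApiTemplate
  api_accessible :user, extend: :common do |t|
    t.add :name
  end
end

Widget.first.as_api_response                             # :user for ordinary callers
Widget.first.as_api_response(nil, for_user: admin_user)  # :superuser, if defined
```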
  {
    "path": "services/api/lib/config_loader.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# When loading YAML, deserialize :foo as \":foo\", rather than raising\n# \"Psych::DisallowedClass: Tried to load unspecified class: Symbol\"\nclass Psych::ScalarScanner\n  alias :orig_tokenize :tokenize\n  def tokenize string\n    return string if string =~ /^:[a-zA-Z]/\n    orig_tokenize(string)\n  end\nend\n\nmodule Psych\n  module Visitors\n    class YAMLTree < Psych::Visitors::Visitor\n      def visit_ActiveSupport_Duration o\n        seconds = o.to_i\n        outstr = \"\"\n        if seconds / 3600 > 0\n          outstr += \"#{seconds / 3600}h\"\n          seconds = seconds % 3600\n        end\n        if seconds / 60 > 0\n          outstr += \"#{seconds / 60}m\"\n          seconds = seconds % 60\n        end\n        if seconds > 0\n          outstr += \"#{seconds}s\"\n        end\n        if outstr == \"\"\n          outstr = \"0s\"\n        end\n        @emitter.scalar outstr, nil, nil, true, false, Nodes::Scalar::ANY\n      end\n\n      def visit_URI_Generic o\n        @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY\n      end\n\n      def visit_URI_HTTP o\n        @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY\n      end\n\n      def visit_Pathname o\n        @emitter.scalar o.to_s, nil, nil, true, false, Nodes::Scalar::ANY\n      end\n    end\n  end\nend\n\n\nmodule Boolean; end\nclass TrueClass; include Boolean; end\nclass FalseClass; include Boolean; end\n\nclass NonemptyString < String\nend\n\nclass ConfigLoader\n  def initialize\n    @config_migrate_map = {}\n    @config_types = {}\n  end\n\n  def declare_config(assign_to, configtype, migrate_from=nil, migrate_fn=nil)\n    if migrate_from\n      @config_migrate_map[migrate_from] = migrate_fn || ->(cfg, k, v) {\n        ConfigLoader.set_cfg cfg, assign_to, v\n      }\n    end\n    @config_types[assign_to] = configtype\n  end\n\n\n  def migrate_config from_config, to_config\n    remainders = {}\n    from_config.each do |k, v|\n      if @config_migrate_map[k.to_sym]\n        @config_migrate_map[k.to_sym].call to_config, k, v\n      else\n        remainders[k] = v\n      end\n    end\n    remainders\n  end\n\n  def coercion_and_check check_cfg, check_nonempty: true\n    @config_types.each do |cfgkey, cfgtype|\n      cfg = check_cfg\n      k = cfgkey\n      ks = k.split '.'\n      k = ks.pop\n      ks.each do |kk|\n        cfg = cfg[kk]\n        if cfg.nil?\n          break\n        end\n      end\n\n      if cfg.nil?\n        raise \"missing #{cfgkey}\"\n      end\n\n      if cfgtype == String and !cfg[k]\n        cfg[k] = \"\"\n      end\n\n      if cfgtype == String and cfg[k].is_a? Symbol\n        cfg[k] = cfg[k].to_s\n      end\n\n      if cfgtype == Pathname and cfg[k].is_a? String\n\n        if cfg[k] == \"\"\n          cfg[k] = Pathname.new(\"\")\n        else\n          cfg[k] = Pathname.new(cfg[k])\n          if !cfg[k].exist?\n            raise \"#{cfgkey} path #{cfg[k]} does not exist\"\n          end\n        end\n      end\n\n      if cfgtype == NonemptyString\n        if (!cfg[k] || cfg[k] == \"\") && check_nonempty\n          raise \"#{cfgkey} cannot be empty\"\n        end\n        if cfg[k].is_a? String\n          next\n        end\n      end\n\n      if cfgtype == ActiveSupport::Duration\n        if cfg[k].is_a? Integer\n          cfg[k] = cfg[k].seconds\n        elsif cfg[k].is_a? 
String\n          cfg[k] = ConfigLoader.parse_duration(cfg[k], cfgkey: cfgkey)\n        end\n      end\n\n      if cfgtype == URI\n        cfg[k] = URI(cfg[k])\n      end\n\n      if cfgtype == Integer && cfg[k].is_a?(String)\n        v = cfg[k].sub(/B\\s*$/, '')\n        if mt = /(-?\\d*\\.?\\d+)\\s*([KMGTPE]i?)$/.match(v)\n          if mt[1].index('.')\n            v = mt[1].to_f\n          else\n            v = mt[1].to_i\n          end\n          cfg[k] = v * {\n            'K' => 1000,\n            'Ki' => 1 << 10,\n            'M' => 1000000,\n            'Mi' => 1 << 20,\n            \"G\" =>  1000000000,\n            \"Gi\" => 1 << 30,\n            \"T\" =>  1000000000000,\n            \"Ti\" => 1 << 40,\n            \"P\" =>  1000000000000000,\n            \"Pi\" => 1 << 50,\n            \"E\" =>  1000000000000000000,\n            \"Ei\" => 1 << 60,\n          }[mt[2]]\n        end\n      end\n\n      if !cfg[k].is_a? cfgtype\n        raise \"#{cfgkey} expected #{cfgtype} but was #{cfg[k].class}\"\n      end\n    end\n  end\n\n  def self.set_cfg cfg, k, v\n    # \"foo.bar = baz\" --> { cfg[\"foo\"][\"bar\"] = baz }\n    ks = k.split '.'\n    k = ks.pop\n    ks.each do |kk|\n      cfg = cfg[kk]\n      if cfg.nil?\n        break\n      end\n    end\n    if !cfg.nil?\n      cfg[k] = v\n    end\n  end\n\n  def self.parse_duration(durstr, cfgkey:)\n    sign = 1\n    if durstr[0] == '-'\n      durstr = durstr[1..-1]\n      sign = -1\n    end\n    duration_re = /(\\d+(\\.\\d+)?)(s|m|h)/\n    dursec = 0\n    while durstr != \"\"\n      mt = duration_re.match durstr\n      if !mt\n        raise \"#{cfgkey} not a valid duration: '#{durstr}', accepted suffixes are s, m, h\"\n      end\n      multiplier = {s: 1, m: 60, h: 3600}\n      dursec += (Float(mt[1]) * multiplier[mt[3].to_sym] * sign)\n      durstr = durstr[mt[0].length..-1]\n    end\n    return dursec.seconds\n  end\n\n  def self.copy_into_config src, dst\n    src.each do |k, v|\n      dst.send \"#{k}=\", self.to_OrderedOptions(v)\n    end\n  end\n\n  def self.to_OrderedOptions confs\n    if confs.is_a? Hash\n      opts = ActiveSupport::OrderedOptions.new\n      confs.each do |k,v|\n        opts[k] = self.to_OrderedOptions(v)\n      end\n      opts\n    elsif confs.is_a? Array\n      confs.map { |v| self.to_OrderedOptions v }\n    else\n      confs\n    end\n  end\n\n  def self.load path, erb: false\n    if File.exist? path\n      yaml = IO.read path\n      if erb\n        yaml = ERB.new(yaml).result(binding)\n      end\n      YAML.safe_load(yaml)\n    else\n      {}\n    end\n  end\n\nend\n"
  },
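  {
    "path": "examples/hypothetical/config_loader_sketch.rb",
    "content": "# Hypothetical usage sketch -- not part of the Arvados source tree.\n#\n# A minimal illustration of ConfigLoader.parse_duration as defined in\n# services/api/lib/config_loader.rb, assuming the Rails app environment\n# (and therefore ActiveSupport) is already loaded.\n\n# Durations are written with s/m/h suffixes and may be fractional:\nd = ConfigLoader.parse_duration(\"1h30m\", cfgkey: \"Example.Timeout\")\nraise \"unexpected\" unless d == 5400.seconds\n\n# A leading minus sign negates the whole duration:\nd = ConfigLoader.parse_duration(\"-90s\", cfgkey: \"Example.Skew\")\nraise \"unexpected\" unless d == -90.seconds\n\n# Unrecognized suffixes raise, naming the offending config key:\n#   ConfigLoader.parse_duration(\"1w\", cfgkey: \"Example.Bad\")\n#   => RuntimeError: \"Example.Bad not a valid duration: '1w', ...\"\n"
  },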
  {
    "path": "services/api/lib/create_ancestor_view.sql",
    "content": "-- Copyright (C) The Arvados Authors. All rights reserved.\n--\n-- SPDX-License-Identifier: AGPL-3.0\n\nCREATE TEMPORARY VIEW ancestor_view AS\nWITH RECURSIVE\nancestor (uuid, ancestor_uuid) AS (\n     SELECT groups.uuid::varchar(32)       AS uuid,\n            groups.owner_uuid::varchar(32) AS ancestor_uuid\n            FROM groups\n     UNION\n     SELECT ancestor.uuid::varchar(32)     AS uuid,\n            groups.owner_uuid::varchar(32) AS ancestor_uuid\n            FROM ancestor\n            INNER JOIN groups\n            ON groups.uuid = ancestor.ancestor_uuid\n)\nSELECT * FROM ancestor;\n"
  },
  {
    "path": "services/api/lib/create_permission_view.sql",
    "content": "-- Copyright (C) The Arvados Authors. All rights reserved.\n--\n-- SPDX-License-Identifier: AGPL-3.0\n\n-- Note: this is not the current code used for permission checks (that is\n-- materialized_permission_view), but is retained here for migration purposes.\n\nCREATE TEMPORARY VIEW permission_view AS\nWITH RECURSIVE\nperm_value (name, val) AS (\n     VALUES\n     ('can_read',   1::smallint),\n     ('can_login',  1),\n     ('can_write',  2),\n     ('can_manage', 3)\n     ),\nperm_edges (tail_uuid, head_uuid, val, follow) AS (\n       SELECT links.tail_uuid,\n              links.head_uuid,\n              pv.val,\n              (pv.val = 3 OR groups.uuid IS NOT NULL) AS follow\n              FROM links\n              LEFT JOIN perm_value pv ON pv.name = links.name\n              LEFT JOIN groups ON pv.val<3 AND groups.uuid = links.head_uuid\n              WHERE links.link_class = 'permission'\n       UNION ALL\n       SELECT owner_uuid, uuid, 3, true FROM groups\n       ),\nperm (val, follow, user_uuid, target_uuid) AS (\n     SELECT 3::smallint             AS val,\n            true                    AS follow,\n            users.uuid::varchar(32) AS user_uuid,\n            users.uuid::varchar(32) AS target_uuid\n            FROM users\n     UNION\n     SELECT LEAST(perm.val, edges.val)::smallint AS val,\n            edges.follow                         AS follow,\n            perm.user_uuid::varchar(32)          AS user_uuid,\n            edges.head_uuid::varchar(32)         AS target_uuid\n            FROM perm\n            INNER JOIN perm_edges edges\n            ON perm.follow AND edges.tail_uuid = perm.target_uuid\n)\nSELECT user_uuid,\n       target_uuid,\n       val AS perm_level,\n       CASE follow WHEN true THEN target_uuid ELSE NULL END AS target_owner_uuid\n       FROM perm;\n"
  },
  {
    "path": "services/api/lib/current_api_client.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule CurrentApiClient\n  def current_user\n    Thread.current[:user]\n  end\n\n  def current_api_client_authorization\n    Thread.current[:api_client_authorization]\n  end\n\n  def current_api_base\n    Thread.current[:api_url_base]\n  end\n\n  # Where is the client connecting from?\n  def current_api_client_ip_address\n    Thread.current[:api_client_ip_address]\n  end\n\n  def system_user_uuid\n    [Rails.configuration.ClusterID,\n     User.uuid_prefix,\n     '000000000000000'].join('-')\n  end\n\n  def system_group_uuid\n    [Rails.configuration.ClusterID,\n     Group.uuid_prefix,\n     '000000000000000'].join('-')\n  end\n\n  def anonymous_group_uuid\n    [Rails.configuration.ClusterID,\n     Group.uuid_prefix,\n     'anonymouspublic'].join('-')\n  end\n\n  def anonymous_user_uuid\n    [Rails.configuration.ClusterID,\n     User.uuid_prefix,\n     'anonymouspublic'].join('-')\n  end\n\n  def public_project_uuid\n    [Rails.configuration.ClusterID,\n     Group.uuid_prefix,\n     'publicfavorites'].join('-')\n  end\n\n  def system_user\n    real_current_user = Thread.current[:user]\n    begin\n      Thread.current[:user] = User.new(is_admin: true,\n                                       is_active: true,\n                                       uuid: system_user_uuid)\n      $system_user = check_cache($system_user) do\n        User.where(uuid: system_user_uuid).\n          first_or_create!(is_active: true,\n                           is_admin: true,\n                           email: 'root',\n                           first_name: 'root',\n                           last_name: '')\n      end\n    ensure\n      Thread.current[:user] = real_current_user\n    end\n  end\n\n  def system_group\n    $system_group = check_cache($system_group) do\n      act_as_system_user do\n        ActiveRecord::Base.transaction do\n          Group.where(uuid: system_group_uuid).\n            first_or_create!(name: \"System group\",\n                             description: \"System group\",\n                             group_class: \"role\") do |g|\n            g.save!\n            User.all.collect(&:uuid).each do |user_uuid|\n              Link.create!(link_class: 'permission',\n                           name: 'can_manage',\n                           tail_uuid: system_group_uuid,\n                           head_uuid: user_uuid)\n            end\n          end\n        end\n      end\n    end\n  end\n\n  def all_users_group_uuid\n    [Rails.configuration.ClusterID,\n     Group.uuid_prefix,\n     'fffffffffffffff'].join('-')\n  end\n\n  def all_users_group\n    $all_users_group = check_cache($all_users_group) do\n      act_as_system_user do\n        ActiveRecord::Base.transaction do\n          Group.where(uuid: all_users_group_uuid).\n            first_or_create!(name: \"All users\",\n                             description: \"All users\",\n                             group_class: \"role\")\n        end\n      end\n    end\n  end\n\n  def act_as_system_user\n    if block_given?\n      act_as_user system_user do\n        yield\n      end\n    else\n      Thread.current[:user] = system_user\n    end\n  end\n\n  def act_as_user user\n    user_was = Thread.current[:user]\n    Thread.current[:user] = user\n    begin\n      yield\n    ensure\n      Thread.current[:user] = user_was\n      if user_was\n        user_was.forget_cached_group_perms\n      end\n    end\n  end\n\n  def anonymous_group\n  
  $anonymous_group = check_cache($anonymous_group) do\n      act_as_system_user do\n        ActiveRecord::Base.transaction do\n          Group.where(uuid: anonymous_group_uuid).\n            first_or_create!(group_class: \"role\",\n                             name: \"Anonymous users\",\n                             description: \"Anonymous users\")\n        end\n      end\n    end\n  end\n\n  def anonymous_group_read_permission\n    $anonymous_group_read_permission = check_cache($anonymous_group_read_permission) do\n      act_as_system_user do\n        Link.where(tail_uuid: all_users_group.uuid,\n                   head_uuid: anonymous_group.uuid,\n                   link_class: \"permission\",\n                   name: \"can_read\").first_or_create!\n      end\n    end\n  end\n\n  def anonymous_user\n    $anonymous_user = check_cache($anonymous_user) do\n      act_as_system_user do\n        User.where(uuid: anonymous_user_uuid).\n          first_or_create!(is_active: false,\n                           is_admin: false,\n                           email: 'anonymous',\n                           first_name: 'Anonymous',\n                           last_name: '') do |u|\n          u.save!\n          Link.where(tail_uuid: anonymous_user_uuid,\n                     head_uuid: anonymous_group.uuid,\n                     link_class: 'permission',\n                     name: 'can_read').\n            first_or_create!\n        end\n      end\n    end\n  end\n\n  def public_project_group\n    $public_project_group = check_cache($public_project_group) do\n      act_as_system_user do\n        ActiveRecord::Base.transaction do\n          Group.where(uuid: public_project_uuid).\n            first_or_create!(group_class: \"project\",\n                             name: \"Public favorites\",\n                             description: \"Public favorites\")\n        end\n      end\n    end\n  end\n\n  def public_project_read_permission\n    $public_project_group_read_permission = check_cache($public_project_group_read_permission) do\n      act_as_system_user do\n        Link.where(tail_uuid: anonymous_group.uuid,\n                   head_uuid: public_project_group.uuid,\n                   link_class: \"permission\",\n                   name: \"can_read\").first_or_create!\n      end\n    end\n  end\n\n  def empty_collection_pdh\n    'd41d8cd98f00b204e9800998ecf8427e+0'\n  end\n\n  def empty_collection\n    $empty_collection = check_cache($empty_collection) do\n      act_as_system_user do\n        ActiveRecord::Base.transaction do\n          Collection.\n            where(portable_data_hash: empty_collection_pdh).\n            first_or_create(manifest_text: '', owner_uuid: system_user.uuid, name: \"empty collection\") do |c|\n            c.save!\n            Link.where(tail_uuid: anonymous_group.uuid,\n                       head_uuid: c.uuid,\n                       link_class: 'permission',\n                       name: 'can_read').\n                  first_or_create!\n            c\n          end\n        end\n      end\n    end\n  end\n\n  # Purge the module globals if necessary. If the cached value is\n  # non-nil and the globals weren't purged, return the cached\n  # value. 
Otherwise, call the block.\n  #\n  # Purge is only done in test mode.\n  def check_cache(cached)\n    if Rails.env != 'test'\n      return (cached || yield)\n    end\n    t = Rails.cache.fetch \"CurrentApiClient.$system_globals_reset\" do\n      Time.now.to_f\n    end\n    if t != $system_globals_reset\n      reset_system_globals(t)\n      yield\n    else\n      cached || yield\n    end\n  end\n\n  def reset_system_globals(t)\n    $system_globals_reset = t\n    $system_user = nil\n    $system_group = nil\n    $all_users_group = nil\n    $anonymous_group = nil\n    $anonymous_group_read_permission = nil\n    $anonymous_user = nil\n    $public_project_group = nil\n    $public_project_group_read_permission = nil\n    $empty_collection = nil\n  end\n  module_function :reset_system_globals\nend\n\nCurrentApiClient.reset_system_globals(0)\n"
  },
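  {
    "path": "examples/hypothetical/current_api_client_sketch.rb",
    "content": "# Hypothetical usage sketch -- not part of the Arvados source tree.\n#\n# Shows how act_as_system_user (services/api/lib/current_api_client.rb)\n# temporarily swaps Thread.current[:user], assuming a loaded Rails app\n# environment.\n\ninclude CurrentApiClient\n\nact_as_system_user do\n  # Inside the block current_user is the system (root) user, whose uuid\n  # ends in -tpzed-000000000000000, so admin-only operations pass\n  # permission checks.\n  puts current_user.uuid\nend\n# On exit (even via an exception) the previous Thread.current[:user] is\n# restored by the ensure clause in act_as_user.\n"
  },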
  {
    "path": "services/api/lib/db_current_time.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule DbCurrentTime\n  CURRENT_TIME_SQL = \"SELECT clock_timestamp() AT TIME ZONE 'UTC'\"\n\n  def db_current_time\n    ActiveRecord::Base.connection.select_value(CURRENT_TIME_SQL)\n  end\n\n  def db_transaction_time\n    ActiveRecord::Base.connection.select_value(\"SELECT current_timestamp AT TIME ZONE 'UTC'\")\n  end\nend\n"
  },
  {
    "path": "services/api/lib/fix_collection_versions_timestamps.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'set'\n\ninclude CurrentApiClient\ninclude ArvadosModelUpdates\n\ndef fix_collection_versions_timestamps\n  act_as_system_user do\n    uuids = [].to_set\n    # Get UUIDs from collections with more than 1 version\n    Collection.where(version: 2).find_each(batch_size: 100) do |c|\n      uuids.add(c.current_version_uuid)\n    end\n    uuids.each do |uuid|\n      first_pair = true\n      # All versions of a particular collection get fixed together.\n      ActiveRecord::Base.transaction do\n        Collection.where(current_version_uuid: uuid).order(version: :desc).each_cons(2) do |c1, c2|\n          # Skip if the 2 newest versions' modified_at values are separate enough;\n          # this means that this collection doesn't require fixing, allowing for\n          # migration re-runs in case of transient problems.\n          break if first_pair && (c2.modified_at.to_f - c1.modified_at.to_f) > 1\n          first_pair = false\n          # Fix modified_at timestamps by assigning to N-1's value to N.\n          # Special case: the first version's modified_at will be == to created_at\n          leave_modified_by_user_alone do\n            leave_modified_at_alone do\n              c1.modified_at = c2.modified_at\n              c1.save!(validate: false)\n              if c2.version == 1\n                c2.modified_at = c2.created_at\n                c2.save!(validate: false)\n              end\n            end\n          end\n        end\n      end\n    end\n  end\nend"
  },
  {
    "path": "services/api/lib/fix_roles_projects.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'update_permissions'\n\ninclude CurrentApiClient\n\ndef fix_roles_projects\n  batch_update_permissions do\n    # This migration is not reversible.  However, the behavior it\n    # enforces is backwards-compatible, and most of the time there\n    # shouldn't be anything to do at all.\n    act_as_system_user do\n      ActiveRecord::Base.transaction do\n        Group.where(\"(group_class != 'project' and group_class != 'filter') or group_class is null\").each do |g|\n          # 1) any group not group_class != project and != filter becomes a 'role' (both empty and invalid groups)\n          old_owner = g.owner_uuid\n          g.owner_uuid = system_user_uuid\n          g.group_class = 'role'\n          g.save_with_unique_name!\n\n          if old_owner != system_user_uuid\n            # 2) Ownership of a role becomes a can_manage link\n            Link.new(link_class: 'permission',\n                         name: 'can_manage',\n                         tail_uuid: old_owner,\n                         head_uuid: g.uuid).\n              save!(validate: false)\n          end\n        end\n\n        ActiveRecord::Base.descendants.reject(&:abstract_class?).each do |klass|\n          next if [ApiClientAuthorization,\n                   AuthorizedKey,\n                   Log,\n                   Group].include?(klass)\n          next if !klass.columns.collect(&:name).include?('owner_uuid')\n\n          # 3) If a role owns anything, give it to system user and it\n          # becomes a can_manage link\n          klass.joins(\"join groups on groups.uuid=#{klass.table_name}.owner_uuid and groups.group_class='role'\").each do |owned|\n            Link.new(link_class: 'permission',\n                     name: 'can_manage',\n                     tail_uuid: owned.owner_uuid,\n                     head_uuid: owned.uuid).\n              save!(validate: false)\n            owned.owner_uuid = system_user_uuid\n            owned.save_with_unique_name!\n          end\n        end\n\n        Group.joins(\"join groups as g2 on g2.uuid=groups.owner_uuid and g2.group_class='role'\").each do |owned|\n          Link.new(link_class: 'permission',\n                       name: 'can_manage',\n                       tail_uuid: owned.owner_uuid,\n                       head_uuid: owned.uuid).\n            save!(validate: false)\n          owned.owner_uuid = system_user_uuid\n          owned.save_with_unique_name!\n        end\n\n        # 4) Projects can't have outgoing permission links.  Just\n        # print a warning and delete them.\n        q = ActiveRecord::Base.connection.exec_query %{\nselect links.uuid from links, groups where groups.uuid = links.tail_uuid and\n       links.link_class = 'permission' and groups.group_class = 'project'\n}\n        q.each do |lu|\n          ln = Link.find_by_uuid(lu['uuid'])\n          puts \"WARNING: Projects cannot have outgoing permission links, removing '#{ln.name}' link #{ln.uuid} from #{ln.tail_uuid} to #{ln.head_uuid}\"\n          Rails.logger.warn \"Projects cannot have outgoing permission links, removing '#{ln.name}' link #{ln.uuid} from #{ln.tail_uuid} to #{ln.head_uuid}\"\n          ln.destroy!\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/lib/group_pdhs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule GroupPdhs\n  # NOTE: Migration 20190322174136_add_file_info_to_collection.rb relies on this function.\n  #\n  # Change with caution!\n  #\n  # Correctly groups pdhs to use for batch database updates. Helps avoid\n  # updating too many database rows in a single transaction.\n  def self.group_pdhs_for_multiple_transactions(distinct_ordered_pdhs, distinct_pdh_count, batch_size_max, log_prefix)\n    batch_size = 0\n    batch_pdhs = {}\n    last_pdh = '0'\n    done = 0\n    any = true\n\n    while any\n      any = false\n      distinct_ordered_pdhs.call(last_pdh) do |pdh|\n        any = true\n        last_pdh = pdh\n        manifest_size = pdh.split('+')[1].to_i\n        if batch_size > 0 && batch_size + manifest_size > batch_size_max\n          yield batch_pdhs.keys\n          done += batch_pdhs.size\n          Rails.logger.info(log_prefix + \": #{done}/#{distinct_pdh_count}\")\n          batch_pdhs = {}\n          batch_size = 0\n        end\n        batch_pdhs[pdh] = true\n        batch_size += manifest_size\n      end\n    end\n    yield batch_pdhs.keys\n    Rails.logger.info(log_prefix + \": finished\")\n  end\nend\n"
  },
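  {
    "path": "examples/hypothetical/group_pdhs_sketch.rb",
    "content": "# Hypothetical usage sketch -- not part of the Arvados source tree.\n#\n# Exercises GroupPdhs.group_pdhs_for_multiple_transactions\n# (services/api/lib/group_pdhs.rb) with an in-memory pdh list; assumes a\n# Rails environment so Rails.logger works. The first argument is a\n# callable that yields distinct pdhs greater than its argument, in\n# order; a batch is emitted when adding another manifest's \"+size\" hint\n# would exceed batch_size_max.\n\npdhs = [\n  \"0cc175b9c0f1b6a831c399e269772661+1000\",\n  \"4a8a08f09d37b73795649038408b5f33+3000\",\n  \"92eb5ffee6ae2fec3ad71c777531578f+2000\",\n]\nfetch = lambda do |last_pdh, &blk|\n  pdhs.select { |pdh| pdh > last_pdh }.each(&blk)\nend\n\nGroupPdhs.group_pdhs_for_multiple_transactions(fetch, pdhs.size, 4000, \"sketch\") do |batch|\n  # First batch: 1000+3000 bytes of manifests; second batch: 2000.\n  puts batch.inspect\nend\n"
  },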
  {
    "path": "services/api/lib/has_uuid.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule HasUuid\n\n  UUID_REGEX = /^[0-9a-z]{5}-([0-9a-z]{5})-[0-9a-z]{15}$/\n\n  def self.included(base)\n    base.extend(ClassMethods)\n    base.validate :validate_uuid\n    base.before_create :assign_uuid\n    base.before_destroy :destroy_permission_links\n    base.has_many(:links_via_head,\n                  -> { where(\"not (link_class = 'permission')\") },\n                  class_name: 'Link',\n                  foreign_key: 'head_uuid',\n                  primary_key: 'uuid',\n                  dependent: :destroy)\n    base.has_many(:links_via_tail,\n                  -> { where(\"not (link_class = 'permission')\") },\n                  class_name: 'Link',\n                  foreign_key: 'tail_uuid',\n                  primary_key: 'uuid',\n                  dependent: :destroy)\n  end\n\n  module ClassMethods\n    def uuid_prefix\n      Digest::MD5.hexdigest(self.to_s).to_i(16).to_s(36)[-5..-1]\n    end\n    def generate_uuid\n      [Rails.configuration.ClusterID,\n       self.uuid_prefix,\n       rand(2**256).to_s(36)[-15..-1]].\n        join '-'\n    end\n  end\n\n  protected\n\n  def respond_to_uuid?\n    self.respond_to? :uuid\n  end\n\n  def validate_uuid\n    if self.respond_to_uuid? and self.uuid_changed?\n      if current_user.andand.is_admin and self.uuid.is_a?(String)\n        if (re = self.uuid.match HasUuid::UUID_REGEX)\n          if re[1] == self.class.uuid_prefix\n            return true\n          else\n            self.errors.add(:uuid, \"type field is '#{re[1]}', expected '#{self.class.uuid_prefix}'\")\n            return false\n          end\n        else\n          self.errors.add(:uuid, \"not a valid Arvados uuid '#{self.uuid}'\")\n          return false\n        end\n      else\n        if self.new_record?\n          self.errors.add(:uuid, \"assignment not permitted\")\n        else\n          self.errors.add(:uuid, \"change not permitted\")\n        end\n        return false\n      end\n    else\n      return true\n    end\n  end\n\n  def assign_uuid\n    if self.respond_to_uuid? and self.uuid.nil? or self.uuid.empty?\n      self.uuid = self.class.generate_uuid\n    end\n    true\n  end\n\n  def destroy_permission_links\n    if uuid\n      Link.where(['link_class=? and (head_uuid=? or tail_uuid=?)',\n                  'permission', uuid, uuid]).destroy_all\n    end\n  end\nend\n"
  },
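  {
    "path": "examples/hypothetical/uuid_prefix_sketch.rb",
    "content": "# Hypothetical sketch -- not part of the Arvados source tree.\n#\n# Restates how HasUuid::ClassMethods#uuid_prefix\n# (services/api/lib/has_uuid.rb) derives the stable 5-character type\n# infix of Arvados uuids from a model class name. Pure Ruby.\n\nrequire 'digest/md5'\n\ndef prefix_for(class_name)\n  # MD5 of the class name as a big integer, rendered in base 36; the\n  # last 5 characters become the uuid type infix.\n  Digest::MD5.hexdigest(class_name).to_i(16).to_s(36)[-5..-1]\nend\n\nputs prefix_for(\"Collection\")  # => \"4zz18\", the familiar collection infix\n"
  },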
  {
    "path": "services/api/lib/kind_and_etag.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule KindAndEtag\n\n  def self.included(base)\n    base.extend(ClassMethods)\n  end\n\n  module ClassMethods\n    def kind\n      'arvados#' + self.to_s.camelcase(:lower)\n    end\n  end\n\n  def kind\n    self.class.kind\n  end\n\n  def etag attrs=nil\n    Digest::MD5.hexdigest((attrs || self.attributes).inspect).to_i(16).to_s(36)\n  end\nend\n"
  },
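  {
    "path": "examples/hypothetical/kind_and_etag_sketch.rb",
    "content": "# Hypothetical sketch -- not part of the Arvados source tree.\n#\n# Illustrates KindAndEtag (services/api/lib/kind_and_etag.rb), assuming\n# a Rails environment (camelcase comes from ActiveSupport):\n#\n#   Collection.kind              # => \"arvados#collection\"\n#   ApiClientAuthorization.kind  # => \"arvados#apiClientAuthorization\"\n#\n# The etag is an MD5 digest of the attributes hash, rendered in base 36:\n\nrequire 'digest/md5'\n\nattrs = {\"uuid\" => \"zzzzz-4zz18-aaaaaaaaaaaaaaa\"}\nputs Digest::MD5.hexdigest(attrs.inspect).to_i(16).to_s(36)\n"
  },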
  {
    "path": "services/api/lib/load_param.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Mixin module for reading out query parameters from request params.\n#\n# Expects:\n#   +params+ Hash\n# Sets:\n#   @where, @filters, @limit, @offset, @orders\nmodule LoadParam\n\n  # Default number of rows to return in a single query.\n  DEFAULT_LIMIT = 100\n\n  # Load params[:where] into @where\n  def load_where_param\n    if params[:where].nil? or params[:where] == \"\"\n      @where = {}\n    elsif [Hash, ActionController::Parameters].include? params[:where].class\n      @where = params[:where]\n    elsif params[:where].is_a? String\n      begin\n        @where = SafeJSON.load(params[:where])\n        raise unless @where.is_a? Hash\n      rescue\n        raise ArgumentError.new(\"Could not parse \\\"where\\\" param as an object\")\n      end\n    end\n    @where = @where.with_indifferent_access\n  end\n\n  # Load params[:filters] into @filters\n  def load_filters_param\n    @filters ||= []\n    if params[:filters].is_a? Array\n      @filters += params[:filters]\n    elsif params[:filters].is_a? String and !params[:filters].empty?\n      begin\n        f = SafeJSON.load(params[:filters])\n        if not f.nil?\n          raise unless f.is_a? Array\n          @filters += f\n        end\n      rescue\n        raise ArgumentError.new(\"Could not parse \\\"filters\\\" param as an array\")\n      end\n    end\n  end\n\n  # Load params[:limit], params[:offset] and params[:order]\n  # into @limit, @offset, @orders.\n  #\n  # If fill_table_names is true, @orders will be populated with fully\n  # qualified columns (table_name.column_name).  Otherwise, column\n  # names might be ambiguous and the caller should call\n  # optimize_orders(@orders) to fix them.\n  def load_limit_offset_order_params(fill_table_names: true)\n    if params[:limit]\n      unless params[:limit].to_s.match(/^\\d+$/)\n        raise ArgumentError.new(\"Invalid value for limit parameter\")\n      end\n      @limit = [params[:limit].to_i,\n                Rails.configuration.API.MaxItemsPerResponse].min\n    else\n      @limit = DEFAULT_LIMIT\n    end\n\n    if params[:offset]\n      unless params[:offset].to_s.match(/^\\d+$/)\n        raise ArgumentError.new(\"Invalid value for offset parameter\")\n      end\n      @offset = params[:offset].to_i\n    else\n      @offset = 0\n    end\n\n    @orders = []\n    if (params[:order].is_a?(Array) && !params[:order].empty?) || !params[:order].blank?\n      od = []\n      (case params[:order]\n       when String\n         if params[:order].starts_with? '['\n           od = SafeJSON.load(params[:order])\n           raise unless od.is_a? Array\n           od\n         else\n           params[:order].split(',')\n         end\n       when Array\n         params[:order]\n       else\n         []\n       end).each do |order|\n        order = order.to_s\n        attr, direction = order.strip.split \" \"\n        direction ||= 'asc'\n        # The attr can have its table unspecified if it happens to be for the current \"model_class\" (the first case)\n        # or it can be fully specified with the database tablename (the second case) (e.g. 
\"collections.name\").\n        # NB that the security check for the second case table_name will not work if the model\n        # has used table_name= to use an alternate table name from the Rails standard.\n        # I could not find a perfect way to handle this well, but ActiveRecord::Base.send(:descendants)\n        # would be a place to start if this ever becomes necessary.\n        if (attr.match(/^[a-z][_a-z0-9]+$/) &&\n            model_class.columns.collect(&:name).index(attr) &&\n            ['asc','desc'].index(direction.downcase))\n          if fill_table_names\n            @orders << \"#{model_class.table_name}.#{attr} #{direction.downcase}\"\n          else\n            @orders << \"#{attr} #{direction.downcase}\"\n          end\n        elsif attr.match(/^([a-z][_a-z0-9]+)\\.([a-z][_a-z0-9]+)$/) and\n            ['asc','desc'].index(direction.downcase) and\n            ActiveRecord::Base.connection.tables.include?($1) and\n            $1.classify.constantize.columns.collect(&:name).index($2)\n          # $1 in the above checks references the first match from the regular expression, which is expected to be the database table name\n          # $2 is of course the actual database column name\n          @orders << \"#{attr} #{direction.downcase}\"\n        end\n      end\n    end\n\n    if fill_table_names\n      @orders = optimize_orders(@orders, model_class: model_class)\n    end\n\n    @distinct = params[:distinct] && true\n  end\n\n  # If the client-specified orders don't amount to a full ordering\n  # (e.g., [] or ['owner_uuid desc']), fall back on the default\n  # orders to ensure repeating the same request (possibly with\n  # different limit/offset) will return records in the same order.\n  #\n  # Clean up the resulting list of orders such that no column\n  # uselessly appears twice (Postgres might not optimize this out\n  # for us) and no columns uselessly appear after a unique column\n  # (Postgres does not optimize this out for us; as of 9.2, \"order\n  # by id, modified_at desc, uuid\" is slow but \"order by id\" is\n  # fast).\n  def optimize_orders(orders_given, model_class:)\n    orders_given_and_default = orders_given + model_class.default_orders\n    order_cols_used = {}\n    optimized = []\n    orders_given_and_default.each do |order|\n      otablecol = order.split(' ')[0]\n\n      next if order_cols_used[otablecol]\n      order_cols_used[otablecol] = true\n\n      optimized << order\n\n      if otablecol.index('.')\n        otable, ocol = otablecol.split('.')\n      else\n        otable, ocol = model_class.table_name, otablecol\n      end\n      if otable == model_class.table_name && model_class.unique_columns.include?(ocol)\n        # we already have a full ordering; subsequent entries would be\n        # superfluous\n        break\n      end\n    end\n    return optimized\n  end\n\n  def load_select_param\n    case params[:select]\n    when Array\n      @select = params[:select]\n    when String\n      begin\n        @select = SafeJSON.load(params[:select])\n        raise unless @select.is_a? Array or @select.nil? or !@select\n      rescue\n        raise ArgumentError.new(\"Could not parse \\\"select\\\" param as an array\")\n      end\n    end\n\n    if @select\n      # The modified_by_client_uuid field is no longer offered. 
For\n      # the sake of compatibility with workbench2, ignore it when a\n      # client asks for it explicitly (rather than returning an\n      # \"invalid field\" error).\n      @select -= ['modified_by_client_uuid']\n    end\n\n    if @select && @orders\n      # Any ordering columns must be selected when doing select,\n      # otherwise it is an SQL error, so filter out invalid orderings.\n      @orders.select! { |o|\n        col, _ = o.split\n        # match select column against order array entry\n        @select.select { |s| col == \"#{table_name}.#{s}\" }.any?\n      }\n    end\n  end\n\nend\n"
  },
  {
    "path": "services/api/lib/log_reuse_info.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule LogReuseInfo\n  # log_reuse_info logs whatever the given block returns, if\n  # log_reuse_decisions is enabled. It accepts a block instead of a\n  # string because in some cases constructing the strings involves\n  # doing expensive things like database queries, and we want to skip\n  # those when logging is disabled.\n  def log_reuse_info(candidates=nil)\n    if Rails.configuration.Containers.LogReuseDecisions\n      msg = yield\n      if !candidates.nil?\n        msg = \"have #{candidates.count} candidates \" + msg\n      end\n      Rails.logger.info(\"find_reusable: \" + msg)\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/lib/migrate_yaml_to_json.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule MigrateYAMLToJSON\n  def self.migrate(table, column)\n    conn = ActiveRecord::Base.connection\n    n = conn.update(\n      \"UPDATE #{table} SET #{column}=$1 WHERE #{column}=$2\",\n      \"#{table}.#{column} convert YAML to JSON\",\n      [\"{}\", \"--- {}\\n\"])\n    Rails.logger.info(\"#{table}.#{column}: #{n} rows updated using empty hash\")\n    finished = false\n    while !finished\n      n = 0\n      conn.exec_query(\n        \"SELECT id, #{column} FROM #{table} WHERE #{column} LIKE $1 LIMIT 100\",\n        \"#{table}.#{column} check for YAML\",\n        ['---%'],\n      ).rows.map do |id, yaml|\n        n += 1\n        json = SafeJSON.dump(YAML.safe_load(yaml))\n        conn.exec_query(\n          \"UPDATE #{table} SET #{column}=$1 WHERE id=$2 AND #{column}=$3\",\n          \"#{table}.#{column} convert YAML to JSON\",\n          [json, id, yaml])\n      end\n      Rails.logger.info(\"#{table}.#{column}: #{n} rows updated\")\n      finished = (n == 0)\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/lib/record_filters.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Mixin module providing a method to convert filters into a list of SQL\n# fragments suitable to be fed to ActiveRecord #where.\n#\n# Expects:\n#   model_class\n# Operates on:\n#   @objects\n\nrequire 'safe_json'\n\nmodule RecordFilters\n\n  # Input:\n  # +filters+        array of conditions, each being [column, operator, operand]\n  # +model_class+    subclass of ActiveRecord being filtered\n  #\n  # Output:\n  # Hash with the following keys:\n  # :cond_out  array of SQL fragments for each filter expression\n  # :param_out array of values for parameter substitution in cond_out\n  # :joins     array of joins: either [] or [\"JOIN containers ON ...\"]\n  def record_filters filters, model_class\n    conds_out = []\n    param_out = []\n    joins = []\n\n    model_table_name = model_class.table_name\n    filters.each do |filter|\n      attrs_in, operator, operand = filter\n      if operator == '@@'\n        raise ArgumentError.new(\"Full text search operator is no longer supported\")\n      end\n      if attrs_in == 'any'\n        attrs = model_class.any_searchable_columns(operator)\n      elsif attrs_in.is_a? Array\n        attrs = attrs_in\n      else\n        attrs = [attrs_in]\n      end\n      if !filter.is_a? Array\n        raise ArgumentError.new(\"Invalid element in filters array: #{filter.inspect} is not an array\")\n      elsif !operator.is_a? String\n        raise ArgumentError.new(\"Invalid operator '#{operator}' (#{operator.class}) in filter\")\n      end\n\n      operator = operator.downcase\n      cond_out = []\n\n      if attrs_in == 'any' && (operator == 'ilike' || operator == 'like') && (operand.is_a? String) && operand.match('^[%].*[%]$')\n        # Trigram index search\n        cond_out << model_class.full_text_trgm + \" #{operator} ?\"\n        param_out << operand\n        # Skip the generic per-column operator loop below\n        attrs = []\n      end\n\n      attrs.each do |attr|\n        subproperty = attr.split(\".\", 2)\n\n        if subproperty.length == 2 && subproperty[0] == 'container' && model_table_name == \"container_requests\"\n          # attr is \"tablename.colname\" -- e.g., [\"container.state\", \"=\", \"Complete\"]\n          joins = [\"JOIN containers ON container_requests.container_uuid = containers.uuid\"]\n          attr_model_class = Container\n          attr_table_name = \"containers\"\n          subproperty = subproperty[1].split(\".\", 2)\n        elsif subproperty.length == 2 && subproperty[0] == 'collection' && model_table_name == \"workflows\"\n          # attr is \"tablename.colname\" -- e.g., [\"collection.properties.category\", \"=\", \"WGS\"]\n          joins = [\"JOIN collections ON workflows.collection_uuid = collections.uuid\"]\n          attr_model_class = Collection\n          attr_table_name = \"collections\"\n          subproperty = subproperty[1].split(\".\", 2)\n        else\n          attr_model_class = model_class\n          attr_table_name = model_table_name\n        end\n\n        attr = subproperty[0]\n        proppath = subproperty[1]\n        col = attr_model_class.columns.select { |c| c.name == attr }.first\n\n        if proppath\n          if col.nil? 
or col.type != :jsonb\n            raise ArgumentError.new(\"Invalid attribute '#{attr}' for subproperty filter\")\n          end\n\n          if proppath[0] == \"<\" and proppath[-1] == \">\"\n            proppath = proppath[1..-2]\n          end\n\n          # jsonb search\n          case operator\n          when '=', '!='\n            not_in = if operator == \"!=\" then \"NOT \" else \"\" end\n            cond_out << \"#{not_in}(#{attr_table_name}.#{attr} @> ?::jsonb)\"\n            param_out << SafeJSON.dump({proppath => operand})\n          when 'in'\n            if operand.is_a? Array\n              operand.each do |opr|\n                cond_out << \"#{attr_table_name}.#{attr} @> ?::jsonb\"\n                param_out << SafeJSON.dump({proppath => opr})\n              end\n            else\n              raise ArgumentError.new(\"Invalid operand type '#{operand.class}' \"\\\n                                      \"for '#{operator}' operator in filters\")\n            end\n          when '<', '<=', '>', '>='\n            cond_out << \"#{attr_table_name}.#{attr}->? #{operator} ?::jsonb\"\n            param_out << proppath\n            param_out << SafeJSON.dump(operand)\n          when 'like', 'ilike'\n            cond_out << \"#{attr_table_name}.#{attr}->>? #{operator} ?\"\n            param_out << proppath\n            param_out << operand\n          when 'not in'\n            if operand.is_a? Array\n              cond_out << \"#{attr_table_name}.#{attr}->>? NOT IN (?) OR #{attr_table_name}.#{attr}->>? IS NULL\"\n              param_out << proppath\n              param_out << operand\n              param_out << proppath\n            else\n              raise ArgumentError.new(\"Invalid operand type '#{operand.class}' \"\\\n                                      \"for '#{operator}' operator in filters\")\n            end\n          when 'exists'\n            if operand == true\n              cond_out << \"jsonb_exists_inline_op(#{attr_table_name}.#{attr}, ?)\"\n            elsif operand == false\n              cond_out << \"(NOT jsonb_exists_inline_op(#{attr_table_name}.#{attr}, ?)) OR #{attr_table_name}.#{attr} is NULL\"\n            else\n              raise ArgumentError.new(\"Invalid operand '#{operand}' for '#{operator}' must be true or false\")\n            end\n            param_out << proppath\n          when 'contains'\n            cond_out << \"#{attr_table_name}.#{attr} @> ?::jsonb OR #{attr_table_name}.#{attr} @> ?::jsonb\"\n            param_out << SafeJSON.dump({proppath => operand})\n            param_out << SafeJSON.dump({proppath => [operand]})\n          else\n            raise ArgumentError.new(\"Invalid operator for subproperty search '#{operator}'\")\n          end\n        elsif operator == \"exists\"\n          if col.nil? 
or col.type != :jsonb\n            raise ArgumentError.new(\"Invalid attribute '#{attr}' for operator '#{operator}' in filter\")\n          end\n\n          cond_out << \"jsonb_exists_inline_op(#{attr_table_name}.#{attr}, ?)\"\n          param_out << operand\n        elsif expr = /^ *\\( *(\\w+) *(<=?|>=?|=) *(\\w+) *\\) *$/.match(attr)\n          if operator != '=' || ![true,\"true\"].index(operand)\n            raise ArgumentError.new(\"Invalid expression filter '#{attr}': subsequent elements must be [\\\"=\\\", true]\")\n          end\n          operator = expr[2]\n          attr1, attr2 = expr[1], expr[3]\n          allowed = attr_model_class.searchable_columns(operator)\n          [attr1, attr2].each do |tok|\n            if !allowed.index(tok)\n              raise ArgumentError.new(\"Invalid attribute in expression: '#{tok}'\")\n            end\n            col = attr_model_class.columns.select { |c| c.name == tok }.first\n            if col.type != :integer\n              raise ArgumentError.new(\"Non-numeric attribute in expression: '#{tok}'\")\n            end\n          end\n          cond_out << \"#{attr1} #{operator} #{attr2}\"\n        else\n          if !attr_model_class.searchable_columns(operator).index(attr) &&\n             !(col.andand.type == :jsonb && ['contains', '=', '<>', '!='].index(operator))\n            raise ArgumentError.new(\"Invalid attribute '#{attr}' in filter\")\n          end\n          attr_type = attr_model_class.attribute_column(attr).type\n\n          case operator\n          when '=', '<', '<=', '>', '>=', '!=', 'like', 'ilike'\n            operator = '<>' if operator == '!='\n            if operand.is_a? String\n              if attr_type == :boolean\n                if not ['=', '<>'].include?(operator)\n                  raise ArgumentError.new(\"Invalid operator '#{operator}' for \" \\\n                                          \"boolean attribute '#{attr}'\")\n                end\n                case operand.downcase\n                when '1', 't', 'true', 'y', 'yes'\n                  operand = true\n                when '0', 'f', 'false', 'n', 'no'\n                  operand = false\n                else\n                  raise ArgumentError.new(\"Invalid operand '#{operand}' for \" \\\n                                          \"boolean attribute '#{attr}'\")\n                end\n              end\n              if operator == '<>'\n                # explicitly allow NULL\n                cond_out << \"#{attr_table_name}.#{attr} #{operator} ? OR #{attr_table_name}.#{attr} IS NULL\"\n              else\n                cond_out << \"#{attr_table_name}.#{attr} #{operator} ?\"\n              end\n              if (# any operator that operates on value rather than\n                # representation:\n                operator.match(/[<=>]/) and (attr_type == :datetime))\n                operand = Time.parse operand\n              end\n              param_out << operand\n            elsif operand.nil? and operator == '='\n              cond_out << \"#{attr_table_name}.#{attr} is null\"\n            elsif operand.nil? 
and operator == '<>'\n              cond_out << \"#{attr_table_name}.#{attr} is not null\"\n            elsif (attr_type == :boolean) and ['=', '<>'].include?(operator) and\n                 [true, false].include?(operand)\n              cond_out << \"#{attr_table_name}.#{attr} #{operator} ?\"\n              param_out << operand\n            elsif (attr_type == :integer)\n              if !operand.is_a?(Integer) || operand.bit_length > 64\n                raise ArgumentError.new(\"Invalid operand '#{operand}' \"\\\n                                        \"for integer attribute '#{attr}'\")\n              end\n              cond_out << \"#{attr_table_name}.#{attr} #{operator} ?\"\n              param_out << operand\n            else\n              raise ArgumentError.new(\"Invalid operand type '#{operand.class}' \"\\\n                                      \"for '#{operator}' operator in filters\")\n            end\n          when 'in', 'not in'\n            if !operand.is_a? Array\n              raise ArgumentError.new(\"Invalid operand type '#{operand.class}' \"\\\n                                      \"for '#{operator}' operator in filters\")\n            end\n            if attr_type == :integer\n              operand.each do |el|\n                if !el.is_a?(Integer) || el.bit_length > 64\n                  raise ArgumentError.new(\"Invalid element '#{el}' in array \"\\\n                                          \"for integer attribute '#{attr}'\")\n                end\n              end\n            end\n            cond_out << \"#{attr_table_name}.#{attr} #{operator} (?)\"\n            param_out << operand\n            if operator == 'not in' and not operand.include?(nil)\n              # explicitly allow NULL\n              cond_out[-1] = \"(#{cond_out[-1]} OR #{attr_table_name}.#{attr} IS NULL)\"\n            end\n          when 'is_a'\n            operand = [operand] unless operand.is_a? Array\n            cond = []\n            operand.each do |op|\n              cl = ArvadosModel::kind_class op\n              if cl\n                if attr == 'uuid'\n                  if attr_model_class.uuid_prefix == cl.uuid_prefix\n                    cond << \"1=1\"\n                  else\n                    cond << \"1=0\"\n                  end\n                else\n                  # Use a substring query to support remote uuids\n                  cond << \"substring(#{attr_table_name}.#{attr}, 7, 5) = ?\"\n                  param_out << cl.uuid_prefix\n                end\n              else\n                cond << \"1=0\"\n              end\n            end\n            cond_out << cond.join(' OR ')\n          when 'contains'\n            if col.andand.type != :jsonb\n              raise ArgumentError.new(\"Invalid attribute '#{attr}' for '#{operator}' operator\")\n            end\n            if operand == []\n              raise ArgumentError.new(\"Invalid operand '#{operand.inspect}' for '#{operator}' operator\")\n            end\n            operand = [operand] unless operand.is_a? 
Array\n            operand.each do |op|\n              if !op.is_a?(String)\n                raise ArgumentError.new(\"Invalid element #{operand.inspect} in operand for #{operator.inspect} operator (operand must be a string or array of strings)\")\n              end\n            end\n            # We use jsonb_exists_all_inline_op(a,b) instead of \"a ?&\n            # b\" because the pg gem thinks \"?\" is a bind var.\n            #\n            # See note in migration\n            # 20230815160000_jsonb_exists_functions about _inline_op\n            # functions.\n            #\n            # We use string interpolation instead of param_out\n            # because the pg gem flattens param_out / doesn't support\n            # passing arrays as bind vars.\n            q = operand.map { |s| ActiveRecord::Base.connection.quote(s) }.join(',')\n            cond_out << \"jsonb_exists_all_inline_op(#{attr_table_name}.#{attr}, array[#{q}])\"\n          else\n            raise ArgumentError.new(\"Invalid operator '#{operator}'\")\n          end\n        end\n      end\n      conds_out << cond_out.join(' OR ') if cond_out.any?\n    end\n\n    {:cond_out => conds_out, :param_out => param_out, :joins => joins}\n  end\n\n  def apply_filters query, filters\n    ft = record_filters filters, self\n    if not ft[:cond_out].any?\n      return query\n    end\n    ft[:joins].each do |t|\n      query = query.joins(t)\n    end\n    query.where('(' + ft[:cond_out].join(') AND (') + ')',\n                          *ft[:param_out])\n  end\n\n  def attribute_column attr\n    self.columns.select { |col| col.name == attr.to_s }.first\n  end\nend\n"
  },
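  {
    "path": "examples/hypothetical/record_filters_sketch.rb",
    "content": "# Hypothetical sketch -- not part of the Arvados source tree.\n#\n# Filter shapes accepted by RecordFilters#record_filters\n# (services/api/lib/record_filters.rb), written as a client would send\n# them; the column names are illustrative.\n\nfilters = [\n  # plain column comparison\n  [\"name\", \"like\", \"sample%\"],\n  # jsonb subproperty comparison on a jsonb column\n  [\"properties.category\", \"=\", \"WGS\"],\n  # jsonb key existence test\n  [\"properties\", \"exists\", \"category\"],\n  # cross-table filter: container_requests joined to containers\n  [\"container.state\", \"=\", \"Complete\"],\n  # uuid type test against a kind string\n  [\"uuid\", \"is_a\", \"arvados#collection\"],\n]\n\n# A model class that extends RecordFilters can then apply these:\n#   apply_filters(ContainerRequest.all, filters)\n"
  },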
  {
    "path": "services/api/lib/request_error.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass RequestError < StandardError\nend\n"
  },
  {
    "path": "services/api/lib/safe_json.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass SafeJSON\n  def self.dump(o)\n    return Oj.dump(o, mode: :compat)\n  end\n  def self.load(s)\n    if s.nil? or s == ''\n      # Oj 2.18.5 used to return nil. Not anymore on 3.6.4.\n      # Upgraded for performance issues (see #13803 and\n      # https://github.com/ohler55/oj/issues/441)\n      return nil\n    end\n    Oj.strict_load(s, symbol_keys: false)\n  end\nend\n"
  },
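  {
    "path": "examples/hypothetical/safe_json_sketch.rb",
    "content": "# Hypothetical sketch -- not part of the Arvados source tree.\n#\n# Round-trips through SafeJSON (services/api/lib/safe_json.rb), assuming\n# the Oj gem from the API server's bundle is installed.\n\nrequire 'oj'\n\nSafeJSON.dump({\"a\" => 1})  # => \"{\\\"a\\\":1}\" (Oj :compat mode)\nSafeJSON.load('{\"a\":1}')   # => {\"a\"=>1} (strict mode, string keys)\nSafeJSON.load(\"\")          # => nil, preserving the old Oj 2.x behavior\nSafeJSON.load(nil)         # => nil\n"
  },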
  {
    "path": "services/api/lib/safer_file_store.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass SaferFileStore < ActiveSupport::Cache::FileStore\n  private\n  def delete_empty_directories(dir)\n    # It is not safe to delete an empty directory. Another thread or\n    # process might be in write_entry(), having just created an empty\n    # directory via ensure_cache_path(). If we delete that empty\n    # directory, the other thread/process will crash in\n    # File.atomic_write():\n    #\n    # #<Errno::ENOENT: No such file or directory @ rb_sysopen - /.../tmp/cache/94F/070/.permissions_check.13730420.54542.801783>\n  end\nend\n"
  },
  {
    "path": "services/api/lib/salvage_collection.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule SalvageCollection\n  # Take two input parameters: a collection uuid and reason\n  # Get \"src_collection\" with the given uuid\n  # Create a new collection with:\n  #   src_collection.manifest_text as \"invalid_manifest_text.txt\"\n  #   Locators from src_collection.manifest_text as \"salvaged_data\"\n  # Update src_collection:\n  #   Set src_collection.manifest_text to: \"\"\n  #   Append to src_collection.name: \" (reason; salvaged data at new_collection.uuid)\"\n  #   Set portable_data_hash to \"d41d8cd98f00b204e9800998ecf8427e+0\"\n\n  require File.dirname(__FILE__) + '/../config/environment'\n  include ApplicationHelper\n  require 'tempfile'\n  require 'shellwords'\n\n  def salvage_collection_arv_put cmd\n    new_manifest = %x(#{cmd})\n    if $?.success?\n      new_manifest\n    else\n      raise \"Error during arv-put: #{$?} (cmd was #{cmd.inspect})\"\n    end\n  end\n\n  # Get all the locators (and perhaps other strings that look a lot\n  # like a locators) from the original manifest, even if they don't\n  # appear in the correct positions with the correct space delimiters.\n  def salvage_collection_locator_data manifest\n    locators = []\n    size = 0\n    manifest.scan(/(^|[^[:xdigit:]])([[:xdigit:]]{32})((\\+\\d+)(\\+|\\b))?/) do |_, hash, _, sizehint, _|\n      if sizehint\n        locators << hash.downcase + sizehint\n        size += sizehint.to_i\n      else\n        locators << hash.downcase\n      end\n    end\n    locators << 'd41d8cd98f00b204e9800998ecf8427e+0' if !locators.any?\n    return [locators, size]\n  end\n\n  def salvage_collection uuid, reason='salvaged - see #6277, #6859'\n    act_as_system_user do\n      if !ENV['ARVADOS_API_TOKEN'].present? or !ENV['ARVADOS_API_HOST'].present?\n        raise \"ARVADOS environment variables missing. Please set your admin user credentials as ARVADOS environment variables.\"\n      end\n\n      if !uuid.present?\n        raise \"Collection UUID is required.\"\n      end\n\n      src_collection = Collection.find_by_uuid uuid\n      if !src_collection\n        raise \"No collection found for #{uuid}.\"\n      end\n\n      src_manifest = src_collection.manifest_text || ''\n\n      # create new collection using 'arv-put' with original manifest_text as the data\n      temp_file = Tempfile.new('temp')\n      temp_file.write(src_manifest)\n\n      temp_file.close\n\n      new_manifest = salvage_collection_arv_put \"arv-put --as-stream --use-filename invalid_manifest_text.txt #{Shellwords::shellescape(temp_file.path)}\"\n\n      temp_file.unlink\n\n      # Get the locator data in the format [[locators], size] from the original manifest\n      locator_data = salvage_collection_locator_data src_manifest\n\n      new_manifest += (\". 
#{locator_data[0].join(' ')} 0:#{locator_data[1]}:salvaged_data\\n\")\n\n      new_collection = Collection.new\n      new_collection.name = \"salvaged from #{src_collection.uuid}, #{src_collection.portable_data_hash}\"\n      new_collection.manifest_text = new_manifest\n\n      created = new_collection.save!\n      raise \"New collection creation failed.\" if !created\n\n      $stderr.puts \"Salvaged manifest and data for #{uuid} are in #{new_collection.uuid}.\"\n      puts \"Created new collection #{new_collection.uuid}\"\n\n      # update src_collection collection name, pdh, and manifest_text\n      src_collection.name = (src_collection.name || '') + ' (' + (reason || '') + '; salvaged data at ' + new_collection.uuid + ')'\n      src_collection.manifest_text = ''\n      src_collection.portable_data_hash = 'd41d8cd98f00b204e9800998ecf8427e+0'\n      src_collection.save!\n      $stderr.puts \"Collection #{uuid} emptied and renamed to #{src_collection.name.inspect}.\"\n    end\n  end\nend\n"
  },
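  {
    "path": "examples/hypothetical/salvage_locators_sketch.rb",
    "content": "# Hypothetical sketch -- not part of the Arvados source tree.\n#\n# What salvage_collection_locator_data\n# (services/api/lib/salvage_collection.rb) recovers from a damaged\n# manifest; assumes the SalvageCollection module (and the Rails\n# environment it requires) is loaded.\n\ninclude SalvageCollection\n\nmanifest = \"junk acbd18db4cc2f85cedef654fccc4a4d8+3 more junk\\n\"\nlocators, size = salvage_collection_locator_data(manifest)\n# locators => [\"acbd18db4cc2f85cedef654fccc4a4d8+3\"]\n# size     => 3 (summed from the +N size hints)\n\n# With no recoverable locators it falls back to the empty-collection\n# locator d41d8cd98f00b204e9800998ecf8427e+0.\n"
  },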
  {
    "path": "services/api/lib/serializers.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'safe_json'\n\nclass Serializer\n  def self.dump(val)\n    SafeJSON.dump(val)\n  end\n\n  def self.legacy_load(s)\n    val = Psych.safe_load(s, permitted_classes: [Time])\n    if val.is_a? String\n      # If apiserver was downgraded to a YAML-only version after\n      # storing JSON in the database, the old code would have loaded\n      # the JSON document as a plain string, and then YAML-encoded\n      # it when saving it back to the database. It's too late now to\n      # make the old code behave better, but at least we can\n      # gracefully handle the mess it leaves in the database by\n      # double-decoding on the way out.\n      return SafeJSON.load(val)\n    else\n      return val\n    end\n  end\n\n  def self.load(s)\n    if s.is_a?(object_class)\n      # Rails already deserialized for us\n      s\n    elsif s.nil?\n      object_class.new()\n    elsif s[0..2] == \"---\"\n      legacy_load(s)\n    else\n      SafeJSON.load(s)\n    end\n  end\nend\n\nclass HashSerializer < Serializer\n  def self.object_class\n    ::Hash\n  end\nend\n\nclass ArraySerializer < Serializer\n  def self.object_class\n    ::Array\n  end\nend\n"
  },
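  {
    "path": "examples/hypothetical/serializers_sketch.rb",
    "content": "# Hypothetical sketch -- not part of the Arvados source tree.\n#\n# How Serializer.load (services/api/lib/serializers.rb) dispatches on\n# the stored representation; assumes SafeJSON/Oj are loadable.\n\nHashSerializer.load(nil)         # => {}        (missing value)\nHashSerializer.load('{\"a\":1}')   # => {\"a\"=>1}  (JSON branch)\nHashSerializer.load(\"--- {}\\n\")  # => {}        (legacy YAML branch)\nArraySerializer.load('[1, 2]')   # => [1, 2]\nArraySerializer.load([1, 2])     # => [1, 2]    (already deserialized)\n"
  },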
  {
    "path": "services/api/lib/tasks/.gitkeep",
    "content": ""
  },
  {
    "path": "services/api/lib/tasks/config.rake",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ndef diff_hash base, final\n  diffed = {}\n  base.each do |k,v|\n    bk = base[k]\n    fk = final[k]\n    if bk.is_a? Hash\n      d = diff_hash bk, fk\n      if d.length > 0\n        diffed[k] = d\n      end\n    else\n      if bk.to_yaml != fk.to_yaml\n        diffed[k] = fk\n      end\n    end\n  end\n  diffed\nend\n\nnamespace :config do\n  desc 'Print items that differ between legacy application.yml and system config.yml'\n  task diff: :environment do\n    diffed = diff_hash $arvados_config_global, $arvados_config\n    cfg = { \"Clusters\" => {}}\n    cfg[\"Clusters\"][$arvados_config[\"ClusterID\"]] = diffed.select {|k,v| k != \"ClusterID\"}\n    if cfg[\"Clusters\"][$arvados_config[\"ClusterID\"]].empty?\n      puts \"No migrations required for /etc/arvados/config.yml\"\n    else\n      puts cfg.to_yaml\n    end\n  end\n\n  desc 'Print config.yml after merging with legacy application.yml'\n  task migrate: :environment do\n    diffed = diff_hash $arvados_config_defaults, $arvados_config\n    cfg = { \"Clusters\" => {}}\n    cfg[\"Clusters\"][$arvados_config[\"ClusterID\"]] = diffed.select {|k,v| k != \"ClusterID\"}\n    puts cfg.to_yaml\n  end\n\n  desc 'Print configuration as accessed through Rails.configuration'\n  task dump: :environment do\n    combined = $arvados_config.deep_dup\n    combined.update $remaining_config\n    puts combined.to_yaml\n  end\n\n  desc 'Legacy config check task -- it is a noop now'\n  task check: :environment do\n    # This exists so that build/rails-package-scripts/postinst.sh doesn't fail.\n  end\nend\n"
  },
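  {
    "path": "examples/hypothetical/diff_hash_sketch.rb",
    "content": "# Hypothetical sketch -- not part of the Arvados source tree.\n#\n# diff_hash (services/api/lib/tasks/config.rake) returns only the\n# leaves of `final` that differ from `base`; equal subtrees are\n# dropped. Assumes the rake file defining diff_hash has been loaded.\n\nrequire 'yaml'  # diff_hash compares leaves via to_yaml\n\nbase  = {\"A\" => {\"x\" => 1, \"y\" => 2}, \"B\" => 3}\nfinal = {\"A\" => {\"x\" => 1, \"y\" => 9}, \"B\" => 3}\np diff_hash(base, final)  # => {\"A\"=>{\"y\"=>9}}\n"
  },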
  {
    "path": "services/api/lib/tasks/delete_old_container_logs.rake",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# This task finds containers that have been finished for at least as long as\n# the duration specified in the `clean_container_log_rows_after` config setting,\n# and deletes their stdout, stderr, arv-mount, crunch-run, and  crunchstat logs\n# from the logs table.\n\nnamespace :db do\n  desc \"deprecated / no-op\"\n\n  task delete_old_container_logs: :environment do\n    Rails.logger.info \"this db:delete_old_container_logs rake task is no longer used\"\n  end\nend\n"
  },
  {
    "path": "services/api/lib/tasks/manage_long_lived_tokens.rake",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Tasks that can be useful when changing token expiration policies by assigning\n# a non-zero value to Login.TokenLifetime config.\n\nrequire 'set'\nrequire 'current_api_client'\n\nnamespace :db do\n  desc \"Apply expiration policy on long lived tokens\"\n  task fix_long_lived_tokens: :environment do\n    lifetime = Rails.configuration.API.MaxTokenLifetime\n    if lifetime.nil? or lifetime == 0\n      lifetime = Rails.configuration.Login.TokenLifetime\n    end\n    if lifetime.nil? or lifetime == 0\n      puts(\"No expiration policy set (API.MaxTokenLifetime nor Login.TokenLifetime is set), nothing to do.\")\n      # abort the rake task\n      next\n    end\n    exp_date = Time.now + lifetime\n    puts(\"Setting token expiration to: #{exp_date}\")\n    token_count = 0\n    ll_tokens(lifetime).each do |auth|\n      if auth.user.nil?\n        printf(\"*** WARNING, found ApiClientAuthorization with invalid user: auth id: %d, user id: %d\\n\", auth.id, auth.user_id)\n        # skip this token\n        next\n      end\n      if (auth.user.uuid =~ /-tpzed-000000000000000/).nil? and (auth.user.uuid =~ /-tpzed-anonymouspublic/).nil?\n        CurrentApiClientHelper.act_as_system_user do\n          auth.update!(expires_at: exp_date)\n        end\n        token_count += 1\n      end\n    end\n    puts(\"#{token_count} tokens updated.\")\n  end\n\n  desc \"Show users with long lived tokens\"\n  task check_long_lived_tokens: :environment do\n    lifetime = Rails.configuration.API.MaxTokenLifetime\n    if lifetime.nil? or lifetime == 0\n      lifetime = Rails.configuration.Login.TokenLifetime\n    end\n    if lifetime.nil? or lifetime == 0\n      puts(\"No expiration policy set (API.MaxTokenLifetime nor Login.TokenLifetime is set), nothing to do.\")\n      # abort the rake task\n      next\n    end\n    user_ids = Set.new()\n    token_count = 0\n    ll_tokens(lifetime).each do |auth|\n      if auth.user.nil?\n        printf(\"*** WARNING, found ApiClientAuthorization with invalid user: auth id: %d, user id: %d\\n\", auth.id, auth.user_id)\n        # skip this token\n        next\n      end\n      if not auth.user.nil? and (auth.user.uuid =~ /-tpzed-000000000000000/).nil? and (auth.user.uuid =~ /-tpzed-anonymouspublic/).nil?\n        user_ids.add(auth.user_id)\n        token_count += 1\n      end\n    end\n\n    if user_ids.size > 0\n      puts(\"Found #{token_count} long-lived tokens from users:\")\n      user_ids.each do |uid|\n        u = User.find(uid)\n        puts(\"#{u.username},#{u.email},#{u.uuid}\") if !u.nil?\n      end\n    else\n      puts(\"No long-lived tokens found.\")\n    end\n  end\n\n  def ll_tokens(lifetime)\n    query = ApiClientAuthorization.where(expires_at: nil)\n    query = query.or(ApiClientAuthorization.where(\"expires_at > ?\", Time.now + lifetime))\n    query\n  end\nend\n"
  },
  {
    "path": "services/api/lib/tasks/statement_timeout.rake",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nRake::Task.tasks.each do |task|\n  if task.name =~ /^(db:migrate(:.*)?|db:rollback)$/\n    task.enhance([\"db:disable_timeout\"])\n  end\nend\n\nnamespace :db do\n  desc 'disable postgresql statement_timeout and lock_timeout before running migrations'\n  task disable_timeout: :environment do\n    ActiveRecord::ConnectionAdapters::AbstractAdapter.set_callback :checkout, :before, ->(conn) do\n      # override the default timeouts set by\n      # config/initializers/db_timeout.rb\n      conn.execute \"SET statement_timeout = 0\"\n      conn.execute \"SET lock_timeout = 0\"\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/lib/tasks/test_tasks.rake",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nnamespace :test do\n  new_task = Rake::TestTask.new(tasks: \"test:prepare\") do |t|\n    t.libs << \"test\"\n    t.pattern = \"test/tasks/**/*_test.rb\"\n  end\nend\n"
  },
  {
    "path": "services/api/lib/trashable.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule Trashable\n  def self.included(base)\n    base.before_validation :set_validation_timestamp\n    base.before_validation :ensure_trash_at_not_in_past\n    base.before_validation :sync_trash_state\n    base.before_validation :default_trash_interval\n    base.validate :validate_trash_and_delete_timing\n  end\n\n  # Use a single timestamp for all validations, even though each\n  # validation runs at a different time.\n  def set_validation_timestamp\n    @validation_timestamp = db_current_time\n  end\n\n  # If trash_at is being changed to a time in the past, change it to\n  # now. This allows clients to say \"expires {client-current-time}\"\n  # without failing due to clock skew, while avoiding odd log entries\n  # like \"expiry date changed to {1 year ago}\".\n  def ensure_trash_at_not_in_past\n    if trash_at_changed? && trash_at\n      self.trash_at = [@validation_timestamp, trash_at].max\n    end\n  end\n\n  # Caller can move into/out of trash by setting/clearing is_trashed\n  # -- however, if the caller also changes trash_at, then any changes\n  # to is_trashed are ignored.\n  def sync_trash_state\n    if is_trashed_changed? && !trash_at_changed?\n      if is_trashed\n        self.trash_at = @validation_timestamp\n      else\n        self.trash_at = nil\n        self.delete_at = nil\n      end\n    end\n    self.is_trashed = trash_at && trash_at <= @validation_timestamp || false\n    true\n  end\n\n  def default_delete_after_trash_interval\n    Rails.configuration.Collections.DefaultTrashLifetime\n  end\n\n  def minimum_delete_after_trash_interval\n    Rails.configuration.Collections.BlobSigningTTL\n  end\n\n  def default_trash_interval\n    if trash_at_changed? && !delete_at_changed?\n      # If trash_at is updated without touching delete_at,\n      # automatically update delete_at to a sensible value.\n      if trash_at.nil?\n        self.delete_at = nil\n      else\n        self.delete_at = trash_at + self.default_delete_after_trash_interval\n      end\n    elsif !trash_at || !delete_at || trash_at > delete_at\n      # Not trash, or bogus arguments? Just validate in\n      # validate_trash_and_delete_timing.\n    elsif delete_at_changed? && delete_at >= trash_at\n      # Fix delete_at if needed, so it's not earlier than the expiry\n      # time on any permission tokens that might have been given out.\n\n      # In any case there are no signatures expiring after now+TTL.\n      # Also, if the existing trash_at time has already passed, we\n      # know we haven't given out any signatures since then.\n      earliest_delete = [\n        @validation_timestamp,\n        trash_at_was,\n      ].compact.min + minimum_delete_after_trash_interval\n\n      # The previous value of delete_at is also an upper bound on the\n      # longest-lived permission token. For example, if TTL=14,\n      # trash_at_was=now-7, delete_at_was=now+7, then it is safe to\n      # set trash_at=now+6, delete_at=now+8.\n      earliest_delete = [earliest_delete, delete_at_was].compact.min\n\n      # If delete_at is too soon, use the earliest possible time.\n      if delete_at < earliest_delete\n        self.delete_at = earliest_delete\n      end\n    end\n  end\n\n  def validate_trash_and_delete_timing\n    if trash_at.nil? 
!= delete_at.nil?\n      errors.add :delete_at, \"must be set if trash_at is set, and must be nil otherwise\"\n    elsif delete_at && delete_at < trash_at\n      errors.add :delete_at, \"must not be earlier than trash_at\"\n    end\n    true\n  end\nend\n\nmodule TrashableController\n  def self.included(base)\n    def base._trash_method_description\n      match = name.match(/\\b(\\w+)Controller$/)\n      \"Trash a #{match[1].singularize.underscore.humanize.downcase}.\"\n    end\n    def base._untrash_method_description\n      match = name.match(/\\b(\\w+)Controller$/)\n      \"Untrash a #{match[1].singularize.underscore.humanize.downcase}.\"\n    end\n  end\n\n  def destroy\n    if !@object.is_trashed\n      @object.update!(trash_at: db_current_time)\n    end\n    earliest_delete = (@object.trash_at + @object.minimum_delete_after_trash_interval)\n    if @object.delete_at > earliest_delete\n      @object.update!(delete_at: earliest_delete)\n    end\n    show\n  end\n\n  def trash\n    if !@object.is_trashed\n      @object.update!(trash_at: db_current_time)\n    end\n    show\n  end\n\n  def untrash\n    if !@object.is_trashed\n      raise ArvadosModel::InvalidStateTransitionError.new(\"Item is not trashed, cannot untrash\")\n    end\n\n    if db_current_time >= @object.delete_at\n      raise ArvadosModel::InvalidStateTransitionError.new(\"delete_at time has already passed, cannot untrash\")\n    end\n\n    @object.trash_at = nil\n\n    if params[:ensure_unique_name]\n      @object.save_with_unique_name!\n    else\n      @object.save!\n    end\n\n    show\n  end\nend\n"
  },
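The earliest_delete clamp in Trashable#default_trash_interval is the subtle part of the module. Below is a minimal standalone sketch of the same arithmetic (plain Ruby, not Arvados code), using the worked example from the comments: TTL=14 days, trash_at_was=now-7d, delete_at_was=now+7d, so a requested delete_at of now+8d is acceptable.

```ruby
# Standalone sketch of Trashable's earliest_delete arithmetic; names
# mirror the comments above, but this is illustrative, not Arvados code.
DAY = 86400
now           = Time.now
ttl           = 14 * DAY        # minimum_delete_after_trash_interval
trash_at_was  = now - 7 * DAY   # collection was already trashed a week ago
delete_at_was = now + 7 * DAY   # previously scheduled deletion

# No permission signature can expire later than min(now, trash_at_was) + TTL...
earliest_delete = [now, trash_at_was].compact.min + ttl
# ...and the old delete_at is also an upper bound on outstanding signatures.
earliest_delete = [earliest_delete, delete_at_was].compact.min

requested = now + 8 * DAY
delete_at = requested < earliest_delete ? earliest_delete : requested
puts delete_at == requested   # => true: now+8d is not before now+7d
```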
  {
    "path": "services/api/lib/update_permissions.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire_relative '20200501150153_permission_table_constants'\n\nREVOKE_PERM = 0\nCAN_MANAGE_PERM = 3\n\ndef update_permissions perm_origin_uuid, starting_uuid, perm_level, edge_id=nil, update_all_users=false\n  return if Thread.current[:suppress_update_permissions]\n\n  #\n  # Update a subset of the permission table affected by adding or\n  # removing a particular permission relationship (ownership or a\n  # permission link).\n  #\n  # perm_origin_uuid: This is the object that 'gets' the permission.\n  # It is the owner_uuid or tail_uuid.\n  #\n  # starting_uuid: The object we are computing permission for (or head_uuid)\n  #\n  # perm_level: The level of permission that perm_origin_uuid gets for starting_uuid.\n  #\n  # perm_level is a number from 0-3\n  #   can_read=1\n  #   can_write=2\n  #   can_manage=3\n  #   or call with perm_level=0 to revoke permissions\n  #\n  # check: for testing/debugging, compare the result of the\n  # incremental update against a full table recompute.  Throws an\n  # error if the contents are not identical (ie they produce different\n  # permission results)\n\n  # Theory of operation\n  #\n  # Give a change in a specific permission relationship, we recompute\n  # the set of permissions (for all users) that could possibly be\n  # affected by that relationship.  For example, if a project is\n  # shared with another user, we recompute all permissions for all\n  # projects in the hierarchy.  This returns a set of updated\n  # permissions, which we stash in a temporary table.\n  #\n  # Then, for each user_uuid/target_uuid in the updated permissions\n  # result set we insert/update a permission row in\n  # materialized_permissions, and delete any rows that exist in\n  # materialized_permissions that are not in the result set or have\n  # perm_level=0.\n  #\n  # see db/migrate/20200501150153_permission_table.rb for details on\n  # how the permissions are computed.\n\n  if edge_id.nil?\n    # For changes of ownership, edge_id is starting_uuid.  In turns\n    # out most invocations of update_permissions are for changes of\n    # ownership, so make this parameter optional to reduce\n    # clutter.\n    # For permission links, the uuid of the link object will be passed in for edge_id.\n    edge_id = starting_uuid\n  end\n\n  ActiveRecord::Base.transaction do\n\n    # \"Conflicts with the ROW SHARE, ROW EXCLUSIVE, SHARE UPDATE\n    # EXCLUSIVE, SHARE, SHARE ROW EXCLUSIVE, EXCLUSIVE, and ACCESS\n    # EXCLUSIVE lock modes. This mode allows only concurrent ACCESS\n    # SHARE locks, i.e., only reads from the table can proceed in\n    # parallel with a transaction holding this lock mode.\"\n    ActiveRecord::Base.connection.execute \"LOCK TABLE #{PERMISSION_VIEW} in EXCLUSIVE MODE\"\n\n    # Workaround for\n    # BUG #15160: planner overestimates number of rows in join when there are more than 200 rows coming from CTE\n    # https://www.postgresql.org/message-id/152395805004.19366.3107109716821067806@wrigleys.postgresql.org\n    #\n    # For a crucial join in the compute_permission_subgraph() query, the\n    # planner mis-estimates the number of rows in a Common Table\n    # Expression (CTE, this is a subquery in a WITH clause) and as a\n    # result it chooses the wrong join order.  
The join starts with the\n    # permissions table because it mistakenly thinks\n    # count(materialized_permissions) < count(new computed permissions)\n    # when actually it is the other way around.\n    #\n    # Because of the incorrect join order, it chooses the wrong join\n    # strategy (merge join, which works best when two tables are roughly\n    # the same size).  As a workaround, we can tell it not to use that\n    # join strategy; this causes it to pick hash join instead, which\n    # turns out to be a bit better.  However, because the join order is\n    # still wrong, we don't get the full benefit of the index.\n    #\n    # This is very unfortunate because it makes the query performance\n    # dependent on the size of the materialized_permissions table, when\n    # the goal of this design was to make permission updates scale-free\n    # and only depend on the number of permissions affected and not the\n    # total table size.  In several hours of researching I wasn't able\n    # to find a way to force the correct join order, so I'm calling it\n    # quits here and moving on.\n    #\n    # This is apparently addressed in Postgres 12, but I developed &\n    # tested this on Postgres 9.6, so in the future we should reevaluate\n    # the performance & query plan on Postgres 12.\n    #\n    # Update: as of 2023-10-13, incorrect merge join behavior is still\n    # observed on at least one major user installation that is using\n    # Postgres 14, so it seems this workaround is still needed.\n    #\n    # https://git.furworks.de/opensourcemirror/postgresql/commit/a314c34079cf06d05265623dd7c056f8fa9d577f\n    #\n    # Disable merge join for just this query.  SET LOCAL scopes the\n    # setting to the current transaction, so it reverts automatically\n    # when the transaction ends.\n    ActiveRecord::Base.connection.exec_query \"SET LOCAL enable_mergejoin to false;\"\n\n    if perm_origin_uuid[5..11] == '-tpzed-' && !update_all_users\n      # Modifying a permission granted to a user: recompute all permissions for that user\n\n      ActiveRecord::Base.connection.exec_query %{\nwith origin_user_perms as (\n    select pq.origin_uuid as user_uuid, target_uuid, pq.val, pq.traverse_owned from (\n    #{PERM_QUERY_TEMPLATE % {:base_case => %{\n        select '#{perm_origin_uuid}'::varchar(255), '#{perm_origin_uuid}'::varchar(255), 3, true, true\n               where exists (select uuid from users where uuid='#{perm_origin_uuid}')\n},\n:edge_perm => %{\ncase (edges.edge_id = '#{edge_id}')\n                               when true then #{perm_level}\n                               else edges.val\n                            end\n}\n} }) as pq),\n\n/*\n     Because users always have permission on themselves, this\n     query also makes sure those permission rows are always\n     returned.\n*/\ntemptable_perms as (\n      select * from origin_user_perms\n    union all\n      select target_uuid as user_uuid, target_uuid, 3, true\n        from origin_user_perms\n        where origin_user_perms.target_uuid like '_____-tpzed-_______________' and\n              origin_user_perms.target_uuid != '#{perm_origin_uuid}'\n),\n\n/*\n    Now that we have recomputed a set of permissions, delete any\n    rows from the materialized_permissions table where (target_uuid,\n    user_uuid) is not present or has perm_level=0 in the recomputed\n    set.\n*/\ndelete_rows as (\n  delete from #{PERMISSION_VIEW} where\n    user_uuid='#{perm_origin_uuid}' and\n    not exists (select 1 from temptable_perms\n                where target_uuid=#{PERMISSION_VIEW}.target_uuid and\n                      
user_uuid='#{perm_origin_uuid}' and\n                      val>0)\n)\n\n/*\n  Now insert-or-update permissions in the recomputed set.  The\n  WHERE clause is important to avoid redundantly updating rows\n  that haven't actually changed.\n*/\ninsert into #{PERMISSION_VIEW} (user_uuid, target_uuid, perm_level, traverse_owned)\n  select user_uuid, target_uuid, val as perm_level, traverse_owned from temptable_perms where val>0\non conflict (user_uuid, target_uuid) do update\nset perm_level=EXCLUDED.perm_level, traverse_owned=EXCLUDED.traverse_owned\nwhere #{PERMISSION_VIEW}.user_uuid=EXCLUDED.user_uuid and\n      #{PERMISSION_VIEW}.target_uuid=EXCLUDED.target_uuid and\n       (#{PERMISSION_VIEW}.perm_level != EXCLUDED.perm_level or\n        #{PERMISSION_VIEW}.traverse_owned != EXCLUDED.traverse_owned);\n\n}\n    else\n      # Modifying permission granted to a group, recompute permissions for everything accessible through that group\n    ActiveRecord::Base.connection.exec_query %{\nwith temptable_perms as (\n  select * from compute_permission_subgraph($1, $2, $3, $4)),\n\n/*\n    Now that we have recomputed a set of permissions, delete any\n    rows from the materialized_permissions table where (target_uuid,\n    user_uuid) is not present or has perm_level=0 in the recomputed\n    set.\n*/\ndelete_rows as (\n  delete from #{PERMISSION_VIEW} where\n    target_uuid in (select target_uuid from temptable_perms) and\n    not exists (select 1 from temptable_perms\n                where target_uuid=#{PERMISSION_VIEW}.target_uuid and\n                      user_uuid=#{PERMISSION_VIEW}.user_uuid and\n                      val>0)\n)\n\n/*\n  Now insert-or-update permissions in the recomputed set.  The\n  WHERE clause is important to avoid redundantly updating rows\n  that haven't actually changed.\n*/\ninsert into #{PERMISSION_VIEW} (user_uuid, target_uuid, perm_level, traverse_owned)\n  select user_uuid, target_uuid, val as perm_level, traverse_owned from temptable_perms where val>0\non conflict (user_uuid, target_uuid) do update\nset perm_level=EXCLUDED.perm_level, traverse_owned=EXCLUDED.traverse_owned\nwhere #{PERMISSION_VIEW}.user_uuid=EXCLUDED.user_uuid and\n      #{PERMISSION_VIEW}.target_uuid=EXCLUDED.target_uuid and\n       (#{PERMISSION_VIEW}.perm_level != EXCLUDED.perm_level or\n        #{PERMISSION_VIEW}.traverse_owned != EXCLUDED.traverse_owned);\n},\n                                             'update_permissions.select',\n                                             [perm_origin_uuid,\n                                              starting_uuid,\n                                              perm_level,\n                                              edge_id]\n    end\n\n    if perm_level>0\n      check_permissions_against_full_refresh\n    end\n  end\nend\n\n\ndef check_permissions_against_full_refresh\n  # No-op except when running tests\n  return unless Rails.env == 'test' and !Thread.current[:no_check_permissions_against_full_refresh] and !Thread.current[:suppress_update_permissions]\n\n  # For checking correctness of the incremental permission updates.\n  # Check contents of the current 'materialized_permission' table\n  # against a from-scratch permission refresh.\n\n  q1 = ActiveRecord::Base.connection.exec_query %{\nselect user_uuid, target_uuid, perm_level, traverse_owned from #{PERMISSION_VIEW}\norder by user_uuid, target_uuid\n}, \"check_permissions_against_full_refresh.permission_table\"\n\n  q2 = ActiveRecord::Base.connection.exec_query %{\n    select pq.origin_uuid as 
user_uuid, target_uuid, pq.val as perm_level, pq.traverse_owned from (\n    #{PERM_QUERY_TEMPLATE % {:base_case => %{\n        select uuid, uuid, 3, true, true from users\n},\n:edge_perm => 'edges.val'\n} }) as pq order by origin_uuid, target_uuid\n}, \"check_permissions_against_full_refresh.full_recompute\"\n\n  if q1.count != q2.count\n    puts \"Didn't match incremental+: #{q1.count} != full refresh-: #{q2.count}\"\n  end\n\n  if q1.count > q2.count\n    q1.each_with_index do |r, i|\n      if r != q2[i]\n        puts \"+#{r}\\n-#{q2[i]}\"\n        raise \"Didn't match\"\n      end\n    end\n  else\n    q2.each_with_index do |r, i|\n      if r != q1[i]\n        puts \"+#{q1[i]}\\n-#{r}\"\n        raise \"Didn't match\"\n      end\n    end\n  end\nend\n\ndef skip_check_permissions_against_full_refresh\n  check_perm_was = Thread.current[:no_check_permissions_against_full_refresh]\n  Thread.current[:no_check_permissions_against_full_refresh] = true\n  begin\n    yield\n  ensure\n    Thread.current[:no_check_permissions_against_full_refresh] = check_perm_was\n  end\nend\n\ndef batch_update_permissions\n  check_perm_was = Thread.current[:suppress_update_permissions]\n  Thread.current[:suppress_update_permissions] = true\n  begin\n    yield\n  ensure\n    Thread.current[:suppress_update_permissions] = check_perm_was\n    refresh_permissions\n  end\nend\n\n# Used to account for permissions that a user gains by having\n# can_manage on another user.\n#\n# note: in theory a user could have can_manage access to a user\n# through multiple levels; that isn't handled here (would require a\n# recursive query).  I think that's okay because users getting\n# transitive access through \"can_manage\" on a user is a rarely/never\n# used feature and something we probably want to deprecate and remove.\nUSER_UUIDS_SUBQUERY_TEMPLATE = %{\nselect target_uuid from materialized_permissions where user_uuid in (%{user})\nand target_uuid like '_____-tpzed-_______________' and traverse_owned=true and perm_level >= %{perm_level}\n}\n"
  },
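batch_update_permissions above exists so that bulk operations pay the recompute cost once instead of once per changed edge. A hypothetical usage sketch (collections and new_project_uuid are illustrative placeholders, not names from this repo):

```ruby
# Hypothetical sketch: suppress the per-edge incremental recompute while
# reparenting many objects, then let the ensure block run one full
# refresh_permissions at the end.
batch_update_permissions do
  collections.each do |c|
    # update_permissions is a no-op inside this block because
    # Thread.current[:suppress_update_permissions] is set.
    c.update!(owner_uuid: new_project_uuid)
  end
end
```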
  {
    "path": "services/api/lib/update_priorities.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ndef row_lock_for_priority_update container_uuid\n  # Locks all the containers under this container, and also any\n  # immediate parent containers.  This ensures we have locked\n  # everything that gets touched by either a priority update or state\n  # update.\n  # This method assumes we are already in a transaction.\n  ActiveRecord::Base.connection.exec_query %{\n        select containers.id from containers where containers.uuid in (\n  select pri_container_uuid from container_tree($1)\nUNION\n  select container_requests.requesting_container_uuid from container_requests\n    where container_requests.container_uuid = $1\n          and container_requests.state = 'Committed'\n          and container_requests.requesting_container_uuid is not NULL\n)\n        order by containers.id for update of containers\n  }, 'select_for_update_priorities', [container_uuid]\nend\n\ndef update_priorities starting_container_uuid\n  Container.transaction do\n    # Ensure the row locks were taken in order\n    row_lock_for_priority_update starting_container_uuid\n\n    ActiveRecord::Base.connection.exec_query %{\nupdate containers set priority=computed.upd_priority from container_tree_priorities($1) as computed\n where containers.uuid = computed.pri_container_uuid and priority != computed.upd_priority\n}, 'update_priorities', [starting_container_uuid]\n  end\nend\n"
  },
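Both helpers rely on the same discipline: every transaction that touches a container tree takes the row locks in id order (the `order by containers.id for update` above) before writing, so concurrent updates on overlapping trees block rather than deadlock. A hypothetical calling pattern, assuming container_uuid names an existing container:

```ruby
# Hypothetical sketch of the locking discipline around a state change.
Container.transaction do
  row_lock_for_priority_update(container_uuid)  # subtree + parents, id order
  # ...apply the state change that affects the tree's priorities...
end
# Recompute priorities for the whole tree; this takes the same locks,
# in the same order, inside its own transaction.
update_priorities(container_uuid)
```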
  {
    "path": "services/api/lib/validate_serialized.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# ArrayOfStringsValidator is invoked via:\n#     validates :attr_name, array_of_strings: {allow_empty_strings: true}\nclass ArrayOfStringsValidator < ActiveModel::EachValidator\n  def validate_each(record, attr, val)\n    errtxt = \"must be an array of #{\"non-empty \" unless options[:allow_empty_strings]}strings\"\n    if !val.is_a?(Array)\n      record.errors.add attr, errtxt\n      return\n    end\n    val.each do |ent|\n      if !ent.is_a?(String) || (ent == \"\" && !options[:allow_empty_strings])\n        record.errors.add attr, errtxt\n        return\n      end\n    end\n  end\nend\n\n# HashAttrValidator is invoked via:\n#     validates :attr_name, hash_attr: true\n# We don't call it just HashValidator because that conflicts with the\n# hash_validator gem.\nclass HashAttrValidator < ActiveModel::EachValidator\n  def validate_each(record, attr, val)\n    if !val.is_a?(Hash)\n      record.errors.add attr, \"must be a hash\"\n      return\n    end\n    # Note this should be a no-op since JSON is the only way we accept\n    # serialized attributes.  But we still explicitly check that keys\n    # are strings, etc., before running code that assumes so.\n    ensure_json_representable(record, attr, val)\n  end\n\n  def ensure_json_representable(record, attr, v)\n    case v\n    when String, Integer, Float, BigDecimal, Boolean, NilClass\n      return true\n    when Array\n      v.each do |v|\n        if !ensure_json_representable(record, attr, v)\n          return false\n        end\n      end\n    when Hash\n      v.each do |k, v|\n        if !k.is_a?(String)\n          record.errors.add attr, \"contains non-string hash key #{k.inspect}\"\n          return false\n        end\n        if !ensure_json_representable(record, attr, v)\n          return false\n        end\n      end\n    else\n      record.errors.add attr, \"contains value #{v.inspect} with unexpected class #{v.class}\"\n      return false\n    end\n  end\nend\n"
  },
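For reference, this is how the two validators are declared on a model, per the invocation comments in the file itself (ExampleModel is a hypothetical class, not part of this tree):

```ruby
# Hypothetical model declaring both validators from validate_serialized.rb.
class ExampleModel < ApplicationRecord
  # Rejects anything that is not an Array of Strings; empty strings allowed.
  validates :tags, array_of_strings: {allow_empty_strings: true}
  # Rejects non-Hash values and anything not representable as JSON
  # (non-string keys, values of unexpected classes).
  validates :properties, hash_attr: true
end
```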
  {
    "path": "services/api/lib/whitelist_update.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule WhitelistUpdate\n  def check_update_whitelist permitted_fields\n    attribute_names.each do |field|\n      if !permitted_fields.include?(field.to_sym) && really_changed(field)\n        errors.add field, \"cannot be modified in state '#{self.state}' (#{send(field+\"_was\").inspect}, #{send(field).inspect})\"\n      end\n    end\n  end\n\n  def really_changed(attr)\n    return false if !send(attr+\"_changed?\")\n    old = send(attr+\"_was\")\n    new = send(attr)\n    if (old.nil? || old == [] || old == {}) && (new.nil? || new == [] || new == {})\n      false\n    else\n      old != new\n    end\n  end\n\n  def validate_state_change\n    if self.state_changed?\n      unless state_transitions[self.state_was].andand.include? self.state\n        errors.add :state, \"cannot change from #{self.state_was} to #{self.state}\"\n        return false\n      end\n    end\n  end\nend\n"
  },
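A model that includes WhitelistUpdate is expected to supply a state_transitions map (consulted by validate_state_change) and to call check_update_whitelist with the fields editable in the current state. A hypothetical minimal wiring, with names invented for illustration:

```ruby
# Hypothetical model using WhitelistUpdate; real Arvados models permit
# many more base fields than shown here.
class ExampleTask < ApplicationRecord
  include WhitelistUpdate
  before_validation { check_update_whitelist(permitted_fields) }
  validate :validate_state_change

  # validate_state_change looks up the previous state here; the nil key
  # covers record creation.
  def state_transitions
    {nil => ["Queued"],
     "Queued" => ["Running", "Cancelled"],
     "Running" => ["Complete", "Cancelled"]}
  end

  # Fields editable in the current state; any other attribute that
  # really_changed triggers a validation error.
  def permitted_fields
    state == "Running" ? [:state, :priority] : [:state, :priority, :command]
  end
end
```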
  {
    "path": "services/api/public/404.html",
    "content": "<!-- Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 -->\n\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n  <title>The page you were looking for doesn't exist (404)</title>\n  <style type=\"text/css\">\n    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }\n    div.dialog {\n      width: 25em;\n      padding: 0 4em;\n      margin: 4em auto 0 auto;\n      border: 1px solid #ccc;\n      border-right-color: #999;\n      border-bottom-color: #999;\n    }\n    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }\n  </style>\n</head>\n\n<body>\n  <!-- This file lives in public/404.html -->\n  <div class=\"dialog\">\n    <h1>The page you were looking for doesn't exist.</h1>\n    <p>You may have mistyped the address or the page may have moved.</p>\n  </div>\n</body>\n</html>\n"
  },
  {
    "path": "services/api/public/422.html",
    "content": "<!-- Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 -->\n\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n  <title>The change you wanted was rejected (422)</title>\n  <style type=\"text/css\">\n    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }\n    div.dialog {\n      width: 25em;\n      padding: 0 4em;\n      margin: 4em auto 0 auto;\n      border: 1px solid #ccc;\n      border-right-color: #999;\n      border-bottom-color: #999;\n    }\n    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }\n  </style>\n</head>\n\n<body>\n  <!-- This file lives in public/422.html -->\n  <div class=\"dialog\">\n    <h1>The change you wanted was rejected.</h1>\n    <p>Maybe you tried to change something you didn't have access to.</p>\n  </div>\n</body>\n</html>\n"
  },
  {
    "path": "services/api/public/500.html",
    "content": "<!-- Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 -->\n\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n  <title>We're sorry, but something went wrong (500)</title>\n  <style type=\"text/css\">\n    body { background-color: #fff; color: #666; text-align: center; font-family: arial, sans-serif; }\n    div.dialog {\n      width: 25em;\n      padding: 0 4em;\n      margin: 4em auto 0 auto;\n      border: 1px solid #ccc;\n      border-right-color: #999;\n      border-bottom-color: #999;\n    }\n    h1 { font-size: 100%; color: #f00; line-height: 1.5em; }\n  </style>\n</head>\n\n<body>\n  <!-- This file lives in public/500.html -->\n  <div class=\"dialog\">\n    <h1>We're sorry, but something went wrong.</h1>\n    <p>We've been notified about this issue and we'll take a look at it shortly.</p>\n  </div>\n</body>\n</html>\n"
  },
  {
    "path": "services/api/public/robots.txt",
    "content": "# See http://www.robotstxt.org/wc/norobots.html for documentation on how to use the robots.txt file\n#\n# To ban all spiders from the entire site uncomment the next two lines:\n# User-Agent: *\n# Disallow: /\n"
  },
  {
    "path": "services/api/script/populate-file-info-columns-in-collections.rb",
    "content": "#!/usr/bin/env ruby\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Arvados version 1.4.0 introduces two new columns on the collections table named\n#   file_count\n#   file_size_total\n#\n# The database migration that adds these columns does not populate them with data,\n# it initializes them set to zero.\n#\n# This script will populate the columns, if file_count is zero. It will ignore\n# collections that have invalid manifests, but it will spit out details for those\n# collections.\n#\n# Run the script as\n#\n# cd scripts\n# RAILS_ENV=production bundle exec populate-file-info-columns-in-collections.rb\n#\n\nENV[\"RAILS_ENV\"] = ARGV[0] || ENV[\"RAILS_ENV\"] || \"development\"\nrequire File.dirname(__FILE__) + '/../config/boot'\nrequire File.dirname(__FILE__) + '/../config/environment'\n\nrequire \"arvados/keep\"\nrequire \"group_pdhs\"\n\n  def do_batch(pdhs)\n    pdhs_str = ''\n    pdhs.each do |pdh|\n      pdhs_str << \"'\" << pdh << \"'\" << \",\"\n    end\n\n    collections = ActiveRecord::Base.connection.exec_query(\n      \"SELECT DISTINCT portable_data_hash, manifest_text FROM collections \"\\\n      \"WHERE portable_data_hash IN (#{pdhs_str[0..-2]}) \"\n    )\n    collections.rows.each do |row|\n      begin\n        manifest = Keep::Manifest.new(row[1])\n        ActiveRecord::Base.connection.exec_query(\"BEGIN\")\n        ActiveRecord::Base.connection.exec_query(\"UPDATE collections SET file_count=#{manifest.files_count}, \"\\\n                                                 \"file_size_total=#{manifest.files_size} \"\\\n                                                 \"WHERE portable_data_hash='#{row[0]}'\")\n        ActiveRecord::Base.connection.exec_query(\"COMMIT\")\n      rescue ArgumentError => detail\n        require 'pp'\n        puts\n        puts \"*************** Row detail ***************\"\n        puts\n        pp row\n        puts\n        puts \"************ Collection detail ***********\"\n        puts\n        pp Collection.find_by_portable_data_hash(row[0])\n        puts\n        puts \"************** Error detail **************\"\n        puts\n        pp detail\n        puts\n        puts \"Skipping this collection, continuing!\"\n        next\n      end\n    end\n  end\n\n\ndef main\n\n  distinct_pdh_count = ActiveRecord::Base.connection.exec_query(\n    \"SELECT DISTINCT portable_data_hash FROM collections where file_count=0\"\n  ).rows.count\n\n  # Generator that queries for all the distinct pdhs greater than last_pdh\n  ordered_pdh_query = lambda { |last_pdh, &block|\n    pdhs = ActiveRecord::Base.connection.exec_query(\n      \"SELECT DISTINCT portable_data_hash FROM collections \"\\\n      \"WHERE file_count=0 and portable_data_hash > '#{last_pdh}' \"\\\n      \"ORDER BY portable_data_hash LIMIT 1000\"\n    )\n    pdhs.rows.each do |row|\n      block.call(row[0])\n    end\n  }\n\n  batch_size_max = 1 << 28 # 256 MiB\n  GroupPdhs.group_pdhs_for_multiple_transactions(ordered_pdh_query,\n                                                 distinct_pdh_count,\n                                                 batch_size_max,\n                                                 \"AddFileInfoToCollection\") do |pdhs|\n    do_batch(pdhs)\n  end\nend\n\nmain\n"
  },
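GroupPdhs.group_pdhs_for_multiple_transactions (a helper elsewhere in this tree) drives ordered_pdh_query in pages and yields PDH batches sized to fit one transaction. Stripped of the byte-size accounting, the pagination pattern amounts to the following simplified, hypothetical sketch:

```ruby
# Simplified version of the pagination loop: ask for the next page of
# PDHs after the last one seen, stop when a page comes back empty.
last_pdh = ''
loop do
  page = []
  ordered_pdh_query.call(last_pdh) { |pdh| page << pdh }
  break if page.empty?
  do_batch(page)        # sets file_count, so these rows drop out of
  last_pdh = page.last  # the file_count=0 query on the next pass
end
```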
  {
    "path": "services/api/script/rails",
    "content": "#!/usr/bin/env ruby\n# This command will automatically be run when you run \"rails\" with Rails 3 gems installed from the root of your application.\n\n\n##### SSL - ward, 2012-10-15\nrequire 'rubygems'\nrequire 'rails/command'\nrequire 'rack'\nrequire 'webrick'\nrequire 'webrick/https'\n\nmodule Rails\n    class Server < ::Rack::Server\n        def default_options\n            super.merge({\n                :Port => 3030,\n                :environment => (ENV['RAILS_ENV'] || \"development\").dup,\n                :daemonize => false,\n                :debugger => false,\n                :pid => File.expand_path(\"tmp/pids/server.pid\"),\n                :config => File.expand_path(\"config.ru\"),\n                :SSLEnable => true,\n                :SSLVerifyClient => OpenSSL::SSL::VERIFY_NONE,\n                :SSLCertName => [[\"CN\", \"#{WEBrick::Utils::getservername} #{Time.now().to_s}\"]]\n            })\n        end\n    end\nend\n######### /SSL\n\n\nAPP_PATH = File.expand_path('../../config/application',  __FILE__)\nrequire File.expand_path('../../config/boot',  __FILE__)\nrequire 'rails/commands'\n"
  },
  {
    "path": "services/api/script/rake_test.sh",
    "content": "#! /bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# This script invokes `rake test' in a fresh Docker instance of the\n# API server, e.g.:\n#   docker run -t -i arvados/api /usr/src/arvados/services/api/script/rake_test.sh\n\n/etc/init.d/postgresql start\n\nexport RAILS_ENV=test\ncd /usr/src/arvados/services/api\ncp config/environments/test.rb.example config/environments/test.rb\nbundle exec rake db:setup\nbundle exec rake test\n"
  },
  {
    "path": "services/api/script/restart-dns-server",
    "content": "#!/usr/bin/env bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# usage:\n# \"restart-dns-server <path-to-restart.txt>\" (restart now if needed)\n# or\n# \"restart-dns-server <path-to-restart.txt> -d\" (wait for restart to be needed, restart, repeat)\n\nRESTART_TXT_PATH=$1\n\nif [[ \"$RESTART_TXT_PATH\" == \"\" ]]; then\n  echo\n  echo \"Usage: \"\n  echo \"   $0 <path-to-restart.txt>      # restart now if needed\"\n  echo \"   $0 <path-to-restart.txt> -d   # wait for restart to be needed, restart, repeat\"\n  echo\n  exit 1\nfi\n\nwhile :\ndo\n  if [ -e $RESTART_TXT_PATH ]; then\n    RESTART_COMMAND=`cat $RESTART_TXT_PATH`\n    echo \"restart command: $RESTART_COMMAND\"\n    rm -f \"$RESTART_TXT_PATH\"\n    echo restarting\n    $RESTART_COMMAND\n  fi\n  if [ \"-d\" = \"$2\" ]\n  then\n    sleep 2\n  else\n    exit 0\n  fi\ndone\n"
  },
  {
    "path": "services/api/script/salvage_collection.rb",
    "content": "#!/usr/bin/env ruby\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Take two input parameters: a collection uuid and reason\n# Get \"src_collection\" with the given uuid\n# Create a new collection with:\n#   src_collection.manifest_text as \"invalid_manifest_text.txt\"\n#   Locators from src_collection.manifest_text as \"salvaged_data\"\n# Update src_collection:\n#   Set src_collection.manifest_text to: \"\"\n#   Append to src_collection.name: \" (reason; salvaged data at new_collection.uuid)\"\n#   Set portable_data_hash to \"d41d8cd98f00b204e9800998ecf8427e+0\"\n\nrequire 'optimist'\nrequire './lib/salvage_collection'\ninclude SalvageCollection\n\nopts = Optimist::options do\n  banner ''\n  banner \"Usage: salvage_collection.rb \" +\n    \"{uuid} {reason}\"\n  banner ''\n  opt :uuid, \"uuid of the collection to be salvaged.\", type: :string, required: true\n  opt :reason, \"Reason for salvaging.\", type: :string, required: false\nend\n\n# Salvage the collection with the given uuid\nSalvageCollection.salvage_collection opts.uuid, opts.reason\n"
  },
  {
    "path": "services/api/script/setup-new-user.rb",
    "content": "#!/usr/bin/env ruby\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nabort 'Error: Ruby >= 1.9.3 required.' if RUBY_VERSION < '1.9.3'\n\nrequire 'logger'\nrequire 'optimist'\n\nlog = Logger.new STDERR\nlog.progname = $0.split('/').last\n\nopts = Optimist::options do\n  banner ''\n  banner \"Usage: #{log.progname} \" +\n    \"{user_uuid_or_email} {user_and_repo_name} {vm_uuid}\"\n  banner ''\n  opt :debug, <<-eos\nShow debug messages.\n  eos\n  opt :openid_prefix, <<-eos, default: 'https://www.google.com/accounts/o8/id'\nIf creating a new user record, require authentication from an OpenID \\\nwith this OpenID prefix *and* a matching email address in order to \\\nclaim the account.\n  eos\n  opt :send_notification_email, <<-eos, default: 'true'\nSend notification email after successfully setting up the user.\n  eos\nend\n\nlog.level = (ENV['DEBUG'] || opts.debug) ? Logger::DEBUG : Logger::WARN\n\nif ARGV.count != 3\n  Optimist::die \"required arguments are missing\"\nend\n\nuser_arg, user_repo_name, vm_uuid = ARGV\n\nrequire 'arvados'\narv = Arvados.new(api_version: 'v1')\n\n# Look up the given user by uuid or, failing that, email address.\nbegin\n  found_user = arv.user.get(uuid: user_arg)\nrescue Arvados::TransactionFailedError\n  found = arv.user.list(where: {email: user_arg})[:items]\n\n  if found.count == 0\n    if !user_arg.match(/\\w\\@\\w+\\.\\w+/)\n      abort \"About to create new user, but #{user_arg.inspect} \" +\n               \"does not look like an email address. Stop.\"\n    end\n  elsif found.count != 1\n    abort \"Found #{found.count} users with email. Stop.\"\n  else\n    found_user = found.first\n  end\nend\n\n# Invoke user setup method\nif (found_user)\n  user = arv.user.setup uuid: found_user[:uuid], repo_name: user_repo_name,\n          vm_uuid: vm_uuid, openid_prefix: opts.openid_prefix,\n          send_notification_email: opts.send_notification_email\nelse\n  user = arv.user.setup user: {email: user_arg}, repo_name: user_repo_name,\n          vm_uuid: vm_uuid, openid_prefix: opts.openid_prefix,\n          send_notification_email: opts.send_notification_email\nend\n\nlog.info {\"user uuid: \" + user[:uuid]}\n\nputs user.inspect\n"
  },
  {
    "path": "services/api/test/factories/api_client_authorization.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nFactoryBot.define do\n  factory :api_client_authorization do\n    scopes { ['all'] }\n\n    factory :token do\n      # Just provides shorthand for \"create :api_client_authorization\"\n    end\n\n    to_create do |instance|\n      CurrentApiClientHelper.act_as_user instance.user do\n        instance.save!\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/factories/group.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nFactoryBot.define do\n  factory :group do\n  end\nend\n"
  },
  {
    "path": "services/api/test/factories/link.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nFactoryBot.define do\n  factory :link do\n    factory :permission_link do\n      link_class { 'permission' }\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/factories/user.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass CurrentApiClientHelper\n  extend CurrentApiClient\nend\n\nFactoryBot.define do\n  factory :user do\n    transient do\n      join_groups { [] }\n    end\n    after :create do |user, evaluator|\n      CurrentApiClientHelper.act_as_system_user do\n        evaluator.join_groups.each do |g|\n          Link.create!(tail_uuid: user.uuid,\n                       head_uuid: g.uuid,\n                       link_class: 'permission',\n                       name: 'can_read')\n          Link.create!(tail_uuid: g.uuid,\n                       head_uuid: user.uuid,\n                       link_class: 'permission',\n                       name: 'can_read')\n        end\n      end\n    end\n    first_name { \"Factory\" }\n    last_name { \"Factory\" }\n    identity_url do\n      \"https://example.com/#{rand(2**24).to_s(36)}\"\n    end\n    factory :active_user do\n      is_active { true }\n      after :create do |user|\n        CurrentApiClientHelper.act_as_system_user do\n          Link.create!(tail_uuid: user.uuid,\n                       head_uuid: Group.where('uuid ~ ?', '-f+$').first.uuid,\n                       link_class: 'permission',\n                       name: 'can_read')\n        end\n      end\n    end\n    to_create do |instance|\n      CurrentApiClientHelper.act_as_system_user do\n        instance.save!\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/fixtures/.gitkeep",
    "content": ""
  },
  {
    "path": "services/api/test/fixtures/api_client_authorizations.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/Fixtures.html\n\n# This record's api_token is the SystemRootToken used by the test\n# suite (in fact, sdk/python/tests/run_test_server.py copies it from\n# this file into the test suite config file). That token is accepted\n# regardless of database contents (see\n# ApiClientAuthorization.check_system_root_token) but having a fixture\n# for it allows test cases to access it the same way as other token\n# fixtures, i.e., api_client_authorizations(:system_user).\nsystem_user:\n  uuid: zzzzz-gj3su-000000000000000\n  user: system_user\n  api_token: systemusertesttoken1234567890aoeuidhtnsqjkxbmwvzpy\n  expires_at: 2038-01-01 00:00:00\n\nadmin:\n  uuid: zzzzz-gj3su-027z32aux8dg2s1\n  user: admin\n  api_token: 4axaw8zxe0qm22wa6urpp5nskcne8z88cvbupv653y1njyi05h\n  expires_at: 2038-01-01 00:00:00\n\nadmin_trustedclient:\n  uuid: zzzzz-gj3su-037z32aux8dg2s1\n  user: admin\n  api_token: 1a9ffdcga2o7cw8q12dndskomgs1ygli3ns9k2o9hgzgmktc78\n  expires_at: 2038-01-01 00:00:00\n\ndata_manager:\n  uuid: zzzzz-gj3su-047z32aux8dg2s1\n  user: system_user\n  api_token: 320mkve8qkswstz7ff61glpk3mhgghmg67wmic7elw4z41pke1\n  expires_at: 2038-01-01 00:00:00\n  scopes:\n    - GET /arvados/v1/collections\n    - GET /arvados/v1/keep_services\n    - GET /arvados/v1/keep_services/accessible\n    - GET /arvados/v1/users/current\n    - POST /arvados/v1/logs\n\nminiadmin:\n  uuid: zzzzz-gj3su-057z32aux8dg2s1\n  user: miniadmin\n  api_token: 2zb2y9pw3e70270te7oe3ewaantea3adyxjascvkz0zob7q7xb\n  expires_at: 2038-01-01 00:00:00\n\nrominiadmin:\n  uuid: zzzzz-gj3su-067z32aux8dg2s1\n  user: rominiadmin\n  api_token: 5tsb2pc3zlatn1ortl98s2tqsehpby88wmmnzmpsjmzwa6payh\n  expires_at: 2038-01-01 00:00:00\n\nactive:\n  uuid: zzzzz-gj3su-077z32aux8dg2s1\n  user: active\n  api_token: 3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi\n  expires_at: 2038-01-01 00:00:00\n\nactive_trustedclient:\n  uuid: zzzzz-gj3su-087z32aux8dg2s1\n  user: active\n  api_token: 27bnddk6x2nmq00a1e3gq43n9tsl5v87a3faqar2ijj8tud5en\n  expires_at: 2038-01-01 00:00:00\n\nactive_noscope:\n  uuid: zzzzz-gj3su-097z32aux8dg2s1\n  user: active\n  api_token: activenoscopeabcdefghijklmnopqrstuvwxyz12345678901\n  expires_at: 2038-01-01 00:00:00\n  scopes: []\n\nproject_viewer:\n  uuid: zzzzz-gj3su-107z32aux8dg2s1\n  user: project_viewer\n  api_token: projectviewertoken1234567890abcdefghijklmnopqrstuv\n  expires_at: 2038-01-01 00:00:00\n\nproject_viewer_trustedclient:\n  uuid: zzzzz-gj3su-117z32aux8dg2s1\n  user: project_viewer\n  api_token: projectviewertrustedtoken1234567890abcdefghijklmno\n  expires_at: 2038-01-01 00:00:00\n\nsubproject_admin:\n  uuid: zzzzz-gj3su-127z32aux8dg2s1\n  user: subproject_admin\n  api_token: subprojectadmintoken1234567890abcdefghijklmnopqrst\n  expires_at: 2038-01-01 00:00:00\n\nadmin_vm:\n  uuid: zzzzz-gj3su-137z32aux8dg2s1\n  user: admin\n  api_token: adminvirtualmachineabcdefghijklmnopqrstuvwxyz12345\n  expires_at: 2038-01-01 00:00:00\n  # scope refers to the testvm fixture.\n  scopes: [\"GET /arvados/v1/virtual_machines/zzzzz-2x53u-382brsig8rp3064/logins\"]\n\nadmin_noscope:\n  uuid: zzzzz-gj3su-147z32aux8dg2s1\n  user: admin\n  api_token: adminnoscopeabcdefghijklmnopqrstuvwxyz123456789012\n  expires_at: 2038-01-01 00:00:00\n  scopes: []\n\nactive_all_collections:\n  uuid: zzzzz-gj3su-157z32aux8dg2s1\n  user: active\n  api_token: 
activecollectionsabcdefghijklmnopqrstuvwxyz1234567\n  expires_at: 2038-01-01 00:00:00\n  scopes: [\"GET /arvados/v1/collections/\", \"GET /arvados/v1/keep_services/accessible\"]\n\nactive_userlist:\n  uuid: zzzzz-gj3su-167z32aux8dg2s1\n  user: active\n  api_token: activeuserlistabcdefghijklmnopqrstuvwxyz1234568900\n  expires_at: 2038-01-01 00:00:00\n  scopes: [\"GET /arvados/v1/users\"]\n\nactive_apitokens:\n  uuid: zzzzz-gj3su-187z32aux8dg2s1\n  user: active\n  api_token: activeapitokensabcdefghijklmnopqrstuvwxyz123456789\n  expires_at: 2038-01-01 00:00:00\n  scopes: [\"GET /arvados/v1/api_client_authorizations\",\n           \"POST /arvados/v1/api_client_authorizations\"]\n\nactive_readonly:\n  uuid: zzzzz-gj3su-197z32aux8dg2s1\n  user: active\n  api_token: activereadonlyabcdefghijklmnopqrstuvwxyz1234568790\n  expires_at: 2038-01-01 00:00:00\n  scopes: [\"GET /\"]\n\nspectator:\n  uuid: zzzzz-gj3su-207z32aux8dg2s1\n  user: spectator\n  api_token: zw2f4gwx8hw8cjre7yp6v1zylhrhn3m5gvjq73rtpwhmknrybu\n  expires_at: 2038-01-01 00:00:00\n\nfoo:\n  uuid: zzzzz-gj3su-fohzae5ib1aseiv\n  user: user_foo_in_sharing_group\n  api_token: lokah4xip8ahgee8oof5zitah3ohdai6je9cu1uogh4bai3ohw\n  expires_at: 2038-01-01 00:00:00\n\nfoo_collections:\n  uuid: zzzzz-gj3su-217z32aux8dg2s1\n  user: user_foo_in_sharing_group\n  api_token: spectatorcollectionscdefghijklmnopqrstuvwxyz123245\n  expires_at: 2038-01-01 00:00:00\n  scopes: [\"GET /arvados/v1/collections\", \"GET /arvados/v1/collections/\",\n           \"POST /arvados/v1/collections\"]\n\ninactive:\n  uuid: zzzzz-gj3su-227z32aux8dg2s1\n  user: inactive\n  api_token: 5s29oj2hzmcmpq80hx9cta0rl5wuf3xfd6r7disusaptz7h9m0\n  expires_at: 2038-01-01 00:00:00\n\ninactive_uninvited:\n  uuid: zzzzz-gj3su-237z32aux8dg2s1\n  user: inactive_uninvited\n  api_token: 62mhllc0otp78v08e3rpa3nsmf8q8ogk47f7u5z4erp5gpj9al\n  expires_at: 2038-01-01 00:00:00\n\ninactive_uninvited_trustedclient:\n  uuid: zzzzz-gj3su-228z32aux8dg2s1\n  user: inactive_uninvited\n  api_token: 7s29oj2hzmcmpq80hx9cta0rl5wuf3xfd6r7disusaptz7h9m0\n  expires_at: 2038-01-01 00:00:00\n\ninactive_but_signed_user_agreement:\n  uuid: zzzzz-gj3su-247z32aux8dg2s1\n  user: inactive_but_signed_user_agreement\n  api_token: 64k3bzw37iwpdlexczj02rw3m333rrb8ydvn2qq99ohv68so5k\n  expires_at: 2038-01-01 00:00:00\n\nexpired:\n  uuid: zzzzz-gj3su-257z32aux8dg2s1\n  user: active\n  api_token: 2ym314ysp27sk7h943q6vtc378srb06se3pq6ghurylyf3pdmx\n  expires_at: 1970-01-01 00:00:00\n\nexpired_trustedclient:\n  uuid: zzzzz-gj3su-267z32aux8dg2s1\n  user: active\n  api_token: 5hpni7izokzcatku2896xxwqdbt5ptomn04r6auc7fohnli82v\n  expires_at: 1970-01-01 00:00:00\n\nvalid_token_deleted_user:\n  uuid: zzzzz-gj3su-277z32aux8dg2s1\n  user_id: 1234567\n  api_token: tewfa58099sndckyqhlgd37za6e47o6h03r9l1vpll23hudm8b\n  expires_at: 2038-01-01 00:00:00\n\nanonymous:\n  uuid: zzzzz-gj3su-287z32aux8dg2s1\n  user: anonymous\n  api_token: 4kg6k6lzmp9kj4cpkcoxie964cmvjahbt4fod9zru44k4jqdmi\n  expires_at: 2038-01-01 00:00:00\n  scopes: [\"GET /\"]\n\njob_reader:\n  uuid: zzzzz-gj3su-297z32aux8dg2s1\n  user: job_reader\n  api_token: e99512cdc0f3415c2428b9758f33bdfb07bc3561b00e86e7e6\n  expires_at: 2038-01-01 00:00:00\n\njob_reader2:\n  uuid: zzzzz-gj3su-jobreader2auth1\n  user: job_reader2\n  api_token: jobreader2415c2428b9758f33bdfb07bc3561b0jobreader2\n  expires_at: 2038-01-01 00:00:00\n\nactive_no_prefs:\n  uuid: zzzzz-gj3su-307z32aux8dg2s1\n  user: active_no_prefs\n  api_token: 3kg612cdc0f3415c2428b9758f33bdfb07bc3561b00e86qdmi\n  expires_at: 
2038-01-01 00:00:00\n\nactive_no_prefs_profile_no_getting_started_shown:\n  uuid: zzzzz-gj3su-317z32aux8dg2s1\n  user: active_no_prefs_profile_no_getting_started_shown\n  api_token: 3kg612cdc0f3415c242856758f33bdfb07bc3561b00e86qdmi\n  expires_at: 2038-01-01 00:00:00\n\nactive_no_prefs_profile_with_getting_started_shown:\n  uuid: zzzzz-gj3su-327z32aux8dg2s1\n  user: active_no_prefs_profile_with_getting_started_shown\n  api_token: 3kg612cdc0f3415c245786758f33bdfb07babcd1b00e86qdmi\n  expires_at: 2038-01-01 00:00:00\n\nactive_with_prefs_profile_no_getting_started_shown:\n  uuid: zzzzz-gj3su-337z32aux8dg2s1\n  user: active_with_prefs_profile_no_getting_started_shown\n  api_token: 3kg612cdc0f3415c245786758f33bdfb07befgh1b00e86qdmi\n  expires_at: 2038-01-01 00:00:00\n\nuser_foo_in_sharing_group:\n  uuid: zzzzz-gj3su-347z32aux8dg2s1\n  user: user_foo_in_sharing_group\n  api_token: 2p1pou8p4ls208mcbedeewlotghppenobcyrmyhq8pyf51xd8u\n  expires_at: 2038-01-01 00:00:00\n\nuser_bar_in_sharing_group:\n  uuid: zzzzz-gj3su-62hryf5fht531mz\n  user: user_bar_in_sharing_group\n  api_token: 5vy55akwq85vghh80wc2cuxl4p8psay73lkpqf5c2cxvp6rmm6\n  expires_at: 2038-01-01 00:00:00\n\nuser1_with_load:\n  uuid: zzzzz-gj3su-357z32aux8dg2s1\n  user: user1_with_load\n  api_token: 1234k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi\n  expires_at: 2038-01-01 00:00:00\n\nfuse:\n  uuid: zzzzz-gj3su-367z32aux8dg2s1\n  user: fuse\n  api_token: 4nagbkv8eap0uok7pxm72nossq5asihls3yn5p4xmvqx5t5e7p\n  expires_at: 2038-01-01 00:00:00\n\ndispatch2:\n  uuid: zzzzz-gj3su-jrriu629zljsnuf\n  user: system_user\n  api_token: pbe3v4v5oag83tjwxjh0a551j44xdu8t7ol5ljw3ixsq8oh50q\n  expires_at: 2038-01-01 00:00:00\n\nrunning_container_auth:\n  uuid: zzzzz-gj3su-077z32aux8dg2s2\n  user: active\n  api_token: it2gl94mgu3rbn5s2d06vzh73ns1y6cthct0tvg82qdlsxvbwk\n  expires_at: 2038-01-01 00:00:00\n\nrunning_container_with_logs_auth:\n  uuid: zzzzz-gj3su-n4xycwjpvvi776n\n  user: active\n  api_token: mkpdp5jbytt471lw9so1by2t5ylciojdur845rfn4dtm0etl33\n  expires_at: 2038-01-01 00:00:00\n\nrunning_to_be_deleted_container_auth:\n  uuid: zzzzz-gj3su-ty6lvu9d7u7c2sq\n  user: active\n  api_token: ge1pez7dkk7nqntwcsj922g2b7a2t27xz6nsx39r15kbcqmp55\n  expires_at: 2038-01-01 00:00:00\n\npermission_perftest:\n  uuid: zzzzz-gj3su-077z32anoj93boo\n  user: permission_perftest\n  api_token: 3kg6k6lzmp9kjabonentustoecn5bahbt2fod9zru30k1jqdmi\n  expires_at: 2038-01-01 00:00:00\n\nfoo_collection_sharing_token:\n  uuid: zzzzz-gj3su-gf02tdm4g1z3e3u\n  user: active\n  api_token: iknqgmunrhgsyfok8uzjlwun9iscwm3xacmzmg65fa1j1lpdss\n  expires_at: 2038-01-01 00:00:00\n  scopes:\n  - GET /arvados/v1/collections/zzzzz-4zz18-znfnqtbbv4spc3w\n  - GET /arvados/v1/collections/zzzzz-4zz18-znfnqtbbv4spc3w/\n  - GET /arvados/v1/keep_services/accessible\n\ncontainer_runtime_token:\n  uuid: zzzzz-gj3su-2nj68s291f50gd9\n  user: container_runtime_token_user\n  api_token: 2d19ue6ofx26o3mm7fs9u6t7hov9um0v92dzwk1o2xed3abprw\n  expires_at: 2038-01-01 00:00:00\n\ncrt_user:\n  uuid: zzzzz-gj3su-3r47qqy5ja5d54v\n  user: container_runtime_token_user\n  api_token: 13z1tz9deoryml3twep0vsahi4862097pe5lsmesugnkgpgpwk\n  expires_at: 2038-01-01 00:00:00\n\nruntime_token_limited_scope:\n  uuid: zzzzz-gj3su-2fljvypjrr4yr9m\n  user: container_runtime_token_user\n  api_token: 1fwc3be1m13qkypix2gd01i4bq5ju483zjfc0cf4babjseirbm\n  expires_at: 2038-01-01 00:00:00\n  scopes: [\"GET /\"]\n"
  },
  {
    "path": "services/api/test/fixtures/authorized_keys.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nactive:\n  uuid: zzzzz-fngyi-12nc9ov4osp8nae\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  authorized_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  key_type: SSH\n  name: active\n  public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCo+8pc/xNohU3Mo2pAieLohLJcWy9OmNOnsEWlegYYoeynkczimicKRmB2iP50v2oKrtshIXwigfU26b0rGEJayFvsA7FCstz5G/tJy3YJGnQUDmrQBuB8SsQDL/O0Nnh8B8XmKSlxuv3FxLyPhUmcxxjIUIEMWVMlIKAfzmySsPby/QREJffUkFPa+luNkOVd5cyvwd6dnl0SLbrqZgcF3fbkOLDVgv3oceIYLjcy/SjqGR4wtGWHFFuna0M2/5YEvWpxD/HNO3WkFEdlAUEEWpvd/u3bmHq2p7ADbaX9ZaNDb8YbjFIOUxaJh+Vf0V6nDhEnUPylzM07F3fnvXQM53Xu5oYA6cp0Com61MBaXUDwM/w6PS2RtF8CG3ICMs5AsIy+Cnsuowj3fRlK29dgZ7K2pYRV2SlQj4vxjwpUcQCL/TFv31VnCMFKQBqmqh8iwZV3U6LLc3cwL9COXnIPF4lXjODL3geWsBNXo3hfoj6qD+2/+9/zOZUtGbQXlBmNC/wG/cK1A1L4S9docZT4QAiaSCdwcLB68hIvQMEOpffoeQhNZj0SddLLdEyjJY6rfWjbmnV68TzXoDz26hoPtagD+wvHOxz3D8BQ9RIqfNI1jNlwVkoKNVfszIPmESwJCu99+6TnyJl4923MTEXNOrJ7LgVUemWchOlkTDINuw== active-user@arvados.local\n\nadmin:\n  uuid: zzzzz-fngyi-g290j3i3u701duh\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  authorized_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  key_type: SSH\n  name: admin\n  public_key: ssh-dss AAAAB3NzaC1kc3MAAACBAKy1IDMGwa7/Yjas77vLSShBE3SzpPXqXu6nRMC9zdIoMdctjhfP+GOOyQQP12rMs16NYmfdOxX+sa2t9syI/8NhDxTmNbHVw2jHimC6SL02v8WHDIw2vaBCVN+CHdeYbZsBB/8/M+2PO3uUWbr0TjoXcxrKYScS/aTTjSAWRg4ZAAAAFQDR/xAdrewj1ORNIQs+kWWdjmiO0wAAAIBC+G92r2ZeGaHLCMI0foKnfuQzg9fKp5krEvE6tvRNju7iOqtB9xe1qsAqr6GPZQjfSrNPac6T1pxMoh+an4PfNs5xgBIpvy93oqALd4maQt6483vsIyVCw6nQD7s/8IpIHpwxFEFs5/5moYxzY64eY0ldSXJwvPsrBTruhuUdugAAAIBut96rWQYTnYUdngyUK9EoJzgKn3l7gg0IQoFC4hS96D8vUm0wIdSEQHt01pSc0KR1Nnb4JrnNz/qCH45wOy5oB9msQ/2Pq2brTDZJcIPcN1LbMCps9PetUruz1OjK1NzDuLmvsrP3GBLxJrtmrCoKHLzPZ6QSefW0OymFgaDFGg==\n\nspectator:\n  uuid: zzzzz-fngyi-3uze1ipbnz2c2c2\n  owner_uuid: zzzzz-tpzed-l1s2piq4t4mps8r\n  modified_by_user_uuid: zzzzz-tpzed-l1s2piq4t4mps8r\n  authorized_user_uuid: zzzzz-tpzed-l1s2piq4t4mps8r\n  key_type: SSH\n  name: spectator\n  public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDJK4hxmgXzg1gty+91JfkpgikAZxTvFTQoaFUJYTHIygz2V3FgU64NkK3yfwh+bhs7n8YIMftuCHfojKEJTtedbiv/mYpItetzdOwYONCGSEk1VnfipGhnFvL7FZDESTxLN9KNve3ZmZh8HvO6s8fdlTlqTTNKpsdwLiQn2s3W1TWvru/NP504MD5qPeZ4+8jZEh/uiuRaeXqPDAlE9QGPV4FRAA1xo0dBZIrRMwQC8kOttq/i2pLgHq1xW9p4J23oV68O/kkeBb7VwrX3Av/M61kvRsP8tA5gqh+HMKVO2qTP4yG6eGkAobIokQAcyZetPQIDmfVeoB0NzwPfAy4r\n\nproject_viewer:\n  uuid: zzzzz-fngyi-5d3av1396niwcej\n  owner_uuid: zzzzz-tpzed-projectviewer1a\n  modified_by_user_uuid: zzzzz-tpzed-projectviewer1a\n  authorized_user_uuid: zzzzz-tpzed-projectviewer1a\n  key_type: SSH\n  name: project_viewer\n  public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPkOJMQzT9n6RousrLMU7c/KFKTI7I5JifDIEtGJJ1MMZW0GVoxtXALU90HcaRjEOwGPvQPxj7IDYqXs2N9uvm8SUWJMiz6c8NIjhGTkUoOnTFl4E9YTvkkKNs0P+3eT1Y+6zfTcFJHKP3AR4kZX+oiPHowRpCIlnLjXCFxX+E+YI554A7bS4yfOZO9lf6vtiT9I+6EqxC8a0hzZauPC1ZC3d/AFgBnrXJ2fBlAEySznru39quHN1u3v4qHTyaO2pDbG6vdI6O3JDCXCJKRv/B2FLuLTlzB0YesM1FiE6w8QgPxqb42B+uWTZb969UZliH8Pzw/mscOLAjmARDC02z\n"
  },
  {
    "path": "services/api/test/fixtures/collections.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nuser_agreement:\n  uuid: zzzzz-4zz18-t68oksiu9m80s4y\n  current_version_uuid: zzzzz-4zz18-t68oksiu9m80s4y\n  portable_data_hash: b519d9cb706a29fc7ea24dbea2f05851+93\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2013-12-26T19:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2013-12-26T19:22:54Z\n  updated_at: 2013-12-26T19:22:54Z\n  manifest_text: \". 6a4ff0499484c6c79c95cd8c566bd25f+249025 0:249025:GNU_General_Public_License,_version_3.pdf\\n\"\n  name: user_agreement\n\ncollection_owned_by_active:\n  uuid: zzzzz-4zz18-bv31uwvy3neko21\n  current_version_uuid: zzzzz-4zz18-bv31uwvy3neko21\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T18:22:54Z\n  updated_at: 2014-02-03T18:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: owned_by_active\n  version: 2\n\ncollection_owned_by_active_with_file_stats:\n  uuid: zzzzz-4zz18-fjeod4od92kfj5f\n  current_version_uuid: zzzzz-4zz18-fjeod4od92kfj5f\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  file_count: 1\n  file_size_total: 3\n  name: owned_by_active_with_file_stats\n  version: 1\n\ncollection_owned_by_active_past_version_1:\n  uuid: zzzzz-4zz18-znfnqtbbv4spast\n  current_version_uuid: zzzzz-4zz18-bv31uwvy3neko21\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T18:22:54Z\n  updated_at: 2014-02-03T18:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: owned_by_active_version_1\n  version: 1\n\nfoo_file:\n  uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  current_version_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2015-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-03T17:22:54Z\n  updated_at: 2015-02-03T17:22:54Z\n  manifest_text: \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\\n\"\n  name: foo_file\n\nbar_file:\n  uuid: zzzzz-4zz18-ehbhgtheo8909or\n  current_version_uuid: zzzzz-4zz18-ehbhgtheo8909or\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2015-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-03T17:22:54Z\n  updated_at: 2015-02-03T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: bar_file\n\nbaz_file:\n  uuid: zzzzz-4zz18-y9vne9npefyxh8g\n  current_version_uuid: zzzzz-4zz18-y9vne9npefyxh8g\n  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 
73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\\n\"\n  name: baz_file\n\nw_a_z_file:\n  uuid: zzzzz-4zz18-25k12570yk134b3\n  current_version_uuid: zzzzz-4zz18-25k12570yk134b3\n  portable_data_hash: 44a8da9ec82098323895cd14e178386f+56\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-09T10:53:38Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-09T10:55:38Z\n  updated_at: 2015-02-09T10:55:38Z\n  manifest_text: \". 4c6c2c0ac8aa0696edd7316a3be5ca3c+5 0:5:w\\\\040\\\\141\\\\040z\\n\"\n  name: \"\\\"w a z\\\" file\"\n  version: 2\n\nw_a_z_file_version_1:\n  uuid: zzzzz-4zz18-25k12570yk1ver1\n  current_version_uuid: zzzzz-4zz18-25k12570yk134b3\n  portable_data_hash: ba4ba4c7b99a58806b1ed70ea1263afe+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-09T10:53:38Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-09T10:55:38Z\n  updated_at: 2015-02-09T10:55:38Z\n  manifest_text: \". 4d20280d5e516a0109768d49ab0f3318+3 0:3:waz\\n\"\n  name: \"waz file\"\n  version: 1\n\nmultilevel_collection_1:\n  uuid: zzzzz-4zz18-pyw8yp9g3pr7irn\n  current_version_uuid: zzzzz-4zz18-pyw8yp9g3pr7irn\n  portable_data_hash: f9ddda46bb293b6847da984e3aa735db+290\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3\\n./dir1 d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3\\n./dir1/subdir d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3\\n./dir2 d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2 0:0:file3\\n\"\n  name: multilevel_collection_1\n\nmultilevel_collection_2:\n  uuid: zzzzz-4zz18-45xf9hw1sxkhl6q\n  current_version_uuid: zzzzz-4zz18-45xf9hw1sxkhl6q\n  # All of this collection's files are deep in subdirectories.\n  portable_data_hash: 8591cc5caeca80fc62fd529ba1d63bf3+118\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \"./dir1/sub1 d41d8cd98f00b204e9800998ecf8427e+0 0:0:a 0:0:b\\n./dir2/sub2 d41d8cd98f00b204e9800998ecf8427e+0 0:0:c 0:0:d\\n\"\n  name: multilevel_collection_2\n\ndocker_image:\n  uuid: zzzzz-4zz18-1v45jub259sjjgb\n  current_version_uuid: zzzzz-4zz18-1v45jub259sjjgb\n  # This Collection has links with Docker image metadata.\n  portable_data_hash: fa3c1a9cb6783f85f2ecda037e07b8c3+167\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-06-11T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-06-11T17:22:54Z\n  updated_at: 2014-06-11T17:22:54Z\n  manifest_text: \". 
d21353cfe035e3e384563ee55eadbb2f+67108864 5c77a43e329b9838cbec18ff42790e57+55605760 0:122714624:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678.tar\\n\"\n  name: docker_image\n\n# tagged docker image with sha256:{hash}.tar filename\ndocker_image_1_12:\n  uuid: zzzzz-4zz18-1g4g0vhpjn9wq7i\n  current_version_uuid: zzzzz-4zz18-1g4g0vhpjn9wq7i\n  portable_data_hash: d740a57097711e08eb9b2a93518f20ab+174\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2016-10-19 08:50:45.653552268 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2016-10-19 08:50:45.652930000 Z\n  updated_at: 2016-10-19 08:50:45.652930000 Z\n  manifest_text: \". d21353cfe035e3e384563ee55eadbb2f+67108864 5c77a43e329b9838cbec18ff42790e57+55605760 0:122714624:sha256:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678.tar\\n\"\n  name: docker_image_1_12\n\nunlinked_docker_image:\n  uuid: zzzzz-4zz18-d0d8z5wofvfgwad\n  current_version_uuid: zzzzz-4zz18-d0d8z5wofvfgwad\n  # This Collection contains a file that looks like a Docker image,\n  # but has no Docker metadata links pointing to it.\n  portable_data_hash: 9ae44d5792468c58bcf85ce7353c7027+124\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-06-11T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-06-11T17:22:54Z\n  updated_at: 2014-06-11T17:22:54Z\n  manifest_text: \". fca529cfe035e3e384563ee55eadbb2f+67108863 0:67108863:bcd02158b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678.tar\\n\"\n  name: unlinked_docker_image\n\nempty:\n  uuid: zzzzz-4zz18-gs9ooj1h9sd5mde\n  current_version_uuid: zzzzz-4zz18-gs9ooj1h9sd5mde\n  portable_data_hash: d41d8cd98f00b204e9800998ecf8427e+0\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-06-11T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-06-11T17:22:54Z\n  updated_at: 2014-06-11T17:22:54Z\n  manifest_text: \"\"\n  name: \"empty collection for python test\"\n\nfoo_collection_in_aproject:\n  uuid: zzzzz-4zz18-fy296fx3hot09f7\n  current_version_uuid: zzzzz-4zz18-fy296fx3hot09f7\n  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  manifest_text: \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\\n\"\n  name: \"zzzzz-4zz18-fy296fx3hot09f7 added sometime\"\n\nfuse_filters_test_foo:\n  uuid: zzzzz-4zz18-4e2kjqv891jl3p3\n  current_version_uuid: zzzzz-4zz18-4e2kjqv891jl3p3\n  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  owner_uuid: zzzzz-tpzed-fusefiltertest1\n  created_at: 2024-02-09T12:01:00Z\n  modified_at: 2024-02-09T12:01:01Z\n  updated_at: 2024-02-09T12:01:01Z\n  manifest_text: \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\\n\"\n  name: foo\n  properties:\n    MainFile: foo\n\nfuse_filters_test_bar:\n  uuid: zzzzz-4zz18-qpxqtq2wbjnu630\n  current_version_uuid: zzzzz-4zz18-qpxqtq2wbjnu630\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  owner_uuid: zzzzz-tpzed-fusefiltertest1\n  created_at: 2024-02-09T12:02:00Z\n  modified_at: 2024-02-09T12:02:01Z\n  updated_at: 2024-02-09T12:02:01Z\n  manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: bar\n  properties:\n    MainFile: bar\n\nfuse_filters_test_baz:\n  uuid: zzzzz-4zz18-ls97ezovrkkpfxz\n  current_version_uuid: zzzzz-4zz18-ls97ezovrkkpfxz\n  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  owner_uuid: zzzzz-tpzed-fusefiltertest1\n  created_at: 2024-02-09T12:03:00Z\n  modified_at: 2024-02-09T12:03:01Z\n  updated_at: 2024-02-09T12:03:01Z\n  manifest_text: \". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\\n\"\n  name: baz\n  properties:\n    MainFile: baz\n\nuser_agreement_in_anonymously_accessible_project:\n  uuid: zzzzz-4zz18-uukreo9rbgwsujr\n  current_version_uuid: zzzzz-4zz18-uukreo9rbgwsujr\n  portable_data_hash: b519d9cb706a29fc7ea24dbea2f05851+93\n  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0\n  created_at: 2014-06-13 20:42:26 -0800\n  modified_at: 2014-06-13 20:42:26 -0800\n  updated_at: 2014-06-13 20:42:26 -0800\n  manifest_text: \". 6a4ff0499484c6c79c95cd8c566bd25f+249025 0:249025:GNU_General_Public_License,_version_3.pdf\\n\"\n  name: GNU General Public License, version 3\n\npublic_text_file:\n  uuid: zzzzz-4zz18-4en62shvi99lxd4\n  current_version_uuid: zzzzz-4zz18-4en62shvi99lxd4\n  portable_data_hash: 55713e6a34081eb03609e7ad5fcad129+62\n  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0\n  created_at: 2015-02-12 16:58:03 -0500\n  modified_at: 2015-02-12 16:58:03 -0500\n  updated_at: 2015-02-12 16:58:03 -0500\n  manifest_text: \". f0ef7081e1539ac00ef5b761b4fb01b3+12 0:12:Hello\\\\040world.txt\\n\"\n  name: Hello world\n\nbaz_collection_name_in_asubproject:\n  uuid: zzzzz-4zz18-lsitwcf548ui4oe\n  current_version_uuid: zzzzz-4zz18-lsitwcf548ui4oe\n  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45\n  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  manifest_text: \". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\\n\"\n  name: \"zzzzz-4zz18-lsitwcf548ui4oe added sometime\"\n\nempty_collection_name_in_active_user_home_project:\n  uuid: zzzzz-4zz18-5qa38qghh1j3nvv\n  current_version_uuid: zzzzz-4zz18-5qa38qghh1j3nvv\n  portable_data_hash: d41d8cd98f00b204e9800998ecf8427e+0\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-08-06 22:11:51.242392533 Z\n  modified_at: 2014-08-06 22:11:51.242150425 Z\n  manifest_text: \"\"\n  name: Empty collection\n\nbaz_file_in_asubproject:\n  uuid: zzzzz-4zz18-0mri2x4u7ftngez\n  current_version_uuid: zzzzz-4zz18-0mri2x4u7ftngez\n  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45\n  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\\n\"\n  name: baz_file\n\ncollection_to_move_around_in_aproject:\n  uuid: zzzzz-4zz18-0mri2x4u7ft1234\n  current_version_uuid: zzzzz-4zz18-0mri2x4u7ft1234\n  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45\n  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 
73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\\n\"\n  name: collection_to_move_around\n\n# Note: collections(:expired_collection) fixture finder won't work\n# because it is not in default scope\nexpired_collection:\n  uuid: zzzzz-4zz18-mto52zx1s7sn3ih\n  current_version_uuid: zzzzz-4zz18-mto52zx1s7sn3ih\n  portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  is_trashed: true\n  trash_at: 2001-01-01T00:00:00Z\n  delete_at: 2038-01-01T00:00:00Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:expired\\n\"\n  name: expired_collection\n  version: 2\n\nexpired_collection_past_version:\n  uuid: zzzzz-4zz18-mto52zx1s7oldie\n  current_version_uuid: zzzzz-4zz18-mto52zx1s7sn3ih\n  portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:12:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:17:54Z\n  updated_at: 2014-02-03T17:17:54Z\n  is_trashed: true\n  trash_at: 2001-01-01T00:00:00Z\n  delete_at: 2038-01-01T00:00:00Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:expired\\n\"\n  name: expired_collection original\n  version: 1\n\ntrashed_on_next_sweep:\n  uuid: zzzzz-4zz18-4guozfh77ewd2f0\n  current_version_uuid: zzzzz-4zz18-4guozfh77ewd2f0\n  portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2016-12-07T22:01:00.123456Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2016-12-27T22:01:30.123456Z\n  updated_at: 2016-12-27T22:01:30.123456Z\n  is_trashed: false\n  trash_at: 2016-12-07T22:01:30.123456Z\n  delete_at: 2112-01-01T00:00:00Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:expired\\n\"\n  name: trashed_on_next_sweep\n\n# Note: collections(:deleted_on_next_sweep) fixture finder won't work\n# because it is not in default scope\ndeleted_on_next_sweep:\n  uuid: zzzzz-4zz18-3u1p5umicfpqszp\n  current_version_uuid: zzzzz-4zz18-3u1p5umicfpqszp\n  portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2016-12-07T22:01:00.234567Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2016-12-27T22:01:30.234567Z\n  updated_at: 2016-12-27T22:01:30.234567Z\n  is_trashed: true\n  trash_at: 2016-12-07T22:01:30.234567Z\n  delete_at: 2016-12-27T22:01:30.234567Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:expired\\n\"\n  name: deleted_on_next_sweep\n\ncollection_expires_in_future:\n  uuid: zzzzz-4zz18-padkqo7yb8d9i3j\n  current_version_uuid: zzzzz-4zz18-padkqo7yb8d9i3j\n  portable_data_hash: 0b21a217243bfce5617fb9224b95bcb9+49\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  trash_at: 2038-01-01T00:00:00Z\n  delete_at: 2038-03-01T00:00:00Z\n  manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:expired\\n\"\n  name: collection_expires_in_future\n\nunique_expired_collection:\n  uuid: zzzzz-4zz18-mto52zx1s7sn3jk\n  current_version_uuid: zzzzz-4zz18-mto52zx1s7sn3jk\n  portable_data_hash: 4ad199f90029935844dc3f098f4fca2a+49\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  is_trashed: true\n  trash_at: 2001-01-01T00:00:00Z\n  delete_at: 2038-01-01T00:00:00Z\n  manifest_text: \". 29d7797f1888013986899bc9083783fa+3 0:3:expired\\n\"\n  name: unique_expired_collection1\n\nunique_expired_collection2:\n  uuid: zzzzz-4zz18-mto52zx1s7sn3jr\n  current_version_uuid: zzzzz-4zz18-mto52zx1s7sn3jr\n  portable_data_hash: 64a2bed1ef0f40fe3a7d39bcf2584cb8+50\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  is_trashed: true\n  trash_at: 2001-01-01T00:00:00Z\n  delete_at: 2038-01-01T00:00:00Z\n  manifest_text: \". 29d7797f1888013986899bc9083783fa+3 0:3:expired2\\n\"\n  name: unique_expired_collection2\n\ncollection_in_home_project_with_same_name_as_in_aproject:\n  uuid: zzzzz-4zz18-12342x4u7ftabcd\n  current_version_uuid: zzzzz-4zz18-12342x4u7ftabcd\n  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\\n\"\n  name: collection_with_same_name_in_aproject_and_home_project\n\ncollection_in_aproject_with_same_name_as_in_home_project:\n  uuid: zzzzz-4zz18-56782x4u7ftefgh\n  current_version_uuid: zzzzz-4zz18-56782x4u7ftefgh\n  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45\n  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\\n\"\n  name: collection_with_same_name_in_aproject_and_home_project\n\ncollection_owned_by_foo:\n  uuid: zzzzz-4zz18-50surkhkbhsp31b\n  current_version_uuid: zzzzz-4zz18-50surkhkbhsp31b\n  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45\n  manifest_text: \". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\\n\"\n  owner_uuid: zzzzz-tpzed-81hsbo6mk8nl05c\n  created_at: 2014-02-03T17:22:54Z\n  modified_at: 2014-02-03T17:22:54Z\n  name: collection_owned_by_foo\n\ncollection_to_remove_from_subproject:\n  # The Workbench tests remove this from subproject.\n  uuid: zzzzz-4zz18-subprojgonecoll\n  current_version_uuid: zzzzz-4zz18-subprojgonecoll\n  portable_data_hash: 2386ca6e3fffd4be5e197a72c6c80fb2+51\n  manifest_text: \". 
8258b505536a9ab47baa2f4281cb932a+9 0:9:missingno\\n\"\n  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x\n  created_at: 2014-10-15T10:45:00\n  modified_at: 2014-10-15T10:45:00\n  name: Collection to remove from subproject\n\ncollection_with_files_in_subdir:\n  uuid: zzzzz-4zz18-filesinsubdir00\n  current_version_uuid: zzzzz-4zz18-filesinsubdir00\n  name: collection_files_in_subdir\n  portable_data_hash: 7eb64275355980ebc93411b44050c137+281\n  owner_uuid: zzzzz-tpzed-user1withloadab\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-user1withloadab\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 85877ca2d7e05498dd3d109baf2df106+95 0:95:file_in_subdir1\\n./subdir2/subdir3 2bbc341c702df4d8f42ec31f16c10120+64 0:32:file1_in_subdir3.txt 32:32:file2_in_subdir3.txt\\n./subdir2/subdir3/subdir4 2bbc341c702df4d8f42ec31f16c10120+64 0:32:file1_in_subdir4.txt 32:32:file2_in_subdir4.txt\\n\"\n\ngraph_test_collection1:\n  uuid: zzzzz-4zz18-bv31uwvy3neko22\n  current_version_uuid: zzzzz-4zz18-bv31uwvy3neko22\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: bar_file\n  created_at: 2014-02-03T17:22:54Z\n  modified_at: 2014-02-03T17:22:54Z\n\ngraph_test_collection2:\n  uuid: zzzzz-4zz18-uukreo9rbgwsujx\n  current_version_uuid: zzzzz-4zz18-uukreo9rbgwsujx\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  portable_data_hash: 65b17c95fdbc9800fc48acda4e9dcd0b+93\n  manifest_text: \". 6a4ff0499484c6c79c95cd8c566bd25f+249025 0:249025:FOO_General_Public_License,_version_3.pdf\\n\"\n  name: \"FOO General Public License, version 3\"\n  created_at: 2014-02-03T17:22:54Z\n  modified_at: 2014-02-03T17:22:54Z\n\ngraph_test_collection3:\n  uuid: zzzzz-4zz18-uukreo9rbgwsujj\n  current_version_uuid: zzzzz-4zz18-uukreo9rbgwsujj\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45\n  manifest_text: \". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\\n\"\n  name: \"baz file\"\n  created_at: 2014-02-03T17:22:54Z\n  modified_at: 2014-02-03T17:22:54Z\n\ncollection_1_owned_by_fuse:\n  uuid: zzzzz-4zz18-ovx05bfzormx3bg\n  current_version_uuid: zzzzz-4zz18-ovx05bfzormx3bg\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-0fusedrivertest\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: \"collection #1 owned by FUSE\"\n\ncollection_2_owned_by_fuse:\n  uuid: zzzzz-4zz18-8ubpy4w74twtwzr\n  current_version_uuid: zzzzz-4zz18-8ubpy4w74twtwzr\n  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  owner_uuid: zzzzz-tpzed-0fusedrivertest\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 
acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\\n\"\n  name: \"collection #2 owned by FUSE\"\n\ncollection_in_fuse_project:\n  uuid: zzzzz-4zz18-vx4mtkjqfrb534f\n  current_version_uuid: zzzzz-4zz18-vx4mtkjqfrb534f\n  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45\n  owner_uuid: zzzzz-j7d0g-0000ownedbyfuse\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\\n\"\n  name: \"collection in FUSE project\"\n\ncollection_with_no_name_in_aproject:\n  uuid: zzzzz-4zz18-00000nonamecoll\n  current_version_uuid: zzzzz-4zz18-00000nonamecoll\n  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  manifest_text: \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\\n\"\n\ncollection_to_search_for_in_aproject:\n  uuid: zzzzz-4zz18-abcd6fx123409f7\n  current_version_uuid: zzzzz-4zz18-abcd6fx123409f7\n  portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  manifest_text: \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\\n\"\n  name: \"zzzzz-4zz18-abcd6fx123409f7 used to search with any\"\n\nupload_sandbox:\n  uuid: zzzzz-4zz18-js48y3ykkfdfjd3\n  current_version_uuid: zzzzz-4zz18-js48y3ykkfdfjd3\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-12-09 15:03:16\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-12-09 15:03:16\n  portable_data_hash: d41d8cd98f00b204e9800998ecf8427e+0\n  updated_at: 2014-12-09 15:03:16\n  manifest_text: ''\n  name: upload sandbox\n\ncollection_with_unique_words_to_test_full_text_search:\n  uuid: zzzzz-4zz18-mnt690klmb51aud\n  current_version_uuid: zzzzz-4zz18-mnt690klmb51aud\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection_with_some_unique_words\n  description: The quick_brown_fox jumps over the lazy_dog\n\nreplication_undesired_unconfirmed:\n  uuid: zzzzz-4zz18-wjxq7uzx2m9jj4a\n  current_version_uuid: zzzzz-4zz18-wjxq7uzx2m9jj4a\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-07 00:19:28.596506247 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2015-02-07 00:19:28.596338465 Z\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  replication_desired: ~\n  replication_confirmed_at: ~\n  replication_confirmed: ~\n  updated_at: 2015-02-07 00:19:28.596236608 Z\n  manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: replication want=null have=null\n\nreplication_desired_2_unconfirmed:\n  uuid: zzzzz-4zz18-3t236wrz4769h7x\n  current_version_uuid: zzzzz-4zz18-3t236wrz4769h7x\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-07 00:21:35.050333515 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2015-02-07 00:21:35.050189104 Z\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  replication_desired: 2\n  replication_confirmed_at: ~\n  replication_confirmed: ~\n  updated_at: 2015-02-07 00:21:35.050126576 Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: replication want=2 have=null\n\nreplication_desired_2_confirmed_2:\n  uuid: zzzzz-4zz18-434zv1tnnf2rygp\n  current_version_uuid: zzzzz-4zz18-434zv1tnnf2rygp\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-07 00:19:28.596506247 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2015-02-07 00:19:28.596338465 Z\n  portable_data_hash: ec53808e4c23e6aeebea24d998ae5346+88\n  replication_desired: 2\n  replication_confirmed_at: 2015-02-07 00:24:52.983381227 Z\n  replication_confirmed: 2\n  updated_at: 2015-02-07 00:24:52.983381227 Z\n  manifest_text: \". acbd18db4cc2f85cedef654fccc4a4d8+3 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo 3:3:bar\\n\"\n  name: replication want=2 have=2\n\nstorage_classes_desired_default_unconfirmed:\n  uuid: zzzzz-4zz18-3t236wrz4769tga\n  current_version_uuid: zzzzz-4zz18-3t236wrz4769tga\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-07 00:21:35.050333515 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2015-02-07 00:21:35.050189104 Z\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  storage_classes_desired: [\"default\"]\n  storage_classes_confirmed_at: ~\n  storage_classes_confirmed: ~\n  updated_at: 2015-02-07 00:21:35.050126576 Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: storage classes want=[default] have=[]\n\nstorage_classes_desired_default_confirmed_default:\n  uuid: zzzzz-4zz18-3t236wr12769tga\n  current_version_uuid: zzzzz-4zz18-3t236wr12769tga\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-07 00:21:35.050333515 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2015-02-07 00:21:35.050189104 Z\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  storage_classes_desired: [\"default\"]\n  storage_classes_confirmed_at: 2015-02-07 00:21:35.050126576 Z\n  storage_classes_confirmed: [\"default\"]\n  updated_at: 2015-02-07 00:21:35.050126576 Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: storage classes want=[default] have=[default]\n\nstorage_classes_desired_archive_confirmed_default:\n  uuid: zzzzz-4zz18-3t236wr12769qqa\n  current_version_uuid: zzzzz-4zz18-3t236wr12769qqa\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-07 00:21:35.050333515 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2015-02-07 00:21:35.050189104 Z\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  storage_classes_desired: [\"archive\"]\n  storage_classes_confirmed_at: ~\n  storage_classes_confirmed: [\"default\"]\n  updated_at: 2015-02-07 00:21:35.050126576 Z\n  manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: storage classes want=[archive] have=[default]\n\ncollection_with_empty_properties:\n  uuid: zzzzz-4zz18-emptyproperties\n  current_version_uuid: zzzzz-4zz18-emptyproperties\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-13T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-13T17:22:54Z\n  updated_at: 2015-02-13T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection with empty properties\n  properties: {}\n\ncollection_with_one_property:\n  uuid: zzzzz-4zz18-withoneproperty\n  current_version_uuid: zzzzz-4zz18-withoneproperty\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-13T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-13T17:22:54Z\n  updated_at: 2015-02-13T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection with one property\n  properties:\n    property1: value1\n\n# The following four collections are used to test combining collections with repeated filenames\ncollection_with_repeated_filenames_and_contents_in_two_dirs_1:\n  uuid: zzzzz-4zz18-duplicatenames1\n  current_version_uuid: zzzzz-4zz18-duplicatenames1\n  portable_data_hash: ce437b12aa73ab34f7af5227f556c9e6+142\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  name: collection_with_repeated_filenames_and_contents_in_two_dirs_1\n  manifest_text: \"./dir1 92b53930db60fe94be2a73fc771ba921+34 0:12:alice 12:12:alice.txt 24:10:bob.txt\\n./dir2 56ac2557b1ded11ccab7293dc47d1e88+44 0:27:alice.txt\\n\"\n\ncollection_with_repeated_filenames_and_contents_in_two_dirs_2:\n  uuid: zzzzz-4zz18-duplicatenames2\n  current_version_uuid: zzzzz-4zz18-duplicatenames2\n  portable_data_hash: f3a67fad3a19c31c658982fb8158fa58+144\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  name: collection_with_repeated_filenames_and_contents_in_two_dirs_2\n  manifest_text: \"./dir1 92b53930db60fe94be2a73fc771ba921+34 0:12:alice 12:12:alice.txt 24:10:carol.txt\\n./dir2 56ac2557b1ded11ccab7293dc47d1e88+44 0:27:alice.txt\\n\"\n\nfoo_and_bar_files_in_dir:\n  uuid: zzzzz-4zz18-foonbarfilesdir\n  current_version_uuid: zzzzz-4zz18-foonbarfilesdir\n  portable_data_hash: 870369fc72738603c2fad16664e50e2d+58\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  name: foo_file_in_dir\n  manifest_text: \"./dir1 3858f62230ac3c915f300c664312c63f+6 3:3:bar 0:3:foo\\n\"\n\nmulti_level_to_combine:\n  uuid: zzzzz-4zz18-pyw8yp9g3ujh45f\n  current_version_uuid: zzzzz-4zz18-pyw8yp9g3ujh45f\n  portable_data_hash: 7a6ef4c162a5c6413070a8bd0bffc818+150\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 
85877ca2d7e05498dd3d109baf2df106+95 0:0:file1 0:0:file2 0:0:file3\\n./dir1 85877ca2d7e05498dd3d109baf2df106+95 0:0:file1 0:0:file2 0:0:file3\\n./dir1/subdir 85877ca2d7e05498dd3d109baf2df106+95 0:0:file1 0:0:file2 0:0:file3\\n./dir2 85877ca2d7e05498dd3d109baf2df106+95 0:0:file1 0:0:file2 0:0:file3\\n\"\n  name: multi_level_to_combine\n\n# collection with several file types to test view icon enabled state in collection show page\ncollection_with_several_supported_file_types:\n  uuid: zzzzz-4zz18-supportedtypes1\n  current_version_uuid: zzzzz-4zz18-supportedtypes1\n  portable_data_hash: 020d82cf7dedb70fd2b7788b5d0634da+269\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file.csv 0:0:file.fa 0:0:file.fasta 0:0:file.gif 0:0:file.json 0:0:file.md 0:0:file.pdf 0:0:file.py 0:0:file.R 0:0:file.sam 0:0:file.sh 0:0:file.tiff 0:0:file.tsv 0:0:file.txt 0:0:file.vcf 0:0:file.xml 0:0:file.xsl 0:0:file.yml\\n\"\n  name: collection_with_several_supported_file_types\n\ncollection_with_several_unsupported_file_types:\n  uuid: zzzzz-4zz18-supportedtypes2\n  current_version_uuid: zzzzz-4zz18-supportedtypes2\n  portable_data_hash: 71ac42f87464ee5f9fd396d560d400c3+59\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file 0:0:file.bam\\n\"\n  name: collection_with_several_unsupported_file_types\n\ncollection_not_readable_by_active:\n  uuid: zzzzz-4zz18-cd42uwvy3neko21\n  current_version_uuid: zzzzz-4zz18-cd42uwvy3neko21\n  portable_data_hash: b9e51a238ce08a698e7d7f8f101aee18+55\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar 0:0:empty\\n\"\n  name: collection_not_readable_by_active\n\ncollection_to_remove_and_rename_files:\n  uuid: zzzzz-4zz18-a21ux3541sxa8sf\n  current_version_uuid: zzzzz-4zz18-a21ux3541sxa8sf\n  portable_data_hash: 21aed8fd508bd6263704b673455949ba+57\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\\n\"\n  name: collection to remove and rename files\n\ncollection_with_tags_owned_by_active:\n  uuid: zzzzz-4zz18-taggedcolletion\n  current_version_uuid: zzzzz-4zz18-taggedcolletion\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection with tags\n  properties:\n    existing tag 1: value for existing tag 1\n    existing tag 2: value for existing tag 2\n\ntrashed_collection_to_test_name_conflict_on_untrash:\n  uuid: zzzzz-4zz18-trashedcolnamec\n  current_version_uuid: zzzzz-4zz18-trashedcolnamec\n  portable_data_hash: 21aed8fd508bd6263704b673455949ba+57\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\\n\"\n  name: same name for trashed and persisted collections\n  is_trashed: true\n  trash_at: 2001-01-01T00:00:00Z\n  delete_at: 2038-01-01T00:00:00Z\n\nsame_name_as_trashed_coll_to_test_name_conflict_on_untrash:\n  uuid: zzzzz-4zz18-namesameastrash\n  current_version_uuid: zzzzz-4zz18-namesameastrash\n  portable_data_hash: 21aed8fd508bd6263704b673455949ba+57\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\\n\"\n  name: same name for trashed and persisted collections\n\ncollection_in_trashed_subproject:\n  uuid: zzzzz-4zz18-trashedproj2col\n  current_version_uuid: zzzzz-4zz18-trashedproj2col\n  portable_data_hash: 21aed8fd508bd6263704b673455949ba+57\n  owner_uuid: zzzzz-j7d0g-trashedproject2\n  created_at: 2014-02-03T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-02-03T17:22:54Z\n  updated_at: 2014-02-03T17:22:54Z\n  manifest_text: \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:file1 0:0:file2\\n\"\n  name: collection in trashed subproject\n\ncollection_with_prop1_value1:\n  uuid: zzzzz-4zz18-withprop1value1\n  current_version_uuid: zzzzz-4zz18-withprop1value1\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-13T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-13T17:22:54Z\n  updated_at: 2015-02-13T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection with prop1 value1\n  properties:\n    prop1: value1\n\ncollection_with_prop1_value2:\n  uuid: zzzzz-4zz18-withprop1value2\n  current_version_uuid: zzzzz-4zz18-withprop1value2\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-13T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-13T17:22:54Z\n  updated_at: 2015-02-13T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection with prop1 value2\n  properties:\n    prop1: value2\n\ncollection_with_prop1_value3:\n  uuid: zzzzz-4zz18-withprop1value3\n  current_version_uuid: zzzzz-4zz18-withprop1value3\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-13T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-13T17:22:54Z\n  updated_at: 2015-02-13T17:22:54Z\n  manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection with prop1 value3\n  properties:\n    prop1: value3\n\ncollection_with_prop1_other1:\n  uuid: zzzzz-4zz18-withprop1other1\n  current_version_uuid: zzzzz-4zz18-withprop1other1\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-13T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-13T17:22:54Z\n  updated_at: 2015-02-13T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection with prop1 other1\n  properties:\n    prop1: other1\n\ncollection_with_prop2_1:\n  uuid: zzzzz-4zz18-withprop2value1\n  current_version_uuid: zzzzz-4zz18-withprop2value1\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-13T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-13T17:22:54Z\n  updated_at: 2015-02-13T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection with prop2 1\n  properties:\n    prop2: 1\n\ncollection_with_prop2_5:\n  uuid: zzzzz-4zz18-withprop2value5\n  current_version_uuid: zzzzz-4zz18-withprop2value5\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-13T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-13T17:22:54Z\n  updated_at: 2015-02-13T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection with prop2 5\n  properties:\n    prop2: 5\n\ncollection_with_list_prop_odd:\n  uuid: zzzzz-4zz18-listpropertyodd\n  current_version_uuid: zzzzz-4zz18-listpropertyodd\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-13T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-13T17:22:54Z\n  updated_at: 2015-02-13T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection with list property with odd values\n  properties:\n    listprop: [elem1, elem3, 5]\n\ncollection_with_list_prop_even:\n  uuid: zzzzz-4zz18-listpropertyevn\n  current_version_uuid: zzzzz-4zz18-listpropertyevn\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-13T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-13T17:22:54Z\n  updated_at: 2015-02-13T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection with list property with even values\n  properties:\n    listprop: [elem2, 4, elem6, ELEM8]\n\ncollection_with_listprop_elem1:\n  uuid: zzzzz-4zz18-listpropelemen1\n  current_version_uuid: zzzzz-4zz18-listpropelemen1\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-13T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-13T17:22:54Z\n  updated_at: 2015-02-13T17:22:54Z\n  manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection with list property with string value\n  properties:\n    listprop: elem1\n\ncollection_with_uri_prop:\n  uuid: zzzzz-4zz18-withuripropval1\n  current_version_uuid: zzzzz-4zz18-withuripropval1\n  portable_data_hash: fa7aeb5140e2848d39b416daeef4ffc5+45\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2015-02-13T17:22:54Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-02-13T17:22:54Z\n  updated_at: 2015-02-13T17:22:54Z\n  manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n  name: collection with RDF-style URI property key\n  properties:\n    \"http://schema.org/example\": \"value1\"\n\ncontainer_log_collection:\n  uuid: zzzzz-4zz18-logcollection00\n  current_version_uuid: zzzzz-4zz18-logcollection00\n  portable_data_hash: b1e66f713c04d28ddbaced89096f4838+210\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2020-10-29T00:51:44.075594000Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2020-10-29T00:51:44.072109000Z\n  manifest_text: \". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n\"\n  name: a real log collection for a completed container\n\nlog_collection:\n  uuid: zzzzz-4zz18-logcollection01\n  current_version_uuid: zzzzz-4zz18-logcollection01\n  portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2020-10-29T00:51:44.075594000Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2020-10-29T00:51:44.072109000Z\n  manifest_text: \". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n./log\\\\040for\\\\040container\\\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n\"\n  name: a real log collection for a completed container request\n\nlog_collection2:\n  uuid: zzzzz-4zz18-logcollection02\n  current_version_uuid: zzzzz-4zz18-logcollection02\n  portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2020-10-29T00:51:44.075594000Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2020-10-29T00:51:44.072109000Z\n  manifest_text: \". 
8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n./log\\\\040for\\\\040container\\\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n\"\n  name: another real log collection for a completed container\n\ndiagnostics_request_container_log_collection:\n  uuid: zzzzz-4zz18-diagcompreqlog1\n  current_version_uuid: zzzzz-4zz18-diagcompreqlog1\n  portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2020-11-02T00:20:44.007557000Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2020-11-02T00:20:44.005381000Z\n  manifest_text: \". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n./log\\\\040for\\\\040container\\\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n\"\n  name: Container log for request zzzzz-xvhdp-diagnostics0001\n\nhasher1_log_collection:\n  uuid: zzzzz-4zz18-dlogcollhash001\n  current_version_uuid: zzzzz-4zz18-dlogcollhash001\n  portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2020-11-02T00:16:55.272606000Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2020-11-02T00:16:55.267006000Z\n  manifest_text: \". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n./log\\\\040for\\\\040container\\\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n\"\n  name: hasher1 log collection\n\nhasher2_log_collection:\n  uuid: zzzzz-4zz18-dlogcollhash002\n  current_version_uuid: zzzzz-4zz18-dlogcollhash002\n  portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2020-11-02T00:20:23.547251000Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2020-11-02T00:20:23.545275000Z\n  manifest_text: \". 
8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n./log\\\\040for\\\\040container\\\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n\"\n  name: hasher2 log collection\n\nhasher3_log_collection:\n  uuid: zzzzz-4zz18-dlogcollhash003\n  current_version_uuid: zzzzz-4zz18-dlogcollhash003\n  portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2020-11-02T00:20:38.789204000Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2020-11-02T00:20:38.787329000Z\n  manifest_text: \". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n./log\\\\040for\\\\040container\\\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n\"\n  name: hasher3 log collection\n\ndiagnostics_request_container_log_collection2:\n  uuid: zzzzz-4zz18-diagcompreqlog2\n  current_version_uuid: zzzzz-4zz18-diagcompreqlog2\n  portable_data_hash: 680c855fd6cf2c78778b3728b268925a+475\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2020-11-03T16:17:53.351593000Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2020-11-03T16:17:53.346969000Z\n  manifest_text: \". 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n./log\\\\040for\\\\040container\\\\040ce8i5-dz642-h4kd64itncdcz8l 8c12f5f5297b7337598170c6f531fcee+7882 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\\n\"\n  name: Container log for request zzzzz-xvhdp-diagnostics0002\n\n# Test Helper trims the rest of the file\n\n# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper\n\n# collections in project_with_10_collections\n<% for i in 1..10 do %>\ncollection_<%=i%>_of_10:\n  name: Collection_<%= i %>\n  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45\n  manifest_text: \". 73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\\n\"\n  uuid: zzzzz-4zz18-10gneyn6brkx<%= i.to_s.rjust(3, '0') %>\n  current_version_uuid: zzzzz-4zz18-10gneyn6brkx<%= i.to_s.rjust(3, '0') %>\n  owner_uuid: zzzzz-j7d0g-0010collections\n  created_at: <%= i.minute.ago.to_fs(:db) %>\n  modified_at: <%= i.minute.ago.to_fs(:db) %>\n<% end %>\n\n# collections in project_with_201_collections\n<% for i in 1..201 do %>\ncollection_<%=i%>_of_201:\n  name: Collection_<%= i %>\n  portable_data_hash: ea10d51bcf88862dbcc36eb292017dfd+45\n  manifest_text: \". 
73feffa4b7f6bb68e44cf984c85f6e88+3 0:3:baz\\n\"\n  uuid: zzzzz-4zz18-201gneyn6brd<%= i.to_s.rjust(3, '0') %>\n  current_version_uuid: zzzzz-4zz18-201gneyn6brd<%= i.to_s.rjust(3, '0') %>\n  owner_uuid: zzzzz-j7d0g-0201collections\n  created_at: <%= i.minute.ago.to_fs(:db) %>\n  modified_at: <%= i.minute.ago.to_fs(:db) %>\n<% end %>\n\n# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper\n"
  },
  {
    "path": "services/api/test/fixtures/container_requests.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nqueued:\n  uuid: zzzzz-xvhdp-cr4queuedcontnr\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: queued\n  state: Committed\n  priority: 1\n  created_at: <%= 2.minute.ago.to_fs(:db) %>\n  updated_at: <%= 1.minute.ago.to_fs(:db) %>\n  modified_at: <%= 1.minute.ago.to_fs(:db) %>\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-queuedcontainer\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n    gpu:\n      device_count: 0\n      driver_version: \"\"\n      hardware_target: []\n      stack: \"\"\n      vram: 0\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 1000000\n\nrunning:\n  uuid: zzzzz-xvhdp-cr4runningcntnr\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: running\n  state: Committed\n  priority: 501\n  created_at: <%= 2.minute.ago.to_fs(:db) %>\n  updated_at: <%= 1.minute.ago.to_fs(:db) %>\n  modified_at: <%= 1.minute.ago.to_fs(:db) %>\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-runningcontainr\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 24000000000\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n\nrequester_for_running:\n  uuid: zzzzz-xvhdp-req4runningcntr\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: requester_for_running_cr\n  state: Committed\n  priority: 1\n  created_at: <%= 2.minute.ago.to_fs(:db) %>\n  updated_at: <%= 2.minute.ago.to_fs(:db) %>\n  modified_at: <%= 2.minute.ago.to_fs(:db) %>\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-logscontainer03\n  requesting_container_uuid: zzzzz-dz642-runningcontainr\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 24000000000\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n\nrunning_older:\n  uuid: zzzzz-xvhdp-cr4runningcntn2\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: running\n  state: Committed\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-12 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-runningcontain2\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 24000000000\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n\ncompleted:\n  uuid: zzzzz-xvhdp-cr4completedctr\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: completed container request\n  state: Final\n  priority: 1\n  created_at: <%= 2.minute.ago.to_fs(:db) %>\n  updated_at: <%= 1.minute.ago.to_fs(:db) %>\n  modified_at: <%= 1.minute.ago.to_fs(:db) %>\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-compltcontainer\n  log_uuid: zzzzz-4zz18-logcollection01\n  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 1000000\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n  mounts: {}\n\ncompleted-older:\n  uuid: zzzzz-xvhdp-cr4completedcr2\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: completed\n  
state: Final\n  priority: 1\n  created_at: <%= 30.minute.ago.to_fs(:db) %>\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"arvados-cwl-runner\", \"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-compltcontainr2\n  log_uuid: zzzzz-4zz18-logcollection02\n  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 1000000\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n  mounts: {}\n\ncompleted_diagnostics:\n  name: CWL diagnostics hasher\n  uuid: zzzzz-xvhdp-diagnostics0001\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  state: Final\n  priority: 1\n  created_at: 2020-11-02T00:03:50.229364000Z\n  modified_at: 2020-11-02T00:20:44.041122000Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: d967ef4a1ca90a096a39f5ce68e4a2e7+261\n  cwd: /var/spool/cwl\n  output_path: /var/spool/cwl\n  command: [\n             \"arvados-cwl-runner\",\n             \"--local\",\n             \"--api=containers\",\n             \"--no-log-timestamps\",\n             \"--disable-validate\",\n             \"--disable-color\",\n             \"--eval-timeout=20\",\n             \"--thread-count=1\",\n             \"--disable-reuse\",\n             \"--collection-cache-size=256\",\n             \"--on-error=continue\",\n             \"/var/lib/cwl/workflow.json#main\",\n             \"/var/lib/cwl/cwl.input.json\"\n           ]\n  container_uuid: zzzzz-dz642-diagcompreq0001\n  log_uuid: zzzzz-4zz18-diagcompreqlog1\n  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  mounts:\n    /var/spool/cwl:\n      kind: json\n      content: {}\n  runtime_constraints:\n    vcpus: 1\n    ram: 1342177280\n    API: true\n\ncompleted_diagnostics_hasher1:\n  name: hasher1\n  uuid: zzzzz-xvhdp-diag1hasher0001\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  state: Final\n  priority: 500\n  created_at: 2020-11-02T00:03:50.229364000Z\n  modified_at: 2020-11-02T00:20:44.041122000Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: d967ef4a1ca90a096a39f5ce68e4a2e7+261\n  cwd: /var/spool/cwl\n  output_name: Output for step hasher1\n  output_path: /var/spool/cwl\n  command: [\n             \"md5sum\",\n             \"/keep/9f26a86b6030a69ad222cf67d71c9502+65/hasher-input-file.txt\"\n           ]\n  container_uuid: zzzzz-dz642-diagcomphasher1\n  requesting_container_uuid: zzzzz-dz642-diagcompreq0001\n  log_uuid: zzzzz-4zz18-dlogcollhash001\n  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  mounts:\n    /var/spool/cwl:\n      kind: json\n      content: {}\n  runtime_constraints:\n    vcpus: 1\n    ram: 2684354560\n    API: true\n\ncompleted_diagnostics_hasher2:\n  name: hasher2\n  uuid: zzzzz-xvhdp-diag1hasher0002\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  state: Final\n  priority: 500\n  created_at: 2020-11-02T00:17:07.067464000Z\n  modified_at: 2020-11-02T00:20:23.557498000Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: d967ef4a1ca90a096a39f5ce68e4a2e7+261\n  cwd: /var/spool/cwl\n  output_name: Output for step hasher2\n  output_path: /var/spool/cwl\n  command: [\n             \"md5sum\",\n             \"/keep/d3a687732e84061f3bae15dc7e313483+62/hasher1.md5sum.txt\"\n           ]\n  container_uuid: zzzzz-dz642-diagcomphasher2\n  requesting_container_uuid: zzzzz-dz642-diagcompreq0001\n  log_uuid: zzzzz-4zz18-dlogcollhash002\n  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n 
 mounts:\n    /var/spool/cwl:\n      kind: json\n      content: {}\n  runtime_constraints:\n    vcpus: 2\n    ram: 2684354560\n    API: true\n\ncompleted_diagnostics_hasher3:\n  name: hasher3\n  uuid: zzzzz-xvhdp-diag1hasher0003\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  state: Final\n  priority: 500\n  created_at: 2020-11-02T00:20:30.960251000Z\n  modified_at: 2020-11-02T00:20:38.799377000Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: d967ef4a1ca90a096a39f5ce68e4a2e7+261\n  cwd: /var/spool/cwl\n  output_name: Output for step hasher3\n  output_path: /var/spool/cwl\n  command: [\n             \"md5sum\",\n             \"/keep/6bd770f6cf8f83e7647c602eecfaeeb8+62/hasher2.md5sum.txt\"\n           ]\n  container_uuid: zzzzz-dz642-diagcomphasher3\n  requesting_container_uuid: zzzzz-dz642-diagcompreq0001\n  log_uuid: zzzzz-4zz18-dlogcollhash003\n  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  mounts:\n    /var/spool/cwl:\n      kind: json\n      content: {}\n  runtime_constraints:\n    vcpus: 1\n    ram: 2684354560\n    API: true\n\ncompleted_diagnostics2:\n  name: Copy of CWL diagnostics hasher\n  uuid: zzzzz-xvhdp-diagnostics0002\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  state: Final\n  priority: 1\n  created_at: 2020-11-03T15:54:30.098485000Z\n  modified_at: 2020-11-03T16:17:53.406809000Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: d967ef4a1ca90a096a39f5ce68e4a2e7+261\n  cwd: /var/spool/cwl\n  output_path: /var/spool/cwl\n  command: [\n             \"arvados-cwl-runner\",\n             \"--local\",\n             \"--api=containers\",\n             \"--no-log-timestamps\",\n             \"--disable-validate\",\n             \"--disable-color\",\n             \"--eval-timeout=20\",\n             \"--thread-count=1\",\n             \"--disable-reuse\",\n             \"--collection-cache-size=256\",\n             \"--on-error=continue\",\n             \"/var/lib/cwl/workflow.json#main\",\n             \"/var/lib/cwl/cwl.input.json\"\n           ]\n  container_uuid: zzzzz-dz642-diagcompreq0002\n  log_uuid: zzzzz-4zz18-diagcompreqlog2\n  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  mounts:\n    /var/spool/cwl:\n      kind: json\n      content: {}\n  runtime_constraints:\n    vcpus: 1\n    ram: 1342177280\n    API: true\n\ncompleted_diagnostics_hasher1_reuse:\n  name: hasher1\n  uuid: zzzzz-xvhdp-diag2hasher0001\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  state: Final\n  priority: 500\n  created_at: 2020-11-02T00:03:50.229364000Z\n  modified_at: 2020-11-02T00:20:44.041122000Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: d967ef4a1ca90a096a39f5ce68e4a2e7+261\n  cwd: /var/spool/cwl\n  output_name: Output for step hasher1\n  output_path: /var/spool/cwl\n  command: [\n             \"md5sum\",\n             \"/keep/9f26a86b6030a69ad222cf67d71c9502+65/hasher-input-file.txt\"\n           ]\n  container_uuid: zzzzz-dz642-diagcomphasher1\n  requesting_container_uuid: zzzzz-dz642-diagcompreq0002\n  log_uuid: zzzzz-4zz18-dlogcollhash001\n  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  mounts:\n    /var/spool/cwl:\n      kind: json\n      content: {}\n  runtime_constraints:\n    vcpus: 1\n    ram: 2684354560\n    API: true\n\ncompleted_diagnostics_hasher2_reuse:\n  name: hasher2\n  uuid: zzzzz-xvhdp-diag2hasher0002\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  state: Final\n  priority: 500\n  created_at: 2020-11-02T00:17:07.067464000Z\n  modified_at: 2020-11-02T00:20:23.557498000Z\n  modified_by_user_uuid: 
zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: d967ef4a1ca90a096a39f5ce68e4a2e7+261\n  cwd: /var/spool/cwl\n  output_name: Output for step hasher2\n  output_path: /var/spool/cwl\n  command: [\n             \"md5sum\",\n             \"/keep/d3a687732e84061f3bae15dc7e313483+62/hasher1.md5sum.txt\"\n           ]\n  container_uuid: zzzzz-dz642-diagcomphasher2\n  requesting_container_uuid: zzzzz-dz642-diagcompreq0002\n  log_uuid: zzzzz-4zz18-dlogcollhash002\n  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  mounts:\n    /var/spool/cwl:\n      kind: json\n      content: {}\n  runtime_constraints:\n    vcpus: 2\n    ram: 2684354560\n    API: true\n\ncompleted_diagnostics_hasher3_reuse:\n  name: hasher3\n  uuid: zzzzz-xvhdp-diag2hasher0003\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  state: Final\n  priority: 500\n  created_at: 2020-11-02T00:20:30.960251000Z\n  modified_at: 2020-11-02T00:20:38.799377000Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: d967ef4a1ca90a096a39f5ce68e4a2e7+261\n  cwd: /var/spool/cwl\n  output_name: Output for step hasher3\n  output_path: /var/spool/cwl\n  command: [\n             \"md5sum\",\n             \"/keep/6bd770f6cf8f83e7647c602eecfaeeb8+62/hasher2.md5sum.txt\"\n           ]\n  container_uuid: zzzzz-dz642-diagcomphasher3\n  requesting_container_uuid: zzzzz-dz642-diagcompreq0002\n  log_uuid: zzzzz-4zz18-dlogcollhash003\n  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  mounts:\n    /var/spool/cwl:\n      kind: json\n      content: {}\n  runtime_constraints:\n    vcpus: 1\n    ram: 2684354560\n    API: true\n\nrequester:\n  uuid: zzzzz-xvhdp-9zacv3o1xw6sxz5\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: requester\n  state: Committed\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /\n  output_path: /output\n  command: [\"request-another-container\", \"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-requestingcntnr\n  mounts:\n    /output:\n      kind: tmp\n      capacity: 1000000\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n  mounts: {}\n\ncr_for_requester:\n  uuid: zzzzz-xvhdp-cr4requestercnt\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: requester_cr\n  state: Final\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-requestercntnr1\n  requesting_container_uuid: zzzzz-dz642-requestingcntnr\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 1000000\n\ncr_for_requester2:\n  uuid: zzzzz-xvhdp-cr4requestercn2\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: requester_cr2\n  state: Final\n  priority: 1\n  created_at: <%= 30.minute.ago.to_fs(:db) %>\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  requesting_container_uuid: zzzzz-dz642-requestercntnr1\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 1000000\n\nrunning_anonymous_accessible:\n  uuid: 
zzzzz-xvhdp-runninganonaccs\n  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0\n  name: running anonymously accessible cr\n  state: Committed\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-runningcontain2\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 24000000000\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n\ncr_for_failed:\n  uuid: zzzzz-xvhdp-cr4failedcontnr\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: cr for container exit code not 0\n  state: Committed\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-failedcontainr1\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 1000000\n\ncanceled_with_queued_container:\n  uuid: zzzzz-xvhdp-canceledqueuedc\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: canceled with queued container\n  state: Final\n  priority: 0\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-queuedcontainer\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 1000000\n\ncanceled_with_locked_container:\n  uuid: zzzzz-xvhdp-canceledlocekdc\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: canceled with locked container\n  state: Final\n  priority: 0\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-lockedcontainer\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 1000000\n\ncanceled_with_running_container:\n  uuid: zzzzz-xvhdp-canceledrunning\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: canceled with running container\n  state: Committed\n  priority: 0\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-runningcontainr\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 24000000000\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n\nrunning_to_be_deleted:\n  uuid: zzzzz-xvhdp-cr5runningcntnr\n  owner_uuid: zzzzz-j7d0g-rew6elm53kancon\n  name: running to be deleted\n  state: Committed\n  priority: 1\n  created_at: <%= 2.days.ago.to_fs(:db) %>\n  updated_at: <%= 1.days.ago.to_fs(:db) %>\n  modified_at: <%= 1.days.ago.to_fs(:db) %>\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n 
 output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 24000000000\n  container_uuid: zzzzz-dz642-runnincntrtodel\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n\ncompleted_with_input_mounts:\n  uuid: zzzzz-xvhdp-crwithinputmnts\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: completed container request\n  state: Final\n  priority: 1\n  created_at: <%= 24.hour.ago.to_fs(:db) %>\n  updated_at: <%= 24.hour.ago.to_fs(:db) %>\n  modified_at: <%= 24.hour.ago.to_fs(:db) %>\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\", \"/bin/sh\", \"-c\", \"'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'\"]\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n  container_uuid: zzzzz-dz642-compltcontainer\n  log_uuid: zzzzz-4zz18-logcollection01\n  output_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 1000000\n    /var/lib/cwl/cwl.input.json: {\n      \"kind\": \"json\",\n      \"content\": {\n        \"input1\": {\n          \"basename\": \"foo\",\n          \"class\": \"File\",\n          \"location\": \"keep:fa7aeb5140e2848d39b416daeef4ffc5+45/foo\",\n        },\n        \"input2\": {\n          \"basename\": \"bar\",\n          \"class\": \"File\",\n          \"location\": \"keep:fa7aeb5140e2848d39b416daeef4ffc5+45/bar\",\n        }\n      }\n    }\n\nuncommitted:\n  uuid: zzzzz-xvhdp-cr4uncommittedc\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: uncommitted\n  created_at: <%= 2.minute.ago.to_fs(:db) %>\n  updated_at: <%= 1.minute.ago.to_fs(:db) %>\n  modified_at: <%= 1.minute.ago.to_fs(:db) %>\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  command: [\"arvados-cwl-runner\", \"--local\", \"--api=containers\",\n            \"/var/lib/cwl/workflow.json\", \"/var/lib/cwl/cwl.input.json\"]\n  output_path: \"/var/spool/cwl\"\n  cwd: \"/var/spool/cwl\"\n  priority: 1\n  state: \"Uncommitted\"\n  container_image: arvados/jobs\n  mounts: {\n        \"/var/lib/cwl/workflow.json\": {\n            \"kind\": \"json\",\n            \"content\": {\n                \"cwlVersion\": \"v1.0\",\n                \"$graph\": [{\n                \"id\": \"#main\",\n                \"class\": \"CommandLineTool\",\n                \"baseCommand\": [\"echo\"],\n                \"inputs\": [\n                    {\n                        \"doc\": \"a longer documentation string for this parameter (optional)\",\n                        \"type\": \"boolean\",\n                        \"id\": \"ex_boolean\",\n                        \"label\": \"a short label for this parameter (optional)\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": [\"null\", \"boolean\"],\n                        \"id\": \"ex_boolean_opt\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"doc\": \"directory selection should present the workbench collection picker\",\n                        \"type\": \"Directory\",\n                        \"id\": \"ex_dir\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": \"double\",\n                        \"id\": \"ex_double\",\n                        
\"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"doc\": \"file selection should present the workbench file picker\",\n                        \"type\": \"File\",\n                        \"id\": \"ex_file\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": \"float\",\n                        \"id\": \"ex_float\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": \"int\",\n                        \"id\": \"ex_int\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": [\"null\", \"int\"],\n                        \"id\": \"ex_int_opt\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": \"long\",\n                        \"id\": \"ex_long\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"id\": \"ex_string\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": [\"null\", \"string\"],\n                        \"id\": \"ex_string_opt\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": {\n                            \"type\": \"enum\",\n                            \"symbols\": [\"a\", \"b\", \"c\"]\n                        },\n                        \"id\": \"ex_enum\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": [\"null\", {\n                            \"type\": \"enum\",\n                            \"symbols\": [\"a\", \"b\", \"c\"]\n                        }],\n                        \"id\": \"ex_enum_opt\",\n                        \"inputBinding\": {\"position\": 1}\n                    }\n                ],\n                \"outputs\": []\n            }]\n          }\n        },\n        \"/var/lib/cwl/cwl.input.json\": {\n            \"kind\": \"json\",\n            \"content\": {}\n        },\n        \"stdout\": {\n            \"kind\": \"file\",\n            \"path\": \"/var/spool/cwl/cwl.output.json\"\n        },\n        \"/var/spool/cwl\": {\n            \"kind\": \"collection\",\n            \"writable\": true\n        }\n    }\n  runtime_constraints:\n    vcpus: 1\n    ram: 256000000\n    API: true\n\nuncommitted_ready_to_run:\n  uuid: zzzzz-xvhdp-cr4uncommittedd\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: uncommitted_ready_to_run\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  command: [\"arvados-cwl-runner\", \"--local\", \"--api=containers\",\n            \"/var/lib/cwl/workflow.json\", \"/var/lib/cwl/cwl.input.json\"]\n  output_path: \"/var/spool/cwl\"\n  cwd: \"/var/spool/cwl\"\n  priority: 1\n  state: \"Uncommitted\"\n  container_image: arvados/jobs\n  mounts: {\n        \"/var/lib/cwl/workflow.json\": {\n            \"kind\": \"json\",\n            \"content\": {\n                
\"cwlVersion\": \"v1.0\",\n                \"class\": \"CommandLineTool\",\n                \"baseCommand\": [\"echo\"],\n                \"inputs\": [\n                    {\n                        \"doc\": \"a longer documentation string for this parameter (optional)\",\n                        \"type\": \"boolean\",\n                        \"id\": \"ex_boolean\",\n                        \"label\": \"a short label for this parameter (optional)\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": [\"null\", \"boolean\"],\n                        \"id\": \"ex_boolean_opt\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"doc\": \"directory selection should present the workbench collection picker\",\n                        \"type\": \"Directory\",\n                        \"id\": \"ex_dir\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": \"double\",\n                        \"id\": \"ex_double\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"doc\": \"file selection should present the workbench file picker\",\n                        \"type\": \"File\",\n                        \"id\": \"ex_file\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": \"float\",\n                        \"id\": \"ex_float\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": \"int\",\n                        \"id\": \"ex_int\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": [\"null\", \"int\"],\n                        \"id\": \"ex_int_opt\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": \"long\",\n                        \"id\": \"ex_long\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": \"string\",\n                        \"id\": \"ex_string\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": [\"null\", \"string\"],\n                        \"id\": \"ex_string_opt\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": {\n                            \"type\": \"enum\",\n                            \"symbols\": [\"a\", \"b\", \"c\"]\n                        },\n                        \"id\": \"ex_enum\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": [\"null\", {\n                            \"type\": \"enum\",\n                            \"symbols\": [\"a\", \"b\", \"c\"]\n                        }],\n                        \"id\": \"ex_enum_opt\",\n                        \"inputBinding\": {\"position\": 1}\n                    }\n                ],\n                \"outputs\": []\n            }\n       
 },\n        \"/var/lib/cwl/cwl.input.json\": {\n            \"kind\": \"json\",\n            \"content\": {\n              \"ex_string_opt\": null,\n              \"ex_int_opt\": null,\n              \"ex_boolean\": false,\n              \"ex_boolean_opt\": true,\n              \"ex_dir\": {\n                \"class\": \"Directory\",\n                \"location\": \"keep:1f4b0bc7583c2a7f9102c395f4ffc5e3+45\",\n                \"arv:collection\": \"zzzzz-4zz18-znfnqtbbv4spc3w\"\n              },\n              \"ex_double\": 66.0,\n              \"ex_file\": {\n                \"class\": \"File\",\n                \"location\": \"keep:1f4b0bc7583c2a7f9102c395f4ffc5e3+45/foo\",\n                \"arv:collection\": \"zzzzz-4zz18-znfnqtbbv4spc3w/foo\"\n              },\n              \"ex_float\": 55.0,\n              \"ex_int\": 55,\n              \"ex_long\": 22,\n              \"ex_string\": \"qq\",\n              \"ex_enum\": \"a\"\n            }\n        },\n        \"stdout\": {\n            \"kind\": \"file\",\n            \"path\": \"/var/spool/cwl/cwl.output.json\"\n        },\n        \"/var/spool/cwl\": {\n            \"kind\": \"collection\",\n            \"writable\": true\n        }\n    }\n  runtime_constraints:\n    vcpus: 1\n    ram: 256000000\n    API: true\n\nuncommitted-with-directory-input:\n  uuid: zzzzz-xvhdp-cr4uncommitted2\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: uncommitted with directory input\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  command: [\"arvados-cwl-runner\", \"--local\", \"--api=containers\",\n            \"/var/lib/cwl/workflow.json\", \"/var/lib/cwl/cwl.input.json\"]\n  output_path: \"/var/spool/cwl\"\n  cwd: \"/var/spool/cwl\"\n  priority: 1\n  state: Uncommitted\n  container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167\n  mounts: {\n        \"/var/lib/cwl/workflow.json\": {\n            \"kind\": \"json\",\n            \"content\": {\n                \"cwlVersion\": \"v1.0\",\n                \"class\": \"CommandLineTool\",\n                \"baseCommand\": [\"echo\"],\n                \"inputs\": [\n                    {\n                        \"type\": \"Directory\",\n                        \"id\": \"directory_type\",\n                        \"inputBinding\": {\"position\": 1}\n                    }\n                ],\n                \"outputs\": []\n            }\n        },\n        \"/var/lib/cwl/cwl.input.json\": {\n            \"kind\": \"json\",\n            \"content\": {}\n        },\n        \"stdout\": {\n            \"kind\": \"file\",\n            \"path\": \"/var/spool/cwl/cwl.output.json\"\n        },\n        \"/var/spool/cwl\": {\n            \"kind\": \"collection\",\n            \"writable\": true\n        }\n    }\n  runtime_constraints:\n    vcpus: 1\n    ram: 256000000\n    API: true\n\nuncommitted-with-file-input:\n  uuid: zzzzz-xvhdp-cr4uncommittedf\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: uncommitted with file input\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  command: [\"arvados-cwl-runner\", \"--local\", \"--api=containers\",\n            \"/var/lib/cwl/workflow.json\", \"/var/lib/cwl/cwl.input.json\"]\n  output_path: \"/var/spool/cwl\"\n  cwd: \"/var/spool/cwl\"\n  priority: 1\n  state: 
Uncommitted\n  container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167\n  mounts: {\n        \"/var/lib/cwl/workflow.json\": {\n            \"kind\": \"json\",\n            \"content\": {\n                \"cwlVersion\": \"v1.0\",\n                \"class\": \"CommandLineTool\",\n                \"baseCommand\": [\"echo\"],\n                \"inputs\": [\n                    {\n                        \"type\": \"File\",\n                        \"id\": \"file_type\",\n                        \"inputBinding\": {\"position\": 1}\n                    }\n                ],\n                \"outputs\": []\n            }\n        },\n        \"/var/lib/cwl/cwl.input.json\": {\n            \"kind\": \"json\",\n            \"content\": {}\n        },\n        \"stdout\": {\n            \"kind\": \"file\",\n            \"path\": \"/var/spool/cwl/cwl.output.json\"\n        },\n        \"/var/spool/cwl\": {\n            \"kind\": \"collection\",\n            \"writable\": true\n        }\n    }\n  runtime_constraints:\n    vcpus: 1\n    ram: 256000000\n    API: true\n\nuncommitted-with-required-and-optional-inputs:\n  uuid: zzzzz-xvhdp-cr4uncommitted3\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: uncommitted with required and optional inputs\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  modified_at: 2016-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  command: [\"arvados-cwl-runner\", \"--local\", \"--api=containers\",\n            \"/var/lib/cwl/workflow.json\", \"/var/lib/cwl/cwl.input.json\"]\n  output_path: \"/var/spool/cwl\"\n  cwd: \"/var/spool/cwl\"\n  priority: 1\n  state: Uncommitted\n  container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167\n  mounts: {\n        \"/var/lib/cwl/workflow.json\": {\n            \"kind\": \"json\",\n            \"content\": {\n                \"cwlVersion\": \"v1.0\",\n                \"class\": \"CommandLineTool\",\n                \"baseCommand\": [\"echo\"],\n                \"inputs\": [\n                    {\n                        \"type\": \"int\",\n                        \"id\": \"int_required\",\n                        \"inputBinding\": {\"position\": 1}\n                    },\n                    {\n                        \"type\": [\"null\", \"int\"],\n                        \"id\": \"int_optional\",\n                        \"inputBinding\": {\"position\": 1}\n                    }\n                ],\n                \"outputs\": []\n            }\n        },\n        \"/var/lib/cwl/cwl.input.json\": {\n            \"kind\": \"json\",\n            \"content\": {}\n        },\n        \"stdout\": {\n            \"kind\": \"file\",\n            \"path\": \"/var/spool/cwl/cwl.output.json\"\n        },\n        \"/var/spool/cwl\": {\n            \"kind\": \"collection\",\n            \"writable\": true\n        }\n    }\n  runtime_constraints:\n    vcpus: 1\n    ram: 256000000\n    API: true\n\ncr_in_trashed_project:\n  uuid: zzzzz-xvhdp-cr5trashedcontr\n  owner_uuid: zzzzz-j7d0g-trashedproject1\n  name: completed container request\n  state: Final\n  priority: 1\n  created_at: <%= 2.minute.ago.to_fs(:db) %>\n  updated_at: <%= 1.minute.ago.to_fs(:db) %>\n  modified_at: <%= 1.minute.ago.to_fs(:db) %>\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-compltcontainer\n  log_uuid: zzzzz-4zz18-logcollection01\n  output_uuid: 
zzzzz-4zz18-znfnqtbbv4spc3w\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 1000000\n\nruntime_token:\n  uuid: zzzzz-xvhdp-11eklkhy0n4dm86\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: runtime token\n  state: Committed\n  priority: 1\n  created_at: <%= 2.minute.ago.to_fs(:db) %>\n  updated_at: <%= 1.minute.ago.to_fs(:db) %>\n  modified_at: <%= 1.minute.ago.to_fs(:db) %>\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  container_uuid: zzzzz-dz642-20isqbkl8xwnsao\n  runtime_token: v2/zzzzz-gj3su-2nj68s291f50gd9/2d19ue6ofx26o3mm7fs9u6t7hov9um0v92dzwk1o2xed3abprw\n  runtime_constraints:\n    vcpus: 1\n    ram: 123\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 24000000000\n    /var/spool/cwl:\n      kind: tmp\n      capacity: 24000000000\n\nread_foo_write_bar:\n  uuid: zzzzz-xvhdp-readfoowritebar\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Final\n  created_at: 2024-01-11 11:11:11.111111111 Z\n  updated_at: 2024-01-11 11:11:11.111111111 Z\n  modified_at: 2024-01-11 11:11:11.111111111 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  container_image: test\n  cwd: /\n  mounts:\n    stdin:\n      kind: collection\n      portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n      path: /foo\n    stdout:\n      kind: file\n      path: /mnt/out/bar\n    /mnt/out:\n      kind: tmp\n      capacity: 1000\n  container_uuid: zzzzz-dz642-readfoowritebar\n  log_uuid: zzzzz-4zz18-logcollection01\n  output_uuid: zzzzz-4zz18-ehbhgtheo8909or\n  output_path: /mnt/out\n  command: [\"echo\", \"-n\", \"bar\"]\n  runtime_constraints:\n    ram: 10000000\n    vcpus: 1\n\n\n# Test Helper trims the rest of the file\n\n# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper\n\n# container requests in project_with_2_pipelines_and_60_crs\n<% for i in 1..60 do %>\ncr_<%=i%>_of_60:\n  uuid: zzzzz-xvhdp-oneof60crs<%= i.to_s.rjust(5, '0') %>\n  created_at: <%= ((i+5)/5).hour.ago.to_fs(:db) %>\n  owner_uuid: zzzzz-j7d0g-nnncrspipelines\n  name: cr-<%= i.to_s %>\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  mounts:\n    /tmp:\n      kind: tmp\n      capacity: 1000000\n<% end %>\n\n# Do not add your fixtures below this line as the rest of this file will be trimmed by test_helper\n"
  },
  {
    "path": "services/api/test/fixtures/containers.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nqueued:\n  uuid: zzzzz-dz642-queuedcontainer\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Queued\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n    gpu:\n      stack: \"\"\n      driver_version: \"\"\n      hardware_capability: \"\"\n      device_count: 0\n      vram: 0\n  mounts:\n    /tmp:\n      capacity: 24000000000\n      kind: tmp\n    /var/spool/cwl:\n      capacity: 24000000000\n      kind: tmp\n\nrunning:\n  uuid: zzzzz-dz642-runningcontainr\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Running\n  priority: 12\n  created_at: <%= 1.minute.ago.to_fs(:db) %>\n  updated_at: <%= 1.minute.ago.to_fs(:db) %>\n  started_at: <%= 1.minute.ago.to_fs(:db) %>\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n  mounts:\n    /tmp:\n      capacity: 24000000000\n      kind: tmp\n  secret_mounts:\n    /secret/6x9:\n      content: \"42\\n\"\n      kind: text\n  secret_mounts_md5: <%= Digest::MD5.hexdigest(SafeJSON.dump({'/secret/6x9' => {'content' => \"42\\n\", 'kind' => 'text'}})) %>\n  auth_uuid: zzzzz-gj3su-077z32aux8dg2s2\n\nrunning_older:\n  uuid: zzzzz-dz642-runningcontain2\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Running\n  priority: 1\n  created_at: <%= 2.minute.ago.to_fs(:db) %>\n  updated_at: <%= 2.minute.ago.to_fs(:db) %>\n  started_at: <%= 2.minute.ago.to_fs(:db) %>\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  mounts:\n    /tmp:\n      capacity: 24000000000\n      kind: tmp\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n  secret_mounts: {}\n  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b\n\nlocked:\n  uuid: zzzzz-dz642-lockedcontainer\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Locked\n  locked_by_uuid: zzzzz-gj3su-000000000000000\n  priority: 0\n  created_at: <%= 2.minute.ago.to_fs(:db) %>\n  updated_at: <%= 2.minute.ago.to_fs(:db) %>\n  modified_at: <%= 2.minute.ago.to_fs(:db) %>\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  mounts:\n    /tmp:\n      capacity: 1000000\n      kind: tmp\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n  secret_mounts: {}\n  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b\n\ncompleted:\n  uuid: zzzzz-dz642-compltcontainer\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Complete\n  exit_code: 0\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  started_at: 2016-01-11 11:11:11.111111111 Z\n  finished_at: 2016-01-12 11:12:13.111111111 Z\n  container_image: test\n  cwd: /tmp\n  log: ea10d51bcf88862dbcc36eb292017dfd+45\n  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  output_path: /tmp\n  command: [\"echo\", \"hello\", \"/bin/sh\", \"-c\", \"'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'\"]\n  mounts:\n    /tmp:\n      capacity: 1000000\n      kind: tmp\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n  secret_mounts: {}\n  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b\n\ncompleted_older:\n  uuid: 
zzzzz-dz642-compltcontainr2\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Complete\n  exit_code: 0\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  started_at: 2016-01-13 11:11:11.111111111 Z\n  finished_at: 2016-01-14 11:12:13.111111111 Z\n  container_image: test\n  cwd: /tmp\n  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  mounts:\n    /tmp:\n      capacity: 1000000\n      kind: tmp\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n  secret_mounts: {}\n  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b\n\ndiagnostics_completed_requester:\n  uuid: zzzzz-dz642-diagcompreq0001\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Complete\n  exit_code: 0\n  priority: 562948349145881771\n  created_at: 2020-11-02T00:03:50.192697000Z\n  modified_at: 2020-11-02T00:20:43.987275000Z\n  started_at: 2020-11-02T00:08:07.186711000Z\n  finished_at: 2020-11-02T00:20:43.975416000Z\n  container_image: d967ef4a1ca90a096a39f5ce68e4a2e7+261\n  cwd: /var/spool/cwl\n  log: 6129e376cb05c942f75a0c36083383e8+244\n  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  output_path: /var/spool/cwl\n  mounts:\n    /var/spool/cwl:\n      content: {}\n      kind: json\n  command: [\n             \"arvados-cwl-runner\",\n             \"--local\",\n             \"--api=containers\",\n             \"--no-log-timestamps\",\n             \"--disable-validate\",\n             \"--disable-color\",\n             \"--eval-timeout=20\",\n             \"--thread-count=1\",\n             \"--disable-reuse\",\n             \"--collection-cache-size=256\",\n             \"--on-error=continue\",\n             \"/var/lib/cwl/workflow.json#main\",\n             \"/var/lib/cwl/cwl.input.json\"\n           ]\n  runtime_constraints:\n    API: true\n    keep_cache_disk: 0\n    keep_cache_ram: 268435456\n    ram: 1342177280\n    vcpus: 1\n\ndiagnostics_completed_hasher1:\n  uuid: zzzzz-dz642-diagcomphasher1\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Complete\n  exit_code: 0\n  priority: 562948349145881771\n  created_at: 2020-11-02T00:08:18.829222000Z\n  modified_at: 2020-11-02T00:16:55.142023000Z\n  started_at: 2020-11-02T00:16:52.375871000Z\n  finished_at: 2020-11-02T00:16:55.105985000Z\n  container_image: d967ef4a1ca90a096a39f5ce68e4a2e7+261\n  cwd: /var/spool/cwl\n  log: fed8fb19fe8e3a320c29fed0edab12dd+220\n  output: d3a687732e84061f3bae15dc7e313483+62\n  output_path: /var/spool/cwl\n  command: [\n             \"md5sum\",\n             \"/keep/9f26a86b6030a69ad222cf67d71c9502+65/hasher-input-file.txt\"\n           ]\n  mounts:\n    /var/spool/cwl:\n      content: {}\n      kind: json\n  runtime_constraints:\n    API: true\n    keep_cache_disk: 0\n    keep_cache_ram: 268435456\n    ram: 268435456\n    vcpus: 1\n\ndiagnostics_completed_hasher2:\n  uuid: zzzzz-dz642-diagcomphasher2\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Complete\n  exit_code: 0\n  priority: 562948349145881771\n  created_at: 2020-11-02T00:17:07.026493000Z\n  modified_at: 2020-11-02T00:20:23.505908000Z\n  started_at: 2020-11-02T00:20:21.513185000Z\n  finished_at: 2020-11-02T00:20:23.478317000Z\n  container_image: d967ef4a1ca90a096a39f5ce68e4a2e7+261\n  cwd: /var/spool/cwl\n  log: 4fc03b95fc2646b0dec7383dbb7d56d8+221\n  output: 6bd770f6cf8f83e7647c602eecfaeeb8+62\n  output_path: /var/spool/cwl\n  command: [\n             \"md5sum\",\n             \"/keep/d3a687732e84061f3bae15dc7e313483+62/hasher1.md5sum.txt\"\n          
 ]\n  mounts:\n    /var/spool/cwl:\n      content: {}\n      kind: json\n  runtime_constraints:\n    API: true\n    keep_cache_disk: 0\n    keep_cache_ram: 268435456\n    ram: 268435456\n    vcpus: 2\n\ndiagnostics_completed_hasher3:\n  uuid: zzzzz-dz642-diagcomphasher3\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Complete\n  exit_code: 0\n  priority: 562948349145881771\n  created_at: 2020-11-02T00:20:30.943856000Z\n  modified_at: 2020-11-02T00:20:38.746541000Z\n  started_at: 2020-11-02T00:20:36.748957000Z\n  finished_at: 2020-11-02T00:20:38.732199000Z\n  container_image: d967ef4a1ca90a096a39f5ce68e4a2e7+261\n  cwd: /var/spool/cwl\n  log: 1eeaf70de0f65b1346e54c59f09e848d+210\n  output: 11b5fdaa380102e760c3eb6de80a9876+62\n  output_path: /var/spool/cwl\n  command: [\n             \"md5sum\",\n             \"/keep/6bd770f6cf8f83e7647c602eecfaeeb8+62/hasher2.md5sum.txt\"\n           ]\n  mounts:\n    /var/spool/cwl:\n      content: {}\n      kind: json\n  runtime_constraints:\n    API: true\n    keep_cache_disk: 0\n    keep_cache_ram: 268435456\n    ram: 268435456\n    vcpus: 1\n\ndiagnostics_completed_requester2:\n  uuid: zzzzz-dz642-diagcompreq0002\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Complete\n  exit_code: 0\n  priority: 1124295487972526\n  created_at: 2020-11-03T15:54:36.504661000Z\n  modified_at: 2020-11-03T16:17:53.242868000Z\n  started_at: 2020-11-03T16:09:51.123659000Z\n  finished_at: 2020-11-03T16:17:53.220358000Z\n  container_image: d967ef4a1ca90a096a39f5ce68e4a2e7+261\n  cwd: /var/spool/cwl\n  log: f1933bf5191f576613ea7f65bd0ead53+244\n  output: 941b71a57208741ce8742eca62352fb1+123\n  output_path: /var/spool/cwl\n  command: [\n             \"arvados-cwl-runner\",\n             \"--local\",\n             \"--api=containers\",\n             \"--no-log-timestamps\",\n             \"--disable-validate\",\n             \"--disable-color\",\n             \"--eval-timeout=20\",\n             \"--thread-count=1\",\n             \"--disable-reuse\",\n             \"--collection-cache-size=256\",\n             \"--on-error=continue\",\n             \"/var/lib/cwl/workflow.json#main\",\n             \"/var/lib/cwl/cwl.input.json\"\n           ]\n  mounts:\n    /var/spool/cwl:\n      content: {}\n      kind: json\n  runtime_constraints:\n    API: true\n    keep_cache_disk: 0\n    keep_cache_ram: 268435456\n    ram: 1342177280\n    vcpus: 1\n\nrequester:\n  uuid: zzzzz-dz642-requestingcntnr\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Complete\n  exit_code: 0\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  container_image: test\n  cwd: /tmp\n  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n  mounts:\n    /tmp:\n      capacity: 1000000\n      kind: tmp\n  secret_mounts: {}\n  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b\n\nrequester_container:\n  uuid: zzzzz-dz642-requestercntnr1\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Complete\n  exit_code: 0\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  container_image: test\n  cwd: /tmp\n  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  mounts:\n    /tmp:\n      capacity: 1000000\n      kind: tmp\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n  secret_mounts: {}\n  secret_mounts_md5: 
99914b932bd37a50b983c5e7c90ae93b\n\nfailed_container:\n  uuid: zzzzz-dz642-failedcontainr1\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  state: Complete\n  exit_code: 33\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  container_image: test\n  cwd: /tmp\n  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  mounts:\n    /tmp:\n      capacity: 1000000\n      kind: tmp\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n  secret_mounts: {}\n  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b\n\nancient_container_with_logs:\n  uuid: zzzzz-dz642-logscontainer01\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  state: Complete\n  exit_code: 0\n  priority: 1\n  created_at: <%= 2.year.ago.to_fs(:db) %>\n  updated_at: <%= 2.year.ago.to_fs(:db) %>\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  mounts:\n    /tmp:\n      capacity: 1000000\n      kind: tmp\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n  finished_at: <%= 2.year.ago.to_fs(:db) %>\n  log: ea10d51bcf88862dbcc36eb292017dfd+45\n  output: test\n  secret_mounts: {}\n  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b\n\nprevious_container_with_logs:\n  uuid: zzzzz-dz642-logscontainer02\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  state: Complete\n  exit_code: 0\n  priority: 1\n  created_at: <%= 1.month.ago.to_fs(:db) %>\n  updated_at: <%= 1.month.ago.to_fs(:db) %>\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  mounts:\n    /tmp:\n      capacity: 1000000\n      kind: tmp\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n  finished_at: <%= 1.month.ago.to_fs(:db) %>\n  log: ea10d51bcf88862dbcc36eb292017dfd+45\n  output: test\n  secret_mounts: {}\n  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b\n\nrunning_container_with_logs:\n  uuid: zzzzz-dz642-logscontainer03\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  state: Running\n  priority: 1\n  created_at: <%= 1.hour.ago.to_fs(:db) %>\n  updated_at: <%= 1.hour.ago.to_fs(:db) %>\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  mounts:\n    /tmp:\n      capacity: 24000000000\n      kind: tmp\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n  secret_mounts: {}\n  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b\n  auth_uuid: zzzzz-gj3su-n4xycwjpvvi776n\n\nrunning_to_be_deleted:\n  uuid: zzzzz-dz642-runnincntrtodel\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Running\n  priority: 1\n  created_at: <%= 1.minute.ago.to_fs(:db) %>\n  updated_at: <%= 1.minute.ago.to_fs(:db) %>\n  started_at: <%= 1.minute.ago.to_fs(:db) %>\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  mounts:\n    /tmp:\n      capacity: 24000000000\n      kind: tmp\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n  auth_uuid: zzzzz-gj3su-ty6lvu9d7u7c2sq\n  secret_mounts: {}\n  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b\n\nruntime_token:\n  uuid: zzzzz-dz642-20isqbkl8xwnsao\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Locked\n  locked_by_uuid: zzzzz-gj3su-jrriu629zljsnuf\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  container_image: test\n  cwd: /tmp\n  output_path: /tmp\n  command: [\"echo\", \"hello\"]\n  runtime_token: 
v2/zzzzz-gj3su-2nj68s291f50gd9/2d19ue6ofx26o3mm7fs9u6t7hov9um0v92dzwk1o2xed3abprw\n  runtime_user_uuid: zzzzz-tpzed-l3skomkti0c4vg4\n  runtime_auth_scopes: [\"all\"]\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n  mounts:\n    /tmp:\n      capacity: 24000000000\n      kind: tmp\n    /var/spool/cwl:\n      capacity: 24000000000\n      kind: tmp\n\nlegacy_cuda_container:\n  uuid: zzzzz-dz642-cudagpcontainer\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Complete\n  exit_code: 0\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  started_at: 2016-01-11 11:11:11.111111111 Z\n  finished_at: 2016-01-12 11:12:13.111111111 Z\n  container_image: fa3c1a9cb6783f85f2ecda037e07b8c3+167\n  cwd: /tmp\n  log: ea10d51bcf88862dbcc36eb292017dfd+45\n  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  output_path: /tmp\n  command: [\"echo\", \"hello\", \"/bin/sh\", \"-c\", \"'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'\"]\n  mounts:\n    /tmp:\n      capacity: 1000000\n      kind: tmp\n  runtime_constraints:\n    API: false\n    cuda:\n      device_count: 1\n      driver_version: \"11.0\"\n      hardware_capability: \"9.0\"\n    keep_cache_ram: 0\n    ram: 12000000000\n    vcpus: 4\n  secret_mounts: {}\n  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b\n\ngpu_container:\n  uuid: zzzzz-dz642-gengpucontainer\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Complete\n  exit_code: 0\n  priority: 1\n  created_at: 2016-01-11 11:11:11.111111111 Z\n  updated_at: 2016-01-11 11:11:11.111111111 Z\n  started_at: 2016-01-11 11:11:11.111111111 Z\n  finished_at: 2016-01-12 11:12:13.111111111 Z\n  container_image: test\n  cwd: /tmp\n  log: ea10d51bcf88862dbcc36eb292017dfd+45\n  output: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n  output_path: /tmp\n  command: [\"echo\", \"hello\", \"/bin/sh\", \"-c\", \"'cat' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/foobar' '/keep/fa7aeb5140e2848d39b416daeef4ffc5+45/baz' '|' 'gzip' '>' '/dev/null'\"]\n  mounts:\n    /tmp:\n      capacity: 1000000\n      kind: tmp\n  runtime_constraints:\n    ram: 12000000000\n    vcpus: 4\n    gpu:\n      device_count: 1\n      driver_version: \"11.0\"\n      hardware_target: [\"9.0\"]\n      stack: \"cuda\"\n      vram: 8000000000\n  secret_mounts: {}\n  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b\n\nread_foo_write_bar:\n  uuid: zzzzz-dz642-readfoowritebar\n  owner_uuid: zzzzz-tpzed-000000000000000\n  state: Complete\n  exit_code: 0\n  priority: 1\n  created_at: 2024-01-11 11:11:11.111111111 Z\n  updated_at: 2024-01-11 11:11:11.111111111 Z\n  started_at: 2024-01-11 11:11:11.111111111 Z\n  finished_at: 2024-01-12 11:12:13.111111111 Z\n  container_image: test\n  cwd: /\n  mounts:\n    stdin:\n      kind: collection\n      path: /foo\n      portable_data_hash: 1f4b0bc7583c2a7f9102c395f4ffc5e3+45\n    stdout:\n      kind: file\n      path: /mnt/out/bar\n    /mnt/out:\n      capacity: 1000\n      kind: tmp\n  log: ea10d51bcf88862dbcc36eb292017dfd+45\n  output: fa7aeb5140e2848d39b416daeef4ffc5+45\n  output_path: /mnt/out\n  command: [\"echo\", \"-n\", \"bar\"]\n  runtime_constraints:\n    ram: 10000000\n    vcpus: 1\n  secret_mounts: {}\n  secret_mounts_md5: 99914b932bd37a50b983c5e7c90ae93b\n"
  },
  {
    "path": "services/api/test/fixtures/files/proc_stat",
    "content": "cpu  1632063 14136 880034 1195938459 1041039 63 21266 52811 0 0\ncpu0 291707 2191 123004 199461836 32816 58 4488 13329 0 0\ncpu1 279247 2288 168096 199443605 20358 0 3320 7776 0 0\ncpu2 243805 1099 145178 199516577 19542 0 2656 6975 0 0\ncpu3 225772 3025 145032 199534463 21217 0 2260 6578 0 0\ncpu4 280505 2581 151177 198587478 885147 2 4446 10116 0 0\ncpu5 311025 2950 147545 199394498 61957 2 4093 8035 0 0\nintr 165887918 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8993615 9108946 8200 0 50911 0 12573182 7875376 8631 0 44633 0 10027365 7325091 8544 0 59992 0 9835855 6999541 8145 0 65576 0 9789778 8583897 8184 0 55917 0 10003804 8546910 8448 0 53484 0 463 150 3174990 11523 3836341 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\nctxt 255708943\nbtime 1448378837\nprocesses 924315\nprocs_running 1\nprocs_blocked 0\nsoftirq 105120691 0 21194262 1261637 20292759 0 0 40708 13638302 27046 48665977\n"
  },
  {
    "path": "services/api/test/fixtures/groups.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\npublic:\n  uuid: zzzzz-j7d0g-it30l961gq3t0oi\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  name: Public\n  description: Public Project\n  group_class: project\n\npublic_role:\n  uuid: zzzzz-j7d0g-jt30l961gq3t0oi\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  name: Public Role\n  description: Public Role\n  group_class: role\n\nprivate:\n  uuid: zzzzz-j7d0g-rew6elm53kancon\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: Private\n  description: Private Project\n  group_class: project\n\nprivate_role:\n  uuid: zzzzz-j7d0g-pew6elm53kancon\n  owner_uuid: zzzzz-tpzed-000000000000000\n  name: Private Role\n  description: Private Role\n  group_class: role\n\nprivate_and_can_read_foofile:\n  uuid: zzzzz-j7d0g-22xp1wpjul508rk\n  owner_uuid: zzzzz-tpzed-000000000000000\n  name: Private and Can Read Foofile\n  description: Another Private Group\n  group_class: role\n\nactiveandfriends:\n  uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-08-22 14:02:18.481582707 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-08-22 14:02:18.481319501 Z\n  name: Active User and friends\n  description:\n  updated_at: 2014-08-22 14:02:18.481166435 Z\n  group_class: role\n\nsystem_group:\n  uuid: zzzzz-j7d0g-000000000000000\n  owner_uuid: zzzzz-tpzed-000000000000000\n  name: System Private\n  description: System-owned Group\n  group_class: role\n\npublic_favorites_project:\n  uuid: zzzzz-j7d0g-publicfavorites\n  owner_uuid: zzzzz-tpzed-000000000000000\n  name: Public favorites\n  description: Public favorites\n  group_class: project\n\nempty_lonely_group:\n  uuid: zzzzz-j7d0g-jtp06ulmvsezgyu\n  owner_uuid: zzzzz-tpzed-000000000000000\n  name: Empty\n  description: Empty Group\n  group_class: role\n\nall_users:\n  uuid: zzzzz-j7d0g-fffffffffffffff\n  owner_uuid: zzzzz-tpzed-000000000000000\n  name: All users\n  description: All users\n  group_class: role\n\ntestusergroup_admins:\n  uuid: zzzzz-j7d0g-48foin4vonvc2at\n  owner_uuid: zzzzz-tpzed-000000000000000\n  name: Administrators of a subset of users\n  group_class: role\n\naproject:\n  uuid: zzzzz-j7d0g-v955i6s2oi1cbso\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: A Project\n  description: Test project belonging to active user\n  group_class: project\n\nasubproject:\n  uuid: zzzzz-j7d0g-axqo7eu9pwvna1x\n  owner_uuid: zzzzz-j7d0g-v955i6s2oi1cbso\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: A Subproject\n  description: \"Test project belonging to active user's first test project\"\n  group_class: project\n\nafiltergroup:\n  uuid: zzzzz-j7d0g-thisfiltergroup\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: This filter group\n  group_class: filter\n  properties:\n    filters: [[ \"collections.name\", \"like\", \"baz%\" ], [ \"groups.name\", \"=\", \"A Subproject\" ]]\n\nafiltergroup2:\n  uuid: zzzzz-j7d0g-afiltergrouptwo\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 
2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: A filter group without filters\n  group_class: filter\n  properties:\n    filters: []\n\nafiltergroup3:\n  uuid: zzzzz-j7d0g-filtergroupthre\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: A filter group with an is_a collection filter\n  group_class: filter\n  properties:\n    filters: [[\"uuid\", \"is_a\", \"arvados#collection\"]]\n\nafiltergroup4:\n  uuid: zzzzz-j7d0g-filtergroupfour\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: A filter group with an exists collections filter\n  group_class: filter\n  properties:\n    filters: [[\"collections.properties.listprop\",\"exists\",true],[\"uuid\", \"is_a\", \"arvados#collection\"]]\n\nafiltergroup5:\n  uuid: zzzzz-j7d0g-filtergroupfive\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: A filter group with a contains collections filter\n  group_class: filter\n  properties:\n    filters: [[\"collections.properties.listprop\",\"contains\",\"elem1\"],[\"uuid\", \"is_a\", \"arvados#collection\"]]\n\nfuse_filters_test_project:\n  uuid: zzzzz-j7d0g-fusefiltertest1\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2024-02-09T12:00:00Z\n  modified_at: 2024-02-09T12:00:01Z\n  updated_at: 2024-02-09T12:00:01Z\n  name: FUSE Filters Test Project 1\n  group_class: project\n\nfuture_project_viewing_group:\n  uuid: zzzzz-j7d0g-futrprojviewgrp\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: Future Project Viewing Group\n  description: \"Group used to test granting Group Project viewing\"\n  group_class: role\n\nbad_group_has_ownership_cycle_a:\n  uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs\n  owner_uuid: zzzzz-j7d0g-0077nzts8c178lw\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-05-03 18:50:08 -0400\n  modified_at: 2014-05-03 18:50:08 -0400\n  updated_at: 2014-05-03 18:50:08 -0400\n  name: Owned by bad group b\n  group_class: project\n\nbad_group_has_ownership_cycle_b:\n  uuid: zzzzz-j7d0g-0077nzts8c178lw\n  owner_uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-05-03 18:50:08 -0400\n  modified_at: 2014-05-03 18:50:08 -0400\n  updated_at: 2014-05-03 18:50:08 -0400\n  name: Owned by bad group a\n  group_class: project\n\nanonymous_group:\n  uuid: zzzzz-j7d0g-anonymouspublic\n  owner_uuid: zzzzz-tpzed-000000000000000\n  name: Anonymous users\n  group_class: role\n  description: Anonymous users\n\nanonymously_accessible_project:\n  uuid: zzzzz-j7d0g-zhxawtyetzwc5f0\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  name: Unrestricted public data\n  group_class: project\n  description: An anonymously accessible 
project\n\nsubproject_in_anonymous_accessible_project:\n  uuid: zzzzz-j7d0g-mhtfesvgmkolpyf\n  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0\n  created_at: 2014-04-21 15:37:48 -0400\n  name: Subproject in anonymous accessible project\n  description: Description for subproject in anonymous accessible project\n  group_class: project\n\nactive_user_has_can_manage:\n  uuid: zzzzz-j7d0g-ptt1ou6a9lxrv07\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  name: Active user has can_manage\n  group_class: project\n\n# Group for testing granting permission between users who share a group.\ngroup_for_sharing_tests:\n  uuid: zzzzz-j7d0g-t4ucgncwteul7zt\n  owner_uuid: zzzzz-tpzed-000000000000000\n  name: Group for sharing tests\n  description: Users who can share objects with each other\n  group_class: role\n\nproject_owned_by_foo:\n  uuid:  zzzzz-j7d0g-lsjm0ibr0ydwpzx\n  owner_uuid: zzzzz-tpzed-81hsbo6mk8nl05c\n  created_at: 2014-02-03T17:22:54Z\n  modified_at: 2014-02-03T17:22:54Z\n  name: project_owned_by_foo\n  group_class: project\n\nempty_project:\n  uuid: zzzzz-j7d0g-9otoxmrksam74q6\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-12-16 15:56:27.967534940 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-12-16 15:56:27.967358199 Z\n  name: Empty project\n  description: ~\n  updated_at: 2014-12-16 15:56:27.967242142 Z\n  group_class: project\n\nproject_with_10_collections:\n  uuid: zzzzz-j7d0g-0010collections\n  owner_uuid: zzzzz-tpzed-user1withloadab\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-user1withloadab\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: project with 10 collections\n  description: This will result in one page in the display\n  group_class: project\n\nproject_with_201_collections:\n  uuid: zzzzz-j7d0g-0201collections\n  owner_uuid: zzzzz-tpzed-user1withloadab\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-user1withloadab\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: project with 201 collections\n  description: This will result in two pages in the display\n  group_class: project\n\nproject_with_10_pipelines:\n  uuid: zzzzz-j7d0g-000010pipelines\n  owner_uuid: zzzzz-tpzed-user1withloadab\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-user1withloadab\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: project with 10 pipelines\n  description: project with 10 pipelines\n  group_class: project\n\nproject_with_2_pipelines_and_60_crs:\n  uuid: zzzzz-j7d0g-nnncrspipelines\n  owner_uuid: zzzzz-tpzed-user1withloadab\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-user1withloadab\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: project with 2 pipelines and 60 crs\n  description: This will result in two pages in the display\n  group_class: project\n\nproject_with_25_pipelines:\n  uuid: zzzzz-j7d0g-000025pipelines\n  owner_uuid: zzzzz-tpzed-user1withloadab\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-user1withloadab\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: project with 25 pipelines\n  description: project with 25 pipelines\n  group_class: project\n\nfuse_owned_project:\n  uuid: zzzzz-j7d0g-0000ownedbyfuse\n  owner_uuid: zzzzz-tpzed-0fusedrivertest\n  created_at: 2014-04-21 15:37:48 
-0400\n  modified_by_user_uuid: zzzzz-tpzed-0fusedrivertest\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: FUSE Test Project\n  description: Test project belonging to FUSE test user\n  group_class: project\n\n# This wouldn't pass model validation, but it enables a workbench\n# infinite-loop test. See #4389\nproject_owns_itself:\n  uuid: zzzzz-j7d0g-7rqh7hdshd5yp5t\n  owner_uuid: zzzzz-j7d0g-7rqh7hdshd5yp5t\n  created_at: 2014-11-05 22:31:24.258424340 Z\n  modified_by_user_uuid: 6pbr1-tpzed-000000000000000\n  modified_at: 2014-11-05 22:31:24.258242890 Z\n  name: zzzzz-j7d0g-7rqh7hdshd5yp5t\n  description: ~\n  updated_at: 2014-11-05 22:31:24.258093171 Z\n  group_class: project\n\n# Used to test renaming when removed from the \"asubproject\" while\n# another such object with same name exists in home project.\nsubproject_in_active_user_home_project_to_test_unique_key_violation:\n  uuid: zzzzz-j7d0g-subprojsamenam1\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2013-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2013-04-21 15:37:48 -0400\n  updated_at: 2013-04-21 15:37:48 -0400\n  name: Subproject to test owner uuid and name unique key violation upon removal\n  description: Subproject in active user home project to test owner uuid and name unique key violation upon removal\n  group_class: project\n\nsubproject_in_asubproject_with_same_name_as_one_in_active_user_home:\n  uuid: zzzzz-j7d0g-subprojsamenam2\n  owner_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x\n  created_at: 2013-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2013-04-21 15:37:48 -0400\n  updated_at: 2013-04-21 15:37:48 -0400\n  name: Subproject to test owner uuid and name unique key violation upon removal\n  description: \"Removing this will result in name conflict with 'A project' in Home project and hence get renamed.\"\n  group_class: project\n\nstarred_and_shared_active_user_project:\n  uuid: zzzzz-j7d0g-starredshared01\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  name: Starred and shared active user project\n  description: Starred and shared active user project\n  group_class: project\n\ntrashed_project:\n  uuid: zzzzz-j7d0g-trashedproject1\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: trashed project\n  group_class: project\n  trash_at: 2001-01-01T00:00:00Z\n  delete_at: 2038-03-01T00:00:00Z\n  is_trashed: true\n  modified_at: 2001-01-01T00:00:00Z\n\ntrashed_subproject:\n  uuid: zzzzz-j7d0g-trashedproject2\n  owner_uuid: zzzzz-j7d0g-trashedproject1\n  name: trashed subproject\n  group_class: project\n  is_trashed: false\n  modified_at: 2001-01-01T00:00:00Z\n\ntrashed_subproject3:\n  uuid: zzzzz-j7d0g-trashedproject3\n  owner_uuid: zzzzz-j7d0g-trashedproject1\n  name: trashed subproject 3\n  group_class: project\n  trash_at: 2001-01-01T00:00:00Z\n  delete_at: 2038-03-01T00:00:00Z\n  is_trashed: true\n  modified_at: 2001-01-01T00:00:00Z\n\ntrashed_on_next_sweep:\n  uuid: zzzzz-j7d0g-soontobetrashed\n  owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz\n  name: soon to be trashed project\n  group_class: project\n  trash_at: 2001-01-01T00:00:00Z\n  delete_at: 2038-03-01T00:00:00Z\n  is_trashed: false\n  modified_at: 2001-01-01T00:00:00Z\n\ntrashed_role_on_next_sweep:\n  uuid: zzzzz-j7d0g-soontobetrashd2\n  owner_uuid: 
zzzzz-tpzed-000000000000000\n  name: soon to be trashed role group\n  group_class: role\n  trash_at: 2001-01-01T00:00:00Z\n  delete_at: 2001-01-01T00:00:00Z\n  is_trashed: false\n  modified_at: 2001-01-01T00:00:00Z\n"
  },
  {
    "path": "services/api/test/fixtures/keep_services.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nkeep0:\n  uuid: zzzzz-bi6l4-6zhilxar6r8ey90\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  service_host: keep0.zzzzz.arvadosapi.com\n  service_port: 25107\n  service_ssl_flag: false\n  service_type: disk\n\nkeep1:\n  uuid: zzzzz-bi6l4-rsnj3c76ndxb7o0\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  service_host: keep1.zzzzz.arvadosapi.com\n  service_port: 25107\n  service_ssl_flag: false\n  service_type: disk\n\nproxy:\n  uuid: zzzzz-bi6l4-h0a0xwut9qa6g3a\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  service_host: keep.zzzzz.arvadosapi.com\n  service_port: 25333\n  service_ssl_flag: true\n  service_type: proxy\n"
  },
  {
    "path": "services/api/test/fixtures/links.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nuser_agreement_required:\n  uuid: zzzzz-o0j2j-j2qe76q7s3c8aro\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2013-12-26T19:52:21Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2013-12-26T19:52:21Z\n  updated_at: 2013-12-26T19:52:21Z\n  tail_uuid: zzzzz-tpzed-000000000000000\n  link_class: signature\n  name: require\n  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y\n  properties: {}\n\nuser_agreement_readable:\n  uuid: zzzzz-o0j2j-qpf60gg4fwjlmex\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-j7d0g-fffffffffffffff\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y\n  properties: {}\n\nall_users_can_read_anonymous_group:\n  uuid: zzzzz-o0j2j-0lhbqyjab4g0bwp\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2015-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2015-01-24 20:42:26 -0800\n  updated_at: 2015-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-j7d0g-fffffffffffffff\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-j7d0g-anonymouspublic\n  properties: {}\n\nactive_user_member_of_all_users_group:\n  uuid: zzzzz-o0j2j-ctbysaduejxfrs5\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: permission\n  name: can_write\n  head_uuid: zzzzz-j7d0g-fffffffffffffff\n  properties: {}\n\nactive_user_can_manage_group:\n  uuid: zzzzz-o0j2j-3sa30nd3bqn1msh\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-02-03 15:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-02-03 15:42:26 -0800\n  updated_at: 2014-02-03 15:42:26 -0800\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: permission\n  name: can_manage\n  head_uuid: zzzzz-j7d0g-ptt1ou6a9lxrv07\n  properties: {}\n\nuser_agreement_signed_by_active:\n  uuid: zzzzz-o0j2j-4x85a69tqlrud1z\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2013-12-26T20:52:21Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2013-12-26T20:52:21Z\n  updated_at: 2013-12-26T20:52:21Z\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: signature\n  name: click\n  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y\n  properties: {}\n\nuser_agreement_signed_by_inactive:\n  uuid: zzzzz-o0j2j-lh7er2o3k6bmetw\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2013-12-26T20:52:21Z\n  modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs\n  modified_at: 2013-12-26T20:52:21Z\n  updated_at: 2013-12-26T20:52:21Z\n  tail_uuid: zzzzz-tpzed-7sg468ezxwnodxs\n  link_class: signature\n  name: click\n  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y\n  properties: {}\n\nspectator_user_member_of_all_users_group:\n  uuid: zzzzz-o0j2j-0s8ql1redzf8kvn\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r\n  link_class: permission\n  name: can_write\n  head_uuid: 
zzzzz-j7d0g-fffffffffffffff\n  properties: {}\n\ninactive_user_member_of_all_users_group:\n  uuid: zzzzz-o0j2j-osckxpy5hl5fjk5\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2013-12-26T20:52:21Z\n  modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs\n  modified_at: 2013-12-26T20:52:21Z\n  updated_at: 2013-12-26T20:52:21Z\n  tail_uuid: zzzzz-tpzed-x9kqpd79egh49c7\n  link_class: permission\n  name: can_write\n  head_uuid: zzzzz-j7d0g-fffffffffffffff\n  properties: {}\n\ninactive_signed_ua_user_member_of_all_users_group:\n  uuid: zzzzz-o0j2j-qkhyjcr6tidk652\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2013-12-26T20:52:21Z\n  modified_by_user_uuid: zzzzz-tpzed-7sg468ezxwnodxs\n  modified_at: 2013-12-26T20:52:21Z\n  updated_at: 2013-12-26T20:52:21Z\n  tail_uuid: zzzzz-tpzed-7sg468ezxwnodxs\n  link_class: permission\n  name: can_write\n  head_uuid: zzzzz-j7d0g-fffffffffffffff\n  properties: {}\n\nfoo_file_readable_by_active:\n  uuid: zzzzz-o0j2j-dp1d8395ldqw22r\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  properties: {}\n\nfoo_file_readable_by_federated_active:\n  uuid: zzzzz-o0j2j-dp1d8395ldqw23r\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zbbbb-tpzed-xurymjxw79nv3jz\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  properties: {}\n\nfoo_file_readable_by_active_duplicate_permission:\n  uuid: zzzzz-o0j2j-2qlmhgothiur55r\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  properties: {}\n\nfoo_file_readable_by_active_redundant_permission_via_private_group:\n  uuid: zzzzz-o0j2j-5s8ry7sn6bwxb7w\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-j7d0g-22xp1wpjul508rk\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  properties: {}\n\nfoo_file_readable_by_project_viewer:\n  uuid: zzzzz-o0j2j-fp1d8395ldqw22p\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-projectviewer1a\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  properties: {}\n\nbar_file_readable_by_active:\n  uuid: zzzzz-o0j2j-8hppiuduf8eqdng\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: permission\n  name: 
can_read\n  head_uuid: zzzzz-4zz18-ehbhgtheo8909or\n  properties: {}\n\nbar_file_readable_by_spectator:\n  uuid: zzzzz-o0j2j-0mhldkqozsltcli\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-ehbhgtheo8909or\n  properties: {}\n\nbaz_file_publicly_readable:\n  uuid: zzzzz-o0j2j-132ne3lk954vtoc\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-j7d0g-fffffffffffffff\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-y9vne9npefyxh8g\n  properties: {}\n\nminiadmin_user_is_a_testusergroup_admin:\n  uuid: zzzzz-o0j2j-38vvkciz7qc12j9\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-04-01 13:53:33 -0400\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-04-01 13:53:33 -0400\n  updated_at: 2014-04-01 13:53:33 -0400\n  tail_uuid: zzzzz-tpzed-2bg9x0oeydcw5hm\n  link_class: permission\n  name: can_manage\n  head_uuid: zzzzz-j7d0g-48foin4vonvc2at\n  properties: {}\n\nrominiadmin_user_is_a_testusergroup_admin:\n  uuid: zzzzz-o0j2j-6b0hz5hr107mc90\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-04-01 13:53:33 -0400\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-04-01 13:53:33 -0400\n  updated_at: 2014-04-01 13:53:33 -0400\n  tail_uuid: zzzzz-tpzed-4hvxm4n25emegis\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-j7d0g-48foin4vonvc2at\n  properties: {}\n\ntestusergroup_can_manage_active_user:\n  uuid: zzzzz-o0j2j-2vaqhxz6hsf4k1d\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-04-01 13:56:10 -0400\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-04-01 13:56:10 -0400\n  updated_at: 2014-04-01 13:56:10 -0400\n  tail_uuid: zzzzz-j7d0g-48foin4vonvc2at\n  link_class: permission\n  name: can_manage\n  head_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  properties: {}\n\ntest_timestamps:\n  uuid: zzzzz-o0j2j-4abnk2w5t86x4uc\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-04-15 13:17:14 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-04-15 13:17:14 -0400\n  updated_at: 2014-04-15 13:17:14 -0400\n  link_class: test\n  name: test\n  properties: {}\n\nadmin_can_write_aproject:\n  # Yes, this permission is effectively redundant.\n  # We use it to test that other project admins can see\n  # all the project's sharing.\n  uuid: zzzzz-o0j2j-adminmgsproject\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  link_class: permission\n  name: can_write\n  head_uuid: zzzzz-j7d0g-v955i6s2oi1cbso\n  properties: {}\n\nproject_viewer_member_of_all_users_group:\n  uuid: zzzzz-o0j2j-cdnq6627g0h0r2x\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2015-07-28T21:34:41.361747000Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2015-07-28T21:34:41.361747000Z\n  updated_at: 2015-07-28T21:34:41.361747000Z\n  tail_uuid: 
zzzzz-tpzed-projectviewer1a\n  link_class: permission\n  name: can_write\n  head_uuid: zzzzz-j7d0g-fffffffffffffff\n  properties: {}\n\nproject_viewer_can_read_project:\n  uuid: zzzzz-o0j2j-projviewerreadp\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-projectviewer1a\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-j7d0g-v955i6s2oi1cbso\n  properties: {}\n\nsubproject_admin_can_manage_subproject:\n  uuid: zzzzz-o0j2j-subprojadminlnk\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-10-15 10:00:00 -0000\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-10-15 10:00:00 -0000\n  updated_at: 2014-10-15 10:00:00 -0000\n  tail_uuid: zzzzz-tpzed-subprojectadmin\n  link_class: permission\n  name: can_manage\n  head_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x\n  properties: {}\n\nfoo_collection_tag:\n  uuid: zzzzz-o0j2j-eedahfaho8aphiv\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  tail_uuid: ~\n  head_uuid: zzzzz-4zz18-fy296fx3hot09f7\n  link_class: tag\n  name: foo_tag\n  properties: {}\n\nactive_user_can_manage_bad_group_cx2al9cqkmsf1hs:\n  uuid: zzzzz-o0j2j-ezv55ahzc9lvjwe\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-05-03 18:50:08 -0400\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-05-03 18:50:08 -0400\n  updated_at: 2014-05-03 18:50:08 -0400\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: permission\n  name: can_manage\n  head_uuid: zzzzz-j7d0g-cx2al9cqkmsf1hs\n  properties: {}\n\nmultilevel_collection_1_readable_by_active:\n  uuid: zzzzz-o0j2j-dp1d8395ldqw22j\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-pyw8yp9g3pr7irn\n  properties: {}\n\n#\n# This fixture was used in the test \"Stringify symbols coming from\n# serialized attribute in database\" which tested the hook\n# \"convert_serialized_symbols_to_strings\".  That hook (and the\n# corresponding test) was removed in #15311.  
This fixture remains to\n# facilitate manual testing of the \"rake symbols:check\" and \"rake\n# symbols:stringify\" tasks that we added to assist with database\n# fixup.\n#\nhas_symbol_keys_in_database_somehow:\n  uuid: zzzzz-o0j2j-enl1wg58310loc6\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-05-28 16:24:02.314722162 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-05-28 16:24:02.314484982 Z\n  tail_uuid: ~\n  link_class: test\n  name: ~\n  head_uuid: ~\n  properties:\n    :foo: \"bar\"\n    baz:\n      - waz\n      - :waz\n      - :waz\n      - 1\n      - ~\n      - false\n      - true\n  updated_at: 2014-05-28 16:24:02.314296411 Z\n\nbug2931_link_with_null_head_uuid:\n  uuid: zzzzz-o0j2j-uru66qok2wruasb\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-05-30 14:30:00.184389725 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-05-30 14:30:00.184019565 Z\n  updated_at: 2014-05-30 14:30:00.183829316 Z\n  link_class: permission\n  name: bug2931\n  tail_uuid: ~\n  head_uuid: ~\n  properties: {}\n\nanonymous_group_can_read_anonymously_accessible_project:\n  uuid: zzzzz-o0j2j-15gpzezqjg4bc4z\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-05-30 14:30:00.184389725 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-05-30 14:30:00.184019565 Z\n  updated_at: 2014-05-30 14:30:00.183829316 Z\n  link_class: permission\n  name: can_read\n  tail_uuid: zzzzz-j7d0g-anonymouspublic\n  head_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0\n  properties: {}\n\nanonymous_user_can_read_anonymously_accessible_project:\n  uuid: zzzzz-o0j2j-82nbli3jptwksj1\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-05-30 14:30:00.184389725 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-05-30 14:30:00.184019565 Z\n  updated_at: 2014-05-30 14:30:00.183829316 Z\n  link_class: permission\n  name: can_read\n  tail_uuid: zzzzz-tpzed-anonymouspublic\n  head_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0\n  properties: {}\n\nuser_agreement_readable_by_anonymously_accessible_project:\n  uuid: zzzzz-o0j2j-o5ds5gvhkztdc8h\n  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0\n  created_at: 2014-06-13 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-06-13 20:42:26 -0800\n  updated_at: 2014-06-13 20:42:26 -0800\n  link_class: permission\n  name: can_read\n\nactive_user_permission_to_docker_image_collection:\n  uuid: zzzzz-o0j2j-dp1d8395ldqw33s\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-1v45jub259sjjgb\n  properties: {}\n\nactive_user_permission_to_unlinked_docker_image_collection:\n  uuid: zzzzz-o0j2j-g5i0sa8cr3b1psf\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-d0d8z5wofvfgwad\n  properties: {}\n\ncrt_user_permission_to_unlinked_docker_image_collection:\n  uuid: zzzzz-o0j2j-20zvdi9b4odcfz3\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: 
zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-l3skomkti0c4vg4\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-d0d8z5wofvfgwad\n  properties: {}\n\ndocker_image_collection_hash:\n  uuid: zzzzz-o0j2j-dockercollhasha\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-06-11 14:30:00.184389725 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-06-11 14:30:00.184019565 Z\n  updated_at: 2014-06-11 14:30:00.183829316 Z\n  link_class: docker_image_hash\n  name: d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678\n  tail_uuid: ~\n  head_uuid: zzzzz-4zz18-1v45jub259sjjgb\n  properties:\n    image_timestamp: \"2014-06-10T14:30:00.184019565Z\"\n\ndocker_image_collection_tag:\n  uuid: zzzzz-o0j2j-dockercolltagbb\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-06-11 14:30:00.184389725 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-06-11 14:30:00.184019565 Z\n  updated_at: 2014-06-11 14:30:00.183829316 Z\n  link_class: docker_image_repo+tag\n  name: arvados/apitestfixture:latest\n  tail_uuid: ~\n  head_uuid: zzzzz-4zz18-1v45jub259sjjgb\n  properties:\n    image_timestamp: \"2014-06-10T14:30:00.184019565Z\"\n\ndocker_image_collection_tag2:\n  uuid: zzzzz-o0j2j-dockercolltagbc\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-06-11 14:30:00.184389725 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-06-11 14:30:00.184019565 Z\n  updated_at: 2014-06-11 14:30:00.183829316 Z\n  link_class: docker_image_repo+tag\n  name: arvados/apitestfixture:june10\n  tail_uuid: ~\n  head_uuid: zzzzz-4zz18-1v45jub259sjjgb\n  properties:\n    image_timestamp: \"2014-06-10T14:30:00.184019565Z\"\n\ndocker_image_collection_hextag:\n  uuid: zzzzz-o0j2j-2591ao7zubhaoxh\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2017-02-13 21:41:06.769936997 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2017-02-13 21:41:06.769422000 Z\n  tail_uuid: ~\n  link_class: docker_image_repo+tag\n  name: arvados/apitestfixture:b25678748af0cac6d1180b9ca4ce3ef31f2b06602f471aad8dfd421e149b0d75\n  head_uuid: zzzzz-4zz18-1v45jub259sjjgb\n  properties: {}\n  updated_at: 2017-02-13 21:41:06.769422000 Z\n\ndocker_1_12_image_hash:\n  uuid: zzzzz-o0j2j-f58l58fn65n8v6k\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2017-02-13 21:35:12.602828136 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2017-02-13 21:35:12.602309000 Z\n  tail_uuid: ~\n  link_class: docker_image_hash\n  name: sha256:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678\n  head_uuid: zzzzz-4zz18-1g4g0vhpjn9wq7i\n  properties: {}\n  updated_at: 2017-02-13 21:35:12.602309000 Z\n\ndocker_1_12_image_tag:\n  uuid: zzzzz-o0j2j-dybsy0m3u96jkbv\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2017-02-13 21:37:47.441406362 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2017-02-13 21:37:47.440882000 Z\n  tail_uuid: ~\n  link_class: docker_image_repo+tag\n  name: arvados/apitestfixture:latest\n  head_uuid: zzzzz-4zz18-1g4g0vhpjn9wq7i\n  properties: {}\n  updated_at: 2017-02-13 21:37:47.440882000 Z\n\ndocker_1_12_image_hextag:\n  uuid: zzzzz-o0j2j-06hzef4u1hbk1g5\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2017-02-13 21:37:47.441406362 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2017-02-13 
21:37:47.440882000 Z\n  tail_uuid: ~\n  link_class: docker_image_repo+tag\n  name: arvados/apitestfixture:b25678748af0cac6d1180b9ca4ce3ef31f2b06602f471aad8dfd421e149b0d75\n  head_uuid: zzzzz-4zz18-1g4g0vhpjn9wq7i\n  properties: {}\n  updated_at: 2017-02-13 21:37:47.440882000 Z\n\nancient_docker_image_collection_hash:\n  # This image helps test that searches for Docker images find\n  # the latest available image: the hash is the same as\n  # docker_image_collection_hash, but it points to a different\n  # Collection and has an older image timestamp.\n  uuid: zzzzz-o0j2j-dockercollhashz\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-06-12 14:30:00.184389725 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-06-12 14:30:00.184019565 Z\n  updated_at: 2014-06-12 14:30:00.183829316 Z\n  link_class: docker_image_hash\n  name: d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678\n  tail_uuid: ~\n  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y\n  properties:\n    image_timestamp: \"2010-06-10T14:30:00.184019565Z\"\n\nancient_docker_image_collection_tag:\n  uuid: zzzzz-o0j2j-dockercolltagzz\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-06-12 14:30:00.184389725 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-06-12 14:30:00.184019565 Z\n  updated_at: 2014-06-12 14:30:00.183829316 Z\n  link_class: docker_image_repo+tag\n  name: arvados/apitestfixture:latest\n  tail_uuid: ~\n  head_uuid: zzzzz-4zz18-t68oksiu9m80s4y\n  properties:\n    image_timestamp: \"2010-06-10T14:30:00.184019565Z\"\n\ndocker_image_tag_like_hash:\n  uuid: zzzzz-o0j2j-dockerhashtagaa\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-06-11 14:30:00.184389725 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-06-11 14:30:00.184019565 Z\n  updated_at: 2014-06-11 14:30:00.183829316 Z\n  link_class: docker_image_repo+tag\n  name: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:latest\n  tail_uuid: ~\n  head_uuid: zzzzz-4zz18-1v45jub259sjjgb\n  properties:\n    image_timestamp: \"2014-06-10T14:30:00.184019565Z\"\n\nbaz_collection_name_in_asubproject:\n  uuid: zzzzz-o0j2j-bazprojectname2\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-04-21 15:37:48 -0400\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-04-21 15:37:48 -0400\n  updated_at: 2014-04-21 15:37:48 -0400\n  tail_uuid: zzzzz-j7d0g-axqo7eu9pwvna1x\n  head_uuid: ea10d51bcf88862dbcc36eb292017dfd+45\n  link_class: name\n  # This should resemble the default name assigned when a\n  # Collection is added to a Project.\n  name: \"ea10d51bcf88862dbcc36eb292017dfd+45 added sometime\"\n  properties: {}\n\nempty_collection_name_in_active_user_home_project:\n  uuid: zzzzz-o0j2j-i3n6m552x6tmoi4\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-08-06 22:11:51.242392533 Z\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_at: 2014-08-06 22:11:51.242150425 Z\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: name\n  name: Empty collection\n  head_uuid: d41d8cd98f00b204e9800998ecf8427e+0\n  properties: {}\n  updated_at: 2014-08-06 22:11:51.242010312 Z\n\nactive_user_can_read_activeandfriends:\n  uuid: zzzzz-o0j2j-8184f5vk8c851ts\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-08-22 14:03:46.321059945 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-08-22 14:03:46.320865926 Z\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: 
permission\n  name: can_read\n  head_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7\n  properties: {}\n  updated_at: 2014-08-22 14:03:46.320743213 Z\n\nactive_user_joined_activeandfriends:\n  uuid: zzzzz-o0j2j-t63rdd7vupqvnco\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-08-22 14:03:28.835064240 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-08-22 14:03:28.834849409 Z\n  tail_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  properties: {}\n  updated_at: 2014-08-22 14:03:28.834720558 Z\n\nfuture_project_can_read_activeandfriends:\n  uuid: zzzzz-o0j2j-bkdtnddpmwxqiza\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-08-22 14:04:18.811622057 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-08-22 14:04:18.811463859 Z\n  tail_uuid: zzzzz-tpzed-futureprojview2\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7\n  properties: {}\n  updated_at: 2014-08-22 14:04:18.811387314 Z\n\nfuture_project_user_joined_activeandfriends:\n  uuid: zzzzz-o0j2j-ksl8bo92eokv332\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-08-22 14:04:24.182103355 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-08-22 14:04:24.181939129 Z\n  tail_uuid: zzzzz-j7d0g-swqu6hmi4pa7bk7\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-tpzed-futureprojview2\n  properties: {}\n  updated_at: 2014-08-22 14:04:24.181799856 Z\n\nauto_setup_vm_login_username_can_login_to_test_vm:\n  uuid: zzzzz-o0j2j-i3n6m98766tmoi4\n  owner_uuid: zzzzz-tpzed-xabcdjxw79nv3jz\n  created_at: 2014-08-06 22:11:51.242392533 Z\n  modified_by_user_uuid: zzzzz-tpzed-xabcdjxw79nv3jz\n  modified_at: 2014-08-06 22:11:51.242150425 Z\n  tail_uuid: zzzzz-tpzed-xabcdjxw79nv3jz\n  link_class: permission\n  name: can_login\n  head_uuid: zzzzz-2x53u-382brsig8rp3064\n  properties: {username: 'auto_setup_vm_login'}\n  updated_at: 2014-08-06 22:11:51.242010312 Z\n\nadmin_can_login_to_testvm2:\n  uuid: zzzzz-o0j2j-peek9mecohgh3ai\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  created_at: 2014-08-06 22:11:51.242392533 Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-08-06 22:11:51.242150425 Z\n  tail_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  link_class: permission\n  name: can_login\n  head_uuid: zzzzz-2x53u-382brsig8rp3065\n  # username is not obviously related to other user data.\n  properties: {username: 'adminroot', groups: ['docker', 'admin']}\n  updated_at: 2014-08-06 22:11:51.242010312 Z\n\nactive_can_login_to_testvm2:\n  uuid: zzzzz-o0j2j-rah2ya1ohx9xaev\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  created_at: 2014-08-06 22:11:51.242392533 Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-08-06 22:11:51.242150425 Z\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: permission\n  name: can_login\n  head_uuid: zzzzz-2x53u-382brsig8rp3065\n  # No groups.\n  properties: {username: 'active'}\n  updated_at: 2014-08-06 22:11:51.242010312 Z\n\nspectator_login_link_for_testvm2_without_username:\n  uuid: zzzzz-o0j2j-aem0eilie1jigh9\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  created_at: 2014-08-06 22:11:51.242392533 Z\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-08-06 22:11:51.242150425 Z\n  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r\n  link_class: permission\n  name: can_login\n  head_uuid: zzzzz-2x53u-382brsig8rp3065\n  updated_at: 2014-08-06 22:11:51.242010312 
Z\n\nuser_foo_can_read_sharing_group:\n  uuid: zzzzz-o0j2j-gdpvwvpj9kjs5in\n  owner_uuid: zzzzz-tpzed-000000000000000\n  tail_uuid: zzzzz-tpzed-81hsbo6mk8nl05c\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-j7d0g-t4ucgncwteul7zt\n\nuser_foo_is_in_sharing_group:\n  uuid: zzzzz-o0j2j-bwmcf9nqwomvtny\n  owner_uuid: zzzzz-tpzed-000000000000000\n  tail_uuid: zzzzz-j7d0g-t4ucgncwteul7zt\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-tpzed-81hsbo6mk8nl05c\n\nuser_bar_can_read_sharing_group:\n  uuid: zzzzz-o0j2j-23djaoza9g2zvjx\n  owner_uuid: zzzzz-tpzed-000000000000000\n  tail_uuid: zzzzz-tpzed-n3oaj4sm5fcnwib\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-j7d0g-t4ucgncwteul7zt\n\nuser_bar_is_in_sharing_group:\n  uuid: zzzzz-o0j2j-ga7fgy3xsz4hu28\n  owner_uuid: zzzzz-tpzed-000000000000000\n  tail_uuid: zzzzz-j7d0g-t4ucgncwteul7zt\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-tpzed-n3oaj4sm5fcnwib\n\nuser1-with-load_member_of_all_users_group:\n  uuid: zzzzz-o0j2j-user1-with-load\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-user1withloadab\n  link_class: permission\n  name: can_write\n  head_uuid: zzzzz-j7d0g-fffffffffffffff\n  properties: {}\n\nempty_collection_name_in_fuse_user_home_project:\n  uuid: zzzzz-o0j2j-hw3mcg3c8pwo6ar\n  owner_uuid: zzzzz-tpzed-0fusedrivertest\n  created_at: 2014-08-06 22:11:51.242392533 Z\n  modified_by_user_uuid: zzzzz-tpzed-0fusedrivertest\n  modified_at: 2014-08-06 22:11:51.242150425 Z\n  tail_uuid: zzzzz-tpzed-0fusedrivertest\n  link_class: name\n  name: Empty collection\n  head_uuid: d41d8cd98f00b204e9800998ecf8427e+0\n  properties: {}\n  updated_at: 2014-08-06 22:11:51.242010312 Z\n\nstar_project_for_active_user:\n  uuid: zzzzz-o0j2j-starredbyactive\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: star\n  name: zzzzz-j7d0g-starredshared01\n  head_uuid: zzzzz-j7d0g-starredshared01\n  properties: {}\n\nshare_starred_project_with_project_viewer:\n  uuid: zzzzz-o0j2j-sharewithviewer\n  owner_uuid: zzzzz-tpzed-000000000000000\n  tail_uuid: zzzzz-tpzed-projectviewer1a\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-j7d0g-starredshared01\n\nstar_shared_project_for_project_viewer:\n  uuid: zzzzz-o0j2j-starredbyviewer\n  owner_uuid: zzzzz-tpzed-projectviewer1a\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-projectviewer1a\n  link_class: star\n  name: zzzzz-j7d0g-starredshared01\n  head_uuid: zzzzz-j7d0g-starredshared01\n  properties: {}\n\ntagged_collection_readable_by_spectator:\n  uuid: zzzzz-o0j2j-readacl4tagcoll\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-l1s2piq4t4mps8r\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-taggedcolletion\n  properties: {}\n\nactive_manages_viewing_group:\n  
uuid: zzzzz-o0j2j-activemanagesvi\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  link_class: permission\n  name: can_manage\n  head_uuid: zzzzz-j7d0g-futrprojviewgrp\n  properties: {}\n\npublic_favorites_permission_link:\n  uuid: zzzzz-o0j2j-testpublicfavor\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-j7d0g-anonymouspublic\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-j7d0g-publicfavorites\n  properties: {}\n\nfuture_project_user_member_of_all_users_group:\n  uuid: zzzzz-o0j2j-cdnq6627g0h0r2a\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2015-07-28T21:34:41.361747000Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2015-07-28T21:34:41.361747000Z\n  updated_at: 2015-07-28T21:34:41.361747000Z\n  tail_uuid: zzzzz-tpzed-futureprojview2\n  link_class: permission\n  name: can_write\n  head_uuid: zzzzz-j7d0g-fffffffffffffff\n  properties: {}\n\nfoo_file_readable_by_soon_to_be_trashed_role:\n  uuid: zzzzz-o0j2j-5s8ry7sn7bwxb7w\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-01-24 20:42:26 -0800\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-01-24 20:42:26 -0800\n  updated_at: 2014-01-24 20:42:26 -0800\n  tail_uuid: zzzzz-j7d0g-soontobetrashd2\n  link_class: permission\n  name: can_read\n  head_uuid: zzzzz-4zz18-znfnqtbbv4spc3w\n  properties: {}\n"
  },
  {
    "path": "services/api/test/fixtures/logs.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nnoop: # nothing happened ...to the 'spectator' user\n  id: 1\n  uuid: zzzzz-57u5n-pshmckwoma9plh7\n  owner_uuid: zzzzz-tpzed-000000000000000\n  object_uuid: zzzzz-tpzed-l1s2piq4t4mps8r\n  object_owner_uuid: zzzzz-tpzed-000000000000000\n  event_at: <%= 1.minute.ago.to_fs(:db) %>\n  created_at: <%= 1.minute.ago.to_fs(:db) %>\n\nadmin_changes_collection_owned_by_active:\n  id: 2\n  uuid: zzzzz-57u5n-pshmckwoma00002\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user\n  object_uuid: zzzzz-4zz18-bv31uwvy3neko21 # collection_owned_by_active\n  object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user\n  created_at: <%= 2.minute.ago.to_fs(:db) %>\n  event_at: <%= 2.minute.ago.to_fs(:db) %>\n  event_type: update\n\nadmin_changes_collection_owned_by_foo:\n  id: 3\n  uuid: zzzzz-57u5n-pshmckwoma00003\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f # admin user\n  object_uuid: zzzzz-4zz18-50surkhkbhsp31b # collection_owned_by_foo\n  object_owner_uuid: zzzzz-tpzed-81hsbo6mk8nl05c # foo user\n  created_at: <%= 3.minute.ago.to_fs(:db) %>\n  event_at: <%= 3.minute.ago.to_fs(:db) %>\n  event_type: update\n\nsystem_adds_foo_file: # foo collection added, readable by active through link\n  id: 4\n  uuid: zzzzz-57u5n-pshmckwoma00004\n  owner_uuid: zzzzz-tpzed-000000000000000 # system user\n  object_uuid: zzzzz-4zz18-znfnqtbbv4spc3w # foo file\n  object_owner_uuid: zzzzz-tpzed-000000000000000 # system user\n  created_at: <%= 4.minute.ago.to_fs(:db) %>\n  event_at: <%= 4.minute.ago.to_fs(:db) %>\n  event_type: create\n\nsystem_adds_baz: # baz collection added, readable by active and spectator through group 'all users' group membership\n  id: 5\n  uuid: zzzzz-57u5n-pshmckwoma00005\n  owner_uuid: zzzzz-tpzed-000000000000000 # system user\n  object_uuid: zzzzz-4zz18-y9vne9npefyxh8g # baz file\n  object_owner_uuid: zzzzz-tpzed-000000000000000 # system user\n  created_at: <%= 5.minute.ago.to_fs(:db) %>\n  event_at: <%= 5.minute.ago.to_fs(:db) %>\n  event_type: create\n\nlog_owned_by_active:\n  id: 6\n  uuid: zzzzz-57u5n-pshmckwoma12345\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user\n  object_uuid: zzzzz-2x53u-382brsig8rp3667 # repository foo\n  object_owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz # active user\n  event_at: <%= 2.minute.ago.to_fs(:db) %>\n  summary: non-admin use can read own logs\n\nstderr_for_ancient_container:\n  id: 12\n  uuid: zzzzz-57u5n-containerlog001\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  object_uuid: zzzzz-dz642-logscontainer01\n  event_at: <%= 2.year.ago.to_fs(:db) %>\n  event_type: stderr\n  summary: ~\n  properties:\n    text: '2013-11-07_23:33:41 zzzzz-dz642-logscontainer01 29610 1 stderr crunchstat:\n      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user\n      0.9900 sys'\n  created_at: <%= 2.year.ago.to_fs(:db) %>\n  updated_at: <%= 2.year.ago.to_fs(:db) %>\n  modified_at: <%= 2.year.ago.to_fs(:db) %>\n  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz\n\ncrunchstat_for_ancient_container:\n  id: 13\n  uuid: zzzzz-57u5n-containerlog002\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  object_uuid: zzzzz-dz642-logscontainer01\n  event_at: <%= 2.year.ago.to_fs(:db) %>\n  event_type: crunchstat\n  summary: ~\n  properties:\n    text: '2013-11-07_23:33:41 zzzzz-dz642-logscontainer01 29610 1 stderr crunchstat:\n      cpu 1935.4300 
user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user\n      0.9900 sys'\n  created_at: <%= 2.year.ago.to_fs(:db) %>\n  updated_at: <%= 2.year.ago.to_fs(:db) %>\n  modified_at: <%= 2.year.ago.to_fs(:db) %>\n  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz\n\nstderr_for_previous_container:\n  id: 14\n  uuid: zzzzz-57u5n-containerlog003\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  object_uuid: zzzzz-dz642-logscontainer02\n  event_at: <%= 1.month.ago.to_fs(:db) %>\n  event_type: stderr\n  summary: ~\n  properties:\n    text: '2013-11-07_23:33:41 zzzzz-dz642-logscontainer02 29610 1 stderr crunchstat:\n      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user\n      0.9900 sys'\n  created_at: <%= 1.month.ago.to_fs(:db) %>\n  updated_at: <%= 1.month.ago.to_fs(:db) %>\n  modified_at: <%= 1.month.ago.to_fs(:db) %>\n  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz\n\ncrunchstat_for_previous_container:\n  id: 15\n  uuid: zzzzz-57u5n-containerlog004\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  object_uuid: zzzzz-dz642-logscontainer02\n  event_at: <%= 1.month.ago.to_fs(:db) %>\n  event_type: crunchstat\n  summary: ~\n  properties:\n    text: '2013-11-07_23:33:41 zzzzz-dz642-logscontainer02 29610 1 stderr crunchstat:\n      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user\n      0.9900 sys'\n  created_at: <%= 1.month.ago.to_fs(:db) %>\n  updated_at: <%= 1.month.ago.to_fs(:db) %>\n  modified_at: <%= 1.month.ago.to_fs(:db) %>\n  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz\n\nstderr_for_running_container:\n  id: 16\n  uuid: zzzzz-57u5n-containerlog005\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  object_uuid: zzzzz-dz642-logscontainer03\n  event_at: <%= 1.hour.ago.to_fs(:db) %>\n  event_type: crunchstat\n  summary: ~\n  properties:\n    text: '2013-11-07_23:33:41 zzzzz-dz642-logscontainer03 29610 1 stderr crunchstat:\n      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user\n      0.9900 sys'\n  created_at: <%= 1.hour.ago.to_fs(:db) %>\n  updated_at: <%= 1.hour.ago.to_fs(:db) %>\n  modified_at: <%= 1.hour.ago.to_fs(:db) %>\n  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz\n\ncrunchstat_for_running_container:\n  id: 17\n  uuid: zzzzz-57u5n-containerlog006\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  object_uuid: zzzzz-dz642-logscontainer03\n  event_at: <%= 1.hour.ago.to_fs(:db) %>\n  event_type: crunchstat\n  summary: ~\n  properties:\n    text: '2013-11-07_23:33:41 zzzzz-dz642-logscontainer03 29610 1 stderr crunchstat:\n      cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user\n      0.9900 sys'\n  created_at: <%= 1.hour.ago.to_fs(:db) %>\n  updated_at: <%= 1.hour.ago.to_fs(:db) %>\n  modified_at: <%= 1.hour.ago.to_fs(:db) %>\n  object_owner_uuid: zzzzz-j7d0g-xurymjxw79nv3jz\n"
  },
  {
    "path": "services/api/test/fixtures/users.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Read about fixtures at http://api.rubyonrails.org/classes/ActiveRecord/Fixtures.html\n\nsystem_user:\n  uuid: zzzzz-tpzed-000000000000000\n  owner_uuid: zzzzz-tpzed-000000000000000\n  created_at: 2014-11-27 06:38:21.215463000 Z\n  modified_by_user_uuid: zzzzz-tpzed-000000000000000\n  modified_at: 2014-11-27 06:38:21.208036000 Z\n  email: root\n  first_name: root\n  last_name: ''\n  identity_url:\n  is_admin: true\n  prefs: {}\n  updated_at: 2014-11-27 06:38:21.207873000 Z\n  is_active: true\n\nadmin:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-d9tiejq69daie8f\n  email: admin@arvados.local\n  first_name: TestCase\n  last_name: Administrator\n  identity_url: https://admin.openid.local\n  is_active: true\n  is_admin: true\n  username: admin\n  prefs:\n    profile:\n      organization: example.com\n      role: IT\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nminiadmin:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-2bg9x0oeydcw5hm\n  email: miniadmin@arvados.local\n  first_name: TestCase\n  last_name: User Group Administrator\n  identity_url: https://miniadmin.openid.local\n  is_active: true\n  is_admin: false\n  username: miniadmin\n  prefs:\n    profile:\n      organization: example.com\n      role: IT\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nrominiadmin:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-4hvxm4n25emegis\n  email: rominiadmin@arvados.local\n  first_name: TestCase\n  last_name: Read-Only User Group Administrator\n  identity_url: https://rominiadmin.openid.local\n  is_active: true\n  is_admin: false\n  username: rominiadmin\n  prefs:\n    profile:\n      organization: example.com\n      role: IT\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nactive:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  email: active-user@arvados.local\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  first_name: Active\n  last_name: User\n  identity_url: https://active-user.openid.local\n  is_active: true\n  is_admin: false\n  modified_at: 2015-03-26 12:34:56.789000000 Z\n  username: active\n  prefs:\n    profile:\n      organization: example.com\n      role: Computational biologist\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nfederated_active:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zbbbb-tpzed-xurymjxw79nv3jz\n  email: zbbbb-active-user@arvados.local\n  first_name: Active\n  last_name: User\n  identity_url: https://federated-active-user.openid.local\n  is_active: true\n  is_admin: false\n  username: federatedactive\n  prefs:\n    profile:\n      organization: example.com\n      role: Computational biologist\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nproject_viewer:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-projectviewer1a\n  email: project-viewer@arvados.local\n  first_name: Project\n  last_name: Viewer\n  identity_url: https://project-viewer.openid.local\n  is_active: true\n  is_admin: false\n  username: projectviewer\n  prefs:\n    profile:\n      organization: example.com\n      role: Computational biologist\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nfuture_project_user:\n  # Workbench tests give this user permission on aproject.\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-futureprojview2\n  email: 
future-project-user@arvados.local\n  first_name: Future Project\n  last_name: User\n  identity_url: https://future-project-user.openid.local\n  is_active: true\n  is_admin: false\n  username: futureprojectviewer\n  prefs:\n    profile:\n      organization: example.com\n      role: Computational biologist\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nsubproject_admin:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-subprojectadmin\n  email: subproject-admin@arvados.local\n  first_name: Subproject\n  last_name: Admin\n  identity_url: https://subproject-admin.openid.local\n  is_active: true\n  is_admin: false\n  username: subprojectadmin\n  prefs:\n    profile:\n      organization: example.com\n      role: Computational biologist\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nspectator:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-l1s2piq4t4mps8r\n  email: spectator@arvados.local\n  first_name: Spect\n  last_name: Ator\n  identity_url: https://spectator.openid.local\n  is_active: true\n  is_admin: false\n  username: spectator\n  prefs:\n    profile:\n      organization: example.com\n      role: Computational biologist\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\ncontainer_runtime_token_user:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-l3skomkti0c4vg4\n  email: container_runtime_token_user@arvados.local\n  first_name: Spect\n  last_name: Ator\n  identity_url: https://container_runtime_token_user.openid.local\n  is_active: true\n  is_admin: false\n  username: containerruntimetokenuser\n  prefs:\n    profile:\n      organization: example.com\n      role: Computational biologist\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\ninactive_uninvited:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-rf2ec3ryh4vb5ma\n  email: inactive-uninvited-user@arvados.local\n  first_name: Inactive and Uninvited\n  last_name: User\n  identity_url: https://inactive-uninvited-user.openid.local\n  is_active: false\n  is_admin: false\n  username: inactiveuninvited\n  prefs: {}\n\ninactive:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-x9kqpd79egh49c7\n  email: inactive-user@arvados.local\n  first_name: Inactive\n  last_name: User\n  identity_url: https://inactive-user.openid.local\n  is_active: false\n  is_admin: false\n  username: inactiveuser\n  prefs: {}\n\ninactive_but_signed_user_agreement:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-7sg468ezxwnodxs\n  email: inactive-user-signed-ua@arvados.local\n  first_name: Inactive But Agreeable\n  last_name: User\n  identity_url: https://inactive-but-agreeable-user.openid.local\n  is_active: false\n  is_admin: false\n  username: inactiveusersignedua\n  prefs:\n    profile:\n      organization: example.com\n      role: Computational biologist\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nanonymous:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-anonymouspublic\n  email: anonymouspublic\n  first_name: anonymouspublic\n  last_name: anonymouspublic\n  is_active: false\n  is_admin: false\n  prefs: {}\n\njob_reader:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-905b42d1dd4a354\n  email: jobber@arvados.local\n  first_name: Job\n  last_name: Er\n  identity_url: https://job_reader.openid.local\n  is_active: true\n  is_admin: false\n  username: jobber\n  prefs:\n    profile:\n      organization: example.com\n      role: Computational biologist\n    
getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\njob_reader2:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-readjobwithcomp\n  email: job_reader2@arvados.local\n  first_name: Job\n  last_name: Reader2\n  identity_url: https://job_reader2.openid.local\n  is_active: true\n  is_admin: false\n  username: jobreader2\n  prefs:\n    profile:\n      organization: example.com\n      role: Computational biologist\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nactive_no_prefs:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-a46c42d1td4aoj4\n  email: active_no_prefs@arvados.local\n  first_name: NoPrefs\n  last_name: NoProfile\n  identity_url: https://active_no_prefs.openid.local\n  is_active: true\n  is_admin: false\n  username: activenoprefs\n  prefs: {}\n\nactive_no_prefs_profile_no_getting_started_shown:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-a46c98d1td4aoj4\n  email: active_no_prefs_profile_no_gs@arvados.local\n  first_name: HasPrefs\n  last_name: NoProfile\n  identity_url: https://active_no_prefs_profile.openid.local\n  is_active: true\n  is_admin: false\n  username: activenoprefsprofilenogs\n  prefs:\n    test: abc\n\nactive_no_prefs_profile_with_getting_started_shown:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-getstartnoprofl\n  email: active_no_prefs_profile@arvados.local\n  first_name: HasPrefs\n  last_name: NoProfileWithGettingStartedShown\n  identity_url: https://active_no_prefs_profile_seen_gs.openid.local\n  is_active: true\n  is_admin: false\n  username: activenoprefsprofile\n  prefs:\n    test: abc\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nactive_with_prefs_profile_no_getting_started_shown:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-nogettinstarted\n  email: active_nogettinstarted@arvados.local\n  first_name: HasPrefsProfile\n  last_name: NoGettingStartedShown\n  identity_url: https://active_nogettinstarted.openid.local\n  is_active: true\n  username: activenogettinstarted\n  prefs:\n    profile:\n      organization: example.com\n      role: Computational biologist\n\n# Fixtures to test granting and removing permissions.\n\nuser_foo_in_sharing_group:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-81hsbo6mk8nl05c\n  email: user_foo_in_sharing_group@arvados.local\n  first_name: Foo\n  last_name: Sharing\n  identity_url: https://user_foo_in_sharing_group.openid.local\n  is_active: true\n  is_admin: false\n  username: fooinsharing\n\nuser_bar_in_sharing_group:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-n3oaj4sm5fcnwib\n  email: user_bar_in_sharing_group@arvados.local\n  first_name: Bar\n  last_name: Sharing\n  identity_url: https://user_bar_in_sharing_group.openid.local\n  is_active: true\n  is_admin: false\n  username: barinsharing\n\nuser1_with_load:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-user1withloadab\n  email: user1_with_load@arvados.local\n  first_name: user1_with_load\n  last_name: User\n  identity_url: https://user1_with_load.openid.local\n  is_active: true\n  is_admin: false\n  username: user1withload\n  prefs:\n    profile:\n      organization: example.com\n      role: IT\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nfuse:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-0fusedrivertest\n  email: fuse@arvados.local\n  first_name: FUSE\n  last_name: User\n  identity_url: https://fuse.openid.local\n  is_active: true\n  is_admin: false\n  
username: FUSE\n  prefs:\n    profile:\n      organization: example.com\n      role: IT\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\npermission_perftest:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-permissionptest\n  email: permission_perftest@arvados.local\n  first_name: FUSE\n  last_name: User\n  identity_url: https://permission_perftest.openid.local\n  is_active: true\n  is_admin: false\n  username: perftest\n  prefs:\n    profile:\n      organization: example.com\n      role: IT\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nredirects_to_active:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-1au3is3g3chtthd\n  email: redirects-to-active-user@arvados.local\n  first_name: Active2\n  last_name: User2\n  identity_url: https://redirects-to-active-user.openid.local\n  is_active: true\n  is_admin: false\n  username: redirect_active\n  redirect_to_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  prefs:\n    profile:\n      organization: example.com\n      role: Computational biologist\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\ndouble_redirects_to_active:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-oiusowoxoz0pk3p\n  email: double-redirects-to-active-user@arvados.local\n  first_name: Active3\n  last_name: User3\n  identity_url: https://double-redirects-to-active-user.openid.local\n  is_active: true\n  is_admin: false\n  username: double_redirect_active\n  redirect_to_user_uuid: zzzzz-tpzed-1au3is3g3chtthd\n  prefs:\n    profile:\n      organization: example.com\n      role: Computational biologist\n    getting_started_shown: 2015-03-26 12:34:56.789000000 Z\n\nhas_can_login_permission:\n  owner_uuid: zzzzz-tpzed-000000000000000\n  uuid: zzzzz-tpzed-xabcdjxw79nv3jz\n  email: can-login-user@arvados.local\n  modified_by_user_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  first_name: Can_login\n  last_name: User\n  identity_url: https://can-login-user.openid.local\n  is_active: true\n  is_admin: false\n  modified_at: 2015-03-26 12:34:56.789000000 Z\n  username: canLoginUser\n"
  },
  {
    "path": "services/api/test/fixtures/virtual_machines.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ntestvm:\n  uuid: zzzzz-2x53u-382brsig8rp3064\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  hostname: testvm.shell\n\ntestvm2:\n  uuid: zzzzz-2x53u-382brsig8rp3065\n  owner_uuid: zzzzz-tpzed-d9tiejq69daie8f\n  hostname: testvm2.shell\n"
  },
  {
    "path": "services/api/test/fixtures/workflows.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nworkflow_with_definition_yml:\n  uuid: zzzzz-7fd4e-validworkfloyml\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: Valid workflow with name and desc\n  description: this workflow has a valid definition yaml\n  definition: \"name: foo\\ndesc: bar\"\n  created_at: 2016-08-15 12:00:00\n\nworkflow_with_no_definition_yml:\n  uuid: zzzzz-7fd4e-validbutnoyml00\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  name: Valid workflow with no definition yaml\n  description: this workflow does not have a definition yaml\n  created_at: 2016-08-15 12:00:00\n\nworkflow_with_no_name_and_desc:\n  uuid: zzzzz-7fd4e-validnonamedesc\n  owner_uuid: zzzzz-tpzed-xurymjxw79nv3jz\n  definition: this is valid yaml\n  created_at: 2016-08-15 12:00:01\n\nworkflow_with_input_specifications:\n  uuid: zzzzz-7fd4e-validwithinputs\n  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0\n  name: Workflow with input specifications\n  description: this workflow has inputs specified\n  created_at: <%= 1.minute.ago.to_fs(:db) %>\n  definition: |\n    cwlVersion: v1.0\n    class: CommandLineTool\n    baseCommand:\n    - echo\n    inputs:\n    - doc: a longer documentation string for this parameter (optional)\n      type: boolean\n      id: ex_boolean\n      label: a short label for this parameter (optional)\n      inputBinding:\n        position: 1\n    - type:\n      - 'null'\n      - boolean\n      id: ex_boolean_opt\n      inputBinding:\n        position: 1\n    outputs: []\n\nworkflow_with_input_defaults:\n  uuid: zzzzz-7fd4e-validwithinput2\n  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0\n  name: Workflow with default input specifications\n  description: this workflow has inputs specified\n  created_at: <%= 1.minute.ago.to_fs(:db) %>\n  definition: |\n    cwlVersion: v1.0\n    class: CommandLineTool\n    baseCommand:\n    - echo\n    inputs:\n    - type: string\n      id: ex_string\n    - type: string\n      id: ex_string_def\n      default: hello-testing-123\n    outputs: []\n\nworkflow_with_wrr:\n  uuid: zzzzz-7fd4e-validwithinput3\n  owner_uuid: zzzzz-j7d0g-zhxawtyetzwc5f0\n  name: Workflow with WorkflowRunnerResources\n  description: this workflow has WorkflowRunnerResources\n  created_at: <%= 1.minute.ago.to_fs(:db) %>\n  definition: |\n    cwlVersion: v1.0\n    class: CommandLineTool\n    hints:\n      - class: http://arvados.org/cwl#WorkflowRunnerResources\n        acrContainerImage: arvados/jobs:2.0.4\n        ramMin: 1234\n        coresMin: 2\n        keep_cache: 678\n    baseCommand:\n    - echo\n    inputs:\n    - type: string\n      id: ex_string\n    - type: string\n      id: ex_string_def\n      default: hello-testing-123\n    outputs: []\n"
  },
  {
    "path": "services/api/test/functional/.gitkeep",
    "content": ""
  },
  {
    "path": "services/api/test/functional/application_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass ApplicationControllerTest < ActionController::TestCase\n  BAD_UUID = \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\"\n\n  def now_timestamp\n    Time.now.utc.to_i\n  end\n\n  setup do\n    # These tests are meant to check behavior in ApplicationController.\n    # We instantiate an arbitrary concrete controller.\n    @controller = Arvados::V1::CollectionsController.new\n    @start_stamp = now_timestamp\n  end\n\n  def check_error_token\n    token = json_response['error_token']\n    assert_not_nil token\n    token_time = token.split('+', 2).first.to_i\n    assert_operator(token_time, :>=, @start_stamp, \"error token too old\")\n    assert_operator(token_time, :<=, now_timestamp, \"error token too new\")\n  end\n\n  def check_404(errmsg=\"Path not found\")\n    assert_response 404\n    json_response['errors'].each do |err|\n      assert(err.include?(errmsg), \"error message '#{err}' expected to include '#{errmsg}'\")\n    end\n    check_error_token\n  end\n\n  test \"requesting nonexistent object returns 404 error\" do\n    authorize_with :admin\n    get(:show, params: {id: BAD_UUID})\n    check_404\n  end\n\n  test \"requesting object without read permission returns 404 error\" do\n    authorize_with :spectator\n    get(:show, params: {id: collections(:collection_owned_by_active).uuid})\n    check_404\n  end\n\n  test \"submitting bad object returns error\" do\n    authorize_with :spectator\n    post(:create, params: {collection: {badattr: \"badvalue\"}})\n    assert_response 422\n    check_error_token\n  end\n\n  ['foo', '', 'FALSE', 'TRUE', nil, [true], {a:true}, '\"true\"'].each do |bogus|\n    test \"bogus boolean parameter #{bogus.inspect} returns error\" do\n      @controller = Arvados::V1::GroupsController.new\n      authorize_with :active\n      post :create, params: {\n        group: {},\n        ensure_unique_name: bogus\n      }\n      assert_response 422\n      assert_match(/parameter must be a boolean/, json_response['errors'].first,\n                   'Helpful error message not found')\n    end\n  end\n\n  [[true, [true, 'true', 1, '1']],\n   [false, [false, 'false', 0, '0']]].each do |bool, boolparams|\n    boolparams.each do |boolparam|\n      # Ensure boolparam is acceptable as a boolean\n      test \"boolean parameter #{boolparam.inspect} acceptable\" do\n        @controller = Arvados::V1::GroupsController.new\n        authorize_with :active\n        post :create, params: {\n          group: {group_class: \"project\"},\n          ensure_unique_name: boolparam\n        }\n        assert_response :success\n      end\n\n      # Ensure boolparam is acceptable as the _intended_ boolean\n      test \"boolean parameter #{boolparam.inspect} accepted as #{bool.inspect}\" do\n        @controller = Arvados::V1::GroupsController.new\n        authorize_with :active\n        post :create, params: {\n          group: {\n            name: groups(:aproject).name,\n            owner_uuid: groups(:aproject).owner_uuid,\n            group_class: \"project\"\n          },\n          ensure_unique_name: boolparam\n        }\n        assert_response (bool ? 
:success : 422)\n      end\n    end\n  end\n\n  [[500, ActiveRecord::Deadlocked],\n   [500, ActiveRecord::QueryAborted],\n   [422, ActiveRecord::RecordNotUnique]].each do |status, etype|\n    test \"return status #{status} for #{etype}\" do\n      Group.stubs(:new).raises(etype)\n      @controller = Arvados::V1::GroupsController.new\n      authorize_with :active\n      post :create, params: {group: {}}\n      assert_response status\n    end\n  end\n\n  test \"exceptions with backtraces get logged at exception_backtrace key\" do\n    Group.stubs(:new).raises(Exception, 'Whoops')\n    Rails.logger.expects(:info).with(any_parameters) do |param|\n      param.include?('Whoops') and param.include?('\"exception_backtrace\":')\n    end\n    @controller = Arvados::V1::GroupsController.new\n    authorize_with :active\n    post :create, params: {\n      group: {},\n    }\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/api_client_authorizations_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::ApiClientAuthorizationsControllerTest < ActionController::TestCase\n  test \"should get index\" do\n    authorize_with :active_trustedclient\n    get :index\n    assert_response :success\n  end\n\n  test \"should not get index with expired auth\" do\n    authorize_with :expired\n    get :index, params: {format: :json}\n    assert_response 401\n  end\n\n  test \"create system auth\" do\n    authorize_with :admin_trustedclient\n    post :create_system_auth, params: {scopes: '[\"test\"]'}\n    assert_response :success\n    assert_not_nil JSON.parse(@response.body)['uuid']\n  end\n\n  test \"prohibit create system auth by non-admin\" do\n    authorize_with :active\n    post :create_system_auth, params: {scopes: '[\"test\"]'}\n    assert_response 403\n  end\n\n  def assert_found_tokens(auth, search_params, expected)\n    authorize_with auth\n    expected_tokens = expected.map do |name|\n      api_client_authorizations(name).api_token\n    end\n    get :index, params: search_params\n    assert_response :success\n    got_tokens = JSON.parse(@response.body)['items']\n      .map { |a| a['api_token'] }\n    assert_equal(expected_tokens.sort, got_tokens.sort,\n                 \"wrong results for #{search_params.inspect}\")\n  end\n\n  # Three-tuples with auth to use, scopes to find, and expected tokens.\n  # Make two tests for each tuple, one searching with where and the other\n  # with filter.\n  [[:admin_trustedclient, [], [:admin_noscope]],\n   [:active_trustedclient, [\"GET /arvados/v1/users\"], [:active_userlist]],\n   [:active_trustedclient,\n    [\"POST /arvados/v1/api_client_authorizations\",\n     \"GET /arvados/v1/api_client_authorizations\"],\n    [:active_apitokens]],\n  ].each do |auth, scopes, expected|\n    test \"#{auth.to_s} can find auths where scopes=#{scopes.inspect}\" do\n      assert_found_tokens(auth, {where: {scopes: scopes}}, expected)\n    end\n\n    test \"#{auth.to_s} can find auths filtered with scopes=#{scopes.inspect}\" do\n      assert_found_tokens(auth, {filters: [['scopes', '=', scopes]]}, expected)\n    end\n\n    test \"#{auth.to_s} offset works with filter scopes=#{scopes.inspect}\" do\n      assert_found_tokens(auth, {\n                            offset: expected.length,\n                            filters: [['scopes', '=', scopes]]\n                          }, [])\n    end\n  end\n\n  [:admin, :active].each do |token|\n    test \"using '#{token}', get token details via 'current'\" do\n      authorize_with token\n      get :current\n      assert_response 200\n      assert_equal json_response['scopes'], ['all']\n    end\n  end\n\n  [# anyone can look up the token they're currently using\n   [:admin, :admin, 200, 200, 1],\n   [:active, :active, 200, 200, 1],\n   # cannot look up other tokens for other users\n   [:admin_trustedclient, :active, 404, 200, 0],\n   [:active_trustedclient, :admin, 404, 200, 0],\n   # system root token is always trusted\n   [:system_user, :active, 200, 200, 1],\n   [:system_user, :admin, 200, 200, 1],\n  ].each do |auth_token, target_token, expect_get_response, expect_list_response, expect_list_items|\n    test \"using '#{auth_token}', get '#{target_token}' by uuid\" do\n      authorize_with auth_token\n      get :show, params: {\n        id: api_client_authorizations(target_token).uuid,\n      }\n      assert_response expect_get_response\n    end\n\n    test \"using 
'#{auth_token}', update '#{target_token}' by uuid\" do\n      authorize_with auth_token\n      put :update, params: {\n        id: api_client_authorizations(target_token).uuid,\n        api_client_authorization: {},\n      }\n      assert_response expect_get_response\n    end\n\n    test \"using '#{auth_token}', delete '#{target_token}' by uuid\" do\n      authorize_with auth_token\n      post :destroy, params: {\n        id: api_client_authorizations(target_token).uuid,\n      }\n      assert_response expect_get_response\n    end\n\n    test \"using '#{auth_token}', list '#{target_token}' by uuid\" do\n      authorize_with auth_token\n      get :index, params: {\n        filters: [['uuid','=',api_client_authorizations(target_token).uuid]],\n      }\n      assert_response expect_list_response\n      if expect_list_items\n        assert_equal assigns(:objects).length, expect_list_items\n        assert_equal json_response['items_available'], expect_list_items\n      end\n    end\n\n    if expect_list_items\n      test \"using '#{auth_token}', list '#{target_token}' by uuid with offset\" do\n        authorize_with auth_token\n        get :index, params: {\n          filters: [['uuid','=',api_client_authorizations(target_token).uuid]],\n          offset: expect_list_items,\n        }\n        assert_response expect_list_response\n        assert_equal json_response['items_available'], expect_list_items\n        assert_equal json_response['items'].length, 0\n      end\n    end\n\n    test \"using '#{auth_token}', list '#{target_token}' by token\" do\n      authorize_with auth_token\n      get :index, params: {\n        filters: [['api_token','=',api_client_authorizations(target_token).api_token]],\n      }\n      assert_response expect_list_response\n      if expect_list_items\n        assert_equal assigns(:objects).length, expect_list_items\n        assert_equal json_response['items_available'], expect_list_items\n      end\n    end\n  end\n\n  test \"scoped token cannot change its own scopes\" do\n    authorize_with :admin_vm\n    put :update, params: {\n      id: api_client_authorizations(:admin_vm).uuid,\n      api_client_authorization: {scopes: ['all']},\n    }\n    assert_response 403\n  end\n\n  test \"token cannot change its own uuid\" do\n    authorize_with :admin\n    put :update, params: {\n      id: api_client_authorizations(:admin).uuid,\n      api_client_authorization: {uuid: 'zzzzz-gj3su-zzzzzzzzzzzzzzz'},\n    }\n    assert_response 403\n  end\n\n  test \"get current token\" do\n    authorize_with :active\n    get :current\n    assert_response :success\n    assert_equal(json_response['api_token'],\n                 api_client_authorizations(:active).api_token)\n  end\n\n  test \"get current token using SystemRootToken\" do\n    Rails.configuration.SystemRootToken = \"xyzzy-systemroottoken\"\n    authorize_with_token Rails.configuration.SystemRootToken\n    get :current\n    assert_response :success\n    assert_equal(Rails.configuration.SystemRootToken, json_response['api_token'])\n    assert_not_empty(json_response['uuid'])\n  end\n\n  [\n    :active_noscope,\n    :active_all_collections,\n    :active_userlist,\n    :foo_collection_sharing_token,\n  ].each do |auth|\n    test \"#{auth} can get current token without the appropriate scope\" do\n      authorize_with auth\n      get :current\n      assert_response :success\n    end\n  end\n\n  test \"get current token, no auth\" do\n    get :current\n    assert_response 401\n  end\n\n  # Tests regression #18801\n  test \"select param 
is respected in 'show' response\" do\n    authorize_with :active\n    get :show, params: {\n          id: api_client_authorizations(:active).uuid,\n          select: [\"uuid\"],\n        }\n    assert_response :success\n    assert_raises ActiveModel::MissingAttributeError do\n      assigns(:object).api_token\n    end\n    assert_nil json_response[\"expires_at\"]\n    assert_nil json_response[\"api_token\"]\n    assert_equal api_client_authorizations(:active).uuid, json_response[\"uuid\"]\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/authorized_keys_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::AuthorizedKeysControllerTest < ActionController::TestCase\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/collections_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::CollectionsControllerTest < ActionController::TestCase\n  include DbCurrentTime\n\n  PERM_TOKEN_RE = /\\+A[[:xdigit:]]+@[[:xdigit:]]{8}\\b/\n\n  def permit_unsigned_manifests isok=true\n    # Set security model for the life of a test.\n    Rails.configuration.Collections.BlobSigning = !isok\n  end\n\n  def assert_signed_manifest manifest_text, label='', token: false\n    assert_not_nil manifest_text, \"#{label} manifest_text was nil\"\n    manifest_text.scan(/ [[:xdigit:]]{32}\\S*/) do |tok|\n      assert_match(PERM_TOKEN_RE, tok,\n                   \"Locator in #{label} manifest_text was not signed\")\n      if token\n        bare = tok.gsub(/\\+A[^\\+]*/, '').sub(/^ /, '')\n        exp = tok[/\\+A[[:xdigit:]]+@([[:xdigit:]]+)/, 1].to_i(16)\n        sig = Blob.sign_locator(\n          bare,\n          key: Rails.configuration.Collections.BlobSigningKey,\n          expire: exp,\n          api_token: token)[/\\+A[^\\+]*/, 0]\n        assert_includes tok, sig\n      end\n    end\n  end\n\n  def assert_unsigned_manifest txt, label=''\n    assert_not_nil(txt, \"#{label} unsigned_manifest_text was nil\")\n    locs = 0\n    txt.scan(/ [[:xdigit:]]{32}\\S*/) do |tok|\n      locs += 1\n      refute_match(PERM_TOKEN_RE, tok,\n                   \"Locator in #{label} unsigned_manifest_text was signed: #{tok}\")\n    end\n    return locs\n  end\n\n  test \"should get index\" do\n    authorize_with :active\n    get :index\n    assert_response :success\n    assert(assigns(:objects).andand.any?, \"no Collections returned in index\")\n    refute(json_response[\"items\"].any? { |c| c.has_key?(\"manifest_text\") },\n           \"basic Collections index included manifest_text\")\n    refute(json_response[\"items\"].any? { |c| c[\"uuid\"] == collections(:collection_owned_by_active_past_version_1).uuid },\n           \"basic Collections index included past version\")\n  end\n\n  test \"get index with include_old_versions\" do\n    authorize_with :active\n    get :index, params: {\n      include_old_versions: true\n    }\n    assert_response :success\n    assert(assigns(:objects).andand.any?, \"no Collections returned in index\")\n    assert(json_response[\"items\"].any? { |c| c[\"uuid\"] == collections(:collection_owned_by_active_past_version_1).uuid },\n           \"past version not included on index\")\n  end\n\n  test \"collections.get returns unsigned locators, and no unsigned_manifest_text\" do\n    permit_unsigned_manifests\n    authorize_with :active\n    get :show, params: {id: collections(:foo_file).uuid}\n    assert_response :success\n    assert_unsigned_manifest json_response[\"manifest_text\"], 'foo_file'\n    refute_includes json_response, 'unsigned_manifest_text'\n  end\n\n  ['v1token', 'v2token'].each do |token_method|\n    test \"signatures with #{token_method} are accepted\" do\n      token = api_client_authorizations(:active).send(token_method)\n      signed = Blob.sign_locator(\n        'acbd18db4cc2f85cedef654fccc4a4d8+3',\n        key: Rails.configuration.Collections.BlobSigningKey,\n        api_token: token)\n      authorize_with_token token\n      put :update, params: {\n            id: collections(:collection_owned_by_active).uuid,\n            collection: {\n              manifest_text: \". 
#{signed} 0:3:foo.txt\\n\",\n            },\n          }\n      assert_response :success\n      assert_unsigned_manifest json_response['manifest_text'], 'updated'\n    end\n  end\n\n  test \"index with manifest_text selected returns unsigned locators\" do\n    columns = %w(uuid owner_uuid manifest_text)\n    authorize_with :active\n    get :index, params: {select: columns}\n    assert_response :success\n    assert(assigns(:objects).andand.any?,\n           \"no Collections returned for index with columns selected\")\n    json_response[\"items\"].each do |coll|\n      assert_equal(coll.keys - ['kind'], columns,\n                   \"Collections index did not respect selected columns\")\n      assert_unsigned_manifest coll['manifest_text'], coll['uuid']\n    end\n  end\n\n  test \"index with unsigned_manifest_text selected returns only unsigned locators\" do\n    authorize_with :active\n    get :index, params: {select: ['unsigned_manifest_text']}\n    assert_response :success\n    assert_operator json_response[\"items\"].count, :>, 0\n    locs = 0\n    json_response[\"items\"].each do |coll|\n      assert_equal(coll.keys - ['kind'], ['unsigned_manifest_text'],\n                   \"Collections index did not respect selected columns\")\n      assert_nil coll['manifest_text']\n      locs += assert_unsigned_manifest coll['unsigned_manifest_text'], coll['uuid']\n    end\n    assert_operator locs, :>, 0, \"no locators found in any manifests\"\n  end\n\n  test 'index without select returns everything except manifest' do\n    authorize_with :active\n    get :index\n    assert_response :success\n    assert json_response['items'].any?\n    json_response['items'].each do |coll|\n      assert_includes(coll.keys, 'uuid')\n      assert_includes(coll.keys, 'name')\n      assert_includes(coll.keys, 'created_at')\n      refute_includes(coll.keys, 'manifest_text')\n    end\n  end\n\n  ['', nil, false, 'null'].each do |select|\n    test \"index with select=#{select.inspect} returns everything except manifest\" do\n      authorize_with :active\n      get :index, params: {select: select}\n      assert_response :success\n      assert json_response['items'].any?\n      json_response['items'].each do |coll|\n        assert_includes(coll.keys, 'uuid')\n        assert_includes(coll.keys, 'name')\n        assert_includes(coll.keys, 'created_at')\n        refute_includes(coll.keys, 'manifest_text')\n      end\n    end\n  end\n\n  [[\"uuid\"],\n   [\"uuid\", \"manifest_text\"],\n   '[\"uuid\"]',\n   '[\"uuid\", \"manifest_text\"]'].each do |select|\n    test \"index with select=#{select.inspect} returns no name\" do\n      authorize_with :active\n      get :index, params: {select: select}\n      assert_response :success\n      assert json_response['items'].any?\n      json_response['items'].each do |coll|\n        refute_includes(coll.keys, 'name')\n      end\n    end\n  end\n\n  test \"ignore modified_by_client_uuid in select param\" do\n    authorize_with :active\n    get :index, params: {select: [\"uuid\", \"modified_by_client_uuid\"]}\n    assert_response :success\n    json_response['items'].each do |coll|\n      assert_includes(coll.keys, 'uuid')\n      refute_includes(coll.keys, 'name')\n    end\n  end\n\n  test \"reject invalid field in select param\" do\n    authorize_with :active\n    get :index, params: {select: [\"uuid\", \"field_does_not_exist\"]}\n    assert_response 422\n  end\n\n  [0,1,2].each do |limit|\n    test \"get index with limit=#{limit}\" do\n      authorize_with :active\n      get :index, 
params: {limit: limit}\n      assert_response :success\n      assert_equal limit, assigns(:objects).count\n      resp = JSON.parse(@response.body)\n      assert_equal limit, resp['limit']\n    end\n  end\n\n  test \"items.count == items_available\" do\n    authorize_with :active\n    get :index, params: {limit: 100000}\n    assert_response :success\n    resp = JSON.parse(@response.body)\n    assert_equal resp['items_available'], assigns(:objects).length\n    assert_equal resp['items_available'], resp['items'].count\n    unique_uuids = resp['items'].collect { |i| i['uuid'] }.compact.uniq\n    assert_equal unique_uuids.count, resp['items'].count\n  end\n\n  test \"items.count == items_available with filters\" do\n    authorize_with :active\n    get :index, params: {\n      limit: 100,\n      filters: [['uuid','=',collections(:foo_file).uuid]]\n    }\n    assert_response :success\n    assert_equal 1, assigns(:objects).length\n    assert_equal 1, json_response['items_available']\n    assert_equal 1, json_response['items'].count\n  end\n\n  test \"get index with limit=2 offset=99999\" do\n    # Assume there are not that many test fixtures.\n    authorize_with :active\n    get :index, params: {limit: 2, offset: 99999}\n    assert_response :success\n    assert_equal 0, assigns(:objects).count\n    resp = JSON.parse(@response.body)\n    assert_equal 2, resp['limit']\n    assert_equal 99999, resp['offset']\n  end\n\n  def request_capped_index(params={})\n    authorize_with :user1_with_load\n    coll1 = collections(:collection_1_of_201)\n    Rails.configuration.API.MaxIndexDatabaseRead =\n      yield(coll1.manifest_text.size)\n    get :index, params: {\n      select: %w(uuid manifest_text),\n      filters: [[\"owner_uuid\", \"=\", coll1.owner_uuid]],\n      limit: 300,\n    }.merge(params)\n  end\n\n  test \"index with manifest_text limited by max_index_database_read returns non-empty\" do\n    request_capped_index() { |_| 1 }\n    assert_response :success\n    assert_equal(1, json_response[\"items\"].size)\n    assert_equal(1, json_response[\"limit\"])\n    assert_equal(201, json_response[\"items_available\"])\n  end\n\n  test \"max_index_database_read size check follows same order as real query\" do\n    authorize_with :user1_with_load\n    txt = '.' + ' d41d8cd98f00b204e9800998ecf8427e+0'*1000 + \" 0:0:empty.txt\\n\"\n    c = Collection.create! 
manifest_text: txt, name: '0000000000000000000'\n    request_capped_index(select: %w(uuid manifest_text name),\n                         order: ['name asc'],\n                         filters: [['name','>=',c.name]]) do |_|\n      txt.length - 1\n    end\n    assert_response :success\n    assert_equal(1, json_response[\"items\"].size)\n    assert_equal(1, json_response[\"limit\"])\n    assert_equal(c.uuid, json_response[\"items\"][0][\"uuid\"])\n    # The effectiveness of the test depends on >1 item matching the filters.\n    assert_operator(1, :<, json_response[\"items_available\"])\n  end\n\n  test \"index with manifest_text limited by max_index_database_read\" do\n    request_capped_index() { |size| (size * 3) + 1 }\n    assert_response :success\n    assert_equal(3, json_response[\"items\"].size)\n    assert_equal(3, json_response[\"limit\"])\n    assert_equal(201, json_response[\"items_available\"])\n  end\n\n  test \"max_index_database_read does not interfere with limit\" do\n    request_capped_index(limit: 5) { |size| size * 20 }\n    assert_response :success\n    assert_equal(5, json_response[\"items\"].size)\n    assert_equal(5, json_response[\"limit\"])\n    assert_equal(201, json_response[\"items_available\"])\n  end\n\n  test \"max_index_database_read does not interfere with order\" do\n    request_capped_index(select: %w(uuid manifest_text name),\n                         order: \"name DESC\") { |size| (size * 11) + 1 }\n    assert_response :success\n    assert_equal(11, json_response[\"items\"].size)\n    assert_empty(json_response[\"items\"].reject do |coll|\n                   coll[\"name\"] =~ /^Collection_9/\n                 end)\n    assert_equal(11, json_response[\"limit\"])\n    assert_equal(201, json_response[\"items_available\"])\n  end\n\n  test \"admin can create collection with unsigned manifest\" do\n    authorize_with :admin\n    test_collection = {\n      manifest_text: <<-EOS\n. d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\n. acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:bar.txt\n. 
acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:bar.txt\n./baz acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:bar.txt\nEOS\n    }\n    test_collection[:portable_data_hash] =\n      Digest::MD5.hexdigest(test_collection[:manifest_text]) +\n      '+' +\n      test_collection[:manifest_text].length.to_s\n\n    # post :create will modify test_collection in place, so we save a copy first.\n    # Hash.deep_dup is not sufficient as it preserves references of strings (??!?)\n    post_collection = Marshal.load(Marshal.dump(test_collection))\n    post :create, params: {\n      collection: post_collection\n    }\n\n    assert_response :success\n    assert_nil assigns(:objects)\n\n    response_collection = assigns(:object)\n\n    stored_collection = Collection.select([:uuid, :portable_data_hash, :manifest_text]).\n      where(portable_data_hash: response_collection['portable_data_hash']).first\n\n    assert_equal test_collection[:portable_data_hash], stored_collection['portable_data_hash']\n\n    # The manifest in the response will have had permission hints added.\n    # Remove any permission hints in the response before comparing it to the source.\n    stripped_manifest = stored_collection['manifest_text'].gsub(/\\+A[A-Za-z0-9@_-]+/, '')\n    assert_equal test_collection[:manifest_text], stripped_manifest\n\n    # TBD: create action should add permission signatures to manifest_text in the response,\n    # and we need to check those permission signatures here.\n  end\n\n  [:admin, :active].each do |user|\n    test \"#{user} can get collection using portable data hash\" do\n      authorize_with user\n\n      foo_collection = collections(:foo_file)\n\n      # Get foo_file using its portable data hash\n      get :show, params: {\n        id: foo_collection[:portable_data_hash]\n      }\n      assert_response :success\n      assert_not_nil assigns(:object)\n      resp = assigns(:object)\n      assert_equal foo_collection[:portable_data_hash], resp[:portable_data_hash]\n      assert_unsigned_manifest resp[:manifest_text]\n\n      # The manifest in the response will have had permission hints added.\n      # Remove any permission hints in the response before comparing it to the source.\n      stripped_manifest = resp[:manifest_text].gsub(/\\+A[A-Za-z0-9@_-]+/, '')\n      assert_equal foo_collection[:manifest_text], stripped_manifest\n    end\n  end\n\n  test \"create with owner_uuid set to owned group\" do\n    permit_unsigned_manifests\n    authorize_with :active\n    manifest_text = \". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\"\n    post :create, params: {\n      collection: {\n        owner_uuid: 'zzzzz-j7d0g-rew6elm53kancon',\n        manifest_text: manifest_text,\n        portable_data_hash: \"d30fe8ae534397864cb96c544f4cf102+47\"\n      }\n    }\n    assert_response :success\n    resp = JSON.parse(@response.body)\n    assert_equal 'zzzzz-j7d0g-rew6elm53kancon', resp['owner_uuid']\n  end\n\n  test \"create fails with duplicate name\" do\n    permit_unsigned_manifests\n    authorize_with :admin\n    manifest_text = \". 
d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\"\n    post :create, params: {\n      collection: {\n        owner_uuid: 'zzzzz-tpzed-000000000000000',\n        manifest_text: manifest_text,\n        portable_data_hash: \"d30fe8ae534397864cb96c544f4cf102+47\",\n        name: \"foo_file\"\n      }\n    }\n    assert_response 422\n    response_errors = json_response['errors']\n    assert_not_nil response_errors, 'Expected error in response'\n    assert(response_errors.first.include?('duplicate key'),\n           \"Expected 'duplicate key' error in #{response_errors.first}\")\n  end\n\n  [false, true].each do |ensure_unique_name|\n    test \"create failure with duplicate name, ensure_unique_name #{ensure_unique_name}\" do\n      authorize_with :active\n      post :create, params: {\n             collection: {\n               owner_uuid: users(:active).uuid,\n               manifest_text: \"\",\n               name: \"this...............................................................................................................................................................................................................................................................name is too long\"\n             },\n             ensure_unique_name: ensure_unique_name\n           }\n      assert_response 422\n      # check the real error isn't masked by an\n      # ensure_unique_name-related error (#19698)\n      assert_match /value too long for type/, json_response['errors'][0]\n    end\n  end\n\n  [false, true].each do |unsigned|\n    test \"create with duplicate name, ensure_unique_name, unsigned=#{unsigned}\" do\n      permit_unsigned_manifests unsigned\n      authorize_with :active\n      manifest_text = \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:foo.txt\\n\"\n      if !unsigned\n        manifest_text = Collection.sign_manifest_only_for_tests manifest_text, api_token(:active)\n      end\n      post :create, params: {\n        collection: {\n          owner_uuid: users(:active).uuid,\n          manifest_text: manifest_text,\n          name: \"owned_by_active\"\n        },\n        ensure_unique_name: true\n      }\n      assert_response :success\n      assert_match /^owned_by_active \\(#{json_response['uuid'][-15..-1]}\\)$/, json_response['name']\n    end\n  end\n\n  test \"create with owner_uuid set to group i can_manage\" do\n    permit_unsigned_manifests\n    authorize_with :active\n    manifest_text = \". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\"\n    post :create, params: {\n      collection: {\n        owner_uuid: groups(:active_user_has_can_manage).uuid,\n        manifest_text: manifest_text,\n        portable_data_hash: \"d30fe8ae534397864cb96c544f4cf102+47\"\n      }\n    }\n    assert_response :success\n    resp = JSON.parse(@response.body)\n    assert_equal groups(:active_user_has_can_manage).uuid, resp['owner_uuid']\n  end\n\n  test \"create with owner_uuid fails on group with only can_read permission\" do\n    permit_unsigned_manifests\n    authorize_with :active\n    manifest_text = \". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\"\n    post :create, params: {\n      collection: {\n        owner_uuid: groups(:all_users).uuid,\n        manifest_text: manifest_text,\n        portable_data_hash: \"d30fe8ae534397864cb96c544f4cf102+47\"\n      }\n    }\n    assert_response 403\n  end\n\n  test \"create with owner_uuid fails on group with no permission\" do\n    permit_unsigned_manifests\n    authorize_with :active\n    manifest_text = \". 
d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\"\n    post :create, params: {\n      collection: {\n        owner_uuid: groups(:public).uuid,\n        manifest_text: manifest_text,\n        portable_data_hash: \"d30fe8ae534397864cb96c544f4cf102+47\"\n      }\n    }\n    assert_response 422\n  end\n\n  test \"admin create with owner_uuid set to group with no permission\" do\n    permit_unsigned_manifests\n    authorize_with :admin\n    manifest_text = \". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\"\n    post :create, params: {\n      collection: {\n        owner_uuid: 'zzzzz-j7d0g-it30l961gq3t0oi',\n        manifest_text: manifest_text,\n        portable_data_hash: \"d30fe8ae534397864cb96c544f4cf102+47\"\n      }\n    }\n    assert_response :success\n  end\n\n  test \"should create with collection passed as json\" do\n    permit_unsigned_manifests\n    authorize_with :active\n    post :create, params: {\n      collection: <<-EOS\n      {\n        \"manifest_text\":\". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\",\\\n        \"portable_data_hash\":\"d30fe8ae534397864cb96c544f4cf102+47\"\\\n      }\n      EOS\n    }\n    assert_response :success\n  end\n\n  test \"should fail to create with checksum mismatch\" do\n    permit_unsigned_manifests\n    authorize_with :active\n    post :create, params: {\n      collection: <<-EOS\n      {\n        \"manifest_text\":\". d41d8cd98f00b204e9800998ecf8427e 0:0:bar.txt\\n\",\\\n        \"portable_data_hash\":\"d30fe8ae534397864cb96c544f4cf102+47\"\\\n      }\n      EOS\n    }\n    assert_response 422\n  end\n\n  test \"collection UUID is normalized when created\" do\n    permit_unsigned_manifests\n    authorize_with :active\n    post :create, params: {\n      collection: {\n        manifest_text: \". 
d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\",\n        portable_data_hash: \"d30fe8ae534397864cb96c544f4cf102+47+Khint+Xhint+Zhint\"\n      }\n    }\n    assert_response :success\n    assert_not_nil assigns(:object)\n    resp = JSON.parse(@response.body)\n    assert_equal \"d30fe8ae534397864cb96c544f4cf102+47\", resp['portable_data_hash']\n  end\n\n  test \"get full provenance for baz file\" do\n    authorize_with :active\n    get :provenance, params: {id: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'}\n    assert_response :success\n    resp = JSON.parse(@response.body)\n    assert_not_nil resp['1f4b0bc7583c2a7f9102c395f4ffc5e3+45'] # baz collection\n  end\n\n  test \"get no provenance for foo file\" do\n    # spectator user cannot even see baz collection\n    authorize_with :spectator\n    get :provenance, params: {id: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'}\n    assert_response 404\n  end\n\n  test \"get partial provenance for baz file\" do\n    # spectator user can see bar->baz job, but not foo->bar job\n    authorize_with :spectator\n    get :provenance, params: {id: 'ea10d51bcf88862dbcc36eb292017dfd+45'}\n    assert_response :success\n    resp = JSON.parse(@response.body)\n    assert_not_nil resp['ea10d51bcf88862dbcc36eb292017dfd+45'] # baz\n    assert_nil resp['fa7aeb5140e2848d39b416daeef4ffc5+45'] # foo->bar\n  end\n\n  test \"search collections with 'any' operator\" do\n    expect_pdh = collections(:docker_image).portable_data_hash\n    authorize_with :active\n    get :index, params: {\n      where: { any: ['contains', expect_pdh[5..25]] }\n    }\n    assert_response :success\n    found = assigns(:objects)\n    assert_equal 1, found.count\n    assert_equal expect_pdh, found.first.portable_data_hash\n  end\n\n  [false, true].each do |permit_unsigned|\n    test \"create collection with signed manifest, permit_unsigned=#{permit_unsigned}\" do\n      permit_unsigned_manifests permit_unsigned\n      authorize_with :active\n      locators = %w(\n      d41d8cd98f00b204e9800998ecf8427e+0\n      acbd18db4cc2f85cedef654fccc4a4d8+3\n      ea10d51bcf88862dbcc36eb292017dfd+45)\n\n      unsigned_manifest = locators.map { |loc|\n        \". \" + loc + \" 0:0:foo.txt\\n\"\n      }.join()\n      manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +\n        '+' +\n        unsigned_manifest.length.to_s\n\n      # Build a manifest with both signed and unsigned locators.\n      signing_opts = {\n        key: Rails.configuration.Collections.BlobSigningKey,\n        api_token: api_token(:active),\n      }\n      signed_locators = locators.collect do |x|\n        Blob.sign_locator x, signing_opts\n      end\n      if permit_unsigned\n        # Leave a non-empty blob unsigned.\n        signed_locators[1] = locators[1]\n      else\n        # Leave the empty blob unsigned. This should still be allowed.\n        signed_locators[0] = locators[0]\n      end\n      signed_manifest =\n        \". \" + signed_locators[0] + \" 0:0:foo.txt\\n\" +\n        \". \" + signed_locators[1] + \" 0:0:foo.txt\\n\" +\n        \". 
\" + signed_locators[2] + \" 0:0:foo.txt\\n\"\n\n      post :create, params: {\n        collection: {\n          manifest_text: signed_manifest,\n          portable_data_hash: manifest_uuid,\n        }\n      }\n      assert_response :success\n      assert_not_nil assigns(:object)\n      resp = JSON.parse(@response.body)\n      assert_equal manifest_uuid, resp['portable_data_hash']\n      # All of the signatures in the output must be valid.\n      resp['manifest_text'].lines.each do |entry|\n        m = /([[:xdigit:]]{32}\\+\\S+)/.match(entry)\n        if m && m[0].index('+A')\n          assert Blob.verify_signature m[0], signing_opts\n        end\n      end\n    end\n  end\n\n  test \"create collection with signed manifest and explicit TTL\" do\n    authorize_with :active\n    locators = %w(\n      d41d8cd98f00b204e9800998ecf8427e+0\n      acbd18db4cc2f85cedef654fccc4a4d8+3\n      ea10d51bcf88862dbcc36eb292017dfd+45)\n\n    unsigned_manifest = locators.map { |loc|\n      \". \" + loc + \" 0:0:foo.txt\\n\"\n    }.join()\n    manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +\n      '+' +\n      unsigned_manifest.length.to_s\n\n    # build a manifest with both signed and unsigned locators.\n    # TODO(twp): in phase 4, all locators will need to be signed, so\n    # this test should break and will need to be rewritten. Issue #2755.\n    signing_opts = {\n      key: Rails.configuration.Collections.BlobSigningKey,\n      api_token: api_token(:active),\n      ttl: 3600   # 1 hour\n    }\n    signed_manifest =\n      \". \" + locators[0] + \" 0:0:foo.txt\\n\" +\n      \". \" + Blob.sign_locator(locators[1], signing_opts) + \" 0:0:foo.txt\\n\" +\n      \". \" + Blob.sign_locator(locators[2], signing_opts) + \" 0:0:foo.txt\\n\"\n\n    post :create, params: {\n      collection: {\n        manifest_text: signed_manifest,\n        portable_data_hash: manifest_uuid,\n      }\n    }\n    assert_response :success\n    assert_not_nil assigns(:object)\n    resp = JSON.parse(@response.body)\n    assert_equal manifest_uuid, resp['portable_data_hash']\n    # All of the signatures in the output must be valid.\n    resp['manifest_text'].lines.each do |entry|\n      m = /([[:xdigit:]]{32}\\+\\S+)/.match(entry)\n      if m && m[0].index('+A')\n        assert Blob.verify_signature m[0], signing_opts\n      end\n    end\n  end\n\n  test \"create fails with invalid signature\" do\n    authorize_with :active\n    signing_opts = {\n      key: Rails.configuration.Collections.BlobSigningKey,\n      api_token: api_token(:active),\n    }\n\n    # Generate a locator with a bad signature.\n    unsigned_locator = \"acbd18db4cc2f85cedef654fccc4a4d8+3\"\n    bad_locator = unsigned_locator + \"+Affffffffffffffffffffffffffffffffffffffff@ffffffff\"\n    assert !Blob.verify_signature(bad_locator, signing_opts)\n\n    # Creating a collection with this locator should\n    # produce 403 Permission denied.\n    unsigned_manifest = \". #{unsigned_locator} 0:0:foo.txt\\n\"\n    manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest) +\n      '+' +\n      unsigned_manifest.length.to_s\n\n    bad_manifest = \". 
#{bad_locator} 0:0:foo.txt\\n\"\n    post :create, params: {\n      collection: {\n        manifest_text: bad_manifest,\n        portable_data_hash: manifest_uuid\n      }\n    }\n\n    assert_response 403\n  end\n\n  test \"create fails with uuid of signed manifest\" do\n    authorize_with :active\n    signing_opts = {\n      key: Rails.configuration.Collections.BlobSigningKey,\n      api_token: api_token(:active),\n    }\n\n    unsigned_locator = \"d41d8cd98f00b204e9800998ecf8427e+0\"\n    signed_locator = Blob.sign_locator(unsigned_locator, signing_opts)\n    signed_manifest = \". #{signed_locator} 0:0:foo.txt\\n\"\n    manifest_uuid = Digest::MD5.hexdigest(signed_manifest) +\n      '+' +\n      signed_manifest.length.to_s\n\n    post :create, params: {\n      collection: {\n        manifest_text: signed_manifest,\n        portable_data_hash: manifest_uuid\n      }\n    }\n\n    assert_response 422\n  end\n\n  test \"reject manifest with unsigned block as stream name\" do\n    authorize_with :active\n    post :create, params: {\n      collection: {\n        manifest_text: \"00000000000000000000000000000000+1234 d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\"\n      }\n    }\n    assert_includes [422, 403], response.code.to_i\n  end\n\n  test \"multiple locators per line\" do\n    permit_unsigned_manifests\n    authorize_with :active\n    locators = %w(\n      d41d8cd98f00b204e9800998ecf8427e+0\n      acbd18db4cc2f85cedef654fccc4a4d8+3\n      ea10d51bcf88862dbcc36eb292017dfd+45)\n\n    manifest_text = [\".\", *locators, \"0:0:foo.txt\\n\"].join(\" \")\n    manifest_uuid = Digest::MD5.hexdigest(manifest_text) +\n      '+' +\n      manifest_text.length.to_s\n\n    test_collection = {\n      manifest_text: manifest_text,\n      portable_data_hash: manifest_uuid,\n    }\n    post_collection = Marshal.load(Marshal.dump(test_collection))\n    post :create, params: {\n      collection: post_collection\n    }\n    assert_response :success\n    assert_not_nil assigns(:object)\n    resp = JSON.parse(@response.body)\n    assert_equal manifest_uuid, resp['portable_data_hash']\n\n    # The manifest in the response will have had permission hints added.\n    # Remove any permission hints in the response before comparing it to the source.\n    stripped_manifest = resp['manifest_text'].gsub(/\\+A[A-Za-z0-9@_-]+/, '')\n    assert_equal manifest_text, stripped_manifest\n  end\n\n  test 'Reject manifest with unsigned blob' do\n    permit_unsigned_manifests false\n    authorize_with :active\n    unsigned_manifest = \". 
0cc175b9c0f1b6a831c399e269772661+1 0:1:a.txt\\n\"\n    manifest_uuid = Digest::MD5.hexdigest(unsigned_manifest)\n    post :create, params: {\n      collection: {\n        manifest_text: unsigned_manifest,\n        portable_data_hash: manifest_uuid,\n      }\n    }\n    assert_response 403,\n    \"Creating a collection with unsigned blobs should respond 403\"\n    assert_empty Collection.where('uuid like ?', manifest_uuid+'%'),\n    \"Collection should not exist in database after failed create\"\n  end\n\n  test 'List expired collection returns empty list' do\n    authorize_with :active\n    get :index, params: {\n      where: {name: 'expired_collection'},\n    }\n    assert_response :success\n    found = assigns(:objects)\n    assert_equal 0, found.count\n  end\n\n  test 'Show expired collection returns 404' do\n    authorize_with :active\n    get :show, params: {\n      id: 'zzzzz-4zz18-mto52zx1s7sn3ih',\n    }\n    assert_response 404\n  end\n\n  test 'Update expired collection returns 404' do\n    authorize_with :active\n    post :update, params: {\n      id: 'zzzzz-4zz18-mto52zx1s7sn3ih',\n      collection: {\n        name: \"still expired\"\n      }\n    }\n    assert_response 404\n  end\n\n  test 'List collection with future expiration time succeeds' do\n    authorize_with :active\n    get :index, params: {\n      where: {name: 'collection_expires_in_future'},\n    }\n    found = assigns(:objects)\n    assert_equal 1, found.count\n  end\n\n\n  test 'Show collection with future expiration time succeeds' do\n    authorize_with :active\n    get :show, params: {\n      id: 'zzzzz-4zz18-padkqo7yb8d9i3j',\n    }\n    assert_response :success\n  end\n\n  test 'Update collection with future expiration time succeeds' do\n    authorize_with :active\n    post :update, params: {\n      id: 'zzzzz-4zz18-padkqo7yb8d9i3j',\n      collection: {\n        name: \"still not expired\"\n      }\n    }\n    assert_response :success\n  end\n\n  test \"get collection and verify that file_names is not included\" do\n    authorize_with :active\n    get :show, params: {id: collections(:foo_file).uuid}\n    assert_response :success\n    assert_equal collections(:foo_file).uuid, json_response['uuid']\n    assert_nil json_response['file_names']\n    assert json_response['manifest_text']\n  end\n\n  [\n    [2**8, :success],\n    [2**18, 422],\n  ].each do |description_size, expected_response|\n    # Descriptions are not part of search indexes. Skip until\n    # full-text search is implemented, at which point replace with a\n    # search in description.\n    skip \"create collection with description size #{description_size}\n          and expect response #{expected_response}\" do\n      authorize_with :active\n\n      description = 'here is a collection with a very large description'\n      while description.length < description_size\n        description = description + description\n      end\n\n      post :create, params: {\n        collection: {\n          manifest_text: \". 
d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo.txt\\n\",\n          description: description,\n        }\n      }\n\n      assert_response expected_response\n    end\n  end\n\n  [1, 5, nil].each do |ask|\n    test \"Set replication_desired=#{ask.inspect}\" do\n      Rails.configuration.Collections.DefaultReplication = 2\n      authorize_with :active\n      put :update, params: {\n        id: collections(:replication_undesired_unconfirmed).uuid,\n        collection: {\n          replication_desired: ask,\n        },\n      }\n      assert_response :success\n      assert_equal ask, json_response['replication_desired']\n    end\n  end\n\n  test \"get collection with properties\" do\n    authorize_with :active\n    get :show, params: {id: collections(:collection_with_one_property).uuid}\n    assert_response :success\n    assert_not_nil json_response['uuid']\n    assert_equal 'value1', json_response['properties']['property1']\n  end\n\n  [\n    {'property_1' => 'value_1'},\n    \"{\\\"property_1\\\":\\\"value_1\\\"}\",\n  ].each do |p|\n    test \"create collection with valid properties param #{p.inspect}\" do\n      authorize_with :active\n      manifest_text = \". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\"\n      post :create, params: {\n        collection: {\n          manifest_text: manifest_text,\n          portable_data_hash: \"d30fe8ae534397864cb96c544f4cf102+47\",\n          properties: p\n        }\n      }\n      assert_response :success\n      assert_not_nil json_response['uuid']\n      assert_equal Hash, json_response['properties'].class, 'Collection properties attribute should be of type hash'\n      assert_equal 'value_1', json_response['properties']['property_1']\n    end\n  end\n\n  [\n    false,\n    [],\n    42,\n    'some string',\n    '[\"json\", \"encoded\", \"array\"]',\n  ].each do |p|\n    test \"create collection with non-valid properties param #{p.inspect}\" do\n      authorize_with :active\n      post :create, params: {\n        collection: {\n          name: \"test collection with non-valid properties param '#{p.inspect}'\",\n          manifest_text: '',\n          properties: p\n        }\n      }\n      assert_response 422\n      response_errors = json_response['errors']\n      assert_not_nil response_errors, 'Expected error in response'\n    end\n  end\n\n  [\n    [\". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\\n\", 1, 34],\n    [\". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt 0:30:foo.txt 0:30:foo1.txt 0:30:foo2.txt 0:30:foo3.txt 0:30:foo4.txt\\n\", 5, 184],\n    [\". d41d8cd98f00b204e9800998ecf8427e 0:0:.\\n\", 0, 0]\n  ].each do |manifest, count, size|\n    test \"create collection with valid manifest #{manifest} and expect file stats\" do\n      authorize_with :active\n      post :create, params: {\n        collection: {\n          manifest_text: manifest\n        }\n      }\n      assert_response 200\n      assert_equal count, json_response['file_count']\n      assert_equal size, json_response['file_size_total']\n    end\n  end\n\n  test \"update collection manifest and expect new file stats\" do\n    authorize_with :active\n    post :update, params: {\n      id: collections(:collection_owned_by_active_with_file_stats).uuid,\n      collection: {\n        manifest_text: \". 
d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\\n\"\n      }\n    }\n    assert_response 200\n    assert_equal 1, json_response['file_count']\n    assert_equal 34, json_response['file_size_total']\n  end\n\n  [\n    ['file_count', 1],\n    ['file_size_total', 34]\n  ].each do |attribute, val|\n    test \"create collection with #{attribute} and expect overwrite\" do\n      authorize_with :active\n      post :create, params: {\n        collection: {\n          manifest_text: \". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\\n\",\n          \"#{attribute}\": 10\n        }\n      }\n      assert_response 200\n      assert_equal val, json_response[attribute]\n    end\n  end\n\n  [\n    ['file_count', 1],\n    ['file_size_total', 3]\n  ].each do |attribute, val|\n    test \"update collection with #{attribute} and expect ignore\" do\n      authorize_with :active\n      post :update, params: {\n        id: collections(:collection_owned_by_active_with_file_stats).uuid,\n        collection: {\n          \"#{attribute}\": 10\n        }\n      }\n      assert_response 200\n      assert_equal val, json_response[attribute]\n    end\n  end\n\n  [\n    ['file_count', 1],\n    ['file_size_total', 34]\n  ].each do |attribute, val|\n    test \"update collection with #{attribute} and manifest and expect manifest values\" do\n      authorize_with :active\n      post :update, params: {\n        id: collections(:collection_owned_by_active_with_file_stats).uuid,\n        collection: {\n          manifest_text: \". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\\n\",\n          \"#{attribute}\": 10\n        }\n      }\n      assert_response 200\n      assert_equal val, json_response[attribute]\n    end\n  end\n\n  [\n    \". 0:0:foo.txt\",\n    \". d41d8cd98f00b204e9800998ecf8427e foo.txt\",\n    \"d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\",\n    \". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\",\n  ].each do |manifest_text|\n    test \"create collection with invalid manifest #{manifest_text} and expect error\" do\n      authorize_with :active\n      post :create, params: {\n        collection: {\n          manifest_text: manifest_text,\n          portable_data_hash: \"d41d8cd98f00b204e9800998ecf8427e+0\"\n        }\n      }\n      assert_response 422\n      response_errors = json_response['errors']\n      assert_not_nil response_errors, 'Expected error in response'\n      assert(response_errors.first.include?('Invalid manifest'),\n             \"Expected 'Invalid manifest' error in #{response_errors.first}\")\n    end\n  end\n\n  [\n    [nil, \"d41d8cd98f00b204e9800998ecf8427e+0\"],\n    [\"\", \"d41d8cd98f00b204e9800998ecf8427e+0\"],\n    [\". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\", \"d30fe8ae534397864cb96c544f4cf102+47\"],\n  ].each do |manifest_text, pdh|\n    test \"create collection with valid manifest #{manifest_text.inspect} and expect success\" do\n      authorize_with :active\n      post :create, params: {\n        collection: {\n          manifest_text: manifest_text,\n          portable_data_hash: pdh\n        }\n      }\n      assert_response 200\n    end\n  end\n\n  [\n    \". 0:0:foo.txt\",\n    \". d41d8cd98f00b204e9800998ecf8427e foo.txt\",\n    \"d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\",\n    \". 
d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\",\n  ].each do |manifest_text|\n    test \"update collection with invalid manifest #{manifest_text} and expect error\" do\n      authorize_with :active\n      post :update, params: {\n        id: 'zzzzz-4zz18-bv31uwvy3neko21',\n        collection: {\n          manifest_text: manifest_text,\n        }\n      }\n      assert_response 422\n      response_errors = json_response['errors']\n      assert_not_nil response_errors, 'Expected error in response'\n      assert(response_errors.first.include?('Invalid manifest'),\n             \"Expected 'Invalid manifest' error in #{response_errors.first}\")\n    end\n  end\n\n  [\n    nil,\n    \"\",\n    \". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\",\n  ].each do |manifest_text|\n    test \"update collection with valid manifest #{manifest_text.inspect} and expect success\" do\n      authorize_with :active\n      post :update, params: {\n        id: 'zzzzz-4zz18-bv31uwvy3neko21',\n        collection: {\n          manifest_text: manifest_text,\n        }\n      }\n      assert_response 200\n    end\n  end\n\n  [true, false].each do |include_trash|\n    test \"get trashed collection with include_trash=#{include_trash}\" do\n      uuid = 'zzzzz-4zz18-mto52zx1s7sn3ih' # expired_collection\n      authorize_with :active\n      get :show, params: {\n        id: uuid,\n        include_trash: include_trash,\n      }\n      if include_trash\n        assert_response 200\n      else\n        assert_response 404\n      end\n    end\n  end\n\n  [:admin, :active].each do |user|\n    test \"get trashed collection via filters and #{user} user without including its past versions\" do\n      uuid = 'zzzzz-4zz18-mto52zx1s7sn3ih' # expired_collection\n      authorize_with user\n      get :index, params: {\n        filters: [[\"current_version_uuid\", \"=\", uuid]],\n        include_trash: true,\n      }\n      assert_response 200\n      # Only the current version is returned\n      assert_equal 1, json_response[\"items\"].size\n    end\n  end\n\n  [:admin, :active].each do |user|\n    test \"get trashed collection via filters and #{user} user, including its past versions\" do\n      uuid = 'zzzzz-4zz18-mto52zx1s7sn3ih' # expired_collection\n      authorize_with user\n      get :index, params: {\n        filters: [[\"current_version_uuid\", \"=\", uuid]],\n        include_trash: true,\n        include_old_versions: true,\n      }\n      assert_response 200\n      # Both current & past versions are returned\n      assert_equal 2, json_response[\"items\"].size\n    end\n  end\n\n  test \"trash collection also trashes its past versions\" do\n    uuid = collections(:collection_owned_by_active).uuid\n    authorize_with :active\n    versions = Collection.where(current_version_uuid: uuid)\n    assert_equal 2, versions.size\n    versions.each do |col|\n      refute col.is_trashed\n    end\n    post :trash, params: {\n      id: uuid,\n    }\n    assert_response 200\n    versions = Collection.where(current_version_uuid: uuid)\n    assert_equal 2, versions.size\n    versions.each do |col|\n      assert col.is_trashed\n    end\n  end\n\n  test 'get trashed collection without include_trash' do\n    uuid = 'zzzzz-4zz18-mto52zx1s7sn3ih' # expired_collection\n    authorize_with :active\n    get :show, params: {\n      id: uuid,\n    }\n    assert_response 404\n  end\n\n  test 'trash collection using http DELETE verb' do\n    uuid = collections(:collection_owned_by_active).uuid\n    authorize_with :active\n    delete :destroy, params: {\n 
     id: uuid,\n    }\n    assert_response 200\n    c = Collection.find_by_uuid(uuid)\n    assert_operator c.trash_at, :<, db_current_time\n    assert_equal c.delete_at, c.trash_at + Rails.configuration.Collections.BlobSigningTTL\n  end\n\n  test 'delete long-trashed collection immediately using http DELETE verb' do\n    uuid = 'zzzzz-4zz18-mto52zx1s7sn3ih' # expired_collection\n    authorize_with :active\n    delete :destroy, params: {\n      id: uuid,\n    }\n    assert_response 200\n    c = Collection.find_by_uuid(uuid)\n    assert_operator c.trash_at, :<, db_current_time\n    assert_operator c.delete_at, :<, db_current_time\n  end\n\n  ['zzzzz-4zz18-mto52zx1s7sn3ih', # expired_collection\n   :empty_collection_name_in_active_user_home_project,\n  ].each do |fixture|\n    test \"trash collection #{fixture} via trash action with grace period\" do\n      if fixture.is_a? String\n        uuid = fixture\n      else\n        uuid = collections(fixture).uuid\n      end\n      authorize_with :active\n      time_before_trashing = db_current_time\n      post :trash, params: {\n        id: uuid,\n      }\n      assert_response 200\n      c = Collection.find_by_uuid(uuid)\n      assert_operator c.trash_at, :<, db_current_time\n      assert_operator c.delete_at, :>=, time_before_trashing + Rails.configuration.Collections.DefaultTrashLifetime\n    end\n  end\n\n  test 'untrash a trashed collection' do\n    authorize_with :active\n    post :untrash, params: {\n      id: collections(:expired_collection).uuid,\n    }\n    assert_response 200\n    assert_equal false, json_response['is_trashed']\n    assert_nil json_response['trash_at']\n  end\n\n  test 'untrash a trashed collection by assigning nil to trash_at' do\n    authorize_with :active\n    post :update, params: {\n           id: collections(:expired_collection).uuid,\n           collection: {\n             trash_at: nil,\n           },\n           include_trash: true,\n    }\n    assert_response 200\n    assert_equal false, json_response['is_trashed']\n    assert_nil json_response['trash_at']\n  end\n\n  test 'untrash error on not trashed collection' do\n    authorize_with :active\n    post :untrash, params: {\n      id: collections(:collection_owned_by_active).uuid,\n    }\n    assert_response 422\n  end\n\n  [:active, :admin].each do |user|\n    test \"get trashed collections as #{user}\" do\n      authorize_with user\n      get :index, params: {\n        filters: [[\"is_trashed\", \"=\", true]],\n        include_trash: true,\n      }\n      assert_response :success\n\n      items = []\n      json_response[\"items\"].each do |coll|\n        items << coll['uuid']\n      end\n\n      assert_includes(items, collections('unique_expired_collection')['uuid'])\n      if user == :admin\n        assert_includes(items, collections('unique_expired_collection2')['uuid'])\n      else\n        assert_not_includes(items, collections('unique_expired_collection2')['uuid'])\n      end\n    end\n  end\n\n  test 'untrash collection with same name as another with no ensure unique name' do\n    authorize_with :active\n    post :untrash, params: {\n      id: collections(:trashed_collection_to_test_name_conflict_on_untrash).uuid,\n    }\n    assert_response 422\n  end\n\n  test 'untrash collection with same name as another with ensure unique name' do\n    authorize_with :active\n    post :untrash, params: {\n      id: collections(:trashed_collection_to_test_name_conflict_on_untrash).uuid,\n      ensure_unique_name: true\n    }\n    assert_response 200\n    assert_equal 
false, json_response['is_trashed']\n    assert_nil json_response['trash_at']\n    assert_nil json_response['delete_at']\n    assert_match /^same name for trashed and persisted collections \\(#{json_response['uuid'][-15..-1]}\\)$/, json_response['name']\n  end\n\n  test 'cannot show collection in trashed subproject' do\n    authorize_with :active\n    get :show, params: {\n      id: collections(:collection_in_trashed_subproject).uuid,\n      format: :json\n    }\n    assert_response 404\n  end\n\n  test 'can show collection in untrashed subproject' do\n    authorize_with :active\n    Group.find_by_uuid(groups(:trashed_project).uuid).update! is_trashed: false\n    get :show, params: {\n      id: collections(:collection_in_trashed_subproject).uuid,\n      format: :json,\n    }\n    assert_response :success\n  end\n\n  test 'cannot index collection in trashed subproject' do\n    authorize_with :active\n    get :index, params: { limit: 1000 }\n    assert_response :success\n    item_uuids = json_response['items'].map do |item|\n      item['uuid']\n    end\n    assert_not_includes(item_uuids, collections(:collection_in_trashed_subproject).uuid)\n  end\n\n  test 'can index collection in untrashed subproject' do\n    authorize_with :active\n    Group.find_by_uuid(groups(:trashed_project).uuid).update! is_trashed: false\n    get :index, params: { limit: 1000 }\n    assert_response :success\n    item_uuids = json_response['items'].map do |item|\n      item['uuid']\n    end\n    assert_includes(item_uuids, collections(:collection_in_trashed_subproject).uuid)\n  end\n\n  test 'can index trashed subproject collection with include_trash' do\n    authorize_with :active\n    get :index, params: {\n          include_trash: true,\n          limit: 1000\n        }\n    assert_response :success\n    item_uuids = json_response['items'].map do |item|\n      item['uuid']\n    end\n    assert_includes(item_uuids, collections(:collection_in_trashed_subproject).uuid)\n  end\n\n  test 'can get collection with past versions' do\n    authorize_with :active\n    get :index, params: {\n      filters: [['current_version_uuid','=',collections(:collection_owned_by_active).uuid]],\n      include_old_versions: true\n    }\n    assert_response :success\n    assert_equal 2, assigns(:objects).length\n    assert_equal 2, json_response['items_available']\n    assert_equal 2, json_response['items'].count\n    json_response['items'].each do |c|\n      assert_equal collections(:collection_owned_by_active).uuid,\n                   c['current_version_uuid'],\n                   'response includes a version from a different collection'\n    end\n  end\n\n  test 'can get old version collection by uuid' do\n    authorize_with :active\n    get :show, params: {\n      id: collections(:collection_owned_by_active_past_version_1).uuid,\n    }\n    assert_response :success\n    assert_equal collections(:collection_owned_by_active_past_version_1).name,\n                  json_response['name']\n  end\n\n  test 'can get old version collection by PDH' do\n    authorize_with :active\n    get :show, params: {\n      id: collections(:collection_owned_by_active_past_version_1).portable_data_hash,\n    }\n    assert_response :success\n    assert_equal collections(:collection_owned_by_active_past_version_1).portable_data_hash,\n                  json_response['portable_data_hash']\n  end\n\n  test 'version and current_version_uuid are ignored at creation time' do\n    permit_unsigned_manifests\n    authorize_with :active\n    manifest_text = \". 
d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\"\n    post :create, params: {\n      collection: {\n        name: 'Test collection',\n        version: 42,\n        current_version_uuid: collections(:collection_owned_by_active).uuid,\n        manifest_text: manifest_text,\n      }\n    }\n    assert_response :success\n    resp = JSON.parse(@response.body)\n    assert_equal 1, resp['version']\n    assert_equal resp['uuid'], resp['current_version_uuid']\n  end\n\n  test \"update collection with versioning enabled\" do\n    Rails.configuration.Collections.CollectionVersioning = true\n    Rails.configuration.Collections.PreserveVersionIfIdle = 1 # 1 second\n\n    col = collections(:collection_owned_by_active)\n    assert_equal 2, col.version\n    assert col.modified_at < Time.now - 1.second\n\n    token = api_client_authorizations(:active).v2token\n    signed = Blob.sign_locator(\n      'acbd18db4cc2f85cedef654fccc4a4d8+3',\n      key: Rails.configuration.Collections.BlobSigningKey,\n      api_token: token)\n    authorize_with_token token\n    put :update, params: {\n          id: col.uuid,\n          collection: {\n            manifest_text: \". #{signed} 0:3:foo.txt\\n\",\n          },\n        }\n    assert_response :success\n    assert_equal 3, json_response['version']\n  end\n\n  test \"delete collection with versioning enabled\" do\n    Rails.configuration.Collections.CollectionVersioning = true\n    Rails.configuration.Collections.PreserveVersionIfIdle = 1 # 1 second\n\n    col = collections(:collection_owned_by_active)\n    assert_equal 2, col.version\n    assert col.modified_at < Time.now - 1.second\n\n    authorize_with(:active)\n    post :trash, params: {\n      id: col.uuid,\n    }\n    assert_response :success\n    assert_equal col.version, json_response['version'], 'Trashing a collection should not create a new version'\n  end\n\n  [['<', :<],\n   ['<=', :<=],\n   ['>', :>],\n   ['>=', :>=],\n   ['=', :==]].each do |op, rubyop|\n    test \"filter collections by replication_desired #{op} replication_confirmed\" do\n      authorize_with(:active)\n      get :index, params: {\n            filters: [[\"(replication_desired #{op} replication_confirmed)\", \"=\", true]],\n          }\n      assert_response :success\n      json_response[\"items\"].each do |c|\n        assert_operator(c[\"replication_desired\"], rubyop, c[\"replication_confirmed\"])\n      end\n    end\n  end\n\n  [\"(replication_desired < bogus)\",\n   \"replication_desired < replication_confirmed\",\n   \"(replication_desired < replication_confirmed\",\n   \"(replication_desired ! 
replication_confirmed)\",\n   \"(replication_desired <)\",\n   \"(replication_desired < manifest_text)\",\n   \"(manifest_text < manifest_text)\", # currently only numeric attrs are supported\n   \"(replication_desired < 2)\", # currently only attrs are supported, not literals\n   \"(1 < 2)\",\n  ].each do |expr|\n    test \"invalid filter expression #{expr}\" do\n      authorize_with(:active)\n      get :index, params: {\n            filters: [[expr, \"=\", true]],\n          }\n      assert_response 422\n    end\n  end\n\n  test \"invalid op/arg with filter expression\" do\n    authorize_with(:active)\n    get :index, params: {\n          filters: [[\"replication_desired < replication_confirmed\", \"!=\", false]],\n        }\n    assert_response 422\n  end\n\n  [\"storage_classes_desired\", \"storage_classes_confirmed\"].each do |attr|\n    test \"filter collections by #{attr}\" do\n      authorize_with(:active)\n      get :index, params: {\n            filters: [[attr, \"=\", '[\"default\"]']]\n          }\n      assert_response :success\n      assert_not_equal 0, json_response[\"items\"].length\n      json_response[\"items\"].each do |c|\n        assert_equal [\"default\"], c[attr]\n      end\n    end\n  end\n\n  test \"select param is respected in 'show' response\" do\n    authorize_with :active\n    get :show, params: {\n          id: collections(:collection_owned_by_active).uuid,\n          select: [\"name\"],\n        }\n    assert_response :success\n    assert_raises ActiveModel::MissingAttributeError do\n      assigns(:object).manifest_text\n    end\n    assert_nil json_response[\"manifest_text\"]\n    assert_nil json_response[\"properties\"]\n    assert_equal collections(:collection_owned_by_active).name, json_response[\"name\"]\n  end\n\n  test \"select param is respected in 'update' response\" do\n    authorize_with :active\n    post :update, params: {\n          id: collections(:collection_owned_by_active).uuid,\n          collection: {\n            manifest_text: \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foobar.txt\\n\",\n          },\n          select: [\"name\"],\n        }\n    assert_response :success\n    assert_nil json_response[\"manifest_text\"]\n    assert_nil json_response[\"properties\"]\n    assert_equal collections(:collection_owned_by_active).name, json_response[\"name\"]\n  end\n\n  [nil,\n   [],\n   [\"is_trashed\", \"trash_at\"],\n   [\"is_trashed\", \"trash_at\", \"portable_data_hash\"],\n   [\"portable_data_hash\"],\n   [\"portable_data_hash\", \"manifest_text\"],\n  ].each do |select|\n    test \"select=#{select.inspect} param is respected in 'get by pdh' response\" do\n      authorize_with :active\n      get :show, params: {\n            id: collections(:collection_owned_by_active).portable_data_hash,\n            select: select,\n          }\n      assert_response :success\n      if !select || select.index(\"manifest_text\")\n        assert_not_nil json_response[\"manifest_text\"]\n      else\n        assert_nil json_response[\"manifest_text\"]\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/computed_permissions_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::ComputedPermissionsControllerTest < ActionController::TestCase\n  test \"require auth\" do\n    get :index, params: {}\n    assert_response 401\n  end\n\n  test \"require admin\" do\n    authorize_with :active\n    get :index, params: {}\n    assert_response 403\n  end\n\n  test \"index with no options\" do\n    authorize_with :admin\n    get :index, params: {}\n    assert_response :success\n    assert_operator 0, :<, json_response['items'].length\n\n    last_user = ''\n    last_target = ''\n    json_response['items'].each do |item|\n      assert_not_empty item['user_uuid']\n      assert_not_empty item['target_uuid']\n      assert_not_empty item['perm_level']\n      # check default ordering\n      assert_operator last_user, :<=, item['user_uuid']\n      if last_user == item['user_uuid']\n        assert_operator last_target, :<=, item['target_uuid']\n      end\n      last_user = item['user_uuid']\n      last_target = item['target_uuid']\n    end\n  end\n\n  test \"index with limit\" do\n    authorize_with :admin\n    get :index, params: {limit: 10}\n    assert_response :success\n    assert_equal 10, json_response['items'].length\n  end\n\n  test \"index with filter on user_uuid\" do\n    user_uuid = users(:active).uuid\n    authorize_with :admin\n    get :index, params: {filters: [['user_uuid', '=', user_uuid]]}\n    assert_response :success\n    assert_not_equal 0, json_response['items'].length\n    json_response['items'].each do |item|\n      assert_equal user_uuid, item['user_uuid']\n    end\n  end\n\n  test \"index with filter on user_uuid and target_uuid\" do\n    user_uuid = users(:active).uuid\n    target_uuid = groups(:aproject).uuid\n    authorize_with :admin\n    get :index, params: {filters: [\n                           ['user_uuid', '=', user_uuid],\n                           ['target_uuid', '=', target_uuid],\n                         ]}\n    assert_response :success\n    assert_equal([{\"user_uuid\" => user_uuid,\n                   \"target_uuid\" => target_uuid,\n                   \"perm_level\" => \"can_manage\",\n                  }],\n                 json_response['items'])\n  end\n\n  test \"index with disallowed filters\" do\n    authorize_with :admin\n    get :index, params: {filters: [['perm_level', '=', 'can_manage']]}\n    assert_response 422\n  end\n\n  %w(user_uuid target_uuid perm_level).each do |attr|\n    test \"select only #{attr}\" do\n      authorize_with :admin\n      get :index, params: {select: [attr], limit: 1}\n      assert_response :success\n      assert_operator 0, :<, json_response['items'][0][attr].length\n      assert_equal([{attr => json_response['items'][0][attr]}], json_response['items'])\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/container_requests_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::ContainerRequestsControllerTest < ActionController::TestCase\n  def minimal_cr\n    {\n      command: ['echo', 'hello'],\n      container_image: 'arvados/apitestfixture:latest',\n      output_path: '/test',\n      runtime_constraints: {vcpus: 1, ram: 1},\n      mounts: {\n        '/test' => {\n          kind: 'tmp',\n          capacity: 1000000,\n        },\n      },\n    }\n  end\n\n  test 'create with scheduling parameters' do\n    authorize_with :active\n\n    sp = {'partitions' => ['test1', 'test2']}\n    post :create, params: {\n           container_request: minimal_cr.merge(scheduling_parameters: sp.dup, state: \"Committed\")\n         }\n    assert_response :success\n\n    cr = JSON.parse(@response.body)\n    assert_not_nil cr, 'Expected container request'\n    assert_equal sp['partitions'], cr['scheduling_parameters']['partitions']\n    assert_equal false, cr['scheduling_parameters']['preemptible']\n    assert_equal false, cr['scheduling_parameters']['supervisor']\n  end\n\n  test 'create a-c-r should be supervisor' do\n    authorize_with :active\n\n    post :create, params: {\n           container_request: minimal_cr.merge(command: [\"arvados-cwl-runner\", \"my-workflow.cwl\"], state: \"Committed\")\n         }\n    assert_response :success\n\n    cr = JSON.parse(@response.body)\n    assert_not_nil cr, 'Expected container request'\n    assert_equal true, cr['scheduling_parameters']['supervisor']\n  end\n\n  test \"secret_mounts not in #create responses\" do\n    authorize_with :active\n\n    post :create, params: {\n           container_request: minimal_cr.merge(\n             secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}}),\n         }\n    assert_response :success\n\n    resp = JSON.parse(@response.body)\n    refute resp.has_key?('secret_mounts')\n\n    req = ContainerRequest.where(uuid: resp['uuid']).first\n    assert_equal 'bar', req.secret_mounts['/foo']['content']\n  end\n\n  test \"update with secret_mounts\" do\n    authorize_with :active\n    req = container_requests(:uncommitted)\n\n    patch :update, params: {\n            id: req.uuid,\n            container_request: {\n              secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}},\n            },\n          }\n    assert_response :success\n\n    resp = JSON.parse(@response.body)\n    refute resp.has_key?('secret_mounts')\n\n    req.reload\n    assert_equal 'bar', req.secret_mounts['/foo']['content']\n  end\n\n  test \"cancel with runtime_constraints and scheduling_params with default values\" do\n    authorize_with :active\n    req = container_requests(:queued)\n\n    patch :update, params: {\n      id: req.uuid,\n      container_request: {\n        state: 'Final',\n        priority: 0,\n        runtime_constraints: {\n          'vcpus' => 1,\n          'ram' => 123,\n          'keep_cache_ram' => 0,\n        },\n        scheduling_parameters: {\n          \"preemptible\"=>false\n        }\n      },\n    }\n    assert_response :success\n  end\n\n  test \"update without deleting secret_mounts\" do\n    authorize_with :active\n    req = container_requests(:uncommitted)\n    req.update!(secret_mounts: {'/foo' => {'kind' => 'json', 'content' => 'bar'}})\n\n    patch :update, params: {\n            id: req.uuid,\n            container_request: {\n              command: ['echo', 'test'],\n            },\n          }\n    
assert_response :success\n\n    resp = JSON.parse(@response.body)\n    refute resp.has_key?('secret_mounts')\n\n    req.reload\n    assert_equal 'bar', req.secret_mounts['/foo']['content']\n  end\n\n  test \"runtime_token not in #create responses\" do\n    authorize_with :active\n\n    post :create, params: {\n           container_request: minimal_cr.merge(\n             runtime_token: api_client_authorizations(:spectator).token)\n         }\n    assert_response :success\n\n    resp = JSON.parse(@response.body)\n    refute resp.has_key?('runtime_token')\n\n    req = ContainerRequest.where(uuid: resp['uuid']).first\n    assert_equal api_client_authorizations(:spectator).token, req.runtime_token\n  end\n\n  %w(Running Complete).each do |state|\n    test \"filter on container.state = #{state}\" do\n      authorize_with :active\n      get :index, params: {\n            filters: [['container.state', '=', state]],\n          }\n      assert_response :success\n      assert_operator json_response['items'].length, :>, 0\n      json_response['items'].each do |cr|\n        assert_equal state, Container.find_by_uuid(cr['container_uuid']).state\n      end\n    end\n  end\n\n  test \"filter on container success\" do\n    authorize_with :active\n    get :index, params: {\n          filters: [\n            ['container.state', '=', 'Complete'],\n            ['container.exit_code', '=', '0'],\n          ],\n        }\n    assert_response :success\n    assert_operator json_response['items'].length, :>, 0\n    json_response['items'].each do |cr|\n      assert_equal 'Complete', Container.find_by_uuid(cr['container_uuid']).state\n      assert_equal 0, Container.find_by_uuid(cr['container_uuid']).exit_code\n    end\n  end\n\n  test \"filter on container subproperty runtime_status[foo] = bar\" do\n    ctr = containers(:running)\n    act_as_system_user do\n      ctr.update!(runtime_status: {foo: 'bar'})\n    end\n    authorize_with :active\n    get :index, params: {\n          filters: [\n            ['container.runtime_status.foo', '=', 'bar'],\n          ],\n        }\n    assert_response :success\n    assert_equal [ctr.uuid], json_response['items'].collect { |cr| cr['container_uuid'] }.uniq\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/containers_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::ContainersControllerTest < ActionController::TestCase\n  test 'create' do\n    authorize_with :system_user\n    post :create, params: {\n      container: {\n        command: ['echo', 'hello'],\n        container_image: 'test',\n        output_path: 'test',\n      },\n    }\n    assert_response :success\n  end\n\n  [Container::Queued, Container::Complete].each do |state|\n    test \"cannot get auth in #{state} state\" do\n      authorize_with :system_user\n      get :auth, params: {id: containers(:queued).uuid}\n      assert_response 403\n    end\n  end\n\n  test 'cannot get auth with wrong token' do\n    authorize_with :dispatch2\n    c = containers(:queued)\n    assert c.lock, show_errors(c)\n\n    authorize_with :system_user\n    get :auth, params: {id: c.uuid}\n    assert_response 403\n  end\n\n  test 'get auth' do\n    authorize_with :system_user\n    c = containers(:queued)\n    assert c.lock, show_errors(c)\n    get :auth, params: {id: c.uuid}\n    assert_response :success\n    assert_operator 32, :<, json_response['api_token'].length\n    assert_equal 'arvados#apiClientAuthorization', json_response['kind']\n  end\n\n  test 'no auth or secret_mounts in container response' do\n    authorize_with :system_user\n    c = containers(:queued)\n    assert c.lock, show_errors(c)\n    get :show, params: {id: c.uuid}\n    assert_response :success\n    assert_nil json_response['auth']\n    assert_nil json_response['secret_mounts']\n  end\n\n  test \"lock container\" do\n    authorize_with :system_user\n    uuid = containers(:queued).uuid\n    post :lock, params: {id: uuid}\n    assert_response :success\n    assert_nil json_response['mounts']\n    assert_nil json_response['command']\n    assert_not_nil json_response['auth_uuid']\n    assert_not_nil json_response['locked_by_uuid']\n    assert_equal containers(:queued).uuid, json_response['uuid']\n    assert_equal 'Locked', json_response['state']\n    assert_equal containers(:queued).priority, json_response['priority']\n\n    container = Container.where(uuid: uuid).first\n    assert_equal 'Locked', container.state\n    assert_not_nil container.locked_by_uuid\n    assert_not_nil container.auth_uuid\n  end\n\n  test \"unlock container\" do\n    authorize_with :system_user\n    uuid = containers(:locked).uuid\n    post :unlock, params: {id: uuid}\n    assert_response :success\n    assert_nil json_response['mounts']\n    assert_nil json_response['command']\n    assert_nil json_response['auth_uuid']\n    assert_nil json_response['locked_by_uuid']\n    assert_equal containers(:locked).uuid, json_response['uuid']\n    assert_equal 'Queued', json_response['state']\n    assert_equal containers(:locked).priority, json_response['priority']\n\n    container = Container.where(uuid: uuid).first\n    assert_equal 'Queued', container.state\n    assert_nil container.locked_by_uuid\n    assert_nil container.auth_uuid\n  end\n\n  test \"unlock container locked by different dispatcher\" do\n    authorize_with :dispatch2\n    uuid = containers(:locked).uuid\n    post :unlock, params: {id: uuid}\n    assert_response 403\n  end\n\n  [\n    [:queued, :lock, :success, 'Locked'],\n    [:queued, :unlock, 422, 'Queued'],\n    [:locked, :lock, 422, 'Locked'],\n    [:running, :lock, 422, 'Running'],\n    [:running, :unlock, 422, 'Running'],\n  ].each do |fixture, action, response, state|\n    test \"state transitions 
from #{fixture} to #{action}\" do\n      authorize_with :system_user\n      uuid = containers(fixture).uuid\n      post action, params: {id: uuid}\n      assert_response response\n      assert_equal state, Container.where(uuid: uuid).first.state\n    end\n  end\n\n  test 'get current container for token' do\n    authorize_with :running_container_auth\n    get :current\n    assert_response :success\n    assert_equal containers(:running).uuid, json_response['uuid']\n  end\n\n  test 'no container associated with token' do\n    authorize_with :system_user\n    get :current\n    assert_response 404\n  end\n\n  test 'try get current container, no token' do\n    get :current\n    assert_response 401\n  end\n\n  [\n    [true, :running_container_auth],\n    [false, :dispatch2],\n    [false, :admin],\n    [false, :active],\n  ].each do |expect_success, auth|\n    test \"get secret_mounts with #{auth} token\" do\n      authorize_with auth\n      get :secret_mounts, params: {id: containers(:running).uuid}\n      if expect_success\n        assert_response :success\n        assert_equal \"42\\n\", json_response[\"secret_mounts\"][\"/secret/6x9\"][\"content\"]\n      else\n        assert_response 403\n      end\n    end\n  end\n\n  test 'get runtime_token auth' do\n    authorize_with :dispatch2\n    c = containers(:runtime_token)\n    get :auth, params: {id: c.uuid}\n    assert_response :success\n    assert_equal \"v2/#{json_response['uuid']}/#{json_response['api_token']}\", api_client_authorizations(:container_runtime_token).token\n    assert_equal 'arvados#apiClientAuthorization', json_response['kind']\n  end\n\n  test 'update_priority' do\n    ActiveRecord::Base.connection.execute \"update containers set priority=0 where uuid='#{containers(:running).uuid}'\"\n    authorize_with :admin\n    post :update_priority, params: {id: containers(:running).uuid}\n    assert_response :success\n    assert_not_equal 0, Container.find_by_uuid(containers(:running).uuid).priority\n  end\n\n  test 'update runtime_status, runtime_status is toplevel key' do\n    authorize_with :system_user\n    c = containers(:running)\n    patch :update, params: {id: c.uuid, runtime_status: {activity: \"foo\", activityDetail: \"bar\"}}\n    assert_response :success\n  end\n\n  test 'update runtime_status, container is toplevel key' do\n    authorize_with :system_user\n    c = containers(:running)\n    patch :update, params: {id: c.uuid, container: {runtime_status: {activity: \"foo\", activityDetail: \"bar\"}}}\n    assert_response :success\n  end\n\n  test 'update state, state is toplevel key' do\n    authorize_with :system_user\n    c = containers(:running)\n    patch :update, params: {id: c.uuid, state: \"Complete\", runtime_status: {activity: \"finishing\"}}\n    assert_response :success\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/filters_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::FiltersTest < ActionController::TestCase\n  test '\"not in\" filter passes null values' do\n    @controller = Arvados::V1::ContainerRequestsController.new\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['container_uuid', 'not in', ['zzzzz-dz642-queuedcontainer', 'zzzzz-dz642-runningcontainr']] ],\n      controller: 'container_requests',\n    }\n    assert_response :success\n    found = assigns(:objects)\n    assert_includes(found.collect(&:container_uuid), nil,\n                    \"'container_uuid not in [zzzzz-dz642-queuedcontainer, zzzzz-dz642-runningcontainr]' filter should pass null\")\n  end\n\n  test 'error message for non-array element in filters array' do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with :active\n    get :index, params: {\n      filters: [{bogus: 'filter'}],\n    }\n    assert_response 422\n    assert_match(/Invalid element in filters array/,\n                 json_response['errors'].join(' '))\n  end\n\n  test 'error message for unsupported full text search' do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with :active\n    get :index, params: {\n      filters: [['uuid', '@@', 'abcdef']],\n    }\n    assert_response 422\n    assert_match(/no longer supported/, json_response['errors'].join(' '))\n  end\n\n  test 'error message for int64 overflow' do\n    # some versions of ActiveRecord cast >64-bit ints to postgres\n    # numeric type, but this is never useful because database content\n    # is 64 bit.\n    @controller = Arvados::V1::LogsController.new\n    authorize_with :active\n    get :index, params: {\n      filters: [['id', '=', 123412341234123412341234]],\n    }\n    assert_response 422\n    assert_match(/Invalid operand .* integer attribute/, json_response['errors'].join(' '))\n  end\n\n  ['in', 'not in'].each do |operator|\n    test \"error message for int64 overflow ('#{operator}' filter)\" do\n      @controller = Arvados::V1::ContainerRequestsController.new\n      authorize_with :active\n      get :index, params: {\n            filters: [['priority', operator, [9, 123412341234123412341234]]],\n          }\n      assert_response 422\n      assert_match(/Invalid element .* integer attribute/, json_response['errors'].join(' '))\n    end\n  end\n\n  test 'error message for invalid boolean operand' do\n    @controller = Arvados::V1::GroupsController.new\n    authorize_with :active\n    get :index, params: {\n      filters: [['is_trashed', '=', 'fourty']],\n    }\n    assert_response 422\n    assert_match(/Invalid operand .* boolean attribute/, json_response['errors'].join(' '))\n  end\n\n  test 'api responses provide timestamps with nanoseconds' do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with :active\n    get :index\n    assert_response :success\n    assert_not_empty json_response['items']\n    json_response['items'].each do |item|\n      %w(created_at modified_at).each do |attr|\n        # Pass fixtures with null timestamps.\n        next if item[attr].nil?\n        assert_match(/^\\d{4}-\\d\\d-\\d\\dT\\d\\d:\\d\\d:\\d\\d.\\d{9}Z$/, item[attr])\n      end\n    end\n  end\n\n  %w(< > <= >= =).each do |operator|\n    test \"timestamp #{operator} filters work with nanosecond precision\" do\n      # Python clients like Node Manager rely on this exact format.\n      # If you must change this 
format for some reason, make sure you\n      # coordinate the change with them.\n      expect_match = !!operator.index('=')\n      mine = act_as_user users(:active) do\n        Collection.create!(manifest_text: '')\n      end\n      timestamp = mine.modified_at.strftime('%Y-%m-%dT%H:%M:%S.%NZ')\n      @controller = Arvados::V1::CollectionsController.new\n      authorize_with :active\n      get :index, params: {\n        filters: [['modified_at', operator, timestamp],\n                  ['uuid', '=', mine.uuid]],\n      }\n      assert_response :success\n      uuids = json_response['items'].map { |item| item['uuid'] }\n      if expect_match\n        assert_includes uuids, mine.uuid\n      else\n        assert_not_includes uuids, mine.uuid\n      end\n    end\n  end\n\n  [['prop1', '=', 'value1', [:collection_with_prop1_value1], [:collection_with_prop1_value2, :collection_with_prop2_1]],\n   ['prop1', '!=', 'value1', [:collection_with_prop1_value2, :collection_with_prop2_1], [:collection_with_prop1_value1]],\n   ['prop1', 'exists', true, [:collection_with_prop1_value1, :collection_with_prop1_value2, :collection_with_prop1_value3, :collection_with_prop1_other1], [:collection_with_prop2_1]],\n   ['prop1', 'exists', false, [:collection_with_prop2_1], [:collection_with_prop1_value1, :collection_with_prop1_value2, :collection_with_prop1_value3, :collection_with_prop1_other1]],\n   ['prop1', 'in', ['value1', 'value2'], [:collection_with_prop1_value1, :collection_with_prop1_value2], [:collection_with_prop1_value3, :collection_with_prop2_1]],\n   ['prop1', 'in', ['value1', 'valueX'], [:collection_with_prop1_value1], [:collection_with_prop1_value3, :collection_with_prop2_1]],\n   ['prop1', 'not in', ['value1', 'value2'], [:collection_with_prop1_value3, :collection_with_prop1_other1, :collection_with_prop2_1], [:collection_with_prop1_value1, :collection_with_prop1_value2]],\n   ['prop1', 'not in', ['value1', 'valueX'], [:collection_with_prop1_value2, :collection_with_prop1_value3, :collection_with_prop1_other1, :collection_with_prop2_1], [:collection_with_prop1_value1]],\n   ['prop1', '>', 'value2', [:collection_with_prop1_value3], [:collection_with_prop1_other1, :collection_with_prop1_value1]],\n   ['prop1', '<', 'value2', [:collection_with_prop1_other1, :collection_with_prop1_value1], [:collection_with_prop1_value2, :collection_with_prop1_value3]],\n   ['prop1', '<=', 'value2', [:collection_with_prop1_other1, :collection_with_prop1_value1, :collection_with_prop1_value2], [:collection_with_prop1_value3]],\n   ['prop1', '>=', 'value2', [:collection_with_prop1_value2, :collection_with_prop1_value3], [:collection_with_prop1_other1, :collection_with_prop1_value1]],\n   ['prop1', 'like', 'value%', [:collection_with_prop1_value1, :collection_with_prop1_value2, :collection_with_prop1_value3], [:collection_with_prop1_other1]],\n   ['prop1', 'like', '%1', [:collection_with_prop1_value1, :collection_with_prop1_other1], [:collection_with_prop1_value2, :collection_with_prop1_value3]],\n   ['prop1', 'ilike', 'VALUE%', [:collection_with_prop1_value1, :collection_with_prop1_value2, :collection_with_prop1_value3], [:collection_with_prop1_other1]],\n   ['prop2', '>',  1, [:collection_with_prop2_5], [:collection_with_prop2_1]],\n   ['prop2', '<',  5, [:collection_with_prop2_1], [:collection_with_prop2_5]],\n   ['prop2', '<=', 5, [:collection_with_prop2_1, :collection_with_prop2_5], []],\n   ['prop2', '>=', 1, [:collection_with_prop2_1, :collection_with_prop2_5], []],\n   ['<http://schema.org/example>', '=', 
\"value1\", [:collection_with_uri_prop], []],\n   ['listprop', 'contains', 'elem1', [:collection_with_list_prop_odd, :collection_with_listprop_elem1], [:collection_with_list_prop_even]],\n   ['listprop', '=', 'elem1', [:collection_with_listprop_elem1], [:collection_with_list_prop_odd]],\n   ['listprop', 'contains', 5, [:collection_with_list_prop_odd], [:collection_with_list_prop_even, :collection_with_listprop_elem1]],\n   ['listprop', 'contains', 'elem2', [:collection_with_list_prop_even], [:collection_with_list_prop_odd, :collection_with_listprop_elem1]],\n   ['listprop', 'contains', 'ELEM2', [], [:collection_with_list_prop_even]],\n   ['listprop', 'contains', 'elem8', [], [:collection_with_list_prop_even]],\n   ['listprop', 'contains', 4, [:collection_with_list_prop_even], [:collection_with_list_prop_odd, :collection_with_listprop_elem1]],\n  ].each do |prop, op, opr, inc, ex|\n    test \"jsonb filter properties.#{prop} #{op} #{opr})\" do\n      @controller = Arvados::V1::CollectionsController.new\n      authorize_with :admin\n      get :index, params: {\n            filters: SafeJSON.dump([ [\"properties.#{prop}\", op, opr] ]),\n            limit: 1000\n          }\n      assert_response :success\n      found = assigns(:objects).collect(&:uuid)\n\n      inc.each do |i|\n        assert_includes(found, collections(i).uuid)\n      end\n\n      ex.each do |e|\n        assert_not_includes(found, collections(e).uuid)\n      end\n    end\n  end\n\n  test \"jsonb hash 'exists' and '!=' filter\" do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['properties.prop1', 'exists', true], ['properties.prop1', '!=', 'value1'] ]\n    }\n    assert_response :success\n    found = assigns(:objects).collect(&:uuid)\n    assert_equal found.length, 3\n    assert_not_includes(found, collections(:collection_with_prop1_value1).uuid)\n    assert_includes(found, collections(:collection_with_prop1_value2).uuid)\n    assert_includes(found, collections(:collection_with_prop1_value3).uuid)\n    assert_includes(found, collections(:collection_with_prop1_other1).uuid)\n  end\n\n  test \"jsonb array 'exists'\" do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['storage_classes_confirmed.default', 'exists', true] ]\n    }\n    assert_response :success\n    found = assigns(:objects).collect(&:uuid)\n    assert_equal 2, found.length\n    assert_not_includes(found,\n      collections(:storage_classes_desired_default_unconfirmed).uuid)\n    assert_includes(found,\n      collections(:storage_classes_desired_default_confirmed_default).uuid)\n    assert_includes(found,\n      collections(:storage_classes_desired_archive_confirmed_default).uuid)\n  end\n\n  test \"jsonb hash alternate form 'exists' and '!=' filter\" do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['properties', 'exists', 'prop1'], ['properties.prop1', '!=', 'value1'] ]\n    }\n    assert_response :success\n    found = assigns(:objects).collect(&:uuid)\n    assert_equal found.length, 3\n    assert_not_includes(found, collections(:collection_with_prop1_value1).uuid)\n    assert_includes(found, collections(:collection_with_prop1_value2).uuid)\n    assert_includes(found, collections(:collection_with_prop1_value3).uuid)\n    assert_includes(found, collections(:collection_with_prop1_other1).uuid)\n  end\n\n  test \"jsonb 
array alternate form 'exists' filter\" do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['storage_classes_confirmed', 'exists', 'default'] ]\n    }\n    assert_response :success\n    found = assigns(:objects).collect(&:uuid)\n    assert_equal 2, found.length\n    assert_not_includes(found,\n      collections(:storage_classes_desired_default_unconfirmed).uuid)\n    assert_includes(found,\n      collections(:storage_classes_desired_default_confirmed_default).uuid)\n    assert_includes(found,\n      collections(:storage_classes_desired_archive_confirmed_default).uuid)\n  end\n\n  test \"jsonb 'exists' must be boolean\" do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['properties.prop1', 'exists', nil] ]\n    }\n    assert_response 422\n    assert_match(/Invalid operand '' for 'exists' must be true or false/,\n                 json_response['errors'].join(' '))\n  end\n\n  test \"jsonb checks column exists\" do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['puppies.prop1', '=', 'value1'] ]\n    }\n    assert_response 422\n    assert_match(/Invalid attribute 'puppies' for subproperty filter/,\n                 json_response['errors'].join(' '))\n  end\n\n  test \"jsonb checks column is valid\" do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['name.prop1', '=', 'value1'] ]\n    }\n    assert_response 422\n    assert_match(/Invalid attribute 'name' for subproperty filter/,\n                 json_response['errors'].join(' '))\n  end\n\n  test \"jsonb invalid operator\" do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['properties.prop1', '###', 'value1'] ]\n    }\n    assert_response 422\n    assert_match(/Invalid operator for subproperty search '###'/,\n                 json_response['errors'].join(' '))\n  end\n\n  test \"groups contents with properties filter succeeds on objects with properties field\" do\n    @controller = Arvados::V1::GroupsController.new\n    authorize_with :admin\n    get :contents, params: {\n      filters: [\n        ['properties', 'exists', 'foo'],\n        ['uuid', 'is_a', [\"arvados#group\",\"arvados#collection\",\"arvados#containerRequest\"]],\n      ]\n    }\n    assert_response 200\n    assert json_response['items'].length == 0\n  end\n\n  # Tests bug #19297\n  test \"groups contents with properties filter succeeds on some objects with properties field\" do\n    @controller = Arvados::V1::GroupsController.new\n    authorize_with :admin\n    get :contents, params: {\n      filters: [\n        ['properties', 'exists', 'foo'],\n        ['uuid', 'is_a', [\"arvados#group\",\"arvados#workflow\"]],\n      ]\n    }\n    assert_response 200\n    assert json_response['items'].length == 0\n  end\n\n  # Tests bug #19297\n  test \"groups contents with properties filter fails on objects without properties field\" do\n    @controller = Arvados::V1::GroupsController.new\n    authorize_with :admin\n    get :contents, params: {\n      filters: [\n        ['properties', 'exists', 'foo'],\n        ['uuid', 'is_a', [\"arvados#workflow\"]],\n      ]\n    }\n    assert_response 422\n    assert_match(/Invalid attribute 'properties' for operator 'exists'.*on object type 
Workflow/, json_response['errors'].join(' '))\n  end\n\n  test \"groups contents without filters and limit=0, count=none\" do\n    @controller = Arvados::V1::GroupsController.new\n    authorize_with :admin\n    get :contents, params: {\n      limit: 0,\n      count: 'none',\n    }\n    assert_response 200\n    assert json_response['items'].length == 0\n  end\n\n  test \"replication_desired = 2\" do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with :admin\n    get :index, params: {\n      filters: SafeJSON.dump([ ['replication_desired', '=', 2] ])\n    }\n    assert_response :success\n    found = assigns(:objects).collect(&:uuid)\n    assert_includes(found, collections(:replication_desired_2_unconfirmed).uuid)\n    assert_includes(found, collections(:replication_desired_2_confirmed_2).uuid)\n  end\n\n  [\n    [1, \"foo\"],\n    [1, [\"foo\"]],\n    [1, [\"bar\"]],\n    [1, [\"bar\", \"foo\"]],\n    [0, [\"foo\", \"qux\"]],\n    [0, [\"qux\"]],\n    [nil, []],\n    [nil, [[]]],\n    [nil, [[\"bogus\"]]],\n    [nil, [{\"foo\" => \"bar\"}]],\n    [nil, {\"foo\" => \"bar\"}],\n  ].each do |results, operand|\n    test \"storage_classes_desired contains #{operand.inspect}\" do\n      @controller = Arvados::V1::CollectionsController.new\n      authorize_with(:active)\n      c = Collection.create!(\n        manifest_text: \"\",\n        storage_classes_desired: [\"foo\", \"bar\", \"baz\"])\n      get :index, params: {\n            filters: [[\"storage_classes_desired\", \"contains\", operand]],\n          }\n      if results.nil?\n        assert_response 422\n        next\n      end\n      assert_response :success\n      assert_equal results, json_response[\"items\"].length\n      if results > 0\n        assert_equal c.uuid, json_response[\"items\"][0][\"uuid\"]\n      end\n    end\n  end\n\n  test \"collections properties contains top level key\" do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with(:active)\n    get :index, params: {\n          filters: [[\"properties\", \"contains\", \"prop1\"]],\n        }\n    assert_response :success\n    assert_not_empty json_response[\"items\"]\n    json_response[\"items\"].each do |c|\n      assert c[\"properties\"].has_key?(\"prop1\")\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/groups_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::GroupsControllerTest < ActionController::TestCase\n\n  test \"attempt to delete group that cannot be seen\" do\n    Rails.configuration.Users.RoleGroupsVisibleToAll = false\n    authorize_with :active\n    post :destroy, params: {id: groups(:empty_lonely_group).uuid}\n    assert_response 404\n  end\n\n  test \"attempt to delete group without read or write access\" do\n    authorize_with :active\n    post :destroy, params: {id: groups(:empty_lonely_group).uuid}\n    assert_response 403\n  end\n\n  test \"attempt to delete group without write access\" do\n    authorize_with :active\n    post :destroy, params: {id: groups(:all_users).uuid}\n    assert_response 403\n  end\n\n  test \"get list of projects\" do\n    authorize_with :active\n    get :index, params: {filters: [['group_class', '=', 'project']], format: :json}\n    assert_response :success\n    group_uuids = []\n    json_response['items'].each do |group|\n      assert_equal 'project', group['group_class']\n      group_uuids << group['uuid']\n    end\n    assert_includes group_uuids, groups(:aproject).uuid\n    assert_includes group_uuids, groups(:asubproject).uuid\n    assert_includes group_uuids, groups(:private).uuid\n    assert_not_includes group_uuids, groups(:system_group).uuid\n    assert_not_includes group_uuids, groups(:private_and_can_read_foofile).uuid\n  end\n\n  test \"get list of groups that are not projects\" do\n    authorize_with :active\n    get :index, params: {filters: [['group_class', '!=', 'project']], format: :json}\n    assert_response :success\n    group_uuids = []\n    json_response['items'].each do |group|\n      assert_not_equal 'project', group['group_class']\n      group_uuids << group['uuid']\n    end\n    assert_not_includes group_uuids, groups(:aproject).uuid\n    assert_not_includes group_uuids, groups(:asubproject).uuid\n  end\n\n  test \"get list of groups with bogus group_class\" do\n    authorize_with :active\n    get :index, params: {\n      filters: [['group_class', '=', 'nogrouphasthislittleclass']],\n      format: :json,\n    }\n    assert_response :success\n    assert_equal [], json_response['items']\n    assert_equal 0, json_response['items_available']\n  end\n\n  def check_project_contents_response\n    assert_response :success\n    assert_operator 2, :<=, json_response['items_available']\n    assert_operator 2, :<=, json_response['items'].count\n    kinds = json_response['items'].collect { |i| i['kind'] }.uniq\n    expect_kinds = %w'arvados#group'\n    assert_equal expect_kinds, (expect_kinds & kinds)\n\n    json_response['items'].each do |i|\n      if i['kind'] == 'arvados#group'\n        assert(i['group_class'] == 'project',\n               \"group#contents returned a non-project group\")\n      end\n    end\n  end\n\n  test 'get group-owned objects' do\n    authorize_with :active\n    get :contents, params: {\n      id: groups(:aproject).uuid,\n      format: :json,\n    }\n    check_project_contents_response\n  end\n\n  test \"user with project read permission can see project objects\" do\n    authorize_with :project_viewer\n    get :contents, params: {\n      id: groups(:aproject).uuid,\n      format: :json,\n    }\n    check_project_contents_response\n  end\n\n  test \"list objects across projects\" do\n    authorize_with :project_viewer\n    get :contents, params: {\n      format: :json,\n      filters: [['uuid', 'is_a', 
'arvados#collection']]\n    }\n    assert_response :success\n    found_uuids = json_response['items'].collect { |i| i['uuid'] }\n    [[:foo_collection_in_aproject, true],\n     [:baz_collection_name_in_asubproject, true],\n     [:collection_not_readable_by_active, false]].each do |collection_fixture, should_find|\n      if should_find\n        assert_includes found_uuids, collections(collection_fixture).uuid, \"did not find collection fixture '#{collection_fixture}'\"\n      else\n        refute_includes found_uuids, collections(collection_fixture).uuid, \"found collection fixture '#{collection_fixture}'\"\n      end\n    end\n  end\n\n  test \"list trashed collections and projects\" do\n    authorize_with :active\n    get(:contents, params: {\n          format: :json,\n          include_trash: true,\n          filters: [\n            ['uuid', 'is_a', ['arvados#collection', 'arvados#group']],\n            ['is_trashed', '=', true],\n          ],\n          limit: 10000,\n        })\n    assert_response :success\n    found_uuids = json_response['items'].collect { |i| i['uuid'] }\n    assert_includes found_uuids, groups(:trashed_project).uuid\n    refute_includes found_uuids, groups(:aproject).uuid\n    assert_includes found_uuids, collections(:expired_collection).uuid\n    refute_includes found_uuids, collections(:w_a_z_file).uuid\n  end\n\n  test \"list objects in home project\" do\n    authorize_with :active\n    get :contents, params: {\n      format: :json,\n      limit: 200,\n      id: users(:active).uuid\n    }\n    assert_response :success\n    found_uuids = json_response['items'].collect { |i| i['uuid'] }\n    assert_includes found_uuids, collections(:collection_owned_by_active).uuid, \"collection did not appear in home project\"\n    refute_includes found_uuids, collections(:foo_collection_in_aproject).uuid, \"collection appeared unexpectedly in home project\"\n  end\n\n  test \"list collections in home project\" do\n    authorize_with :active\n    get(:contents, params: {\n          format: :json,\n          filters: [\n            ['uuid', 'is_a', 'arvados#collection'],\n          ],\n          limit: 200,\n          id: users(:active).uuid,\n        })\n    assert_response :success\n    found_uuids = json_response['items'].collect { |i| i['uuid'] }\n    assert_includes found_uuids, collections(:collection_owned_by_active).uuid, \"collection did not appear in home project\"\n    refute_includes found_uuids, collections(:collection_owned_by_active_past_version_1).uuid, \"collection appeared unexpectedly in home project\"\n  end\n\n  test \"list collections in home project, including old versions\" do\n    authorize_with :active\n    get(:contents, params: {\n          format: :json,\n          include_old_versions: true,\n          filters: [\n            ['uuid', 'is_a', 'arvados#collection'],\n          ],\n          limit: 200,\n          id: users(:active).uuid,\n        })\n    assert_response :success\n    found_uuids = json_response['items'].collect { |i| i['uuid'] }\n    assert_includes found_uuids, collections(:collection_owned_by_active).uuid, \"collection did not appear in home project\"\n    assert_includes found_uuids, collections(:collection_owned_by_active_past_version_1).uuid, \"old collection version did not appear in home project\"\n  end\n\n  test \"user with project read permission can see project collections\" do\n    authorize_with :project_viewer\n    get :contents, params: {\n      id: groups(:asubproject).uuid,\n      format: :json,\n    }\n    ids = 
json_response['items'].map { |item| item[\"uuid\"] }\n    assert_includes ids, collections(:baz_file_in_asubproject).uuid\n  end\n\n  [\n    ['collections.name', 'asc', :<=, \"name\"],\n    ['collections.name', 'desc', :>=, \"name\"],\n    ['name', 'asc', :<=, \"name\"],\n    ['name', 'desc', :>=, \"name\"],\n    ['collections.created_at', 'asc', :<=, \"created_at\"],\n    ['collections.created_at', 'desc', :>=, \"created_at\"],\n    ['created_at', 'asc', :<=, \"created_at\"],\n    ['created_at', 'desc', :>=, \"created_at\"],\n  ].each do |column, order, operator, field|\n    test \"user with project read permission can sort projects on #{column} #{order}\" do\n      authorize_with :project_viewer\n      get :contents, params: {\n        id: groups(:asubproject).uuid,\n        format: :json,\n        filters: [['uuid', 'is_a', \"arvados#collection\"]],\n        order: \"#{column} #{order}\"\n      }\n      sorted_values = json_response['items'].collect { |item| item[field] }\n      if field == \"name\"\n        # Here we avoid assuming too much about the database\n        # collation. Both \"alice\"<\"Bob\" and \"alice\">\"Bob\" can be\n        # correct. Hopefully it _is_ safe to assume that if \"a\" comes\n        # before \"b\" in the ascii alphabet, \"aX\">\"bY\" is never true for\n        # any strings X and Y.\n        reliably_sortable_names = sorted_values.select do |name|\n          name[0] >= 'a' && name[0] <= 'z'\n        end.uniq do |name|\n          name[0]\n        end\n        # Preserve order of sorted_values. But do not use &=. If\n        # sorted_values has out-of-order duplicates, we want to preserve\n        # them here, so we can detect them and fail the test below.\n        sorted_values.select! do |name|\n          reliably_sortable_names.include? 
name\n        end\n      end\n      assert_sorted(operator, sorted_values)\n    end\n  end\n\n  def assert_sorted(operator, sorted_items)\n    actually_checked_anything = false\n    previous = nil\n    sorted_items.each do |entry|\n      if !previous.nil?\n        assert_operator(previous, operator, entry,\n                        \"Entries sorted incorrectly.\")\n        actually_checked_anything = true\n      end\n      previous = entry\n    end\n    assert actually_checked_anything, \"Didn't even find two items to compare.\"\n  end\n\n  # Even though the project_viewer tests go through other controllers,\n  # I'm putting them here so they're easy to find alongside the other\n  # project tests.\n  def check_new_project_link_fails(link_attrs)\n    @controller = Arvados::V1::LinksController.new\n    post :create, params: {\n      link: {\n        link_class: \"permission\",\n        name: \"can_read\",\n        head_uuid: groups(:aproject).uuid,\n      }.merge(link_attrs)\n    }\n    assert_includes(403..422, response.status)\n  end\n\n  test \"user with project read permission can't add users to it\" do\n    authorize_with :project_viewer\n    check_new_project_link_fails(tail_uuid: users(:spectator).uuid)\n  end\n\n  test \"user with project read permission can't add items to it\" do\n    authorize_with :project_viewer\n    check_new_project_link_fails(tail_uuid: collections(:baz_file).uuid)\n  end\n\n  test \"user with project read permission can't rename items in it\" do\n    authorize_with :project_viewer\n    @controller = Arvados::V1::CollectionsController.new\n    post :update, params: {\n      id: collections(:collection_to_search_for_in_aproject).uuid,\n      name: \"Denied test name\",\n    }\n    assert_includes(403..404, response.status)\n  end\n\n  test \"user with project read permission can't remove items from it\" do\n    @controller = Arvados::V1::CollectionsController.new\n    authorize_with :project_viewer\n    post :update, params: {\n      id: collections(:collection_to_search_for_in_aproject).uuid,\n      collection: {\n        owner_uuid: users(:project_viewer).uuid,\n      }\n    }\n    assert_response 403\n  end\n\n  test \"user with project read permission can't delete it\" do\n    authorize_with :project_viewer\n    post :destroy, params: {id: groups(:aproject).uuid}\n    assert_response 403\n  end\n\n  test 'get group-owned objects with limit' do\n    authorize_with :active\n    get :contents, params: {\n      id: groups(:aproject).uuid,\n      limit: 1,\n      format: :json,\n    }\n    assert_response :success\n    assert_operator 1, :<, json_response['items_available']\n    assert_equal 1, json_response['items'].count\n  end\n\n  test 'get group-owned objects with limit and offset' do\n    authorize_with :active\n    get :contents, params: {\n      id: groups(:aproject).uuid,\n      limit: 1,\n      offset: 12345,\n      format: :json,\n    }\n    assert_response :success\n    assert_operator 1, :<, json_response['items_available']\n    assert_equal 0, json_response['items'].count\n  end\n\n  test 'get group-owned objects with select' do\n    authorize_with :active\n    get :contents, params: {\n      id: groups(:aproject).uuid,\n      limit: 100,\n      format: :json,\n      select: [\"uuid\", \"storage_classes_desired\"]\n    }\n    assert_response :success\n    assert_equal 6, json_response['items_available']\n    assert_equal 6, json_response['items'].count\n    json_response['items'].each do |item|\n      # Expect collections to have a storage_classes 
field, other items should not.\n      if item[\"kind\"] == \"arvados#collection\"\n        assert !item[\"storage_classes_desired\"].nil?\n      else\n        assert item[\"storage_classes_desired\"].nil?\n      end\n    end\n  end\n\n  test 'get objects with ambiguous column name in order param' do\n    authorize_with :active\n    get :contents, params: {\n      format: :json,\n      # bug #22785 (\"ambiguous column reference\") only occurred when\n      # filtering by a column in a joined table...\n      filters: [[\"uuid\", \"is_a\", \"arvados#containerRequest\"],\n                [\"container.state\", \"in\", [\"Queued\",\"Locked\"]]],\n      # ...and ordering by a column that is in both primary and joined\n      # tables.\n      order: \"created_at desc\",\n    }\n    assert_response :success\n  end\n\n  test 'get group-owned objects with invalid field in select' do\n    authorize_with :active\n    get :contents, params: {\n      id: groups(:aproject).uuid,\n      limit: 100,\n      format: :json,\n      select: [\"uuid\", \"storage_classes_desire\"]\n    }\n    assert_response 422\n  end\n\n  test 'get group-owned objects with additional filter matching nothing' do\n    authorize_with :active\n    get :contents, params: {\n      id: groups(:aproject).uuid,\n      filters: [['uuid', 'in', ['foo_not_a_uuid','bar_not_a_uuid']]],\n      format: :json,\n    }\n    assert_response :success\n    assert_equal [], json_response['items']\n    assert_equal 0, json_response['items_available']\n  end\n\n  %w(offset limit).each do |arg|\n    ['foo', '', '1234five', '0x10', '-8'].each do |val|\n      test \"Raise error on bogus #{arg} parameter #{val.inspect}\" do\n        authorize_with :active\n        get :contents, params: {\n          :id => groups(:aproject).uuid,\n          :format => :json,\n          arg => val,\n        }\n        assert_response 422\n      end\n    end\n  end\n\n  test \"Collection contents don't include manifest_text or unsigned_manifest_text\" do\n    authorize_with :active\n    get :contents, params: {\n      id: groups(:aproject).uuid,\n      filters: [[\"uuid\", \"is_a\", \"arvados#collection\"]],\n      format: :json,\n    }\n    assert_response :success\n    refute(json_response[\"items\"].any? { |c| not c[\"portable_data_hash\"] },\n           \"response included an item without a portable data hash\")\n    refute(json_response[\"items\"].any? { |c| c.include?(\"manifest_text\") },\n           \"response included an item with manifest_text\")\n    refute(json_response[\"items\"].any? 
{ |c| c.include?(\"unsigned_manifest_text\") },\n           \"response included an item with unsigned_manifest_text\")\n  end\n\n  test 'get writable_by list for owned group' do\n    authorize_with :active\n    get :show, params: {\n      id: groups(:aproject).uuid,\n      format: :json\n    }\n    assert_response :success\n    assert_not_nil(json_response['writable_by'],\n                   \"Should receive uuid list in 'writable_by' field\")\n    assert_includes(json_response['writable_by'], users(:active).uuid,\n                    \"owner should be included in writable_by list\")\n  end\n\n  test 'no writable_by list for group with read-only access' do\n    authorize_with :rominiadmin\n    get :show, params: {\n      id: groups(:testusergroup_admins).uuid,\n      format: :json\n    }\n    assert_response :success\n    assert_equal([json_response['owner_uuid']],\n                 json_response['writable_by'],\n                 \"Should only see owner_uuid in 'writable_by' field\")\n  end\n\n  test 'get writable_by list by admin user' do\n    authorize_with :admin\n    get :show, params: {\n      id: groups(:testusergroup_admins).uuid,\n      format: :json\n    }\n    assert_response :success\n    assert_not_nil(json_response['writable_by'],\n                   \"Should receive uuid list in 'writable_by' field\")\n    assert_includes(json_response['writable_by'],\n                    users(:admin).uuid,\n                    \"Current user should be included in 'writable_by' field\")\n  end\n\n  test 'creating subproject with duplicate name fails' do\n    authorize_with :active\n    post :create, params: {\n      group: {\n        name: 'A Project',\n        owner_uuid: users(:active).uuid,\n        group_class: 'project',\n      },\n    }\n    assert_response 422\n    response_errors = json_response['errors']\n    assert_not_nil response_errors, 'Expected error in response'\n    assert(response_errors.first.include?('duplicate key'),\n           \"Expected 'duplicate key' error in #{response_errors.first}\")\n  end\n\n  test 'creating duplicate named subproject succeeds with ensure_unique_name' do\n    authorize_with :active\n    post :create, params: {\n      group: {\n        name: 'A Project',\n        owner_uuid: users(:active).uuid,\n        group_class: 'project',\n      },\n      ensure_unique_name: true\n    }\n    assert_response :success\n    new_project = json_response\n    assert_not_equal(new_project['uuid'],\n                     groups(:aproject).uuid,\n                     \"create returned same uuid as existing project\")\n    assert_match(/^A Project \\(#{new_project['uuid'][-15..-1]}\\)$/,\n                 new_project['name'])\n  end\n\n  [\n    [['owner_uuid', '!=', 'zzzzz-tpzed-xurymjxw79nv3jz'], 200,\n        'zzzzz-j7d0g-publicfavorites', 'zzzzz-xvhdp-cr4queuedcontnr'],\n    [[\"container_requests.state\", \"not in\", [\"Final\"]], 200,\n        'zzzzz-xvhdp-cr4queuedcontnr', 'zzzzz-xvhdp-cr4completedctr'],\n    [['container_requests.requesting_container_uuid', '=', nil], 200,\n        'zzzzz-xvhdp-cr4queuedcontnr', 'zzzzz-xvhdp-cr4requestercn2'],\n    [['container_requests.no_such_column', '=', nil], 422],\n    [['container_requests.', '=', nil], 422],\n    [['.requesting_container_uuid', '=', nil], 422],\n    [['no_such_table.uuid', '!=', 'zzzzz-tpzed-xurymjxw79nv3jz'], 422],\n    [[\"container.state\", \"=\", \"Complete\"], 200,\n        'zzzzz-xvhdp-cr4completedctr', 'zzzzz-xvhdp-cr5trashedcontr'],\n  ].each do |filter, expect_code, expect_uuid, 
not_expect_uuid|\n    test \"get contents with '#{filter}' filter\" do\n      authorize_with :active\n      get :contents, params: {filters: [filter], format: :json}\n      assert_response expect_code\n      if expect_code == 200\n        assert_not_empty json_response['items']\n        item_uuids = json_response['items'].collect {|item| item['uuid']}\n        assert_includes(item_uuids, expect_uuid)\n        assert_not_includes(item_uuids, not_expect_uuid)\n      end\n    end\n  end\n\n  test 'get contents with low max_index_database_read' do\n    # Some result will certainly have at least 12 bytes in a\n    # restricted column.\n    #\n    # We cannot use collections.manifest_text to test this, because\n    # GroupsController refuses to select manifest_text, because\n    # controller doesn't sign manifests in a groups#contents response.\n    Rails.configuration.API.MaxIndexDatabaseRead = 12\n    authorize_with :active\n    get :contents, params: {\n          id: users(:active).uuid,\n          format: :json,\n        }\n    assert_response :success\n    assert_not_empty(json_response['items'])\n    assert_operator(json_response['items'].count,\n                    :<, json_response['items_available'])\n  end\n\n  test 'get contents, recursive=true' do\n    authorize_with :active\n    params = {\n      id: groups(:aproject).uuid,\n      recursive: true,\n      format: :json,\n    }\n    get :contents, params: params\n    owners = json_response['items'].map do |item|\n      item['owner_uuid']\n    end\n    assert_includes(owners, groups(:aproject).uuid)\n    assert_includes(owners, groups(:asubproject).uuid)\n  end\n\n  [false, nil].each do |recursive|\n    test \"get contents, recursive=#{recursive.inspect}\" do\n      authorize_with :active\n      params = {\n        id: groups(:aproject).uuid,\n        format: :json,\n      }\n      params[:recursive] = false if recursive == false\n      get :contents, params: params\n      owners = json_response['items'].map do |item|\n        item['owner_uuid']\n      end\n      assert_includes(owners, groups(:aproject).uuid)\n      refute_includes(owners, groups(:asubproject).uuid)\n    end\n  end\n\n  test 'get home project contents, recursive=true' do\n    authorize_with :active\n    get :contents, params: {\n          id: users(:active).uuid,\n          recursive: true,\n          format: :json,\n        }\n    owners = json_response['items'].map do |item|\n      item['owner_uuid']\n    end\n    assert_includes(owners, users(:active).uuid)\n    assert_includes(owners, groups(:aproject).uuid)\n    assert_includes(owners, groups(:asubproject).uuid)\n  end\n\n  [:afiltergroup, :private_role].each do |grp|\n    test \"delete non-project group #{grp}\" do\n      authorize_with :admin\n      assert_not_nil Group.find_by_uuid(groups(grp).uuid)\n      assert !Group.find_by_uuid(groups(grp).uuid).is_trashed\n      post :destroy, params: {\n            id: groups(grp).uuid,\n            format: :json,\n          }\n      assert_response :success\n      # Should be trashed\n      assert Group.find_by_uuid(groups(grp).uuid).is_trashed\n    end\n  end\n\n  [\n    [false, :inactive, :private_role, false],\n    [false, :spectator, :private_role, false],\n    [false, :admin, :private_role, true],\n    [true, :inactive, :private_role, false],\n    [true, :spectator, :private_role, true],\n    [true, :admin, :private_role, true],\n    # project (non-role) groups are invisible even when RoleGroupsVisibleToAll is true\n    [true, :inactive, :private, false],\n    
[true, :spectator, :private, false],\n    [true, :admin, :private, true],\n  ].each do |visibleToAll, userFixture, groupFixture, visible|\n    test \"with RoleGroupsVisibleToAll=#{visibleToAll}, #{groupFixture} group is #{visible ? '' : 'in'}visible to #{userFixture} user\" do\n      Rails.configuration.Users.RoleGroupsVisibleToAll = visibleToAll\n      authorize_with userFixture\n      get :show, params: {id: groups(groupFixture).uuid, format: :json}\n      if visible\n        assert_response :success\n      else\n        assert_response 404\n      end\n    end\n  end\n\n  ### trashed project tests ###\n\n  #\n  # The structure is\n  #\n  # trashed_project         (zzzzz-j7d0g-trashedproject1)\n  #   trashed_subproject    (zzzzz-j7d0g-trashedproject2)\n  #   trashed_subproject3   (zzzzz-j7d0g-trashedproject3)\n  #   zzzzz-xvhdp-cr5trashedcontr\n\n  [:active,\n   :admin].each do |auth|\n    # project: to query,    to untrash,    is visible, parent contents listing success\n    [\n     [:trashed_project,     [],                 false, true],\n     [:trashed_project,     [:trashed_project], true,  true],\n     [:trashed_subproject,  [],                 false, false],\n     [:trashed_subproject,  [:trashed_project], true,  true],\n     [:trashed_subproject3, [:trashed_project], false, true],\n     [:trashed_subproject3, [:trashed_subproject3], false, false],\n     [:trashed_subproject3, [:trashed_project, :trashed_subproject3], true, true],\n    ].each do |project, untrash, visible, success|\n\n      test \"contents listing #{project} #{untrash} as #{auth}\" do\n        authorize_with auth\n        untrash.each do |pr|\n          Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false\n        end\n        get :contents, params: {\n              id: groups(project).owner_uuid,\n              format: :json\n            }\n        if success\n          assert_response :success\n          item_uuids = json_response['items'].map do |item|\n            item['uuid']\n          end\n          if visible\n            assert_includes(item_uuids, groups(project).uuid)\n          else\n            assert_not_includes(item_uuids, groups(project).uuid)\n          end\n        else\n          assert_response 404\n        end\n      end\n\n      test \"contents of #{project} #{untrash} as #{auth}\" do\n        authorize_with auth\n        untrash.each do |pr|\n          Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false\n        end\n        get :contents, params: {\n              id: groups(project).uuid,\n              format: :json\n            }\n        if visible\n          assert_response :success\n        else\n          assert_response 404\n        end\n      end\n\n      test \"index #{project} #{untrash} as #{auth}\" do\n        authorize_with auth\n        untrash.each do |pr|\n          Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false\n        end\n        get :index, params: {\n              format: :json,\n            }\n        assert_response :success\n        item_uuids = json_response['items'].map do |item|\n          item['uuid']\n        end\n        if visible\n          assert_includes(item_uuids, groups(project).uuid)\n        else\n          assert_not_includes(item_uuids, groups(project).uuid)\n        end\n      end\n\n      test \"show #{project} #{untrash} as #{auth}\" do\n        authorize_with auth\n        untrash.each do |pr|\n          Group.find_by_uuid(groups(pr).uuid).update! 
is_trashed: false\n        end\n        get :show, params: {\n              id: groups(project).uuid,\n              format: :json\n            }\n        if visible\n          assert_response :success\n        else\n          assert_response 404\n        end\n      end\n\n      test \"show include_trash=false #{project} #{untrash} as #{auth}\" do\n        authorize_with auth\n        untrash.each do |pr|\n          Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false\n        end\n        get :show, params: {\n              id: groups(project).uuid,\n              format: :json,\n              include_trash: false\n            }\n        if visible\n          assert_response :success\n        else\n          assert_response 404\n        end\n      end\n\n      test \"show include_trash #{project} #{untrash} as #{auth}\" do\n        authorize_with auth\n        untrash.each do |pr|\n          Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false\n        end\n        get :show, params: {\n              id: groups(project).uuid,\n              format: :json,\n              include_trash: true\n            }\n        assert_response :success\n      end\n\n      test \"index include_trash #{project} #{untrash} as #{auth}\" do\n        authorize_with auth\n        untrash.each do |pr|\n          Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false\n        end\n        get :index, params: {\n              format: :json,\n              include_trash: true\n            }\n        assert_response :success\n        item_uuids = json_response['items'].map do |item|\n          item['uuid']\n        end\n        assert_includes(item_uuids, groups(project).uuid)\n      end\n    end\n\n    test \"delete project #{auth}\" do\n      authorize_with auth\n      [:trashed_project].each do |pr|\n        Group.find_by_uuid(groups(pr).uuid).update! is_trashed: false\n      end\n      assert !Group.find_by_uuid(groups(:trashed_project).uuid).is_trashed\n      post :destroy, params: {\n            id: groups(:trashed_project).uuid,\n            format: :json,\n          }\n      assert_response :success\n      assert Group.find_by_uuid(groups(:trashed_project).uuid).is_trashed\n    end\n\n    test \"untrash project #{auth}\" do\n      authorize_with auth\n      assert Group.find_by_uuid(groups(:trashed_project).uuid).is_trashed\n      post :untrash, params: {\n            id: groups(:trashed_project).uuid,\n            format: :json,\n          }\n      assert_response :success\n      assert !Group.find_by_uuid(groups(:trashed_project).uuid).is_trashed\n    end\n\n    test \"untrash project with name conflict #{auth}\" do\n      authorize_with auth\n      [:trashed_project].each do |pr|\n        Group.find_by_uuid(groups(pr).uuid).update! 
is_trashed: false\n      end\n      gc = Group.create!({owner_uuid: \"zzzzz-j7d0g-trashedproject1\",\n                         name: \"trashed subproject 3\",\n                         group_class: \"project\"})\n      post :untrash, params: {\n            id: groups(:trashed_subproject3).uuid,\n            format: :json,\n            ensure_unique_name: true\n           }\n      assert_response :success\n      assert_match /^trashed subproject 3 \\(#{json_response['uuid'][-15..-1]}\\)$/, json_response['name']\n    end\n\n    test \"move trashed subproject to new owner #{auth}\" do\n      authorize_with auth\n      assert_nil Group.readable_by(users(auth)).where(uuid: groups(:trashed_subproject).uuid).first\n      put :update, params: {\n            id: groups(:trashed_subproject).uuid,\n            group: {\n              owner_uuid: users(:active).uuid\n            },\n            include_trash: true,\n            format: :json,\n          }\n      assert_response :success\n      assert_not_nil Group.readable_by(users(auth)).where(uuid: groups(:trashed_subproject).uuid).first\n    end\n  end\n\n  # the group class overrides the destroy method. Make sure that the destroyed\n  # object is returned\n  [\n    {group_class: \"project\"},\n    {group_class: \"role\"},\n    {group_class: \"filter\", properties: {\"filters\":[]}},\n  ].each do |params|\n    test \"destroy group #{params} returns object\" do\n      authorize_with :active\n\n      group = Group.create!(params)\n\n      post :destroy, params: {\n            id: group.uuid,\n            format: :json,\n          }\n      assert_response :success\n      assert_not_nil json_response\n      assert_equal group.uuid, json_response[\"uuid\"]\n    end\n  end\n\n  test 'get shared owned by another user' do\n    authorize_with :user_bar_in_sharing_group\n\n    act_as_system_user do\n      Link.create!(\n        tail_uuid: users(:user_bar_in_sharing_group).uuid,\n        link_class: 'permission',\n        name: 'can_read',\n        head_uuid: groups(:project_owned_by_foo).uuid)\n    end\n\n    get :shared, params: {:filters => [[\"group_class\", \"=\", \"project\"]], :include => \"owner_uuid\"}\n\n    assert_equal 1, json_response['items'].length\n    assert_equal json_response['items'][0][\"uuid\"], groups(:project_owned_by_foo).uuid\n\n    assert_equal 1, json_response['included'].length\n    assert_equal json_response['included'][0][\"uuid\"], users(:user_foo_in_sharing_group).uuid\n  end\n\n  test 'get shared, owned by unreadable project' do\n    authorize_with :user_bar_in_sharing_group\n\n    act_as_system_user do\n      Group.find_by_uuid(groups(:project_owned_by_foo).uuid).update!(owner_uuid: groups(:aproject).uuid)\n      Link.create!(\n        tail_uuid: users(:user_bar_in_sharing_group).uuid,\n        link_class: 'permission',\n        name: 'can_read',\n        head_uuid: groups(:project_owned_by_foo).uuid)\n    end\n\n    get :shared, params: {:filters => [[\"group_class\", \"=\", \"project\"]], :include => \"owner_uuid\"}\n\n    assert_equal 1, json_response['items'].length\n    assert_equal json_response['items'][0][\"uuid\"], groups(:project_owned_by_foo).uuid\n\n    assert_equal 0, json_response['included'].length\n  end\n\n  test 'get shared, add permission link' do\n    authorize_with :user_bar_in_sharing_group\n\n    act_as_system_user do\n      Link.create!(tail_uuid: groups(:group_for_sharing_tests).uuid,\n                   head_uuid: groups(:project_owned_by_foo).uuid,\n                   link_class: 'permission',\n    
               name: 'can_manage')\n    end\n\n    get :shared, params: {:filters => [[\"group_class\", \"=\", \"project\"]], :include => \"owner_uuid\"}\n\n    assert_equal 1, json_response['items'].length\n    assert_equal groups(:project_owned_by_foo).uuid, json_response['items'][0][\"uuid\"]\n\n    assert_equal 1, json_response['included'].length\n    assert_equal users(:user_foo_in_sharing_group).uuid, json_response['included'][0][\"uuid\"]\n  end\n\n  ### contents with exclude_home_project\n\n  test 'contents, exclude home owned by another user' do\n    authorize_with :user_bar_in_sharing_group\n\n    act_as_system_user do\n      Link.create!(\n        tail_uuid: users(:user_bar_in_sharing_group).uuid,\n        link_class: 'permission',\n        name: 'can_read',\n        head_uuid: groups(:project_owned_by_foo).uuid)\n      Link.create!(\n        tail_uuid: users(:user_bar_in_sharing_group).uuid,\n        link_class: 'permission',\n        name: 'can_read',\n        head_uuid: collections(:collection_owned_by_foo).uuid)\n    end\n\n    get :contents, params: {:include => \"owner_uuid\", :exclude_home_project => true}\n\n    assert_equal 2, json_response['items'].length\n    assert_equal json_response['items'][0][\"uuid\"], groups(:project_owned_by_foo).uuid\n    assert_equal json_response['items'][1][\"uuid\"], collections(:collection_owned_by_foo).uuid\n\n    assert_equal 1, json_response['included'].length\n    assert_equal json_response['included'][0][\"uuid\"], users(:user_foo_in_sharing_group).uuid\n  end\n\n  test 'contents, exclude home, owned by unreadable project' do\n    authorize_with :user_bar_in_sharing_group\n\n    act_as_system_user do\n      Group.find_by_uuid(groups(:project_owned_by_foo).uuid).update!(owner_uuid: groups(:aproject).uuid)\n      Link.create!(\n        tail_uuid: users(:user_bar_in_sharing_group).uuid,\n        link_class: 'permission',\n        name: 'can_read',\n        head_uuid: groups(:project_owned_by_foo).uuid)\n    end\n\n    get :contents, params: {:include => \"owner_uuid\", :exclude_home_project => true}\n\n    assert_equal 1, json_response['items'].length\n    assert_equal json_response['items'][0][\"uuid\"], groups(:project_owned_by_foo).uuid\n\n    assert_equal 0, json_response['included'].length\n  end\n\n  test 'contents, exclude home, add permission link' do\n    authorize_with :user_bar_in_sharing_group\n\n    act_as_system_user do\n      Link.create!(tail_uuid: groups(:group_for_sharing_tests).uuid,\n                   head_uuid: groups(:project_owned_by_foo).uuid,\n                   link_class: 'permission',\n                   name: 'can_manage')\n    end\n\n    get :contents, params: {:include => \"owner_uuid\", :exclude_home_project => true}\n    assert_response 200\n\n    assert_equal 1, json_response['items'].length\n    assert_equal groups(:project_owned_by_foo).uuid, json_response['items'][0][\"uuid\"]\n\n    assert_equal 1, json_response['included'].length\n    assert_equal users(:user_foo_in_sharing_group).uuid, json_response['included'][0][\"uuid\"]\n  end\n\n  test 'contents, exclude home, with parent specified' do\n    authorize_with :active\n\n    get :contents, params: {id: groups(:aproject).uuid, :include => \"owner_uuid\", :exclude_home_project => true}\n\n    assert_response 422\n  end\n\n  [[false, 'owner_uuid'],\n   [false, []],\n   [false, ''],\n   [true, 'container_uuid'],\n   [true, ['container_uuid']],\n   [true, ['owner_uuid', 'container_uuid'], ['uuid', 'container_uuid', 'state', 'output']],\n  ].each do 
|check_container_included, include_param, select_param|\n    test \"contents, include=#{include_param.inspect}\" do\n      authorize_with :active\n      get :contents, params: {\n            :id => users(:active).uuid,\n            :include => include_param,\n            :limit => 1000,\n            :select => select_param,\n          }\n      assert_response 200\n      if include_param.empty?\n        assert_equal false, json_response.include?('included')\n        return\n      end\n      incl = {}\n      json_response['included'].andand.each do |ctr|\n        incl[ctr['uuid']] = ctr\n      end\n      next if !check_container_included\n      checked_crs = 0\n      json_response['items'].each do |item|\n        next if !item['container_uuid']\n        assert_equal item['container_uuid'], incl[item['container_uuid']]['uuid']\n        assert_not_empty incl[item['container_uuid']]['state']\n        checked_crs += 1\n      end\n      assert_operator 0, :<, checked_crs\n    end\n  end\n\n  test \"include_trash does not return trash inside frozen project\" do\n    authorize_with :active\n    trashtime = Time.now - 1.second\n    outerproj = Group.create!(group_class: 'project')\n    innerproj = Group.create!(group_class: 'project', owner_uuid: outerproj.uuid)\n    innercoll = Collection.create!(name: 'inner-not-trashed', owner_uuid: innerproj.uuid)\n    innertrash = Collection.create!(name: 'inner-trashed', owner_uuid: innerproj.uuid, trash_at: trashtime)\n    innertrashproj = Group.create!(group_class: 'project', name: 'inner-trashed-proj', owner_uuid: innerproj.uuid, trash_at: trashtime)\n    outertrash = Collection.create!(name: 'outer-trashed', owner_uuid: outerproj.uuid, trash_at: trashtime)\n    innerproj.update!(frozen_by_uuid: users(:active).uuid)\n    get :contents, params: {id: outerproj.uuid, include_trash: true, recursive: true}\n    assert_response :success\n    uuids = json_response['items'].collect { |item| item['uuid'] }\n    assert_includes uuids, outertrash.uuid\n    assert_includes uuids, innerproj.uuid\n    assert_includes uuids, innercoll.uuid\n    refute_includes uuids, innertrash.uuid\n    refute_includes uuids, innertrashproj.uuid\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/keep_services_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::KeepServicesControllerTest < ActionController::TestCase\n\n  test \"search by service_port with < query\" do\n    authorize_with :active\n    get :index, params: {\n      filters: [['service_port', '<', 25107]]\n    }\n    assert_response :success\n    assert_equal false, assigns(:objects).any?\n  end\n\n  test \"search by service_port with >= query\" do\n    authorize_with :active\n    get :index, params: {\n      filters: [['service_port', '>=', 25107]]\n    }\n    assert_response :success\n    assert_equal true, assigns(:objects).any?\n  end\n\n  [:admin, :active, :inactive, :anonymous, nil].each do |u|\n    test \"accessible to #{u.inspect} user\" do\n      authorize_with(u) if u\n      get :accessible\n      assert_response :success\n      assert_not_empty json_response['items']\n      json_response['items'].each do |ks|\n        assert_not_equal ks['service_type'], 'proxy'\n      end\n    end\n  end\n\n  test \"report configured servers if db is empty\" do\n    KeepService.unscoped.all.delete_all\n    expect_rvz = {}\n    n = 0\n    Rails.configuration.Services.Keepstore.InternalURLs.each do |k,v|\n      n += 1\n      rvz = \"%015x\" % n\n      expect_rvz[k.to_s] = rvz\n      Rails.configuration.Services.Keepstore.InternalURLs[k].Rendezvous = rvz\n    end\n    expect_rvz[Rails.configuration.Services.Keepproxy.ExternalURL] = true\n    refute_empty expect_rvz\n    authorize_with :active\n    get :index,\n      params: {:format => :json}\n    assert_response :success\n    json_response['items'].each do |svc|\n      url = \"#{svc['service_ssl_flag'] ? 'https' : 'http'}://#{svc['service_host']}:#{svc['service_port']}/\"\n      assert_equal true, expect_rvz.has_key?(url), \"#{url} does not match any configured service: expecting #{expect_rvz}\"\n      rvz = expect_rvz[url]\n      if rvz.is_a? String\n        assert_equal \"zzzzz-bi6l4-#{rvz}\", svc['uuid'], \"exported service UUID should match InternalURLs.*.Rendezvous value\"\n      end\n      expect_rvz.delete(url)\n    end\n    assert_equal({}, expect_rvz, \"all configured Keepstore and Keepproxy services should be returned\")\n  end\n\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/links_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::LinksControllerTest < ActionController::TestCase\n\n  ['link', 'link_json'].each do |formatted_link|\n    test \"no symbol keys in serialized hash #{formatted_link}\" do\n      link = {\n        properties: {username: 'testusername'},\n        link_class: 'test',\n        name: 'encoding',\n        tail_uuid: users(:admin).uuid,\n        head_uuid: virtual_machines(:testvm).uuid\n      }\n      authorize_with :admin\n      if formatted_link == 'link_json'\n        post :create, params: {link: link.to_json}\n      else\n        post :create, params: {link: link}\n      end\n      assert_response :success\n      assert_not_nil assigns(:object)\n      assert_equal 'testusername', assigns(:object).properties['username']\n      assert_equal false, assigns(:object).properties.has_key?(:username)\n    end\n  end\n\n  %w(created_at modified_at).each do |attr|\n    {nil: nil, bogus: 2.days.ago}.each do |bogustype, bogusvalue|\n      test \"cannot set #{bogustype} #{attr} in create\" do\n        authorize_with :active\n        post :create, params: {\n          link: {\n            properties: {},\n            link_class: 'test',\n            name: 'test',\n          }.merge(attr => bogusvalue)\n        }\n        assert_response :success\n        resp = JSON.parse @response.body\n        assert_in_delta Time.now, Time.parse(resp[attr]), 3.0\n      end\n      test \"cannot set #{bogustype} #{attr} in update\" do\n        really_created_at = links(:test_timestamps).created_at\n        authorize_with :active\n        put :update, params: {\n          id: links(:test_timestamps).uuid,\n          link: {\n            :properties => {test: 'test'},\n            attr => bogusvalue\n          }\n        }\n        assert_response :success\n        resp = JSON.parse @response.body\n        case attr\n        when 'created_at'\n          assert_in_delta really_created_at, Time.parse(resp[attr]), 0.001\n        else\n          assert_in_delta Time.now, Time.parse(resp[attr]), 3.0\n        end\n      end\n    end\n  end\n\n  test \"head must exist\" do\n    link = {\n      link_class: 'test',\n      name: 'stuff',\n      tail_uuid: users(:active).uuid,\n      head_uuid: 'zzzzz-tpzed-xyzxyzxerrrorxx'\n    }\n    authorize_with :admin\n    post :create, params: {link: link}\n    assert_response 422\n  end\n\n  test \"tail must exist\" do\n    link = {\n      link_class: 'test',\n      name: 'stuff',\n      head_uuid: users(:active).uuid,\n      tail_uuid: 'zzzzz-tpzed-xyzxyzxerrrorxx'\n    }\n    authorize_with :admin\n    post :create, params: {link: link}\n    assert_response 422\n  end\n\n  test \"head and tail exist, head_kind and tail_kind are returned\" do\n    link = {\n      link_class: 'test',\n      name: 'stuff',\n      head_uuid: users(:active).uuid,\n      tail_uuid: users(:spectator).uuid,\n    }\n    authorize_with :admin\n    post :create, params: {link: link}\n    assert_response :success\n    l = JSON.parse(@response.body)\n    assert 'arvados#user', l['head_kind']\n    assert 'arvados#user', l['tail_kind']\n  end\n\n  test \"can supply head_kind and tail_kind without error\" do\n    link = {\n      link_class: 'test',\n      name: 'stuff',\n      head_uuid: users(:active).uuid,\n      tail_uuid: users(:spectator).uuid,\n      head_kind: \"arvados#user\",\n      tail_kind: \"arvados#user\",\n    }\n    authorize_with :admin\n    post 
:create, params: {link: link}\n    assert_response :success\n    l = JSON.parse(@response.body)\n    assert_equal 'arvados#user', l['head_kind']\n    assert_equal 'arvados#user', l['tail_kind']\n  end\n\n  test \"tail must be visible by user\" do\n    link = {\n      link_class: 'test',\n      name: 'stuff',\n      head_uuid: users(:active).uuid,\n      tail_uuid: authorized_keys(:admin).uuid,\n    }\n    authorize_with :active\n    post :create, params: {link: link}\n    assert_response 422\n  end\n\n  test \"filter links with 'is_a' operator\" do\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['tail_uuid', 'is_a', 'arvados#user'] ]\n    }\n    assert_response :success\n    found = assigns(:objects)\n    assert_not_equal 0, found.count\n    assert_equal found.count, (found.select { |f| f.tail_uuid.match User.uuid_regex }).count\n  end\n\n  test \"filter links with 'is_a' operator includes remote objects\" do\n    authorize_with :admin\n    get :index, params: {\n      filters: [\n        ['tail_uuid', 'is_a', 'arvados#user'],\n        ['link_class', '=', 'permission'],\n        ['name', '=', 'can_read'],\n        ['head_uuid', '=', collections(:foo_file).uuid],\n      ]\n    }\n    assert_response :success\n    found = assigns(:objects)\n    assert_not_equal 0, found.count\n    assert_includes(found.map(&:tail_uuid),\n                    users(:federated_active).uuid)\n  end\n\n  test \"filter links with 'is_a' operator with more than one\" do\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['tail_uuid', 'is_a', ['arvados#user', 'arvados#group'] ] ],\n    }\n    assert_response :success\n    found = assigns(:objects)\n    assert_not_equal 0, found.count\n    assert_equal found.count, (found.select { |f|\n                                 f.tail_uuid.match User.uuid_regex or\n                                 f.tail_uuid.match Group.uuid_regex\n                               }).count\n  end\n\n  test \"filter links with 'is_a' operator with bogus type\" do\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['tail_uuid', 'is_a', ['arvados#bogus'] ] ],\n    }\n    assert_response :success\n    found = assigns(:objects)\n    assert_equal 0, found.count\n  end\n\n  test \"filter links with 'is_a' operator with collection\" do\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['head_uuid', 'is_a', ['arvados#collection'] ] ],\n    }\n    assert_response :success\n    found = assigns(:objects)\n    assert_not_equal 0, found.count\n    assert_equal found.count, (found.select { |f| f.head_uuid.match Collection.uuid_regex}).count\n  end\n\n  test \"test can still use where tail_kind\" do\n    authorize_with :admin\n    get :index, params: {\n      where: { tail_kind: 'arvados#user' }\n    }\n    assert_response :success\n    found = assigns(:objects)\n    assert_not_equal 0, found.count\n    assert_equal found.count, (found.select { |f| f.tail_uuid.match User.uuid_regex }).count\n  end\n\n  test \"test can still use where head_kind\" do\n    authorize_with :admin\n    get :index, params: {\n      where: { head_kind: 'arvados#user' }\n    }\n    assert_response :success\n    found = assigns(:objects)\n    assert_not_equal 0, found.count\n    assert_equal found.count, (found.select { |f| f.head_uuid.match User.uuid_regex }).count\n  end\n\n  test \"test can still use filter tail_kind\" do\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['tail_kind', '=', 'arvados#user'] ]\n    }\n    assert_response 
:success\n    found = assigns(:objects)\n    assert_not_equal 0, found.count\n    assert_equal found.count, (found.select { |f| f.tail_uuid.match User.uuid_regex }).count\n  end\n\n  test \"test can still use filter head_kind\" do\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['head_kind', '=', 'arvados#user'] ]\n    }\n    assert_response :success\n    found = assigns(:objects)\n    assert_not_equal 0, found.count\n    assert_equal found.count, (found.select { |f| f.head_uuid.match User.uuid_regex }).count\n  end\n\n  test \"head_kind matches head_uuid\" do\n    link = {\n      link_class: 'test',\n      name: 'stuff',\n      head_uuid: groups(:public).uuid,\n      head_kind: \"arvados#user\",\n      tail_uuid: users(:spectator).uuid,\n      tail_kind: \"arvados#user\",\n    }\n    authorize_with :admin\n    post :create, params: {link: link}\n    assert_response 422\n  end\n\n  test \"tail_kind matches tail_uuid\" do\n    link = {\n      link_class: 'test',\n      name: 'stuff',\n      head_uuid: users(:active).uuid,\n      head_kind: \"arvados#user\",\n      tail_uuid: groups(:public).uuid,\n      tail_kind: \"arvados#user\",\n    }\n    authorize_with :admin\n    post :create, params: {link: link}\n    assert_response 422\n  end\n\n  test \"test with virtual_machine\" do\n    link = {\n      tail_kind: \"arvados#user\",\n      tail_uuid: users(:active).uuid,\n      head_kind: \"arvados#virtual_machine\",\n      head_uuid: virtual_machines(:testvm).uuid,\n      link_class: \"permission\",\n      name: \"can_login\",\n      properties: {username: \"repo_and_user_name\"}\n    }\n    authorize_with :admin\n    post :create, params: {link: link}\n    assert_response 422\n  end\n\n  test \"test with virtualMachine\" do\n    link = {\n      tail_kind: \"arvados#user\",\n      tail_uuid: users(:active).uuid,\n      head_kind: \"arvados#virtualMachine\",\n      head_uuid: virtual_machines(:testvm).uuid,\n      link_class: \"permission\",\n      name: \"can_login\",\n      properties: {username: \"repo_and_user_name\"}\n    }\n    authorize_with :admin\n    post :create, params: {link: link}\n    assert_response :success\n  end\n\n  test \"project owner can show a project permission\" do\n    uuid = links(:project_viewer_can_read_project).uuid\n    authorize_with :active\n    get :show, params: {id: uuid}\n    assert_response :success\n    assert_equal(uuid, assigns(:object).andand.uuid)\n  end\n\n  test \"admin can show a project permission\" do\n    uuid = links(:project_viewer_can_read_project).uuid\n    authorize_with :admin\n    get :show, params: {id: uuid}\n    assert_response :success\n    assert_equal(uuid, assigns(:object).andand.uuid)\n  end\n\n  test \"project viewer can't show others' project permissions\" do\n    authorize_with :project_viewer\n    get :show, params: {id: links(:admin_can_write_aproject).uuid}\n    assert_response 404\n  end\n\n  test \"requesting a nonexistent link returns 404\" do\n    authorize_with :active\n    get :show, params: {id: 'zzzzz-zzzzz-zzzzzzzzzzzzzzz'}\n    assert_response 404\n  end\n\n  # not implemented\n  skip \"retrieve all permissions using generic links index api\" do\n    # Links.readable_by() does not return the full set of permission\n    # links that are visible to a user (i.e., all permission links\n    # whose head_uuid references an object for which the user has\n    # ownership or can_manage permission). 
Therefore, neither does\n    # /arvados/v1/links.\n    #\n    # It is possible to retrieve the full set of permissions for a\n    # single object via /arvados/v1/permissions.\n    authorize_with :active\n    get :index, params: {\n      filters: [['link_class', '=', 'permission'],\n                ['head_uuid', '=', groups(:aproject).uuid]]\n    }\n    assert_response :success\n    assert_not_nil assigns(:objects)\n    assert_includes(assigns(:objects).map(&:uuid),\n                    links(:project_viewer_can_read_project).uuid)\n  end\n\n  test \"admin can index project permissions\" do\n    authorize_with :admin\n    get :index, params: {\n      filters: [['link_class', '=', 'permission'],\n                ['head_uuid', '=', groups(:aproject).uuid]]\n    }\n    assert_response :success\n    assert_not_nil assigns(:objects)\n    assert_includes(assigns(:objects).map(&:uuid),\n                    links(:project_viewer_can_read_project).uuid)\n  end\n\n  test \"project viewer can't index others' project permissions\" do\n    authorize_with :project_viewer\n    get :index, params: {\n      filters: [['link_class', '=', 'permission'],\n                ['head_uuid', '=', groups(:aproject).uuid],\n                ['tail_uuid', '!=', users(:project_viewer).uuid]]\n    }\n    assert_response :success\n    assert_not_nil assigns(:objects)\n    assert_empty assigns(:objects)\n  end\n\n  # Granting permissions.\n  test \"grant can_read on project to other users in group\" do\n    authorize_with :user_foo_in_sharing_group\n\n    refute users(:user_bar_in_sharing_group).can?(read: collections(:collection_owned_by_foo).uuid)\n\n    post :create, params: {\n      link: {\n        tail_uuid: users(:user_bar_in_sharing_group).uuid,\n        link_class: 'permission',\n        name: 'can_read',\n        head_uuid: collections(:collection_owned_by_foo).uuid,\n      }\n    }\n    assert_response :success\n    assert users(:user_bar_in_sharing_group).can?(read: collections(:collection_owned_by_foo).uuid)\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/logs_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::LogsControllerTest < ActionController::TestCase\n  fixtures :logs\n\n  test \"non-admins can create their own logs\" do\n    authorize_with :active\n    post :create, params: {log: {summary: 'test log'}}\n    assert_response :success\n    resp = assigns(:object)\n    assert_not_nil resp.uuid\n    assert_equal('test log', resp.summary, \"loaded wrong log after creation\")\n  end\n\n  test \"non-admins can read their own logs\" do\n    authorize_with :active\n    my_log = logs(:log_owned_by_active)\n    get :show, params: {id: my_log[:uuid]}\n    assert_response(:success, \"failed to get log\")\n    resp = assigns(:object)\n    assert_equal(my_log[:summary], resp.summary, \"got wrong log\")\n  end\n\n  test \"test can still use where object_kind\" do\n    authorize_with :admin\n    get :index, params: {\n      where: { object_kind: 'arvados#user' }\n    }\n    assert_response :success\n    found = assigns(:objects)\n    assert_not_equal 0, found.count\n    assert_equal found.count, (found.select { |f| f.object_uuid.match User.uuid_regex }).count\n    l = JSON.parse(@response.body)\n    assert_equal 'arvados#user', l['items'][0]['object_kind']\n  end\n\n  test \"test can still use filter object_kind\" do\n    authorize_with :admin\n    get :index, params: {\n      filters: [ ['object_kind', '=', 'arvados#user'] ]\n    }\n    assert_response :success\n    found = assigns(:objects)\n    assert_not_equal 0, found.count\n    assert_equal found.count, (found.select { |f| f.object_uuid.match User.uuid_regex }).count\n  end\n\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/management_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::ManagementControllerTest < ActionController::TestCase\n  [\n    [false, nil, 404, 'disabled'],\n    [true, nil, 401, 'authorization required'],\n    [true, 'badformatwithnoBearer', 403, 'authorization error'],\n    [true, 'Bearer wrongtoken', 403, 'authorization error'],\n    [true, 'Bearer configuredmanagementtoken', 200, '{\"health\":\"OK\"}'],\n  ].each do |enabled, header, error_code, error_msg|\n    test \"_health/ping when #{if enabled then 'enabled' else 'disabled' end} with header '#{header}'\" do\n      if enabled\n        Rails.configuration.ManagementToken = 'configuredmanagementtoken'\n      else\n        Rails.configuration.ManagementToken = \"\"\n      end\n\n      @request.headers['Authorization'] = header\n      get :health, params: {check: 'ping'}\n      assert_response error_code\n\n      resp = JSON.parse(@response.body)\n      if error_code == 200\n        assert_equal(JSON.load('{\"health\":\"OK\"}'), resp)\n      else\n        assert_equal(error_msg, resp['errors'])\n      end\n    end\n  end\n\n  test \"metrics\" do\n    mtime = File.mtime(ENV[\"ARVADOS_CONFIG\"])\n    hash = Digest::SHA256.hexdigest(File.read(ENV[\"ARVADOS_CONFIG\"]))\n    Rails.configuration.ManagementToken = \"configuredmanagementtoken\"\n    @request.headers['Authorization'] = \"Bearer configuredmanagementtoken\"\n    get :metrics\n    assert_response :success\n    assert_equal 'text/plain', @response.media_type\n    assert_equal 'utf-8', @response.charset\n\n    assert_match /\\narvados_config_source_timestamp_seconds{sha256=\"#{hash}\"} #{Regexp.escape mtime.utc.to_f.to_s}\\n/, @response.body\n\n    # Expect mtime < loadtime < now\n    m = @response.body.match(/\\narvados_config_load_timestamp_seconds{sha256=\"#{hash}\"} (.*?)\\n/)\n    assert_operator m[1].to_f, :>, mtime.utc.to_f\n    assert_operator m[1].to_f, :<, Time.now.utc.to_f\n\n    assert_match /\\narvados_version_running{version=\"#{Regexp.escape AppVersion.package_version}\"} 1\\n/, @response.body\n  end\n\n  test \"metrics disabled\" do\n    Rails.configuration.ManagementToken = \"\"\n    @request.headers['Authorization'] = \"Bearer configuredmanagementtoken\"\n    get :metrics\n    assert_response 404\n  end\n\n  test \"metrics bad token\" do\n    Rails.configuration.ManagementToken = \"configuredmanagementtoken\"\n    @request.headers['Authorization'] = \"Bearer asdf\"\n    get :metrics\n    assert_response 403\n  end\n\n  test \"metrics unauthorized\" do\n    Rails.configuration.ManagementToken = \"configuredmanagementtoken\"\n    get :metrics\n    assert_response 401\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/query_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::QueryTest < ActionController::TestCase\n  test 'no fallback orders when order is unambiguous' do\n    @controller = Arvados::V1::LogsController.new\n    authorize_with :active\n    get :index, params: {\n      order: ['id asc'],\n      controller: 'logs',\n    }\n    assert_response :success\n    assert_equal ['logs.id asc'], assigns(:objects).order_values\n  end\n\n  test 'fallback orders when order is ambiguous' do\n    @controller = Arvados::V1::LogsController.new\n    authorize_with :active\n    get :index, params: {\n      order: ['event_type asc'],\n      controller: 'logs',\n    }\n    assert_response :success\n    assert_equal('logs.event_type asc, logs.modified_at desc, logs.uuid desc',\n                 assigns(:objects).order_values.join(', '))\n  end\n\n  test 'skip fallback orders already given by client' do\n    @controller = Arvados::V1::LogsController.new\n    authorize_with :active\n    get :index, params: {\n      order: ['modified_at asc'],\n      controller: 'logs',\n    }\n    assert_response :success\n    assert_equal('logs.modified_at asc, logs.uuid desc',\n                 assigns(:objects).order_values.join(', '))\n  end\n\n  test 'eliminate superfluous orders' do\n    @controller = Arvados::V1::LogsController.new\n    authorize_with :active\n    get :index, params: {\n      order: ['logs.modified_at asc',\n              'modified_at desc',\n              'event_type desc',\n              'logs.event_type asc'],\n      controller: 'logs',\n    }\n    assert_response :success\n    assert_equal('logs.modified_at asc, logs.event_type desc, logs.uuid desc',\n                 assigns(:objects).order_values.join(', '))\n  end\n\n  test 'eliminate orders after the first unique column' do\n    @controller = Arvados::V1::LogsController.new\n    authorize_with :active\n    get :index, params: {\n      order: ['event_type asc',\n              'id asc',\n              'uuid asc',\n              'modified_at desc'],\n      controller: 'logs',\n    }\n    assert_response :success\n    assert_equal('logs.event_type asc, logs.id asc',\n                 assigns(:objects).order_values.join(', '))\n  end\n\n  test 'do not count items_available if count=none' do\n    @controller = Arvados::V1::LinksController.new\n    authorize_with :active\n    get :index, params: {\n      count: 'none',\n    }\n    assert_response(:success)\n    refute(json_response.has_key?('items_available'))\n  end\n\n  test 'do not count items_available if count=none for group contents endpoint' do\n    @controller = Arvados::V1::GroupsController.new\n    authorize_with :active\n    get :contents, params: {\n      count: 'none',\n    }\n    assert_response(:success)\n    refute(json_response.has_key?('items_available'))\n  end\n\n  [{}, {count: nil}, {count: ''}, {count: 'exact'}].each do |params|\n    test \"count items_available if params=#{params.inspect}\" do\n      @controller = Arvados::V1::LinksController.new\n      authorize_with :active\n      get :index, params: params\n      assert_response(:success)\n      assert_operator(json_response['items_available'], :>, 0)\n    end\n  end\n\n  test 'error if count=bogus' do\n    @controller = Arvados::V1::LinksController.new\n    authorize_with :active\n    get :index, params: {\n      count: 'bogus',\n    }\n    assert_response(422)\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/schema_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::SchemaControllerTest < ActionController::TestCase\n\n  setup do forget end\n  teardown do forget end\n  def forget\n    AppVersion.forget\n  end\n\n  test \"should get fresh discovery document\" do\n    MAX_SCHEMA_AGE = 60\n    get :index\n    assert_response :success\n    discovery_doc = JSON.parse(@response.body)\n    assert_equal 'discovery#restDescription', discovery_doc['kind']\n    assert_equal(true,\n                 Time.now - MAX_SCHEMA_AGE.seconds < discovery_doc['generatedAt'],\n                 \"discovery document was generated >#{MAX_SCHEMA_AGE}s ago\")\n  end\n\n  test \"discovery document fields\" do\n    get :index\n    assert_response :success\n    discovery_doc = JSON.parse(@response.body)\n    assert_includes discovery_doc, 'defaultTrashLifetime'\n    assert_equal discovery_doc['defaultTrashLifetime'], Rails.configuration.Collections.DefaultTrashLifetime\n    assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['source_version'])\n    assert_match(/^[0-9a-f]+(-modified)?$/, discovery_doc['sourceVersion'])\n    assert_equal discovery_doc['websocketUrl'], Rails.configuration.Services.Websocket.ExternalURL.to_s\n    assert_equal discovery_doc['workbenchUrl'], Rails.configuration.Services.Workbench1.ExternalURL.to_s\n    assert_equal('zzzzz', discovery_doc['uuidPrefix'])\n  end\n\n  test \"discovery document overrides source_version & sourceVersion with config\" do\n    Rails.configuration.source_version = 'aaa888fff'\n    get :index\n    assert_response :success\n    discovery_doc = JSON.parse(@response.body)\n    # Key source_version will be replaced with sourceVersion\n    assert_equal 'aaa888fff', discovery_doc['source_version']\n    assert_equal 'aaa888fff', discovery_doc['sourceVersion']\n  end\n\n  [\"unknown\", \"1.0.1-stable\"].each do |pkg_version|\n    test \"packageVersion #{pkg_version} comes from AppVersion\" do\n      AppVersion.stubs(:package_version).returns(pkg_version)\n      get :index\n      assert_response :success\n      assert_equal(pkg_version, json_response[\"packageVersion\"])\n    end\n  end\n\n  test \"discovery document overrides packageVersion with config\" do\n    Rails.configuration.package_version = '1.0.0-stable'\n    get :index\n    assert_response :success\n    discovery_doc = JSON.parse(@response.body)\n    assert_equal '1.0.0-stable', discovery_doc['packageVersion']\n  end\n\n  test \"empty disable_api_methods\" do\n    get :index\n    assert_response :success\n    discovery_doc = JSON.parse(@response.body)\n    assert_equal('POST',\n                 discovery_doc['resources']['collections']['methods']['create']['httpMethod'])\n  end\n\n  test \"non-empty disable_api_methods\" do\n    Rails.configuration.API.DisabledAPIs = ConfigLoader.to_OrderedOptions(\n      {'collections.create'=>{}, 'workflows.create'=>{}})\n    get :index\n    assert_response :success\n    discovery_doc = JSON.parse(@response.body)\n    ['collections', 'workflows'].each do |r|\n      refute_includes(discovery_doc['resources'][r]['methods'].keys(), 'create')\n    end\n  end\n\n  test \"groups contents parameters\" do\n    get :index\n    assert_response :success\n\n    groups_methods = JSON.parse(@response.body)['resources']['groups']['methods']\n    group_index_params = groups_methods['list']['parameters'].each_pair.to_a\n    group_contents_params = 
groups_methods['contents']['parameters'].each_pair.to_a\n\n    assert_equal(\n      group_contents_params & group_index_params, group_index_params,\n      \"group contents method does not take all the same parameters that index does\",\n    )\n\n    recursive_param = groups_methods['contents']['parameters']['recursive']\n    assert_equal 'boolean', recursive_param['type']\n    assert_equal false, recursive_param['required']\n    assert_equal 'query', recursive_param['location']\n  end\n\n  test \"collections index parameters\" do\n    get :index\n    assert_response :success\n\n    discovery_doc = JSON.parse(@response.body)\n\n    workflows_index_params = discovery_doc['resources']['workflows']['methods']['list']['parameters']  # no changes from super\n    coll_index_params = discovery_doc['resources']['collections']['methods']['list']['parameters']\n\n    assert_equal (workflows_index_params.keys + ['include_trash', 'include_old_versions']).sort, coll_index_params.keys.sort\n\n    include_trash_param = coll_index_params['include_trash']\n    assert_equal 'boolean', include_trash_param['type']\n    assert_equal false, include_trash_param['required']\n    assert_equal 'query', include_trash_param['location']\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/user_agreements_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::UserAgreementsControllerTest < ActionController::TestCase\n\n  test \"active user get user agreements\" do\n    authorize_with :active\n    get :index\n    assert_response :success\n    assert_not_nil assigns(:objects)\n    agreements_list = JSON.parse(@response.body)\n    assert_not_nil agreements_list['items']\n    assert_not_nil agreements_list['items'][0]\n  end\n\n  test \"active user get user agreement signatures\" do\n    authorize_with :active\n    get :signatures\n    assert_response :success\n    assert_not_nil assigns(:objects)\n    agreements_list = JSON.parse(@response.body)\n    assert_not_nil agreements_list['items']\n    assert_not_nil agreements_list['items'][0]\n    assert_equal 1, agreements_list['items'].count\n  end\n\n  test \"inactive user get user agreements\" do\n    authorize_with :inactive\n    get :index\n    assert_response :success\n    assert_not_nil assigns(:objects)\n    agreements_list = JSON.parse(@response.body)\n    assert_not_nil agreements_list['items']\n    assert_not_nil agreements_list['items'][0]\n  end\n\n  test \"uninvited user receives empty list of user agreements\" do\n    authorize_with :inactive_uninvited\n    get :index\n    assert_response :success\n    assert_not_nil assigns(:objects)\n    agreements_list = JSON.parse(@response.body)\n    assert_not_nil agreements_list['items']\n    assert_nil agreements_list['items'][0]\n  end\n\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/users_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\nrequire 'helpers/users_test_helper'\n\nclass Arvados::V1::UsersControllerTest < ActionController::TestCase\n  include CurrentApiClient\n  include UsersTestHelper\n\n  setup do\n    @initial_link_count = Link.count\n    @vm_uuid = virtual_machines(:testvm).uuid\n    ActionMailer::Base.deliveries = []\n    Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false\n  end\n\n  test \"activate a user after signing UA\" do\n    authorize_with :inactive_but_signed_user_agreement\n    post :activate, params: {id: users(:inactive_but_signed_user_agreement).uuid}\n    assert_response :success\n    assert_not_nil assigns(:object)\n    me = JSON.parse(@response.body)\n    assert_equal true, me['is_active']\n  end\n\n  test \"refuse to activate a user before signing UA\" do\n    act_as_system_user do\n    required_uuids = Link.where(\"owner_uuid = ? and link_class = ? and name = ? and tail_uuid = ? and head_uuid like ?\",\n                                system_user_uuid,\n                                'signature',\n                                'require',\n                                system_user_uuid,\n                                Collection.uuid_like_pattern).\n      collect(&:head_uuid)\n\n      assert required_uuids.length > 0\n\n      signed_uuids = Link.where(owner_uuid: system_user_uuid,\n                                link_class: 'signature',\n                                name: 'click',\n                                tail_uuid: users(:inactive).uuid,\n                                head_uuid: required_uuids).\n                          collect(&:head_uuid)\n\n      assert_equal 0, signed_uuids.length\n    end\n\n    authorize_with :inactive\n    assert_equal false, users(:inactive).is_active\n\n    post :activate, params: {id: users(:inactive).uuid}\n    assert_response 403\n\n    resp = json_response\n    assert resp['errors'].first.include? 
'Cannot activate without user agreements'\n    assert_nil resp['is_active']\n  end\n\n  test \"activate an already-active user\" do\n    authorize_with :active\n    post :activate, params: {id: users(:active).uuid}\n    assert_response :success\n    me = JSON.parse(@response.body)\n    assert_equal true, me['is_active']\n  end\n\n  test \"respond 401 if given token exists but user record is missing\" do\n    authorize_with :valid_token_deleted_user\n    get :current, format: :json\n    assert_response 401\n  end\n\n  test \"create new user with user as input\" do\n    authorize_with :admin\n    post :create, params: {\n      user: {\n        first_name: \"test_first_name\",\n        last_name: \"test_last_name\",\n        email: \"foo@example.com\"\n      }\n    }\n    assert_response :success\n    created = JSON.parse(@response.body)\n    assert_equal 'test_first_name', created['first_name']\n    assert_not_nil created['uuid'], 'expected uuid for the newly created user'\n    assert_not_nil created['email'], 'expected non-nil email'\n    assert_nil created['identity_url'], 'expected no identity_url'\n  end\n\n  test \"create new user with empty username\" do\n    authorize_with :admin\n    post :create, params: {\n      user: {\n        first_name: \"test_first_name\",\n        last_name: \"test_last_name\",\n        username: \"\"\n      }\n    }\n    assert_response :success\n    created = JSON.parse(@response.body)\n    assert_equal 'test_first_name', created['first_name']\n    assert_not_nil created['uuid'], 'expected uuid for the newly created user'\n    assert_nil created['email'], 'expected no email'\n    assert_nil created['username'], 'expected no username'\n  end\n\n  test \"update user with empty username\" do\n    authorize_with :admin\n    user = users('spectator')\n    assert_not_nil user['username']\n    put :update, params: {\n      id: users('spectator')['uuid'],\n      user: {\n        username: \"\"\n      }\n    }\n    assert_response :success\n    updated = JSON.parse(@response.body)\n    assert_nil updated['username'], 'expected no username'\n  end\n\n  test \"create user with user and vm as input\" do\n    authorize_with :admin\n\n    post :setup, params: {\n      user: {\n        uuid: 'zzzzz-tpzed-abcdefghijklmno',\n        first_name: \"in_create_test_first_name\",\n        last_name: \"test_last_name\",\n        email: \"foo@example.com\"\n      }\n    }\n    assert_response :success\n    response_items = JSON.parse(@response.body)['items']\n\n    created = find_obj_in_resp response_items, 'User', nil\n\n    assert_equal 'in_create_test_first_name', created['first_name']\n    assert_not_nil created['uuid'], 'expected non-null uuid for the new user'\n    assert_equal 'zzzzz-tpzed-abcdefghijklmno', created['uuid']\n    assert_not_nil created['email'], 'expected non-nil email'\n    assert_nil created['identity_url'], 'expected no identity_url'\n\n    # added links: vm permission, 'all users' group\n    verify_links_added 2\n\n    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',\n        'All users', created['uuid'], 'arvados#group', true, 'Group'\n\n    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',\n        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'\n\n    verify_system_group_permission_link_for created['uuid']\n  end\n\n  test \"setup user with bogus uuid and expect error\" do\n    authorize_with :admin\n\n    post :setup, params: {\n      uuid: 'bogus_uuid',\n      
vm_uuid: @vm_uuid\n    }\n    response_body = JSON.parse(@response.body)\n    response_errors = response_body['errors']\n    assert_not_nil response_errors, 'Expected error in response'\n    assert (response_errors.first.include? 'Path not found'), 'Expected 404'\n  end\n\n  test \"setup user with bogus uuid in user and expect error\" do\n    authorize_with :admin\n\n    post :setup, params: {\n      user: {uuid: 'bogus_uuid'},\n      vm_uuid: @vm_uuid,\n    }\n    response_body = JSON.parse(@response.body)\n    response_errors = response_body['errors']\n    assert_not_nil response_errors, 'Expected error in response'\n    assert (response_errors.first.include? 'ArgumentError: Require user email'),\n      'Expected ArgumentError'\n  end\n\n  test \"setup user with no uuid and user, expect error\" do\n    authorize_with :admin\n\n    post :setup, params: {\n      vm_uuid: @vm_uuid,\n    }\n    response_body = JSON.parse(@response.body)\n    response_errors = response_body['errors']\n    assert_not_nil response_errors, 'Expected error in response'\n    assert (response_errors.first.include? 'Required uuid or user'),\n        'Expected ArgumentError'\n  end\n\n  test \"setup user with no uuid and email, expect error\" do\n    authorize_with :admin\n\n    post :setup, params: {\n      user: {},\n      vm_uuid: @vm_uuid,\n    }\n    response_body = JSON.parse(@response.body)\n    response_errors = response_body['errors']\n    assert_not_nil response_errors, 'Expected error in response'\n    assert (response_errors.first.include? '<ArgumentError: Require user email'),\n        'Expected ArgumentError'\n  end\n\n  test \"invoke setup with existing uuid and vm permission, and verify links\" do\n    authorize_with :admin\n    inactive_user = users(:inactive)\n\n    post :setup, params: {\n      uuid: users(:inactive).uuid,\n      vm_uuid: @vm_uuid\n    }\n\n    assert_response :success\n\n    response_items = JSON.parse(@response.body)['items']\n    resp_obj = find_obj_in_resp response_items, 'User', nil\n\n    assert_not_nil resp_obj['uuid'], 'expected uuid for the new user'\n    assert_equal inactive_user['uuid'], resp_obj['uuid']\n    assert_equal inactive_user['email'], resp_obj['email'],\n        'expecting inactive user email'\n\n    # expect vm permission link\n    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',\n        @vm_uuid, resp_obj['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'\n  end\n\n  test \"invoke setup with existing uuid but different email, expect original email\" do\n    authorize_with :admin\n    inactive_user = users(:inactive)\n\n    post :setup, params: {\n      uuid: inactive_user['uuid'],\n      user: {email: 'junk_email'}\n    }\n\n    assert_response :success\n\n    response_items = JSON.parse(@response.body)['items']\n    resp_obj = find_obj_in_resp response_items, 'User', nil\n\n    assert_not_nil resp_obj['uuid'], 'expected uuid for the new user'\n    assert_equal inactive_user['uuid'], resp_obj['uuid']\n    assert_equal inactive_user['email'], resp_obj['email'],\n        'expecting inactive user email'\n  end\n\n  test \"setup user with valid email and repo(ignored) as input\" do\n    authorize_with :admin\n\n    post :setup, params: {\n      repo_name: 'usertestrepo',\n      user: {email: 'foo@example.com'},\n    }\n\n    assert_response :success\n    response_items = JSON.parse(@response.body)['items']\n    response_object = find_obj_in_resp response_items, 'User', nil\n    assert_not_nil 
response_object['uuid'], 'expected uuid for the new user'\n    assert_equal response_object['email'], 'foo@example.com', 'expected given email'\n\n    # added links: system_group, 'all users' group.\n    verify_links_added 2\n  end\n\n  test \"setup user with fake vm and expect error\" do\n    authorize_with :admin\n\n    post :setup, params: {\n      vm_uuid: 'no_such_vm',\n      user: {email: 'foo@example.com'},\n    }\n\n    response_body = JSON.parse(@response.body)\n    response_errors = response_body['errors']\n    assert_not_nil response_errors, 'Expected error in response'\n    assert (response_errors.first.include? \"No vm found for no_such_vm\"),\n          'Expected RuntimeError: No vm found for no_such_vm'\n  end\n\n  test \"setup user with valid email and real vm as input\" do\n    authorize_with :admin\n\n    post :setup, params: {\n      vm_uuid: @vm_uuid,\n      user: {email: 'foo@example.com'}\n    }\n\n    assert_response :success\n    response_items = JSON.parse(@response.body)['items']\n    response_object = find_obj_in_resp response_items, 'User', nil\n    assert_not_nil response_object['uuid'], 'expected uuid for the new user'\n    assert_equal response_object['email'], 'foo@example.com', 'expected given email'\n\n    # added links; system_group, 'all users' group, vm.\n    verify_links_added 3\n  end\n\n  test \"setup user with valid email, no vm and no repo as input\" do\n    authorize_with :admin\n\n    post :setup, params: {\n      user: {email: 'foo@example.com'},\n    }\n\n    assert_response :success\n    response_items = JSON.parse(@response.body)['items']\n    response_object = find_obj_in_resp response_items, 'User', nil\n    assert_not_nil response_object['uuid'], 'expected uuid for new user'\n    assert_equal response_object['email'], 'foo@example.com', 'expected given email'\n\n    # added links; system_group, 'all users' group.\n    verify_links_added 2\n\n    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',\n        'All users', response_object['uuid'], 'arvados#group', true, 'Group'\n\n    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',\n        nil, response_object['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'\n  end\n\n  test \"setup user with email, first name, and vm uuid\" do\n    authorize_with :admin\n\n    post :setup, params: {\n      vm_uuid: @vm_uuid,\n      user: {\n        first_name: 'test_first_name',\n        email: 'foo@example.com'\n      }\n    }\n\n    assert_response :success\n    response_items = JSON.parse(@response.body)['items']\n    response_object = find_obj_in_resp response_items, 'User', nil\n    assert_not_nil response_object['uuid'], 'expected uuid for new user'\n    assert_equal response_object['email'], 'foo@example.com', 'expected given email'\n    assert_equal 'test_first_name', response_object['first_name'],\n        'expecting first name'\n\n    # added links: system_group, 'all users' group, vm.\n    verify_links_added 3\n  end\n\n  test \"setup user with an existing user email and check different object is created\" do\n    authorize_with :admin\n    inactive_user = users(:inactive)\n\n    post :setup, params: {\n      user: {\n        email: inactive_user['email']\n      }\n    }\n\n    assert_response :success\n    response_items = JSON.parse(@response.body)['items']\n    response_object = find_obj_in_resp response_items, 'User', nil\n    assert_not_nil response_object['uuid'], 'expected uuid for new user'\n    assert_not_equal 
response_object['uuid'], inactive_user['uuid'],\n        'expected different uuid after create operation'\n    assert_equal inactive_user['email'], response_object['email'], 'expected given email'\n    # added links: system_group, 'all users' group.\n    verify_links_added 2\n  end\n\n  test \"setup user with openid prefix\" do\n    authorize_with :admin\n\n    post :setup, params: {\n      user: {\n        first_name: \"in_create_test_first_name\",\n        last_name: \"test_last_name\",\n        email: \"foo@example.com\"\n      }\n    }\n\n    assert_response :success\n\n    response_items = JSON.parse(@response.body)['items']\n    created = find_obj_in_resp response_items, 'User', nil\n\n    assert_equal 'in_create_test_first_name', created['first_name']\n    assert_not_nil created['uuid'], 'expected uuid for new user'\n    assert_not_nil created['email'], 'expected non-nil email'\n    assert_nil created['identity_url'], 'expected no identity_url'\n\n    # added links: system_group, 'all users' group.\n    verify_links_added 2\n\n    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',\n        'All users', created['uuid'], 'arvados#group', true, 'Group'\n\n    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',\n        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'\n  end\n\n  test \"setup user with user and vm, and verify links\" do\n    authorize_with :admin\n\n    post :setup, params: {\n      user: {\n        first_name: \"in_create_test_first_name\",\n        last_name: \"test_last_name\",\n        email: \"foo@example.com\"\n      },\n      vm_uuid: @vm_uuid,\n    }\n\n    assert_response :success\n\n    response_items = JSON.parse(@response.body)['items']\n    created = find_obj_in_resp response_items, 'User', nil\n\n    assert_equal 'in_create_test_first_name', created['first_name']\n    assert_not_nil created['uuid'], 'expected uuid for new user'\n    assert_not_nil created['email'], 'expected non-nil email'\n    assert_nil created['identity_url'], 'expected no identity_url'\n\n    # added links: system_group, 'all users' group, vm\n    verify_links_added 3\n\n    # system_group isn't part of the response.  See User#add_system_group_permission_link\n\n    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',\n        'All users', created['uuid'], 'arvados#group', true, 'Group'\n\n    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',\n        @vm_uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'\n  end\n\n  test \"create user as non admin user and expect error\" do\n    authorize_with :active\n\n    post :create, params: {\n      user: {email: 'foo@example.com'}\n    }\n\n    response_body = JSON.parse(@response.body)\n    response_errors = response_body['errors']\n    assert_not_nil response_errors, 'Expected error in response'\n    assert (response_errors.first.include? 'PermissionDenied'),\n          'Expected PermissionDeniedError'\n  end\n\n  test \"setup user as non admin user and expect error\" do\n    authorize_with :active\n\n    post :setup, params: {\n      user: {email: 'foo@example.com'}\n    }\n\n    response_body = JSON.parse(@response.body)\n    response_errors = response_body['errors']\n    assert_not_nil response_errors, 'Expected error in response'\n    assert (response_errors.first.include? 
'Forbidden'),\n          'Expected Forbidden error'\n  end\n\n  test \"setup active user with no vm\" do\n    authorize_with :admin\n    active_user = users(:active)\n\n    post :setup, params: {\n      uuid: active_user['uuid']\n    }\n\n    assert_response :success\n\n    response_items = JSON.parse(@response.body)['items']\n    created = find_obj_in_resp response_items, 'User', nil\n\n    assert_equal active_user[:email], created['email'], 'expected input email'\n\n    # verify links\n    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',\n        'All users', created['uuid'], 'arvados#group', true, 'Group'\n\n    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',\n        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'\n  end\n\n  test \"setup active user with vm and no repo\" do\n    authorize_with :admin\n    active_user = users(:active)\n\n    post :setup, params: {\n      vm_uuid: @vm_uuid,\n      uuid: active_user['uuid'],\n      email: 'junk_email'\n    }\n\n    assert_response :success\n\n    response_items = JSON.parse(@response.body)['items']\n    created = find_obj_in_resp response_items, 'User', nil\n\n    assert_equal active_user['email'], created['email'], 'expected original email'\n\n    # verify links\n    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',\n        'All users', created['uuid'], 'arvados#group', true, 'Group'\n\n    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',\n        @vm_uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'\n  end\n\n  test \"unsetup active user\" do\n    active_user = users(:active)\n    assert_not_nil active_user['uuid'], 'expected uuid for the active user'\n    assert active_user['is_active'], 'expected is_active for active user'\n\n    verify_link_existence active_user['uuid'], active_user['email'],\n          false, true, true, true, true\n\n    authorize_with :admin\n\n    # now unsetup this user\n    post :unsetup, params: {id: active_user['uuid']}\n    assert_response :success\n\n    response_user = JSON.parse(@response.body)\n    assert_not_nil response_user['uuid'], 'expected uuid for the unsetup user'\n    assert_equal active_user['uuid'], response_user['uuid'], 'expected same uuid as the unsetup user'\n    assert !response_user['is_active'], 'expected user to be inactive'\n    assert !response_user['is_invited'], 'expected user to be uninvited'\n\n    verify_link_existence response_user['uuid'], response_user['email'],\n          false, false, false, false, false\n\n    active_user = User.find_by_uuid(users(:active).uuid)\n    readable_groups = active_user.groups_i_can(:read)\n    all_users_group = Group.all.collect(&:uuid).select { |g| g.match(/-f+$/) }.first\n    refute_includes(readable_groups, all_users_group,\n                    \"active user can read All Users group after being deactivated\")\n    assert_equal(false, active_user.is_invited,\n                 \"active user is_invited after being deactivated & reloaded\")\n  end\n\n  test \"setup user with send notification param false and verify no email\" do\n    authorize_with :admin\n\n    post :setup, params: {\n      send_notification_email: 'false',\n      user: {\n        email: \"foo@example.com\"\n      }\n    }\n\n    assert_response :success\n    response_items = JSON.parse(@response.body)['items']\n    created = find_obj_in_resp response_items, 'User', nil\n    assert_not_nil created['uuid'], 'expected 
uuid for the new user'\n    assert_equal created['email'], 'foo@example.com', 'expected given email'\n\n    setup_email = ActionMailer::Base.deliveries.last\n    assert_nil setup_email, 'expected no setup email'\n  end\n\n  test \"setup user with send notification param true and verify email\" do\n    authorize_with :admin\n\n    Rails.configuration.Users.UserSetupMailText = %{\n<% if not @user.full_name.empty? -%>\n<%= @user.full_name %>,\n<% else -%>\nHi there,\n<% end -%>\n\nYour Arvados shell account has been set up. Please visit the virtual machines page <% if Rails.configuration.Services.Workbench1.ExternalURL %>at\n\n<%= Rails.configuration.Services.Workbench1.ExternalURL %><%= \"/\" if !Rails.configuration.Services.Workbench1.ExternalURL.to_s.end_with?(\"/\") %>users/<%= @user.uuid%>/virtual_machines <% else %><% end %>\n\nfor connection instructions.\n\nThanks,\nThe Arvados team.\n}\n\n    post :setup, params: {\n      send_notification_email: 'true',\n      user: {\n        email: \"foo@example.com\"\n      }\n    }\n\n    assert_response :success\n    response_items = JSON.parse(@response.body)['items']\n    created = find_obj_in_resp response_items, 'User', nil\n    assert_not_nil created['uuid'], 'expected uuid for the new user'\n    assert_equal created['email'], 'foo@example.com', 'expected given email'\n\n    setup_email = ActionMailer::Base.deliveries.last\n    assert_not_nil setup_email, 'Expected email after setup'\n\n    assert_equal Rails.configuration.Users.UserNotifierEmailFrom, setup_email.from[0]\n    assert_equal 'foo@example.com', setup_email.to[0]\n    assert_equal 'Welcome to Arvados - account enabled', setup_email.subject\n    assert (setup_email.body.to_s.include? 'Your Arvados shell account has been set up'),\n        'Expected Your Arvados shell account has been set up in email body'\n    assert (setup_email.body.to_s.include? 
\"#{Rails.configuration.Services.Workbench1.ExternalURL}users/#{created['uuid']}/virtual_machines\"), 'Expected virtual machines url in email body'\n  end\n\n  test \"setup inactive user by changing is_active to true\" do\n    authorize_with :admin\n    active_user = users(:active)\n\n    put :update, params: {\n          id: active_user['uuid'],\n          user: {\n            is_active: true,\n          }\n        }\n    assert_response :success\n    assert_equal active_user['uuid'], json_response['uuid']\n    updated = User.where(uuid: active_user['uuid']).first\n    assert_equal(true, updated.is_active)\n    assert_equal({read: true, write: true}, updated.group_permissions[all_users_group_uuid])\n  end\n\n  test \"non-admin user can get basic information about readable users\" do\n    authorize_with :spectator\n    get(:index)\n    check_non_admin_index\n    check_readable_users_index [:spectator], [:inactive, :active]\n    json_response[\"items\"].each do |u|\n      if u[\"uuid\"] == users(:spectator).uuid\n        assert_equal true, u[\"can_write\"]\n        assert_equal true, u[\"can_manage\"]\n      end\n    end\n  end\n\n  test \"non-bool include_trash param is silently ignored\" do\n    authorize_with :spectator\n    Rails.logger.expects(:warn).never\n    get(:index, params: {include_trash: '-1'})\n  end\n\n  test \"select only computed field full_name\" do\n    authorize_with :active\n    get :show, params: {id: users(:active).uuid, select: [\"uuid\", \"full_name\"]}\n    assert_response :success\n    assert_equal(\"Active User\", json_response[\"full_name\"])\n  end\n\n  test \"non-admin user gets only safe attributes from users#show\" do\n    g = act_as_system_user do\n      create :group, group_class: \"role\"\n    end\n    users = create_list :active_user, 2, join_groups: [g]\n    token = create :token, user: users[0]\n    authorize_with_token token\n    get :show, params: {id: users[1].uuid}\n    check_non_admin_show\n  end\n\n  [2, 4].each do |limit|\n    test \"non-admin user can limit index to #{limit}\" do\n      g = act_as_system_user do\n        create :group, group_class: \"role\"\n      end\n      users = create_list :active_user, 4, join_groups: [g]\n      token = create :token, user: users[0]\n\n      authorize_with_token token\n      get(:index, params: {limit: limit})\n      check_non_admin_index\n      assert_equal(limit, json_response[\"items\"].size,\n                   \"non-admin index limit was ineffective\")\n    end\n  end\n\n  test \"admin has full index powers\" do\n    authorize_with :admin\n    check_inactive_user_findable\n  end\n\n  test \"reader token can grant admin index powers\" do\n    authorize_with :spectator\n    check_inactive_user_findable(reader_tokens: [api_token(:admin)])\n  end\n\n  test \"admin can filter on user.is_active\" do\n    authorize_with :admin\n    get(:index, params: {filters: [[\"is_active\", \"=\", \"true\"]]})\n    assert_response :success\n    check_readable_users_index [:active, :spectator], [:inactive]\n  end\n\n  test \"admin can search where user.is_active\" do\n    authorize_with :admin\n    get(:index, params: {where: {is_active: true}})\n    assert_response :success\n    check_readable_users_index [:active, :spectator], [:inactive]\n  end\n\n  test \"update active_no_prefs user profile and expect notification email\" do\n    authorize_with :admin\n\n    put :update, params: {\n      id: users(:active_no_prefs).uuid,\n      user: {\n        prefs: {:profile => {'organization' => 'example.com'}}\n      }\n    }\n 
   assert_response :success\n\n    found_email = false\n    ActionMailer::Base.deliveries.andand.each do |email|\n      if email.subject == \"Profile created by #{users(:active_no_prefs).email}\"\n        found_email = true\n        break\n      end\n    end\n    assert_equal true, found_email, 'Expected email after creating profile'\n  end\n\n  test \"update active_no_prefs_profile user profile and expect notification email\" do\n    authorize_with :admin\n\n    user = {}\n    user[:prefs] = users(:active_no_prefs_profile_no_getting_started_shown).prefs\n    user[:prefs][:profile] = {:profile => {'organization' => 'example.com'}}\n    put :update, params: {\n      id: users(:active_no_prefs_profile_no_getting_started_shown).uuid,\n      user: user\n    }\n    assert_response :success\n\n    found_email = false\n    ActionMailer::Base.deliveries.andand.each do |email|\n      if email.subject == \"Profile created by #{users(:active_no_prefs_profile_no_getting_started_shown).email}\"\n        found_email = true\n        break\n      end\n    end\n    assert_equal true, found_email, 'Expected email after creating profile'\n  end\n\n  test \"update active user profile and expect no notification email\" do\n    authorize_with :admin\n\n    put :update, params: {\n      id: users(:active).uuid,\n      user: {\n        prefs: {:profile => {'organization' => 'anotherexample.com'}}\n      }\n    }\n    assert_response :success\n\n    found_email = false\n    ActionMailer::Base.deliveries.andand.each do |email|\n      if email.subject == \"Profile created by #{users(:active).email}\"\n        found_email = true\n        break\n      end\n    end\n    assert_equal false, found_email, 'Expected no email after updating profile'\n  end\n\n  test \"user API response includes writable_by\" do\n    authorize_with :active\n    get :current\n    assert_response :success\n    assert_includes(json_response[\"writable_by\"], users(:active).uuid,\n                    \"user's writable_by should include self\")\n    assert_includes(json_response[\"writable_by\"], users(:active).owner_uuid,\n                    \"user's writable_by should include its owner_uuid\")\n  end\n\n  test \"merge with redirect_to_user_uuid=false\" do\n    authorize_with :project_viewer_trustedclient\n    tok = api_client_authorizations(:project_viewer).api_token\n    post :merge, params: {\n           new_user_token: api_client_authorizations(:active_trustedclient).api_token,\n           new_owner_uuid: users(:active).uuid,\n           redirect_to_new_user: false,\n         }\n    assert_response(:success)\n    assert_nil(User.unscoped.find_by_uuid(users(:project_viewer).uuid).redirect_to_user_uuid)\n\n    # because redirect_to_new_user=false, token owned by\n    # project_viewer should be deleted\n    auth = ApiClientAuthorization.validate(token: tok)\n    assert_nil(auth)\n  end\n\n  test \"merge remote to local as admin\" do\n    authorize_with :admin\n\n    remoteuser = User.create!(uuid: \"zbbbb-tpzed-remotremotremot\")\n    tok = ApiClientAuthorization.create!(user: remoteuser).api_token\n\n    auth = ApiClientAuthorization.validate(token: tok)\n    assert_not_nil(auth)\n    assert_nil(remoteuser.redirect_to_user_uuid)\n\n    post :merge, params: {\n           new_user_uuid: users(:active).uuid,\n           old_user_uuid: remoteuser.uuid,\n           new_owner_uuid: users(:active).uuid,\n           redirect_to_new_user: true,\n         }\n    assert_response(:success)\n    remoteuser.reload\n    assert_equal(users(:active).uuid, 
remoteuser.redirect_to_user_uuid)\n\n    # token owned by remoteuser should be deleted\n    auth = ApiClientAuthorization.validate(token: tok)\n    assert_nil(auth)\n  end\n\n  test \"refuse to merge user into self\" do\n    authorize_with(:active_trustedclient)\n    post(:merge, params: {\n           new_user_token: api_client_authorizations(:active_trustedclient).api_token,\n           new_owner_uuid: users(:active).uuid,\n           redirect_to_new_user: true,\n         })\n    assert_response(422)\n  end\n\n  [[:expired_trustedclient, :project_viewer_trustedclient],\n   [:project_viewer_trustedclient, :expired_trustedclient]].each do |src, dst|\n    test \"refuse to merge with expired token (#{src} -> #{dst})\" do\n      authorize_with(src)\n      post(:merge, params: {\n             new_user_token: api_client_authorizations(dst).api_token,\n             new_owner_uuid: api_client_authorizations(dst).user.uuid,\n             redirect_to_new_user: true,\n           })\n      assert_response(401)\n    end\n  end\n\n  [['src', :active_trustedclient],\n   ['dst', :project_viewer_trustedclient]].each do |which_scoped, auth|\n    test \"refuse to merge with scoped #{which_scoped} token\" do\n      act_as_system_user do\n        api_client_authorizations(auth).update(scopes: [\"GET /\", \"POST /\", \"PUT /\"])\n      end\n      authorize_with(:active_trustedclient)\n      post(:merge, params: {\n             new_user_token: api_client_authorizations(:project_viewer_trustedclient).api_token,\n             new_owner_uuid: users(:project_viewer).uuid,\n             redirect_to_new_user: true,\n           })\n      assert_response(403)\n    end\n  end\n\n  test \"refuse to merge if new_owner_uuid is not writable\" do\n    authorize_with(:project_viewer_trustedclient)\n    post(:merge, params: {\n           new_user_token: api_client_authorizations(:active_trustedclient).api_token,\n           new_owner_uuid: groups(:anonymously_accessible_project).uuid,\n           redirect_to_new_user: true,\n         })\n    assert_response(403)\n  end\n\n  test \"refuse to merge if new_owner_uuid is empty\" do\n    authorize_with(:project_viewer_trustedclient)\n    post(:merge, params: {\n           new_user_token: api_client_authorizations(:active_trustedclient).api_token,\n           new_owner_uuid: \"\",\n           redirect_to_new_user: true,\n         })\n    assert_response(422)\n  end\n\n  test \"refuse to merge if new_owner_uuid is not provided\" do\n    authorize_with(:project_viewer_trustedclient)\n    post(:merge, params: {\n           new_user_token: api_client_authorizations(:active_trustedclient).api_token,\n           redirect_to_new_user: true,\n         })\n    assert_response(422)\n  end\n\n  test \"refuse to update redirect_to_user_uuid directly\" do\n    authorize_with(:active_trustedclient)\n    patch(:update, params: {\n            id: users(:active).uuid,\n            user: {\n              redirect_to_user_uuid: users(:active).uuid,\n            },\n          })\n    assert_response(403)\n  end\n\n  test \"merge 'project_viewer' account into 'active' account\" do\n    authorize_with(:project_viewer_trustedclient)\n    post(:merge, params: {\n           new_user_token: api_client_authorizations(:active_trustedclient).api_token,\n           new_owner_uuid: users(:active).uuid,\n           redirect_to_new_user: true,\n         })\n    assert_response(:success)\n    assert_equal(users(:active).uuid, User.unscoped.find_by_uuid(users(:project_viewer).uuid).redirect_to_user_uuid)\n\n    auth = 
ApiClientAuthorization.validate(token: api_client_authorizations(:project_viewer).api_token)\n    assert_not_nil(auth)\n    assert_not_nil(auth.user)\n    assert_equal(users(:active).uuid, auth.user.uuid)\n  end\n\n\n  test \"merge 'project_viewer' account into 'active' account using uuids\" do\n    authorize_with(:admin)\n    post(:merge, params: {\n           old_user_uuid: users(:project_viewer).uuid,\n           new_user_uuid: users(:active).uuid,\n           new_owner_uuid: users(:active).uuid,\n           redirect_to_new_user: true,\n         })\n    assert_response(:success)\n    assert_equal(users(:active).uuid, User.unscoped.find_by_uuid(users(:project_viewer).uuid).redirect_to_user_uuid)\n\n    auth = ApiClientAuthorization.validate(token: api_client_authorizations(:project_viewer).api_token)\n    assert_not_nil(auth)\n    assert_not_nil(auth.user)\n    assert_equal(users(:active).uuid, auth.user.uuid)\n  end\n\n  test \"merge 'project_viewer' account into 'active' account using uuids denied for non-admin\" do\n    authorize_with(:active)\n    post(:merge, params: {\n           old_user_uuid: users(:project_viewer).uuid,\n           new_user_uuid: users(:active).uuid,\n           new_owner_uuid: users(:active).uuid,\n           redirect_to_new_user: true,\n         })\n    assert_response(403)\n    assert_nil(users(:project_viewer).redirect_to_user_uuid)\n  end\n\n  test \"merge 'project_viewer' account into 'active' account using uuids denied missing old_user_uuid\" do\n    authorize_with(:admin)\n    post(:merge, params: {\n           new_user_uuid: users(:active).uuid,\n           new_owner_uuid: users(:active).uuid,\n           redirect_to_new_user: true,\n         })\n    assert_response(422)\n    assert_nil(users(:project_viewer).redirect_to_user_uuid)\n  end\n\n  test \"merge 'project_viewer' account into 'active' account using uuids denied missing new_user_uuid\" do\n    authorize_with(:admin)\n    post(:merge, params: {\n           old_user_uuid: users(:project_viewer).uuid,\n           new_owner_uuid: users(:active).uuid,\n           redirect_to_new_user: true,\n         })\n    assert_response(422)\n    assert_nil(users(:project_viewer).redirect_to_user_uuid)\n  end\n\n  test \"merge 'project_viewer' account into 'active' account using uuids denied bogus old_user_uuid\" do\n    authorize_with(:admin)\n    post(:merge, params: {\n           old_user_uuid: \"zzzzz-tpzed-bogusbogusbogus\",\n           new_user_uuid: users(:active).uuid,\n           new_owner_uuid: users(:active).uuid,\n           redirect_to_new_user: true,\n         })\n    assert_response(422)\n    assert_nil(users(:project_viewer).redirect_to_user_uuid)\n  end\n\n  test \"merge 'project_viewer' account into 'active' account using uuids denied bogus new_user_uuid\" do\n    authorize_with(:admin)\n    post(:merge, params: {\n           old_user_uuid: users(:project_viewer).uuid,\n           new_user_uuid: \"zzzzz-tpzed-bogusbogusbogus\",\n           new_owner_uuid: users(:active).uuid,\n           redirect_to_new_user: true,\n         })\n    assert_response(422)\n    assert_nil(users(:project_viewer).redirect_to_user_uuid)\n  end\n\n  test \"batch update fails for non-admin\" do\n    authorize_with(:active)\n    patch(:batch_update, params: {updates: {}})\n    assert_response(403)\n  end\n\n  test \"batch update\" do\n    existinguuid = 'remot-tpzed-foobarbazwazqux'\n    newuuid = 'remot-tpzed-newnarnazwazqux'\n    unchanginguuid = 'remot-tpzed-nochangingattrs'\n    conflictinguuid1 = 
'remot-tpzed-conflictingnam1'\n    conflictinguuid2 = 'remot-tpzed-conflictingnam2'\n    act_as_system_user do\n      User.create!(uuid: existinguuid, email: 'root@existing.example.com')\n      User.create!(uuid: unchanginguuid, email: 'root@unchanging.example.com', prefs: {'foo' => {'bar' => 'baz'}})\n    end\n    assert_equal(1, Log.where(object_uuid: unchanginguuid).count)\n\n    Rails.configuration.Login.LoginCluster = 'remot'\n\n    authorize_with(:admin)\n    patch(:batch_update,\n          params: {\n            updates: {\n              existinguuid => {\n                'first_name' => 'root',\n                'email' => 'root@remot.example.com',\n                'is_active' => true,\n                'is_admin' => true,\n                'prefs' => {'foo' => 'bar'},\n                'is_invited' => true\n              },\n              newuuid => {\n                'first_name' => 'noot',\n                'email' => 'root@remot.example.com',\n                'username' => '',\n                'is_invited' => true\n              },\n              unchanginguuid => {\n                'email' => 'root@unchanging.example.com',\n                'prefs' => {'foo' => {'bar' => 'baz'}},\n                'is_invited' => true\n              },\n              conflictinguuid1 => {\n                'email' => 'root@conflictingname1.example.com',\n                'username' => 'active',\n                'is_invited' => true\n              },\n              conflictinguuid2 => {\n                'email' => 'root@conflictingname2.example.com',\n                'username' => 'federatedactive',\n                'is_invited' => true\n              },\n            }})\n    assert_response(:success)\n\n    assert_equal('root', User.find_by_uuid(existinguuid).first_name)\n    assert_equal('root@remot.example.com', User.find_by_uuid(existinguuid).email)\n    assert_equal(true, User.find_by_uuid(existinguuid).is_active)\n    assert_equal(true, User.find_by_uuid(existinguuid).is_admin)\n    assert_equal({'foo' => 'bar'}, User.find_by_uuid(existinguuid).prefs)\n\n    assert_equal('noot', User.find_by_uuid(newuuid).first_name)\n    assert_equal('root@remot.example.com', User.find_by_uuid(newuuid).email)\n\n    assert_equal(1, Log.where(object_uuid: unchanginguuid).count)\n  end\n\n  test 'batch update does not produce spurious log events' do\n    # test for bug #21304\n\n    existinguuid = 'remot-tpzed-foobarbazwazqux'\n    act_as_system_user do\n      User.create!(uuid: existinguuid,\n                   first_name: 'root',\n                   is_active: true,\n                  )\n    end\n    assert_equal(1, Log.where(object_uuid: existinguuid).count)\n\n    Rails.configuration.Login.LoginCluster = 'remot'\n\n    authorize_with(:admin)\n    patch(:batch_update,\n          params: {\n            updates: {\n              existinguuid => {\n                'first_name' => 'root',\n                'email' => '',\n                'username' => '',\n                'is_active' => true,\n                'is_invited' => true\n              },\n            }})\n    assert_response(:success)\n\n    assert_equal(1, Log.where(object_uuid: existinguuid).count)\n  end\n\n  NON_ADMIN_USER_DATA = [\"uuid\", \"kind\", \"is_active\", \"is_admin\", \"is_invited\", \"email\", \"first_name\",\n                         \"last_name\", \"username\", \"can_write\", \"can_manage\"].sort\n\n  def check_non_admin_index\n    assert_response :success\n    response_items = json_response[\"items\"]\n    assert_not_nil response_items\n  
  response_items.each do |user_data|\n      check_non_admin_item user_data\n      assert(user_data[\"is_active\"], \"non-admin index returned inactive user\")\n    end\n  end\n\n  def check_non_admin_show\n    assert_response :success\n    check_non_admin_item json_response\n  end\n\n  def check_non_admin_item user_data\n    assert_equal(NON_ADMIN_USER_DATA, user_data.keys.sort,\n                 \"data in response had missing or extra attributes\")\n    assert_equal(\"arvados#user\", user_data[\"kind\"])\n  end\n\n\n  def check_readable_users_index expect_present, expect_missing\n    response_uuids = json_response[\"items\"].map { |u| u[\"uuid\"] }\n    expect_present.each do |user_key|\n      assert_includes(response_uuids, users(user_key).uuid,\n                      \"#{user_key} missing from index\")\n    end\n    expect_missing.each do |user_key|\n      refute_includes(response_uuids, users(user_key).uuid,\n                      \"#{user_key} included in index\")\n    end\n  end\n\n  def check_inactive_user_findable(params={})\n    inactive_user = users(:inactive)\n    get(:index, params: params.merge(filters: [[\"email\", \"=\", inactive_user.email]]))\n    assert_response :success\n    user_list = json_response[\"items\"]\n    assert_equal(1, user_list.andand.count)\n    # This test needs to check a column non-admins have no access to,\n    # to ensure that admins see all user information.\n    assert_equal(inactive_user.identity_url, user_list.first[\"identity_url\"],\n                 \"admin's filtered index did not return inactive user\")\n  end\n\n  def verify_links_added more\n    assert_equal @initial_link_count+more, Link.count,\n        \"Started with #{@initial_link_count} links, expected #{more} more\"\n  end\n\n  def find_obj_in_resp (response_items, object_type, head_kind=nil)\n    return_obj = nil\n    response_items.each { |x|\n      if !x\n        next\n      end\n\n      if object_type == 'User'\n        if ArvadosModel::resource_class_for_uuid(x['uuid']) == User\n          return_obj = x\n          break\n        end\n      else  # looking for a link\n        if x['head_uuid'] and ArvadosModel::resource_class_for_uuid(x['head_uuid']).kind == head_kind\n          return_obj = x\n          break\n        end\n      end\n    }\n    return return_obj\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/arvados/v1/virtual_machines_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass Arvados::V1::VirtualMachinesControllerTest < ActionController::TestCase\n  def get_logins_for(vm_sym)\n    authorize_with :admin\n    get(:logins, params: {id: virtual_machines(vm_sym).uuid})\n  end\n\n  def find_login(sshkey_sym)\n    assert_response :success\n    want_key = authorized_keys(sshkey_sym).public_key\n    logins = json_response[\"items\"].select do |login|\n      login[\"public_key\"] == want_key\n    end\n    assert_equal(1, logins.size, \"failed to find #{sshkey_sym} login\")\n    logins.first\n  end\n\n  test \"username propagated from permission\" do\n    get_logins_for(:testvm2)\n    admin_login = find_login(:admin)\n    perm = links(:admin_can_login_to_testvm2)\n    assert_equal(perm.properties[\"username\"], admin_login[\"username\"])\n  end\n\n  test \"groups propagated from permission\" do\n    get_logins_for(:testvm2)\n    admin_login = find_login(:admin)\n    perm = links(:admin_can_login_to_testvm2)\n    assert_equal(perm.properties[\"groups\"], admin_login[\"groups\"])\n  end\n\n  test \"groups is an empty list by default\" do\n    get_logins_for(:testvm2)\n    active_login = find_login(:active)\n    assert_equal([], active_login[\"groups\"])\n  end\n\n  test \"logins without usernames not listed\" do\n    get_logins_for(:testvm2)\n    assert_response :success\n    spectator_uuid = users(:spectator).uuid\n    assert_empty(json_response.\n                 select { |login| login[\"user_uuid\"] == spectator_uuid })\n  end\n\n  test \"logins without ssh keys are listed\" do\n    u, vm = nil\n    act_as_system_user do\n      u = create :active_user, first_name: 'Bob', last_name: 'Blogin'\n      vm = VirtualMachine.create! hostname: 'foo.shell'\n      Link.create!(tail_uuid: u.uuid,\n                   head_uuid: vm.uuid,\n                   link_class: 'permission',\n                   name: 'can_login',\n                   properties: {'username' => 'bobblogin'})\n    end\n    authorize_with :admin\n    get :logins, params: {id: vm.uuid}\n    assert_response :success\n    assert_equal 1, json_response['items'].length\n    assert_nil json_response['items'][0]['public_key']\n    assert_nil json_response['items'][0]['authorized_key_uuid']\n    assert_equal u.uuid, json_response['items'][0]['user_uuid']\n    assert_equal 'bobblogin', json_response['items'][0]['username']\n  end\n\n  test 'get all logins' do\n    authorize_with :admin\n    get :get_all_logins\n    find_login :admin\n    find_login :active\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/database_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass DatabaseControllerTest < ActionController::TestCase\n  include CurrentApiClient\n\n  test \"reset fails with non-admin token\" do\n    authorize_with :active\n    post :reset\n    assert_response 403\n  end\n\n  test \"route not found when not in test mode\" do\n    authorize_with :admin\n    env_was = Rails.env\n    begin\n      Rails.env = 'production'\n      Rails.application.reload_routes!\n      assert_raises ActionController::UrlGenerationError do\n        post :reset\n      end\n    ensure\n      Rails.env = env_was\n      Rails.application.reload_routes!\n    end\n  end\n\n  test \"reset fails when a non-test-fixture user exists\" do\n    act_as_system_user do\n      User.create!(uuid: 'abcde-tpzed-123451234512345', email: 'bar@example.net')\n    end\n    authorize_with :admin\n    post :reset\n    assert_response 403\n  end\n\n  test \"reset succeeds with admin token\" do\n    new_uuid = nil\n    act_as_system_user do\n      new_uuid = Collection.create.uuid\n    end\n    assert_not_empty Collection.where(uuid: new_uuid)\n    authorize_with :admin\n    post :reset\n    assert_response 200\n    assert_empty Collection.where(uuid: new_uuid)\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/sys_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass SysControllerTest < ActionController::TestCase\n  include CurrentApiClient\n  include DbCurrentTime\n\n  test \"trash_sweep - delete expired tokens\" do\n    assert_not_empty ApiClientAuthorization.where(uuid: api_client_authorizations(:expired).uuid)\n    authorize_with :admin\n    post :trash_sweep\n    assert_response :success\n    assert_empty ApiClientAuthorization.where(uuid: api_client_authorizations(:expired).uuid)\n  end\n\n  test \"trash_sweep - fail with non-admin token\" do\n    authorize_with :active\n    post :trash_sweep\n    assert_response 403\n  end\n\n  test \"trash_sweep - move collections to trash\" do\n    c = collections(:trashed_on_next_sweep)\n    refute_empty Collection.where('uuid=? and is_trashed=false', c.uuid)\n    assert_raises(ActiveRecord::RecordNotUnique) do\n      act_as_user users(:active) do\n        Collection.create!(owner_uuid: c.owner_uuid,\n                           name: c.name)\n      end\n    end\n    authorize_with :admin\n    post :trash_sweep\n    assert_response :success\n    c = Collection.where('uuid=? and is_trashed=true', c.uuid).first\n    assert c\n    act_as_user users(:active) do\n      assert Collection.create!(owner_uuid: c.owner_uuid,\n                                name: c.name)\n    end\n  end\n\n  test \"trash_sweep - delete collections\" do\n    uuid = 'zzzzz-4zz18-3u1p5umicfpqszp' # deleted_on_next_sweep\n    assert_not_empty Collection.where(uuid: uuid)\n    authorize_with :admin\n    post :trash_sweep\n    assert_response :success\n    assert_empty Collection.where(uuid: uuid)\n  end\n\n  test \"trash_sweep - delete referring links\" do\n    uuid = collections(:trashed_on_next_sweep).uuid\n    act_as_system_user do\n      assert_raises ActiveRecord::RecordInvalid do\n        # Cannot create because :trashed_on_next_sweep is already trashed\n        Link.create!(head_uuid: uuid,\n                     tail_uuid: system_user_uuid,\n                     link_class: 'whatever',\n                     name: 'something')\n      end\n\n      # Bump trash_at to now + 1 minute\n      Collection.where(uuid: uuid).\n        update(trash_at: db_current_time + (1).minute)\n\n      # Not considered trashed now\n      Link.create!(head_uuid: uuid,\n                   tail_uuid: system_user_uuid,\n                   link_class: 'whatever',\n                   name: 'something')\n    end\n    past = db_current_time\n    Collection.where(uuid: uuid).\n      update_all(is_trashed: true, trash_at: past, delete_at: past)\n    assert_not_empty Collection.where(uuid: uuid)\n    authorize_with :admin\n    post :trash_sweep\n    assert_response :success\n    assert_empty Collection.where(uuid: uuid)\n  end\n\n  test \"trash_sweep - move projects to trash\" do\n    p = groups(:trashed_on_next_sweep)\n    assert_empty Group.where('uuid=? and is_trashed=true', p.uuid)\n    authorize_with :admin\n    post :trash_sweep\n    assert_response :success\n    assert_not_empty Group.where('uuid=? and is_trashed=true', p.uuid)\n  end\n\n  test \"trash_sweep - role groups are deleted\" do\n    p = groups(:trashed_role_on_next_sweep)\n    assert_empty Group.where('uuid=? 
and is_trashed=true', p.uuid)\n    assert_not_empty Link.where(uuid: links(:foo_file_readable_by_soon_to_be_trashed_role).uuid)\n    authorize_with :admin\n    post :trash_sweep\n    assert_response :success\n    assert_empty Group.where(uuid: p.uuid)\n    assert_empty Link.where(uuid: links(:foo_file_readable_by_soon_to_be_trashed_role).uuid)\n  end\n\n  test \"trash_sweep - delete projects and their contents\" do\n    g_foo = groups(:trashed_project)\n    g_bar = groups(:trashed_subproject)\n    g_baz = groups(:trashed_subproject3)\n    col = collections(:collection_in_trashed_subproject)\n    cr = container_requests(:cr_in_trashed_project)\n    # Record how many objects existed before the sweep\n    user_nr_was = User.all.length\n    coll_nr_was = Collection.all.length\n    group_nr_was = Group.where('group_class<>?', 'project').length\n    project_nr_was = Group.where(group_class: 'project').length\n    cr_nr_was = ContainerRequest.all.length\n    assert_not_empty Group.where(uuid: g_foo.uuid)\n    assert_not_empty Group.where(uuid: g_bar.uuid)\n    assert_not_empty Group.where(uuid: g_baz.uuid)\n    assert_not_empty Collection.where(uuid: col.uuid)\n    assert_not_empty ContainerRequest.where(uuid: cr.uuid)\n\n    authorize_with :admin\n    Group.find_by_uuid(g_foo.uuid).update!(delete_at: Time.now - 1.second)\n\n    post :trash_sweep\n    assert_response :success\n\n    assert_empty Group.where(uuid: g_foo.uuid)\n    assert_empty Group.where(uuid: g_bar.uuid)\n    assert_empty Group.where(uuid: g_baz.uuid)\n    assert_empty Collection.where(uuid: col.uuid)\n    assert_empty ContainerRequest.where(uuid: cr.uuid)\n    # No unwanted deletions should have happened\n    assert_equal user_nr_was, User.all.length\n    assert_equal coll_nr_was-2,        # collection_in_trashed_subproject\n                 Collection.all.length # & deleted_on_next_sweep collections\n    assert_equal group_nr_was-1,       # trashed_role_on_next_sweep\n                 Group.where('group_class<>?', 'project').length\n    assert_equal project_nr_was-3, Group.where(group_class: 'project').length\n    assert_equal cr_nr_was-1, ContainerRequest.all.length\n  end\n\n  test \"trash_sweep - delete unused uuid_locks\" do\n    uuid_active = \"zzzzz-zzzzz-uuidlockstest11\"\n    uuid_inactive = \"zzzzz-zzzzz-uuidlockstest00\"\n\n    ready = Queue.new\n    insertsql = \"INSERT INTO uuid_locks (uuid) VALUES ($1) ON CONFLICT (uuid) do UPDATE SET n = uuid_locks.n+1\"\n    url = ENV[\"DATABASE_URL\"].sub(/\\?.*/, '')\n    Thread.new do\n      conn = PG::Connection.new(url)\n      conn.exec_params(insertsql, [uuid_active])\n      conn.exec_params(insertsql, [uuid_inactive])\n      conn.transaction do |conn|\n        conn.exec_params(insertsql, [uuid_active])\n        ready << true\n        # If we keep this transaction open while trash_sweep runs, the\n        # uuid_active row shouldn't get deleted.\n        sleep 10\n      rescue\n        # Unblock main thread\n        ready << false\n        raise\n      end\n    end\n    assert_equal true, ready.pop\n    authorize_with :admin\n    post :trash_sweep\n    rows = ActiveRecord::Base.connection.exec_query(\"SELECT uuid FROM uuid_locks ORDER BY uuid\", \"\", []).rows\n    assert_includes(rows, [uuid_active], \"row with active lock (still held by thread) should not have been deleted\")\n    refute_includes(rows, [uuid_inactive], \"row with inactive lock should have been deleted\")\n  end\nend\n"
  },
  {
    "path": "services/api/test/functional/user_sessions_controller_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass UserSessionsControllerTest < ActionController::TestCase\n\n  setup do\n    @allowed_return_to = \",https://controller.api.client.invalid\"\n  end\n\n  test \"login route deleted\" do\n    @request.headers['Authorization'] = 'Bearer '+Rails.configuration.SystemRootToken\n    get :login, params: {provider: 'controller', return_to: @allowed_return_to}\n    assert_response 404\n  end\n\n  test \"controller cannot create session without SystemRootToken\" do\n    get :create, params: {provider: 'controller', auth_info: {email: \"foo@bar.com\"}, return_to: @allowed_return_to}\n    assert_response 401\n  end\n\n  test \"controller cannot create session with wrong SystemRootToken\" do\n    @request.headers['Authorization'] = 'Bearer blah'\n    get :create, params: {provider: 'controller', auth_info: {email: \"foo@bar.com\"}, return_to: @allowed_return_to}\n    assert_response 401\n  end\n\n  test \"controller can create session using SystemRootToken\" do\n    @request.headers['Authorization'] = 'Bearer '+Rails.configuration.SystemRootToken\n    get :create, params: {provider: 'controller', auth_info: {email: \"foo@bar.com\"}, return_to: @allowed_return_to}\n    assert_response :redirect\n    api_client_auth = assigns(:api_client_auth)\n    assert_not_nil api_client_auth\n    assert_includes(@response.redirect_url, 'api_token='+api_client_auth.token)\n  end\nend\n"
  },
  {
    "path": "services/api/test/helpers/container_test_helper.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule ContainerTestHelper\n  def secret_string\n    'UNGU3554BL3'\n  end\n\n  def assert_no_secrets_logged\n    Log.all.map(&:properties).each do |props|\n      refute_match /secret\\/6x9|#{secret_string}/, SafeJSON.dump(props)\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/helpers/docker_migration_helper.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule DockerMigrationHelper\n  include CurrentApiClient\n\n  def add_docker19_migration_link\n    act_as_system_user do\n      assert(Link.create!(owner_uuid: system_user_uuid,\n                          link_class: 'docker_image_migration',\n                          name: 'migrate_1.9_1.10',\n                          tail_uuid: collections(:docker_image).portable_data_hash,\n                          head_uuid: collections(:docker_image_1_12).portable_data_hash))\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/helpers/manifest_examples.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule ManifestExamples\n  def make_manifest opts={}\n    opts = {\n      bytes_per_block: 1,\n      blocks_per_file: 1,\n      files_per_stream: 1,\n      streams: 1,\n    }.merge(opts)\n    datablip = \"x\" * opts[:bytes_per_block]\n    locator = Blob.sign_locator(Digest::MD5.hexdigest(datablip) +\n                                '+' + datablip.length.to_s,\n                                api_token: opts[:api_token])\n    filesize = datablip.length * opts[:blocks_per_file]\n    txt = ''\n    (1..opts[:streams]).each do |s|\n      streamtoken = \"./stream#{s}\"\n      streamsize = 0\n      blocktokens = []\n      filetokens = []\n      (1..opts[:files_per_stream]).each do |f|\n        filetokens << \"#{streamsize}:#{filesize}:file#{f}.txt\"\n        (1..opts[:blocks_per_file]).each do |b|\n          blocktokens << locator\n        end\n        streamsize += filesize\n      end\n      txt << ([streamtoken] + blocktokens + filetokens).join(' ') + \"\\n\"\n    end\n    txt\n  end\nend\n"
  },
  {
    "path": "services/api/test/helpers/time_block.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nclass ActiveSupport::TestCase\n  def time_block label\n    t0 = Time.now\n    begin\n      yield\n    ensure\n      t1 = Time.now\n      $stderr.puts \"#{t1 - t0}s #{label}\"\n    end\n  end\n\n  def vmpeak c\n    open(\"/proc/self/status\").each_line do |line|\n      print \"Begin #{c} #{line}\" if (line =~ /^VmHWM:/)\n    end\n    n = yield\n    open(\"/proc/self/status\").each_line do |line|\n      print \"End #{c} #{line}\" if (line =~ /^VmHWM:/)\n    end\n    n\n  end\n\nend\n"
  },
  {
    "path": "services/api/test/helpers/users_test_helper.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nmodule UsersTestHelper\n  include CurrentApiClient\n\n  def verify_link(response_items, link_object_name, expect_link, link_class,\n        link_name, head_uuid, tail_uuid, head_kind, fetch_object, class_name)\n    link = find_obj_in_resp response_items, 'arvados#link', link_object_name\n\n    if !expect_link\n      assert_nil link, \"Expected no link for #{link_object_name}\"\n      return\n    end\n\n    assert_not_nil link, \"Expected link for #{link_object_name}\"\n\n    if fetch_object\n      object = Object.const_get(class_name).where(name: head_uuid)\n      assert [] != object, \"expected #{class_name} with name #{head_uuid}\"\n      head_uuid = object.first[:uuid]\n    end\n    assert_equal link_class, link['link_class'],\n        \"did not find expected link_class for #{link_object_name}\"\n\n    assert_equal link_name, link['name'],\n        \"did not find expected link_name for #{link_object_name}\"\n\n    assert_equal tail_uuid, link['tail_uuid'],\n        \"did not find expected tail_uuid for #{link_object_name}\"\n\n    assert_equal head_kind, link['head_kind'],\n        \"did not find expected head_kind for #{link_object_name}\"\n\n    assert_equal head_uuid, link['head_uuid'],\n        \"did not find expected head_uuid for #{link_object_name}\"\n  end\n\n  def verify_system_group_permission_link_for user_uuid\n    assert_equal 1, Link.where(link_class: 'permission',\n                               name: 'can_manage',\n                               tail_uuid: system_group_uuid,\n                               head_uuid: user_uuid).count\n  end\n\n  def verify_link_existence uuid, email, expect_oid_login_perms,\n      expect_repo_perms, expect_vm_perms, expect_group_perms, expect_signatures\n    # verify that all links are deleted for the user\n    oid_login_perms = Link.where(tail_uuid: email,\n                                 link_class: 'permission',\n                                 name: 'can_login').where(\"head_uuid like ?\", User.uuid_like_pattern)\n\n    # these don't get added any more!  they shouldn't appear ever.\n    assert !oid_login_perms.any?, \"expected all oid_login_perms deleted\"\n\n    # these don't get added any more!  
they shouldn't appear ever.\n    repo_perms = Link.where(tail_uuid: uuid,\n                            link_class: 'permission').where(\"head_uuid like ?\", '_____-s0uqq-_______________')\n    assert !repo_perms.any?, \"expected all repo_perms deleted\"\n\n    vm_login_perms = Link.\n      where(tail_uuid: uuid,\n            link_class: 'permission',\n            name: 'can_login').\n      where(\"head_uuid like ?\",\n            VirtualMachine.uuid_like_pattern).\n      where('uuid <> ?',\n            links(:auto_setup_vm_login_username_can_login_to_test_vm).uuid)\n    if expect_vm_perms\n      assert vm_login_perms.any?, \"expected vm_login_perms\"\n    else\n      assert !vm_login_perms.any?, \"expected all vm_login_perms deleted\"\n    end\n\n    group_write_perms = Link.where(tail_uuid: uuid,\n                                  head_uuid: all_users_group_uuid,\n                                  link_class: 'permission',\n                                  name: 'can_write')\n    if expect_group_perms\n      assert group_write_perms.any?, \"expected all users group write perms\"\n    else\n      assert !group_write_perms.any?, \"expected all users group write perms deleted\"\n    end\n\n    signed_uuids = Link.where(link_class: 'signature',\n                              tail_uuid: uuid)\n\n    if expect_signatures\n      assert signed_uuids.any?, \"expected signatures\"\n    else\n      assert !signed_uuids.any?, \"expected all signatures deleted\"\n    end\n\n  end\n\nend\n"
  },
  {
    "path": "services/api/test/integration/.gitkeep",
    "content": ""
  },
  {
    "path": "services/api/test/integration/api_client_authorizations_api_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass ApiClientAuthorizationsApiTest < ActionDispatch::IntegrationTest\n  include DbCurrentTime\n  extend DbCurrentTime\n  fixtures :all\n\n  test \"create system auth\" do\n    post \"/arvados/v1/api_client_authorizations/create_system_auth\",\n      params: {:format => :json, :scopes => ['test'].to_json},\n      headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:admin_trustedclient).api_token}\"}\n    assert_response :success\n  end\n\n  [\n    [true, :active, 403],\n    [true, :admin, 200],\n    [true, :system_user, 200],\n    [false, :active, 403],\n    [false, :admin, 403],\n    [false, :system_user, 200],\n  ].each do |issue_trusted_tokens, tk, expect_response|\n    test \"create token for different user using #{tk} with IssueTrustedTokens=#{issue_trusted_tokens}\" do\n      Rails.configuration.Login.IssueTrustedTokens = issue_trusted_tokens\n      post \"/arvados/v1/api_client_authorizations\",\n           params: {\n             :format => :json,\n             :api_client_authorization => {\n               :owner_uuid => users(:spectator).uuid\n             }\n           },\n           headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(tk).api_token}\"}\n\n      assert_response expect_response\n      return if expect_response >= 300\n\n      get \"/arvados/v1/users/current\",\n          params: {:format => :json},\n          headers: {'HTTP_AUTHORIZATION' => \"Bearer #{json_response['api_token']}\"}\n      @json_response = nil\n      assert_equal json_response['uuid'], users(:spectator).uuid\n    end\n  end\n\n  test \"System root token is system user\" do\n    token = \"xyzzy-SystemRootToken\"\n    Rails.configuration.SystemRootToken = token\n    get \"/arvados/v1/users/current\",\n        params: {:format => :json},\n        headers: {'HTTP_AUTHORIZATION' => \"Bearer #{token}\"}\n    assert_equal json_response['uuid'], system_user_uuid\n  end\n\n  test \"refuse to create token for different user if not admin\" do\n    post \"/arvados/v1/api_client_authorizations\",\n      params: {\n        :format => :json,\n        :api_client_authorization => {\n          :owner_uuid => users(:spectator).uuid\n        }\n      },\n      headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:active_trustedclient).api_token}\"}\n    assert_response 403\n  end\n\n  [nil, db_current_time + 2.hours].each do |desired_expiration|\n    [false, true].each do |admin|\n      test \"expires_at gets clamped on #{admin ? 'admins' : 'non-admins'} when API.MaxTokenLifetime is set and desired expires_at #{desired_expiration.nil? ? 'is not set' : 'exceeds the limit'}\" do\n        Rails.configuration.API.MaxTokenLifetime = 1.hour\n        token = api_client_authorizations(admin ? :admin_trustedclient : :active_trustedclient).api_token\n\n        # Test token creation\n        start_t = db_current_time\n        post \"/arvados/v1/api_client_authorizations\",\n             params: {\n               :format => :json,\n               :api_client_authorization => {\n                 :owner_uuid => users(admin ? 
:admin : :active).uuid,\n                 :expires_at => desired_expiration,\n               }\n             },\n             headers: {'HTTP_AUTHORIZATION' => \"Bearer #{token}\"}\n        assert_response 200\n        expiration_t = json_response['expires_at'].to_time\n        if admin && desired_expiration\n          assert_in_delta desired_expiration.to_f, expiration_t.to_f, 1\n        else\n          assert_in_delta (start_t + Rails.configuration.API.MaxTokenLifetime).to_f, expiration_t.to_f, 2\n        end\n\n        # Test token update\n        previous_expiration = expiration_t\n        token_uuid = json_response[\"uuid\"]\n\n        start_t = db_current_time\n        patch \"/arvados/v1/api_client_authorizations/#{token_uuid}\",\n            params: {\n              :api_client_authorization => {\n                :expires_at => desired_expiration\n              }\n            },\n            headers: {'HTTP_AUTHORIZATION' => \"Bearer #{token}\"}\n        assert_response 200\n        expiration_t = json_response['expires_at'].to_time\n        if admin && desired_expiration\n          assert_in_delta desired_expiration.to_f, expiration_t.to_f, 1\n        else\n          assert_in_delta (start_t + Rails.configuration.API.MaxTokenLifetime).to_f, expiration_t.to_f, 2\n        end\n      end\n    end\n  end\n\n  test \"get current token using salted token\" do\n    salted = salt_token(fixture: :active, remote: 'abcde')\n    get('/arvados/v1/api_client_authorizations/current',\n        params: {remote: 'abcde'},\n        headers: {'HTTP_AUTHORIZATION' => \"Bearer #{salted}\"})\n    assert_response :success\n    assert_equal(json_response['uuid'], api_client_authorizations(:active).uuid)\n    assert_equal(json_response['scopes'], ['all'])\n    assert_not_nil(json_response['expires_at'])\n    assert_nil(json_response['api_token'])\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/api_client_authorizations_scopes_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# The v1 API uses token scopes to control access to the REST API at the path\n# level.  This is enforced in the base ApplicationController, making it a\n# functional test that we can run against many different controllers.\n\nrequire 'test_helper'\n\nclass ApiTokensScopeTest < ActionDispatch::IntegrationTest\n  fixtures :all\n\n  def v1_url(*parts)\n    (['', 'arvados', 'v1'] + parts).join('/')\n  end\n\n  test \"user list token can only list users\" do\n    get_args = {params: {}, headers: auth(:active_userlist)}\n    get(v1_url('users'), **get_args)\n    assert_response :success\n    get(v1_url('users', ''), **get_args)  # Add trailing slash.\n    assert_response :success\n    get(v1_url('users', 'current'), **get_args)\n    assert_response 403\n    get(v1_url('virtual_machines'), **get_args)\n    assert_response 403\n  end\n\n  test \"narrow + wide scoped tokens for different users\" do\n    get_args = {\n      params: {\n        reader_tokens: [api_client_authorizations(:anonymous).api_token]\n      },\n      headers: auth(:active_userlist),\n    }\n    get(v1_url('users'), **get_args)\n    assert_response :success\n    get(v1_url('users', ''), **get_args)  # Add trailing slash.\n    assert_response :success\n    get(v1_url('users', 'current'), **get_args)\n    assert_response 403\n    get(v1_url('virtual_machines'), **get_args)\n    assert_response 403\n   end\n\n  test \"collections token can see exactly owned collections\" do\n    get_args = {params: {}, headers: auth(:active_all_collections)}\n    get(v1_url('collections'), **get_args)\n    assert_response 403\n    get(v1_url('collections', collections(:collection_owned_by_active).uuid), **get_args)\n    assert_response :success\n    head(v1_url('collections', collections(:collection_owned_by_active).uuid), **get_args)\n    assert_response :success\n    get(v1_url('collections', collections(:collection_owned_by_foo).uuid), **get_args)\n    assert_includes(403..404, @response.status)\n  end\n\n  test \"token with multiple scopes can use them all\" do\n    def get_token_count\n      get(v1_url('api_client_authorizations'),\n        params: {},\n        headers: auth(:active_apitokens))\n      assert_response :success\n      token_count = JSON.parse(@response.body)['items_available']\n      assert_not_nil(token_count, \"could not find token count\")\n      token_count\n    end\n    # Test the GET scope.\n    token_count = get_token_count\n    # Test the POST scope.\n    post(v1_url('api_client_authorizations'),\n      params: {api_client_authorization: {owner_uuid: users(:active).uuid}},\n      headers: auth(:active_apitokens))\n    assert_response :success\n    assert_equal(token_count + 1, get_token_count,\n                 \"token count suggests POST was not accepted\")\n    # Test other requests are denied.\n    get(v1_url('api_client_authorizations',\n               api_client_authorizations(:active_apitokens).uuid),\n        params: {}, headers: auth(:active_apitokens))\n    assert_response 403\n  end\n\n  test \"token without scope has no access\" do\n    # Logs are good for this test, because logs have relatively\n    # few access controls enforced at the model level.\n    req_args = {params: {}, headers: auth(:admin_noscope)}\n    get(v1_url('logs'), **req_args)\n    assert_response 403\n    get(v1_url('logs', logs(:noop).uuid), **req_args)\n    assert_response 403\n    post(v1_url('logs'), **req_args)\n    
assert_response 403\n  end\n\n  test \"VM login scopes work\" do\n    # A system administration script makes an API token with limited scope\n    # for virtual machines to let it see logins.\n    def vm_logins_url(name)\n      v1_url('virtual_machines', virtual_machines(name).uuid, 'logins')\n    end\n    get_args = {params: {}, headers: auth(:admin_vm)}\n    get(vm_logins_url(:testvm), **get_args)\n    assert_response :success\n    get(vm_logins_url(:testvm2), **get_args)\n    assert_includes(400..419, @response.status,\n                    \"getting testvm2 logins should have failed\")\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/bundler_version_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass BundlerVersionTest < ActionDispatch::IntegrationTest\n  test \"Bundler version matches expectations\" do\n    # The expected version range should be the latest that supports all the\n    # versions of Ruby we intend to support. This test checks that a developer\n    # doesn't accidentally update Bundler past that point.\n    expected = Gem::Dependency.new(\"\", \"~> 2.5.23\")\n    actual = Bundler.gem_version\n    assert(\n      expected.match?(\"\", actual),\n      \"Bundler version #{actual} did not match #{expected}\",\n    )\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/collections_api_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass CollectionsApiTest < ActionDispatch::IntegrationTest\n  fixtures :all\n\n  test \"should get index\" do\n    get \"/arvados/v1/collections\",\n      params: {:format => :json},\n      headers: auth(:active)\n    assert_response :success\n    assert_equal \"arvados#collectionList\", json_response['kind']\n  end\n\n  test \"get index with filters= (empty string)\" do\n    get \"/arvados/v1/collections\",\n      params: {:format => :json, :filters => ''},\n      headers: auth(:active)\n    assert_response :success\n    assert_equal \"arvados#collectionList\", json_response['kind']\n  end\n\n  test \"get index with invalid filters (array of strings) responds 422\" do\n    get \"/arvados/v1/collections\",\n      params: {\n        :format => :json,\n        :filters => ['uuid', '=', 'ad02e37b6a7f45bbe2ead3c29a109b8a+54'].to_json\n      },\n      headers: auth(:active)\n    assert_response 422\n    assert_match(/nvalid element.*not an array/, json_response['errors'].join(' '))\n  end\n\n  test \"get index with invalid filters (unsearchable column) responds 422\" do\n    get \"/arvados/v1/collections\",\n      params: {\n        :format => :json,\n        :filters => [['this_column_does_not_exist', '=', 'bogus']].to_json\n      },\n      headers: auth(:active)\n    assert_response 422\n    assert_match(/nvalid attribute/, json_response['errors'].join(' '))\n  end\n\n  test \"get index with invalid filters (invalid operator) responds 422\" do\n    get \"/arvados/v1/collections\",\n      params: {\n        :format => :json,\n        :filters => [['uuid', ':-(', 'displeased']].to_json\n      },\n      headers: auth(:active)\n    assert_response 422\n    assert_match(/nvalid operator/, json_response['errors'].join(' '))\n  end\n\n  test \"get index with invalid filters (invalid operand type) responds 422\" do\n    get \"/arvados/v1/collections\",\n      params: {\n        :format => :json,\n        :filters => [['uuid', '=', {foo: 'bar'}]].to_json\n      },\n      headers: auth(:active)\n    assert_response 422\n    assert_match(/nvalid operand type/, json_response['errors'].join(' '))\n  end\n\n  test \"get index with where= (empty string)\" do\n    get \"/arvados/v1/collections\",\n      params: {:format => :json, :where => ''},\n      headers: auth(:active)\n    assert_response :success\n    assert_equal \"arvados#collectionList\", json_response['kind']\n  end\n\n  test \"get index with select= (valid attribute)\" do\n    get \"/arvados/v1/collections\",\n      params: {\n        :format => :json,\n        :select => ['portable_data_hash'].to_json\n      },\n      headers: auth(:active)\n    assert_response :success\n    assert json_response['items'][0].keys.include?('portable_data_hash')\n    assert not(json_response['items'][0].keys.include?('uuid'))\n  end\n\n  test \"get index with select= (invalid attribute) responds 422\" do\n    get \"/arvados/v1/collections\",\n      params: {\n        :format => :json,\n        :select => ['bogus'].to_json\n      },\n      headers: auth(:active)\n    assert_response 422\n    assert_match(/Invalid attribute.*bogus/, json_response['errors'].join(' '))\n  end\n\n  test \"get index with select= (invalid attribute type) responds 422\" do\n    get \"/arvados/v1/collections\",\n      params: {\n        :format => :json,\n        :select => [['bogus']].to_json\n      },\n      headers: auth(:active)\n    
assert_response 422\n    assert_match(/Invalid attribute.*bogus/, json_response['errors'].join(' '))\n  end\n\n  test \"controller 404 response is json\" do\n    get \"/arvados/v1/thingsthatdonotexist\",\n      params: {:format => :xml},\n      headers: auth(:active)\n    assert_response 404\n    assert_equal 1, json_response['errors'].length\n    assert_equal true, json_response['errors'][0].is_a?(String)\n  end\n\n  test \"object 404 response is json\" do\n    get \"/arvados/v1/groups/zzzzz-j7d0g-o5ba971173cup4f\",\n      params: {},\n      headers: auth(:active)\n    assert_response 404\n    assert_equal 1, json_response['errors'].length\n    assert_equal true, json_response['errors'][0].is_a?(String)\n  end\n\n  test \"store collection as json\" do\n    signing_opts = {\n      key: Rails.configuration.Collections.BlobSigningKey,\n      api_token: api_token(:active),\n    }\n    signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',\n                                       signing_opts)\n    post \"/arvados/v1/collections\",\n      params: {\n        format: :json,\n        collection: \"{\\\"manifest_text\\\":\\\". #{signed_locator} 0:44:md5sum.txt\\\\n\\\",\\\"portable_data_hash\\\":\\\"ad02e37b6a7f45bbe2ead3c29a109b8a+54\\\"}\"\n      },\n      headers: auth(:active)\n    assert_response 200\n    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']\n  end\n\n  test \"store collection with manifest_text only\" do\n    signing_opts = {\n      key: Rails.configuration.Collections.BlobSigningKey,\n      api_token: api_token(:active),\n    }\n    signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',\n                                       signing_opts)\n    post \"/arvados/v1/collections\",\n      params: {\n        format: :json,\n        collection: \"{\\\"manifest_text\\\":\\\". #{signed_locator} 0:44:md5sum.txt\\\\n\\\"}\"\n      },\n      headers: auth(:active)\n    assert_response 200\n    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']\n  end\n\n  test \"store collection then update name\" do\n    signing_opts = {\n      key: Rails.configuration.Collections.BlobSigningKey,\n      api_token: api_token(:active),\n    }\n    signed_locator = Blob.sign_locator('bad42fa702ae3ea7d888fef11b46f450+44',\n                                       signing_opts)\n    post \"/arvados/v1/collections\",\n      params: {\n        format: :json,\n        collection: \"{\\\"manifest_text\\\":\\\". 
#{signed_locator} 0:44:md5sum.txt\\\\n\\\",\\\"portable_data_hash\\\":\\\"ad02e37b6a7f45bbe2ead3c29a109b8a+54\\\"}\"\n      },\n      headers: auth(:active)\n    assert_response 200\n    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']\n\n    put \"/arvados/v1/collections/#{json_response['uuid']}\",\n      params: {\n        format: :json,\n        collection: { name: \"a name\" }\n      },\n      headers: auth(:active)\n\n    assert_response 200\n    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']\n    assert_equal 'a name', json_response['name']\n\n    get \"/arvados/v1/collections/#{json_response['uuid']}\",\n      params: {format: :json},\n      headers: auth(:active)\n\n    assert_response 200\n    assert_equal 'ad02e37b6a7f45bbe2ead3c29a109b8a+54', json_response['portable_data_hash']\n    assert_equal 'a name', json_response['name']\n  end\n\n  test \"update description for a collection, and search for that description\" do\n    collection = collections(:multilevel_collection_1)\n\n    # update collection's description\n    put \"/arvados/v1/collections/#{collection['uuid']}\",\n      params: {\n        format: :json,\n        collection: { description: \"something specific\" }\n      },\n      headers: auth(:active)\n    assert_response :success\n    assert_equal 'something specific', json_response['description']\n\n    # get the collection and verify newly added description\n    get \"/arvados/v1/collections/#{collection['uuid']}\",\n      params: {format: :json},\n      headers: auth(:active)\n    assert_response 200\n    assert_equal 'something specific', json_response['description']\n\n    # search\n    search_using_filter 'specific', 1\n    search_using_filter 'not specific enough', 0\n  end\n\n  test \"create collection, update manifest, and search with filename\" do\n    # create collection\n    signed_manifest = Collection.sign_manifest_only_for_tests(\". bad42fa702ae3ea7d888fef11b46f450+44 0:44:my_test_file.txt\\n\", api_token(:active))\n    post \"/arvados/v1/collections\",\n      params: {\n        format: :json,\n        collection: {manifest_text: signed_manifest}.to_json,\n      },\n      headers: auth(:active)\n    assert_response :success\n    assert_includes json_response['manifest_text'], 'my_test_file.txt'\n\n    created = json_response\n\n    # search using the filename\n    search_using_filter 'my_test_file.txt', 1\n\n    # update the collection's manifest text\n    signed_manifest = Collection.sign_manifest_only_for_tests(\". 
bad42fa702ae3ea7d888fef11b46f450+44 0:44:my_updated_test_file.txt\\n\", api_token(:active))\n    put \"/arvados/v1/collections/#{created['uuid']}\",\n      params: {\n        format: :json,\n        collection: {manifest_text: signed_manifest}.to_json,\n      },\n      headers: auth(:active)\n    assert_response :success\n    assert_equal created['uuid'], json_response['uuid']\n    assert_includes json_response['manifest_text'], 'my_updated_test_file.txt'\n    assert_not_includes json_response['manifest_text'], 'my_test_file.txt'\n\n    # search using the new filename\n    search_using_filter 'my_updated_test_file.txt', 1\n    search_using_filter 'my_test_file.txt', 0\n    search_using_filter 'there_is_no_such_file.txt', 0\n  end\n\n  def search_using_filter search_filter, expected_items\n    get '/arvados/v1/collections',\n      params: {:filters => [['any', 'ilike', \"%#{search_filter}%\"]].to_json},\n      headers: auth(:active)\n    assert_response :success\n    response_items = json_response['items']\n    assert_not_nil response_items\n    if expected_items == 0\n      assert_empty response_items\n    else\n      refute_empty response_items\n      first_item = response_items.first\n      assert_not_nil first_item\n    end\n  end\n\n  [\n    [\"false\", false],\n    [\"0\", false],\n    [\"true\", true],\n    [\"1\", true]\n  ].each do |param, truthiness|\n    test \"include_trash=#{param.inspect} param JSON-encoded should be interpreted as include_trash=#{truthiness}\" do\n      expired_col = collections(:expired_collection)\n      assert expired_col.is_trashed\n      # Try #index first\n      post \"/arvados/v1/collections\",\n          params: {\n            :_method => 'GET',\n            :include_trash => param,\n            :filters => [['uuid', '=', expired_col.uuid]].to_json\n          },\n          headers: auth(:active)\n      assert_response :success\n      assert_not_nil json_response['items']\n      assert_equal truthiness, json_response['items'].collect {|c| c['uuid']}.include?(expired_col.uuid)\n      # Try #show next\n      post \"/arvados/v1/collections/#{expired_col.uuid}\",\n        params: {\n          :_method => 'GET',\n          :include_trash => param,\n        },\n        headers: auth(:active)\n      if truthiness\n        assert_response :success\n      else\n        assert_response 404\n      end\n    end\n  end\n\n  [\n    [\"false\", false],\n    [\"0\", false],\n    [\"true\", true],\n    [\"1\", true]\n  ].each do |param, truthiness|\n    test \"include_trash=#{param.inspect} param encoding via query string should be interpreted as include_trash=#{truthiness}\" do\n      expired_col = collections(:expired_collection)\n      assert expired_col.is_trashed\n      # Try #index first\n      get(\"/arvados/v1/collections?include_trash=#{param}&filters=#{[['uuid','=',expired_col.uuid]].to_json}\",\n          headers: auth(:active))\n      assert_response :success\n      assert_not_nil json_response['items']\n      assert_equal truthiness, json_response['items'].collect {|c| c['uuid']}.include?(expired_col.uuid)\n      # Try #show next\n      get(\"/arvados/v1/collections/#{expired_col.uuid}?include_trash=#{param}\",\n        headers: auth(:active))\n      if truthiness\n        assert_response :success\n      else\n        assert_response 404\n      end\n    end\n  end\n\n  [\n    [\"false\", false],\n    [\"0\", false],\n    [\"true\", true],\n    [\"1\", true]\n  ].each do |param, truthiness|\n    test \"include_trash=#{param.inspect} form-encoded param 
should be interpreted as include_trash=#{truthiness}\" do\n      expired_col = collections(:expired_collection)\n      assert expired_col.is_trashed\n      params = [\n        ['_method', 'GET'],\n        ['include_trash', param],\n        ['filters', [['uuid','=',expired_col.uuid]].to_json],\n      ]\n      # Try #index first\n      post \"/arvados/v1/collections\",\n        params: URI.encode_www_form(params),\n        headers: {\n          \"Content-type\" => \"application/x-www-form-urlencoded\"\n        }.update(auth(:active))\n      assert_response :success\n      assert_not_nil json_response['items']\n      assert_equal truthiness, json_response['items'].collect {|c| c['uuid']}.include?(expired_col.uuid)\n      # Try #show next\n      post \"/arvados/v1/collections/#{expired_col.uuid}\",\n        params: URI.encode_www_form([['_method', 'GET'],['include_trash', param]]),\n        headers: {\n          \"Content-type\" => \"application/x-www-form-urlencoded\"\n        }.update(auth(:active))\n      if truthiness\n        assert_response :success\n      else\n        assert_response 404\n      end\n    end\n  end\n\n  test \"create and get collection with properties\" do\n    # create collection to be searched for\n    signed_manifest = Collection.sign_manifest_only_for_tests(\". bad42fa702ae3ea7d888fef11b46f450+44 0:44:my_test_file.txt\\n\", api_token(:active))\n    post \"/arvados/v1/collections\",\n      params: {\n        format: :json,\n        collection: {manifest_text: signed_manifest}.to_json,\n      },\n      headers: auth(:active)\n    assert_response 200\n    assert_not_nil json_response['uuid']\n    assert_not_nil json_response['properties']\n    assert_empty json_response['properties']\n\n    # update collection's properties\n    put \"/arvados/v1/collections/#{json_response['uuid']}\",\n      params: {\n        format: :json,\n        collection: { properties: {'property_1' => 'value_1'} }\n      },\n      headers: auth(:active)\n    assert_response :success\n    assert_equal Hash, json_response['properties'].class, 'Collection properties attribute should be of type hash'\n    assert_equal 'value_1', json_response['properties']['property_1']\n  end\n\n  test \"create collection and update it with json encoded hash properties\" do\n    # create collection to be searched for\n    signed_manifest = Collection.sign_manifest_only_for_tests(\". 
bad42fa702ae3ea7d888fef11b46f450+44 0:44:my_test_file.txt\\n\", api_token(:active))\n    post \"/arvados/v1/collections\",\n      params: {\n        format: :json,\n        collection: {manifest_text: signed_manifest}.to_json,\n      },\n      headers: auth(:active)\n    assert_response 200\n    assert_not_nil json_response['uuid']\n    assert_not_nil json_response['properties']\n    assert_empty json_response['properties']\n\n    # update collection's properties\n    put \"/arvados/v1/collections/#{json_response['uuid']}\",\n      params: {\n        format: :json,\n        collection: {\n          properties: \"{\\\"property_1\\\":\\\"value_1\\\"}\"\n        }\n      },\n      headers: auth(:active)\n    assert_response :success\n    assert_equal Hash, json_response['properties'].class, 'Collection properties attribute should be of type hash'\n    assert_equal 'value_1', json_response['properties']['property_1']\n  end\n\n  test \"update collection with versioning enabled and using preserve_version\" do\n    Rails.configuration.Collections.CollectionVersioning = true\n    Rails.configuration.Collections.PreserveVersionIfIdle = -1 # Disable auto versioning\n\n    signed_manifest = Collection.sign_manifest_only_for_tests(\". bad42fa702ae3ea7d888fef11b46f450+44 0:44:my_test_file.txt\\n\", api_token(:active))\n    post \"/arvados/v1/collections\",\n      params: {\n        format: :json,\n        collection: {\n          name: 'Test collection',\n          manifest_text: signed_manifest,\n        }.to_json,\n      },\n      headers: auth(:active)\n    assert_response 200\n    assert_not_nil json_response['uuid']\n    assert_equal 1, json_response['version']\n    assert_equal false, json_response['preserve_version']\n\n    # Versionable update including preserve_version=true should create a new\n    # version that will also be persisted.\n    put \"/arvados/v1/collections/#{json_response['uuid']}\",\n      params: {\n        format: :json,\n        collection: {\n          name: 'Test collection v2',\n          preserve_version: true,\n        }.to_json,\n      },\n      headers: auth(:active)\n    assert_response 200\n    assert_equal 2, json_response['version']\n    assert_equal true, json_response['preserve_version']\n\n    # 2nd versionable update including preserve_version=true should create a new\n    # version that will also be persisted.\n    put \"/arvados/v1/collections/#{json_response['uuid']}\",\n      params: {\n        format: :json,\n        collection: {\n          name: 'Test collection v3',\n          preserve_version: true,\n        }.to_json,\n      },\n      headers: auth(:active)\n    assert_response 200\n    assert_equal 3, json_response['version']\n    assert_equal true, json_response['preserve_version']\n\n    # 3rd versionable update without including preserve_version should create a new\n    # version that will have its preserve_version attr reset to false.\n    put \"/arvados/v1/collections/#{json_response['uuid']}\",\n      params: {\n        format: :json,\n        collection: {\n          name: 'Test collection v4',\n        }.to_json,\n      },\n      headers: auth(:active)\n    assert_response 200\n    assert_equal 4, json_response['version']\n    assert_equal false, json_response['preserve_version']\n\n    # 4th versionable update without including preserve_version=true should NOT\n    # create a new version.\n    put \"/arvados/v1/collections/#{json_response['uuid']}\",\n      params: {\n        format: :json,\n        collection: {\n          name: 'Test 
collection v5?',\n        }.to_json,\n      },\n      headers: auth(:active)\n    assert_response 200\n    assert_equal 4, json_response['version']\n    assert_equal false, json_response['preserve_version']\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/collections_performance_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'safe_json'\nrequire 'test_helper'\nrequire 'helpers/manifest_examples'\nrequire 'helpers/time_block'\n\nclass CollectionsApiPerformanceTest < ActionDispatch::IntegrationTest\n  include ManifestExamples\n\n  slow_test \"crud cycle for a collection with a big manifest\" do\n    bigmanifest = time_block 'make example' do\n      make_manifest(streams: 100,\n                    files_per_stream: 100,\n                    blocks_per_file: 20,\n                    bytes_per_block: 2**26,\n                    api_token: api_token(:active))\n    end\n    json = time_block \"JSON encode #{bigmanifest.length>>20}MiB manifest\" do\n      SafeJSON.dump({\"manifest_text\" => bigmanifest})\n    end\n    time_block 'create' do\n      post '/arvados/v1/collections',\n        params: {collection: json},\n        headers: auth(:active)\n      assert_response :success\n    end\n    uuid = json_response['uuid']\n    time_block 'read' do\n      get '/arvados/v1/collections/' + uuid, params: {}, headers: auth(:active)\n      assert_response :success\n    end\n    time_block 'list' do\n      get '/arvados/v1/collections',\n        params: {select: ['manifest_text'], filters: [['uuid', '=', uuid]].to_json},\n        headers: auth(:active)\n      assert_response :success\n    end\n    time_block 'update' do\n      put '/arvados/v1/collections/' + uuid,\n        params: {collection: json},\n        headers: auth(:active)\n      assert_response :success\n    end\n    time_block 'delete' do\n      delete '/arvados/v1/collections/' + uuid, params: {}, headers: auth(:active)\n    end\n  end\n\n  slow_test \"memory usage\" do\n    hugemanifest = make_manifest(streams: 1,\n                                 files_per_stream: 2000,\n                                 blocks_per_file: 200,\n                                 bytes_per_block: 2**26,\n                                 api_token: api_token(:active))\n    json = time_block \"JSON encode #{hugemanifest.length>>20}MiB manifest\" do\n      SafeJSON.dump({manifest_text: hugemanifest})\n    end\n    vmpeak \"post\" do\n      post '/arvados/v1/collections',\n        params: {collection: json},\n        headers: auth(:active)\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/computed_permissions_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass ComputedPermissionsTest < ActionDispatch::IntegrationTest\n  include DbCurrentTime\n  fixtures :users, :groups, :api_client_authorizations, :collections\n\n  test \"non-admin forbidden\" do\n    get \"/arvados/v1/computed_permissions\",\n      params: {:format => :json},\n      headers: auth(:active)\n    assert_response 403\n  end\n\n  test \"admin get permission for specified user\" do\n    get \"/arvados/v1/computed_permissions\",\n      params: {\n        :format => :json,\n        :filters => [['user_uuid', '=', users(:active).uuid]].to_json,\n      },\n      headers: auth(:admin)\n    assert_response :success\n    assert_equal users(:active).uuid, json_response['items'][0]['user_uuid']\n    assert_nil json_response['count']\n  end\n\n  test \"admin get implicit permission for specified user and target\" do\n    get \"/arvados/v1/computed_permissions\",\n      params: {\n        :format => :json,\n        :filters => [\n          ['user_uuid', '=', users(:active).uuid],\n          ['target_uuid', '=', groups(:private).uuid],\n        ].to_json,\n      },\n      headers: auth(:admin)\n    assert_response :success\n    assert_equal 1, json_response['items'].length\n    assert_equal users(:active).uuid, json_response['items'][0]['user_uuid']\n    assert_equal groups(:private).uuid, json_response['items'][0]['target_uuid']\n    assert_equal 'can_manage', json_response['items'][0]['perm_level']\n  end\n\n  test \"reject count=exact\" do\n    get \"/arvados/v1/computed_permissions\",\n      params: {\n        :format => :json,\n        :count => 'exact',\n      },\n      headers: auth(:admin)\n    assert_response 422\n  end\n\n  test \"reject offset>0\" do\n    get \"/arvados/v1/computed_permissions\",\n      params: {\n        :format => :json,\n        :offset => 7,\n      },\n      headers: auth(:admin)\n    assert_response 422\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/container_auth_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass ContainerAuthTest < ActionDispatch::IntegrationTest\n  fixtures :all\n\n  test \"container token validate, Running, regular auth\" do\n    get \"/arvados/v1/containers/current\",\n      params: {:format => :json},\n      headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}\"}\n    # Container is Running, token can be used\n    assert_response :success\n    assert_equal containers(:running).uuid, json_response['uuid']\n  end\n\n  test \"container token validate, Locked, runtime_token\" do\n    get \"/arvados/v1/containers/current\",\n      params: {:format => :json},\n      headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:container_runtime_token).token}/#{containers(:runtime_token).uuid}\"}\n    # Container is Running, token can be used\n    assert_response :success\n    assert_equal containers(:runtime_token).uuid, json_response['uuid']\n  end\n\n  test \"container token validate, Cancelled, runtime_token\" do\n    put \"/arvados/v1/containers/#{containers(:runtime_token).uuid}\",\n      params: {\n        :format => :json,\n        :container => {:state => \"Cancelled\"}\n      },\n      headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:system_user).token}\"}\n    assert_response :success\n    get \"/arvados/v1/containers/current\",\n      params: {:format => :json},\n      headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:container_runtime_token).token}/#{containers(:runtime_token).uuid}\"}\n    # Container is Queued, token cannot be used\n    assert_response 401\n  end\n\n  test \"container token validate, Running, without optional portion\" do\n    get \"/arvados/v1/containers/current\",\n      params: {:format => :json},\n      headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:running_container_auth).token}\"}\n    # Container is Running, token can be used\n    assert_response :success\n    assert_equal containers(:running).uuid, json_response['uuid']\n  end\n\n  test \"container token validate, Locked, runtime_token, without optional portion\" do\n    get \"/arvados/v1/containers/current\",\n      params: {:format => :json},\n      headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:container_runtime_token).token}\"}\n    # runtime_token without container uuid won't return 'current'\n    assert_response 404\n  end\n\n  test \"container token validate, wrong container uuid\" do\n    get \"/arvados/v1/containers/current\",\n      params: {:format => :json},\n      headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:container_runtime_token).token}/#{containers(:running).uuid}\"}\n    # Container uuid mismatch, token can't be used\n    assert_response 401\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/container_dispatch_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass ContainerDispatchTest < ActionDispatch::IntegrationTest\n  test \"lock container with SystemRootToken\" do\n    Rails.configuration.SystemRootToken = \"xyzzy-SystemRootToken\"\n    authheaders = {'HTTP_AUTHORIZATION' => \"Bearer \"+Rails.configuration.SystemRootToken}\n    get(\"/arvados/v1/api_client_authorizations/current\",\n        headers: authheaders)\n    assert_response 200\n\n    system_auth_uuid = json_response['uuid']\n    post(\"/arvados/v1/containers/#{containers(:queued).uuid}/lock\",\n         headers: authheaders)\n    assert_response 200\n    assert_equal system_auth_uuid, Container.find_by_uuid(containers(:queued).uuid).locked_by_uuid\n\n    get(\"/arvados/v1/containers\",\n        params: {filters: SafeJSON.dump([['locked_by_uuid', '=', system_auth_uuid]])},\n        headers: authheaders)\n    assert_response 200\n    assert_equal containers(:queued).uuid, json_response['items'][0]['uuid']\n    assert_equal system_auth_uuid, json_response['items'][0]['locked_by_uuid']\n\n    post(\"/arvados/v1/containers/#{containers(:queued).uuid}/unlock\",\n         headers: authheaders)\n    assert_response 200\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/container_request_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass ContainerRequestIntegrationTest < ActionDispatch::IntegrationTest\n\n  test \"test colon in input\" do\n    # Tests for bug #15311 where strings with leading colons get\n    # corrupted when the leading \":\" is stripped.\n    val = {\"itemSeparator\" => \":\"}\n    post \"/arvados/v1/container_requests\",\n      params: {\n        :container_request => {\n          :name => \"workflow\",\n          :state => \"Committed\",\n          :command => [\"echo\"],\n          :container_image => \"fa3c1a9cb6783f85f2ecda037e07b8c3+167\",\n          :output_path => \"/foo\",\n          :priority => 1,\n          :runtime_constraints => {\"vcpus\" => 1, \"ram\" => 1},\n          :mounts => {\n            \"/foo\" => {\n              :kind => \"json\",\n              :content => JSON.parse(SafeJSON.dump(val)),\n            }\n          }\n        }\n      }.to_json,\n      headers: {\n        'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:active).api_token}\",\n        'CONTENT_TYPE' => 'application/json'\n      }\n    assert_response :success\n    assert_equal \"arvados#containerRequest\", json_response['kind']\n    assert_equal val, json_response['mounts']['/foo']['content']\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/credentials_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass CredentialsApiTest < ActionDispatch::IntegrationTest\n  fixtures :all\n\n  def credential_create_helper\n    post \"/arvados/v1/credentials\",\n         params: {:format => :json,\n                  credential: {\n                    name: \"test credential\",\n                    description: \"the credential for test\",\n                    credential_class: \"basic_auth\",\n                    external_id: \"my_username\",\n                    secret: \"my_password\",\n                    expires_at: Time.now+2.weeks\n                  }\n                 },\n         headers: auth(:active),\n         as: :json\n    assert_response :success\n    json_response\n  end\n\n  def test_credential\n    {\n      name: \"test credential\" + rand(100000).to_s,\n      description: \"the credential for test\",\n      credential_class: \"basic_auth\",\n      external_id: \"my_username\",\n      secret: \"my_password\",\n      expires_at: Time.now+2.weeks\n    }\n  end\n\n  test \"credential create and query\" do\n    jr = credential_create_helper\n\n    # fields other than secret is are returned by the API\n    assert_equal \"test credential\", jr[\"name\"]\n    assert_equal \"the credential for test\", jr[\"description\"]\n    assert_equal \"basic_auth\", jr[\"credential_class\"]\n    assert_equal \"my_username\", jr[\"external_id\"]\n    assert_nil jr[\"secret\"]\n\n    # secret is not returned by the API\n    get \"/arvados/v1/credentials/#{jr['uuid']}\", headers: auth(:active)\n    assert_response :success\n    jr = json_response\n    assert_equal \"test credential\", jr[\"name\"]\n    assert_equal \"the credential for test\", jr[\"description\"]\n    assert_equal \"basic_auth\", jr[\"credential_class\"]\n    assert_equal \"my_username\", jr[\"external_id\"]\n    assert_nil jr[\"secret\"]\n\n    # can get credential from the database and it has the password\n    assert_equal \"my_password\", Credential.find_by_uuid(jr[\"uuid\"]).secret\n\n    # secret cannot appear in queries\n    get \"/arvados/v1/credentials\",\n        params: {:format => :json,\n                 :filters => [[\"secret\", \"=\", \"my_password\"]].to_json,\n                },\n        headers: auth(:active)\n    assert_response 403\n    assert_match(/Cannot filter on 'secret'/, json_response[\"errors\"][0])\n\n    get \"/arvados/v1/credentials\",\n        params: {:format => :json,\n                 :where => {secret: \"my_password\"}.to_json\n                },\n        headers: auth(:active)\n    assert_response 403\n    assert_match(/Cannot use 'secret' in where clause/, json_response[\"errors\"][0])\n\n    get \"/arvados/v1/credentials\",\n        params: {:format => :json,\n                 :order => [\"secret\"].to_json\n                },\n        headers: auth(:active)\n    assert_response 403\n    assert_match(/Cannot order by 'secret'/, json_response[\"errors\"][0])\n\n    get \"/arvados/v1/credentials\",\n        params: {:format => :json,\n                 :where => {any: \"my_password\"}.to_json\n                },\n        headers: auth(:active)\n    assert_response 200\n    assert_equal [], json_response[\"items\"]\n\n    get \"/arvados/v1/credentials\",\n        params: {:format => :json,\n                 :filters => [[\"any\", \"=\", \"my_password\"]].to_json\n                },\n        headers: auth(:active)\n    assert_response 200\n    assert_equal [], 
json_response[\"items\"]\n\n    get \"/arvados/v1/credentials\",\n        params: {:format => :json,\n                 :filters => [[\"any\", \"ilike\", \"my_pass%\"]].to_json\n                },\n        headers: auth(:active)\n    assert_response 200\n    assert_equal [], json_response[\"items\"]\n\n  end\n\n  test \"credential fetch by container\" do\n    jr = credential_create_helper\n\n    # cannot fetch secret using a regular token\n    get \"/arvados/v1/credentials/#{jr['uuid']}/secret\", headers: auth(:active)\n    assert_response 403\n\n    get \"/arvados/v1/credentials/#{jr['uuid']}/secret\",\n        headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}\"}\n    assert_response :success\n    assert_equal \"my_password\", json_response[\"secret\"]\n\n    lg = Log.where(object_uuid: jr['uuid'], event_type: \"secret_access\").first\n    assert_equal jr[\"name\"], lg[\"properties\"][\"name\"]\n    assert_equal jr[\"credential_class\"], lg[\"properties\"][\"credential_class\"]\n    assert_equal jr[\"external_id\"], lg[\"properties\"][\"external_id\"]\n  end\n\n  test \"credential owned by admin\" do\n    post \"/arvados/v1/credentials\",\n         params: {:format => :json,\n                  credential: {\n                    name: \"test credential\",\n                    description: \"the credential for test\",\n                    credential_class: \"basic_auth\",\n                    external_id: \"my_username\",\n                    secret: \"my_password\",\n                    expires_at: Time.now+2.weeks\n                  }\n                 },\n         headers: auth(:admin),\n         as: :json\n    assert_response :success\n    jr = json_response\n\n    # cannot fetch secret using a regular token, even by admin\n    get \"/arvados/v1/credentials/#{jr['uuid']}/secret\", headers: auth(:admin)\n    assert_response 403\n\n    # user 'active' can't see it\n    get \"/arvados/v1/credentials/#{jr['uuid']}\", headers: auth(:active)\n    assert_response 404\n\n    # not readable by container run by 'active' user returns a 404\n    # here like the previous check because the credential itself isn't\n    # considered visible to the user\n    get \"/arvados/v1/credentials/#{jr['uuid']}/secret\",\n        headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}\"}\n    assert_response 404\n  end\n\n  test \"credential sharing\" do\n    post \"/arvados/v1/credentials\",\n         params: {:format => :json,\n                  credential: {\n                    name: \"test credential\",\n                    description: \"the credential for test\",\n                    credential_class: \"basic_auth\",\n                    external_id: \"my_username\",\n                    secret: \"my_password\",\n                    expires_at: Time.now+2.weeks\n                  }\n                 },\n         headers: auth(:admin),\n         as: :json\n    assert_response :success\n    jr = json_response\n\n    # user 'active' can't see it\n    get \"/arvados/v1/credentials/#{jr['uuid']}\", headers: auth(:active)\n    assert_response 404\n\n    # not readable by container run by 'active' user returns a 404\n    # here like the previous check because the credential itself isn't\n    # considered visible to the user\n    get \"/arvados/v1/credentials/#{jr['uuid']}/secret\",\n        headers: {'HTTP_AUTHORIZATION' => \"Bearer 
#{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}\"}\n    assert_response 404\n\n    # active user can't share\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: users(:active).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: jr[\"uuid\"],\n          properties: {}\n        }\n      },\n      headers: auth(:active)\n    assert_response 422\n\n    # admin can share\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: users(:active).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: jr[\"uuid\"],\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n\n    # now the 'active' user can read it\n    get \"/arvados/v1/credentials/#{jr['uuid']}/secret\",\n        headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}\"}\n    assert_response :success\n  end\n\n  test \"credential expiration\" do\n    post \"/arvados/v1/credentials\",\n         params: {:format => :json,\n                  credential: {\n                    name: \"test credential\",\n                    description: \"the credential for test\",\n                    credential_class: \"basic_auth\",\n                    external_id: \"my_username\",\n                    secret: \"my_password\",\n                    expires_at: Time.now+5.seconds\n                  }\n                 },\n         headers: auth(:active),\n         as: :json\n    assert_response :success\n    jr = json_response\n\n    get \"/arvados/v1/credentials/#{jr['uuid']}/secret\",\n        headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}\"}\n    assert_response :success\n    assert_equal \"my_username\", json_response[\"external_id\"]\n    assert_equal \"my_password\", json_response[\"secret\"]\n\n    assert_equal \"my_password\", Credential.find_by_uuid(jr[\"uuid\"]).secret\n\n    Credential.where(uuid: jr[\"uuid\"]).update_all(expires_at: Time.now)\n\n    get \"/arvados/v1/credentials/#{jr['uuid']}/secret\",\n        headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:running_container_auth).token}/#{containers(:running).uuid}\"}\n    assert_response 403\n    assert_match(/Credential has expired/, json_response[\"errors\"][0])\n\n    post \"/sys/trash_sweep\",\n      headers: auth(:admin)\n    assert_response :success\n\n    assert_equal \"\", Credential.find_by_uuid(jr[\"uuid\"]).secret\n  end\n\n  test \"credential names are unique\" do\n    post \"/arvados/v1/credentials\",\n         params: {:format => :json,\n                  credential: {\n                    name: \"test credential\",\n                    description: \"the credential for test\",\n                    credential_class: \"basic_auth\",\n                    external_id: \"my_username\",\n                    secret: \"my_password\",\n                    expires_at: Time.now+2.weeks\n                  }\n                 },\n         headers: auth(:active),\n         as: :json\n    assert_response :success\n\n    post \"/arvados/v1/credentials\",\n         params: {:format => :json,\n                  credential: {\n                    name: \"test credential\",\n                    description: \"the 
credential for test\",\n                    credential_class: \"basic_auth\",\n                    external_id: \"my_username\",\n                    secret: \"my_password\",\n                    expires_at: Time.now+2.weeks\n                  }\n                 },\n         headers: auth(:active),\n         as: :json\n    assert_response 422\n    assert_match(/RecordNotUnique/, json_response[\"errors\"][0])\n  end\n\n  test \"credential name cannot be empty or only spaces/tabs\" do\n    [\"\", \"   \", \"\\t\\t\"].each do |bad_name|\n      post \"/arvados/v1/credentials\",\n           params: {:format => :json,\n                    credential: {\n                      name: bad_name,\n                      description: \"the credential for test\",\n                      credential_class: \"basic_auth\",\n                      external_id: \"my_username\",\n                      secret: \"my_password\",\n                      expires_at: Time.now+2.weeks\n                    }\n                   },\n           headers: auth(:active),\n           as: :json\n      assert_response 422\n      assert_includes json_response[\"errors\"].first, \"Name can't be blank\"\n    end\n  end\n\n  test \"credential required fields must be set\" do\n    field_error_msg_hash = {\n      name: \"Name\",\n      credential_class: \"Credential class\",\n      external_id: \"External\",\n      secret: \"Secret\",\n      expires_at: \"Expires at\",\n    }\n\n    field_error_msg_hash.each do |field, error_msg|\n\n      post \"/arvados/v1/credentials\",\n        params: {\n          format: :json,\n          credential: test_credential.except(field)\n        },\n        headers: auth(:active),\n        as: :json\n\n      assert_response 422\n      assert_match(\n        /^#{error_msg} can't be blank \\(req-[^)]+\\)$/,\n        json_response[\"errors\"].first,\n        \"Expected validation error for missing field '#{field}'\"\n      )\n    end\n  end\n\n  test \"credential scopes must be an array of strings or nil\" do\n    [nil, [], [\"scope1\", \"scope2\"]].each do |good_scopes|\n      post \"/arvados/v1/credentials\",\n           params: {:format => :json,\n                    credential: test_credential.merge(scopes: good_scopes)\n                   },\n           headers: auth(:active),\n           as: :json\n      assert_response 200\n    end\n\n    [\"not an array\", [ \"valid_scope\", 123 ], [ \"valid_scope\", [\"nested_array\"] ] ].each do |bad_scopes|\n      post \"/arvados/v1/credentials\",\n           params: {:format => :json,\n                    credential: test_credential.merge(scopes: bad_scopes)\n                   },\n           headers: auth(:active),\n           as: :json\n      assert_response 422\n      assert_match(/Scopes must be an array/, json_response[\"errors\"][0])\n    end\n  end\n\n  [\n    {\n      body: {\n        name: \"valid scopes for arv:aws_access_key\",\n        credential_class: \"arv:aws_access_key\",\n        scopes: [\"s3://my-bucket\", \"s3://*\"],\n      },\n      error_msg: nil\n    },\n    {\n      body: {\n        name: \"nil scopes for arv:aws_access_key\",\n        credential_class: \"arv:aws_access_key\",\n        scopes: nil,\n      },\n      error_msg: /Scopes cannot be blank for credential class arv:aws_access_key/\n    },\n    {\n      body: {\n        name: \"empty scopes for arv:aws_access_key\",\n        credential_class: \"arv:aws_access_key\",\n        scopes: [],\n      },\n      error_msg: /Scopes cannot be blank for credential class 
arv:aws_access_key/\n    },\n    {\n      body: {\n        name: \"invalid scopes for arv:aws_access_key\",\n        credential_class: \"arv:aws_access_key\",\n        scopes: [\"invalid-scope\", \"s3://another-bucket\"],\n      },\n      error_msg: /Scopes not valid for credential class arv:aws_access_key: invalid-scope/\n    },\n    {\n      body: {\n        name: \"not implemented credential_class\",\n        credential_class: \"arv:not_implemented_credential_class\",\n        scopes: [\"totally-valid-scope-name\"],\n      },\n      error_msg: /Credential class arv:not_implemented_credential_class is not implemented/\n    },\n    {\n      body: {\n        name: \"conflicting credential_class without arv: prefix\",\n        credential_class: \"aws_access_key\", # without arv: prefix\n        scopes: [\"s3://my-bucket\"],\n      },\n      error_msg: /Credential class aws_access_key conflicts with reserved credential class arv:aws_access_key/\n    }\n  ].each do |tc|\n    test \"credential validation for case: #{tc[:body][:name]}\" do\n      post \"/arvados/v1/credentials\",\n           params: {:format => :json,\n                    credential: test_credential.merge(tc[:body])\n                   },\n           headers: auth(:active),\n           as: :json\n      if tc[:error_msg].nil?\n        assert_response :success\n      else\n        assert_response 422\n        assert_match(tc[:error_msg], json_response[\"errors\"][0])\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/cross_origin_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass CrossOriginTest < ActionDispatch::IntegrationTest\n  def options path, **kwargs\n    # Rails doesn't support OPTIONS the same way as GET, POST, etc.\n    reset! unless integration_session\n    integration_session.__send__(:process, :options, path, **kwargs).tap do\n      copy_session_variables!\n    end\n  end\n\n  %w(/login /logout /auth/example/callback /auth/joshid).each do |path|\n    test \"OPTIONS requests are refused at #{path}\" do\n      options path, params: {}, headers: {}\n      assert_no_cors_headers\n    end\n\n    test \"CORS headers do not exist at GET #{path}\" do\n      get path, params: {}, headers: {}\n      assert_no_cors_headers\n    end\n  end\n\n  %w(/discovery/v1/apis/arvados/v1/rest).each do |path|\n    test \"CORS headers are set at GET #{path}\" do\n      get path, params: {}, headers: {}\n      assert_response :success\n      assert_cors_headers\n    end\n  end\n\n  ['/arvados/v1/collections',\n   '/arvados/v1/users',\n   '/arvados/v1/api_client_authorizations'].each do |path|\n    test \"CORS headers are set and body is empty at OPTIONS #{path}\" do\n      options path, params: {}, headers: {}\n      assert_response :success\n      assert_cors_headers\n      assert_equal '', response.body\n    end\n\n    test \"CORS headers are set at authenticated GET #{path}\" do\n      get path, params: {}, headers: auth(:active_trustedclient)\n      assert_response :success\n      assert_cors_headers\n    end\n\n    # CORS headers are OK only if cookies are *not* used to determine\n    # whether a transaction is allowed. The following is a (far from\n    # perfect) test that the usual Rails cookie->session mechanism\n    # does not grant access to any resources.\n    ['GET', 'POST'].each do |method|\n      test \"Session does not work at #{method} #{path}\" do\n        send method.downcase, path, params: {format: 'json'}, headers: {user_id: 1}\n        assert_response 401\n        assert_cors_headers\n      end\n    end\n  end\n\n  protected\n  def assert_cors_headers\n    assert_equal '*', response.headers['Access-Control-Allow-Origin']\n    allowed = response.headers['Access-Control-Allow-Methods'].split(', ')\n    %w(GET HEAD POST PUT DELETE).each do |m|\n      assert_includes allowed, m, \"A-C-A-Methods should include #{m}\"\n    end\n    assert_equal 'Authorization, Content-Type', response.headers['Access-Control-Allow-Headers']\n  end\n\n  def assert_no_cors_headers\n    response.headers.keys.each do |h|\n      assert_no_match(/^Access-Control-/i, h)\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/database_reset_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass DatabaseResetTest < ActionDispatch::IntegrationTest\n  slow_test \"reset fails when Rails.env != 'test'\" do\n    rails_env_was = Rails.env\n    begin\n      Rails.env = 'production'\n      Rails.application.reload_routes!\n      post '/database/reset', params: {}, headers: auth(:admin)\n      assert_response 404\n    ensure\n      Rails.env = rails_env_was\n      Rails.application.reload_routes!\n    end\n  end\n\n  test \"reset fails with non-admin token\" do\n    post '/database/reset', params: {}, headers: auth(:active)\n    assert_response 403\n  end\n\n  slow_test \"database reset doesn't break basic CRUD operations\" do\n    active_auth = auth(:active)\n    admin_auth = auth(:admin)\n\n    authorize_with :admin\n    post '/database/reset', params: {}, headers: admin_auth\n    assert_response :success\n\n    post '/arvados/v1/collections', params: {collection: '{}'}, headers: active_auth\n    assert_response :success\n    new_uuid = json_response['uuid']\n\n    get '/arvados/v1/collections/'+new_uuid, params: {}, headers: active_auth\n    assert_response :success\n\n    put('/arvados/v1/collections/'+new_uuid,\n      params: {collection: '{\"properties\":{}}'},\n      headers: active_auth)\n    assert_response :success\n\n    delete '/arvados/v1/collections/'+new_uuid, params: {}, headers: active_auth\n    assert_response :success\n\n    get '/arvados/v1/collections/'+new_uuid, params: {}, headers: active_auth\n    assert_response 404\n  end\n\n  slow_test \"roll back database change\" do\n    active_auth = auth(:active)\n    admin_auth = auth(:admin)\n\n    old_uuid = collections(:collection_owned_by_active).uuid\n    authorize_with :admin\n    post '/database/reset', params: {}, headers: admin_auth\n    assert_response :success\n\n    delete '/arvados/v1/collections/' + old_uuid, params: {}, headers: active_auth\n    assert_response :success\n    post '/arvados/v1/collections', params: {collection: '{}'}, headers: active_auth\n    assert_response :success\n    new_uuid = json_response['uuid']\n\n    # Reset to fixtures.\n    post '/database/reset', params: {}, headers: admin_auth\n    assert_response :success\n\n    # New collection should disappear. Old collection should reappear.\n    get '/arvados/v1/collections/'+new_uuid, params: {}, headers: active_auth\n    assert_response 404\n    get '/arvados/v1/collections/'+old_uuid, params: {}, headers: active_auth\n    assert_response :success\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/discovery_document_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass DiscoveryDocumentTest < ActionDispatch::IntegrationTest\n  CANONICAL_FIELDS = [\n    \"auth\",\n    \"basePath\",\n    \"batchPath\",\n    \"description\",\n    \"discoveryVersion\",\n    \"documentationLink\",\n    \"id\",\n    \"kind\",\n    \"name\",\n    \"parameters\",\n    \"protocol\",\n    \"resources\",\n    \"revision\",\n    \"schemas\",\n    \"servicePath\",\n    \"title\",\n    \"version\",\n  ]\n\n  test \"canonical discovery document is saved to checkout\" do\n    get \"/discovery/v1/apis/arvados/v1/rest\"\n    assert_response :success\n    canonical = Hash[CANONICAL_FIELDS.map { |key| [key, json_response[key]] }]\n    missing = canonical.select { |key| canonical[key].nil? }\n    assert(missing.empty?, \"discovery document missing required fields\")\n    actual_json = JSON.pretty_generate(canonical)\n    # Check committed copies of the discovery document that support code or\n    # documentation generation for other Arvados components.\n    bad_copies = [\n      \"sdk/python/arvados-v1-discovery.json\",\n      \"contrib/R-sdk/arvados-v1-discovery.json\",\n    ].filter_map do |rel_path|\n      src_path = Rails.root.join(\"..\", \"..\", rel_path)\n      begin\n        expected_json = File.open(src_path) { |f| f.read }\n      rescue Errno::ENOENT\n        expected_json = \"(#{src_path} not found)\"\n      end\n      if expected_json == actual_json\n        nil\n      else\n        src_path\n      end\n    end.to_a\n    if bad_copies.any?\n      out_path = Rails.root.join(\"tmp\", \"test-arvados-v1-discovery.json\")\n      File.open(out_path, \"w\") { |f| f.write(actual_json) }\n    end\n    assert_equal([], bad_copies,\n                 \"Live discovery document did not match the copies at:\\n\" +\n                 bad_copies.map { |path| \" * #{path}\\n\" }.join(\"\") +\n                 \"If the live version is correct, copy it to these paths by running:\\n\" +\n                 bad_copies.map { |path| \"   cp #{out_path} #{path}\\n\"}.join(\"\"))\n  end\n\n  test \"all methods have full descriptions\" do\n    get \"/discovery/v1/apis/arvados/v1/rest\"\n    assert_response :success\n    missing = []\n    def missing.check(name, key, spec)\n      self << \"#{name} #{key}\" if spec[key].blank?\n    end\n\n    Enumerator::Chain.new(\n      *json_response[\"resources\"].map { |_, res| res[\"methods\"].each_value }\n    ).each do |method|\n      method_name = method[\"id\"]\n      missing.check(method_name, \"description\", method)\n      method[\"parameters\"].andand.each_pair do |param_name, param|\n        missing.check(\"#{method_name} #{param_name} parameter\", \"description\", param)\n      end\n    end\n\n    json_response[\"schemas\"].each_pair do |schema_name, schema|\n      missing.check(schema_name, \"description\", schema)\n      schema[\"properties\"].andand.each_pair do |prop_name, prop|\n        missing.check(\"#{schema_name} #{prop_name} property\", \"description\", prop)\n      end\n    end\n\n    assert_equal(\n      missing, [],\n      \"named methods and schemas are missing documentation\",\n    )\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/errors_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass ErrorsTest < ActionDispatch::IntegrationTest\n  fixtures :api_client_authorizations\n\n  %w(/arvados/v1/shoes /arvados/shoes /shoes /nodes /users).each do |path|\n    test \"non-existent route #{path}\" do\n      get path, params: {:format => :json}, headers: auth(:active)\n      assert_nil assigns(:objects)\n      assert_nil assigns(:object)\n      assert_not_nil json_response['errors']\n      assert_response 404\n      assert_match /^req-[0-9a-zA-Z]{20}$/, response.headers['X-Request-Id']\n    end\n  end\n\n  n=0\n  Rails.application.routes.routes.each do |route|\n    test \"route #{n += 1} '#{route.path.spec.to_s}' is not an accident\" do\n      # Generally, new routes should appear under /arvados/v1/. If\n      # they appear elsewhere, that might have been caused by default\n      # rails generator behavior that we don't want.\n      assert_match(/^\\/(|\\*a|arvados\\/v1\\/.*|auth\\/.*|login|logout|database\\/reset|discovery\\/.*|static\\/.*|sys\\/trash_sweep|themes\\/.*|assets|_health\\/.*|metrics)(\\(\\.:format\\))?$/,\n                   route.path.spec.to_s,\n                   \"Unexpected new route: #{route.path.spec}\")\n    end\n  end\n\n  test \"X-Request-Id header\" do\n    get \"/\", headers: auth(:spectator)\n    assert_match /^req-[0-9a-zA-Z]{20}$/, response.headers['X-Request-Id']\n  end\n\n  test \"X-Request-Id header on non-existant object URL\" do\n    get \"/arvados/v1/container_requests/invalid\",\n      params: {:format => :json}, headers: auth(:active)\n    assert_response 404\n    assert_match /^req-[0-9a-zA-Z]{20}$/, response.headers['X-Request-Id']\n  end\n\n  # The response header is the one that gets logged, so this test also\n  # ensures we log the ID supplied in the request, if any.\n  test \"X-Request-Id given by client\" do\n    get \"/\", headers: auth(:spectator).merge({'X-Request-Id': 'abcdefG'})\n    assert_equal 'abcdefG', response.headers['X-Request-Id']\n  end\n\n  test \"X-Request-Id given by client is ignored if too long\" do\n    authorize_with :spectator\n    long_reqId = 'abcdefG' * 1000\n    get \"/\", headers: auth(:spectator).merge({'X-Request-Id': long_reqId})\n    assert_match /^req-[0-9a-zA-Z]{20}$/, response.headers['X-Request-Id']\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/groups_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass GroupsTest < ActionDispatch::IntegrationTest\n  [[], ['replication_confirmed']].each do |orders|\n    test \"results are consistent when provided orders #{orders} is incomplete\" do\n      last = nil\n      (0..20).each do\n        get '/arvados/v1/groups/contents',\n          params: {\n            id: groups(:aproject).uuid,\n            filters: [[\"uuid\", \"is_a\", \"arvados#collection\"]].to_json,\n            orders: orders.to_json,\n            format: :json,\n          },\n          headers: auth(:active)\n        assert_response :success\n        if last.nil?\n          last = json_response['items']\n        else\n          assert_equal last, json_response['items']\n        end\n      end\n    end\n  end\n\n  test \"get all pages of group-owned objects\" do\n    limit = 5\n    offset = 0\n    items_available = nil\n    uuid_received = {}\n    owner_received = {}\n    while true\n      get \"/arvados/v1/groups/contents\",\n        params: {\n          id: groups(:aproject).uuid,\n          limit: limit,\n          offset: offset,\n          format: :json,\n        },\n        headers: auth(:active)\n\n      assert_response :success\n      assert_operator(0, :<, json_response['items'].count,\n                      \"items_available=#{items_available} but received 0 \"\\\n                      \"items with offset=#{offset}\")\n      items_available ||= json_response['items_available']\n      assert_equal(items_available, json_response['items_available'],\n                   \"items_available changed between page #{offset/limit} \"\\\n                   \"and page #{1+offset/limit}\")\n      json_response['items'].each do |item|\n        uuid = item['uuid']\n        assert_equal(nil, uuid_received[uuid],\n                     \"Received '#{uuid}' again on page #{1+offset/limit}\")\n        uuid_received[uuid] = true\n        owner_received[item['owner_uuid']] = true\n        offset += 1\n        assert_equal groups(:aproject).uuid, item['owner_uuid']\n      end\n      break if offset >= items_available\n    end\n  end\n\n  test \"group contents with include trash collections\" do\n    get \"/arvados/v1/groups/contents\",\n      params: {\n        include_trash: \"true\",\n        filters: [[\"uuid\", \"is_a\", \"arvados#collection\"]].to_json,\n        limit: 1000\n      },\n      headers: auth(:active)\n    assert_response 200\n\n    coll_uuids = []\n    json_response['items'].each { |c| coll_uuids << c['uuid'] }\n    assert_includes coll_uuids, collections(:foo_collection_in_aproject).uuid\n    assert_includes coll_uuids, collections(:expired_collection).uuid\n  end\n\n  test \"group contents without trash collections\" do\n    get \"/arvados/v1/groups/contents\",\n      params: {\n        filters: [[\"uuid\", \"is_a\", \"arvados#collection\"]].to_json,\n        limit: 1000\n      },\n      headers: auth(:active)\n    assert_response 200\n\n    coll_uuids = []\n    json_response['items'].each { |c| coll_uuids << c['uuid'] }\n    assert_includes coll_uuids, collections(:foo_collection_in_aproject).uuid\n    assert_not_includes coll_uuids, collections(:expired_collection).uuid\n  end\n\n  test \"unsharing a project results in hiding it from previously shared user\" do\n    # remove sharing link for project\n    delete \"/arvados/v1/links/#{links(:share_starred_project_with_project_viewer).uuid}\", headers: auth(:admin)\n    
assert_response 200\n\n    # verify that the user can no longer see the project\n    get \"/arvados/v1/groups\",\n      params: {\n        filters: [['group_class', '=', 'project']].to_json,\n        limit: 1000\n      }, headers: auth(:project_viewer)\n    assert_response 200\n    found_projects = {}\n    json_response['items'].each do |g|\n      found_projects[g['uuid']] = g\n    end\n    assert_equal false, found_projects.include?(groups(:starred_and_shared_active_user_project).uuid)\n\n    # share the project\n    post \"/arvados/v1/links\", params: {\n      link: {\n        link_class: \"permission\",\n        name: \"can_read\",\n        head_uuid: groups(:starred_and_shared_active_user_project).uuid,\n        tail_uuid: users(:project_viewer).uuid,\n      }\n    }, headers: auth(:system_user)\n    assert_response 200\n    assert_equal 'permission', json_response['link_class']\n\n    # verify that project_viewer user can now see shared project again\n    get \"/arvados/v1/groups\", params: {\n      filters: [['group_class', '=', 'project']].to_json,\n      limit: 1000\n    }, headers: auth(:project_viewer)\n    assert_response 200\n    found_projects = {}\n    json_response['items'].each do |g|\n      found_projects[g['uuid']] = g\n    end\n    assert_equal true, found_projects.include?(groups(:starred_and_shared_active_user_project).uuid)\n  end\n\n  test 'count none works with offset' do\n    first_results = nil\n    (0..5).each do |offset|\n      get \"/arvados/v1/groups/contents\", params: {\n        id: groups(:aproject).uuid,\n        offset: offset,\n        format: :json,\n        order: :uuid,\n        count: :none,\n      }, headers: auth(:active)\n      assert_response :success\n      assert_nil json_response['items_available']\n      if first_results.nil?\n        first_results = json_response['items']\n        # should get back at least two different kinds of objects, to\n        # test offset paging properly.\n        kinds = first_results.map { |i| i['kind'] }\n        assert_equal 2, kinds.uniq.length\n      else\n        assert_equal first_results[offset]['uuid'], json_response['items'][0]['uuid']\n      end\n    end\n  end\n\n  test \"group contents with include=array\" do\n    get \"/arvados/v1/groups/contents\",\n      params: {\n        filters: [[\"uuid\", \"is_a\", \"arvados#container_request\"]].to_json,\n        include: [\"container_uuid\"].to_json,\n        select: [\"uuid\", \"state\"],\n        limit: 1000,\n      },\n      headers: auth(:active)\n    assert_response 200\n    incl = {}\n    json_response['included'].each { |i| incl[i['uuid']] = i }\n    json_response['items'].each do |c|\n      assert_not_nil incl[c['container_uuid']]['state']\n    end\n  end\nend\n\nclass NonTransactionalGroupsTest < ActionDispatch::IntegrationTest\n  # Transactional tests are disabled to be able to test the concurrent\n  # asynchronous permissions update feature.\n  # This is needed because nested transactions share the connection pool, so\n  # one thread is locked while trying to talk to the database, until the other\n  # one finishes.\n  self.use_transactional_tests = false\n\n  teardown do\n    # Explicitly reset the database after each test.\n    post '/database/reset', params: {}, headers: auth(:admin)\n    assert_response :success\n  end\n\n  test \"create request with async=true does not defer permissions update\" do\n    Rails.configuration.API.AsyncPermissionsUpdateInterval = 1 # second\n    name = \"Random group #{rand(1000)}\"\n    assert_equal nil, 
Group.find_by_name(name)\n\n    # Following the implementation of incremental permission updates\n    # (#16007) the async flag is now a no-op.  Permission changes are\n    # visible immediately.\n\n    # Trigger the asynchronous permission update by using async=true parameter.\n    post \"/arvados/v1/groups\",\n      params: {\n        group: {\n          name: name,\n          group_class: \"project\"\n        },\n        async: true\n      },\n      headers: auth(:active)\n    assert_response 202\n\n    # The group exists in the database\n    assert_not_nil Group.find_by_name(name)\n    get \"/arvados/v1/groups\",\n      params: {\n        filters: [[\"name\", \"=\", name]].to_json,\n        limit: 10\n      },\n      headers: auth(:active)\n    assert_response 200\n    assert_equal 1, json_response['items_available']\n\n    # Wait a bit and try again.\n    sleep(1)\n    get \"/arvados/v1/groups\",\n      params: {\n        filters: [[\"name\", \"=\", name]].to_json,\n        limit: 10\n      },\n      headers: auth(:active)\n    assert_response 200\n    assert_equal 1, json_response['items_available']\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/http_quirks_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass HttpQuirksTest < ActionDispatch::IntegrationTest\n  fixtures :all\n\n  test \"GET request with empty Content-Type header\" do\n    authorize_with :active\n    get \"/arvados/v1/collections\",\n        headers: auth(:active).merge(\"Content-Type\" => \"\")\n    assert_response :success\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/keep_proxy_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass KeepProxyTest < ActionDispatch::IntegrationTest\n  test \"request keep disks\" do\n    get \"/arvados/v1/keep_services/accessible\",\n      params: {:format => :json},\n      headers: auth(:active)\n    assert_response :success\n    services = json_response['items']\n\n    assert_operator 2, :<=, services.length\n    services.each do |service|\n      assert_equal 'disk', service['service_type']\n    end\n  end\n\n  test \"request keep proxy\" do\n    get \"/arvados/v1/keep_services/accessible\",\n      params: {:format => :json},\n      headers: auth(:active).merge({'HTTP_X_EXTERNAL_CLIENT' => '1'})\n    assert_response :success\n    services = json_response['items']\n\n    assert_equal 1, services.length\n\n    assert_equal keep_services(:proxy).uuid, services[0]['uuid']\n    assert_equal keep_services(:proxy).service_host, services[0]['service_host']\n    assert_equal keep_services(:proxy).service_port, services[0]['service_port']\n    assert_equal keep_services(:proxy).service_ssl_flag, services[0]['service_ssl_flag']\n    assert_equal 'proxy', services[0]['service_type']\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/logging_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'stringio'\nrequire 'test_helper'\n\nclass LoggingTest < ActionDispatch::IntegrationTest\n  fixtures :collections\n\n  test \"request_id\" do\n    buf = StringIO.new\n    logcopy = ActiveSupport::Logger.new(buf)\n    logcopy.level = :info\n    begin\n      Rails.logger.broadcast_to(logcopy)\n      get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n          params: {:format => :json},\n          headers: auth(:active).merge({ 'X-Request-Id' => 'req-aaaaaaaaaaaaaaaaaaaa' })\n      assert_response :success\n      assert_match /^{.*\"request_id\":\"req-aaaaaaaaaaaaaaaaaaaa\"/, buf.string\n    ensure\n      Rails.logger.broadcasts.delete(logcopy)\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/login_workflow_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass LoginWorkflowTest < ActionDispatch::IntegrationTest\n  test \"default prompt to login is JSON\" do\n    post('/arvados/v1/collections',\n      params: {collection: {}},\n      headers: {'HTTP_ACCEPT' => ''})\n    assert_response 401\n    json_response['errors'].each do |err|\n      assert(err.include?(\"Not logged in\"), \"error message '#{err}' expected to include 'Not logged in'\")\n    end\n  end\n\n  test \"login prompt respects JSON Accept header\" do\n    post('/arvados/v1/collections',\n      params: {collection: {}},\n      headers: {'HTTP_ACCEPT' => 'application/json'})\n    assert_response 401\n    json_response['errors'].each do |err|\n      assert(err.include?(\"Not logged in\"), \"error message '#{err}' expected to include 'Not logged in'\")\n    end\n  end\n\n  test \"login prompt respects HTML Accept header\" do\n    post('/arvados/v1/collections',\n      params: {collection: {}},\n      headers: {'HTTP_ACCEPT' => 'text/html'})\n    assert_response 302\n    assert_match(%r{http://www.example.com/login$}, @response.headers['Location'],\n                 \"HTML login prompt did not include expected redirect\")\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/noop_deep_munge_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass NoopDeepMungeTest < ActionDispatch::IntegrationTest\n  test \"empty array\" do\n    check({\"foo\" => []})\n  end\n\n  test \"null in array\" do\n    check({\"foo\" => [\"foo\", nil]})\n  end\n\n  test \"array of nulls\" do\n    check({\"foo\" => [nil, nil, nil]})\n  end\n\n  protected\n\n  def check(val)\n    post \"/arvados/v1/container_requests\",\n      params: {\n        :container_request => {\n          :name => \"workflow\",\n          :state => \"Uncommitted\",\n          :command => [\"echo\"],\n          :container_image => \"arvados/jobs\",\n          :output_path => \"/foo\",\n          :mounts => {\n            \"/foo\" => {\n              :kind => \"json\",\n              :content => JSON.parse(SafeJSON.dump(val)),\n            }\n          }\n        }\n      }.to_json,\n      headers: {\n        'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:admin).api_token}\",\n        'CONTENT_TYPE' => 'application/json'\n      }\n    assert_response :success\n    assert_equal \"arvados#containerRequest\", json_response['kind']\n    assert_equal val, json_response['mounts']['/foo']['content']\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/passenger_config_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass PassengerConfigTest < ActionDispatch::IntegrationTest\n  def setup\n    super\n    @passenger_config ||= File.open(Rails.root.join(\"Passengerfile.json\")) do |f|\n      JSON.parse(f)\n    end\n  end\n\n  test \"Passenger disables exception extension gems\" do\n    # For security, consistency, and performance reasons, we do not want these\n    # gems to extend exception messages included in API error responses.\n    begin\n      rubyopt = @passenger_config[\"envvars\"][\"RUBYOPT\"].split\n    rescue NoMethodError, TypeError\n      rubyopt = [\"<RUBYOPT not configured>\"]\n    end\n    assert_includes(rubyopt, \"--disable-did_you_mean\")\n    assert_includes(rubyopt, \"--disable-error_highlight\")\n    assert_includes(rubyopt, \"--disable-syntax_suggest\")\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/permissions_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass PermissionsTest < ActionDispatch::IntegrationTest\n  include DbCurrentTime\n  fixtures :users, :groups, :api_client_authorizations, :collections\n\n  test \"adding and removing direct can_read links\" do\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n\n    # try to add permission as spectator\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: users(:spectator).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: collections(:foo_file).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:spectator)\n    assert_response 422\n\n    # add permission as admin\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: users(:spectator).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: collections(:foo_file).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    u = json_response['uuid']\n    assert_response :success\n\n    # read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response :success\n\n    # try to delete permission as spectator\n    delete \"/arvados/v1/links/#{u}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 403\n\n    # delete permission as admin\n    delete \"/arvados/v1/links/#{u}\",\n      params: {:format => :json},\n      headers: auth(:admin)\n    assert_response :success\n\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n  end\n\n\n  test \"adding can_read links from user to group, group to collection\" do\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n\n    # add permission for spectator to read group\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: users(:spectator).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: groups(:private_role).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n\n    # add permission for group to read collection\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: groups(:private_role).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: collections(:foo_file).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    u = json_response['uuid']\n    assert_response :success\n\n    # try to read 
collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response :success\n\n    # delete permission for group to read collection\n    delete \"/arvados/v1/links/#{u}\",\n      params: {:format => :json},\n      headers: auth(:admin)\n    assert_response :success\n\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n\n  end\n\n\n  test \"adding can_read links from group to collection, user to group\" do\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n\n    # add permission for group to read collection\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: groups(:private_role).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: collections(:foo_file).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n\n    # add permission for spectator to read group\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: users(:spectator).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: groups(:private_role).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    u = json_response['uuid']\n    assert_response :success\n\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response :success\n\n    # delete permission for spectator to read group\n    delete \"/arvados/v1/links/#{u}\",\n      params: {:format => :json},\n      headers: auth(:admin)\n    assert_response :success\n\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n\n  end\n\n  test \"adding can_read links from user to group, group to group, group to collection\" do\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n\n    # add permission for user to read group\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: users(:spectator).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: groups(:private_role).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n\n    # add permission for group to read group\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: groups(:private_role).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n   
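# middle hop of the permission chain being built here: spectator -> private_role -> empty_lonely_group -> foo_file\n   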
       head_uuid: groups(:empty_lonely_group).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n\n    # add permission for group to read collection\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: groups(:empty_lonely_group).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: collections(:foo_file).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    u = json_response['uuid']\n    assert_response :success\n\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response :success\n\n    # delete permission for group to read collection\n    delete \"/arvados/v1/links/#{u}\",\n      params: {:format => :json},\n      headers: auth(:admin)\n    assert_response :success\n\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n  end\n\n  test \"adding can_read links from group to collection, user to group, then trash group\" do\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n\n    # add permission for group to read collection\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: groups(:private_role).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: collections(:foo_file).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n\n    # add permission for spectator to read group\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: users(:spectator).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: groups(:private_role).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    u = json_response['uuid']\n    assert_response :success\n\n    # try to read collection as spectator\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response :success\n\n    # put the group in the trash, this should keep the group members\n    # but delete the permissions.\n    post \"/arvados/v1/groups/#{groups(:private_role).uuid}/trash\",\n      params: {:format => :json},\n      headers: auth(:admin)\n    assert_response :success\n\n    # try to read collection as spectator, should fail now\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n\n    # should not be able to grant permission to a trashed group\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: groups(:private_role).uuid,\n          
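# private_role is in the trash at this point, so this grant is expected to be rejected (422 below)\n          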
link_class: 'permission',\n          name: 'can_read',\n          head_uuid: collections(:foo_file).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response 422\n\n    # can't take group out of the trash\n    post \"/arvados/v1/groups/#{groups(:private_role).uuid}/untrash\",\n      params: {:format => :json},\n      headers: auth(:admin)\n    assert_response 422\n\n    # when a role group is untrashed the permissions don't\n    # automatically come back\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n\n    # can't add permission for group to read collection either\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: groups(:private_role).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: collections(:foo_file).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response 422\n\n    # still can't read foo file\n    get \"/arvados/v1/collections/#{collections(:foo_file).uuid}\",\n      params: {:format => :json},\n      headers: auth(:spectator)\n    assert_response 404\n  end\n\n  test \"read-only group-admin cannot modify administered user\" do\n    put \"/arvados/v1/users/#{users(:active).uuid}\",\n      params: {\n        :user => {\n          first_name: 'KilroyWasHere'\n        },\n        :format => :json\n      },\n      headers: auth(:rominiadmin)\n    assert_response 403\n  end\n\n  test \"read-only group-admin cannot read or update non-administered user\" do\n    get \"/arvados/v1/users/#{users(:spectator).uuid}\",\n      params: {:format => :json},\n      headers: auth(:rominiadmin)\n    assert_response 404\n\n    put \"/arvados/v1/users/#{users(:spectator).uuid}\",\n      params: {\n        :user => {\n          first_name: 'KilroyWasHere'\n        },\n        :format => :json\n      },\n      headers: auth(:rominiadmin)\n    assert_response 404\n  end\n\n  test \"RO group-admin finds user's collections, RW group-admin can update\" do\n    other_user_collection = act_as_user(users(:user_foo_in_sharing_group)) do\n      Collection.create()\n    end\n    [[:rominiadmin, false],\n     [:miniadmin, true]].each do |which_user, update_should_succeed|\n      get \"/arvados/v1/collections\",\n        params: {:format => :json},\n        headers: auth(which_user)\n      assert_response :success\n      resp_uuids = json_response['items'].collect { |i| i['uuid'] }\n      [[true, collections(:collection_owned_by_active).uuid],\n       [true, collections(:foo_collection_in_aproject).uuid],\n       [false, other_user_collection.uuid],\n      ].each do |should_find, uuid|\n        assert_equal(should_find, !resp_uuids.index(uuid).nil?,\n                     \"%s should%s see %s in collection list\" %\n                     [which_user.to_s,\n                      should_find ? 
'' : ' not',\n                      uuid])\n        put \"/arvados/v1/collections/#{uuid}\",\n          params: {\n            :collection => {\n              properties: {\n                miniadmin_was_here: true\n              }\n            },\n            :format => :json\n          },\n          headers: auth(which_user)\n        if !should_find\n          assert_response 404\n        elsif !update_should_succeed\n          assert_response 403\n        else\n          assert_response :success\n        end\n      end\n    end\n  end\n\n  test \"get_permissions returns list\" do\n    # First confirm that user :active cannot get permissions on group :public\n    get \"/arvados/v1/permissions/#{groups(:public).uuid}\",\n      params: nil,\n      headers: auth(:active)\n    assert_response 404\n\n    get \"/arvados/v1/links\",\n        params: {\n          :filters => [[\"link_class\", \"=\", \"permission\"], [\"head_uuid\", \"=\", groups(:public).uuid]].to_json\n        },\n      headers: auth(:active)\n    assert_response :success\n    assert_equal [], json_response['items']\n\n    ### add some permissions, including can_manage\n    ### permission for user :active\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: users(:spectator).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: groups(:public).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n    can_read_uuid = json_response['uuid']\n\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: users(:inactive).uuid,\n          link_class: 'permission',\n          name: 'can_write',\n          head_uuid: groups(:public).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n    can_write_uuid = json_response['uuid']\n\n    # Still should not be able read these permission links\n    get \"/arvados/v1/permissions/#{groups(:public).uuid}\",\n      params: nil,\n      headers: auth(:active)\n    assert_response 404\n\n    get \"/arvados/v1/links\",\n        params: {\n          :filters => [[\"link_class\", \"=\", \"permission\"], [\"head_uuid\", \"=\", groups(:public).uuid]].to_json\n        },\n      headers: auth(:active)\n    assert_response :success\n    assert_equal [], json_response['items']\n\n    # Shouldn't be able to read links directly either\n    get \"/arvados/v1/links/#{can_read_uuid}\",\n        params: {},\n      headers: auth(:active)\n    assert_response 404\n\n    ### Now add a can_manage link\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: users(:active).uuid,\n          link_class: 'permission',\n          name: 'can_manage',\n          head_uuid: groups(:public).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n    can_manage_uuid = json_response['uuid']\n\n    # user :active should be able to retrieve permissions\n    # on group :public using get_permissions\n    get(\"/arvados/v1/permissions/#{groups(:public).uuid}\",\n      params: { :format => :json },\n      headers: auth(:active))\n    assert_response :success\n\n    perm_uuids = json_response['items'].map { |item| item['uuid'] }\n    assert_includes perm_uuids, can_read_uuid, \"can_read_uuid not found\"\n    assert_includes perm_uuids, 
can_write_uuid, \"can_write_uuid not found\"\n    assert_includes perm_uuids, can_manage_uuid, \"can_manage_uuid not found\"\n\n    # user :active should be able to retrieve permissions\n    # on group :public using link list\n    get \"/arvados/v1/links\",\n        params: {\n          :filters => [[\"link_class\", \"=\", \"permission\"], [\"head_uuid\", \"=\", groups(:public).uuid]].to_json\n        },\n      headers: auth(:active)\n    assert_response :success\n\n    perm_uuids = json_response['items'].map { |item| item['uuid'] }\n    assert_includes perm_uuids, can_read_uuid, \"can_read_uuid not found\"\n    assert_includes perm_uuids, can_write_uuid, \"can_write_uuid not found\"\n    assert_includes perm_uuids, can_manage_uuid, \"can_manage_uuid not found\"\n\n    # Should be able to read links directly too\n    get \"/arvados/v1/links/#{can_read_uuid}\",\n      headers: auth(:active)\n    assert_response :success\n\n    ### Create some objects of different types (other than projects)\n    ### inside a subproject inside the shared project, and share those\n    ### individual objects with a 3rd user (\"spectator\").\n    post '/arvados/v1/groups',\n         params: {\n           group: {\n             owner_uuid: groups(:public).uuid,\n             name: 'permission test subproject',\n             group_class: 'project',\n           },\n         },\n         headers: auth(:admin)\n    assert_response :success\n    subproject_uuid = json_response['uuid']\n\n    test_types = ['collection', 'workflow', 'container_request']\n    test_type_create_attrs = {\n      'container_request' => {\n        command: [\"echo\", \"foo\"],\n        container_image: links(:docker_image_collection_tag).name,\n        cwd: \"/tmp\",\n        environment: {},\n        mounts: {\"/out\" => {kind: \"tmp\", capacity: 1000000}},\n        output_path: \"/out\",\n        runtime_constraints: {\"vcpus\" => 1, \"ram\" => 2},\n      },\n    }\n\n    test_object = {}\n    test_object_perm_link = {}\n    test_types.each do |test_type|\n      post \"/arvados/v1/#{test_type}s\",\n           params: {\n             test_type.to_sym => {\n               owner_uuid: subproject_uuid,\n               name: \"permission test #{test_type} in subproject\",\n             }.merge(test_type_create_attrs[test_type] || {}).to_json,\n           },\n           headers: auth(:admin)\n      assert_response :success\n      test_object[test_type] = json_response\n\n      post '/arvados/v1/links',\n           params: {\n             link: {\n               tail_uuid: users(:spectator).uuid,\n               link_class: 'permission',\n               name: 'can_read',\n               head_uuid: test_object[test_type]['uuid'],\n             }\n           },\n           headers: auth(:admin)\n      assert_response :success\n      test_object_perm_link[test_type] = json_response\n    end\n\n    # The \"active-can_manage-project\" permission should cause the\n    # \"spectator-can_read-object\" links to be visible to the \"active\"\n    # user.\n    test_types.each do |test_type|\n      get \"/arvados/v1/permissions/#{test_object[test_type]['uuid']}\",\n          headers: auth(:active)\n      assert_response :success\n      perm_uuids = json_response['items'].map { |item| item['uuid'] }\n      assert_includes perm_uuids, test_object_perm_link[test_type]['uuid'], \"can_read_uuid not found\"\n\n      get \"/arvados/v1/links/#{test_object_perm_link[test_type]['uuid']}\",\n          headers: auth(:active)\n      assert_response :success\n\n      [\n   
     ['head_uuid', '=', test_object[test_type]['uuid']],\n        ['head_uuid', 'in', [test_object[test_type]['uuid']]],\n        ['head_uuid', 'in', [users(:admin).uuid, test_object[test_type]['uuid']]],\n      ].each do |filter|\n        get \"/arvados/v1/links\",\n            params: {\n              filters: ([['link_class', '=', 'permission'], filter]).to_json,\n            },\n            headers: auth(:active)\n        assert_response :success\n        assert_not_empty json_response['items'], \"could not find can_read link using index with filter #{filter}\"\n        assert_equal test_object_perm_link[test_type]['uuid'], json_response['items'][0]['uuid']\n      end\n\n      # The \"spectator-can_read-object\" link should be visible to the\n      # subject user (\"spectator\") in a filter query, even without\n      # can_manage permission on the target object.\n      [\n        ['tail_uuid', '=', users(:spectator).uuid],\n      ].each do |filter|\n        get \"/arvados/v1/links\",\n            params: {\n              filters: ([['link_class', '=', 'permission'], filter]).to_json,\n            },\n            headers: auth(:spectator)\n        assert_response :success\n        perm_uuids = json_response['items'].map { |item| item['uuid'] }\n        assert_includes perm_uuids, test_object_perm_link[test_type]['uuid'], \"could not find can_read link using index with filter #{filter}\"\n      end\n    end\n\n    ### Now delete the can_manage link\n    delete \"/arvados/v1/links/#{can_manage_uuid}\",\n      headers: auth(:active)\n    assert_response :success\n\n    # Should not be able read these permission links again\n    test_types.each do |test_type|\n      get \"/arvados/v1/permissions/#{groups(:public).uuid}\",\n          headers: auth(:active)\n      assert_response 404\n\n      get \"/arvados/v1/permissions/#{test_object[test_type]['uuid']}\",\n          headers: auth(:active)\n      assert_response 404\n\n      get \"/arvados/v1/links\",\n          params: {\n            filters: [[\"link_class\", \"=\", \"permission\"], [\"head_uuid\", \"=\", groups(:public).uuid]].to_json\n          },\n          headers: auth(:active)\n      assert_response :success\n      assert_equal [], json_response['items']\n\n      [\n        ['head_uuid', '=', test_object[test_type]['uuid']],\n        ['head_uuid', 'in', [users(:admin).uuid, test_object[test_type]['uuid']]],\n        ['head_uuid', 'in', []],\n      ].each do |filter|\n        get \"/arvados/v1/links\",\n            params: {\n              :filters => [[\"link_class\", \"=\", \"permission\"], filter].to_json\n            },\n            headers: auth(:active)\n        assert_response :success\n        assert_equal [], json_response['items']\n      end\n\n      # Should not be able to read links directly either\n      get \"/arvados/v1/links/#{can_read_uuid}\",\n          headers: auth(:active)\n      assert_response 404\n\n      test_types.each do |test_type|\n        get \"/arvados/v1/links/#{test_object_perm_link[test_type]['uuid']}\",\n            headers: auth(:active)\n        assert_response 404\n      end\n    end\n\n    ### Create a collection, and share it with a direct permission\n    ### link (as opposed to sharing its parent project)\n    post \"/arvados/v1/collections\",\n      params: {\n        collection: {\n          name: 'permission test',\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n    collection_uuid = json_response['uuid']\n    post \"/arvados/v1/links\",\n      params: {\n   
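# this link shares the collection directly; the test below expects it to be readable via the permissions API only after a can_manage link is added\n   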
     link: {\n          tail_uuid: users(:spectator).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: collection_uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n    can_read_collection_uuid = json_response['uuid']\n\n    # Should not be able read the permission link via permissions API,\n    # because permission is only can_read, not can_manage\n    get \"/arvados/v1/permissions/#{collection_uuid}\",\n      headers: auth(:active)\n    assert_response 404\n\n    # Should not be able to read the permission link directly, for\n    # same reason\n    get \"/arvados/v1/links/#{can_read_collection_uuid}\",\n      headers: auth(:active)\n    assert_response 404\n\n    ### Now add a can_manage link\n    post \"/arvados/v1/links\",\n      params: {\n        link: {\n          tail_uuid: users(:active).uuid,\n          link_class: 'permission',\n          name: 'can_manage',\n          head_uuid: collection_uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n    can_manage_collection_uuid = json_response['uuid']\n\n    # Should be able read both permission links via permissions API\n    get \"/arvados/v1/permissions/#{collection_uuid}\",\n      headers: auth(:active)\n    assert_response :success\n    perm_uuids = json_response['items'].map { |item| item['uuid'] }\n    assert_includes perm_uuids, can_read_collection_uuid, \"can_read_uuid not found\"\n    assert_includes perm_uuids, can_manage_collection_uuid, \"can_manage_uuid not found\"\n\n    # Should be able to read both permission links directly\n    [can_read_collection_uuid, can_manage_collection_uuid].each do |uuid|\n      get \"/arvados/v1/links/#{uuid}\",\n        headers: auth(:active)\n      assert_response :success\n    end\n  end\n\n  test \"get_permissions returns 404 for nonexistent uuid\" do\n    nonexistent = Group.generate_uuid\n    # make sure it really doesn't exist\n    get \"/arvados/v1/groups/#{nonexistent}\", params: nil, headers: auth(:admin)\n    assert_response 404\n\n    get \"/arvados/v1/permissions/#{nonexistent}\", params: nil, headers: auth(:active)\n    assert_response 404\n  end\n\n  test \"get_permissions returns 403 if user can read but not manage\" do\n    post \"/arvados/v1/links\",\n      params: {\n        :link => {\n          tail_uuid: users(:active).uuid,\n          link_class: 'permission',\n          name: 'can_read',\n          head_uuid: groups(:public).uuid,\n          properties: {}\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n\n    get \"/arvados/v1/permissions/#{groups(:public).uuid}\",\n      params: nil,\n      headers: auth(:active)\n    assert_response 403\n  end\n\n  test \"active user can read the empty collection\" do\n    # The active user should be able to read the empty collection.\n\n    get(\"/arvados/v1/collections/#{empty_collection_pdh}\",\n      params: {:format => :json},\n      headers: auth(:active))\n    assert_response :success\n    assert_empty json_response['manifest_text'], \"empty collection manifest_text is not empty\"\n  end\n\n  [['can_write', 'can_read', 'can_write'],\n   ['can_manage', 'can_write', 'can_manage'],\n   ['can_manage', 'can_read', 'can_manage'],\n   ['can_read', 'can_write', 'can_write'],\n   ['can_read', 'can_manage', 'can_manage'],\n   ['can_write', 'can_manage', 'can_manage'],\n  ].each do |perm1, perm2, expect|\n    test \"creating #{perm2} 
permission returns existing #{perm1} link as #{expect}\" do\n      link1 = act_as_system_user do\n        Link.create!({\n                       link_class: \"permission\",\n                       tail_uuid: users(:active).uuid,\n                       head_uuid: collections(:baz_file).uuid,\n                       name: perm1,\n                     })\n      end\n      post \"/arvados/v1/links\",\n           params: {\n             link: {\n               link_class: \"permission\",\n               tail_uuid: users(:active).uuid,\n               head_uuid: collections(:baz_file).uuid,\n               name: perm2,\n             },\n           },\n           headers: auth(:admin)\n      assert_response :success\n      assert_equal link1.uuid, json_response[\"uuid\"]\n      assert_equal expect, json_response[\"name\"]\n      link1.reload\n      assert_equal expect, link1.name\n    end\n  end\n\n  test \"creating duplicate login permission returns existing link\" do\n    link1 = act_as_system_user do\n      Link.create!({\n                     link_class: \"permission\",\n                     tail_uuid: users(:active).uuid,\n                     head_uuid: virtual_machines(:testvm2).uuid,\n                     name: \"can_login\",\n                     properties: {\"username\": \"foo1\"}\n                   })\n    end\n    link2 = act_as_system_user do\n      Link.create!({\n                     link_class: \"permission\",\n                     tail_uuid: users(:active).uuid,\n                     head_uuid: virtual_machines(:testvm2).uuid,\n                     name: \"can_login\",\n                     properties: {\"username\": \"foo2\"}\n                   })\n    end\n    link3 = act_as_system_user do\n      Link.create!({\n                     link_class: \"permission\",\n                     tail_uuid: users(:active).uuid,\n                     head_uuid: virtual_machines(:testvm2).uuid,\n                     name: \"can_read\",\n                   })\n    end\n    post \"/arvados/v1/links\",\n         params: {\n           link: {\n             link_class: \"permission\",\n             tail_uuid: users(:active).uuid,\n             head_uuid: virtual_machines(:testvm2).uuid,\n             name: \"can_login\",\n             properties: {\"username\": \"foo2\"},\n           },\n         },\n         headers: auth(:admin)\n    assert_response :success\n    assert_equal link2.uuid, json_response[\"uuid\"]\n    assert_equal link2.created_at.to_date, json_response[\"created_at\"].to_date\n    assert_equal \"can_login\", json_response[\"name\"]\n    assert_equal \"foo2\", json_response[\"properties\"][\"username\"]\n    link1.reload\n    assert_equal \"foo1\", link1.properties[\"username\"]\n    link2.reload\n    assert_equal \"foo2\", link2.properties[\"username\"]\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/pipeline_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass PipelineIntegrationTest < ActionDispatch::IntegrationTest\nend\n"
  },
  {
    "path": "services/api/test/integration/reader_tokens_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass ReaderTokensTest < ActionDispatch::IntegrationTest\n  fixtures :all\n\n  def owned_by_foo\n    collections(:collection_owned_by_foo).uuid\n  end\n\n  def get_collections(main_auth, read_auth, formatter=:to_a)\n    params = {}\n    params[:reader_tokens] = [api_token(read_auth)].send(formatter) if read_auth\n    headers = {}\n    headers.merge!(auth(main_auth)) if main_auth\n    get('/arvados/v1/collections', params: params, headers: headers)\n  end\n\n  def get_collection_uuids(main_auth, read_auth, formatter=:to_a)\n    get_collections(main_auth, read_auth, formatter)\n    assert_response :success\n    json_response['items'].map { |spec| spec['uuid'] }\n  end\n\n  def assert_post_denied(main_auth, read_auth, formatter=:to_a)\n    if main_auth\n      headers = auth(main_auth)\n      expected = 403\n    else\n      headers = {}\n      expected = 401\n    end\n    post('/arvados/v1/collections.json',\n      params: {collection: {}, reader_tokens: [api_token(read_auth)].send(formatter)},\n      headers: headers)\n    assert_response expected\n  end\n\n  test \"active user can't see foo-owned collection\" do\n    # Other tests in this suite assume that the active user doesn't\n    # have read permission to the owned_by_foo collection.\n    # This test checks that this assumption still holds.\n    refute_includes(get_collection_uuids(:active, nil), owned_by_foo,\n                    [\"active user can read the owned_by_foo collection\",\n                     \"other tests will return false positives\"].join(\" - \"))\n  end\n\n  [nil, :active_noscope].each do |main_auth|\n    [:foo, :foo_collections].each do |read_auth|\n      [:to_a, :to_json].each do |formatter|\n        test \"#{main_auth.inspect} auth with #{formatter} reader token #{read_auth} can#{\"'t\" if main_auth} read\" do\n          get_collections(main_auth, read_auth)\n          assert_response(if main_auth then 403 else 200 end)\n        end\n\n        test \"#{main_auth.inspect} auth with #{formatter} reader token #{read_auth} can't write\" do\n          assert_post_denied(main_auth, read_auth, formatter)\n        end\n      end\n    end\n  end\n\n  test \"scopes are still limited with reader tokens\" do\n    get('/arvados/v1/collections',\n      params: {reader_tokens: [api_token(:foo_collections)]},\n      headers: auth(:active_noscope))\n    assert_response 403\n  end\n\n  test \"reader tokens grant no permissions when expired\" do\n    get_collections(:active_noscope, :expired)\n    assert_response 403\n  end\n\n  test \"reader tokens grant no permissions outside their scope\" do\n    refute_includes(get_collection_uuids(:active, :admin_vm), owned_by_foo,\n                    \"scoped reader token granted permissions out of scope\")\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/remote_user_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'webrick'\nrequire 'webrick/https'\nrequire 'test_helper'\nrequire 'helpers/users_test_helper'\n\nclass RemoteUsersTest < ActionDispatch::IntegrationTest\n  include DbCurrentTime\n\n  def salted_active_token(remote:)\n    salt_token(fixture: :active, remote: remote).sub('/zzzzz-', '/'+remote+'-')\n  end\n\n  def auth(remote:)\n    token = salted_active_token(remote: remote)\n    {\"HTTP_AUTHORIZATION\" => \"Bearer #{token}\"}\n  end\n\n  # For remote authentication tests, we bring up a simple stub server\n  # (on a port chosen by webrick) and configure the SUT so the stub is\n  # responsible for clusters \"zbbbb\" (a well-behaved cluster) and\n  # \"zbork\" (a misbehaving cluster).\n  #\n  # Test cases can override the stub's default response to\n  # .../users/current by changing @stub_status and @stub_content.\n  setup do\n    clnt = HTTPClient.new\n    clnt.ssl_config.verify_mode = OpenSSL::SSL::VERIFY_NONE\n    HTTPClient.stubs(:new).returns clnt\n\n    @controller = Arvados::V1::UsersController.new\n    ready = Thread::Queue.new\n\n    @remote_server = []\n    @remote_host = []\n\n    ['zbbbb', 'zbork'].each do |clusterid|\n      srv = WEBrick::HTTPServer.new(\n        Port: 0,\n        Logger: WEBrick::Log.new(\n          Rails.root.join(\"log\", \"webrick.log\").to_s,\n          WEBrick::Log::INFO),\n        AccessLog: [[File.open(Rails.root.join(\n                                 \"log\", \"webrick_access.log\").to_s, 'a+'),\n                     WEBrick::AccessLog::COMBINED_LOG_FORMAT]],\n        SSLEnable: true,\n        SSLVerifyClient: OpenSSL::SSL::VERIFY_NONE,\n        SSLPrivateKey: OpenSSL::PKey::RSA.new(\n          File.open(Rails.root.join(\"tmp\", \"self-signed.key\")).read),\n        SSLCertificate: OpenSSL::X509::Certificate.new(\n          File.open(Rails.root.join(\"tmp\", \"self-signed.pem\")).read),\n        SSLCertName: [[\"CN\", WEBrick::Utils::getservername]],\n        StartCallback: lambda { ready.push(true) })\n      srv.mount_proc '/discovery/v1/apis/arvados/v1/rest' do |req, res|\n        res.body = Arvados::V1::SchemaController.new.send(:discovery_doc).to_json\n      end\n      srv.mount_proc '/arvados/v1/users/current' do |req, res|\n        if clusterid == 'zbbbb' and req.header['authorization'][0][10..14] == 'zbork'\n          # asking zbbbb about zbork should yield an error, zbbbb doesn't trust zbork\n          res.status = 401\n          return\n        end\n        res.status = @stub_status\n        res.body = @stub_content.is_a?(String) ? 
@stub_content : @stub_content.to_json\n      end\n      srv.mount_proc '/arvados/v1/api_client_authorizations/current' do |req, res|\n        if clusterid == 'zbbbb' and req.header['authorization'][0][10..14] == 'zbork'\n          # asking zbbbb about zbork should yield an error, zbbbb doesn't trust zbork\n          res.status = 401\n          return\n        end\n        res.status = @stub_token_status\n        if res.status == 200\n          body = {\n            uuid: @stub_token_uuid || api_client_authorizations(:active).uuid.sub('zzzzz', clusterid),\n            owner_uuid: \"#{clusterid}-tpzed-00000000000000z\",\n            expires_at: '2067-07-01T00:00:00.000000000Z',\n            scopes: @stub_token_scopes,\n          }\n          if @stub_content.is_a?(Hash) and owner_uuid = @stub_content[:uuid]\n            body[:owner_uuid] = owner_uuid\n          end\n          res.body = body.to_json\n        end\n      end\n      Thread.new do\n        srv.start\n      end\n      ready.pop\n      @remote_server << srv\n      @remote_host << \"127.0.0.1:#{srv.config[:Port]}\"\n    end\n    Rails.configuration.RemoteClusters = Rails.configuration.RemoteClusters.merge({zbbbb: ActiveSupport::InheritableOptions.new({Host: @remote_host[0]}),\n                                                                                   zbork: ActiveSupport::InheritableOptions.new({Host: @remote_host[1]})})\n    Arvados::V1::SchemaController.any_instance.stubs(:root_url).returns \"https://#{@remote_host[0]}\"\n    @stub_status = 200\n    @stub_content = {\n      uuid: 'zbbbb-tpzed-000000000000001',\n      email: 'foo@example.com',\n      username: 'barney',\n      first_name: \"Barney\",\n      last_name: \"Foo\",\n      is_admin: true,\n      is_active: true,\n      is_invited: true,\n    }\n    @stub_token_status = 200\n    @stub_token_scopes = [\"all\"]\n    @stub_token_uuid = nil\n    ActionMailer::Base.deliveries = []\n  end\n\n  teardown do\n    @remote_server.each do |srv|\n      srv.stop\n    end\n  end\n\n  def uncache_token(src)\n    if match = src.match(/\\b(?:[a-z0-9]{5}-){2}[a-z0-9]{15}\\b/)\n      tokens = ApiClientAuthorization.where(uuid: match[0])\n    else\n      tokens = ApiClientAuthorization.where(\"uuid like ?\", \"#{src}-%\")\n    end\n    tokens.update_all(refreshes_at: \"1995-05-15T01:02:03Z\")\n  end\n\n  test 'authenticate with remote token that has limited scope' do\n    get '/arvados/v1/collections',\n        params: {format: 'json'},\n        headers: auth(remote: 'zbbbb')\n    assert_response :success\n\n    @stub_token_scopes = [\"GET /arvados/v1/users/current\"]\n\n    # re-authorize before cache refresh time arrives\n    get '/arvados/v1/collections',\n        params: {format: 'json'},\n        headers: auth(remote: 'zbbbb')\n    assert_response :success\n\n    uncache_token('zbbbb')\n    # re-authorize after cache refresh time arrives\n    get '/arvados/v1/collections',\n        params: {format: 'json'},\n        headers: auth(remote: 'zbbbb')\n    assert_response 403\n  end\n\n  test \"authenticate with remote token with limited initial scope\" do\n    @stub_token_scopes = [\"GET /arvados/v1/users/\"]\n    get \"/arvados/v1/users/#{@stub_content[:uuid]}\",\n        params: {format: \"json\"},\n        headers: auth(remote: \"zbbbb\")\n    assert_response :success\n  end\n\n  test 'expires_at is from remote cluster, refreshes_at reflects RemoteTokenRefresh' do\n    2.times do\n      get '/arvados/v1/api_client_authorizations/current',\n          params: {format: 'json'},\n    
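# the second pass exercises the locally cached token; both passes should report the same values\n    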
      headers: auth(remote: 'zbbbb')\n      assert_response :success\n      assert_equal '2067-07-01T00:00:00.000000000Z', json_response['expires_at']\n      got_refresh = ApiClientAuthorization.find_by_uuid(json_response['uuid']).refreshes_at\n      expect_refresh = (db_current_time + Rails.configuration.Login.RemoteTokenRefresh).to_datetime\n      assert_operator (got_refresh - expect_refresh).to_f.abs, :<, 1.second.to_f\n    end\n  end\n\n  test 'authenticate with remote token' do\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']\n    assert_equal false, json_response['is_admin']\n    assert_equal false, json_response['is_active']\n    assert_equal 'foo@example.com', json_response['email']\n    assert_equal 'barney', json_response['username']\n\n    # revoke original token\n    @stub_token_status = 401\n\n    # re-authorize before cache refresh time arrives\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n\n    uncache_token('zbbbb')\n    # re-authorize after cache refresh time arrives\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response 401\n\n    # simulate cached token indicating wrong user (e.g., local user\n    # entry was migrated out of the way taking the cached token with\n    # it, or authorizing cluster reassigned auth to a different user)\n    ApiClientAuthorization.where(\n      uuid: salted_active_token(remote: 'zbbbb').split('/')[1]).\n      update_all(user_id: users(:active).id)\n\n    # revive original token and re-authorize\n    @stub_token_status = 200\n    @stub_content[:username] = 'blarney'\n    @stub_content[:email] = 'blarney@example.com'\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal 'barney', json_response['username'], 'local username should not change once assigned'\n    assert_equal 'blarney@example.com', json_response['email']\n  end\n\n  test 'remote user is deactivated' do\n    Rails.configuration.RemoteClusters['zbbbb'].ActivateUsers = true\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal true, json_response['is_active']\n\n    # revoke original token\n    @stub_content[:is_active] = false\n    @stub_content[:is_invited] = false\n\n    uncache_token('zbbbb')\n    # re-authorize after cache refresh time arrives\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_equal false, json_response['is_active']\n\n  end\n\n  test 'authenticate with remote token, remote username conflicts with local' do\n    @stub_content[:username] = 'active'\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal 'active2', json_response['username']\n  end\n\n  test 'authenticate with remote token, remote username is nil' do\n    @stub_content.delete :username\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal 'foo', json_response['username']\n  end\n\n  test 'authenticate 
with remote token with secret part identical to previously cached token' do\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    get '/arvados/v1/api_client_authorizations/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n\n    # Update refreshes_at to a time in the past, to induce a re-fetch\n    # from the stub cluster.\n    @cached_token_uuid = json_response['uuid']\n    act_as_system_user do\n      ApiClientAuthorization.where(uuid: @cached_token_uuid).update_all(refreshes_at: db_current_time() - 1.day)\n    end\n\n    # Now use the same bare token, but set up the remote cluster to\n    # return a different UUID this time.\n    @stub_token_uuid = 'zbbbb-gj3su-123451234512345'\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n\n    # Confirm that we actually retrieved the new UUID from the stub\n    # cluster -- otherwise we didn't really test the conflicting-UUID\n    # case.\n    get '/arvados/v1/api_client_authorizations/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal @stub_token_uuid, json_response['uuid']\n  end\n\n  test 'authenticate with remote token from misbehaving remote cluster' do\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbork')\n    assert_response 401\n  end\n\n  test 'authenticate with remote token that fails validate' do\n    @stub_status = 401\n    @stub_content = {\n      error: 'not authorized',\n    }\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response 401\n  end\n\n  ['v2',\n   'v2/',\n   'v2//',\n   'v2///',\n   \"v2/'; delete from users where 1=1; commit; select '/lol\",\n   'v2/foo/bar',\n   'v2/zzzzz-gj3su-077z32aux8dg2s1',\n   'v2/zzzzz-gj3su-077z32aux8dg2s1/',\n   'v2/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi',\n   'v2/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi/zzzzz-gj3su-077z32aux8dg2s1',\n   'v2//3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi',\n   'v8/zzzzz-gj3su-077z32aux8dg2s1/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi',\n   '/zzzzz-gj3su-077z32aux8dg2s1/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi',\n   '\"v2/zzzzz-gj3su-077z32aux8dg2s1/3kg6k6lzmp9kj5cpkcoxie963cmvjahbt2fod9zru30k1jqdmi\"',\n   '/',\n   '//',\n   '///',\n  ].each do |token|\n    test \"authenticate with malformed remote token #{token}\" do\n      get '/arvados/v1/users/current',\n        params: {format: 'json'},\n        headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{token}\"}\n      assert_response 401\n    end\n  end\n\n  test \"ignore extra fields in remote token\" do\n    token = salted_active_token(remote: 'zbbbb') + '/foo/bar/baz/*'\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{token}\"}\n    assert_response :success\n  end\n\n  test 'remote api server is not an api server' do\n    @stub_status = 200\n    @stub_content = '<html>bad</html>'\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response 401\n  end\n\n  ['zbbbb', 'z0000'].each do |token_valid_for|\n    test \"validate #{token_valid_for}-salted token for remote cluster zbbbb\" do\n     
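# salt_token (test helper) yields a token of the form \"v2/<uuid>/<secret salted for the named cluster>\"; only the zbbbb-salted variant should validate here\n     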
 salted_token = salt_token(fixture: :active, remote: token_valid_for)\n      get '/arvados/v1/users/current',\n        params: {format: 'json', remote: 'zbbbb'},\n        headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{salted_token}\"}\n      if token_valid_for == 'zbbbb'\n        assert_response 200\n        assert_equal(users(:active).uuid, json_response['uuid'])\n      else\n        assert_response 401\n      end\n    end\n  end\n\n  test \"list readable groups with salted token\" do\n    Rails.configuration.Users.RoleGroupsVisibleToAll = false\n    salted_token = salt_token(fixture: :active, remote: 'zbbbb')\n    get '/arvados/v1/groups',\n      params: {\n        format: 'json',\n        remote: 'zbbbb',\n        limit: 10000,\n      },\n      headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{salted_token}\"}\n    assert_response 200\n    group_uuids = json_response['items'].collect { |i| i['uuid'] }\n    assert_includes(group_uuids, 'zzzzz-j7d0g-fffffffffffffff')\n    refute_includes(group_uuids, 'zzzzz-j7d0g-000000000000000')\n    assert_includes(group_uuids, groups(:aproject).uuid)\n    refute_includes(group_uuids, groups(:trashed_project).uuid)\n    refute_includes(group_uuids, groups(:testusergroup_admins).uuid)\n  end\n\n  test 'do not auto-activate user from untrusted cluster' do\n    Rails.configuration.RemoteClusters['zbbbb'].AutoSetupNewUsers = false\n    Rails.configuration.RemoteClusters['zbbbb'].ActivateUsers = false\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']\n    assert_equal false, json_response['is_admin']\n    assert_equal false, json_response['is_active']\n    assert_equal 'foo@example.com', json_response['email']\n    assert_equal 'barney', json_response['username']\n    post '/arvados/v1/users/zbbbb-tpzed-000000000000001/activate',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response 422\n  end\n\n  test 'auto-activate user from trusted cluster' do\n    Rails.configuration.RemoteClusters['zbbbb'].ActivateUsers = true\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']\n    assert_equal false, json_response['is_admin']\n    assert_equal true, json_response['is_active']\n    assert_equal 'foo@example.com', json_response['email']\n    assert_equal 'barney', json_response['username']\n  end\n\n  test 'get user from Login cluster' do\n    Rails.configuration.Login.LoginCluster = 'zbbbb'\n    email_dest = ActiveSupport::OrderedOptions.new\n    email_dest[:'arvados-admin@example.com'] = ActiveSupport::OrderedOptions.new\n    Rails.configuration.Users.SendUserSetupNotificationEmail = true\n    Rails.configuration.Users.UserNotifierEmailBcc = email_dest\n    Rails.configuration.Users.NewUserNotificationRecipients = email_dest\n    Rails.configuration.Users.NewInactiveUserNotificationRecipients = email_dest\n\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']\n    assert_equal true, json_response['is_admin']\n    assert_equal true, json_response['is_active']\n    assert_equal 'foo@example.com', json_response['email']\n    assert_equal 'barney', 
json_response['username']\n\n    assert_equal 2, ActionMailer::Base.deliveries.length\n    assert_equal \"Welcome to Arvados - account enabled\", ActionMailer::Base.deliveries[0].subject\n    assert_equal \"[ARVADOS] New user created notification\", ActionMailer::Base.deliveries[1].subject\n  end\n\n  [true, false].each do |trusted|\n    [true, false].each do |logincluster|\n      [true, false, nil].each do |admin|\n        [true, false, nil].each do |active|\n          [true, false].each do |autosetup|\n            [true, false, nil].each do |invited|\n              test \"get invited=#{invited}, active=#{active}, admin=#{admin} user from #{if logincluster then \"Login\" else \"peer\" end} cluster when AutoSetupNewUsers=#{autosetup} ActivateUsers=#{trusted}\" do\n                Rails.configuration.Login.LoginCluster = 'zbbbb' if logincluster\n                Rails.configuration.RemoteClusters['zbbbb'].ActivateUsers = trusted\n                Rails.configuration.Users.AutoSetupNewUsers = autosetup\n                @stub_content = {\n                  uuid: 'zbbbb-tpzed-000000000000001',\n                  email: 'foo@example.com',\n                  username: 'barney',\n                  is_admin: admin,\n                  is_active: active,\n                  is_invited: invited,\n                }\n                get '/arvados/v1/users/current',\n                    params: {format: 'json'},\n                    headers: auth(remote: 'zbbbb')\n                assert_response :success\n                assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']\n                assert_equal (logincluster && !!admin && (invited != false) && !!active), json_response['is_admin']\n                assert_equal ((invited == true || (invited == nil && !!active)) && (logincluster || trusted || autosetup)), json_response['is_invited']\n                assert_equal ((invited != false) && (logincluster || trusted) && !!active), json_response['is_active']\n                assert_equal 'foo@example.com', json_response['email']\n                assert_equal 'barney', json_response['username']\n              end\n            end\n          end\n        end\n      end\n    end\n  end\n\n  test 'get active user from Login cluster when AutoSetupNewUsers is set' do\n    Rails.configuration.Login.LoginCluster = 'zbbbb'\n    Rails.configuration.Users.AutoSetupNewUsers = true\n    @stub_content = {\n      uuid: 'zbbbb-tpzed-000000000000001',\n      email: 'foo@example.com',\n      username: 'barney',\n      is_admin: false,\n      is_active: true,\n      is_invited: true,\n    }\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']\n    assert_equal false, json_response['is_admin']\n    assert_equal true, json_response['is_active']\n    assert_equal true, json_response['is_invited']\n    assert_equal 'foo@example.com', json_response['email']\n    assert_equal 'barney', json_response['username']\n\n    @stub_content = {\n      uuid: 'zbbbb-tpzed-000000000000001',\n      email: 'foo@example.com',\n      username: 'barney',\n      is_admin: false,\n      is_active: false,\n      is_invited: false,\n    }\n\n    # Use cached value.  
User will still be active because we haven't\n    # re-queried the upstream cluster.\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']\n    assert_equal false, json_response['is_admin']\n    assert_equal true, json_response['is_active']\n    assert_equal true, json_response['is_invited']\n    assert_equal 'foo@example.com', json_response['email']\n    assert_equal 'barney', json_response['username']\n\n    uncache_token('zbbbb')\n    # User should be inactive now.\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal 'zbbbb-tpzed-000000000000001', json_response['uuid']\n    assert_equal false, json_response['is_admin']\n    assert_equal false, json_response['is_active']\n    assert_equal false, json_response['is_invited']\n    assert_equal 'foo@example.com', json_response['email']\n    assert_equal 'barney', json_response['username']\n\n  end\n\n  test 'pre-activate remote user' do\n    @stub_content = {\n      uuid: 'zbbbb-tpzed-000000000001234',\n      email: 'foo@example.com',\n      username: 'barney',\n      is_admin: true,\n      is_active: true,\n      is_invited: true,\n    }\n\n    post '/arvados/v1/users',\n      params: {\n        \"user\" => {\n          \"uuid\" => \"zbbbb-tpzed-000000000001234\",\n          \"email\" => 'foo@example.com',\n          \"username\" => 'barney',\n          \"is_active\" => true,\n          \"is_admin\" => false\n        }\n      },\n      headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_token(:admin)}\"}\n    assert_response :success\n\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal 'zbbbb-tpzed-000000000001234', json_response['uuid']\n    assert_equal false, json_response['is_admin']\n    assert_equal true, json_response['is_active']\n    assert_equal 'foo@example.com', json_response['email']\n    assert_equal 'barney', json_response['username']\n  end\n\n\n  test 'remote user inactive without pre-activation' do\n    @stub_content = {\n      uuid: 'zbbbb-tpzed-000000000001234',\n      email: 'foo@example.com',\n      username: 'barney',\n      is_admin: true,\n      is_active: true,\n      is_invited: true,\n    }\n\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal 'zbbbb-tpzed-000000000001234', json_response['uuid']\n    assert_equal false, json_response['is_admin']\n    assert_equal false, json_response['is_active']\n    assert_equal 'foo@example.com', json_response['email']\n    assert_equal 'barney', json_response['username']\n  end\n\n  test \"validate unsalted v2 token for remote cluster zbbbb\" do\n    auth = api_client_authorizations(:active)\n    token = \"v2/#{auth.uuid}/#{auth.api_token}\"\n    get '/arvados/v1/users/current',\n      params: {format: 'json', remote: 'zbbbb'},\n      headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{token}\"}\n    assert_response :success\n    assert_equal(users(:active).uuid, json_response['uuid'])\n  end\n\n  [[\"valid local\", :active, nil],\n   [\"valid remote\", \"zbbbb-gj3su-000000000000000\", nil],\n   [\"invalid local\", :active, \"fakeactivetoken\"],\n   [\"invalid remote\", \"zbork-gj3su-000000000000000\", nil],\n  ].each do 
|label, auth_uuid, auth_token|\n    test \"container request with #{label} runtime_token\" do\n      case auth_uuid\n      when Symbol\n        aca = api_client_authorizations(auth_uuid)\n        auth_uuid = aca.uuid\n        auth_token ||= aca.api_token\n      when String\n        auth_token ||= \"fakeremotetoken\"\n      else\n        flunk \"test case uses an unsupported auth identifier: #{auth_uuid}\"\n      end\n      runtime_token = \"v2/#{auth_uuid}/#{auth_token}\"\n      post '/arvados/v1/container_requests',\n        params: {\n          \"container_request\" => {\n            \"command\" => [\"echo\"],\n            \"container_image\" => \"xyz\",\n            \"output_path\" => \"/tmp\",\n            \"mounts\" => {\n              \"/tmp\" => {\n                \"kind\" => \"tmp\",\n                \"capacity\" => 1000000,\n              },\n            },\n            \"cwd\" => \"/\",\n            \"runtime_token\" => runtime_token\n          }.to_json,\n          # Without .to_json here, the mount capacity value gets\n          # serialized as a string, which does not validate.\n        },\n        headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{api_client_authorizations(:active).api_token}\"}\n      if label.start_with? \"invalid\"\n        assert_response 422\n      else\n        assert_response :success\n      end\n    end\n  end\n\n  test 'authenticate with remote token, remote user is system user' do\n    @stub_content[:uuid] = 'zbbbb-tpzed-000000000000000'\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_equal 'from cluster zbbbb', json_response['last_name']\n  end\n\n  test 'authenticate with remote token, remote user is anonymous user' do\n    @stub_content[:uuid] = 'zbbbb-tpzed-anonymouspublic'\n    get '/arvados/v1/users/current',\n      params: {format: 'json'},\n      headers: auth(remote: 'zbbbb')\n    assert_response :success\n    assert_equal 'zzzzz-tpzed-anonymouspublic', json_response['uuid']\n  end\n\n  [400, 401, 403, 422, 500, 502, 503].each do |status|\n    test \"handle #{status} response when checking remote-provided v2 token\" do\n      @stub_token_status = status\n      get \"/arvados/v1/users/#{@stub_content[:uuid]}\",\n          params: {format: \"json\"},\n          headers: auth(remote: \"zbbbb\")\n      assert_response(status < 500 ? 401 : status)\n    end\n\n    test \"handle #{status} response when checking remote-provided v2 token at anonymously accessible endpoint\" do\n      @stub_token_status = status\n      get \"/arvados/v1/keep_services/accessible\",\n          params: {format: \"json\"},\n          headers: auth(remote: \"zbbbb\")\n      assert_response(status < 500 ? :success : status)\n    end\n\n    test \"handle #{status} response when checking token issued by login cluster\" do\n      @stub_token_status = status\n      Rails.configuration.Login.LoginCluster = \"zbbbb\"\n      get \"/arvados/v1/users/current\",\n          params: {format: \"json\"},\n          headers: {'HTTP_AUTHORIZATION' => \"Bearer badtoken\"}\n      assert_response(status < 500 ? 
401 : status)\n    end\n\n    test \"handle #{status} response when checking token issued by login cluster at anonymously accessible endpoint\" do\n      @stub_token_status = status\n      Rails.configuration.Login.LoginCluster = \"zbbbb\"\n      get \"/arvados/v1/keep_services/accessible\",\n          params: {format: \"json\"},\n          headers: {'HTTP_AUTHORIZATION' => \"Bearer badtoken\"}\n      assert_response(status < 500 ? :success : status)\n    end\n  end\n\n  [401, 403, 422, 500, 502, 503].each do |status|\n    test \"propagate #{status} response from getting uncached user\" do\n      @stub_status = status\n      get \"/arvados/v1/users/#{@stub_content[:uuid]}\",\n          params: {format: \"json\"},\n          headers: auth(remote: \"zbbbb\")\n      assert_response status\n    end\n\n    test \"use cached user after getting #{status} response\" do\n      url_path = \"/arvados/v1/users/#{@stub_content[:uuid]}\"\n      params = {format: \"json\"}\n      headers = auth(remote: \"zbbbb\")\n\n      get url_path, params: params, headers: headers\n      assert_response :success\n\n      uncache_token(headers[\"HTTP_AUTHORIZATION\"])\n      expect_email = @stub_content[:email]\n      @stub_content[:email] = \"new#{expect_email}\"\n      @stub_status = status\n      get url_path, params: params, headers: headers\n      assert_response :success\n      user = User.find_by_uuid(@stub_content[:uuid])\n      assert_not_nil user\n      assert_equal expect_email, user.email\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/select_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass SelectTest < ActionDispatch::IntegrationTest\n  test \"should select just two columns\" do\n    get \"/arvados/v1/links\",\n      params: {:format => :json, :select => ['uuid', 'link_class']},\n      headers: auth(:active)\n    assert_response :success\n    assert_equal json_response['items'].count, json_response['items'].select { |i|\n      i.count == 3 and i['uuid'] != nil and i['link_class'] != nil\n    }.count\n  end\n\n  test \"fewer distinct than total count\" do\n    get \"/arvados/v1/links\",\n      params: {:format => :json, :select => ['link_class']},\n      headers: auth(:active)\n    assert_response :success\n    distinct_unspecified = json_response['items']\n\n    get \"/arvados/v1/links\",\n      params: {:format => :json, :select => ['link_class'], :distinct => false},\n      headers: auth(:active)\n    assert_response :success\n    distinct_false = json_response['items']\n\n    get \"/arvados/v1/links\",\n      params: {:format => :json, :select => ['link_class'], :distinct => true},\n      headers: auth(:active)\n    assert_response :success\n    distinct = json_response['items']\n\n    assert_operator(distinct.count, :<, distinct_false.count,\n                    \"distinct=true count should be less than distinct=false count\")\n    assert_equal(distinct_unspecified.count, distinct_false.count,\n                    \"distinct=false should be the default\")\n    assert_equal distinct_false.uniq.count, distinct.count\n  end\n\n  test \"select with order\" do\n    get \"/arvados/v1/links\",\n      params: {:format => :json, :select => ['uuid'], :order => [\"uuid asc\"]},\n      headers: auth(:active)\n    assert_response :success\n\n    assert json_response['items'].length > 0\n\n    p = \"\"\n    json_response['items'].each do |i|\n      assert i['uuid'] > p\n      p = i['uuid']\n    end\n  end\n\n  test \"select with default order\" do\n    get \"/arvados/v1/links\",\n      params: {format: :json, select: ['uuid']},\n      headers: auth(:admin)\n    assert_response :success\n    uuids = json_response['items'].collect { |i| i['uuid'] }\n    assert_equal uuids, uuids.sort.reverse\n  end\n\n  def assert_link_classes_ascend(current_class, prev_class)\n    # Databases and Ruby don't always agree about string ordering with\n    # punctuation.  
If the strings aren't ascending normally, check\n    # that they're equal up to punctuation.\n    if current_class < prev_class\n      class_prefix = current_class.split(/\\W/).first\n      assert prev_class.start_with?(class_prefix)\n    end\n  end\n\n  test \"select two columns with order\" do\n    get \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :select => ['link_class', 'uuid'], :order => ['link_class asc', \"uuid desc\"]\n      },\n      headers: auth(:active)\n    assert_response :success\n\n    assert json_response['items'].length > 0\n\n    prev_link_class = \"\"\n    prev_uuid = \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\"\n\n    json_response['items'].each do |i|\n      if prev_link_class != i['link_class']\n        prev_uuid = \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\"\n      end\n\n      assert_link_classes_ascend(i['link_class'], prev_link_class)\n      assert i['uuid'] < prev_uuid\n\n      prev_link_class = i['link_class']\n      prev_uuid = i['uuid']\n    end\n  end\n\n  test \"select two columns with old-style order syntax\" do\n    get \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :select => ['link_class', 'uuid'], :order => 'link_class asc, uuid desc'\n      },\n      headers: auth(:active)\n    assert_response :success\n\n    assert json_response['items'].length > 0\n\n    prev_link_class = \"\"\n    prev_uuid = \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\"\n\n    json_response['items'].each do |i|\n      if prev_link_class != i['link_class']\n        prev_uuid = \"zzzzz-zzzzz-zzzzzzzzzzzzzzz\"\n      end\n\n      assert_link_classes_ascend(i['link_class'], prev_link_class)\n      assert i['uuid'] < prev_uuid\n\n      prev_link_class = i['link_class']\n      prev_uuid = i['uuid']\n    end\n  end\n\nend\n"
  },
  {
    "path": "services/api/test/integration/serialized_encoding_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass SerializedEncodingTest < ActionDispatch::IntegrationTest\n  fixtures :all\n\n  {\n    api_client_authorization: {scopes: []},\n    link: {link_class: 'test', name: 'test', properties: {foo: :bar}},\n    user: {prefs: {cookies: 'thin mint'}},\n  }.each_pair do |resource, postdata|\n    test \"create json-encoded #{resource.to_s}\" do\n      post(\"/arvados/v1/#{resource.to_s.pluralize}\",\n        params: {resource => postdata.to_json},\n        headers: auth(:admin_trustedclient))\n      assert_response :success\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/user_sessions_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass UserSessionsApiTest < ActionDispatch::IntegrationTest\n  # remote prefix & return url packed into the return_to param passed around\n  # between API and SSO provider.\n  def client_url(remote: nil)\n    url = ',https://controller.api.client.invalid'\n    url = \"#{remote}#{url}\" unless remote.nil?\n    url\n  end\n\n  def mock_auth_with(email: nil, username: nil, identity_url: nil, remote: nil, expected_response: :redirect)\n    mock = {\n        'identity_url' => 'https://edward.example.com',\n        'name' => 'Edward Example',\n        'first_name' => 'Edward',\n        'last_name' => 'Example',\n    }\n    mock['email'] = email unless email.nil?\n    mock['username'] = username unless username.nil?\n    mock['identity_url'] = identity_url unless identity_url.nil?\n    post('/auth/controller/callback',\n      params: {return_to: client_url(remote: remote), :auth_info => SafeJSON.dump(mock)},\n      headers: {'Authorization' => 'Bearer ' + Rails.configuration.SystemRootToken})\n\n    errors = {\n      :redirect => 'Did not redirect to client with token',\n      400 => 'Did not return Bad Request error',\n    }\n    assert_response expected_response, errors[expected_response]\n  end\n\n  test 'assign username from sso' do\n    mock_auth_with(email: 'foo@example.com', username: 'bar')\n    u = assigns(:user)\n    assert_equal 'bar', u.username\n  end\n\n  test 'no assign username from sso' do\n    mock_auth_with(email: 'foo@example.com')\n    u = assigns(:user)\n    assert_equal 'foo', u.username\n  end\n\n  test 'existing user login' do\n    mock_auth_with(identity_url: \"https://active-user.openid.local\")\n    u = assigns(:user)\n    assert_equal users(:active).uuid, u.uuid\n  end\n\n  test 'user redirect_to_user_uuid' do\n    mock_auth_with(identity_url: \"https://redirects-to-active-user.openid.local\")\n    u = assigns(:user)\n    assert_equal users(:active).uuid, u.uuid\n  end\n\n  test 'user double redirect_to_user_uuid' do\n    mock_auth_with(identity_url: \"https://double-redirects-to-active-user.openid.local\")\n    u = assigns(:user)\n    assert_equal users(:active).uuid, u.uuid\n  end\n\n  test 'create new user during omniauth callback' do\n    mock_auth_with(email: 'edward@example.com')\n    assert_equal(0, @response.redirect_url.index(client_url.split(',', 2)[1]),\n                 'Redirected to wrong address after succesful login: was ' +\n                 @response.redirect_url + ', expected ' + client_url.split(',', 2)[1] + '[...]')\n    assert_not_nil(@response.redirect_url.index('api_token='),\n                   'Expected api_token in query string of redirect url ' +\n                   @response.redirect_url)\n  end\n\n  test 'issue salted token from omniauth callback with remote param' do\n    mock_auth_with(email: 'edward@example.com', remote: 'zbbbb')\n    api_client_auth = assigns(:api_client_auth)\n    assert_not_nil api_client_auth\n    assert_includes(@response.redirect_url, 'api_token=' + api_client_auth.salted_token(remote: 'zbbbb'))\n  end\n\n  test 'error out from omniauth callback with invalid remote param' do\n    mock_auth_with(email: 'edward@example.com', remote: 'invalid_cluster_id', expected_response: 400)\n  end\n\n  # Test various combinations of auto_setup configuration and email\n  # address provided during a new user's first session setup.\n  [{result: :nope, email: nil, cfg: {auto: true, vm: 
true}},\n   {result: :yup, email: nil, cfg: {auto: true}},\n   {result: :nope, email: '@example.com', cfg: {auto: true, vm: true}},\n   {result: :yup, email: '@example.com', cfg: {auto: true}},\n   {result: :nope, email: 'root@', cfg: {auto: true, vm: true}},\n   {result: :nope, email: 'root@', cfg: {auto: true}},\n   {result: :nope, email: 'gitolite@', cfg: {auto: true}},\n   {result: :nope, email: '*_*@', cfg: {auto: true, vm: true}},\n   {result: :yup, email: 'toor@', cfg: {auto: true, vm: true, repo: true}},\n   {result: :yup, email: 'foo@', cfg: {auto: true, vm: true},\n     uniqprefix: 'foo'},\n   {result: :yup, email: 'foo@', cfg: {auto: true},\n     uniqprefix: 'foo'},\n   {result: :yup, email: 'auto_setup_vm_login@', cfg: {auto: true},\n     uniqprefix: 'auto_setup_vm_login'},\n   ].each do |testcase|\n    test \"user auto-activate #{testcase.inspect}\" do\n      # Configure auto_setup behavior according to testcase[:cfg]\n      Rails.configuration.Users.NewUsersAreActive = false\n      Rails.configuration.Users.AutoSetupNewUsers = testcase[:cfg][:auto]\n      Rails.configuration.Users.AutoSetupNewUsersWithVmUUID =\n        (testcase[:cfg][:vm] ? virtual_machines(:testvm).uuid : \"\")\n\n      mock_auth_with(email: testcase[:email])\n      u = assigns(:user)\n      vm_links = Link.where('link_class=? and tail_uuid=? and head_uuid like ?',\n                            'permission', u.uuid,\n                            '%-' + VirtualMachine.uuid_prefix + '-%')\n      case testcase[:result]\n      when :nope\n        assert_equal false, u.is_invited, \"should not have been set up\"\n        assert_empty vm_links, \"should not have VM login permission\"\n      when :yup\n        assert_equal true, u.is_invited\n        if testcase[:cfg][:vm]\n          assert_equal 1, vm_links.count, \"wrong number of VM perm links\"\n        else\n          assert_empty vm_links, \"should not have VM login permission\"\n        end\n      end\n      if (prefix = testcase[:uniqprefix])\n        # This email address conflicts with a test fixture. Make sure\n        # every VM login got digits added to make it unique.\n        vm_links.collect { |link| link.properties['username'] }.each do |name|\n          r = name.match(/^(.{#{prefix.length}})(\\d+)$/)\n          assert_not_nil r, \"#{name.inspect} does not match {prefix}\\\\d+\"\n          assert_equal(prefix, r[1],\n                       \"#{name.inspect} was not {#{prefix.inspect} plus digits}\")\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/users_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\nrequire 'helpers/users_test_helper'\n\nclass UsersTest < ActionDispatch::IntegrationTest\n  include UsersTestHelper\n\n  test \"setup user multiple times\" do\n    post \"/arvados/v1/users/setup\",\n      params: {\n        user: {\n          uuid: 'zzzzz-tpzed-abcdefghijklmno',\n          first_name: \"in_create_test_first_name\",\n          last_name: \"test_last_name\",\n          email: \"foo@example.com\"\n        }\n      },\n      headers: auth(:admin)\n\n    assert_response :success\n\n    response_items = json_response['items']\n\n    created = find_obj_in_resp response_items, 'arvados#user', nil\n\n    assert_equal 'in_create_test_first_name', created['first_name']\n    assert_not_nil created['uuid'], 'expected non-null uuid for the new user'\n    assert_equal 'zzzzz-tpzed-abcdefghijklmno', created['uuid']\n    assert_not_nil created['email'], 'expected non-nil email'\n    assert_nil created['identity_url'], 'expected no identity_url'\n\n    # link to add user to 'All users' group\n\n    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',\n        'All users', created['uuid'], 'arvados#group', true, 'Group'\n\n    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',\n        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'\n\n    verify_system_group_permission_link_for created['uuid']\n\n    # invoke setup again with the same data\n    post \"/arvados/v1/users/setup\",\n      params: {\n        vm_uuid: virtual_machines(:testvm).uuid,\n        user: {\n          uuid: 'zzzzz-tpzed-abcdefghijklmno',\n          first_name: \"in_create_test_first_name\",\n          last_name: \"test_last_name\",\n          email: \"foo@example.com\"\n        }\n      },\n      headers: auth(:admin)\n    assert_response 422         # cannot create another user with same UUID\n\n    # invoke setup on the same user\n    post \"/arvados/v1/users/setup\",\n      params: {\n        vm_uuid: virtual_machines(:testvm).uuid,\n        uuid: 'zzzzz-tpzed-abcdefghijklmno',\n      },\n      headers: auth(:admin)\n\n    response_items = json_response['items']\n\n    created = find_obj_in_resp response_items, 'arvados#user', nil\n    assert_equal 'in_create_test_first_name', created['first_name']\n    assert_not_nil created['uuid'], 'expected non-null uuid for the new user'\n    assert_equal 'zzzzz-tpzed-abcdefghijklmno', created['uuid']\n    assert_not_nil created['email'], 'expected non-nil email'\n    assert_nil created['identity_url'], 'expected no identity_url'\n\n    # arvados#user, and link to add user to 'All users' group\n    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',\n        'All users', created['uuid'], 'arvados#group', true, 'Group'\n\n    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',\n        virtual_machines(:testvm).uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'\n\n    verify_system_group_permission_link_for created['uuid']\n  end\n\n  test \"setup user in multiple steps and verify response\" do\n    post \"/arvados/v1/users/setup\",\n      params: {\n        user: {\n          email: \"foo@example.com\"\n        }\n      },\n      headers: auth(:admin)\n\n    assert_response :success\n    response_items = json_response['items']\n    created = find_obj_in_resp response_items, 
'arvados#user', nil\n\n    assert_not_nil created['uuid'], 'expected uuid for new user'\n    assert_not_nil created['email'], 'expected non-nil email'\n    assert_equal created['email'], 'foo@example.com', 'expected input email'\n\n    # two new links: system_group, and 'All users' group.\n\n    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',\n        'All users', created['uuid'], 'arvados#group', true, 'Group'\n\n    verify_link response_items, 'arvados#virtualMachine', false, 'permission', 'can_login',\n        nil, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'\n\n    # invoke setup with a vm_uuid\n    post \"/arvados/v1/users/setup\",\n      params: {\n        vm_uuid: virtual_machines(:testvm).uuid,\n        user: {\n          email: 'junk_email'\n        },\n        uuid: created['uuid']\n      },\n      headers: auth(:admin)\n\n    assert_response :success\n\n    response_items = json_response['items']\n    created = find_obj_in_resp response_items, 'arvados#user', nil\n\n    assert_equal created['email'], 'foo@example.com', 'expected original email'\n\n    # verify links\n    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',\n        'All users', created['uuid'], 'arvados#group', true, 'Group'\n\n    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',\n        virtual_machines(:testvm).uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'\n  end\n\n  test \"setup and unsetup user\" do\n    post \"/arvados/v1/users/setup\",\n      params: {\n        vm_uuid: virtual_machines(:testvm).uuid,\n        user: {email: 'foo@example.com'},\n      },\n      headers: auth(:admin)\n\n    assert_response :success\n    response_items = json_response['items']\n    created = find_obj_in_resp response_items, 'arvados#user', nil\n    assert_not_nil created['uuid'], 'expected uuid for the new user'\n    assert_equal created['email'], 'foo@example.com', 'expected given email'\n\n    # three extra links: system_group, login, group and vm\n\n    verify_link response_items, 'arvados#group', true, 'permission', 'can_write',\n        'All users', created['uuid'], 'arvados#group', true, 'Group'\n\n    verify_link response_items, 'arvados#virtualMachine', true, 'permission', 'can_login',\n        virtual_machines(:testvm).uuid, created['uuid'], 'arvados#virtualMachine', false, 'VirtualMachine'\n\n    verify_link_existence created['uuid'], created['email'], true, true, true, true, false\n\n    # create a token\n    token = act_as_system_user do\n      ApiClientAuthorization.create!(user: User.find_by_uuid(created['uuid'])).api_token\n    end\n\n    # share project and collections with the new user\n    act_as_system_user do\n      Link.create!(tail_uuid: created['uuid'],\n                   head_uuid: groups(:aproject).uuid,\n                   link_class: 'permission',\n                   name: 'can_manage')\n      Link.create!(tail_uuid: created['uuid'],\n                   head_uuid: collections(:collection_owned_by_active).uuid,\n                   link_class: 'permission',\n                   name: 'can_read')\n      Link.create!(tail_uuid: created['uuid'],\n                   head_uuid: collections(:collection_owned_by_active_with_file_stats).uuid,\n                   link_class: 'permission',\n                   name: 'can_write')\n    end\n\n    assert_equal 1, ApiClientAuthorization.where(user_id: User.find_by_uuid(created['uuid']).id).size, 'expected token not 
found'\n\n    post \"/arvados/v1/users/#{created['uuid']}/unsetup\", params: {}, headers: auth(:admin)\n\n    assert_response :success\n\n    created2 = json_response\n    assert_not_nil created2['uuid'], 'expected uuid for the newly created user'\n    assert_equal created['uuid'], created2['uuid'], 'expected uuid not found'\n    assert_equal 0, ApiClientAuthorization.where(user_id: User.find_by_uuid(created['uuid']).id).size, 'token should have been deleted by user unsetup'\n    # check permissions are deleted\n    assert_empty Link.where(tail_uuid: created['uuid'])\n\n    verify_link_existence created['uuid'], created['email'], false, false, false, false, false\n  end\n\n  def find_obj_in_resp (response_items, kind, head_kind=nil)\n    response_items.each do |x|\n      if x && x['kind']\n        return x if (x['kind'] == kind && x['head_kind'] == head_kind)\n      end\n    end\n    nil\n  end\n\n  test 'merge active into project_viewer account' do\n    post('/arvados/v1/groups',\n      params: {\n        group: {\n          group_class: 'project',\n          name: \"active user's stuff\",\n        },\n      },\n      headers: auth(:project_viewer))\n    assert_response(:success)\n    project_uuid = json_response['uuid']\n\n    post('/arvados/v1/users/merge',\n      params: {\n        new_user_token: api_client_authorizations(:project_viewer_trustedclient).api_token,\n        new_owner_uuid: project_uuid,\n        redirect_to_new_user: true,\n      },\n      headers: auth(:active_trustedclient))\n    assert_response(:success)\n\n    get('/arvados/v1/users/current', params: {}, headers: auth(:active))\n    assert_response(:success)\n    assert_equal(users(:project_viewer).uuid, json_response['uuid'])\n\n    get('/arvados/v1/authorized_keys/' + authorized_keys(:active).uuid,\n      params: {},\n      headers: auth(:active))\n    assert_response(:success)\n    assert_equal(users(:project_viewer).uuid, json_response['owner_uuid'])\n    assert_equal(users(:project_viewer).uuid, json_response['authorized_user_uuid'])\n\n    get('/arvados/v1/groups/' + groups(:aproject).uuid,\n      params: {},\n      headers: auth(:active))\n    assert_response(:success)\n    assert_equal(project_uuid, json_response['owner_uuid'])\n  end\n\n  test 'pre-activate user' do\n    post '/arvados/v1/users',\n      params: {\n        \"user\" => {\n          \"email\" => 'foo@example.com',\n          \"is_active\" => true,\n          \"username\" => \"barney\"\n        }\n      },\n      headers: {'HTTP_AUTHORIZATION' => \"Bearer #{api_token(:admin)}\"}\n    assert_response :success\n    rp = json_response\n    assert_not_nil rp[\"uuid\"]\n    assert_equal true, rp[\"is_active\"]\n    assert_equal false, rp[\"is_admin\"]\n\n    get \"/arvados/v1/users/#{rp['uuid']}\",\n      params: {format: 'json'},\n      headers: auth(:admin)\n    assert_response :success\n    assert_equal rp[\"uuid\"], json_response['uuid']\n    assert_equal false, json_response['is_admin']\n    assert_equal true, json_response['is_active']\n    assert_equal 'foo@example.com', json_response['email']\n    assert_equal 'barney', json_response['username']\n  end\n\n  test \"cannot set is_active to false directly\" do\n    post('/arvados/v1/users',\n      params: {\n        user: {\n          email: \"bob@example.com\",\n          username: \"bobby\"\n        },\n      },\n      headers: auth(:admin))\n    assert_response(:success)\n    user = json_response\n    assert_equal false, user['is_active']\n\n    token = act_as_system_user do\n      
ApiClientAuthorization.create!(user: User.find_by_uuid(user['uuid'])).api_token\n    end\n    post(\"/arvados/v1/user_agreements/sign\",\n        params: {uuid: 'zzzzz-4zz18-t68oksiu9m80s4y'},\n        headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{token}\"})\n    assert_response :success\n\n    post(\"/arvados/v1/users/#{user['uuid']}/activate\",\n      params: {},\n      headers: auth(:admin))\n    assert_response(:success)\n    user = json_response\n    assert_equal true, user['is_active']\n\n    put(\"/arvados/v1/users/#{user['uuid']}\",\n         params: {\n           user: {is_active: false}\n         },\n         headers: auth(:admin))\n    assert_response 422\n  end\n\n  test \"cannot self activate when AutoSetupNewUsers is false\" do\n    Rails.configuration.Users.NewUsersAreActive = false\n    Rails.configuration.Users.AutoSetupNewUsers = false\n\n    user = nil\n    token = nil\n    act_as_system_user do\n      user = User.create!(email: \"bob@example.com\", username: \"bobby\")\n      ap = ApiClientAuthorization.create!(user: user)\n      token = ap.api_token\n    end\n\n    get(\"/arvados/v1/users/#{user['uuid']}\",\n        params: {},\n        headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{token}\"})\n    assert_response(:success)\n    user = json_response\n    assert_equal false, user['is_active']\n\n    post(\"/arvados/v1/users/#{user['uuid']}/activate\",\n        params: {},\n        headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{token}\"})\n    assert_response 422\n    assert_match(/Cannot activate without being invited/, json_response['errors'][0])\n  end\n\n\n  test \"cannot self activate after unsetup\" do\n    Rails.configuration.Users.NewUsersAreActive = false\n    Rails.configuration.Users.AutoSetupNewUsers = false\n\n    user = nil\n    token = nil\n    act_as_system_user do\n      user = User.create!(email: \"bob@example.com\", username: \"bobby\")\n      ap = ApiClientAuthorization.create!(user: user)\n      token = ap.api_token\n    end\n\n    post(\"/arvados/v1/users/setup\",\n        params: {uuid: user['uuid']},\n        headers: auth(:admin))\n    assert_response :success\n\n    post(\"/arvados/v1/users/#{user['uuid']}/activate\",\n        params: {},\n        headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{token}\"})\n    assert_response 403\n    assert_match(/Cannot activate without user agreements/, json_response['errors'][0])\n\n    post(\"/arvados/v1/user_agreements/sign\",\n        params: {uuid: 'zzzzz-4zz18-t68oksiu9m80s4y'},\n        headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{token}\"})\n    assert_response :success\n\n    post(\"/arvados/v1/users/#{user['uuid']}/activate\",\n        params: {},\n        headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{token}\"})\n    assert_response :success\n\n    get(\"/arvados/v1/users/#{user['uuid']}\",\n        params: {},\n        headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{token}\"})\n    assert_response(:success)\n    userJSON = json_response\n    assert_equal true, userJSON['is_active']\n\n    post(\"/arvados/v1/users/#{user['uuid']}/unsetup\",\n        params: {},\n        headers: auth(:admin))\n    assert_response :success\n\n    # Need to get a new token, the old one was invalidated by the unsetup call\n    act_as_system_user do\n      ap = ApiClientAuthorization.create!(user: user)\n      token = ap.api_token\n    end\n\n    get(\"/arvados/v1/users/#{user['uuid']}\",\n        params: {},\n        headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{token}\"})\n    assert_response(:success)\n    
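# After unsetup, the account should be deactivated again.\n    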
userJSON = json_response\n    assert_equal false, userJSON['is_active']\n\n    post(\"/arvados/v1/users/#{user['uuid']}/activate\",\n        params: {},\n        headers: {\"HTTP_AUTHORIZATION\" => \"Bearer #{token}\"})\n    assert_response 422\n    assert_match(/Cannot activate without being invited/, json_response['errors'][0])\n  end\n\n  test \"bypass_federation only accepted for admins\" do\n    get \"/arvados/v1/users\",\n      params: {\n        bypass_federation: true\n      },\n      headers: auth(:admin)\n\n    assert_response :success\n\n    get \"/arvados/v1/users\",\n      params: {\n        bypass_federation: true\n      },\n      headers: auth(:active)\n\n    assert_response 403\n  end\n\n  test \"disabling system root user not permitted\" do\n    put(\"/arvados/v1/users/#{users(:system_user).uuid}\",\n      params: {\n        user: {is_admin: false}\n      },\n      headers: auth(:admin))\n    assert_response 422\n\n    post(\"/arvados/v1/users/#{users(:system_user).uuid}/unsetup\",\n      params: {},\n      headers: auth(:admin))\n    assert_response 422\n  end\n\n  test \"creating users only accepted for admins\" do\n    assert_equal false, users(:active).is_admin\n    post '/arvados/v1/users',\n      params: {\n        \"user\" => {\n          \"email\" => 'foo@example.com',\n          \"username\" => \"barney\"\n        }\n      },\n      headers: auth(:active)\n    assert_response 403\n  end\n\n  test \"create users assigns the system root user as their owner\" do\n    post '/arvados/v1/users',\n      params: {\n        \"user\" => {\n          \"email\" => 'foo@example.com',\n          \"username\" => \"barney\"\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n    assert_not_nil json_response[\"uuid\"]\n    assert_equal users(:system_user).uuid, json_response[\"owner_uuid\"]\n  end\n\n  test \"create users ignores provided owner_uuid field\" do\n    assert_equal false, users(:admin).uuid == users(:system_user).uuid\n    post '/arvados/v1/users',\n      params: {\n        \"user\" => {\n          \"email\" => 'foo@example.com',\n          \"owner_uuid\" => users(:admin).uuid,\n          \"username\" => \"barney\"\n        }\n      },\n      headers: auth(:admin)\n    assert_response :success\n    assert_not_nil json_response[\"uuid\"]\n    assert_equal users(:system_user).uuid, json_response[\"owner_uuid\"]\n  end\nend\n"
  },
  {
    "path": "services/api/test/integration/valid_links_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass ValidLinksTest < ActionDispatch::IntegrationTest\n  fixtures :all\n\n  test \"tail must exist on update\" do\n    admin_auth = {'HTTP_AUTHORIZATION' => \"Bearer #{api_client_authorizations(:admin).api_token}\"}\n\n    post \"/arvados/v1/links\",\n      params: {\n        :format => :json,\n        :link => {\n          link_class: 'test',\n          name: 'stuff',\n          head_uuid: users(:active).uuid,\n          tail_uuid: virtual_machines(:testvm).uuid\n        }\n      },\n      headers: admin_auth\n    assert_response :success\n    u = json_response['uuid']\n\n    put \"/arvados/v1/links/#{u}\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: virtual_machines(:testvm2).uuid\n        }\n      },\n      headers: admin_auth\n    assert_response :success\n    assert_equal virtual_machines(:testvm2).uuid, (ActiveSupport::JSON.decode @response.body)['tail_uuid']\n\n    put \"/arvados/v1/links/#{u}\",\n      params: {\n        :format => :json,\n        :link => {\n          tail_uuid: 'zzzzz-tpzed-xyzxyzxerrrorxx'\n        }\n      },\n      headers: admin_auth\n    assert_response 422\n  end\n\nend\n"
  },
  {
    "path": "services/api/test/integration/workflows_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass WorkflowsApiTest < ActionDispatch::IntegrationTest\n  fixtures :all\n\n  teardown do\n    $enable_workflow_collection_linking_for_tests = false\n  end\n\n  def create_workflow_collection_helper\n    post \"/arvados/v1/collections\",\n         params: {:format => :json,\n                  collection: {\n                    name: \"test workflow\",\n                    description: \"the workflow that tests linking collection and workflow records\",\n                    properties: {\n                      \"type\": \"workflow\",\n                                 \"arv:workflowMain\": \"foo.cwl\",\n                                 \"arv:cwl_inputs\": [{\n                                                      \"id\": \"#main/x\",\n                                                     \"type\": \"int\",\n                                                    }\n                                                   ],\n                                 \"arv:cwl_outputs\": [{\n                                                      \"id\": \"#main/y\",\n                                                     \"type\": \"File\",\n                                                     }],\n                                 \"arv:cwl_requirements\": [\n                                                         ],\n                                 \"arv:cwl_hints\": [\n                                                  ],\n                    }\n                  }\n                 },\n         headers: auth(:active),\n         as: :json\n    assert_response :success\n    json_response\n  end\n\n  # This test will be removed in 23057.\n  test \"cannot link a workflow to a collection until #23057\" do\n    collection_response = create_workflow_collection_helper\n    assert_equal(collection_response[\"name\"], \"test workflow\")\n    assert_equal(collection_response[\"description\"], \"the workflow that tests linking collection and workflow records\")\n    assert_equal(collection_response[\"owner_uuid\"], users(:active).uuid)\n\n    post \"/arvados/v1/workflows\",\n         params: {:format => :json,\n                  :workflow => {\n                    collection_uuid: collection_response[\"uuid\"]\n                  }\n                 },\n         headers: auth(:active)\n    assert_response 422\n    assert_match(/Collection uuid must be null/, json_response['errors'][0])\n\n    post \"/arvados/v1/workflows\",\n         params: {:format => :json,\n                  :workflow => {}},\n         headers: auth(:active)\n    assert_response :success\n    workflow_response = json_response\n\n    patch \"/arvados/v1/workflows/#{workflow_response[\"uuid\"]}\",\n         params: {:format => :json,\n                  :workflow => {\n                    collection_uuid: collection_response[\"uuid\"]\n                  }\n                 },\n         headers: auth(:active)\n    assert_response 422\n    assert_match(/Collection uuid must be null/, json_response['errors'][0])\n  end\n\n  test \"link a workflow to a collection\" do\n    $enable_workflow_collection_linking_for_tests = true\n\n    collection_response = create_workflow_collection_helper\n    assert_equal(collection_response[\"name\"], \"test workflow\")\n    assert_equal(collection_response[\"description\"], \"the workflow that tests linking collection and workflow records\")\n    
assert_equal(collection_response[\"owner_uuid\"], users(:active).uuid)\n\n    # Now create a workflow linked to the collection.\n    post \"/arvados/v1/workflows\",\n         params: {:format => :json,\n                  :workflow => {\n                    collection_uuid: collection_response[\"uuid\"]\n                  }\n                 },\n      headers: auth(:active)\n    assert_response :success\n    workflow_response = json_response\n    assert_equal(collection_response[\"name\"], workflow_response[\"name\"])\n    assert_equal(collection_response[\"description\"], workflow_response[\"description\"])\n    assert_equal(collection_response[\"owner_uuid\"], workflow_response[\"owner_uuid\"])\n    assert_equal({\"cwlVersion\"=>\"v1.2\",\n                  \"$graph\"=>[\n                    {\"class\"=>\"Workflow\",\n                     \"id\"=>\"#main\",\n                     \"inputs\"=>[{\"id\"=>\"#main/x\", \"type\"=>\"int\"}],\n                     \"outputs\"=>[{\"id\"=>\"#main/y\", \"type\"=>\"File\", \"outputSource\"=>\"#main/step/y\"}],\n                     \"steps\"=>[{\"id\"=>\"#main/foo.cwl\",\n                                \"in\"=>[{\"id\"=>\"#main/step/x\", \"source\"=>\"#main/x\"}],\n                                \"out\"=>[{\"id\"=>\"#main/step/y\"}],\n                                \"run\"=>\"keep:d41d8cd98f00b204e9800998ecf8427e+0/foo.cwl\",\n                                \"label\"=>\"test workflow\"}],\n                     \"requirements\"=>[{\"class\"=>\"SubworkflowFeatureRequirement\"}],\n                     \"hints\"=>[]}]},\n                 JSON.parse(workflow_response[\"definition\"]))\n\n    # Now update the collection and check that the linked workflow record was also updated.\n    patch \"/arvados/v1/collections/#{collection_response['uuid']}\",\n         params: {:format => :json,\n                  collection: {\n                    name: \"test workflow v2\",\n                    description: \"the second version of the workflow that tests linking collection and workflow records\",\n                    owner_uuid: groups(:private).uuid,\n                    properties: {\n                      \"type\": \"workflow\",\n                                 \"arv:workflowMain\": \"foo.cwl\",\n                                 \"arv:cwl_inputs\": [{\n                                                      \"id\": \"#main/w\",\n                                                     \"type\": \"int\",\n                                                    },\n                                                    {\n                                                      \"id\": \"#main/x\",\n                                                     \"type\": \"int\",\n                                                    }\n                                                   ],\n                                 \"arv:cwl_outputs\": [{\n                                                      \"id\": \"#main/y\",\n                                                     \"type\": \"File\",\n                                                     },\n                                                    {\n                                                      \"id\": \"#main/z\",\n                                                     \"type\": \"File\",\n                                                     }],\n                                 \"arv:cwl_requirements\": [\n                                                         ],\n                                 \"arv:cwl_hints\": [\n     
                                             ],\n                    }\n                  }\n                 },\n         headers: auth(:active),\n         as: :json\n    assert_response :success\n    collection_response = json_response\n    assert_equal(collection_response[\"name\"], \"test workflow v2\")\n    assert_equal(collection_response[\"description\"], \"the second version of the workflow that tests linking collection and workflow records\")\n    assert_equal(collection_response[\"owner_uuid\"], groups(:private).uuid)\n\n    get \"/arvados/v1/workflows/#{workflow_response['uuid']}\", headers: auth(:active)\n    assert_response :success\n    workflow_response = json_response\n    assert_equal(collection_response[\"name\"], workflow_response[\"name\"])\n    assert_equal(collection_response[\"description\"], workflow_response[\"description\"])\n    assert_equal(collection_response[\"owner_uuid\"], workflow_response[\"owner_uuid\"])\n    assert_equal({\"cwlVersion\"=>\"v1.2\",\n                  \"$graph\"=>[\n                    {\"class\"=>\"Workflow\",\n                     \"id\"=>\"#main\",\n                     \"inputs\"=>[{\"id\"=>\"#main/w\", \"type\"=>\"int\"},\n                                {\"id\"=>\"#main/x\", \"type\"=>\"int\"}\n                               ],\n                     \"outputs\"=>[{\"id\"=>\"#main/y\", \"type\"=>\"File\", \"outputSource\"=>\"#main/step/y\"},\n                                 {\"id\"=>\"#main/z\", \"type\"=>\"File\", \"outputSource\"=>\"#main/step/z\"}],\n                     \"steps\"=>[{\"id\"=>\"#main/foo.cwl\",\n                                \"in\"=>[{\"id\"=>\"#main/step/w\", \"source\"=>\"#main/w\"},\n                                       {\"id\"=>\"#main/step/x\", \"source\"=>\"#main/x\"}],\n                                \"out\"=>[{\"id\"=>\"#main/step/y\"}, {\"id\"=>\"#main/step/z\"}],\n                                \"run\"=>\"keep:d41d8cd98f00b204e9800998ecf8427e+0/foo.cwl\",\n                                \"label\"=>\"test workflow v2\"}],\n                     \"requirements\"=>[{\"class\"=>\"SubworkflowFeatureRequirement\"}],\n                     \"hints\"=>[]}]},\n\n                 JSON.parse(workflow_response[\"definition\"]))\n  end\n\n  test \"workflow cannot be modified after it is linked\" do\n    $enable_workflow_collection_linking_for_tests = true\n\n    # Now create a workflow linked to the collection.\n    post \"/arvados/v1/workflows\",\n         params: {:format => :json,\n                  :workflow => {\n                    name: \"legacy\"\n                  }\n                 },\n      headers: auth(:active)\n    assert_response :success\n    workflow_response = json_response\n    assert_equal(\"legacy\", workflow_response[\"name\"])\n\n    patch \"/arvados/v1/workflows/#{workflow_response['uuid']}\",\n         params: {:format => :json,\n                  :workflow => {\n                    name: \"legacy v2\"\n                  }\n                 },\n         headers: auth(:active),\n         as: :json\n    assert_response :success\n    workflow_response = json_response\n    assert_equal(\"legacy v2\", workflow_response[\"name\"])\n\n    collection_response = create_workflow_collection_helper\n    patch \"/arvados/v1/workflows/#{workflow_response['uuid']}\",\n         params: {:format => :json,\n                  :workflow => {\n                    collection_uuid: collection_response['uuid']\n                  }\n                 },\n         headers: auth(:active),\n         as: :json\n    
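# Linking should succeed, after which the workflow's name is managed by\n    # the collection and direct updates to the workflow are rejected.\n    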
assert_response :success\n    workflow_response = json_response\n    assert_equal(collection_response['name'], workflow_response[\"name\"])\n\n    patch \"/arvados/v1/workflows/#{workflow_response['uuid']}\",\n         params: {:format => :json,\n                  :workflow => {\n                    name: \"legacy v2\"\n                  }\n                 },\n         headers: auth(:active),\n         as: :json\n    assert_response 403\n\n  end\n\n  test \"trashing collection also hides workflow\" do\n    $enable_workflow_collection_linking_for_tests = true\n\n    collection_response = create_workflow_collection_helper\n\n    # Now create a workflow linked to the collection.\n    post \"/arvados/v1/workflows\",\n         params: {:format => :json,\n                  :workflow => {\n                    collection_uuid: collection_response[\"uuid\"]\n                  }\n                 },\n      headers: auth(:active)\n    assert_response :success\n    workflow_response = json_response\n\n    get \"/arvados/v1/workflows/#{workflow_response['uuid']}\", headers: auth(:active)\n    assert_response :success\n\n    # Now trash the collection\n    post \"/arvados/v1/collections/#{collection_response['uuid']}/trash\", headers: auth(:active)\n    assert_response :success\n\n    get \"/arvados/v1/collections/#{collection_response['uuid']}\", headers: auth(:active)\n    assert_response 404\n\n    get \"/arvados/v1/workflows/#{workflow_response['uuid']}\", headers: auth(:active)\n    assert_response 404\n\n    # Now untrash the collection\n    post \"/arvados/v1/collections/#{collection_response['uuid']}/untrash\", headers: auth(:active)\n    assert_response :success\n\n    get \"/arvados/v1/collections/#{collection_response['uuid']}\", headers: auth(:active)\n    assert_response :success\n\n    get \"/arvados/v1/workflows/#{workflow_response['uuid']}\", headers: auth(:active)\n    assert_response :success\n  end\n\n  test \"collection is missing cwl_inputs\" do\n    $enable_workflow_collection_linking_for_tests = true\n\n    # The following is allowed, because it isn't linked.\n    # This is what legacy arvados-cwl-runner instances\n    # have been creating, so we want to make sure we can still\n    # create them, but not link them.\n    post \"/arvados/v1/collections\",\n         params: {:format => :json,\n                  collection: {\n                    name: \"test workflow\",\n                    description: \"the workflow that tests linking collection and workflow records\",\n                    properties: {\n                      \"type\": \"workflow\",\n                      \"arv:workflowMain\": \"foo.cwl\"\n                    }\n                  }\n                 },\n         headers: auth(:active),\n         as: :json\n    assert_response :success\n    collection_response = json_response\n\n    # But it can't be linked because it doesn't have all the fields\n    post \"/arvados/v1/workflows\",\n         params: {:format => :json,\n                  :workflow => {\n                    collection_uuid: collection_response[\"uuid\"]\n                  }\n                 },\n      headers: auth(:active)\n    assert_response 422\n    assert_match(/missing field 'arv:cwl_inputs' in collection properties/, json_response[\"errors\"][0])\n  end\n\n  test \"collection cwl_inputs wrong type\" do\n    $enable_workflow_collection_linking_for_tests = true\n\n    post \"/arvados/v1/collections\",\n         params: {:format => :json,\n                  collection: {\n                    
name: \"test workflow\",\n                    description: \"the workflow that tests linking collection and workflow records\",\n                    properties: {\n                      \"type\": \"workflow\",\n                      \"arv:workflowMain\": \"foo.cwl\",\n                      # Intentionally invalid: the linking step below expects\n                      # 'arv:cwl_inputs' to be an array, not a hash.\n                      \"arv:cwl_inputs\": {\n                        \"#main/x\": {\n                          \"type\": \"int\"\n                        }\n                      },\n                      \"arv:cwl_outputs\": [{\n                        \"id\": \"#main/y\",\n                        \"type\": \"File\",\n                      }],\n                      \"arv:cwl_requirements\": [],\n                      \"arv:cwl_hints\": [],\n                    }\n                  }\n                 },\n         headers: auth(:active),\n         as: :json\n    assert_response :success\n    collection_response = json_response\n\n    # But it can't be linked because one of the fields is invalid\n    post \"/arvados/v1/workflows\",\n         params: {:format => :json,\n                  :workflow => {\n                    collection_uuid: collection_response[\"uuid\"]\n                  }\n                 },\n         headers: auth(:active)\n    assert_response 422\n    assert_match(/expected field 'arv:cwl_inputs' in collection properties to be a Array/, json_response[\"errors\"][0])\n  end\n\n  test \"cannot change collection type as long as there is a linked workflow\" do\n    $enable_workflow_collection_linking_for_tests = true\n\n    collection_response = create_workflow_collection_helper\n\n    # Create a workflow linked to the collection.\n    post \"/arvados/v1/workflows\",\n         params: {:format => :json,\n                  :workflow => {\n                    collection_uuid: collection_response[\"uuid\"]\n                  }\n                 },\n         headers: auth(:active)\n    assert_response :success\n    workflow_response = json_response\n\n    # Now try to change the type property; this should fail.\n    properties = collection_response[\"properties\"]\n    properties[\"type\"] = \"something else\"\n\n    patch \"/arvados/v1/collections/#{collection_response['uuid']}\",\n         params: {:format => :json,\n                  collection: {\n                    properties: properties,\n                  }\n                 },\n         headers: auth(:active),\n         as: :json\n    assert_response 422\n    assert_match(/cannot change 'type' property when there are linked workflows/, json_response[\"errors\"][0])\n\n    # Delete the linked workflow\n    delete \"/arvados/v1/workflows/#{workflow_response['uuid']}\",\n         params: {:format => :json},\n         headers: auth(:active)\n    assert_response :success\n\n    # Now we can change the type property\n    patch \"/arvados/v1/collections/#{collection_response['uuid']}\",\n         params: {:format => :json,\n                  collection: {\n                    properties: properties,\n                  }\n                 },\n         headers: auth(:active),\n         as: :json\n    assert_response :success\n\n    # But we can't make a new linked workflow, because the type is wrong\n    post \"/arvados/v1/workflows\",\n         params: {:format => :json,\n                  :workflow => {\n                    collection_uuid: collection_response[\"uuid\"]\n                  }\n                 },\n         headers: auth(:active)\n    assert_response 422\n    assert_match(/properties does not have type: workflow/, json_response[\"errors\"][0])\n  end\n\n  test \"workflow can be deleted without deleting collection\" do\n    $enable_workflow_collection_linking_for_tests = true\n\n    collection_response = create_workflow_collection_helper\n\n    # Now create a workflow linked to the collection.\n    post \"/arvados/v1/workflows\",\n         params: {:format => :json,\n                  :workflow => {\n                    collection_uuid: collection_response[\"uuid\"]\n                  }\n                 },\n         headers: auth(:active)\n    assert_response :success\n    workflow_response = json_response\n\n    assert_not_nil Collection.find_by_uuid(collection_response['uuid'])\n    assert_not_nil Workflow.find_by_uuid(workflow_response['uuid'])\n\n    delete \"/arvados/v1/workflows/#{workflow_response['uuid']}\",\n         params: {:format => :json},\n         headers: auth(:active)\n    assert_response :success\n    workflow_response = json_response\n\n    assert_not_nil Collection.find_by_uuid(collection_response['uuid'])\n    assert_nil Workflow.find_by_uuid(workflow_response['uuid'])\n  end\n\n  test \"destroying collection destroys linked workflow\" do\n    $enable_workflow_collection_linking_for_tests = true\n\n    collection_response = create_workflow_collection_helper\n\n    # Now create a workflow linked to the collection.\n    post \"/arvados/v1/workflows\",\n         params: {:format => :json,\n                  :workflow => {\n                    collection_uuid: collection_response[\"uuid\"]\n                  }\n                 },\n         headers: auth(:active)\n    assert_response :success\n    workflow_response = json_response\n\n    assert_not_nil Collection.find_by_uuid(collection_response['uuid'])\n    assert_not_nil Workflow.find_by_uuid(workflow_response['uuid'])\n\n    Collection.find_by_uuid(collection_response['uuid']).destroy\n\n    assert_nil Collection.find_by_uuid(collection_response['uuid'])\n    assert_nil Workflow.find_by_uuid(workflow_response['uuid'])\n  end\n\n  test \"group contents endpoint supports include=collection_uuid and query on collection.properties\" do\n    $enable_workflow_collection_linking_for_tests = true\n\n    collection_response = create_workflow_collection_helper\n\n    # Now create a workflow linked to the collection.\n    post \"/arvados/v1/workflows\",\n         params: {:format => :json,\n                  :workflow => {\n                    collection_uuid: collection_response[\"uuid\"]\n                  }\n                 },\n         headers: auth(:active)\n    assert_response :success\n    workflow_response = json_response\n\n    # no manifest text by default\n    get '/arvados/v1/groups/contents',\n        params: {\n          filters: [[\"uuid\", \"is_a\", \"arvados#workflow\"], [\"collection.properties.arv:workflowMain\", \"=\", \"foo.cwl\"]].to_json,\n          include: '[\"collection_uuid\"]',\n          format: :json,\n        },\n        headers: auth(:active)\n    assert_response :success\n    assert_equal workflow_response[\"uuid\"], json_response[\"items\"][0][\"uuid\"]\n    assert_equal collection_response[\"uuid\"], json_response[\"included\"][0][\"uuid\"]\n    assert_nil json_response[\"included\"][0][\"manifest_text\"]\n    assert_nil json_response[\"included\"][0][\"unsigned_manifest_text\"]\n    assert_equal collection_response[\"properties\"][\"arv:workflowMain\"], json_response[\"included\"][0][\"properties\"][\"arv:workflowMain\"]\n\n    # select didn't include manifest text, so still shouldn't get it\n    get '/arvados/v1/groups/contents',\n        params: {\n          filters: [[\"uuid\", \"is_a\", \"arvados#workflow\"], [\"collection.properties.arv:workflowMain\", \"=\", \"foo.cwl\"]].to_json,\n          include: '[\"collection_uuid\"]',\n          select: '[\"uuid\", \"collection_uuid\", \"properties\"]',\n          format: :json,\n        },\n        headers: auth(:active)\n    assert_response :success\n    assert_equal workflow_response[\"uuid\"], json_response[\"items\"][0][\"uuid\"]\n    assert_equal collection_response[\"uuid\"], json_response[\"included\"][0][\"uuid\"]\n    assert_nil json_response[\"included\"][0][\"manifest_text\"]\n    assert_nil json_response[\"included\"][0][\"unsigned_manifest_text\"]\n    assert_equal collection_response[\"properties\"][\"arv:workflowMain\"], json_response[\"included\"][0][\"properties\"][\"arv:workflowMain\"]\n\n    # Currently, with the group contents API, you won't get\n    # manifest_text even if you ask for it, because it won't be signed\n    # by the controller.\n    get '/arvados/v1/groups/contents',\n        params: {\n          filters: [[\"uuid\", \"is_a\", \"arvados#workflow\"], [\"collection.properties.arv:workflowMain\", \"=\", \"foo.cwl\"]].to_json,\n          include: '[\"collection_uuid\"]',\n          select: '[\"uuid\", \"collection_uuid\", \"properties\", \"manifest_text\"]',\n          format: :json,\n        },\n        headers: auth(:active)\n    assert_response :success\n    assert_equal workflow_response[\"uuid\"], json_response[\"items\"][0][\"uuid\"]\n    assert_equal collection_response[\"uuid\"], json_response[\"included\"][0][\"uuid\"]\n    assert_nil json_response[\"included\"][0][\"manifest_text\"]\n    assert_nil json_response[\"included\"][0][\"unsigned_manifest_text\"]\n    assert_equal collection_response[\"properties\"][\"arv:workflowMain\"], json_response[\"included\"][0][\"properties\"][\"arv:workflowMain\"]\n\n    # However, you can get unsigned_manifest_text\n    get '/arvados/v1/groups/contents',\n        params: {\n          filters: [[\"uuid\", \"is_a\", \"arvados#workflow\"], [\"collection.properties.arv:workflowMain\", \"=\", \"foo.cwl\"]].to_json,\n          include: '[\"collection_uuid\"]',\n          select: '[\"uuid\", \"collection_uuid\", \"properties\", \"unsigned_manifest_text\"]',\n          format: :json,\n        },\n        headers: auth(:active)\n    assert_response :success\n    assert_equal workflow_response[\"uuid\"], json_response[\"items\"][0][\"uuid\"]\n    assert_equal collection_response[\"uuid\"], json_response[\"included\"][0][\"uuid\"]\n    assert_nil json_response[\"included\"][0][\"manifest_text\"]\n    assert_equal \"\", json_response[\"included\"][0][\"unsigned_manifest_text\"]\n    assert_equal collection_response[\"properties\"][\"arv:workflowMain\"], json_response[\"included\"][0][\"properties\"][\"arv:workflowMain\"]\n  end\nend\n"
  },
  {
    "path": "services/api/test/performance/links_index_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\nrequire 'benchmark'\n\nclass IndexTest < ActionDispatch::IntegrationTest\n  def test_links_index\n    puts(\"Get links index: \", Benchmark.measure do\n      get '/arvados/v1/links', params: {\n        limit: 1000,\n        format: :json\n      }, headers: auth(:admin)\n    end)\n  end\n  def test_links_index_with_filters\n    puts(\"Get links index with filters: \", Benchmark.measure do\n      get '/arvados/v1/links', params: {\n        format: :json,\n        filters: [%w[head_uuid is_a arvados#collection]].to_json\n      }, headers: auth(:admin)\n    end)\n  end\n  def test_collections_index\n    puts(\"Get collections index: \", Benchmark.measure do\n      get '/arvados/v1/collections', params: {\n        format: :json\n        }, headers: auth(:admin)\n      end)\n  end\nend\n"
  },
  {
    "path": "services/api/test/performance/permission_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\nrequire 'benchmark'\n\n\ndef create_eight parent\n  uuids = []\n  values = []\n  (0..8).each do\n    uuid = Group.generate_uuid\n    values.push \"('#{uuid}', '#{parent}', now(), now(), '#{uuid}')\"\n    uuids.push uuid\n  end\n  ActiveRecord::Base.connection.execute(\"INSERT INTO groups (uuid, owner_uuid, created_at, updated_at, name) VALUES #{values.join ','}\")\n  uuids\nend\n\nclass PermissionPerfTest < ActionDispatch::IntegrationTest\n  def test_groups_index\n    n = 0\n    act_as_system_user do\n      puts(\"Time spent creating records:\", Benchmark.measure do\n             ActiveRecord::Base.transaction do\n               root = Group.create!(owner_uuid: users(:permission_perftest).uuid, group_class: \"project\")\n               n += 1\n               a = create_eight root.uuid\n               n += 8\n               a.each do |p1|\n                 b = create_eight p1\n                 n += 8\n                 b.each do |p2|\n                   c = create_eight p2\n                   n += 8\n                   c.each do |p3|\n                     d = create_eight p3\n                     n += 8\n                   end\n                 end\n               end\n               refresh_permissions\n             end\n           end)\n    end\n    puts \"created #{n}\"\n    puts \"Time spent getting group index:\"\n    (0..1).each do\n      puts(Benchmark.measure do\n             get '/arvados/v1/groups', params: {format: :json}, headers: auth(:permission_perftest)\n             assert json_response['items_available'] >= n\n           end)\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/test_helper.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire_relative '../lib/update_permissions'\n\nENV[\"RAILS_ENV\"] = \"test\"\nunless ENV[\"NO_COVERAGE_TEST\"]\n  begin\n    verbose_orig = $VERBOSE\n    begin\n      $VERBOSE = nil\n      require 'simplecov'\n      require 'simplecov-rcov'\n    ensure\n      $VERBOSE = verbose_orig\n    end\n    class SimpleCov::Formatter::MergedFormatter\n      def format(result)\n        SimpleCov::Formatter::HTMLFormatter.new.format(result)\n        SimpleCov::Formatter::RcovFormatter.new.format(result)\n      end\n    end\n    SimpleCov.formatter = SimpleCov::Formatter::MergedFormatter\n    SimpleCov.start do\n      add_filter '/test/'\n      add_filter 'initializers/secret_token'\n    end\n  rescue Exception => e\n    $stderr.puts \"SimpleCov unavailable (#{e}). Proceeding without.\"\n  end\nend\n\nrequire File.expand_path('../../config/environment', __FILE__)\nrequire 'rails/test_help'\nrequire 'mocha'\nrequire 'mocha/minitest'\n\nmodule ArvadosTestSupport\n  def json_response\n    Oj.strict_load response.body\n  end\n\n  def api_token(api_client_auth_name)\n    api_client_authorizations(api_client_auth_name).token\n  end\n\n  def auth(api_client_auth_name)\n    {'HTTP_AUTHORIZATION' => \"Bearer #{api_token(api_client_auth_name)}\"}\n  end\n\n  def full_text_excluded_columns\n    [\n      # All the columns that contain a UUID or PDH as of June 2024/Arvados 3.0.\n      # It's okay if this list gets out-of-date, it just needs to be complete\n      # enough to test that full text indexes exclude the right columns.\n      \"authorized_user_uuid\",\n      \"auth_uuid\",\n      \"cancelled_by_client_uuid\",\n      \"cancelled_by_user_uuid\",\n      \"container_image\",\n      \"container_uuid\",\n      \"current_version_uuid\",\n      \"for_container_uuid\",\n      \"frozen_by_uuid\",\n      \"group_uuid\",\n      \"head_uuid\",\n      \"is_locked_by_uuid\",\n      \"locked_by_uuid\",\n      \"log_uuid\",\n      \"modified_by_client_uuid\",\n      \"modified_by_user_uuid\",\n      \"node_uuid\",\n      \"object_owner_uuid\",\n      \"object_uuid\",\n      \"output_uuid\",\n      \"owner_uuid\",\n      \"perm_origin_uuid\",\n      \"portable_data_hash\",\n      \"pri_container_uuid\",\n      \"redirect_to_user_uuid\",\n      \"requesting_container_uuid\",\n      \"starting_uuid\",\n      \"tail_uuid\",\n      \"target_uuid\",\n      \"user_uuid\",\n      \"uuid\",\n    ]\n  end\n\n  def show_errors model\n    return lambda { model.errors.full_messages.inspect }\n  end\nend\n\nclass ActiveSupport::TestCase\n  include FactoryBot::Syntax::Methods\n  fixtures :all\n\n  include ArvadosTestSupport\n  include CurrentApiClient\n\n  setup do\n    Thread.current[:api_client_ip_address] = nil\n    Thread.current[:api_client_authorization] = nil\n    Thread.current[:token] = nil\n    Thread.current[:user] = nil\n    restore_configuration\n  end\n\n  teardown do\n    # Confirm that any changed configuration doesn't include non-symbol keys\n    $arvados_config.keys.each do |conf_name|\n      conf = Rails.configuration.send(conf_name)\n      confirm_keys_as_symbols(conf, conf_name) if conf.respond_to?('keys')\n    end\n  end\n\n  def assert_equal(expect, *args)\n    if expect.nil?\n      assert_nil(*args)\n    else\n      super\n    end\n  end\n\n  def assert_not_allowed\n    # Provide a block that calls a Rails boolean \"true or false\" success value,\n    # like model.save or model.destroy.  
This method will test that it either\n    # returns false, or raises a Permission Denied exception.\n    begin\n      refute(yield)\n    rescue ArvadosModel::PermissionDeniedError\n    end\n  end\n\n  def add_permission_link from_who, to_what, perm_type\n    act_as_system_user do\n      Link.create!(tail_uuid: from_who.uuid,\n                   head_uuid: to_what.uuid,\n                   link_class: 'permission',\n                   name: perm_type)\n    end\n  end\n\n  def confirm_keys_as_symbols(conf, conf_name)\n    assert(conf.is_a?(ActiveSupport::OrderedOptions), \"#{conf_name} should be an OrderedOptions object\")\n    conf.keys.each do |k|\n      assert(k.is_a?(Symbol), \"Key '#{k}' on section '#{conf_name}' should be a Symbol\")\n      confirm_keys_as_symbols(conf[k], \"#{conf_name}.#{k}\") if conf[k].respond_to?('keys')\n    end\n  end\n\n  def restore_configuration\n    # Restore configuration settings changed during tests\n    ConfigLoader.copy_into_config $arvados_config, Rails.configuration\n    ConfigLoader.copy_into_config $remaining_config, Rails.configuration\n  end\n\n  def set_user_from_auth(auth_name)\n    client_auth = api_client_authorizations(auth_name)\n    client_auth.user.forget_cached_group_perms\n    Thread.current[:api_client_authorization] = client_auth\n    Thread.current[:user] = client_auth.user\n    Thread.current[:token] = client_auth.token\n  end\n\n  def expect_json\n    self.request.headers[\"Accept\"] = \"text/json\"\n  end\n\n  def authorize_with api_client_auth_name\n    authorize_with_token api_client_authorizations(api_client_auth_name).token\n  end\n\n  def authorize_with_token token\n    t = token\n    t = t.token if t.respond_to? :token\n    ArvadosApiToken.new.call(\"rack.input\" => \"\",\n                             \"HTTP_AUTHORIZATION\" => \"Bearer #{t}\")\n  end\n\n  def salt_token(fixture:, remote:)\n    auth = api_client_authorizations(fixture)\n    uuid = auth.uuid\n    token = auth.api_token\n    hmac = OpenSSL::HMAC.hexdigest('sha1', token, remote)\n    return \"v2/#{uuid}/#{hmac}\"\n  end\n\n  def self.skip_slow_tests?\n    !(ENV['RAILS_TEST_SHORT'] || '').empty?\n  end\n\n  def self.skip(*args, &block)\n  end\n\n  def self.slow_test(name, &block)\n    test(name, &block) unless skip_slow_tests?\n  end\nend\n\nclass ActionController::TestCase\n  setup do\n    @test_counter = 0\n    self.request.headers['Accept'] = 'application/json'\n    self.request.headers['Content-Type'] = 'application/json'\n  end\n\n  def check_counter action\n    @test_counter += 1\n    if @test_counter == 2\n      assert_equal 1, 2, \"Multiple actions in functional test\"\n    end\n  end\n\n  [:get, :post, :put, :patch, :delete].each do |method|\n    define_method method do |action, **kwargs|\n      check_counter action\n      # After Rails 5.0 upgrade, some params don't get properly serialized.\n      # One case are filters: [['attr', 'op', 'val']] become [['attr'], ['op'], ['val']]\n      # if not passed upstream as a JSON string.\n      if kwargs[:params].is_a?(Hash)\n        kwargs[:params].each do |key, _|\n          next if key == :exclude_script_versions # Job Reuse tests\n          # Keys could be: :filters, :where, etc\n          if [Array, Hash].include?(kwargs[:params][key].class)\n            kwargs[:params][key] = SafeJSON.dump(kwargs[:params][key])\n          end\n        end\n      end\n      super action, **kwargs\n    end\n  end\n\n  def self.suite\n    s = super\n    def s.run(*args)\n      @test_case.startup()\n      begin\n        super\n  
    ensure\n        @test_case.shutdown()\n      end\n    end\n    s\n  end\n  def self.startup; end\n  def self.shutdown; end\nend\n\nclass ActionDispatch::IntegrationTest\n  teardown do\n    Thread.current[:api_client_ip_address] = nil\n    Thread.current[:api_client_authorization] = nil\n    Thread.current[:token] = nil\n    Thread.current[:user] = nil\n  end\nend\n\n# Ensure permissions are computed from the test fixtures.\nrefresh_permissions\nrefresh_trashed\n"
  },
  {
    "path": "services/api/test/unit/.gitkeep",
    "content": ""
  },
  {
    "path": "services/api/test/unit/api_client_authorization_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass ApiClientAuthorizationTest < ActiveSupport::TestCase\n  include CurrentApiClient\n\n  [:admin_trustedclient, :active_trustedclient].each do |token|\n    test \"ApiClientAuthorization can be created then deleted by #{token}\" do\n      set_user_from_auth token\n      x = ApiClientAuthorization.create!(user_id: current_user.id,\n                                         scopes: [])\n      newtoken = x.api_token\n      assert x.destroy, \"Failed to destroy new ApiClientAuth\"\n      assert_empty ApiClientAuthorization.where(api_token: newtoken), \"Destroyed ApiClientAuth is still in database\"\n    end\n  end\n\n  test \"accepts SystemRootToken\" do\n    assert_nil ApiClientAuthorization.validate(token: \"xxxSystemRootTokenxxx\")\n\n    # will create a new ApiClientAuthorization record\n    Rails.configuration.SystemRootToken = \"xxxSystemRootTokenxxx\"\n\n    auth = ApiClientAuthorization.validate(token: \"xxxSystemRootTokenxxx\")\n    assert_equal \"xxxSystemRootTokenxxx\", auth.api_token\n    assert_equal User.find_by_uuid(system_user_uuid).id, auth.user_id\n\n    # now change the token and try to use the old one first\n    Rails.configuration.SystemRootToken = \"newxxxSystemRootTokenxxx\"\n\n    # old token will fail\n    assert_nil ApiClientAuthorization.validate(token: \"xxxSystemRootTokenxxx\")\n    # new token will work\n    auth = ApiClientAuthorization.validate(token: \"newxxxSystemRootTokenxxx\")\n    assert_equal \"newxxxSystemRootTokenxxx\", auth.api_token\n    assert_equal User.find_by_uuid(system_user_uuid).id, auth.user_id\n\n    # now change the token again and use the new one first\n    Rails.configuration.SystemRootToken = \"new2xxxSystemRootTokenxxx\"\n\n    # new token will work\n    auth = ApiClientAuthorization.validate(token: \"new2xxxSystemRootTokenxxx\")\n    assert_equal \"new2xxxSystemRootTokenxxx\", auth.api_token\n    assert_equal User.find_by_uuid(system_user_uuid).id, auth.user_id\n    # old token will fail\n    assert_nil ApiClientAuthorization.validate(token: \"newxxxSystemRootTokenxxx\")\n  end\n\n\nend\n"
  },
  {
    "path": "services/api/test/unit/app_version_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass AppVersionTest < ActiveSupport::TestCase\n\n  setup do AppVersion.forget end\n\n  teardown do AppVersion.forget end\n\n  test 'invoke git processes only on first call' do\n    AppVersion.expects(:git).\n      with(\"status\", \"--porcelain\").once.\n      yields \" M services/api/README\\n\"\n    AppVersion.expects(:git).\n      with(\"log\", \"-n1\", \"--format=%H\").once.\n      yields \"da39a3ee5e6b4b0d3255bfef95601890afd80709\\n\"\n\n    (0..4).each do\n      v = AppVersion.hash\n      assert_equal 'da39a3ee-modified', v\n    end\n  end\n\n  test 'override with configuration \"foobar\"' do\n    Rails.configuration.source_version = 'foobar'\n    assert_equal 'foobar', AppVersion.hash\n  end\n\n  test 'override with configuration false' do\n    Rails.configuration.source_version = false\n    assert_not_equal 'foobar', AppVersion.hash\n  end\n\n  test 'override with file' do\n    path = Rails.root.join 'git-commit.version'\n    assert(!File.exist?(path),\n           \"Packaged version file found in source tree: #{path}\")\n    begin\n      File.open(path, 'w') do |f|\n        f.write \"0.1.abc123\\n\"\n      end\n      assert_equal \"0.1.abc123\", AppVersion.hash\n    ensure\n      File.unlink path\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/application_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass ApplicationTest < ActiveSupport::TestCase\n  include CurrentApiClient\n\n  test \"act_as_system_user\" do\n    Thread.current[:user] = users(:active)\n    assert_equal users(:active), Thread.current[:user]\n    act_as_system_user do\n      assert_not_equal users(:active), Thread.current[:user]\n      assert_equal system_user, Thread.current[:user]\n    end\n    assert_equal users(:active), Thread.current[:user]\n  end\n\n  test \"act_as_system_user is exception safe\" do\n    Thread.current[:user] = users(:active)\n    assert_equal users(:active), Thread.current[:user]\n    caught = false\n    begin\n      act_as_system_user do\n        assert_not_equal users(:active), Thread.current[:user]\n        assert_equal system_user, Thread.current[:user]\n        raise \"Fail\"\n      end\n    rescue\n      caught = true\n    end\n    assert caught\n    assert_equal users(:active), Thread.current[:user]\n  end\n\n  test \"config maps' keys are returned as symbols\" do\n    assert Rails.configuration.Users.AutoSetupUsernameBlacklist.is_a? ActiveSupport::OrderedOptions\n    assert Rails.configuration.Users.AutoSetupUsernameBlacklist.keys.size > 0\n    Rails.configuration.Users.AutoSetupUsernameBlacklist.keys.each do |k|\n      assert k.is_a? Symbol\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/arvados_model_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass ArvadosModelTest < ActiveSupport::TestCase\n  fixtures :all\n\n  def create_with_attrs attrs\n    a = Collection.create({properties: {'foo' => 'bar'}}.merge(attrs))\n    a if a.valid?\n  end\n\n  test 'non-admin cannot assign uuid' do\n    set_user_from_auth :active_trustedclient\n    want_uuid = Collection.generate_uuid\n    a = create_with_attrs(uuid: want_uuid)\n    assert_nil a, \"Non-admin should not assign uuid.\"\n  end\n\n  test 'admin can assign valid uuid' do\n    set_user_from_auth :admin_trustedclient\n    want_uuid = Collection.generate_uuid\n    a = create_with_attrs(uuid: want_uuid)\n    assert_equal want_uuid, a.uuid, \"Admin should assign valid uuid.\"\n    assert a.uuid.length==27, \"Auto assigned uuid length is wrong.\"\n  end\n\n  test 'admin cannot assign uuid with wrong object type' do\n    set_user_from_auth :admin_trustedclient\n    want_uuid = Group.generate_uuid\n    a = create_with_attrs(uuid: want_uuid)\n    assert_nil a, \"Admin should not be able to assign invalid uuid.\"\n  end\n\n  test 'admin cannot assign badly formed uuid' do\n    set_user_from_auth :admin_trustedclient\n    a = create_with_attrs(uuid: \"ntoheunthaoesunhasoeuhtnsaoeunhtsth\")\n    assert_nil a, \"Admin should not be able to assign invalid uuid.\"\n  end\n\n  test 'admin cannot assign empty uuid' do\n    set_user_from_auth :admin_trustedclient\n    a = create_with_attrs(uuid: \"\")\n    assert_nil a, \"Admin cannot assign empty uuid.\"\n  end\n\n  [ {:a => 'foo'},\n    {'a' => :foo},\n    {:a => ['foo', 'bar']},\n    {'a' => [:foo, 'bar']},\n    {'a' => ['foo', :bar]},\n    {:a => [:foo, :bar]},\n    {:a => {'foo' => {'bar' => 'baz'}}},\n    {'a' => {:foo => {'bar' => 'baz'}}},\n    {'a' => {'foo' => {:bar => 'baz'}}},\n    {'a' => {'foo' => {'bar' => :baz}}},\n    {'a' => {'foo' => ['bar', :baz]}},\n  ].each do |x|\n    test \"prevent symbol keys in serialized db columns: #{x.inspect}\" do\n      set_user_from_auth :active\n      link = Link.create!(link_class: 'test',\n                          properties: x)\n      raw = ActiveRecord::Base.connection.\n          select_value(\"select properties from links where uuid='#{link.uuid}'\")\n      refute_match(/:[fb]/, raw)\n    end\n  end\n\n  [ {['foo'] => 'bar'},\n    {'a' => {['foo', :foo] => 'bar'}},\n    {'a' => {{'foo' => 'bar'} => 'bar'}},\n    {'a' => {['foo', :foo] => ['bar', 'baz']}},\n  ].each do |x|\n    test \"refuse non-string keys in serialized db columns: #{x.inspect}\" do\n      set_user_from_auth :active\n      assert_raises(ArgumentError) do\n        Link.create!(link_class: 'test',\n                     properties: x)\n      end\n    end\n  end\n\n  test \"No HashWithIndifferentAccess in database\" do\n    set_user_from_auth :admin_trustedclient\n    link = Link.create!(link_class: 'test',\n                        properties: {'foo' => 'bar'}.with_indifferent_access)\n    raw = ActiveRecord::Base.connection.\n      select_value(\"select properties from links where uuid='#{link.uuid}'\")\n    assert_equal '{\"foo\": \"bar\"}', raw\n  end\n\n  test \"store long string\" do\n    set_user_from_auth :active\n    longstring = \"a\"\n    while longstring.length < 2**16\n      longstring = longstring + longstring\n    end\n    g = Group.create! 
name: 'Has a long description', description: longstring, group_class: \"project\"\n    g = Group.find_by_uuid g.uuid\n    assert_equal g.description, longstring\n  end\n\n  [['uuid', {unique: true}],\n   ['owner_uuid', {}]].each do |the_column, requires|\n    test \"unique index on all models with #{the_column}\" do\n      checked = 0\n      ActiveRecord::Base.connection.tables.each do |table|\n        columns = ActiveRecord::Base.connection.columns(table)\n\n        next unless columns.collect(&:name).include? the_column\n\n        indexes = ActiveRecord::Base.connection.indexes(table).reject do |index|\n          requires.map do |key, val|\n            index.send(key) == val\n          end.include? false\n        end\n        assert_includes indexes.collect(&:columns), [the_column], 'no index'\n        checked += 1\n      end\n      # Sanity check: make sure we didn't just systematically miss everything.\n      assert_operator(10, :<, checked,\n                      \"Only #{checked} tables have a #{the_column}?!\")\n    end\n  end\n\n  test \"search index exists on models that go into projects\" do\n    ActiveRecord::Base.descendants.each do |model_class|\n      next if model_class.abstract_class?\n      next if !model_class.respond_to?('searchable_columns')\n\n      search_index_columns = model_class.searchable_columns('ilike')\n      # Disappointing, but text columns aren't indexed yet.\n      search_index_columns -= model_class.columns.select { |c|\n        c.type == :text or c.name == 'description' or c.name == 'file_names'\n      }.collect(&:name)\n      next if search_index_columns.empty?\n\n      indexes = ActiveRecord::Base.connection.indexes(model_class.table_name)\n      search_index_by_columns = indexes.select do |index|\n        # After rails 5.0 upgrade, AR::Base.connection.indexes() started to include\n        # GIN indexes, with its 'columns' attribute being a String like\n        # 'to_tsvector(...)'\n        index.columns.is_a?(Array) ? index.columns.sort == search_index_columns.sort : false\n      end\n      search_index_by_name = indexes.select do |index|\n        index.name == \"#{model_class.table_name}_search_index\"\n      end\n      assert !search_index_by_columns.empty?, \"#{model_class.table_name} (#{model_class.to_s}) has no search index with columns #{search_index_columns}. 
Instead found search index with columns #{search_index_by_name.first.andand.columns}\"\n    end\n  end\n\n  [Collection, ContainerRequest, Group, Workflow].each do |model|\n    test \"trigram index exists on #{model} model\" do\n      expect = model.full_text_searchable_columns\n      conn = ActiveRecord::Base.connection\n      index_name = \"#{model.table_name}_trgm_text_search_idx\"\n      indexes = conn.exec_query(\"SELECT indexdef FROM pg_indexes WHERE tablename = '#{model.table_name}' AND indexname = '#{index_name}'\")\n      assert_not_equal(indexes.length, 0)\n      indexes.each do |res|\n        searchable = res['indexdef'].scan(/COALESCE\\(+([A-Za-z_]+)/).flatten\n        assert_equal(\n          searchable, expect,\n          \"Invalid or no trigram index for #{model} named #{index_name}\\nexpect: #{expect.inspect}\\nfound: #{searchable}\",\n        )\n      end\n    end\n\n    test \"UUID and hash columns are excluded from #{model} full text index\" do\n      assert_equal(\n        model.full_text_searchable_columns & full_text_excluded_columns, [],\n        \"UUID/hash columns returned by #{model}.full_text_searchable_columns\",\n      )\n    end\n  end\n\n  test \"selectable_attributes includes database attributes\" do\n    assert_includes(Collection.selectable_attributes, \"name\")\n  end\n\n  test \"selectable_attributes includes non-database attributes\" do\n    assert_includes(Collection.selectable_attributes, \"unsigned_manifest_text\")\n  end\n\n  test \"selectable_attributes includes common attributes in extensions\" do\n    assert_includes(Collection.selectable_attributes, \"uuid\")\n  end\n\n  test \"selectable_attributes does not include unexposed attributes\" do\n    refute_includes(Collection.selectable_attributes, \"id\")\n  end\n\n  test \"selectable_attributes on a non-default template\" do\n    attr_a = Collection.selectable_attributes(:common)\n    assert_includes(attr_a, \"uuid\")\n    refute_includes(attr_a, \"name\")\n  end\n\n  test 'create and retrieve using created_at time' do\n    set_user_from_auth :active\n    group = Group.create! name: 'test create and retrieve group', group_class: \"project\"\n    assert group.valid?, \"group is not valid\"\n\n    results = Group.where(created_at: group.created_at)\n    assert_includes results.map(&:uuid), group.uuid,\n      \"Expected new group uuid in results when searched with its created_at timestamp\"\n  end\n\n  test 'create and update twice and expect different update times' do\n    set_user_from_auth :active\n    group = Group.create! 
name: 'test create and retrieve group', group_class: \"project\"\n    assert group.valid?, \"group is not valid\"\n\n    # update 1\n    group.update!(name: \"test create and update name 1\")\n    results = Group.where(uuid: group.uuid)\n    assert_equal \"test create and update name 1\", results.first.name, \"Expected name to be updated to 1\"\n    modified_at_1 = results.first.modified_at.to_f\n\n    # update 2\n    group.update!(name: \"test create and update name 2\")\n    results = Group.where(uuid: group.uuid)\n    assert_equal \"test create and update name 2\", results.first.name, \"Expected name to be updated to 2\"\n    modified_at_2 = results.first.modified_at.to_f\n\n    assert_equal true, (modified_at_2 > modified_at_1), \"Expected modified time 2 to be newer than 1\"\n  end\n\n  test 'jsonb column' do\n    set_user_from_auth :active\n\n    c = Collection.create!(properties: {})\n    assert_equal({}, c.properties)\n\n    c.update(properties: {'foo' => 'foo'})\n    c.reload\n    assert_equal({'foo' => 'foo'}, c.properties)\n\n    c.update(properties: nil)\n    c.reload\n    assert_equal({}, c.properties)\n\n    c.update(properties: {foo: 'bar'})\n    assert_equal({'foo' => 'bar'}, c.properties)\n    c.reload\n    assert_equal({'foo' => 'bar'}, c.properties)\n  end\n\n  {\n    Collection => [\"description\", \"manifest_text\"],\n    Container => [\n      \"command\",\n      \"environment\",\n      \"output_properties\",\n      \"runtime_constraints\",\n      \"secret_mounts\",\n    ],\n    ContainerRequest => [\n      \"command\",\n      \"environment\",\n      \"mounts\",\n      \"output_glob\",\n      \"output_properties\",\n      \"properties\",\n      \"runtime_constraints\",\n      \"secret_mounts\",\n    ],\n    Group => [\"description\", \"properties\"],\n    Log => [\"properties\", \"summary\"],\n  }.each_pair do |model, expect|\n    test \"#{model.name} limits expected columns on index\" do\n      assert_equal(\n        (model.limit_index_columns_read & expect).sort,\n        expect.sort,\n      )\n    end\n  end\n\n  {\n    Collection => [\"delete_at\", \"preserve_version\", \"trash_at\", \"version\"],\n    Container => [\"cost\", \"progress\", \"state\", \"subrequests_cost\"],\n    ContainerRequest => [\"container_uuid\", \"cwd\", \"requesting_container_uuid\"],\n    Group => [\"group_class\", \"is_trashed\", \"trashed_at\"],\n    Log => [\"event_at\", \"event_type\"],\n  }.each_pair do |model, colnames|\n    test \"#{model.name} does not limit expected columns on index\" do\n      assert_equal(model.limit_index_columns_read & colnames, [])\n    end\n  end\n\n  test 'serialized attributes dirty tracking with audit log settings' do\n    Rails.configuration.AuditLogs.MaxDeleteBatch = 1000\n    set_user_from_auth :admin\n    [false, true].each do |auditlogs_enabled|\n      if auditlogs_enabled\n        Rails.configuration.AuditLogs.MaxAge = 3600\n      else\n        Rails.configuration.AuditLogs.MaxAge = 0\n      end\n      tested_serialized = false\n      [\n        User.find_by_uuid(users(:active).uuid),\n        ContainerRequest.find_by_uuid(container_requests(:queued).uuid),\n        Container.find_by_uuid(containers(:queued).uuid),\n        Group.find_by_uuid(groups(:afiltergroup).uuid),\n        Collection.find_by_uuid(collections(:collection_with_one_property).uuid),\n      ].each do |obj|\n        if !obj.class.serialized_attributes.empty?\n          tested_serialized = true\n        end\n        # obj shouldn't have changed since it's just retrieved from the 
database\n        assert_not(obj.changed?, \"#{obj.class} model's attribute(s) appear as changed: '#{obj.changes.keys.join(',')}' with audit logs #{auditlogs_enabled ? '': 'not '}enabled.\")\n      end\n      assert(tested_serialized, \"did not test any models with serialized attributes\")\n    end\n  end\n\n  [\n    # prefs column uses `serialize [...], Hash`\n    ['users(:active)', 'prefs', '\"baddata\"'],\n    ['users(:active)', 'prefs', '[]'],\n    # output_properties column uses `attribute ..., :jsonbHash`\n    ['container_requests(:running)', 'output_properties', '\"baddata\"'],\n    ['container_requests(:running)', 'output_properties', '[\"baddata\"]'],\n    # output_storage_classes column uses `attribute ..., :jsonbArray`\n    ['container_requests(:running)', 'output_storage_classes', '\"baddata\"'],\n    ['container_requests(:running)', 'output_storage_classes', '{}'],\n    # environment column uses `serialize [...], Hash`\n    ['container_requests(:running)', 'environment', '\"baddata\"'],\n    ['container_requests(:running)', 'environment', '[]'],\n    # output_glob column uses `serialize [...], Array`\n    ['container_requests(:running)', 'output_glob', '\"baddata\"'],\n    ['container_requests(:running)', 'output_glob', '{}'],\n  ].each do |get_fixture, attr, bad_data|\n    test \"refuse to load #{get_fixture} from database with wrong type of serialized attribute #{attr}, #{bad_data}\" do\n      object = eval(get_fixture)\n      initial_value = object.attributes[attr].dup\n      ActiveRecord::Base.connection.exec_query(\n        \"UPDATE #{object.class.table_name} SET #{attr}=$2 WHERE uuid=$1\",\n        \"\",\n        [object.uuid, bad_data])\n      begin\n        e = assert_raises(RuntimeError) do\n          object.reload\n        end\n        assert_match /invalid serialized data for #{object.class.to_s} #{attr}/, e.message\n      ensure\n        ActiveRecord::Base.connection.exec_query(\n          \"UPDATE #{object.class.table_name} SET #{attr}=$2 WHERE uuid=$1\",\n          \"\",\n          [object.uuid, initial_value.to_json])\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/authorized_key_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass AuthorizedKeyTest < ActiveSupport::TestCase\n  TEST_KEY = \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCf5aTI55uyWr44TckP/ELUAyPsdnf5fTZDcSDN4qiMZYAL7TYV2ixwnbPObLObM0GmHSSFLV1KqsuFICUPgkyKoHbAH6XPgmtfOLU60VkGf1v5uxQ/kXCECRCJmPb3K9dIXGEw+1DXPdOV/xG7rJNvo4a9WK9iqqZr8p+VGKM6C017b8BDLk0tuEEjZ5jXcT/ka/hTScxWkKgF6auPOVQ79OA5+0VaYm4uQLzVUdgwVUPWQQecRrtnc08XYM1htpcLDIAbWfUNK7uE6XR3/OhtrJGf05FGbtGguPgi33F9W3Q3yw6saOK5Y3TfLbskgFaEdLgzqK/QSBRk2zBF49Tj test@localhost\"\n\n  test 'create and update key' do\n    u1 = users(:active)\n    act_as_user u1 do\n      ak = AuthorizedKey.new(name: \"foo\", public_key: TEST_KEY, authorized_user_uuid: u1.uuid)\n      assert ak.save, ak.errors.full_messages.to_s\n      ak.name = \"bar\"\n      assert ak.valid?, ak.errors.full_messages.to_s\n      assert ak.save, ak.errors.full_messages.to_s\n    end\n  end\n\n  test 'duplicate key not permitted' do\n    u1 = users(:active)\n    act_as_user u1 do\n      ak = AuthorizedKey.new(name: \"foo\", public_key: TEST_KEY, authorized_user_uuid: u1.uuid)\n      assert ak.save\n    end\n    u2 = users(:spectator)\n    act_as_user u2 do\n      ak2 = AuthorizedKey.new(name: \"bar\", public_key: TEST_KEY, authorized_user_uuid: u2.uuid)\n      refute ak2.valid?\n      refute ak2.save\n      assert_match(/already exists/, ak2.errors.full_messages.to_s)\n    end\n  end\n\n  test 'attach key to wrong user account' do\n    act_as_user users(:active) do\n      ak = AuthorizedKey.new(name: \"foo\", public_key: TEST_KEY)\n      ak.authorized_user_uuid = users(:spectator).uuid\n      refute ak.save\n      ak.uuid = nil\n      ak.authorized_user_uuid = users(:admin).uuid\n      refute ak.save\n      ak.uuid = nil\n      ak.authorized_user_uuid = users(:active).uuid\n      assert ak.save, ak.errors.full_messages.to_s\n      ak.authorized_user_uuid = users(:admin).uuid\n      refute ak.save\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/blob_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass BlobTest < ActiveSupport::TestCase\n  @@api_token = rand(2**512).to_s(36)[0..49]\n  @@key = rand(2**2048).to_s(36)\n  @@blob_data = 'foo'\n  @@blob_locator = Digest::MD5.hexdigest(@@blob_data) +\n    '+' + @@blob_data.size.to_s\n\n  @@known_locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3'\n  @@known_token = 'hocfupkn2pjhrpgp2vxv8rsku7tvtx49arbc9s4bvu7p7wxqvk'\n  @@known_key = '13u9fkuccnboeewr0ne3mvapk28epf68a3bhj9q8sb4l6e4e5mkk' +\n    'p6nhj2mmpscgu1zze5h5enydxfe3j215024u16ij4hjaiqs5u4pzsl3nczmaoxnc' +\n    'ljkm4875xqn4xv058koz3vkptmzhyheiy6wzevzjmdvxhvcqsvr5abhl15c2d4o4' +\n    'jhl0s91lojy1mtrzqqvprqcverls0xvy9vai9t1l1lvvazpuadafm71jl4mrwq2y' +\n    'gokee3eamvjy8qq1fvy238838enjmy5wzy2md7yvsitp5vztft6j4q866efym7e6' +\n    'vu5wm9fpnwjyxfldw3vbo01mgjs75rgo7qioh8z8ij7jpyp8508okhgbbex3ceei' +\n    '786u5rw2a9gx743dj3fgq2irk'\n  @@known_signed_locator = 'acbd18db4cc2f85cedef654fccc4a4d8+3' +\n    '+A89118b78732c33104a4d6231e8b5a5fa1e4301e3@7fffffff'\n\n  test 'generate predictable invincible signature' do\n    signed = Blob.sign_locator @@known_locator, {\n      api_token: @@known_token,\n      key: @@known_key,\n      expire: 0x7fffffff,\n    }\n    assert_equal @@known_signed_locator, signed\n  end\n\n  test 'verify predictable invincible signature' do\n    assert_equal true, Blob.verify_signature!(@@known_signed_locator,\n                                              api_token: @@known_token,\n                                              key: @@known_key)\n  end\n\n  test 'correct' do\n    signed = Blob.sign_locator @@blob_locator, api_token: @@api_token, key: @@key\n    assert_equal true, Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)\n  end\n\n  test 'expired' do\n    signed = Blob.sign_locator @@blob_locator, api_token: @@api_token, key: @@key, ttl: -1\n    assert_raise Blob::InvalidSignatureError do\n      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)\n    end\n  end\n\n  test 'expired, but no raise' do\n    signed = Blob.sign_locator @@blob_locator, api_token: @@api_token, key: @@key, ttl: -1\n    assert_equal false, Blob.verify_signature(signed,\n                                              api_token: @@api_token,\n                                              key: @@key)\n  end\n\n  test 'bogus, wrong block hash' do\n    signed = Blob.sign_locator @@blob_locator, api_token: @@api_token, key: @@key\n    assert_raise Blob::InvalidSignatureError do\n      Blob.verify_signature!(signed.sub('acbd','abcd'), api_token: @@api_token, key: @@key)\n    end\n  end\n\n  test 'bogus, expired' do\n    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa@531641bf'\n    assert_raises Blob::InvalidSignatureError do\n      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)\n    end\n  end\n\n  test 'bogus, wrong key' do\n    signed = Blob.sign_locator(@@blob_locator,\n                               api_token: @@api_token,\n                               key: (@@key+'x'))\n    assert_raise Blob::InvalidSignatureError do\n      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)\n    end\n  end\n\n  test 'bogus, wrong api token' do\n    signed = Blob.sign_locator(@@blob_locator,\n                               api_token: @@api_token.reverse,\n                               key: @@key)\n    assert_raise Blob::InvalidSignatureError do\n      
Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)\n    end\n  end\n\n  test 'bogus, signature format 1' do\n    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa@'\n    assert_raise Blob::InvalidSignatureError do\n      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)\n    end\n  end\n\n  test 'bogus, signature format 2' do\n    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+A@531641bf'\n    assert_raise Blob::InvalidSignatureError do\n      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)\n    end\n  end\n\n  test 'bogus, signature format 3' do\n    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+Axyzzy@531641bf'\n    assert_raise Blob::InvalidSignatureError do\n      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)\n    end\n  end\n\n  test 'bogus, timestamp format' do\n    signed = 'acbd18db4cc2f85cedef654fccc4a4d8+3+Aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa@xyzzy'\n    assert_raise Blob::InvalidSignatureError do\n      Blob.verify_signature!(signed, api_token: @@api_token, key: @@key)\n    end\n  end\n\n  test 'no signature at all' do\n    assert_raise Blob::InvalidSignatureError do\n      Blob.verify_signature!(@@blob_locator, api_token: @@api_token, key: @@key)\n    end\n  end\n\n  test 'signature changes when ttl changes' do\n    signed = Blob.sign_locator @@known_locator, {\n      api_token: @@known_token,\n      key: @@known_key,\n      expire: 0x7fffffff,\n    }\n\n    original_ttl = Rails.configuration.Collections.BlobSigningTTL\n    Rails.configuration.Collections.BlobSigningTTL = original_ttl*2\n    signed2 = Blob.sign_locator @@known_locator, {\n      api_token: @@known_token,\n      key: @@known_key,\n      expire: 0x7fffffff,\n    }\n    Rails.configuration.Collections.BlobSigningTTL = original_ttl\n\n    assert_not_equal signed, signed2\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/collection_performance_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\nrequire 'helpers/manifest_examples'\nrequire 'helpers/time_block'\n\nclass CollectionModelPerformanceTest < ActiveSupport::TestCase\n  include ManifestExamples\n\n  setup do\n    # The Collection model needs to have a current token, not just a\n    # current user, to sign & verify manifests:\n    Thread.current[:token] = api_client_authorizations(:active).token\n  end\n\n  teardown do\n    Thread.current[:token] = nil\n  end\n\n  # \"crrud\" == \"create read render update delete\", not a typo\n  slow_test \"crrud cycle for a collection with a big manifest)\" do\n    bigmanifest = time_block 'make example' do\n      make_manifest(streams: 100,\n                    files_per_stream: 100,\n                    blocks_per_file: 20,\n                    bytes_per_block: 2**26,\n                    api_token: api_client_authorizations(:active).token)\n    end\n    act_as_user users(:active) do\n      c = time_block \"new (manifest_text is #{bigmanifest.length>>20}MiB)\" do\n        Collection.new manifest_text: bigmanifest.dup\n      end\n      time_block 'check signatures' do\n        c.check_signatures\n      end\n      time_block 'check signatures + save' do\n        c.instance_eval do @signatures_checked = false end\n        c.save!\n      end\n      c = time_block 'read' do\n        Collection.find_by_uuid(c.uuid)\n      end\n      time_block 'render' do\n        c.as_api_response(nil)\n      end\n      loc = Blob.sign_locator(Digest::MD5.hexdigest('foo') + '+3',\n                              api_token: api_client_authorizations(:active).token)\n      # Note Collection's strip_manifest_text method has now removed\n      # the signatures from c.manifest_text, so we have to start from\n      # bigmanifest again here instead of just appending with \"+=\".\n      c.manifest_text = bigmanifest.dup + \". #{loc} 0:3:foo.txt\\n\"\n      time_block 'update' do\n        c.save!\n      end\n      time_block 'delete' do\n        c.destroy\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/collection_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\nrequire 'fix_collection_versions_timestamps'\n\nclass CollectionTest < ActiveSupport::TestCase\n  include DbCurrentTime\n\n  def create_collection name, enc=nil\n    txt = \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:#{name}.txt\\n\"\n    txt.force_encoding(enc) if enc\n    return Collection.create(manifest_text: txt, name: name)\n  end\n\n  test 'accept ASCII manifest_text' do\n    act_as_system_user do\n      c = create_collection 'foo', Encoding::US_ASCII\n      assert c.valid?\n    end\n  end\n\n  test 'accept UTF-8 manifest_text' do\n    act_as_system_user do\n      c = create_collection \"f\\xc3\\x98\\xc3\\x98\", Encoding::UTF_8\n      assert c.valid?\n    end\n  end\n\n  test 'refuse manifest_text with invalid UTF-8 byte sequence' do\n    act_as_system_user do\n      c = create_collection \"f\\xc8o\", Encoding::UTF_8\n      assert !c.valid?\n      assert_equal [:manifest_text], c.errors.messages.keys\n      assert_match(/UTF-8/, c.errors.messages[:manifest_text].first)\n    end\n  end\n\n  test 'refuse manifest_text with non-UTF-8 encoding' do\n    act_as_system_user do\n      c = create_collection \"f\\xc8o\", Encoding::ASCII_8BIT\n      assert !c.valid?\n      assert_equal [:manifest_text], c.errors.messages.keys\n      assert_match(/UTF-8/, c.errors.messages[:manifest_text].first)\n    end\n  end\n\n  [\n    \". 0:0:foo.txt\",\n    \". d41d8cd98f00b204e9800998ecf8427e foo.txt\",\n    \"d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\",\n    \". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\",\n  ].each do |manifest_text|\n    test \"create collection with invalid manifest text #{manifest_text} and expect error\" do\n      act_as_system_user do\n        c = Collection.create(manifest_text: manifest_text)\n        assert !c.valid?\n      end\n    end\n  end\n\n  [\n    [\". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\\n\", 1, 34],\n    [\". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt 0:30:foo.txt 0:30:foo1.txt 0:30:foo2.txt 0:30:foo3.txt 0:30:foo4.txt\\n\", 5, 184],\n    [\". d41d8cd98f00b204e9800998ecf8427e 0:0:.\\n\", 0, 0]\n  ].each do |manifest, count, size|\n    test \"file stats on create collection with #{manifest}\" do\n      act_as_system_user do\n        c = Collection.create(manifest_text: manifest)\n        assert_equal count, c.file_count\n        assert_equal size, c.file_size_total\n      end\n    end\n  end\n\n  test \"file stats cannot be changed unless through manifest change\" do\n    act_as_system_user do\n      # Direct changes to file stats should be ignored\n      c = Collection.create(manifest_text: \". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\\n\")\n      c.file_count = 6\n      c.file_size_total = 30\n      assert c.valid?\n      assert_equal 1, c.file_count\n      assert_equal 34, c.file_size_total\n\n      # File stats specified on create should be ignored and overwritten\n      c = Collection.create(manifest_text: \". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\\n\", file_count: 10, file_size_total: 10)\n      assert c.valid?\n      assert_equal 1, c.file_count\n      assert_equal 34, c.file_size_total\n\n      # Updating the manifest should change file stats\n      c.update(manifest_text: \". 
d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt 0:34:foo2.txt\\n\")\n      assert c.valid?\n      assert_equal 2, c.file_count\n      assert_equal 68, c.file_size_total\n\n      # Updating file stats and the manifest should use manifest values\n      c.update(manifest_text: \". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\\n\", file_count:10, file_size_total: 10)\n      assert c.valid?\n      assert_equal 1, c.file_count\n      assert_equal 34, c.file_size_total\n\n      # Updating just the file stats should be ignored\n      c.update(file_count: 10, file_size_total: 10)\n      assert c.valid?\n      assert_equal 1, c.file_count\n      assert_equal 34, c.file_size_total\n    end\n  end\n\n  [\n    nil,\n    \"\",\n    \". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\",\n  ].each do |manifest_text|\n    test \"create collection with valid manifest text #{manifest_text.inspect} and expect success\" do\n      act_as_system_user do\n        c = Collection.create(manifest_text: manifest_text)\n        assert c.valid?\n      end\n    end\n  end\n\n  [\n    \". 0:0:foo.txt\",\n    \". d41d8cd98f00b204e9800998ecf8427e foo.txt\",\n    \"d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\",\n    \". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\",\n  ].each do |manifest_text|\n    test \"update collection with invalid manifest text #{manifest_text} and expect error\" do\n      act_as_system_user do\n        c = create_collection 'foo', Encoding::US_ASCII\n        assert c.valid?\n\n        c.update_attribute 'manifest_text', manifest_text\n        assert !c.valid?\n      end\n    end\n  end\n\n  [\n    nil,\n    \"\",\n    \". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\",\n  ].each do |manifest_text|\n    test \"update collection with valid manifest text #{manifest_text.inspect} and expect success\" do\n      act_as_system_user do\n        c = create_collection 'foo', Encoding::US_ASCII\n        assert c.valid?\n\n        c.update_attribute 'manifest_text', manifest_text\n        assert c.valid?\n      end\n    end\n  end\n\n  test \"auto-create version after idle setting\" do\n    Rails.configuration.Collections.CollectionVersioning = true\n    Rails.configuration.Collections.PreserveVersionIfIdle = 600 # 10 minutes\n    act_as_user users(:active) do\n      # Set up initial collection\n      c = create_collection 'foo', Encoding::US_ASCII\n      assert c.valid?\n      assert_equal 1, c.version\n      assert_equal false, c.preserve_version\n      # Make a versionable update, it shouldn't create a new version yet\n      c.update!({'name' => 'bar'})\n      c.reload\n      assert_equal 'bar', c.name\n      assert_equal 1, c.version\n      # Update modified_at to trigger a version auto-creation\n      fifteen_min_ago = Time.now - 15.minutes\n      c.update_column('modified_at', fifteen_min_ago) # Update without validations/callbacks\n      c.reload\n      assert_equal fifteen_min_ago.to_i, c.modified_at.to_i\n      c.update!({'name' => 'baz'})\n      c.reload\n      assert_equal 'baz', c.name\n      assert_equal 2, c.version\n      # Make another update, no new version should be created\n      c.update!({'name' => 'foobar'})\n      c.reload\n      assert_equal 'foobar', c.name\n      assert_equal 2, c.version\n      # Simulate a keep-balance run and trigger a new versionable update\n      # This tests bug #18005\n      assert_nil c.replication_confirmed\n      assert_nil c.replication_confirmed_at\n      # Updates without validations/callbacks\n      c.update_column('modified_at', fifteen_min_ago)\n     
 c.update_column('replication_confirmed_at', Time.now)\n      c.update_column('replication_confirmed', 2)\n      c.reload\n      assert_equal fifteen_min_ago.to_i, c.modified_at.to_i\n      assert_not_nil c.replication_confirmed_at\n      assert_not_nil c.replication_confirmed\n      # Make the versionable update\n      c.update!({'name' => 'foobarbaz'})\n      c.reload\n      assert_equal 'foobarbaz', c.name\n      assert_equal 3, c.version\n    end\n  end\n\n  test \"preserve_version updates\" do\n    Rails.configuration.Collections.CollectionVersioning = true\n    Rails.configuration.Collections.PreserveVersionIfIdle = -1 # disabled\n    act_as_user users(:active) do\n      # Set up initial collection\n      c = create_collection 'foo', Encoding::US_ASCII\n      assert c.valid?\n      assert_equal 1, c.version\n      assert_equal false, c.preserve_version\n      # This update shouldn't produce a new version, as the idle time is not up\n      c.update!({\n        'name' => 'bar'\n      })\n      c.reload\n      assert_equal 1, c.version\n      assert_equal 'bar', c.name\n      assert_equal false, c.preserve_version\n      # This update should produce a new version, even if the idle time is not up\n      # and also keep the preserve_version=true flag to persist it.\n      c.update!({\n        'name' => 'baz',\n        'preserve_version' => true\n      })\n      c.reload\n      assert_equal 2, c.version\n      assert_equal 'baz', c.name\n      assert_equal true, c.preserve_version\n      # Make sure preserve_version is not disabled after being enabled, unless\n      # a new version is created.\n      # This is a non-versionable update\n      c.update!({\n        'preserve_version' => false,\n        'replication_desired' => 2\n      })\n      c.reload\n      assert_equal 2, c.version\n      assert_equal 2, c.replication_desired\n      assert_equal true, c.preserve_version\n      # This is a versionable update\n      c.update!({\n        'preserve_version' => false,\n        'name' => 'foobar'\n      })\n      c.reload\n      assert_equal 3, c.version\n      assert_equal false, c.preserve_version\n      assert_equal 'foobar', c.name\n      # Flipping only 'preserve_version' to true doesn't create a new version\n      c.update!({'preserve_version' => true})\n      c.reload\n      assert_equal 3, c.version\n      assert_equal true, c.preserve_version\n    end\n  end\n\n  test \"preserve_version updates don't change modified_at timestamp\" do\n    act_as_user users(:active) do\n      c = create_collection 'foo', Encoding::US_ASCII\n      assert c.valid?\n      assert_equal false, c.preserve_version\n      modified_at = c.modified_at.to_f\n      c.update!({'preserve_version' => true})\n      c.reload\n      assert_equal true, c.preserve_version\n      assert_equal modified_at, c.modified_at.to_f,\n        'preserve_version updates should not trigger modified_at changes'\n    end\n  end\n\n  [\n    ['version', 10],\n    ['current_version_uuid', 'zzzzz-4zz18-bv31uwvy3neko21'],\n  ].each do |name, new_value|\n    test \"'#{name}' updates on current version collections are not allowed\" do\n      act_as_user users(:active) do\n        # Set up initial collection\n        c = create_collection 'foo', Encoding::US_ASCII\n        assert c.valid?\n        assert_equal 1, c.version\n\n        assert_raises(ActiveRecord::RecordInvalid) do\n          c.update!({\n            name => new_value\n          })\n        end\n      end\n    end\n  end\n\n  test \"uuid updates on current version make older 
versions update their pointers\" do\n    Rails.configuration.Collections.CollectionVersioning = true\n    Rails.configuration.Collections.PreserveVersionIfIdle = 0\n    act_as_system_user do\n      # Set up initial collection\n      c = create_collection 'foo', Encoding::US_ASCII\n      assert c.valid?\n      assert_equal 1, c.version\n      # Make changes so that a new version is created\n      c.update!({'name' => 'bar'})\n      c.reload\n      assert_equal 2, c.version\n      assert_equal 2, Collection.where(current_version_uuid: c.uuid).count\n      new_uuid = 'zzzzz-4zz18-somefakeuuidnow'\n      assert_empty Collection.where(uuid: new_uuid)\n      # Update UUID on current version, check that both collections point to it\n      c.update!({'uuid' => new_uuid})\n      c.reload\n      assert_equal new_uuid, c.uuid\n      assert_equal 2, Collection.where(current_version_uuid: new_uuid).count\n    end\n  end\n\n  # This test exposes a bug related to JSONB attributes, see #15725.\n  test \"recently loaded collection shouldn't list changed attributes\" do\n    col = Collection.where(\"properties != '{}'::jsonb\").limit(1).first\n    refute col.properties_changed?, 'Properties field should not be seen as changed'\n  end\n\n  [\n    [\n      true,\n      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},\n      {:foo=>:bar, :lst=>[1, 3, 5, 7], :hsh=>{'baz'=>'qux', :foobar=>true, 'hsh'=>{:nested=>true}}, :delete_at=>nil},\n    ],\n    [\n      true,\n      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},\n      {'delete_at'=>nil, 'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}},\n    ],\n    [\n      true,\n      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},\n      {'delete_at'=>nil, 'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'foobar'=>true, 'hsh'=>{'nested'=>true}, 'baz'=>'qux'}},\n    ],\n    [\n      false,\n      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},\n      {'foo'=>'bar', 'lst'=>[1, 3, 5, 42], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},\n    ],\n    [\n      false,\n      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},\n      {'foo'=>'bar', 'lst'=>[1, 3, 7, 5], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},\n    ],\n    [\n      false,\n      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},\n      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>false}}, 'delete_at'=>nil},\n    ],\n    [\n      false,\n      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>nil},\n      {'foo'=>'bar', 'lst'=>[1, 3, 5, 7], 'hsh'=>{'baz'=>'qux', 'foobar'=>true, 'hsh'=>{'nested'=>true}}, 'delete_at'=>1234567890},\n    ],\n  ].each do |should_be_equal, value_1, value_2|\n    test \"JSONB properties #{value_1} is#{should_be_equal ? 
'' : ' not'} equal to #{value_2}\" do\n      act_as_user users(:active) do\n        # Set up initial collection\n        c = create_collection 'foo', Encoding::US_ASCII\n        assert c.valid?\n        c.update!({'properties' => value_1})\n        c.reload\n        assert c.changes.keys.empty?\n        c.properties = value_2\n        if should_be_equal\n          assert c.changes.keys.empty?, \"Properties #{value_1.inspect} should be equal to #{value_2.inspect}\"\n        else\n          refute c.changes.keys.empty?, \"Properties #{value_1.inspect} should not be equal to #{value_2.inspect}\"\n        end\n      end\n    end\n  end\n\n  test \"older versions' modified_at indicate when they're created\" do\n    Rails.configuration.Collections.CollectionVersioning = true\n    Rails.configuration.Collections.PreserveVersionIfIdle = 0\n    act_as_user users(:active) do\n      # Set up initial collection\n      c = create_collection 'foo', Encoding::US_ASCII\n      assert c.valid?\n      original_version_modified_at = c.modified_at.to_f\n      # Make changes so that a new version is created\n      c.update!({'name' => 'bar'})\n      c.reload\n      assert_equal 2, c.version\n      # Get the old version\n      c_old = Collection.where(current_version_uuid: c.uuid, version: 1).first\n      assert_not_nil c_old\n\n      version_creation_datetime = c_old.modified_at.to_f\n      assert_equal c.created_at.to_f, c_old.created_at.to_f\n      assert_equal original_version_modified_at, version_creation_datetime\n\n      # Make an update on the current version so the old version gets the\n      # attribute synced; its modified_at should not change.\n      new_replication = 3\n      c.update!({'replication_desired' => new_replication})\n      c.reload\n      assert_equal new_replication, c.replication_desired\n      c_old.reload\n      assert_equal new_replication, c_old.replication_desired\n      assert_equal version_creation_datetime, c_old.modified_at.to_f\n      assert_operator c.modified_at.to_f, :>, c_old.modified_at.to_f\n    end\n  end\n\n  # Bug #17152 - This test relies on fixtures simulating the problem.\n  test \"migration fixing collection versions' modified_at timestamps\" do\n    versioned_collection_fixtures = [\n      collections(:w_a_z_file).uuid,\n      collections(:collection_owned_by_active).uuid\n    ]\n    versioned_collection_fixtures.each do |uuid|\n      cols = Collection.where(current_version_uuid: uuid).order(version: :desc)\n      assert_equal cols.size, 2\n      # cols[0] -> head version // cols[1] -> old version\n      assert_operator (cols[0].modified_at.to_f - cols[1].modified_at.to_f), :==, 0\n      assert cols[1].modified_at != cols[1].created_at\n    end\n    fix_collection_versions_timestamps\n    versioned_collection_fixtures.each do |uuid|\n      cols = Collection.where(current_version_uuid: uuid).order(version: :desc)\n      assert_equal cols.size, 2\n      # cols[0] -> head version // cols[1] -> old version\n      assert_operator (cols[0].modified_at.to_f - cols[1].modified_at.to_f), :>, 1\n      assert_operator cols[1].modified_at, :==, cols[1].created_at\n    end\n  end\n\n  test \"past versions should not be directly updatable\" do\n    Rails.configuration.Collections.CollectionVersioning = true\n    Rails.configuration.Collections.PreserveVersionIfIdle = 0\n    act_as_system_user do\n      # Set up initial collection\n      c = create_collection 'foo', Encoding::US_ASCII\n      assert c.valid?\n      # Make changes so that a new version is created\n      c.update!({'name' 
=> 'bar'})\n      c.reload\n      assert_equal 2, c.version\n      # Get the old version\n      c_old = Collection.where(current_version_uuid: c.uuid, version: 1).first\n      assert_not_nil c_old\n      # With collection versioning still being enabled, try to update\n      c_old.name = 'this was foo'\n      assert c_old.invalid?\n      c_old.reload\n      # Try to fool the validator by attempting to make c_old look like a\n      # current version; it should also fail.\n      c_old.current_version_uuid = c_old.uuid\n      assert c_old.invalid?\n      c_old.reload\n      # Now disable collection versioning; it should behave the same way\n      Rails.configuration.Collections.CollectionVersioning = false\n      c_old.name = 'this was foo'\n      assert c_old.invalid?\n    end\n  end\n\n  [\n    ['owner_uuid', 'zzzzz-tpzed-d9tiejq69daie8f', 'zzzzz-tpzed-xurymjxw79nv3jz'],\n    ['replication_desired', 2, 3],\n    ['storage_classes_desired', ['hot'], ['archive']],\n  ].each do |attr, first_val, second_val|\n    test \"sync #{attr} with older versions\" do\n      Rails.configuration.Collections.CollectionVersioning = true\n      Rails.configuration.Collections.PreserveVersionIfIdle = 0\n      act_as_system_user do\n        # Set up initial collection\n        c = create_collection 'foo', Encoding::US_ASCII\n        assert c.valid?\n        assert_equal 1, c.version\n        assert_not_equal first_val, c.attributes[attr]\n        # Make changes so that a new version is created and a synced field is\n        # updated on both\n        c.update!({'name' => 'bar', attr => first_val})\n        c.reload\n        assert_equal 2, c.version\n        assert_equal first_val, c.attributes[attr]\n        assert_equal 2, Collection.where(current_version_uuid: c.uuid).count\n        assert_equal first_val, Collection.where(current_version_uuid: c.uuid, version: 1).first.attributes[attr]\n        # Only make an update on the same synced field & check that the previously\n        # created version also gets it.\n        c.update!({attr => second_val})\n        c.reload\n        assert_equal 2, c.version\n        assert_equal second_val, c.attributes[attr]\n        assert_equal 2, Collection.where(current_version_uuid: c.uuid).count\n        assert_equal second_val, Collection.where(current_version_uuid: c.uuid, version: 1).first.attributes[attr]\n      end\n    end\n  end\n\n  [\n    [false, 'name', 'bar', false],\n    [false, 'description', 'The quick brown fox jumps over the lazy dog', false],\n    [false, 'properties', {'new_version' => true}, false],\n    [false, 'manifest_text', \". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\", false],\n    [true, 'name', 'bar', true],\n    [true, 'description', 'The quick brown fox jumps over the lazy dog', true],\n    [true, 'properties', {'new_version' => true}, true],\n    [true, 'manifest_text', \". d41d8cd98f00b204e9800998ecf8427e 0:0:foo.txt\\n\", true],\n    # Non-versionable attribute updates shouldn't create new versions\n    [true, 'replication_desired', 5, false],\n    [false, 'replication_desired', 5, false],\n  ].each do |versioning, attr, val, new_version_expected|\n    test \"update #{attr} with versioning #{versioning ? '' : 'not '}enabled should #{new_version_expected ? 
'' : 'not '}create a new version\" do\n      Rails.configuration.Collections.CollectionVersioning = versioning\n      Rails.configuration.Collections.PreserveVersionIfIdle = 0\n      act_as_user users(:active) do\n        # Create initial collection\n        c = create_collection 'foo', Encoding::US_ASCII\n        assert c.valid?\n        assert_equal 'foo', c.name\n\n        # Check current version attributes\n        assert_equal 1, c.version\n        assert_equal c.uuid, c.current_version_uuid\n\n        # Update attribute and check if version number should be incremented\n        old_value = c.attributes[attr]\n        c.update!({attr => val})\n        assert_equal new_version_expected, c.version == 2\n        assert_equal val, c.attributes[attr]\n\n        if versioning && new_version_expected\n          # Search for the snapshot & previous value\n          assert_equal 2, Collection.where(current_version_uuid: c.uuid).count\n          s = Collection.where(current_version_uuid: c.uuid, version: 1).first\n          assert_not_nil s\n          assert_equal old_value, s.attributes[attr]\n        else\n          # If versioning is disabled or no versionable attribute was updated,\n          # only the current version should exist\n          assert_equal 1, Collection.where(current_version_uuid: c.uuid).count\n          assert_equal c, Collection.where(current_version_uuid: c.uuid).first\n        end\n      end\n    end\n  end\n\n  test 'current_version_uuid is ignored during update' do\n    Rails.configuration.Collections.CollectionVersioning = true\n    Rails.configuration.Collections.PreserveVersionIfIdle = 0\n    act_as_user users(:active) do\n      # Create 1st collection\n      col1 = create_collection 'foo', Encoding::US_ASCII\n      assert col1.valid?\n      assert_equal 1, col1.version\n\n      # Create 2nd collection, update it so it becomes version:2\n      # (to avoid unique index violation)\n      col2 = create_collection 'bar', Encoding::US_ASCII\n      assert col2.valid?\n      assert_equal 1, col2.version\n      col2.update({name: 'baz'})\n      assert_equal 2, col2.version\n\n      # Try to make col2 a past version of col1. 
It shouldn't be possible.\n      col2.update({current_version_uuid: col1.uuid})\n      assert col2.invalid?\n      col2.reload\n      assert_not_equal col1.uuid, col2.current_version_uuid\n    end\n  end\n\n  test 'with versioning enabled, simultaneous updates increment version correctly' do\n    Rails.configuration.Collections.CollectionVersioning = true\n    Rails.configuration.Collections.PreserveVersionIfIdle = 0\n    act_as_user users(:active) do\n      # Create initial collection\n      col = create_collection 'foo', Encoding::US_ASCII\n      assert col.valid?\n      assert_equal 1, col.version\n\n      # Simulate simultaneous updates\n      c1 = Collection.where(uuid: col.uuid).first\n      assert_equal 1, c1.version\n      c1.name = 'bar'\n      c2 = Collection.where(uuid: col.uuid).first\n      c2.description = 'foo collection'\n      c1.save!\n      assert_equal 1, c2.version\n      # with_lock forces a reload, so this shouldn't produce a unique violation error\n      c2.save!\n      assert_equal 3, c2.version\n      assert_equal 'foo collection', c2.description\n    end\n  end\n\n  test 'create and update collection and verify file_names' do\n    act_as_system_user do\n      c = create_collection 'foo', Encoding::US_ASCII\n      assert c.valid?\n      created_file_names = c.file_names\n      assert created_file_names\n      assert_match(/foo.txt/, c.file_names)\n\n      c.update_attribute 'manifest_text', \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo2.txt\\n\"\n      assert_not_equal created_file_names, c.file_names\n      assert_match(/foo2.txt/, c.file_names)\n    end\n  end\n\n  [\n    [2**8, false],\n    [2**18, true],\n  ].each do |manifest_size, allow_truncate|\n    test \"create collection with manifest size #{manifest_size} with allow_truncate=#{allow_truncate},\n          and expect no exceptions even on very large manifest texts\" do\n      # file_names has a max size, hence there will be no errors even on large manifests\n      act_as_system_user do\n        manifest_text = ''\n        index = 0\n        while manifest_text.length < manifest_size\n          manifest_text += \"./blurfl d41d8cd98f00b204e9800998ecf8427e+0 0:0:veryverylongfilename000000000000#{index}.txt\\n\"\n          index += 1\n        end\n        manifest_text += \"./laststreamname d41d8cd98f00b204e9800998ecf8427e+0 0:0:veryverylastfilename.txt\\n\"\n        c = Collection.create(manifest_text: manifest_text)\n\n        assert c.valid?\n        assert c.file_names\n        assert_match(/veryverylongfilename0000000000001.txt/, c.file_names)\n        assert_match(/veryverylongfilename0000000000002.txt/, c.file_names)\n        if not allow_truncate\n          assert_match(/veryverylastfilename/, c.file_names)\n          assert_match(/laststreamname/, c.file_names)\n        end\n      end\n    end\n  end\n\n  test \"full text search for collections\" do\n    # file_names column does not get populated when fixtures are loaded, hence set up test data\n    act_as_system_user do\n      Collection.create(manifest_text: \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\\n\")\n      Collection.create(manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\")\n      Collection.create(manifest_text: \". 
85877ca2d7e05498dd3d109baf2df106+95+A3a4e26a366ee7e4ed3e476ccf05354761be2e4ae@545a9920 0:95:file_in_subdir1\\n./subdir2/subdir3 2bbc341c702df4d8f42ec31f16c10120+64+A315d7e7bad2ce937e711fc454fae2d1194d14d64@545a9920 0:32:file1.txt 32:32:file2.txt\\n./subdir2/subdir3/subdir4 2bbc341c702df4d8f42ec31f16c10120+64+A315d7e7bad2ce937e711fc454fae2d1194d14d64@545a9920 0:32:file3.txt 32:32:file4.txt\\n\")\n    end\n\n    [\n      ['foo', true],\n      ['foo bar', false],                     # no collection matching both\n      ['foo&bar', false],                     # no collection matching both\n      ['foo|bar', true],                      # works only with no spaces between the words\n      ['Gnu public', true],                   # both prefixes found, though not consecutively\n      ['Gnu&public', true],                   # both prefixes found, though not consecutively\n      ['file4', true],                        # prefix match\n      ['file4.txt', true],                    # whole string match\n      ['filex', false],                       # no such prefix\n      ['subdir', true],                       # prefix matches\n      ['subdir2', true],\n      ['subdir2/', true],\n      ['subdir2/subdir3', true],\n      ['subdir2/subdir3/subdir4', true],\n      ['subdir2 file4', true],                # look for both prefixes\n      ['subdir4', false],                     # not a prefix match\n    ].each do |search_filter, expect_results|\n      search_filters = search_filter.split.each {|s| s.concat(':*')}.join('&')\n      results = Collection.where(\"#{Collection.full_text_tsvector} @@ to_tsquery(?)\",\n                                 \"#{search_filters}\")\n      if expect_results\n        refute_empty results\n      else\n        assert_empty results\n      end\n    end\n  end\n\n  test 'portable data hash with missing size hints' do\n    [[\". d41d8cd98f00b204e9800998ecf8427e+0+Bar 0:0:x\\n\",\n      \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:x\\n\"],\n     [\". d41d8cd98f00b204e9800998ecf8427e+Foo 0:0:x\\n\",\n      \". d41d8cd98f00b204e9800998ecf8427e 0:0:x\\n\"],\n     [\". d41d8cd98f00b204e9800998ecf8427e 0:0:x\\n\",\n      \". d41d8cd98f00b204e9800998ecf8427e 0:0:x\\n\"],\n    ].each do |unportable, portable|\n      c = Collection.new(manifest_text: unportable)\n      assert c.valid?\n      assert_equal(Digest::MD5.hexdigest(portable)+\"+#{portable.length}\",\n                   c.portable_data_hash)\n    end\n  end\n\n  pdhmanifest = \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:x\\n\"\n  pdhmd5 = Digest::MD5.hexdigest pdhmanifest\n  [[true, nil],\n   [true, pdhmd5],\n   [true, pdhmd5+'+12345'],\n   [true, pdhmd5+'+'+pdhmanifest.length.to_s],\n   [true, pdhmd5+'+12345+Foo'],\n   [true, pdhmd5+'+Foo'],\n   [false, Digest::MD5.hexdigest(pdhmanifest.strip)],\n   [false, Digest::MD5.hexdigest(pdhmanifest.strip)+'+'+pdhmanifest.length.to_s],\n   [false, pdhmd5[0..30]],\n   [false, pdhmd5[0..30]+'z'],\n   [false, pdhmd5[0..24]+'000000000'],\n   [false, pdhmd5[0..24]+'000000000+0']].each do |isvalid, pdh|\n    test \"portable_data_hash #{pdh.inspect} valid? 
== #{isvalid}\" do\n      c = Collection.new manifest_text: pdhmanifest, portable_data_hash: pdh\n      assert_equal isvalid, c.valid?, c.errors.full_messages.to_s\n    end\n  end\n\n  test \"storage_classes_desired default respects config\" do\n    saved = Rails.configuration.DefaultStorageClasses\n    Rails.configuration.DefaultStorageClasses = [\"foo\"]\n    begin\n      act_as_user users(:active) do\n        c = Collection.create!\n        assert_equal [\"foo\"], c.storage_classes_desired\n      end\n    ensure\n      Rails.configuration.DefaultStorageClasses = saved\n    end\n  end\n\n  test \"storage_classes_desired cannot be empty\" do\n    act_as_user users(:active) do\n      c = collections(:collection_owned_by_active)\n      c.update storage_classes_desired: [\"hot\"]\n      assert_equal [\"hot\"], c.storage_classes_desired\n      assert_equal false, c.update(storage_classes_desired: [])\n      assert_match /Storage classes desired must not be empty/, c.errors.full_messages.to_s\n    end\n  end\n\n  test \"storage classes lists should only contain non-empty strings\" do\n    c = collections(:storage_classes_desired_default_unconfirmed)\n    act_as_user users(:admin) do\n      assert c.update(storage_classes_desired: [\"default\", \"a_string\"],\n                                 storage_classes_confirmed: [\"another_string\"])\n      [\n        [\"storage_classes_desired\", [\"default\", 42]],\n        [\"storage_classes_confirmed\", [{the_answer: 42}]],\n        [\"storage_classes_desired\", [\"default\", \"\"]],\n        [\"storage_classes_confirmed\", [\"\"]],\n        [\"storage_classes_confirmed\", \"\"],\n        [\"storage_classes_confirmed\", \"default\"],\n        [\"storage_classes_confirmed\", {foo: :bar}],\n      ].each do |attr, val|\n        assert_equal false, c.update({attr => val})\n        assert_match /Storage classes .* must be an array of non-empty strings/, c.errors.full_messages.to_s\n        c.reload\n      end\n    end\n  end\n\n  test \"storage_classes_confirmed* can be set by admin user\" do\n    c = collections(:storage_classes_desired_default_unconfirmed)\n    act_as_user users(:admin) do\n      assert c.update(storage_classes_confirmed: [\"default\"],\n                                 storage_classes_confirmed_at: Time.now)\n    end\n  end\n\n  test \"storage_classes_confirmed* cannot be set by non-admin user\" do\n    act_as_user users(:active) do\n      c = collections(:storage_classes_desired_default_unconfirmed)\n      # Cannot set just one at a time.\n      assert_raise ArvadosModel::PermissionDeniedError do\n        c.update storage_classes_confirmed: [\"default\"]\n      end\n      c.reload\n      assert_raise ArvadosModel::PermissionDeniedError do\n        c.update storage_classes_confirmed_at: Time.now\n      end\n      # Cannot set both at once, either.\n      c.reload\n      assert_raise ArvadosModel::PermissionDeniedError do\n        assert c.update(storage_classes_confirmed: [\"default\"],\n                                   storage_classes_confirmed_at: Time.now)\n      end\n    end\n  end\n\n  test \"storage_classes_confirmed* can be cleared (but only together) by non-admin user\" do\n    act_as_user users(:active) do\n      c = collections(:storage_classes_desired_default_confirmed_default)\n      # Cannot clear just one at a time.\n      assert_raise ArvadosModel::PermissionDeniedError do\n        c.update storage_classes_confirmed: []\n      end\n      c.reload\n      assert_raise ArvadosModel::PermissionDeniedError do\n        
c.update storage_classes_confirmed_at: nil\n      end\n      # Can clear both at once.\n      c.reload\n      assert c.update(storage_classes_confirmed: [],\n                                 storage_classes_confirmed_at: nil)\n    end\n  end\n\n  [0, 2, 4, nil].each do |ask|\n    test \"set replication_desired to #{ask.inspect}\" do\n      Rails.configuration.Collections.DefaultReplication = 2\n      act_as_user users(:active) do\n        c = collections(:replication_undesired_unconfirmed)\n        c.update replication_desired: ask\n        assert_equal ask, c.replication_desired\n      end\n    end\n  end\n\n  test \"replication_confirmed* can be set by admin user\" do\n    c = collections(:replication_desired_2_unconfirmed)\n    act_as_user users(:admin) do\n      assert c.update(replication_confirmed: 2,\n                                 replication_confirmed_at: Time.now)\n    end\n  end\n\n  test \"replication_confirmed* cannot be set by non-admin user\" do\n    act_as_user users(:active) do\n      c = collections(:replication_desired_2_unconfirmed)\n      # Cannot set just one at a time.\n      assert_raise ArvadosModel::PermissionDeniedError do\n        c.update replication_confirmed: 1\n      end\n      assert_raise ArvadosModel::PermissionDeniedError do\n        c.update replication_confirmed_at: Time.now\n      end\n      # Cannot set both at once, either.\n      assert_raise ArvadosModel::PermissionDeniedError do\n        c.update(replication_confirmed: 1,\n                            replication_confirmed_at: Time.now)\n      end\n    end\n  end\n\n  test \"replication_confirmed* can be cleared (but only together) by non-admin user\" do\n    act_as_user users(:active) do\n      c = collections(:replication_desired_2_confirmed_2)\n      # Cannot clear just one at a time.\n      assert_raise ArvadosModel::PermissionDeniedError do\n        c.update replication_confirmed: nil\n      end\n      c.reload\n      assert_raise ArvadosModel::PermissionDeniedError do\n        c.update replication_confirmed_at: nil\n      end\n      # Can clear both at once.\n      c.reload\n      assert c.update(replication_confirmed: nil,\n                                 replication_confirmed_at: nil)\n    end\n  end\n\n  test \"clear replication_confirmed* when introducing a new block in manifest\" do\n    c = collections(:replication_desired_2_confirmed_2)\n    act_as_user users(:active) do\n      assert c.update(manifest_text: collections(:user_agreement).signed_manifest_text_only_for_tests)\n      assert_nil c.replication_confirmed\n      assert_nil c.replication_confirmed_at\n    end\n  end\n\n  test \"don't clear replication_confirmed* when just renaming a file\" do\n    c = collections(:replication_desired_2_confirmed_2)\n    act_as_user users(:active) do\n      new_manifest = c.signed_manifest_text_only_for_tests.sub(':bar', ':foo')\n      assert c.update(manifest_text: new_manifest)\n      assert_equal 2, c.replication_confirmed\n      assert_not_nil c.replication_confirmed_at\n    end\n  end\n\n  test \"don't clear replication_confirmed* when just deleting a data block\" do\n    c = collections(:replication_desired_2_confirmed_2)\n    act_as_user users(:active) do\n      new_manifest = c.signed_manifest_text_only_for_tests\n      new_manifest.sub!(/ \\S+:bar/, '')\n      new_manifest.sub!(/ acbd\\S+/, '')\n\n      # Confirm that we did just remove a block from the manifest (if\n      # not, this test would pass without testing the relevant case):\n      assert_operator new_manifest.length+40, 
:<, c.signed_manifest_text_only_for_tests.length\n\n      assert c.update(manifest_text: new_manifest)\n      assert_equal 2, c.replication_confirmed\n      assert_not_nil c.replication_confirmed_at\n    end\n  end\n\n  test 'signature expiry does not exceed trash_at' do\n    act_as_user users(:active) do\n      t0 = db_current_time\n      c = Collection.create!(manifest_text: \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:x\\n\", name: 'foo')\n      c.update! trash_at: (t0 + 1.hours)\n      c.reload\n      sig_exp = /\\+A[0-9a-f]{40}\\@([0-9]+)/.match(c.signed_manifest_text_only_for_tests)[1].to_i\n      assert_operator sig_exp.to_i, :<=, (t0 + 1.hours).to_i\n    end\n  end\n\n  test 'far-future expiry date cannot be used to circumvent configured permission ttl' do\n    act_as_user users(:active) do\n      c = Collection.create!(manifest_text: \". d41d8cd98f00b204e9800998ecf8427e+0 0:0:x\\n\",\n                             name: 'foo',\n                             trash_at: db_current_time + 1.years)\n      sig_exp = /\\+A[0-9a-f]{40}\\@([0-9]+)/.match(c.signed_manifest_text_only_for_tests)[1].to_i\n      expect_max_sig_exp = db_current_time.to_i + Rails.configuration.Collections.BlobSigningTTL.to_i\n      assert_operator c.trash_at.to_i, :>, expect_max_sig_exp\n      assert_operator sig_exp.to_i, :<=, expect_max_sig_exp\n    end\n  end\n\n  test \"create collection with properties\" do\n    act_as_system_user do\n      c = Collection.create(manifest_text: \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\\n\",\n                            properties: {'property_1' => 'value_1'})\n      assert c.valid?\n      assert_equal 'value_1', c.properties['property_1']\n    end\n  end\n\n  test 'create, delete, recreate collection with same name and owner' do\n    act_as_user users(:active) do\n      # create collection with name\n      c = Collection.create(manifest_text: '',\n                            name: \"test collection name\")\n      assert c.valid?\n      uuid = c.uuid\n\n      c = Collection.readable_by(current_user).where(uuid: uuid)\n      assert_not_empty c, 'Should be able to find live collection'\n\n      # mark collection as expired\n      c.first.update!(trash_at: Time.new.strftime(\"%Y-%m-%d\"))\n      c = Collection.readable_by(current_user).where(uuid: uuid)\n      assert_empty c, 'Should not be able to find expired collection'\n\n      # recreate collection with the same name\n      c = Collection.create(manifest_text: '',\n                            name: \"test collection name\")\n      assert c.valid?\n    end\n  end\n\n  test 'trash_at cannot be set too far in the past' do\n    act_as_user users(:active) do\n      t0 = db_current_time\n      c = Collection.create!(manifest_text: '', name: 'foo')\n      c.update! 
trash_at: (t0 - 2.weeks)\n      c.reload\n      assert_operator c.trash_at, :>, t0\n    end\n  end\n\n  now = Time.now\n  [['trash-to-delete interval negative',\n    :collection_owned_by_active,\n    {trash_at: now+2.weeks, delete_at: now},\n    {state: :invalid}],\n   ['now-to-delete interval short',\n    :collection_owned_by_active,\n    {trash_at: now+3.days, delete_at: now+7.days},\n    {state: :trash_future}],\n   ['now-to-delete interval short, trash=delete',\n    :collection_owned_by_active,\n    {trash_at: now+3.days, delete_at: now+3.days},\n    {state: :trash_future}],\n   ['trash-to-delete interval ok',\n    :collection_owned_by_active,\n    {trash_at: now, delete_at: now+15.days},\n    {state: :trash_now}],\n   ['trash-to-delete interval short, but far enough in future',\n    :collection_owned_by_active,\n    {trash_at: now+13.days, delete_at: now+15.days},\n    {state: :trash_future}],\n   ['trash by setting is_trashed bool',\n    :collection_owned_by_active,\n    {is_trashed: true},\n    {state: :trash_now}],\n   ['trash in future by setting just trash_at',\n    :collection_owned_by_active,\n    {trash_at: now+1.week},\n    {state: :trash_future}],\n   ['trash in future by setting trash_at and delete_at',\n    :collection_owned_by_active,\n    {trash_at: now+1.week, delete_at: now+4.weeks},\n    {state: :trash_future}],\n   ['untrash by clearing is_trashed bool',\n    :expired_collection,\n    {is_trashed: false},\n    {state: :not_trash}],\n  ].each do |test_name, fixture_name, updates, expect|\n    test test_name do\n      act_as_user users(:active) do\n        min_exp = (db_current_time +\n                   Rails.configuration.Collections.BlobSigningTTL)\n        if fixture_name == :expired_collection\n          # Fixture-finder shorthand doesn't find trashed collections\n          # because they're not in the default scope.\n          c = Collection.find_by_uuid('zzzzz-4zz18-mto52zx1s7sn3ih')\n        else\n          c = collections(fixture_name)\n        end\n        updates_ok = c.update(updates)\n        expect_valid = expect[:state] != :invalid\n        assert_equal expect_valid, updates_ok, c.errors.full_messages.to_s\n        case expect[:state]\n        when :invalid\n          refute c.valid?\n        when :trash_now\n          assert c.is_trashed\n          assert_not_nil c.trash_at\n          assert_operator c.trash_at, :<=, db_current_time\n          assert_not_nil c.delete_at\n          assert_operator c.delete_at, :>=, min_exp\n        when :trash_future\n          refute c.is_trashed\n          assert_not_nil c.trash_at\n          assert_operator c.trash_at, :>, db_current_time\n          assert_not_nil c.delete_at\n          assert_operator c.delete_at, :>=, c.trash_at\n          # Currently this minimum interval is needed to prevent early\n          # garbage collection:\n          assert_operator c.delete_at, :>=, min_exp\n        when :not_trash\n          refute c.is_trashed\n          assert_nil c.trash_at\n          assert_nil c.delete_at\n        else\n          raise \"bad expect[:state]==#{expect[:state].inspect} in test case\"\n        end\n      end\n    end\n  end\n\n  test 'default trash interval > blob signature ttl' do\n    Rails.configuration.Collections.DefaultTrashLifetime = 86400 * 21 # 3 weeks\n    start = db_current_time\n    act_as_user users(:active) do\n      c = Collection.create!(manifest_text: '', name: 'foo')\n      c.update!(trash_at: start + 86400.seconds)\n      assert_operator c.delete_at, :>=, start + (86400*22).seconds\n   
   assert_operator c.delete_at, :<, start + (86400*22 + 30).seconds\n      c.destroy\n\n      c = Collection.create!(manifest_text: '', name: 'foo')\n      c.update!(is_trashed: true)\n      assert_operator c.delete_at, :>=, start + (86400*21).seconds\n    end\n  end\n\n  test \"find_all_for_docker_image resolves names that look like hashes\" do\n    coll_list = Collection.\n      find_all_for_docker_image('a' * 64, nil, [users(:active)])\n    coll_uuids = coll_list.map(&:uuid)\n    assert_includes(coll_uuids, collections(:docker_image).uuid)\n  end\n\n  test \"empty names are exempt from name uniqueness\" do\n    act_as_user users(:active) do\n      c1 = Collection.new(name: nil, manifest_text: '', owner_uuid: groups(:aproject).uuid)\n      assert c1.save\n      c2 = Collection.new(name: '', manifest_text: '', owner_uuid: groups(:aproject).uuid)\n      assert c2.save\n      c3 = Collection.new(name: '', manifest_text: '', owner_uuid: groups(:aproject).uuid)\n      assert c3.save\n      c4 = Collection.new(name: 'c4', manifest_text: '', owner_uuid: groups(:aproject).uuid)\n      assert c4.save\n      c5 = Collection.new(name: 'c4', manifest_text: '', owner_uuid: groups(:aproject).uuid)\n      assert_raises(ActiveRecord::RecordNotUnique) do\n        c5.save\n      end\n    end\n  end\n\n  test \"create collections with managed properties\" do\n    Rails.configuration.Collections.ManagedProperties = ConfigLoader.to_OrderedOptions({\n      'default_prop1' => {'Value' => 'prop1_value'},\n      'responsible_person_uuid' => {'Function' => 'original_owner'}\n    })\n    # Test collection without initial properties\n    act_as_user users(:active) do\n      c = create_collection 'foo', Encoding::US_ASCII\n      assert c.valid?\n      assert_not_empty c.properties\n      assert_equal 'prop1_value', c.properties['default_prop1']\n      assert_equal users(:active).uuid, c.properties['responsible_person_uuid']\n    end\n    # Test collection with default_prop1 property already set\n    act_as_user users(:active) do\n      c = Collection.create(manifest_text: \". d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\\n\",\n                            properties: {'default_prop1' => 'custom_value'})\n      assert c.valid?\n      assert_not_empty c.properties\n      assert_equal 'custom_value', c.properties['default_prop1']\n      assert_equal users(:active).uuid, c.properties['responsible_person_uuid']\n    end\n    # Test collection inside a sub project\n    act_as_user users(:active) do\n      c = Collection.create(manifest_text: \". 
d41d8cd98f00b204e9800998ecf8427e 0:34:foo.txt\\n\",\n                            owner_uuid: groups(:asubproject).uuid)\n      assert c.valid?\n      assert_not_empty c.properties\n      assert_equal users(:active).uuid, c.properties['responsible_person_uuid']\n    end\n  end\n\n  test \"update collection with protected managed properties\" do\n    Rails.configuration.Collections.ManagedProperties = ConfigLoader.to_OrderedOptions({\n      'default_prop1' => {'Value' => 'prop1_value', 'Protected' => true},\n    })\n    act_as_user users(:active) do\n      c = create_collection 'foo', Encoding::US_ASCII\n      assert c.valid?\n      assert_not_empty c.properties\n      assert_equal 'prop1_value', c.properties['default_prop1']\n      # Add new property\n      c.properties['prop2'] = 'value2'\n      c.save!\n      c.reload\n      assert_equal 'value2', c.properties['prop2']\n      # Try to change protected property's value\n      c.properties['default_prop1'] = 'new_value'\n      assert_raises(ArvadosModel::PermissionDeniedError) do\n        c.save!\n      end\n      # Admins are allowed to change protected properties\n      act_as_system_user do\n        c.properties['default_prop1'] = 'new_value'\n        c.save!\n        c.reload\n        assert_equal 'new_value', c.properties['default_prop1']\n      end\n    end\n  end\n\n  test \"collection names must be displayable in a filesystem\" do\n    set_user_from_auth :active\n    [\"\", \"{SOLIDUS}\"].each do |subst|\n      Rails.configuration.Collections.ForwardSlashNameSubstitution = subst\n      c = Collection.create\n      [[nil, true],\n       [\"\", true],\n       [\".\", false],\n       [\"..\", false],\n       [\"...\", true],\n       [\"..z..\", true],\n       [\"foo/bar\", subst != \"\"],\n       [\"../..\", subst != \"\"],\n       [\"/\", subst != \"\"],\n      ].each do |name, valid|\n        c.name = name\n        assert_equal valid, c.valid?, \"#{name.inspect} should be #{valid ? \"valid\" : \"invalid\"}\"\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/container_request_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\nrequire 'helpers/container_test_helper'\nrequire 'helpers/docker_migration_helper'\nrequire 'arvados/collection'\n\nclass ContainerRequestTest < ActiveSupport::TestCase\n  include DockerMigrationHelper\n  include DbCurrentTime\n  include ContainerTestHelper\n\n  def with_container_auth(ctr)\n    auth_was = Thread.current[:api_client_authorization]\n    token_was = Thread.current[:token]\n    user_was = Thread.current[:user]\n    auth = ApiClientAuthorization.find_by_uuid(ctr.auth_uuid)\n    Thread.current[:api_client_authorization] = auth\n    Thread.current[:token] = auth.token\n    Thread.current[:user] = auth.user\n    begin\n      yield\n    ensure\n      Thread.current[:api_client_authorization] = auth_was\n      Thread.current[:token] = token_was\n      Thread.current[:user] = user_was\n    end\n  end\n\n  def lock_and_run(ctr)\n      act_as_system_user do\n        ctr.update!(state: Container::Locked)\n        ctr.update!(state: Container::Running)\n      end\n  end\n\n  def create_minimal_req! attrs={}\n    defaults = {\n      command: [\"echo\", \"foo\"],\n      container_image: links(:docker_image_collection_tag).name,\n      cwd: \"/tmp\",\n      environment: {},\n      mounts: {\"/out\" => {\"kind\" => \"tmp\", \"capacity\" => 1000000}},\n      output_path: \"/out\",\n      runtime_constraints: {\"vcpus\" => 1, \"ram\" => 2},\n      name: \"foo\",\n      description: \"bar\",\n    }\n    cr = ContainerRequest.create!(defaults.merge(attrs))\n    cr.reload\n    return cr\n  end\n\n  def check_bogus_states cr\n    [nil, \"Flubber\"].each do |state|\n      assert_raises(ActiveRecord::RecordInvalid) do\n        cr.state = state\n        cr.save!\n      end\n      cr.reload\n    end\n  end\n\n  def configure_preemptible_instance_type\n    Rails.configuration.InstanceTypes = ConfigLoader.to_OrderedOptions({\n      \"a1.small.pre\" => {\n        \"Preemptible\" => true,\n        \"Price\" => 0.1,\n        \"ProviderType\" => \"a1.small\",\n        \"VCPUs\" => 1,\n        \"RAM\" => 1000000000,\n      },\n    })\n  end\n\n  test \"Container request create\" do\n    set_user_from_auth :active\n    cr = create_minimal_req!\n\n    assert_nil cr.container_uuid\n    assert_equal 0, cr.priority\n\n    check_bogus_states cr\n\n    # Ensure we can modify all attributes\n    cr.command = [\"echo\", \"foo3\"]\n    cr.container_image = \"img3\"\n    cr.cwd = \"/tmp3\"\n    cr.environment = {\"BUP\" => \"BOP\"}\n    cr.mounts = {\"/BAR\" => {\"kind\" => \"tmp\"}}\n    cr.output_path = \"/tmp4\"\n    cr.priority = 2\n    cr.runtime_constraints = {\"vcpus\" => 4}\n    cr.name = \"foo3\"\n    cr.description = \"bar3\"\n    cr.save!\n\n    assert_nil cr.container_uuid\n  end\n\n  [\n    [/ram.*must be a positive integer/, {\"runtime_constraints\" => {\"vcpus\" => 1}}],\n    [/ram.*must be a positive integer/, {\"runtime_constraints\" => {\"vcpus\" => 1, \"ram\" => \"1234567\"}}],\n    [/ram.*must be a positive integer/, {\"runtime_constraints\" => {\"vcpus\" => 1, \"ram\" => 0}}],\n    [/ram.*must be a positive integer/, {\"runtime_constraints\" => {\"vcpus\" => 1, \"ram\" => nil}}],\n    [/ram.*must be a positive integer/, {\"runtime_constraints\" => {\"vcpus\" => 1, \"ram\" => -1}}],\n    [/vcpus.*must be a positive integer/, {\"runtime_constraints\" => {\"vcpus\" => 0, \"ram\" => 1234567}}],\n    [/vcpus.*must be a positive integer/, {\"runtime_constraints\" => 
{\"vcpus\" => \"1\", \"ram\" => 1234567}}],\n    [/Runtime constraints.*unexpected key.*badkey/, {\"runtime_constraints\" => {\"vcpus\" => 1, \"ram\" => 1234567, \"badkey\" => 2}}],\n    [/Runtime constraints.*unexpected key.*badkey/, {\"runtime_constraints\" => {\"vcpus\" => 1, \"ram\" => 1234567, \"gpu\" => {\"badkey\" => \"badvalue\"}}}],\n    [/gpu.*must be a hash/, {\"runtime_constraints\" => {\"vcpus\" => 1, \"ram\" => 1234567, \"gpu\" => []}}],\n    [/gpu.*must be a hash/, {\"runtime_constraints\" => {\"vcpus\" => 1, \"ram\" => 1234567, \"gpu\" => \"bad\"}}],\n    [/Runtime constraints.*must be a hash/, {\"runtime_constraints\" => [\"bad\"]}],\n    [/Runtime constraints.*must be a hash/, {\"runtime_constraints\" => \"bad\"}],\n    [/Scheduling parameters.*unexpected key.*badkey/, {\"scheduling_parameters\" => {\"badkey\" => \"value\"}}],\n    [/Scheduling parameters.*unexpected key.*badkey/, {\"scheduling_parameters\" => {\"badkey\" => [\"value\"]}}],\n    [/Mounts \\[foo\\].*invalid target/, {\"mounts\" => {\"foo\" => {\"kind\" => \"tmp\"}}}],\n    [/Mounts \\[\\/foo\\].*must be a hash/, {\"mounts\" => {\"/foo\" => \"BAR\"}}],\n    [/Mounts \\[\\/foo\\]\\[kind\\]: unsupported value/, {\"mounts\" => {\"/foo\" => {}}}],\n    [/Mounts \\[\\/foo\\]\\[kind\\]: unsupported value/, {\"mounts\" => {\"/foo\" => {\"kind\" => \"\"}}}],\n    [/Mounts \\[\\/foo\\]\\[kind\\]: unsupported value.*badkind/, {\"mounts\" => {\"/foo\" => {\"kind\" => \"badkind\", \"content\" => {}}}}],\n    [/Mounts \\[\\/foo\\]\\[capacity\\]: incompatible value type: integer required/, {\"mounts\" => {\"/foo\" => {\"kind\" => \"tmp\", \"capacity\" => 42.222}}}],\n    [/Mounts \\[\\/foo\\]\\[content\\]: parameter is not supported for a collection mount/, {\"mounts\" => {\"/foo\" => {\"kind\" => \"collection\", \"content\" => {\"foo\" => \"bar\"}}}}],\n    [/Mounts \\[\\/foo\\]\\[bad\\]: parameter is not supported for a collection mount/, {\"mounts\" => {\"/foo\" => {\"kind\" => \"collection\", \"bad\" => \"bad\"}}}],\n    [/Mounts \\[\\/foo\\]\\[path\\]: parameter is not supported for a json mount/, {\"mounts\" => {\"/foo\" => {\"kind\" => \"json\", \"path\" => \"bad\"}}}],\n    [/Mounts \\[\\/foo\\]\\[device_type\\]: parameter is not supported for a json mount/, {\"mounts\" => {\"/foo\" => {\"kind\" => \"json\", \"device_type\" => \"ram\"}}}],\n    [/Mounts \\[\\/foo\\]\\[content\\]: incompatible value type: string required/, {\"mounts\" => {\"/foo\" => {\"kind\" => \"text\", \"content\" => {\"bad\" => \"bad\"}}}}],\n    [/Mounts \\[\\/foo\\]\\[portable_data_hash\\]: parameter is not supported for a tmp mount/, {\"mounts\" => {\"/foo\" => {\"kind\" => \"tmp\", \"portable_data_hash\" => \"d41d8cd98f00b204e9800998ecf8427e+0\"}}}],\n    [/Mounts \\[\\/foo\\]\\[uuid\\]: parameter is not supported for a keep mount/, {\"mounts\" => {\"/foo\" => {\"kind\" => \"keep\", \"uuid\" => \"zzzzz-tpzed-badbadbadbadbad\"}}}],\n    [/Mounts \\[\\/foo\\]\\[uuid\\]: parameter is not supported for a file mount/, {\"mounts\" => {\"/foo\" => {\"kind\" => \"file\", \"uuid\" => \"zzzzz-tpzed-badbadbadbadbad\"}}}],\n    [/Mounts \\[\\/foo\\]\\[content\\]: parameter is not supported for a file mount/, {\"mounts\" => {\"/foo\" => {\"kind\" => \"file\", \"content\" => \"bad\"}}}],\n    [/Mounts \\[\\/foo\\]\\[badkey\\]: parameter is not supported for a json mount/, {\"mounts\" => {\"/foo\" => {\"kind\" => \"json\", \"content\" => \"ok\", \"badkey\" => \"value\"}}}],\n    [/Secret mounts \\[\\/foo\\]\\[kind\\]: unsupported value \\\"tmp\\\"/, 
{\"secret_mounts\" => {\"/foo\" => {\"kind\" => \"tmp\", \"capacity\" => 1234567}}}],\n    [/Secret mounts \\[\\/foo\\]\\[content\\]: incompatible value type: string required/, {\"secret_mounts\" => {\"/foo\" => {\"kind\" => \"text\", \"content\" => {\"bad\" => \"bad\"}}}}],\n    [/Secret mounts \\[stdout\\]: invalid target: must be stdin or an absolute path/, {\"secret_mounts\" => {\"stdout\" => {\"kind\" => \"json\", \"content\" => {}}}}],\n    [/Secret mounts \\[stderr\\]: invalid target: must be stdin or an absolute path/, {\"secret_mounts\" => {\"stderr\" => {\"kind\" => \"json\", \"content\" => {}}}}],\n    [/Command must be an array of strings/, {\"command\" => [\"echo\", 55]}],\n    [/Environment key cannot be empty/, {\"environment\" => {\"\" => \"baz\"}}],\n    [/Environment \\[FOO\\]: incompatible value type: string required/, {\"environment\" => {\"FOO\" => 55}}],\n    [/Environment key.*contains.*invalid character/, {\"environment\" => {\"FOO\\0\" => \"baz\"}}],\n    [/Environment key.*contains.*invalid character/, {\"environment\" => {\"FOO=BAR\" => \"baz\"}}],\n    [/Environment key.*contains.*invalid character/, {\"environment\" => {\"=\" => \"baz\"}}],\n    [/Environment value.*contains.*invalid character/, {\"environment\" => {\"FOO\" => \"BAR\\0BAZ\"}}],\n    [/Output glob must be an array of non-empty strings/, {\"output_glob\" => [false]}],\n    [/Output glob must be an array of non-empty strings/, {\"output_glob\" => [[\"bad\"]]}],\n    [/Output glob must be an array of non-empty strings/, {\"output_glob\" => [\"cannot\",\"contain\",\"empty\",\"\"]}],\n    [/Output glob must be an array of non-empty strings/, {\"output_glob\" => [{bad:\"bad\"}]}],\n    [/Output glob must be an array of non-empty strings/, {\"output_glob\" => \"bad\"}],\n    [/Output glob must be an array of non-empty strings/, {\"output_glob\" => [\"nope\", -1]}],\n  ].each do |error_regexp, value|\n    test \"Create with invalid #{value}\" do\n      set_user_from_auth :active\n      err = assert_raises(ActiveRecord::RecordInvalid) do\n        cr = create_minimal_req!({state: \"Committed\", priority: 1}.merge(value))\n      end\n      assert_match /Validation failed: /, err.message\n    end\n\n    test \"Update with invalid #{value}\" do\n      set_user_from_auth :active\n      cr = create_minimal_req!(state: \"Uncommitted\", priority: 1)\n      cr.save!\n      cr = ContainerRequest.find_by_uuid cr.uuid\n      err = assert_raises(ActiveRecord::RecordInvalid) do\n        cr.update!({state: \"Committed\", priority: 1}.merge(value))\n      end\n      assert_match error_regexp, err.message\n    end\n  end\n\n  test \"Update from fixture\" do\n    set_user_from_auth :active\n    cr = ContainerRequest.find_by_uuid(container_requests(:running).uuid)\n    cr.update!(description: \"New description\")\n    assert_equal \"New description\", cr.description\n  end\n\n  test \"Update with valid runtime constraints\" do\n      set_user_from_auth :active\n      cr = create_minimal_req!(state: \"Uncommitted\", priority: 1)\n      cr.save!\n      cr = ContainerRequest.find_by_uuid cr.uuid\n      cr.update!(state: \"Committed\",\n                            runtime_constraints: {\"vcpus\" => 1, \"ram\" => 23})\n      assert_not_nil cr.container_uuid\n  end\n\n  test \"Container request priority must be non-nil\" do\n    set_user_from_auth :active\n    cr = create_minimal_req!\n    cr.priority = nil\n    cr.state = \"Committed\"\n    assert_raises(ActiveRecord::RecordInvalid) do\n      cr.save!\n    end\n  end\n\n  test 
\"Container request commit\" do\n    set_user_from_auth :active\n    cr = create_minimal_req!(runtime_constraints: {\"vcpus\" => 2, \"ram\" => 300000000})\n\n    assert_nil cr.container_uuid\n\n    cr.reload\n    cr.state = \"Committed\"\n    cr.priority = 1\n    cr.save!\n\n    cr.reload\n\n    assert_empty({\"vcpus\" => 2, \"ram\" => 300000000}.to_a - cr.runtime_constraints.to_a)\n\n    assert_equal 0, Rails.configuration.Containers.DefaultKeepCacheRAM\n\n    assert_not_nil cr.container_uuid\n    c = Container.find_by_uuid cr.container_uuid\n    assert_not_nil c\n    assert_equal [\"echo\", \"foo\"], c.command\n    assert_equal collections(:docker_image).portable_data_hash, c.container_image\n    assert_equal \"/tmp\", c.cwd\n    assert_equal({}, c.environment)\n    assert_equal({\"/out\" => {\"kind\"=>\"tmp\", \"capacity\"=>1000000}}, c.mounts)\n    assert_equal \"/out\", c.output_path\n    assert ({\"keep_cache_disk\" => 2<<30, \"keep_cache_ram\" => 0, \"vcpus\" => 2, \"ram\" => 300000000}.to_a - c.runtime_constraints.to_a).empty?\n    assert_operator 0, :<, c.priority\n\n    assert_raises(ActiveRecord::RecordInvalid) do\n      cr.priority = nil\n      cr.save!\n    end\n\n    cr.priority = 0\n    cr.save!\n\n    cr.reload\n    c.reload\n    assert_equal 0, cr.priority\n    assert_equal 0, c.priority\n  end\n\n  test \"Independent container requests\" do\n    set_user_from_auth :active\n    cr1 = create_minimal_req!(command: [\"foo\", \"1\"], priority: 5, state: \"Committed\")\n    cr2 = create_minimal_req!(command: [\"foo\", \"2\"], priority: 10, state: \"Committed\")\n\n    c1 = Container.find_by_uuid cr1.container_uuid\n    assert_operator 0, :<, c1.priority\n\n    c2 = Container.find_by_uuid cr2.container_uuid\n    assert_operator c1.priority, :<, c2.priority\n    c2priority_was = c2.priority\n\n    cr1.update!(priority: 0)\n\n    c1.reload\n    assert_equal 0, c1.priority\n\n    c2.reload\n    assert_equal c2priority_was, c2.priority\n  end\n\n  test \"Request is finalized when its container is cancelled\" do\n    set_user_from_auth :active\n    cr = create_minimal_req!(priority: 1, state: \"Committed\", container_count_max: 1)\n    assert_equal users(:active).uuid, cr.modified_by_user_uuid\n\n    act_as_system_user do\n      Container.find_by_uuid(cr.container_uuid).\n        update!(state: Container::Cancelled, cost: 1.25)\n    end\n\n    cr.reload\n    assert_equal \"Final\", cr.state\n    assert_equal 1.25, cr.cumulative_cost\n    assert_equal users(:active).uuid, cr.modified_by_user_uuid\n  end\n\n  test \"Request is finalized when its container is completed\" do\n    set_user_from_auth :active\n    project = groups(:private)\n    cr = create_minimal_req!(owner_uuid: project.uuid,\n                             priority: 1,\n                             state: \"Committed\")\n    assert_equal users(:active).uuid, cr.modified_by_user_uuid\n\n    c = act_as_system_user do\n      c = Container.find_by_uuid(cr.container_uuid)\n      c.update!(state: Container::Locked)\n      c.update!(state: Container::Running)\n      c\n    end\n\n    cr.reload\n    assert_equal \"Committed\", cr.state\n\n    output_pdh = '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'\n    log_pdh = 'fa7aeb5140e2848d39b416daeef4ffc5+45'\n    act_as_system_user do\n      c.update!(state: Container::Complete,\n                           cost: 1.25,\n                           output: output_pdh,\n                           log: log_pdh)\n    end\n\n    cr.reload\n    assert_equal \"Final\", cr.state\n    assert_equal 1.25, 
cr.cumulative_cost\n    assert_equal users(:active).uuid, cr.modified_by_user_uuid\n\n    assert_not_nil cr.output_uuid\n    assert_not_nil cr.log_uuid\n    output = Collection.find_by_uuid cr.output_uuid\n    assert_equal output_pdh, output.portable_data_hash\n    assert_equal output.owner_uuid, project.uuid, \"Container output should be copied to #{project.uuid}\"\n    assert_not_nil output.modified_at\n\n    log = Collection.find_by_uuid cr.log_uuid\n    assert_equal log.manifest_text, \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n./log\\\\040for\\\\040container\\\\040#{cr.container_uuid} 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n\n    assert_equal log.owner_uuid, project.uuid, \"Container log should be copied to #{project.uuid}\"\n  end\n\n  # This tests bug report #16144\n  test \"Request is finalized when its container is completed even when log & output don't exist\" do\n    set_user_from_auth :active\n    project = groups(:private)\n    cr = create_minimal_req!(owner_uuid: project.uuid,\n                             priority: 1,\n                             state: \"Committed\")\n    assert_equal users(:active).uuid, cr.modified_by_user_uuid\n\n    output_pdh = '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'\n    log_pdh = 'fa7aeb5140e2848d39b416daeef4ffc5+45'\n\n    c = act_as_system_user do\n      c = Container.find_by_uuid(cr.container_uuid)\n      c.update!(state: Container::Locked)\n      c.update!(state: Container::Running,\n                           output: output_pdh,\n                           log: log_pdh)\n      c\n    end\n\n    cr.reload\n    assert_equal \"Committed\", cr.state\n\n    act_as_system_user do\n      Collection.where(portable_data_hash: output_pdh).delete_all\n      Collection.where(portable_data_hash: log_pdh).delete_all\n      c.update!(state: Container::Complete)\n    end\n\n    cr.reload\n    assert_equal \"Final\", cr.state\n  end\n\n  # This tests bug report #16144\n  test \"Can destroy CR even if its container doesn't exist\" do\n    set_user_from_auth :active\n    project = groups(:private)\n    cr = create_minimal_req!(owner_uuid: project.uuid,\n                             priority: 1,\n                             state: \"Committed\")\n    assert_equal users(:active).uuid, cr.modified_by_user_uuid\n\n    c = act_as_system_user do\n      c = Container.find_by_uuid(cr.container_uuid)\n      c.update!(state: Container::Locked)\n      c.update!(state: Container::Running)\n      c\n    end\n\n    cr.reload\n    assert_equal \"Committed\", cr.state\n\n    cr_uuid = cr.uuid\n    act_as_system_user do\n      Container.find_by_uuid(cr.container_uuid).destroy\n      cr.destroy\n    end\n    assert_nil ContainerRequest.find_by_uuid(cr_uuid)\n  end\n\n  test \"Container makes container request, then is cancelled\" do\n    set_user_from_auth :active\n    cr = create_minimal_req!(priority: 5, state: \"Committed\", container_count_max: 1)\n\n    c = Container.find_by_uuid cr.container_uuid\n    assert_operator 0, :<, c.priority\n    lock_and_run(c)\n\n    cr2 = with_container_auth(c) do\n      create_minimal_req!(priority: 10, state: \"Committed\", container_count_max: 1, command: [\"echo\", \"foo2\"])\n    end\n    assert_equal c.uuid, cr2.requesting_container_uuid\n    assert_equal users(:active).uuid, cr2.modified_by_user_uuid\n\n    c2 = Container.find_by_uuid cr2.container_uuid\n    assert_operator 0, :<, c2.priority\n\n    act_as_system_user do\n      c.state = \"Cancelled\"\n      c.save!\n    end\n\n    cr.reload\n    assert_equal \"Final\", 
cr.state\n\n    cr2.reload\n    assert_equal 0, cr2.priority\n    assert_equal users(:active).uuid, cr2.modified_by_user_uuid\n\n    c2.reload\n    assert_equal 0, c2.priority\n  end\n\n  test \"child container priority follows same ordering as corresponding top-level ancestors\" do\n    findctr = lambda { |cr| Container.find_by_uuid(cr.container_uuid) }\n\n    set_user_from_auth :active\n\n    toplevel_crs = [\n      create_minimal_req!(priority: 5, state: \"Committed\", environment: {\"workflow\" => \"0\"}),\n      create_minimal_req!(priority: 5, state: \"Committed\", environment: {\"workflow\" => \"1\"}),\n      create_minimal_req!(priority: 5, state: \"Committed\", environment: {\"workflow\" => \"2\"}),\n    ]\n    parents = toplevel_crs.map(&findctr)\n\n    children_crs = parents.map do |parent|\n      lock_and_run(parent)\n      with_container_auth(parent) do\n        create_minimal_req!(state: \"Committed\",\n                            priority: 1,\n                            environment: {\"child\" => parent.environment[\"workflow\"]})\n      end\n    end\n    children = children_crs.map(&findctr)\n\n    grandchildren = children.reverse.map do |child|\n      lock_and_run(child)\n      with_container_auth(child) do\n        create_minimal_req!(state: \"Committed\",\n                            priority: 1,\n                            environment: {\"grandchild\" => child.environment[\"child\"]})\n      end\n    end.reverse.map(&findctr)\n\n    shared_grandchildren = children.map do |child|\n      with_container_auth(child) do\n        create_minimal_req!(state: \"Committed\",\n                            priority: 1,\n                            environment: {\"grandchild\" => \"shared\"})\n      end\n    end.map(&findctr)\n\n    assert_equal shared_grandchildren[0].uuid, shared_grandchildren[1].uuid\n    assert_equal shared_grandchildren[0].uuid, shared_grandchildren[2].uuid\n    shared_grandchild = shared_grandchildren[0]\n\n    set_user_from_auth :active\n\n    # parents should be prioritized by submit time.\n    assert_operator parents[0].priority, :>, parents[1].priority\n    assert_operator parents[1].priority, :>, parents[2].priority\n\n    # children should be prioritized in same order as their respective\n    # parents.\n    assert_operator children[0].priority, :>, children[1].priority\n    assert_operator children[1].priority, :>, children[2].priority\n\n    # grandchildren should also be prioritized in the same order,\n    # despite having been submitted in the opposite order.\n    assert_operator grandchildren[0].priority, :>, grandchildren[1].priority\n    assert_operator grandchildren[1].priority, :>, grandchildren[2].priority\n\n    # shared grandchild container should be prioritized above\n    # everything that isn't needed by parents[0], but not above\n    # earlier-submitted descendants of parents[0]\n    assert_operator shared_grandchild.priority, :>, grandchildren[1].priority\n    assert_operator shared_grandchild.priority, :>, children[1].priority\n    assert_operator shared_grandchild.priority, :>, parents[1].priority\n    assert_operator shared_grandchild.priority, :<=, grandchildren[0].priority\n    assert_operator shared_grandchild.priority, :<=, children[0].priority\n    assert_operator shared_grandchild.priority, :<=, parents[0].priority\n\n    # increasing priority of the most recent toplevel container should\n    # reprioritize all of its descendants (including the shared\n    # grandchild) above everything else.\n    
toplevel_crs[2].update!(priority: 72)\n    (parents + children + grandchildren + [shared_grandchild]).map(&:reload)\n    assert_operator shared_grandchild.priority, :>, grandchildren[0].priority\n    assert_operator shared_grandchild.priority, :>, children[0].priority\n    assert_operator shared_grandchild.priority, :>, parents[0].priority\n    assert_operator shared_grandchild.priority, :>, grandchildren[1].priority\n    assert_operator shared_grandchild.priority, :>, children[1].priority\n    assert_operator shared_grandchild.priority, :>, parents[1].priority\n    # ...but the shared container should not have higher priority than\n    # the earlier-submitted descendants of the high-priority workflow.\n    assert_operator shared_grandchild.priority, :<=, grandchildren[2].priority\n    assert_operator shared_grandchild.priority, :<=, children[2].priority\n    assert_operator shared_grandchild.priority, :<=, parents[2].priority\n\n    # cancelling the most recent toplevel container should\n    # reprioritize all of its descendants (except the shared\n    # grandchild) to zero\n    toplevel_crs[2].update!(priority: 0)\n    (parents + children + grandchildren + [shared_grandchild]).map(&:reload)\n    assert_operator 0, :==, parents[2].priority\n    assert_operator 0, :==, children[2].priority\n    assert_operator 0, :==, grandchildren[2].priority\n    assert_operator shared_grandchild.priority, :==, grandchildren[0].priority\n\n    # cancel a child request, the parent should be > 0 but\n    # the child and grandchild go to 0.\n    children_crs[1].update!(priority: 0)\n    (parents + children + grandchildren + [shared_grandchild]).map(&:reload)\n    assert_operator 0, :<, parents[1].priority\n    assert_operator parents[0].priority, :>, parents[1].priority\n    assert_operator 0, :==, children[1].priority\n    assert_operator 0, :==, grandchildren[1].priority\n    assert_operator shared_grandchild.priority, :==, grandchildren[0].priority\n\n    # update the parent, it should get a higher priority but the children and\n    # grandchildren should remain at 0\n    toplevel_crs[1].update!(priority: 6)\n    (parents + children + grandchildren + [shared_grandchild]).map(&:reload)\n    assert_operator 0, :<, parents[1].priority\n    assert_operator parents[0].priority, :<, parents[1].priority\n    assert_operator 0, :==, children[1].priority\n    assert_operator 0, :==, grandchildren[1].priority\n    assert_operator shared_grandchild.priority, :==, grandchildren[0].priority\n  end\n\n  [\n    ['running_container_auth', 'zzzzz-dz642-runningcontainr', 501],\n    ['active_no_prefs', nil, 0]\n  ].each do |token, expected, expected_priority|\n    test \"create as #{token} and expect requesting_container_uuid to be #{expected}\" do\n      set_user_from_auth token\n      cr = create_minimal_req!\n      assert_not_nil cr.uuid, 'uuid should be set for newly created container_request'\n      assert_equal expected, cr.requesting_container_uuid\n      assert_equal expected_priority, cr.priority\n    end\n  end\n\n  [\n    [:admin, 0, \"output\"],\n    [:admin, 19, \"output\"],\n    [:admin, nil, \"output\"],\n    [:running_container_auth, 0, \"intermediate\"],\n    [:running_container_auth, 29, \"intermediate\"],\n    [:running_container_auth, nil, \"intermediate\"],\n  ].each do |token, exit_code, expect_output_type|\n    test \"container with exit_code #{exit_code} has collection types set with output type #{expect_output_type}\" do\n      final_state = if exit_code.nil?\n                      
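# A nil exit_code models a container that was cancelled before\n                      # it could finish, so its terminal state is Cancelled.\n                      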
Container::Cancelled\n                    else\n                      Container::Complete\n                    end\n      set_user_from_auth token\n      request = create_minimal_req!(\n        container_count_max: 1,\n        priority: 500,\n        state: ContainerRequest::Committed,\n      )\n      run_container(request, final_state: final_state, exit_code: exit_code)\n      request.reload\n      assert_equal(ContainerRequest::Final, request.state)\n\n      output = Collection.find_by_uuid(request.output_uuid)\n      assert_not_nil(output)\n      assert_equal(request.uuid, output.properties[\"container_request\"])\n      assert_equal(expect_output_type, output.properties[\"type\"])\n\n      log = Collection.find_by_uuid(request.log_uuid)\n      assert_not_nil(log)\n      assert_equal(request.uuid, log.properties[\"container_request\"])\n      assert_equal(\"log\", log.properties[\"type\"])\n    end\n  end\n\n  test \"create as container_runtime_token and expect requesting_container_uuid to be zzzzz-dz642-20isqbkl8xwnsao\" do\n    set_user_from_auth :container_runtime_token\n    Thread.current[:token] = \"#{Thread.current[:token]}/zzzzz-dz642-20isqbkl8xwnsao\"\n    cr = create_minimal_req!\n    assert_not_nil cr.uuid, 'uuid should be set for newly created container_request'\n    assert_equal 'zzzzz-dz642-20isqbkl8xwnsao', cr.requesting_container_uuid\n    assert_equal 1, cr.priority\n  end\n\n  [[{\"vcpus\" => [2, nil]},\n    lambda { |resolved| resolved[\"vcpus\"] == 2 }],\n   [{\"vcpus\" => [3, 7]},\n    lambda { |resolved| resolved[\"vcpus\"] == 3 }],\n   [{\"vcpus\" => 4},\n    lambda { |resolved| resolved[\"vcpus\"] == 4 }],\n   [{\"ram\" => [1000000000, 2000000000]},\n    lambda { |resolved| resolved[\"ram\"] == 1000000000 }],\n   [{\"ram\" => [1234234234]},\n    lambda { |resolved| resolved[\"ram\"] == 1234234234 }],\n  ].each do |rc, okfunc|\n    test \"resolve runtime constraint range #{rc} to values\" do\n      resolved = Container.resolve_runtime_constraints(rc)\n      assert(okfunc.call(resolved),\n             \"container runtime_constraints was #{resolved.inspect}\")\n    end\n  end\n\n  [[{\"/out\" => {\n        \"kind\" => \"collection\",\n        \"uuid\" => \"zzzzz-4zz18-znfnqtbbv4spc3w\",\n        \"path\" => \"/foo\"}},\n    lambda do |resolved|\n      resolved[\"/out\"] == {\n        \"portable_data_hash\" => \"1f4b0bc7583c2a7f9102c395f4ffc5e3+45\",\n        \"kind\" => \"collection\",\n        \"path\" => \"/foo\",\n      }\n    end],\n   [{\"/out\" => {\n        \"kind\" => \"collection\",\n        \"uuid\" => \"zzzzz-4zz18-znfnqtbbv4spc3w\",\n        \"portable_data_hash\" => \"1f4b0bc7583c2a7f9102c395f4ffc5e3+45\",\n        \"path\" => \"/foo\"}},\n    lambda do |resolved|\n      resolved[\"/out\"] == {\n        \"portable_data_hash\" => \"1f4b0bc7583c2a7f9102c395f4ffc5e3+45\",\n        \"kind\" => \"collection\",\n        \"path\" => \"/foo\",\n      }\n    end],\n   [{\"/out\" => {\n      \"kind\" => \"collection\",\n      \"portable_data_hash\" => \"1f4b0bc7583c2a7f9102c395f4ffc5e3+45\",\n      \"path\" => \"/foo\"}},\n    lambda do |resolved|\n      resolved[\"/out\"] == {\n        \"portable_data_hash\" => \"1f4b0bc7583c2a7f9102c395f4ffc5e3+45\",\n        \"kind\" => \"collection\",\n        \"path\" => \"/foo\",\n      }\n    end],\n    # Empty collection\n    [{\"/out\" => {\n      \"kind\" => \"collection\",\n      \"path\" => \"/foo\"}},\n    lambda do |resolved|\n      resolved[\"/out\"] == {\n        \"kind\" => \"collection\",\n        \"path\" => 
\"/foo\",\n      }\n    end],\n  ].each do |mounts, okfunc|\n    test \"resolve mounts #{mounts.inspect} to values\" do\n      set_user_from_auth :active\n      resolved = Container.resolve_mounts(mounts)\n      assert(okfunc.call(resolved),\n             \"Container.resolve_mounts returned #{resolved.inspect}\")\n    end\n  end\n\n  test 'mount unreadable collection' do\n    set_user_from_auth :spectator\n    m = {\n      \"/foo\" => {\n        \"kind\" => \"collection\",\n        \"uuid\" => \"zzzzz-4zz18-znfnqtbbv4spc3w\",\n        \"path\" => \"/foo\",\n      },\n    }\n    assert_raises(ArvadosModel::UnresolvableContainerError) do\n      Container.resolve_mounts(m)\n    end\n  end\n\n  test 'mount collection with mismatched UUID and PDH' do\n    set_user_from_auth :active\n    m = {\n      \"/foo\" => {\n        \"kind\" => \"collection\",\n        \"uuid\" => \"zzzzz-4zz18-znfnqtbbv4spc3w\",\n        \"portable_data_hash\" => \"fa7aeb5140e2848d39b416daeef4ffc5+45\",\n        \"path\" => \"/foo\",\n      },\n    }\n    resolved_mounts = Container.resolve_mounts(m)\n    assert_equal m['portable_data_hash'], resolved_mounts['portable_data_hash']\n  end\n\n  ['arvados/apitestfixture:latest',\n   'arvados/apitestfixture',\n   'd8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678',\n  ].each do |tag|\n    test \"Container.resolve_container_image(#{tag.inspect})\" do\n      set_user_from_auth :active\n      resolved = Container.resolve_container_image(tag)\n      assert_equal resolved, collections(:docker_image).portable_data_hash\n    end\n  end\n\n  test \"Container.resolve_container_image(pdh)\" do\n    set_user_from_auth :active\n    [[:docker_image, 'v1'], [:docker_image_1_12, 'v2']].each do |coll, ver|\n      Rails.configuration.Containers.SupportedDockerImageFormats = ConfigLoader.to_OrderedOptions({ver=>{}})\n      pdh = collections(coll).portable_data_hash\n      resolved = Container.resolve_container_image(pdh)\n      assert_equal resolved, pdh\n    end\n  end\n\n  ['acbd18db4cc2f85cedef654fccc4a4d8+3',\n   'ENOEXIST',\n   'arvados/apitestfixture:ENOEXIST',\n  ].each do |img|\n    test \"container_image_for_container(#{img.inspect}) => 422\" do\n      set_user_from_auth :active\n      assert_raises(ArvadosModel::UnresolvableContainerError) do\n        Container.resolve_container_image(img)\n      end\n    end\n  end\n\n  test \"allow unrecognized container when there are remote_hosts\" do\n    set_user_from_auth :active\n    Rails.configuration.RemoteClusters = Rails.configuration.RemoteClusters.merge({foooo: ActiveSupport::InheritableOptions.new({Host: \"bar.com\"})})\n    Container.resolve_container_image('acbd18db4cc2f85cedef654fccc4a4d8+3')\n  end\n\n  test \"migrated docker image\" do\n    Rails.configuration.Containers.SupportedDockerImageFormats = ConfigLoader.to_OrderedOptions({'v2'=>{}})\n    add_docker19_migration_link\n\n    # Test that it returns only v2 images even though request is for v1 image.\n\n    set_user_from_auth :active\n    cr = create_minimal_req!(command: [\"true\", \"1\"],\n                             container_image: collections(:docker_image).portable_data_hash)\n    assert_equal(Container.resolve_container_image(cr.container_image),\n                 collections(:docker_image_1_12).portable_data_hash)\n\n    cr = create_minimal_req!(command: [\"true\", \"2\"],\n                             container_image: links(:docker_image_collection_tag).name)\n    assert_equal(Container.resolve_container_image(cr.container_image),\n                 
collections(:docker_image_1_12).portable_data_hash)\n  end\n\n  test \"use unmigrated docker image\" do\n    Rails.configuration.Containers.SupportedDockerImageFormats = ConfigLoader.to_OrderedOptions({'v1'=>{}})\n    add_docker19_migration_link\n\n    # Test that it returns only supported v1 images even though there is a\n    # migration link.\n\n    set_user_from_auth :active\n    cr = create_minimal_req!(command: [\"true\", \"1\"],\n                             container_image: collections(:docker_image).portable_data_hash)\n    assert_equal(Container.resolve_container_image(cr.container_image),\n                 collections(:docker_image).portable_data_hash)\n\n    cr = create_minimal_req!(command: [\"true\", \"2\"],\n                             container_image: links(:docker_image_collection_tag).name)\n    assert_equal(Container.resolve_container_image(cr.container_image),\n                 collections(:docker_image).portable_data_hash)\n  end\n\n  test \"incompatible docker image v1\" do\n    Rails.configuration.Containers.SupportedDockerImageFormats = ConfigLoader.to_OrderedOptions({'v1'=>{}})\n    add_docker19_migration_link\n\n    # Don't return unsupported v2 image even if we ask for it directly.\n    set_user_from_auth :active\n    cr = create_minimal_req!(command: [\"true\", \"1\"],\n                             container_image: collections(:docker_image_1_12).portable_data_hash)\n    assert_raises(ArvadosModel::UnresolvableContainerError) do\n      Container.resolve_container_image(cr.container_image)\n    end\n  end\n\n  test \"incompatible docker image v2\" do\n    Rails.configuration.Containers.SupportedDockerImageFormats = ConfigLoader.to_OrderedOptions({'v2'=>{}})\n    # No migration link: don't return the unsupported v1 image.\n\n    set_user_from_auth :active\n    cr = create_minimal_req!(command: [\"true\", \"1\"],\n                             container_image: collections(:docker_image).portable_data_hash)\n    assert_raises(ArvadosModel::UnresolvableContainerError) do\n      Container.resolve_container_image(cr.container_image)\n    end\n    cr = create_minimal_req!(command: [\"true\", \"2\"],\n                             container_image: links(:docker_image_collection_tag).name)\n    assert_raises(ArvadosModel::UnresolvableContainerError) do\n      Container.resolve_container_image(cr.container_image)\n    end\n  end\n\n  test \"requestor can retrieve container owned by dispatch\" do\n    assert_not_empty Container.readable_by(users(:admin)).where(uuid: containers(:running).uuid)\n    assert_not_empty Container.readable_by(users(:active)).where(uuid: containers(:running).uuid)\n    assert_empty Container.readable_by(users(:spectator)).where(uuid: containers(:running).uuid)\n  end\n\n  [\n    [{\"var\" => \"value1\"}, {\"var\" => \"value1\"}, nil],\n    [{\"var\" => \"value1\"}, {\"var\" => \"value1\"}, true],\n    [{\"var\" => \"value1\"}, {\"var\" => \"value1\"}, false],\n    [{\"var\" => \"value1\"}, {\"var\" => \"value2\"}, nil],\n  ].each do |env1, env2, use_existing|\n    test \"Container request #{((env1 == env2) and (use_existing.nil? or use_existing == true)) ? 'does' : 'does not'} reuse container when committed#{use_existing.nil? ? '' : use_existing ? 
' and use_existing == true' : ' and use_existing == false'}\" do\n      common_attrs = {cwd: \"/test\",\n                      priority: 1,\n                      command: [\"echo\", \"hello\"],\n                      output_path: \"/test\",\n                      runtime_constraints: {\"vcpus\" => 4,\n                                            \"ram\" => 12000000000},\n                      mounts: {\"/test\" => {\"kind\" => \"json\"}}}\n      set_user_from_auth :active\n      cr1 = create_minimal_req!(common_attrs.merge({state: ContainerRequest::Committed,\n                                                    environment: env1}))\n      run_container(cr1)\n      cr1.reload\n      if use_existing.nil?\n        # Testing with use_existing default value\n        cr2 = create_minimal_req!(common_attrs.merge({state: ContainerRequest::Uncommitted,\n                                                      environment: env2}))\n      else\n        cr2 = create_minimal_req!(common_attrs.merge({state: ContainerRequest::Uncommitted,\n                                                      environment: env2,\n                                                      use_existing: use_existing}))\n      end\n      assert_not_nil cr1.container_uuid\n      assert_nil cr2.container_uuid\n\n      # Update cr2 to committed state and check for container equality in the following cases:\n      # * When env1 and env2 are equal and use_existing is true, the same container\n      #   should be assigned.\n      # * When use_existing is false, a different container should be assigned.\n      # * When env1 and env2 are different, a different container should be assigned.\n      cr2.update!({state: ContainerRequest::Committed})\n      assert_equal (cr2.use_existing == true and (env1 == env2)),\n                   (cr1.container_uuid == cr2.container_uuid)\n    end\n  end\n\n  test \"requesting_container_uuid at create is not allowed\" do\n    set_user_from_auth :active\n    assert_raises(ActiveRecord::RecordInvalid) do\n      create_minimal_req!(state: \"Uncommitted\", priority: 1, requesting_container_uuid: 'youcantdothat')\n    end\n  end\n\n  test \"Retry on container cancelled\" do\n    set_user_from_auth :active\n    cr = create_minimal_req!(priority: 1, state: \"Committed\", container_count_max: 2)\n    cr2 = create_minimal_req!(priority: 1, state: \"Committed\", container_count_max: 2, command: [\"echo\", \"baz\"])\n    prev_container_uuid = cr.container_uuid\n\n    c = act_as_system_user do\n      c = Container.find_by_uuid(cr.container_uuid)\n      c.update!(state: Container::Locked)\n      c.update!(state: Container::Running)\n      c\n    end\n\n    cr.reload\n    cr2.reload\n    assert_equal \"Committed\", cr.state\n    assert_equal prev_container_uuid, cr.container_uuid\n    assert_not_equal cr2.container_uuid, cr.container_uuid\n    prev_container_uuid = cr.container_uuid\n\n    act_as_system_user do\n      c.update!(cost: 0.5, subrequests_cost: 1.25)\n      c.update!(state: Container::Cancelled)\n    end\n\n    cr.reload\n    cr2.reload\n    assert_equal \"Committed\", cr.state\n    assert_not_equal prev_container_uuid, cr.container_uuid\n    assert_not_equal cr2.container_uuid, cr.container_uuid\n    prev_container_uuid = cr.container_uuid\n\n    c = act_as_system_user do\n      c = Container.find_by_uuid(cr.container_uuid)\n      c.update!(state: Container::Locked)\n      c.update!(state: Container::Running)\n      c.update!(cost: 0.125)\n      c.update!(state: Container::Cancelled)\n      c\n    end\n\n    
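# The second attempt was cancelled too, and container_count_max is\n    # 2, so there is no third attempt: the request finalizes and\n    # accumulates the costs of both attempts plus the subrequests\n    # (0.5 + 1.25 + 0.125 = 1.875).\n    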
cr.reload\n    cr2.reload\n    assert_equal \"Final\", cr.state\n    assert_equal prev_container_uuid, cr.container_uuid\n    assert_not_equal cr2.container_uuid, cr.container_uuid\n    assert_equal 1.875, cr.cumulative_cost\n  end\n\n  test \"Retry on container cancelled with runtime_token\" do\n    set_user_from_auth :spectator\n    spec = api_client_authorizations(:active)\n    cr = create_minimal_req!(priority: 1, state: \"Committed\",\n                             runtime_token: spec.token,\n                             container_count_max: 2)\n    prev_container_uuid = cr.container_uuid\n\n    c = act_as_system_user do\n      c = Container.find_by_uuid(cr.container_uuid)\n      assert_equal spec.token, c.runtime_token\n      c.update!(state: Container::Locked)\n      c.update!(state: Container::Running)\n      c\n    end\n\n    cr.reload\n    assert_equal \"Committed\", cr.state\n    assert_equal prev_container_uuid, cr.container_uuid\n    prev_container_uuid = cr.container_uuid\n\n    act_as_system_user do\n      c.update!(state: Container::Cancelled)\n    end\n\n    cr.reload\n    assert_equal \"Committed\", cr.state\n    assert_not_equal prev_container_uuid, cr.container_uuid\n    prev_container_uuid = cr.container_uuid\n\n    c = act_as_system_user do\n      c = Container.find_by_uuid(cr.container_uuid)\n      assert_equal spec.token, c.runtime_token\n      c.update!(state: Container::Cancelled)\n      c\n    end\n\n    cr.reload\n    assert_equal \"Final\", cr.state\n    assert_equal prev_container_uuid, cr.container_uuid\n  end\n\n\n  test \"Retry saves logs from previous attempts\" do\n    set_user_from_auth :active\n    cr = create_minimal_req!(priority: 1, state: \"Committed\", container_count_max: 3)\n\n    c = act_as_system_user do\n      c = Container.find_by_uuid(cr.container_uuid)\n      c.update!(state: Container::Locked)\n      c.update!(state: Container::Running)\n      c\n    end\n\n    container_uuids = []\n\n    [0, 1, 2].each do\n      cr.reload\n      assert_equal \"Committed\", cr.state\n      container_uuids << cr.container_uuid\n\n      c = act_as_system_user do\n        logc = Collection.new(manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\")\n        logc.save!\n        c = Container.find_by_uuid(cr.container_uuid)\n        c.update!(state: Container::Cancelled, log: logc.portable_data_hash)\n        c\n      end\n    end\n\n    container_uuids.sort!\n\n    cr.reload\n    assert_equal \"Final\", cr.state\n    assert_equal 3, cr.container_count\n    assert_equal \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n./log\\\\040for\\\\040container\\\\040#{container_uuids[0]} 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n./log\\\\040for\\\\040container\\\\040#{container_uuids[1]} 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n./log\\\\040for\\\\040container\\\\040#{container_uuids[2]} 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\n\", Collection.find_by_uuid(cr.log_uuid).manifest_text\n  end\n\n  test \"Retry sub-request on error\" do\n    set_user_from_auth :active\n    cr1 = create_minimal_req!(priority: 1, state: \"Committed\", container_count_max: 2, command: [\"echo\", \"foo1\"])\n    c1 = Container.find_by_uuid(cr1.container_uuid)\n    act_as_system_user do\n      c1.update!(state: Container::Locked)\n      c1.update!(state: Container::Running)\n    end\n\n    cr2 = with_container_auth(c1) do\n      create_minimal_req!(priority: 10, state: \"Committed\", container_count_max: 2, command: [\"echo\", \"foo2\"])\n    end\n    c2 = Container.find_by_uuid(cr2.container_uuid)\n    act_as_system_user do\n      c2.update!(state: Container::Locked)\n      c2.update!(state: Container::Running)\n    end\n\n    cr3 = with_container_auth(c2) do\n      create_minimal_req!(priority: 10, state: \"Committed\", container_count_max: 2, command: [\"echo\", \"foo3\"])\n    end\n    c3 = Container.find_by_uuid(cr3.container_uuid)\n\n    act_as_system_user do\n      c3.update!(state: Container::Locked)\n      c3.update!(state: Container::Running)\n    end\n\n    # All the containers are in running state\n\n    c3.reload\n    cr3.reload\n\n    # c3 still running\n    assert_equal 'Running', c3.state\n    assert_equal 1, cr3.container_count\n    assert_equal 'Committed', cr3.state\n\n    # c3 goes to cancelled state\n    act_as_system_user do\n      c3.state = \"Cancelled\"\n      c3.save!\n    end\n\n    cr3.reload\n\n    # Because the parent request is still live, it should\n    # be retried.\n    assert_equal 2, cr3.container_count\n    assert_equal 'Committed', cr3.state\n  end\n\n  test \"Do not retry sub-request when process tree is cancelled\" do\n    set_user_from_auth :active\n    cr1 = create_minimal_req!(priority: 1, state: \"Committed\", container_count_max: 2, command: [\"echo\", \"foo1\"])\n    c1 = Container.find_by_uuid(cr1.container_uuid)\n    act_as_system_user do\n      c1.update!(state: Container::Locked)\n      c1.update!(state: Container::Running)\n    end\n\n    cr2 = with_container_auth(c1) do\n      create_minimal_req!(priority: 10, state: \"Committed\", container_count_max: 2, command: [\"echo\", \"foo2\"])\n    end\n    c2 = Container.find_by_uuid(cr2.container_uuid)\n    act_as_system_user do\n      c2.update!(state: Container::Locked)\n      c2.update!(state: Container::Running)\n    end\n\n    cr3 = with_container_auth(c2) do\n      create_minimal_req!(priority: 10, state: \"Committed\", container_count_max: 2, command: [\"echo\", \"foo3\"])\n    end\n    c3 = Container.find_by_uuid(cr3.container_uuid)\n\n    act_as_system_user do\n      c3.update!(state: Container::Locked)\n      c3.update!(state: Container::Running)\n    end\n\n    # All the containers are in running state\n\n    # Now cancel the toplevel container request\n    act_as_system_user do\n      cr1.priority = 0\n      cr1.save!\n    end\n\n    c3.reload\n    cr3.reload\n\n    # c3 still running\n    assert_equal 'Running', c3.state\n    assert_equal 1, cr3.container_count\n    assert_equal 'Committed', cr3.state\n\n    # c3 goes to cancelled state\n    act_as_system_user do\n      
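# Cancelling the toplevel request has already cascaded priority 0\n      # down to this leaf container.\n      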
assert_equal 0, c3.priority\n      c3.state = \"Cancelled\"\n      c3.save!\n    end\n\n    cr3.reload\n\n    # Because the parent process was cancelled, it _should not_ be\n    # retried.\n    assert_equal 1, cr3.container_count\n    assert_equal 'Final', cr3.state\n  end\n\n  test \"Retry process tree on error\" do\n    set_user_from_auth :active\n    cr1 = create_minimal_req!(priority: 1, state: \"Committed\", container_count_max: 2, command: [\"echo\", \"foo1\"])\n    c1 = Container.find_by_uuid(cr1.container_uuid)\n    act_as_system_user do\n      c1.update!(state: Container::Locked)\n      c1.update!(state: Container::Running)\n    end\n\n    cr2 = with_container_auth(c1) do\n      create_minimal_req!(priority: 10, state: \"Committed\", container_count_max: 2, command: [\"echo\", \"foo2\"])\n    end\n    c2 = Container.find_by_uuid(cr2.container_uuid)\n    act_as_system_user do\n      c2.update!(state: Container::Locked)\n      c2.update!(state: Container::Running)\n    end\n\n    cr3 = with_container_auth(c2) do\n      create_minimal_req!(priority: 10, state: \"Committed\", container_count_max: 2, command: [\"echo\", \"foo3\"])\n    end\n    c3 = Container.find_by_uuid(cr3.container_uuid)\n\n    act_as_system_user do\n      c3.update!(state: Container::Locked)\n      c3.update!(state: Container::Running)\n    end\n\n    # All the containers are in running state\n\n    c1.reload\n\n    # c1 goes to cancelled state\n    act_as_system_user do\n      c1.state = \"Cancelled\"\n      c1.save!\n    end\n\n    cr1.reload\n    cr2.reload\n    cr3.reload\n\n    # Because the root request is still live, it should be retried.\n    # Assumes the root is something like arvados-cwl-runner where\n    # container reuse enables it to more or less pick up where it left\n    # off.\n    assert_equal 2, cr1.container_count\n    assert_equal 'Committed', cr1.state\n\n    # These keep running.\n    assert_equal 1, cr2.container_count\n    assert_equal 'Committed', cr2.state\n\n    assert_equal 1, cr3.container_count\n    assert_equal 'Committed', cr3.state\n  end\n\n  test \"Output collection name setting using output_name with name collision resolution\" do\n    set_user_from_auth :active\n    output_name = 'unimaginative name'\n    Collection.create!(name: output_name)\n\n    cr = create_minimal_req!(priority: 1,\n                             state: ContainerRequest::Committed,\n                             output_name: output_name)\n    run_container(cr)\n    cr.reload\n    assert_equal ContainerRequest::Final, cr.state\n    output_coll = Collection.find_by_uuid(cr.output_uuid)\n    # Make sure the resulting output collection name includes the original name\n    # plus the last 15 characters of its uuid\n    assert_not_equal output_name, output_coll.name,\n                     \"more than one collection with the same owner and name\"\n    assert output_coll.name.include?(output_name),\n           \"New name should include original name\"\n    assert_match(/#{output_coll.uuid[-15..-1]}/, output_coll.name,\n                 \"New name should include last 15 characters of uuid\")\n  end\n\n  [[0, :check_output_ttl_0],\n   [1, :check_output_ttl_1s],\n   [365*86400, :check_output_ttl_1y],\n  ].each do |ttl, checker|\n    test \"output_ttl=#{ttl}\" do\n      act_as_user users(:active) do\n        cr = create_minimal_req!(priority: 1,\n                                 state: ContainerRequest::Committed,\n                                 output_name: 'foo',\n                                 output_ttl: ttl)\n        
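# run_container (defined below) drives the container to completion,\n        # so the request finalizes and the output collection gets its\n        # trash_at/delete_at set from output_ttl.\n        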
run_container(cr)\n        cr.reload\n        output = Collection.find_by_uuid(cr.output_uuid)\n        send(checker, db_current_time, output.trash_at, output.delete_at)\n      end\n    end\n  end\n\n  def check_output_ttl_0(now, trash, delete)\n    assert_nil(trash)\n    assert_nil(delete)\n  end\n\n  def check_output_ttl_1s(now, trash, delete)\n    assert_not_nil(trash)\n    assert_not_nil(delete)\n    assert_in_delta(trash, now + 1.second, 10)\n    assert_in_delta(delete, now + Rails.configuration.Collections.BlobSigningTTL, 10)\n  end\n\n  def check_output_ttl_1y(now, trash, delete)\n    year = (86400*365).second\n    assert_not_nil(trash)\n    assert_not_nil(delete)\n    assert_in_delta(trash, now + year, 10)\n    assert_in_delta(delete, now + year, 10)\n  end\n\n  def run_container(cr, final_state: Container::Complete, exit_code: 0)\n    act_as_system_user do\n      logc = Collection.new(owner_uuid: system_user_uuid,\n                            manifest_text: \". ef772b2f28e2c8ca84de45466ed19ee9+7815 0:0:arv-mount.txt\\n\")\n      logc.save!\n\n      c = Container.find_by_uuid(cr.container_uuid)\n      c.update!(state: Container::Locked)\n      c.update!(state: Container::Running)\n      c.update!(state: final_state,\n                           exit_code: exit_code,\n                           output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45',\n                           log: logc.portable_data_hash)\n      logc.destroy\n      c\n    end\n  end\n\n  test \"Finalize committed request when reusing a finished container\" do\n    set_user_from_auth :active\n    cr = create_minimal_req!(priority: 1, state: ContainerRequest::Committed)\n    cr.reload\n    assert_equal ContainerRequest::Committed, cr.state\n    run_container(cr)\n    cr.reload\n    assert_equal ContainerRequest::Final, cr.state\n\n    cr2 = create_minimal_req!(priority: 1, state: ContainerRequest::Committed)\n    assert_equal cr.container_uuid, cr2.container_uuid\n    assert_equal ContainerRequest::Final, cr2.state\n\n    cr3 = create_minimal_req!(priority: 1, state: ContainerRequest::Uncommitted)\n    assert_equal ContainerRequest::Uncommitted, cr3.state\n    cr3.update!(state: ContainerRequest::Committed)\n    assert_equal cr.container_uuid, cr3.container_uuid\n    assert_equal ContainerRequest::Final, cr3.state\n  end\n\n  [\n    # client requests preemptible, but types are not configured\n    [false, false, false, true, ActiveRecord::RecordInvalid],\n    [true, false, false, true, ActiveRecord::RecordInvalid],\n    # client requests preemptible, types are configured\n    [false, true, false, true, true],\n    [true, true, false, true, true],\n    # client requests non-preemptible for top-level container\n    [false, false, false, false, false],\n    [true, false, false, false, false],\n    [false, true, false, false, false],\n    [true, true, false, false, false],\n    # client requests non-preemptible for child container, preemptible\n    # is enabled anyway if AlwaysUsePreemptibleInstances and instance types\n    # are configured.\n    [false, false, true, false, false],\n    [true, false, true, false, false],\n    [false, true, true, false, false],\n    [true, true, true, false, true],\n  ].each do |use_preemptible, have_preemptible, is_child, ask, expect|\n    test \"with AlwaysUsePreemptibleInstances=#{use_preemptible} and preemptible types #{have_preemptible ? '' : 'not '}configured, create #{is_child ? 
'child' : 'top-level'} container request with preemptible=#{ask} and expect #{expect}\" do\n      Rails.configuration.Containers.AlwaysUsePreemptibleInstances = use_preemptible\n      if have_preemptible\n        configure_preemptible_instance_type\n      end\n      common_attrs = {\n        state: ContainerRequest::Uncommitted,\n        cwd: \"/test\",\n        priority: 1,\n        command: [\"echo\", \"hello\"],\n        output_path: \"/test\",\n        scheduling_parameters: {\"preemptible\" => ask},\n        mounts: {\"/test\" => {\"kind\" => \"json\"}},\n      }\n      set_user_from_auth :active\n\n      if is_child\n        cr = with_container_auth(containers(:running)) do\n          create_minimal_req!(common_attrs)\n        end\n      else\n        cr = create_minimal_req!(common_attrs)\n      end\n\n      cr.reload\n      cr.state = ContainerRequest::Committed\n\n      if expect == true || expect == false\n        cr.save!\n        assert_equal expect, cr.scheduling_parameters[\"preemptible\"]\n      else\n        assert_raises(expect) do\n          cr.save!\n        end\n      end\n    end\n  end\n\n  test \"config update does not flip preemptible flag on already-committed container requests\" do\n    parent = containers(:running_container_with_logs)\n    attrs_p = {\n      scheduling_parameters: {\"preemptible\" => true},\n      \"state\" => \"Committed\",\n      \"priority\" => 1,\n    }\n    attrs_nonp = {\n      scheduling_parameters: {\"preemptible\" => false},\n      \"state\" => \"Committed\",\n      \"priority\" => 1,\n    }\n    expect = {false => [], true => []}\n\n    with_container_auth(parent) do\n      configure_preemptible_instance_type\n      Rails.configuration.Containers.AlwaysUsePreemptibleInstances = false\n\n      expect[true].push create_minimal_req!(attrs_p)\n      expect[false].push create_minimal_req!(attrs_nonp)\n\n      Rails.configuration.Containers.AlwaysUsePreemptibleInstances = true\n\n      expect[true].push create_minimal_req!(attrs_p)\n      expect[true].push create_minimal_req!(attrs_nonp)\n      commit_later = create_minimal_req!()\n\n      Rails.configuration.InstanceTypes = ConfigLoader.to_OrderedOptions({})\n\n      expect[false].push create_minimal_req!(attrs_nonp)\n\n      # Even though preemptible is not allowed, we should be able to\n      # commit a CR that was created earlier when preemptible was the\n      # default.\n      commit_later.update!(priority: 1, state: \"Committed\")\n      expect[false].push commit_later\n    end\n\n    set_user_from_auth :active\n    [false, true].each do |pflag|\n      expect[pflag].each do |cr|\n        cr.reload\n        assert_equal pflag, cr.scheduling_parameters['preemptible']\n      end\n    end\n\n    act_as_system_user do\n      # Cancelling the parent used to fail while updating the child\n      # containers' priority, because the child containers' unchanged\n      # preemptible fields caused validation to fail.\n      parent.update!(state: 'Cancelled')\n\n      [false, true].each do |pflag|\n        expect[pflag].each do |cr|\n          cr.reload\n          assert_equal 0, cr.priority, \"unexpected non-zero priority #{cr.priority} for #{cr.uuid}\"\n        end\n      end\n    end\n  end\n\n  [\n    [{\"partitions\" => [\"fastcpu\",\"vfastcpu\", 100]}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],\n    [{\"partitions\" => [\"fastcpu\",\"vfastcpu\", 100]}, ContainerRequest::Uncommitted, ActiveRecord::RecordInvalid],\n    [{\"partitions\" => \"fastcpu\"}, 
ContainerRequest::Committed, ActiveRecord::RecordInvalid],\n    [{\"partitions\" => \"fastcpu\"}, ContainerRequest::Uncommitted, ActiveRecord::RecordInvalid],\n    [{\"partitions\" => [\"fastcpu\",\"vfastcpu\"]}, ContainerRequest::Committed],\n    [{\"max_run_time\" => \"one day\"}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],\n    [{\"max_run_time\" => \"one day\"}, ContainerRequest::Uncommitted, ActiveRecord::RecordInvalid],\n    [{\"max_run_time\" => -1}, ContainerRequest::Committed, ActiveRecord::RecordInvalid],\n    [{\"max_run_time\" => -1}, ContainerRequest::Uncommitted, ActiveRecord::RecordInvalid],\n    [{\"max_run_time\" => 86400}, ContainerRequest::Committed],\n  ].each do |sp, state, expected|\n    test \"create container request with scheduling_parameters #{sp} in state #{state} and verify #{expected}\" do\n      common_attrs = {cwd: \"/test\",\n                      priority: 1,\n                      command: [\"echo\", \"hello\"],\n                      output_path: \"/test\",\n                      scheduling_parameters: sp,\n                      mounts: {\"/test\" => {\"kind\" => \"json\"}}}\n      set_user_from_auth :active\n\n      if expected == ActiveRecord::RecordInvalid\n        assert_raises(ActiveRecord::RecordInvalid) do\n          create_minimal_req!(common_attrs.merge({state: state}))\n        end\n      else\n        cr = create_minimal_req!(common_attrs.merge({state: state}))\n        assert (sp.to_a - cr.scheduling_parameters.to_a).empty?\n\n        if state == ContainerRequest::Committed\n          c = Container.find_by_uuid(cr.container_uuid)\n          assert (sp.to_a - c.scheduling_parameters.to_a).empty?\n        end\n      end\n    end\n  end\n\n  test \"AlwaysUsePreemptibleInstances makes child containers preemptible\" do\n    Rails.configuration.Containers.AlwaysUsePreemptibleInstances = true\n    common_attrs = {cwd: \"/test\",\n                    priority: 1,\n                    command: [\"echo\", \"hello\"],\n                    output_path: \"/test\",\n                    state: ContainerRequest::Committed,\n                    mounts: {\"/test\" => {\"kind\" => \"json\"}}}\n    set_user_from_auth :active\n    configure_preemptible_instance_type\n\n    cr = with_container_auth(Container.find_by_uuid 'zzzzz-dz642-runningcontainr') do\n      create_minimal_req!(common_attrs)\n    end\n    assert_equal 'zzzzz-dz642-runningcontainr', cr.requesting_container_uuid\n    assert_equal true, cr.scheduling_parameters[\"preemptible\"]\n\n    c = Container.find_by_uuid(cr.container_uuid)\n    assert_equal true, c.scheduling_parameters[\"preemptible\"]\n  end\n\n  [['Committed', true, {name: \"foobar\", priority: 123}],\n   ['Committed', false, {container_count: 2}],\n   ['Committed', false, {container_count: 0}],\n   ['Committed', false, {container_count: nil}],\n   ['Committed', true, {priority: 0, mounts: {\"/out\" => {\"kind\" => \"tmp\", \"capacity\" => 1000000}}}],\n   ['Committed', true, {priority: 0, mounts: {\"/out\" => {\"capacity\" => 1000000, \"kind\" => \"tmp\"}}}],\n   # Addition of default values for mounts / runtime_constraints /\n   # scheduling_parameters, as happens in a round-trip through\n   # controller, does not have any real effect and should be\n   # accepted/ignored rather than causing an error when the CR state\n   # dictates those attributes are not allowed to change.\n   ['Committed', true, {priority: 0, mounts: {\"/out\" => {\"capacity\" => 0, \"kind\" => \"tmp\"}}}, {mounts: {\"/out\" => {\"kind\" => 
\"tmp\"}}}],\n   ['Committed', true, {priority: 0, mounts: {\"/out\" => {\"capacity\" => 1000000, \"kind\" => \"tmp\", \"exclude_from_output\": false}}}],\n   ['Committed', true, {priority: 0, mounts: {\"/out\" => {\"capacity\" => 1000000, \"kind\" => \"tmp\", \"repository_name\": \"\"}}}],\n   ['Committed', true, {priority: 0, mounts: {\"/out\" => {\"capacity\" => 1000000, \"kind\" => \"tmp\", \"content\": nil}}}],\n   ['Committed', false, {priority: 0, mounts: {\"/out\" => {\"capacity\" => 1000000, \"kind\" => \"tmp\", \"content\": {}}}}],\n   ['Committed', false, {priority: 0, mounts: {\"/out\" => {\"capacity\" => 1000000, \"kind\" => \"tmp\", \"repository_name\": \"foo\"}}}],\n   ['Committed', false, {priority: 0, mounts: {\"/out\" => {\"kind\" => \"tmp\", \"capacity\" => 1234567}}}],\n   ['Committed', false, {priority: 0, mounts: {}}],\n   ['Committed', true, {priority: 0, runtime_constraints: {\"vcpus\" => 1, \"ram\" => 2}}],\n   ['Committed', true, {priority: 0, runtime_constraints: {\"vcpus\" => 1, \"ram\" => 2, \"keep_cache_ram\" => 0}}],\n   ['Committed', true, {priority: 0, runtime_constraints: {\"vcpus\" => 1, \"ram\" => 2, \"API\" => false}}],\n   ['Committed', false, {priority: 0, runtime_constraints: {\"vcpus\" => 1, \"ram\" => 2, \"keep_cache_ram\" => 1}}],\n   ['Committed', false, {priority: 0, runtime_constraints: {\"vcpus\" => 1, \"ram\" => 2, \"API\" => true}}],\n   ['Committed', true, {priority: 0, scheduling_parameters: {\"preemptible\" => false}}],\n   ['Committed', true, {priority: 0, scheduling_parameters: {\"partitions\" => []}}],\n   ['Committed', true, {priority: 0, scheduling_parameters: {\"max_run_time\" => 0}}],\n   ['Committed', false, {priority: 0, scheduling_parameters: {\"preemptible\" => true}}],\n   ['Committed', false, {priority: 0, scheduling_parameters: {\"partitions\" => [\"foo\"]}}],\n   ['Committed', false, {priority: 0, scheduling_parameters: {\"max_run_time\" => 1}}],\n   ['Final', false, {state: ContainerRequest::Committed, name: \"foobar\"}],\n   ['Final', false, {name: \"foobar\", priority: 123}],\n   ['Final', false, {name: \"foobar\", output_uuid: \"zzzzz-4zz18-znfnqtbbv4spc3w\"}],\n   ['Final', false, {name: \"foobar\", log_uuid: \"zzzzz-4zz18-znfnqtbbv4spc3w\"}],\n   ['Final', false, {log_uuid: \"zzzzz-4zz18-znfnqtbbv4spc3w\"}],\n   ['Final', false, {priority: 123}],\n   ['Final', false, {mounts: {}}],\n   ['Final', false, {container_count: 2}],\n   ['Final', true, {name: \"foobar\"}],\n   ['Final', true, {name: \"foobar\", description: \"baz\"}],\n  ].each do |state, permitted, updates, create_attrs|\n    test \"state=#{state} can#{'not' if !permitted} update #{updates.inspect}\" do\n      act_as_user users(:active) do\n        attrs = {\n          priority: 1,\n          state: \"Committed\",\n          container_count_max: 1\n        }\n        if !create_attrs.nil?\n          attrs.merge!(create_attrs)\n        end\n        cr = create_minimal_req!(attrs)\n        case state\n        when 'Committed'\n          # already done\n        when 'Final'\n          act_as_system_user do\n            Container.find_by_uuid(cr.container_uuid).\n              update!(state: Container::Cancelled)\n          end\n          cr.reload\n        else\n          raise 'broken test case'\n        end\n        assert_equal state, cr.state\n        if permitted\n          assert cr.update!(updates)\n        else\n          assert_raises(ActiveRecord::RecordInvalid) do\n            cr.update!(updates)\n          end\n        end\n      end\n    end\n  
end\n\n  test \"delete container_request and check its container's priority\" do\n    act_as_user users(:active) do\n      cr = ContainerRequest.find_by_uuid container_requests(:running_to_be_deleted).uuid\n\n      # initially the cr's container has priority > 0\n      c = Container.find_by_uuid(cr.container_uuid)\n      assert_equal 1, c.priority\n\n      cr.destroy\n\n      # the cr's container now has priority of 0\n      c.reload\n      assert_equal 0, c.priority\n    end\n  end\n\n  test \"trash the project containing a container_request and check its container's priority\" do\n    act_as_user users(:active) do\n      cr = ContainerRequest.find_by_uuid container_requests(:running_to_be_deleted).uuid\n\n      # initially the cr's container has priority > 0\n      c = Container.find_by_uuid(cr.container_uuid)\n      assert_equal 1, c.priority\n\n      prj = Group.find_by_uuid cr.owner_uuid\n      prj.update!(trash_at: db_current_time)\n\n      # the cr's container now has priority of 0\n      c.reload\n      assert_equal 0, c.priority\n\n      assert_equal c.state, 'Running'\n      assert_equal cr.state, 'Committed'\n\n      # mark the container as cancelled, this should cause the\n      # container request to go to final state and run the finalize\n      # function\n      act_as_system_user do\n        c.update!(state: 'Cancelled', log: 'fa7aeb5140e2848d39b416daeef4ffc5+45')\n      end\n      c.reload\n      cr.reload\n\n      assert_equal c.state, 'Cancelled'\n      assert_equal cr.state, 'Final'\n      assert_equal nil, cr.log_uuid\n    end\n  end\n\n  test \"delete container_request in final state and expect no error due to before_destroy callback\" do\n    act_as_user users(:active) do\n      cr = ContainerRequest.find_by_uuid container_requests(:completed).uuid\n      assert_nothing_raised {cr.destroy}\n    end\n  end\n\n  test \"Container request valid priority\" do\n    set_user_from_auth :active\n    cr = create_minimal_req!\n\n    assert_raises(ActiveRecord::RecordInvalid) do\n      cr.priority = -1\n      cr.save!\n    end\n\n    cr.priority = 0\n    cr.save!\n\n    cr.priority = 1\n    cr.save!\n\n    cr.priority = 500\n    cr.save!\n\n    cr.priority = 999\n    cr.save!\n\n    cr.priority = 1000\n    cr.save!\n\n    assert_raises(ActiveRecord::RecordInvalid) do\n      cr.priority = 1001\n      cr.save!\n    end\n  end\n\n  # Note: some of these tests might look redundant because they test\n  # that out-of-order spellings of hashes are still considered equal\n  # regardless of whether the existing (container) or new (container\n  # request) hash needs to be re-ordered.\n  secrets = {\"/foo\" => {\"kind\" => \"text\", \"content\" => \"xyzzy\"}}\n  same_secrets = {\"/foo\" => {\"content\" => \"xyzzy\", \"kind\" => \"text\"}}\n  different_secrets = {\"/foo\" => {\"kind\" => \"text\", \"content\" => \"something completely different\"}}\n  [\n    [true, nil, nil],\n    [true, nil, {}],\n    [true, {}, nil],\n    [true, {}, {}],\n    [true, secrets, same_secrets],\n    [true, same_secrets, secrets],\n    [false, nil, secrets],\n    [false, {}, secrets],\n    [false, secrets, {}],\n    [false, secrets, nil],\n    [false, secrets, different_secrets],\n  ].each do |expect_reuse, sm1, sm2|\n    test \"container reuse secret_mounts #{sm1.inspect}, #{sm2.inspect}\" do\n      set_user_from_auth :active\n      cr1 = create_minimal_req!(state: \"Committed\", priority: 1, secret_mounts: sm1)\n      cr2 = create_minimal_req!(state: \"Committed\", priority: 1, secret_mounts: sm2)\n      
assert_not_nil cr1.container_uuid\n      assert_not_nil cr2.container_uuid\n      if expect_reuse\n        assert_equal cr1.container_uuid, cr2.container_uuid\n      else\n        assert_not_equal cr1.container_uuid, cr2.container_uuid\n      end\n    end\n  end\n\n  test \"scrub secret_mounts but reuse container for request with identical secret_mounts\" do\n    set_user_from_auth :active\n    sm = {'/secret/foo' => {'kind' => 'text', 'content' => secret_string}}\n    cr1 = create_minimal_req!(state: \"Committed\", priority: 1, secret_mounts: sm.dup)\n    run_container(cr1)\n    cr1.reload\n\n    # secret_mounts scrubbed from db\n    c = Container.where(uuid: cr1.container_uuid).first\n    assert_equal({}, c.secret_mounts)\n    assert_equal({}, cr1.secret_mounts)\n\n    # can reuse container if secret_mounts match\n    cr2 = create_minimal_req!(state: \"Committed\", priority: 1, secret_mounts: sm.dup)\n    assert_equal cr1.container_uuid, cr2.container_uuid\n\n    # don't reuse container if secret_mounts don't match\n    cr3 = create_minimal_req!(state: \"Committed\", priority: 1, secret_mounts: {})\n    assert_not_equal cr1.container_uuid, cr3.container_uuid\n\n    assert_no_secrets_logged\n  end\n\n  test \"conflicting key in mounts and secret_mounts\" do\n    sm = {'/secret/foo' => {'kind' => 'text', 'content' => secret_string}}\n    set_user_from_auth :active\n    cr = create_minimal_req!\n    assert_equal false, cr.update(state: \"Committed\",\n                                             priority: 1,\n                                             mounts: cr.mounts.merge(sm),\n                                             secret_mounts: sm)\n    assert_equal [:secret_mounts], cr.errors.messages.keys\n  end\n\n  test \"using runtime_token\" do\n    set_user_from_auth :spectator\n    spec = api_client_authorizations(:active)\n    cr = create_minimal_req!(state: \"Committed\", runtime_token: spec.token, priority: 1)\n    cr.save!\n    c = Container.find_by_uuid cr.container_uuid\n    lock_and_run c\n    assert_nil c.auth_uuid\n    assert_equal c.runtime_token, spec.token\n\n    assert_not_nil ApiClientAuthorization.find_by_uuid(spec.uuid)\n\n    act_as_system_user do\n      c.update!(state: Container::Complete,\n                           exit_code: 0,\n                           output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45',\n                           log: 'fa7aeb5140e2848d39b416daeef4ffc5+45')\n    end\n\n    cr.reload\n    c.reload\n    assert_nil cr.runtime_token\n    assert_nil c.runtime_token\n  end\n\n  test \"invalid runtime_token\" do\n    set_user_from_auth :active\n    spec = api_client_authorizations(:spectator)\n    err = assert_raises(ActiveRecord::RecordInvalid) do\n      cr = create_minimal_req!(state: \"Committed\", runtime_token: \"#{spec.token}xx\")\n      cr.save!\n    end\n    assert_match /Runtime token failed validation/, err.message\n  end\n\n  test \"default output_storage_classes\" do\n    saved = Rails.configuration.DefaultStorageClasses\n    Rails.configuration.DefaultStorageClasses = [\"foo\"]\n    begin\n      act_as_user users(:active) do\n        cr = create_minimal_req!(priority: 1,\n                                 state: ContainerRequest::Committed,\n                                 output_name: 'foo')\n        run_container(cr)\n        cr.reload\n        output = Collection.find_by_uuid(cr.output_uuid)\n        assert_equal [\"foo\"], output.storage_classes_desired\n      end\n    ensure\n      Rails.configuration.DefaultStorageClasses = saved\n    
end\n  end\n\n  test \"setting output_storage_classes\" do\n    act_as_user users(:active) do\n      cr = create_minimal_req!(priority: 1,\n                               state: ContainerRequest::Committed,\n                               output_name: 'foo',\n                               output_storage_classes: [\"foo_storage_class\", \"bar_storage_class\"])\n      run_container(cr)\n      cr.reload\n      output = Collection.find_by_uuid(cr.output_uuid)\n      assert_equal [\"foo_storage_class\", \"bar_storage_class\"], output.storage_classes_desired\n      log = Collection.find_by_uuid(cr.log_uuid)\n      assert_equal [\"foo_storage_class\", \"bar_storage_class\"], log.storage_classes_desired\n    end\n  end\n\n  test \"reusing container with different container_request.output_storage_classes\" do\n    common_attrs = {cwd: \"/test\",\n                    priority: 1,\n                    command: [\"echo\", \"hello\"],\n                    output_path: \"/test\",\n                    runtime_constraints: {\"vcpus\" => 4,\n                                          \"ram\" => 12000000000},\n                    mounts: {\"/test\" => {\"kind\" => \"json\"}},\n                    environment: {\"var\" => \"value1\"},\n                    output_storage_classes: [\"foo_storage_class\"]}\n    set_user_from_auth :active\n    cr1 = create_minimal_req!(common_attrs.merge({state: ContainerRequest::Committed}))\n    cont1 = run_container(cr1)\n    cr1.reload\n\n    output1 = Collection.find_by_uuid(cr1.output_uuid)\n\n    # Testing with use_existing default value\n    cr2 = create_minimal_req!(common_attrs.merge({state: ContainerRequest::Uncommitted,\n                                                  output_storage_classes: [\"bar_storage_class\"]}))\n\n    assert_not_nil cr1.container_uuid\n    assert_nil cr2.container_uuid\n\n    # Update cr2 to committed state, check for reuse, then run it\n    cr2.update!({state: ContainerRequest::Committed})\n    assert_equal cr1.container_uuid, cr2.container_uuid\n\n    cr2.reload\n    output2 = Collection.find_by_uuid(cr2.output_uuid)\n\n    # The original CR output has the original storage class,\n    # but the second CR output has the new storage class.\n    assert_equal [\"foo_storage_class\"], cont1.output_storage_classes\n    assert_equal [\"foo_storage_class\"], output1.storage_classes_desired\n    assert_equal [\"bar_storage_class\"], output2.storage_classes_desired\n  end\n\n  [\n    [{},               {},           {\"type\": \"output\"}],\n    [{\"a1\": \"b1\"},     {},           {\"type\": \"output\", \"a1\": \"b1\"}],\n    [{},               {\"a1\": \"b1\"}, {\"type\": \"output\", \"a1\": \"b1\"}],\n    [{\"a1\": \"b1\"},     {\"a1\": \"c1\"}, {\"type\": \"output\", \"a1\": \"b1\"}],\n    [{\"a1\": \"b1\"},     {\"a2\": \"c2\"}, {\"type\": \"output\", \"a1\": \"b1\", \"a2\": \"c2\"}],\n    [{\"type\": \"blah\"}, {},           {\"type\": \"blah\"}],\n  ].each do |cr_prop, container_prop, expect_prop|\n    test \"setting output_properties #{cr_prop} #{container_prop} on current container\" do\n      act_as_user users(:active) do\n        cr = create_minimal_req!(priority: 1,\n                                 state: ContainerRequest::Committed,\n                                 output_name: 'foo',\n                                 output_properties: cr_prop)\n\n        act_as_system_user do\n          logc = Collection.new(owner_uuid: system_user_uuid,\n                                manifest_text: \". 
ef772b2f28e2c8ca84de45466ed19ee9+7815 0:0:arv-mount.txt\\n\")\n          logc.save!\n\n          c = Container.find_by_uuid(cr.container_uuid)\n          c.update!(state: Container::Locked)\n          c.update!(state: Container::Running)\n\n          c.update!(output_properties: container_prop)\n\n          c.update!(state: Container::Complete,\n                               exit_code: 0,\n                               output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45',\n                               log: logc.portable_data_hash)\n          logc.destroy\n        end\n\n        cr.reload\n        expect_prop[\"container_request\"] = cr.uuid\n        output = Collection.find_by_uuid(cr.output_uuid)\n        assert_equal expect_prop.symbolize_keys, output.properties.symbolize_keys\n      end\n    end\n  end\n\n  test \"Cumulative cost includes retried attempts but not reused containers\" do\n    set_user_from_auth :active\n    cr = create_minimal_req!(priority: 5, state: \"Committed\", container_count_max: 3)\n    c = Container.find_by_uuid cr.container_uuid\n    act_as_system_user do\n      c.update!(state: Container::Locked)\n      c.update!(state: Container::Running)\n      c.update!(state: Container::Cancelled, cost: 3)\n    end\n    cr.reload\n    assert_equal 3, cr.cumulative_cost\n\n    c = Container.find_by_uuid cr.container_uuid\n    lock_and_run c\n    c.reload\n    assert_equal 0, c.subrequests_cost\n\n    # cr2 is a child/subrequest\n    cr2 = with_container_auth(c) do\n      create_minimal_req!(priority: 10, state: \"Committed\", container_count_max: 1, command: [\"echo\", \"foo2\"])\n    end\n    assert_equal c.uuid, cr2.requesting_container_uuid\n    c2 = Container.find_by_uuid cr2.container_uuid\n    act_as_system_user do\n      c2.update!(state: Container::Locked)\n      c2.update!(state: Container::Running)\n      logc = Collection.new(owner_uuid: system_user_uuid,\n                            manifest_text: \". 
ef772b2f28e2c8ca84de45466ed19ee9+7815 0:0:arv-mount.txt\\n\")\n      logc.save!\n      c2.update!(state: Container::Complete,\n                            exit_code: 0,\n                            output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45',\n                            log: logc.portable_data_hash,\n                            cost: 7)\n    end\n    c.reload\n    assert_equal 7, c.subrequests_cost\n\n    # cr3 is an identical child/subrequest, will reuse c2\n    cr3 = with_container_auth(c) do\n      create_minimal_req!(priority: 10, state: \"Committed\", container_count_max: 1, command: [\"echo\", \"foo2\"])\n    end\n    assert_equal c.uuid, cr3.requesting_container_uuid\n    c3 = Container.find_by_uuid cr3.container_uuid\n    assert_equal c2.uuid, c3.uuid\n    assert_equal Container::Complete, c3.state\n    c.reload\n    assert_equal 7, c.subrequests_cost\n\n    act_as_system_user do\n      c.update!(state: Container::Complete, exit_code: 0, cost: 9)\n    end\n\n    c.reload\n    assert_equal 7, c.subrequests_cost\n    cr.reload\n    assert_equal 3+7+9, cr.cumulative_cost\n  end\n\n  test \"Service cannot use existing container\" do\n    set_user_from_auth :active\n    cr = create_minimal_req!\n    cr.service = true\n    cr.use_existing = true\n    cr.state = \"Committed\"\n    assert_raises(ActiveRecord::RecordInvalid) do\n      cr.save!\n    end\n  end\n\n  [\n    [true, {\"1\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [true, {\"9000\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [true, {\"65535\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    # invalid ports:\n    [false, {\"\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [false, {\"0\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [false, {\"1e4\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [false, {\"-1\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [false, {\"bad\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [false, {\"0x101f\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [false, {\":9000\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [false, {\"0.0.0.0:9000\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [false, {\"localhost:9000\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [false, {\"9000000\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [false, {\"65536\" => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    # not a hash:\n    [false, {\"9000\" => nil}],\n    [false, {\"9000\" => \"\"}],\n    [false, {\"9000\" => []}],\n    # missing/invalid arguments:\n    [false, {\"9000\" => {}}],\n    [false, {\"9000\" => {\"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [false, {\"9000\" => {\"label\" => \"stuff\", \"initial_path\" => \"\", \"access\" => \"invalid\"}}],\n    [false, {\"9000\" => {\"initial_path\" => \"\", \"access\" => \"public\"}}],\n    [false, {\"9000\" => {\"initial_path\" => \"\", \"access\" => \"public\", \"label\" => \"\"}}],\n    [false, {\"9000\" => {\"label\" => \"stuff\", \"access\" => \"public\"}}],\n    # 
non-string key: (note non-string keys are all converted to\n    # strings before they hit our validations, so we can't actually\n    # test a numeric key here)\n    [false, {[\"9000\"] => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [false, {nil => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n    [false, {false => {\"access\" => \"public\", \"label\" => \"stuff\", \"initial_path\" => \"\"}}],\n  ].each do |ok, pp_spec|\n    test \"published_ports validation for #{pp_spec}\" do\n      set_user_from_auth :active\n      cr = create_minimal_req!\n      cr.use_existing = false\n      cr.service = true\n      cr.published_ports = pp_spec\n      if ok\n        assert cr.save!\n      else\n        assert_raises(ActiveRecord::RecordInvalid) do\n          cr.save!\n        end\n      end\n    end\n  end\n\n  test \"container request in a project with trash_at in the future\" do\n    # Tests edge case where a container request is created in a\n    # project which has trash_at set in the future.\n    #\n    # A user actually encountered this in the wild: they created a\n    # temporary project to run some tests and set it to expire\n    # automatically as a cleanup operation.  However, because of bug\n    # #22768, the containers were assigned priority 0.\n    #\n    # This tests that the behavior now works as intended, which is that\n    # the container has nonzero priority while the project remains live,\n    # and then goes to zero once trash_at has passed.\n\n    set_user_from_auth :active\n\n    project = Group.create!(group_class: \"project\", name: \"trashed_project\", trash_at: Time.now+5.minutes)\n\n    cr = create_minimal_req!({state: \"Committed\", priority: 500, owner_uuid: project.uuid})\n\n    assert_equal 500, cr.priority\n\n    c = Container.find_by_uuid cr.container_uuid\n\n    # Nonzero priority, which means runnable, because the project\n    # isn't trashed yet\n    assert_operator c.priority, :>, 0\n\n    project.trash_at = Time.now\n    project.save!\n\n    c.reload\n\n    # Project is now trashed, so the container has zero priority,\n    # which means it won't run and will be cancelled if it was already\n    # running.\n    assert_equal 0, c.priority\n  end\n\n  test \"Skip cascading priority update when container is already finished\" do\n    initial_logger = ActiveRecord::Base.logger\n    begin\n      logs = StringIO.new\n      ActiveRecord::Base.logger = Logger.new(logs)\n\n      set_user_from_auth :active\n      cr = create_minimal_req!(priority: 5, state: ContainerRequest::Committed)\n      cr.reload\n      assert_equal ContainerRequest::Committed, cr.state\n\n      # When reusing a *queued* container, we should see log entries\n      # about doing the cascading priority update.\n      #\n      # (If we don't see them here, that means we're not reading logs\n      # properly, and their absence in the finished-container case\n      # below won't really confirm anything.)\n      logs.truncate(0)\n      cr2 = create_minimal_req!(priority: 1, state: ContainerRequest::Committed)\n      assert_equal cr.container_uuid, cr2.container_uuid\n      assert_not_nil(logs.string =~ /select_for_update_priorities/)\n      assert_not_nil(logs.string =~ /update containers set priority=/)\n      assert_not_nil(logs.string =~ /SELECT .* FROM \"containers\" .* FOR UPDATE/, \"Should have logged 'select ... from containers ... 
for update'\")\n\n      run_container(cr)\n      cr.reload\n      assert_equal ContainerRequest::Final, cr.state\n\n      # When reusing a *completed* container, we should *not* see log\n      # entries about doing the cascading priority update.\n      logs = StringIO.new\n      ActiveRecord::Base.logger = Logger.new(logs)\n      cr3 = create_minimal_req!(priority: 2, state: ContainerRequest::Uncommitted)\n      cr3.update!(priority: 3, state: ContainerRequest::Committed)\n      assert_equal cr.container_uuid, cr3.container_uuid\n      assert_equal ContainerRequest::Final, cr3.state\n      # (note we don't use assert_match here, because that prints an\n      # extremely long escaped multiline string when it fails)\n      assert_nil(logs.string =~ /select_for_update_priorities/, \"Should have skipped select_for_update_priorities\")\n      assert_nil(logs.string =~ /update containers set priority=/, \"Should have skipped 'update containers set priority=...'\")\n      assert_nil(logs.string =~ /SELECT .* FROM \"containers\" .* FOR UPDATE/, \"Should have skipped 'select ... from containers ... for update'\")\n    ensure\n      ActiveRecord::Base.logger = initial_logger\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/container_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\nrequire 'helpers/container_test_helper'\n\nclass ContainerTest < ActiveSupport::TestCase\n  include DbCurrentTime\n  include ContainerTestHelper\n\n  DEFAULT_ATTRS = {\n    command: ['echo', 'foo'],\n    container_image: 'fa3c1a9cb6783f85f2ecda037e07b8c3+167',\n    output_path: '/tmp',\n    priority: 1,\n    runtime_constraints: {\"vcpus\" => 1, \"ram\" => 1, \"cuda\" => {\"device_count\":0, \"driver_version\": \"\", \"hardware_capability\": \"\"}},\n    mounts: {\n      \"/tmp\" => {\n        \"kind\" => \"tmp\",\n        \"capacity\" => 1000000,\n      },\n    },\n  }\n\n  REUSABLE_COMMON_ATTRS = {\n    container_image: \"9ae44d5792468c58bcf85ce7353c7027+124\",\n    cwd: \"/test\",\n    command: [\"echo\", \"hello\"],\n    output_path: \"/test\",\n    output_glob: [],\n    runtime_constraints: {\n      \"API\" => false,\n      \"keep_cache_disk\" => 0,\n      \"keep_cache_ram\" => 0,\n      \"ram\" => 12000000000,\n      \"vcpus\" => 4\n    },\n    mounts: {\n      \"/test\" => {\"kind\" => \"json\"},\n    },\n    environment: {\n      \"var\" => \"val\",\n    },\n    secret_mounts: {},\n    runtime_user_uuid: \"zzzzz-tpzed-xurymjxw79nv3jz\",\n    runtime_auth_scopes: [\"all\"],\n    scheduling_parameters: {},\n  }\n\n  REUSABLE_ATTRS_SLIM = {\n    command: [\"echo\", \"slim\"],\n    container_image: \"9ae44d5792468c58bcf85ce7353c7027+124\",\n    cwd: \"/test\",\n    environment: {},\n    mounts: {\n      \"/test\" => {\n        \"kind\" => \"tmp\",\n        \"capacity\" => 1000000,\n      },\n    },\n    output_path: \"/test\",\n    output_glob: [],\n    runtime_auth_scopes: [\"all\"],\n    runtime_constraints: {\n      \"API\" => false,\n      \"keep_cache_disk\" => 0,\n      \"keep_cache_ram\" => 0,\n      \"ram\" => 8 << 30,\n      \"vcpus\" => 4\n    },\n    runtime_user_uuid: \"zzzzz-tpzed-xurymjxw79nv3jz\",\n    secret_mounts: {},\n    scheduling_parameters: {},\n  }\n\n  def request_only attrs\n    attrs.reject {|k| [:runtime_user_uuid, :runtime_auth_scopes].include? 
k}\n  end\n\n  def minimal_new attrs={}\n    cr = ContainerRequest.new request_only(DEFAULT_ATTRS.merge(attrs))\n    cr.state = ContainerRequest::Committed\n    cr.save!\n    c = Container.find_by_uuid cr.container_uuid\n    assert_not_nil c\n    return c, cr\n  end\n\n  def check_illegal_updates c, bad_updates\n    bad_updates.each do |u|\n      refute c.update(u), u.inspect\n      refute c.valid?, u.inspect\n      c.reload\n    end\n  end\n\n  def check_illegal_modify c\n    check_illegal_updates c, [{command: [\"echo\", \"bar\"]},\n                              {container_image: \"arvados/apitestfixture:june10\"},\n                              {cwd: \"/tmp2\"},\n                              {environment: {\"FOO\" => \"BAR\"}},\n                              {mounts: {\"/FOO\" => {\"kind\" => \"tmp\"}}},\n                              {output_path: \"/tmp3\"},\n                              {locked_by_uuid: api_client_authorizations(:admin).uuid},\n                              {auth_uuid: api_client_authorizations(:system_user).uuid},\n                              {runtime_constraints: {\"ram\" => 1234567}}]\n  end\n\n  def check_bogus_states c\n    check_illegal_updates c, [{state: nil},\n                              {state: \"Flubber\"}]\n  end\n\n  def check_no_change_from_cancelled c\n    check_illegal_modify c\n    check_bogus_states c\n    check_illegal_updates c, [{ priority: 3 },\n                              { state: Container::Queued },\n                              { state: Container::Locked },\n                              { state: Container::Running },\n                              { state: Container::Complete }]\n  end\n\n  test \"Container create\" do\n    act_as_system_user do\n      c, _ = minimal_new(environment: {},\n                         mounts: {\"/tmp\" => {\"kind\" => \"tmp\"}},\n                         output_path: \"/tmp\",\n                         priority: 1,\n                         runtime_constraints: {\"vcpus\" => 1, \"ram\" => 1})\n\n      check_illegal_modify c\n      check_bogus_states c\n\n      c.reload\n      c.priority = 2\n      c.save!\n    end\n  end\n\n  test \"Container valid priority\" do\n    act_as_system_user do\n      c, _ = minimal_new(environment: {},\n                         mounts: {\"/tmp\" => {\"kind\" => \"tmp\"}},\n                         output_path: \"/tmp\",\n                         priority: 1,\n                         runtime_constraints: {\"vcpus\" => 1, \"ram\" => 1})\n\n      assert_raises(ActiveRecord::RecordInvalid) do\n        c.priority = -1\n        c.save!\n      end\n\n      c.priority = 0\n      c.save!\n\n      c.priority = 1\n      c.save!\n\n      c.priority = 500\n      c.save!\n\n      c.priority = 999\n      c.save!\n\n      c.priority = 1000\n      c.save!\n\n      c.priority = 1000 << 50\n      c.save!\n    end\n  end\n\n  test \"Container runtime_status data types\" do\n    set_user_from_auth :active\n    attrs = {\n      environment: {},\n      mounts: {\"/tmp\" => {\"kind\" => \"tmp\"}},\n      output_path: \"/tmp\",\n      priority: 1,\n      runtime_constraints: {\"vcpus\" => 1, \"ram\" => 1}\n    }\n    c, _ = minimal_new(attrs)\n    assert_equal c.runtime_status, {}\n    assert_equal Container::Queued, c.state\n\n    set_user_from_auth :system_user\n    c.update! state: Container::Locked\n    c.update! 
state: Container::Running\n\n    [\n      'error', 'errorDetail', 'warning', 'warningDetail', 'activity'\n    ].each do |k|\n      # String type is allowed\n      string_val = 'A string is accepted'\n      c.update! runtime_status: {k => string_val}\n      assert_equal string_val, c.runtime_status[k]\n\n      # Other types aren't allowed\n      [\n        42, false, [], {}, nil\n      ].each do |unallowed_val|\n        assert_raises ActiveRecord::RecordInvalid do\n          c.update! runtime_status: {k => unallowed_val}\n        end\n      end\n    end\n  end\n\n  test \"Container runtime_status updates\" do\n    set_user_from_auth :active\n    attrs = {\n      environment: {},\n      mounts: {\"/tmp\" => {\"kind\" => \"tmp\"}},\n      output_path: \"/tmp\",\n      priority: 1,\n      runtime_constraints: {\"vcpus\" => 1, \"ram\" => 1}\n    }\n    c1, _ = minimal_new(attrs)\n    assert_equal c1.runtime_status, {}\n\n    assert_equal Container::Queued, c1.state\n    assert_raises ArvadosModel::PermissionDeniedError do\n      c1.update! runtime_status: {'error' => 'Oops!'}\n    end\n\n    set_user_from_auth :system_user\n\n    # Allow updates when state = Locked\n    c1.update! state: Container::Locked\n    c1.update! runtime_status: {'error' => 'Oops!'}\n    assert c1.runtime_status.key? 'error'\n\n    # Reset when transitioning from Locked to Queued\n    c1.update! state: Container::Queued\n    assert_equal c1.runtime_status, {}\n\n    # Allow updates when state = Running\n    c1.update! state: Container::Locked\n    c1.update! state: Container::Running\n    c1.update! runtime_status: {'error' => 'Oops!'}\n    assert c1.runtime_status.key? 'error'\n\n    # Don't allow updates on other states\n    c1.update! state: Container::Complete\n    assert_raises ActiveRecord::RecordInvalid do\n      c1.update! runtime_status: {'error' => 'Some other error'}\n    end\n\n    set_user_from_auth :active\n    c2, _ = minimal_new(attrs)\n    assert_equal c2.runtime_status, {}\n    set_user_from_auth :system_user\n    c2.update! state: Container::Locked\n    c2.update! state: Container::Running\n    c2.update! state: Container::Cancelled\n    assert_raises ActiveRecord::RecordInvalid do\n      c2.update! 
runtime_status: {'error' => 'Oops!'}\n    end\n  end\n\n  test \"Container serialized hash attributes sorted before save\" do\n    set_user_from_auth :active\n    env = {\"C\" => \"3\", \"B\" => \"2\", \"A\" => \"1\"}\n    m = DEFAULT_ATTRS[:mounts].merge({\"/F\" => {\"kind\" => \"tmp\"}, \"/E\" => {\"kind\" => \"tmp\"}, \"/D\" => {\"kind\" => \"tmp\"}})\n    rc = {\"vcpus\" => 1, \"ram\" => 1, \"keep_cache_ram\" => 1, \"keep_cache_disk\" => 0, \"API\" => true, \"gpu\" => {\"stack\": \"\", \"device_count\":0, \"driver_version\": \"\", \"hardware_target\": [], \"vram\": 0}}\n    c, _ = minimal_new(environment: env, mounts: m, runtime_constraints: rc)\n    c.reload\n    assert_equal Container.deep_sort_hash(env).to_json, c.environment.to_json\n    assert_equal Container.deep_sort_hash(m).to_json, c.mounts.to_json\n    assert_equal Container.deep_sort_hash(rc).to_json, c.runtime_constraints.to_json\n  end\n\n  test 'deep_sort_hash on array of hashes' do\n    a = {'z' => [[{'a' => 'a', 'b' => 'b'}]]}\n    b = {'z' => [[{'b' => 'b', 'a' => 'a'}]]}\n    assert_equal Container.deep_sort_hash(a).to_json, Container.deep_sort_hash(b).to_json\n  end\n\n  test \"find_reusable method should select higher priority queued container\" do\n    Rails.configuration.Containers.LogReuseDecisions = true\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment:{\"var\" => \"queued\"}})\n    c_low_priority, _ = minimal_new(common_attrs.merge({use_existing:false, priority:1}))\n    c_high_priority, _ = minimal_new(common_attrs.merge({use_existing:false, priority:2}))\n    assert_not_equal c_low_priority.uuid, c_high_priority.uuid\n    assert_equal Container::Queued, c_low_priority.state\n    assert_equal Container::Queued, c_high_priority.state\n    reused = Container.find_reusable(common_attrs)\n    assert_not_nil reused\n    assert_equal reused.uuid, c_high_priority.uuid\n  end\n\n  test \"find_reusable method should select oldest completed container\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {\"var\" => \"complete\"}})\n    completed_attrs = {\n      state: Container::Complete,\n      exit_code: 0,\n      log: 'ea10d51bcf88862dbcc36eb292017dfd+45',\n      output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'\n    }\n\n    c_older, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    c_recent, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    assert_not_equal c_older.uuid, c_recent.uuid\n\n    set_user_from_auth :system_user\n    c_older.update!({state: Container::Locked})\n    c_older.update!({state: Container::Running})\n    c_older.update!(completed_attrs)\n\n    c_recent.update!({state: Container::Locked})\n    c_recent.update!({state: Container::Running})\n    c_recent.update!(completed_attrs)\n\n    reused = Container.find_reusable(common_attrs)\n    assert_not_nil reused\n    assert_equal reused.uuid, c_older.uuid\n  end\n\n  test \"find_reusable method should select oldest completed container when inconsistent outputs exist\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {\"var\" => \"complete\"}, priority: 1})\n    completed_attrs = {\n      state: Container::Complete,\n      exit_code: 0,\n      log: 'ea10d51bcf88862dbcc36eb292017dfd+45',\n    }\n\n    cr = ContainerRequest.new request_only(common_attrs)\n    cr.use_existing = false\n    cr.state = ContainerRequest::Committed\n    cr.save!\n    c_output1 = Container.where(uuid: 
cr.container_uuid).first\n\n    cr = ContainerRequest.new request_only(common_attrs)\n    cr.use_existing = false\n    cr.state = ContainerRequest::Committed\n    cr.save!\n    c_output2 = Container.where(uuid: cr.container_uuid).first\n\n    assert_not_equal c_output1.uuid, c_output2.uuid\n\n    set_user_from_auth :system_user\n\n    out1 = '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'\n    log1 = collections(:log_collection).portable_data_hash\n    c_output1.update!({state: Container::Locked})\n    c_output1.update!({state: Container::Running})\n    c_output1.update!(completed_attrs.merge({log: log1, output: out1}))\n\n    out2 = 'fa7aeb5140e2848d39b416daeef4ffc5+45'\n    c_output2.update!({state: Container::Locked})\n    c_output2.update!({state: Container::Running})\n    c_output2.update!(completed_attrs.merge({log: log1, output: out2}))\n\n    set_user_from_auth :active\n    reused = Container.resolve(ContainerRequest.new(request_only(common_attrs)))\n    assert_equal c_output1.uuid, reused.uuid\n  end\n\n  test \"find_reusable method should select running container by start date\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {\"var\" => \"running\"}})\n    c_slower, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    c_faster_started_first, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    c_faster_started_second, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    # Confirm the 3 container UUIDs are different.\n    assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length\n    set_user_from_auth :system_user\n    c_slower.update!({state: Container::Locked})\n    c_slower.update!({state: Container::Running,\n                                 progress: 0.1})\n    c_faster_started_first.update!({state: Container::Locked})\n    c_faster_started_first.update!({state: Container::Running,\n                                               progress: 0.15})\n    c_faster_started_second.update!({state: Container::Locked})\n    c_faster_started_second.update!({state: Container::Running,\n                                                progress: 0.15})\n    reused = Container.find_reusable(common_attrs)\n    assert_not_nil reused\n    # Selected container is the one that started first\n    assert_equal reused.uuid, c_faster_started_first.uuid\n  end\n\n  test \"find_reusable method should select running container by progress\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {\"var\" => \"running2\"}})\n    c_slower, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    c_faster_started_first, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    c_faster_started_second, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    # Confirm the 3 container UUIDs are different.\n    assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length\n    set_user_from_auth :system_user\n    c_slower.update!({state: Container::Locked})\n    c_slower.update!({state: Container::Running,\n                                 progress: 0.1})\n    c_faster_started_first.update!({state: Container::Locked})\n    c_faster_started_first.update!({state: Container::Running,\n                                               progress: 0.15})\n    c_faster_started_second.update!({state: Container::Locked})\n    c_faster_started_second.update!({state: Container::Running,\n                     
                           progress: 0.2})\n    reused = Container.find_reusable(common_attrs)\n    assert_not_nil reused\n    # Selected container is the one with most progress done\n    assert_equal reused.uuid, c_faster_started_second.uuid\n  end\n\n  test \"find_reusable method should select non-failing running container\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {\"var\" => \"running2\"}})\n    c_slower, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    c_faster_started_first, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    c_faster_started_second, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    # Confirm the 3 container UUIDs are different.\n    assert_equal 3, [c_slower.uuid, c_faster_started_first.uuid, c_faster_started_second.uuid].uniq.length\n    set_user_from_auth :system_user\n    c_slower.update!({state: Container::Locked})\n    c_slower.update!({state: Container::Running,\n                                 progress: 0.1})\n    c_faster_started_first.update!({state: Container::Locked})\n    c_faster_started_first.update!({state: Container::Running,\n                                               runtime_status: {'warning' => 'This is not an error'},\n                                               progress: 0.15})\n    c_faster_started_second.update!({state: Container::Locked})\n    assert_equal 0, Container.where(\"runtime_status->'error' is not null\").count\n    c_faster_started_second.update!({state: Container::Running,\n                                                runtime_status: {'error' => 'Something bad happened'},\n                                                progress: 0.2})\n    assert_equal 1, Container.where(\"runtime_status->'error' is not null\").count\n    reused = Container.find_reusable(common_attrs)\n    assert_not_nil reused\n    # Selected the non-failing container even if it's the one with less progress done\n    assert_equal reused.uuid, c_faster_started_first.uuid\n  end\n\n  test \"find_reusable method should select locked container most likely to start sooner\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {\"var\" => \"locked\"}})\n    c_low_priority, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    c_high_priority_older, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    c_high_priority_newer, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    # Confirm the 3 container UUIDs are different.\n    assert_equal 3, [c_low_priority.uuid, c_high_priority_older.uuid, c_high_priority_newer.uuid].uniq.length\n    set_user_from_auth :system_user\n    c_low_priority.update!({state: Container::Locked,\n                                       priority: 1})\n    c_high_priority_older.update!({state: Container::Locked,\n                                              priority: 2})\n    c_high_priority_newer.update!({state: Container::Locked,\n                                              priority: 2})\n    reused = Container.find_reusable(common_attrs)\n    assert_not_nil reused\n    assert_equal reused.uuid, c_high_priority_older.uuid\n  end\n\n  test \"find_reusable method should select running over failed container\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {\"var\" => \"failed_vs_running\"}})\n    c_failed, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    c_running, _ = 
minimal_new(common_attrs.merge({use_existing: false}))\n    assert_not_equal c_failed.uuid, c_running.uuid\n    set_user_from_auth :system_user\n    c_failed.update!({state: Container::Locked})\n    c_failed.update!({state: Container::Running})\n    c_failed.update!({state: Container::Complete,\n                                 exit_code: 42,\n                                 log: 'ea10d51bcf88862dbcc36eb292017dfd+45',\n                                 output: 'ea10d51bcf88862dbcc36eb292017dfd+45'})\n    c_running.update!({state: Container::Locked})\n    c_running.update!({state: Container::Running,\n                                  progress: 0.15})\n    reused = Container.find_reusable(common_attrs)\n    assert_not_nil reused\n    assert_equal reused.uuid, c_running.uuid\n  end\n\n  test \"find_reusable method should select complete over running container\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {\"var\" => \"completed_vs_running\"}})\n    c_completed, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    assert_not_equal c_completed.uuid, c_running.uuid\n    set_user_from_auth :system_user\n    c_completed.update!({state: Container::Locked})\n    c_completed.update!({state: Container::Running})\n    c_completed.update!({state: Container::Complete,\n                                    exit_code: 0,\n                                    log: 'ea10d51bcf88862dbcc36eb292017dfd+45',\n                                    output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'})\n    c_running.update!({state: Container::Locked})\n    c_running.update!({state: Container::Running,\n                                  progress: 0.15})\n    reused = Container.find_reusable(common_attrs)\n    assert_not_nil reused\n    assert_equal c_completed.uuid, reused.uuid\n  end\n\n  test \"find_reusable method should select running over locked container\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {\"var\" => \"running_vs_locked\"}})\n    c_locked, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    c_running, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    assert_not_equal c_running.uuid, c_locked.uuid\n    set_user_from_auth :system_user\n    c_locked.update!({state: Container::Locked})\n    c_running.update!({state: Container::Locked})\n    c_running.update!({state: Container::Running,\n                                  progress: 0.15})\n    reused = Container.find_reusable(common_attrs)\n    assert_not_nil reused\n    assert_equal reused.uuid, c_running.uuid\n  end\n\n  test \"find_reusable method should select locked over queued container\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {\"var\" => \"running_vs_locked\"}})\n    c_locked, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    c_queued, _ = minimal_new(common_attrs.merge({use_existing: false}))\n    assert_not_equal c_queued.uuid, c_locked.uuid\n    set_user_from_auth :system_user\n    c_locked.update!({state: Container::Locked})\n    reused = Container.find_reusable(common_attrs)\n    assert_not_nil reused\n    assert_equal reused.uuid, c_locked.uuid\n  end\n\n  test \"find_reusable method should not select failed container\" do\n    set_user_from_auth :active\n    attrs = REUSABLE_COMMON_ATTRS.merge({environment: {\"var\" => \"failed\"}})\n    c, _ = minimal_new(attrs)\n  
  set_user_from_auth :system_user\n    c.update!({state: Container::Locked})\n    c.update!({state: Container::Running})\n    c.update!({state: Container::Complete,\n                          exit_code: 33})\n    reused = Container.find_reusable(attrs)\n    assert_nil reused\n  end\n\n  [[false, false, true],\n   [false, true, true],\n   [true, false, false],\n   [true, true, true]\n  ].each do |c1_preemptible, c2_preemptible, should_reuse|\n    [[Container::Queued, 1],\n     [Container::Locked, 1],\n     [Container::Running, 0],   # not cancelled yet, but obviously will be soon\n    ].each do |c1_state, c1_priority|\n      test \"find_reusable for #{c2_preemptible ? '' : 'non-'}preemptible req should #{should_reuse ? '' : 'not'} reuse a #{c1_state} #{c1_preemptible ? '' : 'non-'}preemptible container with priority #{c1_priority}\" do\n        configure_preemptible_instance_type\n        set_user_from_auth :active\n        c1_attrs = REUSABLE_COMMON_ATTRS.merge({environment: {\"test\" => name, \"state\" => c1_state}, scheduling_parameters: {\"preemptible\" => c1_preemptible}})\n        c1, _ = minimal_new(c1_attrs)\n        set_user_from_auth :system_user\n        c1.update!({state: Container::Locked}) if c1_state != Container::Queued\n        c1.update!({state: Container::Running, priority: c1_priority}) if c1_state == Container::Running\n        c2_attrs = c1_attrs.merge({scheduling_parameters: {\"preemptible\" => c2_preemptible}})\n        reused = Container.find_reusable(c2_attrs)\n        if should_reuse && c1_priority > 0\n          assert_not_nil reused\n        else\n          assert_nil reused\n        end\n      end\n    end\n  end\n\n  test \"find_reusable with logging disabled\" do\n    set_user_from_auth :active\n    Rails.logger.expects(:info).never\n    Container.find_reusable(REUSABLE_COMMON_ATTRS)\n  end\n\n  test \"find_reusable with logging enabled\" do\n    set_user_from_auth :active\n    Rails.configuration.Containers.LogReuseDecisions = true\n    Rails.logger.expects(:info).at_least(3)\n    Container.find_reusable(REUSABLE_COMMON_ATTRS)\n  end\n\n  def runtime_token_attr tok\n    auth = api_client_authorizations(tok)\n    {runtime_user_uuid: User.find_by_id(auth.user_id).uuid,\n     runtime_auth_scopes: auth.scopes,\n     runtime_token: auth.token}\n  end\n\n  test \"find_reusable method with same runtime_token\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{\"var\" => \"queued\"}})\n    c1, _ = minimal_new(common_attrs.merge({runtime_token: api_client_authorizations(:container_runtime_token).token}))\n    assert_equal Container::Queued, c1.state\n    reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))\n    assert_not_nil reused\n    assert_equal reused.uuid, c1.uuid\n  end\n\n  test \"find_reusable method with different runtime_token, same user\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{\"var\" => \"queued\"}})\n    c1, _ = minimal_new(common_attrs.merge({runtime_token: api_client_authorizations(:crt_user).token}))\n    assert_equal Container::Queued, c1.state\n    reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))\n    assert_not_nil reused\n    assert_equal reused.uuid, c1.uuid\n  end\n\n  test \"find_reusable method with nil runtime_token, then runtime_token with same user\" do\n    set_user_from_auth 
:crt_user\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{\"var\" => \"queued\"}})\n    c1, _ = minimal_new(common_attrs)\n    assert_equal Container::Queued, c1.state\n    assert_equal users(:container_runtime_token_user).uuid, c1.runtime_user_uuid\n    reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))\n    assert_not_nil reused\n    assert_equal reused.uuid, c1.uuid\n  end\n\n  test \"find_reusable method with different runtime_token, different user\" do\n    set_user_from_auth :crt_user\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{\"var\" => \"queued\"}})\n    c1, _ = minimal_new(common_attrs.merge({runtime_token: api_client_authorizations(:active).token}))\n    assert_equal Container::Queued, c1.state\n    reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))\n    # See #14584\n    assert_not_nil reused\n    assert_equal c1.uuid, reused.uuid\n  end\n\n  test \"find_reusable method with nil runtime_token, then runtime_token with different user\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{\"var\" => \"queued\"}})\n    c1, _ = minimal_new(common_attrs.merge({runtime_token: nil}))\n    assert_equal Container::Queued, c1.state\n    reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))\n    # See #14584\n    assert_not_nil reused\n    assert_equal c1.uuid, reused.uuid\n  end\n\n  test \"find_reusable method with different runtime_token, different scope, same user\" do\n    set_user_from_auth :active\n    common_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{\"var\" => \"queued\"}})\n    c1, _ = minimal_new(common_attrs.merge({runtime_token: api_client_authorizations(:runtime_token_limited_scope).token}))\n    assert_equal Container::Queued, c1.state\n    reused = Container.find_reusable(common_attrs.merge(runtime_token_attr(:container_runtime_token)))\n    # See #14584\n    assert_not_nil reused\n    assert_equal c1.uuid, reused.uuid\n  end\n\n  test \"find_reusable method with cuda\" do\n    set_user_from_auth :active\n    # No cuda\n    no_cuda_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{\"var\" => \"queued\"},\n                                                runtime_constraints: {\"vcpus\" => 1, \"ram\" => 1, \"keep_cache_disk\"=>0, \"keep_cache_ram\"=>268435456, \"API\" => false,\n                                                                      \"cuda\" => {\"device_count\" => 0, \"driver_version\" => \"\", \"hardware_capability\" => \"\"}},})\n    c1, _ = minimal_new(no_cuda_attrs)\n    assert_equal Container::Queued, c1.state\n\n    # has cuda\n    cuda_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{\"var\" => \"queued\"},\n                                                runtime_constraints: {\"vcpus\" => 1, \"ram\" => 1, \"keep_cache_disk\"=>0, \"keep_cache_ram\"=>268435456, \"API\" => false,\n                                                                      \"cuda\" => {\"device_count\" => 1, \"driver_version\" => \"11.0\", \"hardware_capability\" => \"9.0\"}},})\n    c2, _ = minimal_new(cuda_attrs)\n    assert_equal Container::Queued, c2.state\n\n    no_cuda_attrs[:runtime_constraints] = 
Container.resolve_runtime_constraints(no_cuda_attrs[:runtime_constraints])\n    cuda_attrs[:runtime_constraints] = Container.resolve_runtime_constraints(cuda_attrs[:runtime_constraints])\n\n    # should find the no cuda one\n    reused = Container.find_reusable(no_cuda_attrs)\n    assert_not_nil reused\n    assert_equal reused.uuid, c1.uuid\n\n    # should find the cuda one\n    reused = Container.find_reusable(cuda_attrs)\n    assert_not_nil reused\n    assert_equal reused.uuid, c2.uuid\n  end\n\n  test \"find_reusable with legacy cuda\" do\n    set_user_from_auth :active\n\n    fixture = containers(:legacy_cuda_container)\n    req = ContainerRequest.create!(\n      command: fixture.command,\n      cwd: fixture.cwd,\n      environment: fixture.environment,\n      output_path: fixture.output_path,\n      output_glob: fixture.output_glob,\n      container_image: fixture.container_image,\n      mounts: fixture.mounts,\n      runtime_constraints: {\n        \"cuda\" => {\n          \"device_count\" => 1,\n          \"driver_version\" => \"11.0\",\n          \"hardware_capability\" => \"9.0\",\n        },\n        \"ram\" => 12000000000,\n        \"vcpus\" => 4,\n      },\n      scheduling_parameters: fixture.scheduling_parameters,\n      secret_mounts: fixture.secret_mounts,\n    )\n\n    Rails.configuration.Containers.LogReuseDecisions = true\n    # should find the gpu one\n    ctr = Container.resolve(req)\n    assert_equal ctr.uuid, containers(:legacy_cuda_container).uuid\n  end\n\n  test \"find_reusable method with gpu\" do\n    set_user_from_auth :active\n    # No gpu\n    no_gpu_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{\"var\" => \"queued\"},\n                                                runtime_constraints: {\"vcpus\" => 1, \"ram\" => 1, \"keep_cache_disk\"=>0, \"keep_cache_ram\"=>268435456, \"API\" => false,\n                                                                      \"gpu\" => {\"device_count\" => 0, \"driver_version\" => \"\",\n                                                                                \"hardware_target\" => [], \"stack\" => \"\", \"vram\" => 0}},})\n    c1, _ = minimal_new(no_gpu_attrs)\n    assert_equal Container::Queued, c1.state\n\n    # wants gpu\n    gpu_attrs = REUSABLE_COMMON_ATTRS.merge({use_existing:false, priority:1, environment:{\"var\" => \"queued\"},\n                                                runtime_constraints: {\"vcpus\" => 1, \"ram\" => 1, \"keep_cache_disk\"=>0, \"keep_cache_ram\"=>268435456, \"API\" => false,\n                                                                      \"gpu\" => {\"device_count\" => 1, \"driver_version\" => \"11.0\",\n                                                                                \"hardware_target\" => [\"9.0\"], \"stack\" => \"cuda\",\n                                                                                \"vram\" => 2000000000}},})\n    c2, _ = minimal_new(gpu_attrs)\n    assert_equal Container::Queued, c2.state\n\n    no_gpu_attrs[:runtime_constraints] = Container.resolve_runtime_constraints(no_gpu_attrs[:runtime_constraints])\n    gpu_attrs[:runtime_constraints] = Container.resolve_runtime_constraints(gpu_attrs[:runtime_constraints])\n\n    # should find the no gpu one\n    reused = Container.find_reusable(no_gpu_attrs)\n    assert_not_nil reused\n    assert_equal reused.uuid, c1.uuid\n\n    # should find the gpu one\n    reused = Container.find_reusable(gpu_attrs)\n    assert_not_nil reused\n    assert_equal reused.uuid, 
c2.uuid\n  end\n\n  test \"Container running\" do\n    set_user_from_auth :active\n    c, _ = minimal_new priority: 1\n\n    set_user_from_auth :system_user\n    check_illegal_updates c, [{state: Container::Running},\n                              {state: Container::Complete}]\n\n    c.lock\n    c.update! state: Container::Running\n\n    check_illegal_modify c\n    check_bogus_states c\n\n    check_illegal_updates c, [{state: Container::Queued}]\n    c.reload\n\n    c.update! priority: 3\n  end\n\n  test \"Lock and unlock\" do\n    set_user_from_auth :active\n    c, cr = minimal_new priority: 0\n\n    set_user_from_auth :system_user\n    assert_equal Container::Queued, c.state\n\n    assert_raise(ArvadosModel::LockFailedError) do\n      # \"no priority\"\n      c.lock\n    end\n    c.reload\n    assert cr.update priority: 1\n\n    refute c.update(state: Container::Running), \"not locked\"\n    c.reload\n    refute c.update(state: Container::Complete), \"not locked\"\n    c.reload\n\n    assert c.lock, show_errors(c)\n    assert c.locked_by_uuid\n    assert c.auth_uuid\n\n    assert_raise(ArvadosModel::LockFailedError) {c.lock}\n    c.reload\n\n    assert c.unlock, show_errors(c)\n    refute c.locked_by_uuid\n    refute c.auth_uuid\n\n    refute c.update(state: Container::Running), \"not locked\"\n    c.reload\n    refute c.locked_by_uuid\n    refute c.auth_uuid\n\n    assert c.lock, show_errors(c)\n    assert c.update(state: Container::Running), show_errors(c)\n    assert c.locked_by_uuid\n    assert c.auth_uuid\n\n    auth_uuid_was = c.auth_uuid\n\n    assert_raise(ArvadosModel::LockFailedError) do\n      # Running to Locked is not allowed\n      c.lock\n    end\n    c.reload\n    assert_raise(ArvadosModel::InvalidStateTransitionError) do\n      # Running to Queued is not allowed\n      c.unlock\n    end\n    c.reload\n\n    assert c.update(state: Container::Complete), show_errors(c)\n    refute c.locked_by_uuid\n    refute c.auth_uuid\n\n    auth_exp = ApiClientAuthorization.find_by_uuid(auth_uuid_was).expires_at\n    assert_operator auth_exp, :<, db_current_time\n\n    assert_nil ApiClientAuthorization.validate(token: ApiClientAuthorization.find_by_uuid(auth_uuid_was).token)\n  end\n\n  test \"Exceed maximum lock-unlock cycles\" do\n    Rails.configuration.Containers.MaxDispatchAttempts = 3\n\n    set_user_from_auth :active\n    c, cr = minimal_new\n\n    set_user_from_auth :system_user\n    assert_equal Container::Queued, c.state\n    assert_equal 0, c.lock_count\n\n    c.lock\n    c.reload\n    assert_equal 1, c.lock_count\n    assert_equal Container::Locked, c.state\n\n    c.unlock\n    c.reload\n    assert_equal 1, c.lock_count\n    assert_equal Container::Queued, c.state\n\n    c.lock\n    c.reload\n    assert_equal 2, c.lock_count\n    assert_equal Container::Locked, c.state\n\n    c.unlock\n    c.reload\n    assert_equal 2, c.lock_count\n    assert_equal Container::Queued, c.state\n\n    c.lock\n    c.reload\n    assert_equal 3, c.lock_count\n    assert_equal Container::Locked, c.state\n\n    c.unlock\n    c.reload\n    assert_equal 3, c.lock_count\n    assert_equal Container::Cancelled, c.state\n\n    assert_raise(ArvadosModel::LockFailedError) do\n      # Cancelled to Locked is not allowed\n      c.lock\n    end\n  end\n\n  test \"Container queued cancel\" do\n    set_user_from_auth :active\n    c, cr = minimal_new({container_count_max: 1})\n    set_user_from_auth :system_user\n    assert c.update(state: Container::Cancelled), show_errors(c)\n    check_no_change_from_cancelled 
c\n    cr.reload\n    assert_equal ContainerRequest::Final, cr.state\n  end\n\n  test \"Container queued count\" do\n    assert_equal 1, Container.readable_by(users(:active)).where(state: \"Queued\").count\n  end\n\n  test \"Containers with no matching request are readable by admin\" do\n    uuids = Container.includes('container_requests').where(container_requests: {uuid: nil}).collect(&:uuid)\n    assert_not_empty uuids\n    assert_empty Container.readable_by(users(:active)).where(uuid: uuids)\n    assert_not_empty Container.readable_by(users(:admin)).where(uuid: uuids)\n    assert_equal uuids.count, Container.readable_by(users(:admin)).where(uuid: uuids).count\n  end\n\n  test \"Container locked cancel\" do\n    set_user_from_auth :active\n    c, _ = minimal_new\n    set_user_from_auth :system_user\n    assert c.lock, show_errors(c)\n    assert c.update(state: Container::Cancelled), show_errors(c)\n    check_no_change_from_cancelled c\n  end\n\n  test \"Container locked with non-expiring token\" do\n    Rails.configuration.API.TokenMaxLifetime = 1.hour\n    set_user_from_auth :active\n    c, _ = minimal_new\n    set_user_from_auth :system_user\n    assert c.lock, show_errors(c)\n    refute c.auth.nil?\n    assert c.auth.expires_at.nil?\n    assert c.auth.user_id == User.find_by_uuid(users(:active).uuid).id\n  end\n\n  test \"Container locked cancel with log\" do\n    set_user_from_auth :active\n    c, _ = minimal_new\n    set_user_from_auth :system_user\n    assert c.lock, show_errors(c)\n    assert c.update(\n             state: Container::Cancelled,\n             log: collections(:log_collection).portable_data_hash,\n           ), show_errors(c)\n    check_no_change_from_cancelled c\n  end\n\n  test \"Container running cancel\" do\n    set_user_from_auth :active\n    c, _ = minimal_new\n    set_user_from_auth :system_user\n    c.lock\n    c.update! state: Container::Running\n    c.update! 
state: Container::Cancelled\n    check_no_change_from_cancelled c\n  end\n\n  test \"Container create forbidden for non-admin\" do\n    set_user_from_auth :active_trustedclient\n    c = Container.new DEFAULT_ATTRS\n    c.environment = {}\n    c.mounts = {\"BAR\" => \"FOO\"}\n    c.output_path = \"/tmp\"\n    c.priority = 1\n    c.runtime_constraints = {}\n    assert_raises(ArvadosModel::PermissionDeniedError) do\n      c.save!\n    end\n  end\n\n  [\n    [Container::Queued, {state: Container::Locked}],\n    [Container::Queued, {state: Container::Running}],\n    [Container::Queued, {state: Container::Complete}],\n    [Container::Queued, {state: Container::Cancelled}],\n    [Container::Queued, {priority: 123456789}],\n    [Container::Queued, {runtime_status: {'error' => 'oops'}}],\n    [Container::Queued, {cwd: '/'}],\n    [Container::Locked, {state: Container::Running}],\n    [Container::Locked, {state: Container::Queued}],\n    [Container::Locked, {priority: 123456789}],\n    [Container::Locked, {runtime_status: {'error' => 'oops'}}],\n    [Container::Locked, {cwd: '/'}],\n    [Container::Running, {state: Container::Complete}],\n    [Container::Running, {state: Container::Cancelled}],\n    [Container::Running, {priority: 123456789}],\n    [Container::Running, {runtime_status: {'error' => 'oops'}}],\n    [Container::Running, {cwd: '/'}],\n    [Container::Running, {gateway_address: \"172.16.0.1:12345\"}],\n    [Container::Running, {interactive_session_started: true}],\n    [Container::Complete, {state: Container::Cancelled}],\n    [Container::Complete, {priority: 123456789}],\n    [Container::Complete, {runtime_status: {'error' => 'oops'}}],\n    [Container::Complete, {cwd: '/'}],\n    [Container::Cancelled, {cwd: '/'}],\n  ].each do |start_state, updates|\n    test \"Container update #{updates.inspect} when #{start_state} forbidden for non-admin\" do\n      set_user_from_auth :active\n      c, _ = minimal_new\n      if start_state != Container::Queued\n        set_user_from_auth :system_user\n        c.lock\n        if start_state != Container::Locked\n          c.update! state: Container::Running\n          if start_state != Container::Running\n            c.update! state: start_state\n          end\n        end\n      end\n      assert_equal c.state, start_state\n      set_user_from_auth :active\n      assert_raises(ArvadosModel::PermissionDeniedError) do\n        c.update! updates\n      end\n    end\n  end\n\n  test \"can only change exit code while running and at completion\" do\n    set_user_from_auth :active\n    c, _ = minimal_new\n    set_user_from_auth :system_user\n    c.lock\n    check_illegal_updates c, [{exit_code: 1}]\n    c.update! 
state: Container::Running\n    assert c.update(exit_code: 1)\n    assert c.update(exit_code: 1, state: Container::Complete)\n  end\n\n  test \"locked_by_uuid can update log when locked/running, and output when running\" do\n    set_user_from_auth :active\n    logcoll = collections(:container_log_collection)\n    c, cr1 = minimal_new\n    cr2 = ContainerRequest.new(DEFAULT_ATTRS)\n    cr2.state = ContainerRequest::Committed\n    act_as_user users(:active) do\n      cr2.save!\n    end\n    assert_equal cr1.container_uuid, cr2.container_uuid\n\n    logpdh_time1 = logcoll.portable_data_hash\n\n    set_user_from_auth :system_user\n    c.lock\n    assert_equal c.locked_by_uuid, Thread.current[:api_client_authorization].uuid\n    c.update!(log: logpdh_time1)\n    c.update!(state: Container::Running)\n    cr1.reload\n    cr2.reload\n    cr1log_uuid = cr1.log_uuid\n    cr2log_uuid = cr2.log_uuid\n    assert_not_nil cr1log_uuid\n    assert_not_nil cr2log_uuid\n    assert_not_equal logcoll.uuid, cr1log_uuid\n    assert_not_equal logcoll.uuid, cr2log_uuid\n    assert_not_equal cr1log_uuid, cr2log_uuid\n\n    logcoll.update!(manifest_text: logcoll.manifest_text + \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo.txt\\n\")\n    logpdh_time2 = logcoll.portable_data_hash\n\n    assert c.update(output: collections(:collection_owned_by_active).portable_data_hash)\n    assert c.update(log: logpdh_time2)\n    assert c.update(state: Container::Complete, log: logcoll.portable_data_hash)\n    c.reload\n    assert_equal collections(:collection_owned_by_active).portable_data_hash, c.output\n    assert_equal logpdh_time2, c.log\n    refute c.update(output: nil)\n    refute c.update(log: nil)\n    cr1.reload\n    cr2.reload\n    assert_equal cr1log_uuid, cr1.log_uuid\n    assert_equal cr2log_uuid, cr2.log_uuid\n    assert_equal 1, Collection.where(uuid: [cr1log_uuid, cr2log_uuid]).to_a.collect(&:portable_data_hash).uniq.length\n    assert_equal \". 8c12f5f5297b7337598170c6f531fcee+7882 acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 7882:3:foo.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n./log\\\\040for\\\\040container\\\\040#{cr1.container_uuid} 8c12f5f5297b7337598170c6f531fcee+7882 acbd18db4cc2f85cedef654fccc4a4d8+3 0:0:arv-mount.txt 0:1910:container.json 1910:1264:crunch-run.txt 3174:1005:crunchstat.txt 7882:3:foo.txt 4179:659:hoststat.txt 4838:2811:node-info.txt 7649:233:node.json 0:0:stderr.txt\n\", Collection.find_by_uuid(cr1log_uuid).manifest_text\n  end\n\n  [\"auth_uuid\", \"runtime_token\"].each do |tok|\n    test \"#{tok} can set output, progress, runtime_status, state, exit_code on running container -- but not log\" do\n      if tok == \"runtime_token\"\n        set_user_from_auth :spectator\n        c, _ = minimal_new(container_image: \"9ae44d5792468c58bcf85ce7353c7027+124\",\n                           runtime_token: api_client_authorizations(:active).token)\n      else\n        set_user_from_auth :active\n        c, _ = minimal_new\n      end\n      set_user_from_auth :system_user\n      c.lock\n      c.update! 
state: Container::Running\n\n      if tok == \"runtime_token\"\n        auth = ApiClientAuthorization.validate(token: c.runtime_token)\n        Thread.current[:api_client_authorization] = auth\n        Thread.current[:token] = auth.token\n        Thread.current[:user] = auth.user\n      else\n        auth = ApiClientAuthorization.find_by_uuid(c.auth_uuid)\n        Thread.current[:api_client_authorization] = auth\n        Thread.current[:token] = auth.token\n        Thread.current[:user] = auth.user\n      end\n\n      assert c.update(gateway_address: \"127.0.0.1:9\")\n      assert c.update(output: collections(:collection_owned_by_active).portable_data_hash)\n      assert c.update(runtime_status: {'warning' => 'something happened'})\n      assert c.update(progress: 0.5)\n      assert c.update(exit_code: 0)\n      refute c.update(log: collections(:log_collection).portable_data_hash)\n      c.reload\n      assert c.update(state: Container::Complete, exit_code: 0)\n    end\n  end\n\n  test \"not allowed to set output that is not readable by current user\" do\n    set_user_from_auth :active\n    c, _ = minimal_new\n    set_user_from_auth :system_user\n    c.lock\n    c.update! state: Container::Running\n\n    Thread.current[:api_client_authorization] = ApiClientAuthorization.find_by_uuid(c.auth_uuid)\n    Thread.current[:user] = User.find_by_id(Thread.current[:api_client_authorization].user_id)\n\n    assert_raises ActiveRecord::RecordInvalid do\n      c.update! output: collections(:collection_not_readable_by_active).portable_data_hash\n    end\n  end\n\n  test \"other token cannot set output on running container\" do\n    set_user_from_auth :active\n    c, _ = minimal_new\n    set_user_from_auth :system_user\n    c.lock\n    c.update! state: Container::Running\n\n    set_user_from_auth :running_to_be_deleted_container_auth\n    assert_raises(ArvadosModel::PermissionDeniedError) do\n      c.update(output: collections(:foo_file).portable_data_hash)\n    end\n  end\n\n  test \"can set trashed output on running container\" do\n    set_user_from_auth :active\n    c, _ = minimal_new\n    set_user_from_auth :system_user\n    c.lock\n    c.update! state: Container::Running\n\n    output = Collection.find_by_uuid('zzzzz-4zz18-mto52zx1s7sn3jk')\n\n    assert output.is_trashed\n    assert c.update output: output.portable_data_hash\n    assert c.update! state: Container::Complete\n  end\n\n  test \"not allowed to set trashed output that is not readable by current user\" do\n    set_user_from_auth :active\n    c, _ = minimal_new\n    set_user_from_auth :system_user\n    c.lock\n    c.update! state: Container::Running\n\n    output = Collection.find_by_uuid('zzzzz-4zz18-mto52zx1s7sn3jr')\n\n    Thread.current[:api_client_authorization] = ApiClientAuthorization.find_by_uuid(c.auth_uuid)\n    Thread.current[:user] = User.find_by_id(Thread.current[:api_client_authorization].user_id)\n\n    assert_raises ActiveRecord::RecordInvalid do\n      c.update! 
output: output.portable_data_hash\n    end\n  end\n\n  test \"user cannot delete\" do\n    set_user_from_auth :active\n    c, _ = minimal_new\n    assert_raises ArvadosModel::PermissionDeniedError do\n      c.destroy\n    end\n    assert Container.find_by_uuid(c.uuid)\n  end\n\n  [\n    {state: Container::Complete, exit_code: 0, output: '1f4b0bc7583c2a7f9102c395f4ffc5e3+45'},\n    {state: Container::Cancelled},\n  ].each do |final_attrs|\n    test \"secret_mounts and runtime_token are null after container is #{final_attrs[:state]}\" do\n      set_user_from_auth :active\n      c, cr = minimal_new(secret_mounts: {'/secret' => {'kind' => 'text', 'content' => 'foo'}},\n                          container_count_max: 1, runtime_token: api_client_authorizations(:active).token)\n      set_user_from_auth :system_user\n      c.lock\n      c.update!(state: Container::Running)\n      c.reload\n      assert c.secret_mounts.has_key?('/secret')\n      assert_equal api_client_authorizations(:active).token, c.runtime_token\n\n      c.update!(final_attrs)\n      c.reload\n      assert_equal({}, c.secret_mounts)\n      assert_nil c.runtime_token\n      cr.reload\n      assert_equal({}, cr.secret_mounts)\n      assert_nil cr.runtime_token\n      assert_no_secrets_logged\n    end\n  end\n\n  def configure_preemptible_instance_type\n    Rails.configuration.InstanceTypes = ConfigLoader.to_OrderedOptions({\n      \"a1.small.pre\" => {\n        \"Preemptible\" => true,\n        \"Price\" => 0.1,\n        \"ProviderType\" => \"a1.small\",\n        \"VCPUs\" => 1,\n        \"RAM\" => 1000000000,\n      },\n    })\n  end\n\n  def vary_parameters(**kwargs)\n    # kwargs is a hash that maps parameters to an array of values.\n    # This function enumerates every possible hash where each key has one of\n    # the values from its array.\n    # The output keys are strings since that's what container hash attributes\n    # want.\n    # A nil value yields a hash without that key.\n    [[:_, nil]].product(\n      *kwargs.map { |(key, values)| [key.to_s].product(values) },\n    ).map { |param_pairs| Hash[param_pairs].compact }\n  end\n\n  def retry_with_scheduling_parameters(param_hashes)\n    set_user_from_auth :admin\n    containers = {}\n    requests = []\n    param_hashes.each do |scheduling_parameters|\n      container, request = minimal_new(scheduling_parameters: scheduling_parameters)\n      containers[container.uuid] = container\n      requests << request\n    end\n    refute(containers.empty?, \"buggy test: no scheduling parameters enumerated\")\n    assert_equal(1, containers.length)\n    _, container1 = containers.shift\n    container1.lock\n    container1.update!(state: Container::Cancelled)\n    container1.reload\n    request1 = requests.shift\n    request1.reload\n    assert_not_equal(container1.uuid, request1.container_uuid)\n    requests.each do |request|\n      request.reload\n      assert_equal(request1.container_uuid, request.container_uuid)\n    end\n    container2 = Container.find_by_uuid(request1.container_uuid)\n    assert_not_nil(container2)\n    return container2\n  end\n\n  preemptible_values = [true, false, nil]\n  preemptible_values.permutation(1).chain(\n    preemptible_values.product(preemptible_values),\n    preemptible_values.product(preemptible_values, preemptible_values),\n  ).each do |preemptible_a|\n    # If the first req has preemptible=true but a subsequent req\n    # doesn't, we want to avoid reusing the first container, so this\n    # test isn't appropriate.\n    next if preemptible_a[0] 
&&\n            ((preemptible_a.length > 1 && !preemptible_a[1]) ||\n             (preemptible_a.length > 2 && !preemptible_a[2]))\n    test \"retry requests scheduled with preemptible=#{preemptible_a}\" do\n      configure_preemptible_instance_type\n      param_hashes = vary_parameters(preemptible: preemptible_a)\n      container = retry_with_scheduling_parameters(param_hashes)\n      assert_equal(preemptible_a.all?,\n                   container.scheduling_parameters[\"preemptible\"] || false)\n    end\n  end\n\n  partition_values = [nil, [], [\"alpha\"], [\"alpha\", \"bravo\"], [\"bravo\", \"charlie\"]]\n  partition_values.permutation(1).chain(\n    partition_values.permutation(2),\n  ).each do |partitions_a|\n    test \"retry requests scheduled with partitions=#{partitions_a}\" do\n      param_hashes = vary_parameters(partitions: partitions_a)\n      container = retry_with_scheduling_parameters(param_hashes)\n      expected = if partitions_a.any? { |value| value.nil? or value.empty? }\n                   []\n                 else\n                   partitions_a.flatten.uniq\n                 end\n      actual = container.scheduling_parameters[\"partitions\"] || []\n      assert_equal(expected.sort, actual.sort)\n    end\n  end\n\n  runtime_values = [nil, 0, 1, 2, 3]\n  runtime_values.permutation(1).chain(\n    runtime_values.permutation(2),\n    runtime_values.permutation(3),\n  ).each do |max_run_time_a|\n    test \"retry requests scheduled with max_run_time=#{max_run_time_a}\" do\n      param_hashes = vary_parameters(max_run_time: max_run_time_a)\n      container = retry_with_scheduling_parameters(param_hashes)\n      expected = if max_run_time_a.any? { |value| value.nil? or value == 0 }\n                   0\n                 else\n                   max_run_time_a.max\n                 end\n      actual = container.scheduling_parameters[\"max_run_time\"] || 0\n      assert_equal(expected, actual)\n    end\n  end\n\n  test \"retry requests with multi-varied scheduling parameters\" do\n    configure_preemptible_instance_type\n    param_hashes = [{\n                     \"partitions\": [\"alpha\", \"bravo\"],\n                     \"preemptible\": false,\n                     \"max_run_time\": 10,\n                    }, {\n                     \"partitions\": [\"alpha\", \"charlie\"],\n                     \"max_run_time\": 20,\n                    }, {\n                     \"partitions\": [\"bravo\", \"charlie\"],\n                     \"preemptible\": true,\n                     \"max_run_time\": 30,\n                    }]\n    container = retry_with_scheduling_parameters(param_hashes)\n    actual = container.scheduling_parameters\n    assert_equal([\"alpha\", \"bravo\", \"charlie\"], actual[\"partitions\"]&.sort)\n    assert_equal(false, actual[\"preemptible\"] || false)\n    assert_equal(30, actual[\"max_run_time\"])\n  end\n\n  test \"retry requests with unset scheduling parameters\" do\n    configure_preemptible_instance_type\n    param_hashes = vary_parameters(\n      preemptible: [nil, true],\n      partitions: [nil, [\"alpha\"]],\n      max_run_time: [nil, 5],\n    )\n    container = retry_with_scheduling_parameters(param_hashes)\n    actual = container.scheduling_parameters\n    assert_equal([], actual[\"partitions\"] || [])\n    assert_equal(false, actual[\"preemptible\"] || false)\n    assert_equal(0, actual[\"max_run_time\"] || 0)\n  end\n\n  test \"retry requests with default scheduling parameters\" do\n    configure_preemptible_instance_type\n    param_hashes = 
vary_parameters(\n      preemptible: [false, true],\n      partitions: [[], [\"bravo\"]],\n      max_run_time: [0, 1],\n    )\n    container = retry_with_scheduling_parameters(param_hashes)\n    actual = container.scheduling_parameters\n    assert_equal([], actual[\"partitions\"] || [])\n    assert_equal(false, actual[\"preemptible\"] || false)\n    assert_equal(0, actual[\"max_run_time\"] || 0)\n  end\n\n  def run_container(request_params, final_attrs)\n    final_attrs[:state] ||= Container::Complete\n    if final_attrs[:state] == Container::Complete\n      final_attrs[:exit_code] ||= 0\n      final_attrs[:log] ||= collections(:log_collection).portable_data_hash\n      final_attrs[:output] ||= collections(:multilevel_collection_1).portable_data_hash\n    end\n    container, request = minimal_new(request_params)\n    container.lock\n    container.update!(state: Container::Running)\n    container.update!(final_attrs)\n    return container, request\n  end\n\n  def check_reuse_with_variations(default_keep_cache_ram, vary_attr, start_value, variations)\n    container_params = REUSABLE_ATTRS_SLIM.merge(vary_attr => start_value)\n    orig_default = Rails.configuration.Containers.DefaultKeepCacheRAM\n    begin\n      Rails.configuration.Containers.DefaultKeepCacheRAM = default_keep_cache_ram\n      set_user_from_auth :admin\n      expected, _ = run_container(container_params, {})\n      variations.each do |variation|\n        full_variation = REUSABLE_ATTRS_SLIM[vary_attr].merge(variation)\n        parameters = REUSABLE_ATTRS_SLIM.merge(vary_attr => full_variation)\n        actual = Container.find_reusable(parameters)\n        assert_equal(expected.uuid, actual&.uuid,\n                     \"request with #{vary_attr}=#{variation} did not reuse container\")\n      end\n    ensure\n      Rails.configuration.Containers.DefaultKeepCacheRAM = orig_default\n    end\n  end\n\n  # Test that we can reuse a container with a known keep_cache_ram constraint,\n  # no matter what keep_cache_* constraints the new request uses.\n  [0, 2 << 30, 4 << 30].product(\n    [0, 1],\n    [true, false],\n  ).each do |(default_keep_cache_ram, multiplier, keep_disk_constraint)|\n    test \"reuse request with DefaultKeepCacheRAM=#{default_keep_cache_ram}, keep_cache_ram*=#{multiplier}, keep_cache_disk=#{keep_disk_constraint}\" do\n      runtime_constraints = REUSABLE_ATTRS_SLIM[:runtime_constraints].merge(\n        \"keep_cache_ram\" => default_keep_cache_ram * multiplier,\n      )\n      if not keep_disk_constraint\n        # Simulate a container that predates keep_cache_disk by deleting\n        # the constraint entirely.\n        runtime_constraints.delete(\"keep_cache_disk\")\n      end\n      # Important values are:\n      # * 0\n      # * 2GiB, the minimum default keep_cache_disk\n      # * 8GiB, the default keep_cache_disk based on container ram\n      # * 32GiB, the maximum default keep_cache_disk\n      # Check these values and values in between.\n      vary_values = [0, 1, 2, 6, 8, 10, 32, 33].map { |v| v << 30 }.to_a\n      variations = vary_parameters(keep_cache_ram: vary_values)\n                     .chain(vary_parameters(keep_cache_disk: vary_values))\n      check_reuse_with_variations(\n        default_keep_cache_ram,\n        :runtime_constraints,\n        runtime_constraints,\n        variations,\n      )\n    end\n  end\n\n  # Test that we can reuse a container with a known keep_cache_disk constraint,\n  # no matter what keep_cache_* constraints the new request uses.\n  # keep_cache_disk values are the 
important values discussed in the test above.\n  [0, 2 << 30, 4 << 30]\n    .product([0, 2 << 30, 8 << 30, 32 << 30])\n    .each do |(default_keep_cache_ram, keep_cache_disk)|\n    test \"reuse request with DefaultKeepCacheRAM=#{default_keep_cache_ram} and keep_cache_disk=#{keep_cache_disk}\" do\n      runtime_constraints = REUSABLE_ATTRS_SLIM[:runtime_constraints].merge(\n        \"keep_cache_disk\" => keep_cache_disk,\n      )\n      vary_values = [0, 1, 2, 6, 8, 10, 32, 33].map { |v| v << 30 }.to_a\n      variations = vary_parameters(keep_cache_ram: vary_values)\n                     .chain(vary_parameters(keep_cache_disk: vary_values))\n      check_reuse_with_variations(\n        default_keep_cache_ram,\n        :runtime_constraints,\n        runtime_constraints,\n        variations,\n      )\n    end\n  end\n\n  # Test that a container request can reuse a container with an exactly\n  # matching keep_cache_* constraint, no matter what the defaults.\n  [0, 2 << 30, 4 << 30].product(\n    [\"keep_cache_disk\", \"keep_cache_ram\"],\n    [135790, 13 << 30, 135 << 30],\n  ).each do |(default_keep_cache_ram, constraint_key, constraint_value)|\n    test \"reuse request with #{constraint_key}=#{constraint_value} and DefaultKeepCacheRAM=#{default_keep_cache_ram}\" do\n      runtime_constraints = REUSABLE_ATTRS_SLIM[:runtime_constraints].merge(\n        constraint_key => constraint_value,\n      )\n      check_reuse_with_variations(\n        default_keep_cache_ram,\n        :runtime_constraints,\n        runtime_constraints,\n        [runtime_constraints],\n      )\n    end\n  end\n\n  test \"published_ports base_url when ExternalURL is wildcard\" do\n    Rails.configuration.Services.ContainerWebServices.ExternalURL = URI.parse(\"https://*.example.com/\")\n    set_user_from_auth :active\n    c, _ = minimal_new(\n         published_ports:\n           {\"1234\" => {\n              \"access\": \"public\",\n              \"label\": \"example\",\n              \"initial_path\": \"initial_path\"}})\n    set_user_from_auth :system_user\n    c.lock\n    c.update! state: Container::Running\n\n    c.reload\n    assert_equal \"https://#{c.uuid}-1234.example.com/\", c.published_ports[\"1234\"][\"base_url\"]\n    assert_equal \"https://#{c.uuid}-1234.example.com/initial_path\", c.published_ports[\"1234\"][\"initial_url\"]\n  end\n\n  test \"published_ports base_url when ExternalURL has port range\" do\n    Rails.configuration.Services.ContainerWebServices.ExternalURL = URI.parse(\"https://example.com/\")\n    Rails.configuration.Services.ContainerWebServices.ExternalPortMin = 2000\n    Rails.configuration.Services.ContainerWebServices.ExternalPortMax = 3000\n    set_user_from_auth :active\n    c, _ = minimal_new(\n         published_ports:\n           {\"1234\" => {\n              \"access\": \"public\",\n              \"label\": \"example\",\n              \"initial_path\": \"/initial_path\"},\n            \"9999\" => {\n              \"access\": \"private\",\n              \"label\": \"label\",\n              \"initial_path\": \"\"}})\n    set_user_from_auth :system_user\n    c.lock\n    c.update! 
state: Container::Running\n\n    c.reload\n    assert_equal \"https://example.com:2000/\", c.published_ports[\"1234\"][\"base_url\"]\n    assert_equal \"https://example.com:2000/initial_path\", c.published_ports[\"1234\"][\"initial_url\"]\n    assert_equal \"https://example.com:2001/\", c.published_ports[\"9999\"][\"base_url\"]\n    assert_equal \"https://example.com:2001/\", c.published_ports[\"9999\"][\"initial_url\"]\n    assert_equal [[1234,2000], [9999,2001]], assigned_ports_for_container(c.uuid)\n\n    c.update! state: Container::Cancelled\n\n    assert_equal [], assigned_ports_for_container(c.uuid)\n  end\n\n  def assigned_ports_for_container(uuid)\n    ActiveRecord::Base.connection.exec_query(\n      'select * from container_ports where container_uuid=$1',\n      '',\n      [uuid]).map do |row|\n      [row['container_port'], row['external_port']]\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/credential_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire \"test_helper\"\n\nclass CredentialTest < ActiveSupport::TestCase\n  setup do\n    @valid_attrs = {\n      name: \"My Credential\",\n      description: \"Test credential\",\n      credential_class: \"basic_auth\",\n      external_id: \"user123\",\n      secret: \"secret_value\",\n      expires_at: Time.now + 1.week\n    }\n  end\n\n  test \"credential requires required fields\" do\n    [:name, :credential_class, :external_id, :secret, :expires_at].each do |field|\n      attrs = @valid_attrs.dup\n      good_credential = Credential.new(attrs)\n      assert good_credential.valid?\n      attrs[field] = nil\n      bad_credential = Credential.new(attrs)\n      assert_not bad_credential.valid?\n    end\n  end\n\n  test \"credential name must not be empty or only spaces/tabs\" do\n  attrs = @valid_attrs.dup\n\n  [\"\", \"   \", \"\\t\\t\"].each do |bad_name|\n    credential = Credential.new(attrs.merge(name: bad_name))\n    assert_not credential.valid?\n    assert_includes credential.errors.full_messages, \"Name can't be blank\"\n  end\n\n  credential = Credential.new(attrs.merge(name: \"  valid name  \"))\n  assert credential.valid?\nend\n\n\n  test \"credential scopes defaults to empty array\" do\n    credential = Credential.new(@valid_attrs)\n    assert_equal [], credential.scopes\n  end\n\n  test \"credential scopes array must only contain strings\" do\n    credential = Credential.new(@valid_attrs)\n    assert credential.valid?\n\n    credential.scopes = []\n    assert credential.valid?\n\n    credential.scopes = [\"foo\", \"bar\"]\n    assert credential.valid?\n\n    credential.scopes = [\"foo\", 1]\n    assert_not credential.valid?\n\n    credential.scopes = [\"foo\", [\"bar\"]]\n    assert_not credential.valid?\n  end\n\n  test \"credential logged_attributes excludes secret\" do\n    credential = nil\n    act_as_system_user do\n      credential = Credential.create!(@valid_attrs.merge(owner_uuid: system_user_uuid))\n    end\n    logged_attrs = credential.logged_attributes\n    # required fields should always be logged\n    assert_includes logged_attrs, \"name\"\n    assert_includes logged_attrs, \"credential_class\"\n    assert_includes logged_attrs, \"external_id\"\n    assert_includes logged_attrs, \"expires_at\"\n\n    # secret should never be logged\n    refute logged_attrs.key?(\"secret\")\n  end\n\n  test \"credential ensure_owner_uuid_is_permitted sets owner to system_user for new record\" do\n    credential = Credential.new(@valid_attrs.merge(owner_uuid: SecureRandom.uuid))\n    system_uuid = Credential.system_user_uuid\n    assert credential.ensure_owner_uuid_is_permitted\n    assert_equal system_uuid, credential.owner_uuid\n  end\n\n  test \"credential ensure_owner_uuid_is_permitted raises if owner_uuid is not system user\" do\n    credential = nil\n    act_as_system_user do\n      credential = Credential.create!(@valid_attrs.merge(owner_uuid: system_user_uuid))\n    end\n    assert_nothing_raised do\n      credential.ensure_owner_uuid_is_permitted\n    end\n    credential.owner_uuid = \"some_other_uuid\"\n    assert_raises RuntimeError do\n      credential.ensure_owner_uuid_is_permitted\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/group_pdhs_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\nrequire 'group_pdhs'\n\n# NOTE: Migration 20190322174136_add_file_info_to_collection.rb\n# relies on this test. Change with caution!\nclass GroupPdhsTest < ActiveSupport::TestCase\n  test \"pdh_grouping_by_manifest_size\" do\n    batch_size_max = 200\n    pdhs_in = ['x1+30', 'x2+30', 'x3+201', 'x4+100', 'x5+100']\n    pdh_lambda = lambda { |last_pdh, &block|\n      pdhs = pdhs_in.select{|pdh| pdh > last_pdh} \n      pdhs.each do |p|\n        block.call(p)\n      end\n    }\n    batched_pdhs = []\n    GroupPdhs.group_pdhs_for_multiple_transactions(pdh_lambda, pdhs_in.size, batch_size_max, \"\") do |pdhs|\n      batched_pdhs << pdhs\n    end\n    expected = [['x1+30', 'x2+30'], ['x3+201'], ['x4+100', 'x5+100']]\n    assert_equal(batched_pdhs, expected)\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/group_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\nrequire 'fix_roles_projects'\n\nclass GroupTest < ActiveSupport::TestCase\n  include DbCurrentTime\n\n  test \"cannot set owner_uuid to object with existing ownership cycle\" do\n    set_user_from_auth :active_trustedclient\n\n    # First make sure we have lots of permission on the bad group by\n    # renaming it to \"{current name} is mine all mine\"\n    g = groups(:bad_group_has_ownership_cycle_b)\n    g.name += \" is mine all mine\"\n    assert g.save, \"active user should be able to modify group #{g.uuid}\"\n\n    # Use the group as the owner of a new object\n    s = Collection.\n      create(owner_uuid: groups(:bad_group_has_ownership_cycle_b).uuid)\n    assert s.valid?, \"ownership should pass validation #{s.errors.messages}\"\n    assert_equal false, s.save, \"should not save object with #{g.uuid} as owner\"\n\n    # Use the group as the new owner of an existing object\n    s = collections(:collection_owned_by_active)\n    s.owner_uuid = groups(:bad_group_has_ownership_cycle_b).uuid\n    assert s.valid?, \"ownership should pass validation\"\n    assert_equal false, s.save, \"should not save object with #{g.uuid} as owner\"\n  end\n\n  test \"cannot create a new ownership cycle\" do\n    set_user_from_auth :active_trustedclient\n\n    g_foo = Group.create!(name: \"foo\", group_class: \"project\")\n    g_bar = Group.create!(name: \"bar\", group_class: \"project\")\n\n    g_foo.owner_uuid = g_bar.uuid\n    assert g_foo.save, lambda { g_foo.errors.messages }\n    g_bar.owner_uuid = g_foo.uuid\n    assert g_bar.valid?, \"ownership cycle should not prevent validation\"\n    assert_equal false, g_bar.save, \"should not create an ownership loop\"\n    assert g_bar.errors.messages[:owner_uuid].join(\" \").match(/ownership cycle/)\n  end\n\n  test \"cannot create a single-object ownership cycle\" do\n    set_user_from_auth :active_trustedclient\n\n    g_foo = Group.create!(name: \"foo\", group_class: \"project\")\n    assert g_foo.save\n\n    # Ensure I have permission to manage this group even when its owner changes\n    perm_link = Link.create!(tail_uuid: users(:active).uuid,\n                            head_uuid: g_foo.uuid,\n                            link_class: 'permission',\n                            name: 'can_manage')\n    assert perm_link.save\n\n    g_foo.owner_uuid = g_foo.uuid\n    assert_equal false, g_foo.save, \"should not create an ownership loop\"\n    assert g_foo.errors.messages[:owner_uuid].join(\" \").match(/ownership cycle/)\n  end\n\n  test \"cannot create a group that is not a 'role' or 'project' or 'filter'\" do\n    set_user_from_auth :active_trustedclient\n\n    assert_raises(ActiveRecord::RecordInvalid) do\n      Group.create!(name: \"foo\")\n    end\n\n    assert_raises(ActiveRecord::RecordInvalid) do\n      Group.create!(name: \"foo\", group_class: \"\")\n    end\n\n    assert_raises(ActiveRecord::RecordInvalid) do\n      Group.create!(name: \"foo\", group_class: \"bogus\")\n    end\n  end\n\n  test \"cannot change group_class on an already created group\" do\n    set_user_from_auth :active_trustedclient\n    g = Group.create!(name: \"foo\", group_class: \"role\")\n    assert_raises(ActiveRecord::RecordInvalid) do\n      g.update!(group_class: \"project\")\n    end\n  end\n\n  test \"role cannot own things\" do\n    set_user_from_auth :active_trustedclient\n    role = Group.create!(name: \"foo\", group_class: 
\"role\")\n    assert_raises(ArvadosModel::PermissionDeniedError) do\n      Collection.create!(name: \"bzzz123\", owner_uuid: role.uuid)\n    end\n\n    c = Collection.create!(name: \"bzzz124\")\n    assert_raises(ArvadosModel::PermissionDeniedError) do\n      c.update!(owner_uuid: role.uuid)\n    end\n  end\n\n  test \"trash group hides contents\" do\n    set_user_from_auth :active_trustedclient\n\n    g_foo = Group.create!(name: \"foo\", group_class: \"project\")\n    col = Collection.create!(owner_uuid: g_foo.uuid)\n\n    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).any?\n    g_foo.update! is_trashed: true\n    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).empty?\n    assert Collection.readable_by(users(:active), {:include_trash => true}).where(uuid: col.uuid).any?\n    g_foo.update! is_trashed: false\n    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).any?\n  end\n\n  test \"trash group\" do\n    set_user_from_auth :active_trustedclient\n\n    g_foo = Group.create!(name: \"foo\", group_class: \"project\")\n    g_bar = Group.create!(name: \"bar\", owner_uuid: g_foo.uuid, group_class: \"project\")\n    g_baz = Group.create!(name: \"baz\", owner_uuid: g_bar.uuid, group_class: \"project\")\n\n    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).any?\n    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).any?\n    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).any?\n    g_foo.update! is_trashed: true\n    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).empty?\n    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).empty?\n    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).empty?\n\n    assert Group.readable_by(users(:active), {:include_trash => true}).where(uuid: g_foo.uuid).any?\n    assert Group.readable_by(users(:active), {:include_trash => true}).where(uuid: g_bar.uuid).any?\n    assert Group.readable_by(users(:active), {:include_trash => true}).where(uuid: g_baz.uuid).any?\n  end\n\n\n  test \"trash subgroup\" do\n    set_user_from_auth :active_trustedclient\n\n    g_foo = Group.create!(name: \"foo\", group_class: \"project\")\n    g_bar = Group.create!(name: \"bar\", owner_uuid: g_foo.uuid, group_class: \"project\")\n    g_baz = Group.create!(name: \"baz\", owner_uuid: g_bar.uuid, group_class: \"project\")\n\n    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).any?\n    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).any?\n    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).any?\n    g_bar.update! 
is_trashed: true\n\n    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).any?\n    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).empty?\n    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).empty?\n\n    assert Group.readable_by(users(:active), {:include_trash => true}).where(uuid: g_bar.uuid).any?\n    assert Group.readable_by(users(:active), {:include_trash => true}).where(uuid: g_baz.uuid).any?\n  end\n\n  test \"trash subsubgroup\" do\n    set_user_from_auth :active_trustedclient\n\n    g_foo = Group.create!(name: \"foo\", group_class: \"project\")\n    g_bar = Group.create!(name: \"bar\", owner_uuid: g_foo.uuid, group_class: \"project\")\n    g_baz = Group.create!(name: \"baz\", owner_uuid: g_bar.uuid, group_class: \"project\")\n\n    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).any?\n    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).any?\n    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).any?\n    g_baz.update! is_trashed: true\n    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).any?\n    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).any?\n    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).empty?\n    assert Group.readable_by(users(:active), {:include_trash => true}).where(uuid: g_baz.uuid).any?\n  end\n\n\n  test \"trash group propagates to subgroups\" do\n    set_user_from_auth :active_trustedclient\n\n    g_foo = groups(:trashed_project)\n    g_bar = groups(:trashed_subproject)\n    g_baz = groups(:trashed_subproject3)\n    col = collections(:collection_in_trashed_subproject)\n\n    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).empty?\n    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).empty?\n    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).empty?\n    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).empty?\n\n    set_user_from_auth :admin\n    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).empty?\n    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).empty?\n    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).empty?\n    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).empty?\n\n    set_user_from_auth :active_trustedclient\n    g_foo.update! is_trashed: false\n    assert Group.readable_by(users(:active)).where(uuid: g_foo.uuid).any?\n    assert Group.readable_by(users(:active)).where(uuid: g_bar.uuid).any?\n    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).any?\n\n    # this one should still be trashed.\n    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).empty?\n\n    g_baz.update! is_trashed: false\n    assert Group.readable_by(users(:active)).where(uuid: g_baz.uuid).any?\n  end\n\n  test \"trashed does not propagate across permission links\" do\n    set_user_from_auth :admin\n\n    g_foo = Group.create!(name: \"foo\", group_class: \"role\")\n    u_bar = User.create!(first_name: \"bar\")\n\n    assert Group.readable_by(users(:admin)).where(uuid: g_foo.uuid).any?\n    assert User.readable_by(users(:admin)).where(uuid:  u_bar.uuid).any?\n    g_foo.update! is_trashed: true\n\n    assert Group.readable_by(users(:admin)).where(uuid: g_foo.uuid).empty?\n    assert User.readable_by(users(:admin)).where(uuid:  u_bar.uuid).any?\n\n    g_foo.update! 
is_trashed: false\n    ln = Link.create!(tail_uuid: g_foo.uuid,\n                      head_uuid: u_bar.uuid,\n                      link_class: \"permission\",\n                      name: \"can_read\")\n    g_foo.update! is_trashed: true\n\n    assert Group.readable_by(users(:admin)).where(uuid: g_foo.uuid).empty?\n    assert User.readable_by(users(:admin)).where(uuid:  u_bar.uuid).any?\n  end\n\n  test \"project names must be displayable in a filesystem\" do\n    set_user_from_auth :active\n    [\"\", \"{SOLIDUS}\"].each do |subst|\n      Rails.configuration.Collections.ForwardSlashNameSubstitution = subst\n      proj = Group.create group_class: \"project\"\n      role = Group.create group_class: \"role\"\n      filt = Group.create group_class: \"filter\", properties: {\"filters\":[]}\n      [[nil, true],\n       [\"\", true],\n       [\".\", false],\n       [\"..\", false],\n       [\"...\", true],\n       [\"..z..\", true],\n       [\"foo/bar\", subst != \"\"],\n       [\"../..\", subst != \"\"],\n       [\"/\", subst != \"\"],\n      ].each do |name, valid|\n        role.name = name\n        assert_equal true, role.valid?\n        proj.name = name\n        assert_equal valid, proj.valid?, \"project: #{name.inspect} should be #{valid ? \"valid\" : \"invalid\"}\"\n        filt.name = name\n        assert_equal valid, filt.valid?, \"filter: #{name.inspect} should be #{valid ? \"valid\" : \"invalid\"}\"\n      end\n    end\n  end\n\n  def insert_group uuid, owner_uuid, name, group_class\n    q = ActiveRecord::Base.connection.exec_query %{\ninsert into groups (uuid, owner_uuid, name, group_class, created_at, updated_at, modified_at)\n       values ('#{uuid}', '#{owner_uuid}',\n               '#{name}', #{if group_class then \"'\"+group_class+\"'\" else 'NULL' end},\n               statement_timestamp(), statement_timestamp(), statement_timestamp())\n}\n    uuid\n  end\n\n  test \"migration to fix roles and projects\" do\n    g1 = insert_group Group.generate_uuid, system_user_uuid, 'group with no class', nil\n    g2 = insert_group Group.generate_uuid, users(:active).uuid, 'role owned by a user', 'role'\n\n    g3 = insert_group Group.generate_uuid, system_user_uuid, 'role that owns a project', 'role'\n    g4 = insert_group Group.generate_uuid, g3, 'the project', 'project'\n\n    g5 = insert_group Group.generate_uuid, users(:active).uuid, 'a project with an outgoing permission link', 'project'\n\n    g6 = insert_group Group.generate_uuid, system_user_uuid, 'name collision', 'role'\n    g7 = insert_group Group.generate_uuid, users(:active).uuid, 'name collision', 'role'\n\n    g8 = insert_group Group.generate_uuid, users(:active).uuid, 'trashed with no class', nil\n    g8obj = Group.find_by_uuid(g8)\n    g8obj.trash_at = db_current_time\n    g8obj.delete_at = db_current_time\n    act_as_system_user do\n      g8obj.save!(validate: false)\n    end\n\n    refresh_permissions\n\n    act_as_system_user do\n      l1 = Link.create!(link_class: 'permission', name: 'can_manage', tail_uuid: g3, head_uuid: g4)\n      q = ActiveRecord::Base.connection.exec_query %{\nupdate links set tail_uuid='#{g5}' where uuid='#{l1.uuid}'\n}\n      refresh_permissions\n    end\n\n    assert_equal nil, Group.find_by_uuid(g1).group_class\n    assert_equal nil, Group.find_by_uuid(g8).group_class\n    assert_equal users(:active).uuid, Group.find_by_uuid(g2).owner_uuid\n    assert_equal g3, Group.find_by_uuid(g4).owner_uuid\n    assert !Link.where(tail_uuid: users(:active).uuid, head_uuid: g2, link_class: \"permission\", 
name: \"can_manage\").any?\n    assert !Link.where(tail_uuid: g3, head_uuid: g4, link_class: \"permission\", name: \"can_manage\").any?\n    assert Link.where(link_class: 'permission', name: 'can_manage', tail_uuid: g5, head_uuid: g4).any?\n\n    fix_roles_projects\n\n    assert_equal 'role', Group.find_by_uuid(g1).group_class\n    assert_equal 'role', Group.find_by_uuid(g8).group_class\n    assert_equal system_user_uuid, Group.find_by_uuid(g2).owner_uuid\n    assert_equal system_user_uuid, Group.find_by_uuid(g4).owner_uuid\n    assert Link.where(tail_uuid: users(:active).uuid, head_uuid: g2, link_class: \"permission\", name: \"can_manage\").any?\n    assert Link.where(tail_uuid: g3, head_uuid: g4, link_class: \"permission\", name: \"can_manage\").any?\n    assert !Link.where(link_class: 'permission', name: 'can_manage', tail_uuid: g5, head_uuid: g4).any?\n  end\n\n  test \"freeze project\" do\n    act_as_user users(:active) do\n      Rails.configuration.API.UnfreezeProjectRequiresAdmin = false\n\n      test_cr_attrs = {\n        command: [\"echo\", \"foo\"],\n        container_image: links(:docker_image_collection_tag).name,\n        cwd: \"/tmp\",\n        environment: {},\n        mounts: {\"/out\" => {\"kind\" => \"tmp\", \"capacity\" => 1000000}},\n        output_path: \"/out\",\n        runtime_constraints: {\"vcpus\" => 1, \"ram\" => 2},\n        name: \"foo\",\n        description: \"bar\",\n      }\n      parent = Group.create!(group_class: 'project', name: 'freeze-test-parent', owner_uuid: users(:active).uuid)\n      proj = Group.create!(group_class: 'project', name: 'freeze-test', owner_uuid: parent.uuid)\n      proj_inner = Group.create!(group_class: 'project', name: 'freeze-test-inner', owner_uuid: proj.uuid)\n      coll = Collection.create!(name: 'freeze-test-collection', manifest_text: '', owner_uuid: proj_inner.uuid)\n\n      # Cannot set frozen_by_uuid to a different user\n      assert_raises do\n        proj.update!(frozen_by_uuid: users(:spectator).uuid)\n      end\n      proj.reload\n\n      # Cannot set frozen_by_uuid without can_manage permission\n      act_as_system_user do\n        Link.create!(link_class: 'permission', name: 'can_write', tail_uuid: users(:spectator).uuid, head_uuid: proj.uuid)\n      end\n      act_as_user users(:spectator) do\n        # First confirm we have write permission\n        assert Collection.create(name: 'bar', owner_uuid: proj.uuid)\n        assert_raises(ArvadosModel::PermissionDeniedError) do\n          proj.update!(frozen_by_uuid: users(:spectator).uuid)\n        end\n      end\n      proj.reload\n\n      # Cannot set frozen_by_uuid without description (if so configured)\n      Rails.configuration.API.FreezeProjectRequiresDescription = true\n      err = assert_raises do\n        proj.update!(frozen_by_uuid: users(:active).uuid)\n      end\n      assert_match /can only be set if description is non-empty/, err.inspect\n      proj.reload\n      err = assert_raises do\n        proj.update!(frozen_by_uuid: users(:active).uuid, description: '')\n      end\n      assert_match /can only be set if description is non-empty/, err.inspect\n      proj.reload\n\n      # Cannot set frozen_by_uuid without properties (if so configured)\n      Rails.configuration.API.FreezeProjectRequiresProperties['frobity'] = true\n      err = assert_raises do\n        proj.update!(\n          frozen_by_uuid: users(:active).uuid,\n          description: 'ready to freeze')\n      end\n      assert_match /can only be set if properties\\[frobity\\] value is non-empty/, 
err.inspect\n      proj.reload\n\n      # Cannot set frozen_by_uuid while project or its parent is\n      # trashed\n      [parent, proj].each do |trashed|\n        trashed.update!(trash_at: db_current_time)\n        err = assert_raises do\n          proj.update!(\n            frozen_by_uuid: users(:active).uuid,\n            description: 'ready to freeze',\n            properties: {'frobity' => 'bar baz'})\n        end\n        assert_match /cannot be set on a trashed project/, err.inspect\n        proj.reload\n        trashed.update!(trash_at: nil)\n      end\n\n      # Can set frozen_by_uuid if all conditions are met\n      ok = proj.update(\n        frozen_by_uuid: users(:active).uuid,\n        description: 'ready to freeze',\n        properties: {'frobity' => 'bar baz'})\n      assert ok, proj.errors.messages.inspect\n\n      [:active, :admin].each do |u|\n        act_as_user users(u) do\n          # Once project is frozen, cannot create new items inside it or\n          # its descendants\n          [proj, proj_inner].each do |frozen|\n            assert_raises do\n              collections(:collection_owned_by_active).update!(owner_uuid: frozen.uuid)\n            end\n            assert_raises do\n              Collection.create!(owner_uuid: frozen.uuid, name: 'inside-frozen-project')\n            end\n            assert_raises do\n              Group.create!(owner_uuid: frozen.uuid, group_class: 'project', name: 'inside-frozen-project')\n            end\n            cr = ContainerRequest.new(test_cr_attrs.merge(owner_uuid: frozen.uuid))\n            assert_raises ArvadosModel::PermissionDeniedError do\n              cr.save\n            end\n            assert_match /frozen/, cr.errors.inspect\n            # Check the frozen-parent condition is the only reason save failed.\n            cr.owner_uuid = users(u).uuid\n            assert cr.save\n            cr.destroy\n          end\n\n          # Once project is frozen, cannot change name/contents, move,\n          # trash, or delete the project or anything beneath it\n          [proj, proj_inner, coll].each do |frozen|\n            assert_raises(StandardError, \"should reject rename of #{frozen.uuid} (#{frozen.name}) with parent #{frozen.owner_uuid}\") do\n              frozen.update!(name: 'foo2')\n            end\n            frozen.reload\n\n            if frozen.is_a?(Collection)\n              assert_raises(StandardError, \"should reject manifest change of #{frozen.uuid}\") do\n                frozen.update!(manifest_text: \". 
d41d8cd98f00b204e9800998ecf8427e+0 0:0:foo\\n\")\n              end\n            else\n              assert_raises(StandardError, \"should reject moving a project into #{frozen.uuid}\") do\n                groups(:private).update!(owner_uuid: frozen.uuid)\n              end\n            end\n            frozen.reload\n\n            assert_raises(StandardError, \"should reject moving #{frozen.uuid} to a different parent project\") do\n              frozen.update!(owner_uuid: groups(:private).uuid)\n            end\n            frozen.reload\n            assert_raises(StandardError, \"should reject setting trash_at of #{frozen.uuid}\") do\n              frozen.update!(trash_at: db_current_time)\n            end\n            frozen.reload\n            assert_raises(StandardError, \"should reject setting delete_at of #{frozen.uuid}\") do\n              frozen.update!(delete_at: db_current_time)\n            end\n            frozen.reload\n            assert_raises(StandardError, \"should reject delete of #{frozen.uuid}\") do\n              frozen.destroy\n            end\n            frozen.reload\n            if frozen != proj\n              assert_equal [], frozen.writable_by\n            end\n          end\n        end\n      end\n\n      # User with write permission (but not manage) cannot unfreeze\n      act_as_user users(:spectator) do\n        # First confirm we have write permission on the parent project\n        assert Collection.create(name: 'bar', owner_uuid: parent.uuid)\n        assert_raises(ArvadosModel::PermissionDeniedError) do\n          proj.update!(frozen_by_uuid: nil)\n        end\n      end\n      proj.reload\n\n      # User with manage permission can unfreeze, then create items\n      # inside it and its children\n      assert proj.update(frozen_by_uuid: nil)\n      assert Collection.create!(owner_uuid: proj.uuid, name: 'inside-unfrozen-project')\n      assert Collection.create!(owner_uuid: proj_inner.uuid, name: 'inside-inner-unfrozen-project')\n\n      # Re-freeze, and reconfigure so only admins can unfreeze.\n      assert proj.update(frozen_by_uuid: users(:active).uuid)\n      Rails.configuration.API.UnfreezeProjectRequiresAdmin = true\n\n      # Owner cannot unfreeze, because not admin.\n      err = assert_raises do\n        proj.update!(frozen_by_uuid: nil)\n      end\n      assert_match /can only be changed by an admin user, once set/, err.inspect\n      proj.reload\n\n      # Cannot trash or delete a frozen project's ancestor\n      assert_raises(StandardError, \"should not be able to set trash_at on parent of frozen project\") do\n        parent.update!(trash_at: db_current_time)\n      end\n      parent.reload\n      assert_raises(StandardError, \"should not be able to set delete_at on parent of frozen project\") do\n        parent.update!(delete_at: db_current_time)\n      end\n      parent.reload\n      assert_nil parent.frozen_by_uuid\n\n      act_as_user users(:admin) do\n        # Even admin cannot change frozen_by_uuid to someone else's UUID.\n        err = assert_raises do\n          proj.update!(frozen_by_uuid: users(:project_viewer).uuid)\n        end\n        assert_match /can only be set to the current user's UUID/, err.inspect\n        proj.reload\n\n        # Admin can unfreeze.\n        assert proj.update(frozen_by_uuid: nil), proj.errors.messages\n      end\n\n      # Cannot freeze a project if it contains container requests in\n      # Committed state (this would cause operations on the relevant\n      # Containers to fail when syncing container 
request state)\n      creq_uncommitted = ContainerRequest.create!(test_cr_attrs.merge(owner_uuid: proj_inner.uuid))\n      creq_committed = ContainerRequest.create!(test_cr_attrs.merge(owner_uuid: proj_inner.uuid, state: 'Committed'))\n      err = assert_raises do\n        proj.update!(frozen_by_uuid: users(:active).uuid)\n      end\n      assert_match /container request zzzzz-xvhdp-.* with state = Committed/, err.inspect\n      proj.reload\n\n      # Can freeze once all container requests are in Uncommitted or\n      # Final state\n      creq_committed.update!(state: ContainerRequest::Final)\n      assert proj.update(frozen_by_uuid: users(:active).uuid)\n    end\n  end\n\n  [\n    [false, :admin, true],\n    [false, :active, false],\n    [true, :admin, true],\n    [true, :active, true],\n    [true, :inactive, false],\n  ].each do |conf, user, allowed|\n    test \"config.Users.CanCreateRoleGroups conf=#{conf}, user=#{user}\" do\n      Rails.configuration.Users.CanCreateRoleGroups = conf\n      act_as_user users(user) do\n        if allowed\n          Group.create!(name: 'admin-created', group_class: 'role')\n        else\n          assert_raises(ArvadosModel::PermissionDeniedError) do\n            Group.create!(name: 'user-created', group_class: 'role')\n          end\n        end\n      end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/keep_disk_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass KeepDiskTest < ActiveSupport::TestCase\n  # test \"the truth\" do\n  #   assert true\n  # end\nend\n"
  },
  {
    "path": "services/api/test/unit/keep_service_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass KeepServiceTest < ActiveSupport::TestCase\n  test \"non-admins cannot create services\" do\n    set_user_from_auth :active\n    ks = KeepService.new\n    assert_not_allowed do\n      ks.save\n    end\n  end\n\n  test \"non-admins cannot update services\" do\n    set_user_from_auth :active\n    ks = keep_services(:proxy)\n    ks.service_port = 64434\n    assert_not_allowed do\n      ks.save\n    end\n  end\n\n  test \"admins can create services\" do\n    set_user_from_auth :admin\n    ks = KeepService.new\n    assert(ks.save, \"saving new service failed\")\n  end\n\n  test \"admins can update services\" do\n    set_user_from_auth :admin\n    ks = keep_services(:proxy)\n    ks.service_port = 64434\n    assert(ks.save, \"saving updated service failed\")\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/link_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass LinkTest < ActiveSupport::TestCase\n  fixtures :all\n\n  setup do\n    set_user_from_auth :admin_trustedclient\n  end\n\n  test \"cannot delete an object referenced by unwritable links\" do\n    ob = act_as_user users(:active) do\n      Collection.create\n    end\n    link = act_as_user users(:admin) do\n      Link.create(tail_uuid: users(:active).uuid,\n                  head_uuid: ob.uuid,\n                  link_class: 'test',\n                  name: 'test')\n    end\n    assert_equal users(:admin).uuid, link.owner_uuid\n    assert_raises(ArvadosModel::PermissionDeniedError,\n                  \"should not delete #{ob.uuid} with link #{link.uuid}\") do\n      act_as_user users(:active) do\n        ob.destroy\n      end\n    end\n    act_as_user users(:admin) do\n      ob.destroy\n    end\n    assert_empty Link.where(uuid: link.uuid)\n  end\n\n  def new_active_link_valid?(link_attrs)\n    set_user_from_auth :active\n    begin\n      Link.\n        create({link_class: \"permission\",\n                 name: \"can_read\",\n                 head_uuid: groups(:aproject).uuid,\n               }.merge(link_attrs)).\n        valid?\n    rescue ArvadosModel::PermissionDeniedError\n      false\n    end\n  end\n\n  test \"non-admin project owner can make it public\" do\n    assert(new_active_link_valid?(tail_uuid: groups(:anonymous_group).uuid),\n           \"non-admin project owner can't make their project public\")\n  end\n\n  test \"link granting permission to nonexistent user is invalid\" do\n    refute new_active_link_valid?(tail_uuid:\n                                  users(:active).uuid.sub(/-\\w+$/, \"-#{'z' * 15}\"))\n  end\n\n  test \"link granting permission to remote user is valid\" do\n    refute new_active_link_valid?(tail_uuid:\n                                  users(:active).uuid.sub(/^\\w+-/, \"foooo-\"))\n    Rails.configuration.RemoteClusters = Rails.configuration.RemoteClusters.merge({foooo: ActiveSupport::InheritableOptions.new({Host: \"bar.com\"})})\n    assert new_active_link_valid?(tail_uuid:\n                                  users(:active).uuid.sub(/^\\w+-/, \"foooo-\"))\n  end\n\n  test \"link granting non-project permission to unreadable user is invalid\" do\n    refute new_active_link_valid?(tail_uuid: users(:admin).uuid,\n                                  head_uuid: collections(:bar_file).uuid)\n  end\n\n  test \"user can't add a Collection to a Project without permission\" do\n    refute new_active_link_valid?(link_class: \"name\",\n                                  name: \"Permission denied test name\",\n                                  tail_uuid: collections(:bar_file).uuid)\n  end\n\n  test \"user can't add a User to a Project\" do\n    # Users *can* give other users permissions to projects.\n    # This test helps ensure that that exception is specific to permissions.\n    refute new_active_link_valid?(link_class: \"name\",\n                                  name: \"Permission denied test name\",\n                                  tail_uuid: users(:admin).uuid)\n  end\n\n  test \"link granting project permissions to unreadable user is invalid\" do\n    refute new_active_link_valid?(tail_uuid: users(:admin).uuid)\n  end\n\n  test \"permission link can't exist on past collection versions\" do\n    refute new_active_link_valid?(tail_uuid: groups(:public).uuid,\n                                  head_uuid: 
collections(:w_a_z_file_version_1).uuid)\n  end\n\n  def create_overlapping_permissions(names=[], attrs={})\n    names.map do |name|\n      link = Link.create!({\n                            link_class: \"tmp\",\n                            tail_uuid: users(:active).uuid,\n                            head_uuid: collections(:baz_file).uuid,\n                            name: name,\n                          }.merge(attrs).merge({name: name}))\n      ActiveRecord::Base.connection.execute \"update links set link_class='permission' where uuid='#{link.uuid}'\"\n      link.uuid\n    end\n  end\n\n  test \"updating permission causes any conflicting links to be deleted\" do\n    link1, link2 = create_overlapping_permissions(['can_read', 'can_manage'])\n    Link.find_by_uuid(link2).update!(name: 'can_write')\n    assert_empty Link.where(uuid: link1)\n  end\n\n  test \"deleting permission causes any conflicting links to be deleted\" do\n    rlink, wlink = create_overlapping_permissions(['can_read', 'can_write'])\n    Link.find_by_uuid(wlink).destroy\n    assert_empty Link.where(uuid: rlink)\n  end\n\n  test \"updating login permission causes any conflicting links to be deleted\" do\n    link1, link2 = create_overlapping_permissions(['can_login', 'can_login'], {properties: {username: 'foo1'}})\n    Link.find_by_uuid(link1).update!(properties: {'username' => 'foo2'})\n    Link.find_by_uuid(link2).update!(properties: {'username' => 'foo2'})\n    assert_empty Link.where(uuid: link1)\n  end\n\n  test \"deleting login permission causes any conflicting links to be deleted\" do\n    link1, link2 = create_overlapping_permissions(['can_login', 'can_login'], {properties: {username: 'foo1'}})\n    Link.find_by_uuid(link1).destroy\n    assert_empty Link.where(uuid: link2)\n  end\n\n  ['zzzzz-dz642-runningcontainr', ''].each do |head_uuid|\n    test \"published_port link is invalid because head_uuid #{head_uuid.inspect} is not a container request UUID\" do\n      act_as_user users(:active) do\n        link = Link.create(head_uuid: head_uuid,\n                           link_class: 'published_port',\n                           name: 'service1',\n                           properties: {\"port\" => 80})\n        assert_equal(false, link.valid?)\n        assert_equal(\"must be a container request UUID\", link.errors.messages[:head_uuid].first)\n      end\n    end\n  end\n\n  test \"Cannot create two published_port links with the same name\" do\n    act_as_user users(:active) do\n      Link.create!(head_uuid: container_requests(:running).uuid,\n                   link_class: 'published_port',\n                   name: 'service1',\n                   properties: {\"port\" => 80})\n\n      # not ok\n      assert_raises(ActiveRecord::RecordNotUnique,\n                    \"should not be able to create two published_port links with the same name\") do\n        Link.create!(head_uuid: container_requests(:running_older).uuid,\n                     link_class: 'published_port',\n                     name: 'service1',\n                     properties: {\"port\" => 80})\n      end\n\n      # ok\n      Link.create!(head_uuid: container_requests(:running_older).uuid,\n                   link_class: 'published_port',\n                   name: 'service2',\n                   properties: {\"port\" => 80})\n\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/log_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\nrequire 'audit_logs'\n\nclass LogTest < ActiveSupport::TestCase\n  include CurrentApiClient\n\n  EVENT_TEST_METHODS = {\n    :create => [:created_at, :assert_nil, :assert_not_nil],\n    :update => [:modified_at, :assert_not_nil, :assert_not_nil],\n    :delete => [nil, :assert_not_nil, :assert_nil],\n  }\n\n  setup do\n    @start_time = Time.now\n    @log_count = 1\n  end\n\n  def assert_properties(test_method, event, props, *keys)\n    verb = (test_method == :assert_nil) ? 'have nil' : 'define'\n    keys.each do |prop_name|\n      assert_includes(props, prop_name, \"log properties missing #{prop_name}\")\n      self.send(test_method, props[prop_name],\n                \"#{event.to_s} log should #{verb} #{prop_name}\")\n    end\n  end\n\n  def get_logs_about(thing)\n    Log.where(object_uuid: thing.uuid).order(\"created_at ASC\").all\n  end\n\n  def clear_logs_about(thing)\n    Log.where(object_uuid: thing.uuid).delete_all\n  end\n\n  def assert_logged(thing, event_type)\n    logs = get_logs_about(thing)\n    assert_equal(@log_count, logs.size, \"log count mismatch\")\n    @log_count += 1\n    log = logs.last\n    props = log.properties\n    assert_equal(current_user.andand.uuid, log.owner_uuid,\n                 \"log is not owned by current user\")\n    assert_equal(current_user.andand.uuid, log.modified_by_user_uuid,\n                 \"log is not 'modified by' current user\")\n    assert_equal(thing.uuid, log.object_uuid, \"log UUID mismatch\")\n    assert_equal(event_type.to_s, log.event_type, \"log event type mismatch\")\n    time_method, old_props_test, new_props_test = EVENT_TEST_METHODS[event_type]\n    if time_method.nil? 
or (timestamp = thing.send(time_method)).nil?\n      assert(log.event_at >= @start_time, \"log timestamp too old\")\n    else\n      assert_in_delta(timestamp, log.event_at, 1, \"log timestamp mismatch\")\n    end\n    assert_properties(old_props_test, event_type, props,\n                      'old_etag', 'old_attributes')\n    assert_properties(new_props_test, event_type, props,\n                      'new_etag', 'new_attributes')\n    ['old_attributes', 'new_attributes'].each do |logattr|\n      next if !props[logattr]\n      assert_match /\"created_at\":\"\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d{9}Z\"/, Oj.dump(props, mode: :compat)\n    end\n    yield props if block_given?\n  end\n\n  def assert_logged_with_clean_properties(obj, event_type, excluded_attr)\n    assert_logged(obj, event_type) do |props|\n      ['old_attributes', 'new_attributes'].map do |logattr|\n        attributes = props[logattr]\n        next if attributes.nil?\n        refute_includes(attributes, excluded_attr,\n                        \"log #{logattr} includes #{excluded_attr}\")\n      end\n      yield props if block_given?\n    end\n  end\n\n  test \"creating a user makes a log\" do\n    set_user_from_auth :admin_trustedclient\n    u = User.new(first_name: \"Log\", last_name: \"Test\")\n    u.save!\n    assert_logged(u, :create) do |props|\n      assert_equal(u.etag, props['new_etag'], \"new user etag mismatch\")\n      assert_equal(u.first_name, props['new_attributes']['first_name'],\n                   \"new user first name mismatch\")\n      assert_equal(u.last_name, props['new_attributes']['last_name'],\n                   \"new user last name mismatch\")\n    end\n  end\n\n  test \"updating a virtual machine makes a log\" do\n    set_user_from_auth :admin_trustedclient\n    vm = virtual_machines(:testvm)\n    orig_etag = vm.etag\n    vm.hostname = 'testvm.testshell'\n    vm.save!\n    assert_logged(vm, :update) do |props|\n      assert_equal(orig_etag, props['old_etag'], \"updated VM old etag mismatch\")\n      assert_equal(vm.etag, props['new_etag'], \"updated VM new etag mismatch\")\n      assert_equal('testvm.shell', props['old_attributes']['hostname'],\n                   \"updated VM old name mismatch\")\n      assert_equal('testvm.testshell', props['new_attributes']['hostname'],\n                   \"updated VM new name mismatch\")\n    end\n  end\n\n  test \"old_attributes preserves values deep inside a hash\" do\n    set_user_from_auth :active\n    it = collections(:collection_owned_by_active)\n    clear_logs_about it\n    it.properties = {'foo' => {'bar' => ['baz', 'qux', {'quux' => 'bleat'}]}}\n    it.save!\n    assert_logged it, :update\n    it.properties['foo']['bar'][2]['quux'] = 'blert'\n    it.save!\n    assert_logged it, :update do |props|\n      assert_equal 'bleat', props['old_attributes']['properties']['foo']['bar'][2]['quux']\n      assert_equal 'blert', props['new_attributes']['properties']['foo']['bar'][2]['quux']\n    end\n  end\n\n  test \"destroying an authorization makes a log\" do\n    set_user_from_auth :admin_trustedclient\n    auth = api_client_authorizations(:spectator)\n    orig_etag = auth.etag\n    orig_attrs = auth.attributes\n    orig_attrs.delete 'api_token'\n    auth.destroy\n    assert_logged(auth, :delete) do |props|\n      assert_equal(orig_etag, props['old_etag'], \"destroyed auth etag mismatch\")\n      assert_equal(orig_attrs, props['old_attributes'],\n                   \"destroyed auth attributes mismatch\")\n    end\n  end\n\n  test \"updating a group 
twice makes two logs\" do\n    set_user_from_auth :admin_trustedclient\n    group = groups(:empty_lonely_group)\n    name1 = group.name\n    name2 = \"#{name1} under test\"\n    group.name = name2\n    group.save!\n    assert_logged(group, :update) do |props|\n      assert_equal(name1, props['old_attributes']['name'],\n                   \"group start name mismatch\")\n      assert_equal(name2, props['new_attributes']['name'],\n                   \"group updated name mismatch\")\n    end\n    group.name = name1\n    group.save!\n    assert_logged(group, :update) do |props|\n      assert_equal(name2, props['old_attributes']['name'],\n                   \"group pre-revert name mismatch\")\n      assert_equal(name1, props['new_attributes']['name'],\n                   \"group final name mismatch\")\n    end\n  end\n\n  test \"making a log doesn't get logged\" do\n    set_user_from_auth :active_trustedclient\n    log = Log.new\n    log.save!\n    assert_equal(0, get_logs_about(log).size, \"made a Log about a Log\")\n  end\n\n  test \"non-admins can't modify or delete logs\" do\n    set_user_from_auth :active_trustedclient\n    log = Log.new(summary: \"immutable log test\")\n    assert_nothing_raised { log.save! }\n    log.summary = \"log mutation test should fail\"\n    assert_raise(ArvadosModel::PermissionDeniedError) { log.save! }\n    assert_raise(ArvadosModel::PermissionDeniedError) { log.destroy }\n  end\n\n  test \"admins can modify and delete logs\" do\n    set_user_from_auth :admin_trustedclient\n    log = Log.new(summary: \"admin log mutation test\")\n    assert_nothing_raised { log.save! }\n    log.summary = \"admin mutated log test\"\n    assert_nothing_raised { log.save! }\n    assert_nothing_raised { log.destroy }\n  end\n\n  test \"failure saving log causes failure saving object\" do\n    Log.class_eval do\n      alias_method :_orig_validations, :perform_validations\n      def perform_validations(options)\n        false\n      end\n    end\n    begin\n      set_user_from_auth :active_trustedclient\n      user = users(:active)\n      user.first_name = 'Test'\n      assert_raise(ActiveRecord::RecordInvalid) { user.save! 
}\n    ensure\n      Log.class_eval do\n        alias_method :perform_validations, :_orig_validations\n      end\n    end\n  end\n\n  test \"don't log changes only to ApiClientAuthorization.last_used_*\" do\n    set_user_from_auth :admin_trustedclient\n    auth = api_client_authorizations(:spectator)\n    start_log_count = get_logs_about(auth).size\n    auth.last_used_at = Time.now\n    auth.last_used_by_ip_address = '::1'\n    auth.save!\n    assert_equal(start_log_count, get_logs_about(auth).size,\n                 \"log count changed after 'using' ApiClientAuthorization\")\n    auth.created_by_ip_address = '::1'\n    auth.save!\n    assert_logged(auth, :update)\n  end\n\n  test \"don't log changes only to Collection.preserve_version\" do\n    set_user_from_auth :admin_trustedclient\n    col = collections(:collection_owned_by_active)\n    clear_logs_about col\n    start_log_count = get_logs_about(col).size\n    assert_equal false, col.preserve_version\n    col.preserve_version = true\n    col.save!\n    assert_equal(start_log_count, get_logs_about(col).size,\n                 \"log count changed after updating Collection.preserve_version\")\n    col.name = 'updated by admin'\n    col.save!\n    assert_logged(col, :update)\n  end\n\n  test \"token isn't included in ApiClientAuthorization logs\" do\n    set_user_from_auth :admin_trustedclient\n    auth = ApiClientAuthorization.new\n    auth.user = users(:spectator)\n    auth.save!\n    assert_logged_with_clean_properties(auth, :create, 'api_token')\n    auth.expires_at = Time.now\n    auth.save!\n    assert_logged_with_clean_properties(auth, :update, 'api_token')\n    auth.destroy\n    assert_logged_with_clean_properties(auth, :delete, 'api_token')\n  end\n\n  test \"use ownership and permission links to determine which logs a user can see\" do\n    known_logs = [:noop,\n                  :admin_changes_collection_owned_by_active,\n                  :admin_changes_collection_owned_by_foo,\n                  :system_adds_foo_file,\n                  :system_adds_baz,\n                  :log_owned_by_active,\n                  :crunchstat_for_running_container]\n\n    c = Log.readable_by(users(:admin)).order(\"id asc\").each.to_a\n    assert_log_result c, known_logs, known_logs\n\n    c = Log.readable_by(users(:active)).order(\"id asc\").each.to_a\n    assert_log_result c, known_logs, [:admin_changes_collection_owned_by_active,\n                                      :system_adds_foo_file,             # readable via link\n                                      :system_adds_baz,                  # readable via 'all users' group\n                                      :log_owned_by_active,              # log owned by active\n                                      :crunchstat_for_running_container] # log & job owned by active\n\n    c = Log.readable_by(users(:spectator)).order(\"id asc\").each.to_a\n    assert_log_result c, known_logs, [:noop,                             # object_uuid is spectator\n                                      :system_adds_baz]                  # readable via 'all users' group\n\n    c = Log.readable_by(users(:user_foo_in_sharing_group)).order(\"id asc\").each.to_a\n    assert_log_result c, known_logs, [:admin_changes_collection_owned_by_foo] # collection's parent is readable via role group\n  end\n\n  def assert_log_result result, known_logs, expected_logs\n    # All of expected_logs must appear in result. 
Additional logs can\n    # appear too, but only if they are _not_ listed in known_logs\n    # (i.e., we do not make any assertions about logs not mentioned in\n    # either \"known\" or \"expected\".)\n    result_ids = result.collect(&:id)\n    expected_logs.each do |want|\n      assert_includes result_ids, logs(want).id\n    end\n    (known_logs - expected_logs).each do |notwant|\n      refute_includes result_ids, logs(notwant).id\n    end\n  end\n\n  test \"non-empty configuration.unlogged_attributes\" do\n    Rails.configuration.AuditLogs.UnloggedAttributes = ConfigLoader.to_OrderedOptions({\"manifest_text\"=>{}})\n    txt = \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\\n\"\n\n    act_as_system_user do\n      coll = Collection.create(manifest_text: txt)\n      assert_logged_with_clean_properties(coll, :create, 'manifest_text')\n      coll.name = \"testing\"\n      coll.save!\n      assert_logged_with_clean_properties(coll, :update, 'manifest_text')\n      coll.destroy\n      assert_logged_with_clean_properties(coll, :delete, 'manifest_text')\n    end\n  end\n\n  test \"empty configuration.unlogged_attributes\" do\n    Rails.configuration.AuditLogs.UnloggedAttributes = ConfigLoader.to_OrderedOptions({})\n    txt = \". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\\n\"\n\n    act_as_system_user do\n      coll = Collection.create(manifest_text: txt)\n      assert_logged(coll, :create) do |props|\n        assert_equal(txt, props['new_attributes']['manifest_text'])\n      end\n      coll.update!(name: \"testing\")\n      assert_logged(coll, :update) do |props|\n        assert_equal(txt, props['old_attributes']['manifest_text'])\n        assert_equal(txt, props['new_attributes']['manifest_text'])\n      end\n      coll.destroy\n      assert_logged(coll, :delete) do |props|\n        assert_equal(txt, props['old_attributes']['manifest_text'])\n      end\n    end\n  end\n\n  def assert_no_logs_deleted\n    logs_before = Log.unscoped.all.count\n    assert logs_before > 0\n    yield\n    assert_equal logs_before, Log.unscoped.all.count\n  end\n\n  def remaining_audit_logs\n    Log.unscoped.where('event_type in (?)', %w(create update destroy delete))\n  end\n\n  # Default settings should not delete anything -- some sites rely on\n  # the original \"keep everything forever\" behavior.\n  test 'retain old audit logs with default settings' do\n    assert_no_logs_deleted do\n      AuditLogs.delete_old(\n        max_age: Rails.configuration.AuditLogs.MaxAge,\n        max_batch: Rails.configuration.AuditLogs.MaxDeleteBatch)\n    end\n  end\n\n  # Batch size 0 should retain all logs -- even if max_age is very\n  # short, and even if the default settings (and associated test) have\n  # changed.\n  test 'retain old audit logs with max_audit_log_delete_batch=0' do\n    assert_no_logs_deleted do\n      AuditLogs.delete_old(max_age: 1, max_batch: 0)\n    end\n  end\n\n  # We recommend a more conservative age of 5 minutes for production,\n  # but 3 minutes suits our test data better (and is test-worthy in\n  # that it's expected to work correctly in production).\n  test 'delete old audit logs with production settings' do\n    initial_log_count = remaining_audit_logs.count\n    assert initial_log_count > 0\n    AuditLogs.delete_old(max_age: 180, max_batch: 100000)\n    assert_operator remaining_audit_logs.count, :<, initial_log_count\n  end\n\n  test 'delete all audit logs in multiple batches' do\n    assert remaining_audit_logs.count > 2\n    AuditLogs.delete_old(max_age: 0.00001, max_batch: 2)\n    assert_equal 
[], remaining_audit_logs.collect(&:uuid)\n  end\n\n  test 'delete old audit logs in thread' do\n    Rails.configuration.AuditLogs.MaxAge = 20\n    Rails.configuration.AuditLogs.MaxDeleteBatch = 100000\n    Rails.cache.delete 'AuditLogs'\n    initial_audit_log_count = remaining_audit_logs.count\n    assert initial_audit_log_count > 0\n    act_as_system_user do\n      Log.create!()\n    end\n    deadline = Time.now + 10\n    while remaining_audit_logs.count == initial_audit_log_count\n      if Time.now > deadline\n        raise \"timed out\"\n      end\n      sleep 0.1\n    end\n    assert_operator remaining_audit_logs.count, :<, initial_audit_log_count\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/owner_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\n# Test referential integrity: ensure we cannot leave any object\n# without owners by deleting a user or group.\n#\n# \"o\" is an owner.\n# \"i\" is an item.\n\nclass OwnerTest < ActiveSupport::TestCase\n  fixtures :users, :groups\n\n  setup do\n    set_user_from_auth :admin_trustedclient\n  end\n\n  User.all\n  Group.all\n  [User, Group].each do |o_class|\n    test \"create object with legit #{o_class} owner\" do\n      if o_class == Group\n        o = o_class.create! group_class: \"project\"\n      else\n        o = o_class.create!\n      end\n      i = Collection.create(owner_uuid: o.uuid)\n      assert i.valid?, \"new item should pass validation\"\n      assert i.uuid, \"new item should have an ID\"\n      assert Collection.where(uuid: i.uuid).any?, \"new item should really be in DB\"\n    end\n\n    test \"create object with non-existent #{o_class} owner\" do\n      assert_raises(ActiveRecord::RecordInvalid,\n                    \"create should fail with random owner_uuid\") do\n        Collection.create!(owner_uuid: o_class.generate_uuid)\n      end\n\n      i = Collection.create(owner_uuid: o_class.generate_uuid)\n      assert !i.valid?, \"object with random owner_uuid should not be valid?\"\n\n      i = Collection.new(owner_uuid: o_class.generate_uuid)\n      assert !i.valid?, \"new item should not pass validation\"\n      assert !i.uuid, \"new item should not have an ID\"\n    end\n\n    [User, Group].each do |new_o_class|\n      test \"change owner from legit #{o_class} to legit #{new_o_class} owner\" do\n        o = if o_class == Group\n              o_class.create! group_class: \"project\"\n            else\n              o_class.create!\n            end\n        i = Collection.create!(owner_uuid: o.uuid)\n\n        new_o = if new_o_class == Group\n              new_o_class.create! group_class: \"project\"\n            else\n              new_o_class.create!\n            end\n\n        assert(Collection.where(uuid: i.uuid).any?,\n               \"new item should really be in DB\")\n        assert(i.update(owner_uuid: new_o.uuid),\n               \"should change owner_uuid from #{o.uuid} to #{new_o.uuid}\")\n      end\n    end\n\n    test \"delete #{o_class} that owns nothing\" do\n      if o_class == Group\n        o = o_class.create! group_class: \"project\"\n      else\n        o = o_class.create!\n      end\n      assert(o_class.where(uuid: o.uuid).any?,\n             \"new #{o_class} should really be in DB\")\n      assert(o.destroy, \"should delete #{o_class} that owns nothing\")\n      assert_equal(false, o_class.where(uuid: o.uuid).any?,\n                   \"#{o.uuid} should not be in DB after deleting\")\n    end\n\n    test \"change uuid of #{o_class} that owns nothing\" do\n      # (we're relying on our admin credentials here)\n      if o_class == Group\n        o = o_class.create! 
group_class: \"project\"\n      else\n        o = o_class.create!\n      end\n      assert(o_class.where(uuid: o.uuid).any?,\n             \"new #{o_class} should really be in DB\")\n      old_uuid = o.uuid\n      new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])\n      assert(o.update(uuid: new_uuid),\n              \"should change #{o_class} uuid from #{old_uuid} to #{new_uuid}\")\n      assert_equal(false, o_class.where(uuid: old_uuid).any?,\n                   \"#{old_uuid} should disappear when renamed to #{new_uuid}\")\n    end\n  end\n\n  ['users(:active)', 'groups(:aproject)'].each do |ofixt|\n    test \"delete #{ofixt} that owns other objects\" do\n      o = eval ofixt\n      assert_equal(true, Collection.where(owner_uuid: o.uuid).any?,\n                   \"need something to be owned by #{o.uuid} for this test\")\n\n      skip_check_permissions_against_full_refresh do\n        assert_raises(ActiveRecord::DeleteRestrictionError,\n                      \"should not delete #{ofixt} that owns objects\") do\n          o.destroy\n        end\n      end\n    end\n\n    test \"change uuid of #{ofixt} that owns other objects\" do\n      o = eval ofixt\n      assert_equal(true, Collection.where(owner_uuid: o.uuid).any?,\n                   \"need something to be owned by #{o.uuid} for this test\")\n      new_uuid = o.uuid.sub(/..........$/, rand(2**256).to_s(36)[0..9])\n      assert(!o.update(uuid: new_uuid),\n             \"should not change uuid of #{ofixt} that owns objects\")\n    end\n  end\n\n  test \"delete User that owns self\" do\n    o = User.create!\n    assert User.where(uuid: o.uuid).any?, \"new User should really be in DB\"\n    assert_equal(true, o.update(owner_uuid: o.uuid),\n                 \"setting owner to self should work\")\n\n    skip_check_permissions_against_full_refresh do\n      assert(o.destroy, \"should delete User that owns self\")\n    end\n\n    assert_equal(false, User.where(uuid: o.uuid).any?,\n                 \"#{o.uuid} should not be in DB after deleting\")\n    check_permissions_against_full_refresh\n  end\n\nend\n"
  },
  {
    "path": "services/api/test/unit/permission_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass PermissionTest < ActiveSupport::TestCase\n  include CurrentApiClient\n\n  test \"Grant permissions on an object I own\" do\n    set_user_from_auth :active_trustedclient\n\n    ob = Collection.create\n    assert ob.save\n\n    # Ensure I have permission to manage this group even when its owner changes\n    perm_link = Link.create(tail_uuid: users(:active).uuid,\n                            head_uuid: ob.uuid,\n                            link_class: 'permission',\n                            name: 'can_manage')\n    assert perm_link.save, \"should give myself permission on my own object\"\n  end\n\n  test \"Delete permission links when deleting an object\" do\n    set_user_from_auth :active_trustedclient\n\n    ob = Collection.create!\n    Link.create!(tail_uuid: users(:active).uuid,\n                 head_uuid: ob.uuid,\n                 link_class: 'permission',\n                 name: 'can_manage')\n    ob_uuid = ob.uuid\n    assert ob.destroy, \"Could not destroy object with 1 permission link\"\n    assert_empty(Link.where(head_uuid: ob_uuid),\n                 \"Permission link was not deleted when object was deleted\")\n  end\n\n  test \"permission links owned by root\" do\n    set_user_from_auth :active_trustedclient\n    ob = Collection.create!\n    perm_link = Link.create!(tail_uuid: users(:active).uuid,\n                             head_uuid: ob.uuid,\n                             link_class: 'permission',\n                             name: 'can_read')\n    assert_equal system_user_uuid, perm_link.owner_uuid\n  end\n\n  test \"readable_by\" do\n    set_user_from_auth :admin\n\n    ob = Collection.create!\n    Link.create!(tail_uuid: users(:active).uuid,\n                 head_uuid: ob.uuid,\n                 link_class: 'permission',\n                 name: 'can_read')\n    assert Collection.readable_by(users(:active)).where(uuid: ob.uuid).any?, \"user does not have read permission\"\n  end\n\n  test \"writable_by\" do\n    set_user_from_auth :admin\n\n    ob = Collection.create!\n    Link.create!(tail_uuid: users(:active).uuid,\n                 head_uuid: ob.uuid,\n                 link_class: 'permission',\n                 name: 'can_write')\n    assert ob.writable_by.include?(users(:active).uuid), \"user does not have write permission\"\n  end\n\n  test \"update permission link\" do\n    set_user_from_auth :admin\n\n    grp = Group.create! name: \"blah project\", group_class: \"project\"\n    ob = Collection.create! 
owner_uuid: grp.uuid\n\n    assert !users(:active).can?(write: ob)\n    assert !users(:active).can?(read: ob)\n\n    l1 = Link.create!(tail_uuid: users(:active).uuid,\n                 head_uuid: grp.uuid,\n                 link_class: 'permission',\n                 name: 'can_write')\n\n    assert users(:active).can?(write: ob)\n    assert users(:active).can?(read: ob)\n\n    l1.update!(name: 'can_read')\n\n    assert !users(:active).can?(write: ob)\n    assert users(:active).can?(read: ob)\n\n    l1.destroy\n\n    assert !users(:active).can?(write: ob)\n    assert !users(:active).can?(read: ob)\n  end\n\n  test \"writable_by reports requesting user's own uuid for a writable project\" do\n    invited_to_write = users(:project_viewer)\n    group = groups(:asubproject)\n\n    # project_viewer can read, but cannot write or see the writers list\n    set_user_from_auth :project_viewer\n    assert_equal([group.owner_uuid],\n                 group.writable_by,\n                 \"writers list should just have owner_uuid\")\n\n    # allow project_viewer to write for the remainder of the test\n    set_user_from_auth :admin\n    Link.create!(tail_uuid: invited_to_write.uuid,\n                 head_uuid: group.uuid,\n                 link_class: 'permission',\n                 name: 'can_write')\n    group.permissions.reload\n\n    # project_viewer should see self in writers list (but not all writers)\n    set_user_from_auth :project_viewer\n    assert_not_nil(group.writable_by,\n                    \"can write but cannot see writers list\")\n    assert_includes(group.writable_by, invited_to_write.uuid,\n                    \"self missing from writers list\")\n    assert_includes(group.writable_by, group.owner_uuid,\n                    \"project owner missing from writers list\")\n    refute_includes(group.writable_by, users(:active).uuid,\n                    \"saw :active user in writers list\")\n\n    # active user should see full writers list\n    set_user_from_auth :active\n    assert_includes(group.writable_by, invited_to_write.uuid,\n                    \"permission just added, but missing from writers list\")\n\n    # allow project_viewer to manage for the remainder of the test\n    set_user_from_auth :admin\n    Link.create!(tail_uuid: invited_to_write.uuid,\n                 head_uuid: group.uuid,\n                 link_class: 'permission',\n                 name: 'can_manage')\n    # invite another writer we can test for\n    Link.create!(tail_uuid: users(:spectator).uuid,\n                 head_uuid: group.uuid,\n                 link_class: 'permission',\n                 name: 'can_write')\n    group.permissions.reload\n\n    set_user_from_auth :project_viewer\n    assert_not_nil(group.writable_by,\n                    \"can manage but cannot see writers list\")\n    assert_includes(group.writable_by, users(:spectator).uuid,\n                    \":spectator missing from writers list\")\n  end\n\n  test \"user owns group, group can_manage object's group, user can add permissions\" do\n    set_user_from_auth :admin\n\n    owner_grp = Group.create!(owner_uuid: users(:active).uuid, group_class: \"role\")\n\n    sp_grp = Group.create!(group_class: \"project\")\n\n    Link.create!(link_class: 'permission',\n                 name: 'can_manage',\n                 tail_uuid: owner_grp.uuid,\n                 head_uuid: sp_grp.uuid)\n\n    sp = Collection.create!(owner_uuid: sp_grp.uuid)\n\n    # active user owns owner_grp, which has can_manage permission on sp_grp\n    # user should be able to 
add permissions on sp.\n    set_user_from_auth :active_trustedclient\n    test_perm = Link.create(tail_uuid: users(:active).uuid,\n                            head_uuid: sp.uuid,\n                            link_class: 'permission',\n                            name: 'can_write')\n    assert test_perm.save, \"could not save new permission on target object\"\n    assert test_perm.destroy, \"could not delete new permission on target object\"\n  end\n\n  # bug #3091\n  skip \"can_manage permission on a non-group object\" do\n    set_user_from_auth :admin\n\n    ob = Collection.create!\n    # grant can_manage permission to active\n    perm_link = Link.create!(tail_uuid: users(:active).uuid,\n                             head_uuid: ob.uuid,\n                             link_class: 'permission',\n                             name: 'can_manage')\n    # ob is owned by :admin, the link is owned by root\n    assert_equal users(:admin).uuid, ob.owner_uuid\n    assert_equal system_user_uuid, perm_link.owner_uuid\n\n    # user \"active\" can modify the permission link\n    set_user_from_auth :active_trustedclient\n    perm_link.properties[\"foo\"] = 'bar'\n    assert perm_link.save, \"could not save modified link\"\n\n    assert_equal 'bar', perm_link.properties['foo'], \"link properties do not include foo = bar\"\n  end\n\n  test \"user without can_manage permission may not modify permission link\" do\n    set_user_from_auth :admin\n\n    ob = Collection.create!\n    # grant can_read permission to active\n    perm_link = Link.create!(tail_uuid: users(:active).uuid,\n                             head_uuid: ob.uuid,\n                             link_class: 'permission',\n                             name: 'can_read')\n    # ob is owned by :admin, the link is owned by root\n    assert_equal ob.owner_uuid, users(:admin).uuid\n    assert_equal perm_link.owner_uuid, system_user_uuid\n\n    # user \"active\" may not modify the permission link\n    set_user_from_auth :active_trustedclient\n    perm_link.name = 'can_manage'\n    assert_raises ArvadosModel::PermissionDeniedError do\n      perm_link.save\n    end\n  end\n\n  test \"manager user gets permission to minions' stuff via can_manage link\" do\n    Rails.configuration.Users.RoleGroupsVisibleToAll = false\n    Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false\n    manager = create :active_user, first_name: \"Manage\", last_name: \"Er\"\n    minion = create :active_user, first_name: \"Min\", last_name: \"Ion\"\n    minions_collection = act_as_user minion do\n      g = Group.create! name: \"minion project\", group_class: \"project\"\n      Collection.create! owner_uuid: g.uuid\n    end\n    # Manager creates a group. 
(Make sure it doesn't magically give\n    # anyone any additional permissions.)\n    g = nil\n    act_as_user manager do\n      g = create :group, name: \"NoBigSecret Lab\", group_class: \"role\"\n      assert_empty(User.readable_by(manager).where(uuid: minion.uuid),\n                   \"saw a user I shouldn't see\")\n      assert_raises(ArvadosModel::PermissionDeniedError,\n                    ActiveRecord::RecordInvalid,\n                    \"gave can_read permission to a user I shouldn't see\") do\n        create(:permission_link,\n               name: 'can_read', tail_uuid: minion.uuid, head_uuid: g.uuid)\n      end\n      %w(can_manage can_write can_read).each do |perm_type|\n        assert_raises(ArvadosModel::PermissionDeniedError,\n                      ActiveRecord::RecordInvalid,\n                      \"escalated privileges\") do\n          create(:permission_link,\n                 name: perm_type, tail_uuid: g.uuid, head_uuid: minion.uuid)\n        end\n      end\n      assert_empty(User.readable_by(manager).where(uuid: minion.uuid),\n                   \"manager saw minion too soon\")\n      assert_empty(User.readable_by(minion).where(uuid: manager.uuid),\n                   \"minion saw manager too soon\")\n      assert_empty(Group.readable_by(minion).where(uuid: g.uuid),\n                   \"minion saw manager's new NoBigSecret Lab group too soon\")\n\n      # Manager declares everybody on the system should be able to see\n      # the NoBigSecret Lab group.\n      create(:permission_link,\n             name: 'can_read',\n             tail_uuid: 'zzzzz-j7d0g-fffffffffffffff',\n             head_uuid: g.uuid)\n      # ...but nobody has joined the group yet. Manager still can't see\n      # minion.\n      assert_empty(User.readable_by(manager).where(uuid: minion.uuid),\n                   \"manager saw minion too soon\")\n    end\n\n    act_as_user minion do\n      # Minion can see the group.\n      assert_not_empty(Group.readable_by(minion).where(uuid: g.uuid),\n                       \"minion could not see the NoBigSecret Lab group\")\n      # Minion joins the group.\n      create(:permission_link,\n             name: 'can_read',\n             tail_uuid: g.uuid,\n             head_uuid: minion.uuid)\n    end\n\n    act_as_user manager do\n      # Now, manager can see minion.\n      assert_not_empty(User.readable_by(manager).where(uuid: minion.uuid),\n                       \"manager could not see minion\")\n      # But cannot obtain further privileges this way.\n      assert_raises(ArvadosModel::PermissionDeniedError,\n                    \"escalated privileges\") do\n        create(:permission_link,\n               name: 'can_manage', tail_uuid: manager.uuid, head_uuid: minion.uuid)\n      end\n      assert_empty(Collection\n                     .readable_by(manager)\n                     .where(uuid: minions_collection.uuid),\n                   \"manager saw the minion's private stuff\")\n      assert_raises(ArvadosModel::PermissionDeniedError,\n                   \"manager could update minion's private stuff\") do\n        minions_collection.update(properties: {'x' => 'y'})\n      end\n    end\n\n    act_as_system_user do\n      # Root can give Manager more privileges over Minion.\n      create(:permission_link,\n             name: 'can_manage', tail_uuid: g.uuid, head_uuid: minion.uuid)\n    end\n\n    act_as_user manager do\n      # Now, manager can read and write Minion's stuff.\n      assert_not_empty(Collection\n                         .readable_by(manager)\n    
                     .where(uuid: minions_collection.uuid),\n                       \"manager could not find minion's collection by uuid\")\n      assert_equal(true,\n                   minions_collection.update(properties: {'x' => 'y'}),\n                   \"manager could not update minion's collection object\")\n    end\n  end\n\n  test \"users with bidirectional read permission in group can see each other, but cannot see each other's private stuff\" do\n    Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = false\n    a = create :active_user, first_name: \"A\"\n    b = create :active_user, first_name: \"B\"\n    other = create :active_user, first_name: \"OTHER\"\n\n    assert_empty(User.readable_by(b).where(uuid: a.uuid),\n                     \"#{b.first_name} should not be able to see 'a' in the user list\")\n    assert_empty(User.readable_by(a).where(uuid: b.uuid),\n                     \"#{a.first_name} should not be able to see 'b' in the user list\")\n\n    act_as_system_user do\n      g = create :group, group_class: \"role\"\n      [a,b].each do |u|\n        create(:permission_link,\n               name: 'can_read', tail_uuid: u.uuid, head_uuid: g.uuid)\n        create(:permission_link,\n               name: 'can_read', head_uuid: u.uuid, tail_uuid: g.uuid)\n      end\n    end\n\n    assert_not_empty(User.readable_by(b).where(uuid: a.uuid),\n                     \"#{b.first_name} should be able to see 'a' in the user list\")\n    assert_not_empty(User.readable_by(a).where(uuid: b.uuid),\n                     \"#{a.first_name} should be able to see 'b' in the user list\")\n\n    a_collection = act_as_user a do\n      Collection.create!\n    end\n    assert_not_empty(Collection.readable_by(a).where(uuid: a_collection.uuid),\n                     \"A cannot read own Collection, following test probably useless.\")\n    assert_empty(Collection.readable_by(b).where(uuid: a_collection.uuid),\n                 \"B can read A's Collection\")\n    [a,b].each do |u|\n      assert_empty(User.readable_by(u).where(uuid: other.uuid),\n                   \"#{u.first_name} can see OTHER in the user list\")\n      assert_empty(User.readable_by(other).where(uuid: u.uuid),\n                   \"OTHER can see #{u.first_name} in the user list\")\n      act_as_user u do\n        assert_raises ArvadosModel::PermissionDeniedError, \"wrote without perm\" do\n          other.update!(prefs: {'pwned' => true})\n        end\n        assert_equal(true, u.update!(prefs: {'thisisme' => true}),\n                     \"#{u.first_name} can't update its own prefs\")\n      end\n      act_as_user other do\n        assert_raises(ArvadosModel::PermissionDeniedError,\n                        \"OTHER wrote #{u.first_name} without perm\") do\n          u.update!(prefs: {'pwned' => true})\n        end\n        assert_equal(true, other.update!(prefs: {'thisisme' => true}),\n                     \"OTHER can't update its own prefs\")\n      end\n    end\n  end\n\n  test \"cannot create with owner = unwritable user\" do\n    set_user_from_auth :rominiadmin\n    assert_raises ArvadosModel::PermissionDeniedError, \"created with owner = unwritable user\" do\n      Collection.create!(owner_uuid: users(:active).uuid)\n    end\n  end\n\n  test \"cannot change owner to unwritable user\" do\n    set_user_from_auth :rominiadmin\n    ob = Collection.create!\n    assert_raises ArvadosModel::PermissionDeniedError, \"changed owner to unwritable user\" do\n      ob.update!(owner_uuid: users(:active).uuid)\n    end\n  end\n\n  
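# The two tests above and the two below exercise the same ownership\n  # rule: a client may only set owner_uuid to a user or group it has\n  # write permission on. A minimal sketch of the idiom, using the same\n  # fixtures (rominiadmin has no write access to the active user):\n  #\n  #   set_user_from_auth :rominiadmin\n  #   ob = Collection.create!\n  #   ob.update!(owner_uuid: users(:active).uuid)  # raises ArvadosModel::PermissionDeniedError\n  #\n\n  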
test \"cannot create with owner = unwritable group\" do\n    set_user_from_auth :rominiadmin\n    assert_raises ArvadosModel::PermissionDeniedError, \"created with owner = unwritable group\" do\n      Collection.create!(owner_uuid: groups(:aproject).uuid)\n    end\n  end\n\n  test \"cannot change owner to unwritable group\" do\n    set_user_from_auth :rominiadmin\n    ob = Collection.create!\n    assert_raises ArvadosModel::PermissionDeniedError, \"changed owner to unwritable group\" do\n      ob.update!(owner_uuid: groups(:aproject).uuid)\n    end\n  end\n\n  def container_logs(container, user)\n    Log.readable_by(users(user)).\n      where(object_uuid: containers(container).uuid, event_type: \"test\")\n  end\n\n  test \"container logs created by dispatch are visible to container requestor\" do\n    set_user_from_auth :system_user\n    Log.create!(object_uuid: containers(:running).uuid,\n                event_type: \"test\")\n\n    assert_not_empty container_logs(:running, :admin)\n    assert_not_empty container_logs(:running, :active)\n    assert_empty container_logs(:running, :spectator)\n  end\n\n  test \"container logs created by dispatch are public if container request is public\" do\n    set_user_from_auth :system_user\n    Log.create!(object_uuid: containers(:running_older).uuid,\n                event_type: \"test\")\n\n    assert_not_empty container_logs(:running_older, :anonymous)\n  end\n\n  test \"add user to group, then remove them\" do\n    set_user_from_auth :admin\n    grp = Group.create!(owner_uuid: system_user_uuid, group_class: \"role\")\n    col = Collection.create!(owner_uuid: system_user_uuid)\n\n    l0 = Link.create!(tail_uuid: grp.uuid,\n                 head_uuid: col.uuid,\n                 link_class: 'permission',\n                 name: 'can_read')\n\n    assert_empty Collection.readable_by(users(:active)).where(uuid: col.uuid)\n    assert_empty User.readable_by(users(:active)).where(uuid: users(:project_viewer).uuid)\n\n    l1 = Link.create!(tail_uuid: users(:active).uuid,\n                 head_uuid: grp.uuid,\n                 link_class: 'permission',\n                 name: 'can_read')\n    l2 = Link.create!(tail_uuid: grp.uuid,\n                 head_uuid: users(:active).uuid,\n                 link_class: 'permission',\n                 name: 'can_read')\n\n    l3 = Link.create!(tail_uuid: users(:project_viewer).uuid,\n                 head_uuid: grp.uuid,\n                 link_class: 'permission',\n                 name: 'can_read')\n    l4 = Link.create!(tail_uuid: grp.uuid,\n                 head_uuid: users(:project_viewer).uuid,\n                 link_class: 'permission',\n                 name: 'can_read')\n\n    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).first\n    assert User.readable_by(users(:active)).where(uuid: users(:project_viewer).uuid).first\n\n    l1.destroy\n    l2.destroy\n\n    assert_empty Collection.readable_by(users(:active)).where(uuid: col.uuid)\n    assert_empty User.readable_by(users(:active)).where(uuid: users(:project_viewer).uuid)\n\n  end\n\n\n  test \"add user to group, then change permission level\" do\n    set_user_from_auth :admin\n    grp = Group.create!(owner_uuid: system_user_uuid, group_class: \"project\")\n    col = Collection.create!(owner_uuid: grp.uuid)\n    assert_empty Collection.readable_by(users(:active)).where(uuid: col.uuid)\n    assert_empty User.readable_by(users(:active)).where(uuid: users(:project_viewer).uuid)\n\n    l1 = Link.create!(tail_uuid: users(:active).uuid,\n       
          head_uuid: grp.uuid,\n                 link_class: 'permission',\n                 name: 'can_manage')\n\n    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).first\n    assert users(:active).can?(read: col.uuid)\n    assert users(:active).can?(write: col.uuid)\n    assert users(:active).can?(manage: col.uuid)\n\n    l1.name = 'can_read'\n    l1.save!\n\n    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).first\n    assert users(:active).can?(read: col.uuid)\n    assert !users(:active).can?(write: col.uuid)\n    assert !users(:active).can?(manage: col.uuid)\n\n    l1.name = 'can_write'\n    l1.save!\n\n    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).first\n    assert users(:active).can?(read: col.uuid)\n    assert users(:active).can?(write: col.uuid)\n    assert !users(:active).can?(manage: col.uuid)\n  end\n\n\n  test \"add user to group, then add overlapping permission link to group\" do\n    set_user_from_auth :admin\n    grp = Group.create!(owner_uuid: system_user_uuid, group_class: \"project\")\n    col = Collection.create!(owner_uuid: grp.uuid)\n    assert_empty Collection.readable_by(users(:active)).where(uuid: col.uuid)\n    assert_empty User.readable_by(users(:active)).where(uuid: users(:project_viewer).uuid)\n\n    l1 = Link.create!(tail_uuid: users(:active).uuid,\n                 head_uuid: grp.uuid,\n                 link_class: 'permission',\n                 name: 'can_manage')\n\n    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).first\n    assert users(:active).can?(read: col.uuid)\n    assert users(:active).can?(write: col.uuid)\n    assert users(:active).can?(manage: col.uuid)\n\n    l3 = Link.create!(tail_uuid: users(:active).uuid,\n                 head_uuid: grp.uuid,\n                 link_class: 'permission',\n                 name: 'can_read')\n\n    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).first\n    assert users(:active).can?(read: col.uuid)\n    assert users(:active).can?(write: col.uuid)\n    assert users(:active).can?(manage: col.uuid)\n\n    # Creating l3 should have automatically deleted l1 and upgraded to\n    # the max permission of {l1, l3}, i.e., can_manage (see #18693) so\n    # there should be no can_read link now.\n    refute Link.where(tail_uuid: l3.tail_uuid,\n                      head_uuid: l3.head_uuid,\n                      link_class: 'permission',\n                      name: 'can_read').any?\n\n    assert Collection.readable_by(users(:active)).where(uuid: col.uuid).first\n    assert users(:active).can?(read: col.uuid)\n    assert users(:active).can?(write: col.uuid)\n    assert users(:active).can?(manage: col.uuid)\n  end\n\n\n  test \"add user to group, then add overlapping permission link to subproject\" do\n    set_user_from_auth :admin\n    grp = Group.create!(owner_uuid: system_user_uuid, group_class: \"role\")\n    prj = Group.create!(owner_uuid: system_user_uuid, group_class: \"project\")\n\n    l0 = Link.create!(tail_uuid: grp.uuid,\n                 head_uuid: prj.uuid,\n                 link_class: 'permission',\n                 name: 'can_manage')\n\n    assert_empty Group.readable_by(users(:active)).where(uuid: prj.uuid)\n    assert_empty User.readable_by(users(:active)).where(uuid: users(:project_viewer).uuid)\n\n    l1 = Link.create!(tail_uuid: users(:active).uuid,\n                 head_uuid: grp.uuid,\n                 link_class: 'permission',\n                 name: 'can_manage')\n    l2 = 
Link.create!(tail_uuid: grp.uuid,\n                 head_uuid: users(:active).uuid,\n                 link_class: 'permission',\n                 name: 'can_read')\n\n    assert Group.readable_by(users(:active)).where(uuid: prj.uuid).first\n    assert users(:active).can?(read: prj.uuid)\n    assert users(:active).can?(write: prj.uuid)\n    assert users(:active).can?(manage: prj.uuid)\n\n    l3 = Link.create!(tail_uuid: grp.uuid,\n                 head_uuid: prj.uuid,\n                 link_class: 'permission',\n                 name: 'can_read')\n\n    assert Group.readable_by(users(:active)).where(uuid: prj.uuid).first\n    assert users(:active).can?(read: prj.uuid)\n    assert users(:active).can?(write: prj.uuid)\n    assert users(:active).can?(manage: prj.uuid)\n\n    # Creating l3 should have automatically deleted l0 and upgraded to\n    # the max permission of {l0, l3}, i.e., can_manage (see #18693) so\n    # there should be no can_read link now.\n    refute Link.where(tail_uuid: l3.tail_uuid,\n                      head_uuid: l3.head_uuid,\n                      link_class: 'permission',\n                      name: 'can_read').any?\n\n    assert Group.readable_by(users(:active)).where(uuid: prj.uuid).first\n    assert users(:active).can?(read: prj.uuid)\n    assert users(:active).can?(write: prj.uuid)\n    assert users(:active).can?(manage: prj.uuid)\n  end\n\n  [system_user_uuid, anonymous_user_uuid].each do |u|\n    test \"cannot delete system user #{u}\" do\n      act_as_system_user do\n        assert_raises ArvadosModel::PermissionDeniedError do\n          User.find_by_uuid(u).destroy\n        end\n      end\n    end\n  end\n\n  [system_group_uuid, anonymous_group_uuid, public_project_uuid].each do |g|\n    test \"cannot delete system group #{g}\" do\n      act_as_system_user do\n        assert_raises ArvadosModel::PermissionDeniedError do\n          Group.find_by_uuid(g).destroy\n        end\n      end\n    end\n  end\n\n  # Show query plan for readable_by query. The plan for a test db\n  # might not resemble the plan for a production db, but it doesn't\n  # hurt to show the test db plan in test logs.\n  [false, true].each do |include_trash|\n    test \"query plan, include_trash=#{include_trash}\" do\n      sql = Collection.readable_by(users(:active), include_trash: include_trash).to_sql\n      sql = \"explain analyze #{sql}\"\n      STDERR.puts sql\n      q = ActiveRecord::Base.connection.exec_query(sql)\n      q.rows.each do |row| STDERR.puts(row) end\n    end\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/salvage_collection_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\nrequire 'salvage_collection'\nrequire 'shellwords'\n\n# Valid manifest_text\nTEST_MANIFEST = \". 341dabea2bd78ad0d6fc3f5b926b450e+85626+Ad391622a17f61e4a254eda85d1ca751c4f368da9@55e076ce 0:85626:brca2-hg19.fa\\n. d7321a918923627c972d8f8080c07d29+82570+A22e0a1d9b9bc85c848379d98bedc64238b0b1532@55e076ce 0:82570:brca1-hg19.fa\\n\"\nTEST_MANIFEST_STRIPPED = \". 341dabea2bd78ad0d6fc3f5b926b450e+85626 0:85626:brca2-hg19.fa\\n. d7321a918923627c972d8f8080c07d29+82570 0:82570:brca1-hg19.fa\\n\"\n\n# This invalid manifest_text has the following flaws:\n#   Missing stream name with locator in it's place\n#   Invalid locators:\n#     foofaafaafaabd78ad0d6fc3f5b926b450e+foo\n#     bar-baabaabaabd78ad0d6fc3f5b926b450e\n#     bad12345dae58ad0d6fc3f5b926b450e+\n#     341dabea2bd78ad0d6fc3f5b926b450e+abc\n#     341dabea2bd78ad0d6fc3f5b926abcdf\n# Expectation: All these locators are preserved in salvaged_data\nBAD_MANIFEST = \"faafaafaabd78ad0d6fc3f5b926b450e+foo bar-baabaabaabd78ad0d6fc3f5b926b450e_bad12345dae58ad0d6fc3f5b926b450e+ 341dabea2bd78ad0d6fc3f5b926b450e+abc 341dabea2bd78ad0d6fc3f5b926abcdf 0:85626:brca2-hg19.fa\\n. abcdabea2bd78ad0d6fc3f5b926b450e+1000 0:1000:brca-hg19.fa\\n. d7321a918923627c972d8f8080c07d29+2000+A22e0a1d9b9bc85c848379d98bedc64238b0b1532@55e076ce 0:2000:brca1-hg19.fa\\n\"\n\nclass SalvageCollectionTest < ActiveSupport::TestCase\n  include SalvageCollection\n\n  setup do\n    set_user_from_auth :admin\n    # arv-put needs ARV env variables\n    ENV['ARVADOS_API_HOST'] = 'unused_by_test'\n    ENV['ARVADOS_API_TOKEN'] = 'unused_by_test'\n    @backtick_mock_failure = false\n  end\n\n  teardown do\n    ENV['ARVADOS_API_HOST'] = ''\n    ENV['ARVADOS_API_TOKEN'] = ''\n  end\n\n  def ` cmd # mock Kernel `\n    assert_equal 'arv-put', cmd.shellsplit[0]\n    if @backtick_mock_failure\n      # run a process so $? indicates failure\n      return super 'false'\n    end\n    # run a process so $? indicates success\n    super 'true'\n    file_contents = File.open(cmd.shellsplit[-1], \"r\").read\n    \". \" +\n      Digest::MD5.hexdigest(file_contents) + \"+\" + file_contents.length.to_s +\n      \" 0:\" + file_contents.length.to_s + \":invalid_manifest_text.txt\\n\"\n  end\n\n  test \"salvage test collection with valid manifest text\" do\n    # create a collection to test salvaging\n    src_collection = Collection.new name: \"test collection\", manifest_text: TEST_MANIFEST\n    src_collection.save!\n\n    # salvage this collection\n    salvage_collection src_collection.uuid, 'test salvage collection - see #6277, #6859'\n\n    # verify the updated src_collection data\n    updated_src_collection = Collection.find_by_uuid src_collection.uuid\n    updated_name = updated_src_collection.name\n    assert_equal true, updated_name.include?(src_collection.name)\n\n    match = updated_name.match(/^test collection.*salvaged data at (.*)\\)$/)\n    assert_not_nil match\n    assert_not_nil match[1]\n    assert_empty updated_src_collection.manifest_text\n\n    # match[1] is the uuid of the new collection created from src_collection's salvaged data\n    # use this to get the new collection and verify\n    new_collection = Collection.find_by_uuid match[1]\n    match = new_collection.name.match(/^salvaged from (.*),.*/)\n    assert_not_nil match\n    assert_equal src_collection.uuid, match[1]\n\n    # verify the new collection's manifest format\n    expected_manifest = \". 
\" + Digest::MD5.hexdigest(TEST_MANIFEST_STRIPPED) + \"+\" +\n      TEST_MANIFEST_STRIPPED.length.to_s + \" 0:\" + TEST_MANIFEST_STRIPPED.length.to_s +\n      \":invalid_manifest_text.txt\\n. 341dabea2bd78ad0d6fc3f5b926b450e+85626 d7321a918923627c972d8f8080c07d29+82570 0:168196:salvaged_data\\n\"\n    assert_equal expected_manifest, new_collection.manifest_text\n  end\n\n  test \"salvage collection with no uuid required argument\" do\n    assert_raises RuntimeError do\n      salvage_collection nil\n    end\n  end\n\n  test \"salvage collection with bogus uuid\" do\n    e = assert_raises RuntimeError do\n      salvage_collection 'bogus-uuid'\n    end\n    assert_equal \"No collection found for bogus-uuid.\", e.message\n  end\n\n  test \"salvage collection with no env ARVADOS_API_HOST\" do\n    e = assert_raises RuntimeError do\n      ENV['ARVADOS_API_HOST'] = ''\n      ENV['ARVADOS_API_TOKEN'] = ''\n      salvage_collection collections('user_agreement').uuid\n    end\n    assert_equal \"ARVADOS environment variables missing. Please set your admin user credentials as ARVADOS environment variables.\", e.message\n  end\n\n  test \"salvage collection with error during arv-put\" do\n    # try to salvage collection while mimicking error during arv-put\n    @backtick_mock_failure = true\n    e = assert_raises RuntimeError do\n      salvage_collection collections('user_agreement').uuid\n    end\n    assert_match(/Error during arv-put: pid \\d+ exit \\d+ \\(cmd was \\\"arv-put .*\\\"\\)/, e.message)\n  end\n\n  # This test uses BAD_MANIFEST, which has the following flaws:\n  #   Missing stream name with locator in it's place\n  #   Invalid locators:\n  #     foo-faafaafaabd78ad0d6fc3f5b926b450e+foo\n  #     bar-baabaabaabd78ad0d6fc3f5b926b450e\n  #     bad12345dae58ad0d6fc3f5b926b450e+\n  #     341dabea2bd78ad0d6fc3f5b926b450e+abc\n  #     341dabea2bd78ad0d6fc3f5b926abcdf\n  # Expectation: All these locators are preserved in salvaged_data\n  test \"invalid locators preserved during salvaging\" do\n    locator_data = salvage_collection_locator_data BAD_MANIFEST\n    assert_equal \\\n    [\"faafaafaabd78ad0d6fc3f5b926b450e\",\n     \"baabaabaabd78ad0d6fc3f5b926b450e\",\n     \"bad12345dae58ad0d6fc3f5b926b450e\",\n     \"341dabea2bd78ad0d6fc3f5b926b450e\",\n     \"341dabea2bd78ad0d6fc3f5b926abcdf\",\n     \"abcdabea2bd78ad0d6fc3f5b926b450e+1000\",\n     \"d7321a918923627c972d8f8080c07d29+2000\",\n    ], locator_data[0]\n    assert_equal 1000+2000, locator_data[1]\n  end\n\n  test \"salvage a collection with invalid manifest text\" do\n    # create a collection to test salvaging\n    src_collection = Collection.new name: \"test collection\", manifest_text: BAD_MANIFEST, owner_uuid: 'zzzzz-tpzed-000000000000000'\n    src_collection.save!(validate: false)\n\n    # salvage this collection\n    salvage_collection src_collection.uuid, 'test salvage collection - see #6277, #6859'\n\n    # verify the updated src_collection data\n    updated_src_collection = Collection.find_by_uuid src_collection.uuid\n    updated_name = updated_src_collection.name\n    assert_equal true, updated_name.include?(src_collection.name)\n\n    match = updated_name.match(/^test collection.*salvaged data at (.*)\\)$/)\n    assert_not_nil match\n    assert_not_nil match[1]\n    assert_empty updated_src_collection.manifest_text\n\n    # match[1] is the uuid of the new collection created from src_collection's salvaged data\n    # use this to get the new collection and verify\n    new_collection = Collection.find_by_uuid match[1]\n    
match = new_collection.name.match(/^salvaged from (.*),.*/)\n    assert_not_nil match\n    assert_equal src_collection.uuid, match[1]\n    # verify the new collection's manifest includes the bad locators\n    expected_manifest = \". \" + Digest::MD5.hexdigest(BAD_MANIFEST) + \"+\" + BAD_MANIFEST.length.to_s +\n      \" 0:\" + BAD_MANIFEST.length.to_s + \":invalid_manifest_text.txt\\n. faafaafaabd78ad0d6fc3f5b926b450e baabaabaabd78ad0d6fc3f5b926b450e bad12345dae58ad0d6fc3f5b926b450e 341dabea2bd78ad0d6fc3f5b926b450e 341dabea2bd78ad0d6fc3f5b926abcdf abcdabea2bd78ad0d6fc3f5b926b450e+1000 d7321a918923627c972d8f8080c07d29+2000 0:3000:salvaged_data\\n\"\n    assert_equal expected_manifest, new_collection.manifest_text\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/seralizer_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\nrequire 'serializers'\n\nclass SerializerTest < ActiveSupport::TestCase\n  test 'serialize' do\n    assert_equal('{}', HashSerializer.dump({}))\n    assert_equal('{\"foo\":\"bar\"}', HashSerializer.dump(foo: 'bar'))\n    assert_equal('{\"foo\":\"bar\"}', HashSerializer.dump('foo' => 'bar'))\n    assert_equal('[]', ArraySerializer.dump([]))\n    assert_equal('[\"foo\",{\"foo\":\"bar\"}]',\n                 ArraySerializer.dump(['foo', 'foo' => 'bar']))\n    assert_equal(['foo'],\n                 ArraySerializer.load(ArraySerializer.dump([:foo])))\n    assert_equal([1,'bar'],\n                 ArraySerializer.load(ArraySerializer.dump([1,'bar'])))\n  end\n\n  test 'load array that was saved as json, then mangled by an old version' do\n    assert_equal(['foo'],\n                 ArraySerializer.load(YAML.dump(ArraySerializer.dump(['foo']))))\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/time_zone_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass TimeZoneTest < ActiveSupport::TestCase\n  test \"Database connection time zone\" do\n    # This is pointless if the testing host is already using the UTC\n    # time zone.  But if not, the test confirms that\n    # config/initializers/time_zone.rb has successfully changed the\n    # database connection time zone to UTC.\n    assert_equal('UTC', ActiveRecord::Base.connection.select_value(\"show timezone\"))\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/user_notifier_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass UserNotifierTest < ActionMailer::TestCase\n\n  # Send the email, then test that it got queued\n  test \"account is setup\" do\n    user = users :active\n\n    Rails.configuration.Users.UserNotifierEmailBcc = ConfigLoader.to_OrderedOptions({\"bcc-notify@example.com\"=>{},\"bcc-notify2@example.com\"=>{}})\n    Rails.configuration.Users.UserSetupMailText = %{\n<% if not @user.full_name.empty? -%>\n<%= @user.full_name %>,\n<% else -%>\nHi there,\n<% end -%>\n\nYour Arvados shell account has been set up. Please visit the virtual machines page <% if Rails.configuration.Services.Workbench1.ExternalURL %>at\n\n<%= Rails.configuration.Services.Workbench1.ExternalURL %><%= \"/\" if !Rails.configuration.Services.Workbench1.ExternalURL.to_s.end_with?(\"/\") %>users/<%= @user.uuid%>/virtual_machines <% else %><% end %>\n\nfor connection instructions.\n\nThanks,\nThe Arvados team.\n}\n\n    email = UserNotifier.account_is_setup user\n\n    assert_not_nil email\n\n    # Test the body of the sent email contains what we expect it to\n    assert_equal Rails.configuration.Users.UserNotifierEmailFrom, email.from.first\n    assert_equal Rails.configuration.Users.UserNotifierEmailBcc.stringify_keys.keys, email.bcc\n    assert_equal user.email, email.to.first\n    assert_equal 'Welcome to Arvados - account enabled', email.subject\n    assert (email.body.to_s.include? 'Your Arvados shell account has been set up'),\n        'Expected Your Arvados shell account has been set up in email body'\n    assert (email.body.to_s.include? Rails.configuration.Services.Workbench1.ExternalURL.to_s),\n        'Expected workbench url in email body'\n  end\n\nend\n"
  },
  {
    "path": "services/api/test/unit/user_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass UserTest < ActiveSupport::TestCase\n  include CurrentApiClient\n\n  # The fixture services/api/test/fixtures/users.yml serves as the input for this test case\n  setup do\n    # Make sure system_user exists before making \"pre-test users\" list\n    system_user\n  end\n\n  %w(a aa a0 aA Aa AA A0).each do |username|\n    test \"#{username.inspect} is a valid username\" do\n      user = User.new(username: username)\n      assert(user.valid?)\n    end\n  end\n\n  test \"username is not required\" do\n    user = User.new(username: nil)\n    assert(user.valid?)\n  end\n\n  test \"username beginning with numeral is invalid\" do\n    user = User.new(username: \"0a\")\n    refute(user.valid?)\n  end\n\n  \"\\\\.-_/!@#$%^&*()[]{}\".each_char do |bad_char|\n    test \"username containing #{bad_char.inspect} is invalid\" do\n      user = User.new(username: \"bad#{bad_char}username\")\n      refute(user.valid?)\n    end\n  end\n\n  test \"username must be unique\" do\n    user = User.new(username: users(:active).username)\n    refute(user.valid?)\n  end\n\n  test \"non-admin can't update username\" do\n    set_user_from_auth :rominiadmin\n    user = User.find_by_uuid(users(:rominiadmin).uuid)\n    user.username = \"selfupdate\"\n    assert_not_allowed { user.save }\n  end\n\n  def check_admin_username_change(fixture_name)\n    set_user_from_auth :admin_trustedclient\n    user = User.find_by_uuid(users(fixture_name).uuid)\n    user.username = \"newnamefromtest\"\n    assert(user.save)\n  end\n\n  test \"admin can set username\" do\n    check_admin_username_change(:active_no_prefs)\n  end\n\n  test \"admin can update username\" do\n    check_admin_username_change(:active)\n  end\n\n  test \"admin can update own username\" do\n    check_admin_username_change(:admin)\n  end\n\n  def check_new_username_setting(email_name, expect_name)\n    set_user_from_auth :admin\n    user = User.create!(email: \"#{email_name}@example.org\")\n    assert_equal(expect_name, user.username)\n  end\n\n  test \"new username set from e-mail\" do\n    check_new_username_setting(\"dakota\", \"dakota\")\n  end\n\n  test \"new username set from e-mail with leading digits\" do\n    check_new_username_setting(\"1dakota9\", \"dakota9\")\n  end\n\n  test \"new username set from e-mail with punctuation\" do\n    check_new_username_setting(\"dakota.9\", \"dakota9\")\n  end\n\n  test \"new username set from e-mail with leading digits and punctuation\" do\n    check_new_username_setting(\"1.dakota.z\", \"dakotaz\")\n  end\n\n  test \"new username set from e-mail with extra part\" do\n    check_new_username_setting(\"dakota+arvados\", \"dakota\")\n  end\n\n  test \"new username set with deduplication\" do\n    name = users(:active).username\n    check_new_username_setting(name, \"#{name}2\")\n    check_new_username_setting(name, \"#{name}3\")\n    # Insert some out-of-order conflicts, to ensure our \"sort by\n    # username, stop when we see a hole\" strategy doesn't depend on\n    # insert order.\n    check_new_username_setting(\"#{name}13\", \"#{name}13\")\n    check_new_username_setting(\"#{name}5\", \"#{name}5\")\n    check_new_username_setting(name, \"#{name}4\")\n    6.upto(12).each do |n|\n      check_new_username_setting(name, \"#{name}#{n}\")\n    end\n  end\n\n  test \"new username set avoiding blacklist\" do\n    Rails.configuration.Users.AutoSetupUsernameBlacklist = 
ConfigLoader.to_OrderedOptions({\"root\"=>{}})\n    check_new_username_setting(\"root\", \"root2\")\n  end\n\n  test \"no username set when no base available\" do\n    check_new_username_setting(\"_\", nil)\n  end\n\n  test \"admin can clear username\" do\n    set_user_from_auth :admin\n    user = users(:spectator)\n    user.username = nil\n    assert(user.save)\n    assert_nil(user.username)\n  end\n\n  [[false, 'foo@example.com', true, false],\n   [false, 'bar@example.com', false, true],\n   [true, 'foo@example.com', true, false],\n   [true, 'bar@example.com', true, true],\n   [false, '', false, false],\n   [true, '', true, false]\n  ].each do |auto_admin_first_user_config, auto_admin_user_config, foo_should_be_admin, bar_should_be_admin|\n    # In each case, 'foo' is created first, then 'bar', then 'bar2', then 'baz'.\n    test \"auto admin with auto_admin_first=#{auto_admin_first_user_config} auto_admin=#{auto_admin_user_config}\" do\n\n      if auto_admin_first_user_config\n        # This test requires no admin users exist (except for the system user)\n        act_as_system_user do\n          users(:admin).update!(is_admin: false)\n        end\n        @all_users = User.where(\"uuid not like '%-000000000000000'\").where(:is_admin => true)\n        assert_equal 0, @all_users.count, \"No admin users should exist (except for the system user)\"\n      end\n\n      Rails.configuration.Users.AutoAdminFirstUser = auto_admin_first_user_config\n      Rails.configuration.Users.AutoAdminUserWithEmail = auto_admin_user_config\n\n      # See if the foo user has is_admin\n      foo = User.new\n      foo.first_name = 'foo'\n      foo.email = 'foo@example.com'\n\n      act_as_system_user do\n        foo.save!\n      end\n\n      foo = User.find(foo.id)   # get the user back\n      assert_equal foo_should_be_admin, foo.is_admin, \"is_admin is wrong for user foo\"\n      assert_equal 'foo', foo.first_name\n\n      # See if the bar user has is_admin\n      bar = User.new\n      bar.first_name = 'bar'\n      bar.email = 'bar@example.com'\n\n      act_as_system_user do\n        bar.save!\n      end\n\n      bar = User.find(bar.id)   # get the user back\n      assert_equal bar_should_be_admin, bar.is_admin, \"is_admin is wrong for user bar\"\n      assert_equal 'bar', bar.first_name\n\n      # A subsequent user with the bar@example.com address should never be\n      # elevated to admin\n      bar2 = User.new\n      bar2.first_name = 'bar2'\n      bar2.email = 'bar@example.com'\n\n      act_as_system_user do\n        bar2.save!\n      end\n\n      bar2 = User.find(bar2.id)   # get the user back\n      assert !bar2.is_admin, \"is_admin is wrong for user bar2\"\n      assert_equal 'bar2', bar2.first_name\n\n      # An ordinary new user should not be elevated to admin\n      baz = User.new\n      baz.first_name = 'baz'\n      baz.email = 'baz@example.com'\n\n      act_as_system_user do\n        baz.save!\n      end\n\n      baz = User.find(baz.id)   # get the user back\n      assert !baz.is_admin\n      assert_equal 'baz', baz.first_name\n\n    end\n  end\n\n  test \"check non-admin active user properties\" do\n    @active_user = users(:active)     # get the active user\n    assert !@active_user.is_admin, 'is_admin should not be set for a non-admin user'\n    assert @active_user.is_active, 'user should be active'\n    assert @active_user.is_invited, 'is_invited should be set'\n    assert_not_nil @active_user.prefs, \"user's preferences should be non-null, but may be size zero\"\n    assert (@active_user.can? 
:read=>\"#{@active_user.uuid}\"), \"user should be able to read own object\"\n    assert (@active_user.can? :write=>\"#{@active_user.uuid}\"), \"user should be able to write own object\"\n    assert (@active_user.can? :manage=>\"#{@active_user.uuid}\"), \"user should be able to manage own object\"\n\n    assert @active_user.groups_i_can(:read).size > 0, \"active user should be able read at least one group\"\n\n    # non-admin user cannot manage or write other user objects\n    @uninvited_user = users(:inactive_uninvited)     # get the uninvited user\n    assert !(@active_user.can? :read=>\"#{@uninvited_user.uuid}\")\n    assert !(@active_user.can? :write=>\"#{@uninvited_user.uuid}\")\n    assert !(@active_user.can? :manage=>\"#{@uninvited_user.uuid}\")\n  end\n\n  test \"check admin user properties\" do\n    @admin_user = users(:admin)     # get the admin user\n    assert @admin_user.is_admin, 'is_admin should be set for admin user'\n    assert @admin_user.is_active, 'admin user cannot be inactive'\n    assert @admin_user.is_invited, 'is_invited should be set'\n    assert_not_nil @admin_user.uuid.size, \"user's uuid should be non-null\"\n    assert_not_nil @admin_user.prefs, \"user's preferences should be non-null, but may be size zero\"\n    assert @admin_user.identity_url.size > 0, \"user's identity url is expected\"\n    assert @admin_user.can? :read=>\"#{@admin_user.uuid}\"\n    assert @admin_user.can? :write=>\"#{@admin_user.uuid}\"\n    assert @admin_user.can? :manage=>\"#{@admin_user.uuid}\"\n\n    assert @admin_user.groups_i_can(:read).size > 0, \"admin active user should be able read at least one group\"\n    assert @admin_user.groups_i_can(:write).size > 0, \"admin active user should be able write to at least one group\"\n    assert @admin_user.groups_i_can(:manage).size > 0, \"admin active user should be able manage at least one group\"\n\n    # admin user can also write or manage other users\n    @uninvited_user = users(:inactive_uninvited)     # get the uninvited user\n    assert @admin_user.can? :read=>\"#{@uninvited_user.uuid}\"\n    assert @admin_user.can? :write=>\"#{@uninvited_user.uuid}\"\n    assert @admin_user.can? :manage=>\"#{@uninvited_user.uuid}\"\n  end\n\n  test \"check inactive and uninvited user properties\" do\n    @uninvited_user = users(:inactive_uninvited)     # get the uninvited user\n    assert !@uninvited_user.is_admin, 'is_admin should not be set for a non-admin user'\n    assert !@uninvited_user.is_active, 'user should be inactive'\n    assert !@uninvited_user.is_invited, 'is_invited should not be set'\n    assert @uninvited_user.can? :read=>\"#{@uninvited_user.uuid}\"\n    assert @uninvited_user.can? :write=>\"#{@uninvited_user.uuid}\"\n    assert @uninvited_user.can? 
:manage=>\"#{@uninvited_user.uuid}\"\n\n    assert_equal(@uninvited_user.groups_i_can(:read).sort,\n                 [@uninvited_user.uuid, groups(:anonymous_group).uuid].sort)\n    assert_equal(@uninvited_user.groups_i_can(:write),\n                 [@uninvited_user.uuid])\n    assert_equal(@uninvited_user.groups_i_can(:manage),\n                 [@uninvited_user.uuid])\n  end\n\n  test \"find user method checks\" do\n    User.all.each do |user|\n      assert_not_nil user.uuid, \"non-null uuid expected for \" + user.full_name\n    end\n\n    user = users(:active)     # get the active user\n\n    found_user = User.find(user.id)   # find a user by the row id\n\n    assert_equal found_user.full_name, user.first_name + ' ' + user.last_name\n    assert_equal found_user.identity_url, user.identity_url\n  end\n\n  test \"full name should not contain spurious whitespace\" do\n    set_user_from_auth :admin\n\n    user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: 'foo@example.com' })\n\n    assert_equal '', user.full_name\n\n    user.first_name = 'John'\n    user.last_name = 'Smith'\n\n    assert_equal user.first_name + ' ' + user.last_name, user.full_name\n  end\n\n  test \"create new user\" do\n    set_user_from_auth :admin\n\n    @all_users = User.all.to_a\n\n    user = User.new\n    user.first_name = \"first_name_for_newly_created_user\"\n    user.save\n\n    # verify there is one extra user in the db now\n    assert_equal @all_users.size+1, User.all.count\n\n    user = User.find(user.id)   # get the user back\n    assert_equal(user.first_name, 'first_name_for_newly_created_user')\n    assert_not_nil user.uuid, 'uuid should be set for newly created user'\n    assert_nil user.email, 'email should be null for newly created user, because it was not passed in'\n    assert_nil user.identity_url, 'identity_url should be null for newly created user, because it was not passed in'\n\n    user.first_name = 'first_name_for_newly_created_user_updated'\n    user.save\n    user = User.find(user.id)   # get the user back\n    assert_equal(user.first_name, 'first_name_for_newly_created_user_updated')\n  end\n\n  active_notify_list = ConfigLoader.to_OrderedOptions({\"active-notify@example.com\"=>{}})\n  inactive_notify_list = ConfigLoader.to_OrderedOptions({\"inactive-notify@example.com\"=>{}})\n  empty_notify_list = ConfigLoader.to_OrderedOptions({})\n\n  test \"create new user with notifications\" do\n    set_user_from_auth :admin\n\n    Rails.configuration.Users.AutoSetupNewUsers = false\n\n    create_user_and_verify_setup_and_notifications true, active_notify_list, inactive_notify_list, nil, nil\n    create_user_and_verify_setup_and_notifications true, active_notify_list, empty_notify_list, nil, nil\n    create_user_and_verify_setup_and_notifications true, empty_notify_list, empty_notify_list, nil, nil\n    create_user_and_verify_setup_and_notifications false, empty_notify_list, inactive_notify_list, nil, nil\n    create_user_and_verify_setup_and_notifications false, empty_notify_list, inactive_notify_list, nil, nil\n    create_user_and_verify_setup_and_notifications false, empty_notify_list, empty_notify_list, nil, nil\n  end\n\n  [\n    # Easy inactive user tests.\n    [false, empty_notify_list, empty_notify_list, \"inactive-none@example.com\", false, \"inactivenone\"],\n    [false, empty_notify_list, empty_notify_list, \"inactive-vm@example.com\", true, \"inactivevm\"],\n\n    # Easy active user tests.\n    [true, active_notify_list, inactive_notify_list, \"active-none@example.com\", 
false, \"activenone\"],\n    [true, active_notify_list, inactive_notify_list, \"active-vm@example.com\", true, \"activevm\"],\n\n    # Test users with malformed e-mail addresses.\n    [false, empty_notify_list, empty_notify_list, nil, true, nil],\n    [false, empty_notify_list, empty_notify_list, \"arvados\", true, nil],\n    [false, empty_notify_list, empty_notify_list, \"@example.com\", true, nil],\n    [true, active_notify_list, inactive_notify_list, \"*!*@example.com\", true, nil],\n    [true, active_notify_list, inactive_notify_list, \"*!*@example.com\", false, nil],\n\n    # Test users with various username transformations.\n    [false, empty_notify_list, empty_notify_list, \"arvados@example.com\", false, \"arvados2\"],\n    [true, active_notify_list, inactive_notify_list, \"arvados@example.com\", false, \"arvados2\"],\n    [true, active_notify_list, inactive_notify_list, \"root@example.com\", true, \"root2\"],\n    [false, active_notify_list, empty_notify_list, \"root@example.com\", true, \"root2\"],\n    [true, active_notify_list, inactive_notify_list, \"roo_t@example.com\", false, \"root2\"],\n    [false, empty_notify_list, empty_notify_list, \"^^incorrect_format@example.com\", true, \"incorrectformat\"],\n    [true, active_notify_list, inactive_notify_list, \"&4a_d9.@example.com\", true, \"ad9\"],\n    [true, active_notify_list, inactive_notify_list, \"&4a_d9.@example.com\", false, \"ad9\"],\n    [false, active_notify_list, empty_notify_list, \"&4a_d9.@example.com\", true, \"ad9\"],\n    [false, active_notify_list, empty_notify_list, \"&4a_d9.@example.com\", false, \"ad9\"],\n  ].each do |active, new_user_recipients, inactive_recipients, email, auto_setup_vm, expect_username|\n    test \"create new user with auto setup active=#{active} email=#{email} vm=#{auto_setup_vm}\" do\n      set_user_from_auth :admin\n\n      Rails.configuration.Users.AutoSetupNewUsers = true\n\n      if auto_setup_vm\n        Rails.configuration.Users.AutoSetupNewUsersWithVmUUID = virtual_machines(:testvm)['uuid']\n      else\n        Rails.configuration.Users.AutoSetupNewUsersWithVmUUID = \"\"\n      end\n\n      create_user_and_verify_setup_and_notifications active, new_user_recipients, inactive_recipients, email, expect_username\n    end\n  end\n\n  test \"update existing user\" do\n    set_user_from_auth :active    # set active user as current user\n\n    @active_user = users(:active)     # get the active user\n\n    @active_user.first_name = \"first_name_changed\"\n    @active_user.save\n\n    @active_user = User.find(@active_user.id)   # get the user back\n    assert_equal(@active_user.first_name, 'first_name_changed')\n\n    # admin user also should be able to update the \"active\" user info\n    set_user_from_auth :admin # set admin user as current user\n    @active_user.first_name = \"first_name_changed_by_admin_for_active_user\"\n    @active_user.save\n\n    @active_user = User.find(@active_user.id)   # get the user back\n    assert_equal(@active_user.first_name, 'first_name_changed_by_admin_for_active_user')\n  end\n\n  test \"delete a user and verify\" do\n    @active_user = users(:active)     # get the active user\n    active_user_uuid = @active_user.uuid\n\n    set_user_from_auth :admin\n    @active_user.delete\n\n    found_deleted_user = false\n    User.all.each do |user|\n      if user.uuid == active_user_uuid\n        found_deleted_user = true\n        break\n      end\n    end\n    assert !found_deleted_user, \"found deleted user: \"+active_user_uuid\n\n  end\n\n  test \"create new user 
as non-admin user\" do\n    set_user_from_auth :active\n    assert_not_allowed { User.new.save }\n  end\n\n  [true, false].each do |visible|\n    test \"setup new user with ActivatedUsersAreVisibleToOthers=#{visible}\" do\n      Rails.configuration.Users.ActivatedUsersAreVisibleToOthers = visible\n      set_user_from_auth :admin\n\n      email = 'foo@example.com'\n\n      user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})\n\n      vm = VirtualMachine.create\n\n      response = user.setup(vm_uuid: vm.uuid)\n\n      resp_user = find_obj_in_resp response, 'User'\n      verify_user resp_user, email\n\n      group_perm = find_obj_in_resp response, 'Link', 'arvados#group'\n      verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid\n\n      group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'\n      if visible\n        verify_link group_perm2, 'permission', 'can_read', groups(:all_users).uuid, nil\n      else\n        assert_nil group_perm2\n      end\n\n      vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'\n      verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid\n      assert_equal(\"foo\", vm_perm.properties[\"username\"])\n    end\n  end\n\n  test \"setup new user with junk in database\" do\n    set_user_from_auth :admin\n\n    email = 'foo@example.com'\n\n    user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})\n\n    vm = VirtualMachine.create\n\n    response = user.setup(vm_uuid: vm.uuid)\n\n    resp_user = find_obj_in_resp response, 'User'\n    verify_user resp_user, email\n\n    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'\n    verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid\n\n    vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'\n    verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid\n    assert_equal(\"foo\", vm_perm.properties[\"username\"])\n  end\n\n  test \"setup new user in multiple steps\" do\n    set_user_from_auth :admin\n\n    email = 'foo@example.com'\n\n    user = User.create ({uuid: 'zzzzz-tpzed-abcdefghijklmno', email: email})\n\n    response = user.setup()\n\n    resp_user = find_obj_in_resp response, 'User'\n    verify_user resp_user, email\n\n    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'\n    verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid\n\n    group_perm2 = find_obj_in_resp response, 'Link', 'arvados#user'\n    verify_link group_perm2, 'permission', 'can_read', groups(:all_users).uuid, nil\n\n    # invoke setup again with a vm_uuid\n    vm = VirtualMachine.create\n\n    response = user.setup(vm_uuid: vm.uuid)\n\n    resp_user = find_obj_in_resp response, 'User', nil\n    verify_user resp_user, email\n    assert_equal user.uuid, resp_user[:uuid], 'expected uuid not found'\n\n    group_perm = find_obj_in_resp response, 'Link', 'arvados#group'\n    verify_link group_perm, 'permission', 'can_write', resp_user[:uuid], groups(:all_users).uuid\n\n    vm_perm = find_obj_in_resp response, 'Link', 'arvados#virtualMachine'\n    verify_link vm_perm, 'permission', 'can_login', resp_user[:uuid], vm.uuid\n    assert_equal(\"foo\", vm_perm.properties[\"username\"])\n  end\n\n  def find_obj_in_resp (response_items, object_type, head_kind=nil)\n    return_obj = nil\n    response_items.each { |x|\n      if !x\n        next\n      end\n\n      if object_type == 'User'\n        if 
ArvadosModel::resource_class_for_uuid(x['uuid']) == User\n          return_obj = x\n          break\n        end\n      else  # looking for a link\n        if ArvadosModel::resource_class_for_uuid(x['head_uuid']).andand.kind == head_kind\n          return_obj = x\n          break\n        end\n      end\n    }\n    return return_obj\n  end\n\n  def verify_user (resp_user, email)\n    assert_not_nil resp_user, 'expected user object'\n    assert_not_nil resp_user['uuid'], 'expected user object'\n    assert_equal email, resp_user['email'], 'expected email not found'\n  end\n\n  def verify_link (link_object, link_class, link_name, tail_uuid, head_uuid)\n    assert_not_nil link_object, \"expected link for #{link_class} #{link_name}\"\n    assert_not_nil link_object[:uuid],\n        \"expected non-nil uuid for link for #{link_class} #{link_name}\"\n    assert_equal link_class, link_object[:link_class],\n        \"expected link_class not found for #{link_class} #{link_name}\"\n    assert_equal link_name, link_object[:name],\n        \"expected link_name not found for #{link_class} #{link_name}\"\n    assert_equal tail_uuid, link_object[:tail_uuid],\n        \"expected tail_uuid not found for #{link_class} #{link_name}\"\n    if head_uuid\n      assert_equal head_uuid, link_object[:head_uuid],\n          \"expected head_uuid not found for #{link_class} #{link_name}\"\n    end\n  end\n\n  def create_user_and_verify_setup_and_notifications (active, new_user_recipients, inactive_recipients, email, expect_username)\n    Rails.configuration.Users.NewUserNotificationRecipients = new_user_recipients\n    Rails.configuration.Users.NewInactiveUserNotificationRecipients = inactive_recipients\n\n    ActionMailer::Base.deliveries = []\n\n    can_setup = (Rails.configuration.Users.AutoSetupNewUsers and\n                 (not expect_username.nil?))\n\n    user = User.new\n    user.first_name = \"first_name_for_newly_created_user\"\n    user.email = email\n    user.is_active = active\n    user.save!\n    assert_equal(expect_username, user.username)\n\n    # check user setup\n    verify_link_exists(Rails.configuration.Users.AutoSetupNewUsers || active,\n                       groups(:all_users).uuid, user.uuid,\n                       \"permission\", \"can_write\")\n\n    # Check for VM login.\n    if (auto_vm_uuid = Rails.configuration.Users.AutoSetupNewUsersWithVmUUID) != \"\"\n      verify_link_exists(can_setup, auto_vm_uuid, user.uuid,\n                         \"permission\", \"can_login\", \"username\", expect_username)\n    end\n\n    # check email notifications\n    new_user_email = nil\n    new_inactive_user_email = nil\n\n    new_user_email_subject = \"#{Rails.configuration.Users.EmailSubjectPrefix}New user created notification\"\n    if Rails.configuration.Users.AutoSetupNewUsers\n      new_user_email_subject = (expect_username or active) ?\n                                 \"#{Rails.configuration.Users.EmailSubjectPrefix}New user created and setup notification\" :\n                                 \"#{Rails.configuration.Users.EmailSubjectPrefix}New user created, but not setup notification\"\n    end\n\n    ActionMailer::Base.deliveries.each do |d|\n      if d.subject == new_user_email_subject then\n        new_user_email = d\n      elsif d.subject == \"#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification\" then\n        new_inactive_user_email = d\n      end\n    end\n\n    # both active and inactive user creations should result in new user creation notification mails,\n    # 
if the new user email recipients config parameter is set\n    if not new_user_recipients.empty? then\n      assert_not_nil new_user_email, 'Expected new user email after setup'\n      assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_user_email.from[0]\n      assert_equal new_user_recipients.stringify_keys.keys.first, new_user_email.to[0]\n      assert_equal new_user_email_subject, new_user_email.subject\n    else\n      assert_nil new_user_email, 'Did not expect new user email after setup'\n    end\n\n    if not active\n      if not inactive_recipients.empty? then\n        assert_not_nil new_inactive_user_email, 'Expected new inactive user email after setup'\n        assert_equal Rails.configuration.Users.UserNotifierEmailFrom, new_inactive_user_email.from[0]\n        assert_equal inactive_recipients.stringify_keys.keys.first, new_inactive_user_email.to[0]\n        assert_equal \"#{Rails.configuration.Users.EmailSubjectPrefix}New inactive user notification\", new_inactive_user_email.subject\n      else\n        assert_nil new_inactive_user_email, 'Did not expect new inactive user email after setup'\n      end\n    else\n      assert_nil new_inactive_user_email, 'Expected no inactive user email after setting up active user'\n    end\n    ActionMailer::Base.deliveries = []\n  end\n\n  def verify_link_exists link_exists, head_uuid, tail_uuid, link_class, link_name, property_name=nil, property_value=nil\n    all_links = Link.where(head_uuid: head_uuid,\n                           tail_uuid: tail_uuid,\n                           link_class: link_class,\n                           name: link_name)\n    assert_equal link_exists, all_links.any?, \"Link#{' not' if link_exists} found for #{link_name} #{link_class} #{property_value}\"\n    if link_exists && property_name && property_value\n      # Check each matching link, not just the first one.\n      all_links.each do |link|\n        assert_equal true, link.properties[property_name].start_with?(property_value), 'Property not found in link'\n      end\n    end\n  end\n\n  def assert_update_success(old_uuid:, new_uuid:, expect_owned_objects: true)\n    [[User, :uuid],\n     [Link, :head_uuid],\n     [Link, :tail_uuid],\n     [Group, :owner_uuid],\n     [Collection, :owner_uuid],\n    ].each do |klass, attr|\n      assert_empty klass.where(attr => old_uuid)\n      if klass == User || expect_owned_objects\n        assert_not_empty klass.where(attr => new_uuid)\n      end\n    end\n  end\n\n  test \"lookup user by email\" do\n    u = User.register({\"email\" => \"active-user@arvados.local\", \"identity_url\" => \"different-identity-url\"})\n    active = User.find_by_uuid(users(:active).uuid)\n    assert_equal active.uuid, u.uuid\n    assert_equal \"active-user@arvados.local\", active.email\n    # identity_url is not updated\n    assert_equal \"https://active-user.openid.local\", active.identity_url\n  end\n\n  test \"lookup user by alternate email\" do\n    # register method will find that active-user@arvados.local already\n    # exists and return existing 'active' user.\n    u = User.register({\"email\" => \"user@parent-company.com\",\n                       \"alternate_emails\" => [\"active-user@arvados.local\"],\n                       \"identity_url\" => \"different-identity-url\"})\n    active = User.find_by_uuid(users(:active).uuid)\n    assert_equal active.uuid, u.uuid\n\n    # email should be updated\n    assert_equal \"user@parent-company.com\", active.email\n\n    # identity_url is not updated\n    assert_equal \"https://active-user.openid.local\", active.identity_url\n  
end\n\n  test \"register new user\" do\n    u = User.register({\"email\" => \"never-before-seen-user@arvados.local\",\n                       \"identity_url\" => \"different-identity-url\",\n                       \"first_name\" => \"Robert\",\n                       \"last_name\" => \"Baratheon\",\n                       \"username\" => \"bobby\"})\n    nbs = User.find_by_uuid(u.uuid)\n    assert_equal nbs.uuid, u.uuid\n    assert_equal \"different-identity-url\", nbs.identity_url\n    assert_equal \"never-before-seen-user@arvados.local\", nbs.email\n    assert_equal false, nbs.is_admin\n    assert_equal false , nbs.is_active\n    assert_equal \"bobby\", nbs.username\n    assert_equal \"Robert\", nbs.first_name\n    assert_equal \"Baratheon\", nbs.last_name\n  end\n\n  test \"fail when email address is ambiguous\" do\n    User.register({\"email\" => \"active-user@arvados.local\"})\n    u = User.register({\"email\" => \"never-before-seen-user@arvados.local\"})\n    u.email = \"active-user@arvados.local\"\n    act_as_system_user do\n      u.save!\n    end\n    assert_raises do\n      User.register({\"email\" => \"active-user@arvados.local\"})\n    end\n  end\n\n  test \"fail lookup without identifiers\" do\n    assert_raises do\n      User.register({\"first_name\" => \"Robert\", \"last_name\" => \"Baratheon\"})\n    end\n    assert_raises do\n      User.register({\"first_name\" => \"Robert\", \"last_name\" => \"Baratheon\", \"identity_url\" => \"\", \"email\" => \"\"})\n    end\n  end\n\n  test \"user can update name\" do\n    set_user_from_auth :active\n    user = users(:active)\n    user.first_name = \"MyNewName\"\n    assert user.save\n  end\n\n  test \"user cannot update email\" do\n    set_user_from_auth :active\n    user = users(:active)\n    user.email = \"new-name@example.com\"\n    assert_not_allowed { user.save }\n  end\n\n  test \"admin can update email\" do\n    set_user_from_auth :admin\n    user = users(:active)\n    user.email = \"new-name@example.com\"\n    assert user.save\n  end\n\n  test \"empty identity_url saves as null\" do\n    set_user_from_auth :admin\n    user = users(:active)\n    assert user.update(identity_url: '')\n    user.reload\n    assert_nil user.identity_url\n  end\n\n  test \"id overflows int32\" do\n    uuid = users(:active).uuid\n    ActiveRecord::Base.connection.execute \"update users set id=333222111000 where uuid='#{uuid}'\"\n    u = User.find_by_uuid(uuid)\n    assert_equal 333222111000, u.id\n  end\n\n  test \"prefs must be a hash\" do\n    set_user_from_auth :active\n    u = users(:active)\n\n    assert_raises ActiveRecord::RecordInvalid do\n      u.update!(prefs: [])\n    end\n\n    u.update!(prefs: {\"a\" => \"b\"})\n    assert_equal({\"a\" => \"b\"}, u.prefs)\n\n    u.update!(prefs: nil)\n    assert_equal({}, u.prefs)\n  end\nend\n"
  },
  {
    "path": "services/api/test/unit/virtual_machine_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass VirtualMachineTest < ActiveSupport::TestCase\n  # test \"the truth\" do\n  #   assert true\n  # end\nend\n"
  },
  {
    "path": "services/api/test/unit/workflow_test.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'test_helper'\n\nclass WorkflowTest < ActiveSupport::TestCase\n  test \"create workflow with no definition yaml\" do\n    set_user_from_auth :active\n\n    wf = {\n      name: \"test name\",\n    }\n\n    w = Workflow.create!(wf)\n    assert_not_nil w.uuid\n  end\n\n  test \"create workflow with valid definition yaml\" do\n    set_user_from_auth :active\n\n    wf = {\n      name: \"test name\",\n      definition: \"k1:\\n v1: x\\n v2: y\"\n    }\n\n    w = Workflow.create!(wf)\n    assert_not_nil w.uuid\n  end\n\n  test \"create workflow with simple string as definition\" do\n    set_user_from_auth :active\n\n    wf = {\n      name: \"test name\",\n      definition: \"this is valid yaml\"\n    }\n\n    w = Workflow.create!(wf)\n    assert_not_nil w.uuid\n  end\n\n  test \"create workflow with invalid definition yaml\" do\n    set_user_from_auth :active\n\n    wf = {\n      name: \"test name\",\n      definition: \"k1:\\n v1: x\\n  v2: y\"\n    }\n\n    assert_raises(ActiveRecord::RecordInvalid) do\n      Workflow.create! wf\n    end\n  end\n\n  test \"update workflow with invalid definition yaml\" do\n    set_user_from_auth :active\n\n    w = Workflow.find_by_uuid(workflows(:workflow_with_definition_yml).uuid)\n    definition = \"k1:\\n v1: x\\n  v2: y\"\n\n    assert_raises(ActiveRecord::RecordInvalid) do\n      w.update!(definition: definition)\n    end\n  end\n\n  test \"update workflow and verify name and description\" do\n    set_user_from_auth :active\n\n    # Workflow name and desc should be set with values from definition yaml\n    # when it does not already have custom values for these fields\n    w = Workflow.find_by_uuid(workflows(:workflow_with_no_name_and_desc).uuid)\n    definition = \"name: test name 1\\ndescription: test desc 1\\nother: some more\"\n    w.update!(definition: definition)\n    w.reload\n    assert_equal \"test name 1\", w.name\n    assert_equal \"test desc 1\", w.description\n\n    # Workflow name and desc should be set with values from definition yaml\n    # when it does not already have custom values for these fields\n    definition = \"name: test name 2\\ndescription: test desc 2\\nother: some more\"\n    w.update!(definition: definition)\n    w.reload\n    assert_equal \"test name 2\", w.name\n    assert_equal \"test desc 2\", w.description\n\n    # Workflow name and desc should be set with values from definition yaml\n    # even if it means emptying them out\n    definition = \"more: etc\"\n    w.update!(definition: definition)\n    w.reload\n    assert_nil w.name\n    assert_nil w.description\n\n    # Workflow name and desc set using definition yaml should be cleared\n    # if definition yaml is cleared\n    definition = \"name: test name 2\\ndescription: test desc 2\\nother: some more\"\n    w.update!(definition: definition)\n    w.reload\n    definition = nil\n    w.update!(definition: definition)\n    w.reload\n    assert_nil w.name\n    assert_nil w.description\n\n    # Workflow name and desc should be set to provided custom values\n    definition = \"name: test name 3\\ndescription: test desc 3\\nother: some more\"\n    w.update!(name: \"remains\", description: \"remains\", definition: definition)\n    w.reload\n    assert_equal \"remains\", w.name\n    assert_equal \"remains\", w.description\n\n    # Workflow name and desc should retain provided custom values\n    # and should not be overwritten by values from yaml\n    
definition = \"name: test name 4\\ndescription: test desc 4\\nother: some more\"\n    w.update!(definition: definition)\n    w.reload\n    assert_equal \"remains\", w.name\n    assert_equal \"remains\", w.description\n\n    # Workflow name and desc should retain provided custom values\n    # and not be affected by the clearing of the definition yaml\n    definition = nil\n    w.update!(definition: definition)\n    w.reload\n    assert_equal \"remains\", w.name\n    assert_equal \"remains\", w.description\n  end\nend\n"
  },
  {
    "path": "services/crunch-dispatch-slurm/crunch-dispatch-slurm.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Dispatcher service for Crunch that submits containers to the slurm queue.\npackage dispatchslurm\n\nimport (\n\t\"context\"\n\t\"crypto/hmac\"\n\t\"crypto/sha256\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"net/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/controller/dblock\"\n\t\"git.arvados.org/arvados.git/lib/ctrlctx\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/container\"\n\t\"git.arvados.org/arvados.git/lib/service\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/dispatch\"\n\t\"github.com/coreos/go-systemd/daemon\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nvar Command cmd.Handler = service.Command(arvados.ServiceNameDispatchSLURM, newHandler)\n\nfunc newHandler(ctx context.Context, cluster *arvados.Cluster, _ string, _ *prometheus.Registry) service.Handler {\n\tlogger := ctxlog.FromContext(ctx)\n\tdisp := &Dispatcher{logger: logger, cluster: cluster}\n\tif err := disp.configure(); err != nil {\n\t\treturn service.ErrorHandler(ctx, cluster, err)\n\t}\n\tdisp.setup()\n\tgo func() {\n\t\tdisp.err = disp.run()\n\t\tclose(disp.done)\n\t}()\n\treturn disp\n}\n\ntype logger interface {\n\tdispatch.Logger\n\tFatalf(string, ...interface{})\n}\n\nconst initialNiceValue int64 = 10000\n\ntype Dispatcher struct {\n\t*dispatch.Dispatcher\n\tlogger      logrus.FieldLogger\n\tcluster     *arvados.Cluster\n\tsqCheck     *SqueueChecker\n\tslurm       Slurm\n\tdbConnector ctrlctx.DBConnector\n\n\tdone chan struct{}\n\terr  error\n\n\tClient arvados.Client\n}\n\nfunc (disp *Dispatcher) CheckHealth() error {\n\treturn disp.err\n}\n\nfunc (disp *Dispatcher) Done() <-chan struct{} {\n\treturn disp.done\n}\n\nfunc (disp *Dispatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\thttp.NotFound(w, r)\n}\n\n// configure() loads config files. 
Some tests skip this (see\n// StubbedSuite).\nfunc (disp *Dispatcher) configure() error {\n\tif disp.logger == nil {\n\t\tdisp.logger = logrus.StandardLogger()\n\t}\n\tdisp.logger = disp.logger.WithField(\"ClusterID\", disp.cluster.ClusterID)\n\tdisp.logger.Printf(\"crunch-dispatch-slurm %s started\", cmd.Version.String())\n\n\tdisp.Client.APIHost = disp.cluster.Services.Controller.ExternalURL.Host\n\tdisp.Client.AuthToken = disp.cluster.SystemRootToken\n\tdisp.Client.Insecure = disp.cluster.TLS.Insecure\n\tdisp.dbConnector = ctrlctx.DBConnector{PostgreSQL: disp.cluster.PostgreSQL}\n\n\tif disp.Client.APIHost != \"\" || disp.Client.AuthToken != \"\" {\n\t\t// Copy real configs into env vars so [a]\n\t\t// MakeArvadosClient() uses them, and [b] they get\n\t\t// propagated to crunch-run via SLURM.\n\t\tos.Setenv(\"ARVADOS_API_HOST\", disp.Client.APIHost)\n\t\tos.Setenv(\"ARVADOS_API_TOKEN\", disp.Client.AuthToken)\n\t\tos.Setenv(\"ARVADOS_API_HOST_INSECURE\", \"\")\n\t\tif disp.Client.Insecure {\n\t\t\tos.Setenv(\"ARVADOS_API_HOST_INSECURE\", \"1\")\n\t\t}\n\t\tfor k, v := range disp.cluster.Containers.SLURM.SbatchEnvironmentVariables {\n\t\t\tos.Setenv(k, v)\n\t\t}\n\t} else {\n\t\tdisp.logger.Warnf(\"Client credentials missing from config, so falling back on environment variables (deprecated).\")\n\t}\n\treturn nil\n}\n\n// setup() initializes private fields after configure().\nfunc (disp *Dispatcher) setup() {\n\tdisp.done = make(chan struct{})\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tif err != nil {\n\t\tdisp.logger.Fatalf(\"Error making Arvados client: %v\", err)\n\t}\n\tarv.Retries = 25\n\n\tdisp.slurm = NewSlurmCLI()\n\tdisp.sqCheck = &SqueueChecker{\n\t\tLogger:         disp.logger,\n\t\tPeriod:         time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),\n\t\tPrioritySpread: disp.cluster.Containers.SLURM.PrioritySpread,\n\t\tSlurm:          disp.slurm,\n\t}\n\tdisp.Dispatcher = &dispatch.Dispatcher{\n\t\tArv:            arv,\n\t\tLogger:         disp.logger,\n\t\tBatchSize:      disp.cluster.API.MaxItemsPerResponse,\n\t\tRunContainer:   disp.runContainer,\n\t\tPollPeriod:     time.Duration(disp.cluster.Containers.CloudVMs.PollInterval),\n\t\tMinRetryPeriod: time.Duration(disp.cluster.Containers.MinRetryPeriod),\n\t}\n}\n\nfunc (disp *Dispatcher) run() error {\n\tdblock.Dispatch.Lock(context.Background(), disp.dbConnector.GetDB)\n\tdefer dblock.Dispatch.Unlock()\n\tdefer disp.sqCheck.Stop()\n\n\tif disp.cluster != nil && len(disp.cluster.InstanceTypes) > 0 {\n\t\tgo SlurmNodeTypeFeatureKludge(disp.cluster)\n\t}\n\n\tif _, err := daemon.SdNotify(false, \"READY=1\"); err != nil {\n\t\tlog.Printf(\"Error notifying init daemon: %v\", err)\n\t}\n\tgo disp.checkSqueueForOrphans()\n\treturn disp.Dispatcher.Run(context.Background())\n}\n\nvar containerUuidPattern = regexp.MustCompile(`^[a-z0-9]{5}-dz642-[a-z0-9]{15}$`)\n\n// Check the next squeue report, and invoke TrackContainer for all the\n// containers in the report. This gives us a chance to cancel slurm\n// jobs started by a previous dispatch process that never released\n// their slurm allocations even though their container states are\n// Cancelled or Complete. 
See https://dev.arvados.org/issues/10979\nfunc (disp *Dispatcher) checkSqueueForOrphans() {\n\tfor _, uuid := range disp.sqCheck.All() {\n\t\tif !containerUuidPattern.MatchString(uuid) || !strings.HasPrefix(uuid, disp.cluster.ClusterID) {\n\t\t\tcontinue\n\t\t}\n\t\terr := disp.TrackContainer(uuid)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"checkSqueueForOrphans: TrackContainer(%s): %s\", uuid, err)\n\t\t}\n\t}\n}\n\nvar rePercentAny = regexp.MustCompile(`%.`)\n\nfunc (disp *Dispatcher) sbatchArgs(ctr arvados.Container) ([]string, error) {\n\tinstancetype := \"\"\n\tif disp.cluster == nil {\n\t\t// no instance types configured\n\t} else if types, err := container.ChooseInstanceType(disp.cluster, &ctr); err == container.ErrInstanceTypesNotConfigured {\n\t\t// ditto\n\t} else if err != nil {\n\t\treturn nil, err\n\t} else {\n\t\t// Note types[0] is the lowest-cost suitable instance type.\n\t\tinstancetype = types[0].Name\n\t}\n\tmaxrunminutes := int64(math.Ceil(float64(ctr.SchedulingParameters.MaxRunTime) / 60))\n\tmem := int64(math.Ceil(float64(ctr.RuntimeConstraints.RAM+\n\t\tctr.RuntimeConstraints.KeepCacheRAM+\n\t\tint64(disp.cluster.Containers.ReserveExtraRAM)) / float64(1048576)))\n\ttmp := container.EstimateScratchSpace(&ctr)\n\ttmp = int64(math.Ceil(float64(tmp) / float64(1048576)))\n\trepl := map[string]string{\n\t\t\"%%\": \"%\",\n\t\t\"%C\": fmt.Sprintf(\"%d\", ctr.RuntimeConstraints.VCPUs),\n\t\t\"%G\": fmt.Sprintf(\"%d\", ctr.RuntimeConstraints.GPU.DeviceCount),\n\t\t\"%I\": fmt.Sprintf(\"%s\", instancetype),\n\t\t\"%M\": fmt.Sprintf(\"%d\", mem),\n\t\t\"%P\": strings.Join(ctr.SchedulingParameters.Partitions, \",\"),\n\t\t\"%T\": fmt.Sprintf(\"%d\", tmp),\n\t\t\"%U\": ctr.UUID,\n\t\t\"%W\": fmt.Sprintf(\"%d\", maxrunminutes),\n\t}\n\targtmpl := disp.cluster.Containers.SLURM.SbatchArgumentsList\n\tif ctr.RuntimeConstraints.GPU.DeviceCount > 0 {\n\t\targtmpl = append(argtmpl, disp.cluster.Containers.SLURM.SbatchGPUArgumentsList...)\n\t}\n\targs := []string{fmt.Sprintf(\"--nice=%d\", initialNiceValue), \"--no-requeue\"}\n\tfor _, arg := range argtmpl {\n\t\tvar err error\n\t\tvar skip bool\n\t\targ = rePercentAny.ReplaceAllStringFunc(arg, func(s string) string {\n\t\t\tsubst, ok := repl[s]\n\t\t\tif !ok {\n\t\t\t\terr = fmt.Errorf(\"Unknown substitution parameter %s in SbatchArgumentsList or SbatchGPUArgumentsList\", s)\n\t\t\t}\n\t\t\tif s == \"%W\" && subst == \"0\" ||\n\t\t\t\ts == \"%P\" && subst == \"\" {\n\t\t\t\tskip = true\n\t\t\t\treturn \"\"\n\t\t\t}\n\t\t\treturn subst\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif skip {\n\t\t\tcontinue\n\t\t}\n\t\targs = append(args, arg)\n\t}\n\treturn args, nil\n}\n\nfunc (disp *Dispatcher) submit(container arvados.Container, crunchRunCommand []string) error {\n\t// append() here avoids modifying crunchRunCommand's\n\t// underlying array, which is shared with other goroutines.\n\tcrArgs := append([]string(nil), crunchRunCommand...)\n\tcrArgs = append(crArgs, \"--runtime-engine=\"+disp.cluster.Containers.RuntimeEngine)\n\tcrArgs = append(crArgs, container.UUID)\n\n\th := hmac.New(sha256.New, []byte(disp.cluster.SystemRootToken))\n\tfmt.Fprint(h, container.UUID)\n\tauthsecret := fmt.Sprintf(\"%x\", h.Sum(nil))\n\n\tcrScript := strings.NewReader(execScript(crArgs, map[string]string{\"GatewayAuthSecret\": authsecret}))\n\n\tsbArgs, err := disp.sbatchArgs(container)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"running sbatch %+q\", sbArgs)\n\treturn disp.slurm.Batch(crScript, sbArgs)\n}\n\n// Submit a 
container to the slurm queue (or resume monitoring if it's\n// already in the queue).  Cancel the slurm job if the container's\n// priority changes to zero or its state indicates it's no longer\n// running.\nfunc (disp *Dispatcher) runContainer(_ *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) error {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tif ctr.State == dispatch.Locked && !disp.sqCheck.HasUUID(ctr.UUID) {\n\t\tlog.Printf(\"Submitting container %s to slurm\", ctr.UUID)\n\t\tcmd := []string{disp.cluster.Containers.CrunchRunCommand}\n\t\tcmd = append(cmd, disp.cluster.Containers.CrunchRunArgumentsList...)\n\t\terr := disp.submit(ctr, cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlog.Printf(\"Start monitoring container %v in state %q\", ctr.UUID, ctr.State)\n\tdefer log.Printf(\"Done monitoring container %s\", ctr.UUID)\n\n\t// If the container disappears from the slurm queue, there is\n\t// no point in waiting for further dispatch updates: just\n\t// clean up and return.\n\tgo func(uuid string) {\n\t\tfor ctx.Err() == nil && disp.sqCheck.HasUUID(uuid) {\n\t\t}\n\t\tcancel()\n\t}(ctr.UUID)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t// Disappeared from squeue\n\t\t\tif err := disp.Arv.Get(\"containers\", ctr.UUID, nil, &ctr); err != nil {\n\t\t\t\tlog.Printf(\"error getting final container state for %s: %s\", ctr.UUID, err)\n\t\t\t}\n\t\t\tswitch ctr.State {\n\t\t\tcase dispatch.Running:\n\t\t\t\tdisp.UpdateState(ctr.UUID, dispatch.Cancelled)\n\t\t\tcase dispatch.Locked:\n\t\t\t\tdisp.Unlock(ctr.UUID)\n\t\t\t}\n\t\t\treturn nil\n\t\tcase updated, ok := <-status:\n\t\t\tif !ok {\n\t\t\t\tlog.Printf(\"container %s is done: cancel slurm job\", ctr.UUID)\n\t\t\t\tdisp.scancel(ctr)\n\t\t\t} else if updated.Priority == 0 {\n\t\t\t\tlog.Printf(\"container %s has state %q, priority %d: cancel slurm job\", ctr.UUID, updated.State, updated.Priority)\n\t\t\t\tdisp.scancel(ctr)\n\t\t\t} else {\n\t\t\t\tp := int64(updated.Priority)\n\t\t\t\tif p <= 1000 {\n\t\t\t\t\t// API is providing\n\t\t\t\t\t// user-assigned priority. If\n\t\t\t\t\t// ctrs have equal priority,\n\t\t\t\t\t// run the older one first.\n\t\t\t\t\tp = int64(p)<<50 - (updated.CreatedAt.UnixNano() >> 14)\n\t\t\t\t}\n\t\t\t\tdisp.sqCheck.SetPriority(ctr.UUID, p)\n\t\t\t}\n\t\t}\n\t}\n}\nfunc (disp *Dispatcher) scancel(ctr arvados.Container) {\n\terr := disp.slurm.Cancel(ctr.UUID)\n\tif err != nil {\n\t\tlog.Printf(\"scancel: %s\", err)\n\t\ttime.Sleep(time.Second)\n\t} else if disp.sqCheck.HasUUID(ctr.UUID) {\n\t\tlog.Printf(\"container %s is still in squeue after scancel\", ctr.UUID)\n\t\ttime.Sleep(time.Second)\n\t}\n}\n"
  },
  {
    "path": "services/crunch-dispatch-slurm/crunch-dispatch-slurm_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchslurm\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/dispatchcloud/container\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/dispatch\"\n\t\"github.com/sirupsen/logrus\"\n\t. \"gopkg.in/check.v1\"\n)\n\n// Gocheck boilerplate\nfunc Test(t *testing.T) {\n\tTestingT(t)\n}\n\nvar _ = Suite(&IntegrationSuite{})\nvar _ = Suite(&StubbedSuite{})\n\ntype IntegrationSuite struct {\n\tdisp  Dispatcher\n\tslurm slurmFake\n}\n\nfunc (s *IntegrationSuite) SetUpTest(c *C) {\n\tarvadostest.ResetEnv()\n\tarvadostest.ResetDB(c)\n\tos.Setenv(\"ARVADOS_API_TOKEN\", arvadostest.SystemRootToken)\n\n\tldr := config.NewLoader(nil, ctxlog.TestLogger(c))\n\tcfg, err := ldr.Load()\n\tc.Assert(err, IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, IsNil)\n\tcluster.Containers.ReserveExtraRAM = 550 << 20\n\n\ts.disp = Dispatcher{cluster: cluster}\n\ts.disp.setup()\n\ts.slurm = slurmFake{}\n}\n\nfunc (s *IntegrationSuite) TearDownTest(c *C) {\n\tarvadostest.ResetEnv()\n\tarvadostest.ResetDB(c)\n}\n\ntype slurmFake struct {\n\tdidBatch      [][]string\n\tdidCancel     []string\n\tdidRelease    []string\n\tdidRenice     [][]string\n\tqueue         string\n\trejectNice10K bool\n\t// If non-nil, run this func during the 2nd+ call to Cancel()\n\tonCancel func()\n\t// Error returned by Batch()\n\terrBatch error\n}\n\nfunc (sf *slurmFake) Batch(script io.Reader, args []string) error {\n\tsf.didBatch = append(sf.didBatch, args)\n\treturn sf.errBatch\n}\n\nfunc (sf *slurmFake) QueueCommand(args []string) *exec.Cmd {\n\treturn exec.Command(\"echo\", sf.queue)\n}\n\nfunc (sf *slurmFake) Release(name string) error {\n\tsf.didRelease = append(sf.didRelease, name)\n\treturn nil\n}\n\nfunc (sf *slurmFake) Renice(name string, nice int64) error {\n\tsf.didRenice = append(sf.didRenice, []string{name, fmt.Sprintf(\"%d\", nice)})\n\tif sf.rejectNice10K && nice > 10000 {\n\t\treturn errors.New(\"scontrol: error: Invalid nice value, must be between -10000 and 10000\")\n\t}\n\treturn nil\n}\n\nfunc (sf *slurmFake) Cancel(name string) error {\n\tsf.didCancel = append(sf.didCancel, name)\n\tif len(sf.didCancel) == 1 {\n\t\t// simulate error on first attempt\n\t\treturn errors.New(\"something terrible happened\")\n\t}\n\tif sf.onCancel != nil {\n\t\tsf.onCancel()\n\t}\n\treturn nil\n}\n\nfunc (s *IntegrationSuite) integrationTest(c *C,\n\texpectBatch [][]string,\n\trunContainer func(*dispatch.Dispatcher, arvados.Container)) (arvados.Container, error) {\n\tarvadostest.ResetEnv()\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tc.Assert(err, IsNil)\n\n\t// There should be one queued container\n\tparams := arvadosclient.Dict{\n\t\t\"filters\": [][]string{{\"state\", \"=\", \"Queued\"}},\n\t}\n\tvar containers arvados.ContainerList\n\terr = arv.List(\"containers\", params, &containers)\n\tc.Check(err, IsNil)\n\tc.Assert(len(containers.Items), Equals, 1)\n\n\ts.disp.cluster.Containers.CrunchRunCommand = 
\"echo\"\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdoneRun := make(chan struct{})\n\tdoneDispatch := make(chan error)\n\n\ts.disp.Dispatcher = &dispatch.Dispatcher{\n\t\tArv:        arv,\n\t\tPollPeriod: time.Second,\n\t\tRunContainer: func(disp *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) error {\n\t\t\tgo func() {\n\t\t\t\trunContainer(disp, ctr)\n\t\t\t\ts.slurm.queue = \"\"\n\t\t\t\tdoneRun <- struct{}{}\n\t\t\t}()\n\t\t\terr := s.disp.runContainer(disp, ctr, status)\n\t\t\tcancel()\n\t\t\tdoneDispatch <- err\n\t\t\treturn nil\n\t\t},\n\t}\n\n\ts.disp.slurm = &s.slurm\n\ts.disp.sqCheck = &SqueueChecker{\n\t\tLogger: logrus.StandardLogger(),\n\t\tPeriod: 500 * time.Millisecond,\n\t\tSlurm:  s.disp.slurm,\n\t}\n\n\terr = s.disp.Dispatcher.Run(ctx)\n\t<-doneRun\n\tc.Assert(err, Equals, context.Canceled)\n\terrDispatch := <-doneDispatch\n\n\ts.disp.sqCheck.Stop()\n\n\tc.Check(s.slurm.didBatch, DeepEquals, expectBatch)\n\n\t// There should be no queued containers now\n\terr = arv.List(\"containers\", params, &containers)\n\tc.Check(err, IsNil)\n\tc.Check(len(containers.Items), Equals, 0)\n\n\t// Previously \"Queued\" container should now be in \"Complete\" state\n\tvar container arvados.Container\n\terr = arv.Get(\"containers\", \"zzzzz-dz642-queuedcontainer\", nil, &container)\n\tc.Check(err, IsNil)\n\treturn container, errDispatch\n}\n\nfunc (s *IntegrationSuite) TestNormal(c *C) {\n\ts.slurm = slurmFake{queue: \"zzzzz-dz642-queuedcontainer 10000 100 PENDING Resources\\n\"}\n\tcontainer, _ := s.integrationTest(c,\n\t\tnil,\n\t\tfunc(dispatcher *dispatch.Dispatcher, container arvados.Container) {\n\t\t\tdispatcher.UpdateState(container.UUID, dispatch.Running)\n\t\t\ttime.Sleep(3 * time.Second)\n\t\t\tdispatcher.UpdateState(container.UUID, dispatch.Complete)\n\t\t})\n\tc.Check(container.State, Equals, arvados.ContainerStateComplete)\n}\n\nfunc (s *IntegrationSuite) TestCancel(c *C) {\n\ts.slurm = slurmFake{queue: \"zzzzz-dz642-queuedcontainer 10000 100 PENDING Resources\\n\"}\n\treadyToCancel := make(chan bool)\n\ts.slurm.onCancel = func() { <-readyToCancel }\n\tcontainer, _ := s.integrationTest(c,\n\t\tnil,\n\t\tfunc(dispatcher *dispatch.Dispatcher, container arvados.Container) {\n\t\t\tdispatcher.UpdateState(container.UUID, dispatch.Running)\n\t\t\ttime.Sleep(time.Second)\n\t\t\tdispatcher.Arv.Update(\"containers\", container.UUID,\n\t\t\t\tarvadosclient.Dict{\n\t\t\t\t\t\"container\": arvadosclient.Dict{\"priority\": 0}},\n\t\t\t\tnil)\n\t\t\treadyToCancel <- true\n\t\t\tclose(readyToCancel)\n\t\t})\n\tc.Check(container.State, Equals, arvados.ContainerStateCancelled)\n\tc.Check(len(s.slurm.didCancel) > 1, Equals, true)\n\tc.Check(s.slurm.didCancel[:2], DeepEquals, []string{\"zzzzz-dz642-queuedcontainer\", \"zzzzz-dz642-queuedcontainer\"})\n}\n\nfunc (s *IntegrationSuite) TestMissingFromSqueue(c *C) {\n\tcontainer, _ := s.integrationTest(c,\n\t\t[][]string{{\n\t\t\tfmt.Sprintf(\"--nice=%d\", 10000),\n\t\t\t\"--no-requeue\",\n\t\t\tfmt.Sprintf(\"--job-name=%s\", \"zzzzz-dz642-queuedcontainer\"),\n\t\t\tfmt.Sprintf(\"--mem=%d\", 11995),\n\t\t\tfmt.Sprintf(\"--cpus-per-task=%d\", 4),\n\t\t\tfmt.Sprintf(\"--tmp=%d\", 45777),\n\t\t}},\n\t\tfunc(dispatcher *dispatch.Dispatcher, container arvados.Container) {\n\t\t\tdispatcher.UpdateState(container.UUID, dispatch.Running)\n\t\t\ttime.Sleep(3 * time.Second)\n\t\t\tdispatcher.UpdateState(container.UUID, dispatch.Complete)\n\t\t})\n\tc.Check(container.State, Equals, 
arvados.ContainerStateCancelled)\n}\n\nfunc (s *IntegrationSuite) TestSbatchFail(c *C) {\n\ts.slurm = slurmFake{errBatch: errors.New(\"something terrible happened\")}\n\tcontainer, err := s.integrationTest(c,\n\t\t[][]string{{\"--nice=10000\", \"--no-requeue\", \"--job-name=zzzzz-dz642-queuedcontainer\", \"--mem=11995\", \"--cpus-per-task=4\", \"--tmp=45777\"}},\n\t\tfunc(dispatcher *dispatch.Dispatcher, container arvados.Container) {\n\t\t\tdispatcher.UpdateState(container.UUID, dispatch.Running)\n\t\t\tdispatcher.UpdateState(container.UUID, dispatch.Complete)\n\t\t})\n\tc.Check(container.State, Equals, arvados.ContainerStateComplete)\n\tc.Check(err, ErrorMatches, `something terrible happened`)\n}\n\ntype StubbedSuite struct {\n\tdisp Dispatcher\n}\n\nfunc (s *StubbedSuite) SetUpTest(c *C) {\n\tldr := config.NewLoader(nil, ctxlog.TestLogger(c))\n\tcfg, err := ldr.Load()\n\tc.Assert(err, IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, IsNil)\n\tcluster.Containers.ReserveExtraRAM = 550 << 20\n\n\ts.disp = Dispatcher{cluster: cluster}\n\ts.disp.setup()\n}\n\nfunc (s *StubbedSuite) TestAPIErrorGettingContainers(c *C) {\n\tapiStubResponses := make(map[string]arvadostest.StubResponse)\n\tapiStubResponses[\"/arvados/v1/api_client_authorizations/current\"] = arvadostest.StubResponse{200, `{\"uuid\":\"zzzzz-gj3su-000000000000000\"}`}\n\tapiStubResponses[\"/arvados/v1/containers\"] = arvadostest.StubResponse{500, string(`{}`)}\n\n\ts.testWithServerStub(c, apiStubResponses, \"echo\", \"error getting count of containers\")\n}\n\nfunc (s *StubbedSuite) testWithServerStub(c *C, apiStubResponses map[string]arvadostest.StubResponse, crunchCmd string, expected string) {\n\tapiStub := arvadostest.ServerStub{apiStubResponses}\n\n\tapi := httptest.NewServer(&apiStub)\n\tdefer api.Close()\n\n\tarv := &arvadosclient.ArvadosClient{\n\t\tScheme:    \"http\",\n\t\tApiServer: api.URL[7:],\n\t\tApiToken:  \"abc123\",\n\t\tClient:    &http.Client{Transport: &http.Transport{}},\n\t\tRetries:   0,\n\t}\n\n\tbuf := bytes.NewBuffer(nil)\n\tlogrus.SetOutput(io.MultiWriter(buf, os.Stderr))\n\tdefer logrus.SetOutput(os.Stderr)\n\n\t// Use the crunch command passed by the caller, rather than a\n\t// hardcoded literal, so each test can control what gets run.\n\ts.disp.cluster.Containers.CrunchRunCommand = crunchCmd\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdispatcher := dispatch.Dispatcher{\n\t\tArv:        arv,\n\t\tPollPeriod: time.Second,\n\t\tRunContainer: func(disp *dispatch.Dispatcher, ctr arvados.Container, status <-chan arvados.Container) error {\n\t\t\tgo func() {\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\tdisp.UpdateState(ctr.UUID, dispatch.Running)\n\t\t\t\tdisp.UpdateState(ctr.UUID, dispatch.Complete)\n\t\t\t}()\n\t\t\ts.disp.runContainer(disp, ctr, status)\n\t\t\tcancel()\n\t\t\treturn nil\n\t\t},\n\t}\n\n\tgo func() {\n\t\tfor i := 0; i < 80 && !strings.Contains(buf.String(), expected); i++ {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t}\n\t\tcancel()\n\t}()\n\n\terr := dispatcher.Run(ctx)\n\tc.Assert(err, Equals, context.Canceled)\n\n\tc.Check(buf.String(), Matches, `(?ms).*`+expected+`.*`)\n}\n\nfunc (s *StubbedSuite) TestSbatchArgs(c *C) {\n\tcontainer := arvados.Container{\n\t\tUUID:               \"123\",\n\t\tRuntimeConstraints: arvados.RuntimeConstraints{RAM: 250000000, VCPUs: 2},\n\t\tPriority:           1,\n\t}\n\n\tfor _, defaults := range [][]string{\n\t\tnil,\n\t\t{},\n\t\t{\"--arg1=v1\", \"--arg2\"},\n\t} {\n\t\tc.Logf(\"%#v\", defaults)\n\t\ts.disp.cluster.Containers.SLURM.SbatchArgumentsList = defaults\n\n\t\targs, err := s.disp.sbatchArgs(container)\n\t\tc.Check(args, DeepEquals, 
append([]string{\"--nice=10000\", \"--no-requeue\"}, defaults...))\n\t\tc.Check(err, IsNil)\n\t}\n}\n\nfunc (s *StubbedSuite) TestSbatchArgs_GPU(c *C) {\n\tcontainer := arvados.Container{\n\t\tUUID: \"123\",\n\t\tRuntimeConstraints: arvados.RuntimeConstraints{\n\t\t\tRAM:   250000000,\n\t\t\tVCPUs: 2,\n\t\t\tGPU: arvados.GPURuntimeConstraints{\n\t\t\t\tDeviceCount: 3,\n\t\t\t},\n\t\t},\n\t\tPriority: 1,\n\t}\n\ts.disp.cluster.Containers.SLURM.SbatchArgumentsList = append([]string{\"--account=accountname\"}, s.disp.cluster.Containers.SLURM.SbatchArgumentsList...)\n\ts.disp.cluster.Containers.SLURM.SbatchGPUArgumentsList = []string{\"--gpus=%G\", \"--partition=gpupartition\"}\n\targs, err := s.disp.sbatchArgs(container)\n\tc.Check(args, DeepEquals, []string{\"--nice=10000\", \"--no-requeue\", \"--account=accountname\", \"--job-name=123\", \"--mem=789\", \"--cpus-per-task=2\", \"--tmp=0\", \"--gpus=3\", \"--partition=gpupartition\"})\n\tc.Check(err, IsNil)\n}\n\nfunc (s *StubbedSuite) TestSbatchInstanceTypeConstraint(c *C) {\n\tctr := arvados.Container{\n\t\tUUID:               \"123\",\n\t\tRuntimeConstraints: arvados.RuntimeConstraints{RAM: 250000000, VCPUs: 2},\n\t\tPriority:           1,\n\t}\n\n\tfor _, trial := range []struct {\n\t\ttypes      map[string]arvados.InstanceType\n\t\tconfigArgs []string\n\t\texpectArgs []string\n\t\terr        error\n\t}{\n\t\t// Choose node type => use --constraint arg\n\t\t{\n\t\t\ttypes: map[string]arvados.InstanceType{\n\t\t\t\t\"a1.tiny\":   {Name: \"a1.tiny\", Price: 0.02, RAM: 128000000, VCPUs: 1},\n\t\t\t\t\"a1.small\":  {Name: \"a1.small\", Price: 0.04, RAM: 256000000, VCPUs: 2},\n\t\t\t\t\"a1.medium\": {Name: \"a1.medium\", Price: 0.08, RAM: 512000000, VCPUs: 4},\n\t\t\t\t\"a1.large\":  {Name: \"a1.large\", Price: 0.16, RAM: 1024000000, VCPUs: 8},\n\t\t\t},\n\t\t\tconfigArgs: []string{\"--constraint=instancetype=%I\"},\n\t\t\texpectArgs: []string{\"--constraint=instancetype=a1.medium\"},\n\t\t},\n\t\t// No node types configured => empty %I value\n\t\t{\n\t\t\ttypes:      nil,\n\t\t\tconfigArgs: []string{\"--constraint=instancetype=%I\"},\n\t\t\texpectArgs: []string{\"--constraint=instancetype=\"},\n\t\t},\n\t\t// No node type is big enough => error\n\t\t{\n\t\t\ttypes: map[string]arvados.InstanceType{\n\t\t\t\t\"a1.tiny\": {Name: \"a1.tiny\", Price: 0.02, RAM: 128000000, VCPUs: 1},\n\t\t\t},\n\t\t\terr: container.ConstraintsNotSatisfiableError{},\n\t\t},\n\t} {\n\t\tc.Logf(\"%#v\", trial)\n\t\ts.disp.cluster = &arvados.Cluster{InstanceTypes: trial.types}\n\t\ts.disp.cluster.Containers.SLURM.SbatchArgumentsList = []string{\"--constraint=instancetype=%I\"}\n\t\ts.disp.cluster.Containers.SLURM.SbatchGPUArgumentsList = []string{\"--gpus=%G\"}\n\n\t\targs, err := s.disp.sbatchArgs(ctr)\n\t\tc.Check(err == nil, Equals, trial.err == nil)\n\t\tif trial.err == nil {\n\t\t\tc.Check(args, DeepEquals, append([]string{\"--nice=10000\", \"--no-requeue\"}, trial.expectArgs...))\n\t\t} else {\n\t\t\tc.Check(len(err.(container.ConstraintsNotSatisfiableError).AvailableTypes), Equals, len(trial.types))\n\t\t}\n\t}\n}\n\nfunc (s *StubbedSuite) TestSbatchPartition(c *C) {\n\tcontainer := arvados.Container{\n\t\tUUID:                 \"123\",\n\t\tRuntimeConstraints:   arvados.RuntimeConstraints{RAM: 250000000, VCPUs: 1},\n\t\tSchedulingParameters: arvados.SchedulingParameters{Partitions: []string{\"blurb\", \"b2\"}},\n\t\tPriority:             1,\n\t}\n\n\targs, err := s.disp.sbatchArgs(container)\n\tc.Check(args, DeepEquals, []string{\n\t\t\"--nice=10000\", 
\"--no-requeue\",\n\t\t\"--job-name=123\",\n\t\t\"--mem=789\", \"--cpus-per-task=1\", \"--tmp=0\",\n\t\t\"--partition=blurb,b2\",\n\t})\n\tc.Check(err, IsNil)\n}\n\nfunc (s *StubbedSuite) TestSbatchArgumentsError(c *C) {\n\ts.disp.cluster.Containers.SLURM.SbatchArgumentsList = []string{\"--bad% template\"}\n\tcontainer := arvados.Container{}\n\t_, err := s.disp.sbatchArgs(container)\n\tc.Check(err, ErrorMatches, `Unknown substitution parameter %  in .*`)\n}\n\nfunc (s *StubbedSuite) TestLoadLegacyConfig(c *C) {\n\tlog := ctxlog.TestLogger(c)\n\tcontent := []byte(`\nClient:\n  APIHost: example.com\n  AuthToken: abcdefg\n  KeepServiceURIs:\n    - https://example.com/keep1\n    - https://example.com/keep2\nSbatchArguments: [\"--foo\", \"bar\"]\nPollPeriod: 12s\nPrioritySpread: 42\nCrunchRunCommand: [\"x-crunch-run\", \"--cgroup-parent-subsystem=memory\"]\nReserveExtraRAM: 12345\nMinRetryPeriod: 13s\nBatchSize: 99\n`)\n\ttmpfile := c.MkDir() + \"/config.yml\"\n\terr := ioutil.WriteFile(tmpfile, content, 0777)\n\tc.Assert(err, IsNil)\n\n\tos.Setenv(\"ARVADOS_KEEP_SERVICES\", \"\")\n\n\tflags := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tflags.SetOutput(os.Stderr)\n\tloader := config.NewLoader(&bytes.Buffer{}, log)\n\tloader.SetupFlags(flags)\n\targs := loader.MungeLegacyConfigArgs(log, []string{\"-config\", tmpfile}, \"-legacy-\"+string(arvados.ServiceNameDispatchSLURM)+\"-config\")\n\tok, _ := cmd.ParseFlags(flags, \"crunch-dispatch-slurm\", args, \"\", os.Stderr)\n\tc.Check(ok, Equals, true)\n\tcfg, err := loader.Load()\n\tc.Assert(err, IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, IsNil)\n\n\tc.Check(cluster.Services.Controller.ExternalURL, Equals, arvados.URL{Scheme: \"https\", Host: \"example.com\", Path: \"/\"})\n\tc.Check(cluster.SystemRootToken, Equals, \"abcdefg\")\n\tc.Check(cluster.Containers.SLURM.SbatchArgumentsList, DeepEquals, []string{\"--foo\", \"bar\"})\n\tc.Check(cluster.Containers.CloudVMs.PollInterval, Equals, arvados.Duration(12*time.Second))\n\tc.Check(cluster.Containers.SLURM.PrioritySpread, Equals, int64(42))\n\tc.Check(cluster.Containers.CrunchRunCommand, Equals, \"x-crunch-run\")\n\tc.Check(cluster.Containers.CrunchRunArgumentsList, DeepEquals, []string{\"--cgroup-parent-subsystem=memory\"})\n\tc.Check(cluster.Containers.ReserveExtraRAM, Equals, arvados.ByteSize(12345))\n\tc.Check(cluster.Containers.MinRetryPeriod, Equals, arvados.Duration(13*time.Second))\n\tc.Check(cluster.API.MaxItemsPerResponse, Equals, 99)\n\tc.Check(cluster.Containers.SLURM.SbatchEnvironmentVariables, DeepEquals, map[string]string{\n\t\t\"ARVADOS_KEEP_SERVICES\": \"https://example.com/keep1 https://example.com/keep2\",\n\t})\n\n\t// Ensure configure() copies SbatchEnvironmentVariables into\n\t// the current process's environment (that's how they end up\n\t// getting passed to sbatch).\n\ts.disp.cluster = cluster\n\ts.disp.configure()\n\tc.Check(os.Getenv(\"ARVADOS_KEEP_SERVICES\"), Equals, \"https://example.com/keep1 https://example.com/keep2\")\n}\n"
  },
  {
    "path": "services/crunch-dispatch-slurm/node_type.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchslurm\n\nimport (\n\t\"log\"\n\t\"os/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// SlurmNodeTypeFeatureKludge ensures SLURM accepts every instance\n// type name as a valid feature name, even if no instances of that\n// type have appeared yet.\n//\n// It takes advantage of some SLURM peculiarities:\n//\n// (1) A feature is valid after it has been offered by a node, even if\n// it is no longer offered by any node. So, to make a feature name\n// valid, we can add it to a dummy node (\"compute0\"), then remove it.\n//\n// (2) To test whether a set of feature names are valid without\n// actually submitting a job, we can call srun --test-only with the\n// desired features.\n//\n// SlurmNodeTypeFeatureKludge does a test-and-fix operation\n// immediately, and then periodically, in case slurm restarts and\n// forgets the list of valid features. It never returns (unless there\n// are no node types configured, in which case it returns\n// immediately), so it should generally be invoked with \"go\".\nfunc SlurmNodeTypeFeatureKludge(cc *arvados.Cluster) {\n\tif len(cc.InstanceTypes) == 0 {\n\t\treturn\n\t}\n\tvar features []string\n\tfor _, it := range cc.InstanceTypes {\n\t\tfeatures = append(features, \"instancetype=\"+it.Name)\n\t}\n\tfor {\n\t\tslurmKludge(features)\n\t\ttime.Sleep(2 * time.Second)\n\t}\n}\n\nconst slurmDummyNode = \"compute0\"\n\nfunc slurmKludge(features []string) {\n\tallFeatures := strings.Join(features, \",\")\n\n\tcmd := exec.Command(\"sinfo\", \"--nodes=\"+slurmDummyNode, \"--format=%f\", \"--noheader\")\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tlog.Printf(\"running %q %q: %s (output was %q)\", cmd.Path, cmd.Args, err, out)\n\t\treturn\n\t}\n\tif string(out) == allFeatures+\"\\n\" {\n\t\t// Already configured correctly, nothing to do.\n\t\treturn\n\t}\n\n\tlog.Printf(\"configuring node %q with all node type features\", slurmDummyNode)\n\tcmd = exec.Command(\"scontrol\", \"update\", \"NodeName=\"+slurmDummyNode, \"Features=\"+allFeatures)\n\tlog.Printf(\"running: %q %q\", cmd.Path, cmd.Args)\n\tout, err = cmd.CombinedOutput()\n\tif err != nil {\n\t\tlog.Printf(\"error: scontrol: %s (output was %q)\", err, out)\n\t}\n}\n"
  },
  {
    "path": "services/crunch-dispatch-slurm/priority.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchslurm\n\nconst defaultSpread int64 = 10\n\n// wantNice calculates appropriate nice values for a set of SLURM\n// jobs. The returned slice will have len(jobs) elements.\n//\n// spread is a positive amount of space to leave between adjacent\n// priorities when making adjustments. Generally, increasing spread\n// reduces the total number of adjustments made. A smaller spread\n// produces lower nice values, which is useful for old SLURM versions\n// with a limited \"nice\" range and for sites where SLURM is also\n// running non-Arvados jobs with low nice values.\n//\n// If spread<1, a sensible default (10) is used.\nfunc wantNice(jobs []*slurmJob, spread int64) []int64 {\n\tif len(jobs) == 0 {\n\t\treturn nil\n\t}\n\n\tif spread < 1 {\n\t\tspread = defaultSpread\n\t}\n\trenice := make([]int64, len(jobs))\n\n\t// highest usable priority (without going out of order)\n\tvar target int64\n\tfor i, job := range jobs {\n\t\tif i == 0 {\n\t\t\t// renice[0] is always zero, so our highest\n\t\t\t// priority container gets the highest\n\t\t\t// possible slurm priority.\n\t\t\ttarget = job.priority + job.nice\n\t\t} else if space := target - job.priority; space >= 0 && space < (spread-1)*10 {\n\t\t\t// Ordering is correct, and interval isn't too\n\t\t\t// large. Leave existing nice value alone.\n\t\t\trenice[i] = job.nice\n\t\t\ttarget = job.priority\n\t\t} else {\n\t\t\ttarget -= (spread - 1)\n\t\t\tif possible := job.priority + job.nice; target > possible {\n\t\t\t\t// renice[i] is already 0, that's the\n\t\t\t\t// best we can do\n\t\t\t\ttarget = possible\n\t\t\t} else {\n\t\t\t\trenice[i] = possible - target\n\t\t\t}\n\t\t}\n\t\ttarget--\n\t}\n\treturn renice\n}\n"
  },
  {
    "path": "services/crunch-dispatch-slurm/priority_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchslurm\n\nimport (\n\t. \"gopkg.in/check.v1\"\n)\n\nvar _ = Suite(&PrioritySuite{})\n\ntype PrioritySuite struct{}\n\nfunc (s *PrioritySuite) TestReniceCorrect(c *C) {\n\tfor _, test := range []struct {\n\t\tspread int64\n\t\tin     []*slurmJob\n\t\tout    []int64\n\t}{\n\t\t{\n\t\t\t0,\n\t\t\tnil,\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t0,\n\t\t\t[]*slurmJob{},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t10,\n\t\t\t[]*slurmJob{{priority: 4294000111, nice: 10000}},\n\t\t\t[]int64{0},\n\t\t},\n\t\t{\n\t\t\t10,\n\t\t\t[]*slurmJob{\n\t\t\t\t{priority: 4294000111, nice: 10000},\n\t\t\t\t{priority: 4294000111, nice: 10000},\n\t\t\t\t{priority: 4294000111, nice: 10000},\n\t\t\t\t{priority: 4294000111, nice: 10000},\n\t\t\t},\n\t\t\t[]int64{0, 10, 20, 30},\n\t\t},\n\t\t{ // smaller spread than necessary, but correctly ordered => leave nice alone\n\t\t\t10,\n\t\t\t[]*slurmJob{\n\t\t\t\t{priority: 4294000113, nice: 0},\n\t\t\t\t{priority: 4294000112, nice: 1},\n\t\t\t\t{priority: 4294000111, nice: 99},\n\t\t\t},\n\t\t\t[]int64{0, 1, 99},\n\t\t},\n\t\t{ // larger spread than necessary, but less than 10x => leave nice alone\n\t\t\t10,\n\t\t\t[]*slurmJob{\n\t\t\t\t{priority: 4294000144, nice: 0},\n\t\t\t\t{priority: 4294000122, nice: 20},\n\t\t\t\t{priority: 4294000111, nice: 30},\n\t\t\t},\n\t\t\t[]int64{0, 20, 30},\n\t\t},\n\t\t{ // > 10x spread => reduce nice to achieve spread=10\n\t\t\t10,\n\t\t\t[]*slurmJob{\n\t\t\t\t{priority: 4000, nice: 0},    // max pri 4000\n\t\t\t\t{priority: 3000, nice: 999},  // max pri 3999\n\t\t\t\t{priority: 2000, nice: 1998}, // max pri 3998\n\t\t\t},\n\t\t\t[]int64{0, 9, 18},\n\t\t},\n\t\t{ // > 10x spread, but spread=10 is impossible without negative nice\n\t\t\t10,\n\t\t\t[]*slurmJob{\n\t\t\t\t{priority: 4000, nice: 0},    // max pri 4000\n\t\t\t\t{priority: 3000, nice: 500},  // max pri 3500\n\t\t\t\t{priority: 2000, nice: 2000}, // max pri 4000\n\t\t\t},\n\t\t\t[]int64{0, 0, 510},\n\t\t},\n\t\t{ // default spread, needs reorder\n\t\t\t0,\n\t\t\t[]*slurmJob{\n\t\t\t\t{priority: 4000, nice: 0}, // max pri 4000\n\t\t\t\t{priority: 5000, nice: 0}, // max pri 5000\n\t\t\t\t{priority: 6000, nice: 0}, // max pri 6000\n\t\t\t},\n\t\t\t[]int64{0, 1000 + defaultSpread, 2000 + defaultSpread*2},\n\t\t},\n\t\t{ // minimum spread\n\t\t\t1,\n\t\t\t[]*slurmJob{\n\t\t\t\t{priority: 4000, nice: 0}, // max pri 4000\n\t\t\t\t{priority: 5000, nice: 0}, // max pri 5000\n\t\t\t\t{priority: 6000, nice: 0}, // max pri 6000\n\t\t\t\t{priority: 3000, nice: 0}, // max pri 3000\n\t\t\t},\n\t\t\t[]int64{0, 1001, 2002, 0},\n\t\t},\n\t} {\n\t\tc.Logf(\"spread=%d %+v -> %+v\", test.spread, test.in, test.out)\n\t\tc.Check(wantNice(test.in, test.spread), DeepEquals, test.out)\n\n\t\tif len(test.in) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t// After making the adjustments, calling wantNice\n\t\t// again should return the same recommendations.\n\t\tupdated := make([]*slurmJob, len(test.in))\n\t\tfor i, in := range test.in {\n\t\t\tupdated[i] = &slurmJob{\n\t\t\t\tnice:     test.out[i],\n\t\t\t\tpriority: in.priority + in.nice - test.out[i],\n\t\t\t}\n\t\t}\n\t\tc.Check(wantNice(updated, test.spread), DeepEquals, test.out)\n\t}\n}\n\nfunc (s *PrioritySuite) TestReniceChurn(c *C) {\n\tconst spread = 10\n\tjobs := make([]*slurmJob, 1000)\n\tfor i := range jobs {\n\t\tjobs[i] = &slurmJob{priority: 4294000000 - int64(i), nice: 10000}\n\t}\n\tadjustments := 0\n\tqueue := jobs\n\tfor len(queue) > 
0 {\n\t\trenice := wantNice(queue, spread)\n\t\tfor i := range queue {\n\t\t\tif renice[i] == queue[i].nice {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tqueue[i].priority += queue[i].nice - renice[i]\n\t\t\tqueue[i].nice = renice[i]\n\t\t\tadjustments++\n\t\t}\n\t\tqueue = queue[1:]\n\t}\n\tc.Logf(\"processed queue of %d with %d renice ops\", len(jobs), adjustments)\n\tc.Check(adjustments < len(jobs)*len(jobs)/10, Equals, true)\n}\n"
  },
  {
    "path": "services/crunch-dispatch-slurm/script.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchslurm\n\nimport (\n\t\"strings\"\n)\n\nfunc execScript(args []string, env map[string]string) string {\n\ts := \"#!/bin/sh\\n\"\n\tfor k, v := range env {\n\t\ts += k + `='`\n\t\ts += strings.Replace(v, `'`, `'\\''`, -1)\n\t\ts += `' `\n\t}\n\ts += `exec`\n\tfor _, w := range args {\n\t\ts += ` '`\n\t\ts += strings.Replace(w, `'`, `'\\''`, -1)\n\t\ts += `'`\n\t}\n\treturn s + \"\\n\"\n}\n"
  },
  {
    "path": "services/crunch-dispatch-slurm/script_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchslurm\n\nimport (\n\t. \"gopkg.in/check.v1\"\n)\n\nvar _ = Suite(&ScriptSuite{})\n\ntype ScriptSuite struct{}\n\nfunc (s *ScriptSuite) TestExecScript(c *C) {\n\tfor _, test := range []struct {\n\t\targs   []string\n\t\tscript string\n\t}{\n\t\t{nil, `exec`},\n\t\t{[]string{`foo`}, `exec 'foo'`},\n\t\t{[]string{`foo`, `bar baz`}, `exec 'foo' 'bar baz'`},\n\t\t{[]string{`foo\"`, \"'waz 'qux\\n\"}, `exec 'foo\"' ''\\''waz '\\''qux` + \"\\n\" + `'`},\n\t} {\n\t\tc.Logf(\"%+v -> %+v\", test.args, test.script)\n\t\tc.Check(execScript(test.args, nil), Equals, \"#!/bin/sh\\n\"+test.script+\"\\n\")\n\t}\n\tc.Check(execScript([]string{\"sh\", \"-c\", \"echo $foo\"}, map[string]string{\"foo\": \"b'ar\"}), Equals, \"#!/bin/sh\\nfoo='b'\\\\''ar' exec 'sh' '-c' 'echo $foo'\\n\")\n}\n"
  },
  {
    "path": "services/crunch-dispatch-slurm/slurm.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchslurm\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os/exec\"\n\t\"strings\"\n)\n\ntype Slurm interface {\n\tBatch(script io.Reader, args []string) error\n\tCancel(name string) error\n\tQueueCommand(args []string) *exec.Cmd\n\tRelease(name string) error\n\tRenice(name string, nice int64) error\n}\n\ntype slurmCLI struct {\n\trunSemaphore chan bool\n}\n\nfunc NewSlurmCLI() *slurmCLI {\n\treturn &slurmCLI{\n\t\trunSemaphore: make(chan bool, 3),\n\t}\n}\n\nfunc (scli *slurmCLI) Batch(script io.Reader, args []string) error {\n\treturn scli.run(script, \"sbatch\", args)\n}\n\nfunc (scli *slurmCLI) Cancel(name string) error {\n\tfor _, args := range [][]string{\n\t\t// If the slurm job hasn't started yet, remove it from\n\t\t// the queue.\n\t\t{\"--state=pending\"},\n\t\t// If the slurm job has started, send SIGTERM. If we\n\t\t// cancel a running job without a --signal argument,\n\t\t// slurm will send SIGTERM and then (after some\n\t\t// site-configured interval) SIGKILL. This would kill\n\t\t// crunch-run without stopping the container, which we\n\t\t// don't want.\n\t\t{\"--batch\", \"--signal=TERM\", \"--state=running\"},\n\t\t{\"--batch\", \"--signal=TERM\", \"--state=suspended\"},\n\t} {\n\t\terr := scli.run(nil, \"scancel\", append([]string{\"--name=\" + name}, args...))\n\t\tif err != nil {\n\t\t\t// scancel exits 0 if no job matches the given\n\t\t\t// name and state. Any error from scancel here\n\t\t\t// really indicates something is wrong.\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (scli *slurmCLI) QueueCommand(args []string) *exec.Cmd {\n\treturn exec.Command(\"squeue\", args...)\n}\n\nfunc (scli *slurmCLI) Release(name string) error {\n\treturn scli.run(nil, \"scontrol\", []string{\"release\", \"Name=\" + name})\n}\n\nfunc (scli *slurmCLI) Renice(name string, nice int64) error {\n\treturn scli.run(nil, \"scontrol\", []string{\"update\", \"JobName=\" + name, fmt.Sprintf(\"Nice=%d\", nice)})\n}\n\nfunc (scli *slurmCLI) run(stdin io.Reader, prog string, args []string) error {\n\tscli.runSemaphore <- true\n\tdefer func() { <-scli.runSemaphore }()\n\tcmd := exec.Command(prog, args...)\n\tcmd.Stdin = stdin\n\tout, err := cmd.CombinedOutput()\n\toutTrim := strings.TrimSpace(string(out))\n\tif err != nil || len(out) > 0 {\n\t\tlog.Printf(\"%q %q: %q\", cmd.Path, cmd.Args, outTrim)\n\t}\n\tif err != nil {\n\t\terr = fmt.Errorf(\"%s: %s (%q)\", cmd.Path, err, outTrim)\n\t}\n\treturn err\n}\n"
  },
  {
    "path": "services/crunch-dispatch-slurm/squeue.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchslurm\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst slurm15NiceLimit int64 = 10000\n\ntype slurmJob struct {\n\tuuid         string\n\twantPriority int64\n\tpriority     int64 // current slurm priority (incorporates nice value)\n\tnice         int64 // current slurm nice value\n\thitNiceLimit bool\n}\n\n// SqueueChecker implements asynchronous polling monitor of the SLURM queue\n// using the command 'squeue'.\ntype SqueueChecker struct {\n\tLogger         logger\n\tPeriod         time.Duration\n\tPrioritySpread int64\n\tSlurm          Slurm\n\tqueue          map[string]*slurmJob\n\tstartOnce      sync.Once\n\tdone           chan struct{}\n\tlock           sync.RWMutex\n\tnotify         sync.Cond\n}\n\n// HasUUID checks if a given container UUID is in the slurm queue.\n// This does not run squeue directly, but instead blocks until woken\n// up by next successful update of squeue.\nfunc (sqc *SqueueChecker) HasUUID(uuid string) bool {\n\tsqc.startOnce.Do(sqc.start)\n\n\tsqc.lock.RLock()\n\tdefer sqc.lock.RUnlock()\n\n\t// block until next squeue broadcast signaling an update.\n\tsqc.notify.Wait()\n\t_, exists := sqc.queue[uuid]\n\treturn exists\n}\n\n// SetPriority sets or updates the desired (Arvados) priority for a\n// container.\nfunc (sqc *SqueueChecker) SetPriority(uuid string, want int64) {\n\tsqc.startOnce.Do(sqc.start)\n\n\tsqc.lock.RLock()\n\tjob := sqc.queue[uuid]\n\tif job == nil {\n\t\t// Wait in case the slurm job was just submitted and\n\t\t// will appear in the next squeue update.\n\t\tsqc.notify.Wait()\n\t\tjob = sqc.queue[uuid]\n\t}\n\tneedUpdate := job != nil && job.wantPriority != want\n\tsqc.lock.RUnlock()\n\n\tif needUpdate {\n\t\tsqc.lock.Lock()\n\t\tjob.wantPriority = want\n\t\tsqc.lock.Unlock()\n\t}\n}\n\n// adjust slurm job nice values as needed to ensure slurm priority\n// order matches Arvados priority order.\nfunc (sqc *SqueueChecker) reniceAll() {\n\t// This is slow (it shells out to scontrol many times) and no\n\t// other goroutines update sqc.queue or any of the job fields\n\t// we use here, so we don't acquire a lock.\n\tjobs := make([]*slurmJob, 0, len(sqc.queue))\n\tfor _, j := range sqc.queue {\n\t\tif j.wantPriority == 0 {\n\t\t\t// SLURM job with unknown Arvados priority\n\t\t\t// (perhaps it's not an Arvados job)\n\t\t\tcontinue\n\t\t}\n\t\tif j.priority <= 2*slurm15NiceLimit {\n\t\t\t// SLURM <= 15.x implements \"hold\" by setting\n\t\t\t// priority to 0. 
If we include held jobs\n\t\t\t// here, we'll end up trying to push other\n\t\t\t// jobs below them using negative priority,\n\t\t\t// which won't help anything.\n\t\t\tcontinue\n\t\t}\n\t\tjobs = append(jobs, j)\n\t}\n\n\tsort.Slice(jobs, func(i, j int) bool {\n\t\tif jobs[i].wantPriority != jobs[j].wantPriority {\n\t\t\treturn jobs[i].wantPriority > jobs[j].wantPriority\n\t\t}\n\t\t// break ties with container uuid --\n\t\t// otherwise, the ordering would change from\n\t\t// one interval to the next, and we'd do many\n\t\t// pointless slurm queue rearrangements.\n\t\treturn jobs[i].uuid > jobs[j].uuid\n\t})\n\trenice := wantNice(jobs, sqc.PrioritySpread)\n\tfor i, job := range jobs {\n\t\tniceNew := renice[i]\n\t\tif job.hitNiceLimit && niceNew > slurm15NiceLimit {\n\t\t\tniceNew = slurm15NiceLimit\n\t\t}\n\t\tif niceNew == job.nice {\n\t\t\tcontinue\n\t\t}\n\t\terr := sqc.Slurm.Renice(job.uuid, niceNew)\n\t\tif err != nil && niceNew > slurm15NiceLimit && strings.Contains(err.Error(), \"Invalid nice value\") {\n\t\t\tsqc.Logger.Warnf(\"container %q clamping nice values at %d, priority order will not be correct -- see https://dev.arvados.org/projects/arvados/wiki/SLURM_integration#Limited-nice-values-SLURM-15\", job.uuid, slurm15NiceLimit)\n\t\t\tjob.hitNiceLimit = true\n\t\t}\n\t}\n}\n\n// Stop stops the squeue monitoring goroutine. Do not call HasUUID\n// after calling Stop.\nfunc (sqc *SqueueChecker) Stop() {\n\tif sqc.done != nil {\n\t\tclose(sqc.done)\n\t}\n}\n\n// check gets the names of jobs in the SLURM queue (running and\n// queued). If it succeeds, it updates sqc.queue and wakes up any\n// goroutines that are waiting in HasUUID() or All().\nfunc (sqc *SqueueChecker) check() {\n\tcmd := sqc.Slurm.QueueCommand([]string{\"--all\", \"--noheader\", \"--format=%j %y %Q %T %r\"})\n\tstdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}\n\tcmd.Stdout, cmd.Stderr = stdout, stderr\n\tif err := cmd.Run(); err != nil {\n\t\tsqc.Logger.Warnf(\"Error running %q %q: %s %q\", cmd.Path, cmd.Args, err, stderr.String())\n\t\treturn\n\t}\n\n\tlines := strings.Split(stdout.String(), \"\\n\")\n\tnewq := make(map[string]*slurmJob, len(lines))\n\tfor _, line := range lines {\n\t\tif line == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tvar uuid, state, reason string\n\t\tvar n, p int64\n\t\tif _, err := fmt.Sscan(line, &uuid, &n, &p, &state, &reason); err != nil {\n\t\t\tsqc.Logger.Warnf(\"ignoring unparsed line in squeue output: %q\", line)\n\t\t\tcontinue\n\t\t}\n\n\t\t// No other goroutines write to jobs' priority or nice\n\t\t// fields, so we can read and write them without\n\t\t// locks.\n\t\treplacing, ok := sqc.queue[uuid]\n\t\tif !ok {\n\t\t\treplacing = &slurmJob{uuid: uuid}\n\t\t}\n\t\treplacing.priority = p\n\t\treplacing.nice = n\n\t\tnewq[uuid] = replacing\n\n\t\tif state == \"PENDING\" && ((reason == \"BadConstraints\" && p <= 2*slurm15NiceLimit) || reason == \"launch failed requeued held\") && replacing.wantPriority > 0 {\n\t\t\t// When using SLURM 14.x or 15.x, our queued\n\t\t\t// jobs land in this state when \"scontrol\n\t\t\t// reconfigure\" invalidates their feature\n\t\t\t// constraints by clearing all node features.\n\t\t\t// They stay in this state even after the\n\t\t\t// features reappear, until we run \"scontrol\n\t\t\t// release {jobid}\". Priority is usually 0 in\n\t\t\t// this state, but sometimes (due to a race\n\t\t\t// with nice adjustments?) 
it's a small\n\t\t\t// positive value.\n\t\t\t//\n\t\t\t// \"scontrol release\" is silent and successful\n\t\t\t// regardless of whether the features have\n\t\t\t// reappeared, so rather than second-guessing\n\t\t\t// whether SLURM is ready, we just keep trying\n\t\t\t// this until it works.\n\t\t\t//\n\t\t\t// \"launch failed requeued held\" seems to be\n\t\t\t// another manifestation of this problem,\n\t\t\t// resolved the same way.\n\t\t\tsqc.Logger.Printf(\"releasing held job %q (priority=%d, state=%q, reason=%q)\", uuid, p, state, reason)\n\t\t\tsqc.Slurm.Release(uuid)\n\t\t} else if state != \"RUNNING\" && p <= 2*slurm15NiceLimit && replacing.wantPriority > 0 {\n\t\t\tsqc.Logger.Warnf(\"job %q has low priority %d, nice %d, state %q, reason %q\", uuid, p, n, state, reason)\n\t\t}\n\t}\n\tsqc.lock.Lock()\n\tsqc.queue = newq\n\tsqc.lock.Unlock()\n\tsqc.notify.Broadcast()\n}\n\n// Initialize, and start a goroutine to call check() once per\n// squeue.Period until terminated by calling Stop().\nfunc (sqc *SqueueChecker) start() {\n\tsqc.notify.L = sqc.lock.RLocker()\n\tsqc.done = make(chan struct{})\n\tgo func() {\n\t\tticker := time.NewTicker(sqc.Period)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-sqc.done:\n\t\t\t\tticker.Stop()\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tsqc.check()\n\t\t\t\tsqc.reniceAll()\n\t\t\t\tselect {\n\t\t\t\tcase <-ticker.C:\n\t\t\t\t\t// If this iteration took\n\t\t\t\t\t// longer than sqc.Period,\n\t\t\t\t\t// consume the next tick and\n\t\t\t\t\t// wait. Otherwise we would\n\t\t\t\t\t// starve other goroutines.\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}\n\n// All waits for the next squeue invocation, and returns all job\n// names reported by squeue.\nfunc (sqc *SqueueChecker) All() []string {\n\tsqc.startOnce.Do(sqc.start)\n\tsqc.lock.RLock()\n\tdefer sqc.lock.RUnlock()\n\tsqc.notify.Wait()\n\tvar uuids []string\n\tfor u := range sqc.queue {\n\t\tuuids = append(uuids, u)\n\t}\n\treturn uuids\n}\n"
  },
  {
    "path": "services/crunch-dispatch-slurm/squeue_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchslurm\n\nimport (\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t. \"gopkg.in/check.v1\"\n)\n\nvar _ = Suite(&SqueueSuite{})\n\ntype SqueueSuite struct{}\n\nfunc (s *SqueueSuite) TestReleasePending(c *C) {\n\tuuids := []string{\n\t\t\"zzzzz-dz642-fake0fake0fake0\",\n\t\t\"zzzzz-dz642-fake1fake1fake1\",\n\t\t\"zzzzz-dz642-fake2fake2fake2\",\n\t}\n\tslurm := &slurmFake{\n\t\tqueue: uuids[0] + \" 10000 4294000000 PENDING Resources\\n\" + uuids[1] + \" 10000 4294000111 PENDING Resources\\n\" + uuids[2] + \" 10000 0 PENDING BadConstraints\\n\",\n\t}\n\tsqc := &SqueueChecker{\n\t\tLogger: logrus.StandardLogger(),\n\t\tSlurm:  slurm,\n\t\tPeriod: time.Hour,\n\t}\n\tsqc.startOnce.Do(sqc.start)\n\tdefer sqc.Stop()\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tfor _, u := range uuids {\n\t\t\tsqc.SetPriority(u, 1)\n\t\t}\n\t\tclose(done)\n\t}()\n\tcallUntilReady(sqc.check, done)\n\n\tslurm.didRelease = nil\n\tsqc.check()\n\tc.Check(slurm.didRelease, DeepEquals, []string{uuids[2]})\n}\n\nfunc (s *SqueueSuite) TestReniceAll(c *C) {\n\tuuids := []string{\"zzzzz-dz642-fake0fake0fake0\", \"zzzzz-dz642-fake1fake1fake1\", \"zzzzz-dz642-fake2fake2fake2\"}\n\tfor _, test := range []struct {\n\t\tspread int64\n\t\tsqueue string\n\t\twant   map[string]int64\n\t\texpect [][]string\n\t}{\n\t\t{\n\t\t\tspread: 1,\n\t\t\tsqueue: uuids[0] + \" 10000 4294000000 PENDING Resources\\n\",\n\t\t\twant:   map[string]int64{uuids[0]: 1},\n\t\t\texpect: [][]string{{uuids[0], \"0\"}},\n\t\t},\n\t\t{ // fake0 priority is too high\n\t\t\tspread: 1,\n\t\t\tsqueue: uuids[0] + \" 10000 4294000777 PENDING Resources\\n\" + uuids[1] + \" 10000 4294000444 PENDING Resources\\n\",\n\t\t\twant:   map[string]int64{uuids[0]: 1, uuids[1]: 999},\n\t\t\texpect: [][]string{{uuids[1], \"0\"}, {uuids[0], \"334\"}},\n\t\t},\n\t\t{ // specify spread\n\t\t\tspread: 100,\n\t\t\tsqueue: uuids[0] + \" 10000 4294000777 PENDING Resources\\n\" + uuids[1] + \" 10000 4294000444 PENDING Resources\\n\",\n\t\t\twant:   map[string]int64{uuids[0]: 1, uuids[1]: 999},\n\t\t\texpect: [][]string{{uuids[1], \"0\"}, {uuids[0], \"433\"}},\n\t\t},\n\t\t{ // ignore fake2 because SetPriority() not called\n\t\t\tspread: 1,\n\t\t\tsqueue: uuids[0] + \" 10000 4294000000 PENDING Resources\\n\" + uuids[1] + \" 10000 4294000111 PENDING Resources\\n\" + uuids[2] + \" 10000 4294000222 PENDING Resources\\n\",\n\t\t\twant:   map[string]int64{uuids[0]: 999, uuids[1]: 1},\n\t\t\texpect: [][]string{{uuids[0], \"0\"}, {uuids[1], \"112\"}},\n\t\t},\n\t\t{ // ignore fake2 because slurm priority=0\n\t\t\tspread: 1,\n\t\t\tsqueue: uuids[0] + \" 10000 4294000000 PENDING Resources\\n\" + uuids[1] + \" 10000 4294000111 PENDING Resources\\n\" + uuids[2] + \" 10000 0 PENDING Resources\\n\",\n\t\t\twant:   map[string]int64{uuids[0]: 999, uuids[1]: 1, uuids[2]: 997},\n\t\t\texpect: [][]string{{uuids[0], \"0\"}, {uuids[1], \"112\"}},\n\t\t},\n\t} {\n\t\tc.Logf(\"spread=%d squeue=%q want=%v -> expect=%v\", test.spread, test.squeue, test.want, test.expect)\n\t\tslurm := &slurmFake{\n\t\t\tqueue: test.squeue,\n\t\t}\n\t\tsqc := &SqueueChecker{\n\t\t\tLogger:         logrus.StandardLogger(),\n\t\t\tSlurm:          slurm,\n\t\t\tPrioritySpread: test.spread,\n\t\t\tPeriod:         time.Hour,\n\t\t}\n\t\tsqc.startOnce.Do(sqc.start)\n\t\tsqc.check()\n\t\tfor uuid, pri := range test.want {\n\t\t\tsqc.SetPriority(uuid, 
pri)\n\t\t}\n\t\tsqc.reniceAll()\n\t\tc.Check(slurm.didRenice, DeepEquals, test.expect)\n\t\tsqc.Stop()\n\t}\n}\n\n// If a limited nice range prevents desired priority adjustments, give\n// up and clamp nice to 10K.\nfunc (s *SqueueSuite) TestReniceInvalidNiceValue(c *C) {\n\tuuids := []string{\"zzzzz-dz642-fake0fake0fake0\", \"zzzzz-dz642-fake1fake1fake1\", \"zzzzz-dz642-fake2fake2fake2\"}\n\tslurm := &slurmFake{\n\t\tqueue:         uuids[0] + \" 0 4294000222 PENDING Resources\\n\" + uuids[1] + \" 0 4294555222 PENDING Resources\\n\",\n\t\trejectNice10K: true,\n\t}\n\tsqc := &SqueueChecker{\n\t\tLogger:         logrus.StandardLogger(),\n\t\tSlurm:          slurm,\n\t\tPrioritySpread: 1,\n\t\tPeriod:         time.Hour,\n\t}\n\tsqc.startOnce.Do(sqc.start)\n\tsqc.check()\n\tsqc.SetPriority(uuids[0], 2)\n\tsqc.SetPriority(uuids[1], 1)\n\n\t// First attempt should renice to 555001, which will fail\n\tsqc.reniceAll()\n\tc.Check(slurm.didRenice, DeepEquals, [][]string{{uuids[1], \"555001\"}})\n\n\t// Next attempt should renice to 10K, which will succeed\n\tsqc.reniceAll()\n\tc.Check(slurm.didRenice, DeepEquals, [][]string{{uuids[1], \"555001\"}, {uuids[1], \"10000\"}})\n\t// ...so we'll change the squeue response to reflect the\n\t// updated priority+nice, and make sure sqc sees that...\n\tslurm.queue = uuids[0] + \" 0 4294000222 PENDING Resources\\n\" + uuids[1] + \" 10000 4294545222 PENDING Resources\\n\"\n\tsqc.check()\n\n\t// Next attempt should leave nice alone because it's already\n\t// at the 10K limit\n\tsqc.reniceAll()\n\tc.Check(slurm.didRenice, DeepEquals, [][]string{{uuids[1], \"555001\"}, {uuids[1], \"10000\"}})\n\n\t// Back to normal if desired nice value falls below 10K\n\tslurm.queue = uuids[0] + \" 0 4294000222 PENDING Resources\\n\" + uuids[1] + \" 10000 4294000111 PENDING Resources\\n\"\n\tsqc.check()\n\tsqc.reniceAll()\n\tc.Check(slurm.didRenice, DeepEquals, [][]string{{uuids[1], \"555001\"}, {uuids[1], \"10000\"}, {uuids[1], \"9890\"}})\n\n\tsqc.Stop()\n}\n\n// If the given UUID isn't in the slurm queue yet, SetPriority()\n// should wait for it to appear on the very next poll, then give up.\nfunc (s *SqueueSuite) TestSetPriorityBeforeQueued(c *C) {\n\tuuidGood := \"zzzzz-dz642-fake0fake0fake0\"\n\tuuidBad := \"zzzzz-dz642-fake1fake1fake1\"\n\n\tslurm := &slurmFake{}\n\tsqc := &SqueueChecker{\n\t\tLogger: logrus.StandardLogger(),\n\t\tSlurm:  slurm,\n\t\tPeriod: time.Hour,\n\t}\n\tsqc.startOnce.Do(sqc.start)\n\tsqc.Stop()\n\tsqc.check()\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tsqc.SetPriority(uuidGood, 123)\n\t\tsqc.SetPriority(uuidBad, 345)\n\t\tclose(done)\n\t}()\n\tc.Check(sqc.queue[uuidGood], IsNil)\n\tc.Check(sqc.queue[uuidBad], IsNil)\n\ttimeout := time.NewTimer(time.Second)\n\tdefer timeout.Stop()\n\ttick := time.NewTicker(time.Millisecond)\n\tdefer tick.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\t\t\tslurm.queue = uuidGood + \" 0 12345 PENDING Resources\\n\"\n\t\t\tsqc.check()\n\n\t\t\t// Avoid immediately selecting this case again\n\t\t\t// on the next iteration if check() took\n\t\t\t// longer than one tick.\n\t\t\tselect {\n\t\t\tcase <-tick.C:\n\t\t\tdefault:\n\t\t\t}\n\t\tcase <-timeout.C:\n\t\t\tc.Fatal(\"timed out\")\n\t\tcase <-done:\n\t\t\tc.Assert(sqc.queue[uuidGood], NotNil)\n\t\t\tc.Check(sqc.queue[uuidGood].wantPriority, Equals, int64(123))\n\t\t\tc.Check(sqc.queue[uuidBad], IsNil)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc callUntilReady(fn func(), done <-chan struct{}) {\n\ttick := time.NewTicker(time.Millisecond)\n\tdefer 
tick.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn\n\t\tcase <-tick.C:\n\t\t\tfn()\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "services/crunch-dispatch-slurm/usage.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage dispatchslurm\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n)\n\nfunc usage(fs *flag.FlagSet) {\n\tfmt.Fprintf(fs.Output(), `\ncrunch-dispatch-slurm runs queued Arvados containers by submitting\nSLURM batch jobs.\n\nOptions:\n`)\n\tfs.PrintDefaults()\n\tfmt.Fprintf(fs.Output(), `\n\nFor configuration instructions see https://doc.arvados.org/install/crunch2-slurm/install-dispatch.html\n`)\n}\n"
  },
  {
    "path": "services/dockercleaner/MANIFEST.in",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ninclude agpl-3.0.txt\ninclude arvados-docker-cleaner.service\ninclude arvados_version.py"
  },
  {
    "path": "services/dockercleaner/README.rst",
    "content": ".. Copyright (C) The Arvados Authors. All rights reserved.\n..\n.. SPDX-License-Identifier: Apache-2.0\n\nArvados Docker Cleaner.\n"
  },
  {
    "path": "services/dockercleaner/agpl-3.0.txt",
    "content": "                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU Affero General Public License is a free, copyleft license for\nsoftware and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nour General Public Licenses are intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.\n\n  A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.\n\n  The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.\n\n  An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU Affero General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  
\"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. 
Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  
This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  
But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  
If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  
If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  
\"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Remote Network Interaction; Use with the GNU General Public License.\n\n  Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  
This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the work with which it is combined will remain governed by version\n3 of the GNU General Public License.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new versions\nwill be similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU Affero General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU Affero General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. 
Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a \"Source\" link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n<http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "services/dockercleaner/arvados-docker-cleaner.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[Unit]\nDescription=Arvados Docker Image Cleaner\nDocumentation=https://doc.arvados.org/\nAfter=network.target\nStartLimitIntervalSec=0\n\n[Service]\nType=simple\nRestart=always\nRestartSec=10s\nRestartPreventExitStatus=2\nExecStart=/usr/bin/arvados-docker-cleaner\n\n[Install]\nWantedBy=multi-user.target\n"
  },
  {
    "path": "services/dockercleaner/arvados_docker/__init__.py",
    "content": ""
  },
  {
    "path": "services/dockercleaner/arvados_docker/cleaner.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n\"\"\"arvados_docker.cleaner - Remove unused Docker images from compute nodes\n\nUsage:\n  python3 -m arvados_docker.cleaner --quota 50G\n\"\"\"\n\nimport argparse\nimport collections\nimport copy\nimport functools\nimport json\nimport logging\nimport sys\nimport time\n\nimport docker\nimport json\n\nDEFAULT_CONFIG_FILE = '/etc/arvados/docker-cleaner/docker-cleaner.json'\n\nSUFFIX_SIZES = {suffix: 1024 ** exp for exp, suffix in enumerate('kmgt', 1)}\n\nlogger = logging.getLogger('arvados_docker.cleaner')\n\n\ndef return_when_docker_not_found(result=None):\n    # If the decorated function raises a 404 error from Docker, return\n    # `result` instead.\n    def docker_not_found_decorator(orig_func):\n        @functools.wraps(orig_func)\n        def docker_not_found_wrapper(*args, **kwargs):\n            try:\n                return orig_func(*args, **kwargs)\n            except docker.errors.APIError as error:\n                if error.response.status_code != 404:\n                    raise\n                return result\n        return docker_not_found_wrapper\n    return docker_not_found_decorator\n\n\nclass DockerImage:\n\n    def __init__(self, image_hash):\n        self.docker_id = image_hash['Id']\n        self.size = image_hash['Size']\n        self.last_used = -1\n\n    def used_at(self, use_time):\n        self.last_used = max(self.last_used, use_time)\n\n\nclass DockerImages:\n\n    def __init__(self, target_size):\n        self.target_size = target_size\n        self.images = {}\n        self.container_image_map = {}\n\n    @classmethod\n    def from_daemon(cls, target_size, docker_client):\n        images = cls(target_size)\n        for image in docker_client.images():\n            images.add_image(image)\n        return images\n\n    def add_image(self, image_hash):\n        image = DockerImage(image_hash)\n        self.images[image.docker_id] = image\n        logger.debug(\"Registered image %s\", image.docker_id)\n\n    def del_image(self, image_id):\n        if image_id in self.images:\n            del self.images[image_id]\n            self.container_image_map = {\n                cid: cid_image\n                for cid, cid_image in self.container_image_map.items()\n                if cid_image != image_id}\n            logger.debug(\"Unregistered image %s\", image_id)\n\n    def has_image(self, image_id):\n        return image_id in self.images\n\n    def add_user(self, container_hash, use_time):\n        image_id = container_hash['Image']\n        if image_id in self.images:\n            self.container_image_map[container_hash['Id']] = image_id\n            self.images[image_id].used_at(use_time)\n            logger.debug(\"Registered container %s using image %s\",\n                         container_hash['Id'], image_id)\n\n    def end_user(self, cid):\n        self.container_image_map.pop(cid, None)\n        logger.debug(\"Unregistered container %s\", cid)\n\n    def should_delete(self):\n        if not self.images:\n            return\n        # Build a list of images, ordered by use time.\n        lru_images = list(self.images.values())\n        lru_images.sort(key=lambda image: image.last_used)\n        # Make sure we don't delete any images in use, or if there are\n        # none, the most recently used image.\n        if self.container_image_map:\n            keep_ids = set(self.container_image_map.values())\n        else:\n    
        keep_ids = {lru_images[-1].docker_id}\n        space_left = (self.target_size - sum(self.images[image_id].size\n                                             for image_id in keep_ids))\n        # Go through the list most recently used first, and note which\n        # images can be saved with the space allotted.\n        for image in reversed(lru_images):\n            if (image.docker_id not in keep_ids) and (image.size <= space_left):\n                keep_ids.add(image.docker_id)\n                space_left -= image.size\n        # Yield the Docker IDs of any image we don't want to save, least\n        # recently used first.\n        for image in lru_images:\n            if image.docker_id not in keep_ids:\n                yield image.docker_id\n\n\nclass DockerEventHandlers:\n    # This class maps Docker event types to the names of methods that should\n    # receive those events.\n\n    def __init__(self):\n        self.handler_names = collections.defaultdict(list)\n\n    def on(self, *status_names):\n        def register_handler(handler_method):\n            for status in status_names:\n                self.handler_names[status].append(handler_method.__name__)\n            return handler_method\n        return register_handler\n\n    def for_event(self, status):\n        return iter(self.handler_names[status])\n\n    def copy(self):\n        result = self.__class__()\n        result.handler_names = copy.deepcopy(self.handler_names)\n        return result\n\n\nclass DockerEventListener:\n    # To use this class, define event_handlers as an instance of\n    # DockerEventHandlers.  Call run() to iterate over events and call the\n    # handler methods as they come in.\n    ENCODING = 'utf-8'\n\n    def __init__(self, events):\n        self.events = events\n\n    def run(self):\n        for event in self.events:\n            event = json.loads(event.decode(self.ENCODING))\n            if event.get('Type', 'container') != 'container':\n                continue\n            for method_name in self.event_handlers.for_event(event.get('status')):\n                getattr(self, method_name)(event)\n\n\nclass DockerImageUseRecorder(DockerEventListener):\n    event_handlers = DockerEventHandlers()\n\n    def __init__(self, images, docker_client, events):\n        self.images = images\n        self.docker_client = docker_client\n        super().__init__(events)\n\n    @event_handlers.on('create')\n    @return_when_docker_not_found()\n    def load_container(self, event):\n        container_hash = self.docker_client.inspect_container(event['id'])\n        self.new_container(event, container_hash)\n\n    def new_container(self, event, container_hash):\n        self.images.add_user(container_hash, event['time'])\n\n    @event_handlers.on('destroy')\n    def container_stopped(self, event):\n        self.images.end_user(event['id'])\n\n\nclass DockerImageCleaner(DockerImageUseRecorder):\n    event_handlers = DockerImageUseRecorder.event_handlers.copy()\n\n    def __init__(self, images, docker_client, events, remove_containers_onexit=False):\n        super().__init__(images, docker_client, events)\n        self.logged_unknown = set()\n        self.remove_containers_onexit = remove_containers_onexit\n\n    def new_container(self, event, container_hash):\n        container_image_id = container_hash['Image']\n        if not self.images.has_image(container_image_id):\n            image_hash = self.docker_client.inspect_image(container_image_id)\n            self.images.add_image(image_hash)\n        
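# The image is registered now; the superclass call below records\n        # this container as a user of it.\n        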
return super().new_container(event, container_hash)\n\n    def _remove_container(self, cid):\n        try:\n            self.docker_client.remove_container(cid, v=True)\n        except docker.errors.APIError as error:\n            logger.warning(\"Failed to remove container %s: %s\", cid, error)\n        else:\n            logger.info(\"Removed container %s\", cid)\n\n    @event_handlers.on('die')\n    def clean_container(self, event=None):\n        if self.remove_containers_onexit:\n            self._remove_container(event['id'])\n\n    def check_stopped_containers(self, remove=False):\n        logger.info(\"Checking for stopped containers\")\n        for c in self.docker_client.containers(filters={'status': 'exited'}):\n            logger.info(\"Container %s %s\", c['Id'], c['Status'])\n            if c['Status'][:6] != 'Exited':\n                logger.error(\"Unexpected status %s for container %s\",\n                             c['Status'], c['Id'])\n            elif remove:\n                self._remove_container(c['Id'])\n\n    @event_handlers.on('destroy')\n    def clean_images(self, event=None):\n        for image_id in self.images.should_delete():\n            try:\n                self.docker_client.remove_image(image_id)\n            except docker.errors.APIError as error:\n                logger.warning(\n                    \"Failed to remove image %s: %s\", image_id, error)\n            else:\n                logger.info(\"Removed image %s\", image_id)\n                self.images.del_image(image_id)\n\n    @event_handlers.on('destroy')\n    def log_unknown_images(self, event):\n        unknown_ids = {image['Id'] for image in self.docker_client.images()\n                       if not self.images.has_image(image['Id'])}\n        for image_id in (unknown_ids - self.logged_unknown):\n            logger.info(\n                \"Image %s is loaded but unused, so it won't be cleaned\",\n                image_id)\n        self.logged_unknown = unknown_ids\n\n\ndef human_size(size_str):\n    size_str = size_str.lower().rstrip('b')\n    multiplier = SUFFIX_SIZES.get(size_str[-1])\n    if multiplier is None:\n        multiplier = 1\n    else:\n        size_str = size_str[:-1]\n    return int(size_str) * multiplier\n\n\ndef load_config(arguments):\n    args = parse_arguments(arguments)\n\n    config = default_config()\n    try:\n        with open(args.config, 'r') as f:\n            c = json.load(f)\n            config.update(c)\n    except (FileNotFoundError, IOError, ValueError) as error:\n        if (isinstance(error, FileNotFoundError) and\n            args.config == DEFAULT_CONFIG_FILE):\n            logger.warning(\"DEPRECATED: default config file %s not found; \"\n                           \"relying on command line configuration\",\n                           repr(DEFAULT_CONFIG_FILE))\n        else:\n            sys.exit('error reading config file {}: {}'.format(\n                args.config, error))\n\n    configargs = vars(args).copy()\n    configargs.pop('config')\n    config.update({k: v for k, v in configargs.items() if v})\n\n    if isinstance(config['Quota'], str):\n        config['Quota'] = human_size(config['Quota'])\n\n    return config\n\n\ndef default_config():\n    return {\n        'Quota': '1G',\n        'RemoveStoppedContainers': 'always',\n        'Verbose': 0,\n    }\n\n\ndef parse_arguments(arguments):\n    class Formatter(argparse.ArgumentDefaultsHelpFormatter,\n                    argparse.RawDescriptionHelpFormatter):\n        pass\n    parser = 
argparse.ArgumentParser(\n        prog=\"arvados_docker.cleaner\",\n        description=\"clean old Docker images from Arvados compute nodes\",\n        epilog=\"Example config file:\\n\\n{}\".format(\n            json.dumps(default_config(), indent=4)),\n        formatter_class=Formatter,\n    )\n    parser.add_argument(\n        '--config', action='store', type=str, default=DEFAULT_CONFIG_FILE,\n        help=\"configuration file\")\n\n    deprecated = \" (DEPRECATED -- use config file instead)\"\n    parser.add_argument(\n        '--quota', action='store', type=human_size, dest='Quota',\n        help=\"space allowance for Docker images, suffixed with K/M/G/T\" + deprecated)\n    parser.add_argument(\n        '--remove-stopped-containers', type=str, default='always', dest='RemoveStoppedContainers',\n        choices=['never', 'onexit', 'always'],\n        help=\"\"\"when to remove stopped containers (default: always, i.e., remove\n        stopped containers found at startup, and remove containers as\n        soon as they exit)\"\"\" + deprecated)\n    parser.add_argument(\n        '--verbose', '-v', action='count', default=0, dest='Verbose',\n        help=\"log more information\" + deprecated)\n\n    return parser.parse_args(arguments)\n\n\ndef setup_logging():\n    log_handler = logging.StreamHandler()\n    log_handler.setFormatter(logging.Formatter(\n        '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s',\n        '%Y-%m-%d %H:%M:%S'))\n    logger.addHandler(log_handler)\n\n\ndef configure_logging(config):\n    logger.setLevel(logging.ERROR - (10 * config['Verbose']))\n\n\ndef run(config, docker_client):\n    start_time = int(time.time())\n    logger.debug(\"Loading Docker activity through present\")\n    images = DockerImages.from_daemon(config['Quota'], docker_client)\n    use_recorder = DockerImageUseRecorder(\n        images, docker_client, docker_client.events(since=1, until=start_time))\n    use_recorder.run()\n    cleaner = DockerImageCleaner(\n        images, docker_client, docker_client.events(since=start_time),\n        remove_containers_onexit=config['RemoveStoppedContainers'] != 'never')\n    cleaner.check_stopped_containers(\n        remove=config['RemoveStoppedContainers'] == 'always')\n    logger.info(\"Checking image quota at startup\")\n    cleaner.clean_images()\n    logger.info(\"Listening for docker events\")\n    cleaner.run()\n\n\ndef main(arguments=sys.argv[1:]):\n    setup_logging()\n    config = load_config(arguments)\n    configure_logging(config)\n    try:\n        run(config, docker.APIClient(version='1.48'))\n    except KeyboardInterrupt:\n        sys.exit(1)\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "services/dockercleaner/arvados_version.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport dataclasses\nimport os\nimport re\nimport runpy\nimport subprocess\nimport typing as t\n\nfrom pathlib import Path, PurePath, PurePosixPath\n\nimport setuptools\nimport setuptools.command.build\n\nSETUP_DIR = Path(__file__).absolute().parent\nVERSION_SCRIPT_PATH = PurePath('build', 'version-at-commit.sh')\n# Built by ArvadosPythonPackage.register\nARVADOS_PYTHON_MODULES: dict[str, 'ArvadosPythonPackage'] = {}\n\n### Metadata generation\n\n@dataclasses.dataclass\nclass ArvadosPythonPackage:\n    package_name: str\n    module_name: str\n    src_path: PurePath\n    dependencies: t.Sequence['ArvadosPythonPackage']\n\n    _VERSION_SUBS = {\n        'development-': '',\n        '~dev': '.dev',\n        '~rc': 'rc',\n    }\n\n    @classmethod\n    def register(\n            cls,\n            package_name: str,\n            module_name: str,\n            src_path: PurePath | str,\n            *dependencies: str,\n    ) -> 'ArvadosPythonPackage':\n        if not isinstance(src_path, PurePath):\n            src_path = PurePosixPath(src_path)\n        deps = [ARVADOS_PYTHON_MODULES[key] for key in dependencies]\n        this_pkg = cls(package_name, module_name, src_path, deps)\n        ARVADOS_PYTHON_MODULES[package_name] = this_pkg\n        return this_pkg\n\n    def version_file_path(self):\n        return PurePath(self.module_name, '_version.py')\n\n    def _workspace_path(self, workdir):\n        try:\n            workspace = Path(os.environ['WORKSPACE'])\n            # This will raise ValueError if they're not related,\n            # in which case we don't want to use this $WORKSPACE.\n            workdir.relative_to(workspace)\n        except KeyError:\n            # $WORKSPACE isn't set. 
Fall back to the Git worktree toplevel.\n            try:\n                git_proc = subprocess.run(\n                    ['git', 'rev-parse', '--show-toplevel'],\n                    capture_output=True,\n                    check=True,\n                    cwd=workdir,\n                    text=True,\n                )\n                workspace = Path(git_proc.stdout.removesuffix('\\n'))\n            except (subprocess.CalledProcessError, FileNotFoundError, ValueError):\n                return None\n        except ValueError:\n            return None\n        if (workspace / VERSION_SCRIPT_PATH).exists():\n            return workspace\n        else:\n            return None\n\n    def _git_version(self, workdir):\n        workspace = self._workspace_path(workdir)\n        if workspace is None:\n            return None\n        git_log_cmd = [\n            'git', 'log', '-n1', '--format=%H', '--',\n            str(VERSION_SCRIPT_PATH), str(self.src_path),\n        ]\n        git_log_cmd.extend(str(dep.src_path) for dep in self.dependencies)\n        git_log_proc = subprocess.run(\n            git_log_cmd,\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        version_proc = subprocess.run(\n            [str(VERSION_SCRIPT_PATH), git_log_proc.stdout.rstrip('\\n')],\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        return version_proc.stdout.rstrip('\\n')\n\n    def _sdist_version(self, workdir):\n        try:\n            pkg_info = (workdir / 'PKG-INFO').open()\n        except FileNotFoundError:\n            return None\n        with pkg_info:\n            for line in pkg_info:\n                key, _, val = line.partition(': ')\n                if key == 'Version':\n                    return val.rstrip('\\n')\n        raise Exception(\"found PKG-INFO file but not Version metadata in it\")\n\n    def get_version(self, workdir=SETUP_DIR):\n        version = (\n            # If we're building out of a distribution, we should pass that\n            # version through unchanged.\n            self._sdist_version(workdir)\n            # Otherwise follow the usual Arvados versioning rules.\n            or os.environ.get('ARVADOS_BUILDING_VERSION')\n            or self._git_version(workdir)\n        )\n        if not version:\n            raise Exception(f\"no version information available for {self.package_name}\")\n        else:\n            return re.sub(\n                r'(^development-|~dev|~rc)',\n                lambda match: self._VERSION_SUBS[match.group(0)],\n                version,\n            )\n\n    def get_dependencies_version(self, workdir=SETUP_DIR, version=None):\n        if version is None:\n            version = self.get_version(workdir)\n        # A packaged development release should be installed with other\n        # development packages built from the same source, but those\n        # dependencies may have earlier \"dev\" versions (read: less recent\n        # Git commit timestamps). This compatible version dependency\n        # expresses that as closely as possible. 
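For example, a\n        # hypothetical version 2.7.1.dev20240101 would become the specifier\n        # '~= 2.7.1.dev0'.\n        # 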
Allowing versions\n        # compatible with .dev0 allows any development release.\n        # Regular expression borrowed partially from\n        # <https://packaging.python.org/en/latest/specifications/version-specifiers/#version-specifiers-regex>\n        dep_ver, match_count = re.subn(r'\\.dev(0|[1-9][0-9]*)$', '.dev0', version, 1)\n        return ('~=' if match_count else '==', dep_ver)\n\n    def iter_dependencies(self, workdir=SETUP_DIR, version=None, extras=None):\n        if extras is None:\n            extras = {}\n        dep_op, dep_ver = self.get_dependencies_version(workdir, version)\n        for dep in self.dependencies:\n            try:\n                dep_extras = f'[{\",\".join(extras[dep.package_name])}]'\n            except KeyError:\n                dep_extras = ''\n            yield f'{dep.package_name}{dep_extras} {dep_op} {dep_ver}'\n\n\n### Package database\n\nArvadosPythonPackage.register(\n    'arvados-python-client',\n    'arvados',\n    'sdk/python',\n),\nArvadosPythonPackage.register(\n    'crunchstat_summary',\n    'crunchstat_summary',\n    'tools/crunchstat-summary',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cluster-activity',\n    'arvados_cluster_activity',\n    'tools/cluster-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cwl-runner',\n    'arvados_cwl',\n    'sdk/cwl',\n    'arvados-python-client',\n    'crunchstat_summary',\n)\nArvadosPythonPackage.register(\n    'arvados_fuse',\n    'arvados_fuse',\n    'services/fuse',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-user-activity',\n    'arvados_user_activity',\n    'tools/user-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-tools',\n    'NO SRCDIR',\n    'tools/python-metapackage',\n    *ARVADOS_PYTHON_MODULES,\n)\nArvadosPythonPackage.register(\n    'arvados-docker-cleaner',\n    'arvados_docker',\n    'services/dockercleaner',\n)\n\n### setuptools integration\n\nclass BuildArvadosVersion(setuptools.Command):\n    \"\"\"Write _version.py for an Arvados module\"\"\"\n    def initialize_options(self):\n        self.build_lib = None\n\n    def finalize_options(self):\n        self.set_undefined_options(\"build_py\", (\"build_lib\", \"build_lib\"))\n        arv_mod = ARVADOS_PYTHON_MODULES[self.distribution.get_name()]\n        self.out_path = Path(self.build_lib, arv_mod.version_file_path())\n\n    def run(self):\n        with self.out_path.open('w') as out_file:\n            print(f'__version__ = {self.distribution.get_version()!r}', file=out_file)\n\n    def get_outputs(self):\n        return [str(self.out_path)]\n\n    def get_source_files(self):\n        return []\n\n    def get_output_mapping(self):\n        return {}\n\n\nclass ArvadosBuildCommand(setuptools.command.build.build):\n    sub_commands = [\n        *setuptools.command.build.build.sub_commands,\n        ('build_arvados_version', None),\n    ]\n\n\nCMDCLASS = {\n    'build': ArvadosBuildCommand,\n    'build_arvados_version': BuildArvadosVersion,\n}\n"
  },
  {
    "path": "services/dockercleaner/bin/arvados-docker-cleaner",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nfrom arvados_docker.cleaner import main\nmain()\n"
  },
  {
    "path": "services/dockercleaner/pyproject.toml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[build-system]\nrequires = [\"setuptools ~= 80.9\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\ndynamic = [\"dependencies\", \"version\"]\nname = \"arvados-docker-cleaner\"\ndescription = \"Arvados Docker image cleaner\"\nauthors = [\n  {name = \"Arvados\", email = \"info@arvados.org\"},\n]\nclassifiers = [\n  \"Development Status :: 5 - Production/Stable\",\n  \"Environment :: Console\",\n  \"Intended Audience :: Science/Research\",\n  \"Operating System :: POSIX\",\n  \"Programming Language :: Python :: 3\",\n  \"Programming Language :: Python :: 3.10\",\n  \"Programming Language :: Python :: 3.11\",\n  \"Programming Language :: Python :: 3.12\",\n  \"Programming Language :: Python :: 3.13\",\n]\nlicense = \"AGPL-3.0-only\"\nlicense-files = [\n  \"agpl-3.0.txt\",\n]\nreadme = \"README.rst\"\nrequires-python = \"~= 3.10\"\n\n[project.scripts]\narvados-docker-cleaner = \"arvados_docker.cleaner:main\"\n\n[project.urls]\nHomepage = \"https://arvados.org\"\nDocumentation = \"https://doc.arvados.org\"\nRepository = \"https://github.com/arvados/arvados\"\nIssues = \"https://github.com/arvados/arvados/issues\"\nChangelog = \"https://arvados.org/releases/\"\n\n[tool.setuptools.data-files]\n\"share/doc/arvados-docker-cleaner\" = [\n  \"agpl-3.0.txt\",\n  \"arvados-docker-cleaner.service\",\n  \"README.rst\",\n]\n\n[tool.setuptools.packages.find]\nexclude = [\"tests*\"]\n"
  },
  {
    "path": "services/dockercleaner/setup.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport setuptools\nimport runpy\n\nfrom pathlib import Path\n\narvados_version = runpy.run_path(Path(__file__).with_name('arvados_version.py'))\narv_mod = arvados_version['ARVADOS_PYTHON_MODULES']['arvados-docker-cleaner']\nversion = arv_mod.get_version()\nsetuptools.setup(\n    cmdclass=arvados_version['CMDCLASS'],\n    install_requires=[\n        *arv_mod.iter_dependencies(version=version),\n        'docker >= 6.1.0',\n    ],\n    version=version,\n)\n"
  },
  {
    "path": "services/dockercleaner/tests/__init__.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport logging\nlogging.getLogger('').setLevel(logging.CRITICAL)\n"
  },
  {
    "path": "services/dockercleaner/tests/test_cleaner.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport collections\nimport itertools\nimport json\nimport os\nimport random\nimport tempfile\nimport time\nimport unittest\n\nimport docker\nfrom unittest import mock\n\nfrom arvados_docker import cleaner\n\nMAX_DOCKER_ID = (16 ** 64) - 1\n\n\ndef MockDockerId():\n    return '{:064x}'.format(random.randint(0, MAX_DOCKER_ID))\n\n\ndef MockContainer(image_hash):\n    return {'Id': MockDockerId(),\n            'Image': image_hash['Id']}\n\n\ndef MockImage(*, size=None, tags=[]):\n    if size is None:\n        size = random.randint(100, 2000000)\n    return {'Id': MockDockerId(),\n            'ParentId': MockDockerId(),\n            'RepoTags': list(tags),\n            'Size': size}\n\n\nclass MockEvent(dict):\n    ENCODING = 'utf-8'\n    event_seq = itertools.count(1)\n\n    def __init__(self, status, docker_id=None, **event_data):\n        if docker_id is None:\n            docker_id = MockDockerId()\n        super().__init__(self, **event_data)\n        self['status'] = status\n        self['id'] = docker_id\n        self.setdefault('time', next(self.event_seq))\n\n    def encoded(self):\n        return json.dumps(self).encode(self.ENCODING)\n\n\nclass MockException(docker.errors.APIError):\n\n    def __init__(self, status_code):\n        response = mock.Mock(name='response')\n        response.status_code = status_code\n        super().__init__(\"mock exception\", response)\n\n\nclass DockerImageTestCase(unittest.TestCase):\n\n    def test_used_at_sets_last_used(self):\n        image = cleaner.DockerImage(MockImage())\n        image.used_at(5)\n        self.assertEqual(5, image.last_used)\n\n    def test_used_at_moves_forward(self):\n        image = cleaner.DockerImage(MockImage())\n        image.used_at(6)\n        image.used_at(8)\n        self.assertEqual(8, image.last_used)\n\n    def test_used_at_does_not_go_backward(self):\n        image = cleaner.DockerImage(MockImage())\n        image.used_at(4)\n        image.used_at(2)\n        self.assertEqual(4, image.last_used)\n\n\nclass DockerImagesTestCase(unittest.TestCase):\n\n    def setUp(self):\n        self.mock_images = []\n\n    def setup_mock_images(self, *sizes):\n        self.mock_images.extend(MockImage(size=size) for size in sizes)\n\n    def setup_images(self, *sizes, target_size=1000000):\n        self.setup_mock_images(*sizes)\n        images = cleaner.DockerImages(target_size)\n        for image in self.mock_images:\n            images.add_image(image)\n        return images\n\n    def test_has_image(self):\n        images = self.setup_images(None)\n        self.assertTrue(images.has_image(self.mock_images[0]['Id']))\n        self.assertFalse(images.has_image(MockDockerId()))\n\n    def test_del_image(self):\n        images = self.setup_images(None)\n        images.del_image(self.mock_images[0]['Id'])\n        self.assertFalse(images.has_image(self.mock_images[0]['Id']))\n\n    def test_del_nonexistent_image(self):\n        images = self.setup_images(None)\n        images.del_image(MockDockerId())\n        self.assertTrue(images.has_image(self.mock_images[0]['Id']))\n\n    def test_one_image_always_kept(self):\n        # When crunch-job starts a job, it makes sure each compute node\n        # has the Docker image loaded, then it runs all the tasks with\n        # the assumption the image is on each node.  
As long as that's\n        # true, the cleaner should avoid removing every installed image:\n        # crunch-job might be counting on the most recent one to be\n        # available, even if it's not currently in use.\n        images = self.setup_images(None, None, target_size=1)\n        for use_time, image in enumerate(self.mock_images, 1):\n            user = MockContainer(image)\n            images.add_user(user, use_time)\n            images.end_user(user['Id'])\n        self.assertEqual([self.mock_images[0]['Id']],\n                         list(images.should_delete()))\n\n    def test_images_under_target_not_deletable(self):\n        # The images are used in this order.  target_size is set so it\n        # could hold the largest image, but not after the most recently\n        # used image is kept; then we have to fall back to the previous one.\n        images = self.setup_images(20, 30, 40, 10, target_size=45)\n        for use_time, image in enumerate(self.mock_images, 1):\n            user = MockContainer(image)\n            images.add_user(user, use_time)\n            images.end_user(user['Id'])\n        self.assertEqual([self.mock_images[ii]['Id'] for ii in [0, 2]],\n                         list(images.should_delete()))\n\n    def test_images_in_use_not_deletable(self):\n        images = self.setup_images(None, None, target_size=1)\n        users = [MockContainer(image) for image in self.mock_images]\n        images.add_user(users[0], 1)\n        images.add_user(users[1], 2)\n        images.end_user(users[1]['Id'])\n        self.assertEqual([self.mock_images[1]['Id']],\n                         list(images.should_delete()))\n\n    def test_image_deletable_after_unused(self):\n        images = self.setup_images(None, None, target_size=1)\n        users = [MockContainer(image) for image in self.mock_images]\n        images.add_user(users[0], 1)\n        images.add_user(users[1], 2)\n        images.end_user(users[0]['Id'])\n        self.assertEqual([self.mock_images[0]['Id']],\n                         list(images.should_delete()))\n\n    def test_image_not_deletable_if_user_restarts(self):\n        images = self.setup_images(None, target_size=1)\n        user = MockContainer(self.mock_images[-1])\n        images.add_user(user, 1)\n        images.end_user(user['Id'])\n        images.add_user(user, 2)\n        self.assertEqual([], list(images.should_delete()))\n\n    def test_image_not_deletable_if_any_user_remains(self):\n        images = self.setup_images(None, target_size=1)\n        users = [MockContainer(self.mock_images[0]) for ii in range(2)]\n        images.add_user(users[0], 1)\n        images.add_user(users[1], 2)\n        images.end_user(users[0]['Id'])\n        self.assertEqual([], list(images.should_delete()))\n\n    def test_image_deletable_after_all_users_end(self):\n        images = self.setup_images(None, None, target_size=1)\n        users = [MockContainer(self.mock_images[ii]) for ii in [0, 1, 1]]\n        images.add_user(users[0], 1)\n        images.add_user(users[1], 2)\n        images.add_user(users[2], 3)\n        images.end_user(users[1]['Id'])\n        images.end_user(users[2]['Id'])\n        self.assertEqual([self.mock_images[-1]['Id']],\n                         list(images.should_delete()))\n\n    def test_images_suggested_for_deletion_by_lru(self):\n        images = self.setup_images(10, 10, 10, target_size=1)\n        users = [MockContainer(image) for image in self.mock_images]\n        images.add_user(users[0], 3)\n        images.add_user(users[1], 
1)\n        images.add_user(users[2], 2)\n        for user in users:\n            images.end_user(user['Id'])\n        self.assertEqual([self.mock_images[ii]['Id'] for ii in [1, 2]],\n                         list(images.should_delete()))\n\n    def test_adding_user_without_image_does_not_implicitly_add_image(self):\n        images = self.setup_images(10)\n        images.add_user(MockContainer(MockImage()), 1)\n        self.assertEqual([], list(images.should_delete()))\n\n    def test_nonexistent_user_removed(self):\n        images = self.setup_images()\n        images.end_user('nonexistent')\n        # No exception should be raised.\n\n    def test_del_image_effective_with_users_present(self):\n        images = self.setup_images(None, target_size=1)\n        user = MockContainer(self.mock_images[0])\n        images.add_user(user, 1)\n        images.del_image(self.mock_images[0]['Id'])\n        images.end_user(user['Id'])\n        self.assertEqual([], list(images.should_delete()))\n\n    def setup_from_daemon(self, *sizes, target_size=1500000):\n        self.setup_mock_images(*sizes)\n        docker_client = mock.MagicMock(name='docker_client')\n        docker_client.images.return_value = iter(self.mock_images)\n        return cleaner.DockerImages.from_daemon(target_size, docker_client)\n\n    def test_images_loaded_from_daemon(self):\n        images = self.setup_from_daemon(None, None)\n        for image in self.mock_images:\n            self.assertTrue(images.has_image(image['Id']))\n\n    def test_target_size_set_from_daemon(self):\n        images = self.setup_from_daemon(20, 10, 5, target_size=15)\n        user = MockContainer(self.mock_images[-1])\n        images.add_user(user, 1)\n        self.assertEqual([self.mock_images[0]['Id']],\n                         list(images.should_delete()))\n\n\nclass DockerImageUseRecorderTestCase(unittest.TestCase):\n    TEST_CLASS = cleaner.DockerImageUseRecorder\n    TEST_CLASS_INIT_KWARGS = {}\n\n    def setUp(self):\n        self.images = mock.MagicMock(name='images')\n        self.docker_client = mock.MagicMock(name='docker_client')\n        self.events = []\n        self.recorder = self.TEST_CLASS(self.images, self.docker_client,\n                                        self.encoded_events, **self.TEST_CLASS_INIT_KWARGS)\n\n    @property\n    def encoded_events(self):\n        return (event.encoded() for event in self.events)\n\n    def test_unknown_events_ignored(self):\n        self.events.append(MockEvent('mock!event'))\n        self.recorder.run()\n        # No exception should be raised.\n\n    def test_fetches_container_on_create(self):\n        self.events.append(MockEvent('create'))\n        self.recorder.run()\n        self.docker_client.inspect_container.assert_called_with(\n            self.events[0]['id'])\n\n    def test_adds_user_on_container_create(self):\n        self.events.append(MockEvent('create'))\n        self.recorder.run()\n        self.images.add_user.assert_called_with(\n            self.docker_client.inspect_container(), self.events[0]['time'])\n\n    def test_unknown_image_handling(self):\n        # The use recorder should not fetch any images.\n        self.events.append(MockEvent('create'))\n        self.recorder.run()\n        self.assertFalse(self.docker_client.inspect_image.called)\n\n    def test_unfetchable_containers_ignored(self):\n        self.events.append(MockEvent('create'))\n        self.docker_client.inspect_container.side_effect = MockException(404)\n        self.recorder.run()\n        
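# return_when_docker_not_found() turns the 404 into a no-op, so no\n        # image user should have been recorded.\n        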
self.assertFalse(self.images.add_user.called)\n\n    def test_ends_user_on_container_destroy(self):\n        self.events.append(MockEvent('destroy'))\n        self.recorder.run()\n        self.images.end_user.assert_called_with(self.events[0]['id'])\n\n\nclass DockerImageCleanerTestCase(DockerImageUseRecorderTestCase):\n    TEST_CLASS = cleaner.DockerImageCleaner\n\n    def test_unknown_image_handling(self):\n        # The image cleaner should fetch and record new images.\n        self.images.has_image.return_value = False\n        self.events.append(MockEvent('create'))\n        self.recorder.run()\n        self.docker_client.inspect_image.assert_called_with(\n            self.docker_client.inspect_container()['Image'])\n        self.images.add_image.assert_called_with(\n            self.docker_client.inspect_image())\n\n    def test_unfetchable_images_ignored(self):\n        self.images.has_image.return_value = False\n        self.docker_client.inspect_image.side_effect = MockException(404)\n        self.events.append(MockEvent('create'))\n        self.recorder.run()\n        self.docker_client.inspect_image.assert_called_with(\n            self.docker_client.inspect_container()['Image'])\n        self.assertFalse(self.images.add_image.called)\n\n    def test_deletions_after_destroy(self):\n        delete_id = MockDockerId()\n        self.images.should_delete.return_value = [delete_id]\n        self.events.append(MockEvent('destroy'))\n        self.recorder.run()\n        self.docker_client.remove_image.assert_called_with(delete_id)\n        self.images.del_image.assert_called_with(delete_id)\n\n    def test_failed_deletion_handling(self):\n        delete_id = MockDockerId()\n        self.images.should_delete.return_value = [delete_id]\n        self.docker_client.remove_image.side_effect = MockException(500)\n        self.events.append(MockEvent('destroy'))\n        self.recorder.run()\n        self.docker_client.remove_image.assert_called_with(delete_id)\n        self.assertFalse(self.images.del_image.called)\n\n\nclass DockerContainerCleanerTestCase(DockerImageUseRecorderTestCase):\n    TEST_CLASS = cleaner.DockerImageCleaner\n    TEST_CLASS_INIT_KWARGS = {'remove_containers_onexit': True}\n\n    def test_container_deletion_deletes_volumes(self):\n        cid = MockDockerId()\n        self.events.append(MockEvent('die', docker_id=cid))\n        self.recorder.run()\n        self.docker_client.remove_container.assert_called_with(cid, v=True)\n\n    @mock.patch('arvados_docker.cleaner.logger')\n    def test_failed_container_deletion_handling(self, mockLogger):\n        cid = MockDockerId()\n        self.docker_client.remove_container.side_effect = MockException(500)\n        self.events.append(MockEvent('die', docker_id=cid))\n        self.recorder.run()\n        self.docker_client.remove_container.assert_called_with(cid, v=True)\n        self.assertEqual(\"Failed to remove container %s: %s\",\n                         mockLogger.warning.call_args[0][0])\n        self.assertEqual(cid,\n                         mockLogger.warning.call_args[0][1])\n\n\nclass HumanSizeTestCase(unittest.TestCase):\n\n    def check(self, human_str, count, exp):\n        self.assertEqual(count * (1024 ** exp),\n                         cleaner.human_size(human_str))\n\n    def test_bytes(self):\n        self.check('1', 1, 0)\n        self.check('82', 82, 0)\n\n    def test_kibibytes(self):\n        self.check('2K', 2, 1)\n        self.check('3k', 3, 1)\n\n    def test_mebibytes(self):\n        self.check('4M', 
4, 2)\n        self.check('5m', 5, 2)\n\n    def test_gibibytes(self):\n        self.check('6G', 6, 3)\n        self.check('7g', 7, 3)\n\n    def test_tebibytes(self):\n        self.check('8T', 8, 4)\n        self.check('9t', 9, 4)\n\n\nclass RunTestCase(unittest.TestCase):\n\n    def setUp(self):\n        self.config = cleaner.default_config()\n        self.config['Quota'] = 1000000\n        self.docker_client = mock.MagicMock(name='docker_client')\n\n    def test_run(self):\n        test_start_time = int(time.time())\n        self.docker_client.events.return_value = []\n        cleaner.run(self.config, self.docker_client)\n        self.assertEqual(2, self.docker_client.events.call_count)\n        event_kwargs = [args[1] for args in\n                        self.docker_client.events.call_args_list]\n        self.assertIn('since', event_kwargs[0])\n        self.assertIn('until', event_kwargs[0])\n        self.assertLessEqual(test_start_time, event_kwargs[0]['until'])\n        self.assertIn('since', event_kwargs[1])\n        self.assertEqual(event_kwargs[0]['until'], event_kwargs[1]['since'])\n\n\n@mock.patch('docker.APIClient', name='docker_client')\n@mock.patch('arvados_docker.cleaner.run', name='cleaner_run')\nclass MainTestCase(unittest.TestCase):\n\n    def test_client_api_version(self, run_mock, docker_client):\n        with tempfile.NamedTemporaryFile(mode='wt') as cf:\n            cf.write('{\"Quota\":\"1000T\"}')\n            cf.flush()\n            cleaner.main(['--config', cf.name])\n        self.assertEqual(1, docker_client.call_count)\n        # We are standardized on Docker API version 1.48.\n        # See DockerAPIVersion in lib/crunchrun/docker.go.\n        self.assertEqual('1.48',\n                         docker_client.call_args[1].get('version'))\n        self.assertEqual(1, run_mock.call_count)\n        self.assertIs(run_mock.call_args[0][1], docker_client())\n\n\nclass ConfigTestCase(unittest.TestCase):\n\n    def test_load_config(self):\n        with tempfile.NamedTemporaryFile(mode='wt') as cf:\n            cf.write(\n                '{\"Quota\":\"1000T\", \"RemoveStoppedContainers\":\"always\", \"Verbose\":2}')\n            cf.flush()\n            config = cleaner.load_config(['--config', cf.name])\n        self.assertEqual(1000 << 40, config['Quota'])\n        self.assertEqual(\"always\", config['RemoveStoppedContainers'])\n        self.assertEqual(2, config['Verbose'])\n\n    def test_args_override_config(self):\n        with tempfile.NamedTemporaryFile(mode='wt') as cf:\n            cf.write(\n                '{\"Quota\":\"1000T\", \"RemoveStoppedContainers\":\"always\", \"Verbose\":2}')\n            cf.flush()\n            config = cleaner.load_config([\n                '--config', cf.name,\n                '--quota', '1G',\n                '--remove-stopped-containers', 'never',\n                '--verbose',\n            ])\n        self.assertEqual(1 << 30, config['Quota'])\n        self.assertEqual('never', config['RemoveStoppedContainers'])\n        self.assertEqual(1, config['Verbose'])\n\n    def test_args_no_config(self):\n        self.assertEqual(False, os.path.exists(cleaner.DEFAULT_CONFIG_FILE))\n        config = cleaner.load_config(['--quota', '1G'])\n        self.assertEqual(1 << 30, config['Quota'])\n\n\nclass ContainerRemovalTestCase(unittest.TestCase):\n    LIFECYCLE = ['create', 'attach', 'start', 'resize', 'die', 'destroy']\n\n    def setUp(self):\n        self.config = cleaner.default_config()\n        self.docker_client = 
mock.MagicMock(name='docker_client')\n        self.existingCID = MockDockerId()\n        self.docker_client.containers.return_value = [{\n            'Id': self.existingCID,\n            'Status': 'Exited (0) 6 weeks ago',\n        }, {\n            # If docker_client.containers() returns non-exited\n            # containers for some reason, do not remove them.\n            'Id': MockDockerId(),\n            'Status': 'Running',\n        }]\n        self.newCID = MockDockerId()\n        self.docker_client.events.return_value = [\n            MockEvent(e, docker_id=self.newCID).encoded()\n            for e in self.LIFECYCLE]\n\n    def test_remove_onexit(self):\n        self.config['RemoveStoppedContainers'] = 'onexit'\n        cleaner.run(self.config, self.docker_client)\n        self.docker_client.remove_container.assert_called_once_with(\n            self.newCID, v=True)\n\n    def test_remove_always(self):\n        self.config['RemoveStoppedContainers'] = 'always'\n        cleaner.run(self.config, self.docker_client)\n        self.docker_client.remove_container.assert_any_call(\n            self.existingCID, v=True)\n        self.docker_client.remove_container.assert_any_call(\n            self.newCID, v=True)\n        self.assertEqual(2, self.docker_client.remove_container.call_count)\n\n    def test_remove_never(self):\n        self.config['RemoveStoppedContainers'] = 'never'\n        cleaner.run(self.config, self.docker_client)\n        self.assertEqual(0, self.docker_client.remove_container.call_count)\n\n    def test_container_exited_between_subscribe_events_and_check_existing(self):\n        self.config['RemoveStoppedContainers'] = 'always'\n        self.docker_client.events.return_value = [\n            MockEvent(e, docker_id=self.existingCID).encoded()\n            for e in ['die', 'destroy']]\n        cleaner.run(self.config, self.docker_client)\n        # Subscribed to events before getting the list of existing\n        # exited containers?\n        self.docker_client.assert_has_calls([\n            mock.call.events(since=mock.ANY),\n            mock.call.containers(filters={'status': 'exited'})])\n        # Asked to delete the container twice?\n        self.docker_client.remove_container.assert_has_calls(\n            [mock.call(self.existingCID, v=True)] * 2)\n        self.assertEqual(2, self.docker_client.remove_container.call_count)\n"
  },
  {
    "path": "services/fuse/MANIFEST.in",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ninclude agpl-3.0.txt\ninclude README.rst\ninclude arvados_version.py"
  },
  {
    "path": "services/fuse/README.rst",
    "content": ".. Copyright (C) The Arvados Authors. All rights reserved.\n..\n.. SPDX-License-Identifier: AGPL-3.0\n\n========================\nArvados Keep FUSE Driver\n========================\n\nOverview\n--------\n\nThis package provides a FUSE driver for Keep, the Arvados_ storage\nsystem.  It allows you to read data from your collections as if they\nwere on the local filesystem.\n\n.. _Arvados: https://arvados.org/\n\nInstallation\n------------\n\nInstalling under your user account\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThis method lets you install the package without root access.  However,\nother users on the same system will need to reconfigure their shell in order\nto be able to use it. Run the following to install the package in an\nenvironment at ``~/arvclients``::\n\n  python3 -m venv ~/arvclients\n  ~/arvclients/bin/pip install arvados_fuse\n\nCommand line tools will be installed under ``~/arvclients/bin``. You can\ntest one by running::\n\n  ~/arvclients/bin/arv-mount --version\n\nYou can run these tools by specifying the full path every time, or you can\nadd the directory to your shell's search path by running::\n\n  export PATH=\"$PATH:$HOME/arvclients/bin\"\n\nYou can make this search path change permanent by adding this command to\nyour shell's configuration, for example ``~/.bashrc`` if you're using bash.\nYou can test the change by running::\n\n  arv-mount --version\n\nInstalling on Debian and Ubuntu systems\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nArvados publishes packages for Debian 12 \"bookworm,\" Ubuntu 22.04 \"jammy,\" and Ubuntu 24.04 \"noble.\" You can install the Python SDK package on any of these distributions by running the following commands::\n\n  sudo install -d /etc/apt/keyrings\n  sudo curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg\n  sudo tee /etc/apt/sources.list.d/arvados.sources >/dev/null <<EOF\n  Types: deb\n  URIs: https://apt.arvados.org/$(lsb_release -cs)\n  Suites: $(lsb_release -cs)\n  Components: main\n  Signed-by: /etc/apt/keyrings/arvados.asc\n  EOF\n  sudo apt update\n  sudo apt install python3-arvados-fuse\n\nInstalling on Red Hat, AlmaLinux, and Rocky Linux\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nArvados publishes packages for RHEL 8 and 9, as well as distributions based on those. Note that these packages depend on, and will automatically enable, the Python 3.11 module. You can install the Python SDK package on any of these distributions by running the following commands::\n\n  sudo tee /etc/yum.repos.d/arvados.repo >/dev/null <<'EOF'\n  [arvados]\n  name=Arvados\n  baseurl=https://rpm.arvados.org/RHEL/$releasever/os/$basearch/\n  gpgcheck=1\n  gpgkey=https://rpm.arvados.org/RHEL/$releasever/RPM-GPG-KEY-arvados\n  EOF\n  sudo dnf install python3-arvados-fuse\n\nConfiguration\n-------------\n\nThis driver needs two pieces of information to connect to\nArvados: the DNS name of the API server, and an API authorization\ntoken.  `The Arvados user\ndocumentation\n<http://doc.arvados.org/user/reference/api-tokens.html>`_ describes\nhow to find this information in the Arvados Workbench, and install it\non your system.\n\nTesting and Development\n-----------------------\n\nDebian packages you need to build llfuse::\n\n  sudo apt install python-dev pkg-config libfuse-dev\n\nThis package is one part of the Arvados source package, and it has\nintegration tests to check interoperability with other Arvados\ncomponents.  
Our `hacking guide\n<https://dev.arvados.org/projects/arvados/wiki/Hacking_Python_SDK>`_\ndescribes how to set up a development environment and run tests.\n"
  },
  {
    "path": "services/fuse/agpl-3.0.txt",
    "content": "                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU Affero General Public License is a free, copyleft license for\nsoftware and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nour General Public Licenses are intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.\n\n  A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.\n\n  The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.\n\n  An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU Affero General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  
\"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. 
Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  
This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  
But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  
If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  
If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  
\"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Remote Network Interaction; Use with the GNU General Public License.\n\n  Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  
This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the work with which it is combined will remain governed by version\n3 of the GNU General Public License.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new versions\nwill be similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU Affero General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU Affero General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. 
Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a \"Source\" link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n<http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "services/fuse/arvados_fuse/__init__.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n\"\"\"FUSE driver for Arvados Keep\n\nArchitecture:\n\nThere is one `Operations` object per mount point.  It is the entry point for all\nread and write requests from the llfuse module.\n\nThe operations object owns an `Inodes` object.  The inodes object stores the\nmapping from numeric inode (used throughout the file system API to uniquely\nidentify files) to the Python objects that implement files and directories.\n\nThe `Inodes` object owns an `InodeCache` object.  The inode cache records the\nmemory footprint of file system objects and when they are last used.  When the\ncache limit is exceeded, the least recently used objects are cleared.\n\nFile system objects inherit from `fresh.FreshBase` which manages the object lifecycle.\n\nFile objects inherit from `fusefile.File`.  Key methods are `readfrom` and `writeto`\nwhich implement actual reads and writes.\n\nDirectory objects inherit from `fusedir.Directory`.  The directory object wraps\na Python dict which stores the mapping from filenames to directory entries.\nDirectory contents can be accessed through the Python operators such as `[]`\nand `in`.  These methods automatically check if the directory is fresh (up to\ndate) or stale (needs update) and will call `update` if necessary before\nreturing a result.\n\nThe general FUSE operation flow is as follows:\n\n- The request handler is called with either an inode or file handle that is the\n  subject of the operation.\n\n- Look up the inode using the Inodes table or the file handle in the\n  filehandles table to get the file system object.\n\n- For methods that alter files or directories, check that the operation is\n  valid and permitted using _check_writable().\n\n- Call the relevant method on the file system object.\n\n- Return the result.\n\nThe FUSE driver supports the Arvados event bus.  
\nThe FUSE driver supports the Arvados event bus.  When an event is received for\nan object that is live in the inode cache, that object is immediately updated.\n\nImplementation note: in the code, the terms 'object', 'entry' and\n'inode' are used somewhat interchangeably, but generally mean an\narvados_fuse.File or arvados_fuse.Directory object which has a numeric\ninode assigned to it and appears in the Inodes._entries dictionary.\n\n\"\"\"\n\nimport os\nimport llfuse\nimport errno\nimport stat\nimport threading\nimport arvados\nimport arvados.events\nimport logging\nimport time\nimport itertools\nimport collections\nimport functools\nimport arvados.keep\nfrom prometheus_client import Summary\nimport queue\nfrom dataclasses import dataclass\nimport typing\n\nfrom .fusedir import Directory, CollectionDirectory, TmpCollectionDirectory, MagicDirectory, TagsDirectory, ProjectDirectory, SharedDirectory, CollectionDirectoryBase\nfrom .fusefile import File, StringFile, FuseArvadosFile\n\n_logger = logging.getLogger('arvados.arvados_fuse')\n\n# Uncomment this to enable llfuse debug logging.\n# log_handler = logging.StreamHandler()\n# llogger = logging.getLogger('llfuse')\n# llogger.addHandler(log_handler)\n# llogger.setLevel(logging.DEBUG)\n\nclass Handle(object):\n    \"\"\"Connects a numeric file handle to a File or Directory object that has\n    been opened by the client.\"\"\"\n\n    def __init__(self, fh, obj):\n        self.fh = fh\n        self.obj = obj\n        self.obj.inc_use()\n\n    def release(self):\n        self.obj.dec_use()\n\n    def flush(self, force):\n        pass\n\n\nclass FileHandle(Handle):\n    \"\"\"Connects a numeric file handle to a File object that has\n    been opened by the client.\"\"\"\n\n    def __init__(self, fh, obj, parent_obj, open_for_writing):\n        super(FileHandle, self).__init__(fh, obj)\n        self.parent_obj = parent_obj\n        if self.parent_obj is not None:\n            self.parent_obj.inc_use()\n        self.open_for_writing = open_for_writing\n\n    def release(self):\n        super(FileHandle, self).release()\n        if self.parent_obj is not None:\n            self.parent_obj.dec_use()\n\n    def flush(self, force):\n        if not self.open_for_writing and not force:\n            return\n        self.obj.flush()\n        if self.parent_obj is not None:\n            self.parent_obj.flush()\n\nclass DirectoryHandle(Handle):\n    \"\"\"Connects a numeric file handle to a Directory object that has\n    been opened by the client.\n\n    DirectoryHandle is used by opendir() and readdir() to get\n    directory listings.  Entries returned by readdir() don't increment\n    the lookup count (kernel references), so increment our internal\n    \"use count\" to avoid having an item removed mid-read.\n\n    \"\"\"\n\n    def __init__(self, fh, dirobj, entries):\n        super(DirectoryHandle, self).__init__(fh, dirobj)\n        self.entries = entries\n\n        for ent in self.entries:\n            ent[1].inc_use()\n\n    def release(self):\n        for ent in self.entries:\n            ent[1].dec_use()\n        super(DirectoryHandle, self).release()\n\n    def flush(self, force):\n        self.obj.flush()\n\n\nclass InodeCache(object):\n    \"\"\"Records the memory footprint of objects and when they are last used.\n\n    When the cache limit is exceeded, the least recently used objects\n    are cleared.  Clearing the object means discarding its contents to\n    release memory.  The next time the object is accessed, it must be\n    re-fetched from the server.  Note that the inode cache limit is a\n    soft limit; the cache limit may be exceeded if necessary to load\n    very large projects or collections; it may also be exceeded if an\n    inode can't be safely discarded based on kernel lookups\n    (has_ref()) or internal use count (in_use()).\n
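\n    For example, a mount can be given a smaller soft cap than the\n    256 MiB default chosen by Operations.__init__ (a sketch; `api_client`\n    is assumed to be an arvados.api client):\n\n        operations = Operations(os.getuid(), os.getgid(),\n                                api_client=api_client,\n                                inode_cache=InodeCache(cap=128*1024*1024))\n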
\n    \"\"\"\n\n    def __init__(self, cap, min_entries=4):\n        # Standard dictionaries are ordered, but OrderedDict is still better here, see\n        # https://docs.python.org/3.11/library/collections.html#ordereddict-objects\n        # specifically, we use move_to_end(), which standard dicts don't have.\n        self._cache_entries = collections.OrderedDict()\n        self.cap = cap\n        self._total = 0\n        self.min_entries = min_entries\n\n    def total(self):\n        return self._total\n\n    def evict_candidates(self):\n        \"\"\"Yield entries that are candidates to be evicted\n        and stop when the cache total has shrunk sufficiently.\n\n        Implements an LRU cache: when an item is added or touch()ed, it\n        goes to the back of the OrderedDict, so items in the front are\n        oldest.  The Inodes._remove() function determines if the entry\n        can actually be removed safely.\n\n        \"\"\"\n\n        if self._total <= self.cap:\n            return\n\n        _logger.debug(\"InodeCache evict_candidates total %i cap %i entries %i\", self._total, self.cap, len(self._cache_entries))\n\n        # Copy this into a deque for two reasons:\n        #\n        # 1. _cache_entries is modified by unmanage() which is called\n        # by _remove\n        #\n        # 2. popping off the front means the reference goes away\n        # immediately instead of sticking around for the lifetime of\n        # \"values\"\n        values = collections.deque(self._cache_entries.values())\n\n        while values:\n            if self._total < self.cap or len(self._cache_entries) < self.min_entries:\n                break\n            yield values.popleft()\n\n    def unmanage(self, entry):\n        \"\"\"Stop managing an object in the cache.\n\n        This happens when an object is being removed from the inode\n        entries table.\n\n        \"\"\"\n\n        if entry.inode not in self._cache_entries:\n            return\n\n        # manage cache size running sum\n        self._total -= entry.cache_size\n        entry.cache_size = 0\n\n        # Now forget about it\n        del self._cache_entries[entry.inode]\n\n    def update_cache_size(self, obj):\n        \"\"\"Update the cache total in response to the footprint of an\n        object changing (usually because it has been loaded or\n        cleared).\n\n        Adds or removes entries in the cache list based on the object\n        cache size.\n\n        \"\"\"\n\n        if not obj.persisted():\n            return\n\n        if obj.inode in self._cache_entries:\n            self._total -= obj.cache_size\n\n        obj.cache_size = obj.objsize()\n\n        if obj.cache_size > 0 or obj.parent_inode is None:\n            self._total += obj.cache_size\n            self._cache_entries[obj.inode] = obj\n        elif obj.cache_size == 0 and obj.inode in self._cache_entries:\n            del self._cache_entries[obj.inode]\n\n    def touch(self, obj):\n        \"\"\"Indicate an object was used recently, making it low\n        priority to be removed from the cache.\n\n        \"\"\"\n        if obj.inode in self._cache_entries:\n            self._cache_entries.move_to_end(obj.inode)\n            return True\n        return False\n\n    def clear(self):
\n        self._cache_entries.clear()\n        self._total = 0\n\n@dataclass\nclass RemoveInode:\n    entry: typing.Union[Directory, File]\n    def inode_op(self, inodes, locked_ops):\n        if locked_ops is None:\n            inodes._remove(self.entry)\n            return True\n        else:\n            locked_ops.append(self)\n            return False\n\n@dataclass\nclass InvalidateInode:\n    inode: int\n    def inode_op(self, inodes, locked_ops):\n        llfuse.invalidate_inode(self.inode)\n        return True\n\n@dataclass\nclass InvalidateEntry:\n    inode: int\n    name: str\n    def inode_op(self, inodes, locked_ops):\n        llfuse.invalidate_entry(self.inode, self.name)\n        return True\n\n@dataclass\nclass EvictCandidates:\n    # No-op queue marker: putting this on the queue wakes the\n    # _inode_remove thread, which runs InodeCache.evict_candidates()\n    # after draining the queue.\n    def inode_op(self, inodes, locked_ops):\n        return True\n\n\nclass Inodes(object):\n    \"\"\"Manage the set of inodes.\n\n    This is the mapping from a numeric id to a concrete File or\n    Directory object.\n\n    \"\"\"\n\n    def __init__(self, inode_cache, encoding=\"utf-8\", fsns=None, shutdown_started=None):\n        self._entries = {}\n        self._counter = itertools.count(llfuse.ROOT_INODE)\n        self.inode_cache = inode_cache\n        self.encoding = encoding\n        self._fsns = fsns\n        self._shutdown_started = shutdown_started or threading.Event()\n\n        self._inode_remove_queue = queue.Queue()\n        self._inode_remove_thread = threading.Thread(None, self._inode_remove)\n        self._inode_remove_thread.daemon = True\n        self._inode_remove_thread.start()\n\n        self._by_uuid = collections.defaultdict(list)\n\n    def __getitem__(self, item):\n        return self._entries[item]\n\n    def __setitem__(self, key, item):\n        self._entries[key] = item\n\n    def __iter__(self):\n        return iter(self._entries.keys())\n\n    def items(self):\n        return self._entries.items()\n\n    def __contains__(self, k):\n        return k in self._entries\n\n    def touch(self, entry):\n        \"\"\"Update the access time, adjust the cache position, and\n        notify the _inode_remove thread to recheck the cache.\n\n        \"\"\"\n\n        entry._atime = time.time()\n        if self.inode_cache.touch(entry):\n            self.cap_cache()\n\n    def cap_cache(self):\n        \"\"\"Notify the _inode_remove thread to recheck the cache.\"\"\"\n        if self._inode_remove_queue.empty():\n            self._inode_remove_queue.put(EvictCandidates())\n\n    def update_uuid(self, entry):\n        \"\"\"Update the Arvados uuid associated with an inode entry.\n\n        This is used to look up inodes that need to be invalidated\n        when a websocket event indicates the object has changed on the\n        API server.\n\n        \"\"\"\n        if entry.cache_uuid and entry in self._by_uuid[entry.cache_uuid]:\n            self._by_uuid[entry.cache_uuid].remove(entry)\n\n        entry.cache_uuid = entry.uuid()\n        if entry.cache_uuid and entry not in self._by_uuid[entry.cache_uuid]:\n            self._by_uuid[entry.cache_uuid].append(entry)\n\n        if not self._by_uuid[entry.cache_uuid]:\n            del self._by_uuid[entry.cache_uuid]\n\n    def add_entry(self, entry):\n        \"\"\"Assign a numeric inode to a new entry.\"\"\"\n\n        entry.inode = next(self._counter)\n        if entry.inode == llfuse.ROOT_INODE:\n            entry.inc_ref()\n        self._entries[entry.inode] = entry\n\n        self.update_uuid(entry)\n        self.inode_cache.update_cache_size(entry)\n        
self.cap_cache()\n        return entry\n\n    def del_entry(self, entry):\n        \"\"\"Remove entry from the inode table.\n\n        Indicate this inode entry is pending deletion by setting\n        parent_inode to None.  Notify the _inode_remove thread to try\n        and remove it.\n\n        \"\"\"\n\n        entry.parent_inode = None\n        self._inode_remove_queue.put(RemoveInode(entry))\n        _logger.debug(\"del_entry on inode %i with refcount %i\", entry.inode, entry.ref_count)\n\n    def _inode_remove(self):\n        \"\"\"Background thread to handle tasks related to invalidating\n        inodes in the kernel, and removing objects from the inodes\n        table entirely.\n\n        \"\"\"\n\n        locked_ops = collections.deque()\n        shutting_down = False\n        while not shutting_down:\n            tasks_done = 0\n            blocking_get = True\n            while True:\n                try:\n                    qentry = self._inode_remove_queue.get(blocking_get)\n                except queue.Empty:\n                    break\n\n                blocking_get = False\n                if qentry is None:\n                    shutting_down = True\n                    continue\n\n                # Process (or defer) this entry\n                qentry.inode_op(self, locked_ops)\n                tasks_done += 1\n\n                # Give up the reference\n                qentry = None\n\n            with llfuse.lock:\n                while locked_ops:\n                    locked_ops.popleft().inode_op(self, None)\n                for entry in self.inode_cache.evict_candidates():\n                    self._remove(entry)\n\n            # Unblock _inode_remove_queue.join() only when all of the\n            # deferred work is done, i.e., after calling inode_op()\n            # and then evict_candidates().\n            for _ in range(tasks_done):\n                self._inode_remove_queue.task_done()\n\n    def wait_remove_queue_empty(self):\n        # used by tests\n        self._inode_remove_queue.join()\n\n    def _remove(self, entry):\n        \"\"\"Remove an inode entry if possible.\n\n        If the entry is still referenced or in use, don't do anything.\n        If this is not referenced but the parent is still referenced,\n        clear any data held by the object (which may include directory\n        entries under the object) but don't remove it from the inode\n        table.\n\n        \"\"\"\n        try:\n            if entry.inode is None:\n                # Removed already\n                return\n\n            if entry.inode == llfuse.ROOT_INODE:\n                return\n\n            if entry.in_use():\n                # referenced internally, stay pinned\n                #_logger.debug(\"InodeCache cannot clear inode %i, in use\", entry.inode)\n                return\n\n            # Tell the kernel it should forget about it\n            entry.kernel_invalidate()\n\n            if entry.has_ref():\n                # has kernel reference, could still be accessed.\n                # when the kernel forgets about it, we can delete it.\n                #_logger.debug(\"InodeCache cannot clear inode %i, is referenced\", entry.inode)\n                return\n\n            # commit any pending changes\n            with llfuse.lock_released:\n                entry.finalize()\n\n            # Clear the contents\n            entry.clear()\n\n            if entry.parent_inode is None:\n                _logger.debug(\"InodeCache forgetting inode %i, object cache_size %i, 
cache total %i, forget_inode True, inode entries %i, type %s\",\n                              entry.inode, entry.cache_size, self.inode_cache.total(),\n                              len(self._entries), type(entry))\n\n                if entry.cache_uuid:\n                    self._by_uuid[entry.cache_uuid].remove(entry)\n                    if not self._by_uuid[entry.cache_uuid]:\n                        del self._by_uuid[entry.cache_uuid]\n                    entry.cache_uuid = None\n\n                self.inode_cache.unmanage(entry)\n\n                del self._entries[entry.inode]\n                entry.inode = None\n\n        except Exception as e:\n            _logger.exception(\"failed remove\")\n\n    def invalidate_inode(self, entry):\n        if entry.has_ref():\n            # Only necessary if the kernel has previously done a lookup on this\n            # inode and hasn't yet forgotten about it.\n            self._inode_remove_queue.put(InvalidateInode(entry.inode))\n\n    def invalidate_entry(self, entry, name):\n        if entry.has_ref():\n            # Only necessary if the kernel has previously done a lookup on this\n            # inode and hasn't yet forgotten about it.\n            self._inode_remove_queue.put(InvalidateEntry(entry.inode, name.encode(self.encoding)))\n\n    def begin_shutdown(self):\n        self._inode_remove_queue.put(None)\n        if self._inode_remove_thread is not None:\n            self._inode_remove_thread.join()\n        self._inode_remove_thread = None\n\n    def clear(self):\n        with llfuse.lock_released:\n            self.begin_shutdown()\n\n        self.inode_cache.clear()\n        self._by_uuid.clear()\n\n        for k,v in self._entries.items():\n            try:\n                v.finalize()\n            except Exception as e:\n                _logger.exception(\"Error during finalize of inode %i\", k)\n\n        self._entries.clear()\n\n    def forward_slash_subst(self):\n        return self._fsns\n\n    def find_by_uuid(self, uuid):\n        \"\"\"Return a list of zero or more inode entries corresponding\n        to this Arvados UUID.\"\"\"\n        return self._by_uuid.get(uuid, [])\n\n\ndef catch_exceptions(orig_func):\n    \"\"\"Catch uncaught exceptions and log them consistently.\"\"\"\n\n    @functools.wraps(orig_func)\n    def catch_exceptions_wrapper(self, *args, **kwargs):\n        try:\n            return orig_func(self, *args, **kwargs)\n        except llfuse.FUSEError:\n            raise\n        except EnvironmentError as e:\n            raise llfuse.FUSEError(e.errno)\n        except NotImplementedError:\n            raise llfuse.FUSEError(errno.ENOTSUP)\n        except arvados.errors.KeepWriteError as e:\n            _logger.error(\"Keep write error: \" + str(e))\n            raise llfuse.FUSEError(errno.EIO)\n        except arvados.errors.NotFoundError as e:\n            _logger.error(\"Block not found error: \" + str(e))\n            raise llfuse.FUSEError(errno.EIO)\n        except:\n            _logger.exception(\"Unhandled exception during FUSE operation\")\n            raise llfuse.FUSEError(errno.EIO)\n\n    return catch_exceptions_wrapper\n\n\nclass Operations(llfuse.Operations):\n    \"\"\"This is the main interface with llfuse.\n\n    The methods on this object are called by llfuse threads to service FUSE\n    events to query and read from the file system.\n\n    llfuse has its own global lock which is acquired before calling a request handler,\n    so request handlers do not run concurrently unless the lock 
is explicitly released\n    using 'with llfuse.lock_released:'\n\n    \"\"\"\n\n    fuse_time = Summary('arvmount_fuse_operations_seconds', 'Time spent during FUSE operations', labelnames=['op'])\n    read_time = fuse_time.labels(op='read')\n    write_time = fuse_time.labels(op='write')\n    destroy_time = fuse_time.labels(op='destroy')\n    on_event_time = fuse_time.labels(op='on_event')\n    getattr_time = fuse_time.labels(op='getattr')\n    setattr_time = fuse_time.labels(op='setattr')\n    lookup_time = fuse_time.labels(op='lookup')\n    forget_time = fuse_time.labels(op='forget')\n    open_time = fuse_time.labels(op='open')\n    release_time = fuse_time.labels(op='release')\n    opendir_time = fuse_time.labels(op='opendir')\n    readdir_time = fuse_time.labels(op='readdir')\n    statfs_time = fuse_time.labels(op='statfs')\n    create_time = fuse_time.labels(op='create')\n    mkdir_time = fuse_time.labels(op='mkdir')\n    unlink_time = fuse_time.labels(op='unlink')\n    rmdir_time = fuse_time.labels(op='rmdir')\n    rename_time = fuse_time.labels(op='rename')\n    flush_time = fuse_time.labels(op='flush')\n\n    def __init__(self, uid, gid, api_client, encoding=\"utf-8\", inode_cache=None, num_retries=4, enable_write=False, fsns=None):\n        super(Operations, self).__init__()\n\n        self._api_client = api_client\n\n        if not inode_cache:\n            inode_cache = InodeCache(cap=256*1024*1024)\n\n        if fsns is None:\n            try:\n                fsns = self._api_client.config()[\"Collections\"][\"ForwardSlashNameSubstitution\"]\n            except KeyError:\n                # old API server with no FSNS config\n                fsns = '_'\n            else:\n                if fsns == '' or fsns == '/':\n                    fsns = None\n\n        # If we get overlapping shutdown events (e.g., fusermount -u\n        # -z and operations.destroy()) llfuse calls forget() on inodes\n        # that have already been deleted. 
To avoid this, we make\n        # forget() a no-op if called after destroy().\n        self._shutdown_started = threading.Event()\n\n        self.inodes = Inodes(inode_cache, encoding=encoding, fsns=fsns,\n                             shutdown_started=self._shutdown_started)\n        self.uid = uid\n        self.gid = gid\n        self.enable_write = enable_write\n\n        # dict of inode to filehandle\n        self._filehandles = {}\n        self._filehandles_counter = itertools.count(0)\n\n        # Other threads that need to wait until the fuse driver\n        # is fully initialized should wait() on this event object.\n        self.initlock = threading.Event()\n\n        self.num_retries = num_retries\n\n        self.read_counter = arvados.keep._Counter()\n        self.write_counter = arvados.keep._Counter()\n        self.read_ops_counter = arvados.keep._Counter()\n        self.write_ops_counter = arvados.keep._Counter()\n\n        self.events = None\n\n    def init(self):\n        # Allow threads that are waiting for the driver to be finished\n        # initializing to continue\n        self.initlock.set()\n\n    def metric_samples(self):\n        return self.fuse_time.collect()[0].samples\n\n    def metric_op_names(self):\n        ops = []\n        for cur_op in [sample.labels['op'] for sample in self.metric_samples()]:\n            if cur_op not in ops:\n                ops.append(cur_op)\n        return ops\n\n    def metric_value(self, opname, metric):\n        op_value = [sample.value for sample in self.metric_samples()\n                    if sample.name == metric and sample.labels['op'] == opname]\n        return op_value[0] if len(op_value) == 1 else None\n\n    def metric_sum_func(self, opname):\n        return lambda: self.metric_value(opname, \"arvmount_fuse_operations_seconds_sum\")\n\n    def metric_count_func(self, opname):\n        return lambda: int(self.metric_value(opname, \"arvmount_fuse_operations_seconds_count\"))\n\n    def begin_shutdown(self):\n        self._shutdown_started.set()\n        self.inodes.begin_shutdown()\n\n    @destroy_time.time()\n    @catch_exceptions\n    def destroy(self):\n        _logger.debug(\"arv-mount destroy: start\")\n\n        with llfuse.lock_released:\n            self.begin_shutdown()\n\n        if self.events:\n            self.events.close()\n            self.events = None\n\n        self.inodes.clear()\n\n        _logger.debug(\"arv-mount destroy: complete\")\n\n\n    def access(self, inode, mode, ctx):\n        return True\n\n    def listen_for_events(self):\n        self.events = arvados.events.subscribe(\n            self._api_client,\n            [[\"event_type\", \"in\", [\"create\", \"update\", \"delete\"]]],\n            self.on_event)\n\n    @on_event_time.time()\n    @catch_exceptions\n    def on_event(self, ev):\n        if 'event_type' not in ev or ev[\"event_type\"] not in (\"create\", \"update\", \"delete\"):\n            return\n        with llfuse.lock:\n            properties = ev.get(\"properties\") or {}\n            old_attrs = properties.get(\"old_attributes\") or {}\n            new_attrs = properties.get(\"new_attributes\") or {}\n\n            for item in self.inodes.find_by_uuid(ev[\"object_uuid\"]):\n                item.invalidate()\n\n            oldowner = old_attrs.get(\"owner_uuid\")\n            newowner = ev.get(\"object_owner_uuid\")\n            for parent in (\n                    self.inodes.find_by_uuid(oldowner) +\n                    self.inodes.find_by_uuid(newowner)):\n                
parent.invalidate()\n\n    @getattr_time.time()\n    @catch_exceptions\n    def getattr(self, inode, ctx=None):\n        if inode not in self.inodes:\n            _logger.debug(\"arv-mount getattr: inode %i missing\", inode)\n            raise llfuse.FUSEError(errno.ENOENT)\n\n        e = self.inodes[inode]\n        self.inodes.touch(e)\n        parent = None\n        if e.parent_inode:\n            parent = self.inodes[e.parent_inode]\n            self.inodes.touch(parent)\n\n        entry = llfuse.EntryAttributes()\n        entry.st_ino = inode\n        entry.generation = 0\n        entry.entry_timeout = parent.time_to_next_poll() if parent is not None else 0\n        entry.attr_timeout = e.time_to_next_poll() if e.allow_attr_cache else 0\n\n        entry.st_mode = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH\n        if isinstance(e, Directory):\n            entry.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_IFDIR\n        else:\n            entry.st_mode |= stat.S_IFREG\n            if isinstance(e, FuseArvadosFile):\n                entry.st_mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH\n\n        if self.enable_write and e.writable():\n            entry.st_mode |= stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH\n\n        entry.st_nlink = 1\n        entry.st_uid = self.uid\n        entry.st_gid = self.gid\n        entry.st_rdev = 0\n\n        entry.st_size = e.size()\n\n        entry.st_blksize = 512\n        entry.st_blocks = (entry.st_size // 512) + 1\n        if hasattr(entry, 'st_atime_ns'):\n            # llfuse >= 0.42\n            entry.st_atime_ns = int(e.atime() * 1000000000)\n            entry.st_mtime_ns = int(e.mtime() * 1000000000)\n            entry.st_ctime_ns = int(e.mtime() * 1000000000)\n        else:\n            # llfuse < 0.42 (atime/mtime are methods here too, so call them)\n            entry.st_atime = int(e.atime())\n            entry.st_mtime = int(e.mtime())\n            entry.st_ctime = int(e.mtime())\n\n        return entry\n\n    @setattr_time.time()\n    @catch_exceptions\n    def setattr(self, inode, attr, fields=None, fh=None, ctx=None):\n        entry = self.getattr(inode)\n\n        if fh is not None and fh in self._filehandles:\n            handle = self._filehandles[fh]\n            e = handle.obj\n        else:\n            e = self.inodes[inode]\n\n        if fields is None:\n            # llfuse < 0.42\n            update_size = attr.st_size is not None\n        else:\n            # llfuse >= 0.42\n            update_size = fields.update_size\n        if update_size and isinstance(e, FuseArvadosFile):\n            with llfuse.lock_released:\n                e.arvfile.truncate(attr.st_size)\n                entry.st_size = e.arvfile.size()\n\n        return entry\n\n    @lookup_time.time()\n    @catch_exceptions\n    def lookup(self, parent_inode, name, ctx=None):\n        name = str(name, self.inodes.encoding)\n        inode = None\n\n        if name == '.':\n            inode = parent_inode\n        elif parent_inode in self.inodes:\n            p = self.inodes[parent_inode]\n            self.inodes.touch(p)\n            if name == '..':\n                inode = p.parent_inode\n            elif isinstance(p, Directory) and name in p:\n                if p[name].inode is None:\n                    _logger.debug(\"arv-mount lookup: parent_inode %i name '%s' found but inode was None\",\n                                  parent_inode, name)\n                    raise llfuse.FUSEError(errno.ENOENT)\n\n                inode = p[name].inode\n\n        if inode is not None:\n            
_logger.debug(\"arv-mount lookup: parent_inode %i name '%s' inode %i\",\n                      parent_inode, name, inode)\n            self.inodes.touch(self.inodes[inode])\n            self.inodes[inode].inc_ref()\n            return self.getattr(inode)\n        else:\n            _logger.debug(\"arv-mount lookup: parent_inode %i name '%s' not found\",\n                      parent_inode, name)\n            raise llfuse.FUSEError(errno.ENOENT)\n\n    @forget_time.time()\n    @catch_exceptions\n    def forget(self, inodes):\n        if self._shutdown_started.is_set():\n            return\n        for inode, nlookup in inodes:\n            ent = self.inodes[inode]\n            _logger.debug(\"arv-mount forget: inode %i nlookup %i ref_count %i\", inode, nlookup, ent.ref_count)\n            if ent.dec_ref(nlookup) == 0 and ent.parent_inode is None:\n                self.inodes.del_entry(ent)\n\n    @open_time.time()\n    @catch_exceptions\n    def open(self, inode, flags, ctx=None):\n        if inode in self.inodes:\n            p = self.inodes[inode]\n        else:\n            _logger.debug(\"arv-mount open: inode %i missing\", inode)\n            raise llfuse.FUSEError(errno.ENOENT)\n\n        if isinstance(p, Directory):\n            raise llfuse.FUSEError(errno.EISDIR)\n\n        open_for_writing = (flags & os.O_WRONLY) or (flags & os.O_RDWR)\n        if open_for_writing and not p.writable():\n            raise llfuse.FUSEError(errno.EPERM)\n\n        fh = next(self._filehandles_counter)\n\n        if p.stale():\n            p.checkupdate()\n            self.inodes.invalidate_inode(p)\n\n        parent_inode = self.inodes[p.parent_inode] if p.parent_inode in self.inodes else None\n        self._filehandles[fh] = FileHandle(fh, p, parent_inode, open_for_writing)\n        self.inodes.touch(p)\n\n        # Normally, we will have received an \"update\" event if the\n        # parent collection is stale here. However, even if the parent\n        # collection hasn't changed, the manifest might have been\n        # fetched so long ago that the signatures on the data block\n        # locators have expired. 
Calling checkupdate() on all\n        # ancestors ensures the signatures will be refreshed if\n        # necessary.\n        while p.parent_inode in self.inodes:\n            if p == self.inodes[p.parent_inode]:\n                break\n            p = self.inodes[p.parent_inode]\n            self.inodes.touch(p)\n            p.checkupdate()\n\n        _logger.debug(\"arv-mount open inode %i flags %x fh %i\", inode, flags, fh)\n\n        return fh\n\n    @read_time.time()\n    @catch_exceptions\n    def read(self, fh, off, size):\n        _logger.debug(\"arv-mount read fh %i off %i size %i\", fh, off, size)\n        self.read_ops_counter.add(1)\n\n        if fh in self._filehandles:\n            handle = self._filehandles[fh]\n        else:\n            raise llfuse.FUSEError(errno.EBADF)\n\n        self.inodes.touch(handle.obj)\n\n        r = handle.obj.readfrom(off, size, self.num_retries)\n        if r:\n            self.read_counter.add(len(r))\n        return r\n\n    @write_time.time()\n    @catch_exceptions\n    def write(self, fh, off, buf):\n        _logger.debug(\"arv-mount write %i %i %i\", fh, off, len(buf))\n        self.write_ops_counter.add(1)\n\n        if fh in self._filehandles:\n            handle = self._filehandles[fh]\n        else:\n            raise llfuse.FUSEError(errno.EBADF)\n\n        if not handle.obj.writable():\n            raise llfuse.FUSEError(errno.EPERM)\n\n        self.inodes.touch(handle.obj)\n\n        w = handle.obj.writeto(off, buf, self.num_retries)\n        if w:\n            self.write_counter.add(w)\n        return w\n\n    @release_time.time()\n    @catch_exceptions\n    def release(self, fh):\n        if fh in self._filehandles:\n            _logger.debug(\"arv-mount release fh %i\", fh)\n            try:\n                self._filehandles[fh].flush(False)\n            except Exception:\n                raise\n            finally:\n                self._filehandles[fh].release()\n                del self._filehandles[fh]\n        self.inodes.cap_cache()\n\n    def releasedir(self, fh):\n        self.release(fh)\n\n    @opendir_time.time()\n    @catch_exceptions\n    def opendir(self, inode, ctx=None):\n        _logger.debug(\"arv-mount opendir: inode %i\", inode)\n\n        if inode in self.inodes:\n            p = self.inodes[inode]\n        else:\n            _logger.debug(\"arv-mount opendir: called with unknown or removed inode %i\", inode)\n            raise llfuse.FUSEError(errno.ENOENT)\n\n        if not isinstance(p, Directory):\n            raise llfuse.FUSEError(errno.ENOTDIR)\n\n        fh = next(self._filehandles_counter)\n        if p.parent_inode in self.inodes:\n            parent = self.inodes[p.parent_inode]\n        else:\n            _logger.warning(\"arv-mount opendir: parent inode %i of %i is missing\", p.parent_inode, inode)\n            raise llfuse.FUSEError(errno.EIO)\n\n        _logger.debug(\"arv-mount opendir: inode %i fh %i \", inode, fh)\n\n        # update atime\n        p.inc_use()\n        self._filehandles[fh] = DirectoryHandle(fh, p, [('.', p), ('..', parent)] + p.items())\n        p.dec_use()\n        self.inodes.touch(p)\n        return fh\n\n    @readdir_time.time()\n    @catch_exceptions\n    def readdir(self, fh, off):\n        _logger.debug(\"arv-mount readdir: fh %i off %i\", fh, off)\n\n        if fh in self._filehandles:\n            handle = self._filehandles[fh]\n        else:\n            raise llfuse.FUSEError(errno.EBADF)\n\n        e = off\n        while e < len(handle.entries):\n            
ent = handle.entries[e]\n            if ent[1].inode in self.inodes:\n                yield (ent[0].encode(self.inodes.encoding), self.getattr(ent[1].inode), e+1)\n            e += 1\n\n    @statfs_time.time()\n    @catch_exceptions\n    def statfs(self, ctx=None):\n        st = llfuse.StatvfsData()\n        st.f_bsize = 128 * 1024\n        st.f_blocks = 0\n        st.f_files = 0\n\n        st.f_bfree = 0\n        st.f_bavail = 0\n\n        st.f_ffree = 0\n        st.f_favail = 0\n\n        st.f_frsize = 0\n        return st\n\n    def _check_writable(self, inode_parent):\n        if not self.enable_write:\n            raise llfuse.FUSEError(errno.EROFS)\n\n        if inode_parent in self.inodes:\n            p = self.inodes[inode_parent]\n        else:\n            raise llfuse.FUSEError(errno.ENOENT)\n\n        if not isinstance(p, Directory):\n            raise llfuse.FUSEError(errno.ENOTDIR)\n\n        if not p.writable():\n            raise llfuse.FUSEError(errno.EPERM)\n\n        return p\n\n    @create_time.time()\n    @catch_exceptions\n    def create(self, inode_parent, name, mode, flags, ctx=None):\n        name = name.decode(encoding=self.inodes.encoding)\n        _logger.debug(\"arv-mount create: parent_inode %i '%s' %o\", inode_parent, name, mode)\n\n        p = self._check_writable(inode_parent)\n        p.create(name)\n\n        # The file entry should have been implicitly created by callback.\n        f = p[name]\n        fh = next(self._filehandles_counter)\n        self._filehandles[fh] = FileHandle(fh, f, p, True)\n        self.inodes.touch(p)\n\n        f.inc_ref()\n        return (fh, self.getattr(f.inode))\n\n    @mkdir_time.time()\n    @catch_exceptions\n    def mkdir(self, inode_parent, name, mode, ctx=None):\n        name = name.decode(encoding=self.inodes.encoding)\n        _logger.debug(\"arv-mount mkdir: parent_inode %i '%s' %o\", inode_parent, name, mode)\n\n        p = self._check_writable(inode_parent)\n        p.mkdir(name)\n\n        # The dir entry should have been implicitly created by callback.\n        d = p[name]\n\n        d.inc_ref()\n        return self.getattr(d.inode)\n\n    @unlink_time.time()\n    @catch_exceptions\n    def unlink(self, inode_parent, name, ctx=None):\n        name = name.decode(encoding=self.inodes.encoding)\n        _logger.debug(\"arv-mount unlink: parent_inode %i '%s'\", inode_parent, name)\n        p = self._check_writable(inode_parent)\n        p.unlink(name)\n\n    @rmdir_time.time()\n    @catch_exceptions\n    def rmdir(self, inode_parent, name, ctx=None):\n        name = name.decode(encoding=self.inodes.encoding)\n        _logger.debug(\"arv-mount rmdir: parent_inode %i '%s'\", inode_parent, name)\n        p = self._check_writable(inode_parent)\n        p.rmdir(name)\n\n    @rename_time.time()\n    @catch_exceptions\n    def rename(self, inode_parent_old, name_old, inode_parent_new, name_new, ctx=None):\n        name_old = name_old.decode(encoding=self.inodes.encoding)\n        name_new = name_new.decode(encoding=self.inodes.encoding)\n        _logger.debug(\"arv-mount rename: old_parent_inode %i '%s' new_parent_inode %i '%s'\", inode_parent_old, name_old, inode_parent_new, name_new)\n        src = self._check_writable(inode_parent_old)\n        dest = self._check_writable(inode_parent_new)\n        dest.rename(name_old, name_new, src)\n\n    @flush_time.time()\n    @catch_exceptions\n    def flush(self, fh):\n        if fh in self._filehandles:\n            self._filehandles[fh].flush(False)\n\n    def fsync(self, fh, 
datasync):\n        if fh in self._filehandles:\n            self._filehandles[fh].flush(True)\n            self.inodes.invalidate_inode(self._filehandles[fh].obj)\n\n    def fsyncdir(self, fh, datasync):\n        if fh in self._filehandles:\n            self._filehandles[fh].flush(True)\n\n    @catch_exceptions\n    def mknod(self, parent_inode, name, mode, rdev, ctx=None):\n        if not stat.S_ISREG(mode):\n            # Can only be used to create regular files.\n            raise NotImplementedError()\n\n        name = name.decode(encoding=self.inodes.encoding)\n        _logger.debug(\"arv-mount mknod: parent_inode %i '%s' %o\", parent_inode, name, mode)\n\n        p = self._check_writable(parent_inode)\n        p.create(name)\n\n        # The file entry should have been implicitly created by callback.\n        f = p[name]\n        self.inodes.touch(p)\n\n        f.inc_ref()\n        return self.getattr(f.inode)\n"
  },
  {
    "path": "services/fuse/arvados_fuse/command.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport argparse\nimport arvados\nimport daemon\nimport llfuse\nimport logging\nimport os\nimport resource\nimport signal\nimport subprocess\nimport sys\nimport time\nimport resource\n\nimport arvados.commands._util as arv_cmd\nfrom arvados_fuse import crunchstat\nfrom arvados_fuse import *\nfrom arvados_fuse.unmount import unmount\nfrom arvados_fuse._version import __version__\n\nclass ArgumentParser(argparse.ArgumentParser):\n    def __init__(self):\n        super(ArgumentParser, self).__init__(\n            parents=[arv_cmd.retry_opt],\n            description=\"Interact with Arvados data through a local filesystem\",\n        )\n        self.add_argument(\n            '--version',\n            action='version',\n            version=u\"%s %s\" % (sys.argv[0], __version__),\n            help=\"Print version and exit\",\n        )\n        self.add_argument(\n            'mountpoint',\n            metavar='MOUNT_DIR',\n            help=\"Directory path to mount data\",\n        )\n\n        mode_group = self.add_argument_group(\"Mount contents\")\n        mode = mode_group.add_mutually_exclusive_group()\n        mode.add_argument(\n            '--all',\n            action='store_const',\n            const='all',\n            dest='mode',\n            help=\"\"\"\nMount a subdirectory for each mode: `home`, `shared`, `by_id`, and `by_tag`\n(default if no `--mount-*` options are given)\n\"\"\",\n        )\n        mode.add_argument(\n            '--custom',\n            action='store_const',\n            const=None,\n            dest='mode',\n            help=\"\"\"\nMount a subdirectory for each mode specified by a `--mount-*` option\n(default if any `--mount-*` options are given;\nsee \"Mount custom layout and filtering\" section)\n\"\"\",\n        )\n        mode.add_argument(\n            '--collection',\n            metavar='UUID_OR_PDH',\n            help=\"Mount the specified collection\",\n        )\n        mode.add_argument(\n            '--home',\n            action='store_const',\n            const='home',\n            dest='mode',\n            help=\"Mount your home project\",\n        )\n        mode.add_argument(\n            '--project',\n            metavar='UUID',\n            help=\"Mount the specified project\",\n        )\n        mode.add_argument(\n            '--shared',\n            action='store_const',\n            const='shared',\n            dest='mode',\n            help=\"Mount a subdirectory for each project shared with you\",\n        )\n        mode.add_argument(\n            '--by-id',\n            action='store_const',\n            const='by_id',\n            dest='mode',\n            help=\"\"\"\nMount a magic directory where collections and projects are accessible through\nsubdirectories named after their UUID or portable data hash\n\"\"\",\n        )\n        mode.add_argument(\n            '--by-pdh',\n            action='store_const',\n            const='by_pdh',\n            dest='mode',\n            help=\"\"\"\nMount a magic directory where collections are accessible through\nsubdirectories named after their portable data hash\n\"\"\",\n        )\n        mode.add_argument(\n            '--by-tag',\n            action='store_const',\n            const='by_tag',\n            dest='mode',\n            help=\"Mount a subdirectory for each tag attached to a collection or project\",\n        )\n\n        mounts = 
self.add_argument_group(\"Mount custom layout and filtering\")\n        mounts.add_argument(\n            '--filters',\n            type=arv_cmd.JSONArgument(\n                validator=arv_cmd.validate_filters,\n                pretty_name=\"Arvados API filter\"\n            ),\n            help=\"\"\"\nFilters to apply to all project, shared, and tag directory contents.\nPass filters as either a JSON string or a path to a JSON file (use \"-\" for\nstandard input). The JSON object should be a list of filters in Arvados API\nlist filter syntax.\n\"\"\",\n        )\n        mounts.add_argument(\n            '--mount-home',\n            metavar='PATH',\n            action='append',\n            default=[],\n            help=\"Make your home project available under the mount at `PATH`\",\n        )\n        mounts.add_argument(\n            '--mount-shared',\n            metavar='PATH',\n            action='append',\n            default=[],\n            help=\"Make projects shared with you available under the mount at `PATH`\",\n        )\n        mounts.add_argument(\n            '--mount-tmp',\n            metavar='PATH',\n            action='append',\n            default=[],\n            help=\"\"\"\nMake a new temporary writable collection available under the mount at `PATH`.\nThis collection is deleted when the mount is unmounted.\n\"\"\",\n        )\n        mounts.add_argument(\n            '--mount-by-id',\n            metavar='PATH',\n            action='append',\n            default=[],\n            help=\"\"\"\nMake a magic directory available under the mount at `PATH` where collections and\nprojects are accessible through subdirectories named after their UUID or\nportable data hash\n\"\"\",\n        )\n        mounts.add_argument(\n            '--mount-by-pdh',\n            metavar='PATH',\n            action='append',\n            default=[],\n            help=\"\"\"\nMake a magic directory available under the mount at `PATH` where collections\nare accessible through subdirectories named after portable data hash\n\"\"\",\n        )\n        mounts.add_argument(\n            '--mount-by-tag',\n            metavar='PATH',\n            action='append',\n            default=[],\n            help=\"\"\"\nMake a subdirectory for each tag attached to a collection or project available\nunder the mount at `PATH`\n\"\"\" ,\n        )\n\n        perms = self.add_argument_group(\"Mount access and permissions\")\n        perms.add_argument(\n            '--allow-other',\n            action='store_true',\n            help=\"Let other users on this system read mounted data (default false)\",\n        )\n        perms.add_argument(\n            '--read-only',\n            action='store_false',\n            default=False,\n            dest='enable_write',\n            help=\"Mounted data cannot be modified from the mount (default)\",\n        )\n        perms.add_argument(\n            '--read-write',\n            action='store_true',\n            default=False,\n            dest='enable_write',\n            help=\"Mounted data can be modified from the mount\",\n        )\n\n        lifecycle = self.add_argument_group(\"Mount lifecycle management\")\n        lifecycle.add_argument(\n            '--exec',\n            nargs=argparse.REMAINDER,\n            dest=\"exec_args\",\n            help=\"\"\"\nMount data, run the specified command, then unmount and exit.\n`--exec` reads all remaining options as the command to run,\nso it must be the last option you specify.\nEither end your command 
arguments (and other options) with a `--` argument,\nor specify `--exec` after your mount point.\n\"\"\",\n        )\n        lifecycle.add_argument(\n            '--foreground',\n            action='store_true',\n            default=False,\n            help=\"Run mount process in the foreground instead of daemonizing (default false)\",\n        )\n        lifecycle.add_argument(\n            '--subtype',\n            help=\"Set mounted filesystem type to `fuse.SUBTYPE` (default is just `fuse`)\",\n        )\n        unmount = lifecycle.add_mutually_exclusive_group()\n        unmount.add_argument(\n            '--replace',\n            action='store_true',\n            default=False,\n            help=\"\"\"\nIf a FUSE mount is already mounted at the given directory,\nunmount it before mounting the requested data.\nIf `--subtype` is specified, unmount only if the mount has that subtype.\nWARNING: This command can affect any kind of FUSE mount, not just arv-mount.\n\"\"\",\n        )\n        unmount.add_argument(\n            '--unmount',\n            action='store_true',\n            default=False,\n            help=\"\"\"\nIf a FUSE mount is already mounted at the given directory, unmount it and exit.\nIf `--subtype` is specified, unmount only if the mount has that subtype.\nWARNING: This command can affect any kind of FUSE mount, not just arv-mount.\n\"\"\",\n        )\n        unmount.add_argument(\n            '--unmount-all',\n            action='store_true',\n            default=False,\n            help=\"\"\"\nUnmount all FUSE mounts at or below the given directory, then exit.\nIf `--subtype` is specified, unmount only if the mount has that subtype.\nWARNING: This command can affect any kind of FUSE mount, not just arv-mount.\n\"\"\",\n        )\n        lifecycle.add_argument(\n            '--unmount-timeout',\n            type=float,\n            default=2.0,\n            metavar='SECONDS',\n            help=\"\"\"\nThe number of seconds to wait for a clean unmount after an `--exec` command has\nexited (default %(default).01f).\nAfter this time, the mount will be forcefully unmounted.\n\"\"\",\n        )\n\n        reporting = self.add_argument_group(\"Mount logging and statistics\")\n        reporting.add_argument(\n            '--crunchstat-interval',\n            type=float,\n            default=0.0,\n            metavar='SECONDS',\n            help=\"Write stats to stderr every N seconds (default disabled)\",\n        )\n        reporting.add_argument(\n            '--debug',\n            action='store_true',\n            help=\"Log debug information\",\n        )\n        reporting.add_argument(\n            '--logfile',\n            help=\"Write debug logs and errors to the specified file (default stderr)\",\n        )\n\n        cache = self.add_argument_group(\"Mount local cache setup\")\n        cachetype = cache.add_mutually_exclusive_group()\n        cachetype.add_argument(\n            '--disk-cache',\n            action='store_true',\n            default=True,\n            dest='disk_cache',\n            help=\"Cache data on the local filesystem (default)\",\n        )\n        cachetype.add_argument(\n            '--ram-cache',\n            action='store_false',\n            default=True,\n            dest='disk_cache',\n            help=\"Cache data in memory\",\n        )\n        cache.add_argument(\n            '--disk-cache-dir',\n            metavar=\"DIRECTORY\",\n            help=\"Set custom filesystem cache location\",\n        )\n        
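# Editor's note (a hedged illustration, not part of the original\n        # source): with the cache options in this group, a mount allowed to\n        # keep up to 16 GiB of file data on local disk could be started as\n        #   arv-mount --disk-cache --file-cache 17179869184 ~/keep\n        # Leaving --file-cache at 0 keeps the per-cache-type default.\n        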
cache.add_argument(\n            '--directory-cache',\n            type=int,\n            default=128*1024*1024,\n            metavar='BYTES',\n            help=\"Size of directory data cache in bytes (default 128 MiB)\",\n        )\n        cache.add_argument(\n            '--file-cache',\n            type=int,\n            default=0,\n            metavar='BYTES',\n            help=\"\"\"\nSize of file data cache in bytes\n(default 8 GiB for filesystem cache, 256 MiB for memory cache)\n\"\"\",\n        )\n\n        plumbing = self.add_argument_group(\"Mount interactions with Arvados and Linux\")\n        plumbing.add_argument(\n            '--disable-event-listening',\n            action='store_true',\n            dest='disable_event_listening',\n            default=False,\n            help=\"Don't subscribe to events on the API server to update mount contents\",\n        )\n        plumbing.add_argument(\n            '--encoding',\n            default=\"utf-8\",\n            help=\"\"\"\nFilesystem character encoding\n(default %(default)r; specify a name from the Python codec registry)\n\"\"\",\n        )\n        plumbing.add_argument(\n            '--storage-classes',\n            metavar='CLASSES',\n            help=\"Comma-separated list of storage classes to request for new collections\",\n        )\n        plumbing.add_argument(\n            '--refresh-time',\n            metavar='SECONDS',\n            default=15,\n            type=int,\n            help=\"Upper limit on how long mount contents may be out of date with upstream Arvados before being refreshed on next access (default 15 seconds)\",\n        )\n        # This is a hidden argument used by tests.  Normally this\n        # value will be extracted from the cluster config, but mocking\n        # the cluster config under the presence of multiple threads\n        # and processes turned out to be too complicated and brittle.\n        plumbing.add_argument(\n            '--fsns',\n            type=str,\n            default=None,\n            help=argparse.SUPPRESS)\n\nclass Mount(object):\n    def __init__(self, args, logger=logging.getLogger('arvados.arv-mount')):\n        self.daemon = False\n        self.logger = logger\n        self.args = args\n        self.listen_for_events = False\n\n        self.args.mountpoint = os.path.realpath(self.args.mountpoint)\n        if self.args.logfile:\n            self.args.logfile = os.path.realpath(self.args.logfile)\n\n        try:\n            self._setup_logging()\n        except Exception as e:\n            self.logger.exception(\"exception during setup: %s\", e)\n            exit(1)\n\n        try:\n            nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)\n\n            minlimit = 10240\n            if self.args.file_cache:\n                # Adjust the file handle limit so it can meet\n                # the desired cache size. 
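(Editor's worked example, hedged: an\n                # 8 GiB file cache needs (8 GiB / 64 MiB) * 8 = 1024 file\n                # descriptors under the formula below.) 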
Multiply by 8 because the\n                # number of 64 MiB cache slots that keepclient\n                # allocates is RLIMIT_NOFILE / 8\n                minlimit = int((self.args.file_cache/(64*1024*1024)) * 8)\n\n            if nofile_limit[0] < minlimit:\n                resource.setrlimit(resource.RLIMIT_NOFILE, (min(minlimit, nofile_limit[1]), nofile_limit[1]))\n\n            if minlimit > nofile_limit[1]:\n                self.logger.warning(\"file handles required to meet --file-cache (%s) exceeds hard file handle limit (%s), cache size will be smaller than requested\", minlimit, nofile_limit[1])\n\n        except Exception as e:\n            self.logger.warning(\"unable to adjust file handle limit: %s\", e)\n\n        nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)\n        self.logger.info(\"file cache capped at %s bytes or less based on available disk (RLIMIT_NOFILE is %s)\", ((nofile_limit[0]//8)*64*1024*1024), nofile_limit)\n\n        try:\n            self._setup_api()\n            self._setup_mount()\n        except Exception as e:\n            self.logger.exception(\"exception during setup: %s\", e)\n            exit(1)\n\n    def __enter__(self):\n        if self.args.replace:\n            unmount(path=self.args.mountpoint,\n                    timeout=self.args.unmount_timeout)\n        llfuse.init(self.operations, str(self.args.mountpoint), self._fuse_options())\n        if self.daemon:\n            daemon.DaemonContext(\n                working_directory=os.path.dirname(self.args.mountpoint),\n                files_preserve=list(range(\n                    3, resource.getrlimit(resource.RLIMIT_NOFILE)[1]))\n            ).open()\n        if self.listen_for_events and not self.args.disable_event_listening:\n            self.operations.listen_for_events()\n        self.llfuse_thread = threading.Thread(None, lambda: self._llfuse_main())\n        self.llfuse_thread.daemon = True\n        self.llfuse_thread.start()\n        self.operations.initlock.wait()\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        if self.operations.events:\n            self.operations.events.close(timeout=self.args.unmount_timeout)\n        subprocess.call([\"fusermount\", \"-u\", \"-z\", self.args.mountpoint])\n        self.llfuse_thread.join(timeout=self.args.unmount_timeout)\n        self.api.keep.block_cache.clear()\n        if self.llfuse_thread.is_alive():\n            self.logger.warning(\"Mount.__exit__:\"\n                                \" llfuse thread still alive %fs after umount\"\n                                \" -- abandoning and exiting anyway\",\n                                self.args.unmount_timeout)\n\n    def run(self):\n        if self.args.unmount or self.args.unmount_all:\n            unmount(path=self.args.mountpoint,\n                    subtype=self.args.subtype,\n                    timeout=self.args.unmount_timeout,\n                    recursive=self.args.unmount_all)\n        elif self.args.exec_args:\n            self._run_exec()\n        else:\n            self._run_standalone()\n\n    def _fuse_options(self):\n        \"\"\"FUSE mount options; see mount.fuse(8)\"\"\"\n        opts = [optname for optname in ['allow_other', 'debug']\n                if getattr(self.args, optname)]\n        # Increase default read/write size from 4KiB to 128KiB\n        opts += [\"big_writes\", \"max_read=131072\"]\n        if self.args.subtype:\n            opts += [\"subtype=\"+self.args.subtype]\n        return opts\n\n    def 
_setup_logging(self):\n        # Configure a log handler based on command-line switches.\n        if self.args.logfile:\n            log_handler = logging.FileHandler(self.args.logfile)\n            log_handler.setFormatter(logging.Formatter(\n                '%(asctime)s %(name)s[%(process)d] %(levelname)s: %(message)s',\n                '%Y-%m-%d %H:%M:%S'))\n        else:\n            log_handler = None\n\n        if log_handler is not None:\n            arvados.logger.removeHandler(arvados.log_handler)\n            arvados.logger.addHandler(log_handler)\n\n        if self.args.debug:\n            arvados.logger.setLevel(logging.DEBUG)\n            logging.getLogger('arvados.keep').setLevel(logging.DEBUG)\n            logging.getLogger('arvados.api').setLevel(logging.DEBUG)\n            logging.getLogger('arvados.collection').setLevel(logging.DEBUG)\n            self.logger.debug(\"arv-mount debugging enabled\")\n\n        self.logger.info(\"%s %s started\", sys.argv[0], __version__)\n        self.logger.info(\"enable write is %s\", self.args.enable_write)\n\n    def _setup_api(self):\n        try:\n            # default value of file_cache is 0, this tells KeepBlockCache to\n            # choose a default based on whether disk_cache is enabled or not.\n\n            block_cache = arvados.keep.KeepBlockCache(cache_max=self.args.file_cache,\n                                                      disk_cache=self.args.disk_cache,\n                                                      disk_cache_dir=self.args.disk_cache_dir)\n\n            self.api = arvados.safeapi.ThreadSafeApiCache(\n                apiconfig=arvados.config.settings(),\n                api_params={\n                    'num_retries': self.args.retries,\n                },\n                keep_params={\n                    'block_cache': block_cache,\n                    'num_retries': self.args.retries,\n                },\n                version='v1',\n            )\n        except KeyError as e:\n            self.logger.error(\"Missing environment: %s\", e)\n            exit(1)\n        # Do a sanity check that we have a working arvados host + token.\n        self.api.users().current().execute()\n\n    def _setup_mount(self):\n        self.operations = Operations(\n            os.getuid(),\n            os.getgid(),\n            api_client=self.api,\n            encoding=self.args.encoding,\n            inode_cache=InodeCache(cap=self.args.directory_cache),\n            enable_write=self.args.enable_write,\n            fsns=self.args.fsns)\n\n        if self.args.crunchstat_interval:\n            statsthread = threading.Thread(\n                target=crunchstat.statlogger,\n                args=(self.args.crunchstat_interval,\n                      self.api.keep,\n                      self.operations))\n            statsthread.daemon = True\n            statsthread.start()\n\n        usr = self.api.users().current().execute(num_retries=self.args.retries)\n        now = time.time()\n        dir_class = None\n        dir_args = [\n            llfuse.ROOT_INODE,\n            self.operations.inodes,\n            self.api,\n            self.args.retries,\n            self.args.enable_write,\n            self.args.filters,\n        ]\n        mount_readme = False\n\n        storage_classes = None\n        if self.args.storage_classes is not None:\n            storage_classes = self.args.storage_classes.replace(' ', '').split(',')\n            self.logger.info(\"Storage classes requested for new collections: {}\".format(', 
'.join(storage_classes)))\n\n        if self.args.collection is not None:\n            # Set up the request handler with the collection at the root\n            # First check that the collection is readable\n            self.api.collections().get(uuid=self.args.collection).execute()\n            self.args.mode = 'collection'\n            dir_class = CollectionDirectory\n            dir_args.append(self.args.collection)\n        elif self.args.project is not None:\n            self.args.mode = 'project'\n            dir_class = ProjectDirectory\n            dir_args.append(\n                self.api.groups().get(uuid=self.args.project).execute(\n                    num_retries=self.args.retries))\n\n        if (self.args.mount_by_id or\n            self.args.mount_by_pdh or\n            self.args.mount_by_tag or\n            self.args.mount_home or\n            self.args.mount_shared or\n            self.args.mount_tmp):\n            if self.args.mode is not None:\n                sys.exit(\n                    \"Cannot combine '{}' mode with custom --mount-* options.\".\n                    format(self.args.mode))\n        elif self.args.mode is None:\n            # If no mode flag and no custom --mount-* options were given, --all is the default\n            self.args.mode = 'all'\n\n        if self.args.mode in ['by_id', 'by_pdh']:\n            # Set up the request handler with the 'magic directory' at the root\n            dir_class = MagicDirectory\n            dir_args.append(self.args.mode == 'by_pdh')\n        elif self.args.mode == 'by_tag':\n            dir_class = TagsDirectory\n        elif self.args.mode == 'shared':\n            dir_class = SharedDirectory\n            dir_args.append(usr)\n        elif self.args.mode == 'home':\n            dir_class = ProjectDirectory\n            dir_args.append(usr)\n        elif self.args.mode == 'all':\n            self.args.mount_by_id = ['by_id']\n            self.args.mount_by_tag = ['by_tag']\n            self.args.mount_home = ['home']\n            self.args.mount_shared = ['shared']\n            mount_readme = True\n\n        if dir_class is not None:\n            if dir_class in [TagsDirectory, CollectionDirectory]:\n                ent = dir_class(*dir_args, poll_time=self.args.refresh_time)\n            else:\n                ent = dir_class(*dir_args, storage_classes=storage_classes, poll_time=self.args.refresh_time)\n            self.operations.inodes.add_entry(ent)\n            self.listen_for_events = ent.want_event_subscribe()\n            return\n\n        e = self.operations.inodes.add_entry(Directory(\n            llfuse.ROOT_INODE,\n            self.operations.inodes,\n            self.args.enable_write,\n            self.args.filters,\n        ))\n        dir_args[0] = e.inode\n\n        for name in self.args.mount_by_id:\n            self._add_mount(e, name, MagicDirectory(*dir_args, pdh_only=False,\n                                                    storage_classes=storage_classes,\n                                                    poll_time=self.args.refresh_time))\n        for name in self.args.mount_by_pdh:\n            self._add_mount(e, name, MagicDirectory(*dir_args, pdh_only=True,\n                                                    poll_time=self.args.refresh_time))\n        for name in self.args.mount_by_tag:\n            self._add_mount(e, name, TagsDirectory(*dir_args))\n        for name in self.args.mount_home:\n            self._add_mount(e, name, ProjectDirectory(*dir_args, project_object=usr,\n                        
                              storage_classes=storage_classes,\n                                                      poll_time=self.args.refresh_time))\n        for name in self.args.mount_shared:\n            self._add_mount(e, name, SharedDirectory(*dir_args, exclude=usr,\n                                                     storage_classes=storage_classes,\n                                                     poll_time=self.args.refresh_time))\n        for name in self.args.mount_tmp:\n            self._add_mount(e, name, TmpCollectionDirectory(*dir_args,\n                                                            storage_classes=storage_classes))\n\n        if mount_readme:\n            text = self._readme_text(\n                arvados.config.get('ARVADOS_API_HOST'),\n                usr['email'])\n            self._add_mount(e, 'README', StringFile(e.inode, text, now))\n\n    def _add_mount(self, tld, name, ent):\n        if name in ['', '.', '..'] or '/' in name:\n            sys.exit(\"Mount point '{}' is not supported.\".format(name))\n        tld._entries[name] = self.operations.inodes.add_entry(ent)\n        self.listen_for_events = (self.listen_for_events or ent.want_event_subscribe())\n\n    def _readme_text(self, api_host, user_email):\n        return '''\nWelcome to Arvados!  This directory provides file system access to\nfiles and objects available on the Arvados installation located at\n'{}' using credentials for user '{}'.\n\nFrom here, the following directories are available:\n\n  by_id/     Access to Keep collections by uuid or portable data hash (see by_id/README for details).\n  by_tag/    Access to Keep collections organized by tag.\n  home/      The contents of your home project.\n  shared/    Projects shared with you.\n\n'''.format(api_host, user_email)\n\n    def _run_exec(self):\n        rc = 255\n        with self:\n            try:\n                sp = subprocess.Popen(self.args.exec_args, shell=False)\n\n                # forward signals to the process.\n                signal.signal(signal.SIGINT, lambda signum, frame: sp.send_signal(signum))\n                signal.signal(signal.SIGTERM, lambda signum, frame: sp.send_signal(signum))\n                signal.signal(signal.SIGQUIT, lambda signum, frame: sp.send_signal(signum))\n\n                # wait for process to complete.\n                rc = sp.wait()\n\n                # restore default signal handlers.\n                signal.signal(signal.SIGINT, signal.SIG_DFL)\n                signal.signal(signal.SIGTERM, signal.SIG_DFL)\n                signal.signal(signal.SIGQUIT, signal.SIG_DFL)\n            except Exception as e:\n                self.logger.exception(\n                    'arv-mount: exception during exec %s', self.args.exec_args)\n                try:\n                    rc = e.errno\n                except AttributeError:\n                    pass\n        exit(rc)\n\n    def _run_standalone(self):\n        try:\n            self.daemon = not self.args.foreground\n            with self:\n                self.llfuse_thread.join(timeout=None)\n        except Exception as e:\n            self.logger.exception('arv-mount: exception during mount: %s', e)\n            exit(getattr(e, 'errno', 1))\n        exit(0)\n\n    def _llfuse_main(self):\n        try:\n            llfuse.main(workers=10)\n        except:\n            llfuse.close(unmount=False)\n            raise\n        self.operations.begin_shutdown()\n        llfuse.close()\n"
  },
  {
    "path": "services/fuse/arvados_fuse/crunchstat.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport sys\nimport time\n\nfrom collections import namedtuple\n\nStat = namedtuple(\"Stat\", ['name', 'get'])\n\nclass StatWriter(object):\n    def __init__(self, prefix, interval, stats):\n        self.prefix = prefix\n        self.interval = interval\n        self.stats = stats\n        self.previous_stats = []\n        self.update_previous_stats()\n\n    def update_previous_stats(self):\n        self.previous_stats = [stat.get() for stat in self.stats]\n\n    def update(self):\n        def append_by_type(string, name, value):\n            if type(value) is float:\n                string += \" %.6f %s\" % (value, name)\n            else:\n                string += \" %s %s\" % (str(value), name)\n            return string\n\n        out = \"crunchstat: %s\" % self.prefix\n        delta = \"-- interval %.4f seconds\" % self.interval\n        for i, stat in enumerate(self.stats):\n            value = stat.get()\n            diff = value - self.previous_stats[i]\n            delta = append_by_type(delta, stat.name, diff)\n            out = append_by_type(out, stat.name, value)\n\n        sys.stderr.write(\"%s %s\\n\" % (out, delta))\n        self.update_previous_stats()\n\ndef statlogger(interval, keep, ops):\n    calls = StatWriter(\"keepcalls\", interval, [\n        Stat(\"put\", keep.put_counter.get),\n        Stat(\"get\", keep.get_counter.get)\n    ])\n    net = StatWriter(\"net:keep0\", interval, [\n        Stat(\"tx\", keep.upload_counter.get),\n        Stat(\"rx\", keep.download_counter.get)\n    ])\n    cache = StatWriter(\"keepcache\", interval, [\n        Stat(\"hit\", keep.hits_counter.get),\n        Stat(\"miss\", keep.misses_counter.get)\n    ])\n    fuseops = StatWriter(\"fuseops\", interval, [\n        Stat(\"write\", ops.write_ops_counter.get),\n        Stat(\"read\", ops.read_ops_counter.get)\n    ])\n    fusetimes = []\n    for cur_op in ops.metric_op_names():\n        name = \"fuseop:{0}\".format(cur_op)\n        fusetimes.append(StatWriter(name, interval, [\n            Stat(\"count\", ops.metric_count_func(cur_op)),\n            Stat(\"time\", ops.metric_sum_func(cur_op))\n        ]))\n    blk = StatWriter(\"blkio:0:0\", interval, [\n        Stat(\"write\", ops.write_counter.get),\n        Stat(\"read\", ops.read_counter.get)\n    ])\n\n    while True:\n        time.sleep(interval)\n        calls.update()\n        net.update()\n        cache.update()\n        blk.update()\n        fuseops.update()\n        for ftime in fusetimes:\n            ftime.update()\n\n\n"
  },
  {
    "path": "services/fuse/arvados_fuse/fresh.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport ciso8601\nimport calendar\nimport functools\nimport time\n\ndef convertTime(t):\n    \"\"\"Parse Arvados timestamp to unix time.\"\"\"\n    if not t:\n        return 0\n    try:\n        return calendar.timegm(ciso8601.parse_datetime_as_naive(t).timetuple())\n    except (TypeError, ValueError):\n        return 0\n\ndef use_counter(orig_func):\n    @functools.wraps(orig_func)\n    def use_counter_wrapper(self, *args, **kwargs):\n        try:\n            self.inc_use()\n            return orig_func(self, *args, **kwargs)\n        finally:\n            self.dec_use()\n    return use_counter_wrapper\n\ndef check_update(orig_func):\n    @functools.wraps(orig_func)\n    def check_update_wrapper(self, *args, **kwargs):\n        self.checkupdate()\n        return orig_func(self, *args, **kwargs)\n    return check_update_wrapper\n\nclass FreshBase(object):\n    \"\"\"Base class for maintaining object lifecycle.\n\n    Functions include:\n\n    * Indicate if an object is up to date (stale() == false) or needs to be\n      updated sets stale() == True).  Use invalidate() to mark the object as\n      stale.  An object is also automatically stale if it has not been updated\n      in `_poll_time` seconds.\n\n    * Record access time (atime) timestamp\n\n    * Manage internal use count used by the inode cache (\"inc_use\" and\n      \"dec_use\").  An object which is in use cannot be cleared by the inode\n      cache.\n\n    * Manage the kernel reference count (\"inc_ref\" and \"dec_ref\").  An object\n      which is referenced by the kernel cannot have its inode entry deleted.\n\n    * Record cache footprint, cache priority\n\n    * Record Arvados uuid at the time the object is placed in the cache\n\n    * Clear the object contents (invalidates the object)\n\n    \"\"\"\n\n    __slots__ = (\"_stale\", \"_poll\", \"_last_update\", \"_atime\", \"_poll_time\", \"use_count\",\n                 \"ref_count\", \"cache_size\", \"cache_uuid\", \"allow_attr_cache\")\n\n    def __init__(self):\n        self._stale = True\n        self._poll = False\n        self._last_update = time.time()\n        self._atime = time.time()\n        self._poll_time = 60\n        self.use_count = 0\n        self.ref_count = 0\n        self.cache_size = 0\n        self.cache_uuid = None\n\n        # Can the kernel cache attributes?\n        self.allow_attr_cache = True\n\n    def invalidate(self):\n        \"\"\"Indicate that object contents should be refreshed from source.\"\"\"\n        self._stale = True\n\n    def kernel_invalidate(self):\n        \"\"\"Indicate that an invalidation for this object should be sent to the kernel.\"\"\"\n        pass\n\n    # Test if the entries dict is stale.\n    def stale(self):\n        if self._stale:\n            return True\n        if self._poll:\n            return (self._last_update + self._poll_time) < self._atime\n        return False\n\n    def fresh(self):\n        self._stale = False\n        self._last_update = time.time()\n\n    def atime(self):\n        return self._atime\n\n    def persisted(self):\n        return False\n\n    def clear(self):\n        pass\n\n    def in_use(self):\n        return self.use_count > 0\n\n    def inc_use(self):\n        self.use_count += 1\n\n    def dec_use(self):\n        self.use_count -= 1\n\n    def inc_ref(self):\n        self.ref_count += 1\n        return self.ref_count\n\n    def dec_ref(self, n):\n        
self.ref_count -= n\n        return self.ref_count\n\n    def has_ref(self):\n        \"\"\"Determine if there are any kernel references to this\n        object.\n        \"\"\"\n        return self.ref_count > 0\n\n    def objsize(self):\n        return 0\n\n    def uuid(self):\n        return None\n\n    def finalize(self):\n        pass\n\n    def child_event(self, ev):\n        pass\n\n    def time_to_next_poll(self):\n        if self._poll:\n            t = (self._last_update + self._poll_time) - self._atime\n            if t < 0:\n                return 0\n            else:\n                return t\n        else:\n            return self._poll_time\n\n    def update(self):\n        pass\n\n    def checkupdate(self):\n        if self.stale():\n            self.update()\n"
  },
  {
    "path": "services/fuse/arvados_fuse/fusedir.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport apiclient\nimport arvados\nimport errno\nimport functools\nimport llfuse\nimport logging\nimport re\nimport sys\nimport threading\nimport time\nfrom apiclient import errors as apiclient_errors\n\nfrom .fusefile import StringFile, ObjectFile, FuncToJSONFile, FuseArvadosFile\nfrom .fresh import FreshBase, convertTime, use_counter, check_update\n\nimport arvados.collection\nfrom arvados.util import portable_data_hash_pattern, uuid_pattern, collection_uuid_pattern, group_uuid_pattern, user_uuid_pattern, link_uuid_pattern\n\n_logger = logging.getLogger('arvados.arvados_fuse')\n\n\n# Match any character which FUSE or Linux cannot accommodate as part\n# of a filename. (If present in a collection filename, they will\n# appear as underscores in the fuse mount.)\n_disallowed_filename_characters = re.compile(r'[\\x00/]')\n\n\nclass Directory(FreshBase):\n    \"\"\"Generic directory object, backed by a dict.\n\n    Consists of a set of entries with the key representing the filename\n    and the value referencing a File or Directory object.\n    \"\"\"\n\n    __slots__ = (\"inode\", \"parent_inode\", \"inodes\", \"_entries\", \"_mtime\", \"_enable_write\", \"_filters\")\n\n    def __init__(self, parent_inode, inodes, enable_write, filters):\n        \"\"\"parent_inode is the integer inode number\"\"\"\n\n        super(Directory, self).__init__()\n\n        self.inode = None\n        if not isinstance(parent_inode, int):\n            raise Exception(\"parent_inode should be an int\")\n        self.parent_inode = parent_inode\n        self.inodes = inodes\n        self._entries = {}\n        self._mtime = time.time()\n        self._enable_write = enable_write\n        self._filters = filters or []\n\n    def _filters_for(self, subtype, *, qualified):\n        for f in self._filters:\n            f_type, _, f_name = f[0].partition('.')\n            if not f_name:\n                yield f\n            elif f_type != subtype:\n                pass\n            elif qualified:\n                yield f\n            else:\n                yield [f_name, *f[1:]]\n\n    def unsanitize_filename(self, incoming):\n        \"\"\"Replace ForwardSlashNameSubstitution value with /\"\"\"\n        fsns = self.inodes.forward_slash_subst()\n        if isinstance(fsns, str):\n            return incoming.replace(fsns, '/')\n        else:\n            return incoming\n\n    def sanitize_filename(self, dirty):\n        \"\"\"Replace disallowed filename characters according to\n        ForwardSlashNameSubstitution in self.api_config.\"\"\"\n        # '.' and '..' 
\n        if dirty is None:\n            return None\n        elif dirty == '':\n            return '_'\n        elif dirty == '.':\n            return '_'\n        elif dirty == '..':\n            return '__'\n        else:\n            fsns = self.inodes.forward_slash_subst()\n            if isinstance(fsns, str):\n                dirty = dirty.replace('/', fsns)\n            return _disallowed_filename_characters.sub('_', dirty)\n\n\n    #  Overridden by subclasses to implement logic to update the\n    #  entries dict when the directory is stale\n    @use_counter\n    def update(self):\n        pass\n\n    # Only used when computing the size of the disk footprint of the directory\n    # (stub)\n    def size(self):\n        return 0\n\n    def persisted(self):\n        return False\n\n    def checkupdate(self):\n        if self.stale():\n            try:\n                self.update()\n            except apiclient.errors.HttpError as e:\n                _logger.warning(e)\n\n    @use_counter\n    @check_update\n    def __getitem__(self, item):\n        return self._entries[item]\n\n    @use_counter\n    @check_update\n    def items(self):\n        return list(self._entries.items())\n\n    @use_counter\n    @check_update\n    def __contains__(self, k):\n        return k in self._entries\n\n    @use_counter\n    @check_update\n    def __len__(self):\n        return len(self._entries)\n\n    def fresh(self):\n        self.inodes.touch(self)\n        super(Directory, self).fresh()\n\n    def objsize(self):\n        # Rough estimate of memory footprint based on using pympler\n        return len(self._entries) * 1024\n\n    def merge(self, items, fn, same, new_entry):\n        \"\"\"Helper method for updating the contents of the directory.\n\n        Takes a list describing the new contents of the directory, reuses\n        entries that are the same in both the old and new lists, creates new\n        entries, and deletes old entries missing from the new list.\n\n        Arguments:\n        * items: Iterable --- New directory contents\n\n        * fn: Callable --- Takes an entry in 'items' and returns the desired file or\n        directory name, or None if this entry should be skipped\n\n        * same: Callable --- Compares an existing entry (a File or Directory\n        object) with an entry in the items list to determine whether to keep\n        the existing entry.\n\n        * new_entry: Callable --- Creates a new directory entry (File or Directory\n        object) from an entry in the items list.\n\n        \"\"\"\n\n        oldentries = self._entries\n        self._entries = {}\n        changed = False\n        for i in items:\n            name = self.sanitize_filename(fn(i))\n            if not name:\n                continue\n            if name in oldentries:\n                ent = oldentries[name]\n                if same(ent, i) and ent.parent_inode == self.inode:\n                    # move existing directory entry over\n                    self._entries[name] = ent\n                    del oldentries[name]\n                    self.inodes.inode_cache.touch(ent)\n\n        for i in items:\n            name = self.sanitize_filename(fn(i))\n            if not name:\n                continue\n            if name not in self._entries:\n                # create new directory entry\n                ent = new_entry(i)\n                if ent is not None:\n                    self._entries[name] = self.inodes.add_entry(ent)
\n                    # need to invalidate this just in case there was a\n                    # previous entry that couldn't be moved over or a\n                    # lookup that returned file not found and cached\n                    # a negative result\n                    self.inodes.invalidate_entry(self, name)\n                    changed = True\n                    _logger.debug(\"Added entry '%s' as inode %i to parent inode %i\", name, ent.inode, self.inode)\n\n        # delete any other directory entries that were not found in 'items'\n        for name, ent in oldentries.items():\n            _logger.debug(\"Detaching entry '%s' from parent_inode %i\", name, self.inode)\n            self.inodes.invalidate_entry(self, name)\n            self.inodes.del_entry(ent)\n            changed = True\n\n        if changed:\n            self._mtime = time.time()\n            self.inodes.inode_cache.update_cache_size(self)\n\n        self.fresh()\n\n    def in_use(self):\n        if super(Directory, self).in_use():\n            return True\n        for v in self._entries.values():\n            if v.in_use():\n                return True\n        return False\n\n    def clear(self):\n        \"\"\"Delete all entries\"\"\"\n        if not self._entries:\n            return\n        oldentries = self._entries\n        self._entries = {}\n        self.invalidate()\n        for name, ent in oldentries.items():\n            ent.clear()\n            self.inodes.invalidate_entry(self, name)\n            self.inodes.del_entry(ent)\n        self.inodes.inode_cache.update_cache_size(self)\n\n    def kernel_invalidate(self):\n        # Invalidating the dentry on the parent implies invalidating all paths\n        # below it as well.\n        if self.parent_inode in self.inodes:\n            parent = self.inodes[self.parent_inode]\n        else:\n            # parent was removed already.\n            return\n\n        # Find self on the parent in order to invalidate this path.\n        # Calling the public items() method might trigger a refresh,\n        # which we definitely don't want, so read the internal dict directly.\n        for k,v in parent._entries.items():\n            if v is self:\n                self.inodes.invalidate_entry(parent, k)\n                break\n\n    def mtime(self):\n        return self._mtime\n\n    def writable(self):\n        return False\n\n    def flush(self):\n        pass\n\n    def want_event_subscribe(self):\n        raise NotImplementedError()\n\n    def create(self, name):\n        raise NotImplementedError()\n\n    def mkdir(self, name):\n        raise NotImplementedError()\n\n    def unlink(self, name):\n        raise NotImplementedError()\n\n    def rmdir(self, name):\n        raise NotImplementedError()\n\n    def rename(self, name_old, name_new, src):\n        raise NotImplementedError()\n\n\nclass CollectionDirectoryBase(Directory):\n    \"\"\"Represent an Arvados Collection as a directory.\n\n    This class is used for Subcollections, and is also the base class for\n    CollectionDirectory, which implements collection loading/saving on\n    Collection records.\n\n    Most operations act only on the underlying Arvados `Collection` object.  The\n    `Collection` object signals via a notify callback to\n    `CollectionDirectoryBase.on_event` that an item was added, removed or\n    modified.\n    
FUSE inodes and directory entries are created, deleted or\n    invalidated in response to these events.\n\n    \"\"\"\n\n    __slots__ = (\"collection\", \"collection_root\", \"collection_record_file\")\n\n    def __init__(self, parent_inode, inodes, enable_write, filters, collection, collection_root, poll_time=15):\n        super(CollectionDirectoryBase, self).__init__(parent_inode, inodes, enable_write, filters)\n        self.collection = collection\n        self.collection_root = collection_root\n        self.collection_record_file = None\n        self._poll_time = poll_time\n\n    def new_entry(self, name, item, mtime):\n        name = self.sanitize_filename(name)\n        if hasattr(item, \"fuse_entry\") and item.fuse_entry is not None:\n            if item.fuse_entry.parent_inode is not None:\n                raise Exception(\"Can only reparent unparented inode entry\")\n            if item.fuse_entry.inode is None:\n                raise Exception(\"Reparented entry must still have valid inode\")\n            item.fuse_entry.parent_inode = self.inode\n            self._entries[name] = item.fuse_entry\n        elif isinstance(item, arvados.collection.RichCollectionBase):\n            self._entries[name] = self.inodes.add_entry(CollectionDirectoryBase(\n                self.inode,\n                self.inodes,\n                self._enable_write,\n                self._filters,\n                item,\n                self.collection_root,\n                poll_time=self._poll_time\n            ))\n            self._entries[name].populate(mtime)\n        else:\n            self._entries[name] = self.inodes.add_entry(FuseArvadosFile(self.inode, item, mtime,\n                                                                        self._enable_write,\n                                                                        self._poll, self._poll_time))\n        item.fuse_entry = self._entries[name]\n\n    def on_event(self, event, collection, name, item):\n\n        # These are events from the Collection object (ADD/DEL/MOD)\n        # emitted by operations on the Collection object (like\n        # \"mkdirs\" or \"remove\"), and by \"update\", which we need to\n        # synchronize with our FUSE objects that are assigned inodes.\n        if collection != self.collection:\n            return\n\n        name = self.sanitize_filename(name)\n        if event == arvados.collection.ADD:\n            self.new_entry(name, item, self.mtime())\n        elif event == arvados.collection.DEL:\n            ent = self._entries.pop(name)\n            self.inodes.invalidate_entry(self, name)\n            self.inodes.del_entry(ent)\n        elif event == arvados.collection.MOD:\n            # MOD events have (modified_from, newitem)\n            _, newitem = item\n            entry = getattr(newitem, \"fuse_entry\", None) or self._entries.get(name)\n            if entry is not None:\n                entry.invalidate()\n                self.inodes.invalidate_inode(entry)\n\n        # TOK and WRITE events just invalidate the\n        # collection record file.\n        if self.collection_record_file is not None:\n            self.collection_record_file.invalidate()\n            self.inodes.invalidate_inode(self.collection_record_file)\n\n    def populate(self, mtime):\n        self._mtime = mtime\n        with self.collection.lock:\n            self.collection.subscribe(self.on_event)\n            for entry, item in self.collection.items():\n                self.new_entry(entry, item, self.mtime())\n\n    def 
writable(self):\n        return self._enable_write and self.collection.writable()\n\n    @use_counter\n    def flush(self):\n        self.collection_root.flush()\n\n    @use_counter\n    @check_update\n    def create(self, name):\n        if not self.writable():\n            raise llfuse.FUSEError(errno.EROFS)\n        with llfuse.lock_released:\n            self.collection.open(name, \"w\").close()\n\n    @use_counter\n    @check_update\n    def mkdir(self, name):\n        if not self.writable():\n            raise llfuse.FUSEError(errno.EROFS)\n        with llfuse.lock_released:\n            self.collection.mkdirs(name)\n\n    @use_counter\n    @check_update\n    def unlink(self, name):\n        if not self.writable():\n            raise llfuse.FUSEError(errno.EROFS)\n        with llfuse.lock_released:\n            self.collection.remove(name)\n        self.flush()\n\n    @use_counter\n    @check_update\n    def rmdir(self, name):\n        if not self.writable():\n            raise llfuse.FUSEError(errno.EROFS)\n        with llfuse.lock_released:\n            self.collection.remove(name)\n        self.flush()\n\n    @use_counter\n    @check_update\n    def rename(self, name_old, name_new, src):\n        if not self.writable():\n            raise llfuse.FUSEError(errno.EROFS)\n\n        if not isinstance(src, CollectionDirectoryBase):\n            raise llfuse.FUSEError(errno.EPERM)\n\n        if name_new in self:\n            ent = src[name_old]\n            tgt = self[name_new]\n            if isinstance(ent, FuseArvadosFile) and isinstance(tgt, FuseArvadosFile):\n                pass\n            elif isinstance(ent, CollectionDirectoryBase) and isinstance(tgt, CollectionDirectoryBase):\n                if len(tgt) > 0:\n                    raise llfuse.FUSEError(errno.ENOTEMPTY)\n            elif isinstance(ent, CollectionDirectoryBase) and isinstance(tgt, FuseArvadosFile):\n                raise llfuse.FUSEError(errno.ENOTDIR)\n            elif isinstance(ent, FuseArvadosFile) and isinstance(tgt, CollectionDirectoryBase):\n                raise llfuse.FUSEError(errno.EISDIR)\n\n        with llfuse.lock_released:\n            self.collection.rename(name_old, name_new, source_collection=src.collection, overwrite=True)\n        self.flush()\n        src.flush()\n\n    def clear(self):\n        super(CollectionDirectoryBase, self).clear()\n        if self.collection is not None:\n            self.collection.unsubscribe()\n        self.collection = None\n\n    def objsize(self):\n        # objsize for the whole collection is represented at the root,\n        # don't double-count it\n        return 0\n\nclass CollectionDirectory(CollectionDirectoryBase):\n    \"\"\"Represents the root of a directory tree representing a collection.\"\"\"\n\n    __slots__ = (\"api\", \"num_retries\", \"collection_locator\",\n                 \"_manifest_size\", \"_writable\", \"_updating_lock\")\n\n    def __init__(self, parent_inode, inodes, api, num_retries, enable_write,\n                 filters=None, collection_record=None,\n                 poll_time=15):\n        super(CollectionDirectory, self).__init__(parent_inode, inodes, enable_write, filters, None, self)\n        self.api = api\n        self.num_retries = num_retries\n        self._poll = True\n\n        if isinstance(collection_record, dict):\n            self.collection_locator = collection_record['uuid']\n            self._mtime = convertTime(collection_record.get('modified_at'))\n        else:\n            self.collection_locator = 
collection_record\n            self._mtime = 0\n\n        is_uuid = (self.collection_locator is not None) and (uuid_pattern.match(self.collection_locator) is not None)\n\n        if is_uuid:\n            # It is a uuid, it may be updated upstream, so recheck it periodically.\n            self._poll_time = poll_time\n        else:\n            # It is not a uuid.  An immutable collection only needs to\n            # be refreshed if the mount is very long lived (long enough\n            # that there's a risk of the blob signatures expiring).\n            try:\n                self._poll_time = (api._rootDesc.get('blobSignatureTtl', 60*60*2) // 2)\n            except Exception:\n                _logger.debug(\"Error getting blobSignatureTtl from discovery document: %s\", sys.exc_info()[0])\n                self._poll_time = 60*60\n\n        self._writable = is_uuid and enable_write\n        self._manifest_size = 0\n        self._updating_lock = threading.Lock()\n\n    def same(self, i):\n        return i['uuid'] == self.collection_locator or i['portable_data_hash'] == self.collection_locator\n\n    def writable(self):\n        return self._enable_write and (self.collection.writable() if self.collection is not None else self._writable)\n\n    @use_counter\n    def flush(self):\n        with llfuse.lock_released:\n            with self._updating_lock:\n                if self.collection.committed():\n                    self.collection.update()\n                else:\n                    self.collection.save()\n                self.new_collection_record(self.collection.api_response())\n\n    def want_event_subscribe(self):\n        return (uuid_pattern.match(self.collection_locator) is not None)\n\n    def new_collection(self, new_collection_record, coll_reader):\n        if self.inode:\n            self.clear()\n        self.collection = coll_reader\n        self.new_collection_record(new_collection_record)\n        self.populate(self.mtime())\n\n    def new_collection_record(self, new_collection_record):\n        if not new_collection_record:\n            raise Exception(\"invalid new_collection_record\")\n        self._mtime = convertTime(new_collection_record.get('modified_at'))\n        self._manifest_size = len(new_collection_record[\"manifest_text\"])\n        self.collection_locator = new_collection_record[\"uuid\"]\n        if self.collection_record_file is not None:\n            self.collection_record_file.invalidate()\n            self.inodes.invalidate_inode(self.collection_record_file)\n            _logger.debug(\"parent_inode %s invalidated collection record file inode %s\", self.inode,\n                          self.collection_record_file.inode)\n        self.inodes.update_uuid(self)\n        self.inodes.inode_cache.update_cache_size(self)\n        self.fresh()\n\n    def uuid(self):\n        return self.collection_locator\n\n    @use_counter\n    def update(self):\n        try:\n            if self.collection_locator is None:\n                # No collection locator to retrieve from\n                self.fresh()\n                return True\n\n            new_collection_record = None\n            try:\n                with llfuse.lock_released:\n                    self._updating_lock.acquire()\n                    if not self.stale():\n                        return True\n\n                    _logger.debug(\"Updating collection %s inode %s\", self.collection_locator, self.inode)\n                    coll_reader = None\n                    if self.collection is not 
None:\n                        # Already have a collection object\n                        self.collection.update()\n                        new_collection_record = self.collection.api_response()\n                    else:\n                        # Create a new collection object\n                        if uuid_pattern.match(self.collection_locator):\n                            coll_reader = arvados.collection.Collection(\n                                self.collection_locator, self.api, self.api.keep,\n                                num_retries=self.num_retries)\n                        else:\n                            coll_reader = arvados.collection.CollectionReader(\n                                self.collection_locator, self.api, self.api.keep,\n                                num_retries=self.num_retries)\n                        new_collection_record = coll_reader.api_response() or {}\n                        # If the Collection only exists in Keep, there will be no API\n                        # response.  Fill in the fields we need.\n                        if 'uuid' not in new_collection_record:\n                            new_collection_record['uuid'] = self.collection_locator\n                        if \"portable_data_hash\" not in new_collection_record:\n                            new_collection_record[\"portable_data_hash\"] = new_collection_record[\"uuid\"]\n                        if 'manifest_text' not in new_collection_record:\n                            new_collection_record['manifest_text'] = coll_reader.manifest_text()\n                        if 'storage_classes_desired' not in new_collection_record:\n                            new_collection_record['storage_classes_desired'] = coll_reader.storage_classes_desired()\n\n                # end with llfuse.lock_released, re-acquire lock\n\n                if new_collection_record is not None:\n                    if coll_reader is not None:\n                        self.new_collection(new_collection_record, coll_reader)\n                    else:\n                        self.new_collection_record(new_collection_record)\n\n                return True\n            finally:\n                self._updating_lock.release()\n        except arvados.errors.NotFoundError as e:\n            _logger.error(\"Error fetching collection '%s': %s\", self.collection_locator, e)\n        except arvados.errors.ArgumentError as detail:\n            _logger.warning(\"arv-mount %s: error %s\", self.collection_locator, detail)\n            if new_collection_record is not None and \"manifest_text\" in new_collection_record:\n                _logger.warning(\"arv-mount manifest_text is: %s\", new_collection_record[\"manifest_text\"])\n        except Exception:\n            _logger.exception(\"arv-mount %s: error\", self.collection_locator)\n            if new_collection_record is not None and \"manifest_text\" in new_collection_record:\n                _logger.error(\"arv-mount manifest_text is: %s\", new_collection_record[\"manifest_text\"])\n        self.invalidate()\n        return False\n\n    @use_counter\n    @check_update\n    def collection_record(self):\n        self.flush()\n        return self.collection.api_response()\n\n    @use_counter\n    @check_update\n    def __getitem__(self, item):\n        if item == '.arvados#collection':\n            if self.collection_record_file is None:\n                self.collection_record_file = FuncToJSONFile(\n                    self.inode, self.collection_record)\n                
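# Editor's note (hedged): FuncToJSONFile serves the result of\n                # collection_record() as JSON each time the file is read,\n                # so e.g. 'cat .arvados#collection' inside a mounted\n                # collection returns the collection's current API record.\n                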
self.inodes.add_entry(self.collection_record_file)\n            self.invalidate()  # use lookup as a signal to force update\n            return self.collection_record_file\n        else:\n            return super(CollectionDirectory, self).__getitem__(item)\n\n    def __contains__(self, k):\n        if k == '.arvados#collection':\n            return True\n        else:\n            return super(CollectionDirectory, self).__contains__(k)\n\n    def invalidate(self):\n        if self.collection_record_file is not None:\n            self.collection_record_file.invalidate()\n            self.inodes.invalidate_inode(self.collection_record_file)\n        super(CollectionDirectory, self).invalidate()\n\n    def persisted(self):\n        return (self.collection_locator is not None)\n\n    def objsize(self):\n        # This is a rough guess of the amount of overhead involved for\n        # a collection; the assumption is that each file averages\n        # 128 bytes in the manifest but consumes 1024 bytes of Python\n        # data structures, so 1024/128=8 means we estimate the RAM\n        # footprint at 8 times the size of the bare manifest text.\n        return self._manifest_size * 8\n\n    def finalize(self):\n        if self.collection is None:\n            return\n\n        if self.writable():\n            try:\n                self.collection.save()\n            except Exception:\n                _logger.exception(\"Failed to save collection %s\", self.collection_locator)\n        self.collection.stop_threads()\n\n    def clear(self):\n        if self.collection is not None:\n            self.collection.stop_threads()\n        self._manifest_size = 0\n        super(CollectionDirectory, self).clear()\n        if self.collection_record_file is not None:\n            self.inodes.del_entry(self.collection_record_file)\n        self.collection_record_file = None\n\n\nclass TmpCollectionDirectory(CollectionDirectoryBase):\n    \"\"\"A directory backed by an Arvados collection that never gets saved.\n\n    This supports using Keep as scratch space. 
A userspace program can\n    read the .arvados#collection file to get a current manifest in\n    order to save a snapshot of the scratch data or use it as a crunch\n    job output.\n    \"\"\"\n\n    class UnsaveableCollection(arvados.collection.Collection):\n        def save(self):\n            pass\n        def save_new(self):\n            pass\n\n    def __init__(self, parent_inode, inodes, api_client, num_retries, enable_write, filters=None, storage_classes=None):\n        collection = self.UnsaveableCollection(\n            api_client=api_client,\n            keep_client=api_client.keep,\n            num_retries=num_retries,\n            storage_classes_desired=storage_classes)\n        # This is always enable_write=True because it never tries to\n        # save to the backend\n        super(TmpCollectionDirectory, self).__init__(\n            parent_inode, inodes, True, filters, collection, self)\n        self._poll = False\n        self.populate(self.mtime())\n\n    def collection_record(self):\n        with llfuse.lock_released:\n            return {\n                \"uuid\": None,\n                \"manifest_text\": self.collection.manifest_text(),\n                \"portable_data_hash\": self.collection.portable_data_hash(),\n                \"storage_classes_desired\": self.collection.storage_classes_desired(),\n            }\n\n    def __contains__(self, k):\n        return (k == '.arvados#collection' or\n                super(TmpCollectionDirectory, self).__contains__(k))\n\n    @use_counter\n    def __getitem__(self, item):\n        if item == '.arvados#collection':\n            if self.collection_record_file is None:\n                self.collection_record_file = FuncToJSONFile(\n                    self.inode, self.collection_record)\n                self.inodes.add_entry(self.collection_record_file)\n            return self.collection_record_file\n        return super(TmpCollectionDirectory, self).__getitem__(item)\n\n    def persisted(self):\n        return False\n\n    def writable(self):\n        return True\n\n    def flush(self):\n        pass\n\n    def want_event_subscribe(self):\n        return False\n\n    def finalize(self):\n        self.collection.stop_threads()\n\n    def invalidate(self):\n        if self.collection_record_file:\n            self.collection_record_file.invalidate()\n        super(TmpCollectionDirectory, self).invalidate()\n\n\nclass MagicDirectory(Directory):\n    \"\"\"A special directory that logically contains the set of all extant keep locators.\n\n    When a file is referenced by lookup(), it is tested to see if it is a valid\n    keep locator to a manifest, and if so, loads the manifest contents as a\n    subdirectory of this directory with the locator as the directory name.\n    Since querying a list of all extant keep locators is impractical, only\n    collections that have already been accessed are visible to readdir().\n\n    \"\"\"\n\n    README_TEXT = \"\"\"\nThis directory provides access to Arvados collections as subdirectories listed\nby uuid (in the form 'zzzzz-4zz18-1234567890abcde') or portable data hash (in\nthe form '1234567890abcdef0123456789abcdef+123'), and Arvados projects by uuid\n(in the form 'zzzzz-j7d0g-1234567890abcde').\n\nNote that this directory will appear empty until you attempt to access a\nspecific collection or project subdirectory (such as trying to 'cd' into it),\nat which point the collection or project will actually be looked up on the server\nand the directory will appear if it 
exists.\n\n\"\"\".lstrip()\n\n    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,\n                 pdh_only=False, storage_classes=None, poll_time=15):\n        super(MagicDirectory, self).__init__(parent_inode, inodes, enable_write, filters)\n        self.api = api\n        self.num_retries = num_retries\n        self.pdh_only = pdh_only\n        self.storage_classes = storage_classes\n        self._poll = False\n        self._poll_time = poll_time\n\n    def __setattr__(self, name, value):\n        super(MagicDirectory, self).__setattr__(name, value)\n        # When we're assigned an inode, add a README.\n        if ((name == 'inode') and (self.inode is not None) and\n              (not self._entries)):\n            self._entries['README'] = self.inodes.add_entry(\n                StringFile(self.inode, self.README_TEXT, time.time()))\n            # If we're the root directory, add an identical by_id subdirectory.\n            if self.inode == llfuse.ROOT_INODE:\n                self._entries['by_id'] = self.inodes.add_entry(MagicDirectory(\n                    self.inode,\n                    self.inodes,\n                    self.api,\n                    self.num_retries,\n                    self._enable_write,\n                    self._filters,\n                    pdh_only=self.pdh_only,\n                    storage_classes=self.storage_classes,\n                    poll_time=self._poll_time\n                ))\n\n    def __contains__(self, k):\n        if k in self._entries:\n            return True\n\n        if not portable_data_hash_pattern.match(k) and (self.pdh_only or not uuid_pattern.match(k)):\n            return False\n\n        try:\n            e = None\n\n            if group_uuid_pattern.match(k):\n                project = self.api.groups().list(\n                    filters=[\n                        ['group_class', 'in', ['project','filter']],\n                        [\"uuid\", \"=\", k],\n                        *self._filters_for('groups', qualified=False),\n                    ],\n                ).execute(num_retries=self.num_retries)\n                if project[u'items_available'] == 0:\n                    return False\n                e = self.inodes.add_entry(ProjectDirectory(\n                    self.inode,\n                    self.inodes,\n                    self.api,\n                    self.num_retries,\n                    self._enable_write,\n                    self._filters,\n                    project[u'items'][0],\n                    storage_classes=self.storage_classes,\n                    poll_time=self._poll_time\n                ))\n            else:\n                e = self.inodes.add_entry(CollectionDirectory(\n                    self.inode,\n                    self.inodes,\n                    self.api,\n                    self.num_retries,\n                    self._enable_write,\n                    self._filters,\n                    k,\n                    poll_time=self._poll_time\n                ))\n\n            if e.update():\n                if k not in self._entries:\n                    self._entries[k] = e\n                else:\n                    self.inodes.del_entry(e)\n                return True\n            else:\n                self.inodes.invalidate_entry(self, k)\n                self.inodes.del_entry(e)\n                return False\n        except Exception as ex:\n            _logger.exception(\"arv-mount lookup '%s':\", k)\n            if e is not None:\n           
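     # The entry was added with inodes.add_entry above; delete it so the\n                # failed lookup doesn't leave an orphaned inode behind.\n           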
     self.inodes.del_entry(e)\n            return False\n\n    def __getitem__(self, item):\n        if item in self:\n            return self._entries[item]\n        else:\n            raise KeyError(\"No collection with id \" + item)\n\n    def clear(self):\n        pass\n\n    def want_event_subscribe(self):\n        return not self.pdh_only\n\n\nclass TagsDirectory(Directory):\n    \"\"\"A special directory that contains as subdirectories all tags visible to the user.\"\"\"\n\n    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, poll_time=60):\n        super(TagsDirectory, self).__init__(parent_inode, inodes, enable_write, filters)\n        self.api = api\n        self.num_retries = num_retries\n        self._poll = True\n        self._poll_time = poll_time\n        self._extra = set()\n\n    def want_event_subscribe(self):\n        return True\n\n    @use_counter\n    def update(self):\n        with llfuse.lock_released:\n            tags = self.api.links().list(\n                filters=[\n                    ['link_class', '=', 'tag'],\n                    ['name', '!=', ''],\n                    *self._filters_for('links', qualified=False),\n                ],\n                select=['name'],\n                distinct=True,\n                limit=1000,\n            ).execute(num_retries=self.num_retries)\n        if \"items\" in tags:\n            self.merge(\n                tags['items']+[{\"name\": n} for n in self._extra],\n                lambda i: i['name'],\n                lambda a, i: a.tag == i['name'],\n                lambda i: TagDirectory(\n                    self.inode,\n                    self.inodes,\n                    self.api,\n                    self.num_retries,\n                    self._enable_write,\n                    self._filters,\n                    i['name'],\n                    poll=self._poll,\n                    poll_time=self._poll_time,\n                ),\n            )\n\n    @use_counter\n    @check_update\n    def __getitem__(self, item):\n        if super(TagsDirectory, self).__contains__(item):\n            return super(TagsDirectory, self).__getitem__(item)\n        with llfuse.lock_released:\n            tags = self.api.links().list(\n                filters=[\n                    ['link_class', '=', 'tag'],\n                    ['name', '=', item],\n                    *self._filters_for('links', qualified=False),\n                ],\n                limit=1,\n            ).execute(num_retries=self.num_retries)\n        if tags[\"items\"]:\n            self._extra.add(item)\n            self.update()\n        return super(TagsDirectory, self).__getitem__(item)\n\n    @use_counter\n    @check_update\n    def __contains__(self, k):\n        if super(TagsDirectory, self).__contains__(k):\n            return True\n        try:\n            self[k]\n            return True\n        except KeyError:\n            pass\n        return False\n\n\nclass TagDirectory(Directory):\n    \"\"\"A special directory that contains as subdirectories all collections visible\n    to the user that are tagged with a particular tag.\n    \"\"\"\n\n    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters, tag,\n                 poll=False, poll_time=60):\n        super(TagDirectory, self).__init__(parent_inode, inodes, enable_write, filters)\n        self.api = api\n        self.num_retries = num_retries\n        self.tag = tag\n        self._poll = poll\n        self._poll_time = poll_time\n\n  
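  # With poll=True, this directory goes stale poll_time seconds after\n    # its last update, and the next access re-fetches the list of tagged\n    # collections.\n\n  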
  def want_event_subscribe(self):\n        return True\n\n    @use_counter\n    def update(self):\n        with llfuse.lock_released:\n            taggedcollections = self.api.links().list(\n                filters=[\n                    ['link_class', '=', 'tag'],\n                    ['name', '=', self.tag],\n                    ['head_uuid', 'is_a', 'arvados#collection'],\n                    *self._filters_for('links', qualified=False),\n                ],\n                select=['head_uuid'],\n            ).execute(num_retries=self.num_retries)\n        self.merge(\n            taggedcollections['items'],\n            lambda i: i['head_uuid'],\n            lambda a, i: a.collection_locator == i['head_uuid'],\n            lambda i: CollectionDirectory(\n                self.inode,\n                self.inodes,\n                self.api,\n                self.num_retries,\n                self._enable_write,\n                self._filters,\n                i['head_uuid'],\n            ),\n        )\n\n\nclass ProjectDirectory(Directory):\n    \"\"\"A special directory that contains the contents of a project.\"\"\"\n\n    __slots__ = (\"api\", \"num_retries\", \"project_object\", \"project_object_file\",\n                 \"project_uuid\", \"_updating_lock\",\n                 \"_current_user\", \"_full_listing\", \"storage_classes\", \"recursively_contained\")\n\n    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,\n                 project_object, poll_time=15, storage_classes=None):\n        super(ProjectDirectory, self).__init__(parent_inode, inodes, enable_write, filters)\n        self.api = api\n        self.num_retries = num_retries\n        self.project_object = project_object\n        self.project_object_file = None\n        self.project_uuid = project_object['uuid']\n        self._poll = True\n        self._poll_time = poll_time\n        self._updating_lock = threading.Lock()\n        self._current_user = None\n        self._full_listing = False\n        self.storage_classes = storage_classes\n        self.recursively_contained = False\n\n        # Filter groups can contain themselves, which causes tools\n        # that walk the filesystem to get stuck in an infinite loop,\n        # so suppress returning a listing in that case.\n        if self.project_object.get(\"group_class\") == \"filter\":\n            iter_parent_inode = parent_inode\n            while iter_parent_inode != llfuse.ROOT_INODE:\n                parent_dir = self.inodes[iter_parent_inode]\n                if isinstance(parent_dir, ProjectDirectory) and parent_dir.project_uuid == self.project_uuid:\n                    self.recursively_contained = True\n                    break\n                iter_parent_inode = parent_dir.parent_inode\n\n    def want_event_subscribe(self):\n        return True\n\n    def createDirectory(self, i):\n        common_args = (self.inode, self.inodes, self.api, self.num_retries, self._enable_write, self._filters)\n        if collection_uuid_pattern.match(i['uuid']):\n            return CollectionDirectory(*common_args, i, poll_time=self._poll_time)\n        elif group_uuid_pattern.match(i['uuid']):\n            return ProjectDirectory(*common_args, i, poll_time=self._poll_time,\n                                    storage_classes=self.storage_classes)\n        elif link_uuid_pattern.match(i['uuid']):\n            if i['head_kind'] == 'arvados#collection' or portable_data_hash_pattern.match(i['head_uuid']):\n                return 
CollectionDirectory(*common_args, i['head_uuid'], poll_time=self._poll_time)\n            else:\n                return None\n        elif uuid_pattern.match(i['uuid']):\n            return ObjectFile(self.parent_inode, i)\n        else:\n            return None\n\n    def uuid(self):\n        return self.project_uuid\n\n    def items(self):\n        self._full_listing = True\n        return super(ProjectDirectory, self).items()\n\n    def namefn(self, i):\n        if 'name' in i:\n            if i['name'] is None or len(i['name']) == 0:\n                return None\n            elif \"uuid\" in i and (collection_uuid_pattern.match(i['uuid']) or group_uuid_pattern.match(i['uuid'])):\n                # collection or subproject\n                return i['name']\n            elif link_uuid_pattern.match(i['uuid']) and i['head_kind'] == 'arvados#collection':\n                # name link\n                return i['name']\n            elif 'kind' in i and i['kind'].startswith('arvados#'):\n                # something else\n                return \"{}.{}\".format(i['name'], i['kind'][8:])\n        else:\n            return None\n\n    @use_counter\n    def update(self):\n        if self.project_object_file is None:\n            self.project_object_file = ObjectFile(self.inode, self.project_object)\n            self.inodes.add_entry(self.project_object_file)\n\n        if self.recursively_contained or not self._full_listing:\n            return True\n\n        def samefn(a, i):\n            if isinstance(a, CollectionDirectory) or isinstance(a, ProjectDirectory):\n                return a.uuid() == i['uuid']\n            elif isinstance(a, ObjectFile):\n                return a.uuid() == i['uuid'] and not a.stale()\n            return False\n\n        try:\n            with llfuse.lock_released:\n                self._updating_lock.acquire()\n                if not self.stale():\n                    return\n\n                if group_uuid_pattern.match(self.project_uuid):\n                    self.project_object = self.api.groups().get(\n                        uuid=self.project_uuid).execute(num_retries=self.num_retries)\n                elif user_uuid_pattern.match(self.project_uuid):\n                    self.project_object = self.api.users().get(\n                        uuid=self.project_uuid).execute(num_retries=self.num_retries)\n                # do this in 2 steps until #17424 is fixed\n                contents = list(arvados.util.keyset_list_all(\n                    self.api.groups().contents,\n                    order_key='uuid',\n                    num_retries=self.num_retries,\n                    uuid=self.project_uuid,\n                    filters=[\n                        ['uuid', 'is_a', 'arvados#group'],\n                        ['groups.group_class', 'in', ['project', 'filter']],\n                        *self._filters_for('groups', qualified=True),\n                    ],\n                ))\n                contents.extend(obj for obj in arvados.util.keyset_list_all(\n                    self.api.groups().contents,\n                    order_key='uuid',\n                    num_retries=self.num_retries,\n                    uuid=self.project_uuid,\n                    filters=[\n                        ['uuid', 'is_a', 'arvados#collection'],\n                        *self._filters_for('collections', qualified=True),\n                    ],\n                ) if obj['current_version_uuid'] == obj['uuid'])\n            # end with llfuse.lock_released, re-acquire lock\n\n   
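         # merge() compares the fetched listing with the current entries,\n            # creating, reusing, or deleting inodes so they match the server.\n   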
         self.merge(contents,\n                       self.namefn,\n                       samefn,\n                       self.createDirectory)\n            return True\n        finally:\n            self._updating_lock.release()\n\n    def _add_entry(self, i, name):\n        ent = self.createDirectory(i)\n        self._entries[name] = self.inodes.add_entry(ent)\n        return self._entries[name]\n\n    @use_counter\n    @check_update\n    def __getitem__(self, k):\n        if k == '.arvados#project':\n            return self.project_object_file\n        elif self._full_listing or super(ProjectDirectory, self).__contains__(k):\n            return super(ProjectDirectory, self).__getitem__(k)\n        with llfuse.lock_released:\n            k2 = self.unsanitize_filename(k)\n            if k2 == k:\n                namefilter = [\"name\", \"=\", k]\n            else:\n                namefilter = [\"name\", \"in\", [k, k2]]\n            contents = self.api.groups().list(\n                filters=[\n                    [\"owner_uuid\", \"=\", self.project_uuid],\n                    [\"group_class\", \"in\", [\"project\",\"filter\"]],\n                    namefilter,\n                    *self._filters_for('groups', qualified=False),\n                ],\n                limit=2,\n            ).execute(num_retries=self.num_retries)[\"items\"]\n            if not contents:\n                contents = self.api.collections().list(\n                    filters=[\n                        [\"owner_uuid\", \"=\", self.project_uuid],\n                        namefilter,\n                        *self._filters_for('collections', qualified=False),\n                    ],\n                    limit=2,\n                ).execute(num_retries=self.num_retries)[\"items\"]\n        if contents:\n            if len(contents) > 1 and contents[1]['name'] == k:\n                # If \"foo/bar\" and \"foo[SUBST]bar\" both exist, use\n                # \"foo[SUBST]bar\".\n                contents = [contents[1]]\n            name = self.sanitize_filename(self.namefn(contents[0]))\n            if name != k:\n                raise KeyError(k)\n            return self._add_entry(contents[0], name)\n\n        # Didn't find item\n        raise KeyError(k)\n\n    def __contains__(self, k):\n        if k == '.arvados#project':\n            return True\n        try:\n            self[k]\n            return True\n        except KeyError:\n            pass\n        return False\n\n    @use_counter\n    @check_update\n    def writable(self):\n        if not self._enable_write:\n            return False\n        return self.project_object.get(\"can_write\") is True\n\n    def persisted(self):\n        return True\n\n    def clear(self):\n        super(ProjectDirectory, self).clear()\n        if self.project_object_file is not None:\n            self.inodes.del_entry(self.project_object_file)\n        self.project_object_file = None\n\n    @use_counter\n    @check_update\n    def mkdir(self, name):\n        if not self.writable():\n            raise llfuse.FUSEError(errno.EROFS)\n\n        try:\n            with llfuse.lock_released:\n                c = {\n                    \"owner_uuid\": self.project_uuid,\n                    \"name\": name,\n                    \"manifest_text\": \"\" }\n                if self.storage_classes is not None:\n                    c[\"storage_classes_desired\"] = self.storage_classes\n                try:\n                    
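# Create an empty collection with the requested name to back the\n                    # new directory entry.\n                    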
self.api.collections().create(body=c).execute(num_retries=self.num_retries)\n                except Exception:\n                    raise\n            self.invalidate()\n        except apiclient_errors.Error as error:\n            _logger.error(error)\n            raise llfuse.FUSEError(errno.EEXIST)\n\n    @use_counter\n    @check_update\n    def rmdir(self, name):\n        if not self.writable():\n            raise llfuse.FUSEError(errno.EROFS)\n\n        if name not in self:\n            raise llfuse.FUSEError(errno.ENOENT)\n        if not isinstance(self[name], CollectionDirectory):\n            raise llfuse.FUSEError(errno.EPERM)\n        if len(self[name]) > 0:\n            raise llfuse.FUSEError(errno.ENOTEMPTY)\n        with llfuse.lock_released:\n            self.api.collections().delete(uuid=self[name].uuid()).execute(num_retries=self.num_retries)\n        self.invalidate()\n\n    @use_counter\n    @check_update\n    def rename(self, name_old, name_new, src):\n        if not self.writable():\n            raise llfuse.FUSEError(errno.EROFS)\n\n        if not isinstance(src, ProjectDirectory):\n            raise llfuse.FUSEError(errno.EPERM)\n\n        ent = src[name_old]\n\n        if not isinstance(ent, CollectionDirectory):\n            raise llfuse.FUSEError(errno.EPERM)\n\n        if name_new in self:\n            # POSIX semantics for replacing one directory with another are\n            # tricky (the target directory must be empty, and the operation\n            # must be atomic, which isn't possible with the Arvados API as\n            # of this writing), so don't support that.\n            raise llfuse.FUSEError(errno.EPERM)\n\n        self.api.collections().update(uuid=ent.uuid(),\n                                      body={\"owner_uuid\": self.uuid(),\n                                            \"name\": name_new}).execute(num_retries=self.num_retries)\n\n        # Actually move the entry from the source directory to this directory.\n        del src._entries[name_old]\n        self._entries[name_new] = ent\n        self.inodes.invalidate_entry(src, name_old)\n\n    @use_counter\n    def child_event(self, ev):\n        properties = ev.get(\"properties\") or {}\n        old_attrs = properties.get(\"old_attributes\") or {}\n        new_attrs = properties.get(\"new_attributes\") or {}\n        old_attrs[\"uuid\"] = ev[\"object_uuid\"]\n        new_attrs[\"uuid\"] = ev[\"object_uuid\"]\n        old_name = self.sanitize_filename(self.namefn(old_attrs))\n        new_name = self.sanitize_filename(self.namefn(new_attrs))\n\n        # create events will have a new name, but not an old name\n        # delete events will have an old name, but not a new name\n        # update events will have an old and new name, and they may be the same or different\n        # if they are the same, an unrelated field changed and there is nothing to do.\n\n        if old_attrs.get(\"owner_uuid\") != self.project_uuid:\n            # Was moved from somewhere else, so don't try to remove entry.\n            old_name = None\n        if ev.get(\"object_owner_uuid\") != self.project_uuid:\n            # Was moved to somewhere else, so don't try to add entry.\n            new_name = None\n\n        if old_attrs.get(\"is_trashed\"):\n            # Was previously deleted\n            old_name = None\n        if new_attrs.get(\"is_trashed\"):\n            # Has been deleted\n            new_name = None\n\n        if new_name != old_name:\n            ent = None\n            if old_name in self._entries:\n               
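 # Detach the entry under its old name; it is re-attached under\n                # new_name below if the object is still in this project.\n               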
 ent = self._entries[old_name]\n                del self._entries[old_name]\n                self.inodes.invalidate_entry(self, old_name)\n\n            if new_name:\n                if ent is not None:\n                    self._entries[new_name] = ent\n                else:\n                    self._add_entry(new_attrs, new_name)\n            elif ent is not None:\n                self.inodes.del_entry(ent)\n\n\nclass SharedDirectory(Directory):\n    \"\"\"A special directory that represents users or groups who have shared projects with me.\"\"\"\n\n    def __init__(self, parent_inode, inodes, api, num_retries, enable_write, filters,\n                 exclude, poll_time=60, storage_classes=None):\n        super(SharedDirectory, self).__init__(parent_inode, inodes, enable_write, filters)\n        self.api = api\n        self.num_retries = num_retries\n        self.current_user = api.users().current().execute(num_retries=num_retries)\n        self._poll = True\n        self._poll_time = poll_time\n        self._updating_lock = threading.Lock()\n        self.storage_classes = storage_classes\n\n    @use_counter\n    def update(self):\n        try:\n            with llfuse.lock_released:\n                self._updating_lock.acquire()\n                if not self.stale():\n                    return\n\n                contents = {}\n                roots = []\n                root_owners = set()\n                objects = {}\n\n                methods = self.api._rootDesc.get('resources')[\"groups\"]['methods']\n                if 'httpMethod' in methods.get('shared', {}):\n                    page = []\n                    while True:\n                        resp = self.api.groups().shared(\n                            filters=[\n                                ['group_class', 'in', ['project','filter']],\n                                *page,\n                                *self._filters_for('groups', qualified=False),\n                            ],\n                            order=\"uuid\",\n                            limit=10000,\n                            count=\"none\",\n                            include=\"owner_uuid\",\n                        ).execute()\n                        if not resp[\"items\"]:\n                            break\n                        page = [[\"uuid\", \">\", resp[\"items\"][len(resp[\"items\"])-1][\"uuid\"]]]\n                        for r in resp[\"items\"]:\n                            objects[r[\"uuid\"]] = r\n                            roots.append(r[\"uuid\"])\n                        for r in resp[\"included\"]:\n                            objects[r[\"uuid\"]] = r\n                            root_owners.add(r[\"uuid\"])\n                else:\n                    all_projects = list(arvados.util.keyset_list_all(\n                        self.api.groups().list,\n                        order_key=\"uuid\",\n                        num_retries=self.num_retries,\n                        filters=[\n                            ['group_class', 'in', ['project','filter']],\n                            *self._filters_for('groups', qualified=False),\n                        ],\n                        select=[\"uuid\", \"owner_uuid\"],\n                    ))\n                    for ob in all_projects:\n                        objects[ob['uuid']] = ob\n\n                    current_uuid = self.current_user['uuid']\n                    for ob in all_projects:\n                        if ob['owner_uuid'] != current_uuid and ob['owner_uuid'] not 
in objects:\n                            roots.append(ob['uuid'])\n                            root_owners.add(ob['owner_uuid'])\n\n                    lusers = arvados.util.keyset_list_all(\n                        self.api.users().list,\n                        order_key=\"uuid\",\n                        num_retries=self.num_retries,\n                        filters=[\n                            ['uuid', 'in', list(root_owners)],\n                            *self._filters_for('users', qualified=False),\n                        ],\n                    )\n                    lgroups = arvados.util.keyset_list_all(\n                        self.api.groups().list,\n                        order_key=\"uuid\",\n                        num_retries=self.num_retries,\n                        filters=[\n                            ['uuid', 'in', list(root_owners)+roots],\n                            *self._filters_for('groups', qualified=False),\n                        ],\n                    )\n                    for l in lusers:\n                        objects[l[\"uuid\"]] = l\n                    for l in lgroups:\n                        objects[l[\"uuid\"]] = l\n\n                for r in root_owners:\n                    if r in objects:\n                        obr = objects[r]\n                        if obr.get(\"name\"):\n                            contents[obr[\"name\"]] = obr\n                        elif \"first_name\" in obr:\n                            contents[u\"{} {}\".format(obr[\"first_name\"], obr[\"last_name\"])] = obr\n\n                for r in roots:\n                    if r in objects:\n                        obr = objects[r]\n                        if obr['owner_uuid'] not in objects:\n                            contents[obr[\"name\"]] = obr\n\n            # end with llfuse.lock_released, re-acquire lock\n\n            self.merge(\n                contents.items(),\n                lambda i: i[0],\n                lambda a, i: a.uuid() == i[1]['uuid'],\n                lambda i: ProjectDirectory(\n                    self.inode,\n                    self.inodes,\n                    self.api,\n                    self.num_retries,\n                    self._enable_write,\n                    self._filters,\n                    i[1],\n                    poll_time=self._poll_time,\n                    storage_classes=self.storage_classes,\n                ),\n            )\n        except Exception:\n            _logger.exception(\"arv-mount shared dir error\")\n        finally:\n            self._updating_lock.release()\n\n    def want_event_subscribe(self):\n        return True\n"
  },
  {
    "path": "services/fuse/arvados_fuse/fusefile.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport json\nimport llfuse\nimport logging\nimport re\nimport time\n\nfrom .fresh import FreshBase, convertTime, check_update\n\n_logger = logging.getLogger('arvados.arvados_fuse')\n\nclass File(FreshBase):\n    \"\"\"Base for file objects.\"\"\"\n\n    __slots__ = (\"inode\", \"parent_inode\", \"_mtime\")\n\n    def __init__(self, parent_inode, _mtime=0, poll=False, poll_time=0):\n        super(File, self).__init__()\n        self.inode = None\n        self.parent_inode = parent_inode\n        self._mtime = _mtime\n        self._poll = poll\n        self._poll_time = poll_time\n\n    def size(self):\n        return 0\n\n    def readfrom(self, off, size, num_retries=0):\n        return ''\n\n    def writeto(self, off, size, num_retries=0):\n        raise Exception(\"Not writable\")\n\n    def mtime(self):\n        return self._mtime\n\n    def clear(self):\n        pass\n\n    def writable(self):\n        return False\n\n    def flush(self):\n        pass\n\n\nclass FuseArvadosFile(File):\n    \"\"\"Wraps a ArvadosFile.\"\"\"\n\n    __slots__ = ('arvfile', '_enable_write')\n\n    def __init__(self, parent_inode, arvfile, _mtime, enable_write, poll, poll_time):\n        super(FuseArvadosFile, self).__init__(parent_inode, _mtime, poll=poll, poll_time=poll_time)\n        self.arvfile = arvfile\n        self._enable_write = enable_write\n\n    def size(self):\n        with llfuse.lock_released:\n            return self.arvfile.size()\n\n    def readfrom(self, off, size, num_retries=0):\n        with llfuse.lock_released:\n            return self.arvfile.readfrom(off, size, num_retries, exact=True, return_memoryview=True)\n\n    def writeto(self, off, buf, num_retries=0):\n        with llfuse.lock_released:\n            return self.arvfile.writeto(off, buf, num_retries)\n\n    def writable(self):\n        return self._enable_write and self.arvfile.writable()\n\n    def flush(self):\n        with llfuse.lock_released:\n            if self.writable():\n                self.arvfile.parent.root_collection().save()\n\n    def clear(self):\n        if self.parent_inode is None:\n            self.arvfile.fuse_entry = None\n            self.arvfile = None\n\n\nclass StringFile(File):\n    \"\"\"Wrap a simple string as a file\"\"\"\n\n    __slots__ = (\"contents\",)\n\n    def __init__(self, parent_inode, contents, _mtime):\n        super(StringFile, self).__init__(parent_inode, _mtime)\n        self.contents = contents\n\n    def size(self):\n        return len(self.contents)\n\n    def readfrom(self, off, size, num_retries=0):\n        return bytes(self.contents[off:(off+size)], encoding='utf-8')\n\n\nclass ObjectFile(StringFile):\n    \"\"\"Wrap a dict as a serialized json object.\"\"\"\n\n    __slots__ = (\"object_uuid\",)\n\n    def __init__(self, parent_inode, obj):\n        super(ObjectFile, self).__init__(parent_inode, \"\", 0)\n        self.object_uuid = obj['uuid']\n        self.update(obj)\n\n    def uuid(self):\n        return self.object_uuid\n\n    def update(self, obj=None):\n        if obj is None:\n            # TODO: retrieve the current record for self.object_uuid\n            # from the server. For now, at least don't crash when\n            # someone tells us it's a good time to update but doesn't\n            # pass us a fresh obj. 
See #8345\n            return\n        self._mtime = convertTime(obj['modified_at']) if 'modified_at' in obj else 0\n        self.contents = json.dumps(obj, indent=4, sort_keys=True) + \"\\n\"\n\n    def persisted(self):\n        return True\n\n\nclass FuncToJSONFile(StringFile):\n    \"\"\"File content is the return value of a given function, encoded as JSON.\n\n    The function is called at the time the file is read. The result is\n    cached until invalidate() is called.\n    \"\"\"\n\n    __slots__ = (\"func\",)\n\n    def __init__(self, parent_inode, func):\n        super(FuncToJSONFile, self).__init__(parent_inode, \"\", 0)\n        self.func = func\n\n        # invalidate_inode() is asynchronous with no callback to wait for. In\n        # order to guarantee userspace programs don't get stale data that was\n        # generated before the last invalidate(), we must disallow inode\n        # caching entirely.\n        self.allow_attr_cache = False\n\n    @check_update\n    def size(self):\n        return super(FuncToJSONFile, self).size()\n\n    @check_update\n    def readfrom(self, *args, **kwargs):\n        return super(FuncToJSONFile, self).readfrom(*args, **kwargs)\n\n    def update(self):\n        self._mtime = time.time()\n        obj = self.func()\n        self.contents = json.dumps(obj, indent=4, sort_keys=True) + \"\\n\"\n        self.fresh()\n"
  },
  {
    "path": "services/fuse/arvados_fuse/unmount.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport collections\nimport errno\nimport os\nimport subprocess\nimport sys\nimport time\n\n\nMountInfo = collections.namedtuple(\n    'MountInfo', ['is_fuse', 'major', 'minor', 'mnttype', 'path'])\n\n\ndef mountinfo():\n    mi = []\n    with open('/proc/self/mountinfo') as f:\n        for m in f.readlines():\n            mntid, pmntid, dev, root, path, extra = m.split(\" \", 5)\n            mnttype = extra.split(\" - \")[1].split(\" \", 1)[0]\n            major, minor = dev.split(\":\")\n            mi.append(MountInfo(\n                is_fuse=(mnttype == \"fuse\" or mnttype.startswith(\"fuse.\")),\n                major=major,\n                minor=minor,\n                mnttype=mnttype,\n                path=path,\n            ))\n    return mi\n\n\ndef paths_to_unmount(path, mnttype):\n    paths = []\n    for m in mountinfo():\n        if m.path == path or m.path.startswith(path+\"/\"):\n            paths.append(m.path)\n            if not (m.is_fuse and (mnttype is None or\n                                   mnttype == m.mnttype)):\n                raise Exception(\n                    \"cannot unmount {}: mount type is {}\".format(\n                        path, m.mnttype))\n    return paths\n\n\ndef safer_realpath(path, loop=True):\n    \"\"\"Similar to os.path.realpath(), but avoids calling lstat().\n\n    Leaves some symlinks unresolved.\"\"\"\n    if path == '/':\n        return path, True\n    elif not path.startswith('/'):\n        path = os.path.abspath(path)\n    while True:\n        path = path.rstrip('/')\n        dirname, basename = os.path.split(path)\n        try:\n            path, resolved = safer_realpath(os.path.join(dirname, os.readlink(path)), loop=False)\n        except OSError as e:\n            # Path is not a symlink (EINVAL), or is unreadable, or\n            # doesn't exist. If the error was EINVAL and dirname can\n            # be resolved, we will have eliminated all symlinks and it\n            # will be safe to call normpath().\n            dirname, resolved = safer_realpath(dirname, loop=loop)\n            path = os.path.join(dirname, basename)\n            if resolved and e.errno == errno.EINVAL:\n                return os.path.normpath(path), True\n            else:\n                return path, False\n        except RuntimeError:\n            if not loop:\n                # Unwind to the point where we first started following\n                # symlinks.\n                raise\n            # Resolving the whole path landed in a symlink cycle, but\n            # we might still be able to resolve dirname.\n            dirname, _ = safer_realpath(dirname, loop=loop)\n            return os.path.join(dirname, basename), False\n\n\ndef unmount(path, subtype=None, timeout=10, recursive=False):\n    \"\"\"Unmount the fuse mount at path.\n\n    Unmounting is done by writing 1 to the \"abort\" control file in\n    sysfs to kill the fuse driver process, then executing \"fusermount\n    -u -z\" to detach the mount point, and repeating these steps until\n    the mount is no longer listed in /proc/self/mountinfo.\n\n    This procedure should enable a non-root user to reliably unmount\n    their own fuse filesystem without risk of deadlock.\n\n    Returns True if unmounting was successful, False if it wasn't a\n    fuse mount at all. 
Raises an exception if it cannot be unmounted.\n    \"\"\"\n\n    path, _ = safer_realpath(path)\n\n    if subtype is None:\n        mnttype = None\n    elif subtype == '':\n        mnttype = 'fuse'\n    else:\n        mnttype = 'fuse.' + subtype\n\n    if recursive:\n        paths = paths_to_unmount(path, mnttype)\n        if not paths:\n            # We might not have found any mounts merely because path\n            # contains symlinks, so we should resolve them and try\n            # again. We didn't do this from the outset because\n            # realpath() can hang (see explanation below).\n            paths = paths_to_unmount(os.path.realpath(path), mnttype)\n        for path in sorted(paths, key=len, reverse=True):\n            unmount(path, timeout=timeout, recursive=False)\n        return len(paths) > 0\n\n    was_mounted = False\n    attempted = False\n    fusermount_output = b''\n    if timeout is None:\n        deadline = None\n    else:\n        deadline = time.time() + timeout\n\n    while True:\n        mounted = False\n        for m in mountinfo():\n            if m.is_fuse and (mnttype is None or mnttype == m.mnttype):\n                try:\n                    if m.path == path:\n                        was_mounted = True\n                        mounted = True\n                        break\n                except OSError:\n                    continue\n        if not was_mounted and path != os.path.realpath(path):\n            # If the specified path contains symlinks, it won't appear\n            # verbatim in mountinfo.\n            #\n            # It might seem like we should have called realpath() from\n            # the outset. But we can't: realpath() hangs (in lstat())\n            # if we call it on an unresponsive mount point, and this\n            # is an important and common scenario.\n            #\n            # By waiting until now to try realpath(), we avoid this\n            # problem in the most common cases, which are: (1) the\n            # specified path has no symlinks and is a mount point, in\n            # which case was_mounted==True and we can proceed without\n            # calling realpath(); and (2) the specified path is not a\n            # mount point (e.g., it was already unmounted by someone\n            # else, or it's a typo), and realpath() can determine that\n            # without hitting any other unresponsive mounts.\n            path = os.path.realpath(path)\n            continue\n        elif not mounted:\n            if was_mounted:\n                # This appears to avoid a race condition where we\n                # return control to the caller after running\n                # \"fusermount -u -z\" (see below), the caller (e.g.,\n                # arv-mount --replace) immediately tries to attach a\n                # new fuse mount at the same mount point, the\n                # lazy-unmount process unmounts that _new_ mount while\n                # it is being initialized, and the setup code waits\n                # forever for the new mount to be initialized.\n                time.sleep(1)\n            return was_mounted\n\n        if attempted:\n            # Report buffered stderr from previous call to fusermount,\n            # now that we know it didn't succeed.\n            sys.stderr.buffer.write(fusermount_output)\n\n            delay = 1\n            if deadline:\n                delay = min(delay, deadline - time.time())\n                if delay <= 0:\n                    raise Exception(\"timed out\")\n            
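# Pause before the next abort/fusermount attempt; delay was clamped\n            # above so we don't sleep past the deadline.\n            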
time.sleep(delay)\n\n        try:\n            with open('/sys/fs/fuse/connections/{}/abort'.format(m.minor),\n                      'w') as f:\n                f.write(\"1\")\n        except IOError as e:\n            if e.errno != errno.ENOENT:\n                raise\n\n        attempted = True\n        try:\n            subprocess.check_output(\n                [\"fusermount\", \"-u\", \"-z\", path],\n                stderr=subprocess.STDOUT)\n        except subprocess.CalledProcessError as e:\n            fusermount_output = e.output\n        else:\n            fusermount_output = b''\n"
  },
  {
    "path": "services/fuse/arvados_version.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport dataclasses\nimport os\nimport re\nimport runpy\nimport subprocess\nimport typing as t\n\nfrom pathlib import Path, PurePath, PurePosixPath\n\nimport setuptools\nimport setuptools.command.build\n\nSETUP_DIR = Path(__file__).absolute().parent\nVERSION_SCRIPT_PATH = PurePath('build', 'version-at-commit.sh')\n# Built by ArvadosPythonPackage.register\nARVADOS_PYTHON_MODULES: dict[str, 'ArvadosPythonPackage'] = {}\n\n### Metadata generation\n\n@dataclasses.dataclass\nclass ArvadosPythonPackage:\n    package_name: str\n    module_name: str\n    src_path: PurePath\n    dependencies: t.Sequence['ArvadosPythonPackage']\n\n    _VERSION_SUBS = {\n        'development-': '',\n        '~dev': '.dev',\n        '~rc': 'rc',\n    }\n\n    @classmethod\n    def register(\n            cls,\n            package_name: str,\n            module_name: str,\n            src_path: PurePath | str,\n            *dependencies: str,\n    ) -> 'ArvadosPythonPackage':\n        if not isinstance(src_path, PurePath):\n            src_path = PurePosixPath(src_path)\n        deps = [ARVADOS_PYTHON_MODULES[key] for key in dependencies]\n        this_pkg = cls(package_name, module_name, src_path, deps)\n        ARVADOS_PYTHON_MODULES[package_name] = this_pkg\n        return this_pkg\n\n    def version_file_path(self):\n        return PurePath(self.module_name, '_version.py')\n\n    def _workspace_path(self, workdir):\n        try:\n            workspace = Path(os.environ['WORKSPACE'])\n            # This will raise ValueError if they're not related,\n            # in which case we don't want to use this $WORKSPACE.\n            workdir.relative_to(workspace)\n        except KeyError:\n            # $WORKSPACE isn't set. 
Fall back to the Git worktree toplevel.\n            try:\n                git_proc = subprocess.run(\n                    ['git', 'rev-parse', '--show-toplevel'],\n                    capture_output=True,\n                    check=True,\n                    cwd=workdir,\n                    text=True,\n                )\n                workspace = Path(git_proc.stdout.removesuffix('\\n'))\n            except (subprocess.CalledProcessError, FileNotFoundError, ValueError):\n                return None\n        except ValueError:\n            return None\n        if (workspace / VERSION_SCRIPT_PATH).exists():\n            return workspace\n        else:\n            return None\n\n    def _git_version(self, workdir):\n        workspace = self._workspace_path(workdir)\n        if workspace is None:\n            return None\n        git_log_cmd = [\n            'git', 'log', '-n1', '--format=%H', '--',\n            str(VERSION_SCRIPT_PATH), str(self.src_path),\n        ]\n        git_log_cmd.extend(str(dep.src_path) for dep in self.dependencies)\n        git_log_proc = subprocess.run(\n            git_log_cmd,\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        version_proc = subprocess.run(\n            [str(VERSION_SCRIPT_PATH), git_log_proc.stdout.rstrip('\\n')],\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        return version_proc.stdout.rstrip('\\n')\n\n    def _sdist_version(self, workdir):\n        try:\n            pkg_info = (workdir / 'PKG-INFO').open()\n        except FileNotFoundError:\n            return None\n        with pkg_info:\n            for line in pkg_info:\n                key, _, val = line.partition(': ')\n                if key == 'Version':\n                    return val.rstrip('\\n')\n        raise Exception(\"found PKG-INFO file but not Version metadata in it\")\n\n    def get_version(self, workdir=SETUP_DIR):\n        version = (\n            # If we're building out of a distribution, we should pass that\n            # version through unchanged.\n            self._sdist_version(workdir)\n            # Otherwise follow the usual Arvados versioning rules.\n            or os.environ.get('ARVADOS_BUILDING_VERSION')\n            or self._git_version(workdir)\n        )\n        if not version:\n            raise Exception(f\"no version information available for {self.package_name}\")\n        else:\n            return re.sub(\n                r'(^development-|~dev|~rc)',\n                lambda match: self._VERSION_SUBS[match.group(0)],\n                version,\n            )\n\n    def get_dependencies_version(self, workdir=SETUP_DIR, version=None):\n        if version is None:\n            version = self.get_version(workdir)\n        # A packaged development release should be installed with other\n        # development packages built from the same source, but those\n        # dependencies may have earlier \"dev\" versions (read: less recent\n        # Git commit timestamps). This compatible version dependency\n        # expresses that as closely as possible. 
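For example, a\n        # version like 1.2.3.dev456 yields dependency pins of the form\n        # \"arvados-python-client ~= 1.2.3.dev0\". 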
Allowing versions\n        # compatible with .dev0 allows any development release.\n        # Regular expression borrowed partially from\n        # <https://packaging.python.org/en/latest/specifications/version-specifiers/#version-specifiers-regex>\n        dep_ver, match_count = re.subn(r'\\.dev(0|[1-9][0-9]*)$', '.dev0', version, 1)\n        return ('~=' if match_count else '==', dep_ver)\n\n    def iter_dependencies(self, workdir=SETUP_DIR, version=None, extras=None):\n        if extras is None:\n            extras = {}\n        dep_op, dep_ver = self.get_dependencies_version(workdir, version)\n        for dep in self.dependencies:\n            try:\n                dep_extras = f'[{\",\".join(extras[dep.package_name])}]'\n            except KeyError:\n                dep_extras = ''\n            yield f'{dep.package_name}{dep_extras} {dep_op} {dep_ver}'\n\n\n### Package database\n\nArvadosPythonPackage.register(\n    'arvados-python-client',\n    'arvados',\n    'sdk/python',\n)\nArvadosPythonPackage.register(\n    'crunchstat_summary',\n    'crunchstat_summary',\n    'tools/crunchstat-summary',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cluster-activity',\n    'arvados_cluster_activity',\n    'tools/cluster-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cwl-runner',\n    'arvados_cwl',\n    'sdk/cwl',\n    'arvados-python-client',\n    'crunchstat_summary',\n)\nArvadosPythonPackage.register(\n    'arvados_fuse',\n    'arvados_fuse',\n    'services/fuse',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-user-activity',\n    'arvados_user_activity',\n    'tools/user-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-tools',\n    'NO SRCDIR',\n    'tools/python-metapackage',\n    *ARVADOS_PYTHON_MODULES,\n)\nArvadosPythonPackage.register(\n    'arvados-docker-cleaner',\n    'arvados_docker',\n    'services/dockercleaner',\n)\n\n### setuptools integration\n\nclass BuildArvadosVersion(setuptools.Command):\n    \"\"\"Write _version.py for an Arvados module\"\"\"\n    def initialize_options(self):\n        self.build_lib = None\n\n    def finalize_options(self):\n        self.set_undefined_options(\"build_py\", (\"build_lib\", \"build_lib\"))\n        arv_mod = ARVADOS_PYTHON_MODULES[self.distribution.get_name()]\n        self.out_path = Path(self.build_lib, arv_mod.version_file_path())\n\n    def run(self):\n        with self.out_path.open('w') as out_file:\n            print(f'__version__ = {self.distribution.get_version()!r}', file=out_file)\n\n    def get_outputs(self):\n        return [str(self.out_path)]\n\n    def get_source_files(self):\n        return []\n\n    def get_output_mapping(self):\n        return {}\n\n\nclass ArvadosBuildCommand(setuptools.command.build.build):\n    sub_commands = [\n        *setuptools.command.build.build.sub_commands,\n        ('build_arvados_version', None),\n    ]\n\n\nCMDCLASS = {\n    'build': ArvadosBuildCommand,\n    'build_arvados_version': BuildArvadosVersion,\n}\n"
  },
  {
    "path": "services/fuse/bin/arv-mount",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados_fuse.command\n\nif __name__ == '__main__':\n    args = arvados_fuse.command.ArgumentParser().parse_args()\n    arvados_fuse.command.Mount(args).run()\n"
  },
  {
    "path": "services/fuse/fpm-info.sh",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# We depend on the fuse package because arv-mount may run the `fusermount` tool.\nfpm_depends+=(fuse)\n\ncase \"$TARGET\" in\n    centos*|rocky*)\n        # We depend on libfuse for llfuse.\n        # We should declare a libcurl dependency, but it's a little academic\n        # because rpm itself depends on it, so we can be pretty sure it's installed.\n        fpm_depends+=(fuse-libs)\n        ;;\n\n    debian12 | ubuntu2204 )\n        # See comment below for rationale.\n        fpm_depends+=(libfuse2 libcurl4)\n        ;;\n\n    debian* | ubuntu* )\n        # We depend on libfuse2 for llfuse.\n        # We depend on libcurl because the Python SDK does for its Keep client.\n        fpm_depends+=(libfuse2t64 libcurl4t64)\n        ;;\nesac\n"
  },
  {
    "path": "services/fuse/pyproject.toml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[build-system]\nrequires = [\"setuptools ~= 80.9\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\ndynamic = [\"dependencies\", \"version\"]\nname = \"arvados_fuse\"\ndescription = \"Arvados FUSE driver\"\nauthors = [\n  {name = \"Arvados\", email = \"info@arvados.org\"},\n]\nclassifiers = [\n  \"Development Status :: 5 - Production/Stable\",\n  \"Environment :: Console\",\n  \"Intended Audience :: Science/Research\",\n  \"Operating System :: POSIX\",\n  \"Programming Language :: Python :: 3\",\n  \"Programming Language :: Python :: 3.10\",\n  \"Programming Language :: Python :: 3.11\",\n  \"Programming Language :: Python :: 3.12\",\n  \"Programming Language :: Python :: 3.13\",\n  \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n]\nlicense = \"AGPL-3.0-only\"\nlicense-files = [\n  \"agpl-3.0.txt\",\n]\nreadme = \"README.rst\"\nrequires-python = \"~= 3.10\"\n\n[project.urls]\nHomepage = \"https://arvados.org\"\nDocumentation = \"https://doc.arvados.org\"\nRepository = \"https://github.com/arvados/arvados\"\nIssues = \"https://github.com/arvados/arvados/issues\"\nChangelog = \"https://arvados.org/releases/\"\n\n[tool.setuptools]\nscript-files = [\n  \"bin/arv-mount\",\n]\n\n[tool.setuptools.data-files]\n\"share/doc/arvados_fuse\" = [\n  \"agpl-3.0.txt\",\n  \"README.rst\",\n]\n\n[tool.setuptools.packages.find]\nexclude = [\"tests*\"]\n"
  },
  {
    "path": "services/fuse/setup.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport setuptools\nimport runpy\n\nfrom pathlib import Path\n\narvados_version = runpy.run_path(Path(__file__).with_name('arvados_version.py'))\narv_mod = arvados_version['ARVADOS_PYTHON_MODULES']['arvados_fuse']\nversion = arv_mod.get_version()\nsetuptools.setup(\n    cmdclass=arvados_version['CMDCLASS'],\n    install_requires=[\n        *arv_mod.iter_dependencies(version=version),\n        'arvados-llfuse >= 1.5.1',\n        'python-daemon',\n        'ciso8601 >= 2.0.0',\n        'setuptools',\n        \"prometheus_client\"\n    ],\n    version=version,\n)\n"
  },
  {
    "path": "services/fuse/tests/__init__.py",
    "content": ""
  },
  {
    "path": "services/fuse/tests/fstest.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport os\nimport subprocess\nimport sys\n\nfrom multiprocessing import Process\nfrom . import prof\n\ndef fn(n):\n    return \"file%i\" % n\n\ndef createfiles(d, n):\n    for j in range(1, 5):\n        print(\"Starting small file %s %i, %i\" % (d, n, j))\n        if d:\n            os.mkdir(d)\n            ld = os.listdir('.')\n            if d not in ld:\n                print(\"ERROR %s missing\" % d)\n            os.chdir(d)\n\n        for i in range(n, n+10):\n            with open(fn(i), \"w\") as f:\n                f.write(fn(i))\n\n        ld = os.listdir('.')\n        for i in range(n, n+10):\n            if fn(i) not in ld:\n                print(\"ERROR %s missing\" % fn(i))\n\n        for i in range(n, n+10):\n            with open(fn(i), \"r\") as f:\n                if f.read() != fn(i):\n                    print(\"ERROR %s doesn't have expected contents\" % fn(i))\n\n        for i in range(n, n+10):\n            os.remove(fn(i))\n\n        ld = os.listdir('.')\n        for i in range(n, n+10):\n            if fn(i) in ld:\n                print(\"ERROR %s should have been removed\" % fn(i))\n\n        if d:\n            os.chdir('..')\n            os.rmdir(d)\n            ld = os.listdir('.')\n            if d in ld:\n                print(\"ERROR %s should have been removed\" % d)\n\n\ndef createbigfile(d, n):\n    for j in range(1, 5):\n        print(\"Starting big file %s %i, %i\" % (d, n, j))\n        i = n\n        if d:\n            os.mkdir(d)\n            ld = os.listdir('.')\n            if d not in ld:\n                print(\"ERROR %s missing\" % d)\n            os.chdir(d)\n\n        with open(fn(i), \"w\") as f:\n            for j in range(0, 1000):\n                f.write((str(j) + fn(i)) * 10000)\n\n        ld = os.listdir('.')\n        if fn(i) not in ld:\n            print(\"ERROR %s missing\" % fn(i))\n\n        with open(fn(i), \"r\") as f:\n            for j in range(0, 1000):\n                expect = (str(j) + fn(i)) * 10000\n                if f.read(len(expect)) != expect:\n                    print(\"ERROR %s doesn't have expected contents\" % fn(i))\n\n        os.remove(fn(i))\n\n        ld = os.listdir('.')\n        if fn(i) in ld:\n            print(\"ERROR %s should have been removed\" % fn(i))\n\n        if d:\n            os.chdir('..')\n            os.rmdir(d)\n            ld = os.listdir('.')\n            if d in ld:\n                print(\"ERROR %s should have been removed\" % d)\n\ndef do_ls():\n    with open(\"/dev/null\", \"w\") as nul:\n        for j in range(1, 50):\n            subprocess.call([\"ls\", \"-l\"], stdout=nul, stderr=nul)\n\ndef runit(target, indir):\n    procs = []\n    for n in range(0, 20):\n        if indir:\n            p = Process(target=target, args=(\"dir%i\" % n, n*10,))\n        else:\n            p = Process(target=target, args=(\"\", n*10,))\n        p.start()\n        procs.append(p)\n\n    p = Process(target=do_ls, args=())\n    p.start()\n    procs.append(p)\n\n    for p in procs:\n        p.join()\n\n    if os.listdir('.'):\n        print(\"ERROR there are left over files in the directory\")\n\n\nif __name__ == '__main__':\n    if os.listdir('.'):\n        print(\"ERROR starting directory is not empty\")\n        sys.exit()\n\n    print(\"Single directory small files\")\n    with prof.CountTime():\n        runit(createfiles, False)\n\n    print(\"Separate directories small files\")\n    
with prof.CountTime():\n        runit(createfiles, True)\n\n    print(\"Single directory large files\")\n    with prof.CountTime():\n        runit(createbigfile, False)\n\n    print(\"Separate directories large files\")\n    with prof.CountTime():\n        runit(createbigfile, True)\n"
  },
  {
    "path": "services/fuse/tests/integration_test.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados\nimport arvados_fuse\nimport arvados_fuse.command\nimport atexit\nimport functools\nimport inspect\nimport logging\nimport multiprocessing\nimport os\nimport signal\nimport sys\nimport tempfile\nimport unittest\n\nimport pytest\n\nfrom . import run_test_server\n\n@atexit.register\ndef _pool_cleanup():\n    if _pool is None:\n        return\n    _pool.close()\n    _pool.join()\n\n\ndef wrap_static_test_method(modName, clsName, funcName, args, kwargs):\n    class Test(unittest.TestCase):\n        def runTest(self, *args, **kwargs):\n            getattr(getattr(sys.modules[modName], clsName), funcName)(self, *args, **kwargs)\n    Test().runTest(*args, **kwargs)\n\n\n# To avoid Python's threading+multiprocessing=deadlock problems, we\n# use a single global pool with maxtasksperchild=None for the entire\n# test suite.\n_pool = None\ndef workerPool():\n    global _pool\n    if _pool is None:\n        _pool = multiprocessing.Pool(processes=1, maxtasksperchild=None)\n    return _pool\n\n\nclass IntegrationTest(unittest.TestCase):\n    def pool_test(self, *args, **kwargs):\n        \"\"\"Run a static method as a unit test, in a different process.\n\n        If called by method 'foobar', the static method '_foobar' of\n        the same class will be called in the other process.\n        \"\"\"\n        modName = inspect.getmodule(self).__name__\n        clsName = self.__class__.__name__\n        funcName = inspect.currentframe().f_back.f_code.co_name\n        workerPool().apply(\n            wrap_static_test_method,\n            (modName, clsName, '_'+funcName, args, kwargs))\n\n    @classmethod\n    def setUpClass(cls):\n        run_test_server.run()\n        run_test_server.run_keep(blob_signing=True, num_servers=2)\n\n    @classmethod\n    def tearDownClass(cls):\n        run_test_server.stop_keep(num_servers=2)\n\n    def setUp(self):\n        self.mnt = tempfile.mkdtemp()\n        run_test_server.authorize_with('active')\n\n    def tearDown(self):\n        os.rmdir(self.mnt)\n        run_test_server.reset()\n\n    @staticmethod\n    def mount(argv):\n        \"\"\"Decorator. Sets up a FUSE mount at self.mnt with the given args.\"\"\"\n        def decorator(func):\n            @functools.wraps(func)\n            def wrapper(self, *args, **kwargs):\n                self.mount = None\n                try:\n                    with arvados_fuse.command.Mount(\n                            arvados_fuse.command.ArgumentParser().parse_args(\n                                argv + ['--foreground',\n                                        '--unmount-timeout=60',\n                                        self.mnt])) as self.mount:\n                        return func(self, *args, **kwargs)\n                finally:\n                    if self.mount and self.mount.llfuse_thread.is_alive():\n                        # pytest uses exit status 2 when test collection failed.\n                        # A UnitTest failing in setup/teardown counts as a\n                        # collection failure, so pytest will exit with status 2\n                        # no matter what status you specify here. 
run-tests.sh\n                        # looks for this status, so specify 2 just to keep\n                        # everything as consistent as possible.\n                        # TODO: If we refactor these tests so they're not built\n                        # on unittest, consider using a dedicated, non-pytest\n                        # exit code like TEMPFAIL.\n                        pytest.exit(\"llfuse thread outlived test - aborting test suite to avoid deadlock\", 2)\n            return wrapper\n        return decorator\n"
  },
  {
    "path": "services/fuse/tests/mount_test_base.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados\nimport arvados.keep\nimport arvados_fuse as fuse\nimport arvados.safeapi\nimport llfuse\nimport logging\nimport multiprocessing\nimport os\nimport shutil\nimport signal\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\nimport unittest\n\nimport pytest\n\nfrom . import run_test_server\nfrom .integration_test import workerPool\n\nlogger = logging.getLogger('arvados.arv-mount')\n\nclass MountTestBase(unittest.TestCase):\n    disk_cache = False\n\n    @classmethod\n    def setUpClass(cls):\n        if cls.disk_cache:\n            cls._disk_cache_dir = tempfile.mkdtemp(prefix='MountTest-')\n        else:\n            cls._disk_cache_dir = None\n        cls._keep_block_cache = arvados.keep.KeepBlockCache(\n            disk_cache=cls.disk_cache,\n            disk_cache_dir=cls._disk_cache_dir,\n        )\n\n    def setUp(self, api=None, local_store=True):\n        # The underlying C implementation of open() makes a fstat() syscall\n        # with the GIL still held.  When the GETATTR message comes back to\n        # llfuse (which in these tests is in the same interpreter process) it\n        # can't acquire the GIL, so it can't service the fstat() call, so it\n        # deadlocks.  The workaround is to run some of our test code in a\n        # separate process.  Forturnately the multiprocessing module makes this\n        # relatively easy.\n\n        self.pool = workerPool()\n        if local_store:\n            self.keeptmp = tempfile.mkdtemp()\n            os.environ['KEEP_LOCAL_STORE'] = self.keeptmp\n        else:\n            self.keeptmp = None\n        self.mounttmp = tempfile.mkdtemp()\n        run_test_server.run()\n        run_test_server.authorize_with(\"admin\")\n\n        self.api = api if api else arvados.safeapi.ThreadSafeApiCache(\n            arvados.config.settings(),\n            keep_params={\"block_cache\": self._keep_block_cache},\n            version='v1',\n        )\n        self.llfuse_thread = None\n\n    @classmethod\n    def tearDownClass(cls):\n        if cls._disk_cache_dir:\n            shutil.rmtree(cls._disk_cache_dir)\n\n    # This is a copy of Mount's method.  
TODO: Refactor MountTestBase\n    # to use a Mount instead of copying its code.\n    def _llfuse_main(self):\n        try:\n            llfuse.main()\n        except:\n            llfuse.close(unmount=False)\n            raise\n        llfuse.close()\n\n    def make_mount(self, root_class, fuse_options=None, **root_kwargs):\n        if fuse_options is None:\n            fuse_options = []\n        enable_write = root_kwargs.pop('enable_write', True)\n        self.operations = fuse.Operations(\n            os.getuid(),\n            os.getgid(),\n            api_client=self.api,\n            enable_write=enable_write,\n        )\n        self.operations.inodes.add_entry(root_class(\n            llfuse.ROOT_INODE,\n            self.operations.inodes,\n            self.api,\n            0,\n            enable_write,\n            root_kwargs.pop('filters', None),\n            **root_kwargs,\n        ))\n        llfuse.init(self.operations, self.mounttmp, fuse_options)\n        self.llfuse_thread = threading.Thread(None, lambda: self._llfuse_main())\n        self.llfuse_thread.daemon = True\n        self.llfuse_thread.start()\n        # wait until the driver is finished initializing\n        self.operations.initlock.wait()\n        return self.operations.inodes[llfuse.ROOT_INODE]\n\n    def tearDown(self):\n        if self.llfuse_thread:\n            if self.operations.events:\n                self.operations.events.close(timeout=10)\n            subprocess.call([\"fusermount\", \"-u\", \"-z\", self.mounttmp])\n            t0 = time.time()\n            self.llfuse_thread.join(timeout=60)\n            if self.llfuse_thread.is_alive():\n                # pytest uses exit status 2 when test collection failed.\n                # A UnitTest failing in setup/teardown counts as a\n                # collection failure, so pytest will exit with status 2\n                # no matter what status you specify here. run-tests.sh\n                # looks for this status, so specify 2 just to keep\n                # everything as consistent as possible.\n                # TODO: If we refactor these tests so they're not built\n                # on unittest, consider using a dedicated, non-pytest\n                # exit code like TEMPFAIL.\n                pytest.exit(\"llfuse thread outlived test - aborting test suite to avoid deadlock\", 2)\n            waited = time.time() - t0\n            if waited > 0.1:\n                logger.warning(\"MountTestBase.tearDown(): waited %f s for llfuse thread to end\", waited)\n\n        os.rmdir(self.mounttmp)\n        if self.keeptmp:\n            shutil.rmtree(self.keeptmp)\n            os.environ.pop('KEEP_LOCAL_STORE')\n        run_test_server.reset()\n\n    def assertDirContents(self, subdir, expect_content):\n        path = self.mounttmp\n        if subdir:\n            path = os.path.join(path, subdir)\n        self.assertEqual(sorted(expect_content), sorted(llfuse.listdir(str(path))))\n"
  },
  {
    "path": "services/fuse/tests/prof.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport time\n\nclass CountTime(object):\n    def __init__(self, tag=\"\", size=None):\n        self.tag = tag\n        self.size = size\n\n    def __enter__(self):\n        self.start = time.time()\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        sec = (time.time() - self.start)\n        th = \"\"\n        if self.size:\n            th = \"throughput %s/sec\" % (self.size // sec)\n        print(\"%s time %s micoseconds %s\" % (self.tag, sec*1000000, th))\n"
  },
  {
    "path": "services/fuse/tests/test_cache.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados\nimport arvados.collection\nimport arvados_fuse\nimport arvados_fuse.command\nimport json\nimport logging\nimport os\nimport tempfile\nimport unittest\n\nfrom .integration_test import IntegrationTest\nfrom .mount_test_base import MountTestBase\n\nclass CacheTest(IntegrationTest):\n    mnt_args = [\"--by-id\", \"--directory-cache=0\"]\n\n    @IntegrationTest.mount(argv=mnt_args)\n    def test_cache_spill(self):\n        pdh = []\n        for i in range(0, 8):\n            cw = arvados.collection.Collection()\n            f = cw.open(\"blurg%i\" % i, \"w\")\n            f.write(\"bloop%i\" % i)\n\n            cw.mkdirs(\"dir%i\" % i)\n            f = cw.open(\"dir%i/blurg\" % i, \"w\")\n            f.write(\"dirbloop%i\" % i)\n\n            cw.save_new()\n            pdh.append(cw.portable_data_hash())\n        self.pool_test(self.mnt, pdh)\n\n    @staticmethod\n    def _test_cache_spill(self, mnt, pdh):\n        for i,v in enumerate(pdh):\n            j = os.path.join(mnt, \"by_id\", v, \"blurg%i\" % i)\n            self.assertTrue(os.path.exists(j))\n            j = os.path.join(mnt, \"by_id\", v, \"dir%i/blurg\" % i)\n            self.assertTrue(os.path.exists(j))\n\n        for i,v in enumerate(pdh):\n            j = os.path.join(mnt, \"by_id\", v, \"blurg%i\" % i)\n            self.assertTrue(os.path.exists(j))\n            j = os.path.join(mnt, \"by_id\", v, \"dir%i/blurg\" % i)\n            self.assertTrue(os.path.exists(j))\n"
  },
  {
    "path": "services/fuse/tests/test_command_args.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados\nimport arvados_fuse\nimport arvados_fuse.command\nimport contextlib\nimport functools\nimport io\nimport json\nimport llfuse\nimport logging\nimport os\nimport sys\nimport tempfile\nimport unittest\nimport resource\n\nfrom unittest import mock\n\nfrom . import run_test_server\n\ndef noexit(func):\n    \"\"\"If argparse or arvados_fuse tries to exit, fail the test instead\"\"\"\n    class SystemExitCaught(Exception):\n        pass\n    @functools.wraps(func)\n    def wrapper(*args, **kwargs):\n        try:\n            return func(*args, **kwargs)\n        except SystemExit:\n            raise SystemExitCaught\n    return wrapper\n\n@contextlib.contextmanager\ndef nostderr():\n    with open(os.devnull, 'w') as dn:\n        orig, sys.stderr = sys.stderr, dn\n        try:\n            yield\n        finally:\n            sys.stderr = orig\n\n\nclass MountArgsTest(unittest.TestCase):\n    def setUp(self):\n        self.mntdir = tempfile.mkdtemp()\n        run_test_server.authorize_with('active')\n\n    def tearDown(self):\n        os.rmdir(self.mntdir)\n\n    def lookup(self, mnt, *path):\n        ent = mnt.operations.inodes[llfuse.ROOT_INODE]\n        for p in path:\n            ent = ent[p]\n        return ent\n\n    @contextlib.contextmanager\n    def stderrMatches(self, stderr):\n        orig, sys.stderr = sys.stderr, stderr\n        try:\n            yield\n        finally:\n            sys.stderr = orig\n\n    def check_ent_type(self, cls, *path):\n        ent = self.lookup(self.mnt, *path)\n        self.assertEqual(ent.__class__, cls)\n        return ent\n\n    @noexit\n    def test_default_all(self):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--refresh-time=27',\n            '--foreground', self.mntdir])\n        self.assertEqual(args.mode, None)\n        self.mnt = arvados_fuse.command.Mount(args)\n\n        e = self.check_ent_type(arvados_fuse.ProjectDirectory, 'home')\n        self.assertEqual(e.project_object['uuid'],\n                         run_test_server.fixture('users')['active']['uuid'])\n        self.assertEqual(e._poll_time, 27)\n\n        e = self.check_ent_type(arvados_fuse.MagicDirectory, 'by_id')\n        self.assertEqual(e._poll_time, 27)\n\n        e = self.check_ent_type(arvados_fuse.StringFile, 'README')\n        readme = e.readfrom(0, -1).decode()\n        self.assertRegex(readme, r'active-user@arvados\\.local')\n        self.assertRegex(readme, r'\\n$')\n\n        e = self.check_ent_type(arvados_fuse.StringFile, 'by_id', 'README')\n        txt = e.readfrom(0, -1).decode()\n        self.assertRegex(txt, r'portable data hash')\n        self.assertRegex(txt, r'\\n$')\n\n    @noexit\n    def test_by_id(self):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--by-id',\n            '--foreground', self.mntdir])\n        self.assertEqual(args.mode, 'by_id')\n        self.mnt = arvados_fuse.command.Mount(args)\n        e = self.check_ent_type(arvados_fuse.MagicDirectory)\n        self.assertEqual(e.pdh_only, False)\n        self.assertEqual(True, self.mnt.listen_for_events)\n\n    @noexit\n    def test_by_pdh(self):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--by-pdh',\n            '--foreground', self.mntdir])\n        self.assertEqual(args.mode, 'by_pdh')\n        self.mnt = arvados_fuse.command.Mount(args)\n        e = 
self.check_ent_type(arvados_fuse.MagicDirectory)\n        self.assertEqual(e.pdh_only, True)\n        self.assertEqual(False, self.mnt.listen_for_events)\n\n    @noexit\n    def test_by_tag(self):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--by-tag',\n            '--foreground', self.mntdir])\n        self.assertEqual(args.mode, 'by_tag')\n        self.mnt = arvados_fuse.command.Mount(args)\n        e = self.check_ent_type(arvados_fuse.TagsDirectory)\n        self.assertEqual(True, self.mnt.listen_for_events)\n\n    @noexit\n    def test_collection(self, id_type='uuid'):\n        c = run_test_server.fixture('collections')['public_text_file']\n        cid = c[id_type]\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--collection', cid,\n            '--refresh-time=27',\n            '--foreground', self.mntdir])\n        self.mnt = arvados_fuse.command.Mount(args)\n        e = self.check_ent_type(arvados_fuse.CollectionDirectory)\n        self.assertEqual(e.collection_locator, cid)\n        self.assertEqual(id_type == 'uuid', self.mnt.listen_for_events)\n        if id_type == 'uuid':\n            self.assertEqual(e._poll_time, 27)\n        else:\n            self.assertGreaterEqual(e._poll_time, 60*60)\n\n    def test_collection_pdh(self):\n        self.test_collection('portable_data_hash')\n\n    @noexit\n    def test_home(self):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--home',\n            '--foreground', self.mntdir])\n        self.assertEqual(args.mode, 'home')\n        self.mnt = arvados_fuse.command.Mount(args)\n        e = self.check_ent_type(arvados_fuse.ProjectDirectory)\n        self.assertEqual(e.project_object['uuid'],\n                         run_test_server.fixture('users')['active']['uuid'])\n        self.assertEqual(True, self.mnt.listen_for_events)\n        self.assertEqual(e._poll_time, 15)\n\n    def test_mutually_exclusive_args(self):\n        cid = run_test_server.fixture('collections')['public_text_file']['uuid']\n        gid = run_test_server.fixture('groups')['aproject']['uuid']\n        for badargs in [\n                ['--mount-tmp', 'foo', '--collection', cid],\n                ['--mount-tmp', 'foo', '--project', gid],\n                ['--collection', cid, '--project', gid],\n                ['--by-id', '--project', gid],\n                ['--mount-tmp', 'foo', '--by-id'],\n        ]:\n            with nostderr():\n                with self.assertRaises(SystemExit):\n                    args = arvados_fuse.command.ArgumentParser().parse_args(\n                        badargs + ['--foreground', self.mntdir])\n                    arvados_fuse.command.Mount(args)\n    @noexit\n    def test_project(self):\n        uuid = run_test_server.fixture('groups')['aproject']['uuid']\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--project', uuid,\n            '--foreground', self.mntdir])\n        self.mnt = arvados_fuse.command.Mount(args)\n        e = self.check_ent_type(arvados_fuse.ProjectDirectory)\n        self.assertEqual(e.project_object['uuid'], uuid)\n        self.assertEqual(e._poll_time, 15)\n\n    @noexit\n    def test_shared(self):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--shared',\n            '--foreground', self.mntdir])\n        self.assertEqual(args.mode, 'shared')\n        self.mnt = arvados_fuse.command.Mount(args)\n        e = 
self.check_ent_type(arvados_fuse.SharedDirectory)\n        self.assertEqual(e.current_user['uuid'],\n                         run_test_server.fixture('users')['active']['uuid'])\n        self.assertEqual(True, self.mnt.listen_for_events)\n\n    def test_version_argument(self):\n        # The argparse version action prints to stderr in Python 2 and stdout\n        # in Python 3.4 and up. Write both to the same stream so the test can pass\n        # in both cases.\n        origerr = sys.stderr\n        origout = sys.stdout\n        sys.stderr = io.StringIO()\n        sys.stdout = sys.stderr\n\n        with self.assertRaises(SystemExit):\n            args = arvados_fuse.command.ArgumentParser().parse_args(['--version'])\n        self.assertRegex(sys.stdout.getvalue(), r'[0-9]+\\.[0-9]+\\.[0-9]+')\n        sys.stderr.close()\n        sys.stderr = origerr\n        sys.stdout = origout\n\n    @noexit\n    @mock.patch('arvados.events.subscribe')\n    def test_disable_event_listening(self, mock_subscribe):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--disable-event-listening',\n            '--by-id',\n            '--foreground', self.mntdir])\n        self.mnt = arvados_fuse.command.Mount(args)\n        self.assertEqual(True, self.mnt.listen_for_events)\n        self.assertEqual(True, self.mnt.args.disable_event_listening)\n        with self.mnt:\n            pass\n        self.assertEqual(0, mock_subscribe.call_count)\n\n    @noexit\n    @mock.patch('arvados.events.subscribe')\n    def test_custom(self, mock_subscribe):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--mount-tmp', 'foo',\n            '--mount-tmp', 'bar',\n            '--mount-home', 'my_home',\n            '--foreground', self.mntdir])\n        self.assertEqual(args.mode, None)\n        self.mnt = arvados_fuse.command.Mount(args)\n        self.check_ent_type(arvados_fuse.Directory)\n        self.check_ent_type(arvados_fuse.TmpCollectionDirectory, 'foo')\n        self.check_ent_type(arvados_fuse.TmpCollectionDirectory, 'bar')\n        e = self.check_ent_type(arvados_fuse.ProjectDirectory, 'my_home')\n        self.assertEqual(e.project_object['uuid'],\n                         run_test_server.fixture('users')['active']['uuid'])\n        self.assertEqual(True, self.mnt.listen_for_events)\n        with self.mnt:\n            pass\n        self.assertEqual(1, mock_subscribe.call_count)\n\n    @noexit\n    @mock.patch('arvados.events.subscribe')\n    def test_custom_no_listen(self, mock_subscribe):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--mount-by-pdh', 'pdh',\n            '--mount-tmp', 'foo',\n            '--mount-tmp', 'bar',\n            '--foreground', self.mntdir])\n        self.mnt = arvados_fuse.command.Mount(args)\n        self.assertEqual(False, self.mnt.listen_for_events)\n        with self.mnt:\n            pass\n        self.assertEqual(0, mock_subscribe.call_count)\n\n    def test_custom_unsupported_layouts(self):\n        for name in ['.', '..', '', 'foo/bar', '/foo']:\n            with nostderr():\n                with self.assertRaises(SystemExit):\n                    args = arvados_fuse.command.ArgumentParser().parse_args([\n                        '--mount-tmp', name,\n                        '--foreground', self.mntdir])\n                    arvados_fuse.command.Mount(args)\n\n    @noexit\n    @mock.patch('resource.setrlimit')\n    @mock.patch('resource.getrlimit')\n    def test_default_file_cache(self, 
getrlimit, setrlimit):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--foreground', self.mntdir])\n        self.assertEqual(args.mode, None)\n        getrlimit.return_value = (1024, 1048576)\n        self.mnt = arvados_fuse.command.Mount(args)\n        setrlimit.assert_called_with(resource.RLIMIT_NOFILE, (10240, 1048576))\n\n    @noexit\n    @mock.patch('resource.setrlimit')\n    @mock.patch('resource.getrlimit')\n    def test_small_file_cache(self, getrlimit, setrlimit):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--foreground', '--file-cache=256000000', self.mntdir])\n        self.assertEqual(args.mode, None)\n        getrlimit.return_value = (1024, 1048576)\n        self.mnt = arvados_fuse.command.Mount(args)\n        setrlimit.assert_not_called()\n\n    @noexit\n    @mock.patch('resource.setrlimit')\n    @mock.patch('resource.getrlimit')\n    def test_large_file_cache(self, getrlimit, setrlimit):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--foreground', '--file-cache=256000000000', self.mntdir])\n        self.assertEqual(args.mode, None)\n        getrlimit.return_value = (1024, 1048576)\n        self.mnt = arvados_fuse.command.Mount(args)\n        setrlimit.assert_called_with(resource.RLIMIT_NOFILE, (30517, 1048576))\n\n    @noexit\n    @mock.patch('resource.setrlimit')\n    @mock.patch('resource.getrlimit')\n    def test_file_cache_hard_limit(self, getrlimit, setrlimit):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--foreground', '--file-cache=256000000000', self.mntdir])\n        self.assertEqual(args.mode, None)\n        getrlimit.return_value = (1024, 2048)\n        self.mnt = arvados_fuse.command.Mount(args)\n        setrlimit.assert_called_with(resource.RLIMIT_NOFILE, (2048, 2048))\n\nclass MountErrorTest(unittest.TestCase):\n    def setUp(self):\n        self.mntdir = tempfile.mkdtemp()\n        run_test_server.run()\n        run_test_server.authorize_with(\"active\")\n        self.logger = logging.getLogger(\"null\")\n        self.logger.setLevel(logging.CRITICAL+1)\n\n    def tearDown(self):\n        if os.path.exists(self.mntdir):\n            # If the directory was not unmounted, this will raise an exception.\n            os.rmdir(self.mntdir)\n        run_test_server.reset()\n\n    def test_no_token(self):\n        del arvados.config._settings[\"ARVADOS_API_TOKEN\"]\n        arvados.config._settings = {}\n        with self.assertRaises(SystemExit) as ex:\n            args = arvados_fuse.command.ArgumentParser().parse_args([self.mntdir])\n            arvados_fuse.command.Mount(args, logger=self.logger).run()\n        self.assertEqual(1, ex.exception.code)\n\n    def test_no_host(self):\n        del arvados.config._settings[\"ARVADOS_API_HOST\"]\n        with self.assertRaises(SystemExit) as ex:\n            args = arvados_fuse.command.ArgumentParser().parse_args([self.mntdir])\n            arvados_fuse.command.Mount(args, logger=self.logger).run()\n        self.assertEqual(1, ex.exception.code)\n\n    def test_bogus_host(self):\n        arvados.config._settings[\"ARVADOS_API_HOST\"] = \"100::\"\n        with self.assertRaises(SystemExit) as ex, mock.patch('time.sleep'):\n            args = arvados_fuse.command.ArgumentParser().parse_args([self.mntdir])\n            arvados_fuse.command.Mount(args, logger=self.logger).run()\n        self.assertEqual(1, ex.exception.code)\n\n    def test_bogus_token(self):\n        
arvados.config._settings[\"ARVADOS_API_TOKEN\"] = \"zzzzzzzzzzzzz\"\n        with self.assertRaises(SystemExit) as ex:\n            args = arvados_fuse.command.ArgumentParser().parse_args([self.mntdir])\n            arvados_fuse.command.Mount(args, logger=self.logger).run()\n        self.assertEqual(1, ex.exception.code)\n\n    def test_bogus_mount_dir(self):\n        # All FUSE errors in llfuse.init() are raised as RuntimeError\n        # An easy error to trigger is to supply a nonexistent mount point,\n        # so test that one.\n        #\n        # Other possible errors that also raise RuntimeError (but are much\n        # harder to test automatically because they depend on operating\n        # system configuration):\n        #\n        # The user doesn't have permission to use FUSE\n        # The user specified --allow-other but user_allow_other is not set\n        # in /etc/fuse.conf\n        os.rmdir(self.mntdir)\n        with self.assertRaises(SystemExit) as ex:\n            args = arvados_fuse.command.ArgumentParser().parse_args([self.mntdir])\n            arvados_fuse.command.Mount(args, logger=self.logger).run()\n        self.assertEqual(1, ex.exception.code)\n\n    def test_unreadable_collection(self):\n        with self.assertRaises(SystemExit) as ex:\n            args = arvados_fuse.command.ArgumentParser().parse_args([\n                \"--collection\", \"zzzzz-4zz18-zzzzzzzzzzzzzzz\", self.mntdir])\n            arvados_fuse.command.Mount(args, logger=self.logger).run()\n        self.assertEqual(1, ex.exception.code)\n\n    def test_unreadable_project(self):\n        with self.assertRaises(SystemExit) as ex:\n            args = arvados_fuse.command.ArgumentParser().parse_args([\n                \"--project\", \"zzzzz-j7d0g-zzzzzzzzzzzzzzz\", self.mntdir])\n            arvados_fuse.command.Mount(args, logger=self.logger).run()\n        self.assertEqual(1, ex.exception.code)\n"
  },
  {
    "path": "services/fuse/tests/test_concurrency.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport abc\nimport contextlib\nimport dataclasses\nimport enum\nimport functools\nimport hashlib\nimport itertools\nimport json\nimport os\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport threading\nimport time\n\nimport typing as t\n\nfrom collections import abc as cabc\nfrom pathlib import Path, PurePath, PurePosixPath\n\nimport arvados\nimport arvados_fuse as fuse\nimport googleapiclient\nimport pytest\n\nfrom . import run_test_server\nfrom .mount_test_base import MountTestBase\n\n@pytest.fixture(scope='module', autouse=True)\ndef keepstore_servers():\n    cmd = [\n        sys.executable,\n        run_test_server.__file__,\n        'start_keep',\n         '--keep-blob-signing',\n         '--num-keep-servers=2',\n    ]\n    yield subprocess.run(cmd, check=True, stdin=subprocess.DEVNULL)\n    cmd[2] = 'stop_keep'\n    subprocess.run(cmd, check=True, stdin=subprocess.DEVNULL)\n\n\ndef _cmd2str(cmd):\n    return ' '.join(shlex.quote(s) for s in cmd)\n\n\n@dataclasses.dataclass\nclass MountProc:\n    env: cabc.Mapping[str, str]\n    cmd: t.Sequence[str]\n    mount_subdir: PurePath\n    _mount_root: Path | None = dataclasses.field(init=False, default=None)\n    _mount_proc: subprocess.Popen | None = dataclasses.field(init=False, default=None)\n\n    BIN_PATH: t.ClassVar[Path] = Path(__file__).parent.parent / 'bin/arv-mount'\n\n    @classmethod\n    def for_collection(cls, env, coll_id, *, opts=()):\n        cmd = [\n            sys.executable, str(cls.BIN_PATH),\n            '--foreground',\n            '--ram-cache',\n            '--read-write',\n            '--refresh-time=0',\n            '--collection', coll_id,\n            *opts,\n        ]\n        return cls(env, cmd, PurePath('.'))\n\n    @classmethod\n    def for_tmp(cls, env, subdir=PurePath('tmp'), *, opts=()):\n        cmd = [\n            sys.executable, str(cls.BIN_PATH),\n            '--foreground',\n            '--ram-cache',\n            '--read-write',\n            '--refresh-time=0',\n            '--mount-tmp', str(subdir),\n            *opts,\n        ]\n        return cls(env, cmd, subdir)\n\n    def __enter__(self):\n        env = {\n            key: val\n            for key, val in os.environ.items()\n            if not key.startswith('ARVADOS_API_')\n        }\n        env.update(self.env)\n        self._mount_root = Path(tempfile.mkdtemp(prefix='arv-mount-'))\n        cmd = list(self.cmd)\n        cmd.append(str(self._mount_root))\n        self._mount_proc = subprocess.Popen(cmd, env=env, stdin=subprocess.DEVNULL)\n\n        retry_time = .2\n        for retry_count in range(round(10 / retry_time)):\n            if self._mount_root.is_mount():\n                break\n            time.sleep(retry_time)\n        else:\n            # The mount didn't come up in time. 
Do our best to clean up, but\n            # don't assume anything is going to work.\n            subprocess.Popen(\n                ['fusermount', '-u', '-z', str(self._mount_root)],\n                stdin=subprocess.DEVNULL,\n            )\n            assert self._mount_proc.wait(timeout=10) == os.EX_OK\n            raise Exception(\"mount did not come up in time (but exited OK?)\")\n\n        return self\n\n    def __exit__(self, exc_type, exc_value, exc_tb):\n        try:\n            subprocess.run(\n                ['fusermount', '-u', '-z', str(self._mount_root)],\n                stdin=subprocess.DEVNULL,\n                check=True,\n                timeout=60,\n            )\n            self._mount_proc.wait(timeout=60)\n        except subprocess.CalledProcessError as err:\n            pytest.exit(2, f\"command `{_cmd2str(err.cmd)}` exited {err.returncode}\")\n        except subprocess.TimeoutExpired as err:\n            pytest.exit(2, f\"command `{_cmd2str(err.cmd)}` did not finish within {err.timeout} seconds\")\n        self._mount_root.rmdir()\n        self._mount_root = None\n        try:\n            assert self._mount_proc.returncode == os.EX_OK\n        finally:\n            self._mount_proc = None\n\n    @property\n    def mount_path(self):\n        return self._mount_root / self.mount_subdir\n\n\n@dataclasses.dataclass\nclass AbstractChange(metaclass=abc.ABCMeta):\n    mount_path: Path\n    arv_client: googleapiclient.discovery.Resource | None = None\n    coll_uuid: str | None = None\n    filename: PurePath = PurePath('bar')\n\n    @abc.abstractmethod\n    def change_thread(self): ...\n    @abc.abstractmethod\n    def check_mount(self): ...\n    @abc.abstractmethod\n    def check_record(self):\n        assert self.arv_client is not None\n        assert self.coll_uuid is not None\n\n    def check_all(self):\n        self.check_mount()\n        self.check_record()\n\n    def _replace_files_thread(self, dst, src, body=None):\n        request = self.arv_client.collections().update(\n            uuid=self.coll_uuid,\n            body=body or {},\n            replace_files={\n                str(PurePosixPath('/', dst)): src,\n            },\n        )\n        return threading.Thread(target=request.execute)\n\n\n@dataclasses.dataclass\nclass AddInMount(AbstractChange):\n    filename: PurePath = PurePath('FUSEbar')\n    _CONTENT: t.ClassVar[str] = b'bar'\n\n    def change_thread(self):\n        path = self.mount_path / self.filename\n        return threading.Thread(target=path.write_bytes, args=(self._CONTENT,))\n\n    def check_mount(self):\n        path = self.mount_path / self.filename\n        assert path.stat().st_size == 3\n        assert path.read_bytes() == self._CONTENT\n\n    def check_record(self):\n        super().check_record()\n        coll = arvados.collection.CollectionReader(self.coll_uuid, self.arv_client)\n        with coll.open(str(self.filename), 'rb') as coll_file:\n            assert coll_file.read() == self._CONTENT\n\n\n@dataclasses.dataclass\nclass AddInRecord(AddInMount):\n    filename: PurePath = PurePath('APIbar')\n\n    def change_thread(self):\n        return self._replace_files_thread(self.filename, 'current/bar')\n\n\n@dataclasses.dataclass\nclass DelInMount(AbstractChange):\n    def change_thread(self):\n        path = self.mount_path / self.filename\n        return threading.Thread(target=path.unlink, kwargs={'missing_ok': True})\n\n    def check_mount(self):\n        assert not (self.mount_path / self.filename).exists()\n\n    def 
check_record(self):\n        super().check_record()\n        coll = arvados.collection.CollectionReader(self.coll_uuid, self.arv_client)\n        assert self.filename not in coll\n\n\n@dataclasses.dataclass\nclass DelInRecord(DelInMount):\n    def change_thread(self):\n        return self._replace_files_thread(self.filename, '')\n\n\n@dataclasses.dataclass\nclass ModInMount(AbstractChange):\n    _ORIG_CONTENT: t.ClassVar[bytes] = b'bar'\n    _NEW_CONTENT: t.ClassVar[bytes] = b'FUSE'\n\n    def change_thread(self):\n        def append_bytes(path, content):\n            with path.open('ab') as f:\n                f.write(content)\n        path = self.mount_path / self.filename\n        return threading.Thread(target=append_bytes, args=(path, self._NEW_CONTENT))\n\n    def check_mount(self):\n        expected = self._ORIG_CONTENT + self._NEW_CONTENT\n        path = self.mount_path / self.filename\n        assert path.stat().st_size == len(expected)\n        assert path.read_bytes() == expected\n\n    def check_record(self):\n        super().check_record()\n        expected = self._ORIG_CONTENT + self._NEW_CONTENT\n        coll = arvados.collection.CollectionReader(self.coll_uuid, self.arv_client)\n        with coll.open(str(self.filename), 'rb') as coll_file:\n            assert coll_file.read() == expected\n\n\n@dataclasses.dataclass\nclass ModInRecord(ModInMount):\n    _NEW_CONTENT: t.ClassVar[bytes] = b'API'\n    _MANIFEST_TEXT: t.ClassVar[str | None] = None\n\n    def change_thread(self):\n        if self._MANIFEST_TEXT is None:\n            coll = arvados.collection.Collection(self.coll_uuid, self.arv_client)\n            coll.clone()\n            with coll.open(str(self.filename), 'ab') as coll_file:\n                coll_file.write(self._NEW_CONTENT)\n            type(self)._MANIFEST_TEXT = coll.manifest_text()\n        src = str(PurePosixPath('manifest_text', self.filename))\n        return self._replace_files_thread(self.filename, src, body={\n            'manifest_text': self._MANIFEST_TEXT,\n        })\n\n\ndef _config_with_token(token_name):\n    env = {\n        'ARVADOS_API_HOST': os.environ['ARVADOS_API_HOST'],\n        'ARVADOS_API_TOKEN': run_test_server.auth_token(token_name),\n    }\n    try:\n        env['ARVADOS_API_HOST_INSECURE'] = os.environ['ARVADOS_API_HOST_INSECURE']\n    except KeyError:\n        pass\n    return env\n\n\n@pytest.fixture\ndef active_env():\n    return _config_with_token('active')\n\n\n@pytest.fixture(scope='session')\ndef git_src():\n    try:\n        workspace = Path(os.environ['WORKSPACE'])\n    except (KeyError, ValueError):\n        workspace_ok = False\n    else:\n        workspace_ok = workspace.is_dir()\n    if not workspace_ok:\n        raise ValueError(\"$WORKSPACE does not refer to a directory\")\n    git_proc = subprocess.run(\n        ['git', 'rev-parse', '--git-dir'],\n        capture_output=True,\n        check=True,\n        cwd=workspace,\n        text=True,\n    )\n    git_path = Path(git_proc.stdout.removesuffix('\\n'))\n    if git_path.is_absolute():\n        return git_path\n    else:\n        return workspace / git_path\n    \n\ndef new_coll(arv_client, fixture_name='collection_owned_by_active'):\n    coll_record = run_test_server.fixture('collections')[fixture_name]\n    coll = arvados.collection.Collection(coll_record['uuid'], arv_client)\n    coll.save_new()\n    return coll\n\n\ndef run_changes(*changes):\n    threads = [c.change_thread() for c in changes]\n    for t in threads:\n        t.start()\n    errors = []\n    for 
t in threads:\n        try:\n            t.join(timeout=10)\n        except Exception as err:\n            errors.append(err)\n    assert not errors\n\n\n@pytest.mark.parametrize('mount_ct,record_ct', itertools.product(\n    [AddInMount, DelInMount, ModInMount],\n    [AddInRecord, DelInRecord, ModInRecord],\n))\ndef test_simultaneous_api_mount_updates(active_env, mount_ct, record_ct):\n    if issubclass(mount_ct, ModInMount):\n        pytest.skip(\n            \"TODO: mount writes usually fail with inconsistent state - \"\n            \"this should probably pass\",\n        )\n    arv_client = arvados.api.api_from_config('v1', active_env)\n    coll_uuid = new_coll(arv_client).manifest_locator()\n    with MountProc.for_collection(active_env, coll_uuid) as mount:\n        mount_change = mount_ct(mount.mount_path, arv_client, coll_uuid)\n        record_change = record_ct(mount.mount_path, arv_client, coll_uuid)\n        run_changes(mount_change, record_change)\n        # In this case, given that FUSE does not do idempotent writes, there is\n        # always a possibility that it simply overwrites the API record change.\n        # Therefore we only check that the mount change took.\n        mount_change.check_all()\n\n\n@pytest.mark.parametrize('change_type', [AddInMount, DelInMount, ModInMount])\ndef test_simultaneous_coll_mount_updates(active_env, change_type):\n    arv_client = arvados.api.api_from_config('v1', active_env)\n    coll_uuid = new_coll(arv_client).manifest_locator()\n    with MountProc.for_collection(active_env, coll_uuid) as mount:\n        add = AddInMount(mount.mount_path, arv_client, coll_uuid, change_type.__name__)\n        change = change_type(mount.mount_path, arv_client, coll_uuid)\n        run_changes(add, change)\n        add.check_all()\n        change.check_all()\n\n\n@pytest.mark.parametrize('change_type', [AddInMount, DelInMount, ModInMount])\ndef test_simultaneous_tmp_mount_updates(active_env, change_type):\n    with MountProc.for_tmp(active_env) as mount:\n        (mount.mount_path / 'bar').write_bytes(b'bar')\n        add = AddInMount(mount.mount_path, filename=change_type.__name__)\n        change = change_type(mount.mount_path)\n        run_changes(add, change)\n        add.check_mount()\n        change.check_mount()\n\n\n@pytest.mark.skip(\"TODO: this test should probably pass but never has\")\ndef test_git_clone_to_coll(active_env, git_src):\n    arv_client = arvados.api.api_from_config('v1', active_env)\n    coll = new_coll(arv_client, 'empty_collection_name_in_active_user_home_project')\n    with MountProc.for_collection(active_env, coll.manifest_locator()) as mount:\n        git_proc = subprocess.run([\n            'git', 'clone',\n            '--jobs=3',\n            '--no-hardlinks',\n            '--quiet',\n            str(git_src),\n            str(mount.mount_path),\n        ], stdin=subprocess.DEVNULL)\n    # assert outside the `with` block because if arv-mount exits nonzero,\n    # that's a more interesting failure to report.\n    assert git_proc.returncode == os.EX_OK\n\n\ndef test_git_clone_to_tmp(active_env, git_src):\n    with MountProc.for_tmp(active_env) as mount:\n        git_proc = subprocess.run([\n            'git', 'clone',\n            '--jobs=3',\n            '--no-hardlinks',\n            '--quiet',\n            str(git_src),\n            str(mount.mount_path),\n        ], stdin=subprocess.DEVNULL)\n    # assert outside the `with` block because if arv-mount exits nonzero,\n    # that's a more interesting failure to report.\n    
assert git_proc.returncode == os.EX_OK\n"
  },
  {
    "path": "services/fuse/tests/test_crunchstat.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport subprocess\n\nfrom .integration_test import IntegrationTest\n\nclass CrunchstatTest(IntegrationTest):\n    def test_crunchstat(self):\n        output = subprocess.check_output(\n            ['./bin/arv-mount',\n             '--crunchstat-interval', '1',\n             self.mnt,\n             '--exec', 'echo', 'ok'])\n        self.assertEqual(b\"ok\\n\", output)\n"
  },
  {
    "path": "services/fuse/tests/test_exec.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados_fuse.command\nimport json\nimport multiprocessing\nimport os\nimport shlex\nimport tempfile\nimport unittest\n\nfrom . import run_test_server\nfrom .integration_test import workerPool\n\ndef try_exec(mnt, cmd):\n    try:\n        os.environ['KEEP_LOCAL_STORE'] = tempfile.mkdtemp()\n        arvados_fuse.command.Mount(\n            arvados_fuse.command.ArgumentParser().parse_args([\n                '--read-write',\n                '--mount-tmp=zzz',\n                '--unmount-timeout=0.1',\n                mnt,\n                '--exec'] + cmd)).run()\n    except SystemExit:\n        pass\n    else:\n        raise AssertionError('should have exited')\n\n\nclass ExecMode(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        run_test_server.run()\n        run_test_server.run_keep(blob_signing=True, num_servers=2)\n        run_test_server.authorize_with('active')\n\n    @classmethod\n    def tearDownClass(cls):\n        run_test_server.stop_keep(num_servers=2)\n\n    def setUp(self):\n        self.mnt = tempfile.mkdtemp()\n        _, self.okfile = tempfile.mkstemp()\n\n    def tearDown(self):\n        os.rmdir(self.mnt)\n        os.unlink(self.okfile)\n\n    def test_exec(self):\n        workerPool().apply(try_exec, (self.mnt, [\n            'sh', '-c', 'echo -n foo >{}; cp {} {}'.format(\n                shlex.quote(os.path.join(self.mnt, 'zzz', 'foo.txt')),\n                shlex.quote(os.path.join(self.mnt, 'zzz', '.arvados#collection')),\n                shlex.quote(os.path.join(self.okfile)),\n            )]))\n        with open(self.okfile) as f:\n            self.assertRegex(json.load(f)['manifest_text'], r' 0:3:foo.txt\\n')\n"
  },
  {
    "path": "services/fuse/tests/test_inodes.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados_fuse\nimport unittest\nimport llfuse\nimport logging\n\nfrom unittest import mock\n\nclass InodeTests(unittest.TestCase):\n\n    # The following tests call next(inodes._counter) because inode 1\n    # (the root directory) gets special treatment.\n\n    def test_inodes_basic(self):\n        cache = arvados_fuse.InodeCache(1000, 4)\n        inodes = arvados_fuse.Inodes(cache)\n        next(inodes._counter)\n\n        # Check that ent1 gets added to inodes\n        ent1 = mock.MagicMock()\n        ent1.in_use.return_value = False\n        ent1.has_ref.return_value = False\n        ent1.persisted.return_value = True\n        ent1.objsize.return_value = 500\n        inodes.add_entry(ent1)\n        self.assertIn(ent1.inode, inodes)\n        self.assertIs(inodes[ent1.inode], ent1)\n        self.assertEqual(500, cache.total())\n\n    def test_inodes_not_persisted(self):\n        cache = arvados_fuse.InodeCache(1000, 4)\n        inodes = arvados_fuse.Inodes(cache)\n        next(inodes._counter)\n\n        ent1 = mock.MagicMock()\n        ent1.in_use.return_value = False\n        ent1.has_ref.return_value = False\n        ent1.persisted.return_value = True\n        ent1.objsize.return_value = 500\n        inodes.add_entry(ent1)\n\n        # ent2 is not persisted, so it doesn't\n        # affect the cache total\n        ent2 = mock.MagicMock()\n        ent2.in_use.return_value = False\n        ent2.has_ref.return_value = False\n        ent2.persisted.return_value = False\n        ent2.objsize.return_value = 600\n        inodes.add_entry(ent2)\n        self.assertEqual(500, cache.total())\n\n    def test_inode_cleared(self):\n        cache = arvados_fuse.InodeCache(1000, 4)\n        inodes = arvados_fuse.Inodes(cache)\n        next(inodes._counter)\n\n        # Check that ent1 gets added to inodes\n        ent1 = mock.MagicMock()\n        ent1.in_use.return_value = False\n        ent1.has_ref.return_value = False\n        ent1.persisted.return_value = True\n        ent1.objsize.return_value = 500\n        inodes.add_entry(ent1)\n\n        # ent3 is persisted, adding it should cause ent1 to get cleared\n        ent3 = mock.MagicMock()\n        ent3.in_use.return_value = False\n        ent3.has_ref.return_value = False\n        ent3.persisted.return_value = True\n        ent3.objsize.return_value = 600\n\n        self.assertFalse(ent1.clear.called)\n        inodes.add_entry(ent3)\n\n        # Won't clear anything because min_entries = 4\n        self.assertEqual(2, len(cache._cache_entries))\n        self.assertFalse(ent1.clear.called)\n        self.assertEqual(1100, cache.total())\n\n        # Change min_entries\n        cache.min_entries = 1\n        ent1.parent_inode = None\n        inodes.cap_cache()\n        inodes.wait_remove_queue_empty()\n        self.assertEqual(600, cache.total())\n        self.assertTrue(ent1.clear.called)\n\n        # Touching ent1 should cause ent3 to get cleared\n        ent3.parent_inode = None\n        self.assertFalse(ent3.clear.called)\n        inodes.inode_cache.update_cache_size(ent1)\n        inodes.touch(ent1)\n        inodes.wait_remove_queue_empty()\n        self.assertTrue(ent3.clear.called)\n        self.assertEqual(500, cache.total())\n\n    def test_clear_in_use(self):\n        cache = arvados_fuse.InodeCache(1000, 4)\n        inodes = arvados_fuse.Inodes(cache)\n        next(inodes._counter)\n\n        ent1 = mock.MagicMock()\n  
      ent1.in_use.return_value = True\n        ent1.has_ref.return_value = False\n        ent1.persisted.return_value = True\n        ent1.objsize.return_value = 500\n        inodes.add_entry(ent1)\n\n        ent3 = mock.MagicMock()\n        ent3.in_use.return_value = False\n        ent3.has_ref.return_value = True\n        ent3.persisted.return_value = True\n        ent3.objsize.return_value = 600\n        inodes.add_entry(ent3)\n\n        cache.min_entries = 1\n\n        # ent1, ent3 in use, has ref, can't be cleared\n        ent1.clear.called = False\n        ent3.clear.called = False\n        self.assertFalse(ent1.clear.called)\n        self.assertFalse(ent3.clear.called)\n        inodes.touch(ent3)\n        inodes.wait_remove_queue_empty()\n        self.assertFalse(ent1.clear.called)\n        self.assertFalse(ent3.clear.called)\n        # kernel invalidate gets called anyway\n        self.assertTrue(ent3.kernel_invalidate.called)\n        self.assertEqual(1100, cache.total())\n\n        # ent1 still in use, ent3 doesn't have ref,\n        # so ent3 gets cleared\n        ent3.has_ref.return_value = False\n        ent1.clear.called = False\n        ent3.clear.called = False\n        ent3.parent_inode = None\n        inodes.touch(ent3)\n        inodes.wait_remove_queue_empty()\n        self.assertFalse(ent1.clear.called)\n        self.assertTrue(ent3.clear.called)\n        self.assertEqual(500, cache.total())\n\n    def test_delete(self):\n        cache = arvados_fuse.InodeCache(1000, 0)\n        inodes = arvados_fuse.Inodes(cache)\n        next(inodes._counter)\n\n        ent1 = mock.MagicMock()\n        ent1.in_use.return_value = False\n        ent1.has_ref.return_value = False\n        ent1.persisted.return_value = True\n        ent1.objsize.return_value = 500\n        inodes.add_entry(ent1)\n\n        ent3 = mock.MagicMock()\n        ent3.in_use.return_value = False\n        ent3.has_ref.return_value = False\n        ent3.persisted.return_value = True\n        ent3.objsize.return_value = 600\n\n        # Delete ent1\n        self.assertEqual(500, cache.total())\n        ent1.ref_count = 0\n        with llfuse.lock:\n            inodes.del_entry(ent1)\n        inodes.wait_remove_queue_empty()\n        self.assertEqual(0, cache.total())\n\n        inodes.add_entry(ent3)\n        inodes.wait_remove_queue_empty()\n        self.assertEqual(600, cache.total())\n"
  },
  {
    "path": "services/fuse/tests/test_mount.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport errno\nimport json\nimport llfuse\nimport logging\nimport os\nimport subprocess\nimport time\nimport unittest\nimport tempfile\nimport stat\n\nfrom pathlib import Path\nfrom unittest import mock\n\nimport arvados\nimport arvados_fuse as fuse\nimport parameterized\n\nfrom arvados_fuse import fusedir\n\nfrom . import run_test_server\nfrom .integration_test import IntegrationTest\nfrom .mount_test_base import MountTestBase\nfrom .test_tmp_collection import storage_classes_desired\n\nlogger = logging.getLogger('arvados.arv-mount')\n\nclass AssertWithTimeout(object):\n    \"\"\"Allow some time for an assertion to pass.\"\"\"\n\n    def __init__(self, timeout=0):\n        self.timeout = timeout\n\n    def __iter__(self):\n        self.deadline = time.time() + self.timeout\n        self.done = False\n        return self\n\n    def __next__(self):\n        if self.done:\n            raise StopIteration\n        return self.attempt\n\n    def attempt(self, fn, *args, **kwargs):\n        try:\n            fn(*args, **kwargs)\n        except AssertionError:\n            if time.time() > self.deadline:\n                raise\n            time.sleep(0.1)\n        else:\n            self.done = True\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass FuseMountTest(MountTestBase):\n    def setUp(self):\n        super(FuseMountTest, self).setUp()\n\n        cw = arvados.collection.Collection()\n        with cw.open('thing1.txt', 'w') as f:\n            f.write('data 1')\n        with cw.open('thing2.txt', 'w') as f:\n            f.write('data 2')\n\n        with cw.open('dir1/thing3.txt', 'w') as f:\n            f.write('data 3')\n        with cw.open('dir1/thing4.txt', 'w') as f:\n            f.write('data 4')\n\n        with cw.open('dir2/thing5.txt', 'w') as f:\n            f.write('data 5')\n        with cw.open('dir2/thing6.txt', 'w') as f:\n            f.write('data 6')\n\n        with cw.open('dir2/dir3/thing7.txt', 'w') as f:\n            f.write('data 7')\n        with cw.open('dir2/dir3/thing8.txt', 'w') as f:\n            f.write('data 8')\n\n        for fnm in \":/.../-/*/ \".split(\"/\"):\n            with cw.open('edgecases/'+fnm, 'w') as f:\n                f.write('x')\n\n        for fnm in \":/.../-/*/ \".split(\"/\"):\n            with cw.open('edgecases/dirs/'+fnm+'/x/x', 'w') as f:\n                f.write('x')\n\n        self.testcollection = cw.portable_data_hash()\n        self.test_manifest = cw.manifest_text()\n        self.api.collections().create(body={\"manifest_text\": self.test_manifest}).execute()\n\n    def runTest(self):\n        self.make_mount(fuse.CollectionDirectory, collection_record=self.testcollection)\n\n        self.assertDirContents(None, ['thing1.txt', 'thing2.txt',\n                                      'edgecases', 'dir1', 'dir2'])\n        self.assertDirContents('dir1', ['thing3.txt', 'thing4.txt'])\n        self.assertDirContents('dir2', ['thing5.txt', 'thing6.txt', 'dir3'])\n        self.assertDirContents('dir2/dir3', ['thing7.txt', 'thing8.txt'])\n        self.assertDirContents('edgecases',\n                               \"dirs/:/.../-/*/ \".split(\"/\"))\n        self.assertDirContents('edgecases/dirs',\n                               \":/.../-/*/ \".split(\"/\"))\n\n        files = {'thing1.txt': 'data 1',\n                 'thing2.txt': 'data 2',\n                 'dir1/thing3.txt': 
'data 3',\n                 'dir1/thing4.txt': 'data 4',\n                 'dir2/thing5.txt': 'data 5',\n                 'dir2/thing6.txt': 'data 6',\n                 'dir2/dir3/thing7.txt': 'data 7',\n                 'dir2/dir3/thing8.txt': 'data 8'}\n\n        for k, v in files.items():\n            with open(os.path.join(self.mounttmp, k), 'rb') as f:\n                self.assertEqual(v, f.read().decode())\n\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass FuseMagicTest(MountTestBase):\n    def setUp(self, api=None):\n        super(FuseMagicTest, self).setUp(api=api)\n\n        self.test_project = run_test_server.fixture('groups')['aproject']['uuid']\n        self.non_project_group = run_test_server.fixture('groups')['public_role']['uuid']\n        self.filter_group = run_test_server.fixture('groups')['afiltergroup']['uuid']\n        self.collection_in_test_project = run_test_server.fixture('collections')['foo_collection_in_aproject']['name']\n        self.collection_in_filter_group = run_test_server.fixture('collections')['baz_file']['name']\n\n        cw = arvados.collection.Collection()\n        with cw.open('thing1.txt', 'w') as f:\n            f.write('data 1')\n\n        self.testcollection = cw.portable_data_hash()\n        self.test_manifest = cw.manifest_text()\n        coll = self.api.collections().create(body={\"manifest_text\":self.test_manifest}).execute()\n        self.test_manifest_pdh = coll['portable_data_hash']\n\n    def runTest(self):\n        self.make_mount(fuse.MagicDirectory)\n\n        mount_ls = llfuse.listdir(self.mounttmp)\n        self.assertIn('README', mount_ls)\n        self.assertFalse(any(arvados.util.keep_locator_pattern.match(fn) or\n                             arvados.util.uuid_pattern.match(fn)\n                             for fn in mount_ls),\n                         \"new FUSE MagicDirectory has no collections or projects\")\n        self.assertDirContents(self.testcollection, ['thing1.txt'])\n        self.assertDirContents(os.path.join('by_id', self.testcollection),\n                               ['thing1.txt'])\n        self.assertIn(self.collection_in_test_project,\n                      llfuse.listdir(os.path.join(self.mounttmp, self.test_project)))\n        self.assertIn(self.collection_in_test_project,\n                      llfuse.listdir(os.path.join(self.mounttmp, 'by_id', self.test_project)))\n        self.assertIn(self.collection_in_filter_group,\n                      llfuse.listdir(os.path.join(self.mounttmp, self.filter_group)))\n        self.assertIn(self.collection_in_filter_group,\n                      llfuse.listdir(os.path.join(self.mounttmp, 'by_id', self.filter_group)))\n\n\n        mount_ls = llfuse.listdir(self.mounttmp)\n        self.assertIn('README', mount_ls)\n        self.assertIn(self.testcollection, mount_ls)\n        self.assertIn(self.testcollection,\n                      llfuse.listdir(os.path.join(self.mounttmp, 'by_id')))\n        self.assertIn(self.test_project, mount_ls)\n        self.assertIn(self.test_project,\n                      llfuse.listdir(os.path.join(self.mounttmp, 'by_id')))\n        self.assertIn(self.filter_group,\n                      llfuse.listdir(os.path.join(self.mounttmp, 'by_id')))\n\n        with self.assertRaises(OSError):\n            llfuse.listdir(os.path.join(self.mounttmp, 'by_id', self.non_project_group))\n\n        files = {}\n        files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'\n\n        
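# Reading through the mount should return the data stored in the collection.\n        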
for k, v in files.items():\n            with open(os.path.join(self.mounttmp, k), 'rb') as f:\n                self.assertEqual(v, f.read().decode())\n\n\nclass FuseTagsTest(MountTestBase):\n    def runTest(self):\n        self.make_mount(fuse.TagsDirectory)\n\n        d1 = llfuse.listdir(self.mounttmp)\n        d1.sort()\n        self.assertEqual(['foo_tag'], d1)\n\n        d2 = llfuse.listdir(os.path.join(self.mounttmp, 'foo_tag'))\n        d2.sort()\n        self.assertEqual(['zzzzz-4zz18-fy296fx3hot09f7'], d2)\n\n        d3 = llfuse.listdir(os.path.join(self.mounttmp, 'foo_tag', 'zzzzz-4zz18-fy296fx3hot09f7'))\n        d3.sort()\n        self.assertEqual(['foo'], d3)\n\n\nclass FuseTagsUpdateTest(MountTestBase):\n    def tag_collection(self, coll_uuid, tag_name):\n        return self.api.links().create(\n            body={'link': {'head_uuid': coll_uuid,\n                           'link_class': 'tag',\n                           'name': tag_name,\n        }}).execute()\n\n    def runTest(self):\n        self.make_mount(fuse.TagsDirectory, poll_time=1)\n\n        self.assertIn('foo_tag', llfuse.listdir(self.mounttmp))\n\n        bar_uuid = run_test_server.fixture('collections')['bar_file']['uuid']\n        self.tag_collection(bar_uuid, 'fuse_test_tag')\n        for attempt in AssertWithTimeout(10):\n            attempt(self.assertIn, 'fuse_test_tag', llfuse.listdir(self.mounttmp))\n        self.assertDirContents('fuse_test_tag', [bar_uuid])\n\n        baz_uuid = run_test_server.fixture('collections')['baz_file']['uuid']\n        l = self.tag_collection(baz_uuid, 'fuse_test_tag')\n        for attempt in AssertWithTimeout(10):\n            attempt(self.assertDirContents, 'fuse_test_tag', [bar_uuid, baz_uuid])\n\n        self.api.links().delete(uuid=l['uuid']).execute()\n        for attempt in AssertWithTimeout(10):\n            attempt(self.assertDirContents, 'fuse_test_tag', [bar_uuid])\n\n\ndef fuseSharedTestHelper(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            # Double check that we can open and read objects in this folder as a file,\n            # and that its contents are what we expect.\n            baz_path = os.path.join(\n                mounttmp,\n                'FUSE User',\n                'FUSE Test Project',\n                'collection in FUSE project',\n                'baz')\n            with open(baz_path) as f:\n                self.assertEqual(\"baz\", f.read())\n\n            # check mtime on collection\n            st = os.stat(baz_path)\n            try:\n                mtime = st.st_mtime_ns // 1000000000\n            except AttributeError:\n                mtime = st.st_mtime\n            self.assertEqual(mtime, 1391448174)\n\n            # shared_dirs is a list of the directories exposed\n            # by fuse.SharedDirectory (i.e. 
any object visible\n            # to the current user)\n            shared_dirs = llfuse.listdir(mounttmp)\n            shared_dirs.sort()\n            self.assertIn('FUSE User', shared_dirs)\n\n            # fuse_user_objs is a list of the objects owned by the FUSE\n            # test user (which present as files in the 'FUSE User'\n            # directory)\n            fuse_user_objs = llfuse.listdir(os.path.join(mounttmp, 'FUSE User'))\n            fuse_user_objs.sort()\n            self.assertEqual(['FUSE Test Project',                    # project owned by user\n                              'collection #1 owned by FUSE',          # collection owned by user\n                              'collection #2 owned by FUSE'          # collection owned by user\n                          ], fuse_user_objs)\n\n            # test_proj_files is a list of the files in the FUSE Test Project.\n            test_proj_files = llfuse.listdir(os.path.join(mounttmp, 'FUSE User', 'FUSE Test Project'))\n            test_proj_files.sort()\n            self.assertEqual(['collection in FUSE project'\n                          ], test_proj_files)\n\n\n    Test().runTest()\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass FuseSharedTest(MountTestBase):\n    def runTest(self):\n        self.make_mount(fuse.SharedDirectory,\n                        exclude=self.api.users().current().execute()['uuid'])\n        keep = arvados.keep.KeepClient()\n        keep.put(\"baz\".encode())\n\n        self.pool.apply(fuseSharedTestHelper, (self.mounttmp,))\n\n\nclass FuseHomeTest(MountTestBase):\n    def runTest(self):\n        self.make_mount(fuse.ProjectDirectory,\n                        project_object=self.api.users().current().execute())\n\n        d1 = llfuse.listdir(self.mounttmp)\n        self.assertIn('Unrestricted public data', d1)\n\n        d2 = llfuse.listdir(os.path.join(self.mounttmp, 'Unrestricted public data'))\n        public_project = run_test_server.fixture('groups')[\n            'anonymously_accessible_project']\n        found_in = 0\n        found_not_in = 0\n        for name, item in run_test_server.fixture('collections').items():\n            if 'name' not in item:\n                pass\n            elif item['owner_uuid'] == public_project['uuid']:\n                self.assertIn(item['name'], d2)\n                found_in += 1\n            else:\n                # Artificial assumption here: there is no public\n                # collection fixture with the same name as a\n                # non-public collection.\n                self.assertNotIn(item['name'], d2)\n                found_not_in += 1\n        self.assertNotEqual(0, found_in)\n        self.assertNotEqual(0, found_not_in)\n\n        d3 = llfuse.listdir(os.path.join(self.mounttmp, 'Unrestricted public data', 'GNU General Public License, version 3'))\n        self.assertEqual([\"GNU_General_Public_License,_version_3.pdf\"], d3)\n\n\ndef fuseModifyFileTestHelperReadStartContents(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            d1 = llfuse.listdir(mounttmp)\n            self.assertEqual([\"file1.txt\"], d1)\n            with open(os.path.join(mounttmp, \"file1.txt\")) as f:\n                self.assertEqual(\"blub\", f.read())\n    Test().runTest()\n\ndef fuseModifyFileTestHelperReadEndContents(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            d1 = llfuse.listdir(mounttmp)\n            self.assertEqual([\"file1.txt\"], 
d1)\n            with open(os.path.join(mounttmp, \"file1.txt\")) as f:\n                self.assertEqual(\"plnp\", f.read())\n    Test().runTest()\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass FuseModifyFileTest(MountTestBase):\n    def runTest(self):\n        collection = arvados.collection.Collection(api_client=self.api)\n        with collection.open(\"file1.txt\", \"w\") as f:\n            f.write(\"blub\")\n\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n\n        self.pool.apply(fuseModifyFileTestHelperReadStartContents, (self.mounttmp,))\n\n        with collection.open(\"file1.txt\", \"w\") as f:\n            f.write(\"plnp\")\n\n        self.pool.apply(fuseModifyFileTestHelperReadEndContents, (self.mounttmp,))\n\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass FuseAddFileToCollectionTest(MountTestBase):\n    def runTest(self):\n        collection = arvados.collection.Collection(api_client=self.api)\n        with collection.open(\"file1.txt\", \"w\") as f:\n            f.write(\"blub\")\n\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n\n        d1 = llfuse.listdir(self.mounttmp)\n        self.assertEqual([\"file1.txt\"], d1)\n\n        with collection.open(\"file2.txt\", \"w\") as f:\n            f.write(\"plnp\")\n\n        d1 = llfuse.listdir(self.mounttmp)\n        self.assertEqual([\"file1.txt\", \"file2.txt\"], sorted(d1))\n\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass FuseRemoveFileFromCollectionTest(MountTestBase):\n    def runTest(self):\n        collection = arvados.collection.Collection(api_client=self.api)\n        with collection.open(\"file1.txt\", \"w\") as f:\n            f.write(\"blub\")\n\n        with collection.open(\"file2.txt\", \"w\") as f:\n            f.write(\"plnp\")\n\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n\n        d1 = llfuse.listdir(self.mounttmp)\n        self.assertEqual([\"file1.txt\", \"file2.txt\"], sorted(d1))\n\n        collection.remove(\"file2.txt\")\n\n        d1 = llfuse.listdir(self.mounttmp)\n        self.assertEqual([\"file1.txt\"], d1)\n\n\ndef fuseCreateFileTestHelper(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            with open(os.path.join(mounttmp, \"file1.txt\"), \"w\") as f:\n                pass\n    Test().runTest()\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass FuseCreateFileTest(MountTestBase):\n    def runTest(self):\n        collection = arvados.collection.Collection(api_client=self.api)\n        collection.save_new()\n\n        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()\n        self.assertEqual(collection2[\"manifest_text\"], \"\")\n\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n        self.assertTrue(m.writable())\n\n        self.assertNotIn(\"file1.txt\", collection)\n\n        
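# See note in MountTestBase.setUp\n        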
self.pool.apply(fuseCreateFileTestHelper, (self.mounttmp,))\n\n        self.assertIn(\"file1.txt\", collection)\n\n        d1 = llfuse.listdir(self.mounttmp)\n        self.assertEqual([\"file1.txt\"], d1)\n\n        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()\n        self.assertRegex(collection2[\"manifest_text\"],\n            r'\\. d41d8cd98f00b204e9800998ecf8427e\\+0\\+A\\S+ 0:0:file1\\.txt$')\n\n\ndef fuseWriteFileTestHelperWriteFile(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            with open(os.path.join(mounttmp, \"file1.txt\"), \"w\") as f:\n                f.write(\"Hello world!\")\n    Test().runTest()\n\ndef fuseWriteFileTestHelperReadFile(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            with open(os.path.join(mounttmp, \"file1.txt\"), \"r\") as f:\n                self.assertEqual(f.read(), \"Hello world!\")\n    Test().runTest()\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass FuseWriteFileTest(MountTestBase):\n    def runTest(self):\n        collection = arvados.collection.Collection(api_client=self.api)\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n        self.assertTrue(m.writable())\n\n        self.assertNotIn(\"file1.txt\", collection)\n\n        self.assertEqual(0, self.operations.write_counter.get())\n        self.pool.apply(fuseWriteFileTestHelperWriteFile, (self.mounttmp,))\n        self.assertEqual(12, self.operations.write_counter.get())\n\n        with collection.open(\"file1.txt\") as f:\n            self.assertEqual(f.read(), \"Hello world!\")\n\n        self.assertEqual(0, self.operations.read_counter.get())\n        self.pool.apply(fuseWriteFileTestHelperReadFile, (self.mounttmp,))\n        self.assertEqual(12, self.operations.read_counter.get())\n\n        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()\n        self.assertRegex(collection2[\"manifest_text\"],\n            r'\\. 
86fb269d190d2c85f6e0468ceca42a20\\+12\\+A\\S+ 0:12:file1\\.txt$')\n\ndef fuseMknodTestHelperReadFile(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            with open(os.path.join(mounttmp, \"file1.txt\"), \"r\") as f:\n                self.assertEqual(f.read(), \"\")\n    Test().runTest()\n\nclass FuseMknodTest(MountTestBase):\n    def runTest(self):\n        # Check that os.mknod() can be used to create normal files.\n        collection = arvados.collection.Collection(api_client=self.api)\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n        self.assertTrue(m.writable())\n\n        self.assertNotIn(\"file1.txt\", collection)\n\n        self.assertEqual(0, self.operations.write_counter.get())\n        self.pool.apply(os.mknod, (os.path.join(self.mounttmp, \"file1.txt\"),))\n\n        with collection.open(\"file1.txt\") as f:\n            self.assertEqual(f.read(), \"\")\n\n        self.pool.apply(fuseMknodTestHelperReadFile, (self.mounttmp,))\n\n        # Fail trying to create a FIFO\n        with self.assertRaises(OSError) as exc_check:\n            self.pool.apply(os.mknod, (os.path.join(self.mounttmp, \"file2.txt\"), stat.S_IFIFO))\n\nclass FuseMknodReadOnlyTest(MountTestBase):\n    def runTest(self):\n        collection = arvados.collection.Collection(api_client=self.api)\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory, enable_write=False)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n        self.assertTrue(m.writable() is False)\n        with self.assertRaises(OSError) as exc_check:\n            self.pool.apply(os.mknod, (os.path.join(self.mounttmp, \"file1.txt\"),))\n\nclass FuseMknodProjectTest(MountTestBase):\n    def runTest(self):\n        self.make_mount(fuse.ProjectDirectory,\n                        project_object=self.api.users().current().execute())\n        with self.assertRaises(OSError) as exc_check:\n            self.pool.apply(os.mknod, (os.path.join(self.mounttmp, \"file1.txt\"),))\n\n\ndef fuseUpdateFileTestHelper(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            with open(os.path.join(mounttmp, \"file1.txt\"), \"w\") as f:\n                f.write(\"Hello world!\")\n\n            with open(os.path.join(mounttmp, \"file1.txt\"), \"r+\") as f:\n                fr = f.read()\n                self.assertEqual(fr, \"Hello world!\")\n                f.seek(0)\n                f.write(\"Hola mundo!\")\n                f.seek(0)\n                fr = f.read()\n                self.assertEqual(fr, \"Hola mundo!!\")\n\n            with open(os.path.join(mounttmp, \"file1.txt\"), \"r\") as f:\n                self.assertEqual(f.read(), \"Hola mundo!!\")\n\n    Test().runTest()\n\n@parameterized.parameterized_class([{\"disk_cache\": True}, {\"disk_cache\": False}])\nclass FuseUpdateFileTest(MountTestBase):\n    def runTest(self):\n        collection = arvados.collection.Collection(api_client=self.api)\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n        self.assertTrue(m.writable())\n\n        # See note in MountTestBase.setUp\n        self.pool.apply(fuseUpdateFileTestHelper, (self.mounttmp,))\n\n        collection2 = 
self.api.collections().get(uuid=collection.manifest_locator()).execute()\n        self.assertRegex(collection2[\"manifest_text\"],\n            r'\\. daaef200ebb921e011e3ae922dd3266b\\+11\\+A\\S+ 86fb269d190d2c85f6e0468ceca42a20\\+12\\+A\\S+ 0:11:file1\\.txt 22:1:file1\\.txt$')\n\n\ndef fuseMkdirTestHelper(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            with self.assertRaises(IOError):\n                with open(os.path.join(mounttmp, \"testdir\", \"file1.txt\"), \"w\") as f:\n                    f.write(\"Hello world!\")\n\n            os.mkdir(os.path.join(mounttmp, \"testdir\"))\n\n            with self.assertRaises(OSError):\n                os.mkdir(os.path.join(mounttmp, \"testdir\"))\n\n            d1 = llfuse.listdir(mounttmp)\n            self.assertEqual([\"testdir\"], d1)\n\n            with open(os.path.join(mounttmp, \"testdir\", \"file1.txt\"), \"w\") as f:\n                f.write(\"Hello world!\")\n\n            d1 = llfuse.listdir(os.path.join(mounttmp, \"testdir\"))\n            self.assertEqual([\"file1.txt\"], d1)\n\n    Test().runTest()\n\nclass FuseMkdirTest(MountTestBase):\n    def runTest(self):\n        collection = arvados.collection.Collection(api_client=self.api)\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n        self.assertTrue(m.writable())\n\n        self.pool.apply(fuseMkdirTestHelper, (self.mounttmp,))\n\n        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()\n        self.assertRegex(collection2[\"manifest_text\"],\n            r'\\./testdir 86fb269d190d2c85f6e0468ceca42a20\\+12\\+A\\S+ 0:12:file1\\.txt$')\n\n\ndef fuseRmTestHelperWriteFile(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            os.mkdir(os.path.join(mounttmp, \"testdir\"))\n\n            with open(os.path.join(mounttmp, \"testdir\", \"file1.txt\"), \"w\") as f:\n                f.write(\"Hello world!\")\n\n    Test().runTest()\n\ndef fuseRmTestHelperDeleteFile(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            # Can't delete because it's not empty\n            with self.assertRaises(OSError):\n                os.rmdir(os.path.join(mounttmp, \"testdir\"))\n\n            d1 = llfuse.listdir(os.path.join(mounttmp, \"testdir\"))\n            self.assertEqual([\"file1.txt\"], d1)\n\n            # Delete file\n            os.remove(os.path.join(mounttmp, \"testdir\", \"file1.txt\"))\n\n            # Make sure it's empty\n            d1 = llfuse.listdir(os.path.join(mounttmp, \"testdir\"))\n            self.assertEqual([], d1)\n\n            # Try to delete it again\n            with self.assertRaises(OSError):\n                os.remove(os.path.join(mounttmp, \"testdir\", \"file1.txt\"))\n\n    Test().runTest()\n\ndef fuseRmTestHelperRmdir(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            # Should be able to delete now that it is empty\n            os.rmdir(os.path.join(mounttmp, \"testdir\"))\n\n            # Make sure it's empty\n            d1 = llfuse.listdir(os.path.join(mounttmp))\n            self.assertEqual([], d1)\n\n            # Try to delete it again\n            with self.assertRaises(OSError):\n                os.rmdir(os.path.join(mounttmp, \"testdir\"))\n\n    Test().runTest()\n\nclass FuseRmTest(MountTestBase):\n    def runTest(self):\n        collection 
= arvados.collection.Collection(api_client=self.api)\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n        self.assertTrue(m.writable())\n\n        self.pool.apply(fuseRmTestHelperWriteFile, (self.mounttmp,))\n\n        # Starting manifest\n        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()\n        self.assertRegex(collection2[\"manifest_text\"],\n            r'\\./testdir 86fb269d190d2c85f6e0468ceca42a20\\+12\\+A\\S+ 0:12:file1\\.txt$')\n        self.pool.apply(fuseRmTestHelperDeleteFile, (self.mounttmp,))\n\n        # Empty directories are represented by an empty file named \".\"\n        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()\n        self.assertRegex(collection2[\"manifest_text\"],\n                                 r'./testdir d41d8cd98f00b204e9800998ecf8427e\\+0\\+A\\S+ 0:0:\\\\056\\n')\n\n        self.pool.apply(fuseRmTestHelperRmdir, (self.mounttmp,))\n\n        # manifest should be empty now.\n        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()\n        self.assertEqual(collection2[\"manifest_text\"], \"\")\n\n\ndef fuseMvFileTestHelperWriteFile(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            os.mkdir(os.path.join(mounttmp, \"testdir\"))\n\n            with open(os.path.join(mounttmp, \"testdir\", \"file1.txt\"), \"w\") as f:\n                f.write(\"Hello world!\")\n\n    Test().runTest()\n\ndef fuseMvFileTestHelperMoveFile(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            d1 = llfuse.listdir(os.path.join(mounttmp))\n            self.assertEqual([\"testdir\"], d1)\n            d1 = llfuse.listdir(os.path.join(mounttmp, \"testdir\"))\n            self.assertEqual([\"file1.txt\"], d1)\n\n            os.rename(os.path.join(mounttmp, \"testdir\", \"file1.txt\"), os.path.join(mounttmp, \"file1.txt\"))\n\n            d1 = llfuse.listdir(os.path.join(mounttmp))\n            self.assertEqual([\"file1.txt\", \"testdir\"], sorted(d1))\n            d1 = llfuse.listdir(os.path.join(mounttmp, \"testdir\"))\n            self.assertEqual([], d1)\n\n    Test().runTest()\n\nclass FuseMvFileTest(MountTestBase):\n    def runTest(self):\n        collection = arvados.collection.Collection(api_client=self.api)\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n        self.assertTrue(m.writable())\n\n        self.pool.apply(fuseMvFileTestHelperWriteFile, (self.mounttmp,))\n\n        # Starting manifest\n        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()\n        self.assertRegex(collection2[\"manifest_text\"],\n            r'\\./testdir 86fb269d190d2c85f6e0468ceca42a20\\+12\\+A\\S+ 0:12:file1\\.txt$')\n\n        self.pool.apply(fuseMvFileTestHelperMoveFile, (self.mounttmp,))\n\n        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()\n        self.assertRegex(collection2[\"manifest_text\"],\n            r'\\. 
86fb269d190d2c85f6e0468ceca42a20\\+12\\+A\\S+ 0:12:file1\\.txt\\n\\./testdir d41d8cd98f00b204e9800998ecf8427e\\+0\\+A\\S+ 0:0:\\\\056\\n')\n\n\ndef fuseRenameTestHelper(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            os.mkdir(os.path.join(mounttmp, \"testdir\"))\n\n            with open(os.path.join(mounttmp, \"testdir\", \"file1.txt\"), \"w\") as f:\n                f.write(\"Hello world!\")\n\n    Test().runTest()\n\nclass FuseRenameTest(MountTestBase):\n    def runTest(self):\n        collection = arvados.collection.Collection(api_client=self.api)\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n        self.assertTrue(m.writable())\n\n        self.pool.apply(fuseRenameTestHelper, (self.mounttmp,))\n\n        # Starting manifest\n        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()\n        self.assertRegex(collection2[\"manifest_text\"],\n            r'\\./testdir 86fb269d190d2c85f6e0468ceca42a20\\+12\\+A\\S+ 0:12:file1\\.txt$')\n\n        d1 = llfuse.listdir(os.path.join(self.mounttmp))\n        self.assertEqual([\"testdir\"], d1)\n        d1 = llfuse.listdir(os.path.join(self.mounttmp, \"testdir\"))\n        self.assertEqual([\"file1.txt\"], d1)\n\n        os.rename(os.path.join(self.mounttmp, \"testdir\"), os.path.join(self.mounttmp, \"testdir2\"))\n\n        d1 = llfuse.listdir(os.path.join(self.mounttmp))\n        self.assertEqual([\"testdir2\"], sorted(d1))\n        d1 = llfuse.listdir(os.path.join(self.mounttmp, \"testdir2\"))\n        self.assertEqual([\"file1.txt\"], d1)\n\n        collection2 = self.api.collections().get(uuid=collection.manifest_locator()).execute()\n        self.assertRegex(collection2[\"manifest_text\"],\n            r'\\./testdir2 86fb269d190d2c85f6e0468ceca42a20\\+12\\+A\\S+ 0:12:file1\\.txt$')\n\n\nclass FuseUpdateFromEventTest(MountTestBase):\n    def runTest(self):\n        collection = arvados.collection.Collection(api_client=self.api)\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n\n        self.operations.listen_for_events()\n\n        d1 = llfuse.listdir(os.path.join(self.mounttmp))\n        self.assertEqual([], sorted(d1))\n\n        with arvados.collection.Collection(collection.manifest_locator(), api_client=self.api) as collection2:\n            with collection2.open(\"file1.txt\", \"w\") as f:\n                f.write(\"foo\")\n\n        for attempt in AssertWithTimeout(10):\n            attempt(self.assertEqual, [\"file1.txt\"], llfuse.listdir(os.path.join(self.mounttmp)))\n\n\nclass FuseDeleteProjectEventTest(MountTestBase):\n    def runTest(self):\n\n        aproject = self.api.groups().create(body={\n            \"name\": \"aproject\",\n            \"group_class\": \"project\"\n        }).execute()\n\n        bproject = self.api.groups().create(body={\n            \"name\": \"bproject\",\n            \"group_class\": \"project\",\n            \"owner_uuid\": aproject[\"uuid\"]\n        }).execute()\n\n        self.make_mount(fuse.ProjectDirectory,\n                        project_object=self.api.users().current().execute())\n\n        self.operations.listen_for_events()\n\n        d1 = llfuse.listdir(os.path.join(self.mounttmp, \"aproject\"))\n        self.assertEqual([\"bproject\"], 
sorted(d1))\n\n        self.api.groups().delete(uuid=bproject[\"uuid\"]).execute()\n\n        for attempt in AssertWithTimeout(10):\n            attempt(self.assertEqual, [], llfuse.listdir(os.path.join(self.mounttmp, \"aproject\")))\n\n\ndef fuseFileConflictTestHelper(mounttmp, uuid, keeptmp, settings):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            os.environ['KEEP_LOCAL_STORE'] = keeptmp\n\n            with open(os.path.join(mounttmp, \"file1.txt\"), \"w\") as f:\n                with arvados.collection.Collection(uuid, api_client=arvados.api_from_config('v1', apiconfig=settings)) as collection2:\n                    with collection2.open(\"file1.txt\", \"w\") as f2:\n                        f2.write(\"foo\")\n                f.write(\"bar\")\n\n            d1 = sorted(llfuse.listdir(os.path.join(mounttmp)))\n            self.assertEqual(len(d1), 2)\n\n            with open(os.path.join(mounttmp, \"file1.txt\"), \"r\") as f:\n                self.assertEqual(f.read(), \"bar\")\n\n            self.assertRegex(d1[1],\n                r'file1\\.txt~\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d~conflict~')\n\n            with open(os.path.join(mounttmp, d1[1]), \"r\") as f:\n                self.assertEqual(f.read(), \"foo\")\n\n    Test().runTest()\n\nclass FuseFileConflictTest(MountTestBase):\n    def runTest(self):\n        collection = arvados.collection.Collection(api_client=self.api)\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n\n        d1 = llfuse.listdir(os.path.join(self.mounttmp))\n        self.assertEqual([], sorted(d1))\n\n        # See note in MountTestBase.setUp\n        self.pool.apply(fuseFileConflictTestHelper, (self.mounttmp, collection.manifest_locator(), self.keeptmp, arvados.config.settings()))\n\n\ndef fuseUnlinkOpenFileTest(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            with open(os.path.join(mounttmp, \"file1.txt\"), \"w+\") as f:\n                f.write(\"foo\")\n\n                d1 = llfuse.listdir(os.path.join(mounttmp))\n                self.assertEqual([\"file1.txt\"], sorted(d1))\n\n                os.remove(os.path.join(mounttmp, \"file1.txt\"))\n\n                d1 = llfuse.listdir(os.path.join(mounttmp))\n                self.assertEqual([], sorted(d1))\n\n                f.seek(0)\n                self.assertEqual(f.read(), \"foo\")\n                f.write(\"bar\")\n\n                f.seek(0)\n                self.assertEqual(f.read(), \"foobar\")\n\n    Test().runTest()\n\nclass FuseUnlinkOpenFileTest(MountTestBase):\n    def runTest(self):\n        collection = arvados.collection.Collection(api_client=self.api)\n        collection.save_new()\n\n        m = self.make_mount(fuse.CollectionDirectory)\n        with llfuse.lock:\n            m.new_collection(collection.api_response(), collection)\n\n        # See note in MountTestBase.setUp\n        self.pool.apply(fuseUnlinkOpenFileTest, (self.mounttmp,))\n\n        self.assertEqual(collection.manifest_text(), \"\")\n\n\ndef fuseMvFileBetweenCollectionsTest1(mounttmp, uuid1, uuid2):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            with open(os.path.join(mounttmp, uuid1, \"file1.txt\"), \"w\") as f:\n                f.write(\"Hello world!\")\n\n            d1 = os.listdir(os.path.join(mounttmp, uuid1))\n            self.assertEqual([\"file1.txt\"], sorted(d1))\n           
 d1 = os.listdir(os.path.join(mounttmp, uuid2))\n            self.assertEqual([], sorted(d1))\n\n    Test().runTest()\n\ndef fuseMvFileBetweenCollectionsTest2(mounttmp, uuid1, uuid2):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            os.rename(os.path.join(mounttmp, uuid1, \"file1.txt\"), os.path.join(mounttmp, uuid2, \"file2.txt\"))\n\n            d1 = os.listdir(os.path.join(mounttmp, uuid1))\n            self.assertEqual([], sorted(d1))\n            d1 = os.listdir(os.path.join(mounttmp, uuid2))\n            self.assertEqual([\"file2.txt\"], sorted(d1))\n\n    Test().runTest()\n\nclass FuseMvFileBetweenCollectionsTest(MountTestBase):\n    def runTest(self):\n        collection1 = arvados.collection.Collection(api_client=self.api)\n        collection1.save_new()\n\n        collection2 = arvados.collection.Collection(api_client=self.api)\n        collection2.save_new()\n\n        m = self.make_mount(fuse.MagicDirectory)\n\n        # See note in MountTestBase.setUp\n        self.pool.apply(fuseMvFileBetweenCollectionsTest1, (self.mounttmp,\n                                                  collection1.manifest_locator(),\n                                                  collection2.manifest_locator()))\n\n        collection1.update()\n        collection2.update()\n\n        self.assertRegex(collection1.manifest_text(), r\"\\. 86fb269d190d2c85f6e0468ceca42a20\\+12\\+A\\S+ 0:12:file1\\.txt$\")\n        self.assertEqual(collection2.manifest_text(), \"\")\n\n        self.pool.apply(fuseMvFileBetweenCollectionsTest2, (self.mounttmp,\n                                                  collection1.manifest_locator(),\n                                                  collection2.manifest_locator()))\n\n        collection1.update()\n        collection2.update()\n\n        self.assertEqual(collection1.manifest_text(), \"\")\n        self.assertRegex(collection2.manifest_text(), r\"\\. 
86fb269d190d2c85f6e0468ceca42a20\\+12\\+A\\S+ 0:12:file2\\.txt$\")\n\n        collection1.stop_threads()\n        collection2.stop_threads()\n\n\ndef fuseMvDirBetweenCollectionsTest1(mounttmp, uuid1, uuid2):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            os.mkdir(os.path.join(mounttmp, uuid1, \"testdir\"))\n            with open(os.path.join(mounttmp, uuid1, \"testdir\", \"file1.txt\"), \"w\") as f:\n                f.write(\"Hello world!\")\n\n            d1 = os.listdir(os.path.join(mounttmp, uuid1))\n            self.assertEqual([\"testdir\"], sorted(d1))\n            d1 = os.listdir(os.path.join(mounttmp, uuid1, \"testdir\"))\n            self.assertEqual([\"file1.txt\"], sorted(d1))\n\n            d1 = os.listdir(os.path.join(mounttmp, uuid2))\n            self.assertEqual([], sorted(d1))\n\n    Test().runTest()\n\n\ndef fuseMvDirBetweenCollectionsTest2(mounttmp, uuid1, uuid2):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            os.rename(os.path.join(mounttmp, uuid1, \"testdir\"), os.path.join(mounttmp, uuid2, \"testdir2\"))\n\n            d1 = os.listdir(os.path.join(mounttmp, uuid1))\n            self.assertEqual([], sorted(d1))\n\n            d1 = os.listdir(os.path.join(mounttmp, uuid2))\n            self.assertEqual([\"testdir2\"], sorted(d1))\n            d1 = os.listdir(os.path.join(mounttmp, uuid2, \"testdir2\"))\n            self.assertEqual([\"file1.txt\"], sorted(d1))\n\n            with open(os.path.join(mounttmp, uuid2, \"testdir2\", \"file1.txt\"), \"r\") as f:\n                self.assertEqual(f.read(), \"Hello world!\")\n\n    Test().runTest()\n\nclass FuseMvDirBetweenCollectionsTest(MountTestBase):\n    def runTest(self):\n        collection1 = arvados.collection.Collection(api_client=self.api)\n        collection1.save_new()\n\n        collection2 = arvados.collection.Collection(api_client=self.api)\n        collection2.save_new()\n\n        m = self.make_mount(fuse.MagicDirectory)\n\n        # See note in MountTestBase.setUp\n        self.pool.apply(fuseMvDirBetweenCollectionsTest1, (self.mounttmp,\n                                                  collection1.manifest_locator(),\n                                                  collection2.manifest_locator()))\n\n        collection1.update()\n        collection2.update()\n\n        self.assertRegex(collection1.manifest_text(), r\"\\./testdir 86fb269d190d2c85f6e0468ceca42a20\\+12\\+A\\S+ 0:12:file1\\.txt$\")\n        self.assertEqual(collection2.manifest_text(), \"\")\n\n        self.pool.apply(fuseMvDirBetweenCollectionsTest2, (self.mounttmp,\n                                                  collection1.manifest_locator(),\n                                                  collection2.manifest_locator()))\n\n        collection1.update()\n        collection2.update()\n\n        self.assertEqual(collection1.manifest_text(), \"\")\n        self.assertRegex(collection2.manifest_text(), r\"\\./testdir2 86fb269d190d2c85f6e0468ceca42a20\\+12\\+A\\S+ 0:12:file1\\.txt$\")\n\n        collection1.stop_threads()\n        collection2.stop_threads()\n\ndef fuseProjectMkdirTestHelper1(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            os.mkdir(os.path.join(mounttmp, \"testcollection\"))\n            with self.assertRaises(OSError):\n                os.mkdir(os.path.join(mounttmp, \"testcollection\"))\n    Test().runTest()\n\ndef fuseProjectMkdirTestHelper2(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n      
      with open(os.path.join(mounttmp, \"testcollection\", \"file1.txt\"), \"w\") as f:\n                f.write(\"Hello world!\")\n            with self.assertRaises(OSError):\n                os.rmdir(os.path.join(mounttmp, \"testcollection\"))\n            os.remove(os.path.join(mounttmp, \"testcollection\", \"file1.txt\"))\n            with self.assertRaises(OSError):\n                os.remove(os.path.join(mounttmp, \"testcollection\"))\n            os.rmdir(os.path.join(mounttmp, \"testcollection\"))\n    Test().runTest()\n\nclass FuseProjectMkdirRmdirTest(MountTestBase):\n    def runTest(self):\n        self.make_mount(fuse.ProjectDirectory,\n                        project_object=self.api.users().current().execute())\n\n        d1 = llfuse.listdir(self.mounttmp)\n        self.assertNotIn('testcollection', d1)\n\n        self.pool.apply(fuseProjectMkdirTestHelper1, (self.mounttmp,))\n\n        d1 = llfuse.listdir(self.mounttmp)\n        self.assertIn('testcollection', d1)\n\n        self.pool.apply(fuseProjectMkdirTestHelper2, (self.mounttmp,))\n\n        d1 = llfuse.listdir(self.mounttmp)\n        self.assertNotIn('testcollection', d1)\n\n\ndef fuseProjectMvTestHelper1(mounttmp):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            d1 = llfuse.listdir(mounttmp)\n            self.assertNotIn('testcollection', d1)\n\n            os.mkdir(os.path.join(mounttmp, \"testcollection\"))\n\n            d1 = llfuse.listdir(mounttmp)\n            self.assertIn('testcollection', d1)\n\n            with self.assertRaises(OSError):\n                os.rename(os.path.join(mounttmp, \"testcollection\"), os.path.join(mounttmp, 'Unrestricted public data'))\n\n            os.rename(os.path.join(mounttmp, \"testcollection\"), os.path.join(mounttmp, 'Unrestricted public data', 'testcollection'))\n\n            d1 = llfuse.listdir(mounttmp)\n            self.assertNotIn('testcollection', d1)\n\n            d1 = llfuse.listdir(os.path.join(mounttmp, 'Unrestricted public data'))\n            self.assertIn('testcollection', d1)\n\n    Test().runTest()\n\nclass FuseProjectMvTest(MountTestBase):\n    def runTest(self):\n        self.make_mount(fuse.ProjectDirectory,\n                        project_object=self.api.users().current().execute())\n\n        self.pool.apply(fuseProjectMvTestHelper1, (self.mounttmp,))\n\n\ndef fuseFsyncTestHelper(mounttmp, k):\n    class Test(unittest.TestCase):\n        def runTest(self):\n            fd = os.open(os.path.join(mounttmp, k), os.O_RDONLY)\n            os.fsync(fd)\n            os.close(fd)\n\n    Test().runTest()\n\nclass FuseFsyncTest(FuseMagicTest):\n    def runTest(self):\n        self.make_mount(fuse.MagicDirectory)\n        self.pool.apply(fuseFsyncTestHelper, (self.mounttmp, self.testcollection))\n\n\nclass MagicDirApiError(FuseMagicTest):\n    def setUp(self):\n        api = mock.MagicMock()\n        api.keep.block_cache = mock.MagicMock(cache_max=1)\n        super(MagicDirApiError, self).setUp(api=api)\n        api.collections().get().execute.side_effect = iter([\n            Exception('API fail'),\n            {\n                \"manifest_text\": self.test_manifest,\n                \"portable_data_hash\": self.test_manifest_pdh,\n            },\n        ])\n        api.keep.get.side_effect = Exception('Keep fail')\n\n    def runTest(self):\n        with mock.patch('arvados_fuse.fresh.FreshBase._poll_time', new_callable=mock.PropertyMock, return_value=60) as mock_poll_time:\n            self.make_mount(fuse.MagicDirectory)\n\n      
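      # With a cache cap of 1, the entry from the failed API call is evicted\n            # immediately, so the second listdir retries the lookup and succeeds.\n      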
      self.operations.inodes.inode_cache.cap = 1\n            self.operations.inodes.inode_cache.min_entries = 2\n\n            with self.assertRaises(OSError):\n                llfuse.listdir(os.path.join(self.mounttmp, self.testcollection))\n\n            llfuse.listdir(os.path.join(self.mounttmp, self.testcollection))\n\n\nclass SanitizeFilenameTest(MountTestBase):\n    def test_sanitize_filename(self):\n        pdir = fuse.ProjectDirectory(\n            1, fuse.Inodes(None), self.api, 0, False, None,\n            project_object=self.api.users().current().execute(),\n        )\n        acceptable = [\n            \"foo.txt\",\n            \".foo\",\n            \"..foo\",\n            \"...\",\n            \"foo...\",\n            \"foo..\",\n            \"foo.\",\n            \"-\",\n            \"\\x01\\x02\\x03\",\n            ]\n        unacceptable = [\n            \"f\\00\",\n            \"\\00\\00\",\n            \"/foo\",\n            \"foo/\",\n            \"//\",\n            ]\n        for f in acceptable:\n            self.assertEqual(f, pdir.sanitize_filename(f))\n        for f in unacceptable:\n            self.assertNotEqual(f, pdir.sanitize_filename(f))\n            # The sanitized filename should be the same length, though.\n            self.assertEqual(len(f), len(pdir.sanitize_filename(f)))\n        # Special cases\n        self.assertEqual(\"_\", pdir.sanitize_filename(\"\"))\n        self.assertEqual(\"_\", pdir.sanitize_filename(\".\"))\n        self.assertEqual(\"__\", pdir.sanitize_filename(\"..\"))\n\n\nclass FuseMagicTestPDHOnly(MountTestBase):\n    def setUp(self, api=None):\n        super(FuseMagicTestPDHOnly, self).setUp(api=api)\n\n        cw = arvados.collection.Collection()\n        with cw.open('thing1.txt', 'w') as f:\n            f.write('data 1')\n\n        self.testcollection = cw.portable_data_hash()\n        self.test_manifest = cw.manifest_text()\n        created = self.api.collections().create(body={\"manifest_text\":self.test_manifest}).execute()\n        self.testcollectionuuid = str(created['uuid'])\n\n    def verify_pdh_only(self, pdh_only=False, skip_pdh_only=False):\n        if skip_pdh_only is True:\n            self.make_mount(fuse.MagicDirectory)    # in this case, the default by_id applies\n        else:\n            self.make_mount(fuse.MagicDirectory, pdh_only=pdh_only)\n\n        mount_ls = llfuse.listdir(self.mounttmp)\n        self.assertIn('README', mount_ls)\n        self.assertFalse(any(arvados.util.keep_locator_pattern.match(fn) or\n                             arvados.util.uuid_pattern.match(fn)\n                             for fn in mount_ls),\n                         \"new FUSE MagicDirectory lists Collection\")\n\n        # look up using pdh should succeed in all cases\n        self.assertDirContents(self.testcollection, ['thing1.txt'])\n        self.assertDirContents(os.path.join('by_id', self.testcollection),\n                               ['thing1.txt'])\n        mount_ls = llfuse.listdir(self.mounttmp)\n        self.assertIn('README', mount_ls)\n        self.assertIn(self.testcollection, mount_ls)\n        self.assertIn(self.testcollection,\n                      llfuse.listdir(os.path.join(self.mounttmp, 'by_id')))\n\n        files = {}\n        files[os.path.join(self.mounttmp, self.testcollection, 'thing1.txt')] = 'data 1'\n\n        for k, v in files.items():\n            with open(os.path.join(self.mounttmp, k), 'rb') as f:\n                self.assertEqual(v, f.read().decode())\n\n        # look up using uuid 
should fail when pdh_only is set\n        if pdh_only is True:\n            with self.assertRaises(OSError):\n                self.assertDirContents(os.path.join('by_id', self.testcollectionuuid),\n                               ['thing1.txt'])\n        else:\n            self.assertDirContents(os.path.join('by_id', self.testcollectionuuid),\n                               ['thing1.txt'])\n\n    def test_with_pdh_only_true(self):\n        self.verify_pdh_only(pdh_only=True)\n\n    def test_with_pdh_only_false(self):\n        self.verify_pdh_only(pdh_only=False)\n\n    def test_with_default_by_id(self):\n        self.verify_pdh_only(skip_pdh_only=True)\n\n\nclass SlashSubstitutionTest(IntegrationTest):\n    mnt_args = [\n        '--read-write',\n        '--mount-home', 'zzz',\n        '--fsns', '[SLASH]'\n    ]\n\n    def setUp(self):\n        super(SlashSubstitutionTest, self).setUp()\n\n        self.api = arvados.safeapi.ThreadSafeApiCache(\n            arvados.config.settings(),\n            version='v1'\n        )\n        self.testcoll = self.api.collections().create(body={\"name\": \"foo/bar/baz\"}).execute()\n        self.testcolleasy = self.api.collections().create(body={\"name\": \"foo-bar-baz\"}).execute()\n        self.fusename = 'foo[SLASH]bar[SLASH]baz'\n\n    @IntegrationTest.mount(argv=mnt_args)\n    def test_slash_substitution_before_listing(self):\n        self.pool_test(os.path.join(self.mnt, 'zzz'), self.fusename)\n        self.checkContents()\n    @staticmethod\n    def _test_slash_substitution_before_listing(self, tmpdir, fusename):\n        with open(os.path.join(tmpdir, 'foo-bar-baz', 'waz'), 'w') as f:\n            f.write('xxx')\n        with open(os.path.join(tmpdir, fusename, 'waz'), 'w') as f:\n            f.write('foo')\n\n    @IntegrationTest.mount(argv=mnt_args)\n    @mock.patch('arvados.util.get_config_once')\n    def test_slash_substitution_after_listing(self, get_config_once):\n        get_config_once.return_value = {\"Collections\": {\"ForwardSlashNameSubstitution\": \"[SLASH]\"}}\n        self.pool_test(os.path.join(self.mnt, 'zzz'), self.fusename)\n        self.checkContents()\n    @staticmethod\n    def _test_slash_substitution_after_listing(self, tmpdir, fusename):\n        with open(os.path.join(tmpdir, 'foo-bar-baz', 'waz'), 'w') as f:\n            f.write('xxx')\n        os.listdir(tmpdir)\n        with open(os.path.join(tmpdir, fusename, 'waz'), 'w') as f:\n            f.write('foo')\n\n    def checkContents(self):\n        self.assertRegex(self.api.collections().get(uuid=self.testcoll['uuid']).execute()['manifest_text'], r' acbd18db') # md5(foo)\n        self.assertRegex(self.api.collections().get(uuid=self.testcolleasy['uuid']).execute()['manifest_text'], r' f561aaf6') # md5(xxx)\n\n    @IntegrationTest.mount(argv=mnt_args)\n    @mock.patch('arvados.util.get_config_once')\n    def test_slash_substitution_conflict(self, get_config_once):\n        self.testcollconflict = self.api.collections().create(body={\"name\": self.fusename}).execute()\n        get_config_once.return_value = {\"Collections\": {\"ForwardSlashNameSubstitution\": \"[SLASH]\"}}\n        self.pool_test(os.path.join(self.mnt, 'zzz'), self.fusename)\n        self.assertRegex(self.api.collections().get(uuid=self.testcollconflict['uuid']).execute()['manifest_text'], r' acbd18db') # md5(foo)\n        # foo/bar/baz collection unchanged, because it is masked by foo[SLASH]bar[SLASH]baz\n        
self.assertEqual(self.api.collections().get(uuid=self.testcoll['uuid']).execute()['manifest_text'], '')\n    @staticmethod\n    def _test_slash_substitution_conflict(self, tmpdir, fusename):\n        with open(os.path.join(tmpdir, fusename, 'waz'), 'w') as f:\n            f.write('foo')\n\nclass StorageClassesTest(IntegrationTest):\n    mnt_args = [\n        '--read-write',\n        '--mount-home', 'homedir',\n    ]\n\n    def setUp(self):\n        super(StorageClassesTest, self).setUp()\n        self.api = arvados.safeapi.ThreadSafeApiCache(\n            arvados.config.settings(),\n            version='v1',\n        )\n\n    @IntegrationTest.mount(argv=mnt_args)\n    def test_collection_default_storage_classes(self):\n        coll_path = os.path.join(self.mnt, 'homedir', 'a_collection')\n        self.api.collections().create(body={'name':'a_collection'}).execute()\n        self.pool_test(coll_path)\n    @staticmethod\n    def _test_collection_default_storage_classes(self, coll):\n        self.assertEqual(storage_classes_desired(coll), ['default'])\n\n    @IntegrationTest.mount(argv=mnt_args+['--storage-classes', 'foo'])\n    def test_collection_custom_storage_classes(self):\n        coll_path = os.path.join(self.mnt, 'homedir', 'new_coll')\n        os.mkdir(coll_path)\n        self.pool_test(coll_path)\n    @staticmethod\n    def _test_collection_custom_storage_classes(self, coll):\n        self.assertEqual(storage_classes_desired(coll), ['foo'])\n\ndef _readonlyCollectionTestHelper(mounttmp):\n    f = open(os.path.join(mounttmp, 'thing1.txt'), 'rt')\n    # Testing that close() doesn't raise an error.\n    f.close()\n\nclass ReadonlyCollectionTest(MountTestBase):\n    def setUp(self):\n        super(ReadonlyCollectionTest, self).setUp()\n        cw = arvados.collection.Collection()\n        with cw.open('thing1.txt', 'wt') as f:\n            f.write(\"data 1\")\n        cw.save_new(owner_uuid=run_test_server.fixture(\"groups\")[\"aproject\"][\"uuid\"])\n        self.testcollection = cw.api_response()\n\n    def runTest(self):\n        settings = arvados.config.settings().copy()\n        settings[\"ARVADOS_API_TOKEN\"] = run_test_server.fixture(\"api_client_authorizations\")[\"project_viewer\"][\"api_token\"]\n        self.api = arvados.safeapi.ThreadSafeApiCache(settings, version='v1')\n        self.make_mount(fuse.CollectionDirectory, collection_record=self.testcollection, enable_write=False)\n\n        self.pool.apply(_readonlyCollectionTestHelper, (self.mounttmp,))\n\n\n@parameterized.parameterized_class([\n    {'root_class': fusedir.ProjectDirectory, 'root_kwargs': {\n        'project_object': run_test_server.fixture('users')['admin'],\n    }},\n    {'root_class': fusedir.ProjectDirectory, 'root_kwargs': {\n        'project_object': run_test_server.fixture('groups')['public'],\n    }},\n])\nclass UnsupportedCreateTest(MountTestBase):\n    root_class = None\n    root_kwargs = {}\n\n    def setUp(self):\n        super().setUp()\n        if 'prefs' in self.root_kwargs.get('project_object', ()):\n            self.root_kwargs['project_object']['prefs'] = {}\n        self.make_mount(self.root_class, **self.root_kwargs)\n        # Make sure the directory knows about its top-level ents.\n        os.listdir(self.mounttmp)\n\n    def test_create(self):\n        test_path = Path(self.mounttmp, 'test_create')\n        with self.assertRaises(OSError) as exc_check:\n            with test_path.open('w'):\n                pass\n        self.assertEqual(exc_check.exception.errno, errno.ENOTSUP)\n\n\n# 
FIXME: IMO, for consistency with the \"create inside a project\" case,\n# these operations should also return ENOTSUP instead of EPERM.\n# Right now they're returning EPERM because the classes' writable() method\n# usually returns False, and the Operations class transforms that accordingly.\n# However, for cases where the mount will never be writable, I think ENOTSUP\n# is a clearer error: it lets the user know they can't fix the problem by\n# adding permissions in Arvados, etc.\n@parameterized.parameterized_class([\n    {'root_class': fusedir.MagicDirectory,\n     'preset_dir': 'by_id',\n     'preset_file': 'README',\n     },\n\n    {'root_class': fusedir.SharedDirectory,\n     'root_kwargs': {\n         'exclude': run_test_server.fixture('users')['admin']['uuid'],\n     },\n     'preset_dir': 'Active User',\n     },\n\n    {'root_class': fusedir.TagDirectory,\n     'root_kwargs': {\n         'tag': run_test_server.fixture('links')['foo_collection_tag']['name'],\n     },\n     'preset_dir': run_test_server.fixture('collections')['foo_collection_in_aproject']['uuid'],\n     },\n\n    {'root_class': fusedir.TagsDirectory,\n     'preset_dir': run_test_server.fixture('links')['foo_collection_tag']['name'],\n     },\n])\nclass UnsupportedOperationsTest(UnsupportedCreateTest):\n    preset_dir = None\n    preset_file = None\n\n    def test_create(self):\n        test_path = Path(self.mounttmp, 'test_create')\n        with self.assertRaises(OSError) as exc_check:\n            with test_path.open('w'):\n                pass\n        self.assertEqual(exc_check.exception.errno, errno.EPERM)\n\n    def test_mkdir(self):\n        test_path = Path(self.mounttmp, 'test_mkdir')\n        with self.assertRaises(OSError) as exc_check:\n            test_path.mkdir()\n        self.assertEqual(exc_check.exception.errno, errno.EPERM)\n\n    def test_rename(self):\n        src_name = self.preset_dir or self.preset_file\n        if src_name is None:\n            return\n        test_src = Path(self.mounttmp, src_name)\n        test_dst = test_src.with_name('test_dst')\n        with self.assertRaises(OSError) as exc_check:\n            test_src.rename(test_dst)\n        self.assertEqual(exc_check.exception.errno, errno.EPERM)\n\n    def test_rmdir(self):\n        if self.preset_dir is None:\n            return\n        test_path = Path(self.mounttmp, self.preset_dir)\n        with self.assertRaises(OSError) as exc_check:\n            test_path.rmdir()\n        self.assertEqual(exc_check.exception.errno, errno.EPERM)\n\n    def test_unlink(self):\n        if self.preset_file is None:\n            return\n        test_path = Path(self.mounttmp, self.preset_file)\n        with self.assertRaises(OSError) as exc_check:\n            test_path.unlink()\n        self.assertEqual(exc_check.exception.errno, errno.EPERM)\n\n\nclass DockerRaceTest(MountTestBase):\n    \"\"\"Test race condition when docker mount point is in a collection.\n\n    Before the fix, this test would occasionally fail with either (a) empty\n    stdout, as if test.sh was an empty shell script, or (b) a docker daemon\n    error like this on stderr:\n\n    docker: Error response from daemon: failed to create task for\n    container: failed to create shim task: OCI runtime create failed:\n    runc create failed: unable to start container process: error\n    during container init: error mounting \"/tmp/tmphd8ezs8s\" to rootfs\n    at \"/mnt/test.sh\": mount src=/tmp/tmphd8ezs8s, dst=/mnt/test.sh,\n    dstFd=/proc/thread-self/fd/8, flags=0x5000: no such 
file or\n    directory: unknown\n\n    See #23136\n    \"\"\"\n\n    def runTest(self):\n        self.make_mount(fuse.TmpCollectionDirectory, fuse_options=[\"allow_other\"])\n        os.chmod(self.mounttmp, 0o755)\n        with tempfile.NamedTemporaryFile(suffix='.sh') as scriptfile:\n            scriptfile.write(b\"#!/bin/sh\\necho OK\\n\")\n            scriptfile.flush()\n            os.chmod(scriptfile.name, 0o755)\n            for _ in range(10):\n                dockerrun = subprocess.run(\n                    [\"docker\", \"run\",\n                     \"--rm\",\n                     \"--workdir\", \"/mnt\",\n                     \"--mount\", f\"type=bind,dst=/mnt,src={self.mounttmp}\",\n                     \"--mount\", f\"type=bind,dst=/mnt/test.sh,src={scriptfile.name}\",\n                     \"busybox:uclibc\", \"sh\", \"test.sh\"],\n                    stdout=subprocess.PIPE,\n                    stderr=2)\n                self.assertEqual(dockerrun.returncode, 0)\n                self.assertEqual(dockerrun.stdout, b\"OK\\n\")\n                os.unlink(os.path.join(self.mounttmp, \"test.sh\"))\n"
  },
  {
    "path": "services/fuse/tests/test_mount_filters.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport collections\nimport itertools\nimport json\nimport re\nimport unittest\n\nfrom pathlib import Path\n\nfrom parameterized import parameterized\n\nfrom arvados_fuse import fusedir\n\nfrom .integration_test import IntegrationTest\nfrom .mount_test_base import MountTestBase\nfrom .run_test_server import fixture\n\n_COLLECTIONS = fixture('collections')\n_GROUPS = fixture('groups')\n_LINKS = fixture('links')\n_USERS = fixture('users')\n\nclass DirectoryFiltersTestCase(MountTestBase):\n    DEFAULT_ROOT_KWARGS = {\n        'enable_write': False,\n        'filters': [\n            ['collections.name', 'like', 'zzzzz-4zz18-%'],\n            # This matches both \"A Project\" (which we use as the test root)\n            # and \"A Subproject\" (which we assert is found under it).\n            ['groups.name', 'like', 'A %roject'],\n        ],\n    }\n    EXPECTED_PATHS = frozenset([\n        _COLLECTIONS['foo_collection_in_aproject']['name'],\n        _GROUPS['asubproject']['name'],\n    ])\n    CHECKED_PATHS = EXPECTED_PATHS.union([\n        _COLLECTIONS['collection_to_move_around_in_aproject']['name'],\n        _GROUPS['subproject_in_active_user_home_project_to_test_unique_key_violation']['name'],\n    ])\n\n    @parameterized.expand([\n        (fusedir.MagicDirectory, {}, _GROUPS['aproject']['uuid']),\n        (fusedir.ProjectDirectory, {'project_object': _GROUPS['aproject']}, '.'),\n        (fusedir.SharedDirectory, {'exclude': None}, Path(\n            '{first_name} {last_name}'.format_map(_USERS['active']),\n            _GROUPS['aproject']['name'],\n        )),\n    ])\n    def test_filtered_path_exists(self, root_class, root_kwargs, subdir):\n        root_kwargs = collections.ChainMap(root_kwargs, self.DEFAULT_ROOT_KWARGS)\n        self.make_mount(root_class, **root_kwargs)\n        dir_path = Path(self.mounttmp, subdir)\n        actual = frozenset(\n            basename\n            for basename in self.CHECKED_PATHS\n            if (dir_path / basename).exists()\n        )\n        self.assertEqual(\n            actual,\n            self.EXPECTED_PATHS,\n            \"mount existence checks did not match expected results\",\n        )\n\n    @parameterized.expand([\n        (fusedir.MagicDirectory, {}, _GROUPS['aproject']['uuid']),\n        (fusedir.ProjectDirectory, {'project_object': _GROUPS['aproject']}, '.'),\n        (fusedir.SharedDirectory, {'exclude': None}, Path(\n            '{first_name} {last_name}'.format_map(_USERS['active']),\n            _GROUPS['aproject']['name'],\n        )),\n    ])\n    def test_filtered_path_listing(self, root_class, root_kwargs, subdir):\n        root_kwargs = collections.ChainMap(root_kwargs, self.DEFAULT_ROOT_KWARGS)\n        self.make_mount(root_class, **root_kwargs)\n        actual = frozenset(path.name for path in Path(self.mounttmp, subdir).iterdir())\n        self.assertEqual(\n            actual & self.EXPECTED_PATHS,\n            self.EXPECTED_PATHS,\n            \"mount listing did not include minimum matches\",\n        )\n        extra = frozenset(\n            name\n            for name in actual\n            if not (name.startswith('zzzzz-4zz18-') or name.endswith('roject'))\n        )\n        self.assertFalse(\n            extra,\n            \"mount listing included results outside filters\",\n        )\n\n\nclass TagFiltersTestCase(MountTestBase):\n    COLL_UUID = _COLLECTIONS['foo_collection_in_aproject']['uuid']\n 
   TAG_NAME = _LINKS['foo_collection_tag']['name']\n\n    @parameterized.expand([\n        '=',\n        '!=',\n    ])\n    def test_tag_directory_filters(self, op):\n        self.make_mount(\n            fusedir.TagDirectory,\n            enable_write=False,\n            filters=[\n                ['links.head_uuid', op, self.COLL_UUID],\n            ],\n            tag=self.TAG_NAME,\n        )\n        checked_path = Path(self.mounttmp, self.COLL_UUID)\n        self.assertEqual(checked_path.exists(), op == '=')\n\n    @parameterized.expand(itertools.product(\n        ['in', 'not in'],\n        ['=', '!='],\n    ))\n    def test_tags_directory_filters(self, coll_op, link_op):\n        self.make_mount(\n            fusedir.TagsDirectory,\n            enable_write=False,\n            filters=[\n                ['links.head_uuid', coll_op, [self.COLL_UUID]],\n                ['links.name', link_op, self.TAG_NAME],\n            ],\n        )\n        if link_op == '!=':\n            filtered_path = Path(self.mounttmp, self.TAG_NAME)\n        elif coll_op == 'not in':\n            # As of 2024-02-09, foo tag only applies to the single collection.\n            # If you filter it out via head_uuid, then it disappears completely\n            # from the TagsDirectory. Hence we set that tag directory as\n            # filtered_path. If any of this changes in the future,\n            # it would be fine to append self.COLL_UUID to filtered_path here.\n            filtered_path = Path(self.mounttmp, self.TAG_NAME)\n        else:\n            filtered_path = Path(self.mounttmp, self.TAG_NAME, self.COLL_UUID, 'foo', 'nonexistent')\n        expect_path = filtered_path.parent\n        self.assertTrue(\n            expect_path.exists(),\n            f\"path not found but should exist: {expect_path}\",\n        )\n        self.assertFalse(\n            filtered_path.exists(),\n            f\"path was found but should be filtered out: {filtered_path}\",\n        )\n\n\nclass FiltersIntegrationTest(IntegrationTest):\n    COLLECTIONS_BY_PROP = {\n        coll['properties']['MainFile']: coll\n        for coll in _COLLECTIONS.values()\n        if coll['owner_uuid'] == _GROUPS['fuse_filters_test_project']['uuid']\n    }\n    PROP_VALUES = list(COLLECTIONS_BY_PROP)\n\n    for test_n, query in enumerate(['foo', 'ba?']):\n        @IntegrationTest.mount([\n            '--filters', json.dumps([\n                ['collections.properties.MainFile', 'like', query],\n            ]),\n            '--mount-by-pdh', 'by_pdh',\n            '--mount-by-id', 'by_id',\n            '--mount-home', 'home',\n        ])\n        def _test_func(self, query=query):\n            pdh_path = Path(self.mnt, 'by_pdh')\n            id_path = Path(self.mnt, 'by_id')\n            home_path = Path(self.mnt, 'home')\n            query_re = re.compile(query.replace('?', '.'))\n            for prop_val, coll in self.COLLECTIONS_BY_PROP.items():\n                should_exist = query_re.fullmatch(prop_val) is not None\n                for path in [\n                        pdh_path / coll['portable_data_hash'],\n                        id_path / coll['portable_data_hash'],\n                        id_path / coll['uuid'],\n                        home_path / coll['name'],\n                ]:\n                    self.assertEqual(\n                        path.exists(),\n                        should_exist,\n                        f\"{path} from MainFile={prop_val} exists!={should_exist}\",\n                    )\n        
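# Bind the generated function to a unique, numbered name so each loop\n        # iteration contributes its own test method instead of overwriting\n        # _test_func.\n        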
exec(f\"test_collection_properties_filters_{test_n} = _test_func\")\n\n    for test_n, mount_opts in enumerate([\n            ['--home'],\n            ['--project', _GROUPS['aproject']['uuid']],\n    ]):\n        @IntegrationTest.mount([\n            '--filters', json.dumps([\n                ['collections.name', 'like', 'zzzzz-4zz18-%'],\n                ['groups.name', 'like', 'A %roject'],\n            ]),\n            *mount_opts,\n        ])\n        def _test_func(self, mount_opts=mount_opts):\n            root_path = Path(self.mnt)\n            root_depth = len(root_path.parts)\n            max_depth = 0\n            name_re = re.compile(r'(zzzzz-4zz18-.*|A .*roject)')\n            dir_queue = [root_path]\n            while dir_queue:\n                root_path = dir_queue.pop()\n                max_depth = max(max_depth, len(root_path.parts))\n                for child in root_path.iterdir():\n                    if not child.is_dir():\n                        continue\n                    match = name_re.fullmatch(child.name)\n                    self.assertIsNotNone(\n                        match,\n                        \"found directory with name that should've been filtered\",\n                    )\n                    if not match.group(1).startswith('zzzzz-4zz18-'):\n                        dir_queue.append(child)\n            self.assertGreaterEqual(\n                max_depth,\n                root_depth + (2 if mount_opts[0] == '--home' else 1),\n                \"test descended fewer subdirectories than expected\",\n            )\n        exec(f\"test_multiple_name_filters_{test_n} = _test_func\")\n"
  },
  {
    "path": "services/fuse/tests/test_mount_type.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport logging\nimport subprocess\n\nfrom .integration_test import IntegrationTest\n\nlogger = logging.getLogger('arvados.arv-mount')\n\n\nclass MountTypeTest(IntegrationTest):\n    @IntegrationTest.mount(argv=[\"--subtype=arv-mount-test\"])\n    def test_mount_type(self):\n        self.pool_test(self.mnt)\n\n    @staticmethod\n    def _test_mount_type(self, mnt):\n        self.assertEqual([\"fuse.arv-mount-test\"], [\n            toks[4]\n            for toks in [\n                line.split(' ')\n                for line in subprocess.check_output(\"mount\").decode().split(\"\\n\")\n            ]\n            if len(toks) > 4 and toks[2] == mnt\n        ])\n"
  },
  {
    "path": "services/fuse/tests/test_retry.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados\nimport arvados_fuse.command\nimport json\nimport os\nimport pycurl\nimport queue\nimport tempfile\nimport unittest\n\nfrom unittest import mock\n\nfrom . import run_test_server\nfrom .integration_test import IntegrationTest\n\nclass KeepClientRetry(unittest.TestCase):\n    origKeepClient = arvados.keep.KeepClient\n\n    def setUp(self):\n        self.mnt = tempfile.mkdtemp()\n        run_test_server.authorize_with('active')\n\n    def tearDown(self):\n        os.rmdir(self.mnt)\n\n    @mock.patch('arvados_fuse.arvados.keep.KeepClient')\n    def _test_retry(self, num_retries, argv, kc):\n        kc.side_effect = lambda *args, **kw: self.origKeepClient(*args, **kw)\n        with arvados_fuse.command.Mount(\n                arvados_fuse.command.ArgumentParser().parse_args(\n                    argv+[self.mnt])):\n            pass\n        self.assertEqual(num_retries, kc.call_args[1].get('num_retries'))\n\n    def test_default_retry_10(self):\n        self._test_retry(10, [])\n\n    def test_retry_2(self):\n        self._test_retry(2, ['--retries=2'])\n\n    def test_no_retry(self):\n        self._test_retry(0, ['--retries=0'])\n\nclass RetryPUT(IntegrationTest):\n    @mock.patch('time.sleep')\n    @IntegrationTest.mount(argv=['--read-write', '--mount-tmp=zzz'])\n    def test_retry_write(self, sleep):\n        mockedCurl = mock.Mock(spec=pycurl.Curl(), wraps=pycurl.Curl())\n        mockedCurl.perform.side_effect = Exception('mock error (ok)')\n        q = queue.Queue()\n        q.put(mockedCurl)\n        q.put(pycurl.Curl())\n        q.put(pycurl.Curl())\n        with mock.patch('arvados.keep.KeepClient._KeepService._get_user_agent', side_effect=q.get_nowait):\n            self.pool_test(os.path.join(self.mnt, 'zzz'))\n            self.assertTrue(mockedCurl.perform.called)\n    @staticmethod\n    def _test_retry_write(self, tmp):\n        with open(os.path.join(tmp, 'foo'), 'w') as f:\n            f.write('foo')\n        json.load(open(os.path.join(tmp, '.arvados#collection')))\n"
  },
  {
    "path": "services/fuse/tests/test_tmp_collection.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados\nimport arvados_fuse\nimport arvados_fuse.command\nimport json\nimport logging\nimport os\nimport tempfile\nimport unittest\n\nfrom .integration_test import IntegrationTest\nfrom .mount_test_base import MountTestBase\n\nlogger = logging.getLogger('arvados.arv-mount')\n\nclass TmpCollectionArgsTest(unittest.TestCase):\n    def setUp(self):\n        self.tmpdir = tempfile.mkdtemp()\n\n    def tearDown(self):\n        os.rmdir(self.tmpdir)\n\n    def test_tmp_only(self):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--mount-tmp', 'tmp1',\n            '--mount-tmp', 'tmp2',\n            self.tmpdir,\n        ])\n        self.assertIn(args.mode, [None, 'custom'])\n        self.assertEqual(['tmp1', 'tmp2'], args.mount_tmp)\n        for mtype in ['home', 'shared', 'by_id', 'by_pdh', 'by_tag']:\n            self.assertEqual([], getattr(args, 'mount_'+mtype))\n\n    def test_tmp_and_home(self):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            '--mount-tmp', 'test_tmp',\n            '--mount-home', 'test_home',\n            self.tmpdir,\n        ])\n        self.assertIn(args.mode, [None, 'custom'])\n        self.assertEqual(['test_tmp'], args.mount_tmp)\n        self.assertEqual(['test_home'], args.mount_home)\n\n    def test_no_tmp(self):\n        args = arvados_fuse.command.ArgumentParser().parse_args([\n            self.tmpdir,\n        ])\n        self.assertEqual([], args.mount_tmp)\n\n\ndef current_manifest(tmpdir):\n    with open(os.path.join(tmpdir, '.arvados#collection')) as tmp:\n        return json.load(tmp)['manifest_text']\n\ndef storage_classes_desired(tmpdir):\n    with open(os.path.join(tmpdir, '.arvados#collection')) as tmp:\n        return json.load(tmp)['storage_classes_desired']\n\nclass TmpCollectionTest(IntegrationTest):\n    mnt_args = [\n        '--read-write',\n        '--mount-tmp', 'zzz',\n    ]\n\n    @IntegrationTest.mount(argv=mnt_args+['--storage-classes', 'foo, bar'])\n    def test_storage_classes(self):\n        self.pool_test(os.path.join(self.mnt, 'zzz'))\n    @staticmethod\n    def _test_storage_classes(self, zzz):\n        self.assertEqual(storage_classes_desired(zzz), ['foo', 'bar'])\n\n    @IntegrationTest.mount(argv=mnt_args+['--mount-tmp', 'yyy'])\n    def test_two_tmp(self):\n        self.pool_test(os.path.join(self.mnt, 'zzz'),\n                       os.path.join(self.mnt, 'yyy'))\n    @staticmethod\n    def _test_two_tmp(self, zzz, yyy):\n        self.assertEqual(current_manifest(zzz), \"\")\n        self.assertEqual(current_manifest(yyy), \"\")\n        with open(os.path.join(zzz, 'foo'), 'w') as f:\n            f.write('foo')\n        self.assertNotEqual(current_manifest(zzz), \"\")\n        self.assertEqual(current_manifest(yyy), \"\")\n        os.unlink(os.path.join(zzz, 'foo'))\n        with open(os.path.join(yyy, 'bar'), 'w') as f:\n            f.write('bar')\n        self.assertEqual(current_manifest(zzz), \"\")\n        self.assertNotEqual(current_manifest(yyy), \"\")\n\n    @IntegrationTest.mount(argv=mnt_args)\n    def test_tmp_empty(self):\n        self.pool_test(os.path.join(self.mnt, 'zzz'))\n    @staticmethod\n    def _test_tmp_empty(self, tmpdir):\n        self.assertEqual(current_manifest(tmpdir), \"\")\n\n    @IntegrationTest.mount(argv=mnt_args)\n    def test_tmp_onefile(self):\n        self.pool_test(os.path.join(self.mnt, 'zzz'))\n    
@staticmethod\n    def _test_tmp_onefile(self, tmpdir):\n        with open(os.path.join(tmpdir, 'foo'), 'w') as f:\n            f.write('foo')\n        self.assertRegex(\n            current_manifest(tmpdir),\n            r'^\\. acbd18db4cc2f85cedef654fccc4a4d8\\+3(\\+\\S+)? 0:3:foo\\n$')\n\n    @IntegrationTest.mount(argv=mnt_args)\n    def test_tmp_snapshots(self):\n        self.pool_test(os.path.join(self.mnt, 'zzz'))\n    @staticmethod\n    def _test_tmp_snapshots(self, tmpdir):\n        ops = [\n            ('foo', 'bar',\n             r'^\\. 37b51d194a7513e45b56f6524f2d51f2\\+3(\\+\\S+)? 0:3:foo\\n$'),\n            ('foo', 'foo',\n             r'^\\. acbd18db4cc2f85cedef654fccc4a4d8\\+3(\\+\\S+)? 0:3:foo\\n$'),\n            ('bar', 'bar',\n             r'^\\. 37b51d194a7513e45b56f6524f2d51f2\\+3(\\+\\S+)? acbd18db4cc2f85cedef654fccc4a4d8\\+3(\\+\\S+)? 0:3:bar 3:3:foo\\n$'),\n            ('foo', None,\n             r'^\\. 37b51d194a7513e45b56f6524f2d51f2\\+3(\\+\\S+)? 0:3:bar\\n$'),\n            ('bar', None,\n             r'^$'),\n        ]\n        for _ in range(10):\n            for fn, content, expect in ops:\n                path = os.path.join(tmpdir, fn)\n                if content is None:\n                    os.unlink(path)\n                else:\n                    with open(path, 'w') as f:\n                        f.write(content)\n                self.assertRegex(current_manifest(tmpdir), expect)\n\n    @IntegrationTest.mount(argv=mnt_args)\n    def test_tmp_rewrite(self):\n        self.pool_test(os.path.join(self.mnt, 'zzz'))\n    @staticmethod\n    def _test_tmp_rewrite(self, tmpdir):\n        with open(os.path.join(tmpdir, \"b1\"), 'w') as f:\n            f.write(\"b1\")\n        with open(os.path.join(tmpdir, \"b2\"), 'w') as f:\n            f.write(\"b2\")\n        with open(os.path.join(tmpdir, \"b1\"), 'w') as f:\n            f.write(\"1b\")\n        self.assertRegex(current_manifest(tmpdir), r'^\\. ed4f3f67c70b02b29c50ce1ea26666bd\\+4(\\+\\S+)? 0:2:b1 2:2:b2\\n$')\n"
  },
  {
    "path": "services/fuse/tests/test_token_expiry.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport apiclient\nimport arvados\nimport arvados_fuse\nimport logging\nimport multiprocessing\nimport os\nimport re\nimport sys\nimport time\nimport unittest\n\nfrom unittest import mock\n\nfrom .integration_test import IntegrationTest\n\nlogger = logging.getLogger('arvados.arv-mount')\n\nclass TokenExpiryTest(IntegrationTest):\n    def setUp(self):\n        super(TokenExpiryTest, self).setUp()\n        self.test_start_time = time.time()\n        self.time_now = int(time.time())+1\n\n    def fake_time(self):\n        self.time_now += 1\n        return self.time_now\n\n    orig_open = arvados_fuse.Operations.open\n    def fake_open(self, operations, *args, **kwargs):\n        self.time_now += 86400*13\n        logger.debug('opening file at time=%f', self.time_now)\n        return TokenExpiryTest.orig_open(operations, *args, **kwargs)\n\n    @mock.patch.object(arvados_fuse.Operations, 'open', autospec=True)\n    @mock.patch.object(time, 'time', return_value=0)\n    @mock.patch('arvados.keep.KeepClient.get')\n    @IntegrationTest.mount(argv=['--mount-by-id', 'zzz'])\n    def test_refresh_old_manifest(self, mocked_get, mocked_time, mocked_open):\n        # This test (and associated behavior) is still not strong\n        # enough. We should ensure old tokens are never used even if\n        # blobSignatureTtl seconds elapse between open() and\n        # read(). See https://dev.arvados.org/issues/10008\n\n        mocked_get.return_value = b'fake data'\n        mocked_time.side_effect = self.fake_time\n        mocked_open.side_effect = self.fake_open\n\n        with mock.patch.object(self.mount.api, 'collections', wraps=self.mount.api.collections) as mocked_collections:\n            mocked_collections.return_value = mocked_collections()\n            with mock.patch.object(self.mount.api.collections(), 'get', wraps=self.mount.api.collections().get) as mocked_get:\n                self.pool_test(os.path.join(self.mnt, 'zzz'))\n\n        # open() several times here to make sure we don't reach our\n        # quota of mocked_get.call_count dishonestly (e.g., the first\n        # open causes 5 mocked_get, and the rest cause none).\n        self.assertEqual(8, mocked_open.call_count)\n        self.assertGreaterEqual(\n            mocked_get.call_count, 8,\n            'Not enough calls to collections().get(): expected 8, got {!r}'.format(\n                mocked_get.mock_calls))\n\n    @staticmethod\n    def _test_refresh_old_manifest(self, zzz):\n        uuid = 'zzzzz-4zz18-logcollection02'\n        fnm = 'crunch-run.txt'\n        os.listdir(os.path.join(zzz, uuid))\n        for _ in range(8):\n            with open(os.path.join(zzz, uuid, fnm)) as f:\n                f.read()\n"
  },
  {
    "path": "services/fuse/tests/test_unmount.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados_fuse.unmount\nimport os\nimport subprocess\nimport shutil\nimport tempfile\nimport time\nimport unittest\n\nfrom .integration_test import IntegrationTest\n\nclass UnmountTest(IntegrationTest):\n    def setUp(self):\n        super(UnmountTest, self).setUp()\n        self.tmp = self.mnt\n        self.to_delete = []\n\n    def tearDown(self):\n        for d in self.to_delete:\n            os.rmdir(d)\n        super(UnmountTest, self).tearDown()\n\n    def test_replace(self):\n        subprocess.check_call(\n            ['./bin/arv-mount', '--subtype', 'test', '--replace',\n             self.mnt])\n        subprocess.check_call(\n            ['./bin/arv-mount', '--subtype', 'test', '--replace',\n             '--unmount-timeout', '60',\n             self.mnt])\n        subprocess.check_call(\n            ['./bin/arv-mount', '--subtype', 'test', '--replace',\n             '--unmount-timeout', '60',\n             self.mnt,\n             '--exec', 'true'])\n        for m in subprocess.check_output(['mount']).splitlines():\n            expected = bytes(' ' + self.mnt + ' ', encoding='utf-8')\n            self.assertNotIn(expected, m)\n\n    def _mounted(self, mounts):\n        all_mounts = subprocess.check_output(['mount'])\n        return [m for m in mounts\n                if bytes(' ' + m + ' ', encoding='utf-8') in all_mounts]\n\n    def _wait_for_mounts(self, mounts):\n        deadline = time.time() + 10\n        while self._mounted(mounts) != mounts:\n            time.sleep(0.1)\n            self.assertLess(time.time(), deadline)\n\n    def test_unmount_subtype(self):\n        mounts = []\n        for d in ['foo', 'bar']:\n            mnt = self.tmp+'/'+d\n            os.mkdir(mnt)\n            self.to_delete.insert(0, mnt)\n            mounts.append(mnt)\n            subprocess.check_call(\n                ['./bin/arv-mount', '--subtype', d, mnt])\n\n        self._wait_for_mounts(mounts)\n        self.assertEqual(mounts, self._mounted(mounts))\n        subprocess.call(['./bin/arv-mount', '--subtype', 'baz', '--unmount-all', self.tmp])\n        self.assertEqual(mounts, self._mounted(mounts))\n        subprocess.call(['./bin/arv-mount', '--subtype', 'bar', '--unmount', mounts[0]])\n        self.assertEqual(mounts, self._mounted(mounts))\n        subprocess.call(['./bin/arv-mount', '--subtype', '', '--unmount', self.tmp])\n        self.assertEqual(mounts, self._mounted(mounts))\n        subprocess.check_call(['./bin/arv-mount', '--subtype', 'foo', '--unmount', mounts[0]])\n        self.assertEqual(mounts[1:], self._mounted(mounts))\n        subprocess.check_call(['./bin/arv-mount', '--subtype', '', '--unmount-all', mounts[0]])\n        self.assertEqual(mounts[1:], self._mounted(mounts))\n        subprocess.check_call(['./bin/arv-mount', '--subtype', 'bar', '--unmount-all', self.tmp])\n        self.assertEqual([], self._mounted(mounts))\n\n    def test_unmount_children(self):\n        for d in ['foo', 'foo/bar', 'bar']:\n            mnt = self.tmp+'/'+d\n            os.mkdir(mnt)\n            self.to_delete.insert(0, mnt)\n        mounts = []\n        for d in ['bar', 'foo/bar']:\n            mnt = self.tmp+'/'+d\n            mounts.append(mnt)\n            subprocess.check_call(\n                ['./bin/arv-mount', '--subtype', 'test', mnt])\n\n        self._wait_for_mounts(mounts)\n        self.assertEqual(mounts, self._mounted(mounts))\n        
subprocess.check_call(['./bin/arv-mount', '--unmount', self.tmp])\n        self.assertEqual(mounts, self._mounted(mounts))\n        subprocess.check_call(['./bin/arv-mount', '--unmount-all', self.tmp])\n        self.assertEqual([], self._mounted(mounts))\n\n\n\nclass SaferRealpath(unittest.TestCase):\n    def setUp(self):\n        self.tmp = tempfile.mkdtemp()\n\n    def tearDown(self):\n        shutil.rmtree(self.tmp)\n\n    def test_safer_realpath(self):\n        os.mkdir(self.tmp+\"/dir\")\n        os.mkdir(self.tmp+\"/dir/dir2\")\n        os.symlink(\"missing\", self.tmp+\"/relative-missing\")\n        os.symlink(\"dir\", self.tmp+\"/./relative-dir\")\n        os.symlink(\"relative-dir\", self.tmp+\"/relative-indirect\")\n        os.symlink(self.tmp+\"/dir\", self.tmp+\"/absolute-dir\")\n        os.symlink(\"./dir/../loop\", self.tmp+\"/loop\")\n        os.symlink(\".\", self.tmp+\"/dir/self\")\n        os.symlink(\"..\", self.tmp+\"/dir/dir2/parent\")\n        os.symlink(\"../dir3\", self.tmp+\"/dir/dir2/sibling\")\n        os.symlink(\"../missing/../danger\", self.tmp+\"/dir/tricky\")\n        os.symlink(\"/proc/1/fd/12345\", self.tmp+\"/eperm\")\n        for (inpath, outpath, ok) in [\n                (\"dir/self\", \"dir\", True),\n                (\"dir/dir2/parent\", \"dir\", True),\n                (\"dir/dir2/sibling\", \"dir/dir3\", False),\n                (\"dir\", \"dir\", True),\n                (\"relative-dir\", \"dir\", True),\n                (\"relative-missing\", \"missing\", False),\n                (\"relative-indirect\", \"dir\", True),\n                (\"absolute-dir\", \"dir\", True),\n                (\"loop\", \"loop\", False),\n                # \"missing\" doesn't exist, so \"missing/..\" isn't our\n                # tmpdir; it's important not to contract this to just\n                # \"danger\".\n                (\"dir/tricky\", \"missing/../danger\", False),\n                (\"eperm\", \"/proc/1/fd/12345\", False),\n        ]:\n            if not outpath.startswith('/'):\n                outpath = self.tmp + '/' + outpath\n            self.assertEqual((outpath, ok), arvados_fuse.unmount.safer_realpath(self.tmp+\"/\"+inpath))\n"
  },
  {
    "path": "services/keep-balance/balance.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/md5\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/dblock\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\t\"github.com/jmoiron/sqlx\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// Balancer compares the contents of keepstore servers with the\n// collections stored in Arvados, and issues pull/trash requests\n// needed to get (closer to) the optimal data layout.\n//\n// In the optimal data layout: every data block referenced by a\n// collection is replicated at least as many times as desired by the\n// collection; there are no unreferenced data blocks older than\n// BlobSignatureTTL; and all N existing replicas of a given data block\n// are in the N best positions in rendezvous probe order.\ntype Balancer struct {\n\tDB      *sqlx.DB\n\tLogger  logrus.FieldLogger\n\tDumper  logrus.FieldLogger\n\tMetrics *metrics\n\n\tChunkPrefix    string\n\tLostBlocksFile string\n\n\t*BlockStateMap\n\tKeepServices       map[string]*KeepService\n\tDefaultReplication int\n\tMinMtime           int64\n\n\tclasses       []string\n\tmounts        int\n\tmountsByClass map[string]map[*KeepMount]bool\n\tcollScanned   int64\n\tserviceRoots  map[string]string\n\terrors        []error\n\tstats         balancerStats\n\tmutex         sync.Mutex\n\tlostBlocks    io.Writer\n}\n\n// Run performs a balance operation using the given config and\n// runOptions, and returns RunOptions suitable for passing to a\n// subsequent balance operation.\n//\n// Run should only be called once on a given Balancer object.\nfunc (bal *Balancer) Run(ctx context.Context, client *arvados.Client, cluster *arvados.Cluster, runOptions RunOptions) (nextRunOptions RunOptions, err error) {\n\tnextRunOptions = runOptions\n\n\tbal.logf(\"acquiring active lock\")\n\tif !dblock.KeepBalanceActive.Lock(ctx, func(context.Context) (*sqlx.DB, error) { return bal.DB, nil }) {\n\t\t// context canceled\n\t\treturn\n\t}\n\tdefer dblock.KeepBalanceActive.Unlock()\n\n\tdefer bal.time(\"sweep\", \"wall clock time to run one full sweep\")()\n\n\tctx, cancel := context.WithDeadline(ctx, time.Now().Add(cluster.Collections.BalanceTimeout.Duration()))\n\tdefer cancel()\n\n\tgo bal.reportMemorySize(ctx)\n\n\tvar lbFile *os.File\n\tif bal.LostBlocksFile != \"\" {\n\t\ttmpfn := bal.LostBlocksFile + \".tmp\"\n\t\tlbFile, err = os.OpenFile(tmpfn, os.O_CREATE|os.O_WRONLY, 0777)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer lbFile.Close()\n\t\terr = syscall.Flock(int(lbFile.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer func() {\n\t\t\t// Remove the tempfile only if we didn't get\n\t\t\t// as far as successfully renaming it.\n\t\t\tif lbFile != nil {\n\t\t\t\tos.Remove(tmpfn)\n\t\t\t}\n\t\t}()\n\t\tbal.lostBlocks = lbFile\n\t} else {\n\t\tbal.lostBlocks = ioutil.Discard\n\t}\n\n\terr = bal.DiscoverKeepServices(client)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, srv := range bal.KeepServices {\n\t\terr = srv.discoverMounts(client)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tbal.cleanupMounts()\n\n\tif err = bal.CheckSanityEarly(client); err != nil 
{\n\t\treturn\n\t}\n\n\t// On a big site, indexing and sending trash/pull lists can\n\t// take much longer than the usual 5 minute client\n\t// timeout. From here on, we rely on the context deadline\n\t// instead, aborting the entire operation if any part takes\n\t// too long.\n\tclient.Timeout = 0\n\n\trs := bal.rendezvousState()\n\tif cluster.Collections.BalanceTrashLimit > 0 && rs != runOptions.SafeRendezvousState {\n\t\tif runOptions.SafeRendezvousState != \"\" {\n\t\t\tbal.logf(\"notice: KeepServices list has changed since last run\")\n\t\t}\n\t\tbal.logf(\"clearing existing trash lists, in case the new rendezvous order differs from previous run\")\n\t\tif err = bal.ClearTrashLists(ctx, client); err != nil {\n\t\t\treturn\n\t\t}\n\t\t// The current rendezvous state becomes \"safe\" (i.e.,\n\t\t// OK to compute changes for that state without\n\t\t// clearing existing trash lists) only now, after we\n\t\t// succeed in clearing existing trash lists.\n\t\tnextRunOptions.SafeRendezvousState = rs\n\t}\n\n\tif err = bal.GetCurrentState(ctx, client, cluster.Collections.BalanceCollectionBatch, cluster.Collections.BalanceCollectionBuffers); err != nil {\n\t\treturn\n\t}\n\tbal.setupLookupTables(cluster)\n\tbal.ComputeChangeSets()\n\tbal.PrintStatistics()\n\tif err = bal.CheckSanityLate(); err != nil {\n\t\treturn\n\t}\n\tif lbFile != nil {\n\t\terr = lbFile.Sync()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\terr = os.Rename(bal.LostBlocksFile+\".tmp\", bal.LostBlocksFile)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tlbFile = nil\n\t}\n\tif cluster.Collections.BalancePullLimit > 0 {\n\t\terr = bal.CommitPulls(ctx, client)\n\t\tif err != nil {\n\t\t\t// Skip trash if we can't pull. (Too cautious?)\n\t\t\treturn\n\t\t}\n\t}\n\tif cluster.Collections.BalanceTrashLimit > 0 {\n\t\terr = bal.CommitTrash(ctx, client)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif runOptions.CommitConfirmedFields {\n\t\terr = bal.updateCollections(ctx, client, cluster)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n// SetKeepServices sets the list of KeepServices to operate on.\nfunc (bal *Balancer) SetKeepServices(srvList arvados.KeepServiceList) error {\n\tbal.KeepServices = make(map[string]*KeepService)\n\tfor _, srv := range srvList.Items {\n\t\tbal.KeepServices[srv.UUID] = &KeepService{\n\t\t\tKeepService: srv,\n\t\t\tChangeSet:   &ChangeSet{},\n\t\t}\n\t}\n\treturn nil\n}\n\n// DiscoverKeepServices sets the list of KeepServices by calling the\n// API to get a list of all services, and selecting the ones whose\n// ServiceType is \"disk\"\nfunc (bal *Balancer) DiscoverKeepServices(c *arvados.Client) error {\n\tbal.KeepServices = make(map[string]*KeepService)\n\treturn c.EachKeepService(func(srv arvados.KeepService) error {\n\t\tif srv.ServiceType == \"disk\" {\n\t\t\tbal.KeepServices[srv.UUID] = &KeepService{\n\t\t\t\tKeepService: srv,\n\t\t\t\tChangeSet:   &ChangeSet{},\n\t\t\t}\n\t\t} else {\n\t\t\tbal.logf(\"skipping %v with service type %q\", srv.UUID, srv.ServiceType)\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (bal *Balancer) cleanupMounts() {\n\trwdev := map[string]*KeepService{}\n\tfor _, srv := range bal.KeepServices {\n\t\tfor _, mnt := range srv.mounts {\n\t\t\tif mnt.AllowWrite {\n\t\t\t\trwdev[mnt.UUID] = srv\n\t\t\t}\n\t\t}\n\t}\n\t// Drop the readonly mounts whose device is mounted RW\n\t// elsewhere.\n\tfor _, srv := range bal.KeepServices {\n\t\tvar dedup []*KeepMount\n\t\tfor _, mnt := range srv.mounts {\n\t\t\tif !mnt.AllowWrite && rwdev[mnt.UUID] != nil 
{\n\t\t\t\tbal.logf(\"skipping srv %s readonly mount %q because same volume is mounted read-write on srv %s\", srv, mnt.UUID, rwdev[mnt.UUID])\n\t\t\t} else {\n\t\t\t\tdedup = append(dedup, mnt)\n\t\t\t}\n\t\t}\n\t\tsrv.mounts = dedup\n\t}\n\tfor _, srv := range bal.KeepServices {\n\t\tfor _, mnt := range srv.mounts {\n\t\t\tif mnt.Replication <= 0 {\n\t\t\t\tlog.Printf(\"%s: mount %s reports replication=%d, using replication=1\", srv, mnt.UUID, mnt.Replication)\n\t\t\t\tmnt.Replication = 1\n\t\t\t}\n\t\t}\n\t}\n}\n\n// CheckSanityEarly checks for configuration and runtime errors that\n// can be detected before GetCurrentState() and ComputeChangeSets()\n// are called.\n//\n// If it returns an error, it is pointless to run GetCurrentState or\n// ComputeChangeSets: after doing so, the statistics would be\n// meaningless and it would be dangerous to run any Commit methods.\nfunc (bal *Balancer) CheckSanityEarly(c *arvados.Client) error {\n\tu, err := c.CurrentUser()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"CurrentUser(): %v\", err)\n\t}\n\tif !u.IsActive || !u.IsAdmin {\n\t\treturn fmt.Errorf(\"current user (%s) is not an active admin user\", u.UUID)\n\t}\n\tfor _, srv := range bal.KeepServices {\n\t\tif srv.ServiceType == \"proxy\" {\n\t\t\treturn fmt.Errorf(\"config error: %s: proxy servers cannot be balanced\", srv)\n\t\t}\n\t}\n\tfor _, c := range bal.ChunkPrefix {\n\t\tif !strings.ContainsRune(\"0123456789abcdef\", c) {\n\t\t\treturn fmt.Errorf(\"invalid char %q in chunk prefix %q: only lowercase hex digits make sense\", string(c), bal.ChunkPrefix)\n\t\t}\n\t}\n\tif len(bal.ChunkPrefix) > 32 {\n\t\treturn fmt.Errorf(\"invalid chunk prefix %q: longer than a block hash\", bal.ChunkPrefix)\n\t}\n\n\tmountProblem := false\n\ttype deviceMount struct {\n\t\tsrv *KeepService\n\t\tmnt *KeepMount\n\t}\n\tdeviceMounted := map[string]deviceMount{} // DeviceID -> mount\n\tfor _, srv := range bal.KeepServices {\n\t\tfor _, mnt := range srv.mounts {\n\t\t\tif first, dup := deviceMounted[mnt.DeviceID]; dup && first.mnt.UUID != mnt.UUID && mnt.DeviceID != \"\" {\n\t\t\t\tbal.logf(\"config error: device %s is mounted with multiple volume UUIDs: %s on %s, and %s on %s\",\n\t\t\t\t\tmnt.DeviceID,\n\t\t\t\t\tfirst.mnt.UUID, first.srv,\n\t\t\t\t\tmnt.UUID, srv)\n\t\t\t\tmountProblem = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdeviceMounted[mnt.DeviceID] = deviceMount{srv, mnt}\n\t\t}\n\t}\n\tif mountProblem {\n\t\treturn errors.New(\"cannot continue with config errors (see above)\")\n\t}\n\n\tvar checkPage arvados.CollectionList\n\tif err = c.RequestAndDecode(&checkPage, \"GET\", \"arvados/v1/collections\", nil, arvados.ResourceListParams{\n\t\tLimit:              new(int),\n\t\tCount:              \"exact\",\n\t\tIncludeTrash:       true,\n\t\tIncludeOldVersions: true,\n\t\tFilters: []arvados.Filter{{\n\t\t\tAttr:     \"modified_at\",\n\t\t\tOperator: \"=\",\n\t\t\tOperand:  nil,\n\t\t}},\n\t}); err != nil {\n\t\treturn err\n\t} else if n := checkPage.ItemsAvailable; n > 0 {\n\t\treturn fmt.Errorf(\"%d collections exist with null modified_at; cannot fetch reliably\", n)\n\t}\n\n\treturn nil\n}\n\n// rendezvousState returns a fingerprint (e.g., a sorted list of\n// UUID+host+port) of the current set of keep services.\nfunc (bal *Balancer) rendezvousState() string {\n\tsrvs := make([]string, 0, len(bal.KeepServices))\n\tfor _, srv := range bal.KeepServices {\n\t\tsrvs = append(srvs, srv.String())\n\t}\n\tsort.Strings(srvs)\n\treturn strings.Join(srvs, \"; \")\n}\n\n// ClearTrashLists sends an empty trash list 
to each keep\n// service. Calling this before GetCurrentState avoids races.\n//\n// When a block appears in an index, we assume that replica will still\n// exist after we delete other replicas on other servers. However,\n// it's possible that a previous rebalancing operation made different\n// decisions (e.g., servers were added/removed, and rendezvous order\n// changed). In this case, the replica might already be on that\n// server's trash list, and it might be deleted before we send a\n// replacement trash list.\n//\n// We avoid this problem if we clear all trash lists before getting\n// indexes. (We also assume there is only one rebalancing process\n// running at a time.)\nfunc (bal *Balancer) ClearTrashLists(ctx context.Context, c *arvados.Client) error {\n\tfor _, srv := range bal.KeepServices {\n\t\tsrv.ChangeSet = &ChangeSet{}\n\t}\n\treturn bal.CommitTrash(ctx, c)\n}\n\n// GetCurrentState determines the current replication state, and the\n// desired replication level, for every block that is either\n// retrievable or referenced.\n//\n// It determines the current replication state by reading the block index\n// from every known Keep service.\n//\n// It determines the desired replication level by retrieving all\n// collection manifests in the database (API server).\n//\n// It encodes the resulting information in BlockStateMap.\nfunc (bal *Balancer) GetCurrentState(ctx context.Context, c *arvados.Client, pageSize, bufs int) error {\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tdefer bal.time(\"get_state\", \"wall clock time to get current state\")()\n\n\tdd, err := c.DiscoveryDocument()\n\tif err != nil {\n\t\treturn err\n\t}\n\tbal.DefaultReplication = dd.DefaultCollectionReplication\n\tbal.MinMtime = time.Now().UnixNano() - dd.BlobSignatureTTL*1e9\n\n\terrs := make(chan error, 1)\n\twg := sync.WaitGroup{}\n\n\t// When a device is mounted more than once, we will get its\n\t// index only once, and call AddReplicas on all of the mounts.\n\t// equivMount keys are the mounts that will be indexed, and\n\t// each value is a list of mounts to apply the received index\n\t// to.\n\tequivMount := map[*KeepMount][]*KeepMount{}\n\t// deviceMount maps each device ID to the one mount that will\n\t// be indexed for that device.\n\tdeviceMount := map[string]*KeepMount{}\n\tfor _, srv := range bal.KeepServices {\n\t\tfor _, mnt := range srv.mounts {\n\t\t\tequiv := deviceMount[mnt.UUID]\n\t\t\tif equiv == nil {\n\t\t\t\tequiv = mnt\n\t\t\t\tdeviceMount[mnt.UUID] = equiv\n\t\t\t}\n\t\t\tequivMount[equiv] = append(equivMount[equiv], mnt)\n\t\t}\n\t}\n\n\t// Determine max possible replication (i.e., total replication\n\t// of a block stored on every mount)\n\tmaxRepl := 0\n\tfor mnt := range equivMount {\n\t\tmaxRepl += mnt.Replication\n\t}\n\tbal.BlockStateMap = NewBlockStateMap(maxRepl)\n\t// Start one goroutine for each (non-redundant) mount:\n\t// retrieve the index, and add the returned blocks to\n\t// BlockStateMap.\n\tfor _, mounts := range equivMount {\n\t\twg.Add(1)\n\t\tgo func(mounts []*KeepMount) {\n\t\t\tdefer wg.Done()\n\t\t\tbal.logf(\"mount %s: retrieve index from %s\", mounts[0], mounts[0].KeepService)\n\t\t\tidx, err := mounts[0].KeepService.IndexMount(ctx, c, mounts[0].UUID, bal.ChunkPrefix)\n\t\t\tif err != nil {\n\t\t\t\tselect {\n\t\t\t\tcase errs <- fmt.Errorf(\"%s: retrieve index: %v\", mounts[0], err):\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tcancel()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(errs) > 0 {\n\t\t\t\t// Some other goroutine encountered an\n\t\t\t\t// 
error -- any further effort here\n\t\t\t\t// will be wasted.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, mount := range mounts {\n\t\t\t\tbal.logf(\"%s: add %d entries to map\", mount, len(idx))\n\t\t\t\tbal.BlockStateMap.AddReplicas(mount, idx)\n\t\t\t\tbal.logf(\"%s: added %d entries to map at %dx (%d replicas)\", mount, len(idx), mount.Replication, len(idx)*mount.Replication)\n\t\t\t}\n\t\t\tbal.logf(\"mount %s: index done\", mounts[0])\n\t\t}(mounts)\n\t}\n\n\tcollQ := make(chan arvados.Collection, bufs)\n\n\t// Retrieve all collections from the database and send them to\n\t// collQ.\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\terr = EachCollection(ctx, bal.DB, c,\n\t\t\tfunc(coll arvados.Collection) error {\n\t\t\t\tcollQ <- coll\n\t\t\t\tif len(errs) > 0 {\n\t\t\t\t\t// some other GetCurrentState\n\t\t\t\t\t// error happened: no point\n\t\t\t\t\t// getting any more\n\t\t\t\t\t// collections.\n\t\t\t\t\treturn fmt.Errorf(\"\")\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, func(done, total int) {\n\t\t\t\tbal.logf(\"collections: %d/%d\", done, total)\n\t\t\t})\n\t\tclose(collQ)\n\t\tif err != nil {\n\t\t\tselect {\n\t\t\tcase errs <- err:\n\t\t\tdefault:\n\t\t\t}\n\t\t\tcancel()\n\t\t}\n\t}()\n\n\t// Parse manifests from collQ and pass the block hashes to\n\t// BlockStateMap to track desired replication.\n\tfor i := 0; i < runtime.NumCPU(); i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor coll := range collQ {\n\t\t\t\tif len(errs) > 0 {\n\t\t\t\t\t// already failing, just drain\n\t\t\t\t\t// the channel\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\terr := bal.addCollection(coll)\n\t\t\t\tif err != nil {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase errs <- err:\n\t\t\t\t\tdefault:\n\t\t\t\t\t}\n\t\t\t\t\tcancel()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tatomic.AddInt64(&bal.collScanned, 1)\n\t\t\t}\n\t\t}()\n\t}\n\n\twg.Wait()\n\tif len(errs) > 0 {\n\t\treturn <-errs\n\t}\n\treturn nil\n}\n\nfunc (bal *Balancer) addCollection(coll arvados.Collection) error {\n\tblkids, err := coll.SizedDigests()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%v: %v\", coll.UUID, err)\n\t}\n\trepl := bal.DefaultReplication\n\tif coll.ReplicationDesired != nil {\n\t\trepl = *coll.ReplicationDesired\n\t}\n\tif bal.ChunkPrefix != \"\" {\n\t\t// Throw out blocks that don't match the requested\n\t\t// prefix.  (We save a bit of GC work here by\n\t\t// preallocating based on each hex digit in\n\t\t// ChunkPrefix reducing the expected size of the\n\t\t// filtered set by ~16x.)\n\t\tfiltered := make([]arvados.SizedDigest, 0, len(blkids)>>(4*len(bal.ChunkPrefix)-1))\n\t\tfor _, blkid := range blkids {\n\t\t\tif strings.HasPrefix(string(blkid), bal.ChunkPrefix) {\n\t\t\t\tfiltered = append(filtered, blkid)\n\t\t\t}\n\t\t}\n\t\tblkids = filtered\n\t}\n\tbal.Logger.Debugf(\"%v: %d blocks x%d\", coll.UUID, len(blkids), repl)\n\t// Pass pdh to IncreaseDesired only if LostBlocksFile is being\n\t// written -- otherwise it's just a waste of memory.\n\tpdh := \"\"\n\tif bal.LostBlocksFile != \"\" {\n\t\tpdh = coll.PortableDataHash\n\t}\n\tbal.BlockStateMap.IncreaseDesired(pdh, coll.StorageClassesDesired, repl, blkids)\n\treturn nil\n}\n\n// ComputeChangeSets compares, for each known block, the current and\n// desired replication states. 
If it is possible to get closer to the\n// desired state by copying or deleting blocks, it adds those changes\n// to the relevant KeepServices' ChangeSets.\n//\n// It does not actually apply any of the computed changes.\nfunc (bal *Balancer) ComputeChangeSets() {\n\t// This just calls balanceBlock() once for each block, using a\n\t// pool of worker goroutines.\n\tdefer bal.time(\"changeset_compute\", \"wall clock time to compute changesets\")()\n\n\ttype balanceTask struct {\n\t\tblkid arvados.SizedDigest\n\t\tblk   *BlockState\n\t}\n\tworkers := runtime.GOMAXPROCS(-1)\n\ttodo := make(chan balanceTask, workers)\n\tgo func() {\n\t\tbal.BlockStateMap.Apply(func(blkid arvados.SizedDigest, blk *BlockState) {\n\t\t\ttodo <- balanceTask{\n\t\t\t\tblkid: blkid,\n\t\t\t\tblk:   blk,\n\t\t\t}\n\t\t})\n\t\tclose(todo)\n\t}()\n\tresults := make(chan balanceResult, workers)\n\tgo func() {\n\t\tvar wg sync.WaitGroup\n\t\tfor i := 0; i < workers; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tfor work := range todo {\n\t\t\t\t\tresults <- bal.balanceBlock(work.blkid, work.blk)\n\t\t\t\t}\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t}\n\t\twg.Wait()\n\t\tclose(results)\n\t}()\n\tbal.collectStatistics(results)\n}\n\nfunc (bal *Balancer) setupLookupTables(cluster *arvados.Cluster) {\n\tbal.serviceRoots = make(map[string]string)\n\tbal.classes = defaultClasses\n\tbal.mountsByClass = map[string]map[*KeepMount]bool{\"default\": {}}\n\tbal.mounts = 0\n\tfor _, srv := range bal.KeepServices {\n\t\tbal.serviceRoots[srv.UUID] = srv.UUID\n\t\tfor _, mnt := range srv.mounts {\n\t\t\tbal.mounts++\n\n\t\t\tif srv.ReadOnly {\n\t\t\t\t// All mounts on a read-only service\n\t\t\t\t// are effectively read-only.\n\t\t\t\tmnt.AllowWrite = false\n\t\t\t}\n\n\t\t\tfor class := range mnt.StorageClasses {\n\t\t\t\tif mbc := bal.mountsByClass[class]; mbc == nil {\n\t\t\t\t\tbal.classes = append(bal.classes, class)\n\t\t\t\t\tbal.mountsByClass[class] = map[*KeepMount]bool{mnt: true}\n\t\t\t\t} else {\n\t\t\t\t\tmbc[mnt] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// Consider classes in lexicographic order to avoid flapping\n\t// between balancing runs.  
The outcome of the \"prefer a mount\n\t// we're already planning to use for a different storage\n\t// class\" case in balanceBlock depends on the order classes\n\t// are considered.\n\tsort.Strings(bal.classes)\n\n\tfor _, srv := range bal.KeepServices {\n\t\tsrv.ChangeSet = &ChangeSet{\n\t\t\tPullLimit:  cluster.Collections.BalancePullLimit,\n\t\t\tTrashLimit: cluster.Collections.BalanceTrashLimit,\n\t\t}\n\t}\n}\n\nconst (\n\tchangeStay = iota\n\tchangePull\n\tchangeTrash\n\tchangeNone\n)\n\nvar changeName = map[int]string{\n\tchangeStay:  \"stay\",\n\tchangePull:  \"pull\",\n\tchangeTrash: \"trash\",\n\tchangeNone:  \"none\",\n}\n\ntype balancedBlockState struct {\n\tneeded       int\n\tunneeded     int\n\tpulling      int\n\tunachievable bool\n}\n\ntype balanceResult struct {\n\tblk        *BlockState\n\tblkid      arvados.SizedDigest\n\tlost       bool\n\tblockState balancedBlockState\n\tclassState map[string]balancedBlockState\n}\n\ntype slot struct {\n\tmnt  *KeepMount // never nil\n\trepl *Replica   // replica already stored here (or nil)\n\twant bool       // we should pull/leave a replica here\n}\n\n// balanceBlock compares current state to desired state for a single\n// block, and makes the appropriate ChangeSet calls.\nfunc (bal *Balancer) balanceBlock(blkid arvados.SizedDigest, blk *BlockState) balanceResult {\n\tbal.Logger.Debugf(\"balanceBlock: %v %+v\", blkid, blk)\n\n\t// Build a list of all slots (one per mounted volume).\n\tslots := make([]slot, 0, bal.mounts)\n\tfor _, srv := range bal.KeepServices {\n\t\tfor _, mnt := range srv.mounts {\n\t\t\tvar repl *Replica\n\t\t\tfor r := range blk.Replicas {\n\t\t\t\tif blk.Replicas[r].KeepMount == mnt {\n\t\t\t\t\trepl = &blk.Replicas[r]\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Initial value of \"want\" is \"have, and can't\n\t\t\t// delete\". 
These untrashable replicas get\n\t\t\t// prioritized when sorting slots: otherwise,\n\t\t\t// non-optimal readonly copies would cause us\n\t\t\t// to overreplicate.\n\t\t\tslots = append(slots, slot{\n\t\t\t\tmnt:  mnt,\n\t\t\t\trepl: repl,\n\t\t\t\twant: repl != nil && !mnt.AllowTrash,\n\t\t\t})\n\t\t}\n\t}\n\n\tuuids := keepclient.NewRootSorter(bal.serviceRoots, string(blkid[:32])).GetSortedRoots()\n\tsrvRendezvous := make(map[*KeepService]int, len(uuids))\n\tfor i, uuid := range uuids {\n\t\tsrv := bal.KeepServices[uuid]\n\t\tsrvRendezvous[srv] = i\n\t}\n\n\t// Below we set underreplicated=true if we find any storage\n\t// class that's currently underreplicated -- in that case we\n\t// won't want to trash any replicas.\n\tunderreplicated := false\n\n\tunsafeToDelete := make(map[int64]bool, len(slots))\n\tfor _, class := range bal.classes {\n\t\tif blk.Desired == nil {\n\t\t\tcontinue\n\t\t}\n\t\tdesired := (*blk.Desired)[class]\n\t\tif desired == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Sort the slots by desirability.\n\t\tsort.Slice(slots, func(i, j int) bool {\n\t\t\tsi, sj := slots[i], slots[j]\n\t\t\tif classi, classj := bal.mountsByClass[class][si.mnt], bal.mountsByClass[class][sj.mnt]; classi != classj {\n\t\t\t\t// Prefer a mount that satisfies the\n\t\t\t\t// desired class.\n\t\t\t\treturn bal.mountsByClass[class][si.mnt]\n\t\t\t} else if si.want != sj.want {\n\t\t\t\t// Prefer a mount that will have a\n\t\t\t\t// replica no matter what we do here\n\t\t\t\t// -- either because it already has an\n\t\t\t\t// untrashable replica, or because we\n\t\t\t\t// already need it to satisfy a\n\t\t\t\t// different storage class.\n\t\t\t\treturn si.want\n\t\t\t} else if orderi, orderj := srvRendezvous[si.mnt.KeepService], srvRendezvous[sj.mnt.KeepService]; orderi != orderj {\n\t\t\t\t// Prefer a better rendezvous\n\t\t\t\t// position.\n\t\t\t\treturn orderi < orderj\n\t\t\t} else if repli, replj := si.repl != nil, sj.repl != nil; repli != replj {\n\t\t\t\t// Prefer a mount that already has a\n\t\t\t\t// replica.\n\t\t\t\treturn repli\n\t\t\t} else {\n\t\t\t\t// If pull/trash turns out to be\n\t\t\t\t// needed, distribute the\n\t\t\t\t// new/remaining replicas uniformly\n\t\t\t\t// across qualifying mounts on a given\n\t\t\t\t// server.\n\t\t\t\treturn rendezvousLess(si.mnt.UUID, sj.mnt.UUID, blkid)\n\t\t\t}\n\t\t})\n\n\t\t// Servers/mounts/devices (with or without existing\n\t\t// replicas) that are part of the best achievable\n\t\t// layout for this storage class.\n\t\twantSrv := map[*KeepService]bool{}\n\t\twantMnt := map[*KeepMount]bool{}\n\t\twantDev := map[string]bool{}\n\t\t// Positions (with existing replicas) that have been\n\t\t// protected (via unsafeToDelete) to ensure we don't\n\t\t// reduce replication below desired level when\n\t\t// trashing replicas that aren't optimal positions for\n\t\t// any storage class.\n\t\tprotMnt := map[*KeepMount]bool{}\n\t\t// Replication planned so far (corresponds to wantMnt).\n\t\treplWant := 0\n\t\t// Protected replication (corresponds to protMnt).\n\t\treplProt := 0\n\n\t\t// trySlot tries using a slot to meet requirements,\n\t\t// and returns true if all requirements are met.\n\t\ttrySlot := func(i int) bool {\n\t\t\tslot := slots[i]\n\t\t\tif wantMnt[slot.mnt] || wantDev[slot.mnt.UUID] {\n\t\t\t\t// Already allocated a replica to this\n\t\t\t\t// backend device, possibly on a\n\t\t\t\t// different server.\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif replProt < desired && slot.repl != nil && !protMnt[slot.mnt] 
{\n\t\t\t\tunsafeToDelete[slot.repl.Mtime] = true\n\t\t\t\tprotMnt[slot.mnt] = true\n\t\t\t\treplProt += slot.mnt.Replication\n\t\t\t}\n\t\t\tif replWant < desired && (slot.repl != nil || slot.mnt.AllowWrite) {\n\t\t\t\tslots[i].want = true\n\t\t\t\twantSrv[slot.mnt.KeepService] = true\n\t\t\t\twantMnt[slot.mnt] = true\n\t\t\t\twantDev[slot.mnt.UUID] = true\n\t\t\t\treplWant += slot.mnt.Replication\n\t\t\t}\n\t\t\treturn replProt >= desired && replWant >= desired\n\t\t}\n\n\t\t// First try to achieve desired replication without\n\t\t// using the same server twice.\n\t\tdone := false\n\t\tfor i := 0; i < len(slots) && !done; i++ {\n\t\t\tif !wantSrv[slots[i].mnt.KeepService] {\n\t\t\t\tdone = trySlot(i)\n\t\t\t}\n\t\t}\n\n\t\t// If that didn't suffice, do another pass without the\n\t\t// \"distinct services\" restriction. (Achieving the\n\t\t// desired volume replication on fewer than the\n\t\t// desired number of services is better than\n\t\t// underreplicating.)\n\t\tfor i := 0; i < len(slots) && !done; i++ {\n\t\t\tdone = trySlot(i)\n\t\t}\n\n\t\tif !underreplicated {\n\t\t\tsafe := 0\n\t\t\tfor _, slot := range slots {\n\t\t\t\tif slot.repl == nil || !bal.mountsByClass[class][slot.mnt] {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif safe += slot.mnt.Replication; safe >= desired {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tunderreplicated = safe < desired\n\t\t}\n\n\t\t// Avoid deleting wanted replicas from devices that\n\t\t// are mounted on multiple servers -- even if they\n\t\t// haven't already been added to unsafeToDelete\n\t\t// because the servers report different Mtimes.\n\t\tfor _, slot := range slots {\n\t\t\tif slot.repl != nil && wantDev[slot.mnt.UUID] {\n\t\t\t\tunsafeToDelete[slot.repl.Mtime] = true\n\t\t\t}\n\t\t}\n\t}\n\n\t// TODO: If multiple replicas are trashable, prefer the oldest\n\t// replica that doesn't have a timestamp collision with\n\t// others.\n\n\tfor i, slot := range slots {\n\t\t// Don't trash (1) any replicas of an underreplicated\n\t\t// block, even if they're in the wrong positions, or\n\t\t// (2) any replicas whose Mtimes are identical to\n\t\t// needed replicas (in case we're really seeing the\n\t\t// same copy via different mounts).\n\t\tif slot.repl != nil && (underreplicated || unsafeToDelete[slot.repl.Mtime]) {\n\t\t\tslots[i].want = true\n\t\t}\n\t}\n\n\tclassState := make(map[string]balancedBlockState, len(bal.classes))\n\tfor _, class := range bal.classes {\n\t\tdesired := 0\n\t\tif blk.Desired != nil {\n\t\t\tdesired = (*blk.Desired)[class]\n\t\t}\n\t\tclassState[class] = computeBlockState(slots, bal.mountsByClass[class], len(blk.Replicas), desired)\n\t}\n\tblockState := computeBlockState(slots, nil, len(blk.Replicas), 0)\n\n\t// Sort the slots by rendezvous order. 
This ensures \"trash the\n\t// first of N replicas with identical timestamps\" is\n\t// predictable (helpful for testing) and well distributed\n\t// across servers.\n\tsort.Slice(slots, func(i, j int) bool {\n\t\tsi, sj := slots[i], slots[j]\n\t\tif orderi, orderj := srvRendezvous[si.mnt.KeepService], srvRendezvous[sj.mnt.KeepService]; orderi != orderj {\n\t\t\treturn orderi < orderj\n\t\t} else {\n\t\t\treturn rendezvousLess(si.mnt.UUID, sj.mnt.UUID, blkid)\n\t\t}\n\t})\n\n\tvar (\n\t\tlost         bool\n\t\tchanges      []string\n\t\ttrashedMtime = make(map[int64]bool, len(slots))\n\t)\n\tfor _, slot := range slots {\n\t\t// TODO: request a Touch if Mtime is duplicated.\n\t\tvar change int\n\t\tswitch {\n\t\tcase !slot.want && slot.repl != nil && slot.repl.Mtime < bal.MinMtime:\n\t\t\tif trashedMtime[slot.repl.Mtime] {\n\t\t\t\t// Don't trash multiple replicas with\n\t\t\t\t// identical timestamps. If they are\n\t\t\t\t// multiple views of the same backing\n\t\t\t\t// storage, asking both servers to\n\t\t\t\t// trash is redundant and can cause\n\t\t\t\t// races (see #20242). If they are\n\t\t\t\t// distinct replicas that happen to\n\t\t\t\t// have identical timestamps, we'll\n\t\t\t\t// get this one on the next sweep.\n\t\t\t\tchange = changeNone\n\t\t\t} else {\n\t\t\t\tslot.mnt.KeepService.AddTrash(Trash{\n\t\t\t\t\tSizedDigest: blkid,\n\t\t\t\t\tMtime:       slot.repl.Mtime,\n\t\t\t\t\tFrom:        slot.mnt,\n\t\t\t\t})\n\t\t\t\tchange = changeTrash\n\t\t\t\ttrashedMtime[slot.repl.Mtime] = true\n\t\t\t}\n\t\tcase slot.repl == nil && slot.want && len(blk.Replicas) == 0:\n\t\t\tlost = true\n\t\t\tchange = changeNone\n\t\tcase slot.repl == nil && slot.want && slot.mnt.AllowWrite:\n\t\t\tslot.mnt.KeepService.AddPull(Pull{\n\t\t\t\tSizedDigest: blkid,\n\t\t\t\tFrom:        blk.Replicas[0].KeepMount.KeepService,\n\t\t\t\tTo:          slot.mnt,\n\t\t\t})\n\t\t\tchange = changePull\n\t\tcase slot.repl != nil:\n\t\t\tchange = changeStay\n\t\tdefault:\n\t\t\tchange = changeNone\n\t\t}\n\t\tif bal.Dumper != nil {\n\t\t\tvar mtime int64\n\t\t\tif slot.repl != nil {\n\t\t\t\tmtime = slot.repl.Mtime\n\t\t\t}\n\t\t\tsrv := slot.mnt.KeepService\n\t\t\tchanges = append(changes, fmt.Sprintf(\"%s:%d/%s=%s,%d\", srv.ServiceHost, srv.ServicePort, slot.mnt.UUID, changeName[change], mtime))\n\t\t}\n\t}\n\tif bal.Dumper != nil {\n\t\tvar desired map[string]int\n\t\tif blk.Desired != nil {\n\t\t\tdesired = *blk.Desired\n\t\t}\n\t\tbal.Dumper.Printf(\"%s refs=%d needed=%d unneeded=%d pulling=%v %v %v\", blkid, blk.RefCount, blockState.needed, blockState.unneeded, blockState.pulling, desired, changes)\n\t}\n\treturn balanceResult{\n\t\tblk:        blk,\n\t\tblkid:      blkid,\n\t\tlost:       lost,\n\t\tblockState: blockState,\n\t\tclassState: classState,\n\t}\n}\n\nfunc computeBlockState(slots []slot, onlyCount map[*KeepMount]bool, have, needRepl int) (bbs balancedBlockState) {\n\trepl := 0\n\tcountedDev := map[string]bool{}\n\tfor _, slot := range slots {\n\t\tif onlyCount != nil && !onlyCount[slot.mnt] {\n\t\t\tcontinue\n\t\t}\n\t\tif countedDev[slot.mnt.UUID] {\n\t\t\tcontinue\n\t\t}\n\t\tswitch {\n\t\tcase slot.repl != nil && slot.want:\n\t\t\tbbs.needed++\n\t\t\trepl += slot.mnt.Replication\n\t\tcase slot.repl != nil && !slot.want:\n\t\t\tbbs.unneeded++\n\t\t\trepl += slot.mnt.Replication\n\t\tcase slot.repl == nil && slot.want && have > 0:\n\t\t\tbbs.pulling++\n\t\t\trepl += slot.mnt.Replication\n\t\t}\n\t\tcountedDev[slot.mnt.UUID] = true\n\t}\n\tif repl < needRepl {\n\t\tbbs.unachievable = 
true\n\t}\n\treturn\n}\n\ntype blocksNBytes struct {\n\treplicas int\n\tblocks   int\n\tbytes    int64\n}\n\nfunc (bb blocksNBytes) String() string {\n\treturn fmt.Sprintf(\"%d replicas (%d blocks, %d bytes)\", bb.replicas, bb.blocks, bb.bytes)\n}\n\ntype replicationStats struct {\n\tneeded       blocksNBytes\n\tunneeded     blocksNBytes\n\tpulling      blocksNBytes\n\tunachievable blocksNBytes\n}\n\ntype balancerStats struct {\n\tlost            blocksNBytes\n\toverrep         blocksNBytes\n\tunref           blocksNBytes\n\tgarbage         blocksNBytes\n\tunderrep        blocksNBytes\n\tunachievable    blocksNBytes\n\tjustright       blocksNBytes\n\tdesired         blocksNBytes\n\tcurrent         blocksNBytes\n\tpulls           int\n\tpullsDeferred   int\n\ttrashes         int\n\ttrashesDeferred int\n\treplHistogram   []int\n\tclassStats      map[string]replicationStats\n\n\t// collectionBytes / collectionBlockBytes = deduplication ratio\n\tcollectionBytes      int64 // sum(bytes in referenced blocks) across all collections\n\tcollectionBlockBytes int64 // sum(block size) across all blocks referenced by collections\n\tcollectionBlockRefs  int64 // sum(number of blocks referenced) across all collections\n\tcollectionBlocks     int64 // number of blocks referenced by any collection\n}\n\nfunc (s *balancerStats) dedupByteRatio() float64 {\n\tif s.collectionBlockBytes == 0 {\n\t\treturn 0\n\t}\n\treturn float64(s.collectionBytes) / float64(s.collectionBlockBytes)\n}\n\nfunc (s *balancerStats) dedupBlockRatio() float64 {\n\tif s.collectionBlocks == 0 {\n\t\treturn 0\n\t}\n\treturn float64(s.collectionBlockRefs) / float64(s.collectionBlocks)\n}\n\nfunc (bal *Balancer) collectStatistics(results <-chan balanceResult) {\n\tvar s balancerStats\n\ts.replHistogram = make([]int, 2)\n\ts.classStats = make(map[string]replicationStats, len(bal.classes))\n\tfor result := range results {\n\t\tbytes := result.blkid.Size()\n\n\t\tif rc := int64(result.blk.RefCount); rc > 0 {\n\t\t\ts.collectionBytes += rc * bytes\n\t\t\ts.collectionBlockBytes += bytes\n\t\t\ts.collectionBlockRefs += rc\n\t\t\ts.collectionBlocks++\n\t\t}\n\n\t\tfor class, state := range result.classState {\n\t\t\tcs := s.classStats[class]\n\t\t\tif state.unachievable {\n\t\t\t\tcs.unachievable.replicas++\n\t\t\t\tcs.unachievable.blocks++\n\t\t\t\tcs.unachievable.bytes += bytes\n\t\t\t}\n\t\t\tif state.needed > 0 {\n\t\t\t\tcs.needed.replicas += state.needed\n\t\t\t\tcs.needed.blocks++\n\t\t\t\tcs.needed.bytes += bytes * int64(state.needed)\n\t\t\t}\n\t\t\tif state.unneeded > 0 {\n\t\t\t\tcs.unneeded.replicas += state.unneeded\n\t\t\t\tcs.unneeded.blocks++\n\t\t\t\tcs.unneeded.bytes += bytes * int64(state.unneeded)\n\t\t\t}\n\t\t\tif state.pulling > 0 {\n\t\t\t\tcs.pulling.replicas += state.pulling\n\t\t\t\tcs.pulling.blocks++\n\t\t\t\tcs.pulling.bytes += bytes * int64(state.pulling)\n\t\t\t}\n\t\t\ts.classStats[class] = cs\n\t\t}\n\n\t\tbs := result.blockState\n\t\tswitch {\n\t\tcase result.lost:\n\t\t\ts.lost.replicas++\n\t\t\ts.lost.blocks++\n\t\t\ts.lost.bytes += bytes\n\t\t\tfmt.Fprintf(bal.lostBlocks, \"%s\", strings.SplitN(string(result.blkid), \"+\", 2)[0])\n\t\t\tfor pdh := range result.blk.Refs {\n\t\t\t\tfmt.Fprintf(bal.lostBlocks, \" %s\", pdh)\n\t\t\t}\n\t\t\tfmt.Fprint(bal.lostBlocks, \"\\n\")\n\t\tcase bs.pulling > 0:\n\t\t\ts.underrep.replicas += bs.pulling\n\t\t\ts.underrep.blocks++\n\t\t\ts.underrep.bytes += bytes * int64(bs.pulling)\n\t\tcase 
bs.unachievable:\n\t\t\ts.underrep.replicas++\n\t\t\ts.underrep.blocks++\n\t\t\ts.underrep.bytes += bytes\n\t\tcase bs.unneeded > 0 && bs.needed == 0:\n\t\t\t// Count as \"garbage\" if all replicas are old\n\t\t\t// enough to trash, otherwise count as\n\t\t\t// \"unref\".\n\t\t\tcounter := &s.garbage\n\t\t\tfor _, r := range result.blk.Replicas {\n\t\t\t\tif r.Mtime >= bal.MinMtime {\n\t\t\t\t\tcounter = &s.unref\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tcounter.replicas += bs.unneeded\n\t\t\tcounter.blocks++\n\t\t\tcounter.bytes += bytes * int64(bs.unneeded)\n\t\tcase bs.unneeded > 0:\n\t\t\ts.overrep.replicas += bs.unneeded\n\t\t\ts.overrep.blocks++\n\t\t\ts.overrep.bytes += bytes * int64(bs.unneeded)\n\t\tdefault:\n\t\t\ts.justright.replicas += bs.needed\n\t\t\ts.justright.blocks++\n\t\t\ts.justright.bytes += bytes * int64(bs.needed)\n\t\t}\n\n\t\tif bs.needed > 0 {\n\t\t\ts.desired.replicas += bs.needed\n\t\t\ts.desired.blocks++\n\t\t\ts.desired.bytes += bytes * int64(bs.needed)\n\t\t}\n\t\tif bs.needed+bs.unneeded > 0 {\n\t\t\ts.current.replicas += bs.needed + bs.unneeded\n\t\t\ts.current.blocks++\n\t\t\ts.current.bytes += bytes * int64(bs.needed+bs.unneeded)\n\t\t}\n\n\t\tfor len(s.replHistogram) <= bs.needed+bs.unneeded {\n\t\t\ts.replHistogram = append(s.replHistogram, 0)\n\t\t}\n\t\ts.replHistogram[bs.needed+bs.unneeded]++\n\t}\n\tfor _, srv := range bal.KeepServices {\n\t\ts.pulls += len(srv.ChangeSet.Pulls)\n\t\ts.pullsDeferred += srv.ChangeSet.PullsDeferred\n\t\ts.trashes += len(srv.ChangeSet.Trashes)\n\t\ts.trashesDeferred += srv.ChangeSet.TrashesDeferred\n\t}\n\tbal.stats = s\n\tbal.Metrics.UpdateStats(s)\n}\n\n// PrintStatistics writes statistics about the computed changes to\n// bal.Logger. It should not be called until ComputeChangeSets has\n// finished.\nfunc (bal *Balancer) PrintStatistics() {\n\tbal.logf(\"===\")\n\tbal.logf(\"%s lost (0=have<want)\", bal.stats.lost)\n\tbal.logf(\"%s underreplicated (0<have<want)\", bal.stats.underrep)\n\tbal.logf(\"%s just right (have=want)\", bal.stats.justright)\n\tbal.logf(\"%s overreplicated (have>want>0)\", bal.stats.overrep)\n\tbal.logf(\"%s unreferenced (have>want=0, new)\", bal.stats.unref)\n\tbal.logf(\"%s garbage (have>want=0, old)\", bal.stats.garbage)\n\tfor _, class := range bal.classes {\n\t\tcs := bal.stats.classStats[class]\n\t\tbal.logf(\"===\")\n\t\tbal.logf(\"storage class %q: %s needed\", class, cs.needed)\n\t\tbal.logf(\"storage class %q: %s unneeded\", class, cs.unneeded)\n\t\tbal.logf(\"storage class %q: %s pulling\", class, cs.pulling)\n\t\tbal.logf(\"storage class %q: %s unachievable\", class, cs.unachievable)\n\t}\n\tbal.logf(\"===\")\n\tbal.logf(\"%s total commitment (excluding unreferenced)\", bal.stats.desired)\n\tbal.logf(\"%s total usage\", bal.stats.current)\n\tbal.logf(\"===\")\n\tfor _, srv := range bal.KeepServices {\n\t\tbal.logf(\"%s: %v\\n\", srv, srv.ChangeSet)\n\t}\n\tbal.logf(\"===\")\n\tbal.printHistogram(60)\n\tbal.logf(\"===\")\n}\n\nfunc (bal *Balancer) printHistogram(hashColumns int) {\n\tbal.logf(\"Replication level distribution:\")\n\tmaxCount := 0\n\tfor _, count := range bal.stats.replHistogram {\n\t\tif maxCount < count {\n\t\t\tmaxCount = count\n\t\t}\n\t}\n\thashes := strings.Repeat(\"#\", hashColumns)\n\tcountWidth := 1 + int(math.Log10(float64(maxCount+1)))\n\tscaleCount := 10 * float64(hashColumns) / math.Floor(1+10*math.Log10(float64(maxCount+1)))\n\tfor repl, count := range bal.stats.replHistogram {\n\t\tnHashes := int(scaleCount * 
math.Log10(float64(count+1)))\n\t\tbal.logf(\"%2d: %*d %s\", repl, countWidth, count, hashes[:nHashes])\n\t}\n}\n\n// CheckSanityLate checks for configuration and runtime errors after\n// GetCurrentState() and ComputeChangeSets() have finished.\n//\n// If it returns an error, it is dangerous to run any Commit methods.\nfunc (bal *Balancer) CheckSanityLate() error {\n\tif bal.errors != nil {\n\t\tfor _, err := range bal.errors {\n\t\t\tbal.logf(\"deferred error: %v\", err)\n\t\t}\n\t\treturn fmt.Errorf(\"cannot proceed safely after deferred errors\")\n\t}\n\n\tif bal.collScanned == 0 {\n\t\treturn fmt.Errorf(\"received zero collections\")\n\t}\n\n\tanyDesired := false\n\tbal.BlockStateMap.Apply(func(_ arvados.SizedDigest, blk *BlockState) {\n\t\tif blk.Desired == nil {\n\t\t\treturn\n\t\t}\n\t\tfor _, desired := range *blk.Desired {\n\t\t\tif desired > 0 {\n\t\t\t\tanyDesired = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t})\n\tif !anyDesired {\n\t\treturn fmt.Errorf(\"zero blocks have desired replication>0\")\n\t}\n\n\tif dr := bal.DefaultReplication; dr < 1 {\n\t\treturn fmt.Errorf(\"Default replication (%d) is less than 1\", dr)\n\t}\n\n\t// TODO: no two services have identical indexes\n\t// TODO: no collisions (same md5, different size)\n\treturn nil\n}\n\n// CommitPulls sends the computed lists of pull requests to the\n// keepstore servers. This has the effect of increasing replication of\n// existing blocks that are either underreplicated or poorly\n// distributed according to rendezvous hashing.\nfunc (bal *Balancer) CommitPulls(ctx context.Context, c *arvados.Client) error {\n\tdefer bal.time(\"send_pull_lists\", \"wall clock time to send pull lists\")()\n\treturn bal.commitAsync(c, \"send pull list\",\n\t\tfunc(srv *KeepService) error {\n\t\t\treturn srv.CommitPulls(ctx, c)\n\t\t})\n}\n\n// CommitTrash sends the computed lists of trash requests to the\n// keepstore servers. This has the effect of deleting blocks that are\n// overreplicated or unreferenced.\nfunc (bal *Balancer) CommitTrash(ctx context.Context, c *arvados.Client) error {\n\tdefer bal.time(\"send_trash_lists\", \"wall clock time to send trash lists\")()\n\treturn bal.commitAsync(c, \"send trash list\",\n\t\tfunc(srv *KeepService) error {\n\t\t\treturn srv.CommitTrash(ctx, c)\n\t\t})\n}\n\nfunc (bal *Balancer) commitAsync(c *arvados.Client, label string, f func(srv *KeepService) error) error {\n\terrs := make(chan error)\n\tfor _, srv := range bal.KeepServices {\n\t\tgo func(srv *KeepService) {\n\t\t\tvar err error\n\t\t\tdefer func() { errs <- err }()\n\t\t\tlabel := fmt.Sprintf(\"%s: %v\", srv, label)\n\t\t\terr = f(srv)\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"%s: %v\", label, err)\n\t\t\t}\n\t\t}(srv)\n\t}\n\tvar lastErr error\n\tfor range bal.KeepServices {\n\t\tif err := <-errs; err != nil {\n\t\t\tbal.logf(\"%v\", err)\n\t\t\tlastErr = err\n\t\t}\n\t}\n\treturn lastErr\n}\n\nfunc (bal *Balancer) logf(f string, args ...interface{}) {\n\tif bal.Logger != nil {\n\t\tbal.Logger.Printf(f, args...)\n\t}\n}\n\nfunc (bal *Balancer) time(name, help string) func() {\n\tobserver := bal.Metrics.DurationObserver(name+\"_seconds\", help)\n\tt0 := time.Now()\n\tbal.Logger.Printf(\"%s: start\", name)\n\treturn func() {\n\t\tdur := time.Since(t0)\n\t\tobserver.Observe(dur.Seconds())\n\t\tbal.Logger.Printf(\"%s: took %vs\", name, dur.Seconds())\n\t}\n}\n\n// Log current memory usage: once now, at least once every 10 minutes,\n// and when memory grows by 40% since the last log. 
Log one more time\n// and then stop when ctx is canceled.\nfunc (bal *Balancer) reportMemorySize(ctx context.Context) {\n\tbuf, _ := os.ReadFile(\"/proc/self/smaps\")\n\tm := regexp.MustCompile(`\\nKernelPageSize:\\s*(\\d+) kB\\n`).FindSubmatch(buf)\n\tvar pagesize int64\n\tif len(m) == 2 {\n\t\tpagesize, _ = strconv.ParseInt(string(m[1]), 10, 64)\n\t\tpagesize <<= 10\n\t}\n\tif pagesize == 0 {\n\t\tbal.logf(\"cannot log OS-reported memory size: failed to parse KernelPageSize from /proc/self/smaps\")\n\t}\n\tosstats := func() string {\n\t\tif pagesize == 0 {\n\t\t\treturn \"\"\n\t\t}\n\t\tbuf, _ := os.ReadFile(\"/proc/self/statm\")\n\t\tfields := strings.Split(string(buf), \" \")\n\t\tif len(fields) < 2 {\n\t\t\treturn \"\"\n\t\t}\n\t\tvirt, _ := strconv.ParseInt(fields[0], 10, 64)\n\t\tvirt *= pagesize\n\t\tres, _ := strconv.ParseInt(fields[1], 10, 64)\n\t\tres *= pagesize\n\t\tif virt == 0 || res == 0 {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn fmt.Sprintf(\" virt %d res %d\", virt, res)\n\t}\n\n\tvar nextTime time.Time\n\tvar nextMem uint64\n\tconst maxInterval = time.Minute * 10\n\tconst maxIncrease = 1.4\n\n\tticker := time.NewTicker(time.Second)\n\tdefer ticker.Stop()\n\tvar memstats runtime.MemStats\n\tfor done := false; !done; {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\t// log one more time, then return\n\t\t\tdone = true\n\t\tcase <-ticker.C:\n\t\t}\n\t\tnow := time.Now()\n\t\truntime.ReadMemStats(&memstats)\n\t\tmem := memstats.StackInuse + memstats.HeapInuse\n\t\tif now.After(nextTime) || mem >= nextMem || done {\n\t\t\tbal.logf(\"heap %d stack %d heapalloc %d%s\", memstats.HeapInuse, memstats.StackInuse, memstats.HeapAlloc, osstats())\n\t\t\tnextMem = uint64(float64(mem) * maxIncrease)\n\t\t\tnextTime = now.Add(maxInterval)\n\t\t}\n\t}\n}\n\n// Rendezvous hash sort function. Less efficient than sorting on\n// precomputed rendezvous hashes, but also rarely used.\nfunc rendezvousLess(i, j string, blkid arvados.SizedDigest) bool {\n\ta := md5.Sum([]byte(string(blkid[:32]) + i))\n\tb := md5.Sum([]byte(string(blkid[:32]) + j))\n\treturn bytes.Compare(a[:], b[:]) < 0\n}\n
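\n// Example (illustrative sketch, not part of the exported API): the same\n// rendezvous ordering used in balanceBlock can be applied to any candidate\n// list by sorting with rendezvousLess. The uuids, blkid, and mnt* names\n// here are hypothetical:\n//\n//\tuuids := []string{mntA.UUID, mntB.UUID, mntC.UUID}\n//\tsort.Slice(uuids, func(i, j int) bool {\n//\t\treturn rendezvousLess(uuids[i], uuids[j], blkid)\n//\t})\n//\t// uuids[0] is now the preferred placement for blkid.\n"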
  },
  {
    "path": "services/keep-balance/balance_run_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/jmoiron/sqlx\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&runSuite{})\n\ntype reqTracker struct {\n\treqs []http.Request\n\tsync.Mutex\n}\n\nfunc (rt *reqTracker) Count() int {\n\trt.Lock()\n\tdefer rt.Unlock()\n\treturn len(rt.reqs)\n}\n\nfunc (rt *reqTracker) Add(req *http.Request) int {\n\trt.Lock()\n\tdefer rt.Unlock()\n\trt.reqs = append(rt.reqs, *req)\n\treturn len(rt.reqs)\n}\n\nvar stubServices = []arvados.KeepService{\n\t{\n\t\tUUID:           \"zzzzz-bi6l4-000000000000000\",\n\t\tServiceHost:    \"keep0.zzzzz.arvadosapi.com\",\n\t\tServicePort:    25107,\n\t\tServiceSSLFlag: false,\n\t\tServiceType:    \"disk\",\n\t},\n\t{\n\t\tUUID:           \"zzzzz-bi6l4-000000000000001\",\n\t\tServiceHost:    \"keep1.zzzzz.arvadosapi.com\",\n\t\tServicePort:    25107,\n\t\tServiceSSLFlag: false,\n\t\tServiceType:    \"disk\",\n\t},\n\t{\n\t\tUUID:           \"zzzzz-bi6l4-000000000000002\",\n\t\tServiceHost:    \"keep2.zzzzz.arvadosapi.com\",\n\t\tServicePort:    25107,\n\t\tServiceSSLFlag: false,\n\t\tServiceType:    \"disk\",\n\t},\n\t{\n\t\tUUID:           \"zzzzz-bi6l4-000000000000003\",\n\t\tServiceHost:    \"keep3.zzzzz.arvadosapi.com\",\n\t\tServicePort:    25107,\n\t\tServiceSSLFlag: false,\n\t\tServiceType:    \"disk\",\n\t},\n\t{\n\t\tUUID:           \"zzzzz-bi6l4-h0a0xwut9qa6g3a\",\n\t\tServiceHost:    \"keep.zzzzz.arvadosapi.com\",\n\t\tServicePort:    25333,\n\t\tServiceSSLFlag: true,\n\t\tServiceType:    \"proxy\",\n\t},\n}\n\nvar stubMounts = map[string][]arvados.KeepMount{\n\t\"keep0.zzzzz.arvadosapi.com:25107\": {{\n\t\tUUID:           \"zzzzz-ivpuk-000000000000000\",\n\t\tDeviceID:       \"keep0-vol0\",\n\t\tStorageClasses: map[string]bool{\"default\": true},\n\t\tAllowWrite:     true,\n\t\tAllowTrash:     true,\n\t}},\n\t\"keep1.zzzzz.arvadosapi.com:25107\": {{\n\t\tUUID:           \"zzzzz-ivpuk-100000000000000\",\n\t\tDeviceID:       \"keep1-vol0\",\n\t\tStorageClasses: map[string]bool{\"default\": true},\n\t\tAllowWrite:     true,\n\t\tAllowTrash:     true,\n\t}},\n\t\"keep2.zzzzz.arvadosapi.com:25107\": {{\n\t\tUUID:           \"zzzzz-ivpuk-200000000000000\",\n\t\tDeviceID:       \"keep2-vol0\",\n\t\tStorageClasses: map[string]bool{\"default\": true},\n\t\tAllowWrite:     true,\n\t\tAllowTrash:     true,\n\t}},\n\t\"keep3.zzzzz.arvadosapi.com:25107\": {{\n\t\tUUID:           \"zzzzz-ivpuk-300000000000000\",\n\t\tDeviceID:       \"keep3-vol0\",\n\t\tStorageClasses: map[string]bool{\"default\": true},\n\t\tAllowWrite:     true,\n\t\tAllowTrash:     true,\n\t}},\n}\n\n// stubServer is an HTTP transport that intercepts and processes all\n// requests using its own handlers.\ntype stubServer struct {\n\tmux      *http.ServeMux\n\tsrv      *httptest.Server\n\tmutex    sync.Mutex\n\tRequests reqTracker\n\tlogf     func(string, ...interface{})\n}\n\n// Start initializes the stub server and returns an *http.Client that\n// uses the stub server to handle all requests.\n//\n// A 
stubServer that has been started should eventually be shut down\n// with Close().\nfunc (s *stubServer) Start() *http.Client {\n\t// Set up a config.Client that forwards all requests to s.mux\n\t// via s.srv. Test cases will attach handlers to s.mux to get\n\t// the desired responses.\n\ts.mux = http.NewServeMux()\n\ts.srv = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ts.mutex.Lock()\n\t\ts.Requests.Add(r)\n\t\ts.mutex.Unlock()\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\ts.mux.ServeHTTP(w, r)\n\t}))\n\treturn &http.Client{Transport: s}\n}\n\nfunc (s *stubServer) RoundTrip(req *http.Request) (*http.Response, error) {\n\tw := httptest.NewRecorder()\n\ts.mux.ServeHTTP(w, req)\n\treturn &http.Response{\n\t\tStatusCode: w.Code,\n\t\tStatus:     fmt.Sprintf(\"%d %s\", w.Code, http.StatusText(w.Code)),\n\t\tHeader:     w.HeaderMap,\n\t\tBody:       ioutil.NopCloser(w.Body)}, nil\n}\n\n// Close releases resources used by the server.\nfunc (s *stubServer) Close() {\n\ts.srv.Close()\n}\n\nfunc (s *stubServer) serveStatic(path, data string) *reqTracker {\n\trt := &reqTracker{}\n\ts.mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {\n\t\trt.Add(r)\n\t\tif r.Body != nil {\n\t\t\tioutil.ReadAll(r.Body)\n\t\t\tr.Body.Close()\n\t\t}\n\t\tio.WriteString(w, data)\n\t})\n\treturn rt\n}\n\nfunc (s *stubServer) serveCurrentUserAdmin() *reqTracker {\n\treturn s.serveStatic(\"/arvados/v1/users/current\",\n\t\t`{\"uuid\":\"zzzzz-tpzed-000000000000000\",\"is_admin\":true,\"is_active\":true}`)\n}\n\nfunc (s *stubServer) serveCurrentUserNotAdmin() *reqTracker {\n\treturn s.serveStatic(\"/arvados/v1/users/current\",\n\t\t`{\"uuid\":\"zzzzz-tpzed-000000000000000\",\"is_admin\":false,\"is_active\":true}`)\n}\n\nfunc (s *stubServer) serveDiscoveryDoc() *reqTracker {\n\treturn s.serveStatic(\"/discovery/v1/apis/arvados/v1/rest\",\n\t\t`{\"defaultCollectionReplication\":2}`)\n}\n\nfunc (s *stubServer) serveZeroCollections() *reqTracker {\n\treturn s.serveStatic(\"/arvados/v1/collections\",\n\t\t`{\"items\":[],\"items_available\":0}`)\n}\n\nfunc (s *stubServer) serveFooBarFileCollections() *reqTracker {\n\trt := &reqTracker{}\n\ts.mux.HandleFunc(\"/arvados/v1/collections\", func(w http.ResponseWriter, r *http.Request) {\n\t\tr.ParseForm()\n\t\trt.Add(r)\n\t\tif strings.Contains(r.Form.Get(\"filters\"), `modified_at`) {\n\t\t\tio.WriteString(w, `{\"items_available\":0,\"items\":[]}`)\n\t\t} else {\n\t\t\tio.WriteString(w, `{\"items_available\":3,\"items\":[\n\t\t\t\t{\"uuid\":\"zzzzz-4zz18-aaaaaaaaaaaaaaa\",\"portable_data_hash\":\"fa7aeb5140e2848d39b416daeef4ffc5+45\",\"manifest_text\":\". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\"modified_at\":\"2014-02-03T17:22:54Z\"},\n\t\t\t\t{\"uuid\":\"zzzzz-4zz18-ehbhgtheo8909or\",\"portable_data_hash\":\"fa7aeb5140e2848d39b416daeef4ffc5+45\",\"manifest_text\":\". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\"modified_at\":\"2014-02-03T17:22:54Z\"},\n\t\t\t\t{\"uuid\":\"zzzzz-4zz18-znfnqtbbv4spc3w\",\"portable_data_hash\":\"1f4b0bc7583c2a7f9102c395f4ffc5e3+45\",\"manifest_text\":\". 
acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\\n\",\"modified_at\":\"2014-02-03T17:22:54Z\"}]}`)\n\t\t}\n\t})\n\treturn rt\n}\n\nfunc (s *stubServer) serveCollectionsButSkipOne() *reqTracker {\n\trt := &reqTracker{}\n\ts.mux.HandleFunc(\"/arvados/v1/collections\", func(w http.ResponseWriter, r *http.Request) {\n\t\tr.ParseForm()\n\t\trt.Add(r)\n\t\tif strings.Contains(r.Form.Get(\"filters\"), `\"modified_at\",\"\\u003c=\"`) {\n\t\t\tio.WriteString(w, `{\"items_available\":3,\"items\":[]}`)\n\t\t} else if strings.Contains(r.Form.Get(\"filters\"), `\"modified_at\",\"\\u003e`) {\n\t\t\tio.WriteString(w, `{\"items_available\":0,\"items\":[]}`)\n\t\t} else if strings.Contains(r.Form.Get(\"filters\"), `\"modified_at\",\"=\"`) && strings.Contains(r.Form.Get(\"filters\"), `\"uuid\",\"\\u003e\"`) {\n\t\t\tio.WriteString(w, `{\"items_available\":0,\"items\":[]}`)\n\t\t} else if strings.Contains(r.Form.Get(\"filters\"), `\"modified_at\",\"=\",null`) {\n\t\t\tio.WriteString(w, `{\"items_available\":0,\"items\":[]}`)\n\t\t} else {\n\t\t\tio.WriteString(w, `{\"items_available\":2,\"items\":[\n\t\t\t\t{\"uuid\":\"zzzzz-4zz18-ehbhgtheo8909or\",\"portable_data_hash\":\"fa7aeb5140e2848d39b416daeef4ffc5+45\",\"manifest_text\":\". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\"modified_at\":\"2014-02-03T17:22:54Z\"},\n\t\t\t\t{\"uuid\":\"zzzzz-4zz18-znfnqtbbv4spc3w\",\"portable_data_hash\":\"1f4b0bc7583c2a7f9102c395f4ffc5e3+45\",\"manifest_text\":\". acbd18db4cc2f85cedef654fccc4a4d8+3 0:3:foo\\n\",\"modified_at\":\"2014-02-03T17:22:54Z\"}]}`)\n\t\t}\n\t})\n\treturn rt\n}\n\nfunc (s *stubServer) serveZeroKeepServices() *reqTracker {\n\treturn s.serveJSON(\"/arvados/v1/keep_services\", arvados.KeepServiceList{})\n}\n\nfunc (s *stubServer) serveKeepServices(svcs []arvados.KeepService) *reqTracker {\n\treturn s.serveJSON(\"/arvados/v1/keep_services\", arvados.KeepServiceList{\n\t\tItemsAvailable: len(svcs),\n\t\tItems:          svcs,\n\t})\n}\n\nfunc (s *stubServer) serveJSON(path string, resp interface{}) *reqTracker {\n\trt := &reqTracker{}\n\ts.mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {\n\t\trt.Add(r)\n\t\tjson.NewEncoder(w).Encode(resp)\n\t})\n\treturn rt\n}\n\nfunc (s *stubServer) serveKeepstoreMounts() *reqTracker {\n\trt := &reqTracker{}\n\ts.mux.HandleFunc(\"/mounts\", func(w http.ResponseWriter, r *http.Request) {\n\t\trt.Add(r)\n\t\tjson.NewEncoder(w).Encode(stubMounts[r.Host])\n\t})\n\treturn rt\n}\n\nfunc (s *stubServer) serveKeepstoreIndexFoo4Bar1() *reqTracker {\n\tfooLine := func(mt int) string { return fmt.Sprintf(\"acbd18db4cc2f85cedef654fccc4a4d8+3 %d\\n\", 12345678+mt) }\n\tbarLine := \"37b51d194a7513e45b56f6524f2d51f2+3 12345678\\n\"\n\trt := &reqTracker{}\n\ts.mux.HandleFunc(\"/index/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tcount := rt.Add(r)\n\t\tif r.Host == \"keep0.zzzzz.arvadosapi.com:25107\" && strings.HasPrefix(barLine, r.URL.Path[7:]) {\n\t\t\tio.WriteString(w, barLine)\n\t\t}\n\t\tif strings.HasPrefix(fooLine(count), r.URL.Path[7:]) {\n\t\t\tio.WriteString(w, fooLine(count))\n\t\t}\n\t\tio.WriteString(w, \"\\n\")\n\t})\n\tfor _, mounts := range stubMounts {\n\t\tfor i, mnt := range mounts {\n\t\t\ti := i\n\t\t\ts.mux.HandleFunc(fmt.Sprintf(\"/mounts/%s/blocks\", mnt.UUID), func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tcount := rt.Add(r)\n\t\t\t\tr.ParseForm()\n\t\t\t\tif i == 0 && r.Host == \"keep0.zzzzz.arvadosapi.com:25107\" && strings.HasPrefix(barLine, r.Form.Get(\"prefix\")) {\n\t\t\t\t\tio.WriteString(w, 
barLine)\n\t\t\t\t}\n\t\t\t\tif i == 0 && strings.HasPrefix(fooLine(count), r.Form.Get(\"prefix\")) {\n\t\t\t\t\tio.WriteString(w, fooLine(count))\n\t\t\t\t}\n\t\t\t\tio.WriteString(w, \"\\n\")\n\t\t\t})\n\t\t}\n\t}\n\treturn rt\n}\n\nfunc (s *stubServer) serveKeepstoreIndexFoo1() *reqTracker {\n\tfooLine := \"acbd18db4cc2f85cedef654fccc4a4d8+3 12345678\\n\"\n\trt := &reqTracker{}\n\ts.mux.HandleFunc(\"/index/\", func(w http.ResponseWriter, r *http.Request) {\n\t\trt.Add(r)\n\t\tif r.Host == \"keep0.zzzzz.arvadosapi.com:25107\" && strings.HasPrefix(fooLine, r.URL.Path[7:]) {\n\t\t\tio.WriteString(w, fooLine)\n\t\t}\n\t\tio.WriteString(w, \"\\n\")\n\t})\n\tfor _, mounts := range stubMounts {\n\t\tfor i, mnt := range mounts {\n\t\t\ti := i\n\t\t\ts.mux.HandleFunc(fmt.Sprintf(\"/mounts/%s/blocks\", mnt.UUID), func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\trt.Add(r)\n\t\t\t\tr.ParseForm()\n\t\t\t\tif i == 0 && strings.HasPrefix(fooLine, r.Form.Get(\"prefix\")) {\n\t\t\t\t\tio.WriteString(w, fooLine)\n\t\t\t\t}\n\t\t\t\tio.WriteString(w, \"\\n\")\n\t\t\t})\n\t\t}\n\t}\n\treturn rt\n}\n\nfunc (s *stubServer) serveKeepstoreIndexIgnoringPrefix() *reqTracker {\n\tfooLine := \"acbd18db4cc2f85cedef654fccc4a4d8+3 12345678\\n\"\n\trt := &reqTracker{}\n\ts.mux.HandleFunc(\"/index/\", func(w http.ResponseWriter, r *http.Request) {\n\t\trt.Add(r)\n\t\tio.WriteString(w, fooLine)\n\t\tio.WriteString(w, \"\\n\")\n\t})\n\tfor _, mounts := range stubMounts {\n\t\tfor _, mnt := range mounts {\n\t\t\ts.mux.HandleFunc(fmt.Sprintf(\"/mounts/%s/blocks\", mnt.UUID), func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\trt.Add(r)\n\t\t\t\tio.WriteString(w, fooLine)\n\t\t\t\tio.WriteString(w, \"\\n\")\n\t\t\t})\n\t\t}\n\t}\n\treturn rt\n}\n\nfunc (s *stubServer) serveKeepstoreTrash() *reqTracker {\n\treturn s.serveStatic(\"/trash\", `{}`)\n}\n\nfunc (s *stubServer) serveKeepstorePull() *reqTracker {\n\treturn s.serveStatic(\"/pull\", `{}`)\n}\n\ntype runSuite struct {\n\tstub   stubServer\n\tconfig *arvados.Cluster\n\tdb     *sqlx.DB\n\tclient *arvados.Client\n}\n\nfunc (s *runSuite) newServer(options *RunOptions) *Server {\n\tsrv := &Server{\n\t\tCluster:    s.config,\n\t\tArvClient:  s.client,\n\t\tRunOptions: *options,\n\t\tMetrics:    newMetrics(prometheus.NewRegistry()),\n\t\tLogger:     options.Logger,\n\t\tDumper:     options.Dumper,\n\t\tDB:         s.db,\n\t}\n\treturn srv\n}\n\nfunc (s *runSuite) SetUpTest(c *check.C) {\n\tcfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()\n\tc.Assert(err, check.Equals, nil)\n\ts.config, err = cfg.GetCluster(\"\")\n\tc.Assert(err, check.Equals, nil)\n\ts.db, err = sqlx.Open(\"postgres\", s.config.PostgreSQL.Connection.String())\n\tc.Assert(err, check.IsNil)\n\n\ts.config.Collections.BalancePeriod = arvados.Duration(time.Second)\n\tarvadostest.SetServiceURL(&s.config.Services.Keepbalance, \"http://localhost:/\")\n\n\ts.client = &arvados.Client{\n\t\tAuthToken: \"xyzzy\",\n\t\tAPIHost:   \"zzzzz.arvadosapi.com\",\n\t\tClient:    s.stub.Start()}\n\n\ts.stub.serveDiscoveryDoc()\n\ts.stub.logf = c.Logf\n}\n\nfunc (s *runSuite) TearDownTest(c *check.C) {\n\ts.stub.Close()\n}\n\nfunc (s *runSuite) TestRefuseZeroCollections(c *check.C) {\n\tdefer arvados.NewClientFromEnv().RequestAndDecode(nil, \"POST\", \"database/reset\", nil, nil)\n\t_, err := s.db.Exec(`delete from collections`)\n\tc.Assert(err, check.IsNil)\n\topts := RunOptions{\n\t\tLogger: 
ctxlog.TestLogger(c),\n\t}\n\ts.stub.serveCurrentUserAdmin()\n\ts.stub.serveZeroCollections()\n\ts.stub.serveKeepServices(stubServices)\n\ts.stub.serveKeepstoreMounts()\n\ts.stub.serveKeepstoreIndexFoo4Bar1()\n\ttrashReqs := s.stub.serveKeepstoreTrash()\n\tpullReqs := s.stub.serveKeepstorePull()\n\tsrv := s.newServer(&opts)\n\t_, err = srv.runOnce(context.Background())\n\tc.Check(err, check.ErrorMatches, \"received zero collections\")\n\tc.Check(trashReqs.Count(), check.Equals, 4)\n\tc.Check(pullReqs.Count(), check.Equals, 0)\n}\n\nfunc (s *runSuite) TestRefuseBadIndex(c *check.C) {\n\topts := RunOptions{\n\t\tChunkPrefix: \"abc\",\n\t\tLogger:      ctxlog.TestLogger(c),\n\t}\n\ts.stub.serveCurrentUserAdmin()\n\ts.stub.serveFooBarFileCollections()\n\ts.stub.serveKeepServices(stubServices)\n\ts.stub.serveKeepstoreMounts()\n\ts.stub.serveKeepstoreIndexIgnoringPrefix()\n\ttrashReqs := s.stub.serveKeepstoreTrash()\n\tpullReqs := s.stub.serveKeepstorePull()\n\tsrv := s.newServer(&opts)\n\tbal, err := srv.runOnce(context.Background())\n\tc.Check(err, check.ErrorMatches, \".*Index response included block .* despite asking for prefix \\\"abc\\\"\")\n\tc.Check(trashReqs.Count(), check.Equals, 4)\n\tc.Check(pullReqs.Count(), check.Equals, 0)\n\tc.Check(bal.stats.trashes, check.Equals, 0)\n\tc.Check(bal.stats.pulls, check.Equals, 0)\n}\n\nfunc (s *runSuite) TestRefuseNonAdmin(c *check.C) {\n\topts := RunOptions{\n\t\tLogger: ctxlog.TestLogger(c),\n\t}\n\ts.stub.serveCurrentUserNotAdmin()\n\ts.stub.serveZeroCollections()\n\ts.stub.serveKeepServices(stubServices)\n\ts.stub.serveKeepstoreMounts()\n\ttrashReqs := s.stub.serveKeepstoreTrash()\n\tpullReqs := s.stub.serveKeepstorePull()\n\tsrv := s.newServer(&opts)\n\t_, err := srv.runOnce(context.Background())\n\tc.Check(err, check.ErrorMatches, \"current user .* is not .* admin user\")\n\tc.Check(trashReqs.Count(), check.Equals, 0)\n\tc.Check(pullReqs.Count(), check.Equals, 0)\n}\n\nfunc (s *runSuite) TestInvalidChunkPrefix(c *check.C) {\n\tfor _, trial := range []struct {\n\t\tprefix string\n\t\terrRe  string\n\t}{\n\t\t{\"123ABC\", \"invalid char \\\"A\\\" in chunk prefix.*\"},\n\t\t{\"123xyz\", \"invalid char \\\"x\\\" in chunk prefix.*\"},\n\t\t{\"123456789012345678901234567890123\", \"invalid chunk prefix .* longer than a block hash\"},\n\t} {\n\t\ts.SetUpTest(c)\n\t\tc.Logf(\"trying invalid prefix %q\", trial.prefix)\n\t\topts := RunOptions{\n\t\t\tChunkPrefix: trial.prefix,\n\t\t\tLogger:      ctxlog.TestLogger(c),\n\t\t}\n\t\ts.stub.serveCurrentUserAdmin()\n\t\ts.stub.serveFooBarFileCollections()\n\t\ts.stub.serveKeepServices(stubServices)\n\t\ts.stub.serveKeepstoreMounts()\n\t\ttrashReqs := s.stub.serveKeepstoreTrash()\n\t\tpullReqs := s.stub.serveKeepstorePull()\n\t\tsrv := s.newServer(&opts)\n\t\t_, err := srv.runOnce(context.Background())\n\t\tc.Check(err, check.ErrorMatches, trial.errRe)\n\t\tc.Check(trashReqs.Count(), check.Equals, 0)\n\t\tc.Check(pullReqs.Count(), check.Equals, 0)\n\t}\n}\n\nfunc (s *runSuite) TestRefuseSameDeviceDifferentVolumes(c *check.C) {\n\topts := RunOptions{\n\t\tLogger: ctxlog.TestLogger(c),\n\t}\n\ts.stub.serveCurrentUserAdmin()\n\ts.stub.serveZeroCollections()\n\ts.stub.serveKeepServices(stubServices)\n\ts.stub.mux.HandleFunc(\"/mounts\", func(w http.ResponseWriter, r *http.Request) {\n\t\thostid := r.Host[:5] // \"keep0.zzzzz.arvadosapi.com:25107\" => \"keep0\"\n\t\tjson.NewEncoder(w).Encode([]arvados.KeepMount{{\n\t\t\tUUID:           \"zzzzz-ivpuk-0000000000\" + hostid,\n\t\t\tDeviceID:       
\"keep0-vol0\",\n\t\t\tStorageClasses: map[string]bool{\"default\": true},\n\t\t}})\n\t})\n\ttrashReqs := s.stub.serveKeepstoreTrash()\n\tpullReqs := s.stub.serveKeepstorePull()\n\tsrv := s.newServer(&opts)\n\t_, err := srv.runOnce(context.Background())\n\tc.Check(err, check.ErrorMatches, \"cannot continue with config errors.*\")\n\tc.Check(trashReqs.Count(), check.Equals, 0)\n\tc.Check(pullReqs.Count(), check.Equals, 0)\n}\n\nfunc (s *runSuite) TestWriteLostBlocks(c *check.C) {\n\tlostf, err := ioutil.TempFile(\"\", \"keep-balance-lost-blocks-test-\")\n\tc.Assert(err, check.IsNil)\n\ts.config.Collections.BlobMissingReport = lostf.Name()\n\tdefer os.Remove(lostf.Name())\n\topts := RunOptions{\n\t\tLogger: ctxlog.TestLogger(c),\n\t}\n\ts.stub.serveCurrentUserAdmin()\n\ts.stub.serveFooBarFileCollections()\n\ts.stub.serveKeepServices(stubServices)\n\ts.stub.serveKeepstoreMounts()\n\ts.stub.serveKeepstoreIndexFoo1()\n\ts.stub.serveKeepstoreTrash()\n\ts.stub.serveKeepstorePull()\n\tsrv := s.newServer(&opts)\n\tc.Assert(err, check.IsNil)\n\t_, err = srv.runOnce(context.Background())\n\tc.Check(err, check.IsNil)\n\tlost, err := ioutil.ReadFile(lostf.Name())\n\tc.Assert(err, check.IsNil)\n\tc.Check(string(lost), check.Matches, `(?ms).*37b51d194a7513e45b56f6524f2d51f2.* fa7aeb5140e2848d39b416daeef4ffc5\\+45.*`)\n}\n\nfunc (s *runSuite) TestDryRun(c *check.C) {\n\ts.config.Collections.BalanceTrashLimit = 0\n\ts.config.Collections.BalancePullLimit = 0\n\topts := RunOptions{\n\t\tLogger: ctxlog.TestLogger(c),\n\t}\n\ts.stub.serveCurrentUserAdmin()\n\tcollReqs := s.stub.serveFooBarFileCollections()\n\ts.stub.serveKeepServices(stubServices)\n\ts.stub.serveKeepstoreMounts()\n\ts.stub.serveKeepstoreIndexFoo4Bar1()\n\ttrashReqs := s.stub.serveKeepstoreTrash()\n\tpullReqs := s.stub.serveKeepstorePull()\n\tsrv := s.newServer(&opts)\n\tbal, err := srv.runOnce(context.Background())\n\tc.Check(err, check.IsNil)\n\tfor _, req := range collReqs.reqs {\n\t\tc.Check(req.Form.Get(\"include_trash\"), check.Equals, \"true\")\n\t\tc.Check(req.Form.Get(\"include_old_versions\"), check.Equals, \"true\")\n\t}\n\tc.Check(trashReqs.Count(), check.Equals, 0)\n\tc.Check(pullReqs.Count(), check.Equals, 0)\n\tc.Check(bal.stats.pulls, check.Equals, 0)\n\tc.Check(bal.stats.pullsDeferred, check.Not(check.Equals), 0)\n\tc.Check(bal.stats.trashes, check.Equals, 0)\n\tc.Check(bal.stats.trashesDeferred, check.Not(check.Equals), 0)\n\tc.Check(bal.stats.underrep.replicas, check.Not(check.Equals), 0)\n\tc.Check(bal.stats.overrep.replicas, check.Not(check.Equals), 0)\n\n\tmetrics := arvadostest.GatherMetricsAsString(srv.Metrics.reg)\n\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keep_trash_entries_deferred_count [1-9].*`)\n\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keep_pull_entries_deferred_count [1-9].*`)\n}\n\nfunc (s *runSuite) TestCommit(c *check.C) {\n\ts.config.Collections.BlobMissingReport = c.MkDir() + \"/keep-balance-lost-blocks-test-\"\n\ts.config.ManagementToken = \"xyzzy\"\n\topts := RunOptions{\n\t\tLogger: ctxlog.TestLogger(c),\n\t\tDumper: ctxlog.TestLogger(c),\n\t}\n\ts.stub.serveCurrentUserAdmin()\n\ts.stub.serveFooBarFileCollections()\n\ts.stub.serveKeepServices(stubServices)\n\ts.stub.serveKeepstoreMounts()\n\ts.stub.serveKeepstoreIndexFoo4Bar1()\n\ttrashReqs := s.stub.serveKeepstoreTrash()\n\tpullReqs := s.stub.serveKeepstorePull()\n\tsrv := s.newServer(&opts)\n\tbal, err := srv.runOnce(context.Background())\n\tc.Check(err, check.IsNil)\n\tc.Check(trashReqs.Count(), check.Equals, 
8)\n\tc.Check(pullReqs.Count(), check.Equals, 4)\n\t// \"foo\" block is overreplicated by 2\n\tc.Check(bal.stats.trashes, check.Equals, 2)\n\t// \"bar\" block is underreplicated by 1, and its only copy is\n\t// in a poor rendezvous position\n\tc.Check(bal.stats.pulls, check.Equals, 2)\n\n\tlost, err := ioutil.ReadFile(s.config.Collections.BlobMissingReport)\n\tc.Assert(err, check.IsNil)\n\tc.Check(string(lost), check.Not(check.Matches), `(?ms).*acbd18db4cc2f85cedef654fccc4a4d8.*`)\n\n\tmetrics := arvadostest.GatherMetricsAsString(srv.Metrics.reg)\n\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keep_total_bytes 15\\n.*`)\n\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keepbalance_changeset_compute_seconds_sum [0-9\\.]+\\n.*`)\n\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keepbalance_changeset_compute_seconds_count 1\\n.*`)\n\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keep_dedup_byte_ratio [1-9].*`)\n\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keep_dedup_block_ratio [1-9].*`)\n\n\tfor _, cat := range []string{\n\t\t\"dedup_byte_ratio\", \"dedup_block_ratio\", \"collection_bytes\",\n\t\t\"referenced_bytes\", \"referenced_blocks\", \"reference_count\",\n\t\t\"pull_entries_sent_count\",\n\t\t\"trash_entries_sent_count\",\n\t} {\n\t\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keep_`+cat+` [1-9].*`)\n\t}\n\n\tfor _, cat := range []string{\n\t\t\"pull_entries_deferred_count\",\n\t\t\"trash_entries_deferred_count\",\n\t} {\n\t\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keep_`+cat+` 0\\n.*`)\n\t}\n\n\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keep_replicated_block_count{replicas=\"0\"} [1-9].*`)\n\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keep_replicated_block_count{replicas=\"1\"} [1-9].*`)\n\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keep_replicated_block_count{replicas=\"9\"} 0\\n.*`)\n\n\tfor _, sub := range []string{\"replicas\", \"blocks\", \"bytes\"} {\n\t\tfor _, cat := range []string{\"needed\", \"unneeded\", \"unachievable\", \"pulling\"} {\n\t\t\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keep_usage_`+sub+`{status=\"`+cat+`\",storage_class=\"default\"} [1-9].*`)\n\t\t}\n\t\tfor _, cat := range []string{\"total\", \"garbage\", \"transient\", \"overreplicated\", \"underreplicated\", \"unachievable\", \"balanced\", \"desired\", \"lost\"} {\n\t\t\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keep_`+cat+`_`+sub+` [0-9].*`)\n\t\t}\n\t}\n\tc.Logf(\"%s\", metrics)\n}\n\nfunc (s *runSuite) TestChunkPrefix(c *check.C) {\n\ts.config.Collections.BlobMissingReport = c.MkDir() + \"/keep-balance-lost-blocks-test-\"\n\topts := RunOptions{\n\t\tChunkPrefix: \"ac\", // catch \"foo\" but not \"bar\"\n\t\tLogger:      ctxlog.TestLogger(c),\n\t\tDumper:      ctxlog.TestLogger(c),\n\t}\n\ts.stub.serveCurrentUserAdmin()\n\ts.stub.serveFooBarFileCollections()\n\ts.stub.serveKeepServices(stubServices)\n\ts.stub.serveKeepstoreMounts()\n\ts.stub.serveKeepstoreIndexFoo4Bar1()\n\ttrashReqs := s.stub.serveKeepstoreTrash()\n\tpullReqs := s.stub.serveKeepstorePull()\n\tsrv := s.newServer(&opts)\n\tbal, err := srv.runOnce(context.Background())\n\tc.Check(err, check.IsNil)\n\tc.Check(trashReqs.Count(), check.Equals, 8)\n\tc.Check(pullReqs.Count(), check.Equals, 4)\n\t// \"foo\" block is overreplicated by 2\n\tc.Check(bal.stats.trashes, check.Equals, 2)\n\t// \"bar\" block is underreplicated but does not match prefix\n\tc.Check(bal.stats.pulls, check.Equals, 0)\n\n\tlost, err := 
ioutil.ReadFile(s.config.Collections.BlobMissingReport)\n\tc.Assert(err, check.IsNil)\n\tc.Check(string(lost), check.Equals, \"\")\n}\n\nfunc (s *runSuite) TestRunForever_TriggeredByTimer(c *check.C) {\n\ts.config.ManagementToken = \"xyzzy\"\n\topts := RunOptions{\n\t\tLogger: ctxlog.TestLogger(c),\n\t\tDumper: ctxlog.TestLogger(c),\n\t}\n\ts.stub.serveCurrentUserAdmin()\n\ts.stub.serveFooBarFileCollections()\n\ts.stub.serveKeepServices(stubServices)\n\ts.stub.serveKeepstoreMounts()\n\ts.stub.serveKeepstoreIndexFoo4Bar1()\n\ttrashReqs := s.stub.serveKeepstoreTrash()\n\tpullReqs := s.stub.serveKeepstorePull()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\ts.config.Collections.BalancePeriod = arvados.Duration(10 * time.Millisecond)\n\tsrv := s.newServer(&opts)\n\n\tdone := make(chan bool)\n\tgo func() {\n\t\tsrv.runForever(ctx)\n\t\tclose(done)\n\t}()\n\n\t// Each run should send 4 pull lists + 4 trash lists. The\n\t// first run should also send 4 empty trash lists at\n\t// startup. We should complete at least four runs in much less\n\t// than 10s.\n\tfor t0 := time.Now(); time.Since(t0) < 10*time.Second; {\n\t\tpulls := pullReqs.Count()\n\t\tif pulls >= 16 && trashReqs.Count() == pulls+4 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Millisecond)\n\t}\n\tcancel()\n\t<-done\n\tc.Check(pullReqs.Count() >= 16, check.Equals, true)\n\tc.Check(trashReqs.Count() >= 20, check.Equals, true)\n\n\t// We should have completed 4 runs before calling cancel().\n\t// But the next run might also have started before we called\n\t// cancel(), in which case the extra run will be included in\n\t// the changeset_compute_seconds_count metric.\n\tcompleted := pullReqs.Count() / 4\n\tmetrics := arvadostest.GatherMetricsAsString(srv.Metrics.reg)\n\tc.Check(metrics, check.Matches, fmt.Sprintf(`(?ms).*\\narvados_keepbalance_changeset_compute_seconds_count (%d|%d)\\n.*`, completed, completed+1))\n}\n\nfunc (s *runSuite) TestRunForever_TriggeredBySignal(c *check.C) {\n\ts.config.ManagementToken = \"xyzzy\"\n\topts := RunOptions{\n\t\tLogger: ctxlog.TestLogger(c),\n\t\tDumper: ctxlog.TestLogger(c),\n\t}\n\ts.stub.serveCurrentUserAdmin()\n\ts.stub.serveFooBarFileCollections()\n\ts.stub.serveKeepServices(stubServices)\n\ts.stub.serveKeepstoreMounts()\n\ts.stub.serveKeepstoreIndexFoo4Bar1()\n\ttrashReqs := s.stub.serveKeepstoreTrash()\n\tpullReqs := s.stub.serveKeepstorePull()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\ts.config.Collections.BalancePeriod = arvados.Duration(time.Minute)\n\tsrv := s.newServer(&opts)\n\n\tdone := make(chan bool)\n\tgo func() {\n\t\tsrv.runForever(ctx)\n\t\tclose(done)\n\t}()\n\n\tprocself, err := os.FindProcess(os.Getpid())\n\tc.Assert(err, check.IsNil)\n\n\t// Each run should send 4 pull lists + 4 trash lists. The\n\t// first run should also send 4 empty trash lists at\n\t// startup. 
We should be able to complete four runs in much\n\t// less than 10s.\n\tcompletedRuns := 0\n\tfor t0 := time.Now(); time.Since(t0) < 10*time.Second; {\n\t\tpulls := pullReqs.Count()\n\t\tif pulls >= 16 && trashReqs.Count() == pulls+4 {\n\t\t\tbreak\n\t\t}\n\t\t// Once the first run has started automatically, we\n\t\t// send a single SIGUSR1 at the end of each run, to\n\t\t// ensure we get exactly 4 runs in total.\n\t\tif pulls > 0 && pulls%4 == 0 && pulls <= 12 && pulls/4 > completedRuns {\n\t\t\tcompletedRuns = pulls / 4\n\t\t\tc.Logf(\"completed run %d, sending SIGUSR1 to trigger next run\", completedRuns)\n\t\t\tprocself.Signal(syscall.SIGUSR1)\n\t\t}\n\t\ttime.Sleep(time.Millisecond)\n\t}\n\tcancel()\n\t<-done\n\tc.Check(pullReqs.Count(), check.Equals, 16)\n\tc.Check(trashReqs.Count(), check.Equals, 20)\n\n\tmetrics := arvadostest.GatherMetricsAsString(srv.Metrics.reg)\n\tc.Check(metrics, check.Matches, `(?ms).*\\narvados_keepbalance_changeset_compute_seconds_count 4\\n.*`)\n}\n
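\n// Example (illustrative sketch): any other keepstore endpoint can be\n// stubbed the same way as the handlers above, using serveStatic and the\n// returned reqTracker to assert on request counts. The \"/untrash\"\n// endpoint and the expected count here are hypothetical:\n//\n//\trt := s.stub.serveStatic(\"/untrash\", `{}`)\n//\t_, err := srv.runOnce(context.Background())\n//\tc.Check(err, check.IsNil)\n//\tc.Check(rt.Count(), check.Equals, 0)\n"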
  },
  {
    "path": "services/keep-balance/balance_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"crypto/md5\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// Test with Gocheck\nfunc Test(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nvar _ = check.Suite(&balancerSuite{})\n\ntype balancerSuite struct {\n\tBalancer\n\tconfig          *arvados.Cluster\n\tsrvs            []*KeepService\n\tblks            map[string]tester\n\tknownRendezvous [][]int\n\tsignatureTTL    int64\n}\n\nconst (\n\t// index into knownRendezvous\n\tknown0 = 0\n)\n\ntype slots []int\n\ntype tester struct {\n\tknown       int\n\tdesired     map[string]int\n\tcurrent     slots\n\ttimestamps  []int64\n\tshouldPull  slots\n\tshouldTrash slots\n\n\tshouldPullMounts  []string\n\tshouldTrashMounts []string\n\n\texpectBlockState *balancedBlockState\n\texpectClassState map[string]balancedBlockState\n}\n\nfunc (bal *balancerSuite) SetUpSuite(c *check.C) {\n\tbal.knownRendezvous = nil\n\tfor _, str := range []string{\n\t\t\"3eab2d5fc9681074\",\n\t\t\"097dba52e648f1c3\",\n\t\t\"c5b4e023f8a7d691\",\n\t\t\"9d81c02e76a3bf54\",\n\t} {\n\t\tvar slots []int\n\t\tfor _, c := range []byte(str) {\n\t\t\tpos, _ := strconv.ParseUint(string(c), 16, 4)\n\t\t\tslots = append(slots, int(pos))\n\t\t}\n\t\tbal.knownRendezvous = append(bal.knownRendezvous, slots)\n\t}\n\n\tbal.signatureTTL = 3600\n\tbal.Logger = ctxlog.TestLogger(c)\n\n\tcfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()\n\tc.Assert(err, check.Equals, nil)\n\tbal.config, err = cfg.GetCluster(\"\")\n\tc.Assert(err, check.Equals, nil)\n}\n\nfunc (bal *balancerSuite) SetUpTest(c *check.C) {\n\tbal.srvs = make([]*KeepService, 16)\n\tbal.KeepServices = make(map[string]*KeepService)\n\tfor i := range bal.srvs {\n\t\tsrv := &KeepService{\n\t\t\tKeepService: arvados.KeepService{\n\t\t\t\tUUID: fmt.Sprintf(\"zzzzz-bi6l4-%015x\", i),\n\t\t\t},\n\t\t}\n\t\tsrv.mounts = []*KeepMount{{\n\t\t\tKeepMount: arvados.KeepMount{\n\t\t\t\tUUID:           fmt.Sprintf(\"zzzzz-mount-%015x\", i),\n\t\t\t\tStorageClasses: map[string]bool{\"default\": true},\n\t\t\t\tAllowWrite:     true,\n\t\t\t\tAllowTrash:     true,\n\t\t\t},\n\t\t\tKeepService: srv,\n\t\t}}\n\t\tbal.srvs[i] = srv\n\t\tbal.KeepServices[srv.UUID] = srv\n\t}\n\n\tbal.MinMtime = time.Now().UnixNano() - bal.signatureTTL*1e9\n\tbal.cleanupMounts()\n}\n\nfunc (bal *balancerSuite) TestPerfect(c *check.C) {\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{0, 1},\n\t\tshouldPull:  nil,\n\t\tshouldTrash: nil,\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded: 2,\n\t\t}})\n}\n\nfunc (bal *balancerSuite) TestDecreaseRepl(c *check.C) {\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{0, 2, 1},\n\t\tshouldTrash: slots{2},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:   2,\n\t\t\tunneeded: 1,\n\t\t}})\n}\n\nfunc (bal *balancerSuite) TestDecreaseReplToZero(c *check.C) {\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 0},\n\t\tcurrent:     slots{0, 1, 3},\n\t\tshouldTrash: slots{0, 1, 3},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tunneeded: 3,\n\t\t}})\n}\n\nfunc (bal *balancerSuite) TestIncreaseRepl(c *check.C) {\n\tbal.try(c, tester{\n\t\tdesired:  
  map[string]int{\"default\": 4},\n\t\tcurrent:    slots{0, 1},\n\t\tshouldPull: slots{2, 3},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:  2,\n\t\t\tpulling: 2,\n\t\t}})\n}\n\nfunc (bal *balancerSuite) TestSkipReadonly(c *check.C) {\n\tbal.srvList(0, slots{3})[0].ReadOnly = true\n\tbal.try(c, tester{\n\t\tdesired:    map[string]int{\"default\": 4},\n\t\tcurrent:    slots{0, 1},\n\t\tshouldPull: slots{2, 4},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:  2,\n\t\t\tpulling: 2,\n\t\t}})\n}\n\nfunc (bal *balancerSuite) TestAllowTrashWhenReadOnly(c *check.C) {\n\tsrvs := bal.srvList(0, slots{3})\n\tsrvs[0].mounts[0].KeepMount.AllowWrite = false\n\tsrvs[0].mounts[0].KeepMount.AllowTrash = true\n\t// can't pull to slot 3, so pull to slot 4 instead\n\tbal.try(c, tester{\n\t\tdesired:    map[string]int{\"default\": 4},\n\t\tcurrent:    slots{0, 1},\n\t\tshouldPull: slots{2, 4},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:  2,\n\t\t\tpulling: 2,\n\t\t}})\n\t// expect to be able to trash slot 3 in future, so pull to\n\t// slot 1\n\tbal.try(c, tester{\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{0, 3},\n\t\tshouldPull: slots{1},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:  2,\n\t\t\tpulling: 1,\n\t\t}})\n\t// trash excess from slot 3\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{0, 1, 3},\n\t\tshouldTrash: slots{3},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:   2,\n\t\t\tunneeded: 1,\n\t\t}})\n}\n\nfunc (bal *balancerSuite) TestMultipleViewsReadOnly(c *check.C) {\n\tbal.testMultipleViews(c, false, false)\n}\n\nfunc (bal *balancerSuite) TestMultipleViewsReadOnlyAllowTrash(c *check.C) {\n\tbal.testMultipleViews(c, false, true)\n}\n\nfunc (bal *balancerSuite) TestMultipleViews(c *check.C) {\n\tbal.testMultipleViews(c, true, true)\n}\n\nfunc (bal *balancerSuite) testMultipleViews(c *check.C, allowWrite, allowTrash bool) {\n\tfor i, srv := range bal.srvs {\n\t\t// Add a mount to each service\n\t\tsrv.mounts[0].KeepMount.DeviceID = fmt.Sprintf(\"writable-by-srv-%x\", i)\n\t\tsrv.mounts = append(srv.mounts, &KeepMount{\n\t\t\tKeepMount: arvados.KeepMount{\n\t\t\t\tDeviceID:       bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.DeviceID,\n\t\t\t\tUUID:           bal.srvs[(i+1)%len(bal.srvs)].mounts[0].KeepMount.UUID,\n\t\t\t\tAllowWrite:     allowWrite,\n\t\t\t\tAllowTrash:     allowTrash,\n\t\t\t\tReplication:    1,\n\t\t\t\tStorageClasses: map[string]bool{\"default\": true},\n\t\t\t},\n\t\t\tKeepService: srv,\n\t\t})\n\t}\n\tfor i := 1; i < len(bal.srvs); i++ {\n\t\tc.Logf(\"i=%d\", i)\n\t\tif i == 4 {\n\t\t\t// Timestamps are all different, but one of\n\t\t\t// the mounts on srv[4] has the same device ID\n\t\t\t// where the non-deletable replica is stored\n\t\t\t// on srv[3], so only one replica is safe to\n\t\t\t// trash.\n\t\t\tbal.try(c, tester{\n\t\t\t\tdesired:     map[string]int{\"default\": 1},\n\t\t\t\tcurrent:     slots{0, i, i},\n\t\t\t\tshouldTrash: slots{i}})\n\t\t} else if !allowTrash {\n\t\t\t// Timestamps are all different, and the third\n\t\t\t// replica can't be trashed because it's on a\n\t\t\t// read-only mount (with\n\t\t\t// AllowTrashWhenReadOnly=false), so the first\n\t\t\t// two replicas should be trashed.\n\t\t\tbal.try(c, tester{\n\t\t\t\tdesired:     map[string]int{\"default\": 1},\n\t\t\t\tcurrent:     slots{0, i, i},\n\t\t\t\tshouldTrash: slots{0, i}})\n\t\t} else {\n\t\t\t// Timestamps are all different, so both\n\t\t\t// 
replicas on the non-optimal server should\n\t\t\t// be trashed.\n\t\t\tbal.try(c, tester{\n\t\t\t\tdesired:     map[string]int{\"default\": 1},\n\t\t\t\tcurrent:     slots{0, i, i},\n\t\t\t\tshouldTrash: slots{i, i}})\n\t\t}\n\t\t// If the three replicas have identical timestamps,\n\t\t// none of them can be trashed safely.\n\t\tbal.try(c, tester{\n\t\t\tdesired:    map[string]int{\"default\": 1},\n\t\t\tcurrent:    slots{0, i, i},\n\t\t\ttimestamps: []int64{12345678, 12345678, 12345678}})\n\t\t// If the first and third replicas have identical\n\t\t// timestamps, only the second replica should be\n\t\t// trashed.\n\t\tbal.try(c, tester{\n\t\t\tdesired:     map[string]int{\"default\": 1},\n\t\t\tcurrent:     slots{0, i, i},\n\t\t\ttimestamps:  []int64{12345678, 12345679, 12345678},\n\t\t\tshouldTrash: slots{i}})\n\t}\n}\n\nfunc (bal *balancerSuite) TestFixUnbalanced(c *check.C) {\n\tbal.try(c, tester{\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{2, 0},\n\t\tshouldPull: slots{1}})\n\tbal.try(c, tester{\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{2, 7},\n\t\tshouldPull: slots{0, 1}})\n\t// if only one of the pulls succeeds, we'll see this next:\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{2, 1, 7},\n\t\tshouldPull:  slots{0},\n\t\tshouldTrash: slots{7}})\n\t// if both pulls succeed, we'll see this next:\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{2, 0, 1, 7},\n\t\tshouldTrash: slots{2, 7}})\n\n\t// unbalanced + excessive replication => pull + trash\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{2, 5, 7},\n\t\tshouldPull:  slots{0, 1},\n\t\tshouldTrash: slots{7}})\n}\n\nfunc (bal *balancerSuite) TestMultipleReplicasPerService(c *check.C) {\n\tfor s, srv := range bal.srvs {\n\t\tfor i := 0; i < 3; i++ {\n\t\t\tm := *(srv.mounts[0])\n\t\t\tm.UUID = fmt.Sprintf(\"zzzzz-mount-%015x\", (s<<10)+i)\n\t\t\tsrv.mounts = append(srv.mounts, &m)\n\t\t}\n\t}\n\tbal.try(c, tester{\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{0, 0},\n\t\tshouldPull: slots{1}})\n\tbal.try(c, tester{\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{2, 2},\n\t\tshouldPull: slots{0, 1}})\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{0, 0, 1},\n\t\tshouldTrash: slots{0}})\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{1, 1, 0},\n\t\tshouldTrash: slots{1}})\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{1, 0, 1, 0, 2},\n\t\tshouldTrash: slots{0, 1, 2}})\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{1, 1, 1, 0, 2},\n\t\tshouldTrash: slots{1, 1, 2}})\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{1, 1, 2},\n\t\tshouldPull:  slots{0},\n\t\tshouldTrash: slots{1}})\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{1, 1, 0},\n\t\ttimestamps:  []int64{12345678, 12345678, 12345679},\n\t\tshouldTrash: nil})\n\tbal.try(c, tester{\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{1, 1},\n\t\tshouldPull: slots{0}})\n}\n\nfunc (bal *balancerSuite) TestIncreaseReplTimestampCollision(c *check.C) {\n\t// For purposes of increasing replication, we assume identical\n\t// replicas are 
distinct.\n\tbal.try(c, tester{\n\t\tdesired:    map[string]int{\"default\": 4},\n\t\tcurrent:    slots{0, 1},\n\t\ttimestamps: []int64{12345678, 12345678},\n\t\tshouldPull: slots{2, 3}})\n}\n\nfunc (bal *balancerSuite) TestDecreaseReplTimestampCollision(c *check.C) {\n\t// For purposes of decreasing replication, we assume identical\n\t// replicas are NOT distinct.\n\tbal.try(c, tester{\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{0, 1, 2},\n\t\ttimestamps: []int64{12345678, 12345678, 12345678}})\n\tbal.try(c, tester{\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{0, 1, 2},\n\t\ttimestamps: []int64{12345678, 10000000, 10000000}})\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 0},\n\t\tcurrent:     slots{0, 1, 2},\n\t\ttimestamps:  []int64{12345678, 12345678, 12345678},\n\t\tshouldTrash: slots{0},\n\t\tshouldTrashMounts: []string{\n\t\t\tbal.srvs[bal.knownRendezvous[0][0]].mounts[0].UUID}})\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{0, 1, 2, 5, 6},\n\t\ttimestamps:  []int64{12345678, 12345679, 10000000, 10000000, 10000000},\n\t\tshouldTrash: slots{2},\n\t\tshouldTrashMounts: []string{\n\t\t\tbal.srvs[bal.knownRendezvous[0][2]].mounts[0].UUID}})\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{0, 1, 2, 5, 6},\n\t\ttimestamps:  []int64{12345678, 12345679, 12345671, 10000000, 10000000},\n\t\tshouldTrash: slots{2, 5},\n\t\tshouldTrashMounts: []string{\n\t\t\tbal.srvs[bal.knownRendezvous[0][2]].mounts[0].UUID,\n\t\t\tbal.srvs[bal.knownRendezvous[0][5]].mounts[0].UUID}})\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{0, 1, 2, 5, 6},\n\t\ttimestamps:  []int64{12345678, 12345679, 12345679, 10000000, 10000000},\n\t\tshouldTrash: slots{5},\n\t\tshouldTrashMounts: []string{\n\t\t\tbal.srvs[bal.knownRendezvous[0][5]].mounts[0].UUID}})\n}\n\nfunc (bal *balancerSuite) TestDecreaseReplBlockTooNew(c *check.C) {\n\toldTime := bal.MinMtime - 3600\n\tnewTime := bal.MinMtime + 3600\n\t// The excess replica is too new to delete.\n\tbal.try(c, tester{\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{0, 1, 2},\n\t\ttimestamps: []int64{oldTime, newTime, newTime + 1},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:   2,\n\t\t\tunneeded: 1,\n\t\t}})\n\t// The best replicas are too new to delete, but the excess\n\t// replica is old enough.\n\tbal.try(c, tester{\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{0, 1, 2},\n\t\ttimestamps:  []int64{newTime, newTime + 1, oldTime},\n\t\tshouldTrash: slots{2}})\n}\n\nfunc (bal *balancerSuite) TestCleanupMounts(c *check.C) {\n\tbal.srvs[3].mounts[0].KeepMount.AllowWrite = false\n\tbal.srvs[3].mounts[0].KeepMount.DeviceID = \"abcdef\"\n\tbal.srvs[14].mounts[0].KeepMount.UUID = bal.srvs[3].mounts[0].KeepMount.UUID\n\tbal.srvs[14].mounts[0].KeepMount.DeviceID = \"abcdef\"\n\tc.Check(len(bal.srvs[3].mounts), check.Equals, 1)\n\tbal.cleanupMounts()\n\tc.Check(len(bal.srvs[3].mounts), check.Equals, 0)\n\tbal.try(c, tester{\n\t\tknown:      0,\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{1},\n\t\tshouldPull: slots{2}})\n}\n\nfunc (bal *balancerSuite) TestVolumeReplication(c *check.C) {\n\tbal.srvs[0].mounts[0].KeepMount.Replication = 2  // srv 0\n\tbal.srvs[14].mounts[0].KeepMount.Replication = 2 // srv e\n\tbal.cleanupMounts()\n\t// block 0 rendezvous is 3,e,a -- so slot 1 has 
repl=2\n\tbal.try(c, tester{\n\t\tknown:      0,\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{1},\n\t\tshouldPull: slots{0},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:  1,\n\t\t\tpulling: 1,\n\t\t}})\n\tbal.try(c, tester{\n\t\tknown:      0,\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{0, 1},\n\t\tshouldPull: nil,\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded: 2,\n\t\t}})\n\tbal.try(c, tester{\n\t\tknown:       0,\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{0, 1, 2},\n\t\tshouldTrash: slots{2},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:   2,\n\t\t\tunneeded: 1,\n\t\t}})\n\tbal.try(c, tester{\n\t\tknown:       0,\n\t\tdesired:     map[string]int{\"default\": 3},\n\t\tcurrent:     slots{0, 2, 3, 4},\n\t\tshouldPull:  slots{1},\n\t\tshouldTrash: slots{4},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:   3,\n\t\t\tunneeded: 1,\n\t\t\tpulling:  1,\n\t\t}})\n\tbal.try(c, tester{\n\t\tknown:       0,\n\t\tdesired:     map[string]int{\"default\": 3},\n\t\tcurrent:     slots{0, 1, 2, 3, 4},\n\t\tshouldTrash: slots{2, 3, 4},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:   2,\n\t\t\tunneeded: 3,\n\t\t}})\n\tbal.try(c, tester{\n\t\tknown:       0,\n\t\tdesired:     map[string]int{\"default\": 4},\n\t\tcurrent:     slots{0, 1, 2, 3, 4},\n\t\tshouldTrash: slots{3, 4},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:   3,\n\t\t\tunneeded: 2,\n\t\t}})\n\t// block 1 rendezvous is 0,9,7 -- so slot 0 has repl=2\n\tbal.try(c, tester{\n\t\tknown:   1,\n\t\tdesired: map[string]int{\"default\": 2},\n\t\tcurrent: slots{0},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded: 1,\n\t\t}})\n\tbal.try(c, tester{\n\t\tknown:      1,\n\t\tdesired:    map[string]int{\"default\": 3},\n\t\tcurrent:    slots{0},\n\t\tshouldPull: slots{1},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:  1,\n\t\t\tpulling: 1,\n\t\t}})\n\tbal.try(c, tester{\n\t\tknown:      1,\n\t\tdesired:    map[string]int{\"default\": 4},\n\t\tcurrent:    slots{0},\n\t\tshouldPull: slots{1, 2},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:  1,\n\t\t\tpulling: 2,\n\t\t}})\n\tbal.try(c, tester{\n\t\tknown:      1,\n\t\tdesired:    map[string]int{\"default\": 4},\n\t\tcurrent:    slots{2},\n\t\tshouldPull: slots{0, 1},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:  1,\n\t\t\tpulling: 2,\n\t\t}})\n\tbal.try(c, tester{\n\t\tknown:      1,\n\t\tdesired:    map[string]int{\"default\": 4},\n\t\tcurrent:    slots{7},\n\t\tshouldPull: slots{0, 1, 2},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:  1,\n\t\t\tpulling: 3,\n\t\t}})\n\tbal.try(c, tester{\n\t\tknown:       1,\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{1, 2, 3, 4},\n\t\tshouldPull:  slots{0},\n\t\tshouldTrash: slots{3, 4},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:   2,\n\t\t\tunneeded: 2,\n\t\t\tpulling:  1,\n\t\t}})\n\tbal.try(c, tester{\n\t\tknown:       1,\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{0, 1, 2},\n\t\tshouldTrash: slots{1, 2},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:   1,\n\t\t\tunneeded: 2,\n\t\t}})\n}\n\nfunc (bal *balancerSuite) TestDeviceRWMountedByMultipleServers(c *check.C) {\n\tdupUUID := bal.srvs[0].mounts[0].KeepMount.UUID\n\tbal.srvs[9].mounts[0].KeepMount.UUID = dupUUID\n\tbal.srvs[14].mounts[0].KeepMount.UUID = dupUUID\n\t// block 0 belongs on servers 3 and e, which 
have different\n\t// UUIDs.\n\tbal.try(c, tester{\n\t\tknown:      0,\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{1},\n\t\tshouldPull: slots{0}})\n\t// block 1 belongs on servers 0 and 9, which both report\n\t// having a replica, but the replicas are on the same volume\n\t// -- so we should pull to the third position (7).\n\tbal.try(c, tester{\n\t\tknown:      1,\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{0, 1},\n\t\tshouldPull: slots{2}})\n\t// block 1 can be pulled to the doubly-mounted volume, but the\n\t// pull should only be done on the first of the two servers.\n\tbal.try(c, tester{\n\t\tknown:      1,\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{2},\n\t\tshouldPull: slots{0}})\n\t// block 0 has one replica on a single volume mounted on two\n\t// servers (e,9 at positions 1,9). Trashing the replica on 9\n\t// would lose the block.\n\tbal.try(c, tester{\n\t\tknown:      0,\n\t\tdesired:    map[string]int{\"default\": 2},\n\t\tcurrent:    slots{1, 9},\n\t\tshouldPull: slots{0},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:  1,\n\t\t\tpulling: 1,\n\t\t}})\n\t// block 0 is overreplicated, but the second and third\n\t// replicas are the same replica according to volume UUID\n\t// (despite different Mtimes). Don't trash the third replica.\n\tbal.try(c, tester{\n\t\tknown:   0,\n\t\tdesired: map[string]int{\"default\": 2},\n\t\tcurrent: slots{0, 1, 9},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded: 2,\n\t\t}})\n\t// block 0 is overreplicated; the third and fifth replicas are\n\t// extra, but the fourth is another view of the second and\n\t// shouldn't be trashed.\n\tbal.try(c, tester{\n\t\tknown:       0,\n\t\tdesired:     map[string]int{\"default\": 2},\n\t\tcurrent:     slots{0, 1, 5, 9, 12},\n\t\tshouldTrash: slots{5, 12},\n\t\texpectBlockState: &balancedBlockState{\n\t\t\tneeded:   2,\n\t\t\tunneeded: 2,\n\t\t}})\n}\n\nfunc (bal *balancerSuite) TestChangeStorageClasses(c *check.C) {\n\t// For known blocks 0/1/2/3, server 9 is slot 9/1/14/0 in\n\t// probe order. For these tests we give it two mounts, one\n\t// with classes=[special], one with\n\t// classes=[special,special2].\n\tbal.srvs[9].mounts = []*KeepMount{{\n\t\tKeepMount: arvados.KeepMount{\n\t\t\tAllowWrite:     true,\n\t\t\tAllowTrash:     true,\n\t\t\tReplication:    1,\n\t\t\tStorageClasses: map[string]bool{\"special\": true},\n\t\t\tUUID:           \"zzzzz-mount-special00000009\",\n\t\t\tDeviceID:       \"9-special\",\n\t\t},\n\t\tKeepService: bal.srvs[9],\n\t}, {\n\t\tKeepMount: arvados.KeepMount{\n\t\t\tAllowWrite:     true,\n\t\t\tAllowTrash:     true,\n\t\t\tReplication:    1,\n\t\t\tStorageClasses: map[string]bool{\"special\": true, \"special2\": true},\n\t\t\tUUID:           \"zzzzz-mount-special20000009\",\n\t\t\tDeviceID:       \"9-special-and-special2\",\n\t\t},\n\t\tKeepService: bal.srvs[9],\n\t}}\n\t// For known blocks 0/1/2/3, server 13 (d) is slot 5/3/11/1 in\n\t// probe order. 
We give it two mounts, one with\n\t// classes=[special2], one with classes=[default].\n\tbal.srvs[13].mounts = []*KeepMount{{\n\t\tKeepMount: arvados.KeepMount{\n\t\t\tAllowWrite:     true,\n\t\t\tAllowTrash:     true,\n\t\t\tReplication:    1,\n\t\t\tStorageClasses: map[string]bool{\"special2\": true},\n\t\t\tUUID:           \"zzzzz-mount-special2000000d\",\n\t\t\tDeviceID:       \"13-special2\",\n\t\t},\n\t\tKeepService: bal.srvs[13],\n\t}, {\n\t\tKeepMount: arvados.KeepMount{\n\t\t\tAllowWrite:     true,\n\t\t\tAllowTrash:     true,\n\t\t\tReplication:    1,\n\t\t\tStorageClasses: map[string]bool{\"default\": true},\n\t\t\tUUID:           \"zzzzz-mount-00000000000000d\",\n\t\t\tDeviceID:       \"13-default\",\n\t\t},\n\t\tKeepService: bal.srvs[13],\n\t}}\n\t// Pull to slot 9 because that's the only server with the\n\t// desired class \"special\".\n\tbal.try(c, tester{\n\t\tknown:            0,\n\t\tdesired:          map[string]int{\"default\": 2, \"special\": 1},\n\t\tcurrent:          slots{0, 1},\n\t\tshouldPull:       slots{9},\n\t\tshouldPullMounts: []string{\"zzzzz-mount-special20000009\"}})\n\t// If some storage classes are not satisfied, don't trash any\n\t// excess replicas. (E.g., if someone desires repl=1 on\n\t// class=durable, and we have two copies on class=volatile, we\n\t// should wait for pull to succeed before trashing anything).\n\tbal.try(c, tester{\n\t\tknown:            0,\n\t\tdesired:          map[string]int{\"special\": 1},\n\t\tcurrent:          slots{0, 1},\n\t\tshouldPull:       slots{9},\n\t\tshouldPullMounts: []string{\"zzzzz-mount-special20000009\"}})\n\t// Once storage classes are satisfied, trash excess replicas\n\t// that appear earlier in probe order but aren't needed to\n\t// satisfy the desired classes.\n\tbal.try(c, tester{\n\t\tknown:       0,\n\t\tdesired:     map[string]int{\"special\": 1},\n\t\tcurrent:     slots{0, 1, 9},\n\t\tshouldTrash: slots{0, 1}})\n\t// Pull to slot 5, the best server with class \"special2\".\n\tbal.try(c, tester{\n\t\tknown:            0,\n\t\tdesired:          map[string]int{\"special2\": 1},\n\t\tcurrent:          slots{0, 1},\n\t\tshouldPull:       slots{5},\n\t\tshouldPullMounts: []string{\"zzzzz-mount-special2000000d\"}})\n\t// Pull to slot 5 and 9 to get replication 2 in desired class\n\t// \"special2\".\n\tbal.try(c, tester{\n\t\tknown:            0,\n\t\tdesired:          map[string]int{\"special2\": 2},\n\t\tcurrent:          slots{0, 1},\n\t\tshouldPull:       slots{5, 9},\n\t\tshouldPullMounts: []string{\"zzzzz-mount-special20000009\", \"zzzzz-mount-special2000000d\"}})\n\t// Slot 0 has a replica in \"default\", slot 1 has a replica\n\t// in \"special\"; we need another replica in \"default\", i.e.,\n\t// on slot 2.\n\tbal.try(c, tester{\n\t\tknown:      1,\n\t\tdesired:    map[string]int{\"default\": 2, \"special\": 1},\n\t\tcurrent:    slots{0, 1},\n\t\tshouldPull: slots{2}})\n\t// Pull to best probe position 0 (despite wrong storage class)\n\t// if it's impossible to achieve desired replication in the\n\t// desired class (only slots 1 and 3 have special2).\n\tbal.try(c, tester{\n\t\tknown:      1,\n\t\tdesired:    map[string]int{\"special2\": 3},\n\t\tcurrent:    slots{3},\n\t\tshouldPull: slots{0, 1}})\n\t// Trash excess replica.\n\tbal.try(c, tester{\n\t\tknown:       3,\n\t\tdesired:     map[string]int{\"special\": 1},\n\t\tcurrent:     slots{0, 1},\n\t\tshouldTrash: slots{1}})\n\t// Leave one copy on slot 1 because slot 0 (server 9) only\n\t// gives us repl=1.\n\tbal.try(c, tester{\n\t\tknown:   
3,\n\t\tdesired: map[string]int{\"special\": 2},\n\t\tcurrent: slots{0, 1}})\n}\n\n// Clear all servers' changesets, balance a single block, and verify\n// the appropriate changes for that block have been added to the\n// changesets.\nfunc (bal *balancerSuite) try(c *check.C, t tester) {\n\tbal.setupLookupTables(bal.config)\n\tblk := &BlockState{\n\t\tReplicas: bal.replList(t.known, t.current),\n\t\tDesired:  &t.desired,\n\t}\n\tfor i, t := range t.timestamps {\n\t\tblk.Replicas[i].Mtime = t\n\t}\n\tresult := bal.balanceBlock(knownBlkid(t.known), blk)\n\n\tvar didPull, didTrash slots\n\tvar didPullMounts, didTrashMounts []string\n\tfor i, srv := range bal.srvs {\n\t\tvar slot int\n\t\tfor probeOrder, srvNum := range bal.knownRendezvous[t.known] {\n\t\t\tif srvNum == i {\n\t\t\t\tslot = probeOrder\n\t\t\t}\n\t\t}\n\t\tfor _, pull := range srv.Pulls {\n\t\t\tdidPull = append(didPull, slot)\n\t\t\tdidPullMounts = append(didPullMounts, pull.To.UUID)\n\t\t\tc.Check(pull.SizedDigest, check.Equals, knownBlkid(t.known))\n\t\t}\n\t\tfor _, trash := range srv.Trashes {\n\t\t\tdidTrash = append(didTrash, slot)\n\t\t\tdidTrashMounts = append(didTrashMounts, trash.From.UUID)\n\t\t\tc.Check(trash.SizedDigest, check.Equals, knownBlkid(t.known))\n\t\t}\n\t}\n\n\tfor _, list := range []slots{didPull, didTrash, t.shouldPull, t.shouldTrash} {\n\t\tsort.Sort(sort.IntSlice(list))\n\t}\n\tc.Check(didPull, check.DeepEquals, t.shouldPull)\n\tc.Check(didTrash, check.DeepEquals, t.shouldTrash)\n\tif t.shouldPullMounts != nil {\n\t\tsort.Strings(didPullMounts)\n\t\tc.Check(didPullMounts, check.DeepEquals, t.shouldPullMounts)\n\t}\n\tif t.shouldTrashMounts != nil {\n\t\tsort.Strings(didTrashMounts)\n\t\tc.Check(didTrashMounts, check.DeepEquals, t.shouldTrashMounts)\n\t}\n\tif t.expectBlockState != nil {\n\t\tc.Check(result.blockState, check.Equals, *t.expectBlockState)\n\t}\n\tif t.expectClassState != nil {\n\t\tc.Check(result.classState, check.DeepEquals, t.expectClassState)\n\t}\n}\n\n// srvList returns the KeepServices, sorted in rendezvous order and\n// then selected by idx. For example, srvList(3, slots{0, 1, 4})\n// returns the first-, second-, and fifth-best servers for storing\n// bal.knownBlkid(3).\nfunc (bal *balancerSuite) srvList(knownBlockID int, order slots) (srvs []*KeepService) {\n\tfor _, i := range order {\n\t\tsrvs = append(srvs, bal.srvs[bal.knownRendezvous[knownBlockID][i]])\n\t}\n\treturn\n}\n\n// replList is like srvList but returns an \"existing replicas\" slice,\n// suitable for a BlockState test fixture.\nfunc (bal *balancerSuite) replList(knownBlockID int, order slots) (repls []Replica) {\n\tnextMnt := map[*KeepService]int{}\n\tmtime := time.Now().UnixNano() - (bal.signatureTTL+86400)*1e9\n\tfor _, srv := range bal.srvList(knownBlockID, order) {\n\t\t// round-robin repls onto each srv's mounts\n\t\tn := nextMnt[srv]\n\t\tnextMnt[srv] = (n + 1) % len(srv.mounts)\n\n\t\trepls = append(repls, Replica{srv.mounts[n], mtime})\n\t\tmtime++\n\t}\n\treturn\n}\n\n// generate the same data hashes that are tested in\n// sdk/go/keepclient/root_sorter_test.go\nfunc knownBlkid(i int) arvados.SizedDigest {\n\treturn arvados.SizedDigest(fmt.Sprintf(\"%x+64\", md5.Sum([]byte(fmt.Sprintf(\"%064x\", i)))))\n}\n"
  },
  {
    "path": "services/keep-balance/block_state.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"maps\"\n\t\"sync\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// Replica is a file on disk (or object in an S3 bucket, or blob in an\n// Azure storage container, etc.) as reported in a keepstore index\n// response.\ntype Replica struct {\n\t*KeepMount\n\tMtime int64\n}\n\n// BlockState indicates the desired storage class and number of\n// replicas (according to the collections we know about) and the\n// replicas actually stored (according to the keepstore indexes we\n// know about).\ntype BlockState struct {\n\tRefs     map[string]bool // pdh => true (only tracked when len(Replicas)==0)\n\tRefCount int\n\tReplicas []Replica\n\tDesired  mapPoolEnt\n}\n\nvar defaultClasses = []string{\"default\"}\n\nfunc (bs *BlockState) addReplica(r Replica) {\n\tbs.Replicas = append(bs.Replicas, r)\n\t// Free up memory wasted by tracking PDHs that will never be\n\t// reported (see comment in increaseDesired)\n\tbs.Refs = nil\n}\n\nfunc (bs *BlockState) increaseDesired(pool *mapPool, pdh string, classes []string, n int) {\n\tif pdh != \"\" && len(bs.Replicas) == 0 {\n\t\t// Note we only track PDHs if there's a possibility\n\t\t// that we will report the list of referring PDHs,\n\t\t// i.e., if we haven't yet seen a replica.\n\t\tif bs.Refs == nil {\n\t\t\tbs.Refs = map[string]bool{}\n\t\t}\n\t\tbs.Refs[pdh] = true\n\t}\n\tbs.RefCount++\n\tif len(classes) == 0 {\n\t\tclasses = defaultClasses\n\t}\n\tfor _, class := range classes {\n\t\tbs.Desired = pool.setMinimum(bs.Desired, class, n)\n\t}\n}\n\n// BlockStateMap is a goroutine-safe wrapper around a\n// map[arvados.SizedDigest]*BlockState.\ntype BlockStateMap struct {\n\tentries map[arvados.SizedDigest]*BlockState\n\tpool    mapPool\n\tmutex   sync.Mutex\n}\n\n// NewBlockStateMap returns a newly allocated BlockStateMap.\nfunc NewBlockStateMap(maxReplication int) *BlockStateMap {\n\treturn &BlockStateMap{\n\t\tentries: make(map[arvados.SizedDigest]*BlockState),\n\t\tpool: mapPool{\n\t\t\tMaximum: maxReplication + 1,\n\t\t},\n\t}\n}\n\n// return a BlockState entry, allocating a new one if needed. (Private\n// method: not goroutine-safe.)\nfunc (bsm *BlockStateMap) get(blkid arvados.SizedDigest) *BlockState {\n\t// TODO? 
Allocate BlockState structs a slice at a time,\n\t// instead of one at a time.\n\tblk := bsm.entries[blkid]\n\tif blk == nil {\n\t\tblk = &BlockState{}\n\t\tbsm.entries[blkid] = blk\n\t}\n\treturn blk\n}\n\n// Apply runs f on each entry in the map.\nfunc (bsm *BlockStateMap) Apply(f func(arvados.SizedDigest, *BlockState)) {\n\tbsm.mutex.Lock()\n\tdefer bsm.mutex.Unlock()\n\n\tfor blkid, blk := range bsm.entries {\n\t\tf(blkid, blk)\n\t}\n}\n\n// AddReplicas updates the map to indicate that mnt has a replica of\n// each block in idx.\nfunc (bsm *BlockStateMap) AddReplicas(mnt *KeepMount, idx []arvados.KeepServiceIndexEntry) {\n\tbsm.mutex.Lock()\n\tdefer bsm.mutex.Unlock()\n\n\tfor _, ent := range idx {\n\t\tbsm.get(ent.SizedDigest).addReplica(Replica{\n\t\t\tKeepMount: mnt,\n\t\t\tMtime:     ent.Mtime,\n\t\t})\n\t}\n}\n\n// IncreaseDesired updates the map to indicate the desired replication\n// for the given blocks in the given storage class is at least n.\n//\n// If pdh is non-empty, it will be tracked and reported in the \"lost\n// blocks\" report.\nfunc (bsm *BlockStateMap) IncreaseDesired(pdh string, classes []string, n int, blocks []arvados.SizedDigest) {\n\tbsm.mutex.Lock()\n\tdefer bsm.mutex.Unlock()\n\n\tfor _, blkid := range blocks {\n\t\tbsm.get(blkid).increaseDesired(&bsm.pool, pdh, classes, n)\n\t}\n}\n\n// GetConfirmedReplication returns the replication level of the given\n// blocks, considering only the specified storage classes.\n//\n// If len(classes)==0, returns the replication level without regard to\n// storage classes.\n//\n// Safe to call concurrently with other calls to GetCurrent, but not\n// with different BlockStateMap methods.\nfunc (bsm *BlockStateMap) GetConfirmedReplication(blkids []arvados.SizedDigest, classes []string) int {\n\tdefaultClasses := map[string]bool{\"default\": true}\n\tmin := 0\n\tfor _, blkid := range blkids {\n\t\ttotal := 0\n\t\tperclass := make(map[string]int, len(classes))\n\t\tfor _, c := range classes {\n\t\t\tperclass[c] = 0\n\t\t}\n\t\tbs, ok := bsm.entries[blkid]\n\t\tif !ok {\n\t\t\treturn 0\n\t\t}\n\t\tfor _, r := range bs.Replicas {\n\t\t\ttotal += r.KeepMount.Replication\n\t\t\tmntclasses := r.KeepMount.StorageClasses\n\t\t\tif len(mntclasses) == 0 {\n\t\t\t\tmntclasses = defaultClasses\n\t\t\t}\n\t\t\tfor c := range mntclasses {\n\t\t\t\tn, ok := perclass[c]\n\t\t\t\tif !ok {\n\t\t\t\t\t// Don't care about this storage class\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tperclass[c] = n + r.KeepMount.Replication\n\t\t\t}\n\t\t}\n\t\tif total == 0 {\n\t\t\treturn 0\n\t\t}\n\t\tfor _, n := range perclass {\n\t\t\tif n == 0 {\n\t\t\t\treturn 0\n\t\t\t}\n\t\t\tif n < min || min == 0 {\n\t\t\t\tmin = n\n\t\t\t}\n\t\t}\n\t\tif len(perclass) == 0 && (min == 0 || min > total) {\n\t\t\tmin = total\n\t\t}\n\t}\n\treturn min\n}\n\n// mapPool manages a pool of distinct maps of type map[string]int.\n// See (*mapPool)setMinimum() and (*BlockState)increaseDesired() for\n// usage.\ntype mapPool struct {\n\tMaximum int\n\tnext    map[mapPoolTransition]mapPoolEnt\n\tlock    sync.RWMutex\n}\n\ntype mapPoolEnt *map[string]int\n\ntype mapPoolTransition struct {\n\tent     mapPoolEnt\n\tclass   string\n\tminimum int\n}\n\n// setMinimum returns a singleton mapPoolEnt that has\n// ent[class]>=minimum and is equivalent to the provided ent for all\n// other classes.\n//\n// The provided ent must be either nil, or a mapPoolEnt previously\n// returned by this method.\n//\n// The provided ent will be returned if it already satisfies\n// ent[class]>minimum.\n//\n// 
Functionally, it is equivalent to\n//\n//\tif p.Maximum > 0 && minimum > p.Maximum {\n//\t        minimum = p.Maximum\n//\t}\n//\tif ent[class] < minimum {\n//\t        ent[class] = minimum\n//\t}\n//\n// Except that, as long as the mapPool is shared by many\n// BlockState callers, it uses far less memory.\n//\n// The caller should not modify the returned mapPoolEnt.\nfunc (p *mapPool) setMinimum(ent mapPoolEnt, class string, minimum int) mapPoolEnt {\n\tif ent != nil && (*ent)[class] >= minimum {\n\t\treturn ent\n\t}\n\tif minimum > p.Maximum {\n\t\t// Clamp ent.minimum, otherwise p.next can become\n\t\t// excessively large when users set\n\t\t// replication_desired to unrealistic values.\n\t\tminimum = p.Maximum\n\t}\n\ttransition := mapPoolTransition{\n\t\tent:     ent,\n\t\tclass:   class,\n\t\tminimum: minimum,\n\t}\n\tp.lock.RLock()\n\tnext := p.next[transition]\n\tp.lock.RUnlock()\n\tif next != nil {\n\t\treturn next\n\t}\n\tvar newmap map[string]int\n\tif ent != nil {\n\t\tnewmap = maps.Clone(*ent)\n\t\tnewmap[class] = minimum\n\t} else {\n\t\tnewmap = map[string]int{class: minimum}\n\t}\n\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\t// The following find_matching_ent loop is not especially fast\n\t// -- O(poolsize*mapsize) at worst -- but that's okay because\n\t// it runs only ~once per distinct pool entry, and the number\n\t// of pool entries is bounded by configuration (maximum\n\t// achievable replication ** number of storage classes)\n\t// regardless of how many blocks are being processed.\n\t//\n\t// Typically setMinimum is called millions of times but we\n\t// arrive at this loop less than 100 times.\nfind_matching_ent:\n\tfor _, existing := range p.next {\n\t\tif len(*existing) != len(newmap) {\n\t\t\tcontinue find_matching_ent\n\t\t}\n\t\tfor c, d := range *existing {\n\t\t\tif newmap[c] != d {\n\t\t\t\tcontinue find_matching_ent\n\t\t\t}\n\t\t}\n\t\t// reuse existing pool entry, previously added as\n\t\t// outcome of a different transition (or by another\n\t\t// goroutine between RUnlock and Lock, in which case\n\t\t// this is a no-op)\n\t\tp.next[transition] = existing\n\t\treturn existing\n\t}\n\tif p.next == nil {\n\t\tp.next = make(map[mapPoolTransition]mapPoolEnt)\n\t}\n\tp.next[transition] = &newmap\n\treturn &newmap\n}\n"
  },
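  {
    "path": "services/keep-balance/block_state_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&mapPoolExampleSuite{})\n\ntype mapPoolExampleSuite struct{}\n\n// A minimal usage sketch of mapPool.setMinimum -- an illustrative\n// example file, not part of the original source tree. It exercises\n// the contract documented in block_state.go: an ent that already\n// satisfies the minimum is returned unchanged, repeating a\n// transition returns the same shared map, and minimums are clamped\n// to Maximum.\nfunc (s *mapPoolExampleSuite) TestSetMinimumContract(c *check.C) {\n\tp := &mapPool{Maximum: 4}\n\n\t// nil -> singleton map {\"default\": 2}\n\tent := p.setMinimum(nil, \"default\", 2)\n\tc.Check((*ent)[\"default\"], check.Equals, 2)\n\n\t// Already satisfied: the same ent comes back.\n\tc.Check(p.setMinimum(ent, \"default\", 1), check.Equals, ent)\n\n\t// The same transition requested again yields the same pooled\n\t// map (pointer equality), not a new allocation.\n\tc.Check(p.setMinimum(nil, \"default\", 2), check.Equals, ent)\n\n\t// An unrealistically high minimum is clamped to p.Maximum.\n\tclamped := p.setMinimum(ent, \"default\", 99)\n\tc.Check((*clamped)[\"default\"], check.Equals, 4)\n}\n"
  },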
  {
    "path": "services/keep-balance/block_state_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math/rand\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&confirmedReplicationSuite{})\n\ntype confirmedReplicationSuite struct {\n\tblockStateMap *BlockStateMap\n\tmtime         int64\n}\n\nfunc (s *confirmedReplicationSuite) SetUpTest(c *check.C) {\n\tt, _ := time.Parse(time.RFC3339Nano, time.RFC3339Nano)\n\ts.mtime = t.UnixNano()\n\ts.blockStateMap = NewBlockStateMap(8)\n\ts.blockStateMap.AddReplicas(&KeepMount{KeepMount: arvados.KeepMount{\n\t\tReplication:    1,\n\t\tStorageClasses: map[string]bool{\"default\": true},\n\t}}, []arvados.KeepServiceIndexEntry{\n\t\t{SizedDigest: knownBlkid(10), Mtime: s.mtime},\n\t})\n\ts.blockStateMap.AddReplicas(&KeepMount{KeepMount: arvados.KeepMount{\n\t\tReplication:    2,\n\t\tStorageClasses: map[string]bool{\"default\": true},\n\t}}, []arvados.KeepServiceIndexEntry{\n\t\t{SizedDigest: knownBlkid(20), Mtime: s.mtime},\n\t})\n}\n\nfunc (s *confirmedReplicationSuite) TestZeroReplication(c *check.C) {\n\tn := s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(404), knownBlkid(409)}, []string{\"default\"})\n\tc.Check(n, check.Equals, 0)\n\tn = s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(10), knownBlkid(404)}, []string{\"default\"})\n\tc.Check(n, check.Equals, 0)\n\tn = s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(10), knownBlkid(404)}, nil)\n\tc.Check(n, check.Equals, 0)\n}\n\nfunc (s *confirmedReplicationSuite) TestBlocksWithDifferentReplication(c *check.C) {\n\tn := s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(10), knownBlkid(20)}, []string{\"default\"})\n\tc.Check(n, check.Equals, 1)\n}\n\nfunc (s *confirmedReplicationSuite) TestBlocksInDifferentClasses(c *check.C) {\n\ts.blockStateMap.AddReplicas(&KeepMount{KeepMount: arvados.KeepMount{\n\t\tReplication:    3,\n\t\tStorageClasses: map[string]bool{\"three\": true},\n\t}}, []arvados.KeepServiceIndexEntry{\n\t\t{SizedDigest: knownBlkid(30), Mtime: s.mtime},\n\t})\n\n\tn := s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(30)}, []string{\"three\"})\n\tc.Check(n, check.Equals, 3)\n\tn = s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(20), knownBlkid(30)}, []string{\"default\"})\n\tc.Check(n, check.Equals, 0) // block 30 has repl 0 @ \"default\"\n\tn = s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(20), knownBlkid(30)}, []string{\"three\"})\n\tc.Check(n, check.Equals, 0) // block 20 has repl 0 @ \"three\"\n\tn = s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(20), knownBlkid(30)}, nil)\n\tc.Check(n, check.Equals, 2)\n}\n\nfunc (s *confirmedReplicationSuite) TestBlocksOnMultipleMounts(c *check.C) {\n\ts.blockStateMap.AddReplicas(&KeepMount{KeepMount: arvados.KeepMount{\n\t\tReplication:    2,\n\t\tStorageClasses: map[string]bool{\"default\": true, \"four\": true},\n\t}}, []arvados.KeepServiceIndexEntry{\n\t\t{SizedDigest: knownBlkid(40), Mtime: s.mtime},\n\t\t{SizedDigest: knownBlkid(41), Mtime: s.mtime},\n\t})\n\ts.blockStateMap.AddReplicas(&KeepMount{KeepMount: arvados.KeepMount{\n\t\tReplication:    2,\n\t\tStorageClasses: map[string]bool{\"four\": true},\n\t}}, []arvados.KeepServiceIndexEntry{\n\t\t{SizedDigest: 
knownBlkid(40), Mtime: s.mtime},\n\t\t{SizedDigest: knownBlkid(41), Mtime: s.mtime},\n\t})\n\tn := s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(40), knownBlkid(41)}, []string{\"default\"})\n\tc.Check(n, check.Equals, 2)\n\tn = s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(40), knownBlkid(41)}, []string{\"four\"})\n\tc.Check(n, check.Equals, 4)\n\tn = s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(40), knownBlkid(41)}, []string{\"default\", \"four\"})\n\tc.Check(n, check.Equals, 2)\n\tn = s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(40), knownBlkid(41)}, nil)\n\tc.Check(n, check.Equals, 4)\n}\n\nfunc (s *confirmedReplicationSuite) TestConcurrency(c *check.C) {\n\tvar wg sync.WaitGroup\n\tfor i := 1000; i < 1256; i++ {\n\t\ti := i\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn := s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(i), knownBlkid(i)}, []string{\"default\"})\n\t\t\tc.Check(n, check.Equals, 0)\n\t\t}()\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tn := s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(10)}, []string{\"default\"})\n\t\t\tc.Check(n, check.Equals, 1)\n\t\t\tn = s.blockStateMap.GetConfirmedReplication([]arvados.SizedDigest{knownBlkid(20)}, []string{\"default\"})\n\t\t\tc.Check(n, check.Equals, 2)\n\t\t}()\n\t}\n\twg.Wait()\n}\n\nvar _ = check.Suite(&mapPoolSuite{})\n\ntype mapPoolSuite struct{}\n\nfunc (s *mapPoolSuite) TestMapPool(c *check.C) {\n\tmaxPoolReplication := 8\n\tmaxDesired := 1000 // unrealistically high replication_desired\n\tnblocks := 10000\n\tclasses := []string{\"class_one\", \"class_two\", \"class_three\"}\n\tbsm := NewBlockStateMap(maxPoolReplication)\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < nblocks; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tbsm.IncreaseDesired(\"\", classes, rand.Int()%maxDesired, []arvados.SizedDigest{knownBlkid(i)})\n\t\t}()\n\t}\n\twg.Wait()\n\n\t// Check that the mapPool's \"next\" transition map does not get\n\t// too large, even with unrealistically high\n\t// replication_desired values.\n\tc.Logf(\"blocks==%d len(classes)==%d --> len(pool)==%d\", nblocks, len(classes), len(bsm.pool.next))\n\tc.Check(len(bsm.pool.next) <= int(math.Pow(float64(maxPoolReplication+1), float64(len(classes)))), check.Equals, true)\n\n\t// Check that all pool entries are unique, i.e., if ent1 !=\n\t// ent2, then maps *ent1 and *ent2 have different content.\n\tents := map[mapPoolEnt]bool{}\n\tfor transition, ent := range bsm.pool.next {\n\t\tents[ent] = true\n\t\tents[transition.ent] = true\n\t}\n\tseen := map[string]bool{}\n\tfor ent := range ents {\n\t\tvar txt string\n\t\tif ent != nil {\n\t\t\tvar classes []string\n\t\t\tfor class := range *ent {\n\t\t\t\tclasses = append(classes, class)\n\t\t\t}\n\t\t\tsort.Strings(classes)\n\t\t\tfor _, class := range classes {\n\t\t\t\ttxt += fmt.Sprintf(\"%s %d \", class, (*ent)[class])\n\t\t\t}\n\t\t}\n\t\tc.Check(seen[txt], check.Equals, false, check.Commentf(\"seen twice: %s\", txt))\n\t\tseen[txt] = true\n\t}\n\tc.Assert(seen, check.HasLen, len(ents))\n}\n"
  },
  {
    "path": "services/keep-balance/change_set.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/services/keepstore\"\n)\n\n// Pull is a request to retrieve a block from a remote server, and\n// store it locally.\ntype Pull struct {\n\tarvados.SizedDigest\n\tFrom *KeepService\n\tTo   *KeepMount\n}\n\n// MarshalJSON formats a pull request the way keepstore wants to see\n// it.\nfunc (p Pull) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(keepstore.PullListItem{\n\t\tLocator:   string(p.SizedDigest),\n\t\tServers:   []string{p.From.URLBase()},\n\t\tMountUUID: p.To.KeepMount.UUID,\n\t})\n}\n\n// Trash is a request to delete a block.\ntype Trash struct {\n\tarvados.SizedDigest\n\tMtime int64\n\tFrom  *KeepMount\n}\n\n// MarshalJSON formats a trash request the way keepstore wants to see\n// it, i.e., as a bare locator with no +size hint.\nfunc (t Trash) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(keepstore.TrashListItem{\n\t\tLocator:    string(t.SizedDigest),\n\t\tBlockMtime: t.Mtime,\n\t\tMountUUID:  t.From.KeepMount.UUID,\n\t})\n}\n\n// ChangeSet is a set of change requests that will be sent to a\n// keepstore server.\ntype ChangeSet struct {\n\tPullLimit  int\n\tTrashLimit int\n\n\tPulls           []Pull\n\tPullsDeferred   int // number that weren't added because of PullLimit\n\tTrashes         []Trash\n\tTrashesDeferred int // number that weren't added because of TrashLimit\n\tmutex           sync.Mutex\n}\n\n// AddPull adds a Pull operation.\nfunc (cs *ChangeSet) AddPull(p Pull) {\n\tcs.mutex.Lock()\n\tif len(cs.Pulls) < cs.PullLimit {\n\t\tcs.Pulls = append(cs.Pulls, p)\n\t} else {\n\t\tcs.PullsDeferred++\n\t}\n\tcs.mutex.Unlock()\n}\n\n// AddTrash adds a Trash operation\nfunc (cs *ChangeSet) AddTrash(t Trash) {\n\tcs.mutex.Lock()\n\tif len(cs.Trashes) < cs.TrashLimit {\n\t\tcs.Trashes = append(cs.Trashes, t)\n\t} else {\n\t\tcs.TrashesDeferred++\n\t}\n\tcs.mutex.Unlock()\n}\n\n// String implements fmt.Stringer.\nfunc (cs *ChangeSet) String() string {\n\tcs.mutex.Lock()\n\tdefer cs.mutex.Unlock()\n\treturn fmt.Sprintf(\"ChangeSet{Pulls:%d, Trashes:%d} Deferred{Pulls:%d Trashes:%d}\", len(cs.Pulls), len(cs.Trashes), cs.PullsDeferred, cs.TrashesDeferred)\n}\n"
  },
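  {
    "path": "services/keep-balance/change_set_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&changeSetLimitSuite{})\n\ntype changeSetLimitSuite struct{}\n\n// A minimal sketch of the PullLimit/TrashLimit behavior -- an\n// illustrative example file, not part of the original source tree.\n// Once a list reaches its limit, additional operations are counted\n// as deferred instead of being queued.\nfunc (s *changeSetLimitSuite) TestLimitsDeferExcessOperations(c *check.C) {\n\tcs := &ChangeSet{PullLimit: 1, TrashLimit: 1}\n\tblk := arvados.SizedDigest(\"acbd18db4cc2f85cedef654fccc4a4d8+3\")\n\tcs.AddPull(Pull{SizedDigest: blk})\n\tcs.AddPull(Pull{SizedDigest: blk})\n\tcs.AddTrash(Trash{SizedDigest: blk, Mtime: 123456789})\n\tcs.AddTrash(Trash{SizedDigest: blk, Mtime: 123456789})\n\tc.Check(len(cs.Pulls), check.Equals, 1)\n\tc.Check(cs.PullsDeferred, check.Equals, 1)\n\tc.Check(len(cs.Trashes), check.Equals, 1)\n\tc.Check(cs.TrashesDeferred, check.Equals, 1)\n\tc.Check(cs.String(), check.Equals, \"ChangeSet{Pulls:1, Trashes:1} Deferred{Pulls:1 Trashes:1}\")\n}\n"
  },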
  {
    "path": "services/keep-balance/change_set_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"encoding/json\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&changeSetSuite{})\n\ntype changeSetSuite struct{}\n\nfunc (s *changeSetSuite) TestJSONFormat(c *check.C) {\n\tmnt := &KeepMount{\n\t\tKeepMount: arvados.KeepMount{\n\t\t\tUUID: \"zzzzz-mount-abcdefghijklmno\"}}\n\tsrv := &KeepService{\n\t\tKeepService: arvados.KeepService{\n\t\t\tUUID:           \"zzzzz-bi6l4-000000000000001\",\n\t\t\tServiceType:    \"disk\",\n\t\t\tServiceSSLFlag: false,\n\t\t\tServiceHost:    \"keep1.zzzzz.arvadosapi.com\",\n\t\t\tServicePort:    25107}}\n\n\tbuf, err := json.Marshal([]Pull{{\n\t\tSizedDigest: arvados.SizedDigest(\"acbd18db4cc2f85cedef654fccc4a4d8+3\"),\n\t\tTo:          mnt,\n\t\tFrom:        srv}})\n\tc.Check(err, check.IsNil)\n\tc.Check(string(buf), check.Equals, `[{\"locator\":\"acbd18db4cc2f85cedef654fccc4a4d8+3\",\"servers\":[\"http://keep1.zzzzz.arvadosapi.com:25107\"],\"mount_uuid\":\"zzzzz-mount-abcdefghijklmno\"}]`)\n\n\tbuf, err = json.Marshal([]Trash{{\n\t\tSizedDigest: arvados.SizedDigest(\"acbd18db4cc2f85cedef654fccc4a4d8+3\"),\n\t\tFrom:        mnt,\n\t\tMtime:       123456789}})\n\tc.Check(err, check.IsNil)\n\tc.Check(string(buf), check.Equals, `[{\"locator\":\"acbd18db4cc2f85cedef654fccc4a4d8+3\",\"block_mtime\":123456789,\"mount_uuid\":\"zzzzz-mount-abcdefghijklmno\"}]`)\n}\n"
  },
  {
    "path": "services/keep-balance/collection.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/jmoiron/sqlx\"\n)\n\nfunc countCollections(c *arvados.Client, params arvados.ResourceListParams) (int, error) {\n\tvar page arvados.CollectionList\n\tvar zero int\n\tparams.Limit = &zero\n\tparams.Count = \"exact\"\n\terr := c.RequestAndDecode(&page, \"GET\", \"arvados/v1/collections\", nil, params)\n\treturn page.ItemsAvailable, err\n}\n\n// EachCollection calls f once for every readable\n// collection. EachCollection stops if it encounters an error, such as\n// f returning a non-nil error.\n//\n// The progress function is called periodically with done (number of\n// times f has been called) and total (number of times f is expected\n// to be called).\nfunc EachCollection(ctx context.Context, db *sqlx.DB, c *arvados.Client, f func(arvados.Collection) error, progress func(done, total int)) error {\n\tif progress == nil {\n\t\tprogress = func(_, _ int) {}\n\t}\n\n\texpectCount, err := countCollections(c, arvados.ResourceListParams{\n\t\tIncludeTrash:       true,\n\t\tIncludeOldVersions: true,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar newestModifiedAt time.Time\n\n\trows, err := db.QueryxContext(ctx, `SELECT\n\t\tuuid, manifest_text, modified_at, portable_data_hash,\n\t\treplication_desired, replication_confirmed, replication_confirmed_at,\n\t\tstorage_classes_desired, storage_classes_confirmed, storage_classes_confirmed_at,\n\t\tis_trashed\n\t\tFROM collections`)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\tprogressTicker := time.NewTicker(10 * time.Second)\n\tdefer progressTicker.Stop()\n\tcallCount := 0\n\tfor rows.Next() {\n\t\tvar coll arvados.Collection\n\t\tvar classesDesired, classesConfirmed []byte\n\t\terr = rows.Scan(&coll.UUID, &coll.ManifestText, &coll.ModifiedAt, &coll.PortableDataHash,\n\t\t\t&coll.ReplicationDesired, &coll.ReplicationConfirmed, &coll.ReplicationConfirmedAt,\n\t\t\t&classesDesired, &classesConfirmed, &coll.StorageClassesConfirmedAt,\n\t\t\t&coll.IsTrashed)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = json.Unmarshal(classesDesired, &coll.StorageClassesDesired)\n\t\tif err != nil && len(classesDesired) > 0 {\n\t\t\treturn err\n\t\t}\n\t\terr = json.Unmarshal(classesConfirmed, &coll.StorageClassesConfirmed)\n\t\tif err != nil && len(classesConfirmed) > 0 {\n\t\t\treturn err\n\t\t}\n\t\tif newestModifiedAt.IsZero() || newestModifiedAt.Before(coll.ModifiedAt) {\n\t\t\tnewestModifiedAt = coll.ModifiedAt\n\t\t}\n\t\tcallCount++\n\t\terr = f(coll)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tselect {\n\t\tcase <-progressTicker.C:\n\t\t\tprogress(callCount, expectCount)\n\t\tdefault:\n\t\t}\n\t}\n\tprogress(callCount, expectCount)\n\terr = rows.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif checkCount, err := countCollections(c, arvados.ResourceListParams{\n\t\tFilters: []arvados.Filter{{\n\t\t\tAttr:     \"modified_at\",\n\t\t\tOperator: \"<=\",\n\t\t\tOperand:  newestModifiedAt}},\n\t\tIncludeTrash:       true,\n\t\tIncludeOldVersions: true,\n\t}); err != nil {\n\t\treturn err\n\t} else if callCount < checkCount {\n\t\treturn fmt.Errorf(\"Retrieved %d collections with modtime <= T=%q, but server now reports there are %d collections with modtime <= T\", callCount, 
newestModifiedAt, checkCount)\n\t}\n\n\treturn nil\n}\n\nfunc (bal *Balancer) updateCollections(ctx context.Context, c *arvados.Client, cluster *arvados.Cluster) error {\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tdefer bal.time(\"update_collections\", \"wall clock time to update collections\")()\n\tthreshold := time.Now()\n\tthresholdStr := threshold.Format(time.RFC3339Nano)\n\n\tupdated := int64(0)\n\n\terrs := make(chan error, 1)\n\tcollQ := make(chan arvados.Collection, cluster.Collections.BalanceCollectionBuffers)\n\tgoSendErr(errs, func() error {\n\t\tdefer close(collQ)\n\t\treachedLimit := errors.New(\"reached limit\")\n\t\terr := EachCollection(ctx, bal.DB, c, func(coll arvados.Collection) error {\n\t\t\tif atomic.LoadInt64(&updated) >= int64(cluster.Collections.BalanceUpdateLimit) {\n\t\t\t\tbal.logf(\"reached BalanceUpdateLimit (%d)\", cluster.Collections.BalanceUpdateLimit)\n\t\t\t\treturn reachedLimit\n\t\t\t}\n\t\t\tcollQ <- coll\n\t\t\treturn nil\n\t\t}, func(done, total int) {\n\t\t\tbal.logf(\"update collections: %d/%d (%d updated @ %.01f updates/s)\", done, total, atomic.LoadInt64(&updated), float64(atomic.LoadInt64(&updated))/time.Since(threshold).Seconds())\n\t\t})\n\t\tif err == reachedLimit {\n\t\t\terr = nil\n\t\t}\n\t\treturn err\n\t})\n\n\tvar wg sync.WaitGroup\n\n\t// Use about 1 goroutine per 2 CPUs. Based on experiments with\n\t// a 2-core host, using more concurrent database\n\t// calls/transactions makes this process slower, not faster.\n\tfor i := 0; i < (runtime.NumCPU()+1)/2; i++ {\n\t\twg.Add(1)\n\t\tgoSendErr(errs, func() error {\n\t\t\tdefer wg.Done()\n\t\t\ttx, err := bal.DB.Beginx()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttxPending := 0\n\t\t\tflush := func(final bool) error {\n\t\t\t\terr := tx.Commit()\n\t\t\t\tif err != nil {\n\t\t\t\t\ttx.Rollback()\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ttxPending = 0\n\t\t\t\tif final || ctx.Err() != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\ttx, err = bal.DB.Beginx()\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttxBatch := 100\n\t\t\tfor coll := range collQ {\n\t\t\t\tif ctx.Err() != nil || len(errs) > 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tblkids, err := coll.SizedDigests()\n\t\t\t\tif err != nil {\n\t\t\t\t\tbal.logf(\"%s: %s\", coll.UUID, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\trepl := bal.BlockStateMap.GetConfirmedReplication(blkids, coll.StorageClassesDesired)\n\n\t\t\t\tdesired := bal.DefaultReplication\n\t\t\t\tif coll.ReplicationDesired != nil {\n\t\t\t\t\tdesired = *coll.ReplicationDesired\n\t\t\t\t}\n\t\t\t\tif repl > desired {\n\t\t\t\t\t// If actual>desired, confirm\n\t\t\t\t\t// the desired number rather\n\t\t\t\t\t// than actual to avoid\n\t\t\t\t\t// flapping updates when\n\t\t\t\t\t// replication increases\n\t\t\t\t\t// temporarily.\n\t\t\t\t\trepl = desired\n\t\t\t\t}\n\t\t\t\tneedUpdate := coll.ReplicationConfirmed == nil ||\n\t\t\t\t\t*coll.ReplicationConfirmed != repl ||\n\t\t\t\t\trepl > 0 && len(coll.StorageClassesConfirmed) != len(coll.StorageClassesDesired) ||\n\t\t\t\t\trepl == 0 && len(coll.StorageClassesConfirmed) != 0\n\t\t\t\tif !needUpdate && repl > 0 {\n\t\t\t\t\tfor i := range coll.StorageClassesDesired {\n\t\t\t\t\t\tif coll.StorageClassesDesired[i] != coll.StorageClassesConfirmed[i] {\n\t\t\t\t\t\t\tneedUpdate = true\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !needUpdate {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tclasses := emptyJSONArray\n\t\t\t\tif repl > 0 {\n\t\t\t\t\tclasses, err = 
json.Marshal(coll.StorageClassesDesired)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tbal.logf(\"BUG? json.Marshal(%v) failed: %s\", classes, err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t_, err = tx.ExecContext(ctx, `update collections set\n\t\t\t\t\treplication_confirmed=$1,\n\t\t\t\t\treplication_confirmed_at=$2,\n\t\t\t\t\tstorage_classes_confirmed=$3,\n\t\t\t\t\tstorage_classes_confirmed_at=$2\n\t\t\t\t\twhere uuid=$4`,\n\t\t\t\t\trepl, thresholdStr, classes, coll.UUID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif ctx.Err() == nil {\n\t\t\t\t\t\tbal.logf(\"%s: update failed: %s\", coll.UUID, err)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tatomic.AddInt64(&updated, 1)\n\t\t\t\tif txPending++; txPending >= txBatch {\n\t\t\t\t\terr = flush(false)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn flush(true)\n\t\t})\n\t}\n\twg.Wait()\n\tbal.logf(\"updated %d collections\", updated)\n\t// Drain collQ in case all workers exited before completion\n\tfor range collQ {\n\t}\n\tif len(errs) > 0 {\n\t\treturn fmt.Errorf(\"error updating collections: %w\", <-errs)\n\t}\n\treturn nil\n}\n\n// Call f in a new goroutine. If it returns a non-nil error, send the\n// error to the errs channel (unless the channel is already full with\n// another error).\nfunc goSendErr(errs chan<- error, f func() error) {\n\tgo func() {\n\t\terr := f()\n\t\tif err != nil {\n\t\t\tselect {\n\t\t\tcase errs <- err:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}()\n}\n\nvar emptyJSONArray = []byte(\"[]\")\n"
  },
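  {
    "path": "services/keep-balance/collection_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"errors\"\n\t\"sync\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&goSendErrSuite{})\n\ntype goSendErrSuite struct{}\n\n// A minimal sketch of the goSendErr pattern used by\n// updateCollections -- an illustrative example file, not part of the\n// original source tree. Many goroutines report into a capacity-1\n// error channel: the first error is kept, later ones are dropped by\n// the select/default in goSendErr, and no sender ever blocks.\nfunc (s *goSendErrSuite) TestFirstErrorWins(c *check.C) {\n\terrs := make(chan error, 1)\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 3; i++ {\n\t\twg.Add(1)\n\t\tgoSendErr(errs, func() error {\n\t\t\tdefer wg.Done()\n\t\t\treturn errors.New(\"boom\")\n\t\t})\n\t}\n\t// Wait for the worker funcs to finish, then confirm an error\n\t// was captured; the extra errors were dropped, not queued.\n\twg.Wait()\n\tc.Check(<-errs, check.ErrorMatches, \"boom\")\n}\n"
  },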
  {
    "path": "services/keep-balance/collection_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"context\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/jmoiron/sqlx\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\n// TestMissedCollections exercises EachCollection's sanity check:\n// #collections processed >= #old collections that exist in database\n// after processing.\nfunc (s *integrationSuite) TestMissedCollections(c *check.C) {\n\tcfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()\n\tc.Assert(err, check.IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\tdb, err := sqlx.Open(\"postgres\", cluster.PostgreSQL.Connection.String())\n\tc.Assert(err, check.IsNil)\n\n\tdefer db.Exec(`delete from collections where uuid = 'zzzzz-4zz18-404040404040404'`)\n\tinsertedOld := false\n\terr = EachCollection(context.Background(), db, s.client, func(coll arvados.Collection) error {\n\t\tif !insertedOld {\n\t\t\tinsertedOld = true\n\t\t\t_, err := db.Exec(`insert into collections (uuid, created_at, updated_at, modified_at) values ('zzzzz-4zz18-404040404040404', '2002-02-02T02:02:02Z', '2002-02-02T02:02:02Z', '2002-02-02T02:02:02Z')`)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}, nil)\n\tc.Check(err, check.ErrorMatches, `Retrieved .* collections .* but server now reports .* collections.*`)\n}\n"
  },
  {
    "path": "services/keep-balance/integration_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\t\"github.com/jmoiron/sqlx\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&integrationSuite{})\n\ntype integrationSuite struct {\n\tconfig     *arvados.Cluster\n\tdb         *sqlx.DB\n\tclient     *arvados.Client\n\tkeepClient *keepclient.KeepClient\n}\n\nfunc (s *integrationSuite) SetUpSuite(c *check.C) {\n\tif testing.Short() {\n\t\tc.Skip(\"-short\")\n\t}\n\tarvadostest.ResetEnv()\n\tarvadostest.StartKeep(4, true)\n\n\tarv, err := arvadosclient.MakeArvadosClient()\n\tarv.ApiToken = arvadostest.SystemRootToken\n\tc.Assert(err, check.IsNil)\n\n\ts.keepClient, err = keepclient.MakeKeepClient(arv)\n\tc.Assert(err, check.IsNil)\n\ts.keepClient.DiskCacheSize = keepclient.DiskCacheDisabled\n\ts.putReplicas(c, \"foo\", 4)\n\ts.putReplicas(c, \"bar\", 1)\n}\n\nfunc (s *integrationSuite) putReplicas(c *check.C, data string, replicas int) {\n\ts.keepClient.Want_replicas = replicas\n\t_, _, err := s.keepClient.PutB([]byte(data))\n\tc.Assert(err, check.IsNil)\n}\n\nfunc (s *integrationSuite) TearDownSuite(c *check.C) {\n\tif testing.Short() {\n\t\tc.Skip(\"-short\")\n\t}\n\tarvadostest.StopKeep(4)\n}\n\nfunc (s *integrationSuite) SetUpTest(c *check.C) {\n\tcfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()\n\tc.Assert(err, check.Equals, nil)\n\ts.config, err = cfg.GetCluster(\"\")\n\tc.Assert(err, check.Equals, nil)\n\ts.db, err = sqlx.Open(\"postgres\", s.config.PostgreSQL.Connection.String())\n\tc.Assert(err, check.IsNil)\n\ts.config.Collections.BalancePeriod = arvados.Duration(time.Second)\n\n\ts.client = &arvados.Client{\n\t\tAPIHost:   os.Getenv(\"ARVADOS_API_HOST\"),\n\t\tAuthToken: arvadostest.SystemRootToken,\n\t\tInsecure:  true,\n\t}\n}\n\nfunc (s *integrationSuite) TestBalanceAPIFixtures(c *check.C) {\n\tvar logBuf bytes.Buffer\n\tfor iter := 0; iter < 20; iter++ {\n\t\tlogBuf.Reset()\n\t\tlogger := logrus.New()\n\t\tlogger.Out = io.MultiWriter(&logBuf, ctxlog.LogWriter(c.Log))\n\t\topts := RunOptions{\n\t\t\tCommitConfirmedFields: true,\n\t\t\tLogger:                logger,\n\t\t}\n\n\t\tbal := &Balancer{\n\t\t\tDB:      s.db,\n\t\t\tLogger:  logger,\n\t\t\tMetrics: newMetrics(prometheus.NewRegistry()),\n\t\t}\n\t\tnextOpts, err := bal.Run(context.Background(), s.client, s.config, opts)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(nextOpts.SafeRendezvousState, check.Not(check.Equals), \"\")\n\t\tif iter == 0 {\n\t\t\tc.Check(logBuf.String(), check.Matches, `(?ms).*ChangeSet{Pulls:1.*`)\n\t\t\tc.Check(logBuf.String(), check.Not(check.Matches), `(?ms).*ChangeSet{.*Trashes:[^0]}*`)\n\t\t} else if !strings.Contains(logBuf.String(), \"ChangeSet{Pulls:1\") {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(200 * time.Millisecond)\n\t}\n\tc.Check(logBuf.String(), check.Not(check.Matches), `(?ms).*0 replicas (0 blocks, 0 bytes) underreplicated.*`)\n\n\tfor _, trial := range []struct {\n\t\tuuid    string\n\t\trepl    int\n\t\tclasses 
[]string\n\t}{\n\t\t{arvadostest.EmptyCollectionUUID, 0, []string{}},\n\t\t{arvadostest.FooCollection, 2, []string{\"default\"}},                                // \"foo\" blk\n\t\t{arvadostest.StorageClassesDesiredDefaultConfirmedDefault, 2, []string{\"default\"}}, // \"bar\" blk\n\t\t{arvadostest.StorageClassesDesiredArchiveConfirmedDefault, 0, []string{}},          // \"bar\" blk\n\t} {\n\t\tc.Logf(\"%#v\", trial)\n\t\tvar coll arvados.Collection\n\t\ts.client.RequestAndDecode(&coll, \"GET\", \"arvados/v1/collections/\"+trial.uuid, nil, nil)\n\t\tif c.Check(coll.ReplicationConfirmed, check.NotNil) {\n\t\t\tc.Check(*coll.ReplicationConfirmed, check.Equals, trial.repl)\n\t\t}\n\t\tc.Check(coll.StorageClassesConfirmed, check.DeepEquals, trial.classes)\n\t}\n}\n"
  },
  {
    "path": "services/keep-balance/keep_service.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// KeepService represents a keepstore server that is being rebalanced.\ntype KeepService struct {\n\tarvados.KeepService\n\tmounts []*KeepMount\n\t*ChangeSet\n}\n\n// String implements fmt.Stringer.\nfunc (srv *KeepService) String() string {\n\treturn fmt.Sprintf(\"%s (%s:%d, %s)\", srv.UUID, srv.ServiceHost, srv.ServicePort, srv.ServiceType)\n}\n\nvar ksSchemes = map[bool]string{false: \"http\", true: \"https\"}\n\n// URLBase returns scheme://host:port for this server.\nfunc (srv *KeepService) URLBase() string {\n\treturn fmt.Sprintf(\"%s://%s:%d\", ksSchemes[srv.ServiceSSLFlag], srv.ServiceHost, srv.ServicePort)\n}\n\n// CommitPulls sends the current list of pull requests to the storage\n// server (even if the list is empty).\nfunc (srv *KeepService) CommitPulls(ctx context.Context, c *arvados.Client) error {\n\treturn srv.put(ctx, c, \"pull\", srv.ChangeSet.Pulls)\n}\n\n// CommitTrash sends the current list of trash requests to the storage\n// server (even if the list is empty).\nfunc (srv *KeepService) CommitTrash(ctx context.Context, c *arvados.Client) error {\n\treturn srv.put(ctx, c, \"trash\", srv.ChangeSet.Trashes)\n}\n\n// Perform a PUT request at path, with data (as JSON) in the request\n// body.\nfunc (srv *KeepService) put(ctx context.Context, c *arvados.Client, path string, data interface{}) error {\n\t// We'll start a goroutine to do the JSON encoding, so we can\n\t// stream it to the http client through a Pipe, rather than\n\t// keeping the entire encoded version in memory.\n\tjsonR, jsonW := io.Pipe()\n\n\t// errC communicates any encoding errors back to our main\n\t// goroutine.\n\terrC := make(chan error, 1)\n\n\tgo func() {\n\t\tenc := json.NewEncoder(jsonW)\n\t\terrC <- enc.Encode(data)\n\t\tjsonW.Close()\n\t}()\n\n\turl := srv.URLBase() + \"/\" + path\n\treq, err := http.NewRequestWithContext(ctx, \"PUT\", url, ioutil.NopCloser(jsonR))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"building request for %s: %v\", url, err)\n\t}\n\terr = c.DoAndDecode(nil, req)\n\n\t// If there was an error encoding the request body, report\n\t// that instead of the response: obviously we won't get a\n\t// useful response if our request wasn't properly encoded.\n\tif encErr := <-errC; encErr != nil {\n\t\treturn fmt.Errorf(\"encoding data for %s: %v\", url, encErr)\n\t}\n\n\treturn err\n}\n\nfunc (srv *KeepService) discoverMounts(c *arvados.Client) error {\n\tmounts, err := srv.Mounts(c)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s: error retrieving mounts: %v\", srv, err)\n\t}\n\tsrv.mounts = nil\n\tfor _, m := range mounts {\n\t\tsrv.mounts = append(srv.mounts, &KeepMount{\n\t\t\tKeepMount:   m,\n\t\t\tKeepService: srv,\n\t\t})\n\t}\n\treturn nil\n}\n\ntype KeepMount struct {\n\tarvados.KeepMount\n\tKeepService *KeepService\n}\n\n// String implements fmt.Stringer.\nfunc (mnt *KeepMount) String() string {\n\treturn fmt.Sprintf(\"%s (%s) on %s\", mnt.UUID, mnt.DeviceID, mnt.KeepService)\n}\n"
  },
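  {
    "path": "services/keep-balance/keep_service_example_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"io/ioutil\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&putPipeSuite{})\n\ntype putPipeSuite struct{}\n\n// A minimal sketch of the streaming technique used by\n// KeepService.put -- an illustrative example file, not part of the\n// original source tree. JSON is encoded through an io.Pipe so the\n// request body is produced incrementally instead of being buffered\n// in memory, and the encoder's error comes back on a channel.\nfunc (s *putPipeSuite) TestStreamedJSONEncoding(c *check.C) {\n\tjsonR, jsonW := io.Pipe()\n\terrC := make(chan error, 1)\n\tgo func() {\n\t\tenc := json.NewEncoder(jsonW)\n\t\terrC <- enc.Encode([]int{1, 2, 3})\n\t\tjsonW.Close()\n\t}()\n\t// The reader side consumes the stream as it is produced\n\t// (Encode appends a trailing newline).\n\tbuf, err := ioutil.ReadAll(jsonR)\n\tc.Check(err, check.IsNil)\n\tc.Check(string(buf), check.Equals, \"[1,2,3]\\n\")\n\tc.Check(<-errC, check.IsNil)\n}\n"
  },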
  {
    "path": "services/keep-balance/main.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t_ \"net/http/pprof\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/service\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/health\"\n\t\"github.com/jmoiron/sqlx\"\n\t_ \"github.com/lib/pq\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\ntype command struct{}\n\nvar Command = command{}\n\nfunc (command) RunCommand(prog string, args []string, stdin io.Reader, stdout, stderr io.Writer) int {\n\tvar options RunOptions\n\tflags := flag.NewFlagSet(prog, flag.ContinueOnError)\n\tflags.BoolVar(&options.Once, \"once\", false,\n\t\t\"balance once and then exit\")\n\tdeprCommitPulls := flags.Bool(\"commit-pulls\", true,\n\t\t\"send pull requests (must be true -- configure Collections.BalancePullLimit = 0 to disable.)\")\n\tdeprCommitTrash := flags.Bool(\"commit-trash\", true,\n\t\t\"send trash requests (must be true -- configure Collections.BalanceTrashLimit = 0 to disable.)\")\n\tflags.BoolVar(&options.CommitConfirmedFields, \"commit-confirmed-fields\", true,\n\t\t\"update collection fields (replicas_confirmed, storage_classes_confirmed, etc.)\")\n\tflags.StringVar(&options.ChunkPrefix, \"chunk-prefix\", \"\",\n\t\t\"operate only on blocks with the given prefix (experimental, see https://dev.arvados.org/issues/19923)\")\n\t// These options are implemented by service.Command, so we\n\t// don't need the vars here -- we just need the flags\n\t// to pass flags.Parse().\n\tflags.Bool(\"dump\", false, \"dump details for each block to stdout\")\n\tflags.String(\"pprof\", \"\", \"serve Go profile data at `[addr]:port`\")\n\tflags.Bool(\"version\", false, \"Write version information to stdout and exit 0\")\n\n\tlogger := ctxlog.New(stderr, \"json\", \"info\")\n\tloader := config.NewLoader(&bytes.Buffer{}, logger)\n\tloader.SetupFlags(flags)\n\tmunged := loader.MungeLegacyConfigArgs(logger, args, \"-legacy-keepbalance-config\")\n\tif ok, code := cmd.ParseFlags(flags, prog, munged, \"\", stderr); !ok {\n\t\treturn code\n\t}\n\n\tif !*deprCommitPulls || !*deprCommitTrash {\n\t\tfmt.Fprint(stderr,\n\t\t\t\"Usage error: the -commit-pulls or -commit-trash command line flags are no longer supported.\\n\",\n\t\t\t\"Use Collections.BalancePullLimit and Collections.BalanceTrashLimit instead.\\n\")\n\t\treturn cmd.EXIT_INVALIDARGUMENT\n\t}\n\n\t// Drop our custom args that would be rejected by the generic\n\t// service.Command\n\targs = nil\n\tdropFlag := map[string]bool{\n\t\t\"once\":                    true,\n\t\t\"commit-pulls\":            true,\n\t\t\"commit-trash\":            true,\n\t\t\"commit-confirmed-fields\": true,\n\t\t\"dump\":                    true,\n\t}\n\tflags.Visit(func(f *flag.Flag) {\n\t\tif !dropFlag[f.Name] {\n\t\t\targs = append(args, \"-\"+f.Name+\"=\"+f.Value.String())\n\t\t}\n\t})\n\n\treturn service.Command(arvados.ServiceNameKeepbalance,\n\t\tfunc(ctx context.Context, cluster *arvados.Cluster, token string, registry *prometheus.Registry) service.Handler {\n\t\t\tif !options.Once && cluster.Collections.BalancePeriod == arvados.Duration(0) {\n\t\t\t\treturn service.ErrorHandler(ctx, cluster, fmt.Errorf(\"cannot start service: Collections.BalancePeriod is zero (if you want to run once and then 
exit, use the -once flag)\"))\n\t\t\t}\n\n\t\t\tac, err := arvados.NewClientFromConfig(cluster)\n\t\t\tac.AuthToken = token\n\t\t\tif err != nil {\n\t\t\t\treturn service.ErrorHandler(ctx, cluster, fmt.Errorf(\"error initializing client from cluster config: %s\", err))\n\t\t\t}\n\n\t\t\tdb, err := sqlx.Open(\"postgres\", cluster.PostgreSQL.Connection.String())\n\t\t\tif err != nil {\n\t\t\t\treturn service.ErrorHandler(ctx, cluster, fmt.Errorf(\"postgresql connection failed: %s\", err))\n\t\t\t}\n\t\t\tif p := cluster.PostgreSQL.ConnectionPool; p > 0 {\n\t\t\t\tdb.SetMaxOpenConns(p)\n\t\t\t}\n\t\t\terr = db.Ping()\n\t\t\tif err != nil {\n\t\t\t\treturn service.ErrorHandler(ctx, cluster, fmt.Errorf(\"postgresql connection succeeded but ping failed: %s\", err))\n\t\t\t}\n\n\t\t\tif options.Logger == nil {\n\t\t\t\toptions.Logger = ctxlog.FromContext(ctx)\n\t\t\t}\n\n\t\t\tsrv := &Server{\n\t\t\t\tCluster:    cluster,\n\t\t\t\tArvClient:  ac,\n\t\t\t\tRunOptions: options,\n\t\t\t\tMetrics:    newMetrics(registry),\n\t\t\t\tLogger:     options.Logger,\n\t\t\t\tDumper:     options.Dumper,\n\t\t\t\tDB:         db,\n\t\t\t}\n\t\t\tsrv.Handler = &health.Handler{\n\t\t\t\tToken:  cluster.ManagementToken,\n\t\t\t\tPrefix: \"/_health/\",\n\t\t\t\tRoutes: health.Routes{\"ping\": srv.CheckHealth},\n\t\t\t}\n\n\t\t\tgo srv.run(ctx)\n\t\t\treturn srv\n\t\t}).RunCommand(prog, args, stdin, stdout, stderr)\n}\n"
  },
  {
    "path": "services/keep-balance/main_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"bytes\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/ghodss/yaml\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&mainSuite{})\n\ntype mainSuite struct{}\n\nfunc (s *mainSuite) TestVersionFlag(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tCommand.RunCommand(\"keep-balance\", []string{\"-version\"}, nil, &stdout, &stderr)\n\tc.Check(stderr.String(), check.Equals, \"\")\n\tc.Log(stdout.String())\n\tc.Check(stdout.String(), check.Matches, `keep-balance.*\\(go1.*\\)\\n`)\n}\n\nfunc (s *mainSuite) TestHTTPServer(c *check.C) {\n\tarvadostest.StartKeep(2, true)\n\n\tln, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\tc.Fatal(err)\n\t}\n\t_, p, err := net.SplitHostPort(ln.Addr().String())\n\tc.Check(err, check.IsNil)\n\tln.Close()\n\tcfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()\n\tc.Assert(err, check.IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, check.IsNil)\n\tcluster.Services.Keepbalance.InternalURLs[arvados.URL{Host: \"localhost:\" + p, Path: \"/\"}] = arvados.ServiceInstance{}\n\tcfg.Clusters[cluster.ClusterID] = *cluster\n\tconfig, err := yaml.Marshal(cfg)\n\tc.Assert(err, check.IsNil)\n\n\tvar stdout bytes.Buffer\n\tgo Command.RunCommand(\"keep-balance\", []string{\"-config\", \"-\"}, bytes.NewBuffer(config), &stdout, &stdout)\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer close(done)\n\t\tfor {\n\t\t\ttime.Sleep(time.Second / 10)\n\t\t\treq, err := http.NewRequest(http.MethodGet, \"http://:\"+p+\"/metrics\", nil)\n\t\t\tif err != nil {\n\t\t\t\tc.Fatal(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\treq.Header.Set(\"Authorization\", \"Bearer \"+cluster.ManagementToken)\n\t\t\tresp, err := http.DefaultClient.Do(req)\n\t\t\tif err != nil {\n\t\t\t\tc.Logf(\"error %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t\tc.Logf(\"http status %d\", resp.StatusCode)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbuf, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\tc.Logf(\"read body: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.Check(string(buf), check.Matches, `(?ms).*arvados_keepbalance_sweep_seconds_sum.*`)\n\t\t\treturn\n\t\t}\n\t}()\n\tselect {\n\tcase <-done:\n\tcase <-time.After(time.Second):\n\t\tc.Log(stdout.String())\n\t\tc.Fatal(\"timeout\")\n\t}\n\tc.Log(stdout.String())\n\n\t// Check non-metrics URL that gets passed through to us from\n\t// service.Command\n\treq, err := http.NewRequest(http.MethodGet, \"http://:\"+p+\"/not-metrics\", nil)\n\tc.Assert(err, check.IsNil)\n\tresp, err := http.DefaultClient.Do(req)\n\tc.Check(err, check.IsNil)\n\tdefer resp.Body.Close()\n\tc.Check(resp.StatusCode, check.Equals, http.StatusNotFound)\n}\n"
  },
  {
    "path": "services/keep-balance/metrics.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n)\n\ntype observer interface{ Observe(float64) }\ntype setter interface{ Set(float64) }\n\ntype metrics struct {\n\treg            *prometheus.Registry\n\tstatsGauges    map[string]setter\n\tstatsGaugeVecs map[string]*prometheus.GaugeVec\n\tobservers      map[string]observer\n\tsetupOnce      sync.Once\n\tmtx            sync.Mutex\n}\n\nfunc newMetrics(registry *prometheus.Registry) *metrics {\n\treturn &metrics{\n\t\treg:            registry,\n\t\tstatsGauges:    map[string]setter{},\n\t\tstatsGaugeVecs: map[string]*prometheus.GaugeVec{},\n\t\tobservers:      map[string]observer{},\n\t}\n}\n\nfunc (m *metrics) DurationObserver(name, help string) observer {\n\tm.mtx.Lock()\n\tdefer m.mtx.Unlock()\n\tif obs, ok := m.observers[name]; ok {\n\t\treturn obs\n\t}\n\tsummary := prometheus.NewSummary(prometheus.SummaryOpts{\n\t\tNamespace: \"arvados\",\n\t\tName:      name,\n\t\tSubsystem: \"keepbalance\",\n\t\tHelp:      help,\n\t})\n\tm.reg.MustRegister(summary)\n\tm.observers[name] = summary\n\treturn summary\n}\n\n// UpdateStats updates prometheus metrics using the given\n// balancerStats. It creates and registers the needed gauges on its\n// first invocation.\nfunc (m *metrics) UpdateStats(s balancerStats) {\n\ttype gauge struct {\n\t\tValue interface{}\n\t\tHelp  string\n\t}\n\ts2g := map[string]gauge{\n\t\t\"total\":             {s.current, \"current backend storage usage\"},\n\t\t\"garbage\":           {s.garbage, \"garbage (unreferenced, old)\"},\n\t\t\"transient\":         {s.unref, \"transient (unreferenced, new)\"},\n\t\t\"overreplicated\":    {s.overrep, \"overreplicated\"},\n\t\t\"underreplicated\":   {s.underrep, \"underreplicated\"},\n\t\t\"unachievable\":      {s.unachievable, \"unachievable\"},\n\t\t\"balanced\":          {s.justright, \"optimally balanced\"},\n\t\t\"desired\":           {s.desired, \"desired\"},\n\t\t\"lost\":              {s.lost, \"lost\"},\n\t\t\"dedup_byte_ratio\":  {s.dedupByteRatio(), \"deduplication ratio, bytes referenced / bytes stored\"},\n\t\t\"dedup_block_ratio\": {s.dedupBlockRatio(), \"deduplication ratio, blocks referenced / blocks stored\"},\n\t\t\"collection_bytes\":  {s.collectionBytes, \"total apparent size of all collections\"},\n\t\t\"referenced_bytes\":  {s.collectionBlockBytes, \"total size of unique referenced blocks\"},\n\t\t\"reference_count\":   {s.collectionBlockRefs, \"block references in all collections\"},\n\t\t\"referenced_blocks\": {s.collectionBlocks, \"blocks referenced by any collection\"},\n\n\t\t\"pull_entries_sent_count\":      {s.pulls, \"total entries sent in pull lists\"},\n\t\t\"pull_entries_deferred_count\":  {s.pullsDeferred, \"total entries deferred (not sent) in pull lists\"},\n\t\t\"trash_entries_sent_count\":     {s.trashes, \"total entries sent in trash lists\"},\n\t\t\"trash_entries_deferred_count\": {s.trashesDeferred, \"total entries deferred (not sent) in trash lists\"},\n\n\t\t\"replicated_block_count\": {s.replHistogram, \"blocks with indicated number of replicas at last count\"},\n\t\t\"usage\":                  {s.classStats, \"stored in indicated storage class\"},\n\t}\n\tm.setupOnce.Do(func() {\n\t\t// Register gauge(s) for each balancerStats field.\n\t\taddGauge := func(name, help 
string) {\n\t\t\tg := prometheus.NewGauge(prometheus.GaugeOpts{\n\t\t\t\tNamespace: \"arvados\",\n\t\t\t\tName:      name,\n\t\t\t\tSubsystem: \"keep\",\n\t\t\t\tHelp:      help,\n\t\t\t})\n\t\t\tm.reg.MustRegister(g)\n\t\t\tm.statsGauges[name] = g\n\t\t}\n\t\tfor name, gauge := range s2g {\n\t\t\tswitch gauge.Value.(type) {\n\t\t\tcase blocksNBytes:\n\t\t\t\tfor _, sub := range []string{\"blocks\", \"bytes\", \"replicas\"} {\n\t\t\t\t\taddGauge(name+\"_\"+sub, sub+\" of \"+gauge.Help)\n\t\t\t\t}\n\t\t\tcase int, int64, float64:\n\t\t\t\taddGauge(name, gauge.Help)\n\t\t\tcase []int:\n\t\t\t\t// replHistogram\n\t\t\t\tgv := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\tNamespace: \"arvados\",\n\t\t\t\t\tName:      name,\n\t\t\t\t\tSubsystem: \"keep\",\n\t\t\t\t\tHelp:      gauge.Help,\n\t\t\t\t}, []string{\"replicas\"})\n\t\t\t\tm.reg.MustRegister(gv)\n\t\t\t\tm.statsGaugeVecs[name] = gv\n\t\t\tcase map[string]replicationStats:\n\t\t\t\t// classStats\n\t\t\t\tfor _, sub := range []string{\"blocks\", \"bytes\", \"replicas\"} {\n\t\t\t\t\tname := name + \"_\" + sub\n\t\t\t\t\tgv := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\t\t\t\t\tNamespace: \"arvados\",\n\t\t\t\t\t\tName:      name,\n\t\t\t\t\t\tSubsystem: \"keep\",\n\t\t\t\t\t\tHelp:      gauge.Help,\n\t\t\t\t\t}, []string{\"storage_class\", \"status\"})\n\t\t\t\t\tm.reg.MustRegister(gv)\n\t\t\t\t\tm.statsGaugeVecs[name] = gv\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"bad gauge type %T\", gauge.Value))\n\t\t\t}\n\t\t}\n\t})\n\t// Set gauges to values from s.\n\tfor name, gauge := range s2g {\n\t\tswitch val := gauge.Value.(type) {\n\t\tcase blocksNBytes:\n\t\t\tm.statsGauges[name+\"_blocks\"].Set(float64(val.blocks))\n\t\t\tm.statsGauges[name+\"_bytes\"].Set(float64(val.bytes))\n\t\t\tm.statsGauges[name+\"_replicas\"].Set(float64(val.replicas))\n\t\tcase int:\n\t\t\tm.statsGauges[name].Set(float64(val))\n\t\tcase int64:\n\t\t\tm.statsGauges[name].Set(float64(val))\n\t\tcase float64:\n\t\t\tm.statsGauges[name].Set(float64(val))\n\t\tcase []int:\n\t\t\t// replHistogram\n\t\t\tfor r, n := range val {\n\t\t\t\tm.statsGaugeVecs[name].WithLabelValues(strconv.Itoa(r)).Set(float64(n))\n\t\t\t}\n\t\t\t// Record zero for higher-than-max-replication\n\t\t\t// metrics, so we don't incorrectly continue\n\t\t\t// to report stale metrics.\n\t\t\t//\n\t\t\t// For example, if we previously reported n=1\n\t\t\t// for repl=6, but have since restarted\n\t\t\t// keep-balance and the most replicated block\n\t\t\t// now has repl=5, then the repl=6 gauge will\n\t\t\t// still say n=1 until we clear it explicitly\n\t\t\t// here.\n\t\t\tfor r := len(val); r < len(val)+4 || r < len(val)*2; r++ {\n\t\t\t\tm.statsGaugeVecs[name].WithLabelValues(strconv.Itoa(r)).Set(0)\n\t\t\t}\n\t\tcase map[string]replicationStats:\n\t\t\t// classStats\n\t\t\tfor class, cs := range val {\n\t\t\t\tfor label, val := range map[string]blocksNBytes{\n\t\t\t\t\t\"needed\":       cs.needed,\n\t\t\t\t\t\"unneeded\":     cs.unneeded,\n\t\t\t\t\t\"pulling\":      cs.pulling,\n\t\t\t\t\t\"unachievable\": cs.unachievable,\n\t\t\t\t} {\n\t\t\t\t\tm.statsGaugeVecs[name+\"_blocks\"].WithLabelValues(class, label).Set(float64(val.blocks))\n\t\t\t\t\tm.statsGaugeVecs[name+\"_bytes\"].WithLabelValues(class, label).Set(float64(val.bytes))\n\t\t\t\t\tm.statsGaugeVecs[name+\"_replicas\"].WithLabelValues(class, label).Set(float64(val.replicas))\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"bad gauge type %T\", gauge.Value))\n\t\t}\n\t}\n}\n\nfunc (m *metrics) 
Handler(log promhttp.Logger) http.Handler {\n\treturn promhttp.HandlerFor(m.reg, promhttp.HandlerOpts{\n\t\tErrorLog: log,\n\t})\n}\n"
  },
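The UpdateStats logic above registers its gauges lazily (via setupOnce) and, on every pass, overwrites each label value and explicitly zeroes a margin of higher replication labels so that a shrinking histogram cannot leave stale repl=N gauges behind. Below is a minimal, self-contained sketch of that zeroing pattern; it is a hypothetical demo, not a file from this repo, and assumes only the prometheus/client_golang packages metrics.go already imports, plus its testutil helper.

package main

import (
	"fmt"
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func publish(gv *prometheus.GaugeVec, hist []int) {
	// Overwrite the label values that exist in this pass...
	for r, n := range hist {
		gv.WithLabelValues(strconv.Itoa(r)).Set(float64(n))
	}
	// ...and zero a margin of higher replication counts, mirroring
	// metrics.go, so values from an earlier, taller histogram don't
	// linger as stale gauges.
	for r := len(hist); r < len(hist)+4 || r < len(hist)*2; r++ {
		gv.WithLabelValues(strconv.Itoa(r)).Set(0)
	}
}

func main() {
	reg := prometheus.NewRegistry()
	gv := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "demo",
		Subsystem: "keep",
		Name:      "replicated_block_count",
		Help:      "blocks with indicated number of replicas",
	}, []string{"replicas"})
	reg.MustRegister(gv)

	publish(gv, []int{0, 3, 0, 0, 0, 0, 1}) // one block has 6 replicas
	publish(gv, []int{0, 3, 0, 0, 1})       // max replication dropped to 4

	// The repl=6 gauge now reports 0 rather than a stale 1.
	fmt.Println(testutil.ToFloat64(gv.WithLabelValues("6")))
}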
  {
    "path": "services/keep-balance/server.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepbalance\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/signal\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/controller/dblock\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/jmoiron/sqlx\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// RunOptions controls runtime behavior. The flags/options that belong\n// here are the ones that are useful for interactive use. For example,\n// \"CommitTrash\" is a runtime option rather than a config item because\n// it invokes a troubleshooting feature rather than expressing how\n// balancing is meant to be done at a given site.\n//\n// RunOptions fields are controlled by command line flags.\ntype RunOptions struct {\n\tOnce                  bool\n\tCommitConfirmedFields bool\n\tChunkPrefix           string\n\tLogger                logrus.FieldLogger\n\tDumper                logrus.FieldLogger\n\n\t// SafeRendezvousState from the most recent balance operation,\n\t// or \"\" if unknown. If this changes from one run to the next,\n\t// we need to watch out for races. See\n\t// (*Balancer)ClearTrashLists.\n\tSafeRendezvousState string\n}\n\ntype Server struct {\n\thttp.Handler\n\n\tCluster    *arvados.Cluster\n\tArvClient  *arvados.Client\n\tRunOptions RunOptions\n\tMetrics    *metrics\n\n\tLogger logrus.FieldLogger\n\tDumper logrus.FieldLogger\n\n\tDB *sqlx.DB\n}\n\n// CheckHealth implements service.Handler.\nfunc (srv *Server) CheckHealth() error {\n\treturn srv.DB.Ping()\n}\n\n// Done implements service.Handler.\nfunc (srv *Server) Done() <-chan struct{} {\n\treturn nil\n}\n\nfunc (srv *Server) run(ctx context.Context) {\n\tvar err error\n\tif srv.RunOptions.Once {\n\t\t_, err = srv.runOnce(ctx)\n\t} else {\n\t\terr = srv.runForever(ctx)\n\t}\n\tif err != nil {\n\t\tsrv.Logger.Error(err)\n\t\tos.Exit(1)\n\t} else {\n\t\tos.Exit(0)\n\t}\n}\n\nfunc (srv *Server) runOnce(ctx context.Context) (*Balancer, error) {\n\tbal := &Balancer{\n\t\tDB:             srv.DB,\n\t\tLogger:         srv.Logger,\n\t\tDumper:         srv.Dumper,\n\t\tMetrics:        srv.Metrics,\n\t\tLostBlocksFile: srv.Cluster.Collections.BlobMissingReport,\n\t\tChunkPrefix:    srv.RunOptions.ChunkPrefix,\n\t}\n\tvar err error\n\tsrv.RunOptions, err = bal.Run(ctx, srv.ArvClient, srv.Cluster, srv.RunOptions)\n\treturn bal, err\n}\n\n// RunForever runs forever, or until ctx is cancelled.\nfunc (srv *Server) runForever(ctx context.Context) error {\n\tlogger := srv.Logger\n\n\tticker := time.NewTicker(time.Duration(srv.Cluster.Collections.BalancePeriod))\n\n\tsigUSR1 := make(chan os.Signal, 1)\n\tsignal.Notify(sigUSR1, syscall.SIGUSR1)\n\tdefer signal.Stop(sigUSR1)\n\n\tlogger.Info(\"acquiring service lock\")\n\tdblock.KeepBalanceService.Lock(ctx, func(context.Context) (*sqlx.DB, error) { return srv.DB, nil })\n\tdefer dblock.KeepBalanceService.Unlock()\n\n\tlogger.Printf(\"starting up: will scan every %v and on SIGUSR1\", srv.Cluster.Collections.BalancePeriod)\n\n\tfor {\n\t\tif srv.Cluster.Collections.BalancePullLimit < 1 && srv.Cluster.Collections.BalanceTrashLimit < 1 {\n\t\t\tlogger.Print(\"WARNING: Will scan periodically, but no changes will be committed.\")\n\t\t\tlogger.Print(\"=======  To commit changes, set BalancePullLimit and BalanceTrashLimit values greater than zero.\")\n\t\t}\n\n\t\tif !dblock.KeepBalanceService.Check() {\n\t\t\t// context canceled\n\t\t\treturn nil\n\t\t}\n\t\t_, err := 
srv.runOnce(ctx)\n\t\tif err != nil {\n\t\t\tlogger.Print(\"run failed: \", err)\n\t\t} else {\n\t\t\tlogger.Print(\"run succeeded\")\n\t\t}\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\tcase <-ticker.C:\n\t\t\tlogger.Print(\"timer went off\")\n\t\tcase <-sigUSR1:\n\t\t\tlogger.Print(\"received SIGUSR1, resetting timer\")\n\t\t\t// Reset the timer so we don't start the N+1st\n\t\t\t// run too soon after the Nth run is triggered\n\t\t\t// by SIGUSR1.\n\t\t\tticker.Reset(time.Duration(srv.Cluster.Collections.BalancePeriod))\n\t\t}\n\t\tlogger.Print(\"starting next run\")\n\t}\n}\n"
  },
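runForever above couples a BalancePeriod ticker with SIGUSR1 so operators can trigger a scan on demand, and resets the ticker after a signal so run N+1 does not start too soon after a signal-triggered run N. The same select-plus-Reset shape, reduced to a runnable standalone sketch (a hypothetical demo, not a repo file; a 2-second period stands in for BalancePeriod):

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	period := 2 * time.Second
	ticker := time.NewTicker(period)
	defer ticker.Stop()

	sigUSR1 := make(chan os.Signal, 1)
	signal.Notify(sigUSR1, syscall.SIGUSR1)
	defer signal.Stop(sigUSR1)

	for i := 0; i < 3; i++ {
		fmt.Println("scanning") // stands in for srv.runOnce(ctx)
		select {
		case <-ticker.C:
			fmt.Println("timer went off")
		case <-sigUSR1:
			fmt.Println("received SIGUSR1, resetting timer")
			// Without this Reset, a tick could arrive just after a
			// signal-triggered run and start the next run too soon.
			ticker.Reset(period)
		}
	}
}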
  {
    "path": "services/keepproxy/keepproxy.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepproxy\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"net/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/service\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/health\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\t\"git.arvados.org/arvados.git/services/keepstore\"\n\t\"github.com/gorilla/mux\"\n\tlru \"github.com/hashicorp/golang-lru\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst rfc3339NanoFixed = \"2006-01-02T15:04:05.000000000Z07:00\"\n\nvar Command = service.Command(arvados.ServiceNameKeepproxy, newHandlerOrErrorHandler)\n\nfunc newHandlerOrErrorHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {\n\tclient, err := arvados.NewClientFromConfig(cluster)\n\tif err != nil {\n\t\treturn service.ErrorHandler(ctx, cluster, fmt.Errorf(\"Error setting up arvados client: %w\", err))\n\t}\n\tarv, err := arvadosclient.New(client)\n\tif err != nil {\n\t\treturn service.ErrorHandler(ctx, cluster, fmt.Errorf(\"Error setting up arvados client: %w\", err))\n\t}\n\tkc, err := keepclient.MakeKeepClient(arv)\n\tif err != nil {\n\t\treturn service.ErrorHandler(ctx, cluster, fmt.Errorf(\"Error setting up keep client: %w\", err))\n\t}\n\terr = kc.RegisterMetrics(reg)\n\tif err != nil {\n\t\treturn service.ErrorHandler(ctx, cluster, fmt.Errorf(\"Error setting up keep client metrics: %w\", err))\n\t}\n\tkeepclient.RefreshServiceDiscoveryOnSIGHUP()\n\trouter, err := newHandler(ctx, kc, time.Duration(keepclient.DefaultProxyRequestTimeout), cluster)\n\tif err != nil {\n\t\treturn service.ErrorHandler(ctx, cluster, err)\n\t}\n\treturn router\n}\n\ntype tokenCacheEntry struct {\n\texpire int64\n\tuser   *arvados.User\n}\n\ntype apiTokenCache struct {\n\ttokens     *lru.TwoQueueCache\n\texpireTime int64\n}\n\n// RememberToken caches the token and set an expire time.  
If the\n// token is already in the cache, it is not updated.\nfunc (cache *apiTokenCache) RememberToken(token string, user *arvados.User) {\n\tnow := time.Now().Unix()\n\t_, ok := cache.tokens.Get(token)\n\tif !ok {\n\t\tcache.tokens.Add(token, tokenCacheEntry{\n\t\t\texpire: now + cache.expireTime,\n\t\t\tuser:   user,\n\t\t})\n\t}\n}\n\n// RecallToken checks if the cached token is known and still believed to be\n// valid.\nfunc (cache *apiTokenCache) RecallToken(token string) (bool, *arvados.User) {\n\tval, ok := cache.tokens.Get(token)\n\tif !ok {\n\t\treturn false, nil\n\t}\n\n\tcacheEntry := val.(tokenCacheEntry)\n\tnow := time.Now().Unix()\n\tif now < cacheEntry.expire {\n\t\t// Token is known and still valid\n\t\treturn true, cacheEntry.user\n\t} else {\n\t\t// Token is expired\n\t\tcache.tokens.Remove(token)\n\t\treturn false, nil\n\t}\n}\n\nfunc (h *proxyHandler) Done() <-chan struct{} {\n\treturn nil\n}\n\nfunc (h *proxyHandler) CheckHealth() error {\n\treturn nil\n}\n\nfunc (h *proxyHandler) checkAuthorizationHeader(req *http.Request) (pass bool, tok string, user *arvados.User) {\n\tparts := strings.SplitN(req.Header.Get(\"Authorization\"), \" \", 2)\n\tif len(parts) < 2 || !(parts[0] == \"OAuth2\" || parts[0] == \"Bearer\") || len(parts[1]) == 0 {\n\t\treturn false, \"\", nil\n\t}\n\ttok = parts[1]\n\n\t// Tokens are validated differently depending on what kind of\n\t// operation is being performed. For example, tokens in\n\t// collection-sharing links permit GET requests, but not\n\t// PUT requests.\n\tvar op string\n\tif req.Method == \"GET\" || req.Method == \"HEAD\" {\n\t\top = \"read\"\n\t} else {\n\t\top = \"write\"\n\t}\n\n\tif ok, user := h.apiTokenCache.RecallToken(op + \":\" + tok); ok {\n\t\t// Valid in the cache, short circuit\n\t\treturn true, tok, user\n\t}\n\n\tvar err error\n\tarv := *h.KeepClient.Arvados\n\tarv.ApiToken = tok\n\tarv.RequestID = req.Header.Get(\"X-Request-Id\")\n\tuser = &arvados.User{}\n\tuserCurrentError := arv.Call(\"GET\", \"users\", \"\", \"current\", nil, user)\n\terr = userCurrentError\n\tif err != nil && op == \"read\" {\n\t\tapiError, ok := err.(arvadosclient.APIServerError)\n\t\tif ok && apiError.HttpStatusCode == http.StatusForbidden {\n\t\t\t// If it was a scoped \"sharing\" token it will\n\t\t\t// return 403 instead of 401 for the current\n\t\t\t// user check.  If it is a download operation\n\t\t\t// and they have permission to read the\n\t\t\t// keep_services table, we can allow it.\n\t\t\terr = arv.Call(\"HEAD\", \"keep_services\", \"\", \"accessible\", nil, nil)\n\t\t}\n\t}\n\tif err != nil {\n\t\tctxlog.FromContext(req.Context()).WithError(err).Info(\"checkAuthorizationHeader error\")\n\t\treturn false, \"\", nil\n\t}\n\n\tif userCurrentError == nil && user.IsAdmin {\n\t\t// checking userCurrentError is probably redundant,\n\t\t// IsAdmin would be false anyway. But can't hurt.\n\t\tif op == \"read\" && !h.cluster.Collections.KeepproxyPermission.Admin.Download {\n\t\t\treturn false, \"\", nil\n\t\t}\n\t\tif op == \"write\" && !h.cluster.Collections.KeepproxyPermission.Admin.Upload {\n\t\t\treturn false, \"\", nil\n\t\t}\n\t} else {\n\t\tif op == \"read\" && !h.cluster.Collections.KeepproxyPermission.User.Download {\n\t\t\treturn false, \"\", nil\n\t\t}\n\t\tif op == \"write\" && !h.cluster.Collections.KeepproxyPermission.User.Upload {\n\t\t\treturn false, \"\", nil\n\t\t}\n\t}\n\n\t// Success!  
Update cache\n\th.apiTokenCache.RememberToken(op+\":\"+tok, user)\n\n\treturn true, tok, user\n}\n\n// We can't copy the default http transport because http.Transport has\n// a mutex field, so we make our own using the values of the exported\n// fields.\nvar defaultTransport = http.Transport{\n\tProxy:                 http.DefaultTransport.(*http.Transport).Proxy,\n\tDialContext:           http.DefaultTransport.(*http.Transport).DialContext,\n\tForceAttemptHTTP2:     http.DefaultTransport.(*http.Transport).ForceAttemptHTTP2,\n\tMaxIdleConns:          http.DefaultTransport.(*http.Transport).MaxIdleConns,\n\tIdleConnTimeout:       http.DefaultTransport.(*http.Transport).IdleConnTimeout,\n\tTLSHandshakeTimeout:   http.DefaultTransport.(*http.Transport).TLSHandshakeTimeout,\n\tExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout,\n}\n\ntype proxyHandler struct {\n\thttp.Handler\n\t*keepclient.KeepClient\n\t*apiTokenCache\n\ttimeout   time.Duration\n\ttransport *http.Transport\n\tcluster   *arvados.Cluster\n}\n\nfunc newHandler(ctx context.Context, kc *keepclient.KeepClient, timeout time.Duration, cluster *arvados.Cluster) (service.Handler, error) {\n\trest := mux.NewRouter()\n\n\t// We can't copy the default http transport because\n\t// http.Transport has a mutex field, so we copy the fields\n\t// that we know have non-zero values in http.DefaultTransport.\n\ttransport := &http.Transport{\n\t\tProxy:                 http.DefaultTransport.(*http.Transport).Proxy,\n\t\tForceAttemptHTTP2:     http.DefaultTransport.(*http.Transport).ForceAttemptHTTP2,\n\t\tMaxIdleConns:          http.DefaultTransport.(*http.Transport).MaxIdleConns,\n\t\tIdleConnTimeout:       http.DefaultTransport.(*http.Transport).IdleConnTimeout,\n\t\tExpectContinueTimeout: http.DefaultTransport.(*http.Transport).ExpectContinueTimeout,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout:   keepclient.DefaultConnectTimeout,\n\t\t\tKeepAlive: keepclient.DefaultKeepAlive,\n\t\t\tDualStack: true,\n\t\t}).DialContext,\n\t\tTLSClientConfig:     arvadosclient.MakeTLSConfig(kc.Arvados.ApiInsecure),\n\t\tTLSHandshakeTimeout: keepclient.DefaultTLSHandshakeTimeout,\n\t}\n\n\tcacheQ, err := lru.New2Q(500)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error from lru.New2Q: %v\", err)\n\t}\n\n\th := &proxyHandler{\n\t\tHandler:    rest,\n\t\tKeepClient: kc,\n\t\ttimeout:    timeout,\n\t\ttransport:  transport,\n\t\tapiTokenCache: &apiTokenCache{\n\t\t\ttokens:     cacheQ,\n\t\t\texpireTime: 300,\n\t\t},\n\t\tcluster: cluster,\n\t}\n\n\trest.HandleFunc(`/{locator:[0-9a-f]{32}\\+.*}`, h.Get).Methods(\"GET\", \"HEAD\")\n\trest.HandleFunc(`/{locator:[0-9a-f]{32}}`, h.Get).Methods(\"GET\", \"HEAD\")\n\n\t// List all blocks\n\trest.HandleFunc(`/index`, h.Index).Methods(\"GET\")\n\n\t// List blocks whose hash has the given prefix\n\trest.HandleFunc(`/index/{prefix:[0-9a-f]{0,32}}`, h.Index).Methods(\"GET\")\n\n\trest.HandleFunc(`/{locator:[0-9a-f]{32}\\+.*}`, h.Put).Methods(\"PUT\")\n\trest.HandleFunc(`/{locator:[0-9a-f]{32}}`, h.Put).Methods(\"PUT\")\n\trest.HandleFunc(`/`, h.Put).Methods(\"POST\")\n\trest.HandleFunc(`/{any}`, h.Options).Methods(\"OPTIONS\")\n\trest.HandleFunc(`/`, h.Options).Methods(\"OPTIONS\")\n\n\trest.Handle(\"/_health/{check}\", &health.Handler{\n\t\tToken:  cluster.ManagementToken,\n\t\tPrefix: \"/_health/\",\n\t}).Methods(\"GET\")\n\n\trest.NotFoundHandler = invalidPathHandler{}\n\treturn h, nil\n}\n\nvar errLoopDetected = errors.New(\"loop detected\")\n\nfunc (h *proxyHandler) checkLoop(resp 
http.ResponseWriter, req *http.Request) error {\n\tif via := req.Header.Get(\"Via\"); strings.Index(via, \" \"+viaAlias) >= 0 {\n\t\tctxlog.FromContext(req.Context()).Printf(\"proxy loop detected (request has Via: %q): perhaps keepproxy is misidentified by gateway config as an external client, or its keep_services record does not have service_type=proxy?\", via)\n\t\thttp.Error(resp, errLoopDetected.Error(), http.StatusInternalServerError)\n\t\treturn errLoopDetected\n\t}\n\treturn nil\n}\n\nfunc setCORSHeaders(resp http.ResponseWriter) {\n\tkeepstore.SetCORSHeaders(resp)\n\tacam := \"Access-Control-Allow-Methods\"\n\tresp.Header().Set(acam, resp.Header().Get(acam)+\", POST\")\n}\n\ntype invalidPathHandler struct{}\n\nfunc (invalidPathHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\thttp.Error(resp, \"Bad request\", http.StatusBadRequest)\n}\n\nfunc (h *proxyHandler) Options(resp http.ResponseWriter, req *http.Request) {\n\tsetCORSHeaders(resp)\n}\n\nvar errBadAuthorizationHeader = errors.New(\"Missing or invalid Authorization header, or method not allowed\")\nvar errContentLengthMismatch = errors.New(\"Actual length != expected content length\")\nvar errMethodNotSupported = errors.New(\"Method not supported\")\n\nvar removeHint, _ = regexp.Compile(\"\\\\+K@[a-z0-9]{5}(\\\\+|$)\")\n\nfunc (h *proxyHandler) Get(resp http.ResponseWriter, req *http.Request) {\n\tif err := h.checkLoop(resp, req); err != nil {\n\t\treturn\n\t}\n\tsetCORSHeaders(resp)\n\tresp.Header().Set(\"Via\", req.Proto+\" \"+viaAlias)\n\n\tlocator := mux.Vars(req)[\"locator\"]\n\tvar err error\n\tvar status int\n\tvar expectLength, responseLength int64\n\n\tlogger := ctxlog.FromContext(req.Context())\n\tdefer func() {\n\t\thttpserver.SetResponseLogFields(req.Context(), logrus.Fields{\n\t\t\t\"locator\":        locator,\n\t\t\t\"expectLength\":   expectLength,\n\t\t\t\"responseLength\": responseLength,\n\t\t\t\"err\":            err,\n\t\t})\n\t\tif status != http.StatusOK {\n\t\t\thttp.Error(resp, err.Error(), status)\n\t\t}\n\t}()\n\n\tkc := h.makeKeepClient(req)\n\tkc.DiskCacheSize = keepclient.DiskCacheDisabled\n\n\tvar pass bool\n\tvar tok string\n\tvar user *arvados.User\n\tif pass, tok, user = h.checkAuthorizationHeader(req); !pass {\n\t\tstatus, err = http.StatusForbidden, errBadAuthorizationHeader\n\t\treturn\n\t}\n\thttpserver.SetResponseLogFields(req.Context(), logrus.Fields{\n\t\t\"userUUID\":     user.UUID,\n\t\t\"userFullName\": user.FullName,\n\t})\n\n\t// Copy ArvadosClient struct and use the client's API token\n\tarvclient := *kc.Arvados\n\tarvclient.ApiToken = tok\n\tkc.Arvados = &arvclient\n\n\tvar reader io.ReadCloser\n\n\tlocator = removeHint.ReplaceAllString(locator, \"$1\")\n\n\tswitch req.Method {\n\tcase \"HEAD\":\n\t\texpectLength, _, err = kc.Ask(locator)\n\tcase \"GET\":\n\t\treader, expectLength, _, err = kc.Get(locator)\n\t\tif reader != nil {\n\t\t\tdefer reader.Close()\n\t\t}\n\tdefault:\n\t\tstatus, err = http.StatusNotImplemented, errMethodNotSupported\n\t\treturn\n\t}\n\n\tif expectLength == -1 {\n\t\tlogger.Warn(\"Content-Length not provided\")\n\t}\n\n\tswitch respErr := err.(type) {\n\tcase nil:\n\t\tstatus = http.StatusOK\n\t\tresp.Header().Set(\"Content-Length\", fmt.Sprint(expectLength))\n\t\tswitch req.Method {\n\t\tcase \"HEAD\":\n\t\t\tresponseLength = 0\n\t\tcase \"GET\":\n\t\t\tresponseLength, err = io.Copy(resp, reader)\n\t\t\tif err == nil && expectLength > -1 && responseLength != expectLength {\n\t\t\t\terr = errContentLengthMismatch\n\t\t\t}\n\t\t}\n\tcase 
keepclient.Error:\n\t\tif respErr == keepclient.BlockNotFound {\n\t\t\tstatus = http.StatusNotFound\n\t\t} else if respErr.Temporary() {\n\t\t\tstatus = http.StatusBadGateway\n\t\t} else {\n\t\t\tstatus = 422\n\t\t}\n\tdefault:\n\t\tstatus = http.StatusInternalServerError\n\t}\n}\n\nvar errLengthRequired = errors.New(http.StatusText(http.StatusLengthRequired))\nvar errLengthMismatch = errors.New(\"Locator size hint does not match Content-Length header\")\n\nfunc (h *proxyHandler) Put(resp http.ResponseWriter, req *http.Request) {\n\tif err := h.checkLoop(resp, req); err != nil {\n\t\treturn\n\t}\n\tsetCORSHeaders(resp)\n\tresp.Header().Set(\"Via\", \"HTTP/1.1 \"+viaAlias)\n\n\tkc := h.makeKeepClient(req)\n\n\tvar err error\n\tvar expectLength int64\n\tvar status = http.StatusInternalServerError\n\tvar wroteReplicas int\n\tvar locatorOut string = \"-\"\n\n\tdefer func() {\n\t\thttpserver.SetResponseLogFields(req.Context(), logrus.Fields{\n\t\t\t\"expectLength\":  expectLength,\n\t\t\t\"wantReplicas\":  kc.Want_replicas,\n\t\t\t\"wroteReplicas\": wroteReplicas,\n\t\t\t\"locator\":       strings.SplitN(locatorOut, \"+A\", 2)[0],\n\t\t\t\"err\":           err,\n\t\t})\n\t\tif status != http.StatusOK {\n\t\t\thttp.Error(resp, err.Error(), status)\n\t\t}\n\t}()\n\n\tlocatorIn := mux.Vars(req)[\"locator\"]\n\n\t// Check if the client specified storage classes\n\tif req.Header.Get(keepclient.XKeepStorageClasses) != \"\" {\n\t\tvar scl []string\n\t\tfor _, sc := range strings.Split(req.Header.Get(keepclient.XKeepStorageClasses), \",\") {\n\t\t\tscl = append(scl, strings.Trim(sc, \" \"))\n\t\t}\n\t\tkc.SetStorageClasses(scl)\n\t}\n\n\t_, err = fmt.Sscanf(req.Header.Get(\"Content-Length\"), \"%d\", &expectLength)\n\tif err != nil || expectLength < 0 {\n\t\terr = errLengthRequired\n\t\tstatus = http.StatusLengthRequired\n\t\treturn\n\t}\n\n\tif locatorIn != \"\" {\n\t\tvar loc *keepclient.Locator\n\t\tif loc, err = keepclient.MakeLocator(locatorIn); err != nil {\n\t\t\tstatus = http.StatusBadRequest\n\t\t\treturn\n\t\t} else if loc.Size > 0 && int64(loc.Size) != expectLength {\n\t\t\terr = errLengthMismatch\n\t\t\tstatus = http.StatusBadRequest\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar pass bool\n\tvar tok string\n\tvar user *arvados.User\n\tif pass, tok, user = h.checkAuthorizationHeader(req); !pass {\n\t\terr = errBadAuthorizationHeader\n\t\tstatus = http.StatusForbidden\n\t\treturn\n\t}\n\thttpserver.SetResponseLogFields(req.Context(), logrus.Fields{\n\t\t\"userUUID\":     user.UUID,\n\t\t\"userFullName\": user.FullName,\n\t})\n\n\t// Copy ArvadosClient struct and use the client's API token\n\tarvclient := *kc.Arvados\n\tarvclient.ApiToken = tok\n\tkc.Arvados = &arvclient\n\n\t// Check if the client specified the number of replicas\n\tif desiredReplicas := req.Header.Get(keepclient.XKeepDesiredReplicas); desiredReplicas != \"\" {\n\t\tvar r int\n\t\t_, err := fmt.Sscanf(desiredReplicas, \"%d\", &r)\n\t\tif err == nil {\n\t\t\tkc.Want_replicas = r\n\t\t}\n\t}\n\n\t// Now try to put the block through\n\tif locatorIn == \"\" {\n\t\tbytes, err2 := ioutil.ReadAll(req.Body)\n\t\tif err2 != nil {\n\t\t\terr = fmt.Errorf(\"Error reading request body: %s\", err2)\n\t\t\tstatus = http.StatusInternalServerError\n\t\t\treturn\n\t\t}\n\t\tlocatorOut, wroteReplicas, err = kc.PutB(bytes)\n\t} else {\n\t\tlocatorOut, wroteReplicas, err = kc.PutHR(locatorIn, req.Body, expectLength)\n\t}\n\n\t// Tell the client how many successful PUTs we accomplished\n\tresp.Header().Set(keepclient.XKeepReplicasStored, fmt.Sprintf(\"%d\", 
wroteReplicas))\n\n\tswitch err.(type) {\n\tcase nil:\n\t\tstatus = http.StatusOK\n\t\tif len(kc.StorageClasses) > 0 {\n\t\t\t// A successful PUT request with storage classes means that all\n\t\t\t// storage classes were fulfilled, so the client will get a\n\t\t\t// confirmation via the X-Storage-Classes-Confirmed header.\n\t\t\thdr := \"\"\n\t\t\tisFirst := true\n\t\t\tfor _, sc := range kc.StorageClasses {\n\t\t\t\tif isFirst {\n\t\t\t\t\thdr = fmt.Sprintf(\"%s=%d\", sc, wroteReplicas)\n\t\t\t\t\tisFirst = false\n\t\t\t\t} else {\n\t\t\t\t\thdr += fmt.Sprintf(\", %s=%d\", sc, wroteReplicas)\n\t\t\t\t}\n\t\t\t}\n\t\t\tresp.Header().Set(keepclient.XKeepStorageClassesConfirmed, hdr)\n\t\t}\n\t\t_, err = io.WriteString(resp, locatorOut)\n\tcase keepclient.OversizeBlockError:\n\t\t// Too much data\n\t\tstatus = http.StatusRequestEntityTooLarge\n\tcase keepclient.InsufficientReplicasError:\n\t\tstatus = http.StatusServiceUnavailable\n\tdefault:\n\t\tstatus = http.StatusBadGateway\n\t}\n}\n\n// ServeHTTP implementation for IndexHandler\n// Supports only GET requests for /index/{prefix:[0-9a-f]{0,32}}\n// For each keep server found in LocalRoots:\n// - Invokes GetIndex using keepclient\n// - Expects \"complete\" response (terminating with blank new line)\n// - Aborts on any errors\n// Concatenates responses from all those keep servers and returns\nfunc (h *proxyHandler) Index(resp http.ResponseWriter, req *http.Request) {\n\tsetCORSHeaders(resp)\n\n\tprefix := mux.Vars(req)[\"prefix\"]\n\tvar err error\n\tvar status int\n\n\tdefer func() {\n\t\tif status != http.StatusOK {\n\t\t\thttp.Error(resp, err.Error(), status)\n\t\t}\n\t}()\n\n\tkc := h.makeKeepClient(req)\n\tok, token, _ := h.checkAuthorizationHeader(req)\n\tif !ok {\n\t\tstatus, err = http.StatusForbidden, errBadAuthorizationHeader\n\t\treturn\n\t}\n\n\t// Copy ArvadosClient struct and use the client's API token\n\tarvclient := *kc.Arvados\n\tarvclient.ApiToken = token\n\tkc.Arvados = &arvclient\n\n\t// Only GET method is supported\n\tif req.Method != \"GET\" {\n\t\tstatus, err = http.StatusNotImplemented, errMethodNotSupported\n\t\treturn\n\t}\n\n\t// Get index from all LocalRoots and write to resp\n\tvar reader io.Reader\n\tfor uuid := range kc.LocalRoots() {\n\t\treader, err = kc.GetIndex(uuid, prefix)\n\t\tif err != nil {\n\t\t\tstatus = http.StatusBadGateway\n\t\t\treturn\n\t\t}\n\n\t\t_, err = io.Copy(resp, reader)\n\t\tif err != nil {\n\t\t\tstatus = http.StatusBadGateway\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Got index from all the keep servers and wrote to resp\n\tstatus = http.StatusOK\n\tresp.Write([]byte(\"\\n\"))\n}\n\nfunc (h *proxyHandler) makeKeepClient(req *http.Request) *keepclient.KeepClient {\n\tkc := h.KeepClient.Clone()\n\tkc.RequestID = req.Header.Get(\"X-Request-Id\")\n\tkc.HTTPClient = &proxyClient{\n\t\tclient: &http.Client{\n\t\t\tTimeout:   h.timeout,\n\t\t\tTransport: h.transport,\n\t\t},\n\t\tproto: req.Proto,\n\t}\n\treturn kc\n}\n"
  },
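The apiTokenCache above remembers a successful validation for five minutes (expireTime is 300 seconds), and its cache key is op + ":" + token, so a token proven valid for "read" (for example, a collection-sharing token) is never silently trusted for "write". Here is a standalone sketch of that keying scheme; it is a hypothetical demo, not a repo file, using the same hashicorp/golang-lru TwoQueueCache the proxy uses.

package main

import (
	"fmt"
	"time"

	lru "github.com/hashicorp/golang-lru"
)

type cacheEntry struct {
	expire int64  // unix time after which the entry must be revalidated
	user   string // stands in for *arvados.User
}

func main() {
	tokens, err := lru.New2Q(500) // same size keepproxy uses
	if err != nil {
		panic(err)
	}
	const ttl = 300 // seconds, matching the proxy's expireTime

	tok := "v2/zzzzz-gj3su-0123456789abcde/secret" // hypothetical token
	now := time.Now().Unix()

	// A successful "read" validation is remembered under "read:"+tok only.
	tokens.Add("read:"+tok, cacheEntry{expire: now + ttl, user: "active"})

	if v, ok := tokens.Get("read:" + tok); ok {
		e := v.(cacheEntry)
		if time.Now().Unix() < e.expire {
			fmt.Println("read: cache hit for user", e.user)
		} else {
			tokens.Remove("read:" + tok) // expired: forget and revalidate
		}
	}

	// A PUT with the same token misses the cache and must be
	// revalidated against the API server.
	if _, ok := tokens.Get("write:" + tok); !ok {
		fmt.Println("write: cache miss, revalidate")
	}
}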
  {
    "path": "services/keepproxy/keepproxy_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepproxy\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/md5\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadosclient\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tlog \"github.com/sirupsen/logrus\"\n\n\t\"gopkg.in/check.v1\"\n\t. \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tkeepclient.DefaultRetryDelay = time.Millisecond\n\tTestingT(t)\n}\n\n// Gocheck boilerplate\nvar _ = Suite(&ServerRequiredSuite{})\n\n// Tests that require the Keep server running\ntype ServerRequiredSuite struct{}\n\n// Gocheck boilerplate\nvar _ = Suite(&ServerRequiredConfigYmlSuite{})\n\n// Tests that require the Keep servers running as defined in config.yml\ntype ServerRequiredConfigYmlSuite struct{}\n\n// Gocheck boilerplate\nvar _ = Suite(&NoKeepServerSuite{})\n\n// Test with no keepserver to simulate errors\ntype NoKeepServerSuite struct{}\n\nvar TestProxyUUID = \"zzzzz-bi6l4-lrixqc4fxofbmzz\"\n\nfunc (s *ServerRequiredSuite) SetUpSuite(c *C) {\n\tarvadostest.StartKeep(2, false)\n}\n\nfunc (s *ServerRequiredSuite) SetUpTest(c *C) {\n\tarvadostest.ResetEnv()\n}\n\nfunc (s *ServerRequiredSuite) TearDownSuite(c *C) {\n\tarvadostest.StopKeep(2)\n}\n\nfunc (s *ServerRequiredConfigYmlSuite) SetUpSuite(c *C) {\n\t// config.yml defines 4 keepstores\n\tarvadostest.StartKeep(4, false)\n}\n\nfunc (s *ServerRequiredConfigYmlSuite) SetUpTest(c *C) {\n\tarvadostest.ResetEnv()\n}\n\nfunc (s *ServerRequiredConfigYmlSuite) TearDownSuite(c *C) {\n\tarvadostest.StopKeep(4)\n}\n\nfunc (s *NoKeepServerSuite) SetUpSuite(c *C) {\n\t// We need API to have some keep services listed, but the\n\t// services themselves should be unresponsive.\n\tarvadostest.StartKeep(2, false)\n\tarvadostest.StopKeep(2)\n}\n\nfunc (s *NoKeepServerSuite) SetUpTest(c *C) {\n\tarvadostest.ResetEnv()\n}\n\ntype testServer struct {\n\t*httpserver.Server\n\tproxyHandler *proxyHandler\n}\n\nfunc runProxy(c *C, bogusClientToken bool, loadKeepstoresFromConfig bool, kp *arvados.UploadDownloadRolePermissions) (*testServer, *keepclient.KeepClient, *bytes.Buffer) {\n\tcfg, err := config.NewLoader(nil, ctxlog.TestLogger(c)).Load()\n\tc.Assert(err, Equals, nil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Assert(err, Equals, nil)\n\n\tif !loadKeepstoresFromConfig {\n\t\t// Do not load Keepstore InternalURLs from the config file\n\t\tcluster.Services.Keepstore.InternalURLs = make(map[arvados.URL]arvados.ServiceInstance)\n\t}\n\n\tcluster.Services.Keepproxy.InternalURLs = map[arvados.URL]arvados.ServiceInstance{{Host: \":0\"}: {}}\n\n\tif kp != nil {\n\t\tcluster.Collections.KeepproxyPermission = *kp\n\t}\n\n\tlogbuf := &bytes.Buffer{}\n\tlogger := log.New()\n\tlogger.Out = logbuf\n\tctx := ctxlog.Context(context.Background(), logger)\n\n\thandler := newHandlerOrErrorHandler(ctx, cluster, cluster.SystemRootToken, prometheus.NewRegistry()).(*proxyHandler)\n\tsrv := &testServer{\n\t\tServer: &httpserver.Server{\n\t\t\tServer: http.Server{\n\t\t\t\tBaseContext: 
func(net.Listener) context.Context { return ctx },\n\t\t\t\tHandler: httpserver.AddRequestIDs(\n\t\t\t\t\thttpserver.LogRequests(handler)),\n\t\t\t},\n\t\t\tAddr: \":\",\n\t\t},\n\t\tproxyHandler: handler,\n\t}\n\terr = srv.Start()\n\tc.Assert(err, IsNil)\n\n\tclient := arvados.NewClientFromEnv()\n\tarv, err := arvadosclient.New(client)\n\tc.Assert(err, IsNil)\n\tif bogusClientToken {\n\t\tarv.ApiToken = \"bogus-token\"\n\t}\n\tkc := keepclient.New(arv)\n\tkc.DiskCacheSize = keepclient.DiskCacheDisabled\n\tsr := map[string]string{\n\t\tTestProxyUUID: \"http://\" + srv.Addr,\n\t}\n\tkc.SetServiceRoots(sr, sr, sr)\n\treturn srv, kc, logbuf\n}\n\nfunc (s *ServerRequiredSuite) TestResponseViaHeader(c *C) {\n\tsrv, _, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\n\treq, err := http.NewRequest(\"POST\",\n\t\t\"http://\"+srv.Addr+\"/\",\n\t\tstrings.NewReader(\"TestViaHeader\"))\n\tc.Assert(err, Equals, nil)\n\treq.Header.Add(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\tresp, err := (&http.Client{}).Do(req)\n\tc.Assert(err, Equals, nil)\n\tc.Check(resp.Header.Get(\"Via\"), Equals, \"HTTP/1.1 keepproxy\")\n\tc.Assert(resp.StatusCode, Equals, http.StatusOK)\n\tlocator, err := ioutil.ReadAll(resp.Body)\n\tc.Assert(err, Equals, nil)\n\tresp.Body.Close()\n\n\treq, err = http.NewRequest(\"GET\",\n\t\t\"http://\"+srv.Addr+\"/\"+string(locator),\n\t\tnil)\n\tc.Assert(err, Equals, nil)\n\tresp, err = (&http.Client{}).Do(req)\n\tc.Assert(err, Equals, nil)\n\tc.Check(resp.Header.Get(\"Via\"), Equals, \"HTTP/1.1 keepproxy\")\n\tresp.Body.Close()\n}\n\nfunc (s *ServerRequiredSuite) TestLoopDetection(c *C) {\n\tsrv, kc, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\n\tsr := map[string]string{\n\t\tTestProxyUUID: \"http://\" + srv.Addr,\n\t}\n\tsrv.proxyHandler.KeepClient.SetServiceRoots(sr, sr, sr)\n\n\tcontent := []byte(\"TestLoopDetection\")\n\t_, _, err := kc.PutB(content)\n\tc.Check(err, ErrorMatches, `.*loop detected.*`)\n\n\thash := fmt.Sprintf(\"%x\", md5.Sum(content))\n\t_, _, _, err = kc.Get(hash)\n\tc.Check(err, ErrorMatches, `.*loop detected.*`)\n}\n\nfunc (s *ServerRequiredSuite) TestStorageClassesHeader(c *C) {\n\tsrv, kc, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\n\t// Set up fake keepstore to record request headers\n\tvar hdr http.Header\n\tts := httptest.NewServer(http.HandlerFunc(\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\thdr = r.Header\n\t\t\thttp.Error(w, \"Error\", http.StatusInternalServerError)\n\t\t}))\n\tdefer ts.Close()\n\n\t// Point keepproxy router's keepclient to the fake keepstore\n\tsr := map[string]string{\n\t\tTestProxyUUID: ts.URL,\n\t}\n\tsrv.proxyHandler.KeepClient.SetServiceRoots(sr, sr, sr)\n\n\t// Set up client to ask for storage classes to keepproxy\n\tkc.StorageClasses = []string{\"secure\"}\n\tcontent := []byte(\"Very important data\")\n\t_, _, err := kc.PutB(content)\n\tc.Check(err, NotNil)\n\tc.Check(hdr.Get(\"X-Keep-Storage-Classes\"), Equals, \"secure\")\n}\n\nfunc (s *ServerRequiredSuite) TestStorageClassesConfirmedHeader(c *C) {\n\tsrv, _, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\n\tcontent := []byte(\"foo\")\n\thash := fmt.Sprintf(\"%x\", md5.Sum(content))\n\tclient := &http.Client{}\n\n\treq, err := http.NewRequest(\"PUT\",\n\t\tfmt.Sprintf(\"http://%s/%s\", srv.Addr, hash),\n\t\tbytes.NewReader(content))\n\tc.Assert(err, IsNil)\n\treq.Header.Set(\"X-Keep-Storage-Classes\", \"default\")\n\treq.Header.Set(\"Authorization\", \"Bearer 
\"+arvadostest.ActiveToken)\n\treq.Header.Set(\"Content-Type\", \"application/octet-stream\")\n\n\tresp, err := client.Do(req)\n\tc.Assert(err, IsNil)\n\tc.Assert(resp.StatusCode, Equals, http.StatusOK)\n\tc.Assert(resp.Header.Get(\"X-Keep-Storage-Classes-Confirmed\"), Equals, \"default=2\")\n}\n\nfunc (s *ServerRequiredSuite) TestDesiredReplicas(c *C) {\n\tsrv, kc, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\n\tcontent := []byte(\"TestDesiredReplicas\")\n\thash := fmt.Sprintf(\"%x\", md5.Sum(content))\n\n\tfor _, kc.Want_replicas = range []int{0, 1, 2, 3} {\n\t\tlocator, rep, err := kc.PutB(content)\n\t\tif kc.Want_replicas < 3 {\n\t\t\tc.Check(err, Equals, nil)\n\t\t\tc.Check(rep, Equals, kc.Want_replicas)\n\t\t\tif rep > 0 {\n\t\t\t\tc.Check(locator, Matches, fmt.Sprintf(`^%s\\+%d(\\+.+)?$`, hash, len(content)))\n\t\t\t}\n\t\t} else {\n\t\t\tc.Check(err, ErrorMatches, \".*503.*\")\n\t\t}\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestPutWrongContentLength(c *C) {\n\tsrv, kc, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\n\tcontent := []byte(\"TestPutWrongContentLength\")\n\thash := fmt.Sprintf(\"%x\", md5.Sum(content))\n\n\t// If we use http.Client to send these requests to the network\n\t// server we just started, the Go http library automatically\n\t// fixes the invalid Content-Length header. In order to test\n\t// our server behavior, we have to call the handler directly\n\t// using an httptest.ResponseRecorder.\n\trtr, err := newHandler(context.Background(), kc, 10*time.Second, &arvados.Cluster{})\n\tc.Assert(err, check.IsNil)\n\n\ttype testcase struct {\n\t\tsendLength   string\n\t\texpectStatus int\n\t}\n\n\tfor _, t := range []testcase{\n\t\t{\"1\", http.StatusBadRequest},\n\t\t{\"\", http.StatusLengthRequired},\n\t\t{\"-1\", http.StatusLengthRequired},\n\t\t{\"abcdef\", http.StatusLengthRequired},\n\t} {\n\t\treq, err := http.NewRequest(\"PUT\",\n\t\t\tfmt.Sprintf(\"http://%s/%s+%d\", srv.Addr, hash, len(content)),\n\t\t\tbytes.NewReader(content))\n\t\tc.Assert(err, IsNil)\n\t\treq.Header.Set(\"Content-Length\", t.sendLength)\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\t\treq.Header.Set(\"Content-Type\", \"application/octet-stream\")\n\n\t\tresp := httptest.NewRecorder()\n\t\trtr.ServeHTTP(resp, req)\n\t\tc.Check(resp.Code, Equals, t.expectStatus)\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestManyFailedPuts(c *C) {\n\tsrv, kc, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\tsrv.proxyHandler.timeout = time.Nanosecond\n\n\tbuf := make([]byte, 1<<20)\n\trand.Read(buf)\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 128; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tkc.PutB(buf)\n\t\t}()\n\t}\n\tdone := make(chan bool)\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(done)\n\t}()\n\tselect {\n\tcase <-done:\n\tcase <-time.After(10 * time.Second):\n\t\tc.Error(\"timeout\")\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestPutAskGet(c *C) {\n\tsrv, kc, logbuf := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\n\thash := fmt.Sprintf(\"%x\", md5.Sum([]byte(\"foo\")))\n\tvar hash2 string\n\n\t{\n\t\t_, _, err := kc.Ask(hash)\n\t\tc.Check(err, Equals, keepclient.BlockNotFound)\n\t\tc.Log(\"Finished Ask (expected BlockNotFound)\")\n\t}\n\n\t{\n\t\treader, _, _, err := kc.Get(hash + \"+3\")\n\t\tc.Check(reader, Equals, nil)\n\t\tc.Check(err, Equals, keepclient.BlockNotFound)\n\t\tc.Log(\"Finished Get (expected BlockNotFound)\")\n\t}\n\n\t// Note in bug #5309 among other errors keepproxy would set\n\t// 
Content-Length incorrectly on the 404 BlockNotFound response; this\n\t// would result in a protocol violation that would prevent reuse of the\n\t// connection, which would manifest as the next attempt to use the\n\t// connection (in this case the PutB below) failing.  So to test for\n\t// that bug it's necessary to trigger an error response (such as\n\t// BlockNotFound) and then do something else with the same httpClient\n\t// connection.\n\n\t{\n\t\tvar rep int\n\t\tvar err error\n\t\thash2, rep, err = kc.PutB([]byte(\"foo\"))\n\t\tc.Check(hash2, Matches, fmt.Sprintf(`^%s\\+3(\\+.+)?$`, hash))\n\t\tc.Check(rep, Equals, 2)\n\t\tc.Check(err, Equals, nil)\n\t\tc.Log(\"Finished PutB (expected success)\")\n\n\t\tc.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\\+3.* userFullName=\"TestCase Administrator\".* userUUID=zzzzz-tpzed-d9tiejq69daie8f.*`)\n\t\tlogbuf.Reset()\n\t}\n\n\t{\n\t\tblocklen, _, err := kc.Ask(hash2)\n\t\tc.Assert(err, Equals, nil)\n\t\tc.Check(blocklen, Equals, int64(3))\n\t\tc.Log(\"Finished Ask (expected success)\")\n\t\tc.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\\+3.* userFullName=\"TestCase Administrator\".* userUUID=zzzzz-tpzed-d9tiejq69daie8f.*`)\n\t\tlogbuf.Reset()\n\t}\n\n\t{\n\t\treader, blocklen, _, err := kc.Get(hash2)\n\t\tc.Assert(err, Equals, nil)\n\t\tall, err := ioutil.ReadAll(reader)\n\t\tc.Check(err, IsNil)\n\t\tc.Check(all, DeepEquals, []byte(\"foo\"))\n\t\tc.Check(blocklen, Equals, int64(3))\n\t\tc.Log(\"Finished Get (expected success)\")\n\t\tc.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\\+3.* userFullName=\"TestCase Administrator\".* userUUID=zzzzz-tpzed-d9tiejq69daie8f.*`)\n\t\tlogbuf.Reset()\n\t}\n\n\t{\n\t\tvar rep int\n\t\tvar err error\n\t\thash2, rep, err = kc.PutB([]byte(\"\"))\n\t\tc.Check(hash2, Matches, `^d41d8cd98f00b204e9800998ecf8427e\\+0(\\+.+)?$`)\n\t\tc.Check(rep, Equals, 2)\n\t\tc.Check(err, Equals, nil)\n\t\tc.Log(\"Finished PutB zero block\")\n\t}\n\n\t{\n\t\treader, blocklen, _, err := kc.Get(\"d41d8cd98f00b204e9800998ecf8427e\")\n\t\tc.Assert(err, IsNil)\n\t\tall, err := ioutil.ReadAll(reader)\n\t\tc.Check(err, IsNil)\n\t\tc.Check(all, DeepEquals, []byte(\"\"))\n\t\tc.Check(blocklen, Equals, int64(0))\n\t\tc.Log(\"Finished Get zero block\")\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestPutAskGetForbidden(c *C) {\n\tsrv, kc, _ := runProxy(c, true, false, nil)\n\tdefer srv.Close()\n\n\thash := fmt.Sprintf(\"%x+3\", md5.Sum([]byte(\"bar\")))\n\n\t_, _, err := kc.Ask(hash)\n\tc.Check(err, FitsTypeOf, &keepclient.ErrNotFound{})\n\n\thash2, rep, err := kc.PutB([]byte(\"bar\"))\n\tc.Check(hash2, Equals, \"\")\n\tc.Check(rep, Equals, 0)\n\tc.Check(err, FitsTypeOf, keepclient.InsufficientReplicasError{})\n\n\tblocklen, _, err := kc.Ask(hash)\n\tc.Check(err, FitsTypeOf, &keepclient.ErrNotFound{})\n\tc.Check(err, ErrorMatches, \".*HTTP 403.*\")\n\tc.Check(blocklen, Equals, int64(0))\n\n\t_, blocklen, _, err = kc.Get(hash)\n\tc.Check(err, FitsTypeOf, &keepclient.ErrNotFound{})\n\tc.Check(err, ErrorMatches, \".*HTTP 403.*\")\n\tc.Check(blocklen, Equals, int64(0))\n}\n\nfunc testPermission(c *C, admin bool, perm arvados.UploadDownloadPermission) {\n\tkp := arvados.UploadDownloadRolePermissions{}\n\tif admin {\n\t\tkp.Admin = perm\n\t\tkp.User = arvados.UploadDownloadPermission{Upload: true, Download: true}\n\t} else {\n\t\tkp.Admin = arvados.UploadDownloadPermission{Upload: true, Download: true}\n\t\tkp.User = perm\n\t}\n\n\tsrv, kc, 
logbuf := runProxy(c, false, false, &kp)\n\tdefer srv.Close()\n\tif admin {\n\t\tkc.Arvados.ApiToken = arvadostest.AdminToken\n\t} else {\n\t\tkc.Arvados.ApiToken = arvadostest.ActiveToken\n\t}\n\n\thash := fmt.Sprintf(\"%x\", md5.Sum([]byte(\"foo\")))\n\tvar hash2 string\n\n\t{\n\t\tvar rep int\n\t\tvar err error\n\t\thash2, rep, err = kc.PutB([]byte(\"foo\"))\n\n\t\tif perm.Upload {\n\t\t\tc.Check(hash2, Matches, fmt.Sprintf(`^%s\\+3(\\+.+)?$`, hash))\n\t\t\tc.Check(rep, Equals, 2)\n\t\t\tc.Check(err, Equals, nil)\n\t\t\tc.Log(\"Finished PutB (expected success)\")\n\t\t\tif admin {\n\t\t\t\tc.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\\+3.* userFullName=\"TestCase Administrator\".* userUUID=zzzzz-tpzed-d9tiejq69daie8f.*`)\n\t\t\t} else {\n\t\t\t\tc.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\\+3.* userFullName=\"Active User\".* userUUID=zzzzz-tpzed-xurymjxw79nv3jz.*`)\n\t\t\t}\n\t\t} else {\n\t\t\tc.Check(hash2, Equals, \"\")\n\t\t\tc.Check(rep, Equals, 0)\n\t\t\tc.Check(err, FitsTypeOf, keepclient.InsufficientReplicasError{})\n\t\t}\n\t\tlogbuf.Reset()\n\t}\n\tif perm.Upload {\n\t\t// can't test download without upload.\n\n\t\treader, blocklen, _, err := kc.Get(hash2)\n\t\tif perm.Download {\n\t\t\tc.Assert(err, Equals, nil)\n\t\t\tall, err := ioutil.ReadAll(reader)\n\t\t\tc.Check(err, IsNil)\n\t\t\tc.Check(all, DeepEquals, []byte(\"foo\"))\n\t\t\tc.Check(blocklen, Equals, int64(3))\n\t\t\tc.Log(\"Finished Get (expected success)\")\n\t\t\tif admin {\n\t\t\t\tc.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\\+3.* userFullName=\"TestCase Administrator\".* userUUID=zzzzz-tpzed-d9tiejq69daie8f.*`)\n\t\t\t} else {\n\t\t\t\tc.Check(logbuf.String(), Matches, `(?ms).* locator=acbd18db4cc2f85cedef654fccc4a4d8\\+3.* userFullName=\"Active User\".* userUUID=zzzzz-tpzed-xurymjxw79nv3jz.*`)\n\t\t\t}\n\t\t} else {\n\t\t\tc.Check(err, FitsTypeOf, &keepclient.ErrNotFound{})\n\t\t\tc.Check(err, ErrorMatches, \".*Missing or invalid Authorization header, or method not allowed.*\")\n\t\t\tc.Check(blocklen, Equals, int64(0))\n\t\t}\n\t\tlogbuf.Reset()\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestPutGetPermission(c *C) {\n\tfor _, adminperm := range []bool{true, false} {\n\t\tfor _, userperm := range []bool{true, false} {\n\t\t\ttestPermission(c, true,\n\t\t\t\tarvados.UploadDownloadPermission{\n\t\t\t\t\tUpload:   adminperm,\n\t\t\t\t\tDownload: true,\n\t\t\t\t})\n\t\t\ttestPermission(c, true,\n\t\t\t\tarvados.UploadDownloadPermission{\n\t\t\t\t\tUpload:   true,\n\t\t\t\t\tDownload: adminperm,\n\t\t\t\t})\n\t\t\ttestPermission(c, false,\n\t\t\t\tarvados.UploadDownloadPermission{\n\t\t\t\t\tUpload:   userperm,\n\t\t\t\t\tDownload: true,\n\t\t\t\t})\n\t\t\ttestPermission(c, false,\n\t\t\t\tarvados.UploadDownloadPermission{\n\t\t\t\t\tUpload:   true,\n\t\t\t\t\tDownload: userperm,\n\t\t\t\t})\n\t\t}\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestCorsHeaders(c *C) {\n\tsrv, _, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\n\t{\n\t\tclient := http.Client{}\n\t\treq, err := http.NewRequest(\"OPTIONS\",\n\t\t\tfmt.Sprintf(\"http://%s/%x+3\", srv.Addr, md5.Sum([]byte(\"foo\"))),\n\t\t\tnil)\n\t\tc.Assert(err, IsNil)\n\t\treq.Header.Add(\"Access-Control-Request-Method\", \"PUT\")\n\t\treq.Header.Add(\"Access-Control-Request-Headers\", \"Authorization, X-Keep-Desired-Replicas\")\n\t\tresp, err := client.Do(req)\n\t\tc.Check(err, Equals, nil)\n\t\tc.Check(resp.StatusCode, Equals, 
200)\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tc.Check(err, IsNil)\n\t\tc.Check(string(body), Equals, \"\")\n\t\tc.Check(resp.Header.Get(\"Access-Control-Allow-Methods\"), Equals, \"GET, HEAD, PUT, OPTIONS, POST\")\n\t\tc.Check(resp.Header.Get(\"Access-Control-Allow-Origin\"), Equals, \"*\")\n\t}\n\n\t{\n\t\tresp, err := http.Get(fmt.Sprintf(\"http://%s/%x+3\", srv.Addr, md5.Sum([]byte(\"foo\"))))\n\t\tc.Check(err, Equals, nil)\n\t\tc.Check(resp.Header.Get(\"Access-Control-Allow-Headers\"), Equals, \"Authorization, Content-Length, Content-Type, X-Keep-Desired-Replicas, X-Keep-Signature, X-Keep-Storage-Classes\")\n\t\tc.Check(resp.Header.Get(\"Access-Control-Allow-Origin\"), Equals, \"*\")\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestPostWithoutHash(c *C) {\n\tsrv, _, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\n\t{\n\t\tclient := http.Client{}\n\t\treq, err := http.NewRequest(\"POST\",\n\t\t\t\"http://\"+srv.Addr+\"/\",\n\t\t\tstrings.NewReader(\"qux\"))\n\t\tc.Check(err, IsNil)\n\t\treq.Header.Add(\"Authorization\", \"Bearer \"+arvadostest.ActiveToken)\n\t\treq.Header.Add(\"Content-Type\", \"application/octet-stream\")\n\t\tresp, err := client.Do(req)\n\t\tc.Check(err, Equals, nil)\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tc.Check(err, Equals, nil)\n\t\tc.Check(string(body), Matches,\n\t\t\tfmt.Sprintf(`^%x\\+3(\\+.+)?$`, md5.Sum([]byte(\"qux\"))))\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestStripHint(c *C) {\n\tc.Check(removeHint.ReplaceAllString(\"http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73+K@zzzzz\", \"$1\"),\n\t\tEquals,\n\t\t\"http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73\")\n\tc.Check(removeHint.ReplaceAllString(\"http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+K@zzzzz+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73\", \"$1\"),\n\t\tEquals,\n\t\t\"http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73\")\n\tc.Check(removeHint.ReplaceAllString(\"http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73+K@zzzzz-zzzzz-zzzzzzzzzzzzzzz\", \"$1\"),\n\t\tEquals,\n\t\t\"http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73+K@zzzzz-zzzzz-zzzzzzzzzzzzzzz\")\n\tc.Check(removeHint.ReplaceAllString(\"http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+K@zzzzz-zzzzz-zzzzzzzzzzzzzzz+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73\", \"$1\"),\n\t\tEquals,\n\t\t\"http://keep.zzzzz.arvadosapi.com:25107/2228819a18d3727630fa30c81853d23f+67108864+K@zzzzz-zzzzz-zzzzzzzzzzzzzzz+A37b6ab198qqqq28d903b975266b23ee711e1852c@55635f73\")\n\n}\n\n// Test GetIndex\n// - Put one block, with 2 replicas\n// - With no prefix (expect the block locator, twice)\n// - With an existing prefix (expect the block locator, twice)\n// - With a valid but non-existing prefix (expect \"\\n\")\n// - With an invalid prefix (expect error)\nfunc (s *ServerRequiredSuite) TestGetIndex(c *C) {\n\tgetIndexWorker(c, false)\n}\n\n// Test GetIndex\n// - Uses config.yml\n// - Put one block, with 2 replicas\n// - With no prefix (expect the block locator, twice)\n// - With an existing prefix (expect the block locator, twice)\n// - With a valid but 
non-existing prefix (expect \"\\n\")\n// - With an invalid prefix (expect error)\nfunc (s *ServerRequiredConfigYmlSuite) TestGetIndex(c *C) {\n\tgetIndexWorker(c, true)\n}\n\nfunc getIndexWorker(c *C, useConfig bool) {\n\tsrv, kc, _ := runProxy(c, false, useConfig, nil)\n\tdefer srv.Close()\n\n\t// Put \"index-data\" blocks\n\tdata := []byte(\"index-data\")\n\thash := fmt.Sprintf(\"%x\", md5.Sum(data))\n\n\thash2, rep, err := kc.PutB(data)\n\tc.Check(hash2, Matches, fmt.Sprintf(`^%s\\+10(\\+.+)?$`, hash))\n\tc.Check(rep, Equals, 2)\n\tc.Check(err, Equals, nil)\n\n\treader, blocklen, _, err := kc.Get(hash2)\n\tc.Assert(err, IsNil)\n\tc.Check(blocklen, Equals, int64(10))\n\tall, err := ioutil.ReadAll(reader)\n\tc.Assert(err, IsNil)\n\tc.Check(all, DeepEquals, data)\n\n\t// Put some more blocks\n\t_, _, err = kc.PutB([]byte(\"some-more-index-data\"))\n\tc.Check(err, IsNil)\n\n\tkc.Arvados.ApiToken = arvadostest.SystemRootToken\n\n\t// Invoke GetIndex\n\tfor _, spec := range []struct {\n\t\tprefix         string\n\t\texpectTestHash bool\n\t\texpectOther    bool\n\t}{\n\t\t{\"\", true, true},         // with no prefix\n\t\t{hash[:3], true, false},  // with matching prefix\n\t\t{\"abcdef\", false, false}, // with no such prefix\n\t} {\n\t\tindexReader, err := kc.GetIndex(TestProxyUUID, spec.prefix)\n\t\tc.Assert(err, Equals, nil)\n\t\tindexResp, err := ioutil.ReadAll(indexReader)\n\t\tc.Assert(err, Equals, nil)\n\t\tlocators := strings.Split(string(indexResp), \"\\n\")\n\t\tgotTestHash := 0\n\t\tgotOther := 0\n\t\tfor _, locator := range locators {\n\t\t\tif locator == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.Check(locator[:len(spec.prefix)], Equals, spec.prefix)\n\t\t\tif locator[:32] == hash {\n\t\t\t\tgotTestHash++\n\t\t\t} else {\n\t\t\t\tgotOther++\n\t\t\t}\n\t\t}\n\t\tc.Check(gotTestHash == 2, Equals, spec.expectTestHash)\n\t\tc.Check(gotOther > 0, Equals, spec.expectOther)\n\t}\n\n\t// GetIndex with invalid prefix\n\t_, err = kc.GetIndex(TestProxyUUID, \"xyz\")\n\tc.Assert((err != nil), Equals, true)\n}\n\nfunc (s *ServerRequiredSuite) TestCollectionSharingToken(c *C) {\n\tsrv, kc, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\thash, _, err := kc.PutB([]byte(\"shareddata\"))\n\tc.Check(err, IsNil)\n\tkc.Arvados.ApiToken = arvadostest.FooFileCollectionSharingToken\n\trdr, _, _, err := kc.Get(hash)\n\tc.Assert(err, IsNil)\n\tdata, err := ioutil.ReadAll(rdr)\n\tc.Check(err, IsNil)\n\tc.Check(data, DeepEquals, []byte(\"shareddata\"))\n}\n\nfunc (s *ServerRequiredSuite) TestPutAskGetInvalidToken(c *C) {\n\tsrv, kc, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\n\t// Put a test block\n\thash, rep, err := kc.PutB([]byte(\"foo\"))\n\tc.Check(err, IsNil)\n\tc.Check(rep, Equals, 2)\n\n\tfor _, badToken := range []string{\n\t\t\"nosuchtoken\",\n\t\t\"2ym314ysp27sk7h943q6vtc378srb06se3pq6ghurylyf3pdmx\", // expired\n\t} {\n\t\tkc.Arvados.ApiToken = badToken\n\n\t\t// Ask and Get will fail only if the upstream\n\t\t// keepstore server checks for valid signatures.\n\t\t// Without knowing the blob signing key, there is no\n\t\t// way for keepproxy to know whether a given token is\n\t\t// permitted to read a block.  
So these tests fail:\n\t\tif false {\n\t\t\t_, _, err = kc.Ask(hash)\n\t\t\tc.Assert(err, FitsTypeOf, &keepclient.ErrNotFound{})\n\t\t\tc.Check(err.(*keepclient.ErrNotFound).Temporary(), Equals, false)\n\t\t\tc.Check(err, ErrorMatches, \".*HTTP 403.*\")\n\n\t\t\t_, _, _, err = kc.Get(hash)\n\t\t\tc.Assert(err, FitsTypeOf, &keepclient.ErrNotFound{})\n\t\t\tc.Check(err.(*keepclient.ErrNotFound).Temporary(), Equals, false)\n\t\t\tc.Check(err, ErrorMatches, \".*HTTP 403 \\\"Missing or invalid Authorization header, or method not allowed\\\".*\")\n\t\t}\n\n\t\t_, _, err = kc.PutB([]byte(\"foo\"))\n\t\tc.Check(err, ErrorMatches, \".*403.*Missing or invalid Authorization header, or method not allowed\")\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestAskGetKeepProxyConnectionError(c *C) {\n\tsrv, kc, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\n\t// Point keepproxy at a non-existent keepstore\n\tlocals := map[string]string{\n\t\tTestProxyUUID: \"http://localhost:12345\",\n\t}\n\tsrv.proxyHandler.KeepClient.SetServiceRoots(locals, nil, nil)\n\n\t// Ask should result in temporary bad gateway error\n\thash := fmt.Sprintf(\"%x\", md5.Sum([]byte(\"foo\")))\n\t_, _, err := kc.Ask(hash)\n\tc.Check(err, NotNil)\n\terrNotFound, _ := err.(*keepclient.ErrNotFound)\n\tc.Check(errNotFound.Temporary(), Equals, true)\n\tc.Assert(err, ErrorMatches, \".*HTTP 502.*\")\n\n\t// Get should result in temporary bad gateway error\n\t_, _, _, err = kc.Get(hash)\n\tc.Check(err, NotNil)\n\terrNotFound, _ = err.(*keepclient.ErrNotFound)\n\tc.Check(errNotFound.Temporary(), Equals, true)\n\tc.Assert(err, ErrorMatches, \".*HTTP 502.*\")\n}\n\nfunc (s *NoKeepServerSuite) TestAskGetNoKeepServerError(c *C) {\n\tsrv, kc, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\n\thash := fmt.Sprintf(\"%x\", md5.Sum([]byte(\"foo\")))\n\tfor _, f := range []func() error{\n\t\tfunc() error {\n\t\t\t_, _, err := kc.Ask(hash)\n\t\t\treturn err\n\t\t},\n\t\tfunc() error {\n\t\t\t_, _, _, err := kc.Get(hash)\n\t\t\treturn err\n\t\t},\n\t} {\n\t\terr := f()\n\t\tc.Check(err, NotNil)\n\t\terrNotFound, _ := err.(*keepclient.ErrNotFound)\n\t\tif c.Check(errNotFound, NotNil) {\n\t\t\tc.Check(errNotFound.Temporary(), Equals, true)\n\t\t\tc.Check(err, ErrorMatches, `.*HTTP 502.*`)\n\t\t}\n\t}\n}\n\nfunc (s *ServerRequiredSuite) TestPing(c *C) {\n\tsrv, kc, _ := runProxy(c, false, false, nil)\n\tdefer srv.Close()\n\n\trtr, err := newHandler(context.Background(), kc, 10*time.Second, &arvados.Cluster{ManagementToken: arvadostest.ManagementToken})\n\tc.Assert(err, check.IsNil)\n\n\treq, err := http.NewRequest(\"GET\",\n\t\t\"http://\"+srv.Addr+\"/_health/ping\",\n\t\tnil)\n\tc.Assert(err, IsNil)\n\treq.Header.Set(\"Authorization\", \"Bearer \"+arvadostest.ManagementToken)\n\n\tresp := httptest.NewRecorder()\n\trtr.ServeHTTP(resp, req)\n\tc.Check(resp.Code, Equals, 200)\n\tc.Assert(resp.Body.String(), Matches, `{\"health\":\"OK\"}\\n?`)\n}\n"
  },
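TestPutWrongContentLength above depends on a subtlety worth spelling out: Go's http.Client normalizes an invalid Content-Length header before the request reaches the wire, so bogus values can only be exercised by calling the handler directly with an httptest.ResponseRecorder. A reduced, runnable illustration of the same technique against a trivial stand-in handler (a hypothetical demo, not a repo file):

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Trivial stand-in for keepproxy's Put handler: it insists on a
	// parseable, non-negative Content-Length header.
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var n int64
		if _, err := fmt.Sscanf(r.Header.Get("Content-Length"), "%d", &n); err != nil || n < 0 {
			http.Error(w, "length required", http.StatusLengthRequired)
			return
		}
		w.WriteHeader(http.StatusOK)
	})

	for _, sendLength := range []string{"3", "", "-1", "abcdef"} {
		// Calling ServeHTTP directly bypasses the client/transport
		// machinery that would normalize the bogus header.
		req := httptest.NewRequest("PUT", "/acbd18db4cc2f85cedef654fccc4a4d8+3", bytes.NewReader([]byte("foo")))
		req.Header.Set("Content-Length", sendLength)
		rec := httptest.NewRecorder()
		h.ServeHTTP(rec, req)
		fmt.Printf("Content-Length=%q -> %d\n", sendLength, rec.Code) // "3" -> 200, others -> 411
	}
}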
  {
    "path": "services/keepproxy/pkg-extras/etc/default/keepproxy",
    "content": "user=\"root\"\ngroup=\"root\"\nchroot=\"/\"\nchdir=\"/\"\nnice=\"\"\nargs=\"-listen=':9100'\"\n\n"
  },
  {
    "path": "services/keepproxy/pkg-extras/etc/init.d/keepproxy",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Init script for keepproxy\n# Maintained by \n# Generated by pleaserun.\n# Implemented based on LSB Core 3.1:\n#   * Sections: 20.2, 20.3\n#\n### BEGIN INIT INFO\n# Provides:          keepproxy\n# Required-Start:    $remote_fs $syslog\n# Required-Stop:     $remote_fs $syslog\n# Default-Start:     2 3 4 5\n# Default-Stop:      0 1 6\n# Short-Description: \n# Description:       no description given\n### END INIT INFO\n\nPATH=/sbin:/usr/sbin:/bin:/usr/bin\nexport PATH\n\nname=keepproxy\nprogram=/usr/bin/keepproxy\nargs=''\npidfile=\"/var/run/$name.pid\"\n\n[ -r /etc/default/$name ] && . /etc/default/$name\n[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name\n\ntrace() {\n  logger -t \"/etc/init.d/keepproxy\" \"$@\"\n}\n\nemit() {\n  trace \"$@\"\n  echo \"$@\"\n}\n\nstart() {\n\n  # Ensure the log directory is setup correctly.\n  [ ! -d \"/var/log/\" ] && mkdir \"/var/log/\"\n  chown \"$user\":\"$group\" \"/var/log/\"\n  chmod 755 \"/var/log/\"\n\n\n  # Setup any environmental stuff beforehand\n  \n\n  # Run the program!\n  \n  chroot --userspec \"$user\":\"$group\" \"$chroot\" sh -c \"\n    \n    cd \\\"$chdir\\\"\n    exec \\\"$program\\\" $args\n  \" >> /var/log/keepproxy.stdout 2>> /var/log/keepproxy.stderr &\n\n  # Generate the pidfile from here. If we instead made the forked process\n  # generate it there will be a race condition between the pidfile writing\n  # and a process possibly asking for status.\n  echo $! > $pidfile\n\n  emit \"$name started\"\n  return 0\n}\n\nstop() {\n  # Try a few times to kill TERM the program\n  if status ; then\n    pid=$(cat \"$pidfile\")\n    trace \"Killing $name (pid $pid) with SIGTERM\"\n    kill -TERM $pid\n    # Wait for it to exit.\n    for i in 1 2 3 4 5 ; do\n      trace \"Waiting $name (pid $pid) to die...\"\n      status || break\n      sleep 1\n    done\n    if status ; then\n      emit \"$name stop failed; still running.\"\n    else\n      emit \"$name stopped.\"\n    fi\n  fi\n}\n\nstatus() {\n  if [ -f \"$pidfile\" ] ; then\n    pid=$(cat \"$pidfile\")\n    if ps -p $pid > /dev/null 2> /dev/null ; then\n      # process by this pid is running.\n      # It may not be our pid, but that's what you get with just pidfiles.\n      # TODO(sissel): Check if this process seems to be the same as the one we\n      # expect. 
It'd be nice to use flock here, but flock uses fork, not exec,\n      # so it makes it quite awkward to use in this case.\n      return 0\n    else\n      return 2 # program is dead but pid file exists\n    fi\n  else\n    return 3 # program is not running\n  fi\n}\n\nforce_stop() {\n  if status ; then\n    stop\n    status && kill -KILL $(cat \"$pidfile\")\n  fi\n}\n\n\ncase \"$1\" in\n  force-start|start|stop|force-stop|restart)\n    trace \"Attempting '$1' on keepproxy\"\n    ;;\nesac\n\ncase \"$1\" in\n  force-start)\n    PRESTART=no\n    exec \"$0\" start\n    ;;\n  start)\n    status\n    code=$?\n    if [ $code -eq 0 ]; then\n      emit \"$name is already running\"\n      exit $code\n    else\n      start\n      exit $?\n    fi\n    ;;\n  stop) stop ;;\n  force-stop) force_stop ;;\n  status) \n    status\n    code=$?\n    if [ $code -eq 0 ] ; then\n      emit \"$name is running\"\n    else\n      emit \"$name is not running\"\n    fi\n    exit $code\n    ;;\n  restart) \n    \n    stop && start \n    ;;\n  *)\n    echo \"Usage: $0 {start|force-start|stop|force-stop|status|restart}\" >&2\n    exit 3\n  ;;\nesac\n\nexit $?\n"
  },
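The generated init script's status() above implements the LSB init-script status convention with nothing but a pidfile and a process probe: 0 means running, 2 means dead but the pidfile remains, 3 means not running. As a minimal illustration only (not part of the repo; `pidfile_status` is a hypothetical helper), the same check in Ruby:

```ruby
# Probe liveness the way the init script's status() does, but in Ruby.
# LSB status codes: 0 = running, 2 = dead but pidfile exists, 3 = not running.
def pidfile_status(pidfile)
  return 3 unless File.exist?(pidfile)   # no pidfile: not running
  pid = File.read(pidfile).to_i
  return 2 unless pid > 0                # unreadable/garbage pidfile
  begin
    Process.kill(0, pid)                 # signal 0: existence check only, no signal sent
    0                                    # some process has this pid (may not be ours)
  rescue Errno::ESRCH
    2                                    # stale pidfile
  rescue Errno::EPERM
    0                                    # pid exists but belongs to another user
  end
end

puts pidfile_status("/var/run/keepproxy.pid")
```

Like the shell version, this only proves that *some* process with that pid exists, which is why the original carries the TODO about verifying it is really the expected daemon.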
  {
    "path": "services/keepproxy/proxy_client.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage keepproxy\n\nimport (\n\t\"net/http\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/keepclient\"\n)\n\nvar viaAlias = \"keepproxy\"\n\ntype proxyClient struct {\n\tclient keepclient.HTTPClient\n\tproto  string\n}\n\nfunc (pc *proxyClient) Do(req *http.Request) (*http.Response, error) {\n\treq.Header.Add(\"Via\", pc.proto+\" \"+viaAlias)\n\treturn pc.client.Do(req)\n}\n"
  },
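proxy_client.go above appends a `Via` header in the RFC 7230 `<protocol> <pseudonym>` form to every request keepproxy forwards, so a keepstore server can distinguish proxied requests from direct ones. A minimal sketch of the same convention from a Ruby client; the keepstore address is made up, and the block locator is md5("foo"), the hash used by the tests earlier in this section:

```ruby
require "net/http"
require "uri"

# Hypothetical keepstore endpoint; "acbd..." is md5("foo") with size hint +3.
uri = URI("http://keep0.example:25107/acbd18db4cc2f85cedef654fccc4a4d8+3")
req = Net::HTTP::Get.new(uri)
# add_field appends rather than replaces, mirroring Go's Header.Add in proxyClient.Do.
req.add_field("Via", "HTTP/1.1 keepproxy")
res = Net::HTTP.start(uri.host, uri.port) { |http| http.request(req) }
puts res.code
```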
  {
    "path": "services/login-sync/.gitignore",
    "content": "*.gem\nGemfile.lock"
  },
  {
    "path": "services/login-sync/Gemfile",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nsource 'https://rubygems.org'\ngemspec\ngroup :test, :performance do\n  gem 'minitest', '>= 5'\n  gem 'mocha', '>= 2.1', require: false\n  gem 'rake'\nend\n"
  },
  {
    "path": "services/login-sync/Rakefile",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'rake/testtask'\n\nRake::TestTask.new do |t|\n  t.libs << 'test'\nend\n\ndesc 'Run tests'\ntask default: :test\n"
  },
  {
    "path": "services/login-sync/agpl-3.0.txt",
    "content": "                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU Affero General Public License is a free, copyleft license for\nsoftware and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nour General Public Licenses are intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.\n\n  A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.\n\n  The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.\n\n  An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU Affero General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  
\"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. 
Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  
This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  
But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  
If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  
If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  
\"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Remote Network Interaction; Use with the GNU General Public License.\n\n  Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  
This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the work with which it is combined will remain governed by version\n3 of the GNU General Public License.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new versions\nwill be similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU Affero General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU Affero General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. 
Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a \"Source\" link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n<http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "services/login-sync/arvados-login-sync.gemspec",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nbegin\n  git_root = \"#{__dir__}/../..\"\n  git_timestamp, git_hash = IO.popen(\n    [\"git\", \"-C\", git_root,\n     \"log\", \"-n1\", \"--first-parent\", \"--format=%ct:%H\",\n     \"--\", \"build/version-at-commit.sh\", \"sdk/ruby\", \"services/login-sync\"],\n  ) do |git_log|\n    git_log.readline.chomp.split(\":\")\n  end\nrescue Errno::ENOENT\n  $stderr.puts(\"failed to get version information: 'git' not found\")\n  exit 69  # EX_UNAVAILABLE\nend\n\nif $? != 0\n  $stderr.puts(\"failed to get version information: 'git log' exited #{$?}\")\n  exit 65  # EX_DATAERR\nend\ngit_timestamp = Time.at(git_timestamp.to_i).utc\nversion = ENV[\"ARVADOS_BUILDING_VERSION\"] || IO.popen(\n            [\"#{git_root}/build/version-at-commit.sh\", git_hash],\n          ) do |ver_out|\n  ver_out.readline.chomp.encode(\"utf-8\")\nend\nversion = version.sub(\"~dev\", \".dev\").sub(\"~rc\", \".rc\")\narv_dep_version = if dev_index = (version =~ /\\.dev/)\n                    \"~> #{version[...dev_index]}.a\"\n                  else\n                    \"= #{version}\"\n                  end\n\nGem::Specification.new do |s|\n  s.name        = 'arvados-login-sync'\n  s.version     = version\n  s.date        = git_timestamp.strftime(\"%Y-%m-%d\")\n  s.summary     = \"Set up local login accounts for Arvados users\"\n  s.description = \"Creates and updates local login accounts for Arvados users. Built from git commit #{git_hash}\"\n  s.authors     = [\"Arvados Authors\"]\n  s.email       = 'packaging@arvados.org'\n  s.licenses    = ['AGPL-3.0']\n  s.files       = [\"bin/arvados-login-sync\", \"agpl-3.0.txt\"]\n  s.executables << \"arvados-login-sync\"\n  s.required_ruby_version = '>= 3.0.0'\n  # The minimum version's 'a' suffix is necessary to enable bundler\n  # to consider 'pre-release' versions.  See:\n  # https://github.com/rubygems/bundler/issues/4340\n  s.add_runtime_dependency 'arvados', arv_dep_version\n  # launchy 3.0.0 dropped Ruby 2.\n  # launchy 3.1.0 stopped testing against Ruby 3.0.\n  s.add_runtime_dependency 'launchy', '>= 2.5', '< 3.1'\n  # arvados fork of google-api-client gem with old API and new\n  # compatibility fixes, built from ../../sdk/ruby-google-api-client/\n  s.add_runtime_dependency('arvados-google-api-client', '~> 0.8.7.5')\n  s.homepage    =\n    'https://arvados.org'\nend\n"
  },
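The gemspec above derives its version from git via `version-at-commit.sh`, then maps package-style pre-release suffixes to gem-style ones; the `.a` floor is what lets the pessimistic dependency constraint admit pre-release gems. A worked example with an invented version string (the input format is assumed to match version-at-commit.sh output):

```ruby
# "2.7.0~dev20240101" is an invented example of version-at-commit.sh output.
version = "2.7.0~dev20240101".sub("~dev", ".dev").sub("~rc", ".rc")
# => "2.7.0.dev20240101"

arv_dep_version = if dev_index = (version =~ /\.dev/)
                    "~> #{version[...dev_index]}.a"   # => "~> 2.7.0.a"
                  else
                    "= #{version}"
                  end
# Plain "~> 2.7.0" would exclude 2.7.0.dev* pre-releases; the ".a" lower
# bound admits them (see the bundler issue linked in the gemspec comment).
puts arv_dep_version
```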
  {
    "path": "services/login-sync/bin/arvados-login-sync",
    "content": "#!/usr/bin/env ruby\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'rubygems'\nrequire 'pp'\nrequire 'arvados'\nrequire 'etc'\nrequire 'fileutils'\nrequire 'yaml'\nrequire 'optparse'\nrequire 'open3'\n\ndef ensure_dir(path, mode, owner, group)\n  begin\n    Dir.mkdir(path, mode)\n  rescue Errno::EEXIST\n    # No change needed\n    false\n  else\n    FileUtils.chown(owner, group, path)\n    true\n  end\nend\n\nreq_envs = %w(ARVADOS_API_HOST ARVADOS_API_TOKEN ARVADOS_VIRTUAL_MACHINE_UUID)\nreq_envs.each do |k|\n  unless ENV[k]\n    abort \"Fatal: These environment vars must be set: #{req_envs}\"\n  end\nend\n\noptions = {}\nOptionParser.new do |parser|\n  parser.on('--exclusive', 'Manage SSH keys file exclusively.')\n  parser.on('--rotate-tokens', 'Force a rotation of all user tokens.')\n  parser.on('--skip-missing-users', \"Don't try to create any local accounts.\")\n  parser.on('--token-lifetime SECONDS', 'Create user tokens that expire after SECONDS.', Integer)\n  parser.on('--debug', 'Enable debug output')\nend.parse!(into: options)\n\nexclusive_banner = \"#######################################################################################\n#  THIS FILE IS MANAGED BY #{$0} -- CHANGES WILL BE OVERWRITTEN  #\n#######################################################################################\\n\\n\"\nstart_banner = \"### BEGIN Arvados-managed keys -- changes between markers will be overwritten\\n\"\nend_banner = \"### END Arvados-managed keys -- changes between markers will be overwritten\\n\"\n\nactions = {\n  # These names correspond to the names in the cluster Users configuration.\n  # Managing everything was the original behavior.\n  SyncUserAccounts: true,\n  SyncUserGroups: true,\n  SyncUserSSHKeys: true,\n  SyncUserAPITokens: true,\n}\n\nkeys = ''\n\nbegin\n  debug = false\n  if options[:\"debug\"]\n    debug = true\n  end\n  arv = Arvados.new({ :suppress_ssl_warnings => false })\n  logincluster_host = ENV['ARVADOS_API_HOST']\n  logincluster_name = arv.cluster_config['Login']['LoginCluster'] || ''\n\n  # Requiring the fuse group was the previous hardcoded behavior\n  minimum_groups = arv.cluster_config['Users']['SyncRequiredGroups'] || ['fuse']\n  ignored_groups = arv.cluster_config['Users']['SyncIgnoredGroups'] || []\n  (minimum_groups & ignored_groups).each do |group_name|\n    STDERR.puts \"WARNING: #{group_name} is listed in both SyncRequiredGroups and SyncIgnoredGroups. It will be ignored.\"\n  end\n\n  actions.each_pair do |key, default|\n    actions[key] = arv.cluster_config['Users'].fetch(key.to_s, default)\n  end\n\n  if logincluster_name != '' and logincluster_name != arv.cluster_config['ClusterID']\n    logincluster_host = arv.cluster_config['RemoteClusters'][logincluster_name]['Host']\n  end\n  logincluster_arv = Arvados.new({ :api_host => logincluster_host,\n                                   :suppress_ssl_warnings => false })\n\n  vm_uuid = ENV['ARVADOS_VIRTUAL_MACHINE_UUID']\n\n  logins = arv.virtual_machine.logins(:uuid => vm_uuid)[:items]\n  logins = [] if logins.nil?\n  logins = logins.reject { |l| l[:username].nil? or l[:hostname].nil? 
or l[:virtual_machine_uuid] != vm_uuid }\n\n  # No system users\n  uid_min = 1000\n  open(\"/etc/login.defs\", encoding: \"utf-8\") do |login_defs|\n    login_defs.each_line do |line|\n      next unless match = /^UID_MIN\\s+(\\S+)$/.match(line)\n      if match[1].start_with?(\"0x\")\n        base = 16\n      elsif match[1].start_with?(\"0\")\n        base = 8\n      else\n        base = 10\n      end\n      new_uid_min = match[1].to_i(base)\n      uid_min = new_uid_min if (new_uid_min > 0)\n    end\n  end\n\n  pwnam = Hash.new()\n  logins.reject! do |l|\n    if not pwnam[l[:username]]\n      begin\n        pwnam[l[:username]] = Etc.getpwnam(l[:username])\n      rescue\n        if options[:\"skip-missing-users\"]\n          STDERR.puts \"Account #{l[:username]} not found. Skipping\"\n          true\n        end\n      else\n        if pwnam[l[:username]].uid < uid_min\n          STDERR.puts \"Account #{l[:username]} uid #{pwnam[l[:username]].uid} < uid_min #{uid_min}. Skipping\" if debug\n          true\n        end\n      end\n    end\n  end\n  keys = Hash.new()\n\n  # Collect all keys\n  logins.each do |l|\n    STDERR.puts(\"Considering #{l[:username]} ...\") if debug\n    keys[l[:username]] = Array.new() if not keys.has_key?(l[:username])\n    key = l[:public_key]\n    if !key.nil?\n      # Handle putty-style ssh public keys\n      key.sub!(/^(Comment: \"r[^\\n]*\\n)(.*)$/m,'ssh-rsa \\2 \\1')\n      key.sub!(/^(Comment: \"d[^\\n]*\\n)(.*)$/m,'ssh-dss \\2 \\1')\n      key.gsub!(/\\n/,'')\n      key.strip!  # strip in place; a bare strip would discard its result\n\n      keys[l[:username]].push(key) if not keys[l[:username]].include?(key)\n    end\n  end\n\n  seen = Hash.new()\n\n  all_groups = []\n  current_user_groups = Hash.new { |hash, key| hash[key] = [] }\n  while (ent = Etc.getgrent()) do\n    all_groups << ent.name\n    ent.mem.each do |member|\n      current_user_groups[member] << ent.name\n    end\n  end\n  Etc.endgrent()\n\n  logins.each do |l|\n    next if seen[l[:username]]\n    seen[l[:username]] = true\n\n    username = l[:username]\n\n    unless pwnam[l[:username]]\n      unless actions[:SyncUserAccounts]\n        STDERR.puts \"User #{username} does not exist and SyncUserAccounts=false. Skipping.\"\n        next\n      end\n      STDERR.puts \"Creating account #{l[:username]}\"\n      # Create new user\n      out, st = Open3.capture2e(\"useradd\", \"-m\",\n                \"-c\", username,\n                \"-s\", \"/bin/bash\",\n                username)\n      if st.exitstatus != 0\n        STDERR.puts \"Account creation failed for #{l[:username]}:\\n#{out}\"\n        next\n      end\n      begin\n        pwnam[username] = Etc.getpwnam(username)\n      rescue => e\n        STDERR.puts \"Created account but then getpwnam() failed for #{l[:username]}: #{e}\"\n        raise\n      end\n    end\n\n    user_gid = pwnam[username].gid\n    homedir = pwnam[l[:username]].dir\n    if !File.exist?(homedir)\n      STDERR.puts \"Cannot set up user #{username} because their home directory #{homedir} does not exist. 
Skipping.\"\n      next\n    end\n\n    if actions[:SyncUserGroups]\n      have_groups = current_user_groups[username] - ignored_groups\n      want_groups = l[:groups] || []\n      want_groups |= minimum_groups\n      want_groups -= ignored_groups\n      want_groups &= all_groups\n\n      (want_groups - have_groups).each do |addgroup|\n        # User should be in group, but isn't, so add them.\n        STDERR.puts \"Add user #{username} to #{addgroup} group\"\n        out, st = Open3.capture2e(\"usermod\", \"-aG\", addgroup, username)\n        if st.exitstatus != 0\n          STDERR.puts \"Failed to add #{username} to #{addgroup} group:\\n#{out}\"\n        end\n      end\n\n      (have_groups - want_groups).each do |removegroup|\n        # User is in a group, but shouldn't be, so remove them.\n        STDERR.puts \"Remove user #{username} from #{removegroup} group\"\n        out, st = Open3.capture2e(\"gpasswd\", \"-d\", username, removegroup)\n        if st.exitstatus != 0\n          STDERR.puts \"Failed to remove user #{username} from #{removegroup} group:\\n#{out}\"\n        end\n      end\n    end\n\n    if actions[:SyncUserSSHKeys]\n      userdotssh = File.join(homedir, \".ssh\")\n      ensure_dir(userdotssh, 0700, username, user_gid)\n\n      newkeys = \"###\\n###\\n\" + keys[l[:username]].join(\"\\n\") + \"\\n###\\n###\\n\"\n\n      keysfile = File.join(userdotssh, \"authorized_keys\")\n      begin\n        oldkeys = File.read(keysfile)\n      rescue Errno::ENOENT\n        oldkeys = \"\"\n      end\n\n      if options[:exclusive]\n        newkeys = exclusive_banner + newkeys\n      elsif oldkeys.start_with?(exclusive_banner)\n        newkeys = start_banner + newkeys + end_banner\n      elsif (m = /^(.*?\\n|)#{start_banner}(.*?\\n|)#{end_banner}(.*)/m.match(oldkeys))\n        newkeys = m[1] + start_banner + newkeys + end_banner + m[3]\n      else\n        newkeys = start_banner + newkeys + end_banner + oldkeys\n      end\n\n      if oldkeys != newkeys then\n        File.open(keysfile, 'w', 0600) do |f|\n          f.write(newkeys)\n        end\n        FileUtils.chown(username, user_gid, keysfile)\n      end\n    end\n\n    if actions[:SyncUserAPITokens]\n      userdotconfig = File.join(homedir, \".config\")\n      ensure_dir(userdotconfig, 0755, username, user_gid)\n      configarvados = File.join(userdotconfig, \"arvados\")\n      ensure_dir(configarvados, 0700, username, user_gid)\n\n      tokenfile = File.join(configarvados, \"settings.conf\")\n\n      begin\n        STDERR.puts \"Processing #{tokenfile} ...\" if debug\n        newToken = false\n        if options[:\"rotate-tokens\"] || !File.exist?(tokenfile)\n          STDERR.puts \"Account #{l[:username]} token file not found or rotation requested, creating new token.\"\n          newToken = true\n        else\n          # check if the token is still valid\n          userEnv = File.read(tokenfile)\n          if (m = /^ARVADOS_API_TOKEN=(.*?\\n)/m.match(userEnv))\n            begin\n              tmp_arv = Arvados.new({ :api_host => logincluster_host,\n                                      :api_token => (m[1]),\n                                      :suppress_ssl_warnings => false })\n              tmp_arv.user.current\n            rescue Arvados::TransactionFailedError => e\n              if e.to_s =~ /401 Unauthorized/\n                STDERR.puts \"Account #{l[:username]} token not valid, creating new token.\"\n                newToken = true\n              else\n                raise\n              end\n            end\n          end\n        end\n        if newToken\n          aca_params = {owner_uuid: l[:user_uuid]}\n          if options[:\"token-lifetime\"] && options[:\"token-lifetime\"] > 0\n            aca_params.merge!(expires_at: (Time.now + options[:\"token-lifetime\"]))\n          end\n          user_token = logincluster_arv.api_client_authorization.create(api_client_authorization: aca_params)\n          File.open(tokenfile, 'w', 0600) do |f|\n            f.write(\"ARVADOS_API_HOST=#{ENV['ARVADOS_API_HOST']}\\n\")\n            f.write(\"ARVADOS_API_TOKEN=v2/#{user_token[:uuid]}/#{user_token[:api_token]}\\n\")\n          end\n          FileUtils.chown(username, user_gid, tokenfile)\n        end\n      rescue => e\n        STDERR.puts \"Error setting token for #{l[:username]}: #{e}\"\n      end\n    end\n  end\n\nrescue Exception => bang\n  puts \"Error: \" + bang.to_s\n  puts bang.backtrace.join(\"\\n\")\n  exit 1\nend\n"
  },
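Two pieces of arvados-login-sync above reward a worked example: the group reconciliation, which is plain Array set algebra, and the authorized_keys splice, which preserves anything the user keeps outside the banner markers. The sketch below is self-contained and uses invented sample data (group names, keys, and shortened banners); the real script takes these from the cluster config and the VM login list:

```ruby
# 1. Group reconciliation via Ruby's Array set operators.
all_groups          = ["fuse", "docker", "admin", "video"]   # groups on this host
current_user_groups = ["docker", "video", "legacy"]          # user's memberships now
minimum_groups      = ["fuse"]    # SyncRequiredGroups: always wanted
ignored_groups      = ["video"]   # SyncIgnoredGroups: never touched

want_groups  = ["docker", "admin"]   # from the user's API login record
want_groups |= minimum_groups        # union in the required groups
want_groups -= ignored_groups        # drop ignored groups from management
want_groups &= all_groups            # keep only groups that exist locally

have_groups = current_user_groups - ignored_groups
p want_groups - have_groups   # => ["admin", "fuse"]  groups to `usermod -aG`
p have_groups - want_groups   # => ["legacy"]         groups to `gpasswd -d`

# 2. authorized_keys splice: managed keys live between BEGIN/END banners,
# so keys the user added outside the markers survive every run.
start_banner = "### BEGIN Arvados-managed keys\n"
end_banner   = "### END Arvados-managed keys\n"
oldkeys = "ssh-ed25519 AAAA... user@laptop\n" +
          start_banner + "ssh-rsa OLD... stale-key\n" + end_banner
newkeys = "ssh-rsa NEW... synced-key\n"

if (m = /^(.*?\n|)#{start_banner}(.*?\n|)#{end_banner}(.*)/m.match(oldkeys))
  merged = m[1] + start_banner + newkeys + end_banner + m[3]   # replace old block
else
  merged = start_banner + newkeys + end_banner + oldkeys       # first run: prepend
end
puts merged   # user@laptop key is untouched; only the managed block changed
```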
  {
    "path": "services/login-sync/test/binstub_new_user/useradd",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nstub=\"${0##*/}\"\n\n# Record what actually happened in the \"spy\" file\necho \"$stub $*\" >> \"$ARVADOS_LOGIN_SYNC_TMPDIR/spy\"\n\n# Exit 0 if this command was listed in the \"succeed\" file\nexec fgrep -qx -- \"$stub $*\" \"$ARVADOS_LOGIN_SYNC_TMPDIR/succeed\"\n"
  },
  {
    "path": "services/login-sync/test/stubs.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'etc'\nrequire 'mocha/minitest'\nrequire 'ostruct'\n\nmodule Stubs\n  def stubpasswd\n    [{name: 'root', uid: 0}]\n  end\n\n  def stubgroup\n    [{name: 'root', gid: 0}]\n  end\n\n\n  def setup\n    super\n\n    # These Etc mocks help only when we run arvados-login-sync in-process.\n    ENV['ARVADOS_VIRTUAL_MACHINE_UUID'] = 'testvm2.shell'\n    Etc.stubs(:to_enum).with(:passwd).returns stubpasswd.map { |x| OpenStruct.new x }\n    Etc.stubs(:to_enum).with(:group).returns stubgroup.map { |x| OpenStruct.new x }\n\n    # These child-ENV tricks help only when we run arvados-login-sync as a subprocess.\n    @env_was = Hash[ENV]\n    @tmpdir = Dir.mktmpdir\n  end\n\n  def teardown\n    FileUtils.remove_dir(@tmpdir)\n    ENV.select! { |k| @env_was.has_key? k }\n    @env_was.each do |k,v| ENV[k]=v end\n    super\n  end\n\n  def stubenv opts={}\n    # Use UUID of testvm2.shell fixture, unless otherwise specified by test case.\n    Hash[ENV].merge('ARVADOS_VIRTUAL_MACHINE_UUID' => 'zzzzz-2x53u-382brsig8rp3065',\n                    'ARVADOS_LOGIN_SYNC_TMPDIR' => @tmpdir)\n  end\n\n  def invoke_sync opts={}\n    env = stubenv.merge(opts[:env] || {})\n    (opts[:binstubs] || []).each do |binstub|\n      env['PATH'] = File.absolute_path('../binstub_'+binstub, __FILE__) + ':' + env['PATH']\n    end\n    login_sync_path = File.absolute_path '../../bin/arvados-login-sync', __FILE__\n    system env, login_sync_path\n  end\nend\n"
  },
  {
    "path": "services/login-sync/test/test_add_user.rb",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nrequire 'minitest/autorun'\n\nrequire 'stubs'\n\nclass TestAddUser < Minitest::Test\n  include Stubs\n\n  def test_useradd_error\n    valid_groups = %w(docker admin fuse).select { |g| Etc.getgrnam(g) rescue false }\n    # binstub_new_user/useradd will exit non-zero because its args\n    # won't match any line in this empty file:\n    File.open(@tmpdir+'/succeed', 'w') do |f| end\n    invoke_sync binstubs: ['new_user']\n    spied = File.read(@tmpdir+'/spy')\n    assert_match %r{useradd -m -c active -s /bin/bash active}, spied\n    assert_match %r{useradd -m -c adminroot -s /bin/bash adminroot}, spied\n  end\n\n  def test_useradd_success\n    # binstub_new_user/useradd will succeed.\n    File.open(@tmpdir+'/succeed', 'w') do |f|\n      f.puts 'useradd -m -c active -s /bin/bash -G active'\n      f.puts 'useradd -m -c adminroot -s /bin/bash adminroot'\n    end\n    $stderr.puts \"*** Expect crash after getpwnam() fails:\"\n    invoke_sync binstubs: ['new_user']\n    assert !$?.success?\n    spied = File.read(@tmpdir+'/spy')\n    # Expect a crash after adding one user, because Dir.mkdir({home}) fails.\n    assert_match %r{^useradd -m -c [^\\n]+\\n$}s, spied\n  end\nend\n"
  },
  {
    "path": "services/workbench2/.gitignore",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# See https://help.github.com/ignore-files/ for more about ignoring files.\n\n# dependencies\n/node_modules\n\n# vscode\n/.vs\n\n# testing\n/coverage\n/cypress/videos\n/cypress/screenshots\n/cypress/downloads\n\n# production\n/build\n\n# misc\n.DS_Store\n.env.local\n.env.development.local\n.env.test.local\n.env.production.local\n.npm.local\n\nnpm-debug.log*\nyarn-debug.log*\nyarn-error.log*\n\n.idea\n.vscode\n.eslintcache\n/public/config.json\n/public/_health/\n\n# see https://yarnpkg.com/getting-started/qa#which-files-should-be-gitignored\n.pnp.*\n.yarn/*\n!.yarn/patches\n!.yarn/plugins\n!.yarn/releases\n!.yarn/sdks\n!.yarn/versions\n"
  },
  {
    "path": "services/workbench2/.npmrc",
    "content": "save-exact=true\n"
  },
  {
    "path": "services/workbench2/.yarn/releases/yarn-3.2.0.cjs",
    "content": "#!/usr/bin/env node\n/* eslint-disable */\n//prettier-ignore\n(()=>{var afe=Object.create,Oh=Object.defineProperty,Afe=Object.defineProperties,lfe=Object.getOwnPropertyDescriptor,cfe=Object.getOwnPropertyDescriptors,ufe=Object.getOwnPropertyNames,OE=Object.getOwnPropertySymbols,gfe=Object.getPrototypeOf,lQ=Object.prototype.hasOwnProperty,iM=Object.prototype.propertyIsEnumerable;var nM=(t,e,r)=>e in t?Oh(t,e,{enumerable:!0,configurable:!0,writable:!0,value:r}):t[e]=r,N=(t,e)=>{for(var r in e||(e={}))lQ.call(e,r)&&nM(t,r,e[r]);if(OE)for(var r of OE(e))iM.call(e,r)&&nM(t,r,e[r]);return t},te=(t,e)=>Afe(t,cfe(e)),ffe=t=>Oh(t,\"__esModule\",{value:!0});var Tr=(t,e)=>{var r={};for(var i in t)lQ.call(t,i)&&e.indexOf(i)<0&&(r[i]=t[i]);if(t!=null&&OE)for(var i of OE(t))e.indexOf(i)<0&&iM.call(t,i)&&(r[i]=t[i]);return r},hfe=(t,e)=>()=>(t&&(e=t(t=0)),e),w=(t,e)=>()=>(e||t((e={exports:{}}).exports,e),e.exports),ft=(t,e)=>{for(var r in e)Oh(t,r,{get:e[r],enumerable:!0})},pfe=(t,e,r)=>{if(e&&typeof e==\"object\"||typeof e==\"function\")for(let i of ufe(e))!lQ.call(t,i)&&i!==\"default\"&&Oh(t,i,{get:()=>e[i],enumerable:!(r=lfe(e,i))||r.enumerable});return t},ge=t=>pfe(ffe(Oh(t!=null?afe(gfe(t)):{},\"default\",t&&t.__esModule&&\"default\"in t?{get:()=>t.default,enumerable:!0}:{value:t,enumerable:!0})),t);var PM=w(($Xe,vM)=>{vM.exports=SM;SM.sync=Rfe;var kM=require(\"fs\");function Ffe(t,e){var r=e.pathExt!==void 0?e.pathExt:process.env.PATHEXT;if(!r||(r=r.split(\";\"),r.indexOf(\"\")!==-1))return!0;for(var i=0;i<r.length;i++){var n=r[i].toLowerCase();if(n&&t.substr(-n.length).toLowerCase()===n)return!0}return!1}function xM(t,e,r){return!t.isSymbolicLink()&&!t.isFile()?!1:Ffe(e,r)}function SM(t,e,r){kM.stat(t,function(i,n){r(i,i?!1:xM(n,t,e))})}function Rfe(t,e){return xM(kM.statSync(t),t,e)}});var LM=w((eZe,DM)=>{DM.exports=RM;RM.sync=Nfe;var FM=require(\"fs\");function RM(t,e,r){FM.stat(t,function(i,n){r(i,i?!1:NM(n,e))})}function Nfe(t,e){return NM(FM.statSync(t),e)}function NM(t,e){return t.isFile()&&Lfe(t,e)}function Lfe(t,e){var r=t.mode,i=t.uid,n=t.gid,s=e.uid!==void 0?e.uid:process.getuid&&process.getuid(),o=e.gid!==void 0?e.gid:process.getgid&&process.getgid(),a=parseInt(\"100\",8),l=parseInt(\"010\",8),c=parseInt(\"001\",8),u=a|l,g=r&c||r&l&&n===o||r&a&&i===s||r&u&&s===0;return g}});var OM=w((rZe,TM)=>{var tZe=require(\"fs\"),XE;process.platform===\"win32\"||global.TESTING_WINDOWS?XE=PM():XE=LM();TM.exports=vQ;vQ.sync=Tfe;function vQ(t,e,r){if(typeof e==\"function\"&&(r=e,e={}),!r){if(typeof Promise!=\"function\")throw new TypeError(\"callback not provided\");return new Promise(function(i,n){vQ(t,e||{},function(s,o){s?n(s):i(o)})})}XE(t,e||{},function(i,n){i&&(i.code===\"EACCES\"||e&&e.ignoreErrors)&&(i=null,n=!1),r(i,n)})}function Tfe(t,e){try{return XE.sync(t,e||{})}catch(r){if(e&&e.ignoreErrors||r.code===\"EACCES\")return!1;throw r}}});var YM=w((iZe,MM)=>{var Ju=process.platform===\"win32\"||process.env.OSTYPE===\"cygwin\"||process.env.OSTYPE===\"msys\",UM=require(\"path\"),Ofe=Ju?\";\":\":\",KM=OM(),HM=t=>Object.assign(new Error(`not found: ${t}`),{code:\"ENOENT\"}),jM=(t,e)=>{let r=e.colon||Ofe,i=t.match(/\\//)||Ju&&t.match(/\\\\/)?[\"\"]:[...Ju?[process.cwd()]:[],...(e.path||process.env.PATH||\"\").split(r)],n=Ju?e.pathExt||process.env.PATHEXT||\".EXE;.CMD;.BAT;.COM\":\"\",s=Ju?n.split(r):[\"\"];return Ju&&t.indexOf(\".\")!==-1&&s[0]!==\"\"&&s.unshift(\"\"),{pathEnv:i,pathExt:s,pathExtExe:n}},GM=(t,e,r)=>{typeof 
e==\"function\"&&(r=e,e={}),e||(e={});let{pathEnv:i,pathExt:n,pathExtExe:s}=jM(t,e),o=[],a=c=>new Promise((u,g)=>{if(c===i.length)return e.all&&o.length?u(o):g(HM(t));let f=i[c],h=/^\".*\"$/.test(f)?f.slice(1,-1):f,p=UM.join(h,t),m=!h&&/^\\.[\\\\\\/]/.test(t)?t.slice(0,2)+p:p;u(l(m,c,0))}),l=(c,u,g)=>new Promise((f,h)=>{if(g===n.length)return f(a(u+1));let p=n[g];KM(c+p,{pathExt:s},(m,y)=>{if(!m&&y)if(e.all)o.push(c+p);else return f(c+p);return f(l(c,u,g+1))})});return r?a(0).then(c=>r(null,c),r):a(0)},Mfe=(t,e)=>{e=e||{};let{pathEnv:r,pathExt:i,pathExtExe:n}=jM(t,e),s=[];for(let o=0;o<r.length;o++){let a=r[o],l=/^\".*\"$/.test(a)?a.slice(1,-1):a,c=UM.join(l,t),u=!l&&/^\\.[\\\\\\/]/.test(t)?t.slice(0,2)+c:c;for(let g=0;g<i.length;g++){let f=u+i[g];try{if(KM.sync(f,{pathExt:n}))if(e.all)s.push(f);else return f}catch(h){}}}if(e.all&&s.length)return s;if(e.nothrow)return null;throw HM(t)};MM.exports=GM;GM.sync=Mfe});var JM=w((nZe,SQ)=>{\"use strict\";var qM=(t={})=>{let e=t.env||process.env;return(t.platform||process.platform)!==\"win32\"?\"PATH\":Object.keys(e).reverse().find(i=>i.toUpperCase()===\"PATH\")||\"Path\"};SQ.exports=qM;SQ.exports.default=qM});var VM=w((sZe,WM)=>{\"use strict\";var zM=require(\"path\"),Ufe=YM(),Kfe=JM();function _M(t,e){let r=t.options.env||process.env,i=process.cwd(),n=t.options.cwd!=null,s=n&&process.chdir!==void 0&&!process.chdir.disabled;if(s)try{process.chdir(t.options.cwd)}catch(a){}let o;try{o=Ufe.sync(t.command,{path:r[Kfe({env:r})],pathExt:e?zM.delimiter:void 0})}catch(a){}finally{s&&process.chdir(i)}return o&&(o=zM.resolve(n?t.options.cwd:\"\",o)),o}function Hfe(t){return _M(t)||_M(t,!0)}WM.exports=Hfe});var XM=w((oZe,kQ)=>{\"use strict\";var xQ=/([()\\][%!^\"`<>&|;, *?])/g;function jfe(t){return t=t.replace(xQ,\"^$1\"),t}function Gfe(t,e){return t=`${t}`,t=t.replace(/(\\\\*)\"/g,'$1$1\\\\\"'),t=t.replace(/(\\\\*)$/,\"$1$1\"),t=`\"${t}\"`,t=t.replace(xQ,\"^$1\"),e&&(t=t.replace(xQ,\"^$1\")),t}kQ.exports.command=jfe;kQ.exports.argument=Gfe});var $M=w((aZe,ZM)=>{\"use strict\";ZM.exports=/^#!(.*)/});var t1=w((AZe,e1)=>{\"use strict\";var Yfe=$M();e1.exports=(t=\"\")=>{let e=t.match(Yfe);if(!e)return null;let[r,i]=e[0].replace(/#! 
?/,\"\").split(\" \"),n=r.split(\"/\").pop();return n===\"env\"?i:i?`${n} ${i}`:n}});var i1=w((lZe,r1)=>{\"use strict\";var PQ=require(\"fs\"),qfe=t1();function Jfe(t){let e=150,r=Buffer.alloc(e),i;try{i=PQ.openSync(t,\"r\"),PQ.readSync(i,r,0,e,0),PQ.closeSync(i)}catch(n){}return qfe(r.toString())}r1.exports=Jfe});var a1=w((cZe,n1)=>{\"use strict\";var Wfe=require(\"path\"),s1=VM(),o1=XM(),zfe=i1(),_fe=process.platform===\"win32\",Vfe=/\\.(?:com|exe)$/i,Xfe=/node_modules[\\\\/].bin[\\\\/][^\\\\/]+\\.cmd$/i;function Zfe(t){t.file=s1(t);let e=t.file&&zfe(t.file);return e?(t.args.unshift(t.file),t.command=e,s1(t)):t.file}function $fe(t){if(!_fe)return t;let e=Zfe(t),r=!Vfe.test(e);if(t.options.forceShell||r){let i=Xfe.test(e);t.command=Wfe.normalize(t.command),t.command=o1.command(t.command),t.args=t.args.map(s=>o1.argument(s,i));let n=[t.command].concat(t.args).join(\" \");t.args=[\"/d\",\"/s\",\"/c\",`\"${n}\"`],t.command=process.env.comspec||\"cmd.exe\",t.options.windowsVerbatimArguments=!0}return t}function ehe(t,e,r){e&&!Array.isArray(e)&&(r=e,e=null),e=e?e.slice(0):[],r=Object.assign({},r);let i={command:t,args:e,options:r,file:void 0,original:{command:t,args:e}};return r.shell?i:$fe(i)}n1.exports=ehe});var c1=w((uZe,A1)=>{\"use strict\";var DQ=process.platform===\"win32\";function RQ(t,e){return Object.assign(new Error(`${e} ${t.command} ENOENT`),{code:\"ENOENT\",errno:\"ENOENT\",syscall:`${e} ${t.command}`,path:t.command,spawnargs:t.args})}function the(t,e){if(!DQ)return;let r=t.emit;t.emit=function(i,n){if(i===\"exit\"){let s=l1(n,e,\"spawn\");if(s)return r.call(t,\"error\",s)}return r.apply(t,arguments)}}function l1(t,e){return DQ&&t===1&&!e.file?RQ(e.original,\"spawn\"):null}function rhe(t,e){return DQ&&t===1&&!e.file?RQ(e.original,\"spawnSync\"):null}A1.exports={hookChildProcess:the,verifyENOENT:l1,verifyENOENTSync:rhe,notFoundError:RQ}});var LQ=w((gZe,Wu)=>{\"use strict\";var u1=require(\"child_process\"),FQ=a1(),NQ=c1();function g1(t,e,r){let i=FQ(t,e,r),n=u1.spawn(i.command,i.args,i.options);return NQ.hookChildProcess(n,i),n}function ihe(t,e,r){let i=FQ(t,e,r),n=u1.spawnSync(i.command,i.args,i.options);return n.error=n.error||NQ.verifyENOENTSync(n.status,i),n}Wu.exports=g1;Wu.exports.spawn=g1;Wu.exports.sync=ihe;Wu.exports._parse=FQ;Wu.exports._enoent=NQ});var h1=w((fZe,f1)=>{\"use strict\";function nhe(t,e){function r(){this.constructor=t}r.prototype=e.prototype,t.prototype=new r}function nc(t,e,r,i){this.message=t,this.expected=e,this.found=r,this.location=i,this.name=\"SyntaxError\",typeof Error.captureStackTrace==\"function\"&&Error.captureStackTrace(this,nc)}nhe(nc,Error);nc.buildMessage=function(t,e){var r={literal:function(c){return'\"'+n(c.text)+'\"'},class:function(c){var u=\"\",g;for(g=0;g<c.parts.length;g++)u+=c.parts[g]instanceof Array?s(c.parts[g][0])+\"-\"+s(c.parts[g][1]):s(c.parts[g]);return\"[\"+(c.inverted?\"^\":\"\")+u+\"]\"},any:function(c){return\"any character\"},end:function(c){return\"end of input\"},other:function(c){return c.description}};function i(c){return c.charCodeAt(0).toString(16).toUpperCase()}function n(c){return c.replace(/\\\\/g,\"\\\\\\\\\").replace(/\"/g,'\\\\\"').replace(/\\0/g,\"\\\\0\").replace(/\\t/g,\"\\\\t\").replace(/\\n/g,\"\\\\n\").replace(/\\r/g,\"\\\\r\").replace(/[\\x00-\\x0F]/g,function(u){return\"\\\\x0\"+i(u)}).replace(/[\\x10-\\x1F\\x7F-\\x9F]/g,function(u){return\"\\\\x\"+i(u)})}function s(c){return 
c.replace(/\\\\/g,\"\\\\\\\\\").replace(/\\]/g,\"\\\\]\").replace(/\\^/g,\"\\\\^\").replace(/-/g,\"\\\\-\").replace(/\\0/g,\"\\\\0\").replace(/\\t/g,\"\\\\t\").replace(/\\n/g,\"\\\\n\").replace(/\\r/g,\"\\\\r\").replace(/[\\x00-\\x0F]/g,function(u){return\"\\\\x0\"+i(u)}).replace(/[\\x10-\\x1F\\x7F-\\x9F]/g,function(u){return\"\\\\x\"+i(u)})}function o(c){return r[c.type](c)}function a(c){var u=new Array(c.length),g,f;for(g=0;g<c.length;g++)u[g]=o(c[g]);if(u.sort(),u.length>0){for(g=1,f=1;g<u.length;g++)u[g-1]!==u[g]&&(u[f]=u[g],f++);u.length=f}switch(u.length){case 1:return u[0];case 2:return u[0]+\" or \"+u[1];default:return u.slice(0,-1).join(\", \")+\", or \"+u[u.length-1]}}function l(c){return c?'\"'+n(c)+'\"':\"end of input\"}return\"Expected \"+a(t)+\" but \"+l(e)+\" found.\"};function she(t,e){e=e!==void 0?e:{};var r={},i={Start:OA},n=OA,s=function(C){return C||[]},o=function(C,b,F){return[{command:C,type:b}].concat(F||[])},a=function(C,b){return[{command:C,type:b||\";\"}]},l=function(C){return C},c=\";\",u=Ce(\";\",!1),g=\"&\",f=Ce(\"&\",!1),h=function(C,b){return b?{chain:C,then:b}:{chain:C}},p=function(C,b){return{type:C,line:b}},m=\"&&\",y=Ce(\"&&\",!1),Q=\"||\",S=Ce(\"||\",!1),x=function(C,b){return b?te(N({},C),{then:b}):C},M=function(C,b){return{type:C,chain:b}},Y=\"|&\",U=Ce(\"|&\",!1),J=\"|\",W=Ce(\"|\",!1),ee=\"=\",Z=Ce(\"=\",!1),A=function(C,b){return{name:C,args:[b]}},ne=function(C){return{name:C,args:[]}},le=\"(\",Ae=Ce(\"(\",!1),T=\")\",L=Ce(\")\",!1),Ee=function(C,b){return{type:\"subshell\",subshell:C,args:b}},we=\"{\",qe=Ce(\"{\",!1),re=\"}\",se=Ce(\"}\",!1),Qe=function(C,b){return{type:\"group\",group:C,args:b}},he=function(C,b){return{type:\"command\",args:b,envs:C}},Fe=function(C){return{type:\"envs\",envs:C}},Ue=function(C){return C},xe=function(C){return C},ve=/^[0-9]/,pe=_e([[\"0\",\"9\"]],!1,!1),X=function(C,b,F){return{type:\"redirection\",subtype:b,fd:C!==null?parseInt(C):null,args:[F]}},be=\">>\",ce=Ce(\">>\",!1),fe=\">&\",gt=Ce(\">&\",!1),Ht=\">\",Mt=Ce(\">\",!1),mi=\"<<<\",jt=Ce(\"<<<\",!1),Qr=\"<&\",Ti=Ce(\"<&\",!1),_s=\"<\",Un=Ce(\"<\",!1),Kn=function(C){return{type:\"argument\",segments:[].concat(...C)}},vr=function(C){return C},Hn=\"$'\",us=Ce(\"$'\",!1),Ia=\"'\",SA=Ce(\"'\",!1),Du=function(C){return[{type:\"text\",text:C}]},gs='\"\"',kA=Ce('\"\"',!1),ya=function(){return{type:\"text\",text:\"\"}},Ru='\"',xA=Ce('\"',!1),PA=function(C){return C},Sr=function(C){return{type:\"arithmetic\",arithmetic:C,quoted:!0}},jl=function(C){return{type:\"shell\",shell:C,quoted:!0}},Fu=function(C){return te(N({type:\"variable\"},C),{quoted:!0})},So=function(C){return{type:\"text\",text:C}},Nu=function(C){return{type:\"arithmetic\",arithmetic:C,quoted:!1}},Qh=function(C){return{type:\"shell\",shell:C,quoted:!1}},vh=function(C){return te(N({type:\"variable\"},C),{quoted:!1})},oe=function(C){return{type:\"glob\",pattern:C}},Oi=/^[^']/,ko=_e([\"'\"],!0,!1),jn=function(C){return C.join(\"\")},Lu=/^[^$\"]/,vt=_e([\"$\",'\"'],!0,!1),Gl=`\\\\\n`,Gn=Ce(`\\\\\n`,!1),fs=function(){return\"\"},hs=\"\\\\\",pt=Ce(\"\\\\\",!1),xo=/^[\\\\$\"`]/,lt=_e([\"\\\\\",\"$\",'\"',\"`\"],!1,!1),mn=function(C){return 
C},v=\"\\\\a\",Tt=Ce(\"\\\\a\",!1),Tu=function(){return\"a\"},Yl=\"\\\\b\",Sh=Ce(\"\\\\b\",!1),kh=function(){return\"\\b\"},xh=/^[Ee]/,Ph=_e([\"E\",\"e\"],!1,!1),Dh=function(){return\"\u001b\"},G=\"\\\\f\",yt=Ce(\"\\\\f\",!1),DA=function(){return\"\\f\"},$i=\"\\\\n\",ql=Ce(\"\\\\n\",!1),$e=function(){return`\n`},wa=\"\\\\r\",Ou=Ce(\"\\\\r\",!1),SE=function(){return\"\\r\"},Rh=\"\\\\t\",kE=Ce(\"\\\\t\",!1),gr=function(){return\"\t\"},Yn=\"\\\\v\",Jl=Ce(\"\\\\v\",!1),Fh=function(){return\"\\v\"},Vs=/^[\\\\'\"?]/,Ba=_e([\"\\\\\",\"'\",'\"',\"?\"],!1,!1),En=function(C){return String.fromCharCode(parseInt(C,16))},Oe=\"\\\\x\",Mu=Ce(\"\\\\x\",!1),Wl=\"\\\\u\",Xs=Ce(\"\\\\u\",!1),zl=\"\\\\U\",RA=Ce(\"\\\\U\",!1),Uu=function(C){return String.fromCodePoint(parseInt(C,16))},Ku=/^[0-7]/,ba=_e([[\"0\",\"7\"]],!1,!1),Qa=/^[0-9a-fA-f]/,it=_e([[\"0\",\"9\"],[\"a\",\"f\"],[\"A\",\"f\"]],!1,!1),Po=ot(),FA=\"-\",_l=Ce(\"-\",!1),Zs=\"+\",Vl=Ce(\"+\",!1),xE=\".\",Nh=Ce(\".\",!1),Hu=function(C,b,F){return{type:\"number\",value:(C===\"-\"?-1:1)*parseFloat(b.join(\"\")+\".\"+F.join(\"\"))}},Lh=function(C,b){return{type:\"number\",value:(C===\"-\"?-1:1)*parseInt(b.join(\"\"))}},PE=function(C){return N({type:\"variable\"},C)},Xl=function(C){return{type:\"variable\",name:C}},DE=function(C){return C},ju=\"*\",NA=Ce(\"*\",!1),Lr=\"/\",RE=Ce(\"/\",!1),$s=function(C,b,F){return{type:b===\"*\"?\"multiplication\":\"division\",right:F}},eo=function(C,b){return b.reduce((F,H)=>N({left:F},H),C)},Gu=function(C,b,F){return{type:b===\"+\"?\"addition\":\"subtraction\",right:F}},LA=\"$((\",R=Ce(\"$((\",!1),q=\"))\",de=Ce(\"))\",!1),He=function(C){return C},Te=\"$(\",Xe=Ce(\"$(\",!1),Et=function(C){return C},Rt=\"${\",qn=Ce(\"${\",!1),Jb=\":-\",xO=Ce(\":-\",!1),PO=function(C,b){return{name:C,defaultValue:b}},Wb=\":-}\",DO=Ce(\":-}\",!1),RO=function(C){return{name:C,defaultValue:[]}},zb=\":+\",FO=Ce(\":+\",!1),NO=function(C,b){return{name:C,alternativeValue:b}},_b=\":+}\",LO=Ce(\":+}\",!1),TO=function(C){return{name:C,alternativeValue:[]}},Vb=function(C){return{name:C}},OO=\"$\",MO=Ce(\"$\",!1),UO=function(C){return e.isGlobPattern(C)},KO=function(C){return C},Xb=/^[a-zA-Z0-9_]/,Zb=_e([[\"a\",\"z\"],[\"A\",\"Z\"],[\"0\",\"9\"],\"_\"],!1,!1),$b=function(){return O()},eQ=/^[$@*?#a-zA-Z0-9_\\-]/,tQ=_e([\"$\",\"@\",\"*\",\"?\",\"#\",[\"a\",\"z\"],[\"A\",\"Z\"],[\"0\",\"9\"],\"_\",\"-\"],!1,!1),HO=/^[(){}<>$|&; \\t\"']/,Yu=_e([\"(\",\")\",\"{\",\"}\",\"<\",\">\",\"$\",\"|\",\"&\",\";\",\" \",\"\t\",'\"',\"'\"],!1,!1),rQ=/^[<>&; \\t\"']/,iQ=_e([\"<\",\">\",\"&\",\";\",\" \",\"\t\",'\"',\"'\"],!1,!1),FE=/^[ \\t]/,NE=_e([\" \",\"\t\"],!1,!1),B=0,Ke=0,TA=[{line:1,column:1}],d=0,E=[],I=0,D;if(\"startRule\"in e){if(!(e.startRule in i))throw new Error(`Can't start parsing from rule \"`+e.startRule+'\".');n=i[e.startRule]}function O(){return t.substring(Ke,B)}function V(){return It(Ke,B)}function ie(C,b){throw b=b!==void 0?b:It(Ke,B),Mi([ut(C)],t.substring(Ke,B),b)}function Be(C,b){throw b=b!==void 0?b:It(Ke,B),Jn(C,b)}function Ce(C,b){return{type:\"literal\",text:C,ignoreCase:b}}function _e(C,b,F){return{type:\"class\",parts:C,inverted:b,ignoreCase:F}}function ot(){return{type:\"any\"}}function wt(){return{type:\"end\"}}function ut(C){return{type:\"other\",description:C}}function nt(C){var b=TA[C],F;if(b)return b;for(F=C-1;!TA[F];)F--;for(b=TA[F],b={line:b.line,column:b.column};F<C;)t.charCodeAt(F)===10?(b.line++,b.column=1):b.column++,F++;return TA[C]=b,b}function It(C,b){var 
F=nt(C),H=nt(b);return{start:{offset:C,line:F.line,column:F.column},end:{offset:b,line:H.line,column:H.column}}}function ke(C){B<d||(B>d&&(d=B,E=[]),E.push(C))}function Jn(C,b){return new nc(C,null,null,b)}function Mi(C,b,F){return new nc(nc.buildMessage(C,b),C,b,F)}function OA(){var C,b;return C=B,b=Gr(),b===r&&(b=null),b!==r&&(Ke=C,b=s(b)),C=b,C}function Gr(){var C,b,F,H,ue;if(C=B,b=Yr(),b!==r){for(F=[],H=je();H!==r;)F.push(H),H=je();F!==r?(H=va(),H!==r?(ue=ps(),ue===r&&(ue=null),ue!==r?(Ke=C,b=o(b,H,ue),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r)}else B=C,C=r;if(C===r)if(C=B,b=Yr(),b!==r){for(F=[],H=je();H!==r;)F.push(H),H=je();F!==r?(H=va(),H===r&&(H=null),H!==r?(Ke=C,b=a(b,H),C=b):(B=C,C=r)):(B=C,C=r)}else B=C,C=r;return C}function ps(){var C,b,F,H,ue;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r)if(F=Gr(),F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();H!==r?(Ke=C,b=l(F),C=b):(B=C,C=r)}else B=C,C=r;else B=C,C=r;return C}function va(){var C;return t.charCodeAt(B)===59?(C=c,B++):(C=r,I===0&&ke(u)),C===r&&(t.charCodeAt(B)===38?(C=g,B++):(C=r,I===0&&ke(f))),C}function Yr(){var C,b,F;return C=B,b=jO(),b!==r?(F=Yge(),F===r&&(F=null),F!==r?(Ke=C,b=h(b,F),C=b):(B=C,C=r)):(B=C,C=r),C}function Yge(){var C,b,F,H,ue,De,Ct;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r)if(F=qge(),F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();if(H!==r)if(ue=Yr(),ue!==r){for(De=[],Ct=je();Ct!==r;)De.push(Ct),Ct=je();De!==r?(Ke=C,b=p(F,ue),C=b):(B=C,C=r)}else B=C,C=r;else B=C,C=r}else B=C,C=r;else B=C,C=r;return C}function qge(){var C;return t.substr(B,2)===m?(C=m,B+=2):(C=r,I===0&&ke(y)),C===r&&(t.substr(B,2)===Q?(C=Q,B+=2):(C=r,I===0&&ke(S))),C}function jO(){var C,b,F;return C=B,b=zge(),b!==r?(F=Jge(),F===r&&(F=null),F!==r?(Ke=C,b=x(b,F),C=b):(B=C,C=r)):(B=C,C=r),C}function Jge(){var C,b,F,H,ue,De,Ct;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r)if(F=Wge(),F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();if(H!==r)if(ue=jO(),ue!==r){for(De=[],Ct=je();Ct!==r;)De.push(Ct),Ct=je();De!==r?(Ke=C,b=M(F,ue),C=b):(B=C,C=r)}else B=C,C=r;else B=C,C=r}else B=C,C=r;else B=C,C=r;return C}function Wge(){var C;return t.substr(B,2)===Y?(C=Y,B+=2):(C=r,I===0&&ke(U)),C===r&&(t.charCodeAt(B)===124?(C=J,B++):(C=r,I===0&&ke(W))),C}function LE(){var C,b,F,H,ue,De;if(C=B,b=eM(),b!==r)if(t.charCodeAt(B)===61?(F=ee,B++):(F=r,I===0&&ke(Z)),F!==r)if(H=qO(),H!==r){for(ue=[],De=je();De!==r;)ue.push(De),De=je();ue!==r?(Ke=C,b=A(b,H),C=b):(B=C,C=r)}else B=C,C=r;else B=C,C=r;else B=C,C=r;if(C===r)if(C=B,b=eM(),b!==r)if(t.charCodeAt(B)===61?(F=ee,B++):(F=r,I===0&&ke(Z)),F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();H!==r?(Ke=C,b=ne(b),C=b):(B=C,C=r)}else B=C,C=r;else B=C,C=r;return C}function zge(){var C,b,F,H,ue,De,Ct,bt,Zr,Ei,ds;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r)if(t.charCodeAt(B)===40?(F=le,B++):(F=r,I===0&&ke(Ae)),F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();if(H!==r)if(ue=Gr(),ue!==r){for(De=[],Ct=je();Ct!==r;)De.push(Ct),Ct=je();if(De!==r)if(t.charCodeAt(B)===41?(Ct=T,B++):(Ct=r,I===0&&ke(L)),Ct!==r){for(bt=[],Zr=je();Zr!==r;)bt.push(Zr),Zr=je();if(bt!==r){for(Zr=[],Ei=Th();Ei!==r;)Zr.push(Ei),Ei=Th();if(Zr!==r){for(Ei=[],ds=je();ds!==r;)Ei.push(ds),ds=je();Ei!==r?(Ke=C,b=Ee(ue,Zr),C=b):(B=C,C=r)}else B=C,C=r}else B=C,C=r}else B=C,C=r;else B=C,C=r}else B=C,C=r;else B=C,C=r}else B=C,C=r;else 
B=C,C=r;if(C===r){for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r)if(t.charCodeAt(B)===123?(F=we,B++):(F=r,I===0&&ke(qe)),F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();if(H!==r)if(ue=Gr(),ue!==r){for(De=[],Ct=je();Ct!==r;)De.push(Ct),Ct=je();if(De!==r)if(t.charCodeAt(B)===125?(Ct=re,B++):(Ct=r,I===0&&ke(se)),Ct!==r){for(bt=[],Zr=je();Zr!==r;)bt.push(Zr),Zr=je();if(bt!==r){for(Zr=[],Ei=Th();Ei!==r;)Zr.push(Ei),Ei=Th();if(Zr!==r){for(Ei=[],ds=je();ds!==r;)Ei.push(ds),ds=je();Ei!==r?(Ke=C,b=Qe(ue,Zr),C=b):(B=C,C=r)}else B=C,C=r}else B=C,C=r}else B=C,C=r;else B=C,C=r}else B=C,C=r;else B=C,C=r}else B=C,C=r;else B=C,C=r;if(C===r){for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r){for(F=[],H=LE();H!==r;)F.push(H),H=LE();if(F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();if(H!==r){if(ue=[],De=YO(),De!==r)for(;De!==r;)ue.push(De),De=YO();else ue=r;if(ue!==r){for(De=[],Ct=je();Ct!==r;)De.push(Ct),Ct=je();De!==r?(Ke=C,b=he(F,ue),C=b):(B=C,C=r)}else B=C,C=r}else B=C,C=r}else B=C,C=r}else B=C,C=r;if(C===r){for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r){if(F=[],H=LE(),H!==r)for(;H!==r;)F.push(H),H=LE();else F=r;if(F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();H!==r?(Ke=C,b=Fe(F),C=b):(B=C,C=r)}else B=C,C=r}else B=C,C=r}}}return C}function GO(){var C,b,F,H,ue;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r){if(F=[],H=TE(),H!==r)for(;H!==r;)F.push(H),H=TE();else F=r;if(F!==r){for(H=[],ue=je();ue!==r;)H.push(ue),ue=je();H!==r?(Ke=C,b=Ue(F),C=b):(B=C,C=r)}else B=C,C=r}else B=C,C=r;return C}function YO(){var C,b,F;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();if(b!==r?(F=Th(),F!==r?(Ke=C,b=xe(F),C=b):(B=C,C=r)):(B=C,C=r),C===r){for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();b!==r?(F=TE(),F!==r?(Ke=C,b=xe(F),C=b):(B=C,C=r)):(B=C,C=r)}return C}function Th(){var C,b,F,H,ue;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();return b!==r?(ve.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(pe)),F===r&&(F=null),F!==r?(H=_ge(),H!==r?(ue=TE(),ue!==r?(Ke=C,b=X(F,H,ue),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C}function _ge(){var C;return t.substr(B,2)===be?(C=be,B+=2):(C=r,I===0&&ke(ce)),C===r&&(t.substr(B,2)===fe?(C=fe,B+=2):(C=r,I===0&&ke(gt)),C===r&&(t.charCodeAt(B)===62?(C=Ht,B++):(C=r,I===0&&ke(Mt)),C===r&&(t.substr(B,3)===mi?(C=mi,B+=3):(C=r,I===0&&ke(jt)),C===r&&(t.substr(B,2)===Qr?(C=Qr,B+=2):(C=r,I===0&&ke(Ti)),C===r&&(t.charCodeAt(B)===60?(C=_s,B++):(C=r,I===0&&ke(Un))))))),C}function TE(){var C,b,F;for(C=B,b=[],F=je();F!==r;)b.push(F),F=je();return b!==r?(F=qO(),F!==r?(Ke=C,b=xe(F),C=b):(B=C,C=r)):(B=C,C=r),C}function qO(){var C,b,F;if(C=B,b=[],F=JO(),F!==r)for(;F!==r;)b.push(F),F=JO();else b=r;return b!==r&&(Ke=C,b=Kn(b)),C=b,C}function JO(){var C,b;return C=B,b=Vge(),b!==r&&(Ke=C,b=vr(b)),C=b,C===r&&(C=B,b=Xge(),b!==r&&(Ke=C,b=vr(b)),C=b,C===r&&(C=B,b=Zge(),b!==r&&(Ke=C,b=vr(b)),C=b,C===r&&(C=B,b=$ge(),b!==r&&(Ke=C,b=vr(b)),C=b))),C}function Vge(){var C,b,F,H;return C=B,t.substr(B,2)===Hn?(b=Hn,B+=2):(b=r,I===0&&ke(us)),b!==r?(F=rfe(),F!==r?(t.charCodeAt(B)===39?(H=Ia,B++):(H=r,I===0&&ke(SA)),H!==r?(Ke=C,b=Du(F),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C}function Xge(){var C,b,F,H;return C=B,t.charCodeAt(B)===39?(b=Ia,B++):(b=r,I===0&&ke(SA)),b!==r?(F=efe(),F!==r?(t.charCodeAt(B)===39?(H=Ia,B++):(H=r,I===0&&ke(SA)),H!==r?(Ke=C,b=Du(F),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C}function Zge(){var 
C,b,F,H;if(C=B,t.substr(B,2)===gs?(b=gs,B+=2):(b=r,I===0&&ke(kA)),b!==r&&(Ke=C,b=ya()),C=b,C===r)if(C=B,t.charCodeAt(B)===34?(b=Ru,B++):(b=r,I===0&&ke(xA)),b!==r){for(F=[],H=WO();H!==r;)F.push(H),H=WO();F!==r?(t.charCodeAt(B)===34?(H=Ru,B++):(H=r,I===0&&ke(xA)),H!==r?(Ke=C,b=PA(F),C=b):(B=C,C=r)):(B=C,C=r)}else B=C,C=r;return C}function $ge(){var C,b,F;if(C=B,b=[],F=zO(),F!==r)for(;F!==r;)b.push(F),F=zO();else b=r;return b!==r&&(Ke=C,b=PA(b)),C=b,C}function WO(){var C,b;return C=B,b=ZO(),b!==r&&(Ke=C,b=Sr(b)),C=b,C===r&&(C=B,b=$O(),b!==r&&(Ke=C,b=jl(b)),C=b,C===r&&(C=B,b=aQ(),b!==r&&(Ke=C,b=Fu(b)),C=b,C===r&&(C=B,b=tfe(),b!==r&&(Ke=C,b=So(b)),C=b))),C}function zO(){var C,b;return C=B,b=ZO(),b!==r&&(Ke=C,b=Nu(b)),C=b,C===r&&(C=B,b=$O(),b!==r&&(Ke=C,b=Qh(b)),C=b,C===r&&(C=B,b=aQ(),b!==r&&(Ke=C,b=vh(b)),C=b,C===r&&(C=B,b=sfe(),b!==r&&(Ke=C,b=oe(b)),C=b,C===r&&(C=B,b=nfe(),b!==r&&(Ke=C,b=So(b)),C=b)))),C}function efe(){var C,b,F;for(C=B,b=[],Oi.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(ko));F!==r;)b.push(F),Oi.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(ko));return b!==r&&(Ke=C,b=jn(b)),C=b,C}function tfe(){var C,b,F;if(C=B,b=[],F=_O(),F===r&&(Lu.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(vt))),F!==r)for(;F!==r;)b.push(F),F=_O(),F===r&&(Lu.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(vt)));else b=r;return b!==r&&(Ke=C,b=jn(b)),C=b,C}function _O(){var C,b,F;return C=B,t.substr(B,2)===Gl?(b=Gl,B+=2):(b=r,I===0&&ke(Gn)),b!==r&&(Ke=C,b=fs()),C=b,C===r&&(C=B,t.charCodeAt(B)===92?(b=hs,B++):(b=r,I===0&&ke(pt)),b!==r?(xo.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(lt)),F!==r?(Ke=C,b=mn(F),C=b):(B=C,C=r)):(B=C,C=r)),C}function rfe(){var C,b,F;for(C=B,b=[],F=VO(),F===r&&(Oi.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(ko)));F!==r;)b.push(F),F=VO(),F===r&&(Oi.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(ko)));return b!==r&&(Ke=C,b=jn(b)),C=b,C}function VO(){var C,b,F;return C=B,t.substr(B,2)===v?(b=v,B+=2):(b=r,I===0&&ke(Tt)),b!==r&&(Ke=C,b=Tu()),C=b,C===r&&(C=B,t.substr(B,2)===Yl?(b=Yl,B+=2):(b=r,I===0&&ke(Sh)),b!==r&&(Ke=C,b=kh()),C=b,C===r&&(C=B,t.charCodeAt(B)===92?(b=hs,B++):(b=r,I===0&&ke(pt)),b!==r?(xh.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(Ph)),F!==r?(Ke=C,b=Dh(),C=b):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===G?(b=G,B+=2):(b=r,I===0&&ke(yt)),b!==r&&(Ke=C,b=DA()),C=b,C===r&&(C=B,t.substr(B,2)===$i?(b=$i,B+=2):(b=r,I===0&&ke(ql)),b!==r&&(Ke=C,b=$e()),C=b,C===r&&(C=B,t.substr(B,2)===wa?(b=wa,B+=2):(b=r,I===0&&ke(Ou)),b!==r&&(Ke=C,b=SE()),C=b,C===r&&(C=B,t.substr(B,2)===Rh?(b=Rh,B+=2):(b=r,I===0&&ke(kE)),b!==r&&(Ke=C,b=gr()),C=b,C===r&&(C=B,t.substr(B,2)===Yn?(b=Yn,B+=2):(b=r,I===0&&ke(Jl)),b!==r&&(Ke=C,b=Fh()),C=b,C===r&&(C=B,t.charCodeAt(B)===92?(b=hs,B++):(b=r,I===0&&ke(pt)),b!==r?(Vs.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(Ba)),F!==r?(Ke=C,b=mn(F),C=b):(B=C,C=r)):(B=C,C=r),C===r&&(C=ife()))))))))),C}function ife(){var C,b,F,H,ue,De,Ct,bt,Zr,Ei,ds,AQ;return 
C=B,t.charCodeAt(B)===92?(b=hs,B++):(b=r,I===0&&ke(pt)),b!==r?(F=nQ(),F!==r?(Ke=C,b=En(F),C=b):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===Oe?(b=Oe,B+=2):(b=r,I===0&&ke(Mu)),b!==r?(F=B,H=B,ue=nQ(),ue!==r?(De=Wn(),De!==r?(ue=[ue,De],H=ue):(B=H,H=r)):(B=H,H=r),H===r&&(H=nQ()),H!==r?F=t.substring(F,B):F=H,F!==r?(Ke=C,b=En(F),C=b):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===Wl?(b=Wl,B+=2):(b=r,I===0&&ke(Xs)),b!==r?(F=B,H=B,ue=Wn(),ue!==r?(De=Wn(),De!==r?(Ct=Wn(),Ct!==r?(bt=Wn(),bt!==r?(ue=[ue,De,Ct,bt],H=ue):(B=H,H=r)):(B=H,H=r)):(B=H,H=r)):(B=H,H=r),H!==r?F=t.substring(F,B):F=H,F!==r?(Ke=C,b=En(F),C=b):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===zl?(b=zl,B+=2):(b=r,I===0&&ke(RA)),b!==r?(F=B,H=B,ue=Wn(),ue!==r?(De=Wn(),De!==r?(Ct=Wn(),Ct!==r?(bt=Wn(),bt!==r?(Zr=Wn(),Zr!==r?(Ei=Wn(),Ei!==r?(ds=Wn(),ds!==r?(AQ=Wn(),AQ!==r?(ue=[ue,De,Ct,bt,Zr,Ei,ds,AQ],H=ue):(B=H,H=r)):(B=H,H=r)):(B=H,H=r)):(B=H,H=r)):(B=H,H=r)):(B=H,H=r)):(B=H,H=r)):(B=H,H=r),H!==r?F=t.substring(F,B):F=H,F!==r?(Ke=C,b=Uu(F),C=b):(B=C,C=r)):(B=C,C=r)))),C}function nQ(){var C;return Ku.test(t.charAt(B))?(C=t.charAt(B),B++):(C=r,I===0&&ke(ba)),C}function Wn(){var C;return Qa.test(t.charAt(B))?(C=t.charAt(B),B++):(C=r,I===0&&ke(it)),C}function nfe(){var C,b,F,H,ue;if(C=B,b=[],F=B,t.charCodeAt(B)===92?(H=hs,B++):(H=r,I===0&&ke(pt)),H!==r?(t.length>B?(ue=t.charAt(B),B++):(ue=r,I===0&&ke(Po)),ue!==r?(Ke=F,H=mn(ue),F=H):(B=F,F=r)):(B=F,F=r),F===r&&(F=B,H=B,I++,ue=tM(),I--,ue===r?H=void 0:(B=H,H=r),H!==r?(t.length>B?(ue=t.charAt(B),B++):(ue=r,I===0&&ke(Po)),ue!==r?(Ke=F,H=mn(ue),F=H):(B=F,F=r)):(B=F,F=r)),F!==r)for(;F!==r;)b.push(F),F=B,t.charCodeAt(B)===92?(H=hs,B++):(H=r,I===0&&ke(pt)),H!==r?(t.length>B?(ue=t.charAt(B),B++):(ue=r,I===0&&ke(Po)),ue!==r?(Ke=F,H=mn(ue),F=H):(B=F,F=r)):(B=F,F=r),F===r&&(F=B,H=B,I++,ue=tM(),I--,ue===r?H=void 0:(B=H,H=r),H!==r?(t.length>B?(ue=t.charAt(B),B++):(ue=r,I===0&&ke(Po)),ue!==r?(Ke=F,H=mn(ue),F=H):(B=F,F=r)):(B=F,F=r));else b=r;return b!==r&&(Ke=C,b=jn(b)),C=b,C}function sQ(){var C,b,F,H,ue,De;if(C=B,t.charCodeAt(B)===45?(b=FA,B++):(b=r,I===0&&ke(_l)),b===r&&(t.charCodeAt(B)===43?(b=Zs,B++):(b=r,I===0&&ke(Vl))),b===r&&(b=null),b!==r){if(F=[],ve.test(t.charAt(B))?(H=t.charAt(B),B++):(H=r,I===0&&ke(pe)),H!==r)for(;H!==r;)F.push(H),ve.test(t.charAt(B))?(H=t.charAt(B),B++):(H=r,I===0&&ke(pe));else F=r;if(F!==r)if(t.charCodeAt(B)===46?(H=xE,B++):(H=r,I===0&&ke(Nh)),H!==r){if(ue=[],ve.test(t.charAt(B))?(De=t.charAt(B),B++):(De=r,I===0&&ke(pe)),De!==r)for(;De!==r;)ue.push(De),ve.test(t.charAt(B))?(De=t.charAt(B),B++):(De=r,I===0&&ke(pe));else ue=r;ue!==r?(Ke=C,b=Hu(b,F,ue),C=b):(B=C,C=r)}else B=C,C=r;else B=C,C=r}else B=C,C=r;if(C===r){if(C=B,t.charCodeAt(B)===45?(b=FA,B++):(b=r,I===0&&ke(_l)),b===r&&(t.charCodeAt(B)===43?(b=Zs,B++):(b=r,I===0&&ke(Vl))),b===r&&(b=null),b!==r){if(F=[],ve.test(t.charAt(B))?(H=t.charAt(B),B++):(H=r,I===0&&ke(pe)),H!==r)for(;H!==r;)F.push(H),ve.test(t.charAt(B))?(H=t.charAt(B),B++):(H=r,I===0&&ke(pe));else F=r;F!==r?(Ke=C,b=Lh(b,F),C=b):(B=C,C=r)}else B=C,C=r;if(C===r&&(C=B,b=aQ(),b!==r&&(Ke=C,b=PE(b)),C=b,C===r&&(C=B,b=Zl(),b!==r&&(Ke=C,b=Xl(b)),C=b,C===r)))if(C=B,t.charCodeAt(B)===40?(b=le,B++):(b=r,I===0&&ke(Ae)),b!==r){for(F=[],H=je();H!==r;)F.push(H),H=je();if(F!==r)if(H=XO(),H!==r){for(ue=[],De=je();De!==r;)ue.push(De),De=je();ue!==r?(t.charCodeAt(B)===41?(De=T,B++):(De=r,I===0&&ke(L)),De!==r?(Ke=C,b=DE(H),C=b):(B=C,C=r)):(B=C,C=r)}else B=C,C=r;else B=C,C=r}else B=C,C=r}return C}function oQ(){var 
C,b,F,H,ue,De,Ct,bt;if(C=B,b=sQ(),b!==r){for(F=[],H=B,ue=[],De=je();De!==r;)ue.push(De),De=je();if(ue!==r)if(t.charCodeAt(B)===42?(De=ju,B++):(De=r,I===0&&ke(NA)),De===r&&(t.charCodeAt(B)===47?(De=Lr,B++):(De=r,I===0&&ke(RE))),De!==r){for(Ct=[],bt=je();bt!==r;)Ct.push(bt),bt=je();Ct!==r?(bt=sQ(),bt!==r?(Ke=H,ue=$s(b,De,bt),H=ue):(B=H,H=r)):(B=H,H=r)}else B=H,H=r;else B=H,H=r;for(;H!==r;){for(F.push(H),H=B,ue=[],De=je();De!==r;)ue.push(De),De=je();if(ue!==r)if(t.charCodeAt(B)===42?(De=ju,B++):(De=r,I===0&&ke(NA)),De===r&&(t.charCodeAt(B)===47?(De=Lr,B++):(De=r,I===0&&ke(RE))),De!==r){for(Ct=[],bt=je();bt!==r;)Ct.push(bt),bt=je();Ct!==r?(bt=sQ(),bt!==r?(Ke=H,ue=$s(b,De,bt),H=ue):(B=H,H=r)):(B=H,H=r)}else B=H,H=r;else B=H,H=r}F!==r?(Ke=C,b=eo(b,F),C=b):(B=C,C=r)}else B=C,C=r;return C}function XO(){var C,b,F,H,ue,De,Ct,bt;if(C=B,b=oQ(),b!==r){for(F=[],H=B,ue=[],De=je();De!==r;)ue.push(De),De=je();if(ue!==r)if(t.charCodeAt(B)===43?(De=Zs,B++):(De=r,I===0&&ke(Vl)),De===r&&(t.charCodeAt(B)===45?(De=FA,B++):(De=r,I===0&&ke(_l))),De!==r){for(Ct=[],bt=je();bt!==r;)Ct.push(bt),bt=je();Ct!==r?(bt=oQ(),bt!==r?(Ke=H,ue=Gu(b,De,bt),H=ue):(B=H,H=r)):(B=H,H=r)}else B=H,H=r;else B=H,H=r;for(;H!==r;){for(F.push(H),H=B,ue=[],De=je();De!==r;)ue.push(De),De=je();if(ue!==r)if(t.charCodeAt(B)===43?(De=Zs,B++):(De=r,I===0&&ke(Vl)),De===r&&(t.charCodeAt(B)===45?(De=FA,B++):(De=r,I===0&&ke(_l))),De!==r){for(Ct=[],bt=je();bt!==r;)Ct.push(bt),bt=je();Ct!==r?(bt=oQ(),bt!==r?(Ke=H,ue=Gu(b,De,bt),H=ue):(B=H,H=r)):(B=H,H=r)}else B=H,H=r;else B=H,H=r}F!==r?(Ke=C,b=eo(b,F),C=b):(B=C,C=r)}else B=C,C=r;return C}function ZO(){var C,b,F,H,ue,De;if(C=B,t.substr(B,3)===LA?(b=LA,B+=3):(b=r,I===0&&ke(R)),b!==r){for(F=[],H=je();H!==r;)F.push(H),H=je();if(F!==r)if(H=XO(),H!==r){for(ue=[],De=je();De!==r;)ue.push(De),De=je();ue!==r?(t.substr(B,2)===q?(De=q,B+=2):(De=r,I===0&&ke(de)),De!==r?(Ke=C,b=He(H),C=b):(B=C,C=r)):(B=C,C=r)}else B=C,C=r;else B=C,C=r}else B=C,C=r;return C}function $O(){var C,b,F,H;return C=B,t.substr(B,2)===Te?(b=Te,B+=2):(b=r,I===0&&ke(Xe)),b!==r?(F=Gr(),F!==r?(t.charCodeAt(B)===41?(H=T,B++):(H=r,I===0&&ke(L)),H!==r?(Ke=C,b=Et(F),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C}function aQ(){var C,b,F,H,ue,De;return C=B,t.substr(B,2)===Rt?(b=Rt,B+=2):(b=r,I===0&&ke(qn)),b!==r?(F=Zl(),F!==r?(t.substr(B,2)===Jb?(H=Jb,B+=2):(H=r,I===0&&ke(xO)),H!==r?(ue=GO(),ue!==r?(t.charCodeAt(B)===125?(De=re,B++):(De=r,I===0&&ke(se)),De!==r?(Ke=C,b=PO(F,ue),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===Rt?(b=Rt,B+=2):(b=r,I===0&&ke(qn)),b!==r?(F=Zl(),F!==r?(t.substr(B,3)===Wb?(H=Wb,B+=3):(H=r,I===0&&ke(DO)),H!==r?(Ke=C,b=RO(F),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===Rt?(b=Rt,B+=2):(b=r,I===0&&ke(qn)),b!==r?(F=Zl(),F!==r?(t.substr(B,2)===zb?(H=zb,B+=2):(H=r,I===0&&ke(FO)),H!==r?(ue=GO(),ue!==r?(t.charCodeAt(B)===125?(De=re,B++):(De=r,I===0&&ke(se)),De!==r?(Ke=C,b=NO(F,ue),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===Rt?(b=Rt,B+=2):(b=r,I===0&&ke(qn)),b!==r?(F=Zl(),F!==r?(t.substr(B,3)===_b?(H=_b,B+=3):(H=r,I===0&&ke(LO)),H!==r?(Ke=C,b=TO(F),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.substr(B,2)===Rt?(b=Rt,B+=2):(b=r,I===0&&ke(qn)),b!==r?(F=Zl(),F!==r?(t.charCodeAt(B)===125?(H=re,B++):(H=r,I===0&&ke(se)),H!==r?(Ke=C,b=Vb(F),C=b):(B=C,C=r)):(B=C,C=r)):(B=C,C=r),C===r&&(C=B,t.charCodeAt(B)===36?(b=OO,B++):(b=r,I===0&&ke(MO)),b!==r?(F=Zl(),F!==r?(Ke=C,b=Vb(F),C=b):(B=C,C=r)):(B=C,C=r)))))),C}function sfe(){var C,b,F;return 
C=B,b=ofe(),b!==r?(Ke=B,F=UO(b),F?F=void 0:F=r,F!==r?(Ke=C,b=KO(b),C=b):(B=C,C=r)):(B=C,C=r),C}function ofe(){var C,b,F,H,ue;if(C=B,b=[],F=B,H=B,I++,ue=rM(),I--,ue===r?H=void 0:(B=H,H=r),H!==r?(t.length>B?(ue=t.charAt(B),B++):(ue=r,I===0&&ke(Po)),ue!==r?(Ke=F,H=mn(ue),F=H):(B=F,F=r)):(B=F,F=r),F!==r)for(;F!==r;)b.push(F),F=B,H=B,I++,ue=rM(),I--,ue===r?H=void 0:(B=H,H=r),H!==r?(t.length>B?(ue=t.charAt(B),B++):(ue=r,I===0&&ke(Po)),ue!==r?(Ke=F,H=mn(ue),F=H):(B=F,F=r)):(B=F,F=r);else b=r;return b!==r&&(Ke=C,b=jn(b)),C=b,C}function eM(){var C,b,F;if(C=B,b=[],Xb.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(Zb)),F!==r)for(;F!==r;)b.push(F),Xb.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(Zb));else b=r;return b!==r&&(Ke=C,b=$b()),C=b,C}function Zl(){var C,b,F;if(C=B,b=[],eQ.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(tQ)),F!==r)for(;F!==r;)b.push(F),eQ.test(t.charAt(B))?(F=t.charAt(B),B++):(F=r,I===0&&ke(tQ));else b=r;return b!==r&&(Ke=C,b=$b()),C=b,C}function tM(){var C;return HO.test(t.charAt(B))?(C=t.charAt(B),B++):(C=r,I===0&&ke(Yu)),C}function rM(){var C;return rQ.test(t.charAt(B))?(C=t.charAt(B),B++):(C=r,I===0&&ke(iQ)),C}function je(){var C,b;if(C=[],FE.test(t.charAt(B))?(b=t.charAt(B),B++):(b=r,I===0&&ke(NE)),b!==r)for(;b!==r;)C.push(b),FE.test(t.charAt(B))?(b=t.charAt(B),B++):(b=r,I===0&&ke(NE));else C=r;return C}if(D=n(),D!==r&&B===t.length)return D;throw D!==r&&B<t.length&&ke(wt()),Mi(E,d<t.length?t.charAt(d):null,d<t.length?It(d,d+1):It(d,d))}f1.exports={SyntaxError:nc,parse:she}});var C1=w((SZe,d1)=>{\"use strict\";function ohe(t,e){function r(){this.constructor=t}r.prototype=e.prototype,t.prototype=new r}function oc(t,e,r,i){this.message=t,this.expected=e,this.found=r,this.location=i,this.name=\"SyntaxError\",typeof Error.captureStackTrace==\"function\"&&Error.captureStackTrace(this,oc)}ohe(oc,Error);oc.buildMessage=function(t,e){var r={literal:function(c){return'\"'+n(c.text)+'\"'},class:function(c){var u=\"\",g;for(g=0;g<c.parts.length;g++)u+=c.parts[g]instanceof Array?s(c.parts[g][0])+\"-\"+s(c.parts[g][1]):s(c.parts[g]);return\"[\"+(c.inverted?\"^\":\"\")+u+\"]\"},any:function(c){return\"any character\"},end:function(c){return\"end of input\"},other:function(c){return c.description}};function i(c){return c.charCodeAt(0).toString(16).toUpperCase()}function n(c){return c.replace(/\\\\/g,\"\\\\\\\\\").replace(/\"/g,'\\\\\"').replace(/\\0/g,\"\\\\0\").replace(/\\t/g,\"\\\\t\").replace(/\\n/g,\"\\\\n\").replace(/\\r/g,\"\\\\r\").replace(/[\\x00-\\x0F]/g,function(u){return\"\\\\x0\"+i(u)}).replace(/[\\x10-\\x1F\\x7F-\\x9F]/g,function(u){return\"\\\\x\"+i(u)})}function s(c){return c.replace(/\\\\/g,\"\\\\\\\\\").replace(/\\]/g,\"\\\\]\").replace(/\\^/g,\"\\\\^\").replace(/-/g,\"\\\\-\").replace(/\\0/g,\"\\\\0\").replace(/\\t/g,\"\\\\t\").replace(/\\n/g,\"\\\\n\").replace(/\\r/g,\"\\\\r\").replace(/[\\x00-\\x0F]/g,function(u){return\"\\\\x0\"+i(u)}).replace(/[\\x10-\\x1F\\x7F-\\x9F]/g,function(u){return\"\\\\x\"+i(u)})}function o(c){return r[c.type](c)}function a(c){var u=new Array(c.length),g,f;for(g=0;g<c.length;g++)u[g]=o(c[g]);if(u.sort(),u.length>0){for(g=1,f=1;g<u.length;g++)u[g-1]!==u[g]&&(u[f]=u[g],f++);u.length=f}switch(u.length){case 1:return u[0];case 2:return u[0]+\" or \"+u[1];default:return u.slice(0,-1).join(\", \")+\", or \"+u[u.length-1]}}function l(c){return c?'\"'+n(c)+'\"':\"end of input\"}return\"Expected \"+a(t)+\" but \"+l(e)+\" found.\"};function ahe(t,e){e=e!==void 0?e:{};var 
r={},i={resolution:he},n=he,s=\"/\",o=le(\"/\",!1),a=function(pe,X){return{from:pe,descriptor:X}},l=function(pe){return{descriptor:pe}},c=\"@\",u=le(\"@\",!1),g=function(pe,X){return{fullName:pe,description:X}},f=function(pe){return{fullName:pe}},h=function(){return ee()},p=/^[^\\/@]/,m=Ae([\"/\",\"@\"],!0,!1),y=/^[^\\/]/,Q=Ae([\"/\"],!0,!1),S=0,x=0,M=[{line:1,column:1}],Y=0,U=[],J=0,W;if(\"startRule\"in e){if(!(e.startRule in i))throw new Error(`Can't start parsing from rule \"`+e.startRule+'\".');n=i[e.startRule]}function ee(){return t.substring(x,S)}function Z(){return qe(x,S)}function A(pe,X){throw X=X!==void 0?X:qe(x,S),Qe([Ee(pe)],t.substring(x,S),X)}function ne(pe,X){throw X=X!==void 0?X:qe(x,S),se(pe,X)}function le(pe,X){return{type:\"literal\",text:pe,ignoreCase:X}}function Ae(pe,X,be){return{type:\"class\",parts:pe,inverted:X,ignoreCase:be}}function T(){return{type:\"any\"}}function L(){return{type:\"end\"}}function Ee(pe){return{type:\"other\",description:pe}}function we(pe){var X=M[pe],be;if(X)return X;for(be=pe-1;!M[be];)be--;for(X=M[be],X={line:X.line,column:X.column};be<pe;)t.charCodeAt(be)===10?(X.line++,X.column=1):X.column++,be++;return M[pe]=X,X}function qe(pe,X){var be=we(pe),ce=we(X);return{start:{offset:pe,line:be.line,column:be.column},end:{offset:X,line:ce.line,column:ce.column}}}function re(pe){S<Y||(S>Y&&(Y=S,U=[]),U.push(pe))}function se(pe,X){return new oc(pe,null,null,X)}function Qe(pe,X,be){return new oc(oc.buildMessage(pe,X),pe,X,be)}function he(){var pe,X,be,ce;return pe=S,X=Fe(),X!==r?(t.charCodeAt(S)===47?(be=s,S++):(be=r,J===0&&re(o)),be!==r?(ce=Fe(),ce!==r?(x=pe,X=a(X,ce),pe=X):(S=pe,pe=r)):(S=pe,pe=r)):(S=pe,pe=r),pe===r&&(pe=S,X=Fe(),X!==r&&(x=pe,X=l(X)),pe=X),pe}function Fe(){var pe,X,be,ce;return pe=S,X=Ue(),X!==r?(t.charCodeAt(S)===64?(be=c,S++):(be=r,J===0&&re(u)),be!==r?(ce=ve(),ce!==r?(x=pe,X=g(X,ce),pe=X):(S=pe,pe=r)):(S=pe,pe=r)):(S=pe,pe=r),pe===r&&(pe=S,X=Ue(),X!==r&&(x=pe,X=f(X)),pe=X),pe}function Ue(){var pe,X,be,ce,fe;return pe=S,t.charCodeAt(S)===64?(X=c,S++):(X=r,J===0&&re(u)),X!==r?(be=xe(),be!==r?(t.charCodeAt(S)===47?(ce=s,S++):(ce=r,J===0&&re(o)),ce!==r?(fe=xe(),fe!==r?(x=pe,X=h(),pe=X):(S=pe,pe=r)):(S=pe,pe=r)):(S=pe,pe=r)):(S=pe,pe=r),pe===r&&(pe=S,X=xe(),X!==r&&(x=pe,X=h()),pe=X),pe}function xe(){var pe,X,be;if(pe=S,X=[],p.test(t.charAt(S))?(be=t.charAt(S),S++):(be=r,J===0&&re(m)),be!==r)for(;be!==r;)X.push(be),p.test(t.charAt(S))?(be=t.charAt(S),S++):(be=r,J===0&&re(m));else X=r;return X!==r&&(x=pe,X=h()),pe=X,pe}function ve(){var pe,X,be;if(pe=S,X=[],y.test(t.charAt(S))?(be=t.charAt(S),S++):(be=r,J===0&&re(Q)),be!==r)for(;be!==r;)X.push(be),y.test(t.charAt(S))?(be=t.charAt(S),S++):(be=r,J===0&&re(Q));else X=r;return X!==r&&(x=pe,X=h()),pe=X,pe}if(W=n(),W!==r&&S===t.length)return W;throw W!==r&&S<t.length&&re(L()),Qe(U,Y<t.length?t.charAt(Y):null,Y<t.length?qe(Y,Y+1):qe(Y,Y))}d1.exports={SyntaxError:oc,parse:ahe}});var Ac=w((xZe,ac)=>{\"use strict\";function E1(t){return typeof t==\"undefined\"||t===null}function Ahe(t){return typeof t==\"object\"&&t!==null}function lhe(t){return Array.isArray(t)?t:E1(t)?[]:[t]}function che(t,e){var r,i,n,s;if(e)for(s=Object.keys(e),r=0,i=s.length;r<i;r+=1)n=s[r],t[n]=e[n];return t}function uhe(t,e){var r=\"\",i;for(i=0;i<e;i+=1)r+=t;return r}function ghe(t){return t===0&&Number.NEGATIVE_INFINITY===1/t}ac.exports.isNothing=E1;ac.exports.isObject=Ahe;ac.exports.toArray=lhe;ac.exports.repeat=uhe;ac.exports.isNegativeZero=ghe;ac.exports.extend=che});var Vu=w((PZe,I1)=>{\"use strict\";function 
ep(t,e){Error.call(this),this.name=\"YAMLException\",this.reason=t,this.mark=e,this.message=(this.reason||\"(unknown reason)\")+(this.mark?\" \"+this.mark.toString():\"\"),Error.captureStackTrace?Error.captureStackTrace(this,this.constructor):this.stack=new Error().stack||\"\"}ep.prototype=Object.create(Error.prototype);ep.prototype.constructor=ep;ep.prototype.toString=function(e){var r=this.name+\": \";return r+=this.reason||\"(unknown reason)\",!e&&this.mark&&(r+=\" \"+this.mark.toString()),r};I1.exports=ep});var B1=w((DZe,y1)=>{\"use strict\";var w1=Ac();function HQ(t,e,r,i,n){this.name=t,this.buffer=e,this.position=r,this.line=i,this.column=n}HQ.prototype.getSnippet=function(e,r){var i,n,s,o,a;if(!this.buffer)return null;for(e=e||4,r=r||75,i=\"\",n=this.position;n>0&&`\\0\\r\n\\x85\\u2028\\u2029`.indexOf(this.buffer.charAt(n-1))===-1;)if(n-=1,this.position-n>r/2-1){i=\" ... \",n+=5;break}for(s=\"\",o=this.position;o<this.buffer.length&&`\\0\\r\n\\x85\\u2028\\u2029`.indexOf(this.buffer.charAt(o))===-1;)if(o+=1,o-this.position>r/2-1){s=\" ... \",o-=5;break}return a=this.buffer.slice(n,o),w1.repeat(\" \",e)+i+a+s+`\n`+w1.repeat(\" \",e+this.position-n+i.length)+\"^\"};HQ.prototype.toString=function(e){var r,i=\"\";return this.name&&(i+='in \"'+this.name+'\" '),i+=\"at line \"+(this.line+1)+\", column \"+(this.column+1),e||(r=this.getSnippet(),r&&(i+=`:\n`+r)),i};y1.exports=HQ});var li=w((RZe,b1)=>{\"use strict\";var Q1=Vu(),fhe=[\"kind\",\"resolve\",\"construct\",\"instanceOf\",\"predicate\",\"represent\",\"defaultStyle\",\"styleAliases\"],hhe=[\"scalar\",\"sequence\",\"mapping\"];function phe(t){var e={};return t!==null&&Object.keys(t).forEach(function(r){t[r].forEach(function(i){e[String(i)]=r})}),e}function dhe(t,e){if(e=e||{},Object.keys(e).forEach(function(r){if(fhe.indexOf(r)===-1)throw new Q1('Unknown option \"'+r+'\" is met in definition of \"'+t+'\" YAML type.')}),this.tag=t,this.kind=e.kind||null,this.resolve=e.resolve||function(){return!0},this.construct=e.construct||function(r){return r},this.instanceOf=e.instanceOf||null,this.predicate=e.predicate||null,this.represent=e.represent||null,this.defaultStyle=e.defaultStyle||null,this.styleAliases=phe(e.styleAliases||null),hhe.indexOf(this.kind)===-1)throw new Q1('Unknown kind \"'+this.kind+'\" is specified for \"'+t+'\" YAML type.')}b1.exports=dhe});var lc=w((FZe,v1)=>{\"use strict\";var S1=Ac(),nI=Vu(),Che=li();function jQ(t,e,r){var i=[];return t.include.forEach(function(n){r=jQ(n,e,r)}),t[e].forEach(function(n){r.forEach(function(s,o){s.tag===n.tag&&s.kind===n.kind&&i.push(o)}),r.push(n)}),r.filter(function(n,s){return i.indexOf(s)===-1})}function mhe(){var t={scalar:{},sequence:{},mapping:{},fallback:{}},e,r;function i(n){t[n.kind][n.tag]=t.fallback[n.tag]=n}for(e=0,r=arguments.length;e<r;e+=1)arguments[e].forEach(i);return t}function Xu(t){this.include=t.include||[],this.implicit=t.implicit||[],this.explicit=t.explicit||[],this.implicit.forEach(function(e){if(e.loadKind&&e.loadKind!==\"scalar\")throw new nI(\"There is a non-scalar type in the implicit list of a schema. 
Implicit resolving of such types is not supported.\")}),this.compiledImplicit=jQ(this,\"implicit\",[]),this.compiledExplicit=jQ(this,\"explicit\",[]),this.compiledTypeMap=mhe(this.compiledImplicit,this.compiledExplicit)}Xu.DEFAULT=null;Xu.create=function(){var e,r;switch(arguments.length){case 1:e=Xu.DEFAULT,r=arguments[0];break;case 2:e=arguments[0],r=arguments[1];break;default:throw new nI(\"Wrong number of arguments for Schema.create function\")}if(e=S1.toArray(e),r=S1.toArray(r),!e.every(function(i){return i instanceof Xu}))throw new nI(\"Specified list of super schemas (or a single Schema object) contains a non-Schema object.\");if(!r.every(function(i){return i instanceof Che}))throw new nI(\"Specified list of YAML types (or a single Type object) contains a non-Type object.\");return new Xu({include:e,explicit:r})};v1.exports=Xu});var x1=w((NZe,k1)=>{\"use strict\";var Ehe=li();k1.exports=new Ehe(\"tag:yaml.org,2002:str\",{kind:\"scalar\",construct:function(t){return t!==null?t:\"\"}})});var D1=w((LZe,P1)=>{\"use strict\";var Ihe=li();P1.exports=new Ihe(\"tag:yaml.org,2002:seq\",{kind:\"sequence\",construct:function(t){return t!==null?t:[]}})});var F1=w((TZe,R1)=>{\"use strict\";var yhe=li();R1.exports=new yhe(\"tag:yaml.org,2002:map\",{kind:\"mapping\",construct:function(t){return t!==null?t:{}}})});var sI=w((OZe,N1)=>{\"use strict\";var whe=lc();N1.exports=new whe({explicit:[x1(),D1(),F1()]})});var T1=w((MZe,L1)=>{\"use strict\";var Bhe=li();function bhe(t){if(t===null)return!0;var e=t.length;return e===1&&t===\"~\"||e===4&&(t===\"null\"||t===\"Null\"||t===\"NULL\")}function Qhe(){return null}function vhe(t){return t===null}L1.exports=new Bhe(\"tag:yaml.org,2002:null\",{kind:\"scalar\",resolve:bhe,construct:Qhe,predicate:vhe,represent:{canonical:function(){return\"~\"},lowercase:function(){return\"null\"},uppercase:function(){return\"NULL\"},camelcase:function(){return\"Null\"}},defaultStyle:\"lowercase\"})});var M1=w((UZe,O1)=>{\"use strict\";var She=li();function khe(t){if(t===null)return!1;var e=t.length;return e===4&&(t===\"true\"||t===\"True\"||t===\"TRUE\")||e===5&&(t===\"false\"||t===\"False\"||t===\"FALSE\")}function xhe(t){return t===\"true\"||t===\"True\"||t===\"TRUE\"}function Phe(t){return Object.prototype.toString.call(t)===\"[object Boolean]\"}O1.exports=new She(\"tag:yaml.org,2002:bool\",{kind:\"scalar\",resolve:khe,construct:xhe,predicate:Phe,represent:{lowercase:function(t){return t?\"true\":\"false\"},uppercase:function(t){return t?\"TRUE\":\"FALSE\"},camelcase:function(t){return t?\"True\":\"False\"}},defaultStyle:\"lowercase\"})});var K1=w((KZe,U1)=>{\"use strict\";var Dhe=Ac(),Rhe=li();function Fhe(t){return 48<=t&&t<=57||65<=t&&t<=70||97<=t&&t<=102}function Nhe(t){return 48<=t&&t<=55}function Lhe(t){return 48<=t&&t<=57}function The(t){if(t===null)return!1;var e=t.length,r=0,i=!1,n;if(!e)return!1;if(n=t[r],(n===\"-\"||n===\"+\")&&(n=t[++r]),n===\"0\"){if(r+1===e)return!0;if(n=t[++r],n===\"b\"){for(r++;r<e;r++)if(n=t[r],n!==\"_\"){if(n!==\"0\"&&n!==\"1\")return!1;i=!0}return i&&n!==\"_\"}if(n===\"x\"){for(r++;r<e;r++)if(n=t[r],n!==\"_\"){if(!Fhe(t.charCodeAt(r)))return!1;i=!0}return i&&n!==\"_\"}for(;r<e;r++)if(n=t[r],n!==\"_\"){if(!Nhe(t.charCodeAt(r)))return!1;i=!0}return i&&n!==\"_\"}if(n===\"_\")return!1;for(;r<e;r++)if(n=t[r],n!==\"_\"){if(n===\":\")break;if(!Lhe(t.charCodeAt(r)))return!1;i=!0}return!i||n===\"_\"?!1:n!==\":\"?!0:/^(:[0-5]?[0-9])+$/.test(t.slice(r))}function Ohe(t){var e=t,r=1,i,n,s=[];return 
e.indexOf(\"_\")!==-1&&(e=e.replace(/_/g,\"\")),i=e[0],(i===\"-\"||i===\"+\")&&(i===\"-\"&&(r=-1),e=e.slice(1),i=e[0]),e===\"0\"?0:i===\"0\"?e[1]===\"b\"?r*parseInt(e.slice(2),2):e[1]===\"x\"?r*parseInt(e,16):r*parseInt(e,8):e.indexOf(\":\")!==-1?(e.split(\":\").forEach(function(o){s.unshift(parseInt(o,10))}),e=0,n=1,s.forEach(function(o){e+=o*n,n*=60}),r*e):r*parseInt(e,10)}function Mhe(t){return Object.prototype.toString.call(t)===\"[object Number]\"&&t%1==0&&!Dhe.isNegativeZero(t)}U1.exports=new Rhe(\"tag:yaml.org,2002:int\",{kind:\"scalar\",resolve:The,construct:Ohe,predicate:Mhe,represent:{binary:function(t){return t>=0?\"0b\"+t.toString(2):\"-0b\"+t.toString(2).slice(1)},octal:function(t){return t>=0?\"0\"+t.toString(8):\"-0\"+t.toString(8).slice(1)},decimal:function(t){return t.toString(10)},hexadecimal:function(t){return t>=0?\"0x\"+t.toString(16).toUpperCase():\"-0x\"+t.toString(16).toUpperCase().slice(1)}},defaultStyle:\"decimal\",styleAliases:{binary:[2,\"bin\"],octal:[8,\"oct\"],decimal:[10,\"dec\"],hexadecimal:[16,\"hex\"]}})});var G1=w((HZe,H1)=>{\"use strict\";var j1=Ac(),Uhe=li(),Khe=new RegExp(\"^(?:[-+]?(?:0|[1-9][0-9_]*)(?:\\\\.[0-9_]*)?(?:[eE][-+]?[0-9]+)?|\\\\.[0-9_]+(?:[eE][-+]?[0-9]+)?|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\\\.[0-9_]*|[-+]?\\\\.(?:inf|Inf|INF)|\\\\.(?:nan|NaN|NAN))$\");function Hhe(t){return!(t===null||!Khe.test(t)||t[t.length-1]===\"_\")}function jhe(t){var e,r,i,n;return e=t.replace(/_/g,\"\").toLowerCase(),r=e[0]===\"-\"?-1:1,n=[],\"+-\".indexOf(e[0])>=0&&(e=e.slice(1)),e===\".inf\"?r===1?Number.POSITIVE_INFINITY:Number.NEGATIVE_INFINITY:e===\".nan\"?NaN:e.indexOf(\":\")>=0?(e.split(\":\").forEach(function(s){n.unshift(parseFloat(s,10))}),e=0,i=1,n.forEach(function(s){e+=s*i,i*=60}),r*e):r*parseFloat(e,10)}var Ghe=/^[-+]?[0-9]+e/;function Yhe(t,e){var r;if(isNaN(t))switch(e){case\"lowercase\":return\".nan\";case\"uppercase\":return\".NAN\";case\"camelcase\":return\".NaN\"}else if(Number.POSITIVE_INFINITY===t)switch(e){case\"lowercase\":return\".inf\";case\"uppercase\":return\".INF\";case\"camelcase\":return\".Inf\"}else if(Number.NEGATIVE_INFINITY===t)switch(e){case\"lowercase\":return\"-.inf\";case\"uppercase\":return\"-.INF\";case\"camelcase\":return\"-.Inf\"}else if(j1.isNegativeZero(t))return\"-0.0\";return r=t.toString(10),Ghe.test(r)?r.replace(\"e\",\".e\"):r}function qhe(t){return Object.prototype.toString.call(t)===\"[object Number]\"&&(t%1!=0||j1.isNegativeZero(t))}H1.exports=new Uhe(\"tag:yaml.org,2002:float\",{kind:\"scalar\",resolve:Hhe,construct:jhe,predicate:qhe,represent:Yhe,defaultStyle:\"lowercase\"})});var GQ=w((jZe,Y1)=>{\"use strict\";var Jhe=lc();Y1.exports=new Jhe({include:[sI()],implicit:[T1(),M1(),K1(),G1()]})});var YQ=w((GZe,q1)=>{\"use strict\";var Whe=lc();q1.exports=new Whe({include:[GQ()]})});var _1=w((YZe,J1)=>{\"use strict\";var zhe=li(),W1=new RegExp(\"^([0-9][0-9][0-9][0-9])-([0-9][0-9])-([0-9][0-9])$\"),z1=new RegExp(\"^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:[Tt]|[ \\\\t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\\\.([0-9]*))?(?:[ \\\\t]*(Z|([-+])([0-9][0-9]?)(?::([0-9][0-9]))?))?$\");function _he(t){return t===null?!1:W1.exec(t)!==null||z1.exec(t)!==null}function Vhe(t){var e,r,i,n,s,o,a,l=0,c=null,u,g,f;if(e=W1.exec(t),e===null&&(e=z1.exec(t)),e===null)throw new Error(\"Date resolve error\");if(r=+e[1],i=+e[2]-1,n=+e[3],!e[4])return new Date(Date.UTC(r,i,n));if(s=+e[4],o=+e[5],a=+e[6],e[7]){for(l=e[7].slice(0,3);l.length<3;)l+=\"0\";l=+l}return 
e[9]&&(u=+e[10],g=+(e[11]||0),c=(u*60+g)*6e4,e[9]===\"-\"&&(c=-c)),f=new Date(Date.UTC(r,i,n,s,o,a,l)),c&&f.setTime(f.getTime()-c),f}function Xhe(t){return t.toISOString()}J1.exports=new zhe(\"tag:yaml.org,2002:timestamp\",{kind:\"scalar\",resolve:_he,construct:Vhe,instanceOf:Date,represent:Xhe})});var X1=w((qZe,V1)=>{\"use strict\";var Zhe=li();function $he(t){return t===\"<<\"||t===null}V1.exports=new Zhe(\"tag:yaml.org,2002:merge\",{kind:\"scalar\",resolve:$he})});var eU=w((JZe,Z1)=>{\"use strict\";var cc;try{$1=require,cc=$1(\"buffer\").Buffer}catch(t){}var $1,epe=li(),qQ=`ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=\n\\r`;function tpe(t){if(t===null)return!1;var e,r,i=0,n=t.length,s=qQ;for(r=0;r<n;r++)if(e=s.indexOf(t.charAt(r)),!(e>64)){if(e<0)return!1;i+=6}return i%8==0}function rpe(t){var e,r,i=t.replace(/[\\r\\n=]/g,\"\"),n=i.length,s=qQ,o=0,a=[];for(e=0;e<n;e++)e%4==0&&e&&(a.push(o>>16&255),a.push(o>>8&255),a.push(o&255)),o=o<<6|s.indexOf(i.charAt(e));return r=n%4*6,r===0?(a.push(o>>16&255),a.push(o>>8&255),a.push(o&255)):r===18?(a.push(o>>10&255),a.push(o>>2&255)):r===12&&a.push(o>>4&255),cc?cc.from?cc.from(a):new cc(a):a}function ipe(t){var e=\"\",r=0,i,n,s=t.length,o=qQ;for(i=0;i<s;i++)i%3==0&&i&&(e+=o[r>>18&63],e+=o[r>>12&63],e+=o[r>>6&63],e+=o[r&63]),r=(r<<8)+t[i];return n=s%3,n===0?(e+=o[r>>18&63],e+=o[r>>12&63],e+=o[r>>6&63],e+=o[r&63]):n===2?(e+=o[r>>10&63],e+=o[r>>4&63],e+=o[r<<2&63],e+=o[64]):n===1&&(e+=o[r>>2&63],e+=o[r<<4&63],e+=o[64],e+=o[64]),e}function npe(t){return cc&&cc.isBuffer(t)}Z1.exports=new epe(\"tag:yaml.org,2002:binary\",{kind:\"scalar\",resolve:tpe,construct:rpe,predicate:npe,represent:ipe})});var rU=w((WZe,tU)=>{\"use strict\";var spe=li(),ope=Object.prototype.hasOwnProperty,ape=Object.prototype.toString;function Ape(t){if(t===null)return!0;var e=[],r,i,n,s,o,a=t;for(r=0,i=a.length;r<i;r+=1){if(n=a[r],o=!1,ape.call(n)!==\"[object Object]\")return!1;for(s in n)if(ope.call(n,s))if(!o)o=!0;else return!1;if(!o)return!1;if(e.indexOf(s)===-1)e.push(s);else return!1}return!0}function lpe(t){return t!==null?t:[]}tU.exports=new spe(\"tag:yaml.org,2002:omap\",{kind:\"sequence\",resolve:Ape,construct:lpe})});var nU=w((zZe,iU)=>{\"use strict\";var cpe=li(),upe=Object.prototype.toString;function gpe(t){if(t===null)return!0;var e,r,i,n,s,o=t;for(s=new Array(o.length),e=0,r=o.length;e<r;e+=1){if(i=o[e],upe.call(i)!==\"[object Object]\"||(n=Object.keys(i),n.length!==1))return!1;s[e]=[n[0],i[n[0]]]}return!0}function fpe(t){if(t===null)return[];var e,r,i,n,s,o=t;for(s=new Array(o.length),e=0,r=o.length;e<r;e+=1)i=o[e],n=Object.keys(i),s[e]=[n[0],i[n[0]]];return s}iU.exports=new cpe(\"tag:yaml.org,2002:pairs\",{kind:\"sequence\",resolve:gpe,construct:fpe})});var oU=w((_Ze,sU)=>{\"use strict\";var hpe=li(),ppe=Object.prototype.hasOwnProperty;function dpe(t){if(t===null)return!0;var e,r=t;for(e in r)if(ppe.call(r,e)&&r[e]!==null)return!1;return!0}function Cpe(t){return t!==null?t:{}}sU.exports=new hpe(\"tag:yaml.org,2002:set\",{kind:\"mapping\",resolve:dpe,construct:Cpe})});var Zu=w((VZe,aU)=>{\"use strict\";var mpe=lc();aU.exports=new mpe({include:[YQ()],implicit:[_1(),X1()],explicit:[eU(),rU(),nU(),oU()]})});var lU=w((XZe,AU)=>{\"use strict\";var Epe=li();function Ipe(){return!0}function ype(){}function wpe(){return\"\"}function Bpe(t){return typeof t==\"undefined\"}AU.exports=new Epe(\"tag:yaml.org,2002:js/undefined\",{kind:\"scalar\",resolve:Ipe,construct:ype,predicate:Bpe,represent:wpe})});var uU=w((ZZe,cU)=>{\"use strict\";var bpe=li();function 
Qpe(t){if(t===null||t.length===0)return!1;var e=t,r=/\\/([gim]*)$/.exec(t),i=\"\";return!(e[0]===\"/\"&&(r&&(i=r[1]),i.length>3||e[e.length-i.length-1]!==\"/\"))}function vpe(t){var e=t,r=/\\/([gim]*)$/.exec(t),i=\"\";return e[0]===\"/\"&&(r&&(i=r[1]),e=e.slice(1,e.length-i.length-1)),new RegExp(e,i)}function Spe(t){var e=\"/\"+t.source+\"/\";return t.global&&(e+=\"g\"),t.multiline&&(e+=\"m\"),t.ignoreCase&&(e+=\"i\"),e}function kpe(t){return Object.prototype.toString.call(t)===\"[object RegExp]\"}cU.exports=new bpe(\"tag:yaml.org,2002:js/regexp\",{kind:\"scalar\",resolve:Qpe,construct:vpe,predicate:kpe,represent:Spe})});var hU=w(($Ze,gU)=>{\"use strict\";var oI;try{fU=require,oI=fU(\"esprima\")}catch(t){typeof window!=\"undefined\"&&(oI=window.esprima)}var fU,xpe=li();function Ppe(t){if(t===null)return!1;try{var e=\"(\"+t+\")\",r=oI.parse(e,{range:!0});return!(r.type!==\"Program\"||r.body.length!==1||r.body[0].type!==\"ExpressionStatement\"||r.body[0].expression.type!==\"ArrowFunctionExpression\"&&r.body[0].expression.type!==\"FunctionExpression\")}catch(i){return!1}}function Dpe(t){var e=\"(\"+t+\")\",r=oI.parse(e,{range:!0}),i=[],n;if(r.type!==\"Program\"||r.body.length!==1||r.body[0].type!==\"ExpressionStatement\"||r.body[0].expression.type!==\"ArrowFunctionExpression\"&&r.body[0].expression.type!==\"FunctionExpression\")throw new Error(\"Failed to resolve function\");return r.body[0].expression.params.forEach(function(s){i.push(s.name)}),n=r.body[0].expression.body.range,r.body[0].expression.body.type===\"BlockStatement\"?new Function(i,e.slice(n[0]+1,n[1]-1)):new Function(i,\"return \"+e.slice(n[0],n[1]))}function Rpe(t){return t.toString()}function Fpe(t){return Object.prototype.toString.call(t)===\"[object Function]\"}gU.exports=new xpe(\"tag:yaml.org,2002:js/function\",{kind:\"scalar\",resolve:Ppe,construct:Dpe,predicate:Fpe,represent:Rpe})});var tp=w((e$e,pU)=>{\"use strict\";var dU=lc();pU.exports=dU.DEFAULT=new dU({include:[Zu()],explicit:[lU(),uU(),hU()]})});var LU=w((t$e,rp)=>{\"use strict\";var Fa=Ac(),CU=Vu(),Npe=B1(),mU=Zu(),Lpe=tp(),HA=Object.prototype.hasOwnProperty,aI=1,EU=2,IU=3,AI=4,JQ=1,Tpe=2,yU=3,Ope=/[\\x00-\\x08\\x0B\\x0C\\x0E-\\x1F\\x7F-\\x84\\x86-\\x9F\\uFFFE\\uFFFF]|[\\uD800-\\uDBFF](?![\\uDC00-\\uDFFF])|(?:[^\\uD800-\\uDBFF]|^)[\\uDC00-\\uDFFF]/,Mpe=/[\\x85\\u2028\\u2029]/,Upe=/[,\\[\\]\\{\\}]/,wU=/^(?:!|!!|![a-z\\-]+!)$/i,BU=/^(?:!|[^,\\[\\]\\{\\}])(?:%[0-9a-f]{2}|[0-9a-z\\-#;\\/\\?:@&=\\+\\$,_\\.!~\\*'\\(\\)\\[\\]])*$/i;function bU(t){return Object.prototype.toString.call(t)}function Ro(t){return t===10||t===13}function uc(t){return t===9||t===32}function yn(t){return t===9||t===32||t===10||t===13}function $u(t){return t===44||t===91||t===93||t===123||t===125}function Kpe(t){var e;return 48<=t&&t<=57?t-48:(e=t|32,97<=e&&e<=102?e-97+10:-1)}function Hpe(t){return t===120?2:t===117?4:t===85?8:0}function jpe(t){return 48<=t&&t<=57?t-48:-1}function QU(t){return t===48?\"\\0\":t===97?\"\\x07\":t===98?\"\\b\":t===116||t===9?\"\t\":t===110?`\n`:t===118?\"\\v\":t===102?\"\\f\":t===114?\"\\r\":t===101?\"\u001b\":t===32?\" \":t===34?'\"':t===47?\"/\":t===92?\"\\\\\":t===78?\"\\x85\":t===95?\"\\xA0\":t===76?\"\\u2028\":t===80?\"\\u2029\":\"\"}function Gpe(t){return t<=65535?String.fromCharCode(t):String.fromCharCode((t-65536>>10)+55296,(t-65536&1023)+56320)}var vU=new Array(256),SU=new Array(256);for(var eg=0;eg<256;eg++)vU[eg]=QU(eg)?1:0,SU[eg]=QU(eg);function 
Ype(t,e){this.input=t,this.filename=e.filename||null,this.schema=e.schema||Lpe,this.onWarning=e.onWarning||null,this.legacy=e.legacy||!1,this.json=e.json||!1,this.listener=e.listener||null,this.implicitTypes=this.schema.compiledImplicit,this.typeMap=this.schema.compiledTypeMap,this.length=t.length,this.position=0,this.line=0,this.lineStart=0,this.lineIndent=0,this.documents=[]}function kU(t,e){return new CU(e,new Npe(t.filename,t.input,t.position,t.line,t.position-t.lineStart))}function dt(t,e){throw kU(t,e)}function lI(t,e){t.onWarning&&t.onWarning.call(null,kU(t,e))}var xU={YAML:function(e,r,i){var n,s,o;e.version!==null&&dt(e,\"duplication of %YAML directive\"),i.length!==1&&dt(e,\"YAML directive accepts exactly one argument\"),n=/^([0-9]+)\\.([0-9]+)$/.exec(i[0]),n===null&&dt(e,\"ill-formed argument of the YAML directive\"),s=parseInt(n[1],10),o=parseInt(n[2],10),s!==1&&dt(e,\"unacceptable YAML version of the document\"),e.version=i[0],e.checkLineBreaks=o<2,o!==1&&o!==2&&lI(e,\"unsupported YAML version of the document\")},TAG:function(e,r,i){var n,s;i.length!==2&&dt(e,\"TAG directive accepts exactly two arguments\"),n=i[0],s=i[1],wU.test(n)||dt(e,\"ill-formed tag handle (first argument) of the TAG directive\"),HA.call(e.tagMap,n)&&dt(e,'there is a previously declared suffix for \"'+n+'\" tag handle'),BU.test(s)||dt(e,\"ill-formed tag prefix (second argument) of the TAG directive\"),e.tagMap[n]=s}};function jA(t,e,r,i){var n,s,o,a;if(e<r){if(a=t.input.slice(e,r),i)for(n=0,s=a.length;n<s;n+=1)o=a.charCodeAt(n),o===9||32<=o&&o<=1114111||dt(t,\"expected valid JSON character\");else Ope.test(a)&&dt(t,\"the stream contains non-printable characters\");t.result+=a}}function PU(t,e,r,i){var n,s,o,a;for(Fa.isObject(r)||dt(t,\"cannot merge mappings; the provided source object is unacceptable\"),n=Object.keys(r),o=0,a=n.length;o<a;o+=1)s=n[o],HA.call(e,s)||(e[s]=r[s],i[s]=!0)}function tg(t,e,r,i,n,s,o,a){var l,c;if(Array.isArray(n))for(n=Array.prototype.slice.call(n),l=0,c=n.length;l<c;l+=1)Array.isArray(n[l])&&dt(t,\"nested arrays are not supported inside keys\"),typeof n==\"object\"&&bU(n[l])===\"[object Object]\"&&(n[l]=\"[object Object]\");if(typeof n==\"object\"&&bU(n)===\"[object Object]\"&&(n=\"[object Object]\"),n=String(n),e===null&&(e={}),i===\"tag:yaml.org,2002:merge\")if(Array.isArray(s))for(l=0,c=s.length;l<c;l+=1)PU(t,e,s[l],r);else PU(t,e,s,r);else!t.json&&!HA.call(r,n)&&HA.call(e,n)&&(t.line=o||t.line,t.position=a||t.position,dt(t,\"duplicated mapping key\")),e[n]=s,delete r[n];return e}function WQ(t){var e;e=t.input.charCodeAt(t.position),e===10?t.position++:e===13?(t.position++,t.input.charCodeAt(t.position)===10&&t.position++):dt(t,\"a line break is expected\"),t.line+=1,t.lineStart=t.position}function $r(t,e,r){for(var i=0,n=t.input.charCodeAt(t.position);n!==0;){for(;uc(n);)n=t.input.charCodeAt(++t.position);if(e&&n===35)do n=t.input.charCodeAt(++t.position);while(n!==10&&n!==13&&n!==0);if(Ro(n))for(WQ(t),n=t.input.charCodeAt(t.position),i++,t.lineIndent=0;n===32;)t.lineIndent++,n=t.input.charCodeAt(++t.position);else break}return r!==-1&&i!==0&&t.lineIndent<r&&lI(t,\"deficient indentation\"),i}function cI(t){var e=t.position,r;return r=t.input.charCodeAt(e),!!((r===45||r===46)&&r===t.input.charCodeAt(e+1)&&r===t.input.charCodeAt(e+2)&&(e+=3,r=t.input.charCodeAt(e),r===0||yn(r)))}function zQ(t,e){e===1?t.result+=\" \":e>1&&(t.result+=Fa.repeat(`\n`,e-1))}function qpe(t,e,r){var 
i,n,s,o,a,l,c,u,g=t.kind,f=t.result,h;if(h=t.input.charCodeAt(t.position),yn(h)||$u(h)||h===35||h===38||h===42||h===33||h===124||h===62||h===39||h===34||h===37||h===64||h===96||(h===63||h===45)&&(n=t.input.charCodeAt(t.position+1),yn(n)||r&&$u(n)))return!1;for(t.kind=\"scalar\",t.result=\"\",s=o=t.position,a=!1;h!==0;){if(h===58){if(n=t.input.charCodeAt(t.position+1),yn(n)||r&&$u(n))break}else if(h===35){if(i=t.input.charCodeAt(t.position-1),yn(i))break}else{if(t.position===t.lineStart&&cI(t)||r&&$u(h))break;if(Ro(h))if(l=t.line,c=t.lineStart,u=t.lineIndent,$r(t,!1,-1),t.lineIndent>=e){a=!0,h=t.input.charCodeAt(t.position);continue}else{t.position=o,t.line=l,t.lineStart=c,t.lineIndent=u;break}}a&&(jA(t,s,o,!1),zQ(t,t.line-l),s=o=t.position,a=!1),uc(h)||(o=t.position+1),h=t.input.charCodeAt(++t.position)}return jA(t,s,o,!1),t.result?!0:(t.kind=g,t.result=f,!1)}function Jpe(t,e){var r,i,n;if(r=t.input.charCodeAt(t.position),r!==39)return!1;for(t.kind=\"scalar\",t.result=\"\",t.position++,i=n=t.position;(r=t.input.charCodeAt(t.position))!==0;)if(r===39)if(jA(t,i,t.position,!0),r=t.input.charCodeAt(++t.position),r===39)i=t.position,t.position++,n=t.position;else return!0;else Ro(r)?(jA(t,i,n,!0),zQ(t,$r(t,!1,e)),i=n=t.position):t.position===t.lineStart&&cI(t)?dt(t,\"unexpected end of the document within a single quoted scalar\"):(t.position++,n=t.position);dt(t,\"unexpected end of the stream within a single quoted scalar\")}function Wpe(t,e){var r,i,n,s,o,a;if(a=t.input.charCodeAt(t.position),a!==34)return!1;for(t.kind=\"scalar\",t.result=\"\",t.position++,r=i=t.position;(a=t.input.charCodeAt(t.position))!==0;){if(a===34)return jA(t,r,t.position,!0),t.position++,!0;if(a===92){if(jA(t,r,t.position,!0),a=t.input.charCodeAt(++t.position),Ro(a))$r(t,!1,e);else if(a<256&&vU[a])t.result+=SU[a],t.position++;else if((o=Hpe(a))>0){for(n=o,s=0;n>0;n--)a=t.input.charCodeAt(++t.position),(o=Kpe(a))>=0?s=(s<<4)+o:dt(t,\"expected hexadecimal character\");t.result+=Gpe(s),t.position++}else dt(t,\"unknown escape sequence\");r=i=t.position}else Ro(a)?(jA(t,r,i,!0),zQ(t,$r(t,!1,e)),r=i=t.position):t.position===t.lineStart&&cI(t)?dt(t,\"unexpected end of the document within a double quoted scalar\"):(t.position++,i=t.position)}dt(t,\"unexpected end of the stream within a double quoted scalar\")}function zpe(t,e){var r=!0,i,n=t.tag,s,o=t.anchor,a,l,c,u,g,f={},h,p,m,y;if(y=t.input.charCodeAt(t.position),y===91)l=93,g=!1,s=[];else if(y===123)l=125,g=!0,s={};else return!1;for(t.anchor!==null&&(t.anchorMap[t.anchor]=s),y=t.input.charCodeAt(++t.position);y!==0;){if($r(t,!0,e),y=t.input.charCodeAt(t.position),y===l)return t.position++,t.tag=n,t.anchor=o,t.kind=g?\"mapping\":\"sequence\",t.result=s,!0;r||dt(t,\"missed comma between flow collection entries\"),p=h=m=null,c=u=!1,y===63&&(a=t.input.charCodeAt(t.position+1),yn(a)&&(c=u=!0,t.position++,$r(t,!0,e))),i=t.line,rg(t,e,aI,!1,!0),p=t.tag,h=t.result,$r(t,!0,e),y=t.input.charCodeAt(t.position),(u||t.line===i)&&y===58&&(c=!0,y=t.input.charCodeAt(++t.position),$r(t,!0,e),rg(t,e,aI,!1,!0),m=t.result),g?tg(t,s,f,p,h,m):c?s.push(tg(t,null,f,p,h,m)):s.push(h),$r(t,!0,e),y=t.input.charCodeAt(t.position),y===44?(r=!0,y=t.input.charCodeAt(++t.position)):r=!1}dt(t,\"unexpected end of the stream within a flow collection\")}function _pe(t,e){var r,i,n=JQ,s=!1,o=!1,a=e,l=0,c=!1,u,g;if(g=t.input.charCodeAt(t.position),g===124)i=!1;else if(g===62)i=!0;else 
return!1;for(t.kind=\"scalar\",t.result=\"\";g!==0;)if(g=t.input.charCodeAt(++t.position),g===43||g===45)JQ===n?n=g===43?yU:Tpe:dt(t,\"repeat of a chomping mode identifier\");else if((u=jpe(g))>=0)u===0?dt(t,\"bad explicit indentation width of a block scalar; it cannot be less than one\"):o?dt(t,\"repeat of an indentation width identifier\"):(a=e+u-1,o=!0);else break;if(uc(g)){do g=t.input.charCodeAt(++t.position);while(uc(g));if(g===35)do g=t.input.charCodeAt(++t.position);while(!Ro(g)&&g!==0)}for(;g!==0;){for(WQ(t),t.lineIndent=0,g=t.input.charCodeAt(t.position);(!o||t.lineIndent<a)&&g===32;)t.lineIndent++,g=t.input.charCodeAt(++t.position);if(!o&&t.lineIndent>a&&(a=t.lineIndent),Ro(g)){l++;continue}if(t.lineIndent<a){n===yU?t.result+=Fa.repeat(`\n`,s?1+l:l):n===JQ&&s&&(t.result+=`\n`);break}for(i?uc(g)?(c=!0,t.result+=Fa.repeat(`\n`,s?1+l:l)):c?(c=!1,t.result+=Fa.repeat(`\n`,l+1)):l===0?s&&(t.result+=\" \"):t.result+=Fa.repeat(`\n`,l):t.result+=Fa.repeat(`\n`,s?1+l:l),s=!0,o=!0,l=0,r=t.position;!Ro(g)&&g!==0;)g=t.input.charCodeAt(++t.position);jA(t,r,t.position,!1)}return!0}function DU(t,e){var r,i=t.tag,n=t.anchor,s=[],o,a=!1,l;for(t.anchor!==null&&(t.anchorMap[t.anchor]=s),l=t.input.charCodeAt(t.position);l!==0&&!(l!==45||(o=t.input.charCodeAt(t.position+1),!yn(o)));){if(a=!0,t.position++,$r(t,!0,-1)&&t.lineIndent<=e){s.push(null),l=t.input.charCodeAt(t.position);continue}if(r=t.line,rg(t,e,IU,!1,!0),s.push(t.result),$r(t,!0,-1),l=t.input.charCodeAt(t.position),(t.line===r||t.lineIndent>e)&&l!==0)dt(t,\"bad indentation of a sequence entry\");else if(t.lineIndent<e)break}return a?(t.tag=i,t.anchor=n,t.kind=\"sequence\",t.result=s,!0):!1}function Vpe(t,e,r){var i,n,s,o,a=t.tag,l=t.anchor,c={},u={},g=null,f=null,h=null,p=!1,m=!1,y;for(t.anchor!==null&&(t.anchorMap[t.anchor]=c),y=t.input.charCodeAt(t.position);y!==0;){if(i=t.input.charCodeAt(t.position+1),s=t.line,o=t.position,(y===63||y===58)&&yn(i))y===63?(p&&(tg(t,c,u,g,f,null),g=f=h=null),m=!0,p=!0,n=!0):p?(p=!1,n=!0):dt(t,\"incomplete explicit mapping pair; a key node is missed; or followed by a non-tabulated empty line\"),t.position+=1,y=i;else if(rg(t,r,EU,!1,!0))if(t.line===s){for(y=t.input.charCodeAt(t.position);uc(y);)y=t.input.charCodeAt(++t.position);if(y===58)y=t.input.charCodeAt(++t.position),yn(y)||dt(t,\"a whitespace character is expected after the key-value separator within a block mapping\"),p&&(tg(t,c,u,g,f,null),g=f=h=null),m=!0,p=!1,n=!1,g=t.tag,f=t.result;else if(m)dt(t,\"can not read an implicit mapping pair; a colon is missed\");else return t.tag=a,t.anchor=l,!0}else if(m)dt(t,\"can not read a block mapping entry; a multiline key may not be an implicit key\");else return t.tag=a,t.anchor=l,!0;else break;if((t.line===s||t.lineIndent>e)&&(rg(t,e,AI,!0,n)&&(p?f=t.result:h=t.result),p||(tg(t,c,u,g,f,h,s,o),g=f=h=null),$r(t,!0,-1),y=t.input.charCodeAt(t.position)),t.lineIndent>e&&y!==0)dt(t,\"bad indentation of a mapping entry\");else if(t.lineIndent<e)break}return p&&tg(t,c,u,g,f,null),m&&(t.tag=a,t.anchor=l,t.kind=\"mapping\",t.result=c),m}function Xpe(t){var e,r=!1,i=!1,n,s,o;if(o=t.input.charCodeAt(t.position),o!==33)return!1;if(t.tag!==null&&dt(t,\"duplication of a tag property\"),o=t.input.charCodeAt(++t.position),o===60?(r=!0,o=t.input.charCodeAt(++t.position)):o===33?(i=!0,n=\"!!\",o=t.input.charCodeAt(++t.position)):n=\"!\",e=t.position,r){do o=t.input.charCodeAt(++t.position);while(o!==0&&o!==62);t.position<t.length?(s=t.input.slice(e,t.position),o=t.input.charCodeAt(++t.position)):dt(t,\"unexpected end of the 
stream within a verbatim tag\")}else{for(;o!==0&&!yn(o);)o===33&&(i?dt(t,\"tag suffix cannot contain exclamation marks\"):(n=t.input.slice(e-1,t.position+1),wU.test(n)||dt(t,\"named tag handle cannot contain such characters\"),i=!0,e=t.position+1)),o=t.input.charCodeAt(++t.position);s=t.input.slice(e,t.position),Upe.test(s)&&dt(t,\"tag suffix cannot contain flow indicator characters\")}return s&&!BU.test(s)&&dt(t,\"tag name cannot contain such characters: \"+s),r?t.tag=s:HA.call(t.tagMap,n)?t.tag=t.tagMap[n]+s:n===\"!\"?t.tag=\"!\"+s:n===\"!!\"?t.tag=\"tag:yaml.org,2002:\"+s:dt(t,'undeclared tag handle \"'+n+'\"'),!0}function Zpe(t){var e,r;if(r=t.input.charCodeAt(t.position),r!==38)return!1;for(t.anchor!==null&&dt(t,\"duplication of an anchor property\"),r=t.input.charCodeAt(++t.position),e=t.position;r!==0&&!yn(r)&&!$u(r);)r=t.input.charCodeAt(++t.position);return t.position===e&&dt(t,\"name of an anchor node must contain at least one character\"),t.anchor=t.input.slice(e,t.position),!0}function $pe(t){var e,r,i;if(i=t.input.charCodeAt(t.position),i!==42)return!1;for(i=t.input.charCodeAt(++t.position),e=t.position;i!==0&&!yn(i)&&!$u(i);)i=t.input.charCodeAt(++t.position);return t.position===e&&dt(t,\"name of an alias node must contain at least one character\"),r=t.input.slice(e,t.position),HA.call(t.anchorMap,r)||dt(t,'unidentified alias \"'+r+'\"'),t.result=t.anchorMap[r],$r(t,!0,-1),!0}function rg(t,e,r,i,n){var s,o,a,l=1,c=!1,u=!1,g,f,h,p,m;if(t.listener!==null&&t.listener(\"open\",t),t.tag=null,t.anchor=null,t.kind=null,t.result=null,s=o=a=AI===r||IU===r,i&&$r(t,!0,-1)&&(c=!0,t.lineIndent>e?l=1:t.lineIndent===e?l=0:t.lineIndent<e&&(l=-1)),l===1)for(;Xpe(t)||Zpe(t);)$r(t,!0,-1)?(c=!0,a=s,t.lineIndent>e?l=1:t.lineIndent===e?l=0:t.lineIndent<e&&(l=-1)):a=!1;if(a&&(a=c||n),(l===1||AI===r)&&(aI===r||EU===r?p=e:p=e+1,m=t.position-t.lineStart,l===1?a&&(DU(t,m)||Vpe(t,m,p))||zpe(t,p)?u=!0:(o&&_pe(t,p)||Jpe(t,p)||Wpe(t,p)?u=!0:$pe(t)?(u=!0,(t.tag!==null||t.anchor!==null)&&dt(t,\"alias node should not have any properties\")):qpe(t,p,aI===r)&&(u=!0,t.tag===null&&(t.tag=\"?\")),t.anchor!==null&&(t.anchorMap[t.anchor]=t.result)):l===0&&(u=a&&DU(t,m))),t.tag!==null&&t.tag!==\"!\")if(t.tag===\"?\"){for(t.result!==null&&t.kind!==\"scalar\"&&dt(t,'unacceptable node kind for !<?> tag; it should be \"scalar\", not \"'+t.kind+'\"'),g=0,f=t.implicitTypes.length;g<f;g+=1)if(h=t.implicitTypes[g],h.resolve(t.result)){t.result=h.construct(t.result),t.tag=h.tag,t.anchor!==null&&(t.anchorMap[t.anchor]=t.result);break}}else HA.call(t.typeMap[t.kind||\"fallback\"],t.tag)?(h=t.typeMap[t.kind||\"fallback\"][t.tag],t.result!==null&&h.kind!==t.kind&&dt(t,\"unacceptable node kind for !<\"+t.tag+'> tag; it should be \"'+h.kind+'\", not \"'+t.kind+'\"'),h.resolve(t.result)?(t.result=h.construct(t.result),t.anchor!==null&&(t.anchorMap[t.anchor]=t.result)):dt(t,\"cannot resolve a node with !<\"+t.tag+\"> explicit tag\")):dt(t,\"unknown tag !<\"+t.tag+\">\");return t.listener!==null&&t.listener(\"close\",t),t.tag!==null||t.anchor!==null||u}function ede(t){var e=t.position,r,i,n,s=!1,o;for(t.version=null,t.checkLineBreaks=t.legacy,t.tagMap={},t.anchorMap={};(o=t.input.charCodeAt(t.position))!==0&&($r(t,!0,-1),o=t.input.charCodeAt(t.position),!(t.lineIndent>0||o!==37));){for(s=!0,o=t.input.charCodeAt(++t.position),r=t.position;o!==0&&!yn(o);)o=t.input.charCodeAt(++t.position);for(i=t.input.slice(r,t.position),n=[],i.length<1&&dt(t,\"directive name must not be less than one character in 
length\");o!==0;){for(;uc(o);)o=t.input.charCodeAt(++t.position);if(o===35){do o=t.input.charCodeAt(++t.position);while(o!==0&&!Ro(o));break}if(Ro(o))break;for(r=t.position;o!==0&&!yn(o);)o=t.input.charCodeAt(++t.position);n.push(t.input.slice(r,t.position))}o!==0&&WQ(t),HA.call(xU,i)?xU[i](t,i,n):lI(t,'unknown document directive \"'+i+'\"')}if($r(t,!0,-1),t.lineIndent===0&&t.input.charCodeAt(t.position)===45&&t.input.charCodeAt(t.position+1)===45&&t.input.charCodeAt(t.position+2)===45?(t.position+=3,$r(t,!0,-1)):s&&dt(t,\"directives end mark is expected\"),rg(t,t.lineIndent-1,AI,!1,!0),$r(t,!0,-1),t.checkLineBreaks&&Mpe.test(t.input.slice(e,t.position))&&lI(t,\"non-ASCII line breaks are interpreted as content\"),t.documents.push(t.result),t.position===t.lineStart&&cI(t)){t.input.charCodeAt(t.position)===46&&(t.position+=3,$r(t,!0,-1));return}if(t.position<t.length-1)dt(t,\"end of the stream or a document separator is expected\");else return}function RU(t,e){t=String(t),e=e||{},t.length!==0&&(t.charCodeAt(t.length-1)!==10&&t.charCodeAt(t.length-1)!==13&&(t+=`\n`),t.charCodeAt(0)===65279&&(t=t.slice(1)));var r=new Ype(t,e),i=t.indexOf(\"\\0\");for(i!==-1&&(r.position=i,dt(r,\"null byte is not allowed in input\")),r.input+=\"\\0\";r.input.charCodeAt(r.position)===32;)r.lineIndent+=1,r.position+=1;for(;r.position<r.length-1;)ede(r);return r.documents}function FU(t,e,r){e!==null&&typeof e==\"object\"&&typeof r==\"undefined\"&&(r=e,e=null);var i=RU(t,r);if(typeof e!=\"function\")return i;for(var n=0,s=i.length;n<s;n+=1)e(i[n])}function NU(t,e){var r=RU(t,e);if(r.length!==0){if(r.length===1)return r[0];throw new CU(\"expected a single document in the stream, but found more\")}}function tde(t,e,r){return typeof e==\"object\"&&e!==null&&typeof r==\"undefined\"&&(r=e,e=null),FU(t,e,Fa.extend({schema:mU},r))}function rde(t,e){return NU(t,Fa.extend({schema:mU},e))}rp.exports.loadAll=FU;rp.exports.load=NU;rp.exports.safeLoadAll=tde;rp.exports.safeLoad=rde});var nK=w((r$e,_Q)=>{\"use strict\";var ip=Ac(),np=Vu(),ide=tp(),nde=Zu(),TU=Object.prototype.toString,OU=Object.prototype.hasOwnProperty,sde=9,sp=10,ode=13,ade=32,Ade=33,lde=34,MU=35,cde=37,ude=38,gde=39,fde=42,UU=44,hde=45,KU=58,pde=61,dde=62,Cde=63,mde=64,HU=91,jU=93,Ede=96,GU=123,Ide=124,YU=125,Ui={};Ui[0]=\"\\\\0\";Ui[7]=\"\\\\a\";Ui[8]=\"\\\\b\";Ui[9]=\"\\\\t\";Ui[10]=\"\\\\n\";Ui[11]=\"\\\\v\";Ui[12]=\"\\\\f\";Ui[13]=\"\\\\r\";Ui[27]=\"\\\\e\";Ui[34]='\\\\\"';Ui[92]=\"\\\\\\\\\";Ui[133]=\"\\\\N\";Ui[160]=\"\\\\_\";Ui[8232]=\"\\\\L\";Ui[8233]=\"\\\\P\";var yde=[\"y\",\"Y\",\"yes\",\"Yes\",\"YES\",\"on\",\"On\",\"ON\",\"n\",\"N\",\"no\",\"No\",\"NO\",\"off\",\"Off\",\"OFF\"];function wde(t,e){var r,i,n,s,o,a,l;if(e===null)return{};for(r={},i=Object.keys(e),n=0,s=i.length;n<s;n+=1)o=i[n],a=String(e[o]),o.slice(0,2)===\"!!\"&&(o=\"tag:yaml.org,2002:\"+o.slice(2)),l=t.compiledTypeMap.fallback[o],l&&OU.call(l.styleAliases,a)&&(a=l.styleAliases[a]),r[o]=a;return r}function qU(t){var e,r,i;if(e=t.toString(16).toUpperCase(),t<=255)r=\"x\",i=2;else if(t<=65535)r=\"u\",i=4;else if(t<=4294967295)r=\"U\",i=8;else throw new np(\"code point within a string may not be greater than 0xFFFFFFFF\");return\"\\\\\"+r+ip.repeat(\"0\",i-e.length)+e}function 
Bde(t){this.schema=t.schema||ide,this.indent=Math.max(1,t.indent||2),this.noArrayIndent=t.noArrayIndent||!1,this.skipInvalid=t.skipInvalid||!1,this.flowLevel=ip.isNothing(t.flowLevel)?-1:t.flowLevel,this.styleMap=wde(this.schema,t.styles||null),this.sortKeys=t.sortKeys||!1,this.lineWidth=t.lineWidth||80,this.noRefs=t.noRefs||!1,this.noCompatMode=t.noCompatMode||!1,this.condenseFlow=t.condenseFlow||!1,this.implicitTypes=this.schema.compiledImplicit,this.explicitTypes=this.schema.compiledExplicit,this.tag=null,this.result=\"\",this.duplicates=[],this.usedDuplicates=null}function JU(t,e){for(var r=ip.repeat(\" \",e),i=0,n=-1,s=\"\",o,a=t.length;i<a;)n=t.indexOf(`\n`,i),n===-1?(o=t.slice(i),i=a):(o=t.slice(i,n+1),i=n+1),o.length&&o!==`\n`&&(s+=r),s+=o;return s}function VQ(t,e){return`\n`+ip.repeat(\" \",t.indent*e)}function bde(t,e){var r,i,n;for(r=0,i=t.implicitTypes.length;r<i;r+=1)if(n=t.implicitTypes[r],n.resolve(e))return!0;return!1}function XQ(t){return t===ade||t===sde}function ig(t){return 32<=t&&t<=126||161<=t&&t<=55295&&t!==8232&&t!==8233||57344<=t&&t<=65533&&t!==65279||65536<=t&&t<=1114111}function Qde(t){return ig(t)&&!XQ(t)&&t!==65279&&t!==ode&&t!==sp}function WU(t,e){return ig(t)&&t!==65279&&t!==UU&&t!==HU&&t!==jU&&t!==GU&&t!==YU&&t!==KU&&(t!==MU||e&&Qde(e))}function vde(t){return ig(t)&&t!==65279&&!XQ(t)&&t!==hde&&t!==Cde&&t!==KU&&t!==UU&&t!==HU&&t!==jU&&t!==GU&&t!==YU&&t!==MU&&t!==ude&&t!==fde&&t!==Ade&&t!==Ide&&t!==pde&&t!==dde&&t!==gde&&t!==lde&&t!==cde&&t!==mde&&t!==Ede}function zU(t){var e=/^\\n* /;return e.test(t)}var _U=1,VU=2,XU=3,ZU=4,uI=5;function Sde(t,e,r,i,n){var s,o,a,l=!1,c=!1,u=i!==-1,g=-1,f=vde(t.charCodeAt(0))&&!XQ(t.charCodeAt(t.length-1));if(e)for(s=0;s<t.length;s++){if(o=t.charCodeAt(s),!ig(o))return uI;a=s>0?t.charCodeAt(s-1):null,f=f&&WU(o,a)}else{for(s=0;s<t.length;s++){if(o=t.charCodeAt(s),o===sp)l=!0,u&&(c=c||s-g-1>i&&t[g+1]!==\" \",g=s);else if(!ig(o))return uI;a=s>0?t.charCodeAt(s-1):null,f=f&&WU(o,a)}c=c||u&&s-g-1>i&&t[g+1]!==\" \"}return!l&&!c?f&&!n(t)?_U:VU:r>9&&zU(t)?uI:c?ZU:XU}function Pde(t,e,r,i){t.dump=function(){if(e.length===0)return\"''\";if(!t.noCompatMode&&yde.indexOf(e)!==-1)return\"'\"+e+\"'\";var n=t.indent*Math.max(1,r),s=t.lineWidth===-1?-1:Math.max(Math.min(t.lineWidth,40),t.lineWidth-n),o=i||t.flowLevel>-1&&r>=t.flowLevel;function a(l){return bde(t,l)}switch(Sde(e,o,t.indent,s,a)){case _U:return e;case VU:return\"'\"+e.replace(/'/g,\"''\")+\"'\";case XU:return\"|\"+$U(e,t.indent)+eK(JU(e,n));case ZU:return\">\"+$U(e,t.indent)+eK(JU(kde(e,s),n));case uI:return'\"'+xde(e,s)+'\"';default:throw new np(\"impossible error: invalid scalar style\")}}()}function $U(t,e){var r=zU(t)?String(e):\"\",i=t[t.length-1]===`\n`,n=i&&(t[t.length-2]===`\n`||t===`\n`),s=n?\"+\":i?\"\":\"-\";return r+s+`\n`}function eK(t){return t[t.length-1]===`\n`?t.slice(0,-1):t}function kde(t,e){for(var r=/(\\n+)([^\\n]*)/g,i=function(){var c=t.indexOf(`\n`);return c=c!==-1?c:t.length,r.lastIndex=c,tK(t.slice(0,c),e)}(),n=t[0]===`\n`||t[0]===\" \",s,o;o=r.exec(t);){var a=o[1],l=o[2];s=l[0]===\" \",i+=a+(!n&&!s&&l!==\"\"?`\n`:\"\")+tK(l,e),n=s}return i}function tK(t,e){if(t===\"\"||t[0]===\" \")return t;for(var r=/ [^ ]/g,i,n=0,s,o=0,a=0,l=\"\";i=r.exec(t);)a=i.index,a-n>e&&(s=o>n?o:a,l+=`\n`+t.slice(n,s),n=s+1),o=a;return l+=`\n`,t.length-n>e&&o>n?l+=t.slice(n,o)+`\n`+t.slice(o+1):l+=t.slice(n),l.slice(1)}function xde(t){for(var 
e=\"\",r,i,n,s=0;s<t.length;s++){if(r=t.charCodeAt(s),r>=55296&&r<=56319&&(i=t.charCodeAt(s+1),i>=56320&&i<=57343)){e+=qU((r-55296)*1024+i-56320+65536),s++;continue}n=Ui[r],e+=!n&&ig(r)?t[s]:n||qU(r)}return e}function Dde(t,e,r){var i=\"\",n=t.tag,s,o;for(s=0,o=r.length;s<o;s+=1)gc(t,e,r[s],!1,!1)&&(s!==0&&(i+=\",\"+(t.condenseFlow?\"\":\" \")),i+=t.dump);t.tag=n,t.dump=\"[\"+i+\"]\"}function Rde(t,e,r,i){var n=\"\",s=t.tag,o,a;for(o=0,a=r.length;o<a;o+=1)gc(t,e+1,r[o],!0,!0)&&((!i||o!==0)&&(n+=VQ(t,e)),t.dump&&sp===t.dump.charCodeAt(0)?n+=\"-\":n+=\"- \",n+=t.dump);t.tag=s,t.dump=n||\"[]\"}function Fde(t,e,r){var i=\"\",n=t.tag,s=Object.keys(r),o,a,l,c,u;for(o=0,a=s.length;o<a;o+=1)u=\"\",o!==0&&(u+=\", \"),t.condenseFlow&&(u+='\"'),l=s[o],c=r[l],!!gc(t,e,l,!1,!1)&&(t.dump.length>1024&&(u+=\"? \"),u+=t.dump+(t.condenseFlow?'\"':\"\")+\":\"+(t.condenseFlow?\"\":\" \"),!!gc(t,e,c,!1,!1)&&(u+=t.dump,i+=u));t.tag=n,t.dump=\"{\"+i+\"}\"}function Nde(t,e,r,i){var n=\"\",s=t.tag,o=Object.keys(r),a,l,c,u,g,f;if(t.sortKeys===!0)o.sort();else if(typeof t.sortKeys==\"function\")o.sort(t.sortKeys);else if(t.sortKeys)throw new np(\"sortKeys must be a boolean or a function\");for(a=0,l=o.length;a<l;a+=1)f=\"\",(!i||a!==0)&&(f+=VQ(t,e)),c=o[a],u=r[c],!!gc(t,e+1,c,!0,!0,!0)&&(g=t.tag!==null&&t.tag!==\"?\"||t.dump&&t.dump.length>1024,g&&(t.dump&&sp===t.dump.charCodeAt(0)?f+=\"?\":f+=\"? \"),f+=t.dump,g&&(f+=VQ(t,e)),!!gc(t,e+1,u,!0,g)&&(t.dump&&sp===t.dump.charCodeAt(0)?f+=\":\":f+=\": \",f+=t.dump,n+=f));t.tag=s,t.dump=n||\"{}\"}function rK(t,e,r){var i,n,s,o,a,l;for(n=r?t.explicitTypes:t.implicitTypes,s=0,o=n.length;s<o;s+=1)if(a=n[s],(a.instanceOf||a.predicate)&&(!a.instanceOf||typeof e==\"object\"&&e instanceof a.instanceOf)&&(!a.predicate||a.predicate(e))){if(t.tag=r?a.tag:\"?\",a.represent){if(l=t.styleMap[a.tag]||a.defaultStyle,TU.call(a.represent)===\"[object Function]\")i=a.represent(e,l);else if(OU.call(a.represent,l))i=a.represent[l](e,l);else throw new np(\"!<\"+a.tag+'> tag resolver accepts not \"'+l+'\" style');t.dump=i}return!0}return!1}function gc(t,e,r,i,n,s){t.tag=null,t.dump=r,rK(t,r,!1)||rK(t,r,!0);var o=TU.call(t.dump);i&&(i=t.flowLevel<0||t.flowLevel>e);var a=o===\"[object Object]\"||o===\"[object Array]\",l,c;if(a&&(l=t.duplicates.indexOf(r),c=l!==-1),(t.tag!==null&&t.tag!==\"?\"||c||t.indent!==2&&e>0)&&(n=!1),c&&t.usedDuplicates[l])t.dump=\"*ref_\"+l;else{if(a&&c&&!t.usedDuplicates[l]&&(t.usedDuplicates[l]=!0),o===\"[object Object]\")i&&Object.keys(t.dump).length!==0?(Nde(t,e,t.dump,n),c&&(t.dump=\"&ref_\"+l+t.dump)):(Fde(t,e,t.dump),c&&(t.dump=\"&ref_\"+l+\" \"+t.dump));else if(o===\"[object Array]\"){var u=t.noArrayIndent&&e>0?e-1:e;i&&t.dump.length!==0?(Rde(t,u,t.dump,n),c&&(t.dump=\"&ref_\"+l+t.dump)):(Dde(t,u,t.dump),c&&(t.dump=\"&ref_\"+l+\" \"+t.dump))}else if(o===\"[object String]\")t.tag!==\"?\"&&Pde(t,t.dump,e,s);else{if(t.skipInvalid)return!1;throw new np(\"unacceptable kind of an object to dump \"+o)}t.tag!==null&&t.tag!==\"?\"&&(t.dump=\"!<\"+t.tag+\"> \"+t.dump)}return!0}function Lde(t,e){var r=[],i=[],n,s;for(ZQ(t,r,i),n=0,s=i.length;n<s;n+=1)e.duplicates.push(r[i[n]]);e.usedDuplicates=new Array(s)}function ZQ(t,e,r){var i,n,s;if(t!==null&&typeof t==\"object\")if(n=e.indexOf(t),n!==-1)r.indexOf(n)===-1&&r.push(n);else if(e.push(t),Array.isArray(t))for(n=0,s=t.length;n<s;n+=1)ZQ(t[n],e,r);else for(i=Object.keys(t),n=0,s=i.length;n<s;n+=1)ZQ(t[i[n]],e,r)}function iK(t,e){e=e||{};var r=new Bde(e);return r.noRefs||Lde(t,r),gc(r,0,t,!0,!0)?r.dump+`\n`:\"\"}function 
Tde(t,e){return iK(t,ip.extend({schema:nde},e))}_Q.exports.dump=iK;_Q.exports.safeDump=Tde});var oK=w((i$e,Or)=>{\"use strict\";var gI=LU(),sK=nK();function fI(t){return function(){throw new Error(\"Function \"+t+\" is deprecated and cannot be used.\")}}Or.exports.Type=li();Or.exports.Schema=lc();Or.exports.FAILSAFE_SCHEMA=sI();Or.exports.JSON_SCHEMA=GQ();Or.exports.CORE_SCHEMA=YQ();Or.exports.DEFAULT_SAFE_SCHEMA=Zu();Or.exports.DEFAULT_FULL_SCHEMA=tp();Or.exports.load=gI.load;Or.exports.loadAll=gI.loadAll;Or.exports.safeLoad=gI.safeLoad;Or.exports.safeLoadAll=gI.safeLoadAll;Or.exports.dump=sK.dump;Or.exports.safeDump=sK.safeDump;Or.exports.YAMLException=Vu();Or.exports.MINIMAL_SCHEMA=sI();Or.exports.SAFE_SCHEMA=Zu();Or.exports.DEFAULT_SCHEMA=tp();Or.exports.scan=fI(\"scan\");Or.exports.parse=fI(\"parse\");Or.exports.compose=fI(\"compose\");Or.exports.addConstructor=fI(\"addConstructor\")});var AK=w((n$e,aK)=>{\"use strict\";var Ode=oK();aK.exports=Ode});var cK=w((s$e,lK)=>{\"use strict\";function Mde(t,e){function r(){this.constructor=t}r.prototype=e.prototype,t.prototype=new r}function fc(t,e,r,i){this.message=t,this.expected=e,this.found=r,this.location=i,this.name=\"SyntaxError\",typeof Error.captureStackTrace==\"function\"&&Error.captureStackTrace(this,fc)}Mde(fc,Error);fc.buildMessage=function(t,e){var r={literal:function(c){return'\"'+n(c.text)+'\"'},class:function(c){var u=\"\",g;for(g=0;g<c.parts.length;g++)u+=c.parts[g]instanceof Array?s(c.parts[g][0])+\"-\"+s(c.parts[g][1]):s(c.parts[g]);return\"[\"+(c.inverted?\"^\":\"\")+u+\"]\"},any:function(c){return\"any character\"},end:function(c){return\"end of input\"},other:function(c){return c.description}};function i(c){return c.charCodeAt(0).toString(16).toUpperCase()}function n(c){return c.replace(/\\\\/g,\"\\\\\\\\\").replace(/\"/g,'\\\\\"').replace(/\\0/g,\"\\\\0\").replace(/\\t/g,\"\\\\t\").replace(/\\n/g,\"\\\\n\").replace(/\\r/g,\"\\\\r\").replace(/[\\x00-\\x0F]/g,function(u){return\"\\\\x0\"+i(u)}).replace(/[\\x10-\\x1F\\x7F-\\x9F]/g,function(u){return\"\\\\x\"+i(u)})}function s(c){return c.replace(/\\\\/g,\"\\\\\\\\\").replace(/\\]/g,\"\\\\]\").replace(/\\^/g,\"\\\\^\").replace(/-/g,\"\\\\-\").replace(/\\0/g,\"\\\\0\").replace(/\\t/g,\"\\\\t\").replace(/\\n/g,\"\\\\n\").replace(/\\r/g,\"\\\\r\").replace(/[\\x00-\\x0F]/g,function(u){return\"\\\\x0\"+i(u)}).replace(/[\\x10-\\x1F\\x7F-\\x9F]/g,function(u){return\"\\\\x\"+i(u)})}function o(c){return r[c.type](c)}function a(c){var u=new Array(c.length),g,f;for(g=0;g<c.length;g++)u[g]=o(c[g]);if(u.sort(),u.length>0){for(g=1,f=1;g<u.length;g++)u[g-1]!==u[g]&&(u[f]=u[g],f++);u.length=f}switch(u.length){case 1:return u[0];case 2:return u[0]+\" or \"+u[1];default:return u.slice(0,-1).join(\", \")+\", or \"+u[u.length-1]}}function l(c){return c?'\"'+n(c)+'\"':\"end of input\"}return\"Expected \"+a(t)+\" but \"+l(e)+\" found.\"};function Ude(t,e){e=e!==void 0?e:{};var r={},i={Start:Xs},n=Xs,s=function(R){return[].concat(...R)},o=\"-\",a=gr(\"-\",!1),l=function(R){return R},c=function(R){return Object.assign({},...R)},u=\"#\",g=gr(\"#\",!1),f=Jl(),h=function(){return{}},p=\":\",m=gr(\":\",!1),y=function(R,q){return{[R]:q}},Q=\",\",S=gr(\",\",!1),x=function(R,q){return q},M=function(R,q,de){return Object.assign({},...[R].concat(q).map(He=>({[He]:de})))},Y=function(R){return R},U=function(R){return R},J=Vs(\"correct indentation\"),W=\" \",ee=gr(\" \",!1),Z=function(R){return R.length===LA*Gu},A=function(R){return R.length===(LA+1)*Gu},ne=function(){return LA++,!0},le=function(){return 
LA--,!0},Ae=function(){return Ou()},T=Vs(\"pseudostring\"),L=/^[^\\r\\n\\t ?:,\\][{}#&*!|>'\"%@`\\-]/,Ee=Yn([\"\\r\",`\n`,\"\t\",\" \",\"?\",\":\",\",\",\"]\",\"[\",\"{\",\"}\",\"#\",\"&\",\"*\",\"!\",\"|\",\">\",\"'\",'\"',\"%\",\"@\",\"`\",\"-\"],!0,!1),we=/^[^\\r\\n\\t ,\\][{}:#\"']/,qe=Yn([\"\\r\",`\n`,\"\t\",\" \",\",\",\"]\",\"[\",\"{\",\"}\",\":\",\"#\",'\"',\"'\"],!0,!1),re=function(){return Ou().replace(/^ *| *$/g,\"\")},se=\"--\",Qe=gr(\"--\",!1),he=/^[a-zA-Z\\/0-9]/,Fe=Yn([[\"a\",\"z\"],[\"A\",\"Z\"],\"/\",[\"0\",\"9\"]],!1,!1),Ue=/^[^\\r\\n\\t :,]/,xe=Yn([\"\\r\",`\n`,\"\t\",\" \",\":\",\",\"],!0,!1),ve=\"null\",pe=gr(\"null\",!1),X=function(){return null},be=\"true\",ce=gr(\"true\",!1),fe=function(){return!0},gt=\"false\",Ht=gr(\"false\",!1),Mt=function(){return!1},mi=Vs(\"string\"),jt='\"',Qr=gr('\"',!1),Ti=function(){return\"\"},_s=function(R){return R},Un=function(R){return R.join(\"\")},Kn=/^[^\"\\\\\\0-\\x1F\\x7F]/,vr=Yn(['\"',\"\\\\\",[\"\\0\",\"\u001f\"],\"\\x7F\"],!0,!1),Hn='\\\\\"',us=gr('\\\\\"',!1),Ia=function(){return'\"'},SA=\"\\\\\\\\\",Du=gr(\"\\\\\\\\\",!1),gs=function(){return\"\\\\\"},kA=\"\\\\/\",ya=gr(\"\\\\/\",!1),Ru=function(){return\"/\"},xA=\"\\\\b\",PA=gr(\"\\\\b\",!1),Sr=function(){return\"\\b\"},jl=\"\\\\f\",Fu=gr(\"\\\\f\",!1),So=function(){return\"\\f\"},Nu=\"\\\\n\",Qh=gr(\"\\\\n\",!1),vh=function(){return`\n`},oe=\"\\\\r\",Oi=gr(\"\\\\r\",!1),ko=function(){return\"\\r\"},jn=\"\\\\t\",Lu=gr(\"\\\\t\",!1),vt=function(){return\"\t\"},Gl=\"\\\\u\",Gn=gr(\"\\\\u\",!1),fs=function(R,q,de,He){return String.fromCharCode(parseInt(`0x${R}${q}${de}${He}`))},hs=/^[0-9a-fA-F]/,pt=Yn([[\"0\",\"9\"],[\"a\",\"f\"],[\"A\",\"F\"]],!1,!1),xo=Vs(\"blank space\"),lt=/^[ \\t]/,mn=Yn([\" \",\"\t\"],!1,!1),v=Vs(\"white space\"),Tt=/^[ \\t\\n\\r]/,Tu=Yn([\" \",\"\t\",`\n`,\"\\r\"],!1,!1),Yl=`\\r\n`,Sh=gr(`\\r\n`,!1),kh=`\n`,xh=gr(`\n`,!1),Ph=\"\\r\",Dh=gr(\"\\r\",!1),G=0,yt=0,DA=[{line:1,column:1}],$i=0,ql=[],$e=0,wa;if(\"startRule\"in e){if(!(e.startRule in i))throw new Error(`Can't start parsing from rule \"`+e.startRule+'\".');n=i[e.startRule]}function Ou(){return t.substring(yt,G)}function SE(){return En(yt,G)}function Rh(R,q){throw q=q!==void 0?q:En(yt,G),Wl([Vs(R)],t.substring(yt,G),q)}function kE(R,q){throw q=q!==void 0?q:En(yt,G),Mu(R,q)}function gr(R,q){return{type:\"literal\",text:R,ignoreCase:q}}function Yn(R,q,de){return{type:\"class\",parts:R,inverted:q,ignoreCase:de}}function Jl(){return{type:\"any\"}}function Fh(){return{type:\"end\"}}function Vs(R){return{type:\"other\",description:R}}function Ba(R){var q=DA[R],de;if(q)return q;for(de=R-1;!DA[de];)de--;for(q=DA[de],q={line:q.line,column:q.column};de<R;)t.charCodeAt(de)===10?(q.line++,q.column=1):q.column++,de++;return DA[R]=q,q}function En(R,q){var de=Ba(R),He=Ba(q);return{start:{offset:R,line:de.line,column:de.column},end:{offset:q,line:He.line,column:He.column}}}function Oe(R){G<$i||(G>$i&&($i=G,ql=[]),ql.push(R))}function Mu(R,q){return new fc(R,null,null,q)}function Wl(R,q,de){return new fc(fc.buildMessage(R,q),R,q,de)}function Xs(){var R;return R=Uu(),R}function zl(){var R,q,de;for(R=G,q=[],de=RA();de!==r;)q.push(de),de=RA();return q!==r&&(yt=R,q=s(q)),R=q,R}function RA(){var R,q,de,He,Te;return R=G,q=Qa(),q!==r?(t.charCodeAt(G)===45?(de=o,G++):(de=r,$e===0&&Oe(a)),de!==r?(He=Lr(),He!==r?(Te=ba(),Te!==r?(yt=R,q=l(Te),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r),R}function Uu(){var R,q,de;for(R=G,q=[],de=Ku();de!==r;)q.push(de),de=Ku();return q!==r&&(yt=R,q=c(q)),R=q,R}function Ku(){var 
R,q,de,He,Te,Xe,Et,Rt,qn;if(R=G,q=Lr(),q===r&&(q=null),q!==r){if(de=G,t.charCodeAt(G)===35?(He=u,G++):(He=r,$e===0&&Oe(g)),He!==r){if(Te=[],Xe=G,Et=G,$e++,Rt=eo(),$e--,Rt===r?Et=void 0:(G=Et,Et=r),Et!==r?(t.length>G?(Rt=t.charAt(G),G++):(Rt=r,$e===0&&Oe(f)),Rt!==r?(Et=[Et,Rt],Xe=Et):(G=Xe,Xe=r)):(G=Xe,Xe=r),Xe!==r)for(;Xe!==r;)Te.push(Xe),Xe=G,Et=G,$e++,Rt=eo(),$e--,Rt===r?Et=void 0:(G=Et,Et=r),Et!==r?(t.length>G?(Rt=t.charAt(G),G++):(Rt=r,$e===0&&Oe(f)),Rt!==r?(Et=[Et,Rt],Xe=Et):(G=Xe,Xe=r)):(G=Xe,Xe=r);else Te=r;Te!==r?(He=[He,Te],de=He):(G=de,de=r)}else G=de,de=r;if(de===r&&(de=null),de!==r){if(He=[],Te=$s(),Te!==r)for(;Te!==r;)He.push(Te),Te=$s();else He=r;He!==r?(yt=R,q=h(),R=q):(G=R,R=r)}else G=R,R=r}else G=R,R=r;if(R===r&&(R=G,q=Qa(),q!==r?(de=_l(),de!==r?(He=Lr(),He===r&&(He=null),He!==r?(t.charCodeAt(G)===58?(Te=p,G++):(Te=r,$e===0&&Oe(m)),Te!==r?(Xe=Lr(),Xe===r&&(Xe=null),Xe!==r?(Et=ba(),Et!==r?(yt=R,q=y(de,Et),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r),R===r&&(R=G,q=Qa(),q!==r?(de=Zs(),de!==r?(He=Lr(),He===r&&(He=null),He!==r?(t.charCodeAt(G)===58?(Te=p,G++):(Te=r,$e===0&&Oe(m)),Te!==r?(Xe=Lr(),Xe===r&&(Xe=null),Xe!==r?(Et=ba(),Et!==r?(yt=R,q=y(de,Et),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r),R===r))){if(R=G,q=Qa(),q!==r)if(de=Zs(),de!==r)if(He=Lr(),He!==r)if(Te=xE(),Te!==r){if(Xe=[],Et=$s(),Et!==r)for(;Et!==r;)Xe.push(Et),Et=$s();else Xe=r;Xe!==r?(yt=R,q=y(de,Te),R=q):(G=R,R=r)}else G=R,R=r;else G=R,R=r;else G=R,R=r;else G=R,R=r;if(R===r)if(R=G,q=Qa(),q!==r)if(de=Zs(),de!==r){if(He=[],Te=G,Xe=Lr(),Xe===r&&(Xe=null),Xe!==r?(t.charCodeAt(G)===44?(Et=Q,G++):(Et=r,$e===0&&Oe(S)),Et!==r?(Rt=Lr(),Rt===r&&(Rt=null),Rt!==r?(qn=Zs(),qn!==r?(yt=Te,Xe=x(de,qn),Te=Xe):(G=Te,Te=r)):(G=Te,Te=r)):(G=Te,Te=r)):(G=Te,Te=r),Te!==r)for(;Te!==r;)He.push(Te),Te=G,Xe=Lr(),Xe===r&&(Xe=null),Xe!==r?(t.charCodeAt(G)===44?(Et=Q,G++):(Et=r,$e===0&&Oe(S)),Et!==r?(Rt=Lr(),Rt===r&&(Rt=null),Rt!==r?(qn=Zs(),qn!==r?(yt=Te,Xe=x(de,qn),Te=Xe):(G=Te,Te=r)):(G=Te,Te=r)):(G=Te,Te=r)):(G=Te,Te=r);else He=r;He!==r?(Te=Lr(),Te===r&&(Te=null),Te!==r?(t.charCodeAt(G)===58?(Xe=p,G++):(Xe=r,$e===0&&Oe(m)),Xe!==r?(Et=Lr(),Et===r&&(Et=null),Et!==r?(Rt=ba(),Rt!==r?(yt=R,q=M(de,He,Rt),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)}else G=R,R=r;else G=R,R=r}return R}function ba(){var R,q,de,He,Te,Xe,Et;if(R=G,q=G,$e++,de=G,He=eo(),He!==r?(Te=it(),Te!==r?(t.charCodeAt(G)===45?(Xe=o,G++):(Xe=r,$e===0&&Oe(a)),Xe!==r?(Et=Lr(),Et!==r?(He=[He,Te,Xe,Et],de=He):(G=de,de=r)):(G=de,de=r)):(G=de,de=r)):(G=de,de=r),$e--,de!==r?(G=q,q=void 0):q=r,q!==r?(de=$s(),de!==r?(He=Po(),He!==r?(Te=zl(),Te!==r?(Xe=FA(),Xe!==r?(yt=R,q=Y(Te),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r),R===r&&(R=G,q=eo(),q!==r?(de=Po(),de!==r?(He=Uu(),He!==r?(Te=FA(),Te!==r?(yt=R,q=Y(He),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r),R===r))if(R=G,q=Vl(),q!==r){if(de=[],He=$s(),He!==r)for(;He!==r;)de.push(He),He=$s();else de=r;de!==r?(yt=R,q=U(q),R=q):(G=R,R=r)}else G=R,R=r;return R}function Qa(){var R,q,de;for($e++,R=G,q=[],t.charCodeAt(G)===32?(de=W,G++):(de=r,$e===0&&Oe(ee));de!==r;)q.push(de),t.charCodeAt(G)===32?(de=W,G++):(de=r,$e===0&&Oe(ee));return q!==r?(yt=G,de=Z(q),de?de=void 0:de=r,de!==r?(q=[q,de],R=q):(G=R,R=r)):(G=R,R=r),$e--,R===r&&(q=r,$e===0&&Oe(J)),R}function it(){var R,q,de;for(R=G,q=[],t.charCodeAt(G)===32?(de=W,G++):(de=r,$e===0&&Oe(ee));de!==r;)q.push(de),t.charCodeAt(G)===32?(de=W,G++):(de=r,$e===0&&Oe(ee));return q!==r?(yt=G,de=A(q),de?de=void 
0:de=r,de!==r?(q=[q,de],R=q):(G=R,R=r)):(G=R,R=r),R}function Po(){var R;return yt=G,R=ne(),R?R=void 0:R=r,R}function FA(){var R;return yt=G,R=le(),R?R=void 0:R=r,R}function _l(){var R;return R=Xl(),R===r&&(R=Nh()),R}function Zs(){var R,q,de;if(R=Xl(),R===r){if(R=G,q=[],de=Hu(),de!==r)for(;de!==r;)q.push(de),de=Hu();else q=r;q!==r&&(yt=R,q=Ae()),R=q}return R}function Vl(){var R;return R=Lh(),R===r&&(R=PE(),R===r&&(R=Xl(),R===r&&(R=Nh()))),R}function xE(){var R;return R=Lh(),R===r&&(R=Xl(),R===r&&(R=Hu())),R}function Nh(){var R,q,de,He,Te,Xe;if($e++,R=G,L.test(t.charAt(G))?(q=t.charAt(G),G++):(q=r,$e===0&&Oe(Ee)),q!==r){for(de=[],He=G,Te=Lr(),Te===r&&(Te=null),Te!==r?(we.test(t.charAt(G))?(Xe=t.charAt(G),G++):(Xe=r,$e===0&&Oe(qe)),Xe!==r?(Te=[Te,Xe],He=Te):(G=He,He=r)):(G=He,He=r);He!==r;)de.push(He),He=G,Te=Lr(),Te===r&&(Te=null),Te!==r?(we.test(t.charAt(G))?(Xe=t.charAt(G),G++):(Xe=r,$e===0&&Oe(qe)),Xe!==r?(Te=[Te,Xe],He=Te):(G=He,He=r)):(G=He,He=r);de!==r?(yt=R,q=re(),R=q):(G=R,R=r)}else G=R,R=r;return $e--,R===r&&(q=r,$e===0&&Oe(T)),R}function Hu(){var R,q,de,He,Te;if(R=G,t.substr(G,2)===se?(q=se,G+=2):(q=r,$e===0&&Oe(Qe)),q===r&&(q=null),q!==r)if(he.test(t.charAt(G))?(de=t.charAt(G),G++):(de=r,$e===0&&Oe(Fe)),de!==r){for(He=[],Ue.test(t.charAt(G))?(Te=t.charAt(G),G++):(Te=r,$e===0&&Oe(xe));Te!==r;)He.push(Te),Ue.test(t.charAt(G))?(Te=t.charAt(G),G++):(Te=r,$e===0&&Oe(xe));He!==r?(yt=R,q=re(),R=q):(G=R,R=r)}else G=R,R=r;else G=R,R=r;return R}function Lh(){var R,q;return R=G,t.substr(G,4)===ve?(q=ve,G+=4):(q=r,$e===0&&Oe(pe)),q!==r&&(yt=R,q=X()),R=q,R}function PE(){var R,q;return R=G,t.substr(G,4)===be?(q=be,G+=4):(q=r,$e===0&&Oe(ce)),q!==r&&(yt=R,q=fe()),R=q,R===r&&(R=G,t.substr(G,5)===gt?(q=gt,G+=5):(q=r,$e===0&&Oe(Ht)),q!==r&&(yt=R,q=Mt()),R=q),R}function Xl(){var R,q,de,He;return $e++,R=G,t.charCodeAt(G)===34?(q=jt,G++):(q=r,$e===0&&Oe(Qr)),q!==r?(t.charCodeAt(G)===34?(de=jt,G++):(de=r,$e===0&&Oe(Qr)),de!==r?(yt=R,q=Ti(),R=q):(G=R,R=r)):(G=R,R=r),R===r&&(R=G,t.charCodeAt(G)===34?(q=jt,G++):(q=r,$e===0&&Oe(Qr)),q!==r?(de=DE(),de!==r?(t.charCodeAt(G)===34?(He=jt,G++):(He=r,$e===0&&Oe(Qr)),He!==r?(yt=R,q=_s(de),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)),$e--,R===r&&(q=r,$e===0&&Oe(mi)),R}function DE(){var R,q,de;if(R=G,q=[],de=ju(),de!==r)for(;de!==r;)q.push(de),de=ju();else q=r;return q!==r&&(yt=R,q=Un(q)),R=q,R}function ju(){var R,q,de,He,Te,Xe;return Kn.test(t.charAt(G))?(R=t.charAt(G),G++):(R=r,$e===0&&Oe(vr)),R===r&&(R=G,t.substr(G,2)===Hn?(q=Hn,G+=2):(q=r,$e===0&&Oe(us)),q!==r&&(yt=R,q=Ia()),R=q,R===r&&(R=G,t.substr(G,2)===SA?(q=SA,G+=2):(q=r,$e===0&&Oe(Du)),q!==r&&(yt=R,q=gs()),R=q,R===r&&(R=G,t.substr(G,2)===kA?(q=kA,G+=2):(q=r,$e===0&&Oe(ya)),q!==r&&(yt=R,q=Ru()),R=q,R===r&&(R=G,t.substr(G,2)===xA?(q=xA,G+=2):(q=r,$e===0&&Oe(PA)),q!==r&&(yt=R,q=Sr()),R=q,R===r&&(R=G,t.substr(G,2)===jl?(q=jl,G+=2):(q=r,$e===0&&Oe(Fu)),q!==r&&(yt=R,q=So()),R=q,R===r&&(R=G,t.substr(G,2)===Nu?(q=Nu,G+=2):(q=r,$e===0&&Oe(Qh)),q!==r&&(yt=R,q=vh()),R=q,R===r&&(R=G,t.substr(G,2)===oe?(q=oe,G+=2):(q=r,$e===0&&Oe(Oi)),q!==r&&(yt=R,q=ko()),R=q,R===r&&(R=G,t.substr(G,2)===jn?(q=jn,G+=2):(q=r,$e===0&&Oe(Lu)),q!==r&&(yt=R,q=vt()),R=q,R===r&&(R=G,t.substr(G,2)===Gl?(q=Gl,G+=2):(q=r,$e===0&&Oe(Gn)),q!==r?(de=NA(),de!==r?(He=NA(),He!==r?(Te=NA(),Te!==r?(Xe=NA(),Xe!==r?(yt=R,q=fs(de,He,Te,Xe),R=q):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)):(G=R,R=r)))))))))),R}function NA(){var R;return hs.test(t.charAt(G))?(R=t.charAt(G),G++):(R=r,$e===0&&Oe(pt)),R}function Lr(){var 
R,q;if($e++,R=[],lt.test(t.charAt(G))?(q=t.charAt(G),G++):(q=r,$e===0&&Oe(mn)),q!==r)for(;q!==r;)R.push(q),lt.test(t.charAt(G))?(q=t.charAt(G),G++):(q=r,$e===0&&Oe(mn));else R=r;return $e--,R===r&&(q=r,$e===0&&Oe(xo)),R}function RE(){var R,q;if($e++,R=[],Tt.test(t.charAt(G))?(q=t.charAt(G),G++):(q=r,$e===0&&Oe(Tu)),q!==r)for(;q!==r;)R.push(q),Tt.test(t.charAt(G))?(q=t.charAt(G),G++):(q=r,$e===0&&Oe(Tu));else R=r;return $e--,R===r&&(q=r,$e===0&&Oe(v)),R}function $s(){var R,q,de,He,Te,Xe;if(R=G,q=eo(),q!==r){for(de=[],He=G,Te=Lr(),Te===r&&(Te=null),Te!==r?(Xe=eo(),Xe!==r?(Te=[Te,Xe],He=Te):(G=He,He=r)):(G=He,He=r);He!==r;)de.push(He),He=G,Te=Lr(),Te===r&&(Te=null),Te!==r?(Xe=eo(),Xe!==r?(Te=[Te,Xe],He=Te):(G=He,He=r)):(G=He,He=r);de!==r?(q=[q,de],R=q):(G=R,R=r)}else G=R,R=r;return R}function eo(){var R;return t.substr(G,2)===Yl?(R=Yl,G+=2):(R=r,$e===0&&Oe(Sh)),R===r&&(t.charCodeAt(G)===10?(R=kh,G++):(R=r,$e===0&&Oe(xh)),R===r&&(t.charCodeAt(G)===13?(R=Ph,G++):(R=r,$e===0&&Oe(Dh)))),R}let Gu=2,LA=0;if(wa=n(),wa!==r&&G===t.length)return wa;throw wa!==r&&G<t.length&&Oe(Fh()),Wl(ql,$i<t.length?t.charAt($i):null,$i<t.length?En($i,$i+1):En($i,$i))}lK.exports={SyntaxError:fc,parse:Ude}});var dK=w((c$e,tv)=>{\"use strict\";var Yde=t=>{let e=!1,r=!1,i=!1;for(let n=0;n<t.length;n++){let s=t[n];e&&/[a-zA-Z]/.test(s)&&s.toUpperCase()===s?(t=t.slice(0,n)+\"-\"+t.slice(n),e=!1,i=r,r=!0,n++):r&&i&&/[a-zA-Z]/.test(s)&&s.toLowerCase()===s?(t=t.slice(0,n-1)+\"-\"+t.slice(n-1),i=r,r=!1,e=!0):(e=s.toLowerCase()===s&&s.toUpperCase()!==s,i=r,r=s.toUpperCase()===s&&s.toLowerCase()!==s)}return t},pK=(t,e)=>{if(!(typeof t==\"string\"||Array.isArray(t)))throw new TypeError(\"Expected the input to be `string | string[]`\");e=Object.assign({pascalCase:!1},e);let r=n=>e.pascalCase?n.charAt(0).toUpperCase()+n.slice(1):n;return Array.isArray(t)?t=t.map(n=>n.trim()).filter(n=>n.length).join(\"-\"):t=t.trim(),t.length===0?\"\":t.length===1?e.pascalCase?t.toUpperCase():t.toLowerCase():(t!==t.toLowerCase()&&(t=Yde(t)),t=t.replace(/^[_.\\- ]+/,\"\").toLowerCase().replace(/[_.\\- ]+(\\w|$)/g,(n,s)=>s.toUpperCase()).replace(/\\d+(\\w|$)/g,n=>n.toUpperCase()),r(t))};tv.exports=pK;tv.exports.default=pK});var mK=w((u$e,CK)=>{CK.exports=[{name:\"AppVeyor\",constant:\"APPVEYOR\",env:\"APPVEYOR\",pr:\"APPVEYOR_PULL_REQUEST_NUMBER\"},{name:\"Azure Pipelines\",constant:\"AZURE_PIPELINES\",env:\"SYSTEM_TEAMFOUNDATIONCOLLECTIONURI\",pr:\"SYSTEM_PULLREQUEST_PULLREQUESTID\"},{name:\"Appcircle\",constant:\"APPCIRCLE\",env:\"AC_APPCIRCLE\"},{name:\"Bamboo\",constant:\"BAMBOO\",env:\"bamboo_planKey\"},{name:\"Bitbucket Pipelines\",constant:\"BITBUCKET\",env:\"BITBUCKET_COMMIT\",pr:\"BITBUCKET_PR_ID\"},{name:\"Bitrise\",constant:\"BITRISE\",env:\"BITRISE_IO\",pr:\"BITRISE_PULL_REQUEST\"},{name:\"Buddy\",constant:\"BUDDY\",env:\"BUDDY_WORKSPACE_ID\",pr:\"BUDDY_EXECUTION_PULL_REQUEST_ID\"},{name:\"Buildkite\",constant:\"BUILDKITE\",env:\"BUILDKITE\",pr:{env:\"BUILDKITE_PULL_REQUEST\",ne:\"false\"}},{name:\"CircleCI\",constant:\"CIRCLE\",env:\"CIRCLECI\",pr:\"CIRCLE_PULL_REQUEST\"},{name:\"Cirrus CI\",constant:\"CIRRUS\",env:\"CIRRUS_CI\",pr:\"CIRRUS_PR\"},{name:\"AWS 
CodeBuild\",constant:\"CODEBUILD\",env:\"CODEBUILD_BUILD_ARN\"},{name:\"Codefresh\",constant:\"CODEFRESH\",env:\"CF_BUILD_ID\",pr:{any:[\"CF_PULL_REQUEST_NUMBER\",\"CF_PULL_REQUEST_ID\"]}},{name:\"Codeship\",constant:\"CODESHIP\",env:{CI_NAME:\"codeship\"}},{name:\"Drone\",constant:\"DRONE\",env:\"DRONE\",pr:{DRONE_BUILD_EVENT:\"pull_request\"}},{name:\"dsari\",constant:\"DSARI\",env:\"DSARI\"},{name:\"GitHub Actions\",constant:\"GITHUB_ACTIONS\",env:\"GITHUB_ACTIONS\",pr:{GITHUB_EVENT_NAME:\"pull_request\"}},{name:\"GitLab CI\",constant:\"GITLAB\",env:\"GITLAB_CI\",pr:\"CI_MERGE_REQUEST_ID\"},{name:\"GoCD\",constant:\"GOCD\",env:\"GO_PIPELINE_LABEL\"},{name:\"LayerCI\",constant:\"LAYERCI\",env:\"LAYERCI\",pr:\"LAYERCI_PULL_REQUEST\"},{name:\"Hudson\",constant:\"HUDSON\",env:\"HUDSON_URL\"},{name:\"Jenkins\",constant:\"JENKINS\",env:[\"JENKINS_URL\",\"BUILD_ID\"],pr:{any:[\"ghprbPullId\",\"CHANGE_ID\"]}},{name:\"Magnum CI\",constant:\"MAGNUM\",env:\"MAGNUM\"},{name:\"Netlify CI\",constant:\"NETLIFY\",env:\"NETLIFY\",pr:{env:\"PULL_REQUEST\",ne:\"false\"}},{name:\"Nevercode\",constant:\"NEVERCODE\",env:\"NEVERCODE\",pr:{env:\"NEVERCODE_PULL_REQUEST\",ne:\"false\"}},{name:\"Render\",constant:\"RENDER\",env:\"RENDER\",pr:{IS_PULL_REQUEST:\"true\"}},{name:\"Sail CI\",constant:\"SAIL\",env:\"SAILCI\",pr:\"SAIL_PULL_REQUEST_NUMBER\"},{name:\"Semaphore\",constant:\"SEMAPHORE\",env:\"SEMAPHORE\",pr:\"PULL_REQUEST_NUMBER\"},{name:\"Screwdriver\",constant:\"SCREWDRIVER\",env:\"SCREWDRIVER\",pr:{env:\"SD_PULL_REQUEST\",ne:\"false\"}},{name:\"Shippable\",constant:\"SHIPPABLE\",env:\"SHIPPABLE\",pr:{IS_PULL_REQUEST:\"true\"}},{name:\"Solano CI\",constant:\"SOLANO\",env:\"TDDIUM\",pr:\"TDDIUM_PR_ID\"},{name:\"Strider CD\",constant:\"STRIDER\",env:\"STRIDER\"},{name:\"TaskCluster\",constant:\"TASKCLUSTER\",env:[\"TASK_ID\",\"RUN_ID\"]},{name:\"TeamCity\",constant:\"TEAMCITY\",env:\"TEAMCITY_VERSION\"},{name:\"Travis CI\",constant:\"TRAVIS\",env:\"TRAVIS\",pr:{env:\"TRAVIS_PULL_REQUEST\",ne:\"false\"}},{name:\"Vercel\",constant:\"VERCEL\",env:\"NOW_BUILDER\"},{name:\"Visual Studio App Center\",constant:\"APPCENTER\",env:\"APPCENTER_BUILD_ID\"}]});var hc=w(_n=>{\"use strict\";var EK=mK(),Fo=process.env;Object.defineProperty(_n,\"_vendors\",{value:EK.map(function(t){return t.constant})});_n.name=null;_n.isPR=null;EK.forEach(function(t){let r=(Array.isArray(t.env)?t.env:[t.env]).every(function(i){return IK(i)});if(_n[t.constant]=r,r)switch(_n.name=t.name,typeof t.pr){case\"string\":_n.isPR=!!Fo[t.pr];break;case\"object\":\"env\"in t.pr?_n.isPR=t.pr.env in Fo&&Fo[t.pr.env]!==t.pr.ne:\"any\"in t.pr?_n.isPR=t.pr.any.some(function(i){return!!Fo[i]}):_n.isPR=IK(t.pr);break;default:_n.isPR=null}});_n.isCI=!!(Fo.CI||Fo.CONTINUOUS_INTEGRATION||Fo.BUILD_NUMBER||Fo.RUN_ID||_n.name);function IK(t){return typeof t==\"string\"?!!Fo[t]:Object.keys(t).every(function(e){return Fo[e]===t[e]})}});var 
sg={};ft(sg,{KeyRelationship:()=>Cc,applyCascade:()=>fp,base64RegExp:()=>QK,colorStringAlphaRegExp:()=>bK,colorStringRegExp:()=>BK,computeKey:()=>GA,getPrintable:()=>ei,hasExactLength:()=>PK,hasForbiddenKeys:()=>wCe,hasKeyRelationship:()=>lv,hasMaxLength:()=>sCe,hasMinLength:()=>nCe,hasMutuallyExclusiveKeys:()=>BCe,hasRequiredKeys:()=>yCe,hasUniqueItems:()=>oCe,isArray:()=>Vde,isAtLeast:()=>lCe,isAtMost:()=>cCe,isBase64:()=>ECe,isBoolean:()=>Wde,isDate:()=>_de,isDict:()=>Zde,isEnum:()=>nn,isHexColor:()=>mCe,isISO8601:()=>CCe,isInExclusiveRange:()=>gCe,isInInclusiveRange:()=>uCe,isInstanceOf:()=>eCe,isInteger:()=>fCe,isJSON:()=>ICe,isLiteral:()=>qde,isLowerCase:()=>hCe,isNegative:()=>aCe,isNullable:()=>iCe,isNumber:()=>zde,isObject:()=>$de,isOneOf:()=>tCe,isOptional:()=>rCe,isPositive:()=>ACe,isString:()=>gp,isTuple:()=>Xde,isUUID4:()=>dCe,isUnknown:()=>xK,isUpperCase:()=>pCe,iso8601RegExp:()=>Av,makeCoercionFn:()=>dc,makeSetter:()=>kK,makeTrait:()=>SK,makeValidator:()=>St,matchesRegExp:()=>hp,plural:()=>CI,pushError:()=>mt,simpleKeyRegExp:()=>wK,uuid4RegExp:()=>vK});function St({test:t}){return SK(t)()}function ei(t){return t===null?\"null\":t===void 0?\"undefined\":t===\"\"?\"an empty string\":JSON.stringify(t)}function GA(t,e){var r,i,n;return typeof e==\"number\"?`${(r=t==null?void 0:t.p)!==null&&r!==void 0?r:\".\"}[${e}]`:wK.test(e)?`${(i=t==null?void 0:t.p)!==null&&i!==void 0?i:\"\"}.${e}`:`${(n=t==null?void 0:t.p)!==null&&n!==void 0?n:\".\"}[${JSON.stringify(e)}]`}function dc(t,e){return r=>{let i=t[e];return t[e]=r,dc(t,e).bind(null,i)}}function kK(t,e){return r=>{t[e]=r}}function CI(t,e,r){return t===1?e:r}function mt({errors:t,p:e}={},r){return t==null||t.push(`${e!=null?e:\".\"}: ${r}`),!1}function qde(t){return St({test:(e,r)=>e!==t?mt(r,`Expected a literal (got ${ei(t)})`):!0})}function nn(t){let e=Array.isArray(t)?t:Object.values(t),r=new Set(e);return St({test:(i,n)=>r.has(i)?!0:mt(n,`Expected a valid enumeration value (got ${ei(i)})`)})}var wK,BK,bK,QK,vK,Av,SK,xK,gp,Jde,Wde,zde,_de,Vde,Xde,Zde,$de,eCe,tCe,fp,rCe,iCe,nCe,sCe,PK,oCe,aCe,ACe,lCe,cCe,uCe,gCe,fCe,hp,hCe,pCe,dCe,CCe,mCe,ECe,ICe,yCe,wCe,BCe,Cc,bCe,lv,Es=hfe(()=>{wK=/^[a-zA-Z_][a-zA-Z0-9_]*$/,BK=/^#[0-9a-f]{6}$/i,bK=/^#[0-9a-f]{6}([0-9a-f]{2})?$/i,QK=/^(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$/,vK=/^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89aAbB][a-f0-9]{3}-[a-f0-9]{12}$/i,Av=/^(?:[1-9]\\d{3}(-?)(?:(?:0[1-9]|1[0-2])\\1(?:0[1-9]|1\\d|2[0-8])|(?:0[13-9]|1[0-2])\\1(?:29|30)|(?:0[13578]|1[02])(?:\\1)31|00[1-9]|0[1-9]\\d|[12]\\d{2}|3(?:[0-5]\\d|6[0-5]))|(?:[1-9]\\d(?:0[48]|[2468][048]|[13579][26])|(?:[2468][048]|[13579][26])00)(?:(-?)02(?:\\2)29|-?366))T(?:[01]\\d|2[0-3])(:?)[0-5]\\d(?:\\3[0-5]\\d)?(?:Z|[+-][01]\\d(?:\\3[0-5]\\d)?)$/,SK=t=>()=>t;xK=()=>St({test:(t,e)=>!0});gp=()=>St({test:(t,e)=>typeof t!=\"string\"?mt(e,`Expected a string (got ${ei(t)})`):!0});Jde=new Map([[\"true\",!0],[\"True\",!0],[\"1\",!0],[1,!0],[\"false\",!1],[\"False\",!1],[\"0\",!1],[0,!1]]),Wde=()=>St({test:(t,e)=>{var r;if(typeof t!=\"boolean\"){if(typeof(e==null?void 0:e.coercions)!=\"undefined\"){if(typeof(e==null?void 0:e.coercion)==\"undefined\")return mt(e,\"Unbound coercion result\");let i=Jde.get(t);if(typeof i!=\"undefined\")return e.coercions.push([(r=e.p)!==null&&r!==void 0?r:\".\",e.coercion.bind(null,i)]),!0}return mt(e,`Expected a boolean (got ${ei(t)})`)}return!0}}),zde=()=>St({test:(t,e)=>{var r;if(typeof t!=\"number\"){if(typeof(e==null?void 0:e.coercions)!=\"undefined\"){if(typeof(e==null?void 
0:e.coercion)==\"undefined\")return mt(e,\"Unbound coercion result\");let i;if(typeof t==\"string\"){let n;try{n=JSON.parse(t)}catch(s){}if(typeof n==\"number\")if(JSON.stringify(n)===t)i=n;else return mt(e,`Received a number that can't be safely represented by the runtime (${t})`)}if(typeof i!=\"undefined\")return e.coercions.push([(r=e.p)!==null&&r!==void 0?r:\".\",e.coercion.bind(null,i)]),!0}return mt(e,`Expected a number (got ${ei(t)})`)}return!0}}),_de=()=>St({test:(t,e)=>{var r;if(!(t instanceof Date)){if(typeof(e==null?void 0:e.coercions)!=\"undefined\"){if(typeof(e==null?void 0:e.coercion)==\"undefined\")return mt(e,\"Unbound coercion result\");let i;if(typeof t==\"string\"&&Av.test(t))i=new Date(t);else{let n;if(typeof t==\"string\"){let s;try{s=JSON.parse(t)}catch(o){}typeof s==\"number\"&&(n=s)}else typeof t==\"number\"&&(n=t);if(typeof n!=\"undefined\")if(Number.isSafeInteger(n)||!Number.isSafeInteger(n*1e3))i=new Date(n*1e3);else return mt(e,`Received a timestamp that can't be safely represented by the runtime (${t})`)}if(typeof i!=\"undefined\")return e.coercions.push([(r=e.p)!==null&&r!==void 0?r:\".\",e.coercion.bind(null,i)]),!0}return mt(e,`Expected a date (got ${ei(t)})`)}return!0}}),Vde=(t,{delimiter:e}={})=>St({test:(r,i)=>{var n;if(typeof r==\"string\"&&typeof e!=\"undefined\"&&typeof(i==null?void 0:i.coercions)!=\"undefined\"){if(typeof(i==null?void 0:i.coercion)==\"undefined\")return mt(i,\"Unbound coercion result\");r=r.split(e),i.coercions.push([(n=i.p)!==null&&n!==void 0?n:\".\",i.coercion.bind(null,r)])}if(!Array.isArray(r))return mt(i,`Expected an array (got ${ei(r)})`);let s=!0;for(let o=0,a=r.length;o<a&&(s=t(r[o],Object.assign(Object.assign({},i),{p:GA(i,o),coercion:dc(r,o)}))&&s,!(!s&&(i==null?void 0:i.errors)==null));++o);return s}}),Xde=(t,{delimiter:e}={})=>{let r=PK(t.length);return St({test:(i,n)=>{var s;if(typeof i==\"string\"&&typeof e!=\"undefined\"&&typeof(n==null?void 0:n.coercions)!=\"undefined\"){if(typeof(n==null?void 0:n.coercion)==\"undefined\")return mt(n,\"Unbound coercion result\");i=i.split(e),n.coercions.push([(s=n.p)!==null&&s!==void 0?s:\".\",n.coercion.bind(null,i)])}if(!Array.isArray(i))return mt(n,`Expected a tuple (got ${ei(i)})`);let o=r(i,Object.assign({},n));for(let a=0,l=i.length;a<l&&a<t.length&&(o=t[a](i[a],Object.assign(Object.assign({},n),{p:GA(n,a),coercion:dc(i,a)}))&&o,!(!o&&(n==null?void 0:n.errors)==null));++a);return o}})},Zde=(t,{keys:e=null}={})=>St({test:(r,i)=>{if(typeof r!=\"object\"||r===null)return mt(i,`Expected an object (got ${ei(r)})`);let n=Object.keys(r),s=!0;for(let o=0,a=n.length;o<a&&(s||(i==null?void 0:i.errors)!=null);++o){let l=n[o],c=r[l];if(l===\"__proto__\"||l===\"constructor\"){s=mt(Object.assign(Object.assign({},i),{p:GA(i,l)}),\"Unsafe property name\");continue}if(e!==null&&!e(l,i)){s=!1;continue}if(!t(c,Object.assign(Object.assign({},i),{p:GA(i,l),coercion:dc(r,l)}))){s=!1;continue}}return s}}),$de=(t,{extra:e=null}={})=>{let r=Object.keys(t);return St({test:(i,n)=>{if(typeof i!=\"object\"||i===null)return mt(n,`Expected an object (got ${ei(i)})`);let s=new Set([...r,...Object.keys(i)]),o={},a=!0;for(let l of s){if(l===\"constructor\"||l===\"__proto__\")a=mt(Object.assign(Object.assign({},n),{p:GA(n,l)}),\"Unsafe property name\");else{let c=Object.prototype.hasOwnProperty.call(t,l)?t[l]:void 0,u=Object.prototype.hasOwnProperty.call(i,l)?i[l]:void 0;typeof 
c!=\"undefined\"?a=c(u,Object.assign(Object.assign({},n),{p:GA(n,l),coercion:dc(i,l)}))&&a:e===null?a=mt(Object.assign(Object.assign({},n),{p:GA(n,l)}),`Extraneous property (got ${ei(u)})`):Object.defineProperty(o,l,{enumerable:!0,get:()=>u,set:kK(i,l)})}if(!a&&(n==null?void 0:n.errors)==null)break}return e!==null&&(a||(n==null?void 0:n.errors)!=null)&&(a=e(o,n)&&a),a}})},eCe=t=>St({test:(e,r)=>e instanceof t?!0:mt(r,`Expected an instance of ${t.name} (got ${ei(e)})`)}),tCe=(t,{exclusive:e=!1}={})=>St({test:(r,i)=>{var n,s,o;let a=[],l=typeof(i==null?void 0:i.errors)!=\"undefined\"?[]:void 0;for(let c=0,u=t.length;c<u;++c){let g=typeof(i==null?void 0:i.errors)!=\"undefined\"?[]:void 0,f=typeof(i==null?void 0:i.coercions)!=\"undefined\"?[]:void 0;if(t[c](r,Object.assign(Object.assign({},i),{errors:g,coercions:f,p:`${(n=i==null?void 0:i.p)!==null&&n!==void 0?n:\".\"}#${c+1}`}))){if(a.push([`#${c+1}`,f]),!e)break}else l==null||l.push(g[0])}if(a.length===1){let[,c]=a[0];return typeof c!=\"undefined\"&&((s=i==null?void 0:i.coercions)===null||s===void 0||s.push(...c)),!0}return a.length>1?mt(i,`Expected to match exactly a single predicate (matched ${a.join(\", \")})`):(o=i==null?void 0:i.errors)===null||o===void 0||o.push(...l),!1}}),fp=(t,e)=>St({test:(r,i)=>{var n,s;let o={value:r},a=typeof(i==null?void 0:i.coercions)!=\"undefined\"?dc(o,\"value\"):void 0,l=typeof(i==null?void 0:i.coercions)!=\"undefined\"?[]:void 0;if(!t(r,Object.assign(Object.assign({},i),{coercion:a,coercions:l})))return!1;let c=[];if(typeof l!=\"undefined\")for(let[,u]of l)c.push(u());try{if(typeof(i==null?void 0:i.coercions)!=\"undefined\"){if(o.value!==r){if(typeof(i==null?void 0:i.coercion)==\"undefined\")return mt(i,\"Unbound coercion result\");i.coercions.push([(n=i.p)!==null&&n!==void 0?n:\".\",i.coercion.bind(null,o.value)])}(s=i==null?void 0:i.coercions)===null||s===void 0||s.push(...l)}return e.every(u=>u(o.value,i))}finally{for(let u of c)u()}}}),rCe=t=>St({test:(e,r)=>typeof e==\"undefined\"?!0:t(e,r)}),iCe=t=>St({test:(e,r)=>e===null?!0:t(e,r)}),nCe=t=>St({test:(e,r)=>e.length>=t?!0:mt(r,`Expected to have a length of at least ${t} elements (got ${e.length})`)}),sCe=t=>St({test:(e,r)=>e.length<=t?!0:mt(r,`Expected to have a length of at most ${t} elements (got ${e.length})`)}),PK=t=>St({test:(e,r)=>e.length!==t?mt(r,`Expected to have a length of exactly ${t} elements (got ${e.length})`):!0}),oCe=({map:t}={})=>St({test:(e,r)=>{let i=new Set,n=new Set;for(let s=0,o=e.length;s<o;++s){let a=e[s],l=typeof t!=\"undefined\"?t(a):a;if(i.has(l)){if(n.has(l))continue;mt(r,`Expected to contain unique elements; got a duplicate with ${ei(e)}`),n.add(l)}else i.add(l)}return n.size===0}}),aCe=()=>St({test:(t,e)=>t<=0?!0:mt(e,`Expected to be negative (got ${t})`)}),ACe=()=>St({test:(t,e)=>t>=0?!0:mt(e,`Expected to be positive (got ${t})`)}),lCe=t=>St({test:(e,r)=>e>=t?!0:mt(r,`Expected to be at least ${t} (got ${e})`)}),cCe=t=>St({test:(e,r)=>e<=t?!0:mt(r,`Expected to be at most ${t} (got ${e})`)}),uCe=(t,e)=>St({test:(r,i)=>r>=t&&r<=e?!0:mt(i,`Expected to be in the [${t}; ${e}] range (got ${r})`)}),gCe=(t,e)=>St({test:(r,i)=>r>=t&&r<e?!0:mt(i,`Expected to be in the [${t}; ${e}[ range (got ${r})`)}),fCe=({unsafe:t=!1}={})=>St({test:(e,r)=>e!==Math.round(e)?mt(r,`Expected to be an integer (got ${e})`):Number.isSafeInteger(e)?!0:mt(r,`Expected to be a safe integer (got ${e})`)}),hp=t=>St({test:(e,r)=>t.test(e)?!0:mt(r,`Expected to match the pattern ${t.toString()} (got 
${ei(e)})`)}),hCe=()=>St({test:(t,e)=>t!==t.toLowerCase()?mt(e,`Expected to be all-lowercase (got ${t})`):!0}),pCe=()=>St({test:(t,e)=>t!==t.toUpperCase()?mt(e,`Expected to be all-uppercase (got ${t})`):!0}),dCe=()=>St({test:(t,e)=>vK.test(t)?!0:mt(e,`Expected to be a valid UUID v4 (got ${ei(t)})`)}),CCe=()=>St({test:(t,e)=>Av.test(t)?!1:mt(e,`Expected to be a valid ISO 8601 date string (got ${ei(t)})`)}),mCe=({alpha:t=!1})=>St({test:(e,r)=>(t?BK.test(e):bK.test(e))?!0:mt(r,`Expected to be a valid hexadecimal color string (got ${ei(e)})`)}),ECe=()=>St({test:(t,e)=>QK.test(t)?!0:mt(e,`Expected to be a valid base 64 string (got ${ei(t)})`)}),ICe=(t=xK())=>St({test:(e,r)=>{let i;try{i=JSON.parse(e)}catch(n){return mt(r,`Expected to be a valid JSON string (got ${ei(e)})`)}return t(i,r)}}),yCe=t=>{let e=new Set(t);return St({test:(r,i)=>{let n=new Set(Object.keys(r)),s=[];for(let o of e)n.has(o)||s.push(o);return s.length>0?mt(i,`Missing required ${CI(s.length,\"property\",\"properties\")} ${s.map(o=>`\"${o}\"`).join(\", \")}`):!0}})},wCe=t=>{let e=new Set(t);return St({test:(r,i)=>{let n=new Set(Object.keys(r)),s=[];for(let o of e)n.has(o)&&s.push(o);return s.length>0?mt(i,`Forbidden ${CI(s.length,\"property\",\"properties\")} ${s.map(o=>`\"${o}\"`).join(\", \")}`):!0}})},BCe=t=>{let e=new Set(t);return St({test:(r,i)=>{let n=new Set(Object.keys(r)),s=[];for(let o of e)n.has(o)&&s.push(o);return s.length>1?mt(i,`Mutually exclusive properties ${s.map(o=>`\"${o}\"`).join(\", \")}`):!0}})};(function(t){t.Forbids=\"Forbids\",t.Requires=\"Requires\"})(Cc||(Cc={}));bCe={[Cc.Forbids]:{expect:!1,message:\"forbids using\"},[Cc.Requires]:{expect:!0,message:\"requires using\"}},lv=(t,e,r,{ignore:i=[]}={})=>{let n=new Set(i),s=new Set(r),o=bCe[e];return St({test:(a,l)=>{let c=new Set(Object.keys(a));if(!c.has(t)||n.has(a[t]))return!0;let u=[];for(let g of s)(c.has(g)&&!n.has(a[g]))!==o.expect&&u.push(g);return u.length>=1?mt(l,`Property \"${t}\" ${o.message} ${CI(u.length,\"property\",\"properties\")} ${u.map(g=>`\"${g}\"`).join(\", \")}`):!0}})}});var _K=w((fet,zK)=>{\"use strict\";zK.exports=(t,...e)=>new Promise(r=>{r(t(...e))})});var ag=w((het,dv)=>{\"use strict\";var HCe=_K(),VK=t=>{if(t<1)throw new TypeError(\"Expected `concurrency` to be a number from 1 and up\");let e=[],r=0,i=()=>{r--,e.length>0&&e.shift()()},n=(a,l,...c)=>{r++;let u=HCe(a,...c);l(u),u.then(i,i)},s=(a,l,...c)=>{r<t?n(a,l,...c):e.push(n.bind(null,a,l,...c))},o=(a,...l)=>new Promise(c=>s(a,c,...l));return Object.defineProperties(o,{activeCount:{get:()=>r},pendingCount:{get:()=>e.length}}),o};dv.exports=VK;dv.exports.default=VK});var mp=w((det,XK)=>{var jCe=\"2.0.0\",GCe=256,YCe=Number.MAX_SAFE_INTEGER||9007199254740991,qCe=16;XK.exports={SEMVER_SPEC_VERSION:jCe,MAX_LENGTH:GCe,MAX_SAFE_INTEGER:YCe,MAX_SAFE_COMPONENT_LENGTH:qCe}});var Ep=w((Cet,ZK)=>{var JCe=typeof process==\"object\"&&process.env&&process.env.NODE_DEBUG&&/\\bsemver\\b/i.test(process.env.NODE_DEBUG)?(...t)=>console.error(\"SEMVER\",...t):()=>{};ZK.exports=JCe});var mc=w((qA,$K)=>{var{MAX_SAFE_COMPONENT_LENGTH:Cv}=mp(),WCe=Ep();qA=$K.exports={};var zCe=qA.re=[],tt=qA.src=[],rt=qA.t={},_Ce=0,kt=(t,e,r)=>{let i=_Ce++;WCe(i,e),rt[t]=i,tt[i]=e,zCe[i]=new RegExp(e,r?\"g\":void 
0)};kt(\"NUMERICIDENTIFIER\",\"0|[1-9]\\\\d*\");kt(\"NUMERICIDENTIFIERLOOSE\",\"[0-9]+\");kt(\"NONNUMERICIDENTIFIER\",\"\\\\d*[a-zA-Z-][a-zA-Z0-9-]*\");kt(\"MAINVERSION\",`(${tt[rt.NUMERICIDENTIFIER]})\\\\.(${tt[rt.NUMERICIDENTIFIER]})\\\\.(${tt[rt.NUMERICIDENTIFIER]})`);kt(\"MAINVERSIONLOOSE\",`(${tt[rt.NUMERICIDENTIFIERLOOSE]})\\\\.(${tt[rt.NUMERICIDENTIFIERLOOSE]})\\\\.(${tt[rt.NUMERICIDENTIFIERLOOSE]})`);kt(\"PRERELEASEIDENTIFIER\",`(?:${tt[rt.NUMERICIDENTIFIER]}|${tt[rt.NONNUMERICIDENTIFIER]})`);kt(\"PRERELEASEIDENTIFIERLOOSE\",`(?:${tt[rt.NUMERICIDENTIFIERLOOSE]}|${tt[rt.NONNUMERICIDENTIFIER]})`);kt(\"PRERELEASE\",`(?:-(${tt[rt.PRERELEASEIDENTIFIER]}(?:\\\\.${tt[rt.PRERELEASEIDENTIFIER]})*))`);kt(\"PRERELEASELOOSE\",`(?:-?(${tt[rt.PRERELEASEIDENTIFIERLOOSE]}(?:\\\\.${tt[rt.PRERELEASEIDENTIFIERLOOSE]})*))`);kt(\"BUILDIDENTIFIER\",\"[0-9A-Za-z-]+\");kt(\"BUILD\",`(?:\\\\+(${tt[rt.BUILDIDENTIFIER]}(?:\\\\.${tt[rt.BUILDIDENTIFIER]})*))`);kt(\"FULLPLAIN\",`v?${tt[rt.MAINVERSION]}${tt[rt.PRERELEASE]}?${tt[rt.BUILD]}?`);kt(\"FULL\",`^${tt[rt.FULLPLAIN]}$`);kt(\"LOOSEPLAIN\",`[v=\\\\s]*${tt[rt.MAINVERSIONLOOSE]}${tt[rt.PRERELEASELOOSE]}?${tt[rt.BUILD]}?`);kt(\"LOOSE\",`^${tt[rt.LOOSEPLAIN]}$`);kt(\"GTLT\",\"((?:<|>)?=?)\");kt(\"XRANGEIDENTIFIERLOOSE\",`${tt[rt.NUMERICIDENTIFIERLOOSE]}|x|X|\\\\*`);kt(\"XRANGEIDENTIFIER\",`${tt[rt.NUMERICIDENTIFIER]}|x|X|\\\\*`);kt(\"XRANGEPLAIN\",`[v=\\\\s]*(${tt[rt.XRANGEIDENTIFIER]})(?:\\\\.(${tt[rt.XRANGEIDENTIFIER]})(?:\\\\.(${tt[rt.XRANGEIDENTIFIER]})(?:${tt[rt.PRERELEASE]})?${tt[rt.BUILD]}?)?)?`);kt(\"XRANGEPLAINLOOSE\",`[v=\\\\s]*(${tt[rt.XRANGEIDENTIFIERLOOSE]})(?:\\\\.(${tt[rt.XRANGEIDENTIFIERLOOSE]})(?:\\\\.(${tt[rt.XRANGEIDENTIFIERLOOSE]})(?:${tt[rt.PRERELEASELOOSE]})?${tt[rt.BUILD]}?)?)?`);kt(\"XRANGE\",`^${tt[rt.GTLT]}\\\\s*${tt[rt.XRANGEPLAIN]}$`);kt(\"XRANGELOOSE\",`^${tt[rt.GTLT]}\\\\s*${tt[rt.XRANGEPLAINLOOSE]}$`);kt(\"COERCE\",`(^|[^\\\\d])(\\\\d{1,${Cv}})(?:\\\\.(\\\\d{1,${Cv}}))?(?:\\\\.(\\\\d{1,${Cv}}))?(?:$|[^\\\\d])`);kt(\"COERCERTL\",tt[rt.COERCE],!0);kt(\"LONETILDE\",\"(?:~>?)\");kt(\"TILDETRIM\",`(\\\\s*)${tt[rt.LONETILDE]}\\\\s+`,!0);qA.tildeTrimReplace=\"$1~\";kt(\"TILDE\",`^${tt[rt.LONETILDE]}${tt[rt.XRANGEPLAIN]}$`);kt(\"TILDELOOSE\",`^${tt[rt.LONETILDE]}${tt[rt.XRANGEPLAINLOOSE]}$`);kt(\"LONECARET\",\"(?:\\\\^)\");kt(\"CARETTRIM\",`(\\\\s*)${tt[rt.LONECARET]}\\\\s+`,!0);qA.caretTrimReplace=\"$1^\";kt(\"CARET\",`^${tt[rt.LONECARET]}${tt[rt.XRANGEPLAIN]}$`);kt(\"CARETLOOSE\",`^${tt[rt.LONECARET]}${tt[rt.XRANGEPLAINLOOSE]}$`);kt(\"COMPARATORLOOSE\",`^${tt[rt.GTLT]}\\\\s*(${tt[rt.LOOSEPLAIN]})$|^$`);kt(\"COMPARATOR\",`^${tt[rt.GTLT]}\\\\s*(${tt[rt.FULLPLAIN]})$|^$`);kt(\"COMPARATORTRIM\",`(\\\\s*)${tt[rt.GTLT]}\\\\s*(${tt[rt.LOOSEPLAIN]}|${tt[rt.XRANGEPLAIN]})`,!0);qA.comparatorTrimReplace=\"$1$2$3\";kt(\"HYPHENRANGE\",`^\\\\s*(${tt[rt.XRANGEPLAIN]})\\\\s+-\\\\s+(${tt[rt.XRANGEPLAIN]})\\\\s*$`);kt(\"HYPHENRANGELOOSE\",`^\\\\s*(${tt[rt.XRANGEPLAINLOOSE]})\\\\s+-\\\\s+(${tt[rt.XRANGEPLAINLOOSE]})\\\\s*$`);kt(\"STAR\",\"(<|>)?=?\\\\s*\\\\*\");kt(\"GTE0\",\"^\\\\s*>=\\\\s*0.0.0\\\\s*$\");kt(\"GTE0PRE\",\"^\\\\s*>=\\\\s*0.0.0-0\\\\s*$\")});var Ip=w((met,e2)=>{var VCe=[\"includePrerelease\",\"loose\",\"rtl\"],XCe=t=>t?typeof t!=\"object\"?{loose:!0}:VCe.filter(e=>t[e]).reduce((e,r)=>(e[r]=!0,e),{}):{};e2.exports=XCe});var bI=w((Eet,t2)=>{var r2=/^[0-9]+$/,i2=(t,e)=>{let r=r2.test(t),i=r2.test(e);return r&&i&&(t=+t,e=+e),t===e?0:r&&!i?-1:i&&!r?1:t<e?-1:1},ZCe=(t,e)=>i2(e,t);t2.exports={compareIdentifiers:i2,rcompareIdentifiers:ZCe}});var 
Hi=w((Iet,n2)=>{var QI=Ep(),{MAX_LENGTH:s2,MAX_SAFE_INTEGER:vI}=mp(),{re:o2,t:a2}=mc(),$Ce=Ip(),{compareIdentifiers:yp}=bI(),ys=class{constructor(e,r){if(r=$Ce(r),e instanceof ys){if(e.loose===!!r.loose&&e.includePrerelease===!!r.includePrerelease)return e;e=e.version}else if(typeof e!=\"string\")throw new TypeError(`Invalid Version: ${e}`);if(e.length>s2)throw new TypeError(`version is longer than ${s2} characters`);QI(\"SemVer\",e,r),this.options=r,this.loose=!!r.loose,this.includePrerelease=!!r.includePrerelease;let i=e.trim().match(r.loose?o2[a2.LOOSE]:o2[a2.FULL]);if(!i)throw new TypeError(`Invalid Version: ${e}`);if(this.raw=e,this.major=+i[1],this.minor=+i[2],this.patch=+i[3],this.major>vI||this.major<0)throw new TypeError(\"Invalid major version\");if(this.minor>vI||this.minor<0)throw new TypeError(\"Invalid minor version\");if(this.patch>vI||this.patch<0)throw new TypeError(\"Invalid patch version\");i[4]?this.prerelease=i[4].split(\".\").map(n=>{if(/^[0-9]+$/.test(n)){let s=+n;if(s>=0&&s<vI)return s}return n}):this.prerelease=[],this.build=i[5]?i[5].split(\".\"):[],this.format()}format(){return this.version=`${this.major}.${this.minor}.${this.patch}`,this.prerelease.length&&(this.version+=`-${this.prerelease.join(\".\")}`),this.version}toString(){return this.version}compare(e){if(QI(\"SemVer.compare\",this.version,this.options,e),!(e instanceof ys)){if(typeof e==\"string\"&&e===this.version)return 0;e=new ys(e,this.options)}return e.version===this.version?0:this.compareMain(e)||this.comparePre(e)}compareMain(e){return e instanceof ys||(e=new ys(e,this.options)),yp(this.major,e.major)||yp(this.minor,e.minor)||yp(this.patch,e.patch)}comparePre(e){if(e instanceof ys||(e=new ys(e,this.options)),this.prerelease.length&&!e.prerelease.length)return-1;if(!this.prerelease.length&&e.prerelease.length)return 1;if(!this.prerelease.length&&!e.prerelease.length)return 0;let r=0;do{let i=this.prerelease[r],n=e.prerelease[r];if(QI(\"prerelease compare\",r,i,n),i===void 0&&n===void 0)return 0;if(n===void 0)return 1;if(i===void 0)return-1;if(i===n)continue;return yp(i,n)}while(++r)}compareBuild(e){e instanceof ys||(e=new ys(e,this.options));let r=0;do{let i=this.build[r],n=e.build[r];if(QI(\"prerelease compare\",r,i,n),i===void 0&&n===void 0)return 0;if(n===void 0)return 1;if(i===void 0)return-1;if(i===n)continue;return yp(i,n)}while(++r)}inc(e,r){switch(e){case\"premajor\":this.prerelease.length=0,this.patch=0,this.minor=0,this.major++,this.inc(\"pre\",r);break;case\"preminor\":this.prerelease.length=0,this.patch=0,this.minor++,this.inc(\"pre\",r);break;case\"prepatch\":this.prerelease.length=0,this.inc(\"patch\",r),this.inc(\"pre\",r);break;case\"prerelease\":this.prerelease.length===0&&this.inc(\"patch\",r),this.inc(\"pre\",r);break;case\"major\":(this.minor!==0||this.patch!==0||this.prerelease.length===0)&&this.major++,this.minor=0,this.patch=0,this.prerelease=[];break;case\"minor\":(this.patch!==0||this.prerelease.length===0)&&this.minor++,this.patch=0,this.prerelease=[];break;case\"patch\":this.prerelease.length===0&&this.patch++,this.prerelease=[];break;case\"pre\":if(this.prerelease.length===0)this.prerelease=[0];else{let i=this.prerelease.length;for(;--i>=0;)typeof this.prerelease[i]==\"number\"&&(this.prerelease[i]++,i=-2);i===-1&&this.prerelease.push(0)}r&&(this.prerelease[0]===r?isNaN(this.prerelease[1])&&(this.prerelease=[r,0]):this.prerelease=[r,0]);break;default:throw new Error(`invalid increment argument: ${e}`)}return this.format(),this.raw=this.version,this}};n2.exports=ys});var 
Ec=w((yet,A2)=>{var{MAX_LENGTH:eme}=mp(),{re:l2,t:c2}=mc(),u2=Hi(),tme=Ip(),rme=(t,e)=>{if(e=tme(e),t instanceof u2)return t;if(typeof t!=\"string\"||t.length>eme||!(e.loose?l2[c2.LOOSE]:l2[c2.FULL]).test(t))return null;try{return new u2(t,e)}catch(i){return null}};A2.exports=rme});var f2=w((wet,g2)=>{var ime=Ec(),nme=(t,e)=>{let r=ime(t,e);return r?r.version:null};g2.exports=nme});var p2=w((Bet,h2)=>{var sme=Ec(),ome=(t,e)=>{let r=sme(t.trim().replace(/^[=v]+/,\"\"),e);return r?r.version:null};h2.exports=ome});var C2=w((bet,d2)=>{var ame=Hi(),Ame=(t,e,r,i)=>{typeof r==\"string\"&&(i=r,r=void 0);try{return new ame(t,r).inc(e,i).version}catch(n){return null}};d2.exports=Ame});var ws=w((Qet,m2)=>{var E2=Hi(),lme=(t,e,r)=>new E2(t,r).compare(new E2(e,r));m2.exports=lme});var SI=w((vet,I2)=>{var cme=ws(),ume=(t,e,r)=>cme(t,e,r)===0;I2.exports=ume});var B2=w((ket,y2)=>{var w2=Ec(),gme=SI(),fme=(t,e)=>{if(gme(t,e))return null;{let r=w2(t),i=w2(e),n=r.prerelease.length||i.prerelease.length,s=n?\"pre\":\"\",o=n?\"prerelease\":\"\";for(let a in r)if((a===\"major\"||a===\"minor\"||a===\"patch\")&&r[a]!==i[a])return s+a;return o}};y2.exports=fme});var Q2=w((xet,b2)=>{var hme=Hi(),pme=(t,e)=>new hme(t,e).major;b2.exports=pme});var S2=w((Pet,v2)=>{var dme=Hi(),Cme=(t,e)=>new dme(t,e).minor;v2.exports=Cme});var x2=w((Det,k2)=>{var mme=Hi(),Eme=(t,e)=>new mme(t,e).patch;k2.exports=Eme});var D2=w((Ret,P2)=>{var Ime=Ec(),yme=(t,e)=>{let r=Ime(t,e);return r&&r.prerelease.length?r.prerelease:null};P2.exports=yme});var F2=w((Fet,R2)=>{var wme=ws(),Bme=(t,e,r)=>wme(e,t,r);R2.exports=Bme});var L2=w((Net,N2)=>{var bme=ws(),Qme=(t,e)=>bme(t,e,!0);N2.exports=Qme});var kI=w((Let,T2)=>{var O2=Hi(),vme=(t,e,r)=>{let i=new O2(t,r),n=new O2(e,r);return i.compare(n)||i.compareBuild(n)};T2.exports=vme});var U2=w((Tet,M2)=>{var Sme=kI(),kme=(t,e)=>t.sort((r,i)=>Sme(r,i,e));M2.exports=kme});var H2=w((Oet,K2)=>{var xme=kI(),Pme=(t,e)=>t.sort((r,i)=>xme(i,r,e));K2.exports=Pme});var wp=w((Met,j2)=>{var Dme=ws(),Rme=(t,e,r)=>Dme(t,e,r)>0;j2.exports=Rme});var xI=w((Uet,G2)=>{var Fme=ws(),Nme=(t,e,r)=>Fme(t,e,r)<0;G2.exports=Nme});var mv=w((Ket,Y2)=>{var Lme=ws(),Tme=(t,e,r)=>Lme(t,e,r)!==0;Y2.exports=Tme});var PI=w((Het,q2)=>{var Ome=ws(),Mme=(t,e,r)=>Ome(t,e,r)>=0;q2.exports=Mme});var DI=w((jet,J2)=>{var Ume=ws(),Kme=(t,e,r)=>Ume(t,e,r)<=0;J2.exports=Kme});var Ev=w((Get,W2)=>{var Hme=SI(),jme=mv(),Gme=wp(),Yme=PI(),qme=xI(),Jme=DI(),Wme=(t,e,r,i)=>{switch(e){case\"===\":return typeof t==\"object\"&&(t=t.version),typeof r==\"object\"&&(r=r.version),t===r;case\"!==\":return typeof t==\"object\"&&(t=t.version),typeof r==\"object\"&&(r=r.version),t!==r;case\"\":case\"=\":case\"==\":return Hme(t,r,i);case\"!=\":return jme(t,r,i);case\">\":return Gme(t,r,i);case\">=\":return Yme(t,r,i);case\"<\":return qme(t,r,i);case\"<=\":return Jme(t,r,i);default:throw new TypeError(`Invalid operator: ${e}`)}};W2.exports=Wme});var _2=w((Yet,z2)=>{var zme=Hi(),_me=Ec(),{re:RI,t:FI}=mc(),Vme=(t,e)=>{if(t instanceof zme)return t;if(typeof t==\"number\"&&(t=String(t)),typeof t!=\"string\")return null;e=e||{};let r=null;if(!e.rtl)r=t.match(RI[FI.COERCE]);else{let i;for(;(i=RI[FI.COERCERTL].exec(t))&&(!r||r.index+r[0].length!==t.length);)(!r||i.index+i[0].length!==r.index+r[0].length)&&(r=i),RI[FI.COERCERTL].lastIndex=i.index+i[1].length+i[2].length;RI[FI.COERCERTL].lastIndex=-1}return r===null?null:_me(`${r[2]}.${r[3]||\"0\"}.${r[4]||\"0\"}`,e)};z2.exports=Vme});var X2=w((qet,V2)=>{\"use 
strict\";V2.exports=function(t){t.prototype[Symbol.iterator]=function*(){for(let e=this.head;e;e=e.next)yield e.value}}});var Bp=w((Jet,Z2)=>{\"use strict\";Z2.exports=Gt;Gt.Node=Ic;Gt.create=Gt;function Gt(t){var e=this;if(e instanceof Gt||(e=new Gt),e.tail=null,e.head=null,e.length=0,t&&typeof t.forEach==\"function\")t.forEach(function(n){e.push(n)});else if(arguments.length>0)for(var r=0,i=arguments.length;r<i;r++)e.push(arguments[r]);return e}Gt.prototype.removeNode=function(t){if(t.list!==this)throw new Error(\"removing node which does not belong to this list\");var e=t.next,r=t.prev;return e&&(e.prev=r),r&&(r.next=e),t===this.head&&(this.head=e),t===this.tail&&(this.tail=r),t.list.length--,t.next=null,t.prev=null,t.list=null,e};Gt.prototype.unshiftNode=function(t){if(t!==this.head){t.list&&t.list.removeNode(t);var e=this.head;t.list=this,t.next=e,e&&(e.prev=t),this.head=t,this.tail||(this.tail=t),this.length++}};Gt.prototype.pushNode=function(t){if(t!==this.tail){t.list&&t.list.removeNode(t);var e=this.tail;t.list=this,t.prev=e,e&&(e.next=t),this.tail=t,this.head||(this.head=t),this.length++}};Gt.prototype.push=function(){for(var t=0,e=arguments.length;t<e;t++)Xme(this,arguments[t]);return this.length};Gt.prototype.unshift=function(){for(var t=0,e=arguments.length;t<e;t++)Zme(this,arguments[t]);return this.length};Gt.prototype.pop=function(){if(!!this.tail){var t=this.tail.value;return this.tail=this.tail.prev,this.tail?this.tail.next=null:this.head=null,this.length--,t}};Gt.prototype.shift=function(){if(!!this.head){var t=this.head.value;return this.head=this.head.next,this.head?this.head.prev=null:this.tail=null,this.length--,t}};Gt.prototype.forEach=function(t,e){e=e||this;for(var r=this.head,i=0;r!==null;i++)t.call(e,r.value,i,this),r=r.next};Gt.prototype.forEachReverse=function(t,e){e=e||this;for(var r=this.tail,i=this.length-1;r!==null;i--)t.call(e,r.value,i,this),r=r.prev};Gt.prototype.get=function(t){for(var e=0,r=this.head;r!==null&&e<t;e++)r=r.next;if(e===t&&r!==null)return r.value};Gt.prototype.getReverse=function(t){for(var e=0,r=this.tail;r!==null&&e<t;e++)r=r.prev;if(e===t&&r!==null)return r.value};Gt.prototype.map=function(t,e){e=e||this;for(var r=new Gt,i=this.head;i!==null;)r.push(t.call(e,i.value,this)),i=i.next;return r};Gt.prototype.mapReverse=function(t,e){e=e||this;for(var r=new Gt,i=this.tail;i!==null;)r.push(t.call(e,i.value,this)),i=i.prev;return r};Gt.prototype.reduce=function(t,e){var r,i=this.head;if(arguments.length>1)r=e;else if(this.head)i=this.head.next,r=this.head.value;else throw new TypeError(\"Reduce of empty list with no initial value\");for(var n=0;i!==null;n++)r=t(r,i.value,n),i=i.next;return r};Gt.prototype.reduceReverse=function(t,e){var r,i=this.tail;if(arguments.length>1)r=e;else if(this.tail)i=this.tail.prev,r=this.tail.value;else throw new TypeError(\"Reduce of empty list with no initial value\");for(var n=this.length-1;i!==null;n--)r=t(r,i.value,n),i=i.prev;return r};Gt.prototype.toArray=function(){for(var t=new Array(this.length),e=0,r=this.head;r!==null;e++)t[e]=r.value,r=r.next;return t};Gt.prototype.toArrayReverse=function(){for(var t=new Array(this.length),e=0,r=this.tail;r!==null;e++)t[e]=r.value,r=r.prev;return t};Gt.prototype.slice=function(t,e){e=e||this.length,e<0&&(e+=this.length),t=t||0,t<0&&(t+=this.length);var r=new Gt;if(e<t||e<0)return r;t<0&&(t=0),e>this.length&&(e=this.length);for(var i=0,n=this.head;n!==null&&i<t;i++)n=n.next;for(;n!==null&&i<e;i++,n=n.next)r.push(n.value);return 
r};Gt.prototype.sliceReverse=function(t,e){e=e||this.length,e<0&&(e+=this.length),t=t||0,t<0&&(t+=this.length);var r=new Gt;if(e<t||e<0)return r;t<0&&(t=0),e>this.length&&(e=this.length);for(var i=this.length,n=this.tail;n!==null&&i>e;i--)n=n.prev;for(;n!==null&&i>t;i--,n=n.prev)r.push(n.value);return r};Gt.prototype.splice=function(t,e,...r){t>this.length&&(t=this.length-1),t<0&&(t=this.length+t);for(var i=0,n=this.head;n!==null&&i<t;i++)n=n.next;for(var s=[],i=0;n&&i<e;i++)s.push(n.value),n=this.removeNode(n);n===null&&(n=this.tail),n!==this.head&&n!==this.tail&&(n=n.prev);for(var i=0;i<r.length;i++)n=$me(this,n,r[i]);return s};Gt.prototype.reverse=function(){for(var t=this.head,e=this.tail,r=t;r!==null;r=r.prev){var i=r.prev;r.prev=r.next,r.next=i}return this.head=e,this.tail=t,this};function $me(t,e,r){var i=e===t.head?new Ic(r,null,e,t):new Ic(r,e,e.next,t);return i.next===null&&(t.tail=i),i.prev===null&&(t.head=i),t.length++,i}function Xme(t,e){t.tail=new Ic(e,t.tail,null,t),t.head||(t.head=t.tail),t.length++}function Zme(t,e){t.head=new Ic(e,null,t.head,t),t.tail||(t.tail=t.head),t.length++}function Ic(t,e,r,i){if(!(this instanceof Ic))return new Ic(t,e,r,i);this.list=i,this.value=t,e?(e.next=this,this.prev=e):this.prev=null,r?(r.prev=this,this.next=r):this.next=null}try{X2()(Gt)}catch(t){}});var sH=w((Wet,$2)=>{\"use strict\";var eEe=Bp(),yc=Symbol(\"max\"),Ta=Symbol(\"length\"),Ag=Symbol(\"lengthCalculator\"),bp=Symbol(\"allowStale\"),wc=Symbol(\"maxAge\"),Oa=Symbol(\"dispose\"),eH=Symbol(\"noDisposeOnSet\"),Ii=Symbol(\"lruList\"),no=Symbol(\"cache\"),tH=Symbol(\"updateAgeOnGet\"),Iv=()=>1,rH=class{constructor(e){if(typeof e==\"number\"&&(e={max:e}),e||(e={}),e.max&&(typeof e.max!=\"number\"||e.max<0))throw new TypeError(\"max must be a non-negative number\");let r=this[yc]=e.max||Infinity,i=e.length||Iv;if(this[Ag]=typeof i!=\"function\"?Iv:i,this[bp]=e.stale||!1,e.maxAge&&typeof e.maxAge!=\"number\")throw new TypeError(\"maxAge must be a number\");this[wc]=e.maxAge||0,this[Oa]=e.dispose,this[eH]=e.noDisposeOnSet||!1,this[tH]=e.updateAgeOnGet||!1,this.reset()}set max(e){if(typeof e!=\"number\"||e<0)throw new TypeError(\"max must be a non-negative number\");this[yc]=e||Infinity,Qp(this)}get max(){return this[yc]}set allowStale(e){this[bp]=!!e}get allowStale(){return this[bp]}set maxAge(e){if(typeof e!=\"number\")throw new TypeError(\"maxAge must be a non-negative number\");this[wc]=e,Qp(this)}get maxAge(){return this[wc]}set lengthCalculator(e){typeof e!=\"function\"&&(e=Iv),e!==this[Ag]&&(this[Ag]=e,this[Ta]=0,this[Ii].forEach(r=>{r.length=this[Ag](r.value,r.key),this[Ta]+=r.length})),Qp(this)}get lengthCalculator(){return this[Ag]}get length(){return this[Ta]}get itemCount(){return this[Ii].length}rforEach(e,r){r=r||this;for(let i=this[Ii].tail;i!==null;){let n=i.prev;nH(this,e,i,r),i=n}}forEach(e,r){r=r||this;for(let i=this[Ii].head;i!==null;){let n=i.next;nH(this,e,i,r),i=n}}keys(){return this[Ii].toArray().map(e=>e.key)}values(){return this[Ii].toArray().map(e=>e.value)}reset(){this[Oa]&&this[Ii]&&this[Ii].length&&this[Ii].forEach(e=>this[Oa](e.key,e.value)),this[no]=new Map,this[Ii]=new eEe,this[Ta]=0}dump(){return this[Ii].map(e=>NI(this,e)?!1:{k:e.key,v:e.value,e:e.now+(e.maxAge||0)}).toArray().filter(e=>e)}dumpLru(){return this[Ii]}set(e,r,i){if(i=i||this[wc],i&&typeof i!=\"number\")throw new TypeError(\"maxAge must be a number\");let n=i?Date.now():0,s=this[Ag](r,e);if(this[no].has(e)){if(s>this[yc])return lg(this,this[no].get(e)),!1;let l=this[no].get(e).value;return 
this[Oa]&&(this[eH]||this[Oa](e,l.value)),l.now=n,l.maxAge=i,l.value=r,this[Ta]+=s-l.length,l.length=s,this.get(e),Qp(this),!0}let o=new iH(e,r,s,n,i);return o.length>this[yc]?(this[Oa]&&this[Oa](e,r),!1):(this[Ta]+=o.length,this[Ii].unshift(o),this[no].set(e,this[Ii].head),Qp(this),!0)}has(e){if(!this[no].has(e))return!1;let r=this[no].get(e).value;return!NI(this,r)}get(e){return yv(this,e,!0)}peek(e){return yv(this,e,!1)}pop(){let e=this[Ii].tail;return e?(lg(this,e),e.value):null}del(e){lg(this,this[no].get(e))}load(e){this.reset();let r=Date.now();for(let i=e.length-1;i>=0;i--){let n=e[i],s=n.e||0;if(s===0)this.set(n.k,n.v);else{let o=s-r;o>0&&this.set(n.k,n.v,o)}}}prune(){this[no].forEach((e,r)=>yv(this,r,!1))}},yv=(t,e,r)=>{let i=t[no].get(e);if(i){let n=i.value;if(NI(t,n)){if(lg(t,i),!t[bp])return}else r&&(t[tH]&&(i.value.now=Date.now()),t[Ii].unshiftNode(i));return n.value}},NI=(t,e)=>{if(!e||!e.maxAge&&!t[wc])return!1;let r=Date.now()-e.now;return e.maxAge?r>e.maxAge:t[wc]&&r>t[wc]},Qp=t=>{if(t[Ta]>t[yc])for(let e=t[Ii].tail;t[Ta]>t[yc]&&e!==null;){let r=e.prev;lg(t,e),e=r}},lg=(t,e)=>{if(e){let r=e.value;t[Oa]&&t[Oa](r.key,r.value),t[Ta]-=r.length,t[no].delete(r.key),t[Ii].removeNode(e)}},iH=class{constructor(e,r,i,n,s){this.key=e,this.value=r,this.length=i,this.now=n,this.maxAge=s||0}},nH=(t,e,r,i)=>{let n=r.value;NI(t,n)&&(lg(t,r),t[bp]||(n=void 0)),n&&e.call(i,n.value,n.key,t)};$2.exports=rH});var Bs=w((zet,oH)=>{var cg=class{constructor(e,r){if(r=tEe(r),e instanceof cg)return e.loose===!!r.loose&&e.includePrerelease===!!r.includePrerelease?e:new cg(e.raw,r);if(e instanceof wv)return this.raw=e.value,this.set=[[e]],this.format(),this;if(this.options=r,this.loose=!!r.loose,this.includePrerelease=!!r.includePrerelease,this.raw=e,this.set=e.split(/\\s*\\|\\|\\s*/).map(i=>this.parseRange(i.trim())).filter(i=>i.length),!this.set.length)throw new TypeError(`Invalid SemVer Range: ${e}`);if(this.set.length>1){let i=this.set[0];if(this.set=this.set.filter(n=>!AH(n[0])),this.set.length===0)this.set=[i];else if(this.set.length>1){for(let n of this.set)if(n.length===1&&oEe(n[0])){this.set=[n];break}}}this.format()}format(){return this.range=this.set.map(e=>e.join(\" \").trim()).join(\"||\").trim(),this.range}toString(){return this.range}parseRange(e){e=e.trim();let i=`parseRange:${Object.keys(this.options).join(\",\")}:${e}`,n=aH.get(i);if(n)return n;let s=this.options.loose,o=s?ji[ki.HYPHENRANGELOOSE]:ji[ki.HYPHENRANGE];e=e.replace(o,lEe(this.options.includePrerelease)),Wr(\"hyphen replace\",e),e=e.replace(ji[ki.COMPARATORTRIM],iEe),Wr(\"comparator trim\",e,ji[ki.COMPARATORTRIM]),e=e.replace(ji[ki.TILDETRIM],nEe),e=e.replace(ji[ki.CARETTRIM],sEe),e=e.split(/\\s+/).join(\" \");let a=s?ji[ki.COMPARATORLOOSE]:ji[ki.COMPARATOR],l=e.split(\" \").map(f=>aEe(f,this.options)).join(\" \").split(/\\s+/).map(f=>AEe(f,this.options)).filter(this.options.loose?f=>!!f.match(a):()=>!0).map(f=>new wv(f,this.options)),c=l.length,u=new Map;for(let f of l){if(AH(f))return[f];u.set(f.value,f)}u.size>1&&u.has(\"\")&&u.delete(\"\");let g=[...u.values()];return aH.set(i,g),g}intersects(e,r){if(!(e instanceof cg))throw new TypeError(\"a Range is required\");return this.set.some(i=>lH(i,r)&&e.set.some(n=>lH(n,r)&&i.every(s=>n.every(o=>s.intersects(o,r)))))}test(e){if(!e)return!1;if(typeof e==\"string\")try{e=new rEe(e,this.options)}catch(r){return!1}for(let r=0;r<this.set.length;r++)if(cEe(this.set[r],e,this.options))return!0;return!1}};oH.exports=cg;var uEe=sH(),aH=new 
uEe({max:1e3}),tEe=Ip(),wv=vp(),Wr=Ep(),rEe=Hi(),{re:ji,t:ki,comparatorTrimReplace:iEe,tildeTrimReplace:nEe,caretTrimReplace:sEe}=mc(),AH=t=>t.value===\"<0.0.0-0\",oEe=t=>t.value===\"\",lH=(t,e)=>{let r=!0,i=t.slice(),n=i.pop();for(;r&&i.length;)r=i.every(s=>n.intersects(s,e)),n=i.pop();return r},aEe=(t,e)=>(Wr(\"comp\",t,e),t=fEe(t,e),Wr(\"caret\",t),t=gEe(t,e),Wr(\"tildes\",t),t=hEe(t,e),Wr(\"xrange\",t),t=pEe(t,e),Wr(\"stars\",t),t),on=t=>!t||t.toLowerCase()===\"x\"||t===\"*\",gEe=(t,e)=>t.trim().split(/\\s+/).map(r=>dEe(r,e)).join(\" \"),dEe=(t,e)=>{let r=e.loose?ji[ki.TILDELOOSE]:ji[ki.TILDE];return t.replace(r,(i,n,s,o,a)=>{Wr(\"tilde\",t,i,n,s,o,a);let l;return on(n)?l=\"\":on(s)?l=`>=${n}.0.0 <${+n+1}.0.0-0`:on(o)?l=`>=${n}.${s}.0 <${n}.${+s+1}.0-0`:a?(Wr(\"replaceTilde pr\",a),l=`>=${n}.${s}.${o}-${a} <${n}.${+s+1}.0-0`):l=`>=${n}.${s}.${o} <${n}.${+s+1}.0-0`,Wr(\"tilde return\",l),l})},fEe=(t,e)=>t.trim().split(/\\s+/).map(r=>CEe(r,e)).join(\" \"),CEe=(t,e)=>{Wr(\"caret\",t,e);let r=e.loose?ji[ki.CARETLOOSE]:ji[ki.CARET],i=e.includePrerelease?\"-0\":\"\";return t.replace(r,(n,s,o,a,l)=>{Wr(\"caret\",t,n,s,o,a,l);let c;return on(s)?c=\"\":on(o)?c=`>=${s}.0.0${i} <${+s+1}.0.0-0`:on(a)?s===\"0\"?c=`>=${s}.${o}.0${i} <${s}.${+o+1}.0-0`:c=`>=${s}.${o}.0${i} <${+s+1}.0.0-0`:l?(Wr(\"replaceCaret pr\",l),s===\"0\"?o===\"0\"?c=`>=${s}.${o}.${a}-${l} <${s}.${o}.${+a+1}-0`:c=`>=${s}.${o}.${a}-${l} <${s}.${+o+1}.0-0`:c=`>=${s}.${o}.${a}-${l} <${+s+1}.0.0-0`):(Wr(\"no pr\"),s===\"0\"?o===\"0\"?c=`>=${s}.${o}.${a}${i} <${s}.${o}.${+a+1}-0`:c=`>=${s}.${o}.${a}${i} <${s}.${+o+1}.0-0`:c=`>=${s}.${o}.${a} <${+s+1}.0.0-0`),Wr(\"caret return\",c),c})},hEe=(t,e)=>(Wr(\"replaceXRanges\",t,e),t.split(/\\s+/).map(r=>mEe(r,e)).join(\" \")),mEe=(t,e)=>{t=t.trim();let r=e.loose?ji[ki.XRANGELOOSE]:ji[ki.XRANGE];return t.replace(r,(i,n,s,o,a,l)=>{Wr(\"xRange\",t,i,n,s,o,a,l);let c=on(s),u=c||on(o),g=u||on(a),f=g;return n===\"=\"&&f&&(n=\"\"),l=e.includePrerelease?\"-0\":\"\",c?n===\">\"||n===\"<\"?i=\"<0.0.0-0\":i=\"*\":n&&f?(u&&(o=0),a=0,n===\">\"?(n=\">=\",u?(s=+s+1,o=0,a=0):(o=+o+1,a=0)):n===\"<=\"&&(n=\"<\",u?s=+s+1:o=+o+1),n===\"<\"&&(l=\"-0\"),i=`${n+s}.${o}.${a}${l}`):u?i=`>=${s}.0.0${l} <${+s+1}.0.0-0`:g&&(i=`>=${s}.${o}.0${l} <${s}.${+o+1}.0-0`),Wr(\"xRange return\",i),i})},pEe=(t,e)=>(Wr(\"replaceStars\",t,e),t.trim().replace(ji[ki.STAR],\"\")),AEe=(t,e)=>(Wr(\"replaceGTE0\",t,e),t.trim().replace(ji[e.includePrerelease?ki.GTE0PRE:ki.GTE0],\"\")),lEe=t=>(e,r,i,n,s,o,a,l,c,u,g,f,h)=>(on(i)?r=\"\":on(n)?r=`>=${i}.0.0${t?\"-0\":\"\"}`:on(s)?r=`>=${i}.${n}.0${t?\"-0\":\"\"}`:o?r=`>=${r}`:r=`>=${r}${t?\"-0\":\"\"}`,on(c)?l=\"\":on(u)?l=`<${+c+1}.0.0-0`:on(g)?l=`<${c}.${+u+1}.0-0`:f?l=`<=${c}.${u}.${g}-${f}`:t?l=`<${c}.${u}.${+g+1}-0`:l=`<=${l}`,`${r} ${l}`.trim()),cEe=(t,e,r)=>{for(let i=0;i<t.length;i++)if(!t[i].test(e))return!1;if(e.prerelease.length&&!r.includePrerelease){for(let i=0;i<t.length;i++)if(Wr(t[i].semver),t[i].semver!==wv.ANY&&t[i].semver.prerelease.length>0){let n=t[i].semver;if(n.major===e.major&&n.minor===e.minor&&n.patch===e.patch)return!0}return!1}return!0}});var vp=w((_et,cH)=>{var Sp=Symbol(\"SemVer ANY\"),kp=class{static get ANY(){return Sp}constructor(e,r){if(r=EEe(r),e instanceof kp){if(e.loose===!!r.loose)return e;e=e.value}bv(\"comparator\",e,r),this.options=r,this.loose=!!r.loose,this.parse(e),this.semver===Sp?this.value=\"\":this.value=this.operator+this.semver.version,bv(\"comp\",this)}parse(e){let 
r=this.options.loose?uH[gH.COMPARATORLOOSE]:uH[gH.COMPARATOR],i=e.match(r);if(!i)throw new TypeError(`Invalid comparator: ${e}`);this.operator=i[1]!==void 0?i[1]:\"\",this.operator===\"=\"&&(this.operator=\"\"),i[2]?this.semver=new fH(i[2],this.options.loose):this.semver=Sp}toString(){return this.value}test(e){if(bv(\"Comparator.test\",e,this.options.loose),this.semver===Sp||e===Sp)return!0;if(typeof e==\"string\")try{e=new fH(e,this.options)}catch(r){return!1}return Bv(e,this.operator,this.semver,this.options)}intersects(e,r){if(!(e instanceof kp))throw new TypeError(\"a Comparator is required\");if((!r||typeof r!=\"object\")&&(r={loose:!!r,includePrerelease:!1}),this.operator===\"\")return this.value===\"\"?!0:new hH(e.value,r).test(this.value);if(e.operator===\"\")return e.value===\"\"?!0:new hH(this.value,r).test(e.semver);let i=(this.operator===\">=\"||this.operator===\">\")&&(e.operator===\">=\"||e.operator===\">\"),n=(this.operator===\"<=\"||this.operator===\"<\")&&(e.operator===\"<=\"||e.operator===\"<\"),s=this.semver.version===e.semver.version,o=(this.operator===\">=\"||this.operator===\"<=\")&&(e.operator===\">=\"||e.operator===\"<=\"),a=Bv(this.semver,\"<\",e.semver,r)&&(this.operator===\">=\"||this.operator===\">\")&&(e.operator===\"<=\"||e.operator===\"<\"),l=Bv(this.semver,\">\",e.semver,r)&&(this.operator===\"<=\"||this.operator===\"<\")&&(e.operator===\">=\"||e.operator===\">\");return i||n||s&&o||a||l}};cH.exports=kp;var EEe=Ip(),{re:uH,t:gH}=mc(),Bv=Ev(),bv=Ep(),fH=Hi(),hH=Bs()});var xp=w((Vet,pH)=>{var IEe=Bs(),yEe=(t,e,r)=>{try{e=new IEe(e,r)}catch(i){return!1}return e.test(t)};pH.exports=yEe});var CH=w((Xet,dH)=>{var wEe=Bs(),BEe=(t,e)=>new wEe(t,e).set.map(r=>r.map(i=>i.value).join(\" \").trim().split(\" \"));dH.exports=BEe});var EH=w((Zet,mH)=>{var bEe=Hi(),QEe=Bs(),vEe=(t,e,r)=>{let i=null,n=null,s=null;try{s=new QEe(e,r)}catch(o){return null}return t.forEach(o=>{s.test(o)&&(!i||n.compare(o)===-1)&&(i=o,n=new bEe(i,r))}),i};mH.exports=vEe});var yH=w(($et,IH)=>{var SEe=Hi(),kEe=Bs(),xEe=(t,e,r)=>{let i=null,n=null,s=null;try{s=new kEe(e,r)}catch(o){return null}return t.forEach(o=>{s.test(o)&&(!i||n.compare(o)===1)&&(i=o,n=new SEe(i,r))}),i};IH.exports=xEe});var bH=w((ett,wH)=>{var Qv=Hi(),PEe=Bs(),BH=wp(),DEe=(t,e)=>{t=new PEe(t,e);let r=new Qv(\"0.0.0\");if(t.test(r)||(r=new Qv(\"0.0.0-0\"),t.test(r)))return r;r=null;for(let i=0;i<t.set.length;++i){let n=t.set[i],s=null;n.forEach(o=>{let a=new Qv(o.semver.version);switch(o.operator){case\">\":a.prerelease.length===0?a.patch++:a.prerelease.push(0),a.raw=a.format();case\"\":case\">=\":(!s||BH(a,s))&&(s=a);break;case\"<\":case\"<=\":break;default:throw new Error(`Unexpected operation: ${o.operator}`)}}),s&&(!r||BH(r,s))&&(r=s)}return r&&t.test(r)?r:null};wH.exports=DEe});var vH=w((ttt,QH)=>{var REe=Bs(),FEe=(t,e)=>{try{return new REe(t,e).range||\"*\"}catch(r){return null}};QH.exports=FEe});var LI=w((rtt,SH)=>{var NEe=Hi(),kH=vp(),{ANY:LEe}=kH,TEe=Bs(),OEe=xp(),xH=wp(),PH=xI(),MEe=DI(),UEe=PI(),KEe=(t,e,r,i)=>{t=new NEe(t,i),e=new TEe(e,i);let n,s,o,a,l;switch(r){case\">\":n=xH,s=MEe,o=PH,a=\">\",l=\">=\";break;case\"<\":n=PH,s=UEe,o=xH,a=\"<\",l=\"<=\";break;default:throw new TypeError('Must provide a hilo val of \"<\" or \">\"')}if(OEe(t,e,i))return!1;for(let c=0;c<e.set.length;++c){let u=e.set[c],g=null,f=null;if(u.forEach(h=>{h.semver===LEe&&(h=new 
kH(\">=0.0.0\")),g=g||h,f=f||h,n(h.semver,g.semver,i)?g=h:o(h.semver,f.semver,i)&&(f=h)}),g.operator===a||g.operator===l||(!f.operator||f.operator===a)&&s(t,f.semver))return!1;if(f.operator===l&&o(t,f.semver))return!1}return!0};SH.exports=KEe});var RH=w((itt,DH)=>{var HEe=LI(),jEe=(t,e,r)=>HEe(t,e,\">\",r);DH.exports=jEe});var NH=w((ntt,FH)=>{var GEe=LI(),YEe=(t,e,r)=>GEe(t,e,\"<\",r);FH.exports=YEe});var OH=w((stt,LH)=>{var TH=Bs(),qEe=(t,e,r)=>(t=new TH(t,r),e=new TH(e,r),t.intersects(e));LH.exports=qEe});var UH=w((ott,MH)=>{var JEe=xp(),WEe=ws();MH.exports=(t,e,r)=>{let i=[],n=null,s=null,o=t.sort((u,g)=>WEe(u,g,r));for(let u of o)JEe(u,e,r)?(s=u,n||(n=u)):(s&&i.push([n,s]),s=null,n=null);n&&i.push([n,null]);let a=[];for(let[u,g]of i)u===g?a.push(u):!g&&u===o[0]?a.push(\"*\"):g?u===o[0]?a.push(`<=${g}`):a.push(`${u} - ${g}`):a.push(`>=${u}`);let l=a.join(\" || \"),c=typeof e.raw==\"string\"?e.raw:String(e);return l.length<c.length?l:e}});var YH=w((att,KH)=>{var HH=Bs(),TI=vp(),{ANY:vv}=TI,Pp=xp(),Sv=ws(),_Ee=(t,e,r={})=>{if(t===e)return!0;t=new HH(t,r),e=new HH(e,r);let i=!1;e:for(let n of t.set){for(let s of e.set){let o=zEe(n,s,r);if(i=i||o!==null,o)continue e}if(i)return!1}return!0},zEe=(t,e,r)=>{if(t===e)return!0;if(t.length===1&&t[0].semver===vv){if(e.length===1&&e[0].semver===vv)return!0;r.includePrerelease?t=[new TI(\">=0.0.0-0\")]:t=[new TI(\">=0.0.0\")]}if(e.length===1&&e[0].semver===vv){if(r.includePrerelease)return!0;e=[new TI(\">=0.0.0\")]}let i=new Set,n,s;for(let h of t)h.operator===\">\"||h.operator===\">=\"?n=jH(n,h,r):h.operator===\"<\"||h.operator===\"<=\"?s=GH(s,h,r):i.add(h.semver);if(i.size>1)return null;let o;if(n&&s){if(o=Sv(n.semver,s.semver,r),o>0)return null;if(o===0&&(n.operator!==\">=\"||s.operator!==\"<=\"))return null}for(let h of i){if(n&&!Pp(h,String(n),r)||s&&!Pp(h,String(s),r))return null;for(let p of e)if(!Pp(h,String(p),r))return!1;return!0}let a,l,c,u,g=s&&!r.includePrerelease&&s.semver.prerelease.length?s.semver:!1,f=n&&!r.includePrerelease&&n.semver.prerelease.length?n.semver:!1;g&&g.prerelease.length===1&&s.operator===\"<\"&&g.prerelease[0]===0&&(g=!1);for(let h of e){if(u=u||h.operator===\">\"||h.operator===\">=\",c=c||h.operator===\"<\"||h.operator===\"<=\",n){if(f&&h.semver.prerelease&&h.semver.prerelease.length&&h.semver.major===f.major&&h.semver.minor===f.minor&&h.semver.patch===f.patch&&(f=!1),h.operator===\">\"||h.operator===\">=\"){if(a=jH(n,h,r),a===h&&a!==n)return!1}else if(n.operator===\">=\"&&!Pp(n.semver,String(h),r))return!1}if(s){if(g&&h.semver.prerelease&&h.semver.prerelease.length&&h.semver.major===g.major&&h.semver.minor===g.minor&&h.semver.patch===g.patch&&(g=!1),h.operator===\"<\"||h.operator===\"<=\"){if(l=GH(s,h,r),l===h&&l!==s)return!1}else if(s.operator===\"<=\"&&!Pp(s.semver,String(h),r))return!1}if(!h.operator&&(s||n)&&o!==0)return!1}return!(n&&c&&!s&&o!==0||s&&u&&!n&&o!==0||f||g)},jH=(t,e,r)=>{if(!t)return e;let i=Sv(t.semver,e.semver,r);return i>0?t:i<0||e.operator===\">\"&&t.operator===\">=\"?e:t},GH=(t,e,r)=>{if(!t)return e;let i=Sv(t.semver,e.semver,r);return i<0?t:i>0||e.operator===\"<\"&&t.operator===\"<=\"?e:t};KH.exports=_Ee});var ti=w((Att,qH)=>{var 
kv=mc();qH.exports={re:kv.re,src:kv.src,tokens:kv.t,SEMVER_SPEC_VERSION:mp().SEMVER_SPEC_VERSION,SemVer:Hi(),compareIdentifiers:bI().compareIdentifiers,rcompareIdentifiers:bI().rcompareIdentifiers,parse:Ec(),valid:f2(),clean:p2(),inc:C2(),diff:B2(),major:Q2(),minor:S2(),patch:x2(),prerelease:D2(),compare:ws(),rcompare:F2(),compareLoose:L2(),compareBuild:kI(),sort:U2(),rsort:H2(),gt:wp(),lt:xI(),eq:SI(),neq:mv(),gte:PI(),lte:DI(),cmp:Ev(),coerce:_2(),Comparator:vp(),Range:Bs(),satisfies:xp(),toComparators:CH(),maxSatisfying:EH(),minSatisfying:yH(),minVersion:bH(),validRange:vH(),outside:LI(),gtr:RH(),ltr:NH(),intersects:OH(),simplifyRange:UH(),subset:YH()}});var xv=w(OI=>{\"use strict\";Object.defineProperty(OI,\"__esModule\",{value:!0});OI.VERSION=void 0;OI.VERSION=\"9.1.0\"});var Yt=w((exports,module)=>{\"use strict\";var __spreadArray=exports&&exports.__spreadArray||function(t,e,r){if(r||arguments.length===2)for(var i=0,n=e.length,s;i<n;i++)(s||!(i in e))&&(s||(s=Array.prototype.slice.call(e,0,i)),s[i]=e[i]);return t.concat(s||Array.prototype.slice.call(e))};Object.defineProperty(exports,\"__esModule\",{value:!0});exports.toFastProperties=exports.timer=exports.peek=exports.isES2015MapSupported=exports.PRINT_WARNING=exports.PRINT_ERROR=exports.packArray=exports.IDENTITY=exports.NOOP=exports.merge=exports.groupBy=exports.defaults=exports.assignNoOverwrite=exports.assign=exports.zipObject=exports.sortBy=exports.indexOf=exports.some=exports.difference=exports.every=exports.isObject=exports.isRegExp=exports.isArray=exports.partial=exports.uniq=exports.compact=exports.reduce=exports.findAll=exports.find=exports.cloneObj=exports.cloneArr=exports.contains=exports.has=exports.pick=exports.reject=exports.filter=exports.dropRight=exports.drop=exports.isFunction=exports.isUndefined=exports.isString=exports.forEach=exports.last=exports.first=exports.flatten=exports.map=exports.mapValues=exports.values=exports.keys=exports.isEmpty=void 0;exports.upperFirst=void 0;function isEmpty(t){return t&&t.length===0}exports.isEmpty=isEmpty;function keys(t){return t==null?[]:Object.keys(t)}exports.keys=keys;function values(t){for(var e=[],r=Object.keys(t),i=0;i<r.length;i++)e.push(t[r[i]]);return e}exports.values=values;function mapValues(t,e){for(var r=[],i=keys(t),n=0;n<i.length;n++){var s=i[n];r.push(e.call(null,t[s],s))}return r}exports.mapValues=mapValues;function map(t,e){for(var r=[],i=0;i<t.length;i++)r.push(e.call(null,t[i],i));return r}exports.map=map;function flatten(t){for(var e=[],r=0;r<t.length;r++){var i=t[r];Array.isArray(i)?e=e.concat(flatten(i)):e.push(i)}return e}exports.flatten=flatten;function first(t){return isEmpty(t)?void 0:t[0]}exports.first=first;function last(t){var e=t&&t.length;return e?t[e-1]:void 0}exports.last=last;function forEach(t,e){if(Array.isArray(t))for(var r=0;r<t.length;r++)e.call(null,t[r],r);else if(isObject(t))for(var i=keys(t),r=0;r<i.length;r++){var n=i[r],s=t[n];e.call(null,s,n)}else throw Error(\"non exhaustive match\")}exports.forEach=forEach;function isString(t){return typeof t==\"string\"}exports.isString=isString;function isUndefined(t){return t===void 0}exports.isUndefined=isUndefined;function isFunction(t){return t instanceof Function}exports.isFunction=isFunction;function drop(t,e){return e===void 0&&(e=1),t.slice(e,t.length)}exports.drop=drop;function dropRight(t,e){return e===void 0&&(e=1),t.slice(0,t.length-e)}exports.dropRight=dropRight;function filter(t,e){var r=[];if(Array.isArray(t))for(var i=0;i<t.length;i++){var 
n=t[i];e.call(null,n)&&r.push(n)}return r}exports.filter=filter;function reject(t,e){return filter(t,function(r){return!e(r)})}exports.reject=reject;function pick(t,e){for(var r=Object.keys(t),i={},n=0;n<r.length;n++){var s=r[n],o=t[s];e(o)&&(i[s]=o)}return i}exports.pick=pick;function has(t,e){return isObject(t)?t.hasOwnProperty(e):!1}exports.has=has;function contains(t,e){return find(t,function(r){return r===e})!==void 0}exports.contains=contains;function cloneArr(t){for(var e=[],r=0;r<t.length;r++)e.push(t[r]);return e}exports.cloneArr=cloneArr;function cloneObj(t){var e={};for(var r in t)Object.prototype.hasOwnProperty.call(t,r)&&(e[r]=t[r]);return e}exports.cloneObj=cloneObj;function find(t,e){for(var r=0;r<t.length;r++){var i=t[r];if(e.call(null,i))return i}}exports.find=find;function findAll(t,e){for(var r=[],i=0;i<t.length;i++){var n=t[i];e.call(null,n)&&r.push(n)}return r}exports.findAll=findAll;function reduce(t,e,r){for(var i=Array.isArray(t),n=i?t:values(t),s=i?[]:keys(t),o=r,a=0;a<n.length;a++)o=e.call(null,o,n[a],i?a:s[a]);return o}exports.reduce=reduce;function compact(t){return reject(t,function(e){return e==null})}exports.compact=compact;function uniq(t,e){e===void 0&&(e=function(i){return i});var r=[];return reduce(t,function(i,n){var s=e(n);return contains(r,s)?i:(r.push(s),i.concat(n))},[])}exports.uniq=uniq;function partial(t){for(var e=[],r=1;r<arguments.length;r++)e[r-1]=arguments[r];var i=[null],n=i.concat(e);return Function.bind.apply(t,n)}exports.partial=partial;function isArray(t){return Array.isArray(t)}exports.isArray=isArray;function isRegExp(t){return t instanceof RegExp}exports.isRegExp=isRegExp;function isObject(t){return t instanceof Object}exports.isObject=isObject;function every(t,e){for(var r=0;r<t.length;r++)if(!e(t[r],r))return!1;return!0}exports.every=every;function difference(t,e){return reject(t,function(r){return contains(e,r)})}exports.difference=difference;function some(t,e){for(var r=0;r<t.length;r++)if(e(t[r]))return!0;return!1}exports.some=some;function indexOf(t,e){for(var r=0;r<t.length;r++)if(t[r]===e)return r;return-1}exports.indexOf=indexOf;function sortBy(t,e){var r=cloneArr(t);return r.sort(function(i,n){return e(i)-e(n)}),r}exports.sortBy=sortBy;function zipObject(t,e){if(t.length!==e.length)throw Error(\"can't zipObject with different number of keys and values!\");for(var r={},i=0;i<t.length;i++)r[t[i]]=e[i];return r}exports.zipObject=zipObject;function assign(t){for(var e=[],r=1;r<arguments.length;r++)e[r-1]=arguments[r];for(var i=0;i<e.length;i++)for(var n=e[i],s=keys(n),o=0;o<s.length;o++){var a=s[o];t[a]=n[a]}return t}exports.assign=assign;function assignNoOverwrite(t){for(var e=[],r=1;r<arguments.length;r++)e[r-1]=arguments[r];for(var i=0;i<e.length;i++)for(var n=e[i],s=keys(n),o=0;o<s.length;o++){var a=s[o];has(t,a)||(t[a]=n[a])}return t}exports.assignNoOverwrite=assignNoOverwrite;function defaults(){for(var t=[],e=0;e<arguments.length;e++)t[e]=arguments[e];return assignNoOverwrite.apply(void 0,__spreadArray([{}],t,!1))}exports.defaults=defaults;function groupBy(t,e){var r={};return forEach(t,function(i){var n=e(i),s=r[n];s?s.push(i):r[n]=[i]}),r}exports.groupBy=groupBy;function merge(t,e){for(var r=cloneObj(t),i=keys(e),n=0;n<i.length;n++){var s=i[n],o=e[s];r[s]=o}return r}exports.merge=merge;function NOOP(){}exports.NOOP=NOOP;function IDENTITY(t){return t}exports.IDENTITY=IDENTITY;function packArray(t){for(var e=[],r=0;r<t.length;r++){var i=t[r];e.push(i!==void 0?i:void 0)}return e}exports.packArray=packArray;function 
PRINT_ERROR(t){console&&console.error&&console.error(\"Error: \"+t)}exports.PRINT_ERROR=PRINT_ERROR;function PRINT_WARNING(t){console&&console.warn&&console.warn(\"Warning: \"+t)}exports.PRINT_WARNING=PRINT_WARNING;function isES2015MapSupported(){return typeof Map==\"function\"}exports.isES2015MapSupported=isES2015MapSupported;function peek(t){return t[t.length-1]}exports.peek=peek;function timer(t){var e=new Date().getTime(),r=t(),i=new Date().getTime(),n=i-e;return{time:n,value:r}}exports.timer=timer;function toFastProperties(toBecomeFast){function FakeConstructor(){}FakeConstructor.prototype=toBecomeFast;var fakeInstance=new FakeConstructor;function fakeAccess(){return typeof fakeInstance.bar}return fakeAccess(),fakeAccess(),toBecomeFast;eval(toBecomeFast)}exports.toFastProperties=toFastProperties;function upperFirst(t){if(!t)return t;var e=getCharacterFromCodePointAt(t,0);return e.toUpperCase()+t.substring(e.length)}exports.upperFirst=upperFirst;var surrogatePairPattern=/[\\uD800-\\uDBFF][\\uDC00-\\uDFFF]/;function getCharacterFromCodePointAt(t,e){var r=t.substring(e,e+1);return surrogatePairPattern.test(r)?r:t[e]}});var UI=w((JH,MI)=>{(function(t,e){typeof define==\"function\"&&define.amd?define([],e):typeof MI==\"object\"&&MI.exports?MI.exports=e():t.regexpToAst=e()})(typeof self!=\"undefined\"?self:JH,function(){function t(){}t.prototype.saveState=function(){return{idx:this.idx,input:this.input,groupIdx:this.groupIdx}},t.prototype.restoreState=function(p){this.idx=p.idx,this.input=p.input,this.groupIdx=p.groupIdx},t.prototype.pattern=function(p){this.idx=0,this.input=p,this.groupIdx=0,this.consumeChar(\"/\");var m=this.disjunction();this.consumeChar(\"/\");for(var y={type:\"Flags\",loc:{begin:this.idx,end:p.length},global:!1,ignoreCase:!1,multiLine:!1,unicode:!1,sticky:!1};this.isRegExpFlag();)switch(this.popChar()){case\"g\":o(y,\"global\");break;case\"i\":o(y,\"ignoreCase\");break;case\"m\":o(y,\"multiLine\");break;case\"u\":o(y,\"unicode\");break;case\"y\":o(y,\"sticky\");break}if(this.idx!==this.input.length)throw Error(\"Redundant input: \"+this.input.substring(this.idx));return{type:\"Pattern\",flags:y,value:m,loc:this.loc(0)}},t.prototype.disjunction=function(){var p=[],m=this.idx;for(p.push(this.alternative());this.peekChar()===\"|\";)this.consumeChar(\"|\"),p.push(this.alternative());return{type:\"Disjunction\",value:p,loc:this.loc(m)}},t.prototype.alternative=function(){for(var p=[],m=this.idx;this.isTerm();)p.push(this.term());return{type:\"Alternative\",value:p,loc:this.loc(m)}},t.prototype.term=function(){return this.isAssertion()?this.assertion():this.atom()},t.prototype.assertion=function(){var p=this.idx;switch(this.popChar()){case\"^\":return{type:\"StartAnchor\",loc:this.loc(p)};case\"$\":return{type:\"EndAnchor\",loc:this.loc(p)};case\"\\\\\":switch(this.popChar()){case\"b\":return{type:\"WordBoundary\",loc:this.loc(p)};case\"B\":return{type:\"NonWordBoundary\",loc:this.loc(p)}}throw Error(\"Invalid Assertion Escape\");case\"(\":this.consumeChar(\"?\");var m;switch(this.popChar()){case\"=\":m=\"Lookahead\";break;case\"!\":m=\"NegativeLookahead\";break}a(m);var y=this.disjunction();return this.consumeChar(\")\"),{type:m,value:y,loc:this.loc(p)}}l()},t.prototype.quantifier=function(p){var m,y=this.idx;switch(this.popChar()){case\"*\":m={atLeast:0,atMost:Infinity};break;case\"+\":m={atLeast:1,atMost:Infinity};break;case\"?\":m={atLeast:0,atMost:1};break;case\"{\":var 
Q=this.integerIncludingZero();switch(this.popChar()){case\"}\":m={atLeast:Q,atMost:Q};break;case\",\":var S;this.isDigit()?(S=this.integerIncludingZero(),m={atLeast:Q,atMost:S}):m={atLeast:Q,atMost:Infinity},this.consumeChar(\"}\");break}if(p===!0&&m===void 0)return;a(m);break}if(!(p===!0&&m===void 0))return a(m),this.peekChar(0)===\"?\"?(this.consumeChar(\"?\"),m.greedy=!1):m.greedy=!0,m.type=\"Quantifier\",m.loc=this.loc(y),m},t.prototype.atom=function(){var p,m=this.idx;switch(this.peekChar()){case\".\":p=this.dotAll();break;case\"\\\\\":p=this.atomEscape();break;case\"[\":p=this.characterClass();break;case\"(\":p=this.group();break}return p===void 0&&this.isPatternCharacter()&&(p=this.patternCharacter()),a(p),p.loc=this.loc(m),this.isQuantifier()&&(p.quantifier=this.quantifier()),p},t.prototype.dotAll=function(){return this.consumeChar(\".\"),{type:\"Set\",complement:!0,value:[n(`\n`),n(\"\\r\"),n(\"\\u2028\"),n(\"\\u2029\")]}},t.prototype.atomEscape=function(){switch(this.consumeChar(\"\\\\\"),this.peekChar()){case\"1\":case\"2\":case\"3\":case\"4\":case\"5\":case\"6\":case\"7\":case\"8\":case\"9\":return this.decimalEscapeAtom();case\"d\":case\"D\":case\"s\":case\"S\":case\"w\":case\"W\":return this.characterClassEscape();case\"f\":case\"n\":case\"r\":case\"t\":case\"v\":return this.controlEscapeAtom();case\"c\":return this.controlLetterEscapeAtom();case\"0\":return this.nulCharacterAtom();case\"x\":return this.hexEscapeSequenceAtom();case\"u\":return this.regExpUnicodeEscapeSequenceAtom();default:return this.identityEscapeAtom()}},t.prototype.decimalEscapeAtom=function(){var p=this.positiveInteger();return{type:\"GroupBackReference\",value:p}},t.prototype.characterClassEscape=function(){var p,m=!1;switch(this.popChar()){case\"d\":p=u;break;case\"D\":p=u,m=!0;break;case\"s\":p=f;break;case\"S\":p=f,m=!0;break;case\"w\":p=g;break;case\"W\":p=g,m=!0;break}return a(p),{type:\"Set\",value:p,complement:m}},t.prototype.controlEscapeAtom=function(){var p;switch(this.popChar()){case\"f\":p=n(\"\\f\");break;case\"n\":p=n(`\n`);break;case\"r\":p=n(\"\\r\");break;case\"t\":p=n(\"\t\");break;case\"v\":p=n(\"\\v\");break}return a(p),{type:\"Character\",value:p}},t.prototype.controlLetterEscapeAtom=function(){this.consumeChar(\"c\");var p=this.popChar();if(/[a-zA-Z]/.test(p)===!1)throw Error(\"Invalid \");var m=p.toUpperCase().charCodeAt(0)-64;return{type:\"Character\",value:m}},t.prototype.nulCharacterAtom=function(){return this.consumeChar(\"0\"),{type:\"Character\",value:n(\"\\0\")}},t.prototype.hexEscapeSequenceAtom=function(){return this.consumeChar(\"x\"),this.parseHexDigits(2)},t.prototype.regExpUnicodeEscapeSequenceAtom=function(){return this.consumeChar(\"u\"),this.parseHexDigits(4)},t.prototype.identityEscapeAtom=function(){var p=this.popChar();return{type:\"Character\",value:n(p)}},t.prototype.classPatternCharacterAtom=function(){switch(this.peekChar()){case`\n`:case\"\\r\":case\"\\u2028\":case\"\\u2029\":case\"\\\\\":case\"]\":throw Error(\"TBD\");default:var p=this.popChar();return{type:\"Character\",value:n(p)}}},t.prototype.characterClass=function(){var p=[],m=!1;for(this.consumeChar(\"[\"),this.peekChar(0)===\"^\"&&(this.consumeChar(\"^\"),m=!0);this.isClassAtom();){var y=this.classAtom(),Q=y.type===\"Character\";if(Q&&this.isRangeDash()){this.consumeChar(\"-\");var S=this.classAtom(),x=S.type===\"Character\";if(x){if(S.value<y.value)throw Error(\"Range out of order in character class\");p.push({from:y.value,to:S.value})}else s(y.value,p),p.push(n(\"-\")),s(S.value,p)}else 
s(y.value,p)}return this.consumeChar(\"]\"),{type:\"Set\",complement:m,value:p}},t.prototype.classAtom=function(){switch(this.peekChar()){case\"]\":case`\n`:case\"\\r\":case\"\\u2028\":case\"\\u2029\":throw Error(\"TBD\");case\"\\\\\":return this.classEscape();default:return this.classPatternCharacterAtom()}},t.prototype.classEscape=function(){switch(this.consumeChar(\"\\\\\"),this.peekChar()){case\"b\":return this.consumeChar(\"b\"),{type:\"Character\",value:n(\"\\b\")};case\"d\":case\"D\":case\"s\":case\"S\":case\"w\":case\"W\":return this.characterClassEscape();case\"f\":case\"n\":case\"r\":case\"t\":case\"v\":return this.controlEscapeAtom();case\"c\":return this.controlLetterEscapeAtom();case\"0\":return this.nulCharacterAtom();case\"x\":return this.hexEscapeSequenceAtom();case\"u\":return this.regExpUnicodeEscapeSequenceAtom();default:return this.identityEscapeAtom()}},t.prototype.group=function(){var p=!0;switch(this.consumeChar(\"(\"),this.peekChar(0)){case\"?\":this.consumeChar(\"?\"),this.consumeChar(\":\"),p=!1;break;default:this.groupIdx++;break}var m=this.disjunction();this.consumeChar(\")\");var y={type:\"Group\",capturing:p,value:m};return p&&(y.idx=this.groupIdx),y},t.prototype.positiveInteger=function(){var p=this.popChar();if(i.test(p)===!1)throw Error(\"Expecting a positive integer\");for(;r.test(this.peekChar(0));)p+=this.popChar();return parseInt(p,10)},t.prototype.integerIncludingZero=function(){var p=this.popChar();if(r.test(p)===!1)throw Error(\"Expecting an integer\");for(;r.test(this.peekChar(0));)p+=this.popChar();return parseInt(p,10)},t.prototype.patternCharacter=function(){var p=this.popChar();switch(p){case`\n`:case\"\\r\":case\"\\u2028\":case\"\\u2029\":case\"^\":case\"$\":case\"\\\\\":case\".\":case\"*\":case\"+\":case\"?\":case\"(\":case\")\":case\"[\":case\"|\":throw Error(\"TBD\");default:return{type:\"Character\",value:n(p)}}},t.prototype.isRegExpFlag=function(){switch(this.peekChar(0)){case\"g\":case\"i\":case\"m\":case\"u\":case\"y\":return!0;default:return!1}},t.prototype.isRangeDash=function(){return this.peekChar()===\"-\"&&this.isClassAtom(1)},t.prototype.isDigit=function(){return r.test(this.peekChar(0))},t.prototype.isClassAtom=function(p){switch(p===void 0&&(p=0),this.peekChar(p)){case\"]\":case`\n`:case\"\\r\":case\"\\u2028\":case\"\\u2029\":return!1;default:return!0}},t.prototype.isTerm=function(){return this.isAtom()||this.isAssertion()},t.prototype.isAtom=function(){if(this.isPatternCharacter())return!0;switch(this.peekChar(0)){case\".\":case\"\\\\\":case\"[\":case\"(\":return!0;default:return!1}},t.prototype.isAssertion=function(){switch(this.peekChar(0)){case\"^\":case\"$\":return!0;case\"\\\\\":switch(this.peekChar(1)){case\"b\":case\"B\":return!0;default:return!1}case\"(\":return this.peekChar(1)===\"?\"&&(this.peekChar(2)===\"=\"||this.peekChar(2)===\"!\");default:return!1}},t.prototype.isQuantifier=function(){var p=this.saveState();try{return this.quantifier(!0)!==void 0}catch(m){return!1}finally{this.restoreState(p)}},t.prototype.isPatternCharacter=function(){switch(this.peekChar()){case\"^\":case\"$\":case\"\\\\\":case\".\":case\"*\":case\"+\":case\"?\":case\"(\":case\")\":case\"[\":case\"|\":case\"/\":case`\n`:case\"\\r\":case\"\\u2028\":case\"\\u2029\":return!1;default:return!0}},t.prototype.parseHexDigits=function(p){for(var m=\"\",y=0;y<p;y++){var Q=this.popChar();if(e.test(Q)===!1)throw Error(\"Expecting a HexDecimal digits\");m+=Q}var S=parseInt(m,16);return{type:\"Character\",value:S}},t.prototype.peekChar=function(p){return 
p===void 0&&(p=0),this.input[this.idx+p]},t.prototype.popChar=function(){var p=this.peekChar(0);return this.consumeChar(),p},t.prototype.consumeChar=function(p){if(p!==void 0&&this.input[this.idx]!==p)throw Error(\"Expected: '\"+p+\"' but found: '\"+this.input[this.idx]+\"' at offset: \"+this.idx);if(this.idx>=this.input.length)throw Error(\"Unexpected end of input\");this.idx++},t.prototype.loc=function(p){return{begin:p,end:this.idx}};var e=/[0-9a-fA-F]/,r=/[0-9]/,i=/[1-9]/;function n(p){return p.charCodeAt(0)}function s(p,m){p.length!==void 0?p.forEach(function(y){m.push(y)}):m.push(p)}function o(p,m){if(p[m]===!0)throw\"duplicate flag \"+m;p[m]=!0}function a(p){if(p===void 0)throw Error(\"Internal Error - Should never get here!\")}function l(){throw Error(\"Internal Error - Should never get here!\")}var c,u=[];for(c=n(\"0\");c<=n(\"9\");c++)u.push(c);var g=[n(\"_\")].concat(u);for(c=n(\"a\");c<=n(\"z\");c++)g.push(c);for(c=n(\"A\");c<=n(\"Z\");c++)g.push(c);var f=[n(\" \"),n(\"\\f\"),n(`\n`),n(\"\\r\"),n(\"\t\"),n(\"\\v\"),n(\"\t\"),n(\"\\xA0\"),n(\"\\u1680\"),n(\"\\u2000\"),n(\"\\u2001\"),n(\"\\u2002\"),n(\"\\u2003\"),n(\"\\u2004\"),n(\"\\u2005\"),n(\"\\u2006\"),n(\"\\u2007\"),n(\"\\u2008\"),n(\"\\u2009\"),n(\"\\u200A\"),n(\"\\u2028\"),n(\"\\u2029\"),n(\"\\u202F\"),n(\"\\u205F\"),n(\"\\u3000\"),n(\"\\uFEFF\")];function h(){}return h.prototype.visitChildren=function(p){for(var m in p){var y=p[m];p.hasOwnProperty(m)&&(y.type!==void 0?this.visit(y):Array.isArray(y)&&y.forEach(function(Q){this.visit(Q)},this))}},h.prototype.visit=function(p){switch(p.type){case\"Pattern\":this.visitPattern(p);break;case\"Flags\":this.visitFlags(p);break;case\"Disjunction\":this.visitDisjunction(p);break;case\"Alternative\":this.visitAlternative(p);break;case\"StartAnchor\":this.visitStartAnchor(p);break;case\"EndAnchor\":this.visitEndAnchor(p);break;case\"WordBoundary\":this.visitWordBoundary(p);break;case\"NonWordBoundary\":this.visitNonWordBoundary(p);break;case\"Lookahead\":this.visitLookahead(p);break;case\"NegativeLookahead\":this.visitNegativeLookahead(p);break;case\"Character\":this.visitCharacter(p);break;case\"Set\":this.visitSet(p);break;case\"Group\":this.visitGroup(p);break;case\"GroupBackReference\":this.visitGroupBackReference(p);break;case\"Quantifier\":this.visitQuantifier(p);break}this.visitChildren(p)},h.prototype.visitPattern=function(p){},h.prototype.visitFlags=function(p){},h.prototype.visitDisjunction=function(p){},h.prototype.visitAlternative=function(p){},h.prototype.visitStartAnchor=function(p){},h.prototype.visitEndAnchor=function(p){},h.prototype.visitWordBoundary=function(p){},h.prototype.visitNonWordBoundary=function(p){},h.prototype.visitLookahead=function(p){},h.prototype.visitNegativeLookahead=function(p){},h.prototype.visitCharacter=function(p){},h.prototype.visitSet=function(p){},h.prototype.visitGroup=function(p){},h.prototype.visitGroupBackReference=function(p){},h.prototype.visitQuantifier=function(p){},{RegExpParser:t,BaseRegExpVisitor:h,VERSION:\"0.5.0\"}})});var HI=w(ug=>{\"use strict\";Object.defineProperty(ug,\"__esModule\",{value:!0});ug.clearRegExpParserCache=ug.getRegExpAst=void 0;var VEe=UI(),KI={},XEe=new VEe.RegExpParser;function ZEe(t){var e=t.toString();if(KI.hasOwnProperty(e))return KI[e];var r=XEe.pattern(e);return KI[e]=r,r}ug.getRegExpAst=ZEe;function $Ee(){KI={}}ug.clearRegExpParserCache=$Ee});var XH=w(Bn=>{\"use strict\";var eIe=Bn&&Bn.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof 
Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!=\"function\"&&r!==null)throw new TypeError(\"Class extends value \"+String(r)+\" is not a constructor or null\");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Bn,\"__esModule\",{value:!0});Bn.canMatchCharCode=Bn.firstCharOptimizedIndices=Bn.getOptimizedStartCodesIndices=Bn.failedOptimizationPrefixMsg=void 0;var WH=UI(),bs=Yt(),zH=HI(),Ma=Pv(),_H=\"Complement Sets are not supported for first char optimization\";Bn.failedOptimizationPrefixMsg=`Unable to use \"first char\" lexer optimizations:\n`;function tIe(t,e){e===void 0&&(e=!1);try{var r=(0,zH.getRegExpAst)(t),i=jI(r.value,{},r.flags.ignoreCase);return i}catch(s){if(s.message===_H)e&&(0,bs.PRINT_WARNING)(\"\"+Bn.failedOptimizationPrefixMsg+(\"\tUnable to optimize: < \"+t.toString()+` >\n`)+`\tComplement Sets cannot be automatically optimized.\n\tThis will disable the lexer's first char optimizations.\n\tSee: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#COMPLEMENT for details.`);else{var n=\"\";e&&(n=`\n\tThis will disable the lexer's first char optimizations.\n\tSee: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#REGEXP_PARSING for details.`),(0,bs.PRINT_ERROR)(Bn.failedOptimizationPrefixMsg+`\n`+(\"\tFailed parsing: < \"+t.toString()+` >\n`)+(\"\tUsing the regexp-to-ast library version: \"+WH.VERSION+`\n`)+\"\tPlease open an issue at: https://github.com/bd82/regexp-to-ast/issues\"+n)}}return[]}Bn.getOptimizedStartCodesIndices=tIe;function jI(t,e,r){switch(t.type){case\"Disjunction\":for(var i=0;i<t.value.length;i++)jI(t.value[i],e,r);break;case\"Alternative\":for(var n=t.value,i=0;i<n.length;i++){var s=n[i];switch(s.type){case\"EndAnchor\":case\"GroupBackReference\":case\"Lookahead\":case\"NegativeLookahead\":case\"StartAnchor\":case\"WordBoundary\":case\"NonWordBoundary\":continue}var o=s;switch(o.type){case\"Character\":GI(o.value,e,r);break;case\"Set\":if(o.complement===!0)throw Error(_H);(0,bs.forEach)(o.value,function(c){if(typeof c==\"number\")GI(c,e,r);else{var u=c;if(r===!0)for(var g=u.from;g<=u.to;g++)GI(g,e,r);else{for(var g=u.from;g<=u.to&&g<Ma.minOptimizationVal;g++)GI(g,e,r);if(u.to>=Ma.minOptimizationVal)for(var f=u.from>=Ma.minOptimizationVal?u.from:Ma.minOptimizationVal,h=u.to,p=(0,Ma.charCodeToOptimizedIndex)(f),m=(0,Ma.charCodeToOptimizedIndex)(h),y=p;y<=m;y++)e[y]=y}}});break;case\"Group\":jI(o.value,e,r);break;default:throw Error(\"Non Exhaustive Match\")}var a=o.quantifier!==void 0&&o.quantifier.atLeast===0;if(o.type===\"Group\"&&Dv(o)===!1||o.type!==\"Group\"&&a===!1)break}break;default:throw Error(\"non exhaustive match!\")}return(0,bs.values)(e)}Bn.firstCharOptimizedIndices=jI;function GI(t,e,r){var i=(0,Ma.charCodeToOptimizedIndex)(t);e[i]=i,r===!0&&rIe(t,e)}function rIe(t,e){var r=String.fromCharCode(t),i=r.toUpperCase();if(i!==r){var n=(0,Ma.charCodeToOptimizedIndex)(i.charCodeAt(0));e[n]=n}else{var s=r.toLowerCase();if(s!==r){var n=(0,Ma.charCodeToOptimizedIndex)(s.charCodeAt(0));e[n]=n}}}function VH(t,e){return(0,bs.find)(t.value,function(r){if(typeof r==\"number\")return(0,bs.contains)(e,r);var i=r;return(0,bs.find)(e,function(n){return i.from<=n&&n<=i.to})!==void 0})}function Dv(t){return t.quantifier&&t.quantifier.atLeast===0?!0:t.value?(0,bs.isArray)(t.value)?(0,bs.every)(t.value,Dv):Dv(t.value):!1}var 
iIe=function(t){eIe(e,t);function e(r){var i=t.call(this)||this;return i.targetCharCodes=r,i.found=!1,i}return e.prototype.visitChildren=function(r){if(this.found!==!0){switch(r.type){case\"Lookahead\":this.visitLookahead(r);return;case\"NegativeLookahead\":this.visitNegativeLookahead(r);return}t.prototype.visitChildren.call(this,r)}},e.prototype.visitCharacter=function(r){(0,bs.contains)(this.targetCharCodes,r.value)&&(this.found=!0)},e.prototype.visitSet=function(r){r.complement?VH(r,this.targetCharCodes)===void 0&&(this.found=!0):VH(r,this.targetCharCodes)!==void 0&&(this.found=!0)},e}(WH.BaseRegExpVisitor);function nIe(t,e){if(e instanceof RegExp){var r=(0,zH.getRegExpAst)(e),i=new iIe(t);return i.visit(r),i.found}else return(0,bs.find)(e,function(n){return(0,bs.contains)(t,n.charCodeAt(0))})!==void 0}Bn.canMatchCharCode=nIe});var Pv=w(Ze=>{\"use strict\";var ZH=Ze&&Ze.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!=\"function\"&&r!==null)throw new TypeError(\"Class extends value \"+String(r)+\" is not a constructor or null\");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Ze,\"__esModule\",{value:!0});Ze.charCodeToOptimizedIndex=Ze.minOptimizationVal=Ze.buildLineBreakIssueMessage=Ze.LineTerminatorOptimizedTester=Ze.isShortPattern=Ze.isCustomPattern=Ze.cloneEmptyGroups=Ze.performWarningRuntimeChecks=Ze.performRuntimeChecks=Ze.addStickyFlag=Ze.addStartOfInput=Ze.findUnreachablePatterns=Ze.findModesThatDoNotExist=Ze.findInvalidGroupType=Ze.findDuplicatePatterns=Ze.findUnsupportedFlags=Ze.findStartOfInputAnchor=Ze.findEmptyMatchRegExps=Ze.findEndOfInputAnchor=Ze.findInvalidPatterns=Ze.findMissingPatterns=Ze.validatePatterns=Ze.analyzeTokenTypes=Ze.enableSticky=Ze.disableSticky=Ze.SUPPORT_STICKY=Ze.MODES=Ze.DEFAULT_MODE=void 0;var $H=UI(),Ar=Dp(),Ne=Yt(),gg=XH(),ej=HI(),Lo=\"PATTERN\";Ze.DEFAULT_MODE=\"defaultMode\";Ze.MODES=\"modes\";Ze.SUPPORT_STICKY=typeof new RegExp(\"(?:)\").sticky==\"boolean\";function sIe(){Ze.SUPPORT_STICKY=!1}Ze.disableSticky=sIe;function oIe(){Ze.SUPPORT_STICKY=!0}Ze.enableSticky=oIe;function AIe(t,e){e=(0,Ne.defaults)(e,{useSticky:Ze.SUPPORT_STICKY,debug:!1,safeMode:!1,positionTracking:\"full\",lineTerminatorCharacters:[\"\\r\",`\n`],tracer:function(S,x){return x()}});var r=e.tracer;r(\"initCharCodeToOptimizedIndexMap\",function(){aIe()});var i;r(\"Reject Lexer.NA\",function(){i=(0,Ne.reject)(t,function(S){return S[Lo]===Ar.Lexer.NA})});var n=!1,s;r(\"Transform Patterns\",function(){n=!1,s=(0,Ne.map)(i,function(S){var x=S[Lo];if((0,Ne.isRegExp)(x)){var M=x.source;return M.length===1&&M!==\"^\"&&M!==\"$\"&&M!==\".\"&&!x.ignoreCase?M:M.length===2&&M[0]===\"\\\\\"&&!(0,Ne.contains)([\"d\",\"D\",\"s\",\"S\",\"t\",\"r\",\"n\",\"t\",\"0\",\"c\",\"b\",\"B\",\"f\",\"v\",\"w\",\"W\"],M[1])?M[1]:e.useSticky?Fv(x):Rv(x)}else{if((0,Ne.isFunction)(x))return n=!0,{exec:x};if((0,Ne.has)(x,\"exec\"))return n=!0,x;if(typeof x==\"string\"){if(x.length===1)return x;var Y=x.replace(/[\\\\^$.*+?()[\\]{}|]/g,\"\\\\$&\"),U=new RegExp(Y);return e.useSticky?Fv(U):Rv(U)}else throw Error(\"non exhaustive match\")}})});var o,a,l,c,u;r(\"misc mapping\",function(){o=(0,Ne.map)(i,function(S){return S.tokenTypeIdx}),a=(0,Ne.map)(i,function(S){var 
x=S.GROUP;if(x!==Ar.Lexer.SKIPPED){if((0,Ne.isString)(x))return x;if((0,Ne.isUndefined)(x))return!1;throw Error(\"non exhaustive match\")}}),l=(0,Ne.map)(i,function(S){var x=S.LONGER_ALT;if(x){var M=(0,Ne.isArray)(x)?(0,Ne.map)(x,function(Y){return(0,Ne.indexOf)(i,Y)}):[(0,Ne.indexOf)(i,x)];return M}}),c=(0,Ne.map)(i,function(S){return S.PUSH_MODE}),u=(0,Ne.map)(i,function(S){return(0,Ne.has)(S,\"POP_MODE\")})});var g;r(\"Line Terminator Handling\",function(){var S=ij(e.lineTerminatorCharacters);g=(0,Ne.map)(i,function(x){return!1}),e.positionTracking!==\"onlyOffset\"&&(g=(0,Ne.map)(i,function(x){if((0,Ne.has)(x,\"LINE_BREAKS\"))return x.LINE_BREAKS;if(rj(x,S)===!1)return(0,gg.canMatchCharCode)(S,x.PATTERN)}))});var f,h,p,m;r(\"Misc Mapping #2\",function(){f=(0,Ne.map)(i,Nv),h=(0,Ne.map)(s,tj),p=(0,Ne.reduce)(i,function(S,x){var M=x.GROUP;return(0,Ne.isString)(M)&&M!==Ar.Lexer.SKIPPED&&(S[M]=[]),S},{}),m=(0,Ne.map)(s,function(S,x){return{pattern:s[x],longerAlt:l[x],canLineTerminator:g[x],isCustom:f[x],short:h[x],group:a[x],push:c[x],pop:u[x],tokenTypeIdx:o[x],tokenType:i[x]}})});var y=!0,Q=[];return e.safeMode||r(\"First Char Optimization\",function(){Q=(0,Ne.reduce)(i,function(S,x,M){if(typeof x.PATTERN==\"string\"){var Y=x.PATTERN.charCodeAt(0),U=Tv(Y);Lv(S,U,m[M])}else if((0,Ne.isArray)(x.START_CHARS_HINT)){var J;(0,Ne.forEach)(x.START_CHARS_HINT,function(ee){var Z=typeof ee==\"string\"?ee.charCodeAt(0):ee,A=Tv(Z);J!==A&&(J=A,Lv(S,A,m[M]))})}else if((0,Ne.isRegExp)(x.PATTERN))if(x.PATTERN.unicode)y=!1,e.ensureOptimizations&&(0,Ne.PRINT_ERROR)(\"\"+gg.failedOptimizationPrefixMsg+(\"\tUnable to analyze < \"+x.PATTERN.toString()+` > pattern.\n`)+`\tThe regexp unicode flag is not currently supported by the regexp-to-ast library.\n\tThis will disable the lexer's first char optimizations.\n\tFor details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#UNICODE_OPTIMIZE`);else{var W=(0,gg.getOptimizedStartCodesIndices)(x.PATTERN,e.ensureOptimizations);(0,Ne.isEmpty)(W)&&(y=!1),(0,Ne.forEach)(W,function(ee){Lv(S,ee,m[M])})}else e.ensureOptimizations&&(0,Ne.PRINT_ERROR)(\"\"+gg.failedOptimizationPrefixMsg+(\"\tTokenType: <\"+x.name+`> is using a custom token pattern without providing <start_chars_hint> parameter.\n`)+`\tThis will disable the lexer's first char optimizations.\n\tFor details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#CUSTOM_OPTIMIZE`),y=!1;return S},[])}),r(\"ArrayPacking\",function(){Q=(0,Ne.packArray)(Q)}),{emptyGroups:p,patternIdxToConfig:m,charCodeToPatternIdxToConfig:Q,hasCustom:n,canBeOptimized:y}}Ze.analyzeTokenTypes=AIe;function cIe(t,e){var r=[],i=nj(t);r=r.concat(i.errors);var n=sj(i.valid),s=n.valid;return r=r.concat(n.errors),r=r.concat(lIe(s)),r=r.concat(oj(s)),r=r.concat(aj(s,e)),r=r.concat(Aj(s)),r}Ze.validatePatterns=cIe;function lIe(t){var e=[],r=(0,Ne.filter)(t,function(i){return(0,Ne.isRegExp)(i[Lo])});return e=e.concat(lj(r)),e=e.concat(uj(r)),e=e.concat(gj(r)),e=e.concat(fj(r)),e=e.concat(cj(r)),e}function nj(t){var e=(0,Ne.filter)(t,function(n){return!(0,Ne.has)(n,Lo)}),r=(0,Ne.map)(e,function(n){return{message:\"Token Type: ->\"+n.name+\"<- missing static 'PATTERN' property\",type:Ar.LexerDefinitionErrorType.MISSING_PATTERN,tokenTypes:[n]}}),i=(0,Ne.difference)(t,e);return{errors:r,valid:i}}Ze.findMissingPatterns=nj;function sj(t){var e=(0,Ne.filter)(t,function(n){var s=n[Lo];return!(0,Ne.isRegExp)(s)&&!(0,Ne.isFunction)(s)&&!(0,Ne.has)(s,\"exec\")&&!(0,Ne.isString)(s)}),r=(0,Ne.map)(e,function(n){return{message:\"Token 
Type: ->\"+n.name+\"<- static 'PATTERN' can only be a RegExp, a Function matching the {CustomPatternMatcherFunc} type or an Object matching the {ICustomPattern} interface.\",type:Ar.LexerDefinitionErrorType.INVALID_PATTERN,tokenTypes:[n]}}),i=(0,Ne.difference)(t,e);return{errors:r,valid:i}}Ze.findInvalidPatterns=sj;var uIe=/[^\\\\][\\$]/;function lj(t){var e=function(n){ZH(s,n);function s(){var o=n!==null&&n.apply(this,arguments)||this;return o.found=!1,o}return s.prototype.visitEndAnchor=function(o){this.found=!0},s}($H.BaseRegExpVisitor),r=(0,Ne.filter)(t,function(n){var s=n[Lo];try{var o=(0,ej.getRegExpAst)(s),a=new e;return a.visit(o),a.found}catch(l){return uIe.test(s.source)}}),i=(0,Ne.map)(r,function(n){return{message:`Unexpected RegExp Anchor Error:\n\tToken Type: ->`+n.name+`<- static 'PATTERN' cannot contain end of input anchor '$'\n\tSee chevrotain.io/docs/guide/resolving_lexer_errors.html#ANCHORS\tfor details.`,type:Ar.LexerDefinitionErrorType.EOI_ANCHOR_FOUND,tokenTypes:[n]}});return i}Ze.findEndOfInputAnchor=lj;function cj(t){var e=(0,Ne.filter)(t,function(i){var n=i[Lo];return n.test(\"\")}),r=(0,Ne.map)(e,function(i){return{message:\"Token Type: ->\"+i.name+\"<- static 'PATTERN' must not match an empty string\",type:Ar.LexerDefinitionErrorType.EMPTY_MATCH_PATTERN,tokenTypes:[i]}});return r}Ze.findEmptyMatchRegExps=cj;var gIe=/[^\\\\[][\\^]|^\\^/;function uj(t){var e=function(n){ZH(s,n);function s(){var o=n!==null&&n.apply(this,arguments)||this;return o.found=!1,o}return s.prototype.visitStartAnchor=function(o){this.found=!0},s}($H.BaseRegExpVisitor),r=(0,Ne.filter)(t,function(n){var s=n[Lo];try{var o=(0,ej.getRegExpAst)(s),a=new e;return a.visit(o),a.found}catch(l){return gIe.test(s.source)}}),i=(0,Ne.map)(r,function(n){return{message:`Unexpected RegExp Anchor Error:\n\tToken Type: ->`+n.name+`<- static 'PATTERN' cannot contain start of input anchor '^'\n\tSee https://chevrotain.io/docs/guide/resolving_lexer_errors.html#ANCHORS\tfor details.`,type:Ar.LexerDefinitionErrorType.SOI_ANCHOR_FOUND,tokenTypes:[n]}});return i}Ze.findStartOfInputAnchor=uj;function gj(t){var e=(0,Ne.filter)(t,function(i){var n=i[Lo];return n instanceof RegExp&&(n.multiline||n.global)}),r=(0,Ne.map)(e,function(i){return{message:\"Token Type: ->\"+i.name+\"<- static 'PATTERN' may NOT contain global('g') or multiline('m')\",type:Ar.LexerDefinitionErrorType.UNSUPPORTED_FLAGS_FOUND,tokenTypes:[i]}});return r}Ze.findUnsupportedFlags=gj;function fj(t){var e=[],r=(0,Ne.map)(t,function(s){return(0,Ne.reduce)(t,function(o,a){return s.PATTERN.source===a.PATTERN.source&&!(0,Ne.contains)(e,a)&&a.PATTERN!==Ar.Lexer.NA&&(e.push(a),o.push(a)),o},[])});r=(0,Ne.compact)(r);var i=(0,Ne.filter)(r,function(s){return s.length>1}),n=(0,Ne.map)(i,function(s){var o=(0,Ne.map)(s,function(l){return l.name}),a=(0,Ne.first)(s).PATTERN;return{message:\"The same RegExp pattern ->\"+a+\"<-\"+(\"has been used in all of the following Token Types: \"+o.join(\", \")+\" <-\"),type:Ar.LexerDefinitionErrorType.DUPLICATE_PATTERNS_FOUND,tokenTypes:s}});return n}Ze.findDuplicatePatterns=fj;function oj(t){var e=(0,Ne.filter)(t,function(i){if(!(0,Ne.has)(i,\"GROUP\"))return!1;var n=i.GROUP;return n!==Ar.Lexer.SKIPPED&&n!==Ar.Lexer.NA&&!(0,Ne.isString)(n)}),r=(0,Ne.map)(e,function(i){return{message:\"Token Type: ->\"+i.name+\"<- static 'GROUP' can only be Lexer.SKIPPED/Lexer.NA/A String\",type:Ar.LexerDefinitionErrorType.INVALID_GROUP_TYPE_FOUND,tokenTypes:[i]}});return r}Ze.findInvalidGroupType=oj;function aj(t,e){var 
r=(0,Ne.filter)(t,function(n){return n.PUSH_MODE!==void 0&&!(0,Ne.contains)(e,n.PUSH_MODE)}),i=(0,Ne.map)(r,function(n){var s=\"Token Type: ->\"+n.name+\"<- static 'PUSH_MODE' value cannot refer to a Lexer Mode ->\"+n.PUSH_MODE+\"<-which does not exist\";return{message:s,type:Ar.LexerDefinitionErrorType.PUSH_MODE_DOES_NOT_EXIST,tokenTypes:[n]}});return i}Ze.findModesThatDoNotExist=aj;function Aj(t){var e=[],r=(0,Ne.reduce)(t,function(i,n,s){var o=n.PATTERN;return o===Ar.Lexer.NA||((0,Ne.isString)(o)?i.push({str:o,idx:s,tokenType:n}):(0,Ne.isRegExp)(o)&&hIe(o)&&i.push({str:o.source,idx:s,tokenType:n})),i},[]);return(0,Ne.forEach)(t,function(i,n){(0,Ne.forEach)(r,function(s){var o=s.str,a=s.idx,l=s.tokenType;if(n<a&&fIe(o,i.PATTERN)){var c=\"Token: ->\"+l.name+`<- can never be matched.\n`+(\"Because it appears AFTER the Token Type ->\"+i.name+\"<-\")+`in the lexer's definition.\nSee https://chevrotain.io/docs/guide/resolving_lexer_errors.html#UNREACHABLE`;e.push({message:c,type:Ar.LexerDefinitionErrorType.UNREACHABLE_PATTERN,tokenTypes:[i,l]})}})}),e}Ze.findUnreachablePatterns=Aj;function fIe(t,e){if((0,Ne.isRegExp)(e)){var r=e.exec(t);return r!==null&&r.index===0}else{if((0,Ne.isFunction)(e))return e(t,0,[],{});if((0,Ne.has)(e,\"exec\"))return e.exec(t,0,[],{});if(typeof e==\"string\")return e===t;throw Error(\"non exhaustive match\")}}function hIe(t){var e=[\".\",\"\\\\\",\"[\",\"]\",\"|\",\"^\",\"$\",\"(\",\")\",\"?\",\"*\",\"+\",\"{\"];return(0,Ne.find)(e,function(r){return t.source.indexOf(r)!==-1})===void 0}function Rv(t){var e=t.ignoreCase?\"i\":\"\";return new RegExp(\"^(?:\"+t.source+\")\",e)}Ze.addStartOfInput=Rv;function Fv(t){var e=t.ignoreCase?\"iy\":\"y\";return new RegExp(\"\"+t.source,e)}Ze.addStickyFlag=Fv;function pIe(t,e,r){var i=[];return(0,Ne.has)(t,Ze.DEFAULT_MODE)||i.push({message:\"A MultiMode Lexer cannot be initialized without a <\"+Ze.DEFAULT_MODE+`> property in its definition\n`,type:Ar.LexerDefinitionErrorType.MULTI_MODE_LEXER_WITHOUT_DEFAULT_MODE}),(0,Ne.has)(t,Ze.MODES)||i.push({message:\"A MultiMode Lexer cannot be initialized without a <\"+Ze.MODES+`> property in its definition\n`,type:Ar.LexerDefinitionErrorType.MULTI_MODE_LEXER_WITHOUT_MODES_PROPERTY}),(0,Ne.has)(t,Ze.MODES)&&(0,Ne.has)(t,Ze.DEFAULT_MODE)&&!(0,Ne.has)(t.modes,t.defaultMode)&&i.push({message:\"A MultiMode Lexer cannot be initialized with a \"+Ze.DEFAULT_MODE+\": <\"+t.defaultMode+`>which does not exist\n`,type:Ar.LexerDefinitionErrorType.MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST}),(0,Ne.has)(t,Ze.MODES)&&(0,Ne.forEach)(t.modes,function(n,s){(0,Ne.forEach)(n,function(o,a){(0,Ne.isUndefined)(o)&&i.push({message:\"A Lexer cannot be initialized using an undefined Token Type. 
Mode:\"+(\"<\"+s+\"> at index: <\"+a+`>\n`),type:Ar.LexerDefinitionErrorType.LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED})})}),i}Ze.performRuntimeChecks=pIe;function dIe(t,e,r){var i=[],n=!1,s=(0,Ne.compact)((0,Ne.flatten)((0,Ne.mapValues)(t.modes,function(l){return l}))),o=(0,Ne.reject)(s,function(l){return l[Lo]===Ar.Lexer.NA}),a=ij(r);return e&&(0,Ne.forEach)(o,function(l){var c=rj(l,a);if(c!==!1){var u=hj(l,c),g={message:u,type:c.issue,tokenType:l};i.push(g)}else(0,Ne.has)(l,\"LINE_BREAKS\")?l.LINE_BREAKS===!0&&(n=!0):(0,gg.canMatchCharCode)(a,l.PATTERN)&&(n=!0)}),e&&!n&&i.push({message:`Warning: No LINE_BREAKS Found.\n\tThis Lexer has been defined to track line and column information,\n\tBut none of the Token Types can be identified as matching a line terminator.\n\tSee https://chevrotain.io/docs/guide/resolving_lexer_errors.html#LINE_BREAKS \n\tfor details.`,type:Ar.LexerDefinitionErrorType.NO_LINE_BREAKS_FLAGS}),i}Ze.performWarningRuntimeChecks=dIe;function CIe(t){var e={},r=(0,Ne.keys)(t);return(0,Ne.forEach)(r,function(i){var n=t[i];if((0,Ne.isArray)(n))e[i]=[];else throw Error(\"non exhaustive match\")}),e}Ze.cloneEmptyGroups=CIe;function Nv(t){var e=t.PATTERN;if((0,Ne.isRegExp)(e))return!1;if((0,Ne.isFunction)(e))return!0;if((0,Ne.has)(e,\"exec\"))return!0;if((0,Ne.isString)(e))return!1;throw Error(\"non exhaustive match\")}Ze.isCustomPattern=Nv;function tj(t){return(0,Ne.isString)(t)&&t.length===1?t.charCodeAt(0):!1}Ze.isShortPattern=tj;Ze.LineTerminatorOptimizedTester={test:function(t){for(var e=t.length,r=this.lastIndex;r<e;r++){var i=t.charCodeAt(r);if(i===10)return this.lastIndex=r+1,!0;if(i===13)return t.charCodeAt(r+1)===10?this.lastIndex=r+2:this.lastIndex=r+1,!0}return!1},lastIndex:0};function rj(t,e){if((0,Ne.has)(t,\"LINE_BREAKS\"))return!1;if((0,Ne.isRegExp)(t.PATTERN)){try{(0,gg.canMatchCharCode)(e,t.PATTERN)}catch(r){return{issue:Ar.LexerDefinitionErrorType.IDENTIFY_TERMINATOR,errMsg:r.message}}return!1}else{if((0,Ne.isString)(t.PATTERN))return!1;if(Nv(t))return{issue:Ar.LexerDefinitionErrorType.CUSTOM_LINE_BREAK};throw Error(\"non exhaustive match\")}}function hj(t,e){if(e.issue===Ar.LexerDefinitionErrorType.IDENTIFY_TERMINATOR)return`Warning: unable to identify line terminator usage in pattern.\n`+(\"\tThe problem is in the <\"+t.name+`> Token Type\n`)+(\"\t Root cause: \"+e.errMsg+`.\n`)+\"\tFor details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#IDENTIFY_TERMINATOR\";if(e.issue===Ar.LexerDefinitionErrorType.CUSTOM_LINE_BREAK)return`Warning: A Custom Token Pattern should specify the <line_breaks> option.\n`+(\"\tThe problem is in the <\"+t.name+`> Token Type\n`)+\"\tFor details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#CUSTOM_LINE_BREAK\";throw Error(\"non exhaustive match\")}Ze.buildLineBreakIssueMessage=hj;function ij(t){var e=(0,Ne.map)(t,function(r){return(0,Ne.isString)(r)&&r.length>0?r.charCodeAt(0):r});return e}function Lv(t,e,r){t[e]===void 0?t[e]=[r]:t[e].push(r)}Ze.minOptimizationVal=256;var YI=[];function Tv(t){return t<Ze.minOptimizationVal?t:YI[t]}Ze.charCodeToOptimizedIndex=Tv;function aIe(){if((0,Ne.isEmpty)(YI)){YI=new Array(65536);for(var t=0;t<65536;t++)YI[t]=t>255?255+~~(t/255):t}}});var fg=w(Ft=>{\"use 
strict\";Object.defineProperty(Ft,\"__esModule\",{value:!0});Ft.isTokenType=Ft.hasExtendingTokensTypesMapProperty=Ft.hasExtendingTokensTypesProperty=Ft.hasCategoriesProperty=Ft.hasShortKeyProperty=Ft.singleAssignCategoriesToksMap=Ft.assignCategoriesMapProp=Ft.assignCategoriesTokensProp=Ft.assignTokenDefaultProps=Ft.expandCategories=Ft.augmentTokenTypes=Ft.tokenIdxToClass=Ft.tokenShortNameIdx=Ft.tokenStructuredMatcherNoCategories=Ft.tokenStructuredMatcher=void 0;var ri=Yt();function mIe(t,e){var r=t.tokenTypeIdx;return r===e.tokenTypeIdx?!0:e.isParent===!0&&e.categoryMatchesMap[r]===!0}Ft.tokenStructuredMatcher=mIe;function EIe(t,e){return t.tokenTypeIdx===e.tokenTypeIdx}Ft.tokenStructuredMatcherNoCategories=EIe;Ft.tokenShortNameIdx=1;Ft.tokenIdxToClass={};function IIe(t){var e=pj(t);dj(e),mj(e),Cj(e),(0,ri.forEach)(e,function(r){r.isParent=r.categoryMatches.length>0})}Ft.augmentTokenTypes=IIe;function pj(t){for(var e=(0,ri.cloneArr)(t),r=t,i=!0;i;){r=(0,ri.compact)((0,ri.flatten)((0,ri.map)(r,function(s){return s.CATEGORIES})));var n=(0,ri.difference)(r,e);e=e.concat(n),(0,ri.isEmpty)(n)?i=!1:r=n}return e}Ft.expandCategories=pj;function dj(t){(0,ri.forEach)(t,function(e){Ej(e)||(Ft.tokenIdxToClass[Ft.tokenShortNameIdx]=e,e.tokenTypeIdx=Ft.tokenShortNameIdx++),Ov(e)&&!(0,ri.isArray)(e.CATEGORIES)&&(e.CATEGORIES=[e.CATEGORIES]),Ov(e)||(e.CATEGORIES=[]),Ij(e)||(e.categoryMatches=[]),yj(e)||(e.categoryMatchesMap={})})}Ft.assignTokenDefaultProps=dj;function Cj(t){(0,ri.forEach)(t,function(e){e.categoryMatches=[],(0,ri.forEach)(e.categoryMatchesMap,function(r,i){e.categoryMatches.push(Ft.tokenIdxToClass[i].tokenTypeIdx)})})}Ft.assignCategoriesTokensProp=Cj;function mj(t){(0,ri.forEach)(t,function(e){Mv([],e)})}Ft.assignCategoriesMapProp=mj;function Mv(t,e){(0,ri.forEach)(t,function(r){e.categoryMatchesMap[r.tokenTypeIdx]=!0}),(0,ri.forEach)(e.CATEGORIES,function(r){var i=t.concat(e);(0,ri.contains)(i,r)||Mv(i,r)})}Ft.singleAssignCategoriesToksMap=Mv;function Ej(t){return(0,ri.has)(t,\"tokenTypeIdx\")}Ft.hasShortKeyProperty=Ej;function Ov(t){return(0,ri.has)(t,\"CATEGORIES\")}Ft.hasCategoriesProperty=Ov;function Ij(t){return(0,ri.has)(t,\"categoryMatches\")}Ft.hasExtendingTokensTypesProperty=Ij;function yj(t){return(0,ri.has)(t,\"categoryMatchesMap\")}Ft.hasExtendingTokensTypesMapProperty=yj;function yIe(t){return(0,ri.has)(t,\"tokenTypeIdx\")}Ft.isTokenType=yIe});var Uv=w(qI=>{\"use strict\";Object.defineProperty(qI,\"__esModule\",{value:!0});qI.defaultLexerErrorProvider=void 0;qI.defaultLexerErrorProvider={buildUnableToPopLexerModeMessage:function(t){return\"Unable to pop Lexer Mode after encountering Token ->\"+t.image+\"<- The Mode Stack is empty\"},buildUnexpectedCharactersMessage:function(t,e,r,i,n){return\"unexpected character: ->\"+t.charAt(e)+\"<- at offset: \"+e+\",\"+(\" skipped \"+r+\" characters.\")}}});var Dp=w(Bc=>{\"use strict\";Object.defineProperty(Bc,\"__esModule\",{value:!0});Bc.Lexer=Bc.LexerDefinitionErrorType=void 0;var 
so=Pv(),lr=Yt(),wIe=fg(),BIe=Uv(),bIe=HI(),QIe;(function(t){t[t.MISSING_PATTERN=0]=\"MISSING_PATTERN\",t[t.INVALID_PATTERN=1]=\"INVALID_PATTERN\",t[t.EOI_ANCHOR_FOUND=2]=\"EOI_ANCHOR_FOUND\",t[t.UNSUPPORTED_FLAGS_FOUND=3]=\"UNSUPPORTED_FLAGS_FOUND\",t[t.DUPLICATE_PATTERNS_FOUND=4]=\"DUPLICATE_PATTERNS_FOUND\",t[t.INVALID_GROUP_TYPE_FOUND=5]=\"INVALID_GROUP_TYPE_FOUND\",t[t.PUSH_MODE_DOES_NOT_EXIST=6]=\"PUSH_MODE_DOES_NOT_EXIST\",t[t.MULTI_MODE_LEXER_WITHOUT_DEFAULT_MODE=7]=\"MULTI_MODE_LEXER_WITHOUT_DEFAULT_MODE\",t[t.MULTI_MODE_LEXER_WITHOUT_MODES_PROPERTY=8]=\"MULTI_MODE_LEXER_WITHOUT_MODES_PROPERTY\",t[t.MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST=9]=\"MULTI_MODE_LEXER_DEFAULT_MODE_VALUE_DOES_NOT_EXIST\",t[t.LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED=10]=\"LEXER_DEFINITION_CANNOT_CONTAIN_UNDEFINED\",t[t.SOI_ANCHOR_FOUND=11]=\"SOI_ANCHOR_FOUND\",t[t.EMPTY_MATCH_PATTERN=12]=\"EMPTY_MATCH_PATTERN\",t[t.NO_LINE_BREAKS_FLAGS=13]=\"NO_LINE_BREAKS_FLAGS\",t[t.UNREACHABLE_PATTERN=14]=\"UNREACHABLE_PATTERN\",t[t.IDENTIFY_TERMINATOR=15]=\"IDENTIFY_TERMINATOR\",t[t.CUSTOM_LINE_BREAK=16]=\"CUSTOM_LINE_BREAK\"})(QIe=Bc.LexerDefinitionErrorType||(Bc.LexerDefinitionErrorType={}));var Rp={deferDefinitionErrorsHandling:!1,positionTracking:\"full\",lineTerminatorsPattern:/\\n|\\r\\n?/g,lineTerminatorCharacters:[`\n`,\"\\r\"],ensureOptimizations:!1,safeMode:!1,errorMessageProvider:BIe.defaultLexerErrorProvider,traceInitPerf:!1,skipValidations:!1};Object.freeze(Rp);var vIe=function(){function t(e,r){var i=this;if(r===void 0&&(r=Rp),this.lexerDefinition=e,this.lexerDefinitionErrors=[],this.lexerDefinitionWarning=[],this.patternIdxToConfig={},this.charCodeToPatternIdxToConfig={},this.modes=[],this.emptyGroups={},this.config=void 0,this.trackStartLines=!0,this.trackEndLines=!0,this.hasCustom=!1,this.canModeBeOptimized={},typeof r==\"boolean\")throw Error(`The second argument to the Lexer constructor is now an ILexerConfig Object.\na boolean 2nd argument is no longer supported`);this.config=(0,lr.merge)(Rp,r);var n=this.config.traceInitPerf;n===!0?(this.traceInitMaxIdent=Infinity,this.traceInitPerf=!0):typeof n==\"number\"&&(this.traceInitMaxIdent=n,this.traceInitPerf=!0),this.traceInitIndent=-1,this.TRACE_INIT(\"Lexer Constructor\",function(){var s,o=!0;i.TRACE_INIT(\"Lexer Config handling\",function(){if(i.config.lineTerminatorsPattern===Rp.lineTerminatorsPattern)i.config.lineTerminatorsPattern=so.LineTerminatorOptimizedTester;else if(i.config.lineTerminatorCharacters===Rp.lineTerminatorCharacters)throw Error(`Error: Missing <lineTerminatorCharacters> property on the Lexer config.\n\tFor details See: https://chevrotain.io/docs/guide/resolving_lexer_errors.html#MISSING_LINE_TERM_CHARS`);if(r.safeMode&&r.ensureOptimizations)throw Error('\"safeMode\" and \"ensureOptimizations\" flags are mutually 
exclusive.');i.trackStartLines=/full|onlyStart/i.test(i.config.positionTracking),i.trackEndLines=/full/i.test(i.config.positionTracking),(0,lr.isArray)(e)?(s={modes:{}},s.modes[so.DEFAULT_MODE]=(0,lr.cloneArr)(e),s[so.DEFAULT_MODE]=so.DEFAULT_MODE):(o=!1,s=(0,lr.cloneObj)(e))}),i.config.skipValidations===!1&&(i.TRACE_INIT(\"performRuntimeChecks\",function(){i.lexerDefinitionErrors=i.lexerDefinitionErrors.concat((0,so.performRuntimeChecks)(s,i.trackStartLines,i.config.lineTerminatorCharacters))}),i.TRACE_INIT(\"performWarningRuntimeChecks\",function(){i.lexerDefinitionWarning=i.lexerDefinitionWarning.concat((0,so.performWarningRuntimeChecks)(s,i.trackStartLines,i.config.lineTerminatorCharacters))})),s.modes=s.modes?s.modes:{},(0,lr.forEach)(s.modes,function(u,g){s.modes[g]=(0,lr.reject)(u,function(f){return(0,lr.isUndefined)(f)})});var a=(0,lr.keys)(s.modes);if((0,lr.forEach)(s.modes,function(u,g){i.TRACE_INIT(\"Mode: <\"+g+\"> processing\",function(){if(i.modes.push(g),i.config.skipValidations===!1&&i.TRACE_INIT(\"validatePatterns\",function(){i.lexerDefinitionErrors=i.lexerDefinitionErrors.concat((0,so.validatePatterns)(u,a))}),(0,lr.isEmpty)(i.lexerDefinitionErrors)){(0,wIe.augmentTokenTypes)(u);var f;i.TRACE_INIT(\"analyzeTokenTypes\",function(){f=(0,so.analyzeTokenTypes)(u,{lineTerminatorCharacters:i.config.lineTerminatorCharacters,positionTracking:r.positionTracking,ensureOptimizations:r.ensureOptimizations,safeMode:r.safeMode,tracer:i.TRACE_INIT.bind(i)})}),i.patternIdxToConfig[g]=f.patternIdxToConfig,i.charCodeToPatternIdxToConfig[g]=f.charCodeToPatternIdxToConfig,i.emptyGroups=(0,lr.merge)(i.emptyGroups,f.emptyGroups),i.hasCustom=f.hasCustom||i.hasCustom,i.canModeBeOptimized[g]=f.canBeOptimized}})}),i.defaultMode=s.defaultMode,!(0,lr.isEmpty)(i.lexerDefinitionErrors)&&!i.config.deferDefinitionErrorsHandling){var l=(0,lr.map)(i.lexerDefinitionErrors,function(u){return u.message}),c=l.join(`-----------------------\n`);throw new Error(`Errors detected in definition of Lexer:\n`+c)}(0,lr.forEach)(i.lexerDefinitionWarning,function(u){(0,lr.PRINT_WARNING)(u.message)}),i.TRACE_INIT(\"Choosing sub-methods implementations\",function(){if(so.SUPPORT_STICKY?(i.chopInput=lr.IDENTITY,i.match=i.matchWithTest):(i.updateLastIndex=lr.NOOP,i.match=i.matchWithExec),o&&(i.handleModes=lr.NOOP),i.trackStartLines===!1&&(i.computeNewColumn=lr.IDENTITY),i.trackEndLines===!1&&(i.updateTokenEndLineColumnLocation=lr.NOOP),/full/i.test(i.config.positionTracking))i.createTokenInstance=i.createFullToken;else if(/onlyStart/i.test(i.config.positionTracking))i.createTokenInstance=i.createStartOnlyToken;else if(/onlyOffset/i.test(i.config.positionTracking))i.createTokenInstance=i.createOffsetOnlyToken;else throw Error('Invalid <positionTracking> config option: \"'+i.config.positionTracking+'\"');i.hasCustom?(i.addToken=i.addTokenUsingPush,i.handlePayload=i.handlePayloadWithCustom):(i.addToken=i.addTokenUsingMemberAccess,i.handlePayload=i.handlePayloadNoCustom)}),i.TRACE_INIT(\"Failed Optimization Warnings\",function(){var u=(0,lr.reduce)(i.canModeBeOptimized,function(g,f,h){return f===!1&&g.push(h),g},[]);if(r.ensureOptimizations&&!(0,lr.isEmpty)(u))throw Error(\"Lexer Modes: < \"+u.join(\", \")+` > cannot be optimized.\n\t Disable the \"ensureOptimizations\" lexer config flag to silently ignore this and run the lexer in an un-optimized mode.\n\t Or inspect the console log for details on how to resolve these 
issues.`)}),i.TRACE_INIT(\"clearRegExpParserCache\",function(){(0,bIe.clearRegExpParserCache)()}),i.TRACE_INIT(\"toFastProperties\",function(){(0,lr.toFastProperties)(i)})})}return t.prototype.tokenize=function(e,r){if(r===void 0&&(r=this.defaultMode),!(0,lr.isEmpty)(this.lexerDefinitionErrors)){var i=(0,lr.map)(this.lexerDefinitionErrors,function(o){return o.message}),n=i.join(`-----------------------\n`);throw new Error(`Unable to Tokenize because Errors detected in definition of Lexer:\n`+n)}var s=this.tokenizeInternal(e,r);return s},t.prototype.tokenizeInternal=function(e,r){var i=this,n,s,o,a,l,c,u,g,f,h,p,m,y,Q,S,x,M=e,Y=M.length,U=0,J=0,W=this.hasCustom?0:Math.floor(e.length/10),ee=new Array(W),Z=[],A=this.trackStartLines?1:void 0,ne=this.trackStartLines?1:void 0,le=(0,so.cloneEmptyGroups)(this.emptyGroups),Ae=this.trackStartLines,T=this.config.lineTerminatorsPattern,L=0,Ee=[],we=[],qe=[],re=[];Object.freeze(re);var se=void 0;function Qe(){return Ee}function he(vr){var Hn=(0,so.charCodeToOptimizedIndex)(vr),us=we[Hn];return us===void 0?re:us}var Fe=function(vr){if(qe.length===1&&vr.tokenType.PUSH_MODE===void 0){var Hn=i.config.errorMessageProvider.buildUnableToPopLexerModeMessage(vr);Z.push({offset:vr.startOffset,line:vr.startLine!==void 0?vr.startLine:void 0,column:vr.startColumn!==void 0?vr.startColumn:void 0,length:vr.image.length,message:Hn})}else{qe.pop();var us=(0,lr.last)(qe);Ee=i.patternIdxToConfig[us],we=i.charCodeToPatternIdxToConfig[us],L=Ee.length;var Ia=i.canModeBeOptimized[us]&&i.config.safeMode===!1;we&&Ia?se=he:se=Qe}};function Ue(vr){qe.push(vr),we=this.charCodeToPatternIdxToConfig[vr],Ee=this.patternIdxToConfig[vr],L=Ee.length,L=Ee.length;var Hn=this.canModeBeOptimized[vr]&&this.config.safeMode===!1;we&&Hn?se=he:se=Qe}Ue.call(this,r);for(var xe;U<Y;){c=null;var ve=M.charCodeAt(U),pe=se(ve),X=pe.length;for(n=0;n<X;n++){xe=pe[n];var be=xe.pattern;u=null;var ce=xe.short;if(ce!==!1?ve===ce&&(c=be):xe.isCustom===!0?(x=be.exec(M,U,ee,le),x!==null?(c=x[0],x.payload!==void 0&&(u=x.payload)):c=null):(this.updateLastIndex(be,U),c=this.match(be,e,U)),c!==null){if(l=xe.longerAlt,l!==void 0){var fe=l.length;for(o=0;o<fe;o++){var gt=Ee[l[o]],Ht=gt.pattern;if(g=null,gt.isCustom===!0?(x=Ht.exec(M,U,ee,le),x!==null?(a=x[0],x.payload!==void 0&&(g=x.payload)):a=null):(this.updateLastIndex(Ht,U),a=this.match(Ht,e,U)),a&&a.length>c.length){c=a,u=g,xe=gt;break}}}break}}if(c!==null){if(f=c.length,h=xe.group,h!==void 0&&(p=xe.tokenTypeIdx,m=this.createTokenInstance(c,U,p,xe.tokenType,A,ne,f),this.handlePayload(m,u),h===!1?J=this.addToken(ee,J,m):le[h].push(m)),e=this.chopInput(e,f),U=U+f,ne=this.computeNewColumn(ne,f),Ae===!0&&xe.canLineTerminator===!0){var Mt=0,mi=void 0,jt=void 0;T.lastIndex=0;do mi=T.test(c),mi===!0&&(jt=T.lastIndex-1,Mt++);while(mi===!0);Mt!==0&&(A=A+Mt,ne=f-jt,this.updateTokenEndLineColumnLocation(m,h,jt,Mt,A,ne,f))}this.handleModes(xe,Fe,Ue,m)}else{for(var Qr=U,Ti=A,_s=ne,Un=!1;!Un&&U<Y;)for(Q=M.charCodeAt(U),e=this.chopInput(e,1),U++,s=0;s<L;s++){var Kn=Ee[s],be=Kn.pattern,ce=Kn.short;if(ce!==!1?M.charCodeAt(U)===ce&&(Un=!0):Kn.isCustom===!0?Un=be.exec(M,U,ee,le)!==null:(this.updateLastIndex(be,U),Un=be.exec(e)!==null),Un===!0)break}y=U-Qr,S=this.config.errorMessageProvider.buildUnexpectedCharactersMessage(M,Qr,y,Ti,_s),Z.push({offset:Qr,line:Ti,column:_s,length:y,message:S})}}return this.hasCustom||(ee.length=J),{tokens:ee,groups:le,errors:Z}},t.prototype.handleModes=function(e,r,i,n){if(e.pop===!0){var s=e.push;r(n),s!==void 0&&i.call(this,s)}else e.push!==void 
0&&i.call(this,e.push)},t.prototype.chopInput=function(e,r){return e.substring(r)},t.prototype.updateLastIndex=function(e,r){e.lastIndex=r},t.prototype.updateTokenEndLineColumnLocation=function(e,r,i,n,s,o,a){var l,c;r!==void 0&&(l=i===a-1,c=l?-1:0,n===1&&l===!0||(e.endLine=s+c,e.endColumn=o-1+-c))},t.prototype.computeNewColumn=function(e,r){return e+r},t.prototype.createTokenInstance=function(){for(var e=[],r=0;r<arguments.length;r++)e[r]=arguments[r];return null},t.prototype.createOffsetOnlyToken=function(e,r,i,n){return{image:e,startOffset:r,tokenTypeIdx:i,tokenType:n}},t.prototype.createStartOnlyToken=function(e,r,i,n,s,o){return{image:e,startOffset:r,startLine:s,startColumn:o,tokenTypeIdx:i,tokenType:n}},t.prototype.createFullToken=function(e,r,i,n,s,o,a){return{image:e,startOffset:r,endOffset:r+a-1,startLine:s,endLine:s,startColumn:o,endColumn:o+a-1,tokenTypeIdx:i,tokenType:n}},t.prototype.addToken=function(e,r,i){return 666},t.prototype.addTokenUsingPush=function(e,r,i){return e.push(i),r},t.prototype.addTokenUsingMemberAccess=function(e,r,i){return e[r]=i,r++,r},t.prototype.handlePayload=function(e,r){},t.prototype.handlePayloadNoCustom=function(e,r){},t.prototype.handlePayloadWithCustom=function(e,r){r!==null&&(e.payload=r)},t.prototype.match=function(e,r,i){return null},t.prototype.matchWithTest=function(e,r,i){var n=e.test(r);return n===!0?r.substring(i,e.lastIndex):null},t.prototype.matchWithExec=function(e,r){var i=e.exec(r);return i!==null?i[0]:i},t.prototype.TRACE_INIT=function(e,r){if(this.traceInitPerf===!0){this.traceInitIndent++;var i=new Array(this.traceInitIndent+1).join(\"\t\");this.traceInitIndent<this.traceInitMaxIdent&&console.log(i+\"--> <\"+e+\">\");var n=(0,lr.timer)(r),s=n.time,o=n.value,a=s>10?console.warn:console.log;return this.traceInitIndent<this.traceInitMaxIdent&&a(i+\"<-- <\"+e+\"> time: \"+s+\"ms\"),this.traceInitIndent--,o}else return r()},t.SKIPPED=\"This marks a skipped Token pattern, this means each token identified by it willbe consumed and then thrown into oblivion, this can be used to for example to completely ignore whitespace.\",t.NA=/NOT_APPLICABLE/,t}();Bc.Lexer=vIe});var JA=w(xi=>{\"use strict\";Object.defineProperty(xi,\"__esModule\",{value:!0});xi.tokenMatcher=xi.createTokenInstance=xi.EOF=xi.createToken=xi.hasTokenLabel=xi.tokenName=xi.tokenLabel=void 0;var oo=Yt(),SIe=Dp(),Kv=fg();function kIe(t){return wj(t)?t.LABEL:t.name}xi.tokenLabel=kIe;function xIe(t){return t.name}xi.tokenName=xIe;function wj(t){return(0,oo.isString)(t.LABEL)&&t.LABEL!==\"\"}xi.hasTokenLabel=wj;var PIe=\"parent\",Bj=\"categories\",bj=\"label\",Qj=\"group\",vj=\"push_mode\",Sj=\"pop_mode\",kj=\"longer_alt\",xj=\"line_breaks\",Pj=\"start_chars_hint\";function Dj(t){return DIe(t)}xi.createToken=Dj;function DIe(t){var e=t.pattern,r={};if(r.name=t.name,(0,oo.isUndefined)(e)||(r.PATTERN=e),(0,oo.has)(t,PIe))throw`The parent property is no longer supported.\nSee: https://github.com/chevrotain/chevrotain/issues/564#issuecomment-349062346 for details.`;return(0,oo.has)(t,Bj)&&(r.CATEGORIES=t[Bj]),(0,Kv.augmentTokenTypes)([r]),(0,oo.has)(t,bj)&&(r.LABEL=t[bj]),(0,oo.has)(t,Qj)&&(r.GROUP=t[Qj]),(0,oo.has)(t,Sj)&&(r.POP_MODE=t[Sj]),(0,oo.has)(t,vj)&&(r.PUSH_MODE=t[vj]),(0,oo.has)(t,kj)&&(r.LONGER_ALT=t[kj]),(0,oo.has)(t,xj)&&(r.LINE_BREAKS=t[xj]),(0,oo.has)(t,Pj)&&(r.START_CHARS_HINT=t[Pj]),r}xi.EOF=Dj({name:\"EOF\",pattern:SIe.Lexer.NA});(0,Kv.augmentTokenTypes)([xi.EOF]);function 
RIe(t,e,r,i,n,s,o,a){return{image:e,startOffset:r,endOffset:i,startLine:n,endLine:s,startColumn:o,endColumn:a,tokenTypeIdx:t.tokenTypeIdx,tokenType:t}}xi.createTokenInstance=RIe;function FIe(t,e){return(0,Kv.tokenStructuredMatcher)(t,e)}xi.tokenMatcher=FIe});var bn=w(Vt=>{\"use strict\";var Ua=Vt&&Vt.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!=\"function\"&&r!==null)throw new TypeError(\"Class extends value \"+String(r)+\" is not a constructor or null\");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Vt,\"__esModule\",{value:!0});Vt.serializeProduction=Vt.serializeGrammar=Vt.Terminal=Vt.Alternation=Vt.RepetitionWithSeparator=Vt.Repetition=Vt.RepetitionMandatoryWithSeparator=Vt.RepetitionMandatory=Vt.Option=Vt.Alternative=Vt.Rule=Vt.NonTerminal=Vt.AbstractProduction=void 0;var fr=Yt(),NIe=JA(),To=function(){function t(e){this._definition=e}return Object.defineProperty(t.prototype,\"definition\",{get:function(){return this._definition},set:function(e){this._definition=e},enumerable:!1,configurable:!0}),t.prototype.accept=function(e){e.visit(this),(0,fr.forEach)(this.definition,function(r){r.accept(e)})},t}();Vt.AbstractProduction=To;var Rj=function(t){Ua(e,t);function e(r){var i=t.call(this,[])||this;return i.idx=1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return Object.defineProperty(e.prototype,\"definition\",{get:function(){return this.referencedRule!==void 0?this.referencedRule.definition:[]},set:function(r){},enumerable:!1,configurable:!0}),e.prototype.accept=function(r){r.visit(this)},e}(To);Vt.NonTerminal=Rj;var Fj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.orgText=\"\",(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.Rule=Fj;var Nj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.ignoreAmbiguities=!1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.Alternative=Nj;var Lj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.idx=1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.Option=Lj;var Tj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.idx=1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.RepetitionMandatory=Tj;var Oj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.idx=1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.RepetitionMandatoryWithSeparator=Oj;var Mj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.idx=1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.Repetition=Mj;var Uj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.idx=1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return e}(To);Vt.RepetitionWithSeparator=Uj;var Kj=function(t){Ua(e,t);function e(r){var i=t.call(this,r.definition)||this;return i.idx=1,i.ignoreAmbiguities=!1,i.hasPredicates=!1,(0,fr.assign)(i,(0,fr.pick)(r,function(n){return n!==void 0})),i}return Object.defineProperty(e.prototype,\"definition\",{get:function(){return 
this._definition},set:function(r){this._definition=r},enumerable:!1,configurable:!0}),e}(To);Vt.Alternation=Kj;var JI=function(){function t(e){this.idx=1,(0,fr.assign)(this,(0,fr.pick)(e,function(r){return r!==void 0}))}return t.prototype.accept=function(e){e.visit(this)},t}();Vt.Terminal=JI;function LIe(t){return(0,fr.map)(t,Fp)}Vt.serializeGrammar=LIe;function Fp(t){function e(s){return(0,fr.map)(s,Fp)}if(t instanceof Rj){var r={type:\"NonTerminal\",name:t.nonTerminalName,idx:t.idx};return(0,fr.isString)(t.label)&&(r.label=t.label),r}else{if(t instanceof Nj)return{type:\"Alternative\",definition:e(t.definition)};if(t instanceof Lj)return{type:\"Option\",idx:t.idx,definition:e(t.definition)};if(t instanceof Tj)return{type:\"RepetitionMandatory\",idx:t.idx,definition:e(t.definition)};if(t instanceof Oj)return{type:\"RepetitionMandatoryWithSeparator\",idx:t.idx,separator:Fp(new JI({terminalType:t.separator})),definition:e(t.definition)};if(t instanceof Uj)return{type:\"RepetitionWithSeparator\",idx:t.idx,separator:Fp(new JI({terminalType:t.separator})),definition:e(t.definition)};if(t instanceof Mj)return{type:\"Repetition\",idx:t.idx,definition:e(t.definition)};if(t instanceof Kj)return{type:\"Alternation\",idx:t.idx,definition:e(t.definition)};if(t instanceof JI){var i={type:\"Terminal\",name:t.terminalType.name,label:(0,NIe.tokenLabel)(t.terminalType),idx:t.idx};(0,fr.isString)(t.label)&&(i.terminalLabel=t.label);var n=t.terminalType.PATTERN;return t.terminalType.PATTERN&&(i.pattern=(0,fr.isRegExp)(n)?n.source:n),i}else{if(t instanceof Fj)return{type:\"Rule\",name:t.name,orgText:t.orgText,definition:e(t.definition)};throw Error(\"non exhaustive match\")}}}Vt.serializeProduction=Fp});var zI=w(WI=>{\"use strict\";Object.defineProperty(WI,\"__esModule\",{value:!0});WI.RestWalker=void 0;var Hv=Yt(),Qn=bn(),TIe=function(){function t(){}return t.prototype.walk=function(e,r){var i=this;r===void 0&&(r=[]),(0,Hv.forEach)(e.definition,function(n,s){var o=(0,Hv.drop)(e.definition,s+1);if(n instanceof Qn.NonTerminal)i.walkProdRef(n,o,r);else if(n instanceof Qn.Terminal)i.walkTerminal(n,o,r);else if(n instanceof Qn.Alternative)i.walkFlat(n,o,r);else if(n instanceof Qn.Option)i.walkOption(n,o,r);else if(n instanceof Qn.RepetitionMandatory)i.walkAtLeastOne(n,o,r);else if(n instanceof Qn.RepetitionMandatoryWithSeparator)i.walkAtLeastOneSep(n,o,r);else if(n instanceof Qn.RepetitionWithSeparator)i.walkManySep(n,o,r);else if(n instanceof Qn.Repetition)i.walkMany(n,o,r);else if(n instanceof Qn.Alternation)i.walkOr(n,o,r);else throw Error(\"non exhaustive match\")})},t.prototype.walkTerminal=function(e,r,i){},t.prototype.walkProdRef=function(e,r,i){},t.prototype.walkFlat=function(e,r,i){var n=r.concat(i);this.walk(e,n)},t.prototype.walkOption=function(e,r,i){var n=r.concat(i);this.walk(e,n)},t.prototype.walkAtLeastOne=function(e,r,i){var n=[new Qn.Option({definition:e.definition})].concat(r,i);this.walk(e,n)},t.prototype.walkAtLeastOneSep=function(e,r,i){var n=Hj(e,r,i);this.walk(e,n)},t.prototype.walkMany=function(e,r,i){var n=[new Qn.Option({definition:e.definition})].concat(r,i);this.walk(e,n)},t.prototype.walkManySep=function(e,r,i){var n=Hj(e,r,i);this.walk(e,n)},t.prototype.walkOr=function(e,r,i){var n=this,s=r.concat(i);(0,Hv.forEach)(e.definition,function(o){var a=new Qn.Alternative({definition:[o]});n.walk(a,s)})},t}();WI.RestWalker=TIe;function Hj(t,e,r){var i=[new Qn.Option({definition:[new Qn.Terminal({terminalType:t.separator})].concat(t.definition)})],n=i.concat(e,r);return n}});var 
hg=w(_I=>{\"use strict\";Object.defineProperty(_I,\"__esModule\",{value:!0});_I.GAstVisitor=void 0;var Oo=bn(),OIe=function(){function t(){}return t.prototype.visit=function(e){var r=e;switch(r.constructor){case Oo.NonTerminal:return this.visitNonTerminal(r);case Oo.Alternative:return this.visitAlternative(r);case Oo.Option:return this.visitOption(r);case Oo.RepetitionMandatory:return this.visitRepetitionMandatory(r);case Oo.RepetitionMandatoryWithSeparator:return this.visitRepetitionMandatoryWithSeparator(r);case Oo.RepetitionWithSeparator:return this.visitRepetitionWithSeparator(r);case Oo.Repetition:return this.visitRepetition(r);case Oo.Alternation:return this.visitAlternation(r);case Oo.Terminal:return this.visitTerminal(r);case Oo.Rule:return this.visitRule(r);default:throw Error(\"non exhaustive match\")}},t.prototype.visitNonTerminal=function(e){},t.prototype.visitAlternative=function(e){},t.prototype.visitOption=function(e){},t.prototype.visitRepetition=function(e){},t.prototype.visitRepetitionMandatory=function(e){},t.prototype.visitRepetitionMandatoryWithSeparator=function(e){},t.prototype.visitRepetitionWithSeparator=function(e){},t.prototype.visitAlternation=function(e){},t.prototype.visitTerminal=function(e){},t.prototype.visitRule=function(e){},t}();_I.GAstVisitor=OIe});var Lp=w(Gi=>{\"use strict\";var MIe=Gi&&Gi.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!=\"function\"&&r!==null)throw new TypeError(\"Class extends value \"+String(r)+\" is not a constructor or null\");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Gi,\"__esModule\",{value:!0});Gi.collectMethods=Gi.DslMethodsCollectorVisitor=Gi.getProductionDslName=Gi.isBranchingProd=Gi.isOptionalProd=Gi.isSequenceProd=void 0;var Np=Yt(),kr=bn(),UIe=hg();function KIe(t){return t instanceof kr.Alternative||t instanceof kr.Option||t instanceof kr.Repetition||t instanceof kr.RepetitionMandatory||t instanceof kr.RepetitionMandatoryWithSeparator||t instanceof kr.RepetitionWithSeparator||t instanceof kr.Terminal||t instanceof kr.Rule}Gi.isSequenceProd=KIe;function jv(t,e){e===void 0&&(e=[]);var r=t instanceof kr.Option||t instanceof kr.Repetition||t instanceof kr.RepetitionWithSeparator;return r?!0:t instanceof kr.Alternation?(0,Np.some)(t.definition,function(i){return jv(i,e)}):t instanceof kr.NonTerminal&&(0,Np.contains)(e,t)?!1:t instanceof kr.AbstractProduction?(t instanceof kr.NonTerminal&&e.push(t),(0,Np.every)(t.definition,function(i){return jv(i,e)})):!1}Gi.isOptionalProd=jv;function HIe(t){return t instanceof kr.Alternation}Gi.isBranchingProd=HIe;function jIe(t){if(t instanceof kr.NonTerminal)return\"SUBRULE\";if(t instanceof kr.Option)return\"OPTION\";if(t instanceof kr.Alternation)return\"OR\";if(t instanceof kr.RepetitionMandatory)return\"AT_LEAST_ONE\";if(t instanceof kr.RepetitionMandatoryWithSeparator)return\"AT_LEAST_ONE_SEP\";if(t instanceof kr.RepetitionWithSeparator)return\"MANY_SEP\";if(t instanceof kr.Repetition)return\"MANY\";if(t instanceof kr.Terminal)return\"CONSUME\";throw Error(\"non exhaustive match\")}Gi.getProductionDslName=jIe;var jj=function(t){MIe(e,t);function e(){var r=t!==null&&t.apply(this,arguments)||this;return 
r.separator=\"-\",r.dslMethods={option:[],alternation:[],repetition:[],repetitionWithSeparator:[],repetitionMandatory:[],repetitionMandatoryWithSeparator:[]},r}return e.prototype.reset=function(){this.dslMethods={option:[],alternation:[],repetition:[],repetitionWithSeparator:[],repetitionMandatory:[],repetitionMandatoryWithSeparator:[]}},e.prototype.visitTerminal=function(r){var i=r.terminalType.name+this.separator+\"Terminal\";(0,Np.has)(this.dslMethods,i)||(this.dslMethods[i]=[]),this.dslMethods[i].push(r)},e.prototype.visitNonTerminal=function(r){var i=r.nonTerminalName+this.separator+\"Terminal\";(0,Np.has)(this.dslMethods,i)||(this.dslMethods[i]=[]),this.dslMethods[i].push(r)},e.prototype.visitOption=function(r){this.dslMethods.option.push(r)},e.prototype.visitRepetitionWithSeparator=function(r){this.dslMethods.repetitionWithSeparator.push(r)},e.prototype.visitRepetitionMandatory=function(r){this.dslMethods.repetitionMandatory.push(r)},e.prototype.visitRepetitionMandatoryWithSeparator=function(r){this.dslMethods.repetitionMandatoryWithSeparator.push(r)},e.prototype.visitRepetition=function(r){this.dslMethods.repetition.push(r)},e.prototype.visitAlternation=function(r){this.dslMethods.alternation.push(r)},e}(UIe.GAstVisitor);Gi.DslMethodsCollectorVisitor=jj;var VI=new jj;function GIe(t){VI.reset(),t.accept(VI);var e=VI.dslMethods;return VI.reset(),e}Gi.collectMethods=GIe});var Yv=w(Mo=>{\"use strict\";Object.defineProperty(Mo,\"__esModule\",{value:!0});Mo.firstForTerminal=Mo.firstForBranching=Mo.firstForSequence=Mo.first=void 0;var XI=Yt(),Gj=bn(),Gv=Lp();function ZI(t){if(t instanceof Gj.NonTerminal)return ZI(t.referencedRule);if(t instanceof Gj.Terminal)return Jj(t);if((0,Gv.isSequenceProd)(t))return Yj(t);if((0,Gv.isBranchingProd)(t))return qj(t);throw Error(\"non exhaustive match\")}Mo.first=ZI;function Yj(t){for(var e=[],r=t.definition,i=0,n=r.length>i,s,o=!0;n&&o;)s=r[i],o=(0,Gv.isOptionalProd)(s),e=e.concat(ZI(s)),i=i+1,n=r.length>i;return(0,XI.uniq)(e)}Mo.firstForSequence=Yj;function qj(t){var e=(0,XI.map)(t.definition,function(r){return ZI(r)});return(0,XI.uniq)((0,XI.flatten)(e))}Mo.firstForBranching=qj;function Jj(t){return[t.terminalType]}Mo.firstForTerminal=Jj});var qv=w($I=>{\"use strict\";Object.defineProperty($I,\"__esModule\",{value:!0});$I.IN=void 0;$I.IN=\"_~IN~_\"});var Xj=w(Qs=>{\"use strict\";var YIe=Qs&&Qs.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!=\"function\"&&r!==null)throw new TypeError(\"Class extends value \"+String(r)+\" is not a constructor or null\");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Qs,\"__esModule\",{value:!0});Qs.buildInProdFollowPrefix=Qs.buildBetweenProdsFollowPrefix=Qs.computeAllProdsFollows=Qs.ResyncFollowsWalker=void 0;var qIe=zI(),JIe=Yv(),Wj=Yt(),zj=qv(),WIe=bn(),Vj=function(t){YIe(e,t);function e(r){var i=t.call(this)||this;return i.topProd=r,i.follows={},i}return e.prototype.startWalking=function(){return this.walk(this.topProd),this.follows},e.prototype.walkTerminal=function(r,i,n){},e.prototype.walkProdRef=function(r,i,n){var s=_j(r.referencedRule,r.idx)+this.topProd.name,o=i.concat(n),a=new WIe.Alternative({definition:o}),l=(0,JIe.first)(a);this.follows[s]=l},e}(qIe.RestWalker);Qs.ResyncFollowsWalker=Vj;function zIe(t){var 
e={};return(0,Wj.forEach)(t,function(r){var i=new Vj(r).startWalking();(0,Wj.assign)(e,i)}),e}Qs.computeAllProdsFollows=zIe;function _j(t,e){return t.name+e+zj.IN}Qs.buildBetweenProdsFollowPrefix=_j;function _Ie(t){var e=t.terminalType.name;return e+t.idx+zj.IN}Qs.buildInProdFollowPrefix=_Ie});var Tp=w(Ka=>{\"use strict\";Object.defineProperty(Ka,\"__esModule\",{value:!0});Ka.defaultGrammarValidatorErrorProvider=Ka.defaultGrammarResolverErrorProvider=Ka.defaultParserErrorProvider=void 0;var pg=JA(),VIe=Yt(),ao=Yt(),Jv=bn(),Zj=Lp();Ka.defaultParserErrorProvider={buildMismatchTokenMessage:function(t){var e=t.expected,r=t.actual,i=t.previous,n=t.ruleName,s=(0,pg.hasTokenLabel)(e),o=s?\"--> \"+(0,pg.tokenLabel)(e)+\" <--\":\"token of type --> \"+e.name+\" <--\",a=\"Expecting \"+o+\" but found --> '\"+r.image+\"' <--\";return a},buildNotAllInputParsedMessage:function(t){var e=t.firstRedundant,r=t.ruleName;return\"Redundant input, expecting EOF but found: \"+e.image},buildNoViableAltMessage:function(t){var e=t.expectedPathsPerAlt,r=t.actual,i=t.previous,n=t.customUserDescription,s=t.ruleName,o=\"Expecting: \",a=(0,ao.first)(r).image,l=`\nbut found: '`+a+\"'\";if(n)return o+n+l;var c=(0,ao.reduce)(e,function(h,p){return h.concat(p)},[]),u=(0,ao.map)(c,function(h){return\"[\"+(0,ao.map)(h,function(p){return(0,pg.tokenLabel)(p)}).join(\", \")+\"]\"}),g=(0,ao.map)(u,function(h,p){return\"  \"+(p+1)+\". \"+h}),f=`one of these possible Token sequences:\n`+g.join(`\n`);return o+f+l},buildEarlyExitMessage:function(t){var e=t.expectedIterationPaths,r=t.actual,i=t.customUserDescription,n=t.ruleName,s=\"Expecting: \",o=(0,ao.first)(r).image,a=`\nbut found: '`+o+\"'\";if(i)return s+i+a;var l=(0,ao.map)(e,function(u){return\"[\"+(0,ao.map)(u,function(g){return(0,pg.tokenLabel)(g)}).join(\",\")+\"]\"}),c=`expecting at least one iteration which starts with one of these possible Token sequences::\n  `+(\"<\"+l.join(\" ,\")+\">\");return s+c+a}};Object.freeze(Ka.defaultParserErrorProvider);Ka.defaultGrammarResolverErrorProvider={buildRuleNotFoundError:function(t,e){var r=\"Invalid grammar, reference to a rule which is not defined: ->\"+e.nonTerminalName+`<-\ninside top level rule: ->`+t.name+\"<-\";return r}};Ka.defaultGrammarValidatorErrorProvider={buildDuplicateFoundError:function(t,e){function r(u){return u instanceof Jv.Terminal?u.terminalType.name:u instanceof Jv.NonTerminal?u.nonTerminalName:\"\"}var i=t.name,n=(0,ao.first)(e),s=n.idx,o=(0,Zj.getProductionDslName)(n),a=r(n),l=s>0,c=\"->\"+o+(l?s:\"\")+\"<- \"+(a?\"with argument: ->\"+a+\"<-\":\"\")+`\n                  appears more than once (`+e.length+\" times) in the top level rule: ->\"+i+`<-.                  
\n                  For further details see: https://chevrotain.io/docs/FAQ.html#NUMERICAL_SUFFIXES \n                  `;return c=c.replace(/[ \\t]+/g,\" \"),c=c.replace(/\\s\\s+/g,`\n`),c},buildNamespaceConflictError:function(t){var e=`Namespace conflict found in grammar.\n`+(\"The grammar has both a Terminal(Token) and a Non-Terminal(Rule) named: <\"+t.name+`>.\n`)+`To resolve this make sure each Terminal and Non-Terminal names are unique\nThis is easy to accomplish by using the convention that Terminal names start with an uppercase letter\nand Non-Terminal names start with a lower case letter.`;return e},buildAlternationPrefixAmbiguityError:function(t){var e=(0,ao.map)(t.prefixPath,function(n){return(0,pg.tokenLabel)(n)}).join(\", \"),r=t.alternation.idx===0?\"\":t.alternation.idx,i=\"Ambiguous alternatives: <\"+t.ambiguityIndices.join(\" ,\")+`> due to common lookahead prefix\n`+(\"in <OR\"+r+\"> inside <\"+t.topLevelRule.name+`> Rule,\n`)+(\"<\"+e+`> may appears as a prefix path in all these alternatives.\n`)+`See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#COMMON_PREFIX\nFor Further details.`;return i},buildAlternationAmbiguityError:function(t){var e=(0,ao.map)(t.prefixPath,function(n){return(0,pg.tokenLabel)(n)}).join(\", \"),r=t.alternation.idx===0?\"\":t.alternation.idx,i=\"Ambiguous Alternatives Detected: <\"+t.ambiguityIndices.join(\" ,\")+\"> in <OR\"+r+\">\"+(\" inside <\"+t.topLevelRule.name+`> Rule,\n`)+(\"<\"+e+`> may appears as a prefix path in all these alternatives.\n`);return i=i+`See: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#AMBIGUOUS_ALTERNATIVES\nFor Further details.`,i},buildEmptyRepetitionError:function(t){var e=(0,Zj.getProductionDslName)(t.repetition);t.repetition.idx!==0&&(e+=t.repetition.idx);var r=\"The repetition <\"+e+\"> within Rule <\"+t.topLevelRule.name+`> can never consume any tokens.\nThis could lead to an infinite loop.`;return r},buildTokenNameError:function(t){return\"deprecated\"},buildEmptyAlternationError:function(t){var e=\"Ambiguous empty alternative: <\"+(t.emptyChoiceIdx+1)+\">\"+(\" in <OR\"+t.alternation.idx+\"> inside <\"+t.topLevelRule.name+`> Rule.\n`)+\"Only the last alternative may be an empty alternative.\";return e},buildTooManyAlternativesError:function(t){var e=`An Alternation cannot have more than 256 alternatives:\n`+(\"<OR\"+t.alternation.idx+\"> inside <\"+t.topLevelRule.name+`> Rule.\n has `+(t.alternation.definition.length+1)+\" alternatives.\");return e},buildLeftRecursionError:function(t){var e=t.topLevelRule.name,r=VIe.map(t.leftRecursionPath,function(s){return s.name}),i=e+\" --> \"+r.concat([e]).join(\" --> \"),n=`Left Recursion found in grammar.\n`+(\"rule: <\"+e+`> can be invoked from itself (directly or indirectly)\n`)+(`without consuming any Tokens. 
The grammar path that causes this is: \n `+i+`\n`)+` To fix this refactor your grammar to remove the left recursion.\nsee: https://en.wikipedia.org/wiki/LL_parser#Left_Factoring.`;return n},buildInvalidRuleNameError:function(t){return\"deprecated\"},buildDuplicateRuleNameError:function(t){var e;t.topLevelRule instanceof Jv.Rule?e=t.topLevelRule.name:e=t.topLevelRule;var r=\"Duplicate definition, rule: ->\"+e+\"<- is already defined in the grammar: ->\"+t.grammarName+\"<-\";return r}}});var tG=w(WA=>{\"use strict\";var XIe=WA&&WA.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!=\"function\"&&r!==null)throw new TypeError(\"Class extends value \"+String(r)+\" is not a constructor or null\");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(WA,\"__esModule\",{value:!0});WA.GastRefResolverVisitor=WA.resolveGrammar=void 0;var ZIe=Xn(),$j=Yt(),$Ie=hg();function eye(t,e){var r=new eG(t,e);return r.resolveRefs(),r.errors}WA.resolveGrammar=eye;var eG=function(t){XIe(e,t);function e(r,i){var n=t.call(this)||this;return n.nameToTopRule=r,n.errMsgProvider=i,n.errors=[],n}return e.prototype.resolveRefs=function(){var r=this;(0,$j.forEach)((0,$j.values)(this.nameToTopRule),function(i){r.currTopLevel=i,i.accept(r)})},e.prototype.visitNonTerminal=function(r){var i=this.nameToTopRule[r.nonTerminalName];if(i)r.referencedRule=i;else{var n=this.errMsgProvider.buildRuleNotFoundError(this.currTopLevel,r);this.errors.push({message:n,type:ZIe.ParserDefinitionErrorType.UNRESOLVED_SUBRULE_REF,ruleName:this.currTopLevel.name,unresolvedRefName:r.nonTerminalName})}},e}($Ie.GAstVisitor);WA.GastRefResolverVisitor=eG});var Mp=w(Mr=>{\"use strict\";var bc=Mr&&Mr.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!=\"function\"&&r!==null)throw new TypeError(\"Class extends value \"+String(r)+\" is not a constructor or null\");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Mr,\"__esModule\",{value:!0});Mr.nextPossibleTokensAfter=Mr.possiblePathsFrom=Mr.NextTerminalAfterAtLeastOneSepWalker=Mr.NextTerminalAfterAtLeastOneWalker=Mr.NextTerminalAfterManySepWalker=Mr.NextTerminalAfterManyWalker=Mr.AbstractNextTerminalAfterProductionWalker=Mr.NextAfterTokenWalker=Mr.AbstractNextPossibleTokensWalker=void 0;var rG=zI(),Ut=Yt(),tye=Yv(),Dt=bn(),iG=function(t){bc(e,t);function e(r,i){var n=t.call(this)||this;return n.topProd=r,n.path=i,n.possibleTokTypes=[],n.nextProductionName=\"\",n.nextProductionOccurrence=0,n.found=!1,n.isAtEndOfPath=!1,n}return e.prototype.startWalking=function(){if(this.found=!1,this.path.ruleStack[0]!==this.topProd.name)throw Error(\"The path does not start with the walker's top Rule!\");return this.ruleStack=(0,Ut.cloneArr)(this.path.ruleStack).reverse(),this.occurrenceStack=(0,Ut.cloneArr)(this.path.occurrenceStack).reverse(),this.ruleStack.pop(),this.occurrenceStack.pop(),this.updateExpectedNext(),this.walk(this.topProd),this.possibleTokTypes},e.prototype.walk=function(r,i){i===void 
0&&(i=[]),this.found||t.prototype.walk.call(this,r,i)},e.prototype.walkProdRef=function(r,i,n){if(r.referencedRule.name===this.nextProductionName&&r.idx===this.nextProductionOccurrence){var s=i.concat(n);this.updateExpectedNext(),this.walk(r.referencedRule,s)}},e.prototype.updateExpectedNext=function(){(0,Ut.isEmpty)(this.ruleStack)?(this.nextProductionName=\"\",this.nextProductionOccurrence=0,this.isAtEndOfPath=!0):(this.nextProductionName=this.ruleStack.pop(),this.nextProductionOccurrence=this.occurrenceStack.pop())},e}(rG.RestWalker);Mr.AbstractNextPossibleTokensWalker=iG;var rye=function(t){bc(e,t);function e(r,i){var n=t.call(this,r,i)||this;return n.path=i,n.nextTerminalName=\"\",n.nextTerminalOccurrence=0,n.nextTerminalName=n.path.lastTok.name,n.nextTerminalOccurrence=n.path.lastTokOccurrence,n}return e.prototype.walkTerminal=function(r,i,n){if(this.isAtEndOfPath&&r.terminalType.name===this.nextTerminalName&&r.idx===this.nextTerminalOccurrence&&!this.found){var s=i.concat(n),o=new Dt.Alternative({definition:s});this.possibleTokTypes=(0,tye.first)(o),this.found=!0}},e}(iG);Mr.NextAfterTokenWalker=rye;var Op=function(t){bc(e,t);function e(r,i){var n=t.call(this)||this;return n.topRule=r,n.occurrence=i,n.result={token:void 0,occurrence:void 0,isEndOfRule:void 0},n}return e.prototype.startWalking=function(){return this.walk(this.topRule),this.result},e}(rG.RestWalker);Mr.AbstractNextTerminalAfterProductionWalker=Op;var iye=function(t){bc(e,t);function e(){return t!==null&&t.apply(this,arguments)||this}return e.prototype.walkMany=function(r,i,n){if(r.idx===this.occurrence){var s=(0,Ut.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof Dt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else t.prototype.walkMany.call(this,r,i,n)},e}(Op);Mr.NextTerminalAfterManyWalker=iye;var nye=function(t){bc(e,t);function e(){return t!==null&&t.apply(this,arguments)||this}return e.prototype.walkManySep=function(r,i,n){if(r.idx===this.occurrence){var s=(0,Ut.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof Dt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else t.prototype.walkManySep.call(this,r,i,n)},e}(Op);Mr.NextTerminalAfterManySepWalker=nye;var sye=function(t){bc(e,t);function e(){return t!==null&&t.apply(this,arguments)||this}return e.prototype.walkAtLeastOne=function(r,i,n){if(r.idx===this.occurrence){var s=(0,Ut.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof Dt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else t.prototype.walkAtLeastOne.call(this,r,i,n)},e}(Op);Mr.NextTerminalAfterAtLeastOneWalker=sye;var oye=function(t){bc(e,t);function e(){return t!==null&&t.apply(this,arguments)||this}return e.prototype.walkAtLeastOneSep=function(r,i,n){if(r.idx===this.occurrence){var s=(0,Ut.first)(i.concat(n));this.result.isEndOfRule=s===void 0,s instanceof Dt.Terminal&&(this.result.token=s.terminalType,this.result.occurrence=s.idx)}else t.prototype.walkAtLeastOneSep.call(this,r,i,n)},e}(Op);Mr.NextTerminalAfterAtLeastOneSepWalker=oye;function nG(t,e,r){r===void 0&&(r=[]),r=(0,Ut.cloneArr)(r);var i=[],n=0;function s(c){return c.concat((0,Ut.drop)(t,n+1))}function o(c){var u=nG(s(c),e,r);return i.concat(u)}for(;r.length<e&&n<t.length;){var a=t[n];if(a instanceof Dt.Alternative)return o(a.definition);if(a instanceof Dt.NonTerminal)return o(a.definition);if(a instanceof Dt.Option)i=o(a.definition);else if(a instanceof Dt.RepetitionMandatory){var l=a.definition.concat([new 
Dt.Repetition({definition:a.definition})]);return o(l)}else if(a instanceof Dt.RepetitionMandatoryWithSeparator){var l=[new Dt.Alternative({definition:a.definition}),new Dt.Repetition({definition:[new Dt.Terminal({terminalType:a.separator})].concat(a.definition)})];return o(l)}else if(a instanceof Dt.RepetitionWithSeparator){var l=a.definition.concat([new Dt.Repetition({definition:[new Dt.Terminal({terminalType:a.separator})].concat(a.definition)})]);i=o(l)}else if(a instanceof Dt.Repetition){var l=a.definition.concat([new Dt.Repetition({definition:a.definition})]);i=o(l)}else{if(a instanceof Dt.Alternation)return(0,Ut.forEach)(a.definition,function(c){(0,Ut.isEmpty)(c.definition)===!1&&(i=o(c.definition))}),i;if(a instanceof Dt.Terminal)r.push(a.terminalType);else throw Error(\"non exhaustive match\")}n++}return i.push({partialPath:r,suffixDef:(0,Ut.drop)(t,n)}),i}Mr.possiblePathsFrom=nG;function Aye(t,e,r,i){var n=\"EXIT_NONE_TERMINAL\",s=[n],o=\"EXIT_ALTERNATIVE\",a=!1,l=e.length,c=l-i-1,u=[],g=[];for(g.push({idx:-1,def:t,ruleStack:[],occurrenceStack:[]});!(0,Ut.isEmpty)(g);){var f=g.pop();if(f===o){a&&(0,Ut.last)(g).idx<=c&&g.pop();continue}var h=f.def,p=f.idx,m=f.ruleStack,y=f.occurrenceStack;if(!(0,Ut.isEmpty)(h)){var Q=h[0];if(Q===n){var S={idx:p,def:(0,Ut.drop)(h),ruleStack:(0,Ut.dropRight)(m),occurrenceStack:(0,Ut.dropRight)(y)};g.push(S)}else if(Q instanceof Dt.Terminal)if(p<l-1){var x=p+1,M=e[x];if(r(M,Q.terminalType)){var S={idx:x,def:(0,Ut.drop)(h),ruleStack:m,occurrenceStack:y};g.push(S)}}else if(p===l-1)u.push({nextTokenType:Q.terminalType,nextTokenOccurrence:Q.idx,ruleStack:m,occurrenceStack:y}),a=!0;else throw Error(\"non exhaustive match\");else if(Q instanceof Dt.NonTerminal){var Y=(0,Ut.cloneArr)(m);Y.push(Q.nonTerminalName);var U=(0,Ut.cloneArr)(y);U.push(Q.idx);var S={idx:p,def:Q.definition.concat(s,(0,Ut.drop)(h)),ruleStack:Y,occurrenceStack:U};g.push(S)}else if(Q instanceof Dt.Option){var J={idx:p,def:(0,Ut.drop)(h),ruleStack:m,occurrenceStack:y};g.push(J),g.push(o);var W={idx:p,def:Q.definition.concat((0,Ut.drop)(h)),ruleStack:m,occurrenceStack:y};g.push(W)}else if(Q instanceof Dt.RepetitionMandatory){var ee=new Dt.Repetition({definition:Q.definition,idx:Q.idx}),Z=Q.definition.concat([ee],(0,Ut.drop)(h)),S={idx:p,def:Z,ruleStack:m,occurrenceStack:y};g.push(S)}else if(Q instanceof Dt.RepetitionMandatoryWithSeparator){var A=new Dt.Terminal({terminalType:Q.separator}),ee=new Dt.Repetition({definition:[A].concat(Q.definition),idx:Q.idx}),Z=Q.definition.concat([ee],(0,Ut.drop)(h)),S={idx:p,def:Z,ruleStack:m,occurrenceStack:y};g.push(S)}else if(Q instanceof Dt.RepetitionWithSeparator){var J={idx:p,def:(0,Ut.drop)(h),ruleStack:m,occurrenceStack:y};g.push(J),g.push(o);var A=new Dt.Terminal({terminalType:Q.separator}),ne=new Dt.Repetition({definition:[A].concat(Q.definition),idx:Q.idx}),Z=Q.definition.concat([ne],(0,Ut.drop)(h)),W={idx:p,def:Z,ruleStack:m,occurrenceStack:y};g.push(W)}else if(Q instanceof Dt.Repetition){var J={idx:p,def:(0,Ut.drop)(h),ruleStack:m,occurrenceStack:y};g.push(J),g.push(o);var ne=new Dt.Repetition({definition:Q.definition,idx:Q.idx}),Z=Q.definition.concat([ne],(0,Ut.drop)(h)),W={idx:p,def:Z,ruleStack:m,occurrenceStack:y};g.push(W)}else if(Q instanceof Dt.Alternation)for(var le=Q.definition.length-1;le>=0;le--){var Ae=Q.definition[le],T={idx:p,def:Ae.definition.concat((0,Ut.drop)(h)),ruleStack:m,occurrenceStack:y};g.push(T),g.push(o)}else if(Q instanceof 
Dt.Alternative)g.push({idx:p,def:Q.definition.concat((0,Ut.drop)(h)),ruleStack:m,occurrenceStack:y});else if(Q instanceof Dt.Rule)g.push(aye(Q,p,m,y));else throw Error(\"non exhaustive match\")}}return u}Mr.nextPossibleTokensAfter=Aye;function aye(t,e,r,i){var n=(0,Ut.cloneArr)(r);n.push(t.name);var s=(0,Ut.cloneArr)(i);return s.push(1),{idx:e,def:t.definition,ruleStack:n,occurrenceStack:s}}});var Up=w(tr=>{\"use strict\";var sG=tr&&tr.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!=\"function\"&&r!==null)throw new TypeError(\"Class extends value \"+String(r)+\" is not a constructor or null\");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(tr,\"__esModule\",{value:!0});tr.areTokenCategoriesNotUsed=tr.isStrictPrefixOfPath=tr.containsPath=tr.getLookaheadPathsForOptionalProd=tr.getLookaheadPathsForOr=tr.lookAheadSequenceFromAlternatives=tr.buildSingleAlternativeLookaheadFunction=tr.buildAlternativesLookAheadFunc=tr.buildLookaheadFuncForOptionalProd=tr.buildLookaheadFuncForOr=tr.getProdType=tr.PROD_TYPE=void 0;var cr=Yt(),oG=Mp(),lye=zI(),ey=fg(),zA=bn(),cye=hg(),ci;(function(t){t[t.OPTION=0]=\"OPTION\",t[t.REPETITION=1]=\"REPETITION\",t[t.REPETITION_MANDATORY=2]=\"REPETITION_MANDATORY\",t[t.REPETITION_MANDATORY_WITH_SEPARATOR=3]=\"REPETITION_MANDATORY_WITH_SEPARATOR\",t[t.REPETITION_WITH_SEPARATOR=4]=\"REPETITION_WITH_SEPARATOR\",t[t.ALTERNATION=5]=\"ALTERNATION\"})(ci=tr.PROD_TYPE||(tr.PROD_TYPE={}));function uye(t){if(t instanceof zA.Option)return ci.OPTION;if(t instanceof zA.Repetition)return ci.REPETITION;if(t instanceof zA.RepetitionMandatory)return ci.REPETITION_MANDATORY;if(t instanceof zA.RepetitionMandatoryWithSeparator)return ci.REPETITION_MANDATORY_WITH_SEPARATOR;if(t instanceof zA.RepetitionWithSeparator)return ci.REPETITION_WITH_SEPARATOR;if(t instanceof zA.Alternation)return ci.ALTERNATION;throw Error(\"non exhaustive match\")}tr.getProdType=uye;function gye(t,e,r,i,n,s){var o=aG(t,e,r),a=Wv(o)?ey.tokenStructuredMatcherNoCategories:ey.tokenStructuredMatcher;return s(o,i,a,n)}tr.buildLookaheadFuncForOr=gye;function fye(t,e,r,i,n,s){var o=AG(t,e,n,r),a=Wv(o)?ey.tokenStructuredMatcherNoCategories:ey.tokenStructuredMatcher;return s(o[0],a,i)}tr.buildLookaheadFuncForOptionalProd=fye;function hye(t,e,r,i){var n=t.length,s=(0,cr.every)(t,function(l){return(0,cr.every)(l,function(c){return c.length===1})});if(e)return function(l){for(var c=(0,cr.map)(l,function(x){return x.GATE}),u=0;u<n;u++){var g=t[u],f=g.length,h=c[u];if(h!==void 0&&h.call(this)===!1)continue;e:for(var p=0;p<f;p++){for(var m=g[p],y=m.length,Q=0;Q<y;Q++){var S=this.LA(Q+1);if(r(S,m[Q])===!1)continue e}return u}}};if(s&&!i){var o=(0,cr.map)(t,function(l){return(0,cr.flatten)(l)}),a=(0,cr.reduce)(o,function(l,c,u){return(0,cr.forEach)(c,function(g){(0,cr.has)(l,g.tokenTypeIdx)||(l[g.tokenTypeIdx]=u),(0,cr.forEach)(g.categoryMatches,function(f){(0,cr.has)(l,f)||(l[f]=u)})}),l},[]);return function(){var l=this.LA(1);return a[l.tokenTypeIdx]}}else return function(){for(var l=0;l<n;l++){var c=t[l],u=c.length;e:for(var g=0;g<u;g++){for(var f=c[g],h=f.length,p=0;p<h;p++){var m=this.LA(p+1);if(r(m,f[p])===!1)continue e}return l}}}}tr.buildAlternativesLookAheadFunc=hye;function pye(t,e,r){var i=(0,cr.every)(t,function(c){return 
c.length===1}),n=t.length;if(i&&!r){var s=(0,cr.flatten)(t);if(s.length===1&&(0,cr.isEmpty)(s[0].categoryMatches)){var o=s[0],a=o.tokenTypeIdx;return function(){return this.LA(1).tokenTypeIdx===a}}else{var l=(0,cr.reduce)(s,function(c,u,g){return c[u.tokenTypeIdx]=!0,(0,cr.forEach)(u.categoryMatches,function(f){c[f]=!0}),c},[]);return function(){var c=this.LA(1);return l[c.tokenTypeIdx]===!0}}}else return function(){e:for(var c=0;c<n;c++){for(var u=t[c],g=u.length,f=0;f<g;f++){var h=this.LA(f+1);if(e(h,u[f])===!1)continue e}return!0}return!1}}tr.buildSingleAlternativeLookaheadFunction=pye;var dye=function(t){sG(e,t);function e(r,i,n){var s=t.call(this)||this;return s.topProd=r,s.targetOccurrence=i,s.targetProdType=n,s}return e.prototype.startWalking=function(){return this.walk(this.topProd),this.restDef},e.prototype.checkIsTarget=function(r,i,n,s){return r.idx===this.targetOccurrence&&this.targetProdType===i?(this.restDef=n.concat(s),!0):!1},e.prototype.walkOption=function(r,i,n){this.checkIsTarget(r,ci.OPTION,i,n)||t.prototype.walkOption.call(this,r,i,n)},e.prototype.walkAtLeastOne=function(r,i,n){this.checkIsTarget(r,ci.REPETITION_MANDATORY,i,n)||t.prototype.walkOption.call(this,r,i,n)},e.prototype.walkAtLeastOneSep=function(r,i,n){this.checkIsTarget(r,ci.REPETITION_MANDATORY_WITH_SEPARATOR,i,n)||t.prototype.walkOption.call(this,r,i,n)},e.prototype.walkMany=function(r,i,n){this.checkIsTarget(r,ci.REPETITION,i,n)||t.prototype.walkOption.call(this,r,i,n)},e.prototype.walkManySep=function(r,i,n){this.checkIsTarget(r,ci.REPETITION_WITH_SEPARATOR,i,n)||t.prototype.walkOption.call(this,r,i,n)},e}(lye.RestWalker),lG=function(t){sG(e,t);function e(r,i,n){var s=t.call(this)||this;return s.targetOccurrence=r,s.targetProdType=i,s.targetRef=n,s.result=[],s}return e.prototype.checkIsTarget=function(r,i){r.idx===this.targetOccurrence&&this.targetProdType===i&&(this.targetRef===void 0||r===this.targetRef)&&(this.result=r.definition)},e.prototype.visitOption=function(r){this.checkIsTarget(r,ci.OPTION)},e.prototype.visitRepetition=function(r){this.checkIsTarget(r,ci.REPETITION)},e.prototype.visitRepetitionMandatory=function(r){this.checkIsTarget(r,ci.REPETITION_MANDATORY)},e.prototype.visitRepetitionMandatoryWithSeparator=function(r){this.checkIsTarget(r,ci.REPETITION_MANDATORY_WITH_SEPARATOR)},e.prototype.visitRepetitionWithSeparator=function(r){this.checkIsTarget(r,ci.REPETITION_WITH_SEPARATOR)},e.prototype.visitAlternation=function(r){this.checkIsTarget(r,ci.ALTERNATION)},e}(cye.GAstVisitor);function cG(t){for(var e=new Array(t),r=0;r<t;r++)e[r]=[];return e}function zv(t){for(var e=[\"\"],r=0;r<t.length;r++){for(var i=t[r],n=[],s=0;s<e.length;s++){var o=e[s];n.push(o+\"_\"+i.tokenTypeIdx);for(var a=0;a<i.categoryMatches.length;a++){var l=\"_\"+i.categoryMatches[a];n.push(o+l)}}e=n}return e}function Cye(t,e,r){for(var i=0;i<t.length;i++)if(i!==r)for(var n=t[i],s=0;s<e.length;s++){var o=e[s];if(n[o]===!0)return!1}return!0}function _v(t,e){for(var r=(0,cr.map)(t,function(u){return(0,oG.possiblePathsFrom)([u],1)}),i=cG(r.length),n=(0,cr.map)(r,function(u){var g={};return(0,cr.forEach)(u,function(f){var h=zv(f.partialPath);(0,cr.forEach)(h,function(p){g[p]=!0})}),g}),s=r,o=1;o<=e;o++){var a=s;s=cG(a.length);for(var l=function(u){for(var g=a[u],f=0;f<g.length;f++){var h=g[f].partialPath,p=g[f].suffixDef,m=zv(h),y=Cye(n,m,u);if(y||(0,cr.isEmpty)(p)||h.length===e){var Q=i[u];if(uG(Q,h)===!1){Q.push(h);for(var S=0;S<m.length;S++){var x=m[S];n[u][x]=!0}}}else{var 
M=(0,oG.possiblePathsFrom)(p,o+1,h);s[u]=s[u].concat(M),(0,cr.forEach)(M,function(Y){var U=zv(Y.partialPath);(0,cr.forEach)(U,function(J){n[u][J]=!0})})}}},c=0;c<a.length;c++)l(c)}return i}tr.lookAheadSequenceFromAlternatives=_v;function aG(t,e,r,i){var n=new lG(t,ci.ALTERNATION,i);return e.accept(n),_v(n.result,r)}tr.getLookaheadPathsForOr=aG;function AG(t,e,r,i){var n=new lG(t,r);e.accept(n);var s=n.result,o=new dye(e,t,r),a=o.startWalking(),l=new zA.Alternative({definition:s}),c=new zA.Alternative({definition:a});return _v([l,c],i)}tr.getLookaheadPathsForOptionalProd=AG;function uG(t,e){e:for(var r=0;r<t.length;r++){var i=t[r];if(i.length===e.length){for(var n=0;n<i.length;n++){var s=e[n],o=i[n],a=s===o||o.categoryMatchesMap[s.tokenTypeIdx]!==void 0;if(a===!1)continue e}return!0}}return!1}tr.containsPath=uG;function mye(t,e){return t.length<e.length&&(0,cr.every)(t,function(r,i){var n=e[i];return r===n||n.categoryMatchesMap[r.tokenTypeIdx]})}tr.isStrictPrefixOfPath=mye;function Wv(t){return(0,cr.every)(t,function(e){return(0,cr.every)(e,function(r){return(0,cr.every)(r,function(i){return(0,cr.isEmpty)(i.categoryMatches)})})})}tr.areTokenCategoriesNotUsed=Wv});var tS=w(Xt=>{\"use strict\";var Vv=Xt&&Xt.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!=\"function\"&&r!==null)throw new TypeError(\"Class extends value \"+String(r)+\" is not a constructor or null\");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Xt,\"__esModule\",{value:!0});Xt.checkPrefixAlternativesAmbiguities=Xt.validateSomeNonEmptyLookaheadPath=Xt.validateTooManyAlts=Xt.RepetionCollector=Xt.validateAmbiguousAlternationAlternatives=Xt.validateEmptyOrAlternative=Xt.getFirstNoneTerminal=Xt.validateNoLeftRecursion=Xt.validateRuleIsOverridden=Xt.validateRuleDoesNotAlreadyExist=Xt.OccurrenceValidationCollector=Xt.identifyProductionForDuplicates=Xt.validateGrammar=void 0;var nr=Yt(),xr=Yt(),Uo=Xn(),Xv=Lp(),dg=Up(),Eye=Mp(),Ao=bn(),Zv=hg();function wye(t,e,r,i,n){var s=nr.map(t,function(h){return Iye(h,i)}),o=nr.map(t,function(h){return $v(h,h,i)}),a=[],l=[],c=[];(0,xr.every)(o,xr.isEmpty)&&(a=(0,xr.map)(t,function(h){return fG(h,i)}),l=(0,xr.map)(t,function(h){return hG(h,e,i)}),c=dG(t,e,i));var u=yye(t,r,i),g=(0,xr.map)(t,function(h){return pG(h,i)}),f=(0,xr.map)(t,function(h){return gG(h,t,n,i)});return nr.flatten(s.concat(c,o,a,l,u,g,f))}Xt.validateGrammar=wye;function Iye(t,e){var r=new EG;t.accept(r);var i=r.allProductions,n=nr.groupBy(i,CG),s=nr.pick(n,function(a){return a.length>1}),o=nr.map(nr.values(s),function(a){var l=nr.first(a),c=e.buildDuplicateFoundError(t,a),u=(0,Xv.getProductionDslName)(l),g={message:c,type:Uo.ParserDefinitionErrorType.DUPLICATE_PRODUCTIONS,ruleName:t.name,dslName:u,occurrence:l.idx},f=mG(l);return f&&(g.parameter=f),g});return o}function CG(t){return(0,Xv.getProductionDslName)(t)+\"_#_\"+t.idx+\"_#_\"+mG(t)}Xt.identifyProductionForDuplicates=CG;function mG(t){return t instanceof Ao.Terminal?t.terminalType.name:t instanceof Ao.NonTerminal?t.nonTerminalName:\"\"}var EG=function(t){Vv(e,t);function e(){var r=t!==null&&t.apply(this,arguments)||this;return r.allProductions=[],r}return 
e.prototype.visitNonTerminal=function(r){this.allProductions.push(r)},e.prototype.visitOption=function(r){this.allProductions.push(r)},e.prototype.visitRepetitionWithSeparator=function(r){this.allProductions.push(r)},e.prototype.visitRepetitionMandatory=function(r){this.allProductions.push(r)},e.prototype.visitRepetitionMandatoryWithSeparator=function(r){this.allProductions.push(r)},e.prototype.visitRepetition=function(r){this.allProductions.push(r)},e.prototype.visitAlternation=function(r){this.allProductions.push(r)},e.prototype.visitTerminal=function(r){this.allProductions.push(r)},e}(Zv.GAstVisitor);Xt.OccurrenceValidationCollector=EG;function gG(t,e,r,i){var n=[],s=(0,xr.reduce)(e,function(a,l){return l.name===t.name?a+1:a},0);if(s>1){var o=i.buildDuplicateRuleNameError({topLevelRule:t,grammarName:r});n.push({message:o,type:Uo.ParserDefinitionErrorType.DUPLICATE_RULE_NAME,ruleName:t.name})}return n}Xt.validateRuleDoesNotAlreadyExist=gG;function Bye(t,e,r){var i=[],n;return nr.contains(e,t)||(n=\"Invalid rule override, rule: ->\"+t+\"<- cannot be overridden in the grammar: ->\"+r+\"<-as it is not defined in any of the super grammars \",i.push({message:n,type:Uo.ParserDefinitionErrorType.INVALID_RULE_OVERRIDE,ruleName:t})),i}Xt.validateRuleIsOverridden=Bye;function $v(t,e,r,i){i===void 0&&(i=[]);var n=[],s=Kp(e.definition);if(nr.isEmpty(s))return[];var o=t.name,a=nr.contains(s,t);a&&n.push({message:r.buildLeftRecursionError({topLevelRule:t,leftRecursionPath:i}),type:Uo.ParserDefinitionErrorType.LEFT_RECURSION,ruleName:o});var l=nr.difference(s,i.concat([t])),c=nr.map(l,function(u){var g=nr.cloneArr(i);return g.push(u),$v(t,u,r,g)});return n.concat(nr.flatten(c))}Xt.validateNoLeftRecursion=$v;function Kp(t){var e=[];if(nr.isEmpty(t))return e;var r=nr.first(t);if(r instanceof Ao.NonTerminal)e.push(r.referencedRule);else if(r instanceof Ao.Alternative||r instanceof Ao.Option||r instanceof Ao.RepetitionMandatory||r instanceof Ao.RepetitionMandatoryWithSeparator||r instanceof Ao.RepetitionWithSeparator||r instanceof Ao.Repetition)e=e.concat(Kp(r.definition));else if(r instanceof Ao.Alternation)e=nr.flatten(nr.map(r.definition,function(o){return Kp(o.definition)}));else if(!(r instanceof Ao.Terminal))throw Error(\"non exhaustive match\");var i=(0,Xv.isOptionalProd)(r),n=t.length>1;if(i&&n){var s=nr.drop(t);return e.concat(Kp(s))}else return e}Xt.getFirstNoneTerminal=Kp;var eS=function(t){Vv(e,t);function e(){var r=t!==null&&t.apply(this,arguments)||this;return r.alternations=[],r}return e.prototype.visitAlternation=function(r){this.alternations.push(r)},e}(Zv.GAstVisitor);function fG(t,e){var r=new eS;t.accept(r);var i=r.alternations,n=nr.reduce(i,function(s,o){var a=nr.dropRight(o.definition),l=nr.map(a,function(c,u){var g=(0,Eye.nextPossibleTokensAfter)([c],[],null,1);return nr.isEmpty(g)?{message:e.buildEmptyAlternationError({topLevelRule:t,alternation:o,emptyChoiceIdx:u}),type:Uo.ParserDefinitionErrorType.NONE_LAST_EMPTY_ALT,ruleName:t.name,occurrence:o.idx,alternative:u+1}:null});return s.concat(nr.compact(l))},[]);return n}Xt.validateEmptyOrAlternative=fG;function hG(t,e,r){var i=new eS;t.accept(i);var n=i.alternations;n=(0,xr.reject)(n,function(o){return o.ignoreAmbiguities===!0});var s=nr.reduce(n,function(o,a){var l=a.idx,c=a.maxLookahead||e,u=(0,dg.getLookaheadPathsForOr)(l,t,c,a),g=bye(u,a,t,r),f=IG(u,a,t,r);return o.concat(g,f)},[]);return s}Xt.validateAmbiguousAlternationAlternatives=hG;var yG=function(t){Vv(e,t);function e(){var r=t!==null&&t.apply(this,arguments)||this;return 
r.allProductions=[],r}return e.prototype.visitRepetitionWithSeparator=function(r){this.allProductions.push(r)},e.prototype.visitRepetitionMandatory=function(r){this.allProductions.push(r)},e.prototype.visitRepetitionMandatoryWithSeparator=function(r){this.allProductions.push(r)},e.prototype.visitRepetition=function(r){this.allProductions.push(r)},e}(Zv.GAstVisitor);Xt.RepetionCollector=yG;function pG(t,e){var r=new eS;t.accept(r);var i=r.alternations,n=nr.reduce(i,function(s,o){return o.definition.length>255&&s.push({message:e.buildTooManyAlternativesError({topLevelRule:t,alternation:o}),type:Uo.ParserDefinitionErrorType.TOO_MANY_ALTS,ruleName:t.name,occurrence:o.idx}),s},[]);return n}Xt.validateTooManyAlts=pG;function dG(t,e,r){var i=[];return(0,xr.forEach)(t,function(n){var s=new yG;n.accept(s);var o=s.allProductions;(0,xr.forEach)(o,function(a){var l=(0,dg.getProdType)(a),c=a.maxLookahead||e,u=a.idx,g=(0,dg.getLookaheadPathsForOptionalProd)(u,n,l,c),f=g[0];if((0,xr.isEmpty)((0,xr.flatten)(f))){var h=r.buildEmptyRepetitionError({topLevelRule:n,repetition:a});i.push({message:h,type:Uo.ParserDefinitionErrorType.NO_NON_EMPTY_LOOKAHEAD,ruleName:n.name})}})}),i}Xt.validateSomeNonEmptyLookaheadPath=dG;function bye(t,e,r,i){var n=[],s=(0,xr.reduce)(t,function(a,l,c){return e.definition[c].ignoreAmbiguities===!0||(0,xr.forEach)(l,function(u){var g=[c];(0,xr.forEach)(t,function(f,h){c!==h&&(0,dg.containsPath)(f,u)&&e.definition[h].ignoreAmbiguities!==!0&&g.push(h)}),g.length>1&&!(0,dg.containsPath)(n,u)&&(n.push(u),a.push({alts:g,path:u}))}),a},[]),o=nr.map(s,function(a){var l=(0,xr.map)(a.alts,function(u){return u+1}),c=i.buildAlternationAmbiguityError({topLevelRule:r,alternation:e,ambiguityIndices:l,prefixPath:a.path});return{message:c,type:Uo.ParserDefinitionErrorType.AMBIGUOUS_ALTS,ruleName:r.name,occurrence:e.idx,alternatives:[a.alts]}});return o}function IG(t,e,r,i){var n=[],s=(0,xr.reduce)(t,function(o,a,l){var c=(0,xr.map)(a,function(u){return{idx:l,path:u}});return o.concat(c)},[]);return(0,xr.forEach)(s,function(o){var a=e.definition[o.idx];if(a.ignoreAmbiguities!==!0){var l=o.idx,c=o.path,u=(0,xr.findAll)(s,function(f){return e.definition[f.idx].ignoreAmbiguities!==!0&&f.idx<l&&(0,dg.isStrictPrefixOfPath)(f.path,c)}),g=(0,xr.map)(u,function(f){var h=[f.idx+1,l+1],p=e.idx===0?\"\":e.idx,m=i.buildAlternationPrefixAmbiguityError({topLevelRule:r,alternation:e,ambiguityIndices:h,prefixPath:f.path});return{message:m,type:Uo.ParserDefinitionErrorType.AMBIGUOUS_PREFIX_ALTS,ruleName:r.name,occurrence:p,alternatives:h}});n=n.concat(g)}}),n}Xt.checkPrefixAlternativesAmbiguities=IG;function yye(t,e,r){var i=[],n=(0,xr.map)(e,function(s){return s.name});return(0,xr.forEach)(t,function(s){var o=s.name;if((0,xr.contains)(n,o)){var a=r.buildNamespaceConflictError(s);i.push({message:a,type:Uo.ParserDefinitionErrorType.CONFLICT_TOKENS_RULES_NAMESPACE,ruleName:o})}}),i}});var BG=w(Cg=>{\"use strict\";Object.defineProperty(Cg,\"__esModule\",{value:!0});Cg.validateGrammar=Cg.resolveGrammar=void 0;var rS=Yt(),Qye=tG(),vye=tS(),wG=Tp();function Sye(t){t=(0,rS.defaults)(t,{errMsgProvider:wG.defaultGrammarResolverErrorProvider});var e={};return(0,rS.forEach)(t.rules,function(r){e[r.name]=r}),(0,Qye.resolveGrammar)(e,t.errMsgProvider)}Cg.resolveGrammar=Sye;function kye(t){return t=(0,rS.defaults)(t,{errMsgProvider:wG.defaultGrammarValidatorErrorProvider}),(0,vye.validateGrammar)(t.rules,t.maxLookahead,t.tokenTypes,t.errMsgProvider,t.grammarName)}Cg.validateGrammar=kye});var mg=w(vn=>{\"use strict\";var 
Hp=vn&&vn.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!=\"function\"&&r!==null)throw new TypeError(\"Class extends value \"+String(r)+\" is not a constructor or null\");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(vn,\"__esModule\",{value:!0});vn.EarlyExitException=vn.NotAllInputParsedException=vn.NoViableAltException=vn.MismatchedTokenException=vn.isRecognitionException=void 0;var xye=Yt(),bG=\"MismatchedTokenException\",QG=\"NoViableAltException\",vG=\"EarlyExitException\",SG=\"NotAllInputParsedException\",kG=[bG,QG,vG,SG];Object.freeze(kG);function Pye(t){return(0,xye.contains)(kG,t.name)}vn.isRecognitionException=Pye;var ty=function(t){Hp(e,t);function e(r,i){var n=this.constructor,s=t.call(this,r)||this;return s.token=i,s.resyncedTokens=[],Object.setPrototypeOf(s,n.prototype),Error.captureStackTrace&&Error.captureStackTrace(s,s.constructor),s}return e}(Error),Dye=function(t){Hp(e,t);function e(r,i,n){var s=t.call(this,r,i)||this;return s.previousToken=n,s.name=bG,s}return e}(ty);vn.MismatchedTokenException=Dye;var Rye=function(t){Hp(e,t);function e(r,i,n){var s=t.call(this,r,i)||this;return s.previousToken=n,s.name=QG,s}return e}(ty);vn.NoViableAltException=Rye;var Fye=function(t){Hp(e,t);function e(r,i){var n=t.call(this,r,i)||this;return n.name=SG,n}return e}(ty);vn.NotAllInputParsedException=Fye;var Nye=function(t){Hp(e,t);function e(r,i,n){var s=t.call(this,r,i)||this;return s.previousToken=n,s.name=vG,s}return e}(ty);vn.EarlyExitException=Nye});var nS=w(Yi=>{\"use strict\";Object.defineProperty(Yi,\"__esModule\",{value:!0});Yi.attemptInRepetitionRecovery=Yi.Recoverable=Yi.InRuleRecoveryException=Yi.IN_RULE_RECOVERY_EXCEPTION=Yi.EOF_FOLLOW_KEY=void 0;var ry=JA(),vs=Yt(),Lye=mg(),Tye=qv(),Oye=Xn();Yi.EOF_FOLLOW_KEY={};Yi.IN_RULE_RECOVERY_EXCEPTION=\"InRuleRecoveryException\";function iS(t){this.name=Yi.IN_RULE_RECOVERY_EXCEPTION,this.message=t}Yi.InRuleRecoveryException=iS;iS.prototype=Error.prototype;var Mye=function(){function t(){}return t.prototype.initRecoverable=function(e){this.firstAfterRepMap={},this.resyncFollows={},this.recoveryEnabled=(0,vs.has)(e,\"recoveryEnabled\")?e.recoveryEnabled:Oye.DEFAULT_PARSER_CONFIG.recoveryEnabled,this.recoveryEnabled&&(this.attemptInRepetitionRecovery=xG)},t.prototype.getTokenToInsert=function(e){var r=(0,ry.createTokenInstance)(e,\"\",NaN,NaN,NaN,NaN,NaN,NaN);return r.isInsertedInRecovery=!0,r},t.prototype.canTokenTypeBeInsertedInRecovery=function(e){return!0},t.prototype.tryInRepetitionRecovery=function(e,r,i,n){for(var s=this,o=this.findReSyncTokenType(),a=this.exportLexerState(),l=[],c=!1,u=this.LA(1),g=this.LA(1),f=function(){var h=s.LA(0),p=s.errorMessageProvider.buildMismatchTokenMessage({expected:n,actual:u,previous:h,ruleName:s.getCurrRuleFullName()}),m=new Lye.MismatchedTokenException(p,u,s.LA(0));m.resyncedTokens=(0,vs.dropRight)(l),s.SAVE_ERROR(m)};!c;)if(this.tokenMatcher(g,n)){f();return}else if(i.call(this)){f(),e.apply(this,r);return}else this.tokenMatcher(g,o)?c=!0:(g=this.SKIP_TOKEN(),this.addToResyncTokens(g,l));this.importLexerState(a)},t.prototype.shouldInRepetitionRecoveryBeTried=function(e,r,i){return!(i===!1||e===void 0||r===void 
0||this.tokenMatcher(this.LA(1),e)||this.isBackTracking()||this.canPerformInRuleRecovery(e,this.getFollowsForInRuleRecovery(e,r)))},t.prototype.getFollowsForInRuleRecovery=function(e,r){var i=this.getCurrentGrammarPath(e,r),n=this.getNextPossibleTokenTypes(i);return n},t.prototype.tryInRuleRecovery=function(e,r){if(this.canRecoverWithSingleTokenInsertion(e,r)){var i=this.getTokenToInsert(e);return i}if(this.canRecoverWithSingleTokenDeletion(e)){var n=this.SKIP_TOKEN();return this.consumeToken(),n}throw new iS(\"sad sad panda\")},t.prototype.canPerformInRuleRecovery=function(e,r){return this.canRecoverWithSingleTokenInsertion(e,r)||this.canRecoverWithSingleTokenDeletion(e)},t.prototype.canRecoverWithSingleTokenInsertion=function(e,r){var i=this;if(!this.canTokenTypeBeInsertedInRecovery(e)||(0,vs.isEmpty)(r))return!1;var n=this.LA(1),s=(0,vs.find)(r,function(o){return i.tokenMatcher(n,o)})!==void 0;return s},t.prototype.canRecoverWithSingleTokenDeletion=function(e){var r=this.tokenMatcher(this.LA(2),e);return r},t.prototype.isInCurrentRuleReSyncSet=function(e){var r=this.getCurrFollowKey(),i=this.getFollowSetFromFollowKey(r);return(0,vs.contains)(i,e)},t.prototype.findReSyncTokenType=function(){for(var e=this.flattenFollowSet(),r=this.LA(1),i=2;;){var n=r.tokenType;if((0,vs.contains)(e,n))return n;r=this.LA(i),i++}},t.prototype.getCurrFollowKey=function(){if(this.RULE_STACK.length===1)return Yi.EOF_FOLLOW_KEY;var e=this.getLastExplicitRuleShortName(),r=this.getLastExplicitRuleOccurrenceIndex(),i=this.getPreviousExplicitRuleShortName();return{ruleName:this.shortRuleNameToFullName(e),idxInCallingRule:r,inRule:this.shortRuleNameToFullName(i)}},t.prototype.buildFullFollowKeyStack=function(){var e=this,r=this.RULE_STACK,i=this.RULE_OCCURRENCE_STACK;return(0,vs.map)(r,function(n,s){return s===0?Yi.EOF_FOLLOW_KEY:{ruleName:e.shortRuleNameToFullName(n),idxInCallingRule:i[s],inRule:e.shortRuleNameToFullName(r[s-1])}})},t.prototype.flattenFollowSet=function(){var e=this,r=(0,vs.map)(this.buildFullFollowKeyStack(),function(i){return e.getFollowSetFromFollowKey(i)});return(0,vs.flatten)(r)},t.prototype.getFollowSetFromFollowKey=function(e){if(e===Yi.EOF_FOLLOW_KEY)return[ry.EOF];var r=e.ruleName+e.idxInCallingRule+Tye.IN+e.inRule;return this.resyncFollows[r]},t.prototype.addToResyncTokens=function(e,r){return this.tokenMatcher(e,ry.EOF)||r.push(e),r},t.prototype.reSyncTo=function(e){for(var r=[],i=this.LA(1);this.tokenMatcher(i,e)===!1;)i=this.SKIP_TOKEN(),this.addToResyncTokens(i,r);return(0,vs.dropRight)(r)},t.prototype.attemptInRepetitionRecovery=function(e,r,i,n,s,o,a){},t.prototype.getCurrentGrammarPath=function(e,r){var i=this.getHumanReadableRuleStack(),n=(0,vs.cloneArr)(this.RULE_OCCURRENCE_STACK),s={ruleStack:i,occurrenceStack:n,lastTok:e,lastTokOccurrence:r};return s},t.prototype.getHumanReadableRuleStack=function(){var e=this;return(0,vs.map)(this.RULE_STACK,function(r){return e.shortRuleNameToFullName(r)})},t}();Yi.Recoverable=Mye;function xG(t,e,r,i,n,s,o){var a=this.getKeyForAutomaticLookahead(i,n),l=this.firstAfterRepMap[a];if(l===void 0){var c=this.getCurrRuleFullName(),u=this.getGAstProductions()[c],g=new s(u,n);l=g.startWalking(),this.firstAfterRepMap[a]=l}var f=l.token,h=l.occurrence,p=l.isEndOfRule;this.RULE_STACK.length===1&&p&&f===void 0&&(f=ry.EOF,h=1),this.shouldInRepetitionRecoveryBeTried(f,h,o)&&this.tryInRepetitionRecovery(t,e,r,f)}Yi.attemptInRepetitionRecovery=xG});var iy=w(Jt=>{\"use 
strict\";Object.defineProperty(Jt,\"__esModule\",{value:!0});Jt.getKeyForAutomaticLookahead=Jt.AT_LEAST_ONE_SEP_IDX=Jt.MANY_SEP_IDX=Jt.AT_LEAST_ONE_IDX=Jt.MANY_IDX=Jt.OPTION_IDX=Jt.OR_IDX=Jt.BITS_FOR_ALT_IDX=Jt.BITS_FOR_RULE_IDX=Jt.BITS_FOR_OCCURRENCE_IDX=Jt.BITS_FOR_METHOD_TYPE=void 0;Jt.BITS_FOR_METHOD_TYPE=4;Jt.BITS_FOR_OCCURRENCE_IDX=8;Jt.BITS_FOR_RULE_IDX=12;Jt.BITS_FOR_ALT_IDX=8;Jt.OR_IDX=1<<Jt.BITS_FOR_OCCURRENCE_IDX;Jt.OPTION_IDX=2<<Jt.BITS_FOR_OCCURRENCE_IDX;Jt.MANY_IDX=3<<Jt.BITS_FOR_OCCURRENCE_IDX;Jt.AT_LEAST_ONE_IDX=4<<Jt.BITS_FOR_OCCURRENCE_IDX;Jt.MANY_SEP_IDX=5<<Jt.BITS_FOR_OCCURRENCE_IDX;Jt.AT_LEAST_ONE_SEP_IDX=6<<Jt.BITS_FOR_OCCURRENCE_IDX;function Uye(t,e,r){return r|e|t}Jt.getKeyForAutomaticLookahead=Uye;var Rtt=32-Jt.BITS_FOR_ALT_IDX});var DG=w(ny=>{\"use strict\";Object.defineProperty(ny,\"__esModule\",{value:!0});ny.LooksAhead=void 0;var Ha=Up(),lo=Yt(),PG=Xn(),ja=iy(),Qc=Lp(),Kye=function(){function t(){}return t.prototype.initLooksAhead=function(e){this.dynamicTokensEnabled=(0,lo.has)(e,\"dynamicTokensEnabled\")?e.dynamicTokensEnabled:PG.DEFAULT_PARSER_CONFIG.dynamicTokensEnabled,this.maxLookahead=(0,lo.has)(e,\"maxLookahead\")?e.maxLookahead:PG.DEFAULT_PARSER_CONFIG.maxLookahead,this.lookAheadFuncsCache=(0,lo.isES2015MapSupported)()?new Map:[],(0,lo.isES2015MapSupported)()?(this.getLaFuncFromCache=this.getLaFuncFromMap,this.setLaFuncCache=this.setLaFuncCacheUsingMap):(this.getLaFuncFromCache=this.getLaFuncFromObj,this.setLaFuncCache=this.setLaFuncUsingObj)},t.prototype.preComputeLookaheadFunctions=function(e){var r=this;(0,lo.forEach)(e,function(i){r.TRACE_INIT(i.name+\" Rule Lookahead\",function(){var n=(0,Qc.collectMethods)(i),s=n.alternation,o=n.repetition,a=n.option,l=n.repetitionMandatory,c=n.repetitionMandatoryWithSeparator,u=n.repetitionWithSeparator;(0,lo.forEach)(s,function(g){var f=g.idx===0?\"\":g.idx;r.TRACE_INIT(\"\"+(0,Qc.getProductionDslName)(g)+f,function(){var h=(0,Ha.buildLookaheadFuncForOr)(g.idx,i,g.maxLookahead||r.maxLookahead,g.hasPredicates,r.dynamicTokensEnabled,r.lookAheadBuilderForAlternatives),p=(0,ja.getKeyForAutomaticLookahead)(r.fullRuleNameToShort[i.name],ja.OR_IDX,g.idx);r.setLaFuncCache(p,h)})}),(0,lo.forEach)(o,function(g){r.computeLookaheadFunc(i,g.idx,ja.MANY_IDX,Ha.PROD_TYPE.REPETITION,g.maxLookahead,(0,Qc.getProductionDslName)(g))}),(0,lo.forEach)(a,function(g){r.computeLookaheadFunc(i,g.idx,ja.OPTION_IDX,Ha.PROD_TYPE.OPTION,g.maxLookahead,(0,Qc.getProductionDslName)(g))}),(0,lo.forEach)(l,function(g){r.computeLookaheadFunc(i,g.idx,ja.AT_LEAST_ONE_IDX,Ha.PROD_TYPE.REPETITION_MANDATORY,g.maxLookahead,(0,Qc.getProductionDslName)(g))}),(0,lo.forEach)(c,function(g){r.computeLookaheadFunc(i,g.idx,ja.AT_LEAST_ONE_SEP_IDX,Ha.PROD_TYPE.REPETITION_MANDATORY_WITH_SEPARATOR,g.maxLookahead,(0,Qc.getProductionDslName)(g))}),(0,lo.forEach)(u,function(g){r.computeLookaheadFunc(i,g.idx,ja.MANY_SEP_IDX,Ha.PROD_TYPE.REPETITION_WITH_SEPARATOR,g.maxLookahead,(0,Qc.getProductionDslName)(g))})})})},t.prototype.computeLookaheadFunc=function(e,r,i,n,s,o){var a=this;this.TRACE_INIT(\"\"+o+(r===0?\"\":r),function(){var 
l=(0,Ha.buildLookaheadFuncForOptionalProd)(r,e,s||a.maxLookahead,a.dynamicTokensEnabled,n,a.lookAheadBuilderForOptional),c=(0,ja.getKeyForAutomaticLookahead)(a.fullRuleNameToShort[e.name],i,r);a.setLaFuncCache(c,l)})},t.prototype.lookAheadBuilderForOptional=function(e,r,i){return(0,Ha.buildSingleAlternativeLookaheadFunction)(e,r,i)},t.prototype.lookAheadBuilderForAlternatives=function(e,r,i,n){return(0,Ha.buildAlternativesLookAheadFunc)(e,r,i,n)},t.prototype.getKeyForAutomaticLookahead=function(e,r){var i=this.getLastExplicitRuleShortName();return(0,ja.getKeyForAutomaticLookahead)(i,e,r)},t.prototype.getLaFuncFromCache=function(e){},t.prototype.getLaFuncFromMap=function(e){return this.lookAheadFuncsCache.get(e)},t.prototype.getLaFuncFromObj=function(e){return this.lookAheadFuncsCache[e]},t.prototype.setLaFuncCache=function(e,r){},t.prototype.setLaFuncCacheUsingMap=function(e,r){this.lookAheadFuncsCache.set(e,r)},t.prototype.setLaFuncUsingObj=function(e,r){this.lookAheadFuncsCache[e]=r},t}();ny.LooksAhead=Kye});var RG=w(Ko=>{\"use strict\";Object.defineProperty(Ko,\"__esModule\",{value:!0});Ko.addNoneTerminalToCst=Ko.addTerminalToCst=Ko.setNodeLocationFull=Ko.setNodeLocationOnlyOffset=void 0;function Hye(t,e){isNaN(t.startOffset)===!0?(t.startOffset=e.startOffset,t.endOffset=e.endOffset):t.endOffset<e.endOffset&&(t.endOffset=e.endOffset)}Ko.setNodeLocationOnlyOffset=Hye;function jye(t,e){isNaN(t.startOffset)===!0?(t.startOffset=e.startOffset,t.startColumn=e.startColumn,t.startLine=e.startLine,t.endOffset=e.endOffset,t.endColumn=e.endColumn,t.endLine=e.endLine):t.endOffset<e.endOffset&&(t.endOffset=e.endOffset,t.endColumn=e.endColumn,t.endLine=e.endLine)}Ko.setNodeLocationFull=jye;function Gye(t,e,r){t.children[r]===void 0?t.children[r]=[e]:t.children[r].push(e)}Ko.addTerminalToCst=Gye;function Yye(t,e,r){t.children[e]===void 0?t.children[e]=[r]:t.children[e].push(r)}Ko.addNoneTerminalToCst=Yye});var sS=w(_A=>{\"use strict\";Object.defineProperty(_A,\"__esModule\",{value:!0});_A.defineNameProp=_A.functionName=_A.classNameFromInstance=void 0;var qye=Yt();function Jye(t){return FG(t.constructor)}_A.classNameFromInstance=Jye;var NG=\"name\";function FG(t){var e=t.name;return e||\"anonymous\"}_A.functionName=FG;function Wye(t,e){var r=Object.getOwnPropertyDescriptor(t,NG);return(0,qye.isUndefined)(r)||r.configurable?(Object.defineProperty(t,NG,{enumerable:!1,configurable:!0,writable:!1,value:e}),!0):!1}_A.defineNameProp=Wye});var UG=w(Pi=>{\"use strict\";Object.defineProperty(Pi,\"__esModule\",{value:!0});Pi.validateRedundantMethods=Pi.validateMissingCstMethods=Pi.validateVisitor=Pi.CstVisitorDefinitionError=Pi.createBaseVisitorConstructorWithDefaults=Pi.createBaseSemanticVisitorConstructor=Pi.defaultVisit=void 0;var Ss=Yt(),jp=sS();function LG(t,e){for(var r=(0,Ss.keys)(t),i=r.length,n=0;n<i;n++)for(var s=r[n],o=t[s],a=o.length,l=0;l<a;l++){var c=o[l];c.tokenTypeIdx===void 0&&this[c.name](c.children,e)}}Pi.defaultVisit=LG;function zye(t,e){var r=function(){};(0,jp.defineNameProp)(r,t+\"BaseSemantics\");var i={visit:function(n,s){if((0,Ss.isArray)(n)&&(n=n[0]),!(0,Ss.isUndefined)(n))return this[n.name](n.children,s)},validateVisitor:function(){var n=TG(this,e);if(!(0,Ss.isEmpty)(n)){var s=(0,Ss.map)(n,function(o){return o.msg});throw Error(\"Errors Detected in CST Visitor <\"+(0,jp.functionName)(this.constructor)+`>:\n\t`+(\"\"+s.join(`\n\n`).replace(/\\n/g,`\n\t`)))}}};return r.prototype=i,r.prototype.constructor=r,r._RULE_NAMES=e,r}Pi.createBaseSemanticVisitorConstructor=zye;function 
_ye(t,e,r){var i=function(){};(0,jp.defineNameProp)(i,t+\"BaseSemanticsWithDefaults\");var n=Object.create(r.prototype);return(0,Ss.forEach)(e,function(s){n[s]=LG}),i.prototype=n,i.prototype.constructor=i,i}Pi.createBaseVisitorConstructorWithDefaults=_ye;var oS;(function(t){t[t.REDUNDANT_METHOD=0]=\"REDUNDANT_METHOD\",t[t.MISSING_METHOD=1]=\"MISSING_METHOD\"})(oS=Pi.CstVisitorDefinitionError||(Pi.CstVisitorDefinitionError={}));function TG(t,e){var r=OG(t,e),i=MG(t,e);return r.concat(i)}Pi.validateVisitor=TG;function OG(t,e){var r=(0,Ss.map)(e,function(i){if(!(0,Ss.isFunction)(t[i]))return{msg:\"Missing visitor method: <\"+i+\"> on \"+(0,jp.functionName)(t.constructor)+\" CST Visitor.\",type:oS.MISSING_METHOD,methodName:i}});return(0,Ss.compact)(r)}Pi.validateMissingCstMethods=OG;var Vye=[\"constructor\",\"visit\",\"validateVisitor\"];function MG(t,e){var r=[];for(var i in t)(0,Ss.isFunction)(t[i])&&!(0,Ss.contains)(Vye,i)&&!(0,Ss.contains)(e,i)&&r.push({msg:\"Redundant visitor method: <\"+i+\"> on \"+(0,jp.functionName)(t.constructor)+` CST Visitor\nThere is no Grammar Rule corresponding to this method's name.\n`,type:oS.REDUNDANT_METHOD,methodName:i});return r}Pi.validateRedundantMethods=MG});var HG=w(sy=>{\"use strict\";Object.defineProperty(sy,\"__esModule\",{value:!0});sy.TreeBuilder=void 0;var Eg=RG(),ii=Yt(),KG=UG(),Xye=Xn(),Zye=function(){function t(){}return t.prototype.initTreeBuilder=function(e){if(this.CST_STACK=[],this.outputCst=e.outputCst,this.nodeLocationTracking=(0,ii.has)(e,\"nodeLocationTracking\")?e.nodeLocationTracking:Xye.DEFAULT_PARSER_CONFIG.nodeLocationTracking,!this.outputCst)this.cstInvocationStateUpdate=ii.NOOP,this.cstFinallyStateUpdate=ii.NOOP,this.cstPostTerminal=ii.NOOP,this.cstPostNonTerminal=ii.NOOP,this.cstPostRule=ii.NOOP;else if(/full/i.test(this.nodeLocationTracking))this.recoveryEnabled?(this.setNodeLocationFromToken=Eg.setNodeLocationFull,this.setNodeLocationFromNode=Eg.setNodeLocationFull,this.cstPostRule=ii.NOOP,this.setInitialNodeLocation=this.setInitialNodeLocationFullRecovery):(this.setNodeLocationFromToken=ii.NOOP,this.setNodeLocationFromNode=ii.NOOP,this.cstPostRule=this.cstPostRuleFull,this.setInitialNodeLocation=this.setInitialNodeLocationFullRegular);else if(/onlyOffset/i.test(this.nodeLocationTracking))this.recoveryEnabled?(this.setNodeLocationFromToken=Eg.setNodeLocationOnlyOffset,this.setNodeLocationFromNode=Eg.setNodeLocationOnlyOffset,this.cstPostRule=ii.NOOP,this.setInitialNodeLocation=this.setInitialNodeLocationOnlyOffsetRecovery):(this.setNodeLocationFromToken=ii.NOOP,this.setNodeLocationFromNode=ii.NOOP,this.cstPostRule=this.cstPostRuleOnlyOffset,this.setInitialNodeLocation=this.setInitialNodeLocationOnlyOffsetRegular);else if(/none/i.test(this.nodeLocationTracking))this.setNodeLocationFromToken=ii.NOOP,this.setNodeLocationFromNode=ii.NOOP,this.cstPostRule=ii.NOOP,this.setInitialNodeLocation=ii.NOOP;else throw Error('Invalid <nodeLocationTracking> config option: \"'+e.nodeLocationTracking+'\"')},t.prototype.setInitialNodeLocationOnlyOffsetRecovery=function(e){e.location={startOffset:NaN,endOffset:NaN}},t.prototype.setInitialNodeLocationOnlyOffsetRegular=function(e){e.location={startOffset:this.LA(1).startOffset,endOffset:NaN}},t.prototype.setInitialNodeLocationFullRecovery=function(e){e.location={startOffset:NaN,startLine:NaN,startColumn:NaN,endOffset:NaN,endLine:NaN,endColumn:NaN}},t.prototype.setInitialNodeLocationFullRegular=function(e){var 
r=this.LA(1);e.location={startOffset:r.startOffset,startLine:r.startLine,startColumn:r.startColumn,endOffset:NaN,endLine:NaN,endColumn:NaN}},t.prototype.cstInvocationStateUpdate=function(e,r){var i={name:e,children:{}};this.setInitialNodeLocation(i),this.CST_STACK.push(i)},t.prototype.cstFinallyStateUpdate=function(){this.CST_STACK.pop()},t.prototype.cstPostRuleFull=function(e){var r=this.LA(0),i=e.location;i.startOffset<=r.startOffset?(i.endOffset=r.endOffset,i.endLine=r.endLine,i.endColumn=r.endColumn):(i.startOffset=NaN,i.startLine=NaN,i.startColumn=NaN)},t.prototype.cstPostRuleOnlyOffset=function(e){var r=this.LA(0),i=e.location;i.startOffset<=r.startOffset?i.endOffset=r.endOffset:i.startOffset=NaN},t.prototype.cstPostTerminal=function(e,r){var i=this.CST_STACK[this.CST_STACK.length-1];(0,Eg.addTerminalToCst)(i,r,e),this.setNodeLocationFromToken(i.location,r)},t.prototype.cstPostNonTerminal=function(e,r){var i=this.CST_STACK[this.CST_STACK.length-1];(0,Eg.addNoneTerminalToCst)(i,r,e),this.setNodeLocationFromNode(i.location,e.location)},t.prototype.getBaseCstVisitorConstructor=function(){if((0,ii.isUndefined)(this.baseCstVisitorConstructor)){var e=(0,KG.createBaseSemanticVisitorConstructor)(this.className,(0,ii.keys)(this.gastProductionsCache));return this.baseCstVisitorConstructor=e,e}return this.baseCstVisitorConstructor},t.prototype.getBaseCstVisitorConstructorWithDefaults=function(){if((0,ii.isUndefined)(this.baseCstVisitorWithDefaultsConstructor)){var e=(0,KG.createBaseVisitorConstructorWithDefaults)(this.className,(0,ii.keys)(this.gastProductionsCache),this.getBaseCstVisitorConstructor());return this.baseCstVisitorWithDefaultsConstructor=e,e}return this.baseCstVisitorWithDefaultsConstructor},t.prototype.getLastExplicitRuleShortName=function(){var e=this.RULE_STACK;return e[e.length-1]},t.prototype.getPreviousExplicitRuleShortName=function(){var e=this.RULE_STACK;return e[e.length-2]},t.prototype.getLastExplicitRuleOccurrenceIndex=function(){var e=this.RULE_OCCURRENCE_STACK;return e[e.length-1]},t}();sy.TreeBuilder=Zye});var GG=w(oy=>{\"use strict\";Object.defineProperty(oy,\"__esModule\",{value:!0});oy.LexerAdapter=void 0;var jG=Xn(),$ye=function(){function t(){}return t.prototype.initLexerAdapter=function(){this.tokVector=[],this.tokVectorLength=0,this.currIdx=-1},Object.defineProperty(t.prototype,\"input\",{get:function(){return this.tokVector},set:function(e){if(this.selfAnalysisDone!==!0)throw Error(\"Missing <performSelfAnalysis> invocation at the end of the Parser's constructor.\");this.reset(),this.tokVector=e,this.tokVectorLength=e.length},enumerable:!1,configurable:!0}),t.prototype.SKIP_TOKEN=function(){return this.currIdx<=this.tokVector.length-2?(this.consumeToken(),this.LA(1)):jG.END_OF_FILE},t.prototype.LA=function(e){var r=this.currIdx+e;return r<0||this.tokVectorLength<=r?jG.END_OF_FILE:this.tokVector[r]},t.prototype.consumeToken=function(){this.currIdx++},t.prototype.exportLexerState=function(){return this.currIdx},t.prototype.importLexerState=function(e){this.currIdx=e},t.prototype.resetLexerState=function(){this.currIdx=-1},t.prototype.moveToTerminatedState=function(){this.currIdx=this.tokVector.length-1},t.prototype.getLexerPosition=function(){return this.exportLexerState()},t}();oy.LexerAdapter=$ye});var qG=w(ay=>{\"use strict\";Object.defineProperty(ay,\"__esModule\",{value:!0});ay.RecognizerApi=void 0;var YG=Yt(),ewe=mg(),aS=Xn(),twe=Tp(),rwe=tS(),iwe=bn(),nwe=function(){function t(){}return t.prototype.ACTION=function(e){return 
e.call(this)},t.prototype.consume=function(e,r,i){return this.consumeInternal(r,e,i)},t.prototype.subrule=function(e,r,i){return this.subruleInternal(r,e,i)},t.prototype.option=function(e,r){return this.optionInternal(r,e)},t.prototype.or=function(e,r){return this.orInternal(r,e)},t.prototype.many=function(e,r){return this.manyInternal(e,r)},t.prototype.atLeastOne=function(e,r){return this.atLeastOneInternal(e,r)},t.prototype.CONSUME=function(e,r){return this.consumeInternal(e,0,r)},t.prototype.CONSUME1=function(e,r){return this.consumeInternal(e,1,r)},t.prototype.CONSUME2=function(e,r){return this.consumeInternal(e,2,r)},t.prototype.CONSUME3=function(e,r){return this.consumeInternal(e,3,r)},t.prototype.CONSUME4=function(e,r){return this.consumeInternal(e,4,r)},t.prototype.CONSUME5=function(e,r){return this.consumeInternal(e,5,r)},t.prototype.CONSUME6=function(e,r){return this.consumeInternal(e,6,r)},t.prototype.CONSUME7=function(e,r){return this.consumeInternal(e,7,r)},t.prototype.CONSUME8=function(e,r){return this.consumeInternal(e,8,r)},t.prototype.CONSUME9=function(e,r){return this.consumeInternal(e,9,r)},t.prototype.SUBRULE=function(e,r){return this.subruleInternal(e,0,r)},t.prototype.SUBRULE1=function(e,r){return this.subruleInternal(e,1,r)},t.prototype.SUBRULE2=function(e,r){return this.subruleInternal(e,2,r)},t.prototype.SUBRULE3=function(e,r){return this.subruleInternal(e,3,r)},t.prototype.SUBRULE4=function(e,r){return this.subruleInternal(e,4,r)},t.prototype.SUBRULE5=function(e,r){return this.subruleInternal(e,5,r)},t.prototype.SUBRULE6=function(e,r){return this.subruleInternal(e,6,r)},t.prototype.SUBRULE7=function(e,r){return this.subruleInternal(e,7,r)},t.prototype.SUBRULE8=function(e,r){return this.subruleInternal(e,8,r)},t.prototype.SUBRULE9=function(e,r){return this.subruleInternal(e,9,r)},t.prototype.OPTION=function(e){return this.optionInternal(e,0)},t.prototype.OPTION1=function(e){return this.optionInternal(e,1)},t.prototype.OPTION2=function(e){return this.optionInternal(e,2)},t.prototype.OPTION3=function(e){return this.optionInternal(e,3)},t.prototype.OPTION4=function(e){return this.optionInternal(e,4)},t.prototype.OPTION5=function(e){return this.optionInternal(e,5)},t.prototype.OPTION6=function(e){return this.optionInternal(e,6)},t.prototype.OPTION7=function(e){return this.optionInternal(e,7)},t.prototype.OPTION8=function(e){return this.optionInternal(e,8)},t.prototype.OPTION9=function(e){return this.optionInternal(e,9)},t.prototype.OR=function(e){return this.orInternal(e,0)},t.prototype.OR1=function(e){return this.orInternal(e,1)},t.prototype.OR2=function(e){return this.orInternal(e,2)},t.prototype.OR3=function(e){return this.orInternal(e,3)},t.prototype.OR4=function(e){return this.orInternal(e,4)},t.prototype.OR5=function(e){return this.orInternal(e,5)},t.prototype.OR6=function(e){return this.orInternal(e,6)},t.prototype.OR7=function(e){return this.orInternal(e,7)},t.prototype.OR8=function(e){return this.orInternal(e,8)},t.prototype.OR9=function(e){return 
this.orInternal(e,9)},t.prototype.MANY=function(e){this.manyInternal(0,e)},t.prototype.MANY1=function(e){this.manyInternal(1,e)},t.prototype.MANY2=function(e){this.manyInternal(2,e)},t.prototype.MANY3=function(e){this.manyInternal(3,e)},t.prototype.MANY4=function(e){this.manyInternal(4,e)},t.prototype.MANY5=function(e){this.manyInternal(5,e)},t.prototype.MANY6=function(e){this.manyInternal(6,e)},t.prototype.MANY7=function(e){this.manyInternal(7,e)},t.prototype.MANY8=function(e){this.manyInternal(8,e)},t.prototype.MANY9=function(e){this.manyInternal(9,e)},t.prototype.MANY_SEP=function(e){this.manySepFirstInternal(0,e)},t.prototype.MANY_SEP1=function(e){this.manySepFirstInternal(1,e)},t.prototype.MANY_SEP2=function(e){this.manySepFirstInternal(2,e)},t.prototype.MANY_SEP3=function(e){this.manySepFirstInternal(3,e)},t.prototype.MANY_SEP4=function(e){this.manySepFirstInternal(4,e)},t.prototype.MANY_SEP5=function(e){this.manySepFirstInternal(5,e)},t.prototype.MANY_SEP6=function(e){this.manySepFirstInternal(6,e)},t.prototype.MANY_SEP7=function(e){this.manySepFirstInternal(7,e)},t.prototype.MANY_SEP8=function(e){this.manySepFirstInternal(8,e)},t.prototype.MANY_SEP9=function(e){this.manySepFirstInternal(9,e)},t.prototype.AT_LEAST_ONE=function(e){this.atLeastOneInternal(0,e)},t.prototype.AT_LEAST_ONE1=function(e){return this.atLeastOneInternal(1,e)},t.prototype.AT_LEAST_ONE2=function(e){this.atLeastOneInternal(2,e)},t.prototype.AT_LEAST_ONE3=function(e){this.atLeastOneInternal(3,e)},t.prototype.AT_LEAST_ONE4=function(e){this.atLeastOneInternal(4,e)},t.prototype.AT_LEAST_ONE5=function(e){this.atLeastOneInternal(5,e)},t.prototype.AT_LEAST_ONE6=function(e){this.atLeastOneInternal(6,e)},t.prototype.AT_LEAST_ONE7=function(e){this.atLeastOneInternal(7,e)},t.prototype.AT_LEAST_ONE8=function(e){this.atLeastOneInternal(8,e)},t.prototype.AT_LEAST_ONE9=function(e){this.atLeastOneInternal(9,e)},t.prototype.AT_LEAST_ONE_SEP=function(e){this.atLeastOneSepFirstInternal(0,e)},t.prototype.AT_LEAST_ONE_SEP1=function(e){this.atLeastOneSepFirstInternal(1,e)},t.prototype.AT_LEAST_ONE_SEP2=function(e){this.atLeastOneSepFirstInternal(2,e)},t.prototype.AT_LEAST_ONE_SEP3=function(e){this.atLeastOneSepFirstInternal(3,e)},t.prototype.AT_LEAST_ONE_SEP4=function(e){this.atLeastOneSepFirstInternal(4,e)},t.prototype.AT_LEAST_ONE_SEP5=function(e){this.atLeastOneSepFirstInternal(5,e)},t.prototype.AT_LEAST_ONE_SEP6=function(e){this.atLeastOneSepFirstInternal(6,e)},t.prototype.AT_LEAST_ONE_SEP7=function(e){this.atLeastOneSepFirstInternal(7,e)},t.prototype.AT_LEAST_ONE_SEP8=function(e){this.atLeastOneSepFirstInternal(8,e)},t.prototype.AT_LEAST_ONE_SEP9=function(e){this.atLeastOneSepFirstInternal(9,e)},t.prototype.RULE=function(e,r,i){if(i===void 0&&(i=aS.DEFAULT_RULE_CONFIG),(0,YG.contains)(this.definedRulesNames,e)){var n=twe.defaultGrammarValidatorErrorProvider.buildDuplicateRuleNameError({topLevelRule:e,grammarName:this.className}),s={message:n,type:aS.ParserDefinitionErrorType.DUPLICATE_RULE_NAME,ruleName:e};this.definitionErrors.push(s)}this.definedRulesNames.push(e);var o=this.defineRule(e,r,i);return this[e]=o,o},t.prototype.OVERRIDE_RULE=function(e,r,i){i===void 0&&(i=aS.DEFAULT_RULE_CONFIG);var n=[];n=n.concat((0,rwe.validateRuleIsOverridden)(e,this.definedRulesNames,this.className)),this.definitionErrors=this.definitionErrors.concat(n);var s=this.defineRule(e,r,i);return this[e]=s,s},t.prototype.BACKTRACK=function(e,r){return function(){this.isBackTrackingStack.push(1);var i=this.saveRecogState();try{return 
e.apply(this,r),!0}catch(n){if((0,ewe.isRecognitionException)(n))return!1;throw n}finally{this.reloadRecogState(i),this.isBackTrackingStack.pop()}}},t.prototype.getGAstProductions=function(){return this.gastProductionsCache},t.prototype.getSerializedGastProductions=function(){return(0,iwe.serializeGrammar)((0,YG.values)(this.gastProductionsCache))},t}();ay.RecognizerApi=nwe});var _G=w(Ay=>{\"use strict\";Object.defineProperty(Ay,\"__esModule\",{value:!0});Ay.RecognizerEngine=void 0;var Rr=Yt(),Zn=iy(),ly=mg(),JG=Up(),Ig=Mp(),WG=Xn(),swe=nS(),zG=JA(),Gp=fg(),owe=sS(),awe=function(){function t(){}return t.prototype.initRecognizerEngine=function(e,r){if(this.className=(0,owe.classNameFromInstance)(this),this.shortRuleNameToFull={},this.fullRuleNameToShort={},this.ruleShortNameIdx=256,this.tokenMatcher=Gp.tokenStructuredMatcherNoCategories,this.definedRulesNames=[],this.tokensMap={},this.isBackTrackingStack=[],this.RULE_STACK=[],this.RULE_OCCURRENCE_STACK=[],this.gastProductionsCache={},(0,Rr.has)(r,\"serializedGrammar\"))throw Error(`The Parser's configuration can no longer contain a <serializedGrammar> property.\n\tSee: https://chevrotain.io/docs/changes/BREAKING_CHANGES.html#_6-0-0\n\tFor Further details.`);if((0,Rr.isArray)(e)){if((0,Rr.isEmpty)(e))throw Error(`A Token Vocabulary cannot be empty.\n\tNote that the first argument for the parser constructor\n\tis no longer a Token vector (since v4.0).`);if(typeof e[0].startOffset==\"number\")throw Error(`The Parser constructor no longer accepts a token vector as the first argument.\n\tSee: https://chevrotain.io/docs/changes/BREAKING_CHANGES.html#_4-0-0\n\tFor Further details.`)}if((0,Rr.isArray)(e))this.tokensMap=(0,Rr.reduce)(e,function(o,a){return o[a.name]=a,o},{});else if((0,Rr.has)(e,\"modes\")&&(0,Rr.every)((0,Rr.flatten)((0,Rr.values)(e.modes)),Gp.isTokenType)){var i=(0,Rr.flatten)((0,Rr.values)(e.modes)),n=(0,Rr.uniq)(i);this.tokensMap=(0,Rr.reduce)(n,function(o,a){return o[a.name]=a,o},{})}else if((0,Rr.isObject)(e))this.tokensMap=(0,Rr.cloneObj)(e);else throw new Error(\"<tokensDictionary> argument must be An Array of Token constructors, A dictionary of Token constructors or an IMultiModeLexerDefinition\");this.tokensMap.EOF=zG.EOF;var s=(0,Rr.every)((0,Rr.values)(e),function(o){return(0,Rr.isEmpty)(o.categoryMatches)});this.tokenMatcher=s?Gp.tokenStructuredMatcherNoCategories:Gp.tokenStructuredMatcher,(0,Gp.augmentTokenTypes)((0,Rr.values)(this.tokensMap))},t.prototype.defineRule=function(e,r,i){if(this.selfAnalysisDone)throw Error(\"Grammar rule <\"+e+`> may not be defined after the 'performSelfAnalysis' method has been called'\nMake sure that all grammar rule definitions are done before 'performSelfAnalysis' is called.`);var n=(0,Rr.has)(i,\"resyncEnabled\")?i.resyncEnabled:WG.DEFAULT_RULE_CONFIG.resyncEnabled,s=(0,Rr.has)(i,\"recoveryValueFunc\")?i.recoveryValueFunc:WG.DEFAULT_RULE_CONFIG.recoveryValueFunc,o=this.ruleShortNameIdx<<Zn.BITS_FOR_METHOD_TYPE+Zn.BITS_FOR_OCCURRENCE_IDX;this.ruleShortNameIdx++,this.shortRuleNameToFull[o]=e,this.fullRuleNameToShort[e]=o;function a(u){try{if(this.outputCst===!0){r.apply(this,u);var g=this.CST_STACK[this.CST_STACK.length-1];return this.cstPostRule(g),g}else return r.apply(this,u)}catch(f){return this.invokeRuleCatch(f,n,s)}finally{this.ruleFinallyStateUpdate()}}var l=function(u,g){return u===void 0&&(u=0),this.ruleInvocationStateUpdate(o,e,u),a.call(this,g)},c=\"ruleName\";return l[c]=e,l.originalGrammarAction=r,l},t.prototype.invokeRuleCatch=function(e,r,i){var 
n=this.RULE_STACK.length===1,s=r&&!this.isBackTracking()&&this.recoveryEnabled;if((0,ly.isRecognitionException)(e)){var o=e;if(s){var a=this.findReSyncTokenType();if(this.isInCurrentRuleReSyncSet(a))if(o.resyncedTokens=this.reSyncTo(a),this.outputCst){var l=this.CST_STACK[this.CST_STACK.length-1];return l.recoveredNode=!0,l}else return i();else{if(this.outputCst){var l=this.CST_STACK[this.CST_STACK.length-1];l.recoveredNode=!0,o.partialCstResult=l}throw o}}else{if(n)return this.moveToTerminatedState(),i();throw o}}else throw e},t.prototype.optionInternal=function(e,r){var i=this.getKeyForAutomaticLookahead(Zn.OPTION_IDX,r);return this.optionInternalLogic(e,r,i)},t.prototype.optionInternalLogic=function(e,r,i){var n=this,s=this.getLaFuncFromCache(i),o,a;if(e.DEF!==void 0){if(o=e.DEF,a=e.GATE,a!==void 0){var l=s;s=function(){return a.call(n)&&l.call(n)}}}else o=e;if(s.call(this)===!0)return o.call(this)},t.prototype.atLeastOneInternal=function(e,r){var i=this.getKeyForAutomaticLookahead(Zn.AT_LEAST_ONE_IDX,e);return this.atLeastOneInternalLogic(e,r,i)},t.prototype.atLeastOneInternalLogic=function(e,r,i){var n=this,s=this.getLaFuncFromCache(i),o,a;if(r.DEF!==void 0){if(o=r.DEF,a=r.GATE,a!==void 0){var l=s;s=function(){return a.call(n)&&l.call(n)}}}else o=r;if(s.call(this)===!0)for(var c=this.doSingleRepetition(o);s.call(this)===!0&&c===!0;)c=this.doSingleRepetition(o);else throw this.raiseEarlyExitException(e,JG.PROD_TYPE.REPETITION_MANDATORY,r.ERR_MSG);this.attemptInRepetitionRecovery(this.atLeastOneInternal,[e,r],s,Zn.AT_LEAST_ONE_IDX,e,Ig.NextTerminalAfterAtLeastOneWalker)},t.prototype.atLeastOneSepFirstInternal=function(e,r){var i=this.getKeyForAutomaticLookahead(Zn.AT_LEAST_ONE_SEP_IDX,e);this.atLeastOneSepFirstInternalLogic(e,r,i)},t.prototype.atLeastOneSepFirstInternalLogic=function(e,r,i){var n=this,s=r.DEF,o=r.SEP,a=this.getLaFuncFromCache(i);if(a.call(this)===!0){s.call(this);for(var l=function(){return n.tokenMatcher(n.LA(1),o)};this.tokenMatcher(this.LA(1),o)===!0;)this.CONSUME(o),s.call(this);this.attemptInRepetitionRecovery(this.repetitionSepSecondInternal,[e,o,l,s,Ig.NextTerminalAfterAtLeastOneSepWalker],l,Zn.AT_LEAST_ONE_SEP_IDX,e,Ig.NextTerminalAfterAtLeastOneSepWalker)}else throw this.raiseEarlyExitException(e,JG.PROD_TYPE.REPETITION_MANDATORY_WITH_SEPARATOR,r.ERR_MSG)},t.prototype.manyInternal=function(e,r){var i=this.getKeyForAutomaticLookahead(Zn.MANY_IDX,e);return this.manyInternalLogic(e,r,i)},t.prototype.manyInternalLogic=function(e,r,i){var n=this,s=this.getLaFuncFromCache(i),o,a;if(r.DEF!==void 0){if(o=r.DEF,a=r.GATE,a!==void 0){var l=s;s=function(){return a.call(n)&&l.call(n)}}}else o=r;for(var c=!0;s.call(this)===!0&&c===!0;)c=this.doSingleRepetition(o);this.attemptInRepetitionRecovery(this.manyInternal,[e,r],s,Zn.MANY_IDX,e,Ig.NextTerminalAfterManyWalker,c)},t.prototype.manySepFirstInternal=function(e,r){var i=this.getKeyForAutomaticLookahead(Zn.MANY_SEP_IDX,e);this.manySepFirstInternalLogic(e,r,i)},t.prototype.manySepFirstInternalLogic=function(e,r,i){var n=this,s=r.DEF,o=r.SEP,a=this.getLaFuncFromCache(i);if(a.call(this)===!0){s.call(this);for(var l=function(){return 
n.tokenMatcher(n.LA(1),o)};this.tokenMatcher(this.LA(1),o)===!0;)this.CONSUME(o),s.call(this);this.attemptInRepetitionRecovery(this.repetitionSepSecondInternal,[e,o,l,s,Ig.NextTerminalAfterManySepWalker],l,Zn.MANY_SEP_IDX,e,Ig.NextTerminalAfterManySepWalker)}},t.prototype.repetitionSepSecondInternal=function(e,r,i,n,s){for(;i();)this.CONSUME(r),n.call(this);this.attemptInRepetitionRecovery(this.repetitionSepSecondInternal,[e,r,i,n,s],i,Zn.AT_LEAST_ONE_SEP_IDX,e,s)},t.prototype.doSingleRepetition=function(e){var r=this.getLexerPosition();e.call(this);var i=this.getLexerPosition();return i>r},t.prototype.orInternal=function(e,r){var i=this.getKeyForAutomaticLookahead(Zn.OR_IDX,r),n=(0,Rr.isArray)(e)?e:e.DEF,s=this.getLaFuncFromCache(i),o=s.call(this,n);if(o!==void 0){var a=n[o];return a.ALT.call(this)}this.raiseNoAltException(r,e.ERR_MSG)},t.prototype.ruleFinallyStateUpdate=function(){if(this.RULE_STACK.pop(),this.RULE_OCCURRENCE_STACK.pop(),this.cstFinallyStateUpdate(),this.RULE_STACK.length===0&&this.isAtEndOfInput()===!1){var e=this.LA(1),r=this.errorMessageProvider.buildNotAllInputParsedMessage({firstRedundant:e,ruleName:this.getCurrRuleFullName()});this.SAVE_ERROR(new ly.NotAllInputParsedException(r,e))}},t.prototype.subruleInternal=function(e,r,i){var n;try{var s=i!==void 0?i.ARGS:void 0;return n=e.call(this,r,s),this.cstPostNonTerminal(n,i!==void 0&&i.LABEL!==void 0?i.LABEL:e.ruleName),n}catch(o){this.subruleInternalError(o,i,e.ruleName)}},t.prototype.subruleInternalError=function(e,r,i){throw(0,ly.isRecognitionException)(e)&&e.partialCstResult!==void 0&&(this.cstPostNonTerminal(e.partialCstResult,r!==void 0&&r.LABEL!==void 0?r.LABEL:i),delete e.partialCstResult),e},t.prototype.consumeInternal=function(e,r,i){var n;try{var s=this.LA(1);this.tokenMatcher(s,e)===!0?(this.consumeToken(),n=s):this.consumeInternalError(e,s,i)}catch(o){n=this.consumeInternalRecovery(e,r,o)}return this.cstPostTerminal(i!==void 0&&i.LABEL!==void 0?i.LABEL:e.name,n),n},t.prototype.consumeInternalError=function(e,r,i){var n,s=this.LA(0);throw i!==void 0&&i.ERR_MSG?n=i.ERR_MSG:n=this.errorMessageProvider.buildMismatchTokenMessage({expected:e,actual:r,previous:s,ruleName:this.getCurrRuleFullName()}),this.SAVE_ERROR(new ly.MismatchedTokenException(n,r,s))},t.prototype.consumeInternalRecovery=function(e,r,i){if(this.recoveryEnabled&&i.name===\"MismatchedTokenException\"&&!this.isBackTracking()){var n=this.getFollowsForInRuleRecovery(e,r);try{return this.tryInRuleRecovery(e,n)}catch(s){throw s.name===swe.IN_RULE_RECOVERY_EXCEPTION?i:s}}else throw i},t.prototype.saveRecogState=function(){var e=this.errors,r=(0,Rr.cloneArr)(this.RULE_STACK);return{errors:e,lexerState:this.exportLexerState(),RULE_STACK:r,CST_STACK:this.CST_STACK}},t.prototype.reloadRecogState=function(e){this.errors=e.errors,this.importLexerState(e.lexerState),this.RULE_STACK=e.RULE_STACK},t.prototype.ruleInvocationStateUpdate=function(e,r,i){this.RULE_OCCURRENCE_STACK.push(i),this.RULE_STACK.push(e),this.cstInvocationStateUpdate(r,e)},t.prototype.isBackTracking=function(){return this.isBackTrackingStack.length!==0},t.prototype.getCurrRuleFullName=function(){var e=this.getLastExplicitRuleShortName();return this.shortRuleNameToFull[e]},t.prototype.shortRuleNameToFullName=function(e){return this.shortRuleNameToFull[e]},t.prototype.isAtEndOfInput=function(){return 
this.tokenMatcher(this.LA(1),zG.EOF)},t.prototype.reset=function(){this.resetLexerState(),this.isBackTrackingStack=[],this.errors=[],this.RULE_STACK=[],this.CST_STACK=[],this.RULE_OCCURRENCE_STACK=[]},t}();Ay.RecognizerEngine=awe});var XG=w(cy=>{\"use strict\";Object.defineProperty(cy,\"__esModule\",{value:!0});cy.ErrorHandler=void 0;var AS=mg(),lS=Yt(),VG=Up(),Awe=Xn(),lwe=function(){function t(){}return t.prototype.initErrorHandler=function(e){this._errors=[],this.errorMessageProvider=(0,lS.has)(e,\"errorMessageProvider\")?e.errorMessageProvider:Awe.DEFAULT_PARSER_CONFIG.errorMessageProvider},t.prototype.SAVE_ERROR=function(e){if((0,AS.isRecognitionException)(e))return e.context={ruleStack:this.getHumanReadableRuleStack(),ruleOccurrenceStack:(0,lS.cloneArr)(this.RULE_OCCURRENCE_STACK)},this._errors.push(e),e;throw Error(\"Trying to save an Error which is not a RecognitionException\")},Object.defineProperty(t.prototype,\"errors\",{get:function(){return(0,lS.cloneArr)(this._errors)},set:function(e){this._errors=e},enumerable:!1,configurable:!0}),t.prototype.raiseEarlyExitException=function(e,r,i){for(var n=this.getCurrRuleFullName(),s=this.getGAstProductions()[n],o=(0,VG.getLookaheadPathsForOptionalProd)(e,s,r,this.maxLookahead),a=o[0],l=[],c=1;c<=this.maxLookahead;c++)l.push(this.LA(c));var u=this.errorMessageProvider.buildEarlyExitMessage({expectedIterationPaths:a,actual:l,previous:this.LA(0),customUserDescription:i,ruleName:n});throw this.SAVE_ERROR(new AS.EarlyExitException(u,this.LA(1),this.LA(0)))},t.prototype.raiseNoAltException=function(e,r){for(var i=this.getCurrRuleFullName(),n=this.getGAstProductions()[i],s=(0,VG.getLookaheadPathsForOr)(e,n,this.maxLookahead),o=[],a=1;a<=this.maxLookahead;a++)o.push(this.LA(a));var l=this.LA(0),c=this.errorMessageProvider.buildNoViableAltMessage({expectedPathsPerAlt:s,actual:o,previous:l,customUserDescription:r,ruleName:this.getCurrRuleFullName()});throw this.SAVE_ERROR(new AS.NoViableAltException(c,this.LA(1),l))},t}();cy.ErrorHandler=lwe});var eY=w(uy=>{\"use strict\";Object.defineProperty(uy,\"__esModule\",{value:!0});uy.ContentAssist=void 0;var ZG=Mp(),$G=Yt(),cwe=function(){function t(){}return t.prototype.initContentAssist=function(){},t.prototype.computeContentAssist=function(e,r){var i=this.gastProductionsCache[e];if((0,$G.isUndefined)(i))throw Error(\"Rule ->\"+e+\"<- does not exist in this grammar.\");return(0,ZG.nextPossibleTokensAfter)([i],r,this.tokenMatcher,this.maxLookahead)},t.prototype.getNextPossibleTokenTypes=function(e){var r=(0,$G.first)(e.ruleStack),i=this.getGAstProductions(),n=i[r],s=new ZG.NextAfterTokenWalker(n,e).startWalking();return s},t}();uy.ContentAssist=cwe});var AY=w(gy=>{\"use strict\";Object.defineProperty(gy,\"__esModule\",{value:!0});gy.GastRecorder=void 0;var Sn=Yt(),Ho=bn(),uwe=Dp(),tY=fg(),rY=JA(),gwe=Xn(),fwe=iy(),fy={description:\"This Object indicates the Parser is during Recording Phase\"};Object.freeze(fy);var iY=!0,nY=Math.pow(2,fwe.BITS_FOR_OCCURRENCE_IDX)-1,sY=(0,rY.createToken)({name:\"RECORDING_PHASE_TOKEN\",pattern:uwe.Lexer.NA});(0,tY.augmentTokenTypes)([sY]);var oY=(0,rY.createTokenInstance)(sY,`This IToken indicates the Parser is in Recording Phase\n\tSee: https://chevrotain.io/docs/guide/internals.html#grammar-recording for details`,-1,-1,-1,-1,-1,-1);Object.freeze(oY);var hwe={name:`This CSTNode indicates the Parser is in Recording Phase\n\tSee: https://chevrotain.io/docs/guide/internals.html#grammar-recording for details`,children:{}},dwe=function(){function t(){}return 
t.prototype.initGastRecorder=function(e){this.recordingProdStack=[],this.RECORDING_PHASE=!1},t.prototype.enableRecording=function(){var e=this;this.RECORDING_PHASE=!0,this.TRACE_INIT(\"Enable Recording\",function(){for(var r=function(n){var s=n>0?n:\"\";e[\"CONSUME\"+s]=function(o,a){return this.consumeInternalRecord(o,n,a)},e[\"SUBRULE\"+s]=function(o,a){return this.subruleInternalRecord(o,n,a)},e[\"OPTION\"+s]=function(o){return this.optionInternalRecord(o,n)},e[\"OR\"+s]=function(o){return this.orInternalRecord(o,n)},e[\"MANY\"+s]=function(o){this.manyInternalRecord(n,o)},e[\"MANY_SEP\"+s]=function(o){this.manySepFirstInternalRecord(n,o)},e[\"AT_LEAST_ONE\"+s]=function(o){this.atLeastOneInternalRecord(n,o)},e[\"AT_LEAST_ONE_SEP\"+s]=function(o){this.atLeastOneSepFirstInternalRecord(n,o)}},i=0;i<10;i++)r(i);e.consume=function(n,s,o){return this.consumeInternalRecord(s,n,o)},e.subrule=function(n,s,o){return this.subruleInternalRecord(s,n,o)},e.option=function(n,s){return this.optionInternalRecord(s,n)},e.or=function(n,s){return this.orInternalRecord(s,n)},e.many=function(n,s){this.manyInternalRecord(n,s)},e.atLeastOne=function(n,s){this.atLeastOneInternalRecord(n,s)},e.ACTION=e.ACTION_RECORD,e.BACKTRACK=e.BACKTRACK_RECORD,e.LA=e.LA_RECORD})},t.prototype.disableRecording=function(){var e=this;this.RECORDING_PHASE=!1,this.TRACE_INIT(\"Deleting Recording methods\",function(){for(var r=0;r<10;r++){var i=r>0?r:\"\";delete e[\"CONSUME\"+i],delete e[\"SUBRULE\"+i],delete e[\"OPTION\"+i],delete e[\"OR\"+i],delete e[\"MANY\"+i],delete e[\"MANY_SEP\"+i],delete e[\"AT_LEAST_ONE\"+i],delete e[\"AT_LEAST_ONE_SEP\"+i]}delete e.consume,delete e.subrule,delete e.option,delete e.or,delete e.many,delete e.atLeastOne,delete e.ACTION,delete e.BACKTRACK,delete e.LA})},t.prototype.ACTION_RECORD=function(e){},t.prototype.BACKTRACK_RECORD=function(e,r){return function(){return!0}},t.prototype.LA_RECORD=function(e){return gwe.END_OF_FILE},t.prototype.topLevelRuleRecord=function(e,r){try{var i=new Ho.Rule({definition:[],name:e});return i.name=e,this.recordingProdStack.push(i),r.call(this),this.recordingProdStack.pop(),i}catch(n){if(n.KNOWN_RECORDER_ERROR!==!0)try{n.message=n.message+`\n\t This error was thrown during the \"grammar recording phase\" For more info see:\n\thttps://chevrotain.io/docs/guide/internals.html#grammar-recording`}catch(s){throw n}throw n}},t.prototype.optionInternalRecord=function(e,r){return Yp.call(this,Ho.Option,e,r)},t.prototype.atLeastOneInternalRecord=function(e,r){Yp.call(this,Ho.RepetitionMandatory,r,e)},t.prototype.atLeastOneSepFirstInternalRecord=function(e,r){Yp.call(this,Ho.RepetitionMandatoryWithSeparator,r,e,iY)},t.prototype.manyInternalRecord=function(e,r){Yp.call(this,Ho.Repetition,r,e)},t.prototype.manySepFirstInternalRecord=function(e,r){Yp.call(this,Ho.RepetitionWithSeparator,r,e,iY)},t.prototype.orInternalRecord=function(e,r){return pwe.call(this,e,r)},t.prototype.subruleInternalRecord=function(e,r,i){if(hy(r),!e||(0,Sn.has)(e,\"ruleName\")===!1){var n=new Error(\"<SUBRULE\"+aY(r)+\"> argument is invalid\"+(\" expecting a Parser method reference but got: <\"+JSON.stringify(e)+\">\")+(`\n inside top level rule: <`+this.recordingProdStack[0].name+\">\"));throw n.KNOWN_RECORDER_ERROR=!0,n}var s=(0,Sn.peek)(this.recordingProdStack),o=e.ruleName,a=new Ho.NonTerminal({idx:r,nonTerminalName:o,label:i==null?void 0:i.LABEL,referencedRule:void 0});return 
s.definition.push(a),this.outputCst?hwe:fy},t.prototype.consumeInternalRecord=function(e,r,i){if(hy(r),!(0,tY.hasShortKeyProperty)(e)){var n=new Error(\"<CONSUME\"+aY(r)+\"> argument is invalid\"+(\" expecting a TokenType reference but got: <\"+JSON.stringify(e)+\">\")+(`\n inside top level rule: <`+this.recordingProdStack[0].name+\">\"));throw n.KNOWN_RECORDER_ERROR=!0,n}var s=(0,Sn.peek)(this.recordingProdStack),o=new Ho.Terminal({idx:r,terminalType:e,label:i==null?void 0:i.LABEL});return s.definition.push(o),oY},t}();gy.GastRecorder=dwe;function Yp(t,e,r,i){i===void 0&&(i=!1),hy(r);var n=(0,Sn.peek)(this.recordingProdStack),s=(0,Sn.isFunction)(e)?e:e.DEF,o=new t({definition:[],idx:r});return i&&(o.separator=e.SEP),(0,Sn.has)(e,\"MAX_LOOKAHEAD\")&&(o.maxLookahead=e.MAX_LOOKAHEAD),this.recordingProdStack.push(o),s.call(this),n.definition.push(o),this.recordingProdStack.pop(),fy}function pwe(t,e){var r=this;hy(e);var i=(0,Sn.peek)(this.recordingProdStack),n=(0,Sn.isArray)(t)===!1,s=n===!1?t:t.DEF,o=new Ho.Alternation({definition:[],idx:e,ignoreAmbiguities:n&&t.IGNORE_AMBIGUITIES===!0});(0,Sn.has)(t,\"MAX_LOOKAHEAD\")&&(o.maxLookahead=t.MAX_LOOKAHEAD);var a=(0,Sn.some)(s,function(l){return(0,Sn.isFunction)(l.GATE)});return o.hasPredicates=a,i.definition.push(o),(0,Sn.forEach)(s,function(l){var c=new Ho.Alternative({definition:[]});o.definition.push(c),(0,Sn.has)(l,\"IGNORE_AMBIGUITIES\")?c.ignoreAmbiguities=l.IGNORE_AMBIGUITIES:(0,Sn.has)(l,\"GATE\")&&(c.ignoreAmbiguities=!0),r.recordingProdStack.push(c),l.ALT.call(r),r.recordingProdStack.pop()}),fy}function aY(t){return t===0?\"\":\"\"+t}function hy(t){if(t<0||t>nY){var e=new Error(\"Invalid DSL Method idx value: <\"+t+`>\n\t`+(\"Idx value must be a none negative value smaller than \"+(nY+1)));throw e.KNOWN_RECORDER_ERROR=!0,e}}});var cY=w(py=>{\"use strict\";Object.defineProperty(py,\"__esModule\",{value:!0});py.PerformanceTracer=void 0;var lY=Yt(),Cwe=Xn(),mwe=function(){function t(){}return t.prototype.initPerformanceTracer=function(e){if((0,lY.has)(e,\"traceInitPerf\")){var r=e.traceInitPerf,i=typeof r==\"number\";this.traceInitMaxIdent=i?r:Infinity,this.traceInitPerf=i?r>0:r}else this.traceInitMaxIdent=0,this.traceInitPerf=Cwe.DEFAULT_PARSER_CONFIG.traceInitPerf;this.traceInitIndent=-1},t.prototype.TRACE_INIT=function(e,r){if(this.traceInitPerf===!0){this.traceInitIndent++;var i=new Array(this.traceInitIndent+1).join(\"\t\");this.traceInitIndent<this.traceInitMaxIdent&&console.log(i+\"--> <\"+e+\">\");var n=(0,lY.timer)(r),s=n.time,o=n.value,a=s>10?console.warn:console.log;return this.traceInitIndent<this.traceInitMaxIdent&&a(i+\"<-- <\"+e+\"> time: \"+s+\"ms\"),this.traceInitIndent--,o}else return r()},t}();py.PerformanceTracer=mwe});var uY=w(dy=>{\"use strict\";Object.defineProperty(dy,\"__esModule\",{value:!0});dy.applyMixins=void 0;function Ewe(t,e){e.forEach(function(r){var i=r.prototype;Object.getOwnPropertyNames(i).forEach(function(n){if(n!==\"constructor\"){var s=Object.getOwnPropertyDescriptor(i,n);s&&(s.get||s.set)?Object.defineProperty(t.prototype,n,s):t.prototype[n]=r.prototype[n]}})})}dy.applyMixins=Ewe});var Xn=w(Er=>{\"use strict\";var gY=Er&&Er.__extends||function(){var t=function(e,r){return t=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(i,n){i.__proto__=n}||function(i,n){for(var s in n)Object.prototype.hasOwnProperty.call(n,s)&&(i[s]=n[s])},t(e,r)};return function(e,r){if(typeof r!=\"function\"&&r!==null)throw new TypeError(\"Class extends value \"+String(r)+\" is not a constructor or 
null\");t(e,r);function i(){this.constructor=e}e.prototype=r===null?Object.create(r):(i.prototype=r.prototype,new i)}}();Object.defineProperty(Er,\"__esModule\",{value:!0});Er.EmbeddedActionsParser=Er.CstParser=Er.Parser=Er.EMPTY_ALT=Er.ParserDefinitionErrorType=Er.DEFAULT_RULE_CONFIG=Er.DEFAULT_PARSER_CONFIG=Er.END_OF_FILE=void 0;var an=Yt(),Iwe=Xj(),fY=JA(),hY=Tp(),pY=BG(),ywe=nS(),wwe=DG(),Bwe=HG(),bwe=GG(),Qwe=qG(),vwe=_G(),Swe=XG(),kwe=eY(),xwe=AY(),Pwe=cY(),Dwe=uY();Er.END_OF_FILE=(0,fY.createTokenInstance)(fY.EOF,\"\",NaN,NaN,NaN,NaN,NaN,NaN);Object.freeze(Er.END_OF_FILE);Er.DEFAULT_PARSER_CONFIG=Object.freeze({recoveryEnabled:!1,maxLookahead:3,dynamicTokensEnabled:!1,outputCst:!0,errorMessageProvider:hY.defaultParserErrorProvider,nodeLocationTracking:\"none\",traceInitPerf:!1,skipValidations:!1});Er.DEFAULT_RULE_CONFIG=Object.freeze({recoveryValueFunc:function(){},resyncEnabled:!0});var Rwe;(function(t){t[t.INVALID_RULE_NAME=0]=\"INVALID_RULE_NAME\",t[t.DUPLICATE_RULE_NAME=1]=\"DUPLICATE_RULE_NAME\",t[t.INVALID_RULE_OVERRIDE=2]=\"INVALID_RULE_OVERRIDE\",t[t.DUPLICATE_PRODUCTIONS=3]=\"DUPLICATE_PRODUCTIONS\",t[t.UNRESOLVED_SUBRULE_REF=4]=\"UNRESOLVED_SUBRULE_REF\",t[t.LEFT_RECURSION=5]=\"LEFT_RECURSION\",t[t.NONE_LAST_EMPTY_ALT=6]=\"NONE_LAST_EMPTY_ALT\",t[t.AMBIGUOUS_ALTS=7]=\"AMBIGUOUS_ALTS\",t[t.CONFLICT_TOKENS_RULES_NAMESPACE=8]=\"CONFLICT_TOKENS_RULES_NAMESPACE\",t[t.INVALID_TOKEN_NAME=9]=\"INVALID_TOKEN_NAME\",t[t.NO_NON_EMPTY_LOOKAHEAD=10]=\"NO_NON_EMPTY_LOOKAHEAD\",t[t.AMBIGUOUS_PREFIX_ALTS=11]=\"AMBIGUOUS_PREFIX_ALTS\",t[t.TOO_MANY_ALTS=12]=\"TOO_MANY_ALTS\"})(Rwe=Er.ParserDefinitionErrorType||(Er.ParserDefinitionErrorType={}));function Fwe(t){return t===void 0&&(t=void 0),function(){return t}}Er.EMPTY_ALT=Fwe;var Cy=function(){function t(e,r){this.definitionErrors=[],this.selfAnalysisDone=!1;var i=this;if(i.initErrorHandler(r),i.initLexerAdapter(),i.initLooksAhead(r),i.initRecognizerEngine(e,r),i.initRecoverable(r),i.initTreeBuilder(r),i.initContentAssist(),i.initGastRecorder(r),i.initPerformanceTracer(r),(0,an.has)(r,\"ignoredIssues\"))throw new Error(`The <ignoredIssues> IParserConfig property has been deprecated.\n\tPlease use the <IGNORE_AMBIGUITIES> flag on the relevant DSL method instead.\n\tSee: https://chevrotain.io/docs/guide/resolving_grammar_errors.html#IGNORING_AMBIGUITIES\n\tFor further details.`);this.skipValidations=(0,an.has)(r,\"skipValidations\")?r.skipValidations:Er.DEFAULT_PARSER_CONFIG.skipValidations}return t.performSelfAnalysis=function(e){throw Error(\"The **static** `performSelfAnalysis` method has been deprecated.\t\\nUse the **instance** method with the same name instead.\")},t.prototype.performSelfAnalysis=function(){var e=this;this.TRACE_INIT(\"performSelfAnalysis\",function(){var r;e.selfAnalysisDone=!0;var i=e.className;e.TRACE_INIT(\"toFastProps\",function(){(0,an.toFastProperties)(e)}),e.TRACE_INIT(\"Grammar Recording\",function(){try{e.enableRecording(),(0,an.forEach)(e.definedRulesNames,function(s){var o=e[s],a=o.originalGrammarAction,l=void 0;e.TRACE_INIT(s+\" Rule\",function(){l=e.topLevelRuleRecord(s,a)}),e.gastProductionsCache[s]=l})}finally{e.disableRecording()}});var n=[];if(e.TRACE_INIT(\"Grammar Resolving\",function(){n=(0,pY.resolveGrammar)({rules:(0,an.values)(e.gastProductionsCache)}),e.definitionErrors=e.definitionErrors.concat(n)}),e.TRACE_INIT(\"Grammar Validations\",function(){if((0,an.isEmpty)(n)&&e.skipValidations===!1){var 
s=(0,pY.validateGrammar)({rules:(0,an.values)(e.gastProductionsCache),maxLookahead:e.maxLookahead,tokenTypes:(0,an.values)(e.tokensMap),errMsgProvider:hY.defaultGrammarValidatorErrorProvider,grammarName:i});e.definitionErrors=e.definitionErrors.concat(s)}}),(0,an.isEmpty)(e.definitionErrors)&&(e.recoveryEnabled&&e.TRACE_INIT(\"computeAllProdsFollows\",function(){var s=(0,Iwe.computeAllProdsFollows)((0,an.values)(e.gastProductionsCache));e.resyncFollows=s}),e.TRACE_INIT(\"ComputeLookaheadFunctions\",function(){e.preComputeLookaheadFunctions((0,an.values)(e.gastProductionsCache))})),!t.DEFER_DEFINITION_ERRORS_HANDLING&&!(0,an.isEmpty)(e.definitionErrors))throw r=(0,an.map)(e.definitionErrors,function(s){return s.message}),new Error(`Parser Definition Errors detected:\n `+r.join(`\n-------------------------------\n`))})},t.DEFER_DEFINITION_ERRORS_HANDLING=!1,t}();Er.Parser=Cy;(0,Dwe.applyMixins)(Cy,[ywe.Recoverable,wwe.LooksAhead,Bwe.TreeBuilder,bwe.LexerAdapter,vwe.RecognizerEngine,Qwe.RecognizerApi,Swe.ErrorHandler,kwe.ContentAssist,xwe.GastRecorder,Pwe.PerformanceTracer]);var Nwe=function(t){gY(e,t);function e(r,i){i===void 0&&(i=Er.DEFAULT_PARSER_CONFIG);var n=this,s=(0,an.cloneObj)(i);return s.outputCst=!0,n=t.call(this,r,s)||this,n}return e}(Cy);Er.CstParser=Nwe;var Lwe=function(t){gY(e,t);function e(r,i){i===void 0&&(i=Er.DEFAULT_PARSER_CONFIG);var n=this,s=(0,an.cloneObj)(i);return s.outputCst=!1,n=t.call(this,r,s)||this,n}return e}(Cy);Er.EmbeddedActionsParser=Lwe});var CY=w(my=>{\"use strict\";Object.defineProperty(my,\"__esModule\",{value:!0});my.createSyntaxDiagramsCode=void 0;var dY=xv();function Twe(t,e){var r=e===void 0?{}:e,i=r.resourceBase,n=i===void 0?\"https://unpkg.com/chevrotain@\"+dY.VERSION+\"/diagrams/\":i,s=r.css,o=s===void 0?\"https://unpkg.com/chevrotain@\"+dY.VERSION+\"/diagrams/diagrams.css\":s,a=`\n<!-- This is a generated file -->\n<!DOCTYPE html>\n<meta charset=\"utf-8\">\n<style>\n  body {\n    background-color: hsl(30, 20%, 95%)\n  }\n</style>\n\n`,l=`\n<link rel='stylesheet' href='`+o+`'>\n`,c=`\n<script src='`+n+`vendor/railroad-diagrams.js'></script>\n<script src='`+n+`src/diagrams_builder.js'></script>\n<script src='`+n+`src/diagrams_behavior.js'></script>\n<script src='`+n+`src/main.js'></script>\n`,u=`\n<div id=\"diagrams\" align=\"center\"></div>    \n`,g=`\n<script>\n    window.serializedGrammar = `+JSON.stringify(t,null,\"  \")+`;\n</script>\n`,f=`\n<script>\n    var diagramsDiv = document.getElementById(\"diagrams\");\n    main.drawDiagramsFromSerializedGrammar(serializedGrammar, diagramsDiv);\n</script>\n`;return a+l+c+u+g+f}my.createSyntaxDiagramsCode=Twe});var IY=w(Ve=>{\"use strict\";Object.defineProperty(Ve,\"__esModule\",{value:!0});Ve.Parser=Ve.createSyntaxDiagramsCode=Ve.clearCache=Ve.GAstVisitor=Ve.serializeProduction=Ve.serializeGrammar=Ve.Terminal=Ve.Rule=Ve.RepetitionWithSeparator=Ve.RepetitionMandatoryWithSeparator=Ve.RepetitionMandatory=Ve.Repetition=Ve.Option=Ve.NonTerminal=Ve.Alternative=Ve.Alternation=Ve.defaultLexerErrorProvider=Ve.NoViableAltException=Ve.NotAllInputParsedException=Ve.MismatchedTokenException=Ve.isRecognitionException=Ve.EarlyExitException=Ve.defaultParserErrorProvider=Ve.tokenName=Ve.tokenMatcher=Ve.tokenLabel=Ve.EOF=Ve.createTokenInstance=Ve.createToken=Ve.LexerDefinitionErrorType=Ve.Lexer=Ve.EMPTY_ALT=Ve.ParserDefinitionErrorType=Ve.EmbeddedActionsParser=Ve.CstParser=Ve.VERSION=void 0;var Owe=xv();Object.defineProperty(Ve,\"VERSION\",{enumerable:!0,get:function(){return Owe.VERSION}});var 
Ey=Xn();Object.defineProperty(Ve,\"CstParser\",{enumerable:!0,get:function(){return Ey.CstParser}});Object.defineProperty(Ve,\"EmbeddedActionsParser\",{enumerable:!0,get:function(){return Ey.EmbeddedActionsParser}});Object.defineProperty(Ve,\"ParserDefinitionErrorType\",{enumerable:!0,get:function(){return Ey.ParserDefinitionErrorType}});Object.defineProperty(Ve,\"EMPTY_ALT\",{enumerable:!0,get:function(){return Ey.EMPTY_ALT}});var mY=Dp();Object.defineProperty(Ve,\"Lexer\",{enumerable:!0,get:function(){return mY.Lexer}});Object.defineProperty(Ve,\"LexerDefinitionErrorType\",{enumerable:!0,get:function(){return mY.LexerDefinitionErrorType}});var yg=JA();Object.defineProperty(Ve,\"createToken\",{enumerable:!0,get:function(){return yg.createToken}});Object.defineProperty(Ve,\"createTokenInstance\",{enumerable:!0,get:function(){return yg.createTokenInstance}});Object.defineProperty(Ve,\"EOF\",{enumerable:!0,get:function(){return yg.EOF}});Object.defineProperty(Ve,\"tokenLabel\",{enumerable:!0,get:function(){return yg.tokenLabel}});Object.defineProperty(Ve,\"tokenMatcher\",{enumerable:!0,get:function(){return yg.tokenMatcher}});Object.defineProperty(Ve,\"tokenName\",{enumerable:!0,get:function(){return yg.tokenName}});var Mwe=Tp();Object.defineProperty(Ve,\"defaultParserErrorProvider\",{enumerable:!0,get:function(){return Mwe.defaultParserErrorProvider}});var qp=mg();Object.defineProperty(Ve,\"EarlyExitException\",{enumerable:!0,get:function(){return qp.EarlyExitException}});Object.defineProperty(Ve,\"isRecognitionException\",{enumerable:!0,get:function(){return qp.isRecognitionException}});Object.defineProperty(Ve,\"MismatchedTokenException\",{enumerable:!0,get:function(){return qp.MismatchedTokenException}});Object.defineProperty(Ve,\"NotAllInputParsedException\",{enumerable:!0,get:function(){return qp.NotAllInputParsedException}});Object.defineProperty(Ve,\"NoViableAltException\",{enumerable:!0,get:function(){return qp.NoViableAltException}});var Uwe=Uv();Object.defineProperty(Ve,\"defaultLexerErrorProvider\",{enumerable:!0,get:function(){return Uwe.defaultLexerErrorProvider}});var jo=bn();Object.defineProperty(Ve,\"Alternation\",{enumerable:!0,get:function(){return jo.Alternation}});Object.defineProperty(Ve,\"Alternative\",{enumerable:!0,get:function(){return jo.Alternative}});Object.defineProperty(Ve,\"NonTerminal\",{enumerable:!0,get:function(){return jo.NonTerminal}});Object.defineProperty(Ve,\"Option\",{enumerable:!0,get:function(){return jo.Option}});Object.defineProperty(Ve,\"Repetition\",{enumerable:!0,get:function(){return jo.Repetition}});Object.defineProperty(Ve,\"RepetitionMandatory\",{enumerable:!0,get:function(){return jo.RepetitionMandatory}});Object.defineProperty(Ve,\"RepetitionMandatoryWithSeparator\",{enumerable:!0,get:function(){return jo.RepetitionMandatoryWithSeparator}});Object.defineProperty(Ve,\"RepetitionWithSeparator\",{enumerable:!0,get:function(){return jo.RepetitionWithSeparator}});Object.defineProperty(Ve,\"Rule\",{enumerable:!0,get:function(){return jo.Rule}});Object.defineProperty(Ve,\"Terminal\",{enumerable:!0,get:function(){return jo.Terminal}});var EY=bn();Object.defineProperty(Ve,\"serializeGrammar\",{enumerable:!0,get:function(){return EY.serializeGrammar}});Object.defineProperty(Ve,\"serializeProduction\",{enumerable:!0,get:function(){return EY.serializeProduction}});var Kwe=hg();Object.defineProperty(Ve,\"GAstVisitor\",{enumerable:!0,get:function(){return Kwe.GAstVisitor}});function Hwe(){console.warn(`The clearCache function was 'soft' removed from 
the Chevrotain API.\n\t It performs no action other than printing this message.\n\t Please avoid using it as it will be completely removed in the future`)}Ve.clearCache=Hwe;var jwe=CY();Object.defineProperty(Ve,\"createSyntaxDiagramsCode\",{enumerable:!0,get:function(){return jwe.createSyntaxDiagramsCode}});var Gwe=function(){function t(){throw new Error(`The Parser class has been deprecated, use CstParser or EmbeddedActionsParser instead.\t\nSee: https://chevrotain.io/docs/changes/BREAKING_CHANGES.html#_7-0-0`)}return t}();Ve.Parser=Gwe});var BY=w((Vtt,yY)=>{var Iy=IY(),Ga=Iy.createToken,wY=Iy.tokenMatcher,cS=Iy.Lexer,Ywe=Iy.EmbeddedActionsParser;yY.exports=t=>{let e=Ga({name:\"LogicalOperator\",pattern:cS.NA}),r=Ga({name:\"Or\",pattern:/\\|/,categories:e}),i=Ga({name:\"Xor\",pattern:/\\^/,categories:e}),n=Ga({name:\"And\",pattern:/&/,categories:e}),s=Ga({name:\"Not\",pattern:/!/}),o=Ga({name:\"LParen\",pattern:/\\(/}),a=Ga({name:\"RParen\",pattern:/\\)/}),l=Ga({name:\"Query\",pattern:t}),u=[Ga({name:\"WhiteSpace\",pattern:/\\s+/,group:cS.SKIPPED}),r,i,n,o,a,s,e,l],g=new cS(u);class f extends Ywe{constructor(p){super(u);this.RULE(\"expression\",()=>this.SUBRULE(this.logicalExpression)),this.RULE(\"logicalExpression\",()=>{let y=this.SUBRULE(this.atomicExpression);return this.MANY(()=>{let Q=y,S=this.CONSUME(e),x=this.SUBRULE2(this.atomicExpression);wY(S,r)?y=M=>Q(M)||x(M):wY(S,i)?y=M=>!!(Q(M)^x(M)):y=M=>Q(M)&&x(M)}),y}),this.RULE(\"atomicExpression\",()=>this.OR([{ALT:()=>this.SUBRULE(this.parenthesisExpression)},{ALT:()=>{let{image:m}=this.CONSUME(l);return y=>y(m)}},{ALT:()=>{this.CONSUME(s);let m=this.SUBRULE(this.atomicExpression);return y=>!m(y)}}])),this.RULE(\"parenthesisExpression\",()=>{let m;return this.CONSUME(o),m=this.SUBRULE(this.expression),this.CONSUME(a),m}),this.performSelfAnalysis()}}return{TinylogicLexer:g,TinylogicParser:f}}});var bY=w(yy=>{var qwe=BY();yy.makeParser=(t=/[a-z]+/)=>{let{TinylogicLexer:e,TinylogicParser:r}=qwe(t),i=new r;return(n,s)=>{let o=e.tokenize(n);return i.input=o.tokens,i.expression()(s)}};yy.parse=yy.makeParser()});var vY=w((Ztt,QY)=>{\"use 
strict\";QY.exports={aliceblue:[240,248,255],antiquewhite:[250,235,215],aqua:[0,255,255],aquamarine:[127,255,212],azure:[240,255,255],beige:[245,245,220],bisque:[255,228,196],black:[0,0,0],blanchedalmond:[255,235,205],blue:[0,0,255],blueviolet:[138,43,226],brown:[165,42,42],burlywood:[222,184,135],cadetblue:[95,158,160],chartreuse:[127,255,0],chocolate:[210,105,30],coral:[255,127,80],cornflowerblue:[100,149,237],cornsilk:[255,248,220],crimson:[220,20,60],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgoldenrod:[184,134,11],darkgray:[169,169,169],darkgreen:[0,100,0],darkgrey:[169,169,169],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkseagreen:[143,188,143],darkslateblue:[72,61,139],darkslategray:[47,79,79],darkslategrey:[47,79,79],darkturquoise:[0,206,209],darkviolet:[148,0,211],deeppink:[255,20,147],deepskyblue:[0,191,255],dimgray:[105,105,105],dimgrey:[105,105,105],dodgerblue:[30,144,255],firebrick:[178,34,34],floralwhite:[255,250,240],forestgreen:[34,139,34],fuchsia:[255,0,255],gainsboro:[220,220,220],ghostwhite:[248,248,255],gold:[255,215,0],goldenrod:[218,165,32],gray:[128,128,128],green:[0,128,0],greenyellow:[173,255,47],grey:[128,128,128],honeydew:[240,255,240],hotpink:[255,105,180],indianred:[205,92,92],indigo:[75,0,130],ivory:[255,255,240],khaki:[240,230,140],lavender:[230,230,250],lavenderblush:[255,240,245],lawngreen:[124,252,0],lemonchiffon:[255,250,205],lightblue:[173,216,230],lightcoral:[240,128,128],lightcyan:[224,255,255],lightgoldenrodyellow:[250,250,210],lightgray:[211,211,211],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightsalmon:[255,160,122],lightseagreen:[32,178,170],lightskyblue:[135,206,250],lightslategray:[119,136,153],lightslategrey:[119,136,153],lightsteelblue:[176,196,222],lightyellow:[255,255,224],lime:[0,255,0],limegreen:[50,205,50],linen:[250,240,230],magenta:[255,0,255],maroon:[128,0,0],mediumaquamarine:[102,205,170],mediumblue:[0,0,205],mediumorchid:[186,85,211],mediumpurple:[147,112,219],mediumseagreen:[60,179,113],mediumslateblue:[123,104,238],mediumspringgreen:[0,250,154],mediumturquoise:[72,209,204],mediumvioletred:[199,21,133],midnightblue:[25,25,112],mintcream:[245,255,250],mistyrose:[255,228,225],moccasin:[255,228,181],navajowhite:[255,222,173],navy:[0,0,128],oldlace:[253,245,230],olive:[128,128,0],olivedrab:[107,142,35],orange:[255,165,0],orangered:[255,69,0],orchid:[218,112,214],palegoldenrod:[238,232,170],palegreen:[152,251,152],paleturquoise:[175,238,238],palevioletred:[219,112,147],papayawhip:[255,239,213],peachpuff:[255,218,185],peru:[205,133,63],pink:[255,192,203],plum:[221,160,221],powderblue:[176,224,230],purple:[128,0,128],rebeccapurple:[102,51,153],red:[255,0,0],rosybrown:[188,143,143],royalblue:[65,105,225],saddlebrown:[139,69,19],salmon:[250,128,114],sandybrown:[244,164,96],seagreen:[46,139,87],seashell:[255,245,238],sienna:[160,82,45],silver:[192,192,192],skyblue:[135,206,235],slateblue:[106,90,205],slategray:[112,128,144],slategrey:[112,128,144],snow:[255,250,250],springgreen:[0,255,127],steelblue:[70,130,180],tan:[210,180,140],teal:[0,128,128],thistle:[216,191,216],tomato:[255,99,71],turquoise:[64,224,208],violet:[238,130,238],wheat:[245,222,179],white:[255,255,255],whitesmoke:[245,245,245],yellow:[255,255,0],yellowgreen:[154,205,50]}});var uS=w(($tt,SY)=>{var Jp=vY(),kY={};for(let t of Object.keys(Jp))kY[Jp[t]]=t;var 
at={rgb:{channels:3,labels:\"rgb\"},hsl:{channels:3,labels:\"hsl\"},hsv:{channels:3,labels:\"hsv\"},hwb:{channels:3,labels:\"hwb\"},cmyk:{channels:4,labels:\"cmyk\"},xyz:{channels:3,labels:\"xyz\"},lab:{channels:3,labels:\"lab\"},lch:{channels:3,labels:\"lch\"},hex:{channels:1,labels:[\"hex\"]},keyword:{channels:1,labels:[\"keyword\"]},ansi16:{channels:1,labels:[\"ansi16\"]},ansi256:{channels:1,labels:[\"ansi256\"]},hcg:{channels:3,labels:[\"h\",\"c\",\"g\"]},apple:{channels:3,labels:[\"r16\",\"g16\",\"b16\"]},gray:{channels:1,labels:[\"gray\"]}};SY.exports=at;for(let t of Object.keys(at)){if(!(\"channels\"in at[t]))throw new Error(\"missing channels property: \"+t);if(!(\"labels\"in at[t]))throw new Error(\"missing channel labels property: \"+t);if(at[t].labels.length!==at[t].channels)throw new Error(\"channel and label counts mismatch: \"+t);let{channels:e,labels:r}=at[t];delete at[t].channels,delete at[t].labels,Object.defineProperty(at[t],\"channels\",{value:e}),Object.defineProperty(at[t],\"labels\",{value:r})}at.rgb.hsl=function(t){let e=t[0]/255,r=t[1]/255,i=t[2]/255,n=Math.min(e,r,i),s=Math.max(e,r,i),o=s-n,a,l;s===n?a=0:e===s?a=(r-i)/o:r===s?a=2+(i-e)/o:i===s&&(a=4+(e-r)/o),a=Math.min(a*60,360),a<0&&(a+=360);let c=(n+s)/2;return s===n?l=0:c<=.5?l=o/(s+n):l=o/(2-s-n),[a,l*100,c*100]};at.rgb.hsv=function(t){let e,r,i,n,s,o=t[0]/255,a=t[1]/255,l=t[2]/255,c=Math.max(o,a,l),u=c-Math.min(o,a,l),g=function(f){return(c-f)/6/u+1/2};return u===0?(n=0,s=0):(s=u/c,e=g(o),r=g(a),i=g(l),o===c?n=i-r:a===c?n=1/3+e-i:l===c&&(n=2/3+r-e),n<0?n+=1:n>1&&(n-=1)),[n*360,s*100,c*100]};at.rgb.hwb=function(t){let e=t[0],r=t[1],i=t[2],n=at.rgb.hsl(t)[0],s=1/255*Math.min(e,Math.min(r,i));return i=1-1/255*Math.max(e,Math.max(r,i)),[n,s*100,i*100]};at.rgb.cmyk=function(t){let e=t[0]/255,r=t[1]/255,i=t[2]/255,n=Math.min(1-e,1-r,1-i),s=(1-e-n)/(1-n)||0,o=(1-r-n)/(1-n)||0,a=(1-i-n)/(1-n)||0;return[s*100,o*100,a*100,n*100]};function Jwe(t,e){return(t[0]-e[0])**2+(t[1]-e[1])**2+(t[2]-e[2])**2}at.rgb.keyword=function(t){let e=kY[t];if(e)return e;let r=Infinity,i;for(let n of Object.keys(Jp)){let s=Jp[n],o=Jwe(t,s);o<r&&(r=o,i=n)}return i};at.keyword.rgb=function(t){return Jp[t]};at.rgb.xyz=function(t){let e=t[0]/255,r=t[1]/255,i=t[2]/255;e=e>.04045?((e+.055)/1.055)**2.4:e/12.92,r=r>.04045?((r+.055)/1.055)**2.4:r/12.92,i=i>.04045?((i+.055)/1.055)**2.4:i/12.92;let n=e*.4124+r*.3576+i*.1805,s=e*.2126+r*.7152+i*.0722,o=e*.0193+r*.1192+i*.9505;return[n*100,s*100,o*100]};at.rgb.lab=function(t){let e=at.rgb.xyz(t),r=e[0],i=e[1],n=e[2];r/=95.047,i/=100,n/=108.883,r=r>.008856?r**(1/3):7.787*r+16/116,i=i>.008856?i**(1/3):7.787*i+16/116,n=n>.008856?n**(1/3):7.787*n+16/116;let s=116*i-16,o=500*(r-i),a=200*(i-n);return[s,o,a]};at.hsl.rgb=function(t){let e=t[0]/360,r=t[1]/100,i=t[2]/100,n,s,o;if(r===0)return o=i*255,[o,o,o];i<.5?n=i*(1+r):n=i+r-i*r;let a=2*i-n,l=[0,0,0];for(let c=0;c<3;c++)s=e+1/3*-(c-1),s<0&&s++,s>1&&s--,6*s<1?o=a+(n-a)*6*s:2*s<1?o=n:3*s<2?o=a+(n-a)*(2/3-s)*6:o=a,l[c]=o*255;return l};at.hsl.hsv=function(t){let e=t[0],r=t[1]/100,i=t[2]/100,n=r,s=Math.max(i,.01);i*=2,r*=i<=1?i:2-i,n*=s<=1?s:2-s;let o=(i+r)/2,a=i===0?2*n/(s+n):2*r/(i+r);return[e,a*100,o*100]};at.hsv.rgb=function(t){let e=t[0]/60,r=t[1]/100,i=t[2]/100,n=Math.floor(e)%6,s=e-Math.floor(e),o=255*i*(1-r),a=255*i*(1-r*s),l=255*i*(1-r*(1-s));switch(i*=255,n){case 0:return[i,l,o];case 1:return[a,i,o];case 2:return[o,i,l];case 3:return[o,a,i];case 4:return[l,o,i];case 5:return[i,o,a]}};at.hsv.hsl=function(t){let 
e=t[0],r=t[1]/100,i=t[2]/100,n=Math.max(i,.01),s,o;o=(2-r)*i;let a=(2-r)*n;return s=r*n,s/=a<=1?a:2-a,s=s||0,o/=2,[e,s*100,o*100]};at.hwb.rgb=function(t){let e=t[0]/360,r=t[1]/100,i=t[2]/100,n=r+i,s;n>1&&(r/=n,i/=n);let o=Math.floor(6*e),a=1-i;s=6*e-o,(o&1)!=0&&(s=1-s);let l=r+s*(a-r),c,u,g;switch(o){default:case 6:case 0:c=a,u=l,g=r;break;case 1:c=l,u=a,g=r;break;case 2:c=r,u=a,g=l;break;case 3:c=r,u=l,g=a;break;case 4:c=l,u=r,g=a;break;case 5:c=a,u=r,g=l;break}return[c*255,u*255,g*255]};at.cmyk.rgb=function(t){let e=t[0]/100,r=t[1]/100,i=t[2]/100,n=t[3]/100,s=1-Math.min(1,e*(1-n)+n),o=1-Math.min(1,r*(1-n)+n),a=1-Math.min(1,i*(1-n)+n);return[s*255,o*255,a*255]};at.xyz.rgb=function(t){let e=t[0]/100,r=t[1]/100,i=t[2]/100,n,s,o;return n=e*3.2406+r*-1.5372+i*-.4986,s=e*-.9689+r*1.8758+i*.0415,o=e*.0557+r*-.204+i*1.057,n=n>.0031308?1.055*n**(1/2.4)-.055:n*12.92,s=s>.0031308?1.055*s**(1/2.4)-.055:s*12.92,o=o>.0031308?1.055*o**(1/2.4)-.055:o*12.92,n=Math.min(Math.max(0,n),1),s=Math.min(Math.max(0,s),1),o=Math.min(Math.max(0,o),1),[n*255,s*255,o*255]};at.xyz.lab=function(t){let e=t[0],r=t[1],i=t[2];e/=95.047,r/=100,i/=108.883,e=e>.008856?e**(1/3):7.787*e+16/116,r=r>.008856?r**(1/3):7.787*r+16/116,i=i>.008856?i**(1/3):7.787*i+16/116;let n=116*r-16,s=500*(e-r),o=200*(r-i);return[n,s,o]};at.lab.xyz=function(t){let e=t[0],r=t[1],i=t[2],n,s,o;s=(e+16)/116,n=r/500+s,o=s-i/200;let a=s**3,l=n**3,c=o**3;return s=a>.008856?a:(s-16/116)/7.787,n=l>.008856?l:(n-16/116)/7.787,o=c>.008856?c:(o-16/116)/7.787,n*=95.047,s*=100,o*=108.883,[n,s,o]};at.lab.lch=function(t){let e=t[0],r=t[1],i=t[2],n;n=Math.atan2(i,r)*360/2/Math.PI,n<0&&(n+=360);let o=Math.sqrt(r*r+i*i);return[e,o,n]};at.lch.lab=function(t){let e=t[0],r=t[1],n=t[2]/360*2*Math.PI,s=r*Math.cos(n),o=r*Math.sin(n);return[e,s,o]};at.rgb.ansi16=function(t,e=null){let[r,i,n]=t,s=e===null?at.rgb.hsv(t)[2]:e;if(s=Math.round(s/50),s===0)return 30;let o=30+(Math.round(n/255)<<2|Math.round(i/255)<<1|Math.round(r/255));return s===2&&(o+=60),o};at.hsv.ansi16=function(t){return at.rgb.ansi16(at.hsv.rgb(t),t[2])};at.rgb.ansi256=function(t){let e=t[0],r=t[1],i=t[2];return e===r&&r===i?e<8?16:e>248?231:Math.round((e-8)/247*24)+232:16+36*Math.round(e/255*5)+6*Math.round(r/255*5)+Math.round(i/255*5)};at.ansi16.rgb=function(t){let e=t%10;if(e===0||e===7)return t>50&&(e+=3.5),e=e/10.5*255,[e,e,e];let r=(~~(t>50)+1)*.5,i=(e&1)*r*255,n=(e>>1&1)*r*255,s=(e>>2&1)*r*255;return[i,n,s]};at.ansi256.rgb=function(t){if(t>=232){let s=(t-232)*10+8;return[s,s,s]}t-=16;let e,r=Math.floor(t/36)/5*255,i=Math.floor((e=t%36)/6)/5*255,n=e%6/5*255;return[r,i,n]};at.rgb.hex=function(t){let r=(((Math.round(t[0])&255)<<16)+((Math.round(t[1])&255)<<8)+(Math.round(t[2])&255)).toString(16).toUpperCase();return\"000000\".substring(r.length)+r};at.hex.rgb=function(t){let e=t.toString(16).match(/[a-f0-9]{6}|[a-f0-9]{3}/i);if(!e)return[0,0,0];let r=e[0];e[0].length===3&&(r=r.split(\"\").map(a=>a+a).join(\"\"));let i=parseInt(r,16),n=i>>16&255,s=i>>8&255,o=i&255;return[n,s,o]};at.rgb.hcg=function(t){let e=t[0]/255,r=t[1]/255,i=t[2]/255,n=Math.max(Math.max(e,r),i),s=Math.min(Math.min(e,r),i),o=n-s,a,l;return o<1?a=s/(1-o):a=0,o<=0?l=0:n===e?l=(r-i)/o%6:n===r?l=2+(i-e)/o:l=4+(e-r)/o,l/=6,l%=1,[l*360,o*100,a*100]};at.hsl.hcg=function(t){let e=t[1]/100,r=t[2]/100,i=r<.5?2*e*r:2*e*(1-r),n=0;return i<1&&(n=(r-.5*i)/(1-i)),[t[0],i*100,n*100]};at.hsv.hcg=function(t){let e=t[1]/100,r=t[2]/100,i=e*r,n=0;return i<1&&(n=(r-i)/(1-i)),[t[0],i*100,n*100]};at.hcg.rgb=function(t){let 
e=t[0]/360,r=t[1]/100,i=t[2]/100;if(r===0)return[i*255,i*255,i*255];let n=[0,0,0],s=e%1*6,o=s%1,a=1-o,l=0;switch(Math.floor(s)){case 0:n[0]=1,n[1]=o,n[2]=0;break;case 1:n[0]=a,n[1]=1,n[2]=0;break;case 2:n[0]=0,n[1]=1,n[2]=o;break;case 3:n[0]=0,n[1]=a,n[2]=1;break;case 4:n[0]=o,n[1]=0,n[2]=1;break;default:n[0]=1,n[1]=0,n[2]=a}return l=(1-r)*i,[(r*n[0]+l)*255,(r*n[1]+l)*255,(r*n[2]+l)*255]};at.hcg.hsv=function(t){let e=t[1]/100,r=t[2]/100,i=e+r*(1-e),n=0;return i>0&&(n=e/i),[t[0],n*100,i*100]};at.hcg.hsl=function(t){let e=t[1]/100,i=t[2]/100*(1-e)+.5*e,n=0;return i>0&&i<.5?n=e/(2*i):i>=.5&&i<1&&(n=e/(2*(1-i))),[t[0],n*100,i*100]};at.hcg.hwb=function(t){let e=t[1]/100,r=t[2]/100,i=e+r*(1-e);return[t[0],(i-e)*100,(1-i)*100]};at.hwb.hcg=function(t){let e=t[1]/100,r=t[2]/100,i=1-r,n=i-e,s=0;return n<1&&(s=(i-n)/(1-n)),[t[0],n*100,s*100]};at.apple.rgb=function(t){return[t[0]/65535*255,t[1]/65535*255,t[2]/65535*255]};at.rgb.apple=function(t){return[t[0]/255*65535,t[1]/255*65535,t[2]/255*65535]};at.gray.rgb=function(t){return[t[0]/100*255,t[0]/100*255,t[0]/100*255]};at.gray.hsl=function(t){return[0,0,t[0]]};at.gray.hsv=at.gray.hsl;at.gray.hwb=function(t){return[0,100,t[0]]};at.gray.cmyk=function(t){return[0,0,0,t[0]]};at.gray.lab=function(t){return[t[0],0,0]};at.gray.hex=function(t){let e=Math.round(t[0]/100*255)&255,i=((e<<16)+(e<<8)+e).toString(16).toUpperCase();return\"000000\".substring(i.length)+i};at.rgb.gray=function(t){return[(t[0]+t[1]+t[2])/3/255*100]}});var PY=w((ert,xY)=>{var wy=uS();function Wwe(){let t={},e=Object.keys(wy);for(let r=e.length,i=0;i<r;i++)t[e[i]]={distance:-1,parent:null};return t}function zwe(t){let e=Wwe(),r=[t];for(e[t].distance=0;r.length;){let i=r.pop(),n=Object.keys(wy[i]);for(let s=n.length,o=0;o<s;o++){let a=n[o],l=e[a];l.distance===-1&&(l.distance=e[i].distance+1,l.parent=i,r.unshift(a))}}return e}function _we(t,e){return function(r){return e(t(r))}}function Vwe(t,e){let r=[e[t].parent,t],i=wy[e[t].parent][t],n=e[t].parent;for(;e[n].parent;)r.unshift(e[n].parent),i=_we(wy[e[n].parent][n],i),n=e[n].parent;return i.conversion=r,i}xY.exports=function(t){let e=zwe(t),r={},i=Object.keys(e);for(let n=i.length,s=0;s<n;s++){let o=i[s];e[o].parent!==null&&(r[o]=Vwe(o,e))}return r}});var RY=w((trt,DY)=>{var gS=uS(),Xwe=PY(),wg={},Zwe=Object.keys(gS);function $we(t){let e=function(...r){let i=r[0];return i==null?i:(i.length>1&&(r=i),t(r))};return\"conversion\"in t&&(e.conversion=t.conversion),e}function eBe(t){let e=function(...r){let i=r[0];if(i==null)return i;i.length>1&&(r=i);let n=t(r);if(typeof n==\"object\")for(let s=n.length,o=0;o<s;o++)n[o]=Math.round(n[o]);return n};return\"conversion\"in t&&(e.conversion=t.conversion),e}Zwe.forEach(t=>{wg[t]={},Object.defineProperty(wg[t],\"channels\",{value:gS[t].channels}),Object.defineProperty(wg[t],\"labels\",{value:gS[t].labels});let e=Xwe(t);Object.keys(e).forEach(i=>{let n=e[i];wg[t][i]=eBe(n),wg[t][i].raw=$we(n)})});DY.exports=wg});var MY=w((rrt,FY)=>{\"use strict\";var NY=(t,e)=>(...r)=>`\u001b[${t(...r)+e}m`,LY=(t,e)=>(...r)=>{let i=t(...r);return`\u001b[${38+e};5;${i}m`},TY=(t,e)=>(...r)=>{let i=t(...r);return`\u001b[${38+e};2;${i[0]};${i[1]};${i[2]}m`},By=t=>t,OY=(t,e,r)=>[t,e,r],Bg=(t,e,r)=>{Object.defineProperty(t,e,{get:()=>{let i=r();return Object.defineProperty(t,e,{value:i,enumerable:!0,configurable:!0}),i},enumerable:!0,configurable:!0})},fS,bg=(t,e,r,i)=>{fS===void 0&&(fS=RY());let n=i?10:0,s={};for(let[o,a]of Object.entries(fS)){let l=o===\"ansi16\"?\"ansi\":o;o===e?s[l]=t(r,n):typeof 
a==\"object\"&&(s[l]=t(a[e],n))}return s};function tBe(){let t=new Map,e={modifier:{reset:[0,0],bold:[1,22],dim:[2,22],italic:[3,23],underline:[4,24],inverse:[7,27],hidden:[8,28],strikethrough:[9,29]},color:{black:[30,39],red:[31,39],green:[32,39],yellow:[33,39],blue:[34,39],magenta:[35,39],cyan:[36,39],white:[37,39],blackBright:[90,39],redBright:[91,39],greenBright:[92,39],yellowBright:[93,39],blueBright:[94,39],magentaBright:[95,39],cyanBright:[96,39],whiteBright:[97,39]},bgColor:{bgBlack:[40,49],bgRed:[41,49],bgGreen:[42,49],bgYellow:[43,49],bgBlue:[44,49],bgMagenta:[45,49],bgCyan:[46,49],bgWhite:[47,49],bgBlackBright:[100,49],bgRedBright:[101,49],bgGreenBright:[102,49],bgYellowBright:[103,49],bgBlueBright:[104,49],bgMagentaBright:[105,49],bgCyanBright:[106,49],bgWhiteBright:[107,49]}};e.color.gray=e.color.blackBright,e.bgColor.bgGray=e.bgColor.bgBlackBright,e.color.grey=e.color.blackBright,e.bgColor.bgGrey=e.bgColor.bgBlackBright;for(let[r,i]of Object.entries(e)){for(let[n,s]of Object.entries(i))e[n]={open:`\u001b[${s[0]}m`,close:`\u001b[${s[1]}m`},i[n]=e[n],t.set(s[0],s[1]);Object.defineProperty(e,r,{value:i,enumerable:!1})}return Object.defineProperty(e,\"codes\",{value:t,enumerable:!1}),e.color.close=\"\u001b[39m\",e.bgColor.close=\"\u001b[49m\",Bg(e.color,\"ansi\",()=>bg(NY,\"ansi16\",By,!1)),Bg(e.color,\"ansi256\",()=>bg(LY,\"ansi256\",By,!1)),Bg(e.color,\"ansi16m\",()=>bg(TY,\"rgb\",OY,!1)),Bg(e.bgColor,\"ansi\",()=>bg(NY,\"ansi16\",By,!0)),Bg(e.bgColor,\"ansi256\",()=>bg(LY,\"ansi256\",By,!0)),Bg(e.bgColor,\"ansi16m\",()=>bg(TY,\"rgb\",OY,!0)),e}Object.defineProperty(FY,\"exports\",{enumerable:!0,get:tBe})});var KY=w((irt,UY)=>{\"use strict\";UY.exports=(t,e=process.argv)=>{let r=t.startsWith(\"-\")?\"\":t.length===1?\"-\":\"--\",i=e.indexOf(r+t),n=e.indexOf(\"--\");return i!==-1&&(n===-1||i<n)}});var GY=w((nrt,HY)=>{\"use strict\";var rBe=require(\"os\"),jY=require(\"tty\"),ks=KY(),{env:ui}=process,VA;ks(\"no-color\")||ks(\"no-colors\")||ks(\"color=false\")||ks(\"color=never\")?VA=0:(ks(\"color\")||ks(\"colors\")||ks(\"color=true\")||ks(\"color=always\"))&&(VA=1);\"FORCE_COLOR\"in ui&&(ui.FORCE_COLOR===\"true\"?VA=1:ui.FORCE_COLOR===\"false\"?VA=0:VA=ui.FORCE_COLOR.length===0?1:Math.min(parseInt(ui.FORCE_COLOR,10),3));function hS(t){return t===0?!1:{level:t,hasBasic:!0,has256:t>=2,has16m:t>=3}}function pS(t,e){if(VA===0)return 0;if(ks(\"color=16m\")||ks(\"color=full\")||ks(\"color=truecolor\"))return 3;if(ks(\"color=256\"))return 2;if(t&&!e&&VA===void 0)return 0;let r=VA||0;if(ui.TERM===\"dumb\")return r;if(process.platform===\"win32\"){let i=rBe.release().split(\".\");return Number(i[0])>=10&&Number(i[2])>=10586?Number(i[2])>=14931?3:2:1}if(\"CI\"in ui)return[\"TRAVIS\",\"CIRCLECI\",\"APPVEYOR\",\"GITLAB_CI\"].some(i=>i in ui)||ui.CI_NAME===\"codeship\"?1:r;if(\"TEAMCITY_VERSION\"in ui)return/^(9\\.(0*[1-9]\\d*)\\.|\\d{2,}\\.)/.test(ui.TEAMCITY_VERSION)?1:0;if(\"GITHUB_ACTIONS\"in ui)return 1;if(ui.COLORTERM===\"truecolor\")return 3;if(\"TERM_PROGRAM\"in ui){let i=parseInt((ui.TERM_PROGRAM_VERSION||\"\").split(\".\")[0],10);switch(ui.TERM_PROGRAM){case\"iTerm.app\":return i>=3?3:2;case\"Apple_Terminal\":return 2}}return/-256(color)?$/i.test(ui.TERM)?2:/^screen|^xterm|^vt100|^vt220|^rxvt|color|ansi|cygwin|linux/i.test(ui.TERM)||\"COLORTERM\"in ui?1:r}function iBe(t){let e=pS(t,t&&t.isTTY);return hS(e)}HY.exports={supportsColor:iBe,stdout:hS(pS(!0,jY.isatty(1))),stderr:hS(pS(!0,jY.isatty(2)))}});var qY=w((srt,YY)=>{\"use strict\";var nBe=(t,e,r)=>{let 
i=t.indexOf(e);if(i===-1)return t;let n=e.length,s=0,o=\"\";do o+=t.substr(s,i-s)+e+r,s=i+n,i=t.indexOf(e,s);while(i!==-1);return o+=t.substr(s),o},sBe=(t,e,r,i)=>{let n=0,s=\"\";do{let o=t[i-1]===\"\\r\";s+=t.substr(n,(o?i-1:i)-n)+e+(o?`\\r\n`:`\n`)+r,n=i+1,i=t.indexOf(`\n`,n)}while(i!==-1);return s+=t.substr(n),s};YY.exports={stringReplaceAll:nBe,stringEncaseCRLFWithFirstIndex:sBe}});var VY=w((ort,JY)=>{\"use strict\";var oBe=/(?:\\\\(u(?:[a-f\\d]{4}|\\{[a-f\\d]{1,6}\\})|x[a-f\\d]{2}|.))|(?:\\{(~)?(\\w+(?:\\([^)]*\\))?(?:\\.\\w+(?:\\([^)]*\\))?)*)(?:[ \\t]|(?=\\r?\\n)))|(\\})|((?:.|[\\r\\n\\f])+?)/gi,WY=/(?:^|\\.)(\\w+)(?:\\(([^)]*)\\))?/g,aBe=/^(['\"])((?:\\\\.|(?!\\1)[^\\\\])*)\\1$/,ABe=/\\\\(u(?:[a-f\\d]{4}|\\{[a-f\\d]{1,6}\\})|x[a-f\\d]{2}|.)|([^\\\\])/gi,lBe=new Map([[\"n\",`\n`],[\"r\",\"\\r\"],[\"t\",\"\t\"],[\"b\",\"\\b\"],[\"f\",\"\\f\"],[\"v\",\"\\v\"],[\"0\",\"\\0\"],[\"\\\\\",\"\\\\\"],[\"e\",\"\u001b\"],[\"a\",\"\\x07\"]]);function zY(t){let e=t[0]===\"u\",r=t[1]===\"{\";return e&&!r&&t.length===5||t[0]===\"x\"&&t.length===3?String.fromCharCode(parseInt(t.slice(1),16)):e&&r?String.fromCodePoint(parseInt(t.slice(2,-1),16)):lBe.get(t)||t}function cBe(t,e){let r=[],i=e.trim().split(/\\s*,\\s*/g),n;for(let s of i){let o=Number(s);if(!Number.isNaN(o))r.push(o);else if(n=s.match(aBe))r.push(n[2].replace(ABe,(a,l,c)=>l?zY(l):c));else throw new Error(`Invalid Chalk template style argument: ${s} (in style '${t}')`)}return r}function uBe(t){WY.lastIndex=0;let e=[],r;for(;(r=WY.exec(t))!==null;){let i=r[1];if(r[2]){let n=cBe(i,r[2]);e.push([i].concat(n))}else e.push([i])}return e}function _Y(t,e){let r={};for(let n of e)for(let s of n.styles)r[s[0]]=n.inverse?null:s.slice(1);let i=t;for(let[n,s]of Object.entries(r))if(!!Array.isArray(s)){if(!(n in i))throw new Error(`Unknown Chalk style: ${n}`);i=s.length>0?i[n](...s):i[n]}return i}JY.exports=(t,e)=>{let r=[],i=[],n=[];if(e.replace(oBe,(s,o,a,l,c,u)=>{if(o)n.push(zY(o));else if(l){let g=n.join(\"\");n=[],i.push(r.length===0?g:_Y(t,r)(g)),r.push({inverse:a,styles:uBe(l)})}else if(c){if(r.length===0)throw new Error(\"Found extraneous } in Chalk template literal\");i.push(_Y(t,r)(n.join(\"\"))),n=[],r.pop()}else n.push(u)}),i.push(n.join(\"\")),r.length>0){let s=`Chalk template literal is missing ${r.length} closing bracket${r.length===1?\"\":\"s\"} (\\`}\\`)`;throw new Error(s)}return i.join(\"\")}});var IS=w((art,XY)=>{\"use strict\";var Wp=MY(),{stdout:dS,stderr:CS}=GY(),{stringReplaceAll:gBe,stringEncaseCRLFWithFirstIndex:fBe}=qY(),ZY=[\"ansi\",\"ansi\",\"ansi256\",\"ansi16m\"],Qg=Object.create(null),hBe=(t,e={})=>{if(e.level>3||e.level<0)throw new Error(\"The `level` option should be an integer from 0 to 3\");let r=dS?dS.level:0;t.level=e.level===void 0?r:e.level},$Y=class{constructor(e){return eq(e)}},eq=t=>{let e={};return hBe(e,t),e.template=(...r)=>pBe(e.template,...r),Object.setPrototypeOf(e,by.prototype),Object.setPrototypeOf(e.template,e),e.template.constructor=()=>{throw new Error(\"`chalk.constructor()` is deprecated. 
Use `new chalk.Instance()` instead.\")},e.template.Instance=$Y,e.template};function by(t){return eq(t)}for(let[t,e]of Object.entries(Wp))Qg[t]={get(){let r=Qy(this,mS(e.open,e.close,this._styler),this._isEmpty);return Object.defineProperty(this,t,{value:r}),r}};Qg.visible={get(){let t=Qy(this,this._styler,!0);return Object.defineProperty(this,\"visible\",{value:t}),t}};var tq=[\"rgb\",\"hex\",\"keyword\",\"hsl\",\"hsv\",\"hwb\",\"ansi\",\"ansi256\"];for(let t of tq)Qg[t]={get(){let{level:e}=this;return function(...r){let i=mS(Wp.color[ZY[e]][t](...r),Wp.color.close,this._styler);return Qy(this,i,this._isEmpty)}}};for(let t of tq){let e=\"bg\"+t[0].toUpperCase()+t.slice(1);Qg[e]={get(){let{level:r}=this;return function(...i){let n=mS(Wp.bgColor[ZY[r]][t](...i),Wp.bgColor.close,this._styler);return Qy(this,n,this._isEmpty)}}}}var dBe=Object.defineProperties(()=>{},te(N({},Qg),{level:{enumerable:!0,get(){return this._generator.level},set(t){this._generator.level=t}}})),mS=(t,e,r)=>{let i,n;return r===void 0?(i=t,n=e):(i=r.openAll+t,n=e+r.closeAll),{open:t,close:e,openAll:i,closeAll:n,parent:r}},Qy=(t,e,r)=>{let i=(...n)=>CBe(i,n.length===1?\"\"+n[0]:n.join(\" \"));return i.__proto__=dBe,i._generator=t,i._styler=e,i._isEmpty=r,i},CBe=(t,e)=>{if(t.level<=0||!e)return t._isEmpty?\"\":e;let r=t._styler;if(r===void 0)return e;let{openAll:i,closeAll:n}=r;if(e.indexOf(\"\u001b\")!==-1)for(;r!==void 0;)e=gBe(e,r.close,r.open),r=r.parent;let s=e.indexOf(`\n`);return s!==-1&&(e=fBe(e,n,i,s)),i+e+n},ES,pBe=(t,...e)=>{let[r]=e;if(!Array.isArray(r))return e.join(\" \");let i=e.slice(1),n=[r.raw[0]];for(let s=1;s<r.length;s++)n.push(String(i[s-1]).replace(/[{}\\\\]/g,\"\\\\$&\"),String(r.raw[s]));return ES===void 0&&(ES=VY()),ES(t,n.join(\"\"))};Object.defineProperties(by.prototype,Qg);var zp=by();zp.supportsColor=dS;zp.stderr=by({level:CS?CS.level:0});zp.stderr.supportsColor=CS;zp.Level={None:0,Basic:1,Ansi256:2,TrueColor:3,0:\"None\",1:\"Basic\",2:\"Ansi256\",3:\"TrueColor\"};XY.exports=zp});var vy=w(xs=>{\"use strict\";xs.isInteger=t=>typeof t==\"number\"?Number.isInteger(t):typeof t==\"string\"&&t.trim()!==\"\"?Number.isInteger(Number(t)):!1;xs.find=(t,e)=>t.nodes.find(r=>r.type===e);xs.exceedsLimit=(t,e,r=1,i)=>i===!1||!xs.isInteger(t)||!xs.isInteger(e)?!1:(Number(e)-Number(t))/Number(r)>=i;xs.escapeNode=(t,e=0,r)=>{let i=t.nodes[e];!i||(r&&i.type===r||i.type===\"open\"||i.type===\"close\")&&i.escaped!==!0&&(i.value=\"\\\\\"+i.value,i.escaped=!0)};xs.encloseBrace=t=>t.type!==\"brace\"?!1:t.commas>>0+t.ranges>>0==0?(t.invalid=!0,!0):!1;xs.isInvalidBrace=t=>t.type!==\"brace\"?!1:t.invalid===!0||t.dollar?!0:t.commas>>0+t.ranges>>0==0||t.open!==!0||t.close!==!0?(t.invalid=!0,!0):!1;xs.isOpenOrClose=t=>t.type===\"open\"||t.type===\"close\"?!0:t.open===!0||t.close===!0;xs.reduce=t=>t.reduce((e,r)=>(r.type===\"text\"&&e.push(r.value),r.type===\"range\"&&(r.type=\"text\"),e),[]);xs.flatten=(...t)=>{let e=[],r=i=>{for(let n=0;n<i.length;n++){let s=i[n];Array.isArray(s)?r(s,e):s!==void 0&&e.push(s)}return e};return r(t),e}});var Sy=w((lrt,rq)=>{\"use strict\";var iq=vy();rq.exports=(t,e={})=>{let r=(i,n={})=>{let s=e.escapeInvalid&&iq.isInvalidBrace(n),o=i.invalid===!0&&e.escapeInvalid===!0,a=\"\";if(i.value)return(s||o)&&iq.isOpenOrClose(i)?\"\\\\\"+i.value:i.value;if(i.value)return i.value;if(i.nodes)for(let l of i.nodes)a+=r(l);return a};return r(t)}});var sq=w((crt,nq)=>{\"use strict\";nq.exports=function(t){return typeof t==\"number\"?t-t==0:typeof 
t==\"string\"&&t.trim()!==\"\"?Number.isFinite?Number.isFinite(+t):isFinite(+t):!1}});var hq=w((urt,oq)=>{\"use strict\";var aq=sq(),vc=(t,e,r)=>{if(aq(t)===!1)throw new TypeError(\"toRegexRange: expected the first argument to be a number\");if(e===void 0||t===e)return String(t);if(aq(e)===!1)throw new TypeError(\"toRegexRange: expected the second argument to be a number.\");let i=N({relaxZeros:!0},r);typeof i.strictZeros==\"boolean\"&&(i.relaxZeros=i.strictZeros===!1);let n=String(i.relaxZeros),s=String(i.shorthand),o=String(i.capture),a=String(i.wrap),l=t+\":\"+e+\"=\"+n+s+o+a;if(vc.cache.hasOwnProperty(l))return vc.cache[l].result;let c=Math.min(t,e),u=Math.max(t,e);if(Math.abs(c-u)===1){let m=t+\"|\"+e;return i.capture?`(${m})`:i.wrap===!1?m:`(?:${m})`}let g=lq(t)||lq(e),f={min:t,max:e,a:c,b:u},h=[],p=[];if(g&&(f.isPadded=g,f.maxLen=String(f.max).length),c<0){let m=u<0?Math.abs(u):1;p=Aq(m,Math.abs(c),f,i),c=f.a=0}return u>=0&&(h=Aq(c,u,f,i)),f.negatives=p,f.positives=h,f.result=mBe(p,h,i),i.capture===!0?f.result=`(${f.result})`:i.wrap!==!1&&h.length+p.length>1&&(f.result=`(?:${f.result})`),vc.cache[l]=f,f.result};function mBe(t,e,r){let i=yS(t,e,\"-\",!1,r)||[],n=yS(e,t,\"\",!1,r)||[],s=yS(t,e,\"-?\",!0,r)||[];return i.concat(s).concat(n).join(\"|\")}function IBe(t,e){let r=1,i=1,n=cq(t,r),s=new Set([e]);for(;t<=n&&n<=e;)s.add(n),r+=1,n=cq(t,r);for(n=uq(e+1,i)-1;t<n&&n<=e;)s.add(n),i+=1,n=uq(e+1,i)-1;return s=[...s],s.sort(EBe),s}function BBe(t,e,r){if(t===e)return{pattern:t,count:[],digits:0};let i=yBe(t,e),n=i.length,s=\"\",o=0;for(let a=0;a<n;a++){let[l,c]=i[a];l===c?s+=l:l!==\"0\"||c!==\"9\"?s+=wBe(l,c,r):o++}return o&&(s+=r.shorthand===!0?\"\\\\d\":\"[0-9]\"),{pattern:s,count:[o],digits:n}}function Aq(t,e,r,i){let n=IBe(t,e),s=[],o=t,a;for(let l=0;l<n.length;l++){let c=n[l],u=BBe(String(o),String(c),i),g=\"\";if(!r.isPadded&&a&&a.pattern===u.pattern){a.count.length>1&&a.count.pop(),a.count.push(u.count[0]),a.string=a.pattern+gq(a.count),o=c+1;continue}r.isPadded&&(g=bBe(c,r,i)),u.string=g+u.pattern+gq(u.count),s.push(u),o=c+1,a=u}return s}function yS(t,e,r,i,n){let s=[];for(let o of t){let{string:a}=o;!i&&!fq(e,\"string\",a)&&s.push(r+a),i&&fq(e,\"string\",a)&&s.push(r+a)}return s}function yBe(t,e){let r=[];for(let i=0;i<t.length;i++)r.push([t[i],e[i]]);return r}function EBe(t,e){return t>e?1:e>t?-1:0}function fq(t,e,r){return t.some(i=>i[e]===r)}function cq(t,e){return Number(String(t).slice(0,-e)+\"9\".repeat(e))}function uq(t,e){return t-t%Math.pow(10,e)}function gq(t){let[e=0,r=\"\"]=t;return r||e>1?`{${e+(r?\",\"+r:\"\")}}`:\"\"}function wBe(t,e,r){return`[${t}${e-t==1?\"\":\"-\"}${e}]`}function lq(t){return/^-?(0+)\\d/.test(t)}function bBe(t,e,r){if(!e.isPadded)return t;let i=Math.abs(e.maxLen-String(t).length),n=r.relaxZeros!==!1;switch(i){case 0:return\"\";case 1:return n?\"0?\":\"0\";case 2:return n?\"0{0,2}\":\"00\";default:return n?`0{0,${i}}`:`0{${i}}`}}vc.cache={};vc.clearCache=()=>vc.cache={};oq.exports=vc});var bS=w((grt,pq)=>{\"use strict\";var QBe=require(\"util\"),dq=hq(),Cq=t=>t!==null&&typeof t==\"object\"&&!Array.isArray(t),vBe=t=>e=>t===!0?Number(e):String(e),wS=t=>typeof t==\"number\"||typeof t==\"string\"&&t!==\"\",_p=t=>Number.isInteger(+t),BS=t=>{let e=`${t}`,r=-1;if(e[0]===\"-\"&&(e=e.slice(1)),e===\"0\")return!1;for(;e[++r]===\"0\";);return r>0},SBe=(t,e,r)=>typeof t==\"string\"||typeof e==\"string\"?!0:r.stringify===!0,kBe=(t,e,r)=>{if(e>0){let i=t[0]===\"-\"?\"-\":\"\";i&&(t=t.slice(1)),t=i+t.padStart(i?e-1:e,\"0\")}return 
r===!1?String(t):t},mq=(t,e)=>{let r=t[0]===\"-\"?\"-\":\"\";for(r&&(t=t.slice(1),e--);t.length<e;)t=\"0\"+t;return r?\"-\"+t:t},xBe=(t,e)=>{t.negatives.sort((o,a)=>o<a?-1:o>a?1:0),t.positives.sort((o,a)=>o<a?-1:o>a?1:0);let r=e.capture?\"\":\"?:\",i=\"\",n=\"\",s;return t.positives.length&&(i=t.positives.join(\"|\")),t.negatives.length&&(n=`-(${r}${t.negatives.join(\"|\")})`),i&&n?s=`${i}|${n}`:s=i||n,e.wrap?`(${r}${s})`:s},Eq=(t,e,r,i)=>{if(r)return dq(t,e,N({wrap:!1},i));let n=String.fromCharCode(t);if(t===e)return n;let s=String.fromCharCode(e);return`[${n}-${s}]`},Iq=(t,e,r)=>{if(Array.isArray(t)){let i=r.wrap===!0,n=r.capture?\"\":\"?:\";return i?`(${n}${t.join(\"|\")})`:t.join(\"|\")}return dq(t,e,r)},yq=(...t)=>new RangeError(\"Invalid range arguments: \"+QBe.inspect(...t)),wq=(t,e,r)=>{if(r.strictRanges===!0)throw yq([t,e]);return[]},PBe=(t,e)=>{if(e.strictRanges===!0)throw new TypeError(`Expected step \"${t}\" to be a number`);return[]},DBe=(t,e,r=1,i={})=>{let n=Number(t),s=Number(e);if(!Number.isInteger(n)||!Number.isInteger(s)){if(i.strictRanges===!0)throw yq([t,e]);return[]}n===0&&(n=0),s===0&&(s=0);let o=n>s,a=String(t),l=String(e),c=String(r);r=Math.max(Math.abs(r),1);let u=BS(a)||BS(l)||BS(c),g=u?Math.max(a.length,l.length,c.length):0,f=u===!1&&SBe(t,e,i)===!1,h=i.transform||vBe(f);if(i.toRegex&&r===1)return Eq(mq(t,g),mq(e,g),!0,i);let p={negatives:[],positives:[]},m=S=>p[S<0?\"negatives\":\"positives\"].push(Math.abs(S)),y=[],Q=0;for(;o?n>=s:n<=s;)i.toRegex===!0&&r>1?m(n):y.push(kBe(h(n,Q),g,f)),n=o?n-r:n+r,Q++;return i.toRegex===!0?r>1?xBe(p,i):Iq(y,null,N({wrap:!1},i)):y},RBe=(t,e,r=1,i={})=>{if(!_p(t)&&t.length>1||!_p(e)&&e.length>1)return wq(t,e,i);let n=i.transform||(f=>String.fromCharCode(f)),s=`${t}`.charCodeAt(0),o=`${e}`.charCodeAt(0),a=s>o,l=Math.min(s,o),c=Math.max(s,o);if(i.toRegex&&r===1)return Eq(l,c,!1,i);let u=[],g=0;for(;a?s>=o:s<=o;)u.push(n(s,g)),s=a?s-r:s+r,g++;return i.toRegex===!0?Iq(u,null,{wrap:!1,options:i}):u},ky=(t,e,r,i={})=>{if(e==null&&wS(t))return[t];if(!wS(t)||!wS(e))return wq(t,e,i);if(typeof r==\"function\")return ky(t,e,1,{transform:r});if(Cq(r))return ky(t,e,0,r);let n=N({},i);return n.capture===!0&&(n.wrap=!0),r=r||n.step||1,_p(r)?_p(t)&&_p(e)?DBe(t,e,r,n):RBe(t,e,Math.max(Math.abs(r),1),n):r!=null&&!Cq(r)?PBe(r,n):ky(t,e,1,r)};pq.exports=ky});var Qq=w((frt,Bq)=>{\"use strict\";var FBe=bS(),bq=vy(),NBe=(t,e={})=>{let r=(i,n={})=>{let s=bq.isInvalidBrace(n),o=i.invalid===!0&&e.escapeInvalid===!0,a=s===!0||o===!0,l=e.escapeInvalid===!0?\"\\\\\":\"\",c=\"\";if(i.isOpen===!0||i.isClose===!0)return l+i.value;if(i.type===\"open\")return a?l+i.value:\"(\";if(i.type===\"close\")return a?l+i.value:\")\";if(i.type===\"comma\")return i.prev.type===\"comma\"?\"\":a?i.value:\"|\";if(i.value)return i.value;if(i.nodes&&i.ranges>0){let u=bq.reduce(i.nodes),g=FBe(...u,te(N({},e),{wrap:!1,toRegex:!0}));if(g.length!==0)return u.length>1&&g.length>1?`(${g})`:g}if(i.nodes)for(let u of i.nodes)c+=r(u,i);return c};return r(t)};Bq.exports=NBe});var kq=w((hrt,vq)=>{\"use strict\";var LBe=bS(),Sq=Sy(),vg=vy(),Sc=(t=\"\",e=\"\",r=!1)=>{let i=[];if(t=[].concat(t),e=[].concat(e),!e.length)return t;if(!t.length)return r?vg.flatten(e).map(n=>`{${n}}`):e;for(let n of t)if(Array.isArray(n))for(let s of n)i.push(Sc(s,e,r));else for(let s of e)r===!0&&typeof s==\"string\"&&(s=`{${s}}`),i.push(Array.isArray(s)?Sc(n,s,r):n+s);return vg.flatten(i)},TBe=(t,e={})=>{let r=e.rangeLimit===void 0?1e3:e.rangeLimit,i=(n,s={})=>{n.queue=[];let 
o=s,a=s.queue;for(;o.type!==\"brace\"&&o.type!==\"root\"&&o.parent;)o=o.parent,a=o.queue;if(n.invalid||n.dollar){a.push(Sc(a.pop(),Sq(n,e)));return}if(n.type===\"brace\"&&n.invalid!==!0&&n.nodes.length===2){a.push(Sc(a.pop(),[\"{}\"]));return}if(n.nodes&&n.ranges>0){let g=vg.reduce(n.nodes);if(vg.exceedsLimit(...g,e.step,r))throw new RangeError(\"expanded array length exceeds range limit. Use options.rangeLimit to increase or disable the limit.\");let f=LBe(...g,e);f.length===0&&(f=Sq(n,e)),a.push(Sc(a.pop(),f)),n.nodes=[];return}let l=vg.encloseBrace(n),c=n.queue,u=n;for(;u.type!==\"brace\"&&u.type!==\"root\"&&u.parent;)u=u.parent,c=u.queue;for(let g=0;g<n.nodes.length;g++){let f=n.nodes[g];if(f.type===\"comma\"&&n.type===\"brace\"){g===1&&c.push(\"\"),c.push(\"\");continue}if(f.type===\"close\"){a.push(Sc(a.pop(),c,l));continue}if(f.value&&f.type!==\"open\"){c.push(Sc(c.pop(),f.value));continue}f.nodes&&i(f,n)}return c};return vg.flatten(i(t))};vq.exports=TBe});var Pq=w((prt,xq)=>{\"use strict\";xq.exports={MAX_LENGTH:1024*64,CHAR_0:\"0\",CHAR_9:\"9\",CHAR_UPPERCASE_A:\"A\",CHAR_LOWERCASE_A:\"a\",CHAR_UPPERCASE_Z:\"Z\",CHAR_LOWERCASE_Z:\"z\",CHAR_LEFT_PARENTHESES:\"(\",CHAR_RIGHT_PARENTHESES:\")\",CHAR_ASTERISK:\"*\",CHAR_AMPERSAND:\"&\",CHAR_AT:\"@\",CHAR_BACKSLASH:\"\\\\\",CHAR_BACKTICK:\"`\",CHAR_CARRIAGE_RETURN:\"\\r\",CHAR_CIRCUMFLEX_ACCENT:\"^\",CHAR_COLON:\":\",CHAR_COMMA:\",\",CHAR_DOLLAR:\"$\",CHAR_DOT:\".\",CHAR_DOUBLE_QUOTE:'\"',CHAR_EQUAL:\"=\",CHAR_EXCLAMATION_MARK:\"!\",CHAR_FORM_FEED:\"\\f\",CHAR_FORWARD_SLASH:\"/\",CHAR_HASH:\"#\",CHAR_HYPHEN_MINUS:\"-\",CHAR_LEFT_ANGLE_BRACKET:\"<\",CHAR_LEFT_CURLY_BRACE:\"{\",CHAR_LEFT_SQUARE_BRACKET:\"[\",CHAR_LINE_FEED:`\n`,CHAR_NO_BREAK_SPACE:\"\\xA0\",CHAR_PERCENT:\"%\",CHAR_PLUS:\"+\",CHAR_QUESTION_MARK:\"?\",CHAR_RIGHT_ANGLE_BRACKET:\">\",CHAR_RIGHT_CURLY_BRACE:\"}\",CHAR_RIGHT_SQUARE_BRACKET:\"]\",CHAR_SEMICOLON:\";\",CHAR_SINGLE_QUOTE:\"'\",CHAR_SPACE:\" \",CHAR_TAB:\"\t\",CHAR_UNDERSCORE:\"_\",CHAR_VERTICAL_LINE:\"|\",CHAR_ZERO_WIDTH_NOBREAK_SPACE:\"\\uFEFF\"}});var Lq=w((drt,Dq)=>{\"use strict\";var OBe=Sy(),{MAX_LENGTH:Rq,CHAR_BACKSLASH:QS,CHAR_BACKTICK:MBe,CHAR_COMMA:UBe,CHAR_DOT:KBe,CHAR_LEFT_PARENTHESES:HBe,CHAR_RIGHT_PARENTHESES:jBe,CHAR_LEFT_CURLY_BRACE:GBe,CHAR_RIGHT_CURLY_BRACE:YBe,CHAR_LEFT_SQUARE_BRACKET:Fq,CHAR_RIGHT_SQUARE_BRACKET:Nq,CHAR_DOUBLE_QUOTE:qBe,CHAR_SINGLE_QUOTE:JBe,CHAR_NO_BREAK_SPACE:WBe,CHAR_ZERO_WIDTH_NOBREAK_SPACE:zBe}=Pq(),_Be=(t,e={})=>{if(typeof t!=\"string\")throw new TypeError(\"Expected a string\");let r=e||{},i=typeof r.maxLength==\"number\"?Math.min(Rq,r.maxLength):Rq;if(t.length>i)throw new SyntaxError(`Input length (${t.length}), exceeds max characters (${i})`);let n={type:\"root\",input:t,nodes:[]},s=[n],o=n,a=n,l=0,c=t.length,u=0,g=0,f,h={},p=()=>t[u++],m=y=>{if(y.type===\"text\"&&a.type===\"dot\"&&(a.type=\"text\"),a&&a.type===\"text\"&&y.type===\"text\"){a.value+=y.value;return}return o.nodes.push(y),y.parent=o,y.prev=a,a=y,y};for(m({type:\"bos\"});u<c;)if(o=s[s.length-1],f=p(),!(f===zBe||f===WBe)){if(f===QS){m({type:\"text\",value:(e.keepEscaping?f:\"\")+p()});continue}if(f===Nq){m({type:\"text\",value:\"\\\\\"+f});continue}if(f===Fq){l++;let 
y=!0,Q;for(;u<c&&(Q=p());){if(f+=Q,Q===Fq){l++;continue}if(Q===QS){f+=p();continue}if(Q===Nq&&(l--,l===0))break}m({type:\"text\",value:f});continue}if(f===HBe){o=m({type:\"paren\",nodes:[]}),s.push(o),m({type:\"text\",value:f});continue}if(f===jBe){if(o.type!==\"paren\"){m({type:\"text\",value:f});continue}o=s.pop(),m({type:\"text\",value:f}),o=s[s.length-1];continue}if(f===qBe||f===JBe||f===MBe){let y=f,Q;for(e.keepQuotes!==!0&&(f=\"\");u<c&&(Q=p());){if(Q===QS){f+=Q+p();continue}if(Q===y){e.keepQuotes===!0&&(f+=Q);break}f+=Q}m({type:\"text\",value:f});continue}if(f===GBe){g++;let y=a.value&&a.value.slice(-1)===\"$\"||o.dollar===!0;o=m({type:\"brace\",open:!0,close:!1,dollar:y,depth:g,commas:0,ranges:0,nodes:[]}),s.push(o),m({type:\"open\",value:f});continue}if(f===YBe){if(o.type!==\"brace\"){m({type:\"text\",value:f});continue}let y=\"close\";o=s.pop(),o.close=!0,m({type:y,value:f}),g--,o=s[s.length-1];continue}if(f===UBe&&g>0){if(o.ranges>0){o.ranges=0;let y=o.nodes.shift();o.nodes=[y,{type:\"text\",value:OBe(o)}]}m({type:\"comma\",value:f}),o.commas++;continue}if(f===KBe&&g>0&&o.commas===0){let y=o.nodes;if(g===0||y.length===0){m({type:\"text\",value:f});continue}if(a.type===\"dot\"){if(o.range=[],a.value+=f,a.type=\"range\",o.nodes.length!==3&&o.nodes.length!==5){o.invalid=!0,o.ranges=0,a.type=\"text\";continue}o.ranges++,o.args=[];continue}if(a.type===\"range\"){y.pop();let Q=y[y.length-1];Q.value+=a.value+f,a=Q,o.ranges--;continue}m({type:\"dot\",value:f});continue}m({type:\"text\",value:f})}do if(o=s.pop(),o.type!==\"root\"){o.nodes.forEach(S=>{S.nodes||(S.type===\"open\"&&(S.isOpen=!0),S.type===\"close\"&&(S.isClose=!0),S.nodes||(S.type=\"text\"),S.invalid=!0)});let y=s[s.length-1],Q=y.nodes.indexOf(o);y.nodes.splice(Q,1,...o.nodes)}while(s.length>0);return m({type:\"eos\"}),n};Dq.exports=_Be});var Mq=w((Crt,Tq)=>{\"use strict\";var Oq=Sy(),VBe=Qq(),XBe=kq(),ZBe=Lq(),$n=(t,e={})=>{let r=[];if(Array.isArray(t))for(let i of t){let n=$n.create(i,e);Array.isArray(n)?r.push(...n):r.push(n)}else r=[].concat($n.create(t,e));return e&&e.expand===!0&&e.nodupes===!0&&(r=[...new Set(r)]),r};$n.parse=(t,e={})=>ZBe(t,e);$n.stringify=(t,e={})=>typeof t==\"string\"?Oq($n.parse(t,e),e):Oq(t,e);$n.compile=(t,e={})=>(typeof t==\"string\"&&(t=$n.parse(t,e)),VBe(t,e));$n.expand=(t,e={})=>{typeof t==\"string\"&&(t=$n.parse(t,e));let r=XBe(t,e);return e.noempty===!0&&(r=r.filter(Boolean)),e.nodupes===!0&&(r=[...new Set(r)]),r};$n.create=(t,e={})=>t===\"\"||t.length<3?[t]:e.expand!==!0?$n.compile(t,e):$n.expand(t,e);Tq.exports=$n});var Vp=w((mrt,Uq)=>{\"use strict\";var $Be=require(\"path\"),Go=\"\\\\\\\\/\",Kq=`[^${Go}]`,Ya=\"\\\\.\",e0e=\"\\\\+\",t0e=\"\\\\?\",xy=\"\\\\/\",r0e=\"(?=.)\",Hq=\"[^/]\",vS=`(?:${xy}|$)`,jq=`(?:^|${xy})`,SS=`${Ya}{1,2}${vS}`,i0e=`(?!${Ya})`,n0e=`(?!${jq}${SS})`,s0e=`(?!${Ya}{0,1}${vS})`,o0e=`(?!${SS})`,a0e=`[^.${xy}]`,A0e=`${Hq}*?`,Gq={DOT_LITERAL:Ya,PLUS_LITERAL:e0e,QMARK_LITERAL:t0e,SLASH_LITERAL:xy,ONE_CHAR:r0e,QMARK:Hq,END_ANCHOR:vS,DOTS_SLASH:SS,NO_DOT:i0e,NO_DOTS:n0e,NO_DOT_SLASH:s0e,NO_DOTS_SLASH:o0e,QMARK_NO_DOT:a0e,STAR:A0e,START_ANCHOR:jq},l0e=te(N({},Gq),{SLASH_LITERAL:`[${Go}]`,QMARK:Kq,STAR:`${Kq}*?`,DOTS_SLASH:`${Ya}{1,2}(?:[${Go}]|$)`,NO_DOT:`(?!${Ya})`,NO_DOTS:`(?!(?:^|[${Go}])${Ya}{1,2}(?:[${Go}]|$))`,NO_DOT_SLASH:`(?!${Ya}{0,1}(?:[${Go}]|$))`,NO_DOTS_SLASH:`(?!${Ya}{1,2}(?:[${Go}]|$))`,QMARK_NO_DOT:`[^.${Go}]`,START_ANCHOR:`(?:^|[${Go}])`,END_ANCHOR:`(?:[${Go}]|$)`}),c0e={alnum:\"a-zA-Z0-9\",alpha:\"a-zA-Z\",ascii:\"\\\\x00-\\\\x7F\",blank:\" 
\\\\t\",cntrl:\"\\\\x00-\\\\x1F\\\\x7F\",digit:\"0-9\",graph:\"\\\\x21-\\\\x7E\",lower:\"a-z\",print:\"\\\\x20-\\\\x7E \",punct:\"\\\\-!\\\"#$%&'()\\\\*+,./:;<=>?@[\\\\]^_`{|}~\",space:\" \\\\t\\\\r\\\\n\\\\v\\\\f\",upper:\"A-Z\",word:\"A-Za-z0-9_\",xdigit:\"A-Fa-f0-9\"};Uq.exports={MAX_LENGTH:1024*64,POSIX_REGEX_SOURCE:c0e,REGEX_BACKSLASH:/\\\\(?![*+?^${}(|)[\\]])/g,REGEX_NON_SPECIAL_CHARS:/^[^@![\\].,$*+?^{}()|\\\\/]+/,REGEX_SPECIAL_CHARS:/[-*+?.^${}(|)[\\]]/,REGEX_SPECIAL_CHARS_BACKREF:/(\\\\?)((\\W)(\\3*))/g,REGEX_SPECIAL_CHARS_GLOBAL:/([-*+?.^${}(|)[\\]])/g,REGEX_REMOVE_BACKSLASH:/(?:\\[.*?[^\\\\]\\]|\\\\(?=.))/g,REPLACEMENTS:{\"***\":\"*\",\"**/**\":\"**\",\"**/**/**\":\"**\"},CHAR_0:48,CHAR_9:57,CHAR_UPPERCASE_A:65,CHAR_LOWERCASE_A:97,CHAR_UPPERCASE_Z:90,CHAR_LOWERCASE_Z:122,CHAR_LEFT_PARENTHESES:40,CHAR_RIGHT_PARENTHESES:41,CHAR_ASTERISK:42,CHAR_AMPERSAND:38,CHAR_AT:64,CHAR_BACKWARD_SLASH:92,CHAR_CARRIAGE_RETURN:13,CHAR_CIRCUMFLEX_ACCENT:94,CHAR_COLON:58,CHAR_COMMA:44,CHAR_DOT:46,CHAR_DOUBLE_QUOTE:34,CHAR_EQUAL:61,CHAR_EXCLAMATION_MARK:33,CHAR_FORM_FEED:12,CHAR_FORWARD_SLASH:47,CHAR_GRAVE_ACCENT:96,CHAR_HASH:35,CHAR_HYPHEN_MINUS:45,CHAR_LEFT_ANGLE_BRACKET:60,CHAR_LEFT_CURLY_BRACE:123,CHAR_LEFT_SQUARE_BRACKET:91,CHAR_LINE_FEED:10,CHAR_NO_BREAK_SPACE:160,CHAR_PERCENT:37,CHAR_PLUS:43,CHAR_QUESTION_MARK:63,CHAR_RIGHT_ANGLE_BRACKET:62,CHAR_RIGHT_CURLY_BRACE:125,CHAR_RIGHT_SQUARE_BRACKET:93,CHAR_SEMICOLON:59,CHAR_SINGLE_QUOTE:39,CHAR_SPACE:32,CHAR_TAB:9,CHAR_UNDERSCORE:95,CHAR_VERTICAL_LINE:124,CHAR_ZERO_WIDTH_NOBREAK_SPACE:65279,SEP:$Be.sep,extglobChars(t){return{\"!\":{type:\"negate\",open:\"(?:(?!(?:\",close:`))${t.STAR})`},\"?\":{type:\"qmark\",open:\"(?:\",close:\")?\"},\"+\":{type:\"plus\",open:\"(?:\",close:\")+\"},\"*\":{type:\"star\",open:\"(?:\",close:\")*\"},\"@\":{type:\"at\",open:\"(?:\",close:\")\"}}},globChars(t){return t===!0?l0e:Gq}}});var Xp=w(kn=>{\"use strict\";var u0e=require(\"path\"),g0e=process.platform===\"win32\",{REGEX_BACKSLASH:f0e,REGEX_REMOVE_BACKSLASH:h0e,REGEX_SPECIAL_CHARS:p0e,REGEX_SPECIAL_CHARS_GLOBAL:d0e}=Vp();kn.isObject=t=>t!==null&&typeof t==\"object\"&&!Array.isArray(t);kn.hasRegexChars=t=>p0e.test(t);kn.isRegexChar=t=>t.length===1&&kn.hasRegexChars(t);kn.escapeRegex=t=>t.replace(d0e,\"\\\\$1\");kn.toPosixSlashes=t=>t.replace(f0e,\"/\");kn.removeBackslashes=t=>t.replace(h0e,e=>e===\"\\\\\"?\"\":e);kn.supportsLookbehinds=()=>{let t=process.version.slice(1).split(\".\").map(Number);return t.length===3&&t[0]>=9||t[0]===8&&t[1]>=10};kn.isWindows=t=>t&&typeof t.windows==\"boolean\"?t.windows:g0e===!0||u0e.sep===\"\\\\\";kn.escapeLast=(t,e,r)=>{let i=t.lastIndexOf(e,r);return i===-1?t:t[i-1]===\"\\\\\"?kn.escapeLast(t,e,i-1):`${t.slice(0,i)}\\\\${t.slice(i)}`};kn.removePrefix=(t,e={})=>{let r=t;return r.startsWith(\"./\")&&(r=r.slice(2),e.prefix=\"./\"),r};kn.wrapOutput=(t,e={},r={})=>{let i=r.contains?\"\":\"^\",n=r.contains?\"\":\"$\",s=`${i}(?:${t})${n}`;return e.negated===!0&&(s=`(?:^(?!${s}).*$)`),s}});var Xq=w((Irt,Yq)=>{\"use strict\";var qq=Xp(),{CHAR_ASTERISK:kS,CHAR_AT:C0e,CHAR_BACKWARD_SLASH:Zp,CHAR_COMMA:m0e,CHAR_DOT:xS,CHAR_EXCLAMATION_MARK:PS,CHAR_FORWARD_SLASH:Jq,CHAR_LEFT_CURLY_BRACE:DS,CHAR_LEFT_PARENTHESES:RS,CHAR_LEFT_SQUARE_BRACKET:E0e,CHAR_PLUS:I0e,CHAR_QUESTION_MARK:Wq,CHAR_RIGHT_CURLY_BRACE:y0e,CHAR_RIGHT_PARENTHESES:zq,CHAR_RIGHT_SQUARE_BRACKET:w0e}=Vp(),_q=t=>t===Jq||t===Zp,Vq=t=>{t.isPrefix!==!0&&(t.depth=t.isGlobstar?Infinity:1)},B0e=(t,e)=>{let 
r=e||{},i=t.length-1,n=r.parts===!0||r.scanToEnd===!0,s=[],o=[],a=[],l=t,c=-1,u=0,g=0,f=!1,h=!1,p=!1,m=!1,y=!1,Q=!1,S=!1,x=!1,M=!1,Y=!1,U=0,J,W,ee={value:\"\",depth:0,isGlob:!1},Z=()=>c>=i,A=()=>l.charCodeAt(c+1),ne=()=>(J=W,l.charCodeAt(++c));for(;c<i;){W=ne();let Ee;if(W===Zp){S=ee.backslashes=!0,W=ne(),W===DS&&(Q=!0);continue}if(Q===!0||W===DS){for(U++;Z()!==!0&&(W=ne());){if(W===Zp){S=ee.backslashes=!0,ne();continue}if(W===DS){U++;continue}if(Q!==!0&&W===xS&&(W=ne())===xS){if(f=ee.isBrace=!0,p=ee.isGlob=!0,Y=!0,n===!0)continue;break}if(Q!==!0&&W===m0e){if(f=ee.isBrace=!0,p=ee.isGlob=!0,Y=!0,n===!0)continue;break}if(W===y0e&&(U--,U===0)){Q=!1,f=ee.isBrace=!0,Y=!0;break}}if(n===!0)continue;break}if(W===Jq){if(s.push(c),o.push(ee),ee={value:\"\",depth:0,isGlob:!1},Y===!0)continue;if(J===xS&&c===u+1){u+=2;continue}g=c+1;continue}if(r.noext!==!0&&(W===I0e||W===C0e||W===kS||W===Wq||W===PS)===!0&&A()===RS){if(p=ee.isGlob=!0,m=ee.isExtglob=!0,Y=!0,W===PS&&c===u&&(M=!0),n===!0){for(;Z()!==!0&&(W=ne());){if(W===Zp){S=ee.backslashes=!0,W=ne();continue}if(W===zq){p=ee.isGlob=!0,Y=!0;break}}continue}break}if(W===kS){if(J===kS&&(y=ee.isGlobstar=!0),p=ee.isGlob=!0,Y=!0,n===!0)continue;break}if(W===Wq){if(p=ee.isGlob=!0,Y=!0,n===!0)continue;break}if(W===E0e){for(;Z()!==!0&&(Ee=ne());){if(Ee===Zp){S=ee.backslashes=!0,ne();continue}if(Ee===w0e){h=ee.isBracket=!0,p=ee.isGlob=!0,Y=!0;break}}if(n===!0)continue;break}if(r.nonegate!==!0&&W===PS&&c===u){x=ee.negated=!0,u++;continue}if(r.noparen!==!0&&W===RS){if(p=ee.isGlob=!0,n===!0){for(;Z()!==!0&&(W=ne());){if(W===RS){S=ee.backslashes=!0,W=ne();continue}if(W===zq){Y=!0;break}}continue}break}if(p===!0){if(Y=!0,n===!0)continue;break}}r.noext===!0&&(m=!1,p=!1);let le=l,Ae=\"\",T=\"\";u>0&&(Ae=l.slice(0,u),l=l.slice(u),g-=u),le&&p===!0&&g>0?(le=l.slice(0,g),T=l.slice(g)):p===!0?(le=\"\",T=l):le=l,le&&le!==\"\"&&le!==\"/\"&&le!==l&&_q(le.charCodeAt(le.length-1))&&(le=le.slice(0,-1)),r.unescape===!0&&(T&&(T=qq.removeBackslashes(T)),le&&S===!0&&(le=qq.removeBackslashes(le)));let L={prefix:Ae,input:t,start:u,base:le,glob:T,isBrace:f,isBracket:h,isGlob:p,isExtglob:m,isGlobstar:y,negated:x,negatedExtglob:M};if(r.tokens===!0&&(L.maxDepth=0,_q(W)||o.push(ee),L.tokens=o),r.parts===!0||r.tokens===!0){let Ee;for(let we=0;we<s.length;we++){let qe=Ee?Ee+1:u,re=s[we],se=t.slice(qe,re);r.tokens&&(we===0&&u!==0?(o[we].isPrefix=!0,o[we].value=Ae):o[we].value=se,Vq(o[we]),L.maxDepth+=o[we].depth),(we!==0||se!==\"\")&&a.push(se),Ee=re}if(Ee&&Ee+1<t.length){let we=t.slice(Ee+1);a.push(we),r.tokens&&(o[o.length-1].value=we,Vq(o[o.length-1]),L.maxDepth+=o[o.length-1].depth)}L.slashes=s,L.parts=a}return L};Yq.exports=B0e});var tJ=w((yrt,Zq)=>{\"use strict\";var Py=Vp(),es=Xp(),{MAX_LENGTH:Dy,POSIX_REGEX_SOURCE:b0e,REGEX_NON_SPECIAL_CHARS:Q0e,REGEX_SPECIAL_CHARS_BACKREF:v0e,REPLACEMENTS:$q}=Py,S0e=(t,e)=>{if(typeof e.expandRange==\"function\")return e.expandRange(...t,e);t.sort();let r=`[${t.join(\"-\")}]`;try{new RegExp(r)}catch(i){return t.map(n=>es.escapeRegex(n)).join(\"..\")}return r},Sg=(t,e)=>`Missing ${t}: \"${e}\" - use \"\\\\\\\\${e}\" to match literal characters`,eJ=(t,e)=>{if(typeof t!=\"string\")throw new TypeError(\"Expected a string\");t=$q[t]||t;let r=N({},e),i=typeof r.maxLength==\"number\"?Math.min(Dy,r.maxLength):Dy,n=t.length;if(n>i)throw new SyntaxError(`Input length: ${n}, exceeds maximum allowed length: ${i}`);let 
s={type:\"bos\",value:\"\",output:r.prepend||\"\"},o=[s],a=r.capture?\"\":\"?:\",l=es.isWindows(e),c=Py.globChars(l),u=Py.extglobChars(c),{DOT_LITERAL:g,PLUS_LITERAL:f,SLASH_LITERAL:h,ONE_CHAR:p,DOTS_SLASH:m,NO_DOT:y,NO_DOT_SLASH:Q,NO_DOTS_SLASH:S,QMARK:x,QMARK_NO_DOT:M,STAR:Y,START_ANCHOR:U}=c,J=X=>`(${a}(?:(?!${U}${X.dot?m:g}).)*?)`,W=r.dot?\"\":y,ee=r.dot?x:M,Z=r.bash===!0?J(r):Y;r.capture&&(Z=`(${Z})`),typeof r.noext==\"boolean\"&&(r.noextglob=r.noext);let A={input:t,index:-1,start:0,dot:r.dot===!0,consumed:\"\",output:\"\",prefix:\"\",backtrack:!1,negated:!1,brackets:0,braces:0,parens:0,quotes:0,globstar:!1,tokens:o};t=es.removePrefix(t,A),n=t.length;let ne=[],le=[],Ae=[],T=s,L,Ee=()=>A.index===n-1,we=A.peek=(X=1)=>t[A.index+X],qe=A.advance=()=>t[++A.index]||\"\",re=()=>t.slice(A.index+1),se=(X=\"\",be=0)=>{A.consumed+=X,A.index+=be},Qe=X=>{A.output+=X.output!=null?X.output:X.value,se(X.value)},he=()=>{let X=1;for(;we()===\"!\"&&(we(2)!==\"(\"||we(3)===\"?\");)qe(),A.start++,X++;return X%2==0?!1:(A.negated=!0,A.start++,!0)},Fe=X=>{A[X]++,Ae.push(X)},Ue=X=>{A[X]--,Ae.pop()},xe=X=>{if(T.type===\"globstar\"){let be=A.braces>0&&(X.type===\"comma\"||X.type===\"brace\"),ce=X.extglob===!0||ne.length&&(X.type===\"pipe\"||X.type===\"paren\");X.type!==\"slash\"&&X.type!==\"paren\"&&!be&&!ce&&(A.output=A.output.slice(0,-T.output.length),T.type=\"star\",T.value=\"*\",T.output=Z,A.output+=T.output)}if(ne.length&&X.type!==\"paren\"&&(ne[ne.length-1].inner+=X.value),(X.value||X.output)&&Qe(X),T&&T.type===\"text\"&&X.type===\"text\"){T.value+=X.value,T.output=(T.output||\"\")+X.value;return}X.prev=T,o.push(X),T=X},ve=(X,be)=>{let ce=te(N({},u[be]),{conditions:1,inner:\"\"});ce.prev=T,ce.parens=A.parens,ce.output=A.output;let fe=(r.capture?\"(\":\"\")+ce.open;Fe(\"parens\"),xe({type:X,value:be,output:A.output?\"\":p}),xe({type:\"paren\",extglob:!0,value:qe(),output:fe}),ne.push(ce)},pe=X=>{let be=X.close+(r.capture?\")\":\"\"),ce;if(X.type===\"negate\"){let fe=Z;X.inner&&X.inner.length>1&&X.inner.includes(\"/\")&&(fe=J(r)),(fe!==Z||Ee()||/^\\)+$/.test(re()))&&(be=X.close=`)$))${fe}`),X.inner.includes(\"*\")&&(ce=re())&&/^\\.[^\\\\/.]+$/.test(ce)&&(be=X.close=`)${ce})${fe})`),X.prev.type===\"bos\"&&(A.negatedExtglob=!0)}xe({type:\"paren\",extglob:!0,value:L,output:be}),Ue(\"parens\")};if(r.fastpaths!==!1&&!/(^[*!]|[/()[\\]{}\"])/.test(t)){let X=!1,be=t.replace(v0e,(ce,fe,gt,Ht,Mt,mi)=>Ht===\"\\\\\"?(X=!0,ce):Ht===\"?\"?fe?fe+Ht+(Mt?x.repeat(Mt.length):\"\"):mi===0?ee+(Mt?x.repeat(Mt.length):\"\"):x.repeat(gt.length):Ht===\".\"?g.repeat(gt.length):Ht===\"*\"?fe?fe+Ht+(Mt?Z:\"\"):Z:fe?ce:`\\\\${ce}`);return X===!0&&(r.unescape===!0?be=be.replace(/\\\\/g,\"\"):be=be.replace(/\\\\+/g,ce=>ce.length%2==0?\"\\\\\\\\\":ce?\"\\\\\":\"\")),be===t&&r.contains===!0?(A.output=t,A):(A.output=es.wrapOutput(be,A,e),A)}for(;!Ee();){if(L=qe(),L===\"\\0\")continue;if(L===\"\\\\\"){let ce=we();if(ce===\"/\"&&r.bash!==!0||ce===\".\"||ce===\";\")continue;if(!ce){L+=\"\\\\\",xe({type:\"text\",value:L});continue}let fe=/^\\\\+/.exec(re()),gt=0;if(fe&&fe[0].length>2&&(gt=fe[0].length,A.index+=gt,gt%2!=0&&(L+=\"\\\\\")),r.unescape===!0?L=qe():L+=qe(),A.brackets===0){xe({type:\"text\",value:L});continue}}if(A.brackets>0&&(L!==\"]\"||T.value===\"[\"||T.value===\"[^\")){if(r.posix!==!1&&L===\":\"){let ce=T.value.slice(1);if(ce.includes(\"[\")&&(T.posix=!0,ce.includes(\":\"))){let 
fe=T.value.lastIndexOf(\"[\"),gt=T.value.slice(0,fe),Ht=T.value.slice(fe+2),Mt=b0e[Ht];if(Mt){T.value=gt+Mt,A.backtrack=!0,qe(),!s.output&&o.indexOf(T)===1&&(s.output=p);continue}}}(L===\"[\"&&we()!==\":\"||L===\"-\"&&we()===\"]\")&&(L=`\\\\${L}`),L===\"]\"&&(T.value===\"[\"||T.value===\"[^\")&&(L=`\\\\${L}`),r.posix===!0&&L===\"!\"&&T.value===\"[\"&&(L=\"^\"),T.value+=L,Qe({value:L});continue}if(A.quotes===1&&L!=='\"'){L=es.escapeRegex(L),T.value+=L,Qe({value:L});continue}if(L==='\"'){A.quotes=A.quotes===1?0:1,r.keepQuotes===!0&&xe({type:\"text\",value:L});continue}if(L===\"(\"){Fe(\"parens\"),xe({type:\"paren\",value:L});continue}if(L===\")\"){if(A.parens===0&&r.strictBrackets===!0)throw new SyntaxError(Sg(\"opening\",\"(\"));let ce=ne[ne.length-1];if(ce&&A.parens===ce.parens+1){pe(ne.pop());continue}xe({type:\"paren\",value:L,output:A.parens?\")\":\"\\\\)\"}),Ue(\"parens\");continue}if(L===\"[\"){if(r.nobracket===!0||!re().includes(\"]\")){if(r.nobracket!==!0&&r.strictBrackets===!0)throw new SyntaxError(Sg(\"closing\",\"]\"));L=`\\\\${L}`}else Fe(\"brackets\");xe({type:\"bracket\",value:L});continue}if(L===\"]\"){if(r.nobracket===!0||T&&T.type===\"bracket\"&&T.value.length===1){xe({type:\"text\",value:L,output:`\\\\${L}`});continue}if(A.brackets===0){if(r.strictBrackets===!0)throw new SyntaxError(Sg(\"opening\",\"[\"));xe({type:\"text\",value:L,output:`\\\\${L}`});continue}Ue(\"brackets\");let ce=T.value.slice(1);if(T.posix!==!0&&ce[0]===\"^\"&&!ce.includes(\"/\")&&(L=`/${L}`),T.value+=L,Qe({value:L}),r.literalBrackets===!1||es.hasRegexChars(ce))continue;let fe=es.escapeRegex(T.value);if(A.output=A.output.slice(0,-T.value.length),r.literalBrackets===!0){A.output+=fe,T.value=fe;continue}T.value=`(${a}${fe}|${T.value})`,A.output+=T.value;continue}if(L===\"{\"&&r.nobrace!==!0){Fe(\"braces\");let ce={type:\"brace\",value:L,output:\"(\",outputIndex:A.output.length,tokensIndex:A.tokens.length};le.push(ce),xe(ce);continue}if(L===\"}\"){let ce=le[le.length-1];if(r.nobrace===!0||!ce){xe({type:\"text\",value:L,output:L});continue}let fe=\")\";if(ce.dots===!0){let gt=o.slice(),Ht=[];for(let Mt=gt.length-1;Mt>=0&&(o.pop(),gt[Mt].type!==\"brace\");Mt--)gt[Mt].type!==\"dots\"&&Ht.unshift(gt[Mt].value);fe=S0e(Ht,r),A.backtrack=!0}if(ce.comma!==!0&&ce.dots!==!0){let gt=A.output.slice(0,ce.outputIndex),Ht=A.tokens.slice(ce.tokensIndex);ce.value=ce.output=\"\\\\{\",L=fe=\"\\\\}\",A.output=gt;for(let Mt of Ht)A.output+=Mt.output||Mt.value}xe({type:\"brace\",value:L,output:fe}),Ue(\"braces\"),le.pop();continue}if(L===\"|\"){ne.length>0&&ne[ne.length-1].conditions++,xe({type:\"text\",value:L});continue}if(L===\",\"){let ce=L,fe=le[le.length-1];fe&&Ae[Ae.length-1]===\"braces\"&&(fe.comma=!0,ce=\"|\"),xe({type:\"comma\",value:L,output:ce});continue}if(L===\"/\"){if(T.type===\"dot\"&&A.index===A.start+1){A.start=A.index+1,A.consumed=\"\",A.output=\"\",o.pop(),T=s;continue}xe({type:\"slash\",value:L,output:h});continue}if(L===\".\"){if(A.braces>0&&T.type===\"dot\"){T.value===\".\"&&(T.output=g);let ce=le[le.length-1];T.type=\"dots\",T.output+=L,T.value+=L,ce.dots=!0;continue}if(A.braces+A.parens===0&&T.type!==\"bos\"&&T.type!==\"slash\"){xe({type:\"text\",value:L,output:g});continue}xe({type:\"dot\",value:L,output:g});continue}if(L===\"?\"){if(!(T&&T.value===\"(\")&&r.noextglob!==!0&&we()===\"(\"&&we(2)!==\"?\"){ve(\"qmark\",L);continue}if(T&&T.type===\"paren\"){let fe=we(),gt=L;if(fe===\"<\"&&!es.supportsLookbehinds())throw new Error(\"Node.js v10 or higher is required for regex 
lookbehinds\");(T.value===\"(\"&&!/[!=<:]/.test(fe)||fe===\"<\"&&!/<([!=]|\\w+>)/.test(re()))&&(gt=`\\\\${L}`),xe({type:\"text\",value:L,output:gt});continue}if(r.dot!==!0&&(T.type===\"slash\"||T.type===\"bos\")){xe({type:\"qmark\",value:L,output:M});continue}xe({type:\"qmark\",value:L,output:x});continue}if(L===\"!\"){if(r.noextglob!==!0&&we()===\"(\"&&(we(2)!==\"?\"||!/[!=<:]/.test(we(3)))){ve(\"negate\",L);continue}if(r.nonegate!==!0&&A.index===0){he();continue}}if(L===\"+\"){if(r.noextglob!==!0&&we()===\"(\"&&we(2)!==\"?\"){ve(\"plus\",L);continue}if(T&&T.value===\"(\"||r.regex===!1){xe({type:\"plus\",value:L,output:f});continue}if(T&&(T.type===\"bracket\"||T.type===\"paren\"||T.type===\"brace\")||A.parens>0){xe({type:\"plus\",value:L});continue}xe({type:\"plus\",value:f});continue}if(L===\"@\"){if(r.noextglob!==!0&&we()===\"(\"&&we(2)!==\"?\"){xe({type:\"at\",extglob:!0,value:L,output:\"\"});continue}xe({type:\"text\",value:L});continue}if(L!==\"*\"){(L===\"$\"||L===\"^\")&&(L=`\\\\${L}`);let ce=Q0e.exec(re());ce&&(L+=ce[0],A.index+=ce[0].length),xe({type:\"text\",value:L});continue}if(T&&(T.type===\"globstar\"||T.star===!0)){T.type=\"star\",T.star=!0,T.value+=L,T.output=Z,A.backtrack=!0,A.globstar=!0,se(L);continue}let X=re();if(r.noextglob!==!0&&/^\\([^?]/.test(X)){ve(\"star\",L);continue}if(T.type===\"star\"){if(r.noglobstar===!0){se(L);continue}let ce=T.prev,fe=ce.prev,gt=ce.type===\"slash\"||ce.type===\"bos\",Ht=fe&&(fe.type===\"star\"||fe.type===\"globstar\");if(r.bash===!0&&(!gt||X[0]&&X[0]!==\"/\")){xe({type:\"star\",value:L,output:\"\"});continue}let Mt=A.braces>0&&(ce.type===\"comma\"||ce.type===\"brace\"),mi=ne.length&&(ce.type===\"pipe\"||ce.type===\"paren\");if(!gt&&ce.type!==\"paren\"&&!Mt&&!mi){xe({type:\"star\",value:L,output:\"\"});continue}for(;X.slice(0,3)===\"/**\";){let jt=t[A.index+4];if(jt&&jt!==\"/\")break;X=X.slice(3),se(\"/**\",3)}if(ce.type===\"bos\"&&Ee()){T.type=\"globstar\",T.value+=L,T.output=J(r),A.output=T.output,A.globstar=!0,se(L);continue}if(ce.type===\"slash\"&&ce.prev.type!==\"bos\"&&!Ht&&Ee()){A.output=A.output.slice(0,-(ce.output+T.output).length),ce.output=`(?:${ce.output}`,T.type=\"globstar\",T.output=J(r)+(r.strictSlashes?\")\":\"|$)\"),T.value+=L,A.globstar=!0,A.output+=ce.output+T.output,se(L);continue}if(ce.type===\"slash\"&&ce.prev.type!==\"bos\"&&X[0]===\"/\"){let jt=X[1]!==void 0?\"|$\":\"\";A.output=A.output.slice(0,-(ce.output+T.output).length),ce.output=`(?:${ce.output}`,T.type=\"globstar\",T.output=`${J(r)}${h}|${h}${jt})`,T.value+=L,A.output+=ce.output+T.output,A.globstar=!0,se(L+qe()),xe({type:\"slash\",value:\"/\",output:\"\"});continue}if(ce.type===\"bos\"&&X[0]===\"/\"){T.type=\"globstar\",T.value+=L,T.output=`(?:^|${h}|${J(r)}${h})`,A.output=T.output,A.globstar=!0,se(L+qe()),xe({type:\"slash\",value:\"/\",output:\"\"});continue}A.output=A.output.slice(0,-T.output.length),T.type=\"globstar\",T.output=J(r),T.value+=L,A.output+=T.output,A.globstar=!0,se(L);continue}let be={type:\"star\",value:L,output:Z};if(r.bash===!0){be.output=\".*?\",(T.type===\"bos\"||T.type===\"slash\")&&(be.output=W+be.output),xe(be);continue}if(T&&(T.type===\"bracket\"||T.type===\"paren\")&&r.regex===!0){be.output=L,xe(be);continue}(A.index===A.start||T.type===\"slash\"||T.type===\"dot\")&&(T.type===\"dot\"?(A.output+=Q,T.output+=Q):r.dot===!0?(A.output+=S,T.output+=S):(A.output+=W,T.output+=W),we()!==\"*\"&&(A.output+=p,T.output+=p)),xe(be)}for(;A.brackets>0;){if(r.strictBrackets===!0)throw new 
SyntaxError(Sg(\"closing\",\"]\"));A.output=es.escapeLast(A.output,\"[\"),Ue(\"brackets\")}for(;A.parens>0;){if(r.strictBrackets===!0)throw new SyntaxError(Sg(\"closing\",\")\"));A.output=es.escapeLast(A.output,\"(\"),Ue(\"parens\")}for(;A.braces>0;){if(r.strictBrackets===!0)throw new SyntaxError(Sg(\"closing\",\"}\"));A.output=es.escapeLast(A.output,\"{\"),Ue(\"braces\")}if(r.strictSlashes!==!0&&(T.type===\"star\"||T.type===\"bracket\")&&xe({type:\"maybe_slash\",value:\"\",output:`${h}?`}),A.backtrack===!0){A.output=\"\";for(let X of A.tokens)A.output+=X.output!=null?X.output:X.value,X.suffix&&(A.output+=X.suffix)}return A};eJ.fastpaths=(t,e)=>{let r=N({},e),i=typeof r.maxLength==\"number\"?Math.min(Dy,r.maxLength):Dy,n=t.length;if(n>i)throw new SyntaxError(`Input length: ${n}, exceeds maximum allowed length: ${i}`);t=$q[t]||t;let s=es.isWindows(e),{DOT_LITERAL:o,SLASH_LITERAL:a,ONE_CHAR:l,DOTS_SLASH:c,NO_DOT:u,NO_DOTS:g,NO_DOTS_SLASH:f,STAR:h,START_ANCHOR:p}=Py.globChars(s),m=r.dot?g:u,y=r.dot?f:u,Q=r.capture?\"\":\"?:\",S={negated:!1,prefix:\"\"},x=r.bash===!0?\".*?\":h;r.capture&&(x=`(${x})`);let M=W=>W.noglobstar===!0?x:`(${Q}(?:(?!${p}${W.dot?c:o}).)*?)`,Y=W=>{switch(W){case\"*\":return`${m}${l}${x}`;case\".*\":return`${o}${l}${x}`;case\"*.*\":return`${m}${x}${o}${l}${x}`;case\"*/*\":return`${m}${x}${a}${l}${y}${x}`;case\"**\":return m+M(r);case\"**/*\":return`(?:${m}${M(r)}${a})?${y}${l}${x}`;case\"**/*.*\":return`(?:${m}${M(r)}${a})?${y}${x}${o}${l}${x}`;case\"**/.*\":return`(?:${m}${M(r)}${a})?${o}${l}${x}`;default:{let ee=/^(.*?)\\.(\\w+)$/.exec(W);if(!ee)return;let Z=Y(ee[1]);return Z?Z+o+ee[2]:void 0}}},U=es.removePrefix(t,S),J=Y(U);return J&&r.strictSlashes!==!0&&(J+=`${a}?`),J};Zq.exports=eJ});var iJ=w((wrt,rJ)=>{\"use strict\";var k0e=require(\"path\"),x0e=Xq(),FS=tJ(),NS=Xp(),P0e=Vp(),D0e=t=>t&&typeof t==\"object\"&&!Array.isArray(t),zr=(t,e,r=!1)=>{if(Array.isArray(t)){let u=t.map(f=>zr(f,e,r));return f=>{for(let h of u){let p=h(f);if(p)return p}return!1}}let i=D0e(t)&&t.tokens&&t.input;if(t===\"\"||typeof t!=\"string\"&&!i)throw new TypeError(\"Expected pattern to be a non-empty string\");let n=e||{},s=NS.isWindows(e),o=i?zr.compileRe(t,e):zr.makeRe(t,e,!1,!0),a=o.state;delete o.state;let l=()=>!1;if(n.ignore){let u=te(N({},e),{ignore:null,onMatch:null,onResult:null});l=zr(n.ignore,u,r)}let c=(u,g=!1)=>{let{isMatch:f,match:h,output:p}=zr.test(u,o,e,{glob:t,posix:s}),m={glob:t,state:a,regex:o,posix:s,input:u,output:p,match:h,isMatch:f};return typeof n.onResult==\"function\"&&n.onResult(m),f===!1?(m.isMatch=!1,g?m:!1):l(u)?(typeof n.onIgnore==\"function\"&&n.onIgnore(m),m.isMatch=!1,g?m:!1):(typeof n.onMatch==\"function\"&&n.onMatch(m),g?m:!0)};return r&&(c.state=a),c};zr.test=(t,e,r,{glob:i,posix:n}={})=>{if(typeof t!=\"string\")throw new TypeError(\"Expected input to be a string\");if(t===\"\")return{isMatch:!1,output:\"\"};let s=r||{},o=s.format||(n?NS.toPosixSlashes:null),a=t===i,l=a&&o?o(t):t;return a===!1&&(l=o?o(t):t,a=l===i),(a===!1||s.capture===!0)&&(s.matchBase===!0||s.basename===!0?a=zr.matchBase(t,e,r,n):a=e.exec(l)),{isMatch:Boolean(a),match:a,output:l}};zr.matchBase=(t,e,r,i=NS.isWindows(r))=>(e instanceof RegExp?e:zr.makeRe(e,r)).test(k0e.basename(t));zr.isMatch=(t,e,r)=>zr(e,r)(t);zr.parse=(t,e)=>Array.isArray(t)?t.map(r=>zr.parse(r,e)):FS(t,te(N({},e),{fastpaths:!1}));zr.scan=(t,e)=>x0e(t,e);zr.compileRe=(t,e,r=!1,i=!1)=>{if(r===!0)return t.output;let 
n=e||{},s=n.contains?\"\":\"^\",o=n.contains?\"\":\"$\",a=`${s}(?:${t.output})${o}`;t&&t.negated===!0&&(a=`^(?!${a}).*$`);let l=zr.toRegex(a,e);return i===!0&&(l.state=t),l};zr.makeRe=(t,e={},r=!1,i=!1)=>{if(!t||typeof t!=\"string\")throw new TypeError(\"Expected a non-empty string\");let n={negated:!1,fastpaths:!0};return e.fastpaths!==!1&&(t[0]===\".\"||t[0]===\"*\")&&(n.output=FS.fastpaths(t,e)),n.output||(n=FS(t,e)),zr.compileRe(n,e,r,i)};zr.toRegex=(t,e)=>{try{let r=e||{};return new RegExp(t,r.flags||(r.nocase?\"i\":\"\"))}catch(r){if(e&&e.debug===!0)throw r;return/$^/}};zr.constants=P0e;rJ.exports=zr});var LS=w((Brt,nJ)=>{\"use strict\";nJ.exports=iJ()});var ts=w((brt,sJ)=>{\"use strict\";var oJ=require(\"util\"),aJ=Mq(),Yo=LS(),TS=Xp(),AJ=t=>t===\"\"||t===\"./\",Pr=(t,e,r)=>{e=[].concat(e),t=[].concat(t);let i=new Set,n=new Set,s=new Set,o=0,a=u=>{s.add(u.output),r&&r.onResult&&r.onResult(u)};for(let u=0;u<e.length;u++){let g=Yo(String(e[u]),te(N({},r),{onResult:a}),!0),f=g.state.negated||g.state.negatedExtglob;f&&o++;for(let h of t){let p=g(h,!0);!(f?!p.isMatch:p.isMatch)||(f?i.add(p.output):(i.delete(p.output),n.add(p.output)))}}let c=(o===e.length?[...s]:[...n]).filter(u=>!i.has(u));if(r&&c.length===0){if(r.failglob===!0)throw new Error(`No matches found for \"${e.join(\", \")}\"`);if(r.nonull===!0||r.nullglob===!0)return r.unescape?e.map(u=>u.replace(/\\\\/g,\"\")):e}return c};Pr.match=Pr;Pr.matcher=(t,e)=>Yo(t,e);Pr.isMatch=(t,e,r)=>Yo(e,r)(t);Pr.any=Pr.isMatch;Pr.not=(t,e,r={})=>{e=[].concat(e).map(String);let i=new Set,n=[],s=a=>{r.onResult&&r.onResult(a),n.push(a.output)},o=Pr(t,e,te(N({},r),{onResult:s}));for(let a of n)o.includes(a)||i.add(a);return[...i]};Pr.contains=(t,e,r)=>{if(typeof t!=\"string\")throw new TypeError(`Expected a string: \"${oJ.inspect(t)}\"`);if(Array.isArray(e))return e.some(i=>Pr.contains(t,i,r));if(typeof e==\"string\"){if(AJ(t)||AJ(e))return!1;if(t.includes(e)||t.startsWith(\"./\")&&t.slice(2).includes(e))return!0}return Pr.isMatch(t,e,te(N({},r),{contains:!0}))};Pr.matchKeys=(t,e,r)=>{if(!TS.isObject(t))throw new TypeError(\"Expected the first argument to be an object\");let i=Pr(Object.keys(t),e,r),n={};for(let s of i)n[s]=t[s];return n};Pr.some=(t,e,r)=>{let i=[].concat(t);for(let n of[].concat(e)){let s=Yo(String(n),r);if(i.some(o=>s(o)))return!0}return!1};Pr.every=(t,e,r)=>{let i=[].concat(t);for(let n of[].concat(e)){let s=Yo(String(n),r);if(!i.every(o=>s(o)))return!1}return!0};Pr.all=(t,e,r)=>{if(typeof t!=\"string\")throw new TypeError(`Expected a string: \"${oJ.inspect(t)}\"`);return[].concat(e).every(i=>Yo(i,r)(t))};Pr.capture=(t,e,r)=>{let i=TS.isWindows(r),s=Yo.makeRe(String(t),te(N({},r),{capture:!0})).exec(i?TS.toPosixSlashes(e):e);if(s)return s.slice(1).map(o=>o===void 0?\"\":o)};Pr.makeRe=(...t)=>Yo.makeRe(...t);Pr.scan=(...t)=>Yo.scan(...t);Pr.parse=(t,e)=>{let r=[];for(let i of[].concat(t||[]))for(let n of aJ(String(i),e))r.push(Yo.parse(n,e));return r};Pr.braces=(t,e)=>{if(typeof t!=\"string\")throw new TypeError(\"Expected a string\");return e&&e.nobrace===!0||!/\\{.*\\}/.test(t)?[t]:aJ(t,e)};Pr.braceExpand=(t,e)=>{if(typeof t!=\"string\")throw new TypeError(\"Expected a string\");return Pr.braces(t,te(N({},e),{expand:!0}))};sJ.exports=Pr});var cJ=w((Qrt,lJ)=>{\"use strict\";lJ.exports=({onlyFirst:t=!1}={})=>{let 
e=[\"[\\\\u001B\\\\u009B][[\\\\]()#;?]*(?:(?:(?:(?:;[-a-zA-Z\\\\d\\\\/#&.:=?%@~_]+)*|[a-zA-Z\\\\d]+(?:;[-a-zA-Z\\\\d\\\\/#&.:=?%@~_]*)*)?\\\\u0007)\",\"(?:(?:\\\\d{1,4}(?:;\\\\d{0,4})*)?[\\\\dA-PR-TZcf-ntqry=><~]))\"].join(\"|\");return new RegExp(e,t?void 0:\"g\")}});var gJ=w((vrt,uJ)=>{\"use strict\";var R0e=cJ();uJ.exports=t=>typeof t==\"string\"?t.replace(R0e(),\"\"):t});var kJ=w((Yrt,SJ)=>{\"use strict\";SJ.exports=(...t)=>[...new Set([].concat(...t))]});var XS=w((qrt,xJ)=>{\"use strict\";var Y0e=require(\"stream\"),PJ=Y0e.PassThrough,q0e=Array.prototype.slice;xJ.exports=J0e;function J0e(){let t=[],e=!1,r=q0e.call(arguments),i=r[r.length-1];i&&!Array.isArray(i)&&i.pipe==null?r.pop():i={};let n=i.end!==!1;i.objectMode==null&&(i.objectMode=!0),i.highWaterMark==null&&(i.highWaterMark=64*1024);let s=PJ(i);function o(){for(let c=0,u=arguments.length;c<u;c++)t.push(DJ(arguments[c],i));return a(),this}function a(){if(e)return;e=!0;let c=t.shift();if(!c){process.nextTick(l);return}Array.isArray(c)||(c=[c]);let u=c.length+1;function g(){--u>0||(e=!1,a())}function f(h){function p(){h.removeListener(\"merge2UnpipeEnd\",p),h.removeListener(\"end\",p),g()}if(h._readableState.endEmitted)return g();h.on(\"merge2UnpipeEnd\",p),h.on(\"end\",p),h.pipe(s,{end:!1}),h.resume()}for(let h=0;h<c.length;h++)f(c[h]);g()}function l(){return e=!1,s.emit(\"queueDrain\"),n&&s.end()}return s.setMaxListeners(0),s.add=o,s.on(\"unpipe\",function(c){c.emit(\"merge2UnpipeEnd\")}),r.length&&o.apply(null,r),s}function DJ(t,e){if(Array.isArray(t))for(let r=0,i=t.length;r<i;r++)t[r]=DJ(t[r],e);else{if(!t._readableState&&t.pipe&&(t=t.pipe(PJ(e))),!t._readableState||!t.pause||!t.pipe)throw new Error(\"Only readable stream can be merged.\");t.pause()}return t}});var RJ=w(Ty=>{\"use strict\";Object.defineProperty(Ty,\"__esModule\",{value:!0});function W0e(t){return t.reduce((e,r)=>[].concat(e,r),[])}Ty.flatten=W0e;function z0e(t,e){let r=[[]],i=0;for(let n of t)e(n)?(i++,r[i]=[]):r[i].push(n);return r}Ty.splitWhen=z0e});var FJ=w(ZS=>{\"use strict\";Object.defineProperty(ZS,\"__esModule\",{value:!0});function _0e(t){return t.code===\"ENOENT\"}ZS.isEnoentCodeError=_0e});var LJ=w($S=>{\"use strict\";Object.defineProperty($S,\"__esModule\",{value:!0});var NJ=class{constructor(e,r){this.name=e,this.isBlockDevice=r.isBlockDevice.bind(r),this.isCharacterDevice=r.isCharacterDevice.bind(r),this.isDirectory=r.isDirectory.bind(r),this.isFIFO=r.isFIFO.bind(r),this.isFile=r.isFile.bind(r),this.isSocket=r.isSocket.bind(r),this.isSymbolicLink=r.isSymbolicLink.bind(r)}};function V0e(t,e){return new NJ(t,e)}$S.createDirentFromStats=V0e});var TJ=w(Ng=>{\"use strict\";Object.defineProperty(Ng,\"__esModule\",{value:!0});var X0e=require(\"path\"),Z0e=2,$0e=/(\\\\?)([()*?[\\]{|}]|^!|[!+@](?=\\())/g;function ebe(t){return t.replace(/\\\\/g,\"/\")}Ng.unixify=ebe;function tbe(t,e){return X0e.resolve(t,e)}Ng.makeAbsolute=tbe;function rbe(t){return t.replace($0e,\"\\\\$2\")}Ng.escape=rbe;function ibe(t){if(t.charAt(0)===\".\"){let e=t.charAt(1);if(e===\"/\"||e===\"\\\\\")return t.slice(Z0e)}return t}Ng.removeLeadingDotSegment=ibe});var MJ=w((Vrt,OJ)=>{OJ.exports=function(e){if(typeof e!=\"string\"||e===\"\")return!1;for(var r;r=/(\\\\).|([@?!+*]\\(.*\\))/g.exec(e);){if(r[2])return!0;e=e.slice(r.index+r[0].length)}return!1}});var HJ=w((Xrt,UJ)=>{var nbe=MJ(),KJ={\"{\":\"}\",\"(\":\")\",\"[\":\"]\"},sbe=function(t){if(t[0]===\"!\")return!0;for(var 
e=0,r=-2,i=-2,n=-2,s=-2,o=-2;e<t.length;){if(t[e]===\"*\"||t[e+1]===\"?\"&&/[\\].+)]/.test(t[e])||i!==-1&&t[e]===\"[\"&&t[e+1]!==\"]\"&&(i<e&&(i=t.indexOf(\"]\",e)),i>e&&(o===-1||o>i||(o=t.indexOf(\"\\\\\",e),o===-1||o>i)))||n!==-1&&t[e]===\"{\"&&t[e+1]!==\"}\"&&(n=t.indexOf(\"}\",e),n>e&&(o=t.indexOf(\"\\\\\",e),o===-1||o>n))||s!==-1&&t[e]===\"(\"&&t[e+1]===\"?\"&&/[:!=]/.test(t[e+2])&&t[e+3]!==\")\"&&(s=t.indexOf(\")\",e),s>e&&(o=t.indexOf(\"\\\\\",e),o===-1||o>s))||r!==-1&&t[e]===\"(\"&&t[e+1]!==\"|\"&&(r<e&&(r=t.indexOf(\"|\",e)),r!==-1&&t[r+1]!==\")\"&&(s=t.indexOf(\")\",r),s>r&&(o=t.indexOf(\"\\\\\",r),o===-1||o>s))))return!0;if(t[e]===\"\\\\\"){var a=t[e+1];e+=2;var l=KJ[a];if(l){var c=t.indexOf(l,e);c!==-1&&(e=c+1)}if(t[e]===\"!\")return!0}else e++}return!1},obe=function(t){if(t[0]===\"!\")return!0;for(var e=0;e<t.length;){if(/[*?{}()[\\]]/.test(t[e]))return!0;if(t[e]===\"\\\\\"){var r=t[e+1];e+=2;var i=KJ[r];if(i){var n=t.indexOf(i,e);n!==-1&&(e=n+1)}if(t[e]===\"!\")return!0}else e++}return!1};UJ.exports=function(e,r){if(typeof e!=\"string\"||e===\"\")return!1;if(nbe(e))return!0;var i=sbe;return r&&r.strict===!1&&(i=obe),i(e)}});var GJ=w((Zrt,jJ)=>{\"use strict\";var abe=HJ(),Abe=require(\"path\").posix.dirname,lbe=require(\"os\").platform()===\"win32\",ek=\"/\",cbe=/\\\\/g,ube=/[\\{\\[].*[\\}\\]]$/,gbe=/(^|[^\\\\])([\\{\\[]|\\([^\\)]+$)/,fbe=/\\\\([\\!\\*\\?\\|\\[\\]\\(\\)\\{\\}])/g;jJ.exports=function(e,r){var i=Object.assign({flipBackslashes:!0},r);i.flipBackslashes&&lbe&&e.indexOf(ek)<0&&(e=e.replace(cbe,ek)),ube.test(e)&&(e+=ek),e+=\"a\";do e=Abe(e);while(abe(e)||gbe.test(e));return e.replace(fbe,\"$1\")}});var ZJ=w(ni=>{\"use strict\";Object.defineProperty(ni,\"__esModule\",{value:!0});var hbe=require(\"path\"),pbe=GJ(),YJ=ts(),dbe=LS(),qJ=\"**\",Cbe=\"\\\\\",mbe=/[*?]|^!/,Ebe=/\\[.*]/,Ibe=/(?:^|[^!*+?@])\\(.*\\|.*\\)/,ybe=/[!*+?@]\\(.*\\)/,wbe=/{.*(?:,|\\.\\.).*}/;function WJ(t,e={}){return!JJ(t,e)}ni.isStaticPattern=WJ;function JJ(t,e={}){return!!(e.caseSensitiveMatch===!1||t.includes(Cbe)||mbe.test(t)||Ebe.test(t)||Ibe.test(t)||e.extglob!==!1&&ybe.test(t)||e.braceExpansion!==!1&&wbe.test(t))}ni.isDynamicPattern=JJ;function Bbe(t){return Oy(t)?t.slice(1):t}ni.convertToPositivePattern=Bbe;function bbe(t){return\"!\"+t}ni.convertToNegativePattern=bbe;function Oy(t){return t.startsWith(\"!\")&&t[1]!==\"(\"}ni.isNegativePattern=Oy;function zJ(t){return!Oy(t)}ni.isPositivePattern=zJ;function Qbe(t){return t.filter(Oy)}ni.getNegativePatterns=Qbe;function vbe(t){return t.filter(zJ)}ni.getPositivePatterns=vbe;function Sbe(t){return pbe(t,{flipBackslashes:!1})}ni.getBaseDirectory=Sbe;function kbe(t){return t.includes(qJ)}ni.hasGlobStar=kbe;function _J(t){return t.endsWith(\"/\"+qJ)}ni.endsWithSlashGlobStar=_J;function xbe(t){let e=hbe.basename(t);return _J(t)||WJ(e)}ni.isAffectDepthOfReadingPattern=xbe;function Pbe(t){return t.reduce((e,r)=>e.concat(VJ(r)),[])}ni.expandPatternsWithBraceExpansion=Pbe;function VJ(t){return YJ.braces(t,{expand:!0,nodupes:!0})}ni.expandBraceExpansion=VJ;function Dbe(t,e){let r=dbe.scan(t,Object.assign(Object.assign({},e),{parts:!0}));return r.parts.length===0?[t]:r.parts}ni.getPatternParts=Dbe;function XJ(t,e){return YJ.makeRe(t,e)}ni.makeRe=XJ;function Rbe(t,e){return t.map(r=>XJ(r,e))}ni.convertPatternsToRe=Rbe;function Fbe(t,e){return e.some(r=>r.test(t))}ni.matchAny=Fbe});var e3=w(tk=>{\"use strict\";Object.defineProperty(tk,\"__esModule\",{value:!0});var Nbe=XS();function Lbe(t){let e=Nbe(t);return 
t.forEach(r=>{r.once(\"error\",i=>e.emit(\"error\",i))}),e.once(\"close\",()=>$J(t)),e.once(\"end\",()=>$J(t)),e}tk.merge=Lbe;function $J(t){t.forEach(e=>e.emit(\"close\"))}});var t3=w(My=>{\"use strict\";Object.defineProperty(My,\"__esModule\",{value:!0});function Tbe(t){return typeof t==\"string\"}My.isString=Tbe;function Obe(t){return t===\"\"}My.isEmpty=Obe});var Wa=w(Ja=>{\"use strict\";Object.defineProperty(Ja,\"__esModule\",{value:!0});var Mbe=RJ();Ja.array=Mbe;var Ube=FJ();Ja.errno=Ube;var Kbe=LJ();Ja.fs=Kbe;var Hbe=TJ();Ja.path=Hbe;var jbe=ZJ();Ja.pattern=jbe;var Gbe=e3();Ja.stream=Gbe;var Ybe=t3();Ja.string=Ybe});var o3=w(za=>{\"use strict\";Object.defineProperty(za,\"__esModule\",{value:!0});var Rc=Wa();function qbe(t,e){let r=r3(t),i=i3(t,e.ignore),n=r.filter(l=>Rc.pattern.isStaticPattern(l,e)),s=r.filter(l=>Rc.pattern.isDynamicPattern(l,e)),o=rk(n,i,!1),a=rk(s,i,!0);return o.concat(a)}za.generate=qbe;function rk(t,e,r){let i=n3(t);return\".\"in i?[ik(\".\",t,e,r)]:s3(i,e,r)}za.convertPatternsToTasks=rk;function r3(t){return Rc.pattern.getPositivePatterns(t)}za.getPositivePatterns=r3;function i3(t,e){return Rc.pattern.getNegativePatterns(t).concat(e).map(Rc.pattern.convertToPositivePattern)}za.getNegativePatternsAsPositive=i3;function n3(t){let e={};return t.reduce((r,i)=>{let n=Rc.pattern.getBaseDirectory(i);return n in r?r[n].push(i):r[n]=[i],r},e)}za.groupPatternsByBaseDirectory=n3;function s3(t,e,r){return Object.keys(t).map(i=>ik(i,t[i],e,r))}za.convertPatternGroupsToTasks=s3;function ik(t,e,r,i){return{dynamic:i,positive:e,negative:r,base:t,patterns:[].concat(e,r.map(Rc.pattern.convertToNegativePattern))}}za.convertPatternGroupToTask=ik});var A3=w(Uy=>{\"use strict\";Object.defineProperty(Uy,\"__esModule\",{value:!0});Uy.read=void 0;function Jbe(t,e,r){e.fs.lstat(t,(i,n)=>{if(i!==null){a3(r,i);return}if(!n.isSymbolicLink()||!e.followSymbolicLink){nk(r,n);return}e.fs.stat(t,(s,o)=>{if(s!==null){if(e.throwErrorOnBrokenSymbolicLink){a3(r,s);return}nk(r,n);return}e.markSymbolicLink&&(o.isSymbolicLink=()=>!0),nk(r,o)})})}Uy.read=Jbe;function a3(t,e){t(e)}function nk(t,e){t(null,e)}});var l3=w(Ky=>{\"use strict\";Object.defineProperty(Ky,\"__esModule\",{value:!0});Ky.read=void 0;function Wbe(t,e){let r=e.fs.lstatSync(t);if(!r.isSymbolicLink()||!e.followSymbolicLink)return r;try{let i=e.fs.statSync(t);return e.markSymbolicLink&&(i.isSymbolicLink=()=>!0),i}catch(i){if(!e.throwErrorOnBrokenSymbolicLink)return r;throw i}}Ky.read=Wbe});var c3=w(XA=>{\"use strict\";Object.defineProperty(XA,\"__esModule\",{value:!0});XA.createFileSystemAdapter=XA.FILE_SYSTEM_ADAPTER=void 0;var Hy=require(\"fs\");XA.FILE_SYSTEM_ADAPTER={lstat:Hy.lstat,stat:Hy.stat,lstatSync:Hy.lstatSync,statSync:Hy.statSync};function zbe(t){return t===void 0?XA.FILE_SYSTEM_ADAPTER:Object.assign(Object.assign({},XA.FILE_SYSTEM_ADAPTER),t)}XA.createFileSystemAdapter=zbe});var g3=w(sk=>{\"use strict\";Object.defineProperty(sk,\"__esModule\",{value:!0});var _be=c3(),u3=class{constructor(e={}){this._options=e,this.followSymbolicLink=this._getValue(this._options.followSymbolicLink,!0),this.fs=_be.createFileSystemAdapter(this._options.fs),this.markSymbolicLink=this._getValue(this._options.markSymbolicLink,!1),this.throwErrorOnBrokenSymbolicLink=this._getValue(this._options.throwErrorOnBrokenSymbolicLink,!0)}_getValue(e,r){return e!=null?e:r}};sk.default=u3});var Fc=w(ZA=>{\"use strict\";Object.defineProperty(ZA,\"__esModule\",{value:!0});ZA.statSync=ZA.stat=ZA.Settings=void 0;var 
f3=A3(),Vbe=l3(),ok=g3();ZA.Settings=ok.default;function Xbe(t,e,r){if(typeof e==\"function\"){f3.read(t,ak(),e);return}f3.read(t,ak(e),r)}ZA.stat=Xbe;function Zbe(t,e){let r=ak(e);return Vbe.read(t,r)}ZA.statSync=Zbe;function ak(t={}){return t instanceof ok.default?t:new ok.default(t)}});var p3=w((lit,h3)=>{h3.exports=$be;function $be(t,e){var r,i,n,s=!0;Array.isArray(t)?(r=[],i=t.length):(n=Object.keys(t),r={},i=n.length);function o(l){function c(){e&&e(l,r),e=null}s?process.nextTick(c):c()}function a(l,c,u){r[l]=u,(--i==0||c)&&o(c)}i?n?n.forEach(function(l){t[l](function(c,u){a(l,c,u)})}):t.forEach(function(l,c){l(function(u,g){a(c,u,g)})}):o(null),s=!1}});var Ak=w(jy=>{\"use strict\";Object.defineProperty(jy,\"__esModule\",{value:!0});jy.IS_SUPPORT_READDIR_WITH_FILE_TYPES=void 0;var Gy=process.versions.node.split(\".\");if(Gy[0]===void 0||Gy[1]===void 0)throw new Error(`Unexpected behavior. The 'process.versions.node' variable has invalid value: ${process.versions.node}`);var d3=Number.parseInt(Gy[0],10),eQe=Number.parseInt(Gy[1],10),C3=10,tQe=10,rQe=d3>C3,iQe=d3===C3&&eQe>=tQe;jy.IS_SUPPORT_READDIR_WITH_FILE_TYPES=rQe||iQe});var E3=w(Yy=>{\"use strict\";Object.defineProperty(Yy,\"__esModule\",{value:!0});Yy.createDirentFromStats=void 0;var m3=class{constructor(e,r){this.name=e,this.isBlockDevice=r.isBlockDevice.bind(r),this.isCharacterDevice=r.isCharacterDevice.bind(r),this.isDirectory=r.isDirectory.bind(r),this.isFIFO=r.isFIFO.bind(r),this.isFile=r.isFile.bind(r),this.isSocket=r.isSocket.bind(r),this.isSymbolicLink=r.isSymbolicLink.bind(r)}};function nQe(t,e){return new m3(t,e)}Yy.createDirentFromStats=nQe});var lk=w(qy=>{\"use strict\";Object.defineProperty(qy,\"__esModule\",{value:!0});qy.fs=void 0;var sQe=E3();qy.fs=sQe});var ck=w(Jy=>{\"use strict\";Object.defineProperty(Jy,\"__esModule\",{value:!0});Jy.joinPathSegments=void 0;function oQe(t,e,r){return t.endsWith(r)?t+e:t+r+e}Jy.joinPathSegments=oQe});var Q3=w($A=>{\"use strict\";Object.defineProperty($A,\"__esModule\",{value:!0});$A.readdir=$A.readdirWithFileTypes=$A.read=void 0;var aQe=Fc(),I3=p3(),AQe=Ak(),y3=lk(),w3=ck();function lQe(t,e,r){if(!e.stats&&AQe.IS_SUPPORT_READDIR_WITH_FILE_TYPES){B3(t,e,r);return}b3(t,e,r)}$A.read=lQe;function B3(t,e,r){e.fs.readdir(t,{withFileTypes:!0},(i,n)=>{if(i!==null){Wy(r,i);return}let s=n.map(a=>({dirent:a,name:a.name,path:w3.joinPathSegments(t,a.name,e.pathSegmentSeparator)}));if(!e.followSymbolicLinks){uk(r,s);return}let o=s.map(a=>cQe(a,e));I3(o,(a,l)=>{if(a!==null){Wy(r,a);return}uk(r,l)})})}$A.readdirWithFileTypes=B3;function cQe(t,e){return r=>{if(!t.dirent.isSymbolicLink()){r(null,t);return}e.fs.stat(t.path,(i,n)=>{if(i!==null){if(e.throwErrorOnBrokenSymbolicLink){r(i);return}r(null,t);return}t.dirent=y3.fs.createDirentFromStats(t.name,n),r(null,t)})}}function b3(t,e,r){e.fs.readdir(t,(i,n)=>{if(i!==null){Wy(r,i);return}let s=n.map(o=>{let a=w3.joinPathSegments(t,o,e.pathSegmentSeparator);return l=>{aQe.stat(a,e.fsStatSettings,(c,u)=>{if(c!==null){l(c);return}let g={name:o,path:a,dirent:y3.fs.createDirentFromStats(o,u)};e.stats&&(g.stats=u),l(null,g)})}});I3(s,(o,a)=>{if(o!==null){Wy(r,o);return}uk(r,a)})})}$A.readdir=b3;function Wy(t,e){t(e)}function uk(t,e){t(null,e)}});var P3=w(el=>{\"use strict\";Object.defineProperty(el,\"__esModule\",{value:!0});el.readdir=el.readdirWithFileTypes=el.read=void 0;var uQe=Fc(),gQe=Ak(),v3=lk(),S3=ck();function fQe(t,e){return!e.stats&&gQe.IS_SUPPORT_READDIR_WITH_FILE_TYPES?k3(t,e):x3(t,e)}el.read=fQe;function k3(t,e){return 
e.fs.readdirSync(t,{withFileTypes:!0}).map(i=>{let n={dirent:i,name:i.name,path:S3.joinPathSegments(t,i.name,e.pathSegmentSeparator)};if(n.dirent.isSymbolicLink()&&e.followSymbolicLinks)try{let s=e.fs.statSync(n.path);n.dirent=v3.fs.createDirentFromStats(n.name,s)}catch(s){if(e.throwErrorOnBrokenSymbolicLink)throw s}return n})}el.readdirWithFileTypes=k3;function x3(t,e){return e.fs.readdirSync(t).map(i=>{let n=S3.joinPathSegments(t,i,e.pathSegmentSeparator),s=uQe.statSync(n,e.fsStatSettings),o={name:i,path:n,dirent:v3.fs.createDirentFromStats(i,s)};return e.stats&&(o.stats=s),o})}el.readdir=x3});var D3=w(tl=>{\"use strict\";Object.defineProperty(tl,\"__esModule\",{value:!0});tl.createFileSystemAdapter=tl.FILE_SYSTEM_ADAPTER=void 0;var Lg=require(\"fs\");tl.FILE_SYSTEM_ADAPTER={lstat:Lg.lstat,stat:Lg.stat,lstatSync:Lg.lstatSync,statSync:Lg.statSync,readdir:Lg.readdir,readdirSync:Lg.readdirSync};function hQe(t){return t===void 0?tl.FILE_SYSTEM_ADAPTER:Object.assign(Object.assign({},tl.FILE_SYSTEM_ADAPTER),t)}tl.createFileSystemAdapter=hQe});var F3=w(gk=>{\"use strict\";Object.defineProperty(gk,\"__esModule\",{value:!0});var pQe=require(\"path\"),dQe=Fc(),CQe=D3(),R3=class{constructor(e={}){this._options=e,this.followSymbolicLinks=this._getValue(this._options.followSymbolicLinks,!1),this.fs=CQe.createFileSystemAdapter(this._options.fs),this.pathSegmentSeparator=this._getValue(this._options.pathSegmentSeparator,pQe.sep),this.stats=this._getValue(this._options.stats,!1),this.throwErrorOnBrokenSymbolicLink=this._getValue(this._options.throwErrorOnBrokenSymbolicLink,!0),this.fsStatSettings=new dQe.Settings({followSymbolicLink:this.followSymbolicLinks,fs:this.fs,throwErrorOnBrokenSymbolicLink:this.throwErrorOnBrokenSymbolicLink})}_getValue(e,r){return e!=null?e:r}};gk.default=R3});var zy=w(rl=>{\"use strict\";Object.defineProperty(rl,\"__esModule\",{value:!0});rl.Settings=rl.scandirSync=rl.scandir=void 0;var N3=Q3(),mQe=P3(),fk=F3();rl.Settings=fk.default;function EQe(t,e,r){if(typeof e==\"function\"){N3.read(t,hk(),e);return}N3.read(t,hk(e),r)}rl.scandir=EQe;function IQe(t,e){let r=hk(e);return mQe.read(t,r)}rl.scandirSync=IQe;function hk(t={}){return t instanceof fk.default?t:new fk.default(t)}});var T3=w((Eit,L3)=>{\"use strict\";function yQe(t){var e=new t,r=e;function i(){var s=e;return s.next?e=s.next:(e=new t,r=e),s.next=null,s}function n(s){r.next=s,r=s}return{get:i,release:n}}L3.exports=yQe});var M3=w((Iit,pk)=>{\"use strict\";var wQe=T3();function O3(t,e,r){if(typeof t==\"function\"&&(r=e,e=t,t=null),r<1)throw new Error(\"fastqueue concurrency must be greater than 1\");var i=wQe(BQe),n=null,s=null,o=0,a=null,l={push:m,drain:Wo,saturated:Wo,pause:u,paused:!1,concurrency:r,running:c,resume:h,idle:p,length:g,getQueue:f,unshift:y,empty:Wo,kill:S,killAndDrain:x,error:M};return l;function c(){return o}function u(){l.paused=!0}function g(){for(var Y=n,U=0;Y;)Y=Y.next,U++;return U}function f(){for(var Y=n,U=[];Y;)U.push(Y.value),Y=Y.next;return U}function h(){if(!!l.paused){l.paused=!1;for(var Y=0;Y<l.concurrency;Y++)o++,Q()}}function p(){return o===0&&l.length()===0}function m(Y,U){var J=i.get();J.context=t,J.release=Q,J.value=Y,J.callback=U||Wo,J.errorHandler=a,o===l.concurrency||l.paused?s?(s.next=J,s=J):(n=J,s=J,l.saturated()):(o++,e.call(t,J.value,J.worked))}function y(Y,U){var J=i.get();J.context=t,J.release=Q,J.value=Y,J.callback=U||Wo,o===l.concurrency||l.paused?n?(J.next=n,n=J):(n=J,s=J,l.saturated()):(o++,e.call(t,J.value,J.worked))}function Q(Y){Y&&i.release(Y);var 
U=n;U?l.paused?o--:(s===n&&(s=null),n=U.next,U.next=null,e.call(t,U.value,U.worked),s===null&&l.empty()):--o==0&&l.drain()}function S(){n=null,s=null,l.drain=Wo}function x(){n=null,s=null,l.drain(),l.drain=Wo}function M(Y){a=Y}}function Wo(){}function BQe(){this.value=null,this.callback=Wo,this.next=null,this.release=Wo,this.context=null,this.errorHandler=null;var t=this;this.worked=function(r,i){var n=t.callback,s=t.errorHandler,o=t.value;t.value=null,t.callback=Wo,t.errorHandler&&s(r,o),n.call(t.context,r,i),t.release(t)}}function bQe(t,e,r){typeof t==\"function\"&&(r=e,e=t,t=null);function i(c,u){e.call(this,c).then(function(g){u(null,g)},u)}var n=O3(t,i,r),s=n.push,o=n.unshift;return n.push=a,n.unshift=l,n;function a(c){return new Promise(function(u,g){s(c,function(f,h){if(f){g(f);return}u(h)})})}function l(c){return new Promise(function(u,g){o(c,function(f,h){if(f){g(f);return}u(h)})})}}pk.exports=O3;pk.exports.promise=bQe});var _y=w(zo=>{\"use strict\";Object.defineProperty(zo,\"__esModule\",{value:!0});zo.joinPathSegments=zo.replacePathSegmentSeparator=zo.isAppliedFilter=zo.isFatalError=void 0;function QQe(t,e){return t.errorFilter===null?!0:!t.errorFilter(e)}zo.isFatalError=QQe;function vQe(t,e){return t===null||t(e)}zo.isAppliedFilter=vQe;function SQe(t,e){return t.split(/[/\\\\]/).join(e)}zo.replacePathSegmentSeparator=SQe;function kQe(t,e,r){return t===\"\"?e:t.endsWith(r)?t+e:t+r+e}zo.joinPathSegments=kQe});var Ck=w(dk=>{\"use strict\";Object.defineProperty(dk,\"__esModule\",{value:!0});var xQe=_y(),U3=class{constructor(e,r){this._root=e,this._settings=r,this._root=xQe.replacePathSegmentSeparator(e,r.pathSegmentSeparator)}};dk.default=U3});var Ek=w(mk=>{\"use strict\";Object.defineProperty(mk,\"__esModule\",{value:!0});var PQe=require(\"events\"),DQe=zy(),RQe=M3(),Vy=_y(),FQe=Ck(),K3=class extends FQe.default{constructor(e,r){super(e,r);this._settings=r,this._scandir=DQe.scandir,this._emitter=new PQe.EventEmitter,this._queue=RQe(this._worker.bind(this),this._settings.concurrency),this._isFatalError=!1,this._isDestroyed=!1,this._queue.drain=()=>{this._isFatalError||this._emitter.emit(\"end\")}}read(){return this._isFatalError=!1,this._isDestroyed=!1,setImmediate(()=>{this._pushToQueue(this._root,this._settings.basePath)}),this._emitter}get isDestroyed(){return this._isDestroyed}destroy(){if(this._isDestroyed)throw new Error(\"The reader is already destroyed\");this._isDestroyed=!0,this._queue.killAndDrain()}onEntry(e){this._emitter.on(\"entry\",e)}onError(e){this._emitter.once(\"error\",e)}onEnd(e){this._emitter.once(\"end\",e)}_pushToQueue(e,r){let i={directory:e,base:r};this._queue.push(i,n=>{n!==null&&this._handleError(n)})}_worker(e,r){this._scandir(e.directory,this._settings.fsScandirSettings,(i,n)=>{if(i!==null){r(i,void 0);return}for(let s of n)this._handleEntry(s,e.base);r(null,void 0)})}_handleError(e){this._isDestroyed||!Vy.isFatalError(this._settings,e)||(this._isFatalError=!0,this._isDestroyed=!0,this._emitter.emit(\"error\",e))}_handleEntry(e,r){if(this._isDestroyed||this._isFatalError)return;let i=e.path;r!==void 0&&(e.path=Vy.joinPathSegments(r,e.name,this._settings.pathSegmentSeparator)),Vy.isAppliedFilter(this._settings.entryFilter,e)&&this._emitEntry(e),e.dirent.isDirectory()&&Vy.isAppliedFilter(this._settings.deepFilter,e)&&this._pushToQueue(i,e.path)}_emitEntry(e){this._emitter.emit(\"entry\",e)}};mk.default=K3});var j3=w(Ik=>{\"use strict\";Object.defineProperty(Ik,\"__esModule\",{value:!0});var 
NQe=Ek(),H3=class{constructor(e,r){this._root=e,this._settings=r,this._reader=new NQe.default(this._root,this._settings),this._storage=new Set}read(e){this._reader.onError(r=>{LQe(e,r)}),this._reader.onEntry(r=>{this._storage.add(r)}),this._reader.onEnd(()=>{TQe(e,[...this._storage])}),this._reader.read()}};Ik.default=H3;function LQe(t,e){t(e)}function TQe(t,e){t(null,e)}});var Y3=w(yk=>{\"use strict\";Object.defineProperty(yk,\"__esModule\",{value:!0});var OQe=require(\"stream\"),MQe=Ek(),G3=class{constructor(e,r){this._root=e,this._settings=r,this._reader=new MQe.default(this._root,this._settings),this._stream=new OQe.Readable({objectMode:!0,read:()=>{},destroy:()=>{this._reader.isDestroyed||this._reader.destroy()}})}read(){return this._reader.onError(e=>{this._stream.emit(\"error\",e)}),this._reader.onEntry(e=>{this._stream.push(e)}),this._reader.onEnd(()=>{this._stream.push(null)}),this._reader.read(),this._stream}};yk.default=G3});var J3=w(wk=>{\"use strict\";Object.defineProperty(wk,\"__esModule\",{value:!0});var UQe=zy(),Xy=_y(),KQe=Ck(),q3=class extends KQe.default{constructor(){super(...arguments);this._scandir=UQe.scandirSync,this._storage=new Set,this._queue=new Set}read(){return this._pushToQueue(this._root,this._settings.basePath),this._handleQueue(),[...this._storage]}_pushToQueue(e,r){this._queue.add({directory:e,base:r})}_handleQueue(){for(let e of this._queue.values())this._handleDirectory(e.directory,e.base)}_handleDirectory(e,r){try{let i=this._scandir(e,this._settings.fsScandirSettings);for(let n of i)this._handleEntry(n,r)}catch(i){this._handleError(i)}}_handleError(e){if(!!Xy.isFatalError(this._settings,e))throw e}_handleEntry(e,r){let i=e.path;r!==void 0&&(e.path=Xy.joinPathSegments(r,e.name,this._settings.pathSegmentSeparator)),Xy.isAppliedFilter(this._settings.entryFilter,e)&&this._pushToStorage(e),e.dirent.isDirectory()&&Xy.isAppliedFilter(this._settings.deepFilter,e)&&this._pushToQueue(i,e.path)}_pushToStorage(e){this._storage.add(e)}};wk.default=q3});var z3=w(Bk=>{\"use strict\";Object.defineProperty(Bk,\"__esModule\",{value:!0});var HQe=J3(),W3=class{constructor(e,r){this._root=e,this._settings=r,this._reader=new HQe.default(this._root,this._settings)}read(){return this._reader.read()}};Bk.default=W3});var V3=w(bk=>{\"use strict\";Object.defineProperty(bk,\"__esModule\",{value:!0});var jQe=require(\"path\"),GQe=zy(),_3=class{constructor(e={}){this._options=e,this.basePath=this._getValue(this._options.basePath,void 0),this.concurrency=this._getValue(this._options.concurrency,Number.POSITIVE_INFINITY),this.deepFilter=this._getValue(this._options.deepFilter,null),this.entryFilter=this._getValue(this._options.entryFilter,null),this.errorFilter=this._getValue(this._options.errorFilter,null),this.pathSegmentSeparator=this._getValue(this._options.pathSegmentSeparator,jQe.sep),this.fsScandirSettings=new GQe.Settings({followSymbolicLinks:this._options.followSymbolicLinks,fs:this._options.fs,pathSegmentSeparator:this._options.pathSegmentSeparator,stats:this._options.stats,throwErrorOnBrokenSymbolicLink:this._options.throwErrorOnBrokenSymbolicLink})}_getValue(e,r){return e!=null?e:r}};bk.default=_3});var vk=w(_o=>{\"use strict\";Object.defineProperty(_o,\"__esModule\",{value:!0});_o.Settings=_o.walkStream=_o.walkSync=_o.walk=void 0;var X3=j3(),YQe=Y3(),qQe=z3(),Qk=V3();_o.Settings=Qk.default;function JQe(t,e,r){if(typeof e==\"function\"){new X3.default(t,Zy()).read(e);return}new X3.default(t,Zy(e)).read(r)}_o.walk=JQe;function WQe(t,e){let r=Zy(e);return new 
qQe.default(t,r).read()}_o.walkSync=WQe;function zQe(t,e){let r=Zy(e);return new YQe.default(t,r).read()}_o.walkStream=zQe;function Zy(t={}){return t instanceof Qk.default?t:new Qk.default(t)}});var kk=w(Sk=>{\"use strict\";Object.defineProperty(Sk,\"__esModule\",{value:!0});var _Qe=require(\"path\"),VQe=Fc(),Z3=Wa(),$3=class{constructor(e){this._settings=e,this._fsStatSettings=new VQe.Settings({followSymbolicLink:this._settings.followSymbolicLinks,fs:this._settings.fs,throwErrorOnBrokenSymbolicLink:this._settings.followSymbolicLinks})}_getFullEntryPath(e){return _Qe.resolve(this._settings.cwd,e)}_makeEntry(e,r){let i={name:r,path:r,dirent:Z3.fs.createDirentFromStats(r,e)};return this._settings.stats&&(i.stats=e),i}_isFatalError(e){return!Z3.errno.isEnoentCodeError(e)&&!this._settings.suppressErrors}};Sk.default=$3});var Pk=w(xk=>{\"use strict\";Object.defineProperty(xk,\"__esModule\",{value:!0});var XQe=require(\"stream\"),ZQe=Fc(),$Qe=vk(),eve=kk(),eW=class extends eve.default{constructor(){super(...arguments);this._walkStream=$Qe.walkStream,this._stat=ZQe.stat}dynamic(e,r){return this._walkStream(e,r)}static(e,r){let i=e.map(this._getFullEntryPath,this),n=new XQe.PassThrough({objectMode:!0});n._write=(s,o,a)=>this._getEntry(i[s],e[s],r).then(l=>{l!==null&&r.entryFilter(l)&&n.push(l),s===i.length-1&&n.end(),a()}).catch(a);for(let s=0;s<i.length;s++)n.write(s);return n}_getEntry(e,r,i){return this._getStat(e).then(n=>this._makeEntry(n,r)).catch(n=>{if(i.errorFilter(n))return null;throw n})}_getStat(e){return new Promise((r,i)=>{this._stat(e,this._fsStatSettings,(n,s)=>n===null?r(s):i(n))})}};xk.default=eW});var rW=w(Dk=>{\"use strict\";Object.defineProperty(Dk,\"__esModule\",{value:!0});var Tg=Wa(),tW=class{constructor(e,r,i){this._patterns=e,this._settings=r,this._micromatchOptions=i,this._storage=[],this._fillStorage()}_fillStorage(){let e=Tg.pattern.expandPatternsWithBraceExpansion(this._patterns);for(let r of e){let i=this._getPatternSegments(r),n=this._splitSegmentsIntoSections(i);this._storage.push({complete:n.length<=1,pattern:r,segments:i,sections:n})}}_getPatternSegments(e){return Tg.pattern.getPatternParts(e,this._micromatchOptions).map(i=>Tg.pattern.isDynamicPattern(i,this._settings)?{dynamic:!0,pattern:i,patternRe:Tg.pattern.makeRe(i,this._micromatchOptions)}:{dynamic:!1,pattern:i})}_splitSegmentsIntoSections(e){return Tg.array.splitWhen(e,r=>r.dynamic&&Tg.pattern.hasGlobStar(r.pattern))}};Dk.default=tW});var nW=w(Rk=>{\"use strict\";Object.defineProperty(Rk,\"__esModule\",{value:!0});var tve=rW(),iW=class extends tve.default{match(e){let r=e.split(\"/\"),i=r.length,n=this._storage.filter(s=>!s.complete||s.segments.length>i);for(let s of n){let o=s.sections[0];if(!s.complete&&i>o.length||r.every((l,c)=>{let u=s.segments[c];return!!(u.dynamic&&u.patternRe.test(l)||!u.dynamic&&u.pattern===l)}))return!0}return!1}};Rk.default=iW});var oW=w(Fk=>{\"use strict\";Object.defineProperty(Fk,\"__esModule\",{value:!0});var $y=Wa(),rve=nW(),sW=class{constructor(e,r){this._settings=e,this._micromatchOptions=r}getFilter(e,r,i){let n=this._getMatcher(r),s=this._getNegativePatternsRe(i);return o=>this._filter(e,o,n,s)}_getMatcher(e){return new rve.default(e,this._settings,this._micromatchOptions)}_getNegativePatternsRe(e){let r=e.filter($y.pattern.isAffectDepthOfReadingPattern);return $y.pattern.convertPatternsToRe(r,this._micromatchOptions)}_filter(e,r,i,n){let s=this._getEntryLevel(e,r.path);if(this._isSkippedByDeep(s)||this._isSkippedSymbolicLink(r))return!1;let 
o=$y.path.removeLeadingDotSegment(r.path);return this._isSkippedByPositivePatterns(o,i)?!1:this._isSkippedByNegativePatterns(o,n)}_isSkippedByDeep(e){return e>=this._settings.deep}_isSkippedSymbolicLink(e){return!this._settings.followSymbolicLinks&&e.dirent.isSymbolicLink()}_getEntryLevel(e,r){let i=e.split(\"/\").length;return r.split(\"/\").length-(e===\"\"?0:i)}_isSkippedByPositivePatterns(e,r){return!this._settings.baseNameMatch&&!r.match(e)}_isSkippedByNegativePatterns(e,r){return!$y.pattern.matchAny(e,r)}};Fk.default=sW});var AW=w(Nk=>{\"use strict\";Object.defineProperty(Nk,\"__esModule\",{value:!0});var sd=Wa(),aW=class{constructor(e,r){this._settings=e,this._micromatchOptions=r,this.index=new Map}getFilter(e,r){let i=sd.pattern.convertPatternsToRe(e,this._micromatchOptions),n=sd.pattern.convertPatternsToRe(r,this._micromatchOptions);return s=>this._filter(s,i,n)}_filter(e,r,i){if(this._settings.unique){if(this._isDuplicateEntry(e))return!1;this._createIndexRecord(e)}if(this._onlyFileFilter(e)||this._onlyDirectoryFilter(e)||this._isSkippedByAbsoluteNegativePatterns(e,i))return!1;let n=this._settings.baseNameMatch?e.name:e.path;return this._isMatchToPatterns(n,r)&&!this._isMatchToPatterns(e.path,i)}_isDuplicateEntry(e){return this.index.has(e.path)}_createIndexRecord(e){this.index.set(e.path,void 0)}_onlyFileFilter(e){return this._settings.onlyFiles&&!e.dirent.isFile()}_onlyDirectoryFilter(e){return this._settings.onlyDirectories&&!e.dirent.isDirectory()}_isSkippedByAbsoluteNegativePatterns(e,r){if(!this._settings.absolute)return!1;let i=sd.path.makeAbsolute(this._settings.cwd,e.path);return this._isMatchToPatterns(i,r)}_isMatchToPatterns(e,r){let i=sd.path.removeLeadingDotSegment(e);return sd.pattern.matchAny(i,r)}};Nk.default=aW});var cW=w(Lk=>{\"use strict\";Object.defineProperty(Lk,\"__esModule\",{value:!0});var ive=Wa(),lW=class{constructor(e){this._settings=e}getFilter(){return e=>this._isNonFatalError(e)}_isNonFatalError(e){return ive.errno.isEnoentCodeError(e)||this._settings.suppressErrors}};Lk.default=lW});var fW=w(Tk=>{\"use strict\";Object.defineProperty(Tk,\"__esModule\",{value:!0});var uW=Wa(),gW=class{constructor(e){this._settings=e}getTransformer(){return e=>this._transform(e)}_transform(e){let r=e.path;return this._settings.absolute&&(r=uW.path.makeAbsolute(this._settings.cwd,r),r=uW.path.unixify(r)),this._settings.markDirectories&&e.dirent.isDirectory()&&(r+=\"/\"),this._settings.objectMode?Object.assign(Object.assign({},e),{path:r}):r}};Tk.default=gW});var ew=w(Ok=>{\"use strict\";Object.defineProperty(Ok,\"__esModule\",{value:!0});var nve=require(\"path\"),sve=oW(),ove=AW(),ave=cW(),Ave=fW(),hW=class{constructor(e){this._settings=e,this.errorFilter=new ave.default(this._settings),this.entryFilter=new ove.default(this._settings,this._getMicromatchOptions()),this.deepFilter=new sve.default(this._settings,this._getMicromatchOptions()),this.entryTransformer=new Ave.default(this._settings)}_getRootDirectory(e){return nve.resolve(this._settings.cwd,e.base)}_getReaderOptions(e){let 
r=e.base===\".\"?\"\":e.base;return{basePath:r,pathSegmentSeparator:\"/\",concurrency:this._settings.concurrency,deepFilter:this.deepFilter.getFilter(r,e.positive,e.negative),entryFilter:this.entryFilter.getFilter(e.positive,e.negative),errorFilter:this.errorFilter.getFilter(),followSymbolicLinks:this._settings.followSymbolicLinks,fs:this._settings.fs,stats:this._settings.stats,throwErrorOnBrokenSymbolicLink:this._settings.throwErrorOnBrokenSymbolicLink,transform:this.entryTransformer.getTransformer()}}_getMicromatchOptions(){return{dot:this._settings.dot,matchBase:this._settings.baseNameMatch,nobrace:!this._settings.braceExpansion,nocase:!this._settings.caseSensitiveMatch,noext:!this._settings.extglob,noglobstar:!this._settings.globstar,posix:!0,strictSlashes:!1}}};Ok.default=hW});var dW=w(Mk=>{\"use strict\";Object.defineProperty(Mk,\"__esModule\",{value:!0});var lve=Pk(),cve=ew(),pW=class extends cve.default{constructor(){super(...arguments);this._reader=new lve.default(this._settings)}read(e){let r=this._getRootDirectory(e),i=this._getReaderOptions(e),n=[];return new Promise((s,o)=>{let a=this.api(r,e,i);a.once(\"error\",o),a.on(\"data\",l=>n.push(i.transform(l))),a.once(\"end\",()=>s(n))})}api(e,r,i){return r.dynamic?this._reader.dynamic(e,i):this._reader.static(r.patterns,i)}};Mk.default=pW});var mW=w(Uk=>{\"use strict\";Object.defineProperty(Uk,\"__esModule\",{value:!0});var uve=require(\"stream\"),gve=Pk(),fve=ew(),CW=class extends fve.default{constructor(){super(...arguments);this._reader=new gve.default(this._settings)}read(e){let r=this._getRootDirectory(e),i=this._getReaderOptions(e),n=this.api(r,e,i),s=new uve.Readable({objectMode:!0,read:()=>{}});return n.once(\"error\",o=>s.emit(\"error\",o)).on(\"data\",o=>s.emit(\"data\",i.transform(o))).once(\"end\",()=>s.emit(\"end\")),s.once(\"close\",()=>n.destroy()),s}api(e,r,i){return r.dynamic?this._reader.dynamic(e,i):this._reader.static(r.patterns,i)}};Uk.default=CW});var IW=w(Kk=>{\"use strict\";Object.defineProperty(Kk,\"__esModule\",{value:!0});var hve=Fc(),pve=vk(),dve=kk(),EW=class extends dve.default{constructor(){super(...arguments);this._walkSync=pve.walkSync,this._statSync=hve.statSync}dynamic(e,r){return this._walkSync(e,r)}static(e,r){let i=[];for(let n of e){let s=this._getFullEntryPath(n),o=this._getEntry(s,n,r);o===null||!r.entryFilter(o)||i.push(o)}return i}_getEntry(e,r,i){try{let n=this._getStat(e);return this._makeEntry(n,r)}catch(n){if(i.errorFilter(n))return null;throw n}}_getStat(e){return this._statSync(e,this._fsStatSettings)}};Kk.default=EW});var wW=w(Hk=>{\"use strict\";Object.defineProperty(Hk,\"__esModule\",{value:!0});var Cve=IW(),mve=ew(),yW=class extends mve.default{constructor(){super(...arguments);this._reader=new Cve.default(this._settings)}read(e){let r=this._getRootDirectory(e),i=this._getReaderOptions(e);return this.api(r,e,i).map(i.transform)}api(e,r,i){return r.dynamic?this._reader.dynamic(e,i):this._reader.static(r.patterns,i)}};Hk.default=yW});var bW=w(od=>{\"use strict\";Object.defineProperty(od,\"__esModule\",{value:!0});var Og=require(\"fs\"),Eve=require(\"os\"),Ive=Eve.cpus().length;od.DEFAULT_FILE_SYSTEM_ADAPTER={lstat:Og.lstat,lstatSync:Og.lstatSync,stat:Og.stat,statSync:Og.statSync,readdir:Og.readdir,readdirSync:Og.readdirSync};var 
BW=class{constructor(e={}){this._options=e,this.absolute=this._getValue(this._options.absolute,!1),this.baseNameMatch=this._getValue(this._options.baseNameMatch,!1),this.braceExpansion=this._getValue(this._options.braceExpansion,!0),this.caseSensitiveMatch=this._getValue(this._options.caseSensitiveMatch,!0),this.concurrency=this._getValue(this._options.concurrency,Ive),this.cwd=this._getValue(this._options.cwd,process.cwd()),this.deep=this._getValue(this._options.deep,Infinity),this.dot=this._getValue(this._options.dot,!1),this.extglob=this._getValue(this._options.extglob,!0),this.followSymbolicLinks=this._getValue(this._options.followSymbolicLinks,!0),this.fs=this._getFileSystemMethods(this._options.fs),this.globstar=this._getValue(this._options.globstar,!0),this.ignore=this._getValue(this._options.ignore,[]),this.markDirectories=this._getValue(this._options.markDirectories,!1),this.objectMode=this._getValue(this._options.objectMode,!1),this.onlyDirectories=this._getValue(this._options.onlyDirectories,!1),this.onlyFiles=this._getValue(this._options.onlyFiles,!0),this.stats=this._getValue(this._options.stats,!1),this.suppressErrors=this._getValue(this._options.suppressErrors,!1),this.throwErrorOnBrokenSymbolicLink=this._getValue(this._options.throwErrorOnBrokenSymbolicLink,!1),this.unique=this._getValue(this._options.unique,!0),this.onlyDirectories&&(this.onlyFiles=!1),this.stats&&(this.objectMode=!0)}_getValue(e,r){return e===void 0?r:e}_getFileSystemMethods(e={}){return Object.assign(Object.assign({},od.DEFAULT_FILE_SYSTEM_ADAPTER),e)}};od.default=BW});var tw=w((Yit,QW)=>{\"use strict\";var vW=o3(),yve=dW(),wve=mW(),Bve=wW(),jk=bW(),Nc=Wa();async function Yk(t,e){Mg(t);let r=Gk(t,yve.default,e),i=await Promise.all(r);return Nc.array.flatten(i)}(function(t){function e(o,a){Mg(o);let l=Gk(o,Bve.default,a);return Nc.array.flatten(l)}t.sync=e;function r(o,a){Mg(o);let l=Gk(o,wve.default,a);return Nc.stream.merge(l)}t.stream=r;function i(o,a){Mg(o);let l=[].concat(o),c=new jk.default(a);return vW.generate(l,c)}t.generateTasks=i;function n(o,a){Mg(o);let l=new jk.default(a);return Nc.pattern.isDynamicPattern(o,l)}t.isDynamicPattern=n;function s(o){return Mg(o),Nc.path.escape(o)}t.escapePath=s})(Yk||(Yk={}));function Gk(t,e,r){let i=[].concat(t),n=new jk.default(r),s=vW.generate(i,n),o=new e(n);return s.map(o.read,o)}function Mg(t){if(![].concat(t).every(i=>Nc.string.isString(i)&&!Nc.string.isEmpty(i)))throw new TypeError(\"Patterns must be a string (non empty) or an array of strings\")}QW.exports=Yk});var kW=w(Lc=>{\"use strict\";var{promisify:bve}=require(\"util\"),SW=require(\"fs\");async function qk(t,e,r){if(typeof r!=\"string\")throw new TypeError(`Expected a string, got ${typeof r}`);try{return(await bve(SW[t])(r))[e]()}catch(i){if(i.code===\"ENOENT\")return!1;throw i}}function Jk(t,e,r){if(typeof r!=\"string\")throw new TypeError(`Expected a string, got ${typeof r}`);try{return SW[t](r)[e]()}catch(i){if(i.code===\"ENOENT\")return!1;throw i}}Lc.isFile=qk.bind(null,\"stat\",\"isFile\");Lc.isDirectory=qk.bind(null,\"stat\",\"isDirectory\");Lc.isSymlink=qk.bind(null,\"lstat\",\"isSymbolicLink\");Lc.isFileSync=Jk.bind(null,\"statSync\",\"isFile\");Lc.isDirectorySync=Jk.bind(null,\"statSync\",\"isDirectory\");Lc.isSymlinkSync=Jk.bind(null,\"lstatSync\",\"isSymbolicLink\")});var FW=w((Jit,Wk)=>{\"use strict\";var Tc=require(\"path\"),xW=kW(),PW=t=>t.length>1?`{${t.join(\",\")}}`:t[0],DW=(t,e)=>{let r=t[0]===\"!\"?t.slice(1):t;return 
Tc.isAbsolute(r)?r:Tc.join(e,r)},Qve=(t,e)=>Tc.extname(t)?`**/${t}`:`**/${t}.${PW(e)}`,RW=(t,e)=>{if(e.files&&!Array.isArray(e.files))throw new TypeError(`Expected \\`files\\` to be of type \\`Array\\` but received type \\`${typeof e.files}\\``);if(e.extensions&&!Array.isArray(e.extensions))throw new TypeError(`Expected \\`extensions\\` to be of type \\`Array\\` but received type \\`${typeof e.extensions}\\``);return e.files&&e.extensions?e.files.map(r=>Tc.posix.join(t,Qve(r,e.extensions))):e.files?e.files.map(r=>Tc.posix.join(t,`**/${r}`)):e.extensions?[Tc.posix.join(t,`**/*.${PW(e.extensions)}`)]:[Tc.posix.join(t,\"**\")]};Wk.exports=async(t,e)=>{if(e=N({cwd:process.cwd()},e),typeof e.cwd!=\"string\")throw new TypeError(`Expected \\`cwd\\` to be of type \\`string\\` but received type \\`${typeof e.cwd}\\``);let r=await Promise.all([].concat(t).map(async i=>await xW.isDirectory(DW(i,e.cwd))?RW(i,e):i));return[].concat.apply([],r)};Wk.exports.sync=(t,e)=>{if(e=N({cwd:process.cwd()},e),typeof e.cwd!=\"string\")throw new TypeError(`Expected \\`cwd\\` to be of type \\`string\\` but received type \\`${typeof e.cwd}\\``);let r=[].concat(t).map(i=>xW.isDirectorySync(DW(i,e.cwd))?RW(i,e):i);return[].concat.apply([],r)}});var GW=w((Wit,NW)=>{function LW(t){return Array.isArray(t)?t:[t]}var TW=\"\",OW=\" \",zk=\"\\\\\",vve=/^\\s+$/,Sve=/^\\\\!/,kve=/^\\\\#/,xve=/\\r?\\n/g,Pve=/^\\.*\\/|^\\.+$/,_k=\"/\",MW=typeof Symbol!=\"undefined\"?Symbol.for(\"node-ignore\"):\"node-ignore\",Dve=(t,e,r)=>Object.defineProperty(t,e,{value:r}),Rve=/([0-z])-([0-z])/g,Fve=t=>t.replace(Rve,(e,r,i)=>r.charCodeAt(0)<=i.charCodeAt(0)?e:TW),Nve=t=>{let{length:e}=t;return t.slice(0,e-e%2)},Lve=[[/\\\\?\\s+$/,t=>t.indexOf(\"\\\\\")===0?OW:TW],[/\\\\\\s/g,()=>OW],[/[\\\\$.|*+(){^]/g,t=>`\\\\${t}`],[/(?!\\\\)\\?/g,()=>\"[^/]\"],[/^\\//,()=>\"^\"],[/\\//g,()=>\"\\\\/\"],[/^\\^*\\\\\\*\\\\\\*\\\\\\//,()=>\"^(?:.*\\\\/)?\"],[/^(?=[^^])/,function(){return/\\/(?!$)/.test(this)?\"^\":\"(?:^|\\\\/)\"}],[/\\\\\\/\\\\\\*\\\\\\*(?=\\\\\\/|$)/g,(t,e,r)=>e+6<r.length?\"(?:\\\\/[^\\\\/]+)*\":\"\\\\/.+\"],[/(^|[^\\\\]+)\\\\\\*(?=.+)/g,(t,e)=>`${e}[^\\\\/]*`],[/\\\\\\\\\\\\(?=[$.|*+(){^])/g,()=>zk],[/\\\\\\\\/g,()=>zk],[/(\\\\)?\\[([^\\]/]*?)(\\\\*)($|\\])/g,(t,e,r,i,n)=>e===zk?`\\\\[${r}${Nve(i)}${n}`:n===\"]\"&&i.length%2==0?`[${Fve(r)}${i}]`:\"[]\"],[/(?:[^*])$/,t=>/\\/$/.test(t)?`${t}$`:`${t}(?=$|\\\\/$)`],[/(\\^|\\\\\\/)?\\\\\\*$/,(t,e)=>`${e?`${e}[^/]+`:\"[^/]*\"}(?=$|\\\\/$)`]],UW=Object.create(null),Tve=(t,e)=>{let r=UW[t];return r||(r=Lve.reduce((i,n)=>i.replace(n[0],n[1].bind(t)),t),UW[t]=r),e?new RegExp(r,\"i\"):new RegExp(r)},Vk=t=>typeof t==\"string\",Ove=t=>t&&Vk(t)&&!vve.test(t)&&t.indexOf(\"#\")!==0,Mve=t=>t.split(xve),KW=class{constructor(e,r,i,n){this.origin=e,this.pattern=r,this.negative=i,this.regex=n}},Uve=(t,e)=>{let r=t,i=!1;t.indexOf(\"!\")===0&&(i=!0,t=t.substr(1)),t=t.replace(Sve,\"!\").replace(kve,\"#\");let n=Tve(t,e);return new KW(r,t,i,n)},Kve=(t,e)=>{throw new e(t)},_a=(t,e,r)=>Vk(t)?t?_a.isNotRelative(t)?r(`path should be a \\`path.relative()\\`d string, but got \"${e}\"`,RangeError):!0:r(\"path must not be empty\",TypeError):r(`path must be a string, but got \\`${e}\\``,TypeError),HW=t=>Pve.test(t);_a.isNotRelative=HW;_a.convert=t=>t;var 
jW=class{constructor({ignorecase:e=!0}={}){Dve(this,MW,!0),this._rules=[],this._ignorecase=e,this._initCache()}_initCache(){this._ignoreCache=Object.create(null),this._testCache=Object.create(null)}_addPattern(e){if(e&&e[MW]){this._rules=this._rules.concat(e._rules),this._added=!0;return}if(Ove(e)){let r=Uve(e,this._ignorecase);this._added=!0,this._rules.push(r)}}add(e){return this._added=!1,LW(Vk(e)?Mve(e):e).forEach(this._addPattern,this),this._added&&this._initCache(),this}addPattern(e){return this.add(e)}_testOne(e,r){let i=!1,n=!1;return this._rules.forEach(s=>{let{negative:o}=s;if(n===o&&i!==n||o&&!i&&!n&&!r)return;s.regex.test(e)&&(i=!o,n=o)}),{ignored:i,unignored:n}}_test(e,r,i,n){let s=e&&_a.convert(e);return _a(s,e,Kve),this._t(s,r,i,n)}_t(e,r,i,n){if(e in r)return r[e];if(n||(n=e.split(_k)),n.pop(),!n.length)return r[e]=this._testOne(e,i);let s=this._t(n.join(_k)+_k,r,i,n);return r[e]=s.ignored?s:this._testOne(e,i)}ignores(e){return this._test(e,this._ignoreCache,!1).ignored}createFilter(){return e=>!this.ignores(e)}filter(e){return LW(e).filter(this.createFilter())}test(e){return this._test(e,this._testCache,!0)}},rw=t=>new jW(t),Hve=()=>!1,jve=t=>_a(t&&_a.convert(t),t,Hve);rw.isPathValid=jve;rw.default=rw;NW.exports=rw;if(typeof process!=\"undefined\"&&(process.env&&process.env.IGNORE_TEST_WIN32||process.platform===\"win32\")){let t=r=>/^\\\\\\\\\\?\\\\/.test(r)||/[\"<>|\\u0000-\\u001F]+/u.test(r)?r:r.replace(/\\\\/g,\"/\");_a.convert=t;let e=/^[a-z]:\\//i;_a.isNotRelative=r=>e.test(r)||HW(r)}});var qW=w((zit,YW)=>{\"use strict\";YW.exports=t=>{let e=/^\\\\\\\\\\?\\\\/.test(t),r=/[^\\u0000-\\u0080]+/.test(t);return e||r?t:t.replace(/\\\\/g,\"/\")}});var ZW=w((_it,Xk)=>{\"use strict\";var{promisify:Gve}=require(\"util\"),JW=require(\"fs\"),Va=require(\"path\"),WW=tw(),Yve=GW(),ad=qW(),zW=[\"**/node_modules/**\",\"**/flow-typed/**\",\"**/coverage/**\",\"**/.git\"],qve=Gve(JW.readFile),Jve=t=>e=>e.startsWith(\"!\")?\"!\"+Va.posix.join(t,e.slice(1)):Va.posix.join(t,e),Wve=(t,e)=>{let r=ad(Va.relative(e.cwd,Va.dirname(e.fileName)));return t.split(/\\r?\\n/).filter(Boolean).filter(i=>!i.startsWith(\"#\")).map(Jve(r))},_W=t=>{let e=Yve();for(let r of t)e.add(Wve(r.content,{cwd:r.cwd,fileName:r.filePath}));return e},zve=(t,e)=>{if(t=ad(t),Va.isAbsolute(e)){if(ad(e).startsWith(t))return e;throw new Error(`Path ${e} is not in cwd ${t}`)}return Va.join(t,e)},VW=(t,e)=>r=>t.ignores(ad(Va.relative(e,zve(e,r.path||r)))),_ve=async(t,e)=>{let r=Va.join(e,t),i=await qve(r,\"utf8\");return{cwd:e,filePath:r,content:i}},Vve=(t,e)=>{let r=Va.join(e,t),i=JW.readFileSync(r,\"utf8\");return{cwd:e,filePath:r,content:i}},XW=({ignore:t=[],cwd:e=ad(process.cwd())}={})=>({ignore:t,cwd:e});Xk.exports=async t=>{t=XW(t);let e=await WW(\"**/.gitignore\",{ignore:zW.concat(t.ignore),cwd:t.cwd}),r=await Promise.all(e.map(n=>_ve(n,t.cwd))),i=_W(r);return VW(i,t.cwd)};Xk.exports.sync=t=>{t=XW(t);let r=WW.sync(\"**/.gitignore\",{ignore:zW.concat(t.ignore),cwd:t.cwd}).map(n=>Vve(n,t.cwd)),i=_W(r);return VW(i,t.cwd)}});var r8=w((Vit,$W)=>{\"use strict\";var{Transform:Xve}=require(\"stream\"),Zk=class extends Xve{constructor(){super({objectMode:!0})}},e8=class extends Zk{constructor(e){super();this._filter=e}_transform(e,r,i){this._filter(e)&&this.push(e),i()}},t8=class extends Zk{constructor(){super();this._pushed=new Set}_transform(e,r,i){this._pushed.has(e)||(this.push(e),this._pushed.add(e)),i()}};$W.exports={FilterStream:e8,UniqueStream:t8}});var rx=w((Xit,Oc)=>{\"use strict\";var 
i8=require(\"fs\"),iw=kJ(),Zve=XS(),nw=tw(),sw=FW(),$k=ZW(),{FilterStream:$ve,UniqueStream:eSe}=r8(),n8=()=>!1,s8=t=>t[0]===\"!\",tSe=t=>{if(!t.every(e=>typeof e==\"string\"))throw new TypeError(\"Patterns must be a string or an array of strings\")},rSe=(t={})=>{if(!t.cwd)return;let e;try{e=i8.statSync(t.cwd)}catch{return}if(!e.isDirectory())throw new Error(\"The `cwd` option must be a path to a directory\")},iSe=t=>t.stats instanceof i8.Stats?t.path:t,ow=(t,e)=>{t=iw([].concat(t)),tSe(t),rSe(e);let r=[];e=N({ignore:[],expandDirectories:!0},e);for(let[i,n]of t.entries()){if(s8(n))continue;let s=t.slice(i).filter(a=>s8(a)).map(a=>a.slice(1)),o=te(N({},e),{ignore:e.ignore.concat(s)});r.push({pattern:n,options:o})}return r},nSe=(t,e)=>{let r={};return t.options.cwd&&(r.cwd=t.options.cwd),Array.isArray(t.options.expandDirectories)?r=te(N({},r),{files:t.options.expandDirectories}):typeof t.options.expandDirectories==\"object\"&&(r=N(N({},r),t.options.expandDirectories)),e(t.pattern,r)},ex=(t,e)=>t.options.expandDirectories?nSe(t,e):[t.pattern],o8=t=>t&&t.gitignore?$k.sync({cwd:t.cwd,ignore:t.ignore}):n8,tx=t=>e=>{let{options:r}=t;return r.ignore&&Array.isArray(r.ignore)&&r.expandDirectories&&(r.ignore=sw.sync(r.ignore)),{pattern:e,options:r}};Oc.exports=async(t,e)=>{let r=ow(t,e),i=async()=>e&&e.gitignore?$k({cwd:e.cwd,ignore:e.ignore}):n8,n=async()=>{let l=await Promise.all(r.map(async c=>{let u=await ex(c,sw);return Promise.all(u.map(tx(c)))}));return iw(...l)},[s,o]=await Promise.all([i(),n()]),a=await Promise.all(o.map(l=>nw(l.pattern,l.options)));return iw(...a).filter(l=>!s(iSe(l)))};Oc.exports.sync=(t,e)=>{let r=ow(t,e),i=[];for(let o of r){let a=ex(o,sw.sync).map(tx(o));i.push(...a)}let n=o8(e),s=[];for(let o of i)s=iw(s,nw.sync(o.pattern,o.options));return s.filter(o=>!n(o))};Oc.exports.stream=(t,e)=>{let r=ow(t,e),i=[];for(let a of r){let l=ex(a,sw.sync).map(tx(a));i.push(...l)}let n=o8(e),s=new $ve(a=>!n(a)),o=new eSe;return Zve(i.map(a=>nw.stream(a.pattern,a.options))).pipe(s).pipe(o)};Oc.exports.generateGlobTasks=ow;Oc.exports.hasMagic=(t,e)=>[].concat(t).some(r=>nw.isDynamicPattern(r,e));Oc.exports.gitignore=$k});var Rn=w((bnt,y8)=>{function CSe(t){var e=typeof t;return t!=null&&(e==\"object\"||e==\"function\")}y8.exports=CSe});var ux=w((Qnt,w8)=>{var mSe=typeof global==\"object\"&&global&&global.Object===Object&&global;w8.exports=mSe});var Rs=w((vnt,B8)=>{var ESe=ux(),ISe=typeof self==\"object\"&&self&&self.Object===Object&&self,ySe=ESe||ISe||Function(\"return this\")();B8.exports=ySe});var Q8=w((Snt,b8)=>{var wSe=Rs(),BSe=function(){return wSe.Date.now()};b8.exports=BSe});var S8=w((knt,v8)=>{var bSe=/\\s/;function QSe(t){for(var e=t.length;e--&&bSe.test(t.charAt(e)););return e}v8.exports=QSe});var x8=w((xnt,k8)=>{var vSe=S8(),SSe=/^\\s+/;function kSe(t){return t&&t.slice(0,vSe(t)+1).replace(SSe,\"\")}k8.exports=kSe});var Kc=w((Pnt,P8)=>{var xSe=Rs(),PSe=xSe.Symbol;P8.exports=PSe});var N8=w((Dnt,D8)=>{var R8=Kc(),F8=Object.prototype,DSe=F8.hasOwnProperty,RSe=F8.toString,Ed=R8?R8.toStringTag:void 0;function FSe(t){var e=DSe.call(t,Ed),r=t[Ed];try{t[Ed]=void 0;var i=!0}catch(s){}var n=RSe.call(t);return i&&(e?t[Ed]=r:delete t[Ed]),n}D8.exports=FSe});var T8=w((Rnt,L8)=>{var NSe=Object.prototype,LSe=NSe.toString;function TSe(t){return LSe.call(t)}L8.exports=TSe});var Hc=w((Fnt,O8)=>{var M8=Kc(),OSe=N8(),MSe=T8(),USe=\"[object Null]\",KSe=\"[object Undefined]\",U8=M8?M8.toStringTag:void 0;function HSe(t){return t==null?t===void 0?KSe:USe:U8&&U8 in 
Object(t)?OSe(t):MSe(t)}O8.exports=HSe});var Zo=w((Nnt,K8)=>{function jSe(t){return t!=null&&typeof t==\"object\"}K8.exports=jSe});var Id=w((Lnt,H8)=>{var GSe=Hc(),YSe=Zo(),qSe=\"[object Symbol]\";function JSe(t){return typeof t==\"symbol\"||YSe(t)&&GSe(t)==qSe}H8.exports=JSe});var q8=w((Tnt,j8)=>{var WSe=x8(),G8=Rn(),zSe=Id(),Y8=0/0,_Se=/^[-+]0x[0-9a-f]+$/i,VSe=/^0b[01]+$/i,XSe=/^0o[0-7]+$/i,ZSe=parseInt;function $Se(t){if(typeof t==\"number\")return t;if(zSe(t))return Y8;if(G8(t)){var e=typeof t.valueOf==\"function\"?t.valueOf():t;t=G8(e)?e+\"\":e}if(typeof t!=\"string\")return t===0?t:+t;t=WSe(t);var r=VSe.test(t);return r||XSe.test(t)?ZSe(t.slice(2),r?2:8):_Se.test(t)?Y8:+t}j8.exports=$Se});var z8=w((Ont,J8)=>{var eke=Rn(),gx=Q8(),W8=q8(),tke=\"Expected a function\",rke=Math.max,ike=Math.min;function nke(t,e,r){var i,n,s,o,a,l,c=0,u=!1,g=!1,f=!0;if(typeof t!=\"function\")throw new TypeError(tke);e=W8(e)||0,eke(r)&&(u=!!r.leading,g=\"maxWait\"in r,s=g?rke(W8(r.maxWait)||0,e):s,f=\"trailing\"in r?!!r.trailing:f);function h(U){var J=i,W=n;return i=n=void 0,c=U,o=t.apply(W,J),o}function p(U){return c=U,a=setTimeout(Q,e),u?h(U):o}function m(U){var J=U-l,W=U-c,ee=e-J;return g?ike(ee,s-W):ee}function y(U){var J=U-l,W=U-c;return l===void 0||J>=e||J<0||g&&W>=s}function Q(){var U=gx();if(y(U))return S(U);a=setTimeout(Q,m(U))}function S(U){return a=void 0,f&&i?h(U):(i=n=void 0,o)}function x(){a!==void 0&&clearTimeout(a),c=0,i=l=n=a=void 0}function M(){return a===void 0?o:S(gx())}function Y(){var U=gx(),J=y(U);if(i=arguments,n=this,l=U,J){if(a===void 0)return p(l);if(g)return clearTimeout(a),a=setTimeout(Q,e),h(l)}return a===void 0&&(a=setTimeout(Q,e)),o}return Y.cancel=x,Y.flush=M,Y}J8.exports=nke});var V8=w((Mnt,_8)=>{var ske=z8(),oke=Rn(),ake=\"Expected a function\";function Ake(t,e,r){var i=!0,n=!0;if(typeof t!=\"function\")throw new TypeError(ake);return oke(r)&&(i=\"leading\"in r?!!r.leading:i,n=\"trailing\"in r?!!r.trailing:n),ske(t,e,{leading:i,maxWait:e,trailing:n})}_8.exports=Ake});var $a=w((Za,vw)=>{\"use strict\";Object.defineProperty(Za,\"__esModule\",{value:!0});var nz=[\"Int8Array\",\"Uint8Array\",\"Uint8ClampedArray\",\"Int16Array\",\"Uint16Array\",\"Int32Array\",\"Uint32Array\",\"Float32Array\",\"Float64Array\",\"BigInt64Array\",\"BigUint64Array\"];function yke(t){return nz.includes(t)}var wke=[\"Function\",\"Generator\",\"AsyncGenerator\",\"GeneratorFunction\",\"AsyncGeneratorFunction\",\"AsyncFunction\",\"Observable\",\"Array\",\"Buffer\",\"Object\",\"RegExp\",\"Date\",\"Error\",\"Map\",\"Set\",\"WeakMap\",\"WeakSet\",\"ArrayBuffer\",\"SharedArrayBuffer\",\"DataView\",\"Promise\",\"URL\",\"FormData\",\"URLSearchParams\",\"HTMLElement\",...nz];function Bke(t){return wke.includes(t)}var bke=[\"null\",\"undefined\",\"string\",\"number\",\"bigint\",\"boolean\",\"symbol\"];function Qke(t){return bke.includes(t)}function Jg(t){return e=>typeof e===t}var{toString:sz}=Object.prototype,Sd=t=>{let e=sz.call(t).slice(8,-1);if(/HTML\\w+Element/.test(e)&&_.domElement(t))return\"HTMLElement\";if(Bke(e))return e},hr=t=>e=>Sd(e)===t;function _(t){if(t===null)return\"null\";switch(typeof t){case\"undefined\":return\"undefined\";case\"string\":return\"string\";case\"number\":return\"number\";case\"boolean\":return\"boolean\";case\"function\":return\"Function\";case\"bigint\":return\"bigint\";case\"symbol\":return\"symbol\";default:}if(_.observable(t))return\"Observable\";if(_.array(t))return\"Array\";if(_.buffer(t))return\"Buffer\";let e=Sd(t);if(e)return e;if(t instanceof String||t instanceof 
Boolean||t instanceof Number)throw new TypeError(\"Please don't use object wrappers for primitive types\");return\"Object\"}_.undefined=Jg(\"undefined\");_.string=Jg(\"string\");var vke=Jg(\"number\");_.number=t=>vke(t)&&!_.nan(t);_.bigint=Jg(\"bigint\");_.function_=Jg(\"function\");_.null_=t=>t===null;_.class_=t=>_.function_(t)&&t.toString().startsWith(\"class \");_.boolean=t=>t===!0||t===!1;_.symbol=Jg(\"symbol\");_.numericString=t=>_.string(t)&&!_.emptyStringOrWhitespace(t)&&!Number.isNaN(Number(t));_.array=(t,e)=>Array.isArray(t)?_.function_(e)?t.every(e):!0:!1;_.buffer=t=>{var e,r,i,n;return(n=(i=(r=(e=t)===null||e===void 0?void 0:e.constructor)===null||r===void 0?void 0:r.isBuffer)===null||i===void 0?void 0:i.call(r,t))!==null&&n!==void 0?n:!1};_.nullOrUndefined=t=>_.null_(t)||_.undefined(t);_.object=t=>!_.null_(t)&&(typeof t==\"object\"||_.function_(t));_.iterable=t=>{var e;return _.function_((e=t)===null||e===void 0?void 0:e[Symbol.iterator])};_.asyncIterable=t=>{var e;return _.function_((e=t)===null||e===void 0?void 0:e[Symbol.asyncIterator])};_.generator=t=>_.iterable(t)&&_.function_(t.next)&&_.function_(t.throw);_.asyncGenerator=t=>_.asyncIterable(t)&&_.function_(t.next)&&_.function_(t.throw);_.nativePromise=t=>hr(\"Promise\")(t);var Ske=t=>{var e,r;return _.function_((e=t)===null||e===void 0?void 0:e.then)&&_.function_((r=t)===null||r===void 0?void 0:r.catch)};_.promise=t=>_.nativePromise(t)||Ske(t);_.generatorFunction=hr(\"GeneratorFunction\");_.asyncGeneratorFunction=t=>Sd(t)===\"AsyncGeneratorFunction\";_.asyncFunction=t=>Sd(t)===\"AsyncFunction\";_.boundFunction=t=>_.function_(t)&&!t.hasOwnProperty(\"prototype\");_.regExp=hr(\"RegExp\");_.date=hr(\"Date\");_.error=hr(\"Error\");_.map=t=>hr(\"Map\")(t);_.set=t=>hr(\"Set\")(t);_.weakMap=t=>hr(\"WeakMap\")(t);_.weakSet=t=>hr(\"WeakSet\")(t);_.int8Array=hr(\"Int8Array\");_.uint8Array=hr(\"Uint8Array\");_.uint8ClampedArray=hr(\"Uint8ClampedArray\");_.int16Array=hr(\"Int16Array\");_.uint16Array=hr(\"Uint16Array\");_.int32Array=hr(\"Int32Array\");_.uint32Array=hr(\"Uint32Array\");_.float32Array=hr(\"Float32Array\");_.float64Array=hr(\"Float64Array\");_.bigInt64Array=hr(\"BigInt64Array\");_.bigUint64Array=hr(\"BigUint64Array\");_.arrayBuffer=hr(\"ArrayBuffer\");_.sharedArrayBuffer=hr(\"SharedArrayBuffer\");_.dataView=hr(\"DataView\");_.directInstanceOf=(t,e)=>Object.getPrototypeOf(t)===e.prototype;_.urlInstance=t=>hr(\"URL\")(t);_.urlString=t=>{if(!_.string(t))return!1;try{return new URL(t),!0}catch(e){return!1}};_.truthy=t=>Boolean(t);_.falsy=t=>!t;_.nan=t=>Number.isNaN(t);_.primitive=t=>_.null_(t)||Qke(typeof t);_.integer=t=>Number.isInteger(t);_.safeInteger=t=>Number.isSafeInteger(t);_.plainObject=t=>{if(sz.call(t)!==\"[object Object]\")return!1;let e=Object.getPrototypeOf(t);return e===null||e===Object.getPrototypeOf({})};_.typedArray=t=>yke(Sd(t));var kke=t=>_.safeInteger(t)&&t>=0;_.arrayLike=t=>!_.nullOrUndefined(t)&&!_.function_(t)&&kke(t.length);_.inRange=(t,e)=>{if(_.number(e))return t>=Math.min(0,e)&&t<=Math.max(e,0);if(_.array(e)&&e.length===2)return t>=Math.min(...e)&&t<=Math.max(...e);throw new TypeError(`Invalid range: ${JSON.stringify(e)}`)};var xke=1,Pke=[\"innerHTML\",\"ownerDocument\",\"style\",\"attributes\",\"nodeValue\"];_.domElement=t=>_.object(t)&&t.nodeType===xke&&_.string(t.nodeName)&&!_.plainObject(t)&&Pke.every(e=>e in t);_.observable=t=>{var e,r,i,n;return t?t===((r=(e=t)[Symbol.observable])===null||r===void 0?void 0:r.call(e))||t===((n=(i=t)[\"@@observable\"])===null||n===void 0?void 
0:n.call(i)):!1};_.nodeStream=t=>_.object(t)&&_.function_(t.pipe)&&!_.observable(t);_.infinite=t=>t===Infinity||t===-Infinity;var oz=t=>e=>_.integer(e)&&Math.abs(e%2)===t;_.evenInteger=oz(0);_.oddInteger=oz(1);_.emptyArray=t=>_.array(t)&&t.length===0;_.nonEmptyArray=t=>_.array(t)&&t.length>0;_.emptyString=t=>_.string(t)&&t.length===0;_.nonEmptyString=t=>_.string(t)&&t.length>0;var Dke=t=>_.string(t)&&!/\\S/.test(t);_.emptyStringOrWhitespace=t=>_.emptyString(t)||Dke(t);_.emptyObject=t=>_.object(t)&&!_.map(t)&&!_.set(t)&&Object.keys(t).length===0;_.nonEmptyObject=t=>_.object(t)&&!_.map(t)&&!_.set(t)&&Object.keys(t).length>0;_.emptySet=t=>_.set(t)&&t.size===0;_.nonEmptySet=t=>_.set(t)&&t.size>0;_.emptyMap=t=>_.map(t)&&t.size===0;_.nonEmptyMap=t=>_.map(t)&&t.size>0;_.propertyKey=t=>_.any([_.string,_.number,_.symbol],t);_.formData=t=>hr(\"FormData\")(t);_.urlSearchParams=t=>hr(\"URLSearchParams\")(t);var az=(t,e,r)=>{if(!_.function_(e))throw new TypeError(`Invalid predicate: ${JSON.stringify(e)}`);if(r.length===0)throw new TypeError(\"Invalid number of values\");return t.call(r,e)};_.any=(t,...e)=>(_.array(t)?t:[t]).some(i=>az(Array.prototype.some,i,e));_.all=(t,...e)=>az(Array.prototype.every,t,e);var We=(t,e,r,i={})=>{if(!t){let{multipleValues:n}=i,s=n?`received values of types ${[...new Set(r.map(o=>`\\`${_(o)}\\``))].join(\", \")}`:`received value of type \\`${_(r)}\\``;throw new TypeError(`Expected value which is \\`${e}\\`, ${s}.`)}};Za.assert={undefined:t=>We(_.undefined(t),\"undefined\",t),string:t=>We(_.string(t),\"string\",t),number:t=>We(_.number(t),\"number\",t),bigint:t=>We(_.bigint(t),\"bigint\",t),function_:t=>We(_.function_(t),\"Function\",t),null_:t=>We(_.null_(t),\"null\",t),class_:t=>We(_.class_(t),\"Class\",t),boolean:t=>We(_.boolean(t),\"boolean\",t),symbol:t=>We(_.symbol(t),\"symbol\",t),numericString:t=>We(_.numericString(t),\"string with a number\",t),array:(t,e)=>{We(_.array(t),\"Array\",t),e&&t.forEach(e)},buffer:t=>We(_.buffer(t),\"Buffer\",t),nullOrUndefined:t=>We(_.nullOrUndefined(t),\"null or undefined\",t),object:t=>We(_.object(t),\"Object\",t),iterable:t=>We(_.iterable(t),\"Iterable\",t),asyncIterable:t=>We(_.asyncIterable(t),\"AsyncIterable\",t),generator:t=>We(_.generator(t),\"Generator\",t),asyncGenerator:t=>We(_.asyncGenerator(t),\"AsyncGenerator\",t),nativePromise:t=>We(_.nativePromise(t),\"native 
Promise\",t),promise:t=>We(_.promise(t),\"Promise\",t),generatorFunction:t=>We(_.generatorFunction(t),\"GeneratorFunction\",t),asyncGeneratorFunction:t=>We(_.asyncGeneratorFunction(t),\"AsyncGeneratorFunction\",t),asyncFunction:t=>We(_.asyncFunction(t),\"AsyncFunction\",t),boundFunction:t=>We(_.boundFunction(t),\"Function\",t),regExp:t=>We(_.regExp(t),\"RegExp\",t),date:t=>We(_.date(t),\"Date\",t),error:t=>We(_.error(t),\"Error\",t),map:t=>We(_.map(t),\"Map\",t),set:t=>We(_.set(t),\"Set\",t),weakMap:t=>We(_.weakMap(t),\"WeakMap\",t),weakSet:t=>We(_.weakSet(t),\"WeakSet\",t),int8Array:t=>We(_.int8Array(t),\"Int8Array\",t),uint8Array:t=>We(_.uint8Array(t),\"Uint8Array\",t),uint8ClampedArray:t=>We(_.uint8ClampedArray(t),\"Uint8ClampedArray\",t),int16Array:t=>We(_.int16Array(t),\"Int16Array\",t),uint16Array:t=>We(_.uint16Array(t),\"Uint16Array\",t),int32Array:t=>We(_.int32Array(t),\"Int32Array\",t),uint32Array:t=>We(_.uint32Array(t),\"Uint32Array\",t),float32Array:t=>We(_.float32Array(t),\"Float32Array\",t),float64Array:t=>We(_.float64Array(t),\"Float64Array\",t),bigInt64Array:t=>We(_.bigInt64Array(t),\"BigInt64Array\",t),bigUint64Array:t=>We(_.bigUint64Array(t),\"BigUint64Array\",t),arrayBuffer:t=>We(_.arrayBuffer(t),\"ArrayBuffer\",t),sharedArrayBuffer:t=>We(_.sharedArrayBuffer(t),\"SharedArrayBuffer\",t),dataView:t=>We(_.dataView(t),\"DataView\",t),urlInstance:t=>We(_.urlInstance(t),\"URL\",t),urlString:t=>We(_.urlString(t),\"string with a URL\",t),truthy:t=>We(_.truthy(t),\"truthy\",t),falsy:t=>We(_.falsy(t),\"falsy\",t),nan:t=>We(_.nan(t),\"NaN\",t),primitive:t=>We(_.primitive(t),\"primitive\",t),integer:t=>We(_.integer(t),\"integer\",t),safeInteger:t=>We(_.safeInteger(t),\"integer\",t),plainObject:t=>We(_.plainObject(t),\"plain object\",t),typedArray:t=>We(_.typedArray(t),\"TypedArray\",t),arrayLike:t=>We(_.arrayLike(t),\"array-like\",t),domElement:t=>We(_.domElement(t),\"HTMLElement\",t),observable:t=>We(_.observable(t),\"Observable\",t),nodeStream:t=>We(_.nodeStream(t),\"Node.js Stream\",t),infinite:t=>We(_.infinite(t),\"infinite number\",t),emptyArray:t=>We(_.emptyArray(t),\"empty array\",t),nonEmptyArray:t=>We(_.nonEmptyArray(t),\"non-empty array\",t),emptyString:t=>We(_.emptyString(t),\"empty string\",t),nonEmptyString:t=>We(_.nonEmptyString(t),\"non-empty string\",t),emptyStringOrWhitespace:t=>We(_.emptyStringOrWhitespace(t),\"empty string or whitespace\",t),emptyObject:t=>We(_.emptyObject(t),\"empty object\",t),nonEmptyObject:t=>We(_.nonEmptyObject(t),\"non-empty object\",t),emptySet:t=>We(_.emptySet(t),\"empty set\",t),nonEmptySet:t=>We(_.nonEmptySet(t),\"non-empty set\",t),emptyMap:t=>We(_.emptyMap(t),\"empty map\",t),nonEmptyMap:t=>We(_.nonEmptyMap(t),\"non-empty map\",t),propertyKey:t=>We(_.propertyKey(t),\"PropertyKey\",t),formData:t=>We(_.formData(t),\"FormData\",t),urlSearchParams:t=>We(_.urlSearchParams(t),\"URLSearchParams\",t),evenInteger:t=>We(_.evenInteger(t),\"even integer\",t),oddInteger:t=>We(_.oddInteger(t),\"odd integer\",t),directInstanceOf:(t,e)=>We(_.directInstanceOf(t,e),\"T\",t),inRange:(t,e)=>We(_.inRange(t,e),\"in range\",t),any:(t,...e)=>We(_.any(t,...e),\"predicate returns truthy for any value\",e,{multipleValues:!0}),all:(t,...e)=>We(_.all(t,...e),\"predicate returns truthy for all 
values\",e,{multipleValues:!0})};Object.defineProperties(_,{class:{value:_.class_},function:{value:_.function_},null:{value:_.null_}});Object.defineProperties(Za.assert,{class:{value:Za.assert.class_},function:{value:Za.assert.function_},null:{value:Za.assert.null_}});Za.default=_;vw.exports=_;vw.exports.default=_;vw.exports.assert=Za.assert});var Az=w((Gst,Rx)=>{\"use strict\";var Fx=class extends Error{constructor(e){super(e||\"Promise was canceled\");this.name=\"CancelError\"}get isCanceled(){return!0}},kd=class{static fn(e){return(...r)=>new kd((i,n,s)=>{r.push(s),e(...r).then(i,n)})}constructor(e){this._cancelHandlers=[],this._isPending=!0,this._isCanceled=!1,this._rejectOnCancel=!0,this._promise=new Promise((r,i)=>{this._reject=i;let n=a=>{this._isPending=!1,r(a)},s=a=>{this._isPending=!1,i(a)},o=a=>{if(!this._isPending)throw new Error(\"The `onCancel` handler was attached after the promise settled.\");this._cancelHandlers.push(a)};return Object.defineProperties(o,{shouldReject:{get:()=>this._rejectOnCancel,set:a=>{this._rejectOnCancel=a}}}),e(n,s,o)})}then(e,r){return this._promise.then(e,r)}catch(e){return this._promise.catch(e)}finally(e){return this._promise.finally(e)}cancel(e){if(!(!this._isPending||this._isCanceled)){if(this._cancelHandlers.length>0)try{for(let r of this._cancelHandlers)r()}catch(r){this._reject(r)}this._isCanceled=!0,this._rejectOnCancel&&this._reject(new Fx(e))}}get isCanceled(){return this._isCanceled}};Object.setPrototypeOf(kd.prototype,Promise.prototype);Rx.exports=kd;Rx.exports.CancelError=Fx});var lz=w((Nx,Lx)=>{\"use strict\";Object.defineProperty(Nx,\"__esModule\",{value:!0});var Rke=require(\"tls\"),Tx=(t,e)=>{let r;typeof e==\"function\"?r={connect:e}:r=e;let i=typeof r.connect==\"function\",n=typeof r.secureConnect==\"function\",s=typeof r.close==\"function\",o=()=>{i&&r.connect(),t instanceof Rke.TLSSocket&&n&&(t.authorized?r.secureConnect():t.authorizationError||t.once(\"secureConnect\",r.secureConnect)),s&&t.once(\"close\",r.close)};t.writable&&!t.connecting?o():t.connecting?t.once(\"connect\",o):t.destroyed&&s&&r.close(t._hadError)};Nx.default=Tx;Lx.exports=Tx;Lx.exports.default=Tx});var cz=w((Ox,Mx)=>{\"use strict\";Object.defineProperty(Ox,\"__esModule\",{value:!0});var Fke=lz(),Nke=Number(process.versions.node.split(\".\")[0]),Ux=t=>{let e={start:Date.now(),socket:void 0,lookup:void 0,connect:void 0,secureConnect:void 0,upload:void 0,response:void 0,end:void 0,error:void 0,abort:void 0,phases:{wait:void 0,dns:void 0,tcp:void 0,tls:void 0,request:void 0,firstByte:void 0,download:void 0,total:void 0}};t.timings=e;let r=o=>{let a=o.emit.bind(o);o.emit=(l,...c)=>(l===\"error\"&&(e.error=Date.now(),e.phases.total=e.error-e.start,o.emit=a),a(l,...c))};r(t),t.prependOnceListener(\"abort\",()=>{e.abort=Date.now(),(!e.response||Nke>=13)&&(e.phases.total=Date.now()-e.start)});let i=o=>{e.socket=Date.now(),e.phases.wait=e.socket-e.start;let a=()=>{e.lookup=Date.now(),e.phases.dns=e.lookup-e.socket};o.prependOnceListener(\"lookup\",a),Fke.default(o,{connect:()=>{e.connect=Date.now(),e.lookup===void 0&&(o.removeListener(\"lookup\",a),e.lookup=e.connect,e.phases.dns=e.lookup-e.socket),e.phases.tcp=e.connect-e.lookup},secureConnect:()=>{e.secureConnect=Date.now(),e.phases.tls=e.secureConnect-e.connect}})};t.socket?i(t.socket):t.prependOnceListener(\"socket\",i);let n=()=>{var o;e.upload=Date.now(),e.phases.request=e.upload-(o=e.secureConnect,o!=null?o:e.connect)};return(()=>typeof 
t.writableFinished==\"boolean\"?t.writableFinished:t.finished&&t.outputSize===0&&(!t.socket||t.socket.writableLength===0))()?n():t.prependOnceListener(\"finish\",n),t.prependOnceListener(\"response\",o=>{e.response=Date.now(),e.phases.firstByte=e.response-e.upload,o.timings=e,r(o),o.prependOnceListener(\"end\",()=>{e.end=Date.now(),e.phases.download=e.end-e.response,e.phases.total=e.end-e.start})}),e};Ox.default=Ux;Mx.exports=Ux;Mx.exports.default=Ux});var Cz=w((Yst,Kx)=>{\"use strict\";var{V4MAPPED:Lke,ADDRCONFIG:Tke,ALL:uz,promises:{Resolver:gz},lookup:Oke}=require(\"dns\"),{promisify:Hx}=require(\"util\"),Mke=require(\"os\"),Wg=Symbol(\"cacheableLookupCreateConnection\"),jx=Symbol(\"cacheableLookupInstance\"),fz=Symbol(\"expires\"),Uke=typeof uz==\"number\",hz=t=>{if(!(t&&typeof t.createConnection==\"function\"))throw new Error(\"Expected an Agent instance as the first argument\")},Kke=t=>{for(let e of t)e.family!==6&&(e.address=`::ffff:${e.address}`,e.family=6)},pz=()=>{let t=!1,e=!1;for(let r of Object.values(Mke.networkInterfaces()))for(let i of r)if(!i.internal&&(i.family===\"IPv6\"?e=!0:t=!0,t&&e))return{has4:t,has6:e};return{has4:t,has6:e}},Hke=t=>Symbol.iterator in t,dz={ttl:!0},jke={all:!0},Gx=class{constructor({cache:e=new Map,maxTtl:r=Infinity,fallbackDuration:i=3600,errorTtl:n=.15,resolver:s=new gz,lookup:o=Oke}={}){if(this.maxTtl=r,this.errorTtl=n,this._cache=e,this._resolver=s,this._dnsLookup=Hx(o),this._resolver instanceof gz?(this._resolve4=this._resolver.resolve4.bind(this._resolver),this._resolve6=this._resolver.resolve6.bind(this._resolver)):(this._resolve4=Hx(this._resolver.resolve4.bind(this._resolver)),this._resolve6=Hx(this._resolver.resolve6.bind(this._resolver))),this._iface=pz(),this._pending={},this._nextRemovalTime=!1,this._hostnamesToFallback=new Set,i<1)this._fallback=!1;else{this._fallback=!0;let a=setInterval(()=>{this._hostnamesToFallback.clear()},i*1e3);a.unref&&a.unref()}this.lookup=this.lookup.bind(this),this.lookupAsync=this.lookupAsync.bind(this)}set servers(e){this.clear(),this._resolver.setServers(e)}get servers(){return this._resolver.getServers()}lookup(e,r,i){if(typeof r==\"function\"?(i=r,r={}):typeof r==\"number\"&&(r={family:r}),!i)throw new Error(\"Callback must be a function.\");this.lookupAsync(e,r).then(n=>{r.all?i(null,n):i(null,n.address,n.family,n.expires,n.ttl)},i)}async lookupAsync(e,r={}){typeof r==\"number\"&&(r={family:r});let i=await this.query(e);if(r.family===6){let n=i.filter(s=>s.family===6);r.hints&Lke&&(Uke&&r.hints&uz||n.length===0)?Kke(i):i=n}else r.family===4&&(i=i.filter(n=>n.family===4));if(r.hints&Tke){let{_iface:n}=this;i=i.filter(s=>s.family===6?n.has6:n.has4)}if(i.length===0){let n=new Error(`cacheableLookup ENOTFOUND ${e}`);throw n.code=\"ENOTFOUND\",n.hostname=e,n}return r.all?i:i[0]}async query(e){let r=await this._cache.get(e);if(!r){let i=this._pending[e];if(i)r=await i;else{let n=this.queryAndCache(e);this._pending[e]=n,r=await n}}return r=r.map(i=>N({},i)),r}async _resolve(e){let r=async c=>{try{return await c}catch(u){if(u.code===\"ENODATA\"||u.code===\"ENOTFOUND\")return[];throw u}},[i,n]=await Promise.all([this._resolve4(e,dz),this._resolve6(e,dz)].map(c=>r(c))),s=0,o=0,a=0,l=Date.now();for(let c of i)c.family=4,c.expires=l+c.ttl*1e3,s=Math.max(s,c.ttl);for(let c of n)c.family=6,c.expires=l+c.ttl*1e3,o=Math.max(o,c.ttl);return i.length>0?n.length>0?a=Math.min(s,o):a=s:a=o,{entries:[...i,...n],cacheTtl:a}}async _lookup(e){try{return{entries:await 
this._dnsLookup(e,{all:!0}),cacheTtl:0}}catch(r){return{entries:[],cacheTtl:0}}}async _set(e,r,i){if(this.maxTtl>0&&i>0){i=Math.min(i,this.maxTtl)*1e3,r[fz]=Date.now()+i;try{await this._cache.set(e,r,i)}catch(n){this.lookupAsync=async()=>{let s=new Error(\"Cache Error. Please recreate the CacheableLookup instance.\");throw s.cause=n,s}}Hke(this._cache)&&this._tick(i)}}async queryAndCache(e){if(this._hostnamesToFallback.has(e))return this._dnsLookup(e,jke);try{let r=await this._resolve(e);r.entries.length===0&&this._fallback&&(r=await this._lookup(e),r.entries.length!==0&&this._hostnamesToFallback.add(e));let i=r.entries.length===0?this.errorTtl:r.cacheTtl;return await this._set(e,r.entries,i),delete this._pending[e],r.entries}catch(r){throw delete this._pending[e],r}}_tick(e){let r=this._nextRemovalTime;(!r||e<r)&&(clearTimeout(this._removalTimeout),this._nextRemovalTime=e,this._removalTimeout=setTimeout(()=>{this._nextRemovalTime=!1;let i=Infinity,n=Date.now();for(let[s,o]of this._cache){let a=o[fz];n>=a?this._cache.delete(s):a<i&&(i=a)}i!==Infinity&&this._tick(i-n)},e),this._removalTimeout.unref&&this._removalTimeout.unref())}install(e){if(hz(e),Wg in e)throw new Error(\"CacheableLookup has been already installed\");e[Wg]=e.createConnection,e[jx]=this,e.createConnection=(r,i)=>(\"lookup\"in r||(r.lookup=this.lookup),e[Wg](r,i))}uninstall(e){if(hz(e),e[Wg]){if(e[jx]!==this)throw new Error(\"The agent is not owned by this CacheableLookup instance\");e.createConnection=e[Wg],delete e[Wg],delete e[jx]}}updateInterfaceInfo(){let{_iface:e}=this;this._iface=pz(),(e.has4&&!this._iface.has4||e.has6&&!this._iface.has6)&&this._cache.clear()}clear(e){if(e){this._cache.delete(e);return}this._cache.clear()}};Kx.exports=Gx;Kx.exports.default=Gx});var Iz=w((qst,Yx)=>{\"use strict\";var Gke=typeof URL==\"undefined\"?require(\"url\").URL:URL,Yke=\"text/plain\",qke=\"us-ascii\",mz=(t,e)=>e.some(r=>r instanceof RegExp?r.test(t):r===t),Jke=(t,{stripHash:e})=>{let r=t.match(/^data:([^,]*?),([^#]*?)(?:#(.*))?$/);if(!r)throw new Error(`Invalid URL: ${t}`);let i=r[1].split(\";\"),n=r[2],s=e?\"\":r[3],o=!1;i[i.length-1]===\"base64\"&&(i.pop(),o=!0);let a=(i.shift()||\"\").toLowerCase(),c=[...i.map(u=>{let[g,f=\"\"]=u.split(\"=\").map(h=>h.trim());return g===\"charset\"&&(f=f.toLowerCase(),f===qke)?\"\":`${g}${f?`=${f}`:\"\"}`}).filter(Boolean)];return o&&c.push(\"base64\"),(c.length!==0||a&&a!==Yke)&&c.unshift(a),`data:${c.join(\";\")},${o?n.trim():n}${s?`#${s}`:\"\"}`},Ez=(t,e)=>{if(e=N({defaultProtocol:\"http:\",normalizeProtocol:!0,forceHttp:!1,forceHttps:!1,stripAuthentication:!0,stripHash:!1,stripWWW:!0,removeQueryParameters:[/^utm_\\w+/i],removeTrailingSlash:!0,removeDirectoryIndex:!1,sortQueryParameters:!0},e),Reflect.has(e,\"normalizeHttps\"))throw new Error(\"options.normalizeHttps is renamed to options.forceHttp\");if(Reflect.has(e,\"normalizeHttp\"))throw new Error(\"options.normalizeHttp is renamed to options.forceHttps\");if(Reflect.has(e,\"stripFragment\"))throw new Error(\"options.stripFragment is renamed to options.stripHash\");if(t=t.trim(),/^data:/i.test(t))return Jke(t,e);let r=t.startsWith(\"//\");!r&&/^\\.*\\//.test(t)||(t=t.replace(/^(?!(?:\\w+:)?\\/\\/)|^\\/\\//,e.defaultProtocol));let n=new Gke(t);if(e.forceHttp&&e.forceHttps)throw new Error(\"The `forceHttp` and `forceHttps` options cannot be used 
together\");if(e.forceHttp&&n.protocol===\"https:\"&&(n.protocol=\"http:\"),e.forceHttps&&n.protocol===\"http:\"&&(n.protocol=\"https:\"),e.stripAuthentication&&(n.username=\"\",n.password=\"\"),e.stripHash&&(n.hash=\"\"),n.pathname&&(n.pathname=n.pathname.replace(/((?!:).|^)\\/{2,}/g,(s,o)=>/^(?!\\/)/g.test(o)?`${o}/`:\"/\")),n.pathname&&(n.pathname=decodeURI(n.pathname)),e.removeDirectoryIndex===!0&&(e.removeDirectoryIndex=[/^index\\.[a-z]+$/]),Array.isArray(e.removeDirectoryIndex)&&e.removeDirectoryIndex.length>0){let s=n.pathname.split(\"/\"),o=s[s.length-1];mz(o,e.removeDirectoryIndex)&&(s=s.slice(0,s.length-1),n.pathname=s.slice(1).join(\"/\")+\"/\")}if(n.hostname&&(n.hostname=n.hostname.replace(/\\.$/,\"\"),e.stripWWW&&/^www\\.([a-z\\-\\d]{2,63})\\.([a-z.]{2,5})$/.test(n.hostname)&&(n.hostname=n.hostname.replace(/^www\\./,\"\"))),Array.isArray(e.removeQueryParameters))for(let s of[...n.searchParams.keys()])mz(s,e.removeQueryParameters)&&n.searchParams.delete(s);return e.sortQueryParameters&&n.searchParams.sort(),e.removeTrailingSlash&&(n.pathname=n.pathname.replace(/\\/$/,\"\")),t=n.toString(),(e.removeTrailingSlash||n.pathname===\"/\")&&n.hash===\"\"&&(t=t.replace(/\\/$/,\"\")),r&&!e.normalizeProtocol&&(t=t.replace(/^http:\\/\\//,\"//\")),e.stripProtocol&&(t=t.replace(/^(?:https?:)?\\/\\//,\"\")),t};Yx.exports=Ez;Yx.exports.default=Ez});var Bz=w((Jst,yz)=>{yz.exports=wz;function wz(t,e){if(t&&e)return wz(t)(e);if(typeof t!=\"function\")throw new TypeError(\"need wrapper function\");return Object.keys(t).forEach(function(i){r[i]=t[i]}),r;function r(){for(var i=new Array(arguments.length),n=0;n<i.length;n++)i[n]=arguments[n];var s=t.apply(this,i),o=i[i.length-1];return typeof s==\"function\"&&s!==o&&Object.keys(o).forEach(function(a){s[a]=o[a]}),s}}});var Jx=w((Wst,qx)=>{var bz=Bz();qx.exports=bz(Sw);qx.exports.strict=bz(Qz);Sw.proto=Sw(function(){Object.defineProperty(Function.prototype,\"once\",{value:function(){return Sw(this)},configurable:!0}),Object.defineProperty(Function.prototype,\"onceStrict\",{value:function(){return Qz(this)},configurable:!0})});function Sw(t){var e=function(){return e.called?e.value:(e.called=!0,e.value=t.apply(this,arguments))};return e.called=!1,e}function Qz(t){var e=function(){if(e.called)throw new Error(e.onceError);return e.called=!0,e.value=t.apply(this,arguments)},r=t.name||\"Function wrapped with `once`\";return e.onceError=r+\" shouldn't be called more than once\",e.called=!1,e}});var Wx=w((zst,vz)=>{var Wke=Jx(),zke=function(){},_ke=function(t){return t.setHeader&&typeof t.abort==\"function\"},Vke=function(t){return t.stdio&&Array.isArray(t.stdio)&&t.stdio.length===3},Sz=function(t,e,r){if(typeof e==\"function\")return Sz(t,null,e);e||(e={}),r=Wke(r||zke);var i=t._writableState,n=t._readableState,s=e.readable||e.readable!==!1&&t.readable,o=e.writable||e.writable!==!1&&t.writable,a=function(){t.writable||l()},l=function(){o=!1,s||r.call(t)},c=function(){s=!1,o||r.call(t)},u=function(p){r.call(t,p?new Error(\"exited with error code: \"+p):null)},g=function(p){r.call(t,p)},f=function(){if(s&&!(n&&n.ended))return r.call(t,new Error(\"premature close\"));if(o&&!(i&&i.ended))return r.call(t,new Error(\"premature close\"))},h=function(){t.req.on(\"finish\",l)};return 
_ke(t)?(t.on(\"complete\",l),t.on(\"abort\",f),t.req?h():t.on(\"request\",h)):o&&!i&&(t.on(\"end\",a),t.on(\"close\",a)),Vke(t)&&t.on(\"exit\",u),t.on(\"end\",c),t.on(\"finish\",l),e.error!==!1&&t.on(\"error\",g),t.on(\"close\",f),function(){t.removeListener(\"complete\",l),t.removeListener(\"abort\",f),t.removeListener(\"request\",h),t.req&&t.req.removeListener(\"finish\",l),t.removeListener(\"end\",a),t.removeListener(\"close\",a),t.removeListener(\"finish\",l),t.removeListener(\"exit\",u),t.removeListener(\"end\",c),t.removeListener(\"error\",g),t.removeListener(\"close\",f)}};vz.exports=Sz});var Pz=w((_st,kz)=>{var Xke=Jx(),Zke=Wx(),zx=require(\"fs\"),xd=function(){},$ke=/^v?\\.0/.test(process.version),kw=function(t){return typeof t==\"function\"},exe=function(t){return!$ke||!zx?!1:(t instanceof(zx.ReadStream||xd)||t instanceof(zx.WriteStream||xd))&&kw(t.close)},txe=function(t){return t.setHeader&&kw(t.abort)},rxe=function(t,e,r,i){i=Xke(i);var n=!1;t.on(\"close\",function(){n=!0}),Zke(t,{readable:e,writable:r},function(o){if(o)return i(o);n=!0,i()});var s=!1;return function(o){if(!n&&!s){if(s=!0,exe(t))return t.close(xd);if(txe(t))return t.abort();if(kw(t.destroy))return t.destroy();i(o||new Error(\"stream was destroyed\"))}}},xz=function(t){t()},ixe=function(t,e){return t.pipe(e)},nxe=function(){var t=Array.prototype.slice.call(arguments),e=kw(t[t.length-1]||xd)&&t.pop()||xd;if(Array.isArray(t[0])&&(t=t[0]),t.length<2)throw new Error(\"pump requires two streams per minimum\");var r,i=t.map(function(n,s){var o=s<t.length-1,a=s>0;return rxe(n,o,a,function(l){r||(r=l),l&&i.forEach(xz),!o&&(i.forEach(xz),e(r))})});return t.reduce(ixe)};kz.exports=nxe});var Rz=w((Vst,Dz)=>{\"use strict\";var{PassThrough:sxe}=require(\"stream\");Dz.exports=t=>{t=N({},t);let{array:e}=t,{encoding:r}=t,i=r===\"buffer\",n=!1;e?n=!(r||i):r=r||\"utf8\",i&&(r=null);let s=new sxe({objectMode:n});r&&s.setEncoding(r);let o=0,a=[];return s.on(\"data\",l=>{a.push(l),n?o=a.length:o+=l.length}),s.getBufferedValue=()=>e?a:i?Buffer.concat(a,o):a.join(\"\"),s.getBufferedLength=()=>o,s}});var Fz=w((Xst,zg)=>{\"use strict\";var oxe=Pz(),axe=Rz(),_x=class extends Error{constructor(){super(\"maxBuffer exceeded\");this.name=\"MaxBufferError\"}};async function xw(t,e){if(!t)return Promise.reject(new Error(\"Expected a stream\"));e=N({maxBuffer:Infinity},e);let{maxBuffer:r}=e,i;return await new Promise((n,s)=>{let o=a=>{a&&(a.bufferedData=i.getBufferedValue()),s(a)};i=oxe(t,axe(e),a=>{if(a){o(a);return}n()}),i.on(\"data\",()=>{i.getBufferedLength()>r&&o(new _x)})}),i.getBufferedValue()}zg.exports=xw;zg.exports.default=xw;zg.exports.buffer=(t,e)=>xw(t,te(N({},e),{encoding:\"buffer\"}));zg.exports.array=(t,e)=>xw(t,te(N({},e),{array:!0}));zg.exports.MaxBufferError=_x});var Lz=w(($st,Nz)=>{\"use strict\";var Axe=[200,203,204,206,300,301,404,405,410,414,501],lxe=[200,203,204,300,301,302,303,307,308,404,405,410,414,501],cxe={date:!0,connection:!0,\"keep-alive\":!0,\"proxy-authenticate\":!0,\"proxy-authorization\":!0,te:!0,trailer:!0,\"transfer-encoding\":!0,upgrade:!0},uxe={\"content-length\":!0,\"content-encoding\":!0,\"transfer-encoding\":!0,\"content-range\":!0};function Vx(t){let e={};if(!t)return e;let r=t.trim().split(/\\s*,\\s*/);for(let i of r){let[n,s]=i.split(/\\s*=\\s*/,2);e[n]=s===void 0?!0:s.replace(/^\"|\"$/g,\"\")}return e}function gxe(t){let e=[];for(let r in t){let i=t[r];e.push(i===!0?r:r+\"=\"+i)}if(!!e.length)return e.join(\", 
\")}Nz.exports=class{constructor(e,r,{shared:i,cacheHeuristic:n,immutableMinTimeToLive:s,ignoreCargoCult:o,trustServerDate:a,_fromObject:l}={}){if(l){this._fromObject(l);return}if(!r||!r.headers)throw Error(\"Response headers missing\");this._assertRequestHasHeaders(e),this._responseTime=this.now(),this._isShared=i!==!1,this._trustServerDate=a!==void 0?a:!0,this._cacheHeuristic=n!==void 0?n:.1,this._immutableMinTtl=s!==void 0?s:24*3600*1e3,this._status=\"status\"in r?r.status:200,this._resHeaders=r.headers,this._rescc=Vx(r.headers[\"cache-control\"]),this._method=\"method\"in e?e.method:\"GET\",this._url=e.url,this._host=e.headers.host,this._noAuthorization=!e.headers.authorization,this._reqHeaders=r.headers.vary?e.headers:null,this._reqcc=Vx(e.headers[\"cache-control\"]),o&&\"pre-check\"in this._rescc&&\"post-check\"in this._rescc&&(delete this._rescc[\"pre-check\"],delete this._rescc[\"post-check\"],delete this._rescc[\"no-cache\"],delete this._rescc[\"no-store\"],delete this._rescc[\"must-revalidate\"],this._resHeaders=Object.assign({},this._resHeaders,{\"cache-control\":gxe(this._rescc)}),delete this._resHeaders.expires,delete this._resHeaders.pragma),!r.headers[\"cache-control\"]&&/no-cache/.test(r.headers.pragma)&&(this._rescc[\"no-cache\"]=!0)}now(){return Date.now()}storable(){return!!(!this._reqcc[\"no-store\"]&&(this._method===\"GET\"||this._method===\"HEAD\"||this._method===\"POST\"&&this._hasExplicitExpiration())&&lxe.indexOf(this._status)!==-1&&!this._rescc[\"no-store\"]&&(!this._isShared||!this._rescc.private)&&(!this._isShared||this._noAuthorization||this._allowsStoringAuthenticated())&&(this._resHeaders.expires||this._rescc.public||this._rescc[\"max-age\"]||this._rescc[\"s-maxage\"]||Axe.indexOf(this._status)!==-1))}_hasExplicitExpiration(){return this._isShared&&this._rescc[\"s-maxage\"]||this._rescc[\"max-age\"]||this._resHeaders.expires}_assertRequestHasHeaders(e){if(!e||!e.headers)throw Error(\"Request headers missing\")}satisfiesWithoutRevalidation(e){this._assertRequestHasHeaders(e);let r=Vx(e.headers[\"cache-control\"]);return r[\"no-cache\"]||/no-cache/.test(e.headers.pragma)||r[\"max-age\"]&&this.age()>r[\"max-age\"]||r[\"min-fresh\"]&&this.timeToLive()<1e3*r[\"min-fresh\"]||this.stale()&&!(r[\"max-stale\"]&&!this._rescc[\"must-revalidate\"]&&(r[\"max-stale\"]===!0||r[\"max-stale\"]>this.age()-this.maxAge()))?!1:this._requestMatches(e,!1)}_requestMatches(e,r){return(!this._url||this._url===e.url)&&this._host===e.headers.host&&(!e.method||this._method===e.method||r&&e.method===\"HEAD\")&&this._varyMatches(e)}_allowsStoringAuthenticated(){return this._rescc[\"must-revalidate\"]||this._rescc.public||this._rescc[\"s-maxage\"]}_varyMatches(e){if(!this._resHeaders.vary)return!0;if(this._resHeaders.vary===\"*\")return!1;let r=this._resHeaders.vary.trim().toLowerCase().split(/\\s*,\\s*/);for(let i of r)if(e.headers[i]!==this._reqHeaders[i])return!1;return!0}_copyWithoutHopByHopHeaders(e){let r={};for(let i in e)cxe[i]||(r[i]=e[i]);if(e.connection){let i=e.connection.trim().split(/\\s*,\\s*/);for(let n of i)delete r[n]}if(r.warning){let i=r.warning.split(/,/).filter(n=>!/^\\s*1[0-9][0-9]/.test(n));i.length?r.warning=i.join(\",\").trim():delete r.warning}return r}responseHeaders(){let e=this._copyWithoutHopByHopHeaders(this._resHeaders),r=this.age();return r>3600*24&&!this._hasExplicitExpiration()&&this.maxAge()>3600*24&&(e.warning=(e.warning?`${e.warning}, `:\"\")+'113 - \"rfc7234 5.5.4\"'),e.age=`${Math.round(r)}`,e.date=new Date(this.now()).toUTCString(),e}date(){return 
this._trustServerDate?this._serverDate():this._responseTime}_serverDate(){let e=Date.parse(this._resHeaders.date);if(isFinite(e)){let r=8*3600*1e3;if(Math.abs(this._responseTime-e)<r)return e}return this._responseTime}age(){let e=Math.max(0,(this._responseTime-this.date())/1e3);if(this._resHeaders.age){let i=this._ageValue();i>e&&(e=i)}let r=(this.now()-this._responseTime)/1e3;return e+r}_ageValue(){let e=parseInt(this._resHeaders.age);return isFinite(e)?e:0}maxAge(){if(!this.storable()||this._rescc[\"no-cache\"]||this._isShared&&this._resHeaders[\"set-cookie\"]&&!this._rescc.public&&!this._rescc.immutable||this._resHeaders.vary===\"*\")return 0;if(this._isShared){if(this._rescc[\"proxy-revalidate\"])return 0;if(this._rescc[\"s-maxage\"])return parseInt(this._rescc[\"s-maxage\"],10)}if(this._rescc[\"max-age\"])return parseInt(this._rescc[\"max-age\"],10);let e=this._rescc.immutable?this._immutableMinTtl:0,r=this._serverDate();if(this._resHeaders.expires){let i=Date.parse(this._resHeaders.expires);return Number.isNaN(i)||i<r?0:Math.max(e,(i-r)/1e3)}if(this._resHeaders[\"last-modified\"]){let i=Date.parse(this._resHeaders[\"last-modified\"]);if(isFinite(i)&&r>i)return Math.max(e,(r-i)/1e3*this._cacheHeuristic)}return e}timeToLive(){return Math.max(0,this.maxAge()-this.age())*1e3}stale(){return this.maxAge()<=this.age()}static fromObject(e){return new this(void 0,void 0,{_fromObject:e})}_fromObject(e){if(this._responseTime)throw Error(\"Reinitialized\");if(!e||e.v!==1)throw Error(\"Invalid serialization\");this._responseTime=e.t,this._isShared=e.sh,this._cacheHeuristic=e.ch,this._immutableMinTtl=e.imm!==void 0?e.imm:24*3600*1e3,this._status=e.st,this._resHeaders=e.resh,this._rescc=e.rescc,this._method=e.m,this._url=e.u,this._host=e.h,this._noAuthorization=e.a,this._reqHeaders=e.reqh,this._reqcc=e.reqcc}toObject(){return{v:1,t:this._responseTime,sh:this._isShared,ch:this._cacheHeuristic,imm:this._immutableMinTtl,st:this._status,resh:this._resHeaders,rescc:this._rescc,m:this._method,u:this._url,h:this._host,a:this._noAuthorization,reqh:this._reqHeaders,reqcc:this._reqcc}}revalidationHeaders(e){this._assertRequestHasHeaders(e);let r=this._copyWithoutHopByHopHeaders(e.headers);if(delete r[\"if-range\"],!this._requestMatches(e,!0)||!this.storable())return delete r[\"if-none-match\"],delete r[\"if-modified-since\"],r;if(this._resHeaders.etag&&(r[\"if-none-match\"]=r[\"if-none-match\"]?`${r[\"if-none-match\"]}, ${this._resHeaders.etag}`:this._resHeaders.etag),r[\"accept-ranges\"]||r[\"if-match\"]||r[\"if-unmodified-since\"]||this._method&&this._method!=\"GET\"){if(delete r[\"if-modified-since\"],r[\"if-none-match\"]){let n=r[\"if-none-match\"].split(/,/).filter(s=>!/^\\s*W\\//.test(s));n.length?r[\"if-none-match\"]=n.join(\",\").trim():delete r[\"if-none-match\"]}}else this._resHeaders[\"last-modified\"]&&!r[\"if-modified-since\"]&&(r[\"if-modified-since\"]=this._resHeaders[\"last-modified\"]);return r}revalidatedPolicy(e,r){if(this._assertRequestHasHeaders(e),!r||!r.headers)throw Error(\"Response headers missing\");let i=!1;if(r.status!==void 
0&&r.status!=304?i=!1:r.headers.etag&&!/^\\s*W\\//.test(r.headers.etag)?i=this._resHeaders.etag&&this._resHeaders.etag.replace(/^\\s*W\\//,\"\")===r.headers.etag:this._resHeaders.etag&&r.headers.etag?i=this._resHeaders.etag.replace(/^\\s*W\\//,\"\")===r.headers.etag.replace(/^\\s*W\\//,\"\"):this._resHeaders[\"last-modified\"]?i=this._resHeaders[\"last-modified\"]===r.headers[\"last-modified\"]:!this._resHeaders.etag&&!this._resHeaders[\"last-modified\"]&&!r.headers.etag&&!r.headers[\"last-modified\"]&&(i=!0),!i)return{policy:new this.constructor(e,r),modified:r.status!=304,matches:!1};let n={};for(let o in this._resHeaders)n[o]=o in r.headers&&!uxe[o]?r.headers[o]:this._resHeaders[o];let s=Object.assign({},r,{status:this._status,method:this._method,headers:n});return{policy:new this.constructor(e,s,{shared:this._isShared,cacheHeuristic:this._cacheHeuristic,immutableMinTimeToLive:this._immutableMinTtl,trustServerDate:this._trustServerDate}),modified:!1,matches:!0}}}});var Pw=w((eot,Tz)=>{\"use strict\";Tz.exports=t=>{let e={};for(let[r,i]of Object.entries(t))e[r.toLowerCase()]=i;return e}});var Uz=w((tot,Oz)=>{\"use strict\";var fxe=require(\"stream\").Readable,hxe=Pw(),Mz=class extends fxe{constructor(e,r,i,n){if(typeof e!=\"number\")throw new TypeError(\"Argument `statusCode` should be a number\");if(typeof r!=\"object\")throw new TypeError(\"Argument `headers` should be an object\");if(!(i instanceof Buffer))throw new TypeError(\"Argument `body` should be a buffer\");if(typeof n!=\"string\")throw new TypeError(\"Argument `url` should be a string\");super();this.statusCode=e,this.headers=hxe(r),this.body=i,this.url=n}_read(){this.push(this.body),this.push(null)}};Oz.exports=Mz});var Hz=w((rot,Kz)=>{\"use strict\";var pxe=[\"destroy\",\"setTimeout\",\"socket\",\"headers\",\"trailers\",\"rawHeaders\",\"statusCode\",\"httpVersion\",\"httpVersionMinor\",\"httpVersionMajor\",\"rawTrailers\",\"statusMessage\"];Kz.exports=(t,e)=>{let r=new Set(Object.keys(t).concat(pxe));for(let i of r)i in e||(e[i]=typeof t[i]==\"function\"?t[i].bind(t):t[i])}});var Gz=w((iot,jz)=>{\"use strict\";var dxe=require(\"stream\").PassThrough,Cxe=Hz(),mxe=t=>{if(!(t&&t.pipe))throw new TypeError(\"Parameter `response` must be a response stream.\");let e=new dxe;return Cxe(t,e),t.pipe(e)};jz.exports=mxe});var Yz=w(Xx=>{Xx.stringify=function t(e){if(typeof e==\"undefined\")return e;if(e&&Buffer.isBuffer(e))return JSON.stringify(\":base64:\"+e.toString(\"base64\"));if(e&&e.toJSON&&(e=e.toJSON()),e&&typeof e==\"object\"){var r=\"\",i=Array.isArray(e);r=i?\"[\":\"{\";var n=!0;for(var s in e){var o=typeof e[s]==\"function\"||!i&&typeof e[s]==\"undefined\";Object.hasOwnProperty.call(e,s)&&!o&&(n||(r+=\",\"),n=!1,i?e[s]==null?r+=\"null\":r+=t(e[s]):e[s]!==void 0&&(r+=t(s)+\":\"+t(e[s])))}return r+=i?\"]\":\"}\",r}else return typeof e==\"string\"?JSON.stringify(/^:/.test(e)?\":\"+e:e):typeof e==\"undefined\"?\"null\":JSON.stringify(e)};Xx.parse=function(t){return JSON.parse(t,function(e,r){return typeof r==\"string\"?/^:base64:/.test(r)?Buffer.from(r.substring(8),\"base64\"):/^:/.test(r)?r.substring(1):r:r})}});var zz=w((sot,qz)=>{\"use strict\";var Exe=require(\"events\"),Jz=Yz(),Ixe=t=>{let e={redis:\"@keyv/redis\",mongodb:\"@keyv/mongo\",mongo:\"@keyv/mongo\",sqlite:\"@keyv/sqlite\",postgresql:\"@keyv/postgres\",postgres:\"@keyv/postgres\",mysql:\"@keyv/mysql\"};if(t.adapter||t.uri){let r=t.adapter||/^[^:]*/.exec(t.uri)[0];return new(require(e[r]))(t)}return new Map},Wz=class extends 
Exe{constructor(e,r){super();if(this.opts=Object.assign({namespace:\"keyv\",serialize:Jz.stringify,deserialize:Jz.parse},typeof e==\"string\"?{uri:e}:e,r),!this.opts.store){let i=Object.assign({},this.opts);this.opts.store=Ixe(i)}typeof this.opts.store.on==\"function\"&&this.opts.store.on(\"error\",i=>this.emit(\"error\",i)),this.opts.store.namespace=this.opts.namespace}_getKeyPrefix(e){return`${this.opts.namespace}:${e}`}get(e,r){e=this._getKeyPrefix(e);let{store:i}=this.opts;return Promise.resolve().then(()=>i.get(e)).then(n=>typeof n==\"string\"?this.opts.deserialize(n):n).then(n=>{if(n!==void 0){if(typeof n.expires==\"number\"&&Date.now()>n.expires){this.delete(e);return}return r&&r.raw?n:n.value}})}set(e,r,i){e=this._getKeyPrefix(e),typeof i==\"undefined\"&&(i=this.opts.ttl),i===0&&(i=void 0);let{store:n}=this.opts;return Promise.resolve().then(()=>{let s=typeof i==\"number\"?Date.now()+i:null;return r={value:r,expires:s},this.opts.serialize(r)}).then(s=>n.set(e,s,i)).then(()=>!0)}delete(e){e=this._getKeyPrefix(e);let{store:r}=this.opts;return Promise.resolve().then(()=>r.delete(e))}clear(){let{store:e}=this.opts;return Promise.resolve().then(()=>e.clear())}};qz.exports=Wz});var Xz=w((oot,_z)=>{\"use strict\";var yxe=require(\"events\"),Dw=require(\"url\"),wxe=Iz(),Bxe=Fz(),Zx=Lz(),Vz=Uz(),bxe=Pw(),Qxe=Gz(),vxe=zz(),ea=class{constructor(e,r){if(typeof e!=\"function\")throw new TypeError(\"Parameter `request` must be a function\");return this.cache=new vxe({uri:typeof r==\"string\"&&r,store:typeof r!=\"string\"&&r,namespace:\"cacheable-request\"}),this.createCacheableRequest(e)}createCacheableRequest(e){return(r,i)=>{let n;if(typeof r==\"string\")n=$x(Dw.parse(r)),r={};else if(r instanceof Dw.URL)n=$x(Dw.parse(r.toString())),r={};else{let[g,...f]=(r.path||\"\").split(\"?\"),h=f.length>0?`?${f.join(\"?\")}`:\"\";n=$x(te(N({},r),{pathname:g,search:h}))}r=N(N({headers:{},method:\"GET\",cache:!0,strictTtl:!1,automaticFailover:!1},r),Sxe(n)),r.headers=bxe(r.headers);let s=new yxe,o=wxe(Dw.format(n),{stripWWW:!1,removeTrailingSlash:!1,stripAuthentication:!1}),a=`${r.method}:${o}`,l=!1,c=!1,u=g=>{c=!0;let f=!1,h,p=new Promise(y=>{h=()=>{f||(f=!0,y())}}),m=y=>{if(l&&!g.forceRefresh){y.status=y.statusCode;let S=Zx.fromObject(l.cachePolicy).revalidatedPolicy(g,y);if(!S.modified){let x=S.policy.responseHeaders();y=new Vz(l.statusCode,x,l.body,l.url),y.cachePolicy=S.policy,y.fromCache=!0}}y.fromCache||(y.cachePolicy=new Zx(g,y,g),y.fromCache=!1);let Q;g.cache&&y.cachePolicy.storable()?(Q=Qxe(y),(async()=>{try{let S=Bxe.buffer(y);if(await Promise.race([p,new Promise(U=>y.once(\"end\",U))]),f)return;let x=await S,M={cachePolicy:y.cachePolicy.toObject(),url:y.url,statusCode:y.fromCache?l.statusCode:y.statusCode,body:x},Y=g.strictTtl?y.cachePolicy.timeToLive():void 0;g.maxTtl&&(Y=Y?Math.min(Y,g.maxTtl):g.maxTtl),await this.cache.set(a,M,Y)}catch(S){s.emit(\"error\",new ea.CacheError(S))}})()):g.cache&&l&&(async()=>{try{await this.cache.delete(a)}catch(S){s.emit(\"error\",new ea.CacheError(S))}})(),s.emit(\"response\",Q||y),typeof i==\"function\"&&i(Q||y)};try{let y=e(g,m);y.once(\"error\",h),y.once(\"abort\",h),s.emit(\"request\",y)}catch(y){s.emit(\"error\",new ea.RequestError(y))}};return(async()=>{let g=async h=>{await Promise.resolve();let p=h.cache?await this.cache.get(a):void 0;if(typeof p==\"undefined\")return u(h);let m=Zx.fromObject(p.cachePolicy);if(m.satisfiesWithoutRevalidation(h)&&!h.forceRefresh){let y=m.responseHeaders(),Q=new 
Vz(p.statusCode,y,p.body,p.url);Q.cachePolicy=m,Q.fromCache=!0,s.emit(\"response\",Q),typeof i==\"function\"&&i(Q)}else l=p,h.headers=m.revalidationHeaders(h),u(h)},f=h=>s.emit(\"error\",new ea.CacheError(h));this.cache.once(\"error\",f),s.on(\"response\",()=>this.cache.removeListener(\"error\",f));try{await g(r)}catch(h){r.automaticFailover&&!c&&u(r),s.emit(\"error\",new ea.CacheError(h))}})(),s}}};function Sxe(t){let e=N({},t);return e.path=`${t.pathname||\"/\"}${t.search||\"\"}`,delete e.pathname,delete e.search,e}function $x(t){return{protocol:t.protocol,auth:t.auth,hostname:t.hostname||t.host||\"localhost\",port:t.port,pathname:t.pathname,search:t.search}}ea.RequestError=class extends Error{constructor(t){super(t.message);this.name=\"RequestError\",Object.assign(this,t)}};ea.CacheError=class extends Error{constructor(t){super(t.message);this.name=\"CacheError\",Object.assign(this,t)}};_z.exports=ea});var $z=w((aot,Zz)=>{\"use strict\";var kxe=[\"aborted\",\"complete\",\"headers\",\"httpVersion\",\"httpVersionMinor\",\"httpVersionMajor\",\"method\",\"rawHeaders\",\"rawTrailers\",\"setTimeout\",\"socket\",\"statusCode\",\"statusMessage\",\"trailers\",\"url\"];Zz.exports=(t,e)=>{if(e._readableState.autoDestroy)throw new Error(\"The second stream must have the `autoDestroy` option set to `false`\");let r=new Set(Object.keys(t).concat(kxe)),i={};for(let n of r)n in e||(i[n]={get(){let s=t[n];return typeof s==\"function\"?s.bind(t):s},set(s){t[n]=s},enumerable:!0,configurable:!1});return Object.defineProperties(e,i),t.once(\"aborted\",()=>{e.destroy(),e.emit(\"aborted\")}),t.once(\"close\",()=>{t.complete&&e.readable?e.once(\"end\",()=>{e.emit(\"close\")}):e.emit(\"close\")}),e}});var t4=w((Aot,e4)=>{\"use strict\";var{Transform:xxe,PassThrough:Pxe}=require(\"stream\"),eP=require(\"zlib\"),Dxe=$z();e4.exports=t=>{let e=(t.headers[\"content-encoding\"]||\"\").toLowerCase();if(![\"gzip\",\"deflate\",\"br\"].includes(e))return t;let r=e===\"br\";if(r&&typeof eP.createBrotliDecompress!=\"function\")return t.destroy(new Error(\"Brotli is not supported on Node.js < 12\")),t;let i=!0,n=new xxe({transform(a,l,c){i=!1,c(null,a)},flush(a){a()}}),s=new Pxe({autoDestroy:!1,destroy(a,l){t.destroy(),l(a)}}),o=r?eP.createBrotliDecompress():eP.createUnzip();return o.once(\"error\",a=>{if(i&&!t.readable){s.end();return}s.destroy(a)}),Dxe(t,s),t.pipe(n).pipe(o).pipe(s),s}});var tP=w((lot,r4)=>{\"use strict\";var i4=class{constructor(e={}){if(!(e.maxSize&&e.maxSize>0))throw new TypeError(\"`maxSize` must be a number greater than 0\");this.maxSize=e.maxSize,this.onEviction=e.onEviction,this.cache=new Map,this.oldCache=new Map,this._size=0}_set(e,r){if(this.cache.set(e,r),this._size++,this._size>=this.maxSize){if(this._size=0,typeof this.onEviction==\"function\")for(let[i,n]of this.oldCache.entries())this.onEviction(i,n);this.oldCache=this.cache,this.cache=new Map}}get(e){if(this.cache.has(e))return this.cache.get(e);if(this.oldCache.has(e)){let r=this.oldCache.get(e);return this.oldCache.delete(e),this._set(e,r),r}}set(e,r){return this.cache.has(e)?this.cache.set(e,r):this._set(e,r),this}has(e){return this.cache.has(e)||this.oldCache.has(e)}peek(e){if(this.cache.has(e))return this.cache.get(e);if(this.oldCache.has(e))return this.oldCache.get(e)}delete(e){let r=this.cache.delete(e);return r&&this._size--,this.oldCache.delete(e)||r}clear(){this.cache.clear(),this.oldCache.clear(),this._size=0}*keys(){for(let[e]of this)yield e}*values(){for(let[,e]of this)yield e}*[Symbol.iterator](){for(let e of this.cache)yield 
e;for(let e of this.oldCache){let[r]=e;this.cache.has(r)||(yield e)}}get size(){let e=0;for(let r of this.oldCache.keys())this.cache.has(r)||e++;return Math.min(this._size+e,this.maxSize)}};r4.exports=i4});var iP=w((cot,n4)=>{\"use strict\";var Rxe=require(\"events\"),Fxe=require(\"tls\"),Nxe=require(\"http2\"),Lxe=tP(),gn=Symbol(\"currentStreamsCount\"),s4=Symbol(\"request\"),Fs=Symbol(\"cachedOriginSet\"),_g=Symbol(\"gracefullyClosing\"),Txe=[\"maxDeflateDynamicTableSize\",\"maxSessionMemory\",\"maxHeaderListPairs\",\"maxOutstandingPings\",\"maxReservedRemoteStreams\",\"maxSendHeaderBlockLength\",\"paddingStrategy\",\"localAddress\",\"path\",\"rejectUnauthorized\",\"minDHSize\",\"ca\",\"cert\",\"clientCertEngine\",\"ciphers\",\"key\",\"pfx\",\"servername\",\"minVersion\",\"maxVersion\",\"secureProtocol\",\"crl\",\"honorCipherOrder\",\"ecdhCurve\",\"dhparam\",\"secureOptions\",\"sessionIdContext\"],Oxe=(t,e,r)=>{let i=0,n=t.length;for(;i<n;){let s=i+n>>>1;r(t[s],e)?i=s+1:n=s}return i},Mxe=(t,e)=>t.remoteSettings.maxConcurrentStreams>e.remoteSettings.maxConcurrentStreams,rP=(t,e)=>{for(let r of t)r[Fs].length<e[Fs].length&&r[Fs].every(i=>e[Fs].includes(i))&&r[gn]+e[gn]<=e.remoteSettings.maxConcurrentStreams&&o4(r)},Uxe=(t,e)=>{for(let r of t)e[Fs].length<r[Fs].length&&e[Fs].every(i=>r[Fs].includes(i))&&e[gn]+r[gn]<=r.remoteSettings.maxConcurrentStreams&&o4(e)},a4=({agent:t,isFree:e})=>{let r={};for(let i in t.sessions){let s=t.sessions[i].filter(o=>{let a=o[eA.kCurrentStreamsCount]<o.remoteSettings.maxConcurrentStreams;return e?a:!a});s.length!==0&&(r[i]=s)}return r},o4=t=>{t[_g]=!0,t[gn]===0&&t.close()},eA=class extends Rxe{constructor({timeout:e=6e4,maxSessions:r=Infinity,maxFreeSessions:i=10,maxCachedTlsSessions:n=100}={}){super();this.sessions={},this.queue={},this.timeout=e,this.maxSessions=r,this.maxFreeSessions=i,this._freeSessionsCount=0,this._sessionsCount=0,this.settings={enablePush:!1},this.tlsSessionCache=new Lxe({maxSize:n})}static normalizeOrigin(e,r){return typeof e==\"string\"&&(e=new URL(e)),r&&e.hostname!==r&&(e.hostname=r),e.origin}normalizeOptions(e){let r=\"\";if(e)for(let i of Txe)e[i]&&(r+=`:${e[i]}`);return r}_tryToCreateNewSession(e,r){if(!(e in this.queue)||!(r in this.queue[e]))return;let i=this.queue[e][r];this._sessionsCount<this.maxSessions&&!i.completed&&(i.completed=!0,i())}getSession(e,r,i){return new Promise((n,s)=>{Array.isArray(i)?(i=[...i],n()):i=[{resolve:n,reject:s}];let o=this.normalizeOptions(r),a=eA.normalizeOrigin(e,r&&r.servername);if(a===void 0){for(let{reject:u}of i)u(new TypeError(\"The `origin` argument needs to be a string or an URL object\"));return}if(o in this.sessions){let u=this.sessions[o],g=-1,f=-1,h;for(let p of u){let m=p.remoteSettings.maxConcurrentStreams;if(m<g)break;if(p[Fs].includes(a)){let y=p[gn];if(y>=m||p[_g]||p.destroyed)continue;h||(g=m),y>f&&(h=p,f=y)}}if(h){if(i.length!==1){for(let{reject:p}of i){let m=new Error(`Expected the length of listeners to be 1, got ${i.length}.\nPlease report this to https://github.com/szmarczak/http2-wrapper/`);p(m)}return}i[0].resolve(h);return}}if(o in this.queue){if(a in this.queue[o]){this.queue[o][a].listeners.push(...i),this._tryToCreateNewSession(o,a);return}}else this.queue[o]={};let l=()=>{o in this.queue&&this.queue[o][a]===c&&(delete this.queue[o][a],Object.keys(this.queue[o]).length===0&&delete this.queue[o])},c=()=>{let u=`${a}:${o}`,g=!1;try{let 
f=Nxe.connect(e,N({createConnection:this.createConnection,settings:this.settings,session:this.tlsSessionCache.get(u)},r));f[gn]=0,f[_g]=!1;let h=()=>f[gn]<f.remoteSettings.maxConcurrentStreams,p=!0;f.socket.once(\"session\",y=>{this.tlsSessionCache.set(u,y)}),f.once(\"error\",y=>{for(let{reject:Q}of i)Q(y);this.tlsSessionCache.delete(u)}),f.setTimeout(this.timeout,()=>{f.destroy()}),f.once(\"close\",()=>{if(g){p&&this._freeSessionsCount--,this._sessionsCount--;let y=this.sessions[o];y.splice(y.indexOf(f),1),y.length===0&&delete this.sessions[o]}else{let y=new Error(\"Session closed without receiving a SETTINGS frame\");y.code=\"HTTP2WRAPPER_NOSETTINGS\";for(let{reject:Q}of i)Q(y);l()}this._tryToCreateNewSession(o,a)});let m=()=>{if(!(!(o in this.queue)||!h())){for(let y of f[Fs])if(y in this.queue[o]){let{listeners:Q}=this.queue[o][y];for(;Q.length!==0&&h();)Q.shift().resolve(f);let S=this.queue[o];if(S[y].listeners.length===0&&(delete S[y],Object.keys(S).length===0)){delete this.queue[o];break}if(!h())break}}};f.on(\"origin\",()=>{f[Fs]=f.originSet,!!h()&&(m(),rP(this.sessions[o],f))}),f.once(\"remoteSettings\",()=>{if(f.ref(),f.unref(),this._sessionsCount++,c.destroyed){let y=new Error(\"Agent has been destroyed\");for(let Q of i)Q.reject(y);f.destroy();return}f[Fs]=f.originSet;{let y=this.sessions;if(o in y){let Q=y[o];Q.splice(Oxe(Q,f,Mxe),0,f)}else y[o]=[f]}this._freeSessionsCount+=1,g=!0,this.emit(\"session\",f),m(),l(),f[gn]===0&&this._freeSessionsCount>this.maxFreeSessions&&f.close(),i.length!==0&&(this.getSession(a,r,i),i.length=0),f.on(\"remoteSettings\",()=>{m(),rP(this.sessions[o],f)})}),f[s4]=f.request,f.request=(y,Q)=>{if(f[_g])throw new Error(\"The session is gracefully closing. No new streams are allowed.\");let S=f[s4](y,Q);return f.ref(),++f[gn],f[gn]===f.remoteSettings.maxConcurrentStreams&&this._freeSessionsCount--,S.once(\"close\",()=>{if(p=h(),--f[gn],!f.destroyed&&!f.closed&&(Uxe(this.sessions[o],f),h()&&!f.closed)){p||(this._freeSessionsCount++,p=!0);let x=f[gn]===0;x&&f.unref(),x&&(this._freeSessionsCount>this.maxFreeSessions||f[_g])?f.close():(rP(this.sessions[o],f),m())}}),S}}catch(f){for(let h of i)h.reject(f);l()}};c.listeners=i,c.completed=!1,c.destroyed=!1,this.queue[o][a]=c,this._tryToCreateNewSession(o,a)})}request(e,r,i,n){return new Promise((s,o)=>{this.getSession(e,r,[{reject:o,resolve:a=>{try{s(a.request(i,n))}catch(l){o(l)}}}])})}createConnection(e,r){return eA.connect(e,r)}static connect(e,r){r.ALPNProtocols=[\"h2\"];let i=e.port||443,n=e.hostname||e.host;return typeof r.servername==\"undefined\"&&(r.servername=n),Fxe.connect(i,n,r)}closeFreeSessions(){for(let e of Object.values(this.sessions))for(let r of e)r[gn]===0&&r.close()}destroy(e){for(let r of Object.values(this.sessions))for(let i of r)i.destroy(e);for(let r of Object.values(this.queue))for(let i of Object.values(r))i.destroyed=!0;this.queue={}}get freeSessions(){return a4({agent:this,isFree:!0})}get busySessions(){return a4({agent:this,isFree:!1})}};eA.kCurrentStreamsCount=gn;eA.kGracefullyClosing=_g;n4.exports={Agent:eA,globalAgent:new eA}});var nP=w((uot,A4)=>{\"use strict\";var{Readable:Kxe}=require(\"stream\"),l4=class extends 
Kxe{constructor(e,r){super({highWaterMark:r,autoDestroy:!1});this.statusCode=null,this.statusMessage=\"\",this.httpVersion=\"2.0\",this.httpVersionMajor=2,this.httpVersionMinor=0,this.headers={},this.trailers={},this.req=null,this.aborted=!1,this.complete=!1,this.upgrade=null,this.rawHeaders=[],this.rawTrailers=[],this.socket=e,this.connection=e,this._dumped=!1}_destroy(e){this.req._request.destroy(e)}setTimeout(e,r){return this.req.setTimeout(e,r),this}_dump(){this._dumped||(this._dumped=!0,this.removeAllListeners(\"data\"),this.resume())}_read(){this.req&&this.req._request.resume()}};A4.exports=l4});var sP=w((got,c4)=>{\"use strict\";c4.exports=t=>{let e={protocol:t.protocol,hostname:typeof t.hostname==\"string\"&&t.hostname.startsWith(\"[\")?t.hostname.slice(1,-1):t.hostname,host:t.host,hash:t.hash,search:t.search,pathname:t.pathname,href:t.href,path:`${t.pathname||\"\"}${t.search||\"\"}`};return typeof t.port==\"string\"&&t.port.length!==0&&(e.port=Number(t.port)),(t.username||t.password)&&(e.auth=`${t.username||\"\"}:${t.password||\"\"}`),e}});var g4=w((fot,u4)=>{\"use strict\";u4.exports=(t,e,r)=>{for(let i of r)t.on(i,(...n)=>e.emit(i,...n))}});var h4=w((hot,f4)=>{\"use strict\";f4.exports=t=>{switch(t){case\":method\":case\":scheme\":case\":authority\":case\":path\":return!0;default:return!1}}});var d4=w((dot,p4)=>{\"use strict\";var Vg=(t,e,r)=>{p4.exports[e]=class extends t{constructor(...n){super(typeof r==\"string\"?r:r(n));this.name=`${super.name} [${e}]`,this.code=e}}};Vg(TypeError,\"ERR_INVALID_ARG_TYPE\",t=>{let e=t[0].includes(\".\")?\"property\":\"argument\",r=t[1],i=Array.isArray(r);return i&&(r=`${r.slice(0,-1).join(\", \")} or ${r.slice(-1)}`),`The \"${t[0]}\" ${e} must be ${i?\"one of\":\"of\"} type ${r}. Received ${typeof t[2]}`});Vg(TypeError,\"ERR_INVALID_PROTOCOL\",t=>`Protocol \"${t[0]}\" not supported. 
Expected \"${t[1]}\"`);Vg(Error,\"ERR_HTTP_HEADERS_SENT\",t=>`Cannot ${t[0]} headers after they are sent to the client`);Vg(TypeError,\"ERR_INVALID_HTTP_TOKEN\",t=>`${t[0]} must be a valid HTTP token [${t[1]}]`);Vg(TypeError,\"ERR_HTTP_INVALID_HEADER_VALUE\",t=>`Invalid value \"${t[0]} for header \"${t[1]}\"`);Vg(TypeError,\"ERR_INVALID_CHAR\",t=>`Invalid character in ${t[0]} [${t[1]}]`)});var lP=w((Cot,C4)=>{\"use strict\";var Hxe=require(\"http2\"),{Writable:jxe}=require(\"stream\"),{Agent:m4,globalAgent:Gxe}=iP(),Yxe=nP(),qxe=sP(),Jxe=g4(),Wxe=h4(),{ERR_INVALID_ARG_TYPE:oP,ERR_INVALID_PROTOCOL:zxe,ERR_HTTP_HEADERS_SENT:E4,ERR_INVALID_HTTP_TOKEN:_xe,ERR_HTTP_INVALID_HEADER_VALUE:Vxe,ERR_INVALID_CHAR:Xxe}=d4(),{HTTP2_HEADER_STATUS:I4,HTTP2_HEADER_METHOD:y4,HTTP2_HEADER_PATH:w4,HTTP2_METHOD_CONNECT:Zxe}=Hxe.constants,Wi=Symbol(\"headers\"),aP=Symbol(\"origin\"),AP=Symbol(\"session\"),B4=Symbol(\"options\"),Rw=Symbol(\"flushedHeaders\"),Pd=Symbol(\"jobs\"),$xe=/^[\\^`\\-\\w!#$%&*+.|~]+$/,ePe=/[^\\t\\u0020-\\u007E\\u0080-\\u00FF]/,b4=class extends jxe{constructor(e,r,i){super({autoDestroy:!1});let n=typeof e==\"string\"||e instanceof URL;if(n&&(e=qxe(e instanceof URL?e:new URL(e))),typeof r==\"function\"||r===void 0?(i=r,r=n?e:N({},e)):r=N(N({},e),r),r.h2session)this[AP]=r.h2session;else if(r.agent===!1)this.agent=new m4({maxFreeSessions:0});else if(typeof r.agent==\"undefined\"||r.agent===null)typeof r.createConnection==\"function\"?(this.agent=new m4({maxFreeSessions:0}),this.agent.createConnection=r.createConnection):this.agent=Gxe;else if(typeof r.agent.request==\"function\")this.agent=r.agent;else throw new oP(\"options.agent\",[\"Agent-like Object\",\"undefined\",\"false\"],r.agent);if(r.protocol&&r.protocol!==\"https:\")throw new zxe(r.protocol,\"https:\");let s=r.port||r.defaultPort||this.agent&&this.agent.defaultPort||443,o=r.hostname||r.host||\"localhost\";delete r.hostname,delete r.host,delete r.port;let{timeout:a}=r;if(r.timeout=void 0,this[Wi]=Object.create(null),this[Pd]=[],this.socket=null,this.connection=null,this.method=r.method||\"GET\",this.path=r.path,this.res=null,this.aborted=!1,this.reusedSocket=!1,r.headers)for(let[l,c]of Object.entries(r.headers))this.setHeader(l,c);r.auth&&!(\"authorization\"in this[Wi])&&(this[Wi].authorization=\"Basic \"+Buffer.from(r.auth).toString(\"base64\")),r.session=r.tlsSession,r.path=r.socketPath,this[B4]=r,s===443?(this[aP]=`https://${o}`,\":authority\"in this[Wi]||(this[Wi][\":authority\"]=o)):(this[aP]=`https://${o}:${s}`,\":authority\"in this[Wi]||(this[Wi][\":authority\"]=`${o}:${s}`)),a&&this.setTimeout(a),i&&this.once(\"response\",i),this[Rw]=!1}get method(){return this[Wi][y4]}set method(e){e&&(this[Wi][y4]=e.toUpperCase())}get path(){return this[Wi][w4]}set path(e){e&&(this[Wi][w4]=e)}get _mustNotHaveABody(){return this.method===\"GET\"||this.method===\"HEAD\"||this.method===\"DELETE\"}_write(e,r,i){if(this._mustNotHaveABody){i(new Error(\"The GET, HEAD and DELETE methods must NOT have a body\"));return}this.flushHeaders();let n=()=>this._request.write(e,r,i);this._request?n():this[Pd].push(n)}_final(e){if(this.destroyed)return;this.flushHeaders();let r=()=>{if(this._mustNotHaveABody){e();return}this._request.end(e)};this._request?r():this[Pd].push(r)}abort(){this.res&&this.res.complete||(this.aborted||process.nextTick(()=>this.emit(\"abort\")),this.aborted=!0,this.destroy())}_destroy(e,r){this.res&&this.res._dump(),this._request&&this._request.destroy(),r(e)}async flushHeaders(){if(this[Rw]||this.destroyed)return;this[Rw]=!0;let 
e=this.method===Zxe,r=i=>{if(this._request=i,this.destroyed){i.destroy();return}e||Jxe(i,this,[\"timeout\",\"continue\",\"close\",\"error\"]);let n=o=>(...a)=>{!this.writable&&!this.destroyed?o(...a):this.once(\"finish\",()=>{o(...a)})};i.once(\"response\",n((o,a,l)=>{let c=new Yxe(this.socket,i.readableHighWaterMark);this.res=c,c.req=this,c.statusCode=o[I4],c.headers=o,c.rawHeaders=l,c.once(\"end\",()=>{this.aborted?(c.aborted=!0,c.emit(\"aborted\")):(c.complete=!0,c.socket=null,c.connection=null)}),e?(c.upgrade=!0,this.emit(\"connect\",c,i,Buffer.alloc(0))?this.emit(\"close\"):i.destroy()):(i.on(\"data\",u=>{!c._dumped&&!c.push(u)&&i.pause()}),i.once(\"end\",()=>{c.push(null)}),this.emit(\"response\",c)||c._dump())})),i.once(\"headers\",n(o=>this.emit(\"information\",{statusCode:o[I4]}))),i.once(\"trailers\",n((o,a,l)=>{let{res:c}=this;c.trailers=o,c.rawTrailers=l}));let{socket:s}=i.session;this.socket=s,this.connection=s;for(let o of this[Pd])o();this.emit(\"socket\",this.socket)};if(this[AP])try{r(this[AP].request(this[Wi]))}catch(i){this.emit(\"error\",i)}else{this.reusedSocket=!0;try{r(await this.agent.request(this[aP],this[B4],this[Wi]))}catch(i){this.emit(\"error\",i)}}}getHeader(e){if(typeof e!=\"string\")throw new oP(\"name\",\"string\",e);return this[Wi][e.toLowerCase()]}get headersSent(){return this[Rw]}removeHeader(e){if(typeof e!=\"string\")throw new oP(\"name\",\"string\",e);if(this.headersSent)throw new E4(\"remove\");delete this[Wi][e.toLowerCase()]}setHeader(e,r){if(this.headersSent)throw new E4(\"set\");if(typeof e!=\"string\"||!$xe.test(e)&&!Wxe(e))throw new _xe(\"Header name\",e);if(typeof r==\"undefined\")throw new Vxe(r,e);if(ePe.test(r))throw new Xxe(\"header content\",e);this[Wi][e.toLowerCase()]=r}setNoDelay(){}setSocketKeepAlive(){}setTimeout(e,r){let i=()=>this._request.setTimeout(e,r);return this._request?i():this[Pd].push(i),this}get maxHeadersCount(){if(!this.destroyed&&this._request)return this._request.session.localSettings.maxHeaderListSize}set maxHeadersCount(e){}};C4.exports=b4});var v4=w((mot,Q4)=>{\"use strict\";var tPe=require(\"tls\");Q4.exports=(t={})=>new Promise((e,r)=>{let i=tPe.connect(t,()=>{t.resolveSocket?(i.off(\"error\",r),e({alpnProtocol:i.alpnProtocol,socket:i})):(i.destroy(),e({alpnProtocol:i.alpnProtocol}))});i.on(\"error\",r)})});var k4=w((Eot,S4)=>{\"use strict\";var rPe=require(\"net\");S4.exports=t=>{let e=t.host,r=t.headers&&t.headers.host;return r&&(r.startsWith(\"[\")?r.indexOf(\"]\")===-1?e=r:e=r.slice(1,-1):e=r.split(\":\",1)[0]),rPe.isIP(e)?\"\":e}});var D4=w((Iot,cP)=>{\"use strict\";var x4=require(\"http\"),uP=require(\"https\"),iPe=v4(),nPe=tP(),sPe=lP(),oPe=k4(),aPe=sP(),Fw=new nPe({maxSize:100}),Dd=new Map,P4=(t,e,r)=>{e._httpMessage={shouldKeepAlive:!0};let i=()=>{t.emit(\"free\",e,r)};e.on(\"free\",i);let n=()=>{t.removeSocket(e,r)};e.on(\"close\",n);let s=()=>{t.removeSocket(e,r),e.off(\"close\",n),e.off(\"free\",i),e.off(\"agentRemove\",s)};e.on(\"agentRemove\",s),t.emit(\"free\",e,r)},APe=async t=>{let e=`${t.host}:${t.port}:${t.ALPNProtocols.sort()}`;if(!Fw.has(e)){if(Dd.has(e))return(await Dd.get(e)).alpnProtocol;let{path:r,agent:i}=t;t.path=t.socketPath;let n=iPe(t);Dd.set(e,n);try{let{socket:s,alpnProtocol:o}=await n;if(Fw.set(e,o),t.path=r,o===\"h2\")s.destroy();else{let{globalAgent:a}=uP,l=uP.Agent.prototype.createConnection;i?i.createConnection===l?P4(i,s,t):s.destroy():a.createConnection===l?P4(a,s,t):s.destroy()}return Dd.delete(e),o}catch(s){throw Dd.delete(e),s}}return 
Fw.get(e)};cP.exports=async(t,e,r)=>{if((typeof t==\"string\"||t instanceof URL)&&(t=aPe(new URL(t))),typeof e==\"function\"&&(r=e,e=void 0),e=te(N(N({ALPNProtocols:[\"h2\",\"http/1.1\"]},t),e),{resolveSocket:!0}),!Array.isArray(e.ALPNProtocols)||e.ALPNProtocols.length===0)throw new Error(\"The `ALPNProtocols` option must be an Array with at least one entry\");e.protocol=e.protocol||\"https:\";let i=e.protocol===\"https:\";e.host=e.hostname||e.host||\"localhost\",e.session=e.tlsSession,e.servername=e.servername||oPe(e),e.port=e.port||(i?443:80),e._defaultAgent=i?uP.globalAgent:x4.globalAgent;let n=e.agent;if(n){if(n.addRequest)throw new Error(\"The `options.agent` object can contain only `http`, `https` or `http2` properties\");e.agent=n[i?\"https\":\"http\"]}return i&&await APe(e)===\"h2\"?(n&&(e.agent=n.http2),new sPe(e,r)):x4.request(e,r)};cP.exports.protocolCache=Fw});var F4=w((yot,R4)=>{\"use strict\";var lPe=require(\"http2\"),cPe=iP(),gP=lP(),uPe=nP(),gPe=D4(),fPe=(t,e,r)=>new gP(t,e,r),hPe=(t,e,r)=>{let i=new gP(t,e,r);return i.end(),i};R4.exports=te(N(te(N({},lPe),{ClientRequest:gP,IncomingMessage:uPe}),cPe),{request:fPe,get:hPe,auto:gPe})});var hP=w(fP=>{\"use strict\";Object.defineProperty(fP,\"__esModule\",{value:!0});var N4=$a();fP.default=t=>N4.default.nodeStream(t)&&N4.default.function_(t.getBoundary)});var M4=w(pP=>{\"use strict\";Object.defineProperty(pP,\"__esModule\",{value:!0});var L4=require(\"fs\"),T4=require(\"util\"),O4=$a(),pPe=hP(),dPe=T4.promisify(L4.stat);pP.default=async(t,e)=>{if(e&&\"content-length\"in e)return Number(e[\"content-length\"]);if(!t)return 0;if(O4.default.string(t))return Buffer.byteLength(t);if(O4.default.buffer(t))return t.length;if(pPe.default(t))return T4.promisify(t.getLength.bind(t))();if(t instanceof L4.ReadStream){let{size:r}=await dPe(t.path);return r===0?void 0:r}}});var CP=w(dP=>{\"use strict\";Object.defineProperty(dP,\"__esModule\",{value:!0});function CPe(t,e,r){let i={};for(let n of r)i[n]=(...s)=>{e.emit(n,...s)},t.on(n,i[n]);return()=>{for(let n of r)t.off(n,i[n])}}dP.default=CPe});var U4=w(mP=>{\"use strict\";Object.defineProperty(mP,\"__esModule\",{value:!0});mP.default=()=>{let t=[];return{once(e,r,i){e.once(r,i),t.push({origin:e,event:r,fn:i})},unhandleAll(){for(let e of t){let{origin:r,event:i,fn:n}=e;r.removeListener(i,n)}t.length=0}}}});var H4=w(Rd=>{\"use strict\";Object.defineProperty(Rd,\"__esModule\",{value:!0});Rd.TimeoutError=void 0;var mPe=require(\"net\"),EPe=U4(),K4=Symbol(\"reentry\"),IPe=()=>{},EP=class extends Error{constructor(e,r){super(`Timeout awaiting '${r}' for ${e}ms`);this.event=r,this.name=\"TimeoutError\",this.code=\"ETIMEDOUT\"}};Rd.TimeoutError=EP;Rd.default=(t,e,r)=>{if(K4 in t)return IPe;t[K4]=!0;let i=[],{once:n,unhandleAll:s}=EPe.default(),o=(g,f,h)=>{var p;let m=setTimeout(f,g,g,h);(p=m.unref)===null||p===void 0||p.call(m);let y=()=>{clearTimeout(m)};return i.push(y),y},{host:a,hostname:l}=r,c=(g,f)=>{t.destroy(new EP(g,f))},u=()=>{for(let g of i)g();s()};if(t.once(\"error\",g=>{if(u(),t.listenerCount(\"error\")===0)throw g}),t.once(\"close\",u),n(t,\"response\",g=>{n(g,\"end\",u)}),typeof e.request!=\"undefined\"&&o(e.request,c,\"request\"),typeof e.socket!=\"undefined\"){let g=()=>{c(e.socket,\"socket\")};t.setTimeout(e.socket,g),i.push(()=>{t.removeListener(\"timeout\",g)})}return n(t,\"socket\",g=>{var f;let{socketPath:h}=t;if(g.connecting){let p=Boolean(h!=null?h:mPe.isIP((f=l!=null?l:a)!==null&&f!==void 0?f:\"\")!==0);if(typeof e.lookup!=\"undefined\"&&!p&&typeof 
g.address().address==\"undefined\"){let m=o(e.lookup,c,\"lookup\");n(g,\"lookup\",m)}if(typeof e.connect!=\"undefined\"){let m=()=>o(e.connect,c,\"connect\");p?n(g,\"connect\",m()):n(g,\"lookup\",y=>{y===null&&n(g,\"connect\",m())})}typeof e.secureConnect!=\"undefined\"&&r.protocol===\"https:\"&&n(g,\"connect\",()=>{let m=o(e.secureConnect,c,\"secureConnect\");n(g,\"secureConnect\",m)})}if(typeof e.send!=\"undefined\"){let p=()=>o(e.send,c,\"send\");g.connecting?n(g,\"connect\",()=>{n(t,\"upload-complete\",p())}):n(t,\"upload-complete\",p())}}),typeof e.response!=\"undefined\"&&n(t,\"upload-complete\",()=>{let g=o(e.response,c,\"response\");n(t,\"response\",g)}),u}});var G4=w(IP=>{\"use strict\";Object.defineProperty(IP,\"__esModule\",{value:!0});var j4=$a();IP.default=t=>{t=t;let e={protocol:t.protocol,hostname:j4.default.string(t.hostname)&&t.hostname.startsWith(\"[\")?t.hostname.slice(1,-1):t.hostname,host:t.host,hash:t.hash,search:t.search,pathname:t.pathname,href:t.href,path:`${t.pathname||\"\"}${t.search||\"\"}`};return j4.default.string(t.port)&&t.port.length>0&&(e.port=Number(t.port)),(t.username||t.password)&&(e.auth=`${t.username||\"\"}:${t.password||\"\"}`),e}});var Y4=w(yP=>{\"use strict\";Object.defineProperty(yP,\"__esModule\",{value:!0});var yPe=require(\"url\"),wPe=[\"protocol\",\"host\",\"hostname\",\"port\",\"pathname\",\"search\"];yP.default=(t,e)=>{var r,i;if(e.path){if(e.pathname)throw new TypeError(\"Parameters `path` and `pathname` are mutually exclusive.\");if(e.search)throw new TypeError(\"Parameters `path` and `search` are mutually exclusive.\");if(e.searchParams)throw new TypeError(\"Parameters `path` and `searchParams` are mutually exclusive.\")}if(e.search&&e.searchParams)throw new TypeError(\"Parameters `search` and `searchParams` are mutually exclusive.\");if(!t){if(!e.protocol)throw new TypeError(\"No URL protocol specified\");t=`${e.protocol}//${(i=(r=e.hostname)!==null&&r!==void 0?r:e.host)!==null&&i!==void 0?i:\"\"}`}let n=new yPe.URL(t);if(e.path){let s=e.path.indexOf(\"?\");s===-1?e.pathname=e.path:(e.pathname=e.path.slice(0,s),e.search=e.path.slice(s+1)),delete e.path}for(let s of wPe)e[s]&&(n[s]=e[s].toString());return n}});var J4=w(wP=>{\"use strict\";Object.defineProperty(wP,\"__esModule\",{value:!0});var q4=class{constructor(){this.weakMap=new WeakMap,this.map=new Map}set(e,r){typeof e==\"object\"?this.weakMap.set(e,r):this.map.set(e,r)}get(e){return typeof e==\"object\"?this.weakMap.get(e):this.map.get(e)}has(e){return typeof e==\"object\"?this.weakMap.has(e):this.map.has(e)}};wP.default=q4});var bP=w(BP=>{\"use strict\";Object.defineProperty(BP,\"__esModule\",{value:!0});var BPe=async t=>{let e=[],r=0;for await(let i of t)e.push(i),r+=Buffer.byteLength(i);return Buffer.isBuffer(e[0])?Buffer.concat(e,r):Buffer.from(e.join(\"\"))};BP.default=BPe});var z4=w(Yc=>{\"use strict\";Object.defineProperty(Yc,\"__esModule\",{value:!0});Yc.dnsLookupIpVersionToFamily=Yc.isDnsLookupIpVersion=void 0;var W4={auto:0,ipv4:4,ipv6:6};Yc.isDnsLookupIpVersion=t=>t in W4;Yc.dnsLookupIpVersionToFamily=t=>{if(Yc.isDnsLookupIpVersion(t))return W4[t];throw new Error(\"Invalid DNS lookup IP version\")}});var QP=w(Nw=>{\"use strict\";Object.defineProperty(Nw,\"__esModule\",{value:!0});Nw.isResponseOk=void 0;Nw.isResponseOk=t=>{let{statusCode:e}=t,r=t.request.options.followRedirect?299:399;return e>=200&&e<=r||e===304}});var V4=w(vP=>{\"use strict\";Object.defineProperty(vP,\"__esModule\",{value:!0});var _4=new 
Set;vP.default=t=>{_4.has(t)||(_4.add(t),process.emitWarning(`Got: ${t}`,{type:\"DeprecationWarning\"}))}});var X4=w(SP=>{\"use strict\";Object.defineProperty(SP,\"__esModule\",{value:!0});var Ir=$a(),bPe=(t,e)=>{if(Ir.default.null_(t.encoding))throw new TypeError(\"To get a Buffer, set `options.responseType` to `buffer` instead\");Ir.assert.any([Ir.default.string,Ir.default.undefined],t.encoding),Ir.assert.any([Ir.default.boolean,Ir.default.undefined],t.resolveBodyOnly),Ir.assert.any([Ir.default.boolean,Ir.default.undefined],t.methodRewriting),Ir.assert.any([Ir.default.boolean,Ir.default.undefined],t.isStream),Ir.assert.any([Ir.default.string,Ir.default.undefined],t.responseType),t.responseType===void 0&&(t.responseType=\"text\");let{retry:r}=t;if(e?t.retry=N({},e.retry):t.retry={calculateDelay:i=>i.computedValue,limit:0,methods:[],statusCodes:[],errorCodes:[],maxRetryAfter:void 0},Ir.default.object(r)?(t.retry=N(N({},t.retry),r),t.retry.methods=[...new Set(t.retry.methods.map(i=>i.toUpperCase()))],t.retry.statusCodes=[...new Set(t.retry.statusCodes)],t.retry.errorCodes=[...new Set(t.retry.errorCodes)]):Ir.default.number(r)&&(t.retry.limit=r),Ir.default.undefined(t.retry.maxRetryAfter)&&(t.retry.maxRetryAfter=Math.min(...[t.timeout.request,t.timeout.connect].filter(Ir.default.number))),Ir.default.object(t.pagination)){e&&(t.pagination=N(N({},e.pagination),t.pagination));let{pagination:i}=t;if(!Ir.default.function_(i.transform))throw new Error(\"`options.pagination.transform` must be implemented\");if(!Ir.default.function_(i.shouldContinue))throw new Error(\"`options.pagination.shouldContinue` must be implemented\");if(!Ir.default.function_(i.filter))throw new TypeError(\"`options.pagination.filter` must be implemented\");if(!Ir.default.function_(i.paginate))throw new Error(\"`options.pagination.paginate` must be implemented\")}return t.responseType===\"json\"&&t.headers.accept===void 0&&(t.headers.accept=\"application/json\"),t};SP.default=bPe});var Z4=w(Fd=>{\"use strict\";Object.defineProperty(Fd,\"__esModule\",{value:!0});Fd.retryAfterStatusCodes=void 0;Fd.retryAfterStatusCodes=new Set([413,429,503]);var QPe=({attemptCount:t,retryOptions:e,error:r,retryAfter:i})=>{if(t>e.limit)return 0;let n=e.methods.includes(r.options.method),s=e.errorCodes.includes(r.code),o=r.response&&e.statusCodes.includes(r.response.statusCode);if(!n||!s&&!o)return 0;if(r.response){if(i)return e.maxRetryAfter===void 0||i>e.maxRetryAfter?0:i;if(r.response.statusCode===413)return 0}let a=Math.random()*100;return 2**(t-1)*1e3+a};Fd.default=QPe});var Ld=w(qt=>{\"use strict\";Object.defineProperty(qt,\"__esModule\",{value:!0});qt.UnsupportedProtocolError=qt.ReadError=qt.TimeoutError=qt.UploadError=qt.CacheError=qt.HTTPError=qt.MaxRedirectsError=qt.RequestError=qt.setNonEnumerableProperties=qt.knownHookEvents=qt.withoutBody=qt.kIsNormalizedAlready=void 0;var 
$4=require(\"util\"),e_=require(\"stream\"),vPe=require(\"fs\"),al=require(\"url\"),t_=require(\"http\"),kP=require(\"http\"),SPe=require(\"https\"),kPe=cz(),xPe=Cz(),r_=Xz(),PPe=t4(),DPe=F4(),RPe=Pw(),me=$a(),FPe=M4(),i_=hP(),NPe=CP(),n_=H4(),LPe=G4(),s_=Y4(),TPe=J4(),OPe=bP(),o_=z4(),MPe=QP(),Al=V4(),UPe=X4(),KPe=Z4(),xP,Ri=Symbol(\"request\"),Lw=Symbol(\"response\"),Xg=Symbol(\"responseSize\"),Zg=Symbol(\"downloadedSize\"),$g=Symbol(\"bodySize\"),ef=Symbol(\"uploadedSize\"),Tw=Symbol(\"serverResponsesPiped\"),a_=Symbol(\"unproxyEvents\"),A_=Symbol(\"isFromCache\"),PP=Symbol(\"cancelTimeouts\"),l_=Symbol(\"startedReading\"),tf=Symbol(\"stopReading\"),Ow=Symbol(\"triggerRead\"),ll=Symbol(\"body\"),Nd=Symbol(\"jobs\"),c_=Symbol(\"originalResponse\"),u_=Symbol(\"retryTimeout\");qt.kIsNormalizedAlready=Symbol(\"isNormalizedAlready\");var HPe=me.default.string(process.versions.brotli);qt.withoutBody=new Set([\"GET\",\"HEAD\"]);qt.knownHookEvents=[\"init\",\"beforeRequest\",\"beforeRedirect\",\"beforeError\",\"beforeRetry\",\"afterResponse\"];function jPe(t){for(let e in t){let r=t[e];if(!me.default.string(r)&&!me.default.number(r)&&!me.default.boolean(r)&&!me.default.null_(r)&&!me.default.undefined(r))throw new TypeError(`The \\`searchParams\\` value '${String(r)}' must be a string, number, boolean or null`)}}function GPe(t){return me.default.object(t)&&!(\"statusCode\"in t)}var DP=new TPe.default,YPe=async t=>new Promise((e,r)=>{let i=n=>{r(n)};t.pending||e(),t.once(\"error\",i),t.once(\"ready\",()=>{t.off(\"error\",i),e()})}),qPe=new Set([300,301,302,303,304,307,308]),JPe=[\"context\",\"body\",\"json\",\"form\"];qt.setNonEnumerableProperties=(t,e)=>{let r={};for(let i of t)if(!!i)for(let n of JPe)n in i&&(r[n]={writable:!0,configurable:!0,enumerable:!1,value:i[n]});Object.defineProperties(e,r)};var fi=class extends Error{constructor(e,r,i){var n;super(e);if(Error.captureStackTrace(this,this.constructor),this.name=\"RequestError\",this.code=r.code,i instanceof RP?(Object.defineProperty(this,\"request\",{enumerable:!1,value:i}),Object.defineProperty(this,\"response\",{enumerable:!1,value:i[Lw]}),Object.defineProperty(this,\"options\",{enumerable:!1,value:i.options})):Object.defineProperty(this,\"options\",{enumerable:!1,value:i}),this.timings=(n=this.request)===null||n===void 0?void 0:n.timings,me.default.string(r.stack)&&me.default.string(this.stack)){let s=this.stack.indexOf(this.message)+this.message.length,o=this.stack.slice(s).split(`\n`).reverse(),a=r.stack.slice(r.stack.indexOf(r.message)+r.message.length).split(`\n`).reverse();for(;a.length!==0&&a[0]===o[0];)o.shift();this.stack=`${this.stack.slice(0,s)}${o.reverse().join(`\n`)}${a.reverse().join(`\n`)}`}}};qt.RequestError=fi;var FP=class extends fi{constructor(e){super(`Redirected ${e.options.maxRedirects} times. 
Aborting.`,{},e);this.name=\"MaxRedirectsError\"}};qt.MaxRedirectsError=FP;var NP=class extends fi{constructor(e){super(`Response code ${e.statusCode} (${e.statusMessage})`,{},e.request);this.name=\"HTTPError\"}};qt.HTTPError=NP;var LP=class extends fi{constructor(e,r){super(e.message,e,r);this.name=\"CacheError\"}};qt.CacheError=LP;var TP=class extends fi{constructor(e,r){super(e.message,e,r);this.name=\"UploadError\"}};qt.UploadError=TP;var OP=class extends fi{constructor(e,r,i){super(e.message,e,i);this.name=\"TimeoutError\",this.event=e.event,this.timings=r}};qt.TimeoutError=OP;var Mw=class extends fi{constructor(e,r){super(e.message,e,r);this.name=\"ReadError\"}};qt.ReadError=Mw;var MP=class extends fi{constructor(e){super(`Unsupported protocol \"${e.url.protocol}\"`,{},e);this.name=\"UnsupportedProtocolError\"}};qt.UnsupportedProtocolError=MP;var WPe=[\"socket\",\"connect\",\"continue\",\"information\",\"upgrade\",\"timeout\"],RP=class extends e_.Duplex{constructor(e,r={},i){super({autoDestroy:!1,highWaterMark:0});this[Zg]=0,this[ef]=0,this.requestInitialized=!1,this[Tw]=new Set,this.redirects=[],this[tf]=!1,this[Ow]=!1,this[Nd]=[],this.retryCount=0,this._progressCallbacks=[];let n=()=>this._unlockWrite(),s=()=>this._lockWrite();this.on(\"pipe\",c=>{c.prependListener(\"data\",n),c.on(\"data\",s),c.prependListener(\"end\",n),c.on(\"end\",s)}),this.on(\"unpipe\",c=>{c.off(\"data\",n),c.off(\"data\",s),c.off(\"end\",n),c.off(\"end\",s)}),this.on(\"pipe\",c=>{c instanceof kP.IncomingMessage&&(this.options.headers=N(N({},c.headers),this.options.headers))});let{json:o,body:a,form:l}=r;if((o||a||l)&&this._lockWrite(),qt.kIsNormalizedAlready in r)this.options=r;else try{this.options=this.constructor.normalizeArguments(e,r,i)}catch(c){me.default.nodeStream(r.body)&&r.body.destroy(),this.destroy(c);return}(async()=>{var c;try{this.options.body instanceof vPe.ReadStream&&await YPe(this.options.body);let{url:u}=this.options;if(!u)throw new TypeError(\"Missing `url` property\");if(this.requestUrl=u.toString(),decodeURI(this.requestUrl),await this._finalizeBody(),await this._makeRequest(),this.destroyed){(c=this[Ri])===null||c===void 0||c.destroy();return}for(let g of this[Nd])g();this[Nd].length=0,this.requestInitialized=!0}catch(u){if(u instanceof fi){this._beforeError(u);return}this.destroyed||this.destroy(u)}})()}static normalizeArguments(e,r,i){var n,s,o,a,l;let c=r;if(me.default.object(e)&&!me.default.urlInstance(e))r=N(N(N({},i),e),r);else{if(e&&r&&r.url!==void 0)throw new TypeError(\"The `url` option is mutually exclusive with the `input` argument\");r=N(N({},i),r),e!==void 0&&(r.url=e),me.default.urlInstance(r.url)&&(r.url=new al.URL(r.url.toString()))}if(r.cache===!1&&(r.cache=void 0),r.dnsCache===!1&&(r.dnsCache=void 
0),me.assert.any([me.default.string,me.default.undefined],r.method),me.assert.any([me.default.object,me.default.undefined],r.headers),me.assert.any([me.default.string,me.default.urlInstance,me.default.undefined],r.prefixUrl),me.assert.any([me.default.object,me.default.undefined],r.cookieJar),me.assert.any([me.default.object,me.default.string,me.default.undefined],r.searchParams),me.assert.any([me.default.object,me.default.string,me.default.undefined],r.cache),me.assert.any([me.default.object,me.default.number,me.default.undefined],r.timeout),me.assert.any([me.default.object,me.default.undefined],r.context),me.assert.any([me.default.object,me.default.undefined],r.hooks),me.assert.any([me.default.boolean,me.default.undefined],r.decompress),me.assert.any([me.default.boolean,me.default.undefined],r.ignoreInvalidCookies),me.assert.any([me.default.boolean,me.default.undefined],r.followRedirect),me.assert.any([me.default.number,me.default.undefined],r.maxRedirects),me.assert.any([me.default.boolean,me.default.undefined],r.throwHttpErrors),me.assert.any([me.default.boolean,me.default.undefined],r.http2),me.assert.any([me.default.boolean,me.default.undefined],r.allowGetBody),me.assert.any([me.default.string,me.default.undefined],r.localAddress),me.assert.any([o_.isDnsLookupIpVersion,me.default.undefined],r.dnsLookupIpVersion),me.assert.any([me.default.object,me.default.undefined],r.https),me.assert.any([me.default.boolean,me.default.undefined],r.rejectUnauthorized),r.https&&(me.assert.any([me.default.boolean,me.default.undefined],r.https.rejectUnauthorized),me.assert.any([me.default.function_,me.default.undefined],r.https.checkServerIdentity),me.assert.any([me.default.string,me.default.object,me.default.array,me.default.undefined],r.https.certificateAuthority),me.assert.any([me.default.string,me.default.object,me.default.array,me.default.undefined],r.https.key),me.assert.any([me.default.string,me.default.object,me.default.array,me.default.undefined],r.https.certificate),me.assert.any([me.default.string,me.default.undefined],r.https.passphrase),me.assert.any([me.default.string,me.default.buffer,me.default.array,me.default.undefined],r.https.pfx)),me.assert.any([me.default.object,me.default.undefined],r.cacheOptions),me.default.string(r.method)?r.method=r.method.toUpperCase():r.method=\"GET\",r.headers===(i==null?void 0:i.headers)?r.headers=N({},r.headers):r.headers=RPe(N(N({},i==null?void 0:i.headers),r.headers)),\"slashes\"in r)throw new TypeError(\"The legacy `url.Url` has been deprecated. Use `URL` instead.\");if(\"auth\"in r)throw new TypeError(\"Parameter `auth` is deprecated. 
Use `username` / `password` instead.\");if(\"searchParams\"in r&&r.searchParams&&r.searchParams!==(i==null?void 0:i.searchParams)){let h;if(me.default.string(r.searchParams)||r.searchParams instanceof al.URLSearchParams)h=new al.URLSearchParams(r.searchParams);else{jPe(r.searchParams),h=new al.URLSearchParams;for(let p in r.searchParams){let m=r.searchParams[p];m===null?h.append(p,\"\"):m!==void 0&&h.append(p,m)}}(n=i==null?void 0:i.searchParams)===null||n===void 0||n.forEach((p,m)=>{h.has(m)||h.append(m,p)}),r.searchParams=h}if(r.username=(s=r.username)!==null&&s!==void 0?s:\"\",r.password=(o=r.password)!==null&&o!==void 0?o:\"\",me.default.undefined(r.prefixUrl)?r.prefixUrl=(a=i==null?void 0:i.prefixUrl)!==null&&a!==void 0?a:\"\":(r.prefixUrl=r.prefixUrl.toString(),r.prefixUrl!==\"\"&&!r.prefixUrl.endsWith(\"/\")&&(r.prefixUrl+=\"/\")),me.default.string(r.url)){if(r.url.startsWith(\"/\"))throw new Error(\"`input` must not start with a slash when using `prefixUrl`\");r.url=s_.default(r.prefixUrl+r.url,r)}else(me.default.undefined(r.url)&&r.prefixUrl!==\"\"||r.protocol)&&(r.url=s_.default(r.prefixUrl,r));if(r.url){\"port\"in r&&delete r.port;let{prefixUrl:h}=r;Object.defineProperty(r,\"prefixUrl\",{set:m=>{let y=r.url;if(!y.href.startsWith(m))throw new Error(`Cannot change \\`prefixUrl\\` from ${h} to ${m}: ${y.href}`);r.url=new al.URL(m+y.href.slice(h.length)),h=m},get:()=>h});let{protocol:p}=r.url;if(p===\"unix:\"&&(p=\"http:\",r.url=new al.URL(`http://unix${r.url.pathname}${r.url.search}`)),r.searchParams&&(r.url.search=r.searchParams.toString()),p!==\"http:\"&&p!==\"https:\")throw new MP(r);r.username===\"\"?r.username=r.url.username:r.url.username=r.username,r.password===\"\"?r.password=r.url.password:r.url.password=r.password}let{cookieJar:u}=r;if(u){let{setCookie:h,getCookieString:p}=u;me.assert.function_(h),me.assert.function_(p),h.length===4&&p.length===0&&(h=$4.promisify(h.bind(r.cookieJar)),p=$4.promisify(p.bind(r.cookieJar)),r.cookieJar={setCookie:h,getCookieString:p})}let{cache:g}=r;if(g&&(DP.has(g)||DP.set(g,new r_((h,p)=>{let m=h[Ri](h,p);return me.default.promise(m)&&(m.once=(y,Q)=>{if(y===\"error\")m.catch(Q);else if(y===\"abort\")(async()=>{try{(await m).once(\"abort\",Q)}catch(S){}})();else throw new Error(`Unknown HTTP2 promise event: ${y}`);return m}),m},g))),r.cacheOptions=N({},r.cacheOptions),r.dnsCache===!0)xP||(xP=new xPe.default),r.dnsCache=xP;else if(!me.default.undefined(r.dnsCache)&&!r.dnsCache.lookup)throw new TypeError(`Parameter \\`dnsCache\\` must be a CacheableLookup instance or a boolean, got ${me.default(r.dnsCache)}`);me.default.number(r.timeout)?r.timeout={request:r.timeout}:i&&r.timeout!==i.timeout?r.timeout=N(N({},i.timeout),r.timeout):r.timeout=N({},r.timeout),r.context||(r.context={});let f=r.hooks===(i==null?void 0:i.hooks);r.hooks=N({},r.hooks);for(let h of qt.knownHookEvents)if(h in r.hooks)if(me.default.array(r.hooks[h]))r.hooks[h]=[...r.hooks[h]];else throw new TypeError(`Parameter \\`${h}\\` must be an Array, got ${me.default(r.hooks[h])}`);else r.hooks[h]=[];if(i&&!f)for(let h of qt.knownHookEvents)i.hooks[h].length>0&&(r.hooks[h]=[...i.hooks[h],...r.hooks[h]]);if(\"family\"in r&&Al.default('\"options.family\" was never documented, please use \"options.dnsLookupIpVersion\"'),(i==null?void 0:i.https)&&(r.https=N(N({},i.https),r.https)),\"rejectUnauthorized\"in r&&Al.default('\"options.rejectUnauthorized\" is now deprecated, please use \"options.https.rejectUnauthorized\"'),\"checkServerIdentity\"in 
r&&Al.default('\"options.checkServerIdentity\" was never documented, please use \"options.https.checkServerIdentity\"'),\"ca\"in r&&Al.default('\"options.ca\" was never documented, please use \"options.https.certificateAuthority\"'),\"key\"in r&&Al.default('\"options.key\" was never documented, please use \"options.https.key\"'),\"cert\"in r&&Al.default('\"options.cert\" was never documented, please use \"options.https.certificate\"'),\"passphrase\"in r&&Al.default('\"options.passphrase\" was never documented, please use \"options.https.passphrase\"'),\"pfx\"in r&&Al.default('\"options.pfx\" was never documented, please use \"options.https.pfx\"'),\"followRedirects\"in r)throw new TypeError(\"The `followRedirects` option does not exist. Use `followRedirect` instead.\");if(r.agent){for(let h in r.agent)if(h!==\"http\"&&h!==\"https\"&&h!==\"http2\")throw new TypeError(`Expected the \\`options.agent\\` properties to be \\`http\\`, \\`https\\` or \\`http2\\`, got \\`${h}\\``)}return r.maxRedirects=(l=r.maxRedirects)!==null&&l!==void 0?l:0,qt.setNonEnumerableProperties([i,c],r),UPe.default(r,i)}_lockWrite(){let e=()=>{throw new TypeError(\"The payload has been already provided\")};this.write=e,this.end=e}_unlockWrite(){this.write=super.write,this.end=super.end}async _finalizeBody(){let{options:e}=this,{headers:r}=e,i=!me.default.undefined(e.form),n=!me.default.undefined(e.json),s=!me.default.undefined(e.body),o=i||n||s,a=qt.withoutBody.has(e.method)&&!(e.method===\"GET\"&&e.allowGetBody);if(this._cannotHaveBody=a,o){if(a)throw new TypeError(`The \\`${e.method}\\` method cannot be used with a body`);if([s,i,n].filter(l=>l).length>1)throw new TypeError(\"The `body`, `json` and `form` options are mutually exclusive\");if(s&&!(e.body instanceof e_.Readable)&&!me.default.string(e.body)&&!me.default.buffer(e.body)&&!i_.default(e.body))throw new TypeError(\"The `body` option must be a stream.Readable, string or Buffer\");if(i&&!me.default.object(e.form))throw new TypeError(\"The `form` option must be an Object\");{let l=!me.default.string(r[\"content-type\"]);s?(i_.default(e.body)&&l&&(r[\"content-type\"]=`multipart/form-data; boundary=${e.body.getBoundary()}`),this[ll]=e.body):i?(l&&(r[\"content-type\"]=\"application/x-www-form-urlencoded\"),this[ll]=new al.URLSearchParams(e.form).toString()):(l&&(r[\"content-type\"]=\"application/json\"),this[ll]=e.stringifyJson(e.json));let c=await FPe.default(this[ll],e.headers);me.default.undefined(r[\"content-length\"])&&me.default.undefined(r[\"transfer-encoding\"])&&!a&&!me.default.undefined(c)&&(r[\"content-length\"]=String(c))}}else a?this._lockWrite():this._unlockWrite();this[$g]=Number(r[\"content-length\"])||void 0}async _onResponseBase(e){let{options:r}=this,{url:i}=r;this[c_]=e,r.decompress&&(e=PPe(e));let n=e.statusCode,s=e;s.statusMessage=s.statusMessage?s.statusMessage:t_.STATUS_CODES[n],s.url=r.url.toString(),s.requestUrl=this.requestUrl,s.redirectUrls=this.redirects,s.request=this,s.isFromCache=e.fromCache||!1,s.ip=this.ip,s.retryCount=this.retryCount,this[A_]=s.isFromCache,this[Xg]=Number(e.headers[\"content-length\"])||void 0,this[Lw]=e,e.once(\"end\",()=>{this[Xg]=this[Zg],this.emit(\"downloadProgress\",this.downloadProgress)}),e.once(\"error\",a=>{e.destroy(),this._beforeError(new Mw(a,this))}),e.once(\"aborted\",()=>{this._beforeError(new Mw({name:\"Error\",message:\"The server aborted pending request\",code:\"ECONNRESET\"},this))}),this.emit(\"downloadProgress\",this.downloadProgress);let 
o=e.headers[\"set-cookie\"];if(me.default.object(r.cookieJar)&&o){let a=o.map(async l=>r.cookieJar.setCookie(l,i.toString()));r.ignoreInvalidCookies&&(a=a.map(async l=>l.catch(()=>{})));try{await Promise.all(a)}catch(l){this._beforeError(l);return}}if(r.followRedirect&&e.headers.location&&qPe.has(n)){if(e.resume(),this[Ri]&&(this[PP](),delete this[Ri],this[a_]()),(n===303&&r.method!==\"GET\"&&r.method!==\"HEAD\"||!r.methodRewriting)&&(r.method=\"GET\",\"body\"in r&&delete r.body,\"json\"in r&&delete r.json,\"form\"in r&&delete r.form,this[ll]=void 0,delete r.headers[\"content-length\"]),this.redirects.length>=r.maxRedirects){this._beforeError(new FP(this));return}try{let l=Buffer.from(e.headers.location,\"binary\").toString(),c=new al.URL(l,i),u=c.toString();decodeURI(u),c.hostname!==i.hostname||c.port!==i.port?(\"host\"in r.headers&&delete r.headers.host,\"cookie\"in r.headers&&delete r.headers.cookie,\"authorization\"in r.headers&&delete r.headers.authorization,(r.username||r.password)&&(r.username=\"\",r.password=\"\")):(c.username=r.username,c.password=r.password),this.redirects.push(u),r.url=c;for(let g of r.hooks.beforeRedirect)await g(r,s);this.emit(\"redirect\",s,r),await this._makeRequest()}catch(l){this._beforeError(l);return}return}if(r.isStream&&r.throwHttpErrors&&!MPe.isResponseOk(s)){this._beforeError(new NP(s));return}e.on(\"readable\",()=>{this[Ow]&&this._read()}),this.on(\"resume\",()=>{e.resume()}),this.on(\"pause\",()=>{e.pause()}),e.once(\"end\",()=>{this.push(null)}),this.emit(\"response\",e);for(let a of this[Tw])if(!a.headersSent){for(let l in e.headers){let c=r.decompress?l!==\"content-encoding\":!0,u=e.headers[l];c&&a.setHeader(l,u)}a.statusCode=n}}async _onResponse(e){try{await this._onResponseBase(e)}catch(r){this._beforeError(r)}}_onRequest(e){let{options:r}=this,{timeout:i,url:n}=r;kPe.default(e),this[PP]=n_.default(e,i,n);let s=r.cache?\"cacheableResponse\":\"response\";e.once(s,l=>{this._onResponse(l)}),e.once(\"error\",l=>{var c;e.destroy(),(c=e.res)===null||c===void 0||c.removeAllListeners(\"end\"),l=l instanceof n_.TimeoutError?new OP(l,this.timings,this):new fi(l.message,l,this),this._beforeError(l)}),this[a_]=NPe.default(e,this,WPe),this[Ri]=e,this.emit(\"uploadProgress\",this.uploadProgress);let o=this[ll],a=this.redirects.length===0?this:e;me.default.nodeStream(o)?(o.pipe(a),o.once(\"error\",l=>{this._beforeError(new TP(l,this))})):(this._unlockWrite(),me.default.undefined(o)?(this._cannotHaveBody||this._noPipe)&&(a.end(),this._lockWrite()):(this._writeRequest(o,void 0,()=>{}),a.end(),this._lockWrite())),this.emit(\"request\",e)}async _createCacheableRequest(e,r){return new Promise((i,n)=>{Object.assign(r,LPe.default(e)),delete r.url;let s,o=DP.get(r.cache)(r,async a=>{a._readableState.autoDestroy=!1,s&&(await s).emit(\"cacheableResponse\",a),i(a)});r.url=e,o.once(\"error\",n),o.once(\"request\",async a=>{s=a,i(s)})})}async _makeRequest(){var e,r,i,n,s;let{options:o}=this,{headers:a}=o;for(let Q in a)if(me.default.undefined(a[Q]))delete a[Q];else if(me.default.null_(a[Q]))throw new TypeError(`Use \\`undefined\\` instead of \\`null\\` to delete the \\`${Q}\\` header`);if(o.decompress&&me.default.undefined(a[\"accept-encoding\"])&&(a[\"accept-encoding\"]=HPe?\"gzip, deflate, br\":\"gzip, deflate\"),o.cookieJar){let Q=await o.cookieJar.getCookieString(o.url.toString());me.default.nonEmptyString(Q)&&(o.headers.cookie=Q)}for(let Q of o.hooks.beforeRequest){let S=await 
Q(o);if(!me.default.undefined(S)){o.request=()=>S;break}}o.body&&this[ll]!==o.body&&(this[ll]=o.body);let{agent:l,request:c,timeout:u,url:g}=o;if(o.dnsCache&&!(\"lookup\"in o)&&(o.lookup=o.dnsCache.lookup),g.hostname===\"unix\"){let Q=/(?<socketPath>.+?):(?<path>.+)/.exec(`${g.pathname}${g.search}`);if(Q==null?void 0:Q.groups){let{socketPath:S,path:x}=Q.groups;Object.assign(o,{socketPath:S,path:x,host:\"\"})}}let f=g.protocol===\"https:\",h;o.http2?h=DPe.auto:h=f?SPe.request:t_.request;let p=(e=o.request)!==null&&e!==void 0?e:h,m=o.cache?this._createCacheableRequest:p;l&&!o.http2&&(o.agent=l[f?\"https\":\"http\"]),o[Ri]=p,delete o.request,delete o.timeout;let y=o;if(y.shared=(r=o.cacheOptions)===null||r===void 0?void 0:r.shared,y.cacheHeuristic=(i=o.cacheOptions)===null||i===void 0?void 0:i.cacheHeuristic,y.immutableMinTimeToLive=(n=o.cacheOptions)===null||n===void 0?void 0:n.immutableMinTimeToLive,y.ignoreCargoCult=(s=o.cacheOptions)===null||s===void 0?void 0:s.ignoreCargoCult,o.dnsLookupIpVersion!==void 0)try{y.family=o_.dnsLookupIpVersionToFamily(o.dnsLookupIpVersion)}catch(Q){throw new Error(\"Invalid `dnsLookupIpVersion` option value\")}o.https&&(\"rejectUnauthorized\"in o.https&&(y.rejectUnauthorized=o.https.rejectUnauthorized),o.https.checkServerIdentity&&(y.checkServerIdentity=o.https.checkServerIdentity),o.https.certificateAuthority&&(y.ca=o.https.certificateAuthority),o.https.certificate&&(y.cert=o.https.certificate),o.https.key&&(y.key=o.https.key),o.https.passphrase&&(y.passphrase=o.https.passphrase),o.https.pfx&&(y.pfx=o.https.pfx));try{let Q=await m(g,y);me.default.undefined(Q)&&(Q=h(g,y)),o.request=c,o.timeout=u,o.agent=l,o.https&&(\"rejectUnauthorized\"in o.https&&delete y.rejectUnauthorized,o.https.checkServerIdentity&&delete y.checkServerIdentity,o.https.certificateAuthority&&delete y.ca,o.https.certificate&&delete y.cert,o.https.key&&delete y.key,o.https.passphrase&&delete y.passphrase,o.https.pfx&&delete y.pfx),GPe(Q)?this._onRequest(Q):this.writable?(this.once(\"finish\",()=>{this._onResponse(Q)}),this._unlockWrite(),this.end(),this._lockWrite()):this._onResponse(Q)}catch(Q){throw Q instanceof r_.CacheError?new LP(Q,this):new fi(Q.message,Q,this)}}async _error(e){try{for(let r of this.options.hooks.beforeError)e=await r(e)}catch(r){e=new fi(r.message,r,this)}this.destroy(e)}_beforeError(e){if(this[tf])return;let{options:r}=this,i=this.retryCount+1;this[tf]=!0,e instanceof fi||(e=new fi(e.message,e,this));let n=e,{response:s}=n;(async()=>{if(s&&!s.body){s.setEncoding(this._readableState.encoding);try{s.rawBody=await OPe.default(s),s.body=s.rawBody.toString()}catch(o){}}if(this.listenerCount(\"retry\")!==0){let o;try{let a;s&&\"retry-after\"in s.headers&&(a=Number(s.headers[\"retry-after\"]),Number.isNaN(a)?(a=Date.parse(s.headers[\"retry-after\"])-Date.now(),a<=0&&(a=1)):a*=1e3),o=await r.retry.calculateDelay({attemptCount:i,retryOptions:r.retry,error:n,retryAfter:a,computedValue:KPe.default({attemptCount:i,retryOptions:r.retry,error:n,retryAfter:a,computedValue:0})})}catch(a){this._error(new fi(a.message,a,this));return}if(o){let a=async()=>{try{for(let l of this.options.hooks.beforeRetry)await l(this.options,n,i)}catch(l){this._error(new fi(l.message,e,this));return}this.destroyed||(this.destroy(),this.emit(\"retry\",i,e))};this[u_]=setTimeout(a,o);return}}this._error(n)})()}_read(){this[Ow]=!0;let e=this[Lw];if(e&&!this[tf]){e.readableLength&&(this[Ow]=!1);let r;for(;(r=e.read())!==null;){this[Zg]+=r.length,this[l_]=!0;let 
i=this.downloadProgress;i.percent<1&&this.emit(\"downloadProgress\",i),this.push(r)}}}_write(e,r,i){let n=()=>{this._writeRequest(e,r,i)};this.requestInitialized?n():this[Nd].push(n)}_writeRequest(e,r,i){this[Ri].destroyed||(this._progressCallbacks.push(()=>{this[ef]+=Buffer.byteLength(e,r);let n=this.uploadProgress;n.percent<1&&this.emit(\"uploadProgress\",n)}),this[Ri].write(e,r,n=>{!n&&this._progressCallbacks.length>0&&this._progressCallbacks.shift()(),i(n)}))}_final(e){let r=()=>{for(;this._progressCallbacks.length!==0;)this._progressCallbacks.shift()();if(!(Ri in this)){e();return}if(this[Ri].destroyed){e();return}this[Ri].end(i=>{i||(this[$g]=this[ef],this.emit(\"uploadProgress\",this.uploadProgress),this[Ri].emit(\"upload-complete\")),e(i)})};this.requestInitialized?r():this[Nd].push(r)}_destroy(e,r){var i;this[tf]=!0,clearTimeout(this[u_]),Ri in this&&(this[PP](),((i=this[Lw])===null||i===void 0?void 0:i.complete)||this[Ri].destroy()),e!==null&&!me.default.undefined(e)&&!(e instanceof fi)&&(e=new fi(e.message,e,this)),r(e)}get _isAboutToError(){return this[tf]}get ip(){var e;return(e=this.socket)===null||e===void 0?void 0:e.remoteAddress}get aborted(){var e,r,i;return((r=(e=this[Ri])===null||e===void 0?void 0:e.destroyed)!==null&&r!==void 0?r:this.destroyed)&&!((i=this[c_])===null||i===void 0?void 0:i.complete)}get socket(){var e,r;return(r=(e=this[Ri])===null||e===void 0?void 0:e.socket)!==null&&r!==void 0?r:void 0}get downloadProgress(){let e;return this[Xg]?e=this[Zg]/this[Xg]:this[Xg]===this[Zg]?e=1:e=0,{percent:e,transferred:this[Zg],total:this[Xg]}}get uploadProgress(){let e;return this[$g]?e=this[ef]/this[$g]:this[$g]===this[ef]?e=1:e=0,{percent:e,transferred:this[ef],total:this[$g]}}get timings(){var e;return(e=this[Ri])===null||e===void 0?void 0:e.timings}get isFromCache(){return this[A_]}pipe(e,r){if(this[l_])throw new Error(\"Failed to pipe. 
The response has been emitted already.\");return e instanceof kP.ServerResponse&&this[Tw].add(e),super.pipe(e,r)}unpipe(e){return e instanceof kP.ServerResponse&&this[Tw].delete(e),super.unpipe(e),this}};qt.default=RP});var Td=w(ho=>{\"use strict\";var zPe=ho&&ho.__createBinding||(Object.create?function(t,e,r,i){i===void 0&&(i=r),Object.defineProperty(t,i,{enumerable:!0,get:function(){return e[r]}})}:function(t,e,r,i){i===void 0&&(i=r),t[i]=e[r]}),_Pe=ho&&ho.__exportStar||function(t,e){for(var r in t)r!==\"default\"&&!Object.prototype.hasOwnProperty.call(e,r)&&zPe(e,t,r)};Object.defineProperty(ho,\"__esModule\",{value:!0});ho.CancelError=ho.ParseError=void 0;var g_=Ld(),f_=class extends g_.RequestError{constructor(e,r){let{options:i}=r.request;super(`${e.message} in \"${i.url.toString()}\"`,e,r.request);this.name=\"ParseError\"}};ho.ParseError=f_;var h_=class extends g_.RequestError{constructor(e){super(\"Promise was canceled\",{},e);this.name=\"CancelError\"}get isCanceled(){return!0}};ho.CancelError=h_;_Pe(Ld(),ho)});var d_=w(UP=>{\"use strict\";Object.defineProperty(UP,\"__esModule\",{value:!0});var p_=Td(),VPe=(t,e,r,i)=>{let{rawBody:n}=t;try{if(e===\"text\")return n.toString(i);if(e===\"json\")return n.length===0?\"\":r(n.toString());if(e===\"buffer\")return n;throw new p_.ParseError({message:`Unknown body type '${e}'`,name:\"Error\"},t)}catch(s){throw new p_.ParseError(s,t)}};UP.default=VPe});var KP=w(cl=>{\"use strict\";var XPe=cl&&cl.__createBinding||(Object.create?function(t,e,r,i){i===void 0&&(i=r),Object.defineProperty(t,i,{enumerable:!0,get:function(){return e[r]}})}:function(t,e,r,i){i===void 0&&(i=r),t[i]=e[r]}),ZPe=cl&&cl.__exportStar||function(t,e){for(var r in t)r!==\"default\"&&!Object.prototype.hasOwnProperty.call(e,r)&&XPe(e,t,r)};Object.defineProperty(cl,\"__esModule\",{value:!0});var $Pe=require(\"events\"),eDe=$a(),tDe=Az(),Uw=Td(),C_=d_(),m_=Ld(),rDe=CP(),iDe=bP(),E_=QP(),nDe=[\"request\",\"response\",\"redirect\",\"uploadProgress\",\"downloadProgress\"];function I_(t){let e,r,i=new $Pe.EventEmitter,n=new tDe((o,a,l)=>{let c=u=>{let g=new m_.default(void 0,t);g.retryCount=u,g._noPipe=!0,l(()=>g.destroy()),l.shouldReject=!1,l(()=>a(new Uw.CancelError(g))),e=g,g.once(\"response\",async p=>{var m;if(p.retryCount=u,p.request.aborted)return;let y;try{y=await iDe.default(g),p.rawBody=y}catch(M){return}if(g._isAboutToError)return;let Q=((m=p.headers[\"content-encoding\"])!==null&&m!==void 0?m:\"\").toLowerCase(),S=[\"gzip\",\"deflate\",\"br\"].includes(Q),{options:x}=g;if(S&&!x.decompress)p.body=y;else try{p.body=C_.default(p,x.responseType,x.parseJson,x.encoding)}catch(M){if(p.body=y.toString(),E_.isResponseOk(p)){g._beforeError(M);return}}try{for(let[M,Y]of x.hooks.afterResponse.entries())p=await Y(p,async U=>{let J=m_.default.normalizeArguments(void 0,te(N({},U),{retry:{calculateDelay:()=>0},throwHttpErrors:!1,resolveBodyOnly:!1}),x);J.hooks.afterResponse=J.hooks.afterResponse.slice(0,M);for(let ee of J.hooks.beforeRetry)await ee(J);let W=I_(J);return l(()=>{W.catch(()=>{}),W.cancel()}),W})}catch(M){g._beforeError(new Uw.RequestError(M.message,M,g));return}if(!E_.isResponseOk(p)){g._beforeError(new Uw.HTTPError(p));return}r=p,o(g.options.resolveBodyOnly?p.body:p)});let f=p=>{if(n.isCanceled)return;let{options:m}=g;if(p instanceof Uw.HTTPError&&!m.throwHttpErrors){let{response:y}=p;o(g.options.resolveBodyOnly?y.body:y);return}a(p)};g.once(\"error\",f);let h=g.options.body;g.once(\"retry\",(p,m)=>{var y,Q;if(h===((y=m.request)===null||y===void 0?void 
0:y.options.body)&&eDe.default.nodeStream((Q=m.request)===null||Q===void 0?void 0:Q.options.body)){f(m);return}c(p)}),rDe.default(g,i,nDe)};c(0)});n.on=(o,a)=>(i.on(o,a),n);let s=o=>{let a=(async()=>{await n;let{options:l}=r.request;return C_.default(r,o,l.parseJson,l.encoding)})();return Object.defineProperties(a,Object.getOwnPropertyDescriptors(n)),a};return n.json=()=>{let{headers:o}=e.options;return!e.writableFinished&&o.accept===void 0&&(o.accept=\"application/json\"),s(\"json\")},n.buffer=()=>s(\"buffer\"),n.text=()=>s(\"text\"),n}cl.default=I_;ZPe(Td(),cl)});var y_=w(HP=>{\"use strict\";Object.defineProperty(HP,\"__esModule\",{value:!0});var sDe=Td();function oDe(t,...e){let r=(async()=>{if(t instanceof sDe.RequestError)try{for(let n of e)if(n)for(let s of n)t=await s(t)}catch(n){t=n}throw t})(),i=()=>r;return r.json=i,r.text=i,r.buffer=i,r.on=i,r}HP.default=oDe});var b_=w(jP=>{\"use strict\";Object.defineProperty(jP,\"__esModule\",{value:!0});var w_=$a();function B_(t){for(let e of Object.values(t))(w_.default.plainObject(e)||w_.default.array(e))&&B_(e);return Object.freeze(t)}jP.default=B_});var v_=w(Q_=>{\"use strict\";Object.defineProperty(Q_,\"__esModule\",{value:!0})});var GP=w(Ns=>{\"use strict\";var aDe=Ns&&Ns.__createBinding||(Object.create?function(t,e,r,i){i===void 0&&(i=r),Object.defineProperty(t,i,{enumerable:!0,get:function(){return e[r]}})}:function(t,e,r,i){i===void 0&&(i=r),t[i]=e[r]}),ADe=Ns&&Ns.__exportStar||function(t,e){for(var r in t)r!==\"default\"&&!Object.prototype.hasOwnProperty.call(e,r)&&aDe(e,t,r)};Object.defineProperty(Ns,\"__esModule\",{value:!0});Ns.defaultHandler=void 0;var S_=$a(),Ls=KP(),lDe=y_(),Kw=Ld(),cDe=b_(),uDe={RequestError:Ls.RequestError,CacheError:Ls.CacheError,ReadError:Ls.ReadError,HTTPError:Ls.HTTPError,MaxRedirectsError:Ls.MaxRedirectsError,TimeoutError:Ls.TimeoutError,ParseError:Ls.ParseError,CancelError:Ls.CancelError,UnsupportedProtocolError:Ls.UnsupportedProtocolError,UploadError:Ls.UploadError},gDe=async t=>new Promise(e=>{setTimeout(e,t)}),{normalizeArguments:Hw}=Kw.default,k_=(...t)=>{let e;for(let r of t)e=Hw(void 0,r,e);return e},fDe=t=>t.isStream?new Kw.default(void 0,t):Ls.default(t),hDe=t=>\"defaults\"in t&&\"options\"in t.defaults,pDe=[\"get\",\"post\",\"put\",\"patch\",\"head\",\"delete\"];Ns.defaultHandler=(t,e)=>e(t);var x_=(t,e)=>{if(t)for(let r of t)r(e)},P_=t=>{t._rawHandlers=t.handlers,t.handlers=t.handlers.map(i=>(n,s)=>{let o,a=i(n,l=>(o=s(l),o));if(a!==o&&!n.isStream&&o){let l=a,{then:c,catch:u,finally:g}=l;Object.setPrototypeOf(l,Object.getPrototypeOf(o)),Object.defineProperties(l,Object.getOwnPropertyDescriptors(o)),l.then=c,l.catch=u,l.finally=g}return a});let e=(i,n={},s)=>{var o,a;let l=0,c=u=>t.handlers[l++](u,l===t.handlers.length?fDe:c);if(S_.default.plainObject(i)){let u=N(N({},i),n);Kw.setNonEnumerableProperties([i,n],u),n=u,i=void 0}try{let u;try{x_(t.options.hooks.init,n),x_((o=n.hooks)===null||o===void 0?void 0:o.init,n)}catch(f){u=f}let g=Hw(i,n,s!=null?s:t.options);if(g[Kw.kIsNormalizedAlready]=!0,u)throw new Ls.RequestError(u.message,u,g);return c(g)}catch(u){if(n.isStream)throw u;return lDe.default(u,t.options.hooks.beforeError,(a=n.hooks)===null||a===void 0?void 0:a.beforeError)}};e.extend=(...i)=>{let n=[t.options],s=[...t._rawHandlers],o;for(let a of i)hDe(a)?(n.push(a.defaults.options),s.push(...a.defaults._rawHandlers),o=a.defaults.mutableDefaults):(n.push(a),\"handlers\"in a&&s.push(...a.handlers),o=a.mutableDefaults);return 
s=s.filter(a=>a!==Ns.defaultHandler),s.length===0&&s.push(Ns.defaultHandler),P_({options:k_(...n),handlers:s,mutableDefaults:Boolean(o)})};let r=async function*(i,n){let s=Hw(i,n,t.options);s.resolveBodyOnly=!1;let o=s.pagination;if(!S_.default.object(o))throw new TypeError(\"`options.pagination` must be implemented\");let a=[],{countLimit:l}=o,c=0;for(;c<o.requestLimit;){c!==0&&await gDe(o.backoff);let u=await e(void 0,void 0,s),g=await o.transform(u),f=[];for(let p of g)if(o.filter(p,a,f)&&(!o.shouldContinue(p,a,f)||(yield p,o.stackAllItems&&a.push(p),f.push(p),--l<=0)))return;let h=o.paginate(u,a,f);if(h===!1)return;h===u.request.options?s=u.request.options:h!==void 0&&(s=Hw(void 0,h,s)),c++}};e.paginate=r,e.paginate.all=async(i,n)=>{let s=[];for await(let o of r(i,n))s.push(o);return s},e.paginate.each=r,e.stream=(i,n)=>e(i,te(N({},n),{isStream:!0}));for(let i of pDe)e[i]=(n,s)=>e(n,te(N({},s),{method:i})),e.stream[i]=(n,s)=>e(n,te(N({},s),{method:i,isStream:!0}));return Object.assign(e,uDe),Object.defineProperty(e,\"defaults\",{value:t.mutableDefaults?t:cDe.default(t),writable:t.mutableDefaults,configurable:t.mutableDefaults,enumerable:!0}),e.mergeOptions=k_,e};Ns.default=P_;ADe(v_(),Ns)});var Gw=w((tA,jw)=>{\"use strict\";var dDe=tA&&tA.__createBinding||(Object.create?function(t,e,r,i){i===void 0&&(i=r),Object.defineProperty(t,i,{enumerable:!0,get:function(){return e[r]}})}:function(t,e,r,i){i===void 0&&(i=r),t[i]=e[r]}),D_=tA&&tA.__exportStar||function(t,e){for(var r in t)r!==\"default\"&&!Object.prototype.hasOwnProperty.call(e,r)&&dDe(e,t,r)};Object.defineProperty(tA,\"__esModule\",{value:!0});var CDe=require(\"url\"),R_=GP(),mDe={options:{method:\"GET\",retry:{limit:2,methods:[\"GET\",\"PUT\",\"HEAD\",\"DELETE\",\"OPTIONS\",\"TRACE\"],statusCodes:[408,413,429,500,502,503,504,521,522,524],errorCodes:[\"ETIMEDOUT\",\"ECONNRESET\",\"EADDRINUSE\",\"ECONNREFUSED\",\"EPIPE\",\"ENOTFOUND\",\"ENETUNREACH\",\"EAI_AGAIN\"],maxRetryAfter:void 0,calculateDelay:({computedValue:t})=>t},timeout:{},headers:{\"user-agent\":\"got (https://github.com/sindresorhus/got)\"},hooks:{init:[],beforeRequest:[],beforeRedirect:[],beforeRetry:[],beforeError:[],afterResponse:[]},cache:void 0,dnsCache:void 0,decompress:!0,throwHttpErrors:!0,followRedirect:!0,isStream:!1,responseType:\"text\",resolveBodyOnly:!1,maxRedirects:10,prefixUrl:\"\",methodRewriting:!0,ignoreInvalidCookies:!1,context:{},http2:!1,allowGetBody:!1,https:void 0,pagination:{transform:t=>t.request.options.responseType===\"json\"?t.body:JSON.parse(t.body),paginate:t=>{if(!Reflect.has(t.headers,\"link\"))return!1;let e=t.headers.link.split(\",\"),r;for(let i of e){let n=i.split(\";\");if(n[1].includes(\"next\")){r=n[0].trimStart().trim(),r=r.slice(1,-1);break}}return r?{url:new CDe.URL(r)}:!1},filter:()=>!0,shouldContinue:()=>!0,countLimit:Infinity,backoff:0,requestLimit:1e4,stackAllItems:!0},parseJson:t=>JSON.parse(t),stringifyJson:t=>JSON.stringify(t),cacheOptions:{}},handlers:[R_.defaultHandler],mutableDefaults:!1},YP=R_.default(mDe);tA.default=YP;jw.exports=YP;jw.exports.default=YP;jw.exports.__esModule=!0;D_(GP(),tA);D_(KP(),tA)});var T_=w(rf=>{\"use strict\";var Yot=require(\"net\"),EDe=require(\"tls\"),qP=require(\"http\"),F_=require(\"https\"),IDe=require(\"events\"),qot=require(\"assert\"),yDe=require(\"util\");rf.httpOverHttp=wDe;rf.httpsOverHttp=BDe;rf.httpOverHttps=bDe;rf.httpsOverHttps=QDe;function wDe(t){var e=new rA(t);return e.request=qP.request,e}function BDe(t){var e=new rA(t);return 
e.request=qP.request,e.createSocket=N_,e.defaultPort=443,e}function bDe(t){var e=new rA(t);return e.request=F_.request,e}function QDe(t){var e=new rA(t);return e.request=F_.request,e.createSocket=N_,e.defaultPort=443,e}function rA(t){var e=this;e.options=t||{},e.proxyOptions=e.options.proxy||{},e.maxSockets=e.options.maxSockets||qP.Agent.defaultMaxSockets,e.requests=[],e.sockets=[],e.on(\"free\",function(i,n,s,o){for(var a=L_(n,s,o),l=0,c=e.requests.length;l<c;++l){var u=e.requests[l];if(u.host===a.host&&u.port===a.port){e.requests.splice(l,1),u.request.onSocket(i);return}}i.destroy(),e.removeSocket(i)})}yDe.inherits(rA,IDe.EventEmitter);rA.prototype.addRequest=function(e,r,i,n){var s=this,o=JP({request:e},s.options,L_(r,i,n));if(s.sockets.length>=this.maxSockets){s.requests.push(o);return}s.createSocket(o,function(a){a.on(\"free\",l),a.on(\"close\",c),a.on(\"agentRemove\",c),e.onSocket(a);function l(){s.emit(\"free\",a,o)}function c(u){s.removeSocket(a),a.removeListener(\"free\",l),a.removeListener(\"close\",c),a.removeListener(\"agentRemove\",c)}})};rA.prototype.createSocket=function(e,r){var i=this,n={};i.sockets.push(n);var s=JP({},i.proxyOptions,{method:\"CONNECT\",path:e.host+\":\"+e.port,agent:!1,headers:{host:e.host+\":\"+e.port}});e.localAddress&&(s.localAddress=e.localAddress),s.proxyAuth&&(s.headers=s.headers||{},s.headers[\"Proxy-Authorization\"]=\"Basic \"+new Buffer(s.proxyAuth).toString(\"base64\")),ul(\"making CONNECT request\");var o=i.request(s);o.useChunkedEncodingByDefault=!1,o.once(\"response\",a),o.once(\"upgrade\",l),o.once(\"connect\",c),o.once(\"error\",u),o.end();function a(g){g.upgrade=!0}function l(g,f,h){process.nextTick(function(){c(g,f,h)})}function c(g,f,h){if(o.removeAllListeners(),f.removeAllListeners(),g.statusCode!==200){ul(\"tunneling socket could not be established, statusCode=%d\",g.statusCode),f.destroy();var p=new Error(\"tunneling socket could not be established, statusCode=\"+g.statusCode);p.code=\"ECONNRESET\",e.request.emit(\"error\",p),i.removeSocket(n);return}if(h.length>0){ul(\"got illegal response body from proxy\"),f.destroy();var p=new Error(\"got illegal response body from proxy\");p.code=\"ECONNRESET\",e.request.emit(\"error\",p),i.removeSocket(n);return}return ul(\"tunneling connection has established\"),i.sockets[i.sockets.indexOf(n)]=f,r(f)}function u(g){o.removeAllListeners(),ul(`tunneling socket could not be established, cause=%s\n`,g.message,g.stack);var f=new Error(\"tunneling socket could not be established, cause=\"+g.message);f.code=\"ECONNRESET\",e.request.emit(\"error\",f),i.removeSocket(n)}};rA.prototype.removeSocket=function(e){var r=this.sockets.indexOf(e);if(r!==-1){this.sockets.splice(r,1);var i=this.requests.shift();i&&this.createSocket(i,function(n){i.request.onSocket(n)})}};function N_(t,e){var r=this;rA.prototype.createSocket.call(r,t,function(i){var n=t.request.getHeader(\"host\"),s=JP({},r.options,{socket:i,servername:n?n.replace(/:.*$/,\"\"):t.host}),o=EDe.connect(0,s);r.sockets[r.sockets.indexOf(i)]=o,e(o)})}function L_(t,e,r){return typeof t==\"string\"?{host:t,port:e,localAddress:r}:t}function JP(t){for(var e=1,r=arguments.length;e<r;++e){var i=arguments[e];if(typeof i==\"object\")for(var n=Object.keys(i),s=0,o=n.length;s<o;++s){var a=n[s];i[a]!==void 0&&(t[a]=i[a])}}return t}var ul;process.env.NODE_DEBUG&&/\\btunnel\\b/.test(process.env.NODE_DEBUG)?ul=function(){var t=Array.prototype.slice.call(arguments);typeof t[0]==\"string\"?t[0]=\"TUNNEL: 
\"+t[0]:t.unshift(\"TUNNEL:\"),console.error.apply(console,t)}:ul=function(){};rf.debug=ul});var M_=w((Wot,O_)=>{O_.exports=T_()});var z_=w((Jw,XP)=>{var W_=Object.assign({},require(\"fs\")),ZP=function(){var t=typeof document!=\"undefined\"&&document.currentScript?document.currentScript.src:void 0;return typeof __filename!=\"undefined\"&&(t=t||__filename),function(e){e=e||{};var r=typeof e!=\"undefined\"?e:{},i,n;r.ready=new Promise(function(d,E){i=d,n=E});var s={},o;for(o in r)r.hasOwnProperty(o)&&(s[o]=r[o]);var a=[],l=\"./this.program\",c=function(d,E){throw E},u=!1,g=!0,f=\"\";function h(d){return r.locateFile?r.locateFile(d,f):f+d}var p,m,y,Q;g&&(u?f=require(\"path\").dirname(f)+\"/\":f=__dirname+\"/\",p=function(E,I){var D=ba(E);return D?I?D:D.toString():(y||(y=W_),Q||(Q=require(\"path\")),E=Q.normalize(E),y.readFileSync(E,I?null:\"utf8\"))},m=function(E){var I=p(E,!0);return I.buffer||(I=new Uint8Array(I)),Ae(I.buffer),I},process.argv.length>1&&(l=process.argv[1].replace(/\\\\/g,\"/\")),a=process.argv.slice(2),c=function(d){process.exit(d)},r.inspect=function(){return\"[Emscripten Module object]\"});var S=r.print||console.log.bind(console),x=r.printErr||console.warn.bind(console);for(o in s)s.hasOwnProperty(o)&&(r[o]=s[o]);s=null,r.arguments&&(a=r.arguments),r.thisProgram&&(l=r.thisProgram),r.quit&&(c=r.quit);var M=16;function Y(d,E){return E||(E=M),Math.ceil(d/E)*E}var U=0,J=function(d){U=d},W;r.wasmBinary&&(W=r.wasmBinary);var ee=r.noExitRuntime||!0;typeof WebAssembly!=\"object\"&&Sr(\"no native wasm support detected\");function Z(d,E,I){switch(E=E||\"i8\",E.charAt(E.length-1)===\"*\"&&(E=\"i32\"),E){case\"i1\":return pe[d>>0];case\"i8\":return pe[d>>0];case\"i16\":return be[d>>1];case\"i32\":return fe[d>>2];case\"i64\":return fe[d>>2];case\"float\":return Ht[d>>2];case\"double\":return Mt[d>>3];default:Sr(\"invalid type for getValue: \"+E)}return null}var A,ne=!1,le;function Ae(d,E){d||Sr(\"Assertion failed: \"+E)}function T(d){var E=r[\"_\"+d];return Ae(E,\"Cannot call unknown function \"+d+\", make sure it is exported\"),E}function L(d,E,I,D,O){var V={string:function(nt){var It=0;if(nt!=null&&nt!==0){var ke=(nt.length<<2)+1;It=B(ke),Qe(nt,It,ke)}return It},array:function(nt){var It=B(nt.length);return Ue(nt,It),It}};function ie(nt){return E===\"string\"?re(nt):E===\"boolean\"?Boolean(nt):nt}var Be=T(d),Ce=[],_e=0;if(D)for(var ot=0;ot<D.length;ot++){var wt=V[I[ot]];wt?(_e===0&&(_e=FE()),Ce[ot]=wt(D[ot])):Ce[ot]=D[ot]}var ut=Be.apply(null,Ce);return ut=ie(ut),_e!==0&&NE(_e),ut}function Ee(d,E,I,D){I=I||[];var O=I.every(function(ie){return ie===\"number\"}),V=E!==\"string\";return V&&O&&!D?T(d):function(){return L(d,E,I,arguments,D)}}var we=typeof TextDecoder!=\"undefined\"?new TextDecoder(\"utf8\"):void 0;function qe(d,E,I){for(var D=E+I,O=E;d[O]&&!(O>=D);)++O;if(O-E>16&&d.subarray&&we)return we.decode(d.subarray(E,O));for(var V=\"\";E<O;){var ie=d[E++];if(!(ie&128)){V+=String.fromCharCode(ie);continue}var Be=d[E++]&63;if((ie&224)==192){V+=String.fromCharCode((ie&31)<<6|Be);continue}var Ce=d[E++]&63;if((ie&240)==224?ie=(ie&15)<<12|Be<<6|Ce:ie=(ie&7)<<18|Be<<12|Ce<<6|d[E++]&63,ie<65536)V+=String.fromCharCode(ie);else{var _e=ie-65536;V+=String.fromCharCode(55296|_e>>10,56320|_e&1023)}}return V}function re(d,E){return d?qe(X,d,E):\"\"}function se(d,E,I,D){if(!(D>0))return 0;for(var O=I,V=I+D-1,ie=0;ie<d.length;++ie){var Be=d.charCodeAt(ie);if(Be>=55296&&Be<=57343){var Ce=d.charCodeAt(++ie);Be=65536+((Be&1023)<<10)|Ce&1023}if(Be<=127){if(I>=V)break;E[I++]=Be}else 
if(Be<=2047){if(I+1>=V)break;E[I++]=192|Be>>6,E[I++]=128|Be&63}else if(Be<=65535){if(I+2>=V)break;E[I++]=224|Be>>12,E[I++]=128|Be>>6&63,E[I++]=128|Be&63}else{if(I+3>=V)break;E[I++]=240|Be>>18,E[I++]=128|Be>>12&63,E[I++]=128|Be>>6&63,E[I++]=128|Be&63}}return E[I]=0,I-O}function Qe(d,E,I){return se(d,X,E,I)}function he(d){for(var E=0,I=0;I<d.length;++I){var D=d.charCodeAt(I);D>=55296&&D<=57343&&(D=65536+((D&1023)<<10)|d.charCodeAt(++I)&1023),D<=127?++E:D<=2047?E+=2:D<=65535?E+=3:E+=4}return E}function Fe(d){var E=he(d)+1,I=Et(E);return I&&se(d,pe,I,E),I}function Ue(d,E){pe.set(d,E)}function xe(d,E){return d%E>0&&(d+=E-d%E),d}var ve,pe,X,be,ce,fe,gt,Ht,Mt;function mi(d){ve=d,r.HEAP8=pe=new Int8Array(d),r.HEAP16=be=new Int16Array(d),r.HEAP32=fe=new Int32Array(d),r.HEAPU8=X=new Uint8Array(d),r.HEAPU16=ce=new Uint16Array(d),r.HEAPU32=gt=new Uint32Array(d),r.HEAPF32=Ht=new Float32Array(d),r.HEAPF64=Mt=new Float64Array(d)}var jt=r.INITIAL_MEMORY||16777216,Qr,Ti=[],_s=[],Un=[],Kn=!1;function vr(){if(r.preRun)for(typeof r.preRun==\"function\"&&(r.preRun=[r.preRun]);r.preRun.length;)Ia(r.preRun.shift());ko(Ti)}function Hn(){Kn=!0,!r.noFSInit&&!v.init.initialized&&v.init(),fs.init(),ko(_s)}function us(){if(r.postRun)for(typeof r.postRun==\"function\"&&(r.postRun=[r.postRun]);r.postRun.length;)Du(r.postRun.shift());ko(Un)}function Ia(d){Ti.unshift(d)}function SA(d){_s.unshift(d)}function Du(d){Un.unshift(d)}var gs=0,kA=null,ya=null;function Ru(d){return d}function xA(d){gs++,r.monitorRunDependencies&&r.monitorRunDependencies(gs)}function PA(d){if(gs--,r.monitorRunDependencies&&r.monitorRunDependencies(gs),gs==0&&(kA!==null&&(clearInterval(kA),kA=null),ya)){var E=ya;ya=null,E()}}r.preloadedImages={},r.preloadedAudios={};function Sr(d){r.onAbort&&r.onAbort(d),d+=\"\",x(d),ne=!0,le=1,d=\"abort(\"+d+\"). 
Build with -s ASSERTIONS=1 for more info.\";var E=new WebAssembly.RuntimeError(d);throw n(E),E}var jl=\"data:application/octet-stream;base64,\";function Fu(d){return d.startsWith(jl)}var So=\"data:application/octet-stream;base64,AGFzbQEAAAABlAInYAF/AX9gA39/fwF/YAF/AGACf38Bf2ACf38AYAV/f39/fwF/YAR/f39/AX9gA39/fwBgBH9+f38Bf2AAAX9gBX9/f35/AX5gA39+fwF/YAF/AX5gAn9+AX9gBH9/fn8BfmADf35/AX5gA39/fgF/YAR/f35/AX9gBn9/f39/fwF/YAR/f39/AGADf39+AX5gAn5/AX9gA398fwBgBH9/f38BfmADf39/AX5gBn98f39/fwF/YAV/f35/fwF/YAV/fn9/fwF/YAV/f39/fwBgAn9+AGACf38BfmACf3wAYAh/fn5/f39+fwF/YAV/f39+fwBgAABgBX5+f35/AX5gBX9/f39/AX5gAnx/AXxgAn9+AX4CeRQBYQFhAAIBYQFiAAABYQFjAAMBYQFkAAYBYQFlAAEBYQFmAAABYQFnAAYBYQFoAAABYQFpAAMBYQFqAAMBYQFrAAMBYQFsAAEBYQFtAAABYQFuAAUBYQFvAAEBYQFwAAMBYQFxAAEBYQFyAAABYQFzAAMBYQF0AAADggKAAgcCAgQAAQECAgANBA4EBwICAhwLEw0AFA0dAAAMDAIHHgwQAgIDAwICAQAIAAcIFBUEBgAADAAECAgDAQYAAgIBBgAfFwEBAwITAiAPBgIFEQMFAxgBCAIBAAAHBQEYABoSAQIABwQDIREIAyIGAAEBAwMAIwUbASQHAQsVAQMABQMEAA0bFw0BBAALCwMDDAwAAwAHJQMBAAgaAQECBQMBAgMDAAcHBwICAgImEQsICAsECQoJAgAAAAAAAAkFAAUFBQEGAwYGBgUSBgYBARIBAAIJBgABDgABAQ8ACQEEGQkJCQAAAAMECgoBAQIQAAAAAgEDAwAEAQoFAA4ACQAEBQFwAR8fBQcBAYACgIACBgkBfwFB0KDBAgsHvgI8AXUCAAF2AIABAXcAkwIBeADjAQF5APEBAXoA0QEBQQDQAQFCAM8BAUMAzgEBRADMAQFFAMsBAUYAyQEBRwCSAgFIAJECAUkAjwIBSgCKAgFLAOkBAUwA4gEBTQDhAQFOADwBTwD8AQFQAPkBAVEA+AEBUgDwAQFTAPoBAVQA4AEBVQAVAVYAGAFXAMcBAVgAzQEBWQDfAQFaAN4BAV8A3QEBJADkAQJhYQDcAQJiYQDbAQJjYQDaAQJkYQDZAQJlYQDYAQJmYQDXAQJnYQDqAQJoYQCcAQJpYQDWAQJqYQDVAQJrYQDUAQJsYQAvAm1hABsCbmEAygECb2EASAJwYQEAAnFhAGcCcmEA0wECc2EA6AECdGEA0gECdWEA9wECdmEA9gECd2EA9QECeGEA5wECeWEA5gECemEA5QEJQQEAQQELHsgBkAKNAo4CjAKLArcBiQKIAocChgKFAoQCgwKCAoECgAL/Af4B/QH7AVv0AfMB8gHvAe4B7QHsAesBCu+QCYACQAEBfyMAQRBrIgMgADYCDCADIAE2AgggAyACNgIEIAMoAgwEQCADKAIMIAMoAgg2AgAgAygCDCADKAIENgIECwvMDAEHfwJAIABFDQAgAEEIayIDIABBBGsoAgAiAUF4cSIAaiEFAkAgAUEBcQ0AIAFBA3FFDQEgAyADKAIAIgFrIgNB9JsBKAIASQ0BIAAgAWohACADQfibASgCAEcEQCABQf8BTQRAIAMoAggiAiABQQN2IgRBA3RBjJwBakYaIAIgAygCDCIBRgRAQeSbAUHkmwEoAgBBfiAEd3E2AgAMAwsgAiABNgIMIAEgAjYCCAwCCyADKAIYIQYCQCADIAMoAgwiAUcEQCADKAIIIgIgATYCDCABIAI2AggMAQsCQCADQRRqIgIoAgAiBA0AIANBEGoiAigCACIEDQBBACEBDAELA0AgAiEHIAQiAUEUaiICKAIAIgQNACABQRBqIQIgASgCECIEDQALIAdBADYCAAsgBkUNAQJAIAMgAygCHCICQQJ0QZSeAWoiBCgCAEYEQCAEIAE2AgAgAQ0BQeibAUHomwEoAgBBfiACd3E2AgAMAwsgBkEQQRQgBigCECADRhtqIAE2AgAgAUUNAgsgASAGNgIYIAMoAhAiAgRAIAEgAjYCECACIAE2AhgLIAMoAhQiAkUNASABIAI2AhQgAiABNgIYDAELIAUoAgQiAUEDcUEDRw0AQeybASAANgIAIAUgAUF+cTYCBCADIABBAXI2AgQgACADaiAANgIADwsgAyAFTw0AIAUoAgQiAUEBcUUNAAJAIAFBAnFFBEAgBUH8mwEoAgBGBEBB/JsBIAM2AgBB8JsBQfCbASgCACAAaiIANgIAIAMgAEEBcjYCBCADQfibASgCAEcNA0HsmwFBADYCAEH4mwFBADYCAA8LIAVB+JsBKAIARgRAQfibASADNgIAQeybAUHsmwEoAgAgAGoiADYCACADIABBAXI2AgQgACADaiAANgIADwsgAUF4cSAAaiEAAkAgAUH/AU0EQCAFKAIIIgIgAUEDdiIEQQN0QYycAWpGGiACIAUoAgwiAUYEQEHkmwFB5JsBKAIAQX4gBHdxNgIADAILIAIgATYCDCABIAI2AggMAQsgBSgCGCEGAkAgBSAFKAIMIgFHBEAgBSgCCCICQfSbASgCAEkaIAIgATYCDCABIAI2AggMAQsCQCAFQRRqIgIoAgAiBA0AIAVBEGoiAigCACIEDQBBACEBDAELA0AgAiEHIAQiAUEUaiICKAIAIgQNACABQRBqIQIgASgCECIEDQALIAdBADYCAAsgBkUNAAJAIAUgBSgCHCICQQJ0QZSeAWoiBCgCAEYEQCAEIAE2AgAgAQ0BQeibAUHomwEoAgBBfiACd3E2AgAMAgsgBkEQQRQgBigCECAFRhtqIAE2AgAgAUUNAQsgASAGNgIYIAUoAhAiAgRAIAEgAjYCECACIAE2AhgLIAUoAhQiAkUNACABIAI2AhQgAiABNgIYCyADIABBAXI2AgQgACADaiAANgIAIANB+JsBKAIARw0BQeybASAANgIADwsgBSABQX5xNgIEIAMgAEEBcjYCBCAAIANqIAA2AgALIABB/wFNBEAgAEEDdiIBQQN0QYycAWohAAJ/QeSbASgCACICQQEgAXQiAXFFBEBB5JsBIAEgAnI2AgAgAAwBCyAAKAIICyECIAAgAzYCCCACIAM2AgwgAyAANgIMIAMgAjYCCA8LQR8hAiADQgA3AhAgAEH///8HTQRAIABBCHYiASABQYD+P2pBEHZBCHEiAXQiAiACQYDgH2pBEHZBBHEiAnQiBCAEQYCAD2pBEHZBAnEiBHRBD3YgASACciAEcm
siAUEBdCAAIAFBFWp2QQFxckEcaiECCyADIAI2AhwgAkECdEGUngFqIQECQAJAAkBB6JsBKAIAIgRBASACdCIHcUUEQEHomwEgBCAHcjYCACABIAM2AgAgAyABNgIYDAELIABBAEEZIAJBAXZrIAJBH0YbdCECIAEoAgAhAQNAIAEiBCgCBEF4cSAARg0CIAJBHXYhASACQQF0IQIgBCABQQRxaiIHQRBqKAIAIgENAAsgByADNgIQIAMgBDYCGAsgAyADNgIMIAMgAzYCCAwBCyAEKAIIIgAgAzYCDCAEIAM2AgggA0EANgIYIAMgBDYCDCADIAA2AggLQYScAUGEnAEoAgBBAWsiAEF/IAAbNgIACwtCAQF/IwBBEGsiASQAIAEgADYCDCABKAIMBEAgASgCDC0AAUEBcQRAIAEoAgwoAgQQFQsgASgCDBAVCyABQRBqJAALQwEBfyMAQRBrIgIkACACIAA2AgwgAiABNgIIIAIoAgwCfyMAQRBrIgAgAigCCDYCDCAAKAIMQQxqCxBFIAJBEGokAAuiLgEMfyMAQRBrIgwkAAJAAkACQAJAAkACQAJAAkACQAJAAkACQCAAQfQBTQRAQeSbASgCACIFQRAgAEELakF4cSAAQQtJGyIIQQN2IgJ2IgFBA3EEQCABQX9zQQFxIAJqIgNBA3QiAUGUnAFqKAIAIgRBCGohAAJAIAQoAggiAiABQYycAWoiAUYEQEHkmwEgBUF+IAN3cTYCAAwBCyACIAE2AgwgASACNgIICyAEIANBA3QiAUEDcjYCBCABIARqIgEgASgCBEEBcjYCBAwNCyAIQeybASgCACIKTQ0BIAEEQAJAQQIgAnQiAEEAIABrciABIAJ0cSIAQQAgAGtxQQFrIgAgAEEMdkEQcSICdiIBQQV2QQhxIgAgAnIgASAAdiIBQQJ2QQRxIgByIAEgAHYiAUEBdkECcSIAciABIAB2IgFBAXZBAXEiAHIgASAAdmoiA0EDdCIAQZScAWooAgAiBCgCCCIBIABBjJwBaiIARgRAQeSbASAFQX4gA3dxIgU2AgAMAQsgASAANgIMIAAgATYCCAsgBEEIaiEAIAQgCEEDcjYCBCAEIAhqIgIgA0EDdCIBIAhrIgNBAXI2AgQgASAEaiADNgIAIAoEQCAKQQN2IgFBA3RBjJwBaiEHQfibASgCACEEAn8gBUEBIAF0IgFxRQRAQeSbASABIAVyNgIAIAcMAQsgBygCCAshASAHIAQ2AgggASAENgIMIAQgBzYCDCAEIAE2AggLQfibASACNgIAQeybASADNgIADA0LQeibASgCACIGRQ0BIAZBACAGa3FBAWsiACAAQQx2QRBxIgJ2IgFBBXZBCHEiACACciABIAB2IgFBAnZBBHEiAHIgASAAdiIBQQF2QQJxIgByIAEgAHYiAUEBdkEBcSIAciABIAB2akECdEGUngFqKAIAIgEoAgRBeHEgCGshAyABIQIDQAJAIAIoAhAiAEUEQCACKAIUIgBFDQELIAAoAgRBeHEgCGsiAiADIAIgA0kiAhshAyAAIAEgAhshASAAIQIMAQsLIAEgCGoiCSABTQ0CIAEoAhghCyABIAEoAgwiBEcEQCABKAIIIgBB9JsBKAIASRogACAENgIMIAQgADYCCAwMCyABQRRqIgIoAgAiAEUEQCABKAIQIgBFDQQgAUEQaiECCwNAIAIhByAAIgRBFGoiAigCACIADQAgBEEQaiECIAQoAhAiAA0ACyAHQQA2AgAMCwtBfyEIIABBv39LDQAgAEELaiIAQXhxIQhB6JsBKAIAIglFDQBBACAIayEDAkACQAJAAn9BACAIQYACSQ0AGkEfIAhB////B0sNABogAEEIdiIAIABBgP4/akEQdkEIcSICdCIAIABBgOAfakEQdkEEcSIBdCIAIABBgIAPakEQdkECcSIAdEEPdiABIAJyIAByayIAQQF0IAggAEEVanZBAXFyQRxqCyIFQQJ0QZSeAWooAgAiAkUEQEEAIQAMAQtBACEAIAhBAEEZIAVBAXZrIAVBH0YbdCEBA0ACQCACKAIEQXhxIAhrIgcgA08NACACIQQgByIDDQBBACEDIAIhAAwDCyAAIAIoAhQiByAHIAIgAUEddkEEcWooAhAiAkYbIAAgBxshACABQQF0IQEgAg0ACwsgACAEckUEQEECIAV0IgBBACAAa3IgCXEiAEUNAyAAQQAgAGtxQQFrIgAgAEEMdkEQcSICdiIBQQV2QQhxIgAgAnIgASAAdiIBQQJ2QQRxIgByIAEgAHYiAUEBdkECcSIAciABIAB2IgFBAXZBAXEiAHIgASAAdmpBAnRBlJ4BaigCACEACyAARQ0BCwNAIAAoAgRBeHEgCGsiASADSSECIAEgAyACGyEDIAAgBCACGyEEIAAoAhAiAQR/IAEFIAAoAhQLIgANAAsLIARFDQAgA0HsmwEoAgAgCGtPDQAgBCAIaiIGIARNDQEgBCgCGCEFIAQgBCgCDCIBRwRAIAQoAggiAEH0mwEoAgBJGiAAIAE2AgwgASAANgIIDAoLIARBFGoiAigCACIARQRAIAQoAhAiAEUNBCAEQRBqIQILA0AgAiEHIAAiAUEUaiICKAIAIgANACABQRBqIQIgASgCECIADQALIAdBADYCAAwJCyAIQeybASgCACICTQRAQfibASgCACEDAkAgAiAIayIBQRBPBEBB7JsBIAE2AgBB+JsBIAMgCGoiADYCACAAIAFBAXI2AgQgAiADaiABNgIAIAMgCEEDcjYCBAwBC0H4mwFBADYCAEHsmwFBADYCACADIAJBA3I2AgQgAiADaiIAIAAoAgRBAXI2AgQLIANBCGohAAwLCyAIQfCbASgCACIGSQRAQfCbASAGIAhrIgE2AgBB/JsBQfybASgCACICIAhqIgA2AgAgACABQQFyNgIEIAIgCEEDcjYCBCACQQhqIQAMCwtBACEAIAhBL2oiCQJ/QbyfASgCAARAQcSfASgCAAwBC0HInwFCfzcCAEHAnwFCgKCAgICABDcCAEG8nwEgDEEMakFwcUHYqtWqBXM2AgBB0J8BQQA2AgBBoJ8BQQA2AgBBgCALIgFqIgVBACABayIHcSICIAhNDQpBnJ8BKAIAIgQEQEGUnwEoAgAiAyACaiIBIANNDQsgASAESw0LC0GgnwEtAABBBHENBQJAAkBB/JsBKAIAIgMEQEGknwEhAANAIAMgACgCACIBTwRAIAEgACgCBGogA0sNAwsgACgCCCIADQALC0EAED4iAUF/Rg0GIAIhBUHAnwEoAgAiA0EBayIAIAFxBEAgAiABayAAIAFqQQAgA2txaiEFCyAFIAhNDQYgBUH+////B0sNBkGcnwEoAgAiBARAQZSfASgCACIDIAVqIgAgA00NByAAIARLDQcLIAUQPiIAIAFHDQEMCAsgBSAGayAHcSIFQf7///8HSw0FIAUQPiIBIAAoAgAgACgCBGpGDQQgASEACwJAIABBf0YNACAIQTBqIAVNDQBBxJ8BKAIAI
gEgCSAFa2pBACABa3EiAUH+////B0sEQCAAIQEMCAsgARA+QX9HBEAgASAFaiEFIAAhAQwIC0EAIAVrED4aDAULIAAiAUF/Rw0GDAQLAAtBACEEDAcLQQAhAQwFCyABQX9HDQILQaCfAUGgnwEoAgBBBHI2AgALIAJB/v///wdLDQEgAhA+IQFBABA+IQAgAUF/Rg0BIABBf0YNASAAIAFNDQEgACABayIFIAhBKGpNDQELQZSfAUGUnwEoAgAgBWoiADYCAEGYnwEoAgAgAEkEQEGYnwEgADYCAAsCQAJAAkBB/JsBKAIAIgcEQEGknwEhAANAIAEgACgCACIDIAAoAgQiAmpGDQIgACgCCCIADQALDAILQfSbASgCACIAQQAgACABTRtFBEBB9JsBIAE2AgALQQAhAEGonwEgBTYCAEGknwEgATYCAEGEnAFBfzYCAEGInAFBvJ8BKAIANgIAQbCfAUEANgIAA0AgAEEDdCIDQZScAWogA0GMnAFqIgI2AgAgA0GYnAFqIAI2AgAgAEEBaiIAQSBHDQALQfCbASAFQShrIgNBeCABa0EHcUEAIAFBCGpBB3EbIgBrIgI2AgBB/JsBIAAgAWoiADYCACAAIAJBAXI2AgQgASADakEoNgIEQYCcAUHMnwEoAgA2AgAMAgsgAC0ADEEIcQ0AIAMgB0sNACABIAdNDQAgACACIAVqNgIEQfybASAHQXggB2tBB3FBACAHQQhqQQdxGyIAaiICNgIAQfCbAUHwmwEoAgAgBWoiASAAayIANgIAIAIgAEEBcjYCBCABIAdqQSg2AgRBgJwBQcyfASgCADYCAAwBC0H0mwEoAgAgAUsEQEH0mwEgATYCAAsgASAFaiECQaSfASEAAkACQAJAAkACQAJAA0AgAiAAKAIARwRAIAAoAggiAA0BDAILCyAALQAMQQhxRQ0BC0GknwEhAANAIAcgACgCACICTwRAIAIgACgCBGoiBCAHSw0DCyAAKAIIIQAMAAsACyAAIAE2AgAgACAAKAIEIAVqNgIEIAFBeCABa0EHcUEAIAFBCGpBB3EbaiIJIAhBA3I2AgQgAkF4IAJrQQdxQQAgAkEIakEHcRtqIgUgCCAJaiIGayECIAUgB0YEQEH8mwEgBjYCAEHwmwFB8JsBKAIAIAJqIgA2AgAgBiAAQQFyNgIEDAMLIAVB+JsBKAIARgRAQfibASAGNgIAQeybAUHsmwEoAgAgAmoiADYCACAGIABBAXI2AgQgACAGaiAANgIADAMLIAUoAgQiAEEDcUEBRgRAIABBeHEhBwJAIABB/wFNBEAgBSgCCCIDIABBA3YiAEEDdEGMnAFqRhogAyAFKAIMIgFGBEBB5JsBQeSbASgCAEF+IAB3cTYCAAwCCyADIAE2AgwgASADNgIIDAELIAUoAhghCAJAIAUgBSgCDCIBRwRAIAUoAggiACABNgIMIAEgADYCCAwBCwJAIAVBFGoiACgCACIDDQAgBUEQaiIAKAIAIgMNAEEAIQEMAQsDQCAAIQQgAyIBQRRqIgAoAgAiAw0AIAFBEGohACABKAIQIgMNAAsgBEEANgIACyAIRQ0AAkAgBSAFKAIcIgNBAnRBlJ4BaiIAKAIARgRAIAAgATYCACABDQFB6JsBQeibASgCAEF+IAN3cTYCAAwCCyAIQRBBFCAIKAIQIAVGG2ogATYCACABRQ0BCyABIAg2AhggBSgCECIABEAgASAANgIQIAAgATYCGAsgBSgCFCIARQ0AIAEgADYCFCAAIAE2AhgLIAUgB2ohBSACIAdqIQILIAUgBSgCBEF+cTYCBCAGIAJBAXI2AgQgAiAGaiACNgIAIAJB/wFNBEAgAkEDdiIAQQN0QYycAWohAgJ/QeSbASgCACIBQQEgAHQiAHFFBEBB5JsBIAAgAXI2AgAgAgwBCyACKAIICyEAIAIgBjYCCCAAIAY2AgwgBiACNgIMIAYgADYCCAwDC0EfIQAgAkH///8HTQRAIAJBCHYiACAAQYD+P2pBEHZBCHEiA3QiACAAQYDgH2pBEHZBBHEiAXQiACAAQYCAD2pBEHZBAnEiAHRBD3YgASADciAAcmsiAEEBdCACIABBFWp2QQFxckEcaiEACyAGIAA2AhwgBkIANwIQIABBAnRBlJ4BaiEEAkBB6JsBKAIAIgNBASAAdCIBcUUEQEHomwEgASADcjYCACAEIAY2AgAgBiAENgIYDAELIAJBAEEZIABBAXZrIABBH0YbdCEAIAQoAgAhAQNAIAEiAygCBEF4cSACRg0DIABBHXYhASAAQQF0IQAgAyABQQRxaiIEKAIQIgENAAsgBCAGNgIQIAYgAzYCGAsgBiAGNgIMIAYgBjYCCAwCC0HwmwEgBUEoayIDQXggAWtBB3FBACABQQhqQQdxGyIAayICNgIAQfybASAAIAFqIgA2AgAgACACQQFyNgIEIAEgA2pBKDYCBEGAnAFBzJ8BKAIANgIAIAcgBEEnIARrQQdxQQAgBEEna0EHcRtqQS9rIgAgACAHQRBqSRsiAkEbNgIEIAJBrJ8BKQIANwIQIAJBpJ8BKQIANwIIQayfASACQQhqNgIAQaifASAFNgIAQaSfASABNgIAQbCfAUEANgIAIAJBGGohAANAIABBBzYCBCAAQQhqIQEgAEEEaiEAIAEgBEkNAAsgAiAHRg0DIAIgAigCBEF+cTYCBCAHIAIgB2siBEEBcjYCBCACIAQ2AgAgBEH/AU0EQCAEQQN2IgBBA3RBjJwBaiECAn9B5JsBKAIAIgFBASAAdCIAcUUEQEHkmwEgACABcjYCACACDAELIAIoAggLIQAgAiAHNgIIIAAgBzYCDCAHIAI2AgwgByAANgIIDAQLQR8hACAHQgA3AhAgBEH///8HTQRAIARBCHYiACAAQYD+P2pBEHZBCHEiAnQiACAAQYDgH2pBEHZBBHEiAXQiACAAQYCAD2pBEHZBAnEiAHRBD3YgASACciAAcmsiAEEBdCAEIABBFWp2QQFxckEcaiEACyAHIAA2AhwgAEECdEGUngFqIQMCQEHomwEoAgAiAkEBIAB0IgFxRQRAQeibASABIAJyNgIAIAMgBzYCACAHIAM2AhgMAQsgBEEAQRkgAEEBdmsgAEEfRht0IQAgAygCACEBA0AgASICKAIEQXhxIARGDQQgAEEddiEBIABBAXQhACACIAFBBHFqIgMoAhAiAQ0ACyADIAc2AhAgByACNgIYCyAHIAc2AgwgByAHNgIIDAMLIAMoAggiACAGNgIMIAMgBjYCCCAGQQA2AhggBiADNgIMIAYgADYCCAsgCUEIaiEADAULIAIoAggiACAHNgIMIAIgBzYCCCAHQQA2AhggByACNgIMIAcgADYCCAtB8JsBKAIAIgAgCE0NAEHwmwEgACAIayIBNgIAQfybAUH8mwEoAgAiAiAIaiIANgIAIAAgAUEBcjYCBCACIAhBA3I2AgQgAkEIaiEADAMLQbSbAUEwNgIAQQAhAAwCCwJAIAVFDQACQCAEKAIcIgJBAnRB
lJ4BaiIAKAIAIARGBEAgACABNgIAIAENAUHomwEgCUF+IAJ3cSIJNgIADAILIAVBEEEUIAUoAhAgBEYbaiABNgIAIAFFDQELIAEgBTYCGCAEKAIQIgAEQCABIAA2AhAgACABNgIYCyAEKAIUIgBFDQAgASAANgIUIAAgATYCGAsCQCADQQ9NBEAgBCADIAhqIgBBA3I2AgQgACAEaiIAIAAoAgRBAXI2AgQMAQsgBCAIQQNyNgIEIAYgA0EBcjYCBCADIAZqIAM2AgAgA0H/AU0EQCADQQN2IgBBA3RBjJwBaiECAn9B5JsBKAIAIgFBASAAdCIAcUUEQEHkmwEgACABcjYCACACDAELIAIoAggLIQAgAiAGNgIIIAAgBjYCDCAGIAI2AgwgBiAANgIIDAELQR8hACADQf///wdNBEAgA0EIdiIAIABBgP4/akEQdkEIcSICdCIAIABBgOAfakEQdkEEcSIBdCIAIABBgIAPakEQdkECcSIAdEEPdiABIAJyIAByayIAQQF0IAMgAEEVanZBAXFyQRxqIQALIAYgADYCHCAGQgA3AhAgAEECdEGUngFqIQICQAJAIAlBASAAdCIBcUUEQEHomwEgASAJcjYCACACIAY2AgAgBiACNgIYDAELIANBAEEZIABBAXZrIABBH0YbdCEAIAIoAgAhCANAIAgiASgCBEF4cSADRg0CIABBHXYhAiAAQQF0IQAgASACQQRxaiICKAIQIggNAAsgAiAGNgIQIAYgATYCGAsgBiAGNgIMIAYgBjYCCAwBCyABKAIIIgAgBjYCDCABIAY2AgggBkEANgIYIAYgATYCDCAGIAA2AggLIARBCGohAAwBCwJAIAtFDQACQCABKAIcIgJBAnRBlJ4BaiIAKAIAIAFGBEAgACAENgIAIAQNAUHomwEgBkF+IAJ3cTYCAAwCCyALQRBBFCALKAIQIAFGG2ogBDYCACAERQ0BCyAEIAs2AhggASgCECIABEAgBCAANgIQIAAgBDYCGAsgASgCFCIARQ0AIAQgADYCFCAAIAQ2AhgLAkAgA0EPTQRAIAEgAyAIaiIAQQNyNgIEIAAgAWoiACAAKAIEQQFyNgIEDAELIAEgCEEDcjYCBCAJIANBAXI2AgQgAyAJaiADNgIAIAoEQCAKQQN2IgBBA3RBjJwBaiEEQfibASgCACECAn9BASAAdCIAIAVxRQRAQeSbASAAIAVyNgIAIAQMAQsgBCgCCAshACAEIAI2AgggACACNgIMIAIgBDYCDCACIAA2AggLQfibASAJNgIAQeybASADNgIACyABQQhqIQALIAxBEGokACAAC4MEAQN/IAJBgARPBEAgACABIAIQCxogAA8LIAAgAmohAwJAIAAgAXNBA3FFBEACQCAAQQNxRQRAIAAhAgwBCyACQQFIBEAgACECDAELIAAhAgNAIAIgAS0AADoAACABQQFqIQEgAkEBaiICQQNxRQ0BIAIgA0kNAAsLAkAgA0F8cSIEQcAASQ0AIAIgBEFAaiIFSw0AA0AgAiABKAIANgIAIAIgASgCBDYCBCACIAEoAgg2AgggAiABKAIMNgIMIAIgASgCEDYCECACIAEoAhQ2AhQgAiABKAIYNgIYIAIgASgCHDYCHCACIAEoAiA2AiAgAiABKAIkNgIkIAIgASgCKDYCKCACIAEoAiw2AiwgAiABKAIwNgIwIAIgASgCNDYCNCACIAEoAjg2AjggAiABKAI8NgI8IAFBQGshASACQUBrIgIgBU0NAAsLIAIgBE8NAQNAIAIgASgCADYCACABQQRqIQEgAkEEaiICIARJDQALDAELIANBBEkEQCAAIQIMAQsgACADQQRrIgRLBEAgACECDAELIAAhAgNAIAIgAS0AADoAACACIAEtAAE6AAEgAiABLQACOgACIAIgAS0AAzoAAyABQQRqIQEgAkEEaiICIARNDQALCyACIANJBEADQCACIAEtAAA6AAAgAUEBaiEBIAJBAWoiAiADRw0ACwsgAAvBGAECfyMAQRBrIgQkACAEIAA2AgwgBCABNgIIIAQgAjYCBCAEKAIMIQAgBCgCCCECIAQoAgQhAyMAQSBrIgEkACABIAA2AhggASACNgIUIAEgAzYCEAJAIAEoAhRFBEAgAUEANgIcDAELIAFBATYCDCABLQAMBEAgASgCFCECIAEoAhAhAyMAQSBrIgAgASgCGDYCHCAAIAI2AhggACADNgIUIAAgACgCHDYCECAAIAAoAhBBf3M2AhADQCAAKAIUBH8gACgCGEEDcUEARwVBAAtBAXEEQCAAKAIQIQIgACAAKAIYIgNBAWo2AhggACADLQAAIAJzQf8BcUECdEGgGWooAgAgACgCEEEIdnM2AhAgACAAKAIUQQFrNgIUDAELCyAAIAAoAhg2AgwDQCAAKAIUQSBPBEAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGgGWooAgAgACgCEEEQdkH/AXFBAnRBoCFqKAIAIAAoAhBB/wFxQQJ0QaAxaigCACAAKAIQQQh2Qf8BcUECdEGgKWooAgBzc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGgGWooAgAgACgCEEEQdkH/AXFBAnRBoCFqKAIAIAAoAhBB/wFxQQJ0QaAxaigCACAAKAIQQQh2Qf8BcUECdEGgKWooAgBzc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGgGWooAgAgACgCEEEQdkH/AXFBAnRBoCFqKAIAIAAoAhBB/wFxQQJ0QaAxaigCACAAKAIQQQh2Qf8BcUECdEGgKWooAgBzc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGgGWooAgAgACgCEEEQdkH/AXFBAnRBoCFqKAIAIAAoAhBB/wFxQQJ0QaAxaigCACAAKAIQQQh2Qf8BcUECdEGgKWooAgBzc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGgGWooAgAgACgCEEEQdkH/AXFBAnRBoCFqKAIAIAAoAhBB/wFxQQJ0QaAxaigCACAAKAIQQQh2Qf8BcUECdEGgKWooAgBzc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGgGWooAgAgACgCEEEQdkH/AXFBAnRBoCFqKAIAIAAoAhBB/wFxQQJ0QaAxaigCACAAKAIQQQh2Qf8BcUECdEGgKWooAgBzc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGgGWooAgAgACgCEEEQdkH/AXFBAnRBoCFqKAIAIAAoAhBB/wFxQQJ0QaAxaigCACAAKAIQQQh2Qf8BcUECdEGgKWooAgB
zc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGgGWooAgAgACgCEEEQdkH/AXFBAnRBoCFqKAIAIAAoAhBB/wFxQQJ0QaAxaigCACAAKAIQQQh2Qf8BcUECdEGgKWooAgBzc3M2AhAgACAAKAIUQSBrNgIUDAELCwNAIAAoAhRBBE8EQCAAIAAoAgwiAkEEajYCDCAAIAIoAgAgACgCEHM2AhAgACAAKAIQQRh2QQJ0QaAZaigCACAAKAIQQRB2Qf8BcUECdEGgIWooAgAgACgCEEH/AXFBAnRBoDFqKAIAIAAoAhBBCHZB/wFxQQJ0QaApaigCAHNzczYCECAAIAAoAhRBBGs2AhQMAQsLIAAgACgCDDYCGCAAKAIUBEADQCAAKAIQIQIgACAAKAIYIgNBAWo2AhggACADLQAAIAJzQf8BcUECdEGgGWooAgAgACgCEEEIdnM2AhAgACAAKAIUQQFrIgI2AhQgAg0ACwsgACAAKAIQQX9zNgIQIAEgACgCEDYCHAwBCyABKAIUIQIgASgCECEDIwBBIGsiACABKAIYNgIcIAAgAjYCGCAAIAM2AhQgACAAKAIcQQh2QYD+A3EgACgCHEEYdmogACgCHEGA/gNxQQh0aiAAKAIcQf8BcUEYdGo2AhAgACAAKAIQQX9zNgIQA0AgACgCFAR/IAAoAhhBA3FBAEcFQQALQQFxBEAgACgCEEEYdiECIAAgACgCGCIDQQFqNgIYIAAgAy0AACACc0ECdEGgOWooAgAgACgCEEEIdHM2AhAgACAAKAIUQQFrNgIUDAELCyAAIAAoAhg2AgwDQCAAKAIUQSBPBEAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGg0QBqKAIAIAAoAhBBEHZB/wFxQQJ0QaDJAGooAgAgACgCEEH/AXFBAnRBoDlqKAIAIAAoAhBBCHZB/wFxQQJ0QaDBAGooAgBzc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGg0QBqKAIAIAAoAhBBEHZB/wFxQQJ0QaDJAGooAgAgACgCEEH/AXFBAnRBoDlqKAIAIAAoAhBBCHZB/wFxQQJ0QaDBAGooAgBzc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGg0QBqKAIAIAAoAhBBEHZB/wFxQQJ0QaDJAGooAgAgACgCEEH/AXFBAnRBoDlqKAIAIAAoAhBBCHZB/wFxQQJ0QaDBAGooAgBzc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGg0QBqKAIAIAAoAhBBEHZB/wFxQQJ0QaDJAGooAgAgACgCEEH/AXFBAnRBoDlqKAIAIAAoAhBBCHZB/wFxQQJ0QaDBAGooAgBzc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGg0QBqKAIAIAAoAhBBEHZB/wFxQQJ0QaDJAGooAgAgACgCEEH/AXFBAnRBoDlqKAIAIAAoAhBBCHZB/wFxQQJ0QaDBAGooAgBzc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGg0QBqKAIAIAAoAhBBEHZB/wFxQQJ0QaDJAGooAgAgACgCEEH/AXFBAnRBoDlqKAIAIAAoAhBBCHZB/wFxQQJ0QaDBAGooAgBzc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGg0QBqKAIAIAAoAhBBEHZB/wFxQQJ0QaDJAGooAgAgACgCEEH/AXFBAnRBoDlqKAIAIAAoAhBBCHZB/wFxQQJ0QaDBAGooAgBzc3M2AhAgACAAKAIMIgJBBGo2AgwgACACKAIAIAAoAhBzNgIQIAAgACgCEEEYdkECdEGg0QBqKAIAIAAoAhBBEHZB/wFxQQJ0QaDJAGooAgAgACgCEEH/AXFBAnRBoDlqKAIAIAAoAhBBCHZB/wFxQQJ0QaDBAGooAgBzc3M2AhAgACAAKAIUQSBrNgIUDAELCwNAIAAoAhRBBE8EQCAAIAAoAgwiAkEEajYCDCAAIAIoAgAgACgCEHM2AhAgACAAKAIQQRh2QQJ0QaDRAGooAgAgACgCEEEQdkH/AXFBAnRBoMkAaigCACAAKAIQQf8BcUECdEGgOWooAgAgACgCEEEIdkH/AXFBAnRBoMEAaigCAHNzczYCECAAIAAoAhRBBGs2AhQMAQsLIAAgACgCDDYCGCAAKAIUBEADQCAAKAIQQRh2IQIgACAAKAIYIgNBAWo2AhggACADLQAAIAJzQQJ0QaA5aigCACAAKAIQQQh0czYCECAAIAAoAhRBAWsiAjYCFCACDQALCyAAIAAoAhBBf3M2AhAgASAAKAIQQQh2QYD+A3EgACgCEEEYdmogACgCEEGA/gNxQQh0aiAAKAIQQf8BcUEYdGo2AhwLIAEoAhwhACABQSBqJAAgBEEQaiQAIAAL7AIBAn8jAEEQayIBJAAgASAANgIMAkAgASgCDEUNACABKAIMKAIwBEAgASgCDCIAIAAoAjBBAWs2AjALIAEoAgwoAjANACABKAIMKAIgBEAgASgCDEEBNgIgIAEoAgwQLxoLIAEoAgwoAiRBAUYEQCABKAIMEGILAkAgASgCDCgCLEUNACABKAIMLQAoQQFxDQAgASgCDCECIwBBEGsiACABKAIMKAIsNgIMIAAgAjYCCCAAQQA2AgQDQCAAKAIEIAAoAgwoAkRJBEAgACgCDCgCTCAAKAIEQQJ0aigCACAAKAIIRgRAIAAoAgwoAkwgACgCBEECdGogACgCDCgCTCAAKAIMKAJEQQFrQQJ0aigCADYCACAAKAIMIgAgACgCREEBazYCRAUgACAAKAIEQQFqNgIEDAILCwsLIAEoAgxBAEIAQQUQIBogASgCDCgCAARAIAEoAgwoAgAQGwsgASgCDBAVCyABQRBqJAALnwIBAn8jAEEQayIBJAAgASAANgIMIAEgASgCDCgCHDYCBCABKAIEIQIjAEEQayIAJAAgACACNgIMIAAoAgwQvAEgAEEQaiQAIAEgASgCBCgCFDYCCCABKAIIIAEoAgwoAhBLBEAgASABKAIMKAIQNgIICwJAIAEoAghFDQAgASgCDCgCDCABKAIEKAIQIAEoAggQGRogASgCDCIAIAEoAgggACgCDGo2AgwgASgCBCIAIAEoAgggACgCEGo2AhAgASgCDCIAIAEoAgggACgCFGo2AhQgASgCDCIAIAAoAhAgASgCCGs2AhAgASgCBCIAIAAoAhQgASgCCGs2AhQgASgCBCgCFA0AIAEoAgQgASgCBCgCCDYCEAsgAUEQaiQAC2ABAX8jAEEQayIBJAAgASAANg
IIIAEgASgCCEICEB42AgQCQCABKAIERQRAIAFBADsBDgwBCyABIAEoAgQtAAAgASgCBC0AAUEIdGo7AQ4LIAEvAQ4hACABQRBqJAAgAAvpAQEBfyMAQSBrIgIkACACIAA2AhwgAiABNwMQIAIpAxAhASMAQSBrIgAgAigCHDYCGCAAIAE3AxACQAJAAkAgACgCGC0AAEEBcUUNACAAKQMQIAAoAhgpAxAgACkDEHxWDQAgACgCGCkDCCAAKAIYKQMQIAApAxB8Wg0BCyAAKAIYQQA6AAAgAEEANgIcDAELIAAgACgCGCgCBCAAKAIYKQMQp2o2AgwgACAAKAIMNgIcCyACIAAoAhw2AgwgAigCDARAIAIoAhwiACACKQMQIAApAxB8NwMQCyACKAIMIQAgAkEgaiQAIAALbwEBfyMAQRBrIgIkACACIAA2AgggAiABOwEGIAIgAigCCEICEB42AgACQCACKAIARQRAIAJBfzYCDAwBCyACKAIAIAIvAQY6AAAgAigCACACLwEGQQh2OgABIAJBADYCDAsgAigCDBogAkEQaiQAC7YCAQF/IwBBMGsiBCQAIAQgADYCJCAEIAE2AiAgBCACNwMYIAQgAzYCFAJAIAQoAiQpAxhCASAEKAIUrYaDUARAIAQoAiRBDGpBHEEAEBQgBEJ/NwMoDAELAkAgBCgCJCgCAEUEQCAEIAQoAiQoAgggBCgCICAEKQMYIAQoAhQgBCgCJCgCBBEOADcDCAwBCyAEIAQoAiQoAgAgBCgCJCgCCCAEKAIgIAQpAxggBCgCFCAEKAIkKAIEEQoANwMICyAEKQMIQgBTBEACQCAEKAIUQQRGDQAgBCgCFEEORg0AAkAgBCgCJCAEQghBBBAgQgBTBEAgBCgCJEEMakEUQQAQFAwBCyAEKAIkQQxqIAQoAgAgBCgCBBAUCwsLIAQgBCkDCDcDKAsgBCkDKCECIARBMGokACACC48BAQF/IwBBEGsiAiQAIAIgADYCCCACIAE2AgQgAiACKAIIQgQQHjYCAAJAIAIoAgBFBEAgAkF/NgIMDAELIAIoAgAgAigCBDoAACACKAIAIAIoAgRBCHY6AAEgAigCACACKAIEQRB2OgACIAIoAgAgAigCBEEYdjoAAyACQQA2AgwLIAIoAgwaIAJBEGokAAsXACAALQAAQSBxRQRAIAEgAiAAEHEaCwtQAQF/IwBBEGsiASQAIAEgADYCDANAIAEoAgwEQCABIAEoAgwoAgA2AgggASgCDCgCDBAVIAEoAgwQFSABIAEoAgg2AgwMAQsLIAFBEGokAAs+AQF/IwBBEGsiASQAIAEgADYCDCABKAIMBEAgASgCDCgCABAVIAEoAgwoAgwQFSABKAIMEBULIAFBEGokAAt9AQF/IwBBEGsiASQAIAEgADYCDCABKAIMBEAgAUIANwMAA0AgASkDACABKAIMKQMIWkUEQCABKAIMKAIAIAEpAwCnQQR0ahB3IAEgASkDAEIBfDcDAAwBCwsgASgCDCgCABAVIAEoAgwoAigQJCABKAIMEBULIAFBEGokAAtuAQF/IwBBgAJrIgUkAAJAIARBgMAEcQ0AIAIgA0wNACAFIAFB/wFxIAIgA2siAkGAAiACQYACSSIBGxAzIAFFBEADQCAAIAVBgAIQIiACQYACayICQf8BSw0ACwsgACAFIAIQIgsgBUGAAmokAAvRAQEBfyMAQTBrIgMkACADIAA2AiggAyABNwMgIAMgAjYCHAJAIAMoAigtAChBAXEEQCADQX82AiwMAQsCQCADKAIoKAIgBEAgAygCHEUNASADKAIcQQFGDQEgAygCHEECRg0BCyADKAIoQQxqQRJBABAUIANBfzYCLAwBCyADIAMpAyA3AwggAyADKAIcNgIQIAMoAiggA0EIakIQQQYQIEIAUwRAIANBfzYCLAwBCyADKAIoQQA6ADQgA0EANgIsCyADKAIsIQAgA0EwaiQAIAALmBcBAn8jAEEwayIEJAAgBCAANgIsIAQgATYCKCAEIAI2AiQgBCADNgIgIARBADYCFAJAIAQoAiwoAoQBQQBKBEAgBCgCLCgCACgCLEECRgRAIwBBEGsiACAEKAIsNgIIIABB/4D/n382AgQgAEEANgIAAkADQCAAKAIAQR9MBEACQCAAKAIEQQFxRQ0AIAAoAghBlAFqIAAoAgBBAnRqLwEARQ0AIABBADYCDAwDCyAAIAAoAgBBAWo2AgAgACAAKAIEQQF2NgIEDAELCwJAAkAgACgCCC8BuAENACAAKAIILwG8AQ0AIAAoAggvAcgBRQ0BCyAAQQE2AgwMAQsgAEEgNgIAA0AgACgCAEGAAkgEQCAAKAIIQZQBaiAAKAIAQQJ0ai8BAARAIABBATYCDAwDBSAAIAAoAgBBAWo2AgAMAgsACwsgAEEANgIMCyAAKAIMIQAgBCgCLCgCACAANgIsCyAEKAIsIAQoAixBmBZqEHogBCgCLCAEKAIsQaQWahB6IAQoAiwhASMAQRBrIgAkACAAIAE2AgwgACgCDCAAKAIMQZQBaiAAKAIMKAKcFhC6ASAAKAIMIAAoAgxBiBNqIAAoAgwoAqgWELoBIAAoAgwgACgCDEGwFmoQeiAAQRI2AggDQAJAIAAoAghBA0gNACAAKAIMQfwUaiAAKAIILQDgbEECdGovAQINACAAIAAoAghBAWs2AggMAQsLIAAoAgwiASABKAKoLSAAKAIIQQNsQRFqajYCqC0gACgCCCEBIABBEGokACAEIAE2AhQgBCAEKAIsKAKoLUEKakEDdjYCHCAEIAQoAiwoAqwtQQpqQQN2NgIYIAQoAhggBCgCHE0EQCAEIAQoAhg2AhwLDAELIAQgBCgCJEEFaiIANgIYIAQgADYCHAsCQAJAIAQoAhwgBCgCJEEEakkNACAEKAIoRQ0AIAQoAiwgBCgCKCAEKAIkIAQoAiAQXQwBCwJAAkAgBCgCLCgCiAFBBEcEQCAEKAIYIAQoAhxHDQELIARBAzYCEAJAIAQoAiwoArwtQRAgBCgCEGtKBEAgBCAEKAIgQQJqNgIMIAQoAiwiACAALwG4LSAEKAIMQf//A3EgBCgCLCgCvC10cjsBuC0gBCgCLC8BuC1B/wFxIQEgBCgCLCgCCCECIAQoAiwiAygCFCEAIAMgAEEBajYCFCAAIAJqIAE6AAAgBCgCLC8BuC1BCHYhASAEKAIsKAIIIQIgBCgCLCIDKAIUIQAgAyAAQQFqNgIUIAAgAmogAToAACAEKAIsIAQoAgxB//8DcUEQIAQoAiwoArwta3U7AbgtIAQoAiwiACAAKAK8LSAEKAIQQRBrajYCvC0MAQsgBCgCLCIAIAAvAbgtIAQoAiBBAmpB//8DcSAEKAIsKAK8LXRyOwG4LSAEKAIsIgAgBCgCECAAKAK8LWo2ArwtCyAEKAIsQZDgAEGQ6QAQuwEMAQsgBEEDNgIIAkAgBCgCLCgCvC1BECAEKAIIa0oEQCAEIAQoAiBBBGo2AgQgBCgCLCIAIAAvAbgtIAQoAgRB/
/8DcSAEKAIsKAK8LXRyOwG4LSAEKAIsLwG4LUH/AXEhASAEKAIsKAIIIQIgBCgCLCIDKAIUIQAgAyAAQQFqNgIUIAAgAmogAToAACAEKAIsLwG4LUEIdiEBIAQoAiwoAgghAiAEKAIsIgMoAhQhACADIABBAWo2AhQgACACaiABOgAAIAQoAiwgBCgCBEH//wNxQRAgBCgCLCgCvC1rdTsBuC0gBCgCLCIAIAAoArwtIAQoAghBEGtqNgK8LQwBCyAEKAIsIgAgAC8BuC0gBCgCIEEEakH//wNxIAQoAiwoArwtdHI7AbgtIAQoAiwiACAEKAIIIAAoArwtajYCvC0LIAQoAiwhASAEKAIsKAKcFkEBaiECIAQoAiwoAqgWQQFqIQMgBCgCFEEBaiEFIwBBQGoiACQAIAAgATYCPCAAIAI2AjggACADNgI0IAAgBTYCMCAAQQU2AigCQCAAKAI8KAK8LUEQIAAoAihrSgRAIAAgACgCOEGBAms2AiQgACgCPCIBIAEvAbgtIAAoAiRB//8DcSAAKAI8KAK8LXRyOwG4LSAAKAI8LwG4LUH/AXEhAiAAKAI8KAIIIQMgACgCPCIFKAIUIQEgBSABQQFqNgIUIAEgA2ogAjoAACAAKAI8LwG4LUEIdiECIAAoAjwoAgghAyAAKAI8IgUoAhQhASAFIAFBAWo2AhQgASADaiACOgAAIAAoAjwgACgCJEH//wNxQRAgACgCPCgCvC1rdTsBuC0gACgCPCIBIAEoArwtIAAoAihBEGtqNgK8LQwBCyAAKAI8IgEgAS8BuC0gACgCOEGBAmtB//8DcSAAKAI8KAK8LXRyOwG4LSAAKAI8IgEgACgCKCABKAK8LWo2ArwtCyAAQQU2AiACQCAAKAI8KAK8LUEQIAAoAiBrSgRAIAAgACgCNEEBazYCHCAAKAI8IgEgAS8BuC0gACgCHEH//wNxIAAoAjwoArwtdHI7AbgtIAAoAjwvAbgtQf8BcSECIAAoAjwoAgghAyAAKAI8IgUoAhQhASAFIAFBAWo2AhQgASADaiACOgAAIAAoAjwvAbgtQQh2IQIgACgCPCgCCCEDIAAoAjwiBSgCFCEBIAUgAUEBajYCFCABIANqIAI6AAAgACgCPCAAKAIcQf//A3FBECAAKAI8KAK8LWt1OwG4LSAAKAI8IgEgASgCvC0gACgCIEEQa2o2ArwtDAELIAAoAjwiASABLwG4LSAAKAI0QQFrQf//A3EgACgCPCgCvC10cjsBuC0gACgCPCIBIAAoAiAgASgCvC1qNgK8LQsgAEEENgIYAkAgACgCPCgCvC1BECAAKAIYa0oEQCAAIAAoAjBBBGs2AhQgACgCPCIBIAEvAbgtIAAoAhRB//8DcSAAKAI8KAK8LXRyOwG4LSAAKAI8LwG4LUH/AXEhAiAAKAI8KAIIIQMgACgCPCIFKAIUIQEgBSABQQFqNgIUIAEgA2ogAjoAACAAKAI8LwG4LUEIdiECIAAoAjwoAgghAyAAKAI8IgUoAhQhASAFIAFBAWo2AhQgASADaiACOgAAIAAoAjwgACgCFEH//wNxQRAgACgCPCgCvC1rdTsBuC0gACgCPCIBIAEoArwtIAAoAhhBEGtqNgK8LQwBCyAAKAI8IgEgAS8BuC0gACgCMEEEa0H//wNxIAAoAjwoArwtdHI7AbgtIAAoAjwiASAAKAIYIAEoArwtajYCvC0LIABBADYCLANAIAAoAiwgACgCMEgEQCAAQQM2AhACQCAAKAI8KAK8LUEQIAAoAhBrSgRAIAAgACgCPEH8FGogACgCLC0A4GxBAnRqLwECNgIMIAAoAjwiASABLwG4LSAAKAIMQf//A3EgACgCPCgCvC10cjsBuC0gACgCPC8BuC1B/wFxIQIgACgCPCgCCCEDIAAoAjwiBSgCFCEBIAUgAUEBajYCFCABIANqIAI6AAAgACgCPC8BuC1BCHYhAiAAKAI8KAIIIQMgACgCPCIFKAIUIQEgBSABQQFqNgIUIAEgA2ogAjoAACAAKAI8IAAoAgxB//8DcUEQIAAoAjwoArwta3U7AbgtIAAoAjwiASABKAK8LSAAKAIQQRBrajYCvC0MAQsgACgCPCIBIAEvAbgtIAAoAjxB/BRqIAAoAiwtAOBsQQJ0ai8BAiAAKAI8KAK8LXRyOwG4LSAAKAI8IgEgACgCECABKAK8LWo2ArwtCyAAIAAoAixBAWo2AiwMAQsLIAAoAjwgACgCPEGUAWogACgCOEEBaxC5ASAAKAI8IAAoAjxBiBNqIAAoAjRBAWsQuQEgAEFAayQAIAQoAiwgBCgCLEGUAWogBCgCLEGIE2oQuwELCyAEKAIsEL4BIAQoAiAEQCAEKAIsEL0BCyAEQTBqJAAL1AEBAX8jAEEgayICJAAgAiAANgIYIAIgATcDECACIAIoAhhFOgAPAkAgAigCGEUEQCACIAIpAxCnEBgiADYCGCAARQRAIAJBADYCHAwCCwsgAkEYEBgiADYCCCAARQRAIAItAA9BAXEEQCACKAIYEBULIAJBADYCHAwBCyACKAIIQQE6AAAgAigCCCACKAIYNgIEIAIoAgggAikDEDcDCCACKAIIQgA3AxAgAigCCCACLQAPQQFxOgABIAIgAigCCDYCHAsgAigCHCEAIAJBIGokACAAC3gBAX8jAEEQayIBJAAgASAANgIIIAEgASgCCEIEEB42AgQCQCABKAIERQRAIAFBADYCDAwBCyABIAEoAgQtAAAgASgCBC0AASABKAIELQACIAEoAgQtAANBCHRqQQh0akEIdGo2AgwLIAEoAgwhACABQRBqJAAgAAuHAwEBfyMAQTBrIgMkACADIAA2AiQgAyABNgIgIAMgAjcDGAJAIAMoAiQtAChBAXEEQCADQn83AygMAQsCQAJAIAMoAiQoAiBFDQAgAykDGEL///////////8AVg0AIAMpAxhQDQEgAygCIA0BCyADKAIkQQxqQRJBABAUIANCfzcDKAwBCyADKAIkLQA1QQFxBEAgA0J/NwMoDAELAn8jAEEQayIAIAMoAiQ2AgwgACgCDC0ANEEBcQsEQCADQgA3AygMAQsgAykDGFAEQCADQgA3AygMAQsgA0IANwMQA0AgAykDECADKQMYVARAIAMgAygCJCADKAIgIAMpAxCnaiADKQMYIAMpAxB9QQEQICICNwMIIAJCAFMEQCADKAIkQQE6ADUgAykDEFAEQCADQn83AygMBAsgAyADKQMQNwMoDAMLIAMpAwhQBEAgAygCJEEBOgA0BSADIAMpAwggAykDEHw3AxAMAgsLCyADIAMpAxA3AygLIAMpAyghAiADQTBqJAAgAgthAQF/IwBBEGsiAiAANgIIIAIgATcDAAJAIAIpAwAgAigCCCkDCFYEQCACKAIIQQA6AAAgAkF/NgIMDAELIAIoAghBAToAACACKAIIIAIpAwA3AxAgAkEANgIMCyACKAIMC+8BAQF/IwBBIGsiAiQAIAIgADYCGCACIAE3AxAgAiACKAIYQggQ
HjYCDAJAIAIoAgxFBEAgAkF/NgIcDAELIAIoAgwgAikDEEL/AYM8AAAgAigCDCACKQMQQgiIQv8BgzwAASACKAIMIAIpAxBCEIhC/wGDPAACIAIoAgwgAikDEEIYiEL/AYM8AAMgAigCDCACKQMQQiCIQv8BgzwABCACKAIMIAIpAxBCKIhC/wGDPAAFIAIoAgwgAikDEEIwiEL/AYM8AAYgAigCDCACKQMQQjiIQv8BgzwAByACQQA2AhwLIAIoAhwaIAJBIGokAAt/AQN/IAAhAQJAIABBA3EEQANAIAEtAABFDQIgAUEBaiIBQQNxDQALCwNAIAEiAkEEaiEBIAIoAgAiA0F/cyADQYGChAhrcUGAgYKEeHFFDQALIANB/wFxRQRAIAIgAGsPCwNAIAItAAEhAyACQQFqIgEhAiADDQALCyABIABrC6YBAQF/IwBBEGsiASQAIAEgADYCCAJAIAEoAggoAiBFBEAgASgCCEEMakESQQAQFCABQX82AgwMAQsgASgCCCIAIAAoAiBBAWs2AiAgASgCCCgCIEUEQCABKAIIQQBCAEECECAaIAEoAggoAgAEQCABKAIIKAIAEC9BAEgEQCABKAIIQQxqQRRBABAUCwsLIAFBADYCDAsgASgCDCEAIAFBEGokACAACzYBAX8jAEEQayIBIAA2AgwCfiABKAIMLQAAQQFxBEAgASgCDCkDCCABKAIMKQMQfQwBC0IACwuyAQIBfwF+IwBBEGsiASQAIAEgADYCBCABIAEoAgRCCBAeNgIAAkAgASgCAEUEQCABQgA3AwgMAQsgASABKAIALQAArSABKAIALQAHrUI4hiABKAIALQAGrUIwhnwgASgCAC0ABa1CKIZ8IAEoAgAtAAStQiCGfCABKAIALQADrUIYhnwgASgCAC0AAq1CEIZ8IAEoAgAtAAGtQgiGfHw3AwgLIAEpAwghAiABQRBqJAAgAgvcAQEBfyMAQRBrIgEkACABIAA2AgwgASgCDARAIAEoAgwoAigEQCABKAIMKAIoQQA2AiggASgCDCgCKEIANwMgIAEoAgwCfiABKAIMKQMYIAEoAgwpAyBWBEAgASgCDCkDGAwBCyABKAIMKQMgCzcDGAsgASABKAIMKQMYNwMAA0AgASkDACABKAIMKQMIWkUEQCABKAIMKAIAIAEpAwCnQQR0aigCABAVIAEgASkDAEIBfDcDAAwBCwsgASgCDCgCABAVIAEoAgwoAgQQFSABKAIMEBULIAFBEGokAAvwAgICfwF+AkAgAkUNACAAIAJqIgNBAWsgAToAACAAIAE6AAAgAkEDSQ0AIANBAmsgAToAACAAIAE6AAEgA0EDayABOgAAIAAgAToAAiACQQdJDQAgA0EEayABOgAAIAAgAToAAyACQQlJDQAgAEEAIABrQQNxIgRqIgMgAUH/AXFBgYKECGwiADYCACADIAIgBGtBfHEiAmoiAUEEayAANgIAIAJBCUkNACADIAA2AgggAyAANgIEIAFBCGsgADYCACABQQxrIAA2AgAgAkEZSQ0AIAMgADYCGCADIAA2AhQgAyAANgIQIAMgADYCDCABQRBrIAA2AgAgAUEUayAANgIAIAFBGGsgADYCACABQRxrIAA2AgAgAiADQQRxQRhyIgFrIgJBIEkNACAArUKBgICAEH4hBSABIANqIQEDQCABIAU3AxggASAFNwMQIAEgBTcDCCABIAU3AwAgAUEgaiEBIAJBIGsiAkEfSw0ACwsLawEBfyMAQSBrIgIgADYCHCACQgEgAigCHK2GNwMQIAJBDGogATYCAANAIAIgAigCDCIAQQRqNgIMIAIgACgCADYCCCACKAIIQQBIRQRAIAIgAikDEEIBIAIoAgithoQ3AxAMAQsLIAIpAxALYAIBfwF+IwBBEGsiASQAIAEgADYCBAJAIAEoAgQoAiRBAUcEQCABKAIEQQxqQRJBABAUIAFCfzcDCAwBCyABIAEoAgRBAEIAQQ0QIDcDCAsgASkDCCECIAFBEGokACACC6UCAQJ/IwBBIGsiAyQAIAMgADYCGCADIAE2AhQgAyACNwMIIAMoAhgoAgAhASADKAIUIQQgAykDCCECIwBBIGsiACQAIAAgATYCFCAAIAQ2AhAgACACNwMIAkACQCAAKAIUKAIkQQFGBEAgACkDCEL///////////8AWA0BCyAAKAIUQQxqQRJBABAUIABCfzcDGAwBCyAAIAAoAhQgACgCECAAKQMIQQsQIDcDGAsgACkDGCECIABBIGokACADIAI3AwACQCACQgBTBEAgAygCGEEIaiADKAIYKAIAEBcgA0F/NgIcDAELIAMpAwAgAykDCFIEQCADKAIYQQhqQQZBGxAUIANBfzYCHAwBCyADQQA2AhwLIAMoAhwhACADQSBqJAAgAAsxAQF/IwBBEGsiASQAIAEgADYCDCABKAIMBEAgASgCDBBSIAEoAgwQFQsgAUEQaiQACy8BAX8jAEEQayIBJAAgASAANgIMIAEoAgwoAggQFSABKAIMQQA2AgggAUEQaiQAC80BAQF/IwBBEGsiAiQAIAIgADYCCCACIAE2AgQCQCACKAIILQAoQQFxBEAgAkF/NgIMDAELIAIoAgRFBEAgAigCCEEMakESQQAQFCACQX82AgwMAQsgAigCBBA7IAIoAggoAgAEQCACKAIIKAIAIAIoAgQQOUEASARAIAIoAghBDGogAigCCCgCABAXIAJBfzYCDAwCCwsgAigCCCACKAIEQjhBAxAgQgBTBEAgAkF/NgIMDAELIAJBADYCDAsgAigCDCEAIAJBEGokACAAC98EAQF/IwBBIGsiAiAANgIYIAIgATYCFAJAIAIoAhhFBEAgAkEBNgIcDAELIAIgAigCGCgCADYCDAJAIAIoAhgoAggEQCACIAIoAhgoAgg2AhAMAQsgAkEBNgIQIAJBADYCCANAAkAgAigCCCACKAIYLwEETw0AAkAgAigCDCACKAIIai0AAEEfSwRAIAIoAgwgAigCCGotAABBgAFJDQELIAIoAgwgAigCCGotAABBDUYNACACKAIMIAIoAghqLQAAQQpGDQAgAigCDCACKAIIai0AAEEJRgRADAELIAJBAzYCEAJAIAIoAgwgAigCCGotAABB4AFxQcABRgRAIAJBATYCAAwBCwJAIAIoAgwgAigCCGotAABB8AFxQeABRgRAIAJBAjYCAAwBCwJAIAIoAgwgAigCCGotAABB+AFxQfABRgRAIAJBAzYCAAwBCyACQQQ2AhAMBAsLCyACKAIYLwEEIAIoAgggAigCAGpNBEAgAkEENgIQDAILIAJBATYCBANAIAIoAgQgAigCAE0EQCACKAIMIAIoAgggAigCBGpqLQAAQcABcUGAAUcEQCACQQQ2AhAMBgUgAiACKAIEQQFqNgIEDAILAAsLIAIgAigCACACKAIIajYCCAsgAiACKAIIQQFqNgIIDAELCwsgAigCGCACKAIQNgIIIAIoAhQEQAJAIAIoAhRBAkcNACACKAIQQQN
HDQAgAkECNgIQIAIoAhhBAjYCCAsCQCACKAIUIAIoAhBGDQAgAigCEEEBRg0AIAJBBTYCHAwCCwsgAiACKAIQNgIcCyACKAIcC2oBAX8jAEEQayIBIAA2AgwgASgCDEIANwMAIAEoAgxBADYCCCABKAIMQn83AxAgASgCDEEANgIsIAEoAgxBfzYCKCABKAIMQgA3AxggASgCDEIANwMgIAEoAgxBADsBMCABKAIMQQA7ATILjQUBA38jAEEQayIBJAAgASAANgIMIAEoAgwEQCABKAIMKAIABEAgASgCDCgCABAvGiABKAIMKAIAEBsLIAEoAgwoAhwQFSABKAIMKAIgECQgASgCDCgCJBAkIAEoAgwoAlAhAiMAQRBrIgAkACAAIAI2AgwgACgCDARAIAAoAgwoAhAEQCAAQQA2AggDQCAAKAIIIAAoAgwoAgBJBEAgACgCDCgCECAAKAIIQQJ0aigCAARAIAAoAgwoAhAgACgCCEECdGooAgAhAyMAQRBrIgIkACACIAM2AgwDQCACKAIMBEAgAiACKAIMKAIYNgIIIAIoAgwQFSACIAIoAgg2AgwMAQsLIAJBEGokAAsgACAAKAIIQQFqNgIIDAELCyAAKAIMKAIQEBULIAAoAgwQFQsgAEEQaiQAIAEoAgwoAkAEQCABQgA3AwADQCABKQMAIAEoAgwpAzBUBEAgASgCDCgCQCABKQMAp0EEdGoQdyABIAEpAwBCAXw3AwAMAQsLIAEoAgwoAkAQFQsgAUIANwMAA0AgASkDACABKAIMKAJErVQEQCABKAIMKAJMIAEpAwCnQQJ0aigCACECIwBBEGsiACQAIAAgAjYCDCAAKAIMQQE6ACgCfyMAQRBrIgIgACgCDEEMajYCDCACKAIMKAIARQsEQCAAKAIMQQxqQQhBABAUCyAAQRBqJAAgASABKQMAQgF8NwMADAELCyABKAIMKAJMEBUgASgCDCgCVCECIwBBEGsiACQAIAAgAjYCDCAAKAIMBEAgACgCDCgCCARAIAAoAgwoAgwgACgCDCgCCBECAAsgACgCDBAVCyAAQRBqJAAgASgCDEEIahA4IAEoAgwQFQsgAUEQaiQAC48OAQF/IwBBEGsiAyQAIAMgADYCDCADIAE2AgggAyACNgIEIAMoAgghASADKAIEIQIjAEEgayIAIAMoAgw2AhggACABNgIUIAAgAjYCECAAIAAoAhhBEHY2AgwgACAAKAIYQf//A3E2AhgCQCAAKAIQQQFGBEAgACAAKAIULQAAIAAoAhhqNgIYIAAoAhhB8f8DTwRAIAAgACgCGEHx/wNrNgIYCyAAIAAoAhggACgCDGo2AgwgACgCDEHx/wNPBEAgACAAKAIMQfH/A2s2AgwLIAAgACgCGCAAKAIMQRB0cjYCHAwBCyAAKAIURQRAIABBATYCHAwBCyAAKAIQQRBJBEADQCAAIAAoAhAiAUEBazYCECABBEAgACAAKAIUIgFBAWo2AhQgACABLQAAIAAoAhhqNgIYIAAgACgCGCAAKAIMajYCDAwBCwsgACgCGEHx/wNPBEAgACAAKAIYQfH/A2s2AhgLIAAgACgCDEHx/wNwNgIMIAAgACgCGCAAKAIMQRB0cjYCHAwBCwNAIAAoAhBBsCtPBEAgACAAKAIQQbArazYCECAAQdsCNgIIA0AgACAAKAIULQAAIAAoAhhqNgIYIAAgACgCGCAAKAIMajYCDCAAIAAoAhQtAAEgACgCGGo2AhggACAAKAIYIAAoAgxqNgIMIAAgACgCFC0AAiAAKAIYajYCGCAAIAAoAhggACgCDGo2AgwgACAAKAIULQADIAAoAhhqNgIYIAAgACgCGCAAKAIMajYCDCAAIAAoAhQtAAQgACgCGGo2AhggACAAKAIYIAAoAgxqNgIMIAAgACgCFC0ABSAAKAIYajYCGCAAIAAoAhggACgCDGo2AgwgACAAKAIULQAGIAAoAhhqNgIYIAAgACgCGCAAKAIMajYCDCAAIAAoAhQtAAcgACgCGGo2AhggACAAKAIYIAAoAgxqNgIMIAAgACgCFC0ACCAAKAIYajYCGCAAIAAoAhggACgCDGo2AgwgACAAKAIULQAJIAAoAhhqNgIYIAAgACgCGCAAKAIMajYCDCAAIAAoAhQtAAogACgCGGo2AhggACAAKAIYIAAoAgxqNgIMIAAgACgCFC0ACyAAKAIYajYCGCAAIAAoAhggACgCDGo2AgwgACAAKAIULQAMIAAoAhhqNgIYIAAgACgCGCAAKAIMajYCDCAAIAAoAhQtAA0gACgCGGo2AhggACAAKAIYIAAoAgxqNgIMIAAgACgCFC0ADiAAKAIYajYCGCAAIAAoAhggACgCDGo2AgwgACAAKAIULQAPIAAoAhhqNgIYIAAgACgCGCAAKAIMajYCDCAAIAAoAhRBEGo2AhQgACAAKAIIQQFrIgE2AgggAQ0ACyAAIAAoAhhB8f8DcDYCGCAAIAAoAgxB8f8DcDYCDAwBCwsgACgCEARAA0AgACgCEEEQTwRAIAAgACgCEEEQazYCECAAIAAoAhQtAAAgACgCGGo2AhggACAAKAIYIAAoAgxqNgIMIAAgACgCFC0AASAAKAIYajYCGCAAIAAoAhggACgCDGo2AgwgACAAKAIULQACIAAoAhhqNgIYIAAgACgCGCAAKAIMajYCDCAAIAAoAhQtAAMgACgCGGo2AhggACAAKAIYIAAoAgxqNgIMIAAgACgCFC0ABCAAKAIYajYCGCAAIAAoAhggACgCDGo2AgwgACAAKAIULQAFIAAoAhhqNgIYIAAgACgCGCAAKAIMajYCDCAAIAAoAhQtAAYgACgCGGo2AhggACAAKAIYIAAoAgxqNgIMIAAgACgCFC0AByAAKAIYajYCGCAAIAAoAhggACgCDGo2AgwgACAAKAIULQAIIAAoAhhqNgIYIAAgACgCGCAAKAIMajYCDCAAIAAoAhQtAAkgACgCGGo2AhggACAAKAIYIAAoAgxqNgIMIAAgACgCFC0ACiAAKAIYajYCGCAAIAAoAhggACgCDGo2AgwgACAAKAIULQALIAAoAhhqNgIYIAAgACgCGCAAKAIMajYCDCAAIAAoAhQtAAwgACgCGGo2AhggACAAKAIYIAAoAgxqNgIMIAAgACgCFC0ADSAAKAIYajYCGCAAIAAoAhggACgCDGo2AgwgACAAKAIULQAOIAAoAhhqNgIYIAAgACgCGCAAKAIMajYCDCAAIAAoAhQtAA8gACgCGGo2AhggACAAKAIYIAAoAgxqNgIMIAAgACgCFEEQajYCFAwBCwsDQCAAIAAoAhAiAUEBazYCECABBEAgACAAKAIUIgFBAWo2AhQgACABLQAAIAAoAhhqNgIYIAAgACgCGCAAKAIMajYCDAwBCwsgACAAKAIYQfH/A3A2AhggACAAKAIMQfH/A3A2AgwLIAAgACgCGCAAKAIMQRB0cjYCHAsgACgCHCEAIANBEGokACAAC1IBAn9BkJcBKAIAIgEgAEEDakF8cS
ICaiEAAkAgAkEAIAAgAU0bDQAgAD8AQRB0SwRAIAAQDEUNAQtBkJcBIAA2AgAgAQ8LQbSbAUEwNgIAQX8LvAIBAX8jAEEgayIEJAAgBCAANgIYIAQgATcDECAEIAI2AgwgBCADNgIIIAQoAghFBEAgBCAEKAIYQQhqNgIICwJAIAQpAxAgBCgCGCkDMFoEQCAEKAIIQRJBABAUIARBADYCHAwBCwJAIAQoAgxBCHFFBEAgBCgCGCgCQCAEKQMQp0EEdGooAgQNAQsgBCgCGCgCQCAEKQMQp0EEdGooAgBFBEAgBCgCCEESQQAQFCAEQQA2AhwMAgsCQCAEKAIYKAJAIAQpAxCnQQR0ai0ADEEBcUUNACAEKAIMQQhxDQAgBCgCCEEXQQAQFCAEQQA2AhwMAgsgBCAEKAIYKAJAIAQpAxCnQQR0aigCADYCHAwBCyAEIAQoAhgoAkAgBCkDEKdBBHRqKAIENgIcCyAEKAIcIQAgBEEgaiQAIAALhAEBAX8jAEEQayIBJAAgASAANgIIIAFB2AAQGCIANgIEAkAgAEUEQCABQQA2AgwMAQsCQCABKAIIBEAgASgCBCABKAIIQdgAEBkaDAELIAEoAgQQUwsgASgCBEEANgIAIAEoAgRBAToABSABIAEoAgQ2AgwLIAEoAgwhACABQRBqJAAgAAtvAQF/IwBBIGsiAyQAIAMgADYCGCADIAE2AhQgAyACNgIQIAMgAygCGCADKAIQrRAeNgIMAkAgAygCDEUEQCADQX82AhwMAQsgAygCDCADKAIUIAMoAhAQGRogA0EANgIcCyADKAIcGiADQSBqJAALogEBAX8jAEEgayIEJAAgBCAANgIYIAQgATcDECAEIAI2AgwgBCADNgIIIAQgBCgCDCAEKQMQECkiADYCBAJAIABFBEAgBCgCCEEOQQAQFCAEQQA2AhwMAQsgBCgCGCAEKAIEKAIEIAQpAxAgBCgCCBBkQQBIBEAgBCgCBBAWIARBADYCHAwBCyAEIAQoAgQ2AhwLIAQoAhwhACAEQSBqJAAgAAugAQEBfyMAQSBrIgMkACADIAA2AhQgAyABNgIQIAMgAjcDCCADIAMoAhA2AgQCQCADKQMIQghUBEAgA0J/NwMYDAELIwBBEGsiACADKAIUNgIMIAAoAgwoAgAhACADKAIEIAA2AgAjAEEQayIAIAMoAhQ2AgwgACgCDCgCBCEAIAMoAgQgADYCBCADQgg3AxgLIAMpAxghAiADQSBqJAAgAguDAQIDfwF+AkAgAEKAgICAEFQEQCAAIQUMAQsDQCABQQFrIgEgACAAQgqAIgVCCn59p0EwcjoAACAAQv////+fAVYhAiAFIQAgAg0ACwsgBaciAgRAA0AgAUEBayIBIAIgAkEKbiIDQQpsa0EwcjoAACACQQlLIQQgAyECIAQNAAsLIAELPwEBfyMAQRBrIgIgADYCDCACIAE2AgggAigCDARAIAIoAgwgAigCCCgCADYCACACKAIMIAIoAggoAgQ2AgQLC9IIAQJ/IwBBIGsiBCQAIAQgADYCGCAEIAE2AhQgBCACNgIQIAQgAzYCDAJAIAQoAhhFBEAgBCgCFARAIAQoAhRBADYCAAsgBEGVFTYCHAwBCyAEKAIQQcAAcUUEQCAEKAIYKAIIRQRAIAQoAhhBABA6GgsCQAJAAkAgBCgCEEGAAXFFDQAgBCgCGCgCCEEBRg0AIAQoAhgoAghBAkcNAQsgBCgCGCgCCEEERw0BCyAEKAIYKAIMRQRAIAQoAhgoAgAhASAEKAIYLwEEIQIgBCgCGEEQaiEDIAQoAgwhBSMAQTBrIgAkACAAIAE2AiggACACNgIkIAAgAzYCICAAIAU2AhwgACAAKAIoNgIYAkAgACgCJEUEQCAAKAIgBEAgACgCIEEANgIACyAAQQA2AiwMAQsgAEEBNgIQIABBADYCDANAIAAoAgwgACgCJEkEQCMAQRBrIgEgACgCGCAAKAIMai0AAEEBdEGgFWovAQA2AggCQCABKAIIQYABSQRAIAFBATYCDAwBCyABKAIIQYAQSQRAIAFBAjYCDAwBCyABKAIIQYCABEkEQCABQQM2AgwMAQsgAUEENgIMCyAAIAEoAgwgACgCEGo2AhAgACAAKAIMQQFqNgIMDAELCyAAIAAoAhAQGCIBNgIUIAFFBEAgACgCHEEOQQAQFCAAQQA2AiwMAQsgAEEANgIIIABBADYCDANAIAAoAgwgACgCJEkEQCAAKAIUIAAoAghqIQIjAEEQayIBIAAoAhggACgCDGotAABBAXRBoBVqLwEANgIIIAEgAjYCBAJAIAEoAghBgAFJBEAgASgCBCABKAIIOgAAIAFBATYCDAwBCyABKAIIQYAQSQRAIAEoAgQgASgCCEEGdkEfcUHAAXI6AAAgASgCBCABKAIIQT9xQYABcjoAASABQQI2AgwMAQsgASgCCEGAgARJBEAgASgCBCABKAIIQQx2QQ9xQeABcjoAACABKAIEIAEoAghBBnZBP3FBgAFyOgABIAEoAgQgASgCCEE/cUGAAXI6AAIgAUEDNgIMDAELIAEoAgQgASgCCEESdkEHcUHwAXI6AAAgASgCBCABKAIIQQx2QT9xQYABcjoAASABKAIEIAEoAghBBnZBP3FBgAFyOgACIAEoAgQgASgCCEE/cUGAAXI6AAMgAUEENgIMCyAAIAEoAgwgACgCCGo2AgggACAAKAIMQQFqNgIMDAELCyAAKAIUIAAoAhBBAWtqQQA6AAAgACgCIARAIAAoAiAgACgCEEEBazYCAAsgACAAKAIUNgIsCyAAKAIsIQEgAEEwaiQAIAQoAhggATYCDCABRQRAIARBADYCHAwECwsgBCgCFARAIAQoAhQgBCgCGCgCEDYCAAsgBCAEKAIYKAIMNgIcDAILCyAEKAIUBEAgBCgCFCAEKAIYLwEENgIACyAEIAQoAhgoAgA2AhwLIAQoAhwhACAEQSBqJAAgAAs5AQF/IwBBEGsiASAANgIMQQAhACABKAIMLQAAQQFxBH8gASgCDCkDECABKAIMKQMIUQVBAAtBAXEL7wIBAX8jAEEQayIBJAAgASAANgIIAkAgASgCCC0AKEEBcQRAIAFBfzYCDAwBCyABKAIIKAIkQQNGBEAgASgCCEEMakEXQQAQFCABQX82AgwMAQsCQCABKAIIKAIgBEACfyMAQRBrIgAgASgCCDYCDCAAKAIMKQMYQsAAg1ALBEAgASgCCEEMakEdQQAQFCABQX82AgwMAwsMAQsgASgCCCgCAARAIAEoAggoAgAQSEEASARAIAEoAghBDGogASgCCCgCABAXIAFBfzYCDAwDCwsgASgCCEEAQgBBABAgQgBTBEAgASgCCCgCAARAIAEoAggoAgAQLxoLIAFBfzYCDAwCCwsgASgCCEEAOgA0IAEoAghBADoANSMAQRBrIgAgASgCCEEMajYCDCAAKAIMBEAgACgCDEEANgIAIAAoAgxBADYCBAsgASgCCCIAIAAoAiBBAWo2AiAgAUEANgIMC
yABKAIMIQAgAUEQaiQAIAALdQIBfwF+IwBBEGsiASQAIAEgADYCBAJAIAEoAgQtAChBAXEEQCABQn83AwgMAQsgASgCBCgCIEUEQCABKAIEQQxqQRJBABAUIAFCfzcDCAwBCyABIAEoAgRBAEIAQQcQIDcDCAsgASkDCCECIAFBEGokACACC50BAQF/IwBBEGsiASAANgIIAkACQAJAIAEoAghFDQAgASgCCCgCIEUNACABKAIIKAIkDQELIAFBATYCDAwBCyABIAEoAggoAhw2AgQCQAJAIAEoAgRFDQAgASgCBCgCACABKAIIRw0AIAEoAgQoAgRBtP4ASQ0AIAEoAgQoAgRB0/4ATQ0BCyABQQE2AgwMAQsgAUEANgIMCyABKAIMC4ABAQN/IwBBEGsiAiAANgIMIAIgATYCCCACKAIIQQh2IQEgAigCDCgCCCEDIAIoAgwiBCgCFCEAIAQgAEEBajYCFCAAIANqIAE6AAAgAigCCEH/AXEhASACKAIMKAIIIQMgAigCDCICKAIUIQAgAiAAQQFqNgIUIAAgA2ogAToAAAuZBQEBfyMAQUBqIgQkACAEIAA2AjggBCABNwMwIAQgAjYCLCAEIAM2AiggBEHIABAYIgA2AiQCQCAARQRAIARBADYCPAwBCyAEKAIkQgA3AzggBCgCJEIANwMYIAQoAiRCADcDMCAEKAIkQQA2AgAgBCgCJEEANgIEIAQoAiRCADcDCCAEKAIkQgA3AxAgBCgCJEEANgIoIAQoAiRCADcDIAJAIAQpAzBQBEBBCBAYIQAgBCgCJCAANgIEIABFBEAgBCgCJBAVIAQoAihBDkEAEBQgBEEANgI8DAMLIAQoAiQoAgRCADcDAAwBCyAEKAIkIAQpAzBBABDCAUEBcUUEQCAEKAIoQQ5BABAUIAQoAiQQMiAEQQA2AjwMAgsgBEIANwMIIARCADcDGCAEQgA3AxADQCAEKQMYIAQpAzBUBEAgBCgCOCAEKQMYp0EEdGopAwhQRQRAIAQoAjggBCkDGKdBBHRqKAIARQRAIAQoAihBEkEAEBQgBCgCJBAyIARBADYCPAwFCyAEKAIkKAIAIAQpAxCnQQR0aiAEKAI4IAQpAxinQQR0aigCADYCACAEKAIkKAIAIAQpAxCnQQR0aiAEKAI4IAQpAxinQQR0aikDCDcDCCAEKAIkKAIEIAQpAxinQQN0aiAEKQMINwMAIAQgBCgCOCAEKQMYp0EEdGopAwggBCkDCHw3AwggBCAEKQMQQgF8NwMQCyAEIAQpAxhCAXw3AxgMAQsLIAQoAiQgBCkDEDcDCCAEKAIkIAQoAiwEfkIABSAEKAIkKQMICzcDGCAEKAIkKAIEIAQoAiQpAwinQQN0aiAEKQMINwMAIAQoAiQgBCkDCDcDMAsgBCAEKAIkNgI8CyAEKAI8IQAgBEFAayQAIAALngEBAX8jAEEgayIEJAAgBCAANgIYIAQgATcDECAEIAI2AgwgBCADNgIIIAQgBCgCGCAEKQMQIAQoAgwgBCgCCBA/IgA2AgQCQCAARQRAIARBADYCHAwBCyAEIAQoAgQoAjBBACAEKAIMIAQoAggQRiIANgIAIABFBEAgBEEANgIcDAELIAQgBCgCADYCHAsgBCgCHCEAIARBIGokACAAC5wIAQt/IABFBEAgARAYDwsgAUFATwRAQbSbAUEwNgIAQQAPCwJ/QRAgAUELakF4cSABQQtJGyEGIABBCGsiBSgCBCIJQXhxIQQCQCAJQQNxRQRAQQAgBkGAAkkNAhogBkEEaiAETQRAIAUhAiAEIAZrQcSfASgCAEEBdE0NAgtBAAwCCyAEIAVqIQcCQCAEIAZPBEAgBCAGayIDQRBJDQEgBSAJQQFxIAZyQQJyNgIEIAUgBmoiAiADQQNyNgIEIAcgBygCBEEBcjYCBCACIAMQxgEMAQsgB0H8mwEoAgBGBEBB8JsBKAIAIARqIgQgBk0NAiAFIAlBAXEgBnJBAnI2AgQgBSAGaiIDIAQgBmsiAkEBcjYCBEHwmwEgAjYCAEH8mwEgAzYCAAwBCyAHQfibASgCAEYEQEHsmwEoAgAgBGoiAyAGSQ0CAkAgAyAGayICQRBPBEAgBSAJQQFxIAZyQQJyNgIEIAUgBmoiBCACQQFyNgIEIAMgBWoiAyACNgIAIAMgAygCBEF+cTYCBAwBCyAFIAlBAXEgA3JBAnI2AgQgAyAFaiICIAIoAgRBAXI2AgRBACECQQAhBAtB+JsBIAQ2AgBB7JsBIAI2AgAMAQsgBygCBCIDQQJxDQEgA0F4cSAEaiIKIAZJDQEgCiAGayEMAkAgA0H/AU0EQCAHKAIIIgQgA0EDdiICQQN0QYycAWpGGiAEIAcoAgwiA0YEQEHkmwFB5JsBKAIAQX4gAndxNgIADAILIAQgAzYCDCADIAQ2AggMAQsgBygCGCELAkAgByAHKAIMIghHBEAgBygCCCICQfSbASgCAEkaIAIgCDYCDCAIIAI2AggMAQsCQCAHQRRqIgQoAgAiAg0AIAdBEGoiBCgCACICDQBBACEIDAELA0AgBCEDIAIiCEEUaiIEKAIAIgINACAIQRBqIQQgCCgCECICDQALIANBADYCAAsgC0UNAAJAIAcgBygCHCIDQQJ0QZSeAWoiAigCAEYEQCACIAg2AgAgCA0BQeibAUHomwEoAgBBfiADd3E2AgAMAgsgC0EQQRQgCygCECAHRhtqIAg2AgAgCEUNAQsgCCALNgIYIAcoAhAiAgRAIAggAjYCECACIAg2AhgLIAcoAhQiAkUNACAIIAI2AhQgAiAINgIYCyAMQQ9NBEAgBSAJQQFxIApyQQJyNgIEIAUgCmoiAiACKAIEQQFyNgIEDAELIAUgCUEBcSAGckECcjYCBCAFIAZqIgMgDEEDcjYCBCAFIApqIgIgAigCBEEBcjYCBCADIAwQxgELIAUhAgsgAgsiAgRAIAJBCGoPCyABEBgiBUUEQEEADwsgBSAAQXxBeCAAQQRrKAIAIgJBA3EbIAJBeHFqIgIgASABIAJLGxAZGiAAEBUgBQtDAQN/AkAgAkUNAANAIAAtAAAiBCABLQAAIgVGBEAgAUEBaiEBIABBAWohACACQQFrIgINAQwCCwsgBCAFayEDCyADC4wDAQF/IwBBIGsiBCQAIAQgADYCGCAEIAE7ARYgBCACNgIQIAQgAzYCDAJAIAQvARZFBEAgBEEANgIcDAELAkACQAJAAkAgBCgCEEGAMHEiAARAIABBgBBGDQEgAEGAIEYNAgwDCyAEQQA2AgQMAwsgBEECNgIEDAILIARBBDYCBAwBCyAEKAIMQRJBABAUIARBADYCHAwBCyAEQRQQGCIANgIIIABFBEAgBCgCDEEOQQAQFCAEQQA2AhwMAQsgBC8BFkEBahAYIQAgBCgCCCAANgIAIABFBEAgBCgCCBAVIARBADYCHAwBCyAEKAIIKAIAIAQoAhggBC8BFhAZGiAEKAIIKAIAIAQvARZqQQA6AAAgBCgCCCAELwEWOwEEIAQoAghB
ADYCCCAEKAIIQQA2AgwgBCgCCEEANgIQIAQoAgQEQCAEKAIIIAQoAgQQOkEFRgRAIAQoAggQJCAEKAIMQRJBABAUIARBADYCHAwCCwsgBCAEKAIINgIcCyAEKAIcIQAgBEEgaiQAIAALNwEBfyMAQRBrIgEgADYCCAJAIAEoAghFBEAgAUEAOwEODAELIAEgASgCCC8BBDsBDgsgAS8BDguJAgEBfyMAQRBrIgEkACABIAA2AgwCQCABKAIMLQAFQQFxBEAgASgCDCgCAEECcUUNAQsgASgCDCgCMBAkIAEoAgxBADYCMAsCQCABKAIMLQAFQQFxBEAgASgCDCgCAEEIcUUNAQsgASgCDCgCNBAjIAEoAgxBADYCNAsCQCABKAIMLQAFQQFxBEAgASgCDCgCAEEEcUUNAQsgASgCDCgCOBAkIAEoAgxBADYCOAsCQCABKAIMLQAFQQFxBEAgASgCDCgCAEGAAXFFDQELIAEoAgwoAlQEQCABKAIMKAJUQQAgASgCDCgCVBAuEDMLIAEoAgwoAlQQFSABKAIMQQA2AlQLIAFBEGokAAvxAQEBfyMAQRBrIgEgADYCDCABKAIMQQA2AgAgASgCDEEAOgAEIAEoAgxBADoABSABKAIMQQE6AAYgASgCDEG/BjsBCCABKAIMQQo7AQogASgCDEEAOwEMIAEoAgxBfzYCECABKAIMQQA2AhQgASgCDEEANgIYIAEoAgxCADcDICABKAIMQgA3AyggASgCDEEANgIwIAEoAgxBADYCNCABKAIMQQA2AjggASgCDEEANgI8IAEoAgxBADsBQCABKAIMQYCA2I14NgJEIAEoAgxCADcDSCABKAIMQQA7AVAgASgCDEEAOwFSIAEoAgxBADYCVAvSEwEBfyMAQbABayIDJAAgAyAANgKoASADIAE2AqQBIAMgAjYCoAEgA0EANgKQASADIAMoAqQBKAIwQQAQOjYClAEgAyADKAKkASgCOEEAEDo2ApgBAkACQAJAAkAgAygClAFBAkYEQCADKAKYAUEBRg0BCyADKAKUAUEBRgRAIAMoApgBQQJGDQELIAMoApQBQQJHDQEgAygCmAFBAkcNAQsgAygCpAEiACAALwEMQYAQcjsBDAwBCyADKAKkASIAIAAvAQxB/+8DcTsBDCADKAKUAUECRgRAIANB9eABIAMoAqQBKAIwIAMoAqgBQQhqEI4BNgKQASADKAKQAUUEQCADQX82AqwBDAMLCwJAIAMoAqABQYACcQ0AIAMoApgBQQJHDQAgA0H1xgEgAygCpAEoAjggAygCqAFBCGoQjgE2AkggAygCSEUEQCADKAKQARAjIANBfzYCrAEMAwsgAygCSCADKAKQATYCACADIAMoAkg2ApABCwsCQCADKAKkAS8BUkUEQCADKAKkASIAIAAvAQxB/v8DcTsBDAwBCyADKAKkASIAIAAvAQxBAXI7AQwLIAMgAygCpAEgAygCoAEQZUEBcToAhgEgAyADKAKgAUGACnFBgApHBH8gAy0AhgEFQQELQQFxOgCHASADAn9BASADKAKkAS8BUkGBAkYNABpBASADKAKkAS8BUkGCAkYNABogAygCpAEvAVJBgwJGC0EBcToAhQEgAy0AhwFBAXEEQCADIANBIGpCHBApNgIcIAMoAhxFBEAgAygCqAFBCGpBDkEAEBQgAygCkAEQIyADQX82AqwBDAILAkAgAygCoAFBgAJxBEACQCADKAKgAUGACHENACADKAKkASkDIEL/////D1YNACADKAKkASkDKEL/////D1gNAgsgAygCHCADKAKkASkDKBAtIAMoAhwgAygCpAEpAyAQLQwBCwJAAkAgAygCoAFBgAhxDQAgAygCpAEpAyBC/////w9WDQAgAygCpAEpAyhC/////w9WDQAgAygCpAEpA0hC/////w9YDQELIAMoAqQBKQMoQv////8PWgRAIAMoAhwgAygCpAEpAygQLQsgAygCpAEpAyBC/////w9aBEAgAygCHCADKAKkASkDIBAtCyADKAKkASkDSEL/////D1oEQCADKAIcIAMoAqQBKQNIEC0LCwsCfyMAQRBrIgAgAygCHDYCDCAAKAIMLQAAQQFxRQsEQCADKAKoAUEIakEUQQAQFCADKAIcEBYgAygCkAEQIyADQX82AqwBDAILIANBAQJ/IwBBEGsiACADKAIcNgIMAn4gACgCDC0AAEEBcQRAIAAoAgwpAxAMAQtCAAunQf//A3ELIANBIGpBgAYQVTYCjAEgAygCHBAWIAMoAowBIAMoApABNgIAIAMgAygCjAE2ApABCyADLQCFAUEBcQRAIAMgA0EVakIHECk2AhAgAygCEEUEQCADKAKoAUEIakEOQQAQFCADKAKQARAjIANBfzYCrAEMAgsgAygCEEECEB8gAygCEEG9EkECEEEgAygCECADKAKkAS8BUkH/AXEQlgEgAygCECADKAKkASgCEEH//wNxEB8CfyMAQRBrIgAgAygCEDYCDCAAKAIMLQAAQQFxRQsEQCADKAKoAUEIakEUQQAQFCADKAIQEBYgAygCkAEQIyADQX82AqwBDAILIANBgbICQQcgA0EVakGABhBVNgIMIAMoAhAQFiADKAIMIAMoApABNgIAIAMgAygCDDYCkAELIAMgA0HQAGpCLhApIgA2AkwgAEUEQCADKAKoAUEIakEOQQAQFCADKAKQARAjIANBfzYCrAEMAQsgAygCTEHxEkH2EiADKAKgAUGAAnEbQQQQQSADKAKgAUGAAnFFBEAgAygCTCADLQCGAUEBcQR/QS0FIAMoAqQBLwEIC0H//wNxEB8LIAMoAkwgAy0AhgFBAXEEf0EtBSADKAKkAS8BCgtB//8DcRAfIAMoAkwgAygCpAEvAQwQHwJAIAMtAIUBQQFxBEAgAygCTEHjABAfDAELIAMoAkwgAygCpAEoAhBB//8DcRAfCyADKAKkASgCFCADQZ4BaiADQZwBahCNASADKAJMIAMvAZ4BEB8gAygCTCADLwGcARAfAkACQCADLQCFAUEBcUUNACADKAKkASkDKEIUWg0AIAMoAkxBABAhDAELIAMoAkwgAygCpAEoAhgQIQsCQAJAIAMoAqABQYACcUGAAkcNACADKAKkASkDIEL/////D1QEQCADKAKkASkDKEL/////D1QNAQsgAygCTEF/ECEgAygCTEF/ECEMAQsCQCADKAKkASkDIEL/////D1QEQCADKAJMIAMoAqQBKQMgpxAhDAELIAMoAkxBfxAhCwJAIAMoAqQBKQMoQv////8PVARAIAMoAkwgAygCpAEpAyinECEMAQsgAygCTEF/ECELCyADKAJMIAMoAqQBKAIwEFFB//8DcRAfIAMgAygCpAEoAjQgAygCoAEQkgFB//8DcSADKAKQAUGABhCSAUH//wNxajYCiAEgAygCTCADKAKIAUH//wNxEB8gAygCoAFBgAJxRQRAIAMoAkwgAygCpAEoAjgQUUH//wNxEB8gAygCTCADKAKkASgCPEH
//wNxEB8gAygCTCADKAKkAS8BQBAfIAMoAkwgAygCpAEoAkQQIQJAIAMoAqQBKQNIQv////8PVARAIAMoAkwgAygCpAEpA0inECEMAQsgAygCTEF/ECELCwJ/IwBBEGsiACADKAJMNgIMIAAoAgwtAABBAXFFCwRAIAMoAqgBQQhqQRRBABAUIAMoAkwQFiADKAKQARAjIANBfzYCrAEMAQsgAygCqAEgA0HQAGoCfiMAQRBrIgAgAygCTDYCDAJ+IAAoAgwtAABBAXEEQCAAKAIMKQMQDAELQgALCxA2QQBIBEAgAygCTBAWIAMoApABECMgA0F/NgKsAQwBCyADKAJMEBYgAygCpAEoAjAEQCADKAKoASADKAKkASgCMBCFAUEASARAIAMoApABECMgA0F/NgKsAQwCCwsgAygCkAEEQCADKAKoASADKAKQAUGABhCRAUEASARAIAMoApABECMgA0F/NgKsAQwCCwsgAygCkAEQIyADKAKkASgCNARAIAMoAqgBIAMoAqQBKAI0IAMoAqABEJEBQQBIBEAgA0F/NgKsAQwCCwsgAygCoAFBgAJxRQRAIAMoAqQBKAI4BEAgAygCqAEgAygCpAEoAjgQhQFBAEgEQCADQX82AqwBDAMLCwsgAyADLQCHAUEBcTYCrAELIAMoAqwBIQAgA0GwAWokACAAC+ACAQF/IwBBIGsiBCQAIAQgADsBGiAEIAE7ARggBCACNgIUIAQgAzYCECAEQRAQGCIANgIMAkAgAEUEQCAEQQA2AhwMAQsgBCgCDEEANgIAIAQoAgwgBCgCEDYCBCAEKAIMIAQvARo7AQggBCgCDCAELwEYOwEKAkAgBC8BGARAIAQoAhQhASAELwEYIQIjAEEgayIAJAAgACABNgIYIAAgAjYCFCAAQQA2AhACQCAAKAIURQRAIABBADYCHAwBCyAAIAAoAhQQGDYCDCAAKAIMRQRAIAAoAhBBDkEAEBQgAEEANgIcDAELIAAoAgwgACgCGCAAKAIUEBkaIAAgACgCDDYCHAsgACgCHCEBIABBIGokACABIQAgBCgCDCAANgIMIABFBEAgBCgCDBAVIARBADYCHAwDCwwBCyAEKAIMQQA2AgwLIAQgBCgCDDYCHAsgBCgCHCEAIARBIGokACAAC5EBAQV/IAAoAkxBAE4hAyAAKAIAQQFxIgRFBEAgACgCNCIBBEAgASAAKAI4NgI4CyAAKAI4IgIEQCACIAE2AjQLIABBrKABKAIARgRAQaygASACNgIACwsgABClASEBIAAgACgCDBEAACECIAAoAmAiBQRAIAUQFQsCQCAERQRAIAAQFQwBCyADRQ0ACyABIAJyC/kBAQF/IwBBIGsiAiQAIAIgADYCHCACIAE5AxACQCACKAIcRQ0AIAICfAJ8IAIrAxBEAAAAAAAAAABkBEAgAisDEAwBC0QAAAAAAAAAAAtEAAAAAAAA8D9jBEACfCACKwMQRAAAAAAAAAAAZARAIAIrAxAMAQtEAAAAAAAAAAALDAELRAAAAAAAAPA/CyACKAIcKwMoIAIoAhwrAyChoiACKAIcKwMgoDkDCCACKAIcKwMQIAIrAwggAigCHCsDGKFjRQ0AIAIoAhwoAgAgAisDCCACKAIcKAIMIAIoAhwoAgQRFgAgAigCHCACKwMIOQMYCyACQSBqJAAL4QUCAn8BfiMAQTBrIgQkACAEIAA2AiQgBCABNgIgIAQgAjYCHCAEIAM2AhgCQCAEKAIkRQRAIARCfzcDKAwBCyAEKAIgRQRAIAQoAhhBEkEAEBQgBEJ/NwMoDAELIAQoAhxBgyBxBEAgBEEVQRYgBCgCHEEBcRs2AhQgBEIANwMAA0AgBCkDACAEKAIkKQMwVARAIAQgBCgCJCAEKQMAIAQoAhwgBCgCGBBNNgIQIAQoAhAEQCAEKAIcQQJxBEAgBAJ/IAQoAhAiARAuQQFqIQADQEEAIABFDQEaIAEgAEEBayIAaiICLQAAQS9HDQALIAILNgIMIAQoAgwEQCAEIAQoAgxBAWo2AhALCyAEKAIgIAQoAhAgBCgCFBEDAEUEQCMAQRBrIgAgBCgCGDYCDCAAKAIMBEAgACgCDEEANgIAIAAoAgxBADYCBAsgBCAEKQMANwMoDAULCyAEIAQpAwBCAXw3AwAMAQsLIAQoAhhBCUEAEBQgBEJ/NwMoDAELIAQoAiQoAlAhASAEKAIgIQIgBCgCHCEDIAQoAhghBSMAQTBrIgAkACAAIAE2AiQgACACNgIgIAAgAzYCHCAAIAU2AhgCQAJAIAAoAiQEQCAAKAIgDQELIAAoAhhBEkEAEBQgAEJ/NwMoDAELIAAoAiQpAwhCAFIEQCAAIAAoAiAQczYCFCAAIAAoAhQgACgCJCgCAHA2AhAgACAAKAIkKAIQIAAoAhBBAnRqKAIANgIMA0ACQCAAKAIMRQ0AIAAoAiAgACgCDCgCABBbBEAgACAAKAIMKAIYNgIMDAIFIAAoAhxBCHEEQCAAKAIMKQMIQn9SBEAgACAAKAIMKQMINwMoDAYLDAILIAAoAgwpAxBCf1IEQCAAIAAoAgwpAxA3AygMBQsLCwsLIAAoAhhBCUEAEBQgAEJ/NwMoCyAAKQMoIQYgAEEwaiQAIAQgBjcDKAsgBCkDKCEGIARBMGokACAGC9QDAQF/IwBBIGsiAyQAIAMgADYCGCADIAE2AhQgAyACNgIQAkACQCADKAIYBEAgAygCFA0BCyADKAIQQRJBABAUIANBADoAHwwBCyADKAIYKQMIQgBSBEAgAyADKAIUEHM2AgwgAyADKAIMIAMoAhgoAgBwNgIIIANBADYCACADIAMoAhgoAhAgAygCCEECdGooAgA2AgQDQCADKAIEBEACQCADKAIEKAIcIAMoAgxHDQAgAygCFCADKAIEKAIAEFsNAAJAIAMoAgQpAwhCf1EEQAJAIAMoAgAEQCADKAIAIAMoAgQoAhg2AhgMAQsgAygCGCgCECADKAIIQQJ0aiADKAIEKAIYNgIACyADKAIEEBUgAygCGCIAIAApAwhCAX03AwgCQCADKAIYIgApAwi6IAAoAgC4RHsUrkfheoQ/omNFDQAgAygCGCgCAEGAAk0NACADKAIYIAMoAhgoAgBBAXYgAygCEBBaQQFxRQRAIANBADoAHwwICwsMAQsgAygCBEJ/NwMQCyADQQE6AB8MBAsgAyADKAIENgIAIAMgAygCBCgCGDYCBAwBCwsLIAMoAhBBCUEAEBQgA0EAOgAfCyADLQAfQQFxIQAgA0EgaiQAIAAL3wIBAX8jAEEwayIDJAAgAyAANgIoIAMgATYCJCADIAI2AiACQCADKAIkIAMoAigoAgBGBEAgA0EBOgAvDAELIAMgAygCJEEEEH8iADYCHCAARQRAIAMoAiBBDkEAEBQgA0EAOgAvDAELIAMoAigpAwhCAFIEQCADQQA2AhgDQCADKAIYIAMoAigoAgBPRQRAIAMgAygCKCgCECADKAIYQQJ0aigCADYCFANAIAMoAhQEQCADIAMoAh
QoAhg2AhAgAyADKAIUKAIcIAMoAiRwNgIMIAMoAhQgAygCHCADKAIMQQJ0aigCADYCGCADKAIcIAMoAgxBAnRqIAMoAhQ2AgAgAyADKAIQNgIUDAELCyADIAMoAhhBAWo2AhgMAQsLCyADKAIoKAIQEBUgAygCKCADKAIcNgIQIAMoAiggAygCJDYCACADQQE6AC8LIAMtAC9BAXEhACADQTBqJAAgAAtNAQJ/IAEtAAAhAgJAIAAtAAAiA0UNACACIANHDQADQCABLQABIQIgAC0AASIDRQ0BIAFBAWohASAAQQFqIQAgAiADRg0ACwsgAyACawvRCQECfyMAQSBrIgEkACABIAA2AhwgASABKAIcKAIsNgIQA0AgASABKAIcKAI8IAEoAhwoAnRrIAEoAhwoAmxrNgIUIAEoAhwoAmwgASgCECABKAIcKAIsQYYCa2pPBEAgASgCHCgCOCABKAIcKAI4IAEoAhBqIAEoAhAgASgCFGsQGRogASgCHCIAIAAoAnAgASgCEGs2AnAgASgCHCIAIAAoAmwgASgCEGs2AmwgASgCHCIAIAAoAlwgASgCEGs2AlwjAEEgayIAIAEoAhw2AhwgACAAKAIcKAIsNgIMIAAgACgCHCgCTDYCGCAAIAAoAhwoAkQgACgCGEEBdGo2AhADQCAAIAAoAhBBAmsiAjYCECAAIAIvAQA2AhQgACgCEAJ/IAAoAhQgACgCDE8EQCAAKAIUIAAoAgxrDAELQQALOwEAIAAgACgCGEEBayICNgIYIAINAAsgACAAKAIMNgIYIAAgACgCHCgCQCAAKAIYQQF0ajYCEANAIAAgACgCEEECayICNgIQIAAgAi8BADYCFCAAKAIQAn8gACgCFCAAKAIMTwRAIAAoAhQgACgCDGsMAQtBAAs7AQAgACAAKAIYQQFrIgI2AhggAg0ACyABIAEoAhAgASgCFGo2AhQLIAEoAhwoAgAoAgQEQCABIAEoAhwoAgAgASgCHCgCdCABKAIcKAI4IAEoAhwoAmxqaiABKAIUEHY2AhggASgCHCIAIAEoAhggACgCdGo2AnQgASgCHCgCdCABKAIcKAK0LWpBA08EQCABIAEoAhwoAmwgASgCHCgCtC1rNgIMIAEoAhwgASgCHCgCOCABKAIMai0AADYCSCABKAIcIAEoAhwoAlQgASgCHCgCOCABKAIMQQFqai0AACABKAIcKAJIIAEoAhwoAlh0c3E2AkgDQCABKAIcKAK0LQRAIAEoAhwgASgCHCgCVCABKAIcKAI4IAEoAgxBAmpqLQAAIAEoAhwoAkggASgCHCgCWHRzcTYCSCABKAIcKAJAIAEoAgwgASgCHCgCNHFBAXRqIAEoAhwoAkQgASgCHCgCSEEBdGovAQA7AQAgASgCHCgCRCABKAIcKAJIQQF0aiABKAIMOwEAIAEgASgCDEEBajYCDCABKAIcIgAgACgCtC1BAWs2ArQtIAEoAhwoAnQgASgCHCgCtC1qQQNPDQELCwsgASgCHCgCdEGGAkkEfyABKAIcKAIAKAIEQQBHBUEAC0EBcQ0BCwsgASgCHCgCwC0gASgCHCgCPEkEQCABIAEoAhwoAmwgASgCHCgCdGo2AggCQCABKAIcKALALSABKAIISQRAIAEgASgCHCgCPCABKAIIazYCBCABKAIEQYICSwRAIAFBggI2AgQLIAEoAhwoAjggASgCCGpBACABKAIEEDMgASgCHCABKAIIIAEoAgRqNgLALQwBCyABKAIcKALALSABKAIIQYICakkEQCABIAEoAghBggJqIAEoAhwoAsAtazYCBCABKAIEIAEoAhwoAjwgASgCHCgCwC1rSwRAIAEgASgCHCgCPCABKAIcKALALWs2AgQLIAEoAhwoAjggASgCHCgCwC1qQQAgASgCBBAzIAEoAhwiACABKAIEIAAoAsAtajYCwC0LCwsgAUEgaiQAC4YFAQF/IwBBIGsiBCQAIAQgADYCHCAEIAE2AhggBCACNgIUIAQgAzYCECAEQQM2AgwCQCAEKAIcKAK8LUEQIAQoAgxrSgRAIAQgBCgCEDYCCCAEKAIcIgAgAC8BuC0gBCgCCEH//wNxIAQoAhwoArwtdHI7AbgtIAQoAhwvAbgtQf8BcSEBIAQoAhwoAgghAiAEKAIcIgMoAhQhACADIABBAWo2AhQgACACaiABOgAAIAQoAhwvAbgtQQh2IQEgBCgCHCgCCCECIAQoAhwiAygCFCEAIAMgAEEBajYCFCAAIAJqIAE6AAAgBCgCHCAEKAIIQf//A3FBECAEKAIcKAK8LWt1OwG4LSAEKAIcIgAgACgCvC0gBCgCDEEQa2o2ArwtDAELIAQoAhwiACAALwG4LSAEKAIQQf//A3EgBCgCHCgCvC10cjsBuC0gBCgCHCIAIAQoAgwgACgCvC1qNgK8LQsgBCgCHBC9ASAEKAIUQf8BcSEBIAQoAhwoAgghAiAEKAIcIgMoAhQhACADIABBAWo2AhQgACACaiABOgAAIAQoAhRB//8DcUEIdiEBIAQoAhwoAgghAiAEKAIcIgMoAhQhACADIABBAWo2AhQgACACaiABOgAAIAQoAhRBf3NB/wFxIQEgBCgCHCgCCCECIAQoAhwiAygCFCEAIAMgAEEBajYCFCAAIAJqIAE6AAAgBCgCFEF/c0H//wNxQQh2IQEgBCgCHCgCCCECIAQoAhwiAygCFCEAIAMgAEEBajYCFCAAIAJqIAE6AAAgBCgCHCgCCCAEKAIcKAIUaiAEKAIYIAQoAhQQGRogBCgCHCIAIAQoAhQgACgCFGo2AhQgBEEgaiQAC6sBAQF/IwBBEGsiASQAIAEgADYCDCABKAIMKAIIBEAgASgCDCgCCBAbIAEoAgxBADYCCAsCQCABKAIMKAIERQ0AIAEoAgwoAgQoAgBBAXFFDQAgASgCDCgCBCgCEEF+Rw0AIAEoAgwoAgQiACAAKAIAQX5xNgIAIAEoAgwoAgQoAgBFBEAgASgCDCgCBBA3IAEoAgxBADYCBAsLIAEoAgxBADoADCABQRBqJAAL8QMBAX8jAEHQAGsiCCQAIAggADYCSCAIIAE3A0AgCCACNwM4IAggAzYCNCAIIAQ6ADMgCCAFNgIsIAggBjcDICAIIAc2AhwCQAJAAkAgCCgCSEUNACAIKQNAIAgpA0AgCCkDOHxWDQAgCCgCLA0BIAgpAyBQDQELIAgoAhxBEkEAEBQgCEEANgJMDAELIAhBgAEQGCIANgIYIABFBEAgCCgCHEEOQQAQFCAIQQA2AkwMAQsgCCgCGCAIKQNANwMAIAgoAhggCCkDQCAIKQM4fDcDCCAIKAIYQShqEDsgCCgCGCAILQAzOgBgIAgoAhggCCgCLDYCECAIKAIYIAgpAyA3AxgjAEEQayIAIAgoAhhB5ABqNgIMIAAoAgxBADYCACAAKAIMQQA2AgQgACgCDEEANgIIIwBBEGsiACAIKAJINgIMIAAoAgwpAxhC/4EBgyEBIAhBfzYCCCAIQQc2AgQgCEEONgIAQRAgC
BA0IAGEIQEgCCgCGCABNwNwIAgoAhggCCgCGCkDcELAAINCAFI6AHggCCgCNARAIAgoAhhBKGogCCgCNCAIKAIcEIQBQQBIBEAgCCgCGBAVIAhBADYCTAwCCwsgCCAIKAJIQQEgCCgCGCAIKAIcEIEBNgJMCyAIKAJMIQAgCEHQAGokACAAC9MEAQJ/IwBBMGsiAyQAIAMgADYCJCADIAE3AxggAyACNgIUAkAgAygCJCgCQCADKQMYp0EEdGooAgBFBEAgAygCFEEUQQAQFCADQgA3AygMAQsgAyADKAIkKAJAIAMpAxinQQR0aigCACkDSDcDCCADKAIkKAIAIAMpAwhBABAnQQBIBEAgAygCFCADKAIkKAIAEBcgA0IANwMoDAELIAMoAiQoAgAhAiADKAIUIQQjAEEwayIAJAAgACACNgIoIABBgAI7ASYgACAENgIgIAAgAC8BJkGAAnFBAEc6ABsgAEEeQS4gAC0AG0EBcRs2AhwCQCAAKAIoQRpBHCAALQAbQQFxG6xBARAnQQBIBEAgACgCICAAKAIoEBcgAEF/NgIsDAELIAAgACgCKEEEQQYgAC0AG0EBcRusIABBDmogACgCIBBCIgI2AgggAkUEQCAAQX82AiwMAQsgAEEANgIUA0AgACgCFEECQQMgAC0AG0EBcRtIBEAgACAAKAIIEB1B//8DcSAAKAIcajYCHCAAIAAoAhRBAWo2AhQMAQsLIAAoAggQR0EBcUUEQCAAKAIgQRRBABAUIAAoAggQFiAAQX82AiwMAQsgACgCCBAWIAAgACgCHDYCLAsgACgCLCECIABBMGokACADIAIiADYCBCAAQQBIBEAgA0IANwMoDAELIAMpAwggAygCBK18Qv///////////wBWBEAgAygCFEEEQRYQFCADQgA3AygMAQsgAyADKQMIIAMoAgStfDcDKAsgAykDKCEBIANBMGokACABC20BAX8jAEEgayIEJAAgBCAANgIYIAQgATYCFCAEIAI2AhAgBCADNgIMAkAgBCgCGEUEQCAEQQA2AhwMAQsgBCAEKAIUIAQoAhAgBCgCDCAEKAIYQQhqEIEBNgIcCyAEKAIcIQAgBEEgaiQAIAALVQEBfyMAQRBrIgEkACABIAA2AgwCQAJAIAEoAgwoAiRBAUYNACABKAIMKAIkQQJGDQAMAQsgASgCDEEAQgBBChAgGiABKAIMQQA2AiQLIAFBEGokAAv/AgEBfyMAQTBrIgUkACAFIAA2AiggBSABNgIkIAUgAjYCICAFIAM6AB8gBSAENgIYAkACQCAFKAIgDQAgBS0AH0EBcQ0AIAVBADYCLAwBCyAFIAUoAiAgBS0AH0EBcWoQGDYCFCAFKAIURQRAIAUoAhhBDkEAEBQgBUEANgIsDAELAkAgBSgCKARAIAUgBSgCKCAFKAIgrRAeNgIQIAUoAhBFBEAgBSgCGEEOQQAQFCAFKAIUEBUgBUEANgIsDAMLIAUoAhQgBSgCECAFKAIgEBkaDAELIAUoAiQgBSgCFCAFKAIgrSAFKAIYEGRBAEgEQCAFKAIUEBUgBUEANgIsDAILCyAFLQAfQQFxBEAgBSgCFCAFKAIgakEAOgAAIAUgBSgCFDYCDANAIAUoAgwgBSgCFCAFKAIgakkEQCAFKAIMLQAARQRAIAUoAgxBIDoAAAsgBSAFKAIMQQFqNgIMDAELCwsgBSAFKAIUNgIsCyAFKAIsIQAgBUEwaiQAIAALwgEBAX8jAEEwayIEJAAgBCAANgIoIAQgATYCJCAEIAI3AxggBCADNgIUAkAgBCkDGEL///////////8AVgRAIAQoAhRBFEEAEBQgBEF/NgIsDAELIAQgBCgCKCAEKAIkIAQpAxgQKyICNwMIIAJCAFMEQCAEKAIUIAQoAigQFyAEQX82AiwMAQsgBCkDCCAEKQMYUwRAIAQoAhRBEUEAEBQgBEF/NgIsDAELIARBADYCLAsgBCgCLCEAIARBMGokACAAC3cBAX8jAEEQayICIAA2AgggAiABNgIEAkACQAJAIAIoAggpAyhC/////w9aDQAgAigCCCkDIEL/////D1oNACACKAIEQYAEcUUNASACKAIIKQNIQv////8PVA0BCyACQQE6AA8MAQsgAkEAOgAPCyACLQAPQQFxC/4BAQF/IwBBIGsiBSQAIAUgADYCGCAFIAE2AhQgBSACOwESIAVBADsBECAFIAM2AgwgBSAENgIIIAVBADYCBAJAA0AgBSgCGARAAkAgBSgCGC8BCCAFLwESRw0AIAUoAhgoAgQgBSgCDHFBgAZxRQ0AIAUoAgQgBS8BEEgEQCAFIAUoAgRBAWo2AgQMAQsgBSgCFARAIAUoAhQgBSgCGC8BCjsBAAsgBSgCGC8BCgRAIAUgBSgCGCgCDDYCHAwECyAFQZAVNgIcDAMLIAUgBSgCGCgCADYCGAwBCwsgBSgCCEEJQQAQFCAFQQA2AhwLIAUoAhwhACAFQSBqJAAgAAumAQEBfyMAQRBrIgIkACACIAA2AgggAiABNgIEAkAgAigCCC0AKEEBcQRAIAJBfzYCDAwBCyACKAIIKAIABEAgAigCCCgCACACKAIEEGdBAEgEQCACKAIIQQxqIAIoAggoAgAQFyACQX82AgwMAgsLIAIoAgggAkEEakIEQRMQIEIAUwRAIAJBfzYCDAwBCyACQQA2AgwLIAIoAgwhACACQRBqJAAgAAuNCAIBfwF+IwBBkAFrIgMkACADIAA2AoQBIAMgATYCgAEgAyACNgJ8IAMQUwJAIAMoAoABKQMIQgBSBEAgAyADKAKAASgCACgCACkDSDcDYCADIAMoAoABKAIAKAIAKQNINwNoDAELIANCADcDYCADQgA3A2gLIANCADcDcAJAA0AgAykDcCADKAKAASkDCFQEQCADKAKAASgCACADKQNwp0EEdGooAgApA0ggAykDaFQEQCADIAMoAoABKAIAIAMpA3CnQQR0aigCACkDSDcDaAsgAykDaCADKAKAASkDIFYEQCADKAJ8QRNBABAUIANCfzcDiAEMAwsgAyADKAKAASgCACADKQNwp0EEdGooAgApA0ggAygCgAEoAgAgAykDcKdBBHRqKAIAKQMgfCADKAKAASgCACADKQNwp0EEdGooAgAoAjAQUUH//wNxrXxCHnw3A1ggAykDWCADKQNgVgRAIAMgAykDWDcDYAsgAykDYCADKAKAASkDIFYEQCADKAJ8QRNBABAUIANCfzcDiAEMAwsgAygChAEoAgAgAygCgAEoAgAgAykDcKdBBHRqKAIAKQNIQQAQJ0EASARAIAMoAnwgAygChAEoAgAQFyADQn83A4gBDAMLIAMgAygChAEoAgBBAEEBIAMoAnwQjAFCf1EEQCADEFIgA0J/NwOIAQwDCwJ/IAMoAoABKAIAIAMpA3CnQQR0aigCACEBIwBBEGsiACQAIAAgATYCCCAAIAM2AgQCQAJAAkAgACgCCC8BCiAAKAIELwEKSA0AIAAoAggoAhAgACgCBCgCEEcNACAA
KAIIKAIUIAAoAgQoAhRHDQAgACgCCCgCMCAAKAIEKAIwEIYBDQELIABBfzYCDAwBCwJAAkAgACgCCCgCGCAAKAIEKAIYRw0AIAAoAggpAyAgACgCBCkDIFINACAAKAIIKQMoIAAoAgQpAyhRDQELAkACQCAAKAIELwEMQQhxRQ0AIAAoAgQoAhgNACAAKAIEKQMgQgBSDQAgACgCBCkDKFANAQsgAEF/NgIMDAILCyAAQQA2AgwLIAAoAgwhASAAQRBqJAAgAQsEQCADKAJ8QRVBABAUIAMQUiADQn83A4gBDAMFIAMoAoABKAIAIAMpA3CnQQR0aigCACgCNCADKAI0EJUBIQAgAygCgAEoAgAgAykDcKdBBHRqKAIAIAA2AjQgAygCgAEoAgAgAykDcKdBBHRqKAIAQQE6AAQgA0EANgI0IAMQUiADIAMpA3BCAXw3A3AMAgsACwsgAwJ+IAMpA2AgAykDaH1C////////////AFQEQCADKQNgIAMpA2h9DAELQv///////////wALNwOIAQsgAykDiAEhBCADQZABaiQAIAQL1AQBAX8jAEEgayIDJAAgAyAANgIYIAMgATYCFCADIAI2AhAgAygCECEBIwBBEGsiACQAIAAgATYCCCAAQdgAEBg2AgQCQCAAKAIERQRAIAAoAghBDkEAEBQgAEEANgIMDAELIAAoAgghAiMAQRBrIgEkACABIAI2AgggAUEYEBgiAjYCBAJAIAJFBEAgASgCCEEOQQAQFCABQQA2AgwMAQsgASgCBEEANgIAIAEoAgRCADcDCCABKAIEQQA2AhAgASABKAIENgIMCyABKAIMIQIgAUEQaiQAIAAoAgQgAjYCUCACRQRAIAAoAgQQFSAAQQA2AgwMAQsgACgCBEEANgIAIAAoAgRBADYCBCMAQRBrIgEgACgCBEEIajYCDCABKAIMQQA2AgAgASgCDEEANgIEIAEoAgxBADYCCCAAKAIEQQA2AhggACgCBEEANgIUIAAoAgRBADYCHCAAKAIEQQA2AiQgACgCBEEANgIgIAAoAgRBADoAKCAAKAIEQgA3AzggACgCBEIANwMwIAAoAgRBADYCQCAAKAIEQQA2AkggACgCBEEANgJEIAAoAgRBADYCTCAAKAIEQQA2AlQgACAAKAIENgIMCyAAKAIMIQEgAEEQaiQAIAMgASIANgIMAkAgAEUEQCADQQA2AhwMAQsgAygCDCADKAIYNgIAIAMoAgwgAygCFDYCBCADKAIUQRBxBEAgAygCDCIAIAAoAhRBAnI2AhQgAygCDCIAIAAoAhhBAnI2AhgLIAMgAygCDDYCHAsgAygCHCEAIANBIGokACAAC9UBAQF/IwBBIGsiBCQAIAQgADYCGCAEIAE3AxAgBCACNgIMIAQgAzYCCAJAAkAgBCkDEEL///////////8AVwRAIAQpAxBCgICAgICAgICAf1kNAQsgBCgCCEEEQT0QFCAEQX82AhwMAQsCfyAEKQMQIQEgBCgCDCEAIAQoAhgiAigCTEF/TARAIAIgASAAEKABDAELIAIgASAAEKABC0EASARAIAQoAghBBEG0mwEoAgAQFCAEQX82AhwMAQsgBEEANgIcCyAEKAIcIQAgBEEgaiQAIAALJABBACAAEAUiACAAQRtGGyIABH9BtJsBIAA2AgBBAAVBAAsaC3ABAX8jAEEQayIDJAAgAwJ/IAFBwABxRQRAQQAgAUGAgIQCcUGAgIQCRw0BGgsgAyACQQRqNgIMIAIoAgALNgIAIAAgAUGAgAJyIAMQECIAQYFgTwRAQbSbAUEAIABrNgIAQX8hAAsgA0EQaiQAIAALMwEBfwJ/IAAQByIBQWFGBEAgABARIQELIAFBgWBPCwR/QbSbAUEAIAFrNgIAQX8FIAELC2kBAn8CQCAAKAIUIAAoAhxNDQAgAEEAQQAgACgCJBEBABogACgCFA0AQX8PCyAAKAIEIgEgACgCCCICSQRAIAAgASACa6xBASAAKAIoEQ8AGgsgAEEANgIcIABCADcDECAAQgA3AgRBAAvaAwEGfyMAQRBrIgUkACAFIAI2AgwjAEGgAWsiBCQAIARBCGpBkIcBQZABEBkaIAQgADYCNCAEIAA2AhwgBEF+IABrIgNB/////wcgA0H/////B0kbIgY2AjggBCAAIAZqIgA2AiQgBCAANgIYIARBCGohACMAQdABayIDJAAgAyACNgLMASADQaABakEAQSgQMyADIAMoAswBNgLIAQJAQQAgASADQcgBaiADQdAAaiADQaABahBwQQBIDQAgACgCTEEATiEHIAAoAgAhAiAALABKQQBMBEAgACACQV9xNgIACyACQSBxIQgCfyAAKAIwBEAgACABIANByAFqIANB0ABqIANBoAFqEHAMAQsgAEHQADYCMCAAIANB0ABqNgIQIAAgAzYCHCAAIAM2AhQgACgCLCECIAAgAzYCLCAAIAEgA0HIAWogA0HQAGogA0GgAWoQcCACRQ0AGiAAQQBBACAAKAIkEQEAGiAAQQA2AjAgACACNgIsIABBADYCHCAAQQA2AhAgACgCFBogAEEANgIUQQALGiAAIAAoAgAgCHI2AgAgB0UNAAsgA0HQAWokACAGBEAgBCgCHCIAIAAgBCgCGEZrQQA6AAALIARBoAFqJAAgBUEQaiQAC4wSAg9/AX4jAEHQAGsiBSQAIAUgATYCTCAFQTdqIRMgBUE4aiEQQQAhAQNAAkAgDUEASA0AQf////8HIA1rIAFIBEBBtJsBQT02AgBBfyENDAELIAEgDWohDQsgBSgCTCIHIQECQAJAAkACQAJAAkACQAJAIAUCfwJAIActAAAiBgRAA0ACQAJAIAZB/wFxIgZFBEAgASEGDAELIAZBJUcNASABIQYDQCABLQABQSVHDQEgBSABQQJqIgg2AkwgBkEBaiEGIAEtAAIhDiAIIQEgDkElRg0ACwsgBiAHayEBIAAEQCAAIAcgARAiCyABDQ0gBSgCTCEBIAUoAkwsAAFBMGtBCk8NAyABLQACQSRHDQMgASwAAUEwayEPQQEhESABQQNqDAQLIAUgAUEBaiIINgJMIAEtAAEhBiAIIQEMAAsACyANIQsgAA0IIBFFDQJBASEBA0AgBCABQQJ0aigCACIABEAgAyABQQN0aiAAIAIQqAFBASELIAFBAWoiAUEKRw0BDAoLC0EBIQsgAUEKTw0IA0AgBCABQQJ0aigCAA0IIAFBAWoiAUEKRw0ACwwIC0F/IQ8gAUEBagsiATYCTEEAIQgCQCABLAAAIgxBIGsiBkEfSw0AQQEgBnQiBkGJ0QRxRQ0AA0ACQCAFIAFBAWoiCDYCTCABLAABIgxBIGsiAUEgTw0AQQEgAXQiAUGJ0QRxRQ0AIAEgBnIhBiAIIQEMAQsLIAghASAGIQgLAkAgDEEqRgRAIAUCfwJAIAEsAAFBMGtBCk8NACAFKAJMIgEtAAJBJEcNACABLAABQQJ0IARqQcABa0EKNgIAIAEsAAFBA3QgA2pBgANrKAIAIQp
BASERIAFBA2oMAQsgEQ0IQQAhEUEAIQogAARAIAIgAigCACIBQQRqNgIAIAEoAgAhCgsgBSgCTEEBagsiATYCTCAKQX9KDQFBACAKayEKIAhBgMAAciEIDAELIAVBzABqEKcBIgpBAEgNBiAFKAJMIQELQX8hCQJAIAEtAABBLkcNACABLQABQSpGBEACQCABLAACQTBrQQpPDQAgBSgCTCIBLQADQSRHDQAgASwAAkECdCAEakHAAWtBCjYCACABLAACQQN0IANqQYADaygCACEJIAUgAUEEaiIBNgJMDAILIBENByAABH8gAiACKAIAIgFBBGo2AgAgASgCAAVBAAshCSAFIAUoAkxBAmoiATYCTAwBCyAFIAFBAWo2AkwgBUHMAGoQpwEhCSAFKAJMIQELQQAhBgNAIAYhEkF/IQsgASwAAEHBAGtBOUsNByAFIAFBAWoiDDYCTCABLAAAIQYgDCEBIAYgEkE6bGpB74IBai0AACIGQQFrQQhJDQALIAZBE0YNAiAGRQ0GIA9BAE4EQCAEIA9BAnRqIAY2AgAgBSADIA9BA3RqKQMANwNADAQLIAANAQtBACELDAULIAVBQGsgBiACEKgBIAUoAkwhDAwCCyAPQX9KDQMLQQAhASAARQ0ECyAIQf//e3EiDiAIIAhBgMAAcRshBkEAIQtBpAghDyAQIQgCQAJAAkACfwJAAkACQAJAAn8CQAJAAkACQAJAAkACQCAMQQFrLAAAIgFBX3EgASABQQ9xQQNGGyABIBIbIgFB2ABrDiEEEhISEhISEhIOEg8GDg4OEgYSEhISAgUDEhIJEgESEgQACwJAIAFBwQBrDgcOEgsSDg4OAAsgAUHTAEYNCQwRCyAFKQNAIRRBpAgMBQtBACEBAkACQAJAAkACQAJAAkAgEkH/AXEOCAABAgMEFwUGFwsgBSgCQCANNgIADBYLIAUoAkAgDTYCAAwVCyAFKAJAIA2sNwMADBQLIAUoAkAgDTsBAAwTCyAFKAJAIA06AAAMEgsgBSgCQCANNgIADBELIAUoAkAgDaw3AwAMEAsgCUEIIAlBCEsbIQkgBkEIciEGQfgAIQELIBAhByABQSBxIQ4gBSkDQCIUUEUEQANAIAdBAWsiByAUp0EPcUGAhwFqLQAAIA5yOgAAIBRCD1YhDCAUQgSIIRQgDA0ACwsgBSkDQFANAyAGQQhxRQ0DIAFBBHZBpAhqIQ9BAiELDAMLIBAhASAFKQNAIhRQRQRAA0AgAUEBayIBIBSnQQdxQTByOgAAIBRCB1YhByAUQgOIIRQgBw0ACwsgASEHIAZBCHFFDQIgCSAQIAdrIgFBAWogASAJSBshCQwCCyAFKQNAIhRCf1cEQCAFQgAgFH0iFDcDQEEBIQtBpAgMAQsgBkGAEHEEQEEBIQtBpQgMAQtBpghBpAggBkEBcSILGwshDyAUIBAQRCEHCyAGQf//e3EgBiAJQX9KGyEGAkAgBSkDQCIUQgBSDQAgCQ0AQQAhCSAQIQcMCgsgCSAUUCAQIAdraiIBIAEgCUgbIQkMCQsgBSgCQCIBQdgSIAEbIgdBACAJEKsBIgEgByAJaiABGyEIIA4hBiABIAdrIAkgARshCQwICyAJBEAgBSgCQAwCC0EAIQEgAEEgIApBACAGECYMAgsgBUEANgIMIAUgBSkDQD4CCCAFIAVBCGo2AkBBfyEJIAVBCGoLIQhBACEBAkADQCAIKAIAIgdFDQECQCAFQQRqIAcQqgEiB0EASCIODQAgByAJIAFrSw0AIAhBBGohCCAJIAEgB2oiAUsNAQwCCwtBfyELIA4NBQsgAEEgIAogASAGECYgAUUEQEEAIQEMAQtBACEIIAUoAkAhDANAIAwoAgAiB0UNASAFQQRqIAcQqgEiByAIaiIIIAFKDQEgACAFQQRqIAcQIiAMQQRqIQwgASAISw0ACwsgAEEgIAogASAGQYDAAHMQJiAKIAEgASAKSBshAQwFCyAAIAUrA0AgCiAJIAYgAUEXERkAIQEMBAsgBSAFKQNAPAA3QQEhCSATIQcgDiEGDAILQX8hCwsgBUHQAGokACALDwsgAEEgIAsgCCAHayIOIAkgCSAOSBsiDGoiCCAKIAggCkobIgEgCCAGECYgACAPIAsQIiAAQTAgASAIIAZBgIAEcxAmIABBMCAMIA5BABAmIAAgByAOECIgAEEgIAEgCCAGQYDAAHMQJgwACwALkAIBA38CQCABIAIoAhAiBAR/IAQFQQAhBAJ/IAIgAi0ASiIDQQFrIANyOgBKIAIoAgAiA0EIcQRAIAIgA0EgcjYCAEF/DAELIAJCADcCBCACIAIoAiwiAzYCHCACIAM2AhQgAiADIAIoAjBqNgIQQQALDQEgAigCEAsgAigCFCIFa0sEQCACIAAgASACKAIkEQEADwsCfyACLABLQX9KBEAgASEEA0AgASAEIgNFDQIaIAAgA0EBayIEai0AAEEKRw0ACyACIAAgAyACKAIkEQEAIgQgA0kNAiAAIANqIQAgAigCFCEFIAEgA2sMAQsgAQshBCAFIAAgBBAZGiACIAIoAhQgBGo2AhQgASEECyAEC0gCAX8BfiMAQRBrIgMkACADIAA2AgwgAyABNgIIIAMgAjYCBCADKAIMIAMoAgggAygCBCADKAIMQQhqEFghBCADQRBqJAAgBAt3AQF/IwBBEGsiASAANgIIIAFChSo3AwACQCABKAIIRQRAIAFBADYCDAwBCwNAIAEoAggtAAAEQCABIAEoAggtAACtIAEpAwBCIX58Qv////8PgzcDACABIAEoAghBAWo2AggMAQsLIAEgASkDAD4CDAsgASgCDAuHBQEBfyMAQTBrIgUkACAFIAA2AiggBSABNgIkIAUgAjcDGCAFIAM2AhQgBSAENgIQAkACQAJAIAUoAihFDQAgBSgCJEUNACAFKQMYQv///////////wBYDQELIAUoAhBBEkEAEBQgBUEAOgAvDAELIAUoAigoAgBFBEAgBSgCKEGAAiAFKAIQEFpBAXFFBEAgBUEAOgAvDAILCyAFIAUoAiQQczYCDCAFIAUoAgwgBSgCKCgCAHA2AgggBSAFKAIoKAIQIAUoAghBAnRqKAIANgIEA0ACQCAFKAIERQ0AAkAgBSgCBCgCHCAFKAIMRw0AIAUoAiQgBSgCBCgCABBbDQACQAJAIAUoAhRBCHEEQCAFKAIEKQMIQn9SDQELIAUoAgQpAxBCf1ENAQsgBSgCEEEKQQAQFCAFQQA6AC8MBAsMAQsgBSAFKAIEKAIYNgIEDAELCyAFKAIERQRAIAVBIBAYIgA2AgQgAEUEQCAFKAIQQQ5BABAUIAVBADoALwwCCyAFKAIEIAUoAiQ2AgAgBSgCBCAFKAIoKAIQIAUoAghBAnRqKAIANgIYIAUoAigoAhAgBSgCCEECdGogBSgCBDYCACAFKAIEIAUoAgw2AhwgBSgCBEJ/NwMIIAUoAigiACAAKQMIQgF8NwMIAkAgBSgCKCIAKQMIuiAAKAIAuEQAAAAAAADoP6JkRQ
0AIAUoAigoAgBBgICAgHhPDQAgBSgCKCAFKAIoKAIAQQF0IAUoAhAQWkEBcUUEQCAFQQA6AC8MAwsLCyAFKAIUQQhxBEAgBSgCBCAFKQMYNwMICyAFKAIEIAUpAxg3AxAgBUEBOgAvCyAFLQAvQQFxIQAgBUEwaiQAIAAL1BEBAX8jAEGwAWsiBiQAIAYgADYCqAEgBiABNgKkASAGIAI2AqABIAYgAzYCnAEgBiAENgKYASAGIAU2ApQBIAZBADYCkAEDQCAGKAKQAUEPS0UEQCAGQSBqIAYoApABQQF0akEAOwEAIAYgBigCkAFBAWo2ApABDAELCyAGQQA2AowBA0AgBigCjAEgBigCoAFPRQRAIAZBIGogBigCpAEgBigCjAFBAXRqLwEAQQF0aiIAIAAvAQBBAWo7AQAgBiAGKAKMAUEBajYCjAEMAQsLIAYgBigCmAEoAgA2AoABIAZBDzYChAEDQAJAIAYoAoQBQQFJDQAgBkEgaiAGKAKEAUEBdGovAQANACAGIAYoAoQBQQFrNgKEAQwBCwsgBigCgAEgBigChAFLBEAgBiAGKAKEATYCgAELAkAgBigChAFFBEAgBkHAADoAWCAGQQE6AFkgBkEAOwFaIAYoApwBIgEoAgAhACABIABBBGo2AgAgACAGQdgAaigBADYBACAGKAKcASIBKAIAIQAgASAAQQRqNgIAIAAgBkHYAGooAQA2AQAgBigCmAFBATYCACAGQQA2AqwBDAELIAZBATYCiAEDQAJAIAYoAogBIAYoAoQBTw0AIAZBIGogBigCiAFBAXRqLwEADQAgBiAGKAKIAUEBajYCiAEMAQsLIAYoAoABIAYoAogBSQRAIAYgBigCiAE2AoABCyAGQQE2AnQgBkEBNgKQAQNAIAYoApABQQ9NBEAgBiAGKAJ0QQF0NgJ0IAYgBigCdCAGQSBqIAYoApABQQF0ai8BAGs2AnQgBigCdEEASARAIAZBfzYCrAEMAwUgBiAGKAKQAUEBajYCkAEMAgsACwsCQCAGKAJ0QQBMDQAgBigCqAEEQCAGKAKEAUEBRg0BCyAGQX82AqwBDAELIAZBADsBAiAGQQE2ApABA0AgBigCkAFBD09FBEAgBigCkAFBAWpBAXQgBmogBigCkAFBAXQgBmovAQAgBkEgaiAGKAKQAUEBdGovAQBqOwEAIAYgBigCkAFBAWo2ApABDAELCyAGQQA2AowBA0AgBigCjAEgBigCoAFJBEAgBigCpAEgBigCjAFBAXRqLwEABEAgBigClAEhASAGKAKkASAGKAKMASICQQF0ai8BAEEBdCAGaiIDLwEAIQAgAyAAQQFqOwEAIABB//8DcUEBdCABaiACOwEACyAGIAYoAowBQQFqNgKMAQwBCwsCQAJAAkACQCAGKAKoAQ4CAAECCyAGIAYoApQBIgA2AkwgBiAANgJQIAZBFDYCSAwCCyAGQYDwADYCUCAGQcDwADYCTCAGQYECNgJIDAELIAZBgPEANgJQIAZBwPEANgJMIAZBADYCSAsgBkEANgJsIAZBADYCjAEgBiAGKAKIATYCkAEgBiAGKAKcASgCADYCVCAGIAYoAoABNgJ8IAZBADYCeCAGQX82AmAgBkEBIAYoAoABdDYCcCAGIAYoAnBBAWs2AlwCQAJAIAYoAqgBQQFGBEAgBigCcEHUBksNAQsgBigCqAFBAkcNASAGKAJwQdAETQ0BCyAGQQE2AqwBDAELA0AgBiAGKAKQASAGKAJ4azoAWQJAIAYoAkggBigClAEgBigCjAFBAXRqLwEAQQFqSwRAIAZBADoAWCAGIAYoApQBIAYoAowBQQF0ai8BADsBWgwBCwJAIAYoApQBIAYoAowBQQF0ai8BACAGKAJITwRAIAYgBigCTCAGKAKUASAGKAKMAUEBdGovAQAgBigCSGtBAXRqLwEAOgBYIAYgBigCUCAGKAKUASAGKAKMAUEBdGovAQAgBigCSGtBAXRqLwEAOwFaDAELIAZB4AA6AFggBkEAOwFaCwsgBkEBIAYoApABIAYoAnhrdDYCaCAGQQEgBigCfHQ2AmQgBiAGKAJkNgKIAQNAIAYgBigCZCAGKAJoazYCZCAGKAJUIAYoAmQgBigCbCAGKAJ4dmpBAnRqIAZB2ABqKAEANgEAIAYoAmQNAAsgBkEBIAYoApABQQFrdDYCaANAIAYoAmwgBigCaHEEQCAGIAYoAmhBAXY2AmgMAQsLAkAgBigCaARAIAYgBigCbCAGKAJoQQFrcTYCbCAGIAYoAmggBigCbGo2AmwMAQsgBkEANgJsCyAGIAYoAowBQQFqNgKMASAGQSBqIAYoApABQQF0aiIBLwEAQQFrIQAgASAAOwEAAkAgAEH//wNxRQRAIAYoApABIAYoAoQBRg0BIAYgBigCpAEgBigClAEgBigCjAFBAXRqLwEAQQF0ai8BADYCkAELAkAgBigCkAEgBigCgAFNDQAgBigCYCAGKAJsIAYoAlxxRg0AIAYoAnhFBEAgBiAGKAKAATYCeAsgBiAGKAJUIAYoAogBQQJ0ajYCVCAGIAYoApABIAYoAnhrNgJ8IAZBASAGKAJ8dDYCdANAAkAgBigChAEgBigCfCAGKAJ4ak0NACAGIAYoAnQgBkEgaiAGKAJ8IAYoAnhqQQF0ai8BAGs2AnQgBigCdEEATA0AIAYgBigCfEEBajYCfCAGIAYoAnRBAXQ2AnQMAQsLIAYgBigCcEEBIAYoAnx0ajYCcAJAAkAgBigCqAFBAUYEQCAGKAJwQdQGSw0BCyAGKAKoAUECRw0BIAYoAnBB0ARNDQELIAZBATYCrAEMBAsgBiAGKAJsIAYoAlxxNgJgIAYoApwBKAIAIAYoAmBBAnRqIAYoAnw6AAAgBigCnAEoAgAgBigCYEECdGogBigCgAE6AAEgBigCnAEoAgAgBigCYEECdGogBigCVCAGKAKcASgCAGtBAnU7AQILDAELCyAGKAJsBEAgBkHAADoAWCAGIAYoApABIAYoAnhrOgBZIAZBADsBWiAGKAJUIAYoAmxBAnRqIAZB2ABqKAEANgEACyAGKAKcASIAIAAoAgAgBigCcEECdGo2AgAgBigCmAEgBigCgAE2AgAgBkEANgKsAQsgBigCrAEhACAGQbABaiQAIAALsQIBAX8jAEEgayIDJAAgAyAANgIYIAMgATYCFCADIAI2AhAgAyADKAIYKAIENgIMIAMoAgwgAygCEEsEQCADIAMoAhA2AgwLAkAgAygCDEUEQCADQQA2AhwMAQsgAygCGCIAIAAoAgQgAygCDGs2AgQgAygCFCADKAIYKAIAIAMoAgwQGRoCQCADKAIYKAIcKAIYQQFGBEAgAygCGCgCMCADKAIUIAMoAgwQPSEAIAMoAhggADYCMAwBCyADKAIYKAIcKAIYQQJGBEAgAygCGCgCMCADKAIUIAMoAgwQGiEAIAMoAhggADYCMAsLIAMoAhgiACADKAIMIAAoAgBqNgIAIAMoAhgiACADKAIMIAAoAghqNgIII
AMgAygCDDYCHAsgAygCHCEAIANBIGokACAACzYBAX8jAEEQayIBJAAgASAANgIMIAEoAgwQXiABKAIMKAIAEDcgASgCDCgCBBA3IAFBEGokAAvtAQEBfyMAQRBrIgEgADYCCAJAAkACQCABKAIIRQ0AIAEoAggoAiBFDQAgASgCCCgCJA0BCyABQQE2AgwMAQsgASABKAIIKAIcNgIEAkACQCABKAIERQ0AIAEoAgQoAgAgASgCCEcNACABKAIEKAIEQSpGDQEgASgCBCgCBEE5Rg0BIAEoAgQoAgRBxQBGDQEgASgCBCgCBEHJAEYNASABKAIEKAIEQdsARg0BIAEoAgQoAgRB5wBGDQEgASgCBCgCBEHxAEYNASABKAIEKAIEQZoFRg0BCyABQQE2AgwMAQsgAUEANgIMCyABKAIMC9IEAQF/IwBBIGsiAyAANgIcIAMgATYCGCADIAI2AhQgAyADKAIcQdwWaiADKAIUQQJ0aigCADYCECADIAMoAhRBAXQ2AgwDQAJAIAMoAgwgAygCHCgC0ChKDQACQCADKAIMIAMoAhwoAtAoTg0AIAMoAhggAygCHCADKAIMQQJ0akHgFmooAgBBAnRqLwEAIAMoAhggAygCHEHcFmogAygCDEECdGooAgBBAnRqLwEATgRAIAMoAhggAygCHCADKAIMQQJ0akHgFmooAgBBAnRqLwEAIAMoAhggAygCHEHcFmogAygCDEECdGooAgBBAnRqLwEARw0BIAMoAhwgAygCDEECdGpB4BZqKAIAIAMoAhxB2Chqai0AACADKAIcQdwWaiADKAIMQQJ0aigCACADKAIcQdgoamotAABKDQELIAMgAygCDEEBajYCDAsgAygCGCADKAIQQQJ0ai8BACADKAIYIAMoAhxB3BZqIAMoAgxBAnRqKAIAQQJ0ai8BAEgNAAJAIAMoAhggAygCEEECdGovAQAgAygCGCADKAIcQdwWaiADKAIMQQJ0aigCAEECdGovAQBHDQAgAygCECADKAIcQdgoamotAAAgAygCHEHcFmogAygCDEECdGooAgAgAygCHEHYKGpqLQAASg0ADAELIAMoAhxB3BZqIAMoAhRBAnRqIAMoAhxB3BZqIAMoAgxBAnRqKAIANgIAIAMgAygCDDYCFCADIAMoAgxBAXQ2AgwMAQsLIAMoAhxB3BZqIAMoAhRBAnRqIAMoAhA2AgAL1xMBA38jAEEwayICJAAgAiAANgIsIAIgATYCKCACIAIoAigoAgA2AiQgAiACKAIoKAIIKAIANgIgIAIgAigCKCgCCCgCDDYCHCACQX82AhAgAigCLEEANgLQKCACKAIsQb0ENgLUKCACQQA2AhgDQCACKAIYIAIoAhxIBEACQCACKAIkIAIoAhhBAnRqLwEABEAgAiACKAIYIgE2AhAgAigCLEHcFmohAyACKAIsIgQoAtAoQQFqIQAgBCAANgLQKCAAQQJ0IANqIAE2AgAgAigCGCACKAIsQdgoampBADoAAAwBCyACKAIkIAIoAhhBAnRqQQA7AQILIAIgAigCGEEBajYCGAwBCwsDQCACKAIsKALQKEECSARAAkAgAigCEEECSARAIAIgAigCEEEBaiIANgIQDAELQQAhAAsgAigCLEHcFmohAyACKAIsIgQoAtAoQQFqIQEgBCABNgLQKCABQQJ0IANqIAA2AgAgAiAANgIMIAIoAiQgAigCDEECdGpBATsBACACKAIMIAIoAixB2ChqakEAOgAAIAIoAiwiACAAKAKoLUEBazYCqC0gAigCIARAIAIoAiwiACAAKAKsLSACKAIgIAIoAgxBAnRqLwECazYCrC0LDAELCyACKAIoIAIoAhA2AgQgAiACKAIsKALQKEECbTYCGANAIAIoAhhBAU4EQCACKAIsIAIoAiQgAigCGBB5IAIgAigCGEEBazYCGAwBCwsgAiACKAIcNgIMA0AgAiACKAIsKALgFjYCGCACKAIsQdwWaiEBIAIoAiwiAygC0CghACADIABBAWs2AtAoIAIoAiwgAEECdCABaigCADYC4BYgAigCLCACKAIkQQEQeSACIAIoAiwoAuAWNgIUIAIoAhghASACKAIsQdwWaiEDIAIoAiwiBCgC1ChBAWshACAEIAA2AtQoIABBAnQgA2ogATYCACACKAIUIQEgAigCLEHcFmohAyACKAIsIgQoAtQoQQFrIQAgBCAANgLUKCAAQQJ0IANqIAE2AgAgAigCJCACKAIMQQJ0aiACKAIkIAIoAhhBAnRqLwEAIAIoAiQgAigCFEECdGovAQBqOwEAIAIoAgwgAigCLEHYKGpqAn8gAigCGCACKAIsQdgoamotAAAgAigCFCACKAIsQdgoamotAABOBEAgAigCGCACKAIsQdgoamotAAAMAQsgAigCFCACKAIsQdgoamotAAALQQFqOgAAIAIoAiQgAigCFEECdGogAigCDCIAOwECIAIoAiQgAigCGEECdGogADsBAiACIAIoAgwiAEEBajYCDCACKAIsIAA2AuAWIAIoAiwgAigCJEEBEHkgAigCLCgC0ChBAk4NAAsgAigCLCgC4BYhASACKAIsQdwWaiEDIAIoAiwiBCgC1ChBAWshACAEIAA2AtQoIABBAnQgA2ogATYCACACKAIoIQEjAEFAaiIAIAIoAiw2AjwgACABNgI4IAAgACgCOCgCADYCNCAAIAAoAjgoAgQ2AjAgACAAKAI4KAIIKAIANgIsIAAgACgCOCgCCCgCBDYCKCAAIAAoAjgoAggoAgg2AiQgACAAKAI4KAIIKAIQNgIgIABBADYCBCAAQQA2AhADQCAAKAIQQQ9MBEAgACgCPEG8FmogACgCEEEBdGpBADsBACAAIAAoAhBBAWo2AhAMAQsLIAAoAjQgACgCPEHcFmogACgCPCgC1ChBAnRqKAIAQQJ0akEAOwECIAAgACgCPCgC1ChBAWo2AhwDQCAAKAIcQb0ESARAIAAgACgCPEHcFmogACgCHEECdGooAgA2AhggACAAKAI0IAAoAjQgACgCGEECdGovAQJBAnRqLwECQQFqNgIQIAAoAhAgACgCIEoEQCAAIAAoAiA2AhAgACAAKAIEQQFqNgIECyAAKAI0IAAoAhhBAnRqIAAoAhA7AQIgACgCGCAAKAIwTARAIAAoAjwgACgCEEEBdGpBvBZqIgEgAS8BAEEBajsBACAAQQA2AgwgACgCGCAAKAIkTgRAIAAgACgCKCAAKAIYIAAoAiRrQQJ0aigCADYCDAsgACAAKAI0IAAoAhhBAnRqLwEAOwEKIAAoAjwiASABKAKoLSAALwEKIAAoAhAgACgCDGpsajYCqC0gACgCLARAIAAoAjwiASABKAKsLSAALwEKIAAoAiwgACgCGEECdGovAQIgACgCDGpsajYCrC0LCyAAIAAoAhxBAWo2AhwMAQsLAkAgACgCBEUNAANAIAAgACgCIEEBazYCEANAIAAoAjxBvBZqIAAoAhBBAXRqLwEARQRAIAAgACgCEEEBazYCEAwB
CwsgACgCPCAAKAIQQQF0akG8FmoiASABLwEAQQFrOwEAIAAoAjwgACgCEEEBdGpBvhZqIgEgAS8BAEECajsBACAAKAI8IAAoAiBBAXRqQbwWaiIBIAEvAQBBAWs7AQAgACAAKAIEQQJrNgIEIAAoAgRBAEoNAAsgACAAKAIgNgIQA0AgACgCEEUNASAAIAAoAjxBvBZqIAAoAhBBAXRqLwEANgIYA0AgACgCGARAIAAoAjxB3BZqIQEgACAAKAIcQQFrIgM2AhwgACADQQJ0IAFqKAIANgIUIAAoAhQgACgCMEoNASAAKAI0IAAoAhRBAnRqLwECIAAoAhBHBEAgACgCPCIBIAEoAqgtIAAoAjQgACgCFEECdGovAQAgACgCECAAKAI0IAAoAhRBAnRqLwECa2xqNgKoLSAAKAI0IAAoAhRBAnRqIAAoAhA7AQILIAAgACgCGEEBazYCGAwBCwsgACAAKAIQQQFrNgIQDAALAAsgAigCJCEBIAIoAhAhAyACKAIsQbwWaiEEIwBBQGoiACQAIAAgATYCPCAAIAM2AjggACAENgI0IABBADYCDCAAQQE2AggDQCAAKAIIQQ9MBEAgACAAKAIMIAAoAjQgACgCCEEBa0EBdGovAQBqQQF0NgIMIABBEGogACgCCEEBdGogACgCDDsBACAAIAAoAghBAWo2AggMAQsLIABBADYCBANAIAAoAgQgACgCOEwEQCAAIAAoAjwgACgCBEECdGovAQI2AgAgACgCAARAIABBEGogACgCAEEBdGoiAS8BACEDIAEgA0EBajsBACAAKAIAIQQjAEEQayIBIAM2AgwgASAENgIIIAFBADYCBANAIAEgASgCBCABKAIMQQFxcjYCBCABIAEoAgxBAXY2AgwgASABKAIEQQF0NgIEIAEgASgCCEEBayIDNgIIIANBAEoNAAsgASgCBEEBdiEBIAAoAjwgACgCBEECdGogATsBAAsgACAAKAIEQQFqNgIEDAELCyAAQUBrJAAgAkEwaiQAC04BAX8jAEEQayICIAA7AQogAiABNgIEAkAgAi8BCkEBRgRAIAIoAgRBAUYEQCACQQA2AgwMAgsgAkEENgIMDAELIAJBADYCDAsgAigCDAvOAgEBfyMAQTBrIgUkACAFIAA2AiwgBSABNgIoIAUgAjYCJCAFIAM3AxggBSAENgIUIAVCADcDCANAIAUpAwggBSkDGFQEQCAFIAUoAiQgBSkDCKdqLQAAOgAHIAUoAhRFBEAgBSAFKAIsKAIUQQJyOwESIAUgBS8BEiAFLwESQQFzbEEIdjsBEiAFIAUtAAcgBS8BEkH/AXFzOgAHCyAFKAIoBEAgBSgCKCAFKQMIp2ogBS0ABzoAAAsgBSgCLCgCDEF/cyAFQQdqQQEQGkF/cyEAIAUoAiwgADYCDCAFKAIsIAUoAiwoAhAgBSgCLCgCDEH/AXFqQYWIosAAbEEBajYCECAFIAUoAiwoAhBBGHY6AAcgBSgCLCgCFEF/cyAFQQdqQQEQGkF/cyEAIAUoAiwgADYCFCAFIAUpAwhCAXw3AwgMAQsLIAVBMGokAAttAQF/IwBBIGsiBCQAIAQgADYCGCAEIAE2AhQgBCACNwMIIAQgAzYCBAJAIAQoAhhFBEAgBEEANgIcDAELIAQgBCgCFCAEKQMIIAQoAgQgBCgCGEEIahDEATYCHAsgBCgCHCEAIARBIGokACAAC6cDAQF/IwBBIGsiBCQAIAQgADYCGCAEIAE3AxAgBCACNgIMIAQgAzYCCCAEIAQoAhggBCkDECAEKAIMQQAQPyIANgIAAkAgAEUEQCAEQX82AhwMAQsgBCAEKAIYIAQpAxAgBCgCDBDFASIANgIEIABFBEAgBEF/NgIcDAELAkACQCAEKAIMQQhxDQAgBCgCGCgCQCAEKQMQp0EEdGooAghFDQAgBCgCGCgCQCAEKQMQp0EEdGooAgggBCgCCBA5QQBIBEAgBCgCGEEIakEPQQAQFCAEQX82AhwMAwsMAQsgBCgCCBA7IAQoAgggBCgCACgCGDYCLCAEKAIIIAQoAgApAyg3AxggBCgCCCAEKAIAKAIUNgIoIAQoAgggBCgCACkDIDcDICAEKAIIIAQoAgAoAhA7ATAgBCgCCCAEKAIALwFSOwEyIAQoAghBIEEAIAQoAgAtAAZBAXEbQdwBcq03AwALIAQoAgggBCkDEDcDECAEKAIIIAQoAgQ2AgggBCgCCCIAIAApAwBCA4Q3AwAgBEEANgIcCyAEKAIcIQAgBEEgaiQAIAALWQIBfwF+AkACf0EAIABFDQAaIACtIAGtfiIDpyICIAAgAXJBgIAESQ0AGkF/IAIgA0IgiKcbCyICEBgiAEUNACAAQQRrLQAAQQNxRQ0AIABBACACEDMLIAALAwABC+oBAgF/AX4jAEEgayIEJAAgBCAANgIYIAQgATYCFCAEIAI2AhAgBCADNgIMIAQgBCgCDBCCASIANgIIAkAgAEUEQCAEQQA2AhwMAQsjAEEQayIAIAQoAhg2AgwgACgCDCIAIAAoAjBBAWo2AjAgBCgCCCAEKAIYNgIAIAQoAgggBCgCFDYCBCAEKAIIIAQoAhA2AgggBCgCGCAEKAIQQQBCAEEOIAQoAhQRCgAhBSAEKAIIIAU3AxggBCgCCCkDGEIAUwRAIAQoAghCPzcDGAsgBCAEKAIINgIcCyAEKAIcIQAgBEEgaiQAIAAL6gEBAX8jAEEQayIBJAAgASAANgIIIAFBOBAYIgA2AgQCQCAARQRAIAEoAghBDkEAEBQgAUEANgIMDAELIAEoAgRBADYCACABKAIEQQA2AgQgASgCBEEANgIIIAEoAgRBADYCICABKAIEQQA2AiQgASgCBEEAOgAoIAEoAgRBADYCLCABKAIEQQE2AjAjAEEQayIAIAEoAgRBDGo2AgwgACgCDEEANgIAIAAoAgxBADYCBCAAKAIMQQA2AgggASgCBEEAOgA0IAEoAgRBADoANSABIAEoAgQ2AgwLIAEoAgwhACABQRBqJAAgAAuwAQIBfwF+IwBBIGsiAyQAIAMgADYCGCADIAE2AhQgAyACNgIQIAMgAygCEBCCASIANgIMAkAgAEUEQCADQQA2AhwMAQsgAygCDCADKAIYNgIEIAMoAgwgAygCFDYCCCADKAIUQQBCAEEOIAMoAhgRDgAhBCADKAIMIAQ3AxggAygCDCkDGEIAUwRAIAMoAgxCPzcDGAsgAyADKAIMNgIcCyADKAIcIQAgA0EgaiQAIAALwwIBAX8jAEEQayIDIAA2AgwgAyABNgIIIAMgAjYCBCADKAIIKQMAQgKDQgBSBEAgAygCDCADKAIIKQMQNwMQCyADKAIIKQMAQgSDQgBSBEAgAygCDCADKAIIKQMYNwMYCyADKAIIKQMAQgiDQgBSBEAgAygCDCADKAIIKQMgNwMgCyADKAIIKQMAQhCDQgBSBEAgAygCDCADKAIIKAIoNgIoCyADKAIIKQMAQiCDQgBSBEAgAygCDCADKAIIKAIsNgIsCyADKAI
IKQMAQsAAg0IAUgRAIAMoAgwgAygCCC8BMDsBMAsgAygCCCkDAEKAAYNCAFIEQCADKAIMIAMoAggvATI7ATILIAMoAggpAwBCgAKDQgBSBEAgAygCDCADKAIIKAI0NgI0CyADKAIMIgAgAygCCCkDACAAKQMAhDcDAEEAC10BAX8jAEEQayICJAAgAiAANgIIIAIgATYCBAJAIAIoAgRFBEAgAkEANgIMDAELIAIgAigCCCACKAIEKAIAIAIoAgQvAQStEDY2AgwLIAIoAgwhACACQRBqJAAgAAuPAQEBfyMAQRBrIgIkACACIAA2AgggAiABNgIEAkACQCACKAIIBEAgAigCBA0BCyACIAIoAgggAigCBEY2AgwMAQsgAigCCC8BBCACKAIELwEERwRAIAJBADYCDAwBCyACIAIoAggoAgAgAigCBCgCACACKAIILwEEEE9FNgIMCyACKAIMIQAgAkEQaiQAIAALVQEBfyMAQRBrIgEkACABIAA2AgwgAUEAQQBBABAaNgIIIAEoAgwEQCABIAEoAgggASgCDCgCACABKAIMLwEEEBo2AggLIAEoAgghACABQRBqJAAgAAufAgEBfyMAQUBqIgUkACAFIAA3AzAgBSABNwMoIAUgAjYCJCAFIAM3AxggBSAENgIUIAUCfyAFKQMYQhBUBEAgBSgCFEESQQAQFEEADAELIAUoAiQLNgIEAkAgBSgCBEUEQCAFQn83AzgMAQsCQAJAAkACQAJAIAUoAgQoAggOAwIAAQMLIAUgBSkDMCAFKAIEKQMAfDcDCAwDCyAFIAUpAyggBSgCBCkDAHw3AwgMAgsgBSAFKAIEKQMANwMIDAELIAUoAhRBEkEAEBQgBUJ/NwM4DAELAkAgBSkDCEIAWQRAIAUpAwggBSkDKFgNAQsgBSgCFEESQQAQFCAFQn83AzgMAQsgBSAFKQMINwM4CyAFKQM4IQAgBUFAayQAIAALoAEBAX8jAEEgayIFJAAgBSAANgIYIAUgATYCFCAFIAI7ARIgBSADOgARIAUgBDYCDCAFIAUoAhggBSgCFCAFLwESIAUtABFBAXEgBSgCDBBjIgA2AggCQCAARQRAIAVBADYCHAwBCyAFIAUoAgggBS8BEkEAIAUoAgwQUDYCBCAFKAIIEBUgBSAFKAIENgIcCyAFKAIcIQAgBUEgaiQAIAALpgEBAX8jAEEgayIFJAAgBSAANgIYIAUgATcDECAFIAI2AgwgBSADNgIIIAUgBDYCBCAFIAUoAhggBSkDECAFKAIMQQAQPyIANgIAAkAgAEUEQCAFQX82AhwMAQsgBSgCCARAIAUoAgggBSgCAC8BCEEIdjoAAAsgBSgCBARAIAUoAgQgBSgCACgCRDYCAAsgBUEANgIcCyAFKAIcIQAgBUEgaiQAIAALjQIBAX8jAEEwayIDJAAgAyAANgIoIAMgATsBJiADIAI2AiAgAyADKAIoKAI0IANBHmogAy8BJkGABkEAEGY2AhACQCADKAIQRQ0AIAMvAR5BBUkNAAJAIAMoAhAtAABBAUYNAAwBCyADIAMoAhAgAy8BHq0QKSIANgIUIABFBEAMAQsgAygCFBCXARogAyADKAIUECo2AhggAygCIBCHASADKAIYRgRAIAMgAygCFBAwPQEOIAMgAygCFCADLwEOrRAeIAMvAQ5BgBBBABBQNgIIIAMoAggEQCADKAIgECQgAyADKAIINgIgCwsgAygCFBAWCyADIAMoAiA2AiwgAygCLCEAIANBMGokACAAC9oXAgF/AX4jAEGAAWsiBSQAIAUgADYCdCAFIAE2AnAgBSACNgJsIAUgAzoAayAFIAQ2AmQgBSAFKAJsQQBHOgAdIAVBHkEuIAUtAGtBAXEbNgIoAkACQCAFKAJsBEAgBSgCbBAwIAUoAiitVARAIAUoAmRBE0EAEBQgBUJ/NwN4DAMLDAELIAUgBSgCcCAFKAIorSAFQTBqIAUoAmQQQiIANgJsIABFBEAgBUJ/NwN4DAILCyAFKAJsQgQQHiEAQfESQfYSIAUtAGtBAXEbKAAAIAAoAABHBEAgBSgCZEETQQAQFCAFLQAdQQFxRQRAIAUoAmwQFgsgBUJ/NwN4DAELIAUoAnQQUwJAIAUtAGtBAXFFBEAgBSgCbBAdIQAgBSgCdCAAOwEIDAELIAUoAnRBADsBCAsgBSgCbBAdIQAgBSgCdCAAOwEKIAUoAmwQHSEAIAUoAnQgADsBDCAFKAJsEB1B//8DcSEAIAUoAnQgADYCECAFIAUoAmwQHTsBLiAFIAUoAmwQHTsBLCAFLwEuIQEgBS8BLCECIwBBMGsiACQAIAAgATsBLiAAIAI7ASwgAEIANwIAIABBADYCKCAAQgA3AiAgAEIANwIYIABCADcCECAAQgA3AgggAEEANgIgIAAgAC8BLEEJdkHQAGo2AhQgACAALwEsQQV2QQ9xQQFrNgIQIAAgAC8BLEEfcTYCDCAAIAAvAS5BC3Y2AgggACAALwEuQQV2QT9xNgIEIAAgAC8BLkEBdEE+cTYCACAAEBMhASAAQTBqJAAgASEAIAUoAnQgADYCFCAFKAJsECohACAFKAJ0IAA2AhggBSgCbBAqrSEGIAUoAnQgBjcDICAFKAJsECqtIQYgBSgCdCAGNwMoIAUgBSgCbBAdOwEiIAUgBSgCbBAdOwEeAkAgBS0Aa0EBcQRAIAVBADsBICAFKAJ0QQA2AjwgBSgCdEEAOwFAIAUoAnRBADYCRCAFKAJ0QgA3A0gMAQsgBSAFKAJsEB07ASAgBSgCbBAdQf//A3EhACAFKAJ0IAA2AjwgBSgCbBAdIQAgBSgCdCAAOwFAIAUoAmwQKiEAIAUoAnQgADYCRCAFKAJsECqtIQYgBSgCdCAGNwNICwJ/IwBBEGsiACAFKAJsNgIMIAAoAgwtAABBAXFFCwRAIAUoAmRBFEEAEBQgBS0AHUEBcUUEQCAFKAJsEBYLIAVCfzcDeAwBCwJAIAUoAnQvAQxBAXEEQCAFKAJ0LwEMQcAAcQRAIAUoAnRB//8DOwFSDAILIAUoAnRBATsBUgwBCyAFKAJ0QQA7AVILIAUoAnRBADYCMCAFKAJ0QQA2AjQgBSgCdEEANgI4IAUgBS8BICAFLwEiIAUvAR5qajYCJAJAIAUtAB1BAXEEQCAFKAJsEDAgBSgCJK1UBEAgBSgCZEEVQQAQFCAFQn83A3gMAwsMAQsgBSgCbBAWIAUgBSgCcCAFKAIkrUEAIAUoAmQQQiIANgJsIABFBEAgBUJ/NwN4DAILCyAFLwEiBEAgBSgCbCAFKAJwIAUvASJBASAFKAJkEIkBIQAgBSgCdCAANgIwIAUoAnQoAjBFBEACfyMAQRBrIgAgBSgCZDYCDCAAKAIMKAIAQRFGCwRAIAUoAmRBFUEAEBQLIAUtAB1BAXFFBEAgBSgCbBAWCyAFQn83A3gMAgsgBSgCdC8BDEGAEHEEQCAFKAJ0KAIwQQIQOkEFRgRAIAUoAmRBFUEAEBQgBS0AHUEBcUUEQCAFKAJsEBYLIA
VCfzcDeAwDCwsLIAUvAR4EQCAFIAUoAmwgBSgCcCAFLwEeQQAgBSgCZBBjNgIYIAUoAhhFBEAgBS0AHUEBcUUEQCAFKAJsEBYLIAVCfzcDeAwCCyAFKAIYIAUvAR5BgAJBgAQgBS0Aa0EBcRsgBSgCdEE0aiAFKAJkEJQBQQFxRQRAIAUoAhgQFSAFLQAdQQFxRQRAIAUoAmwQFgsgBUJ/NwN4DAILIAUoAhgQFSAFLQBrQQFxBEAgBSgCdEEBOgAECwsgBS8BIARAIAUoAmwgBSgCcCAFLwEgQQAgBSgCZBCJASEAIAUoAnQgADYCOCAFKAJ0KAI4RQRAIAUtAB1BAXFFBEAgBSgCbBAWCyAFQn83A3gMAgsgBSgCdC8BDEGAEHEEQCAFKAJ0KAI4QQIQOkEFRgRAIAUoAmRBFUEAEBQgBS0AHUEBcUUEQCAFKAJsEBYLIAVCfzcDeAwDCwsLIAUoAnRB9eABIAUoAnQoAjAQiwEhACAFKAJ0IAA2AjAgBSgCdEH1xgEgBSgCdCgCOBCLASEAIAUoAnQgADYCOAJAAkAgBSgCdCkDKEL/////D1ENACAFKAJ0KQMgQv////8PUQ0AIAUoAnQpA0hC/////w9SDQELIAUgBSgCdCgCNCAFQRZqQQFBgAJBgAQgBS0Aa0EBcRsgBSgCZBBmNgIMIAUoAgxFBEAgBS0AHUEBcUUEQCAFKAJsEBYLIAVCfzcDeAwCCyAFIAUoAgwgBS8BFq0QKSIANgIQIABFBEAgBSgCZEEOQQAQFCAFLQAdQQFxRQRAIAUoAmwQFgsgBUJ/NwN4DAILAkAgBSgCdCkDKEL/////D1EEQCAFKAIQEDEhBiAFKAJ0IAY3AygMAQsgBS0Aa0EBcQRAIAUoAhAhASMAQSBrIgAkACAAIAE2AhggAEIINwMQIAAgACgCGCkDECAAKQMQfDcDCAJAIAApAwggACgCGCkDEFQEQCAAKAIYQQA6AAAgAEF/NgIcDAELIAAgACgCGCAAKQMIECw2AhwLIAAoAhwaIABBIGokAAsLIAUoAnQpAyBC/////w9RBEAgBSgCEBAxIQYgBSgCdCAGNwMgCyAFLQBrQQFxRQRAIAUoAnQpA0hC/////w9RBEAgBSgCEBAxIQYgBSgCdCAGNwNICyAFKAJ0KAI8Qf//A0YEQCAFKAIQECohACAFKAJ0IAA2AjwLCyAFKAIQEEdBAXFFBEAgBSgCZEEVQQAQFCAFKAIQEBYgBS0AHUEBcUUEQCAFKAJsEBYLIAVCfzcDeAwCCyAFKAIQEBYLAn8jAEEQayIAIAUoAmw2AgwgACgCDC0AAEEBcUULBEAgBSgCZEEUQQAQFCAFLQAdQQFxRQRAIAUoAmwQFgsgBUJ/NwN4DAELIAUtAB1BAXFFBEAgBSgCbBAWCyAFKAJ0KQNIQv///////////wBWBEAgBSgCZEEEQRYQFCAFQn83A3gMAQsCfyAFKAJ0IQEgBSgCZCECIwBBIGsiACQAIAAgATYCGCAAIAI2AhQCQCAAKAIYKAIQQeMARwRAIABBAToAHwwBCyAAIAAoAhgoAjQgAEESakGBsgJBgAZBABBmNgIIAkAgACgCCARAIAAvARJBB08NAQsgACgCFEEVQQAQFCAAQQA6AB8MAQsgACAAKAIIIAAvARKtECkiATYCDCABRQRAIAAoAhRBFEEAEBQgAEEAOgAfDAELIABBAToABwJAAkACQCAAKAIMEB1BAWsOAgIAAQsgACgCGCkDKEIUVARAIABBADoABwsMAQsgACgCFEEYQQAQFCAAKAIMEBYgAEEAOgAfDAELIAAoAgxCAhAeLwAAQcGKAUcEQCAAKAIUQRhBABAUIAAoAgwQFiAAQQA6AB8MAQsCQAJAAkACQAJAIAAoAgwQlwFBAWsOAwABAgMLIABBgQI7AQQMAwsgAEGCAjsBBAwCCyAAQYMCOwEEDAELIAAoAhRBGEEAEBQgACgCDBAWIABBADoAHwwBCyAALwESQQdHBEAgACgCFEEVQQAQFCAAKAIMEBYgAEEAOgAfDAELIAAoAhggAC0AB0EBcToABiAAKAIYIAAvAQQ7AVIgACgCDBAdQf//A3EhASAAKAIYIAE2AhAgACgCDBAWIABBAToAHwsgAC0AH0EBcSEBIABBIGokACABQQFxRQsEQCAFQn83A3gMAQsgBSgCdCgCNBCTASEAIAUoAnQgADYCNCAFIAUoAiggBSgCJGqtNwN4CyAFKQN4IQYgBUGAAWokACAGC80BAQF/IwBBEGsiAyQAIAMgADYCDCADIAE2AgggAyACNgIEIAMgA0EMakG4mwEQEjYCAAJAIAMoAgBFBEAgAygCBEEhOwEAIAMoAghBADsBAAwBCyADKAIAKAIUQdAASARAIAMoAgBB0AA2AhQLIAMoAgQgAygCACgCDCADKAIAKAIUQQl0IAMoAgAoAhBBBXRqQeC/AmtqOwEAIAMoAgggAygCACgCCEELdCADKAIAKAIEQQV0aiADKAIAKAIAQQF1ajsBAAsgA0EQaiQAC4MDAQF/IwBBIGsiAyQAIAMgADsBGiADIAE2AhQgAyACNgIQIAMgAygCFCADQQhqQcAAQQAQRiIANgIMAkAgAEUEQCADQQA2AhwMAQsgAygCCEEFakH//wNLBEAgAygCEEESQQAQFCADQQA2AhwMAQsgA0EAIAMoAghBBWqtECkiADYCBCAARQRAIAMoAhBBDkEAEBQgA0EANgIcDAELIAMoAgRBARCWASADKAIEIAMoAhQQhwEQISADKAIEIAMoAgwgAygCCBBBAn8jAEEQayIAIAMoAgQ2AgwgACgCDC0AAEEBcUULBEAgAygCEEEUQQAQFCADKAIEEBYgA0EANgIcDAELIAMgAy8BGgJ/IwBBEGsiACADKAIENgIMAn4gACgCDC0AAEEBcQRAIAAoAgwpAxAMAQtCAAunQf//A3ELAn8jAEEQayIAIAMoAgQ2AgwgACgCDCgCBAtBgAYQVTYCACADKAIEEBYgAyADKAIANgIcCyADKAIcIQAgA0EgaiQAIAALtAIBAX8jAEEwayIDJAAgAyAANgIoIAMgATcDICADIAI2AhwCQCADKQMgUARAIANBAToALwwBCyADIAMoAigpAxAgAykDIHw3AwgCQCADKQMIIAMpAyBaBEAgAykDCEL/////AFgNAQsgAygCHEEOQQAQFCADQQA6AC8MAQsgAyADKAIoKAIAIAMpAwinQQR0EE4iADYCBCAARQRAIAMoAhxBDkEAEBQgA0EAOgAvDAELIAMoAiggAygCBDYCACADIAMoAigpAwg3AxADQCADKQMQIAMpAwhaRQRAIAMoAigoAgAgAykDEKdBBHRqELUBIAMgAykDEEIBfDcDEAwBCwsgAygCKCADKQMIIgE3AxAgAygCKCABNwMIIANBAToALwsgAy0AL0EBcSEAIANBMGokACAAC8wBAQF/IwBBIGsiAiQAIAIgADcDECACIAE2AgwgAkEwEBgiATYCCAJAIAFFBEAgAigCD
EEOQQAQFCACQQA2AhwMAQsgAigCCEEANgIAIAIoAghCADcDECACKAIIQgA3AwggAigCCEIANwMgIAIoAghCADcDGCACKAIIQQA2AiggAigCCEEAOgAsIAIoAgggAikDECACKAIMEI8BQQFxRQRAIAIoAggQJSACQQA2AhwMAQsgAiACKAIINgIcCyACKAIcIQEgAkEgaiQAIAEL1gIBAX8jAEEgayIDJAAgAyAANgIYIAMgATYCFCADIAI2AhAgAyADQQxqQgQQKTYCCAJAIAMoAghFBEAgA0F/NgIcDAELA0AgAygCFARAIAMoAhQoAgQgAygCEHFBgAZxBEAgAygCCEIAECwaIAMoAgggAygCFC8BCBAfIAMoAgggAygCFC8BChAfAn8jAEEQayIAIAMoAgg2AgwgACgCDC0AAEEBcUULBEAgAygCGEEIakEUQQAQFCADKAIIEBYgA0F/NgIcDAQLIAMoAhggA0EMakIEEDZBAEgEQCADKAIIEBYgA0F/NgIcDAQLIAMoAhQvAQoEQCADKAIYIAMoAhQoAgwgAygCFC8BCq0QNkEASARAIAMoAggQFiADQX82AhwMBQsLCyADIAMoAhQoAgA2AhQMAQsLIAMoAggQFiADQQA2AhwLIAMoAhwhACADQSBqJAAgAAtoAQF/IwBBEGsiAiAANgIMIAIgATYCCCACQQA7AQYDQCACKAIMBEAgAigCDCgCBCACKAIIcUGABnEEQCACIAIoAgwvAQogAi8BBkEEamo7AQYLIAIgAigCDCgCADYCDAwBCwsgAi8BBgvwAQEBfyMAQRBrIgEkACABIAA2AgwgASABKAIMNgIIIAFBADYCBANAIAEoAgwEQAJAAkAgASgCDC8BCEH1xgFGDQAgASgCDC8BCEH14AFGDQAgASgCDC8BCEGBsgJGDQAgASgCDC8BCEEBRw0BCyABIAEoAgwoAgA2AgAgASgCCCABKAIMRgRAIAEgASgCADYCCAsgASgCDEEANgIAIAEoAgwQIyABKAIEBEAgASgCBCABKAIANgIACyABIAEoAgA2AgwMAgsgASABKAIMNgIEIAEgASgCDCgCADYCDAwBCwsgASgCCCEAIAFBEGokACAAC7IEAQF/IwBBQGoiBSQAIAUgADYCOCAFIAE7ATYgBSACNgIwIAUgAzYCLCAFIAQ2AiggBSAFKAI4IAUvATatECkiADYCJAJAIABFBEAgBSgCKEEOQQAQFCAFQQA6AD8MAQsgBUEANgIgIAVBADYCGANAAn8jAEEQayIAIAUoAiQ2AgwgACgCDC0AAEEBcQsEfyAFKAIkEDBCBFoFQQALQQFxBEAgBSAFKAIkEB07ARYgBSAFKAIkEB07ARQgBSAFKAIkIAUvARStEB42AhAgBSgCEEUEQCAFKAIoQRVBABAUIAUoAiQQFiAFKAIYECMgBUEAOgA/DAMLIAUgBS8BFiAFLwEUIAUoAhAgBSgCMBBVIgA2AhwgAEUEQCAFKAIoQQ5BABAUIAUoAiQQFiAFKAIYECMgBUEAOgA/DAMLAkAgBSgCGARAIAUoAiAgBSgCHDYCACAFIAUoAhw2AiAMAQsgBSAFKAIcIgA2AiAgBSAANgIYCwwBCwsgBSgCJBBHQQFxRQRAIAUgBSgCJBAwPgIMIAUgBSgCJCAFKAIMrRAeNgIIAkACQCAFKAIMQQRPDQAgBSgCCEUNACAFKAIIQZEVIAUoAgwQT0UNAQsgBSgCKEEVQQAQFCAFKAIkEBYgBSgCGBAjIAVBADoAPwwCCwsgBSgCJBAWAkAgBSgCLARAIAUoAiwgBSgCGDYCAAwBCyAFKAIYECMLIAVBAToAPwsgBS0AP0EBcSEAIAVBQGskACAAC+8CAQF/IwBBIGsiAiQAIAIgADYCGCACIAE2AhQCQCACKAIYRQRAIAIgAigCFDYCHAwBCyACIAIoAhg2AggDQCACKAIIKAIABEAgAiACKAIIKAIANgIIDAELCwNAIAIoAhQEQCACIAIoAhQoAgA2AhAgAkEANgIEIAIgAigCGDYCDANAAkAgAigCDEUNAAJAIAIoAgwvAQggAigCFC8BCEcNACACKAIMLwEKIAIoAhQvAQpHDQAgAigCDC8BCgRAIAIoAgwoAgwgAigCFCgCDCACKAIMLwEKEE8NAQsgAigCDCIAIAAoAgQgAigCFCgCBEGABnFyNgIEIAJBATYCBAwBCyACIAIoAgwoAgA2AgwMAQsLIAIoAhRBADYCAAJAIAIoAgQEQCACKAIUECMMAQsgAigCCCACKAIUIgA2AgAgAiAANgIICyACIAIoAhA2AhQMAQsLIAIgAigCGDYCHAsgAigCHCEAIAJBIGokACAAC18BAX8jAEEQayICJAAgAiAANgIIIAIgAToAByACIAIoAghCARAeNgIAAkAgAigCAEUEQCACQX82AgwMAQsgAigCACACLQAHOgAAIAJBADYCDAsgAigCDBogAkEQaiQAC1QBAX8jAEEQayIBJAAgASAANgIIIAEgASgCCEIBEB42AgQCQCABKAIERQRAIAFBADoADwwBCyABIAEoAgQtAAA6AA8LIAEtAA8hACABQRBqJAAgAAucBgECfyMAQSBrIgIkACACIAA2AhggAiABNwMQAkAgAikDECACKAIYKQMwWgRAIAIoAhhBCGpBEkEAEBQgAkF/NgIcDAELIAIoAhgoAhhBAnEEQCACKAIYQQhqQRlBABAUIAJBfzYCHAwBCyACIAIoAhggAikDEEEAIAIoAhhBCGoQTSIANgIMIABFBEAgAkF/NgIcDAELIAIoAhgoAlAgAigCDCACKAIYQQhqEFlBAXFFBEAgAkF/NgIcDAELAn8gAigCGCEDIAIpAxAhASMAQTBrIgAkACAAIAM2AiggACABNwMgIABBATYCHAJAIAApAyAgACgCKCkDMFoEQCAAKAIoQQhqQRJBABAUIABBfzYCLAwBCwJAIAAoAhwNACAAKAIoKAJAIAApAyCnQQR0aigCBEUNACAAKAIoKAJAIAApAyCnQQR0aigCBCgCAEECcUUNAAJAIAAoAigoAkAgACkDIKdBBHRqKAIABEAgACAAKAIoIAApAyBBCCAAKAIoQQhqEE0iAzYCDCADRQRAIABBfzYCLAwECyAAIAAoAiggACgCDEEAQQAQWDcDEAJAIAApAxBCAFMNACAAKQMQIAApAyBRDQAgACgCKEEIakEKQQAQFCAAQX82AiwMBAsMAQsgAEEANgIMCyAAIAAoAiggACkDIEEAIAAoAihBCGoQTSIDNgIIIANFBEAgAEF/NgIsDAILIAAoAgwEQCAAKAIoKAJQIAAoAgwgACkDIEEAIAAoAihBCGoQdEEBcUUEQCAAQX82AiwMAwsLIAAoAigoAlAgACgCCCAAKAIoQQhqEFlBAXFFBEAgACgCKCgCUCAAKAIMQQAQWRogAEF/NgIsDAILCyAAKAIoKAJAIAApAyCnQQR0aigCBBA3IAAoAigoAkAgACkDIKdBBHRqQQA2AgQgACgCKCgCQCAAKQMg
p0EEdGoQXiAAQQA2AiwLIAAoAiwhAyAAQTBqJAAgAwsEQCACQX82AhwMAQsgAigCGCgCQCACKQMQp0EEdGpBAToADCACQQA2AhwLIAIoAhwhACACQSBqJAAgAAulBAEBfyMAQTBrIgUkACAFIAA2AiggBSABNwMgIAUgAjYCHCAFIAM6ABsgBSAENgIUAkAgBSgCKCAFKQMgQQBBABA/RQRAIAVBfzYCLAwBCyAFKAIoKAIYQQJxBEAgBSgCKEEIakEZQQAQFCAFQX82AiwMAQsgBSAFKAIoKAJAIAUpAyCnQQR0ajYCECAFAn8gBSgCECgCAARAIAUoAhAoAgAvAQhBCHYMAQtBAws6AAsgBQJ/IAUoAhAoAgAEQCAFKAIQKAIAKAJEDAELQYCA2I14CzYCBEEBIQAgBSAFLQAbIAUtAAtGBH8gBSgCFCAFKAIERwVBAQtBAXE2AgwCQCAFKAIMBEAgBSgCECgCBEUEQCAFKAIQKAIAEEAhACAFKAIQIAA2AgQgAEUEQCAFKAIoQQhqQQ5BABAUIAVBfzYCLAwECwsgBSgCECgCBCAFKAIQKAIELwEIQf8BcSAFLQAbQQh0cjsBCCAFKAIQKAIEIAUoAhQ2AkQgBSgCECgCBCIAIAAoAgBBEHI2AgAMAQsgBSgCECgCBARAIAUoAhAoAgQiACAAKAIAQW9xNgIAAkAgBSgCECgCBCgCAEUEQCAFKAIQKAIEEDcgBSgCEEEANgIEDAELIAUoAhAoAgQgBSgCECgCBC8BCEH/AXEgBS0AC0EIdHI7AQggBSgCECgCBCAFKAIENgJECwsLIAVBADYCLAsgBSgCLCEAIAVBMGokACAAC90PAgF/AX4jAEFAaiIEJAAgBCAANgI0IARCfzcDKCAEIAE2AiQgBCACNgIgIAQgAzYCHAJAIAQoAjQoAhhBAnEEQCAEKAI0QQhqQRlBABAUIARCfzcDOAwBCyAEIAQoAjQpAzA3AxAgBCkDKEJ/UQRAIARCfzcDCCAEKAIcQYDAAHEEQCAEIAQoAjQgBCgCJCAEKAIcQQAQWDcDCAsgBCkDCEJ/UQRAIAQoAjQhASMAQUBqIgAkACAAIAE2AjQCQCAAKAI0KQM4IAAoAjQpAzBCAXxYBEAgACAAKAI0KQM4NwMYIAAgACkDGEIBhjcDEAJAIAApAxBCEFQEQCAAQhA3AxAMAQsgACkDEEKACFYEQCAAQoAINwMQCwsgACAAKQMQIAApAxh8NwMYIAAgACkDGKdBBHStNwMIIAApAwggACgCNCkDOKdBBHStVARAIAAoAjRBCGpBDkEAEBQgAEJ/NwM4DAILIAAgACgCNCgCQCAAKQMYp0EEdBBONgIkIAAoAiRFBEAgACgCNEEIakEOQQAQFCAAQn83AzgMAgsgACgCNCAAKAIkNgJAIAAoAjQgACkDGDcDOAsgACgCNCIBKQMwIQUgASAFQgF8NwMwIAAgBTcDKCAAKAI0KAJAIAApAyinQQR0ahC1ASAAIAApAyg3AzgLIAApAzghBSAAQUBrJAAgBCAFNwMIIAVCAFMEQCAEQn83AzgMAwsLIAQgBCkDCDcDKAsCQCAEKAIkRQ0AIAQoAjQhASAEKQMoIQUgBCgCJCECIAQoAhwhAyMAQUBqIgAkACAAIAE2AjggACAFNwMwIAAgAjYCLCAAIAM2AigCQCAAKQMwIAAoAjgpAzBaBEAgACgCOEEIakESQQAQFCAAQX82AjwMAQsgACgCOCgCGEECcQRAIAAoAjhBCGpBGUEAEBQgAEF/NgI8DAELAkACQCAAKAIsRQ0AIAAoAiwsAABFDQAgACAAKAIsIAAoAiwQLkH//wNxIAAoAiggACgCOEEIahBQIgE2AiAgAUUEQCAAQX82AjwMAwsCQCAAKAIoQYAwcQ0AIAAoAiBBABA6QQNHDQAgACgCIEECNgIICwwBCyAAQQA2AiALIAAgACgCOCAAKAIsQQBBABBYIgU3AxACQCAFQgBTDQAgACkDECAAKQMwUQ0AIAAoAiAQJCAAKAI4QQhqQQpBABAUIABBfzYCPAwBCwJAIAApAxBCAFMNACAAKQMQIAApAzBSDQAgACgCIBAkIABBADYCPAwBCyAAIAAoAjgoAkAgACkDMKdBBHRqNgIkAkAgACgCJCgCAARAIAAgACgCJCgCACgCMCAAKAIgEIYBQQBHOgAfDAELIABBADoAHwsCQCAALQAfQQFxDQAgACgCJCgCBA0AIAAoAiQoAgAQQCEBIAAoAiQgATYCBCABRQRAIAAoAjhBCGpBDkEAEBQgACgCIBAkIABBfzYCPAwCCwsgAAJ/IAAtAB9BAXEEQCAAKAIkKAIAKAIwDAELIAAoAiALQQBBACAAKAI4QQhqEEYiATYCCCABRQRAIAAoAiAQJCAAQX82AjwMAQsCQCAAKAIkKAIEBEAgACAAKAIkKAIEKAIwNgIEDAELAkAgACgCJCgCAARAIAAgACgCJCgCACgCMDYCBAwBCyAAQQA2AgQLCwJAIAAoAgQEQCAAIAAoAgRBAEEAIAAoAjhBCGoQRiIBNgIMIAFFBEAgACgCIBAkIABBfzYCPAwDCwwBCyAAQQA2AgwLIAAoAjgoAlAgACgCCCAAKQMwQQAgACgCOEEIahB0QQFxRQRAIAAoAiAQJCAAQX82AjwMAQsgACgCDARAIAAoAjgoAlAgACgCDEEAEFkaCwJAIAAtAB9BAXEEQCAAKAIkKAIEBEAgACgCJCgCBCgCAEECcQRAIAAoAiQoAgQoAjAQJCAAKAIkKAIEIgEgASgCAEF9cTYCAAJAIAAoAiQoAgQoAgBFBEAgACgCJCgCBBA3IAAoAiRBADYCBAwBCyAAKAIkKAIEIAAoAiQoAgAoAjA2AjALCwsgACgCIBAkDAELIAAoAiQoAgQoAgBBAnEEQCAAKAIkKAIEKAIwECQLIAAoAiQoAgQiASABKAIAQQJyNgIAIAAoAiQoAgQgACgCIDYCMAsgAEEANgI8CyAAKAI8IQEgAEFAayQAIAFFDQAgBCgCNCkDMCAEKQMQUgRAIAQoAjQoAkAgBCkDKKdBBHRqEHcgBCgCNCAEKQMQNwMwCyAEQn83AzgMAQsgBCgCNCgCQCAEKQMop0EEdGoQXgJAIAQoAjQoAkAgBCkDKKdBBHRqKAIARQ0AIAQoAjQoAkAgBCkDKKdBBHRqKAIEBEAgBCgCNCgCQCAEKQMop0EEdGooAgQoAgBBAXENAQsgBCgCNCgCQCAEKQMop0EEdGooAgRFBEAgBCgCNCgCQCAEKQMop0EEdGooAgAQQCEAIAQoAjQoAkAgBCkDKKdBBHRqIAA2AgQgAEUEQCAEKAI0QQhqQQ5BABAUIARCfzcDOAwDCwsgBCgCNCgCQCAEKQMop0EEdGooAgRBfjYCECAEKAI0KAJAIAQpAyinQQR0aigCBCIAIAAoAgBBAXI2AgALIAQoAjQoAkAgBCkDKKdBBHRqIAQoAiA2AgggBCAEKQMoNwM4CyAEKQM4IQUgBEFAayQAIAULqgEBAX8jAEE
wayICJAAgAiAANgIoIAIgATcDICACQQA2AhwCQAJAIAIoAigoAiRBAUYEQCACKAIcRQ0BIAIoAhxBAUYNASACKAIcQQJGDQELIAIoAihBDGpBEkEAEBQgAkF/NgIsDAELIAIgAikDIDcDCCACIAIoAhw2AhAgAkF/QQAgAigCKCACQQhqQhBBDBAgQgBTGzYCLAsgAigCLCEAIAJBMGokACAAC6UyAwZ/AX4BfCMAQeAAayIEJAAgBCAANgJYIAQgATYCVCAEIAI2AlACQAJAIAQoAlRBAE4EQCAEKAJYDQELIAQoAlBBEkEAEBQgBEEANgJcDAELIAQgBCgCVDYCTCMAQRBrIgAgBCgCWDYCDCAEIAAoAgwpAxg3A0BB4JoBKQMAQn9RBEAgBEF/NgIUIARBAzYCECAEQQc2AgwgBEEGNgIIIARBAjYCBCAEQQE2AgBB4JoBQQAgBBA0NwMAIARBfzYCNCAEQQ82AjAgBEENNgIsIARBDDYCKCAEQQo2AiQgBEEJNgIgQeiaAUEIIARBIGoQNDcDAAtB4JoBKQMAIAQpA0BB4JoBKQMAg1IEQCAEKAJQQRxBABAUIARBADYCXAwBC0HomgEpAwAgBCkDQEHomgEpAwCDUgRAIAQgBCgCTEEQcjYCTAsgBCgCTEEYcUEYRgRAIAQoAlBBGUEAEBQgBEEANgJcDAELIAQoAlghASAEKAJQIQIjAEHQAGsiACQAIAAgATYCSCAAIAI2AkQgAEEIahA7AkAgACgCSCAAQQhqEDkEQCMAQRBrIgEgACgCSDYCDCAAIAEoAgxBDGo2AgQjAEEQayIBIAAoAgQ2AgwCQCABKAIMKAIAQQVHDQAjAEEQayIBIAAoAgQ2AgwgASgCDCgCBEEsRw0AIABBADYCTAwCCyAAKAJEIAAoAgQQRSAAQX82AkwMAQsgAEEBNgJMCyAAKAJMIQEgAEHQAGokACAEIAE2AjwCQAJAAkAgBCgCPEEBag4CAAECCyAEQQA2AlwMAgsgBCgCTEEBcUUEQCAEKAJQQQlBABAUIARBADYCXAwCCyAEIAQoAlggBCgCTCAEKAJQEGk2AlwMAQsgBCgCTEECcQRAIAQoAlBBCkEAEBQgBEEANgJcDAELIAQoAlgQSEEASARAIAQoAlAgBCgCWBAXIARBADYCXAwBCwJAIAQoAkxBCHEEQCAEIAQoAlggBCgCTCAEKAJQEGk2AjgMAQsgBCgCWCEAIAQoAkwhASAEKAJQIQIjAEHwAGsiAyQAIAMgADYCaCADIAE2AmQgAyACNgJgIANBIGoQOwJAIAMoAmggA0EgahA5QQBIBEAgAygCYCADKAJoEBcgA0EANgJsDAELIAMpAyBCBINQBEAgAygCYEEEQYoBEBQgA0EANgJsDAELIAMgAykDODcDGCADIAMoAmggAygCZCADKAJgEGkiADYCXCAARQRAIANBADYCbAwBCwJAIAMpAxhQRQ0AIAMoAmgQngFBAXFFDQAgAyADKAJcNgJsDAELIAMoAlwhACADKQMYIQkjAEHgAGsiAiQAIAIgADYCWCACIAk3A1ACQCACKQNQQhZUBEAgAigCWEEIakETQQAQFCACQQA2AlwMAQsgAgJ+IAIpA1BCqoAEVARAIAIpA1AMAQtCqoAECzcDMCACKAJYKAIAQgAgAikDMH1BAhAnQQBIBEAjAEEQayIAIAIoAlgoAgA2AgwgAiAAKAIMQQxqNgIIAkACfyMAQRBrIgAgAigCCDYCDCAAKAIMKAIAQQRGCwRAIwBBEGsiACACKAIINgIMIAAoAgwoAgRBFkYNAQsgAigCWEEIaiACKAIIEEUgAkEANgJcDAILCyACIAIoAlgoAgAQSSIJNwM4IAlCAFMEQCACKAJYQQhqIAIoAlgoAgAQFyACQQA2AlwMAQsgAiACKAJYKAIAIAIpAzBBACACKAJYQQhqEEIiADYCDCAARQRAIAJBADYCXAwBCyACQn83AyAgAkEANgJMIAIpAzBCqoAEWgRAIAIoAgxCFBAsGgsgAkEQakETQQAQFCACIAIoAgxCABAeNgJEA0ACQCACKAJEIQEgAigCDBAwQhJ9pyEFIwBBIGsiACQAIAAgATYCGCAAIAU2AhQgAEHsEjYCECAAQQQ2AgwCQAJAIAAoAhQgACgCDE8EQCAAKAIMDQELIABBADYCHAwBCyAAIAAoAhhBAWs2AggDQAJAIAAgACgCCEEBaiAAKAIQLQAAIAAoAhggACgCCGsgACgCFCAAKAIMa2oQqwEiATYCCCABRQ0AIAAoAghBAWogACgCEEEBaiAAKAIMQQFrEE8NASAAIAAoAgg2AhwMAgsLIABBADYCHAsgACgCHCEBIABBIGokACACIAE2AkQgAUUNACACKAIMIAIoAkQCfyMAQRBrIgAgAigCDDYCDCAAKAIMKAIEC2usECwaIAIoAlghASACKAIMIQUgAikDOCEJIwBB8ABrIgAkACAAIAE2AmggACAFNgJkIAAgCTcDWCAAIAJBEGo2AlQjAEEQayIBIAAoAmQ2AgwgAAJ+IAEoAgwtAABBAXEEQCABKAIMKQMQDAELQgALNwMwAkAgACgCZBAwQhZUBEAgACgCVEETQQAQFCAAQQA2AmwMAQsgACgCZEIEEB4oAABB0JaVMEcEQCAAKAJUQRNBABAUIABBADYCbAwBCwJAAkAgACkDMEIUVA0AIwBBEGsiASAAKAJkNgIMIAEoAgwoAgQgACkDMKdqQRRrKAAAQdCWmThHDQAgACgCZCAAKQMwQhR9ECwaIAAoAmgoAgAhBSAAKAJkIQYgACkDWCEJIAAoAmgoAhQhByAAKAJUIQgjAEGwAWsiASQAIAEgBTYCqAEgASAGNgKkASABIAk3A5gBIAEgBzYClAEgASAINgKQASMAQRBrIgUgASgCpAE2AgwgAQJ+IAUoAgwtAABBAXEEQCAFKAIMKQMQDAELQgALNwMYIAEoAqQBQgQQHhogASABKAKkARAdQf//A3E2AhAgASABKAKkARAdQf//A3E2AgggASABKAKkARAxNwM4AkAgASkDOEL///////////8AVgRAIAEoApABQQRBFhAUIAFBADYCrAEMAQsgASkDOEI4fCABKQMYIAEpA5gBfFYEQCABKAKQAUEVQQAQFCABQQA2AqwBDAELAkACQCABKQM4IAEpA5gBVA0AIAEpAzhCOHwgASkDmAECfiMAQRBrIgUgASgCpAE2AgwgBSgCDCkDCAt8Vg0AIAEoAqQBIAEpAzggASkDmAF9ECwaIAFBADoAFwwBCyABKAKoASABKQM4QQAQJ0EASARAIAEoApABIAEoAqgBEBcgAUEANgKsAQwCCyABIAEoAqgBQjggAUFAayABKAKQARBCIgU2AqQBIAVFBEAgAUEANgKsAQwCCyABQQE6ABcLIAEoAqQBQgQQHigAAEHQlpkwRwRAIAEoApABQRVBABAUIAEtABdBAXEEQCABKAKkARAWCyABQQA2AqwBDAELIAEgASgCpAEQMTcDMAJAIAEoAp
QBQQRxRQ0AIAEpAzAgASkDOHxCDHwgASkDmAEgASkDGHxRDQAgASgCkAFBFUEAEBQgAS0AF0EBcQRAIAEoAqQBEBYLIAFBADYCrAEMAQsgASgCpAFCBBAeGiABIAEoAqQBECo2AgwgASABKAKkARAqNgIEIAEoAhBB//8DRgRAIAEgASgCDDYCEAsgASgCCEH//wNGBEAgASABKAIENgIICwJAIAEoApQBQQRxRQ0AIAEoAgggASgCBEYEQCABKAIQIAEoAgxGDQELIAEoApABQRVBABAUIAEtABdBAXEEQCABKAKkARAWCyABQQA2AqwBDAELAkAgASgCEEUEQCABKAIIRQ0BCyABKAKQAUEBQQAQFCABLQAXQQFxBEAgASgCpAEQFgsgAUEANgKsAQwBCyABIAEoAqQBEDE3AyggASABKAKkARAxNwMgIAEpAyggASkDIFIEQCABKAKQAUEBQQAQFCABLQAXQQFxBEAgASgCpAEQFgsgAUEANgKsAQwBCyABIAEoAqQBEDE3AzAgASABKAKkARAxNwOAAQJ/IwBBEGsiBSABKAKkATYCDCAFKAIMLQAAQQFxRQsEQCABKAKQAUEUQQAQFCABLQAXQQFxBEAgASgCpAEQFgsgAUEANgKsAQwBCyABLQAXQQFxBEAgASgCpAEQFgsCQCABKQOAAUL///////////8AWARAIAEpA4ABIAEpA4ABIAEpAzB8WA0BCyABKAKQAUEEQRYQFCABQQA2AqwBDAELIAEpA4ABIAEpAzB8IAEpA5gBIAEpAzh8VgRAIAEoApABQRVBABAUIAFBADYCrAEMAQsCQCABKAKUAUEEcUUNACABKQOAASABKQMwfCABKQOYASABKQM4fFENACABKAKQAUEVQQAQFCABQQA2AqwBDAELIAEpAyggASkDMEIugFYEQCABKAKQAUEVQQAQFCABQQA2AqwBDAELIAEgASkDKCABKAKQARCQASIFNgKMASAFRQRAIAFBADYCrAEMAQsgASgCjAFBAToALCABKAKMASABKQMwNwMYIAEoAowBIAEpA4ABNwMgIAEgASgCjAE2AqwBCyABKAKsASEFIAFBsAFqJAAgACAFNgJQDAELIAAoAmQgACkDMBAsGiAAKAJkIQUgACkDWCEJIAAoAmgoAhQhBiAAKAJUIQcjAEHQAGsiASQAIAEgBTYCSCABIAk3A0AgASAGNgI8IAEgBzYCOAJAIAEoAkgQMEIWVARAIAEoAjhBFUEAEBQgAUEANgJMDAELIwBBEGsiBSABKAJINgIMIAECfiAFKAIMLQAAQQFxBEAgBSgCDCkDEAwBC0IACzcDCCABKAJIQgQQHhogASgCSBAqBEAgASgCOEEBQQAQFCABQQA2AkwMAQsgASABKAJIEB1B//8Dca03AyggASABKAJIEB1B//8Dca03AyAgASkDICABKQMoUgRAIAEoAjhBE0EAEBQgAUEANgJMDAELIAEgASgCSBAqrTcDGCABIAEoAkgQKq03AxAgASkDECABKQMQIAEpAxh8VgRAIAEoAjhBBEEWEBQgAUEANgJMDAELIAEpAxAgASkDGHwgASkDQCABKQMIfFYEQCABKAI4QRVBABAUIAFBADYCTAwBCwJAIAEoAjxBBHFFDQAgASkDECABKQMYfCABKQNAIAEpAwh8UQ0AIAEoAjhBFUEAEBQgAUEANgJMDAELIAEgASkDICABKAI4EJABIgU2AjQgBUUEQCABQQA2AkwMAQsgASgCNEEAOgAsIAEoAjQgASkDGDcDGCABKAI0IAEpAxA3AyAgASABKAI0NgJMCyABKAJMIQUgAUHQAGokACAAIAU2AlALIAAoAlBFBEAgAEEANgJsDAELIAAoAmQgACkDMEIUfBAsGiAAIAAoAmQQHTsBTiAAKAJQKQMgIAAoAlApAxh8IAApA1ggACkDMHxWBEAgACgCVEEVQQAQFCAAKAJQECUgAEEANgJsDAELAkAgAC8BTkUEQCAAKAJoKAIEQQRxRQ0BCyAAKAJkIAApAzBCFnwQLBogACAAKAJkEDA3AyACQCAAKQMgIAAvAU6tWgRAIAAoAmgoAgRBBHFFDQEgACkDICAALwFOrVENAQsgACgCVEEVQQAQFCAAKAJQECUgAEEANgJsDAILIAAvAU4EQCAAKAJkIAAvAU6tEB4gAC8BTkEAIAAoAlQQUCEBIAAoAlAgATYCKCABRQRAIAAoAlAQJSAAQQA2AmwMAwsLCwJAIAAoAlApAyAgACkDWFoEQCAAKAJkIAAoAlApAyAgACkDWH0QLBogACAAKAJkIAAoAlApAxgQHiIBNgIcIAFFBEAgACgCVEEVQQAQFCAAKAJQECUgAEEANgJsDAMLIAAgACgCHCAAKAJQKQMYECkiATYCLCABRQRAIAAoAlRBDkEAEBQgACgCUBAlIABBADYCbAwDCwwBCyAAQQA2AiwgACgCaCgCACAAKAJQKQMgQQAQJ0EASARAIAAoAlQgACgCaCgCABAXIAAoAlAQJSAAQQA2AmwMAgsgACgCaCgCABBJIAAoAlApAyBSBEAgACgCVEETQQAQFCAAKAJQECUgAEEANgJsDAILCyAAIAAoAlApAxg3AzggAEIANwNAA0ACQCAAKQM4UA0AIABBADoAGyAAKQNAIAAoAlApAwhRBEAgACgCUC0ALEEBcQ0BIAApAzhCLlQNASAAKAJQQoCABCAAKAJUEI8BQQFxRQRAIAAoAlAQJSAAKAIsEBYgAEEANgJsDAQLIABBAToAGwsjAEEQayIBJAAgAUHYABAYIgU2AggCQCAFRQRAIAFBADYCDAwBCyABKAIIEFMgASABKAIINgIMCyABKAIMIQUgAUEQaiQAIAUhASAAKAJQKAIAIAApA0CnQQR0aiABNgIAAkAgAQRAIAAgACgCUCgCACAAKQNAp0EEdGooAgAgACgCaCgCACAAKAIsQQAgACgCVBCMASIJNwMQIAlCAFkNAQsCQCAALQAbQQFxRQ0AIwBBEGsiASAAKAJUNgIMIAEoAgwoAgBBE0cNACAAKAJUQRVBABAUCyAAKAJQECUgACgCLBAWIABBADYCbAwDCyAAIAApA0BCAXw3A0AgACAAKQM4IAApAxB9NwM4DAELCwJAIAApA0AgACgCUCkDCFEEQCAAKQM4UA0BCyAAKAJUQRVBABAUIAAoAiwQFiAAKAJQECUgAEEANgJsDAELIAAoAmgoAgRBBHEEQAJAIAAoAiwEQCAAIAAoAiwQR0EBcToADwwBCyAAIAAoAmgoAgAQSTcDACAAKQMAQgBTBEAgACgCVCAAKAJoKAIAEBcgACgCUBAlIABBADYCbAwDCyAAIAApAwAgACgCUCkDICAAKAJQKQMYfFE6AA8LIAAtAA9BAXFFBEAgACgCVEEVQQAQFCAAKAIsEBYgACgCUBAlIABBADYCbAwCCwsgACgCLBAWIAAgACgCUDYCbAsgACgCbCEBIABB8ABqJAAgAiABNgJIIAEEQAJAIAIoAkwEQCACKQMgQgBXB
EAgAiACKAJYIAIoAkwgAkEQahBoNwMgCyACIAIoAlggAigCSCACQRBqEGg3AygCQCACKQMgIAIpAyhTBEAgAigCTBAlIAIgAigCSDYCTCACIAIpAyg3AyAMAQsgAigCSBAlCwwBCyACIAIoAkg2AkwCQCACKAJYKAIEQQRxBEAgAiACKAJYIAIoAkwgAkEQahBoNwMgDAELIAJCADcDIAsLIAJBADYCSAsgAiACKAJEQQFqNgJEIAIoAgwgAigCRAJ/IwBBEGsiACACKAIMNgIMIAAoAgwoAgQLa6wQLBoMAQsLIAIoAgwQFiACKQMgQgBTBEAgAigCWEEIaiACQRBqEEUgAigCTBAlIAJBADYCXAwBCyACIAIoAkw2AlwLIAIoAlwhACACQeAAaiQAIAMgADYCWCAARQRAIAMoAmAgAygCXEEIahBFIwBBEGsiACADKAJoNgIMIAAoAgwiACAAKAIwQQFqNgIwIAMoAlwQPCADQQA2AmwMAQsgAygCXCADKAJYKAIANgJAIAMoAlwgAygCWCkDCDcDMCADKAJcIAMoAlgpAxA3AzggAygCXCADKAJYKAIoNgIgIAMoAlgQFSADKAJcKAJQIQAgAygCXCkDMCEJIAMoAlxBCGohAiMAQSBrIgEkACABIAA2AhggASAJNwMQIAEgAjYCDAJAIAEpAxBQBEAgAUEBOgAfDAELIwBBIGsiACABKQMQNwMQIAAgACkDELpEAAAAAAAA6D+jOQMIAkAgACsDCEQAAOD////vQWQEQCAAQX82AgQMAQsgAAJ/IAArAwgiCkQAAAAAAADwQWMgCkQAAAAAAAAAAGZxBEAgCqsMAQtBAAs2AgQLAkAgACgCBEGAgICAeEsEQCAAQYCAgIB4NgIcDAELIAAgACgCBEEBazYCBCAAIAAoAgQgACgCBEEBdnI2AgQgACAAKAIEIAAoAgRBAnZyNgIEIAAgACgCBCAAKAIEQQR2cjYCBCAAIAAoAgQgACgCBEEIdnI2AgQgACAAKAIEIAAoAgRBEHZyNgIEIAAgACgCBEEBajYCBCAAIAAoAgQ2AhwLIAEgACgCHDYCCCABKAIIIAEoAhgoAgBNBEAgAUEBOgAfDAELIAEoAhggASgCCCABKAIMEFpBAXFFBEAgAUEAOgAfDAELIAFBAToAHwsgAS0AHxogAUEgaiQAIANCADcDEANAIAMpAxAgAygCXCkDMFQEQCADIAMoAlwoAkAgAykDEKdBBHRqKAIAKAIwQQBBACADKAJgEEY2AgwgAygCDEUEQCMAQRBrIgAgAygCaDYCDCAAKAIMIgAgACgCMEEBajYCMCADKAJcEDwgA0EANgJsDAMLIAMoAlwoAlAgAygCDCADKQMQQQggAygCXEEIahB0QQFxRQRAAkAgAygCXCgCCEEKRgRAIAMoAmRBBHFFDQELIAMoAmAgAygCXEEIahBFIwBBEGsiACADKAJoNgIMIAAoAgwiACAAKAIwQQFqNgIwIAMoAlwQPCADQQA2AmwMBAsLIAMgAykDEEIBfDcDEAwBCwsgAygCXCADKAJcKAIUNgIYIAMgAygCXDYCbAsgAygCbCEAIANB8ABqJAAgBCAANgI4CyAEKAI4RQRAIAQoAlgQLxogBEEANgJcDAELIAQgBCgCODYCXAsgBCgCXCEAIARB4ABqJAAgAAuOAQEBfyMAQRBrIgIkACACIAA2AgwgAiABNgIIIAJBADYCBCACKAIIBEAjAEEQayIAIAIoAgg2AgwgAiAAKAIMKAIANgIEIAIoAggQrAFBAUYEQCMAQRBrIgAgAigCCDYCDEG0mwEgACgCDCgCBDYCAAsLIAIoAgwEQCACKAIMIAIoAgQ2AgALIAJBEGokAAuVAQEBfyMAQRBrIgEkACABIAA2AggCQAJ/IwBBEGsiACABKAIINgIMIAAoAgwpAxhCgIAQg1ALBEAgASgCCCgCAARAIAEgASgCCCgCABCeAUEBcToADwwCCyABQQE6AA8MAQsgASABKAIIQQBCAEESECA+AgQgASABKAIEQQBHOgAPCyABLQAPQQFxIQAgAUEQaiQAIAALfwEBfyMAQSBrIgMkACADIAA2AhggAyABNwMQIANBADYCDCADIAI2AggCQCADKQMQQv///////////wBWBEAgAygCCEEEQT0QFCADQX82AhwMAQsgAyADKAIYIAMpAxAgAygCDCADKAIIEGo2AhwLIAMoAhwhACADQSBqJAAgAAt9ACACQQFGBEAgASAAKAIIIAAoAgRrrH0hAQsCQCAAKAIUIAAoAhxLBEAgAEEAQQAgACgCJBEBABogACgCFEUNAQsgAEEANgIcIABCADcDECAAIAEgAiAAKAIoEQ8AQgBTDQAgAEIANwIEIAAgACgCAEFvcTYCAEEADwtBfwvhAgECfyMAQSBrIgMkAAJ/AkACQEGnEiABLAAAEKIBRQRAQbSbAUEcNgIADAELQZgJEBgiAg0BC0EADAELIAJBAEGQARAzIAFBKxCiAUUEQCACQQhBBCABLQAAQfIARhs2AgALAkAgAS0AAEHhAEcEQCACKAIAIQEMAQsgAEEDQQAQBCIBQYAIcUUEQCADIAFBgAhyNgIQIABBBCADQRBqEAQaCyACIAIoAgBBgAFyIgE2AgALIAJB/wE6AEsgAkGACDYCMCACIAA2AjwgAiACQZgBajYCLAJAIAFBCHENACADIANBGGo2AgAgAEGTqAEgAxAODQAgAkEKOgBLCyACQRo2AiggAkEbNgIkIAJBHDYCICACQR02AgxB6J8BKAIARQRAIAJBfzYCTAsgAkGsoAEoAgA2AjhBrKABKAIAIgAEQCAAIAI2AjQLQaygASACNgIAIAILIQAgA0EgaiQAIAAL8AEBAn8CfwJAIAFB/wFxIgMEQCAAQQNxBEADQCAALQAAIgJFDQMgAiABQf8BcUYNAyAAQQFqIgBBA3ENAAsLAkAgACgCACICQX9zIAJBgYKECGtxQYCBgoR4cQ0AIANBgYKECGwhAwNAIAIgA3MiAkF/cyACQYGChAhrcUGAgYKEeHENASAAKAIEIQIgAEEEaiEAIAJBgYKECGsgAkF/c3FBgIGChHhxRQ0ACwsDQCAAIgItAAAiAwRAIAJBAWohACADIAFB/wFxRw0BCwsgAgwCCyAAEC4gAGoMAQsgAAsiAEEAIAAtAAAgAUH/AXFGGwsYACAAKAJMQX9MBEAgABCkAQ8LIAAQpAELYAIBfgJ/IAAoAighAkEBIQMgAEIAIAAtAABBgAFxBH9BAkEBIAAoAhQgACgCHEsbBUEBCyACEQ8AIgFCAFkEfiAAKAIUIAAoAhxrrCABIAAoAgggACgCBGusfXwFIAELC2sBAX8gAARAIAAoAkxBf0wEQCAAEG4PCyAAEG4PC0GwoAEoAgAEQEGwoAEoAgAQpQEhAQtBrKABKAIAIgAEQANAIAAoAkwaIAAoAhQgACgCHEsEQCAAEG4gAXIhAQsgACgCOCIADQALCyABCyIAIAAgARACIgBBgWBPBH9BtJsB
QQAgAGs2AgBBfwUgAAsLUwEDfwJAIAAoAgAsAABBMGtBCk8NAANAIAAoAgAiAiwAACEDIAAgAkEBajYCACABIANqQTBrIQEgAiwAAUEwa0EKTw0BIAFBCmwhAQwACwALIAELuwIAAkAgAUEUSw0AAkACQAJAAkACQAJAAkACQAJAAkAgAUEJaw4KAAECAwQFBgcICQoLIAIgAigCACIBQQRqNgIAIAAgASgCADYCAA8LIAIgAigCACIBQQRqNgIAIAAgATQCADcDAA8LIAIgAigCACIBQQRqNgIAIAAgATUCADcDAA8LIAIgAigCAEEHakF4cSIBQQhqNgIAIAAgASkDADcDAA8LIAIgAigCACIBQQRqNgIAIAAgATIBADcDAA8LIAIgAigCACIBQQRqNgIAIAAgATMBADcDAA8LIAIgAigCACIBQQRqNgIAIAAgATAAADcDAA8LIAIgAigCACIBQQRqNgIAIAAgATEAADcDAA8LIAIgAigCAEEHakF4cSIBQQhqNgIAIAAgASsDADkDAA8LIAAgAkEYEQQACwt/AgF/AX4gAL0iA0I0iKdB/w9xIgJB/w9HBHwgAkUEQCABIABEAAAAAAAAAABhBH9BAAUgAEQAAAAAAADwQ6IgARCpASEAIAEoAgBBQGoLNgIAIAAPCyABIAJB/gdrNgIAIANC/////////4eAf4NCgICAgICAgPA/hL8FIAALC5sCACAARQRAQQAPCwJ/AkAgAAR/IAFB/wBNDQECQEGQmQEoAgAoAgBFBEAgAUGAf3FBgL8DRg0DDAELIAFB/w9NBEAgACABQT9xQYABcjoAASAAIAFBBnZBwAFyOgAAQQIMBAsgAUGAsANPQQAgAUGAQHFBgMADRxtFBEAgACABQT9xQYABcjoAAiAAIAFBDHZB4AFyOgAAIAAgAUEGdkE/cUGAAXI6AAFBAwwECyABQYCABGtB//8/TQRAIAAgAUE/cUGAAXI6AAMgACABQRJ2QfABcjoAACAAIAFBBnZBP3FBgAFyOgACIAAgAUEMdkE/cUGAAXI6AAFBBAwECwtBtJsBQRk2AgBBfwVBAQsMAQsgACABOgAAQQELC+MBAQJ/IAJBAEchAwJAAkACQCAAQQNxRQ0AIAJFDQAgAUH/AXEhBANAIAAtAAAgBEYNAiACQQFrIgJBAEchAyAAQQFqIgBBA3FFDQEgAg0ACwsgA0UNAQsCQCAALQAAIAFB/wFxRg0AIAJBBEkNACABQf8BcUGBgoQIbCEDA0AgACgCACADcyIEQX9zIARBgYKECGtxQYCBgoR4cQ0BIABBBGohACACQQRrIgJBA0sNAAsLIAJFDQAgAUH/AXEhAQNAIAEgAC0AAEYEQCAADwsgAEEBaiEAIAJBAWsiAg0ACwtBAAtaAQF/IwBBEGsiASAANgIIAkACQCABKAIIKAIAQQBOBEAgASgCCCgCAEGAFCgCAEgNAQsgAUEANgIMDAELIAEgASgCCCgCAEECdEGQFGooAgA2AgwLIAEoAgwL+QIBAX8jAEEgayIEJAAgBCAANgIYIAQgATcDECAEIAI2AgwgBCADNgIIIAQgBCgCGCAEKAIYIAQpAxAgBCgCDCAEKAIIEK4BIgA2AgACQCAARQRAIARBADYCHAwBCyAEKAIAEEhBAEgEQCAEKAIYQQhqIAQoAgAQFyAEKAIAEBsgBEEANgIcDAELIAQoAhghAiMAQRBrIgAkACAAIAI2AgggAEEYEBgiAjYCBAJAIAJFBEAgACgCCEEIakEOQQAQFCAAQQA2AgwMAQsgACgCBCAAKAIINgIAIwBBEGsiAiAAKAIEQQRqNgIMIAIoAgxBADYCACACKAIMQQA2AgQgAigCDEEANgIIIAAoAgRBADoAECAAKAIEQQA2AhQgACAAKAIENgIMCyAAKAIMIQIgAEEQaiQAIAQgAjYCBCACRQRAIAQoAgAQGyAEQQA2AhwMAQsgBCgCBCAEKAIANgIUIAQgBCgCBDYCHAsgBCgCHCEAIARBIGokACAAC7cOAgN/AX4jAEHAAWsiBSQAIAUgADYCuAEgBSABNgK0ASAFIAI3A6gBIAUgAzYCpAEgBUIANwOYASAFQgA3A5ABIAUgBDYCjAECQCAFKAK4AUUEQCAFQQA2ArwBDAELAkAgBSgCtAEEQCAFKQOoASAFKAK0ASkDMFQNAQsgBSgCuAFBCGpBEkEAEBQgBUEANgK8AQwBCwJAIAUoAqQBQQhxDQAgBSgCtAEoAkAgBSkDqAGnQQR0aigCCEUEQCAFKAK0ASgCQCAFKQOoAadBBHRqLQAMQQFxRQ0BCyAFKAK4AUEIakEPQQAQFCAFQQA2ArwBDAELIAUoArQBIAUpA6gBIAUoAqQBQQhyIAVByABqEH5BAEgEQCAFKAK4AUEIakEUQQAQFCAFQQA2ArwBDAELIAUoAqQBQSBxBEAgBSAFKAKkAUEEcjYCpAELAkAgBSkDmAFQBEAgBSkDkAFQDQELIAUoAqQBQQRxRQ0AIAUoArgBQQhqQRJBABAUIAVBADYCvAEMAQsCQCAFKQOYAVAEQCAFKQOQAVANAQsgBSkDmAEgBSkDmAEgBSkDkAF8WARAIAUpA2AgBSkDmAEgBSkDkAF8Wg0BCyAFKAK4AUEIakESQQAQFCAFQQA2ArwBDAELIAUpA5ABUARAIAUgBSkDYCAFKQOYAX03A5ABCyAFIAUpA5ABIAUpA2BUOgBHIAUgBSgCpAFBIHEEf0EABSAFLwF6QQBHC0EBcToARSAFIAUoAqQBQQRxBH9BAAUgBS8BeEEARwtBAXE6AEQgBQJ/IAUoAqQBQQRxBEBBACAFLwF4DQEaCyAFLQBHQX9zC0EBcToARiAFLQBFQQFxBEAgBSgCjAFFBEAgBSAFKAK4ASgCHDYCjAELIAUoAowBRQRAIAUoArgBQQhqQRpBABAUIAVBADYCvAEMAgsLIAUpA2hQBEAgBSAFKAK4AUEAQgBBABB9NgK8AQwBCwJAAkAgBS0AR0EBcUUNACAFLQBFQQFxDQAgBS0AREEBcQ0AIAUgBSkDkAE3AyAgBSAFKQOQATcDKCAFQQA7ATggBSAFKAJwNgIwIAVC3AA3AwggBSAFKAK0ASgCACAFKQOYASAFKQOQASAFQQhqQQAgBSgCtAEgBSkDqAEgBSgCuAFBCGoQXyIANgKIAQwBCyAFIAUoArQBIAUpA6gBIAUoAqQBIAUoArgBQQhqED8iADYCBCAARQRAIAVBADYCvAEMAgsgBSAFKAK0ASgCAEIAIAUpA2ggBUHIAGogBSgCBC8BDEEBdkEDcSAFKAK0ASAFKQOoASAFKAK4AUEIahBfIgA2AogBCyAARQRAIAVBADYCvAEMAQsCfyAFKAKIASEAIAUoArQBIQMjAEEQayIBJAAgASAANgIMIAEgAzYCCCABKAIMIAEoAgg2AiwgASgCCCEDIAEoAgwhBCMAQSBrIgAkACAAIAM2AhggACAENgIUAkAgACgCGCgCSCAAKAIYKAJEQQFqTQRAIAAgACgCGCgCSEE
KajYCDCAAIAAoAhgoAkwgACgCDEECdBBONgIQIAAoAhBFBEAgACgCGEEIakEOQQAQFCAAQX82AhwMAgsgACgCGCAAKAIMNgJIIAAoAhggACgCEDYCTAsgACgCFCEEIAAoAhgoAkwhBiAAKAIYIgcoAkQhAyAHIANBAWo2AkQgA0ECdCAGaiAENgIAIABBADYCHAsgACgCHCEDIABBIGokACABQRBqJAAgA0EASAsEQCAFKAKIARAbIAVBADYCvAEMAQsgBS0ARUEBcQRAIAUgBS8BekEAEHsiADYCACAARQRAIAUoArgBQQhqQRhBABAUIAVBADYCvAEMAgsgBSAFKAK4ASAFKAKIASAFLwF6QQAgBSgCjAEgBSgCABEFADYChAEgBSgCiAEQGyAFKAKEAUUEQCAFQQA2ArwBDAILIAUgBSgChAE2AogBCyAFLQBEQQFxBEAgBSAFKAK4ASAFKAKIASAFLwF4ELABNgKEASAFKAKIARAbIAUoAoQBRQRAIAVBADYCvAEMAgsgBSAFKAKEATYCiAELIAUtAEZBAXEEQCAFIAUoArgBIAUoAogBQQEQrwE2AoQBIAUoAogBEBsgBSgChAFFBEAgBUEANgK8AQwCCyAFIAUoAoQBNgKIAQsCQCAFLQBHQQFxRQ0AIAUtAEVBAXFFBEAgBS0AREEBcUUNAQsgBSgCuAEhASAFKAKIASEDIAUpA5gBIQIgBSkDkAEhCCMAQSBrIgAkACAAIAE2AhwgACADNgIYIAAgAjcDECAAIAg3AwggACgCGCAAKQMQIAApAwhBAEEAQQBCACAAKAIcQQhqEF8hASAAQSBqJAAgBSABNgKEASAFKAKIARAbIAUoAoQBRQRAIAVBADYCvAEMAgsgBSAFKAKEATYCiAELIAUgBSgCiAE2ArwBCyAFKAK8ASEAIAVBwAFqJAAgAAuEAgEBfyMAQSBrIgMkACADIAA2AhggAyABNgIUIAMgAjYCEAJAIAMoAhRFBEAgAygCGEEIakESQQAQFCADQQA2AhwMAQsgA0E4EBgiADYCDCAARQRAIAMoAhhBCGpBDkEAEBQgA0EANgIcDAELIwBBEGsiACADKAIMQQhqNgIMIAAoAgxBADYCACAAKAIMQQA2AgQgACgCDEEANgIIIAMoAgwgAygCEDYCACADKAIMQQA2AgQgAygCDEIANwMoQQBBAEEAEBohACADKAIMIAA2AjAgAygCDEIANwMYIAMgAygCGCADKAIUQRQgAygCDBBhNgIcCyADKAIcIQAgA0EgaiQAIAALQwEBfyMAQRBrIgMkACADIAA2AgwgAyABNgIIIAMgAjYCBCADKAIMIAMoAgggAygCBEEAQQAQsgEhACADQRBqJAAgAAtJAQF/IwBBEGsiASQAIAEgADYCDCABKAIMBEAgASgCDCgCrEAgASgCDCgCqEAoAgQRAgAgASgCDBA4IAEoAgwQFQsgAUEQaiQAC5QFAQF/IwBBMGsiBSQAIAUgADYCKCAFIAE2AiQgBSACNgIgIAUgAzoAHyAFIAQ2AhggBUEANgIMAkAgBSgCJEUEQCAFKAIoQQhqQRJBABAUIAVBADYCLAwBCyAFIAUoAiAgBS0AH0EBcRCzASIANgIMIABFBEAgBSgCKEEIakEQQQAQFCAFQQA2AiwMAQsgBSgCICEBIAUtAB9BAXEhAiAFKAIYIQMgBSgCDCEEIwBBIGsiACQAIAAgATYCGCAAIAI6ABcgACADNgIQIAAgBDYCDCAAQbDAABAYIgE2AggCQCABRQRAIABBADYCHAwBCyMAQRBrIgEgACgCCDYCDCABKAIMQQA2AgAgASgCDEEANgIEIAEoAgxBADYCCCAAKAIIAn8gAC0AF0EBcQRAIAAoAhhBf0cEfyAAKAIYQX5GBUEBC0EBcQwBC0EAC0EARzoADiAAKAIIIAAoAgw2AqhAIAAoAgggACgCGDYCFCAAKAIIIAAtABdBAXE6ABAgACgCCEEAOgAMIAAoAghBADoADSAAKAIIQQA6AA8gACgCCCgCqEAoAgAhAQJ/AkAgACgCGEF/RwRAIAAoAhhBfkcNAQtBCAwBCyAAKAIYC0H//wNxIAAoAhAgACgCCCABEQEAIQEgACgCCCABNgKsQCABRQRAIAAoAggQOCAAKAIIEBUgAEEANgIcDAELIAAgACgCCDYCHAsgACgCHCEBIABBIGokACAFIAE2AhQgAUUEQCAFKAIoQQhqQQ5BABAUIAVBADYCLAwBCyAFIAUoAiggBSgCJEETIAUoAhQQYSIANgIQIABFBEAgBSgCFBCxASAFQQA2AiwMAQsgBSAFKAIQNgIsCyAFKAIsIQAgBUEwaiQAIAALzAEBAX8jAEEgayICIAA2AhggAiABOgAXIAICfwJAIAIoAhhBf0cEQCACKAIYQX5HDQELQQgMAQsgAigCGAs7AQ4gAkEANgIQAkADQCACKAIQQdSXASgCAEkEQCACKAIQQQxsQdiXAWovAQAgAi8BDkYEQCACLQAXQQFxBEAgAiACKAIQQQxsQdiXAWooAgQ2AhwMBAsgAiACKAIQQQxsQdiXAWooAgg2AhwMAwUgAiACKAIQQQFqNgIQDAILAAsLIAJBADYCHAsgAigCHAvkAQEBfyMAQSBrIgMkACADIAA6ABsgAyABNgIUIAMgAjYCECADQcgAEBgiADYCDAJAIABFBEAgAygCEEEBQbSbASgCABAUIANBADYCHAwBCyADKAIMIAMoAhA2AgAgAygCDCADLQAbQQFxOgAEIAMoAgwgAygCFDYCCAJAIAMoAgwoAghBAU4EQCADKAIMKAIIQQlMDQELIAMoAgxBCTYCCAsgAygCDEEAOgAMIAMoAgxBADYCMCADKAIMQQA2AjQgAygCDEEANgI4IAMgAygCDDYCHAsgAygCHCEAIANBIGokACAACzgBAX8jAEEQayIBIAA2AgwgASgCDEEANgIAIAEoAgxBADYCBCABKAIMQQA2AgggASgCDEEAOgAMC+MIAQF/IwBBQGoiAiAANgI4IAIgATYCNCACIAIoAjgoAnw2AjAgAiACKAI4KAI4IAIoAjgoAmxqNgIsIAIgAigCOCgCeDYCICACIAIoAjgoApABNgIcIAICfyACKAI4KAJsIAIoAjgoAixBhgJrSwRAIAIoAjgoAmwgAigCOCgCLEGGAmtrDAELQQALNgIYIAIgAigCOCgCQDYCFCACIAIoAjgoAjQ2AhAgAiACKAI4KAI4IAIoAjgoAmxqQYICajYCDCACIAIoAiwgAigCIEEBa2otAAA6AAsgAiACKAIsIAIoAiBqLQAAOgAKIAIoAjgoAnggAigCOCgCjAFPBEAgAiACKAIwQQJ2NgIwCyACKAIcIAIoAjgoAnRLBEAgAiACKAI4KAJ0NgIcCwNAAkAgAiACKAI4KAI4IAIoAjRqNgIoAkAgAigCKCACKAIgai0AACACLQAKRw0AIAIoAiggAigCIEEBa2otAAAgAi0AC0cNACACKAIoLQAAIAIoAiwtAABHDQAgAiACKA
IoIgBBAWo2AiggAC0AASACKAIsLQABRwRADAELIAIgAigCLEECajYCLCACIAIoAihBAWo2AigDQCACIAIoAiwiAEEBajYCLCAALQABIQEgAiACKAIoIgBBAWo2AigCf0EAIAAtAAEgAUcNABogAiACKAIsIgBBAWo2AiwgAC0AASEBIAIgAigCKCIAQQFqNgIoQQAgAC0AASABRw0AGiACIAIoAiwiAEEBajYCLCAALQABIQEgAiACKAIoIgBBAWo2AihBACAALQABIAFHDQAaIAIgAigCLCIAQQFqNgIsIAAtAAEhASACIAIoAigiAEEBajYCKEEAIAAtAAEgAUcNABogAiACKAIsIgBBAWo2AiwgAC0AASEBIAIgAigCKCIAQQFqNgIoQQAgAC0AASABRw0AGiACIAIoAiwiAEEBajYCLCAALQABIQEgAiACKAIoIgBBAWo2AihBACAALQABIAFHDQAaIAIgAigCLCIAQQFqNgIsIAAtAAEhASACIAIoAigiAEEBajYCKEEAIAAtAAEgAUcNABogAiACKAIsIgBBAWo2AiwgAC0AASEBIAIgAigCKCIAQQFqNgIoQQAgAC0AASABRw0AGiACKAIsIAIoAgxJC0EBcQ0ACyACQYICIAIoAgwgAigCLGtrNgIkIAIgAigCDEGCAms2AiwgAigCJCACKAIgSgRAIAIoAjggAigCNDYCcCACIAIoAiQ2AiAgAigCJCACKAIcTg0CIAIgAigCLCACKAIgQQFrai0AADoACyACIAIoAiwgAigCIGotAAA6AAoLCyACIAIoAhQgAigCNCACKAIQcUEBdGovAQAiATYCNEEAIQAgASACKAIYSwR/IAIgAigCMEEBayIANgIwIABBAEcFQQALQQFxDQELCwJAIAIoAiAgAigCOCgCdE0EQCACIAIoAiA2AjwMAQsgAiACKAI4KAJ0NgI8CyACKAI8C5IQAQF/IwBBMGsiAiQAIAIgADYCKCACIAE2AiQgAgJ/IAIoAigoAiwgAigCKCgCDEEFa0kEQCACKAIoKAIsDAELIAIoAigoAgxBBWsLNgIgIAJBADYCECACIAIoAigoAgAoAgQ2AgwDQAJAIAJB//8DNgIcIAIgAigCKCgCvC1BKmpBA3U2AhQgAigCKCgCACgCECACKAIUSQ0AIAIgAigCKCgCACgCECACKAIUazYCFCACIAIoAigoAmwgAigCKCgCXGs2AhggAigCHCACKAIYIAIoAigoAgAoAgRqSwRAIAIgAigCGCACKAIoKAIAKAIEajYCHAsgAigCHCACKAIUSwRAIAIgAigCFDYCHAsCQCACKAIcIAIoAiBPDQACQCACKAIcRQRAIAIoAiRBBEcNAQsgAigCJEUNACACKAIcIAIoAhggAigCKCgCACgCBGpGDQELDAELQQAhACACIAIoAiRBBEYEfyACKAIcIAIoAhggAigCKCgCACgCBGpGBUEAC0EBcTYCECACKAIoQQBBACACKAIQEF0gAigCKCgCCCACKAIoKAIUQQRraiACKAIcOgAAIAIoAigoAgggAigCKCgCFEEDa2ogAigCHEEIdjoAACACKAIoKAIIIAIoAigoAhRBAmtqIAIoAhxBf3M6AAAgAigCKCgCCCACKAIoKAIUQQFraiACKAIcQX9zQQh2OgAAIAIoAigoAgAQHCACKAIYBEAgAigCGCACKAIcSwRAIAIgAigCHDYCGAsgAigCKCgCACgCDCACKAIoKAI4IAIoAigoAlxqIAIoAhgQGRogAigCKCgCACIAIAIoAhggACgCDGo2AgwgAigCKCgCACIAIAAoAhAgAigCGGs2AhAgAigCKCgCACIAIAIoAhggACgCFGo2AhQgAigCKCIAIAIoAhggACgCXGo2AlwgAiACKAIcIAIoAhhrNgIcCyACKAIcBEAgAigCKCgCACACKAIoKAIAKAIMIAIoAhwQdhogAigCKCgCACIAIAIoAhwgACgCDGo2AgwgAigCKCgCACIAIAAoAhAgAigCHGs2AhAgAigCKCgCACIAIAIoAhwgACgCFGo2AhQLIAIoAhBFDQELCyACIAIoAgwgAigCKCgCACgCBGs2AgwgAigCDARAAkAgAigCDCACKAIoKAIsTwRAIAIoAihBAjYCsC0gAigCKCgCOCACKAIoKAIAKAIAIAIoAigoAixrIAIoAigoAiwQGRogAigCKCACKAIoKAIsNgJsDAELIAIoAgwgAigCKCgCPCACKAIoKAJsa08EQCACKAIoIgAgACgCbCACKAIoKAIsazYCbCACKAIoKAI4IAIoAigoAjggAigCKCgCLGogAigCKCgCbBAZGiACKAIoKAKwLUECSQRAIAIoAigiACAAKAKwLUEBajYCsC0LCyACKAIoKAI4IAIoAigoAmxqIAIoAigoAgAoAgAgAigCDGsgAigCDBAZGiACKAIoIgAgAigCDCAAKAJsajYCbAsgAigCKCACKAIoKAJsNgJcIAIoAigiAQJ/IAIoAgwgAigCKCgCLCACKAIoKAK0LWtLBEAgAigCKCgCLCACKAIoKAK0LWsMAQsgAigCDAsgASgCtC1qNgK0LQsgAigCKCgCwC0gAigCKCgCbEkEQCACKAIoIAIoAigoAmw2AsAtCwJAIAIoAhAEQCACQQM2AiwMAQsCQCACKAIkRQ0AIAIoAiRBBEYNACACKAIoKAIAKAIEDQAgAigCKCgCbCACKAIoKAJcRw0AIAJBATYCLAwBCyACIAIoAigoAjwgAigCKCgCbGtBAWs2AhQCQCACKAIoKAIAKAIEIAIoAhRNDQAgAigCKCgCXCACKAIoKAIsSA0AIAIoAigiACAAKAJcIAIoAigoAixrNgJcIAIoAigiACAAKAJsIAIoAigoAixrNgJsIAIoAigoAjggAigCKCgCOCACKAIoKAIsaiACKAIoKAJsEBkaIAIoAigoArAtQQJJBEAgAigCKCIAIAAoArAtQQFqNgKwLQsgAiACKAIoKAIsIAIoAhRqNgIUCyACKAIUIAIoAigoAgAoAgRLBEAgAiACKAIoKAIAKAIENgIUCyACKAIUBEAgAigCKCgCACACKAIoKAI4IAIoAigoAmxqIAIoAhQQdhogAigCKCIAIAIoAhQgACgCbGo2AmwLIAIoAigoAsAtIAIoAigoAmxJBEAgAigCKCACKAIoKAJsNgLALQsgAiACKAIoKAK8LUEqakEDdTYCFCACIAIoAigoAgwgAigCFGtB//8DSwR/Qf//AwUgAigCKCgCDCACKAIUaws2AhQgAgJ/IAIoAhQgAigCKCgCLEsEQCACKAIoKAIsDAELIAIoAhQLNgIgIAIgAigCKCgCbCACKAIoKAJcazYCGAJAIAIoAhggAigCIEkEQCACKAIYRQRAIAIoAiRBBEcNAgsgAigCJEUNASACKAIoKAIAKAIEDQEgAigCGCACKAIUSw0BCyACAn8gAigCGCACKAIUSwRAIAIoAhQMAQsgAigCGAs2AhwgAgJ/QQAgAigCJEEERw0AGkEAI
AIoAigoAgAoAgQNABogAigCHCACKAIYRgtBAXE2AhAgAigCKCACKAIoKAI4IAIoAigoAlxqIAIoAhwgAigCEBBdIAIoAigiACACKAIcIAAoAlxqNgJcIAIoAigoAgAQHAsgAkECQQAgAigCEBs2AiwLIAIoAiwhACACQTBqJAAgAAuyAgEBfyMAQRBrIgEkACABIAA2AggCQCABKAIIEHgEQCABQX42AgwMAQsgASABKAIIKAIcKAIENgIEIAEoAggoAhwoAggEQCABKAIIKAIoIAEoAggoAhwoAgggASgCCCgCJBEEAAsgASgCCCgCHCgCRARAIAEoAggoAiggASgCCCgCHCgCRCABKAIIKAIkEQQACyABKAIIKAIcKAJABEAgASgCCCgCKCABKAIIKAIcKAJAIAEoAggoAiQRBAALIAEoAggoAhwoAjgEQCABKAIIKAIoIAEoAggoAhwoAjggASgCCCgCJBEEAAsgASgCCCgCKCABKAIIKAIcIAEoAggoAiQRBAAgASgCCEEANgIcIAFBfUEAIAEoAgRB8QBGGzYCDAsgASgCDCEAIAFBEGokACAAC+sXAQJ/IwBB8ABrIgMgADYCbCADIAE2AmggAyACNgJkIANBfzYCXCADIAMoAmgvAQI2AlQgA0EANgJQIANBBzYCTCADQQQ2AkggAygCVEUEQCADQYoBNgJMIANBAzYCSAsgA0EANgJgA0AgAygCYCADKAJkSkUEQCADIAMoAlQ2AlggAyADKAJoIAMoAmBBAWpBAnRqLwECNgJUIAMgAygCUEEBaiIANgJQAkACQCADKAJMIABMDQAgAygCWCADKAJURw0ADAELAkAgAygCUCADKAJISARAA0AgAyADKAJsQfwUaiADKAJYQQJ0ai8BAjYCRAJAIAMoAmwoArwtQRAgAygCRGtKBEAgAyADKAJsQfwUaiADKAJYQQJ0ai8BADYCQCADKAJsIgAgAC8BuC0gAygCQEH//wNxIAMoAmwoArwtdHI7AbgtIAMoAmwvAbgtQf8BcSEBIAMoAmwoAgghAiADKAJsIgQoAhQhACAEIABBAWo2AhQgACACaiABOgAAIAMoAmwvAbgtQQh2IQEgAygCbCgCCCECIAMoAmwiBCgCFCEAIAQgAEEBajYCFCAAIAJqIAE6AAAgAygCbCADKAJAQf//A3FBECADKAJsKAK8LWt1OwG4LSADKAJsIgAgACgCvC0gAygCREEQa2o2ArwtDAELIAMoAmwiACAALwG4LSADKAJsQfwUaiADKAJYQQJ0ai8BACADKAJsKAK8LXRyOwG4LSADKAJsIgAgAygCRCAAKAK8LWo2ArwtCyADIAMoAlBBAWsiADYCUCAADQALDAELAkAgAygCWARAIAMoAlggAygCXEcEQCADIAMoAmxB/BRqIAMoAlhBAnRqLwECNgI8AkAgAygCbCgCvC1BECADKAI8a0oEQCADIAMoAmxB/BRqIAMoAlhBAnRqLwEANgI4IAMoAmwiACAALwG4LSADKAI4Qf//A3EgAygCbCgCvC10cjsBuC0gAygCbC8BuC1B/wFxIQEgAygCbCgCCCECIAMoAmwiBCgCFCEAIAQgAEEBajYCFCAAIAJqIAE6AAAgAygCbC8BuC1BCHYhASADKAJsKAIIIQIgAygCbCIEKAIUIQAgBCAAQQFqNgIUIAAgAmogAToAACADKAJsIAMoAjhB//8DcUEQIAMoAmwoArwta3U7AbgtIAMoAmwiACAAKAK8LSADKAI8QRBrajYCvC0MAQsgAygCbCIAIAAvAbgtIAMoAmxB/BRqIAMoAlhBAnRqLwEAIAMoAmwoArwtdHI7AbgtIAMoAmwiACADKAI8IAAoArwtajYCvC0LIAMgAygCUEEBazYCUAsgAyADKAJsLwG+FTYCNAJAIAMoAmwoArwtQRAgAygCNGtKBEAgAyADKAJsLwG8FTYCMCADKAJsIgAgAC8BuC0gAygCMEH//wNxIAMoAmwoArwtdHI7AbgtIAMoAmwvAbgtQf8BcSEBIAMoAmwoAgghAiADKAJsIgQoAhQhACAEIABBAWo2AhQgACACaiABOgAAIAMoAmwvAbgtQQh2IQEgAygCbCgCCCECIAMoAmwiBCgCFCEAIAQgAEEBajYCFCAAIAJqIAE6AAAgAygCbCADKAIwQf//A3FBECADKAJsKAK8LWt1OwG4LSADKAJsIgAgACgCvC0gAygCNEEQa2o2ArwtDAELIAMoAmwiACAALwG4LSADKAJsLwG8FSADKAJsKAK8LXRyOwG4LSADKAJsIgAgAygCNCAAKAK8LWo2ArwtCyADQQI2AiwCQCADKAJsKAK8LUEQIAMoAixrSgRAIAMgAygCUEEDazYCKCADKAJsIgAgAC8BuC0gAygCKEH//wNxIAMoAmwoArwtdHI7AbgtIAMoAmwvAbgtQf8BcSEBIAMoAmwoAgghAiADKAJsIgQoAhQhACAEIABBAWo2AhQgACACaiABOgAAIAMoAmwvAbgtQQh2IQEgAygCbCgCCCECIAMoAmwiBCgCFCEAIAQgAEEBajYCFCAAIAJqIAE6AAAgAygCbCADKAIoQf//A3FBECADKAJsKAK8LWt1OwG4LSADKAJsIgAgACgCvC0gAygCLEEQa2o2ArwtDAELIAMoAmwiACAALwG4LSADKAJQQQNrQf//A3EgAygCbCgCvC10cjsBuC0gAygCbCIAIAMoAiwgACgCvC1qNgK8LQsMAQsCQCADKAJQQQpMBEAgAyADKAJsLwHCFTYCJAJAIAMoAmwoArwtQRAgAygCJGtKBEAgAyADKAJsLwHAFTYCICADKAJsIgAgAC8BuC0gAygCIEH//wNxIAMoAmwoArwtdHI7AbgtIAMoAmwvAbgtQf8BcSEBIAMoAmwoAgghAiADKAJsIgQoAhQhACAEIABBAWo2AhQgACACaiABOgAAIAMoAmwvAbgtQQh2IQEgAygCbCgCCCECIAMoAmwiBCgCFCEAIAQgAEEBajYCFCAAIAJqIAE6AAAgAygCbCADKAIgQf//A3FBECADKAJsKAK8LWt1OwG4LSADKAJsIgAgACgCvC0gAygCJEEQa2o2ArwtDAELIAMoAmwiACAALwG4LSADKAJsLwHAFSADKAJsKAK8LXRyOwG4LSADKAJsIgAgAygCJCAAKAK8LWo2ArwtCyADQQM2AhwCQCADKAJsKAK8LUEQIAMoAhxrSgRAIAMgAygCUEEDazYCGCADKAJsIgAgAC8BuC0gAygCGEH//wNxIAMoAmwoArwtdHI7AbgtIAMoAmwvAbgtQf8BcSEBIAMoAmwoAgghAiADKAJsIgQoAhQhACAEIABBAWo2AhQgACACaiABOgAAIAMoAmwvAbgtQQh2IQEgAygCbCgCCCECIAMoAmwiBCgCFCEAIAQgAEEBajYCFCAAIAJqIAE6AAAgAygCbCADKAIYQf//A3FBECADKAJsKAK8LWt1OwG4LSADKAJsIgAgACgCvC0gAygCHEEQa2o2ArwtDAEL
IAMoAmwiACAALwG4LSADKAJQQQNrQf//A3EgAygCbCgCvC10cjsBuC0gAygCbCIAIAMoAhwgACgCvC1qNgK8LQsMAQsgAyADKAJsLwHGFTYCFAJAIAMoAmwoArwtQRAgAygCFGtKBEAgAyADKAJsLwHEFTYCECADKAJsIgAgAC8BuC0gAygCEEH//wNxIAMoAmwoArwtdHI7AbgtIAMoAmwvAbgtQf8BcSEBIAMoAmwoAgghAiADKAJsIgQoAhQhACAEIABBAWo2AhQgACACaiABOgAAIAMoAmwvAbgtQQh2IQEgAygCbCgCCCECIAMoAmwiBCgCFCEAIAQgAEEBajYCFCAAIAJqIAE6AAAgAygCbCADKAIQQf//A3FBECADKAJsKAK8LWt1OwG4LSADKAJsIgAgACgCvC0gAygCFEEQa2o2ArwtDAELIAMoAmwiACAALwG4LSADKAJsLwHEFSADKAJsKAK8LXRyOwG4LSADKAJsIgAgAygCFCAAKAK8LWo2ArwtCyADQQc2AgwCQCADKAJsKAK8LUEQIAMoAgxrSgRAIAMgAygCUEELazYCCCADKAJsIgAgAC8BuC0gAygCCEH//wNxIAMoAmwoArwtdHI7AbgtIAMoAmwvAbgtQf8BcSEBIAMoAmwoAgghAiADKAJsIgQoAhQhACAEIABBAWo2AhQgACACaiABOgAAIAMoAmwvAbgtQQh2IQEgAygCbCgCCCECIAMoAmwiBCgCFCEAIAQgAEEBajYCFCAAIAJqIAE6AAAgAygCbCADKAIIQf//A3FBECADKAJsKAK8LWt1OwG4LSADKAJsIgAgACgCvC0gAygCDEEQa2o2ArwtDAELIAMoAmwiACAALwG4LSADKAJQQQtrQf//A3EgAygCbCgCvC10cjsBuC0gAygCbCIAIAMoAgwgACgCvC1qNgK8LQsLCwsgA0EANgJQIAMgAygCWDYCXAJAIAMoAlRFBEAgA0GKATYCTCADQQM2AkgMAQsCQCADKAJYIAMoAlRGBEAgA0EGNgJMIANBAzYCSAwBCyADQQc2AkwgA0EENgJICwsLIAMgAygCYEEBajYCYAwBCwsLkQQBAX8jAEEwayIDIAA2AiwgAyABNgIoIAMgAjYCJCADQX82AhwgAyADKAIoLwECNgIUIANBADYCECADQQc2AgwgA0EENgIIIAMoAhRFBEAgA0GKATYCDCADQQM2AggLIAMoAiggAygCJEEBakECdGpB//8DOwECIANBADYCIANAIAMoAiAgAygCJEpFBEAgAyADKAIUNgIYIAMgAygCKCADKAIgQQFqQQJ0ai8BAjYCFCADIAMoAhBBAWoiADYCEAJAAkAgAygCDCAATA0AIAMoAhggAygCFEcNAAwBCwJAIAMoAhAgAygCCEgEQCADKAIsQfwUaiADKAIYQQJ0aiIAIAMoAhAgAC8BAGo7AQAMAQsCQCADKAIYBEAgAygCGCADKAIcRwRAIAMoAiwgAygCGEECdGpB/BRqIgAgAC8BAEEBajsBAAsgAygCLCIAIABBvBVqLwEAQQFqOwG8FQwBCwJAIAMoAhBBCkwEQCADKAIsIgAgAEHAFWovAQBBAWo7AcAVDAELIAMoAiwiACAAQcQVai8BAEEBajsBxBULCwsgA0EANgIQIAMgAygCGDYCHAJAIAMoAhRFBEAgA0GKATYCDCADQQM2AggMAQsCQCADKAIYIAMoAhRGBEAgA0EGNgIMIANBAzYCCAwBCyADQQc2AgwgA0EENgIICwsLIAMgAygCIEEBajYCIAwBCwsLpxIBAn8jAEHQAGsiAyAANgJMIAMgATYCSCADIAI2AkQgA0EANgI4IAMoAkwoAqAtBEADQCADIAMoAkwoAqQtIAMoAjhBAXRqLwEANgJAIAMoAkwoApgtIQAgAyADKAI4IgFBAWo2AjggAyAAIAFqLQAANgI8AkAgAygCQEUEQCADIAMoAkggAygCPEECdGovAQI2AiwCQCADKAJMKAK8LUEQIAMoAixrSgRAIAMgAygCSCADKAI8QQJ0ai8BADYCKCADKAJMIgAgAC8BuC0gAygCKEH//wNxIAMoAkwoArwtdHI7AbgtIAMoAkwvAbgtQf8BcSEBIAMoAkwoAgghAiADKAJMIgQoAhQhACAEIABBAWo2AhQgACACaiABOgAAIAMoAkwvAbgtQQh2IQEgAygCTCgCCCECIAMoAkwiBCgCFCEAIAQgAEEBajYCFCAAIAJqIAE6AAAgAygCTCADKAIoQf//A3FBECADKAJMKAK8LWt1OwG4LSADKAJMIgAgACgCvC0gAygCLEEQa2o2ArwtDAELIAMoAkwiACAALwG4LSADKAJIIAMoAjxBAnRqLwEAIAMoAkwoArwtdHI7AbgtIAMoAkwiACADKAIsIAAoArwtajYCvC0LDAELIAMgAygCPC0A0F02AjQgAyADKAJIIAMoAjRBgQJqQQJ0ai8BAjYCJAJAIAMoAkwoArwtQRAgAygCJGtKBEAgAyADKAJIIAMoAjRBgQJqQQJ0ai8BADYCICADKAJMIgAgAC8BuC0gAygCIEH//wNxIAMoAkwoArwtdHI7AbgtIAMoAkwvAbgtQf8BcSEBIAMoAkwoAgghAiADKAJMIgQoAhQhACAEIABBAWo2AhQgACACaiABOgAAIAMoAkwvAbgtQQh2IQEgAygCTCgCCCECIAMoAkwiBCgCFCEAIAQgAEEBajYCFCAAIAJqIAE6AAAgAygCTCADKAIgQf//A3FBECADKAJMKAK8LWt1OwG4LSADKAJMIgAgACgCvC0gAygCJEEQa2o2ArwtDAELIAMoAkwiACAALwG4LSADKAJIIAMoAjRBgQJqQQJ0ai8BACADKAJMKAK8LXRyOwG4LSADKAJMIgAgAygCJCAAKAK8LWo2ArwtCyADIAMoAjRBAnRBkOoAaigCADYCMCADKAIwBEAgAyADKAI8IAMoAjRBAnRBgO0AaigCAGs2AjwgAyADKAIwNgIcAkAgAygCTCgCvC1BECADKAIca0oEQCADIAMoAjw2AhggAygCTCIAIAAvAbgtIAMoAhhB//8DcSADKAJMKAK8LXRyOwG4LSADKAJMLwG4LUH/AXEhASADKAJMKAIIIQIgAygCTCIEKAIUIQAgBCAAQQFqNgIUIAAgAmogAToAACADKAJMLwG4LUEIdiEBIAMoAkwoAgghAiADKAJMIgQoAhQhACAEIABBAWo2AhQgACACaiABOgAAIAMoAkwgAygCGEH//wNxQRAgAygCTCgCvC1rdTsBuC0gAygCTCIAIAAoArwtIAMoAhxBEGtqNgK8LQwBCyADKAJMIgAgAC8BuC0gAygCPEH//wNxIAMoAkwoArwtdHI7AbgtIAMoAkwiACADKAIcIAAoArwtajYCvC0LCyADIAMoAkBBAWs2AkAgAwJ/IAMoAkBBgAJJBEAgAygCQC0A0FkMAQsgAygCQEEHdkGAAmotANBZCzYCNCADIAMoAkQgAygCNEECdGovAQI2AhQCQCADKAJ
MKAK8LUEQIAMoAhRrSgRAIAMgAygCRCADKAI0QQJ0ai8BADYCECADKAJMIgAgAC8BuC0gAygCEEH//wNxIAMoAkwoArwtdHI7AbgtIAMoAkwvAbgtQf8BcSEBIAMoAkwoAgghAiADKAJMIgQoAhQhACAEIABBAWo2AhQgACACaiABOgAAIAMoAkwvAbgtQQh2IQEgAygCTCgCCCECIAMoAkwiBCgCFCEAIAQgAEEBajYCFCAAIAJqIAE6AAAgAygCTCADKAIQQf//A3FBECADKAJMKAK8LWt1OwG4LSADKAJMIgAgACgCvC0gAygCFEEQa2o2ArwtDAELIAMoAkwiACAALwG4LSADKAJEIAMoAjRBAnRqLwEAIAMoAkwoArwtdHI7AbgtIAMoAkwiACADKAIUIAAoArwtajYCvC0LIAMgAygCNEECdEGQ6wBqKAIANgIwIAMoAjAEQCADIAMoAkAgAygCNEECdEGA7gBqKAIAazYCQCADIAMoAjA2AgwCQCADKAJMKAK8LUEQIAMoAgxrSgRAIAMgAygCQDYCCCADKAJMIgAgAC8BuC0gAygCCEH//wNxIAMoAkwoArwtdHI7AbgtIAMoAkwvAbgtQf8BcSEBIAMoAkwoAgghAiADKAJMIgQoAhQhACAEIABBAWo2AhQgACACaiABOgAAIAMoAkwvAbgtQQh2IQEgAygCTCgCCCECIAMoAkwiBCgCFCEAIAQgAEEBajYCFCAAIAJqIAE6AAAgAygCTCADKAIIQf//A3FBECADKAJMKAK8LWt1OwG4LSADKAJMIgAgACgCvC0gAygCDEEQa2o2ArwtDAELIAMoAkwiACAALwG4LSADKAJAQf//A3EgAygCTCgCvC10cjsBuC0gAygCTCIAIAMoAgwgACgCvC1qNgK8LQsLCyADKAI4IAMoAkwoAqAtSQ0ACwsgAyADKAJILwGCCDYCBAJAIAMoAkwoArwtQRAgAygCBGtKBEAgAyADKAJILwGACDYCACADKAJMIgAgAC8BuC0gAygCAEH//wNxIAMoAkwoArwtdHI7AbgtIAMoAkwvAbgtQf8BcSEBIAMoAkwoAgghAiADKAJMIgQoAhQhACAEIABBAWo2AhQgACACaiABOgAAIAMoAkwvAbgtQQh2IQEgAygCTCgCCCECIAMoAkwiBCgCFCEAIAQgAEEBajYCFCAAIAJqIAE6AAAgAygCTCADKAIAQf//A3FBECADKAJMKAK8LWt1OwG4LSADKAJMIgAgACgCvC0gAygCBEEQa2o2ArwtDAELIAMoAkwiACAALwG4LSADKAJILwGACCADKAJMKAK8LXRyOwG4LSADKAJMIgAgAygCBCAAKAK8LWo2ArwtCwuXAgEEfyMAQRBrIgEgADYCDAJAIAEoAgwoArwtQRBGBEAgASgCDC8BuC1B/wFxIQIgASgCDCgCCCEDIAEoAgwiBCgCFCEAIAQgAEEBajYCFCAAIANqIAI6AAAgASgCDC8BuC1BCHYhAiABKAIMKAIIIQMgASgCDCIEKAIUIQAgBCAAQQFqNgIUIAAgA2ogAjoAACABKAIMQQA7AbgtIAEoAgxBADYCvC0MAQsgASgCDCgCvC1BCE4EQCABKAIMLwG4LSECIAEoAgwoAgghAyABKAIMIgQoAhQhACAEIABBAWo2AhQgACADaiACOgAAIAEoAgwiACAALwG4LUEIdjsBuC0gASgCDCIAIAAoArwtQQhrNgK8LQsLC+8BAQR/IwBBEGsiASAANgIMAkAgASgCDCgCvC1BCEoEQCABKAIMLwG4LUH/AXEhAiABKAIMKAIIIQMgASgCDCIEKAIUIQAgBCAAQQFqNgIUIAAgA2ogAjoAACABKAIMLwG4LUEIdiECIAEoAgwoAgghAyABKAIMIgQoAhQhACAEIABBAWo2AhQgACADaiACOgAADAELIAEoAgwoArwtQQBKBEAgASgCDC8BuC0hAiABKAIMKAIIIQMgASgCDCIEKAIUIQAgBCAAQQFqNgIUIAAgA2ogAjoAAAsLIAEoAgxBADsBuC0gASgCDEEANgK8LQv8AQEBfyMAQRBrIgEgADYCDCABQQA2AggDQCABKAIIQZ4CTkUEQCABKAIMQZQBaiABKAIIQQJ0akEAOwEAIAEgASgCCEEBajYCCAwBCwsgAUEANgIIA0AgASgCCEEeTkUEQCABKAIMQYgTaiABKAIIQQJ0akEAOwEAIAEgASgCCEEBajYCCAwBCwsgAUEANgIIA0AgASgCCEETTkUEQCABKAIMQfwUaiABKAIIQQJ0akEAOwEAIAEgASgCCEEBajYCCAwBCwsgASgCDEEBOwGUCSABKAIMQQA2AqwtIAEoAgxBADYCqC0gASgCDEEANgKwLSABKAIMQQA2AqAtCyIBAX8jAEEQayIBJAAgASAANgIMIAEoAgwQFSABQRBqJAAL6QEBAX8jAEEwayICIAA2AiQgAiABNwMYIAJCADcDECACIAIoAiQpAwhCAX03AwgCQANAIAIpAxAgAikDCFQEQCACIAIpAxAgAikDCCACKQMQfUIBiHw3AwACQCACKAIkKAIEIAIpAwCnQQN0aikDACACKQMYVgRAIAIgAikDAEIBfTcDCAwBCwJAIAIpAwAgAigCJCkDCFIEQCACKAIkKAIEIAIpAwBCAXynQQN0aikDACACKQMYWA0BCyACIAIpAwA3AygMBAsgAiACKQMAQgF8NwMQCwwBCwsgAiACKQMQNwMoCyACKQMoC6cBAQF/IwBBMGsiBCQAIAQgADYCKCAEIAE2AiQgBCACNwMYIAQgAzYCFCAEIAQoAigpAzggBCgCKCkDMCAEKAIkIAQpAxggBCgCFBCIATcDCAJAIAQpAwhCAFMEQCAEQX82AiwMAQsgBCgCKCAEKQMINwM4IAQoAiggBCgCKCkDOBDAASECIAQoAiggAjcDQCAEQQA2AiwLIAQoAiwhACAEQTBqJAAgAAvrAQEBfyMAQSBrIgMkACADIAA2AhggAyABNwMQIAMgAjYCDAJAIAMpAxAgAygCGCkDEFQEQCADQQE6AB8MAQsgAyADKAIYKAIAIAMpAxBCBIanEE4iADYCCCAARQRAIAMoAgxBDkEAEBQgA0EAOgAfDAELIAMoAhggAygCCDYCACADIAMoAhgoAgQgAykDEEIBfEIDhqcQTiIANgIEIABFBEAgAygCDEEOQQAQFCADQQA6AB8MAQsgAygCGCADKAIENgIEIAMoAhggAykDEDcDECADQQE6AB8LIAMtAB9BAXEhACADQSBqJAAgAAvOAgEBfyMAQTBrIgQkACAEIAA2AiggBCABNwMgIAQgAjYCHCAEIAM2AhgCQAJAIAQoAigNACAEKQMgUA0AIAQoAhhBEkEAEBQgBEEANgIsDAELIAQgBCgCKCAEKQMgIAQoAhwgBCgCGBBMIgA2AgwgAEUEQCAEQQA2AiwMAQsgBEEYEBgiADYCFCAARQRAIAQoAhhBDkEAEBQgBCgCDBAyIARBADYCLAwBCyAEKAIUIAQoAg
w2AhAgBCgCFEEANgIUQQAQASEAIAQoAhQgADYCDCMAQRBrIgAgBCgCFDYCDCAAKAIMQQA2AgAgACgCDEEANgIEIAAoAgxBADYCCCAEQQIgBCgCFCAEKAIYEIMBIgA2AhAgAEUEQCAEKAIUKAIQEDIgBCgCFBAVIARBADYCLAwBCyAEIAQoAhA2AiwLIAQoAiwhACAEQTBqJAAgAAupAQEBfyMAQTBrIgQkACAEIAA2AiggBCABNwMgIAQgAjYCHCAEIAM2AhgCQCAEKAIoRQRAIAQpAyBCAFIEQCAEKAIYQRJBABAUIARBADYCLAwCCyAEQQBCACAEKAIcIAQoAhgQwwE2AiwMAQsgBCAEKAIoNgIIIAQgBCkDIDcDECAEIARBCGpCASAEKAIcIAQoAhgQwwE2AiwLIAQoAiwhACAEQTBqJAAgAAtGAQF/IwBBIGsiAyQAIAMgADYCHCADIAE3AxAgAyACNgIMIAMoAhwgAykDECADKAIMIAMoAhxBCGoQTSEAIANBIGokACAAC4sMAQZ/IAAgAWohBQJAAkAgACgCBCICQQFxDQAgAkEDcUUNASAAKAIAIgIgAWohAQJAIAAgAmsiAEH4mwEoAgBHBEAgAkH/AU0EQCAAKAIIIgQgAkEDdiICQQN0QYycAWpGGiAAKAIMIgMgBEcNAkHkmwFB5JsBKAIAQX4gAndxNgIADAMLIAAoAhghBgJAIAAgACgCDCIDRwRAIAAoAggiAkH0mwEoAgBJGiACIAM2AgwgAyACNgIIDAELAkAgAEEUaiICKAIAIgQNACAAQRBqIgIoAgAiBA0AQQAhAwwBCwNAIAIhByAEIgNBFGoiAigCACIEDQAgA0EQaiECIAMoAhAiBA0ACyAHQQA2AgALIAZFDQICQCAAIAAoAhwiBEECdEGUngFqIgIoAgBGBEAgAiADNgIAIAMNAUHomwFB6JsBKAIAQX4gBHdxNgIADAQLIAZBEEEUIAYoAhAgAEYbaiADNgIAIANFDQMLIAMgBjYCGCAAKAIQIgIEQCADIAI2AhAgAiADNgIYCyAAKAIUIgJFDQIgAyACNgIUIAIgAzYCGAwCCyAFKAIEIgJBA3FBA0cNAUHsmwEgATYCACAFIAJBfnE2AgQgACABQQFyNgIEIAUgATYCAA8LIAQgAzYCDCADIAQ2AggLAkAgBSgCBCICQQJxRQRAIAVB/JsBKAIARgRAQfybASAANgIAQfCbAUHwmwEoAgAgAWoiATYCACAAIAFBAXI2AgQgAEH4mwEoAgBHDQNB7JsBQQA2AgBB+JsBQQA2AgAPCyAFQfibASgCAEYEQEH4mwEgADYCAEHsmwFB7JsBKAIAIAFqIgE2AgAgACABQQFyNgIEIAAgAWogATYCAA8LIAJBeHEgAWohAQJAIAJB/wFNBEAgBSgCCCIEIAJBA3YiAkEDdEGMnAFqRhogBCAFKAIMIgNGBEBB5JsBQeSbASgCAEF+IAJ3cTYCAAwCCyAEIAM2AgwgAyAENgIIDAELIAUoAhghBgJAIAUgBSgCDCIDRwRAIAUoAggiAkH0mwEoAgBJGiACIAM2AgwgAyACNgIIDAELAkAgBUEUaiIEKAIAIgINACAFQRBqIgQoAgAiAg0AQQAhAwwBCwNAIAQhByACIgNBFGoiBCgCACICDQAgA0EQaiEEIAMoAhAiAg0ACyAHQQA2AgALIAZFDQACQCAFIAUoAhwiBEECdEGUngFqIgIoAgBGBEAgAiADNgIAIAMNAUHomwFB6JsBKAIAQX4gBHdxNgIADAILIAZBEEEUIAYoAhAgBUYbaiADNgIAIANFDQELIAMgBjYCGCAFKAIQIgIEQCADIAI2AhAgAiADNgIYCyAFKAIUIgJFDQAgAyACNgIUIAIgAzYCGAsgACABQQFyNgIEIAAgAWogATYCACAAQfibASgCAEcNAUHsmwEgATYCAA8LIAUgAkF+cTYCBCAAIAFBAXI2AgQgACABaiABNgIACyABQf8BTQRAIAFBA3YiAkEDdEGMnAFqIQECf0HkmwEoAgAiA0EBIAJ0IgJxRQRAQeSbASACIANyNgIAIAEMAQsgASgCCAshAiABIAA2AgggAiAANgIMIAAgATYCDCAAIAI2AggPC0EfIQIgAEIANwIQIAFB////B00EQCABQQh2IgIgAkGA/j9qQRB2QQhxIgR0IgIgAkGA4B9qQRB2QQRxIgN0IgIgAkGAgA9qQRB2QQJxIgJ0QQ92IAMgBHIgAnJrIgJBAXQgASACQRVqdkEBcXJBHGohAgsgACACNgIcIAJBAnRBlJ4BaiEHAkACQEHomwEoAgAiBEEBIAJ0IgNxRQRAQeibASADIARyNgIAIAcgADYCACAAIAc2AhgMAQsgAUEAQRkgAkEBdmsgAkEfRht0IQIgBygCACEDA0AgAyIEKAIEQXhxIAFGDQIgAkEddiEDIAJBAXQhAiAEIANBBHFqIgdBEGooAgAiAw0ACyAHIAA2AhAgACAENgIYCyAAIAA2AgwgACAANgIIDwsgBCgCCCIBIAA2AgwgBCAANgIIIABBADYCGCAAIAQ2AgwgACABNgIICwsGAEG0mwELtQkBAX8jAEHgwABrIgUkACAFIAA2AtRAIAUgATYC0EAgBSACNgLMQCAFIAM3A8BAIAUgBDYCvEAgBSAFKALQQDYCuEACQAJAAkACQAJAAkACQAJAAkACQAJAAkAgBSgCvEAOEQMEAAYBAgUJCgoKCgoKCAoHCgsgBUIANwPYQAwKCyAFIAUoArhAQeQAaiAFKALMQCAFKQPAQBBDNwPYQAwJCyAFKAK4QBAVIAVCADcD2EAMCAsgBSgCuEAoAhAEQCAFIAUoArhAKAIQIAUoArhAKQMYIAUoArhAQeQAahBgIgM3A5hAIANQBEAgBUJ/NwPYQAwJCyAFKAK4QCkDCCAFKAK4QCkDCCAFKQOYQHxWBEAgBSgCuEBB5ABqQRVBABAUIAVCfzcD2EAMCQsgBSgCuEAiACAFKQOYQCAAKQMAfDcDACAFKAK4QCIAIAUpA5hAIAApAwh8NwMIIAUoArhAQQA2AhALIAUoArhALQB4QQFxRQRAIAVCADcDqEADQCAFKQOoQCAFKAK4QCkDAFQEQCAFIAUoArhAKQMAIAUpA6hAfUKAwABWBH5CgMAABSAFKAK4QCkDACAFKQOoQH0LNwOgQCAFIAUoAtRAIAVBEGogBSkDoEAQKyIDNwOwQCADQgBTBEAgBSgCuEBB5ABqIAUoAtRAEBcgBUJ/NwPYQAwLCyAFKQOwQFAEQCAFKAK4QEHkAGpBEUEAEBQgBUJ/NwPYQAwLBSAFIAUpA7BAIAUpA6hAfDcDqEAMAgsACwsLIAUoArhAIAUoArhAKQMANwMgIAVCADcD2EAMBwsgBSkDwEAgBSgCuEApAwggBSgCuEApAyB9VgRAIAUgBSgCuEApAwggBSgCuEApAyB9NwPAQAsgBSkDwEBQBEAgBUIANwPYQAwHCyAFKAK4QC0AeEEBcQRAIAUoAtRAIAUoArhAKQMgQ
QAQJ0EASARAIAUoArhAQeQAaiAFKALUQBAXIAVCfzcD2EAMCAsLIAUgBSgC1EAgBSgCzEAgBSkDwEAQKyIDNwOwQCADQgBTBEAgBSgCuEBB5ABqQRFBABAUIAVCfzcD2EAMBwsgBSgCuEAiACAFKQOwQCAAKQMgfDcDICAFKQOwQFAEQCAFKAK4QCkDICAFKAK4QCkDCFQEQCAFKAK4QEHkAGpBEUEAEBQgBUJ/NwPYQAwICwsgBSAFKQOwQDcD2EAMBgsgBSAFKAK4QCkDICAFKAK4QCkDAH0gBSgCuEApAwggBSgCuEApAwB9IAUoAsxAIAUpA8BAIAUoArhAQeQAahCIATcDCCAFKQMIQgBTBEAgBUJ/NwPYQAwGCyAFKAK4QCAFKQMIIAUoArhAKQMAfDcDICAFQgA3A9hADAULIAUgBSgCzEA2AgQgBSgCBCAFKAK4QEEoaiAFKAK4QEHkAGoQhAFBAEgEQCAFQn83A9hADAULIAVCADcD2EAMBAsgBSAFKAK4QCwAYKw3A9hADAMLIAUgBSgCuEApA3A3A9hADAILIAUgBSgCuEApAyAgBSgCuEApAwB9NwPYQAwBCyAFKAK4QEHkAGpBHEEAEBQgBUJ/NwPYQAsgBSkD2EAhAyAFQeDAAGokACADCwgAQQFBDBB/CyIBAX8jAEEQayIBIAA2AgwgASgCDCIAIAAoAjBBAWo2AjALBwAgACgCLAsHACAAKAIoCxgBAX8jAEEQayIBIAA2AgwgASgCDEEMagsHACAAKAIYCwcAIAAoAhALBwAgACgCCAtFAEGgmwFCADcDAEGYmwFCADcDAEGQmwFCADcDAEGImwFCADcDAEGAmwFCADcDAEH4mgFCADcDAEHwmgFCADcDAEHwmgELFAAgACABrSACrUIghoQgAyAEEH4LEwEBfiAAEEkiAUIgiKcQACABpwsVACAAIAGtIAKtQiCGhCADIAQQxAELFAAgACABIAKtIAOtQiCGhCAEEH0LrQQBAX8jAEEgayIFJAAgBSAANgIYIAUgAa0gAq1CIIaENwMQIAUgAzYCDCAFIAQ2AggCQAJAIAUpAxAgBSgCGCkDMFQEQCAFKAIIQQlNDQELIAUoAhhBCGpBEkEAEBQgBUF/NgIcDAELIAUoAhgoAhhBAnEEQCAFKAIYQQhqQRlBABAUIAVBfzYCHAwBCwJ/IAUoAgwhASMAQRBrIgAkACAAIAE2AgggAEEBOgAHAkAgACgCCEUEQCAAQQE6AA8MAQsgACAAKAIIIAAtAAdBAXEQswFBAEc6AA8LIAAtAA9BAXEhASAAQRBqJAAgAUULBEAgBSgCGEEIakEQQQAQFCAFQX82AhwMAQsgBSAFKAIYKAJAIAUpAxCnQQR0ajYCBCAFIAUoAgQoAgAEfyAFKAIEKAIAKAIQBUF/CzYCAAJAIAUoAgwgBSgCAEYEQCAFKAIEKAIEBEAgBSgCBCgCBCIAIAAoAgBBfnE2AgAgBSgCBCgCBEEAOwFQIAUoAgQoAgQoAgBFBEAgBSgCBCgCBBA3IAUoAgRBADYCBAsLDAELIAUoAgQoAgRFBEAgBSgCBCgCABBAIQAgBSgCBCAANgIEIABFBEAgBSgCGEEIakEOQQAQFCAFQX82AhwMAwsLIAUoAgQoAgQgBSgCDDYCECAFKAIEKAIEIAUoAgg7AVAgBSgCBCgCBCIAIAAoAgBBAXI2AgALIAVBADYCHAsgBSgCHCEAIAVBIGokACAACxcBAX4gACABIAIQciIDQiCIpxAAIAOnCx8BAX4gACABIAKtIAOtQiCGhBArIgRCIIinEAAgBKcLrgECAX8BfgJ/IwBBIGsiAiAANgIUIAIgATYCEAJAIAIoAhRFBEAgAkJ/NwMYDAELIAIoAhBBCHEEQCACIAIoAhQpAzA3AwgDQCACKQMIQgBSBH8gAigCFCgCQCACKQMIQgF9p0EEdGooAgAFQQELRQRAIAIgAikDCEIBfTcDCAwBCwsgAiACKQMINwMYDAELIAIgAigCFCkDMDcDGAsgAikDGCIDQiCIpwsQACADpwsTACAAIAGtIAKtQiCGhCADEMUBC4gCAgF/AX4CfyMAQSBrIgQkACAEIAA2AhQgBCABNgIQIAQgAq0gA61CIIaENwMIAkAgBCgCFEUEQCAEQn83AxgMAQsgBCgCFCgCBARAIARCfzcDGAwBCyAEKQMIQv///////////wBWBEAgBCgCFEEEakESQQAQFCAEQn83AxgMAQsCQCAEKAIULQAQQQFxRQRAIAQpAwhQRQ0BCyAEQgA3AxgMAQsgBCAEKAIUKAIUIAQoAhAgBCkDCBArIgU3AwAgBUIAUwRAIAQoAhRBBGogBCgCFCgCFBAXIARCfzcDGAwBCyAEIAQpAwA3AxgLIAQpAxghBSAEQSBqJAAgBUIgiKcLEAAgBacLTwEBfyMAQSBrIgQkACAEIAA2AhwgBCABrSACrUIghoQ3AxAgBCADNgIMIAQoAhwgBCkDECAEKAIMIAQoAhwoAhwQrQEhACAEQSBqJAAgAAvZAwEBfyMAQSBrIgUkACAFIAA2AhggBSABrSACrUIghoQ3AxAgBSADNgIMIAUgBDYCCAJAIAUoAhggBSkDEEEAQQAQP0UEQCAFQX82AhwMAQsgBSgCGCgCGEECcQRAIAUoAhhBCGpBGUEAEBQgBUF/NgIcDAELIAUoAhgoAkAgBSkDEKdBBHRqKAIIBEAgBSgCGCgCQCAFKQMQp0EEdGooAgggBSgCDBBnQQBIBEAgBSgCGEEIakEPQQAQFCAFQX82AhwMAgsgBUEANgIcDAELIAUgBSgCGCgCQCAFKQMQp0EEdGo2AgQgBSAFKAIEKAIABH8gBSgCDCAFKAIEKAIAKAIURwVBAQtBAXE2AgACQCAFKAIABEAgBSgCBCgCBEUEQCAFKAIEKAIAEEAhACAFKAIEIAA2AgQgAEUEQCAFKAIYQQhqQQ5BABAUIAVBfzYCHAwECwsgBSgCBCgCBCAFKAIMNgIUIAUoAgQoAgQiACAAKAIAQSByNgIADAELIAUoAgQoAgQEQCAFKAIEKAIEIgAgACgCAEFfcTYCACAFKAIEKAIEKAIARQRAIAUoAgQoAgQQNyAFKAIEQQA2AgQLCwsgBUEANgIcCyAFKAIcIQAgBUEgaiQAIAALFwAgACABrSACrUIghoQgAyAEIAUQmQELEgAgACABrSACrUIghoQgAxAnC48BAgF/AX4CfyMAQSBrIgQkACAEIAA2AhQgBCABNgIQIAQgAjYCDCAEIAM2AggCQAJAIAQoAhAEQCAEKAIMDQELIAQoAhRBCGpBEkEAEBQgBEJ/NwMYDAELIAQgBCgCFCAEKAIQIAQoAgwgBCgCCBCaATcDGAsgBCkDGCEFIARBIGokACAFQiCIpwsQACAFpwuFBQIBfwF+An8jAEEwayIDJAAgAyAANgIkIAMgATYCICADIAI2AhwCQCADKAIkKAIYQQJxBEAgAygCJEEIakEZQQAQFCADQn83AygMAQsgAygCIEUEQCAD
KAIkQQhqQRJBABAUIANCfzcDKAwBCyADQQA2AgwgAyADKAIgEC42AhggAygCICADKAIYQQFraiwAAEEvRwRAIAMgAygCGEECahAYIgA2AgwgAEUEQCADKAIkQQhqQQ5BABAUIANCfzcDKAwCCwJAAkAgAygCDCIBIAMoAiAiAHNBA3ENACAAQQNxBEADQCABIAAtAAAiAjoAACACRQ0DIAFBAWohASAAQQFqIgBBA3ENAAsLIAAoAgAiAkF/cyACQYGChAhrcUGAgYKEeHENAANAIAEgAjYCACAAKAIEIQIgAUEEaiEBIABBBGohACACQYGChAhrIAJBf3NxQYCBgoR4cUUNAAsLIAEgAC0AACICOgAAIAJFDQADQCABIAAtAAEiAjoAASABQQFqIQEgAEEBaiEAIAINAAsLIAMoAgwgAygCGGpBLzoAACADKAIMIAMoAhhBAWpqQQA6AAALIAMgAygCJEEAQgBBABB9IgA2AgggAEUEQCADKAIMEBUgA0J/NwMoDAELIAMgAygCJAJ/IAMoAgwEQCADKAIMDAELIAMoAiALIAMoAgggAygCHBCaATcDECADKAIMEBUCQCADKQMQQgBTBEAgAygCCBAbDAELIAMoAiQgAykDEEEAQQNBgID8jwQQmQFBAEgEQCADKAIkIAMpAxAQmAEaIANCfzcDKAwCCwsgAyADKQMQNwMoCyADKQMoIQQgA0EwaiQAIARCIIinCxAAIASnCxEAIAAgAa0gAq1CIIaEEJgBCxcAIAAgAa0gAq1CIIaEIAMgBCAFEIoBC38CAX8BfiMAQSBrIgMkACADIAA2AhggAyABNgIUIAMgAjYCECADIAMoAhggAygCFCADKAIQEHIiBDcDCAJAIARCAFMEQCADQQA2AhwMAQsgAyADKAIYIAMpAwggAygCECADKAIYKAIcEK0BNgIcCyADKAIcIQAgA0EgaiQAIAALEAAjACAAa0FwcSIAJAAgAAsGACAAJAALBAAjAAuCAQIBfwF+IwBBIGsiBCQAIAQgADYCGCAEIAE2AhQgBCACNgIQIAQgAzYCDCAEIAQoAhggBCgCFCAEKAIQEHIiBTcDAAJAIAVCAFMEQCAEQX82AhwMAQsgBCAEKAIYIAQpAwAgBCgCECAEKAIMEH42AhwLIAQoAhwhACAEQSBqJAAgAAvQRQMGfwF+AnwjAEHgAGsiASQAIAEgADYCWAJAIAEoAlhFBEAgAUF/NgJcDAELIwBBIGsiACABKAJYNgIcIAAgAUFAazYCGCAAQQA2AhQgAEIANwMAAkAgACgCHC0AKEEBcUUEQCAAKAIcKAIYIAAoAhwoAhRGDQELIABBATYCFAsgAEIANwMIA0AgACkDCCAAKAIcKQMwVARAAkACQCAAKAIcKAJAIAApAwinQQR0aigCCA0AIAAoAhwoAkAgACkDCKdBBHRqLQAMQQFxDQAgACgCHCgCQCAAKQMIp0EEdGooAgRFDQEgACgCHCgCQCAAKQMIp0EEdGooAgQoAgBFDQELIABBATYCFAsgACgCHCgCQCAAKQMIp0EEdGotAAxBAXFFBEAgACAAKQMAQgF8NwMACyAAIAApAwhCAXw3AwgMAQsLIAAoAhgEQCAAKAIYIAApAwA3AwALIAEgACgCFDYCJCABKQNAUARAAkAgASgCWCgCBEEIcUUEQCABKAIkRQ0BCwJ/IAEoAlgoAgAhAiMAQRBrIgAkACAAIAI2AggCQCAAKAIIKAIkQQNGBEAgAEEANgIMDAELIAAoAggoAiAEQCAAKAIIEC9BAEgEQCAAQX82AgwMAgsLIAAoAggoAiQEQCAAKAIIEGILIAAoAghBAEIAQQ8QIEIAUwRAIABBfzYCDAwBCyAAKAIIQQM2AiQgAEEANgIMCyAAKAIMIQIgAEEQaiQAIAJBAEgLBEACQAJ/IwBBEGsiACABKAJYKAIANgIMIwBBEGsiAiAAKAIMQQxqNgIMIAIoAgwoAgBBFkYLBEAjAEEQayIAIAEoAlgoAgA2AgwjAEEQayICIAAoAgxBDGo2AgwgAigCDCgCBEEsRg0BCyABKAJYQQhqIAEoAlgoAgAQFyABQX82AlwMBAsLCyABKAJYEDwgAUEANgJcDAELIAEoAiRFBEAgASgCWBA8IAFBADYCXAwBCyABKQNAIAEoAlgpAzBWBEAgASgCWEEIakEUQQAQFCABQX82AlwMAQsgASABKQNAp0EDdBAYIgA2AiggAEUEQCABQX82AlwMAQsgAUJ/NwM4IAFCADcDSCABQgA3A1ADQCABKQNQIAEoAlgpAzBUBEACQCABKAJYKAJAIAEpA1CnQQR0aigCAEUNAAJAIAEoAlgoAkAgASkDUKdBBHRqKAIIDQAgASgCWCgCQCABKQNQp0EEdGotAAxBAXENACABKAJYKAJAIAEpA1CnQQR0aigCBEUNASABKAJYKAJAIAEpA1CnQQR0aigCBCgCAEUNAQsgAQJ+IAEpAzggASgCWCgCQCABKQNQp0EEdGooAgApA0hUBEAgASkDOAwBCyABKAJYKAJAIAEpA1CnQQR0aigCACkDSAs3AzgLIAEoAlgoAkAgASkDUKdBBHRqLQAMQQFxRQRAIAEpA0ggASkDQFoEQCABKAIoEBUgASgCWEEIakEUQQAQFCABQX82AlwMBAsgASgCKCABKQNIp0EDdGogASkDUDcDACABIAEpA0hCAXw3A0gLIAEgASkDUEIBfDcDUAwBCwsgASkDSCABKQNAVARAIAEoAigQFSABKAJYQQhqQRRBABAUIAFBfzYCXAwBCwJAAn8jAEEQayIAIAEoAlgoAgA2AgwgACgCDCkDGEKAgAiDUAsEQCABQgA3AzgMAQsgASkDOEJ/UQRAIAFCfzcDGCABQgA3AzggAUIANwNQA0AgASkDUCABKAJYKQMwVARAIAEoAlgoAkAgASkDUKdBBHRqKAIABEAgASgCWCgCQCABKQNQp0EEdGooAgApA0ggASkDOFoEQCABIAEoAlgoAkAgASkDUKdBBHRqKAIAKQNINwM4IAEgASkDUDcDGAsLIAEgASkDUEIBfDcDUAwBCwsgASkDGEJ/UgRAIAEoAlghAiABKQMYIQcgASgCWEEIaiEDIwBBMGsiACQAIAAgAjYCJCAAIAc3AxggACADNgIUIAAgACgCJCAAKQMYIAAoAhQQYCIHNwMIAkAgB1AEQCAAQgA3AygMAQsgACAAKAIkKAJAIAApAxinQQR0aigCADYCBAJAIAApAwggACkDCCAAKAIEKQMgfFgEQCAAKQMIIAAoAgQpAyB8Qv///////////wBYDQELIAAoAhRBBEEWEBQgAEIANwMoDAELIAAgACgCBCkDICAAKQMIfDcDCCAAKAIELwEMQQhxBEAgACgCJCgCACAAKQMIQQAQJ0EASARAIAAoAhQgACgCJCgCABAXIABCADcDKAwCCyAAKAIkKAIAIABCBBArQgRSBEAgACgCFCAAKAIkKAIAEBcgAEIANwMoDAILIAAoAABB0JadwABGBEAgACA
AKQMIQgR8NwMICyAAIAApAwhCDHw3AwggACgCBEEAEGVBAXEEQCAAIAApAwhCCHw3AwgLIAApAwhC////////////AFYEQCAAKAIUQQRBFhAUIABCADcDKAwCCwsgACAAKQMINwMoCyAAKQMoIQcgAEEwaiQAIAEgBzcDOCAHUARAIAEoAigQFSABQX82AlwMBAsLCyABKQM4QgBSBEACfyABKAJYKAIAIQIgASkDOCEHIwBBEGsiACQAIAAgAjYCCCAAIAc3AwACQCAAKAIIKAIkQQFGBEAgACgCCEEMakESQQAQFCAAQX82AgwMAQsgACgCCEEAIAApAwBBERAgQgBTBEAgAEF/NgIMDAELIAAoAghBATYCJCAAQQA2AgwLIAAoAgwhAiAAQRBqJAAgAkEASAsEQCABQgA3AzgLCwsgASkDOFAEQAJ/IAEoAlgoAgAhAiMAQRBrIgAkACAAIAI2AggCQCAAKAIIKAIkQQFGBEAgACgCCEEMakESQQAQFCAAQX82AgwMAQsgACgCCEEAQgBBCBAgQgBTBEAgAEF/NgIMDAELIAAoAghBATYCJCAAQQA2AgwLIAAoAgwhAiAAQRBqJAAgAkEASAsEQCABKAJYQQhqIAEoAlgoAgAQFyABKAIoEBUgAUF/NgJcDAILCyABKAJYKAJUIQIjAEEQayIAJAAgACACNgIMIAAoAgwEQCAAKAIMRAAAAAAAAAAAOQMYIAAoAgwoAgBEAAAAAAAAAAAgACgCDCgCDCAAKAIMKAIEERYACyAAQRBqJAAgAUEANgIsIAFCADcDSANAAkAgASkDSCABKQNAWg0AIAEoAlgoAlQhAiABKQNIIge6IAEpA0C6IgijIQkjAEEgayIAJAAgACACNgIcIAAgCTkDECAAIAdCAXy6IAijOQMIIAAoAhwEQCAAKAIcIAArAxA5AyAgACgCHCAAKwMIOQMoIAAoAhxEAAAAAAAAAAAQVwsgAEEgaiQAIAEgASgCKCABKQNIp0EDdGopAwA3A1AgASABKAJYKAJAIAEpA1CnQQR0ajYCEAJAAkAgASgCECgCAEUNACABKAIQKAIAKQNIIAEpAzhaDQAMAQsgAQJ/QQEgASgCECgCCA0AGiABKAIQKAIEBEBBASABKAIQKAIEKAIAQQFxDQEaCyABKAIQKAIEBH8gASgCECgCBCgCAEHAAHFBAEcFQQALC0EBcTYCFCABKAIQKAIERQRAIAEoAhAoAgAQQCEAIAEoAhAgADYCBCAARQRAIAEoAlhBCGpBDkEAEBQgAUEBNgIsDAMLCyABIAEoAhAoAgQ2AgwCfyABKAJYIQIgASkDUCEHIwBBMGsiACQAIAAgAjYCKCAAIAc3AyACQCAAKQMgIAAoAigpAzBaBEAgACgCKEEIakESQQAQFCAAQX82AiwMAQsgACAAKAIoKAJAIAApAyCnQQR0ajYCHAJAIAAoAhwoAgAEQCAAKAIcKAIALQAEQQFxRQ0BCyAAQQA2AiwMAQsgACgCHCgCACkDSEIafEL///////////8AVgRAIAAoAihBCGpBBEEWEBQgAEF/NgIsDAELIAAoAigoAgAgACgCHCgCACkDSEIafEEAECdBAEgEQCAAKAIoQQhqIAAoAigoAgAQFyAAQX82AiwMAQsgACAAKAIoKAIAQgQgAEEYaiAAKAIoQQhqEEIiAjYCFCACRQRAIABBfzYCLAwBCyAAIAAoAhQQHTsBEiAAIAAoAhQQHTsBECAAKAIUEEdBAXFFBEAgACgCFBAWIAAoAihBCGpBFEEAEBQgAEF/NgIsDAELIAAoAhQQFiAALwEQBEAgACgCKCgCACAALwESrUEBECdBAEgEQCAAKAIoQQhqQQRBtJsBKAIAEBQgAEF/NgIsDAILIABBACAAKAIoKAIAIAAvARBBACAAKAIoQQhqEGM2AgggACgCCEUEQCAAQX82AiwMAgsgACgCCCAALwEQQYACIABBDGogACgCKEEIahCUAUEBcUUEQCAAKAIIEBUgAEF/NgIsDAILIAAoAggQFSAAKAIMBEAgACAAKAIMEJMBNgIMIAAoAhwoAgAoAjQgACgCDBCVASECIAAoAhwoAgAgAjYCNAsLIAAoAhwoAgBBAToABAJAIAAoAhwoAgRFDQAgACgCHCgCBC0ABEEBcQ0AIAAoAhwoAgQgACgCHCgCACgCNDYCNCAAKAIcKAIEQQE6AAQLIABBADYCLAsgACgCLCECIABBMGokACACQQBICwRAIAFBATYCLAwCCyABIAEoAlgoAgAQNSIHNwMwIAdCAFMEQCABQQE2AiwMAgsgASgCDCABKQMwNwNIAkAgASgCFARAIAFBADYCCCABKAIQKAIIRQRAIAEgASgCWCABKAJYIAEpA1BBCEEAEK4BIgA2AgggAEUEQCABQQE2AiwMBQsLAn8gASgCWCECAn8gASgCCARAIAEoAggMAQsgASgCECgCCAshAyABKAIMIQQjAEGgAWsiACQAIAAgAjYCmAEgACADNgKUASAAIAQ2ApABAkAgACgClAEgAEE4ahA5QQBIBEAgACgCmAFBCGogACgClAEQFyAAQX82ApwBDAELIAApAzhCwACDUARAIAAgACkDOELAAIQ3AzggAEEAOwFoCwJAAkAgACgCkAEoAhBBf0cEQCAAKAKQASgCEEF+Rw0BCyAALwFoRQ0AIAAoApABIAAvAWg2AhAMAQsCQAJAIAAoApABKAIQDQAgACkDOEIEg1ANACAAIAApAzhCCIQ3AzggACAAKQNQNwNYDAELIAAgACkDOEL3////D4M3AzgLCyAAKQM4QoABg1AEQCAAIAApAzhCgAGENwM4IABBADsBagsgAEGAAjYCJAJAIAApAzhCBINQBEAgACAAKAIkQYAIcjYCJCAAQn83A3AMAQsgACgCkAEgACkDUDcDKCAAIAApA1A3A3ACQCAAKQM4QgiDUARAAkACQAJAAkACQAJ/AkAgACgCkAEoAhBBf0cEQCAAKAKQASgCEEF+Rw0BC0EIDAELIAAoApABKAIQC0H//wNxDg0CAwMDAwMDAwEDAwMAAwsgAEKUwuTzDzcDEAwDCyAAQoODsP8PNwMQDAILIABC/////w83AxAMAQsgAEIANwMQCyAAKQNQIAApAxBWBEAgACAAKAIkQYAIcjYCJAsMAQsgACgCkAEgACkDWDcDIAsLIAAgACgCmAEoAgAQNSIHNwOIASAHQgBTBEAgACgCmAFBCGogACgCmAEoAgAQFyAAQX82ApwBDAELIAAoApABIgIgAi8BDEH3/wNxOwEMIAAgACgCmAEgACgCkAEgACgCJBBUIgI2AiggAkEASARAIABBfzYCnAEMAQsgACAALwFoAn8CQCAAKAKQASgCEEF/RwRAIAAoApABKAIQQX5HDQELQQgMAQsgACgCkAEoAhALQf//A3FHOgAiIAAgAC0AIkEBcQR/IAAvAWhBAEcFQQALQQFxOgAhIAAgAC8BaAR/IAAtACEFQQELQQFxOgAgIAAgAC0AIkEBcQR/IA
AoApABKAIQQQBHBUEAC0EBcToAHyAAAn9BASAALQAiQQFxDQAaQQEgACgCkAEoAgBBgAFxDQAaIAAoApABLwFSIAAvAWpHC0EBcToAHiAAIAAtAB5BAXEEfyAALwFqQQBHBUEAC0EBcToAHSAAIAAtAB5BAXEEfyAAKAKQAS8BUkEARwVBAAtBAXE6ABwgACAAKAKUATYCNCMAQRBrIgIgACgCNDYCDCACKAIMIgIgAigCMEEBajYCMCAALQAdQQFxBEAgACAALwFqQQAQeyICNgIMIAJFBEAgACgCmAFBCGpBGEEAEBQgACgCNBAbIABBfzYCnAEMAgsgACAAKAKYASAAKAI0IAAvAWpBACAAKAKYASgCHCAAKAIMEQUAIgI2AjAgAkUEQCAAKAI0EBsgAEF/NgKcAQwCCyAAKAI0EBsgACAAKAIwNgI0CyAALQAhQQFxBEAgACAAKAKYASAAKAI0IAAvAWgQsAEiAjYCMCACRQRAIAAoAjQQGyAAQX82ApwBDAILIAAoAjQQGyAAIAAoAjA2AjQLIAAtACBBAXEEQCAAIAAoApgBIAAoAjRBABCvASICNgIwIAJFBEAgACgCNBAbIABBfzYCnAEMAgsgACgCNBAbIAAgACgCMDYCNAsgAC0AH0EBcQRAIAAoApgBIQMgACgCNCEEIAAoApABKAIQIQUgACgCkAEvAVAhBiMAQRBrIgIkACACIAM2AgwgAiAENgIIIAIgBTYCBCACIAY2AgAgAigCDCACKAIIIAIoAgRBASACKAIAELIBIQMgAkEQaiQAIAAgAyICNgIwIAJFBEAgACgCNBAbIABBfzYCnAEMAgsgACgCNBAbIAAgACgCMDYCNAsgAC0AHEEBcQRAIABBADYCBAJAIAAoApABKAJUBEAgACAAKAKQASgCVDYCBAwBCyAAKAKYASgCHARAIAAgACgCmAEoAhw2AgQLCyAAIAAoApABLwFSQQEQeyICNgIIIAJFBEAgACgCmAFBCGpBGEEAEBQgACgCNBAbIABBfzYCnAEMAgsgACAAKAKYASAAKAI0IAAoApABLwFSQQEgACgCBCAAKAIIEQUAIgI2AjAgAkUEQCAAKAI0EBsgAEF/NgKcAQwCCyAAKAI0EBsgACAAKAIwNgI0CyAAIAAoApgBKAIAEDUiBzcDgAEgB0IAUwRAIAAoApgBQQhqIAAoApgBKAIAEBcgAEF/NgKcAQwBCyAAKAKYASEDIAAoAjQhBCAAKQNwIQcjAEHAwABrIgIkACACIAM2ArhAIAIgBDYCtEAgAiAHNwOoQAJAIAIoArRAEEhBAEgEQCACKAK4QEEIaiACKAK0QBAXIAJBfzYCvEAMAQsgAkEANgIMIAJCADcDEANAAkAgAiACKAK0QCACQSBqQoDAABArIgc3AxggB0IAVw0AIAIoArhAIAJBIGogAikDGBA2QQBIBEAgAkF/NgIMBSACKQMYQoDAAFINAiACKAK4QCgCVEUNAiACKQOoQEIAVw0CIAIgAikDGCACKQMQfDcDECACKAK4QCgCVCACKQMQuSACKQOoQLmjEFcMAgsLCyACKQMYQgBTBEAgAigCuEBBCGogAigCtEAQFyACQX82AgwLIAIoArRAEC8aIAIgAigCDDYCvEALIAIoArxAIQMgAkHAwABqJAAgACADNgIsIAAoAjQgAEE4ahA5QQBIBEAgACgCmAFBCGogACgCNBAXIABBfzYCLAsgACgCNCEDIwBBEGsiAiQAIAIgAzYCCAJAA0AgAigCCARAIAIoAggpAxhCgIAEg0IAUgRAIAIgAigCCEEAQgBBEBAgNwMAIAIpAwBCAFMEQCACQf8BOgAPDAQLIAIpAwBCA1UEQCACKAIIQQxqQRRBABAUIAJB/wE6AA8MBAsgAiACKQMAPAAPDAMFIAIgAigCCCgCADYCCAwCCwALCyACQQA6AA8LIAIsAA8hAyACQRBqJAAgACADIgI6ACMgAkEYdEEYdUEASARAIAAoApgBQQhqIAAoAjQQFyAAQX82AiwLIAAoAjQQGyAAKAIsQQBIBEAgAEF/NgKcAQwBCyAAIAAoApgBKAIAEDUiBzcDeCAHQgBTBEAgACgCmAFBCGogACgCmAEoAgAQFyAAQX82ApwBDAELIAAoApgBKAIAIAApA4gBEJsBQQBIBEAgACgCmAFBCGogACgCmAEoAgAQFyAAQX82ApwBDAELIAApAzhC5ACDQuQAUgRAIAAoApgBQQhqQRRBABAUIABBfzYCnAEMAQsgACgCkAEoAgBBIHFFBEACQCAAKQM4QhCDQgBSBEAgACgCkAEgACgCYDYCFAwBCyAAKAKQAUEUahABGgsLIAAoApABIAAvAWg2AhAgACgCkAEgACgCZDYCGCAAKAKQASAAKQNQNwMoIAAoApABIAApA3ggACkDgAF9NwMgIAAoApABIAAoApABLwEMQfn/A3EgAC0AI0EBdHI7AQwgACgCkAEhAyAAKAIkQYAIcUEARyEEIwBBEGsiAiQAIAIgAzYCDCACIAQ6AAsCQCACKAIMKAIQQQ5GBEAgAigCDEE/OwEKDAELIAIoAgwoAhBBDEYEQCACKAIMQS47AQoMAQsCQCACLQALQQFxRQRAIAIoAgxBABBlQQFxRQ0BCyACKAIMQS07AQoMAQsCQCACKAIMKAIQQQhHBEAgAigCDC8BUkEBRw0BCyACKAIMQRQ7AQoMAQsgAiACKAIMKAIwEFEiAzsBCCADQf//A3EEQCACKAIMKAIwKAIAIAIvAQhBAWtqLQAAQS9GBEAgAigCDEEUOwEKDAILCyACKAIMQQo7AQoLIAJBEGokACAAIAAoApgBIAAoApABIAAoAiQQVCICNgIsIAJBAEgEQCAAQX82ApwBDAELIAAoAiggACgCLEcEQCAAKAKYAUEIakEUQQAQFCAAQX82ApwBDAELIAAoApgBKAIAIAApA3gQmwFBAEgEQCAAKAKYAUEIaiAAKAKYASgCABAXIABBfzYCnAEMAQsgAEEANgKcAQsgACgCnAEhAiAAQaABaiQAIAJBAEgLBEAgAUEBNgIsIAEoAggEQCABKAIIEBsLDAQLIAEoAggEQCABKAIIEBsLDAELIAEoAgwiACAALwEMQff/A3E7AQwgASgCWCABKAIMQYACEFRBAEgEQCABQQE2AiwMAwsgASABKAJYIAEpA1AgASgCWEEIahBgIgc3AwAgB1AEQCABQQE2AiwMAwsgASgCWCgCACABKQMAQQAQJ0EASARAIAEoAlhBCGogASgCWCgCABAXIAFBATYCLAwDCwJ/IAEoAlghAiABKAIMKQMgIQcjAEGgwABrIgAkACAAIAI2AphAIAAgBzcDkEAgACAAKQOQQLo5AwACQANAIAApA5BAUEUEQCAAIAApA5BAQoDAAFYEfkKAwAAFIAApA5BACz4CDCAAKAKYQCgCACAAQRBqIAAoAgytIAAoAphAQQhqEGRBAEgEQCAAQX82ApxADAMLIAAoAphAIABBEGogACgCDK0QNkEASARAIABBf
zYCnEAMAwUgACAAKQOQQCAANQIMfTcDkEAgACgCmEAoAlQgACsDACAAKQOQQLqhIAArAwCjEFcMAgsACwsgAEEANgKcQAsgACgCnEAhAiAAQaDAAGokACACQQBICwRAIAFBATYCLAwDCwsLIAEgASkDSEIBfDcDSAwBCwsgASgCLEUEQAJ/IAEoAlghACABKAIoIQMgASkDQCEHIwBBMGsiAiQAIAIgADYCKCACIAM2AiQgAiAHNwMYIAIgAigCKCgCABA1Igc3AxACQCAHQgBTBEAgAkF/NgIsDAELIAIoAighAyACKAIkIQQgAikDGCEHIwBBwAFrIgAkACAAIAM2ArQBIAAgBDYCsAEgACAHNwOoASAAIAAoArQBKAIAEDUiBzcDIAJAIAdCAFMEQCAAKAK0AUEIaiAAKAK0ASgCABAXIABCfzcDuAEMAQsgACAAKQMgNwOgASAAQQA6ABcgAEIANwMYA0AgACkDGCAAKQOoAVQEQCAAIAAoArQBKAJAIAAoArABIAApAxinQQN0aikDAKdBBHRqNgIMIAAgACgCtAECfyAAKAIMKAIEBEAgACgCDCgCBAwBCyAAKAIMKAIAC0GABBBUIgM2AhAgA0EASARAIABCfzcDuAEMAwsgACgCEARAIABBAToAFwsgACAAKQMYQgF8NwMYDAELCyAAIAAoArQBKAIAEDUiBzcDICAHQgBTBEAgACgCtAFBCGogACgCtAEoAgAQFyAAQn83A7gBDAELIAAgACkDICAAKQOgAX03A5gBAkAgACkDoAFC/////w9YBEAgACkDqAFC//8DWA0BCyAAQQE6ABcLIAAgAEEwakLiABApIgM2AiwgA0UEQCAAKAK0AUEIakEOQQAQFCAAQn83A7gBDAELIAAtABdBAXEEQCAAKAIsQecSQQQQQSAAKAIsQiwQLSAAKAIsQS0QHyAAKAIsQS0QHyAAKAIsQQAQISAAKAIsQQAQISAAKAIsIAApA6gBEC0gACgCLCAAKQOoARAtIAAoAiwgACkDmAEQLSAAKAIsIAApA6ABEC0gACgCLEHiEkEEEEEgACgCLEEAECEgACgCLCAAKQOgASAAKQOYAXwQLSAAKAIsQQEQIQsgACgCLEHsEkEEEEEgACgCLEEAECEgACgCLCAAKQOoAUL//wNaBH5C//8DBSAAKQOoAQunQf//A3EQHyAAKAIsIAApA6gBQv//A1oEfkL//wMFIAApA6gBC6dB//8DcRAfIAAoAiwgACkDmAFC/////w9aBH9BfwUgACkDmAGnCxAhIAAoAiwgACkDoAFC/////w9aBH9BfwUgACkDoAGnCxAhIAACfyAAKAK0AS0AKEEBcQRAIAAoArQBKAIkDAELIAAoArQBKAIgCzYClAEgACgCLAJ/IAAoApQBBEAgACgClAEvAQQMAQtBAAtB//8DcRAfAn8jAEEQayIDIAAoAiw2AgwgAygCDC0AAEEBcUULBEAgACgCtAFBCGpBFEEAEBQgACgCLBAWIABCfzcDuAEMAQsgACgCtAECfyMAQRBrIgMgACgCLDYCDCADKAIMKAIECwJ+IwBBEGsiAyAAKAIsNgIMAn4gAygCDC0AAEEBcQRAIAMoAgwpAxAMAQtCAAsLEDZBAEgEQCAAKAIsEBYgAEJ/NwO4AQwBCyAAKAIsEBYgACgClAEEQCAAKAK0ASAAKAKUASgCACAAKAKUAS8BBK0QNkEASARAIABCfzcDuAEMAgsLIAAgACkDmAE3A7gBCyAAKQO4ASEHIABBwAFqJAAgAiAHNwMAIAdCAFMEQCACQX82AiwMAQsgAiACKAIoKAIAEDUiBzcDCCAHQgBTBEAgAkF/NgIsDAELIAJBADYCLAsgAigCLCEAIAJBMGokACAAQQBICwRAIAFBATYCLAsLIAEoAigQFSABKAIsRQRAAn8gASgCWCgCACECIwBBEGsiACQAIAAgAjYCCAJAIAAoAggoAiRBAUcEQCAAKAIIQQxqQRJBABAUIABBfzYCDAwBCyAAKAIIKAIgQQFLBEAgACgCCEEMakEdQQAQFCAAQX82AgwMAQsgACgCCCgCIARAIAAoAggQL0EASARAIABBfzYCDAwCCwsgACgCCEEAQgBBCRAgQgBTBEAgACgCCEECNgIkIABBfzYCDAwBCyAAKAIIQQA2AiQgAEEANgIMCyAAKAIMIQIgAEEQaiQAIAILBEAgASgCWEEIaiABKAJYKAIAEBcgAUEBNgIsCwsgASgCWCgCVCECIwBBEGsiACQAIAAgAjYCDCAAKAIMRAAAAAAAAPA/EFcgAEEQaiQAIAEoAiwEQCABKAJYKAIAEGIgAUF/NgJcDAELIAEoAlgQPCABQQA2AlwLIAEoAlwhACABQeAAaiQAIAAL0g4CB38CfiMAQTBrIgMkACADIAA2AiggAyABNgIkIAMgAjYCICMAQRBrIgAgA0EIajYCDCAAKAIMQQA2AgAgACgCDEEANgIEIAAoAgxBADYCCCADKAIoIQAjAEEgayIEJAAgBCAANgIYIARCADcDECAEQn83AwggBCADQQhqNgIEAkACQCAEKAIYBEAgBCkDCEJ/WQ0BCyAEKAIEQRJBABAUIARBADYCHAwBCyAEKAIYIQAgBCkDECEKIAQpAwghCyAEKAIEIQEjAEGgAWsiAiQAIAIgADYCmAEgAkEANgKUASACIAo3A4gBIAIgCzcDgAEgAkEANgJ8IAIgATYCeAJAAkAgAigClAENACACKAKYAQ0AIAIoAnhBEkEAEBQgAkEANgKcAQwBCyACKQOAAUIAUwRAIAJCADcDgAELAkAgAikDiAFC////////////AFgEQCACKQOIASACKQOIASACKQOAAXxYDQELIAIoAnhBEkEAEBQgAkEANgKcAQwBCyACQYgBEBgiADYCdCAARQRAIAIoAnhBDkEAEBQgAkEANgKcAQwBCyACKAJ0QQA2AhggAigCmAEEQCACKAKYASIAEC5BAWoiARAYIgUEfyAFIAAgARAZBUEACyEAIAIoAnQgADYCGCAARQRAIAIoAnhBDkEAEBQgAigCdBAVIAJBADYCnAEMAgsLIAIoAnQgAigClAE2AhwgAigCdCACKQOIATcDaCACKAJ0IAIpA4ABNwNwAkAgAigCfARAIAIoAnQiACACKAJ8IgEpAwA3AyAgACABKQMwNwNQIAAgASkDKDcDSCAAIAEpAyA3A0AgACABKQMYNwM4IAAgASkDEDcDMCAAIAEpAwg3AyggAigCdEEANgIoIAIoAnQiACAAKQMgQv7///8PgzcDIAwBCyACKAJ0QSBqEDsLIAIoAnQpA3BCAFIEQCACKAJ0IAIoAnQpA3A3AzggAigCdCIAIAApAyBCBIQ3AyALIwBBEGsiACACKAJ0QdgAajYCDCAAKAIMQQA2AgAgACgCDEEANgIEIAAoAgxBADYCCCACKAJ0QQA2AoABIAIoAnRBADYChAEjAEEQayIAIAIoAnQ2AgwgACgCDEEANgIAIAAoAgxBADYCBCAAKAIMQQA2
AgggAkF/NgIEIAJBBzYCAEEOIAIQNEI/hCEKIAIoAnQgCjcDEAJAIAIoAnQoAhgEQCACIAIoAnQoAhggAkEYahCmAUEATjoAFyACLQAXQQFxRQRAAkAgAigCdCkDaFBFDQAgAigCdCkDcFBFDQAgAigCdEL//wM3AxALCwwBCwJAIAIoAnQoAhwiACgCTEEASA0ACyAAKAI8IQBBACEFIwBBIGsiBiQAAn8CQCAAIAJBGGoiCRAKIgFBeEYEQCMAQSBrIgckACAAIAdBCGoQCSIIBH9BtJsBIAg2AgBBAAVBAQshCCAHQSBqJAAgCA0BCyABQYFgTwR/QbSbAUEAIAFrNgIAQX8FIAELDAELA0AgBSAGaiIBIAVBxxJqLQAAOgAAIAVBDkchByAFQQFqIQUgBw0ACwJAIAAEQEEPIQUgACEBA0AgAUEKTwRAIAVBAWohBSABQQpuIQEMAQsLIAUgBmpBADoAAANAIAYgBUEBayIFaiAAIABBCm4iAUEKbGtBMHI6AAAgAEEJSyEHIAEhACAHDQALDAELIAFBMDoAACAGQQA6AA8LIAYgCRACIgBBgWBPBH9BtJsBQQAgAGs2AgBBfwUgAAsLIQAgBkEgaiQAIAIgAEEATjoAFwsCQCACLQAXQQFxRQRAIAIoAnRB2ABqQQVBtJsBKAIAEBQMAQsgAigCdCkDIEIQg1AEQCACKAJ0IAIoAlg2AkggAigCdCIAIAApAyBCEIQ3AyALIAIoAiRBgOADcUGAgAJGBEAgAigCdEL/gQE3AxAgAikDQCACKAJ0KQNoIAIoAnQpA3B8VARAIAIoAnhBEkEAEBQgAigCdCgCGBAVIAIoAnQQFSACQQA2ApwBDAMLIAIoAnQpA3BQBEAgAigCdCACKQNAIAIoAnQpA2h9NwM4IAIoAnQiACAAKQMgQgSENwMgAkAgAigCdCgCGEUNACACKQOIAVBFDQAgAigCdEL//wM3AxALCwsLIAIoAnQiACAAKQMQQoCAEIQ3AxAgAkEeIAIoAnQgAigCeBCDASIANgJwIABFBEAgAigCdCgCGBAVIAIoAnQQFSACQQA2ApwBDAELIAIgAigCcDYCnAELIAIoApwBIQAgAkGgAWokACAEIAA2AhwLIAQoAhwhACAEQSBqJAAgAyAANgIYAkAgAEUEQCADKAIgIANBCGoQnQEgA0EIahA4IANBADYCLAwBCyADIAMoAhggAygCJCADQQhqEJwBIgA2AhwgAEUEQCADKAIYEBsgAygCICADQQhqEJ0BIANBCGoQOCADQQA2AiwMAQsgA0EIahA4IAMgAygCHDYCLAsgAygCLCEAIANBMGokACAAC5IfAQZ/IwBB4ABrIgQkACAEIAA2AlQgBCABNgJQIAQgAjcDSCAEIAM2AkQgBCAEKAJUNgJAIAQgBCgCUDYCPAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAIAQoAkQOEwYHAgwEBQoOAQMJEAsPDQgREQARCyAEQgA3A1gMEQsgBCgCQCgCGEUEQCAEKAJAQRxBABAUIARCfzcDWAwRCyAEKAJAIQAjAEGAAWsiASQAIAEgADYCeCABIAEoAngoAhgQLkEIahAYIgA2AnQCQCAARQRAIAEoAnhBDkEAEBQgAUF/NgJ8DAELAkAgASgCeCgCGCABQRBqEKYBRQRAIAEgASgCHDYCbAwBCyABQX82AmwLIAEoAnQhACABIAEoAngoAhg2AgAgAEGrEiABEG8gASgCdCEDIAEoAmwhByMAQTBrIgAkACAAIAM2AiggACAHNgIkIABBADYCECAAIAAoAiggACgCKBAuajYCGCAAIAAoAhhBAWs2AhwDQCAAKAIcIAAoAihPBH8gACgCHCwAAEHYAEYFQQALQQFxBEAgACAAKAIQQQFqNgIQIAAgACgCHEEBazYCHAwBCwsCQCAAKAIQRQRAQbSbAUEcNgIAIABBfzYCLAwBCyAAIAAoAhxBAWo2AhwDQCMAQRBrIgckAAJAAn8jAEEQayIDJAAgAyAHQQhqNgIIIANBBDsBBiADQegLQQBBABBsIgU2AgACQCAFQQBIBEAgA0EAOgAPDAELAn8gAygCACEGIAMoAgghCCADLwEGIQkjAEEQayIFJAAgBSAJNgIMIAUgCDYCCCAGIAVBCGpBASAFQQRqEAYiBgR/QbSbASAGNgIAQX8FQQALIQYgBSgCBCEIIAVBEGokACADLwEGQX8gCCAGG0cLBEAgAygCABBrIANBADoADwwBCyADKAIAEGsgA0EBOgAPCyADLQAPQQFxIQUgA0EQaiQAIAULBEAgByAHKAIINgIMDAELQcCgAS0AAEEBcUUEQEEAEAEhBgJAQciZASgCACIDRQRAQcyZASgCACAGNgIADAELQdCZAUEDQQNBASADQQdGGyADQR9GGzYCAEG8oAFBADYCAEHMmQEoAgAhBSADQQFOBEAgBq0hAkEAIQYDQCAFIAZBAnRqIAJCrf7V5NSF/ajYAH5CAXwiAkIgiD4CACAGQQFqIgYgA0cNAAsLIAUgBSgCAEEBcjYCAAsLQcyZASgCACEDAkBByJkBKAIAIgVFBEAgAyADKAIAQe2cmY4EbEG54ABqQf////8HcSIDNgIADAELIANB0JkBKAIAIgZBAnRqIgggCCgCACADQbygASgCACIIQQJ0aigCAGoiAzYCAEG8oAFBACAIQQFqIgggBSAIRhs2AgBB0JkBQQAgBkEBaiIGIAUgBkYbNgIAIANBAXYhAwsgByADNgIMCyAHKAIMIQMgB0EQaiQAIAAgAzYCDCAAIAAoAhw2AhQDQCAAKAIUIAAoAhhJBEAgACAAKAIMQSRwOgALAn8gACwAC0EKSARAIAAsAAtBMGoMAQsgACwAC0HXAGoLIQMgACAAKAIUIgdBAWo2AhQgByADOgAAIAAgACgCDEEkbjYCDAwBCwsgACgCKCEDIAAgACgCJEF/RgR/QbYDBSAAKAIkCzYCACAAIANBwoEgIAAQbCIDNgIgIANBAE4EQCAAKAIkQX9HBEAgACgCKCAAKAIkEA8iA0GBYE8Ef0G0mwFBACADazYCAEEABSADCxoLIAAgACgCIDYCLAwCC0G0mwEoAgBBFEYNAAsgAEF/NgIsCyAAKAIsIQMgAEEwaiQAIAEgAyIANgJwIABBf0YEQCABKAJ4QQxBtJsBKAIAEBQgASgCdBAVIAFBfzYCfAwBCyABIAEoAnBBoxIQoQEiADYCaCAARQRAIAEoAnhBDEG0mwEoAgAQFCABKAJwEGsgASgCdBBtGiABKAJ0EBUgAUF/NgJ8DAELIAEoAnggASgCaDYChAEgASgCeCABKAJ0NgKAASABQQA2AnwLIAEoAnwhACABQYABaiQAIAQgAKw3A1gMEAsgBCgCQCgCGARAIAQoAkAoAhwQVhogBCgCQEEANgIcCyAEQgA3A1gMDwsgBCgCQCgChAEQVkEASARAIAQoAkBBADYChAEgBCgCQEEGQbSbASgCABAUCyAEKAJAQQA
2AoQBIAQoAkAoAoABIAQoAkAoAhgQCCIAQYFgTwR/QbSbAUEAIABrNgIAQX8FIAALQQBIBEAgBCgCQEECQbSbASgCABAUIARCfzcDWAwPCyAEKAJAKAKAARAVIAQoAkBBADYCgAEgBEIANwNYDA4LIAQgBCgCQCAEKAJQIAQpA0gQQzcDWAwNCyAEKAJAKAIYEBUgBCgCQCgCgAEQFSAEKAJAKAIcBEAgBCgCQCgCHBBWGgsgBCgCQBAVIARCADcDWAwMCyAEKAJAKAIYBEAgBCgCQCgCGCEBIwBBIGsiACQAIAAgATYCGCAAQQA6ABcgAEGAgCA2AgwCQCAALQAXQQFxBEAgACAAKAIMQQJyNgIMDAELIAAgACgCDDYCDAsgACgCGCEBIAAoAgwhAyAAQbYDNgIAIAAgASADIAAQbCIBNgIQAkAgAUEASARAIABBADYCHAwBCyAAIAAoAhBBoxJBoBIgAC0AF0EBcRsQoQEiATYCCCABRQRAIABBADYCHAwBCyAAIAAoAgg2AhwLIAAoAhwhASAAQSBqJAAgBCgCQCABNgIcIAFFBEAgBCgCQEELQbSbASgCABAUIARCfzcDWAwNCwsgBCgCQCkDaEIAUgRAIAQoAkAoAhwgBCgCQCkDaCAEKAJAEJ8BQQBIBEAgBEJ/NwNYDA0LCyAEKAJAQgA3A3ggBEIANwNYDAsLAkAgBCgCQCkDcEIAUgRAIAQgBCgCQCkDcCAEKAJAKQN4fTcDMCAEKQMwIAQpA0hWBEAgBCAEKQNINwMwCwwBCyAEIAQpA0g3AzALIAQpAzBC/////w9WBEAgBEL/////DzcDMAsgBAJ/IAQoAjwhByAEKQMwpyEAIAQoAkAoAhwiAygCTBogAyADLQBKIgFBAWsgAXI6AEogAygCCCADKAIEIgVrIgFBAUgEfyAABSAHIAUgASAAIAAgAUsbIgEQGRogAyADKAIEIAFqNgIEIAEgB2ohByAAIAFrCyIBBEADQAJAAn8gAyADLQBKIgVBAWsgBXI6AEogAygCFCADKAIcSwRAIANBAEEAIAMoAiQRAQAaCyADQQA2AhwgA0IANwMQIAMoAgAiBUEEcQRAIAMgBUEgcjYCAEF/DAELIAMgAygCLCADKAIwaiIGNgIIIAMgBjYCBCAFQRt0QR91C0UEQCADIAcgASADKAIgEQEAIgVBAWpBAUsNAQsgACABawwDCyAFIAdqIQcgASAFayIBDQALCyAACyIANgIsIABFBEACfyAEKAJAKAIcIgAoAkxBf0wEQCAAKAIADAELIAAoAgALQQV2QQFxBEAgBCgCQEEFQbSbASgCABAUIARCfzcDWAwMCwsgBCgCQCIAIAApA3ggBCgCLK18NwN4IAQgBCgCLK03A1gMCgsgBCgCQCgCGBBtQQBIBEAgBCgCQEEWQbSbASgCABAUIARCfzcDWAwKCyAEQgA3A1gMCQsgBCgCQCgChAEEQCAEKAJAKAKEARBWGiAEKAJAQQA2AoQBCyAEKAJAKAKAARBtGiAEKAJAKAKAARAVIAQoAkBBADYCgAEgBEIANwNYDAgLIAQCfyAEKQNIQhBUBEAgBCgCQEESQQAQFEEADAELIAQoAlALNgIYIAQoAhhFBEAgBEJ/NwNYDAgLIARBATYCHAJAAkACQAJAAkAgBCgCGCgCCA4DAAIBAwsgBCAEKAIYKQMANwMgDAMLAkAgBCgCQCkDcFAEQCAEKAJAKAIcIAQoAhgpAwBBAiAEKAJAEGpBAEgEQCAEQn83A1gMDQsgBCAEKAJAKAIcEKMBIgI3AyAgAkIAUwRAIAQoAkBBBEG0mwEoAgAQFCAEQn83A1gMDQsgBCAEKQMgIAQoAkApA2h9NwMgIARBADYCHAwBCyAEIAQoAkApA3AgBCgCGCkDAHw3AyALDAILIAQgBCgCQCkDeCAEKAIYKQMAfDcDIAwBCyAEKAJAQRJBABAUIARCfzcDWAwICwJAAkAgBCkDIEIAUw0AIAQoAkApA3BCAFIEQCAEKQMgIAQoAkApA3BWDQELIAQoAkApA2ggBCkDICAEKAJAKQNofFgNAQsgBCgCQEESQQAQFCAEQn83A1gMCAsgBCgCQCAEKQMgNwN4IAQoAhwEQCAEKAJAKAIcIAQoAkApA3ggBCgCQCkDaHwgBCgCQBCfAUEASARAIARCfzcDWAwJCwsgBEIANwNYDAcLIAQCfyAEKQNIQhBUBEAgBCgCQEESQQAQFEEADAELIAQoAlALNgIUIAQoAhRFBEAgBEJ/NwNYDAcLIAQoAkAoAoQBIAQoAhQpAwAgBCgCFCgCCCAEKAJAEGpBAEgEQCAEQn83A1gMBwsgBEIANwNYDAYLIAQpA0hCOFQEQCAEQn83A1gMBgsCfyMAQRBrIgAgBCgCQEHYAGo2AgwgACgCDCgCAAsEQCAEKAJAAn8jAEEQayIAIAQoAkBB2ABqNgIMIAAoAgwoAgALAn8jAEEQayIAIAQoAkBB2ABqNgIMIAAoAgwoAgQLEBQgBEJ/NwNYDAYLIAQoAlAiACAEKAJAIgEpACA3AAAgACABKQBQNwAwIAAgASkASDcAKCAAIAEpAEA3ACAgACABKQA4NwAYIAAgASkAMDcAECAAIAEpACg3AAggBEI4NwNYDAULIAQgBCgCQCkDEDcDWAwECyAEIAQoAkApA3g3A1gMAwsgBCAEKAJAKAKEARCjATcDCCAEKQMIQgBTBEAgBCgCQEEeQbSbASgCABAUIARCfzcDWAwDCyAEIAQpAwg3A1gMAgsgBCgCQCgChAEiACgCTEEAThogACAAKAIAQU9xNgIAIAQCfyAEKAJQIQEgBCkDSKciACAAAn8gBCgCQCgChAEiAygCTEF/TARAIAEgACADEHEMAQsgASAAIAMQcQsiAUYNABogAQs2AgQCQCAEKQNIIAQoAgStUQRAAn8gBCgCQCgChAEiACgCTEF/TARAIAAoAgAMAQsgACgCAAtBBXZBAXFFDQELIAQoAkBBBkG0mwEoAgAQFCAEQn83A1gMAgsgBCAEKAIErTcDWAwBCyAEKAJAQRxBABAUIARCfzcDWAsgBCkDWCECIARB4ABqJAAgAgsJACAAKAI8EAUL5AEBBH8jAEEgayIDJAAgAyABNgIQIAMgAiAAKAIwIgRBAEdrNgIUIAAoAiwhBSADIAQ2AhwgAyAFNgIYQX8hBAJAAkAgACgCPCADQRBqQQIgA0EMahAGIgUEf0G0mwEgBTYCAEF/BUEAC0UEQCADKAIMIgRBAEoNAQsgACAAKAIAIARBMHFBEHNyNgIADAELIAQgAygCFCIGTQ0AIAAgACgCLCIFNgIEIAAgBSAEIAZrajYCCCAAKAIwBEAgACAFQQFqNgIEIAEgAmpBAWsgBS0AADoAAAsgAiEECyADQSBqJAAgBAv0AgEHfyMAQSBrIgMkACADIAAoAhwiBTYCECAAKAIUIQQgAyACNgIcIAMgATYCGCADIAQgBWsiATYCFCABIAJqIQVBAiEHIANBEGohAQJ/AkACQCAAKAI8IA
NBEGpBAiADQQxqEAMiBAR/QbSbASAENgIAQX8FQQALRQRAA0AgBSADKAIMIgRGDQIgBEF/TA0DIAEgBCABKAIEIghLIgZBA3RqIgkgBCAIQQAgBhtrIgggCSgCAGo2AgAgAUEMQQQgBhtqIgkgCSgCACAIazYCACAFIARrIQUgACgCPCABQQhqIAEgBhsiASAHIAZrIgcgA0EMahADIgQEf0G0mwEgBDYCAEF/BUEAC0UNAAsLIAVBf0cNAQsgACAAKAIsIgE2AhwgACABNgIUIAAgASAAKAIwajYCECACDAELIABBADYCHCAAQgA3AxAgACAAKAIAQSByNgIAQQAgB0ECRg0AGiACIAEoAgRrCyEAIANBIGokACAAC1IBAX8jAEEQayIDJAAgACgCPCABpyABQiCIpyACQf8BcSADQQhqEA0iAAR/QbSbASAANgIAQX8FQQALIQAgAykDCCEBIANBEGokAEJ/IAEgABsL1QQBBX8jAEGwAWsiASQAIAEgADYCqAEgASgCqAEQOAJAAkAgASgCqAEoAgBBAE4EQCABKAKoASgCAEGAFCgCAEgNAQsgASABKAKoASgCADYCECABQSBqQY8SIAFBEGoQbyABQQA2AqQBIAEgAUEgajYCoAEMAQsgASABKAKoASgCAEECdEGAE2ooAgA2AqQBAkACQAJAAkAgASgCqAEoAgBBAnRBkBRqKAIAQQFrDgIAAQILIAEoAqgBKAIEIQJBkJkBKAIAIQRBACEAAkACQANAIAIgAEGgiAFqLQAARwRAQdcAIQMgAEEBaiIAQdcARw0BDAILCyAAIgMNAEGAiQEhAgwBC0GAiQEhAANAIAAtAAAhBSAAQQFqIgIhACAFDQAgAiEAIANBAWsiAw0ACwsgBCgCFBogASACNgKgAQwCCyMAQRBrIgAgASgCqAEoAgQ2AgwgAUEAIAAoAgxrQQJ0QajZAGooAgA2AqABDAELIAFBADYCoAELCwJAIAEoAqABRQRAIAEgASgCpAE2AqwBDAELIAEgASgCoAEQLgJ/IAEoAqQBBEAgASgCpAEQLkECagwBC0EAC2pBAWoQGCIANgIcIABFBEAgAUG4EygCADYCrAEMAQsgASgCHCEAAn8gASgCpAEEQCABKAKkAQwBC0H6EgshA0HfEkH6EiABKAKkARshAiABIAEoAqABNgIIIAEgAjYCBCABIAM2AgAgAEG+CiABEG8gASgCqAEgASgCHDYCCCABIAEoAhw2AqwBCyABKAKsASEAIAFBsAFqJAAgAAsIAEEBQTgQfwszAQF/IAAoAhQiAyABIAIgACgCECADayIBIAEgAksbIgEQGRogACAAKAIUIAFqNgIUIAILjwUCBn4BfyABIAEoAgBBD2pBcHEiAUEQajYCACAAAnwgASkDACEDIAEpAwghBiMAQSBrIggkAAJAIAZC////////////AIMiBEKAgICAgIDAgDx9IARCgICAgICAwP/DAH1UBEAgBkIEhiADQjyIhCEEIANC//////////8PgyIDQoGAgICAgICACFoEQCAEQoGAgICAgICAwAB8IQIMAgsgBEKAgICAgICAgEB9IQIgA0KAgICAgICAgAiFQgBSDQEgAiAEQgGDfCECDAELIANQIARCgICAgICAwP//AFQgBEKAgICAgIDA//8AURtFBEAgBkIEhiADQjyIhEL/////////A4NCgICAgICAgPz/AIQhAgwBC0KAgICAgICA+P8AIQIgBEL///////+//8MAVg0AQgAhAiAEQjCIpyIAQZH3AEkNACADIQIgBkL///////8/g0KAgICAgIDAAIQiBSEHAkAgAEGB9wBrIgFBwABxBEAgAiABQUBqrYYhB0IAIQIMAQsgAUUNACAHIAGtIgSGIAJBwAAgAWutiIQhByACIASGIQILIAggAjcDECAIIAc3AxgCQEGB+AAgAGsiAEHAAHEEQCAFIABBQGqtiCEDQgAhBQwBCyAARQ0AIAVBwAAgAGuthiADIACtIgKIhCEDIAUgAoghBQsgCCADNwMAIAggBTcDCCAIKQMIQgSGIAgpAwAiA0I8iIQhAiAIKQMQIAgpAxiEQgBSrSADQv//////////D4OEIgNCgYCAgICAgIAIWgRAIAJCAXwhAgwBCyADQoCAgICAgICACIVCAFINACACQgGDIAJ8IQILIAhBIGokACACIAZCgICAgICAgICAf4OEvws5AwALrRcDEn8CfgF8IwBBsARrIgkkACAJQQA2AiwCQCABvSIYQn9XBEBBASESQa4IIRMgAZoiAb0hGAwBCyAEQYAQcQRAQQEhEkGxCCETDAELQbQIQa8IIARBAXEiEhshEyASRSEXCwJAIBhCgICAgICAgPj/AINCgICAgICAgPj/AFEEQCAAQSAgAiASQQNqIg0gBEH//3txECYgACATIBIQIiAAQeQLQbUSIAVBIHEiAxtBjw1BuRIgAxsgASABYhtBAxAiDAELIAlBEGohEAJAAn8CQCABIAlBLGoQqQEiASABoCIBRAAAAAAAAAAAYgRAIAkgCSgCLCIGQQFrNgIsIAVBIHIiFEHhAEcNAQwDCyAFQSByIhRB4QBGDQIgCSgCLCELQQYgAyADQQBIGwwBCyAJIAZBHWsiCzYCLCABRAAAAAAAALBBoiEBQQYgAyADQQBIGwshCiAJQTBqIAlB0AJqIAtBAEgbIg4hBwNAIAcCfyABRAAAAAAAAPBBYyABRAAAAAAAAAAAZnEEQCABqwwBC0EACyIDNgIAIAdBBGohByABIAO4oUQAAAAAZc3NQaIiAUQAAAAAAAAAAGINAAsCQCALQQFIBEAgCyEDIAchBiAOIQgMAQsgDiEIIAshAwNAIANBHSADQR1IGyEMAkAgB0EEayIGIAhJDQAgDK0hGUIAIRgDQCAGIAY1AgAgGYYgGHwiGCAYQoCU69wDgCIYQoCU69wDfn0+AgAgCCAGQQRrIgZNBEAgGEL/////D4MhGAwBCwsgGKciA0UNACAIQQRrIgggAzYCAAsDQCAIIAciBkkEQCAGQQRrIgcoAgBFDQELCyAJIAkoAiwgDGsiAzYCLCAGIQcgA0EASg0ACwsgCkEZakEJbSEHIANBf0wEQCAHQQFqIQ0gFEHmAEYhFQNAQQlBACADayADQXdIGyEWAkAgBiAISwRAQYCU69wDIBZ2IQ9BfyAWdEF/cyERQQAhAyAIIQcDQCAHIAMgBygCACIMIBZ2ajYCACAMIBFxIA9sIQMgB0EEaiIHIAZJDQALIAggCEEEaiAIKAIAGyEIIANFDQEgBiADNgIAIAZBBGohBgwBCyAIIAhBBGogCCgCABshCAsgCSAJKAIsIBZqIgM2AiwgDiAIIBUbIgcgDUECdGogBiAGIAdrQQJ1IA1KGyEGIANBAEgNAAsLQQAhBwJAIAYgCE0NACAOIAhrQQJ1QQlsIQcgCCgCACIMQQpJDQBB5AAhAwNAIAdBAWohByADIAxLDQEgA0EKbCEDDAALAAsgCkEAIAcgFEHmAEYba
yAUQecARiAKQQBHcWsiAyAGIA5rQQJ1QQlsQQlrSARAIANBgMgAaiIRQQltIgxBAnQgCUEwakEEciAJQdQCaiALQQBIG2pBgCBrIQ1BCiEDAkAgESAMQQlsayIMQQdKDQBB5AAhAwNAIAxBAWoiDEEIRg0BIANBCmwhAwwACwALAkAgDSgCACIRIBEgA24iDCADbGsiD0EBIA1BBGoiCyAGRhtFDQBEAAAAAAAA4D9EAAAAAAAA8D9EAAAAAAAA+D8gBiALRhtEAAAAAAAA+D8gDyADQQF2IgtGGyALIA9LGyEaRAEAAAAAAEBDRAAAAAAAAEBDIAxBAXEbIQECQCAXDQAgEy0AAEEtRw0AIBqaIRogAZohAQsgDSARIA9rIgs2AgAgASAaoCABYQ0AIA0gAyALaiIDNgIAIANBgJTr3ANPBEADQCANQQA2AgAgCCANQQRrIg1LBEAgCEEEayIIQQA2AgALIA0gDSgCAEEBaiIDNgIAIANB/5Pr3ANLDQALCyAOIAhrQQJ1QQlsIQcgCCgCACILQQpJDQBB5AAhAwNAIAdBAWohByADIAtLDQEgA0EKbCEDDAALAAsgDUEEaiIDIAYgAyAGSRshBgsDQCAGIgsgCE0iDEUEQCALQQRrIgYoAgBFDQELCwJAIBRB5wBHBEAgBEEIcSEPDAELIAdBf3NBfyAKQQEgChsiBiAHSiAHQXtKcSIDGyAGaiEKQX9BfiADGyAFaiEFIARBCHEiDw0AQXchBgJAIAwNACALQQRrKAIAIgNFDQBBACEGIANBCnANAEEAIQxB5AAhBgNAIAMgBnBFBEAgDEEBaiEMIAZBCmwhBgwBCwsgDEF/cyEGCyALIA5rQQJ1QQlsIQMgBUFfcUHGAEYEQEEAIQ8gCiADIAZqQQlrIgNBACADQQBKGyIDIAMgCkobIQoMAQtBACEPIAogAyAHaiAGakEJayIDQQAgA0EAShsiAyADIApKGyEKCyAKIA9yQQBHIREgAEEgIAIgBUFfcSIMQcYARgR/IAdBACAHQQBKGwUgECAHIAdBH3UiA2ogA3OtIBAQRCIGa0EBTARAA0AgBkEBayIGQTA6AAAgECAGa0ECSA0ACwsgBkECayIVIAU6AAAgBkEBa0EtQSsgB0EASBs6AAAgECAVawsgCiASaiARampBAWoiDSAEECYgACATIBIQIiAAQTAgAiANIARBgIAEcxAmAkACQAJAIAxBxgBGBEAgCUEQakEIciEDIAlBEGpBCXIhByAOIAggCCAOSxsiBSEIA0AgCDUCACAHEEQhBgJAIAUgCEcEQCAGIAlBEGpNDQEDQCAGQQFrIgZBMDoAACAGIAlBEGpLDQALDAELIAYgB0cNACAJQTA6ABggAyEGCyAAIAYgByAGaxAiIAhBBGoiCCAOTQ0AC0EAIQYgEUUNAiAAQdYSQQEQIiAIIAtPDQEgCkEBSA0BA0AgCDUCACAHEEQiBiAJQRBqSwRAA0AgBkEBayIGQTA6AAAgBiAJQRBqSw0ACwsgACAGIApBCSAKQQlIGxAiIApBCWshBiAIQQRqIgggC08NAyAKQQlKIQMgBiEKIAMNAAsMAgsCQCAKQQBIDQAgCyAIQQRqIAggC0kbIQUgCUEQakEJciELIAlBEGpBCHIhAyAIIQcDQCALIAc1AgAgCxBEIgZGBEAgCUEwOgAYIAMhBgsCQCAHIAhHBEAgBiAJQRBqTQ0BA0AgBkEBayIGQTA6AAAgBiAJQRBqSw0ACwwBCyAAIAZBARAiIAZBAWohBkEAIApBAEwgDxsNACAAQdYSQQEQIgsgACAGIAsgBmsiBiAKIAYgCkgbECIgCiAGayEKIAdBBGoiByAFTw0BIApBf0oNAAsLIABBMCAKQRJqQRJBABAmIAAgFSAQIBVrECIMAgsgCiEGCyAAQTAgBkEJakEJQQAQJgsMAQsgE0EJaiATIAVBIHEiCxshCgJAIANBC0sNAEEMIANrIgZFDQBEAAAAAAAAIEAhGgNAIBpEAAAAAAAAMECiIRogBkEBayIGDQALIAotAABBLUYEQCAaIAGaIBqhoJohAQwBCyABIBqgIBqhIQELIBAgCSgCLCIGIAZBH3UiBmogBnOtIBAQRCIGRgRAIAlBMDoADyAJQQ9qIQYLIBJBAnIhDiAJKAIsIQcgBkECayIMIAVBD2o6AAAgBkEBa0EtQSsgB0EASBs6AAAgBEEIcSEHIAlBEGohCANAIAgiBQJ/IAGZRAAAAAAAAOBBYwRAIAGqDAELQYCAgIB4CyIGQYCHAWotAAAgC3I6AAAgASAGt6FEAAAAAAAAMECiIQECQCAFQQFqIgggCUEQamtBAUcNAAJAIAFEAAAAAAAAAABiDQAgA0EASg0AIAdFDQELIAVBLjoAASAFQQJqIQgLIAFEAAAAAAAAAABiDQALIABBICACIA4CfwJAIANFDQAgCCAJa0ESayADTg0AIAMgEGogDGtBAmoMAQsgECAJQRBqIAxqayAIagsiA2oiDSAEECYgACAKIA4QIiAAQTAgAiANIARBgIAEcxAmIAAgCUEQaiAIIAlBEGprIgUQIiAAQTAgAyAFIBAgDGsiA2prQQBBABAmIAAgDCADECILIABBICACIA0gBEGAwABzECYgCUGwBGokACACIA0gAiANShsLBgBB4J8BCwYAQdyfAQsGAEHUnwELGAEBfyMAQRBrIgEgADYCDCABKAIMQQRqCxgBAX8jAEEQayIBIAA2AgwgASgCDEEIagtpAQF/IwBBEGsiASQAIAEgADYCDCABKAIMKAIUBEAgASgCDCgCFBAbCyABQQA2AgggASgCDCgCBARAIAEgASgCDCgCBDYCCAsgASgCDEEEahA4IAEoAgwQFSABKAIIIQAgAUEQaiQAIAALqQEBA38CQCAALQAAIgJFDQADQCABLQAAIgRFBEAgAiEDDAILAkAgAiAERg0AIAJBIHIgAiACQcEAa0EaSRsgAS0AACICQSByIAIgAkHBAGtBGkkbRg0AIAAtAAAhAwwCCyABQQFqIQEgAC0AASECIABBAWohACACDQALCyADQf8BcSIAQSByIAAgAEHBAGtBGkkbIAEtAAAiAEEgciAAIABBwQBrQRpJG2sLiAEBAX8jAEEQayICJAAgAiAANgIMIAIgATYCCCMAQRBrIgAgAigCDDYCDCAAKAIMQQA2AgAgACgCDEEANgIEIAAoAgxBADYCCCACKAIMIAIoAgg2AgACQCACKAIMEKwBQQFGBEAgAigCDEG0mwEoAgA2AgQMAQsgAigCDEEANgIECyACQRBqJAAL2AkBAX8jAEGwAWsiBSQAIAUgADYCpAEgBSABNgKgASAFIAI2ApwBIAUgAzcDkAEgBSAENgKMASAFIAUoAqABNgKIAQJAAkACQAJAAkACQAJAAkACQAJAAkAgBSgCjAEODwABAgMEBQcICQkJCQkJBgkLIAUoAogBQgA3AyAgBUIANwOoAQwJCyAFIAUoAqQBIAUoApwBIAUpA5ABECsiAzcDgAEgA0IAUwRA
IAUoAogBQQhqIAUoAqQBEBcgBUJ/NwOoAQwJCwJAIAUpA4ABUARAIAUoAogBKQMoIAUoAogBKQMgUQRAIAUoAogBQQE2AgQgBSgCiAEgBSgCiAEpAyA3AxggBSgCiAEoAgAEQCAFKAKkASAFQcgAahA5QQBIBEAgBSgCiAFBCGogBSgCpAEQFyAFQn83A6gBDA0LAkAgBSkDSEIgg1ANACAFKAJ0IAUoAogBKAIwRg0AIAUoAogBQQhqQQdBABAUIAVCfzcDqAEMDQsCQCAFKQNIQgSDUA0AIAUpA2AgBSgCiAEpAxhRDQAgBSgCiAFBCGpBFUEAEBQgBUJ/NwOoAQwNCwsLDAELAkAgBSgCiAEoAgQNACAFKAKIASkDICAFKAKIASkDKFYNACAFIAUoAogBKQMoIAUoAogBKQMgfTcDQANAIAUpA0AgBSkDgAFUBEAgBSAFKQOAASAFKQNAfUL/////D1YEfkL/////DwUgBSkDgAEgBSkDQH0LNwM4IAUoAogBKAIwIAUoApwBIAUpA0CnaiAFKQM4pxAaIQAgBSgCiAEgADYCMCAFKAKIASIAIAUpAzggACkDKHw3AyggBSAFKQM4IAUpA0B8NwNADAELCwsLIAUoAogBIgAgBSkDgAEgACkDIHw3AyAgBSAFKQOAATcDqAEMCAsgBUIANwOoAQwHCyAFIAUoApwBNgI0IAUoAogBKAIEBEAgBSgCNCAFKAKIASkDGDcDGCAFKAI0IAUoAogBKAIwNgIsIAUoAjQgBSgCiAEpAxg3AyAgBSgCNEEAOwEwIAUoAjRBADsBMiAFKAI0IgAgACkDAELsAYQ3AwALIAVCADcDqAEMBgsgBSAFKAKIAUEIaiAFKAKcASAFKQOQARBDNwOoAQwFCyAFKAKIARAVIAVCADcDqAEMBAsjAEEQayIAIAUoAqQBNgIMIAUgACgCDCkDGDcDKCAFKQMoQgBTBEAgBSgCiAFBCGogBSgCpAEQFyAFQn83A6gBDAQLIAUpAyghAyAFQX82AhggBUEQNgIUIAVBDzYCECAFQQ02AgwgBUEMNgIIIAVBCjYCBCAFQQk2AgAgBUEIIAUQNEJ/hSADgzcDqAEMAwsgBQJ/IAUpA5ABQhBUBEAgBSgCiAFBCGpBEkEAEBRBAAwBCyAFKAKcAQs2AhwgBSgCHEUEQCAFQn83A6gBDAMLAkAgBSgCpAEgBSgCHCkDACAFKAIcKAIIECdBAE4EQCAFIAUoAqQBEEkiAzcDICADQgBZDQELIAUoAogBQQhqIAUoAqQBEBcgBUJ/NwOoAQwDCyAFKAKIASAFKQMgNwMgIAVCADcDqAEMAgsgBSAFKAKIASkDIDcDqAEMAQsgBSgCiAFBCGpBHEEAEBQgBUJ/NwOoAQsgBSkDqAEhAyAFQbABaiQAIAMLnAwBAX8jAEEwayIFJAAgBSAANgIkIAUgATYCICAFIAI2AhwgBSADNwMQIAUgBDYCDCAFIAUoAiA2AggCQAJAAkACQAJAAkACQAJAAkACQCAFKAIMDhEAAQIDBQYICAgICAgICAcIBAgLIAUoAghCADcDGCAFKAIIQQA6AAwgBSgCCEEAOgANIAUoAghBADoADyAFKAIIQn83AyAgBSgCCCgCrEAgBSgCCCgCqEAoAgwRAABBAXFFBEAgBUJ/NwMoDAkLIAVCADcDKAwICyAFKAIkIQEgBSgCCCECIAUoAhwhBCAFKQMQIQMjAEFAaiIAJAAgACABNgI0IAAgAjYCMCAAIAQ2AiwgACADNwMgAkACfyMAQRBrIgEgACgCMDYCDCABKAIMKAIACwRAIABCfzcDOAwBCwJAIAApAyBQRQRAIAAoAjAtAA1BAXFFDQELIABCADcDOAwBCyAAQgA3AwggAEEAOgAbA0AgAC0AG0EBcQR/QQAFIAApAwggACkDIFQLQQFxBEAgACAAKQMgIAApAwh9NwMAIAAgACgCMCgCrEAgACgCLCAAKQMIp2ogACAAKAIwKAKoQCgCHBEBADYCHCAAKAIcQQJHBEAgACAAKQMAIAApAwh8NwMICwJAAkACQAJAIAAoAhxBAWsOAwACAQMLIAAoAjBBAToADQJAIAAoAjAtAAxBAXENAAsgACgCMCkDIEIAUwRAIAAoAjBBFEEAEBQgAEEBOgAbDAMLAkAgACgCMC0ADkEBcUUNACAAKAIwKQMgIAApAwhWDQAgACgCMEEBOgAPIAAoAjAgACgCMCkDIDcDGCAAKAIsIAAoAjBBKGogACgCMCkDGKcQGRogACAAKAIwKQMYNwM4DAYLIABBAToAGwwCCyAAKAIwLQAMQQFxBEAgAEEBOgAbDAILIAAgACgCNCAAKAIwQShqQoDAABArIgM3AxAgA0IAUwRAIAAoAjAgACgCNBAXIABBAToAGwwCCwJAIAApAxBQBEAgACgCMEEBOgAMIAAoAjAoAqxAIAAoAjAoAqhAKAIYEQIAIAAoAjApAyBCAFMEQCAAKAIwQgA3AyALDAELAkAgACgCMCkDIEIAWQRAIAAoAjBBADoADgwBCyAAKAIwIAApAxA3AyALIAAoAjAoAqxAIAAoAjBBKGogACkDECAAKAIwKAKoQCgCFBEQABoLDAELAn8jAEEQayIBIAAoAjA2AgwgASgCDCgCAEULBEAgACgCMEEUQQAQFAsgAEEBOgAbCwwBCwsgACkDCEIAUgRAIAAoAjBBADoADiAAKAIwIgEgACkDCCABKQMYfDcDGCAAIAApAwg3AzgMAQsgAEF/QQACfyMAQRBrIgEgACgCMDYCDCABKAIMKAIACxusNwM4CyAAKQM4IQMgAEFAayQAIAUgAzcDKAwHCyAFKAIIKAKsQCAFKAIIKAKoQCgCEBEAAEEBcUUEQCAFQn83AygMBwsgBUIANwMoDAYLIAUgBSgCHDYCBAJAIAUoAggtABBBAXEEQCAFKAIILQANQQFxBEAgBSgCBCAFKAIILQAPQQFxBH9BAAUCfwJAIAUoAggoAhRBf0cEQCAFKAIIKAIUQX5HDQELQQgMAQsgBSgCCCgCFAtB//8DcQs7ATAgBSgCBCAFKAIIKQMYNwMgIAUoAgQiACAAKQMAQsgAhDcDAAwCCyAFKAIEIgAgACkDAEK3////D4M3AwAMAQsgBSgCBEEAOwEwIAUoAgQiACAAKQMAQsAAhDcDAAJAIAUoAggtAA1BAXEEQCAFKAIEIAUoAggpAxg3AxggBSgCBCIAIAApAwBCBIQ3AwAMAQsgBSgCBCIAIAApAwBC+////w+DNwMACwsgBUIANwMoDAULIAUgBSgCCC0AD0EBcQR/QQAFIAUoAggoAqxAIAUoAggoAqhAKAIIEQAAC6w3AygMBAsgBSAFKAIIIAUoAhwgBSkDEBBDNwMoDAMLIAUoAggQsQEgBUIANwMoDAILIAVBfzYCACAFQRAgBRA0Qj+ENwMoDAELIAUoAghBFEEAEBQgBUJ/NwMoCyAFKQMoIQMgBUEwaiQAIAMLPAEBfyMAQRBrIgMkACADIAA7AQ4gAyABNgIIIAM
gAjYCBEEAIAMoAgggAygCBBC0ASEAIANBEGokACAAC46nAQEEfyMAQSBrIgUkACAFIAA2AhggBSABNgIUIAUgAjYCECAFIAUoAhg2AgwgBSgCDCAFKAIQKQMAQv////8PVgR+Qv////8PBSAFKAIQKQMACz4CICAFKAIMIAUoAhQ2AhwCQCAFKAIMLQAEQQFxBEAgBSgCDEEQaiEBQQRBACAFKAIMLQAMQQFxGyECIwBBQGoiACQAIAAgATYCOCAAIAI2AjQCQAJAAkAgACgCOBB4DQAgACgCNEEFSg0AIAAoAjRBAE4NAQsgAEF+NgI8DAELIAAgACgCOCgCHDYCLAJAAkAgACgCOCgCDEUNACAAKAI4KAIEBEAgACgCOCgCAEUNAQsgACgCLCgCBEGaBUcNASAAKAI0QQRGDQELIAAoAjhBsNkAKAIANgIYIABBfjYCPAwBCyAAKAI4KAIQRQRAIAAoAjhBvNkAKAIANgIYIABBezYCPAwBCyAAIAAoAiwoAig2AjAgACgCLCAAKAI0NgIoAkAgACgCLCgCFARAIAAoAjgQHCAAKAI4KAIQRQRAIAAoAixBfzYCKCAAQQA2AjwMAwsMAQsCQCAAKAI4KAIEDQAgACgCNEEBdEEJQQAgACgCNEEEShtrIAAoAjBBAXRBCUEAIAAoAjBBBEoba0oNACAAKAI0QQRGDQAgACgCOEG82QAoAgA2AhggAEF7NgI8DAILCwJAIAAoAiwoAgRBmgVHDQAgACgCOCgCBEUNACAAKAI4QbzZACgCADYCGCAAQXs2AjwMAQsgACgCLCgCBEEqRgRAIAAgACgCLCgCMEEEdEH4AGtBCHQ2AigCQAJAIAAoAiwoAogBQQJIBEAgACgCLCgChAFBAk4NAQsgAEEANgIkDAELAkAgACgCLCgChAFBBkgEQCAAQQE2AiQMAQsCQCAAKAIsKAKEAUEGRgRAIABBAjYCJAwBCyAAQQM2AiQLCwsgACAAKAIoIAAoAiRBBnRyNgIoIAAoAiwoAmwEQCAAIAAoAihBIHI2AigLIAAgACgCKEEfIAAoAihBH3BrajYCKCAAKAIsIAAoAigQSyAAKAIsKAJsBEAgACgCLCAAKAI4KAIwQRB2EEsgACgCLCAAKAI4KAIwQf//A3EQSwtBAEEAQQAQPSEBIAAoAjggATYCMCAAKAIsQfEANgIEIAAoAjgQHCAAKAIsKAIUBEAgACgCLEF/NgIoIABBADYCPAwCCwsgACgCLCgCBEE5RgRAQQBBAEEAEBohASAAKAI4IAE2AjAgACgCLCgCCCECIAAoAiwiAygCFCEBIAMgAUEBajYCFCABIAJqQR86AAAgACgCLCgCCCECIAAoAiwiAygCFCEBIAMgAUEBajYCFCABIAJqQYsBOgAAIAAoAiwoAgghAiAAKAIsIgMoAhQhASADIAFBAWo2AhQgASACakEIOgAAAkAgACgCLCgCHEUEQCAAKAIsKAIIIQIgACgCLCIDKAIUIQEgAyABQQFqNgIUIAEgAmpBADoAACAAKAIsKAIIIQIgACgCLCIDKAIUIQEgAyABQQFqNgIUIAEgAmpBADoAACAAKAIsKAIIIQIgACgCLCIDKAIUIQEgAyABQQFqNgIUIAEgAmpBADoAACAAKAIsKAIIIQIgACgCLCIDKAIUIQEgAyABQQFqNgIUIAEgAmpBADoAACAAKAIsKAIIIQIgACgCLCIDKAIUIQEgAyABQQFqNgIUIAEgAmpBADoAACAAKAIsKAKEAUEJRgR/QQIFQQRBACAAKAIsKAKIAUECSAR/IAAoAiwoAoQBQQJIBUEBC0EBcRsLIQIgACgCLCgCCCEDIAAoAiwiBCgCFCEBIAQgAUEBajYCFCABIANqIAI6AAAgACgCLCgCCCECIAAoAiwiAygCFCEBIAMgAUEBajYCFCABIAJqQQM6AAAgACgCLEHxADYCBCAAKAI4EBwgACgCLCgCFARAIAAoAixBfzYCKCAAQQA2AjwMBAsMAQsgACgCLCgCHCgCAEVFQQJBACAAKAIsKAIcKAIsG2pBBEEAIAAoAiwoAhwoAhAbakEIQQAgACgCLCgCHCgCHBtqQRBBACAAKAIsKAIcKAIkG2ohAiAAKAIsKAIIIQMgACgCLCIEKAIUIQEgBCABQQFqNgIUIAEgA2ogAjoAACAAKAIsKAIcKAIEQf8BcSECIAAoAiwoAgghAyAAKAIsIgQoAhQhASAEIAFBAWo2AhQgASADaiACOgAAIAAoAiwoAhwoAgRBCHZB/wFxIQIgACgCLCgCCCEDIAAoAiwiBCgCFCEBIAQgAUEBajYCFCABIANqIAI6AAAgACgCLCgCHCgCBEEQdkH/AXEhAiAAKAIsKAIIIQMgACgCLCIEKAIUIQEgBCABQQFqNgIUIAEgA2ogAjoAACAAKAIsKAIcKAIEQRh2IQIgACgCLCgCCCEDIAAoAiwiBCgCFCEBIAQgAUEBajYCFCABIANqIAI6AAAgACgCLCgChAFBCUYEf0ECBUEEQQAgACgCLCgCiAFBAkgEfyAAKAIsKAKEAUECSAVBAQtBAXEbCyECIAAoAiwoAgghAyAAKAIsIgQoAhQhASAEIAFBAWo2AhQgASADaiACOgAAIAAoAiwoAhwoAgxB/wFxIQIgACgCLCgCCCEDIAAoAiwiBCgCFCEBIAQgAUEBajYCFCABIANqIAI6AAAgACgCLCgCHCgCEARAIAAoAiwoAhwoAhRB/wFxIQIgACgCLCgCCCEDIAAoAiwiBCgCFCEBIAQgAUEBajYCFCABIANqIAI6AAAgACgCLCgCHCgCFEEIdkH/AXEhAiAAKAIsKAIIIQMgACgCLCIEKAIUIQEgBCABQQFqNgIUIAEgA2ogAjoAAAsgACgCLCgCHCgCLARAIAAoAjgoAjAgACgCLCgCCCAAKAIsKAIUEBohASAAKAI4IAE2AjALIAAoAixBADYCICAAKAIsQcUANgIECwsgACgCLCgCBEHFAEYEQCAAKAIsKAIcKAIQBEAgACAAKAIsKAIUNgIgIAAgACgCLCgCHCgCFEH//wNxIAAoAiwoAiBrNgIcA0AgACgCLCgCDCAAKAIsKAIUIAAoAhxqSQRAIAAgACgCLCgCDCAAKAIsKAIUazYCGCAAKAIsKAIIIAAoAiwoAhRqIAAoAiwoAhwoAhAgACgCLCgCIGogACgCGBAZGiAAKAIsIAAoAiwoAgw2AhQCQCAAKAIsKAIcKAIsRQ0AIAAoAiwoAhQgACgCIE0NACAAKAI4KAIwIAAoAiwoAgggACgCIGogACgCLCgCFCAAKAIgaxAaIQEgACgCOCABNgIwCyAAKAIsIgEgACgCGCABKAIgajYCICAAKAI4EBwgACgCLCgCFARAIAAoAixBfzYCKCAAQQA2AjwMBQUgAEEANgIgIAAgACgCHCAAKAIYazYCHAwCCwALCyAAKAIsKAIIIAAoAiwoAhRqIAAoAiwoAhwoAhAgACgCLCgCIGogACgCHBAZGiAAKAIsIgEgACgCHCABKAIUaj
YCFAJAIAAoAiwoAhwoAixFDQAgACgCLCgCFCAAKAIgTQ0AIAAoAjgoAjAgACgCLCgCCCAAKAIgaiAAKAIsKAIUIAAoAiBrEBohASAAKAI4IAE2AjALIAAoAixBADYCIAsgACgCLEHJADYCBAsgACgCLCgCBEHJAEYEQCAAKAIsKAIcKAIcBEAgACAAKAIsKAIUNgIUA0AgACgCLCgCFCAAKAIsKAIMRgRAAkAgACgCLCgCHCgCLEUNACAAKAIsKAIUIAAoAhRNDQAgACgCOCgCMCAAKAIsKAIIIAAoAhRqIAAoAiwoAhQgACgCFGsQGiEBIAAoAjggATYCMAsgACgCOBAcIAAoAiwoAhQEQCAAKAIsQX82AiggAEEANgI8DAULIABBADYCFAsgACgCLCgCHCgCHCECIAAoAiwiAygCICEBIAMgAUEBajYCICAAIAEgAmotAAA2AhAgACgCECECIAAoAiwoAgghAyAAKAIsIgQoAhQhASAEIAFBAWo2AhQgASADaiACOgAAIAAoAhANAAsCQCAAKAIsKAIcKAIsRQ0AIAAoAiwoAhQgACgCFE0NACAAKAI4KAIwIAAoAiwoAgggACgCFGogACgCLCgCFCAAKAIUaxAaIQEgACgCOCABNgIwCyAAKAIsQQA2AiALIAAoAixB2wA2AgQLIAAoAiwoAgRB2wBGBEAgACgCLCgCHCgCJARAIAAgACgCLCgCFDYCDANAIAAoAiwoAhQgACgCLCgCDEYEQAJAIAAoAiwoAhwoAixFDQAgACgCLCgCFCAAKAIMTQ0AIAAoAjgoAjAgACgCLCgCCCAAKAIMaiAAKAIsKAIUIAAoAgxrEBohASAAKAI4IAE2AjALIAAoAjgQHCAAKAIsKAIUBEAgACgCLEF/NgIoIABBADYCPAwFCyAAQQA2AgwLIAAoAiwoAhwoAiQhAiAAKAIsIgMoAiAhASADIAFBAWo2AiAgACABIAJqLQAANgIIIAAoAgghAiAAKAIsKAIIIQMgACgCLCIEKAIUIQEgBCABQQFqNgIUIAEgA2ogAjoAACAAKAIIDQALAkAgACgCLCgCHCgCLEUNACAAKAIsKAIUIAAoAgxNDQAgACgCOCgCMCAAKAIsKAIIIAAoAgxqIAAoAiwoAhQgACgCDGsQGiEBIAAoAjggATYCMAsLIAAoAixB5wA2AgQLIAAoAiwoAgRB5wBGBEAgACgCLCgCHCgCLARAIAAoAiwoAgwgACgCLCgCFEECakkEQCAAKAI4EBwgACgCLCgCFARAIAAoAixBfzYCKCAAQQA2AjwMBAsLIAAoAjgoAjBB/wFxIQIgACgCLCgCCCEDIAAoAiwiBCgCFCEBIAQgAUEBajYCFCABIANqIAI6AAAgACgCOCgCMEEIdkH/AXEhAiAAKAIsKAIIIQMgACgCLCIEKAIUIQEgBCABQQFqNgIUIAEgA2ogAjoAAEEAQQBBABAaIQEgACgCOCABNgIwCyAAKAIsQfEANgIEIAAoAjgQHCAAKAIsKAIUBEAgACgCLEF/NgIoIABBADYCPAwCCwsCQAJAIAAoAjgoAgQNACAAKAIsKAJ0DQAgACgCNEUNASAAKAIsKAIEQZoFRg0BCyAAAn8gACgCLCgChAFFBEAgACgCLCAAKAI0ELcBDAELAn8gACgCLCgCiAFBAkYEQCAAKAIsIQIgACgCNCEDIwBBIGsiASQAIAEgAjYCGCABIAM2AhQCQANAAkAgASgCGCgCdEUEQCABKAIYEFwgASgCGCgCdEUEQCABKAIURQRAIAFBADYCHAwFCwwCCwsgASgCGEEANgJgIAEgASgCGCICKAI4IAIoAmxqLQAAOgAPIAEoAhgiAigCpC0gAigCoC1BAXRqQQA7AQAgAS0ADyEDIAEoAhgiAigCmC0hBCACIAIoAqAtIgJBAWo2AqAtIAIgBGogAzoAACABKAIYIAEtAA9BAnRqIgIgAi8BlAFBAWo7AZQBIAEgASgCGCgCoC0gASgCGCgCnC1BAWtGNgIQIAEoAhgiAiACKAJ0QQFrNgJ0IAEoAhgiAiACKAJsQQFqNgJsIAEoAhAEQCABKAIYAn8gASgCGCgCXEEATgRAIAEoAhgoAjggASgCGCgCXGoMAQtBAAsgASgCGCgCbCABKAIYKAJca0EAECggASgCGCABKAIYKAJsNgJcIAEoAhgoAgAQHCABKAIYKAIAKAIQRQRAIAFBADYCHAwECwsMAQsLIAEoAhhBADYCtC0gASgCFEEERgRAIAEoAhgCfyABKAIYKAJcQQBOBEAgASgCGCgCOCABKAIYKAJcagwBC0EACyABKAIYKAJsIAEoAhgoAlxrQQEQKCABKAIYIAEoAhgoAmw2AlwgASgCGCgCABAcIAEoAhgoAgAoAhBFBEAgAUECNgIcDAILIAFBAzYCHAwBCyABKAIYKAKgLQRAIAEoAhgCfyABKAIYKAJcQQBOBEAgASgCGCgCOCABKAIYKAJcagwBC0EACyABKAIYKAJsIAEoAhgoAlxrQQAQKCABKAIYIAEoAhgoAmw2AlwgASgCGCgCABAcIAEoAhgoAgAoAhBFBEAgAUEANgIcDAILCyABQQE2AhwLIAEoAhwhAiABQSBqJAAgAgwBCwJ/IAAoAiwoAogBQQNGBEAgACgCLCECIAAoAjQhAyMAQTBrIgEkACABIAI2AiggASADNgIkAkADQAJAIAEoAigoAnRBggJNBEAgASgCKBBcAkAgASgCKCgCdEGCAksNACABKAIkDQAgAUEANgIsDAQLIAEoAigoAnRFDQELIAEoAihBADYCYAJAIAEoAigoAnRBA0kNACABKAIoKAJsRQ0AIAEgASgCKCgCOCABKAIoKAJsakEBazYCGCABIAEoAhgtAAA2AhwgASgCHCECIAEgASgCGCIDQQFqNgIYAkAgAy0AASACRw0AIAEoAhwhAiABIAEoAhgiA0EBajYCGCADLQABIAJHDQAgASgCHCECIAEgASgCGCIDQQFqNgIYIAMtAAEgAkcNACABIAEoAigoAjggASgCKCgCbGpBggJqNgIUA0AgASgCHCECIAEgASgCGCIDQQFqNgIYAn9BACADLQABIAJHDQAaIAEoAhwhAiABIAEoAhgiA0EBajYCGEEAIAMtAAEgAkcNABogASgCHCECIAEgASgCGCIDQQFqNgIYQQAgAy0AASACRw0AGiABKAIcIQIgASABKAIYIgNBAWo2AhhBACADLQABIAJHDQAaIAEoAhwhAiABIAEoAhgiA0EBajYCGEEAIAMtAAEgAkcNABogASgCHCECIAEgASgCGCIDQQFqNgIYQQAgAy0AASACRw0AGiABKAIcIQIgASABKAIYIgNBAWo2AhhBACADLQABIAJHDQAaIAEoAhwhAiABIAEoAhgiA0EBajYCGEEAIAMtAAEgAkcNABogASgCGCABKAIUSQtBAXENAAsgASgCKEGCAiABKAIUIAEoAhhrazYCYCABKAIoKAJgIAEoAigoAnRLBEAgASgCKCABKAIoKAJ0NgJgC
wsLAkAgASgCKCgCYEEDTwRAIAEgASgCKCgCYEEDazoAEyABQQE7ARAgASgCKCICKAKkLSACKAKgLUEBdGogAS8BEDsBACABLQATIQMgASgCKCICKAKYLSEEIAIgAigCoC0iAkEBajYCoC0gAiAEaiADOgAAIAEgAS8BEEEBazsBECABKAIoIAEtABNB0N0Aai0AAEECdGpBmAlqIgIgAi8BAEEBajsBACABKAIoQYgTagJ/IAEvARBBgAJJBEAgAS8BEC0A0FkMAQsgAS8BEEEHdkGAAmotANBZC0ECdGoiAiACLwEAQQFqOwEAIAEgASgCKCgCoC0gASgCKCgCnC1BAWtGNgIgIAEoAigiAiACKAJ0IAEoAigoAmBrNgJ0IAEoAigiAiABKAIoKAJgIAIoAmxqNgJsIAEoAihBADYCYAwBCyABIAEoAigiAigCOCACKAJsai0AADoADyABKAIoIgIoAqQtIAIoAqAtQQF0akEAOwEAIAEtAA8hAyABKAIoIgIoApgtIQQgAiACKAKgLSICQQFqNgKgLSACIARqIAM6AAAgASgCKCABLQAPQQJ0aiICIAIvAZQBQQFqOwGUASABIAEoAigoAqAtIAEoAigoApwtQQFrRjYCICABKAIoIgIgAigCdEEBazYCdCABKAIoIgIgAigCbEEBajYCbAsgASgCIARAIAEoAigCfyABKAIoKAJcQQBOBEAgASgCKCgCOCABKAIoKAJcagwBC0EACyABKAIoKAJsIAEoAigoAlxrQQAQKCABKAIoIAEoAigoAmw2AlwgASgCKCgCABAcIAEoAigoAgAoAhBFBEAgAUEANgIsDAQLCwwBCwsgASgCKEEANgK0LSABKAIkQQRGBEAgASgCKAJ/IAEoAigoAlxBAE4EQCABKAIoKAI4IAEoAigoAlxqDAELQQALIAEoAigoAmwgASgCKCgCXGtBARAoIAEoAiggASgCKCgCbDYCXCABKAIoKAIAEBwgASgCKCgCACgCEEUEQCABQQI2AiwMAgsgAUEDNgIsDAELIAEoAigoAqAtBEAgASgCKAJ/IAEoAigoAlxBAE4EQCABKAIoKAI4IAEoAigoAlxqDAELQQALIAEoAigoAmwgASgCKCgCXGtBABAoIAEoAiggASgCKCgCbDYCXCABKAIoKAIAEBwgASgCKCgCACgCEEUEQCABQQA2AiwMAgsLIAFBATYCLAsgASgCLCECIAFBMGokACACDAELIAAoAiwgACgCNCAAKAIsKAKEAUEMbEGA7wBqKAIIEQMACwsLNgIEAkAgACgCBEECRwRAIAAoAgRBA0cNAQsgACgCLEGaBTYCBAsCQCAAKAIEBEAgACgCBEECRw0BCyAAKAI4KAIQRQRAIAAoAixBfzYCKAsgAEEANgI8DAILIAAoAgRBAUYEQAJAIAAoAjRBAUYEQCAAKAIsIQIjAEEgayIBJAAgASACNgIcIAFBAzYCGAJAIAEoAhwoArwtQRAgASgCGGtKBEAgAUECNgIUIAEoAhwiAiACLwG4LSABKAIUQf//A3EgASgCHCgCvC10cjsBuC0gASgCHC8BuC1B/wFxIQMgASgCHCgCCCEEIAEoAhwiBigCFCECIAYgAkEBajYCFCACIARqIAM6AAAgASgCHC8BuC1BCHYhAyABKAIcKAIIIQQgASgCHCIGKAIUIQIgBiACQQFqNgIUIAIgBGogAzoAACABKAIcIAEoAhRB//8DcUEQIAEoAhwoArwta3U7AbgtIAEoAhwiAiACKAK8LSABKAIYQRBrajYCvC0MAQsgASgCHCICIAIvAbgtQQIgASgCHCgCvC10cjsBuC0gASgCHCICIAEoAhggAigCvC1qNgK8LQsgAUGS6AAvAQA2AhACQCABKAIcKAK8LUEQIAEoAhBrSgRAIAFBkOgALwEANgIMIAEoAhwiAiACLwG4LSABKAIMQf//A3EgASgCHCgCvC10cjsBuC0gASgCHC8BuC1B/wFxIQMgASgCHCgCCCEEIAEoAhwiBigCFCECIAYgAkEBajYCFCACIARqIAM6AAAgASgCHC8BuC1BCHYhAyABKAIcKAIIIQQgASgCHCIGKAIUIQIgBiACQQFqNgIUIAIgBGogAzoAACABKAIcIAEoAgxB//8DcUEQIAEoAhwoArwta3U7AbgtIAEoAhwiAiACKAK8LSABKAIQQRBrajYCvC0MAQsgASgCHCICIAIvAbgtQZDoAC8BACABKAIcKAK8LXRyOwG4LSABKAIcIgIgASgCECACKAK8LWo2ArwtCyABKAIcELwBIAFBIGokAAwBCyAAKAI0QQVHBEAgACgCLEEAQQBBABBdIAAoAjRBA0YEQCAAKAIsKAJEIAAoAiwoAkxBAWtBAXRqQQA7AQAgACgCLCgCREEAIAAoAiwoAkxBAWtBAXQQMyAAKAIsKAJ0RQRAIAAoAixBADYCbCAAKAIsQQA2AlwgACgCLEEANgK0LQsLCwsgACgCOBAcIAAoAjgoAhBFBEAgACgCLEF/NgIoIABBADYCPAwDCwsLIAAoAjRBBEcEQCAAQQA2AjwMAQsgACgCLCgCGEEATARAIABBATYCPAwBCwJAIAAoAiwoAhhBAkYEQCAAKAI4KAIwQf8BcSECIAAoAiwoAgghAyAAKAIsIgQoAhQhASAEIAFBAWo2AhQgASADaiACOgAAIAAoAjgoAjBBCHZB/wFxIQIgACgCLCgCCCEDIAAoAiwiBCgCFCEBIAQgAUEBajYCFCABIANqIAI6AAAgACgCOCgCMEEQdkH/AXEhAiAAKAIsKAIIIQMgACgCLCIEKAIUIQEgBCABQQFqNgIUIAEgA2ogAjoAACAAKAI4KAIwQRh2IQIgACgCLCgCCCEDIAAoAiwiBCgCFCEBIAQgAUEBajYCFCABIANqIAI6AAAgACgCOCgCCEH/AXEhAiAAKAIsKAIIIQMgACgCLCIEKAIUIQEgBCABQQFqNgIUIAEgA2ogAjoAACAAKAI4KAIIQQh2Qf8BcSECIAAoAiwoAgghAyAAKAIsIgQoAhQhASAEIAFBAWo2AhQgASADaiACOgAAIAAoAjgoAghBEHZB/wFxIQIgACgCLCgCCCEDIAAoAiwiBCgCFCEBIAQgAUEBajYCFCABIANqIAI6AAAgACgCOCgCCEEYdiECIAAoAiwoAgghAyAAKAIsIgQoAhQhASAEIAFBAWo2AhQgASADaiACOgAADAELIAAoAiwgACgCOCgCMEEQdhBLIAAoAiwgACgCOCgCMEH//wNxEEsLIAAoAjgQHCAAKAIsKAIYQQBKBEAgACgCLEEAIAAoAiwoAhhrNgIYCyAAIAAoAiwoAhRFNgI8CyAAKAI8IQEgAEFAayQAIAUgATYCCAwBCyAFKAIMQRBqIQEjAEHgAGsiACQAIAAgATYCWCAAQQI2AlQCQAJAAkAgACgCWBBKDQAgACgCWCgCDEUNACAAKAJYKAIADQEgACgCWCgCBEUNAQsgAEF+NgJcDAELIAAgACgCWCgCHDYCUCAAKAJQ
KAIEQb/+AEYEQCAAKAJQQcD+ADYCBAsgACAAKAJYKAIMNgJIIAAgACgCWCgCEDYCQCAAIAAoAlgoAgA2AkwgACAAKAJYKAIENgJEIAAgACgCUCgCPDYCPCAAIAAoAlAoAkA2AjggACAAKAJENgI0IAAgACgCQDYCMCAAQQA2AhADQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQCAAKAJQKAIEQbT+AGsOHwABAgMEBQYHCAkKCwwNDg8QERITFBUWFxgZGhscHR4fCyAAKAJQKAIMRQRAIAAoAlBBwP4ANgIEDCELA0AgACgCOEEQSQRAIAAoAkRFDSEgACAAKAJEQQFrNgJEIAAgACgCTCIBQQFqNgJMIAAgACgCPCABLQAAIAAoAjh0ajYCPCAAIAAoAjhBCGo2AjgMAQsLAkAgACgCUCgCDEECcUUNACAAKAI8QZ+WAkcNACAAKAJQKAIoRQRAIAAoAlBBDzYCKAtBAEEAQQAQGiEBIAAoAlAgATYCHCAAIAAoAjw6AAwgACAAKAI8QQh2OgANIAAoAlAoAhwgAEEMakECEBohASAAKAJQIAE2AhwgAEEANgI8IABBADYCOCAAKAJQQbX+ADYCBAwhCyAAKAJQQQA2AhQgACgCUCgCJARAIAAoAlAoAiRBfzYCMAsCQCAAKAJQKAIMQQFxBEAgACgCPEH/AXFBCHQgACgCPEEIdmpBH3BFDQELIAAoAlhBmgw2AhggACgCUEHR/gA2AgQMIQsgACgCPEEPcUEIRwRAIAAoAlhBmw82AhggACgCUEHR/gA2AgQMIQsgACAAKAI8QQR2NgI8IAAgACgCOEEEazYCOCAAIAAoAjxBD3FBCGo2AhQgACgCUCgCKEUEQCAAKAJQIAAoAhQ2AigLAkAgACgCFEEPTQRAIAAoAhQgACgCUCgCKE0NAQsgACgCWEGTDTYCGCAAKAJQQdH+ADYCBAwhCyAAKAJQQQEgACgCFHQ2AhhBAEEAQQAQPSEBIAAoAlAgATYCHCAAKAJYIAE2AjAgACgCUEG9/gBBv/4AIAAoAjxBgARxGzYCBCAAQQA2AjwgAEEANgI4DCALA0AgACgCOEEQSQRAIAAoAkRFDSAgACAAKAJEQQFrNgJEIAAgACgCTCIBQQFqNgJMIAAgACgCPCABLQAAIAAoAjh0ajYCPCAAIAAoAjhBCGo2AjgMAQsLIAAoAlAgACgCPDYCFCAAKAJQKAIUQf8BcUEIRwRAIAAoAlhBmw82AhggACgCUEHR/gA2AgQMIAsgACgCUCgCFEGAwANxBEAgACgCWEGgCTYCGCAAKAJQQdH+ADYCBAwgCyAAKAJQKAIkBEAgACgCUCgCJCAAKAI8QQh2QQFxNgIACwJAIAAoAlAoAhRBgARxRQ0AIAAoAlAoAgxBBHFFDQAgACAAKAI8OgAMIAAgACgCPEEIdjoADSAAKAJQKAIcIABBDGpBAhAaIQEgACgCUCABNgIcCyAAQQA2AjwgAEEANgI4IAAoAlBBtv4ANgIECwNAIAAoAjhBIEkEQCAAKAJERQ0fIAAgACgCREEBazYCRCAAIAAoAkwiAUEBajYCTCAAIAAoAjwgAS0AACAAKAI4dGo2AjwgACAAKAI4QQhqNgI4DAELCyAAKAJQKAIkBEAgACgCUCgCJCAAKAI8NgIECwJAIAAoAlAoAhRBgARxRQ0AIAAoAlAoAgxBBHFFDQAgACAAKAI8OgAMIAAgACgCPEEIdjoADSAAIAAoAjxBEHY6AA4gACAAKAI8QRh2OgAPIAAoAlAoAhwgAEEMakEEEBohASAAKAJQIAE2AhwLIABBADYCPCAAQQA2AjggACgCUEG3/gA2AgQLA0AgACgCOEEQSQRAIAAoAkRFDR4gACAAKAJEQQFrNgJEIAAgACgCTCIBQQFqNgJMIAAgACgCPCABLQAAIAAoAjh0ajYCPCAAIAAoAjhBCGo2AjgMAQsLIAAoAlAoAiQEQCAAKAJQKAIkIAAoAjxB/wFxNgIIIAAoAlAoAiQgACgCPEEIdjYCDAsCQCAAKAJQKAIUQYAEcUUNACAAKAJQKAIMQQRxRQ0AIAAgACgCPDoADCAAIAAoAjxBCHY6AA0gACgCUCgCHCAAQQxqQQIQGiEBIAAoAlAgATYCHAsgAEEANgI8IABBADYCOCAAKAJQQbj+ADYCBAsCQCAAKAJQKAIUQYAIcQRAA0AgACgCOEEQSQRAIAAoAkRFDR8gACAAKAJEQQFrNgJEIAAgACgCTCIBQQFqNgJMIAAgACgCPCABLQAAIAAoAjh0ajYCPCAAIAAoAjhBCGo2AjgMAQsLIAAoAlAgACgCPDYCRCAAKAJQKAIkBEAgACgCUCgCJCAAKAI8NgIUCwJAIAAoAlAoAhRBgARxRQ0AIAAoAlAoAgxBBHFFDQAgACAAKAI8OgAMIAAgACgCPEEIdjoADSAAKAJQKAIcIABBDGpBAhAaIQEgACgCUCABNgIcCyAAQQA2AjwgAEEANgI4DAELIAAoAlAoAiQEQCAAKAJQKAIkQQA2AhALCyAAKAJQQbn+ADYCBAsgACgCUCgCFEGACHEEQCAAIAAoAlAoAkQ2AiwgACgCLCAAKAJESwRAIAAgACgCRDYCLAsgACgCLARAAkAgACgCUCgCJEUNACAAKAJQKAIkKAIQRQ0AIAAgACgCUCgCJCgCFCAAKAJQKAJEazYCFCAAKAJQKAIkKAIQIAAoAhRqIAAoAkwCfyAAKAJQKAIkKAIYIAAoAhQgACgCLGpJBEAgACgCUCgCJCgCGCAAKAIUawwBCyAAKAIsCxAZGgsCQCAAKAJQKAIUQYAEcUUNACAAKAJQKAIMQQRxRQ0AIAAoAlAoAhwgACgCTCAAKAIsEBohASAAKAJQIAE2AhwLIAAgACgCRCAAKAIsazYCRCAAIAAoAiwgACgCTGo2AkwgACgCUCIBIAEoAkQgACgCLGs2AkQLIAAoAlAoAkQNGwsgACgCUEEANgJEIAAoAlBBuv4ANgIECwJAIAAoAlAoAhRBgBBxBEAgACgCREUNGyAAQQA2AiwDQCAAKAJMIQEgACAAKAIsIgJBAWo2AiwgACABIAJqLQAANgIUAkAgACgCUCgCJEUNACAAKAJQKAIkKAIcRQ0AIAAoAlAoAkQgACgCUCgCJCgCIE8NACAAKAIUIQIgACgCUCgCJCgCHCEDIAAoAlAiBCgCRCEBIAQgAUEBajYCRCABIANqIAI6AAALIAAoAhQEfyAAKAIsIAAoAkRJBUEAC0EBcQ0ACwJAIAAoAlAoAhRBgARxRQ0AIAAoAlAoAgxBBHFFDQAgACgCUCgCHCAAKAJMIAAoAiwQGiEBIAAoAlAgATYCHAsgACAAKAJEIAAoAixrNgJEIAAgACgCLCAAKAJMajYCTCAAKAIUDRsMAQsgACgCUCgCJARAIAAoAlAoAiRBADYCHAsLIAAoAlBBADYCRCAAKAJQQbv+ADYCBAsCQCA
AKAJQKAIUQYAgcQRAIAAoAkRFDRogAEEANgIsA0AgACgCTCEBIAAgACgCLCICQQFqNgIsIAAgASACai0AADYCFAJAIAAoAlAoAiRFDQAgACgCUCgCJCgCJEUNACAAKAJQKAJEIAAoAlAoAiQoAihPDQAgACgCFCECIAAoAlAoAiQoAiQhAyAAKAJQIgQoAkQhASAEIAFBAWo2AkQgASADaiACOgAACyAAKAIUBH8gACgCLCAAKAJESQVBAAtBAXENAAsCQCAAKAJQKAIUQYAEcUUNACAAKAJQKAIMQQRxRQ0AIAAoAlAoAhwgACgCTCAAKAIsEBohASAAKAJQIAE2AhwLIAAgACgCRCAAKAIsazYCRCAAIAAoAiwgACgCTGo2AkwgACgCFA0aDAELIAAoAlAoAiQEQCAAKAJQKAIkQQA2AiQLCyAAKAJQQbz+ADYCBAsgACgCUCgCFEGABHEEQANAIAAoAjhBEEkEQCAAKAJERQ0aIAAgACgCREEBazYCRCAAIAAoAkwiAUEBajYCTCAAIAAoAjwgAS0AACAAKAI4dGo2AjwgACAAKAI4QQhqNgI4DAELCwJAIAAoAlAoAgxBBHFFDQAgACgCPCAAKAJQKAIcQf//A3FGDQAgACgCWEH7DDYCGCAAKAJQQdH+ADYCBAwaCyAAQQA2AjwgAEEANgI4CyAAKAJQKAIkBEAgACgCUCgCJCAAKAJQKAIUQQl1QQFxNgIsIAAoAlAoAiRBATYCMAtBAEEAQQAQGiEBIAAoAlAgATYCHCAAKAJYIAE2AjAgACgCUEG//gA2AgQMGAsDQCAAKAI4QSBJBEAgACgCREUNGCAAIAAoAkRBAWs2AkQgACAAKAJMIgFBAWo2AkwgACAAKAI8IAEtAAAgACgCOHRqNgI8IAAgACgCOEEIajYCOAwBCwsgACgCUCAAKAI8QQh2QYD+A3EgACgCPEEYdmogACgCPEGA/gNxQQh0aiAAKAI8Qf8BcUEYdGoiATYCHCAAKAJYIAE2AjAgAEEANgI8IABBADYCOCAAKAJQQb7+ADYCBAsgACgCUCgCEEUEQCAAKAJYIAAoAkg2AgwgACgCWCAAKAJANgIQIAAoAlggACgCTDYCACAAKAJYIAAoAkQ2AgQgACgCUCAAKAI8NgI8IAAoAlAgACgCODYCQCAAQQI2AlwMGAtBAEEAQQAQPSEBIAAoAlAgATYCHCAAKAJYIAE2AjAgACgCUEG//gA2AgQLIAAoAlRBBUYNFCAAKAJUQQZGDRQLIAAoAlAoAggEQCAAIAAoAjwgACgCOEEHcXY2AjwgACAAKAI4IAAoAjhBB3FrNgI4IAAoAlBBzv4ANgIEDBULA0AgACgCOEEDSQRAIAAoAkRFDRUgACAAKAJEQQFrNgJEIAAgACgCTCIBQQFqNgJMIAAgACgCPCABLQAAIAAoAjh0ajYCPCAAIAAoAjhBCGo2AjgMAQsLIAAoAlAgACgCPEEBcTYCCCAAIAAoAjxBAXY2AjwgACAAKAI4QQFrNgI4AkACQAJAAkACQCAAKAI8QQNxDgQAAQIDBAsgACgCUEHB/gA2AgQMAwsjAEEQayIBIAAoAlA2AgwgASgCDEGw8gA2AlAgASgCDEEJNgJYIAEoAgxBsIIBNgJUIAEoAgxBBTYCXCAAKAJQQcf+ADYCBCAAKAJUQQZGBEAgACAAKAI8QQJ2NgI8IAAgACgCOEECazYCOAwXCwwCCyAAKAJQQcT+ADYCBAwBCyAAKAJYQfANNgIYIAAoAlBB0f4ANgIECyAAIAAoAjxBAnY2AjwgACAAKAI4QQJrNgI4DBQLIAAgACgCPCAAKAI4QQdxdjYCPCAAIAAoAjggACgCOEEHcWs2AjgDQCAAKAI4QSBJBEAgACgCREUNFCAAIAAoAkRBAWs2AkQgACAAKAJMIgFBAWo2AkwgACAAKAI8IAEtAAAgACgCOHRqNgI8IAAgACgCOEEIajYCOAwBCwsgACgCPEH//wNxIAAoAjxBEHZB//8Dc0cEQCAAKAJYQaEKNgIYIAAoAlBB0f4ANgIEDBQLIAAoAlAgACgCPEH//wNxNgJEIABBADYCPCAAQQA2AjggACgCUEHC/gA2AgQgACgCVEEGRg0SCyAAKAJQQcP+ADYCBAsgACAAKAJQKAJENgIsIAAoAiwEQCAAKAIsIAAoAkRLBEAgACAAKAJENgIsCyAAKAIsIAAoAkBLBEAgACAAKAJANgIsCyAAKAIsRQ0RIAAoAkggACgCTCAAKAIsEBkaIAAgACgCRCAAKAIsazYCRCAAIAAoAiwgACgCTGo2AkwgACAAKAJAIAAoAixrNgJAIAAgACgCLCAAKAJIajYCSCAAKAJQIgEgASgCRCAAKAIsazYCRAwSCyAAKAJQQb/+ADYCBAwRCwNAIAAoAjhBDkkEQCAAKAJERQ0RIAAgACgCREEBazYCRCAAIAAoAkwiAUEBajYCTCAAIAAoAjwgAS0AACAAKAI4dGo2AjwgACAAKAI4QQhqNgI4DAELCyAAKAJQIAAoAjxBH3FBgQJqNgJkIAAgACgCPEEFdjYCPCAAIAAoAjhBBWs2AjggACgCUCAAKAI8QR9xQQFqNgJoIAAgACgCPEEFdjYCPCAAIAAoAjhBBWs2AjggACgCUCAAKAI8QQ9xQQRqNgJgIAAgACgCPEEEdjYCPCAAIAAoAjhBBGs2AjgCQCAAKAJQKAJkQZ4CTQRAIAAoAlAoAmhBHk0NAQsgACgCWEH9CTYCGCAAKAJQQdH+ADYCBAwRCyAAKAJQQQA2AmwgACgCUEHF/gA2AgQLA0AgACgCUCgCbCAAKAJQKAJgSQRAA0AgACgCOEEDSQRAIAAoAkRFDRIgACAAKAJEQQFrNgJEIAAgACgCTCIBQQFqNgJMIAAgACgCPCABLQAAIAAoAjh0ajYCPCAAIAAoAjhBCGo2AjgMAQsLIAAoAjxBB3EhAiAAKAJQQfQAaiEDIAAoAlAiBCgCbCEBIAQgAUEBajYCbCABQQF0QYDyAGovAQBBAXQgA2ogAjsBACAAIAAoAjxBA3Y2AjwgACAAKAI4QQNrNgI4DAELCwNAIAAoAlAoAmxBE0kEQCAAKAJQQfQAaiECIAAoAlAiAygCbCEBIAMgAUEBajYCbCABQQF0QYDyAGovAQBBAXQgAmpBADsBAAwBCwsgACgCUCAAKAJQQbQKajYCcCAAKAJQIAAoAlAoAnA2AlAgACgCUEEHNgJYIABBACAAKAJQQfQAakETIAAoAlBB8ABqIAAoAlBB2ABqIAAoAlBB9AVqEHU2AhAgACgCEARAIAAoAlhBhwk2AhggACgCUEHR/gA2AgQMEAsgACgCUEEANgJsIAAoAlBBxv4ANgIECwNAAkAgACgCUCgCbCAAKAJQKAJkIAAoAlAoAmhqTw0AA0ACQCAAIAAoAlAoAlAgACgCPEEBIAAoAlAoAlh0QQFrcUECdGooAQA2ASAgAC0AISAAKAI4TQ0AIAAoAkRFDREgACAAKAJEQQFrNgJEIAAgACgCTCIBQQFqNg
JMIAAgACgCPCABLQAAIAAoAjh0ajYCPCAAIAAoAjhBCGo2AjgMAQsLAkAgAC8BIkEQSQRAIAAgACgCPCAALQAhdjYCPCAAIAAoAjggAC0AIWs2AjggAC8BIiECIAAoAlBB9ABqIQMgACgCUCIEKAJsIQEgBCABQQFqNgJsIAFBAXQgA2ogAjsBAAwBCwJAIAAvASJBEEYEQANAIAAoAjggAC0AIUECakkEQCAAKAJERQ0UIAAgACgCREEBazYCRCAAIAAoAkwiAUEBajYCTCAAIAAoAjwgAS0AACAAKAI4dGo2AjwgACAAKAI4QQhqNgI4DAELCyAAIAAoAjwgAC0AIXY2AjwgACAAKAI4IAAtACFrNgI4IAAoAlAoAmxFBEAgACgCWEHPCTYCGCAAKAJQQdH+ADYCBAwECyAAIAAoAlAgACgCUCgCbEEBdGovAXI2AhQgACAAKAI8QQNxQQNqNgIsIAAgACgCPEECdjYCPCAAIAAoAjhBAms2AjgMAQsCQCAALwEiQRFGBEADQCAAKAI4IAAtACFBA2pJBEAgACgCREUNFSAAIAAoAkRBAWs2AkQgACAAKAJMIgFBAWo2AkwgACAAKAI8IAEtAAAgACgCOHRqNgI8IAAgACgCOEEIajYCOAwBCwsgACAAKAI8IAAtACF2NgI8IAAgACgCOCAALQAhazYCOCAAQQA2AhQgACAAKAI8QQdxQQNqNgIsIAAgACgCPEEDdjYCPCAAIAAoAjhBA2s2AjgMAQsDQCAAKAI4IAAtACFBB2pJBEAgACgCREUNFCAAIAAoAkRBAWs2AkQgACAAKAJMIgFBAWo2AkwgACAAKAI8IAEtAAAgACgCOHRqNgI8IAAgACgCOEEIajYCOAwBCwsgACAAKAI8IAAtACF2NgI8IAAgACgCOCAALQAhazYCOCAAQQA2AhQgACAAKAI8Qf8AcUELajYCLCAAIAAoAjxBB3Y2AjwgACAAKAI4QQdrNgI4CwsgACgCUCgCbCAAKAIsaiAAKAJQKAJkIAAoAlAoAmhqSwRAIAAoAlhBzwk2AhggACgCUEHR/gA2AgQMAgsDQCAAIAAoAiwiAUEBazYCLCABBEAgACgCFCECIAAoAlBB9ABqIQMgACgCUCIEKAJsIQEgBCABQQFqNgJsIAFBAXQgA2ogAjsBAAwBCwsLDAELCyAAKAJQKAIEQdH+AEYNDiAAKAJQLwH0BEUEQCAAKAJYQfULNgIYIAAoAlBB0f4ANgIEDA8LIAAoAlAgACgCUEG0Cmo2AnAgACgCUCAAKAJQKAJwNgJQIAAoAlBBCTYCWCAAQQEgACgCUEH0AGogACgCUCgCZCAAKAJQQfAAaiAAKAJQQdgAaiAAKAJQQfQFahB1NgIQIAAoAhAEQCAAKAJYQesINgIYIAAoAlBB0f4ANgIEDA8LIAAoAlAgACgCUCgCcDYCVCAAKAJQQQY2AlwgAEECIAAoAlBB9ABqIAAoAlAoAmRBAXRqIAAoAlAoAmggACgCUEHwAGogACgCUEHcAGogACgCUEH0BWoQdTYCECAAKAIQBEAgACgCWEG5CTYCGCAAKAJQQdH+ADYCBAwPCyAAKAJQQcf+ADYCBCAAKAJUQQZGDQ0LIAAoAlBByP4ANgIECwJAIAAoAkRBBkkNACAAKAJAQYICSQ0AIAAoAlggACgCSDYCDCAAKAJYIAAoAkA2AhAgACgCWCAAKAJMNgIAIAAoAlggACgCRDYCBCAAKAJQIAAoAjw2AjwgACgCUCAAKAI4NgJAIAAoAjAhAiMAQeAAayIBIAAoAlg2AlwgASACNgJYIAEgASgCXCgCHDYCVCABIAEoAlwoAgA2AlAgASABKAJQIAEoAlwoAgRBBWtqNgJMIAEgASgCXCgCDDYCSCABIAEoAkggASgCWCABKAJcKAIQa2s2AkQgASABKAJIIAEoAlwoAhBBgQJrajYCQCABIAEoAlQoAiw2AjwgASABKAJUKAIwNgI4IAEgASgCVCgCNDYCNCABIAEoAlQoAjg2AjAgASABKAJUKAI8NgIsIAEgASgCVCgCQDYCKCABIAEoAlQoAlA2AiQgASABKAJUKAJUNgIgIAFBASABKAJUKAJYdEEBazYCHCABQQEgASgCVCgCXHRBAWs2AhgDQCABKAIoQQ9JBEAgASABKAJQIgJBAWo2AlAgASABKAIsIAItAAAgASgCKHRqNgIsIAEgASgCKEEIajYCKCABIAEoAlAiAkEBajYCUCABIAEoAiwgAi0AACABKAIodGo2AiwgASABKAIoQQhqNgIoCyABIAEoAiQgASgCLCABKAIccUECdGooAQA2ARACQAJAA0AgASABLQARNgIMIAEgASgCLCABKAIMdjYCLCABIAEoAiggASgCDGs2AiggASABLQAQNgIMIAEoAgxFBEAgAS8BEiECIAEgASgCSCIDQQFqNgJIIAMgAjoAAAwCCyABKAIMQRBxBEAgASABLwESNgIIIAEgASgCDEEPcTYCDCABKAIMBEAgASgCKCABKAIMSQRAIAEgASgCUCICQQFqNgJQIAEgASgCLCACLQAAIAEoAih0ajYCLCABIAEoAihBCGo2AigLIAEgASgCCCABKAIsQQEgASgCDHRBAWtxajYCCCABIAEoAiwgASgCDHY2AiwgASABKAIoIAEoAgxrNgIoCyABKAIoQQ9JBEAgASABKAJQIgJBAWo2AlAgASABKAIsIAItAAAgASgCKHRqNgIsIAEgASgCKEEIajYCKCABIAEoAlAiAkEBajYCUCABIAEoAiwgAi0AACABKAIodGo2AiwgASABKAIoQQhqNgIoCyABIAEoAiAgASgCLCABKAIYcUECdGooAQA2ARACQANAIAEgAS0AETYCDCABIAEoAiwgASgCDHY2AiwgASABKAIoIAEoAgxrNgIoIAEgAS0AEDYCDCABKAIMQRBxBEAgASABLwESNgIEIAEgASgCDEEPcTYCDCABKAIoIAEoAgxJBEAgASABKAJQIgJBAWo2AlAgASABKAIsIAItAAAgASgCKHRqNgIsIAEgASgCKEEIajYCKCABKAIoIAEoAgxJBEAgASABKAJQIgJBAWo2AlAgASABKAIsIAItAAAgASgCKHRqNgIsIAEgASgCKEEIajYCKAsLIAEgASgCBCABKAIsQQEgASgCDHRBAWtxajYCBCABIAEoAiwgASgCDHY2AiwgASABKAIoIAEoAgxrNgIoIAEgASgCSCABKAJEazYCDAJAIAEoAgQgASgCDEsEQCABIAEoAgQgASgCDGs2AgwgASgCDCABKAI4SwRAIAEoAlQoAsQ3BEAgASgCXEHdDDYCGCABKAJUQdH+ADYCBAwKCwsgASABKAIwNgIAAkAgASgCNEUEQCABIAEoAgAgASgCPCABKAIMa2o2AgAgASgCDCABKAIISQRAIAEgASgCCCABKAIMazYCCANAIAEgASgCACICQQFqNgIAIAItAAAhAiABIAEoAkgiA0EBajYCSCADIAI6AAAgASABKAIMQQFrIgI2A
gwgAg0ACyABIAEoAkggASgCBGs2AgALDAELAkAgASgCNCABKAIMSQRAIAEgASgCACABKAI8IAEoAjRqIAEoAgxrajYCACABIAEoAgwgASgCNGs2AgwgASgCDCABKAIISQRAIAEgASgCCCABKAIMazYCCANAIAEgASgCACICQQFqNgIAIAItAAAhAiABIAEoAkgiA0EBajYCSCADIAI6AAAgASABKAIMQQFrIgI2AgwgAg0ACyABIAEoAjA2AgAgASgCNCABKAIISQRAIAEgASgCNDYCDCABIAEoAgggASgCDGs2AggDQCABIAEoAgAiAkEBajYCACACLQAAIQIgASABKAJIIgNBAWo2AkggAyACOgAAIAEgASgCDEEBayICNgIMIAINAAsgASABKAJIIAEoAgRrNgIACwsMAQsgASABKAIAIAEoAjQgASgCDGtqNgIAIAEoAgwgASgCCEkEQCABIAEoAgggASgCDGs2AggDQCABIAEoAgAiAkEBajYCACACLQAAIQIgASABKAJIIgNBAWo2AkggAyACOgAAIAEgASgCDEEBayICNgIMIAINAAsgASABKAJIIAEoAgRrNgIACwsLA0AgASgCCEECSwRAIAEgASgCACICQQFqNgIAIAItAAAhAiABIAEoAkgiA0EBajYCSCADIAI6AAAgASABKAIAIgJBAWo2AgAgAi0AACECIAEgASgCSCIDQQFqNgJIIAMgAjoAACABIAEoAgAiAkEBajYCACACLQAAIQIgASABKAJIIgNBAWo2AkggAyACOgAAIAEgASgCCEEDazYCCAwBCwsMAQsgASABKAJIIAEoAgRrNgIAA0AgASABKAIAIgJBAWo2AgAgAi0AACECIAEgASgCSCIDQQFqNgJIIAMgAjoAACABIAEoAgAiAkEBajYCACACLQAAIQIgASABKAJIIgNBAWo2AkggAyACOgAAIAEgASgCACICQQFqNgIAIAItAAAhAiABIAEoAkgiA0EBajYCSCADIAI6AAAgASABKAIIQQNrNgIIIAEoAghBAksNAAsLIAEoAggEQCABIAEoAgAiAkEBajYCACACLQAAIQIgASABKAJIIgNBAWo2AkggAyACOgAAIAEoAghBAUsEQCABIAEoAgAiAkEBajYCACACLQAAIQIgASABKAJIIgNBAWo2AkggAyACOgAACwsMAgsgASgCDEHAAHFFBEAgASABKAIgIAEvARIgASgCLEEBIAEoAgx0QQFrcWpBAnRqKAEANgEQDAELCyABKAJcQYUPNgIYIAEoAlRB0f4ANgIEDAQLDAILIAEoAgxBwABxRQRAIAEgASgCJCABLwESIAEoAixBASABKAIMdEEBa3FqQQJ0aigBADYBEAwBCwsgASgCDEEgcQRAIAEoAlRBv/4ANgIEDAILIAEoAlxB6Q42AhggASgCVEHR/gA2AgQMAQsgASgCUCABKAJMSQR/IAEoAkggASgCQEkFQQALQQFxDQELCyABIAEoAihBA3Y2AgggASABKAJQIAEoAghrNgJQIAEgASgCKCABKAIIQQN0azYCKCABIAEoAixBASABKAIodEEBa3E2AiwgASgCXCABKAJQNgIAIAEoAlwgASgCSDYCDCABKAJcAn8gASgCUCABKAJMSQRAIAEoAkwgASgCUGtBBWoMAQtBBSABKAJQIAEoAkxraws2AgQgASgCXAJ/IAEoAkggASgCQEkEQCABKAJAIAEoAkhrQYECagwBC0GBAiABKAJIIAEoAkBraws2AhAgASgCVCABKAIsNgI8IAEoAlQgASgCKDYCQCAAIAAoAlgoAgw2AkggACAAKAJYKAIQNgJAIAAgACgCWCgCADYCTCAAIAAoAlgoAgQ2AkQgACAAKAJQKAI8NgI8IAAgACgCUCgCQDYCOCAAKAJQKAIEQb/+AEYEQCAAKAJQQX82Asg3CwwNCyAAKAJQQQA2Asg3A0ACQCAAIAAoAlAoAlAgACgCPEEBIAAoAlAoAlh0QQFrcUECdGooAQA2ASAgAC0AISAAKAI4TQ0AIAAoAkRFDQ0gACAAKAJEQQFrNgJEIAAgACgCTCIBQQFqNgJMIAAgACgCPCABLQAAIAAoAjh0ajYCPCAAIAAoAjhBCGo2AjgMAQsLAkAgAC0AIEUNACAALQAgQfABcQ0AIAAgACgBIDYBGANAAkAgACAAKAJQKAJQIAAvARogACgCPEEBIAAtABkgAC0AGGp0QQFrcSAALQAZdmpBAnRqKAEANgEgIAAoAjggAC0AGSAALQAhak8NACAAKAJERQ0OIAAgACgCREEBazYCRCAAIAAoAkwiAUEBajYCTCAAIAAoAjwgAS0AACAAKAI4dGo2AjwgACAAKAI4QQhqNgI4DAELCyAAIAAoAjwgAC0AGXY2AjwgACAAKAI4IAAtABlrNgI4IAAoAlAiASAALQAZIAEoAsg3ajYCyDcLIAAgACgCPCAALQAhdjYCPCAAIAAoAjggAC0AIWs2AjggACgCUCIBIAAtACEgASgCyDdqNgLINyAAKAJQIAAvASI2AkQgAC0AIEUEQCAAKAJQQc3+ADYCBAwNCyAALQAgQSBxBEAgACgCUEF/NgLINyAAKAJQQb/+ADYCBAwNCyAALQAgQcAAcQRAIAAoAlhB6Q42AhggACgCUEHR/gA2AgQMDQsgACgCUCAALQAgQQ9xNgJMIAAoAlBByf4ANgIECyAAKAJQKAJMBEADQCAAKAI4IAAoAlAoAkxJBEAgACgCREUNDSAAIAAoAkRBAWs2AkQgACAAKAJMIgFBAWo2AkwgACAAKAI8IAEtAAAgACgCOHRqNgI8IAAgACgCOEEIajYCOAwBCwsgACgCUCIBIAEoAkQgACgCPEEBIAAoAlAoAkx0QQFrcWo2AkQgACAAKAI8IAAoAlAoAkx2NgI8IAAgACgCOCAAKAJQKAJMazYCOCAAKAJQIgEgACgCUCgCTCABKALIN2o2Asg3CyAAKAJQIAAoAlAoAkQ2Asw3IAAoAlBByv4ANgIECwNAAkAgACAAKAJQKAJUIAAoAjxBASAAKAJQKAJcdEEBa3FBAnRqKAEANgEgIAAtACEgACgCOE0NACAAKAJERQ0LIAAgACgCREEBazYCRCAAIAAoAkwiAUEBajYCTCAAIAAoAjwgAS0AACAAKAI4dGo2AjwgACAAKAI4QQhqNgI4DAELCyAALQAgQfABcUUEQCAAIAAoASA2ARgDQAJAIAAgACgCUCgCVCAALwEaIAAoAjxBASAALQAZIAAtABhqdEEBa3EgAC0AGXZqQQJ0aigBADYBICAAKAI4IAAtABkgAC0AIWpPDQAgACgCREUNDCAAIAAoAkRBAWs2AkQgACAAKAJMIgFBAWo2AkwgACAAKAI8IAEtAAAgACgCOHRqNgI8IAAgACgCOEEIajYCOAwBCwsgACAAKAI8IAAtABl2NgI8IAAgACgCOCAALQAZazYCOCAAKAJQIgEgAC0AGSABKALIN2o2Asg3CyAAIAAoAjwgAC0AIXY2AjwgACAAKAI4
IAAtACFrNgI4IAAoAlAiASAALQAhIAEoAsg3ajYCyDcgAC0AIEHAAHEEQCAAKAJYQYUPNgIYIAAoAlBB0f4ANgIEDAsLIAAoAlAgAC8BIjYCSCAAKAJQIAAtACBBD3E2AkwgACgCUEHL/gA2AgQLIAAoAlAoAkwEQANAIAAoAjggACgCUCgCTEkEQCAAKAJERQ0LIAAgACgCREEBazYCRCAAIAAoAkwiAUEBajYCTCAAIAAoAjwgAS0AACAAKAI4dGo2AjwgACAAKAI4QQhqNgI4DAELCyAAKAJQIgEgASgCSCAAKAI8QQEgACgCUCgCTHRBAWtxajYCSCAAIAAoAjwgACgCUCgCTHY2AjwgACAAKAI4IAAoAlAoAkxrNgI4IAAoAlAiASAAKAJQKAJMIAEoAsg3ajYCyDcLIAAoAlBBzP4ANgIECyAAKAJARQ0HIAAgACgCMCAAKAJAazYCLAJAIAAoAlAoAkggACgCLEsEQCAAIAAoAlAoAkggACgCLGs2AiwgACgCLCAAKAJQKAIwSwRAIAAoAlAoAsQ3BEAgACgCWEHdDDYCGCAAKAJQQdH+ADYCBAwMCwsCQCAAKAIsIAAoAlAoAjRLBEAgACAAKAIsIAAoAlAoAjRrNgIsIAAgACgCUCgCOCAAKAJQKAIsIAAoAixrajYCKAwBCyAAIAAoAlAoAjggACgCUCgCNCAAKAIsa2o2AigLIAAoAiwgACgCUCgCREsEQCAAIAAoAlAoAkQ2AiwLDAELIAAgACgCSCAAKAJQKAJIazYCKCAAIAAoAlAoAkQ2AiwLIAAoAiwgACgCQEsEQCAAIAAoAkA2AiwLIAAgACgCQCAAKAIsazYCQCAAKAJQIgEgASgCRCAAKAIsazYCRANAIAAgACgCKCIBQQFqNgIoIAEtAAAhASAAIAAoAkgiAkEBajYCSCACIAE6AAAgACAAKAIsQQFrIgE2AiwgAQ0ACyAAKAJQKAJERQRAIAAoAlBByP4ANgIECwwICyAAKAJARQ0GIAAoAlAoAkQhASAAIAAoAkgiAkEBajYCSCACIAE6AAAgACAAKAJAQQFrNgJAIAAoAlBByP4ANgIEDAcLIAAoAlAoAgwEQANAIAAoAjhBIEkEQCAAKAJERQ0IIAAgACgCREEBazYCRCAAIAAoAkwiAUEBajYCTCAAIAAoAjwgAS0AACAAKAI4dGo2AjwgACAAKAI4QQhqNgI4DAELCyAAIAAoAjAgACgCQGs2AjAgACgCWCIBIAAoAjAgASgCFGo2AhQgACgCUCIBIAAoAjAgASgCIGo2AiACQCAAKAJQKAIMQQRxRQ0AIAAoAjBFDQACfyAAKAJQKAIUBEAgACgCUCgCHCAAKAJIIAAoAjBrIAAoAjAQGgwBCyAAKAJQKAIcIAAoAkggACgCMGsgACgCMBA9CyEBIAAoAlAgATYCHCAAKAJYIAE2AjALIAAgACgCQDYCMAJAIAAoAlAoAgxBBHFFDQACfyAAKAJQKAIUBEAgACgCPAwBCyAAKAI8QQh2QYD+A3EgACgCPEEYdmogACgCPEGA/gNxQQh0aiAAKAI8Qf8BcUEYdGoLIAAoAlAoAhxGDQAgACgCWEHIDDYCGCAAKAJQQdH+ADYCBAwICyAAQQA2AjwgAEEANgI4CyAAKAJQQc/+ADYCBAsCQCAAKAJQKAIMRQ0AIAAoAlAoAhRFDQADQCAAKAI4QSBJBEAgACgCREUNByAAIAAoAkRBAWs2AkQgACAAKAJMIgFBAWo2AkwgACAAKAI8IAEtAAAgACgCOHRqNgI8IAAgACgCOEEIajYCOAwBCwsgACgCPCAAKAJQKAIgRwRAIAAoAlhBsQw2AhggACgCUEHR/gA2AgQMBwsgAEEANgI8IABBADYCOAsgACgCUEHQ/gA2AgQLIABBATYCEAwDCyAAQX02AhAMAgsgAEF8NgJcDAMLIABBfjYCXAwCCwsgACgCWCAAKAJINgIMIAAoAlggACgCQDYCECAAKAJYIAAoAkw2AgAgACgCWCAAKAJENgIEIAAoAlAgACgCPDYCPCAAKAJQIAAoAjg2AkACQAJAIAAoAlAoAiwNACAAKAIwIAAoAlgoAhBGDQEgACgCUCgCBEHR/gBPDQEgACgCUCgCBEHO/gBJDQAgACgCVEEERg0BCwJ/IAAoAlghAiAAKAJYKAIMIQMgACgCMCAAKAJYKAIQayEEIwBBIGsiASQAIAEgAjYCGCABIAM2AhQgASAENgIQIAEgASgCGCgCHDYCDAJAIAEoAgwoAjhFBEAgASgCGCgCKEEBIAEoAgwoAih0QQEgASgCGCgCIBEBACECIAEoAgwgAjYCOCABKAIMKAI4RQRAIAFBATYCHAwCCwsgASgCDCgCLEUEQCABKAIMQQEgASgCDCgCKHQ2AiwgASgCDEEANgI0IAEoAgxBADYCMAsCQCABKAIQIAEoAgwoAixPBEAgASgCDCgCOCABKAIUIAEoAgwoAixrIAEoAgwoAiwQGRogASgCDEEANgI0IAEoAgwgASgCDCgCLDYCMAwBCyABIAEoAgwoAiwgASgCDCgCNGs2AgggASgCCCABKAIQSwRAIAEgASgCEDYCCAsgASgCDCgCOCABKAIMKAI0aiABKAIUIAEoAhBrIAEoAggQGRogASABKAIQIAEoAghrNgIQAkAgASgCEARAIAEoAgwoAjggASgCFCABKAIQayABKAIQEBkaIAEoAgwgASgCEDYCNCABKAIMIAEoAgwoAiw2AjAMAQsgASgCDCICIAEoAgggAigCNGo2AjQgASgCDCgCNCABKAIMKAIsRgRAIAEoAgxBADYCNAsgASgCDCgCMCABKAIMKAIsSQRAIAEoAgwiAiABKAIIIAIoAjBqNgIwCwsLIAFBADYCHAsgASgCHCECIAFBIGokACACCwRAIAAoAlBB0v4ANgIEIABBfDYCXAwCCwsgACAAKAI0IAAoAlgoAgRrNgI0IAAgACgCMCAAKAJYKAIQazYCMCAAKAJYIgEgACgCNCABKAIIajYCCCAAKAJYIgEgACgCMCABKAIUajYCFCAAKAJQIgEgACgCMCABKAIgajYCIAJAIAAoAlAoAgxBBHFFDQAgACgCMEUNAAJ/IAAoAlAoAhQEQCAAKAJQKAIcIAAoAlgoAgwgACgCMGsgACgCMBAaDAELIAAoAlAoAhwgACgCWCgCDCAAKAIwayAAKAIwED0LIQEgACgCUCABNgIcIAAoAlggATYCMAsgACgCWCAAKAJQKAJAQcAAQQAgACgCUCgCCBtqQYABQQAgACgCUCgCBEG//gBGG2pBgAJBACAAKAJQKAIEQcf+AEcEfyAAKAJQKAIEQcL+AEYFQQELQQFxG2o2AiwCQAJAIAAoAjRFBEAgACgCMEUNAQsgACgCVEEERw0BCyAAKAIQDQAgAEF7NgIQCyAAIAAoAhA2AlwLIAAoAlwhASAAQeAAaiQAIAUgATYCCAsgBSgCECIAIAApAwAgBSgCDDUCIH03AwACQAJAAkACQAJAIAUoAghBBWo
OBwIDAwMDAAEDCyAFQQA2AhwMAwsgBUEBNgIcDAILIAUoAgwoAhRFBEAgBUEDNgIcDAILCyAFKAIMKAIAQQ0gBSgCCBAUIAVBAjYCHAsgBSgCHCEAIAVBIGokACAACyQBAX8jAEEQayIBIAA2AgwgASABKAIMNgIIIAEoAghBAToADAuXAQEBfyMAQSBrIgMkACADIAA2AhggAyABNgIUIAMgAjcDCCADIAMoAhg2AgQCQAJAIAMpAwhC/////w9YBEAgAygCBCgCFEUNAQsgAygCBCgCAEESQQAQFCADQQA6AB8MAQsgAygCBCADKQMIPgIUIAMoAgQgAygCFDYCECADQQE6AB8LIAMtAB9BAXEhACADQSBqJAAgAAukAgECfyMAQRBrIgEkACABIAA2AgggASABKAIINgIEAkAgASgCBC0ABEEBcQRAIAEgASgCBEEQahC4ATYCAAwBCyABKAIEQRBqIQIjAEEQayIAJAAgACACNgIIAkAgACgCCBBKBEAgAEF+NgIMDAELIAAgACgCCCgCHDYCBCAAKAIEKAI4BEAgACgCCCgCKCAAKAIEKAI4IAAoAggoAiQRBAALIAAoAggoAiggACgCCCgCHCAAKAIIKAIkEQQAIAAoAghBADYCHCAAQQA2AgwLIAAoAgwhAiAAQRBqJAAgASACNgIACwJAIAEoAgAEQCABKAIEKAIAQQ0gASgCABAUIAFBADoADwwBCyABQQE6AA8LIAEtAA9BAXEhACABQRBqJAAgAAuyGAEFfyMAQRBrIgQkACAEIAA2AgggBCAEKAIINgIEIAQoAgRBADYCFCAEKAIEQQA2AhAgBCgCBEEANgIgIAQoAgRBADYCHAJAIAQoAgQtAARBAXEEQCAEKAIEQRBqIQEgBCgCBCgCCCECIwBBMGsiACQAIAAgATYCKCAAIAI2AiQgAEEINgIgIABBcTYCHCAAQQk2AhggAEEANgIUIABBwBI2AhAgAEE4NgIMIABBATYCBAJAAkACQCAAKAIQRQ0AIAAoAhAsAABB+O4ALAAARw0AIAAoAgxBOEYNAQsgAEF6NgIsDAELIAAoAihFBEAgAEF+NgIsDAELIAAoAihBADYCGCAAKAIoKAIgRQRAIAAoAihBBTYCICAAKAIoQQA2AigLIAAoAigoAiRFBEAgACgCKEEGNgIkCyAAKAIkQX9GBEAgAEEGNgIkCwJAIAAoAhxBAEgEQCAAQQA2AgQgAEEAIAAoAhxrNgIcDAELIAAoAhxBD0oEQCAAQQI2AgQgACAAKAIcQRBrNgIcCwsCQAJAIAAoAhhBAUgNACAAKAIYQQlKDQAgACgCIEEIRw0AIAAoAhxBCEgNACAAKAIcQQ9KDQAgACgCJEEASA0AIAAoAiRBCUoNACAAKAIUQQBIDQAgACgCFEEESg0AIAAoAhxBCEcNASAAKAIEQQFGDQELIABBfjYCLAwBCyAAKAIcQQhGBEAgAEEJNgIcCyAAIAAoAigoAihBAUHELSAAKAIoKAIgEQEANgIIIAAoAghFBEAgAEF8NgIsDAELIAAoAiggACgCCDYCHCAAKAIIIAAoAig2AgAgACgCCEEqNgIEIAAoAgggACgCBDYCGCAAKAIIQQA2AhwgACgCCCAAKAIcNgIwIAAoAghBASAAKAIIKAIwdDYCLCAAKAIIIAAoAggoAixBAWs2AjQgACgCCCAAKAIYQQdqNgJQIAAoAghBASAAKAIIKAJQdDYCTCAAKAIIIAAoAggoAkxBAWs2AlQgACgCCCAAKAIIKAJQQQJqQQNuNgJYIAAoAigoAiggACgCCCgCLEECIAAoAigoAiARAQAhASAAKAIIIAE2AjggACgCKCgCKCAAKAIIKAIsQQIgACgCKCgCIBEBACEBIAAoAgggATYCQCAAKAIoKAIoIAAoAggoAkxBAiAAKAIoKAIgEQEAIQEgACgCCCABNgJEIAAoAghBADYCwC0gACgCCEEBIAAoAhhBBmp0NgKcLSAAIAAoAigoAiggACgCCCgCnC1BBCAAKAIoKAIgEQEANgIAIAAoAgggACgCADYCCCAAKAIIIAAoAggoApwtQQJ0NgIMAkACQCAAKAIIKAI4RQ0AIAAoAggoAkBFDQAgACgCCCgCREUNACAAKAIIKAIIDQELIAAoAghBmgU2AgQgACgCKEG42QAoAgA2AhggACgCKBC4ARogAEF8NgIsDAELIAAoAgggACgCACAAKAIIKAKcLUEBdkEBdGo2AqQtIAAoAgggACgCCCgCCCAAKAIIKAKcLUEDbGo2ApgtIAAoAgggACgCJDYChAEgACgCCCAAKAIUNgKIASAAKAIIIAAoAiA6ACQgACgCKCEBIwBBEGsiAyQAIAMgATYCDCADKAIMIQIjAEEQayIBJAAgASACNgIIAkAgASgCCBB4BEAgAUF+NgIMDAELIAEoAghBADYCFCABKAIIQQA2AgggASgCCEEANgIYIAEoAghBAjYCLCABIAEoAggoAhw2AgQgASgCBEEANgIUIAEoAgQgASgCBCgCCDYCECABKAIEKAIYQQBIBEAgASgCBEEAIAEoAgQoAhhrNgIYCyABKAIEIAEoAgQoAhhBAkYEf0E5BUEqQfEAIAEoAgQoAhgbCzYCBAJ/IAEoAgQoAhhBAkYEQEEAQQBBABAaDAELQQBBAEEAED0LIQIgASgCCCACNgIwIAEoAgRBADYCKCABKAIEIQUjAEEQayICJAAgAiAFNgIMIAIoAgwgAigCDEGUAWo2ApgWIAIoAgxB0N8ANgKgFiACKAIMIAIoAgxBiBNqNgKkFiACKAIMQeTfADYCrBYgAigCDCACKAIMQfwUajYCsBYgAigCDEH43wA2ArgWIAIoAgxBADsBuC0gAigCDEEANgK8LSACKAIMEL4BIAJBEGokACABQQA2AgwLIAEoAgwhAiABQRBqJAAgAyACNgIIIAMoAghFBEAgAygCDCgCHCECIwBBEGsiASQAIAEgAjYCDCABKAIMIAEoAgwoAixBAXQ2AjwgASgCDCgCRCABKAIMKAJMQQFrQQF0akEAOwEAIAEoAgwoAkRBACABKAIMKAJMQQFrQQF0EDMgASgCDCABKAIMKAKEAUEMbEGA7wBqLwECNgKAASABKAIMIAEoAgwoAoQBQQxsQYDvAGovAQA2AowBIAEoAgwgASgCDCgChAFBDGxBgO8Aai8BBDYCkAEgASgCDCABKAIMKAKEAUEMbEGA7wBqLwEGNgJ8IAEoAgxBADYCbCABKAIMQQA2AlwgASgCDEEANgJ0IAEoAgxBADYCtC0gASgCDEECNgJ4IAEoAgxBAjYCYCABKAIMQQA2AmggASgCDEEANgJIIAFBEGokAAsgAygCCCEBIANBEGokACAAIAE2AiwLIAAoAiwhASAAQTBqJAAgBCABNgIADAELIAQoAgRBEGohASMAQSBrIgAkACAAIAE2AhggAEFxNgIUIABBwBI2AhAgAEE4NgIMAkACQAJAIAAoAhBFDQAgACgCECwAAEHAEiwAAEcNACAAKA
IMQThGDQELIABBejYCHAwBCyAAKAIYRQRAIABBfjYCHAwBCyAAKAIYQQA2AhggACgCGCgCIEUEQCAAKAIYQQU2AiAgACgCGEEANgIoCyAAKAIYKAIkRQRAIAAoAhhBBjYCJAsgACAAKAIYKAIoQQFB0DcgACgCGCgCIBEBADYCBCAAKAIERQRAIABBfDYCHAwBCyAAKAIYIAAoAgQ2AhwgACgCBCAAKAIYNgIAIAAoAgRBADYCOCAAKAIEQbT+ADYCBCAAKAIYIQIgACgCFCEDIwBBIGsiASQAIAEgAjYCGCABIAM2AhQCQCABKAIYEEoEQCABQX42AhwMAQsgASABKAIYKAIcNgIMAkAgASgCFEEASARAIAFBADYCECABQQAgASgCFGs2AhQMAQsgASABKAIUQQR1QQVqNgIQIAEoAhRBMEgEQCABIAEoAhRBD3E2AhQLCwJAIAEoAhRFDQAgASgCFEEITgRAIAEoAhRBD0wNAQsgAUF+NgIcDAELAkAgASgCDCgCOEUNACABKAIMKAIoIAEoAhRGDQAgASgCGCgCKCABKAIMKAI4IAEoAhgoAiQRBAAgASgCDEEANgI4CyABKAIMIAEoAhA2AgwgASgCDCABKAIUNgIoIAEoAhghAiMAQRBrIgMkACADIAI2AggCQCADKAIIEEoEQCADQX42AgwMAQsgAyADKAIIKAIcNgIEIAMoAgRBADYCLCADKAIEQQA2AjAgAygCBEEANgI0IAMoAgghBSMAQRBrIgIkACACIAU2AggCQCACKAIIEEoEQCACQX42AgwMAQsgAiACKAIIKAIcNgIEIAIoAgRBADYCICACKAIIQQA2AhQgAigCCEEANgIIIAIoAghBADYCGCACKAIEKAIMBEAgAigCCCACKAIEKAIMQQFxNgIwCyACKAIEQbT+ADYCBCACKAIEQQA2AgggAigCBEEANgIQIAIoAgRBgIACNgIYIAIoAgRBADYCJCACKAIEQQA2AjwgAigCBEEANgJAIAIoAgQgAigCBEG0CmoiBTYCcCACKAIEIAU2AlQgAigCBCAFNgJQIAIoAgRBATYCxDcgAigCBEF/NgLINyACQQA2AgwLIAIoAgwhBSACQRBqJAAgAyAFNgIMCyADKAIMIQIgA0EQaiQAIAEgAjYCHAsgASgCHCECIAFBIGokACAAIAI2AgggACgCCARAIAAoAhgoAiggACgCBCAAKAIYKAIkEQQAIAAoAhhBADYCHAsgACAAKAIINgIcCyAAKAIcIQEgAEEgaiQAIAQgATYCAAsCQCAEKAIABEAgBCgCBCgCAEENIAQoAgAQFCAEQQA6AA8MAQsgBEEBOgAPCyAELQAPQQFxIQAgBEEQaiQAIAALbwEBfyMAQRBrIgEgADYCCCABIAEoAgg2AgQCQCABKAIELQAEQQFxRQRAIAFBADYCDAwBCyABKAIEKAIIQQNIBEAgAUECNgIMDAELIAEoAgQoAghBB0oEQCABQQE2AgwMAQsgAUEANgIMCyABKAIMCywBAX8jAEEQayIBJAAgASAANgIMIAEgASgCDDYCCCABKAIIEBUgAUEQaiQACzwBAX8jAEEQayIDJAAgAyAAOwEOIAMgATYCCCADIAI2AgRBASADKAIIIAMoAgQQtAEhACADQRBqJAAgAAvBEAECfyMAQSBrIgIkACACIAA2AhggAiABNgIUAkADQAJAIAIoAhgoAnRBhgJJBEAgAigCGBBcAkAgAigCGCgCdEGGAk8NACACKAIUDQAgAkEANgIcDAQLIAIoAhgoAnRFDQELIAJBADYCECACKAIYKAJ0QQNPBEAgAigCGCACKAIYKAJUIAIoAhgoAjggAigCGCgCbEECamotAAAgAigCGCgCSCACKAIYKAJYdHNxNgJIIAIoAhgoAkAgAigCGCgCbCACKAIYKAI0cUEBdGogAigCGCgCRCACKAIYKAJIQQF0ai8BACIAOwEAIAIgAEH//wNxNgIQIAIoAhgoAkQgAigCGCgCSEEBdGogAigCGCgCbDsBAAsgAigCGCACKAIYKAJgNgJ4IAIoAhggAigCGCgCcDYCZCACKAIYQQI2AmACQCACKAIQRQ0AIAIoAhgoAnggAigCGCgCgAFPDQAgAigCGCgCLEGGAmsgAigCGCgCbCACKAIQa0kNACACKAIYIAIoAhAQtgEhACACKAIYIAA2AmACQCACKAIYKAJgQQVLDQAgAigCGCgCiAFBAUcEQCACKAIYKAJgQQNHDQEgAigCGCgCbCACKAIYKAJwa0GAIE0NAQsgAigCGEECNgJgCwsCQAJAIAIoAhgoAnhBA0kNACACKAIYKAJgIAIoAhgoAnhLDQAgAiACKAIYIgAoAmwgACgCdGpBA2s2AgggAiACKAIYKAJ4QQNrOgAHIAIgAigCGCIAKAJsIAAoAmRBf3NqOwEEIAIoAhgiACgCpC0gACgCoC1BAXRqIAIvAQQ7AQAgAi0AByEBIAIoAhgiACgCmC0hAyAAIAAoAqAtIgBBAWo2AqAtIAAgA2ogAToAACACIAIvAQRBAWs7AQQgAigCGCACLQAHQdDdAGotAABBAnRqQZgJaiIAIAAvAQBBAWo7AQAgAigCGEGIE2oCfyACLwEEQYACSQRAIAIvAQQtANBZDAELIAIvAQRBB3ZBgAJqLQDQWQtBAnRqIgAgAC8BAEEBajsBACACIAIoAhgoAqAtIAIoAhgoApwtQQFrRjYCDCACKAIYIgAgACgCdCACKAIYKAJ4QQFrazYCdCACKAIYIgAgACgCeEECazYCeANAIAIoAhgiASgCbEEBaiEAIAEgADYCbCAAIAIoAghNBEAgAigCGCACKAIYKAJUIAIoAhgoAjggAigCGCgCbEECamotAAAgAigCGCgCSCACKAIYKAJYdHNxNgJIIAIoAhgoAkAgAigCGCgCbCACKAIYKAI0cUEBdGogAigCGCgCRCACKAIYKAJIQQF0ai8BACIAOwEAIAIgAEH//wNxNgIQIAIoAhgoAkQgAigCGCgCSEEBdGogAigCGCgCbDsBAAsgAigCGCIBKAJ4QQFrIQAgASAANgJ4IAANAAsgAigCGEEANgJoIAIoAhhBAjYCYCACKAIYIgAgACgCbEEBajYCbCACKAIMBEAgAigCGAJ/IAIoAhgoAlxBAE4EQCACKAIYKAI4IAIoAhgoAlxqDAELQQALIAIoAhgoAmwgAigCGCgCXGtBABAoIAIoAhggAigCGCgCbDYCXCACKAIYKAIAEBwgAigCGCgCACgCEEUEQCACQQA2AhwMBgsLDAELAkAgAigCGCgCaARAIAIgAigCGCIAKAI4IAAoAmxqQQFrLQAAOgADIAIoAhgiACgCpC0gACgCoC1BAXRqQQA7AQAgAi0AAyEBIAIoAhgiACgCmC0hAyAAIAAoAqAtIgBBAWo2AqAtIAAgA2ogAToAACACKAIYIAItAANBAnRqIgAgAC8BlAFBAWo7AZQBIAIgAigCGCgCoC0gAigCGCgCnC1BAWtGNgIMIAIoAgwEQCACK
AIYAn8gAigCGCgCXEEATgRAIAIoAhgoAjggAigCGCgCXGoMAQtBAAsgAigCGCgCbCACKAIYKAJca0EAECggAigCGCACKAIYKAJsNgJcIAIoAhgoAgAQHAsgAigCGCIAIAAoAmxBAWo2AmwgAigCGCIAIAAoAnRBAWs2AnQgAigCGCgCACgCEEUEQCACQQA2AhwMBgsMAQsgAigCGEEBNgJoIAIoAhgiACAAKAJsQQFqNgJsIAIoAhgiACAAKAJ0QQFrNgJ0CwsMAQsLIAIoAhgoAmgEQCACIAIoAhgiACgCOCAAKAJsakEBay0AADoAAiACKAIYIgAoAqQtIAAoAqAtQQF0akEAOwEAIAItAAIhASACKAIYIgAoApgtIQMgACAAKAKgLSIAQQFqNgKgLSAAIANqIAE6AAAgAigCGCACLQACQQJ0aiIAIAAvAZQBQQFqOwGUASACIAIoAhgoAqAtIAIoAhgoApwtQQFrRjYCDCACKAIYQQA2AmgLIAIoAhgCfyACKAIYKAJsQQJJBEAgAigCGCgCbAwBC0ECCzYCtC0gAigCFEEERgRAIAIoAhgCfyACKAIYKAJcQQBOBEAgAigCGCgCOCACKAIYKAJcagwBC0EACyACKAIYKAJsIAIoAhgoAlxrQQEQKCACKAIYIAIoAhgoAmw2AlwgAigCGCgCABAcIAIoAhgoAgAoAhBFBEAgAkECNgIcDAILIAJBAzYCHAwBCyACKAIYKAKgLQRAIAIoAhgCfyACKAIYKAJcQQBOBEAgAigCGCgCOCACKAIYKAJcagwBC0EACyACKAIYKAJsIAIoAhgoAlxrQQAQKCACKAIYIAIoAhgoAmw2AlwgAigCGCgCABAcIAIoAhgoAgAoAhBFBEAgAkEANgIcDAILCyACQQE2AhwLIAIoAhwhACACQSBqJAAgAAuVDQECfyMAQSBrIgIkACACIAA2AhggAiABNgIUAkADQAJAIAIoAhgoAnRBhgJJBEAgAigCGBBcAkAgAigCGCgCdEGGAk8NACACKAIUDQAgAkEANgIcDAQLIAIoAhgoAnRFDQELIAJBADYCECACKAIYKAJ0QQNPBEAgAigCGCACKAIYKAJUIAIoAhgoAjggAigCGCgCbEECamotAAAgAigCGCgCSCACKAIYKAJYdHNxNgJIIAIoAhgoAkAgAigCGCgCbCACKAIYKAI0cUEBdGogAigCGCgCRCACKAIYKAJIQQF0ai8BACIAOwEAIAIgAEH//wNxNgIQIAIoAhgoAkQgAigCGCgCSEEBdGogAigCGCgCbDsBAAsCQCACKAIQRQ0AIAIoAhgoAixBhgJrIAIoAhgoAmwgAigCEGtJDQAgAigCGCACKAIQELYBIQAgAigCGCAANgJgCwJAIAIoAhgoAmBBA08EQCACIAIoAhgoAmBBA2s6AAsgAiACKAIYIgAoAmwgACgCcGs7AQggAigCGCIAKAKkLSAAKAKgLUEBdGogAi8BCDsBACACLQALIQEgAigCGCIAKAKYLSEDIAAgACgCoC0iAEEBajYCoC0gACADaiABOgAAIAIgAi8BCEEBazsBCCACKAIYIAItAAtB0N0Aai0AAEECdGpBmAlqIgAgAC8BAEEBajsBACACKAIYQYgTagJ/IAIvAQhBgAJJBEAgAi8BCC0A0FkMAQsgAi8BCEEHdkGAAmotANBZC0ECdGoiACAALwEAQQFqOwEAIAIgAigCGCgCoC0gAigCGCgCnC1BAWtGNgIMIAIoAhgiACAAKAJ0IAIoAhgoAmBrNgJ0AkACQCACKAIYKAJgIAIoAhgoAoABSw0AIAIoAhgoAnRBA0kNACACKAIYIgAgACgCYEEBazYCYANAIAIoAhgiACAAKAJsQQFqNgJsIAIoAhggAigCGCgCVCACKAIYKAI4IAIoAhgoAmxBAmpqLQAAIAIoAhgoAkggAigCGCgCWHRzcTYCSCACKAIYKAJAIAIoAhgoAmwgAigCGCgCNHFBAXRqIAIoAhgoAkQgAigCGCgCSEEBdGovAQAiADsBACACIABB//8DcTYCECACKAIYKAJEIAIoAhgoAkhBAXRqIAIoAhgoAmw7AQAgAigCGCIBKAJgQQFrIQAgASAANgJgIAANAAsgAigCGCIAIAAoAmxBAWo2AmwMAQsgAigCGCIAIAIoAhgoAmAgACgCbGo2AmwgAigCGEEANgJgIAIoAhggAigCGCgCOCACKAIYKAJsai0AADYCSCACKAIYIAIoAhgoAlQgAigCGCgCOCACKAIYKAJsQQFqai0AACACKAIYKAJIIAIoAhgoAlh0c3E2AkgLDAELIAIgAigCGCIAKAI4IAAoAmxqLQAAOgAHIAIoAhgiACgCpC0gACgCoC1BAXRqQQA7AQAgAi0AByEBIAIoAhgiACgCmC0hAyAAIAAoAqAtIgBBAWo2AqAtIAAgA2ogAToAACACKAIYIAItAAdBAnRqIgAgAC8BlAFBAWo7AZQBIAIgAigCGCgCoC0gAigCGCgCnC1BAWtGNgIMIAIoAhgiACAAKAJ0QQFrNgJ0IAIoAhgiACAAKAJsQQFqNgJsCyACKAIMBEAgAigCGAJ/IAIoAhgoAlxBAE4EQCACKAIYKAI4IAIoAhgoAlxqDAELQQALIAIoAhgoAmwgAigCGCgCXGtBABAoIAIoAhggAigCGCgCbDYCXCACKAIYKAIAEBwgAigCGCgCACgCEEUEQCACQQA2AhwMBAsLDAELCyACKAIYAn8gAigCGCgCbEECSQRAIAIoAhgoAmwMAQtBAgs2ArQtIAIoAhRBBEYEQCACKAIYAn8gAigCGCgCXEEATgRAIAIoAhgoAjggAigCGCgCXGoMAQtBAAsgAigCGCgCbCACKAIYKAJca0EBECggAigCGCACKAIYKAJsNgJcIAIoAhgoAgAQHCACKAIYKAIAKAIQRQRAIAJBAjYCHAwCCyACQQM2AhwMAQsgAigCGCgCoC0EQCACKAIYAn8gAigCGCgCXEEATgRAIAIoAhgoAjggAigCGCgCXGoMAQtBAAsgAigCGCgCbCACKAIYKAJca0EAECggAigCGCACKAIYKAJsNgJcIAIoAhgoAgAQHCACKAIYKAIAKAIQRQRAIAJBADYCHAwCCwsgAkEBNgIcCyACKAIcIQAgAkEgaiQAIAALBwAgAC8BMAspAQF/IwBBEGsiAiQAIAIgADYCDCACIAE2AgggAigCCBAVIAJBEGokAAs6AQF/IwBBEGsiAyQAIAMgADYCDCADIAE2AgggAyACNgIEIAMoAgggAygCBGwQGCEAIANBEGokACAAC84FAQF/IwBB0ABrIgUkACAFIAA2AkQgBSABNgJAIAUgAjYCPCAFIAM3AzAgBSAENgIsIAUgBSgCQDYCKAJAAkACQAJAAkACQAJAAkACQCAFKAIsDg8AAQIDBQYHBwcHBwcHBwQHCwJ/IAUoAkQhASAFKAIoIQIjAEHgAGsiACQAIAAgATYCWCAAIAI2AlQgACAAKAJYIABByABqQgwQKyIDNwMIAkAg
A0IAUwRAIAAoAlQgACgCWBAXIABBfzYCXAwBCyAAKQMIQgxSBEAgACgCVEERQQAQFCAAQX82AlwMAQsgACgCVCAAQcgAaiAAQcgAakIMQQAQfCAAKAJYIABBEGoQOUEASARAIABBADYCXAwBCyAAKAI4IABBBmogAEEEahCNAQJAIAAtAFMgACgCPEEYdkYNACAALQBTIAAvAQZBCHZGDQAgACgCVEEbQQAQFCAAQX82AlwMAQsgAEEANgJcCyAAKAJcIQEgAEHgAGokACABQQBICwRAIAVCfzcDSAwICyAFQgA3A0gMBwsgBSAFKAJEIAUoAjwgBSkDMBArIgM3AyAgA0IAUwRAIAUoAiggBSgCRBAXIAVCfzcDSAwHCyAFKAJAIAUoAjwgBSgCPCAFKQMgQQAQfCAFIAUpAyA3A0gMBgsgBUIANwNIDAULIAUgBSgCPDYCHCAFKAIcQQA7ATIgBSgCHCIAIAApAwBCgAGENwMAIAUoAhwpAwBCCINCAFIEQCAFKAIcIgAgACkDIEIMfTcDIAsgBUIANwNIDAQLIAVBfzYCFCAFQQU2AhAgBUEENgIMIAVBAzYCCCAFQQI2AgQgBUEBNgIAIAVBACAFEDQ3A0gMAwsgBSAFKAIoIAUoAjwgBSkDMBBDNwNIDAILIAUoAigQvwEgBUIANwNIDAELIAUoAihBEkEAEBQgBUJ/NwNICyAFKQNIIQMgBUHQAGokACADC+4CAQF/IwBBIGsiBSQAIAUgADYCGCAFIAE2AhQgBSACOwESIAUgAzYCDCAFIAQ2AggCQAJAAkAgBSgCCEUNACAFKAIURQ0AIAUvARJBAUYNAQsgBSgCGEEIakESQQAQFCAFQQA2AhwMAQsgBSgCDEEBcQRAIAUoAhhBCGpBGEEAEBQgBUEANgIcDAELIAVBGBAYIgA2AgQgAEUEQCAFKAIYQQhqQQ5BABAUIAVBADYCHAwBCyMAQRBrIgAgBSgCBDYCDCAAKAIMQQA2AgAgACgCDEEANgIEIAAoAgxBADYCCCAFKAIEQfis0ZEBNgIMIAUoAgRBic+VmgI2AhAgBSgCBEGQ8dmiAzYCFCAFKAIEQQAgBSgCCCAFKAIIEC6tQQEQfCAFIAUoAhggBSgCFEEDIAUoAgQQYSIANgIAIABFBEAgBSgCBBC/ASAFQQA2AhwMAQsgBSAFKAIANgIcCyAFKAIcIQAgBUEgaiQAIAALBwAgACgCIAu9GAECfyMAQfAAayIEJAAgBCAANgJkIAQgATYCYCAEIAI3A1ggBCADNgJUIAQgBCgCZDYCUAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkAgBCgCVA4UBgcCDAQFCg8AAwkRCxAOCBIBEg0SC0EAQgBBACAEKAJQEEwhACAEKAJQIAA2AhQgAEUEQCAEQn83A2gMEwsgBCgCUCgCFEIANwM4IAQoAlAoAhRCADcDQCAEQgA3A2gMEgsgBCgCUCgCECEBIAQpA1ghAiAEKAJQIQMjAEFAaiIAJAAgACABNgI4IAAgAjcDMCAAIAM2AiwCQCAAKQMwUARAIABBAEIAQQEgACgCLBBMNgI8DAELIAApAzAgACgCOCkDMFYEQCAAKAIsQRJBABAUIABBADYCPAwBCyAAKAI4KAIoBEAgACgCLEEdQQAQFCAAQQA2AjwMAQsgACAAKAI4IAApAzAQwAE3AyAgACAAKQMwIAAoAjgoAgQgACkDIKdBA3RqKQMAfTcDGCAAKQMYUARAIAAgACkDIEIBfTcDICAAIAAoAjgoAgAgACkDIKdBBHRqKQMINwMYCyAAIAAoAjgoAgAgACkDIKdBBHRqKQMIIAApAxh9NwMQIAApAxAgACkDMFYEQCAAKAIsQRxBABAUIABBADYCPAwBCyAAIAAoAjgoAgAgACkDIEIBfEEAIAAoAiwQTCIBNgIMIAFFBEAgAEEANgI8DAELIAAoAgwoAgAgACgCDCkDCEIBfadBBHRqIAApAxg3AwggACgCDCgCBCAAKAIMKQMIp0EDdGogACkDMDcDACAAKAIMIAApAzA3AzAgACgCDAJ+IAAoAjgpAxggACgCDCkDCEIBfVQEQCAAKAI4KQMYDAELIAAoAgwpAwhCAX0LNwMYIAAoAjggACgCDDYCKCAAKAIMIAAoAjg2AiggACgCOCAAKAIMKQMINwMgIAAoAgwgACkDIEIBfDcDICAAIAAoAgw2AjwLIAAoAjwhASAAQUBrJAAgASEAIAQoAlAgADYCFCAARQRAIARCfzcDaAwSCyAEKAJQKAIUIAQpA1g3AzggBCgCUCgCFCAEKAJQKAIUKQMINwNAIARCADcDaAwRCyAEQgA3A2gMEAsgBCgCUCgCEBAyIAQoAlAgBCgCUCgCFDYCECAEKAJQQQA2AhQgBEIANwNoDA8LIAQgBCgCUCAEKAJgIAQpA1gQQzcDaAwOCyAEKAJQKAIQEDIgBCgCUCgCFBAyIAQoAlAQFSAEQgA3A2gMDQsgBCgCUCgCEEIANwM4IAQoAlAoAhBCADcDQCAEQgA3A2gMDAsgBCkDWEL///////////8AVgRAIAQoAlBBEkEAEBQgBEJ/NwNoDAwLIAQoAlAoAhAhASAEKAJgIQMgBCkDWCECIwBBQGoiACQAIAAgATYCNCAAIAM2AjAgACACNwMoIAACfiAAKQMoIAAoAjQpAzAgACgCNCkDOH1UBEAgACkDKAwBCyAAKAI0KQMwIAAoAjQpAzh9CzcDKAJAIAApAyhQBEAgAEIANwM4DAELIAApAyhC////////////AFYEQCAAQn83AzgMAQsgACAAKAI0KQNANwMYIAAgACgCNCkDOCAAKAI0KAIEIAApAxinQQN0aikDAH03AxAgAEIANwMgA0AgACkDICAAKQMoVARAIAACfiAAKQMoIAApAyB9IAAoAjQoAgAgACkDGKdBBHRqKQMIIAApAxB9VARAIAApAyggACkDIH0MAQsgACgCNCgCACAAKQMYp0EEdGopAwggACkDEH0LNwMIIAAoAjAgACkDIKdqIAAoAjQoAgAgACkDGKdBBHRqKAIAIAApAxCnaiAAKQMIpxAZGiAAKQMIIAAoAjQoAgAgACkDGKdBBHRqKQMIIAApAxB9UQRAIAAgACkDGEIBfDcDGAsgACAAKQMIIAApAyB8NwMgIABCADcDEAwBCwsgACgCNCIBIAApAyAgASkDOHw3AzggACgCNCAAKQMYNwNAIAAgACkDIDcDOAsgACkDOCECIABBQGskACAEIAI3A2gMCwsgBEEAQgBBACAEKAJQEEw2AkwgBCgCTEUEQCAEQn83A2gMCwsgBCgCUCgCEBAyIAQoAlAgBCgCTDYCECAEQgA3A2gMCgsgBCgCUCgCFBAyIAQoAlBBADYCFCAEQgA3A2gMCQsgBCAEKAJQKAIQIAQoAmAgBCkDWCAEKAJQEMEBrDcDaAwICyAEIAQoAlAoAhQgBCgCYCAEKQNYIAQoAlAQwQGsNwNoDAcLIAQpA1hCOFQ
EQCAEKAJQQRJBABAUIARCfzcDaAwHCyAEIAQoAmA2AkggBCgCSBA7IAQoAkggBCgCUCgCDDYCKCAEKAJIIAQoAlAoAhApAzA3AxggBCgCSCAEKAJIKQMYNwMgIAQoAkhBADsBMCAEKAJIQQA7ATIgBCgCSELcATcDACAEQjg3A2gMBgsgBCgCUCAEKAJgKAIANgIMIARCADcDaAwFCyAEQX82AkAgBEETNgI8IARBCzYCOCAEQQ02AjQgBEEMNgIwIARBCjYCLCAEQQ82AiggBEEJNgIkIARBETYCICAEQQg2AhwgBEEHNgIYIARBBjYCFCAEQQU2AhAgBEEENgIMIARBAzYCCCAEQQI2AgQgBEEBNgIAIARBACAEEDQ3A2gMBAsgBCgCUCgCECkDOEL///////////8AVgRAIAQoAlBBHkE9EBQgBEJ/NwNoDAQLIAQgBCgCUCgCECkDODcDaAwDCyAEKAJQKAIUKQM4Qv///////////wBWBEAgBCgCUEEeQT0QFCAEQn83A2gMAwsgBCAEKAJQKAIUKQM4NwNoDAILIAQpA1hC////////////AFYEQCAEKAJQQRJBABAUIARCfzcDaAwCCyAEKAJQKAIUIQEgBCgCYCEDIAQpA1ghAiAEKAJQIQUjAEHgAGsiACQAIAAgATYCVCAAIAM2AlAgACACNwNIIAAgBTYCRAJAIAApA0ggACgCVCkDOCAAKQNIfEL//wN8VgRAIAAoAkRBEkEAEBQgAEJ/NwNYDAELIAAgACgCVCgCBCAAKAJUKQMIp0EDdGopAwA3AyAgACkDICAAKAJUKQM4IAApA0h8VARAIAAgACgCVCkDCCAAKQNIIAApAyAgACgCVCkDOH19Qv//A3xCEIh8NwMYIAApAxggACgCVCkDEFYEQCAAIAAoAlQpAxA3AxAgACkDEFAEQCAAQhA3AxALA0AgACkDECAAKQMYVARAIAAgACkDEEIBhjcDEAwBCwsgACgCVCAAKQMQIAAoAkQQwgFBAXFFBEAgACgCREEOQQAQFCAAQn83A1gMAwsLA0AgACgCVCkDCCAAKQMYVARAQYCABBAYIQEgACgCVCgCACAAKAJUKQMIp0EEdGogATYCACABBEAgACgCVCgCACAAKAJUKQMIp0EEdGpCgIAENwMIIAAoAlQiASABKQMIQgF8NwMIIAAgACkDIEKAgAR8NwMgIAAoAlQoAgQgACgCVCkDCKdBA3RqIAApAyA3AwAMAgUgACgCREEOQQAQFCAAQn83A1gMBAsACwsLIAAgACgCVCkDQDcDMCAAIAAoAlQpAzggACgCVCgCBCAAKQMwp0EDdGopAwB9NwMoIABCADcDOANAIAApAzggACkDSFQEQCAAAn4gACkDSCAAKQM4fSAAKAJUKAIAIAApAzCnQQR0aikDCCAAKQMofVQEQCAAKQNIIAApAzh9DAELIAAoAlQoAgAgACkDMKdBBHRqKQMIIAApAyh9CzcDCCAAKAJUKAIAIAApAzCnQQR0aigCACAAKQMop2ogACgCUCAAKQM4p2ogACkDCKcQGRogACkDCCAAKAJUKAIAIAApAzCnQQR0aikDCCAAKQMofVEEQCAAIAApAzBCAXw3AzALIAAgACkDCCAAKQM4fDcDOCAAQgA3AygMAQsLIAAoAlQiASAAKQM4IAEpAzh8NwM4IAAoAlQgACkDMDcDQCAAKAJUKQM4IAAoAlQpAzBWBEAgACgCVCAAKAJUKQM4NwMwCyAAIAApAzg3A1gLIAApA1ghAiAAQeAAaiQAIAQgAjcDaAwBCyAEKAJQQRxBABAUIARCfzcDaAsgBCkDaCECIARB8ABqJAAgAgsHACAAKAIACxgAQaibAUIANwIAQbCbAUEANgIAQaibAQuGAQIEfwF+IwBBEGsiASQAAkAgACkDMFAEQAwBCwNAAkAgACAFQQAgAUEPaiABQQhqEIoBIgRBf0YNACABLQAPQQNHDQAgAiABKAIIQYCAgIB/cUGAgICAekZqIQILQX8hAyAEQX9GDQEgAiEDIAVCAXwiBSAAKQMwVA0ACwsgAUEQaiQAIAMLC4GNASMAQYAIC4EMaW5zdWZmaWNpZW50IG1lbW9yeQBuZWVkIGRpY3Rpb25hcnkALSsgICAwWDB4AC0wWCswWCAwWC0weCsweCAweABaaXAgYXJjaGl2ZSBpbmNvbnNpc3RlbnQASW52YWxpZCBhcmd1bWVudABpbnZhbGlkIGxpdGVyYWwvbGVuZ3RocyBzZXQAaW52YWxpZCBjb2RlIGxlbmd0aHMgc2V0AHVua25vd24gaGVhZGVyIGZsYWdzIHNldABpbnZhbGlkIGRpc3RhbmNlcyBzZXQAaW52YWxpZCBiaXQgbGVuZ3RoIHJlcGVhdABGaWxlIGFscmVhZHkgZXhpc3RzAHRvbyBtYW55IGxlbmd0aCBvciBkaXN0YW5jZSBzeW1ib2xzAGludmFsaWQgc3RvcmVkIGJsb2NrIGxlbmd0aHMAJXMlcyVzAGJ1ZmZlciBlcnJvcgBObyBlcnJvcgBzdHJlYW0gZXJyb3IAVGVsbCBlcnJvcgBJbnRlcm5hbCBlcnJvcgBTZWVrIGVycm9yAFdyaXRlIGVycm9yAGZpbGUgZXJyb3IAUmVhZCBlcnJvcgBabGliIGVycm9yAGRhdGEgZXJyb3IAQ1JDIGVycm9yAGluY29tcGF0aWJsZSB2ZXJzaW9uAG5hbgAvZGV2L3VyYW5kb20AaW52YWxpZCBjb2RlIC0tIG1pc3NpbmcgZW5kLW9mLWJsb2NrAGluY29ycmVjdCBoZWFkZXIgY2hlY2sAaW5jb3JyZWN0IGxlbmd0aCBjaGVjawBpbmNvcnJlY3QgZGF0YSBjaGVjawBpbnZhbGlkIGRpc3RhbmNlIHRvbyBmYXIgYmFjawBoZWFkZXIgY3JjIG1pc21hdGNoAGluZgBpbnZhbGlkIHdpbmRvdyBzaXplAFJlYWQtb25seSBhcmNoaXZlAE5vdCBhIHppcCBhcmNoaXZlAFJlc291cmNlIHN0aWxsIGluIHVzZQBNYWxsb2MgZmFpbHVyZQBpbnZhbGlkIGJsb2NrIHR5cGUARmFpbHVyZSB0byBjcmVhdGUgdGVtcG9yYXJ5IGZpbGUAQ2FuJ3Qgb3BlbiBmaWxlAE5vIHN1Y2ggZmlsZQBQcmVtYXR1cmUgZW5kIG9mIGZpbGUAQ2FuJ3QgcmVtb3ZlIGZpbGUAaW52YWxpZCBsaXRlcmFsL2xlbmd0aCBjb2RlAGludmFsaWQgZGlzdGFuY2UgY29kZQB1bmtub3duIGNvbXByZXNzaW9uIG1ldGhvZABzdHJlYW0gZW5kAENvbXByZXNzZWQgZGF0YSBpbnZhbGlkAE11bHRpLWRpc2sgemlwIGFyY2hpdmVzIG5vdCBzdXBwb3J0ZWQAT3BlcmF0aW9uIG5vdCBzdXBwb3J0ZWQARW5jcnlwdGlvbiBtZXRob2Qgbm90IHN1cHBvcnRlZABDb21wcmVzc2
lvbiBtZXRob2Qgbm90IHN1cHBvcnRlZABFbnRyeSBoYXMgYmVlbiBkZWxldGVkAENvbnRhaW5pbmcgemlwIGFyY2hpdmUgd2FzIGNsb3NlZABDbG9zaW5nIHppcCBhcmNoaXZlIGZhaWxlZABSZW5hbWluZyB0ZW1wb3JhcnkgZmlsZSBmYWlsZWQARW50cnkgaGFzIGJlZW4gY2hhbmdlZABObyBwYXNzd29yZCBwcm92aWRlZABXcm9uZyBwYXNzd29yZCBwcm92aWRlZABVbmtub3duIGVycm9yICVkAHJiAHIrYgByd2EAJXMuWFhYWFhYAE5BTgBJTkYAQUUAMS4yLjExAC9wcm9jL3NlbGYvZmQvAC4AKG51bGwpADogAFBLBgcAUEsGBgBQSwUGAFBLAwQAUEsBAgAAAAAAAFIFAADZBwAArAgAAJEIAACCBQAApAUAAI0FAADFBQAAbwgAADQHAADpBAAAJAcAAAMHAACvBQAA4QYAAMsIAAA3CAAAQQcAAFoEAAC5BgAAcwUAAEEEAABXBwAAWAgAABcIAACnBgAA4ggAAPcIAAD/BwAAywYAAGgFAADBBwAAIABBmBQLEQEAAAABAAAAAQAAAAEAAAABAEG8FAsJAQAAAAEAAAACAEHoFAsBAQBBiBULAQEAQaIVC6REOiY7JmUmZiZjJmAmIiDYJcsl2SVCJkAmaiZrJjwmuiXEJZUhPCC2AKcArCWoIZEhkyGSIZAhHyKUIbIlvCUgACEAIgAjACQAJQAmACcAKAApACoAKwAsAC0ALgAvADAAMQAyADMANAA1ADYANwA4ADkAOgA7ADwAPQA+AD8AQABBAEIAQwBEAEUARgBHAEgASQBKAEsATABNAE4ATwBQAFEAUgBTAFQAVQBWAFcAWABZAFoAWwBcAF0AXgBfAGAAYQBiAGMAZABlAGYAZwBoAGkAagBrAGwAbQBuAG8AcABxAHIAcwB0AHUAdgB3AHgAeQB6AHsAfAB9AH4AAiPHAPwA6QDiAOQA4ADlAOcA6gDrAOgA7wDuAOwAxADFAMkA5gDGAPQA9gDyAPsA+QD/ANYA3ACiAKMApQCnIJIB4QDtAPMA+gDxANEAqgC6AL8AECOsAL0AvAChAKsAuwCRJZIlkyUCJSQlYSViJVYlVSVjJVElVyVdJVwlWyUQJRQlNCUsJRwlACU8JV4lXyVaJVQlaSVmJWAlUCVsJWclaCVkJWUlWSVYJVIlUyVrJWolGCUMJYglhCWMJZAlgCWxA98AkwPAA6MDwwO1AMQDpgOYA6kDtAMeIsYDtQMpImEisQBlImQiICMhI/cASCKwABkitwAaIn8gsgCgJaAAAAAAAJYwB3csYQ7uulEJmRnEbQeP9GpwNaVj6aOVZJ4yiNsOpLjceR7p1eCI2dKXK0y2Cb18sX4HLbjnkR2/kGQQtx3yILBqSHG5895BvoR91Noa6+TdbVG11PTHhdODVphsE8Coa2R6+WL97Mllik9cARTZbAZjYz0P+vUNCI3IIG47XhBpTORBYNVycWei0eQDPEfUBEv9hQ3Sa7UKpfqotTVsmLJC1sm720D5vKzjbNgydVzfRc8N1txZPdGrrDDZJjoA3lGAUdfIFmHQv7X0tCEjxLNWmZW6zw+lvbieuAIoCIgFX7LZDMYk6Quxh3xvLxFMaFirHWHBPS1mtpBB3HYGcdsBvCDSmCoQ1e+JhbFxH7W2BqXkv58z1LjooskHeDT5AA+OqAmWGJgO4bsNan8tPW0Il2xkkQFcY+b0UWtrYmFsHNgwZYVOAGLy7ZUGbHulARvB9AiCV8QP9cbZsGVQ6bcS6ri+i3yIufzfHd1iSS3aFfN804xlTNT7WGGyTc5RtTp0ALyj4jC71EGl30rXldg9bcTRpPv01tNq6WlD/NluNEaIZ63QuGDacy0EROUdAzNfTAqqyXwN3TxxBVCqQQInEBALvoYgDMkltWhXs4VvIAnUZrmf5GHODvneXpjJ2SkimNCwtKjXxxc9s1mBDbQuO1y9t61susAgg7jttrO/mgzitgOa0rF0OUfV6q930p0VJtsEgxbccxILY+OEO2SUPmptDahaanoLzw7knf8JkyeuAAqxngd9RJMP8NKjCIdo8gEe/sIGaV1XYvfLZ2WAcTZsGecGa252G9T+4CvTiVp62hDMSt1nb9+5+fnvvo5DvrcX1Y6wYOij1tZ+k9GhxMLYOFLy30/xZ7vRZ1e8pt0GtT9LNrJI2isN2EwbCq/2SgM2YHoEQcPvYN9V32eo745uMXm+aUaMs2HLGoNmvKDSbyU24mhSlXcMzANHC7u5FgIiLyYFVb47usUoC72yklq0KwRqs1yn/9fCMc/QtYue2Swdrt5bsMJkmybyY+yco2p1CpNtAqkGCZw/Ng7rhWcHchNXAAWCSr+VFHq44q4rsXs4G7YMm47Skg2+1eW379x8Id/bC9TS04ZC4tTx+LPdaG6D2h/NFr6BWya59uF3sG93R7cY5loIiHBqD//KOwZmXAsBEf+eZY9prmL40/9rYUXPbBZ44gqg7tIN11SDBE7CswM5YSZnp/cWYNBNR2lJ23duPkpq0a7cWtbZZgvfQPA72DdTrrypxZ673n/Pskfp/7UwHPK9vYrCusowk7NTpqO0JAU20LqTBtfNKVfeVL9n2SMuemazuEphxAIbaF2UK28qN74LtKGODMMb3wVaje8CLQAAAABBMRsZgmI2MsNTLSsExWxkRfR3fYanWlbHlkFPCIrZyEm7wtGK6O/6y9n04wxPtaxNfq61ji2Dns8cmIdREsJKECPZU9Nw9HiSQe9hVdeuLhTmtTfXtZgcloSDBVmYG4IYqQCb2/otsJrLNqldXXfmHGxs/98/QdSeDlrNoiSEleMVn4wgRrKnYXepvqbh6PHn0PPoJIPew2Wyxdqqrl1d659GRCjMa29p/XB2rmsxOe9aKiAsCQcLbTgcEvM2Rt+yB13GcVRw7TBla/T38yq7tsIxonWRHIk0oAeQ+7yfF7qNhA553qklOO+yPP9583O+SOhqfRvFQTwq3lgFT3nwRH5i6YctT8LGHFTbAYoVlEC7Do2D6COmwtk4vw3FoDhM9Lshj6eWCs6WjRMJAMxcSDHXRYti+m7KU+F3VF27uhVsoKPWP42Ilw6WkVCY194RqczH0vrh7JPL+vVc12JyHeZ5a961VECfhE9ZWBIOFhkjFQ/acDgkm0EjPadr/WXmWuZ8JQnLV2Q40E6jrpEB4p+KGCHMpzNg/bwqr+Ekre7QP7QtgxKfbLIJhqskSMnqFVPQKUZ++2h3ZeL2eT8vt0gkNnQbCR01KhIE8rxTS7ONSFJw3mV5Me9+YP7z5ue/wv3+fJHQ1T2gy8z6NoqDuweRmnhUvLE5ZaeoS5iDOwqpmCLJ+rUJiMuuEE9d718ObPRGzT/ZbYwOwnRDElrzAiNB6sFwbMGAQXfYR9c2lwbmLY7FtQClhIQbvBqKQXFbu1pomOh3Q9nZbFoeTy0VX342DJwtGyfdHAA+EgCYuVMxg6CQYq6L0VO1k
hbF9N1X9O/ElKfC79WW2fbpvAeuqI0ct2veMZwq7yqF7XlryqxIcNNvG134LipG4eE23magB8V/Y1ToVCJl803l87ICpMKpG2eRhDAmoJ8puK7F5Pmf3v06zPPWe/3oz7xrqYD9WrKZPgmfsn84hKuwJBws8RUHNTJGKh5zdzEHtOFwSPXQa1E2g0Z6d7JdY07X+ssP5uHSzLXM+Y2E1+BKEpavCyONtshwoJ2JQbuERl0jAwdsOBrEPxUxhQ4OKEKYT2cDqVR+wPp5VYHLYkwfxTiBXvQjmJ2nDrPclhWqGwBU5VoxT/yZYmLX2FN5zhdP4UlWfvpQlS3Xe9QczGITio0tUruWNJHoux/Q2aAG7PN+Xq3CZUdukUhsL6BTdeg2EjqpBwkjalQkCCtlPxHkeaeWpUi8j2YbkaQnKoq94LzL8qGN0Oti3v3AI+/m2b3hvBT80KcNP4OKJn6ykT+5JNBw+BXLaTtG5kJ6d/1btWtl3PRafsU3CVPudjhI97GuCbjwnxKhM8w/inL9JJMAAAAAN2rCAW7UhANZvkYC3KgJB+vCywayfI0EhRZPBbhREw6PO9EP1oWXDeHvVQxk+RoJU5PYCAotngo9R1wLcKMmHEfJ5B0ed6IfKR1gHqwLLxubYe0awt+rGPW1aRnI8jUS/5j3E6YmsRGRTHMQFFo8FSMw/hR6jrgWTeR6F+BGTTjXLI85jpLJO7n4Czo87kQ/C4SGPlI6wDxlUAI9WBdeNm99nDc2w9o1AakYNIS/VzGz1ZUw6mvTMt0BETOQ5Wskp4+pJf4x7yfJWy0mTE1iI3snoCIimeYgFfMkISi0eCof3rorRmD8KXEKPij0HHEtw3azLJrI9S6tojcvwI2acPfnWHGuWR5zmTPcchwlk3crT1F2cvEXdEWb1XV43Il+T7ZLfxYIDX0hYs98pHSAeZMeQnjKoAR6/crGe7AuvGyHRH5t3vo4b+mQ+m5shrVrW+x3agJSMWg1OPNpCH+vYj8VbWNmqythUcHpYNTXpmXjvWRkugMiZo1p4Gcgy9dIF6EVSU4fU0t5dZFK/GPeT8sJHE6St1pMpd2YTZiaxEav8AZH9k5ARcEkgkREMs1Bc1gPQCrmSUIdjItDUGjxVGcCM1U+vHVXCda3VozA+FO7qjpS4hR8UNV+vlHoOeJa31MgW4btZlmxh6RYNJHrXQP7KVxaRW9ebS+tX4AbNeG3cffg7s+x4tmlc+Ncszzma9n+5zJnuOUFDXrkOEom7w8g5O5WnqLsYfRg7eTiL+jTiO3pijar671caerwuBP9x9LR/J5sl/6pBlX/LBAa+ht62PtCxJ75da5c+EjpAPN/g8LyJj2E8BFXRvGUQQn0oyvL9fqVjffN/0/2YF142Vc3utgOifzaOeM+27z1cd6Ln7Pf0iH13eVLN9zYDGvX72ap1rbY79SBsi3VBKRi0DPOoNFqcObTXRok0hD+XsUnlJzEfiraxklAGMfMVlfC+zyVw6KC08GV6BHAqK9Ny5/Fj8rGe8nI8RELyXQHRMxDbYbNGtPAzy25As5Alq+Rd/xtkC5CK5IZKOmTnD6mlqtUZJfy6iKVxYDglPjHvJ/PrX6elhM4nKF5+p0kb7WYEwV3mUq7MZt90fOaMDWJjQdfS4xe4Q2OaYvPj+ydgIrb90KLgkkEibUjxoiIZJqDvw5YguawHoDR2tyBVMyThGOmUYU6GBeHDXLVhqDQ4qmXuiCozgRmqvlupKt8eOuuSxIprxKsb60lxq2sGIHxpy/rM6Z2VXWkQT+3pcQp+KDzQzqhqv18o52XvqLQc8S15xkGtL6nQLaJzYK3DNvNsjuxD7NiD0mxVWWLsGgi17tfSBW6BvZTuDGckbm0it68g+AcvdpeWr/tNJi+AAAAAGVnvLiLyAmq7q+1EleXYo8y8N433F9rJbk4153vKLTFik8IfWTgvW8BhwHXuL/WSt3YavIzd9/gVhBjWJ9XGVD6MKXoFJ8Q+nH4rELIwHvfrafHZ0MIcnUmb87NcH+tlRUYES37t6Q/ntAYhyfozxpCj3OirCDGsMlHegg+rzKgW8iOGLVnOwrQAIeyaThQLwxf7Jfi8FmFh5flPdGHhmW04DrdWk+Pzz8oM3eGEOTq43dYUg3Y7UBov1H4ofgr8MSfl0gqMCJaT1ee4vZvSX+TCPXHfadA1RjA/G1O0J81K7cjjcUYlp+gfyonGUf9unwgQQKSj/QQ9+hIqD1YFJtYP6gjtpAdMdP3oYlqz3YUD6jKrOEHf76EYMMG0nCgXrcXHOZZuKn0PN8VTIXnwtHggH5pDi/Le2tId8OiDw3Lx2ixcynHBGFMoLjZ9ZhvRJD/0/x+UGbuGzfaVk0nuQ4oQAW2xu+wpKOIDBwasNuBf9dnOZF40iv0H26TA/cmO2aQmoOIPy+R7ViTKVRgRLQxB/gM36hNHrrP8abs35L+ibguRmcXm1QCcCfsu0jwcd4vTMkwgPnbVedFY5ygP2v5x4PTF2g2wXIPinnLN13krlDhXED/VE4lmOj2c4iLrhbvNxb4QIIEnSc+vCQf6SFBeFWZr9fgi8qwXDM7tlntXtHlVbB+UEfVGez/bCE7YglGh9rn6TLIgo6OcNSe7Six+VGQX1bkgjoxWDqDCY+n5m4zHwjBhg1tpjq1pOFAvcGG/AUvKUkXSk71r/N2IjKWEZ6KeL4rmB3ZlyBLyfR4Lq5IwMAB/dKlZkFqHF6W93k5Kk+Xlp9d8vEj5QUZa01gftf1jtFi5+u23l9SjgnCN+m1etlGAGi8IbzQ6jHfiI9WYzBh+dYiBJ5qmr2mvQfYwQG/Nm60rVMJCBWaTnId/ynOpRGGe7d04ccPzdkQkqi+rCpGERk4I3algHVmxtgQAXpg/q7PcpvJc8oi8aRXR5YY76k5rf3MXhFFBu5NdmOJ8c6NJkTc6EH4ZFF5L/k0HpNB2rEmU7/WmuvpxvmzjKFFC2IO8BkHaUyhvlGbPNs2J4Q1mZKWUP4uLpm5VCb83uieEnFdjHcW4TTOLjapq0mKEUXmPwMggYO7dpHg4xP2XFv9WelJmD5V8SEGgmxEYT7Uqs6Lxs+pN344QX/WXSbDbrOJdnzW7srEb9YdWQqxoeHkHhTzgXmoS9dpyxOyDnerXKHCuTnGfgGA/qmc5ZkVJAs2oDZuURyOpxZmhsJx2j4s3m8sSbnTlPCBBAmV5rixe0kNox4usRtIPtJDLVlu+8P22+mmkWdRH6mwzHrODHSUYblm8QYF3gAAAAB3BzCW7g5hLJkJUboHbcQZcGr0j+ljpTWeZJWjDtuIMnncuKTg1ekel9LZiAm2TCt+sXy957gtB5C/HZEdtxBkarAg8vO5cUiEvkHeGtrUfW3d5Ov01LVRg9OFxxNsmFZka6jA/WL5eoplyewUAVxPYwZs2foPPWONCA31O24gyExpEF7VYEHkomdxcjwD5NFLBNRH0g2F/aUKtWs1taj6QrKYbNu7ydasvPlAMths40XfXHXc1g3Pq9E9WSbZMKxR3gA6yNdRgL/QYRYhtPS1VrPEI8+6
lZm4vaUPKAK4nl8FiAjGDNmysQvpJC9vfIdYaEwRwWEdq7ZmLT123EGQAdtxBpjSILzv1RAqcbGFiQa2tR+fv+Sl6LjUM3gHyaIPAPk0lgmojuEOmBh/ag27CG09LZFkbJfmY1wBa2tR9BxsYWKFZTDY8mIATmwGle0bAaV7ggj0wfUPxFdlsNnGErfpUIu+uOr8uYh8Yt0d3xXaLUmM03zz+9RMZU2yYVg6tVHOo7wAdNS7MOJK36VBPdiV16TRxG3T1vT7Q2npajRu2fytZ4hG2mC40EQELXMzAx3lqgpMX90NfMlQBXE8JwJBqr4LEBDJDCCGV2i1JSBvhbO5ZtQJzmHkn17e+Q4p2cmYsNCYIsfXqLRZsz0XLrQNgbe9XDvAumyt7biDIJq/s7YDtuIMdLHSmurVRzmd0nevBNsmFXPcFoPjYwsSlGQ7hA1taj56alqo5A7PC5MJ/50KAK4nfQeesfAPk0SHCKPSHgHyaGkGwv73YlddgGVnyxlsNnFuawbn/tQbdonTK+AQ2npaZ91KzPm532+Ovu/5F7e+Q2CwjtXW1qPoodGTfjjYwsRP3/JS0btn8aa8V2c/tQbdSLI2S9gNK9qvChtMNgNK9kEEemDfYO/DqGffVTFuju9Gab55y2GzjLxmgxolb9KgUmjiNswMd5W7C0cDIgIWuVUFJi/Fuju+sr0LKCu0WpJcs2oEwtf/p7XQzzEs2Z6LW96uHZtkwrDsY/ImdWqjnAJtkwqcCQap6w42P3IHZ4UFAFcTlb9KguK4ehR7sSuuDLYbOJLSjpvl1b4NfNzvtwvb3yGG09LU8dTiQmjds/gf2oNugb4Wzfa5JltvsHfhGLdHd4gIWub/D2pwZgY7yhEBC1yPZZ7/+GKuaWFr/9MWbM9FoArieNcN0u5OBINUOQOzwqdnJmHQYBb3SWlHTT5ud9uu0WpK2dZa3EDfC2Y32DvwqbyuU967nsVHss9/MLX/6b298hzKusKKU7OTMCS0o6a60DYFzdcGk1TeVykj2We/s2Z6LsRhSrhdaBsCKm8rlLQLvjfDDI6hWgXfGy0C740AAAAAGRsxQTI2YoIrLVPDZGzFBH139EVWWqeGT0GWx8jZigjRwrtJ+u/oiuP02custU8Mta5+TZ6DLY6HmBzPSsISUVPZIxB49HDTYe9Bki6u11U3teYUHJi11wWDhJaCG5hZmwCpGLAt+tupNsua5nddXf9sbBzUQT/fzVoOnpWEJKKMnxXjp7JGIL6pd2Hx6OGm6PPQ58PegyTaxbJlXV2uqkRGn+tva8wodnD9aTkxa64gKlrvCwcJLBIcOG3fRjbzxl0Hsu1wVHH0a2Uwuyrz96IxwraJHJF1kAegNBefvPsOhI26JaneeTyy7zhz83n/auhIvkHFG31Y3io88HlPBelifkTCTy2H21QcxpQVigGNDrtApiPog7842cI4oMUNIbv0TAqWp48TjZbOXMwACUXXMUhu+mKLd+FTyrq7XVSjoGwViI0/1pGWDpfe15hQx8ypEezh+tL1+suTcmLXXGt55h1AVLXeWU+EnxYOElgPFSMZJDhw2j0jQZtl/WunfOZa5lfLCSVO0DhkAZGuoxiKn+Izp8whKrz9YK0k4a+0P9DunxKDLYYJsmzJSCSr0FMV6vt+RiniZXdoLz959jYkSLcdCRt0BBIqNUtTvPJSSI2zeWXecGB+7zHn5vP+/v3Cv9XQkXzMy6A9g4o2+pqRB7uxvFR4qKdlOTuDmEsimKkKCbX6yRCuy4hf711PRvRsDm3ZP810wg6M81oSQ+pBIwLBbHDB2HdBgJc210eOLeYGpQC1xbwbhIRxQYoaaFq7W0N36JhabNnZFS1PHgw2fl8nGy2cPgAc3bmYABKggzFTi65ikJK1U9Hd9MUWxO/0V+/Cp5T22ZbVrge86bccjaicMd5rhSrvKspree3TcEis+F0bb+FGKi5m3jbhf8UHoFToVGNN82UiArLz5RupwqQwhJFnKZ+gJuTFrrj93p/51vPMOs/o/XuAqWu8mbJa/bKfCT6rhDh/LBwksDUHFfEeKkYyBzF3c0hw4bRRa9D1ekaDNmNdsnfL+tdO0uHmD/nMtczg14SNr5YSSraNIwudoHDIhLtBiQMjXUYaOGwHMRU/xCgODoVnT5hCflSpA1V5+sBMYsuBgTjFH5gj9F6zDqedqhWW3OVUABv8TzFa12Jimc55U9hJ4U8XUPp+VnvXLZVizBzULY2KEzSWu1Ifu+iRBqDZ0F5+8+xHZcKtbEiRbnVToC86EjboIwkHqQgkVGoRP2Urlqd55I+8SKWkkRtmvYoqJ/LLvODr0I2hwP3eYtnm7yMUvOG9DafQ/CaKgz8/kbJ+cNAkuWnLFfhC5kY7W/13etxla7XFflr07lMJN/dIOHa4Ca6xoRKf8Io/zDOTJP1yAAAAAAHCajcDhNRuAka+WQcJqNwGy8LrBI18sgVPFoUOE1G4D9E7jw2XhdYMVe/hCRr5ZAjYk1MKni0KC1xHPRwmo3Ad5MlHH6J3Hh5gHSkbLwusGu1hmxir38IZabX1EjXyyBP3mP8RsSamEHNMkRU8WhQU/jAjFriOehd65E04TUbgOY8s1zvJko46C/i5P0TuPD6GhAs8wDpSPQJQZTZeF1g3nH1vNdrDNjQYqQExV7+EMJXVszLTa+ozEQHdJGvlkCWpj6cn7zH+Ji1bySNiTUwioCd7IOaZIiEk8xUqeLQoK7reHyn8YEYoPgpxLXEc9CyzdsMu9ciaLzeirXCajcBxWOf3cx5ZrnLcM5l3kyUcdlFPK3QX8XJ11ZtFfonceH9Ltk99DQgWfM9iIXmAdKR4Qh6TegSgynvGyv1svC6wbX5Eh284+t5u+pDpa7WGbGp37FtoMVICafM4NWKvfwhjbRU/YSurZmDpwVFlptfUZGS942YiA7pn4GmNSNfLIEkVoRdLUx9OSpF1eU/eY/xOHAnLTFq3kk2Y3aVGxJqYRwbwr0VATvZEgiTBQc0yREAPWHNCSeYqQ4uMHVTxaFBVMwJnV3W8Pla31glT+MCMUjqqu1B8FOJRvn7VWuI56FsgU99ZZu2GWKSHsV3rkTRcKfsDXm9FWl+tL23hNRuA4Pdxt+Kxz+7jc6XZ5jyzXOf+2WvluGcy5HoNBe8mSjju5CAP7KKeVu1g9GHoL+Lk6e2I0+urNorqaVy9/RO48PzR0sf+l2ye/1UGqfoaECz72Hob+Z7EQvhcrnXzAOlI8sKDf/CEPSbxRlcR9AlBlPXLK6P3jZX69k//zdl4XWDYujdX2vyJDts+4znecfW837Ofi931IdLcN0vl12sM2NapZu/U79i21S2ygdBipATRoM4z0+ZwatIkGl3FXv4QxJyUJ8baKn7HGEBJwldWzMOVPPvB04KiwBHolctNr6jKj8WfyMl7xskLEfHMRAd0zYZtQ8/A0xrOArktka+WQJBt/HeSK0Iuk+koGZamPpyXZFSrlSLq8pTggMWfvMf4nn6tz5w4E5ad+nmhmLVvJJl3BRObMbtKmvPRfY2JNTC
MS18Hjg3hXo/Pi2mKgJ3si0L324kESYKIxiO1g5pkiIJYDr+AHrDmgdza0YSTzFSFUaZjhxcYOobVcg2p4tCgqCC6l6pmBM6rpG75rut4fK8pEkutb6wSrK3GJafxgRimM+svpHVVdqW3P0Gg+CnEoTpD86N8/aqivpedtcRz0LQGGee2QKe+t4LNibLN2wyzD7E7sUkPYrCLZVW71yJouhVIX7hT9ga5kZwxvN6KtL0c4IO/Wl7avpg07QAAAAC4vGdlqgnIixK1r+6PYpdXN97wMiVrX9yd1zi5xbQo730IT4pvveBk1wGHAUrWv7jyatjd4N93M1hjEFZQGVef6KUw+voQnxRCrPhx33vAyGfHp611cghDzc5vJpWtf3AtERgVP6S3+4cY0J4az+gnonOPQrDGIKwIekfJoDKvPhiOyFsKO2e1socA0C9QOGmX7F8MhVnw4j3ll4dlhofR3TrgtM+PT1p3Myg/6uQQhlJYd+NA7dgN+FG/aPAr+KFIl5/EWiIwKuKeV09/SW/2x/UIk9VAp31t/MAYNZ/QTo0jtyuflhjFJyp/oLr9RxkCQSB8EPSPkqhI6PebFFg9I6g/WDEdkLaJoffTFHbPaqzKqA++fwfhBsNghF6gcNLmHBe39Km4WUwV3zzRwueFaX6A4HvLLw7Dd0hryw0PonOxaMdhBMcp2bigTERvmPX80/+Q7mZQflbaNxsOuSdNtgVAKKSw78YcDIijgduwGjln138r0niRk24f9Dsm9wODmpBmkS8/iCmTWO20RGBUDPgHMR5NqN+m8c+6/pLf7EYuuIlUmxdn7CdwAnHwSLvJTC/e2/mAMGNF51VrP6Cc04PH+cE2aBd5ig9y5F03y1zhUK5OVP9A9uiYJa6LiHMWN+8WBIJA+Lw+J50h6R8kmVV4QYvg168zXLDK7Vm2O1Xl0V5HUH6w/+wZ1WI7IWzah0YJyDLp53COjoIo7Z7UkFH5sYLkVl86WDE6p48Jgx8zbuYNhsEItTqmbb1A4aQF/IbBF0kpL6/1TkoyInbzip4Rlpgrvnggl9kdePTJS8BIri7S/QHAakFmpfeWXhxPKjl5XZ+Wl+Uj8fJNaxkF9dd+YOdi0Y5f3rbrwgmOUnq16TdoAEbZ0LwhvIjfMeowY1aPItb5YZpqngQHvaa9vwHB2K20bjYVCAlTHXJOmqXOKf+3e4YRD8fhdJIQ2c0qrL6oOBkRRoCldiPYxmZ1YHoBEHLPrv7Kc8mbV6TxIu8Ylkf9rTmpRRFezHZN7gbO8Ylj3EQmjWT4Qej5L3lRQZMeNFMmsdrrmta/s/nG6QtFoYwZ8A5ioUxpBzybUb6EJzbblpKZNS4u/lAmVLmZnuje/IxdcRI04RZ3qTYuzhGKSasDP+ZFu4OBIOPgkXZbXPYTSelZ/fFVPphsggYh1D5hRMaLzqp+N6nP1n9BOG7DJl18domzxMru1lkd1m/hobEK8xQe5EuoeYETy2nXq3cOsrnCoVwBfsY5nKn+gCQVmeU2oDYLjhxRboZmFqc+2nHCLG/eLJTTuUkJBIHwsbjmlaMNSXsbsS4eQ9I+SPtuWS3p2/bDUWeRpsywqR90DM56ZrlhlN4FBvEUBAAAtgcAAHoJAACZBQAAWwUAALoFAAAABAAARQUAAM8FAAB6CQBB0dkAC7YQAQIDBAQFBQYGBgYHBwcHCAgICAgICAgJCQkJCQkJCQoKCgoKCgoKCgoKCgoKCgoLCwsLCwsLCwsLCwsLCwsLDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwNDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4ODg4PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PDw8PAAAQERISExMUFBQUFRUVFRYWFhYWFhYWFxcXFxcXFxcYGBgYGBgYGBgYGBgYGBgYGRkZGRkZGRkZGRkZGRkZGRoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxscHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHBwcHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHR0dHQABAgMEBQYHCAgJCQoKCwsMDAwMDQ0NDQ4ODg4PDw8PEBAQEBAQEBARERERERERERISEhISEhISExMTExMTExMUFBQUFBQUFBQUFBQUFBQUFRUVFRUVFRUVFRUVFRUVFRYWFhYWFhYWFhYWFhYWFhYXFxcXFxcXFxcXFxcXFxcXGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgYGBgZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRkZGRoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGhoaGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxsbGxwQMAAAEDUAAAEBAAAeAQAADwAAAJA0AACQNQAAAAAAAB4AAAAPAAAAAAAAABA2AAAAAAAAEwAAAAcAAAAAAAAADAAIAIwACABMAAgAzAAIACwACACsAAgAbAAIAOwACAAcAAgAnAAIAFwACADcAAgAPAAIALwACAB8AAgA/AAIAAIACACCAAgAQgAIAMIACAAiAAgAogAIAGIACADiAAgAEgAIAJIACABSAAgA0gAIADIACACyAAgAcgAIAPIACAAKAAgAigAIAEoACADKAAgAKgAIAKoACABqAAgA6gAIABoACACaAAgAWgAIANoACAA6AAgAugAIAHoACAD6AAgABgAIAIYACABGAAgAxgAIACYACACmAAgAZgAIAOYACAAWAAgAlgAIAFYACADWAAgANgAIALYACAB2AAgA9gAIAA4ACACOAAgATgAIAM4ACAAuAAgArgAIAG4ACADuAAgAHgAIAJ4ACABeAAgA3gAIAD4ACAC+AAgAfgAIAP4ACAABAAgAgQAIAEEACADBAAgAIQAIAKEACABhAAgA4QAIABEACACRAAgAUQAIANEACAAxAAgAsQAIAHEACADxAAgACQAIAIkACABJAAgAyQAIACkACACpAAgAaQAIAOkACAAZAAgAmQAIAFkACADZAAgAOQAIALkACAB5AAgA+QAIAAUACACFAAgARQAIAMUACAAlAAgApQAIAGUACADlAAgAFQAIAJUACABVAAgA1QAIADUACAC1AAgAdQAIAPUACAANAAgAjQAIAE0ACADNAAgALQAIAK0ACABtAAgA7QAIAB0ACACdAAgAXQAIAN0ACAA9AAgAvQAIAH0ACAD9AAgAEw
AJABMBCQCTAAkAkwEJAFMACQBTAQkA0wAJANMBCQAzAAkAMwEJALMACQCzAQkAcwAJAHMBCQDzAAkA8wEJAAsACQALAQkAiwAJAIsBCQBLAAkASwEJAMsACQDLAQkAKwAJACsBCQCrAAkAqwEJAGsACQBrAQkA6wAJAOsBCQAbAAkAGwEJAJsACQCbAQkAWwAJAFsBCQDbAAkA2wEJADsACQA7AQkAuwAJALsBCQB7AAkAewEJAPsACQD7AQkABwAJAAcBCQCHAAkAhwEJAEcACQBHAQkAxwAJAMcBCQAnAAkAJwEJAKcACQCnAQkAZwAJAGcBCQDnAAkA5wEJABcACQAXAQkAlwAJAJcBCQBXAAkAVwEJANcACQDXAQkANwAJADcBCQC3AAkAtwEJAHcACQB3AQkA9wAJAPcBCQAPAAkADwEJAI8ACQCPAQkATwAJAE8BCQDPAAkAzwEJAC8ACQAvAQkArwAJAK8BCQBvAAkAbwEJAO8ACQDvAQkAHwAJAB8BCQCfAAkAnwEJAF8ACQBfAQkA3wAJAN8BCQA/AAkAPwEJAL8ACQC/AQkAfwAJAH8BCQD/AAkA/wEJAAAABwBAAAcAIAAHAGAABwAQAAcAUAAHADAABwBwAAcACAAHAEgABwAoAAcAaAAHABgABwBYAAcAOAAHAHgABwAEAAcARAAHACQABwBkAAcAFAAHAFQABwA0AAcAdAAHAAMACACDAAgAQwAIAMMACAAjAAgAowAIAGMACADjAAgAAAAFABAABQAIAAUAGAAFAAQABQAUAAUADAAFABwABQACAAUAEgAFAAoABQAaAAUABgAFABYABQAOAAUAHgAFAAEABQARAAUACQAFABkABQAFAAUAFQAFAA0ABQAdAAUAAwAFABMABQALAAUAGwAFAAcABQAXAAUAQbDqAAtNAQAAAAEAAAABAAAAAQAAAAIAAAACAAAAAgAAAAIAAAADAAAAAwAAAAMAAAADAAAABAAAAAQAAAAEAAAABAAAAAUAAAAFAAAABQAAAAUAQaDrAAtlAQAAAAEAAAACAAAAAgAAAAMAAAADAAAABAAAAAQAAAAFAAAABQAAAAYAAAAGAAAABwAAAAcAAAAIAAAACAAAAAkAAAAJAAAACgAAAAoAAAALAAAACwAAAAwAAAAMAAAADQAAAA0AQdDsAAsjAgAAAAMAAAAHAAAAAAAAABAREgAIBwkGCgULBAwDDQIOAQ8AQYTtAAtpAQAAAAIAAAADAAAABAAAAAUAAAAGAAAABwAAAAgAAAAKAAAADAAAAA4AAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAwAAAAOAAAAEAAAABQAAAAYAAAAHAAAACAAAAAoAAAAMAAAADgAEGE7gALegEAAAACAAAAAwAAAAQAAAAGAAAACAAAAAwAAAAQAAAAGAAAACAAAAAwAAAAQAAAAGAAAACAAAAAwAAAAAABAACAAQAAAAIAAAADAAAABAAAAAYAAAAIAAAADAAAABAAAAAYAAAAIAAAADAAAABAAAAAYAAAMS4yLjExAEGI7wALbQcAAAAEAAQACAAEAAgAAAAEAAUAEAAIAAgAAAAEAAYAIAAgAAgAAAAEAAQAEAAQAAkAAAAIABAAIAAgAAkAAAAIABAAgACAAAkAAAAIACAAgAAAAQkAAAAgAIAAAgEABAkAAAAgAAIBAgEAEAkAQYDwAAulAgMABAAFAAYABwAIAAkACgALAA0ADwARABMAFwAbAB8AIwArADMAOwBDAFMAYwBzAIMAowDDAOMAAgEAAAAAAAAQABAAEAAQABAAEAAQABAAEQARABEAEQASABIAEgASABMAEwATABMAFAAUABQAFAAVABUAFQAVABAATQDKAAAAAQACAAMABAAFAAcACQANABEAGQAhADEAQQBhAIEAwQABAYEBAQIBAwEEAQYBCAEMARABGAEgATABQAFgAAAAABAAEAAQABAAEQARABIAEgATABMAFAAUABUAFQAWABYAFwAXABgAGAAZABkAGgAaABsAGwAcABwAHQAdAEAAQAAQABEAEgAAAAgABwAJAAYACgAFAAsABAAMAAMADQACAA4AAQAPAEGw8gALwRFgBwAAAAhQAAAIEAAUCHMAEgcfAAAIcAAACDAAAAnAABAHCgAACGAAAAggAAAJoAAACAAAAAiAAAAIQAAACeAAEAcGAAAIWAAACBgAAAmQABMHOwAACHgAAAg4AAAJ0AARBxEAAAhoAAAIKAAACbAAAAgIAAAIiAAACEgAAAnwABAHBAAACFQAAAgUABUI4wATBysAAAh0AAAINAAACcgAEQcNAAAIZAAACCQAAAmoAAAIBAAACIQAAAhEAAAJ6AAQBwgAAAhcAAAIHAAACZgAFAdTAAAIfAAACDwAAAnYABIHFwAACGwAAAgsAAAJuAAACAwAAAiMAAAITAAACfgAEAcDAAAIUgAACBIAFQijABMHIwAACHIAAAgyAAAJxAARBwsAAAhiAAAIIgAACaQAAAgCAAAIggAACEIAAAnkABAHBwAACFoAAAgaAAAJlAAUB0MAAAh6AAAIOgAACdQAEgcTAAAIagAACCoAAAm0AAAICgAACIoAAAhKAAAJ9AAQBwUAAAhWAAAIFgBACAAAEwczAAAIdgAACDYAAAnMABEHDwAACGYAAAgmAAAJrAAACAYAAAiGAAAIRgAACewAEAcJAAAIXgAACB4AAAmcABQHYwAACH4AAAg+AAAJ3AASBxsAAAhuAAAILgAACbwAAAgOAAAIjgAACE4AAAn8AGAHAAAACFEAAAgRABUIgwASBx8AAAhxAAAIMQAACcIAEAcKAAAIYQAACCEAAAmiAAAIAQAACIEAAAhBAAAJ4gAQBwYAAAhZAAAIGQAACZIAEwc7AAAIeQAACDkAAAnSABEHEQAACGkAAAgpAAAJsgAACAkAAAiJAAAISQAACfIAEAcEAAAIVQAACBUAEAgCARMHKwAACHUAAAg1AAAJygARBw0AAAhlAAAIJQAACaoAAAgFAAAIhQAACEUAAAnqABAHCAAACF0AAAgdAAAJmgAUB1MAAAh9AAAIPQAACdoAEgcXAAAIbQAACC0AAAm6AAAIDQAACI0AAAhNAAAJ+gAQBwMAAAhTAAAIEwAVCMMAEwcjAAAIcwAACDMAAAnGABEHCwAACGMAAAgjAAAJpgAACAMAAAiDAAAIQwAACeYAEAcHAAAIWwAACBsAAAmWABQHQwAACHsAAAg7AAAJ1gASBxMAAAhrAAAIKwAACbYAAAgLAAAIiwAACEsAAAn2ABAHBQAACFcAAAgXAEAIAAATBzMAAAh3AAAINwAACc4AEQcPAAAIZwAACCcAAAmuAAAIBwAACIcAAAhHAAAJ7gAQBwkAAAhfAAAIHwAACZ4AFAdjAAAIfwAACD8AAAneABIHGwAACG8AAAgvAAAJvgAACA8AAAiPAAAITwAACf4AYAcAAAAIUAAACBAAFAhzABIHHwAACHAAAAgwAAAJwQAQBwoAAAhgAAAIIAAACaEAA
AgAAAAIgAAACEAAAAnhABAHBgAACFgAAAgYAAAJkQATBzsAAAh4AAAIOAAACdEAEQcRAAAIaAAACCgAAAmxAAAICAAACIgAAAhIAAAJ8QAQBwQAAAhUAAAIFAAVCOMAEwcrAAAIdAAACDQAAAnJABEHDQAACGQAAAgkAAAJqQAACAQAAAiEAAAIRAAACekAEAcIAAAIXAAACBwAAAmZABQHUwAACHwAAAg8AAAJ2QASBxcAAAhsAAAILAAACbkAAAgMAAAIjAAACEwAAAn5ABAHAwAACFIAAAgSABUIowATByMAAAhyAAAIMgAACcUAEQcLAAAIYgAACCIAAAmlAAAIAgAACIIAAAhCAAAJ5QAQBwcAAAhaAAAIGgAACZUAFAdDAAAIegAACDoAAAnVABIHEwAACGoAAAgqAAAJtQAACAoAAAiKAAAISgAACfUAEAcFAAAIVgAACBYAQAgAABMHMwAACHYAAAg2AAAJzQARBw8AAAhmAAAIJgAACa0AAAgGAAAIhgAACEYAAAntABAHCQAACF4AAAgeAAAJnQAUB2MAAAh+AAAIPgAACd0AEgcbAAAIbgAACC4AAAm9AAAIDgAACI4AAAhOAAAJ/QBgBwAAAAhRAAAIEQAVCIMAEgcfAAAIcQAACDEAAAnDABAHCgAACGEAAAghAAAJowAACAEAAAiBAAAIQQAACeMAEAcGAAAIWQAACBkAAAmTABMHOwAACHkAAAg5AAAJ0wARBxEAAAhpAAAIKQAACbMAAAgJAAAIiQAACEkAAAnzABAHBAAACFUAAAgVABAIAgETBysAAAh1AAAINQAACcsAEQcNAAAIZQAACCUAAAmrAAAIBQAACIUAAAhFAAAJ6wAQBwgAAAhdAAAIHQAACZsAFAdTAAAIfQAACD0AAAnbABIHFwAACG0AAAgtAAAJuwAACA0AAAiNAAAITQAACfsAEAcDAAAIUwAACBMAFQjDABMHIwAACHMAAAgzAAAJxwARBwsAAAhjAAAIIwAACacAAAgDAAAIgwAACEMAAAnnABAHBwAACFsAAAgbAAAJlwAUB0MAAAh7AAAIOwAACdcAEgcTAAAIawAACCsAAAm3AAAICwAACIsAAAhLAAAJ9wAQBwUAAAhXAAAIFwBACAAAEwczAAAIdwAACDcAAAnPABEHDwAACGcAAAgnAAAJrwAACAcAAAiHAAAIRwAACe8AEAcJAAAIXwAACB8AAAmfABQHYwAACH8AAAg/AAAJ3wASBxsAAAhvAAAILwAACb8AAAgPAAAIjwAACE8AAAn/ABAFAQAXBQEBEwURABsFARARBQUAGQUBBBUFQQAdBQFAEAUDABgFAQIUBSEAHAUBIBIFCQAaBQEIFgWBAEAFAAAQBQIAFwWBARMFGQAbBQEYEQUHABkFAQYVBWEAHQUBYBAFBAAYBQEDFAUxABwFATASBQ0AGgUBDBYFwQBABQAAEQAKABEREQAAAAAFAAAAAAAACQAAAAALAAAAAAAAAAARAA8KERERAwoHAAEACQsLAAAJBgsAAAsABhEAAAAREREAQYGEAQshCwAAAAAAAAAAEQAKChEREQAKAAACAAkLAAAACQALAAALAEG7hAELAQwAQceEAQsVDAAAAAAMAAAAAAkMAAAAAAAMAAAMAEH1hAELAQ4AQYGFAQsVDQAAAAQNAAAAAAkOAAAAAAAOAAAOAEGvhQELARAAQbuFAQseDwAAAAAPAAAAAAkQAAAAAAAQAAAQAAASAAAAEhISAEHyhQELDhIAAAASEhIAAAAAAAAJAEGjhgELAQsAQa+GAQsVCgAAAAAKAAAAAAkLAAAAAAALAAALAEHdhgELAQwAQemGAQsnDAAAAAAMAAAAAAkMAAAAAAAMAAAMAAAwMTIzNDU2Nzg5QUJDREVGAEG0hwELARkAQduHAQsF//////8AQaCIAQtXGRJEOwI/LEcUPTMwChsGRktFNw9JDo4XA0AdPGkrNh9KLRwBICUpIQgMFRYiLhA4Pgs0MRhkdHV2L0EJfzkRI0MyQomKiwUEJignDSoeNYwHGkiTE5SVAEGAiQELig5JbGxlZ2FsIGJ5dGUgc2VxdWVuY2UARG9tYWluIGVycm9yAFJlc3VsdCBub3QgcmVwcmVzZW50YWJsZQBOb3QgYSB0dHkAUGVybWlzc2lvbiBkZW5pZWQAT3BlcmF0aW9uIG5vdCBwZXJtaXR0ZWQATm8gc3VjaCBmaWxlIG9yIGRpcmVjdG9yeQBObyBzdWNoIHByb2Nlc3MARmlsZSBleGlzdHMAVmFsdWUgdG9vIGxhcmdlIGZvciBkYXRhIHR5cGUATm8gc3BhY2UgbGVmdCBvbiBkZXZpY2UAT3V0IG9mIG1lbW9yeQBSZXNvdXJjZSBidXN5AEludGVycnVwdGVkIHN5c3RlbSBjYWxsAFJlc291cmNlIHRlbXBvcmFyaWx5IHVuYXZhaWxhYmxlAEludmFsaWQgc2VlawBDcm9zcy1kZXZpY2UgbGluawBSZWFkLW9ubHkgZmlsZSBzeXN0ZW0ARGlyZWN0b3J5IG5vdCBlbXB0eQBDb25uZWN0aW9uIHJlc2V0IGJ5IHBlZXIAT3BlcmF0aW9uIHRpbWVkIG91dABDb25uZWN0aW9uIHJlZnVzZWQASG9zdCBpcyBkb3duAEhvc3QgaXMgdW5yZWFjaGFibGUAQWRkcmVzcyBpbiB1c2UAQnJva2VuIHBpcGUASS9PIGVycm9yAE5vIHN1Y2ggZGV2aWNlIG9yIGFkZHJlc3MAQmxvY2sgZGV2aWNlIHJlcXVpcmVkAE5vIHN1Y2ggZGV2aWNlAE5vdCBhIGRpcmVjdG9yeQBJcyBhIGRpcmVjdG9yeQBUZXh0IGZpbGUgYnVzeQBFeGVjIGZvcm1hdCBlcnJvcgBJbnZhbGlkIGFyZ3VtZW50AEFyZ3VtZW50IGxpc3QgdG9vIGxvbmcAU3ltYm9saWMgbGluayBsb29wAEZpbGVuYW1lIHRvbyBsb25nAFRvbyBtYW55IG9wZW4gZmlsZXMgaW4gc3lzdGVtAE5vIGZpbGUgZGVzY3JpcHRvcnMgYXZhaWxhYmxlAEJhZCBmaWxlIGRlc2NyaXB0b3IATm8gY2hpbGQgcHJvY2VzcwBCYWQgYWRkcmVzcwBGaWxlIHRvbyBsYXJnZQBUb28gbWFueSBsaW5rcwBObyBsb2NrcyBhdmFpbGFibGUAUmVzb3VyY2UgZGVhZGxvY2sgd291bGQgb2NjdXIAU3RhdGUgbm90IHJlY292ZXJhYmxlAFByZXZpb3VzIG93bmVyIGRpZWQAT3BlcmF0aW9uIGNhbmNlbGVkAEZ1bmN0aW9uIG5vdCBpbXBsZW1lbnRlZABObyBtZXNzYWdlIG9mIGRlc2lyZWQgdHlwZQBJZGVudGlmaWVyIHJlbW92ZWQARGV2aWNlIG5vdCBhIHN0cmVhbQBObyBkYXRhIGF2YWlsYWJsZQBEZXZpY2UgdGltZW91dABPdXQgb2Ygc3RyZWFtcyByZXNv
dXJjZXMATGluayBoYXMgYmVlbiBzZXZlcmVkAFByb3RvY29sIGVycm9yAEJhZCBtZXNzYWdlAEZpbGUgZGVzY3JpcHRvciBpbiBiYWQgc3RhdGUATm90IGEgc29ja2V0AERlc3RpbmF0aW9uIGFkZHJlc3MgcmVxdWlyZWQATWVzc2FnZSB0b28gbGFyZ2UAUHJvdG9jb2wgd3JvbmcgdHlwZSBmb3Igc29ja2V0AFByb3RvY29sIG5vdCBhdmFpbGFibGUAUHJvdG9jb2wgbm90IHN1cHBvcnRlZABTb2NrZXQgdHlwZSBub3Qgc3VwcG9ydGVkAE5vdCBzdXBwb3J0ZWQAUHJvdG9jb2wgZmFtaWx5IG5vdCBzdXBwb3J0ZWQAQWRkcmVzcyBmYW1pbHkgbm90IHN1cHBvcnRlZCBieSBwcm90b2NvbABBZGRyZXNzIG5vdCBhdmFpbGFibGUATmV0d29yayBpcyBkb3duAE5ldHdvcmsgdW5yZWFjaGFibGUAQ29ubmVjdGlvbiByZXNldCBieSBuZXR3b3JrAENvbm5lY3Rpb24gYWJvcnRlZABObyBidWZmZXIgc3BhY2UgYXZhaWxhYmxlAFNvY2tldCBpcyBjb25uZWN0ZWQAU29ja2V0IG5vdCBjb25uZWN0ZWQAQ2Fubm90IHNlbmQgYWZ0ZXIgc29ja2V0IHNodXRkb3duAE9wZXJhdGlvbiBhbHJlYWR5IGluIHByb2dyZXNzAE9wZXJhdGlvbiBpbiBwcm9ncmVzcwBTdGFsZSBmaWxlIGhhbmRsZQBSZW1vdGUgSS9PIGVycm9yAFF1b3RhIGV4Y2VlZGVkAE5vIG1lZGl1bSBmb3VuZABXcm9uZyBtZWRpdW0gdHlwZQBObyBlcnJvciBpbmZvcm1hdGlvbgBBkJcBC1JQUFAACgAAAAsAAAAMAAAADQAAAA4AAAAPAAAAEAAAABEAAAASAAAACwAAAAwAAAANAAAADgAAAA8AAAAQAAAAEQAAAAEAAAAIAAAAlEsAALRLAEGQmQELAgxQAEHImQELCR8AAADkTAAAAwBB5JkBC4wBLfRRWM+MscBG9rXLKTEDxwRbcDC0Xf0geH+LmthZKVBoSImrp1YDbP+3zYg/1He0K6WjcPG65Kj8QYP92W/hinovLXSWBx8NCV4Ddixw90ClLKdvV0GoqnTfoFhkA0rHxDxTrq9fGAQVseNtKIarDKS/Q/DpUIE5VxZSN/////////////////////8=\";Fu(So)||(So=h(So));function Nu(d){try{if(d==So&&W)return new Uint8Array(W);var E=ba(d);if(E)return E;if(m)return m(d);throw\"sync fetching of the wasm failed: you can preload it to Module['wasmBinary'] manually, or emcc.py will do that for you when generating HTML (but not JS)\"}catch(I){Sr(I)}}function Qh(d,E){var I,D,O;try{O=Nu(d),D=new WebAssembly.Module(O),I=new WebAssembly.Instance(D,E)}catch(ie){var V=ie.toString();throw x(\"failed to compile wasm module: \"+V),(V.includes(\"imported Memory\")||V.includes(\"memory import\"))&&x(\"Memory size incompatibility issues may be due to changing INITIAL_MEMORY at runtime to something too large. 
Use ALLOW_MEMORY_GROWTH to allow any size memory (and also make sure not to set INITIAL_MEMORY at runtime to something smaller than it was at compile time).\"),ie}return[I,D]}function vh(){var d={a:Qa};function E(O,V){var ie=O.exports;r.asm=ie,A=r.asm.u,mi(A.buffer),Qr=r.asm.pa,SA(r.asm.v),PA(\"wasm-instantiate\")}if(xA(\"wasm-instantiate\"),r.instantiateWasm)try{var I=r.instantiateWasm(d,E);return I}catch(O){return x(\"Module.instantiateWasm callback failed with error: \"+O),!1}var D=Qh(So,d);return E(D[0]),r.asm}var oe,Oi;function ko(d){for(;d.length>0;){var E=d.shift();if(typeof E==\"function\"){E(r);continue}var I=E.func;typeof I==\"number\"?E.arg===void 0?Qr.get(I)():Qr.get(I)(E.arg):I(E.arg===void 0?null:E.arg)}}function jn(d,E){var I=new Date(fe[d>>2]*1e3);fe[E>>2]=I.getUTCSeconds(),fe[E+4>>2]=I.getUTCMinutes(),fe[E+8>>2]=I.getUTCHours(),fe[E+12>>2]=I.getUTCDate(),fe[E+16>>2]=I.getUTCMonth(),fe[E+20>>2]=I.getUTCFullYear()-1900,fe[E+24>>2]=I.getUTCDay(),fe[E+36>>2]=0,fe[E+32>>2]=0;var D=Date.UTC(I.getUTCFullYear(),0,1,0,0,0,0),O=(I.getTime()-D)/(1e3*60*60*24)|0;return fe[E+28>>2]=O,jn.GMTString||(jn.GMTString=Fe(\"GMT\")),fe[E+40>>2]=jn.GMTString,E}function Lu(d,E){return jn(d,E)}var vt={splitPath:function(d){var E=/^(\\/?|)([\\s\\S]*?)((?:\\.{1,2}|[^\\/]+?|)(\\.[^.\\/]*|))(?:[\\/]*)$/;return E.exec(d).slice(1)},normalizeArray:function(d,E){for(var I=0,D=d.length-1;D>=0;D--){var O=d[D];O===\".\"?d.splice(D,1):O===\"..\"?(d.splice(D,1),I++):I&&(d.splice(D,1),I--)}if(E)for(;I;I--)d.unshift(\"..\");return d},normalize:function(d){var E=d.charAt(0)===\"/\",I=d.substr(-1)===\"/\";return d=vt.normalizeArray(d.split(\"/\").filter(function(D){return!!D}),!E).join(\"/\"),!d&&!E&&(d=\".\"),d&&I&&(d+=\"/\"),(E?\"/\":\"\")+d},dirname:function(d){var E=vt.splitPath(d),I=E[0],D=E[1];return!I&&!D?\".\":(D&&(D=D.substr(0,D.length-1)),I+D)},basename:function(d){if(d===\"/\")return\"/\";d=vt.normalize(d),d=d.replace(/\\/$/,\"\");var E=d.lastIndexOf(\"/\");return E===-1?d:d.substr(E+1)},extname:function(d){return vt.splitPath(d)[3]},join:function(){var d=Array.prototype.slice.call(arguments,0);return vt.normalize(d.join(\"/\"))},join2:function(d,E){return vt.normalize(d+\"/\"+E)}};function Gl(){if(typeof crypto==\"object\"&&typeof crypto.getRandomValues==\"function\"){var d=new Uint8Array(1);return function(){return crypto.getRandomValues(d),d[0]}}else if(g)try{var E=require(\"crypto\");return function(){return E.randomBytes(1)[0]}}catch(I){}return function(){Sr(\"randomDevice\")}}var Gn={resolve:function(){for(var d=\"\",E=!1,I=arguments.length-1;I>=-1&&!E;I--){var D=I>=0?arguments[I]:v.cwd();if(typeof D!=\"string\")throw new TypeError(\"Arguments to path.resolve must be strings\");if(!D)return\"\";d=D+\"/\"+d,E=D.charAt(0)===\"/\"}return d=vt.normalizeArray(d.split(\"/\").filter(function(O){return!!O}),!E).join(\"/\"),(E?\"/\":\"\")+d||\".\"},relative:function(d,E){d=Gn.resolve(d).substr(1),E=Gn.resolve(E).substr(1);function I(_e){for(var ot=0;ot<_e.length&&_e[ot]===\"\";ot++);for(var wt=_e.length-1;wt>=0&&_e[wt]===\"\";wt--);return ot>wt?[]:_e.slice(ot,wt-ot+1)}for(var D=I(d.split(\"/\")),O=I(E.split(\"/\")),V=Math.min(D.length,O.length),ie=V,Be=0;Be<V;Be++)if(D[Be]!==O[Be]){ie=Be;break}for(var Ce=[],Be=ie;Be<D.length;Be++)Ce.push(\"..\");return Ce=Ce.concat(O.slice(ie)),Ce.join(\"/\")}},fs={ttys:[],init:function(){},shutdown:function(){},register:function(d,E){fs.ttys[d]={input:[],output:[],ops:E},v.registerDevice(d,fs.stream_ops)},stream_ops:{open:function(d){var E=fs.ttys[d.node.rdev];if(!E)throw 
new v.ErrnoError(43);d.tty=E,d.seekable=!1},close:function(d){d.tty.ops.flush(d.tty)},flush:function(d){d.tty.ops.flush(d.tty)},read:function(d,E,I,D,O){if(!d.tty||!d.tty.ops.get_char)throw new v.ErrnoError(60);for(var V=0,ie=0;ie<D;ie++){var Be;try{Be=d.tty.ops.get_char(d.tty)}catch(Ce){throw new v.ErrnoError(29)}if(Be===void 0&&V===0)throw new v.ErrnoError(6);if(Be==null)break;V++,E[I+ie]=Be}return V&&(d.node.timestamp=Date.now()),V},write:function(d,E,I,D,O){if(!d.tty||!d.tty.ops.put_char)throw new v.ErrnoError(60);try{for(var V=0;V<D;V++)d.tty.ops.put_char(d.tty,E[I+V])}catch(ie){throw new v.ErrnoError(29)}return D&&(d.node.timestamp=Date.now()),V}},default_tty_ops:{get_char:function(d){if(!d.input.length){var E=null;if(g){var I=256,D=Buffer.alloc?Buffer.alloc(I):new Buffer(I),O=0;try{O=y.readSync(process.stdin.fd,D,0,I,null)}catch(V){if(V.toString().includes(\"EOF\"))O=0;else throw V}O>0?E=D.slice(0,O).toString(\"utf-8\"):E=null}else typeof window!=\"undefined\"&&typeof window.prompt==\"function\"?(E=window.prompt(\"Input: \"),E!==null&&(E+=`\n`)):typeof readline==\"function\"&&(E=readline(),E!==null&&(E+=`\n`));if(!E)return null;d.input=RA(E,!0)}return d.input.shift()},put_char:function(d,E){E===null||E===10?(S(qe(d.output,0)),d.output=[]):E!=0&&d.output.push(E)},flush:function(d){d.output&&d.output.length>0&&(S(qe(d.output,0)),d.output=[])}},default_tty1_ops:{put_char:function(d,E){E===null||E===10?(x(qe(d.output,0)),d.output=[]):E!=0&&d.output.push(E)},flush:function(d){d.output&&d.output.length>0&&(x(qe(d.output,0)),d.output=[])}}};function hs(d){for(var E=Y(d,65536),I=Et(E);d<E;)pe[I+d++]=0;return I}var pt={ops_table:null,mount:function(d){return pt.createNode(null,\"/\",16384|511,0)},createNode:function(d,E,I,D){if(v.isBlkdev(I)||v.isFIFO(I))throw new v.ErrnoError(63);pt.ops_table||(pt.ops_table={dir:{node:{getattr:pt.node_ops.getattr,setattr:pt.node_ops.setattr,lookup:pt.node_ops.lookup,mknod:pt.node_ops.mknod,rename:pt.node_ops.rename,unlink:pt.node_ops.unlink,rmdir:pt.node_ops.rmdir,readdir:pt.node_ops.readdir,symlink:pt.node_ops.symlink},stream:{llseek:pt.stream_ops.llseek}},file:{node:{getattr:pt.node_ops.getattr,setattr:pt.node_ops.setattr},stream:{llseek:pt.stream_ops.llseek,read:pt.stream_ops.read,write:pt.stream_ops.write,allocate:pt.stream_ops.allocate,mmap:pt.stream_ops.mmap,msync:pt.stream_ops.msync}},link:{node:{getattr:pt.node_ops.getattr,setattr:pt.node_ops.setattr,readlink:pt.node_ops.readlink},stream:{}},chrdev:{node:{getattr:pt.node_ops.getattr,setattr:pt.node_ops.setattr},stream:v.chrdev_stream_ops}});var O=v.createNode(d,E,I,D);return v.isDir(O.mode)?(O.node_ops=pt.ops_table.dir.node,O.stream_ops=pt.ops_table.dir.stream,O.contents={}):v.isFile(O.mode)?(O.node_ops=pt.ops_table.file.node,O.stream_ops=pt.ops_table.file.stream,O.usedBytes=0,O.contents=null):v.isLink(O.mode)?(O.node_ops=pt.ops_table.link.node,O.stream_ops=pt.ops_table.link.stream):v.isChrdev(O.mode)&&(O.node_ops=pt.ops_table.chrdev.node,O.stream_ops=pt.ops_table.chrdev.stream),O.timestamp=Date.now(),d&&(d.contents[E]=O,d.timestamp=O.timestamp),O},getFileDataAsTypedArray:function(d){return d.contents?d.contents.subarray?d.contents.subarray(0,d.usedBytes):new Uint8Array(d.contents):new Uint8Array(0)},expandFileStorage:function(d,E){var I=d.contents?d.contents.length:0;if(!(I>=E)){var D=1024*1024;E=Math.max(E,I*(I<D?2:1.125)>>>0),I!=0&&(E=Math.max(E,256));var O=d.contents;d.contents=new 
Uint8Array(E),d.usedBytes>0&&d.contents.set(O.subarray(0,d.usedBytes),0)}},resizeFileStorage:function(d,E){if(d.usedBytes!=E)if(E==0)d.contents=null,d.usedBytes=0;else{var I=d.contents;d.contents=new Uint8Array(E),I&&d.contents.set(I.subarray(0,Math.min(E,d.usedBytes))),d.usedBytes=E}},node_ops:{getattr:function(d){var E={};return E.dev=v.isChrdev(d.mode)?d.id:1,E.ino=d.id,E.mode=d.mode,E.nlink=1,E.uid=0,E.gid=0,E.rdev=d.rdev,v.isDir(d.mode)?E.size=4096:v.isFile(d.mode)?E.size=d.usedBytes:v.isLink(d.mode)?E.size=d.link.length:E.size=0,E.atime=new Date(d.timestamp),E.mtime=new Date(d.timestamp),E.ctime=new Date(d.timestamp),E.blksize=4096,E.blocks=Math.ceil(E.size/E.blksize),E},setattr:function(d,E){E.mode!==void 0&&(d.mode=E.mode),E.timestamp!==void 0&&(d.timestamp=E.timestamp),E.size!==void 0&&pt.resizeFileStorage(d,E.size)},lookup:function(d,E){throw v.genericErrors[44]},mknod:function(d,E,I,D){return pt.createNode(d,E,I,D)},rename:function(d,E,I){if(v.isDir(d.mode)){var D;try{D=v.lookupNode(E,I)}catch(V){}if(D)for(var O in D.contents)throw new v.ErrnoError(55)}delete d.parent.contents[d.name],d.parent.timestamp=Date.now(),d.name=I,E.contents[I]=d,E.timestamp=d.parent.timestamp,d.parent=E},unlink:function(d,E){delete d.contents[E],d.timestamp=Date.now()},rmdir:function(d,E){var I=v.lookupNode(d,E);for(var D in I.contents)throw new v.ErrnoError(55);delete d.contents[E],d.timestamp=Date.now()},readdir:function(d){var E=[\".\",\"..\"];for(var I in d.contents)!d.contents.hasOwnProperty(I)||E.push(I);return E},symlink:function(d,E,I){var D=pt.createNode(d,E,511|40960,0);return D.link=I,D},readlink:function(d){if(!v.isLink(d.mode))throw new v.ErrnoError(28);return d.link}},stream_ops:{read:function(d,E,I,D,O){var V=d.node.contents;if(O>=d.node.usedBytes)return 0;var ie=Math.min(d.node.usedBytes-O,D);if(ie>8&&V.subarray)E.set(V.subarray(O,O+ie),I);else for(var Be=0;Be<ie;Be++)E[I+Be]=V[O+Be];return ie},write:function(d,E,I,D,O,V){if(E.buffer===pe.buffer&&(V=!1),!D)return 0;var ie=d.node;if(ie.timestamp=Date.now(),E.subarray&&(!ie.contents||ie.contents.subarray)){if(V)return ie.contents=E.subarray(I,I+D),ie.usedBytes=D,D;if(ie.usedBytes===0&&O===0)return ie.contents=E.slice(I,I+D),ie.usedBytes=D,D;if(O+D<=ie.usedBytes)return ie.contents.set(E.subarray(I,I+D),O),D}if(pt.expandFileStorage(ie,O+D),ie.contents.subarray&&E.subarray)ie.contents.set(E.subarray(I,I+D),O);else for(var Be=0;Be<D;Be++)ie.contents[O+Be]=E[I+Be];return ie.usedBytes=Math.max(ie.usedBytes,O+D),D},llseek:function(d,E,I){var D=E;if(I===1?D+=d.position:I===2&&v.isFile(d.node.mode)&&(D+=d.node.usedBytes),D<0)throw new v.ErrnoError(28);return D},allocate:function(d,E,I){pt.expandFileStorage(d.node,E+I),d.node.usedBytes=Math.max(d.node.usedBytes,E+I)},mmap:function(d,E,I,D,O,V){if(E!==0)throw new v.ErrnoError(28);if(!v.isFile(d.node.mode))throw new v.ErrnoError(43);var ie,Be,Ce=d.node.contents;if(!(V&2)&&Ce.buffer===ve)Be=!1,ie=Ce.byteOffset;else{if((D>0||D+I<Ce.length)&&(Ce.subarray?Ce=Ce.subarray(D,D+I):Ce=Array.prototype.slice.call(Ce,D,D+I)),Be=!0,ie=hs(I),!ie)throw new v.ErrnoError(48);pe.set(Ce,ie)}return{ptr:ie,allocated:Be}},msync:function(d,E,I,D,O){if(!v.isFile(d.node.mode))throw new v.ErrnoError(43);if(O&2)return 0;var V=pt.stream_ops.write(d,E,0,D,I,!1);return 
0}}},xo={EPERM:63,ENOENT:44,ESRCH:71,EINTR:27,EIO:29,ENXIO:60,E2BIG:1,ENOEXEC:45,EBADF:8,ECHILD:12,EAGAIN:6,EWOULDBLOCK:6,ENOMEM:48,EACCES:2,EFAULT:21,ENOTBLK:105,EBUSY:10,EEXIST:20,EXDEV:75,ENODEV:43,ENOTDIR:54,EISDIR:31,EINVAL:28,ENFILE:41,EMFILE:33,ENOTTY:59,ETXTBSY:74,EFBIG:22,ENOSPC:51,ESPIPE:70,EROFS:69,EMLINK:34,EPIPE:64,EDOM:18,ERANGE:68,ENOMSG:49,EIDRM:24,ECHRNG:106,EL2NSYNC:156,EL3HLT:107,EL3RST:108,ELNRNG:109,EUNATCH:110,ENOCSI:111,EL2HLT:112,EDEADLK:16,ENOLCK:46,EBADE:113,EBADR:114,EXFULL:115,ENOANO:104,EBADRQC:103,EBADSLT:102,EDEADLOCK:16,EBFONT:101,ENOSTR:100,ENODATA:116,ETIME:117,ENOSR:118,ENONET:119,ENOPKG:120,EREMOTE:121,ENOLINK:47,EADV:122,ESRMNT:123,ECOMM:124,EPROTO:65,EMULTIHOP:36,EDOTDOT:125,EBADMSG:9,ENOTUNIQ:126,EBADFD:127,EREMCHG:128,ELIBACC:129,ELIBBAD:130,ELIBSCN:131,ELIBMAX:132,ELIBEXEC:133,ENOSYS:52,ENOTEMPTY:55,ENAMETOOLONG:37,ELOOP:32,EOPNOTSUPP:138,EPFNOSUPPORT:139,ECONNRESET:15,ENOBUFS:42,EAFNOSUPPORT:5,EPROTOTYPE:67,ENOTSOCK:57,ENOPROTOOPT:50,ESHUTDOWN:140,ECONNREFUSED:14,EADDRINUSE:3,ECONNABORTED:13,ENETUNREACH:40,ENETDOWN:38,ETIMEDOUT:73,EHOSTDOWN:142,EHOSTUNREACH:23,EINPROGRESS:26,EALREADY:7,EDESTADDRREQ:17,EMSGSIZE:35,EPROTONOSUPPORT:66,ESOCKTNOSUPPORT:137,EADDRNOTAVAIL:4,ENETRESET:39,EISCONN:30,ENOTCONN:53,ETOOMANYREFS:141,EUSERS:136,EDQUOT:19,ESTALE:72,ENOTSUP:138,ENOMEDIUM:148,EILSEQ:25,EOVERFLOW:61,ECANCELED:11,ENOTRECOVERABLE:56,EOWNERDEAD:62,ESTRPIPE:135},lt={isWindows:!1,staticInit:function(){lt.isWindows=!!process.platform.match(/^win/);var d={fs:Oe.constants};d.fs&&(d=d.fs),lt.flagsForNodeMap={1024:d.O_APPEND,64:d.O_CREAT,128:d.O_EXCL,256:d.O_NOCTTY,0:d.O_RDONLY,2:d.O_RDWR,4096:d.O_SYNC,512:d.O_TRUNC,1:d.O_WRONLY}},bufferFrom:function(d){return Buffer.alloc?Buffer.from(d):new Buffer(d)},convertNodeCode:function(d){var E=d.code;return xo[E]},mount:function(d){return lt.createNode(null,\"/\",lt.getMode(d.opts.root),0)},createNode:function(d,E,I,D){if(!v.isDir(I)&&!v.isFile(I)&&!v.isLink(I))throw new v.ErrnoError(28);var O=v.createNode(d,E,I);return O.node_ops=lt.node_ops,O.stream_ops=lt.stream_ops,O},getMode:function(d){var E;try{E=Oe.lstatSync(d),lt.isWindows&&(E.mode=E.mode|(E.mode&292)>>2)}catch(I){throw I.code?new v.ErrnoError(lt.convertNodeCode(I)):I}return E.mode},realPath:function(d){for(var E=[];d.parent!==d;)E.push(d.name),d=d.parent;return E.push(d.mount.opts.root),E.reverse(),vt.join.apply(null,E)},flagsForNode:function(d){d&=~2097152,d&=~2048,d&=~32768,d&=~524288;var E=0;for(var I in lt.flagsForNodeMap)d&I&&(E|=lt.flagsForNodeMap[I],d^=I);if(d)throw new v.ErrnoError(28);return E},node_ops:{getattr:function(d){var E=lt.realPath(d),I;try{I=Oe.lstatSync(E)}catch(D){throw D.code?new v.ErrnoError(lt.convertNodeCode(D)):D}return lt.isWindows&&!I.blksize&&(I.blksize=4096),lt.isWindows&&!I.blocks&&(I.blocks=(I.size+I.blksize-1)/I.blksize|0),{dev:I.dev,ino:I.ino,mode:I.mode,nlink:I.nlink,uid:I.uid,gid:I.gid,rdev:I.rdev,size:I.size,atime:I.atime,mtime:I.mtime,ctime:I.ctime,blksize:I.blksize,blocks:I.blocks}},setattr:function(d,E){var I=lt.realPath(d);try{if(E.mode!==void 0&&(Oe.chmodSync(I,E.mode),d.mode=E.mode),E.timestamp!==void 0){var D=new Date(E.timestamp);Oe.utimesSync(I,D,D)}E.size!==void 0&&Oe.truncateSync(I,E.size)}catch(O){throw O.code?new v.ErrnoError(lt.convertNodeCode(O)):O}},lookup:function(d,E){var I=vt.join2(lt.realPath(d),E),D=lt.getMode(I);return lt.createNode(d,E,D)},mknod:function(d,E,I,D){var 
O=lt.createNode(d,E,I,D),V=lt.realPath(O);try{v.isDir(O.mode)?Oe.mkdirSync(V,O.mode):Oe.writeFileSync(V,\"\",{mode:O.mode})}catch(ie){throw ie.code?new v.ErrnoError(lt.convertNodeCode(ie)):ie}return O},rename:function(d,E,I){var D=lt.realPath(d),O=vt.join2(lt.realPath(E),I);try{Oe.renameSync(D,O)}catch(V){throw V.code?new v.ErrnoError(lt.convertNodeCode(V)):V}d.name=I},unlink:function(d,E){var I=vt.join2(lt.realPath(d),E);try{Oe.unlinkSync(I)}catch(D){throw D.code?new v.ErrnoError(lt.convertNodeCode(D)):D}},rmdir:function(d,E){var I=vt.join2(lt.realPath(d),E);try{Oe.rmdirSync(I)}catch(D){throw D.code?new v.ErrnoError(lt.convertNodeCode(D)):D}},readdir:function(d){var E=lt.realPath(d);try{return Oe.readdirSync(E)}catch(I){throw I.code?new v.ErrnoError(lt.convertNodeCode(I)):I}},symlink:function(d,E,I){var D=vt.join2(lt.realPath(d),E);try{Oe.symlinkSync(I,D)}catch(O){throw O.code?new v.ErrnoError(lt.convertNodeCode(O)):O}},readlink:function(d){var E=lt.realPath(d);try{return E=Oe.readlinkSync(E),E=Mu.relative(Mu.resolve(d.mount.opts.root),E),E}catch(I){throw I.code?new v.ErrnoError(lt.convertNodeCode(I)):I}}},stream_ops:{open:function(d){var E=lt.realPath(d.node);try{v.isFile(d.node.mode)&&(d.nfd=Oe.openSync(E,lt.flagsForNode(d.flags)))}catch(I){throw I.code?new v.ErrnoError(lt.convertNodeCode(I)):I}},close:function(d){try{v.isFile(d.node.mode)&&d.nfd&&Oe.closeSync(d.nfd)}catch(E){throw E.code?new v.ErrnoError(lt.convertNodeCode(E)):E}},read:function(d,E,I,D,O){if(D===0)return 0;try{return Oe.readSync(d.nfd,lt.bufferFrom(E.buffer),I,D,O)}catch(V){throw new v.ErrnoError(lt.convertNodeCode(V))}},write:function(d,E,I,D,O){try{return Oe.writeSync(d.nfd,lt.bufferFrom(E.buffer),I,D,O)}catch(V){throw new v.ErrnoError(lt.convertNodeCode(V))}},llseek:function(d,E,I){var D=E;if(I===1)D+=d.position;else if(I===2&&v.isFile(d.node.mode))try{var O=Oe.fstatSync(d.nfd);D+=O.size}catch(V){throw new v.ErrnoError(lt.convertNodeCode(V))}if(D<0)throw new v.ErrnoError(28);return D},mmap:function(d,E,I,D,O,V){if(E!==0)throw new v.ErrnoError(28);if(!v.isFile(d.node.mode))throw new v.ErrnoError(43);var ie=hs(I);return lt.stream_ops.read(d,pe,ie,I,D),{ptr:ie,allocated:!0}},msync:function(d,E,I,D,O){if(!v.isFile(d.node.mode))throw new v.ErrnoError(43);if(O&2)return 0;var V=lt.stream_ops.write(d,E,0,D,I,!1);return 0}}},mn={lookupPath:function(d){return{path:d,node:{mode:lt.getMode(d)}}},createStandardStreams:function(){v.streams[0]={fd:0,nfd:0,position:0,path:\"\",flags:0,tty:!0,seekable:!1};for(var d=1;d<3;d++)v.streams[d]={fd:d,nfd:d,position:0,path:\"\",flags:577,tty:!0,seekable:!1}},cwd:function(){return process.cwd()},chdir:function(){process.chdir.apply(void 0,arguments)},mknod:function(d,E){v.isDir(d)?Oe.mkdirSync(d,E):Oe.writeFileSync(d,\"\",{mode:E})},mkdir:function(){Oe.mkdirSync.apply(void 0,arguments)},symlink:function(){Oe.symlinkSync.apply(void 0,arguments)},rename:function(){Oe.renameSync.apply(void 0,arguments)},rmdir:function(){Oe.rmdirSync.apply(void 0,arguments)},readdir:function(){Oe.readdirSync.apply(void 0,arguments)},unlink:function(){Oe.unlinkSync.apply(void 0,arguments)},readlink:function(){return Oe.readlinkSync.apply(void 0,arguments)},stat:function(){return Oe.statSync.apply(void 0,arguments)},lstat:function(){return Oe.lstatSync.apply(void 0,arguments)},chmod:function(){Oe.chmodSync.apply(void 0,arguments)},fchmod:function(){Oe.fchmodSync.apply(void 0,arguments)},chown:function(){Oe.chownSync.apply(void 0,arguments)},fchown:function(){Oe.fchownSync.apply(void 
0,arguments)},truncate:function(){Oe.truncateSync.apply(void 0,arguments)},ftruncate:function(d,E){if(E<0)throw new v.ErrnoError(28);Oe.ftruncateSync.apply(void 0,arguments)},utime:function(){Oe.utimesSync.apply(void 0,arguments)},open:function(d,E,I,D){typeof E==\"string\"&&(E=Xs.modeStringToFlags(E));var O=Oe.openSync(d,lt.flagsForNode(E),I),V=D!=null?D:v.nextfd(O),ie={fd:V,nfd:O,position:0,path:d,flags:E,seekable:!0};return v.streams[V]=ie,ie},close:function(d){d.stream_ops||Oe.closeSync(d.nfd),v.closeStream(d.fd)},llseek:function(d,E,I){if(d.stream_ops)return Xs.llseek(d,E,I);var D=E;if(I===1)D+=d.position;else if(I===2)D+=Oe.fstatSync(d.nfd).size;else if(I!==0)throw new v.ErrnoError(xo.EINVAL);if(D<0)throw new v.ErrnoError(xo.EINVAL);return d.position=D,D},read:function(d,E,I,D,O){if(d.stream_ops)return Xs.read(d,E,I,D,O);var V=typeof O!=\"undefined\";!V&&d.seekable&&(O=d.position);var ie=Oe.readSync(d.nfd,lt.bufferFrom(E.buffer),I,D,O);return V||(d.position+=ie),ie},write:function(d,E,I,D,O){if(d.stream_ops)return Xs.write(d,E,I,D,O);d.flags&+\"1024\"&&v.llseek(d,0,+\"2\");var V=typeof O!=\"undefined\";!V&&d.seekable&&(O=d.position);var ie=Oe.writeSync(d.nfd,lt.bufferFrom(E.buffer),I,D,O);return V||(d.position+=ie),ie},allocate:function(){throw new v.ErrnoError(xo.EOPNOTSUPP)},mmap:function(d,E,I,D,O,V){if(d.stream_ops)return Xs.mmap(d,E,I,D,O,V);if(E!==0)throw new v.ErrnoError(28);var ie=hs(I);return v.read(d,pe,ie,I,D),{ptr:ie,allocated:!0}},msync:function(d,E,I,D,O){return d.stream_ops?Xs.msync(d,E,I,D,O):(O&2||v.write(d,E,0,D,I),0)},munmap:function(){return 0},ioctl:function(){throw new v.ErrnoError(xo.ENOTTY)}},v={root:null,mounts:[],devices:{},streams:[],nextInode:1,nameTable:null,currentPath:\"/\",initialized:!1,ignorePermissions:!0,trackingDelegate:{},tracking:{openFlags:{READ:1,WRITE:2}},ErrnoError:null,genericErrors:{},filesystems:null,syncFSRequests:0,lookupPath:function(d,E){if(d=Gn.resolve(v.cwd(),d),E=E||{},!d)return{path:\"\",node:null};var I={follow_mount:!0,recurse_count:0};for(var D in I)E[D]===void 0&&(E[D]=I[D]);if(E.recurse_count>8)throw new v.ErrnoError(32);for(var O=vt.normalizeArray(d.split(\"/\").filter(function(ut){return!!ut}),!1),V=v.root,ie=\"/\",Be=0;Be<O.length;Be++){var Ce=Be===O.length-1;if(Ce&&E.parent)break;if(V=v.lookupNode(V,O[Be]),ie=vt.join2(ie,O[Be]),v.isMountpoint(V)&&(!Ce||Ce&&E.follow_mount)&&(V=V.mounted.root),!Ce||E.follow)for(var _e=0;v.isLink(V.mode);){var ot=v.readlink(ie);ie=Gn.resolve(vt.dirname(ie),ot);var wt=v.lookupPath(ie,{recurse_count:E.recurse_count});if(V=wt.node,_e++>40)throw new v.ErrnoError(32)}}return{path:ie,node:V}},getPath:function(d){for(var E;;){if(v.isRoot(d)){var I=d.mount.mountpoint;return E?I[I.length-1]!==\"/\"?I+\"/\"+E:I+E:I}E=E?d.name+\"/\"+E:d.name,d=d.parent}},hashName:function(d,E){for(var I=0,D=0;D<E.length;D++)I=(I<<5)-I+E.charCodeAt(D)|0;return(d+I>>>0)%v.nameTable.length},hashAddNode:function(d){var E=v.hashName(d.parent.id,d.name);d.name_next=v.nameTable[E],v.nameTable[E]=d},hashRemoveNode:function(d){var E=v.hashName(d.parent.id,d.name);if(v.nameTable[E]===d)v.nameTable[E]=d.name_next;else for(var I=v.nameTable[E];I;){if(I.name_next===d){I.name_next=d.name_next;break}I=I.name_next}},lookupNode:function(d,E){var I=v.mayLookup(d);if(I)throw new v.ErrnoError(I,d);for(var D=v.hashName(d.id,E),O=v.nameTable[D];O;O=O.name_next){var V=O.name;if(O.parent.id===d.id&&V===E)return O}return v.lookup(d,E)},createNode:function(d,E,I,D){var O=new v.FSNode(d,E,I,D);return 
v.hashAddNode(O),O},destroyNode:function(d){v.hashRemoveNode(d)},isRoot:function(d){return d===d.parent},isMountpoint:function(d){return!!d.mounted},isFile:function(d){return(d&61440)==32768},isDir:function(d){return(d&61440)==16384},isLink:function(d){return(d&61440)==40960},isChrdev:function(d){return(d&61440)==8192},isBlkdev:function(d){return(d&61440)==24576},isFIFO:function(d){return(d&61440)==4096},isSocket:function(d){return(d&49152)==49152},flagModes:{r:0,\"r+\":2,w:577,\"w+\":578,a:1089,\"a+\":1090},modeStringToFlags:function(d){var E=v.flagModes[d];if(typeof E==\"undefined\")throw new Error(\"Unknown file open mode: \"+d);return E},flagsToPermissionString:function(d){var E=[\"r\",\"w\",\"rw\"][d&3];return d&512&&(E+=\"w\"),E},nodePermissions:function(d,E){return v.ignorePermissions?0:E.includes(\"r\")&&!(d.mode&292)||E.includes(\"w\")&&!(d.mode&146)||E.includes(\"x\")&&!(d.mode&73)?2:0},mayLookup:function(d){var E=v.nodePermissions(d,\"x\");return E||(d.node_ops.lookup?0:2)},mayCreate:function(d,E){try{var I=v.lookupNode(d,E);return 20}catch(D){}return v.nodePermissions(d,\"wx\")},mayDelete:function(d,E,I){var D;try{D=v.lookupNode(d,E)}catch(V){return V.errno}var O=v.nodePermissions(d,\"wx\");if(O)return O;if(I){if(!v.isDir(D.mode))return 54;if(v.isRoot(D)||v.getPath(D)===v.cwd())return 10}else if(v.isDir(D.mode))return 31;return 0},mayOpen:function(d,E){return d?v.isLink(d.mode)?32:v.isDir(d.mode)&&(v.flagsToPermissionString(E)!==\"r\"||E&512)?31:v.nodePermissions(d,v.flagsToPermissionString(E)):44},MAX_OPEN_FDS:4096,nextfd:function(d,E){d=d||0,E=E||v.MAX_OPEN_FDS;for(var I=d;I<=E;I++)if(!v.streams[I])return I;throw new v.ErrnoError(33)},getStream:function(d){return v.streams[d]},createStream:function(d,E,I){v.FSStream||(v.FSStream=function(){},v.FSStream.prototype={object:{get:function(){return this.node},set:function(ie){this.node=ie}},isRead:{get:function(){return(this.flags&2097155)!=1}},isWrite:{get:function(){return(this.flags&2097155)!=0}},isAppend:{get:function(){return this.flags&1024}}});var D=new v.FSStream;for(var O in d)D[O]=d[O];d=D;var V=v.nextfd(E,I);return d.fd=V,v.streams[V]=d,d},closeStream:function(d){v.streams[d]=null},chrdev_stream_ops:{open:function(d){var E=v.getDevice(d.node.rdev);d.stream_ops=E.stream_ops,d.stream_ops.open&&d.stream_ops.open(d)},llseek:function(){throw new v.ErrnoError(70)}},major:function(d){return d>>8},minor:function(d){return d&255},makedev:function(d,E){return d<<8|E},registerDevice:function(d,E){v.devices[d]={stream_ops:E}},getDevice:function(d){return v.devices[d]},getMounts:function(d){for(var E=[],I=[d];I.length;){var D=I.pop();E.push(D),I.push.apply(I,D.mounts)}return E},syncfs:function(d,E){typeof d==\"function\"&&(E=d,d=!1),v.syncFSRequests++,v.syncFSRequests>1&&x(\"warning: \"+v.syncFSRequests+\" FS.syncfs operations in flight at once, probably just doing extra work\");var I=v.getMounts(v.root.mount),D=0;function O(ie){return v.syncFSRequests--,E(ie)}function V(ie){if(ie)return V.errored?void 0:(V.errored=!0,O(ie));++D>=I.length&&O(null)}I.forEach(function(ie){if(!ie.type.syncfs)return V(null);ie.type.syncfs(ie,d,V)})},mount:function(d,E,I){var D=I===\"/\",O=!I,V;if(D&&v.root)throw new v.ErrnoError(10);if(!D&&!O){var ie=v.lookupPath(I,{follow_mount:!1});if(I=ie.path,V=ie.node,v.isMountpoint(V))throw new v.ErrnoError(10);if(!v.isDir(V.mode))throw new v.ErrnoError(54)}var Be={type:d,opts:E,mountpoint:I,mounts:[]},Ce=d.mount(Be);return 
Ce.mount=Be,Be.root=Ce,D?v.root=Ce:V&&(V.mounted=Be,V.mount&&V.mount.mounts.push(Be)),Ce},unmount:function(d){var E=v.lookupPath(d,{follow_mount:!1});if(!v.isMountpoint(E.node))throw new v.ErrnoError(28);var I=E.node,D=I.mounted,O=v.getMounts(D);Object.keys(v.nameTable).forEach(function(ie){for(var Be=v.nameTable[ie];Be;){var Ce=Be.name_next;O.includes(Be.mount)&&v.destroyNode(Be),Be=Ce}}),I.mounted=null;var V=I.mount.mounts.indexOf(D);I.mount.mounts.splice(V,1)},lookup:function(d,E){return d.node_ops.lookup(d,E)},mknod:function(d,E,I){var D=v.lookupPath(d,{parent:!0}),O=D.node,V=vt.basename(d);if(!V||V===\".\"||V===\"..\")throw new v.ErrnoError(28);var ie=v.mayCreate(O,V);if(ie)throw new v.ErrnoError(ie);if(!O.node_ops.mknod)throw new v.ErrnoError(63);return O.node_ops.mknod(O,V,E,I)},create:function(d,E){return E=E!==void 0?E:438,E&=4095,E|=32768,v.mknod(d,E,0)},mkdir:function(d,E){return E=E!==void 0?E:511,E&=511|512,E|=16384,v.mknod(d,E,0)},mkdirTree:function(d,E){for(var I=d.split(\"/\"),D=\"\",O=0;O<I.length;++O)if(!!I[O]){D+=\"/\"+I[O];try{v.mkdir(D,E)}catch(V){if(V.errno!=20)throw V}}},mkdev:function(d,E,I){return typeof I==\"undefined\"&&(I=E,E=438),E|=8192,v.mknod(d,E,I)},symlink:function(d,E){if(!Gn.resolve(d))throw new v.ErrnoError(44);var I=v.lookupPath(E,{parent:!0}),D=I.node;if(!D)throw new v.ErrnoError(44);var O=vt.basename(E),V=v.mayCreate(D,O);if(V)throw new v.ErrnoError(V);if(!D.node_ops.symlink)throw new v.ErrnoError(63);return D.node_ops.symlink(D,O,d)},rename:function(d,E){var I=vt.dirname(d),D=vt.dirname(E),O=vt.basename(d),V=vt.basename(E),ie,Be,Ce;if(ie=v.lookupPath(d,{parent:!0}),Be=ie.node,ie=v.lookupPath(E,{parent:!0}),Ce=ie.node,!Be||!Ce)throw new v.ErrnoError(44);if(Be.mount!==Ce.mount)throw new v.ErrnoError(75);var _e=v.lookupNode(Be,O),ot=Gn.relative(d,D);if(ot.charAt(0)!==\".\")throw new v.ErrnoError(28);if(ot=Gn.relative(E,I),ot.charAt(0)!==\".\")throw new v.ErrnoError(55);var wt;try{wt=v.lookupNode(Ce,V)}catch(It){}if(_e!==wt){var ut=v.isDir(_e.mode),nt=v.mayDelete(Be,O,ut);if(nt)throw new v.ErrnoError(nt);if(nt=wt?v.mayDelete(Ce,V,ut):v.mayCreate(Ce,V),nt)throw new v.ErrnoError(nt);if(!Be.node_ops.rename)throw new v.ErrnoError(63);if(v.isMountpoint(_e)||wt&&v.isMountpoint(wt))throw new v.ErrnoError(10);if(Ce!==Be&&(nt=v.nodePermissions(Be,\"w\"),nt))throw new v.ErrnoError(nt);try{v.trackingDelegate.willMovePath&&v.trackingDelegate.willMovePath(d,E)}catch(It){x(\"FS.trackingDelegate['willMovePath']('\"+d+\"', '\"+E+\"') threw an exception: \"+It.message)}v.hashRemoveNode(_e);try{Be.node_ops.rename(_e,Ce,V)}catch(It){throw It}finally{v.hashAddNode(_e)}try{v.trackingDelegate.onMovePath&&v.trackingDelegate.onMovePath(d,E)}catch(It){x(\"FS.trackingDelegate['onMovePath']('\"+d+\"', '\"+E+\"') threw an exception: \"+It.message)}}},rmdir:function(d){var E=v.lookupPath(d,{parent:!0}),I=E.node,D=vt.basename(d),O=v.lookupNode(I,D),V=v.mayDelete(I,D,!0);if(V)throw new v.ErrnoError(V);if(!I.node_ops.rmdir)throw new v.ErrnoError(63);if(v.isMountpoint(O))throw new v.ErrnoError(10);try{v.trackingDelegate.willDeletePath&&v.trackingDelegate.willDeletePath(d)}catch(ie){x(\"FS.trackingDelegate['willDeletePath']('\"+d+\"') threw an exception: \"+ie.message)}I.node_ops.rmdir(I,D),v.destroyNode(O);try{v.trackingDelegate.onDeletePath&&v.trackingDelegate.onDeletePath(d)}catch(ie){x(\"FS.trackingDelegate['onDeletePath']('\"+d+\"') threw an exception: \"+ie.message)}},readdir:function(d){var E=v.lookupPath(d,{follow:!0}),I=E.node;if(!I.node_ops.readdir)throw new 
v.ErrnoError(54);return I.node_ops.readdir(I)},unlink:function(d){var E=v.lookupPath(d,{parent:!0}),I=E.node,D=vt.basename(d),O=v.lookupNode(I,D),V=v.mayDelete(I,D,!1);if(V)throw new v.ErrnoError(V);if(!I.node_ops.unlink)throw new v.ErrnoError(63);if(v.isMountpoint(O))throw new v.ErrnoError(10);try{v.trackingDelegate.willDeletePath&&v.trackingDelegate.willDeletePath(d)}catch(ie){x(\"FS.trackingDelegate['willDeletePath']('\"+d+\"') threw an exception: \"+ie.message)}I.node_ops.unlink(I,D),v.destroyNode(O);try{v.trackingDelegate.onDeletePath&&v.trackingDelegate.onDeletePath(d)}catch(ie){x(\"FS.trackingDelegate['onDeletePath']('\"+d+\"') threw an exception: \"+ie.message)}},readlink:function(d){var E=v.lookupPath(d),I=E.node;if(!I)throw new v.ErrnoError(44);if(!I.node_ops.readlink)throw new v.ErrnoError(28);return Gn.resolve(v.getPath(I.parent),I.node_ops.readlink(I))},stat:function(d,E){var I=v.lookupPath(d,{follow:!E}),D=I.node;if(!D)throw new v.ErrnoError(44);if(!D.node_ops.getattr)throw new v.ErrnoError(63);return D.node_ops.getattr(D)},lstat:function(d){return v.stat(d,!0)},chmod:function(d,E,I){var D;if(typeof d==\"string\"){var O=v.lookupPath(d,{follow:!I});D=O.node}else D=d;if(!D.node_ops.setattr)throw new v.ErrnoError(63);D.node_ops.setattr(D,{mode:E&4095|D.mode&~4095,timestamp:Date.now()})},lchmod:function(d,E){v.chmod(d,E,!0)},fchmod:function(d,E){var I=v.getStream(d);if(!I)throw new v.ErrnoError(8);v.chmod(I.node,E)},chown:function(d,E,I,D){var O;if(typeof d==\"string\"){var V=v.lookupPath(d,{follow:!D});O=V.node}else O=d;if(!O.node_ops.setattr)throw new v.ErrnoError(63);O.node_ops.setattr(O,{timestamp:Date.now()})},lchown:function(d,E,I){v.chown(d,E,I,!0)},fchown:function(d,E,I){var D=v.getStream(d);if(!D)throw new v.ErrnoError(8);v.chown(D.node,E,I)},truncate:function(d,E){if(E<0)throw new v.ErrnoError(28);var I;if(typeof d==\"string\"){var D=v.lookupPath(d,{follow:!0});I=D.node}else I=d;if(!I.node_ops.setattr)throw new v.ErrnoError(63);if(v.isDir(I.mode))throw new v.ErrnoError(31);if(!v.isFile(I.mode))throw new v.ErrnoError(28);var O=v.nodePermissions(I,\"w\");if(O)throw new v.ErrnoError(O);I.node_ops.setattr(I,{size:E,timestamp:Date.now()})},ftruncate:function(d,E){var I=v.getStream(d);if(!I)throw new v.ErrnoError(8);if((I.flags&2097155)==0)throw new v.ErrnoError(28);v.truncate(I.node,E)},utime:function(d,E,I){var D=v.lookupPath(d,{follow:!0}),O=D.node;O.node_ops.setattr(O,{timestamp:Math.max(E,I)})},open:function(d,E,I,D,O){if(d===\"\")throw new v.ErrnoError(44);E=typeof E==\"string\"?v.modeStringToFlags(E):E,I=typeof I==\"undefined\"?438:I,E&64?I=I&4095|32768:I=0;var V;if(typeof d==\"object\")V=d;else{d=vt.normalize(d);try{var ie=v.lookupPath(d,{follow:!(E&131072)});V=ie.node}catch(wt){}}var Be=!1;if(E&64)if(V){if(E&128)throw new v.ErrnoError(20)}else V=v.mknod(d,I,0),Be=!0;if(!V)throw new v.ErrnoError(44);if(v.isChrdev(V.mode)&&(E&=~512),E&65536&&!v.isDir(V.mode))throw new v.ErrnoError(54);if(!Be){var Ce=v.mayOpen(V,E);if(Ce)throw new v.ErrnoError(Ce)}E&512&&v.truncate(V,0),E&=~(128|512|131072);var _e=v.createStream({node:V,path:v.getPath(V),flags:E,seekable:!0,position:0,stream_ops:V.stream_ops,ungotten:[],error:!1},D,O);_e.stream_ops.open&&_e.stream_ops.open(_e),r.logReadFiles&&!(E&1)&&(v.readFiles||(v.readFiles={}),d in v.readFiles||(v.readFiles[d]=1,x(\"FS.trackingDelegate error on read file: \"+d)));try{if(v.trackingDelegate.onOpenFile){var 
ot=0;(E&2097155)!=1&&(ot|=v.tracking.openFlags.READ),(E&2097155)!=0&&(ot|=v.tracking.openFlags.WRITE),v.trackingDelegate.onOpenFile(d,ot)}}catch(wt){x(\"FS.trackingDelegate['onOpenFile']('\"+d+\"', flags) threw an exception: \"+wt.message)}return _e},close:function(d){if(v.isClosed(d))throw new v.ErrnoError(8);d.getdents&&(d.getdents=null);try{d.stream_ops.close&&d.stream_ops.close(d)}catch(E){throw E}finally{v.closeStream(d.fd)}d.fd=null},isClosed:function(d){return d.fd===null},llseek:function(d,E,I){if(v.isClosed(d))throw new v.ErrnoError(8);if(!d.seekable||!d.stream_ops.llseek)throw new v.ErrnoError(70);if(I!=0&&I!=1&&I!=2)throw new v.ErrnoError(28);return d.position=d.stream_ops.llseek(d,E,I),d.ungotten=[],d.position},read:function(d,E,I,D,O){if(D<0||O<0)throw new v.ErrnoError(28);if(v.isClosed(d))throw new v.ErrnoError(8);if((d.flags&2097155)==1)throw new v.ErrnoError(8);if(v.isDir(d.node.mode))throw new v.ErrnoError(31);if(!d.stream_ops.read)throw new v.ErrnoError(28);var V=typeof O!=\"undefined\";if(!V)O=d.position;else if(!d.seekable)throw new v.ErrnoError(70);var ie=d.stream_ops.read(d,E,I,D,O);return V||(d.position+=ie),ie},write:function(d,E,I,D,O,V){if(D<0||O<0)throw new v.ErrnoError(28);if(v.isClosed(d))throw new v.ErrnoError(8);if((d.flags&2097155)==0)throw new v.ErrnoError(8);if(v.isDir(d.node.mode))throw new v.ErrnoError(31);if(!d.stream_ops.write)throw new v.ErrnoError(28);d.seekable&&d.flags&1024&&v.llseek(d,0,2);var ie=typeof O!=\"undefined\";if(!ie)O=d.position;else if(!d.seekable)throw new v.ErrnoError(70);var Be=d.stream_ops.write(d,E,I,D,O,V);ie||(d.position+=Be);try{d.path&&v.trackingDelegate.onWriteToFile&&v.trackingDelegate.onWriteToFile(d.path)}catch(Ce){x(\"FS.trackingDelegate['onWriteToFile']('\"+d.path+\"') threw an exception: \"+Ce.message)}return Be},allocate:function(d,E,I){if(v.isClosed(d))throw new v.ErrnoError(8);if(E<0||I<=0)throw new v.ErrnoError(28);if((d.flags&2097155)==0)throw new v.ErrnoError(8);if(!v.isFile(d.node.mode)&&!v.isDir(d.node.mode))throw new v.ErrnoError(43);if(!d.stream_ops.allocate)throw new v.ErrnoError(138);d.stream_ops.allocate(d,E,I)},mmap:function(d,E,I,D,O,V){if((O&2)!=0&&(V&2)==0&&(d.flags&2097155)!=2)throw new v.ErrnoError(2);if((d.flags&2097155)==1)throw new v.ErrnoError(2);if(!d.stream_ops.mmap)throw new v.ErrnoError(43);return d.stream_ops.mmap(d,E,I,D,O,V)},msync:function(d,E,I,D,O){return!d||!d.stream_ops.msync?0:d.stream_ops.msync(d,E,I,D,O)},munmap:function(d){return 0},ioctl:function(d,E,I){if(!d.stream_ops.ioctl)throw new v.ErrnoError(59);return d.stream_ops.ioctl(d,E,I)},readFile:function(d,E){if(E=E||{},E.flags=E.flags||0,E.encoding=E.encoding||\"binary\",E.encoding!==\"utf8\"&&E.encoding!==\"binary\")throw new Error('Invalid encoding type \"'+E.encoding+'\"');var I,D=v.open(d,E.flags),O=v.stat(d),V=O.size,ie=new Uint8Array(V);return v.read(D,ie,0,V,0),E.encoding===\"utf8\"?I=qe(ie,0):E.encoding===\"binary\"&&(I=ie),v.close(D),I},writeFile:function(d,E,I){I=I||{},I.flags=I.flags||577;var D=v.open(d,I.flags,I.mode);if(typeof E==\"string\"){var O=new Uint8Array(he(E)+1),V=se(E,O,0,O.length);v.write(D,O,0,V,void 0,I.canOwn)}else if(ArrayBuffer.isView(E))v.write(D,E,0,E.byteLength,void 0,I.canOwn);else throw new Error(\"Unsupported data type\");v.close(D)},cwd:function(){return v.currentPath},chdir:function(d){var E=v.lookupPath(d,{follow:!0});if(E.node===null)throw new v.ErrnoError(44);if(!v.isDir(E.node.mode))throw new v.ErrnoError(54);var I=v.nodePermissions(E.node,\"x\");if(I)throw new 
v.ErrnoError(I);v.currentPath=E.path},createDefaultDirectories:function(){v.mkdir(\"/tmp\"),v.mkdir(\"/home\"),v.mkdir(\"/home/web_user\")},createDefaultDevices:function(){v.mkdir(\"/dev\"),v.registerDevice(v.makedev(1,3),{read:function(){return 0},write:function(E,I,D,O,V){return O}}),v.mkdev(\"/dev/null\",v.makedev(1,3)),fs.register(v.makedev(5,0),fs.default_tty_ops),fs.register(v.makedev(6,0),fs.default_tty1_ops),v.mkdev(\"/dev/tty\",v.makedev(5,0)),v.mkdev(\"/dev/tty1\",v.makedev(6,0));var d=Gl();v.createDevice(\"/dev\",\"random\",d),v.createDevice(\"/dev\",\"urandom\",d),v.mkdir(\"/dev/shm\"),v.mkdir(\"/dev/shm/tmp\")},createSpecialDirectories:function(){v.mkdir(\"/proc\");var d=v.mkdir(\"/proc/self\");v.mkdir(\"/proc/self/fd\"),v.mount({mount:function(){var E=v.createNode(d,\"fd\",16384|511,73);return E.node_ops={lookup:function(I,D){var O=+D,V=v.getStream(O);if(!V)throw new v.ErrnoError(8);var ie={parent:null,mount:{mountpoint:\"fake\"},node_ops:{readlink:function(){return V.path}}};return ie.parent=ie,ie}},E}},{},\"/proc/self/fd\")},createStandardStreams:function(){r.stdin?v.createDevice(\"/dev\",\"stdin\",r.stdin):v.symlink(\"/dev/tty\",\"/dev/stdin\"),r.stdout?v.createDevice(\"/dev\",\"stdout\",null,r.stdout):v.symlink(\"/dev/tty\",\"/dev/stdout\"),r.stderr?v.createDevice(\"/dev\",\"stderr\",null,r.stderr):v.symlink(\"/dev/tty1\",\"/dev/stderr\");var d=v.open(\"/dev/stdin\",0),E=v.open(\"/dev/stdout\",1),I=v.open(\"/dev/stderr\",1)},ensureErrnoError:function(){v.ErrnoError||(v.ErrnoError=function(E,I){this.node=I,this.setErrno=function(D){this.errno=D},this.setErrno(E),this.message=\"FS error\"},v.ErrnoError.prototype=new Error,v.ErrnoError.prototype.constructor=v.ErrnoError,[44].forEach(function(d){v.genericErrors[d]=new v.ErrnoError(d),v.genericErrors[d].stack=\"<generic error, no stack>\"}))},staticInit:function(){v.ensureErrnoError(),v.nameTable=new Array(4096),v.mount(pt,{},\"/\"),v.createDefaultDirectories(),v.createDefaultDevices(),v.createSpecialDirectories(),v.filesystems={MEMFS:pt,NODEFS:lt}},init:function(d,E,I){v.init.initialized=!0,v.ensureErrnoError(),r.stdin=d||r.stdin,r.stdout=E||r.stdout,r.stderr=I||r.stderr,v.createStandardStreams()},quit:function(){v.init.initialized=!1;var d=r._fflush;d&&d(0);for(var E=0;E<v.streams.length;E++){var I=v.streams[E];!I||v.close(I)}},getMode:function(d,E){var I=0;return d&&(I|=292|73),E&&(I|=146),I},findObject:function(d,E){var I=v.analyzePath(d,E);return I.exists?I.object:null},analyzePath:function(d,E){try{var I=v.lookupPath(d,{follow:!E});d=I.path}catch(O){}var D={isRoot:!1,exists:!1,error:0,name:null,path:null,object:null,parentExists:!1,parentPath:null,parentObject:null};try{var I=v.lookupPath(d,{parent:!0});D.parentExists=!0,D.parentPath=I.path,D.parentObject=I.node,D.name=vt.basename(d),I=v.lookupPath(d,{follow:!E}),D.exists=!0,D.path=I.path,D.object=I.node,D.name=I.node.name,D.isRoot=I.path===\"/\"}catch(O){D.error=O.errno}return D},createPath:function(d,E,I,D){d=typeof d==\"string\"?d:v.getPath(d);for(var O=E.split(\"/\").reverse();O.length;){var V=O.pop();if(!!V){var ie=vt.join2(d,V);try{v.mkdir(ie)}catch(Be){}d=ie}}return ie},createFile:function(d,E,I,D,O){var V=vt.join2(typeof d==\"string\"?d:v.getPath(d),E),ie=v.getMode(D,O);return v.create(V,ie)},createDataFile:function(d,E,I,D,O,V){var ie=E?vt.join2(typeof d==\"string\"?d:v.getPath(d),E):d,Be=v.getMode(D,O),Ce=v.create(ie,Be);if(I){if(typeof I==\"string\"){for(var _e=new Array(I.length),ot=0,wt=I.length;ot<wt;++ot)_e[ot]=I.charCodeAt(ot);I=_e}v.chmod(Ce,Be|146);var 
ut=v.open(Ce,577);v.write(ut,I,0,I.length,0,V),v.close(ut),v.chmod(Ce,Be)}return Ce},createDevice:function(d,E,I,D){var O=vt.join2(typeof d==\"string\"?d:v.getPath(d),E),V=v.getMode(!!I,!!D);v.createDevice.major||(v.createDevice.major=64);var ie=v.makedev(v.createDevice.major++,0);return v.registerDevice(ie,{open:function(Be){Be.seekable=!1},close:function(Be){D&&D.buffer&&D.buffer.length&&D(10)},read:function(Be,Ce,_e,ot,wt){for(var ut=0,nt=0;nt<ot;nt++){var It;try{It=I()}catch(ke){throw new v.ErrnoError(29)}if(It===void 0&&ut===0)throw new v.ErrnoError(6);if(It==null)break;ut++,Ce[_e+nt]=It}return ut&&(Be.node.timestamp=Date.now()),ut},write:function(Be,Ce,_e,ot,wt){for(var ut=0;ut<ot;ut++)try{D(Ce[_e+ut])}catch(nt){throw new v.ErrnoError(29)}return ot&&(Be.node.timestamp=Date.now()),ut}}),v.mkdev(O,V,ie)},forceLoadFile:function(d){if(d.isDevice||d.isFolder||d.link||d.contents)return!0;if(typeof XMLHttpRequest!=\"undefined\")throw new Error(\"Lazy loading should have been performed (contents set) in createLazyFile, but it was not. Lazy loading only works in web workers. Use --embed-file or --preload-file in emcc on the main thread.\");if(p)try{d.contents=RA(p(d.url),!0),d.usedBytes=d.contents.length}catch(E){throw new v.ErrnoError(29)}else throw new Error(\"Cannot load without read() or XMLHttpRequest.\")},createLazyFile:function(d,E,I,D,O){function V(){this.lengthKnown=!1,this.chunks=[]}if(V.prototype.get=function(ut){if(!(ut>this.length-1||ut<0)){var nt=ut%this.chunkSize,It=ut/this.chunkSize|0;return this.getter(It)[nt]}},V.prototype.setDataGetter=function(ut){this.getter=ut},V.prototype.cacheLength=function(){var ut=new XMLHttpRequest;if(ut.open(\"HEAD\",I,!1),ut.send(null),!(ut.status>=200&&ut.status<300||ut.status===304))throw new Error(\"Couldn't load \"+I+\". Status: \"+ut.status);var nt=Number(ut.getResponseHeader(\"Content-length\")),It,ke=(It=ut.getResponseHeader(\"Accept-Ranges\"))&&It===\"bytes\",Jn=(It=ut.getResponseHeader(\"Content-Encoding\"))&&It===\"gzip\",Mi=1024*1024;ke||(Mi=nt);var OA=function(ps,va){if(ps>va)throw new Error(\"invalid range (\"+ps+\", \"+va+\") or no bytes requested!\");if(va>nt-1)throw new Error(\"only \"+nt+\" bytes available! programmer error!\");var Yr=new XMLHttpRequest;if(Yr.open(\"GET\",I,!1),nt!==Mi&&Yr.setRequestHeader(\"Range\",\"bytes=\"+ps+\"-\"+va),typeof Uint8Array!=\"undefined\"&&(Yr.responseType=\"arraybuffer\"),Yr.overrideMimeType&&Yr.overrideMimeType(\"text/plain; charset=x-user-defined\"),Yr.send(null),!(Yr.status>=200&&Yr.status<300||Yr.status===304))throw new Error(\"Couldn't load \"+I+\". Status: \"+Yr.status);return Yr.response!==void 0?new Uint8Array(Yr.response||[]):RA(Yr.responseText||\"\",!0)},Gr=this;Gr.setDataGetter(function(ps){var va=ps*Mi,Yr=(ps+1)*Mi-1;if(Yr=Math.min(Yr,nt-1),typeof Gr.chunks[ps]==\"undefined\"&&(Gr.chunks[ps]=OA(va,Yr)),typeof Gr.chunks[ps]==\"undefined\")throw new Error(\"doXHR failed!\");return Gr.chunks[ps]}),(Jn||!nt)&&(Mi=nt=1,nt=this.getter(0).length,Mi=nt,S(\"LazyFiles on gzip forces download of the whole file when length is accessed\")),this._length=nt,this._chunkSize=Mi,this.lengthKnown=!0},typeof XMLHttpRequest!=\"undefined\"){if(!u)throw\"Cannot do synchronous binary XHRs outside webworkers in modern browsers. 
Use --embed-file or --preload-file in emcc\";var ie=new V;Object.defineProperties(ie,{length:{get:function(){return this.lengthKnown||this.cacheLength(),this._length}},chunkSize:{get:function(){return this.lengthKnown||this.cacheLength(),this._chunkSize}}});var Be={isDevice:!1,contents:ie}}else var Be={isDevice:!1,url:I};var Ce=v.createFile(d,E,Be,D,O);Be.contents?Ce.contents=Be.contents:Be.url&&(Ce.contents=null,Ce.url=Be.url),Object.defineProperties(Ce,{usedBytes:{get:function(){return this.contents.length}}});var _e={},ot=Object.keys(Ce.stream_ops);return ot.forEach(function(wt){var ut=Ce.stream_ops[wt];_e[wt]=function(){return v.forceLoadFile(Ce),ut.apply(null,arguments)}}),_e.read=function(ut,nt,It,ke,Jn){v.forceLoadFile(Ce);var Mi=ut.node.contents;if(Jn>=Mi.length)return 0;var OA=Math.min(Mi.length-Jn,ke);if(Mi.slice)for(var Gr=0;Gr<OA;Gr++)nt[It+Gr]=Mi[Jn+Gr];else for(var Gr=0;Gr<OA;Gr++)nt[It+Gr]=Mi.get(Jn+Gr);return OA},Ce.stream_ops=_e,Ce},createPreloadedFile:function(d,E,I,D,O,V,ie,Be,Ce,_e){Browser.init();var ot=E?Gn.resolve(vt.join2(d,E)):d,wt=Ru(\"cp \"+ot);function ut(nt){function It(Jn){_e&&_e(),Be||v.createDataFile(d,E,Jn,D,O,Ce),V&&V(),PA(wt)}var ke=!1;r.preloadPlugins.forEach(function(Jn){ke||Jn.canHandle(ot)&&(Jn.handle(nt,ot,It,function(){ie&&ie(),PA(wt)}),ke=!0)}),ke||It(nt)}xA(wt),typeof I==\"string\"?Browser.asyncLoad(I,function(nt){ut(nt)},ie):ut(I)},indexedDB:function(){return window.indexedDB||window.mozIndexedDB||window.webkitIndexedDB||window.msIndexedDB},DB_NAME:function(){return\"EM_FS_\"+window.location.pathname},DB_VERSION:20,DB_STORE_NAME:\"FILE_DATA\",saveFilesToDB:function(d,E,I){E=E||function(){},I=I||function(){};var D=v.indexedDB();try{var O=D.open(v.DB_NAME(),v.DB_VERSION)}catch(V){return I(V)}O.onupgradeneeded=function(){S(\"creating db\");var ie=O.result;ie.createObjectStore(v.DB_STORE_NAME)},O.onsuccess=function(){var ie=O.result,Be=ie.transaction([v.DB_STORE_NAME],\"readwrite\"),Ce=Be.objectStore(v.DB_STORE_NAME),_e=0,ot=0,wt=d.length;function ut(){ot==0?E():I()}d.forEach(function(nt){var It=Ce.put(v.analyzePath(nt).object.contents,nt);It.onsuccess=function(){_e++,_e+ot==wt&&ut()},It.onerror=function(){ot++,_e+ot==wt&&ut()}}),Be.onerror=I},O.onerror=I},loadFilesFromDB:function(d,E,I){E=E||function(){},I=I||function(){};var D=v.indexedDB();try{var O=D.open(v.DB_NAME(),v.DB_VERSION)}catch(V){return I(V)}O.onupgradeneeded=I,O.onsuccess=function(){var ie=O.result;try{var Be=ie.transaction([v.DB_STORE_NAME],\"readonly\")}catch(nt){I(nt);return}var Ce=Be.objectStore(v.DB_STORE_NAME),_e=0,ot=0,wt=d.length;function ut(){ot==0?E():I()}d.forEach(function(nt){var It=Ce.get(nt);It.onsuccess=function(){v.analyzePath(nt).exists&&v.unlink(nt),v.createDataFile(vt.dirname(nt),vt.basename(nt),It.result,!0,!0,!0),_e++,_e+ot==wt&&ut()},It.onerror=function(){ot++,_e+ot==wt&&ut()}}),Be.onerror=I},O.onerror=I}},Tt={mappings:{},DEFAULT_POLLMASK:5,umask:511,calculateAt:function(d,E,I){if(E[0]===\"/\")return E;var D;if(d===-100)D=v.cwd();else{var O=v.getStream(d);if(!O)throw new v.ErrnoError(8);D=O.path}if(E.length==0){if(!I)throw new v.ErrnoError(44);return D}return vt.join2(D,E)},doStat:function(d,E,I){try{var D=d(E)}catch(O){if(O&&O.node&&vt.normalize(E)!==vt.normalize(v.getPath(O.node)))return-54;throw O}return 
fe[I>>2]=D.dev,fe[I+4>>2]=0,fe[I+8>>2]=D.ino,fe[I+12>>2]=D.mode,fe[I+16>>2]=D.nlink,fe[I+20>>2]=D.uid,fe[I+24>>2]=D.gid,fe[I+28>>2]=D.rdev,fe[I+32>>2]=0,Oi=[D.size>>>0,(oe=D.size,+Math.abs(oe)>=1?oe>0?(Math.min(+Math.floor(oe/4294967296),4294967295)|0)>>>0:~~+Math.ceil((oe-+(~~oe>>>0))/4294967296)>>>0:0)],fe[I+40>>2]=Oi[0],fe[I+44>>2]=Oi[1],fe[I+48>>2]=4096,fe[I+52>>2]=D.blocks,fe[I+56>>2]=D.atime.getTime()/1e3|0,fe[I+60>>2]=0,fe[I+64>>2]=D.mtime.getTime()/1e3|0,fe[I+68>>2]=0,fe[I+72>>2]=D.ctime.getTime()/1e3|0,fe[I+76>>2]=0,Oi=[D.ino>>>0,(oe=D.ino,+Math.abs(oe)>=1?oe>0?(Math.min(+Math.floor(oe/4294967296),4294967295)|0)>>>0:~~+Math.ceil((oe-+(~~oe>>>0))/4294967296)>>>0:0)],fe[I+80>>2]=Oi[0],fe[I+84>>2]=Oi[1],0},doMsync:function(d,E,I,D,O){var V=X.slice(d,d+I);v.msync(E,V,O,I,D)},doMkdir:function(d,E){return d=vt.normalize(d),d[d.length-1]===\"/\"&&(d=d.substr(0,d.length-1)),v.mkdir(d,E,0),0},doMknod:function(d,E,I){switch(E&61440){case 32768:case 8192:case 24576:case 4096:case 49152:break;default:return-28}return v.mknod(d,E,I),0},doReadlink:function(d,E,I){if(I<=0)return-28;var D=v.readlink(d),O=Math.min(I,he(D)),V=pe[E+O];return Qe(D,E,I+1),pe[E+O]=V,O},doAccess:function(d,E){if(E&~7)return-28;var I,D=v.lookupPath(d,{follow:!0});if(I=D.node,!I)return-44;var O=\"\";return E&4&&(O+=\"r\"),E&2&&(O+=\"w\"),E&1&&(O+=\"x\"),O&&v.nodePermissions(I,O)?-2:0},doDup:function(d,E,I){var D=v.getStream(I);return D&&v.close(D),v.open(d,E,0,I,I).fd},doReadv:function(d,E,I,D){for(var O=0,V=0;V<I;V++){var ie=fe[E+V*8>>2],Be=fe[E+(V*8+4)>>2],Ce=v.read(d,pe,ie,Be,D);if(Ce<0)return-1;if(O+=Ce,Ce<Be)break}return O},doWritev:function(d,E,I,D){for(var O=0,V=0;V<I;V++){var ie=fe[E+V*8>>2],Be=fe[E+(V*8+4)>>2],Ce=v.write(d,pe,ie,Be,D);if(Ce<0)return-1;O+=Ce}return O},varargs:void 0,get:function(){Tt.varargs+=4;var d=fe[Tt.varargs-4>>2];return d},getStr:function(d){var E=re(d);return E},getStreamFromFD:function(d){var E=v.getStream(d);if(!E)throw new v.ErrnoError(8);return E},get64:function(d,E){return d}};function Tu(d,E){try{return d=Tt.getStr(d),v.chmod(d,E),0}catch(I){return(typeof v==\"undefined\"||!(I instanceof v.ErrnoError))&&Sr(I),-I.errno}}function Yl(d){return fe[Rt()>>2]=d,d}function Sh(d,E,I){Tt.varargs=I;try{var D=Tt.getStreamFromFD(d);switch(E){case 0:{var O=Tt.get();if(O<0)return-28;var V;return V=v.open(D.path,D.flags,0,O),V.fd}case 1:case 2:return 0;case 3:return D.flags;case 4:{var O=Tt.get();return D.flags|=O,0}case 12:{var O=Tt.get(),ie=0;return be[O+ie>>1]=2,0}case 13:case 14:return 0;case 16:case 8:return-28;case 9:return Yl(28),-1;default:return-28}}catch(Be){return(typeof v==\"undefined\"||!(Be instanceof v.ErrnoError))&&Sr(Be),-Be.errno}}function kh(d,E){try{var I=Tt.getStreamFromFD(d);return Tt.doStat(v.stat,I.path,E)}catch(D){return(typeof v==\"undefined\"||!(D instanceof v.ErrnoError))&&Sr(D),-D.errno}}function xh(d,E,I){Tt.varargs=I;try{var D=Tt.getStreamFromFD(d);switch(E){case 21509:case 21505:return D.tty?0:-59;case 21510:case 21511:case 21512:case 21506:case 21507:case 21508:return D.tty?0:-59;case 21519:{if(!D.tty)return-59;var O=Tt.get();return fe[O>>2]=0,0}case 21520:return D.tty?-28:-59;case 21531:{var O=Tt.get();return v.ioctl(D,E,O)}case 21523:return D.tty?0:-59;case 21524:return D.tty?0:-59;default:Sr(\"bad ioctl syscall \"+E)}}catch(V){return(typeof v==\"undefined\"||!(V instanceof v.ErrnoError))&&Sr(V),-V.errno}}function Ph(d,E,I){Tt.varargs=I;try{var D=Tt.getStr(d),O=I?Tt.get():0,V=v.open(D,E,O);return V.fd}catch(ie){return(typeof v==\"undefined\"||!(ie instanceof 
v.ErrnoError))&&Sr(ie),-ie.errno}}function Dh(d,E){try{return d=Tt.getStr(d),E=Tt.getStr(E),v.rename(d,E),0}catch(I){return(typeof v==\"undefined\"||!(I instanceof v.ErrnoError))&&Sr(I),-I.errno}}function G(d){try{return d=Tt.getStr(d),v.rmdir(d),0}catch(E){return(typeof v==\"undefined\"||!(E instanceof v.ErrnoError))&&Sr(E),-E.errno}}function yt(d,E){try{return d=Tt.getStr(d),Tt.doStat(v.stat,d,E)}catch(I){return(typeof v==\"undefined\"||!(I instanceof v.ErrnoError))&&Sr(I),-I.errno}}function DA(d){try{return d=Tt.getStr(d),v.unlink(d),0}catch(E){return(typeof v==\"undefined\"||!(E instanceof v.ErrnoError))&&Sr(E),-E.errno}}function $i(d,E,I){X.copyWithin(d,E,E+I)}function ql(d){try{return A.grow(d-ve.byteLength+65535>>>16),mi(A.buffer),1}catch(E){}}function $e(d){var E=X.length;d=d>>>0;var I=2147483648;if(d>I)return!1;for(var D=1;D<=4;D*=2){var O=E*(1+.2/D);O=Math.min(O,d+100663296);var V=Math.min(I,xe(Math.max(d,O),65536)),ie=ql(V);if(ie)return!0}return!1}function wa(d){try{var E=Tt.getStreamFromFD(d);return v.close(E),0}catch(I){return(typeof v==\"undefined\"||!(I instanceof v.ErrnoError))&&Sr(I),I.errno}}function Ou(d,E){try{var I=Tt.getStreamFromFD(d),D=I.tty?2:v.isDir(I.mode)?3:v.isLink(I.mode)?7:4;return pe[E>>0]=D,0}catch(O){return(typeof v==\"undefined\"||!(O instanceof v.ErrnoError))&&Sr(O),O.errno}}function SE(d,E,I,D){try{var O=Tt.getStreamFromFD(d),V=Tt.doReadv(O,E,I);return fe[D>>2]=V,0}catch(ie){return(typeof v==\"undefined\"||!(ie instanceof v.ErrnoError))&&Sr(ie),ie.errno}}function Rh(d,E,I,D,O){try{var V=Tt.getStreamFromFD(d),ie=4294967296,Be=I*ie+(E>>>0),Ce=9007199254740992;return Be<=-Ce||Be>=Ce?-61:(v.llseek(V,Be,D),Oi=[V.position>>>0,(oe=V.position,+Math.abs(oe)>=1?oe>0?(Math.min(+Math.floor(oe/4294967296),4294967295)|0)>>>0:~~+Math.ceil((oe-+(~~oe>>>0))/4294967296)>>>0:0)],fe[O>>2]=Oi[0],fe[O+4>>2]=Oi[1],V.getdents&&Be===0&&D===0&&(V.getdents=null),0)}catch(_e){return(typeof v==\"undefined\"||!(_e instanceof v.ErrnoError))&&Sr(_e),_e.errno}}function kE(d,E,I,D){try{var O=Tt.getStreamFromFD(d),V=Tt.doWritev(O,E,I);return fe[D>>2]=V,0}catch(ie){return(typeof v==\"undefined\"||!(ie instanceof v.ErrnoError))&&Sr(ie),ie.errno}}function gr(d){J(d)}function Yn(d){var E=Date.now()/1e3|0;return d&&(fe[d>>2]=E),E}function Jl(){if(Jl.called)return;Jl.called=!0;var d=new Date().getFullYear(),E=new Date(d,0,1),I=new Date(d,6,1),D=E.getTimezoneOffset(),O=I.getTimezoneOffset(),V=Math.max(D,O);fe[iQ()>>2]=V*60,fe[rQ()>>2]=Number(D!=O);function ie(wt){var ut=wt.toTimeString().match(/\\(([A-Za-z ]+)\\)$/);return ut?ut[1]:\"GMT\"}var Be=ie(E),Ce=ie(I),_e=Fe(Be),ot=Fe(Ce);O<D?(fe[Yu()>>2]=_e,fe[Yu()+4>>2]=ot):(fe[Yu()>>2]=ot,fe[Yu()+4>>2]=_e)}function Fh(d){Jl();var E=Date.UTC(fe[d+20>>2]+1900,fe[d+16>>2],fe[d+12>>2],fe[d+8>>2],fe[d+4>>2],fe[d>>2],0),I=new Date(E);fe[d+24>>2]=I.getUTCDay();var D=Date.UTC(I.getUTCFullYear(),0,1,0,0,0,0),O=(I.getTime()-D)/(1e3*60*60*24)|0;return fe[d+28>>2]=O,I.getTime()/1e3|0}var Vs=function(d,E,I,D){d||(d=this),this.parent=d,this.mount=d.mount,this.mounted=null,this.id=v.nextInode++,this.name=E,this.mode=I,this.node_ops={},this.stream_ops={},this.rdev=D},Ba=292|73,En=146;if(Object.defineProperties(Vs.prototype,{read:{get:function(){return(this.mode&Ba)===Ba},set:function(d){d?this.mode|=Ba:this.mode&=~Ba}},write:{get:function(){return(this.mode&En)===En},set:function(d){d?this.mode|=En:this.mode&=~En}},isFolder:{get:function(){return v.isDir(this.mode)}},isDevice:{get:function(){return v.isChrdev(this.mode)}}}),v.FSNode=Vs,v.staticInit(),g){var 
Oe=W_,Mu=require(\"path\");lt.staticInit()}if(g){var Wl=function(d){return function(){try{return d.apply(this,arguments)}catch(E){throw E.code?new v.ErrnoError(xo[E.code]):E}}},Xs=Object.assign({},v);for(var zl in mn)v[zl]=Wl(mn[zl])}else throw new Error(\"NODERAWFS is currently only supported on Node.js environment.\");function RA(d,E,I){var D=I>0?I:he(d)+1,O=new Array(D),V=se(d,O,0,O.length);return E&&(O.length=V),O}var Uu=typeof atob==\"function\"?atob:function(d){var E=\"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=\",I=\"\",D,O,V,ie,Be,Ce,_e,ot=0;d=d.replace(/[^A-Za-z0-9\\+\\/\\=]/g,\"\");do ie=E.indexOf(d.charAt(ot++)),Be=E.indexOf(d.charAt(ot++)),Ce=E.indexOf(d.charAt(ot++)),_e=E.indexOf(d.charAt(ot++)),D=ie<<2|Be>>4,O=(Be&15)<<4|Ce>>2,V=(Ce&3)<<6|_e,I=I+String.fromCharCode(D),Ce!==64&&(I=I+String.fromCharCode(O)),_e!==64&&(I=I+String.fromCharCode(V));while(ot<d.length);return I};function Ku(d){if(typeof g==\"boolean\"&&g){var E;try{E=Buffer.from(d,\"base64\")}catch(V){E=new Buffer(d,\"base64\")}return new Uint8Array(E.buffer,E.byteOffset,E.byteLength)}try{for(var I=Uu(d),D=new Uint8Array(I.length),O=0;O<I.length;++O)D[O]=I.charCodeAt(O);return D}catch(V){throw new Error(\"Converting base64 string to bytes failed.\")}}function ba(d){if(!!Fu(d))return Ku(d.slice(jl.length))}var Qa={s:Lu,p:Tu,e:Sh,k:kh,o:xh,q:Ph,i:Dh,r:G,c:yt,h:DA,l:$i,m:$e,f:wa,j:Ou,g:SE,n:Rh,d:kE,a:gr,b:Yn,t:Fh},it=vh(),Po=r.___wasm_call_ctors=it.v,FA=r._zip_ext_count_symlinks=it.w,_l=r._zip_file_get_external_attributes=it.x,Zs=r._zipstruct_stat=it.y,Vl=r._zipstruct_statS=it.z,xE=r._zipstruct_stat_name=it.A,Nh=r._zipstruct_stat_index=it.B,Hu=r._zipstruct_stat_size=it.C,Lh=r._zipstruct_stat_mtime=it.D,PE=r._zipstruct_stat_crc=it.E,Xl=r._zipstruct_error=it.F,DE=r._zipstruct_errorS=it.G,ju=r._zipstruct_error_code_zip=it.H,NA=r._zipstruct_stat_comp_size=it.I,Lr=r._zipstruct_stat_comp_method=it.J,RE=r._zip_close=it.K,$s=r._zip_delete=it.L,eo=r._zip_dir_add=it.M,Gu=r._zip_discard=it.N,LA=r._zip_error_init_with_code=it.O,R=r._zip_get_error=it.P,q=r._zip_file_get_error=it.Q,de=r._zip_error_strerror=it.R,He=r._zip_fclose=it.S,Te=r._zip_file_add=it.T,Xe=r._free=it.U,Et=r._malloc=it.V,Rt=r.___errno_location=it.W,qn=r._zip_source_error=it.X,Jb=r._zip_source_seek=it.Y,xO=r._zip_file_set_external_attributes=it.Z,PO=r._zip_file_set_mtime=it._,Wb=r._zip_fopen=it.$,DO=r._zip_fopen_index=it.aa,RO=r._zip_fread=it.ba,zb=r._zip_get_name=it.ca,FO=r._zip_get_num_entries=it.da,NO=r._zip_source_read=it.ea,_b=r._zip_name_locate=it.fa,LO=r._zip_open=it.ga,TO=r._zip_open_from_source=it.ha,Vb=r._zip_set_file_compression=it.ia,OO=r._zip_source_buffer=it.ja,MO=r._zip_source_buffer_create=it.ka,UO=r._zip_source_close=it.la,KO=r._zip_source_free=it.ma,Xb=r._zip_source_keep=it.na,Zb=r._zip_source_open=it.oa,$b=r._zip_source_set_mtime=it.qa,eQ=r._zip_source_tell=it.ra,tQ=r._zip_stat=it.sa,HO=r._zip_stat_index=it.ta,Yu=r.__get_tzname=it.ua,rQ=r.__get_daylight=it.va,iQ=r.__get_timezone=it.wa,FE=r.stackSave=it.xa,NE=r.stackRestore=it.ya,B=r.stackAlloc=it.za;r.cwrap=Ee,r.getValue=Z;var Ke;ya=function d(){Ke||TA(),Ke||(ya=d)};function TA(d){if(d=d||a,gs>0||(vr(),gs>0))return;function E(){Ke||(Ke=!0,r.calledRun=!0,!ne&&(Hn(),i(r),r.onRuntimeInitialized&&r.onRuntimeInitialized(),us()))}r.setStatus?(r.setStatus(\"Running...\"),setTimeout(function(){setTimeout(function(){r.setStatus(\"\")},1),E()},1)):E()}if(r.run=TA,r.preInit)for(typeof r.preInit==\"function\"&&(r.preInit=[r.preInit]);r.preInit.length>0;)r.preInit.pop()();return 
TA(),e}}();typeof Jw==\"object\"&&typeof XP==\"object\"?XP.exports=ZP:typeof define==\"function\"&&define.amd?define([],function(){return ZP}):typeof Jw==\"object\"&&(Jw.createModule=ZP)});var E5=w((Pat,m5)=>{function GDe(t,e){for(var r=-1,i=t==null?0:t.length,n=Array(i);++r<i;)n[r]=e(t[r],r,t);return n}m5.exports=GDe});var Os=w((Dat,I5)=>{var YDe=Array.isArray;I5.exports=YDe});var v5=w((Rat,y5)=>{var w5=Kc(),qDe=E5(),JDe=Os(),WDe=Id(),zDe=1/0,B5=w5?w5.prototype:void 0,b5=B5?B5.toString:void 0;function Q5(t){if(typeof t==\"string\")return t;if(JDe(t))return qDe(t,Q5)+\"\";if(WDe(t))return b5?b5.call(t):\"\";var e=t+\"\";return e==\"0\"&&1/t==-zDe?\"-0\":e}y5.exports=Q5});var nf=w((Fat,S5)=>{var _De=v5();function VDe(t){return t==null?\"\":_De(t)}S5.exports=VDe});var sD=w((Nat,k5)=>{function XDe(t,e,r){var i=-1,n=t.length;e<0&&(e=-e>n?0:n+e),r=r>n?n:r,r<0&&(r+=n),n=e>r?0:r-e>>>0,e>>>=0;for(var s=Array(n);++i<n;)s[i]=t[i+e];return s}k5.exports=XDe});var P5=w((Lat,x5)=>{var ZDe=sD();function $De(t,e,r){var i=t.length;return r=r===void 0?i:r,!e&&r>=i?t:ZDe(t,e,r)}x5.exports=$De});var oD=w((Tat,D5)=>{var eRe=\"\\\\ud800-\\\\udfff\",tRe=\"\\\\u0300-\\\\u036f\",rRe=\"\\\\ufe20-\\\\ufe2f\",iRe=\"\\\\u20d0-\\\\u20ff\",nRe=tRe+rRe+iRe,sRe=\"\\\\ufe0e\\\\ufe0f\",oRe=\"\\\\u200d\",aRe=RegExp(\"[\"+oRe+eRe+nRe+sRe+\"]\");function ARe(t){return aRe.test(t)}D5.exports=ARe});var F5=w((Oat,R5)=>{function lRe(t){return t.split(\"\")}R5.exports=lRe});var H5=w((Mat,N5)=>{var L5=\"\\\\ud800-\\\\udfff\",cRe=\"\\\\u0300-\\\\u036f\",uRe=\"\\\\ufe20-\\\\ufe2f\",gRe=\"\\\\u20d0-\\\\u20ff\",fRe=cRe+uRe+gRe,hRe=\"\\\\ufe0e\\\\ufe0f\",pRe=\"[\"+L5+\"]\",aD=\"[\"+fRe+\"]\",AD=\"\\\\ud83c[\\\\udffb-\\\\udfff]\",dRe=\"(?:\"+aD+\"|\"+AD+\")\",T5=\"[^\"+L5+\"]\",O5=\"(?:\\\\ud83c[\\\\udde6-\\\\uddff]){2}\",M5=\"[\\\\ud800-\\\\udbff][\\\\udc00-\\\\udfff]\",CRe=\"\\\\u200d\",U5=dRe+\"?\",K5=\"[\"+hRe+\"]?\",mRe=\"(?:\"+CRe+\"(?:\"+[T5,O5,M5].join(\"|\")+\")\"+K5+U5+\")*\",ERe=K5+U5+mRe,IRe=\"(?:\"+[T5+aD+\"?\",aD,O5,M5,pRe].join(\"|\")+\")\",yRe=RegExp(AD+\"(?=\"+AD+\")|\"+IRe+ERe,\"g\");function wRe(t){return t.match(yRe)||[]}N5.exports=wRe});var G5=w((Uat,j5)=>{var BRe=F5(),bRe=oD(),QRe=H5();function vRe(t){return bRe(t)?QRe(t):BRe(t)}j5.exports=vRe});var q5=w((Kat,Y5)=>{var SRe=P5(),kRe=oD(),xRe=G5(),PRe=nf();function DRe(t){return function(e){e=PRe(e);var r=kRe(e)?xRe(e):void 0,i=r?r[0]:e.charAt(0),n=r?SRe(r,1).join(\"\"):e.slice(1);return i[t]()+n}}Y5.exports=DRe});var W5=w((Hat,J5)=>{var RRe=q5(),FRe=RRe(\"toUpperCase\");J5.exports=FRe});var tB=w((jat,z5)=>{var NRe=nf(),LRe=W5();function TRe(t){return LRe(NRe(t).toLowerCase())}z5.exports=TRe});var _5=w((Gat,rB)=>{function ORe(){var t=0,e=1,r=2,i=3,n=4,s=5,o=6,a=7,l=8,c=9,u=10,g=11,f=12,h=13,p=14,m=15,y=16,Q=17,S=0,x=1,M=2,Y=3,U=4;function J(A,ne){return 55296<=A.charCodeAt(ne)&&A.charCodeAt(ne)<=56319&&56320<=A.charCodeAt(ne+1)&&A.charCodeAt(ne+1)<=57343}function W(A,ne){ne===void 0&&(ne=0);var le=A.charCodeAt(ne);if(55296<=le&&le<=56319&&ne<A.length-1){var Ae=le,T=A.charCodeAt(ne+1);return 56320<=T&&T<=57343?(Ae-55296)*1024+(T-56320)+65536:Ae}if(56320<=le&&le<=57343&&ne>=1){var Ae=A.charCodeAt(ne-1),T=le;return 55296<=Ae&&Ae<=56319?(Ae-55296)*1024+(T-56320)+65536:T}return le}function ee(A,ne,le){var Ae=[A].concat(ne).concat([le]),T=Ae[Ae.length-2],L=le,Ee=Ae.lastIndexOf(p);if(Ee>1&&Ae.slice(1,Ee).every(function(re){return re==i})&&[i,h,Q].indexOf(A)==-1)return M;var we=Ae.lastIndexOf(n);if(we>0&&Ae.slice(1,we).every(function(re){return 
re==n})&&[f,n].indexOf(T)==-1)return Ae.filter(function(re){return re==n}).length%2==1?Y:U;if(T==t&&L==e)return S;if(T==r||T==t||T==e)return L==p&&ne.every(function(re){return re==i})?M:x;if(L==r||L==t||L==e)return x;if(T==o&&(L==o||L==a||L==c||L==u))return S;if((T==c||T==a)&&(L==a||L==l))return S;if((T==u||T==l)&&L==l)return S;if(L==i||L==m)return S;if(L==s)return S;if(T==f)return S;var qe=Ae.indexOf(i)!=-1?Ae.lastIndexOf(i)-1:Ae.length-2;return[h,Q].indexOf(Ae[qe])!=-1&&Ae.slice(qe+1,-1).every(function(re){return re==i})&&L==p||T==m&&[y,Q].indexOf(L)!=-1?S:ne.indexOf(n)!=-1?M:T==n&&L==n?S:x}this.nextBreak=function(A,ne){if(ne===void 0&&(ne=0),ne<0)return 0;if(ne>=A.length-1)return A.length;for(var le=Z(W(A,ne)),Ae=[],T=ne+1;T<A.length;T++)if(!J(A,T-1)){var L=Z(W(A,T));if(ee(le,Ae,L))return T;Ae.push(L)}return A.length},this.splitGraphemes=function(A){for(var ne=[],le=0,Ae;(Ae=this.nextBreak(A,le))<A.length;)ne.push(A.slice(le,Ae)),le=Ae;return le<A.length&&ne.push(A.slice(le)),ne},this.iterateGraphemes=function(A){var ne=0,le={next:function(){var Ae,T;return(T=this.nextBreak(A,ne))<A.length?(Ae=A.slice(ne,T),ne=T,{value:Ae,done:!1}):ne<A.length?(Ae=A.slice(ne),ne=A.length,{value:Ae,done:!1}):{value:void 0,done:!0}}.bind(this)};return typeof Symbol!=\"undefined\"&&Symbol.iterator&&(le[Symbol.iterator]=function(){return le}),le},this.countGraphemes=function(A){for(var ne=0,le=0,Ae;(Ae=this.nextBreak(A,le))<A.length;)le=Ae,ne++;return le<A.length&&ne++,ne};function Z(A){return 1536<=A&&A<=1541||A==1757||A==1807||A==2274||A==3406||A==69821||70082<=A&&A<=70083||A==72250||72326<=A&&A<=72329||A==73030?f:A==13?t:A==10?e:0<=A&&A<=9||11<=A&&A<=12||14<=A&&A<=31||127<=A&&A<=159||A==173||A==1564||A==6158||A==8203||8206<=A&&A<=8207||A==8232||A==8233||8234<=A&&A<=8238||8288<=A&&A<=8292||A==8293||8294<=A&&A<=8303||55296<=A&&A<=57343||A==65279||65520<=A&&A<=65528||65529<=A&&A<=65531||113824<=A&&A<=113827||119155<=A&&A<=119162||A==917504||A==917505||917506<=A&&A<=917535||917632<=A&&A<=917759||918e3<=A&&A<=921599?r:768<=A&&A<=879||1155<=A&&A<=1159||1160<=A&&A<=1161||1425<=A&&A<=1469||A==1471||1473<=A&&A<=1474||1476<=A&&A<=1477||A==1479||1552<=A&&A<=1562||1611<=A&&A<=1631||A==1648||1750<=A&&A<=1756||1759<=A&&A<=1764||1767<=A&&A<=1768||1770<=A&&A<=1773||A==1809||1840<=A&&A<=1866||1958<=A&&A<=1968||2027<=A&&A<=2035||2070<=A&&A<=2073||2075<=A&&A<=2083||2085<=A&&A<=2087||2089<=A&&A<=2093||2137<=A&&A<=2139||2260<=A&&A<=2273||2275<=A&&A<=2306||A==2362||A==2364||2369<=A&&A<=2376||A==2381||2385<=A&&A<=2391||2402<=A&&A<=2403||A==2433||A==2492||A==2494||2497<=A&&A<=2500||A==2509||A==2519||2530<=A&&A<=2531||2561<=A&&A<=2562||A==2620||2625<=A&&A<=2626||2631<=A&&A<=2632||2635<=A&&A<=2637||A==2641||2672<=A&&A<=2673||A==2677||2689<=A&&A<=2690||A==2748||2753<=A&&A<=2757||2759<=A&&A<=2760||A==2765||2786<=A&&A<=2787||2810<=A&&A<=2815||A==2817||A==2876||A==2878||A==2879||2881<=A&&A<=2884||A==2893||A==2902||A==2903||2914<=A&&A<=2915||A==2946||A==3006||A==3008||A==3021||A==3031||A==3072||3134<=A&&A<=3136||3142<=A&&A<=3144||3146<=A&&A<=3149||3157<=A&&A<=3158||3170<=A&&A<=3171||A==3201||A==3260||A==3263||A==3266||A==3270||3276<=A&&A<=3277||3285<=A&&A<=3286||3298<=A&&A<=3299||3328<=A&&A<=3329||3387<=A&&A<=3388||A==3390||3393<=A&&A<=3396||A==3405||A==3415||3426<=A&&A<=3427||A==3530||A==3535||3538<=A&&A<=3540||A==3542||A==3551||A==3633||3636<=A&&A<=3642||3655<=A&&A<=3662||A==3761||3764<=A&&A<=3769||3771<=A&&A<=3772||3784<=A&&A<=3789||3864<=A&&A<=3865||A==3893||A==3895||A==3897||3953<=A&&A<=3966||3968<=A&&A<=3972||3974<=A&&A<=3975||39
81<=A&&A<=3991||3993<=A&&A<=4028||A==4038||4141<=A&&A<=4144||4146<=A&&A<=4151||4153<=A&&A<=4154||4157<=A&&A<=4158||4184<=A&&A<=4185||4190<=A&&A<=4192||4209<=A&&A<=4212||A==4226||4229<=A&&A<=4230||A==4237||A==4253||4957<=A&&A<=4959||5906<=A&&A<=5908||5938<=A&&A<=5940||5970<=A&&A<=5971||6002<=A&&A<=6003||6068<=A&&A<=6069||6071<=A&&A<=6077||A==6086||6089<=A&&A<=6099||A==6109||6155<=A&&A<=6157||6277<=A&&A<=6278||A==6313||6432<=A&&A<=6434||6439<=A&&A<=6440||A==6450||6457<=A&&A<=6459||6679<=A&&A<=6680||A==6683||A==6742||6744<=A&&A<=6750||A==6752||A==6754||6757<=A&&A<=6764||6771<=A&&A<=6780||A==6783||6832<=A&&A<=6845||A==6846||6912<=A&&A<=6915||A==6964||6966<=A&&A<=6970||A==6972||A==6978||7019<=A&&A<=7027||7040<=A&&A<=7041||7074<=A&&A<=7077||7080<=A&&A<=7081||7083<=A&&A<=7085||A==7142||7144<=A&&A<=7145||A==7149||7151<=A&&A<=7153||7212<=A&&A<=7219||7222<=A&&A<=7223||7376<=A&&A<=7378||7380<=A&&A<=7392||7394<=A&&A<=7400||A==7405||A==7412||7416<=A&&A<=7417||7616<=A&&A<=7673||7675<=A&&A<=7679||A==8204||8400<=A&&A<=8412||8413<=A&&A<=8416||A==8417||8418<=A&&A<=8420||8421<=A&&A<=8432||11503<=A&&A<=11505||A==11647||11744<=A&&A<=11775||12330<=A&&A<=12333||12334<=A&&A<=12335||12441<=A&&A<=12442||A==42607||42608<=A&&A<=42610||42612<=A&&A<=42621||42654<=A&&A<=42655||42736<=A&&A<=42737||A==43010||A==43014||A==43019||43045<=A&&A<=43046||43204<=A&&A<=43205||43232<=A&&A<=43249||43302<=A&&A<=43309||43335<=A&&A<=43345||43392<=A&&A<=43394||A==43443||43446<=A&&A<=43449||A==43452||A==43493||43561<=A&&A<=43566||43569<=A&&A<=43570||43573<=A&&A<=43574||A==43587||A==43596||A==43644||A==43696||43698<=A&&A<=43700||43703<=A&&A<=43704||43710<=A&&A<=43711||A==43713||43756<=A&&A<=43757||A==43766||A==44005||A==44008||A==44013||A==64286||65024<=A&&A<=65039||65056<=A&&A<=65071||65438<=A&&A<=65439||A==66045||A==66272||66422<=A&&A<=66426||68097<=A&&A<=68099||68101<=A&&A<=68102||68108<=A&&A<=68111||68152<=A&&A<=68154||A==68159||68325<=A&&A<=68326||A==69633||69688<=A&&A<=69702||69759<=A&&A<=69761||69811<=A&&A<=69814||69817<=A&&A<=69818||69888<=A&&A<=69890||69927<=A&&A<=69931||69933<=A&&A<=69940||A==70003||70016<=A&&A<=70017||70070<=A&&A<=70078||70090<=A&&A<=70092||70191<=A&&A<=70193||A==70196||70198<=A&&A<=70199||A==70206||A==70367||70371<=A&&A<=70378||70400<=A&&A<=70401||A==70460||A==70462||A==70464||A==70487||70502<=A&&A<=70508||70512<=A&&A<=70516||70712<=A&&A<=70719||70722<=A&&A<=70724||A==70726||A==70832||70835<=A&&A<=70840||A==70842||A==70845||70847<=A&&A<=70848||70850<=A&&A<=70851||A==71087||71090<=A&&A<=71093||71100<=A&&A<=71101||71103<=A&&A<=71104||71132<=A&&A<=71133||71219<=A&&A<=71226||A==71229||71231<=A&&A<=71232||A==71339||A==71341||71344<=A&&A<=71349||A==71351||71453<=A&&A<=71455||71458<=A&&A<=71461||71463<=A&&A<=71467||72193<=A&&A<=72198||72201<=A&&A<=72202||72243<=A&&A<=72248||72251<=A&&A<=72254||A==72263||72273<=A&&A<=72278||72281<=A&&A<=72283||72330<=A&&A<=72342||72344<=A&&A<=72345||72752<=A&&A<=72758||72760<=A&&A<=72765||A==72767||72850<=A&&A<=72871||72874<=A&&A<=72880||72882<=A&&A<=72883||72885<=A&&A<=72886||73009<=A&&A<=73014||A==73018||73020<=A&&A<=73021||73023<=A&&A<=73029||A==73031||92912<=A&&A<=92916||92976<=A&&A<=92982||94095<=A&&A<=94098||113821<=A&&A<=113822||A==119141||119143<=A&&A<=119145||119150<=A&&A<=119154||119163<=A&&A<=119170||119173<=A&&A<=119179||119210<=A&&A<=119213||119362<=A&&A<=119364||121344<=A&&A<=121398||121403<=A&&A<=121452||A==121461||A==121476||121499<=A&&A<=121503||121505<=A&&A<=121519||122880<=A&&A<=122886||122888<=A&&A<=122904||122907<=A&&A<=122913||122915<=A&&A<=122916||122918<=A&&A<=1
22922||125136<=A&&A<=125142||125252<=A&&A<=125258||917536<=A&&A<=917631||917760<=A&&A<=917999?i:127462<=A&&A<=127487?n:A==2307||A==2363||2366<=A&&A<=2368||2377<=A&&A<=2380||2382<=A&&A<=2383||2434<=A&&A<=2435||2495<=A&&A<=2496||2503<=A&&A<=2504||2507<=A&&A<=2508||A==2563||2622<=A&&A<=2624||A==2691||2750<=A&&A<=2752||A==2761||2763<=A&&A<=2764||2818<=A&&A<=2819||A==2880||2887<=A&&A<=2888||2891<=A&&A<=2892||A==3007||3009<=A&&A<=3010||3014<=A&&A<=3016||3018<=A&&A<=3020||3073<=A&&A<=3075||3137<=A&&A<=3140||3202<=A&&A<=3203||A==3262||3264<=A&&A<=3265||3267<=A&&A<=3268||3271<=A&&A<=3272||3274<=A&&A<=3275||3330<=A&&A<=3331||3391<=A&&A<=3392||3398<=A&&A<=3400||3402<=A&&A<=3404||3458<=A&&A<=3459||3536<=A&&A<=3537||3544<=A&&A<=3550||3570<=A&&A<=3571||A==3635||A==3763||3902<=A&&A<=3903||A==3967||A==4145||4155<=A&&A<=4156||4182<=A&&A<=4183||A==4228||A==6070||6078<=A&&A<=6085||6087<=A&&A<=6088||6435<=A&&A<=6438||6441<=A&&A<=6443||6448<=A&&A<=6449||6451<=A&&A<=6456||6681<=A&&A<=6682||A==6741||A==6743||6765<=A&&A<=6770||A==6916||A==6965||A==6971||6973<=A&&A<=6977||6979<=A&&A<=6980||A==7042||A==7073||7078<=A&&A<=7079||A==7082||A==7143||7146<=A&&A<=7148||A==7150||7154<=A&&A<=7155||7204<=A&&A<=7211||7220<=A&&A<=7221||A==7393||7410<=A&&A<=7411||A==7415||43043<=A&&A<=43044||A==43047||43136<=A&&A<=43137||43188<=A&&A<=43203||43346<=A&&A<=43347||A==43395||43444<=A&&A<=43445||43450<=A&&A<=43451||43453<=A&&A<=43456||43567<=A&&A<=43568||43571<=A&&A<=43572||A==43597||A==43755||43758<=A&&A<=43759||A==43765||44003<=A&&A<=44004||44006<=A&&A<=44007||44009<=A&&A<=44010||A==44012||A==69632||A==69634||A==69762||69808<=A&&A<=69810||69815<=A&&A<=69816||A==69932||A==70018||70067<=A&&A<=70069||70079<=A&&A<=70080||70188<=A&&A<=70190||70194<=A&&A<=70195||A==70197||70368<=A&&A<=70370||70402<=A&&A<=70403||A==70463||70465<=A&&A<=70468||70471<=A&&A<=70472||70475<=A&&A<=70477||70498<=A&&A<=70499||70709<=A&&A<=70711||70720<=A&&A<=70721||A==70725||70833<=A&&A<=70834||A==70841||70843<=A&&A<=70844||A==70846||A==70849||71088<=A&&A<=71089||71096<=A&&A<=71099||A==71102||71216<=A&&A<=71218||71227<=A&&A<=71228||A==71230||A==71340||71342<=A&&A<=71343||A==71350||71456<=A&&A<=71457||A==71462||72199<=A&&A<=72200||A==72249||72279<=A&&A<=72280||A==72343||A==72751||A==72766||A==72873||A==72881||A==72884||94033<=A&&A<=94078||A==119142||A==119149?s:4352<=A&&A<=4447||43360<=A&&A<=43388?o:4448<=A&&A<=4519||55216<=A&&A<=55238?a:4520<=A&&A<=4607||55243<=A&&A<=55291?l:A==44032||A==44060||A==44088||A==44116||A==44144||A==44172||A==44200||A==44228||A==44256||A==44284||A==44312||A==44340||A==44368||A==44396||A==44424||A==44452||A==44480||A==44508||A==44536||A==44564||A==44592||A==44620||A==44648||A==44676||A==44704||A==44732||A==44760||A==44788||A==44816||A==44844||A==44872||A==44900||A==44928||A==44956||A==44984||A==45012||A==45040||A==45068||A==45096||A==45124||A==45152||A==45180||A==45208||A==45236||A==45264||A==45292||A==45320||A==45348||A==45376||A==45404||A==45432||A==45460||A==45488||A==45516||A==45544||A==45572||A==45600||A==45628||A==45656||A==45684||A==45712||A==45740||A==45768||A==45796||A==45824||A==45852||A==45880||A==45908||A==45936||A==45964||A==45992||A==46020||A==46048||A==46076||A==46104||A==46132||A==46160||A==46188||A==46216||A==46244||A==46272||A==46300||A==46328||A==46356||A==46384||A==46412||A==46440||A==46468||A==46496||A==46524||A==46552||A==46580||A==46608||A==46636||A==46664||A==46692||A==46720||A==46748||A==46776||A==46804||A==46832||A==46860||A==46888||A==46916||A==46944||A==46972||A==47e3||A==47028||A==47056||A==47084||A==47112||A==
47140||A==47168||A==47196||A==47224||A==47252||A==47280||A==47308||A==47336||A==47364||A==47392||A==47420||A==47448||A==47476||A==47504||A==47532||A==47560||A==47588||A==47616||A==47644||A==47672||A==47700||A==47728||A==47756||A==47784||A==47812||A==47840||A==47868||A==47896||A==47924||A==47952||A==47980||A==48008||A==48036||A==48064||A==48092||A==48120||A==48148||A==48176||A==48204||A==48232||A==48260||A==48288||A==48316||A==48344||A==48372||A==48400||A==48428||A==48456||A==48484||A==48512||A==48540||A==48568||A==48596||A==48624||A==48652||A==48680||A==48708||A==48736||A==48764||A==48792||A==48820||A==48848||A==48876||A==48904||A==48932||A==48960||A==48988||A==49016||A==49044||A==49072||A==49100||A==49128||A==49156||A==49184||A==49212||A==49240||A==49268||A==49296||A==49324||A==49352||A==49380||A==49408||A==49436||A==49464||A==49492||A==49520||A==49548||A==49576||A==49604||A==49632||A==49660||A==49688||A==49716||A==49744||A==49772||A==49800||A==49828||A==49856||A==49884||A==49912||A==49940||A==49968||A==49996||A==50024||A==50052||A==50080||A==50108||A==50136||A==50164||A==50192||A==50220||A==50248||A==50276||A==50304||A==50332||A==50360||A==50388||A==50416||A==50444||A==50472||A==50500||A==50528||A==50556||A==50584||A==50612||A==50640||A==50668||A==50696||A==50724||A==50752||A==50780||A==50808||A==50836||A==50864||A==50892||A==50920||A==50948||A==50976||A==51004||A==51032||A==51060||A==51088||A==51116||A==51144||A==51172||A==51200||A==51228||A==51256||A==51284||A==51312||A==51340||A==51368||A==51396||A==51424||A==51452||A==51480||A==51508||A==51536||A==51564||A==51592||A==51620||A==51648||A==51676||A==51704||A==51732||A==51760||A==51788||A==51816||A==51844||A==51872||A==51900||A==51928||A==51956||A==51984||A==52012||A==52040||A==52068||A==52096||A==52124||A==52152||A==52180||A==52208||A==52236||A==52264||A==52292||A==52320||A==52348||A==52376||A==52404||A==52432||A==52460||A==52488||A==52516||A==52544||A==52572||A==52600||A==52628||A==52656||A==52684||A==52712||A==52740||A==52768||A==52796||A==52824||A==52852||A==52880||A==52908||A==52936||A==52964||A==52992||A==53020||A==53048||A==53076||A==53104||A==53132||A==53160||A==53188||A==53216||A==53244||A==53272||A==53300||A==53328||A==53356||A==53384||A==53412||A==53440||A==53468||A==53496||A==53524||A==53552||A==53580||A==53608||A==53636||A==53664||A==53692||A==53720||A==53748||A==53776||A==53804||A==53832||A==53860||A==53888||A==53916||A==53944||A==53972||A==54e3||A==54028||A==54056||A==54084||A==54112||A==54140||A==54168||A==54196||A==54224||A==54252||A==54280||A==54308||A==54336||A==54364||A==54392||A==54420||A==54448||A==54476||A==54504||A==54532||A==54560||A==54588||A==54616||A==54644||A==54672||A==54700||A==54728||A==54756||A==54784||A==54812||A==54840||A==54868||A==54896||A==54924||A==54952||A==54980||A==55008||A==55036||A==55064||A==55092||A==55120||A==55148||A==55176?c:44033<=A&&A<=44059||44061<=A&&A<=44087||44089<=A&&A<=44115||44117<=A&&A<=44143||44145<=A&&A<=44171||44173<=A&&A<=44199||44201<=A&&A<=44227||44229<=A&&A<=44255||44257<=A&&A<=44283||44285<=A&&A<=44311||44313<=A&&A<=44339||44341<=A&&A<=44367||44369<=A&&A<=44395||44397<=A&&A<=44423||44425<=A&&A<=44451||44453<=A&&A<=44479||44481<=A&&A<=44507||44509<=A&&A<=44535||44537<=A&&A<=44563||44565<=A&&A<=44591||44593<=A&&A<=44619||44621<=A&&A<=44647||44649<=A&&A<=44675||44677<=A&&A<=44703||44705<=A&&A<=44731||44733<=A&&A<=44759||44761<=A&&A<=44787||44789<=A&&A<=44815||44817<=A&&A<=44843||44845<=A&&A<=44871||44873<=A&&A<=44899||44901<=A&&A<=44927||44929<=A&&A<=44955||44957<=A&&A<=44983
||44985<=A&&A<=45011||45013<=A&&A<=45039||45041<=A&&A<=45067||45069<=A&&A<=45095||45097<=A&&A<=45123||45125<=A&&A<=45151||45153<=A&&A<=45179||45181<=A&&A<=45207||45209<=A&&A<=45235||45237<=A&&A<=45263||45265<=A&&A<=45291||45293<=A&&A<=45319||45321<=A&&A<=45347||45349<=A&&A<=45375||45377<=A&&A<=45403||45405<=A&&A<=45431||45433<=A&&A<=45459||45461<=A&&A<=45487||45489<=A&&A<=45515||45517<=A&&A<=45543||45545<=A&&A<=45571||45573<=A&&A<=45599||45601<=A&&A<=45627||45629<=A&&A<=45655||45657<=A&&A<=45683||45685<=A&&A<=45711||45713<=A&&A<=45739||45741<=A&&A<=45767||45769<=A&&A<=45795||45797<=A&&A<=45823||45825<=A&&A<=45851||45853<=A&&A<=45879||45881<=A&&A<=45907||45909<=A&&A<=45935||45937<=A&&A<=45963||45965<=A&&A<=45991||45993<=A&&A<=46019||46021<=A&&A<=46047||46049<=A&&A<=46075||46077<=A&&A<=46103||46105<=A&&A<=46131||46133<=A&&A<=46159||46161<=A&&A<=46187||46189<=A&&A<=46215||46217<=A&&A<=46243||46245<=A&&A<=46271||46273<=A&&A<=46299||46301<=A&&A<=46327||46329<=A&&A<=46355||46357<=A&&A<=46383||46385<=A&&A<=46411||46413<=A&&A<=46439||46441<=A&&A<=46467||46469<=A&&A<=46495||46497<=A&&A<=46523||46525<=A&&A<=46551||46553<=A&&A<=46579||46581<=A&&A<=46607||46609<=A&&A<=46635||46637<=A&&A<=46663||46665<=A&&A<=46691||46693<=A&&A<=46719||46721<=A&&A<=46747||46749<=A&&A<=46775||46777<=A&&A<=46803||46805<=A&&A<=46831||46833<=A&&A<=46859||46861<=A&&A<=46887||46889<=A&&A<=46915||46917<=A&&A<=46943||46945<=A&&A<=46971||46973<=A&&A<=46999||47001<=A&&A<=47027||47029<=A&&A<=47055||47057<=A&&A<=47083||47085<=A&&A<=47111||47113<=A&&A<=47139||47141<=A&&A<=47167||47169<=A&&A<=47195||47197<=A&&A<=47223||47225<=A&&A<=47251||47253<=A&&A<=47279||47281<=A&&A<=47307||47309<=A&&A<=47335||47337<=A&&A<=47363||47365<=A&&A<=47391||47393<=A&&A<=47419||47421<=A&&A<=47447||47449<=A&&A<=47475||47477<=A&&A<=47503||47505<=A&&A<=47531||47533<=A&&A<=47559||47561<=A&&A<=47587||47589<=A&&A<=47615||47617<=A&&A<=47643||47645<=A&&A<=47671||47673<=A&&A<=47699||47701<=A&&A<=47727||47729<=A&&A<=47755||47757<=A&&A<=47783||47785<=A&&A<=47811||47813<=A&&A<=47839||47841<=A&&A<=47867||47869<=A&&A<=47895||47897<=A&&A<=47923||47925<=A&&A<=47951||47953<=A&&A<=47979||47981<=A&&A<=48007||48009<=A&&A<=48035||48037<=A&&A<=48063||48065<=A&&A<=48091||48093<=A&&A<=48119||48121<=A&&A<=48147||48149<=A&&A<=48175||48177<=A&&A<=48203||48205<=A&&A<=48231||48233<=A&&A<=48259||48261<=A&&A<=48287||48289<=A&&A<=48315||48317<=A&&A<=48343||48345<=A&&A<=48371||48373<=A&&A<=48399||48401<=A&&A<=48427||48429<=A&&A<=48455||48457<=A&&A<=48483||48485<=A&&A<=48511||48513<=A&&A<=48539||48541<=A&&A<=48567||48569<=A&&A<=48595||48597<=A&&A<=48623||48625<=A&&A<=48651||48653<=A&&A<=48679||48681<=A&&A<=48707||48709<=A&&A<=48735||48737<=A&&A<=48763||48765<=A&&A<=48791||48793<=A&&A<=48819||48821<=A&&A<=48847||48849<=A&&A<=48875||48877<=A&&A<=48903||48905<=A&&A<=48931||48933<=A&&A<=48959||48961<=A&&A<=48987||48989<=A&&A<=49015||49017<=A&&A<=49043||49045<=A&&A<=49071||49073<=A&&A<=49099||49101<=A&&A<=49127||49129<=A&&A<=49155||49157<=A&&A<=49183||49185<=A&&A<=49211||49213<=A&&A<=49239||49241<=A&&A<=49267||49269<=A&&A<=49295||49297<=A&&A<=49323||49325<=A&&A<=49351||49353<=A&&A<=49379||49381<=A&&A<=49407||49409<=A&&A<=49435||49437<=A&&A<=49463||49465<=A&&A<=49491||49493<=A&&A<=49519||49521<=A&&A<=49547||49549<=A&&A<=49575||49577<=A&&A<=49603||49605<=A&&A<=49631||49633<=A&&A<=49659||49661<=A&&A<=49687||49689<=A&&A<=49715||49717<=A&&A<=49743||49745<=A&&A<=49771||49773<=A&&A<=49799||49801<=A&&A<=49827||49829<=A&&A<=49855||49857<=A&&A<=49883||49885<=A&&A<=49911||49913<=A&&A<=49939||49941<=A&&A<=
49967||49969<=A&&A<=49995||49997<=A&&A<=50023||50025<=A&&A<=50051||50053<=A&&A<=50079||50081<=A&&A<=50107||50109<=A&&A<=50135||50137<=A&&A<=50163||50165<=A&&A<=50191||50193<=A&&A<=50219||50221<=A&&A<=50247||50249<=A&&A<=50275||50277<=A&&A<=50303||50305<=A&&A<=50331||50333<=A&&A<=50359||50361<=A&&A<=50387||50389<=A&&A<=50415||50417<=A&&A<=50443||50445<=A&&A<=50471||50473<=A&&A<=50499||50501<=A&&A<=50527||50529<=A&&A<=50555||50557<=A&&A<=50583||50585<=A&&A<=50611||50613<=A&&A<=50639||50641<=A&&A<=50667||50669<=A&&A<=50695||50697<=A&&A<=50723||50725<=A&&A<=50751||50753<=A&&A<=50779||50781<=A&&A<=50807||50809<=A&&A<=50835||50837<=A&&A<=50863||50865<=A&&A<=50891||50893<=A&&A<=50919||50921<=A&&A<=50947||50949<=A&&A<=50975||50977<=A&&A<=51003||51005<=A&&A<=51031||51033<=A&&A<=51059||51061<=A&&A<=51087||51089<=A&&A<=51115||51117<=A&&A<=51143||51145<=A&&A<=51171||51173<=A&&A<=51199||51201<=A&&A<=51227||51229<=A&&A<=51255||51257<=A&&A<=51283||51285<=A&&A<=51311||51313<=A&&A<=51339||51341<=A&&A<=51367||51369<=A&&A<=51395||51397<=A&&A<=51423||51425<=A&&A<=51451||51453<=A&&A<=51479||51481<=A&&A<=51507||51509<=A&&A<=51535||51537<=A&&A<=51563||51565<=A&&A<=51591||51593<=A&&A<=51619||51621<=A&&A<=51647||51649<=A&&A<=51675||51677<=A&&A<=51703||51705<=A&&A<=51731||51733<=A&&A<=51759||51761<=A&&A<=51787||51789<=A&&A<=51815||51817<=A&&A<=51843||51845<=A&&A<=51871||51873<=A&&A<=51899||51901<=A&&A<=51927||51929<=A&&A<=51955||51957<=A&&A<=51983||51985<=A&&A<=52011||52013<=A&&A<=52039||52041<=A&&A<=52067||52069<=A&&A<=52095||52097<=A&&A<=52123||52125<=A&&A<=52151||52153<=A&&A<=52179||52181<=A&&A<=52207||52209<=A&&A<=52235||52237<=A&&A<=52263||52265<=A&&A<=52291||52293<=A&&A<=52319||52321<=A&&A<=52347||52349<=A&&A<=52375||52377<=A&&A<=52403||52405<=A&&A<=52431||52433<=A&&A<=52459||52461<=A&&A<=52487||52489<=A&&A<=52515||52517<=A&&A<=52543||52545<=A&&A<=52571||52573<=A&&A<=52599||52601<=A&&A<=52627||52629<=A&&A<=52655||52657<=A&&A<=52683||52685<=A&&A<=52711||52713<=A&&A<=52739||52741<=A&&A<=52767||52769<=A&&A<=52795||52797<=A&&A<=52823||52825<=A&&A<=52851||52853<=A&&A<=52879||52881<=A&&A<=52907||52909<=A&&A<=52935||52937<=A&&A<=52963||52965<=A&&A<=52991||52993<=A&&A<=53019||53021<=A&&A<=53047||53049<=A&&A<=53075||53077<=A&&A<=53103||53105<=A&&A<=53131||53133<=A&&A<=53159||53161<=A&&A<=53187||53189<=A&&A<=53215||53217<=A&&A<=53243||53245<=A&&A<=53271||53273<=A&&A<=53299||53301<=A&&A<=53327||53329<=A&&A<=53355||53357<=A&&A<=53383||53385<=A&&A<=53411||53413<=A&&A<=53439||53441<=A&&A<=53467||53469<=A&&A<=53495||53497<=A&&A<=53523||53525<=A&&A<=53551||53553<=A&&A<=53579||53581<=A&&A<=53607||53609<=A&&A<=53635||53637<=A&&A<=53663||53665<=A&&A<=53691||53693<=A&&A<=53719||53721<=A&&A<=53747||53749<=A&&A<=53775||53777<=A&&A<=53803||53805<=A&&A<=53831||53833<=A&&A<=53859||53861<=A&&A<=53887||53889<=A&&A<=53915||53917<=A&&A<=53943||53945<=A&&A<=53971||53973<=A&&A<=53999||54001<=A&&A<=54027||54029<=A&&A<=54055||54057<=A&&A<=54083||54085<=A&&A<=54111||54113<=A&&A<=54139||54141<=A&&A<=54167||54169<=A&&A<=54195||54197<=A&&A<=54223||54225<=A&&A<=54251||54253<=A&&A<=54279||54281<=A&&A<=54307||54309<=A&&A<=54335||54337<=A&&A<=54363||54365<=A&&A<=54391||54393<=A&&A<=54419||54421<=A&&A<=54447||54449<=A&&A<=54475||54477<=A&&A<=54503||54505<=A&&A<=54531||54533<=A&&A<=54559||54561<=A&&A<=54587||54589<=A&&A<=54615||54617<=A&&A<=54643||54645<=A&&A<=54671||54673<=A&&A<=54699||54701<=A&&A<=54727||54729<=A&&A<=54755||54757<=A&&A<=54783||54785<=A&&A<=54811||54813<=A&&A<=54839||54841<=A&&A<=54867||54869<=A&&A<=54895||54897<=A&&A<=54923||54925<=A
&&A<=54951||54953<=A&&A<=54979||54981<=A&&A<=55007||55009<=A&&A<=55035||55037<=A&&A<=55063||55065<=A&&A<=55091||55093<=A&&A<=55119||55121<=A&&A<=55147||55149<=A&&A<=55175||55177<=A&&A<=55203?u:A==9757||A==9977||9994<=A&&A<=9997||A==127877||127938<=A&&A<=127940||A==127943||127946<=A&&A<=127948||128066<=A&&A<=128067||128070<=A&&A<=128080||A==128110||128112<=A&&A<=128120||A==128124||128129<=A&&A<=128131||128133<=A&&A<=128135||A==128170||128372<=A&&A<=128373||A==128378||A==128400||128405<=A&&A<=128406||128581<=A&&A<=128583||128587<=A&&A<=128591||A==128675||128692<=A&&A<=128694||A==128704||A==128716||129304<=A&&A<=129308||129310<=A&&A<=129311||A==129318||129328<=A&&A<=129337||129341<=A&&A<=129342||129489<=A&&A<=129501?h:127995<=A&&A<=127999?p:A==8205?m:A==9792||A==9794||9877<=A&&A<=9878||A==9992||A==10084||A==127752||A==127806||A==127859||A==127891||A==127908||A==127912||A==127979||A==127981||A==128139||128187<=A&&A<=128188||A==128295||A==128300||A==128488||A==128640||A==128658?y:128102<=A&&A<=128105?Q:g}return this}typeof rB!=\"undefined\"&&rB.exports&&(rB.exports=ORe)});var X5=w((Yat,V5)=>{var MRe=/^(.*?)(\\x1b\\[[^m]+m|\\x1b\\]8;;.*?(\\x1b\\\\|\\u0007))/,iB;function URe(){if(iB)return iB;if(typeof Intl.Segmenter!=\"undefined\"){let t=new Intl.Segmenter(\"en\",{granularity:\"grapheme\"});return iB=e=>Array.from(t.segment(e),({segment:r})=>r)}else{let t=_5(),e=new t;return iB=r=>e.splitGraphemes(r)}}V5.exports=(t,e=0,r=t.length)=>{if(e<0||r<0)throw new RangeError(\"Negative indices aren't supported by this implementation\");let i=r-e,n=\"\",s=0,o=0;for(;t.length>0;){let a=t.match(MRe)||[t,t,void 0],l=URe()(a[1]),c=Math.min(e-s,l.length);l=l.slice(c);let u=Math.min(i-o,l.length);n+=l.slice(0,u).join(\"\"),s+=c,o+=u,typeof a[2]!=\"undefined\"&&(n+=a[2]),t=t.slice(a[0].length)}return n}});var sf=w((EAt,u6)=>{\"use strict\";var g6=new Map([[\"C\",\"cwd\"],[\"f\",\"file\"],[\"z\",\"gzip\"],[\"P\",\"preservePaths\"],[\"U\",\"unlink\"],[\"strip-components\",\"strip\"],[\"stripComponents\",\"strip\"],[\"keep-newer\",\"newer\"],[\"keepNewer\",\"newer\"],[\"keep-newer-files\",\"newer\"],[\"keepNewerFiles\",\"newer\"],[\"k\",\"keep\"],[\"keep-existing\",\"keep\"],[\"keepExisting\",\"keep\"],[\"m\",\"noMtime\"],[\"no-mtime\",\"noMtime\"],[\"p\",\"preserveOwner\"],[\"L\",\"follow\"],[\"h\",\"follow\"]]),mAt=u6.exports=t=>t?Object.keys(t).map(e=>[g6.has(e)?g6.get(e):e,t[e]]).reduce((e,r)=>(e[r[0]]=r[1],e),Object.create(null)):{}});var of=w((IAt,f6)=>{\"use strict\";var ZRe=require(\"events\"),h6=require(\"stream\"),qd=Bp(),p6=require(\"string_decoder\").StringDecoder,sA=Symbol(\"EOF\"),Jd=Symbol(\"maybeEmitEnd\"),hl=Symbol(\"emittedEnd\"),lB=Symbol(\"emittingEnd\"),cB=Symbol(\"closed\"),d6=Symbol(\"read\"),gD=Symbol(\"flush\"),C6=Symbol(\"flushChunk\"),Nn=Symbol(\"encoding\"),oA=Symbol(\"decoder\"),uB=Symbol(\"flowing\"),Wd=Symbol(\"paused\"),zd=Symbol(\"resume\"),pn=Symbol(\"bufferLength\"),m6=Symbol(\"bufferPush\"),fD=Symbol(\"bufferShift\"),_i=Symbol(\"objectMode\"),Vi=Symbol(\"destroyed\"),E6=global._MP_NO_ITERATOR_SYMBOLS_!==\"1\",$Re=E6&&Symbol.asyncIterator||Symbol(\"asyncIterator not implemented\"),eFe=E6&&Symbol.iterator||Symbol(\"iterator not implemented\"),I6=t=>t===\"end\"||t===\"finish\"||t===\"prefinish\",tFe=t=>t instanceof ArrayBuffer||typeof t==\"object\"&&t.constructor&&t.constructor.name===\"ArrayBuffer\"&&t.byteLength>=0,rFe=t=>!Buffer.isBuffer(t)&&ArrayBuffer.isView(t);f6.exports=class y6 extends h6{constructor(e){super();this[uB]=!1,this[Wd]=!1,this.pipes=new qd,this.buffer=new 
qd,this[_i]=e&&e.objectMode||!1,this[_i]?this[Nn]=null:this[Nn]=e&&e.encoding||null,this[Nn]===\"buffer\"&&(this[Nn]=null),this[oA]=this[Nn]?new p6(this[Nn]):null,this[sA]=!1,this[hl]=!1,this[lB]=!1,this[cB]=!1,this.writable=!0,this.readable=!0,this[pn]=0,this[Vi]=!1}get bufferLength(){return this[pn]}get encoding(){return this[Nn]}set encoding(e){if(this[_i])throw new Error(\"cannot set encoding in objectMode\");if(this[Nn]&&e!==this[Nn]&&(this[oA]&&this[oA].lastNeed||this[pn]))throw new Error(\"cannot change encoding\");this[Nn]!==e&&(this[oA]=e?new p6(e):null,this.buffer.length&&(this.buffer=this.buffer.map(r=>this[oA].write(r)))),this[Nn]=e}setEncoding(e){this.encoding=e}get objectMode(){return this[_i]}set objectMode(e){this[_i]=this[_i]||!!e}write(e,r,i){if(this[sA])throw new Error(\"write after end\");return this[Vi]?(this.emit(\"error\",Object.assign(new Error(\"Cannot call write after a stream was destroyed\"),{code:\"ERR_STREAM_DESTROYED\"})),!0):(typeof r==\"function\"&&(i=r,r=\"utf8\"),r||(r=\"utf8\"),!this[_i]&&!Buffer.isBuffer(e)&&(rFe(e)?e=Buffer.from(e.buffer,e.byteOffset,e.byteLength):tFe(e)?e=Buffer.from(e):typeof e!=\"string\"&&(this.objectMode=!0)),!this.objectMode&&!e.length?(this[pn]!==0&&this.emit(\"readable\"),i&&i(),this.flowing):(typeof e==\"string\"&&!this[_i]&&!(r===this[Nn]&&!this[oA].lastNeed)&&(e=Buffer.from(e,r)),Buffer.isBuffer(e)&&this[Nn]&&(e=this[oA].write(e)),this.flowing?(this[pn]!==0&&this[gD](!0),this.emit(\"data\",e)):this[m6](e),this[pn]!==0&&this.emit(\"readable\"),i&&i(),this.flowing))}read(e){if(this[Vi])return null;try{return this[pn]===0||e===0||e>this[pn]?null:(this[_i]&&(e=null),this.buffer.length>1&&!this[_i]&&(this.encoding?this.buffer=new qd([Array.from(this.buffer).join(\"\")]):this.buffer=new qd([Buffer.concat(Array.from(this.buffer),this[pn])])),this[d6](e||null,this.buffer.head.value))}finally{this[Jd]()}}[d6](e,r){return e===r.length||e===null?this[fD]():(this.buffer.head.value=r.slice(e),r=r.slice(0,e),this[pn]-=e),this.emit(\"data\",r),!this.buffer.length&&!this[sA]&&this.emit(\"drain\"),r}end(e,r,i){return typeof e==\"function\"&&(i=e,e=null),typeof r==\"function\"&&(i=r,r=\"utf8\"),e&&this.write(e,r),i&&this.once(\"end\",i),this[sA]=!0,this.writable=!1,(this.flowing||!this[Wd])&&this[Jd](),this}[zd](){this[Vi]||(this[Wd]=!1,this[uB]=!0,this.emit(\"resume\"),this.buffer.length?this[gD]():this[sA]?this[Jd]():this.emit(\"drain\"))}resume(){return this[zd]()}pause(){this[uB]=!1,this[Wd]=!0}get destroyed(){return this[Vi]}get flowing(){return this[uB]}get paused(){return this[Wd]}[m6](e){return this[_i]?this[pn]+=1:this[pn]+=e.length,this.buffer.push(e)}[fD](){return this.buffer.length&&(this[_i]?this[pn]-=1:this[pn]-=this.buffer.head.value.length),this.buffer.shift()}[gD](e){do;while(this[C6](this[fD]()));!e&&!this.buffer.length&&!this[sA]&&this.emit(\"drain\")}[C6](e){return e?(this.emit(\"data\",e),this.flowing):!1}pipe(e,r){if(this[Vi])return;let i=this[hl];r=r||{},e===process.stdout||e===process.stderr?r.end=!1:r.end=r.end!==!1;let n={dest:e,opts:r,ondrain:s=>this[zd]()};return this.pipes.push(n),e.on(\"drain\",n.ondrain),this[zd](),i&&n.opts.end&&n.dest.end(),e}addListener(e,r){return this.on(e,r)}on(e,r){try{return super.on(e,r)}finally{e===\"data\"&&!this.pipes.length&&!this.flowing?this[zd]():I6(e)&&this[hl]&&(super.emit(e),this.removeAllListeners(e))}}get emittedEnd(){return 
this[hl]}[Jd](){!this[lB]&&!this[hl]&&!this[Vi]&&this.buffer.length===0&&this[sA]&&(this[lB]=!0,this.emit(\"end\"),this.emit(\"prefinish\"),this.emit(\"finish\"),this[cB]&&this.emit(\"close\"),this[lB]=!1)}emit(e,r){if(e!==\"error\"&&e!==\"close\"&&e!==Vi&&this[Vi])return;if(e===\"data\"){if(!r)return;this.pipes.length&&this.pipes.forEach(n=>n.dest.write(r)===!1&&this.pause())}else if(e===\"end\"){if(this[hl]===!0)return;this[hl]=!0,this.readable=!1,this[oA]&&(r=this[oA].end(),r&&(this.pipes.forEach(n=>n.dest.write(r)),super.emit(\"data\",r))),this.pipes.forEach(n=>{n.dest.removeListener(\"drain\",n.ondrain),n.opts.end&&n.dest.end()})}else if(e===\"close\"&&(this[cB]=!0,!this[hl]&&!this[Vi]))return;let i=new Array(arguments.length);if(i[0]=e,i[1]=r,arguments.length>2)for(let n=2;n<arguments.length;n++)i[n]=arguments[n];try{return super.emit.apply(this,i)}finally{I6(e)?this.removeAllListeners(e):this[Jd]()}}collect(){let e=[];this[_i]||(e.dataLength=0);let r=this.promise();return this.on(\"data\",i=>{e.push(i),this[_i]||(e.dataLength+=i.length)}),r.then(()=>e)}concat(){return this[_i]?Promise.reject(new Error(\"cannot concat in objectMode\")):this.collect().then(e=>this[_i]?Promise.reject(new Error(\"cannot concat in objectMode\")):this[Nn]?e.join(\"\"):Buffer.concat(e,e.dataLength))}promise(){return new Promise((e,r)=>{this.on(Vi,()=>r(new Error(\"stream destroyed\"))),this.on(\"end\",()=>e()),this.on(\"error\",i=>r(i))})}[$Re](){return{next:()=>{let r=this.read();if(r!==null)return Promise.resolve({done:!1,value:r});if(this[sA])return Promise.resolve({done:!0});let i=null,n=null,s=c=>{this.removeListener(\"data\",o),this.removeListener(\"end\",a),n(c)},o=c=>{this.removeListener(\"error\",s),this.removeListener(\"end\",a),this.pause(),i({value:c,done:!!this[sA]})},a=()=>{this.removeListener(\"error\",s),this.removeListener(\"data\",o),i({done:!0})},l=()=>s(new Error(\"stream destroyed\"));return new Promise((c,u)=>{n=u,i=c,this.once(Vi,l),this.once(\"error\",s),this.once(\"end\",a),this.once(\"data\",o)})}}}[eFe](){return{next:()=>{let r=this.read();return{value:r,done:r===null}}}}destroy(e){return this[Vi]?(e?this.emit(\"error\",e):this.emit(Vi),this):(this[Vi]=!0,this.buffer=new qd,this[pn]=0,typeof this.close==\"function\"&&!this[cB]&&this.close(),e?this.emit(\"error\",e):this.emit(Vi),this)}static isStream(e){return!!e&&(e instanceof y6||e instanceof h6||e instanceof ZRe&&(typeof e.pipe==\"function\"||typeof e.write==\"function\"&&typeof e.end==\"function\"))}}});var B6=w((yAt,w6)=>{var 
iFe=require(\"zlib\").constants||{ZLIB_VERNUM:4736};w6.exports=Object.freeze(Object.assign(Object.create(null),{Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_MEM_ERROR:-4,Z_BUF_ERROR:-5,Z_VERSION_ERROR:-6,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,DEFLATE:1,INFLATE:2,GZIP:3,GUNZIP:4,DEFLATERAW:5,INFLATERAW:6,UNZIP:7,BROTLI_DECODE:8,BROTLI_ENCODE:9,Z_MIN_WINDOWBITS:8,Z_MAX_WINDOWBITS:15,Z_DEFAULT_WINDOWBITS:15,Z_MIN_CHUNK:64,Z_MAX_CHUNK:Infinity,Z_DEFAULT_CHUNK:16384,Z_MIN_MEMLEVEL:1,Z_MAX_MEMLEVEL:9,Z_DEFAULT_MEMLEVEL:8,Z_MIN_LEVEL:-1,Z_MAX_LEVEL:9,Z_DEFAULT_LEVEL:-1,BROTLI_OPERATION_PROCESS:0,BROTLI_OPERATION_FLUSH:1,BROTLI_OPERATION_FINISH:2,BROTLI_OPERATION_EMIT_METADATA:3,BROTLI_MODE_GENERIC:0,BROTLI_MODE_TEXT:1,BROTLI_MODE_FONT:2,BROTLI_DEFAULT_MODE:0,BROTLI_MIN_QUALITY:0,BROTLI_MAX_QUALITY:11,BROTLI_DEFAULT_QUALITY:11,BROTLI_MIN_WINDOW_BITS:10,BROTLI_MAX_WINDOW_BITS:24,BROTLI_LARGE_MAX_WINDOW_BITS:30,BROTLI_DEFAULT_WINDOW:22,BROTLI_MIN_INPUT_BLOCK_BITS:16,BROTLI_MAX_INPUT_BLOCK_BITS:24,BROTLI_PARAM_MODE:0,BROTLI_PARAM_QUALITY:1,BROTLI_PARAM_LGWIN:2,BROTLI_PARAM_LGBLOCK:3,BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING:4,BROTLI_PARAM_SIZE_HINT:5,BROTLI_PARAM_LARGE_WINDOW:6,BROTLI_PARAM_NPOSTFIX:7,BROTLI_PARAM_NDIRECT:8,BROTLI_DECODER_RESULT_ERROR:0,BROTLI_DECODER_RESULT_SUCCESS:1,BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT:2,BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT:3,BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION:0,BROTLI_DECODER_PARAM_LARGE_WINDOW:1,BROTLI_DECODER_NO_ERROR:0,BROTLI_DECODER_SUCCESS:1,BROTLI_DECODER_NEEDS_MORE_INPUT:2,BROTLI_DECODER_NEEDS_MORE_OUTPUT:3,BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_NIBBLE:-1,BROTLI_DECODER_ERROR_FORMAT_RESERVED:-2,BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_META_NIBBLE:-3,BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_ALPHABET:-4,BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_SAME:-5,BROTLI_DECODER_ERROR_FORMAT_CL_SPACE:-6,BROTLI_DECODER_ERROR_FORMAT_HUFFMAN_SPACE:-7,BROTLI_DECODER_ERROR_FORMAT_CONTEXT_MAP_REPEAT:-8,BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_1:-9,BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_2:-10,BROTLI_DECODER_ERROR_FORMAT_TRANSFORM:-11,BROTLI_DECODER_ERROR_FORMAT_DICTIONARY:-12,BROTLI_DECODER_ERROR_FORMAT_WINDOW_BITS:-13,BROTLI_DECODER_ERROR_FORMAT_PADDING_1:-14,BROTLI_DECODER_ERROR_FORMAT_PADDING_2:-15,BROTLI_DECODER_ERROR_FORMAT_DISTANCE:-16,BROTLI_DECODER_ERROR_DICTIONARY_NOT_SET:-19,BROTLI_DECODER_ERROR_INVALID_ARGUMENTS:-20,BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MODES:-21,BROTLI_DECODER_ERROR_ALLOC_TREE_GROUPS:-22,BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MAP:-25,BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_1:-26,BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_2:-27,BROTLI_DECODER_ERROR_ALLOC_BLOCK_TYPE_TREES:-30,BROTLI_DECODER_ERROR_UNREACHABLE:-31},iFe))});var wD=w(ss=>{\"use strict\";var hD=require(\"assert\"),pl=require(\"buffer\").Buffer,b6=require(\"zlib\"),Wc=ss.constants=B6(),nFe=of(),Q6=pl.concat,zc=Symbol(\"_superWrite\"),_d=class extends Error{constructor(e){super(\"zlib: \"+e.message);this.code=e.code,this.errno=e.errno,this.code||(this.code=\"ZLIB_ERROR\"),this.message=\"zlib: \"+e.message,Error.captureStackTrace(this,this.constructor)}get 
name(){return\"ZlibError\"}},sFe=Symbol(\"opts\"),Vd=Symbol(\"flushFlag\"),v6=Symbol(\"finishFlushFlag\"),pD=Symbol(\"fullFlushFlag\"),pr=Symbol(\"handle\"),gB=Symbol(\"onError\"),af=Symbol(\"sawError\"),dD=Symbol(\"level\"),CD=Symbol(\"strategy\"),mD=Symbol(\"ended\"),wAt=Symbol(\"_defaultFullFlush\"),ED=class extends nFe{constructor(e,r){if(!e||typeof e!=\"object\")throw new TypeError(\"invalid options for ZlibBase constructor\");super(e);this[af]=!1,this[mD]=!1,this[sFe]=e,this[Vd]=e.flush,this[v6]=e.finishFlush;try{this[pr]=new b6[r](e)}catch(i){throw new _d(i)}this[gB]=i=>{this[af]||(this[af]=!0,this.close(),this.emit(\"error\",i))},this[pr].on(\"error\",i=>this[gB](new _d(i))),this.once(\"end\",()=>this.close)}close(){this[pr]&&(this[pr].close(),this[pr]=null,this.emit(\"close\"))}reset(){if(!this[af])return hD(this[pr],\"zlib binding closed\"),this[pr].reset()}flush(e){this.ended||(typeof e!=\"number\"&&(e=this[pD]),this.write(Object.assign(pl.alloc(0),{[Vd]:e})))}end(e,r,i){return e&&this.write(e,r),this.flush(this[v6]),this[mD]=!0,super.end(null,null,i)}get ended(){return this[mD]}write(e,r,i){if(typeof r==\"function\"&&(i=r,r=\"utf8\"),typeof e==\"string\"&&(e=pl.from(e,r)),this[af])return;hD(this[pr],\"zlib binding closed\");let n=this[pr]._handle,s=n.close;n.close=()=>{};let o=this[pr].close;this[pr].close=()=>{},pl.concat=c=>c;let a;try{let c=typeof e[Vd]==\"number\"?e[Vd]:this[Vd];a=this[pr]._processChunk(e,c),pl.concat=Q6}catch(c){pl.concat=Q6,this[gB](new _d(c))}finally{this[pr]&&(this[pr]._handle=n,n.close=s,this[pr].close=o,this[pr].removeAllListeners(\"error\"))}this[pr]&&this[pr].on(\"error\",c=>this[gB](new _d(c)));let l;if(a)if(Array.isArray(a)&&a.length>0){l=this[zc](pl.from(a[0]));for(let c=1;c<a.length;c++)l=this[zc](a[c])}else l=this[zc](pl.from(a));return i&&i(),l}[zc](e){return super.write(e)}},dl=class extends ED{constructor(e,r){e=e||{},e.flush=e.flush||Wc.Z_NO_FLUSH,e.finishFlush=e.finishFlush||Wc.Z_FINISH,super(e,r),this[pD]=Wc.Z_FULL_FLUSH,this[dD]=e.level,this[CD]=e.strategy}params(e,r){if(!this[af]){if(!this[pr])throw new Error(\"cannot switch params when binding is closed\");if(!this[pr].params)throw new Error(\"not supported in this implementation\");if(this[dD]!==e||this[CD]!==r){this.flush(Wc.Z_SYNC_FLUSH),hD(this[pr],\"zlib binding closed\");let i=this[pr].flush;this[pr].flush=(n,s)=>{this.flush(n),s()};try{this[pr].params(e,r)}finally{this[pr].flush=i}this[pr]&&(this[dD]=e,this[CD]=r)}}}},S6=class extends dl{constructor(e){super(e,\"Deflate\")}},k6=class extends dl{constructor(e){super(e,\"Inflate\")}},ID=Symbol(\"_portable\"),x6=class extends dl{constructor(e){super(e,\"Gzip\");this[ID]=e&&!!e.portable}[zc](e){return this[ID]?(this[ID]=!1,e[9]=255,super[zc](e)):super[zc](e)}},P6=class extends dl{constructor(e){super(e,\"Gunzip\")}},D6=class extends dl{constructor(e){super(e,\"DeflateRaw\")}},R6=class extends dl{constructor(e){super(e,\"InflateRaw\")}},F6=class extends dl{constructor(e){super(e,\"Unzip\")}},yD=class extends ED{constructor(e,r){e=e||{},e.flush=e.flush||Wc.BROTLI_OPERATION_PROCESS,e.finishFlush=e.finishFlush||Wc.BROTLI_OPERATION_FINISH,super(e,r),this[pD]=Wc.BROTLI_OPERATION_FLUSH}},N6=class extends yD{constructor(e){super(e,\"BrotliCompress\")}},L6=class extends yD{constructor(e){super(e,\"BrotliDecompress\")}};ss.Deflate=S6;ss.Inflate=k6;ss.Gzip=x6;ss.Gunzip=P6;ss.DeflateRaw=D6;ss.InflateRaw=R6;ss.Unzip=F6;typeof 
b6.BrotliCompress==\"function\"?(ss.BrotliCompress=N6,ss.BrotliDecompress=L6):ss.BrotliCompress=ss.BrotliDecompress=class{constructor(){throw new Error(\"Brotli is not supported in this version of Node.js\")}}});var Xd=w(fB=>{\"use strict\";fB.name=new Map([[\"0\",\"File\"],[\"\",\"OldFile\"],[\"1\",\"Link\"],[\"2\",\"SymbolicLink\"],[\"3\",\"CharacterDevice\"],[\"4\",\"BlockDevice\"],[\"5\",\"Directory\"],[\"6\",\"FIFO\"],[\"7\",\"ContiguousFile\"],[\"g\",\"GlobalExtendedHeader\"],[\"x\",\"ExtendedHeader\"],[\"A\",\"SolarisACL\"],[\"D\",\"GNUDumpDir\"],[\"I\",\"Inode\"],[\"K\",\"NextFileHasLongLinkpath\"],[\"L\",\"NextFileHasLongPath\"],[\"M\",\"ContinuationFile\"],[\"N\",\"OldGnuLongPath\"],[\"S\",\"SparseFile\"],[\"V\",\"TapeVolumeHeader\"],[\"X\",\"OldExtendedHeader\"]]);fB.code=new Map(Array.from(fB.name).map(t=>[t[1],t[0]]))});var Zd=w((SAt,T6)=>{\"use strict\";var QAt=Xd(),oFe=of(),BD=Symbol(\"slurp\");T6.exports=class extends oFe{constructor(e,r,i){super();switch(this.pause(),this.extended=r,this.globalExtended=i,this.header=e,this.startBlockSize=512*Math.ceil(e.size/512),this.blockRemain=this.startBlockSize,this.remain=e.size,this.type=e.type,this.meta=!1,this.ignore=!1,this.type){case\"File\":case\"OldFile\":case\"Link\":case\"SymbolicLink\":case\"CharacterDevice\":case\"BlockDevice\":case\"Directory\":case\"FIFO\":case\"ContiguousFile\":case\"GNUDumpDir\":break;case\"NextFileHasLongLinkpath\":case\"NextFileHasLongPath\":case\"OldGnuLongPath\":case\"GlobalExtendedHeader\":case\"ExtendedHeader\":case\"OldExtendedHeader\":this.meta=!0;break;default:this.ignore=!0}this.path=e.path,this.mode=e.mode,this.mode&&(this.mode=this.mode&4095),this.uid=e.uid,this.gid=e.gid,this.uname=e.uname,this.gname=e.gname,this.size=e.size,this.mtime=e.mtime,this.atime=e.atime,this.ctime=e.ctime,this.linkpath=e.linkpath,this.uname=e.uname,this.gname=e.gname,r&&this[BD](r),i&&this[BD](i,!0)}write(e){let r=e.length;if(r>this.blockRemain)throw new Error(\"writing more to entry than is appropriate\");let i=this.remain,n=this.blockRemain;return this.remain=Math.max(0,i-r),this.blockRemain=Math.max(0,n-r),this.ignore?!0:i>=r?super.write(e):super.write(e.slice(0,i))}[BD](e,r){for(let i in e)e[i]!==null&&e[i]!==void 0&&!(r&&i===\"path\")&&(this[i]=e[i])}}});var U6=w(bD=>{\"use strict\";var kAt=bD.encode=(t,e)=>{if(Number.isSafeInteger(t))t<0?AFe(t,e):aFe(t,e);else throw Error(\"cannot encode number outside of javascript safe integer range\");return e},aFe=(t,e)=>{e[0]=128;for(var r=e.length;r>1;r--)e[r-1]=t&255,t=Math.floor(t/256)},AFe=(t,e)=>{e[0]=255;var r=!1;t=t*-1;for(var i=e.length;i>1;i--){var n=t&255;t=Math.floor(t/256),r?e[i-1]=O6(n):n===0?e[i-1]=0:(r=!0,e[i-1]=M6(n))}},xAt=bD.parse=t=>{var e=t[t.length-1],r=t[0],i;if(r===128)i=cFe(t.slice(1,t.length));else if(r===255)i=lFe(t);else throw Error(\"invalid base256 encoding\");if(!Number.isSafeInteger(i))throw Error(\"parsed number outside of javascript safe integer range\");return i},lFe=t=>{for(var e=t.length,r=0,i=!1,n=e-1;n>-1;n--){var s=t[n],o;i?o=O6(s):s===0?o=s:(i=!0,o=M6(s)),o!==0&&(r-=o*Math.pow(256,e-n-1))}return r},cFe=t=>{for(var e=t.length,r=0,i=e-1;i>-1;i--){var n=t[i];n!==0&&(r+=n*Math.pow(256,e-i-1))}return r},O6=t=>(255^t)&255,M6=t=>(255^t)+1&255});var lf=w((DAt,K6)=>{\"use strict\";var 
QD=Xd(),Af=require(\"path\").posix,H6=U6(),vD=Symbol(\"slurp\"),os=Symbol(\"type\"),j6=class{constructor(e,r,i,n){this.cksumValid=!1,this.needPax=!1,this.nullBlock=!1,this.block=null,this.path=null,this.mode=null,this.uid=null,this.gid=null,this.size=null,this.mtime=null,this.cksum=null,this[os]=\"0\",this.linkpath=null,this.uname=null,this.gname=null,this.devmaj=0,this.devmin=0,this.atime=null,this.ctime=null,Buffer.isBuffer(e)?this.decode(e,r||0,i,n):e&&this.set(e)}decode(e,r,i,n){if(r||(r=0),!e||!(e.length>=r+512))throw new Error(\"need 512 bytes for header\");if(this.path=_c(e,r,100),this.mode=Cl(e,r+100,8),this.uid=Cl(e,r+108,8),this.gid=Cl(e,r+116,8),this.size=Cl(e,r+124,12),this.mtime=SD(e,r+136,12),this.cksum=Cl(e,r+148,12),this[vD](i),this[vD](n,!0),this[os]=_c(e,r+156,1),this[os]===\"\"&&(this[os]=\"0\"),this[os]===\"0\"&&this.path.substr(-1)===\"/\"&&(this[os]=\"5\"),this[os]===\"5\"&&(this.size=0),this.linkpath=_c(e,r+157,100),e.slice(r+257,r+265).toString()===\"ustar\\x0000\")if(this.uname=_c(e,r+265,32),this.gname=_c(e,r+297,32),this.devmaj=Cl(e,r+329,8),this.devmin=Cl(e,r+337,8),e[r+475]!==0){let o=_c(e,r+345,155);this.path=o+\"/\"+this.path}else{let o=_c(e,r+345,130);o&&(this.path=o+\"/\"+this.path),this.atime=SD(e,r+476,12),this.ctime=SD(e,r+488,12)}let s=8*32;for(let o=r;o<r+148;o++)s+=e[o];for(let o=r+156;o<r+512;o++)s+=e[o];this.cksumValid=s===this.cksum,this.cksum===null&&s===8*32&&(this.nullBlock=!0)}[vD](e,r){for(let i in e)e[i]!==null&&e[i]!==void 0&&!(r&&i===\"path\")&&(this[i]=e[i])}encode(e,r){if(e||(e=this.block=Buffer.alloc(512),r=0),r||(r=0),!(e.length>=r+512))throw new Error(\"need 512 bytes for header\");let i=this.ctime||this.atime?130:155,n=uFe(this.path||\"\",i),s=n[0],o=n[1];this.needPax=n[2],this.needPax=Vc(e,r,100,s)||this.needPax,this.needPax=ml(e,r+100,8,this.mode)||this.needPax,this.needPax=ml(e,r+108,8,this.uid)||this.needPax,this.needPax=ml(e,r+116,8,this.gid)||this.needPax,this.needPax=ml(e,r+124,12,this.size)||this.needPax,this.needPax=kD(e,r+136,12,this.mtime)||this.needPax,e[r+156]=this[os].charCodeAt(0),this.needPax=Vc(e,r+157,100,this.linkpath)||this.needPax,e.write(\"ustar\\x0000\",r+257,8),this.needPax=Vc(e,r+265,32,this.uname)||this.needPax,this.needPax=Vc(e,r+297,32,this.gname)||this.needPax,this.needPax=ml(e,r+329,8,this.devmaj)||this.needPax,this.needPax=ml(e,r+337,8,this.devmin)||this.needPax,this.needPax=Vc(e,r+345,i,o)||this.needPax,e[r+475]!==0?this.needPax=Vc(e,r+345,155,o)||this.needPax:(this.needPax=Vc(e,r+345,130,o)||this.needPax,this.needPax=kD(e,r+476,12,this.atime)||this.needPax,this.needPax=kD(e,r+488,12,this.ctime)||this.needPax);let a=8*32;for(let l=r;l<r+148;l++)a+=e[l];for(let l=r+156;l<r+512;l++)a+=e[l];return this.cksum=a,ml(e,r+148,8,this.cksum),this.cksumValid=!0,this.needPax}set(e){for(let r in e)e[r]!==null&&e[r]!==void 0&&(this[r]=e[r])}get type(){return QD.name.get(this[os])||this[os]}get typeKey(){return this[os]}set type(e){QD.code.has(e)?this[os]=QD.code.get(e):this[os]=e}},uFe=(t,e)=>{let r=100,i=t,n=\"\",s,o=Af.parse(t).root||\".\";if(Buffer.byteLength(i)<r)s=[i,n,!1];else{n=Af.dirname(i),i=Af.basename(i);do Buffer.byteLength(i)<=r&&Buffer.byteLength(n)<=e?s=[i,n,!1]:Buffer.byteLength(i)>r&&Buffer.byteLength(n)<=e?s=[i.substr(0,r-1),n,!0]:(i=Af.join(Af.basename(n),i),n=Af.dirname(n));while(n!==o&&!s);s||(s=[t.substr(0,r-1),\"\",!0])}return s},_c=(t,e,r)=>t.slice(e,e+r).toString(\"utf8\").replace(/\\0.*/,\"\"),SD=(t,e,r)=>gFe(Cl(t,e,r)),gFe=t=>t===null?null:new 
Date(t*1e3),Cl=(t,e,r)=>t[e]&128?H6.parse(t.slice(e,e+r)):fFe(t,e,r),hFe=t=>isNaN(t)?null:t,fFe=(t,e,r)=>hFe(parseInt(t.slice(e,e+r).toString(\"utf8\").replace(/\\0.*$/,\"\").trim(),8)),pFe={12:8589934591,8:2097151},ml=(t,e,r,i)=>i===null?!1:i>pFe[r]||i<0?(H6.encode(i,t.slice(e,e+r)),!0):(dFe(t,e,r,i),!1),dFe=(t,e,r,i)=>t.write(CFe(i,r),e,r,\"ascii\"),CFe=(t,e)=>mFe(Math.floor(t).toString(8),e),mFe=(t,e)=>(t.length===e-1?t:new Array(e-t.length-1).join(\"0\")+t+\" \")+\"\\0\",kD=(t,e,r,i)=>i===null?!1:ml(t,e,r,i.getTime()/1e3),EFe=new Array(156).join(\"\\0\"),Vc=(t,e,r,i)=>i===null?!1:(t.write(i+EFe,e,r,\"utf8\"),i.length!==Buffer.byteLength(i)||i.length>r);K6.exports=j6});var pB=w((RAt,G6)=>{\"use strict\";var IFe=lf(),yFe=require(\"path\"),hB=class{constructor(e,r){this.atime=e.atime||null,this.charset=e.charset||null,this.comment=e.comment||null,this.ctime=e.ctime||null,this.gid=e.gid||null,this.gname=e.gname||null,this.linkpath=e.linkpath||null,this.mtime=e.mtime||null,this.path=e.path||null,this.size=e.size||null,this.uid=e.uid||null,this.uname=e.uname||null,this.dev=e.dev||null,this.ino=e.ino||null,this.nlink=e.nlink||null,this.global=r||!1}encode(){let e=this.encodeBody();if(e===\"\")return null;let r=Buffer.byteLength(e),i=512*Math.ceil(1+r/512),n=Buffer.allocUnsafe(i);for(let s=0;s<512;s++)n[s]=0;new IFe({path:(\"PaxHeader/\"+yFe.basename(this.path)).slice(0,99),mode:this.mode||420,uid:this.uid||null,gid:this.gid||null,size:r,mtime:this.mtime||null,type:this.global?\"GlobalExtendedHeader\":\"ExtendedHeader\",linkpath:\"\",uname:this.uname||\"\",gname:this.gname||\"\",devmaj:0,devmin:0,atime:this.atime||null,ctime:this.ctime||null}).encode(n),n.write(e,512,r,\"utf8\");for(let s=r+512;s<n.length;s++)n[s]=0;return n}encodeBody(){return this.encodeField(\"path\")+this.encodeField(\"ctime\")+this.encodeField(\"atime\")+this.encodeField(\"dev\")+this.encodeField(\"ino\")+this.encodeField(\"nlink\")+this.encodeField(\"charset\")+this.encodeField(\"comment\")+this.encodeField(\"gid\")+this.encodeField(\"gname\")+this.encodeField(\"linkpath\")+this.encodeField(\"mtime\")+this.encodeField(\"size\")+this.encodeField(\"uid\")+this.encodeField(\"uname\")}encodeField(e){if(this[e]===null||this[e]===void 0)return\"\";let r=this[e]instanceof Date?this[e].getTime()/1e3:this[e],i=\" \"+(e===\"dev\"||e===\"ino\"||e===\"nlink\"?\"SCHILY.\":\"\")+e+\"=\"+r+`\n`,n=Buffer.byteLength(i),s=Math.floor(Math.log(n)/Math.log(10))+1;return n+s>=Math.pow(10,s)&&(s+=1),s+n+i}};hB.parse=(t,e,r)=>new hB(wFe(BFe(t),e),r);var wFe=(t,e)=>e?Object.keys(t).reduce((r,i)=>(r[i]=t[i],r),e):t,BFe=t=>t.replace(/\\n$/,\"\").split(`\n`).reduce(bFe,Object.create(null)),bFe=(t,e)=>{let r=parseInt(e,10);if(r!==Buffer.byteLength(e)+1)return t;e=e.substr((r+\" \").length);let i=e.split(\"=\"),n=i.shift().replace(/^SCHILY\\.(dev|ino|nlink)/,\"$1\");if(!n)return t;let s=i.join(\"=\");return t[n]=/^([A-Z]+\\.)?([mac]|birth|creation)time$/.test(n)?new Date(s*1e3):/^[0-9]+$/.test(s)?+s:s,t};G6.exports=hB});var dB=w((FAt,Y6)=>{\"use strict\";Y6.exports=t=>class extends t{warn(e,r,i={}){this.file&&(i.file=this.file),this.cwd&&(i.cwd=this.cwd),i.code=r instanceof Error&&r.code||e,i.tarCode=e,!this.strict&&i.recoverable!==!1?(r instanceof Error&&(i=Object.assign(r,i),r=r.message),this.emit(\"warn\",i.tarCode,r,i)):r instanceof Error?this.emit(\"error\",Object.assign(r,i)):this.emit(\"error\",Object.assign(new Error(`${e}: ${r}`),i))}}});var PD=w((NAt,q6)=>{\"use strict\";var 
CB=[\"|\",\"<\",\">\",\"?\",\":\"],xD=CB.map(t=>String.fromCharCode(61440+t.charCodeAt(0))),QFe=new Map(CB.map((t,e)=>[t,xD[e]])),vFe=new Map(xD.map((t,e)=>[t,CB[e]]));q6.exports={encode:t=>CB.reduce((e,r)=>e.split(r).join(QFe.get(r)),t),decode:t=>xD.reduce((e,r)=>e.split(r).join(vFe.get(r)),t)}});var W6=w((LAt,J6)=>{\"use strict\";J6.exports=(t,e,r)=>(t&=4095,r&&(t=(t|384)&~18),e&&(t&256&&(t|=64),t&32&&(t|=8),t&4&&(t|=1)),t)});var OD=w((KAt,z6)=>{\"use strict\";var _6=of(),V6=pB(),X6=lf(),TAt=Zd(),ra=require(\"fs\"),cf=require(\"path\"),OAt=Xd(),SFe=16*1024*1024,Z6=Symbol(\"process\"),$6=Symbol(\"file\"),eV=Symbol(\"directory\"),DD=Symbol(\"symlink\"),tV=Symbol(\"hardlink\"),$d=Symbol(\"header\"),mB=Symbol(\"read\"),RD=Symbol(\"lstat\"),EB=Symbol(\"onlstat\"),FD=Symbol(\"onread\"),ND=Symbol(\"onreadlink\"),LD=Symbol(\"openfile\"),TD=Symbol(\"onopenfile\"),Xc=Symbol(\"close\"),IB=Symbol(\"mode\"),rV=dB(),kFe=PD(),iV=W6(),yB=rV(class extends _6{constructor(e,r){if(r=r||{},super(r),typeof e!=\"string\")throw new TypeError(\"path is required\");this.path=e,this.portable=!!r.portable,this.myuid=process.getuid&&process.getuid(),this.myuser=process.env.USER||\"\",this.maxReadSize=r.maxReadSize||SFe,this.linkCache=r.linkCache||new Map,this.statCache=r.statCache||new Map,this.preservePaths=!!r.preservePaths,this.cwd=r.cwd||process.cwd(),this.strict=!!r.strict,this.noPax=!!r.noPax,this.noMtime=!!r.noMtime,this.mtime=r.mtime||null,typeof r.onwarn==\"function\"&&this.on(\"warn\",r.onwarn);let i=!1;if(!this.preservePaths&&cf.win32.isAbsolute(e)){let n=cf.win32.parse(e);this.path=e.substr(n.root.length),i=n.root}this.win32=!!r.win32||process.platform===\"win32\",this.win32&&(this.path=kFe.decode(this.path.replace(/\\\\/g,\"/\")),e=e.replace(/\\\\/g,\"/\")),this.absolute=r.absolute||cf.resolve(this.cwd,e),this.path===\"\"&&(this.path=\"./\"),i&&this.warn(\"TAR_ENTRY_INFO\",`stripping ${i} from absolute path`,{entry:this,path:i+this.path}),this.statCache.has(this.absolute)?this[EB](this.statCache.get(this.absolute)):this[RD]()}[RD](){ra.lstat(this.absolute,(e,r)=>{if(e)return this.emit(\"error\",e);this[EB](r)})}[EB](e){this.statCache.set(this.absolute,e),this.stat=e,e.isFile()||(e.size=0),this.type=xFe(e),this.emit(\"stat\",e),this[Z6]()}[Z6](){switch(this.type){case\"File\":return this[$6]();case\"Directory\":return this[eV]();case\"SymbolicLink\":return this[DD]();default:return this.end()}}[IB](e){return iV(e,this.type===\"Directory\",this.portable)}[$d](){this.type===\"Directory\"&&this.portable&&(this.noMtime=!0),this.header=new X6({path:this.path,linkpath:this.linkpath,mode:this[IB](this.stat.mode),uid:this.portable?null:this.stat.uid,gid:this.portable?null:this.stat.gid,size:this.stat.size,mtime:this.noMtime?null:this.mtime||this.stat.mtime,type:this.type,uname:this.portable?null:this.stat.uid===this.myuid?this.myuser:\"\",atime:this.portable?null:this.stat.atime,ctime:this.portable?null:this.stat.ctime}),this.header.encode()&&!this.noPax&&this.write(new 
V6({atime:this.portable?null:this.header.atime,ctime:this.portable?null:this.header.ctime,gid:this.portable?null:this.header.gid,mtime:this.noMtime?null:this.mtime||this.header.mtime,path:this.path,linkpath:this.linkpath,size:this.header.size,uid:this.portable?null:this.header.uid,uname:this.portable?null:this.header.uname,dev:this.portable?null:this.stat.dev,ino:this.portable?null:this.stat.ino,nlink:this.portable?null:this.stat.nlink}).encode()),this.write(this.header.block)}[eV](){this.path.substr(-1)!==\"/\"&&(this.path+=\"/\"),this.stat.size=0,this[$d](),this.end()}[DD](){ra.readlink(this.absolute,(e,r)=>{if(e)return this.emit(\"error\",e);this[ND](r)})}[ND](e){this.linkpath=e.replace(/\\\\/g,\"/\"),this[$d](),this.end()}[tV](e){this.type=\"Link\",this.linkpath=cf.relative(this.cwd,e).replace(/\\\\/g,\"/\"),this.stat.size=0,this[$d](),this.end()}[$6](){if(this.stat.nlink>1){let e=this.stat.dev+\":\"+this.stat.ino;if(this.linkCache.has(e)){let r=this.linkCache.get(e);if(r.indexOf(this.cwd)===0)return this[tV](r)}this.linkCache.set(e,this.absolute)}if(this[$d](),this.stat.size===0)return this.end();this[LD]()}[LD](){ra.open(this.absolute,\"r\",(e,r)=>{if(e)return this.emit(\"error\",e);this[TD](r)})}[TD](e){let r=512*Math.ceil(this.stat.size/512),i=Math.min(r,this.maxReadSize),n=Buffer.allocUnsafe(i);this[mB](e,n,0,n.length,0,this.stat.size,r)}[mB](e,r,i,n,s,o,a){ra.read(e,r,i,n,s,(l,c)=>{if(l)return this[Xc](e,()=>this.emit(\"error\",l));this[FD](e,r,i,n,s,o,a,c)})}[Xc](e,r){ra.close(e,r)}[FD](e,r,i,n,s,o,a,l){if(l<=0&&o>0){let u=new Error(\"encountered unexpected EOF\");return u.path=this.absolute,u.syscall=\"read\",u.code=\"EOF\",this[Xc](e,()=>this.emit(\"error\",u))}if(l>o){let u=new Error(\"did not encounter expected EOF\");return u.path=this.absolute,u.syscall=\"read\",u.code=\"EOF\",this[Xc](e,()=>this.emit(\"error\",u))}if(l===o)for(let u=l;u<n&&l<a;u++)r[u+i]=0,l++,o++;let c=i===0&&l===r.length?r:r.slice(i,i+l);if(o-=l,a-=l,s+=l,i+=l,this.write(c),!o)return a&&this.write(Buffer.alloc(a)),this[Xc](e,u=>u?this.emit(\"error\",u):this.end());i>=n&&(r=Buffer.allocUnsafe(n),i=0),n=r.length-i,this[mB](e,r,i,n,s,o,a)}}),nV=class extends yB{constructor(e,r){super(e,r)}[RD](){this[EB](ra.lstatSync(this.absolute))}[DD](){this[ND](ra.readlinkSync(this.absolute))}[LD](){this[TD](ra.openSync(this.absolute,\"r\"))}[mB](e,r,i,n,s,o,a){let l=!0;try{let c=ra.readSync(e,r,i,n,s);this[FD](e,r,i,n,s,o,a,c),l=!1}finally{if(l)try{this[Xc](e,()=>{})}catch(c){}}}[Xc](e,r){ra.closeSync(e),r()}},PFe=rV(class extends _6{constructor(e,r){r=r||{},super(r),this.preservePaths=!!r.preservePaths,this.portable=!!r.portable,this.strict=!!r.strict,this.noPax=!!r.noPax,this.noMtime=!!r.noMtime,this.readEntry=e,this.type=e.type,this.type===\"Directory\"&&this.portable&&(this.noMtime=!0),this.path=e.path,this.mode=this[IB](e.mode),this.uid=this.portable?null:e.uid,this.gid=this.portable?null:e.gid,this.uname=this.portable?null:e.uname,this.gname=this.portable?null:e.gname,this.size=e.size,this.mtime=this.noMtime?null:r.mtime||e.mtime,this.atime=this.portable?null:e.atime,this.ctime=this.portable?null:e.ctime,this.linkpath=e.linkpath,typeof r.onwarn==\"function\"&&this.on(\"warn\",r.onwarn);let i=!1;if(cf.isAbsolute(this.path)&&!this.preservePaths){let n=cf.parse(this.path);i=n.root,this.path=this.path.substr(n.root.length)}this.remain=e.size,this.blockRemain=e.startBlockSize,this.header=new 
X6({path:this.path,linkpath:this.linkpath,mode:this.mode,uid:this.portable?null:this.uid,gid:this.portable?null:this.gid,size:this.size,mtime:this.noMtime?null:this.mtime,type:this.type,uname:this.portable?null:this.uname,atime:this.portable?null:this.atime,ctime:this.portable?null:this.ctime}),i&&this.warn(\"TAR_ENTRY_INFO\",`stripping ${i} from absolute path`,{entry:this,path:i+this.path}),this.header.encode()&&!this.noPax&&super.write(new V6({atime:this.portable?null:this.atime,ctime:this.portable?null:this.ctime,gid:this.portable?null:this.gid,mtime:this.noMtime?null:this.mtime,path:this.path,linkpath:this.linkpath,size:this.size,uid:this.portable?null:this.uid,uname:this.portable?null:this.uname,dev:this.portable?null:this.readEntry.dev,ino:this.portable?null:this.readEntry.ino,nlink:this.portable?null:this.readEntry.nlink}).encode()),super.write(this.header.block),e.pipe(this)}[IB](e){return iV(e,this.type===\"Directory\",this.portable)}write(e){let r=e.length;if(r>this.blockRemain)throw new Error(\"writing more to entry than is appropriate\");return this.blockRemain-=r,super.write(e)}end(){return this.blockRemain&&this.write(Buffer.alloc(this.blockRemain)),super.end()}});yB.Sync=nV;yB.Tar=PFe;var xFe=t=>t.isFile()?\"File\":t.isDirectory()?\"Directory\":t.isSymbolicLink()?\"SymbolicLink\":\"Unsupported\";z6.exports=yB});var xB=w((jAt,sV)=>{\"use strict\";var MD=class{constructor(e,r){this.path=e||\"./\",this.absolute=r,this.entry=null,this.stat=null,this.readdir=null,this.pending=!1,this.ignore=!1,this.piped=!1}},DFe=of(),RFe=wD(),FFe=Zd(),UD=OD(),NFe=UD.Sync,LFe=UD.Tar,TFe=Bp(),oV=Buffer.alloc(1024),wB=Symbol(\"onStat\"),BB=Symbol(\"ended\"),ia=Symbol(\"queue\"),uf=Symbol(\"current\"),Zc=Symbol(\"process\"),bB=Symbol(\"processing\"),aV=Symbol(\"processJob\"),na=Symbol(\"jobs\"),KD=Symbol(\"jobDone\"),QB=Symbol(\"addFSEntry\"),AV=Symbol(\"addTarEntry\"),HD=Symbol(\"stat\"),jD=Symbol(\"readdir\"),vB=Symbol(\"onreaddir\"),SB=Symbol(\"pipe\"),lV=Symbol(\"entry\"),GD=Symbol(\"entryOpt\"),YD=Symbol(\"writeEntryClass\"),cV=Symbol(\"write\"),qD=Symbol(\"ondrain\"),kB=require(\"fs\"),uV=require(\"path\"),OFe=dB(),JD=OFe(class extends DFe{constructor(e){super(e);e=e||Object.create(null),this.opt=e,this.file=e.file||\"\",this.cwd=e.cwd||process.cwd(),this.maxReadSize=e.maxReadSize,this.preservePaths=!!e.preservePaths,this.strict=!!e.strict,this.noPax=!!e.noPax,this.prefix=(e.prefix||\"\").replace(/(\\\\|\\/)+$/,\"\"),this.linkCache=e.linkCache||new Map,this.statCache=e.statCache||new Map,this.readdirCache=e.readdirCache||new Map,this[YD]=UD,typeof e.onwarn==\"function\"&&this.on(\"warn\",e.onwarn),this.portable=!!e.portable,this.zip=null,e.gzip?(typeof e.gzip!=\"object\"&&(e.gzip={}),this.portable&&(e.gzip.portable=!0),this.zip=new RFe.Gzip(e.gzip),this.zip.on(\"data\",r=>super.write(r)),this.zip.on(\"end\",r=>super.end()),this.zip.on(\"drain\",r=>this[qD]()),this.on(\"resume\",r=>this.zip.resume())):this.on(\"drain\",this[qD]),this.noDirRecurse=!!e.noDirRecurse,this.follow=!!e.follow,this.noMtime=!!e.noMtime,this.mtime=e.mtime||null,this.filter=typeof e.filter==\"function\"?e.filter:r=>!0,this[ia]=new TFe,this[na]=0,this.jobs=+e.jobs||4,this[bB]=!1,this[BB]=!1}[cV](e){return super.write(e)}add(e){return this.write(e),this}end(e){return e&&this.write(e),this[BB]=!0,this[Zc](),this}write(e){if(this[BB])throw new Error(\"write after end\");return e instanceof FFe?this[AV](e):this[QB](e),this.flowing}[AV](e){let 
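/* (annotation) Pack: paths written to this stream are resolved against cwd,
   optionally re-rooted under this.prefix, filtered, and queued as jobs; up
   to this.jobs (default 4) stat/readdir/entry jobs run concurrently, with
   output optionally piped through gzip when that option is set. */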
r=uV.resolve(this.cwd,e.path);if(this.prefix&&(e.path=this.prefix+\"/\"+e.path.replace(/^\\.(\\/+|$)/,\"\")),!this.filter(e.path,e))e.resume();else{let i=new MD(e.path,r,!1);i.entry=new LFe(e,this[GD](i)),i.entry.on(\"end\",n=>this[KD](i)),this[na]+=1,this[ia].push(i)}this[Zc]()}[QB](e){let r=uV.resolve(this.cwd,e);this.prefix&&(e=this.prefix+\"/\"+e.replace(/^\\.(\\/+|$)/,\"\")),this[ia].push(new MD(e,r)),this[Zc]()}[HD](e){e.pending=!0,this[na]+=1;let r=this.follow?\"stat\":\"lstat\";kB[r](e.absolute,(i,n)=>{e.pending=!1,this[na]-=1,i?this.emit(\"error\",i):this[wB](e,n)})}[wB](e,r){this.statCache.set(e.absolute,r),e.stat=r,this.filter(e.path,r)||(e.ignore=!0),this[Zc]()}[jD](e){e.pending=!0,this[na]+=1,kB.readdir(e.absolute,(r,i)=>{if(e.pending=!1,this[na]-=1,r)return this.emit(\"error\",r);this[vB](e,i)})}[vB](e,r){this.readdirCache.set(e.absolute,r),e.readdir=r,this[Zc]()}[Zc](){if(!this[bB]){this[bB]=!0;for(let e=this[ia].head;e!==null&&this[na]<this.jobs;e=e.next)if(this[aV](e.value),e.value.ignore){let r=e.next;this[ia].removeNode(e),e.next=r}this[bB]=!1,this[BB]&&!this[ia].length&&this[na]===0&&(this.zip?this.zip.end(oV):(super.write(oV),super.end()))}}get[uf](){return this[ia]&&this[ia].head&&this[ia].head.value}[KD](e){this[ia].shift(),this[na]-=1,this[Zc]()}[aV](e){if(!e.pending){if(e.entry){e===this[uf]&&!e.piped&&this[SB](e);return}if(e.stat||(this.statCache.has(e.absolute)?this[wB](e,this.statCache.get(e.absolute)):this[HD](e)),!!e.stat&&!e.ignore&&!(!this.noDirRecurse&&e.stat.isDirectory()&&!e.readdir&&(this.readdirCache.has(e.absolute)?this[vB](e,this.readdirCache.get(e.absolute)):this[jD](e),!e.readdir))){if(e.entry=this[lV](e),!e.entry){e.ignore=!0;return}e===this[uf]&&!e.piped&&this[SB](e)}}}[GD](e){return{onwarn:(r,i,n)=>this.warn(r,i,n),noPax:this.noPax,cwd:this.cwd,absolute:e.absolute,preservePaths:this.preservePaths,maxReadSize:this.maxReadSize,strict:this.strict,portable:this.portable,linkCache:this.linkCache,statCache:this.statCache,noMtime:this.noMtime,mtime:this.mtime}}[lV](e){this[na]+=1;try{return new this[YD](e.path,this[GD](e)).on(\"end\",()=>this[KD](e)).on(\"error\",r=>this.emit(\"error\",r))}catch(r){this.emit(\"error\",r)}}[qD](){this[uf]&&this[uf].entry&&this[uf].entry.resume()}[SB](e){e.piped=!0,e.readdir&&e.readdir.forEach(n=>{let s=this.prefix?e.path.slice(this.prefix.length+1)||\"./\":e.path,o=s===\"./\"?\"\":s.replace(/\\/*$/,\"/\");this[QB](o+n)});let r=e.entry,i=this.zip;i?r.on(\"data\",n=>{i.write(n)||r.pause()}):r.on(\"data\",n=>{super.write(n)||r.pause()})}pause(){return this.zip&&this.zip.pause(),super.pause()}}),gV=class extends JD{constructor(e){super(e);this[YD]=NFe}pause(){}resume(){}[HD](e){let r=this.follow?\"statSync\":\"lstatSync\";this[wB](e,kB[r](e.absolute))}[jD](e,r){this[vB](e,kB.readdirSync(e.absolute))}[SB](e){let r=e.entry,i=this.zip;e.readdir&&e.readdir.forEach(n=>{let s=this.prefix?e.path.slice(this.prefix.length+1)||\"./\":e.path,o=s===\"./\"?\"\":s.replace(/\\/*$/,\"/\");this[QB](o+n)}),i?r.on(\"data\",n=>{i.write(n)}):r.on(\"data\",n=>{super[cV](n)})}};JD.Sync=gV;sV.exports=JD});var Cf=w(eC=>{\"use strict\";var 
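/* (annotation) Start of what appears to be a bundled fs-minipass module:
   minimal ReadStream/WriteStream implementations over raw file descriptors,
   exported at the end of this section as eC.ReadStream, eC.ReadStreamSync,
   eC.WriteStream and eC.WriteStreamSync. Note the use of process.binding,
   a long-deprecated internal Node.js API. */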
MFe=of(),UFe=require(\"events\").EventEmitter,Ms=require(\"fs\"),PB=process.binding(\"fs\"),GAt=PB.writeBuffers,KFe=PB.FSReqWrap||PB.FSReqCallback,gf=Symbol(\"_autoClose\"),sa=Symbol(\"_close\"),tC=Symbol(\"_ended\"),or=Symbol(\"_fd\"),fV=Symbol(\"_finished\"),$c=Symbol(\"_flags\"),WD=Symbol(\"_flush\"),zD=Symbol(\"_handleChunk\"),_D=Symbol(\"_makeBuf\"),VD=Symbol(\"_mode\"),DB=Symbol(\"_needDrain\"),ff=Symbol(\"_onerror\"),hf=Symbol(\"_onopen\"),XD=Symbol(\"_onread\"),eu=Symbol(\"_onwrite\"),El=Symbol(\"_open\"),Il=Symbol(\"_path\"),tu=Symbol(\"_pos\"),oa=Symbol(\"_queue\"),pf=Symbol(\"_read\"),hV=Symbol(\"_readSize\"),yl=Symbol(\"_reading\"),RB=Symbol(\"_remain\"),pV=Symbol(\"_size\"),FB=Symbol(\"_write\"),df=Symbol(\"_writing\"),NB=Symbol(\"_defaultFlag\"),ZD=class extends MFe{constructor(e,r){if(r=r||{},super(r),this.writable=!1,typeof e!=\"string\")throw new TypeError(\"path must be a string\");this[or]=typeof r.fd==\"number\"?r.fd:null,this[Il]=e,this[hV]=r.readSize||16*1024*1024,this[yl]=!1,this[pV]=typeof r.size==\"number\"?r.size:Infinity,this[RB]=this[pV],this[gf]=typeof r.autoClose==\"boolean\"?r.autoClose:!0,typeof this[or]==\"number\"?this[pf]():this[El]()}get fd(){return this[or]}get path(){return this[Il]}write(){throw new TypeError(\"this is a readable stream\")}end(){throw new TypeError(\"this is a readable stream\")}[El](){Ms.open(this[Il],\"r\",(e,r)=>this[hf](e,r))}[hf](e,r){e?this[ff](e):(this[or]=r,this.emit(\"open\",r),this[pf]())}[_D](){return Buffer.allocUnsafe(Math.min(this[hV],this[RB]))}[pf](){if(!this[yl]){this[yl]=!0;let e=this[_D]();if(e.length===0)return process.nextTick(()=>this[XD](null,0,e));Ms.read(this[or],e,0,e.length,null,(r,i,n)=>this[XD](r,i,n))}}[XD](e,r,i){this[yl]=!1,e?this[ff](e):this[zD](r,i)&&this[pf]()}[sa](){this[gf]&&typeof this[or]==\"number\"&&(Ms.close(this[or],e=>this.emit(\"close\")),this[or]=null)}[ff](e){this[yl]=!0,this[sa](),this.emit(\"error\",e)}[zD](e,r){let i=!1;return this[RB]-=e,e>0&&(i=super.write(e<r.length?r.slice(0,e):r)),(e===0||this[RB]<=0)&&(i=!1,this[sa](),super.end()),i}emit(e,r){switch(e){case\"prefinish\":case\"finish\":break;case\"drain\":typeof this[or]==\"number\"&&this[pf]();break;default:return super.emit(e,r)}}},dV=class extends ZD{[El](){let e=!0;try{this[hf](null,Ms.openSync(this[Il],\"r\")),e=!1}finally{e&&this[sa]()}}[pf](){let e=!0;try{if(!this[yl]){this[yl]=!0;do{let r=this[_D](),i=r.length===0?0:Ms.readSync(this[or],r,0,r.length,null);if(!this[zD](i,r))break}while(!0);this[yl]=!1}e=!1}finally{e&&this[sa]()}}[sa](){if(this[gf]&&typeof this[or]==\"number\"){try{Ms.closeSync(this[or])}catch(e){}this[or]=null,this.emit(\"close\")}}},$D=class extends UFe{constructor(e,r){r=r||{},super(r),this.readable=!1,this[df]=!1,this[tC]=!1,this[DB]=!1,this[oa]=[],this[Il]=e,this[or]=typeof r.fd==\"number\"?r.fd:null,this[VD]=r.mode===void 0?438:r.mode,this[tu]=typeof r.start==\"number\"?r.start:null,this[gf]=typeof r.autoClose==\"boolean\"?r.autoClose:!0;let i=this[tu]!==null?\"r+\":\"w\";this[NB]=r.flags===void 0,this[$c]=this[NB]?i:r.flags,this[or]===null&&this[El]()}get fd(){return this[or]}get path(){return this[Il]}[ff](e){this[sa](),this[df]=!0,this.emit(\"error\",e)}[El](){Ms.open(this[Il],this[$c],this[VD],(e,r)=>this[hf](e,r))}[hf](e,r){this[NB]&&this[$c]===\"r+\"&&e&&e.code===\"ENOENT\"?(this[$c]=\"w\",this[El]()):e?this[ff](e):(this[or]=r,this.emit(\"open\",r),this[WD]())}end(e,r){e&&this.write(e,r),this[tC]=!0,!this[df]&&!this[oa].length&&typeof this[or]==\"number\"&&this[eu](null,0)}write(e,r){return 
typeof e==\"string\"&&(e=new Buffer(e,r)),this[tC]?(this.emit(\"error\",new Error(\"write() after end()\")),!1):this[or]===null||this[df]||this[oa].length?(this[oa].push(e),this[DB]=!0,!1):(this[df]=!0,this[FB](e),!0)}[FB](e){Ms.write(this[or],e,0,e.length,this[tu],(r,i)=>this[eu](r,i))}[eu](e,r){e?this[ff](e):(this[tu]!==null&&(this[tu]+=r),this[oa].length?this[WD]():(this[df]=!1,this[tC]&&!this[fV]?(this[fV]=!0,this[sa](),this.emit(\"finish\")):this[DB]&&(this[DB]=!1,this.emit(\"drain\"))))}[WD](){if(this[oa].length===0)this[tC]&&this[eu](null,0);else if(this[oa].length===1)this[FB](this[oa].pop());else{let e=this[oa];this[oa]=[],HFe(this[or],e,this[tu],(r,i)=>this[eu](r,i))}}[sa](){this[gf]&&typeof this[or]==\"number\"&&(Ms.close(this[or],e=>this.emit(\"close\")),this[or]=null)}},CV=class extends $D{[El](){let e;try{e=Ms.openSync(this[Il],this[$c],this[VD])}catch(r){if(this[NB]&&this[$c]===\"r+\"&&r&&r.code===\"ENOENT\")return this[$c]=\"w\",this[El]();throw r}this[hf](null,e)}[sa](){if(this[gf]&&typeof this[or]==\"number\"){try{Ms.closeSync(this[or])}catch(e){}this[or]=null,this.emit(\"close\")}}[FB](e){try{this[eu](null,Ms.writeSync(this[or],e,0,e.length,this[tu]))}catch(r){this[eu](r,0)}}},HFe=(t,e,r,i)=>{let n=(o,a)=>i(o,a,e),s=new KFe;s.oncomplete=n,PB.writeBuffers(t,e,r,s)};eC.ReadStream=ZD;eC.ReadStreamSync=dV;eC.WriteStream=$D;eC.WriteStreamSync=CV});var nC=w((WAt,mV)=>{\"use strict\";var jFe=dB(),qAt=require(\"path\"),GFe=lf(),YFe=require(\"events\"),qFe=Bp(),JFe=1024*1024,WFe=Zd(),EV=pB(),zFe=wD(),eR=Buffer.from([31,139]),Us=Symbol(\"state\"),ru=Symbol(\"writeEntry\"),aA=Symbol(\"readEntry\"),tR=Symbol(\"nextEntry\"),IV=Symbol(\"processEntry\"),Ks=Symbol(\"extendedHeader\"),rC=Symbol(\"globalExtendedHeader\"),wl=Symbol(\"meta\"),yV=Symbol(\"emitMeta\"),yr=Symbol(\"buffer\"),AA=Symbol(\"queue\"),iu=Symbol(\"ended\"),wV=Symbol(\"emittedEnd\"),nu=Symbol(\"emit\"),Ln=Symbol(\"unzip\"),LB=Symbol(\"consumeChunk\"),TB=Symbol(\"consumeChunkSub\"),rR=Symbol(\"consumeBody\"),BV=Symbol(\"consumeMeta\"),bV=Symbol(\"consumeHeader\"),OB=Symbol(\"consuming\"),iR=Symbol(\"bufferConcat\"),nR=Symbol(\"maybeEnd\"),iC=Symbol(\"writing\"),Bl=Symbol(\"aborted\"),MB=Symbol(\"onDone\"),su=Symbol(\"sawValidEntry\"),UB=Symbol(\"sawNullBlock\"),KB=Symbol(\"sawEOF\"),_Fe=t=>!0;mV.exports=jFe(class extends YFe{constructor(e){e=e||{},super(e),this.file=e.file||\"\",this[su]=null,this.on(MB,r=>{(this[Us]===\"begin\"||this[su]===!1)&&this.warn(\"TAR_BAD_ARCHIVE\",\"Unrecognized archive format\")}),e.ondone?this.on(MB,e.ondone):this.on(MB,r=>{this.emit(\"prefinish\"),this.emit(\"finish\"),this.emit(\"end\"),this.emit(\"close\")}),this.strict=!!e.strict,this.maxMetaEntrySize=e.maxMetaEntrySize||JFe,this.filter=typeof e.filter==\"function\"?e.filter:_Fe,this.writable=!0,this.readable=!1,this[AA]=new qFe,this[yr]=null,this[aA]=null,this[ru]=null,this[Us]=\"begin\",this[wl]=\"\",this[Ks]=null,this[rC]=null,this[iu]=!1,this[Ln]=null,this[Bl]=!1,this[UB]=!1,this[KB]=!1,typeof e.onwarn==\"function\"&&this.on(\"warn\",e.onwarn),typeof e.onentry==\"function\"&&this.on(\"entry\",e.onentry)}[bV](e,r){this[su]===null&&(this[su]=!1);let i;try{i=new GFe(e,r,this[Ks],this[rC])}catch(n){return this.warn(\"TAR_ENTRY_INVALID\",n)}if(i.nullBlock)this[UB]?(this[KB]=!0,this[Us]===\"begin\"&&(this[Us]=\"header\"),this[nu](\"eof\")):(this[UB]=!0,this[nu](\"nullBlock\"));else if(this[UB]=!1,!i.cksumValid)this.warn(\"TAR_ENTRY_INVALID\",\"checksum failure\",{header:i});else if(!i.path)this.warn(\"TAR_ENTRY_INVALID\",\"path is 
required\",{header:i});else{let n=i.type;if(/^(Symbolic)?Link$/.test(n)&&!i.linkpath)this.warn(\"TAR_ENTRY_INVALID\",\"linkpath required\",{header:i});else if(!/^(Symbolic)?Link$/.test(n)&&i.linkpath)this.warn(\"TAR_ENTRY_INVALID\",\"linkpath forbidden\",{header:i});else{let s=this[ru]=new WFe(i,this[Ks],this[rC]);if(!this[su])if(s.remain){let o=()=>{s.invalid||(this[su]=!0)};s.on(\"end\",o)}else this[su]=!0;s.meta?s.size>this.maxMetaEntrySize?(s.ignore=!0,this[nu](\"ignoredEntry\",s),this[Us]=\"ignore\",s.resume()):s.size>0&&(this[wl]=\"\",s.on(\"data\",o=>this[wl]+=o),this[Us]=\"meta\"):(this[Ks]=null,s.ignore=s.ignore||!this.filter(s.path,s),s.ignore?(this[nu](\"ignoredEntry\",s),this[Us]=s.remain?\"ignore\":\"header\",s.resume()):(s.remain?this[Us]=\"body\":(this[Us]=\"header\",s.end()),this[aA]?this[AA].push(s):(this[AA].push(s),this[tR]())))}}}[IV](e){let r=!0;return e?Array.isArray(e)?this.emit.apply(this,e):(this[aA]=e,this.emit(\"entry\",e),e.emittedEnd||(e.on(\"end\",i=>this[tR]()),r=!1)):(this[aA]=null,r=!1),r}[tR](){do;while(this[IV](this[AA].shift()));if(!this[AA].length){let e=this[aA];!e||e.flowing||e.size===e.remain?this[iC]||this.emit(\"drain\"):e.once(\"drain\",i=>this.emit(\"drain\"))}}[rR](e,r){let i=this[ru],n=i.blockRemain,s=n>=e.length&&r===0?e:e.slice(r,r+n);return i.write(s),i.blockRemain||(this[Us]=\"header\",this[ru]=null,i.end()),s.length}[BV](e,r){let i=this[ru],n=this[rR](e,r);return this[ru]||this[yV](i),n}[nu](e,r,i){!this[AA].length&&!this[aA]?this.emit(e,r,i):this[AA].push([e,r,i])}[yV](e){switch(this[nu](\"meta\",this[wl]),e.type){case\"ExtendedHeader\":case\"OldExtendedHeader\":this[Ks]=EV.parse(this[wl],this[Ks],!1);break;case\"GlobalExtendedHeader\":this[rC]=EV.parse(this[wl],this[rC],!0);break;case\"NextFileHasLongPath\":case\"OldGnuLongPath\":this[Ks]=this[Ks]||Object.create(null),this[Ks].path=this[wl].replace(/\\0.*/,\"\");break;case\"NextFileHasLongLinkpath\":this[Ks]=this[Ks]||Object.create(null),this[Ks].linkpath=this[wl].replace(/\\0.*/,\"\");break;default:throw new Error(\"unknown meta: \"+e.type)}}abort(e){this[Bl]=!0,this.emit(\"abort\",e),this.warn(\"TAR_ABORT\",e,{recoverable:!1})}write(e){if(this[Bl])return;if(this[Ln]===null&&e){if(this[yr]&&(e=Buffer.concat([this[yr],e]),this[yr]=null),e.length<eR.length)return this[yr]=e,!0;for(let i=0;this[Ln]===null&&i<eR.length;i++)e[i]!==eR[i]&&(this[Ln]=!1);if(this[Ln]===null){let i=this[iu];this[iu]=!1,this[Ln]=new zFe.Unzip,this[Ln].on(\"data\",s=>this[LB](s)),this[Ln].on(\"error\",s=>this.abort(s)),this[Ln].on(\"end\",s=>{this[iu]=!0,this[LB]()}),this[iC]=!0;let n=this[Ln][i?\"end\":\"write\"](e);return this[iC]=!1,n}}this[iC]=!0,this[Ln]?this[Ln].write(e):this[LB](e),this[iC]=!1;let r=this[AA].length?!1:this[aA]?this[aA].flowing:!0;return!r&&!this[AA].length&&this[aA].once(\"drain\",i=>this.emit(\"drain\")),r}[iR](e){e&&!this[Bl]&&(this[yr]=this[yr]?Buffer.concat([this[yr],e]):e)}[nR](){if(this[iu]&&!this[wV]&&!this[Bl]&&!this[OB]){this[wV]=!0;let e=this[ru];if(e&&e.blockRemain){let r=this[yr]?this[yr].length:0;this.warn(\"TAR_BAD_ARCHIVE\",`Truncated input (needed ${e.blockRemain} more bytes, only ${r} available)`,{entry:e}),this[yr]&&e.write(this[yr]),e.end()}this[nu](MB)}}[LB](e){if(this[OB])this[iR](e);else if(!e&&!this[yr])this[nR]();else{if(this[OB]=!0,this[yr]){this[iR](e);let r=this[yr];this[yr]=null,this[TB](r)}else this[TB](e);for(;this[yr]&&this[yr].length>=512&&!this[Bl]&&!this[KB];){let r=this[yr];this[yr]=null,this[TB](r)}this[OB]=!1}(!this[yr]||this[iu])&&this[nR]()}[TB](e){let 
r=0,i=e.length;for(;r+512<=i&&!this[Bl]&&!this[KB];)switch(this[Us]){case\"begin\":case\"header\":this[bV](e,r),r+=512;break;case\"ignore\":case\"body\":r+=this[rR](e,r);break;case\"meta\":r+=this[BV](e,r);break;default:throw new Error(\"invalid state: \"+this[Us])}r<i&&(this[yr]?this[yr]=Buffer.concat([e.slice(r),this[yr]]):this[yr]=e.slice(r))}end(e){this[Bl]||(this[Ln]?this[Ln].end(e):(this[iu]=!0,this.write(e)))}})});var HB=w((_At,QV)=>{\"use strict\";var VFe=sf(),vV=nC(),mf=require(\"fs\"),XFe=Cf(),SV=require(\"path\"),zAt=QV.exports=(t,e,r)=>{typeof t==\"function\"?(r=t,e=null,t={}):Array.isArray(t)&&(e=t,t={}),typeof e==\"function\"&&(r=e,e=null),e?e=Array.from(e):e=[];let i=VFe(t);if(i.sync&&typeof r==\"function\")throw new TypeError(\"callback not supported for sync tar functions\");if(!i.file&&typeof r==\"function\")throw new TypeError(\"callback only supported with file option\");return e.length&&$Fe(i,e),i.noResume||ZFe(i),i.file&&i.sync?eNe(i):i.file?tNe(i,r):kV(i)},ZFe=t=>{let e=t.onentry;t.onentry=e?r=>{e(r),r.resume()}:r=>r.resume()},$Fe=(t,e)=>{let r=new Map(e.map(s=>[s.replace(/\\/+$/,\"\"),!0])),i=t.filter,n=(s,o)=>{let a=o||SV.parse(s).root||\".\",l=s===a?!1:r.has(s)?r.get(s):n(SV.dirname(s),a);return r.set(s,l),l};t.filter=i?(s,o)=>i(s,o)&&n(s.replace(/\\/+$/,\"\")):s=>n(s.replace(/\\/+$/,\"\"))},eNe=t=>{let e=kV(t),r=t.file,i=!0,n;try{let s=mf.statSync(r),o=t.maxReadSize||16*1024*1024;if(s.size<o)e.end(mf.readFileSync(r));else{let a=0,l=Buffer.allocUnsafe(o);for(n=mf.openSync(r,\"r\");a<s.size;){let c=mf.readSync(n,l,0,o,a);a+=c,e.write(l.slice(0,c))}e.end()}i=!1}finally{if(i&&n)try{mf.closeSync(n)}catch(s){}}},tNe=(t,e)=>{let r=new vV(t),i=t.maxReadSize||16*1024*1024,n=t.file,s=new Promise((o,a)=>{r.on(\"error\",a),r.on(\"end\",o),mf.stat(n,(l,c)=>{if(l)a(l);else{let u=new XFe.ReadStream(n,{readSize:i,size:c.size});u.on(\"error\",a),u.pipe(r)}})});return e?s.then(e,e):s},kV=t=>new vV(t)});var NV=w((ZAt,xV)=>{\"use strict\";var rNe=sf(),jB=xB(),VAt=require(\"fs\"),PV=Cf(),DV=HB(),RV=require(\"path\"),XAt=xV.exports=(t,e,r)=>{if(typeof e==\"function\"&&(r=e),Array.isArray(t)&&(e=t,t={}),!e||!Array.isArray(e)||!e.length)throw new TypeError(\"no files or directories specified\");e=Array.from(e);let i=rNe(t);if(i.sync&&typeof r==\"function\")throw new TypeError(\"callback not supported for sync tar functions\");if(!i.file&&typeof r==\"function\")throw new TypeError(\"callback only supported with file option\");return i.file&&i.sync?iNe(i,e):i.file?nNe(i,e,r):i.sync?sNe(i,e):oNe(i,e)},iNe=(t,e)=>{let r=new jB.Sync(t),i=new PV.WriteStreamSync(t.file,{mode:t.mode||438});r.pipe(i),FV(r,e)},nNe=(t,e,r)=>{let i=new jB(t),n=new PV.WriteStream(t.file,{mode:t.mode||438});i.pipe(n);let s=new Promise((o,a)=>{n.on(\"error\",a),n.on(\"close\",o),i.on(\"error\",a)});return sR(i,e),r?s.then(r,r):s},FV=(t,e)=>{e.forEach(r=>{r.charAt(0)===\"@\"?DV({file:RV.resolve(t.cwd,r.substr(1)),sync:!0,noResume:!0,onentry:i=>t.add(i)}):t.add(r)}),t.end()},sR=(t,e)=>{for(;e.length;){let r=e.shift();if(r.charAt(0)===\"@\")return DV({file:RV.resolve(t.cwd,r.substr(1)),noResume:!0,onentry:i=>t.add(i)}).then(i=>sR(t,e));t.add(r)}t.end()},sNe=(t,e)=>{let r=new jB.Sync(t);return FV(r,e),r},oNe=(t,e)=>{let r=new jB(t);return sR(r,e),r}});var oR=w((tlt,LV)=>{\"use strict\";var aNe=sf(),TV=xB(),$At=nC(),Hs=require(\"fs\"),OV=Cf(),MV=HB(),UV=require(\"path\"),KV=lf(),elt=LV.exports=(t,e,r)=>{let i=aNe(t);if(!i.file)throw new TypeError(\"file is required\");if(i.gzip)throw new TypeError(\"cannot append to 
compressed archives\");if(!e||!Array.isArray(e)||!e.length)throw new TypeError(\"no files or directories specified\");return e=Array.from(e),i.sync?ANe(i,e):lNe(i,e,r)},ANe=(t,e)=>{let r=new TV.Sync(t),i=!0,n,s;try{try{n=Hs.openSync(t.file,\"r+\")}catch(l){if(l.code===\"ENOENT\")n=Hs.openSync(t.file,\"w+\");else throw l}let o=Hs.fstatSync(n),a=Buffer.alloc(512);e:for(s=0;s<o.size;s+=512){for(let u=0,g=0;u<512;u+=g){if(g=Hs.readSync(n,a,u,a.length-u,s+u),s===0&&a[0]===31&&a[1]===139)throw new Error(\"cannot append to compressed archives\");if(!g)break e}let l=new KV(a);if(!l.cksumValid)break;let c=512*Math.ceil(l.size/512);if(s+c+512>o.size)break;s+=c,t.mtimeCache&&t.mtimeCache.set(l.path,l.mtime)}i=!1,cNe(t,r,s,n,e)}finally{if(i)try{Hs.closeSync(n)}catch(o){}}},cNe=(t,e,r,i,n)=>{let s=new OV.WriteStreamSync(t.file,{fd:i,start:r});e.pipe(s),uNe(e,n)},lNe=(t,e,r)=>{e=Array.from(e);let i=new TV(t),n=(o,a,l)=>{let c=(p,m)=>{p?Hs.close(o,y=>l(p)):l(null,m)},u=0;if(a===0)return c(null,0);let g=0,f=Buffer.alloc(512),h=(p,m)=>{if(p)return c(p);if(g+=m,g<512&&m)return Hs.read(o,f,g,f.length-g,u+g,h);if(u===0&&f[0]===31&&f[1]===139)return c(new Error(\"cannot append to compressed archives\"));if(g<512)return c(null,u);let y=new KV(f);if(!y.cksumValid)return c(null,u);let Q=512*Math.ceil(y.size/512);if(u+Q+512>a||(u+=Q+512,u>=a))return c(null,u);t.mtimeCache&&t.mtimeCache.set(y.path,y.mtime),g=0,Hs.read(o,f,0,512,u,h)};Hs.read(o,f,0,512,u,h)},s=new Promise((o,a)=>{i.on(\"error\",a);let l=\"r+\",c=(u,g)=>{if(u&&u.code===\"ENOENT\"&&l===\"r+\")return l=\"w+\",Hs.open(t.file,l,c);if(u)return a(u);Hs.fstat(g,(f,h)=>{if(f)return a(f);n(g,h.size,(p,m)=>{if(p)return a(p);let y=new OV.WriteStream(t.file,{fd:g,start:m});i.pipe(y),y.on(\"error\",a),y.on(\"close\",o),HV(i,e)})})};Hs.open(t.file,l,c)});return r?s.then(r,r):s},uNe=(t,e)=>{e.forEach(r=>{r.charAt(0)===\"@\"?MV({file:UV.resolve(t.cwd,r.substr(1)),sync:!0,noResume:!0,onentry:i=>t.add(i)}):t.add(r)}),t.end()},HV=(t,e)=>{for(;e.length;){let r=e.shift();if(r.charAt(0)===\"@\")return MV({file:UV.resolve(t.cwd,r.substr(1)),noResume:!0,onentry:i=>t.add(i)}).then(i=>HV(t,e));t.add(r)}t.end()}});var GV=w((ilt,jV)=>{\"use strict\";var gNe=sf(),fNe=oR(),rlt=jV.exports=(t,e,r)=>{let i=gNe(t);if(!i.file)throw new TypeError(\"file is required\");if(i.gzip)throw new TypeError(\"cannot append to compressed archives\");if(!e||!Array.isArray(e)||!e.length)throw new TypeError(\"no files or directories specified\");return e=Array.from(e),hNe(i),fNe(i,e,r)},hNe=t=>{let e=t.filter;t.mtimeCache||(t.mtimeCache=new Map),t.filter=e?(r,i)=>e(r,i)&&!(t.mtimeCache.get(r)>i.mtime):(r,i)=>!(t.mtimeCache.get(r)>i.mtime)}});var JV=w((nlt,YV)=>{var{promisify:qV}=require(\"util\"),bl=require(\"fs\"),pNe=t=>{if(!t)t={mode:511,fs:bl};else if(typeof t==\"object\")t=N({mode:511,fs:bl},t);else if(typeof t==\"number\")t={mode:t,fs:bl};else if(typeof t==\"string\")t={mode:parseInt(t,8),fs:bl};else throw new TypeError(\"invalid options argument\");return t.mkdir=t.mkdir||t.fs.mkdir||bl.mkdir,t.mkdirAsync=qV(t.mkdir),t.stat=t.stat||t.fs.stat||bl.stat,t.statAsync=qV(t.stat),t.statSync=t.statSync||t.fs.statSync||bl.statSync,t.mkdirSync=t.mkdirSync||t.fs.mkdirSync||bl.mkdirSync,t};YV.exports=pNe});var zV=w((slt,WV)=>{var dNe=process.env.__TESTING_MKDIRP_PLATFORM__||process.platform,{resolve:CNe,parse:mNe}=require(\"path\"),ENe=t=>{if(/\\0/.test(t))throw Object.assign(new TypeError(\"path must be a string without null 
bytes\"),{path:t,code:\"ERR_INVALID_ARG_VALUE\"});if(t=CNe(t),dNe===\"win32\"){let e=/[*|\"<>?:]/,{root:r}=mNe(t);if(e.test(t.substr(r.length)))throw Object.assign(new Error(\"Illegal characters in path.\"),{path:t,code:\"EINVAL\"})}return t};WV.exports=ENe});var $V=w((olt,_V)=>{var{dirname:VV}=require(\"path\"),XV=(t,e,r=void 0)=>r===e?Promise.resolve():t.statAsync(e).then(i=>i.isDirectory()?r:void 0,i=>i.code===\"ENOENT\"?XV(t,VV(e),e):void 0),ZV=(t,e,r=void 0)=>{if(r!==e)try{return t.statSync(e).isDirectory()?r:void 0}catch(i){return i.code===\"ENOENT\"?ZV(t,VV(e),e):void 0}};_V.exports={findMade:XV,findMadeSync:ZV}});var lR=w((alt,e9)=>{var{dirname:t9}=require(\"path\"),aR=(t,e,r)=>{e.recursive=!1;let i=t9(t);return i===t?e.mkdirAsync(t,e).catch(n=>{if(n.code!==\"EISDIR\")throw n}):e.mkdirAsync(t,e).then(()=>r||t,n=>{if(n.code===\"ENOENT\")return aR(i,e).then(s=>aR(t,e,s));if(n.code!==\"EEXIST\"&&n.code!==\"EROFS\")throw n;return e.statAsync(t).then(s=>{if(s.isDirectory())return r;throw n},()=>{throw n})})},AR=(t,e,r)=>{let i=t9(t);if(e.recursive=!1,i===t)try{return e.mkdirSync(t,e)}catch(n){if(n.code!==\"EISDIR\")throw n;return}try{return e.mkdirSync(t,e),r||t}catch(n){if(n.code===\"ENOENT\")return AR(t,e,AR(i,e,r));if(n.code!==\"EEXIST\"&&n.code!==\"EROFS\")throw n;try{if(!e.statSync(t).isDirectory())throw n}catch(s){throw n}}};e9.exports={mkdirpManual:aR,mkdirpManualSync:AR}});var n9=w((Alt,r9)=>{var{dirname:i9}=require(\"path\"),{findMade:INe,findMadeSync:yNe}=$V(),{mkdirpManual:wNe,mkdirpManualSync:BNe}=lR(),bNe=(t,e)=>(e.recursive=!0,i9(t)===t?e.mkdirAsync(t,e):INe(e,t).then(i=>e.mkdirAsync(t,e).then(()=>i).catch(n=>{if(n.code===\"ENOENT\")return wNe(t,e);throw n}))),QNe=(t,e)=>{if(e.recursive=!0,i9(t)===t)return e.mkdirSync(t,e);let i=yNe(e,t);try{return e.mkdirSync(t,e),i}catch(n){if(n.code===\"ENOENT\")return BNe(t,e);throw n}};r9.exports={mkdirpNative:bNe,mkdirpNativeSync:QNe}});var A9=w((llt,s9)=>{var o9=require(\"fs\"),vNe=process.env.__TESTING_MKDIRP_NODE_VERSION__||process.version,cR=vNe.replace(/^v/,\"\").split(\".\"),a9=+cR[0]>10||+cR[0]==10&&+cR[1]>=12,SNe=a9?t=>t.mkdir===o9.mkdir:()=>!1,kNe=a9?t=>t.mkdirSync===o9.mkdirSync:()=>!1;s9.exports={useNative:SNe,useNativeSync:kNe}});var h9=w((clt,l9)=>{var Ef=JV(),If=zV(),{mkdirpNative:c9,mkdirpNativeSync:u9}=n9(),{mkdirpManual:g9,mkdirpManualSync:f9}=lR(),{useNative:xNe,useNativeSync:PNe}=A9(),yf=(t,e)=>(t=If(t),e=Ef(e),xNe(e)?c9(t,e):g9(t,e)),DNe=(t,e)=>(t=If(t),e=Ef(e),PNe(e)?u9(t,e):f9(t,e));yf.sync=DNe;yf.native=(t,e)=>c9(If(t),Ef(e));yf.manual=(t,e)=>g9(If(t),Ef(e));yf.nativeSync=(t,e)=>u9(If(t),Ef(e));yf.manualSync=(t,e)=>f9(If(t),Ef(e));l9.exports=yf});var y9=w((ult,p9)=>{\"use strict\";var js=require(\"fs\"),ou=require(\"path\"),RNe=js.lchown?\"lchown\":\"chown\",FNe=js.lchownSync?\"lchownSync\":\"chownSync\",d9=js.lchown&&!process.version.match(/v1[1-9]+\\./)&&!process.version.match(/v10\\.[6-9]/),C9=(t,e,r)=>{try{return js[FNe](t,e,r)}catch(i){if(i.code!==\"ENOENT\")throw i}},NNe=(t,e,r)=>{try{return js.chownSync(t,e,r)}catch(i){if(i.code!==\"ENOENT\")throw i}},LNe=d9?(t,e,r,i)=>n=>{!n||n.code!==\"EISDIR\"?i(n):js.chown(t,e,r,i)}:(t,e,r,i)=>i,uR=d9?(t,e,r)=>{try{return C9(t,e,r)}catch(i){if(i.code!==\"EISDIR\")throw i;NNe(t,e,r)}}:(t,e,r)=>C9(t,e,r),TNe=process.version,m9=(t,e,r)=>js.readdir(t,e,r),ONe=(t,e)=>js.readdirSync(t,e);/^v4\\./.test(TNe)&&(m9=(t,e,r)=>js.readdir(t,r));var GB=(t,e,r,i)=>{js[RNe](t,e,r,LNe(t,e,r,n=>{i(n&&n.code!==\"ENOENT\"?n:null)}))},E9=(t,e,r,i,n)=>{if(typeof e==\"string\")return 
js.lstat(ou.resolve(t,e),(s,o)=>{if(s)return n(s.code!==\"ENOENT\"?s:null);o.name=e,E9(t,o,r,i,n)});if(e.isDirectory())gR(ou.resolve(t,e.name),r,i,s=>{if(s)return n(s);let o=ou.resolve(t,e.name);GB(o,r,i,n)});else{let s=ou.resolve(t,e.name);GB(s,r,i,n)}},gR=(t,e,r,i)=>{m9(t,{withFileTypes:!0},(n,s)=>{if(n){if(n.code===\"ENOENT\")return i();if(n.code!==\"ENOTDIR\"&&n.code!==\"ENOTSUP\")return i(n)}if(n||!s.length)return GB(t,e,r,i);let o=s.length,a=null,l=c=>{if(!a){if(c)return i(a=c);if(--o==0)return GB(t,e,r,i)}};s.forEach(c=>E9(t,c,e,r,l))})},MNe=(t,e,r,i)=>{if(typeof e==\"string\")try{let n=js.lstatSync(ou.resolve(t,e));n.name=e,e=n}catch(n){if(n.code===\"ENOENT\")return;throw n}e.isDirectory()&&I9(ou.resolve(t,e.name),r,i),uR(ou.resolve(t,e.name),r,i)},I9=(t,e,r)=>{let i;try{i=ONe(t,{withFileTypes:!0})}catch(n){if(n.code===\"ENOENT\")return;if(n.code===\"ENOTDIR\"||n.code===\"ENOTSUP\")return uR(t,e,r);throw n}return i&&i.length&&i.forEach(n=>MNe(t,n,e,r)),uR(t,e,r)};p9.exports=gR;gR.sync=I9});var Q9=w((hlt,fR)=>{\"use strict\";var w9=h9(),Gs=require(\"fs\"),YB=require(\"path\"),B9=y9(),hR=class extends Error{constructor(e,r){super(\"Cannot extract through symbolic link\");this.path=r,this.symlink=e}get name(){return\"SylinkError\"}},sC=class extends Error{constructor(e,r){super(r+\": Cannot cd into '\"+e+\"'\");this.path=e,this.code=r}get name(){return\"CwdError\"}},glt=fR.exports=(t,e,r)=>{let i=e.umask,n=e.mode|448,s=(n&i)!=0,o=e.uid,a=e.gid,l=typeof o==\"number\"&&typeof a==\"number\"&&(o!==e.processUid||a!==e.processGid),c=e.preserve,u=e.unlink,g=e.cache,f=e.cwd,h=(y,Q)=>{y?r(y):(g.set(t,!0),Q&&l?B9(Q,o,a,S=>h(S)):s?Gs.chmod(t,n,r):r())};if(g&&g.get(t)===!0)return h();if(t===f)return Gs.stat(t,(y,Q)=>{(y||!Q.isDirectory())&&(y=new sC(t,y&&y.code||\"ENOTDIR\")),h(y)});if(c)return w9(t,{mode:n}).then(y=>h(null,y),h);let m=YB.relative(f,t).split(/\\/|\\\\/);qB(f,m,n,g,u,f,null,h)},qB=(t,e,r,i,n,s,o,a)=>{if(!e.length)return a(null,o);let l=e.shift(),c=t+\"/\"+l;if(i.get(c))return qB(c,e,r,i,n,s,o,a);Gs.mkdir(c,r,b9(c,e,r,i,n,s,o,a))},b9=(t,e,r,i,n,s,o,a)=>l=>{if(l){if(l.path&&YB.dirname(l.path)===s&&(l.code===\"ENOTDIR\"||l.code===\"ENOENT\"))return a(new sC(s,l.code));Gs.lstat(t,(c,u)=>{if(c)a(c);else if(u.isDirectory())qB(t,e,r,i,n,s,o,a);else if(n)Gs.unlink(t,g=>{if(g)return a(g);Gs.mkdir(t,r,b9(t,e,r,i,n,s,o,a))});else{if(u.isSymbolicLink())return a(new hR(t,t+\"/\"+e.join(\"/\")));a(l)}})}else o=o||t,qB(t,e,r,i,n,s,o,a)},flt=fR.exports.sync=(t,e)=>{let r=e.umask,i=e.mode|448,n=(i&r)!=0,s=e.uid,o=e.gid,a=typeof s==\"number\"&&typeof o==\"number\"&&(s!==e.processUid||o!==e.processGid),l=e.preserve,c=e.unlink,u=e.cache,g=e.cwd,f=y=>{u.set(t,!0),y&&a&&B9.sync(y,s,o),n&&Gs.chmodSync(t,i)};if(u&&u.get(t)===!0)return f();if(t===g){let y=!1,Q=\"ENOTDIR\";try{y=Gs.statSync(t).isDirectory()}catch(S){Q=S.code}finally{if(!y)throw new sC(t,Q)}f();return}if(l)return f(w9.sync(t,i));let p=YB.relative(g,t).split(/\\/|\\\\/),m=null;for(let y=p.shift(),Q=g;y&&(Q+=\"/\"+y);y=p.shift())if(!u.get(Q))try{Gs.mkdirSync(Q,i),m=m||Q,u.set(Q,!0)}catch(S){if(S.path&&YB.dirname(S.path)===g&&(S.code===\"ENOTDIR\"||S.code===\"ENOENT\"))return new sC(g,S.code);let x=Gs.lstatSync(Q);if(x.isDirectory()){u.set(Q,!0);continue}else if(c){Gs.unlinkSync(Q),Gs.mkdirSync(Q,i),m=m||Q,u.set(Q,!0);continue}else if(x.isSymbolicLink())return new hR(Q,Q+\"/\"+p.join(\"/\"))}return f(m)}});var k9=w((plt,v9)=>{var S9=require(\"assert\");v9.exports=()=>{let t=new Map,e=new 
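/* (annotation) Start of what appears to be a bundled path-reservations
   module: maps keyed by path (and by each ancestor directory) queue pending
   functions so the unpacker never runs two operations on overlapping paths
   at the same time. */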
Map,{join:r}=require(\"path\"),i=u=>r(u).split(/[\\\\\\/]/).slice(0,-1).reduce((g,f)=>g.length?g.concat(r(g[g.length-1],f)):[f],[]),n=new Set,s=u=>{let g=e.get(u);if(!g)throw new Error(\"function does not have any path reservations\");return{paths:g.paths.map(f=>t.get(f)),dirs:[...g.dirs].map(f=>t.get(f))}},o=u=>{let{paths:g,dirs:f}=s(u);return g.every(h=>h[0]===u)&&f.every(h=>h[0]instanceof Set&&h[0].has(u))},a=u=>n.has(u)||!o(u)?!1:(n.add(u),u(()=>l(u)),!0),l=u=>{if(!n.has(u))return!1;let{paths:g,dirs:f}=e.get(u),h=new Set;return g.forEach(p=>{let m=t.get(p);S9.equal(m[0],u),m.length===1?t.delete(p):(m.shift(),typeof m[0]==\"function\"?h.add(m[0]):m[0].forEach(y=>h.add(y)))}),f.forEach(p=>{let m=t.get(p);S9(m[0]instanceof Set),m[0].size===1&&m.length===1?t.delete(p):m[0].size===1?(m.shift(),h.add(m[0])):m[0].delete(u)}),n.delete(u),h.forEach(p=>a(p)),!0};return{check:o,reserve:(u,g)=>{let f=new Set(u.map(h=>i(h)).reduce((h,p)=>h.concat(p)));return e.set(g,{dirs:f,paths:u}),u.forEach(h=>{let p=t.get(h);p?p.push(g):t.set(h,[g])}),f.forEach(h=>{let p=t.get(h);p?p[p.length-1]instanceof Set?p[p.length-1].add(g):p.push(new Set([g])):t.set(h,[new Set([g])])}),a(g)}}}});var D9=w((dlt,x9)=>{var UNe=process.env.__FAKE_PLATFORM__||process.platform,KNe=UNe===\"win32\",HNe=global.__FAKE_TESTING_FS__||require(\"fs\"),{O_CREAT:jNe,O_TRUNC:GNe,O_WRONLY:YNe,UV_FS_O_FILEMAP:P9=0}=HNe.constants,qNe=KNe&&!!P9,JNe=512*1024,WNe=P9|GNe|jNe|YNe;x9.exports=qNe?t=>t<JNe?WNe:\"w\":()=>\"w\"});var BR=w((Ilt,R9)=>{\"use strict\";var zNe=require(\"assert\"),Clt=require(\"events\").EventEmitter,_Ne=nC(),$t=require(\"fs\"),VNe=Cf(),lA=require(\"path\"),pR=Q9(),mlt=pR.sync,F9=PD(),XNe=k9(),N9=Symbol(\"onEntry\"),dR=Symbol(\"checkFs\"),L9=Symbol(\"checkFs2\"),CR=Symbol(\"isReusable\"),cA=Symbol(\"makeFs\"),mR=Symbol(\"file\"),ER=Symbol(\"directory\"),JB=Symbol(\"link\"),T9=Symbol(\"symlink\"),O9=Symbol(\"hardlink\"),M9=Symbol(\"unsupported\"),Elt=Symbol(\"unknown\"),U9=Symbol(\"checkPath\"),wf=Symbol(\"mkdir\"),dn=Symbol(\"onError\"),WB=Symbol(\"pending\"),K9=Symbol(\"pend\"),Bf=Symbol(\"unpend\"),IR=Symbol(\"ended\"),yR=Symbol(\"maybeClose\"),wR=Symbol(\"skip\"),oC=Symbol(\"doChown\"),aC=Symbol(\"uid\"),AC=Symbol(\"gid\"),H9=require(\"crypto\"),j9=D9(),zB=()=>{throw new Error(\"sync function called cb somehow?!?\")},ZNe=(t,e)=>{if(process.platform!==\"win32\")return $t.unlink(t,e);let r=t+\".DELETE.\"+H9.randomBytes(16).toString(\"hex\");$t.rename(t,r,i=>{if(i)return e(i);$t.unlink(r,e)})},$Ne=t=>{if(process.platform!==\"win32\")return $t.unlinkSync(t);let e=t+\".DELETE.\"+H9.randomBytes(16).toString(\"hex\");$t.renameSync(t,e),$t.unlinkSync(e)},G9=(t,e,r)=>t===t>>>0?t:e===e>>>0?e:r,_B=class extends _Ne{constructor(e){if(e||(e={}),e.ondone=r=>{this[IR]=!0,this[yR]()},super(e),this.reservations=XNe(),this.transform=typeof e.transform==\"function\"?e.transform:null,this.writable=!0,this.readable=!1,this[WB]=0,this[IR]=!1,this.dirCache=e.dirCache||new Map,typeof e.uid==\"number\"||typeof e.gid==\"number\"){if(typeof e.uid!=\"number\"||typeof e.gid!=\"number\")throw new TypeError(\"cannot set owner without number uid and gid\");if(e.preserveOwner)throw new TypeError(\"cannot preserve owner in archive and also set owner explicitly\");this.uid=e.uid,this.gid=e.gid,this.setOwner=!0}else this.uid=null,this.gid=null,this.setOwner=!1;e.preserveOwner===void 0&&typeof 
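/* (annotation) preserveOwner default: when neither preserveOwner nor a
   numeric uid was supplied, ownership is preserved only if the process runs
   as root (process.getuid() === 0), which mirrors the usual tar(1)
   same-owner default for root. */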
e.uid!=\"number\"?this.preserveOwner=process.getuid&&process.getuid()===0:this.preserveOwner=!!e.preserveOwner,this.processUid=(this.preserveOwner||this.setOwner)&&process.getuid?process.getuid():null,this.processGid=(this.preserveOwner||this.setOwner)&&process.getgid?process.getgid():null,this.forceChown=e.forceChown===!0,this.win32=!!e.win32||process.platform===\"win32\",this.newer=!!e.newer,this.keep=!!e.keep,this.noMtime=!!e.noMtime,this.preservePaths=!!e.preservePaths,this.unlink=!!e.unlink,this.cwd=lA.resolve(e.cwd||process.cwd()),this.strip=+e.strip||0,this.processUmask=process.umask(),this.umask=typeof e.umask==\"number\"?e.umask:this.processUmask,this.dmode=e.dmode||511&~this.umask,this.fmode=e.fmode||438&~this.umask,this.on(\"entry\",r=>this[N9](r))}warn(e,r,i={}){return(e===\"TAR_BAD_ARCHIVE\"||e===\"TAR_ABORT\")&&(i.recoverable=!1),super.warn(e,r,i)}[yR](){this[IR]&&this[WB]===0&&(this.emit(\"prefinish\"),this.emit(\"finish\"),this.emit(\"end\"),this.emit(\"close\"))}[U9](e){if(this.strip){let r=e.path.split(/\\/|\\\\/);if(r.length<this.strip)return!1;if(e.path=r.slice(this.strip).join(\"/\"),e.type===\"Link\"){let i=e.linkpath.split(/\\/|\\\\/);i.length>=this.strip&&(e.linkpath=i.slice(this.strip).join(\"/\"))}}if(!this.preservePaths){let r=e.path;if(r.match(/(^|\\/|\\\\)\\.\\.(\\\\|\\/|$)/))return this.warn(\"TAR_ENTRY_ERROR\",\"path contains '..'\",{entry:e,path:r}),!1;if(lA.win32.isAbsolute(r)){let i=lA.win32.parse(r);e.path=r.substr(i.root.length);let n=i.root;this.warn(\"TAR_ENTRY_INFO\",`stripping ${n} from absolute path`,{entry:e,path:r})}}if(this.win32){let r=lA.win32.parse(e.path);e.path=r.root===\"\"?F9.encode(e.path):r.root+F9.encode(e.path.substr(r.root.length))}return lA.isAbsolute(e.path)?e.absolute=e.path:e.absolute=lA.resolve(this.cwd,e.path),!0}[N9](e){if(!this[U9](e))return e.resume();switch(zNe.equal(typeof e.absolute,\"string\"),e.type){case\"Directory\":case\"GNUDumpDir\":e.mode&&(e.mode=e.mode|448);case\"File\":case\"OldFile\":case\"ContiguousFile\":case\"Link\":case\"SymbolicLink\":return this[dR](e);case\"CharacterDevice\":case\"BlockDevice\":case\"FIFO\":return this[M9](e)}}[dn](e,r){e.name===\"CwdError\"?this.emit(\"error\",e):(this.warn(\"TAR_ENTRY_ERROR\",e,{entry:r}),this[Bf](),r.resume())}[wf](e,r,i){pR(e,{uid:this.uid,gid:this.gid,processUid:this.processUid,processGid:this.processGid,umask:this.processUmask,preserve:this.preservePaths,unlink:this.unlink,cache:this.dirCache,cwd:this.cwd,mode:r},i)}[oC](e){return this.forceChown||this.preserveOwner&&(typeof e.uid==\"number\"&&e.uid!==this.processUid||typeof e.gid==\"number\"&&e.gid!==this.processGid)||typeof this.uid==\"number\"&&this.uid!==this.processUid||typeof this.gid==\"number\"&&this.gid!==this.processGid}[aC](e){return G9(this.uid,e.uid,this.processUid)}[AC](e){return G9(this.gid,e.gid,this.processGid)}[mR](e,r){let i=e.mode&4095||this.fmode,n=new VNe.WriteStream(e.absolute,{flags:j9(e.size),mode:i,autoClose:!1});n.on(\"error\",l=>this[dn](l,e));let s=1,o=l=>{if(l)return this[dn](l,e);--s==0&&$t.close(n.fd,c=>{r(),c?this[dn](c,e):this[Bf]()})};n.on(\"finish\",l=>{let c=e.absolute,u=n.fd;if(e.mtime&&!this.noMtime){s++;let g=e.atime||new Date,f=e.mtime;$t.futimes(u,g,f,h=>h?$t.utimes(c,g,f,p=>o(p&&h)):o())}if(this[oC](e)){s++;let g=this[aC](e),f=this[AC](e);$t.fchown(u,g,f,h=>h?$t.chown(c,g,f,p=>o(p&&h)):o())}o()});let a=this.transform&&this.transform(e)||e;a!==e&&(a.on(\"error\",l=>this[dn](l,e)),e.pipe(a)),a.pipe(n)}[ER](e,r){let 
i=e.mode&4095||this.dmode;this[wf](e.absolute,i,n=>{if(n)return r(),this[dn](n,e);let s=1,o=a=>{--s==0&&(r(),this[Bf](),e.resume())};e.mtime&&!this.noMtime&&(s++,$t.utimes(e.absolute,e.atime||new Date,e.mtime,o)),this[oC](e)&&(s++,$t.chown(e.absolute,this[aC](e),this[AC](e),o)),o()})}[M9](e){e.unsupported=!0,this.warn(\"TAR_ENTRY_UNSUPPORTED\",`unsupported entry type: ${e.type}`,{entry:e}),e.resume()}[T9](e,r){this[JB](e,e.linkpath,\"symlink\",r)}[O9](e,r){this[JB](e,lA.resolve(this.cwd,e.linkpath),\"link\",r)}[K9](){this[WB]++}[Bf](){this[WB]--,this[yR]()}[wR](e){this[Bf](),e.resume()}[CR](e,r){return e.type===\"File\"&&!this.unlink&&r.isFile()&&r.nlink<=1&&process.platform!==\"win32\"}[dR](e){this[K9]();let r=[e.path];e.linkpath&&r.push(e.linkpath),this.reservations.reserve(r,i=>this[L9](e,i))}[L9](e,r){this[wf](lA.dirname(e.absolute),this.dmode,i=>{if(i)return r(),this[dn](i,e);$t.lstat(e.absolute,(n,s)=>{s&&(this.keep||this.newer&&s.mtime>e.mtime)?(this[wR](e),r()):n||this[CR](e,s)?this[cA](null,e,r):s.isDirectory()?e.type===\"Directory\"?!e.mode||(s.mode&4095)===e.mode?this[cA](null,e,r):$t.chmod(e.absolute,e.mode,o=>this[cA](o,e,r)):$t.rmdir(e.absolute,o=>this[cA](o,e,r)):ZNe(e.absolute,o=>this[cA](o,e,r))})})}[cA](e,r,i){if(e)return this[dn](e,r);switch(r.type){case\"File\":case\"OldFile\":case\"ContiguousFile\":return this[mR](r,i);case\"Link\":return this[O9](r,i);case\"SymbolicLink\":return this[T9](r,i);case\"Directory\":case\"GNUDumpDir\":return this[ER](r,i)}}[JB](e,r,i,n){$t[i](r,e.absolute,s=>{if(s)return this[dn](s,e);n(),this[Bf](),e.resume()})}},Y9=class extends _B{constructor(e){super(e)}[dR](e){let r=this[wf](lA.dirname(e.absolute),this.dmode,zB);if(r)return this[dn](r,e);try{let i=$t.lstatSync(e.absolute);if(this.keep||this.newer&&i.mtime>e.mtime)return this[wR](e);if(this[CR](e,i))return this[cA](null,e,zB);try{return i.isDirectory()?e.type===\"Directory\"?e.mode&&(i.mode&4095)!==e.mode&&$t.chmodSync(e.absolute,e.mode):$t.rmdirSync(e.absolute):$Ne(e.absolute),this[cA](null,e,zB)}catch(n){return this[dn](n,e)}}catch(i){return this[cA](null,e,zB)}}[mR](e,r){let i=e.mode&4095||this.fmode,n=l=>{let c;try{$t.closeSync(o)}catch(u){c=u}(l||c)&&this[dn](l||c,e)},s,o;try{o=$t.openSync(e.absolute,j9(e.size),i)}catch(l){return n(l)}let a=this.transform&&this.transform(e)||e;a!==e&&(a.on(\"error\",l=>this[dn](l,e)),e.pipe(a)),a.on(\"data\",l=>{try{$t.writeSync(o,l,0,l.length)}catch(c){n(c)}}),a.on(\"end\",l=>{let c=null;if(e.mtime&&!this.noMtime){let u=e.atime||new Date,g=e.mtime;try{$t.futimesSync(o,u,g)}catch(f){try{$t.utimesSync(e.absolute,u,g)}catch(h){c=f}}}if(this[oC](e)){let u=this[aC](e),g=this[AC](e);try{$t.fchownSync(o,u,g)}catch(f){try{$t.chownSync(e.absolute,u,g)}catch(h){c=c||f}}}n(c)})}[ER](e,r){let i=e.mode&4095||this.dmode,n=this[wf](e.absolute,i);if(n)return this[dn](n,e);if(e.mtime&&!this.noMtime)try{$t.utimesSync(e.absolute,e.atime||new Date,e.mtime)}catch(s){}if(this[oC](e))try{$t.chownSync(e.absolute,this[aC](e),this[AC](e))}catch(s){}e.resume()}[wf](e,r){try{return pR.sync(e,{uid:this.uid,gid:this.gid,processUid:this.processUid,processGid:this.processGid,umask:this.processUmask,preserve:this.preservePaths,unlink:this.unlink,cache:this.dirCache,cwd:this.cwd,mode:r})}catch(i){return i}}[JB](e,r,i,n){try{$t[i+\"Sync\"](r,e.absolute),e.resume()}catch(s){return this[dn](s,e)}}};_B.Sync=Y9;R9.exports=_B});var _9=w((wlt,q9)=>{\"use strict\";var eLe=sf(),VB=BR(),J9=require(\"fs\"),W9=Cf(),z9=require(\"path\"),ylt=q9.exports=(t,e,r)=>{typeof 
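/* (annotation) x/extract entry point: normalizes the (opt, fileList, cb)
   argument shapes, then dispatches to a sync or async, file-backed or
   streaming Unpack — the same dispatch shape as the list and create entry
   points above. The index just below, exporting c/r/t/u/x plus Pack,
   Unpack, Parse, ReadEntry, WriteEntry, Header and Pax, confirms this whole
   region is a bundled copy of the node-tar public API. */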
t==\"function\"?(r=t,e=null,t={}):Array.isArray(t)&&(e=t,t={}),typeof e==\"function\"&&(r=e,e=null),e?e=Array.from(e):e=[];let i=eLe(t);if(i.sync&&typeof r==\"function\")throw new TypeError(\"callback not supported for sync tar functions\");if(!i.file&&typeof r==\"function\")throw new TypeError(\"callback only supported with file option\");return e.length&&tLe(i,e),i.file&&i.sync?rLe(i):i.file?iLe(i,r):i.sync?nLe(i):sLe(i)},tLe=(t,e)=>{let r=new Map(e.map(s=>[s.replace(/\\/+$/,\"\"),!0])),i=t.filter,n=(s,o)=>{let a=o||z9.parse(s).root||\".\",l=s===a?!1:r.has(s)?r.get(s):n(z9.dirname(s),a);return r.set(s,l),l};t.filter=i?(s,o)=>i(s,o)&&n(s.replace(/\\/+$/,\"\")):s=>n(s.replace(/\\/+$/,\"\"))},rLe=t=>{let e=new VB.Sync(t),r=t.file,i=!0,n,s=J9.statSync(r),o=t.maxReadSize||16*1024*1024;new W9.ReadStreamSync(r,{readSize:o,size:s.size}).pipe(e)},iLe=(t,e)=>{let r=new VB(t),i=t.maxReadSize||16*1024*1024,n=t.file,s=new Promise((o,a)=>{r.on(\"error\",a),r.on(\"close\",o),J9.stat(n,(l,c)=>{if(l)a(l);else{let u=new W9.ReadStream(n,{readSize:i,size:c.size});u.on(\"error\",a),u.pipe(r)}})});return e?s.then(e,e):s},nLe=t=>new VB.Sync(t),sLe=t=>new VB(t)});var V9=w(hi=>{\"use strict\";hi.c=hi.create=NV();hi.r=hi.replace=oR();hi.t=hi.list=HB();hi.u=hi.update=GV();hi.x=hi.extract=_9();hi.Pack=xB();hi.Unpack=BR();hi.Parse=nC();hi.ReadEntry=Zd();hi.WriteEntry=OD();hi.Header=lf();hi.Pax=pB();hi.types=Xd()});var t7=w((Qlt,e7)=>{var QR;e7.exports.getContent=()=>(typeof QR==\"undefined\"&&(QR=require(\"zlib\").brotliDecompressSync(Buffer.from(\"W0ISdwE9bQeS320DgM/v/rJIqkB1S3dRtyGA2Swsv6u5umDk4fUhsbltwAy5392lO0dVVZOSExkKxhOa2Kpu//awROkts0ahMEgkCX3MNVCVyD2Wz2U6h7xxFSeLsympT4zG7GnbkEiKnqciN4R102Eu1gNxBrRYkZvmtVC5EmYX6aHkvGlYMFS46pVIPBL6yIiRc1KVyRgb05w8mrL04sZsZPYcKZJiL1x6x3rr10foz/h8qcaG5+lwr9z7UOLQP2oYhjAVrn7vrTTKQiPRRVb4W9p7bNQHy/wVOjPqH/kWwMmtujuIMcEUZklvJjkhNzxah8/7vVp+/e45zX1bVfLk1nPylcobAWI5wwyhuGxWtbKqDZRXiOPc82pDLBiLmtFZH0RpHuwR7NIFV12V8earfX79njg7CBS6AKTksgn3DcyTZ5jufen27qsy4ivWja2rQg7+P81+RTGuSxedEXm3rl4JGPDzWWNVAjZ0JtraFvx/n9rvR9NWFwjt1nKCfE4f3XKZ5DHM/MyP/YyJ6WoHy72/tP6/fgmHx1Q1ZXqZXVDWI1u/3KwnzmyX8fEQIycaY8kPybHdkfxS87VqLRUYrvRC74kzMCdF7fIHudVUIax+jF+qgtj94Xb3FPGH6fSfqISGv8z8Ol56W19VDljCNhWB9AElTnfPyP96S/X/38+X0d6Ax6LVGC6iS4JTB0dt5WqH6X2smBPy8rDt4TSFaVpxUMy9P9Wv33JVnTtxPtnpxk9ReHRvCYIn3R7JRoqFuqwsCUr5UIkDj8pOtzOroJ0jgCf6mPvv8+O9XTFxlft5qrsGehiGVMOo8Q/xpnrCKgkT7UGo1hbrh/zH7oxg/ZClC6If/8P59PmxuytV2xc/z8wdLkMAHZAo/kWswy3VaEkl6cB2czSECfgB9qPG9sdDB6msj8c3ywCDgg1IRTfCHI0KFX8r/b/22i8X74dQ5zyXXFOJtGEqRkB3MXY0CsZTf0E/au88HnqQamrt8c3sLAy/ZCCbBG8R64KoqKjU+mKm9sfD35RlOvnHBxBabCkJzLCpc2s9DQjRS1pnRqb/3lSrjbZ+AxQFUqbINdZXLrPGRrtnbJJY4L33+wv/dwNiG1JoGIpokFoBkFQDgOSOCM11A9IUSM1dgbLkWs3MGZ0xTgTHyIznaA11xrjYRNbm1gXRhBeEF26WXZRekFwSXBp7//veLKteugMRARZLsdhdrfRqdsr2CABMdrUWm334Fe9k/P/dnenuAJgQEWQiIjmMyCSrIpPMEtmsd+973/H9uyPSAUSyEcgkKzPIqqYYkU224AhJjhBitxndYtvLRZCjWCPLZtWL5SxnOf9/lWrf9r2IyMgEQUKkTcnWKVk1DMM6MgFCg1X+wzisi/Huffco472ILyCHMpGZ+EUkgPNFkDwtAMT5Fim/F5kgI5KQnICoqgREVwGU7CLl4VAqVR/ZPVuktLCrqodpXqztqup52Cz/YqmeVfV7nhbL3mx6s+vFbtFWnKYpXSAGV9YSpFKBErAIfbXv4l2Efo7n6WfvbHqu+POuaosC4ahRGRRS57O+sAYUanUIh1s2ITRGGu6n+j7wcUjdVS50GXr6mrGOGZY/wZz1/9gEkx+z+krdFBNjiZFiQxTBggUbYsHSsAIWBEtL9a7P/InX7f97UUGlyNt2ikIPvQcQCPQEemghlFLsZ+7ub4pHZ3YedPP/X1BSClL+6K0LBIQQVlhhzgBKCHMpIMoUJSCyHB1/aQM5f77fFv/H3ZblVhtRgBUYhbqEMQMqYa/3xc+HQbxwjYWooQw+LNyYwWAwxd9p3z/+ndmiyf8QtphiChMMUeFCEENEcECFCCIRQQ9cqHChwvCm2OIU+fz8T9vi/eDSsZgHz6KZIeCnWjTSifdUJ7Zu/T//f2YtYe7nv8WIyQ0C0ZcgEIgKBGLygkAgEBWIir6koqICseU7t+gffHsnZNt+cWIFSREIBII0CAQCQRoEAoEgzQoEYgRixIgRIxZ
68eK7PdEPC4XCQiEQCAQCDwKBEsyiTUu4z1sxgHRABaJh/2G3bEl+0Bcmn3ZWhd95FpzddaM8uiaW/P775UPwRWLPl0AwWCrqCDbCT1qg6cp+sUkBDp+FXCK/hYCOax1FpceEDMI8b3HeBRq+nkrYk9B3v0xVBeNmb0aX/gOTMOSATt8tbEjX8ah7Pu5PLlInkUQSESmmst0lxx1IPVRThvB3f/eudkg011Aeg6meqgNHfZx0Rci+mOwTWElLHTLkEnzly+tAPa8AGqSwd73T+GRi0ndwxLMjZiL6LdGArI75wSi09VSyaDFxPylH+KdhSAPVbaQMzQyl/P0BRc+CmusT/QFApUXKIA+vARExxwdl2Rdf3B/eVyfTEKxVRdvVN8fBaRl7UydUkGdwAUmYfSsInXhHxd/cWGxPcnk/dYcY4ZicBKy3d3iKt3khmP0oHL+G2PbzRh3Tt07pXjt+kzyYr6GMr9nnmLyjLNYCw9S/pHscM3LAbtjZsVOxtdPPnL1muyIv2TVvhQi/NbeK31CaAF/Z+biJXfOyze638C1fA+28UpRvkdWzf2WnqjyxtFcnT8fUjUI91f34axGgHGj6fUofZBy766V1XdI9R/zYRmG9g4PLymF9TwIH33obdmB3wLEYEvcDKFJ8JNfRSKjam+2MC9TtzQtCvXlW0Mb+gKJzHkcUmUksOR3ub3X6l7Fsq3Xc77s7/UF/LHd6iS9PHrYFsq1JC+AlYDYE31Q5/e/aGobLXu4Jl9NQkZB8b00YP9bp9A3MiK+Vv5DFAEd6WWbXQKXJbHI40TwwanWFMeUs7kCxC+rPqvtgJGsKhvLMwFgusAW7KbcMl/ZiC3dbuNvi3YZ3Tz9jw1MweHwf0RtMTYqfsaDa6fcP2j1y5ABYfwpzgnqgibGj4KpmjZNZsD5MzSoP04L1UX8glX1oZuUlwpAF7UsGgplF8liBjCNNXIEW1c+CRRWjNWlVBCtuX3ztXHmI3tLLi0BYaXodGUPzDJh3VOYbUSr811yxh2f9oo2XRSKYHwVoWS4qPtYuYd6nRkCdcXwFJtWAN7U2OXmiC/hX/comuEinUUEjybSySG6T8byFtvTxiZtjQicBAUuEYmJlZ4Xmqv5zR+gK1Exui3xSztgiH4gk1EZBv78ZVYjytBRKIFJuDy5cepdcO2gkCa6eqwN1ACSEEtAan8uenDpQSkhYmpx4ME2Oai5HQ4SPIOjQE/+g0j0BHvBBbSwBHuDFZQLpCHuKfAIHhZDIU4wYEKog6CsQfCfSwZeA+zXaKAAeACKnkDjjw1rCBwVGxFPFcSLD0pErGfvmplYSIIKRYw9i4L17jmciqKVYSXUKCWWJYsMlzKp+ShhRO8Ngp9157BeAlYVRImiD3Oe6ihR03hI61SlV54Rdp8LNGJRHzS+/Y/qpEwyZ+3hKBK+hsnYsFv58j6kudzQ/lRVul3uOOap8ObL1eX5vYDfIST91raCUwBFPT613Lxjgq8N7Aol3NTFegT5VlgsF6C8icidvUE4Mff+Aiepeyc97Lp1r/REJ9UFTSJ46WVt6FmQHplZ8/hRGESlb/bLm/c50PwCoHe2W0LJAVezJnCWD2rL/kEKF6Cr2YdXq/qGG0Txe/9NAzLzIgayspUmpqyIoD3IaK5EOH3QgULTdLJ3obj2KOK9fbnGN2OXpdAwn5iQwR6JWbRhRX24zxqij6dsnqFrPhg2XvyJYxzdm+AzgYNWPY4kOQagEGQWJaKDlQa/dqfPpGq8X+pwSla5d+98t4FGDfjr5pez8V+IrTcrwD7dNgZ8cTDAY5HpADrnhuTN8WPfJsPP87J+aZfgXFmsfe8RPc4tSzQ+E81/m+wB/heYcYNQIWHtuu8QRkfIdygrYn8TlnlVpiRmIl0wJ3G6w8gcCfL9RYh+HL8Lxp6u+eunrBvK6B0WAZPOJVe2W0Ytg5Z8SjOk4JBvEhvylrJZYdIPHsHRqK+GuAaJcmUpGlh50pS+M6sviMrdFISQDEZPXa1ElDj5mn6py9iN6DjF3KzncRTT/ETyk5t0ua/wyUZRNpc8G9yNE0751xhbSHJKn+c7d81q5WMQrJT7piRyE6gTTfxHhEbIQVLzQQ8R+w3eKPgfutCr0TV6FhPebIGE1QTwJn1YIltEelxFQ0mj+OKmn9lEzyMbmXiev3WxMTViNUrjh95I84qGiGXMnL78fDb7IugfqYg8HfR8Jt0QKcVSqrFuICtL66Gp5HOAO/EbEezQfuPs7TfEGufXWdpXYIvZvInJKLqNZhoBKKlJDlLDOAaQg8lg5rpZts09dFJ/oP9e6W1L67Kj6GH6mTebW+MtkRDzOQNvb49odEr8vYngDrlNpC2pf/VvUgysFvzTJNO/YA4XX78n/vD+zlyHaCeuxnOp2TRGQM2lzdy/GZpB+n6VRYBX8d5CUTz4Hxa57PW5B48sepeYj+XG83L8P52Q985oTmnnLnbnD89bEgRTxaMykmOfVg1Z5mz7m1NeFAuWJYsqU/ee7sm6AQpkm+m5tSQmgFRj+Ox3LA44sV7pfO4S1ArynaGhLDfICDLpdMEwGydPgG0qUD+bSdlto+52eS92xzVEPP8LwHFVSlVHR25nmSDqjlBhV0kUeFi9u2rc34ROEdzmg0RZaK/1+idlzYYRyLIcmu+qnmfxCQ9TlHVrGVtyF2HGkl5f9tTFh5XRxc+Rl9rtWFRI03iwMvl/XuW2keWh6e2pcZSMKfXz6llflvleKT2z19sU//hAxob5lkk47atU0NvhuRf27w+MIdsEG817ioc3839EU0jIH8XFK7QM0KXIpQqdQUHTNTO0RjR5EyDaZr4cN5w2AO2nkZ+xMzYS7p+RksU9eP4vkQXbcEGH4+EkaptZLJyLp0Kwd13BndUf4GZQfPxf++hZS166TbYPaS/FxkhU+gQTqypZD3mKT7bZQgmcInoLwbY2ok8mOdZTxy7m6paHs1LOqUfjA9cLidjg/KhvjdnUfwWYPhXAZvAdIAR5X9hmllecmLgr9yLN2hMJES2gry0ejcCyWI81WOZVJ8FFXLC/eTIjAAgcHrJBpH8SyxERxZRch9xdpq4KxF0X53jKzEqbICWWeuNnc4VHMRXl/Ziuk1CYezC4qiyzGT93V7/JKv8BrWNFBX1nrn6RTRRLpyUop9kmbAyz1+TltvrsyaoT/QFKfBtcLs3DaPqKw9IH8g67P01Ms42fsnceCIcidHNZBKb8gQ14pz3tKKmfoV0bYH/8Xuitem92ZIaPU4bCSHK6TkCaed9+Cq3rjyQBlNIbXUCKipIoM2SuFq4Jt08edh+DBh3dyQBsk+Kx0yF4AxxYUE5G9VhwoYEKy/Ih6UJJg5nq7gOqdcEjKgrMy7YGoUMHEhL0FXbtOiyAxNHLl20f9XSHERLp/vaVaSPd3fSOLn1bCLRpKx1fr77xVvvNqHeVw/8XBs4FI249eK/gTJhLn0/Urp86OcHhaQr5weO2zZ71OXguvszxVLPqp4MaBGwrqo3bynJpD0j3Adqgu/UV7Ng9DChweZYwESbgT3hdGxZ
kfrn8yC7nwbXC/tsscNkZ/yontW5yGmDV8Cha/1knfioYuUHtKx9vTFmNZeKK4rIKvgnPT2RDn5zYixAkASp0SYst5pM5nOiy0TR4OcNWEeuFPepiw4pNboa6PYjnC8zJWD8+n09PJd37FeFAG84sGOJACAe7b55BnlLh8BQPhH8HrzJYLJu/kaa8+1jimOogxEqIN8MvY0IdzNu8N2yXwwUPRS2JQITRvg8rQy9PeOEKae8Ewd8lrnEhffBnDxw6qBda//XgGqB5R+HwS+Vynx+CYdvmIpTaWAcjTHmFCddaJiKSmLr4Mr4EBNu9NBKXojBknuDpuOk01PjJbxgVs9GZ87GsWs02hnPRWOS3S9LDTG/F1q9F4ShL8qq4+dv3/dTYN3dq6bfpiCHV1mWts990q7/vQl5jOK17p8pvewtFJ+HwnltbRtM4fBqQP2Q6RURYE4qhKH1IG32GJAYk099nnGuXLRC8Peh7mqYCAxgOT7+8Yz7pL49knzBsJwhE3vMv8/auqf7sElhpb0yqBSznJtycfjn/cpHxndBXpcMeO86ldRa2REt0V/SfcertF24BcYeO/NHTK7m+thj7BGtus+W7kE6pJ0JmRUgCcclZQO/xXp9pHSClxl4TBhnXkhxt1cIzvDNWLDJecB9DZVw28CEwFYTtYJHQKiwGpgXxpoEovTTkRuCMeX8Dt2lijNnFf5SxaRNsm1W5pvPdurxumVgIGSWsfRK1qyGqUPEG7RL22NH6lRVRSBdpgpSeqFU2YB0Umsbyl+mfbkoVlnG/g226nLotUrcwMSNuMJPeAKvPkYcYAivELYGn7jYQYmjNEGHA+WGMmwyH7GwfDyhCh5M+hEqc82wNwGaT2OkgVveQ2dkugvbRRFOycCnccaiYV5q1oYA3W6sSPIDIisop0UeFJSSH3aDUq1HBAobAzvT39ps4/TbjtDtvLeZ8T5UlkcU4d2ZFpH80cCbY7AKXq0IDO8AmdAr3UPOvkGCj8tqxkL+YVdaQvmZB6kaQ5TYzU48nB/JTjuLINMXg+s9GpUM4Sq3UnXC0HK83wiG1TGhlTSCQYTHtxQ5rt8XSMnwF65lfRB/0wpWQYBSp8kiwvW/a/Z1zHeJdJXiOOHSGJ5D431iM6r3KJjmJxNGXD5nQY2QLqWl+mwDg77voZR0Cg5IjDQZ9o6g/9AQC44X8oRL3/JcOD8A3cFF7yID4DiQGEHYgKEGPTSX/Oc9SvL2maK77XejVQB1vpZffAV16HzjbQfycCStcJZFKA8BPh3yjA1GbxkrEnwMvotFtzSG3XKjSmLigH2G7P6kAC2mXKopA30t/jiIu+WukeQ0Ebv2S4Sg7ZO75outRHD/JV3PRR6M9U+h8xhijW1ezYDFF0C0RjiMNfLWRFMWjLdnMNLtI0PTU3u0ORxTLL7eAyPLGwIbX2R8KKtrga0HU1oS9tsFJNx8tYpCa/MIXp+rfeslt7gJ7HLtpLe+ld9j6eE0IOab0QMD5IKE1y50IdFB4zaPWizblwnDoGW5tAajnxjGodK7VymgPr9Dfqhm7loxUhgCZk/VVgi391iE2ENZ85BivqNRGiTIj9fdXIoej1jF3AowJ1SfaIUc/p5SWnMqpGP0SwUqDN8cb496Hh98nL8FkcX3FG203Xf1WeB04RPj01CvcQM1RafAEQwZKaGgNbiM5GJuGN9iqOkge7Vvu2KiffAqlclLmecYcc2oxg/nKcCHlQavdfGosmKPGyPLRQzlLYI6XyVgcsMhHpYFOTvGjfGFE/xJOXEIk3RdJIGouHppfC0T8pgncbwuINySiJDTDPC9xwmb68fycPX91hTGA0LCRHziH1/9b9r0TY+dUvEIwwyrDLkKIgxf2yB4jSFdJ3QpUPI9YI2tXaNYRImfJYEgJgZBhiG/P98vRi3pJOENSP8mR+YeM54fM8+lIJkmv0/XnMUZsOAuITydRp6rWraqiHNzTW1HiVL3zv6TLqcx1nyH+ZCvGWCCM+qRYYcSbke8AoxkXPpmKwJZJ4nyhYQ22B5grEQNT6Z8H1aLuy88I/M/Op2L8C7R66jEmuCQyq+EGXv3ZdUSAuXWPm4N/6n0BmDiUDIm/ee1PqCfOWLgjSleOdnhVTxqEAQMH8p4Y7zTp8uVcdJyGBTjSE9ksR9od0oJ6LK4U2hg56XtOywaZpnJxZctUG9K1OPzclLa8Ec9d891vyyYbowAWNjXJpUj3Ub49Vv/9ZZ2Vau+X8eD7v/lrUVr6bJ0aNBtpHkiwa8Gm9OsJaznVP9/OaqSsCclvQdUcikuKevqi9tShhGFMuJFcvEZkuzbplhcWWQ8Uf7wlpqyOJjr6+7ODdIl188bUEKD18lv49U7Q/NBfBkJefHb/HGgEcN0ZvRp7yizQahvJoWaq18vNpRX51/ac2ejYaQyJfrYtSsTYn/Mf1/CSXyRWY3cacoobZK1yaTHPfOJo3xtBAdJq+Pw54lWcHed7l1aDUOU8GEtSDwekukoDVJIjst8rbUyGCneafY1r0rS4ouW3pSsbQS6pjWa9l1oKR/fgZUv4CxX0FJlNCPP3nPDrd5On6UWv6ypoHkox/TXHl8C7Nbqr0aRWZaz37mL8B3BrfkboE9MVDTm93DyY57D4X4RphluQXbNIeSPoevmyj4IRu9otvrIgcG3apiDoPlh4IJERZ/B7dcBkyDHfEZhr1NGErlZznsSmJRfmJaEMmq8+hf6Zuf+ix/9M4b5ge05pf1HyCmZ1HVZTnwyj8gT07NynqUkRJi2ZNeC5VTtFYCao/mIggKPNh5g6VDARXAk4jj2GpL0ZFUgKFYWg6MN4bV2Ar3yUTIu0sVGO+7oPEEMunJC8UYr77fgAZ7edRgSqt43CiNzHhfoEsTTOirGjCMr1gYUxBKHYtQjFKcMIalKgVDyB6Ss9i1C2DuGeQcQ3EEQQ8H2GXL9/5kJnh0kopgRO6eHzOoJ2dsYfoROKLWkOQxIz+VjhIJ+iUTVN18Gcl1PW1LhQVifQ7McioFaqf1BgJoi92Al9kOeurPpIR6np4WccpX2REktZNUbNMkoZQejGpo9kJYugZoSHb0iqA7xytMZlSSmMxZR9TThbSMQCPhmD4nQBoPPFhdxi84yUYny7QIu4wzj7FgDAOZ0UyDWqH61xO8WjLQ1wgz+tAzvyZfL1g8EaiBlimn/Pwg9N8HDF5RDqk65OkTeSQl3OvJf8QZXnhPM02Ilv5MNXfYpxmCZjwcNqlHofICPDT17CUCwQdGhtTunYKGoJyTJV3fUjZJCHHPn0TWNYDfD9NIQzf2GuwrMCTg1xwkE2Vk74Grb2UO06f+Bu7Degbb5LKG2TAk34HPmQDPiQDoi/8OLgGoziWKzbks0FYq331l/Hy1e4bNQznmPRsqKOHvSno0dF9LNEkUHL1OY8XV+IV2Pn0i2xN3QeIV4j/26r9dKNXEU3dP3XiXeHDaDBM+eN4qBIxhflcbj5l6uowycfn1WvwlLNkSxEOjww1WywYz2ZTFdbj75BIPHbsgXzVThDWbEPtu5T08EPdK+us9hUsP
11XgG+usi3hCgMOx9AiEQSZ19nNQnGeW5uFpboAOgOPqWwSJMekF4PRJ9yYUrmUEG56ySbZW0rC5UjEWofUdH7O7bAzavppQzZ9d+1tLhCMki8HW559iy6a1PugU61MYjz2ycNsfyN4Apgu2nqBhe2nI8BTJcbKih4X78AojqVqx4Z8ANZqA5BgPp6aPsM5RgCwpg/wy/lU+YPlN+CMxxDFAJ6WOiGhzb+GpeO6xgTF4ypMFU2W3qqX+hvHPyxnIcXaoXpXp1FgrD4VmCH3oL99akbIak6z9De64P6bAM8z1G5icHaqPkhR16Lv0FBmylxFB0cHUg5S6O0UyQve9cYsFjpqzO5ttDCoL6MWKwL0aX7nXtXiVC0xpE+xpWkGE2imF1QQNCVYJu6Or16oUHK/H2sPtboyVQBO8wFc/o1Wx51Zdl4wEJAcS1EfDPlGWKxn5A1ecvNKsZNf/xFInOByocFeLYinfzcXacCc5RNoItHADLHzsxkKVCCN+WbVfPxrgOF/Tb7wHnYVbHxRNyJLwIhZpG2Z+nITMmv9MlWdHPCOWiZq3BnXQBwMgPjPjj5Nn6j1vGhln5T23/vBEGTlKvKwkt84THnt/qCjPyYE2lZ0wL6y/zkIhiDDkh+igEUvd4/pIz3Ib+9N/hs91v/HU/WkB2ZyInFMKGRDzBok/J6l38SF1d6rGcfVoq58V3DCpRURkYjTIInv5aGEgxzseF+NTknM7dedczgY5fx4FE/MjqpuraRNoinOVyQ+CVOaD0zn1JVjRgrN15UVFWV4z2uhsB/8MFGLodisDLeXocPW/2Y7Y8XBvo7SkaE0SwU6lnGUibj1IsUOGoFEYRzVHbnRnhk1EXqU8u7RtNzOETFLvUCCkPR3Zbygzcs9I4VU5iRBfUuLYIrQJ1Hg2DOh8yyo8TKRlJiZhCQcJ6TWOJ7XrgMQW54rNePPipUExmiOFg40ZMTK4i8XamZbz4BpL0EcP+2V4nhShGbtIYckaE8vUAwPcMxaMHcDb1D7Ly9VqExmVCvovGrY7FmOZfeFMW9B3z/y3JyvzC5qg8TckqiUQCnLS9XJ2O8sI1a2Sru+omKZxY7skQUXENbye144AxwvaYz5v/c58DQxJiHSqdfrBNhcgp/orzvnChJlCrqs57I/3W/a1ceGJYbo+/hYnjWNAw7utKHpi/a7znpo7PPXA3d6EjnM0AzmVVEm2GwLVftDQqPUAp1j4pS/os4C/DbJJ8wFhTlFcQBZna1FHwuPnwmfHE8c7u3iQqWlzQ04NzPmTnLqpE5O2tXUPshFnMw32J85Q5kjbQrf8JwDzlwRNctKC8QcRZFd+adFNKOYy9T0WKEl2PHlzVWMxpraqpwtIvMjwfbgYZ4sZrlqYJHrOh45+UUw+jJTPWJ4Lvl6FF5eEFuceFmG7WBpmOb/+wHeb4z5pU1dwzOgTmVmua9woOztULONCqQzdGlFz88UP4P8YeBM3dOkm55uq1xT9yHyxrxVTuo0W4RVn+KPl1Tqn+AFv35AM7Wz0+7SsG0zBSHsiZeJ1DmpowOZw95HLhwVEmLZlJSn3ZY4bjFSI3+hxmv+zv0df64LR9FqO2aNHesYi7qyoXXShroW6F0C2qTgjBQ86W8nuTvtkL5S7E9enzEeB05/gqeIgzW+/Yv1q3bnZ6G2ysR82xA2rl7rmqxe60TX8nUe0KRhQeE+bPS3sYDQ8HIlP27Gcckld0R1JW6XZiGobdZ/o+kFFF1sjTdvjcSSK97uy/8sJlKc1sq/CZyGcNycLRgtsMkww7tUyGRGwfZRU/Xk6XKmRlqFyd+4DTLDAX2jQJU6tjhXj+RHn++XFZJ0BuC5a7GjGPqJuopNGvcUuWxXuZLFFLkYbWLdMIQq0xbmesexOSntxNbVY51B+ihxZG31F5Wapz0guvP1ddNf6mvrw+xf6e9fzrLhIIsYKgPK6Pn5iT+5yW8+0Z8t9vIGUq1ZDY87mXaIK2IqfK3Oq99wW6OSfiYA0uR5RgeYU9ulXPDSb0PxLPFooJTBHv2iEwotE9G3kp7gIRaCsl5LvsHHJn/yrN4ldDGnt1GnpVvoFyg2gzRM3JJPvPdbGkF/v/9U52msKI6YFHMNYxTi78dMLpSkHeXPaCmp/bmkoroHzzeHd8n+LrKHkJ+iuGBJN874QiXtQkJiUoCwD2LZ6m/WsytUOALWZWfXpIKgiyCgLss/G7OCbcl47RxaWUTmRsOy/umsj6iYNQsp8Brkloh9efBv9F7NrEXUuIsccqKwCJQtpKPOIxEeCFQNkeb1Z9KGYSidk/AoUe39V7scNUOLYoVs83/JjvRiGS/OJo4OXCt+aCBirRXOHw67L/WXkJsQjpbSJURFuvyxVkRbQBdkqwmp0BjYLNBGHJPQZhO3iBQJdVEoa7VWjeEuT0UP0ZSBUgEb4G3ro0MhUlV4F7hR2ina88TQBgDAez0UfiSJi51cJhpMgFVCu5vUS0a9nf+8d9O9HUOD7NMe0OTXz7S9SVYhR2QBPrldnlrYb1Co+4uVM+pK+2p2moY15uCl+wSSUgZYXBmZHcMVW4jSL7HCGpF+YF9nkyA9HFM2Yto6HsA/UgxZoK57Fo6kIauv4emfJSi15pqosf7/A8WVdu1uHp9EyQSA9lvgOYH//zcB5Ph75u11/GWealS5Chax+ECoeshujOSkZuQS3MXHh7m3R+gFEajzfcGIfgtWEBDtwYne8Easb9ZoK5dqK1n3fM/JFdv86zQJ+Us2f1qkQjiNaYI9oqkq0lHCAkKHtG8LhwxeLG0TTaLT21wNBa1a02+i3boftroi79vMb7FzlrKbh10dobfhAC7qcfCLwaomRA77xHZe+IC0unuXvSvtPvlrNovPyb8tPz6D3YP8WNEjAKZwgM+zhoLQXNENqGeH1dbDWAbPaqOqJaMilA0+M3xlGnZGXtX2uyTam6rQpgMsife0VijojnzfAPgaMygcH/dR80bi9EO9SI9N+EMd4cpN7fnT0NM15brz0qEcxqhhgok/PhOCidMtX/uYVH1j4zJjeSuCvYi76BphAxS0a1SKZXnlngjYPqlgSJKpOe0cfjbjbdw52EaTr9XcSP7gemD/wlEYku/PKgbDwFolpHVmTJtKzZSxREDgo7SFK5zL0FJFz6oCBwvGZ+k/x3n9lcjzqdEP8XORBD5QYYgGwhvYcV3seSCGhs30205NTvHOnW3tz7OGKaaF09rQB0ev3h9IJohjrD5onRoOGFZW/QH0QSsnamoepSM5nd/yYJqNDuVYJrPQYpxXFNE8UEuHWX7yKqXWTbuVcgnX05lfYf9VtjtAxA9aLScsrnG7w+bSTybcH6FD07TWb8Dho0XKZKE837z+BG47OZvWQPMeAVrO7HBDSY/K6Uj1K6rl3/54D63ExPBspeWzmOc9ZR/BHoZNk1PCB8tJwg61IKumhjcFhQg9Ck7mWQE1hRNLZpideDx3bmz93cYqlmFySlc8KEFK3/S4V3Svf/Uj9EfwcDF+bokDBxFmRxQS5NB6HFpG9O33x3SvM+mpcOTo/ZdS
Xs/G0sUBgNhbmSuENn9RqEIi8HBhwNrNR2RdJ1YBFfwkuEZNczOXPp9Wtqoi8SXrRgi3iy6nTldaGXKOxW13/8y5R09La3zTeRC8qNo3L0dwjHaBKKgWpHF6HNMHSNXmH4cvZQtUkZ3kQbw1Jw2OJz2qFZhgALOs96Hp84CXZCAcz6xArBJCo7jqTnnwKS/duFUkJQVVHl9qs5mTuRoD9n+wEbiCZwI8Mipinldb73oL2lAWJsLmJFJAkFCVa6fe57crNryIHFsGnx8FhiBRPX4OAUpEjtbbvStRX1+q4ZoVBd55SqvgVF3O15oAeI6WBldrQ8G7rok3nfpFjT799W2NTuA8DVcuQrTE4oGSszTHtPvJcDkXUi01DGhURWFj7oAM+fQ8/mtAxDV1plMevTNQS3pkXvDA27p6N7Q8AczBP55UsPh1ZLVU4PzXYH7dcwMCTihqRu93Cly/EOKRvmJbsS34lnwD+xOxYVvzrdrsOQAN1299u4HZ/Lv6hT0vB3iMKfX75RvwxN+Ac74BYQGLI8cNZoRMUuB6darFbSSRqTu/3rPKT3kr59EAW2lC1TcMAg+0ORF03h7OKaq3W8vV1hLO4cbdPwotx0v2KHwiiubWF+RKD5CeDMjMS41vBaEs3Ua9AcO1YT8803jh+8xupfJ9k9jcYWPghu0Dl2x+3i2XVbrtSwbaXGnaJJXZWExld4eN2bd85O0ef77wc9OsHyvjmbdBoXoIKSRrKIcT/ko/XyfjI/KSUrNnXinm1c6rqjQm+M+n5V73wkm3mUof6rie2JpJSxTagUld5BWvmzvausaC2NDlf957Q7fq0l73NjxNVvFOe+Tyf/fu6VbNi1ZvfMbluBQv65bRSZ4pxZl77wJm8Vxx3q+aTnJK6amJq5JAq5xgnMfrM5dsoD+ft10eTJonnTNTAmEpM95hJbBnObCrzolu8bjZnQhOko8z4pKGLjwVzfdCDU6FsbAv7spTsVTM7mbabtjl813G5pcswftd412ILMuJXFh+ij3xGCfKDuGA+NHh+aPCahXG4TeetPHLd5/qT9NfqO/1Z74GVp6WskjicBT0L0/uc/YjScE88NcPfGf9iJtfyLMKuAiCSHIQ0ksPeUshVBBXnich4SqgMLZFPRkuNGlAxCvQoPHTZ1GPEVH6XixzR3bQtyMtwvH7OIriFRLtU/BBrnsoE3GFsfI2bByfQUCklEiRLWXQiZe01p1CSeAFDECo6pKY4cyv2DuwpvD1DDMI2lAEEHY7SpZFWJbANBvvTOa1X3TjJkFMuvag3sLwhimdtbmep7zQ56miVjM22pRO3xOa+I/hkUM68W1qlG3iUle/zQ3iR3pS6X9+mGsseLEz9HJtRYx6eBm3vUi08YvDhRJhZoPsJeXSupDiVdrHtpT8N55gkA8bqPjyfLw6FKp4toEvdfOirz07te4+HjtVaDVz4+8sUabr2Nenyaf1hugSeBI8p5pJxxSWUfko8YZVsZq9/EGVIDGeyejrJLGEakmKhtenN2Om0wXIl0F6QHrcqEDBBim4z5K0tlOujthC3LdyXGMY4tU7X/HDz+h/+a2PP9X/L6PPj5PIdWHb1DPJNjoi0s0I648YLbqJxJs9fhKeXMeJLWculcHYruzAkt3goM+UbCrRjgEhlAvJOSHDkCN92VCHKwLrEZgOp8UYgi2Z2ooK6zGRAOi2asdWZ3rc1q+ZwrbvU8v+LKtI0eSDkHbCASlTBXkUfR4lZyRr0w4ek6oY8DcWVDb82t7+nX6pVD5lVTPW6Rhly3TWhRQVKk+vdbhblZitqVS966/9/F5PaXWee/jaL68h4SDfuoGm9Bf/8nptTborYHRMwUQp49RSUFf7JaGA2PQVt/kvOczrBce6+9/b2hSE5dPUngAFwg6Jpt68SB3+z5ZrIhXBAr7Dc8mZibfBgYztRn9yItuAVa4Bxzgcd8/VKj7sloMRZ6D+bFc5C1eL8n7EZHNuKo5nnD1tQBqGTAQvxZ0ww2aXPCd/g/jN+GsDY4lEGv9wwSnS5MTmHTIfXK2YAb4YVl388E/U2svWC8ltvLtHcpWpUYbUToCsq5OrPF/IS/3BaVq5jd2bJnZBgc8W8Vr1CAfItAjWSxY52XBw0hcdnwgUV9EIOnigWp5wp/owl8k5bnSVMxNEBooX5lSRTqqlrthfIrIje69WXz8/E+lHyjvSFh59BXBmgHkI09ezO2tPQRhvX9+fWrZsHe1ANzpocb/dUAys5UsetqJCXmzP823aVSuvDyGsWmYF8TfU4vXTPrPIuoqc2b6nZTmleLm2/FD+4srplf0ck3pu5WswDnVwPuCupJl9YdzZO5dh411NcUV/76sfnmosqUxFqxBPkcRpTLFKSSBx3SF0UZtneImPSRZJstTEpbdRqag8froISgbmuPD2vIKxFmvoNborMe49dJb/lmITIoiZtBxdr7edF3cR0HiZ3u3L4SSzcXT5Njwu3gIDgDI0xZZ8nOEUrRiHPW1gVzzse8T3C8IA+g+lwjH0HW2SmjVCF0Lwfd33D+NB8Vho1k9Bkw0j8wEtyWxS9pNvnLTfRFoquacnXuYrzrjHyJ5O1/9rhy51yGZaO0Pk2i5wBZKifKNdJbZYFUEDoO/ATu8MFdfugtwYQP6eHERTerzjdf9Eh6hjbQnIZro1DV82AwfDupzFle6Q04i7YBY2FeIoDUBkZcNjlSe2XVHPHn94SgWnSDaqvslZAPPqtps71AjT/4SfxeB+6NJ9bLWwWOBi63JlK9URruxeteOt0jxoqMNyaK2hdKH+axLavHYSkC/o2zoYJHlk0P57GtWTkf3dGbZOvVoo6oYFatf+fldLLJ3Jl3FaU5lqzZa1OUfN2RHeZa4YjW5VxzrU0RxR4aBrJqek4J9crCKWO2pgUlI0kpp0DwbICxIJLk5ofSa+VOvhBFaHnzwohrlTi0uLfZfimwsOan3fQFluLUbRI+Tzli9JoGIUZpjxCxZR635iDVZ+3r1bi4TixAW2YnAAIPYLS2Y1LAgn24JzDPhQvwPfoG9AiMaljZGJ5BMJJinAAf0BvQGkdfdej/1WI6L9PzwqlHI2Rr6ESHYzWCW70PDt0l206C+nkNjSzV+kP94DB9qf8T2dXFiom2KIvbBAWSlH/RxOtkzvIKSMJMlNJexwCtd4xhYQfjxfMTBEeGRr6r7r6aTBXaVOSXVCWzkA+6IxEYx7/uFirnCNsGIEVsJzVrGQWN4/v3gDVOgfElP0khj1bSFFWNf93bj2M+URFmZ7EYjy2HOBKkQiY6cHWmnY2hk9qztMFxm/LySJlZCtmUPVSbs4czdmAEqzGoC7YYL4Oz4MVnFhJ9FONcshZHunZ5HRu10rUesLma3/zZl+HkD9Glq29+l8pYWFR1385xqP/HYIlPX8CjX1mtLeHwf3nr6PtQe1PQct33aR+o0g0tXaT4kl9bMoaotbwy89aw/jBNItQ+SqC5K5ufJuYXD1h2PyVdo/tdyG7dx+xym2a6bWWjn+WdzHP6XARVY/k2qw9Xzqwx6WL0N8JTt+62/
agVRgK3Vzu/bMpF85y26mPVAeCwAkWcDDyKU7hv3OshxZcGfWyGqo0iIXu/DCvsY6+rKk8OU4UJrbHpP+o80oXS4GXUtbkJFysytsTMnbtJsWtfdyzfmgwTe2SXSM00KBdvrp+xnXVaEmR8DxtNfzc3neeMcTEQ/E5ILc/ASDUFMpgOv311rc0j1TD9lH3lrqdnAuiuK8aOqjIKFfna3vOEuwfBeTIUn8NIKasRMcbz0YFtCFA/GGqoHsjDdaML2MeyK401vRW9zwHqS8veg2WwXiukepA6HzYu22mxSjkqR6mM6ut4YOvL/aAa1n8tJZ9s8tsVlIHO38fCCsrLVr8ETUpdSSOa+1fA3OpgZ1ERi4Ig29QY3zr/vj8A+EcXqor6E5vMNxh/xxPGGdHxXJkP++iQIdR9XWGhwlU8p/lsTyQL0Y99VhLr+BZxBfqSaZBPt5WmvTiPvb7iYUBk1T7c4ojgss4jXznMFwFGpJPu1uF+I+6aQWu+EIaauIpvwCLaRefyb/kK4+KSzrEwwAs1oxoIuEDqCsVkqeDtIjqxbxEzotjm5B7Wi7Thp1Xn6lF3F3oUWaGA94NDn63TsZcVnsvf5khSNsKX/Zy9MvnVJl3lmE1kY7B8N5myzJ6XHqkw9ovxQ+bJ+aYcF5VzaGtl/It2fz+ctkjkyHy8FfRVWfS4OjJvVWsuNQixndgjVor6fYIs8UjC9vuyZcdkhg9q7afh38POyYi/bLlcb+bm1+MWMBbIVKndwl/b6nT0eZ04mkn5qI/RhjpX5PO9qwf/JQN9jnI88Z+3LAan67gNxYXHIxerv8ZzPRAgwwxVtP+iTi0WSmwQ/xEUUTAvIIfiyohIIfjUCSa7Vuc6qxG5UvL9rzbaDCXLQXB+0fEx5kRm/M9Q/5n1R5ej2JL6j+AMHYN3MERPyYhuEPSWosFagg64FDHgtl67xL+SuA036AMHZqaS3Xhdk7PiaYvd6GhceUOjOgKonCmdI7UHnPj40oICQKVnZfnQFen0t8IS8qXADD0LEn5j1TdYTZM470W7jWJvGm4z5sYbE2YAcp5LQYv3mSrKufrihfFe/+t/0DN3QUi4Qofm0/bfMZaS/okEQG4ezDYPkOFLOpsWWSI4nvXpx8l7ZNA5KzPD7Xdqgbp/BurT7x2HLM82tOgZuElDHzdbPAerLYEzUl2+JKNECHxUhlpD2nXw4wprp0wTEz8t0jBt6gNdN7n0ZFeLZ2UJ25b+xuwUN/JD7ct577HFi1d+XX/iWdc0f3dEV/0xtu6bqogi1xGeev1YTLHQVNTCh0eDdDgPFS35XNOomKZi1YelkVhtfJV0Cflim72asY9Hyi8s5jIONE4yJ17hTOXpfCy0UA5ZWgMVP3QFbuXFLtVbjL9GYlXuFf4Fd7rakzO7/YU0uPr9f0kdqVeoLAHeRMmxxIztSAc03DPlLX67RH7+HWcHpfRPv4NqtoFrkINLskNUVYpt+KVPjjoDS8j4Y8C5KZNB1SArLT5ZlXZ6z5JLTl+zZdDoyTbMo3s1q6bXcu1CLJi/vtkWD55Z/eA1pupJkCUU6d/ZYnegDxnO7oLm21HxaHrzfnkTcYuG1tv0rEMMZS4euy/U0wc7kwkmmkisMQIo4OUbivDhpfn+SxX6YV+o8lGOrnry/mto+FSbb/xpYSxRAPE6ZwsWgI/XOivN9rwfEWW01J+3lqmf1LlW3p9aKHK3bw7MzWP7dveg/T5X2YjLRGuYHCuPVDv2GgFEXA8kFBRvrWaWWwBdom8Qed11eBD30NY4hxfleWdDdF8NLHav4Vbw2xHN3dNeyxP40MXhWcA6bBqPVyBWNM7DJQKthUC8+uXXx5r0JFsuhbvEK3WFi+Yj8m0sA880vZEgbidjODUQ0uFz8KpVDCIEeFBpChvmPnw5kd8kWKs9f+mqtWJVPeqY52s88v/ZxnYu50Wcyhn/KrTvHGrnnz+dd0+bEwMPZz3hMaC8aWX7UdkAyE8sq3NhoIYD7aHz5XW++Cmf/0dAz9TetgNkOOk0A5+x4pqyz7idZtQYSTRbbD2mrxJ3krv084kC3jQq9BEkiBOirx7roGtcgE7tlzFjodzd7OBWcaSq293qlP5pXtWiyBavBsDyOslfOSfETKT8CpLvBax49gag34Y8J25VSBOGZ67EZZqVOYd+OvaSec5cHUR6yF4TJ5Xme7W3NBynAH83dmNxPIxwPQh3K0+9BQBnlYQw4hjHOFgweAOiaLiFhbAB6EDrMbHiEu9/Q8b2uIRxk7c2urPt+xO3oXuMdQ9lTON8fkmHuIamNWfTae1abx32FY/PeuDAD0BX1p4GeYheFqMXs6xDOhpzPw7p6i3d3h16DDYSZhoubUULWOjNLFjYoqJMbo9YlrPOU86ButQYi9CAZPH8dW+xsOHrSMFRoXL9FPOvr3M7idumHED5bxwpAhtXboUJsAFR20p9MsSoCDhCXJE5xv/rChhA/IQBXuzeOgpdcHcAh6gEc/8XIl+urwLzx3H6VSo/Ee0EN2cMA+nYr1Tse+HGHfDvSPBBsc6KMKfWIUzsweDaMj7OKA/Xd6/cvQnqGvQmxyoP8YLPj/CCOYbwEfGW6AMcPxSLhm8sqY1krWDDmyLYA6ghaprQbXgFUZBgVGbakWMiAAwvAAawBXIsN7vgph1RckPJNpy5bHODkxlAAUpDyAhggTkLcgLSvO2gFCWGvo2v9A1BQ27mA5SHMGsvd8zYiAyvk6RFiBFX2mOQG+hzKT5gSvAzAW5JXhehAdlbEI67CCAwAAzDnQ56QcsQ6RrI9kA6EVArQkA0UGgoVgZYnal6wAUDPbgWBiAACXgBUCG1rgGqwQ0thjE+7IOsIANgWTzfS65hQsI4IvxZfrAx5rBvI7KAiPHcqq9IHwCY0fcMeExwGNMqlGCosd6pDQ1/jnQiVCDotj1COhH3A2odJzB88VJTP6GfOqVBlv4XmF25nQr7g3oY1Ge2i8RvB078MGFivuesKO8T9Qqj2s0HiDu4GwU1xD6Fmxw65BRP6KuiPsDFcTetFoB41bPM4JuxBXypQh9NBsP1jUQbPD1qJ3aPY5WNRCs36w6A2WfQ0WNbDse7DoFZb9DBadoNmwNddBs+Ng0QssOw0WPcOy82BRDcvGwfCFw7cJNPLzm+Dswu+Xgg6lH4yB93xC1/H4Jsi8cHcp+AHhbRRkJrx9ENx44e2b4MaEt5eCG2X+6ATfvfDHq+C7CX8cBN+VS1WGKVfp6DRjXeBwz59Yr/BnpKRbwWHHk29zHJRt3TIOwoZxhqdrmpn8D7vfDK81hVZ84ec9/dN0xM8z8f9/KdeAFUK7cw0r6axWO0LjIocgkEu4Oyq4MBVFPcBWUYewhNbAnfwSDkN1DkBVbrNY/n8V9l91gBS7tYSC4kjhENF91SAgomQ8wOBI+bzrFHuXFjMMSaizDIqccBXHkbKjIiyhUM//qDW0yEbaQtFpQAI4XocMRtBdekY0hoyiDqS7DW20BOrfpQQHqRZ34i3hqqTPcqV9GfYDtMM/mUKltbCP+IxU9G+rnGBEuw
R5uZJLOCasi8NBhekkEMc8sMPKjHnxINY4dQitbG2kkdhqVVfDC11Gji7UsLGyWsvypKZU/n8vIYOiIY6Dw43BIkGJwEwRpEIGh0svsAOS9HtXXkKhbg0YpCWdVMURjYVFgqISvBBC44DFae4fYRkUtaE6F05YNQv0rfrA6+DExpCBzUEUcgmHQQPSD39GKgYyZfy3iGZRLdUoGQsjlPvjJ+CLpb6BL46IWJhuCZUyOtaC5AIzuNKavCowC3F1R6wxGf3SNjrZWM/tL6RJQtdLKH0G+6jPiEBjCkXvSPnNeB0cOQQBER7lEm4ORmgdaEwR/OKoaDc+A5TRSmqAkkf2fgkxnPoiJPCXiMREM3wpRX0HJwqzkWzHHK5jYO+KwuqvF6Hul8XqRQ2ydHxb83HmpZi/L8FmJdjRDe8hm1B9EqEw7RqO4Y2nYOAlfG3mBcI58PA2xQxDxuaQc/o3NknYJhXTItBmGvF6jPEApsPX0wMbC9LO6QHxafY6Nyq2p6ZPDa0EvH8xp844Oo6YYdS1svpMXhDjvp6dsO46Qi9v3iukmSR/F+9+OIh61Ct/VPbKeZmdSB/oJ+FrdBMfI4qGt7bPSCRi56nUgfT1YeBBwnRY/xt6UuASjlFANFuThhgOYSlAlwwZKGdWvdCHFA9jxzc94JtgipjCfs5RX+vBSjwBW6jyJCyA4zqwc2DxGlZHBZS8oV7HZqVluIcjqOEOEwxSzC5AKjPC04hc2mjxUk10c4ccSYo3ciknnIyDY4sVEWLHxxFDHu0QYnj/XqeipOi+MURwlyDj25KQhLYMKHKKX46XpvF1nGW3T8IjwDBsXXMR3kpXv3v+qcqsKIGZ6HEINNXKHXjLWnR9EmOWykaMgj4DI7M2cEiN3+tEEHkSlITHt7OipDRo3l9YWCAHGDK0gEISsTGxFvzWMi6v97XZI4JV40BMnBikWl73SXst6YRny6MY6qwkqfRjFJKJ90GbaCwaMSJZdZylyEHwmT+iYXAI948IelXWVB1S5HcI9P2FhzFB8pmC58YV7I8Mi2daiTiOCVGhEI8Bkn5NDLFLwQVwDNYisHk9wMxiHQtVws5NOvgySqBS70ZZLeAuCi5VcVAoqIOLjTC4MSNE8Numev3kE+WCkjgOBfjrlctUy64zxn+3wPpV5wDbmdMEBXpcriWqIlVbRmwyvZRx0sF1mstL8ZxwxU7kvS7+eHxQivKk1hCu9IAU63e0J+Zsl0i6DwrzJi5vUP2XEaM+v1Ga++5+cC+g0yePiOCEM9GbXAnKYMRa24R6ilRSj7XhCYN6qN/P7u1GAxKIGEqYiuNoCSJQ0HlMIEEZdJ3CFI7ucjKLz2owiNTbWbTrlLdJSfX5TnV/UoPhqIYI3LRMK206c2P3jbyJrbGBFoMoHvn4RnjkhB5wkbdDQsNhywp7qwiRiRRbDKrkmOOu8EovIjdlDHc5td4CpHiuMIbIS0K3L6J7FyjMWOgx0Sam1O6TYPJiofXhCOkLDQdRhy0PHbn3jrv0HnJ1zIL3jVMvJMgD8fzUGnKSYEwwYzQ4O/diLBMiCoEJnIuQDuNmJLchJ3nLBhYOMzMgW+mHj70pls1PBEZs73LRb4sB0+ullXTl2aVXVS9n8EDm+zGvH1iLd5ptR52tirne/sqcYC199OIcxwkVJCZBUUKhB2Q4pBdz4CW0V6c8Uq4EqGYTXPf1jqZPyCDSE7y1aJlQldX1aRwH5wptNHCv6dknPoNU5SaaNaTRDppB55WDgQewEZSr1R3kSAJZoz7LqOEfrTTsG/gTnaGBjtijGCR41Exj6n/tyMATelVcwjE5f6TXswwKot+rm1WkWpLuhufvT6p7qzm/czebBd1XCZqz32+ud24NuNIAsZd3xUQXn1oWGhvQAB3Ql0sqiOzuerYvottZBtFNvj36Fq+abKRHK+40bsCLyN1WI3MnBU37s8J4v91wh8iuPT4RHuPJ2usnn7X1PSKqlTWwQA017EJghmIy74Zd/07S7xtnRgkVFHcNo6MJCzBYTI3OGrx11SDQCVgDT+BujiU6oxc4unrpCDiRfInhWQGn0zKBQV1ixj0tA/vFwZAR+eVYsAvE6T+PwP5OnR85QfoGZ6h0HR+E95a+ymtq3RnM4AloNSA2W0nFol9VzE0rV3AySRDAid+aEJUhLWeO5JfDrBLHIyCb0caIjmY++Rwvd6wPMXREu8YKn514rkzPBWbEgYB5Yce8FIHbx8qkEwaM4HEBh7GFUE6rAK6RWEOmzI/gTNkRdood4ERraVDG3SVU/wADZhgcw74qaUlPxET/nBsnrHZznqI2npMM2+RRjxAUhZnOx0oczFdTZq2YpKycdwVJ67AcPlkJGv+w16rL1h1vipXGDqI4hKzNumzXmexowIlJBGOaLPPUokOsCuK+nvMrVneDs2Uzy8+vwAa0sK57U8eYvD+Bd/9GieMCtmW4Mii6xmL4gMSCSN2WfB37vjkdHz7pMQ4MBVloK85th7IxKpkA12A7AoPaqn3XKQumJuiR3suNOj0TsxRxexlHBbeuJGiKyCMeqHQ+q1yQ2PsqnwARpdyQzoqNM1mDtL6UgSZOKCZo04ZvJZcyJYNBvfa6HCYzd0kXMCTMD0j5rG2yaI7xvgCa1XCPeijyK8mLEbKkTophtxD7Sm5C21M2USV1o/op6uN4ShqBvfGBWYjs6gxNUBD0e65dRdpVit6eDgMUeRop1w6LU4fYVvRdp7nu/pt/k5XP8rJBDJroax+jEUuRwcHyKmmXDQUYahs6KXZFpETXMZrFuEPe3n6Mds6dJPSYJuoARWnCBv9j8SKdIRby/usRYIJ65YCh2J0lzNuAvhKALHvqItgKYLReAsbKtAbkJAFXiFQqbfkVafBHnGCmzHBW3rev/2bg01RB2ZHxL7WYQF8ufLw0dVXdQKg9ziGMU6RtcryqaoR04i9v9KCIgsTmVYXMohfTosk7ZQN/Oxr7xQiDFC3hMMZ84W5fBK2ikMnOX8egTXmJBn0L7I+kPIkDygDdU6NjeL8wv6Ol3b+ImL+joqPKd6JKXi6ygJFnDY5fNjRJPpdsNdJwiTYbkBUh75leURopKm09ItdqnQ19MKhtpOh6gSInlxyJ7vlZjf5AiRaP2kENb1NqcbjI6UixxnVIRT6CiEpN4ACBRUP6gdooG4NQ7qUJ9CTLBBBS6PpR6Ks5XV6I7mMCo1Ddf5ptT/Gs2Ls6vLXjmBXxXikQC+cisK+hM6DQ+GOf1pkuacnKlazTayM0Wo0D2ArSImpUGKuBRZhTrK0GBvM0BmOKma1gZg//xiGjCFq5CagUter+1FpALuDPHxWrz9fTTgxdxCE8Cc87hwU8WZ4h4nssJK9PkQ98u33SlWCy4FNLfi0oz4T/6TWDo8iwL9J37fFTrTdmlK1r11ZhQe+UNWxchN+rGP0aGFL4tzWnUKwmSvM9SAm4roA6Ik8ei4dKsRiHUId3V6oEDitv8NHLRGuJCEhRF8Lj7FVTq21FpA3tdHDUCBzHAEiBoyqRwSfOweAEXAJgMlGgtXzN67O82
rhBke7VztFcjoILHuvFFM65uoHrc66NQe9n1VB1YnlQy2pVZY+hqaXqSHZMBG8MnjC0GiB+szHbOHBDMZSMAU8HgyKN6ZFM0Muapw9WEsUO2rwBuyS5GwAh/zAnz/glJifEZAnE8NVYG+cSLAWeh7iS2P7YAaH5TZ0uh0wdWPDir5J8k3hxYTFQHnN0Doc3isRuKUe5ucT7sp6h8M21AjN8lTr9YnCHByiB44GBVYEDflUBrTjslb9+Bvjdr58B9o2hIgLgwYUrjCsSB+tcfLk0KxqvKiBGdhAJxE7DESRohIOJO/xWlCCFskA+MZTbPaicWfDJw1fDUZWw70mLEfliQ+EapaszxbSetoLUR8WnyOH7WYON7sFrOfmVncFdLO8P8PcSx+2RttEpjrB94E8Y4EISeVe0KhaX2j47VqxbnBUqovdFXTsmFPkBP1XSGaZ0pPASOJ4arDUdTA2BO9P6S+VvJw4k5wgh7SQZ7DRDFNIYKcyVNewQkSB4RpxkJtyFox/loDpcPlbcTp0ux0RGZqyMh0RWPyVxsIyqHGOSB878vAC3v5dBirl6uh5gXqkBDeK4QhAEJBAxPs9jZSvNfYyinLbYng/NsRJPRfx+G+RgOUiGEehmIQFF04IXyqgm4ILVelpBV4vAyt4xZt6Wr8XXdjrK1HJGMPqtHVFUO1O+rcjLSxK6H4+6vxsLcqI0Pqmt6i0NYEs2zkygzlltu8XjEE4PRo9rS3oxrhX6/qiKQ5MHAkTqw9zDfIkV4hVM3X/F4vgZXBgzR4+52GEsWQHRwf1gaEKy0/J+Gzgcw7rOUEwD2LUk4pvUK33vnpFkBa5XTMDKDnBxHClYHcTIlcuQ/iIx3CM4dLPqzhovIawc6bRcv9ftiEX6xfYmIxnew4nOYzFMFJU0M4sQaIE9aMPRMhKZydiBhAlJTRfO1WKEEJIhEi/GkOgCPK5jqGmhTTYkDU0Wmk+nhTFptPFuDD+KnfurGP3ky1zQ3cQFj3jXN2G4cr2kikqIxdQhqixNG0yEm27pt60LZZADPNXG1UMM5sXIQ5GDfm8Ee+z6w+GTvtCS6xENxYgPzNV8AEQcfqwr1kVy7/go3hTJzz0p2jjNlgcXRq9y83EKBWHiLkE8jPgwhXIEzbVZzOypO1t1xVG1RDLhjJdT0ZhlXHISJEdLwYL3G1GxKqOgME5uhNul2B0BeFbg78AQhR5nMFM3MC5DxTJi79N7CdRp2f6bwwDPc4YVM6Ukw+ZJnRcJ1Nz14GmTmJsrE5YSLh8VuV+deOme1aLHkgKT33dVY4FFrA+4q+bPj/UxAMKA6AvOdLQI1ZRh9YCWuJB3HO/N47DrVKwDOg1Ab0WSY+/aVj6LC6YzQngPCbmkJX+Mvchrf+LicynbxGR88Le88NOSYyKBz37h2+KSL/7CV3/hjV+UZp/45XzDHlZy0mRMPL62prPabJ+RAQrnNwWs6MwBZP3Ska6gzeIcaGoD8SzqVs1ZhylK5fmFtdVwgPz9joD8AOSjVgpeO7jI/139jOIU97sJzLJ5BAUyoCNErTexMpTyrCOUfh27Lb4oQhKOC2wGQ1KPCeK2GVZ4ESxqBplajnk2MQshTuUIJ9RNDWxtJuzaZZRgY6bND5wBakeolpM+BQXKV9q5Y56yaIK7ihwxKG08zggMijljzMDwZCbaXfloC0H1tNBTGhZVDDpn0PodxWf2iKXI/poC79ODeEOS2r7ISgFF+WevG+p6+y1bfBgC+yoR0Zf31iOGcGUMMcPeDTfhVZ06oXWsIoVEONd4SZQMAQjWVNlL/VM4Nb74sJ1RnmbbubjA19g2+wbOTxARraLT0ttqcVkjof8B1f1lP3c7R/9Gkv/pCIihEg1tHvHNt2qJeMNwmbM12RH8nCGjiKoAUjMrEan87ZEl5VWjpkIrhF0ufTvcj1uK4m/8Z6CvbYmSUVv+BOBqcG9ppP/ZMoJ5mIYIlMhQ9/rrwiSehGY1Zk+mzy9mOCSnjiTsdWzHoOYuEHxig0fjyCtNdIP7o1pYqv52eN8x4sLFyWyf7b2CvQ4M23joQJjvRYZujnZGNgwfd4wJMuamqFQXeDIiS5L26egkuDK4KMPFNQMY3GUUK97+CizsvTszYqycX1YoEdmc6u8DgRAF+09/wHzn2R1frO13Bu2aZtjfsmnFW7vK5AoanlA3bXn0ikF/uNOgrB3S8WiOdYrDgDV9kZIXU60clEmkgtXWapKz9RiDOqcSuXTJ8LcM1T8o4X3zCyLWVqUMyP3rmHcpi9w32KrsW0LXvQpyNDNTL3bZxLhb2TajVvQPs5C0FfdZdT2cc76hDQdWlzlKhbHfGVcop1eKg0M0ghUazBsbszlZdguwFrlouI2rhVt2qLWlD0Cbvi+Eo5qDak0fsBatlRni7XT+UAeEmZJdhCAjoXG+AW1fdstC7XXgEr2HWO90VivnF0JvfyKIwrQ2oYHKNmAr9DsBTfw1/qxDHxcIhhhFBJdETSocFGGRwAb7gjh4AggnnnsuUbxWpooDGofb4sC+7DA05WOeBGKERSicd4vlwB5egTp4m7hXIkeef0I1yo+gMWB5npPxTOgMWsY615GB6Sr2VW3GsmH4y2zgOxAeB/CHpzIrM80Z5ewQROaLckIYKZjrY1v7CCDCbuxL6ks8/AmbrwTyBZ1L75XrgK8P6F6sUlWYjvnavvUHLsiIgYoVOBuW5QqnzA19vRsAskcWHLc1V+cOYW9l4D5IcX0IWmn3gXWK5ZP1HG1eLNtStgfVMJqT8CgBv7YxNY+X3s5RrStdPoMx9LqH+3bGAS1eLa8JDG4P+4c1GU6XUMQ0ubX3nZ3r3fUqXeJgjeiAs4eozDpnIxxo+BX9gjZdQTNP8spLp/3Ttw6qHUJQS4cZGtAcned/za6Q54eIXYuzfZNKu4OFwtfDnV3jeDt3sesHLhOjW2/X/xn2oUd7/g3fUJQcqWCBkBpJOWue+hAgSi4R8xI3O1qAQixOq5QhW20NpaHKxUR8i9AFpmBWDajieH3KFYgAdPIvDp23UBEb8Ohr5allRczofpWBLmtNyyRSVG/HcttaBuw7QMwmIoIfeBNJ/O0kmcLiLWh13kUOKRC7ROiVfMqMOD4adSFa3tE2frdaylcmxttD2n067ipzuEVkxry0xDPNzNLPgbHhqhmF92Mlv3Am8inmmlYJZEms5jMu5akowiYzJR15roceEf1aKxiLFQHhQ+t+A3rn5REMVApYRWuXOH+ZAVLwntej7VohnZaPOLJkqAdu4a6cpfDesbwqipmObhbnOM/2fza0BA4A4VAaZaqhP/zjzUxS5hRyl3YfcA/5m3/oe1+BIb60fGDhd7MyaDNdCaS2VpR2YQzL8hYW1D+wm5XO/3uRYXVW6wsna1qyGpJQf506Gs/yKPd5cV17OyaUiL34zwOuKqHkiqsijx33UsWODINJKczXQo/U1foE0L8FauKLUx+Bt8MMfLU2cTZO7YZSMrfGvnePUuRq3XPpCN6vNh8V9DW8Q1oejCwvoQdfZLVaOF/U3X7QaBYnkfKccGkL
qIWirrI2I9AfmFCtt3qzIKiPqLjDiiZgIuYA0r++joZ1X1RVGxVR9GsLCLUII5WCuo0+36LFFvc1vHa+aXQtvjoyTszOkWUHbMmE6UzSOw/MDrSEoOi3WA44KanSUlWPVw9HVBEYbuG8WnyXDRcTza7d7J1sFINpbxPzpGGoLXJtknOGCzS67ZqJfqM9vUXaufJZrYKCIiPh7g9LMIfGHARUdw7LxXCFS9iF9B/SFDdo5eMGzef8CztPgZdgyioIe7iOQfFSSARmFwUt2pvSBFLw7ZUNAS2yd/9IquQWATgHStWpmZZTIh5jcEdgnEsv9EZPJ+mHItwvsb0+tJl+DAge5ZajBby/a7Jt7038O9VLbLt84h/oLoC84USJmtPsWP/LH6Qa6ITjzTn6/O1gPx2wMplYDoZy0HPIrEHjvbu1X6OVRvY8Xxb+gUqJao2H8ZFJUk17odLF7TB25J+qxWhtfJr77RNnI4v0D/QlXaQ0GVISgNO9bNusOtQDOUTY7yPXo9nx06xgCE/XawXnoHgV8xasGJvFdZ7cxVsX0unGB2rxWjwr+zI30nQPGwsKnuutb8B1HfRVPXFMXCwmtLR3ZiJRa6cim2u0tKBTN/5btYCWxIvQUH/SzbITf1iOknXX+ni9qiCDnb+YMffcfrcv/hZ0vxb6y9n1aEj/QfsZLS6zY4ZGdZ2nJz+8ZTHsMkTSkAdsxaXNHghq2ZdoOaqiPbdvV2Cq6pTL3N7xMgAsCrNTFxfUYp8zGp9jf9X/h2c/9v7AFtuwatKEmHO/nCkA22+CAl/VFSK8fTQgroDsey2nW4LtRY28bDn5/iGGFPrns6fIIFEfFfZ/kfxVbDd2YQb5GnG//bpvDu6rKS5uwVBfmLWYYvBqHa4+hkyeYDXRBBepr5hP8AKcdZueMGHIcuxxxkFaO4QQyfOtjzhZS6NzfrnnkHqLceF404kkXj4b325WalEaYx4cViOVERosRn49L4UiedmCfOhVgagJhYqsivgXyoTkSuezQBSqrT2PaTIkumWJnxKV4i+neZyS50dN60Yyc0abMQXfHBfkP523mKmrbyUS3hbLh5f1d05IIGwwFJ2j0oajfKP8CkecQ1V+KH5+RAcTfuvOHMENY4bCrGqhJd4fRyPyQH4sInjMwGaOYMlR4c7qjQdDJaf2rhjiYdDb+pEQVUHbg0huHGsqKCfdRLhrSB1So2BOGSrMeBXDivj6MewIE0xbdEa6i+w4FGqJcitFz+NI6Yx44yhUcdkFi+yKiWns/XL5RpNF/dV7jYpYe0sUTAvtNyGCVtUoC5xoKY6yO5sFhrqjRX3jAPyOy+4tZpYkrh50S6EZw30fUv0qnn9Mez4NuVq/nl3Dsk7+yYa8nNL/uys0GFMvYHDxWdPlog/hKuSzd1yEcTC/sHIBrpPr1KDA3Iu3Z1ezZvnQlZWHjivLijjtk6HCbPXGY1L0Zr80n9rknoY5f8QTjVPoWSaFTiYAUxduYaZLDeCID8QaD9VbhS5d+DvAiEXTJZpXr6/Y/qo4JMe+mmfoCobv0yKXg4/Fozt0ZZxTZQli7gq5fM02C8fRW4j7yN1l19X2QePRLgjnUJoz+a3jWTvMdXyUBPF7Lb8LWULwpnirkNpEPBk7RbalojunR+AmblRZIgg5VKcN6LwpR1/NZXwCAuvEpV6ChC6wq/xehjucTq8Ud7MdIrHSG0weiPWzjrFjSd5f8L2tD1OU39nGRVBdZV+l4MKqhlxantbJuSLDIlnwgnnl6wT87bEGJU+PmL5vDLGCfoefzRXObpTaB/g26XZ1DpGr2U+epLCzrBjG8NgCU1MRlyrNzB0dw5YKy5XhW8qUaUK5ubZJFJz/8OTvUMNqQZ8wAObzxLhFTYh0jVUm1RDZ55/RPsmtltcIpATLtX0qweUj6UaVTVgWAabPKB3ynM11XeEfCDV8ew6iMwQ0AVPacbZeQTvLZLHujiULrR0NVQh35bCZwvN7foIvVZCLLt0AEtV5ooPItzjsOIVZlNgfMJB3PrAIsdAHjldjlIujZzivTTSVoAgQFCspAcjhIXcT7yuJl0AVDja4XozKToGdWuW+UKoZHvNQAzqsUV9CLan+bsj+rUD3A7T0LwIx68QXKwtPAMPmdTgAK9T3HPhRyAWWlfLAWmF42GeS60Fh8rIrtePFxqaG6YmzogePLTTYPXwDxZEKPF9dvAZDVqIkIRETVuvDnr/j8Wz0CNokbo/8Z+w7+LDln30eR4pK0FgfVbzi6MY72Fo9AjJhn7NwUhPeMLtg9DrRmX80tAQ0HNbpyDUiVwbFi5Eb5xwoZLsOhboTg3LiI1aFW9Nq8PZqSg4izIx9U9NQeLstFMILgvgVlMRKWqOkouWyAI89aO+0/W/QP+5G5vk1pRQOjpDAOsvJqf8tubOVOBCw/3uGCu3W/H9oPUAOtKZlR/gZnQGq0VsozVvRGhxD+PPeWJz74XqfLjx21+l9Bd2JA1B7w0t5CtveFp9Ad7HEccc1HsqGEj7LmcWSp1BP1VVFTNzlPtKy9DVay68jHgE8F/KLbW5rUHSQI5PWrtktK1x7hQYQiobrLR+o9DCEjtbF4orxvzFs/4z65V+ntK9Pl22huhazJe0pXytMmQ0xODx2TvDAtfEwBqoYsbGLImHTdrJUfGPCmkZbY5TCcVECq27LsLgMlsM5e1/4gDVUyx3U2k+12pHSsFAdo5z4R+RfVJ3ihV4LSTgcEhKcHh4su3pufOSQO4eS5zlDo+/Patc/JdHRe9O1NRAwMVmsdxW5kFHvJOkRwpayJFAKa5787Vq7P386sFCSCO5q/OnXZPr/a2/vqZs3LeWwxt6hv8LJHRF58nGhON13ge/RiqF9jwYG7BZ5747d1X7JSOS2mLES/2wt7xLwYklR8uAlgo7RJhsEpuVpi8iYgzvUYiwqRyOhCVbScTqYTzDDVOV6xwZaIoVry8v8cYC4nZu7TCrD5IoF1XCPFln2s2NW7Buwe6wZlvkCr/bwvqB7hQFW+QOTekTY5QpwAplh0o2ZjU2Ao4l5bqve4eQW+aTSPDd6cFoopxwuedHInGQwX6W5mI4EnMjYjceIBAUKuTnH6sBZRQmVxUgddA46mOw8dY7GidAhw3LocFzggEcq+94oua65hdRwjrjOGv3wyQGWUyYwT1SlB1nNtB7TtMlMKiy6QBpyB7zgMMg+jrM6mTH2kXrg9nKwO0c5M38/Ef7LqUP4DLdLzda9oEN7iSY5BRxUnzwmSjhzzXm6UexzIpyyGRs3PVPC+enz7kXIPm3Pcly88QD4sJA3w0QHTZBjg0CRdzUvoSBABSCx4r0GhwaYJ/fGlRe599nNC0/VvWeAKE/SUmv1TsnwCRAZ2mzp63E/G9VDAddG0urTscKHgb2bjpalEUlaDnvAP2KO/GojqCLK8qn5FvzphMGfJ/DLuQm+Fv4Js0ZKd5Yw44gggFzvIB6fZVWF/lIErVDbhKbVtQ4k4zRXQxgBsVMdyPNR2QZCw/rZZWeyJUDyEoOweAYRL5WCsqEhFeNeTOxJ/nI2Mc7
OtYR5HHS1qRZppB0dhhKboElKrfpbQoOvFhEqLaM6TJuIlHi73p0HR74cFGT7JjikB3ViG9UmpnpySqw5vHN0kjpeFscMOQcG/L6R6l/+iv9wL9Wn9YQrJdqemZZhjXzFdB8RGlNvlbMsPJddWKusTObiK+6R4RlFQLPIsZczxBJv34v6SCNPimrDoKg4jwBbpyegcb0nS9guyt8Y9LxB9FV8WnmbX76ygwFO2xMAU+bcGRjLEyI1XDCxZhFGmdcc02LXlfpxZOGOH5nISzzoyxk0EZ4kxqOHICY1QHNfOdwfFSX/2dlEmRNh9Jsyzs0fUNyWf4CNU1sYK7SfKBLddWh1FK0Kpxm3fUiNJpyKPU+b5RtspEqIJ0FEns0Oz2Inx6wM2XprEfOYlN97IDE/te4z7t1j9zxcu4NTN/XJmXaHpFTfcbjH6z5F0SoBnTjTarStaLJpPawrYVW6ZWJeia7ZrBQA20yEdTe7TXhQ6uIjIhfXCy/+lAy3tHN9eEPR3mF+W90VF7gbkwoo7+dKCrpfTghYzVZTL7rhciIfbSR4uK7S0ss4R912qecpxFWoq40CTMJchluPLsXhyOTUWa0K7bcf7atuIRr5CD5r+o7fPRZg2+UTkyFkfTaPT1RW7AhIQewhEu+Aq1XuE3dN7gIgFUKQk/Qel3CmMUyffeiomsY4feOHjt00ltN4mL5pGuvpyz504TBNd3MtMYZvSrfzqEulJCxN6W7VQ1y6WjLfRtAC2RYR1UYXlM8kMn340KF7Dnmc6as/dOSewxxnGofpm6ZRp2/+0PFhGqvpWz90oZrG22nanGKxQ8WyvLPNapv00D1SPOGHc48QItfw+xri0q1seL+hvhz9V1PCKoiEtujWErZO1ouoO79A9h4MjhACDmRgFibFztoFRFR2Bw01nq/yGq5+MbAJjkwQ+8ZF6rwKQUiRxAsjHSNpUghSDCjUdep44CMySviSlj/AmOi+iDXVGIdFwKWgf7meYNbAxkBCgCYzmdJC6BZq+BID+ifzyv2hkyHIghAS+vtmMOBkj4/NqeFckqSyUnan/Bx9kO2WKC1PW6isbUeU6pRJ8rvePYST+Mic7gBLnFzOC2bYrtVQrLqYdzq5tjTd7mZbh1C/D5BV1TartxapXofrW7eE+wGEwX65ic/C8Qsak+eOqXICXjxv5xY/JW+3GlKOO1nGZN3bcEHCBQmmsJsfYyKxn/xqMGk8GYywbfuY+0Tx7cZVvKW2QkvEX+HC2FGivkEC2UeEh/eg5gByZ2Lm5nmxilO516CVquLk6yI7BGyS+qogLA5PPS/iDLT8ELcpWQN9gIeWrklhS8ryijE5/jTMttn1Qc2vcGIuChe/Kv0NTQkb3l1kC1eG5KpLdOpSME0T62+igQGzTwy+Y/fz/eaYyToybzP2uzosADX9ys4tt2LQGK2j/S7BJlYrCVGuX0gdRdbvSYPqcdnDxKanqn1EMN2ogNxDqichF5fyETDJvjbpRIo8AOQR3+y2VrmF6JZjC7V+cniy+0zVi4hgK1Ie4T3T2nn3SXyOV6ortQ76Yeo3rfuXMrr4tYQfiWUgoKwSicAj+0AdrZkyUv/VbKKX22wMdaX1WTqkBWrapBKhuzWTOJj13HDa1ol2TZU2SVK0xsV+y4SB9ck0UXIH1leVnZNA+GOstQ+B4eYgAYxTrByqHzEDGJ9pq7cnAZL2Kj0LYXntHxBC6utz1oAq1s2/p2fRKv4660A8j4Cfh2zHIpNhd6eoCDAb0j53oqUiRgapDYw0R1L0XjoajqkPOMfgMTx9ah3yNCdxC1iNx5IG2kl/xAjm5j01mnaQ1JG77lsHRluREP5R/hdpO8v72PfAo/wABnP1ZnG1AdQFRzvQAGFsXjNHODY5hInzcd2Fn/CIJftT6eeJnu7jDmriJ+1cixdj0WUCw7isKC04020/wmn1vQ/3ymEX6P+aIm+tbpPDj82dKl+bifBw+eTi4BMrwUx81TpkkmjdC48O0OzA1tj6gmzn0CE0pDcS9ksHJtZBsXRhhi/Y+qqK7Qh1aMZ1EXZCVrqq35HevnwnqbhXnFJFMYV5uqcIAzIvvallX171z+sslHzTpewCbFmo64pH2f2DfZ4XNj822TfqJezFFGNzMCL/83npCPg3VPDKfw2ty/vY2hjXGxXvwwEpi37Og0y7DcyIoFPoKwE1PwQp46VbR5N8HmHAozRrlupJ42k6J9jEnpfA4DlDKFMBCWk+7ktiL3hrLInNHLvkrqR++clW6TmTE+gGPLAFZgv3ZIVCh1jDUgP6DAdTN9s5tGtSsgYrtpwkaKT1GVPD7DVc7q7R6b0UzNi9SLAk7tQKXqubg2AF6+tNBhMEIhRj5sXAlJvLDMyK4sHk7BOWKzl7tc6hTeGrkhdul/D3jeb/7ppOCu7Dv/w+X6/xyhzP0rckKQSEr8Rxi6SsDujO3JNyZI3jDv9cVIaNVg9KlvWM0IXaBcFqjBeTX8dUOBMwf7rKTrGu6btfyYiJHGEQixxyyMn66paEHWAxU+j1MACOHrhwUrpAQhblMOmNCRQ59ELTt3tQe3gTALQtP6w7/os74GAK4MXoIOXqQXUzmEAjom8fSC0Er06Td3Z2OhCIUAuW80SWzp7r8vOwlkIixENd3wuJVsflioFaJsNXRgK8EMWU8ABbsbJQUDHjWnMKLKlarjpCqJo23/GTYwUinpUc8LhokfdZQdU9PAWTamYltdDVl7wE55yp50B1ZgjRRpsGkiT0U4nqhygUIi4YJAKzuHjJMu99gd4OSRXc226t2Yfew9JUYbKGkDNBHGgvfPAKu1wAFrHB4L5RxeKQZgJ6uzvnywtz4UgeBR5FYkFcccmYSFWmSpu5KBaXloovbgqnOQLz3GA6ODH2NYgZJOrl8CXF09kmDItpXVPAj2QuicRse0Y7NhfWT2Qyija0bklZJMLx8pKr4mkyyywj9/dObyOOYB6JRkt/ivlrsdarC/j5zGHEtEjy9u+cOc1Zg4RW8JvwyE7vJeFdb+Fm1Z1zDVVEb/wqpEhr08nTMFWanJPhG3CTDGxsnByjuLaiqzRwtEyU5gwY+r3yZHtTtY0UiJwPMqJBFir1MNzdj4qWcVbtSHdvCcysiolwNGNHZmh1/8XqbwC4hVL5adk1GDuaFoWfU7B19FTfDKduB2XCbFJShUlHmc/BXknZuYSJN06heQSOzNwrCwFzz251/Izem6ZwHudQSb3CtUMw92WcuMC+KpoUc4nl8sRRT79KjdnjYjPBGRvdc1m4B87Y9Q/OKAuHORvns1e4fpjlAWZUwTHoBS8bg1oYksnMRW0u/+BcgvewBLE+tPn6DrGbhd+Z5dETd78viIQ7/zioBn/9+4NgFbDq1L8NW8r8zSvJJcMzTyAmS9T3YiyuJwx3uNJPBkzLc21PKQlX/FxtffR5Gpju/patkivekgMgVDvZJcCKmawwhuZCHAEk2qlu8V0ktrI4VMt46ejcEGVLJuz3L+e2SnATgpv7AIsp4u/Uf2ibG/9i8zYoe2MGAVyqU06Ko5YRqbHuLh
EqQwFfA08TquTmrDXh2rgutURR58Cw9dyMBKRR6u6RG6bg1gKFnw1JwZHJU9LjMnBOTcOlQpRBwCKb0e6roydiQqJBCZdK6fV1varsZshHQhQ3GgjDlym3xDYzvP1AW0/alpbxstxyhwjrR0Df21K+oNWpYllS7DoPbFd90rLi5kIh7Wc0ifyvW2S/W0QhaSgY4XC8zWN2RbhztaHvgYow8aQfV44FUlFZXfZ4gkVFYduy6YTWRMCHuGRFbf5wpDaNvNqB8KkhD6cw/NueTGZ6kRmtbDhyER3hQC88ceeEwGf5GnTBjemMPidhyO81fIINu1adeN0cp20bjOstbGG9WZfzTcH8UosFzD9+Ag/3jWJfSeNwmX5+6K2VxFzLBw8fUGBb037OPeNrL60/9Q45oiBnTl3czMR9svNmpAhUg/QVFjwbq4awka9P2LqxL1T4zRR9O99+DnHDHV5derJEXfVES5iGN6ysNAhCykBx82rwrgoBC6ZZ0ijxdYynAsaeaU8Jz/Yroul6ik7awB5QIFHBfKiAenu9ysWPpesOiziX7PGr1LT4FyoKP5Eb67o6bASHiDI199lVb1UcKrUexBiv0oA7Swrp1CFj6XZ2zR5OX/ETvclrFw326HrLdJ9tOQUSOw0e5Zu5MzIiIoQBsHZjVU8vfodXyPnXRQzdUOoJmI7F+eoT2yqc5Dcm+cjs4p9snrE33zh/mMnehjc3Ugr+acj4WFPdmboorLSqXGoSZkJGEg2EZsbqipMb1qDjw+N4yHC3L75mZLVNXfAHM5HUrI1+ICT2EZIeQ0Z4NYL46CyCE/b4SE7E9SoTYvJdbUPLFvO5a6yuqkaz7HLKNdC2+6iy1+54+/goFsxVrDCXBVqEj5xxLbrhlYqulHdWFmA0jOCs8P9gj7p4eEyhFBs2NLlsfkvzYTPuLHcvL2ln4n7VMfnGexGU4joUvm9JiyG+CGaZPv1zFSIcdM5PpF1cmeyizaCTbWz0+YEkqgxG8PUpNbWGfOC+bG39Tj+Plw9aCBfEmKsX2XHdJQCfvxhtXTbtjhLZ6vOmcZ9I7jnCsVzrVKULhqGlGRbhVddPhJW62DMJ2rNw50YP7Tn1zP0dBgp7ioTO/lNvTXhxvYgwU/h+Ml3opFe42IoQUXZP+/J724I8QtpbKRxnmuWEbL19T6zakXjKCrSXwyGzl9n4ALpqRfaUixqwKkm+yVqRHkO55yMwfVKn6hyefRl7zq5a1pc6yv8V4EV/mKlqdU0azczu3biF5NzMqqBhfsPK68ABWZCwVqrKPL/5YSEfosO6sydsnYyHCmmxq94yC52/MARyNcD/qE8IDxHSRK9mFmg2GD0k8hR9m0s7fjFfL+H5bzUn/8QvCWxh1/V+1bPBwxbW4HqnIalwUcPR7Ph6lFlAGNBG3fRxDJk1pHwt00m7v2FjY0ZD3uJnwvzGWNEfEHuTJtv2MmNjj9LDTA6QptIjzygrtfFXs421V+mmtleBrtrGo5v+kJiJ672XyFFCabqhRWAsXGLqB3pD13lB4M823HA2ZGrbyy1UsntFOLve99ccihIegLcUMzFij72QqTQDvcaH8zzXwoTitnHsfIbT0cMObEKl9Cl/+w3RyQ2G8DAq0NnvgfymdYTFluGq8nKJGqO7cpu+QV0coLE2c77JvmCzoF54GGVqhsigr0wc4HxFLsamuCcQJjJqNw1CzDxXzTFsoouhGoRNfuNE3CXOmbqnA9P5I0Bm42t54fG1D+YJEj0cgEI/bhodhkTHqVXREOnBF1eeV/J/HTdgQvTgKOnBNz9F7GBEDGpx6sFB0jZubR5D4qzAQFHCGaosAgSFb9WaMo2EkjAcrYEYsCSzoRz2fymXEZ9EGkBTzio4Yp2YmUMOwfCWkYls7yrENZGhKUxqDXOTzXxnSVEQE4nyXNlUwY1E9lQMyRFtOlN3JSX+rDAyDdloICMQVSnzM16snZAJh2Ptprd0H6YflpAWM5l9NO3iyHkDN5Zj7c0dFu3bSO6LyIRiNTHAzN+vO+BirWDx4GhBXbf45VUpyTD69PIpXEzaKy9lv8hqwnoe/ELKO1Zhgr5zEcGfTlMvcsxNUTxqhgyf5CN4/01cPM/wDrrfb638rwu3aACGGWzCwehN4c4OFwL14mbdzjqqjVdqGC7jY2O8xCmyu0UGry+uzFs1xnQZf5YCQOuCFaDYCLR6rlgTG3HI/6RuLW9QJAv9tLnVjJcldfigjbV7YjTfeIoOFpKf19M2aWnnuCRxry5m29ICz3WzxJv8I8rdg5ZNe1dYqMsa1SERofeQMO27yg8j7uCwo7rw8nJKVNDUiUy3N8tS28hnzxVng08N81k4ZfHtXOFzgrAb6PADtpHaYr21NLVUow9ZVW5r01J2BkYkkIT8WIvY9SMNJ2R+jiKOSEthQNoI5WaeLkhjDY+F65qbxK5j1prKRzum9rFk9823Yqp9pjI03Tha8nszW1GOxj7fKgHKFYTMoB/UFeOORQw/zQzcKu78ykJghqhvMEhJ0vusigMwcen7QPQuBpEgX5oi5YHhLsWeMWfFfv7pYnSHMlHn22Ss5EXh3ftCdAd1IiM0qOVRVQHRva+i+ZC04Rx3mzyafYLpZy11C4vEC7lQwt5yYmS8YjJresnvTnADvY86FIdN4N4JkjlF0ipFNrIm61aT1F5j9MRVbygNyILnuCZxzkQoSdU+NMDMxrML8ePuKULQqKMP74XudQSGX2lsSHziqlTBEHCUoDrmE25toToSwhTau+EU3LM61hmJt9nx4+dvjoVNTsfh9/mp/Huxi9n1vkSSmkM6iwZxA1+LcJq+FwhRa6YTpFqWE/upAL561VAF7R+iUJO5GyobP/ZPeEAbeRWQqNkS1DuZmcFDO1pl/Hj0vqbiyQnjVHZyK5Z5jurUk+UGWQoBPUvryD9rUqo7z4pk4Tj0CWZx1ucYaygKUKtLUM3U8FiLmax2QEjLFNXnOitiJ0YtszTFmiubY7Sf2jnieSU0WSOBTa3A+JJmo0VvGdIMgbNmisZ0XUYG2ChpAUIdB5ZJtwkBUnw/UFoI0jJ3gdXA0fBINgIX61kGHhDLA9Zu0S2awbkvgnam5nSbVqS1XhOAVpwzhvKfdgQGNoruEAX7Ja9wW/9kVuad+4NTNU2cZLF2013sRDB75o/5/Wf2MJtgjZuu1nED9gzrGtfefTzsdzzLbANq86nJ2z8hNO+T0fLoZgzq+GQS3/3Qdc+hyouuwP4kaL1gbij/PgZIdoW8mb0sDiUbxRwkNBnPx4LSIKpnqytjNPslUNUfEOSzcd3L8Y2YMTtfVzvTkfWqKg2YR5qVWdma0+dVbpsZgU/pVOPgiNJEDWJS4EjvNV/apiC2YHhZfvGmK+ndyFxLRofEm7F8gmkuyFTv+cgEVUqGzaRsSsPY3rN0p4aU31PbVl6PIzR+qkmjbyICC7s/iasZ7PMpgu7lKEiWyfcc6qmP8JPpwmQ3liSiRde4CkqQnfcWDkUUEcowlMu1UBbj/e62LVrGPbVE+sADjqM2CyiyCNgsHjl9TMu/A
JGpkl1kDDpTp6F3qDwBiqbaZofsQs34yDZFRLtQ6wRNTcTqC5w+IuoaBlpQ4I6820eX5Ax9+NlT6ylSLRfV99HaJZjO+n7DgGaxVZRSLdjidlkisfk1JD/Xs+OpbmzKSPRQwUKJsvpqFZUMEIRG9PzXitu1RlgCP+XJ+M/GhuPGHi59GpG2uPxrGXHDB5j1DsaG9CxhS609lyDpasjb6nad+cxlZMhWJC/x87pfC1Ha872M+Skev3IUi0T4H2qOJCrkYM/K72ioDX2dDDx/jDmoJBUp+6BcKNKR65JhkWB2xObEqJbnBvZmoWFmq9BkOZ+NLZrulilpevpc/iheeJaBa5qyygpl9culQm+VhotLJUfpKUaxMpssG8ocPWRYthYIdMgtrLFXHCFZW8hadQVhtd29DvyK1nHWs5/1m9kVrZeiVOkBDefRHhfZ9LEqgII0osSVfUL8OInq1NQQoJ5sO+7UgSZnBaaHXOAbLBYRfA0I8IcH/U3jw9IfScO9ORWU81kYlV0xynUM1VBwxvKjoeXOaXWmSMcxdIkbWcX121yREMNRv924MSqeX2ePuFx5HWIUV32kn2rvf8QMRWH8gI+9qNACivgCbvdxa63mYaFEIs9WFaMtPBZn4AtrbXWdGyQixD7eUucBQeZ11Z3EU6U5BJWDhMizRPZo5fMnKYPr7Tqt9q8aoj+oVPzfmUTIvRg1jiOQXyggTYUAj+eK/IJ0L1SPnJrIklrELDvsYFyDIqSSN7vYCNlNSTDsW5Ery7ooomLbnz/BSsRASBJSVuMi56nw/VoC9SZfotcR0NrByzXZY75GU6BVJMMdyi71OtgGBBBB+t1Bb/hNHfLHlk7xGaEhZ90UeH8uBtFCqbTlWzcblBMgLwspy4sntHPEpoHINSoVokKauYV0IwiseZh0Ayq5qMt9bFuBIU9fbSkCwsv+F/uLdMxDCXSQV+jVl7iWWhIHF8cZ/EwiY/hN23hvMYMBns16JuFvLkH6j6DREflr7dzhgDQgz3FdK/96z9YcJXNAqbgzi6+r61nQZGesNSxyR6Ie9s4QuH3BIqZNysznhKkjT4YFJXIrhUfg0sLHPKkSfZy0X8zvZ9UcOPyRSOQBFbq1jFV75edTv4fc4BYBwlASCG8cmbgJM4yZp+WC3s4d/ZhqKzx0qeAO5ILecCJ0Vr1kY1njgZa3y+0b/zRa6djA3wmxstGqLl+0/ceT9XycysivPxFydwtZ677GxRMDD0yY6L1rwtywKH/WKZHgZftksK/1WbnpvZDDP8LyllATG5pA8NVOei65/fBqZGvzI0d87zsxZ1MdWl2/dnSXFqTKYMGdma9WETG8c9Fbasz90RjtAmkDsuB0ADuETJAwtD/MeKj7ZbZ03cd+hxWfbJY23ZCcjf6snS0QhB9MR02QUgBup6yN0xHDlFGhGhpXnu8irAVGCiSRGZ4FP3vd/3Op47Tc2tPBqAqIW0q6WwSQ2eOZUlKJxCsxnJAWVucU5MgI00U2i7oB+NkO9nqF2D9Ik+C1DyJKGEHppcVKa3x6BS8TY4GXl3paquTm92CmmK0+zJu9GP0J9rJisj1Z73R7d/MT0j0iXbNMOlSvqR+UQ7shdJMnDKwh61jZyXZmQI0WviDXnPOjfE92p4xynEnvNVBrIBJE7S2vTA/sK4o5fHXzvtCOkWc32dMOhosNLVWndQNLPM+Mtd31qvH15kaRp/jIl1XDB1QBdSRvchYNMP3cyYXiHPzKa5zQOmwP8J1xaZLC3+AJj3Yo/A5wx3FxIzQ6xntnLOk2n5MAyy2yEFzEMsysym9X33DgLFhlhjhY0hL2oCVmyV/esDD4IeBLnz/79qWg+uGy9RlleYE7IA4bXgsdSbddEbMVyTQghGy7zGM7Tt0TnNw2nJNYmdv+7eR0JwZR8H2s5XDPxpE9eloVWGc22mTahszAJ9obNrRm9kon0re9WgFbkTHreEcRIGBVsN6ufT/srdohCIPdvPT7Us8KpjjWY0L40YarlCgm6cMKf4YPGo+qc5hy0Lzc+S5M0NaZc8xMu1d9c2seWlkY9JJRKth0njkPcaSxkCntfwaF5vZ0LZ+Y4QnvcP36L2Q/9R4/S6pjRFnHquoeUsVC09t1PAx5+8/+hoWYwrKAgWscKwBbjAa+9sbFuGcGPiwEYLRrj7Q77DoHx170cqt2A8xHbrH/Gndm0p+zF+bCMqlBLI8DVKT+ACm9risk2rZmogVJ73pgbB1iUDpZpLAHLFRO8KetoaN1rektSUagSCCd70qfft8LNRwrI9e9mg+hZaq5ASYWlZD+xRlyxg7gIv6XPBRpwtEiRsPGRaKneZJBf3h4XwSGdWxQ4flgpfVY9dbjumDlcvezkWhek1jTQFGydQvBUJLbRNzkgnRkXrJXvgxzS/vjWbvl7fevf4bZn2J6pqWFBWfKP8pKWR41jA2FVzszIoMw4BxvbFDY6hZbE/bu7W8LMUkL/bYZNJzKGo31Env4qFYCU9YtFI56xFwnEywp0jK5JZBmsqUJ7an9JG0LJ1w/tMMOEG2RL5gd+DFWx1wGTW15pJaxA0WrkYrJ0PXo+Vl4BIB5AVojd86TylIuDT5itIDdN7BGFQ7Ho9wKLNOHT76tlU/ZZuzJhgGsHhj7cuPKiJwtdz21qKpDyivo78hjFd9WsJK/g0hGCMGhl5T2KJTYu56Y0wlqoP54FgOJ5g9e8Q9/4k6ujIriesFULVli/HX+4f2skoT/K7oKxO+J17AdHm6albrxQo9Z5G6NOENf38PqtjYCUoBezLjFbgxR4u4Qp+7dViL56fa9+v+uZiACA4IwwoCVl1U6JVC6kEu7EIC3FgIOBCzHcEXfFE0M53lPHtKolEhnY1LX/TN8qFlLjLUwrXYhthrbgTHh6np/pcwjTWyaFzAVoi76dgUsTqMlO5p7FVkugdGUSF+MHIuERpReb2Bf6Mn5fAH0/VfeCKR0d2fBy5KZg06ee1OitmFbnxMTaHC2qyYA/fAmsXREl53wTspxCtwGO90SujkNwYH1zCVQtzGJ8NKhlBvxdWTRbiEQHWH6zFifkdR4bXTUCJMXTWkn9gZivqHlvCAPaXDwFv7qMC5RpzAB1/lz5+/JHjVdCmoKYnA5kq33662cw7bCMuNDXfXW2kLtWWGVWcUJffhcpXQVlmEMi7XOBaazW3AeoxcWWc1IJsTKngX7AwWSuF6CErRUJTb124D1eTEjU7zmmtTCQx16DQnydnqiPX29N4AeH6ytPcjmTKa0WB4jRAwIMaW8SowsyqezAgllSXAcZJRwz8CtZP1bc6kB6QGYEjGibVW+h+5B1JifaQho4ASvmwwE9rQpyjqiqVlwxZ810ayoJQeIFOINjL1M0sP67VkNsVAKGhyCvJBl53TdI/5txJL5rkSrn73wzVN1s31Nm7zGNjVVp32wxwl0hRgELRJs15lPywQ/rxUvxNQrj7RNMLW76aPK4UA8DiWu9a/43kkmxudZSorAJ6SMXkVCmmT+JD+3fLJv88LhNKF6S1ndH08as4YTO6TVcxp5CcAn
rMpFREPcOFj3IOQD6BBHBjWOcdI9M1aRkb7dbkwwtav3zgZj+IYiDYpA4Sc4zJQ6AU95chHulw2+oZWCtSrTSHMjehBkPd70WZUHjSDV4hRhK/8Cb0zD2kK5HiWD6C7zfzyPYqODJCnFcaQZoWVCQhQGmaU4JVA1AV3YNG040RLanqRjMJYUyp4TYe44UIInnG0WVug7Q5KiTwP7mmLNzAjzZ30am1PPAjULFTQA0DiPMremXsqUo7zYKNMqD6xcGpyZTvkUFeYKAvrp2SSJhxjfXo9kKaCphIzvDWiEyqVi7r/PF0XechTtWkAqyzL8xtUU9RPklC22HK2A4u4SmpP1ULuQorAEyOLKvMVzkUf1jmVH0tHGvrz0z9WoBskUgyaB2uCgMcA+GSyUwtEfN8Q36WS063Uo9HWxiTQmMIr2pmvBIHEL9KMOb7CmA/1BJ113ovFFILGnQzHPuLRqIVhmHc/Tqi+p6YzquCJGdZJj/Uyz44NTvCwUDdfoPqxlUt4GyPm8Bo4X4c9HxsZYK1hd6uBaR24zb7omEUJ2/a6Ia5oApaHjsI/0OCXD3ObTJF9CS/9iZXaeL2g4+eftofdeIWQ50sFeq8YEgudTDBL7YrfXwq8xFrmAZIT0Ituw4IgmbhN/TkGtschMgs606Jfvq1f5/SFROy4AllQ1PpvtW6coBbfEyWOBka7UYCX4I5CNmoMo/rC33B0628Ld2RVJFk6PXnBRn1TxywihXluTXppiLqx3nBKVdMZzO1ReciUi0GigPMHP+P8SGTtOb8xV5qXIpFC7yCtW4jcqZj3cvau9fNKX5bEhWXWmF6809HqvCw1109diRZJMkJDcJWvoDjrHUUWGFP96wSeJyxjZh1uJj5eXjYhD4CGtmLrRDl1cxJM3Osd42kmc9pX1A4uTsg2kK5z6zBPtUgtoU9IR2xKy49EZI8dUsL/ClgFC3Ir3QxcTXd8cai1bjBpEm8OiUVR9Tokhz+JyMud1k4YGRtnm8PVMPgJUHMHbsHEWZkz2Mb+Q4kmYl+9JeysNDH1Rdw6zA0K5b2j2rD53aftoTkKQ4A9g38tEvMI36XUhu27oMNSGi1zXr9vOgFLpotvHfWQEs71qoxYxcAicHqPGNx31MaKb+qXf6Eh+TboQjDpGqPK4ROSf35CtVHNBlqofL/uxPSVrNCjf0d1mWYtEziRRzllgFYmhhQnur4YcdOyuq2gswdqtp66dd+9HVQevhjVZMDaui8ZCTaY1NHtaQTKWh2bwZXkpOS5Hgopl4if2V09KsNJx5ia6d4lFkSuh4KHkOCbQ6+RrTrAAUwzZe1u3ZZs8tAiBi9DJk7LEEOKwhS1rI9YjpFvfK+Qu2Q8BI+diFI1q4pG9MiWTDopNkRHCUsHkbHBoKbXa+o4wrIEDxWLPxgTOspQyN5pYrhjOCUIBP5oHN7L2gZTpappn5uo21ant3wcZG+riuh+NU2bsUrV16d7eXF0SRetk6xTZYWaB3deqGISSoQRGLpAol5eRtOjh5FsjGw9setIx9xNnJxaMdM6nn019GyNvl9pq4xN4oTYTuXwfGBR+eYhEBHwdmX6U8v2mmtvDGXpqEwFZrtmBNSNqoWCQoTUXgyK80xDDyEqA+0w5cdOnaB+dqwV2wJXp6PTci3UeR+0i6PDKdRTBzoyVUY7i4oFdM5aernny0GLb0jjGdBgDoxBED9PlezK1wLh+DBk7wWI6AtsgEqGKW7IE6WcOCUS5QyX4JSA8JyDKN0/CkqpJsPjKVmHP2DMQrtZxik4zKfGVbbBYFYPE+NukEV+gPaSgGp8T6mlnUvhJFFR7Y3220ejKnhc/szq/soPrAjmwLmfb67i+ijznhKeaF9pZVZSR8lqLVXvFO/HtaZeY17dcXJFT2DwFXoFWEuNK0CJynBaNvXZAlobjM0qsGDht9QQN1JZGcxHXo067rshSY+84gzJUS8eid1xAFNL1KUY5b1H8Gu/KwhPCgQechNhBpUFnIlY+iCtPBw7FFkR3wX7UWqPGSNhfUvBxr0l/O4V3+/18jxXFfrrbynHfw3Fgr+nW4Isdd0lOD8ut+bz9PUUEURjyIrk1DLm5m8VnBZ9rUjbk+CmmAiNeWV2MrRlXvnNROcPI/E55w/iAcgkYDkMWzgWhywmG1Yu1ZwpOgSm9Ro39UY7PJYhKHgPikQmkLMp53iIurkZ0bkLNFwuPsFIQ7rnQdo5cNPdJdTXdnrcF0Xs0LI2cRVLufSEMdTNhtCH1jr5fRIp9O9xttbaEhkN2EhO1SGKZdj/oHEj07tV4xCE9Nq058AEXDIy+hjgW6VwSi/aPKNnWpHIERU6IA69pgEtWx4ZKoqJWQH3E6wBV4InyP7NOHvrTo67xqcL+Gjc8XLf96qSmRESHm+thLSVFATgiAHUsoy89+0Z4O1gnjMvBdM/ycEKnjayiClOWuXv8a59DyAE5X6nszfithCHR9WyU5wdmT0yd8LEem/C4QecNOv6hfgJcvI6FsVdr3RI0TytOHVKFyYLsqqDY6VQrmjfiE/YXLNLYqqxjo56WWliABuFhiVVzFSSEosd0oSZBYbJ6G/vFvf2KhEDeWehWRVI8avMYjR2HECYyxrnbUv0TOoB56s+c8JmdKVaK/pz9KeTo8R5OFmXVdie1I5AxyUE312Ck4fXnPm4ExouwUcfMbJhfEefSzEUslZgIuPHfoGkNuIFXh1SVr/G9C8Zac0a7UEVj9vdkOENrOazdcet8H13gGkJF3Y4oGyqQ2QyIg0iMOt/cdYpLDGjAjFEH18PekLKJR/6AqeGCYPfGL+rKjjVlXCW+FA4AaEDApVZLfNkoKoKmMJ55MSKghByxAm0bWvSIZ/KAi6Xe2gR01ft2LoxLgkipLETml+yt7jEesvvKerkBd4K5xDblEcLFN0TeI8u6zbjT5RDOs24UjMVqr7wAvkkrCv5pBL1NaJ7zavJ8ueNthz80NKA/UXR6v5eXEePt5JM9zQ3EZN3/k+UNYcRw2gQ09GHV9LbDfJqEFxFkNAPfwhc0QlefrPqmei1a94j5owd0HgDhWPvDSDmHRzRmWiO+Oo4aV2ENrtIm8oS9uJVq9+NsPtwwj1hrmrcHASj/daoIsHByinthQYTY9OqeF7rF3TmD5Lk3noC2f1P/CC2ZfablMvT4NuMupo8ePfNPNcSVkP1aMo2TUVb32tfUT1YHJuFpKwx0Se7tHtkrl/cozyOBdyyvnx//XnjVpkbx0XtquHXar/KgwmgMOSRxseJGOVrO0l8JxBqRvdeB2g+e1KwR69B9vk0uSEvbKF11egKoVo6/cQ6u8abgkbBOw7APGGSmXAfibU2VzIUPa7vj2XT0Q/s+bhRWQN32WxqOfx/fUQ0Tta6Hu0Af/6Z11Tecu95Bpw0EtSnKnRpAIq3Bq85jKaqv2nmtJLg+rQhOzR3Pfk4GapwhTQjVAVg6HZL3uvJCaBz9vD6jAL2FvWqr3XoiZuKzkerb9ZNh3CqC9U3KsyfTKCKSknqzSJwJ+aijnsosxoWqVdc1GluO7edZY9RsY2s
H7Q/OFxyrdvKFJHkW2uaCxAaRquEwfCuRCkP9IYgGjzhQcm1KhzpCt/Rqrdk90AGIu7tkB6tmU4QV23/avQpuNPdQnVZJGUihcI/G6wFLK4/qTrH3y05Swfk+mgjsPFQRMkK81Mc2zph2AW0p6HMA1q0fgRxwHhSrwomPk+pkm7oMH1znhnKvfRn+xKQcPchlz61eFS7tTG5Hmh3NN736/JZNnubWMhTI5vXx3S15bmIkd3ihxmbp6+q7URk39J3/+sOJcjQFJkfyPRaXMyqyaEWA0Uu3Hj9S0UhdKWXBkEEWTZHK6TAp3G8OBXtU+y8cKNLPqCoQh1h3VzFygdQY50psGzL17FHTg1TMzPsIQeHQMoF7Pp3y7Af26AX9PTrybLOT98lOrpM/SZK5jjfUjLANE6nJBQ5etuHu+XJY5lSMopolK7CAMCO7YNwLP0y+VsWqo+Gy3SWWX11sGO72uhqF3jDLIF2BFrTndiL66A+18vXo29Bs1p63wyJ59biV7dwqSd7Dt7O93dcvqqVc0OQk/MHurMW6duiNQF5eTMJ+c4bHVE+xuuGT0yEumpiu43nV8zo2zxefXDv8Yc66Au2D/rC///PcW4aLXociZmjDq2zk7fTi6/Isl2Gv9UjHWazoueCX40+b7sgyUtUSaTK2YzGxk6shdohMPXAlbyd1ke23+6oSeyAvm4jLLXZG33kpnJDma7AU2Cow0rPeLL19Pu/WFnCQ8juec9/FUpXacdzKcZvxFTYDh3PHndt5662TX10Qj1s6y5M6XNEom4wu1nruEI7IImPuHQssz0gHp9mBZ1QyTAudLR3gzy12txBzgqnG8riqHc4mYvDdm+LSShReMDIUNQmqYAWuuWT2flL91BFGFtEUtGAevyTqNCNUHL2sFoXRSc8yZ4YJQQ5O2F6UgmSht102KHXJwhUalyYtNxnngm6+WjVHn+gAllIOFyhABcjhYSoqNF74IPajmN61vzFJgjl1z60NncMEeAwz31PRhGpjt7tE8ZExZ82wys44t0SIz9hIahbhqr4reh41kHfCRpFaZQ6mRA8VFP9KqO71i1bKsjWPj3XD+zeVylSryrOm0mBj64NTAI+kdQJoHFpzaaoC4A+ezNg+9uALL/zNa2HeSwe9EJP9UBI+9TPsG/HU0+Mgt1DHIxY5Aog21q1yQxzLnls/peDk1H19vJyFfb9nYZJkOwTosu/24rh8KepMgKQ/Pi1aEyMGA987OouSGj2vkZN82g5sV+VAIUXaprlEY2SAqG0zuBwQ4m5leZc46wvD6C8Ipg/LG+8hF++Iqvlce+TQlAYk/PRV6qWTe59elBKTwgUW3Q2SRzE/u+nu5BzFi1N+GRd/he5PAXCLjVktEvlzv9dUpQfp8bD55iL6hztSAhQkQH22+ETW6R06e+jrZ603hxAC45qp2hsHOncwwEk22LZDSS7JkqStT7JeQ+lZaQnpZ5CdtbECS/phhSZPCfRm8CMGxVJ4H1gPaPxnCWH1y+7oLwutBaQotFiQkekQi+cKf5Vhz6cbcHYN5nxIwHbStX+rM6jyQQgOuOQ4uW4nZQGKlo87eUWcFxA7m+4H8VwtUSqQWshQCDXgELQZdXo0I6O2xSdfc9tbxyUYD8sNWNlmJVoc9iBSBf3Xq57y4jN5vyrPsWh39QsXhBsVH63GHMrOJOtsk1lvmIvncZR4wPdf+OTwt0laDU7FYeWDO0LYlfpH5UYj9jM0l7SH1Qor17bGz5oz3CAjhpfvoV9O71X0WGYLDJxp7763BppnBdJeiLQn6ZPAqFSdp1YDZW93csd8C32gomtU8aD2K1ewE92I8UT0osItVyFgPELtYp+BnaqeJkd0CtF5Dqs/07gK6mXynHDtPVtZ7cZm6O1gDpCXgJHsIRrb3GJ2woPRObAhJko7pPRfJOszyWVPGsjM9WCsliXjNCT2IY5Y4op1nzywIa7fQjw5gR4tvWTkc3dnLH2Mk/ROsJrOLRApW/meHHxQM4CRkC5QhDZcb8ljmtgxk4TFQh6vr6i4AF4nvnYqFL/6Jy978J5/HuYTIEeCygPM5qyNUsmUGBRxn+PldokubiM9D4JMi7j0Qj7UpHNQQbGgRi3Dm6nGf99V9P55uq8VhA/Qh+BQTPUPjGxCPHMLXrEgr3cHBUuD/vQhMJdmHburfqFWdftqxpb0l5oiu67aNsiI95PKAhDpdzlZ9yLzBO5mHOOMj8wJJccfkj76bH0yjwErWuD9de5AOaXjH89B7MRyn99qBeptR1deMyLNq9I15fXVc4mCvWnjJ//xFMVHwx0f5EdyD2rDno4Bk/CZIhvS13LKeS2OohitOBLsFj+z9tp+Q337Fr2A0gRit+bmfnnszKZsE/OeKTf5XmBM/cbsuoaOPL8y367wQfXXMzSFDuBFEYB6Cq55vXCgtu9x/YDSN+161bquiIogMvcJDpbf/c7uq/SviytWxiTyD8hLStMuUZAOnsnYxp4dQqh+8bsQvH+8MFBw3IkvCenlaUzh2IcXR75lZAly8F01DOiXaFIcN/338J2Yyc6oO1wrzxY6VsiDDC0GG2fvjBIr5G5mQGzEs8upxmoLrHFHnNvKYP8xtX7ctM/l1JM+zkBVkcRM+OkyrokMHQFFd121DTzDGmdkaWPZ0KZ2H1PVCjahFnYes5u3J7+AR/dyeok1oXM5GCBCf1lKRA5nKBKLppifG4JlEhh/m23kVFwxbj5SiQyRr1df5hFbuZjzpEDz8Q88+kG1Pyutv6yOUJ9MIeck+IriO2oKr97j2dGGENkFMOOCK54vMl3g5YeH0fw6vqZNccVK3RDrG6fhwRz0VGp3sMJT2vOmQjelkVt5fAz7cAo6H/Y1tQNfys9fyQjiQhFiRj1PZxdKHo1SoYUXXQ8pgK6wOdjc9cIfXsQFE2+id6/LWGfhSaFZriSSguzpKQzxL8ibBXy7qMym9P5wkJDDhTVXXjmvjqqm6+pfzl9oddHQaJyeiwdNFbm6SoKnSS9me4sOc+fjCpg1ZTtbepVGMzUBRwDjkI2ht6pq7bTIn6Zv2iGhZ6taQ9F5F5kSgXBGmnrExvBSlrpDkRrR2sJOqei7njAHFprzD1zxnx+O3y4G3t049fm9BIWfdEgbZYEiUJU1QEgTWy1UOdH/qZoqUOPW+gzfYU2AKc1G+ljx4DU8nA9iEedvrXpCyKnOZnAS2W9pgTIPfGczRy/46urmkNLU7ZkVRy3XSvgsyDW7rF0GEtFwVNsETGrONxsuJI8ccFu/5UHO+if1N43cv86tI5thcYz+eHbtlXRi7URkgBWPriP6HFXfSTtZltbNFxIui/utWuAEqtSkRon97bcPk/CCjl7seBdFavdch1c5iXk/VV/udev0SYVWWwCH4tagseYSChWsY6FxWrruICj4gj7N3vPVo+QHOXhqJkFzNaX7K+h1L6ReIWljWeSOGQ/euPpl8yBUT9vwCp+AtKbIl+BDAFfS08KWQ6/TiqFxv50n94pmsE6VeMCteY2SS9GOP1r0MbYB64Px9iYsvUf16T36m5
HpjImaDpPyyvHEecDiiF1QW7Kw5mnimF85PWhQlZOBemRx+Tq1b5ACR62jvx1/Icz4xPPrrhe+35BjBIH6i49mlK4jfyUcKO3171c51F2qd+PBQEBY9ln3TrB2WGd8+9yaWw6ZOOkN3terVa+IqO4FivARalc3dPqsMtNDQlaoiAAg4OD3X0g0Sw8K22nU3WP7R30DMtYBpTmXbln2PbS0UtzbriD1fDVvUxJ1X+1oCkOCD8dx6dZ90mv2PvCNmSEa5U3kuhhLOF4nd7OKETtkUTu8pFy2A0BBKP5Km1HKww06buHvpNxVeuX5WpZDlCF8aM280E0lyVoaL/U1jZGn3qptAiF2jQmpsN94xZJvz2uDo+b7S69n9ewohqTWjMw/nA5+jmu/iA/i7g8rY9z9m21OzqL82V5pC+cHR0so184VNmnY4hhrl4fCTxwcwMWK/szbtVghBW0+XedOb0mb+nQ6+FTtjXaL+q31k6Imds40JO1RNB09jwAKPhEazRGXLoMCdZKMJTbjfPCJFolotrCKEbxH8yYaPKw4UGrvwtt7NUBjsJ+L3n88ZlA0k/sSpggBPzpoJPVf3SHmQR+GmMNRoCqc6wBKwCReI/wGi5EI+nJsH4WjcBAVPAc/mGjzeQVFg90V6OiYMNQa2kWcPIqyK3QG59YoooOIOLq8Hbomf5g77IroFNQy6NiBn4XpOgj6D/JRD0GWyOftxozQkpt7HSapHvw4AHEYRAZ6zzIuZbevDoWVcVKpW8crFHYfwlgZhetxSl+8kYhFfcQPfhsZ8U6xEzbfGBZrU1fRyFytEwErVNj+aVasTfQ5q4jObjKFpEXdNeENxFiRoa/dyEuD2slxRL97pTQspezPuhocTylyiM4dEaRcROuIsJ9a+mJoUDpgO7qsBzPrCvQmyxWt8iHf4IuCGwi19IboofaaxAF+H/B5zTFsya3KwkrCkOUowFQW6GhuUnOntgBgeVt4XRLSztQOXl1srh43AhvHv93pQ2WzJRKPhBJPVU+pY/ztppvVDwoSdi9gx4OMqJPOgEDFqM10jjDF2lsavCWq/vvG0UFHWprr+tQ5vc54APastNODj+5nAFoUYa/g1jhdUSDZopy5csQtshsJ4d+Ad2L4fjPCR/VvElDqld4guhngoeYR6Y48jLEUfw8Yy/3L2WOBzhaXoy0UsQ+XYAvsy8HokMrU5ypM4b47H19px0LsGpRGF/UKqjMRUc042M8BADfh2jBuiyDx5z62aLwDtbQHkycubPF/S0Ta9ryi0Vsy3ghE31uC4bT9tC82GVGwpBXjcOEQoZkzFB5kQoHY7PLiamo7IeZhKqghhlu4IGtMyiUc2lkmU+C5HWJChlrq/C3qBxRkTtOEwIxuR5B8k4Z0/e4BAz+7FVi3KpziFRSt2b2FhVcq2oMwYaj1VJusq1YIcVtojZn+xlybLkZSh8dJqJ0Xw5rcrVeVTIDGS6QruD6Wo/pgkz63XROj6NQP5HHZDiBHmSYdeKjTX6g6ICRaUzmOIeSMrUzOPpqgTjZg71VA3iUpjTYGYk9ymzGKi4ky7n46T05vcgHx4gVvGik0j3Sm4DJ5kZ8ymm9g3Lja/KyqfKYxUIF32uKwW/ShCrNnyDfQjqEtavZ1JhQAk2lSSUHQLWaspV+LAPBJnN7A/oYLHN5eExOP38Qs5RbAJH42YUt3hOiWRzG1sNfD8619/HcwSnqwaIxw0fgrsgemsPKw8Swyiz9nFpf4YiGYqZFMg3SZXk5v/raWUbpCNo9AwL7VadyL0zpEbm+2r6pypy1jFxF1PlbT+6L8dEm7+MAmiyBjOyfmajPtsomM5y0QPjxucsiFTex783pBF6jiB7qo3BiqeaxtA6zmbM/GjJA1PGrKqlISLmNKTvpoce4xzmVoyPZ2qtUucj3qMtFMw0wG9rIXU9k0ZMuzcVwkoe7EyN1Y3AORO6UpzXKENt219wX5MR1rAtzx+IovrMZF8ZmocPk9RoUnfOGrtUDZjykk8lrC6lJxvIsCTZgruvAE6kV8I+fXvDuW+UwbE+3HXBslgOko1OHV6eCBESyksGfjBVf+fD4KPvsr1pslx5QZy9NyDcNbvscPcqR8kqW8daFT+HMuR7MlorhyJrOPhDSQeBXIJiXW01rMOtBewHJA2B/7+PP2LhRHeLizfY833rr3DEtT/YNcpcyxCZo4VitJxL3ZbPqPnLMvdAhluqKZmvn8aWFFbxSRZjk3sEGTyd6IZzDVUWRTJ0vGPi79e3uCPDSoOcaAvYcNfIt+6EA/KuY+1dpRsIeMTsv55wWyvBJoPANXdwh+HPanHvSBfTOV16RNFNcxp+oIikV9gyaN9P6oHxBzOu2qCfkVp55ddaCjFkOlGZnRxGrZFWJItOP6PUgcd0XISCEW0yqrJSEVG2ZJO4eU+SeS7tZk2Ey89Qd+s28Op+UDj7xyR0YX5HDxi5d2/IEWWos9WIiRcBxtaLHtjDdOUGPNfRC3li26oD63ThrIUTycisds0USKd2qaW1FF9tKm6tD8jr/MiMornm0tmKVt8cFW1+Bcicb+16hW5HQ8PkAEgesqomeJzgUW/IdvD2QSk8Rr3NGZ/gvSbql3GFGXqV53z/DaIeetXvtzonT14n0D1oeYphvuqeGKR86xtLEmpQJMs4IOzpg2+LAJz++DYf8eUKx8PkGpDuFRk+Y8U8YGjsWLpVt5CrOdkEUriLMHaqN344wqYbg+npsbPvYlV7bDOHwgXtEGTqdjkIVPtZUZN+NwihT+MtZ71MPEtyTyRXshnZPR7BpJfckNuz/hkyqqkk+0DkbzuD4dQya4WI9rICC8G/EaCwGAGGkQKDxDrG502qRzL4iF4FmE3Yx6cFMBwADbciafNnjfrlHwzAzIrcEiVH2ennsht0vOaH6Asu8vAc1praKnn+1ROGD36WfrCA46v+BbO103RfrtDwSvrn9PxivKB7jhdEon2QIfvC76PCZmHnqmLPsvMQupHU6OnEuPA2v17PKUcXIPHKfcL7Tppb4KJA6LpLQG21Ndf+qdpvcNNq8xbXc+xTFDzNzO7sS27VfDK+7yoPCDRNgnsIhlCqd+DjWXLGQuJavNjmEx1x7hZAMzzF0fB3EIWJ6OcMF64YVxQalnEAagUSGXdUklGT1LoU2U+XGFhBOLesYU9rKCMNuGizB8fuQbU9pNpHhg5nprMheTNpPO9NppR/uoeKMGcMcGr3Yle/a7GVLFpvXqDWveKymmTDHcb7Q3iTGhsi9Z+4iFwOEetJdyyHWtbqrwughXtSFjTCmOdTS38zXN08HrHDPvr4W64w8tUxf5A8kCPoP4fmwgGDPe1wB/Ip//L1DRHvfbOP/IqaHYH1myBRhtzKG6aFzhQAOtZ50NTjNqUeHN9zRfCHUxSPElFB3MwssWgF3m398Txj/oXjhtcthFsE4fgcXFslmINgogbVFOZ6X3ZDVsWGosuzFslupOBrrlY0VpaBGO5Bd0uBYagqYRbSYgcNqJZ0UwcNtJoI3FZlrqWHrdIT9BEYCMqRo8HCDvSQmfh
yyHoPzNdslkkETOMB/nTLKWYqG1PA/GXxajmIDRWFJqS1z36YhARjOBZrweip+1mG2MI3W12NGs8IrU1spWmjmYh/02e2DBjiweUC63hYukcaoSF/g2FcUzKTFtm23wuJQnCFPbS0us/crVvVrsGKP5LLhcSQ8I0JEDGKLkWwUB7OY6w8yMIAzDuP1aNZ2YJI8bUZRwpHUg04owTlcHJTMAjtktRBj8PWj/PARyn+zYnuNtIrwoUuB04wJsYPckEZVO1bNiJYhOrFUXkmbsoLzW22fyWd4dkH0pY+FtDQk2U/ep27SF1gcERmfcuSzP3oUJ2FsWAVQDPKR1m64sczUaVoZmhFzpkqR1JMIrlI+tCdG+TYCVxsEb15Xwkxb3qjdH3c9NVy/Sypkh54AmOZi6KWuPw/GmI1g2aEQN4jOl6yaRpwYrPdty1J3YF0F3cQ+EV3ksVecoPmP1tvk4A9hjDXbprfje6KxSt4yntRxW9mDbWDOLP1anTvySP3zio2nlYWSOjGeaK2LKHD5ac8B9NLwTljuspyQxXOr63BdJ9vBr2GB+4zg7QBpmgtE7TY3EzRMs466a7EG0/Tf9kNZCs7jadH3h6BJ7YtLvDLvvKu2rKWFKIId0RG1MkEIVSkGnG2g0zZth4V2JOnTEvpqbLXO6Ryjt+0PRAA1CQbFoz6o6phuIHLQNUxPotakMYUBPP3Ammv+ADrVyqGm0D+mKWKOxChNtS6Wj6pRM+I1GFC2ZSJt5C1vkcG0OH/kU0+9l0GWVcu1moDrICorqsMQ5RPS/BxIWLTjOxEHzrD2quc/hHarJUP0ioANlJpBWQVd8fyzRu4YRIcZA64XSpDu5SKWYPJjchfjFL9EZkEd2m3gwzerRTMBMQIozqnNiwmj6CSO1xEiQQ25XvUtZg3qkqRoO1FZOgAvVIi2d5OFwsX1uFw964MDpSupt1nHA6aBMoWhr30XqEAmNr3kRRWj4oO9pRVyDtWDym99aEMBK+4KbTjDXynUOErX6CmA/9JclbRJzqZDQ3Lr+yP5To91ayMgoB2zAOIHHcdrSmE08tEuwmIaijies9uWBUJ68l4LOO410UVea4cEw7P4LLMizUIs2LrwdL9AazsJKffx7UJvCWdAXrWeCmaD/VkSwbQrF3SwY9+IgFB16qCV/M3sCD+Tf5nH4Zvn2+cguVLAPIDYXW/+5/kRZZs19NsTNFcgtJfAwLjQ62+qA6KAtT3H7844Z2eMv9nskQvN2OAA3+vNPK+iDxzU5JzndyRyT1B3Yo0ucATRj+usaEjebqOnnqAiFSq0C4LVzLkWRu1TDuniPSH6J/R2PJFGlwazVBwmSj6ziMrN34ntFdFC1vDpoq9dfE7Z2+On6wdLYhvFtNLjalen0nsWekXQ2WD7drhtE3kHsHySp4andX3AsVzKnWnyaXy1MFhmfOn+OpniKUb5KOOii2uro0CAI0gPsnMp2M79t6OZP+ZxGiiB9bkMsS15/WQHb8CAWP0baVlaDWJgScb/x8GtxRMlszlcf82hnrCQdP5rGYZdD3IwyqcpH1S8RxQ/EMNzitIcVbYQ7q+2xuZ4OfcAXX2WecEq2DkHFxb+w3yz3PHr76Dg6sLaDe5vV8tsjz1Zvo1K4YVsnitrB+ZZdhT+cddxO5e43RxuF2yKTcp5Jag1/SxaxZpBylayH2vIP2iWKj7pNOGPa9SMW4lHAoVC9Y6Jz0J1Gg8z77MUbR6aogrJd6vqBfS3w+eYZ6+lwszcn9+Ubpnv85EItVPffwmaujisr/8KCFJ6q9h0GTFffiDtdkOUsbsAihdfwb+fAKsZjedOCGfnfUKW3gsGzx9xameq9EvVQrHSwEPe/Pn8hIr3nwTJx/tc0cB6gh5bOZIr7xzlBRdUYbIuEPSxuj8FkOduKVpXkvk/qaSljwVAzVnCneEKWOApAtf8qhPiciragGLm589qa4CPUMflUBQH8tJ9cZHPwhGWlRsppfTLAUP0bsoxf7dfM+dzIL0fCakP75+WmLQfshFBNtVFnzn6Yqs9Ob888VQ1sTxdX+tULOgIMdRc0IGOnzZQ/dcLgys9t7w72VnV15QKFl6Gho0bdFJ9VbrRuDfOTX5SzH5/XLgtkaHZI4WjpzsXaWcGtCN/9nvgfrcOGgFmrX+P0ljknv1MI26+fScl7rJ2+Puy7eFT5Ab1BT6PlHk45kHSgePpWpND15WAu3xLWHmDMDvn98SP3GcJw2PytLoAoaNvPFfPNPC8UeeozogOCrc4kr3o0AofzviCxkYTwG4bf9rwBYNtw/D4MtCS+XZ+tIf/By6AOZ6K0QOeSNqDlo+M8UMrmfrM6MWPXIeIDEmX/ASLSiPpy7pfLh/33vrgyk/WzgcX4lbOr6ggVw2RedQq/WSOhtr2/uNZxvmfDTcN07vl4RPTqP00J3Cxefevzo8UQcrqkeD8U9+SBjNF1unkKoIPQ1df+6oG6KrIeEVMRk7WKiBpE/Xk4tvhZdS1hm/mE1+EZ4d2ILpYj2BfiY7mzBfAARDtAtbXjXl1owfUYMpieK1QaYLMuw84S+Rlnpucx+/RwVmHZtQ05icNtjlc+xMSIg4vn9q6r8ss7OJBMv17ONa9fg+s5Kx/QthlUoToCLRNYLIgusXzTxQo5jBlY2REx1r4vuwaHWvxzKU2vmRubn2gFMc6YD5rWfOGyW4EZ3qZ1RPi7GqFwzL+/hQrg774zfLst9ow7l8mVb3dK7Bzy4gP7gplZTSSbWFeb8LhIrwDi92IiK+fI4VdwUNt9Yit94ryoll2xTY1whBC9suMrvl80ipWNV1eqv3kHpD3af7bt4P+u5YnSzFW8yzplmcbQXxA74SQHhD4aP+LoK/f2P2+5ys0XVEACk11dakx2DvbNiy539nZ2s3UozGo8B4F5sUdq/ulW6H5W5k+N1OhBq97PsGL99O4ka2tcBNc3TtGKNzspPw17vCV7BC4conGenUaleap4sXkYjSQdMT85Etk36NFdAmdHYiRTHwY59Mx4HNcp1P6YdKudLrhgUOsJI9tID8Mar/vtGcWtYj0+9/fQXHork3z3rGaTD54Yl5R2z4tgWOqCU/FiE4WoMbwXWGgJby0+sBWhymixYyIpImrHxav3figuGMaRYbBSH0ySJiTL7PWwHmPCUi0eFqp/QE8irVhfGo/g3xGxHOPXuE4HAsc6I+O+o27eRW0mHMLZLEzydg7VvIoujr5/2mXZW1AdNV5r6lrNQRIHJQBQizTkbQu1LABgZUXCeQ0QnRRE5HBHquIRq4uJgYXaEw3X+w0gIwvebv4ia3hoPz6E6kPt217VFAVAEXWI3ax24/HR9PplVnsHiM4z16k0Rl/LoWx6CJ0dNlAZQi2ESPe73J04/qlW0uDwY1YDArmhcjvzRbp+W4o0kXWukTFjpmtR+mb7/pmybzJrTUtwvMkZ90ltpXmBmgJJx6ze5cb3lC1v3qNZtTDq67RhIc5apWea8ZCW/tyeBhiMKlpu2c7l7zZZzqlwAVK9cSOalq3Y3toTWcg+rR57P5NK9pvOIQaA
czZTEZkRfHHCCdNwnEJDlCEZlrbkVPbElBmoBB+AmpnlHkgatDwfkAiwM910zRTRzlJy4prAASW7sUI715VQflm97tapDgG0EG2xhwrCtj2q1GRNTNrdo/MTz9pKWXNsjA1j0e2RtE/QdcTnncC0kDwlp6VGGPICqWFImqNcCfG5pt4+DnWOaoGtyaiH9nqspUsuB0XybLnvYfu2usOSvowr1BuFODn1WNvuc5OnqkXawZJwbGN9ohR6VTUSmBlv3pA/SjliAeKmaPj9ZoypFdRFlEEyEtfgD92W0j9DLB7hbpUA3UjEOryPET2PVptO57hsJvu2G125DKHuHXsjyCylsvri7V0jfLPGr7To2RnU0l4pyA8QfkwST3joO6JUD+EhRbOWWlUgYeoWze5fyydiEEQMAOFrFZk1CnipPZevW93kKe3MSeIAE9SwVgs25tE+v9IY2HgVh8KqYYZZbAlPXaY4g3C6iQP9lOO0qaymk8JyVCzKdlbJKOrjt7VivtneQIjlPKAiugtWth9HkLh/EOoJo7sn2guIxEUoUogrFggDLgCBqEe8NWbGjSH3IpCqwx6gTgzEIH/UIThIplGthetpofZjtjNJGwDbDg51KJNGWDzj45k1CKnuo3wG7fjfAWyuJ/u2zFcx2FAwv+AFapyH1OymtDX/ZXPQZXjQ0otClWj7xsTyEFHGyQpRZJUWy6PiT+uv483jWnM9xEeOkdmArfs/xn1/9KhcVSlaYqzOxncWH8PkDXCTt2Nuty6AVMJshjeX6BBimk4uThET46+eU15r4xATzTUomswXCXE1iT3i7Vky49dOwVV/+SasIIS+L13enZ5hU9IjnFmZUr7yt1uJ76DsV0ihb/rAuiJ82rKg5d7gQOGMVzTw2Lxe/IulA2BbUG9XZrMsp79of+rdFqMlXwVMFe5joLc1bakpZzScc9ovrdIVFvkWoxr8LscqmSG7Vwif1VxVXyaNYapjKgHbRKco7eVpRr/ryg54mYSlE8XptXnHA9/CQw6OWxkKNXHIwI+Qggp22xcvrxfugW/1636vT0aE5hye/+PewQTNADjOIeOcezz+tps//SSaBapNez9I7vUABFQzA2wAYFTZBGmsB7StbYgz8BY/73j38GDtwykdN3dfTDAKTptA07idJ/dB6jY85AkRMx+ZbPB26QfMFC46Rcj0KZUg85p2lQ7YdajdoZoAJlwJtLAFRXO1sduCjk6PqegmjPu6JR123PofkjU5/XJorbnP8/26Tgo6IqP9xnbESCXXE/8MuUERJhquZHHsUJ3yDmlg9PdXKgLn3+Sh/imYcqxXj3yGAiunP+rQCoAUQqUitakeQYjhl6Vv1grZiC3IBQqunFdMFQ3+J3haXsNI2voVdC8EcDW2S2j2lurjNmIAJNsjfaYJ3YZCr+5UYeSEtIEuPhwn5EypG7mujqtyjsLazeTXJmS0UzLl4tpz5S/I4pQVGNZIhY4++qxvVrHmivu0TgQFfJnJ8fbTkXRzmuTCE7cEf72O8lDPaTns4IgVZ6SAqSzhOwuKcZbrsxW7LHsDX6PLZX8+9ljoRA5pRIyfzyIrSUVuUaElwGQoRNyXJZoWZxZVsK2wVdTJ/YnGrkfrI80Nh/iWudsCqYEzpEwpM0XBSxX+BeryCTnvjR6X2TpHdsmb5bdZK1VfWPr/Bf9ue+Ak5yQwZ5KUHIGdI2rCuVM42KObn36fkyVjjRMpKMmfHtMn4CB4nQW3HtzNK8q6smp6FQjPnW5Q4MCpkwHoHDsOJImfHgdoPtfXA64KuR4dUCU7Imw0TEfPv4J7oSiJ5YaoJeZPXr+LD9XgeqA+4+W7BOnjv0K2EI0Nw/f6PtZC6XoNMG4yn07J/+np64qex6b8KfTVNJELGYux6Uk61yn9ywcrn6txMf59PfRoMqCXnLmo7U5Uj5516/OcbzJsEUYFvE+Ua7u5iGzAEgWdT0cCJK7Axojw5tXBmDoUVg27Jfd5nAHhk+leLm84XgGzEyBS+o8JpjiwsaOFB8dhUwvkNEXFrX7XgqIIAbHJ/zWBL/hQaP1RsXbmut8f8rZ+2ALCWM2TnRLr0GGWdYCT4yni5jPwom9BF3dS50QyKk1NO/eup9lHy9MUuj6jue4tBPr42P1rY7TnyR65ZVOwf3u3L7tPzhiMNCkZN/dysS5OydqvqYJSjxq9nARUNGpftvMRIjKWGq9TZOOOwDQmymAcRueHegc2Qy5ur/eOGrnzS/nwl/cPDbJj+QEXw+rbDxlKD1PAQBYUoQMoOTSz+vnlUR0ps3Kq3yYzxgq/mFI8KG16dxNLW4ThAR1l2bhBEEyjXI6A0sV+lCdbfhpMpSIsdksLj08Ijb/klsZkW7jy+tZc3liKUm8uGYruSrdnQuYFT1h3zatJGwdKaLUWL/ZaGiy9qpy+sczubOdyOpEzmgdWbmOEmn+rjHrZ18XSZ4JBCWug4Ge4dDcV8Rl0jvm+ADy0zqJCs8laP8+DoK76C0S6AiNtJNrYrYfPA0l6sa0ZpkxqVWKPAWxQsEHfsiO7mN4FcUhacw1cd0vSnJM2iF4jKShV6W02mdo3NcwhVAmtBqwFNBCwYmrPMPmBAK/TmwAfR8Jhi4fMQVNad/0FSeH/IYws5eyJtMg7tKbxRGwdJ/30dj9MCGPTD+LgflalmAvQoj/SWNgGxwMEigsuGrWtGVG3I41nenC63ZuRBvOtCJ4qZ3sX2QsXFS6mzYRUEPs2BXWTcOHLOeiEZPrrDMiUhqAQ00zF8UEsLEknn1L00aBKgHvf1E4X/DvtEHskyPTDiFX/kSgtMhJPuL//imt3g3P8NkFR+93TvBp3/EKjBI0GWyuFjxRzPJhGLnnnXrjUP+DSWrqe1eVhoyA2pM/Z/Umr5lDCfjg3pI1v/TKyDRI4ns0082908kuNOQ8kNS2kq+8kCH5HsBfHUwHbNmgXEensd7jXB0SU5663hANN0En7h4bbwEkTAYYrUjnNFj0jEj2jB3zAobWU+IdPzUgBxC5Bxu0uGg+UyPUvh4IYljocUMeC4YAKICmRf2KaCvkZFp7nludgwzeVWH3lRtUwAYi7PDOngCkBkkFIXCX1NkoWAS5sjiQ7b8fxTL/yzbUppWcV928qeCww+VFX9Qt9qKDxb4ITXyOmS6BwSShi6NlNVD4ro6upKSghOtxzMefReESTjBhQVxV/Vlw71R3XrmeSYytOXentxYBxOB++jRavII0TvEGeiPJJl/aJhnWoLBJmgQlUD0K38mo+BxNtrjpnWHrJwd2DK1YFS+Sh3sEm/BKhjDJiEbNS6CcHO01JZK4y6d5lsqLaS0MIz7QjS/FWF80p86WQCbDBBMhe8M9mQRqohozszhV5aLBXnl14ckSItbiYNBkBrslyTBQ8ZIWLmM81xgqoY04JawDOZRWrnIEgJMSDz2/xiSV7IGn2YCn4RyTOb5A7hAe4ek+WSilOcHR5Ka+0WhOXS7357PeOxTUpcOa7x9OFYgOVXsAUsTRZt1He8kx2pNcq3nc2AEqsGOKlZl9J0OAuLSiAU7N
G1gA7z0s2snzuUcxJ92a50ZHqB2sA834axL6q97eVEbEkN1fhZiROx2EFsge1k+GK6e08t9GyrblQBFA3BesdQDHiFwLem3K7EpcxgE1I4zTbuWIldp8RAhHOmNXMSSg0O4mSQ9NAheLSAFz5ASyoooaH1P5DMoStsWsD5g2TCtt5CGXh4tSMo7STk2w1vijDBBhoLUUm3mFyTDi7r9qGp0Je9RMrbdQyCyJ1bMp6CKyv42mfGx+PIs7qHUvp3DPh47Mdsgey9getjjccr4pRUkHQesOq6RlBKIKXj5zDRhMCfblzKtA7zJlRIktCQ9HiAYh10eMpSdGj0er6iPYQaWJHf6VwDBEeVh+dbKpYPmctr7/s+4+9Cbo6BmT188Gx6TxY7MJyu7vHy6O3I6NNJPP75uDImLKLH885pAHfNkfccDDzHzA7G3JV1AmF+NyvzCQBC/UB+OeYmIjkWlnOIMK9gyy4o1qy3LX3iGWWr1chEsOUPS7jy4r9rZQaivYLtCk797AY9Z3j4tzrw2pKFrrq+lXFz1+1+bsQDoxubrnDW2FOBooJ0sktwcZrCBEGV2P5eLl/IFTEAUvdaClbM+QoQ92hyv+K4aZ3Qfk7chEGTcQdMSnoNJYfDWJjqPEtzPApagjRtC0xyNVrjo1b5K2b650hLZeP1R60Z7kue+jX81lznPMaPbkrMZPCZctGNXHPHzzaYMn168g3gT50Es3CB9SBBR+9EVUD2AmarHgOP9WKYsr5rOl2f6I9T/T+o3qdgo3AcUJAoOei/lFjbx8Gu0EuM17OlrB8XX8f1kFikBiMgTJ5F+N+Pfg6M+EQsLz0hooYETuhdQiQTAcFLdJ3ynUiwqPCWthNeWRGyOXii+yMI25oRfwQbo6wg6Kor30o4hYpZrsP9lSwTEWJbTVW4ZGO4zaNkLUVZxxEVvblkn2KTt4OuJtK4NUI2fDuJWNgR7sXjrU5Id3q5ueoCsIwwkNSDq/Sfx+ipDv2AfFdh3c3m3vycY3PMnhLXEXWSbXcWHKa4PmN8gx6meHIrSyuzsz26XnJOmGcGwvhhmu7vTuAYinvatHFgkiRWUeTuI/9j7RPBWLrE9kdkwXqT6ZaLrUURRR0jU+59sDkW0yG6qQyh0JHEakKyitQVMHnQA+yMeILcAA5Xpw7g17sOvEDaiMZ8wftpHQQwenOQLq3zsPnSpYYi6u4mJvR7gY6D1spUNhdlR074WHxqOVyru9Bgzaw6OptiAkvqi6om6WTGbxXicqPa3rw0i4CDbCGb+DsFCj12WtnIDoU/b/DLjkIFaplc9ttAKM2lNJGRmonlysfZZzEa6ljApireErzcR9rQyiqxl+UWEfsMofhfH+IC44DbcKlCCjlosGPXQtxWgnNuVPHs+m6XTMdImI01medBZ8TGnyp8vnCVVWoKhxsyRPPWyB5k0CjhHrR98yfUCEImVUjCNNy+EB4SNbxD+G++hyxcHCUAqticM37dtEfWvMGQCTSP8VmoZdRxbE1+wDGAtt1wnDYonxPdpLFHbLqW7f8fhFcgRlQQdwbzjqwe+6thjlrLnbOHEJEJ9Z9klAVkkjxWM9vjWPhRcXzodTw+bmvSD8DPdCe2EW5/TGXHd3p+GWdZyLuoR2EnGsYh7IHJF75TS6Rv0YYbPUJdPxwMxPlXePQxQWDGDM7zz4wadYg/8/W+Tja79sZiMxwWetZXHt3iEYevRs2iZpO5Uyfd0kA1+vzcF8/AjgXYxAYscP+zkMDTTENByvXXzpje21FknFOycayvp+9IRdVMihxTWXEY2s40IfI2daw4J6ynaY6p7GnaqW5rVyPgIZF7O2MEk3ti9p9CXDidWpZWVXlHQQ4mGcFadnLj6xQ6XzBrJEHDmsGsL8aGkMWB2wDZlBNw/O1pozETGva6aR5S3a68/l4XGwqTa6ts3c2oU6u+p6XzJz/oTK51B2yMxrWF74LwlXhCRcYlBq1YUV8KQ7QSVOqCaOrJeM/0ylO6pwDQunntbQdiZMnvZc9NZtFUyiB7lFfOmlPNR4HJ6E4qw+iPZ6QKbsmOXSnzWFW84Y7LHyGgatZjeZyhqEZhvJS3e1UoJpHnMLNCK26HgynGv6KUizW1t3qA9pzo8FY7fFwI1e7FE/367KHnfC2vMsqRW9oKo1XXbKV22YxMya5rNQfr42/+TTLsrSgzKAAQqlaUzHsMJBgMoL5GxDyRmPEjsIy9Kyh3hkeylZHVCXcCAHV6BCjZz4tKFRKjrd1CIQ0i9Eb+85Bh+b3TPkVU9U5l4DLDC1Dr5G9IarpU/+xusfeJS+9B3XYd66u+QOcrG05GjmjRRjcdYFgHNhmPMcNxwJgk9BBBylxjtmchcJ+bm2bwmVWpH/v1Ss6GvZ6oyPVSgPyUyv0354Q+7IQbXNPvSaCVhaCWlysoBvK/tpC43GDp+9j8HXi88bswfdFlczEh1x9jrKJmc4KrxVEpyDdoFk1MDnXEQuw3JDo0+LzsoO7ati+2qSq4MnLthzrq+2F+5rfV7WGgNLXVmfPlefeLmkOoKPNwXlPu0Fwe17X0ChJ/rj4dW6WVyjLUIhQHP/df260E6F75hUgCGhhyRNssGolTnwzr6pcFQqHH9NmkOpUHJ0Ki3l3pAEe7u/307DV+lq75MmFqgYPSvCYiZlkow3zWxJwG7MteXLY3KAc9dI9zuS1VLhEAjModWkZWKGrGpmhwo6REDQffHzkMj4wLoWPbq7+LZhDfxYXUPFJjN6T7OnCu33BRRfaxnZNUciOrhK1cIeafjnD0AeNwj3BBwOmXHA3L4gt/WJV2aHfOC2nX99rOYFDD/6lmLGQDjz/eX3AKcpcdI9ooJX7vEyfT3XgTgmabMlkIz1YZO54bnLHbzZ1cWzhPoKmbhhg/narhztWg3dXNlHrwkjh0qKpqVwMW2Iup2AU7s25fNGPfVmh8eKu4g26E2fUvuPst0T1kVRSuZpSneDedr7GUuZM1m5d/fCB6JDyx3Vr8G5Oyctud/VXuq7auo3vzKrcXD2ripu0NbtBUptXA94m7blTlikZXvzBZ0D5pKoW51pqGGDuJqpthmNt9+C4LU+25lOystF5toZEDNc/eMTkhdfgcyeXDIIsSQxP/+Dd32V/fN4qi5eeEpeL8Z5I2HD5gxvIq4X3TieJtHmlxBnOf3DFONCFNvfC8Q6NYUgkOoIrJDAHV5nnmtUS+6Hs5FYL6kbvpTtoOpqbq+RiZqoFLlqp+mFEY5S6oe3fPZmhMkj8zAlqoSK4AmEr4JihJU6d2d2LtOgckMP50e7oHRl+iKfOiMqAbtRQtu8KaugLNF6bCkoFjMRFI45afBrkhE2N2LnR6Nvk4vIanAP2QPVe9Nz3XBhRcckwkoA5OgsqaxYKFoE7d4Ik6cnlVsexTD1IKJqj6FB5Kirh3taxIH5xOU/0KwfigurOi3Pq+ZyQ66tf+NxGMviXgYxjvudr/j2/jXV2aEfX0X8SclxMPOXpJpK2Lk/LcjGVOT+Wp4IW5OjrnJyX+UyBez7RVn7Fp2wzoNeXeuEXnzCFjL
vX7WOQmgi1zRaa0ocnmr9kw3Gu9OgoyYZ1OQz9YePZCuevZIfhao+RKee6HaXWVOQtK+ZlxWRlslJepiefpJTtp6lsYT9MJstPPK9clCTMZu+lz5hoCqi4oAOkuc4FJE2w+rNcbs1kllcRDZaplnQLSvyxOPlVXP+mPU5Xnh4nOaX3Lj+fLJd23wMvmfrdiWEP68/s2ky5G/1x2zXhks4eUVddA32DEoc9CZb3zbli5g3cT1w6z2db3Iey2Q7j3ZrspmmfyT9XNks07OhDUdBlhcaLVxXn3sU4sR2VvGJPieuCg64n8km8LxmrrM0jq4GknyG/tLFk2/MKeL688Uhv7hQNLZXNU1YLPIbBHpmz15QlU+FfxeVv2m0avMfTlisGmYquAQqdxyq41GpxyNL8OfTH8dvDWcYVDdagm3LetGjMVc7/Qzz5addpcDkMucn2ScT7qra3O+S0h1m1zb2UfrVfOYWaPqbW4OEUpGk4/uDQUhvfsU1O0u/r76+IKiybDoNHfUhtl5I9nsvPuRPX3MmEuRou2OHq0tVKNbltjA/2H5oHbEPwDBM76RW3HW8OtiTGCbMJXKa7lJsXcw0FFIo+oCXSAGjrF8321wIJO+LWEmTTnTy/62d0l2ONy1zHE77jZsuc/FGMlm9Q4tUyP4gjPWBBjI/0u2QwFbM9rXnl0KK9zcNJQ4ZYqqNqIPTngVguBcEkHha0NCBBk/9r+VuysUP5/jfbrg4YzUZar6vAVRl+20ve6oBTjNvXTv+Z8Rm+E7XLkK/YK+zwaAfGxOEZEFLiDiKekZ1CCpuTNmogmJwkUYfn+Y7oGmd2w0AH5CyHDJ97a+WHyoqTw0Oneba1NHILZgeXU7gwuzmTdNtXFsZpssazo4S/zW/fC5ocUZUh9JCilSOLwy8rXJ+EyX9xOYkddxDmS1d3QVCxae5BucCTE+xRAiXPF57bJPdoQ/zdvI1h/4wtRUDLrCwwn+KXUEQ8ex9DPmN8XVHIbVwByW0INU89IW4q98CSDhPsu16ESc6/ylxlK757OcBEWU8Q58+tg9u/nmx/wDfHjpWGMeYTTZmRmMgNUcUpEPI+CZJfpeLQO+YJMcTrgwcOsYRveY9UX8oGJAxwdhYwAUBDGrubWi4QNBslnPKTEb7ZXkxurgP7rGNjx1YFicJn3uodkxuD8HfR0LKdtmpPOfP2fB5b9DIuw0ExFWaEXdaKUJsKgC9AfET9noSDd/j2nf/S58uVSJBY0/MS8BxH4AX2RqABv+FBC4jDEfzss7VqUmhNMDrEc+eC9Mx7B7QW/Ya8po8V25okZRtRgAiAoAg2FZCeEW/S9r8Xzqct7G0mQ3FIjOx9mZwwWkS7aOA7wLEAjcnAgbmmNo0SChf1hFE4JIVZXBGTF4TVz27jknnL04dXCpx2VHRHpPa8E7mVK4LVbOF/nJNhUTdYhA5y2HIIxV5qdKkiO45JWe/IBI1bVZA3GgW3caTCjFpG6ijCh9AV+z+J/NGVuKAXb+hRmCE3fSsNye08vMBrnrPKffTgJOY8vdcJHnjL5HiFmTpXk5ixFltxKDPJCZONsZKkRB5QrX/JVLCFqqmge17csKcjAB/kr5Fd4TX5mYgu53ZViAVzuMqLgDYrn6ciLf417kikWnzzA5h8vuWOB8acyWlT//ygQCpfAyS8UFDgH5eVwgzMJXcx8fIKZUJ4Kn1ZnpWsD5RrQqL1nb4wLsETJ3netUfFHPN5XDjIvnxQzAHzD3lgqKyPKqGMT8QH2smwxSUfIwzkzSwRZWcxRAqLvtRMdxh0wLWNJ2GVg32WnpKUebfQqiHa7dUEeZBiK/yIcaF25QjxqEMeci4cUFwMjqoHKV6xnAgw1V7CnmiBryHV/R1Pl65w8bj4tIak4DVyooxvBK+kvyYiPFvVJqaLu5TWZBI5tuDbvxsnNSJYy+fTj7NS9zWnNBkKNlGPs10ul+LF+jxirhoi8eT6i7lC81W9ExiK/RFSofjluERx3a9Vk3DPxC0sePlRRwq1YOUHyamlIKboqYJEuB+KlWNpcH4ic4hJLQhChxBaFJvRIca1Fc5DlsaLTbxP9rIKllTnI6POG4Hie3e6SjHjOzcC6GryC4j7cKlq7H9PiDpd2AJh9YtvxXIGOkIcVs4um/9Yh3nNNoFeb/PQKdYNqHFNQT8M2MPg6ZKlPsGIIn3cQkCXjHCgGVl3qagc4G51xZ6QbEVr0m+35o3bz1ckJtte8aswoFY05nRHYHmR8uvqutMjTHSPiP0JHcgBWi14LGpjxNxxAEZ9AnsATApPAJETZKnkO9IMxeNteVSaHw9iJOwtdhDarPlOQHlCcYAb+/Il34c1VuiF0odgW+CDRmk3kwX/ygiJ2TQ+s3xLEVSd9+fNCltOKTS+aGcqs/HI6C6hDrXbPa5Y8hC7TnPW86TXDXYkRBT5h/6UElBeHuskTFZhWBVGczSdmE7WrODSRD+tKn8oydZC4ILeP8AA0tSiI4rAuhPw13WUEFI+25zZ6yVurY1walUHS5dN/yzA/mOWfuugplaOH518YXWMqVUVtSSn7/GCY61xNnEaAmUljvUBvg5kQQ1sEchq57kNezwI3hwpD56LIRxGnmRVE4as1kBxVFK//08hEHr2P7508Eqo9AGZjyOCZw420BIkOt4SyG5jDuXPerZLH2SV1cD4Loun1MY8WPBbEKrj0TLG9t79UjFQafUjAgR1Hir/cnHeUY0pIprxlGBTvalI6iMdU359iWWNl69z+Aesmq9a1y92zp/GlmH3ZoyPBpLaz9pEd/KCTVCD4kOMicSvCv4vSDrGQIRB5z34PAyxXCxSEcnmHa9KuxGITCNJ47jpDQw61ukzGFCkl4TiYmNI9wFOYHrFNry3E7aa8PwMgu5sRmtwp+8L59d7Fj3rHBiH4VXwkthBTIOHpfUq7aFkXj84IAk+QkLFMIkPHLSHW2/JxGwwB8Rw1U8TBEvP4yzMghK6aNtYucfYB6YDmAEGoqLeI21eEejKh2IXuHAyaCdiEDglsU3VCNX4D6/JonGGEwbSx+JlAOHA4ZA2VcCydy1nqE6oo4sF39SyHiNTPhsuABHUbllMsXY8lZ+G18m2Y1FwFsxg8F3yTzg49MY9Ioy4QlrIfFl3PQlO8yJ8K7VuTOFt9XUGiu1L+LfU2EX5Gyl4T5ZZdPylQJSi695dybd39JaDuZ+KXMgGH15QYsG0UXxDzDi1wsqIKUkzDXEtDWW4+XOOLNgIWozd8/xIoedGVjyAbSiqZMHuOMZyqk52nPxe9VAtl3BVk5t7OM6hQzd7Rj082Hst6flfC+dLlXQU05ycBDw25BYKCmHrZRum7fAA6Y3lzZh7ggOn2JJMmKVSYZ0VHy6xSTobMcMHGlJY1cz1kTPRHe5Ye5bF1WWeksH/b9cTTuCoYffr2oPSQpTUetRljZ1fxt4rxcur3RKSkslx9BwgMia6hz+fs3d7ZgRl+ze4OYPnmU/Weq0cagcdclB7IEz4Gp4CSo9sihRnNcNjTGsYd/bBaaI1AwUZ7EmbWPvVjuinclv0R
J1Pzn3ZY+8y6lriJHqOiJtSsBJ9fFSgSjKZTNoF0wCpqpaoZg30w/dUNKVUe95apanxyNHdmapxJkjm41ZMBjbbJmlKriH2neeXSBHs26TkqJm7cJRxc1kqCJ84XkS9xWLdRlg9WGa7p+SkvnP2oErjkNp1LS+Vekh4mP1BTdbXK/Od3w1RTF39iH7tlUdv3C7ohGBZopbntjgHoVGaIe741J8AB0iP1r77ovKcFAUsVAdHW1eByNcWhHYgghWnjjapro9qD7sC+01LXWg303X64ggy5UAyIaZiCHlD5gfETSz1RJJO/fo+g2yiFlvLYBxDusk6yiIL8aQYum8OtepDcsSTrDrL8kjMu6Z/T7KtNDg60dJqY0B/PypNjlL+XQDGE8+y+MjN02J/RicqkEKZH6yYaqERZNwT+UyysLGx0XhyXWc5z4gtHd5avPj3MeHORj7sf06BaUGWQvhczb5tlGJOcF3wCvqNnK8UWREq/PYPSVXUDfS0KHQ63t6qUGDA/FnJTaLySczBb8PwrLPw/ZJrwOZPCoxlgkUTvME4k1EIEXSz6sCC9Tz8HV88K9TBL1vXgQD//6Fj4BEGzovcn/aKHIhgWYGBHCgJeASfAVVzeVk425RtIlxR2N6Fe+j/8hbZR66Xsznz4TVf1mieVGOmonY50RO5etR7fHAnuW2KILFCLnQaW+0EZ2WSl1484WmwIM9sl1QfQqM/NXOKXsX2nRiLzXsUSaePbETmesuYYDuihTDQ9ZYZCQ662AxYHnc1onkkjMucYPr9SJkS08BL896XYf8b+IfqsA1gLo/PgnTCPHyqE/UE6DQUcG2JLEF0hrQSBxgji/p40I+xawLu4biPBQGsr1M8jNfmctyT+Qy7NBC9KWAiD65A9AlYPRVFF7bD2ZA1UJDAAABBbT76OsH7y5ITJHbEOEQnNIWd0nTUxyr/SugaN4j7x/L8PSfYZ1KJeWljATgJrQ4JlQ30sjKJKCb1dKAvtQIDIJfO7TOCaRpFPDl2E/BqSh/sBX5octTD5SVyJHAa4oC03Vb6xZHR0/RIcFVLmNV0y1B/gp+NQj3g1WSNHXEbsjd8dhDTmiJBBKxsGCBDTrMsXVLKdL1c43Joq61GOy81G/G3CNmqlytxFqqVId64R5nt7UDMh2pYUFZ7BrUPBFVpMdu4pmnx6vnlz0p1mocILQuFZy7cgRM8oBTZY71EiSfQIK6BF8Rdt3VCOEOL7uajtBhNIqdmQuTDvt2zLB2HyeXeqEDEY8DC4X/ov6N8Ebqt8gPf0CFT/4nedEgfG6HvPL1sECNZir6w7dsupAxlSOS6/hRLrNW0W2I+2wXHFRuyLt09Rw3UTbi5f+1Q9Pf4JwJQqCEIPhvMGifZDKR+VrPNKGuEAUZeFUBbFXjCmK3L5Zw9NVFbzeBEdA1yw1GCI5PRjnLCWcvkT4SWIbFBzJnIQhzV0KYj6Hi5mHfJwfVbE4GU4S3IEeHdYm6tD3oHcP7yhWsMXLibf1QoaW0mTi7TcZcSX5+OpM/naKTzWHe3NBZESUNbQqhrVpimnzyk8DgMdkzY/1u3jKwub6+maWs+J9Nr8q7GmNZMivCizJnkgdjlcaeVjuDFg1nARBlQg7HsF+KGiTUxQkqkGyD/qx68kIJ+g6JoK+Xejv1n7iGGznPlAv0/jJmQozB99gZqBernw1o20bOpRZQR6qDFkFJvawT7j/UhXA0LEfCgNA2kHSB7LuoIiGRvNcry0RYIU99bDa4pROO/HxvqI8NjiuNz0r5Dpah2SIHv7aO81X3AafqoGJ93lRTncEJ/u3cu/ME3HhOyp1TDJzeHrNszcxEsUs9Ghe59fT+JvQKO9aiDc8mALXZs7YHIZ8mRl1hVH/gmbHqCgeRw3U6IidDeJCtNPJklRTw0bNTsfRmQCeSTJwEsJhN2D4n8zTTmW9aOUzG5tLO/ThLiGGbwDKQzCKoWWSuFUCn3l9434WkNcJTmSQtv7A8ZnoRqxWAoaA3Qm1Aq5MDZZoaO2cEu99Ks5yc8QQbl0VDbpbBXk0DkXpbLAmE+W4TbDib69BP0y1KYWUSka0fzvIrKkktDmi+KyjhgXOwbNpBFnKR4uOSOQZmvDLT6HPEBlF4VFR62gUc1nq9KgUPj6csq1LKGa5goNNIX0R0+UTc9209oio1FoYCPM6oNmHkbT2SuRkoInLXpiK4XZv1XufnKpjjucp5TxVLOiOr4DR25Z7TMKjPBfzjT3ZH0iAEvnB+KOEcugM6YSZe1XElRmJRvLbyqIMY+ubjdjlVJ2HN9ZysnWM0zW1TdcmlNuJ+l9pEqx8wic4hz1+i1Buse1sxlXPUUFHs5VAXU6W2EHAtwAGiQkuMc6UgwAOYG8y/DsG4oC1VqZtl1NkzZKdk11P5G+vXiJqBdYHZeoQnra56QSH6D3spYPZ9TS51RWP16Wo54YedXOKqRqAXqZBdQGzawHMjN/Lb0hKSIx4AK3C2IKx/SLzWbGSF07gMqR/IqdsTAHFUf0TcidX8LlfgeNRjwqNkPIoItauH7pwx5ju4pwGrzbceCDrpmbgNkQ4PsFiIdDySRvftHOXtbUgBvSEtYstjFBRIMDh4gZXLBSFoRHltx4TZzQ4nwz+bMxpjTlqvPzSzWdqAammKTp5YXxbhgrFv3H1xLW4Td7lbU6+/D//j/FAXd66427ML4cPNQ+K3VGBKChas5h+ZNZVHGhVi5qx8j2RlLFMkRTUtmMStF3bAb0CLro3ZWSZ+eV8pSLabDyFOc3fcH7srkZHoRZCRLtcaESTOCFq3T/kG8Q16JrRRFT6eiFHn6+SUlV2eEB6yk64h76gboxG8X8p/fH2ydrF5eCKVFt+sD5l38fJgV+bEZOkaHQ9wYYk4XS8U4Ic+fzWJBTB1QpF34tzsGj83j+sMOcNTqPNTkodCD31wHVlqIELgAk9hPPHvlXmhUfeqYIgdkqI7EgEDKNoJUDLIzzCF6Ogv7uKUBsUMeZyp/JPRidf4ufRbRU9zQoWMUGVE6FrImnbMaeRp7hNqYJYWxtunPwk0FdGqlqRUhikNv9zNmC9KTirlo7O2WFIIrKaiLQPaBLBEiK2a46SLv6Qy5X2Js/tJfRKQ0EBHZhwvQhA1Z41Cz3LDLsWS1I1Zn8LV+8RWDjIhTfuXJAxplj6yRbNkuRDEetbfwTwdwN6hqf6ojNaDHRKtuN5bKp1azMrIHtHXKzyga1JPkMy8ayipgR5Wz/JAPafY2CsiV/lDqIoFTrh7DzrqIREkeJKiRg4H0w9S5WHHV2WW6NkjLa4+8uRdW8tJ5L4iDVZcdV2iV5IR9t+13xjDRvIzmynk4MEE2zV+VTMORTEAEutCE1B1nbI7AEgwmz7Bz++AXznGhloFVXULMfkmyTY0qSIGwanupHdhcHIzA7YyuZxNrzmAnIQsIayATKvX/e+O68vtZ207QE7hR034H9/CpxHz7h/K1S7r4mhwedCq3xUG1NlM0kQpMK+9hOEyv/x6K+S2cqPOzerCTrMOvJFxa9mdHJSpYLFTM6JemLs7hakmQCBgE
0zjEwAbR3rAI6SKvs/Apm6aK3H6ybSynsQessI+OytNgkwPqvHByfFSJuFoJEYsTPTPJ0zTH8+yJyTtq5Q40n9HLSPFIC8hBM+8egr68IY+SVQmFQrlYkeYzQZhY1BuVQ96YeX1MurS22sZ8z0ZMV65yBma8u/DQpm1OOSwIxEPctd+GeB5ro5ERZj2RK3GIcXCtcvSUmdMnpJ1QIUiIZjA4BVaohaHiwQHtvsmAQr24xIcZeuyls4/pJz6rtMKJlMQGO8jpOaYGMaC6vrjpVNSiYjg6rpNgHg1bz5TkLh8Mq+M4xIFkQEifNkQgj7Upa40025Mzbeno9cIeAUfV5+c+Sf9DZD5nJBVYv8ms0gnxFgF2ZSn7rP942dudFt9ZmCtL3IDfdfeuKPfm/LnnmB2+iUtONivFoJbNmkBGCUqIfiVMEvopAJot6k5pxG2JwDjUqxixxBD2Q7AbyCgSGpdD75PlJi9zwvFcqNZ4CFYkTCoBzy48jZiGSL1YS2CpSPTLCTgG2RsHWo8OKFxhgSLIdyThRqnJFBjDOZ2bVqjtHVnF6HFcdDM8SdLFYrRH7Cnq1Y7WiDm/NyQ8IIA1/jyJkb1K5tGUypk1pGk3xEBkW60JqJ1ARFOwtJExV+FTtSmYdMYRjolh6iOCj6sUGMUnGMWaQ2n4JNkFgqxegNVLYWCQ0x9kcQt4QDzNBEFVzhSqdaHVoYN4L+FYhK0rkpO8cHnvMvSiRiYC3KSphQdZZHd+yQztehiWS9XpoOIn2Uq9VAWBH/z2tSXWsVvW72iF5KvxuZ8RtEpyPHhevLS9TRfMbbRKkkXTAel3TjEzow45FM9T8KULOrQYM5UJyrLNiyjYWisqufBkmQnDDqPEo0D/cQJTr7N+NyHRINpZ8eXdseYJiqWVcVEHlHdWyIGKsZPeJtRO32mKJtHkpuImRWRiLmb1n8671E25AY47ZWaaUNUInI7N5TY43FPQ6lftQWNaAZoEJIAME8v5NirA8ChZAHXJdL0M3cXKmtKnGLCqVc1a4VdoF4M0mjHWfUwsx9SKlv9tIrEJXbivfvIkWC7uLJFtox9d/EqbZdqn15tFxZs3u1kLK6DloSP1VHc0WTOPBmCfgHyiR9QMzNfhRu38oRJQqh/YpsrvlIuEmA2EmMa1D9bQQkRlF8YYrSf05YpnLbWQqfDRVCZOxqOsk5jiyS8W6l/Dy6kBjU+mIYXE6SgysND4EBHtzLCexZ8+neKagNzVBnv7mO6juUq1P4y3MQ1zNo1EGDOF/r/balflHqLkm39Sfe10hMPdZOBSvSSERkOfZM1cVg1Hvb2e0D1lANo6uYw6EthlZCdDlS3MF9fgK8Kxf95waTF8whPceDaxAUCwq6uYj+a+3wMoXpk7Pqhv+qg7OXNy1YTS4D7nxFsMknosnGva+zqYhWzXCBsktu6zm1e3Xaq1OzZPZ+oKuUG2m0Pkd//UWOqWgXlDvqXZf0mrkKrOh5MXl1Q54C+GYO4343T4na+2z1bmS8RM2+lOTcZ3frK4SoWhJ9X/cOeR2bwOtqV1kFW7czd04nw1bGQbxT5K+Yntmxk/7+G0euGMwqZtV1N0UL7cSV8u0LU90/65z7rmRroRr2z6E3hkh1rUcDw8L3QOEW3BY9OYJ9st+9+sc//JEBex+m/RqEuvtOT+StCk3KlL2+7Y33bLKqtzsYifezsT8OVWwtGfoLVBhhxH8CXctIZ1oFNYFr7Aeo0K1iF/D4k06VlBOGs0aKJqYad9uATgibwIVUfy7lZnDNStwATH2LT5JCf32WxM6GyyMO+zxNsO3kSqXyBeUkGNyBwKjM1F9/tWNiHWToi6gg9uCOX6QYFhjmCQf97HX9VIvFUj9K15mAIb5sHNbVij5jnL38EQZ/3hV8NQNp9+LFYyAt5L+EBcnkau36MipcmzvjFGkoQBI+cwTPtWNFQEJKcpgW8ferBAVELVshu93WX9sY2/i1mtgBl9DLCH9knfYfmUiFkPq+pRRvkH9SPXKXC60gXw6yKC1qvbNaf5djmi0t4kr0SzrWW9J8zjFfFUu7cwAjUHLBn+1ItxvJg1jhBAUuvMmcHWp/DQy+a5Oh47v4Uax7Ns+bNX1X119icbX+vsj+5hdfc2Mny2WybY2zjbSe8FfGQCVObbMFPcCCrUu+u2QoKfj70vDRjbP6jMXHgpRrW8+PlNY/P6gkt1wwVzVsvZGJDmisb0rZjdKWRzO+VAQ/sj4XnqHtfHl0OAtfqYZeeOuIPrENPbZlGiTYG7cu/ZKdkefePILX/bL1DJ/ghh5sXrGHXljWc7W9XnrT1QUrb0tdn+6GG8PvB51TvEWVpGHLuJ/OvD4DDnS5VZr40yNbTZsV3fw8PyPV/bLV1gRGL9JNric//n7I9bm2K8rUl4O7NmVzUCIqoXGBZgDsGoEtrSDnZ3xxLValSliYUf5fo1tZmA2IqE7Q5Ir/Dl6Poevf/hoQXb0V28ozvMyAXah4mjH93jrHe81gjGUsuZ2KBgySQ+tRqdDALoJbeSsQnGQyZkyhJX7M0JG9hBup0xZ30VOGRg+HWas5ypdvWUUcJae3xWx9+uLpy5kSzXuOXV1inwNvFYceZXh3kJIkwOJ3vC0CiISIOtvyaF06PixkIUoqhMjffgkKhVj1W+2Gga0y4MkhTqBGxEh6PBg3g6sSxL3k3u1ZIixcph1luGMR1hcovSkABQhg5oQbD3JmCRtTiKb2gRNiphXgZRwaMix1bUhPXsIC5Ppl4oCVsQWPp0omKTpsgyHqhA4eiUQ5nG8sfVzhcfb0sggFaTxCqIjFmeAt4PKI5HBnIZN//VbzCWcLQrwjNeqoyjgc7XOxZzSFWN+tEvApSfbjMg50caKO5W2T4wyO9dRSXUeXhyg7wgkK7ciABFxqZaSV0+0I0xEH28pvm+55X3GoyJ0dy0qyCaXZwhiq7zhJbSgd3vHwxbIfJKbr6cSxNBoA5qTf5pwCT7mtLyHnBb3aPMWS0IgwfI40wGVeMjSFb8S9GTGzXXG9T11aZAcHtBCTBQWySnBbEv3+uMr642mSdMoPw+1OZXydEaXPXIU2cSdq9dcdcrZY1684jDMGLu68yjAqNMjuckqlCBPaKE159D3nAqFjKUHysEy0zzaR1kA/Z2bykY5W4BkiHIC+M5LFfpTt1NZyEkIE6LSrk8r11y02Qv1cp+BsqTLJrj/NpWRbEM/qY83W7ibdV6P0dBzrCNKT1kQr62z3EhFR2eiM+QTPRdrbzNBHv24/k3pHU+b0ih1Ir29dW6IuDB2QeUaJek5Dva0YkbpkF88omkPhmQBAR6kEyYQScDlYdbRxthrPLvmgKRJ9fUp4E2QkcTiZi7aVRhCbRYuUXSj/5V8ihWoLCcJDFZlIL1uXLjwgBTjKGoQvVJBYjJWPmqXfAT2gIt/QaqstYTbdgAx0k06HtgqLocbrGKIT35HURWHPGwwUDfH+827JWjeg2DOvkpzlDtxQmjItfeKsMQkdZxRS0uDzuXIaxAUfDmij7JDox88UXT2zgox6yyEIGoZpqvl82
R9G/OjVH1pzCHTPF9VliKLK/Evw/Jve2kCA0Mi7BK34tiKaZ2Dgz4sXPJVnNGRwZvh1GF+vWkVEUhfZuYpz8KwvzvMdOobgiev51BfjEQ5focLwpqgNGDHwkH6wR8tv+/8fMY7kB41jsxo66hPbNJKpx78ZTSqOrcPAy7C9nMtzO0eh64Ff3HPJtYRKcFFTopjS58Mhif7Q9A8FIoAiWsHfo7MSXkv+EZ+LsLDBJ0YfyAAjBsoP2vI/qZvTUWZsKd1sIlEdX7SFz0SoXDdqZf5HwtA0UxtOZA03Gy4p0o0mWql0TOZ3SmK4dfE7nnVeuTubIFG4lUC3H7juVz/7LB9ueEKqWf2z+2lwueJnj9y+/02I3fzMBjeLe978soBhbgWp7vceKchlQfcHpSu5kJz5kbqrMq/7E70CJQt1f46/ezGffgkmUQru6CqZnRzourRQkGAG6R5Tct382xT/GfA65P0k3h28flym/9+6gVbanfJ5U1ikOydmLi1kt+9eWBAqr9JJIFjNJovBRnGwdP0YU1kE/GoF1cfHmPR0i7Jg7J9Q3v1IdI9aKFZ57J/qsymuV3enwvXXcRJm+owJd107tIslLv9JC4EO1FLLeKfuZun3a3zoV3KavbVcvRxM8m7tQBpww6QJTCcfKzs2y8C8t9GfICM7BkqFxKThcrlafmFWRf0Scx2yd6qRQb8YOb4xpah+54no6+Msydu86VmGtPMKZTOfxyYIaOXViAEizqebLfEjtKju+A4fpq+PCTFSqpsTchQLX5OXiAhP7sCDuMtTAXgQBU94wpN7y1M9JhU0grTOZiV81ejyIGviN6TSSVblvQsY5AoQyAmayRWHON3FtPCQf8SHxSkBRMJSEVYQflIM8HYRsu5b4H9EbFVdU2VCnhV+qjIVX1Mcb2zskS1sS/xU8auwDq7ydbxvwmKXcT8Nx4Q5VJqKy/Xy366kvLBKqPUCBxPm6PDaPK3TTJMxKZhqQcTwyoVZtNFfa0hM+YZt8whJ7VIy2xMa7bFSQ33+LudFM7MFW7HlpoKXcbA/tqhFNvNnQwwz8gq2KyKopBIqLRUgx71hTOFLg3O1AwRAjM69zpmnp/ZJdO7ZrITGtPHbp1NKgl4xMz4bKCrox81rkBVFIqPWyuTWcUB9r54tJgiJ4Y6oXnTEZJAP993q7/KBSZv6spJOKYo5gdxZs3OqkAxk1Rq0000fo6ME9dh9tZERYsUL5A11LeVxO3yssvu3lheuNX1lEuBjLYzw5I9DOm1cZC1gnbqKFDGb2E88Tods3zrrWPxx3wTY7HgAhrbmIqIRxVSWewEcIzwHLv1C7C0JwSp2q+6tWip/mcgUSj3KfQ+WSoIN8oNwUL6rtFOLYsdrdOM76+LeTg99DSTVghdXES+b7vXq0FF8HvsSeYWDUo6kJmZeQtpI7PQ3qIejgRIFWc8BoI8lRiZK6wLprMEj1sm4iul/h8d0UCk9aegFi/FxXEk3k3mU9ta6a9iLqkTndFM7+sEl1Xz/b9bZPawiQ5N+wqSz957kzn3sshVzPwodWbXif9ScfKMSPEC2EexRFB79IcLlqrB9eLOLkWjA1XOU0wlx6Na59egqr6CSJsdCxdhcdkGXPqTINhBSJM89nTWVNay5LQXTvbXbbX5PcMBu6FCLtx/gKEG5IWMmKqqoGT1ThiktosSNxXpTzLmmUJrqJXjKhRMjoGh9O5Kq1dgSVJWh8IwlNlLfQqi3TfcvR/aPaNMKTsBFavmDfjOkv1yTfD8Y3faje0rJg13DyrDUuSxLQTTrVE5MqET1vVJRkU3kw/RYbib74tvqEtLLK+VkxYciEg6u1HI9MLbxVM8MU5hAJCER0C2WcFY28rlrdRP1fsl4jC53Xx0VcoQrgdFZM4T3fsvmMXtm/KhXKahvYAvFCqvcmHgB74sxZRz3jF9PCKAgkgKWeqHWTyw5Yh+CNX4Xe54JKz9u+GMDIaOOBKz6gqqE0yjsain3//7pE7OTQr5xI9YNvdupHDSEjDpnothuDBb1OzgK3bkK7P2tmMzghcFB1PaIkUZcsPdboZyzwJ8NeKvEwi7baOX96IH+qQ0RT+WpzCHBfT3ZRrzPuM+QTRujrm2HhPYTmpJQUVbekb9TvhoxzH1VtHbsmDUh8vYQ45eieher4VhG6ijZWsDNVStPICrJPcyW93IOIfJ6jG6WO6Cy6lPFlnIYOoHC6d6Qvdv38sa1a0ZYBnUd302I8Ch8loI9ym8n2bj3ETCfgVsYiXsEB3eCufSF6wo7z8zNupU99FF49dQ/crU4yw1y3IejxVDl6vGzTKDT2totD4i2vmbwAvAMKSDgKIuuo9vNmSPE7WsfYyT85sr2zAG3lWPMT3ActOhbo401e1GRHS54ZgOyv6gaUM/nfq6ooV1Xv48wDUKd2sVPsPIHoo2bO15GqjabNc9vJtjs2ee/KdFqfL9x7XgFNBxn03W14w7AZlxp4WF+zv0hfgJ1aJUAVvLEhJvrpTOAnY1g0NdTFcuBQN3rtHLymzUfAWKSU6+oq/2b8m3/GIfEIoLz5Z2gouQoQaRC0HzhWp5uYzVXfyUXiqmUL/BeUx3EFZ0YpFReagYwQK9ujKpRpw+5/D4zR9YrDGdAcsREfim5TiSN632AnoyYn3GXyrdaEkWLqv3bgOMzdfbRqVJD/YOb1biYRsaewhWoRq1cYlq0adNp24PEZbcuSVo8N/SfZLmKTmO693Ujszo00zpSposnHV3WPxor7dFbFbVWTXlycic6kPs/CTsNzUfYOgopCzXajqHMFTEkSlQ/+Yxgzrvv60H8bh336Wyy+lSrInPu/7DbPFFGRtVRFX9UUG05YDCynJl9W5i3RRgQGU+v2UbJo2jNYiQa6+qRwmu0t7m1/pPsOnP3g3T3D7m7O1jVoWWgyVqp/CEwE+fpQah8YNGXsOq/ZMcZidf9VmaGqdd+FrU182iCs/5MRKW0jVl3jLDriATLTayZwuW8l0qNF4aEVuhOG4KwSuI5bkLkOczz25iEb3cojF8uS98Nnj0yKLBvHTq31DoVNyH5v/BT19/LzECLXBxg5dlJ8oPSoBXlyhEP+JoLl+xrcyxrcoRZSSvUfPvkLNkl28vub3pSfGQEHqAbrpzTSY2Ib/PJEoO1h31ky8gCCSp4NBU+AWJjy8axKo0ZeNeVvbifQ/Dz6gew/vRtdmjvV2i/UYSpYpM4VTA+6ZGzD7gkbQf6Ou+6YxYGXSQ6Ksyqej6l+x7gtwFR8vzcYQUsoKZuyNHdAim3XwPSGZ0jUOF2uphL1Wf12G4b8GyHkJVSd22Y5GPJ9SC+lD9djv4HF2BAyxV+QhodniPWYsmhLuZRAkKRoc8K226Xr1hoF48UPfOUoJ4EwR7Sg/6tN7PWsBv9Zoi+ZwLJ1yRhRl18/DwwZcoUIf5Vx5uceKkNDz8dJQ8+uiIhxnkWGuaVEJIWQfxMm9F5Ro+sUX5bSCwehcNAwJrSJ3XkyBZFE/b8YuZx6C4i+yfbgnBJ3NIPn0GRrUZp2PesdVpTguWPRfz1A9o0Nm5bD41L
uXV8NwphE3IQxPFJcCKKyo4fy1gLNt6UpQkflOSAG6+6p9YxaRoPZCZ6zizNTkxzt9a9da/e2H9sbSxnhPSFopg0Xzlp4K1fdK/A4ZXmhyhPCxbb+JZiYJq7yM4ZV3ZNS+l6sDHxZ6lFuw1Eo7fdiZHy+0ktD3Bd5J6grPcSqsz/juFmz84x3deaCEXjTa6qwfEyHpRP9qWcUwj2O4fy1M51HpHLQPTLLi7a+f61xr4AtWP9rJ5JMw++nj66VO72DHsvbLs9xd9X5Jw1Op+SMl1UBqOch6WbxzdpSMpi29cfn0RjwHYkBglYROttJ13sjSvm7kzCg/Uq+mogajZPU4jLcdWVra9gQpCBkaqNqAJsem/dJmB+jQjY5Lboeaw7TAf4mTNTGunSzs0UyvbjlhUgdCuGv+gvbh0LC1tG/qpeYWXv3wUUYpRvcSz6DNzNQsajtFt7ew8rGybU4E5B6s1jOpRMN992Pi2w4fq2W85XWmw4CAyEegUn2ZLuUT/u2Mr5/3IjxqPXY/xlu2ZVnfoqP4pKGPF2dNcdOBYUeM8D9271UZuwcsYjjfA/iEu1e/crPXTq8a8hi4da+2jfg9DPAQsfIBxlZVa9EXzmYQ46/fKmeKuI84oGJ70w6qUv0InQqp+eKxo+PEcadyK5Yjo3k+aE0vrUC5j1z239cDnSdskKxZiHGttLUG2k3pXaG8YG/AvN3Z7XITgK9aU77cqfIcKcj9pwAOPQUuQyTU4SfvotLYcyg+4T9g+NY9xgkqah0QVW0IbXov7XPcKhZ7tA7wsUYHjqhbig2c14PMbz8JPX3Tu7f8xE4c2fssNky22yW5qlagLZXAdgtxpg39wWCjNHUo+8PeEp90tOBvtOc24Jj6rfolcQureKnES+OF+c5RZHKvLiGXHMLns73LF+68Lo3U8W3/2uO1CZbhCaEl/y2mUfGZSs1DK2vBXy1zuGflzeX9znUopKxrjseGChfh9D3lN8+zqRNCXN3oLPkUssD8rFfyTOf3j7cLFAz5LRm570z949i+iCdbg8dcXdgMTXuh+Ry/7dr74aOECh5J4g3t4Ulqy5FqPAEb5w2F2eogJ0xh4KiUjJ/lYZPulPXAzNte6vZK9Ixm4TVG4WI+RckyMaWeiAMkKDk0WAyyHDZ4lJDDzy81bG06hFjmGgc8MUZh3/qSsMY9ZjvcnMBHcx0xyfSbsONcfF7JHbTaVCIj+yxzqdJjZKez3tjBxYu9Cpv+3UyHpBmrfyvbhRJV/B4tg9I5HlYFdRY1VodPFQD72kS49F+14dLl3AHr1APGlEkYeNWOzTwT5gkimSSL/eh7N0E46dZPi3cPRAA8KXB9O8vl7pFdxGZBIIloMPxUeQlnJjn8/MO6fx5ySHXKY86SWzJVXOMSNBNOrQVmxiB97dLROPscz3hBYuo2o2gfwrv7dvY9DMm1SDNNmeZ007GPUB2STVZv7SwMKT2qJ6dpd1PwkGYKnLEOHbhLEMsNM67blQDliTnytE98NT7/qXCFPo99BwxAVM8LNNMFPAVWZqqZYXydX+P7PlC+pDbHk+rCb76VsUk6WzwHpHOw4UZZ4lk54/mlJeOxOUs9v5d9ELJWVyr1YXqOKsfVsWyTn9s+keZZyzkKA6mefCDcMywdcXGDClaF7NFiM8MHKcGlTSp2KwKS4usAEECkf2cyyCofMZw8tNDw6VDqyrLceHW6UoVKHaG4ZuBVrCCk9LdwOmKzGPfMlvzAQ6LTguAHCMxFb1iZN1H6Av+TJ5O0ebJGRwQ3xHlhUgry2lcRvV1YCYG+F5LF4xwkepnxDV0rsP3BQcIp0FUIn325bP6yqKK7E4iaEtCx4z2hXtQpcIx0Qu+g2cVQ1XSsBqydiURR+1eqHlKtMUVvUm6RKcgdkYNaGlS+mRnRZCdtGGUMt/u3wsDoKx4NdULXyEUyY482tHSaxP+fVjGxIADGD+Ce8d10kI63xwBiViZH4k4f3N5V/2fPo25/JVsqqIkNnp4rCN3Sejic5CZ0lVf1M/OWPsIKfvIhk2G3qNHSGSJigqQmXIS0LF+zgFKNdDEIlDgLBPl/zZbo8aMWIgShWZQ9SCFagoPGHovGZ5dk8XiihdCDreYz9jeuipQML66sKJUMASVfjgblQZLFbnqIgpqx9/YBz8GR/A0ZhHFYcYR4ln3kpwKlpYISP2O1nmhE47FnoaB7ReRnk3hBDPqIbmxbEvchjTCxgOIBYfIvC8/h14I54F1+Apu9tVo8nsCsIOWz1kcsLXtGeOszbdJaGVW1qirIHi0zLcw0I5MNYKlrsmc7BekgOh2wV1nbJxQoYWTOrLEfpi3uzSDO3jgk76FvUQE4zTfQDHIxhjCuEd0Wdx3lIZ0yOkGu7EgW2VZ5OvGL+wjRZsI90nx/sdcaJ96zL9zX2BVHvDnb8cEzdSUoo3wegOe7bBRwldxQeQ88+PKIWf1cIzGU39YCHNYce1x6SKD/VZDWPb701SofTfP+DFCuM85x0Zad4hqK1gmETOs0k8vz0LyFlVHSiAYuCEjfINJjNNYemtoGQVkR9esSmZOXbsWRlRKwud9mg3Y9yKwb8k+FimwDSWnKZOVHsi470ml5tP+sWbLTTThlgE6OdcQInQb9HiQso6KP7wa/BHa0SNszUwxVw383MEfp3+AS+cItkOGZy6e7w/iKL8ZqEScYGs16Lj3aKI+gV4Ak8kc27q44DqtYEd/10063sOZkUfU70tdxJv3BRrxUzd1J2dvptW5k7oTfL9mbVpfu1Zv4mmw0A3/pTgtEX+JOG00lrzho+bQKeYZdwSh7BDXc7GyggjFN1KnIu0Hib91rUgmaj6z6Llbp5n5/szgVsxp//4WwUQL7ObaPUa/7buX7PWP/UmPiupUYuvXWNGw/1pbj+sTxjgbDHJPX6P1gu3/Eiafq2/Xn2/ZiO/bvMeKBu1/JrKUdevyU819vG40twR2A5GZN9YbDP8blq8QB3qGvz4JMnrFdYjhcdcPZlVUVc5gpTM5HKiqQQW510rlxlnoepJ2588V0kUy9Y2aR7ZgvbrfiMoe3Sbe6bhkhNkk7ut4Y/N86BXaSc7XDHDLsQY1K/L1/+h8hoVg9FpPRlu/4hIcLSZScO/zncef/BJzK8ceSfPWbyFYqkwaec295ivY1rkSSXofpe+w4yQROLwGDAUrIwXhRylMce5ocjEyT/WAh3V2CYi+mMJqKfY65euZlf8hezpfYG3CnXJ35dP3E3O1My81+n3rRIsMDn/gTryU+zoL7LHBbmAhEZ1+0f9p379sRmVPTsymZhwuUjSV73H3O4NCJ15P/c+cIkEwfMghE6QEzWXD+ysW1vwLBtYVWBTHeK2jx3QRCQd+FAEB/zlTZxr/36nW7CfYTbIXFLbuB2tnRP4vL4MdnFLGMpRTCr6yScod9SwO7/Sd4yFrVsK03iOLcLWzdk22pBp8qABSr00jEBls2o5G3BMj6F5Xb/UODlQT2htaMhYrRI32g+vvd1bgu2mvw4ZDapNBc2mzeU6GbgpTvqb3BrjXZ9F9qzxfs0sbd3
BAR8qtgaFQ1UJsYSgrY1fTUEyyUbClnANfsNzI6E1+/REbzeVOFviyvUiLW/agrSctTwYDfnYriUiG8ibki3zWoSqIQLz6Duok2fPSi2jReWg+PXvJE1FiyVrspuVLTN1N3nhwgSRInDFJ8VmWvMGim9H59o/flw5CWAjLKrkDIkEovduM35N8mdH6Luym8sJQRUH8hcNcwvnFdE8ZHZtcnglUJY+kAU0CNjnA/n5eLCm39vudK9FFQlXO97mLtR82X/6fsVWKLKRsjx81tLwrbJ2llyGQwaoDnR/QDCtI3LYA8ag0qCDrqOSaigTKTWPU0m7v3tb1OaNiJm3GSrCHGk7jXIszlxa/4YUr6Ag/HiE8M89LfC3pbeHGonNKNGoOX3qxcHOyVq9CypKvfOcPittdGerPNuAR6KlI63rsS10OEmU1WrFgDlNJbhL5tTHTv3joXXV8CGSYltG9owDvX6oiilxaCaCMj8QfYfagMNS/9adGKfSQGW2hYIAcwTibKKSZZx+DF8sEhvgvTUiXGwYC2+MX8+dzXkQbrwjzb3UfduwnwrCczWqxtj/rJYKN3MVuyqNrcG6f6mQ0fJZHQFjQaGYTEKAUnm2ILPDbk1wDQIDkkagTskUKJgKrJfxgAXZbW4q1jgItkmTh2z8s2fK6iDy9vA5+96ucFTIASLBqDxPsKRHytJf+riQilR/S2WzPcoijtt4slgyaAsRDMMm+9pxqx4I+GaqeoRfnK+/Z7oLNtqShEVnKPTlTAX9thbUIjxYrsFlQzIhiXPQAWlRPLVKw5QMB+aSYY9Qt3Gdk0gpi5RwvB64f7wa7gXH56GaCl1Mok12vXZqAF87bpCZU9xW3ABRhbTN+Q2WY/Sxm/cFY7VO+8ONFUlBtQaxXOfazGZust0c6YqTH598QKErkI0K5CmHuEFhFMui/C7+8KKCWQ/FDAC0v6poSaNjVfA9fygLFErBZ02BTmztRlRbcTNSX1TEwhXVfkRBHvpUFffwkwNqxKgCxCpKLJDGRZ8wU4tf9edy9iO0+Mbm5uaoBJR/phs5Woi41YsO7Ib3LlbRhbs1SrJP2PQUJtUs2w2E2Q4vp0cQkwSVlkKlZegY1e+w1huiBHsDcR+ds6GhGWamglLJkNF+9Vg6nD5uwoR76wsojswi/H523ulSwQFFR8vfPixZ7Vl1HVsesYJyiaacW0Qj5SdVDK2n7XbeHK6mS0STSe2hHFl5Skvf97Z5wI3N8Fs1KPqH7mWrfZLa5kDUELxHFkGjXjObzIPTotPAxFhdXvmNdEsZTh/aEjEnMDpCM4mKh0WRtkJEJnsmIMblRuH3uBi4MPEsN2Gh+SmHERAYEMJ7iB06HfFguWbubVy7HSXPWh4mXUXs59IV8g+DZ2kCL0XU/G3rne4+g3kKKiHkBL6fPPGtEIr7zl38Z9MVfzwnTgCF6OlgrPBJtL3/+4edt2/LBe2LuOAtFWc/KYoYtj8EWy5IoXG+6Ylo98dYg4qKgF9JEgIRw56SuJCLaOdH3vgLSowUqdv7tlsHDtQEYVZcsr2PSIQ5tIBTcY63gXwkLvtKXq+EJ1yZsAWeSogspR8AeikARyC4QZi51sjApmQDHouwTMOGHgkRj1v7AYgYAY57Art/wwrAhagq4fgIjgoLZ2YDNZLQu9m7mf27eZ63wdY+O0dW7dF0YbcDNVJY9zYilaw+sbrujDX/6EpTnMOmXyAx1aqo60v8fu7nW9/dU1n18r/js8jxY8UeToiEHtkt/UQ0TLkUUI2MqDHFgZ22lKQr+YOOLiA69LEtz8sAlQUSuLGCTCI2BhOhYjmYUDtZDkg0mrNx0iaO3T1FTrQUd7BCK8sSdAwfENM2vBuQjhr9+pCFk9HoqdOqtDd32jrF2ToQ+NoWbrI89WZlrisMoHSEtzsCRtGJNOCihDOjeUWkE0fFAjTJVEVvcZ02NXIRA4S9OObAA5uppqMVAglNeZBgnhkiHrsHI7uxALrzDnYZN0DfMcjltg0wsZ9TRhmr4mUGNslF0BkBEDW7JA2zbf0KxqkMFfUub05PR8kIkau4gOnPaZeuiQi64qKReG4GesQRUsb+COhHNiDiomHi7f26JGBE/vRB4+BUGIkeuG8+jRnrQ42dGCWmYWdjyv1NAnghy9N0ZX1YR/aaU5AVQfDst6das4bRK33zHXIwONfgbnbhGY8wa67CbLn4UEllSOhNvA2K9sUsHGFhH6m3NYnto4BbLrVd9MSNCiPXeEcc0lKn415NNqHxHoiFTHORCxFdzFjXPlyOUGs481rr0smZiIaJJP22nnT99jHEV7cl9dQJJ+ekUFloJJ2atqGn+/ZT5Mm2h/FAnpxaypMDYEUCxw9mkDCksnZjRlyISQAs7qhBUHMocPrwTqES5O7THbNRQY0kpbL3pHmt32MVU8VG6FNs0qz/AzwkzO9vS+6DDedaZ6wM1uzLnzD7WCoWbRVDgo7sL9vHD90O+7qzqRpaL3CjQl3BD+fMS9OtlnflWLYJ2cTxYyxJhA1Y9qN+Ll3Uw0KsV02YTq98kgig0QihrZBRO3rr5bIBpfM+LTGw6Z2O2iD8S6TAfKQuyFwqHdjByW3Bbappo9A9tVqKwQHH/L3N4SzVFBppirCxy5J5Tf/0Xt/mWatxB0cmz1Tpdj4vQXd9AN3jn1s7jIQKt9b1rNgIKT2jXSprKcmeOW3CBFd2PeUNpyIJhU/mZBUMSMeJuMl6wYVodqXZzRm0l0iFJnOCCz8kLFzBsznZL7qotP0CURcJ1yl3iUocGCsKbrjYR+v+WQN7HM7G+9JO+a8FnZ8Rg/H6DIv3BqlGAyYvkJcgYd64aM0iiQ2aeYsTU0oE35VlbFu8+9U8KCS5fHMaxujqI2rnedaPDxWjWzs/S8Z7d0hVSf+fACprxQhGTE7CNRAPbKuBfEwvjyug3D0MFpb4a5YB1ngvm3OCwPqJyksi/6Wkg+qkxfknFUpJoq2a+g5HJ4G5UdYfGnw/n8CYaO2IGfAYTpv8kFIFA2MJEMJ5+NdcfuIUGSUajx7VWTgDXmrsTaRrmzjGP19j6ro8upWx61xh3KT+g0oGWKAKo7FpiTNi88jtiE1DBcUt9jCrr1sfCXpra3TwLLFm1Qjd1PBcHUAMFeAHzp/lzfU2iMAhAAZIQ2D4QbCbvgda1Kq1F8VzEOyd6qii4/iASzts6tGtrQKlLIIwTiLtHUM6jBt32BhmvhHj8Nj5V1Z2kuroVpDS3c8LnorukpnpANjh61Gh11bdW/P8enthlspbIhf+HocXzY2UIYhiQAUKYD05y5eQBx8j46FMOBLQvdRjG47QxhEjwim9Ewf03kYBm8b8iDCu/8AEyO50z0yD0o6q37bm6s55v+qZeJyYuO+6/UYWwhW1tKC5HBrHqBzcbVKknMe6u6BCzhy+v7N2Qzx9bws8P3A7W48n99jnp2u3K3aFzxwn3tZ6SQoPjNOlQJA+vzJ810dCGZFTf2/kg2ymDTtrtYlxH9n1vf7/QAvtdG+/wApKuV3lW6FEXYpytsV3WaOkeyBzDoYrp3
hAyvThKiEQd2SPc/cRbDvEzwx2NeWxxBQX6h2M2ozodEYaq6ykoPgRD9us2EgBeYMbRXe/YoFIUorvDSpyKn2vp+U5V5Fe1BBg1GWUaSqMACf9RAqaZFh4+kPys7Tq8BJwWYK4Kv84+pKVGeKungc/nRJQK/EUyBv6B2qaSbcFm4xgjQI08frUOZNO2RZu4ZAPwRyym7kYtGc1ZHdIormtUvosJgSplJhjhD4gU1B2VStboI3CWVLfMDlg1HjKevxN7hFUotcI8zcUE52iku5uPXrFyXYjbNdhbeQCU2YbedesGhAcBLXRWpoTFn9fEFyOXryxc8pXhADL58/viX8BL7tplWOQYnTnHrwD3thPhBSgfcwXjwaxVO0YZ91G95GCJeCvIHJimFj4Z5ZlaekhYVMTKW/+JI1GZ+QgZhtrREaL18eBsidV0nuLppBUYRf3rjjZVhuGs0RKnjAnyHitAmrMBmqDU6NBm/OrAf9QrumttpdCuAykzwaR7Kwfh61aJ5cmrGx+ZNht0BHk5gONloegJUtNsVKqGeV0ntEDKnCZD+uJ1Wktr7Yx+8tXlR3ot0vDAhj8K44DA6Ql+MN7RrbnscEOtO7ZUeH/uJCcRbkBEbeEUk3nQcZ2FkYKYiOU8/uWSNwSkMdQ1Ogfptj6NX5UGCbdV6qHLFqikJV4TijywFQDmTxD9zzu5IxhiGdaiU6fxHgcOL2Tnf6VbHnrOK5VtAut/7S527swc8Y3Lc1/sRUGtOMfT4aNm6K3dR3GsD3uiFHnB0shOeWgATNN322GJxht1AJE3PU13OlxzKmCxQam/pksgQ73xONVrdnJulwb0Jyr46uJTVs4i6VU1WpBg3HZ71BgkdXpHVgBSPTWu5+Oi53A3Kjk2mm44zj3qAzI43INLJ+z1miKp3SAk8OvnKm4qkQnicEVqa3eTpKHiZi6zahm5Ntei6rXDwJF3aTwLsSdRPHYrg9T3DAVdbdPjUFRkMXZOOvp2cssGzvwHyjsTRiE+fbsyKhYo96wO4I21Gu4XyhBmVOh5slgqhEUP6lZoyZyeUNsrMYlMEGI/0ixpBPnqIF4IZsLQxpAdLNK6Jc6QdrfEWZmm1OLZ/2Kojc6EoyRT2I/hhgkI/uNtvQof4WPpGoUgW3dlSvkWom6NgqYaDDUDENi/yJpej8m8pNjfUQa5kNdymVzjVub7SE660TKie1gR5MHU/ksfcxuDxn76DkUtOYddddF6vULWMYeYkKZToTUHjdkMo1/SNxG6CQtPRXRmRHa2sCPozpYGLVpzhE3NKvQxnw8o4+BrEZPPz+SkMnbkVYcLxmh6H/jVDZhavvi7+9is+y/UQtV+7VdOP1Zt7SMy+LI9tz0zQqhjWTw7ZoY4tJOZ2AhIXK9xDwjM1AVveE8mIlbwJqiuImgN6HIgBsDUGPVMWOwMmUMUgrUZZiaDOxFEtfkEZp3ZWooppXoE/20PAkyI4KN03uYA19o1TCjsKbu3g78YGqwD0xG8Jw9W2UKDf5YxxxbiHrL7QhiuRIgvI/dF/jrQD+jCgcMVypq0raj2E7z64pUEre1h4oAjsstqI+SkDYyH0UFdhvrFewRqt3yc63yCY7ywZ+Jt95oPPck32qis+vaRoW6gpMiFSuGVqjUsTFuBUgbEMniRHScH23+F6+psWMnpW/yZrBZM59s8j69+8m7PkBt/aeO2bmbaS49+x2brJndkbJezZbXVcUJFkb7KaXfLvfzgYS+iB/8jxZzG9Py53MLSP5LMWgVjN8lfnQOo08LY/1+ozYOmED+c/mNaqePwLwAJxKEr5MqPoc8ZBq9Fugg4eOf5McdI1O3StA/vrlQ+ie+s2FRfo+KAqmgRcVKj/ftGG1muY/2kbizHazdOXxWTW+ZcKMESCHDMGLA3ROFCsjxVsNFiPQ8oZPZM3qG6fgw2rqH1e/eL0TKKk3mxRTLm2HFYrP4xt2vD9uMNXSypiRj1Oattfk530LXx1nk5hU2zlMSdmqz87tUGXsJD7gN+yBvuT5x0Gvej1XTTr0X1jz/FFJjOuLH2aI8/6SPA550fSGo5MifMlFloU58ckufPXLY4d0u5LtSlzfnP8NmmbRck53JQnFZFhg/qpNu6IgNcMiqdhhsFDasFbvm+gLNg/SSWmySSx6QJ6gWDqox7gBIqf5pks8nEP5UTkTEGHOGoG3fyj1tL+sOE7yzgA02NKzYLjqMIE4nvJCDcoRnBdwAfaJvTZUPDN9kA4h3zoyaObchIqiCXRHWKmUc23rPNjCyjgee9KrlEmpCyn/5RkKW/0AEYzuWhzykzPRtI/ozLiexL7XSiAQNxTg3IAi1LhTznDFfs8fLdZFyXXx3zVazxNBCZusrabVI8/OPe/noSOlSNJs/O8qeE+oBwWYg7LGxufKMI7H4t7VyX/gMPhuQzZfszf++iP/yChUJs4i/i64j+cAMwo2+WI9I0APyHsfkdEaV+Dc86fk6kktrHFbVh/SqjHo5bIU9svp+BCWJvGRi0//hZbLkpzCR7gZuCXERvPVGjtiCZzvgig080eTkJhv5qjOKY/6sqTHpPPhr0gRW13Zgxv8goed9QLxFs80n4X/f0SfeFfcDbcxWx+LBSqRtLOVU4qahf3rjSDiC3iluqLDQg/JHQtUDWnCM3N+I+6h+24Zn4E5p6ISaT9SqPqN/1Kc4fx2Br6vhifWs6Xh9qYUOPQfrONoqVXcIryE51I3liFY3oTIn8HPNSplyugksHjGkGIk/4BANjwVDYRDYj5atmxwtIZ/YvN1yq4Mcld2v3JwuMQwyXg3IKjNQIsnaDkfNwgkPm9KG44IpY7iltY7DuQSjAUqKoJ/KfAhh65VEfRCuQiniqrBmzSFyVSYfJsHWiucO1Wv6L6r0/Wi2DtkfDZjGW4l1aVj1qtT3j6TKlJW9OKlKqvB/kUQoRq5/dJ69xJ47yBtir1K1s/Q2ChwMRV9oNjp+QZVcWMoag5V+ohAgx8yvE7wQEQ8P9G8QZQ5DaeMaga092Ff7mnZckSl5kwTMMjke6rMNRVk8uEU08GNxj6DQ0PZE8i6/itwmVP2AiOotvKyti1by4kGnchCRf4yarhUnCvvo35iSfVzA8gcspPhlsPFBvdniOjYK47stLS1HsbpYKNVfuu6FMdHtJukVtdQS/eMgctREHt/lYG+I9cjfLZX9XnFb8FaeKUaVYzwO2qJ7MM3+OD1gtUQTCZ6TWOzshxT/g/+EdLO/XGQPhV6hRpe3IBN5aLf/zDfy/WXZtiPlydfCXkJESi9IhXt5Ji7dCktnMFbfDCM98IUFmLlCyrpJbEhlwM8kc2vMLlxn+r9e7hU5zZQqMXZwA0Haew8QFUrp7r06atGRSW1vVsaDVdMVMIsd5Xl4UGFj3pQa1x+y9LaNaXP9pLr2a7FLn7unMOB+tLVIMu9GndZtnTR7ZboyyYfqAwxFQTyOMH9EmbC4UCq+ZE2ZkEZmgkNMSKdYk1qv5m66Yp0yW1KTSADD7dBWWcZgAr5NphpTBb5I2Vd2N/Cpsa9rf1m0/3oa2K4Pnjogi
y1Ypehjd2eKLNYW+OyZc3dVpV6d7N7/vuJ/cXSXClXKEbXNubtYhqlQRMXpTEVETgeLZfDx7x5SzdmRJDDF7bBHZytF+hZzt5dzLNSPAWBTkB1dOWlw0a3gwimA6JsBSBdLqvfn9/n6enWMRV8n4gLI+dmqaEKktn9241XGExVxvWtqbbMnR5fjRycy3G0h1OWdlmG4mktxN15C7gCgKDCb3TlVUuPDFsqzo9QjminRbc5IP7nqmDI75gKmS5LX50Nu0TdwpwQtQJ8OBDfB3ftwDiMMyw9BJCfCJkDhXE1ELLCZMEcnggxnBCGAYDKVi1Njk1gphAKpG+r9nQWwQCNNeCD9DdnOVGCbvdsmkD9H//zWx950G3q7YZ9t9jD78BZoS/Ii8ae3n6126ZZiaRMeqTMWbg7XEOLMKKN2f1hYY03KRUj7g1upeBNNWOyjmbNDqbtqsSwMaRdONpbFPfzhUlFLiu/46Pj/5upv2uP73ezV4Bxylf01bu7LQSHlwXqOZlpTrag0FyWi+Dfc52HCQcxqmJ3XQr2epAogxwAG7+9aVw8fPQv+xavS+q5bEGpSGeWpY6m/QwY0A/uXQYufcscEGuTPnm1BIhuXG1bZkGh7AItSMrAjVD6U6VCjF9egl5yct13St/gDzZ9jJmxWm8pNGw+G5hpRSSoOCm4u1nDaEI6z1mgRmi+yvWzrLIlItd1Re05loQw4LD8Duj1ECRAaxxVnpc34pq/4EBmfautEVRtpQQcJEnQl4XIp1Ijn4PU5p9NEtK4R3HnW7yZ3bEOJfZwrc6Sg/62W/JDm970QqjsiAQTVtEPPasQh9wpgVy/0um+j5NCj7vkJIc63W+9gXzVekvIBtrzjVSW8w4Tjhoi7s2FU+d6idl00qlfgNYpwxevyYD3eT52rzGemuY++pBoaaTdCgI88IPBr5DMmoghohFpEEPJNP0fYx4uQNS5CRHd7gS/FihEWhYU+RCYiYK0ZVAktklB86Ed8dPWIrvRnRHbgo3eCCBEvP2IPRSDaGbm3yJIPNEJ1SOV0ayOxdo6tCLucvUUUzNFq1gQKO3DxvDCUtSG3TtduCJoAFIBQi/BF3x0sjE0mqMpjfpvn16l+18je0Qb/YgFXcRZP1WC17jbes1A8HsnsDyQsUebk4KZzxtzmA6WvauNaDvy2bmPh3RH4nu19r61ece9UCIZG1MEmZN5Hw+BQ4eMnN9seWC6cJS2fzgc1wK/qsM+IWvoNfA1hLA8VxD2/0Nz1kDPcIZ8reTAWtP6+njSeMAw3rvAJgx8TRDZYI5EOHuKL6KLamWBMyvjCAZSjgdKeNNMsJ3GF4bJgXzoQY2pcUmEMgbBvELg7Sodw80+DdR+S6CySf2wdEJsrAhtSXlo9ARjr5gPQ27iDa7dpmMDVBJ6QCcRxza0FTy0jQrFgc/++rTcPvnp2rtcCmlTnWwyYSHf4a9IICkdPvWBgycQBXkegj04L2o9LzkVpNpFRflkOFU4Ky834wg+ffU/a1Zd9Wpga8pOj3xf+tL+wqzFFnBeL439C3fiyh/Ymq/NBGkeu980aA4Yn3FQPZCRLca528LSmHDCIstaUwRJpEaMhjQEF7x2WHB0Immf9NP5M6ply4CJbx7Va7vQ6O9sOpcFCPfS15dWqnv09aYPXc2NNeNwRNGGbq1hQS88gsIxMm2v4jEevP/wQFlUTseF0w1Ut/YZ8ZAVbxfbrFGObYpr1Zj3LhZDCunL4oWNbyhZjwVkm1uVJ47UWVxcz1no8DpM6Ax6JAywdN8i6ChKb5kq9Mu5k2mBJmKgHV9PEKuJ4GXmRfV7GpmCEJWrwiG1tjHmkZOUSvBpgCtvZ8ERV5bW6Y0xq/RHenfzovcvftryYvE6B6q6lLmH6I5K0O413SMgVkkO4V73XLc9G83AFM0g9ReUZJrczl8J3m8F9X5IlFZ7A+LsP510gRP3GUtampI4ZX+dSn1xts9hpG78zuVHr3pCOgJMKqbeUDHlfRDEpIoIXHicjMGFYFrddeDJfCTzBSR7s9p1gCE8yTCE+dW13nVlMgpSdYACEAbjWUROqOzXttU03yvIlZ/i+aqCF/kcRmktJSBTeWvCMXv1xpLm5yNkkuGqmRsFyMQbfgjEzs/UoOgzYEjqmg64r0JToQAOeN12bcnSrV+nvBzzpJ5Mx8z+B7lUw7DzG4rdGX/x3Hlw23xeLe2G2l1U48Qkwuux32xN9AwbWSDOgVg2pb6I3f2O3z295Nla5YbWfTc7HXu9Rm78jx0dqPS8i+WVNTgqDc/JAzvNSH49FWSlkbj+lqX3x22grM4Ng5+s7kJmnsvEiTdb7ei7G69vy+DAhaexTtONWn1P6It9Vs9uONDeCVz61uuA9ura4nyipcPVDp5FRlSEPG4F54MSNUvsB5/64wZP09sfbKjCTTsHxN7Ef7IVZI057G3hgJM2ZsyXjJax0jPiH+HU0VUBc7HQ9eweUyrbM1RL4uBaf8vixj9d+N0l5SE6vbBpx1j+He0IBn+7auGun07LcdXOx/zYbchqZ2JNmw91VWIFqT7Oh0eSmTVOsqphnNyld0heMbpr8S3PbtawmacuGlOm+LKUHR6SYUx92Rz6fNGt1/zrO+I9Oh0w9R2ub9FyFHXQs7l3LwNVXcmRr67XblWDUDkODHR+caeK9Cz/C3FO1bcpJqZtsBKgYOyKXqEEc2BPmms1OuB4bI15YQPs0kwNrUxO3EjL13/iFp4+U6Id+AuOkfUZOE2N8QkEnPuqtqXaciJGus1X/gG35ar2aNimb1W7auG7cXZfXzWvT1XG05044zSkW6mgwZEaG5ZLdfiwJd4CDkN7Kt6vTRtO6fTXehqTv4h0hVldulNXPq2yPlyRcM1No/2iX/NUSKkru42VRJMt03ciWExy1Uwb7hDigfYeLNhvJRtUmOxu1co021e6ueDze3heFuYuihChkdnGyjsgdouBPbE4kXseGN8GUvGhFNou5AOKGNWw0boINYMFfPYK46hrEQbjnYm7pDZGFY0pwwLFIs8cDiuSEWN9XbzFGeh/81SKtvw4dDC7ZoOd2uGVGk3b5GLnkHUv1anbxvmGWAkMdF4v+b0Lwis3Q/k+dfcuZGvJV9j0n7ArvX8kJj9Uyz5P8vxwxruBHt2/ZnUZlf2OOVllOZiZudgqGluUQSzQ0PM7uUizDH0riopzMmcMRPjXrrhvWZu8mDvsjrlau1FPTkzBP5vuCijkIh7+AYl4wcoFOTUP7pCm/UPUArXq63D9/AhqCyk/yS0zMyuSk7ExBM6znpnHnK95JwWV3KL4SHDLrNy7CL1bRUcAiphmvPGP7I4q+2KND8A/lk5/dWzB51WRPbNJg3Abb0clI8Y+dc7hIIfv+mKxpmPHswzxbjEoS+npO8yxtYZvSFrFplalySxpxaGdItrHJ4DcvG+xQyHTrcdspBWsRH3v+QfKbXbDeyvo3fA88/dzxN1h62U3M+PJDJjJGYUf8VYvPpvmO8EpuE3nqtBJPpUzqagPlvXjc1ZNx14HTglV9olWxXCLlT+u4w1im
k0MMEL+Dtc/9uJ5q8z7wPG92JMm/yBDVMXgofYtRm3Nijqp+Voe9Tlse+IsAPcKdOmpFbzc3cXMTJQltV6v4oWrjquEoMDdZ+B78eHu54a3jhjeOu7d8uCgt6jMc/yzK2OAiln7RXDpfbn9bgtsRnWayVwnb9k45S+dtnKfM3txsWfl5vEYPuv8wSf9hssHDRJyW7CsZO22z4tgI3Gw4/KLT4B89aNwfbjLs3W26lQ9O6dGsoq12RTbDWWztcuPLE161DGrLctCSo+Z8mJrj1IGTbuqmuXOyRPgSWpcRvncIB/fmjafbDSzOxGVpFIDWX5g3r9j9KXea8+QF9zN0eC9TWA2Jdy5xJ7XpkWHMJu/Pm/RrGiQWQGXp9MVyJwGbBHc/lN2EELz473A62PoJDceZWW0eq8pN8eCw1/RG3b27U08WWm12FW6AOoa/ilsWmxBsMZ5wZZR9D9QB4jED2Igbxugl2lEIkCjJFA0CL82DcOmRm+xZMYbQN1+zCs1qPhdcfZGX0xCF4E22E6lHsprIO6JdLkYoOpTkBP2smEebMlj2fnL5/BNaeO2r3cvqjXtlR3aYhPq0NYQuUZSqTQeTO8iuXMG/g7VW2NB5vQ4zeWL3o1f1GidYTayZrF7/MRP749ESjKFdLlkOwCAa0wacD3jzGypu1ILGRXMo9DLiRRWJVEgtsIaoH79kl1jViyp4zA2vdL16wRNC6XKxwJCl5WFo/BYeZi8CI0yngxa29ouFPdHrg1RQ7tzkgOmQ0ZSneDhd7ZHqNarrzFdgC5XRLMm1SZBq41dgKAnyKYmOsn930osMyq4f8ruO7O+V3yJ20CJbtDlI7vVnmJ0iv/4NwNvN7cpHHKBV891s7ouAP9JDpHtEj6nerPKpvHplNsU2N+9sr+ZTBUFRgz8v956DzsvBBTFVUivbJyuYNHRJC54XKs9qKp3FjFE91TncKjwdzBGtOInhdqk/rI/6rmkj3+BlgxdZ6npOPcbcuGgsOEtT7H1G5ADV3pPkWERoE8aIzUc1prpTf5JHiucYkJOIvCTJn+H6YGqySWYcjTtbPpCjS4KRd1KtJqzKlZyKj4RvDl/cItqfYJeH7QKhEhJ5yjXZK8Mnlbz/q8aAu1oNq349EgDuC2HJGsJg45eJ0jTKJSXy+5iU2aNEo+MSP8XuIH1cY0pMFA2Iz7CO203sL5eUJaxUo/v0PA7lNiWf18MIlPBdMwEGChpOraa2kdAhdyOF9v/Q3cLDmo/EkFE1mz5pIUchCg7GYAqdScMDa2laTBo2GPLyDaV6p4zJN7v2FkfqzVn72EQL7FGQSe0UQaA3PzLR4zs4W6z95QdqYvPpL4vsQX+iMou6dyqJyQ6cEyYAvaYrvD50BA/Xb9cTt1HIuYdSkbi39eewg7hshPuCkTeVsGDq0t3Zy2Y7kD/OZ1YHqVWIblhkzHAa1baqN5wR7E4L9tGAbgpBVZozkySugDJxBR4fbS8mBA0MrIdNbJbCuNb9BfwHM2BiBd6I7Kg3Ty1mcBYvYF+uaq/Km8BciGH6WQnBzZ4esxaLZfDY+k9uaJ7U0GoWF0hlgah9X5jUofjRuC9EgvLsGWSKLtxGwwHC7EAj90pJk+/fDa8ivEzlWwzNH64qzizPZWGfyKJBmRcn2YrOb/S1+zUrlQsEKTljGUmtRVxTbqYRmjaP7UyORt+fLic4WvI1E8vycjKaIfLyhQtgFn0nhy14xdolK+cu8jZWna/zkBmcXPoLrq8blK6ptIFfFS+fX3uXkGLeEAM4znlHB89idYtPV9VGavUr1fY4j5WQ53AMI45FuZa4v7Tr2JDJVC3vAkprGXTi3RV69y6Xx4/KWw2ctf7er3SIZPKI+UCnKobM0qYQDC/iyO1WZba5k4beAa+T+71SLahVPe6wPzNf4WxEPq4SUFNKzkU4cKrY6qNkPeLtpkUGSkZe+Db25UxqG3p6l0D3m7fNKqXk36+o+cHLCbGRPsuiur9C8bA7qAHcULOE3VNFm8vJQc8QGsZ0vJAQdhS93ujpsuVcJrs8cTa0JXXK9YrxEOKGr8PySBs9dNLHmIDRKG+m15U0Y1OVrofFK4JzFsClkS9eyqXFLMIUH5QQuWarzhjXeX45dhiHr8cCEwzheXYse/ZnY74Exij4NclR7OfQDoEWs20IMCAzdIY+bqp3sZelChaVLG5yFiu8aC4U6RQumWRSGtSegWYcrN54Eqa8a8azD0Ix+ayoTWbyepWXlpuXbYKaF5AAqt/N9uWb0Zy7TPr8uZthNVjh63bZCRMhAraN6ufthvZIpgjFDg0vmqreGKLd9YVOWeSj9cAks5pyGPNEMJaC2KuOecileboMfh0E0dlUnZ7yhRGhTBas8ctY8BAUwFyG6kdo94Eg28Qy9XQiKGO+tvuKAGxvBXSRotYCTydW7LMSOuzIzvS4S6SwdATkc4hOCqryfK9vZG5Jc0q4B7cMaJo3bU8E8FBmkjneWi0F4kc+Dji4NzsxAzzGoi8rFuHmDFMxQl3JbwC5GkVV4vVVaG9tSMQvhTiZZLM0lD+aKlXULxqkp9ovkzwRjl88AL4AZO1p9kNOfmnHKjdpk9yJg1hJFxRJvjHyhDDsqgvr15RIDh81LZ2RaTGZyn7z0Hs9mzn9S4svUEie7gVrmpPeDKadvLsU1nGsByPKib4bVDAsvoChgbe3S6sgccEYm9hezQ+ZUXstB4mIA/vai9tcrB6UiZru+ZNgnXhAzTx/mW+4fLWnWZTo67aNVUYZdea0eCfJT62pi0a2DYzLIsSTIgJrVXoYwiCgxlPdEkmTaNALqINQyqifg71an7GTbm2tM9uCS0QzIJj1mQ/EZmaLVgkRHn4FZ+WrKtmFkdoFETUTGMpt6LWjdxxSsR+7OuQgIN+GRxdRahdPBB0YNSwEWdnsPT7sa/W8Zdd0SxJqzCRXWtlnxHorzzO9KOWu9QsDcY+mtCcn3Kd+wcBoHCczsLq3FkdRG5jv9dVSCQLRjlQcWPeC50cHtqRJukmb68e9/ZRGKynJUSIuQyvwmQHDRQMhZoACW7JC6l87C/TbP22Zq6qL1veuR/n8jyAwgPahCupDEQegiisHYJAICuSMeYxPbOjT/7idDwf/cuN/wWkTPPYcuQw5Spvxdwoardw+ZGdrrpWu8X1cRFV8L5crftUkr9EiMyb8D8BTd88aztIpd5qMpxmEywRvSwWUQjkQnBe5Nx1Lbv/Rhya4950pSLL0UKHfcD304FYjj51D3z2I1aTD38oPC7N6DGbVQ08b3FuBBWySIDAU1dBGlgRTU+ZmCX/a3ODaByqwoM7qhE/tIqo5FZPWwO4wreO7T5NDGbXlsKgUtGxwK6qgduN9NcX1zKSwYuUVa42+WB+h9vg3ULekFy+0NjQp6M4/X1Q76KVsB9Vl3xbq4r1L68n69XD5jTi/N44G9GZw4z82Ce62PERKhI6sVy7hURtfVstPAOf3UkigJLgeGLa0SMVzKa0nztJ5fBOnojLKx2q/4wuiF+xH3Z1uvzOc+EIKDczdeS7qRxEl6+VbHFR
e6GZQdfa+u8LOBHRYHbSV9USDc4yj49zg7cgwCH8rSYQchsJwkLl3H/Xg9uOPtLbUSG9r9+Tk5OuJBr1x/EUwkmk27G29S2gRo0/8efZgoFT4dcqsNNRZwKXyNkdKJ6wDchJiaivA8K+Wh6VSvQuZ0q/+kXv2YbL02cUvZphZR/p6Nvzcs0hMQXz9DeuuSIZ5ZFpsPlOorpysTql86LnjQREqm5dfrxWnoJRbEMtSOUimaNUTItKTBX+DU+AdkZT6ql5F38SgtGT4bgiQ8w1JGk/UgUxWGs22T/1aTY9gdmzqtiucj+YrOzddRrlQdkE63bcDSdE+jCt+eTRjsRuJcDbicYamDlr0yCQrt+/ht4tr1Hk7mnxyGvCxTRZDY928T+/de0olC8FZNxQIyhpxRXqtsxPQM4c1ay+XCfYenCwPHuirY8P6XYWZ/pbBTgaqwqOKlR+JoVuBKEwP/+6Ml9iBdBo9oRsxLYDHnciqMV4HDmL6NO5Ojv7jjq26zgTlWwixUvyaiZFF4/r1w2SM8kO6+GNyPSmPl98lg+Lwn2LRtwn5HC7j3DYg6P6lfj104RvVw808ri0/q2ONhUCZJ8AUhTN0luWfZ8rYdyZziCQEMAOflfXV8hk7BHR2QfJrIfjG1NAzwFBKS/9lwwW64rNek7pjVdQy7SsKQZ7wX3HwF0o+T3Gw4hgkQh1/pjdxPeDZrqOQeVO1B5gZt2TPDPS0z6DBoDzsq0fRlZioE+v+HzniPvbAPmtha/iKXaJRqxBcXUlDgWX7Hz2enHr/XeV4C+9TDJjYKHRo50Qb6CUW4/iJSawEhKBYKjjEWUIY59klMgecfrb6+6KIEOH47xJ34ZlUXJhxiZn1N/x8jgdMnQivuS51XDK5wg1OeoL7wi6ZrDhmSAfFwyCXvES0FZf/8iyD9T1GMlMiBYjtOdluxflnZZtCUMqgku5YJayUXVqHH1Op3H3TMh3MbiMCZd6p/DTcwnbMygUT3ePcUmsZnpY7Obz59/dmy2ndHW7Qr/UEuPzvx0GaO95DcuooyjyqrW/NdMi29xkR65CaGqFuNvAh8CaYPkq+sh55apbfQ8MswE11C/aQQtA5f2WKJ0/tkzv0IdTNUdvHdA1MP1nRZC8bTrtw+h9OZ9kVsI2zqmAnoKqsRnhTLfWj+fbI8X8sCsJsr50XgJW9wpxd/jXels1IuddE0c4zjYWvtMNtDweZRyHwNXidu0b1CSP5UNdkOum7/1IJp0D3dJnBnDUzzWEW9IQ/U5gAi2K3Wic37JYqRvww2il6tOeguYqdgptHQE9HiOGrncJtXVumjQraZe4T0BqF4PGVTQ/ZGH6GPRNetOvi4TiX0QVsu9ApuI19vp3dAvq4B4rQM3a0ORVM8dVKbKRqG16fnsQcP+n3k2zclj+bCwUvQqXwY1t/LrywT/2DdQHkQTJLvOei8/fTTw4OhAKktp8txPtQpEimeqB5BPljOqYHWYWG6Scq9tCtYHsA5TjCXFXNhdyFDl4UDj8PSOIOxcvUvfG1qBS2nDYE/AQ2CsgRqKoSQhJjECfqgKgKvlrbJqgLScNtFTSPgkD1Ko10jUkQh5syAd5FrPaN2kpTMGiDzdGHDqcHG5PrLk8oV8n3y3yNk22p2AxEOvkvm+xovDancudE+flnluntBcts3ik2bav4QuGWRV6MLRx1wKdIm0H5mjulgAIwAozsqoVksxg3BHe+0ApYOMlKIRR4Zm5Q0uU0k+1zVNu7wSreeWVpouqRn4mIWqEvHe8mbRBlmhLK1Dd0G8sLlCiUnITMciVouw0t1+4U+tiy7i7foZK99ocHbgAfGQDLbWUSzfiM2iCPnViv9juQQfFeqgeBit1NVLpx1ncCuiK17Yelsd67IGoxRWlVcSQ3u8EpLrZU7Q37UPbxQA78azltk0np/ZV8TdP4WmlgRa8dnj/1YAFFfmtdKdrMRh6o6fI/XqlecvqLi/WohFuUWpfEYiEjqvM97l+4xtwX06rEq5wU/eRlxA3lmbJzdrCk1qy5YwTqsO8/SqUOTtbl8mREVJHS0Pt0pZz6qoiuAHiql0BVACzmPBNvkajez0SNO+uP1g+bE9GZPcG9KgfbsdKwZPL3RAm55jrGxImen5QBpBlfS+WIXsV4yd0QrXeipVk3kow7smzYWAvT3zk1IWiFB+qliiODIsdT/+fvs9v3PbDER2e7/Tt5lPjFZCg9UcIURSOOUt3loTw+EM2oJkStVmjEhbt+nRsInsdEnNWfpRBJMcCLvdQC4cj76+w3HGb0nIKy6tEsaz/q6FrE38PdNgVIvEB7aiRDY/8AHmWpZxAvLFM+NuTRx2e8L1K+w9wzSQfafGuh2W/hYJmJorDi17CieUHk40ehgVSIvbxckuyomfYT7T9WieOkzkhKtZyD/5d7viRaUUtsBX8Bv+Sssnast1ecJ1+oR87Qt/63grAvF3IB2cjRm7CqAMzPNN9HqdD/4Ru2FXuu+FkPe2UkjbEa2AJJVYdLoL+Xhb2BKZPFJppxPN9o7c6kuejVLXhry7o8izam0Qi2nMPoJZavLB9JwDtj6b/ZOzBhdqw2b2r8+6O5xD0ZsndsqxsKnxHyStaji+3HBkL/72VOLiSmPFywNa2J5dGqYrF3hy47xrSHZEZIdeiN7oWCUDDdl91ykrQUZUe75Uk33Yx7rqFfK818dToI/ucYCj+xlElEGph1oerzvJT9x8F1uacyxhRZ7z67DmqyaHC4/8AQ3sxMPW7RUxkxROomjOLI6d/sq7DQjljqJ1OtTGGZO7Myzq1FTUKk9Fzo9qcXX4a8IVT7u68xlhD3X7O5QNWf+FTU+4CY3hiBaG0NU2vkoWQYaHx8PJjh8F2nXcPfOBPFJXnGr6J40n3zNYYdoX2aO01dFGn/0M6RhJFLJbxHDgJ1OWlCHhv7UQ8dCoaFTjXYRj+mxKYRjGKvlPCrYShWqWQU0qOfczzFQJ+UYvSlTVF6oSWMIXty5phxMDxTbIOLYqd2T4k+Jtcdc9Ekh7KJQ7Jwd0N1/6T9mT2F4/qpTk6baFQT0tVzcuTgKMfxH+bpSMMERFc8jQfk7Bvqz6/3rWM5Tck61n/KF/Hfj1PQLU9mBen8/rDoiKJiOCoyHV+WhJptZLpJPr+NrH9NkZwRzXkanKnpUFd4PC5oAjwnpqY2Y5LExaTy+rk5Pcq4V1XqG+1hEy8Z/fpFLfzYou8Zu9XRXRP9SOQsg88qMfz3+khGhETRo47p7H+g0IjJ7+d6xXf+dzC5OYa+KqrUMupGVZuQi/Ub5+9L+5eSxF9Dr5583vyP5f02Nb52kMVwZAwoisFeMzcYevW7N+w0bjj6DTgmBeSyoC9c5EZ+tdEaAHDkAMVSQO580GFXiW/tMG+WzcEt2JNJxaaUdHF/w5u2W/nqCpgXUxLcZEBuHNOGwLPs6MlHKqj4VarMECWhk4uUvK6Yf8+CqLRtjLps1kzELCOleVoWUFCNDwU9TShI/O4paEhHfoB+YpSjJ/tPQWUhRhBBvdW7Tl8Hwy+jhMWtgr+HrHUTH7l1wE
d4T/dU9tSt2/M+4e/aHr60OxnSOTat0hOqSaaLm/VwTzxF1jgmhXTfhSI01/Z30s/qowytgvuNbtwQSO1FtXRhUF6y4GkGtQLT34wLesnPqF/NMOQBddVL48fCPm90UwZSlaiFaTFB+6k0FzoPU/gSc/9gQ83vS1LnKzf5+Jy7PR7MwR1VZ2jz5RKWCGDShNwRmO3dzPLmLLLnCgRJPZ0y5DCUrRZYiqwbsymZ/S0tsxKzyhpUUbNmV1BygsZs9CpCmTJfs6waHhzxzSv7u/RZP++HM8RouA4snsMVEOiw0oyBIx4WzCmnVI3mhXBI8yXMjpM0kPor6dNqSvfh/uGLovc7Ggj6UCDGlE5jJgAyEKkJlpNMVcdOfdaVN/c9jg//qdPXJ0dwmaWUzVbTMcH1PhrqUQBFxGekGZjgO26k0hFpVLtCYszNL5SYfUdCiBMLnSnow83uJ11Rt7v92Bv6jiqLgxX/VP4f0ubqqnAmI2DlKCibM60ZiqScX0eLXQssNaz+VJkygD4T+R+oNr93bWCVJAAptQCWGgMvcRyv9iXybhLx6mdxD19vBRDsXTyTA3nKR7qB60ytzTq0nX9kXx5s7KLX6e8zNbB8gDlOVvsem2kQsmTmvh0IR3bgEHUk1WRG/roTtwI/qo2632Xx3zMjRWT2GaTPFqycM41sHcCpU9K6Hhglw9i/Y9qC/UgYdbQeL+L2S4jZVFMbkceEjZ1Jx4V5lJODki+DNHq9kXwjNpCcBqAbov8F+Z5WE0neqmY/ugVtessZkfLByrOWBfdJO4Ra7uWbbZ5S+wb4/IRw+PZwNdj3ayNADoDQMiEwYJXhQwAehwPyQt85lYjZd2y3UKM4c/9KMj1DEUbCdCR3r2w2ylPv+VbztUUEZzJ8R+LDJIzrCpeY38YATodmBtIoce76fpYetMx70lM+S3oruau3D7z6Kn/k1Il7HKy0R4GwqTuB73ujC72tu1nO5YOj2Y0hI2Sd4pgUN2M6DT55c6TIf7BHNNIp+8GgP8ja1EfOcrMBaMHQxK2jLXijNNw/K9JNV31xb9EWttRBGeEGPWOhB1qNleHpUCvtjDaJNhbRwJgmavjK1AxjyLpuyVxonoOQPfXLjLwCTjuqsxwXKXNJhSCfce9wLr9jsfbu9gQ3thxi1Ewr8nNnP4PngbJReQDbQZa0qO7EeTcNxnEVTGXJZFwpF/slZbDfNy2KS3GuFsmM1y4C/Ugk8kIwzRDNsyPfYPNtQ7LxfuZG3mefXKnscyyrJY24xJMqma7GjaIimXxmpE5JdcMgXaIGbTk9VfTcUmh8LjQmy4h2PVhySoCurJmmJNWN1yKEBEB+PGejouP0qvs5aytfiqtcqKZFPEOLeSKw3UyIEn6fqlNRWOdd6u7I7M/xL1WEkXQWisyiWotMJ7lywE4MRU900XzF63H4az5/qWcK1r5H+FAP1K0Gn8JMW37Qqs8q2c7hboZuLs5x/u6nu9RW3x8qmCnbN3rp3tS7OZa79lGT7aXsw9XuWNcrH0Mhw4p3JZVNIoMCHAiVG4R2txPuMfj/k15BBBSQIiZqf8S36ZVpvXY1qEYE03taOtBpohyJ8xmLzV7Al6GyDnwdvaUzU5w5ReQlquwyGTgKKaU9wJGwoSlP8ucouXtfQZvw6lKu0Dfw8TcCmAzzvYGZseptgXerFoKJ/eS9ZPPeSD8czrUPbKA6j/Ha7oeOWAZrxse9UQQtZ237+9bm6vZHtPkhbRiwwLUN2zkFXNmwDAb4uEf4wAaHNY7cP+Mi1yAR18OmAnpGCscXeMQhk4DPw/B1G1SiRlgOyBcPEfaGs6Kr01V7/Vr3L9wQ2RSw+gcPEK1jaNLenHG85NralFjSMyKJvam75u7mVtjS+RTEGCPHCGQLRGGmRUYkGBWmUK7dUzgo+wrs7ejvpiQBvgrQpSNfn8BD8RVJ7y30I2WQCz6MmYnhy8R3dY8PvUBdv/oS8lkMKv2V4HjZcwshI2iPvb7tbDcJV9srzytbAjxgfVPuiGoUP697XNeUOyYr8BDORgfOVRYbRM6gmL3M6vS414e0moPAUWB1u/6bsW8TCnIcmZot1oNt9aYkw9UpSb4Yxid86Z212T5m3nlXgxRvh1Hbkp13m1j3fzYC1WdbSwAFSthSyPh+GpCv3dOB5wZ1OByAI98CrBvMAfG2HXO4AOSmDiNSyM5suWvPna23FwHrtyg7ZFfyB4rZRsyuNKF8/nKzBfZJhzG7ys8jPdGSwGrYKHz60qpxGQdn1dbNrvLxOZ93f63R/cVAIqtvBuUy73B9Z/S7rYMUG3suGf/mFw7PFhcD+VXFUbntpSaflpdZoijWNzpaoqTor/z+5e60gcKt0w6nFE+CzXw0iRodnBus4bW+J66tTlAh6B/OXDmib1aeilZ5nyYVFzQv/cD0L+iaVbE9fPXRWs19evJdPT7890wJk/gxYQwfKUcXpIHFyVwMIuNtLC56N6Pe+wAQu0HiQd0wbTN1XPBk9fsvEpYfISa/VDv+LTYFmUI7og6LCPda+6qb4onQT3I1rtopbg8HOxI6OBZ1f1QphfZq26UBd4A0+HJvjYWMln2PPjVFhnrA+LTQ4TzTaF2wePSr3iT/z5NApiLt159kuJHnckVWx3cR9H6Je7hTf3jA/wuLx7khdPuTzIw6R/9/h+Nov2ZsqZ4Zj+B+TZYfWHQc7Mdb1pGua7FTxiRrvbizZrItlKmWffX5fLffpHgX+DsNxY2LIbYp+ov0Q8G5Ur/au6ZUvS9E+3GYIN2LEBJ4nf0nKL6ffJSRns9wrHAMz9pSwhjrKNBhzJuqDUcsruFLDnweQTmhK3oxAF6YETkOtgDA5qP2boGC4TpBwy53/S+81Oi77LMl6gJI7BB9Mxylashhk0UXJxkx4Wu0/PotN1gg8xUEh769RsmvNQK1/gG82bJhuOFBiRrRjbXIrG5oRlSNQ7TkPHIuh14mR/wPL4Ax10JM1SorDS5mzEGF51juHd6xU0fU3C5bO5FuYp0nZXfjTmnDYoVpeJF2lL3aDHvj8uBANVf/F4cm31CI5A0cld5r1QhX1vFyJFTDd86i3O7KSSX+H7//oRod5hftA38BqA6eV2opywn0BXM0oOWAeN9Y9sspymdl8YrJWrx2egQkvhCaBnIx0KpbTqYdksEa5DgVHco46BahnfuOh/n/iPb/d5utnWHV/eHi8Iy69as0wvSnMM72X+LLHrkgzhf3Bcbx1B9W5HS4ua6LsYT+7dz49JUJQzw8t7x4Oos/5imVeqhMnxCyYnrL8uVFhEf4sMWYxM6PTUjhtzGN2DwAB1dqcD+jTGdusETz0JCEDPf1uljsWgQTSnWpCGt2qBgzolrzLhvVq3aYUs5iEa5dn8wgJhzpisKqEaMYjcd1MmStgqYcvJOcrP4mqBd50lkNbeZaflk4TzwYEaCHtXhNMzZyaB+LKQLjWYXpOgzGpfK8GQD87UOSE31KxfxxVToJyGXF6L+cn5RaQ3R5yW7X9CtNmPFML0MMdbsQsZB6x55LhmRj5YG/s2+ZwOSWY
OsoqgB/+z8zPV6vR0tFxHjtqCheMLvAxUD/Qte+1o6s7PtbPPKJVgsAdWGlL9oXfhp3JlthHADcqYVoOOOKbqqKZYxIVUTOwC366icZN47pIGIkpoHKOnFoYU/ltU+blZyXYoyWMSn3ksqqYrgydln7BsPnIHjrH1QjGjlSlB9GYzJUIISpeg41R79bCf50g3OTqEcbPYQuqsEtmdtJQ2j1jhFy45WOUOjnMERZgdoRYUxHbuh14fESaGY0IVTJZ4iLh19hyk002xgaq1B/awLHNIRVMhEbcGAymKOx2aFHgBUyoQJRZgLGLkSeFmy6mkIAkxvjOMG/kHqaEbVxcIhtVykxtpwojy6jEJbjbJCpRtShzwYMtWBRP3F60KGzT++dMDQPMvkhSqumbhmeyEv2Q+szDrbjbjziUNBW2/PWx/emsw83R2/MqkZIO4+1hbQu95siHgGu51cUva3BQ96lsZNF9FELXnWDEBd9LV6vMVt4sgBp/FS1Xm6QWSJjDPAwT+p6cWO+aVtjmS1lmezR7HA0QJ8hZcI0tFE2JCAPZ7PrQrUiUpzWtHn21LKbuqPn6QaVGBoSsJjkN2kRcMUsNeIJTshkpH1zwkf6eEPlaXO4uyONSqMG09m+/4aXL67Bk76zXQeZP+wJ3aJXBmqEaeEtYa3un00Y/EPP7gZjNtAk3SIXUoz/3lbHTe1IpBsXUBfFZ9PKldkQGsxxnwI6fHJ/Rte/xWlgmv5gQBvFbkrd6eaPLp/JZ/ejcyXOv0sSHrXjM2+O21dlHoUMmlS3h4m4EbHDolKn9y3DORv1YbjTvet8oVdWrHUEUyQuTF0TsrQjfcv2P1KocYfhmcLdg+eglHAvOQFwax6AKOpfKJZ1uEIRAl5hzAm2L35LSPiEtUKYZQyK4kW/7WiM3poL6qjdqKuXrwaIUCIsFRIHDKqK+EnFCRs2SGuKlUx8ndBhO1OkDr3FQrG4rYoNypQmIb6JcvcU7cuS5VTCT0D141OTdQ7lUBjYCymeSMaAeBjNgabqMlxTXBss/6FcWivhb5YykuPdeDU6tfTLZTwfU8OMEm3VwTgzPFY9geuU9nR1SJs4+jcftVvp7nlQQIQjiKwNsYpBnjRMl+G0g7NOxQHh3yq38xIHNSSjmUkbZmw75Y3a5O5oYuabtKvavtGikoi4k1nIb4MMupnZ0zx6hfcA+KY+icnVByRgqSlhAHmeHbqcODKdKz80Gjzff665k79xO+2Z4NuYcUyId9DtLbiHXkEMTYarvADHXj5x8XGI19SI4M99Avfr3pfcRkhDPqeJDGHEbFBB1luf5Rr9juNpYcK9USt7nlel7VfRzNK+supOtrC1yUsDBNFymXp4KhhndorDtTvZAMPbF2fuIhrHhJ5EeuEYLJ480wVKL+Jwyj+DisspD8HIAZ44izCChRMW9j2R3Z+ezavzXR6c3W4JcDe73eqHWYwarK+5gMw4GcoYNHjcqpuYWJXiZ5umIW23IiB2UVL4CdK2k7oKwWWQYB3PJR8DPDvLz65TrXNIxemK7V081RAAFGQ/8q1+vnSQM+/QW/4l9TqnvpyIx3y9zEphcKnK4GJc4IXkbkzRW23o+wc8bs6rUbtz6pCThPWCk1JR3/skkab0YcLplPwWytIAIAD4mE8FonRgI0NiAIhQz5BkiWvfTahzYl5okBmsK7BxgdkAD8a1oePXDeT26RBRVrkcsy0xvKrjUCWca33LI2QaeUsKQvBkBjopMlQOW9zw3j+JgtS6VLoluiPApJx8y+Kbb+GlOvZHxhq1PghnymcDvj7bt5wbFyNqdMq6gtg2Edc7MCHWz7xgAO9zhAkYcFDCyr1U2j8XeCCWhRs9MumiBOTq4wxQyGLKJUBZw8w52Cmrj5o8s5QfPRmYSyu8Zl+1u/FbBZ8/2mhbT1lmh6lc/zRXH6Ujc4Cj9kqhoRP4K84zIun9XJkMNIdPK9As0TkcJBGDfai7lxx+kK+kCLXpiCVrRbRvdJkjhMWlWYVNaAvqQOVaDklyi+lxbCrx33XMMscHsXbVr5T1PW1RZIAUJOBFoRvB0noHOB1eJRL19AEgcKAY5Jf3eNjUSjbzJC40JdWGlHx20o8JlQ64MXCnitYWu59jTE0FPeyuDA+ypx3WN22d+k6Mo9iYwas2QJDqRwIMGBBUwxsk3AN8YWE+8v+HVYL9cNa3O5Efr7uxxJWxnH302uG5x/3UH5jF9L0YieRH9tHtwuxvH+xiOJb3bA7d4KiakeJEfM0ADR92Z0XAzxiJGjxC3xy2EDRfXVnn5TqOGoYgdLB+UlRYRvgeO0lDgeM9z/x2fwcwCYucmu4bbCCdDvo2cMHBMOh4P4fC1bM/pA1zCZDj1HtzhuA7we2+kUBap9izsNQOsR22w//vm2JPysNFtWKM6IS7ptkFrR5Zo3DYoTX0Ogt7fFw7NNU+KzBunAho77HxMFinvIQDZeWO95gVWqA2C+XokRYbr5++pItgGK5hGaMiETu3rdVFVmxJ6Bh5cvWHEXkQeNYe8W64Q1vEGn2nZEhSLQPNyY0R4rSMWlw2m0uYOoRRc0BYnvppMxSJsycrWuH0pi5DXTgKORzFKfATqNL+e+s2OcGFMZVqkQ+1kSNqjQw56mzGRI5GNSuh7IWSVRUVS008b8Q89TktS30bhl26rFLCgTrtirMJQULdT0ThXUvBGnk6WxHpzYKHI9h4yTvTSOeBBQ1iRh8uGuUIRw1mf9pPsPeG4+9vB5PdBpY12QjTdvN603qTesP6POfYgx3uEZPFqrUUVU2ObyzKL1IINSGS8n/BCFzfyHrPYxS+CMtZrWfAWqmN3puUjaeb11uoN5luNDW/Tc8PnNbTixXeP22oYoxjyDidxt0kJuXyh6d64qyodhnY32y5B4KkAm494rVGg3U5a6dU4Oqm2lfqPUm5LJIxLfcIVQWURcyNgfq6QLSBGGpfXPDUReyqmvbXxliq1jVm3F17KlPYtlEt0QbPv84WN27orVMGFVnMCjndN1qi811dHd52B4oJatc2tr1vvR23qXLv5mZ1yZVVzp3NymtB+1P0vU+FpskGO2MNesB18QQ04465D8TDdJaKlQWINT13xYyFHMdvEK61fwi1+bC2tfc3Cz9RGzASwAZ8paE7ulaZnEKH+ooqCWBPdCmX7HtkMirIcWFaSSATu0UTOygolpbHJeAUW+/Yca6HzIB1A5qaq9frIGYn3Zo1z0ZO0RrFTSAwDYo4zhv03loypH7izteBDnhx3qMGiBijFg5DXV0PPcjQ04s6LpEv8FMucUsVZ6iR/jN897nOQBAxLewRB4QR22An7tpF7SJe7INl3a9wiR4fsPcOv1lu+ZbeKJT0SMRnY7+rkXPMTo2T1ytfVQgeZLncGJB9Sz6r3WDNbSgPYJz6zHBz1L0Ft7vLrD7Gu+A0UrSHVGM3zFO+5bHX8T5AAakrpIKyStykndtqCZUPCGlxiJQb7EyyZIYFONvBUAiGSDI8oyh1HhVO8VWungqHGpAd2UYVVbTeWB91r0D55c5TyClue/FGD9b1
4erynlDDxsZse/xusjtBJ0Nbd1rVQIuy/gKKxP35kTOrj9FpjMa79hacb1kIl0DSvD4UIV+F3AwnSlCPyKayMHr8UUhDv/R3aOJ+C1M7GHs6vzZSHL0/GkS3MZXXvnoYSjCh2VwjcrFtdjmp2Z18mkrFLgDNLDKs7GczbN6hxkEuDME86n6HYKi8IARxxth4ku+QxoKEVdtU1+LVsWVEFoObbQVcq1e3q1sQiR/OOCqpReqpn6p06cw3q7MraqJBFYsEAIOyQeAL36LwOsORG4N4NLjLNCOMtlvWeccWtfogapsvwNSL4tvd16SuzGl46oYNWAgy6dnC9oGnCyXmD06VUbKo67+SU52Lmjl5DP0CQ2eH81GFycJA+8kWaM4nptFzh8P8z3+Wgsm9NKTpwt5VKT5RaVyF2NFOHfyye5m0PIhxjWcTe8vhQzuSQcnNjm1tymvnmgJONUv5z1icS8YjAA8ySXF4/VjvugjF8KVghBPcZMAQ/4jYJ4NFBA5Ba12HTO6dtsllZ4yqPuLQ64ME6syVeIXWG3s4WHHOyvgUD6mUGPIwHCrHamakF8EJk92gaxuhXfEuifoJKe07S+GOBshgtwwWbiY0j4GyRHjbdrg1bajGk+axOr/liMcF1Fzio1wDgsATGD0smNI5stQji/VxBvIoYyDaawnutok2gUrObapMsGcPZM+7S+Dk6/Z1zNMVJF/hhvV45DFGn70ZrlSZsHBftja7SEjrYTArKrKzk+wmDzOCGjQ9opB7aWp75fpy60P9yRyvpFmrM6hGWwzWpRmDsr6IdIcALnWeFYxr+AIVfuMa6bdizwkhyVoxXBZrgwJ2f+SGfpIlMEbmJpB1nYFS9eCKxV6bLfVb7q/2k2qcNfHcsWFFq3DiWqkhyUY/dJx1Obvh1EbqQzlXJEF1E6xhn2b2ZV/6hnXhLv4l9jmIHZMisBEk0RhzdY8DNMms7A2AazMY+nCEe6ufFPuxR8yUy7CLCWXAv2II6ggn6HdcEa25DZ+IUBrcApRDhOfQpR7ycWrj1Mbi6I6bCVgCMpnoANvL/FK5Ur1jH8CJ1e2NXCfpButxRMvBKTELY5PQgHiUJsgPOKk8frDK+sfva3V3nVt6e0rCt2MjW2NNYkTSR0DUZ5Mg81X8Xn+I6JPlEQhgRQUWTvqOMIZbnVa5nrR2ueRvOfmnOkw+UrRh6yA8/SirY13KbCdLsKFTBKlL3aNs3rkVC4s/6EwlrFaQZaQ5WkDYTcm6pAsWej008rfImtSm9bY2L1vNHYQyeOWHOulLn8DvPH4g1eMLIHrX74leaBaxc6mAD891EtMLTS691AqeU4E+9wAK0zxRZ4Es2mAvWcMCkc0nJdID8SqTLDTo4AmOLZytqSkv+ltLnU52RLdQofETYXLb8vJyXxJ0xCMv+Tfdctq8a3kcCOvpdQl9+4RTrfSYST9bW51+Jd25gvxa1qRDh8uCAl2bRsdv/ZV/lfjZ3oYCxKUDl3uslDgcSF7p7FS2EqQriH9BGmmqMVUWMyb/TLfG3FptCrO5Og5zZ+MZTom0mi0hgneNjNUnt46Vq5jiKXF1htXCmYW77WNLSz5zt3Uuv4WDvV5EV/oYTmdKvB2izCyzKfp04lGums6su9UJUbyTsMPhgfbjDZfju2KhFe/358U8lJOEyFdOCgEqSBOzUpOTp8Sc+K36qIkkjrPE7/Nf0Ar+pNeeFrPb36hio8zWbzjT77eiFpgi89L4AOHlm2aYvGGyMaJEotNtksBjYVwy4xcNNLd8UGt875k55bDnVsY8z6TdRL8f4uOzU8XwbZaf6rgdDuEgX2mr59+T1Z9fZIf109OTpkrHo393Qb2b/SAntdk8e5Sbq4mOZIUYU9uB05SostP0wOmA7zE8//soQ8tr/e3UeH9mvVOQaQTDSlSoRqvUWtl7EC34U9dbgaW5Hin7D9GQnK3SiQi3i8JY1VjEoEb3C86BWpqBGH6vGRTDu89FkNzePkDiTqRUv/FqbS+WOnCKLpsKTVxJeNF81X9/N3g/u758E6RutoRpX9i6SnhRtLyLbJ6F1s8TNZZTcoB9TXBaB8jt9PpsAkK/nI4+GMtsijgTI68nvxK4L/EClrfZonJ305kBX+/idbyOVartZyf2M5WW5i0lpTxaEKf73c46f3sIKB7DMxQq/g8=\",\"base64\")).toString()),QR)});var a7=w((vR,o7)=>{(function(t,e){typeof vR==\"object\"?o7.exports=e():typeof define==\"function\"&&define.amd?define(e):t.treeify=e()})(vR,function(){function t(n,s){var o=s?\"\\u2514\":\"\\u251C\";return n?o+=\"\\u2500 \":o+=\"\\u2500\\u2500\\u2510\",o}function e(n,s){var o=[];for(var a in n)!n.hasOwnProperty(a)||s&&typeof n[a]==\"function\"||o.push(a);return o}function r(n,s,o,a,l,c,u){var g=\"\",f=0,h,p,m=a.slice(0);if(m.push([s,o])&&a.length>0&&(a.forEach(function(Q,S){S>0&&(g+=(Q[1]?\" \":\"\\u2502\")+\"  \"),!p&&Q[0]===s&&(p=!0)}),g+=t(n,o)+n,l&&(typeof s!=\"object\"||s instanceof Date)&&(g+=\": \"+s),p&&(g+=\" (circular ref.)\"),u(g)),!p&&typeof s==\"object\"){var y=e(s,c);y.forEach(function(Q){h=++f===y.length,r(Q,s[Q],h,m,l,c,u)})}}var i={};return i.asLines=function(n,s,o,a){var l=typeof o!=\"function\"?o:!1;r(\".\",n,!1,[],s,l,a||o)},i.asTree=function(n,s,o){var a=\"\";return r(\".\",n,!1,[],s,o,function(l){a+=l+`\n`}),a},i})});var gA=w(xR=>{\"use strict\";Object.defineProperty(xR,\"__esModule\",{value:!0});xR.default=f7;function f7(){}f7.prototype={diff:function(e,r){var i=arguments.length>2&&arguments[2]!==void 0?arguments[2]:{},n=i.callback;typeof i==\"function\"&&(n=i,i={}),this.options=i;var s=this;function o(m){return n?(setTimeout(function(){n(void 
0,m)},0),!0):m}e=this.castInput(e),r=this.castInput(r),e=this.removeEmpty(this.tokenize(e)),r=this.removeEmpty(this.tokenize(r));var a=r.length,l=e.length,c=1,u=a+l,g=[{newPos:-1,components:[]}],f=this.extractCommon(g[0],r,e,0);if(g[0].newPos+1>=a&&f+1>=l)return o([{value:this.join(r),count:r.length}]);function h(){for(var m=-1*c;m<=c;m+=2){var y=void 0,Q=g[m-1],S=g[m+1],x=(S?S.newPos:0)-m;Q&&(g[m-1]=void 0);var M=Q&&Q.newPos+1<a,Y=S&&0<=x&&x<l;if(!M&&!Y){g[m]=void 0;continue}if(!M||Y&&Q.newPos<S.newPos?(y=hLe(S),s.pushComponent(y.components,void 0,!0)):(y=Q,y.newPos++,s.pushComponent(y.components,!0,void 0)),x=s.extractCommon(y,r,e,m),y.newPos+1>=a&&x+1>=l)return o(fLe(s,y.components,r,e,s.useLongestToken));g[m]=y}c++}if(n)(function m(){setTimeout(function(){if(c>u)return n();h()||m()},0)})();else for(;c<=u;){var p=h();if(p)return p}},pushComponent:function(e,r,i){var n=e[e.length-1];n&&n.added===r&&n.removed===i?e[e.length-1]={count:n.count+1,added:r,removed:i}:e.push({count:1,added:r,removed:i})},extractCommon:function(e,r,i,n){for(var s=r.length,o=i.length,a=e.newPos,l=a-n,c=0;a+1<s&&l+1<o&&this.equals(r[a+1],i[l+1]);)a++,l++,c++;return c&&e.components.push({count:c}),e.newPos=a,l},equals:function(e,r){return this.options.comparator?this.options.comparator(e,r):e===r||this.options.ignoreCase&&e.toLowerCase()===r.toLowerCase()},removeEmpty:function(e){for(var r=[],i=0;i<e.length;i++)e[i]&&r.push(e[i]);return r},castInput:function(e){return e},tokenize:function(e){return e.split(\"\")},join:function(e){return e.join(\"\")}};function fLe(t,e,r,i,n){for(var s=0,o=e.length,a=0,l=0;s<o;s++){var c=e[s];if(c.removed){if(c.value=t.join(i.slice(l,l+c.count)),l+=c.count,s&&e[s-1].added){var g=e[s-1];e[s-1]=e[s],e[s]=g}}else{if(!c.added&&n){var u=r.slice(a,a+c.count);u=u.map(function(h,p){var m=i[l+p];return m.length>h.length?m:h}),c.value=t.join(u)}else c.value=t.join(r.slice(a,a+c.count));a+=c.count,c.added||(l+=c.count)}}var f=e[o-1];return o>1&&typeof f.value==\"string\"&&(f.added||f.removed)&&t.equals(\"\",f.value)&&(e[o-2].value+=f.value,e.pop()),e}function hLe(t){return{newPos:t.newPos,components:t.components.slice(0)}}});var p7=w(lC=>{\"use strict\";Object.defineProperty(lC,\"__esModule\",{value:!0});lC.diffChars=pLe;lC.characterDiff=void 0;var CLe=dLe(gA());function dLe(t){return t&&t.__esModule?t:{default:t}}var h7=new CLe.default;lC.characterDiff=h7;function pLe(t,e,r){return h7.diff(t,e,r)}});var DR=w(PR=>{\"use strict\";Object.defineProperty(PR,\"__esModule\",{value:!0});PR.generateOptions=mLe;function mLe(t,e){if(typeof t==\"function\")e.callback=t;else if(t)for(var r in t)t.hasOwnProperty(r)&&(e[r]=t[r]);return e}});var m7=w(bf=>{\"use strict\";Object.defineProperty(bf,\"__esModule\",{value:!0});bf.diffWords=ELe;bf.diffWordsWithSpace=ILe;bf.wordDiff=void 0;var wLe=yLe(gA()),BLe=DR();function yLe(t){return t&&t.__esModule?t:{default:t}}var d7=/^[A-Za-z\\xC0-\\u02C6\\u02C8-\\u02D7\\u02DE-\\u02FF\\u1E00-\\u1EFF]+$/,C7=/\\S/,cC=new wLe.default;bf.wordDiff=cC;cC.equals=function(t,e){return this.options.ignoreCase&&(t=t.toLowerCase(),e=e.toLowerCase()),t===e||this.options.ignoreWhitespace&&!C7.test(t)&&!C7.test(e)};cC.tokenize=function(t){for(var e=t.split(/(\\s+|[()[\\]{}'\"]|\\b)/),r=0;r<e.length-1;r++)!e[r+1]&&e[r+2]&&d7.test(e[r])&&d7.test(e[r+2])&&(e[r]+=e[r+2],e.splice(r+1,2),r--);return e};function ELe(t,e,r){return r=(0,BLe.generateOptions)(r,{ignoreWhitespace:!0}),cC.diff(t,e,r)}function ILe(t,e,r){return cC.diff(t,e,r)}});var ZB=w(Qf=>{\"use 
strict\";Object.defineProperty(Qf,\"__esModule\",{value:!0});Qf.diffLines=bLe;Qf.diffTrimmedLines=QLe;Qf.lineDiff=void 0;var SLe=vLe(gA()),kLe=DR();function vLe(t){return t&&t.__esModule?t:{default:t}}var XB=new SLe.default;Qf.lineDiff=XB;XB.tokenize=function(t){var e=[],r=t.split(/(\\n|\\r\\n)/);r[r.length-1]||r.pop();for(var i=0;i<r.length;i++){var n=r[i];i%2&&!this.options.newlineIsToken?e[e.length-1]+=n:(this.options.ignoreWhitespace&&(n=n.trim()),e.push(n))}return e};function bLe(t,e,r){return XB.diff(t,e,r)}function QLe(t,e,r){var i=(0,kLe.generateOptions)(r,{ignoreWhitespace:!0});return XB.diff(t,e,i)}});var E7=w(uC=>{\"use strict\";Object.defineProperty(uC,\"__esModule\",{value:!0});uC.diffSentences=xLe;uC.sentenceDiff=void 0;var DLe=PLe(gA());function PLe(t){return t&&t.__esModule?t:{default:t}}var RR=new DLe.default;uC.sentenceDiff=RR;RR.tokenize=function(t){return t.split(/(\\S.+?[.!?])(?=\\s+|$)/)};function xLe(t,e,r){return RR.diff(t,e,r)}});var I7=w(gC=>{\"use strict\";Object.defineProperty(gC,\"__esModule\",{value:!0});gC.diffCss=RLe;gC.cssDiff=void 0;var NLe=FLe(gA());function FLe(t){return t&&t.__esModule?t:{default:t}}var FR=new NLe.default;gC.cssDiff=FR;FR.tokenize=function(t){return t.split(/([{}:;,]|\\s+)/)};function RLe(t,e,r){return FR.diff(t,e,r)}});var w7=w(vf=>{\"use strict\";Object.defineProperty(vf,\"__esModule\",{value:!0});vf.diffJson=LLe;vf.canonicalize=$B;vf.jsonDiff=void 0;var y7=TLe(gA()),OLe=ZB();function TLe(t){return t&&t.__esModule?t:{default:t}}function e0(t){return typeof Symbol==\"function\"&&typeof Symbol.iterator==\"symbol\"?e0=function(r){return typeof r}:e0=function(r){return r&&typeof Symbol==\"function\"&&r.constructor===Symbol&&r!==Symbol.prototype?\"symbol\":typeof r},e0(t)}var MLe=Object.prototype.toString,au=new y7.default;vf.jsonDiff=au;au.useLongestToken=!0;au.tokenize=OLe.lineDiff.tokenize;au.castInput=function(t){var e=this.options,r=e.undefinedReplacement,i=e.stringifyReplacer,n=i===void 0?function(s,o){return typeof o==\"undefined\"?r:o}:i;return typeof t==\"string\"?t:JSON.stringify($B(t,null,null,n),n,\"  \")};au.equals=function(t,e){return y7.default.prototype.equals.call(au,t.replace(/,([\\r\\n])/g,\"$1\"),e.replace(/,([\\r\\n])/g,\"$1\"))};function LLe(t,e,r){return au.diff(t,e,r)}function $B(t,e,r,i,n){e=e||[],r=r||[],i&&(t=i(n,t));var s;for(s=0;s<e.length;s+=1)if(e[s]===t)return r[s];var o;if(MLe.call(t)===\"[object Array]\"){for(e.push(t),o=new Array(t.length),r.push(o),s=0;s<t.length;s+=1)o[s]=$B(t[s],e,r,i,n);return e.pop(),r.pop(),o}if(t&&t.toJSON&&(t=t.toJSON()),e0(t)===\"object\"&&t!==null){e.push(t),o={},r.push(o);var a=[],l;for(l in t)t.hasOwnProperty(l)&&a.push(l);for(a.sort(),s=0;s<a.length;s+=1)l=a[s],o[l]=$B(t[l],e,r,i,l);e.pop(),r.pop()}else o=t;return o}});var B7=w(fC=>{\"use strict\";Object.defineProperty(fC,\"__esModule\",{value:!0});fC.diffArrays=ULe;fC.arrayDiff=void 0;var HLe=KLe(gA());function KLe(t){return t&&t.__esModule?t:{default:t}}var hC=new HLe.default;fC.arrayDiff=hC;hC.tokenize=function(t){return t.slice()};hC.join=hC.removeEmpty=function(t){return t};function ULe(t,e,r){return hC.diff(t,e,r)}});var t0=w(NR=>{\"use strict\";Object.defineProperty(NR,\"__esModule\",{value:!0});NR.parsePatch=jLe;function jLe(t){var e=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{},r=t.split(/\\r\\n|[\\n\\v\\f\\r\\x85]/),i=t.match(/\\r\\n|[\\n\\v\\f\\r\\x85]/g)||[],n=[],s=0;function o(){var c={};for(n.push(c);s<r.length;){var u=r[s];if(/^(\\-\\-\\-|\\+\\+\\+|@@)\\s/.test(u))break;var g=/^(?:Index:|diff(?: -r 
\\w+)+)\\s+(.+?)\\s*$/.exec(u);g&&(c.index=g[1]),s++}for(a(c),a(c),c.hunks=[];s<r.length;){var f=r[s];if(/^(Index:|diff|\\-\\-\\-|\\+\\+\\+)\\s/.test(f))break;if(/^@@/.test(f))c.hunks.push(l());else{if(f&&e.strict)throw new Error(\"Unknown line \"+(s+1)+\" \"+JSON.stringify(f));s++}}}function a(c){var u=/^(---|\\+\\+\\+)\\s+(.*)$/.exec(r[s]);if(u){var g=u[1]===\"---\"?\"old\":\"new\",f=u[2].split(\"\t\",2),h=f[0].replace(/\\\\\\\\/g,\"\\\\\");/^\".*\"$/.test(h)&&(h=h.substr(1,h.length-2)),c[g+\"FileName\"]=h,c[g+\"Header\"]=(f[1]||\"\").trim(),s++}}function l(){for(var c=s,u=r[s++],g=u.split(/@@ -(\\d+)(?:,(\\d+))? \\+(\\d+)(?:,(\\d+))? @@/),f={oldStart:+g[1],oldLines:+g[2]||1,newStart:+g[3],newLines:+g[4]||1,lines:[],linedelimiters:[]},h=0,p=0;s<r.length&&!(r[s].indexOf(\"--- \")===0&&s+2<r.length&&r[s+1].indexOf(\"+++ \")===0&&r[s+2].indexOf(\"@@\")===0);s++){var m=r[s].length==0&&s!=r.length-1?\" \":r[s][0];if(m===\"+\"||m===\"-\"||m===\" \"||m===\"\\\\\")f.lines.push(r[s]),f.linedelimiters.push(i[s]||`\n`),m===\"+\"?h++:m===\"-\"?p++:m===\" \"&&(h++,p++);else break}if(!h&&f.newLines===1&&(f.newLines=0),!p&&f.oldLines===1&&(f.oldLines=0),e.strict){if(h!==f.newLines)throw new Error(\"Added line count did not match for hunk at line \"+(c+1));if(p!==f.oldLines)throw new Error(\"Removed line count did not match for hunk at line \"+(c+1))}return f}for(;s<r.length;)o();return n}});var b7=w(LR=>{\"use strict\";Object.defineProperty(LR,\"__esModule\",{value:!0});LR.default=GLe;function GLe(t,e,r){var i=!0,n=!1,s=!1,o=1;return function a(){if(i&&!s){if(n?o++:i=!1,t+o<=r)return o;s=!0}if(!n)return s||(i=!0),e<=t-o?-o++:(n=!0,a())}}});var S7=w(r0=>{\"use strict\";Object.defineProperty(r0,\"__esModule\",{value:!0});r0.applyPatch=Q7;r0.applyPatches=YLe;var v7=t0(),JLe=qLe(b7());function qLe(t){return t&&t.__esModule?t:{default:t}}function Q7(t,e){var r=arguments.length>2&&arguments[2]!==void 0?arguments[2]:{};if(typeof e==\"string\"&&(e=(0,v7.parsePatch)(e)),Array.isArray(e)){if(e.length>1)throw new Error(\"applyPatch only works with a single input.\");e=e[0]}var i=t.split(/\\r\\n|[\\n\\v\\f\\r\\x85]/),n=t.match(/\\r\\n|[\\n\\v\\f\\r\\x85]/g)||[],s=e.hunks,o=r.compareLine||function(T,L,Ee,we){return L===we},a=0,l=r.fuzzFactor||0,c=0,u=0,g,f;function h(T,L){for(var Ee=0;Ee<T.lines.length;Ee++){var we=T.lines[Ee],qe=we.length>0?we[0]:\" \",re=we.length>0?we.substr(1):we;if(qe===\" \"||qe===\"-\"){if(!o(L+1,i[L],qe,re)&&(a++,a>l))return!1;L++}}return!0}for(var p=0;p<s.length;p++){for(var m=s[p],y=i.length-m.oldLines,Q=0,S=u+m.oldStart-1,x=(0,JLe.default)(S,c,y);Q!==void 0;Q=x())if(h(m,S+Q)){m.offset=u+=Q;break}if(Q===void 0)return!1;c=m.offset+m.oldStart+m.oldLines}for(var M=0,Y=0;Y<s.length;Y++){var U=s[Y],J=U.oldStart+U.offset+M-1;M+=U.newLines-U.oldLines,J<0&&(J=0);for(var W=0;W<U.lines.length;W++){var ee=U.lines[W],Z=ee.length>0?ee[0]:\" \",A=ee.length>0?ee.substr(1):ee,ne=U.linedelimiters[W];if(Z===\" \")J++;else if(Z===\"-\")i.splice(J,1),n.splice(J,1);else if(Z===\"+\")i.splice(J,0,A),n.splice(J,0,ne),J++;else if(Z===\"\\\\\"){var le=U.lines[W-1]?U.lines[W-1][0]:null;le===\"+\"?g=!0:le===\"-\"&&(f=!0)}}}if(g)for(;!i[i.length-1];)i.pop(),n.pop();else f&&(i.push(\"\"),n.push(`\n`));for(var Ae=0;Ae<i.length-1;Ae++)i[Ae]=i[Ae]+n[Ae];return i.join(\"\")}function YLe(t,e){typeof t==\"string\"&&(t=(0,v7.parsePatch)(t));var r=0;function i(){var n=t[r++];if(!n)return e.complete();e.loadFile(n,function(s,o){if(s)return e.complete(s);var a=Q7(o,n,e);e.patched(n,a,function(l){if(l)return 
e.complete(l);i()})})}i()}});var OR=w(pC=>{\"use strict\";Object.defineProperty(pC,\"__esModule\",{value:!0});pC.structuredPatch=k7;pC.createTwoFilesPatch=x7;pC.createPatch=WLe;var zLe=ZB();function TR(t){return XLe(t)||VLe(t)||_Le()}function _Le(){throw new TypeError(\"Invalid attempt to spread non-iterable instance\")}function VLe(t){if(Symbol.iterator in Object(t)||Object.prototype.toString.call(t)===\"[object Arguments]\")return Array.from(t)}function XLe(t){if(Array.isArray(t)){for(var e=0,r=new Array(t.length);e<t.length;e++)r[e]=t[e];return r}}function k7(t,e,r,i,n,s,o){o||(o={}),typeof o.context==\"undefined\"&&(o.context=4);var a=(0,zLe.diffLines)(r,i,o);a.push({value:\"\",lines:[]});function l(Q){return Q.map(function(S){return\" \"+S})}for(var c=[],u=0,g=0,f=[],h=1,p=1,m=function(S){var x=a[S],M=x.lines||x.value.replace(/\\n$/,\"\").split(`\n`);if(x.lines=M,x.added||x.removed){var Y;if(!u){var U=a[S-1];u=h,g=p,U&&(f=o.context>0?l(U.lines.slice(-o.context)):[],u-=f.length,g-=f.length)}(Y=f).push.apply(Y,TR(M.map(function(Ae){return(x.added?\"+\":\"-\")+Ae}))),x.added?p+=M.length:h+=M.length}else{if(u)if(M.length<=o.context*2&&S<a.length-2){var J;(J=f).push.apply(J,TR(l(M)))}else{var W,ee=Math.min(M.length,o.context);(W=f).push.apply(W,TR(l(M.slice(0,ee))));var Z={oldStart:u,oldLines:h-u+ee,newStart:g,newLines:p-g+ee,lines:f};if(S>=a.length-2&&M.length<=o.context){var A=/\\n$/.test(r),ne=/\\n$/.test(i),le=M.length==0&&f.length>Z.oldLines;!A&&le&&f.splice(Z.oldLines,0,\"\\\\ No newline at end of file\"),(!A&&!le||!ne)&&f.push(\"\\\\ No newline at end of file\")}c.push(Z),u=0,g=0,f=[]}h+=M.length,p+=M.length}},y=0;y<a.length;y++)m(y);return{oldFileName:t,newFileName:e,oldHeader:n,newHeader:s,hunks:c}}function x7(t,e,r,i,n,s,o){var a=k7(t,e,r,i,n,s,o),l=[];t==e&&l.push(\"Index: \"+t),l.push(\"===================================================================\"),l.push(\"--- \"+a.oldFileName+(typeof a.oldHeader==\"undefined\"?\"\":\"\t\"+a.oldHeader)),l.push(\"+++ \"+a.newFileName+(typeof a.newHeader==\"undefined\"?\"\":\"\t\"+a.newHeader));for(var c=0;c<a.hunks.length;c++){var u=a.hunks[c];l.push(\"@@ -\"+u.oldStart+\",\"+u.oldLines+\" +\"+u.newStart+\",\"+u.newLines+\" @@\"),l.push.apply(l,u.lines)}return l.join(`\n`)+`\n`}function WLe(t,e,r,i,n,s){return x7(t,t,e,r,i,n,s)}});var D7=w(i0=>{\"use strict\";Object.defineProperty(i0,\"__esModule\",{value:!0});i0.arrayEqual=ZLe;i0.arrayStartsWith=P7;function ZLe(t,e){return t.length!==e.length?!1:P7(t,e)}function P7(t,e){if(e.length>t.length)return!1;for(var r=0;r<e.length;r++)if(e[r]!==t[r])return!1;return!0}});var j7=w(n0=>{\"use strict\";Object.defineProperty(n0,\"__esModule\",{value:!0});n0.calcLineCount=R7;n0.merge=$Le;var eTe=OR(),tTe=t0(),MR=D7();function Sf(t){return nTe(t)||iTe(t)||rTe()}function rTe(){throw new TypeError(\"Invalid attempt to spread non-iterable instance\")}function iTe(t){if(Symbol.iterator in Object(t)||Object.prototype.toString.call(t)===\"[object Arguments]\")return Array.from(t)}function nTe(t){if(Array.isArray(t)){for(var e=0,r=new Array(t.length);e<t.length;e++)r[e]=t[e];return r}}function R7(t){var e=UR(t.lines),r=e.oldLines,i=e.newLines;r!==void 0?t.oldLines=r:delete t.oldLines,i!==void 0?t.newLines=i:delete t.newLines}function $Le(t,e,r){t=F7(t,r),e=F7(e,r);var 
i={};(t.index||e.index)&&(i.index=t.index||e.index),(t.newFileName||e.newFileName)&&(N7(t)?N7(e)?(i.oldFileName=s0(i,t.oldFileName,e.oldFileName),i.newFileName=s0(i,t.newFileName,e.newFileName),i.oldHeader=s0(i,t.oldHeader,e.oldHeader),i.newHeader=s0(i,t.newHeader,e.newHeader)):(i.oldFileName=t.oldFileName,i.newFileName=t.newFileName,i.oldHeader=t.oldHeader,i.newHeader=t.newHeader):(i.oldFileName=e.oldFileName||t.oldFileName,i.newFileName=e.newFileName||t.newFileName,i.oldHeader=e.oldHeader||t.oldHeader,i.newHeader=e.newHeader||t.newHeader)),i.hunks=[];for(var n=0,s=0,o=0,a=0;n<t.hunks.length||s<e.hunks.length;){var l=t.hunks[n]||{oldStart:Infinity},c=e.hunks[s]||{oldStart:Infinity};if(L7(l,c))i.hunks.push(T7(l,o)),n++,a+=l.newLines-l.oldLines;else if(L7(c,l))i.hunks.push(T7(c,a)),s++,o+=c.newLines-c.oldLines;else{var u={oldStart:Math.min(l.oldStart,c.oldStart),oldLines:0,newStart:Math.min(l.newStart+o,c.oldStart+a),newLines:0,lines:[]};sTe(u,l.oldStart,l.lines,c.oldStart,c.lines),s++,n++,i.hunks.push(u)}}return i}function F7(t,e){if(typeof t==\"string\"){if(/^@@/m.test(t)||/^Index:/m.test(t))return(0,tTe.parsePatch)(t)[0];if(!e)throw new Error(\"Must provide a base reference or pass in a patch\");return(0,eTe.structuredPatch)(void 0,void 0,e,t)}return t}function N7(t){return t.newFileName&&t.newFileName!==t.oldFileName}function s0(t,e,r){return e===r?e:(t.conflict=!0,{mine:e,theirs:r})}function L7(t,e){return t.oldStart<e.oldStart&&t.oldStart+t.oldLines<e.oldStart}function T7(t,e){return{oldStart:t.oldStart,oldLines:t.oldLines,newStart:t.newStart+e,newLines:t.newLines,lines:t.lines}}function sTe(t,e,r,i,n){var s={offset:e,lines:r,index:0},o={offset:i,lines:n,index:0};for(M7(t,s,o),M7(t,o,s);s.index<s.lines.length&&o.index<o.lines.length;){var a=s.lines[s.index],l=o.lines[o.index];if((a[0]===\"-\"||a[0]===\"+\")&&(l[0]===\"-\"||l[0]===\"+\"))oTe(t,s,o);else if(a[0]===\"+\"&&l[0]===\" \"){var c;(c=t.lines).push.apply(c,Sf(Au(s)))}else if(l[0]===\"+\"&&a[0]===\" \"){var u;(u=t.lines).push.apply(u,Sf(Au(o)))}else a[0]===\"-\"&&l[0]===\" \"?O7(t,s,o):l[0]===\"-\"&&a[0]===\" \"?O7(t,o,s,!0):a===l?(t.lines.push(a),s.index++,o.index++):KR(t,Au(s),Au(o))}U7(t,s),U7(t,o),R7(t)}function oTe(t,e,r){var i=Au(e),n=Au(r);if(K7(i)&&K7(n)){if((0,MR.arrayStartsWith)(i,n)&&H7(r,i,i.length-n.length)){var s;(s=t.lines).push.apply(s,Sf(i));return}else if((0,MR.arrayStartsWith)(n,i)&&H7(e,n,n.length-i.length)){var o;(o=t.lines).push.apply(o,Sf(n));return}}else if((0,MR.arrayEqual)(i,n)){var a;(a=t.lines).push.apply(a,Sf(i));return}KR(t,i,n)}function O7(t,e,r,i){var n=Au(e),s=aTe(r,n);if(s.merged){var o;(o=t.lines).push.apply(o,Sf(s.merged))}else KR(t,i?s:n,i?n:s)}function KR(t,e,r){t.conflict=!0,t.lines.push({conflict:!0,mine:e,theirs:r})}function M7(t,e,r){for(;e.offset<r.offset&&e.index<e.lines.length;){var i=e.lines[e.index++];t.lines.push(i),e.offset++}}function U7(t,e){for(;e.index<e.lines.length;){var r=e.lines[e.index++];t.lines.push(r)}}function Au(t){for(var e=[],r=t.lines[t.index][0];t.index<t.lines.length;){var i=t.lines[t.index];if(r===\"-\"&&i[0]===\"+\"&&(r=\"+\"),r===i[0])e.push(i),t.index++;else break}return e}function aTe(t,e){for(var r=[],i=[],n=0,s=!1,o=!1;n<e.length&&t.index<t.lines.length;){var a=t.lines[t.index],l=e[n];if(l[0]===\"+\")break;if(s=s||a[0]!==\" \",i.push(l),n++,a[0]===\"+\")for(o=!0;a[0]===\"+\";)r.push(a),a=t.lines[++t.index];l.substr(1)===a.substr(1)?(r.push(a),t.index++):o=!0}if((e[n]||\"\")[0]===\"+\"&&s&&(o=!0),o)return 
r;for(;n<e.length;)i.push(e[n++]);return{merged:i,changes:r}}function K7(t){return t.reduce(function(e,r){return e&&r[0]===\"-\"},!0)}function H7(t,e,r){for(var i=0;i<r;i++){var n=e[e.length-r+i].substr(1);if(t.lines[t.index+i]!==\" \"+n)return!1}return t.index+=r,!0}function UR(t){var e=0,r=0;return t.forEach(function(i){if(typeof i!=\"string\"){var n=UR(i.mine),s=UR(i.theirs);e!==void 0&&(n.oldLines===s.oldLines?e+=n.oldLines:e=void 0),r!==void 0&&(n.newLines===s.newLines?r+=n.newLines:r=void 0)}else r!==void 0&&(i[0]===\"+\"||i[0]===\" \")&&r++,e!==void 0&&(i[0]===\"-\"||i[0]===\" \")&&e++}),{oldLines:e,newLines:r}}});var G7=w(HR=>{\"use strict\";Object.defineProperty(HR,\"__esModule\",{value:!0});HR.convertChangesToDMP=ATe;function ATe(t){for(var e=[],r,i,n=0;n<t.length;n++)r=t[n],r.added?i=1:r.removed?i=-1:i=0,e.push([i,r.value]);return e}});var Y7=w(jR=>{\"use strict\";Object.defineProperty(jR,\"__esModule\",{value:!0});jR.convertChangesToXML=lTe;function lTe(t){for(var e=[],r=0;r<t.length;r++){var i=t[r];i.added?e.push(\"<ins>\"):i.removed&&e.push(\"<del>\"),e.push(cTe(i.value)),i.added?e.push(\"</ins>\"):i.removed&&e.push(\"</del>\")}return e.join(\"\")}function cTe(t){var e=t;return e=e.replace(/&/g,\"&amp;\"),e=e.replace(/</g,\"&lt;\"),e=e.replace(/>/g,\"&gt;\"),e=e.replace(/\"/g,\"&quot;\"),e}});var _7=w(Kr=>{\"use strict\";Object.defineProperty(Kr,\"__esModule\",{value:!0});Object.defineProperty(Kr,\"Diff\",{enumerable:!0,get:function(){return uTe.default}});Object.defineProperty(Kr,\"diffChars\",{enumerable:!0,get:function(){return gTe.diffChars}});Object.defineProperty(Kr,\"diffWords\",{enumerable:!0,get:function(){return q7.diffWords}});Object.defineProperty(Kr,\"diffWordsWithSpace\",{enumerable:!0,get:function(){return q7.diffWordsWithSpace}});Object.defineProperty(Kr,\"diffLines\",{enumerable:!0,get:function(){return J7.diffLines}});Object.defineProperty(Kr,\"diffTrimmedLines\",{enumerable:!0,get:function(){return J7.diffTrimmedLines}});Object.defineProperty(Kr,\"diffSentences\",{enumerable:!0,get:function(){return fTe.diffSentences}});Object.defineProperty(Kr,\"diffCss\",{enumerable:!0,get:function(){return hTe.diffCss}});Object.defineProperty(Kr,\"diffJson\",{enumerable:!0,get:function(){return W7.diffJson}});Object.defineProperty(Kr,\"canonicalize\",{enumerable:!0,get:function(){return W7.canonicalize}});Object.defineProperty(Kr,\"diffArrays\",{enumerable:!0,get:function(){return pTe.diffArrays}});Object.defineProperty(Kr,\"applyPatch\",{enumerable:!0,get:function(){return z7.applyPatch}});Object.defineProperty(Kr,\"applyPatches\",{enumerable:!0,get:function(){return z7.applyPatches}});Object.defineProperty(Kr,\"parsePatch\",{enumerable:!0,get:function(){return dTe.parsePatch}});Object.defineProperty(Kr,\"merge\",{enumerable:!0,get:function(){return CTe.merge}});Object.defineProperty(Kr,\"structuredPatch\",{enumerable:!0,get:function(){return GR.structuredPatch}});Object.defineProperty(Kr,\"createTwoFilesPatch\",{enumerable:!0,get:function(){return GR.createTwoFilesPatch}});Object.defineProperty(Kr,\"createPatch\",{enumerable:!0,get:function(){return GR.createPatch}});Object.defineProperty(Kr,\"convertChangesToDMP\",{enumerable:!0,get:function(){return mTe.convertChangesToDMP}});Object.defineProperty(Kr,\"convertChangesToXML\",{enumerable:!0,get:function(){return ETe.convertChangesToXML}});var uTe=ITe(gA()),gTe=p7(),q7=m7(),J7=ZB(),fTe=E7(),hTe=I7(),W7=w7(),pTe=B7(),z7=S7(),dTe=t0(),CTe=j7(),GR=OR(),mTe=G7(),ETe=Y7();function ITe(t){return 
t&&t.__esModule?t:{default:t}}});var o0=w((Cct,V7)=>{var yTe=Os(),wTe=Id(),BTe=/\\.|\\[(?:[^[\\]]*|([\"'])(?:(?!\\1)[^\\\\]|\\\\.)*?\\1)\\]/,bTe=/^\\w*$/;function QTe(t,e){if(yTe(t))return!1;var r=typeof t;return r==\"number\"||r==\"symbol\"||r==\"boolean\"||t==null||wTe(t)?!0:bTe.test(t)||!BTe.test(t)||e!=null&&t in Object(e)}V7.exports=QTe});var a0=w((mct,X7)=>{var vTe=Hc(),STe=Rn(),kTe=\"[object AsyncFunction]\",xTe=\"[object Function]\",PTe=\"[object GeneratorFunction]\",DTe=\"[object Proxy]\";function RTe(t){if(!STe(t))return!1;var e=vTe(t);return e==xTe||e==PTe||e==kTe||e==DTe}X7.exports=RTe});var $7=w((Ect,Z7)=>{var FTe=Rs(),NTe=FTe[\"__core-js_shared__\"];Z7.exports=NTe});var rX=w((Ict,eX)=>{var YR=$7(),tX=function(){var t=/[^.]+$/.exec(YR&&YR.keys&&YR.keys.IE_PROTO||\"\");return t?\"Symbol(src)_1.\"+t:\"\"}();function LTe(t){return!!tX&&tX in t}eX.exports=LTe});var qR=w((yct,iX)=>{var TTe=Function.prototype,OTe=TTe.toString;function MTe(t){if(t!=null){try{return OTe.call(t)}catch(e){}try{return t+\"\"}catch(e){}}return\"\"}iX.exports=MTe});var sX=w((wct,nX)=>{var UTe=a0(),KTe=rX(),HTe=Rn(),jTe=qR(),GTe=/[\\\\^$.*+?()[\\]{}|]/g,YTe=/^\\[object .+?Constructor\\]$/,qTe=Function.prototype,JTe=Object.prototype,WTe=qTe.toString,zTe=JTe.hasOwnProperty,_Te=RegExp(\"^\"+WTe.call(zTe).replace(GTe,\"\\\\$&\").replace(/hasOwnProperty|(function).*?(?=\\\\\\()| for .+?(?=\\\\\\])/g,\"$1.*?\")+\"$\");function VTe(t){if(!HTe(t)||KTe(t))return!1;var e=UTe(t)?_Te:YTe;return e.test(jTe(t))}nX.exports=VTe});var aX=w((Bct,oX)=>{function XTe(t,e){return t==null?void 0:t[e]}oX.exports=XTe});var vl=w((bct,AX)=>{var ZTe=sX(),$Te=aX();function eOe(t,e){var r=$Te(t,e);return ZTe(r)?r:void 0}AX.exports=eOe});var dC=w((Qct,lX)=>{var tOe=vl(),rOe=tOe(Object,\"create\");lX.exports=rOe});var gX=w((vct,cX)=>{var uX=dC();function iOe(){this.__data__=uX?uX(null):{},this.size=0}cX.exports=iOe});var hX=w((Sct,fX)=>{function nOe(t){var e=this.has(t)&&delete this.__data__[t];return this.size-=e?1:0,e}fX.exports=nOe});var dX=w((kct,pX)=>{var sOe=dC(),oOe=\"__lodash_hash_undefined__\",aOe=Object.prototype,AOe=aOe.hasOwnProperty;function lOe(t){var e=this.__data__;if(sOe){var r=e[t];return r===oOe?void 0:r}return AOe.call(e,t)?e[t]:void 0}pX.exports=lOe});var mX=w((xct,CX)=>{var cOe=dC(),uOe=Object.prototype,gOe=uOe.hasOwnProperty;function fOe(t){var e=this.__data__;return cOe?e[t]!==void 0:gOe.call(e,t)}CX.exports=fOe});var IX=w((Pct,EX)=>{var hOe=dC(),pOe=\"__lodash_hash_undefined__\";function dOe(t,e){var r=this.__data__;return this.size+=this.has(t)?0:1,r[t]=hOe&&e===void 0?pOe:e,this}EX.exports=dOe});var wX=w((Dct,yX)=>{var COe=gX(),mOe=hX(),EOe=dX(),IOe=mX(),yOe=IX();function kf(t){var e=-1,r=t==null?0:t.length;for(this.clear();++e<r;){var i=t[e];this.set(i[0],i[1])}}kf.prototype.clear=COe;kf.prototype.delete=mOe;kf.prototype.get=EOe;kf.prototype.has=IOe;kf.prototype.set=yOe;yX.exports=kf});var bX=w((Rct,BX)=>{function wOe(){this.__data__=[],this.size=0}BX.exports=wOe});var xf=w((Fct,QX)=>{function BOe(t,e){return t===e||t!==t&&e!==e}QX.exports=BOe});var CC=w((Nct,vX)=>{var bOe=xf();function QOe(t,e){for(var r=t.length;r--;)if(bOe(t[r][0],e))return r;return-1}vX.exports=QOe});var kX=w((Lct,SX)=>{var vOe=CC(),SOe=Array.prototype,kOe=SOe.splice;function xOe(t){var e=this.__data__,r=vOe(e,t);if(r<0)return!1;var i=e.length-1;return r==i?e.pop():kOe.call(e,r,1),--this.size,!0}SX.exports=xOe});var PX=w((Tct,xX)=>{var POe=CC();function DOe(t){var e=this.__data__,r=POe(e,t);return r<0?void 0:e[r][1]}xX.exports=DOe});var 
RX=w((Oct,DX)=>{var ROe=CC();function FOe(t){return ROe(this.__data__,t)>-1}DX.exports=FOe});var NX=w((Mct,FX)=>{var NOe=CC();function LOe(t,e){var r=this.__data__,i=NOe(r,t);return i<0?(++this.size,r.push([t,e])):r[i][1]=e,this}FX.exports=LOe});var mC=w((Uct,LX)=>{var TOe=bX(),OOe=kX(),MOe=PX(),UOe=RX(),KOe=NX();function Pf(t){var e=-1,r=t==null?0:t.length;for(this.clear();++e<r;){var i=t[e];this.set(i[0],i[1])}}Pf.prototype.clear=TOe;Pf.prototype.delete=OOe;Pf.prototype.get=MOe;Pf.prototype.has=UOe;Pf.prototype.set=KOe;LX.exports=Pf});var A0=w((Kct,TX)=>{var HOe=vl(),jOe=Rs(),GOe=HOe(jOe,\"Map\");TX.exports=GOe});var UX=w((Hct,OX)=>{var MX=wX(),YOe=mC(),qOe=A0();function JOe(){this.size=0,this.__data__={hash:new MX,map:new(qOe||YOe),string:new MX}}OX.exports=JOe});var HX=w((jct,KX)=>{function WOe(t){var e=typeof t;return e==\"string\"||e==\"number\"||e==\"symbol\"||e==\"boolean\"?t!==\"__proto__\":t===null}KX.exports=WOe});var EC=w((Gct,jX)=>{var zOe=HX();function _Oe(t,e){var r=t.__data__;return zOe(e)?r[typeof e==\"string\"?\"string\":\"hash\"]:r.map}jX.exports=_Oe});var YX=w((Yct,GX)=>{var VOe=EC();function XOe(t){var e=VOe(this,t).delete(t);return this.size-=e?1:0,e}GX.exports=XOe});var JX=w((qct,qX)=>{var ZOe=EC();function $Oe(t){return ZOe(this,t).get(t)}qX.exports=$Oe});var zX=w((Jct,WX)=>{var eMe=EC();function tMe(t){return eMe(this,t).has(t)}WX.exports=tMe});var VX=w((Wct,_X)=>{var rMe=EC();function iMe(t,e){var r=rMe(this,t),i=r.size;return r.set(t,e),this.size+=r.size==i?0:1,this}_X.exports=iMe});var l0=w((zct,XX)=>{var nMe=UX(),sMe=YX(),oMe=JX(),aMe=zX(),AMe=VX();function Df(t){var e=-1,r=t==null?0:t.length;for(this.clear();++e<r;){var i=t[e];this.set(i[0],i[1])}}Df.prototype.clear=nMe;Df.prototype.delete=sMe;Df.prototype.get=oMe;Df.prototype.has=aMe;Df.prototype.set=AMe;XX.exports=Df});var eZ=w((_ct,ZX)=>{var $X=l0(),lMe=\"Expected a function\";function JR(t,e){if(typeof t!=\"function\"||e!=null&&typeof e!=\"function\")throw new TypeError(lMe);var r=function(){var i=arguments,n=e?e.apply(this,i):i[0],s=r.cache;if(s.has(n))return s.get(n);var o=t.apply(this,i);return r.cache=s.set(n,o)||s,o};return r.cache=new(JR.Cache||$X),r}JR.Cache=$X;ZX.exports=JR});var rZ=w((Vct,tZ)=>{var cMe=eZ(),uMe=500;function gMe(t){var e=cMe(t,function(i){return r.size===uMe&&r.clear(),i}),r=e.cache;return e}tZ.exports=gMe});var nZ=w((Xct,iZ)=>{var fMe=rZ(),hMe=/[^.[\\]]+|\\[(?:(-?\\d+(?:\\.\\d+)?)|([\"'])((?:(?!\\2)[^\\\\]|\\\\.)*?)\\2)\\]|(?=(?:\\.|\\[\\])(?:\\.|\\[\\]|$))/g,pMe=/\\\\(\\\\)?/g,dMe=fMe(function(t){var e=[];return t.charCodeAt(0)===46&&e.push(\"\"),t.replace(hMe,function(r,i,n,s){e.push(n?s.replace(pMe,\"$1\"):i||r)}),e});iZ.exports=dMe});var Rf=w((Zct,sZ)=>{var CMe=Os(),mMe=o0(),EMe=nZ(),IMe=nf();function yMe(t,e){return CMe(t)?t:mMe(t,e)?[t]:EMe(IMe(t))}sZ.exports=yMe});var lu=w(($ct,oZ)=>{var wMe=Id(),BMe=1/0;function bMe(t){if(typeof t==\"string\"||wMe(t))return t;var e=t+\"\";return e==\"0\"&&1/t==-BMe?\"-0\":e}oZ.exports=bMe});var IC=w((eut,aZ)=>{var QMe=Rf(),vMe=lu();function SMe(t,e){e=QMe(e,t);for(var r=0,i=e.length;t!=null&&r<i;)t=t[vMe(e[r++])];return r&&r==i?t:void 0}aZ.exports=SMe});var WR=w((tut,AZ)=>{var kMe=vl(),xMe=function(){try{var t=kMe(Object,\"defineProperty\");return t({},\"\",{}),t}catch(e){}}();AZ.exports=xMe});var Ff=w((rut,lZ)=>{var cZ=WR();function PMe(t,e,r){e==\"__proto__\"&&cZ?cZ(t,e,{configurable:!0,enumerable:!0,value:r,writable:!0}):t[e]=r}lZ.exports=PMe});var c0=w((iut,uZ)=>{var 
DMe=Ff(),RMe=xf(),FMe=Object.prototype,NMe=FMe.hasOwnProperty;function LMe(t,e,r){var i=t[e];(!(NMe.call(t,e)&&RMe(i,r))||r===void 0&&!(e in t))&&DMe(t,e,r)}uZ.exports=LMe});var yC=w((nut,gZ)=>{var TMe=9007199254740991,OMe=/^(?:0|[1-9]\\d*)$/;function MMe(t,e){var r=typeof t;return e=e==null?TMe:e,!!e&&(r==\"number\"||r!=\"symbol\"&&OMe.test(t))&&t>-1&&t%1==0&&t<e}gZ.exports=MMe});var zR=w((sut,fZ)=>{var UMe=c0(),KMe=Rf(),HMe=yC(),hZ=Rn(),jMe=lu();function GMe(t,e,r,i){if(!hZ(t))return t;e=KMe(e,t);for(var n=-1,s=e.length,o=s-1,a=t;a!=null&&++n<s;){var l=jMe(e[n]),c=r;if(l===\"__proto__\"||l===\"constructor\"||l===\"prototype\")return t;if(n!=o){var u=a[l];c=i?i(u,l,a):void 0,c===void 0&&(c=hZ(u)?u:HMe(e[n+1])?[]:{})}UMe(a,l,c),a=a[l]}return t}fZ.exports=GMe});var dZ=w((out,pZ)=>{var YMe=IC(),qMe=zR(),JMe=Rf();function WMe(t,e,r){for(var i=-1,n=e.length,s={};++i<n;){var o=e[i],a=YMe(t,o);r(a,o)&&qMe(s,JMe(o,t),a)}return s}pZ.exports=WMe});var mZ=w((aut,CZ)=>{function zMe(t,e){return t!=null&&e in Object(t)}CZ.exports=zMe});var IZ=w((Aut,EZ)=>{var _Me=Hc(),VMe=Zo(),XMe=\"[object Arguments]\";function ZMe(t){return VMe(t)&&_Me(t)==XMe}EZ.exports=ZMe});var wC=w((lut,yZ)=>{var wZ=IZ(),$Me=Zo(),BZ=Object.prototype,e1e=BZ.hasOwnProperty,t1e=BZ.propertyIsEnumerable,r1e=wZ(function(){return arguments}())?wZ:function(t){return $Me(t)&&e1e.call(t,\"callee\")&&!t1e.call(t,\"callee\")};yZ.exports=r1e});var u0=w((cut,bZ)=>{var i1e=9007199254740991;function n1e(t){return typeof t==\"number\"&&t>-1&&t%1==0&&t<=i1e}bZ.exports=n1e});var _R=w((uut,QZ)=>{var s1e=Rf(),o1e=wC(),a1e=Os(),A1e=yC(),l1e=u0(),c1e=lu();function u1e(t,e,r){e=s1e(e,t);for(var i=-1,n=e.length,s=!1;++i<n;){var o=c1e(e[i]);if(!(s=t!=null&&r(t,o)))break;t=t[o]}return s||++i!=n?s:(n=t==null?0:t.length,!!n&&l1e(n)&&A1e(o,n)&&(a1e(t)||o1e(t)))}QZ.exports=u1e});var VR=w((gut,vZ)=>{var g1e=mZ(),f1e=_R();function h1e(t,e){return t!=null&&f1e(t,e,g1e)}vZ.exports=h1e});var kZ=w((fut,SZ)=>{var p1e=dZ(),d1e=VR();function C1e(t,e){return p1e(t,e,function(r,i){return d1e(t,i)})}SZ.exports=C1e});var g0=w((hut,xZ)=>{function m1e(t,e){for(var r=-1,i=e.length,n=t.length;++r<i;)t[n+r]=e[r];return t}xZ.exports=m1e});var FZ=w((put,PZ)=>{var DZ=Kc(),E1e=wC(),I1e=Os(),RZ=DZ?DZ.isConcatSpreadable:void 0;function y1e(t){return I1e(t)||E1e(t)||!!(RZ&&t&&t[RZ])}PZ.exports=y1e});var TZ=w((dut,NZ)=>{var w1e=g0(),B1e=FZ();function LZ(t,e,r,i,n){var s=-1,o=t.length;for(r||(r=B1e),n||(n=[]);++s<o;){var a=t[s];e>0&&r(a)?e>1?LZ(a,e-1,r,i,n):w1e(n,a):i||(n[n.length]=a)}return n}NZ.exports=LZ});var MZ=w((Cut,OZ)=>{var b1e=TZ();function Q1e(t){var e=t==null?0:t.length;return e?b1e(t,1):[]}OZ.exports=Q1e});var KZ=w((mut,UZ)=>{function v1e(t,e,r){switch(r.length){case 0:return t.call(e);case 1:return t.call(e,r[0]);case 2:return t.call(e,r[0],r[1]);case 3:return t.call(e,r[0],r[1],r[2])}return t.apply(e,r)}UZ.exports=v1e});var XR=w((Eut,HZ)=>{var S1e=KZ(),jZ=Math.max;function k1e(t,e,r){return e=jZ(e===void 0?t.length-1:e,0),function(){for(var i=arguments,n=-1,s=jZ(i.length-e,0),o=Array(s);++n<s;)o[n]=i[e+n];n=-1;for(var a=Array(e+1);++n<e;)a[n]=i[n];return a[e]=r(o),S1e(t,this,a)}}HZ.exports=k1e});var YZ=w((Iut,GZ)=>{function x1e(t){return function(){return t}}GZ.exports=x1e});var f0=w((yut,qZ)=>{function P1e(t){return t}qZ.exports=P1e});var zZ=w((wut,JZ)=>{var D1e=YZ(),WZ=WR(),R1e=f0(),F1e=WZ?function(t,e){return WZ(t,\"toString\",{configurable:!0,enumerable:!1,value:D1e(e),writable:!0})}:R1e;JZ.exports=F1e});var VZ=w((But,_Z)=>{var N1e=800,L1e=16,T1e=Date.now;function 
O1e(t){var e=0,r=0;return function(){var i=T1e(),n=L1e-(i-r);if(r=i,n>0){if(++e>=N1e)return arguments[0]}else e=0;return t.apply(void 0,arguments)}}_Z.exports=O1e});var ZR=w((but,XZ)=>{var M1e=zZ(),U1e=VZ(),K1e=U1e(M1e);XZ.exports=K1e});var $Z=w((Qut,ZZ)=>{var H1e=MZ(),j1e=XR(),G1e=ZR();function Y1e(t){return G1e(j1e(t,void 0,H1e),t+\"\")}ZZ.exports=Y1e});var t$=w((vut,e$)=>{var q1e=kZ(),J1e=$Z(),W1e=J1e(function(t,e){return t==null?{}:q1e(t,e)});e$.exports=W1e});var h$=w((wft,u$)=>{\"use strict\";var AF;try{AF=Map}catch(t){}var lF;try{lF=Set}catch(t){}function g$(t,e,r){if(!t||typeof t!=\"object\"||typeof t==\"function\")return t;if(t.nodeType&&\"cloneNode\"in t)return t.cloneNode(!0);if(t instanceof Date)return new Date(t.getTime());if(t instanceof RegExp)return new RegExp(t);if(Array.isArray(t))return t.map(f$);if(AF&&t instanceof AF)return new Map(Array.from(t.entries()));if(lF&&t instanceof lF)return new Set(Array.from(t.values()));if(t instanceof Object){e.push(t);var i=Object.create(t);r.push(i);for(var n in t){var s=e.findIndex(function(o){return o===t[n]});i[n]=s>-1?r[s]:g$(t[n],e,r)}return i}return t}function f$(t){return g$(t,[],[])}u$.exports=f$});var vC=w(cF=>{\"use strict\";Object.defineProperty(cF,\"__esModule\",{value:!0});cF.default=rUe;var iUe=Object.prototype.toString,nUe=Error.prototype.toString,sUe=RegExp.prototype.toString,oUe=typeof Symbol!=\"undefined\"?Symbol.prototype.toString:()=>\"\",aUe=/^Symbol\\((.*)\\)(.*)$/;function AUe(t){return t!=+t?\"NaN\":t===0&&1/t<0?\"-0\":\"\"+t}function p$(t,e=!1){if(t==null||t===!0||t===!1)return\"\"+t;let r=typeof t;if(r===\"number\")return AUe(t);if(r===\"string\")return e?`\"${t}\"`:t;if(r===\"function\")return\"[Function \"+(t.name||\"anonymous\")+\"]\";if(r===\"symbol\")return oUe.call(t).replace(aUe,\"Symbol($1)\");let i=iUe.call(t).slice(8,-1);return i===\"Date\"?isNaN(t.getTime())?\"\"+t:t.toISOString(t):i===\"Error\"||t instanceof Error?\"[\"+nUe.call(t)+\"]\":i===\"RegExp\"?sUe.call(t):null}function rUe(t,e){let r=p$(t,e);return r!==null?r:JSON.stringify(t,function(i,n){let s=p$(this[i],e);return s!==null?s:n},2)}});var fA=w(Bi=>{\"use strict\";Object.defineProperty(Bi,\"__esModule\",{value:!0});Bi.default=Bi.array=Bi.object=Bi.boolean=Bi.date=Bi.number=Bi.string=Bi.mixed=void 0;var d$=lUe(vC());function lUe(t){return t&&t.__esModule?t:{default:t}}var C$={default:\"${path} is invalid\",required:\"${path} is a required field\",oneOf:\"${path} must be one of the following values: ${values}\",notOneOf:\"${path} must not be one of the following values: ${values}\",notType:({path:t,type:e,value:r,originalValue:i})=>{let n=i!=null&&i!==r,s=`${t} must be a \\`${e}\\` type, but the final value was: \\`${(0,d$.default)(r,!0)}\\``+(n?` (cast from the value \\`${(0,d$.default)(i,!0)}\\`).`:\".\");return r===null&&(s+='\\n If \"null\" is intended as an empty value be sure to mark the schema as `.nullable()`'),s},defined:\"${path} must be defined\"};Bi.mixed=C$;var m$={length:\"${path} must be exactly ${length} characters\",min:\"${path} must be at least ${min} characters\",max:\"${path} must be at most ${max} characters\",matches:'${path} must match the following: \"${regex}\"',email:\"${path} must be a valid email\",url:\"${path} must be a valid URL\",uuid:\"${path} must be a valid UUID\",trim:\"${path} must be a trimmed string\",lowercase:\"${path} must be a lowercase string\",uppercase:\"${path} must be a upper case string\"};Bi.string=m$;var E$={min:\"${path} must be greater than or equal to ${min}\",max:\"${path} must be less 
than or equal to ${max}\",lessThan:\"${path} must be less than ${less}\",moreThan:\"${path} must be greater than ${more}\",positive:\"${path} must be a positive number\",negative:\"${path} must be a negative number\",integer:\"${path} must be an integer\"};Bi.number=E$;var I$={min:\"${path} field must be later than ${min}\",max:\"${path} field must be at earlier than ${max}\"};Bi.date=I$;var y$={isValue:\"${path} field must be ${value}\"};Bi.boolean=y$;var w$={noUnknown:\"${path} field has unspecified keys: ${unknown}\"};Bi.object=w$;var B$={min:\"${path} field must have at least ${min} items\",max:\"${path} field must have less than or equal to ${max} items\",length:\"${path} must be have ${length} items\"};Bi.array=B$;var cUe=Object.assign(Object.create(null),{mixed:C$,string:m$,number:E$,date:I$,object:w$,array:B$,boolean:y$});Bi.default=cUe});var Q$=w((Qft,b$)=>{var uUe=Object.prototype,gUe=uUe.hasOwnProperty;function fUe(t,e){return t!=null&&gUe.call(t,e)}b$.exports=fUe});var SC=w((vft,v$)=>{var hUe=Q$(),pUe=_R();function dUe(t,e){return t!=null&&pUe(t,e,hUe)}v$.exports=dUe});var Lf=w(C0=>{\"use strict\";Object.defineProperty(C0,\"__esModule\",{value:!0});C0.default=void 0;var CUe=t=>t&&t.__isYupSchema__;C0.default=CUe});var x$=w(m0=>{\"use strict\";Object.defineProperty(m0,\"__esModule\",{value:!0});m0.default=void 0;var mUe=S$(SC()),EUe=S$(Lf());function S$(t){return t&&t.__esModule?t:{default:t}}var k$=class{constructor(e,r){if(this.refs=e,this.refs=e,typeof r==\"function\"){this.fn=r;return}if(!(0,mUe.default)(r,\"is\"))throw new TypeError(\"`is:` is required for `when()` conditions\");if(!r.then&&!r.otherwise)throw new TypeError(\"either `then:` or `otherwise:` is required for `when()` conditions\");let{is:i,then:n,otherwise:s}=r,o=typeof i==\"function\"?i:(...a)=>a.every(l=>l===i);this.fn=function(...a){let l=a.pop(),c=a.pop(),u=o(...a)?n:s;if(!!u)return typeof u==\"function\"?u(c):c.concat(u.resolve(l))}}resolve(e,r){let i=this.refs.map(s=>s.getValue(r==null?void 0:r.value,r==null?void 0:r.parent,r==null?void 0:r.context)),n=this.fn.apply(e,i.concat(e,r));if(n===void 0||n===e)return e;if(!(0,EUe.default)(n))throw new TypeError(\"conditions must return a schema object\");return n.resolve(r)}},IUe=k$;m0.default=IUe});var gF=w(uF=>{\"use strict\";Object.defineProperty(uF,\"__esModule\",{value:!0});uF.default=yUe;function yUe(t){return t==null?[]:[].concat(t)}});var cu=w(E0=>{\"use strict\";Object.defineProperty(E0,\"__esModule\",{value:!0});E0.default=void 0;var wUe=P$(vC()),BUe=P$(gF());function P$(t){return t&&t.__esModule?t:{default:t}}function fF(){return fF=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var r=arguments[e];for(var i in r)Object.prototype.hasOwnProperty.call(r,i)&&(t[i]=r[i])}return t},fF.apply(this,arguments)}var bUe=/\\$\\{\\s*(\\w+)\\s*\\}/g,kC=class extends Error{static formatError(e,r){let i=r.label||r.path||\"this\";return i!==r.path&&(r=fF({},r,{path:i})),typeof e==\"string\"?e.replace(bUe,(n,s)=>(0,wUe.default)(r[s])):typeof e==\"function\"?e(r):e}static isError(e){return e&&e.name===\"ValidationError\"}constructor(e,r,i,n){super();this.name=\"ValidationError\",this.value=r,this.path=i,this.type=n,this.errors=[],this.inner=[],(0,BUe.default)(e).forEach(s=>{kC.isError(s)?(this.errors.push(...s.errors),this.inner=this.inner.concat(s.inner.length?s.inner:s)):this.errors.push(s)}),this.message=this.errors.length>1?`${this.errors.length} errors 
occurred`:this.errors[0],Error.captureStackTrace&&Error.captureStackTrace(this,kC)}};E0.default=kC});var I0=w(hF=>{\"use strict\";Object.defineProperty(hF,\"__esModule\",{value:!0});hF.default=QUe;var pF=vUe(cu());function vUe(t){return t&&t.__esModule?t:{default:t}}var SUe=t=>{let e=!1;return(...r)=>{e||(e=!0,t(...r))}};function QUe(t,e){let{endEarly:r,tests:i,args:n,value:s,errors:o,sort:a,path:l}=t,c=SUe(e),u=i.length,g=[];if(o=o||[],!u)return o.length?c(new pF.default(o,s,l)):c(null,s);for(let f=0;f<i.length;f++)i[f](n,function(m){if(m){if(!pF.default.isError(m))return c(m,s);if(r)return m.value=s,c(m,s);g.push(m)}if(--u<=0){if(g.length&&(a&&g.sort(a),o.length&&g.push(...o),o=g),o.length){c(new pF.default(o,s,l),s);return}c(null,s)}})}});var R$=w((Rft,D$)=>{function kUe(t){return function(e,r,i){for(var n=-1,s=Object(e),o=i(e),a=o.length;a--;){var l=o[t?a:++n];if(r(s[l],l,s)===!1)break}return e}}D$.exports=kUe});var dF=w((Fft,F$)=>{var xUe=R$(),PUe=xUe();F$.exports=PUe});var L$=w((Nft,N$)=>{function DUe(t,e){for(var r=-1,i=Array(t);++r<t;)i[r]=e(r);return i}N$.exports=DUe});var O$=w((Lft,T$)=>{function RUe(){return!1}T$.exports=RUe});var PC=w((xC,Tf)=>{var FUe=Rs(),NUe=O$(),M$=typeof xC==\"object\"&&xC&&!xC.nodeType&&xC,U$=M$&&typeof Tf==\"object\"&&Tf&&!Tf.nodeType&&Tf,LUe=U$&&U$.exports===M$,K$=LUe?FUe.Buffer:void 0,TUe=K$?K$.isBuffer:void 0,OUe=TUe||NUe;Tf.exports=OUe});var j$=w((Tft,H$)=>{var MUe=Hc(),UUe=u0(),KUe=Zo(),HUe=\"[object Arguments]\",jUe=\"[object Array]\",GUe=\"[object Boolean]\",YUe=\"[object Date]\",qUe=\"[object Error]\",JUe=\"[object Function]\",WUe=\"[object Map]\",zUe=\"[object Number]\",_Ue=\"[object Object]\",VUe=\"[object RegExp]\",XUe=\"[object Set]\",ZUe=\"[object String]\",$Ue=\"[object WeakMap]\",eKe=\"[object ArrayBuffer]\",tKe=\"[object DataView]\",rKe=\"[object Float32Array]\",iKe=\"[object Float64Array]\",nKe=\"[object Int8Array]\",sKe=\"[object Int16Array]\",oKe=\"[object Int32Array]\",aKe=\"[object Uint8Array]\",AKe=\"[object Uint8ClampedArray]\",lKe=\"[object Uint16Array]\",cKe=\"[object Uint32Array]\",wr={};wr[rKe]=wr[iKe]=wr[nKe]=wr[sKe]=wr[oKe]=wr[aKe]=wr[AKe]=wr[lKe]=wr[cKe]=!0;wr[HUe]=wr[jUe]=wr[eKe]=wr[GUe]=wr[tKe]=wr[YUe]=wr[qUe]=wr[JUe]=wr[WUe]=wr[zUe]=wr[_Ue]=wr[VUe]=wr[XUe]=wr[ZUe]=wr[$Ue]=!1;function uKe(t){return KUe(t)&&UUe(t.length)&&!!wr[MUe(t)]}H$.exports=uKe});var y0=w((Oft,G$)=>{function gKe(t){return function(e){return t(e)}}G$.exports=gKe});var w0=w((DC,Of)=>{var fKe=ux(),Y$=typeof DC==\"object\"&&DC&&!DC.nodeType&&DC,RC=Y$&&typeof Of==\"object\"&&Of&&!Of.nodeType&&Of,hKe=RC&&RC.exports===Y$,CF=hKe&&fKe.process,pKe=function(){try{var t=RC&&RC.require&&RC.require(\"util\").types;return t||CF&&CF.binding&&CF.binding(\"util\")}catch(e){}}();Of.exports=pKe});var B0=w((Mft,q$)=>{var dKe=j$(),CKe=y0(),J$=w0(),W$=J$&&J$.isTypedArray,mKe=W$?CKe(W$):dKe;q$.exports=mKe});var mF=w((Uft,z$)=>{var EKe=L$(),IKe=wC(),yKe=Os(),wKe=PC(),BKe=yC(),bKe=B0(),QKe=Object.prototype,vKe=QKe.hasOwnProperty;function SKe(t,e){var r=yKe(t),i=!r&&IKe(t),n=!r&&!i&&wKe(t),s=!r&&!i&&!n&&bKe(t),o=r||i||n||s,a=o?EKe(t.length,String):[],l=a.length;for(var c in t)(e||vKe.call(t,c))&&!(o&&(c==\"length\"||n&&(c==\"offset\"||c==\"parent\")||s&&(c==\"buffer\"||c==\"byteLength\"||c==\"byteOffset\")||BKe(c,l)))&&a.push(c);return a}z$.exports=SKe});var b0=w((Kft,_$)=>{var kKe=Object.prototype;function xKe(t){var e=t&&t.constructor,r=typeof e==\"function\"&&e.prototype||kKe;return t===r}_$.exports=xKe});var EF=w((Hft,V$)=>{function PKe(t,e){return function(r){return 
t(e(r))}}V$.exports=PKe});var Z$=w((jft,X$)=>{var DKe=EF(),RKe=DKe(Object.keys,Object);X$.exports=RKe});var eee=w((Gft,$$)=>{var FKe=b0(),NKe=Z$(),LKe=Object.prototype,TKe=LKe.hasOwnProperty;function OKe(t){if(!FKe(t))return NKe(t);var e=[];for(var r in Object(t))TKe.call(t,r)&&r!=\"constructor\"&&e.push(r);return e}$$.exports=OKe});var FC=w((Yft,tee)=>{var MKe=a0(),UKe=u0();function KKe(t){return t!=null&&UKe(t.length)&&!MKe(t)}tee.exports=KKe});var Mf=w((qft,ree)=>{var HKe=mF(),jKe=eee(),GKe=FC();function YKe(t){return GKe(t)?HKe(t):jKe(t)}ree.exports=YKe});var IF=w((Jft,iee)=>{var qKe=dF(),JKe=Mf();function WKe(t,e){return t&&qKe(t,e,JKe)}iee.exports=WKe});var see=w((Wft,nee)=>{var zKe=mC();function _Ke(){this.__data__=new zKe,this.size=0}nee.exports=_Ke});var aee=w((zft,oee)=>{function VKe(t){var e=this.__data__,r=e.delete(t);return this.size=e.size,r}oee.exports=VKe});var lee=w((_ft,Aee)=>{function XKe(t){return this.__data__.get(t)}Aee.exports=XKe});var uee=w((Vft,cee)=>{function ZKe(t){return this.__data__.has(t)}cee.exports=ZKe});var fee=w((Xft,gee)=>{var $Ke=mC(),e2e=A0(),t2e=l0(),r2e=200;function i2e(t,e){var r=this.__data__;if(r instanceof $Ke){var i=r.__data__;if(!e2e||i.length<r2e-1)return i.push([t,e]),this.size=++r.size,this;r=this.__data__=new t2e(i)}return r.set(t,e),this.size=r.size,this}gee.exports=i2e});var NC=w((Zft,hee)=>{var n2e=mC(),s2e=see(),o2e=aee(),a2e=lee(),A2e=uee(),l2e=fee();function Uf(t){var e=this.__data__=new n2e(t);this.size=e.size}Uf.prototype.clear=s2e;Uf.prototype.delete=o2e;Uf.prototype.get=a2e;Uf.prototype.has=A2e;Uf.prototype.set=l2e;hee.exports=Uf});var dee=w(($ft,pee)=>{var c2e=\"__lodash_hash_undefined__\";function u2e(t){return this.__data__.set(t,c2e),this}pee.exports=u2e});var mee=w((eht,Cee)=>{function g2e(t){return this.__data__.has(t)}Cee.exports=g2e});var Iee=w((tht,Eee)=>{var f2e=l0(),h2e=dee(),p2e=mee();function Q0(t){var e=-1,r=t==null?0:t.length;for(this.__data__=new f2e;++e<r;)this.add(t[e])}Q0.prototype.add=Q0.prototype.push=h2e;Q0.prototype.has=p2e;Eee.exports=Q0});var wee=w((rht,yee)=>{function d2e(t,e){for(var r=-1,i=t==null?0:t.length;++r<i;)if(e(t[r],r,t))return!0;return!1}yee.exports=d2e});var bee=w((iht,Bee)=>{function C2e(t,e){return t.has(e)}Bee.exports=C2e});var yF=w((nht,Qee)=>{var m2e=Iee(),E2e=wee(),I2e=bee(),y2e=1,w2e=2;function B2e(t,e,r,i,n,s){var o=r&y2e,a=t.length,l=e.length;if(a!=l&&!(o&&l>a))return!1;var c=s.get(t),u=s.get(e);if(c&&u)return c==e&&u==t;var g=-1,f=!0,h=r&w2e?new m2e:void 0;for(s.set(t,e),s.set(e,t);++g<a;){var p=t[g],m=e[g];if(i)var y=o?i(m,p,g,e,t,s):i(p,m,g,t,e,s);if(y!==void 0){if(y)continue;f=!1;break}if(h){if(!E2e(e,function(Q,S){if(!I2e(h,S)&&(p===Q||n(p,Q,r,i,s)))return h.push(S)})){f=!1;break}}else if(!(p===m||n(p,m,r,i,s))){f=!1;break}}return s.delete(t),s.delete(e),f}Qee.exports=B2e});var wF=w((sht,vee)=>{var b2e=Rs(),Q2e=b2e.Uint8Array;vee.exports=Q2e});var kee=w((oht,See)=>{function v2e(t){var e=-1,r=Array(t.size);return t.forEach(function(i,n){r[++e]=[n,i]}),r}See.exports=v2e});var Pee=w((aht,xee)=>{function S2e(t){var e=-1,r=Array(t.size);return t.forEach(function(i){r[++e]=i}),r}xee.exports=S2e});var Lee=w((Aht,Dee)=>{var Ree=Kc(),Fee=wF(),k2e=xf(),x2e=yF(),P2e=kee(),D2e=Pee(),R2e=1,F2e=2,N2e=\"[object Boolean]\",L2e=\"[object Date]\",T2e=\"[object Error]\",O2e=\"[object Map]\",M2e=\"[object Number]\",U2e=\"[object RegExp]\",K2e=\"[object Set]\",H2e=\"[object String]\",j2e=\"[object Symbol]\",G2e=\"[object ArrayBuffer]\",Y2e=\"[object DataView]\",Nee=Ree?Ree.prototype:void 
0,BF=Nee?Nee.valueOf:void 0;function q2e(t,e,r,i,n,s,o){switch(r){case Y2e:if(t.byteLength!=e.byteLength||t.byteOffset!=e.byteOffset)return!1;t=t.buffer,e=e.buffer;case G2e:return!(t.byteLength!=e.byteLength||!s(new Fee(t),new Fee(e)));case N2e:case L2e:case M2e:return k2e(+t,+e);case T2e:return t.name==e.name&&t.message==e.message;case U2e:case H2e:return t==e+\"\";case O2e:var a=P2e;case K2e:var l=i&R2e;if(a||(a=D2e),t.size!=e.size&&!l)return!1;var c=o.get(t);if(c)return c==e;i|=F2e,o.set(t,e);var u=x2e(a(t),a(e),i,n,s,o);return o.delete(t),u;case j2e:if(BF)return BF.call(t)==BF.call(e)}return!1}Dee.exports=q2e});var bF=w((lht,Tee)=>{var J2e=g0(),W2e=Os();function z2e(t,e,r){var i=e(t);return W2e(t)?i:J2e(i,r(t))}Tee.exports=z2e});var Mee=w((cht,Oee)=>{function _2e(t,e){for(var r=-1,i=t==null?0:t.length,n=0,s=[];++r<i;){var o=t[r];e(o,r,t)&&(s[n++]=o)}return s}Oee.exports=_2e});var QF=w((uht,Uee)=>{function V2e(){return[]}Uee.exports=V2e});var v0=w((ght,Kee)=>{var X2e=Mee(),Z2e=QF(),$2e=Object.prototype,eHe=$2e.propertyIsEnumerable,Hee=Object.getOwnPropertySymbols,tHe=Hee?function(t){return t==null?[]:(t=Object(t),X2e(Hee(t),function(e){return eHe.call(t,e)}))}:Z2e;Kee.exports=tHe});var vF=w((fht,jee)=>{var rHe=bF(),iHe=v0(),nHe=Mf();function sHe(t){return rHe(t,nHe,iHe)}jee.exports=sHe});var qee=w((hht,Gee)=>{var Yee=vF(),oHe=1,aHe=Object.prototype,AHe=aHe.hasOwnProperty;function lHe(t,e,r,i,n,s){var o=r&oHe,a=Yee(t),l=a.length,c=Yee(e),u=c.length;if(l!=u&&!o)return!1;for(var g=l;g--;){var f=a[g];if(!(o?f in e:AHe.call(e,f)))return!1}var h=s.get(t),p=s.get(e);if(h&&p)return h==e&&p==t;var m=!0;s.set(t,e),s.set(e,t);for(var y=o;++g<l;){f=a[g];var Q=t[f],S=e[f];if(i)var x=o?i(S,Q,f,e,t,s):i(Q,S,f,t,e,s);if(!(x===void 0?Q===S||n(Q,S,r,i,s):x)){m=!1;break}y||(y=f==\"constructor\")}if(m&&!y){var M=t.constructor,Y=e.constructor;M!=Y&&\"constructor\"in t&&\"constructor\"in e&&!(typeof M==\"function\"&&M instanceof M&&typeof Y==\"function\"&&Y instanceof Y)&&(m=!1)}return s.delete(t),s.delete(e),m}Gee.exports=lHe});var Wee=w((pht,Jee)=>{var cHe=vl(),uHe=Rs(),gHe=cHe(uHe,\"DataView\");Jee.exports=gHe});var _ee=w((dht,zee)=>{var fHe=vl(),hHe=Rs(),pHe=fHe(hHe,\"Promise\");zee.exports=pHe});var Xee=w((Cht,Vee)=>{var dHe=vl(),CHe=Rs(),mHe=dHe(CHe,\"Set\");Vee.exports=mHe});var $ee=w((mht,Zee)=>{var EHe=vl(),IHe=Rs(),yHe=EHe(IHe,\"WeakMap\");Zee.exports=yHe});var LC=w((Eht,ete)=>{var SF=Wee(),kF=A0(),xF=_ee(),PF=Xee(),DF=$ee(),tte=Hc(),Kf=qR(),rte=\"[object Map]\",wHe=\"[object Object]\",ite=\"[object Promise]\",nte=\"[object Set]\",ste=\"[object WeakMap]\",ote=\"[object DataView]\",BHe=Kf(SF),bHe=Kf(kF),QHe=Kf(xF),vHe=Kf(PF),SHe=Kf(DF),uu=tte;(SF&&uu(new SF(new ArrayBuffer(1)))!=ote||kF&&uu(new kF)!=rte||xF&&uu(xF.resolve())!=ite||PF&&uu(new PF)!=nte||DF&&uu(new DF)!=ste)&&(uu=function(t){var e=tte(t),r=e==wHe?t.constructor:void 0,i=r?Kf(r):\"\";if(i)switch(i){case BHe:return ote;case bHe:return rte;case QHe:return ite;case vHe:return nte;case SHe:return ste}return e});ete.exports=uu});var hte=w((Iht,ate)=>{var RF=NC(),kHe=yF(),xHe=Lee(),PHe=qee(),Ate=LC(),lte=Os(),cte=PC(),DHe=B0(),RHe=1,ute=\"[object Arguments]\",gte=\"[object Array]\",S0=\"[object Object]\",FHe=Object.prototype,fte=FHe.hasOwnProperty;function NHe(t,e,r,i,n,s){var o=lte(t),a=lte(e),l=o?gte:Ate(t),c=a?gte:Ate(e);l=l==ute?S0:l,c=c==ute?S0:c;var u=l==S0,g=c==S0,f=l==c;if(f&&cte(t)){if(!cte(e))return!1;o=!0,u=!1}if(f&&!u)return s||(s=new RF),o||DHe(t)?kHe(t,e,r,i,n,s):xHe(t,e,l,r,i,n,s);if(!(r&RHe)){var 
h=u&&fte.call(t,\"__wrapped__\"),p=g&&fte.call(e,\"__wrapped__\");if(h||p){var m=h?t.value():t,y=p?e.value():e;return s||(s=new RF),n(m,y,r,i,s)}}return f?(s||(s=new RF),PHe(t,e,r,i,n,s)):!1}ate.exports=NHe});var FF=w((yht,pte)=>{var LHe=hte(),dte=Zo();function Cte(t,e,r,i,n){return t===e?!0:t==null||e==null||!dte(t)&&!dte(e)?t!==t&&e!==e:LHe(t,e,r,i,Cte,n)}pte.exports=Cte});var Ete=w((wht,mte)=>{var THe=NC(),OHe=FF(),MHe=1,UHe=2;function KHe(t,e,r,i){var n=r.length,s=n,o=!i;if(t==null)return!s;for(t=Object(t);n--;){var a=r[n];if(o&&a[2]?a[1]!==t[a[0]]:!(a[0]in t))return!1}for(;++n<s;){a=r[n];var l=a[0],c=t[l],u=a[1];if(o&&a[2]){if(c===void 0&&!(l in t))return!1}else{var g=new THe;if(i)var f=i(c,u,l,t,e,g);if(!(f===void 0?OHe(u,c,MHe|UHe,i,g):f))return!1}}return!0}mte.exports=KHe});var NF=w((Bht,Ite)=>{var HHe=Rn();function jHe(t){return t===t&&!HHe(t)}Ite.exports=jHe});var wte=w((bht,yte)=>{var GHe=NF(),YHe=Mf();function qHe(t){for(var e=YHe(t),r=e.length;r--;){var i=e[r],n=t[i];e[r]=[i,n,GHe(n)]}return e}yte.exports=qHe});var LF=w((Qht,Bte)=>{function JHe(t,e){return function(r){return r==null?!1:r[t]===e&&(e!==void 0||t in Object(r))}}Bte.exports=JHe});var Qte=w((vht,bte)=>{var WHe=Ete(),zHe=wte(),_He=LF();function VHe(t){var e=zHe(t);return e.length==1&&e[0][2]?_He(e[0][0],e[0][1]):function(r){return r===t||WHe(r,t,e)}}bte.exports=VHe});var k0=w((Sht,vte)=>{var XHe=IC();function ZHe(t,e,r){var i=t==null?void 0:XHe(t,e);return i===void 0?r:i}vte.exports=ZHe});var kte=w((kht,Ste)=>{var $He=FF(),eje=k0(),tje=VR(),rje=o0(),ije=NF(),nje=LF(),sje=lu(),oje=1,aje=2;function Aje(t,e){return rje(t)&&ije(e)?nje(sje(t),e):function(r){var i=eje(r,t);return i===void 0&&i===e?tje(r,t):$He(e,i,oje|aje)}}Ste.exports=Aje});var Pte=w((xht,xte)=>{function lje(t){return function(e){return e==null?void 0:e[t]}}xte.exports=lje});var Rte=w((Pht,Dte)=>{var cje=IC();function uje(t){return function(e){return cje(e,t)}}Dte.exports=uje});var Nte=w((Dht,Fte)=>{var gje=Pte(),fje=Rte(),hje=o0(),pje=lu();function dje(t){return hje(t)?gje(pje(t)):fje(t)}Fte.exports=dje});var TF=w((Rht,Lte)=>{var Cje=Qte(),mje=kte(),Eje=f0(),Ije=Os(),yje=Nte();function wje(t){return typeof t==\"function\"?t:t==null?Eje:typeof t==\"object\"?Ije(t)?mje(t[0],t[1]):Cje(t):yje(t)}Lte.exports=wje});var OF=w((Fht,Tte)=>{var Bje=Ff(),bje=IF(),Qje=TF();function vje(t,e){var r={};return e=Qje(e,3),bje(t,function(i,n,s){Bje(r,n,e(i,n,s))}),r}Tte.exports=vje});var TC=w((Nht,Ote)=>{\"use strict\";function gu(t){this._maxSize=t,this.clear()}gu.prototype.clear=function(){this._size=0,this._values=Object.create(null)};gu.prototype.get=function(t){return this._values[t]};gu.prototype.set=function(t,e){return this._size>=this._maxSize&&this.clear(),t in this._values||this._size++,this._values[t]=e};var Sje=/[^.^\\]^[]+|(?=\\[\\]|\\.\\.)/g,Mte=/^\\d+$/,kje=/^\\d/,xje=/[~`!#$%\\^&*+=\\-\\[\\]\\\\';,/{}|\\\\\":<>\\?]/g,Pje=/^\\s*(['\"]?)(.*?)(\\1)\\s*$/,MF=512,Ute=new gu(MF),Kte=new gu(MF),Hte=new gu(MF);Ote.exports={Cache:gu,split:KF,normalizePath:UF,setter:function(t){var e=UF(t);return Kte.get(t)||Kte.set(t,function(i,n){for(var s=0,o=e.length,a=i;s<o-1;){var l=e[s];if(l===\"__proto__\"||l===\"constructor\"||l===\"prototype\")return i;a=a[e[s++]]}a[e[s]]=n})},getter:function(t,e){var r=UF(t);return Hte.get(t)||Hte.set(t,function(n){for(var s=0,o=r.length;s<o;)if(n!=null||!e)n=n[r[s++]];else return;return n})},join:function(t){return t.reduce(function(e,r){return 
e+(HF(r)||Mte.test(r)?\"[\"+r+\"]\":(e?\".\":\"\")+r)},\"\")},forEach:function(t,e,r){Dje(Array.isArray(t)?t:KF(t),e,r)}};function UF(t){return Ute.get(t)||Ute.set(t,KF(t).map(function(e){return e.replace(Pje,\"$2\")}))}function KF(t){return t.match(Sje)}function Dje(t,e,r){var i=t.length,n,s,o,a;for(s=0;s<i;s++)n=t[s],n&&(Rje(n)&&(n='\"'+n+'\"'),a=HF(n),o=!a&&/^\\d+$/.test(n),e.call(r,n,a,o,s,t))}function HF(t){return typeof t==\"string\"&&t&&[\"'\",'\"'].indexOf(t.charAt(0))!==-1}function Fje(t){return t.match(kje)&&!t.match(Mte)}function Nje(t){return xje.test(t)}function Rje(t){return!HF(t)&&(Fje(t)||Nje(t))}});var fu=w(OC=>{\"use strict\";Object.defineProperty(OC,\"__esModule\",{value:!0});OC.create=Lje;OC.default=void 0;var Tje=TC(),x0={context:\"$\",value:\".\"};function Lje(t,e){return new P0(t,e)}var P0=class{constructor(e,r={}){if(typeof e!=\"string\")throw new TypeError(\"ref must be a string, got: \"+e);if(this.key=e.trim(),e===\"\")throw new TypeError(\"ref must be a non-empty string\");this.isContext=this.key[0]===x0.context,this.isValue=this.key[0]===x0.value,this.isSibling=!this.isContext&&!this.isValue;let i=this.isContext?x0.context:this.isValue?x0.value:\"\";this.path=this.key.slice(i.length),this.getter=this.path&&(0,Tje.getter)(this.path,!0),this.map=r.map}getValue(e,r,i){let n=this.isContext?i:this.isValue?e:r;return this.getter&&(n=this.getter(n||{})),this.map&&(n=this.map(n)),n}cast(e,r){return this.getValue(e,r==null?void 0:r.parent,r==null?void 0:r.context)}resolve(){return this}describe(){return{type:\"ref\",key:this.key}}toString(){return`Ref(${this.key})`}static isRef(e){return e&&e.__isYupRef}};OC.default=P0;P0.prototype.__isYupRef=!0});var jte=w(jF=>{\"use strict\";Object.defineProperty(jF,\"__esModule\",{value:!0});jF.default=Oje;var Mje=GF(OF()),D0=GF(cu()),Uje=GF(fu());function GF(t){return t&&t.__esModule?t:{default:t}}function R0(){return R0=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var r=arguments[e];for(var i in r)Object.prototype.hasOwnProperty.call(r,i)&&(t[i]=r[i])}return t},R0.apply(this,arguments)}function Kje(t,e){if(t==null)return{};var r={},i=Object.keys(t),n,s;for(s=0;s<i.length;s++)n=i[s],!(e.indexOf(n)>=0)&&(r[n]=t[n]);return r}function Oje(t){function e(r,i){let{value:n,path:s=\"\",label:o,options:a,originalValue:l,sync:c}=r,u=Kje(r,[\"value\",\"path\",\"label\",\"options\",\"originalValue\",\"sync\"]),{name:g,test:f,params:h,message:p}=t,{parent:m,context:y}=a;function Q(U){return Uje.default.isRef(U)?U.getValue(n,m,y):U}function S(U={}){let J=(0,Mje.default)(R0({value:n,originalValue:l,label:o,path:U.path||s},h,U.params),Q),W=new D0.default(D0.default.formatError(U.message||p,J),n,J.path,U.type||g);return W.params=J,W}let x=R0({path:s,parent:m,type:g,createError:S,resolve:Q,options:a,originalValue:l},u);if(!c){try{Promise.resolve(f.call(x,n,x)).then(U=>{D0.default.isError(U)?i(U):U?i(null,U):i(S())})}catch(U){i(U)}return}let M;try{var Y;if(M=f.call(x,n,x),typeof((Y=M)==null?void 0:Y.then)==\"function\")throw new Error(`Validation test of type: \"${x.type}\" returned a Promise during a synchronous validate. 
This test will finish after the validate call has returned`)}catch(U){i(U);return}D0.default.isError(M)?i(M):M?i(null,M):i(S())}return e.OPTIONS=t,e}});var YF=w(MC=>{\"use strict\";Object.defineProperty(MC,\"__esModule\",{value:!0});MC.getIn=Gte;MC.default=void 0;var Hje=TC(),jje=t=>t.substr(0,t.length-1).substr(1);function Gte(t,e,r,i=r){let n,s,o;return e?((0,Hje.forEach)(e,(a,l,c)=>{let u=l?jje(a):a;if(t=t.resolve({context:i,parent:n,value:r}),t.innerType){let g=c?parseInt(u,10):0;if(r&&g>=r.length)throw new Error(`Yup.reach cannot resolve an array item at index: ${a}, in the path: ${e}. because there is no value at that index. `);n=r,r=r&&r[g],t=t.innerType}if(!c){if(!t.fields||!t.fields[u])throw new Error(`The schema does not contain the path: ${e}. (failed at: ${o} which is a type: \"${t._type}\")`);n=r,r=r&&r[u],t=t.fields[u]}s=u,o=l?\"[\"+a+\"]\":\".\"+a}),{schema:t,parent:n,parentPath:s}):{parent:n,parentPath:e,schema:t}}var Gje=(t,e,r,i)=>Gte(t,e,r,i).schema,Yje=Gje;MC.default=Yje});var qte=w(F0=>{\"use strict\";Object.defineProperty(F0,\"__esModule\",{value:!0});F0.default=void 0;var Yte=qje(fu());function qje(t){return t&&t.__esModule?t:{default:t}}var N0=class{constructor(){this.list=new Set,this.refs=new Map}get size(){return this.list.size+this.refs.size}describe(){let e=[];for(let r of this.list)e.push(r);for(let[,r]of this.refs)e.push(r.describe());return e}toArray(){return Array.from(this.list).concat(Array.from(this.refs.values()))}add(e){Yte.default.isRef(e)?this.refs.set(e.key,e):this.list.add(e)}delete(e){Yte.default.isRef(e)?this.refs.delete(e.key):this.list.delete(e)}has(e,r){if(this.list.has(e))return!0;let i,n=this.refs.values();for(;i=n.next(),!i.done;)if(r(i.value)===e)return!0;return!1}clone(){let e=new N0;return e.list=new Set(this.list),e.refs=new Map(this.refs),e}merge(e,r){let i=this.clone();return e.list.forEach(n=>i.add(n)),e.refs.forEach(n=>i.add(n)),r.list.forEach(n=>i.delete(n)),r.refs.forEach(n=>i.delete(n)),i}};F0.default=N0});var pA=w(L0=>{\"use strict\";Object.defineProperty(L0,\"__esModule\",{value:!0});L0.default=void 0;var Jte=hA(h$()),Hf=fA(),Jje=hA(x$()),Wte=hA(I0()),T0=hA(jte()),zte=hA(vC()),Wje=hA(fu()),zje=YF(),_je=hA(gF()),_te=hA(cu()),Vte=hA(qte());function hA(t){return t&&t.__esModule?t:{default:t}}function Ys(){return Ys=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var r=arguments[e];for(var i in r)Object.prototype.hasOwnProperty.call(r,i)&&(t[i]=r[i])}return t},Ys.apply(this,arguments)}var Aa=class{constructor(e){this.deps=[],this.conditions=[],this._whitelist=new Vte.default,this._blacklist=new Vte.default,this.exclusiveTests=Object.create(null),this.tests=[],this.transforms=[],this.withMutation(()=>{this.typeError(Hf.mixed.notType)}),this.type=(e==null?void 0:e.type)||\"mixed\",this.spec=Ys({strip:!1,strict:!1,abortEarly:!0,recursive:!0,nullable:!1,presence:\"optional\"},e==null?void 0:e.spec)}get _type(){return this.type}_typeCheck(e){return!0}clone(e){if(this._mutate)return e&&Object.assign(this.spec,e),this;let r=Object.create(Object.getPrototypeOf(this));return r.type=this.type,r._typeError=this._typeError,r._whitelistError=this._whitelistError,r._blacklistError=this._blacklistError,r._whitelist=this._whitelist.clone(),r._blacklist=this._blacklist.clone(),r.exclusiveTests=Ys({},this.exclusiveTests),r.deps=[...this.deps],r.conditions=[...this.conditions],r.tests=[...this.tests],r.transforms=[...this.transforms],r.spec=(0,Jte.default)(Ys({},this.spec,e)),r}label(e){var r=this.clone();return 
r.spec.label=e,r}meta(...e){if(e.length===0)return this.spec.meta;let r=this.clone();return r.spec.meta=Object.assign(r.spec.meta||{},e[0]),r}withMutation(e){let r=this._mutate;this._mutate=!0;let i=e(this);return this._mutate=r,i}concat(e){if(!e||e===this)return this;if(e.type!==this.type&&this.type!==\"mixed\")throw new TypeError(`You cannot \\`concat()\\` schema's of different types: ${this.type} and ${e.type}`);let r=this,i=e.clone(),n=Ys({},r.spec,i.spec);return i.spec=n,i._typeError||(i._typeError=r._typeError),i._whitelistError||(i._whitelistError=r._whitelistError),i._blacklistError||(i._blacklistError=r._blacklistError),i._whitelist=r._whitelist.merge(e._whitelist,e._blacklist),i._blacklist=r._blacklist.merge(e._blacklist,e._whitelist),i.tests=r.tests,i.exclusiveTests=r.exclusiveTests,i.withMutation(s=>{e.tests.forEach(o=>{s.test(o.OPTIONS)})}),i}isType(e){return this.spec.nullable&&e===null?!0:this._typeCheck(e)}resolve(e){let r=this;if(r.conditions.length){let i=r.conditions;r=r.clone(),r.conditions=[],r=i.reduce((n,s)=>s.resolve(n,e),r),r=r.resolve(e)}return r}cast(e,r={}){let i=this.resolve(Ys({value:e},r)),n=i._cast(e,r);if(e!==void 0&&r.assert!==!1&&i.isType(n)!==!0){let s=(0,zte.default)(e),o=(0,zte.default)(n);throw new TypeError(`The value of ${r.path||\"field\"} could not be cast to a value that satisfies the schema type: \"${i._type}\". \n\nattempted value: ${s} \n`+(o!==s?`result of cast: ${o}`:\"\"))}return n}_cast(e,r){let i=e===void 0?e:this.transforms.reduce((n,s)=>s.call(this,n,e,this),e);return i===void 0&&(i=this.getDefault()),i}_validate(e,r={},i){let{sync:n,path:s,from:o=[],originalValue:a=e,strict:l=this.spec.strict,abortEarly:c=this.spec.abortEarly}=r,u=e;l||(u=this._cast(u,Ys({assert:!1},r)));let g={value:u,path:s,options:r,originalValue:a,schema:this,label:this.spec.label,sync:n,from:o},f=[];this._typeError&&f.push(this._typeError),this._whitelistError&&f.push(this._whitelistError),this._blacklistError&&f.push(this._blacklistError),(0,Wte.default)({args:g,value:u,path:s,sync:n,tests:f,endEarly:c},h=>{if(h)return void i(h,u);(0,Wte.default)({tests:this.tests,args:g,path:s,sync:n,value:u,endEarly:c},i)})}validate(e,r,i){let n=this.resolve(Ys({},r,{value:e}));return typeof i==\"function\"?n._validate(e,r,i):new Promise((s,o)=>n._validate(e,r,(a,l)=>{a?o(a):s(l)}))}validateSync(e,r){let i=this.resolve(Ys({},r,{value:e})),n;return i._validate(e,Ys({},r,{sync:!0}),(s,o)=>{if(s)throw s;n=o}),n}isValid(e,r){return this.validate(e,r).then(()=>!0,i=>{if(_te.default.isError(i))return!1;throw i})}isValidSync(e,r){try{return this.validateSync(e,r),!0}catch(i){if(_te.default.isError(i))return!1;throw i}}_getDefault(){let e=this.spec.default;return e==null?e:typeof e==\"function\"?e.call(this):(0,Jte.default)(e)}getDefault(e){return this.resolve(e||{})._getDefault()}default(e){return arguments.length===0?this._getDefault():this.clone({default:e})}strict(e=!0){var r=this.clone();return r.spec.strict=e,r}_isPresent(e){return e!=null}defined(e=Hf.mixed.defined){return this.test({message:e,name:\"defined\",exclusive:!0,test(r){return r!==void 0}})}required(e=Hf.mixed.required){return this.clone({presence:\"required\"}).withMutation(r=>r.test({message:e,name:\"required\",exclusive:!0,test(i){return this.schema._isPresent(i)}}))}notRequired(){var e=this.clone({presence:\"optional\"});return e.tests=e.tests.filter(r=>r.OPTIONS.name!==\"required\"),e}nullable(e=!0){var r=this.clone({nullable:e!==!1});return r}transform(e){var r=this.clone();return 
r.transforms.push(e),r}test(...e){let r;if(e.length===1?typeof e[0]==\"function\"?r={test:e[0]}:r=e[0]:e.length===2?r={name:e[0],test:e[1]}:r={name:e[0],message:e[1],test:e[2]},r.message===void 0&&(r.message=Hf.mixed.default),typeof r.test!=\"function\")throw new TypeError(\"`test` is a required parameters\");let i=this.clone(),n=(0,T0.default)(r),s=r.exclusive||r.name&&i.exclusiveTests[r.name]===!0;if(r.exclusive&&!r.name)throw new TypeError(\"Exclusive tests must provide a unique `name` identifying the test\");return r.name&&(i.exclusiveTests[r.name]=!!r.exclusive),i.tests=i.tests.filter(o=>!(o.OPTIONS.name===r.name&&(s||o.OPTIONS.test===n.OPTIONS.test))),i.tests.push(n),i}when(e,r){!Array.isArray(e)&&typeof e!=\"string\"&&(r=e,e=\".\");let i=this.clone(),n=(0,_je.default)(e).map(s=>new Wje.default(s));return n.forEach(s=>{s.isSibling&&i.deps.push(s.key)}),i.conditions.push(new Jje.default(n,r)),i}typeError(e){var r=this.clone();return r._typeError=(0,T0.default)({message:e,name:\"typeError\",test(i){return i!==void 0&&!this.schema.isType(i)?this.createError({params:{type:this.schema._type}}):!0}}),r}oneOf(e,r=Hf.mixed.oneOf){var i=this.clone();return e.forEach(n=>{i._whitelist.add(n),i._blacklist.delete(n)}),i._whitelistError=(0,T0.default)({message:r,name:\"oneOf\",test(n){if(n===void 0)return!0;let s=this.schema._whitelist;return s.has(n,this.resolve)?!0:this.createError({params:{values:s.toArray().join(\", \")}})}}),i}notOneOf(e,r=Hf.mixed.notOneOf){var i=this.clone();return e.forEach(n=>{i._blacklist.add(n),i._whitelist.delete(n)}),i._blacklistError=(0,T0.default)({message:r,name:\"notOneOf\",test(n){let s=this.schema._blacklist;return s.has(n,this.resolve)?this.createError({params:{values:s.toArray().join(\", \")}}):!0}}),i}strip(e=!0){let r=this.clone();return r.spec.strip=e,r}describe(){let e=this.clone(),{label:r,meta:i}=e.spec;return{meta:i,label:r,type:e.type,oneOf:e._whitelist.describe(),notOneOf:e._blacklist.describe(),tests:e.tests.map(s=>({name:s.OPTIONS.name,params:s.OPTIONS.params})).filter((s,o,a)=>a.findIndex(l=>l.name===s.name)===o)}}};L0.default=Aa;Aa.prototype.__isYupSchema__=!0;for(let t of[\"validate\",\"validateSync\"])Aa.prototype[`${t}At`]=function(e,r,i={}){let{parent:n,parentPath:s,schema:o}=(0,zje.getIn)(this,e,r,i.context);return o[t](n&&n[s],Ys({},i,{parent:n,path:e}))};for(let t of[\"equals\",\"is\"])Aa.prototype[t]=Aa.prototype.oneOf;for(let t of[\"not\",\"nope\"])Aa.prototype[t]=Aa.prototype.notOneOf;Aa.prototype.optional=Aa.prototype.notRequired});var Zte=w(UC=>{\"use strict\";Object.defineProperty(UC,\"__esModule\",{value:!0});UC.create=Xte;UC.default=void 0;var Xje=Vje(pA());function Vje(t){return t&&t.__esModule?t:{default:t}}var qF=Xje.default,Zje=qF;UC.default=Zje;function Xte(){return new qF}Xte.prototype=qF.prototype});var jf=w(O0=>{\"use strict\";Object.defineProperty(O0,\"__esModule\",{value:!0});O0.default=void 0;var $je=t=>t==null;O0.default=$je});var ire=w(KC=>{\"use strict\";Object.defineProperty(KC,\"__esModule\",{value:!0});KC.create=$te;KC.default=void 0;var eGe=ere(pA()),tre=fA(),rre=ere(jf());function ere(t){return t&&t.__esModule?t:{default:t}}function $te(){return new M0}var M0=class extends eGe.default{constructor(){super({type:\"boolean\"});this.withMutation(()=>{this.transform(function(e){if(!this.isType(e)){if(/^(true|1)$/i.test(String(e)))return!0;if(/^(false|0)$/i.test(String(e)))return!1}return e})})}_typeCheck(e){return e instanceof Boolean&&(e=e.valueOf()),typeof e==\"boolean\"}isTrue(e=tre.boolean.isValue){return 
this.test({message:e,name:\"is-value\",exclusive:!0,params:{value:\"true\"},test(r){return(0,rre.default)(r)||r===!0}})}isFalse(e=tre.boolean.isValue){return this.test({message:e,name:\"is-value\",exclusive:!0,params:{value:\"false\"},test(r){return(0,rre.default)(r)||r===!1}})}};KC.default=M0;$te.prototype=M0.prototype});var ore=w(HC=>{\"use strict\";Object.defineProperty(HC,\"__esModule\",{value:!0});HC.create=nre;HC.default=void 0;var la=fA(),dA=sre(jf()),tGe=sre(pA());function sre(t){return t&&t.__esModule?t:{default:t}}var rGe=/^((([a-z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])+(\\.([a-z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])|(\\\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-z]|\\d|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])|(([a-z]|\\d|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])([a-z]|\\d|-|\\.|_|~|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])*([a-z]|\\d|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])))\\.)+(([a-z]|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])|(([a-z]|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])([a-z]|\\d|-|\\.|_|~|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])*([a-z]|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])))$/i,iGe=/^((https?|ftp):)?\\/\\/(((([a-z]|\\d|-|\\.|_|~|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])|(%[\\da-f]{2})|[!\\$&'\\(\\)\\*\\+,;=]|:)*@)?(((\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5])\\.(\\d|[1-9]\\d|1\\d\\d|2[0-4]\\d|25[0-5]))|((([a-z]|\\d|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])|(([a-z]|\\d|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])([a-z]|\\d|-|\\.|_|~|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])*([a-z]|\\d|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])))\\.)+(([a-z]|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])|(([a-z]|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])([a-z]|\\d|-|\\.|_|~|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])*([a-z]|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])))\\.?)(:\\d*)?)(\\/((([a-z]|\\d|-|\\.|_|~|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])|(%[\\da-f]{2})|[!\\$&'\\(\\)\\*\\+,;=]|:|@)+(\\/(([a-z]|\\d|-|\\.|_|~|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])|(%[\\da-f]{2})|[!\\$&'\\(\\)\\*\\+,;=]|:|@)*)*)?)?(\\?((([a-z]|\\d|-|\\.|_|~|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])|(%[\\da-f]{2})|[!\\$&'\\(\\)\\*\\+,;=]|:|@)|[\\uE000-\\uF8FF]|\\/|\\?)*)?(\\#((([a-z]|\\d|-|\\.|_|~|[\\u00A0-\\uD7FF\\uF900-\\uFDCF\\uFDF0-\\uFFEF])|(%[\\da-f]{2})|[!\\$&'\\(\\)\\*\\+,;=]|:|@)|\\/|\\?)*)?$/i,nGe=/^(?:[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}|00000000-0000-0000-0000-000000000000)$/i,sGe=t=>(0,dA.default)(t)||t===t.trim(),oGe={}.toString();function nre(){return new U0}var U0=class extends tGe.default{constructor(){super({type:\"string\"});this.withMutation(()=>{this.transform(function(e){if(this.isType(e)||Array.isArray(e))return e;let r=e!=null&&e.toString?e.toString():e;return r===oGe?e:r})})}_typeCheck(e){return e instanceof String&&(e=e.valueOf()),typeof e==\"string\"}_isPresent(e){return super._isPresent(e)&&!!e.length}length(e,r=la.string.length){return 
this.test({message:r,name:\"length\",exclusive:!0,params:{length:e},test(i){return(0,dA.default)(i)||i.length===this.resolve(e)}})}min(e,r=la.string.min){return this.test({message:r,name:\"min\",exclusive:!0,params:{min:e},test(i){return(0,dA.default)(i)||i.length>=this.resolve(e)}})}max(e,r=la.string.max){return this.test({name:\"max\",exclusive:!0,message:r,params:{max:e},test(i){return(0,dA.default)(i)||i.length<=this.resolve(e)}})}matches(e,r){let i=!1,n,s;return r&&(typeof r==\"object\"?{excludeEmptyString:i=!1,message:n,name:s}=r:n=r),this.test({name:s||\"matches\",message:n||la.string.matches,params:{regex:e},test:o=>(0,dA.default)(o)||o===\"\"&&i||o.search(e)!==-1})}email(e=la.string.email){return this.matches(rGe,{name:\"email\",message:e,excludeEmptyString:!0})}url(e=la.string.url){return this.matches(iGe,{name:\"url\",message:e,excludeEmptyString:!0})}uuid(e=la.string.uuid){return this.matches(nGe,{name:\"uuid\",message:e,excludeEmptyString:!1})}ensure(){return this.default(\"\").transform(e=>e===null?\"\":e)}trim(e=la.string.trim){return this.transform(r=>r!=null?r.trim():r).test({message:e,name:\"trim\",test:sGe})}lowercase(e=la.string.lowercase){return this.transform(r=>(0,dA.default)(r)?r:r.toLowerCase()).test({message:e,name:\"string_case\",exclusive:!0,test:r=>(0,dA.default)(r)||r===r.toLowerCase()})}uppercase(e=la.string.uppercase){return this.transform(r=>(0,dA.default)(r)?r:r.toUpperCase()).test({message:e,name:\"string_case\",exclusive:!0,test:r=>(0,dA.default)(r)||r===r.toUpperCase()})}};HC.default=U0;nre.prototype=U0.prototype});var lre=w(jC=>{\"use strict\";Object.defineProperty(jC,\"__esModule\",{value:!0});jC.create=are;jC.default=void 0;var hu=fA(),pu=Are(jf()),aGe=Are(pA());function Are(t){return t&&t.__esModule?t:{default:t}}var AGe=t=>t!=+t;function are(){return new K0}var K0=class extends aGe.default{constructor(){super({type:\"number\"});this.withMutation(()=>{this.transform(function(e){let r=e;if(typeof r==\"string\"){if(r=r.replace(/\\s/g,\"\"),r===\"\")return NaN;r=+r}return this.isType(r)?r:parseFloat(r)})})}_typeCheck(e){return e instanceof Number&&(e=e.valueOf()),typeof e==\"number\"&&!AGe(e)}min(e,r=hu.number.min){return this.test({message:r,name:\"min\",exclusive:!0,params:{min:e},test(i){return(0,pu.default)(i)||i>=this.resolve(e)}})}max(e,r=hu.number.max){return this.test({message:r,name:\"max\",exclusive:!0,params:{max:e},test(i){return(0,pu.default)(i)||i<=this.resolve(e)}})}lessThan(e,r=hu.number.lessThan){return this.test({message:r,name:\"max\",exclusive:!0,params:{less:e},test(i){return(0,pu.default)(i)||i<this.resolve(e)}})}moreThan(e,r=hu.number.moreThan){return this.test({message:r,name:\"min\",exclusive:!0,params:{more:e},test(i){return(0,pu.default)(i)||i>this.resolve(e)}})}positive(e=hu.number.positive){return this.moreThan(0,e)}negative(e=hu.number.negative){return this.lessThan(0,e)}integer(e=hu.number.integer){return this.test({name:\"integer\",message:e,test:r=>(0,pu.default)(r)||Number.isInteger(r)})}truncate(){return this.transform(e=>(0,pu.default)(e)?e:e|0)}round(e){var r,i=[\"ceil\",\"floor\",\"round\",\"trunc\"];if(e=((r=e)==null?void 0:r.toLowerCase())||\"round\",e===\"trunc\")return this.truncate();if(i.indexOf(e.toLowerCase())===-1)throw new TypeError(\"Only valid options for round() are: \"+i.join(\", \"));return this.transform(n=>(0,pu.default)(n)?n:Math[e](n))}};jC.default=K0;are.prototype=K0.prototype});var cre=w(JF=>{\"use strict\";Object.defineProperty(JF,\"__esModule\",{value:!0});JF.default=lGe;var 
cGe=/^(\\d{4}|[+\\-]\\d{6})(?:-?(\\d{2})(?:-?(\\d{2}))?)?(?:[ T]?(\\d{2}):?(\\d{2})(?::?(\\d{2})(?:[,\\.](\\d{1,}))?)?(?:(Z)|([+\\-])(\\d{2})(?::?(\\d{2}))?)?)?$/;function lGe(t){var e=[1,4,5,6,7,10,11],r=0,i,n;if(n=cGe.exec(t)){for(var s=0,o;o=e[s];++s)n[o]=+n[o]||0;n[2]=(+n[2]||1)-1,n[3]=+n[3]||1,n[7]=n[7]?String(n[7]).substr(0,3):0,(n[8]===void 0||n[8]===\"\")&&(n[9]===void 0||n[9]===\"\")?i=+new Date(n[1],n[2],n[3],n[4],n[5],n[6],n[7]):(n[8]!==\"Z\"&&n[9]!==void 0&&(r=n[10]*60+n[11],n[9]===\"+\"&&(r=0-r)),i=Date.UTC(n[1],n[2],n[3],n[4],n[5]+r,n[6],n[7]))}else i=Date.parse?Date.parse(t):NaN;return i}});var fre=w(GC=>{\"use strict\";Object.defineProperty(GC,\"__esModule\",{value:!0});GC.create=WF;GC.default=void 0;var uGe=H0(cre()),ure=fA(),gre=H0(jf()),gGe=H0(fu()),fGe=H0(pA());function H0(t){return t&&t.__esModule?t:{default:t}}var zF=new Date(\"\"),hGe=t=>Object.prototype.toString.call(t)===\"[object Date]\";function WF(){return new YC}var YC=class extends fGe.default{constructor(){super({type:\"date\"});this.withMutation(()=>{this.transform(function(e){return this.isType(e)?e:(e=(0,uGe.default)(e),isNaN(e)?zF:new Date(e))})})}_typeCheck(e){return hGe(e)&&!isNaN(e.getTime())}prepareParam(e,r){let i;if(gGe.default.isRef(e))i=e;else{let n=this.cast(e);if(!this._typeCheck(n))throw new TypeError(`\\`${r}\\` must be a Date or a value that can be \\`cast()\\` to a Date`);i=n}return i}min(e,r=ure.date.min){let i=this.prepareParam(e,\"min\");return this.test({message:r,name:\"min\",exclusive:!0,params:{min:e},test(n){return(0,gre.default)(n)||n>=this.resolve(i)}})}max(e,r=ure.date.max){var i=this.prepareParam(e,\"max\");return this.test({message:r,name:\"max\",exclusive:!0,params:{max:e},test(n){return(0,gre.default)(n)||n<=this.resolve(i)}})}};GC.default=YC;YC.INVALID_DATE=zF;WF.prototype=YC.prototype;WF.INVALID_DATE=zF});var pre=w((Wht,hre)=>{function pGe(t,e,r,i){var n=-1,s=t==null?0:t.length;for(i&&s&&(r=t[++n]);++n<s;)r=e(r,t[n],n,t);return r}hre.exports=pGe});var Cre=w((zht,dre)=>{function dGe(t){return function(e){return t==null?void 0:t[e]}}dre.exports=dGe});var Ere=w((_ht,mre)=>{var 
CGe=Cre(),mGe={\\u00C0:\"A\",\\u00C1:\"A\",\\u00C2:\"A\",\\u00C3:\"A\",\\u00C4:\"A\",\\u00C5:\"A\",\\u00E0:\"a\",\\u00E1:\"a\",\\u00E2:\"a\",\\u00E3:\"a\",\\u00E4:\"a\",\\u00E5:\"a\",\\u00C7:\"C\",\\u00E7:\"c\",\\u00D0:\"D\",\\u00F0:\"d\",\\u00C8:\"E\",\\u00C9:\"E\",\\u00CA:\"E\",\\u00CB:\"E\",\\u00E8:\"e\",\\u00E9:\"e\",\\u00EA:\"e\",\\u00EB:\"e\",\\u00CC:\"I\",\\u00CD:\"I\",\\u00CE:\"I\",\\u00CF:\"I\",\\u00EC:\"i\",\\u00ED:\"i\",\\u00EE:\"i\",\\u00EF:\"i\",\\u00D1:\"N\",\\u00F1:\"n\",\\u00D2:\"O\",\\u00D3:\"O\",\\u00D4:\"O\",\\u00D5:\"O\",\\u00D6:\"O\",\\u00D8:\"O\",\\u00F2:\"o\",\\u00F3:\"o\",\\u00F4:\"o\",\\u00F5:\"o\",\\u00F6:\"o\",\\u00F8:\"o\",\\u00D9:\"U\",\\u00DA:\"U\",\\u00DB:\"U\",\\u00DC:\"U\",\\u00F9:\"u\",\\u00FA:\"u\",\\u00FB:\"u\",\\u00FC:\"u\",\\u00DD:\"Y\",\\u00FD:\"y\",\\u00FF:\"y\",\\u00C6:\"Ae\",\\u00E6:\"ae\",\\u00DE:\"Th\",\\u00FE:\"th\",\\u00DF:\"ss\",\\u0100:\"A\",\\u0102:\"A\",\\u0104:\"A\",\\u0101:\"a\",\\u0103:\"a\",\\u0105:\"a\",\\u0106:\"C\",\\u0108:\"C\",\\u010A:\"C\",\\u010C:\"C\",\\u0107:\"c\",\\u0109:\"c\",\\u010B:\"c\",\\u010D:\"c\",\\u010E:\"D\",\\u0110:\"D\",\\u010F:\"d\",\\u0111:\"d\",\\u0112:\"E\",\\u0114:\"E\",\\u0116:\"E\",\\u0118:\"E\",\\u011A:\"E\",\\u0113:\"e\",\\u0115:\"e\",\\u0117:\"e\",\\u0119:\"e\",\\u011B:\"e\",\\u011C:\"G\",\\u011E:\"G\",\\u0120:\"G\",\\u0122:\"G\",\\u011D:\"g\",\\u011F:\"g\",\\u0121:\"g\",\\u0123:\"g\",\\u0124:\"H\",\\u0126:\"H\",\\u0125:\"h\",\\u0127:\"h\",\\u0128:\"I\",\\u012A:\"I\",\\u012C:\"I\",\\u012E:\"I\",\\u0130:\"I\",\\u0129:\"i\",\\u012B:\"i\",\\u012D:\"i\",\\u012F:\"i\",\\u0131:\"i\",\\u0134:\"J\",\\u0135:\"j\",\\u0136:\"K\",\\u0137:\"k\",\\u0138:\"k\",\\u0139:\"L\",\\u013B:\"L\",\\u013D:\"L\",\\u013F:\"L\",\\u0141:\"L\",\\u013A:\"l\",\\u013C:\"l\",\\u013E:\"l\",\\u0140:\"l\",\\u0142:\"l\",\\u0143:\"N\",\\u0145:\"N\",\\u0147:\"N\",\\u014A:\"N\",\\u0144:\"n\",\\u0146:\"n\",\\u0148:\"n\",\\u014B:\"n\",\\u014C:\"O\",\\u014E:\"O\",\\u0150:\"O\",\\u014D:\"o\",\\u014F:\"o\",\\u0151:\"o\",\\u0154:\"R\",\\u0156:\"R\",\\u0158:\"R\",\\u0155:\"r\",\\u0157:\"r\",\\u0159:\"r\",\\u015A:\"S\",\\u015C:\"S\",\\u015E:\"S\",\\u0160:\"S\",\\u015B:\"s\",\\u015D:\"s\",\\u015F:\"s\",\\u0161:\"s\",\\u0162:\"T\",\\u0164:\"T\",\\u0166:\"T\",\\u0163:\"t\",\\u0165:\"t\",\\u0167:\"t\",\\u0168:\"U\",\\u016A:\"U\",\\u016C:\"U\",\\u016E:\"U\",\\u0170:\"U\",\\u0172:\"U\",\\u0169:\"u\",\\u016B:\"u\",\\u016D:\"u\",\\u016F:\"u\",\\u0171:\"u\",\\u0173:\"u\",\\u0174:\"W\",\\u0175:\"w\",\\u0176:\"Y\",\\u0177:\"y\",\\u0178:\"Y\",\\u0179:\"Z\",\\u017B:\"Z\",\\u017D:\"Z\",\\u017A:\"z\",\\u017C:\"z\",\\u017E:\"z\",\\u0132:\"IJ\",\\u0133:\"ij\",\\u0152:\"Oe\",\\u0153:\"oe\",\\u0149:\"'n\",\\u017F:\"s\"},EGe=CGe(mGe);mre.exports=EGe});var yre=w((Vht,Ire)=>{var IGe=Ere(),yGe=nf(),wGe=/[\\xc0-\\xd6\\xd8-\\xf6\\xf8-\\xff\\u0100-\\u017f]/g,BGe=\"\\\\u0300-\\\\u036f\",bGe=\"\\\\ufe20-\\\\ufe2f\",QGe=\"\\\\u20d0-\\\\u20ff\",vGe=BGe+bGe+QGe,SGe=\"[\"+vGe+\"]\",kGe=RegExp(SGe,\"g\");function xGe(t){return t=yGe(t),t&&t.replace(wGe,IGe).replace(kGe,\"\")}Ire.exports=xGe});var Bre=w((Xht,wre)=>{var PGe=/[^\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\x7f]+/g;function DGe(t){return t.match(PGe)||[]}wre.exports=DGe});var Qre=w((Zht,bre)=>{var RGe=/[a-z][A-Z]|[A-Z]{2}[a-z]|[0-9][a-zA-Z]|[a-zA-Z][0-9]|[^a-zA-Z0-9 ]/;function FGe(t){return RGe.test(t)}bre.exports=FGe});var Yre=w(($ht,vre)=>{var 
Sre=\"\\\\ud800-\\\\udfff\",NGe=\"\\\\u0300-\\\\u036f\",LGe=\"\\\\ufe20-\\\\ufe2f\",TGe=\"\\\\u20d0-\\\\u20ff\",OGe=NGe+LGe+TGe,kre=\"\\\\u2700-\\\\u27bf\",xre=\"a-z\\\\xdf-\\\\xf6\\\\xf8-\\\\xff\",MGe=\"\\\\xac\\\\xb1\\\\xd7\\\\xf7\",UGe=\"\\\\x00-\\\\x2f\\\\x3a-\\\\x40\\\\x5b-\\\\x60\\\\x7b-\\\\xbf\",KGe=\"\\\\u2000-\\\\u206f\",HGe=\" \\\\t\\\\x0b\\\\f\\\\xa0\\\\ufeff\\\\n\\\\r\\\\u2028\\\\u2029\\\\u1680\\\\u180e\\\\u2000\\\\u2001\\\\u2002\\\\u2003\\\\u2004\\\\u2005\\\\u2006\\\\u2007\\\\u2008\\\\u2009\\\\u200a\\\\u202f\\\\u205f\\\\u3000\",Pre=\"A-Z\\\\xc0-\\\\xd6\\\\xd8-\\\\xde\",jGe=\"\\\\ufe0e\\\\ufe0f\",Dre=MGe+UGe+KGe+HGe,Rre=\"['\\u2019]\",Fre=\"[\"+Dre+\"]\",GGe=\"[\"+OGe+\"]\",Nre=\"\\\\d+\",YGe=\"[\"+kre+\"]\",Lre=\"[\"+xre+\"]\",Tre=\"[^\"+Sre+Dre+Nre+kre+xre+Pre+\"]\",qGe=\"\\\\ud83c[\\\\udffb-\\\\udfff]\",JGe=\"(?:\"+GGe+\"|\"+qGe+\")\",WGe=\"[^\"+Sre+\"]\",Ore=\"(?:\\\\ud83c[\\\\udde6-\\\\uddff]){2}\",Mre=\"[\\\\ud800-\\\\udbff][\\\\udc00-\\\\udfff]\",Gf=\"[\"+Pre+\"]\",zGe=\"\\\\u200d\",Ure=\"(?:\"+Lre+\"|\"+Tre+\")\",_Ge=\"(?:\"+Gf+\"|\"+Tre+\")\",Kre=\"(?:\"+Rre+\"(?:d|ll|m|re|s|t|ve))?\",Hre=\"(?:\"+Rre+\"(?:D|LL|M|RE|S|T|VE))?\",jre=JGe+\"?\",Gre=\"[\"+jGe+\"]?\",VGe=\"(?:\"+zGe+\"(?:\"+[WGe,Ore,Mre].join(\"|\")+\")\"+Gre+jre+\")*\",XGe=\"\\\\d*(?:1st|2nd|3rd|(?![123])\\\\dth)(?=\\\\b|[A-Z_])\",ZGe=\"\\\\d*(?:1ST|2ND|3RD|(?![123])\\\\dTH)(?=\\\\b|[a-z_])\",$Ge=Gre+jre+VGe,eYe=\"(?:\"+[YGe,Ore,Mre].join(\"|\")+\")\"+$Ge,tYe=RegExp([Gf+\"?\"+Lre+\"+\"+Kre+\"(?=\"+[Fre,Gf,\"$\"].join(\"|\")+\")\",_Ge+\"+\"+Hre+\"(?=\"+[Fre,Gf+Ure,\"$\"].join(\"|\")+\")\",Gf+\"?\"+Ure+\"+\"+Kre,Gf+\"+\"+Hre,ZGe,XGe,Nre,eYe].join(\"|\"),\"g\");function rYe(t){return t.match(tYe)||[]}vre.exports=rYe});var Jre=w((ept,qre)=>{var iYe=Bre(),nYe=Qre(),sYe=nf(),oYe=Yre();function aYe(t,e,r){return t=sYe(t),e=r?void 0:e,e===void 0?nYe(t)?oYe(t):iYe(t):t.match(e)||[]}qre.exports=aYe});var _F=w((tpt,Wre)=>{var AYe=pre(),lYe=yre(),cYe=Jre(),uYe=\"['\\u2019]\",gYe=RegExp(uYe,\"g\");function fYe(t){return function(e){return AYe(cYe(lYe(e).replace(gYe,\"\")),t,\"\")}}Wre.exports=fYe});var _re=w((rpt,zre)=>{var hYe=_F(),pYe=hYe(function(t,e,r){return t+(r?\"_\":\"\")+e.toLowerCase()});zre.exports=pYe});var Xre=w((ipt,Vre)=>{var dYe=tB(),CYe=_F(),mYe=CYe(function(t,e,r){return e=e.toLowerCase(),t+(r?dYe(e):e)});Vre.exports=mYe});var $re=w((npt,Zre)=>{var EYe=Ff(),IYe=IF(),yYe=TF();function wYe(t,e){var r={};return e=yYe(e,3),IYe(t,function(i,n,s){EYe(r,e(i,n,s),i)}),r}Zre.exports=wYe});var tie=w((spt,VF)=>{VF.exports=function(t){return eie(BYe(t),t)};VF.exports.array=eie;function eie(t,e){var r=t.length,i=new Array(r),n={},s=r,o=bYe(e),a=QYe(t);for(e.forEach(function(c){if(!a.has(c[0])||!a.has(c[1]))throw new Error(\"Unknown node. There is an unknown node in the supplied edges.\")});s--;)n[s]||l(t[s],s,new Set);return i;function l(c,u,g){if(g.has(c)){var f;try{f=\", node was:\"+JSON.stringify(c)}catch(m){f=\"\"}throw new Error(\"Cyclic dependency\"+f)}if(!a.has(c))throw new Error(\"Found unknown node. Make sure to provided all involved nodes. 
Unknown node: \"+JSON.stringify(c));if(!n[u]){n[u]=!0;var h=o.get(c)||new Set;if(h=Array.from(h),u=h.length){g.add(c);do{var p=h[--u];l(p,a.get(p),g)}while(u);g.delete(c)}i[--r]=c}}}function BYe(t){for(var e=new Set,r=0,i=t.length;r<i;r++){var n=t[r];e.add(n[0]),e.add(n[1])}return Array.from(e)}function bYe(t){for(var e=new Map,r=0,i=t.length;r<i;r++){var n=t[r];e.has(n[0])||e.set(n[0],new Set),e.has(n[1])||e.set(n[1],new Set),e.get(n[0]).add(n[1])}return e}function QYe(t){for(var e=new Map,r=0,i=t.length;r<i;r++)e.set(t[r],r);return e}});var rie=w(XF=>{\"use strict\";Object.defineProperty(XF,\"__esModule\",{value:!0});XF.default=vYe;var SYe=j0(SC()),kYe=j0(tie()),xYe=TC(),PYe=j0(fu()),DYe=j0(Lf());function j0(t){return t&&t.__esModule?t:{default:t}}function vYe(t,e=[]){let r=[],i=[];function n(s,o){var a=(0,xYe.split)(s)[0];~i.indexOf(a)||i.push(a),~e.indexOf(`${o}-${a}`)||r.push([o,a])}for(let s in t)if((0,SYe.default)(t,s)){let o=t[s];~i.indexOf(s)||i.push(s),PYe.default.isRef(o)&&o.isSibling?n(o.path,s):(0,DYe.default)(o)&&\"deps\"in o&&o.deps.forEach(a=>n(a,s))}return kYe.default.array(i,r).reverse()}});var nie=w(ZF=>{\"use strict\";Object.defineProperty(ZF,\"__esModule\",{value:!0});ZF.default=RYe;function iie(t,e){let r=Infinity;return t.some((i,n)=>{var s;if(((s=e.path)==null?void 0:s.indexOf(i))!==-1)return r=n,!0}),r}function RYe(t){return(e,r)=>iie(t,e)-iie(t,r)}});var uie=w(qC=>{\"use strict\";Object.defineProperty(qC,\"__esModule\",{value:!0});qC.create=sie;qC.default=void 0;var oie=ca(SC()),aie=ca(_re()),FYe=ca(Xre()),NYe=ca($re()),LYe=ca(OF()),TYe=TC(),Aie=fA(),OYe=ca(rie()),lie=ca(nie()),MYe=ca(I0()),UYe=ca(cu()),$F=ca(pA());function ca(t){return t&&t.__esModule?t:{default:t}}function Yf(){return Yf=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var r=arguments[e];for(var i in r)Object.prototype.hasOwnProperty.call(r,i)&&(t[i]=r[i])}return t},Yf.apply(this,arguments)}var cie=t=>Object.prototype.toString.call(t)===\"[object Object]\";function KYe(t,e){let r=Object.keys(t.fields);return Object.keys(e).filter(i=>r.indexOf(i)===-1)}var HYe=(0,lie.default)([]),G0=class extends $F.default{constructor(e){super({type:\"object\"});this.fields=Object.create(null),this._sortErrors=HYe,this._nodes=[],this._excludedEdges=[],this.withMutation(()=>{this.transform(function(i){if(typeof i==\"string\")try{i=JSON.parse(i)}catch(n){i=null}return this.isType(i)?i:null}),e&&this.shape(e)})}_typeCheck(e){return cie(e)||typeof e==\"function\"}_cast(e,r={}){var i;let n=super._cast(e,r);if(n===void 0)return this.getDefault();if(!this._typeCheck(n))return n;let s=this.fields,o=(i=r.stripUnknown)!=null?i:this.spec.noUnknown,a=this._nodes.concat(Object.keys(n).filter(g=>this._nodes.indexOf(g)===-1)),l={},c=Yf({},r,{parent:l,__validating:r.__validating||!1}),u=!1;for(let g of a){let f=s[g],h=(0,oie.default)(n,g);if(f){let p,m=n[g];c.path=(r.path?`${r.path}.`:\"\")+g,f=f.resolve({value:m,context:r.context,parent:l});let y=\"spec\"in f?f.spec:void 0,Q=y==null?void 0:y.strict;if(y==null?void 0:y.strip){u=u||g in n;continue}p=!r.__validating||!Q?f.cast(n[g],c):n[g],p!==void 0&&(l[g]=p)}else h&&!o&&(l[g]=n[g]);l[g]!==n[g]&&(u=!0)}return u?l:n}_validate(e,r={},i){let n=[],{sync:s,from:o=[],originalValue:a=e,abortEarly:l=this.spec.abortEarly,recursive:c=this.spec.recursive}=r;o=[{schema:this,value:a},...o],r.__validating=!0,r.originalValue=a,r.from=o,super._validate(e,r,(u,g)=>{if(u){if(!UYe.default.isError(u)||l)return void i(u,g);n.push(u)}if(!c||!cie(g)){i(n[0]||null,g);return}a=a||g;let 
f=this._nodes.map(h=>(p,m)=>{let y=h.indexOf(\".\")===-1?(r.path?`${r.path}.`:\"\")+h:`${r.path||\"\"}[\"${h}\"]`,Q=this.fields[h];if(Q&&\"validate\"in Q){Q.validate(g[h],Yf({},r,{path:y,from:o,strict:!0,parent:g,originalValue:a[h]}),m);return}m(null)});(0,MYe.default)({sync:s,tests:f,value:g,errors:n,endEarly:l,sort:this._sortErrors,path:r.path},i)})}clone(e){let r=super.clone(e);return r.fields=Yf({},this.fields),r._nodes=this._nodes,r._excludedEdges=this._excludedEdges,r._sortErrors=this._sortErrors,r}concat(e){let r=super.concat(e),i=r.fields;for(let[n,s]of Object.entries(this.fields)){let o=i[n];o===void 0?i[n]=s:o instanceof $F.default&&s instanceof $F.default&&(i[n]=s.concat(o))}return r.withMutation(()=>r.shape(i))}getDefaultFromShape(){let e={};return this._nodes.forEach(r=>{let i=this.fields[r];e[r]=\"default\"in i?i.getDefault():void 0}),e}_getDefault(){if(\"default\"in this.spec)return super._getDefault();if(!!this._nodes.length)return this.getDefaultFromShape()}shape(e,r=[]){let i=this.clone(),n=Object.assign(i.fields,e);if(i.fields=n,i._sortErrors=(0,lie.default)(Object.keys(n)),r.length){Array.isArray(r[0])||(r=[r]);let s=r.map(([o,a])=>`${o}-${a}`);i._excludedEdges=i._excludedEdges.concat(s)}return i._nodes=(0,OYe.default)(n,i._excludedEdges),i}pick(e){let r={};for(let i of e)this.fields[i]&&(r[i]=this.fields[i]);return this.clone().withMutation(i=>(i.fields={},i.shape(r)))}omit(e){let r=this.clone(),i=r.fields;r.fields={};for(let n of e)delete i[n];return r.withMutation(()=>r.shape(i))}from(e,r,i){let n=(0,TYe.getter)(e,!0);return this.transform(s=>{if(s==null)return s;let o=s;return(0,oie.default)(s,e)&&(o=Yf({},s),i||delete o[e],o[r]=n(s)),o})}noUnknown(e=!0,r=Aie.object.noUnknown){typeof e==\"string\"&&(r=e,e=!0);let i=this.test({name:\"noUnknown\",exclusive:!0,message:r,test(n){if(n==null)return!0;let s=KYe(this.schema,n);return!e||s.length===0||this.createError({params:{unknown:s.join(\", \")}})}});return i.spec.noUnknown=e,i}unknown(e=!0,r=Aie.object.noUnknown){return this.noUnknown(!e,r)}transformKeys(e){return this.transform(r=>r&&(0,NYe.default)(r,(i,n)=>e(n)))}camelCase(){return this.transformKeys(FYe.default)}snakeCase(){return this.transformKeys(aie.default)}constantCase(){return this.transformKeys(e=>(0,aie.default)(e).toUpperCase())}describe(){let e=super.describe();return e.fields=(0,LYe.default)(this.fields,r=>r.describe()),e}};qC.default=G0;function sie(t){return new G0(t)}sie.prototype=G0.prototype});var fie=w(JC=>{\"use strict\";Object.defineProperty(JC,\"__esModule\",{value:!0});JC.create=gie;JC.default=void 0;var eN=qf(jf()),jYe=qf(Lf()),GYe=qf(vC()),tN=fA(),YYe=qf(I0()),qYe=qf(cu()),JYe=qf(pA());function qf(t){return t&&t.__esModule?t:{default:t}}function Y0(){return Y0=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var r=arguments[e];for(var i in r)Object.prototype.hasOwnProperty.call(r,i)&&(t[i]=r[i])}return t},Y0.apply(this,arguments)}function gie(t){return new q0(t)}var q0=class extends JYe.default{constructor(e){super({type:\"array\"});this.innerType=e,this.withMutation(()=>{this.transform(function(r){if(typeof r==\"string\")try{r=JSON.parse(r)}catch(i){r=null}return this.isType(r)?r:null})})}_typeCheck(e){return Array.isArray(e)}get _subType(){return this.innerType}_cast(e,r){let i=super._cast(e,r);if(!this._typeCheck(i)||!this.innerType)return i;let n=!1,s=i.map((o,a)=>{let l=this.innerType.cast(o,Y0({},r,{path:`${r.path||\"\"}[${a}]`}));return l!==o&&(n=!0),l});return n?s:i}_validate(e,r={},i){var n,s;let 
o=[],a=r.sync,l=r.path,c=this.innerType,u=(n=r.abortEarly)!=null?n:this.spec.abortEarly,g=(s=r.recursive)!=null?s:this.spec.recursive,f=r.originalValue!=null?r.originalValue:e;super._validate(e,r,(h,p)=>{if(h){if(!qYe.default.isError(h)||u)return void i(h,p);o.push(h)}if(!g||!c||!this._typeCheck(p)){i(o[0]||null,p);return}f=f||p;let m=new Array(p.length);for(let y=0;y<p.length;y++){let Q=p[y],S=`${r.path||\"\"}[${y}]`,x=Y0({},r,{path:S,strict:!0,parent:p,index:y,originalValue:f[y]});m[y]=(M,Y)=>c.validate(Q,x,Y)}(0,YYe.default)({sync:a,path:l,value:p,errors:o,endEarly:u,tests:m},i)})}clone(e){let r=super.clone(e);return r.innerType=this.innerType,r}concat(e){let r=super.concat(e);return r.innerType=this.innerType,e.innerType&&(r.innerType=r.innerType?r.innerType.concat(e.innerType):e.innerType),r}of(e){let r=this.clone();if(!(0,jYe.default)(e))throw new TypeError(\"`array.of()` sub-schema must be a valid yup schema not: \"+(0,GYe.default)(e));return r.innerType=e,r}length(e,r=tN.array.length){return this.test({message:r,name:\"length\",exclusive:!0,params:{length:e},test(i){return(0,eN.default)(i)||i.length===this.resolve(e)}})}min(e,r){return r=r||tN.array.min,this.test({message:r,name:\"min\",exclusive:!0,params:{min:e},test(i){return(0,eN.default)(i)||i.length>=this.resolve(e)}})}max(e,r){return r=r||tN.array.max,this.test({message:r,name:\"max\",exclusive:!0,params:{max:e},test(i){return(0,eN.default)(i)||i.length<=this.resolve(e)}})}ensure(){return this.default(()=>[]).transform((e,r)=>this._typeCheck(e)?e:r==null?[]:[].concat(r))}compact(e){let r=e?(i,n,s)=>!e(i,n,s):i=>!!i;return this.transform(i=>i!=null?i.filter(r):i)}describe(){let e=super.describe();return this.innerType&&(e.innerType=this.innerType.describe()),e}nullable(e=!0){return super.nullable(e)}defined(){return super.defined()}required(e){return super.required(e)}};JC.default=q0;gie.prototype=q0.prototype});var hie=w(WC=>{\"use strict\";Object.defineProperty(WC,\"__esModule\",{value:!0});WC.create=WYe;WC.default=void 0;var _Ye=zYe(Lf());function zYe(t){return t&&t.__esModule?t:{default:t}}function WYe(t){return new rN(t)}var rN=class{constructor(e){this.type=\"lazy\",this.__isYupSchema__=!0,this._resolve=(r,i={})=>{let n=this.builder(r,i);if(!(0,_Ye.default)(n))throw new TypeError(\"lazy() functions must return a valid schema\");return n.resolve(i)},this.builder=e}resolve(e){return this._resolve(e.value,e)}cast(e,r){return this._resolve(e,r).cast(e,r)}validate(e,r,i){return this._resolve(e,r).validate(e,r,i)}validateSync(e,r){return this._resolve(e,r).validateSync(e,r)}validateAt(e,r,i){return this._resolve(r,i).validateAt(e,r,i)}validateSyncAt(e,r,i){return this._resolve(r,i).validateSyncAt(e,r,i)}describe(){return null}isValid(e,r){return this._resolve(e,r).isValid(e,r)}isValidSync(e,r){return this._resolve(e,r).isValidSync(e,r)}},VYe=rN;WC.default=VYe});var pie=w(iN=>{\"use strict\";Object.defineProperty(iN,\"__esModule\",{value:!0});iN.default=XYe;var $Ye=ZYe(fA());function ZYe(t){return t&&t.__esModule?t:{default:t}}function XYe(t){Object.keys(t).forEach(e=>{Object.keys(t[e]).forEach(r=>{$Ye.default[e][r]=t[e][r]})})}});var sN=w(Br=>{\"use strict\";Object.defineProperty(Br,\"__esModule\",{value:!0});Br.addMethod=eqe;Object.defineProperty(Br,\"MixedSchema\",{enumerable:!0,get:function(){return die.default}});Object.defineProperty(Br,\"mixed\",{enumerable:!0,get:function(){return die.create}});Object.defineProperty(Br,\"BooleanSchema\",{enumerable:!0,get:function(){return 
nN.default}});Object.defineProperty(Br,\"bool\",{enumerable:!0,get:function(){return nN.create}});Object.defineProperty(Br,\"boolean\",{enumerable:!0,get:function(){return nN.create}});Object.defineProperty(Br,\"StringSchema\",{enumerable:!0,get:function(){return Cie.default}});Object.defineProperty(Br,\"string\",{enumerable:!0,get:function(){return Cie.create}});Object.defineProperty(Br,\"NumberSchema\",{enumerable:!0,get:function(){return mie.default}});Object.defineProperty(Br,\"number\",{enumerable:!0,get:function(){return mie.create}});Object.defineProperty(Br,\"DateSchema\",{enumerable:!0,get:function(){return Eie.default}});Object.defineProperty(Br,\"date\",{enumerable:!0,get:function(){return Eie.create}});Object.defineProperty(Br,\"ObjectSchema\",{enumerable:!0,get:function(){return Iie.default}});Object.defineProperty(Br,\"object\",{enumerable:!0,get:function(){return Iie.create}});Object.defineProperty(Br,\"ArraySchema\",{enumerable:!0,get:function(){return yie.default}});Object.defineProperty(Br,\"array\",{enumerable:!0,get:function(){return yie.create}});Object.defineProperty(Br,\"ref\",{enumerable:!0,get:function(){return tqe.create}});Object.defineProperty(Br,\"lazy\",{enumerable:!0,get:function(){return rqe.create}});Object.defineProperty(Br,\"ValidationError\",{enumerable:!0,get:function(){return iqe.default}});Object.defineProperty(Br,\"reach\",{enumerable:!0,get:function(){return nqe.default}});Object.defineProperty(Br,\"isSchema\",{enumerable:!0,get:function(){return wie.default}});Object.defineProperty(Br,\"setLocale\",{enumerable:!0,get:function(){return sqe.default}});Object.defineProperty(Br,\"BaseSchema\",{enumerable:!0,get:function(){return oqe.default}});var die=du(Zte()),nN=du(ire()),Cie=du(ore()),mie=du(lre()),Eie=du(fre()),Iie=du(uie()),yie=du(fie()),tqe=fu(),rqe=hie(),iqe=zC(cu()),nqe=zC(YF()),wie=zC(Lf()),sqe=zC(pie()),oqe=zC(pA());function zC(t){return t&&t.__esModule?t:{default:t}}function Bie(){if(typeof WeakMap!=\"function\")return null;var t=new WeakMap;return Bie=function(){return t},t}function du(t){if(t&&t.__esModule)return t;if(t===null||typeof t!=\"object\"&&typeof t!=\"function\")return{default:t};var e=Bie();if(e&&e.has(t))return e.get(t);var r={},i=Object.defineProperty&&Object.getOwnPropertyDescriptor;for(var n in t)if(Object.prototype.hasOwnProperty.call(t,n)){var s=i?Object.getOwnPropertyDescriptor(t,n):null;s&&(s.get||s.set)?Object.defineProperty(r,n,s):r[n]=t[n]}return r.default=t,e&&e.set(t,r),r}function eqe(t,e,r){if(!t||!(0,wie.default)(t.prototype))throw new TypeError(\"You must provide a yup schema constructor function\");if(typeof e!=\"string\")throw new TypeError(\"A Method name must be provided\");if(typeof r!=\"function\")throw new TypeError(\"Method function must be provided\");t.prototype[e]=r}});var kie=w((Qpt,VC)=>{\"use strict\";var 
lqe=process.env.TERM_PROGRAM===\"Hyper\",cqe=process.platform===\"win32\",Qie=process.platform===\"linux\",oN={ballotDisabled:\"\\u2612\",ballotOff:\"\\u2610\",ballotOn:\"\\u2611\",bullet:\"\\u2022\",bulletWhite:\"\\u25E6\",fullBlock:\"\\u2588\",heart:\"\\u2764\",identicalTo:\"\\u2261\",line:\"\\u2500\",mark:\"\\u203B\",middot:\"\\xB7\",minus:\"\\uFF0D\",multiplication:\"\\xD7\",obelus:\"\\xF7\",pencilDownRight:\"\\u270E\",pencilRight:\"\\u270F\",pencilUpRight:\"\\u2710\",percent:\"%\",pilcrow2:\"\\u2761\",pilcrow:\"\\xB6\",plusMinus:\"\\xB1\",section:\"\\xA7\",starsOff:\"\\u2606\",starsOn:\"\\u2605\",upDownArrow:\"\\u2195\"},vie=Object.assign({},oN,{check:\"\\u221A\",cross:\"\\xD7\",ellipsisLarge:\"...\",ellipsis:\"...\",info:\"i\",question:\"?\",questionSmall:\"?\",pointer:\">\",pointerSmall:\"\\xBB\",radioOff:\"( )\",radioOn:\"(*)\",warning:\"\\u203C\"}),Sie=Object.assign({},oN,{ballotCross:\"\\u2718\",check:\"\\u2714\",cross:\"\\u2716\",ellipsisLarge:\"\\u22EF\",ellipsis:\"\\u2026\",info:\"\\u2139\",question:\"?\",questionFull:\"\\uFF1F\",questionSmall:\"\\uFE56\",pointer:Qie?\"\\u25B8\":\"\\u276F\",pointerSmall:Qie?\"\\u2023\":\"\\u203A\",radioOff:\"\\u25EF\",radioOn:\"\\u25C9\",warning:\"\\u26A0\"});VC.exports=cqe&&!lqe?vie:Sie;Reflect.defineProperty(VC.exports,\"common\",{enumerable:!1,value:oN});Reflect.defineProperty(VC.exports,\"windows\",{enumerable:!1,value:vie});Reflect.defineProperty(VC.exports,\"other\",{enumerable:!1,value:Sie})});var Co=w((vpt,aN)=>{\"use strict\";var uqe=t=>t!==null&&typeof t==\"object\"&&!Array.isArray(t),gqe=/[\\u001b\\u009b][[\\]#;?()]*(?:(?:(?:[^\\W_]*;?[^\\W_]*)\\u0007)|(?:(?:[0-9]{1,4}(;[0-9]{0,4})*)?[~0-9=<>cf-nqrtyA-PRZ]))/g,xie=()=>{let t={enabled:!0,visible:!0,styles:{},keys:{}};\"FORCE_COLOR\"in process.env&&(t.enabled=process.env.FORCE_COLOR!==\"0\");let e=s=>{let o=s.open=`\u001b[${s.codes[0]}m`,a=s.close=`\u001b[${s.codes[1]}m`,l=s.regex=new RegExp(`\\\\u001b\\\\[${s.codes[1]}m`,\"g\");return s.wrap=(c,u)=>{c.includes(a)&&(c=c.replace(l,a+o));let g=o+c+a;return u?g.replace(/\\r*\\n/g,`${a}$&${o}`):g},s},r=(s,o,a)=>typeof s==\"function\"?s(o):s.wrap(o,a),i=(s,o)=>{if(s===\"\"||s==null)return\"\";if(t.enabled===!1)return s;if(t.visible===!1)return\"\";let a=\"\"+s,l=a.includes(`\n`),c=o.length;for(c>0&&o.includes(\"unstyle\")&&(o=[...new Set([\"unstyle\",...o])].reverse());c-- >0;)a=r(t.styles[o[c]],a,l);return a},n=(s,o,a)=>{t.styles[s]=e({name:s,codes:o}),(t.keys[a]||(t.keys[a]=[])).push(s),Reflect.defineProperty(t,s,{configurable:!0,enumerable:!0,set(c){t.alias(s,c)},get(){let c=u=>i(u,c.stack);return Reflect.setPrototypeOf(c,t),c.stack=this.stack?this.stack.concat(s):[s],c}})};return 
n(\"reset\",[0,0],\"modifier\"),n(\"bold\",[1,22],\"modifier\"),n(\"dim\",[2,22],\"modifier\"),n(\"italic\",[3,23],\"modifier\"),n(\"underline\",[4,24],\"modifier\"),n(\"inverse\",[7,27],\"modifier\"),n(\"hidden\",[8,28],\"modifier\"),n(\"strikethrough\",[9,29],\"modifier\"),n(\"black\",[30,39],\"color\"),n(\"red\",[31,39],\"color\"),n(\"green\",[32,39],\"color\"),n(\"yellow\",[33,39],\"color\"),n(\"blue\",[34,39],\"color\"),n(\"magenta\",[35,39],\"color\"),n(\"cyan\",[36,39],\"color\"),n(\"white\",[37,39],\"color\"),n(\"gray\",[90,39],\"color\"),n(\"grey\",[90,39],\"color\"),n(\"bgBlack\",[40,49],\"bg\"),n(\"bgRed\",[41,49],\"bg\"),n(\"bgGreen\",[42,49],\"bg\"),n(\"bgYellow\",[43,49],\"bg\"),n(\"bgBlue\",[44,49],\"bg\"),n(\"bgMagenta\",[45,49],\"bg\"),n(\"bgCyan\",[46,49],\"bg\"),n(\"bgWhite\",[47,49],\"bg\"),n(\"blackBright\",[90,39],\"bright\"),n(\"redBright\",[91,39],\"bright\"),n(\"greenBright\",[92,39],\"bright\"),n(\"yellowBright\",[93,39],\"bright\"),n(\"blueBright\",[94,39],\"bright\"),n(\"magentaBright\",[95,39],\"bright\"),n(\"cyanBright\",[96,39],\"bright\"),n(\"whiteBright\",[97,39],\"bright\"),n(\"bgBlackBright\",[100,49],\"bgBright\"),n(\"bgRedBright\",[101,49],\"bgBright\"),n(\"bgGreenBright\",[102,49],\"bgBright\"),n(\"bgYellowBright\",[103,49],\"bgBright\"),n(\"bgBlueBright\",[104,49],\"bgBright\"),n(\"bgMagentaBright\",[105,49],\"bgBright\"),n(\"bgCyanBright\",[106,49],\"bgBright\"),n(\"bgWhiteBright\",[107,49],\"bgBright\"),t.ansiRegex=gqe,t.hasColor=t.hasAnsi=s=>(t.ansiRegex.lastIndex=0,typeof s==\"string\"&&s!==\"\"&&t.ansiRegex.test(s)),t.alias=(s,o)=>{let a=typeof o==\"string\"?t[o]:o;if(typeof a!=\"function\")throw new TypeError(\"Expected alias to be the name of an existing color (string) or a function\");a.stack||(Reflect.defineProperty(a,\"name\",{value:s}),t.styles[s]=a,a.stack=[s]),Reflect.defineProperty(t,s,{configurable:!0,enumerable:!0,set(l){t.alias(s,l)},get(){let l=c=>i(c,l.stack);return Reflect.setPrototypeOf(l,t),l.stack=this.stack?this.stack.concat(a.stack):a.stack,l}})},t.theme=s=>{if(!uqe(s))throw new TypeError(\"Expected theme to be an object\");for(let o of Object.keys(s))t.alias(o,s[o]);return t},t.alias(\"unstyle\",s=>typeof s==\"string\"&&s!==\"\"?(t.ansiRegex.lastIndex=0,s.replace(t.ansiRegex,\"\")):\"\"),t.alias(\"noop\",s=>s),t.none=t.clear=t.noop,t.stripColor=t.unstyle,t.symbols=kie(),t.define=n,t};aN.exports=xie();aN.exports.create=xie});var Xi=w(Lt=>{\"use strict\";var fqe=Object.prototype.toString,qs=Co(),Pie=!1,AN=[],Die={yellow:\"blue\",cyan:\"red\",green:\"magenta\",black:\"white\",blue:\"yellow\",red:\"cyan\",magenta:\"green\",white:\"black\"};Lt.longest=(t,e)=>t.reduce((r,i)=>Math.max(r,e?i[e].length:i.length),0);Lt.hasColor=t=>!!t&&qs.hasColor(t);var W0=Lt.isObject=t=>t!==null&&typeof t==\"object\"&&!Array.isArray(t);Lt.nativeType=t=>fqe.call(t).slice(8,-1).toLowerCase().replace(/\\s/g,\"\");Lt.isAsyncFn=t=>Lt.nativeType(t)===\"asyncfunction\";Lt.isPrimitive=t=>t!=null&&typeof t!=\"object\"&&typeof t!=\"function\";Lt.resolve=(t,e,...r)=>typeof e==\"function\"?e.call(t,...r):e;Lt.scrollDown=(t=[])=>[...t.slice(1),t[0]];Lt.scrollUp=(t=[])=>[t.pop(),...t];Lt.reorder=(t=[])=>{let e=t.slice();return e.sort((r,i)=>r.index>i.index?1:r.index<i.index?-1:0),e};Lt.swap=(t,e,r)=>{let i=t.length,n=r===i?0:r<0?i-1:r,s=t[e];t[e]=t[n],t[n]=s};Lt.width=(t,e=80)=>{let r=t&&t.columns?t.columns:e;return t&&typeof t.getWindowSize==\"function\"&&(r=t.getWindowSize()[0]),process.platform===\"win32\"?r-1:r};Lt.height=(t,e=20)=>{let 
r=t&&t.rows?t.rows:e;return t&&typeof t.getWindowSize==\"function\"&&(r=t.getWindowSize()[1]),r};Lt.wordWrap=(t,e={})=>{if(!t)return t;typeof e==\"number\"&&(e={width:e});let{indent:r=\"\",newline:i=`\n`+r,width:n=80}=e;n-=((i+r).match(/[^\\S\\n]/g)||[]).length;let o=`.{1,${n}}([\\\\s\\\\u200B]+|$)|[^\\\\s\\\\u200B]+?([\\\\s\\\\u200B]+|$)`,a=t.trim(),l=new RegExp(o,\"g\"),c=a.match(l)||[];return c=c.map(u=>u.replace(/\\n$/,\"\")),e.padEnd&&(c=c.map(u=>u.padEnd(n,\" \"))),e.padStart&&(c=c.map(u=>u.padStart(n,\" \"))),r+c.join(i)};Lt.unmute=t=>{let e=t.stack.find(i=>qs.keys.color.includes(i));return e?qs[e]:t.stack.find(i=>i.slice(2)===\"bg\")?qs[e.slice(2)]:i=>i};Lt.pascal=t=>t?t[0].toUpperCase()+t.slice(1):\"\";Lt.inverse=t=>{if(!t||!t.stack)return t;let e=t.stack.find(i=>qs.keys.color.includes(i));if(e){let i=qs[\"bg\"+Lt.pascal(e)];return i?i.black:t}let r=t.stack.find(i=>i.slice(0,2)===\"bg\");return r?qs[r.slice(2).toLowerCase()]||t:qs.none};Lt.complement=t=>{if(!t||!t.stack)return t;let e=t.stack.find(i=>qs.keys.color.includes(i)),r=t.stack.find(i=>i.slice(0,2)===\"bg\");if(e&&!r)return qs[Die[e]||e];if(r){let i=r.slice(2).toLowerCase(),n=Die[i];return n&&qs[\"bg\"+Lt.pascal(n)]||t}return qs.none};Lt.meridiem=t=>{let e=t.getHours(),r=t.getMinutes(),i=e>=12?\"pm\":\"am\";e=e%12;let n=e===0?12:e,s=r<10?\"0\"+r:r;return n+\":\"+s+\" \"+i};Lt.set=(t={},e=\"\",r)=>e.split(\".\").reduce((i,n,s,o)=>{let a=o.length-1>s?i[n]||{}:r;return!Lt.isObject(a)&&s<o.length-1&&(a={}),i[n]=a},t);Lt.get=(t={},e=\"\",r)=>{let i=t[e]==null?e.split(\".\").reduce((n,s)=>n&&n[s],t):t[e];return i==null?r:i};Lt.mixin=(t,e)=>{if(!W0(t))return e;if(!W0(e))return t;for(let r of Object.keys(e)){let i=Object.getOwnPropertyDescriptor(e,r);if(i.hasOwnProperty(\"value\"))if(t.hasOwnProperty(r)&&W0(i.value)){let n=Object.getOwnPropertyDescriptor(t,r);W0(n.value)?t[r]=Lt.merge({},t[r],e[r]):Reflect.defineProperty(t,r,i)}else Reflect.defineProperty(t,r,i);else Reflect.defineProperty(t,r,i)}return t};Lt.merge=(...t)=>{let e={};for(let r of t)Lt.mixin(e,r);return e};Lt.mixinEmitter=(t,e)=>{let r=e.constructor.prototype;for(let i of Object.keys(r)){let n=r[i];typeof n==\"function\"?Lt.define(t,i,n.bind(e)):Lt.define(t,i,n)}};Lt.onExit=t=>{let e=(r,i)=>{Pie||(Pie=!0,AN.forEach(n=>n()),r===!0&&process.exit(128+i))};AN.length===0&&(process.once(\"SIGTERM\",e.bind(null,!0,15)),process.once(\"SIGINT\",e.bind(null,!0,2)),process.once(\"exit\",e)),AN.push(t)};Lt.define=(t,e,r)=>{Reflect.defineProperty(t,e,{value:r})};Lt.defineExport=(t,e,r)=>{let i;Reflect.defineProperty(t,e,{enumerable:!0,configurable:!0,set(n){i=n},get(){return i?i():r()}})}});var Rie=w(Wf=>{\"use 
strict\";Wf.ctrl={a:\"first\",b:\"backward\",c:\"cancel\",d:\"deleteForward\",e:\"last\",f:\"forward\",g:\"reset\",i:\"tab\",k:\"cutForward\",l:\"reset\",n:\"newItem\",m:\"cancel\",j:\"submit\",p:\"search\",r:\"remove\",s:\"save\",u:\"undo\",w:\"cutLeft\",x:\"toggleCursor\",v:\"paste\"};Wf.shift={up:\"shiftUp\",down:\"shiftDown\",left:\"shiftLeft\",right:\"shiftRight\",tab:\"prev\"};Wf.fn={up:\"pageUp\",down:\"pageDown\",left:\"pageLeft\",right:\"pageRight\",delete:\"deleteForward\"};Wf.option={b:\"backward\",f:\"forward\",d:\"cutRight\",left:\"cutLeft\",up:\"altUp\",down:\"altDown\"};Wf.keys={pageup:\"pageUp\",pagedown:\"pageDown\",home:\"home\",end:\"end\",cancel:\"cancel\",delete:\"deleteForward\",backspace:\"delete\",down:\"down\",enter:\"submit\",escape:\"cancel\",left:\"left\",space:\"space\",number:\"number\",return:\"submit\",right:\"right\",tab:\"next\",up:\"up\"}});var Lie=w((xpt,Fie)=>{\"use strict\";var Nie=require(\"readline\"),hqe=Rie(),pqe=/^(?:\\x1b)([a-zA-Z0-9])$/,dqe=/^(?:\\x1b+)(O|N|\\[|\\[\\[)(?:(\\d+)(?:;(\\d+))?([~^$])|(?:1;)?(\\d+)?([a-zA-Z]))/,Cqe={OP:\"f1\",OQ:\"f2\",OR:\"f3\",OS:\"f4\",\"[11~\":\"f1\",\"[12~\":\"f2\",\"[13~\":\"f3\",\"[14~\":\"f4\",\"[[A\":\"f1\",\"[[B\":\"f2\",\"[[C\":\"f3\",\"[[D\":\"f4\",\"[[E\":\"f5\",\"[15~\":\"f5\",\"[17~\":\"f6\",\"[18~\":\"f7\",\"[19~\":\"f8\",\"[20~\":\"f9\",\"[21~\":\"f10\",\"[23~\":\"f11\",\"[24~\":\"f12\",\"[A\":\"up\",\"[B\":\"down\",\"[C\":\"right\",\"[D\":\"left\",\"[E\":\"clear\",\"[F\":\"end\",\"[H\":\"home\",OA:\"up\",OB:\"down\",OC:\"right\",OD:\"left\",OE:\"clear\",OF:\"end\",OH:\"home\",\"[1~\":\"home\",\"[2~\":\"insert\",\"[3~\":\"delete\",\"[4~\":\"end\",\"[5~\":\"pageup\",\"[6~\":\"pagedown\",\"[[5~\":\"pageup\",\"[[6~\":\"pagedown\",\"[7~\":\"home\",\"[8~\":\"end\",\"[a\":\"up\",\"[b\":\"down\",\"[c\":\"right\",\"[d\":\"left\",\"[e\":\"clear\",\"[2$\":\"insert\",\"[3$\":\"delete\",\"[5$\":\"pageup\",\"[6$\":\"pagedown\",\"[7$\":\"home\",\"[8$\":\"end\",Oa:\"up\",Ob:\"down\",Oc:\"right\",Od:\"left\",Oe:\"clear\",\"[2^\":\"insert\",\"[3^\":\"delete\",\"[5^\":\"pageup\",\"[6^\":\"pagedown\",\"[7^\":\"home\",\"[8^\":\"end\",\"[Z\":\"tab\"};function mqe(t){return[\"[a\",\"[b\",\"[c\",\"[d\",\"[e\",\"[2$\",\"[3$\",\"[5$\",\"[6$\",\"[7$\",\"[8$\",\"[Z\"].includes(t)}function Eqe(t){return[\"Oa\",\"Ob\",\"Oc\",\"Od\",\"Oe\",\"[2^\",\"[3^\",\"[5^\",\"[6^\",\"[7^\",\"[8^\"].includes(t)}var z0=(t=\"\",e={})=>{let r,i=N({name:e.name,ctrl:!1,meta:!1,shift:!1,option:!1,sequence:t,raw:t},e);if(Buffer.isBuffer(t)?t[0]>127&&t[1]===void 0?(t[0]-=128,t=\"\u001b\"+String(t)):t=String(t):t!==void 0&&typeof t!=\"string\"?t=String(t):t||(t=i.sequence||\"\"),i.sequence=i.sequence||t||i.name,t===\"\\r\")i.raw=void 0,i.name=\"return\";else if(t===`\n`)i.name=\"enter\";else if(t===\"\t\")i.name=\"tab\";else if(t===\"\\b\"||t===\"\\x7F\"||t===\"\u001b\\x7F\"||t===\"\u001b\\b\")i.name=\"backspace\",i.meta=t.charAt(0)===\"\u001b\";else if(t===\"\u001b\"||t===\"\u001b\u001b\")i.name=\"escape\",i.meta=t.length===2;else if(t===\" \"||t===\"\u001b \")i.name=\"space\",i.meta=t.length===2;else if(t<=\"\u001a\")i.name=String.fromCharCode(t.charCodeAt(0)+\"a\".charCodeAt(0)-1),i.ctrl=!0;else if(t.length===1&&t>=\"0\"&&t<=\"9\")i.name=\"number\";else if(t.length===1&&t>=\"a\"&&t<=\"z\")i.name=t;else if(t.length===1&&t>=\"A\"&&t<=\"Z\")i.name=t.toLowerCase(),i.shift=!0;else if(r=pqe.exec(t))i.meta=!0,i.shift=/^[A-Z]$/.test(r[1]);else if(r=dqe.exec(t)){let n=[...t];n[0]===\"\u001b\"&&n[1]===\"\u001b\"&&(i.option=!0);let 
s=[r[1],r[2],r[4],r[6]].filter(Boolean).join(\"\"),o=(r[3]||r[5]||1)-1;i.ctrl=!!(o&4),i.meta=!!(o&10),i.shift=!!(o&1),i.code=s,i.name=Cqe[s],i.shift=mqe(s)||i.shift,i.ctrl=Eqe(s)||i.ctrl}return i};z0.listen=(t={},e)=>{let{stdin:r}=t;if(!r||r!==process.stdin&&!r.isTTY)throw new Error(\"Invalid stream passed\");let i=Nie.createInterface({terminal:!0,input:r});Nie.emitKeypressEvents(r,i);let n=(a,l)=>e(a,z0(a,l),i),s=r.isRaw;return r.isTTY&&r.setRawMode(!0),r.on(\"keypress\",n),i.resume(),()=>{r.isTTY&&r.setRawMode(s),r.removeListener(\"keypress\",n),i.pause(),i.close()}};z0.action=(t,e,r)=>{let i=N(N({},hqe),r);return e.ctrl?(e.action=i.ctrl[e.name],e):e.option&&i.option?(e.action=i.option[e.name],e):e.shift?(e.action=i.shift[e.name],e):(e.action=i.keys[e.name],e)};Fie.exports=z0});var Oie=w((Ppt,Tie)=>{\"use strict\";Tie.exports=t=>{t.timers=t.timers||{};let e=t.options.timers;if(!!e)for(let r of Object.keys(e)){let i=e[r];typeof i==\"number\"&&(i={interval:i}),Iqe(t,r,i)}};function Iqe(t,e,r={}){let i=t.timers[e]={name:e,start:Date.now(),ms:0,tick:0},n=r.interval||120;i.frames=r.frames||[],i.loading=!0;let s=setInterval(()=>{i.ms=Date.now()-i.start,i.tick++,t.render()},n);return i.stop=()=>{i.loading=!1,clearInterval(s)},Reflect.defineProperty(i,\"interval\",{value:s}),t.once(\"close\",()=>i.stop()),i.stop}});var Kie=w((Dpt,Mie)=>{\"use strict\";var{define:yqe,width:wqe}=Xi(),Uie=class{constructor(e){let r=e.options;yqe(this,\"_prompt\",e),this.type=e.type,this.name=e.name,this.message=\"\",this.header=\"\",this.footer=\"\",this.error=\"\",this.hint=\"\",this.input=\"\",this.cursor=0,this.index=0,this.lines=0,this.tick=0,this.prompt=\"\",this.buffer=\"\",this.width=wqe(r.stdout||process.stdout),Object.assign(this,r),this.name=this.name||this.message,this.message=this.message||this.name,this.symbols=e.symbols,this.styles=e.styles,this.required=new Set,this.cancelled=!1,this.submitted=!1}clone(){let e=N({},this);return e.status=this.status,e.buffer=Buffer.from(e.buffer),delete e.clone,e}set color(e){this._color=e}get color(){let e=this.prompt.styles;if(this.cancelled)return e.cancelled;if(this.submitted)return e.submitted;let r=this._color||e[this.status];return typeof r==\"function\"?r:e.pending}set loading(e){this._loading=e}get loading(){return typeof this._loading==\"boolean\"?this._loading:this.loadingChoices?\"choices\":!1}get status(){return this.cancelled?\"cancelled\":this.submitted?\"submitted\":\"pending\"}};Mie.exports=Uie});var jie=w((Rpt,Hie)=>{\"use strict\";var lN=Xi(),Fi=Co(),cN={default:Fi.noop,noop:Fi.noop,set inverse(t){this._inverse=t},get inverse(){return this._inverse||lN.inverse(this.primary)},set complement(t){this._complement=t},get complement(){return this._complement||lN.complement(this.primary)},primary:Fi.cyan,success:Fi.green,danger:Fi.magenta,strong:Fi.bold,warning:Fi.yellow,muted:Fi.dim,disabled:Fi.gray,dark:Fi.dim.gray,underline:Fi.underline,set info(t){this._info=t},get info(){return this._info||this.primary},set em(t){this._em=t},get em(){return this._em||this.primary.underline},set heading(t){this._heading=t},get heading(){return this._heading||this.muted.underline},set pending(t){this._pending=t},get pending(){return this._pending||this.primary},set submitted(t){this._submitted=t},get submitted(){return this._submitted||this.success},set cancelled(t){this._cancelled=t},get cancelled(){return this._cancelled||this.danger},set typing(t){this._typing=t},get typing(){return this._typing||this.dim},set placeholder(t){this._placeholder=t},get 
placeholder(){return this._placeholder||this.primary.dim},set highlight(t){this._highlight=t},get highlight(){return this._highlight||this.inverse}};cN.merge=(t={})=>{t.styles&&typeof t.styles.enabled==\"boolean\"&&(Fi.enabled=t.styles.enabled),t.styles&&typeof t.styles.visible==\"boolean\"&&(Fi.visible=t.styles.visible);let e=lN.merge({},cN,t.styles);delete e.merge;for(let r of Object.keys(Fi))e.hasOwnProperty(r)||Reflect.defineProperty(e,r,{get:()=>Fi[r]});for(let r of Object.keys(Fi.styles))e.hasOwnProperty(r)||Reflect.defineProperty(e,r,{get:()=>Fi[r]});return e};Hie.exports=cN});var Yie=w((Fpt,Gie)=>{\"use strict\";var uN=process.platform===\"win32\",CA=Co(),Bqe=Xi(),gN=te(N({},CA.symbols),{upDownDoubleArrow:\"\\u21D5\",upDownDoubleArrow2:\"\\u2B0D\",upDownArrow:\"\\u2195\",asterisk:\"*\",asterism:\"\\u2042\",bulletWhite:\"\\u25E6\",electricArrow:\"\\u2301\",ellipsisLarge:\"\\u22EF\",ellipsisSmall:\"\\u2026\",fullBlock:\"\\u2588\",identicalTo:\"\\u2261\",indicator:CA.symbols.check,leftAngle:\"\\u2039\",mark:\"\\u203B\",minus:\"\\u2212\",multiplication:\"\\xD7\",obelus:\"\\xF7\",percent:\"%\",pilcrow:\"\\xB6\",pilcrow2:\"\\u2761\",pencilUpRight:\"\\u2710\",pencilDownRight:\"\\u270E\",pencilRight:\"\\u270F\",plus:\"+\",plusMinus:\"\\xB1\",pointRight:\"\\u261E\",rightAngle:\"\\u203A\",section:\"\\xA7\",hexagon:{off:\"\\u2B21\",on:\"\\u2B22\",disabled:\"\\u2B22\"},ballot:{on:\"\\u2611\",off:\"\\u2610\",disabled:\"\\u2612\"},stars:{on:\"\\u2605\",off:\"\\u2606\",disabled:\"\\u2606\"},folder:{on:\"\\u25BC\",off:\"\\u25B6\",disabled:\"\\u25B6\"},prefix:{pending:CA.symbols.question,submitted:CA.symbols.check,cancelled:CA.symbols.cross},separator:{pending:CA.symbols.pointerSmall,submitted:CA.symbols.middot,cancelled:CA.symbols.middot},radio:{off:uN?\"( )\":\"\\u25EF\",on:uN?\"(*)\":\"\\u25C9\",disabled:uN?\"(|)\":\"\\u24BE\"},numbers:[\"\\u24EA\",\"\\u2460\",\"\\u2461\",\"\\u2462\",\"\\u2463\",\"\\u2464\",\"\\u2465\",\"\\u2466\",\"\\u2467\",\"\\u2468\",\"\\u2469\",\"\\u246A\",\"\\u246B\",\"\\u246C\",\"\\u246D\",\"\\u246E\",\"\\u246F\",\"\\u2470\",\"\\u2471\",\"\\u2472\",\"\\u2473\",\"\\u3251\",\"\\u3252\",\"\\u3253\",\"\\u3254\",\"\\u3255\",\"\\u3256\",\"\\u3257\",\"\\u3258\",\"\\u3259\",\"\\u325A\",\"\\u325B\",\"\\u325C\",\"\\u325D\",\"\\u325E\",\"\\u325F\",\"\\u32B1\",\"\\u32B2\",\"\\u32B3\",\"\\u32B4\",\"\\u32B5\",\"\\u32B6\",\"\\u32B7\",\"\\u32B8\",\"\\u32B9\",\"\\u32BA\",\"\\u32BB\",\"\\u32BC\",\"\\u32BD\",\"\\u32BE\",\"\\u32BF\"]});gN.merge=t=>{let e=Bqe.merge({},CA.symbols,gN,t.symbols);return delete e.merge,e};Gie.exports=gN});var Jie=w((Npt,qie)=>{\"use strict\";var bqe=jie(),Qqe=Yie(),vqe=Xi();qie.exports=t=>{t.options=vqe.merge({},t.options.theme,t.options),t.symbols=Qqe.merge(t.options),t.styles=bqe.merge(t.options)}});var Xie=w((Wie,zie)=>{\"use strict\";var _ie=process.env.TERM_PROGRAM===\"Apple_Terminal\",Sqe=Co(),fN=Xi(),mo=zie.exports=Wie,Nr=\"\u001b[\",Vie=\"\\x07\",hN=!1,Sl=mo.code={bell:Vie,beep:Vie,beginning:`${Nr}G`,down:`${Nr}J`,esc:Nr,getPosition:`${Nr}6n`,hide:`${Nr}?25l`,line:`${Nr}2K`,lineEnd:`${Nr}K`,lineStart:`${Nr}1K`,restorePosition:Nr+(_ie?\"8\":\"u\"),savePosition:Nr+(_ie?\"7\":\"s\"),screen:`${Nr}2J`,show:`${Nr}?25h`,up:`${Nr}1J`},Cu=mo.cursor={get hidden(){return hN},hide(){return hN=!0,Sl.hide},show(){return 
hN=!1,Sl.show},forward:(t=1)=>`${Nr}${t}C`,backward:(t=1)=>`${Nr}${t}D`,nextLine:(t=1)=>`${Nr}E`.repeat(t),prevLine:(t=1)=>`${Nr}F`.repeat(t),up:(t=1)=>t?`${Nr}${t}A`:\"\",down:(t=1)=>t?`${Nr}${t}B`:\"\",right:(t=1)=>t?`${Nr}${t}C`:\"\",left:(t=1)=>t?`${Nr}${t}D`:\"\",to(t,e){return e?`${Nr}${e+1};${t+1}H`:`${Nr}${t+1}G`},move(t=0,e=0){let r=\"\";return r+=t<0?Cu.left(-t):t>0?Cu.right(t):\"\",r+=e<0?Cu.up(-e):e>0?Cu.down(e):\"\",r},restore(t={}){let{after:e,cursor:r,initial:i,input:n,prompt:s,size:o,value:a}=t;if(i=fN.isPrimitive(i)?String(i):\"\",n=fN.isPrimitive(n)?String(n):\"\",a=fN.isPrimitive(a)?String(a):\"\",o){let l=mo.cursor.up(o)+mo.cursor.to(s.length),c=n.length-r;return c>0&&(l+=mo.cursor.left(c)),l}if(a||e){let l=!n&&!!i?-i.length:-n.length+r;return e&&(l-=e.length),n===\"\"&&i&&!s.includes(i)&&(l+=i.length),mo.cursor.move(l)}}},pN=mo.erase={screen:Sl.screen,up:Sl.up,down:Sl.down,line:Sl.line,lineEnd:Sl.lineEnd,lineStart:Sl.lineStart,lines(t){let e=\"\";for(let r=0;r<t;r++)e+=mo.erase.line+(r<t-1?mo.cursor.up(1):\"\");return t&&(e+=mo.code.beginning),e}};mo.clear=(t=\"\",e=process.stdout.columns)=>{if(!e)return pN.line+Cu.to(0);let r=s=>[...Sqe.unstyle(s)].length,i=t.split(/\\r?\\n/),n=0;for(let s of i)n+=1+Math.floor(Math.max(r(s)-1,0)/e);return(pN.line+Cu.prevLine()).repeat(n-1)+pN.line+Cu.to(0)}});var zf=w((Lpt,Zie)=>{\"use strict\";var kqe=require(\"events\"),$ie=Co(),dN=Lie(),xqe=Oie(),Pqe=Kie(),Dqe=Jie(),Tn=Xi(),mu=Xie(),_0=class extends kqe{constructor(e={}){super();this.name=e.name,this.type=e.type,this.options=e,Dqe(this),xqe(this),this.state=new Pqe(this),this.initial=[e.initial,e.default].find(r=>r!=null),this.stdout=e.stdout||process.stdout,this.stdin=e.stdin||process.stdin,this.scale=e.scale||1,this.term=this.options.term||process.env.TERM_PROGRAM,this.margin=Fqe(this.options.margin),this.setMaxListeners(0),Rqe(this)}async keypress(e,r={}){this.keypressed=!0;let i=dN.action(e,dN(e,r),this.options.actions);this.state.keypress=i,this.emit(\"keypress\",e,i),this.emit(\"state\",this.state.clone());let n=this.options[i.action]||this[i.action]||this.dispatch;if(typeof n==\"function\")return await n.call(this,e,i);this.alert()}alert(){delete this.state.alert,this.options.show===!1?this.emit(\"alert\"):this.stdout.write(mu.code.beep)}cursorHide(){this.stdout.write(mu.cursor.hide()),Tn.onExit(()=>this.cursorShow())}cursorShow(){this.stdout.write(mu.cursor.show())}write(e){!e||(this.stdout&&this.state.show!==!1&&this.stdout.write(e),this.state.buffer+=e)}clear(e=0){let r=this.state.buffer;this.state.buffer=\"\",!(!r&&!e||this.options.show===!1)&&this.stdout.write(mu.cursor.down(e)+mu.clear(r,this.width))}restore(){if(this.state.closed||this.options.show===!1)return;let{prompt:e,after:r,rest:i}=this.sections(),{cursor:n,initial:s=\"\",input:o=\"\",value:a=\"\"}=this,l=this.state.size=i.length,c={after:r,cursor:n,initial:s,input:o,prompt:e,size:l,value:a},u=mu.cursor.restore(c);u&&this.stdout.write(u)}sections(){let{buffer:e,input:r,prompt:i}=this.state;i=$ie.unstyle(i);let n=$ie.unstyle(e),s=n.indexOf(i),o=n.slice(0,s),l=n.slice(s).split(`\n`),c=l[0],u=l[l.length-1],f=(i+(r?\" \"+r:\"\")).length,h=f<c.length?c.slice(f+1):\"\";return{header:o,prompt:c,after:h,rest:l.slice(1),last:u}}async submit(){this.state.submitted=!0,this.state.validating=!0,this.options.onSubmit&&await this.options.onSubmit.call(this,this.name,this.value,this);let e=this.state.error||await this.validate(this.value,this.state);if(e!==!0){let r=`\n`+this.symbols.pointer+\" \";typeof 
e==\"string\"?r+=e.trim():r+=\"Invalid input\",this.state.error=`\n`+this.styles.danger(r),this.state.submitted=!1,await this.render(),await this.alert(),this.state.validating=!1,this.state.error=void 0;return}this.state.validating=!1,await this.render(),await this.close(),this.value=await this.result(this.value),this.emit(\"submit\",this.value)}async cancel(e){this.state.cancelled=this.state.submitted=!0,await this.render(),await this.close(),typeof this.options.onCancel==\"function\"&&await this.options.onCancel.call(this,this.name,this.value,this),this.emit(\"cancel\",await this.error(e))}async close(){this.state.closed=!0;try{let e=this.sections(),r=Math.ceil(e.prompt.length/this.width);e.rest&&this.write(mu.cursor.down(e.rest.length)),this.write(`\n`.repeat(r))}catch(e){}this.emit(\"close\")}start(){!this.stop&&this.options.show!==!1&&(this.stop=dN.listen(this,this.keypress.bind(this)),this.once(\"close\",this.stop))}async skip(){return this.skipped=this.options.skip===!0,typeof this.options.skip==\"function\"&&(this.skipped=await this.options.skip.call(this,this.name,this.value)),this.skipped}async initialize(){let{format:e,options:r,result:i}=this;if(this.format=()=>e.call(this,this.value),this.result=()=>i.call(this,this.value),typeof r.initial==\"function\"&&(this.initial=await r.initial.call(this,this)),typeof r.onRun==\"function\"&&await r.onRun.call(this,this),typeof r.onSubmit==\"function\"){let n=r.onSubmit.bind(this),s=this.submit.bind(this);delete this.options.onSubmit,this.submit=async()=>(await n(this.name,this.value,this),s())}await this.start(),await this.render()}render(){throw new Error(\"expected prompt to have a custom render method\")}run(){return new Promise(async(e,r)=>{if(this.once(\"submit\",e),this.once(\"cancel\",r),await this.skip())return this.render=()=>{},this.submit();await this.initialize(),this.emit(\"run\")})}async element(e,r,i){let{options:n,state:s,symbols:o,timers:a}=this,l=a&&a[e];s.timer=l;let c=n[e]||s[e]||o[e],u=r&&r[e]!=null?r[e]:await c;if(u===\"\")return u;let g=await this.resolve(u,s,r,i);return!g&&r&&r[e]?this.resolve(c,s,r,i):g}async prefix(){let e=await this.element(\"prefix\")||this.symbols,r=this.timers&&this.timers.prefix,i=this.state;return i.timer=r,Tn.isObject(e)&&(e=e[i.status]||e.pending),Tn.hasColor(e)?e:(this.styles[i.status]||this.styles.pending)(e)}async message(){let e=await this.element(\"message\");return Tn.hasColor(e)?e:this.styles.strong(e)}async separator(){let e=await this.element(\"separator\")||this.symbols,r=this.timers&&this.timers.separator,i=this.state;i.timer=r;let n=e[i.status]||e.pending||i.separator,s=await this.resolve(n,i);return Tn.isObject(s)&&(s=s[i.status]||s.pending),Tn.hasColor(s)?s:this.styles.muted(s)}async pointer(e,r){let i=await this.element(\"pointer\",e,r);if(typeof i==\"string\"&&Tn.hasColor(i))return i;if(i){let n=this.styles,s=this.index===r,o=s?n.primary:c=>c,a=await this.resolve(i[s?\"on\":\"off\"]||i,this.state),l=Tn.hasColor(a)?a:o(a);return s?l:\" \".repeat(a.length)}}async indicator(e,r){let i=await this.element(\"indicator\",e,r);if(typeof i==\"string\"&&Tn.hasColor(i))return i;if(i){let n=this.styles,s=e.enabled===!0,o=s?n.success:n.dark,a=i[s?\"on\":\"off\"]||i;return Tn.hasColor(a)?a:o(a)}return\"\"}body(){return null}footer(){if(this.state.status===\"pending\")return this.element(\"footer\")}header(){if(this.state.status===\"pending\")return this.element(\"header\")}async hint(){if(this.state.status===\"pending\"&&!this.isValue(this.state.input)){let e=await 
this.element(\"hint\");return Tn.hasColor(e)?e:this.styles.muted(e)}}error(e){return this.state.submitted?\"\":e||this.state.error}format(e){return e}result(e){return e}validate(e){return this.options.required===!0?this.isValue(e):!0}isValue(e){return e!=null&&e!==\"\"}resolve(e,...r){return Tn.resolve(this,e,...r)}get base(){return _0.prototype}get style(){return this.styles[this.state.status]}get height(){return this.options.rows||Tn.height(this.stdout,25)}get width(){return this.options.columns||Tn.width(this.stdout,80)}get size(){return{width:this.width,height:this.height}}set cursor(e){this.state.cursor=e}get cursor(){return this.state.cursor}set input(e){this.state.input=e}get input(){return this.state.input}set value(e){this.state.value=e}get value(){let{input:e,value:r}=this.state,i=[r,e].find(this.isValue.bind(this));return this.isValue(i)?i:this.initial}static get prompt(){return e=>new this(e).run()}};function Rqe(t){let e=n=>t[n]===void 0||typeof t[n]==\"function\",r=[\"actions\",\"choices\",\"initial\",\"margin\",\"roles\",\"styles\",\"symbols\",\"theme\",\"timers\",\"value\"],i=[\"body\",\"footer\",\"error\",\"header\",\"hint\",\"indicator\",\"message\",\"prefix\",\"separator\",\"skip\"];for(let n of Object.keys(t.options)){if(r.includes(n)||/^on[A-Z]/.test(n))continue;let s=t.options[n];typeof s==\"function\"&&e(n)?i.includes(n)||(t[n]=s.bind(t)):typeof t[n]!=\"function\"&&(t[n]=s)}}function Fqe(t){typeof t==\"number\"&&(t=[t,t,t,t]);let e=[].concat(t||[]),r=n=>n%2==0?`\n`:\" \",i=[];for(let n=0;n<4;n++){let s=r(n);e[n]?i.push(s.repeat(e[n])):i.push(\"\")}return i}Zie.exports=_0});var rne=w((Tpt,ene)=>{\"use strict\";var Nqe=Xi(),tne={default(t,e){return e},checkbox(t,e){throw new Error(\"checkbox role is not implemented yet\")},editable(t,e){throw new Error(\"editable role is not implemented yet\")},expandable(t,e){throw new Error(\"expandable role is not implemented yet\")},heading(t,e){return e.disabled=\"\",e.indicator=[e.indicator,\" \"].find(r=>r!=null),e.message=e.message||\"\",e},input(t,e){throw new Error(\"input role is not implemented yet\")},option(t,e){return tne.default(t,e)},radio(t,e){throw new Error(\"radio role is not implemented yet\")},separator(t,e){return e.disabled=\"\",e.indicator=[e.indicator,\" \"].find(r=>r!=null),e.message=e.message||t.symbols.line.repeat(5),e},spacer(t,e){return e}};ene.exports=(t,e={})=>{let r=Nqe.merge({},tne,e.roles);return r[t]||r.default}});var XC=w((Opt,ine)=>{\"use strict\";var Lqe=Co(),Tqe=zf(),Oqe=rne(),V0=Xi(),{reorder:CN,scrollUp:Mqe,scrollDown:Uqe,isObject:nne,swap:Kqe}=V0,sne=class extends Tqe{constructor(e){super(e);this.cursorHide(),this.maxSelected=e.maxSelected||Infinity,this.multiple=e.multiple||!1,this.initial=e.initial||0,this.delay=e.delay||0,this.longest=0,this.num=\"\"}async initialize(){typeof this.options.initial==\"function\"&&(this.initial=await this.options.initial.call(this)),await this.reset(!0),await super.initialize()}async reset(){let{choices:e,initial:r,autofocus:i,suggest:n}=this.options;if(this.state._choices=[],this.state.choices=[],this.choices=await Promise.all(await this.toChoices(e)),this.choices.forEach(s=>s.enabled=!1),typeof n!=\"function\"&&this.selectable.length===0)throw new Error(\"At least one choice must be selectable\");nne(r)&&(r=Object.keys(r)),Array.isArray(r)?(i!=null&&(this.index=this.findIndex(i)),r.forEach(s=>this.enable(this.find(s))),await this.render()):(i!=null&&(r=i),typeof r==\"string\"&&(r=this.findIndex(r)),typeof 
r==\"number\"&&r>-1&&(this.index=Math.max(0,Math.min(r,this.choices.length)),this.enable(this.find(this.index)))),this.isDisabled(this.focused)&&await this.down()}async toChoices(e,r){this.state.loadingChoices=!0;let i=[],n=0,s=async(o,a)=>{typeof o==\"function\"&&(o=await o.call(this)),o instanceof Promise&&(o=await o);for(let l=0;l<o.length;l++){let c=o[l]=await this.toChoice(o[l],n++,a);i.push(c),c.choices&&await s(c.choices,c)}return i};return s(e,r).then(o=>(this.state.loadingChoices=!1,o))}async toChoice(e,r,i){if(typeof e==\"function\"&&(e=await e.call(this,this)),e instanceof Promise&&(e=await e),typeof e==\"string\"&&(e={name:e}),e.normalized)return e;e.normalized=!0;let n=e.value;if(e=Oqe(e.role,this.options)(this,e),typeof e.disabled==\"string\"&&!e.hint&&(e.hint=e.disabled,e.disabled=!0),e.disabled===!0&&e.hint==null&&(e.hint=\"(disabled)\"),e.index!=null)return e;e.name=e.name||e.key||e.title||e.value||e.message,e.message=e.message||e.name||\"\",e.value=[e.value,e.name].find(this.isValue.bind(this)),e.input=\"\",e.index=r,e.cursor=0,V0.define(e,\"parent\",i),e.level=i?i.level+1:1,e.indent==null&&(e.indent=i?i.indent+\"  \":e.indent||\"\"),e.path=i?i.path+\".\"+e.name:e.name,e.enabled=!!(this.multiple&&!this.isDisabled(e)&&(e.enabled||this.isSelected(e))),this.isDisabled(e)||(this.longest=Math.max(this.longest,Lqe.unstyle(e.message).length));let o=N({},e);return e.reset=(a=o.input,l=o.value)=>{for(let c of Object.keys(o))e[c]=o[c];e.input=a,e.value=l},n==null&&typeof e.initial==\"function\"&&(e.input=await e.initial.call(this,this.state,e,r)),e}async onChoice(e,r){this.emit(\"choice\",e,r,this),typeof e.onChoice==\"function\"&&await e.onChoice.call(this,this.state,e,r)}async addChoice(e,r,i){let n=await this.toChoice(e,r,i);return this.choices.push(n),this.index=this.choices.length-1,this.limit=this.choices.length,n}async newItem(e,r,i){let n=N({name:\"New choice name?\",editable:!0,newChoice:!0},e),s=await this.addChoice(n,r,i);return s.updateChoice=()=>{delete s.newChoice,s.name=s.message=s.input,s.input=\"\",s.cursor=0},this.render()}indent(e){return e.indent==null?e.level>1?\"  \".repeat(e.level-1):\"\":e.indent}dispatch(e,r){if(this.multiple&&this[r.name])return this[r.name]();this.alert()}focus(e,r){return typeof r!=\"boolean\"&&(r=e.enabled),r&&!e.enabled&&this.selected.length>=this.maxSelected?this.alert():(this.index=e.index,e.enabled=r&&!this.isDisabled(e),e)}space(){return this.multiple?(this.toggle(this.focused),this.render()):this.alert()}a(){if(this.maxSelected<this.choices.length)return this.alert();let e=this.selectable.every(r=>r.enabled);return this.choices.forEach(r=>r.enabled=!e),this.render()}i(){return this.choices.length-this.selected.length>this.maxSelected?this.alert():(this.choices.forEach(e=>e.enabled=!e.enabled),this.render())}g(e=this.focused){return this.choices.some(r=>!!r.parent)?(this.toggle(e.parent&&!e.choices?e.parent:e),this.render()):this.a()}toggle(e,r){if(!e.enabled&&this.selected.length>=this.maxSelected)return this.alert();typeof r!=\"boolean\"&&(r=!e.enabled),e.enabled=r,e.choices&&e.choices.forEach(n=>this.toggle(n,r));let i=e.parent;for(;i;){let n=i.choices.filter(s=>this.isDisabled(s));i.enabled=n.every(s=>s.enabled===!0),i=i.parent}return one(this,this.choices),this.emit(\"toggle\",e,this),e}enable(e){return this.selected.length>=this.maxSelected?this.alert():(e.enabled=!this.isDisabled(e),e.choices&&e.choices.forEach(this.enable.bind(this)),e)}disable(e){return 
e.enabled=!1,e.choices&&e.choices.forEach(this.disable.bind(this)),e}number(e){this.num+=e;let r=i=>{let n=Number(i);if(n>this.choices.length-1)return this.alert();let s=this.focused,o=this.choices.find(a=>n===a.index);if(!o.enabled&&this.selected.length>=this.maxSelected)return this.alert();if(this.visible.indexOf(o)===-1){let a=CN(this.choices),l=a.indexOf(o);if(s.index>l){let c=a.slice(l,l+this.limit),u=a.filter(g=>!c.includes(g));this.choices=c.concat(u)}else{let c=l-this.limit+1;this.choices=a.slice(c).concat(a.slice(0,c))}}return this.index=this.choices.indexOf(o),this.toggle(this.focused),this.render()};return clearTimeout(this.numberTimeout),new Promise(i=>{let n=this.choices.length,s=this.num,o=(a=!1,l)=>{clearTimeout(this.numberTimeout),a&&(l=r(s)),this.num=\"\",i(l)};if(s===\"0\"||s.length===1&&Number(s+\"0\")>n)return o(!0);if(Number(s)>n)return o(!1,this.alert());this.numberTimeout=setTimeout(()=>o(!0),this.delay)})}home(){return this.choices=CN(this.choices),this.index=0,this.render()}end(){let e=this.choices.length-this.limit,r=CN(this.choices);return this.choices=r.slice(e).concat(r.slice(0,e)),this.index=this.limit-1,this.render()}first(){return this.index=0,this.render()}last(){return this.index=this.visible.length-1,this.render()}prev(){return this.visible.length<=1?this.alert():this.up()}next(){return this.visible.length<=1?this.alert():this.down()}right(){return this.cursor>=this.input.length?this.alert():(this.cursor++,this.render())}left(){return this.cursor<=0?this.alert():(this.cursor--,this.render())}up(){let e=this.choices.length,r=this.visible.length,i=this.index;return this.options.scroll===!1&&i===0?this.alert():e>r&&i===0?this.scrollUp():(this.index=(i-1%e+e)%e,this.isDisabled()?this.up():this.render())}down(){let e=this.choices.length,r=this.visible.length,i=this.index;return this.options.scroll===!1&&i===r-1?this.alert():e>r&&i===r-1?this.scrollDown():(this.index=(i+1)%e,this.isDisabled()?this.down():this.render())}scrollUp(e=0){return this.choices=Mqe(this.choices),this.index=e,this.isDisabled()?this.up():this.render()}scrollDown(e=this.visible.length-1){return this.choices=Uqe(this.choices),this.index=e,this.isDisabled()?this.down():this.render()}async shiftUp(){if(this.options.sort===!0){this.sorting=!0,this.swap(this.index-1),await this.up(),this.sorting=!1;return}return this.scrollUp(this.index)}async shiftDown(){if(this.options.sort===!0){this.sorting=!0,this.swap(this.index+1),await this.down(),this.sorting=!1;return}return this.scrollDown(this.index)}pageUp(){return this.visible.length<=1?this.alert():(this.limit=Math.max(this.limit-1,0),this.index=Math.min(this.limit-1,this.index),this._limit=this.limit,this.isDisabled()?this.up():this.render())}pageDown(){return this.visible.length>=this.choices.length?this.alert():(this.index=Math.max(0,this.index),this.limit=Math.min(this.limit+1,this.choices.length),this._limit=this.limit,this.isDisabled()?this.down():this.render())}swap(e){Kqe(this.choices,this.index,e)}isDisabled(e=this.focused){return e&&[\"disabled\",\"collapsed\",\"hidden\",\"completing\",\"readonly\"].some(i=>e[i]===!0)?!0:e&&e.role===\"heading\"}isEnabled(e=this.focused){if(Array.isArray(e))return e.every(r=>this.isEnabled(r));if(e.choices){let r=e.choices.filter(i=>!this.isDisabled(i));return e.enabled&&r.every(i=>this.isEnabled(i))}return e.enabled&&!this.isDisabled(e)}isChoice(e,r){return e.name===r||e.index===Number(r)}isSelected(e){return 
Array.isArray(this.initial)?this.initial.some(r=>this.isChoice(e,r)):this.isChoice(e,this.initial)}map(e=[],r=\"value\"){return[].concat(e||[]).reduce((i,n)=>(i[n]=this.find(n,r),i),{})}filter(e,r){let i=(a,l)=>[a.name,l].includes(e),n=typeof e==\"function\"?e:i,o=(this.options.multiple?this.state._choices:this.choices).filter(n);return r?o.map(a=>a[r]):o}find(e,r){if(nne(e))return r?e[r]:e;let i=(o,a)=>[o.name,a].includes(e),n=typeof e==\"function\"?e:i,s=this.choices.find(n);if(s)return r?s[r]:s}findIndex(e){return this.choices.indexOf(this.find(e))}async submit(){let e=this.focused;if(!e)return this.alert();if(e.newChoice)return e.input?(e.updateChoice(),this.render()):this.alert();if(this.choices.some(o=>o.newChoice))return this.alert();let{reorder:r,sort:i}=this.options,n=this.multiple===!0,s=this.selected;return s===void 0?this.alert():(Array.isArray(s)&&r!==!1&&i!==!0&&(s=V0.reorder(s)),this.value=n?s.map(o=>o.name):s.name,super.submit())}set choices(e=[]){this.state._choices=this.state._choices||[],this.state.choices=e;for(let r of e)this.state._choices.some(i=>i.name===r.name)||this.state._choices.push(r);if(!this._initial&&this.options.initial){this._initial=!0;let r=this.initial;if(typeof r==\"string\"||typeof r==\"number\"){let i=this.find(r);i&&(this.initial=i.index,this.focus(i,!0))}}}get choices(){return one(this,this.state.choices||[])}set visible(e){this.state.visible=e}get visible(){return(this.state.visible||this.choices).slice(0,this.limit)}set limit(e){this.state.limit=e}get limit(){let{state:e,options:r,choices:i}=this,n=e.limit||this._limit||r.limit||i.length;return Math.min(n,this.height)}set value(e){super.value=e}get value(){return typeof super.value!=\"string\"&&super.value===this.initial?this.input:super.value}set index(e){this.state.index=e}get index(){return Math.max(0,this.state?this.state.index:0)}get enabled(){return this.filter(this.isEnabled.bind(this))}get focused(){let e=this.choices[this.index];return e&&this.state.submitted&&this.multiple!==!0&&(e.enabled=!0),e}get selectable(){return this.choices.filter(e=>!this.isDisabled(e))}get selected(){return this.multiple?this.enabled:this.focused}};function one(t,e){if(e instanceof Promise)return e;if(typeof e==\"function\"){if(V0.isAsyncFn(e))return e;e=e.call(t,t)}for(let r of e){if(Array.isArray(r.choices)){let i=r.choices.filter(n=>!t.isDisabled(n));r.enabled=i.every(n=>n.enabled===!0)}t.isDisabled(r)===!0&&delete r.enabled}return e}ine.exports=sne});var kl=w((Mpt,ane)=>{\"use strict\";var Hqe=XC(),mN=Xi(),Ane=class extends Hqe{constructor(e){super(e);this.emptyError=this.options.emptyError||\"No items were selected\"}async dispatch(e,r){if(this.multiple)return this[r.name]?await this[r.name](e,r):await super.dispatch(e,r);this.alert()}separator(){if(this.options.separator)return super.separator();let e=this.styles.muted(this.symbols.ellipsis);return this.state.submitted?super.separator():e}pointer(e,r){return!this.multiple||this.options.pointer?super.pointer(e,r):\"\"}indicator(e,r){return this.multiple?super.indicator(e,r):\"\"}choiceMessage(e,r){let i=this.resolve(e.message,this.state,e,r);return e.role===\"heading\"&&!mN.hasColor(i)&&(i=this.styles.strong(i)),this.resolve(i,this.state,e,r)}choiceSeparator(){return\":\"}async renderChoice(e,r){await this.onChoice(e,r);let i=this.index===r,n=await this.pointer(e,r),s=await this.indicator(e,r)+(e.pad||\"\"),o=await this.resolve(e.hint,this.state,e,r);o&&!mN.hasColor(o)&&(o=this.styles.muted(o));let a=this.indent(e),l=await 
this.choiceMessage(e,r),c=()=>[this.margin[3],a+n+s,l,this.margin[1],o].filter(Boolean).join(\" \");return e.role===\"heading\"?c():e.disabled?(mN.hasColor(l)||(l=this.styles.disabled(l)),c()):(i&&(l=this.styles.em(l)),c())}async renderChoices(){if(this.state.loading===\"choices\")return this.styles.warning(\"Loading choices\");if(this.state.submitted)return\"\";let e=this.visible.map(async(s,o)=>await this.renderChoice(s,o)),r=await Promise.all(e);r.length||r.push(this.styles.danger(\"No matching choices\"));let i=this.margin[0]+r.join(`\n`),n;return this.options.choicesHeader&&(n=await this.resolve(this.options.choicesHeader,this.state)),[n,i].filter(Boolean).join(`\n`)}format(){return!this.state.submitted||this.state.cancelled?\"\":Array.isArray(this.selected)?this.selected.map(e=>this.styles.primary(e.name)).join(\", \"):this.styles.primary(this.selected.name)}async render(){let{submitted:e,size:r}=this.state,i=\"\",n=await this.header(),s=await this.prefix(),o=await this.separator(),a=await this.message();this.options.promptLine!==!1&&(i=[s,a,o,\"\"].join(\" \"),this.state.prompt=i);let l=await this.format(),c=await this.error()||await this.hint(),u=await this.renderChoices(),g=await this.footer();l&&(i+=l),c&&!i.includes(c)&&(i+=\" \"+c),e&&!l&&!u.trim()&&this.multiple&&this.emptyError!=null&&(i+=this.styles.danger(this.emptyError)),this.clear(r),this.write([n,i,u,g].filter(Boolean).join(`\n`)),this.write(this.margin[2]),this.restore()}};ane.exports=Ane});var une=w((Upt,lne)=>{\"use strict\";var jqe=kl(),Gqe=(t,e)=>{let r=t.toLowerCase();return i=>{let s=i.toLowerCase().indexOf(r),o=e(i.slice(s,s+r.length));return s>=0?i.slice(0,s)+o+i.slice(s+r.length):i}},cne=class extends jqe{constructor(e){super(e);this.cursorShow()}moveCursor(e){this.state.cursor+=e}dispatch(e){return this.append(e)}space(e){return this.options.multiple?super.space(e):this.append(e)}append(e){let{cursor:r,input:i}=this.state;return this.input=i.slice(0,r)+e+i.slice(r),this.moveCursor(1),this.complete()}delete(){let{cursor:e,input:r}=this.state;return r?(this.input=r.slice(0,e-1)+r.slice(e),this.moveCursor(-1),this.complete()):this.alert()}deleteForward(){let{cursor:e,input:r}=this.state;return r[e]===void 0?this.alert():(this.input=`${r}`.slice(0,e)+`${r}`.slice(e+1),this.complete())}number(e){return this.append(e)}async complete(){this.completing=!0,this.choices=await this.suggest(this.input,this.state._choices),this.state.limit=void 0,this.index=Math.min(Math.max(this.visible.length-1,0),this.index),await this.render(),this.completing=!1}suggest(e=this.input,r=this.state._choices){if(typeof this.options.suggest==\"function\")return this.options.suggest.call(this,e,r);let i=e.toLowerCase();return r.filter(n=>n.message.toLowerCase().includes(i))}pointer(){return\"\"}format(){if(!this.focused)return this.input;if(this.options.multiple&&this.state.submitted)return this.selected.map(e=>this.styles.primary(e.message)).join(\", \");if(this.state.submitted){let e=this.value=this.input=this.focused.value;return this.styles.primary(e)}return this.input}async render(){if(this.state.status!==\"pending\")return super.render();let e=this.options.highlight?this.options.highlight.bind(this):this.styles.placeholder,r=Gqe(this.input,e),i=this.choices;this.choices=i.map(n=>te(N({},n),{message:r(n.message)})),await super.render(),this.choices=i}submit(){return this.options.multiple&&(this.value=this.selected.map(e=>e.name)),super.submit()}};lne.exports=cne});var IN=w((Kpt,gne)=>{\"use strict\";var 
EN=Xi();gne.exports=(t,e={})=>{t.cursorHide();let{input:r=\"\",initial:i=\"\",pos:n,showCursor:s=!0,color:o}=e,a=o||t.styles.placeholder,l=EN.inverse(t.styles.primary),c=m=>l(t.styles.black(m)),u=r,g=\" \",f=c(g);if(t.blink&&t.blink.off===!0&&(c=m=>m,f=\"\"),s&&n===0&&i===\"\"&&r===\"\")return c(g);if(s&&n===0&&(r===i||r===\"\"))return c(i[0])+a(i.slice(1));i=EN.isPrimitive(i)?`${i}`:\"\",r=EN.isPrimitive(r)?`${r}`:\"\";let h=i&&i.startsWith(r)&&i!==r,p=h?c(i[r.length]):f;if(n!==r.length&&s===!0&&(u=r.slice(0,n)+c(r[n])+r.slice(n+1),p=\"\"),s===!1&&(p=\"\"),h){let m=t.styles.unstyle(u+p);return u+p+a(i.slice(m.length))}return u+p}});var X0=w((Hpt,fne)=>{\"use strict\";var Yqe=Co(),qqe=kl(),Jqe=IN(),hne=class extends qqe{constructor(e){super(te(N({},e),{multiple:!0}));this.type=\"form\",this.initial=this.options.initial,this.align=[this.options.align,\"right\"].find(r=>r!=null),this.emptyError=\"\",this.values={}}async reset(e){return await super.reset(),e===!0&&(this._index=this.index),this.index=this._index,this.values={},this.choices.forEach(r=>r.reset&&r.reset()),this.render()}dispatch(e){return!!e&&this.append(e)}append(e){let r=this.focused;if(!r)return this.alert();let{cursor:i,input:n}=r;return r.value=r.input=n.slice(0,i)+e+n.slice(i),r.cursor++,this.render()}delete(){let e=this.focused;if(!e||e.cursor<=0)return this.alert();let{cursor:r,input:i}=e;return e.value=e.input=i.slice(0,r-1)+i.slice(r),e.cursor--,this.render()}deleteForward(){let e=this.focused;if(!e)return this.alert();let{cursor:r,input:i}=e;if(i[r]===void 0)return this.alert();let n=`${i}`.slice(0,r)+`${i}`.slice(r+1);return e.value=e.input=n,this.render()}right(){let e=this.focused;return e?e.cursor>=e.input.length?this.alert():(e.cursor++,this.render()):this.alert()}left(){let e=this.focused;return e?e.cursor<=0?this.alert():(e.cursor--,this.render()):this.alert()}space(e,r){return this.dispatch(e,r)}number(e,r){return this.dispatch(e,r)}next(){let e=this.focused;if(!e)return this.alert();let{initial:r,input:i}=e;return r&&r.startsWith(i)&&i!==r?(e.value=e.input=r,e.cursor=e.value.length,this.render()):super.next()}prev(){let e=this.focused;return e?e.cursor===0?super.prev():(e.value=e.input=\"\",e.cursor=0,this.render()):this.alert()}separator(){return\"\"}format(e){return this.state.submitted?\"\":super.format(e)}pointer(){return\"\"}indicator(e){return e.input?\"\\u29BF\":\"\\u2299\"}async choiceSeparator(e,r){let i=await this.resolve(e.separator,this.state,e,r)||\":\";return i?\" \"+this.styles.disabled(i):\"\"}async renderChoice(e,r){await this.onChoice(e,r);let{state:i,styles:n}=this,{cursor:s,initial:o=\"\",name:a,hint:l,input:c=\"\"}=e,{muted:u,submitted:g,primary:f,danger:h}=n,p=l,m=this.index===r,y=e.validate||(()=>!0),Q=await this.choiceSeparator(e,r),S=e.message;this.align===\"right\"&&(S=S.padStart(this.longest+1,\" \")),this.align===\"left\"&&(S=S.padEnd(this.longest+1,\" \"));let x=this.values[a]=c||o,M=c?\"success\":\"dark\";await y.call(e,x,this.state)!==!0&&(M=\"danger\");let U=n[M](await this.indicator(e,r))+(e.pad||\"\"),J=this.indent(e),W=()=>[J,U,S+Q,c,p].filter(Boolean).join(\" \");if(i.submitted)return S=Yqe.unstyle(S),c=g(c),p=\"\",W();if(e.format)c=await e.format.call(this,c,e,r);else{let ee=this.styles.muted;c=Jqe(this,{input:c,initial:o,pos:s,showCursor:m,color:ee})}return this.isValue(c)||(c=this.styles.muted(this.symbols.ellipsis)),e.result&&(this.values[a]=await e.result.call(this,x,e,r)),m&&(S=f(S)),e.error?c+=(c?\" \":\"\")+h(e.error.trim()):e.hint&&(c+=(c?\" 
\":\"\")+u(e.hint.trim())),W()}async submit(){return this.value=this.values,super.base.submit.call(this)}};fne.exports=hne});var yN=w((jpt,pne)=>{\"use strict\";var Wqe=X0(),zqe=()=>{throw new Error(\"expected prompt to have a custom authenticate method\")},dne=(t=zqe)=>{class e extends Wqe{constructor(i){super(i)}async submit(){this.value=await t.call(this,this.values,this.state),super.base.submit.call(this)}static create(i){return dne(i)}}return e};pne.exports=dne()});var Ene=w((Gpt,Cne)=>{\"use strict\";var _qe=yN();function Vqe(t,e){return t.username===this.options.username&&t.password===this.options.password}var mne=(t=Vqe)=>{let e=[{name:\"username\",message:\"username\"},{name:\"password\",message:\"password\",format(i){return this.options.showPassword?i:(this.state.submitted?this.styles.primary:this.styles.muted)(this.symbols.asterisk.repeat(i.length))}}];class r extends _qe.create(t){constructor(n){super(te(N({},n),{choices:e}))}static create(n){return mne(n)}}return r};Cne.exports=mne()});var Z0=w((Ypt,Ine)=>{\"use strict\";var Xqe=zf(),{isPrimitive:Zqe,hasColor:$qe}=Xi(),yne=class extends Xqe{constructor(e){super(e);this.cursorHide()}async initialize(){let e=await this.resolve(this.initial,this.state);this.input=await this.cast(e),await super.initialize()}dispatch(e){return this.isValue(e)?(this.input=e,this.submit()):this.alert()}format(e){let{styles:r,state:i}=this;return i.submitted?r.success(e):r.primary(e)}cast(e){return this.isTrue(e)}isTrue(e){return/^[ty1]/i.test(e)}isFalse(e){return/^[fn0]/i.test(e)}isValue(e){return Zqe(e)&&(this.isTrue(e)||this.isFalse(e))}async hint(){if(this.state.status===\"pending\"){let e=await this.element(\"hint\");return $qe(e)?e:this.styles.muted(e)}}async render(){let{input:e,size:r}=this.state,i=await this.prefix(),n=await this.separator(),s=await this.message(),o=this.styles.muted(this.default),a=[i,s,o,n].filter(Boolean).join(\" \");this.state.prompt=a;let l=await this.header(),c=this.value=this.cast(e),u=await this.format(c),g=await this.error()||await this.hint(),f=await this.footer();g&&!a.includes(g)&&(u+=\" \"+g),a+=\" \"+u,this.clear(r),this.write([l,a,f].filter(Boolean).join(`\n`)),this.restore()}set value(e){super.value=e}get value(){return this.cast(super.value)}};Ine.exports=yne});var bne=w((qpt,wne)=>{\"use strict\";var eJe=Z0(),Bne=class extends eJe{constructor(e){super(e);this.default=this.options.default||(this.initial?\"(Y/n)\":\"(y/N)\")}};wne.exports=Bne});var Sne=w((Jpt,Qne)=>{\"use strict\";var tJe=kl(),rJe=X0(),_f=rJe.prototype,vne=class extends tJe{constructor(e){super(te(N({},e),{multiple:!0}));this.align=[this.options.align,\"left\"].find(r=>r!=null),this.emptyError=\"\",this.values={}}dispatch(e,r){let i=this.focused,n=i.parent||{};return!i.editable&&!n.editable&&(e===\"a\"||e===\"i\")?super[e]():_f.dispatch.call(this,e,r)}append(e,r){return _f.append.call(this,e,r)}delete(e,r){return _f.delete.call(this,e,r)}space(e){return this.focused.editable?this.append(e):super.space()}number(e){return this.focused.editable?this.append(e):super.number(e)}next(){return this.focused.editable?_f.next.call(this):super.next()}prev(){return this.focused.editable?_f.prev.call(this):super.prev()}async indicator(e,r){let i=e.indicator||\"\",n=e.editable?i:super.indicator(e,r);return await this.resolve(n,this.state,e,r)||\"\"}indent(e){return e.role===\"heading\"?\"\":e.editable?\" \":\"  \"}async renderChoice(e,r){return e.indent=\"\",e.editable?_f.renderChoice.call(this,e,r):super.renderChoice(e,r)}error(){return\"\"}footer(){return 
this.state.error}async validate(){let e=!0;for(let r of this.choices){if(typeof r.validate!=\"function\"||r.role===\"heading\")continue;let i=r.parent?this.value[r.parent.name]:this.value;if(r.editable?i=r.value===r.name?r.initial||\"\":r.value:this.isDisabled(r)||(i=r.enabled===!0),e=await r.validate(i,this.state),e!==!0)break}return e!==!0&&(this.state.error=typeof e==\"string\"?e:\"Invalid Input\"),e}submit(){if(this.focused.newChoice===!0)return super.submit();if(this.choices.some(e=>e.newChoice))return this.alert();this.value={};for(let e of this.choices){let r=e.parent?this.value[e.parent.name]:this.value;if(e.role===\"heading\"){this.value[e.name]={};continue}e.editable?r[e.name]=e.value===e.name?e.initial||\"\":e.value:this.isDisabled(e)||(r[e.name]=e.enabled===!0)}return this.base.submit.call(this)}};Qne.exports=vne});var Eu=w((Wpt,kne)=>{\"use strict\";var iJe=zf(),nJe=IN(),{isPrimitive:sJe}=Xi(),xne=class extends iJe{constructor(e){super(e);this.initial=sJe(this.initial)?String(this.initial):\"\",this.initial&&this.cursorHide(),this.state.prevCursor=0,this.state.clipboard=[]}async keypress(e,r={}){let i=this.state.prevKeypress;return this.state.prevKeypress=r,this.options.multiline===!0&&r.name===\"return\"&&(!i||i.name!==\"return\")?this.append(`\n`,r):super.keypress(e,r)}moveCursor(e){this.cursor+=e}reset(){return this.input=this.value=\"\",this.cursor=0,this.render()}dispatch(e,r){if(!e||r.ctrl||r.code)return this.alert();this.append(e)}append(e){let{cursor:r,input:i}=this.state;this.input=`${i}`.slice(0,r)+e+`${i}`.slice(r),this.moveCursor(String(e).length),this.render()}insert(e){this.append(e)}delete(){let{cursor:e,input:r}=this.state;if(e<=0)return this.alert();this.input=`${r}`.slice(0,e-1)+`${r}`.slice(e),this.moveCursor(-1),this.render()}deleteForward(){let{cursor:e,input:r}=this.state;if(r[e]===void 0)return this.alert();this.input=`${r}`.slice(0,e)+`${r}`.slice(e+1),this.render()}cutForward(){let e=this.cursor;if(this.input.length<=e)return this.alert();this.state.clipboard.push(this.input.slice(e)),this.input=this.input.slice(0,e),this.render()}cutLeft(){let e=this.cursor;if(e===0)return this.alert();let r=this.input.slice(0,e),i=this.input.slice(e),n=r.split(\" \");this.state.clipboard.push(n.pop()),this.input=n.join(\" \"),this.cursor=this.input.length,this.input+=i,this.render()}paste(){if(!this.state.clipboard.length)return this.alert();this.insert(this.state.clipboard.pop()),this.render()}toggleCursor(){this.state.prevCursor?(this.cursor=this.state.prevCursor,this.state.prevCursor=0):(this.state.prevCursor=this.cursor,this.cursor=0),this.render()}first(){this.cursor=0,this.render()}last(){this.cursor=this.input.length-1,this.render()}next(){let e=this.initial!=null?String(this.initial):\"\";if(!e||!e.startsWith(this.input))return this.alert();this.input=this.initial,this.cursor=this.initial.length,this.render()}prev(){if(!this.input)return this.alert();this.reset()}backward(){return this.left()}forward(){return this.right()}right(){return this.cursor>=this.input.length?this.alert():(this.moveCursor(1),this.render())}left(){return this.cursor<=0?this.alert():(this.moveCursor(-1),this.render())}isValue(e){return!!e}async format(e=this.value){let r=await this.resolve(this.initial,this.state);return this.state.submitted?this.styles.submitted(e||r):nJe(this,{input:e,initial:r,pos:this.cursor})}async render(){let e=this.state.size,r=await this.prefix(),i=await this.separator(),n=await this.message(),s=[r,n,i].filter(Boolean).join(\" \");this.state.prompt=s;let 
o=await this.header(),a=await this.format(),l=await this.error()||await this.hint(),c=await this.footer();l&&!a.includes(l)&&(a+=\" \"+l),s+=\" \"+a,this.clear(e),this.write([o,s,c].filter(Boolean).join(`\n`)),this.restore()}};kne.exports=xne});var Dne=w((zpt,Pne)=>{\"use strict\";var oJe=t=>t.filter((e,r)=>t.lastIndexOf(e)===r),$0=t=>oJe(t).filter(Boolean);Pne.exports=(t,e={},r=\"\")=>{let{past:i=[],present:n=\"\"}=e,s,o;switch(t){case\"prev\":case\"undo\":return s=i.slice(0,i.length-1),o=i[i.length-1]||\"\",{past:$0([r,...s]),present:o};case\"next\":case\"redo\":return s=i.slice(1),o=i[0]||\"\",{past:$0([...s,r]),present:o};case\"save\":return{past:$0([...i,r]),present:\"\"};case\"remove\":return o=$0(i.filter(a=>a!==r)),n=\"\",o.length&&(n=o.pop()),{past:o,present:n};default:throw new Error(`Invalid action: \"${t}\"`)}}});var wN=w((_pt,Rne)=>{\"use strict\";var aJe=Eu(),Fne=Dne(),Nne=class extends aJe{constructor(e){super(e);let r=this.options.history;if(r&&r.store){let i=r.values||this.initial;this.autosave=!!r.autosave,this.store=r.store,this.data=this.store.get(\"values\")||{past:[],present:i},this.initial=this.data.present||this.data.past[this.data.past.length-1]}}completion(e){return this.store?(this.data=Fne(e,this.data,this.input),this.data.present?(this.input=this.data.present,this.cursor=this.input.length,this.render()):this.alert()):this.alert()}altUp(){return this.completion(\"prev\")}altDown(){return this.completion(\"next\")}prev(){return this.save(),super.prev()}save(){!this.store||(this.data=Fne(\"save\",this.data,this.input),this.store.set(\"values\",this.data))}submit(){return this.store&&this.autosave===!0&&this.save(),super.submit()}};Rne.exports=Nne});var One=w((Vpt,Lne)=>{\"use strict\";var AJe=Eu(),Tne=class extends AJe{format(){return\"\"}};Lne.exports=Tne});var Kne=w((Xpt,Mne)=>{\"use strict\";var lJe=Eu(),Une=class extends lJe{constructor(e={}){super(e);this.sep=this.options.separator||/, */,this.initial=e.initial||\"\"}split(e=this.value){return e?String(e).split(this.sep):[]}format(){let e=this.state.submitted?this.styles.primary:r=>r;return this.list.map(e).join(\", \")}async submit(e){let r=this.state.error||await this.validate(this.list,this.state);return r!==!0?(this.state.error=r,super.submit()):(this.value=this.list,super.submit())}get list(){return this.split()}};Mne.exports=Une});var Gne=w((Zpt,Hne)=>{\"use strict\";var cJe=kl(),jne=class extends cJe{constructor(e){super(te(N({},e),{multiple:!0}))}};Hne.exports=jne});var BN=w(($pt,Yne)=>{\"use strict\";var uJe=Eu(),qne=class extends uJe{constructor(e={}){super(N({style:\"number\"},e));this.min=this.isValue(e.min)?this.toNumber(e.min):-Infinity,this.max=this.isValue(e.max)?this.toNumber(e.max):Infinity,this.delay=e.delay!=null?e.delay:1e3,this.float=e.float!==!1,this.round=e.round===!0||e.float===!1,this.major=e.major||10,this.minor=e.minor||1,this.initial=e.initial!=null?e.initial:\"\",this.input=String(this.initial),this.cursor=this.input.length,this.cursorShow()}append(e){return!/[-+.]/.test(e)||e===\".\"&&this.input.includes(\".\")?this.alert(\"invalid number\"):super.append(e)}number(e){return super.append(e)}next(){return this.input&&this.input!==this.initial?this.alert():this.isValue(this.initial)?(this.input=this.initial,this.cursor=String(this.initial).length,this.render()):this.alert()}up(e){let r=e||this.minor,i=this.toNumber(this.input);return i>this.max+r?this.alert():(this.input=`${i+r}`,this.render())}down(e){let r=e||this.minor,i=this.toNumber(this.input);return 
i<this.min-r?this.alert():(this.input=`${i-r}`,this.render())}shiftDown(){return this.down(this.major)}shiftUp(){return this.up(this.major)}format(e=this.input){return typeof this.options.format==\"function\"?this.options.format.call(this,e):this.styles.info(e)}toNumber(e=\"\"){return this.float?+e:Math.round(+e)}isValue(e){return/^[-+]?[0-9]+((\\.)|(\\.[0-9]+))?$/.test(e)}submit(){let e=[this.input,this.initial].find(r=>this.isValue(r));return this.value=this.toNumber(e||0),super.submit()}};Yne.exports=qne});var Wne=w((edt,Jne)=>{Jne.exports=BN()});var Vne=w((tdt,zne)=>{\"use strict\";var gJe=Eu(),_ne=class extends gJe{constructor(e){super(e);this.cursorShow()}format(e=this.input){return this.keypressed?(this.state.submitted?this.styles.primary:this.styles.muted)(this.symbols.asterisk.repeat(e.length)):\"\"}};zne.exports=_ne});var ese=w((rdt,Xne)=>{\"use strict\";var fJe=Co(),hJe=XC(),Zne=Xi(),$ne=class extends hJe{constructor(e={}){super(e);this.widths=[].concat(e.messageWidth||50),this.align=[].concat(e.align||\"left\"),this.linebreak=e.linebreak||!1,this.edgeLength=e.edgeLength||3,this.newline=e.newline||`\n   `;let r=e.startNumber||1;typeof this.scale==\"number\"&&(this.scaleKey=!1,this.scale=Array(this.scale).fill(0).map((i,n)=>({name:n+r})))}async reset(){return this.tableized=!1,await super.reset(),this.render()}tableize(){if(this.tableized===!0)return;this.tableized=!0;let e=0;for(let r of this.choices){e=Math.max(e,r.message.length),r.scaleIndex=r.initial||2,r.scale=[];for(let i=0;i<this.scale.length;i++)r.scale.push({index:i})}this.widths[0]=Math.min(this.widths[0],e+3)}async dispatch(e,r){if(this.multiple)return this[r.name]?await this[r.name](e,r):await super.dispatch(e,r);this.alert()}heading(e,r,i){return this.styles.strong(e)}separator(){return this.styles.muted(this.symbols.ellipsis)}right(){let e=this.focused;return e.scaleIndex>=this.scale.length-1?this.alert():(e.scaleIndex++,this.render())}left(){let e=this.focused;return e.scaleIndex<=0?this.alert():(e.scaleIndex--,this.render())}indent(){return\"\"}format(){return this.state.submitted?this.choices.map(r=>this.styles.info(r.index)).join(\", \"):\"\"}pointer(){return\"\"}renderScaleKey(){if(this.scaleKey===!1||this.state.submitted)return\"\";let e=this.scale.map(i=>`   ${i.name} - ${i.message}`);return[\"\",...e].map(i=>this.styles.muted(i)).join(`\n`)}renderScaleHeading(e){let r=this.scale.map(l=>l.name);typeof this.options.renderScaleHeading==\"function\"&&(r=this.options.renderScaleHeading.call(this,e));let i=this.scaleLength-r.join(\"\").length,n=Math.round(i/(r.length-1)),o=r.map(l=>this.styles.strong(l)).join(\" \".repeat(n)),a=\" \".repeat(this.widths[0]);return this.margin[3]+a+this.margin[1]+o}scaleIndicator(e,r,i){if(typeof this.options.scaleIndicator==\"function\")return this.options.scaleIndicator.call(this,e,r,i);let n=e.scaleIndex===r.index;return r.disabled?this.styles.hint(this.symbols.radio.disabled):n?this.styles.success(this.symbols.radio.on):this.symbols.radio.off}renderScale(e,r){let i=e.scale.map(s=>this.scaleIndicator(e,s,r)),n=this.term===\"Hyper\"?\"\":\" \";return i.join(n+this.symbols.line.repeat(this.edgeLength))}async renderChoice(e,r){await this.onChoice(e,r);let i=this.index===r,n=await this.pointer(e,r),s=await e.hint;s&&!Zne.hasColor(s)&&(s=this.styles.muted(s));let o=p=>this.margin[3]+p.replace(/\\s+$/,\"\").padEnd(this.widths[0],\" \"),a=this.newline,l=this.indent(e),c=await this.resolve(e.message,this.state,e,r),u=await 
this.renderScale(e,r),g=this.margin[1]+this.margin[3];this.scaleLength=fJe.unstyle(u).length,this.widths[0]=Math.min(this.widths[0],this.width-this.scaleLength-g.length);let h=Zne.wordWrap(c,{width:this.widths[0],newline:a}).split(`\n`).map(p=>o(p)+this.margin[1]);return i&&(u=this.styles.info(u),h=h.map(p=>this.styles.info(p))),h[0]+=u,this.linebreak&&h.push(\"\"),[l+n,h.join(`\n`)].filter(Boolean)}async renderChoices(){if(this.state.submitted)return\"\";this.tableize();let e=this.visible.map(async(n,s)=>await this.renderChoice(n,s)),r=await Promise.all(e),i=await this.renderScaleHeading();return this.margin[0]+[i,...r.map(n=>n.join(\" \"))].join(`\n`)}async render(){let{submitted:e,size:r}=this.state,i=await this.prefix(),n=await this.separator(),s=await this.message(),o=\"\";this.options.promptLine!==!1&&(o=[i,s,n,\"\"].join(\" \"),this.state.prompt=o);let a=await this.header(),l=await this.format(),c=await this.renderScaleKey(),u=await this.error()||await this.hint(),g=await this.renderChoices(),f=await this.footer(),h=this.emptyError;l&&(o+=l),u&&!o.includes(u)&&(o+=\" \"+u),e&&!l&&!g.trim()&&this.multiple&&h!=null&&(o+=this.styles.danger(h)),this.clear(r),this.write([a,o,c,g,f].filter(Boolean).join(`\n`)),this.state.submitted||this.write(this.margin[2]),this.restore()}submit(){this.value={};for(let e of this.choices)this.value[e.name]=e.scaleIndex;return this.base.submit.call(this)}};Xne.exports=$ne});var nse=w((idt,tse)=>{\"use strict\";var rse=Co(),pJe=(t=\"\")=>typeof t==\"string\"?t.replace(/^['\"]|['\"]$/g,\"\"):\"\",ise=class{constructor(e){this.name=e.key,this.field=e.field||{},this.value=pJe(e.initial||this.field.initial||\"\"),this.message=e.message||this.name,this.cursor=0,this.input=\"\",this.lines=[]}},dJe=async(t={},e={},r=i=>i)=>{let i=new Set,n=t.fields||[],s=t.template,o=[],a=[],l=[],c=1;typeof s==\"function\"&&(s=await s());let u=-1,g=()=>s[++u],f=()=>s[u+1],h=p=>{p.line=c,o.push(p)};for(h({type:\"bos\",value:\"\"});u<s.length-1;){let p=g();if(/^[^\\S\\n ]$/.test(p)){h({type:\"text\",value:p});continue}if(p===`\n`){h({type:\"newline\",value:p}),c++;continue}if(p===\"\\\\\"){p+=g(),h({type:\"text\",value:p});continue}if((p===\"$\"||p===\"#\"||p===\"{\")&&f()===\"{\"){p+=g();let Q={type:\"template\",open:p,inner:\"\",close:\"\",value:p},S;for(;S=g();){if(S===\"}\"){f()===\"}\"&&(S+=g()),Q.value+=S,Q.close=S;break}S===\":\"?(Q.initial=\"\",Q.key=Q.inner):Q.initial!==void 0&&(Q.initial+=S),Q.value+=S,Q.inner+=S}Q.template=Q.open+(Q.initial||Q.inner)+Q.close,Q.key=Q.key||Q.inner,e.hasOwnProperty(Q.key)&&(Q.initial=e[Q.key]),Q=r(Q),h(Q),l.push(Q.key),i.add(Q.key);let x=a.find(M=>M.name===Q.key);Q.field=n.find(M=>M.name===Q.key),x||(x=new ise(Q),a.push(x)),x.lines.push(Q.line-1);continue}let m=o[o.length-1];m.type===\"text\"&&m.line===c?m.value+=p:h({type:\"text\",value:p})}return h({type:\"eos\",value:\"\"}),{input:s,tabstops:o,unique:i,keys:l,items:a}};tse.exports=async t=>{let e=t.options,r=new Set(e.required===!0?[]:e.required||[]),i=N(N({},e.values),e.initial),{tabstops:n,items:s,keys:o}=await dJe(e,i),a=bN(\"result\",t,e),l=bN(\"format\",t,e),c=bN(\"validate\",t,e,!0),u=t.isValue.bind(t);return async(g={},f=!1)=>{let h=0;g.required=r,g.items=s,g.keys=o,g.output=\"\";let p=async(S,x,M,Y)=>{let U=await c(S,x,M,Y);return U===!1?\"Invalid field \"+M.name:U};for(let S of n){let x=S.value,M=S.key;if(S.type!==\"template\"){x&&(g.output+=x);continue}if(S.type===\"template\"){let Y=s.find(Z=>Z.name===M);e.required===!0&&g.required.add(Y.name);let 
U=[Y.input,g.values[Y.value],Y.value,x].find(u),W=(Y.field||{}).message||S.inner;if(f){let Z=await p(g.values[M],g,Y,h);if(Z&&typeof Z==\"string\"||Z===!1){g.invalid.set(M,Z);continue}g.invalid.delete(M);let A=await a(g.values[M],g,Y,h);g.output+=rse.unstyle(A);continue}Y.placeholder=!1;let ee=x;x=await l(x,g,Y,h),U!==x?(g.values[M]=U,x=t.styles.typing(U),g.missing.delete(W)):(g.values[M]=void 0,U=`<${W}>`,x=t.styles.primary(U),Y.placeholder=!0,g.required.has(M)&&g.missing.add(W)),g.missing.has(W)&&g.validating&&(x=t.styles.warning(U)),g.invalid.has(M)&&g.validating&&(x=t.styles.danger(U)),h===g.index&&(ee!==x?x=t.styles.underline(x):x=t.styles.heading(rse.unstyle(x))),h++}x&&(g.output+=x)}let m=g.output.split(`\n`).map(S=>\" \"+S),y=s.length,Q=0;for(let S of s)g.invalid.has(S.name)&&S.lines.forEach(x=>{m[x][0]===\" \"&&(m[x]=g.styles.danger(g.symbols.bullet)+m[x].slice(1))}),t.isValue(g.values[S.name])&&Q++;return g.completed=(Q/y*100).toFixed(0),g.output=m.join(`\n`),g.output}};function bN(t,e,r,i){return(n,s,o,a)=>typeof o.field[t]==\"function\"?o.field[t].call(e,n,s,o,a):[i,n].find(l=>e.isValue(l))}});var ase=w((ndt,sse)=>{\"use strict\";var CJe=Co(),mJe=nse(),EJe=zf(),ose=class extends EJe{constructor(e){super(e);this.cursorHide(),this.reset(!0)}async initialize(){this.interpolate=await mJe(this),await super.initialize()}async reset(e){this.state.keys=[],this.state.invalid=new Map,this.state.missing=new Set,this.state.completed=0,this.state.values={},e!==!0&&(await this.initialize(),await this.render())}moveCursor(e){let r=this.getItem();this.cursor+=e,r.cursor+=e}dispatch(e,r){if(!r.code&&!r.ctrl&&e!=null&&this.getItem()){this.append(e,r);return}this.alert()}append(e,r){let i=this.getItem(),n=i.input.slice(0,this.cursor),s=i.input.slice(this.cursor);this.input=i.input=`${n}${e}${s}`,this.moveCursor(1),this.render()}delete(){let e=this.getItem();if(this.cursor<=0||!e.input)return this.alert();let r=e.input.slice(this.cursor),i=e.input.slice(0,this.cursor-1);this.input=e.input=`${i}${r}`,this.moveCursor(-1),this.render()}increment(e){return e>=this.state.keys.length-1?0:e+1}decrement(e){return e<=0?this.state.keys.length-1:e-1}first(){this.state.index=0,this.render()}last(){this.state.index=this.state.keys.length-1,this.render()}right(){if(this.cursor>=this.input.length)return this.alert();this.moveCursor(1),this.render()}left(){if(this.cursor<=0)return this.alert();this.moveCursor(-1),this.render()}prev(){this.state.index=this.decrement(this.state.index),this.getItem(),this.render()}next(){this.state.index=this.increment(this.state.index),this.getItem(),this.render()}up(){this.prev()}down(){this.next()}format(e){let r=this.state.completed<100?this.styles.warning:this.styles.success;return this.state.submitted===!0&&this.state.completed!==100&&(r=this.styles.danger),r(`${this.state.completed}% completed`)}async render(){let{index:e,keys:r=[],submitted:i,size:n}=this.state,s=[this.options.newline,`\n`].find(S=>S!=null),o=await this.prefix(),a=await this.separator(),l=await this.message(),c=[o,l,a].filter(Boolean).join(\" \");this.state.prompt=c;let u=await this.header(),g=await this.error()||\"\",f=await this.hint()||\"\",h=i?\"\":await this.interpolate(this.state),p=this.state.key=r[e]||\"\",m=await this.format(p),y=await this.footer();m&&(c+=\" \"+m),f&&!m&&this.state.completed===0&&(c+=\" \"+f),this.clear(n);let Q=[u,c,h,y,g.trim()];this.write(Q.filter(Boolean).join(s)),this.restore()}getItem(e){let{items:r,keys:i,index:n}=this.state,s=r.find(o=>o.name===i[n]);return 
s&&s.input!=null&&(this.input=s.input,this.cursor=s.cursor),s}async submit(){typeof this.interpolate!=\"function\"&&await this.initialize(),await this.interpolate(this.state,!0);let{invalid:e,missing:r,output:i,values:n}=this.state;if(e.size){let a=\"\";for(let[l,c]of e)a+=`Invalid ${l}: ${c}\n`;return this.state.error=a,super.submit()}if(r.size)return this.state.error=\"Required: \"+[...r.keys()].join(\", \"),super.submit();let o=CJe.unstyle(i).split(`\n`).map(a=>a.slice(1)).join(`\n`);return this.value={values:n,result:o},super.submit()}};sse.exports=ose});var cse=w((sdt,Ase)=>{\"use strict\";var IJe=\"(Use <shift>+<up/down> to sort)\",yJe=kl(),lse=class extends yJe{constructor(e){super(te(N({},e),{reorder:!1,sort:!0,multiple:!0}));this.state.hint=[this.options.hint,IJe].find(this.isValue.bind(this))}indicator(){return\"\"}async renderChoice(e,r){let i=await super.renderChoice(e,r),n=this.symbols.identicalTo+\" \",s=this.index===r&&this.sorting?this.styles.muted(n):\"  \";return this.options.drag===!1&&(s=\"\"),this.options.numbered===!0?s+`${r+1} - `+i:s+i}get selected(){return this.choices}submit(){return this.value=this.choices.map(e=>e.value),super.submit()}};Ase.exports=lse});var fse=w((odt,use)=>{\"use strict\";var wJe=XC(),gse=class extends wJe{constructor(e={}){super(e);if(this.emptyError=e.emptyError||\"No items were selected\",this.term=process.env.TERM_PROGRAM,!this.options.header){let r=[\"\",\"4 - Strongly Agree\",\"3 - Agree\",\"2 - Neutral\",\"1 - Disagree\",\"0 - Strongly Disagree\",\"\"];r=r.map(i=>this.styles.muted(i)),this.state.header=r.join(`\n   `)}}async toChoices(...e){if(this.createdScales)return!1;this.createdScales=!0;let r=await super.toChoices(...e);for(let i of r)i.scale=BJe(5,this.options),i.scaleIdx=2;return r}dispatch(){this.alert()}space(){let e=this.focused,r=e.scale[e.scaleIdx],i=r.selected;return e.scale.forEach(n=>n.selected=!1),r.selected=!i,this.render()}indicator(){return\"\"}pointer(){return\"\"}separator(){return this.styles.muted(this.symbols.ellipsis)}right(){let e=this.focused;return e.scaleIdx>=e.scale.length-1?this.alert():(e.scaleIdx++,this.render())}left(){let e=this.focused;return e.scaleIdx<=0?this.alert():(e.scaleIdx--,this.render())}indent(){return\"   \"}async renderChoice(e,r){await this.onChoice(e,r);let i=this.index===r,n=this.term===\"Hyper\",s=n?9:8,o=n?\"\":\" \",a=this.symbols.line.repeat(s),l=\" \".repeat(s+(n?0:1)),c=x=>(x?this.styles.success(\"\\u25C9\"):\"\\u25EF\")+o,u=r+1+\".\",g=i?this.styles.heading:this.styles.noop,f=await this.resolve(e.message,this.state,e,r),h=this.indent(e),p=h+e.scale.map((x,M)=>c(M===e.scaleIdx)).join(a),m=x=>x===e.scaleIdx?g(x):x,y=h+e.scale.map((x,M)=>m(M)).join(l),Q=()=>[u,f].filter(Boolean).join(\" \"),S=()=>[Q(),p,y,\" \"].filter(Boolean).join(`\n`);return i&&(p=this.styles.cyan(p),y=this.styles.cyan(y)),S()}async renderChoices(){if(this.state.submitted)return\"\";let e=this.visible.map(async(i,n)=>await this.renderChoice(i,n)),r=await Promise.all(e);return r.length||r.push(this.styles.danger(\"No matching choices\")),r.join(`\n`)}format(){return this.state.submitted?this.choices.map(r=>this.styles.info(r.scaleIdx)).join(\", \"):\"\"}async render(){let{submitted:e,size:r}=this.state,i=await this.prefix(),n=await this.separator(),s=await this.message(),o=[i,s,n].filter(Boolean).join(\" \");this.state.prompt=o;let a=await this.header(),l=await this.format(),c=await this.error()||await this.hint(),u=await this.renderChoices(),g=await this.footer();(l||!c)&&(o+=\" 
\"+l),c&&!o.includes(c)&&(o+=\" \"+c),e&&!l&&!u&&this.multiple&&this.type!==\"form\"&&(o+=this.styles.danger(this.emptyError)),this.clear(r),this.write([o,a,u,g].filter(Boolean).join(`\n`)),this.restore()}submit(){this.value={};for(let e of this.choices)this.value[e.name]=e.scaleIdx;return this.base.submit.call(this)}};function BJe(t,e={}){if(Array.isArray(e.scale))return e.scale.map(i=>N({},i));let r=[];for(let i=1;i<t+1;i++)r.push({i,selected:!1});return r}use.exports=gse});var pse=w((adt,hse)=>{hse.exports=wN()});var mse=w((Adt,dse)=>{\"use strict\";var bJe=Z0(),Cse=class extends bJe{async initialize(){await super.initialize(),this.value=this.initial=!!this.options.initial,this.disabled=this.options.disabled||\"no\",this.enabled=this.options.enabled||\"yes\",await this.render()}reset(){this.value=this.initial,this.render()}delete(){this.alert()}toggle(){this.value=!this.value,this.render()}enable(){if(this.value===!0)return this.alert();this.value=!0,this.render()}disable(){if(this.value===!1)return this.alert();this.value=!1,this.render()}up(){this.toggle()}down(){this.toggle()}right(){this.toggle()}left(){this.toggle()}next(){this.toggle()}prev(){this.toggle()}dispatch(e=\"\",r){switch(e.toLowerCase()){case\" \":return this.toggle();case\"1\":case\"y\":case\"t\":return this.enable();case\"0\":case\"n\":case\"f\":return this.disable();default:return this.alert()}}format(){let e=i=>this.styles.primary.underline(i);return[this.value?this.disabled:e(this.disabled),this.value?e(this.enabled):this.enabled].join(this.styles.muted(\" / \"))}async render(){let{size:e}=this.state,r=await this.header(),i=await this.prefix(),n=await this.separator(),s=await this.message(),o=await this.format(),a=await this.error()||await this.hint(),l=await this.footer(),c=[i,s,n,o].join(\" \");this.state.prompt=c,a&&!c.includes(a)&&(c+=\" \"+a),this.clear(e),this.write([r,c,l].filter(Boolean).join(`\n`)),this.write(this.margin[2]),this.restore()}};dse.exports=Cse});var yse=w((ldt,Ese)=>{\"use strict\";var QJe=kl(),Ise=class extends QJe{constructor(e){super(e);if(typeof this.options.correctChoice!=\"number\"||this.options.correctChoice<0)throw new Error(\"Please specify the index of the correct answer from the list of choices\")}async toChoices(e,r){let i=await super.toChoices(e,r);if(i.length<2)throw new Error(\"Please give at least two choices to the user\");if(this.options.correctChoice>i.length)throw new Error(\"Please specify the index of the correct answer from the list of choices\");return i}check(e){return e.index===this.options.correctChoice}async result(e){return{selectedAnswer:e,correctAnswer:this.options.choices[this.options.correctChoice].value,correct:await this.check(this.state)}}};Ese.exports=Ise});var Bse=w(QN=>{\"use strict\";var wse=Xi(),Ci=(t,e)=>{wse.defineExport(QN,t,e),wse.defineExport(QN,t.toLowerCase(),e)};Ci(\"AutoComplete\",()=>une());Ci(\"BasicAuth\",()=>Ene());Ci(\"Confirm\",()=>bne());Ci(\"Editable\",()=>Sne());Ci(\"Form\",()=>X0());Ci(\"Input\",()=>wN());Ci(\"Invisible\",()=>One());Ci(\"List\",()=>Kne());Ci(\"MultiSelect\",()=>Gne());Ci(\"Numeral\",()=>Wne());Ci(\"Password\",()=>Vne());Ci(\"Scale\",()=>ese());Ci(\"Select\",()=>kl());Ci(\"Snippet\",()=>ase());Ci(\"Sort\",()=>cse());Ci(\"Survey\",()=>fse());Ci(\"Text\",()=>pse());Ci(\"Toggle\",()=>mse());Ci(\"Quiz\",()=>yse())});var Qse=w((udt,bse)=>{bse.exports={ArrayPrompt:XC(),AuthPrompt:yN(),BooleanPrompt:Z0(),NumberPrompt:BN(),StringPrompt:Eu()}});var $C=w((gdt,vse)=>{\"use strict\";var 
Sse=require(\"assert\"),vN=require(\"events\"),xl=Xi(),ua=class extends vN{constructor(e,r){super();this.options=xl.merge({},e),this.answers=N({},r)}register(e,r){if(xl.isObject(e)){for(let n of Object.keys(e))this.register(n,e[n]);return this}Sse.equal(typeof r,\"function\",\"expected a function\");let i=e.toLowerCase();return r.prototype instanceof this.Prompt?this.prompts[i]=r:this.prompts[i]=r(this.Prompt,this),this}async prompt(e=[]){for(let r of[].concat(e))try{typeof r==\"function\"&&(r=await r.call(this)),await this.ask(xl.merge({},this.options,r))}catch(i){return Promise.reject(i)}return this.answers}async ask(e){typeof e==\"function\"&&(e=await e.call(this));let r=xl.merge({},this.options,e),{type:i,name:n}=e,{set:s,get:o}=xl;if(typeof i==\"function\"&&(i=await i.call(this,e,this.answers)),!i)return this.answers[n];Sse(this.prompts[i],`Prompt \"${i}\" is not registered`);let a=new this.prompts[i](r),l=o(this.answers,n);a.state.answers=this.answers,a.enquirer=this,n&&a.on(\"submit\",u=>{this.emit(\"answer\",n,u,a),s(this.answers,n,u)});let c=a.emit.bind(a);return a.emit=(...u)=>(this.emit.call(this,...u),c(...u)),this.emit(\"prompt\",a,this),r.autofill&&l!=null?(a.value=a.input=l,r.autofill===\"show\"&&await a.submit()):l=a.value=await a.run(),l}use(e){return e.call(this,this),this}set Prompt(e){this._Prompt=e}get Prompt(){return this._Prompt||this.constructor.Prompt}get prompts(){return this.constructor.prompts}static set Prompt(e){this._Prompt=e}static get Prompt(){return this._Prompt||zf()}static get prompts(){return Bse()}static get types(){return Qse()}static get prompt(){let e=(r,...i)=>{let n=new this(...i),s=n.emit.bind(n);return n.emit=(...o)=>(e.emit(...o),s(...o)),n.prompt(r)};return xl.mixinEmitter(e,new vN),e}};xl.mixinEmitter(ua,new vN);var SN=ua.prompts;for(let t of Object.keys(SN)){let e=t.toLowerCase(),r=i=>new SN[t](i).run();ua.prompt[e]=r,ua[e]=r,ua[t]||Reflect.defineProperty(ua,t,{get:()=>SN[t]})}var ZC=t=>{xl.defineExport(ua,t,()=>ua.types[t])};ZC(\"ArrayPrompt\");ZC(\"AuthPrompt\");ZC(\"BooleanPrompt\");ZC(\"NumberPrompt\");ZC(\"StringPrompt\");vse.exports=ua});var Kse=w((tCt,Use)=>{function PJe(t,e){for(var r=-1,i=t==null?0:t.length;++r<i&&e(t[r],r,t)!==!1;);return t}Use.exports=PJe});var Xf=w((rCt,Hse)=>{var DJe=c0(),RJe=Ff();function FJe(t,e,r,i){var n=!r;r||(r={});for(var s=-1,o=e.length;++s<o;){var a=e[s],l=i?i(r[a],t[a],a,r,t):void 0;l===void 0&&(l=t[a]),n?RJe(r,a,l):DJe(r,a,l)}return r}Hse.exports=FJe});var Gse=w((iCt,jse)=>{var NJe=Xf(),LJe=Mf();function TJe(t,e){return t&&NJe(e,LJe(e),t)}jse.exports=TJe});var qse=w((nCt,Yse)=>{function OJe(t){var e=[];if(t!=null)for(var r in Object(t))e.push(r);return e}Yse.exports=OJe});var Wse=w((sCt,Jse)=>{var MJe=Rn(),UJe=b0(),KJe=qse(),HJe=Object.prototype,jJe=HJe.hasOwnProperty;function GJe(t){if(!MJe(t))return KJe(t);var e=UJe(t),r=[];for(var i in t)i==\"constructor\"&&(e||!jJe.call(t,i))||r.push(i);return r}Jse.exports=GJe});var Zf=w((oCt,zse)=>{var YJe=mF(),qJe=Wse(),JJe=FC();function WJe(t){return JJe(t)?YJe(t,!0):qJe(t)}zse.exports=WJe});var Vse=w((aCt,_se)=>{var zJe=Xf(),_Je=Zf();function VJe(t,e){return t&&zJe(e,_Je(e),t)}_se.exports=VJe});var FN=w((om,$f)=>{var XJe=Rs(),Xse=typeof om==\"object\"&&om&&!om.nodeType&&om,Zse=Xse&&typeof $f==\"object\"&&$f&&!$f.nodeType&&$f,ZJe=Zse&&Zse.exports===Xse,$se=ZJe?XJe.Buffer:void 0,eoe=$se?$se.allocUnsafe:void 0;function $Je(t,e){if(e)return t.slice();var r=t.length,i=eoe?eoe(r):new t.constructor(r);return t.copy(i),i}$f.exports=$Je});var NN=w((ACt,toe)=>{function 
e3e(t,e){var r=-1,i=t.length;for(e||(e=Array(i));++r<i;)e[r]=t[r];return e}toe.exports=e3e});var ioe=w((lCt,roe)=>{var t3e=Xf(),r3e=v0();function i3e(t,e){return t3e(t,r3e(t),e)}roe.exports=i3e});var eb=w((cCt,noe)=>{var n3e=EF(),s3e=n3e(Object.getPrototypeOf,Object);noe.exports=s3e});var LN=w((uCt,soe)=>{var o3e=g0(),a3e=eb(),A3e=v0(),l3e=QF(),c3e=Object.getOwnPropertySymbols,u3e=c3e?function(t){for(var e=[];t;)o3e(e,A3e(t)),t=a3e(t);return e}:l3e;soe.exports=u3e});var aoe=w((gCt,ooe)=>{var g3e=Xf(),f3e=LN();function h3e(t,e){return g3e(t,f3e(t),e)}ooe.exports=h3e});var loe=w((fCt,Aoe)=>{var p3e=bF(),d3e=LN(),C3e=Zf();function m3e(t){return p3e(t,C3e,d3e)}Aoe.exports=m3e});var uoe=w((hCt,coe)=>{var E3e=Object.prototype,I3e=E3e.hasOwnProperty;function y3e(t){var e=t.length,r=new t.constructor(e);return e&&typeof t[0]==\"string\"&&I3e.call(t,\"index\")&&(r.index=t.index,r.input=t.input),r}coe.exports=y3e});var tb=w((pCt,goe)=>{var foe=wF();function w3e(t){var e=new t.constructor(t.byteLength);return new foe(e).set(new foe(t)),e}goe.exports=w3e});var poe=w((dCt,hoe)=>{var B3e=tb();function b3e(t,e){var r=e?B3e(t.buffer):t.buffer;return new t.constructor(r,t.byteOffset,t.byteLength)}hoe.exports=b3e});var Coe=w((CCt,doe)=>{var Q3e=/\\w*$/;function v3e(t){var e=new t.constructor(t.source,Q3e.exec(t));return e.lastIndex=t.lastIndex,e}doe.exports=v3e});var woe=w((mCt,moe)=>{var Eoe=Kc(),Ioe=Eoe?Eoe.prototype:void 0,yoe=Ioe?Ioe.valueOf:void 0;function S3e(t){return yoe?Object(yoe.call(t)):{}}moe.exports=S3e});var TN=w((ECt,Boe)=>{var k3e=tb();function x3e(t,e){var r=e?k3e(t.buffer):t.buffer;return new t.constructor(r,t.byteOffset,t.length)}Boe.exports=x3e});var Qoe=w((ICt,boe)=>{var P3e=tb(),D3e=poe(),R3e=Coe(),F3e=woe(),N3e=TN(),L3e=\"[object Boolean]\",T3e=\"[object Date]\",O3e=\"[object Map]\",M3e=\"[object Number]\",U3e=\"[object RegExp]\",K3e=\"[object Set]\",H3e=\"[object String]\",j3e=\"[object Symbol]\",G3e=\"[object ArrayBuffer]\",Y3e=\"[object DataView]\",q3e=\"[object Float32Array]\",J3e=\"[object Float64Array]\",W3e=\"[object Int8Array]\",z3e=\"[object Int16Array]\",_3e=\"[object Int32Array]\",V3e=\"[object Uint8Array]\",X3e=\"[object Uint8ClampedArray]\",Z3e=\"[object Uint16Array]\",$3e=\"[object Uint32Array]\";function eWe(t,e,r){var i=t.constructor;switch(e){case G3e:return P3e(t);case L3e:case T3e:return new i(+t);case Y3e:return D3e(t,r);case q3e:case J3e:case W3e:case z3e:case _3e:case V3e:case X3e:case Z3e:case $3e:return N3e(t,r);case O3e:return new i;case M3e:case H3e:return new i(t);case U3e:return R3e(t);case K3e:return new i;case j3e:return F3e(t)}}boe.exports=eWe});var koe=w((yCt,voe)=>{var tWe=Rn(),Soe=Object.create,rWe=function(){function t(){}return function(e){if(!tWe(e))return{};if(Soe)return Soe(e);t.prototype=e;var r=new t;return t.prototype=void 0,r}}();voe.exports=rWe});var ON=w((wCt,xoe)=>{var iWe=koe(),nWe=eb(),sWe=b0();function oWe(t){return typeof t.constructor==\"function\"&&!sWe(t)?iWe(nWe(t)):{}}xoe.exports=oWe});var Doe=w((BCt,Poe)=>{var aWe=LC(),AWe=Zo(),lWe=\"[object Map]\";function cWe(t){return AWe(t)&&aWe(t)==lWe}Poe.exports=cWe});var Loe=w((bCt,Roe)=>{var uWe=Doe(),gWe=y0(),Foe=w0(),Noe=Foe&&Foe.isMap,fWe=Noe?gWe(Noe):uWe;Roe.exports=fWe});var Ooe=w((QCt,Toe)=>{var hWe=LC(),pWe=Zo(),dWe=\"[object Set]\";function CWe(t){return pWe(t)&&hWe(t)==dWe}Toe.exports=CWe});var Hoe=w((vCt,Moe)=>{var mWe=Ooe(),EWe=y0(),Uoe=w0(),Koe=Uoe&&Uoe.isSet,IWe=Koe?EWe(Koe):mWe;Moe.exports=IWe});var Joe=w((SCt,joe)=>{var 
yWe=NC(),wWe=Kse(),BWe=c0(),bWe=Gse(),QWe=Vse(),vWe=FN(),SWe=NN(),kWe=ioe(),xWe=aoe(),PWe=vF(),DWe=loe(),RWe=LC(),FWe=uoe(),NWe=Qoe(),LWe=ON(),TWe=Os(),OWe=PC(),MWe=Loe(),UWe=Rn(),KWe=Hoe(),HWe=Mf(),jWe=Zf(),GWe=1,YWe=2,qWe=4,Goe=\"[object Arguments]\",JWe=\"[object Array]\",WWe=\"[object Boolean]\",zWe=\"[object Date]\",_We=\"[object Error]\",Yoe=\"[object Function]\",VWe=\"[object GeneratorFunction]\",XWe=\"[object Map]\",ZWe=\"[object Number]\",qoe=\"[object Object]\",$We=\"[object RegExp]\",e8e=\"[object Set]\",t8e=\"[object String]\",r8e=\"[object Symbol]\",i8e=\"[object WeakMap]\",n8e=\"[object ArrayBuffer]\",s8e=\"[object DataView]\",o8e=\"[object Float32Array]\",a8e=\"[object Float64Array]\",A8e=\"[object Int8Array]\",l8e=\"[object Int16Array]\",c8e=\"[object Int32Array]\",u8e=\"[object Uint8Array]\",g8e=\"[object Uint8ClampedArray]\",f8e=\"[object Uint16Array]\",h8e=\"[object Uint32Array]\",dr={};dr[Goe]=dr[JWe]=dr[n8e]=dr[s8e]=dr[WWe]=dr[zWe]=dr[o8e]=dr[a8e]=dr[A8e]=dr[l8e]=dr[c8e]=dr[XWe]=dr[ZWe]=dr[qoe]=dr[$We]=dr[e8e]=dr[t8e]=dr[r8e]=dr[u8e]=dr[g8e]=dr[f8e]=dr[h8e]=!0;dr[_We]=dr[Yoe]=dr[i8e]=!1;function rb(t,e,r,i,n,s){var o,a=e&GWe,l=e&YWe,c=e&qWe;if(r&&(o=n?r(t,i,n,s):r(t)),o!==void 0)return o;if(!UWe(t))return t;var u=TWe(t);if(u){if(o=FWe(t),!a)return SWe(t,o)}else{var g=RWe(t),f=g==Yoe||g==VWe;if(OWe(t))return vWe(t,a);if(g==qoe||g==Goe||f&&!n){if(o=l||f?{}:LWe(t),!a)return l?xWe(t,QWe(o,t)):kWe(t,bWe(o,t))}else{if(!dr[g])return n?t:{};o=NWe(t,g,a)}}s||(s=new yWe);var h=s.get(t);if(h)return h;s.set(t,o),KWe(t)?t.forEach(function(y){o.add(rb(y,e,r,y,t,s))}):MWe(t)&&t.forEach(function(y,Q){o.set(Q,rb(y,e,r,Q,t,s))});var p=c?l?DWe:PWe:l?jWe:HWe,m=u?void 0:p(t);return wWe(m||t,function(y,Q){m&&(Q=y,y=t[Q]),BWe(o,Q,rb(y,e,r,Q,t,s))}),o}joe.exports=rb});var MN=w((kCt,Woe)=>{var p8e=Joe(),d8e=1,C8e=4;function m8e(t){return p8e(t,d8e|C8e)}Woe.exports=m8e});var _oe=w((xCt,zoe)=>{var E8e=zR();function I8e(t,e,r){return t==null?t:E8e(t,e,r)}zoe.exports=I8e});var tae=w((LCt,eae)=>{function y8e(t){var e=t==null?0:t.length;return e?t[e-1]:void 0}eae.exports=y8e});var iae=w((TCt,rae)=>{var w8e=IC(),B8e=sD();function b8e(t,e){return e.length<2?t:w8e(t,B8e(e,0,-1))}rae.exports=b8e});var sae=w((OCt,nae)=>{var Q8e=Rf(),v8e=tae(),S8e=iae(),k8e=lu();function x8e(t,e){return e=Q8e(e,t),t=S8e(t,e),t==null||delete t[k8e(v8e(e))]}nae.exports=x8e});var aae=w((MCt,oae)=>{var P8e=sae();function D8e(t,e){return t==null?!0:P8e(t,e)}oae.exports=D8e});var 
dae=w((hmt,pae)=>{pae.exports={name:\"@yarnpkg/cli\",version:\"3.2.0\",license:\"BSD-2-Clause\",main:\"./sources/index.ts\",dependencies:{\"@yarnpkg/core\":\"workspace:^\",\"@yarnpkg/fslib\":\"workspace:^\",\"@yarnpkg/libzip\":\"workspace:^\",\"@yarnpkg/parsers\":\"workspace:^\",\"@yarnpkg/plugin-compat\":\"workspace:^\",\"@yarnpkg/plugin-dlx\":\"workspace:^\",\"@yarnpkg/plugin-essentials\":\"workspace:^\",\"@yarnpkg/plugin-file\":\"workspace:^\",\"@yarnpkg/plugin-git\":\"workspace:^\",\"@yarnpkg/plugin-github\":\"workspace:^\",\"@yarnpkg/plugin-http\":\"workspace:^\",\"@yarnpkg/plugin-init\":\"workspace:^\",\"@yarnpkg/plugin-link\":\"workspace:^\",\"@yarnpkg/plugin-nm\":\"workspace:^\",\"@yarnpkg/plugin-npm\":\"workspace:^\",\"@yarnpkg/plugin-npm-cli\":\"workspace:^\",\"@yarnpkg/plugin-pack\":\"workspace:^\",\"@yarnpkg/plugin-patch\":\"workspace:^\",\"@yarnpkg/plugin-pnp\":\"workspace:^\",\"@yarnpkg/plugin-pnpm\":\"workspace:^\",\"@yarnpkg/shell\":\"workspace:^\",chalk:\"^3.0.0\",\"ci-info\":\"^3.2.0\",clipanion:\"^3.2.0-rc.4\",semver:\"^7.1.2\",tslib:\"^1.13.0\",typanion:\"^3.3.0\",yup:\"^0.32.9\"},devDependencies:{\"@types/semver\":\"^7.1.0\",\"@types/yup\":\"^0\",\"@yarnpkg/builder\":\"workspace:^\",\"@yarnpkg/monorepo\":\"workspace:^\",\"@yarnpkg/pnpify\":\"workspace:^\",micromatch:\"^4.0.2\"},peerDependencies:{\"@yarnpkg/core\":\"workspace:^\"},scripts:{postpack:\"rm -rf lib\",prepack:'run build:compile \"$(pwd)\"',\"build:cli+hook\":\"run build:pnp:hook && builder build bundle\",\"build:cli\":\"builder build bundle\",\"run:cli\":\"builder run\",\"update-local\":\"run build:cli --no-git-hash && rsync -a --delete bundles/ bin/\"},publishConfig:{main:\"./lib/index.js\",types:\"./lib/index.d.ts\",bin:null},files:[\"/lib/**/*\",\"!/lib/pluginConfiguration.*\",\"!/lib/cli.*\"],\"@yarnpkg/builder\":{bundles:{standard:[\"@yarnpkg/plugin-essentials\",\"@yarnpkg/plugin-compat\",\"@yarnpkg/plugin-dlx\",\"@yarnpkg/plugin-file\",\"@yarnpkg/plugin-git\",\"@yarnpkg/plugin-github\",\"@yarnpkg/plugin-http\",\"@yarnpkg/plugin-init\",\"@yarnpkg/plugin-link\",\"@yarnpkg/plugin-nm\",\"@yarnpkg/plugin-npm\",\"@yarnpkg/plugin-npm-cli\",\"@yarnpkg/plugin-pack\",\"@yarnpkg/plugin-patch\",\"@yarnpkg/plugin-pnp\",\"@yarnpkg/plugin-pnpm\"]}},repository:{type:\"git\",url:\"ssh://git@github.com/yarnpkg/berry.git\",directory:\"packages/yarnpkg-cli\"},engines:{node:\">=12 <14 || 14.2 - 14.9 || >14.10.0\"}}});var VN=w((Jyt,rAe)=>{\"use strict\";rAe.exports=function(e,r){r===!0&&(r=0);var i=e.indexOf(\"://\"),n=e.substring(0,i).split(\"+\").filter(Boolean);return typeof r==\"number\"?n[r]:n}});var XN=w((Wyt,iAe)=>{\"use strict\";var Z8e=VN();function nAe(t){if(Array.isArray(t))return t.indexOf(\"ssh\")!==-1||t.indexOf(\"rsync\")!==-1;if(typeof t!=\"string\")return!1;var e=Z8e(t);return t=t.substring(t.indexOf(\"://\")+3),nAe(e)?!0:t.indexOf(\"@\")<t.indexOf(\":\")}iAe.exports=nAe});var oAe=w((zyt,sAe)=>{\"use strict\";var $8e=VN(),eze=XN(),tze=require(\"querystring\");function rze(t){t=(t||\"\").trim();var e={protocols:$8e(t),protocol:null,port:null,resource:\"\",user:\"\",pathname:\"\",hash:\"\",search:\"\",href:t,query:Object.create(null)},r=t.indexOf(\"://\"),i=-1,n=null,s=null;t.startsWith(\".\")&&(t.startsWith(\"./\")&&(t=t.substring(2)),e.pathname=t,e.protocol=\"file\");var o=t.charAt(1);return 
e.protocol||(e.protocol=e.protocols[0],e.protocol||(eze(t)?e.protocol=\"ssh\":((o===\"/\"||o===\"~\")&&(t=t.substring(2)),e.protocol=\"file\"))),r!==-1&&(t=t.substring(r+3)),s=t.split(\"/\"),e.protocol!==\"file\"?e.resource=s.shift():e.resource=\"\",n=e.resource.split(\"@\"),n.length===2&&(e.user=n[0],e.resource=n[1]),n=e.resource.split(\":\"),n.length===2&&(e.resource=n[0],n[1]?(e.port=Number(n[1]),isNaN(e.port)&&(e.port=null,s.unshift(n[1]))):e.port=null),s=s.filter(Boolean),e.protocol===\"file\"?e.pathname=e.href:e.pathname=e.pathname||(e.protocol!==\"file\"||e.href[0]===\"/\"?\"/\":\"\")+s.join(\"/\"),n=e.pathname.split(\"#\"),n.length===2&&(e.pathname=n[0],e.hash=n[1]),n=e.pathname.split(\"?\"),n.length===2&&(e.pathname=n[0],e.search=n[1]),e.query=tze.parse(e.search),e.href=e.href.replace(/\\/$/,\"\"),e.pathname=e.pathname.replace(/\\/$/,\"\"),e}sAe.exports=rze});var lAe=w((_yt,aAe)=>{\"use strict\";var ize=\"text/plain\",nze=\"us-ascii\",AAe=(t,e)=>e.some(r=>r instanceof RegExp?r.test(t):r===t),sze=(t,{stripHash:e})=>{let r=/^data:(?<type>[^,]*?),(?<data>[^#]*?)(?:#(?<hash>.*))?$/.exec(t);if(!r)throw new Error(`Invalid URL: ${t}`);let{type:i,data:n,hash:s}=r.groups,o=i.split(\";\");s=e?\"\":s;let a=!1;o[o.length-1]===\"base64\"&&(o.pop(),a=!0);let l=(o.shift()||\"\").toLowerCase(),u=[...o.map(g=>{let[f,h=\"\"]=g.split(\"=\").map(p=>p.trim());return f===\"charset\"&&(h=h.toLowerCase(),h===nze)?\"\":`${f}${h?`=${h}`:\"\"}`}).filter(Boolean)];return a&&u.push(\"base64\"),(u.length!==0||l&&l!==ize)&&u.unshift(l),`data:${u.join(\";\")},${a?n.trim():n}${s?`#${s}`:\"\"}`},oze=(t,e)=>{if(e=N({defaultProtocol:\"http:\",normalizeProtocol:!0,forceHttp:!1,forceHttps:!1,stripAuthentication:!0,stripHash:!1,stripTextFragment:!0,stripWWW:!0,removeQueryParameters:[/^utm_\\w+/i],removeTrailingSlash:!0,removeSingleSlash:!0,removeDirectoryIndex:!1,sortQueryParameters:!0},e),t=t.trim(),/^data:/i.test(t))return sze(t,e);if(/^view-source:/i.test(t))throw new Error(\"`view-source:` is not supported as it is a non-standard protocol\");let r=t.startsWith(\"//\");!r&&/^\\.*\\//.test(t)||(t=t.replace(/^(?!(?:\\w+:)?\\/\\/)|^\\/\\//,e.defaultProtocol));let n=new URL(t);if(e.forceHttp&&e.forceHttps)throw new Error(\"The `forceHttp` and `forceHttps` options cannot be used together\");if(e.forceHttp&&n.protocol===\"https:\"&&(n.protocol=\"http:\"),e.forceHttps&&n.protocol===\"http:\"&&(n.protocol=\"https:\"),e.stripAuthentication&&(n.username=\"\",n.password=\"\"),e.stripHash?n.hash=\"\":e.stripTextFragment&&(n.hash=n.hash.replace(/#?:~:text.*?$/i,\"\")),n.pathname&&(n.pathname=n.pathname.replace(/(?<!\\b(?:[a-z][a-z\\d+\\-.]{1,50}:))\\/{2,}/g,\"/\")),n.pathname)try{n.pathname=decodeURI(n.pathname)}catch(o){}if(e.removeDirectoryIndex===!0&&(e.removeDirectoryIndex=[/^index\\.[a-z]+$/]),Array.isArray(e.removeDirectoryIndex)&&e.removeDirectoryIndex.length>0){let o=n.pathname.split(\"/\"),a=o[o.length-1];AAe(a,e.removeDirectoryIndex)&&(o=o.slice(0,o.length-1),n.pathname=o.slice(1).join(\"/\")+\"/\")}if(n.hostname&&(n.hostname=n.hostname.replace(/\\.$/,\"\"),e.stripWWW&&/^www\\.(?!www\\.)(?:[a-z\\-\\d]{1,63})\\.(?:[a-z.\\-\\d]{2,63})$/.test(n.hostname)&&(n.hostname=n.hostname.replace(/^www\\./,\"\"))),Array.isArray(e.removeQueryParameters))for(let o of[...n.searchParams.keys()])AAe(o,e.removeQueryParameters)&&n.searchParams.delete(o);e.removeQueryParameters===!0&&(n.search=\"\"),e.sortQueryParameters&&n.searchParams.sort(),e.removeTrailingSlash&&(n.pathname=n.pathname.replace(/\\/$/,\"\"));let s=t;return 
t=n.toString(),!e.removeSingleSlash&&n.pathname===\"/\"&&!s.endsWith(\"/\")&&n.hash===\"\"&&(t=t.replace(/\\/$/,\"\")),(e.removeTrailingSlash||n.pathname===\"/\")&&n.hash===\"\"&&e.removeSingleSlash&&(t=t.replace(/\\/$/,\"\")),r&&!e.normalizeProtocol&&(t=t.replace(/^http:\\/\\//,\"//\")),e.stripProtocol&&(t=t.replace(/^(?:https?:)?\\/\\//,\"\")),t};aAe.exports=oze});var uAe=w((Vyt,cAe)=>{\"use strict\";var aze=typeof Symbol==\"function\"&&typeof Symbol.iterator==\"symbol\"?function(t){return typeof t}:function(t){return t&&typeof Symbol==\"function\"&&t.constructor===Symbol&&t!==Symbol.prototype?\"symbol\":typeof t},Aze=oAe(),lze=lAe();function cze(t){var e=arguments.length>1&&arguments[1]!==void 0?arguments[1]:!1;if(typeof t!=\"string\"||!t.trim())throw new Error(\"Invalid url.\");e&&((typeof e==\"undefined\"?\"undefined\":aze(e))!==\"object\"&&(e={stripHash:!1}),t=lze(t,e));var r=Aze(t);return r}cAe.exports=cze});var hAe=w((Xyt,gAe)=>{\"use strict\";var uze=uAe(),fAe=XN();function gze(t){var e=uze(t);e.token=\"\";var r=e.user.split(\":\");return r.length===2&&(r[1]===\"x-oauth-basic\"?e.token=r[0]:r[0]===\"x-token-auth\"&&(e.token=r[1])),fAe(e.protocols)||fAe(t)?e.protocol=\"ssh\":e.protocols.length?e.protocol=e.protocols[0]:e.protocol=\"file\",e.href=e.href.replace(/\\/$/,\"\"),e}gAe.exports=gze});var dAe=w((Zyt,pAe)=>{\"use strict\";var fze=hAe();function ZN(t){if(typeof t!=\"string\")throw new Error(\"The url must be a string.\");var e=fze(t),r=e.resource.split(\".\"),i=null;switch(e.toString=function(l){return ZN.stringify(this,l)},e.source=r.length>2?r.slice(1-r.length).join(\".\"):e.source=e.resource,e.git_suffix=/\\.git$/.test(e.pathname),e.name=decodeURIComponent(e.pathname.replace(/^\\//,\"\").replace(/\\.git$/,\"\")),e.owner=decodeURIComponent(e.user),e.source){case\"git.cloudforge.com\":e.owner=e.user,e.organization=r[0],e.source=\"cloudforge.com\";break;case\"visualstudio.com\":if(e.resource===\"vs-ssh.visualstudio.com\"){i=e.name.split(\"/\"),i.length===4&&(e.organization=i[1],e.owner=i[2],e.name=i[3],e.full_name=i[2]+\"/\"+i[3]);break}else{i=e.name.split(\"/\"),i.length===2?(e.owner=i[1],e.name=i[1],e.full_name=\"_git/\"+e.name):i.length===3?(e.name=i[2],i[0]===\"DefaultCollection\"?(e.owner=i[2],e.organization=i[0],e.full_name=e.organization+\"/_git/\"+e.name):(e.owner=i[0],e.full_name=e.owner+\"/_git/\"+e.name)):i.length===4&&(e.organization=i[0],e.owner=i[1],e.name=i[3],e.full_name=e.organization+\"/\"+e.owner+\"/_git/\"+e.name);break}case\"dev.azure.com\":case\"azure.com\":if(e.resource===\"ssh.dev.azure.com\"){i=e.name.split(\"/\"),i.length===4&&(e.organization=i[1],e.owner=i[2],e.name=i[3]);break}else{i=e.name.split(\"/\"),i.length===5?(e.organization=i[0],e.owner=i[1],e.name=i[4],e.full_name=\"_git/\"+e.name):i.length===3?(e.name=i[2],i[0]===\"DefaultCollection\"?(e.owner=i[2],e.organization=i[0],e.full_name=e.organization+\"/_git/\"+e.name):(e.owner=i[0],e.full_name=e.owner+\"/_git/\"+e.name)):i.length===4&&(e.organization=i[0],e.owner=i[1],e.name=i[3],e.full_name=e.organization+\"/\"+e.owner+\"/_git/\"+e.name);break}default:i=e.name.split(\"/\");var n=i.length-1;if(i.length>=2){var 
s=i.indexOf(\"blob\",2),o=i.indexOf(\"tree\",2),a=i.indexOf(\"commit\",2);n=s>0?s-1:o>0?o-1:a>0?a-1:n,e.owner=i.slice(0,n).join(\"/\"),e.name=i[n],a&&(e.commit=i[n+2])}e.ref=\"\",e.filepathtype=\"\",e.filepath=\"\",i.length>n+2&&[\"blob\",\"tree\"].indexOf(i[n+1])>=0&&(e.filepathtype=i[n+1],e.ref=i[n+2],i.length>n+3&&(e.filepath=i.slice(n+3).join(\"/\"))),e.organization=e.owner;break}return e.full_name||(e.full_name=e.owner,e.name&&(e.full_name&&(e.full_name+=\"/\"),e.full_name+=e.name)),e}ZN.stringify=function(t,e){e=e||(t.protocols&&t.protocols.length?t.protocols.join(\"+\"):t.protocol);var r=t.port?\":\"+t.port:\"\",i=t.user||\"git\",n=t.git_suffix?\".git\":\"\";switch(e){case\"ssh\":return r?\"ssh://\"+i+\"@\"+t.resource+r+\"/\"+t.full_name+n:i+\"@\"+t.resource+\":\"+t.full_name+n;case\"git+ssh\":case\"ssh+git\":case\"ftp\":case\"ftps\":return e+\"://\"+i+\"@\"+t.resource+r+\"/\"+t.full_name+n;case\"http\":case\"https\":var s=t.token?hze(t):t.user&&(t.protocols.includes(\"http\")||t.protocols.includes(\"https\"))?t.user+\"@\":\"\";return e+\"://\"+s+t.resource+r+\"/\"+t.full_name+n;default:return t.href}};function hze(t){switch(t.source){case\"bitbucket.org\":return\"x-token-auth:\"+t.token+\"@\";default:return t.token+\"@\"}}pAe.exports=ZN});var kL=w((nbt,UAe)=>{var Fze=Ff(),Nze=xf();function Lze(t,e,r){(r!==void 0&&!Nze(t[e],r)||r===void 0&&!(e in t))&&Fze(t,e,r)}UAe.exports=Lze});var HAe=w((sbt,KAe)=>{var Tze=FC(),Oze=Zo();function Mze(t){return Oze(t)&&Tze(t)}KAe.exports=Mze});var YAe=w((obt,jAe)=>{var Uze=Hc(),Kze=eb(),Hze=Zo(),jze=\"[object Object]\",Gze=Function.prototype,Yze=Object.prototype,GAe=Gze.toString,qze=Yze.hasOwnProperty,Jze=GAe.call(Object);function Wze(t){if(!Hze(t)||Uze(t)!=jze)return!1;var e=Kze(t);if(e===null)return!0;var r=qze.call(e,\"constructor\")&&e.constructor;return typeof r==\"function\"&&r instanceof r&&GAe.call(r)==Jze}jAe.exports=Wze});var xL=w((abt,qAe)=>{function zze(t,e){if(!(e===\"constructor\"&&typeof t[e]==\"function\")&&e!=\"__proto__\")return t[e]}qAe.exports=zze});var WAe=w((Abt,JAe)=>{var _ze=Xf(),Vze=Zf();function Xze(t){return _ze(t,Vze(t))}JAe.exports=Xze});var $Ae=w((lbt,zAe)=>{var _Ae=kL(),Zze=FN(),$ze=TN(),e4e=NN(),t4e=ON(),VAe=wC(),XAe=Os(),r4e=HAe(),i4e=PC(),n4e=a0(),s4e=Rn(),o4e=YAe(),a4e=B0(),ZAe=xL(),A4e=WAe();function l4e(t,e,r,i,n,s,o){var a=ZAe(t,r),l=ZAe(e,r),c=o.get(l);if(c){_Ae(t,r,c);return}var u=s?s(a,l,r+\"\",t,e,o):void 0,g=u===void 0;if(g){var f=XAe(l),h=!f&&i4e(l),p=!f&&!h&&a4e(l);u=l,f||h||p?XAe(a)?u=a:r4e(a)?u=e4e(a):h?(g=!1,u=Zze(l,!0)):p?(g=!1,u=$ze(l,!0)):u=[]:o4e(l)||VAe(l)?(u=a,VAe(a)?u=A4e(a):(!s4e(a)||n4e(a))&&(u=t4e(l))):g=!1}g&&(o.set(l,u),n(u,l,i,s,o),o.delete(l)),_Ae(t,r,u)}zAe.exports=l4e});var rle=w((cbt,ele)=>{var c4e=NC(),u4e=kL(),g4e=dF(),f4e=$Ae(),h4e=Rn(),p4e=Zf(),d4e=xL();function tle(t,e,r,i,n){t!==e&&g4e(e,function(s,o){if(n||(n=new c4e),h4e(s))f4e(t,e,o,r,tle,i,n);else{var a=i?i(d4e(t,o),s,o+\"\",t,e,n):void 0;a===void 0&&(a=s),u4e(t,o,a)}},p4e)}ele.exports=tle});var nle=w((ubt,ile)=>{var C4e=f0(),m4e=XR(),E4e=ZR();function I4e(t,e){return E4e(m4e(t,e,C4e),t+\"\")}ile.exports=I4e});var ole=w((gbt,sle)=>{var y4e=xf(),w4e=FC(),B4e=yC(),b4e=Rn();function Q4e(t,e,r){if(!b4e(r))return!1;var i=typeof e;return(i==\"number\"?w4e(r)&&B4e(e,r.length):i==\"string\"&&e in r)?y4e(r[e],t):!1}sle.exports=Q4e});var Ale=w((fbt,ale)=>{var v4e=nle(),S4e=ole();function k4e(t){return v4e(function(e,r){var i=-1,n=r.length,s=n>1?r[n-1]:void 0,o=n>2?r[2]:void 0;for(s=t.length>3&&typeof s==\"function\"?(n--,s):void 
0,o&&S4e(r[0],r[1],o)&&(s=n<3?void 0:s,n=1),e=Object(e);++i<n;){var a=r[i];a&&t(e,a,i,s)}return e})}ale.exports=k4e});var cle=w((hbt,lle)=>{var x4e=rle(),P4e=Ale(),D4e=P4e(function(t,e,r){x4e(t,e,r)});lle.exports=D4e});var vle=w((EQt,Qle)=>{var GL;Qle.exports=()=>(typeof GL==\"undefined\"&&(GL=require(\"zlib\").brotliDecompressSync(Buffer.from(\"W31XWKPorUfgdvBvNq74tjXKGGKKTn67hrdZ+RAVrTgfUG4fKu5WVREkeB0IqqqJSUWGSzqedPTAYEf9VYjMzqCnEw7kFcklPKENO1XiwIa9DI+kNNTSqWg1zmc80tEIriBTqFbPYDcubwfX6V6RtUJ8TAhZmJkY/DpQt3EnnYba76/FdePbgiCS8GO36r24B4230NFRH8pqnqWl16B+8Un+E3a7+Xz8vBb/F0kY0ySR9BJAj81OqiKS0oN4QjZclvdDEPnnL63+5+frjStVkSYvcrfnhrkwDtPO+mCN08dQLBAksNpYYiT5ILVqkC0ZruZrtq9XTTXE9dwJLzJkEWJL0ewJDPyYqv/Q/za17jlct0ksawu0IDPTtXw0pXRIkGiOXgDz2pKM5HmvVm/Vzttq/M/DObFAHmIJtCB57H+xeX2KMpup+nobHaIjDwAcUuWMX8e/OwsSbaX0hFcXcwmQBFEA832dVT9NxduOCQku17t7VQ5gY61vpI+eZA4uNUiPj1fLlp+P6OjQjbS2qqurk9cpIWf2tM4ff4Bsr6rvnmVS0oXPNI+pZXa2fvK7gM8WeWGDow8Ynsdk83jwIFO21eP7SdKmEOCjRetNqwYtDrc6v+rH/3A+fX7s7j6qaWs/z8ydC3MD6JAQg38R64BosCUV24Ht5mgIE/AD7Mf/cPL/nx/33leqnjb9XGv2QIYQ4hDRpn8xPQ40z6Ih7cYu5s/64+H9PND5vKMkK4khoRXG09Jbh53KigNph+6mhvifW43L9+BrnptiNhiMFkE/zN+eXy7eB1T04vLMZJKmX6ZpKbgDlFAREfDeWv/WXul42B9C3FmPPFOJtGErVkBvMbYaBePUX9CPGtsfDx3civp4fLMMMCjYgFR0I8zFqFDxtxI8vr2zXy5+c0pbXvk5QQMpgum7hH8iC6eTM8ZAtbZYDYb0x+6MYP2QpQuiRrqsLAnKdcDeiwOPzqgbafaYeP6/VHW5TWnbi2dQ+KNmqLSdTdkzdA6RdPj9+6bZrueAlCDn4rVJKguQa22sXCdHV916V2g7bACkCEDkGMq58etfVb0GfzeaswA4swtSxtlwfbhhbGwajmaN8/v3/bJet1CD/j3/UypTSMOiN43CdFMI5ZjIOBF3NzLzvVxd8kOvBTRGtRr1hT4RO+KcfCITVVmF/ugCRqG/pJDKp+eTLrVpvsyiAKXy6NLwaDg2/f/vLbV+eu97LyISSAIgVSAlraLUxphxRAC0Uhtrx4V7z7l7Kd6977Uyw3whIyK7gDTrizBrCa4MKd37IhJ8L5BiRSahrsgkVZUgpWqQZRbIUv/F0upRFyl9Y8zo92TszOgbNxn+wZD9Lb8xhsg6/ZmTPfsAQmdaq6jH75umdHV2x8YBOABGu/JPFwR3s5oUKa00fCP1vL1iM8PUSmgAJaUr/7+pfrbvzYDAUD8w/K891IYUelByiKEoM++7b97hvPcGR4MBaCFxPwK1C4LkMQGQZ0VwdwbUXwP82mNIf7UhUhvpLDlu6D5B/kDJQXGDvkNItdY59yGVLl1t5eOidLld4+OmcdO6Kdx2IfLPvydqch+krU1kwv+MRmQz1NHzMtgqux/Yf4FJsxhSgcAmJqKlGhIaQjOafUxngLpqjD7eDeCm319aZA99d9et4DEhQBIggaChiBYrRQsEtc3/9/l+zu8P1kRCst3SBlCpKoIFUKNYUVM0Vb2l/e3v7usDDIzlnAcva8/YLn8QLkx8YchSMLiz/3/+d7s2vNl/81iDIgUzKBKxYJAIGdRLuMZHlCJBi2RQcA324Aze5+h/3Z5/eP+51/7/t+2JIxCILkEgyFKBqCALAoFAIBAVXVJRUYG4ltvz/1Dv3DszE//FFSSDQCDIhmwQCATZIBAIsiEbBIJsEIgWLUq0OL46/Py+BW93GuSkgFR1JQNXMnjAwFIj3jkeLIATdeDd9v8XFTgXRF+cGIOxhBUxQGiRVKqkJUrCQjze+FJZEAO/zf+/7KicKzpfLFTUUSYWoj1nzQrUbSAmVi1+X4D5osxS/i5oGsBPG3m3aFvygV1hwOnlOc3F0W13mzsvIF7pETunC1IwZ1wigrbLd3YrELct1nvqvSunElU/+6XI3ro5WqR2/vDQ1exB6E4PO5X5o36u8tW5PK9M4KF7gB0xm3acz9i+owflgP4hvwvUFQx1ErKcuEdcL9urI5K9Ndn9DI1wPIKLleBv4gbsbQbm1NFFggtmSTl6DMwHLGqYb/Ce7PsfUu+/57WrgxrR2tfF+ok0wW5PMMkEvyINyQp2qCM0+Hoz0YAxuGC02IS3zzhOGGKnsl2Ivlr1CKLbzmrwWj0F/oO1IYutC0rwN/OMOFc8XyTb0G5QQX0YCFRNHDf0Y7F8gKQGy5xrp72K+wOEhsq1z6ufdieegW0v/wzdSP1R1obAQeOmWSRR3fWmat1LvDpSPoF5A/P/fsvurj9+hM+k5HbSt2R3kNAERhY3DxJqBaoh+R4kfI8oScEb67iX3hy5j96E4uPrQZIoMKqH95H7GJxrr/qKaRJOe1eQDV/L0lsvNlpfy1nZLy6yfYQwCfY+yBS7qxbzoyOrJ7z81zBhF6/cj9tjmQCmey260Qq1hGhnJ9DmYpFxTvyTpFLimObu0yl/6tXQztokuUSkfX/9erTX7wqDn2vd1pf1g+ZytBsT6qly8tdjSILt9s8vkIF2N3eKvFB3pwxtR8ry8xBRtf0+8nSJIrWTQwyP5sBD6w1pya70+SPU+zEifUFNt+ydO7t7lo1CU4cEbrSJ8YPpZWOSXsH8ZJKvWbFKr+aDQ/krjNZvSqZwFE6PWRKnsinLHBBuJEMAo0xBD3ggaQmXkB4fqwWCQH6WIbtMWqmRSSy3MqnlwPnL+QafBJsZOf/N+W8uZJO7+e5XAgmVVAaJXcY8Gl3msHjS0tMcOV3O6KCy8Ei++5WGkWgJDlpzYpnIH/+knZ0EZCr/EydtOy3IZMaTlsVtedVCyQLKxlBCp7GRhfdtZOiQtE4GBP0jAI0rU0856mm7mK2sSdXup+9gK86yGRzg6CPCCa10m87aiYeAazPPjIvibvV6SatwZSjrh92qDF+DQ8EHe1xlqg3p67DwTirH+JIhT0uLunuePZY93hvuJSg+rxzS8j3FCDA+/KypZXsviuNH8Fr9C9uGvfZMqXi6DHeymIjMIN34sPO4M/HRnIa
jg+QKuJhVujPriW9xt97fj7tchwujC5b1wwUHP3MmaXOGNFwWD7vvnjKRSv/SlR/RbhXPAMa1evTxfFz8/hA6phKAohGEadqBaA8hbL9vB8DMiAR+25yYD3zeXOBRVgLILb9rzvkyci5EM/GyDC1EMxHN7QAJ0zUDZjM+QZl2ZmC/GCBDZpkJDPM+2PCP7QywCiCBcDvByZ4UU8xA+dmfYD5TO0MsDsBE/tBe0BvjhQlWi6CQ1Ex1rOvejEXrATfTxWTq5utPnNy+gno2AZW8UkwK4PGznqYVL6DzyxQHYawhFyprhAsMzKoIhw4aYbkZdyRt1bcUMrJWmusXhHOmFCfDWfbKLzu3z/pw4T9aTpK/43Epd10mUCuIlYgOGsFgnQbVnb1mNuxIzMXwEWAteqoqoCZsMSs4xfEWgN/kBJT3kUy2jw74szeH/SQhCFuSu5t7KfDRhLxpg/L5dvldq8jiMYc9r221XDqqvl+Un7wT6fG3X8Eo9lwt9q6xBzt5/mzDqN23D3l4biT2xFX2szm5HB+1o6CdmoNFiHxF07DfqWdp1HdaBC3bvoW1NO9WDbAHhGDMDk2weatSorssnNsO0LJsHSziRIqEi3+5ODYrhOaJd2rt/aCA2WH1pmsGFlnJ64ut8mZuN8GuwR+Pl7KPQ6c3easZ1ZFaA1oocp/wiIrgwSJVJsH2yQo0DMsyvsePTBGdFjv7uKu97Vmsyee9IJ3jgmvuk2+CAEroiTi5uXRSN4e61kry/JzLSyIppv63XmTPbXL1phWKNzqHAgdQ8a/HKXJTHractuIGjbgNRSRu9lfZ+MhE3RYS8q6GwoRsCpwoAwgdFI8nFem8DZrqMBqTyNCoWWwOiLdKVIzWM0JaWxEt7K74+3HemWHcgb809v+tZyRVu/9vd9+K07rwNpnnT+jzJgWMv3fGMkaycMW8S6IvR9rJPrXibYWyt6DBfds7ub9YdKdYmBXwgUqhvKfVl9tXl6ur7/kzhmrM+syeDgbIeskMH3NBk9orUTH2LInEeiekATQZ7w+eQliyWnIH1cKECY+PfGzEeVFmuyOuAe3R3w0ROBN+YyTiUh49vZkjO5VhEPJZ1eX+9oyxfWBiXrbdMEXcwAgDmw7/2azPNBCnDYYINDZ4I9bQdWLwqSAJ6tczQ3DkMHnHEHWUpIZZsEpKGEwfiDQwSDwZTc2pNxoYrGkbjUYQ+jJM0gICUxuKDd9EF3nT7B6VhENE0nQIR1wqUtssQmrhJy02n8DdPuBd9YLQ5Qdi75r2qhRWX3WOS8c4YTS/N88QoUa0sZzXAVwQUtUARRitDVTFKsOKXgdLf8L5NFLv8IJu+VpAWI6KPL2baNT2Lux15hKsBa3nPAYiAmrXi807/3mr2tZOdMbO+jkIbvr/CzprlIKtQjOG/mDRLdWgrUXoZGuuuJXVVQZFsXz0Whpnc1/AbTTubXUBirk7BIQjuEVt+7T4vnsLll3ySAdSUihTvuRe71C6eeiGC9mU5IPTu7ZWqWEuSO5aqnAGH2fEXhYvJADlpWzlzUX7t0BV5/tDfBS9bzYT0iJQVDm5up5zZvU972DrBrNFaS1sIHuOaVYVeZFZmESNEjyNxs8HVU3NeVRJcTZYY9M+qbXS2xnGn85lPWBrM1H1lyUDFEfY1tUJB22tZrm3yz/8Og6wHMrKybUbDeMQOfrST4CsHCps1yD3hyr6pfeh9yWdSb+1XDs19cz02pCw5wF+3mIer1UaRsLDJIgh5t07UXC7XA31pR/63HReOO8CSjYAIuDfI+TEpRngNPG2l5HqtA/a+xXmfCTnY9JAHi2SZ+iVZ5JujfgB2iGDNYrQtF+k6QOvINzLvbN5N5IZBjBm05BXKrvvWcrQk7DuYuh2kT1Ks8l2Lq0+modhtBDTlIVFWe+XfIwXwvt97nyxRZ1JDTmAJj/fPeP63dNXyADhZ7rpEiGU4BKlWQX+UUwwEAOCuQazxgix0rcos4MDFfDBZxeqnEXpToqo6MBJ10KJf4FQd0BwRHtB91cuW9MtkTWrrxpa0AZiramTWclTUTHUTJWyufXflS/xoUyfxCDLN2VcTQUlaE0/e9qFzi6DTl9LKA+SfsM3Uh9GWib2mm7IQNStNgBVI4eGgWTb/QH4Ub0+nZl818CB+LTXIa33TycBNalO/GTHwxxcK0V608vX5LMamSJtsTKu7RS5M/j17cftnt088iYTkBZjPg/JVQXKot8Iwb4Ykbdgl68ApZgPpW6nuXo1EWFuk+E7HPVa2TJSfhqK8+zWLNZQlvrr8MfoALTjVnIwT8TusioUOC6WodOhCu6ERWg1GCLbpffK/k0z5i2jDJ22ByJ64V8MsQiGKFA5oWYfMq8z20nh5nKOvQ42JVgDS+mrR7BIjXAX5A8DZchGnXBAetImmtDz3ZsRg+5UBp6BpfW+i4HkZeP6jQgB2b6xTjyp6nRglVIAnqQxAWM+Mm0J9UMg9ZHbvj0662RjluJz2DKT1SyJAey16+DCJMmetZba8xjfN2f1XyxbpI4yd8mMHpGhDmS3zGgGHDw+oyMH07iMuRdU5NAHvVoaExcrPjZyLCkzSemXPs2KTTLxR2aryYtzqez96YEoZyOipUVF3RSR1xU1mWI6G0GiU6BawOUeCqNmihta7Wir+cpIKh9LwNwhNIUncOCMECgCBiQNbx8lwo4dLwWDF0iYk/UweP0wb2qx+ALaXIJ5GuUkhGW1NcSEjDtyYYZbZnqq7JuJIZHrlvMvLhgfrT1etRJTeat6HzQzyTziu2to/c0PCJWyJe/S+6SdcrDhBdFtG4N8o3Wk8BiZQkR2EB6kfo+75qEjJpmsa7q6NDJdqgg+8vL/C4Bf0X46hoYVIc7TCNjqw2gb3w+BkywiVQ5o+LbyXiMszLrBkQSkpPUY+Ym/EHLVggnBaK9L2dBc42gx/w59GSP6n5llQqztzFwoMqeiQpHVEn0cl2H/S8uJJoqBOd2PwM8T9dOfKqexzZ/sPpqQ6lRpgQEj+HYICeuMCi+YoGQHvqB9R/sSRGEAwQEmGwcu/mRFbL5AF4y8RH1dq/6++eoiT0rWN2ylGdlakJGfqoK3APlwsxX4pAK3XbW0XXnQln5JwQj2oxIH/ggAZNJr9zA4036WhtsHmHSEhGxgr4dfmwQ6oyNEy/dEKBrjsjH37Z0SL65XI03FaNe6htYXjDhcrK3y2umph8tidj0bMpVuHnMMoQdI8XCnjylBycxBaaPh7t3pfu1nmwYCoRBMbi/7B+6Shsapa8C8wZ+6mfWdvnvKXtCv5ut6zOHnFZtnaCCeOk9WjxAlk2Ic4lKbjeDhvTd1hNm71QdITvId2zfJZIdyRQNPAmxpqAYp6rhbcX1yBMwadSeGnQLttpNqgEdKiMH0wZF0IzyJcdprtnFgvHywWdbb9Z9kRzYtvMH4wTtQhJ68uWaHXWPzxvH04rPjRpvNbAA8CaA8s2naMH9Lq/6T6zQ7oO7EJGj6jyqXBKupHWoPEz159mx6uhUzv0MHnbumleD2rpXwb7IZ6VGM/CoZ+O6hmX
cPsUC2+A/kufUv71IIs20U0/zur7Dr7AItRoE/6JX1b3JAalNg4/NwPDgzF0nHrODRefqLQz/hF73ih0IMOLqVc9SWFHFpkTYKMf3SLRZOkWT7aA8R345UHoo+Iu5DdfFmfZkdhURyFABs7QbQFU1tDwK09lkj36pMe8sU254jL+kaDC3wHEeuf2laeWnbT//0wApMlyUK7WtT4PQ1abQfdyu2hvDftcSQ2GPTmDIocau8z2HJwv72ntFFzxH7qp/NmeybzWpBhYFSorfHhbbbkUTSdj3bVAPLNzZslQvJrnXBlzd6gQGZOw84DKLZthc/h9LTC2glE15+AtF6FGFMmhDY4AX9XQvnRvq03YcRMz5gfrBCe0DRZYgqFBPC/bTu4KmlSxCD3d7vo6pTCpvnXWYmqrIezKUhPY7FYKRma7PMemK5OOuKizZDED+0KTD1pDIJ72uP7SHAom1emYM/tIF+4ISO52FrLXxpFEhZk6lGnk8Cu+zWQFuKGw3PDLAlw1Plbdr+E7IW7xUlyfSxk/bcpI4FofJ5wJ5qX+vp5PhaIFvG6OQ3EyU9nPdZb5kVbLdujj3SBIqNprZMSR3Wo27C8dhSF4cXF7K+pX9uVGj5207XEr7E+E4wfnvg0LC7KDJTX/HFIU+6aZadafEA36NORHG5mDUKkFkbY7Y8S8NdXffovClpIOd3cBMdC4LJkA9kLyn/2ihMj/Ou7Qdy5BUGCb97g9uO/O76mGMvrtE0iMjnaWpvRpH7ZtmJ1hOY1wBK9C40bBbNnCMMVkWacIXRIn/jXPQUzo2Vwlk6s7JurDD/8uEXRj17sdBKDd8tURiM9hKZg3lKi975aAwR3yOOXW14QFHjwT8sk3S+/05j+OdR4C5s1WPpuI3tstjvoOYykKjqODrFrBOZ86nGxeLHkeG1iqqePtKOLNE/9bh+YkBiZ0QkUuuEOpgmSEdGxmFIg77A0lR7gKm0TqDA8DRroS7rVvcm83EBwZdqFcQHk7Rq+ScfE9Zd+NJTkhVzIDUB7Wjcxj7IQQiPBxGS0bq01B9CCA/JEGxywodXmWJxy7gKhc3ZHMG8RN+JFZmHXATLM0HDvL3fNs1cKgWjabx+VEkeaVznJTyc6xflWXCOuI04bc61FaIFraG4BJpW+UEwQDQ+c9oP+rysYe07kvmUMiqedoCeFj2WTo89KMvNSwTcQ3tR6UelDPBTPFRPMYbmSCZP44tnp6vjERInpd/tWcXAaRJyDPFBW6ccFCzDIL2d2+ICwctKGldFAv9zAkuXOgHj5rf0yRI+HW5xMg7QB5IUEQ+F0CgLZ3J+3EOh6Md88a7I0zwg1vz1CDvKUGchNVx1YhMjAPguUtUh3yV9cR2pSomnV6ns6YbZkehIC0ha+xaghroZxvODV+c1FmAosVndgMlFr3Fbsflk7t+slYtsChGZKQXQjLhYbMV3MI5iXGjmRb8YNMhb9BB0+7zcl89Iy/ffQzj1kaPijYv+CEbMA+WLuKc4DKS3UdgBDC+qSRZpOeehtxVtPM+FNmIELrzHJ7HU4Jd87yedXLNg0LV9vAaVcw5oN7W6cZZIklaIi3Q901e+gZNTVE2QjVORN7faW/sb+L9jMS0X0v0L6l0V/EoXf9NwkN3PaTKL6iAYm851y3FSg+pcfhaGPQY0IuvWjRK5XhsrjSL0LU6z2I0CfUMYrU9WxD2iSNDi0aFUzym8vipWgSy7uyjpPNtFsrQnPamG24qTD4AQZOz0qpmB05eT4DFuTQK/a+1m2zzX7XfDZ4TAvKp33YU1Q5DjMoFkEFD5yAf7WV7b5K0N5025lXj5/RXJPZDyj5sspf3lJxfJqO6zWGnGLDwGwptsGkR3H7vLcn3dfIEX6OKqhUiMfXAIeYQ8ojSUkJkfdIIFwWkWgM1aKCmHmUR9bFPMB5wweU6cjpM0SPOpEOXiaamR8+rmT4Y8PBysfNroGurbzDrHYQYd+q7fKzN5DSNwhjfO/uoDadd4or5qc1/pdGOKlTEY++3S5wZe/TdU3PICctyB+9kWXjUgHGEVzXlBgWgyKJ37sQBlFTHUpss6Vr4ep97RhCp9+ByEPV/7qSNdTa4sE9gHdS+ftRgKcKyz7OF1WL4C633o2jWL1L/TWJ3YE9j3iDsZkqGdMH/8wXk4+mMztx5SFNY/8ty0KoGFoUXvkmrtk6MYs/ieGHkAbEofZ6GUGwh9lDpHpMP3ED0QxpCZRpHv35MLh92QnIxK8eA4JgMrPLcU+GRl/V8JLKv1djK8gikVGuK+XkqZrnRW+ZBNt8dRP52baSHY8Bn8kZOI+wFqjia/z0PcckaWDvC6Xdvi04fq/t90FZ+yl95zatHgtkLHilyW1WvLws2l/gYK6mN59tR6XplE1n7XcPcgeuPf35XtSG7jjSTlHSwi+fYxZGGarrA+ZYDFh5f1pBnIeowrpQqeFodpkX5w71tIBlYvuJCUv+3CWA1HpfDSxGvrDL9O9Qpd3te3XfPrwqyehD0CCjZbjsUtvLJ2o7CGcGux1RuPwwZMSPNstookOWcUywVh24Xnd3mn9OKbUgCecgsrGo7DCnbddXi3puDeGvWorPecgWtdqRL6p5VLrznzwocoaseyuulAKRW+g20nrbzCA3x3/tgdcP7YERC2Ee6PtR7tSE0qAhCSxMJ7RHC+H1yQC/7OEtqdHsye0DBNsIzYuy8uVIBqb0UMK88MXfRDyXVeY7KCEI3ftad7At+mKh4VTwXbtjAyhgzOZmD9I9NsfP2lf0wTmItLlsC8qrHPcHatZXzgvWXtW20I8Gtv3DM/1grC9natSL07G2/s9XAgIq+D7S0IMFSDP4Gyn3gdCqQyz+vG0Y8CyKu3ZZVd1jffsuU15hNR1o7KjLrk0XwI5hDDHO13eurNHEM2EXkWyrJosHZGZSMSAwlww4kJBU/CJFW7YmKIU1eeYjjvX2pPpntYJoxrgOAdFAszFRGqHSThA/rgWGqLHmU/dVJiCS1u6lw5WnlBwKE8kYVT3sN+VKiwzgDlKIsoW5tqALe1JLyDZ9Zz1TsvEiE4IrOnJyq6PRlDBKXjZq2IGOMtaQxZE2ISASVC3keeQkAYQCUToOulqA2Ms4TnY3MDn5BfxmulLWtt8AfxGS+FKSQMxG1hK6d8b5NRfDAZb/n0z+L6TRS6Pqi28jU7fmydnni3j1L2njiMXHQxS4pPhthTqxu+lWROUV9yA0tZOtQgi6tArup30jVL64Eo3wZ4/Mn2bNnhwP9DwDjKVoQuxszCSMfECqsYR9H3NyesehagQfKjQ4OUJLXmB8Ug8wVKNjwFckfGFpXZymZn45BGJlePryM7jLROUWvpSGs+LpujgW6MM8vo7G08asad8dmdrzEOlIg6T3O6NAn+FWA8WM4Gf5UKW2wU/7T0oWFoxtElCHwUB8ZidHAOMLpgkaR8PTtKIucQiMswZTdVRk6x30czroZHRMzAgk9piRht2+S8PlqQkTciVznuiaISdTA5heJKF7zfr3yMOSijCHJqpEv8qOtI9sAH386WhxuyCmh5dreR/
zrgabApF+yKIm0yiCFPdCvWqqtV1OE6a+bldAUFw0HVahRf0jyrSqOcphJDQqTCqUhAaPw7mNcWBzXkmpBw6LWTumVe3i24yOsBCRIedgBX4YzPkyei4PnwcYOkAw5eIhoNIDw/4j2ReaG8brUgeZzRw6uHKIfFI+6/Xm3S/a8Ra7+orP3oeu0Xi7UfBWuXMVjI0f4vAnv8R8OWNCj+8QCG/CZ84amC1xJlcf/QJ/S15fL43A/j/yJPzi0NY9YIGsZbGjYzAnLWAu9lCiPBBUPBPtgxT7sCviDpg2LMGBsFR1vGfqNsvEC7fl3El/Sh8z6L8lAu+RSLp6l2lbYjS7vYgQS/0ZAzK01Za6MVbdzDw8JoUvMIHsQBAsj5gYrE5G5tPD2eYGIILVvO0dWhhb6uiXhzaKKpVrHrL+yOxQ1CB9iclmOPJz7NqOgQJaioDorrTMd1IJM/srLAHMT70Z4rLwxyDB8euAH4D9EAQtfd4AM/2QDYow3oXXdQDXuKKmkaX5ApmlprCzwNAsLBRX6xogPhkuLNAp6JCQJDTwbqL0Ii3I8gMwpKMtCC04GUZt6fBtDbg/QIgXvWqwo9CU397ZEfuzJrfVrspDKiHnMBfbzti7ga+zzOCGFDrFPk9Wz9TxbFfK7XlrmOh5jzsMu4D/MhhoHDOUmY4SibL9JpSIcPwoZtamMLD2PMe5HwBaGq3NQRCQrTcF2PxqlGwTUAYDlEoBEaEWLwOhJw+fGy08DDeuXP5s0g4/Dq/SiU8371jAPGTOYjoUezy+ulftzTUN47EN4r0ZxVmXCbK3iDugxfU8x/MCnZ48PEqS/IB0PEFqAcHGuRqMgHFssG2m+IGhR/d81eueGX8ZHnRmgQoRwUi1wWHE0ZvzJxHmS+Q6cnFmdyjUdCt+zjJflyUkrz7989aBtEN7+wAkVjzefMvg6MCrNTLMCUODHP4jGXGWAdK2hfvZ2LdoEeS+ghvuYqrrT5WvZWUSYnAICWB6ICViL3V6mmdtl4G1J2IIJfiCWBRsL4tNcIASA+iSiD+wp1wNg2GCq4sNGshFG95dAoMm9sSayKCV7HYHvyDP3eEcBj4eFcfMiHmUfdhviucMRUVEhgZT4y9DenoKFUbQMYwswUGtuviO6V1e2AQm3Y+GQ8277Bblixm4lBCfTmyrEcege6zemBfU6oU5nbyh5taxPKANbudEbYMSOe2j4q6k/yqbZvR1Q2PZ4GZjvAtNGKB1jhQY34gkaOVLI79oB9zrZRjgAByt0cG2ewf8xHSo+eLxYkeAafxlONzJKlkwCctCmUZw/0I3qSc2BE0diAEHUritQYOLYJt3apIfK1PYl0ThhjuHrBMG8mLVOJn49xGrsgbTlWDcHaOWQHp7S+JMyG6LB8qoc6NpPRORsD7SvJAgtPbipcCoUs4aG2e41hN2ZH/3DNCeyqHv1FvZDSZvTOJHz4MxFLhhB8cODAjWBdDuec1MKTMeSltY6YedS0RxziUGOk1F8JCbOo2UTo1aPC5IYDUoyLaYX2+iDSZR1Ddv0+QTiGz0Mld1pZ0T5pPQSfF0yz2RRL1aiYcCVZesbthjgBAfztQUx2Q54pcz4qk/1sUrsLnWT+U2YCc0GcWtp/+O4TRTljwTzEyWXE5gdPRrMnRkd9aCa+wlyd25YjYbHJLlcLU+8E2UVtekRam2cU3lvZnTG1Pnq6xBBsvc8RW4BQOSu50+fZF8ESd2MyaibBghWZU7lSM58ZwWG1ce6O4O6Ef/EoNsGINjMtJ0iZj/OAHbJ+4hUqWLnaRe4p8lEBsyTNW3QlYaKDhySG1CW/WZj1lEIF0ozWPPbl1L86JRPpCa3B7O/Oubhv5hZ2H82/MqF8jIj7GMxhZEX+AlKsOWrZrHXysf1M3I9S9vs/wDLGGJoygg23ytI7VYw8rA0Qud0E4PfSzPjJGE+dZ8oJ9qlEAnAkZpbg44rIvp1vE9Zbl6dnVcYXUfh6hZ05sPyEz4syoB2SbSbGFL89Fsj3nNoiS70VnWSTIkH5kx8m1vREuyXoQtIT9VeTskkSe/xxbzdMMiJfZKSew1xP25mZmr4n8/Ca8fPpuGkKH/m3BogOmuYgvKP17CNo6WOOYtXn0IWv0Ks2ojjSKKkS+0JlTakl9r0HUt62SV28RoPyjGfsS+5ZdKUq4KrlbufywZKXaKBkkAq9mwLDN0J4v6A9AvH+cKMBiiAgPl+knGC0UmAtVl6AFVrYzRuFw0NSW2m0ruJA0JzItB6Vvl7EMQwvXqrmBRH/ZsT9XyurvXah7gur9hrt4JEXO5daBJjsde2jys3zHtQu37W+Dbay2dX8Y75ZoIWNahstaUkz2472XF5fHPgg7r9b2hH3Or/p6q0AyuEu+YPJc3iSS6pidB2Wun6bzVk/LCGIff3Mlf7UaREuzg6gayZch3QCsx9vj1kgR+gknzG0P50fsrlTChS4yrnfwK/fti9o8iz4vRIRf2NbqBpPruqdgOgJKu1yIvxa0DZjtXxTmYPbT9u/oBi2/XP8u2nO/fS/8d0/etAtFp/+EP4wnWUq2ZNX2ga7x9eZt6Tp1I6K0NTqyqUac/uy6gJQtbk5zIxcQnhhsrV4bcJnSd85w6ZTMuuQV9PGItX2ORxXLzI7qkWylLUeWIGDBZxXOExmwsm8LMmzBuCGyOptV5iNMX9yCEh30gAAr5l6DAacC94gDKCybGOT14qaDd80RBB44p2i5dyYSNIH7wl+zXAlrZR+Yz/xMHB5EDCVi4SSAxKSb+lMsJJ03+gJFDdkiS2u7yiZuROD1UUIL2Mu2xX4DV+NK4U2OyU0+znoBf007VWAOk5uM1kcYG2H2h8/orrpM0ybBEkd4C+wNF3/xC3+nOaLrc23dRuSdDjSg1kNQ6oZ39TwZtuBMM2svFrsg5NdQkfm5khpnmMOGPjxG0TV9NaXYcrCwcwqys9VO/GCs4HDeZIyKOYZ/SCnhDW4hXgT3dmLi5bZU6UnB77QQ1SQEuKefUZoRiLZ7TYG0CeMhVqDvHrAHHazOO1dQYFIiU02bytnpbVZvby7PaL7hrvAP9nChOwPtRAga2zarveuYME2y84FHH4EoQ+rxb7ogAaSeKrGIb6v6TlBrV+yXE/usxD8en0z5Km8QaFT1Xy6zHINXhZNXXN+fyOuaE9DBL7R6S+GebzD4f0DrhXXfFZ2rtIeqJ5Mg0hAuxck24vwRkvIc5+ElGpHwLK5VwY2BGn/ee8cqlm2X5ErDYu9YXG3kDnMvFTh4SogCdhBU97dnMhhFCKwRZzQ+tKf37hYCJ1JP4qHkoQa4iId3/6Pa7zgGV2YiOM4WmxnSDotmVbZcnQXUIcgmss2p7DUMVmMGMWaDIrmP28kiz9wy1LTkmiD+x3XUkXCb96sukQLYDgLsj6a+crNQL7Ij/2pPSwRTu+6+Wt/mO3SkNqBHKzXVSd8q5FbPfFkB2sv9ygFCmKXYEBKuQPP6rDO0rqzSaqIM1+ngommeWZatsJFgJ7ZGbXL1K+tc9uiPZH+bhMraoLTSksiyDF85IxhAkMO+IpvF36k
ZqqLREmstWkwd7kXgW9lItjlhZ2qaCUumhIB9KNgLvjBzoioZTaQ/JUPUI7gebd8m7meDC8JDoLOjYfsxWVTk/WggDCspTIPzCAHGFTxeGONqr8Vpzi+hfgvzvX/h74m2oIEKUaytjOKMHBygKdsKCuoGytI/6/u1P+hCZvXrprAgoXd1T3oS6+qYhV8PTGkHihK42ioR5lq9yFiW84dAN1cmZ5h1Qtc8PHQ9PpmZ8F9GtOU3gFGd/xM8RvMEeQfZyJnHPcwIUgVdiTMf8inBG18+e5V2rc+Z8FoliuAanlzphKBF1KmJ3rFO6w6ADFA2fpEnH/l4u55A59vQHYvo+jlCUjeaBuBWslh9PROHNu/m3ji7jSxIL+WQ3O5nJEZ5R3I+UErUeFliMP4tTsOsOP3vVm5/cauaoV5Pnf2bQ5R65kRhC+jYXLxirbC0unaCl+n+l8Kb9BmtBxlZksnGnIjZiKv8FFQgkI+cBnoN086pnWIBieyBlXs0FJ5wMfZ8LSQljt81e2LdyiGoNHImC+oyQePMOeY548hpFVGHldV5yOkXHx0vjVwXoAce3lRh3xNe8o6D8pYZkmF+pILkJ8/ojG813jyYdMJIs0mOqXjtnwDLE11vMVIVEces7B3mxybX0x65Ugx8ABjXJqgRi/a0JCxxQDSCwWwBfJNTFfxdF8dyzkfHhBBuqg5WMSHjBfpg1blgOJk3leoa3wKXQTps7a6x8T1U3K1/ADlX43oHNMhJM8IdjWWN3adaZ97G04mKJ5euHwhNoJK2fJXgMp2peQ3hRRuucnPxuljofnBtmbQ48U+lFYZZX6thPGz/E9R7w8otRUa1PYxW7tgtYLOV2zwWypCKv4jGJG3ceoyrwJEd+p2TkqLfmZyMAT6LFeRfNc7sE5D3b2RKpNHwD04VFos40vXKTRLiOZSvmbsSASe2uDzB0D57NnDshB2dSe/ieBG8HiGDR9gwacIpWHoOV4WzsJ5H0yPUSDPR3nYm7A8xPxFCvjCAsBUGvM7c2GCgeCuSxLhOzroD7qo7kprK/ig4+sZOFvXx7/Ao73+SmssrPP8AX4cnSYTHx/KvKy8YLbpiwUHVvxhd6cFWzVU299gLxx2eFP0f5YRJS3H8EchbAuR2wV3BTZX3Ja1grtlnJGfYRQD/sgd6/SZmjMFHpwCS+JdWL8RMdumW0cJOGN/zv8O2uIrZDJSvDMn8oOJ3VeUntcIiI0vWr6sbEQZINsCm5V1Vp36CcKaS7sFvGXQxK10Q7VFOStLxnKhcd77Si8QsAU96gnDV4m9VbdOqKUy92l+kvpNynhWR3uUgFpC+xqv+QckwQkPPsrxRaJ4iioPR6GLvmIvBzJHwCnh7CaNvZyBFnHeQHsDQEdYgI60fhWVyW6ffUAR2J2Ikb8uPWQyPu7A2/sjiCTMt6stLiJSIzldzSslruVQIcAANI4dKUb1IKMsQkNO9+PWu1McpWReXwpHQyRYNyazmszQ19A2Nf31MHnsQVAB1rBFa/wmcjyXnK1Z7aY9Uvij0AMAbXYsXC66GxABC3ydjJTVrvUxB6x3UoldGMAh6XIsSg/RsEIwv4/Xuj9LmsGr/Ch0suxQdkGitgZtv+fdP6A3oVt8UyzXwRjiTzWOC7rm41fYvPyelhw4qsv06wqh6Zu3q4U2h/L++ig1aKKS1GPftUp/n6560fePSbRoNiNGxts2g2skCBABx0cuAKQAWdkmu2jOYdceQUNQhHzv18xwwZXl/ziMPUNrtMNo47obazPsVllm4T2Nf8Ull68/FIfTagHtOCcI3INy0kmRJFsGJ5LuUqpZA6dzljQMNhS7HOTb1J2o7vQiZaqhqCDFep8oyqfyW0ZSi4PJ1t21PSJws+b+DM006McHkZ7SR/y4h+uL2RYcouoos3f0Roc4wiqak68Hyn6Y0JiBifxWHWx2GUNbf+3GGYd7uyMB0lVlGALHPPVfo4ebKB/ic8oaBctQ1M4jbcWeF5YPh0dPa3brb+wM7HHKl3rH+OZ4v9bRH26XQaW7eHpRkY3H9fUEbSYFDEEABfXJgX/fRSA3vvhBlb2fahAoUYvcy6hKgQ165hDbmG7Oywsvi6uMOvtxbXTLubqivDmHsxLpkAOum6UXV2ZyPMKYXtS0I+zxxXyc+t4khW7wdByfnNvYEpzCYqmX+7l4qNJs2PDqJwbi7KlkV0YPu+fhqK1UUph2PrWymgPPRJbARO7xzxDnZ3f/+JRu1+2ehtMMxkvG3xWbxrn/Z2KCylue9Wz40mZSdXu5dA6n438NJL6WUodOxhUlWe5bqLqLorocAizZkNevZ8rKveuvtva+51dmk6LKDdfxlAH7drRAMvkpp/vNoh/+rGyQWsCV0l344d4DtPnp8YOKBZvWcsJ2pdaZKKOg8FGW0XaJMmyT8To+NMs0/a09U3zpNfIeBSiyDzhu95IR3pBctOTVW10FRI9Ipl+DSx14RmPQv5CoOSYMmsi/K94NIHyNoXYW8hfZRpHrbggQ8HXxu69m3HKQ52ZMY9JAuIKZYC40h6CXk7qFqjD3LkAO9bf3F0LBm4iv8Vh/JfM2Qd6wphXEHq6SAAQaSEckV57VwPJRdFc87loKlRCRToAjwbm3K3JFHiptqoQV62wzAha1UrjXbXMo1wOFW0vRwnFWV4Tfzj1pzx/RtQBqAGppwVCG8ruI8ZsmbzV1FnN530DKI2kzAeiWG2kKVIEl37HcxgKOyihQQE7JtixRMzpLUkGPpNvhpAUmzO1yUcFHhlc+vYC6LVyqhJQ2oQFIpF8ytn7molqQRkosKfqC0w1qQUAPrs6rv4Y98C6XCKSHXfKxhsBAElGM0pgGRGHLjj9wA4MWeEdbW2TMxiUjVQWFdN8ZAyqorOMNFns7aDo/VyI4rvHMZ3pyebD6UVKv+ECd7Wt5C1GpoDU9ZBSklfxlp+S0d0pKCPqhSYanFASKxJFcZzImdQV7jPSZc24fkqaz52H/ASyCWR+dtnjvB9oaLD9SbZYOVH14znooYv6CzyPcLy8mUrFJVvi4ygwpmhuGVpFDMDgRnBmkQLIrQHzeRxBXn+FCa+S0EfBMyVt7aBnZ4EOE+fitx/QpAAaW4Jeao/Y60oD0ka6gLXNITysBOPVpK0iyUNShhfD9eghRYfWZWvZhbBBiBN/8kl+et5s2Pfx0DWZkCylF8wg3mcyPg0MZI11dLsYvV+lLz1vD5YL0CEF9FWW/qBWl1QC70F3WgcPHrnHrwi/MJXQn+3r+fP4FtAc6Nh3Tk0HVIMnz9OY+XgKA0Gh49RIBeyvV+FWA7OQOfxIpl6mTSs1Rt7y/ObWOwxBFiydK7nnUIMC89jObp7yfH61Htnyio94nTKSoxXl20FB9WqLS7v545UY1+UFbfNiSa3z+v50ztjFRSbnprXlug5NgbnyzIjCeIxjSt1TxNEDJtntn3vpBvLNgSK44Y1+w/cVWNujAGks+ztwiJRQ33hB4+QOdxJ4V+Zgu+IE6SDOAA2/mznebt4wTXecOSX/9XHoHa3mw6gXmcXNL8JxTIj/MLmS
we47HrLCEOswnCY1KCEprKvfz6iy2znwlbVaNomAKC1ML5q0f0UwrKEF+w5+ZvuILMtP+13ym9WNX+DQWnudE6n05OZtfWyx0E1ackhFn8ZbWMOVjlC0Cv4NYX4wRf8NVoQTHSsPFtX9T7DDvcFMrC3TRvtqkjPyO3XhTQ5mjoMyZ88sBBsUqfO6cA+BkQmYgjrcFX8hPKVhbzzvEgCq7EVouO7GXPiBDZlzRSR/CUP2BV/3Gb5An2nQ6XOV2qjNwXFWDFxgbVDnSI/oCjBj4CPAEqHYhAd+47qL6bHlvf84Cod7JVnfNnN1tqskRd8HQVReOx3+X+fqFTfsp9ffvshXfnioog/g7bNDLTbNlg/1iYcf/j8P9cMNs6Xb9gqn/F8QxKN9ADdvmkOLnWwqq7TeoPOZ48G6CQCbfoQ8Abn8CUN+HT9Qa1rLEpTmjPZVJhV3z+/EC1tJEhtldFHUGI20wwxhV3rQOikh9BCtdJ9trNh2+98kqMNPLr0Kz93EIAljZNKwhjgUiou+w/QBdx8iohMSNJ+xScMA6MD+TSHypFasZ3r9gnHzZUAX94U8JVowlyh+dUztyM1v+6E+kcFwef7I7XZ5s+oNSeTxqHcByWg21XGHLiQQaXGaBJ9bLTjonVjekMPbI8rmyHYowGSR7FlpIVcmUF7JJmEcHuabB4SmhsrOH0f4oHR//YqOWabuLOuop3BvzV3cnPgzA2y5D6jIsc+CDYDGDRI3HQ7ciNXssv98tbCbaodVLRPWtQNQPylFnJggxf0vK0k1W1+0fZFEcMzAFSuMlmM7f044PX7gamDy/q5uOwriZ/zxANDzIYfmgtb1mgH6yt4Nl4gB64eo0OGpeOM4wdcb1rRpXFG51/KHR5wOKKaNHbtMKAYLqjmWAtdbHkbnE5Z71J2JujJ3v/4EBVcgofULWa4y6XSEx+qfLT9zmM4n6MQwC81Nc4LZMmyzO7tauT6/m3XiFpb4JY/YZ+qAAweiK3FkacnnLBWkZzpffAFvE7Rctzx+kyoF3p3imG856sZG5VHR7IZSqqHGrIkupcGcq27YM19JjOixZsyhvgQhVXlOKMGOnykx8GvTxrR5m1SN2Q2wRlVSnvLWnvLSVmNnOIRV38RbaU4kJZrdUckQmDI5ctMUqCewwPhfXvXiR78V+UzpUF1aGg4qwrwTXt4XGzy3H92c2SPIU0ngzrPPNhLaR4cTrlPXqJKdECbse+gAs35i1lbX2mNwwWnVPaQPDXZFOATw4AM5XNqLeIFc3rFYPsouy9gvRa78wrRYdE0N5x73Af/X4pt7KMz+oUMDsm3cbP/oYTg4Wx7ePxHeymGuUlJSU0mjG8rsLHZlPuyvixXE1WI+c16Rzor5DNsW912n+bGdX554/gfdmjw5nbSR7ZEkejxP44xP0nQbnDkrM9T8qbjVC401ARAuXWbNh0RYvxnQ9VRTLmIM3AodjS/IooRABlKJBXWR/yTHV5McdbjMhMaMiHfQAuDbTqyeDI4SHbV0ESHGyUZivVREGpOQNZERpUtI18tMTbp4T3fRurva77vCFteZD6ifHx+iCGuoqWcKDpKuDGZvGU+ypbdjVd2mHoHLn3l5L0RUg0EovSNRQ3n0dpZ/vWK6wobmBSshgilkN8M3YgiMHfn71pIStXkjxQmhcsFxcN7Mo1FHRfm6vEEVaBtZmKp9Teycgyf4hs3X8g2tRTXuJtOs9r6ThMGiSIE0aMJ2JL86YkxUvCd86q6q4bVzM/jrcAfHZUSIEWPPdPTR26Wb3rjM3uBAm+9fDr/Ven93aN63Z7vDXsjlsVX0e67SsGMK9XrVPxEDSzK5YNGoN34Yh5D5V6ofTWSpe/+dyqFdIAvHrB4Cc8QDQD9uX4SY1C1ovR/7A2BJK3sCPu05nsVxVpW2cTtyfcB16ckiS4mYMeQpDd9dqByNUyHq3Jblkkiy03Bh4umXCWWTmqKMMENgjU9rii+ukZhVM3GjKHNVV4odrmLFDPHyanmF+8Yn9wuNWHNHPcGoR4fCbFzwtw3vn1gflgndb+VEv21Yid+GaYoCgTkecNw1Q+I4jX0TCLCeqQ4qQfE8muE7vxJJu6iu3ay+uhyO8YQ5MCF5YVwC9tqWvPfyUKn6jXtBDH6PAHrvSeM55pdWBjCocn2cOCBZ1WvmViWNnNqywvuk8A/1N1vIPaojjGipYSg/XlXYFB0pze+iEL8ar7gLtwEUWoCxutHM+TXhk5oY6uG8JGRn4w25S8HO7pTfHAPx+uV7uOVwSV/xHELbiq2yqRH6HiQ7NgmRxdXcvncLNb06hjs0jmbXyLlA4MKEfLntelJejBQRimXraMV9PQBHc0wKw3M+49h5bQIofzqtOTkAHEI2zzF/blXBlKIr5B8cCSTOBw18plPOxQolfOHQe9PkLic4PFvhg2iGuAPZQAY4+cls8dxWQjAU4MwSGO+5UlDboxqaHHqiKUwEE44319eJ4XsUzHg8zJucCXGKunis6SBTjyLXeQMS7JUa2DFwVfobs+hmlPAYADJhzNimO0h6T1sFrdMEE99RUW74rjychpGnrVQGg3vnSVXXYcD1rbSQgvzTm8GX12guydvScCXiAZx6I7GwKdVTsYeIBfzxOB4NYq6Pk6yhZgCYfvsfjBQzqMzuhTmSvYRh8MB6QAKBsmX4QHs4UEDJeAAez+oPgTD2kDujzwBTL+A9DuZmAs5X7zq9j5eAPIet0AFLZMnrsXLrUOoMDvolvMBnjA1//CAKo4bZSyVFTDYCBARMH+wOnR0Ks40cC5HEz9iYf5MMThedyod1MIwAYUsD4obYA4JkD2X64dVTsYe7ZM7w1mRAAtlYIIfYPogMwBlrGnwMDhwx+NAxvZtxjPMQPe/xym21TQrCyAII8zl2sR2BMGc3bXbNQcLLXEPrpnUbkboEEPfsY6gBlfAUS6JXLHYOg+Q8+DfyjdFS552gkAiO9R691xIOyF1wLeJe3jO9f6XsW6mnH8y14wJMu1ywm36Z3WMZcP8r5bIPAdMt4w/j+bZ74qEJWPQ8xO0PdjP5Gs5oCAbMMqMfBACBxYPyRLxy+CASxKkg6YGIQOhaQccexAAEABPzYY1gAAwCOpd5JzOsQeSLn4qNPhsUBx2EKv9CQBnhKzHXU5Sz3mQoTuqDZhm/Vt8FOb1fbXTYnd/WqJLgoz945Z3YYCnQKE6zBVD+Lid+MOtv4XUC7Ky79Cunug6/X8jZM7U857Tz8WWTFyCXvkZ0upyveQtzvvnuwK37t/v+uX694LHQvXbQPDrzL9HmXCA49h/rpjLeh4fViePHpkqT3QGGMP2oItA48fYAwHX4hp8XMo0LIhxPmUbilkTvBb4T96EkSGS7WJw9DOk9Y7f8DU39qjGxBc3OB9tVQ6SWydx0s885wvYg0+mBEjxUf8e/xnmgkgcqqf2yFhyvAOW/1Ff3LLaDfZLXCiF+oicJUgTtE5bZgewvg2iXulCs1hV7vId4Wu+X6ZbB3YoyuTPbyZmMZ5F6sH+jlcBJ+gIVRMCQPMwhPMgoJFWtlgbhnRy
7evI5ihSeo6MaxDYNvAReXJetJ8GXQEFd06A3rh2Iuwp67i7qLPg3iuItp6lzGCuOz3dxe3IT5/fqLWW8uBQx3wDpDHi1w1daFzgj4wSVOXFHnk/LZF+6r3RB6tOq51QeUKWzS2iOkF9nJjNwpvUaf7an8idYxKvhrATo7MPiEXbtX2nAgCWnV1lchiFizVAXByBoJobTjC6TEPpPpwU8uQ3Oq+f2q6sOe+CAn/r54ZhTP1nwDrA5SOREs0I6WUR3WGRmJCiekpIcqAZ0pCSPe5M/nX+2t/zQhFrEdcpA6VPGOr64UyVa5vQV7Wy9mA3xEnHxTFXsCFBd8vO/l70OUox/aDcRDgj/K4x0LImg33URIIS8JR5+oLBqKq7qcyd+MF8CmUnvqMtZF0dj+EndTGP/sZAi8N3wbemvHn9iJt513V1+ZFV1y//PiahxPL+zv3cugygYD3x7H2iyhCwGxidKIgrsrMRYTr0eAMmpllSJdCEg3h4EyC7gvV8iUnIOCUAQXP2zxEDK2lgfOzCEbJcEQMy/Hy7mTykLim3UGnjQ/BSZv0Cmi5kj6Vtx1R4YHtx1322DvrfGFiR9CmdOzXcJt14fidffT73ZL0FdHnKgP6XNXznL17OIiGXmpXNOIvtzz/zUoO2JjvIY6KKCj/bl/UGuo/tXtF5L9H9fF3j9AyNkup2VfxmLXy+4nd9qOdoQgt+9ciFt3hyONEf7SWtEb3uyP+JPm8onb03bZ1oE4pxg6gp1K75VoqIW49UlvN4p492XvjslB4MC7R0zQJ73dOFYENoKiPRvY/dQ+T3Jd/UwiS4QiybSJmqVLD3mvyQRkzgo3W9heTePrhbCI3UGabyA3CNPkTOXcI5O84HQtFM5NUbTlO13Nbo4VwoxTwYLE7/J5lZ6ZRLj1YfOBMPCgbKQVtv2M0M6ENE2E5Jwcbslkv2HRG8jnIHeuNb9bUl6i6qs4UhY6tqTYbkxyq42r0Q6k7cl+Rh+g56JRlHvoKsodkvfwltBmkWx647x7uPAnl+uR+I30lWoTWQYsqwEK898qX9DULo9ScUWCVjLaU4/fkPrHaRd7AcGYZ84TPjEmxYlJAmhGEKv4vf7hzYFGCU58fHRA2BM8sHp0AGk/fsFp1txis5wUL+hifrRgRAZGCXAASCdZ5n+MhNU5ZC0tzbzotRDNH6/EP5RHOnlw+0ZFWtuOE/i4FbsD4zHTB1ARTBuHNT888qO0rn8oKH2DQ0IjAP6D5ZpMOuNhMe4uMuZ/mOb1h6hHxeig5aORPMhS5zpPvBAMNfIRcGMRz//w7/Dlctr4QK8frL/+0RDUHmhUFNiOL3UJrMOcP9lyav4ewT/wF4nbdLrDhF3fTsrZvf9g5JNMJxXKgHr43AP2g+GgE1wnodWJtXyTvdnHHOjL9mJ6nRAvqdumerTyzcvt0bdO0kARjLQCvwIoOM4PHxjEB9NSOvUVMN4rqgWQjkSeG2GAfIBGj25SfYpD59WqUufrgVadCW72ylyPotqhJcjPRW6vAuggaNlND5vtiZ4gzO2Nnngs+plTsxXofgXfRMykHQ8cp/EogrUNXeWn78aLg/kMqbF4MyeTU0mAA9HmRpWXXABv0FgCCUFgiIXPWnDoCiZCPbURDsi9IX/TKPPjgEMxuCDwIIkvinMqHKi4OP6p4QYsHggIZVVUzA9Dgme0GD1c7q/ibBUHKkqGfq7JESiUVsTtYTVdA800C8ByS3mujkm1LQ7/wOtdjY4pLZs+AOo5pYJkNseA+FpZpjX/AiSM9APi65MZfOFYe0ms/eL12o+etZfGWq5a5kDFSLHPBYDGAR5Xcrqb+Q8Ey3AAUFMBqqvWOVBRpiiYLU6gAkDwFab8mS9Wjmm8aAE5Hbi6iqG6eIGgauWIBDMF8hKS8wPqghQHCqvktSifPL6BvZP5xwg8ffzBvuTXfo4fQwpXpfwggPHY0/+MJGLpua12Hbyq/97f/ufe4fuV+fFOm6/cWElCg6T1H1VQKmvXghX+FotnENu2MW0R29nWv3SjY3NukgbihkeadXCzYAiA4uxcPO5c4yRTjS91zlzDxPECkSp8gkCeMtoyXqUbPra040V0FzkY3lH2/XSH8EHSEJ/teD9zsyn1SKEa3YeKf3gamjd2Wvnchx408FlF86N0yEQN5CBx053R8f3ajCt/LHtn9yedZnF3oTXoMGp7tM1/lttYmK4eULxjb2Umf4aSOg90ZYnXOLkU7LEWhW0A4o2/Tm46NZ3Cjm9F5sJeGQNvxltBzVhjPbmnShJ3WtxaJGmBknAj2h4hGYYFiEulsztlt7KDDEqwY+SAho0gYBPkvGBEvvNjspcXAyklAh5avNqeyQwIgQDkpprP+Ck1x+YuL7qFSngmkhs7PyEvErB4M1p84QE/lS1eebmgqL9uAGvdWEymLo0bWxjMzgnyBcBs/sxR9oeX1h+Wgle4XuzTMvb6fMYH3aqfZmR6Iz/HaZAyqIfMtjy/ATJNcHx3ACFQZ98P07+GMQO0HKABGAcbgORADR7nfANw1A352a8PVAYi1HPDHMjzw2XU3aEAQDEFAMg8UJrz+0DFlqdjcjjyi3jmBKvqyOeHcRbfeHEDftm/+VnQDzTUKfaJogaaehd783hYALBMAwC1DvBZ4A8UoNgnAkBTF2PzeFgAsMwD/kqwVJ8Ff9q7wnyYZpmYGLJJQgZvxq0s1buDeCSniA0Pb4Cv+rQ+5/+BptgCE3NDneMjQHUu/PkpOUtssNg5cgqMvzemYZGNNaFD2DDr/OqrWZeOIvVDLfp1nudj+Mi85DGChsRhHnlrTXnJmx0e+eno86EwFfQl+6jv0ZSC20v/tSmCgPgBmT2e+TOMKIYDA0BkHgCkBwZch/q8wz3bDAxFs3o7HDNwzDEDJjXXV0840IhLEJ8qPuCJAI8uvaLD/LBkRMZHpkfKWeMrSMwPkMcwmB1ANG8B0vxiFfMDApNngiDiWyOIoOSBydQTLFmAb6IAUFgqZ6nW5sc7xV/Y9pacvvtrWOfLcmzgGS9o/DScqnqP3JD1Xund72O+S/luXrED/m8ovp0jyPws1ua+whTMLuC6XoCBXLGYi8PslFBwZXeXOBfrwv0Fl5uk6hVdC3F91PBZ4tzCJiLXydlBh+s1F+fBhCUXoQu/xmNqcaFWiazF135ecTc3LfYco8/bYgZHca66GZf1cepjjXGF3ZXT+nzLHVLYVWacRmw1NfDdd9q6IHffOd1C2IQgIP3JdNi8tv5Stp1a7mie7l2EwjdRpuDcr5pFyF+K/N3F6fI0jdcLpsUCv/dQPT6o4i7c6ogLRgcbujjCz4OOf5S+OH4CUbjI6uaGlo3nAIROAtTN90RDBzBjdAgDpNNLr/aDa+DvxIdjwMAuiF4tjqG/Whc5iHtz4SDCwMV57mGAe6AjPVMAi/Za91nCCY7Ra4OFKlMZkE+hazIWzN4UqOn+NWSDm4XNnM3nbgmU+47rPXpj5nv77YkcR3FM3G5Q1dcw6rRIy/V2HNHu4Oy4iwJHqu4Y8DjwqlBTU/9KijWa5APRkxbtk
NmAkYNstOe9emBtWZWe8t+0XhL5D2F5fJ35H4we1DzLG8DjVbBGUY1KFxbi7w6hrnHQkR2HNljbNzwZU963dR3X+T3epGej775OKwQcNjIx7jSRAHH/JdyLnKEvXp9XXWV+2mN3rmot8kehcxwkvBW9SCg+e1tQ2lxrcdAPdTVXuGsb/5q+NlrNxDeDYWnmUKdYN0CfGoBp8XcHavMBsf0pWUtzJpzOPh4wP/jDnmdkkQt/EH+GrnHIOfdi7mAAsa+0gmtGvhekS8yDGEnrOXRv1+LeOpjq/26yj/SO8iepukZi/sEA7S4KVAwuP2WD8mrxGp9oDI+XWZD1SXIZBQ7u53hlrp2TM+ZTX2CRfClEsvAB67u54ozP7X0tzMFsIZK/5wtOwktB+msTgBSqFHhxc2mEtuifoh+4Mufz3XevSCPSnOumaPisJdcyg+EJPK+Lw/6WJq1cB93NH68Ry7Opc2ASGxe/DixZYYHZ/1t/7opONPlVZLHo0/IN+9/V7dex/DnsrZXXCbHXauZHVtvdIxhmxLGy19/Dx6Ok+PP5nPP1itrFMJ2r5no6roDz1SBiOAnn1ITZz+xoRqizWL78q+urktDsAPRrh1U7UvC5ZTpmfIVm2WMzzDpcC4+6UIMzeMd4b+PtgD4AJTCYgw4AJvCaYTPBDHy19HMAgPcRsGKYKJ6jbVqNoVCaY3zh3PrBwXBg6B+t4rtAvKT7smQlzdAKR5AS2IEVAKwAnNVb+/eEvZQEHogd39u0l57lAdbYELoK3lcHMSJ2CXYK0PzbTydTcACrN7hAO+kC4KzeqlZvZjwLB85nJUjGsul/HUyVaDQ/xnhB8nDICw0CqgdJ3qBW3MqVEYMh+fVbdi15qzSwuBsMwzNwCC8td09h4Kx+hdwXkxxivyeJGV38UxHEX3+QXNY6F3Tu596xnUxv5xiuEMaqxYbEmOw5U5x18uvNGign3ijb8RcwuxzPE+6IN0N8crhC638AtRFQmuAc/4cy2VFuEGEwLPn88Vrr7LzRH3EOG3+cajwRr/3878kGJrnOXnQCZCL0GLYEMg/0Tv3iHfEVmOXW/KWBQv6jPQm8QX6yKUXtUiDhKIIxARqLc2BvHb6zk3t228ZECZ9MIOORoHFRTNEnVAMunt64N6iTGCJIXa2gDylpcPuSBc6zw6vSKvOcQ4ImChKlQH4UlbwGWOE+i/grmN+bnAko3Ppi+3qsXY0F9z3iTdmrH+ndQdh9INdXPuvCvee9oUfrNiFLoYCkU+sAINWRWx2jngS8hOqqoWuMRTSmojJMfbVwgZj9nZgMS5zjF1Oy5IsryShxAC4pJ5ZzleXAqmMnxGoS91eZtM46Xiq0B58XNRArgNTd+KhxTW99lE5ljTRlnUPdWsxtWbmOqBZKskNZ3HbjaU7VjgI/vA4laN8m/h6g7ObnMfP0Xl9PdBBM+7nU4cbiBOMiweTgbtTATtXGY239jjUSsqsaWgghOq4x1PyZAOASM4Xr/4tftoFT1KeLFQ2tekllw/uWt3j4F7q1ns34qijqObLSOjNXHlYQQ4phRk6ofzaUPhccemMx1yjXiiIDbocXLD42jF8FLp9Wj9fctNlxPgMAfP02Ra1qVckOJFJclkCdu2zBcVcuhNP+gBpnkOWeG+wpLbB6z363qujaB0VLaX+7XVeaePA+OadIeHMqpy70HwToxcco4VW/4ZWJefo82UXZ5vq6fpye6wFSsSgSaXdKwuBxA+zX44jPSLXFvBuUAfFEtxy6004meWzm/ez9JpcL1lg0XzquV0giXLrg7C5I0IeMlfRQKNPhUwg/KSKhSGZ8R0tGooTkNTyaqJRhGGy2Euu+ThKlyptDsz82Zhc0Ij9xdQNpa5w+X4S3Dpk/cNTkdMvOyaggSW2NQote2DXn5tWLelJo8AYKK+3w+le5c5UvTtnl/9Ayz+fj98WfE60lfwI4diOohrfRVP22qYCi6a1ePufXM8j3v0qdL+maQPhTEsB3eUjfcB6d8xy72g+SwCDPf/Q0iRsQTyZbPcT6qZTrKlj1xiJIliN3PIv53tZJC56zrAjf0KdGu3cdWk/CjfN5jlgkYjRqJ7uzjHpbIgaAAMyrOZh7c/VnISXLLXcRexigF7bMM2UvJke9gCpLPfQP5EiPrroluaQ7356+4FhOtPtUhK+CXnlwEt30ooBwA/BC9HZxioZCP4/qIySNsQU7mNefaYd+YpLAYaUlw/DdoNCXkKhc3WoSdaLUjG2IhBS1DC4NH+37gzYxeCkalrHmK8wdtRkjYNDvZyN4Pk5hkgWyknFq5zHfq8/XBBQBc0gHI3U8fzRMUfR9eXZbvA0YdHSSZwwE9nY9GH9fKtPIz3R67eUVI4nKUL10njcB3RYI5H3+z/mR4Bf4nlfglMtBYefyBZReNFCrCDJEYP2z82+7mjDr722OYC7lMNlBJMVzwfsYpCLVl+7QGsQEOYLOkF0O44oX37VI1FHGhWFlbzIz5hGH0KcXf2Byck0iAzlSHHrJueB7YxJmvMHXDkEcB5P8axRY/308f4ccausuk9iUij+fduTuTrNdP8izyL5LpCkU25gKZ4RkGnBU4neGvmGS3M8H6LqT3v+XAMci87A/H3MV98LEj56kBbf+zqe09cJ+Nan7rK5CbTbq7PqOtRlEd3gy85f9VIETrU/HQ0UcZJaR7ZmU+tm1ZeYhpqaH0BsH1oYKm79tKm44KHEl5Vj7DS80m/WPn+OdG5f/so8HSFtA4VV+GQRB3qK/hRH8P6AHB1jGL0chOlOU2aKQ8PwYAL6gSBhfvQlRjS4+C2Xp+gp7SvoJu5VTBti8WxqSugyKJr9b6WmRbjfQOY1oJgoj7ry9+vV3/4d2QWUJ8aAz4dBfU/faOAJK0PohZNhPUeNKRZeWiiB97EKAZ/e+qt1cLvVnbJUhKTm1Z2U4ofsq6IcJkGa5x9V9E5VnVXv42b0NflHUgX8z6IZQ/ebrY7UPH8/RpzvdX/CMoX25aTdw1HGmAoL5KX/4hE6pvfQ2tevQL6jhC95zHf4lvy/85tua9EtnLX7NDG3aF3zSGds4WwYlsKjkld94XJGtnwwAlS0VN2ZHYepDRlBlHu++T7W+TaDQgkKQP8si+R02RCbahzMfmHDHbvCHgnVAmoQuxp/u10iKFwpgPlBXtTLolxjUEzls/hCX1xEeXqybNBTYz8mDqAlOype4+zX5eV3o9zKEKmid0P3rWx7/+yVR+GOEruVoIdo5JE+sm8imqfA+vX1W3BMX/92fVnl4j6DVlY06s+Z6iklc++9w155b/daZ5xEN9Dg/g0Llm4sXxeBOOWCcQTSe/bCE4+igCTQ71h6w19s3+QuOAUJxUwM42QgrWXOzN7j8Qk8VXgfXL6Q1T654Mh86SjBFaT/Csb1XghETmKbLRjrDkmD4jYAPdlCQKwis8APHSnKQuSMITcF7UoSn42/1C1LbCPnrLaR7USvt8IFDQk1Zpi5KjmGC7brmzeVb6y67ivYgkvDWLl37nrun0+oPr1G/PesRJ2824V6CHaeVv6r4e9K7
mqevNJECevVwzTYY5hst9zdziH+X/0KWAplXLggFvY0Et0nrZKa7r/LQDMHME3cp9VbUx/tovFkGDhrgqiF2AvACWQOrLft9c8ENEPjY05A9StfyDIDPGOnsMsAXUkIKpEvvtXUkJY/e9D9YkMKIr/i4SfSML/hDsbqW15jgYBeUdTLr4jjrHirvkt46imUkPX3tGUuSVVqwt1bjZn8BsRqyFK3VqZIpf3IKNiXK19hltpJ2RHmVdm5GpNYfHASKflGMluZaZiZXafEOTPG2E7H+vDF5upfe55OylUe3KbXA44JQ97K0m+AH2lTcw9z5oIicEXG9k5wTq0eTbz9Q2y60zfMgIEuu2UCgChYecV+MNVeTsHP2t1j893kCcUB4A8z5hZe79LVrmg3pc9sTnCFM0s6Ly7FGBdPLnfywSS8vYhL5fBtE7O9TUq/JDRGpu3vYi2N97ZS9joMaB0/a1jh9TrxFwZmDZo+p0+x+qGhIJRAN7XnEvsXDsuh8ovun6PwcAfR7AGIwQQ+CxC2ilozezDnH8AiHleZcE/ozz83ML/ABEU2v5FMf4RkFcubJzWeVUXwyDDA+LqfuPh8cn23FWDu8Bwe/bhF2RKCfLOi7sPSbyEhzK1+6Qn09yZg86DPDrUBkbbjtglsfEI6ahBvSw0oykaO4rGmlNLQtWFp1shCNloADxzjLYDoYx5fMto1TsCAYpavOIdJgj3Ic1Ajy8psEsP7xGZ+pGce2KvAwCqL/mFEh6TsZlExaYoSku/heG9/jy+WwfboSWoWBWlbEgug4XTThWmxCgtrG1sJJU1u9UnSNWDrahcDql0q7ChakUkUgVkkqcW9pEv/AGNJKteyGkKFKVbggCS3xCwSXfwQbCoWRroLeN4xfE24Z6wJ8LKIWAQTz432a9NnXyv4xwlejjySMBywEr0A9HNFtw6K/CecsUCq1jE7TuSN5nP47V24glssiNc1NwuUGzZaat4U0I+6bPf7riNnbqEyLZnpQYjz9Mp0Wcd9rqTzRtCcYFSH5dJf6aOpnIF7BEhd7uOqOmHho4mZcuEVvR4OHOdqs/OvyrseL8MR7Xeiy0fHkS30L19RZrg3zVib5fOkn0xVxFaftCMW7iVGhU+e7L0r+ht/wTlBUePdy+seuVhB6OT6+RQ5sPxEgr5lQ23ZZhVRqwSkkV7HQro9/f957TrL2+CdVE8t3nY1fKAlvF1V/QkhyuxGw+kk+Z7rwbadSpCb9zN7yjp9rFHSIAHSrEV9Uxo1pBuNz3xOPFfCL+J7PHV8AuDfRszyAanqeidrh0NL7THR2s/8mWftJPpl0Deu5n2TPHPDCn8TY7ODZfy+Gw/hP0RBv6h5GWVV8e8m3F/rTT6o0f2We3+/NyIZENDl6h0IkN8lPCCUEU006/QZUsRsgQ2xg2w2v8V2Joac+Wk0Mk2+Ejd0KZaGTxfF6H7qxvCmaTvAdkW+RPiQg8H3PttxiCyeUWKdp+VjmmCWcb1BIlzXetUaC7OAO2SvJnlPDSSPM7hN/Bab6laR2uNFss5W1hU6Ge27S7w66NXRhv9lIb7MbaaWRttqN9NBuBOBh3FgRM2HkRh+zLsfF7OPiXngGSeKiEazIgTcxImc3kR+xlSylVlYA7htqwYny+0I1x4TsYAbRrezuJ4N/PiwcwBFS1t3BljY7UheixdV9hjJovxWWBTuvQYycCVhUMM/QmYqDHB+MP3A/Ud9dAE1h2PUwTK55CMatA+7DvxyCqy61y0Q6NrT9mxP1yh63s4qfufEoeqHmXl47mqLUDFd7o/4J/Im1y9p2NSI3ARDbBshQDGLrKNAGZemb5T2WQQLsQUM4PbDO/m0o7ZyhW179TahNV++n8uBpur93snEK7/cPWn8Ko+PsqvUJ5Qg3IGfEsjEsT+zIzHoDLyZXoRNmDMOqXyd+LXfCjtiYC35RLg8bgxPucZ9pMk/RHX/SMRx3oe4fmlRli03325nw5CxJB0NtZYJBlc+fg6mo7zTUc/H7XgVADvg/PeIpq4vCw8Xp024n95UO93ROydPZ+4Pp2Z3adbm9BQBNFCW5OGk71AZldZDgxfipT3rwTj7ZRZPJcci/wjQR7radJm7JZ3sUxwEACnD3JxqtsCdKDBqAcZTcdYlb51jDcYhsEzFe6UP21vjIL942TyZha/WTj0IaEfrg56f0J4U7nrvHrUqvcQ/SO3k2IC9GJqPRk4MRyKuB/lMjRwP91tZ/r4F/eoIMvJP/Rqc/XtqK/ryTLwP9fZHZvxDGl5KBMjCKAg2OQinMg1hi9paLzZjI7AGQPalpiilKMlhilARDstIFKcBDcBQP7aEkksI+DoGw1PDMQkqyQXPksxCTBGhEQ48cL6EJMTVpGxJfyUEQLCNZtyVog047aEY0B5jdx2HGCiydZeMxYCStZoFyBw1ONAslGxgassSUdAo6JWYKrMcWBQDAGEEXRUON9U5KF6T0xDYJpFYK5AuEVjgNodaxZQXIwZUjQZMCUILBEPQYwRynBNFo1NWT6CDocSXBSRlpb5ZmCy+UKflX/JybzkE8IyHcDBzN0yPmBY0/2QTh5shUg6h6RaMjV0KX+S5CW3xGYyRPgq7nS0W25gIletrg6EbezdNlcRkl5qxHQWfMSqAr9Xs0rvEmjfk5GpXNKNgFThDpih/QeMXmKNgpc4LslB27npp4Qx4Eu8Kvilyrf0LjHeVc2FWca5AWP+JxYQAk6w8adZDs8Eujd0j2edCoRbLhoNEbLPs6aNTAsu+DRq+w7Oeg0QmSjb/UdpDseNDoBZadDho9w7LzQaMalk0HxQfOnwfgJNtXhb+h9H6j2CH5gzJwLg9oO9m9KmRZqbtRfIPwrhJkobR7EvzISrtXhR+htLtR/HDmjyvB16z08UPwNZQ+nhW+OidVhimX6RQ8YltgPpZPbNf4fWJvmWBeyyE2FrOxyxvGXMihX+D1jgtN/+HwW+B5S2Gs/+H3Mf95OuH3lvz/L7UNWCG0/9jCSjqbbiN8YyOLIJAreAQqqHDGimpEs4pFWEGr0i9u4FDUEFMcaf6Twv7v1kCKfa6hoDhSOFj0NMwLiCiZJhRGisFp73GK0mCCIgl1PoUiEq7LOFJ2VPgVFOrl1bSlZT0xg2KnhgI4HscKQtD9+IDoBNKrWJDuV9xqCex7NWKganFn3hGuO/qsB27aYaOgY344hUrXwT5WW6RieM06o6T9CD/d6hUcA9Y5YGDhbBCIYzywxSqM3NqJNVYtAgLJ6dTTq1d1UxxIEzK0tIVN7Xoty5OpEfp3ryCDoibOx8BcwCJBicBMEaREBoeVTmAHkrSnUFpBoe4lGKQlnUzEEZ0IiwRFKXgiQmOBJXjsd7AMisqhhiRWWDUz/lZ98DiGsBNIx2YhCrmCQyGG+hEPSMVIaRxdEY8qWqpj7yKMUPbXd9huqa3hcyAiL5wdaVkZBe1AtcQMbueGaOrEAV3v6KUHZ2Edo4lCWqo/fElC13MofQH7GM+IQO0MFK0jxZtuu7ZkEQREOMoV3CMIoY3WNQc/ByqarS/QQaiWaiCJSc6
riOFsi5DGRTtiokU20aO6g5PCeqLuRcgmBk6hWKR6GAt1v0LStrTI1tNrLllNSxHcVuGmERzoRnyQFCFVhMLZaGATmTJQiAOvW/PC+RmIQLVYIGXsE1HYi00SdqOyaWG8mU94PFbuAJGiudix8VzyJT3grrDXIJjslt4vG1oKxN8tGJw4yzkmKAMCqWbLhBjVZrbCut/Tet2mo8KEDhD/AR9+MCBb7C/6sn/ffxxn7502rj0Av5B2FuUlFLV4eV9wVkKixQwgfX3gvR7LDaDiyQADdusFRAsfl8RwsFRRZNp+PEgL/Vgxokew40kPPM1NklxsqH+x19XASp4BW6jyrCSA43Zga8EzbMiBCtDzQHUVN42W4QDDq8MaAxRcLBanVWKHBxPZtKFxtya6ebp7oxRvZFMe0Q4ZRDe4tUHkMiYM2WsRLJx/WCQpKbo/OERwN8L96zyShHYCKCLFL8cb1/h6OuvpGIXsDM+J5EsuSjtt63OUFxNmRQIm0iMXZq2td+Cd+KLrkxhLVVUyChbIXFc2Dmnyu50IIg+CRDi+nQ0Spebw/lOEBTKjyJACChmFnRArotS4QppX+9r0iGDV2JiJk0Gq5fUY2mlJJ9Sk7mYiRz05wsKimUwgAjogO2RqcFYdVzmykCEkRp2CA7mLGOjXaFN1SDmfMafvLdxPIxwXAnhsowH7q8DihSwkzqeRqFCI0wDVUkWMmZeNNfANUjN4a55Q11ineo1wsLMazlADpXpPylqBgCb4xIpBodAOaTVCYS6MY2Cx6vVdfvgPyqCrN9UAf2IRdQBk8xkivEIgfa+zgO3db4JzGotaosZSto1pi+mldrMF7ynW9+ZepdSPMq/Lf2IWDdLp2hHXfiDl9RnNSb6kTSZ7GApzKD7loAXuQJVtvBGo702LkwVSERmNYBAgrehNSdAHIdIkVRm4pJV6rC3PCBrCoT+9t1s1FIgYEkzFcfEeFijofBpBQTKnvYcpHO1lcY3PplCwVLuzSrf3vBsVVZ/vXPvFFIqLKSJQoxktkxwFSnbNlCcmSgqdioiuPr4RjjzSwe7hdQqchp142FtFC60k2WgAQd+cDgXHzEREa1pY50x7I6Duuc4IjMPEuXMRPUUBhTkRkkToQAxRSiQYHFkofiRCIr5hLmKxFaOjdODpKJ2TXBmz4LwKtkKCaMzLn07xJ0lqBqve5mTcirEMiCgENmdAwDHT5jh1JS90mAEWPrM0IGv0I9hfl5etDwRG3h6Vs9+JANPrjZd0FTtFr9p+aQLz0ufS1fGm8QGzcdh51DJX3V+bGe3oYyYubhpQgWISFAkKnckwuBWz4BW0VaeNlEsBINkRU3+9w51HMhRVBTcaPRPCTlN5sLgx8ixt1cirOccgI1BbqVEWDiq1g4toUzk6iEcSXr9024G+JPApReyQOnxghhrxLUoDRyihI3YqgjKc0soJjMQWkIFndKoQ+hKsv3L/oUFB9Hv7rYlUUU544uZ+MekN5///0tOWQU9DvfKS52+Vu7cGXKlBPuirwcDJyzstZm5Bo4NFn0YFBZG7n31qB5XdWcPstdT27uCZcoz2GtxRasCT8t1OKStnA5qPw8B+R8IDLIf2Zf7XGM9eX99ltMb9kC4tl8ACNVSwC8EMxWDhgUP04NBfQy6PI4RwxnWngFu1lmk2dRzNOyxItGoArIlnCFj0XsYJMl+XT2MBzci7HLYGcP7MAwg0yjLuqwXYzwGC9PhPwYKDMee/HYHTkbmseITbGn+h1at4I84dfdUztuECZvAMaNRQ0nYTI7FUVcy9W1dQMqsXwMk3ykhoWMqFL/F+hOfi2AHTU02Y6HBLqiTuci/9MEMtbXdvikjZyU6c5SiwG4sFxAtT5rkwnm+lSSfY0MBYwLxoM69UozBUilWTliUSXCz7wwGxH5xorW1KuLeE9i8yYILCCKdhqCU9ERP982+2ZunNJo/KybIkiIumGQgtYanNWMlmyWpZuGKdktR1DUwoZ95VyQW1iyTXZtSNe7kur6j4yGeE+HTRo/vxaEoFP8ZZqHm9dKlRh1kVZvx+rN8yv5sia7bQ948XeAMaWLe/4VhLzvfl/b+Tg3IHbsNwI1BUsfLwolSTSNW2cjj1XXO6Pl24CgXDpg6Tiit+rDpBKWtga30kMKgd1nX7Yatao0d+1nONZlzMWCjwWYwKakNRUBeRU1ys7HzRWFO531k/AESUck26MGILZwulQzkD6lAhiWaStqJLuVcpGR0aTq5z0SzfrtYoJAQjKS5KV1QWmW4LYHpOB9WDIl4pL4JMKnnSDVlB+CvbGc1gpwks7o32Z2iodpcjTMgb502CcahTuKKlji2bdaRd49Ha0wHhgtNIubJYggF5O9B1ex91/3UJVipflE2FMmoMTx4rO5Yjg+PlFdK+OBRgqG3pZHAoIiW6smlVlYDYHr4x3HCkI4OaVCygWC3Y43/0dbleJB4iiasRYIB4I6NQHM7qg+OEthSAMvF1EWwNyFovAX2jCgCKDsK3iFRKbfMeif/ojGpZ5CTdm9/F4tR2jVF2ZPxLJCYQ/kcEzjLB0XLHiymQCwmsisySY0WWTBJJxLyy00VB8uZVA08qd60B5WWKQsT9naKiAp+i95zZdIuAEJdEINLs/3UE2paXgBBZAgmkluF8xBpkg/2+4va4fEZD+3+G6F/GzFedd7pSXilkMSMvPvj+Yoer8h9Ml1GHu9kMQJaFHChTUTqCK2w0anWVLX1kUJu06HqGIpJNjqR7fTTltQPJYeAOKTku0TjSdF7N1tgOqciZiMDUAjIILBrV5yrH6RyHck+tIenwtACIOr16FPrqgvdX0n1JYBSq/bvrbqlGg1Ooz8O4U1LhX6WYj8BpRM84xf5zvXtwN/COXD1QO9WmQ6NY14BrSIuKXZFdGst8MqStEtbnlYz2ZItbwdIR/O5Bxgha2hlQKirVfunU0BoRo71iDUa1GMJQh5siFHO+MhaQz3PbsKorVF8xReR9umPoRjAYcXEpZ4Z0ps3E/lErGk5Nu677fjftVRipDW3boSRondMDGxfha1bl+4EhhX/NJYUzm0Ky4APZA69QQBWUD5zHzvvLY3kPqghvskM90K59zGSgTkUQ5IEXI2Ty+gDwdzqrZd1hctQg7LoYKGKHI8EhSlTivRH4LoDrScTakLtF2k7acEyvqH518DWTZPDCkVZMwZXdlvB+yv6UVD+nDtXWVgRq2bAcdhqxutQ2mI2I4E0DzwiNGoRw0ne2PpxW4GaHAQDPRhrTI6X4GFevDFYSxRaKsQyHJLmbYDAFXJbNfoMhgj5ZIIa7utKfTbDUqQV0m97G2ALReUvn/VQZwIInl7U5TBHvLySPSyNGaxDURDA2y3gYrybRblxHMPBk6gARuJ863wnM7v7sIbeToS6x0WebYygUR9JHTABdf8QEkCfCfhEHDSaYTHi/RIG0Jq6VR4PamwhIlhMUjsVGSxFP2I+FiNl9HPbwQBqgKBGMtyEqZZKiFMGD4KiKZxDRHCMX7QGdw032FWNwuaVKYxiYYXW+H+bNNITH8RQd5A
M9JrQA4vgpxLF6YxZlOIf54/6YDZZT5GzQaBnu/jGVmoflxTpYAG/Z19NI0V9dmURnsFJmcI87ZoK11sPVwdC9Nl4q3ozVuuQfAMwnyWiqMhH9bS24XBrYwSJxcFacMtEuw+gYnaIOmyJGg028n0exM2FlYiRkA1QSCsfRoTCTeBAuWg7AHPYSSLFgK3VGo/EGTAzlCnEQ8XCW7riUlW029yEU43mMzfW+c9kfv1ck2h0HLiTHyfEgtFlRQAF1IAaSrGiYIJUeNtF6EVg5BNI2r3OA5bk7XKeVC0E8+riOKKpZLI/N8vRzLLgPR+23Y0EkSuOT6caeEwNzkt4bwICSOo50ey5Gh3Z3K2NGcrfS6Viuij1DxHkM1tdLi0vGVWCkIGr+xoqbR9BA33FxmlaHimQtRA37QNAETl/s8KeBYYStnSCW1mHvsnGL1VrfE4+krPBmxQBYxQda40jBaiF2vsba14rFJjHcKSS4euNdVdlFRFrY+7SF33AgFmn32/uU0r1HFBfDMa0Vm2AISxFrSULuJoYlDs6wd3OinKSrS/8aMkIUUQU+dycJMCTmDQw1LRQqutyRFWHwcNobc6eQD2L4odHG74283tdlLeh+7ZJTfOBdJG5fbZmgnMdmapFrrUghQrSn7urV3LXUzE10tnWsxwXCG1GGhjTboxGcUBxPh53xgF03QXATlHblBI8EVKWe9zy2Rnh4dpnhWVI9JtKwZR4DJ671SRnHZZqaCI6XZpwie5t2WJottlVMEto7c22IoyrKTAuLnlejt9CkVCROLibBnvfrUbFpYRPTFFk0pbfg1YTEEiZXtFpOE1hobAS3oG0Sz+fZ03AS6nPnvhwCONgyrFgIJHLYP2m0RaBmr5NnTmx51iqWGl4vi/QvRbx7J7UYNKPA4Ppl1VjAklWCVdX+JGmOBggjARj9abgo+krglOEuB+JzlluX4WnvxTrATg10J0rEKbSpfBEagv6IER5omaQlhXSjRavoQCXKVvYbB6dd9EiTaGoZN47QKVpoFlvRHu10jS60jBbWGDe+BL0hP2qRNJlyj4fUdDZNthklJLJw08KK8RRIOVoaXBY0WahDZS0jnlTsxpkMWMMqzyfX5RwR/2kuAPkGyPOsFbxxUIv+u+2/WEaxz84gkk0jKJyA+4JsHYokmOic4Uu/lZ0WPuggOccSP4Ig2ceN5Lw5bPIsXviGaeUs84IiDkCjMpQSV48PNjgRciszEiTxOlv4IlAnWn4klTIiImXJW8tidaIXq8oJRaC0/2ZCIFAkHGECgmom7W/dNAevqiq1lIYt5eLOLBr/SCGaHHlD2u9Y8FbsxFuqonuXtQKK9Of7FbvehIdd1Y2xvU5E9Ok3wg0K2BwBGQik7YzqBAOBWbCKdCJxNXhNlIwxCLdVe6D61bnof/nd7oy1wnbn9QRPsXk2jNmegCxgyZ/dN9fyuU0iiVNU+5uh5ibL3xPVvx8BF/QoFCpSCZLniW/UdJ/VPJoSXZ2gQ3FVARcYpOR445/ff0ndptSgTvczvZRN34r59iWK4u/0z0YP6YiSSXsKhT2O8XA+Go3+bmohYoAjchlBIfW6tEpEwLKBBdTRphgeh/zUkAQ5HjkRqHEUxh/Xu2TPvNZEN1gXJzFW6nW69c3xnyt+XPviGG0+cKbbVecxYeSbkKFs0SwoQwTpWxKkL1VRqu7xwo5ktMkXnZXgzeilBK21ABjcFURfh3b3D65NvrJwks3PTzWlyCU1fTBEkZjb7x+Qv1bTP+1UI19EK66PcAFhVu1OQpRUaHvGLmx8GrYcXz3qYQUKZeTbx3CXcCQQK5iITcZUQ68pBtFYuePnZp2TPEKgwqlGOm0ZrgzlPyjhvPoFFmuHYQak/ylMBxVIcflpZcMKuh5XuFDeyIHkZgr9geW4EYCYH2Kho7W1wfPrdGVtSWcODjnMuZRIG62THKu4Vsw+kU5aTzOfbYIYieXAgNuRibQ7ay0Ck8nvUjfAbt3UgXGQr9YMggNkbp+ZWHhXnUGI4yX2EgSKaMUVhd3XXLNQuz9widajbOQ0p6X1M607XwhRmNYmNBI6DjgNfRIgiX8hfzL3mwXCo4wimjOiJm0KIshyGmP+DyzEQiAlaiM2oai5zFwIwYpaJbCvx/QMJdsJcSRoYX0h3JWjhLyCDNDhWHV/ZMjtD4BIW+KdgDenOZmuBF3x3uHjU8h67AoerfKM5czwVhv4DoQjj2hYzKzMFMhK2SAMzi2RECYa5/28O/8NwMUD2Vn1qTqfwYJGIB+QQAGRsgHw8QY5jn1AVg0MI36rj/mOT2TEuIokfEEclmt8Jm/c3csGkGMyEzqvufo3gHdS0JCkuD4ZbeeHxGrlzYlW4vzJ7SGsllKNpF0cIW3Yh3VCqeONDHxq0C7LCELS+Rj3OC4GDc6U8wiBUYAEAOyIOoOiqsxjez/cmVHerNrmDiGmBihHIFPimYxQuO1r+gFpRqNyqrz22hqguHMgYA+CWsaABnSRa+rf4WBI8z3CprnZ0UmNB4RS4bfrKrm58Ymu1ewLn4Ngt+U2/z5sphbNdeFLipIjExYIqaOkReG35wmiZ4tMU37e0QJnM6Os8wzZKgJMSLRbiPgWoUubXQgS6FAcsjNugItww+4OnTZQERvZ+a3nubMm5thsNDiq0bRMIgXkdCy3jSswoA/E3EAieOE1lsfXazKFxajYDwfJHgPcMhFaPWfMUejhqEtyuckshOcayjcnwseveAA1jKp9EqZCXFki9mxzrqlcnLl8ZYQfKeXEGIRbEdO0UXCp2Ibu2VSsmDCzeCWVbgKBU8hf2wUhtinwrp58PIP2X76DgVIBq2j7YtcvMEAKPuXh6gdXzKjtpxydZCt+zIMLjcWrBbdViy127mluRHtB/+dMK3riwLuw8ZJMdccH//LNSyqfQvqSHxKqIOL0/dC7C3I5u7xjkU/X0hwrHwwCaBSlfdqlnfkYZuzP3NxTG/3T4rA5m/ZFmIaG3BaSUz9UHS6TKKPbs0rjah4pFY7j5wBXlVByxc0gxr7LUtM5h2DSgJQNnFJXWxR2XIGa+Bxsc+BtaoCPzizG1tiawRrdrG48vssU36kGz1xB/YZTH/Q1vMHIXsszksTsjqQ6if/JXe0Zetc4iZSzwqU5EACzrfM6I9DfPKbdyHorL6iOqFhjxWRgFnYQj2Uav+JTM1FtVETRrh2AopPZyQpqOn2tycUc93G6Fr658y66GnwdmVylJQjuauf2RqvSQPSUhhB0vUOfmI1UaaWtam6d4J3R6O3gv1rcl4RWokVgFj/3K93wzHcK07oh8JdbhQr/hD2ae27DwA80pxvLPGuXZlCoa7ITPwBiA5fRoEPA1Z3PUgl4wyq2If/DdWuZlr6aA2M+/uIUWfAMmLIWYJDpPC6uhuTAFCrQir0prSEF966dE2jwI/v3T5V9SkDhkZgzYIgdVdB3wd9gZLV6xxNfTtqNLayu8o11beUTT3Qqs3zJAjriTZE74dtDUCkt2648yBN2d8RftrCX8rRAmt/jScpRLixbc+7L02RvAViZT/BTM
PKdRxYt+vfuc9+gpUbuecmU/kCkIQDp++mpSaJZ+Uyrle24CBwWazF5HZ+Wf/4kU+YsA0SV2V5iDc6JMVixlW2bWIMqhE/A0CPz1fX47lrIxC8z/ITTUF5hSxcN6ZrR52IbChDMn/jZBwFubs3JqZ3bCZXnpZp6xHTmHlhpQl7VU2ghSKWnoeMzERvbX9YJXbqhPV2MpH9UDctJfpk3gqrump1E5E05GrjRV7OmgvBb+LRgrSjb/ZO74vRr5t88rQyH/I8G0NISCQDvkHWrFw9/aEtiPGiwvKEb4JpNmx44ajmaaCmOKNBs8xWwWnVKZ07vOIwAz4ZoZrKo9vsuI/Nmjkf9dzzGlcADttiGjk0IkLO/MtUBR9kEBT4gNSG8vfVxdAbc8FrOO5r5xZSD3TnxFwCSPe9vzF8iB8710WH/u6aw7MSBAggOPnnoRj2lyYO1hMZjcOyT0w6Z8XfqiPXFyBwE2BfNcTmDmsYAq+OV8sNDJh61DJ9knYobxzxY/vwlrfjxmkzh8vNBSEGAscay5UDJL+9t3MwbNapgMw0KIdJPE/tCsnxEP9ceOdidXBTd1AVBplOWWNlTQH1ifqXzhcDqTmPSEU3GhJ5a3KdcJfZCHUtiZR6SSL13dJE3bJnoFEvqZR87LNb6SnLC22x8uDcvrJBAJLmCAg0DGIUrYbFWqGR5h43F8iMkFCFyFY8MRhEkUBttrQ/N+2KjU6I8EhEiYohGKaeXRJtFQICLQDvDa0MVsXgkd3+lBQgpfBZNndmdDa3iyiB3F6nFc8lwUAyEpYrmrEPs9+CeNsBOAzST3mTLod4dJb2y9EgOehck60gZQrstGeVASUzm8JfLZ7Iiun8ErvMi1h7SJesSyn1E0BqSZrGTjSXRsgilEOgZnfZOCHI9KfccMQFm0F2nXkTn100RP8/EWjz/ALeAvK14DZakTkYXVV1TrmgJnFujBtuk95CX7zXdMXLMT5OvL7gIy3j+YG4NNtV1dXBszhJvsEo64UbVTolLtdL0LnkpaEZBwBYjsmcwOy7HqTxxunnFPU2raMBRpyYRmsB2JzAtdqMEZvp4XuO+U+l0cyeP6KGWv2LScQP7y/a/Jg6OOA13DpeD+XZB5Gpys6z3iA3tA7a3YLMVSue7tt07xreQ2C49JmHImoeHLqXPUq8ZysrxoCBhzj1NohB+vXwqupH4jZBrkd2ksh7HRY+Nhr2iF8CP0NHhSVHUAQhn0HVTDsCOm/wQhLaKF1pLErrEtv27OazxeWGtuFvwFIyt1mj9ULhftTwgk/z9S76PLeIs5be3sRHsrn2kVHD9mppsWp62yrWmYZ+seU2T8msE+DPDGpcSO+Hsw6fk2tjP9bO6xd9AQ0nEJ6UH1XNyXC5+5bMVDq5oqTCeeWwaeISmdpOwD59Shvp2o/iSNZVcNPe3N4PFIYAe/h2v2d5GnHQg5JeJUUtdlIxVVCGdENxHz2ge5qqiEa2P25DPYzTo/l357MomLPuA8M8li5j30uhL/A2h7tobQXeFgKnBVseuVuuO8TJ4uHvXlIU2DscKoWoDvFJ4ka8O8LmJc+nnSAKmdak4P+s3OvQ6RFqadD1wLB+/Y5nHxSFxvCHz1CQmRvTuwNVELQLyzPISgyKaszchv50kCUGoiC6ORtSyNJ1snvrNUi5wzEMlOLNN/cOoG1V+V3T/MmD3Aynq7wCZdZCJT8wTwOB17Q7wNtWpQ34WtE2rdrFpbdM97XM0W0JtSHIs3cuns00Xw0NnS88eh6hxgPipFEcq9Ih185oUiYmVnESvWafPe/6ll4tuhtcmsT3y5dS38EPGP2c2jhSloHYA1vIlRzvdg6/VC0AGoKsjaqricYIh8GtEZ3k60C7Qt9iq01CK2Rws70dxXLOgqB071XtWbCpFTgzRno0N4ZurKVmIMDNOvQNF/ZttoRBeEuIV0pMozTYFXZudgZ584Pi09+9gAj6QrMt+zCoinKDQVispqv5tufeBbAzs/wyI0D6X/yH3OFnQdR1bws8ATR0OBLTekpUNIjGH37dbi3MfNsN4l/nGOvzO0HdjA2oPZCUPYfc3xQdgd/E+u70AZFhiXGfh13gmuRVHUJWzaomIsQQN9Qj9e4IYpaEO1NAkckGKscd8G0rnqZTG7Ws1Y5272AYPrWuqvOENlR0X5wzMrfNOfk3IJBl0+7cqF1vUbdur7kZ9V/ucjww1kkTzpyfPOZ659u+nWJUhPG6gqD+3neO2b0KE67cjTFmU542Q6q42+8tsKdHf5IufsSrNnDftDujDiJSJKY0scuKfkv9DqtNduF/LyHkKUheeH2zbemrd5LiHBMuPdOamOKJ1r5eGHps8u8ogDDs5eCZVG3mRBxonaXFUZ0vSVGqDnnuz4XX/8QHBxZpE0K/F735Lzv7d7aOBYekMhcM2+y7tNT4jEBHzj5vF2XSJq+mK5PyYRjbcKPq6LwebTU9KzMlYsFUXHa7uqp7NRFHy7CWFmtkstwhMy9MpUZCEtKzITLTOdmIrXBo/h8ncQhhnK6+2b6Ao6Nx5albvZ4jnxTFmaI3zK2E6oZou06HIDt2+A9vHnmGZO8gaSlAX9jAVY208M+lFRF6p1MXQMFLGsbBjA6iY58sKRj7Hp2Sl8yLnTe/9lpoFJabEecmTiefXaC6mE43ICYrdI0TCc1q7xoqWcWKxhJq6R8H0+YS22lXqAs6P2CdVU/mcFCr0VOXIGyXXbbeWev6RVYljPfzAAJYlGVgnqjKEsiZbn9S8Kg1abITGm/OAfclhkYCcZrUmaZwi++AT5mzzj1YWtkGE7wD4RJ2p9kLXTSvYobnbYaqqi7SfH3MlWrzBUr1V7HMgPhOaEL04WeIlWueDy5W6dbjQef/mQl3zig5Na52UAd+mOaUx2r+ruRAhAmaC+jYsamBa37euvfLeF7XEfK6eIgGiP0POZsi7JeMngMihyXa/CdXLUd0V8G6mbt99Sz0PnNz7cCodqGJHnND1XQLEq++Abd00D8534a8obH57Ai/RTfO+9k9bNlXSq4RFB5oEjWOM/Pg4q8Ms6OEIYY8INULsKbZdUANQENE73wGdn/Tt4/Rdv8DCZHYJMOJkoK0fQ2RLO1Z+1mFkT1xuD/O3s4sJd6UjTCe+15pqkUY6NiJqiIF6zcRtW8vJ/1oRodIVhOdpG5RyQ3Gn695e7icdhe5JPpnALpKz5vRq732iRbkKFz3xbFWOOVQC3hQfm1j8ta/4jr2Un94SrvUYOmdRkrXwUfXVI0JTkwZzokXXtispRKT51HrTCRmWVDRpGWn2dgZb7l2Phj5TmUhDQzC1LKcSZO7iHFRW/ZykJnXTjeNeEvRwy1eQutUTZTSFwo5mANiaf5dgr4RBbb5gba3CM1rXHCvjwI0tK1N/1/dC5ak62+sHcC18loyjBzIy+ghjqjTuD4vEvx/1ZMpBm12nhSvjZxQr/I2Bn90SO2VCahnSN6DRSQWi/WZZe0UQa87GwRfMlfMBTBi45wQ6X8wSNZCowtLMaa4NiZktUvwEMNMfXE8Vdx5yMFjtPCKqO/rw
zIVPItuPnKpZPqpY2ihonGRsSHuKJqF23daiIMYzc3NM9s5BqTA+aRJ4Y/6ySs9KY3wXx8X1ky+uDc8zw3SfbtQKD5k/u+7XE6zmvIGb6pWSzOmXHxLioNHsi2vYUczTjRzP123afQVXaOQB9TKLuAONDrFITlrOVJswqtGdGALj7aDTaYfiaPUG0iTfPZdN3/Fn0AXYbpZjISAbtvMhIHTJvwFS18eIPMbwhvWAjXf0e6w1wFjvQlWSyVqgAQPOwqMKmOP68NgCE3DGzcAKl4UXDqC7ox0x0rTHeqK36oyFilBW0m18Fp0rYtofETRAdkq0muM7TVHQwSE87IbIrHBVeGSHGNGAHW4GKm4Kjw/AEreGFyrgLejmCIuVOJ7O121ZadX5NVKcYXPWkCQKaOzzMf5YTbx0a7ecv3God9aEa8AS2qZey9lA2Suq8fwLZM9J4fAhumgnWIUJsqt2gWKj3UdCpedbvIbNLwKnEPFJct2kGLVXLWhBEuwwb2VGoCRZSoDlunERj6xESfe+vGERGKB7MGVlNYNoHUip6Z/XV9RL2BTOCRsw71RO9rqLW/gMoC3QZDdHxDghiIMU0Sr8QVL4mY6fXLDvUhiC2VKOqPwjDEm2O1JanjZQWd4RW1MrQ/h7tT2Ymfiuke4AS6zcU6aFDy5anxBaJGW6vBy938t2AabhOEEW1si1Pl2Mhp2en7Uj7EeQY9MyFD8Cl7+QeIceOxNmIHFX7dzhp/AdV0/QiYfAnuxaMy4JuSTE1E7rfdXvsj/w5ehKP2xGTN4R5mHc9EP62t+CXF21/hO05fOYaHAlAap/SRs2hSgi/NEgLM7rYhWz8qhD2lWNoS6bbBEQmg3YBnlxfujZkWiNFcjofeGatggKOemdAs6kJSsBLX/76iftjUFd3OKTv6h91qulP9NAwpn3HryFG0GiqhOjeimpo7n1z1BBgLl3BH9k+/Ofb2+Frif+bcV+N4hvfvKEn00PoxGD0ugGbtYj+h0moc3rH0sdRbfvo3rV43wE63PPNkeJ5rtZB3mIlE9yrlnNNYb4G+6evCnPwA7JjXug53ZpeuqEcm09eWjwx5xJEmHPJjncNYFSZ+o9IvlzvFhj7nZQEaPrrB5hBL+ZIHyrnLGGkq4RyYEjB4G+drw5ogu7OmyZe2yKbEjjiyyUXKuCcyqXrbdrAiLLXuwvaB54cGq3zUdda1ZMuEIERJsPc6Uyc7827Z8c8p8g57uReG4JyGCSMmFS65FQAssJp304D3h379DjEFF2/2AvRHdfsQZUcW/1PYINa/ty2YJ4GoR6riTPRCzjHi/QEnBO3v6SAdNKGYc3HBpsiTzpwGJMEmcfeJpRaH54VzxUcVryKXAtjiWNshQAwQtWxmM1HQ6RwsF71bcOpKEuQf1t/R67s76e+g44MrjBeF67XS6+BvXCwzvQDOFt3jbncOwzaSvnp3SqvVEokxKfSz8/6Om+HGNnljLMuPlfV7f5yDZu6iqKrex2n+IM+85HJ2UKy0z/nCXtriIr4k/OA/V38k++7uWdkaNv5vfM3BZuRP4u7Vr5MQK2PIg1s+6A+Zz6BKOaaueHfZh7H2XSiyV+6PTLlPvN2z4t2V7ifihzb00abBzCrf/h4+K8KloF/NOpJGySdWlNM/uVlf+yTUTDOr2QrcGWhbreeJb9PzrqZRGyZGd6QkdxRxbYm1sisUBfl04A/4wyqf1fc+vcpFAhE0e/4qO4SVpiPcpkkh8AcySoFaZLI+UPgC2iElKT4yrCyE6g9QtO3OYxo3OcYy9L4PGiUU2rhhyVH48lsS9551xJyB/HFK/UCeD9bitz8gS6GQ+cgU/koaTQ6YA7XKqBz3xSjXOYyYUypezgRifJQAeN197oIm/jcj/DTgNL5pPY67RA6C6s4+3aAAQruN9k8PggkqIEC2sYoVJjFhDW4AQ+c5+zaCk5qVyeMBB+2llttYU/v2kJHKzppON+6CUe89shW1vkxfgzUwoB4S/HscJorBbo0cqTfuS6iDX+hbRMZ61b61luZIQuFTZGhmN+hPwazrjGdP6GT8uc+WX93jcqMZEldGLx4xYVXmIbKeEA26w0gyqGUtIfjDj0LpRWRlFOfWcCRRat0AzemE0fGkOwkeaHpSdfpBMa4SlF+TkX7XlVz3AaBsVVId4wNLuO+bs6xlbQ5KgiLloXQlD+W129VICCXUKH7CZQG7FAbheANDMlfkoSloUuaYUN2I0Vp7KWedduMLyl6krtESfY4fxd/gyuQ4PKkolCz+v6ejagKqAuzaeGuVkzdOclO5Ikl4pp0JlkYszpDY0Qhr4/Uf7sCzXHa4b3gVX8184Wn/yQ6o53KwqMwmvLD5oSKqsoh4Occ5Lc1Nz8kA3s8gm2iT06D44O9ociK9A3u7O+nmgJR3IqSChVI+Ilm0yjaMtsMpvrklkyVixud1M4jxUwnVsMk5O392FLyA7tAIMVD2ezMK6nJtfCr7SYSLbczkonaqP6aaeUuRmtXFIWSXE0TV65MrtmxpFy6zzZDQIWEtkb+9tYwCqVXkPG7576jASNkrS5Oo88Fi1GGsErcWTnZ0042wruht1sqK9FnlhWTLQNTmiqhL5OZZ4C7gGbpGNj++QgXWNFW6mR7YwhoIRIjvbpFvObqsykuM9FQdjRMJco+H6w3oeKhlCnmpK27wjMrIqBuFiyZ+wObs+mmoAfwS5kJahm16TscETBRxVkKx+qhF2RfuS9OmCyKtnCqmPd7eCwUcm/RHDSn4V7Bi7C4psITRaPcXl6RJcMZzEui2mnSRH7cTR+WhaucV0t58XamOVJOLJEv1WdcMKN+qRMRR+PLeEUJjj2Tyxo8Q/JdFnGpIj9IZHHhVE2981e85a+WWshHyxdILv1XQqeha1hDCItmL15j7w7KzWbRPnA4G9IIiFA3081hfXAviBYGdgZSOShc7HLS2ST8Y5nIJMbpW3FWLxPCPpc+ZYB0/Jc4aKacK2PkNzHzCaj9e7K1skVz8kBEKqdekmwYiEu4Iay5ALAKIfVl/BHR2xoMcivaLc806Js1cDQ38jtUG9neBvkBIs3xU/SdzTNa/7F/m1cXjYm7IEzVcqj7ijnSJqs7TmHSlfAc+J5TlnuxuoLV0h1piGK+gdHbs3ziAFJ90s9QCphFu7ObPKrIXUcpTorE24ByYXJvFrM5kDgtK8RQUbG+wbSXiil18fNqnygMStJED5rKEpfJ0OP84zfv7RuJm1K10Hr2abCRSQwbOqTqVps14WSWT6Ydh6/XfXeyaJbC0kK0HSW+r9mkfvzBS4nzYnuHVEM2cwNEe68bfC9pyBMbOmXlWMBtlVRtrw8waKtzKpyPaEN9vA5LrnRkEVcqE3jb7Uk/Ck+jxbw/HteXWx6hQVS33xk486TaMtIOF5yTLnM34Audmu+oM+ZDPzdxjPYeGDVgd2tcPKpwXX300NLjmMsDAdPdakogFdfPk0/HpuofzmNxZV9uWtdLemVwArPH1BmTxk+V57xsZcGpO/W/tTvaFHd3JyJR6UghV9OukA1CmBQj+hgNUI
/XQ/appAvrvPztgMKrd6MhPEQ78HOtFDrPtAQphGGdq1BK6QPZO9IjQ5WACuqZolc+DWcpgxMTuJYgSv/BVZ2LdJBG9gzCr6v8KxcB3XncuXkpjLdw43G5ojwGWpa/hcsjS8Z9rqyhg3RMAjR4FFb9yvToNLsIYDgDA2482k5nfpkzNwuoSVgBbOXvIhsLaO9h64wBZKBEzG90OBFxMmcwUbBCA9zgzfWtrLEDd6B1reYIymHrCd4e0TG71EirMNRvsOEH14Cy5CzZ2/usfBwJgcZWzukHPxTksPbeuP1alGvNapcKhM8SaiijTCU16mEeY0l6BiKsDvm2e4DPzNyuXbW/IM4S53KpYoA6B0h+VESwrsd2EdnKVxU4B53FPac4Qg2+VtzWkui6bnssLlqG4PFD6dcA3N+SF78eml/ewjFMhLK6+xKwnnCRaqySy97R4Ws9XehXMhrBGJJ8v/sZdjyKfReGGKOHKa4bEFO8/E8LlSLl3aG89G4zSnHaz6KoCD1PTK+EwGJO8lmHX2WRYUIm0bi8O5O6kASOg+QQWdJCyNJVB/MU14xxaZiEAWP5vDpb/TTON8pIkwI3ly9iY9NFwH+vPDauqSuLyOO+rxt7Ce4QJZwXzQ3d/ZCL3B1Qgtb2zzDr9XGzkkY7u/00DbrmIcbDEhepkhbPnq/dmdEvFlEmCl8VfOlTlrBd7gv2Eb7Mu0rggBBbdHLVgyreh+W95Y1u2cj1Q7LogrUc1lsBiYVH9zi1ioI5YyJmh5VUt1irciPkvd4i01v0CkHSsSuoeHiqmV9r6MK3gWc9ZerqlaVRKmcZGNZDyTzC61MKf+ytdfAhjJO0i9Za5HY/FzohWpMWfeQrdDhuS0UWRbviZEWLggteRsOeZ5PIJ4TpFe8pjI7OsXuQGeRvgVs4534VJVHwE7icPAXyCXD0c2+2mGP51OsfPQE369PWtzU9t6Z8mtRFoIwYC6G+ycysKwifjfTScG/bmcTDq5viAsmvlG4mBZOBKDMsXtbBtCgpf0CHyjS6olntJTu+IIRIh5W2rn7LaCrNrEAvkvkbuXeEA34iprQhgS0sqldpneAAI1eF7QihrajEFvERoyz2na50UW0mFx92yGV8Ax/Q3EVo/fJF3I2bqZX+nzK0y3OKR4Yx9Z38Ds87+Be0su0KhhdwGp5jTHvRx1GzSFINwgLLLYTd1ScF6hDb69YZXAE0oWUNmLOz9k3bW4QTzFYncylBJWVIo8rBdkY5+LJcFrLrOAAGMjzUtVl3kQdcyVo+yJn2Yw3OTFRmAAvFhGw0sbdqizm2z+4JYg7OcG1iuI0feDKvtPNxJ+rwjcrj8cT4FXcgPaij090h7qrp5R9jMCjNoYJTCRO8uD8mJNgCKZNRS1S6+K0p8ij3W5OCyETnmN4QAwHwtlcgguAEc3AKrESMBS1agHck3B/1ClG/ArmcniwgHGCrUnltZ7FKeS/y6QryGiXSkzL0MYsJvYbGe1IOHfO3iwpyWeLwenLRgmZYl81Yr+gzdIKWXEkrt+0zu5kfgnj7VjI6osrGBfOG3iyPNKOiFiTeCvaL0QmLNcSA8z8/clnmOiwtXh8nGDvWdz+tBRmyCvg1clkUGAlqfgKw4nopfnHUt5jExr0Nz4p+E8XaKCUtW0Sp5o5xye/xD56k5a/VnjfcdRvtfznxQc0SKFJ7CeR3RvR8R3+k6k3N8P2rKa69YIqhylCt8ZpOia271STekvo866ZklvFnwUJxuaMfAugJpngJa9jBxAV+ay/tbzBMlz0dwu8ZpxX7PNZW4P3ydHqKlP2sTAsvZtOykBBJzn347qRQ2MLBW9YGD/4sy9vHLRsSL7EQl2nVJ2jyL3HyTh6rR9JPsTpUI2R5G1HVGyrJzM2etcswwhpEbxk7PFHyTxEUFWPinnyRBwH+/yQeaS7XJ8uAy016SSdW3mxonI8oMfDqcSSDYlDV2JEKf9LFDFFjuUJb68tz+FJsjRV8slwfdXszMTZbrqJfImv/fUhR93q2khdTNYO+sDUsutbSEgrHI764SDQrzBwBfmsrhGPPcL2MyjAsPLhby1ClojeBuOUZwMgqsykgFE/AqJ3SQiy/NIQ2QhXB5Vw0lwW+/m2ByBAxdLOvclbzJvk/edyCJBGURDb1KqorQbdw2+h+ajSMI8HHd8tP835j1hulx5JGGatKAJmbpSsbrJsqslfkBBGOp92WB5ypMC4lcIvEguVOckuh9Mmrx3HEKSF7+sNYMSLwoA4cy6bSUc/N+D8XvAiJE4HKEWIrof3nxWBHhjMX2NvyGPuWqdjaAqcRpQ4wLEpQnVcgS1IvuVobrEG4hXxWnc8JeGfhcy6dOr/nF+G/77aZHStLZQ7i8i4aAHyUA4Jxxl8kxC1pmpBVLRt5Ocy3PqOYuvtv0ehMpNKhGc/d5EqRKasmpi6U8G+7jQreEZhw0zd3T2Xyk/kl1HFHFciSwKqdZPxhrkKnXCZNo5/lqXReF4WKcJx9HNM5NymmYLUwQlBr7CmaRCpIi+HXcApuZRRUOkMieMYtdii4fxiirwmgNQFWw57jfU9jFIaFbfTp1t3mCfQIKVs1mjO4obBIU7KGIFYzcHFis1ChJebSHoLvbusXUCa4MUBpD19GC6tyMgJYnuA1qf0ENf796VQ0HSDntSWFNfXGWAnwR/Dxs8oAmObukdEzf1Vr6C2/+1YmfceEFF2NHfyxeCNAB1GegKtX7rra/W8WGG9t8sGcg/8eb5ubHv4dn/kyaRUhe3m0+XlL0XWbTIZH+NEwF6CQrKbF55T52wePE3TOWHJcAFIKkAB8ZWylxpxnAr3i/tIOLKf+0LsBglvhlfBbPnLoa2/R5AWQ3uY4RtJY3W5LHihC5tVtRo0lRzps8o1sy+ryjE/wdXMRKywBqihHxMRTiWwZudsIQEuGs6rr7NJJMD7+bvJ5JN4P5avMOUFrPVJHJyB9ZKsn+fU9BUnfJkCoKX1FeRdej8GOIS2ScY3F5lFV/RyZwd/PVvTvByJbJXj1Dmf/5Y7uWtDSK8RihQ9VyUAuP/kUTVLHUeeuSTPxaE43R5we5LmPVRLZJm5RPOwLiRdFuCDEvEburT8Oz1kphwxSpqwUqbRd648wRpuA+VKYqqcQ+W6lCaNFU9EbS6GX2L2GdeGQoGZdLjrv/07Q+1CYfXx0/ApUkUXqvxCbRNMZ339QMKWsWsopVpwyh3wKQHrg0xfr2rBU/VEzYKH1DEsUfafU2nJg1JkQU97Q+6GNcJC+EYkF56ISF0eQ+1+DvGhhBwe2nHVM8xuA3vDhFyqs9u2QhCw5qKu8cAVHxm9POWM53XuLvuhHJIVp+Ii9EQDE7FJhH+RwUi6RSaxsf5QY4Xo18jAwSFmopJUN3ry5kVSHbyXYJSgFchPbhuXs7FCy9A8s2HowU25me0tCX2FNQnTN/In5rUnbFygHCfl6vr1RNMLBPR41BnOkmIX85kuoUDflqggzC0UPkF2aZCtHCcbXNhgVSF43/5WDV5pNdctx0V3nlVaNUO43ke0PYVJbrvPIFEHleVBZc
bsmfhxpvZUKAlIVbYUjxrBfuaKrA8N4x7cW0f07QMSi/hovCl9NHJJSuDhnApt2FyKfNeKGg1PFb/HW3t5PvR8AcEukp3G3Cumk4rqY10jg8uw385Ml0XwofaIkKuoRpS+8KX/OL0LCn5IyhAiK5vZatGp3lLyQ6oM1lRKOTIGc5W6vVCPLcga+nXxzA+M4P3ePW7Q64jYa6o5saBymdhGh2kyckkN5fLgRdCgerpOq/5dPeQZSsW/nTJigUrUWI5jEaaDvMnj6wSkXxn4CFK/oCozO5krDfazcjjEpDaLFkx+vEkg5D4UBcPRldrioksdm7j9RzewEi6EzJCzGpe6SoVvh0KsOvmH2WkPaG0R5RuK0HqJpswwS8YAFd/qNdgZREC9+8eD27KcTopklgWgFSGuq3YOa2kwFSKN3lZu42xWvgclmSJuHpnY1JE5JUSl0apgI3LMMBTbQWjZ07ybUYlGGzykQAhHEn2rZSAI5yMwsw/UzHOGrTCqSAmQm6La7A8xnCbw5xM5hO/YNQ5cQmmMV7OBT/hnS5D+C2gKsPy0EATIIDc7dJaXy7/Rs9VHJhYoFY+u8bV6WoimhCDno7pAyhri5ErhB2csYtokPv/v1oAo+p5ga0R2sUgnRk096SkU6tcTMu3RUVbN4fPftkveo0K3xrFOr/9q6vpQGT0jSB7Kjfwmkfk5zGFVCjJSh1fuXZm6a4RcruNuyvWWIwkYg6phZrkuhFa3qxOc/9OiUUfjb4dY2TjVmG/q/vvFgL6MKqhefzVk7wpx67HGxicHPD5gwuSDk6ZHsPmXjR6J2jslj727Z6MDDUKRi8RcgY+dONEBKn6ZBegl289XPWdbHZniRz9Bo22nR5CR5NBhAGkzRBuPInaqoKiqUtxxQ5bxYU96E4NBYS4IiIOSA/LQ0XbGIhtPZ3NLP4uHrP6Us8AZt/nV7i/CaoYYvTgLK4OswPhEFSFzg8aJi0J1QlY5v+uoE7DRIVF+Ctbs6mX/d6PjvDzdJ6aQNiBrNlPOBuDqsW2WKJRbPUbUFkXdOZGYMiB7ni70YQRcbWdH1ULmQ1AmJM3H4WwXUHqBsd2aza1IMj0c/CRporHKnH8f7yjmoBurZjJml4LDPJvNT0GzHmzudgb3kM7iWCydq9cMn5DbUUPoqYQCooGBrOUU+jOwUovbKLUSLKk8wvRU8kynUXNviDWQCkjurirfQ0eLJSbf3jSZSFHHIpjU7pVJQKs1CnErkiQKF2Fjs2ps4dpECeqPaa8NH9IG9JA15jIaYHq9mw1pLeLaa5zTrj0h6Lti0jxFPlOQQqOQ3AwK8NDETnRnaHrJUx/wORNY3qmL0AOuYWVVkTf0DQfmQlR+REIkQ7KHbjFLar9nZKi3TXHpz/PU7hQggZ5QpdGSajQGcdRzXBhLvuuNjA1Jvg1BSR6T9JHPTZD83G5pxlbm5v8wR7Efgy77YdZy/obtI4cMaLRg49nonBk58CRUucS40prfW5opPvm6W5iLvIkn14jTHtaB9KPatyM/rC1iNN6Dqwhs7bCBNU7sOE19uhEr0tVtQQs/T/8U3pY47CaKTJia+69ljg43XAF+8qM+Nm9MoZeFTS/vBYMdZ1sLiYPN5XyBX6WSX8TLZ3tmBFv4LpfvP5L85Ft8KWy7uHV9a6uAIszlnsS1zp4cdlLiHQsxhWWGhxtcMoyIZhvfaBO07/nhz5Aghmz4O5pD9gaLwFFUc9fsAswnhnH6SvdnUqGLg7CqCU1I4HTAutRfSkqv2wqZ09MZ20Heex94p4cel3ESKfhxJ4XVLOowdLS0HTUUYgLiTW69K32Wx95QFbuSPI9qPirjdLo+mlsoZFyMS8icBUyK/r/zXKRpD2cx+jatEzdMlRJ0SYIPR2BeJppqz/dWWo+17z6ujhavDH8uMk01iVFHupLde4RGE54TmctIWZh/KV+7M6wt9X+CdgW+fzO7k8V/yuicoRYutjJ801fBCumip5Va91wPD+TBBYjJOLq+hS2L8ACvH2sxBIZpySYlp3V0c9XgEJ9Vh/pAu2ThmuB16ZIVFrq0PO1f0FaGytCJClAoXCTxEsIhO0MURnXhEsSSiTrjYmxtqyJDC0WwtCBR0kS0RWWy/Egw/yHMXoFKpLakShp7xmhN+tjIlrU4EglVBniLPn2KPbV8WrbHXn0eQJyA4Dqr7aaYuZVhoiLVdnhlRV2eWsT6O2ytZPEoXpK94PjZikK1IYveB396ErWpXlr2O5DOP36NT9QHac3iaCux13zVljPCn/Xnz2etJP0/0q2RwKGNVWcqVq70jNd6xlL3cMR4+t4WhrfrIyBbUNu8d2aHkYXuMXHu4V2YDlneOR3VBnBvMxCBAUE7QoCVl8rNBpQuealve8DOJOBAwHLMD+iemLERXYjkgUoVC2nOTWz3F55r+onjLoKGOxigelI4gHm77quU+SWa2Clf0FrI1dGnGwBLwCnKhnOvLNcqGM2G+mLnqCAa0Xu9CfdET9Z3E3g0vuulQEp7dybOK+EPCnlprYnaRq45JSbQ6G/nZDB93xhX+qLMVvgwpciA23ind3tdVkKg4VVlDajniEi0Gya6HB9HFm3XgthKiD6351/g3OGRbtnIky1HO1HfQPw3Ek8L4SSNPt7MZR2HRnY6E/Senz8/mx6UXQqaCsHjUiSODrCgxu6dvExoT1PsrC3YNgWrnBfM0KFrJpc9LEMKLb8LmWluBzgFZGnl4EoyJ2oNC/Z3Cuz9dRWUoJUqcaqPBdbn5cL54oariZN9NXqNsB2uOKM5vd9kQJNj5rXDzlkuFMFjwo4IHkTwpbRK+M5oB401CikzgvtRTIFjA3VJ243PDgb5ATAl4kXzKhwADxEbMr2KURBjx9jdxLjHFk1RgjimskoiyjOmXFFLJpQqWCMCx5m9JaYezpoICBUMDdthhHGnmPfw35tYIjiYxAZADnS7WLe7d9vnALLxi/jto0lOJnGllMH2/fngRPDlMhuFmDrmJt2amf2H7rQOc6kba1ruX/PGSiHGV1rKipYvSaleS4LKZB5EV7e86Y+p4XOeVnugrO5vKE1Zw4wtaPWcRo4C8CGrDClR0ZODtJchF4AOqSeosaejTvzYLHu69sTR4Gu37wN7jHkERmpKQf3HuJ0peQZEypMLeVi1+AtcK1irMo00d4ge0Mp6HPqgyoNGkGpxSrC1i0EmNGwvmPFmGWh7zf/J7MSmAGSmFMeRZoQdEzI4GWSZ4puBrGnMi41q5oSmzToTb8J4stX3igjzgo1S0gSQ7bqWwDK0gTwY7LNZ5UKFRbI+jxRlWDBXVkcDAPVloLlBdTdTdn56L8cqNV6uDs5PuzFR5ekCsCg9giT+YOHp/ZA8RYdr+PjxwOxQuVzMA/gxksivLgrS1pH6sox4drW4fgASZestR2tg+aYMzcl3wDbptnAGy+LaHcZnI3bnzf965/gzs0leTV10JSrGTDFoEqiNDmhssA8AC7lw+yclsTGdjPlqLbb6utxEGhMY23boVjAwboF+LKERl3SgP+BJtx29vxHiLR1KZoxLw
[base64-encoded binary payload elided]
2lYtiQytEA0a8UA56pi517Ytybmfbr20MAPw8dcseZtOKRFOY+iSzFUprExYFT4j+v/tclZjFBIPJt64/JF9C7f49kL3M1psZZcsaxYrkXeML7j3ZMQWBQEZOqVOPeWa+h3V5pXk2IaNp3kNSScErXd+U1yP14wyj1A7NnTsgWE3zx0Q/Fj+t7DR4FNhxkx9ZCORKbMDbjWy4uLmZwEQVAAGuek0KyXUpYzC95Uxy5cPM8uPpt+pzYIWAE2NIpojD4W4TXAFhtgwUApKxJd419IeLEPrBfxtouBXaNENfXaJmhN7JRmQCVMQqKgPoD6DiUEu41DCFIeYkdwMT1B/rvEaGE2LB8QiFuBzIxAMCewpHCqlEf9ZCifrx+oqjnNUvykKmZDCIMn4k+hCOFBHxArFilNq81J9fidkegnL625TnMINljggqOStCw6oTIOwPy6fM+d+tmDuuzX7PTTPSQ1nZvupRrLJ3pAc6MpMN4rTWVRS3r631Ix6nQDQCQU/Vqpm9g0iXJZBYZQcleI3u+f40kxXMG0z45cIIeQrpT3srfW1Uy5PAma2cOZ6k6zY3jnxnGX0/MotoCJT3Scgxoy/HqjVtsaXVAFphictTdVuqioMU0LbrxGKxg+su3Fh2dvJOKZzICyvIur3fpFZQUx4S/qtcVDIONb370+q6YvXLqUk2uUUlY48KztRHPuZUPa6jo+ukRmWKxnSU+z6dGXiAs0Q4p7gkT0oxeLcE78r+Xx7khKD+eT6uWmp62AWOzBTqUzpffQUqb1m4CoS5Kbr5yfW3kuZlB28DqXhhFqtYdjPEaR7bie9VYwrWYmiv7dE0SQvzeOsamf5vK2ASsw3PNGtJJK+OMFUu0/8a+wWPQwlPYLvkgka9ToUL11Ilgv8hc7yhhXXsM7eED1sfCT5rM9BwTjhVAbLwid63r5sv6SRuBYDMMqdLYKWrvn8Fg8oq8nPk/TRmhunFR354WOXQCuDZ5xV56IqAOR9Elbse37O5NMQbOJHuYzuaWFQZ5SO0AryXHdAcW+BuReHaOrA8RgACbLWDXEWQX05FZg8j+HevaPKCRtG+ltInYkPClMSUxJSWdbzxCR8XXD7hnG36lDBY/pRYGZ29Z83Xvhzes2xv1aEKaPifoni0z9xG0eFtt4MvG3DZ+zms4HlYQz6S7Xj7KXG8c4ePiZ9py79t7t1J/nHVs4dDl/5keVVwejaBiuRA3koQ9MbUNpwdGr+4NMl02hxbLifI5wQ5Q1myeoEUBbmJHDyG/zb/x7QAgu+AIMAjCt4ALTNwulLOzzLntOFNOOMcjNPALz0kksFIhsIvSLgHc5OtdQw+xfEWlkllsqScWy1yPDurxxNtcmnK577sbGKh33AcL7NI/noiuTudVfzyVzMebeqT2g+Xe/ViHdc+xPFKy+TM8zLPrTVy/HdVj5dok0cPfO7E3iZko24I43mLZG9GclutVXy6acY/WI+M2kY1emujRred1HgVlWXy038edCJWq86hL9VdhdUmO3d87odXm1h53zAbL7dI3v0huQcrq7jwTzP+72KkSBan7gpGjiYpMeNUrtCRqW4V3wrL3LedZrz5MsVSv2lKloI8Xsbl/rmok3+erOOe0+budeJ493z89tcy+e3NZjxahlzjsGFIGj9y/JFa5P54UY/3SOLGhza5sbQar+Rz5741yblnm9gwm6rcRrOhGu+Xiw9Xi+TD1nq8hOSefIrJkwerOLWwzJ3aa8abfAHEpLsH/9lQH4Vq+WTAG0qcbUiyVmlC1A+nR45sUU29Psg18KtWzveXHjP5cnx56htJ9NBdJFTHJ8ZtKHF0R5LeWhPBdp0moyjGzMNiSbZUx+DmVe/95eV9hQPffs1Rujs5lvHsmG93eb7rRxdjuLN+vvPX4+G29vP8d3PvvUx9292nJ23U5oqj5W3az9mz0Md/iTP0wd/A9GJTbnb+X/s2FLzY6Lh0uWDeaFqjFdpfvYP3t2ihx6zOOirRIvaz5qPq21nFd1GJLFrPChwpNRwoHVn8Sy1IHX4X8sci5Mgi/Z6obbE/hZsc8Jeuf8CE4sVtI3X6beT3umOZxYlKgPLc6T51JED1gACk/p3LQC4hrkOPDyowhiMwVRZy50qAAIwiCQ1/N9TeG/h/QMwQiNAjhocRLuQwwxs1konGFqpIxhWTWZsYYqY3+cVnjmYM6wwlAABQBMK/P4ayRQI/NpVTAJJHjAbjuylkB6keHprgSv0CC3E286ywcFbISf035x4TUJU9PmAKoQ+CJsGCoL04QmaL1IRCgyixAGCWMymY40yRa6cXClldZcHBLTLiChtowhk7yEiUB/DwFwkMoy6FZbbHZXNkYtGbZBY4Z+7e37AX7VpuW1tSV3r3l8BDf1Azj2kycx9u1BHiwYaJxRWkBr2gHaO506tW/n9/xOxYo54hUuCsptysd6Qp0OdoM+7kM2AcUZeQe9AT6gQJHTYTFoFEhd6ZuvppZ8rXhDFD3UCGotZhhXqDOIXzETVB0gojoP1iFSsYA2qB3BYW3q9QXyHeYOtNaYmG1AZ9QvsPYYdTRj1Anopy9KjHjkhy0OcXyU3akSahf6F94k5swnhGVUfuFN2heiGhsI3CIobEI/Q7tAMep9k1jF+o2448qLkPE/WpEHeE8wyVgqQ/YCzQLljFOoxr1KGIwLwPP+q9EN/Dlk1h8YbUPfRXtH8QXuFUo+4K8mjKsUW9FETmOA+iTW9IM0c/QTvnXj4njP+oq4LcG/oa9a2Q8BO2ygULSAzoW1NX3Qz5rDA+UdeKDMnsuEC9KuICzs8oK5JmDEGbd7pYj3GLulDkNpn3oaG+KPEjbJemsIQVqUfob2hTIfwOpxZ1r8hTMjsG1JMi0pl15y43C0OaDv0H2lexic0wDqimyN0RPaKGkvCI7a+wOEicoT+gvRb79OMaxgXqoyIPR9WHE/VZiZvh/Av1QZH0HcYK7cR1sYxxhZp3Dpcz8z48qKMRn2GrTWFxhtQZ+gvaWyFcw8mjVkMee7Njg3o2RCY4X5u0UUOaCfpvtB+dIZ8Z4x/q0pD7Hn2JOhkJ/8B2ZmIRReIA/cas8CDfGeMcdWPIMDevwwb1Zogb4PwfNRmSjjAatN+dLjZiPKEWQ27nDry/oL4a8RW2R0lLbUhdoX+gfRTCbzhtUA+GPM29cgT1iAgOjG9JS2NIA7qhmVrFjhgFVchdQAfVIQG2vbBIQWKB3qON6kG+J4w56hZ5COZ12KI+QVyB8ycqkNRhJLRqZRVTjA51wJNbXzPvw4t6h3iFrTWFxStSK/QR7a8SVjgF1B3kcVSOCfUCEYPzrUmbbkcag16hna3cy+eAMaGuIPcjekZ9g4Qdtp8mFnYkJvRLU1d/mClfGeMLdV2QoTOvwxL1WhCXcD6gdCQVhkP7ubKKDRh3qIuC3HYOvHvUl0L8EbadKS1hR+oj9L9o70r4A04L1H1BnjqzY0Q9FUR6Vjqm3GRDmh76Gdq3uhOrMF5RrSB3M/QaNQoJr7C9C4tTJM7RH9H+qMfp7BrGCepjQR5m5j5cqM+FuD
nOF6gPBUk/YazRTo1FPjC2qDATlJ1LlQEqDC+0Z0JCR6bGcWXs57HeqFxLjeNbt3ZvEiX0g46J6vpHdcVjPdAxUbNxb9Y7M2tFx8R2/nrxct/HemGGPJPHH1P3x/rNxsBMnTNTy2PdfDJmGBtDf5ihho2KqcaTqtNjfTBVG6ay+X+2TxER5UY0SLCuRqaILVvRjlo7EemiKg+ikUDnkRKhqZPLQEMnSVJ0yp1oSLFcE4EIqGgi2jWxzbFW7kVDH7RrkDFC+51cBxrtpBeiQC8afKT7hCziikvRGXXpyEsfTQlRCMoCCdug1FFBp9zz6PJHEdhCtQjbsMWOCQg4nZaAVsoEMEH7AGAbtgHt64BOex7gIeHr3J4uiLthXJrLewj17z2yRA0dHo3+zjyo/Jceq33ToJYJzY8xmd/U19HQG3Q1sCZri/W++mOv1pY99/9HH81wA5sB1XqDXjq8dVIkNF9T0r845bgMN/EliGqoX/bRikyjdyfrHi7MO8qN8IDmYR9HHl/de/3POwaWm/S9Mt3Z6rs8Ol3rt2FXE+rZ3vzIOPLwP3Ejq/zPqd45j77fuG0b1pYmyzB0i6ts7jC5eny3pV2o/jiPmfGwceO/8nChLbcm3nVvSUu22/55eeCy7vnZbtBVdpPmovAHekt/zoNMp1f/6zl7fxyKpI/BeM6fRZR7w4bR19tjrqg42v96yx+DkNf1a1rhZM7wsKmLKKxeodVquEDNosW0JcoY3ddC1BwxrGTwGPEG/KZT0oiZDP7XntNyQE0zpL0UqC0BNzm0Y5mYuafZYfjiRvZt3GMronqPwXGVycjzhcAjToRTVi/qBEdo8cyCJxZW5Eprvs2zhdf7nrhc6dK9zaGLGCzaMq/h3TU2pdr0gMGM7BhBvu5Bx/E1kikXMdg/hqkmw4WvEAMoiNHzzCsjtjC8Rw6PMCKk8rFrom01qj2YGrIiJEMzOfSkYRv2cIJ2N7DJJd716y5N1GqGpzv9sHRHS0NrngMr6zuc77PokA8Mwfqg3ZGTs43LWh2tvPB1Dz6KEYigJLNkZKW+4WF0sMig7188nxeuKkviaArOE+2rkYCFSZOVlBNJ1dcSBU2a8jCC+CHIaGDRETFM8jCQNDuaw/5OstbNtQ0deuikd5Y8yAqolgMPOOJhuvzgfl1Ax97mWK1ZmMQKhDz6hBimGpuYwrdOIJlNBlw4hmeqO24cN4PmGYXotoY5QVS2g2bxe1tlrngx3SX3uHW08f9RMki+a9XjOsUFXVClgqtclHgQtdTSQ+UBIGIhEkd61n3t7nQ/2G3pnmbX19RHepiJoz3FYE6tpgO2E7RTwjGVa61FO/QJwy3m2np4IsRkUi0RuIcF9nNihE+6yE3jtQU4nXaob2tphYV3A6i+SWv5fFvd8nKyVhe+GVaHdohLnbWWVe+0LDd+J8zwERkU2+v/WupkLTnMrsNf7SUzxhOiO0F4ciDV9vT/IKMqsXmuA8tbbHAUmEwMK/owKdoHyXwiJVnl1nBEsqJXpuCL8UiIy9mI1DPVkjU6JAWFKklMHnwJItSnHScamKEDy1FSj4MLMqYbUS4Y1HaeWRsXe6gy4BVTazA7u2nlLS1qsEjUeDm+EQXyO3YN25Qhwewk76r7Vj2J7LbSFWY+m58gTsntQlkLizz4LpXNlTaIQozm7AFT2RoeveBJ5JaO5sI4AOF5ezRo5KNW55FeKcMeNGPuLeo5e8cKUaktTk0MLQrNtHL7wqJlaKM50GQycq2ABGo5D1ygTfgWLOqJ90FDeB0YeFkOjiRdDTnxsCFModk1mVHnC2AeJlvim0ZJjkWeGl2DPA0tlCr3tvR7fTcK9a3qTAdTn9sbzcbwKDkD+miovhYbS1AxSbiu6Vds3H9Ehx7nvrGflUcbl2Z/d/MYjlbqnmeFFd+GwcoVRpL6hv9yqjkXWhp79tSa5qYcvPMgtadh0n0MN6O5YjO8TJuv+13AaDwZ9wqrwObyMBlL15BkhRZxArTKM4z09Z73srlTsiN568mQCMbtl81ReIRgIbIe21T8aupkG3nfgGwtyhnZfnrB1pLw1lSbBPVNkc3qVGJlAQP2fxbMXtUoe9XKuq5aoOUUGrUgSFmDmXYbAaY7twBD717FAc8K+2bSere3vgIfYTGFXV0vvFVmlkbbMV8M+oIPQ+INz/DH61vO5Qf9F1En+aJjRiVJk7xYssma4W8F/+VaQciHKj7t1cfPb6mvfwyTgEW/Px/vX2/mdZC1zjEyP5dtufzRd9uuelfw/JOe1lNQnuvvP9aHtRd2+b58/1f3+mT/hu/9Y1Z+Rlj/k2N8bh8m1/B6FLE6nZvRa49QbNv3Zq8Qe67Gr59F0k3o8du8Xl5/f1fRT01VzUUs7+lmfX7P/xHmh3+XPKoXcvHPT16qNyafAFZSCM98awOpt4AHG7Z3tLeQN222yXp0LjyJBRCbg9b2UtRwQCJqK0DvZpNmTbfRbAsJvUD+9tCTEy64xzIfnctqIhvB6zEWomDGdpUKIOGNmUcBFsgshLhVCImpHKQEaKZrDlJ1SKIODSDcRJIHAGruKnNLQBeY5d5yHYWy/PtATPR76EfcGdTSNEw35KHOVILjTABKYGhJZp/7MuDXpDUJntQbX8RpXZ61oKW6/szBJTHdUTtjHQ+I6Vo6IS73GUWvIaHVkPY7/jc00kd57/XcmGVAx5HuCB0JzRfVXEpgsJtTVS9GAxoQk5HL8dVm5Vg1t3tD7q6dV3Hrwi06AUW6M0DN6f7oezJ9aVzV9SmFZaTzR5YOR3VO4Twt1eMtu2H6RFDAcKpI856fWA9IpazU0wCf1MDbaA1C2d0RA2BHncCCzNWBg/N9+6IKjE5NJ/kCPEi/J33C5bz1ZRYn+56xOI3jN1CKt4A//UBBMKvHipzcBI9b7bSU0q2lxO8HnUAOaay7Q8REii6dIACfQiuMSejdKYcBXKfo5m7dZfc10prr834u/ER6yL1W4zfMNx7ThnfSz4+sg9mkIaE6umcsSKiT5kWk736/4AgGsyyd7HDZzbt+ctA7gCBP706XR6sv/Nr562eFnGk3B+xYEtfyBAT+SGWPD4j2kNuLCn39Ah17Dg29fjgD6ygPdQ+93sqdfOJMkRGOtcFLsxzL42ICW7F9mftVgj714W5sUKsA21ik8xY9PcG/zKexyZjjReFQguWQnERgygzngmjpEYyfsVEr3Yc5tigE8h0eeRSeBHJwDaD3m2GW7UvPG+Qu4reOv1tCgK4f0hJUaaUQ3ENh4EsrhdwfALyX/WVQ+Eu1gBvqwo+IebhaguExS6a8Wc7eIyghEfIC9JOddfj0gpxRL9YAfitcdlKsjSvS8TwMwqBOIVOSdy45Sjj5GF/YvJnvk/68U9oDHD08n2AmCbjtefS3Mqh1SnEl4XNalIVlb7SNPLkCFbW88/Im/V/cIAHwqRQ5EMOS/FF8+datBdnPG2O3JEalxo2hdVPPybim0M/LEPw7v/8LOxNcPT8hGoDeEuJ7ud+RV7NLYYG1SRhBEhbHH/9tT1cZ1HFyB/Rzu5jboLtXV
s+kxoqoXTcclyf+u27ToMcrF4HX5BN7RipZbeHk3R6Z5nz90W3ttuiExrLUJwC4oOImrMs96CZbMmUcDekaPjy85Tp5/BHt8dCergkW54imBDlb+cbARDFi3Nx+2iptc6tTFr20I056/BnCemVeTT9e/wRgocd7h9mO/XvTmxlK5g3Lxuf1IW4EXmw99rvS4OcRTKV1ebL4OiIaseivpWLfSEaHRF0UuXunHk9vWCpPo36Zc0eNrn47i5px6l6cQtNBSXfztPWTwm+RdjvajEsQyzY6dM/NytwkMyaUReFcMoKyRR1ir3Yn+LQtzQ8h6Kgo7M5nedhw+T4vCv6xH3eYq0o7wzf+t/5z1pA5FwkeXNgaIC8AdEtpJU3HG82V7qAnna/bWJBEw6BVeh93R7zFHMTK8t9Fqi5ba3soZi5v/R4ZRyRksKA5JAebb6DHjS/DgY+jhjX1bZ0d392FD8pqq6E88xlk5wayWGBuFfqRu6uaV3H73CgPUjejTT3/WRh6lTBqgyZou50nDX8sijYoXG5Qp3F1nWle3Zq/wkrbGh4jjBYVTJx/BFbSx1pJl+IqurQnxRtxlKLtklZcyEArlp8vhnOC4ji4nibVpzU5pgBOVfBG6AqJljRD3U5rVu2+Wk6cSyiHNYkSOQ+miPHS9Z/+UN1ghovCLDA6eSdsZqlM3T3Lp1E0l3xmsTglggM3refPe9e19ML+OjTV1YBJG80KdJ5sq5OdQtMWR5kc83WktHnO2b6PqJfu/Cz67joYaLaS07XjxB4dtx8CcWG8Ca5o8B8Fa+qLwKP/b1LWcXDa3zr3GKN9CilhGgUXB/UUONij8ImR8q355rxM8cR8B72fTPJrBf7OpyIUNn5VBFPNuUlD0uXYe5H9EsE2I2tI8D0eNlw/cjBDN3dhI4LaN9C10MaRgdaGvLCExLLHRgClJY+PVlJbAX0HzfsYIJ7BhKDHkYPI6H7EZtJ3PtXBxRMfQsRRnlQ6BJ6B/xaxc1SF2lKYBzOG9KTjgHRJYTsISwwUl3VXyKK2oqCWH6Uk7QdXYLodsbDPLiVAD/xkZnFfmyesnSsxRi0n0RAugaHOqAJQ53npPUvnt4JrOtq76fNpTvmfelIMvMihCEm9QhX3s1fjuXq1rI/FyFFULQtcD9X1BeF2kp9LDZrAVFx1vHG43Gfr0852YFJKIeml7Xe4lWPJjuihV2CnqjhOSgQr/4wfw26XSmTRGU+ZHWNkeaDHRE3X66T5T1P/NqttJyn6ARDZHE/Oyv2MJ8XgDcXwHW7gbDfSPV1yFP5gwknE1yvaZhh5R1WlMFWFpiHQ/Tx1liT0j1HbMmFWiZdi03qVK7LbWxztGdYn5EGr51XI69lXZr49Dfd1Yczz5tkYqiFZ45hhRTxup2OXYwgLx5Y3ppVPfjqcE/Sbro8+bfncaWA2erp00zWs3Ps2nt0JU5TOBz8P1RNvznmBuGofOQb+jl+J9HpOHxdituOtSHi0WlrS42pXNTIOGjdbngknAdcz9+u4rDQen7us5wORfkCrqJvmikdNUdzRDOm1kBypIA3vqYn3oB6J3AJAqnQ03ld6EfCZnO+Rybg2YUQiV8wIwVo7e1UWvqDZol5ITmvNOb/JRvXHSWLWXardtKSW7+AI2V4RJ+AKntzLkZkfUe3KbRhlEgn2PtGVi5chTffHBNDpDLAPxuw62mL6u/2LhZininWBTE251vn9QVkhfwxPFEcbLDw675pFQ3k0qqq3wCjugMx5LFJtMXAOBA42Etegg6e3yPeAovR1XSVtU6CEUH43kgDPxb1HfZNKPIWbeBDBfWcPmvyrXIfX88H9B3HOr78VdmK7clPLD5R6jBuH1uewrrdqmwMEXCn3c8vSxzlwGS56s8HsFU5q7bwZMVsoabmYt2huwA0K/9wHKTt/I3wORxJwp9HIaa2fBPHHFWFoiLVoWZEOHNKWujTKbgADc/NgeNszkIT6N/HOpWLO99xe8hlfJy+UA9FqZoYWcDvD+g9r/T08aX6gVVMiJyt3P1O+f4noHB/V3LYHOiGJSjr8L+msIWRsTjH7+iOOwdgrbmigU5knTnEUNeBuBZ9liVuhbWc5cU8oBJrjJp+WB6gN5/stGGNLCB3Kj/yklyquGZK+ekT5GXBJPk8dhoePHQSmryKi2tzyNDZqoy417p3SHQce1xgqt5y1PiRJ9XwJZTuTD1coS15jOK7YpnPQmqmPxav30QPtdOz2etEjoJkDJvg7g+91qR4iaK363UEKU9W3Bc9cdTX2OmJzO1oX+qsP3fTslnwIbsDC4XaMPJABDihsfmXraQ8bL3aVisMn+T/4mmvsm1pt6QsRvX95ageYU9ekLZm8h1HmhtIccgoRNtD2vlnxmVwR8ghvhpzZdHT6Gz15P+Q114JeUc+d8/JW8N4TD9jdrx+dxyRX2wvdUn0z+Zo1BhaFBe2eDlllrS5UFpQQpYaG+1ensBYevspBOthyb9TOgZYjhWS0WbFs2xHg1dhX5f+Ie+njeYhoJpL4xumUiBMtXZwwnzkRhzVTTJovTYgJDYeE+YqDONHS+QQ2Gyhl2Zhin76dhLMQCOgzfWD5PYLcrlYIpppolKaEoVGmsm3omnadMTWOS6poINFaQWEkggvcAbP8LVtMYmsbu7GM7EZGegYgmVhu+YIPJSVbROQ7X3XwWhKJdgp7BahxZF4Nd5u4wXrD6f5Rk4p+rmz25AZD6k3QaR/QnvxsmZvtdnhE8Jq4rSmwyU2yobZXAsxNgu0wLfPWkAVkzFCWI6SWY4IFTpzqF5eog3wGYA4pVnEgCSYZUg7albalAvYSldPdmXLq75qV4yiDaSABJo7gvJ7EZjuFMqMfSgGIUPhdeSNMamU9JKiFZkF7AY7skNoXc5VGUiR3poF1PzQAEcbkE6d3I9/OsZXv0RUm4cp0E+vXxejnYLehSjwzOMowZctzyiZNoP/glhIokCFUi55MBXJpJ2FEpJBjZL/4v/94x9i7VVyyLk2EnrdFJC8nFdUVqCIP+nhyzp1pyb/lXg7ly3+nQB1LYVmjwnGBYDV/4v8SAI6Lw+O+PtMWlHGbL+k+Mfd/I4F4cXp4YiCpXLLKeAwfJaasu0Yv+H/iwDxW/CsAbshQGBiKCwcyROOrVDFFKMpEQ7i86dJ7RT2hDio6uZk9mnO+H0I5mRF5ICsEx4ytTB31ecevvwxtunocmwgu1JeHqo1tNWwzsXLjTiQoY5xTkDFMvOIAA2yuBfYgHYu9ciVXtEg5gVSGMOqJeXOxTHdzvG1DRuaD1C1PnN7Qigr04xzAI/YwLRChPbx6NXRqcmF/AiLFhaNnK4PLULAdDIeu5jzjqN1RSmExVSdEmTZVkEqyYLvQQJGLBLr1YOmwg7b6UICvavi0fj12EjIz/17X1X6TMaS7j4Xk7yd22752ggBk0xZ9gCG1mx8BrJ+guG8dWgoaqlKWt/kCWmmO0FyyMlvlxrOPKQg0a2vg2GzjzzXp2uSDJ/EkmWuxv2ebO12th7W5QWum+c3aHBgXZJv5coZMHmflbCweCq6/
QY25pQKd2BpgRBdQyVPbJoO8Os5rMzRZCY/1D8jwF45LqSDploZHGvxhikWous/gN/iovwj1SerX9tHI4d0TUkqj/NgiZFDdquBZPwJQtotYS3Q8SQq7hwMgA3DiJEXN1uchAGNizFh3rvZRqDhs2FihdS2CLzzJLiUwKKihewYolegn0c3BevkF9xrChn3N1nNoF/WBwwC/PLq7UMjv+wXH2SW/d6dOEr45i/IfY+C91yTXV6bCtrSTwACIwvCKgpvyPV4tudwa+HQINSk+EXiaGjjNZBWCT7D7OdgWtFYyMpM1Fd7ZqA8l0TqpxWJ9n9aFWuWoVU+VdxKP37F6LPOS9unv5yzM7v8YbOg3kuLuIQwXBPrQsdco45GOM34EQT2WqsUPlhagD47r9/VkzSpqOVNXe8iMc6OmXeMteKi2aJd9Car6YUJhqFeIpfd7YmiDUJKyYFjKcfoeTe+1/j9EbWfVS5ZG7fzUi777txRRMxhTt2Mc8bD/qURFg8Ko4rsk8pTWe3ntnANhh6OyvMrPPBGlYzpTJN88CQVZk46BjrBewJ9ndA+mRJUuNb0jDsHNf5VBNzTNmOrZh/Ku33Xfh+DV+VGZlXq1kWJMJ/Xj7DAZ6C2Bez9sdEeErysfGb1s3rxDGaGUA4yhmeZtt4aUfHyEDvrRtKh43gXxNDtIm28G3uGwxn43AiNNve2HMXc0C82+N1rBsPkVqA3UV6K+Ghgvlwykw0L6r8TRRpxRZHGUFyfQcr3VfA9HMNOcvwxbyqFtiA9sZCl8OL3Wm732CsMjcqa6Ks/BOXg58YESIqMd7RLUHpAfUXBBaM23k7GRTazZnYSld73Ljf94G/Y5RosKtP7vRR4uk88wAoY8wXtp6uymxHIJAtGbFezN7Jh/sDmxNYCa9fWDyVCsgU2YHSdm5gDOLrVx6TRYJkgX5te8zPz3fUg+TYzjg1gp2myyC58m+j25pET8SBCeZpeN+0eax72jWqP+IlhpOHzdZM6hjNv9+IF9ZcPuI0QZG2uN4vBx8D0dFaowGIz8xyj22UUy2/mICtaoPhoDKJRFS3cx/vHo9fMuPDYTjaRI259Xv2lkdU5vHu6Psg7PUppnuqUbDJWNsXR7P6xJVbHx5CCGEmPvXvLaZRwPHZQwFmMLYE40wY4lq4mOBEPPghVYeCIj7EC3YmeGFU/HOR2qVEEwfrQ3kBK5iGCwkZwgQjyO9aVsvYZoZviZRoR5ICBJMMR9HklvBVsWOk5e1jCydYR2k+yhLicP7VBkrJ8lmQuJIVmkdjt5sJ8iNsV+9EWOoZLMbdFLRg95SzUxe7Y3bgQxjJlm78nhZ19h2WLCxFDqC1ArDFkXDWTNE6zjPq2+xsZj1MdeyghGTNYOdO4XLu8++CmJio/i+/elF+usq+u+eFxroR+b4hDoxv2SZpAyk5zDncYHcguaYwkJ0GRCpjdS0taVRu0Qky5PY1hM1SOPUqIhM8qAKO/dYuM9pixh1tLF3B9OoCleWrJKsZDBZ7xbJKSFIkKHaCyznatMXQXEiFCnJoWU1TmhkbatwYGC5yQsZAnxhd1qSJUB3IvEmHWrdRmiF5PWSgaNMWPaR5xYsLMU6/Q3bbNnF812VPDw0MPMMiRPtQ/hx3i4ZWUej7qqiqPptgZNrsoLpJRl3IwsgO/PMepJxtlohWMccaTstp4/4qeLQgKZxiaouMhApaMiy9Fe0Ol9RaqaYhQoIwaVxrrSsuipCgspO6czZkWMXA9CeuwtY9QYEruA5jrResl+DiGxmqlEItXluciiRafa2FWN+UC/jY56jx8XxLTji1ZS1ib2ADSW4dbT5/5JWNR6rekHUh9iBMOEcbJM4AHa4kC5gjrWTrG7dlMXdZ7pWElt2JdYjrisnms17XCNPT3XIJ96l21qpKtVtmn1Pz1dahppu26JTpYlDT7f23m+tPb3Ee2wA5dZmrDWr7vElD3ec8yhSUG+IFy64++qIKdR9gZatfuGk2Vw/XXrcURYqqigX6amVUJ3LcIsOANkpcSK2uXiqqv7Pk9FxSNp3eIdIInKhV8O2FlWO4WFp3rq6cn7yJAG99Ek732bECW0MGwvWRH4j5jtjE36wfbskdzU/KfQUsvvpRkB7ckrdlOA7/6KfLt88cPHm2fbgumHIYkiKR9t2hYEShQT3/tTlDvl0Dp69gQA6xzJ0pO8a77dRPa+DUGByLGW01GtxP6uouuA1kjwIbRAN2PwvZ8+IZa4Dn8MOlIJ/UpGfg2iqfcz3gFh2PsZEK/J43eL6cvC+kZWKQFxLAzwjt0G0bXY9t2kqRQc7gO1+wP5YnZtMnrHO2Hv9/NAj7lFJeXwAPZRk8CNf2jzrPwPDPi0sVTmQPTS1PPxM3a9FZNZX+yQAd9Z7EFvX1ct6X/XUz464ZsDB11aDOTQov0eUSoTKs5TJp1BpQ/jw+qOuca8PqaSoRhRL3Uw+NiL+mmdsTWzhw3wxV0ijHLQERhErdWpVQN1rU+B+UtlxF8JH6nUxvBH4/Zmr9iFdoowDYWlPWLj0+sfvWeRkUsB//XR0td7SXJEMmaEJoJblI1JUQ1580yU+D4zWdRjCsMjoRJHFjHjRPCMy1hwHPpgu85GUhlLWBnOhqHh1HDcScfCbY+aeIzchgl4mxu181U9M8SuDBbn/mHkKxqSCiAPC9SeMY2n2sGj/ptcw5RFTAn1PlUNZyfwAIG+d5Niy8ELqI1NfFh7PcMKEq9gEj3N7CvRKlGA6LNYQP4wX4UP74pbZ/kxCfFshBRFaIEU0ktgePI4vxM9xsQFdZa4pIbhAJ56ebAO5lMWDnJwMggwn6q2vEysBW5r5nrCzj3YOuP4pkqwg1vG3SzepHZR0SiwOYXBLCLZJmtvtfTOVZQy8KLX+ydJ6KOHB+w4wEbSHGhXPkyFWA//d86Z/zp6BKovd69I54zNt5tc7MIg+SmA2iAAg9qjyG29/jvIj23uECwYTG33A7X6rSn1rvOzmRz79Rebv8R1fn+AG+0xc3Oiv4EIRMv9Azw4HuTTMkXYYgeetdI0BU5qGPWvk++G6gMLQNSJAxGogYsob6i0cvxABx89UU65anYfUQ3fisrD7MfCUlTJQfbCKkq1CsL8rhaw8Vf09d4W85/heh8wKCF+VAB0pmmHL4LSHwUtQPsIHMnwCQL/zegjOq/P1bvCrDey1UK2bSNDX8/daWAbTmhcgfhiaRxSMtZUXoo2m63ouDESVXPjFdLU59ALxWiQ7fVx+BkV93Ul56Yr//KeKteUjs0xN6wy351lfpqPaVsgwOqmpVq+R8uv27ycjs4cDfswwBx4w0J8oM2hxkZDZUAytdsAK0//OXz+O4kr1GvDzCa84asXc7F8sqybLKgmzkxQ90ewerGQnMpdvJi4/5dHAaG8K/yxljOYow1gN2ZBsD6NwgzfxgeQzot5Gb2R0YDO4M1L0/m95rhUrUh3NqznoJFvI1J3bxujblf7UtLL6dF8as/vawao7n3NwhsWeuHrG8JDGq7sm+e/nGgSxF2GVtXqsPfzAxnW3QQ9WV5Qu32U2fM
t9KPRXs2Oy1EqIIhpZlLjsDcR7PbCxmcPql8/SCj5SYN7VqHBWDKuxXlpz5nGyVXQsMibfp8lswPLchae2NXAk2EvA63kk4UeddS6qrUje2Sr0oEIGuoTK0uZaMhGhqcwBj051SiW2b2MT667yUxgd0oDOjdsf07i2i8nD95OEXg7lUAOApTWrJAzjxdBgLw29URzLmGS6BRd4QkxtlVsqWFqoBIY453Qth0uPhhuK39r0FISVMJZK70Xt7+8eR4YU2wCdYb8hsdGPEF8htqvrCSNPNwKMzBM7VveuVLhRyr3T7T4K9WSbwbkgBkYBvjGbA4o/GC7U3N4zWmwZZJcrFkk+gZacvFefsksb+3YSYo3/QOVF2ktUPih9v7rq69KVGMBWMCB0dDyoTupXoyMJJ0Reqw3UR/jGQeoAwxnpwI1JthcmhRkb8ByUFdRRqGl+E7KddtlLPMsyYfjvMDY2HPzkxv8B742XppdZdIfoZVEjRMwxYI7qpXQUCOfc7dNBk/1khwXMDoN+28J3lzLSY0VCLXwE5fqot8jfseFOVpoU7VTsrLY3d7kej/m04Q+hIE6I9JP29iX952RctPiee8Ee7KZWtSQgdShTBXYPi8DfSQwKsGMJd459RabvKKuqCs86iSTxbWgcGtQ9y8q+q4Ep68Gz3pqlVpG5hMDBREYxDsJIwWKyS3G46Oc0DDI2lQMtJO6BseEBFCEt9HaOwyPnEfTOu+k0YvHUj7d5QEzu71znN0jxxKx0ZqbzxYKKK5R2GgOmAd55cJrfrd07vSK8lyS1n0Dj+j/0s3uYhsOtlQ5Vypefwg2JRqiHRo64pALuXoGiR7ELsec3Sw8KXwLEyOJk6hImqInFq3rDGJBPbJBuK6AYr5ifAgJC6PMJj5abBiGgpfzxrQAEzrv2PnoFUPFdNgtjeYfLDk2rk94KuQjucgF3+p8ywAf5mat9OtesIuf/aTMo+GNSc5juGyRnhDMx8RJ7KfGpMhpH6aNpBMmr0U8Q9Tz2S7rqOqyl5m3raXlJGh5x4IsupSLDidszyN5MPElYjXpPS+061W2/JYZ/0NR0Tlwhx9u3llYiTn/BRaF+O9Lyf56oGFFqrW53x9qF6xvZYsasSW2JOw/tUTsj6R/Sb0ClqydiE3W1rYgtrJfFpFs6l5MYYbLNWRQRh7zH5MyhvuiDNpGiK0106GmgZt8NBKLHOeG9r1Wz+ZUwK1ayt3NlH3KOV5G6B2Qkv9lW6Jf5MGu7glvnrwjLHHpoF65xLn+dbmmp9ssK/DyArqcj2b81NegJVh2tUauKIjVNI4uRj9hWX3VKXBzKNjd4Ky3ZnRkZoAShPfW6WTIfJvE8TqwvD60/Dq3m/ISqaslGEaedwkHqJhb7Z5d1KQI2uobfU+OswedLEKfAfQW9V143YQzIC/WALONcW5FFPnR+zS8mAQQT+XkV0jRNI/4RC3mCxjQ6GFRBUltEOSE2yJMvUgaEfJW4BcJSHVEOMozgqph/4d82KBruJStnPaM5PDAPZ7cBxQS6gGz0/fCL3cFB38pOaJzaMKSxZ8pYhpWv9T1nVzP5HmEpam8zfuA4AEGovM0N69Lw2wxPN9kDtwYwQqPWtRdc22Eo5Ysm8wuXQp0bw8xQ+vQbu/wr7+HA+xh0+6nOJ6UfPABFILApwA8KxBUSaXhPDb/L5+Yim/6jbG2U4rKi7MKN4iWRVGQfx8oTjsYVY/fhIilHAnbUXrci2Ifl3SYhVcIkYWGboSCE0Y/NgwBYI7LTK4ihD2Pc8xduYPBmt9yT+jVo3R4fyKuUDvSsRj4r8g7eBs7LY3kvQ2pBfLUWAa9HSnUFQaQJ7lTeIHDgwwkxEbwrndFldqgIBxkI2exQJYPBOMKoSkjf7Yn68XWBboaMV/IhzvENnMDyUAs+aKh3/TqX8d+lBgEKiw4k1xoPEhsEQT3Jc0FnKLV0ZwPVKznnyhY+JCcNQLQtoO8p6kF5d0bx5pE5lOfPj3UlKQ8tA4OZtpkWgTrkG4RDdDr650gpg6zRA2/ZchR7E5bgGqSaJZyc4xcuQ57rK9i0+Duw+Y0NNElmWHolYjAL/vCTaYLeIgBLW8Ph3S2IQLY7Jzj1T+stqHsb5L5evx7ZkUloAuLmEOFUIZLcmQQ8Ge6saA+4MP7LJFdKzZ+4QHJ+0wYr65uhOUf1r0JgqzwE2nZjh47OR5DHSYMUA8s5hNoFMkiHHoPKSxJkplU8ZWasyz49xxN+YQtn87OQB0pvlKeCWE4WSZzlj7C8px2p+xejXRwfzQmUlyB6KUxIdmK6a//+c7xPIlZulGehCwFTN8J3pp1Da/MLvJ8V5jK1oA5xxsXw2FCgCZjT4ybJWdvaiZKtaReus62uTXMm9rDjSWWW7LwWsEsv133+ttGynVvT7JyWV6rTeZWAnKazz53U2qKAlMP2nahOlF6IirTCdMQJ4nhmj52UvfWnXkSFxUCwBUJ0f+kPoUh0OHWO/AysIpFYEhSdbXKQ89JSXsC+CGYPABRIOFOLzqbe44F7N39kAbunx4ajfZb5bCeq/kzH1EGUa023ggIPvWmSqy8SQULdjHGjcFIImHtBSmif8kIpYPMY5Br/pLXbWTSsLll5WvmG8BXi8D97Kk3FFL1ebThLJJM1qlihjK5pLH3KdPp1nj/2boVed00+rLobr5ZsyGJ///qazhCG0lk9LkotuhU9kLfwXRVLubw6HG0OXctsI8BPNiwLUSGQ3TrHY3czjGsKvp4YwSOw5/XMUJRPfVh8zJ6fsMM7f9yN5Zn2t5yxkxVdkFPPVZSDUjNVDmBOWyfz/oJfOL+jWMLbnut9HBfAc8mR4FZbeIV+9hJ4x//+Swd2dO5ZFdCdpJfa4q3IFwhlN0Au4qf6EsL1xhzIjmxZnnshMiXRK/jgJ5GhnCZZLd2WIEcIPTVxJNIeuzMgtwNlpPhHTB6pTCPeI3caGIxfl3K0GoiRZqOWQDE93PtmGyPqE3AVg0kjGsbXNkk/eyMo0NxuzmgjtLZpIlbKE0ByA2WczNpeA470q9WUIix9UmGq4Vk0855odKGlQA5FiRlvw/DKDcG5wweLD+glfbZK/Q5ex6+6nVhsjzEa88mg02fqUNsvwSuJEq4RQR0JxxFC9DnA24cnHaoEbpSL+vuChnjDZagyde56WOjBnC31M0+0gk0b6+EVXnnh2qKerXmgOIjyPsPlRjz2mltZ9WMnDQjZEjspoonr4MlR1Vq5JyK5+qeM8tGuPZh1BAIoPWTaefl0/3NIxzpxh01Ash1XNpovQVXMpMXaf60cWoxqJrK8vwfATzdawDD1unpYk3AX2XLkQR5sQEcljDBLg/syqLRprAYiVTfLro+0Vq4Qqj0/yoS8mDvy7XrJBKFpl3kQO3MwxEpo+VChYcvIYwIuSLcPOqvAc8PLmMc9wpH8H0IlWIf5xVa4mks3NyvnjnC6ZxESbhSCfPz9WqUUgWVHjWvu8QoP043m9AQ5HMq8DY/jyZd5X6MUq9/mJy/kHzqSdimckgE454hfzPJCKdomv54b9MOC3LZWfjkVR
kLFm6n5SZGrSQ8wudekPrAvxjn2RDsi2Q4B9dGmI7aUst7A1j4sjf184Ymzlzfr6bTBEa29wy8ZVHm9hINyI9vEant+xWeq8OgKXU66sjfhlySmrYFnYhgvFeGHCZiccJjlOykRlhsyhQBOLLxKr8YijuLp4ZRVbJFru6xxjLuldDl2ZfpIENYfboLl+2ZaFpU2dsepho8InAErVjkhnESRsNUmGPgc9wHqFyb54KNvynnXIaepQrFClCh3sIEF7Bu92HGQDOD+fEIkTR8ARBfZk+LQ+MgD8P+LXT1Fd90xae6jR7R0I6XYgd9TUiN/BeOdrxB/Xn/G+sLgkKd1b8f8lyrCAmfAHrWA5A0gBUny3sDlOHzggosXGuHvV4cL3D060UaYhR7XngQbyBeiGFDuuP5Uv3hxyaPZtDQKRCqXn/xjB9WLYa0nfSoFDYdJ4TihaN62WVAMytMtM3gfWsR2EYnosev4dxn2Lf14P2b993Z7FTJZU2GwdsCbtZiKRFrOKHsqPi8+bg21o7NDxDNWHF/CR0yHbG1NMDwyYYnY1voWlsWGlYNN5eAHMygRgTsTHX8X4c30+W5PNcbPQA2nWnAZ99X4euIuEdZuU5EvUGiQTZyl0ZFJni5SFnxbdqtW0NNWr18yCcKHMqGIz88qFLqKHqfzG9HDfdsbbbSZ8XHZQhZ3d8LY3pWcUkdObNCtOZZZ9RAwwqyEAWDZ2ObD1vgpMorYeOyMChdK44K1b49hB93wi4UhmBEiegv4VR+nRhZszQ+BZ3qaNFPcyYjG5p6rmHIr5HGYsCBqWFRbqVDJEzy1WkBaPfCzE0gtMsO/MUeAhluEIQUhWkARFaSQM+ryNNZlS3YdfHYdlwEVSp+CIIKZFZkbrMvXYmQkZx/7sw2bVni9vRl0cQHxCazurFcgxW96Rjx5uMVw/s57xC0BBCiRBSQy/D4ri+Z6YF/IXhZQl8M9/uwuD2YWEJNOxJBrR48wJu/TB9IKJQ3a8qC0Op8Z4hURsjCVIqLJ+WR+9qfgcQ62NIZPzjYwggG1UG+BuCojs4bwo5ECeBxNLkWMXjIavO6cOXB0254y6WQBTcbaKbNut4gItDwhl7vIPPalsDM3d6KAt8gItRjsGo4ODBQeAmYJ1h4bTjuwd0cHmDx/loimgpzsQOsYmAlAGLf4s7pqPIrivfvo4xZkMVyqXV7c4t0mE0MOKbcvCUeKVzyTbbPOpMQ2yxc834siHjJjDDzHPBV+tWTRV7P4SL1yXft4Y4fH0Ye11DUFN40V82KDrDqYFX5UWuFzI3CNKsvU6+e13kUWG79NanZNOofVn4xKwcYnpyqzw5UeyRfrJKGwjyo5yaXPChZBKQN+sigxqZMcNoQtafz7MzosCjrR2Wcl/HgPRiA+hsqzxEfr8x8RPVhbzEZOCGECzr69otYEqxtl9cHUvr/53CQhuQLPTQx84nivSDPJ7uiZ14TNtCS9cBPaoBCxs2GiUP3sGYLzPGZcFAzdDE6RYHN9P6BlEuysAnZ3rkBwvGvv3mhArTxEuinN8xTQqkcdFfv3YlOzKK36UdJn07MwNvI0oEH3+abcrszzA/4PUFBByLCtBg3IoS/1s7N0N51sAQLeLC3z+1D+RF3g6WPX3ScPwBcR7H9Yi/TLJh79n4e8dM3g73+VATa2sof9YafTxcgxNC3DYfLUjjG/gD685QNrnH9QozE2hwfmyrN7/7zieeAh2Uee/OhFcfgK/XVRDIWwywAUF3PmAxXeQBkFqd/gY8d4iwth5/+tyC8QTP9fE/3uhV/adborIdMPZkFLtn3+/+SsN7S5NMLXb6JHun5+ZSjPlvR9YvfwPoyIYNmb19NC1TRt1DBZR6qrM2TtF0lldsBtzwNfVVgGaDewZec0zUVB7h3SdlH5tQRIIApNwmUteVmJny6jwZD0NkQ+O1+ePgRSRMz6V80TqWXCPhEkDNBxJc7wYbpM1iK5aPCXALSTCmfcXNyyvuG9Q0d42sdoOFvQm4oRr56rsRbOvLdWKyjLZZBWg9MMtCD4fjlIvYm8+4MHiqOKiVuV9eZGAPwKlSaX1OvbcSErYHqmYyIzZDuGAoLKBuf4//l9DK6OcQs1zYBN25om0PnJ4BFRNgMyZqMVPZRzQfALXLa7NUPlo1/UJBcW6llmjA5IXMBhyMajZpcrjpbnetbHi1RzZl6/4xOj4fjprjAVZgRzzbMXlvwnfqIBGWFU1CxDfWg24nSp5U9Jn26FGaPPSzO2hDEDcXQiV1geFzLEfnlTYIoxXuco1gX+7coMpJP4KxmFYy2naelMflAOs881BD1YF6FaL1AbD1lKDEmhLvEaTpwY9VkvR1b0Detf8/sFQGm8XUSvjbY5kBJQzXvtFzFW38Y7eId4fidgmWqw98szJ9YEq8vMbjmRqbWQkz2uUTH978T6fW93/iLwFOj5tIw+mRM8RFRZvNA1afS9BB0p4apWUIlH4KIAUB+Fjh+8s5DDyPb8NR0/T4zyR7BZg1FswmTa9rC2ruHr0AfEVeI+WSY3In16U0N1ZKNY5QWGgEOe2pNLFXarWB6GjNvflQb3+TxkZJ5KH3j5kAa2kj0ERedRvPp80hmprmtzZbLE/2UF5vPdpqgH4n1N/PcptgEwgvLC/rfoPhHC9q53h7mhpir1QsRrE4h+9+KOB3w09BmgmlwIps882y96Go/kiBVCxHM74i9anFOUUcBc+eyOzlrFgjE5rE9jZ+aN7/2/4NOxtQGS5qL3UyMVh5++lhflQwYiNMyz7/SnFsQj9A8A7k8aU0qzBzwrMzHRKe4ZS9dkt0WBNZX2jIhr1iG1D8Ot+x7aRy4wtVZkQapQsKsof50mHM8gxaZemqzNj9XRt0bBs8PXZuR2ZAPn5gZZL0VE1+QBpAmY0dTDYD+SvIDiwiJsauWJtrkxMPFiYQhcY7NgN0VQ9tQFh3TxXgssKfw3WiiIpY3JbGAiX4tiS5iWOAsTH+zLg7pfuBqw5d/nrOF0orfSFFhS5WzxDVyHz20+d5oCxoBs1tFlu/cmfgMAWzRWNjxNdm4XmmGeEHjJzF/Ztz+8GWg0m6LbBdYzSNDK1KzEIRULWynF9xhniscm1u+Wsmjvoo/6fnPgIwLNTKK6CMtUfAP8MIs7ZhVjRReijkJ3gbFS+S0yx/y3c2QbZsQkeUkyBCL5P/JNy8gE+bj6YDgCgU2GSVQt2DRp9kGRzq2vOwadRd8hJNhAXyKd5zoIqvbpTR1Fo5PZw0qcbePP2GsHbocJ0HZz45PVF+02OsMsbHvKAEm+dz2fRSoYdnkN/H1TnFmuXHHfSEv/4iCkIzrSkZoM1HmD4n09VmksJEEgrf+EJjOaXCT1ynSLCwr/5npzNdSOB7uT8eZQPWqw45GAPwHIwisjiX/PUIHe/j48ob34kLN4ITpu/bneyHi77piH74vLt5xPA0qz50odxsC+1oL8CbnO7f8LWz6o03nV5HdOZy9l3PSjyJQdg7KP73lvfzl4OOifteOdTmjA/nsH946jXaqVy2V+/4pzqOHR15Gteci55H3N
049hvO68bLRxGJn2ti4l1dmRxYO9Sc6jkvRVrYVTB5Pz93TFP4502R/2mjGLqo3HcyxtWXfQ/X3iNDYZsMfVS7n68CHuWNXuBxPmLKtnAZ4IDcw5QqGYmDDUVQVYM5GV08sogP6Qj96PHcT3P6jhQZHMUnTWII6IV5ejhgVku9aKIlrcVODQQHZE8TeEYPqEi+w018gNnLSZj1FRkh8mbTCPc9i1X5WAbiEKkmUGb9KoWNFT48eY/a5tFsfKUonSzn6l0efndxML6cZS4B2Hvl2W4b+xhHGmGkAHu1w4Oy93HBSAHU5FzKo8TIjoPM+BMFyKkxaz4q/Ll3KxlaffqX34OsNOsk45kq58tMZTrHPZ00LEyQOlOOGtyek6NFo629Xw5NqwsfxnEWKHzeAt7co7czZOnXWQTcDcIYibs66vrp3saFpnDOozVe9sQ6ZPGwqGcV9a/TqeJOquokNc6C3SpFdzGXreqR4PYBYt9JI3PkEYJ/+x/rEHtVdpIAMF2YZHSGIfgtcq4Jy70haemlbmFrS86iAk6ovRTZg9RhuwpfkOOp+8TYS5yuptjonN/EU/+Ej7UQhXL6UQSw12thBReXY66vUDrOYo63ZQsqiIr0xDqTz+HEmQAPtMYTkVmTqQih7WO8ZnueyxUQ++e+gZoly6GvhS4ztUyg4yR2mOsyByDRVV59kGiq3V3A+0jbmw6usgQZlUyDASNq8a1pdbHSlG5YMLO6O5O0IdVDBHd0gHbPYevA3kYILFkD9UNcnLO61rqgcASxj0PNJ2WjLthiix8WfzovBBU6c24kA822rUTNQSpfT5UzAPHkbdySJjoi5MDsE3viHKgHXblC80FE/aPvg7p7ccRBYJ8b3kWJmBZrdAwOrPsYrryVhVRDkgnlQylOtWgTaQBiJY1DD8vYnujTEoI4qarufHAH96kcJTOe3hFh5A341ebQrVKzgBOQaxARtdWRPwUl8AByMlgVofTEg9uEFzDm55tZLGZ3pjQhIbEpjbmA55ifLJwp4zArbbCJ6krpk6qpit6zVRGmdz9jVJDqw4bHvxrro8j2tVEXGW9vlJqsgW6XBBEOCgs+OB/z4YlaY3TIc4Ww/N2EFn/nIr+IJtBOsIyaxMHUZQKB67zeEyZCHERr6eEfJ14vSnVmvPCYU9hAwJWywUfBQaujXEUzsyGa/wOkRBij3EeIzo9TUsfd8sT1Ip19Pn5YafFGEL0FStc+gF2+I5TRDXHHnVc24NmOAf5Iy07Xxkg+6pFi8iDnk/mnsfFVnnoECB6jZ0WwoiATzwo25Q+BpKdiPBqTteFq4lBOHmr91VxRyex4mq23H6BzhWF6rhXJu90ulP0bBMcByL4SvcV7vJOHBAJP32s3UZJ4+5852dcC6CEQNiNd9MogUm5enfyvFET142DDp8t87CFSsTlZe/1lAHqyDoFj3fU70nq33MdaS2t2oGQtn8QToSZ3/dYD4T6GZslFe/tV+OMjBdoaL1z4XzfAv9ANwOlgHPTgZ9GAVrIMc5OAMEGAZBDgfHB+TEWF5PGX0Pfzwy/1M3XWIt/tpNgo+IdrfMC4u7fr2u3olltXn7fqv70UE5G2WPcOXK0Uf63WntQBSFWSP0j8SA7VS/+MaL1Ju+8Cbw396jmJtdn/uZL3UfwdSt31iT2QpjB3ejxaqc8LZkccmr9JxOfP/9AJ6eg52do5Qk7a1uOTz+77Cn7H8/0s+W18L6k6OMegkoFRSLRFGm4OU4JivSyK0j6uuBeGqexIBXPgOFgwB7zDZPSWBbPC3tRA0i+XAyrwpdV6PseFQEaXBxVBfE4hXkERQXeogF0KzcKj0AW8rzxQwNwlzNCJMj17A4497+hMC4hh43OSfAsxsLZohrHtHi6RwiTO7yPAqLrWEgHC6lBm3bz2yUm40a1JvjboLO5I7vbfLkYPl606bv83/KFpN2N9/hLS6tX9g0K09XxrdaD8MT1XzIcEO1m7tpyM6+71SPe47AKQ3Dzlw25BoaV2OSLDRudApnrr7du3j6Zubdfy6QLNU+6Qleq5wOnratYCM42jMf2HGu33PCphXfCWcnulpVXbaOGXuxF4OpJKtlda3Rzvf7jOg5oTOFN8+SefUKJpAK1zqmAIB3fgc8QFDMPKjzpZpZmCpwqLBGbpEdeG2+Jkr+b497DKUJIyCWESH2X/5gRi/yInqpX5hliWWtxkHgCH+eTk0UB9QSbdni3F8x/Nmen34DT84w6h6itdkP83PvX0ssVC+S9ML2taDIL/sUviFK8jT9Ie8faDeHCcKUqrNf99M31ArKS2m8pmDHvbsx6sAeZTuqJ16hveBeO7DKzIsyiLHUQs16SK8Fl0pwScu+OLd4BW7WiNpXApfBvPxRG9LfV/vzNejdEsEkyaL0/etBV6F0jPY7kHO4xrZD3K56DwflIKo+PKUkP33oMK25m26IILkzRYDkAKg70To8GAEx1kuDr3FYmyKZkJ4u81Jetub3gcAd4Qjzpk0dzS0gsPuFSCkWgglrejgoYvN9L22FJtaRe+oCr9b07uUzFrwyPKT9b7RdMzK3ZBU8GZ0R8632IjzcCHt6Rm06sqoztQrlR2V8D2r1nCO+D2/WiYl0F9bLQBkEKbYA77kYlqnaCdVo1GnizzOc7iJILtu9lJugwHnrLqjYcpdpJN3AczSa7mS3HrgrZ/qGMZrDWzEp04bwZCOyMdmo48gep1e3jTJionqvXZu79mHZ1QBs6UWmmcq+iMs5/j4xF88u0ov6dbXC3k/PKjfG4aLR0rFY1ZKyPiVVtRLUcOh2zYD/kw4AjxTendvuTCCkY+/cwEaQqcp4hwdoRG9enNkI+3iKtPNVMLU4f7y5QMftePUthqnOQq/QBAbCVUqB//zlVeG4bp+6THy1+lJtEgl0gHrrXBeWFr20gt5Zv+XvuePEllK/pe/I+A54H2FZyecjxV1bcw1SL8bQ61m7lfoYIvvZp0FN9TD7huvub+GY65HzvSXtyN0v8gAfOH//9Afj56glNX/2ymc6Zazj/OTQkRn9q+u4NtG/L91ltoe19uy6OguhMwrorI40PstL+DYjzZtZx8FdRQm8pBnybImmDTfwCYF8ktr8blyZcNWhvMpwCp7Ym6c6VwzSPHOp0qSlDUAHsflbqeeVWDcg3GBbEDDaIoQsEZcoD6cQ77kFk7PWzt1xZe6qNLEYkOew5WNfyjfqf5Wq1BoBI/+dATNgIlKvegHtGUm0HuSEMVwYx7Qj6cJZEvRRY8eHszbNB8Wm8B8FbAysIutASwfG/qwUJ9/bbXL4Nkzaz5mdKrXr0ytf/jz3OAaqTeaFvULGHwe6abTNXSetEYvaRy7paXc1rFX+omuXYrDLWHu6a6VSYeshq8blWkPy/qi011vIfq0DuojaQmEZJDMT2hibi91S541WBv6NzDkOJosTm79fhCzZ7eXXPUVFn/5RdcrySG8J0AloQlgVju+aRTxukzVSYX6NV7I4ngn3xBmbT0huvwuLyKX7pxBvdCzfpbrJv1r5KGdhB4RTrbge2FAm4HImz6u
YuKF9c9R8acy/MlU6ecZI8yiIty/W1QXWq9fz9hX4J241n0rVm+kj8YV1mC2KmQbktGqzeN2vVbtik+7nNYpG1b5b30NJP23+uOysL4gYYH63Kvq03ghbSf/6Z6oAEm9xBMS9K87NkLFhkUh962qulJ5+WR+UwP0Cw+HbnyY/3TsNTuDtrYIqFClSyAPjyV8jI91KeI45u068XH/45mRH73WRFGbiVSemzt2o44g6db+c5d9WWAGhs0jmVHOu3NiSvLMAImupYaR3I4f7bqjKRy06Kdr8DhutuI/etULoj6UYyg8wxlYOBhOL8b6EFckQy6LhmlOopx/fLyzffrBtZpKenVba90fYHq/yf3EjZ6YHdT+Zd4QFoCOBY8R6z/d7WWe7zMafhX8atZXQa90qVp1RHJ2v1XMEj3jUu6kGvw+w2LouJjkYeXgqAYYFlY7+MCMptpFe7tAZxig0dnG03ge/3TsfpbaW6yy69Jm+600Xv7zszTKJZJCVe2ZNQwjGjPRhyu1eP+PS+Pl3VT5+SYQ8mGPhz4xcknivrxZN0UxsfJETvSFLOGRJ9q0FDyW3xZ1YBWfKygyaCXFkE5U6j1UT/mnMO/Fxg3RVey6qOvuAsp/ojad/Qz+GViyfS1K+1avUGGjzAsuFrgXw78TIHdeZlfDIBeOPb0b7xF/HPsG9OeLME7q0/7iyYePtJ/wfYiifs+4v3tH95PeX+uKEueStVY58tLiWr+O0587bbZczAF/cfDIWMMnmFM0REGKOiZAz1jRGm3xdPWTOKmx9AP/BHm/YWIG/VZBjlURZjStgi71+VVx16Gibm2UZ7WAZgvgX38UDee65Av+FiV0gqbFseB07PYu86+TDsr1P+m+tyHWLfiAFhIK1wATvGx3HniBLPL9CMi+oPJpZeJ5lKfvHvUE3taZRlttChSmRCoVMjqu8UV3e70Acx12raJbfaV9jxQGRq6PqQJmpDfAWsCZHVGeDFq43wfcHa97hmMMl3/DFUDCpucUFJwLF9hZ/FJJD6DIJhxpR/bpPBSkK6pPQtJtunRmebu7z/cjkvwpUea5S1lhqDbvQiICwZgz6Tgc+W44N9cQ5X/j+cgU4ZxU/pkMt2FmR1ygDvq1CmhFJj9AJNAd/TemO6NOuKGfh99rnDA87aAnv/aaBYIDtG2E/2arVgu7C2u5Lm/CAlDwqYZH2u5sd+RfNvLO9V+rtu5HP0eIxPcf5EjuNEjUBUywyl/lhA8jSwz8BrfRjoAyCT/VjQZHwXYxav6vlY+37y++KODFw1hv7A9ewYGDH8C02c+VYpt7HRzvRk28SGXU/Ih46R1QKkz5vlEWctuEV5aaqNK4MEaebXbPfJLqmzOhxLwJ2fxRuYsskMQ12BJPgZ5BG2hU+M2xdyNnhcH5daoZm1tBAMw3O5csfYtU5hyHqmdob+LkLFqIzvZLwH1jq8/qDSryX60UnpphLHvvvmK5/9R4rA1ux2hOVksvXz9QFUO6PaT85z2g8LxiqTf6nvG+nKdl9T3WPte5DvmdGGTr32DrUxN/mTPvA91ICU1ML3yk+/0x4CGBUp5ad2Bd/0cTdPIPBiJ0K229de0IM8EVILSDiBqOA5XpC34Fr2RAov2b4PaB3aDffSyxu4AYYXQ4yl97TV/D8QvL4Raa1Tw3lu+lJvycU9/y4/cI8U99+CXM31VfFfXGGXkZor97tCjCh+fFkHYexTGhqjwGPo8ctJrtnwEyvQ1NPIfiEH3PqTPcJAqxyB9PaAeyuaav6Tcg3PNezX3hUvEr8QlFjw4yPrAPf1nv3oO5l363sVKQWc0vJeHDCt44UWjlBPZ1N1m8+KAkyaHyxdDyJGAQs/sV2rZ1P40mZcYrB11bnTLycKh+qdrqGnOstAnubRTTb2rPAR/O8jW1m3otm+9OT/YD3mr56a3i7bQhozC3FGUI4pva+f8MpzhqiflPlqzrL1INuCxUo+VATra6DunCfPtZWG+x26U6D40iPRRK/R2rT4CPTKrOO3PpjTUwWXfQdj8ThSvL338HlJDZ/OlQCdfd53TwPwBjPOJeBnorcSeJhjRPvWXy2nPOF5vG+lkKFBvIesMvIO1k4WOJmup7Voqay/JlzEhrKvjMzWaXor8WlzlzU0cd2tbkusXU1qDnRUr9bzTR7SnDjuXdk4BRQ9L8Hi1XnMIPh+mb5cPaHst2+PxXYkw80E1OGYUPry1vrwRhBdKdlUOKnw55fegfJx49/94Xtoal4k21+601HArls8xef49T38mGh//bO43T6XYg5FbbMMJ+XrpCiBjoVNAdhCiQ2M812RWdg7t1SXRMbSI54ETrEnn556uCumtbwsjJe+bg6N6KGJ8ZCTJstOKOLZlfD9XA6xifsz3+R/W0Oab4aVeqvcOPu0XMLmtjDrEq9h88KfP1y2//7v5Mp0tv5W2yK8+wPhTooxH+Fpo32AF/HnuuPb2rY1cBibtSWD2CLCDz3LSb9+nRAXNyHEd35N9Ki/drm7F/hX3H3uPXXKv6pPDUXyBE4m+o3gL8My2ZpwY4x8tgCS/LRaDP6U7kdztBLOY8RCDdux1J/U4lJ+XvUbLX8duXEMBXw33qkzs704FUL1/zqrZowcFvyuQGfsclXjpN8E5pYdG/dbhuU/doqm93N7/7C9Vh4saVS+9WGIHwQ8DqxIJQ6kNiANvD8b9WeVt22LL+kw8b+H6cRg/XrcTQA35S5M2jmgmnDc9Jza53a3Rcfp6uzcdUbNDeCpoGYdq1lOBe9UMnEUOx9X/sbLDEacxPEU0j2bacifv+K8JUN2qozoSIpl+mk4OXnOdwMhc9Zi1fBcQsl5qXA8LD+RDwMwg6s9Ay5vlIDZrTYIf0WIAQoj2UKFZxyDX8OkAUNEhn9A7gMFUpsh4vwTU3m1T7hFb3/ZRKEuxTDHLC273lP5OvtR6YxQS7TqgBa08A/JCfB4SDF7NRk4/kfPhfsMj9YD3FvqWwE/vkfC40Pv1L40ep6KP3ESs+qKzyo9cTn8HGHeSOx9+1yzUsCIk1MWwG+Hs4OkS/9v07Hcf1fSuK/MReDWdNwCAT02lMHru7nQ9CNHNikSH55vLwFzt+UiZ06+3q8hvztTvRpXnAFUaglR7sUT/BE6JE71x4bwT0wV8JQEuURDDDNU7/eeZw8ZNgn6Sjc67vSQGOhPLFt44BChPQD5ZT3MpjfO4VBGZUUVBlH83I+znUQ31zEupnrFtoB4Ia2Nd4/i/3lN34Q6NU0g6th8QqvISBugqQ2t3k/rPRuHid+K9+49+SknsDOLErz94rDngKTPVZpxUDFongDgqjEcewGTJksUYrW3Ma/Yt2YEbVkAXrH58bTfjq4vNGVcBC41CwLvJ30z9rHWD0RKfJ1d1GLVrcTjQ6oq9BHZoBTLrgFb1bBRK84nfLZeGred9bPy75MmDyNVhJOed+netOKc0RpOsfB3h7TtDJ8ZWv5Pnq6pflTP1yDG39qjwKFv1dmCL9D/3pQzks7nUZjMqp92sU61/F32petQjd0k4pkpiJa90b7suu7lxBXsjYyeQ
X8qaP+FEZvqBI+8qt1Bc0l0BUHV9QLMGxPr6gxalQG8X5PmEcfZ5dUFI76E39re6XSQgBqnDc3Aez3EmngVs3vzyC9Jenct1nhNJK3/r/wetZrtTnsfpMN2UXaVh/bV9a3kqz7sY4KGMGvKOc3R0cssKx/vZkomcul151KfSy/2XsTt6NbP3douTAS2Y0eftbe3+6qQLvXS8Mv+yNDtT3fjctFfxrfMxt3B3QoQp1djh3f7KJbp7MoVPu/RlWIvNOtecXB7qQh4d7/4MrjiX2KKElQe30vcq7MFp7hrqDlTisg7r9Sv1rGKnu2+h9Z5xK/mAP2TNzcD2g6s6iUaQu37P1KtrA0fbs9B2XaVZjqkNw54O4m3peL6WfodxfWtkyptBc9Cv0aXmD75C8+mMUmvfu9Uchdv+T6m92oRhetYpcYl8t46Fg/38RiDCGxtGdTZS02G318YT/CYewyJW8uxgIYM4aO86YcNso1ACw09zhghqrUIbjTvYVV3yFQh0q6OiQvOINn3FQfw6w/vwO47YonDcgf779zW3aJyC06Wk/BMDLZkbazpfvOya02vTBbhSs+Eeb0m8cqG8OFWPYaj5Jl0CMrLbAoj6A60vTAMKuTNiz5Tc2Uf6lq5TyilouEp8nzI/QEfvHvxUCYdsAwg02IDX0+Xpsd4EBIWo2YWNbidW0if6rgokz+nFYdW8IjG6OlYb23HKmGsEsJAuQjhp8VjCdE4cUPLDk61TBgmkY+GuL7V/o/Puz4RKkFZhLEHkKoqHJwzdAZX4EakDw6k4Yzm6ClivjI1aHKzn8qGaK00NNFYDKqdk5jgGEYyhiPewvrMGN07kRcEdA+AIvHnxVWVeneIkUmJPvILaLiNsESDnqPout1jqhPF81/Gi1VscFMSHxRstK0zpuNPbd7S/bq/gUtmoxrRd8WNBiMvt+fPv/EfCOoyNN001yiNY6lnUM+pJu95Ed1YoloM0IB/aSa/VsldTVfnY9FfWTFfdg7ap9J2L/nKhtJsa3ABUr4Y7aT2Vp15CtJsHabbtlSXGuCNR+2jsrR0D7SiegzQh1tYgV2cp4sHZE2FO+IFBUfoqA7bhkfQFwumIOrP30VK/tzxWGWtudbr/wcrTiDqz5zN7FHTSrImCrhvwZCeqVCLW2qy9pXbchoFzJVHu7y21r/nauqK39TFfRCeqV9rpm8WM735h9qnncLisHlxdRrtKBzoLvelJxxaoRsP0UexAY4Folj9plOITBDFG+4qHWds+6TZK5+gspap9NZi/DFghXg4CzuOC7GrjfzbSSEehMMeBtKivGlcq0zxJJeHtZ6UTnDIFhZ7ibrozX1a460+ELV51grXhqgWaik9UB4znLISA4S4FVTu2n6JR8B2tYC6ZXS17w6wYsBS50rwk3gfhaSpcMvqRfugq99JB3hhwHqx+zB9FSxB3StRlnL0rDVu1FJLhCsQs9LBRIMLQXE1yJXifDoY516khFhtetqxDPaPPOZW2UQzGrMpiHAq601twvTJCvSeyjunlqHe+GyiuwR43ELVf36pR6sShhK1JcAd2310iwa+jh0ME4fBWNHbErHu94cPbYComlq9+sa9T2tzgUxRQHoupiap7XHIpJ0S3pL284Mv+07ppINd3dc6KzPweSmoSxJ29o+ysZui1ScRdZYMauEbsL6dEaeV8HQYuebOVgVz7swrd2j4/VJqYgWrFXQX32gRNdK+XiU2+n+DQT+CnjOTSU3y7mklH7zwPnKn8elKv6eQidbXwfqkSrlPlVp/KjnJaSFWo1AD2X0jwCYnNOHY9byk6PVJqF2EG6sSYXzAHhh5OgNBDGT8QYUiQbGImfcjIpAuWEBJCiByqr2aEaA11V85ULUS2ChoKqUU+TZsTEwxOTodz9aIJjl53Fi7WnkBArpyfMUmO0Lo1iV61ltHr/AnoqulKnxYAG3qvCTUJFb4agfKZql7xxkuRebfBogCWbcPTcCjBhrBqkpESMmyKAgy6zmEwuyF30gxytG59WR2pxG7bE2YfBFIXUxBRIoLpykk9LXvIgVw/1omR80yJLhNcmDvfR3JKXZ+n7gv/941PWXUaK5BbpDAZA2AuKF2Y6r+abQgX87rticSde6EURimjjm1Qg82nrhZwT51JMxVsnkPiO2aU1I0iSMAh3lXHm5Rq3qLQ+l5Dwtb3Fy5kQbSkKaKjEkWFU8NrXGbEn2BhhfMrUegLgknucpIDHrg5kUsWblQl4Ukavk1GVcCGsdIkMRVm60dgT42SPBJyoqvYsVc5ZmYAXKHUJM5ONuznjzF2VTE3jp3IY7NGWNDwYaksuk6XSfqqPJQGVVcVzgUy+XybnyWt/ns5gA3E2cwk1T2VytQx7kzxGM8KiSm5kVeSiKhvheflLTYOWeUUND2isIWSZ4o9VG1qvqA3W+tWDU6ryqC6SyTPe04mAqgpKtK5MJ/zCv9Hfkm8UiD3oAecmwT1Ro5MEv7HEhnYbbI7Ohs2XB/qNZNdL302BZiJh9ftiiC1n2L0HXXIX/5gngEMqbzjxFEtWsGOKPeriNWJx5wBazvHrL7gBjWMfrKA8BA2qoyXQ27bXCv8ULCGKFDtnhdPYqtwougJMbIIdeG5S6AFeQ2zboZt/rUBqJEiSTpREVw7T5zciw8eAOdLfUQcUzkzY6TBluxJUbIqvj1z//DE29KCTXQDOdjQTB2UIbT3nF9p7ABUlaYBu5/gxqDXSQ1Tit0K0x2FsQX1W+HVJBroLJDeY/9l8AUvcluvGGddXcpEAmnMTH5PkgI94/qFE8xDLbfaKjWNOum7KD93rmdSfT/n/7gXOKdcbJafD2m2VcCJyCE23kzCsdm17959oenWxhEA1SAx6HbiJYIVJvZTLTC4OCzMIM4fB4t/F2luDmC8Us6W3m7cM8J1CnKTOZ52Ib6vZx3xihtufN4+dfyyI7cxE1GcgIAub4JJemeTWMmgCpLVAA+4HREBzNNP6cLP5aXk1YndlKuzimwgbjY7/QvHIof9jheVdN8SmAHkDBxM4bjJBKYHb28zunf5OItt9S53qn14UI2Bc2bQwcMPXsrlJd+b7UjnWORvL9GLa97QCb2EGhWE38y+bCY/tdM5WIqiIH6sYuf2A9jnORwCDx7brtP3iNuXUE1lDubt7xgfstBBtKFxsIr7JBHB5k6bTunI33WtbVtpkzVA+yKi11OhcpKMKILGKIWdbyTIAkVXmkYi2h1w58AR+vHNPTuIcQHkU+MvVwZhQpKoAthcZXI9cY9ym56W1FyIPw3lrONq7e3rH7Ix8Dygi09PUcIR/BVf8bCFejdIYoJtzLkxiX6Dq5ZKMnHczuYhwnNCKu9NvLD4nPBNWzSM2+jec6eC3Ie+x7URCGftyCbvP3vFCbBcVTASUXygy7HjeP6+BCFIkvPDTtoc5vzooqx/cXz/LXaHIOckyC40j3sOZtCUgNC6GYZr261HR3xklN98F0/cHONHuuvxm17oUloVZMne+7yR4nF1imIeuwfB1/+rokHhf326m07byImM2EDWgD9iK9xR3YgvZhO
74Q9cw+zkeGgTae0E28betBwXKkyHvPXZSflzuJaIC8fzD5/2li+XOfljcvrq/fWrgtE3pBsFpKorAGwi6fi21JLDkCu5f5fbVb1/V9uah/9rrRfNxEBaVJ8v6uCB/LAZNHCjYSEAkx6y4Mnnll5vCWH5UrgIjjERBAbv7kxodWiDFJp+uPy8xRcDzxcQY7iKj8tWrRiIqWOeoVxacqzDrmwVrSp/rNtiW90Z41IZZaZIGW5Lan1IPUnJ9WnBXvlpgbmgiq0W8g0rQuV/zQeNxIctLJmPh5+0/oXmAzxGumbxmO/2jT3jnEYeFsLOnBv1dBocOX+MN60ay/buleYhUAjzIiTaniT9Ezwps0a2DZJ856adcxKdNOjJug+P0ifY7SqWx4tR2CBdpjB8ZSy2eI8z18Tcw+dJJn0hFRqt0C47UHxJdBGom9I9PDuzAtmYcaSxb/LxINXqwbTcIggGS1sDSC37BMeqyx1lHN5/MR5Kv14OHF+wlSphJDytkBQzgLAQ0maXYYX+9KbriO2YICdxDa1E9maMTeY0XsQuLMJjWa9rpailjJEn+je+tVVBxtV9a4pKZWdICsgIav3aNrCSGUBwKgwDCmNQIJT5/FmR6HxijggC+ZYbu5TEEUrEHyDR/osp8eMV5BoARsyBKg5SFXMmpNYOYPTZFKhpHwEncjVxINRQ+yMZBDR7VkzZSNZ9xQeIDDELzyhp2b6vIgBLYBSJxc0rm9wk/KErTtuREm3DVfjDf7ih0XHYcBhCNvZRSlQGHlewQMyEwEGpA1gzsz6P3ja2gc3BU8a8nvuoeFnA8CkmM1sbr6Il9fK03g5v5ec+K6rbrh1J2wkDO5dT3Ur2kg0ZMEOnSXbMfPi3dkrHpwe9m95P2wFrq3bMz3eGI/BUNhswn8fkCCje7kozeGCCNjZcIGpLZgU5y6Dng3Jo85mnVhsa90v1aHDL0UIQhH6AtNxIHy57xJRT2ZbIGsEy0b+ZTDJKeM78Xel81lPFP0K9Q5OJxd/NIwBIuVChOqO6dgx9bbVzvokbgcgh5oAJ4znVBzrsd3DxtKuARcIG8P7dWwzQSje04y4OMgjthSZIMpaxSQtuW4yZGnNq5GrIvArZJ3BP5yW/e2UvmNh8bKhYnba020d76xwkAX0jMs+79NBXTNvgYHUss1AegDVzhS7QuUnl5NZ2nZoNZYbldIZ0WpCB71Op07K5n/JVv7dchDunIyBsLD8vlIt04McDFx+yI2AJF23DuwJE7uzVIwv9pvDC8CeK9sDg9kppOg1JahtpkvbMaTpt80GwPs6NXPaHcvcxrziYk+00xnnf80fSuTPk5/60e8uPWqqUFeugpgk5fFfd6cjETBEf0IZsHcO46gU+Uc4fiUeIZu5QTGJKeoPrQRHqwExfBGg45oj4L1uPHKVxWeZ28vk3tRwJLY99/uTnL2OlblmWZ3V7aNUFRcd+60qVifSWKKk4uv/u6h9/2Sbiwb1WbZ++27JKv9RSu4C5CVXN/rUwGq6hYypoF/snALjnMP3JO1PNo3CodTaZFwlD7SXbWB8eqbqzLOIZhjoZMhRfTJYfiyhyPizhIfZROUNtIpEBoEs78bcG3SKM5L4sRT9Lat62A+K/r8Pz87nxHji03vSAculjoJgr2gxvILceeq0AE+oCh21S6TySc2L8yAFc1Q2ATBvHFBiervtu1RHBNGLDvljqWvPR68Ze6fU7oaiaE+0/c/xOX0z//nUqnKzqeEtRJEDTHoL37ZWmNuOkJTVyWCCNlA2/urg6LRfcTKTYPJwU/ova3zkn+3u590sWoWhtEr/WK0X0UnM2YJK96QhrAxYtBRiHUi9INx8jCF6G7pSVu7YW8Gm5Fr8UJ6IIVXGE6qC+M9CIIeubMLqNTkjYiKYqYstvgnCU0Rm5lFJ27z48JJxxQtWwiZmGxtC9z5rbdgICMt0sO3S6NS1/seuj4MMALBRaakn3l91898/vGdjiLPzzVmfTUD+CQrOY/8t+d8Ov2/tMQaPHeZamymgu2RXr8750A5DMeA7YlZwr95t0X13apDM++SG9MAXnHT5K25PwPUeZI6yKGSIuMhWfCvz1HQd/dWWdvT3TRJzXrxITUvIAU8fO0clyRrEf5De6693+PA3wvJBts9hNM3scunbmpeyNJByh2c+G3TVJVLckxUR4bibl+AEnsq6rhVrLFHFdy5SY2AfOWiL/C4Q7Y5a1NAfYDaau5OnEGHU3xraAdaRXVobYE+zWfqHd1SdoKY6ztGxc3QAphEnu6kflD97VuBFedgBSLwqsdPs8hwrY8WWm+arQLnqyaFypaZoFUI5WL+dUNBxciYLnKpQp6eaU/bvbpNEBHbQdsVCopuYX4oJLK4G2Gt4ZwvugmL36XqAC1U2yVhIQLt6Q+7n7V0NvSp3foMvNgAgtffEolWzMMtOTGlYusL6LiIGkDCGcl8Q0lMdpv6WtUGBa+j4yyx0XUxEfxWAAfJ/V3nvc5JCeihE54WX87l/BL8+FlOBdOrDZOiBQ4xfGIUlhzhgWmb6Mo+GOJkfXHDqeOtN2EiS49LbCS0W3FbIJKthILKV4Z1DYSZo2u1CznKlZ8O6tk6zkT/bC+JDcl7WG9BfKRPiSTGOcfuT5ApC+8+nGdzih2qUzgqW7g2VzhfMXpH5DlvnT4EEmbohMksY5HpBve85WwBTJZKOrnTJcTsECfDIIa8+mjgL4Om7aqfTKz232fzJL9ObiGTSuctcNs040vIvpDH7kYn5ZsL/FsTWzn2K1eGdVVp4kxvoba2gi6NlFbBpGYzMXnx4JIaI8Ktw6OCKQpADtc9mq3jS+g20MleaDXF8Zl9sUpdCeImNDf5XphS19z/q6db/wL2/N/jBjeRGD/2ajPaIjARaN1SqtUq76RE/u+LalZYHYUvHHYIwBRh4gQA7+tNy29tMUOyfq6JFP2LCAbJByHVhgJcATyMSJMScQw+3pEDuFbblX5Dw4mJeMNdFtV0WmjLUklkK4SLaozEpzYzhCEty/8NZx7F2kJqINSeLeCpaALXNvtJZ5sNgDaEqK5hXo0WIMDKJpJLg7Jkvzq+DeR6RFuCwjsQBfZxw/vjLpy5oEprRLWxkDREu6YC4MGKM068UVItj3w4v0qcBw/hSiCLgmOxF1c/+obQfCmF4Xh6KtETf1hysFg3vuj50K/4cQ7EZYDF0jq8ln0AANxnQ70yY6XTm5SyJwjS8fClE4vIkJo4oKor3T10AzzWYjZX/nB6uUj6Rdw/NRwWTI2n7tyd7E/15fVwNtc2Krh1h5VlnURZe6MYejsMWAsgg/GfRV/bwdre8NloMBjNS1v7aUCsn2VEFswJbTLub/rZkk5HHHntqKTUqpnCVJ6zD0FKjiHavHpBFUno0/odjf57bP5+e9KmXz/BN+he9LpW5mFsrv+30kiBFrORPktHen9kd3zATjBMmdwaD3iWCTqhhDVOVTccGCUhNuIWt7D6MGh+RnsrAkQFdeCUOQbboTRh+Y0zOwb1vMks/+/MIxyORQJTCzl33eOrz401zML4rPA6
RcA2qDBuDw+lLfZZp4ZFWYkb902mi4OuWf8UdNwGbTlnHGCVDpxiIFCNe2jiQohDDlMF/tKLiScd+gzKpJsMS+gAJleu7GL2rp2mMTKmdb0cu4cCgzPcc0j3Wsa7OGzOuyOEGjBn4HjnQVkT/IwZhQh1S7CjNeGZTLo6X3ecYnkGy1Uw6v968Ge4xojdvy6uaYaOoR0bzRneHglw3AqRYt7hcM8qUn9pOjc/qmZX22Wa8Jw2kkpcLTprPDTacS+LIBQ2jbwyWp2t+FoVgDajNMFqEgstwqY/54IdQHjncX/tkAxn6FvzdiW6vmusDlqkFIB1y2PD8yTcmKQyocFQzB672BwDK6kRo7SHcyHA7avcnWltoPNyyxQtk37SOg9mt+rGcBmomM6YVt5ELgis4I7p072lt+3bGxJMfz9c0yvEdjQFOIZGQ6PZSuHhRGewZutR7jfjAFLdknByGPp6WfVXNALOnbTSgNZiUEBI97pHJ5QLOo7doD+KsbMHqZTbnjpTYmhy2MHg1L2RmkWSOF7N9wC+Yy39F/mPE8RwYEkJQZU+rGXNw7KVWM/veEeHOEfl7zHYy4gMvtJZdIASk7BsETOeiOSCkwcEAQglcFVDz+223FCUPRPAVDW7qBhvS1rVlTG/QLfbNd6jVhweQM2M0ppbsJ9Gb9GkbReGJcglpsSIOYL1pQJgZuNARrV6lggL3bVu7gFVwsu0qjOMGCi8xvG1T3XhYwlrjxJ43bpC4hWUPsRJxVIXgL1hj1f0Vdglzu1schD6s/TZ49z+SdvoEVOTwztD19M4KNf1Om9A37su2HePgs0olAmMRJLeCd1zaXM1IOzM5P3OLfVFJ/Md1sEE8TUaoc+hcM0YYN3zaTX4Ef+Yn1zvav4ToHNW9Ck7bI/5g3AoClrGLGQYbQocgUUvq3yZ0YqyaHcPeltDIk6LvUBl7fIYnN+MKzv+ItwYS82LWB1hykW7o9MYviiB5ayQNqmSMvl9/dez13WuRG17ok8IOwDhTs3ttmxjoCysR5E++Nj2g/0KY70Rk4IrURQePhTC7dvGypAXkSUfU8/QxjLG6bWO5rLE6qHYQWvFUWjeBfvOSEsczFE26xYhj6tRY6CVeu4lBlChvMWmaITCPAy9dohC3onb2QlkBN/5vdsApDEd2hwHFX1i0Ckf5hWO8aE08cYuw0l/Qv3cPZ1rDIyrIJ8ld3z05szKjVppduklrRHCGIBk5AK9Q4+lMZ9IAIUpJHnsOqKq6WtppBWsnZ+0erNwHIXbH6u925bPOKKgLy1Vn6FsixchFLllhxFnrg17F7x9krUWkJtgbFHSf85v6bNgWUYiEG0jorelAQe39TdCRUrTuc6aGGGVOEfXV4qX6peqjeaFjUqttWlXuhW6cUg8BIW6hOrp27Jf3AHG+MsSUhgX5N0q3TkRAwW/+HZ999ORoW2EzXv3Rav/Pbyt+5Mf/l+J6/9dX//5LvvlWHlJKpL0ODOCdysPoQ+1L3I4fv58YFct9Mu53/f2XIuy+UakleoM/axqpqZ9R/w81+Q9Y/WbPr29eQmlxCQY+XLe5+Av9pRRQBdJ8p/JdC41DYnQT6q6kctctbhTCD6GUHoztD48tEbzxUMd22Eo+2RtfuYnduHa1ym1n0bma3/gwmKdJsFv9hHaB33T1FAbVGn8+235yJ/NfTEq+xu94IbWILESfPf0VoHnFTdy3Kg/iNBRAxH1HsubM32iQaYZT4ZWzhIvNScpd2mGW8Iztub92wT5PANJxAlj37H4/PA275+vlDDblyUStlQwEgcPz8QRX9XN1kMH7cCXhMg8ei9EzalO83fKC9D7JJnQca2B3wSkuVBwhS+EaS7AV1WXW/2VFhe/w3UDC+8X9vM08EIUJJWZjzeBXKEbYjS8p3FVxAfLPjyC41Bgsw4++OLyk403k7Gu9Oe4IB1btUlqWG6Dd/O3B15e1rPKoeDob8InAPJxRDDMwBgDrH27XDJqRHNd4BadhdgHc8mM0nQ3iK9T0jV1X98co6i3lGEvL1ES9hheVhnDl0QUi9K3kOhgX/auwE6Q3lHJmaLHf352he1n8TO4VSBVlueFB8KBd3fXGzL6lol8vUZemJA412jiYB3dDF6pViPYUDKmOIBsLFBbMgan4b9d9ngWBDv/L6i/GWpE0u0iW3QI+C0qiQIhgt7fZMxAl0q5x1yxs7K4UxK81pHPMVWEIS69ipsFBc0hWCkcRd/a7X6ZbWaFtu1IAS9FazF4tZFyw6Fiu9QMFbh5UZellJXahj1n+bm/cOs7CUnVYmR7Vqd4rfdAUrkv5oEf1fewE4v37HBFPmqqJWIFIySMZ/ZxrXch1T/JgJMJm5OhR48Cw4X7fWNNV1/nM60pOgLQPPk0+fhUPvBOR0ZDMrpNOHAnNYMqpfQMYGY8ia2jFyzPwXOCQEoImSSxN4OUwERfSN9BtwjLEFSYSNgtsGFsF/bMCzvTK5R2Z01vu2YBn0yEEug+0j01aHnn/mBGkZDBkuuXVkX1aAdltVyIjhDSTIs1YuSG3B/ywshT3slupJ3BX0+OwNqtXXbjhCOYNam2GAnvTPc9zqCqO1SnnQYdxkFXS1yFaGHox42dpGcRB/IX9kerZ0RSVtNExI0js9quq0fxjnTzaHKz4mByoqfSHD1743TRRf4bZUIjrKQ2xVs+OtqMuFJDiWo8GJLhFMSE2MnvkOSBS5+NjDMzN58adSOKLXOn24d7xDY8kLMFZeaGYQT9lMbJgopI4KPtK8n5vGYnensXBihYFKuHBEiHVZcJ6d8kAAt9WABG5R7lXJlvoM1+tvcpj939GAILeDoQ8E7jsBCEngkESFpn3ObFpXXxGE3Fha0PW7vjom0RISAFYi9HlY4rAfF+XaYU8J7a9LsHslTwsY+XOtwVnJiNZ04M3DkL7GuHK+sKrkXzF9H0INbu7zR4Y0ATGVA4v9+NqDCcF5Yq8NI96gBjcim5t/jjW0pxenasxaRQteL13r2u7yvaonECqMpVKdPjht8lFXy9BZHKkrdiwIdcreoe+XgzXr2u/xIMgp9vuA1prOKAcFlObn44/PvbwX5YgBWOPbC95hn8H0FwNI6+xbG6fvbacF0zjAy19vTv/eYVseAYOVz++MlBfUyqLa+IYBjPjeCSODjyNwKTEIuLjVkR44dWUt8JcAt44uFpWSktq+kWqpT8/WnKB3j6GhOsVQ0l8HcmIB5CqzQo4FlYK7oY6kI+1jOOulZBRn2k626z1I5ul9Qq4tkNAhdISih93iDq4B3puYs5XvlQUbYjZdTTIwgDDIl6fHR42PHtAtS5D1xRElIqgwumaI07eQoO2S2H0QK6JZa+Pg1ogS+pgN37UAIS+T5+Fni3tPDQ4DulBnkzixn773jrvF3jrLgEgSJkwKbgcY9VU25iI0N29kyhI1SEP6pSHg+rtXLXI3NKPbBro8P0QISB0eSwzdCtp21/QjY054cwJ4TBp0Rmr9GF7mpaVjBo5rQwuxSxpZtoDPCXIQa3wenfJEiEESXFUXs794Tp+u9S5QGhYsygryp
ysOefuoka7ju/Jdd+UAduGtUvAKk6Y34fiuQe2XfRObgbz0TgBA/0eProof0X9DQO1oU0rb6iXX2w7WGtZkSoT8VWNOaJ62DJ0Cbgjds4ggR48bn54HJdJsiREYwvN3J+IQW6PAbVCFAeOPkqg+SAmLLZ2Zhor46ueA8LdAZ8MrE4y6hC2AqIwsa+hRNiNyaQARtEeEKIfRSDhzqWgrw3ESeh+EmMcVqkmO8tpE9yUHAOU1ZCKFI4+Bs5+y9sP9ALMVtHIP0jeCvRtXu7j8n9skg0Gp+dW09yyrD3MDb+d7hCoMJ3EJjdZTsi8piXwvkj+9VsRXmcm/hwj/0bUDbTSt4HgYOW8WH4tByDIVHftsFjZAi8eBiH7s8r2GGUH4vvjQu0wfyHpU5jldh8kqGUXX25ur0+6hn0RvONDo1qJb4fRJDApzUZRGRAFwu3ohSAAuusHlurqqmPlGu1bvalJP6mJZdOSgks/+jFbsoAjtUH+ABPpa+BE8IH4doDSoe30t6/L875AHThtkXDqgdNKLd7gzjQ9GPcHBOUqOl6u0BlX7swMhxcIs49mevLQqnCEUEUIaB6a6X/vuANP62uMzVAVQsJZEPchnRYop4bOtSxk+zZZINWfjIemqQWhefMVRBgM+dOO+2z/n2AaoggYaP5gH61EmH+GNMJm4+loadiFbxb/mjBwaiE7XHILBT7NcH5PyIEv96XgWFMzPy0EF4EzpK78kSR3wLhkYABmQu3t/N4FyrruxLPQO2usF8SW5gjomeRwlFFGxQeUmyTfOA9AKjgMJKiLZhZNBug9t9YmhYQQfj3XfVYghekXDfIF6s4zW0QbGtKYB4Nyc9/L/zvTb1uCUECJspPaT1Rqfe78Y9Yz5ySPGH94pecE7wokkYV1QFIceZhDtw3GnYtzlnBucsHHctGvPDja7r0WeW1r++oMoHqREzMlNhTl9V+uGLiwRp+wvNB+QlZRuBaIWbM46Un8D8EUFnZn/b9+X5+Z8AOjxB/dK87RN/0/RqYBriv0KTn5KfVtAmKnfpKZ7jz1IdzVglPM+Kemy0qyc5pwEalISm8H9GnM5iHOfIN142pveic/t66JPsHLM8v+S4izM1BnuqeDBXbmBa5nnSrwyA/03mfJsh9AarR2spOWtoHoXZ3glAhbuel+ZY4dsW5MGgoT4cC+ieQ8KEleIGBBfo7+BB6mqgMKBrnzradOpN2LZPXfVOSUCF6oO2Ld4RbKEY6l0OyHf6bAWoquKFG2I/2KKkdFY3YTvonDU/YdcD2PWAVYD8cyKQROTidYK/omO2H2jCH6Y1i6KsDhXfq8hhf6kWoKYKaVl+vYcXjEyyLyX+yrij9oLk3hQQ5kWnKo3rPr79CHwgxTb+QOT9QVhg29VVoD660b5h/WlLdv//K0DJq+zO+mxBgcMCq8/Kf3q723qlrbsFoUYyb/25lE0TGN4TDArDbZ1hF5hlf37Q9jUrwqLHAQ10Mc1D6RMTHalZQcw7w/bgqRXCS4Jk9yKNhVCvvCH9/Hu62HD4v9XZsxdaeV5qi6YzMjb1blj8OKz+wpDNSg5xJZ0PztSj5VD5YMGrWavb8RUEu7oIFbu4ag+nGA+hLGOl9ygRuoXa+XvopR4fylNs7/PQdlAJyvfIzYjvWlAmiu1tZ0sU28CiChywCOJDrMI9DQb9krY/Ogw7XmAyaVDODog8pnvR8pLcWf5X4mgD6PyEgIf98N83ZGy8RdM/+Yjtv6sK/8i7bUvJ1lUvQOc3TDSCzOKECvtkkSLGKHStpumIGaJ3wESO9xVQiAzSli8c05GbZyu24/baWL9INTEzItOP7IELsCZb0EbTZQR91Gn7UoOUFnquz0D2ysBDdH9KF3lHmI/HBEERwVyft1XvqjyD4ekZl9+lEKP3Me3taRVxiSQMfI8iwXF631vC9SzezTSycTAcIKrH5QTCaS3Kdep8wWyI05D3raB7WATUmx8pRt3pNTo4MH7IAewA+zdO3fIdTIq9McZIbTUXDFl1cDf4Z1jxAv83GKoOZ5MMYj6nlcR+HM8LR4wcWpvHysSvpdooeMglANf9ouF3HcWq7Y60KIPQgZYRmVZI3AiCNdlBI2zzQnue88adUCyd/7tGEpOGgDFO+qbI1g9FpiFmuo3Z1oVhc64Jg9f0bdAoIVOX7dqjSffs4za9oL9AxFp2dzWb8UD+/pEmAqjoPSLZncpElG5GPvUQBn/oXQaKsotE0Nn5YUSMFZr18hAE3N0Ww8HiQN5miSNdLB2fwuITBvBhBMx0q2slrxyS9wvnuoU73hwUMJbgta1MIXI2wiJmSLabClNzv16WZOYNzs3pLXTq76l5/do/o9wc6SO1lTlmGN10edDpddxVDKQkba9WMxsplFOqYop2epYOJP33n/4FudsRMLi7fRcC2xW2Z3Bmn8PjiZCpDbNLTfK4PYMC3Nj4paDjXN+36LJJcFyGJfZnJsu01rKx5CoKZvZYERJJ6Av92/tL6aPvNF7Rx1DMMvvvNJCsHpIflHayznVErNb3P/23sgVDf+46wafdg3AXAtI57KAyJQczqv0O4xatAq03sb4JAllvF0jsteOxAtJSnne2QEylUMbSPdACtwUgtpakWwdQYjs+i/fQ2hG3QTvtf9XzFq50hguKofyIpltlqFWW6uMCD/WERhKvlQONg9wwLB/Fv/dA7IViuI9RskOhiJj/KiicYKJ1Ww7QDQ9Cx1zj6v9/8CGBClPcaMcbv2cFLYutwtuVI+yvhH3/TrAbTbDJ4PHvYbitRATlIqd9tpKFtKlBcbY267/Fy6ZJVljdJH/Fbcn9N7oMq1eJ4Beghw5qG2e6A0/X+cnTq92lnDEu6eoycFQUJJIsxPjeWrBasZNWPoXLktlkcA3a9co1yNWG+OiuqaLpwizi7oXMu0jRaW4hSKWMOnb7tbUzzam1d2BA6R9EMoQoTAnrzElhXzx34Czmz9J94407XBqsFguGsMUydB08t0CuXPW2Cd9dzj/ZUQ5lAOUecbtT/DLyyy8aCAXknw4jpVaS3YM4j8RKn3REEhIFVH1vR8uZ0Orm0BdOHbpsh/baj0t/LZgtPypItpDe0ldIK3KFyeSXDpKMnGVlhg5/WN7oQK+iGKQ6QyCFMBysGbYlVWtPtwHfa2ndzUsD+z5ZuGm+j5ytZGSzjXbiEzr6ST9ZHu9rpTTVh2Ja7vERKXxLfAkurg2vyb3iwv0UHHA8GPZ2xAuZZ8HQuHSwjKNcbsTrXetyX0UwJkEIM+E5ntfUCMWlxwFuXeis8Z4Wf8+jcEa7P2JiwlOo5sgjp+3JOA2rGHimAD8kpkUJSQn9tUmyNN4/Fwzyv1N0f0qlyBRIKYATZD7hbAhwNyC5HnPWkSgzT/ZDJ1KauWN01xNPJczj/Z4zsewF85IVrEe/uMYm5QWFV8RpJC+r00dM4c4XEl5TObUR0qzeNl7rTeOPwOoN6oWQifDc91pF5qIP1qkxwl2LQnyvRn6nV96GhojUYGS5dt7P+plInXw7kHzklqJqfdl082nhFrbFpfJoZcnBWB+
RTR6SWsRuMO8vmNMQsOmM+feqVK5Zal67etiqk9uTLTkWDHhtc3fMLQcQA9pjxo/mfF7kpYRmXLT/q/WwbsdkhD1sc4HC9yz09+RDAGMB/C6tah6pXNLQzx0mSls7SZ2c8EUO1yX50hVhlB/Z0y/d0AyYLD8zQndyOnKHz4tmpYACS7dDAwIRhi4WuhbtZwG6rcwDVFiXsyWRZDGsgQk/wGXW73rOg95ekHk+BonpPZjZPNO+4YGkP5wU93+3DDf4skVzzX6P4PDvxoIGqOqdSqfGexEauQ/BeO9+yv5ayqNrfsgttCyyzLK3UYi1g+DeEKgzWcrUcnnMF22DnuBP9JyHrYsORBWS9wMTAQsXVB+LfmPDhdtlL50Xx+ye6ZecorwSQHituNWRc5FTlusvqQu3uFcmCKqxmB8DgM5qNxgrpZ97kc1t2I8o6EUSwr29DHwQge9M6P8R/5aJhl01t2qJC2kGli1DwLjFTvqoeyELI9gaNeEoWc1g8FehjK3cXFrgYI5FY0b5kS/a0eK87k1ZNrNeUaOcw4yIY1o+t0lRuavnJwtat9+ZUHskI5xu1l7Uct3OJ2dy8B8bYFnDnlXfMMmw8n5QAdUz1fFG2e1R+tFsZOe5wWq5FYrZ7OrAtfWJQJcJpNzSRs7Dvg/lE1vX2OEiMfFw7t0Y1IQuHxes6YJMyKJczF7bXzN0I5koImue5OnKtjCL9BoTVsZiP0bJ4RFMYYzAAmturXsP0k7tJ+MsGxDf0xDcmBLOZZbIhAiCwxYxSppXPDRavJk9AwAerfuBx7TTZ0MoXbFuv6HGZ3USiGlQwVYrQyOWtUh1675ILLwsPh+0w86yArzewZAh0XzTslXNr+t+ggaF2Glb6J6LQ6+/bwIRNNrKtbQofhVMl/yE3FLxOUGbxjmfxcskmVcaN5gTz/K48HL+zSqUWpdPAbh+fYl8RGOGu/ebGXm5fByu8aFiDJvzPDpa3xY03lMkmABFd5kK5TEpnAmt1ohnfDwUeGDNYEt6vN6murXouKzYwhd/PjEydWsDdr75Gfo5GRsiZEANcgB1yJ3yQgSfM7ZLPqpqN0wwEJV/hFkItgzIfSEnES7AMlv5Q42K0nYh+nyxELcZj38IJS5dnabZeWCuPo27kp4G1lUdfUFQkJNn+hnL+4rHKoYr5ikGKq6VYr6irhinGKTIK0Yoxij8UxkcrOJaKwYpZioOToHcq/EI9eASyArXEmyTI2KBdWbIAWM43HLnvgXAdXd4PyexzR9kS6IL8W1Lh1hu1C8JQgR7L2k7Fg5jExUg713zCc+ncZLIzLRCadZ8AhMCNXrrP3psKzXgJBHNOhARxrEENsN0kd/xlKTAQLN9zlNd7M6MEJJrkb0kxYcabm7jeF6a2b3ZwstgvrevQ3/0La1jCdzLFqgfNOJZluvBfM95lAAx2wJKxOHD/rf3CvzQ298kcRvx46BNNmIlVbaW2q+BwrkZcz0J8NrnFgqfBiB/CgCdzwQg19AqTOwo5pv8mnqpMBLrCXUPJ6pF6epOQyZOJoBLpI16oujytGTeYhoRWMbFq/cM0X0qBQ8HfLJpcf+Ise0YJnnuc6eGnSo3bUbnYrAdII+OEe1c3T11xpGRBLzzp1cgCmC26GJbIHdjN0f+Yifj52k8asWzohmRJwFqJx21yauiIZTEJZ49ugeQuaffGfTy74i3w/dVY7Yn4+Z0yecPH5sZy4gPEK4KZ2jmoMuwX6d0dbJ6sKRAx4odzcM+dRc+WhqGvVg+wjvgn3ZrsJ76vNa5gpfAb3OqbD/KsQHV96PpsfmvACNnwJD7yCXuMxis+Pa5nn74Lj6XUo++0IrTuUwvuo7jKYu7tpkLfm5Wo0rO3EXlLY23pbRvKe4x25PeAc4Yfhtl6bfl+UZaxQnxy3qyuPPxIr5NUCTbLTqCG7BYeX1D4wPYnyyMefDZ9hZwKD83x0iQl2gVc+TtPxDLn/eHdMdrlS4r92RhT7m136WNc9obLS/1xQNs2EKDx/14T4EDeoWUo/dLALPkBtvjUfhox4NrLOY5+DGkkoq2EE+fK2DDnNtLQ1uUMzKJdWoIkalHZW9OIp1y5eMAM/IAEZt5lRPxi84Sod72LZikJQp5J2TRHvjm/i0c0GZoibLJc83rRdCn8AA=\",\"base64\")).toString()),GL)});var Rle=w(qL=>{function nh(t,e){if(typeof t==\"string\")return t;if(t){let r,i;if(Array.isArray(t)){for(r=0;r<t.length;r++)if(i=nh(t[r],e))return i}else for(r in t)if(e.has(r))return nh(t[r],e)}}function bu(t,e,r){throw new Error(r?`No known conditions for \"${e}\" entry in \"${t}\" package`:`Missing \"${e}\" export in \"${t}\" package`)}function Dle(t,e){return e===t?\".\":e[0]===\".\"?e:e.replace(new RegExp(\"^\"+t+\"/\"),\"./\")}function s_e(t,e=\".\",r={}){let{name:i,exports:n}=t;if(n){let{browser:s,require:o,unsafe:a,conditions:l=[]}=r,c=Dle(i,e);if(c[0]!==\".\"&&(c=\"./\"+c),typeof n==\"string\")return c===\".\"?n:bu(i,c);let u=new Set([\"default\",...l]);a||u.add(o?\"require\":\"import\"),a||u.add(s?\"browser\":\"node\");let g,f,h=!1;for(g in n){h=g[0]!==\".\";break}if(h)return c===\".\"?nh(n,u)||bu(i,c,1):bu(i,c);if(f=n[c])return nh(f,u)||bu(i,c,1);for(g in n){if(f=g[g.length-1],f===\"/\"&&c.startsWith(g))return(f=nh(n[g],u))?f+c.substring(g.length):bu(i,c,1);if(f===\"*\"&&c.startsWith(g.slice(0,-1))&&c.substring(g.length-1).length>0)return(f=nh(n[g],u))?f.replace(\"*\",c.substring(g.length-1)):bu(i,c,1)}return bu(i,c)}}function o_e(t,e={}){let r=0,i,n=e.browser,s=e.fields||[\"module\",\"main\"];for(n&&!s.includes(\"browser\")&&s.unshift(\"browser\");r<s.length;r++)if(i=t[s[r]]){if(typeof i!=\"string\")if(typeof 
i==\"object\"&&s[r]==\"browser\"){if(typeof n==\"string\"&&(i=i[n=Dle(t.name,n)],i==null))return n}else continue;return typeof i==\"string\"?\"./\"+i.replace(/^\\.?\\//,\"\"):i}}qL.legacy=o_e;qL.resolve=s_e});var Ule=w((XQt,Mle)=>{var zL;Mle.exports=()=>(typeof zL==\"undefined\"&&(zL=require(\"zlib\").brotliDecompressSync(Buffer.from(\"G10hAKwOjG0Yab+syByiPMj3Q6L/91P/Pz9ftxtaCkm69dCxllpxlwzlCWleq7QCiMzOBOnxaaciKKlEbkt1vTwowg4cBnvOTQ6v///TzIWNIQO4m1IpiO5rsaSx3DTeVsr7/0vTt7VxWoc5ATBBQZ3xpWGBzAAG0Cxj6rO6dzCPECCAPbqxE3V+Ay+/JsUie1t8rnY3FEx3PjtdsQjWtbh0aVKPltwOcp3P60quHwhX3vGre2dp5M9BWjbXTqzkGSb7JAOCIFDkCHdzdg2/so+h6QAEDXi/5bNCnzcbANf9gR8nchF08zZC2tiz4IaIsit+PG/sa6DsrJy+fAbaFgJ+jWssxm4nUWoDpZqWuaTl/9sMrDCw96fOoarS8j9wUYb8YjJNgUMY+JDKCJ9FQO+uukYMId0wwrGTFFJA3EUpzXCFpgHFI/PpCi0etu/WLRL4oSv1pnAGi6KVk016fl10lGn4hAQwg26BalH9YoQJ0OmyatUSJBz7wnzei5EjTW45x7IhK0L2AAdT0ky0X/5laDXRfxID6pCpsKstprVYxIO47BZUgOUzJ9ysBxzcO0f0oM4URmRK/OPDKwClDztMRXM7T4COaj2DoqqJACT1mukaVYHpkK0NqIEJTQUuWMEfZar5scbsU6VCSQBb05UFgnXpY3baEvvdlFqCHI7mPLxJ1WX4b3bydzj2hbxo9e9g9TNw6DKeZyhf6cVRiPbKPvMZ8qnP8B5a9EZzp56asmqKuWNSJuxeSivzrKYcT4s9SOH8qhldXcFXneERU1mu9YVZ5mr+7igGFDXaj90vnJTcr1Hri6MgZ4j/k4yX2PUqqjg3XCNDCTYJ+o53+tNU15DNgvC/PG6+IQaG87AHtJ7+NAIcQljQw/3ACsmiSrPfXa1+3GesILwL4epZQv0HjBkxC1hu8nM2cRqKxxWZxEOMT4aQeDHbT78cEmy+IRB8PmzFEnJPv7ThBPsv2IPRir0cSivDliSsvXhF1lbeI79qWZbOSEc7cwefMoNCR1GYbttWyvBqLe2kWVEj0SvwgMD1/UvSIR0UWuKc0SS/B+V3LnbBqxBtgIgMR+h3pCn4IgBuoVzaJR27QvIhoQS2M5YGDvCNT1FRHZmqHkpa4aDdGUm5eN6jM2VTwxoRa+pUjTGhpukbiTG6AwAZYEkN9aguoSx4sCgHb1XmHjHfcMit9srQlay1sKTIaAGFi5+FjpUhrkLHZQ+sAG0vBeFJ5WqLBS0C7FwXU6/ICtAacCIr60pLubC1MDfqRiGdVrsJF6okGq3x1hzUWqIfkE/0P1g6fSHOLgLi5tNiekBgh6XUhzjj9NMvE0TvMRg7EBaEBh0dcZqMAqSEY240hOeFE5NvC4wzKPXYuNJz1/19xCQXiZsAWBVxXPKoZ6fO5yB0CENDNU4QULqzC9RUYqqIpwTiIAnmvDC+4MRdnk+piN4AYFZdTQFT3KYh4tOZbXCr8fdrU5PttppEAeVPCBvKmnbRQn7bi5j48v+DH1p0lUp+tKquDCMCYewaw67sxomdpb+iNekQAcn9x7NoURWhq+nHCjoeaOFkhFJkvgFh2obz3gk3Qh870MuhaaTHJ1MzKYkObGNHbNYOoW2ooJhFLmb7ULrzTYxWswKzgf31/h7IPbm32Qqe+hRzWGorLZklw2UIanlp1YUrj6MwAs4DN/AHN2qogzfhWNgDiEkcvUtvQFTcRWlDg4q3LfdHdfFT8yJnqNDhxFj2GVrsmtYEwEMFQFQzzOZymRLff/x+4mFbbFl+5Ly4a76Hytg6JYdtRrQ+dwYZlNHfm2wEvPAwIlYSFwjOpEMH+fYuOOh6+49K7Eli1/Q2jwvQXsjKnXi8aRwvc6wHCY8DuCFs8UzVmwzACi1+6quw6RThnsB1kI57SVOvCtri3qisoVeToL2Y9Baqs9DrJnVqwlQ6WbWIKak+9E620Nz/71qUKg3M2wnnbbfaXRIJXbQ3cWA4b4iB+ydcmIZM856R9IRQiVSUYjZbz2cjpD6Olb2Z5TzP91nQNo5/Q+vTpktm0Z7js3YeY/TbzLJKMwfvnbfvfbFgpduBXj7ED99dRnS57bzoSuelPfSYIuMnhL/QZ8w1KcnF2af2vctO9H5JLrNaV+UjwlDtxrjE+geaXJbpfasDJS3wHmZLKAc3Fdq2QZaQI/Rlo5+9E9saUAo/HTgak7oZx5BHE0XzMI0B7cawJ4vf8QDsxabkLDNamLr3dIPslpjWhh4GYG6W+QZsuloc3IWUXc55gQXOzamOhkNA3HAfBXuMbO00DE4vuZGaoVpFSrIf5QynRExGY2cKaTa0B2wvYAVoM0NmsEaMyyS3y+VaTH4HzbR/xYSRfhZXFphd0pAYX1uFkNbznojgKfJsP8r/UR55Pk5Av1jpqwbApXPNaIFlF60jSNui1XwNPGW0iXro2Ut51e/bZZY2/SOMHras0doh5hF6k0rf+Noim8SUvNQNW52hB+YICpq9RD1c3/Q1+AF3SicEsOVw6WgV6fC61s86YzbVTZ+MzCwDuc5CVTcdIrrbF1Dr/3LhJa6R3M6XJh99Hb7oBZK5epJfcZ9qZNWP2Saae5bmGymX7FKXNsJJXATykPE0gd0402Yw4WBlidYmSz7Dk+0uc56VJWCUC7brO9Iz/RrxTpkoAIYctGl1llfEssHho7n+wj25Fh2FWgZsz7VxPxS1oGEK+O9+Xiw8PQau31vfWt9e21zfdkEHo1uLlDbZhrRzdr2XpwQmskOGMw2BOv8CgJpvdihPYZEb+WA8uGeIu4BXWmSrQASBYgLMLujbUOaQL9encmSl7p8qz1RBH8iFp6MqMpvrB+K18syfqPwQTbMfEoxh+OgpLwfho6epE5k+vEcxrmazPR4NB9ujzx/M71olQWlkvqOXIoh2tF9nhLWv0CDo8GHhcoApUXN3VK7TeaVLLKabeuAtTa9yEf6cbKz0M4IKTMeRQmiz0hX++RAp+DMEet3ea91xlD+g1NVIppHJ0nPVUAPfRnLdqd4mtG3Idl7L4uKiNIDIgub6tGxM2TmAutpayo8HzjIoXgw+JMEbeL5Bu7d7at40w5bGj7lSO12dwvkBaMQZIGIUBGmOxBMlAQ==\",\"base64\")).toString()),zL)});var Wle=w((tT,rT)=>{(function(t){tT&&typeof 
tT==\"object\"&&typeof rT!=\"undefined\"?rT.exports=t():typeof define==\"function\"&&define.amd?define([],t):typeof window!=\"undefined\"?window.isWindows=t():typeof global!=\"undefined\"?global.isWindows=t():typeof self!=\"undefined\"?self.isWindows=t():this.isWindows=t()})(function(){\"use strict\";return function(){return process&&(process.platform===\"win32\"||/^(msys|cygwin)$/.test(process.env.OSTYPE))}})});var Xle=w((iSt,zle)=>{\"use strict\";iT.ifExists=E_e;var oh=require(\"util\"),Js=require(\"path\"),_le=Wle(),I_e=/^#!\\s*(?:\\/usr\\/bin\\/env)?\\s*([^ \\t]+)(.*)$/,y_e={createPwshFile:!0,createCmdFile:_le(),fs:require(\"fs\")},w_e=new Map([[\".js\",\"node\"],[\".cjs\",\"node\"],[\".mjs\",\"node\"],[\".cmd\",\"cmd\"],[\".bat\",\"cmd\"],[\".ps1\",\"pwsh\"],[\".sh\",\"sh\"]]);function Vle(t){let e=N(N({},y_e),t),r=e.fs;return e.fs_={chmod:r.chmod?oh.promisify(r.chmod):async()=>{},mkdir:oh.promisify(r.mkdir),readFile:oh.promisify(r.readFile),stat:oh.promisify(r.stat),unlink:oh.promisify(r.unlink),writeFile:oh.promisify(r.writeFile)},e}async function iT(t,e,r){let i=Vle(r);await i.fs_.stat(t),await B_e(t,e,i)}function E_e(t,e,r){return iT(t,e,r).catch(()=>{})}function b_e(t,e){return e.fs_.unlink(t).catch(()=>{})}async function B_e(t,e,r){let i=await S_e(t,r);return await Q_e(e,r),v_e(t,e,i,r)}function Q_e(t,e){return e.fs_.mkdir(Js.dirname(t),{recursive:!0})}function v_e(t,e,r,i){let n=Vle(i),s=[{generator:P_e,extension:\"\"}];return n.createCmdFile&&s.push({generator:x_e,extension:\".cmd\"}),n.createPwshFile&&s.push({generator:D_e,extension:\".ps1\"}),Promise.all(s.map(o=>k_e(t,e+o.extension,r,o.generator,n)))}function R_e(t,e){return b_e(t,e)}function N_e(t,e){return F_e(t,e)}async function S_e(t,e){let n=(await e.fs_.readFile(t,\"utf8\")).trim().split(/\\r*\\n/)[0].match(I_e);if(!n){let s=Js.extname(t).toLowerCase();return{program:w_e.get(s)||null,additionalArgs:\"\"}}return{program:n[1],additionalArgs:n[2]}}async function k_e(t,e,r,i,n){let s=n.preserveSymlinks?\"--preserve-symlinks\":\"\",o=[r.additionalArgs,s].filter(a=>a).join(\" \");return n=Object.assign({},n,{prog:r.program,args:o}),await R_e(e,n),await n.fs_.writeFile(e,i(t,e,n),\"utf8\"),N_e(e,n)}function x_e(t,e,r){let n=Js.relative(Js.dirname(e),t).split(\"/\").join(\"\\\\\"),s=Js.isAbsolute(n)?`\"${n}\"`:`\"%~dp0\\\\${n}\"`,o,a=r.prog,l=r.args||\"\",c=nT(r.nodePath).win32;a?(o=`\"%~dp0\\\\${a}.exe\"`,n=s):(a=s,l=\"\",n=\"\");let u=r.progArgs?`${r.progArgs.join(\" \")} `:\"\",g=c?`@SET NODE_PATH=${c}\\r\n`:\"\";return o?g+=`@IF EXIST ${o} (\\r\n  ${o} ${l} ${n} ${u}%*\\r\n) ELSE (\\r\n  @SETLOCAL\\r\n  @SET PATHEXT=%PATHEXT:;.JS;=;%\\r\n  ${a} ${l} ${n} ${u}%*\\r\n)\\r\n`:g+=`@${a} ${l} ${n} ${u}%*\\r\n`,g}function P_e(t,e,r){let i=Js.relative(Js.dirname(e),t),n=r.prog&&r.prog.split(\"\\\\\").join(\"/\"),s;i=i.split(\"\\\\\").join(\"/\");let o=Js.isAbsolute(i)?`\"${i}\"`:`\"$basedir/${i}\"`,a=r.args||\"\",l=nT(r.nodePath).posix;n?(s=`\"$basedir/${r.prog}\"`,i=o):(n=o,a=\"\",i=\"\");let c=r.progArgs?`${r.progArgs.join(\" \")} `:\"\",u=`#!/bin/sh\nbasedir=$(dirname \"$(echo \"$0\" | sed -e 's,\\\\\\\\,/,g')\")\n\ncase \\`uname\\` in\n    *CYGWIN*) basedir=\\`cygpath -w \"$basedir\"\\`;;\nesac\n\n`,g=r.nodePath?`export NODE_PATH=\"${l}\"\n`:\"\";return s?u+=`${g}if [ -x ${s} ]; then\n  exec ${s} ${a} ${i} ${c}\"$@\"\nelse\n  exec ${n} ${a} ${i} ${c}\"$@\"\nfi\n`:u+=`${g}${n} ${a} ${i} ${c}\"$@\"\nexit $?\n`,u}function D_e(t,e,r){let 
i=Js.relative(Js.dirname(e),t),n=r.prog&&r.prog.split(\"\\\\\").join(\"/\"),s=n&&`\"${n}$exe\"`,o;i=i.split(\"\\\\\").join(\"/\");let a=Js.isAbsolute(i)?`\"${i}\"`:`\"$basedir/${i}\"`,l=r.args||\"\",c=nT(r.nodePath),u=c.win32,g=c.posix;s?(o=`\"$basedir/${r.prog}$exe\"`,i=a):(s=a,l=\"\",i=\"\");let f=r.progArgs?`${r.progArgs.join(\" \")} `:\"\",h=`#!/usr/bin/env pwsh\n$basedir=Split-Path $MyInvocation.MyCommand.Definition -Parent\n\n$exe=\"\"\n${r.nodePath?`$env_node_path=$env:NODE_PATH\n$env:NODE_PATH=\"${u}\"\n`:\"\"}if ($PSVersionTable.PSVersion -lt \"6.0\" -or $IsWindows) {\n  # Fix case when both the Windows and Linux builds of Node\n  # are installed in the same directory\n  $exe=\".exe\"\n}`;return r.nodePath&&(h+=` else {\n  $env:NODE_PATH=\"${g}\"\n}`),o?h+=`\n$ret=0\nif (Test-Path ${o}) {\n  # Support pipeline input\n  if ($MyInvocation.ExpectingInput) {\n    $input | & ${o} ${l} ${i} ${f}$args\n  } else {\n    & ${o} ${l} ${i} ${f}$args\n  }\n  $ret=$LASTEXITCODE\n} else {\n  # Support pipeline input\n  if ($MyInvocation.ExpectingInput) {\n    $input | & ${s} ${l} ${i} ${f}$args\n  } else {\n    & ${s} ${l} ${i} ${f}$args\n  }\n  $ret=$LASTEXITCODE\n}\n${r.nodePath?`$env:NODE_PATH=$env_node_path\n`:\"\"}exit $ret\n`:h+=`\n# Support pipeline input\nif ($MyInvocation.ExpectingInput) {\n  $input | & ${s} ${l} ${i} ${f}$args\n} else {\n  & ${s} ${l} ${i} ${f}$args\n}\n${r.nodePath?`$env:NODE_PATH=$env_node_path\n`:\"\"}exit $LASTEXITCODE\n`,h}function F_e(t,e){return e.fs_.chmod(t,493)}function nT(t){if(!t)return{win32:\"\",posix:\"\"};let e=typeof t==\"string\"?t.split(Js.delimiter):Array.from(t),r={};for(let i=0;i<e.length;i++){let n=e[i].split(\"/\").join(\"\\\\\"),s=_le()?e[i].split(\"\\\\\").join(\"/\").replace(/^([^:\\\\/]*):/,(o,a)=>`/mnt/${a.toLowerCase()}`):e[i];r.win32=r.win32?`${r.win32};${n}`:n,r.posix=r.posix?`${r.posix}:${s}`:s,r[i]={win32:n,posix:s}}return r}zle.exports=iT});var IT=w((Ukt,Cce)=>{Cce.exports=require(\"stream\")});var yce=w((Kkt,mce)=>{\"use strict\";function Ece(t,e){var r=Object.keys(t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(t);e&&(i=i.filter(function(n){return Object.getOwnPropertyDescriptor(t,n).enumerable})),r.push.apply(r,i)}return r}function e5e(t){for(var e=1;e<arguments.length;e++){var r=arguments[e]!=null?arguments[e]:{};e%2?Ece(Object(r),!0).forEach(function(i){$_e(t,i,r[i])}):Object.getOwnPropertyDescriptors?Object.defineProperties(t,Object.getOwnPropertyDescriptors(r)):Ece(Object(r)).forEach(function(i){Object.defineProperty(t,i,Object.getOwnPropertyDescriptor(r,i))})}return t}function $_e(t,e,r){return e in t?Object.defineProperty(t,e,{value:r,enumerable:!0,configurable:!0,writable:!0}):t[e]=r,t}function t5e(t,e){if(!(t instanceof e))throw new TypeError(\"Cannot call a class as a function\")}function Ice(t,e){for(var r=0;r<e.length;r++){var i=e[r];i.enumerable=i.enumerable||!1,i.configurable=!0,\"value\"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}function r5e(t,e,r){return e&&Ice(t.prototype,e),r&&Ice(t,r),t}var i5e=require(\"buffer\"),mb=i5e.Buffer,n5e=require(\"util\"),yT=n5e.inspect,s5e=yT&&yT.custom||\"inspect\";function o5e(t,e,r){mb.prototype.copy.call(t,e,r)}mce.exports=function(){function t(){t5e(this,t),this.head=null,this.tail=null,this.length=0}return r5e(t,[{key:\"push\",value:function(r){var i={data:r,next:null};this.length>0?this.tail.next=i:this.head=i,this.tail=i,++this.length}},{key:\"unshift\",value:function(r){var 
i={data:r,next:this.head};this.length===0&&(this.tail=i),this.head=i,++this.length}},{key:\"shift\",value:function(){if(this.length!==0){var r=this.head.data;return this.length===1?this.head=this.tail=null:this.head=this.head.next,--this.length,r}}},{key:\"clear\",value:function(){this.head=this.tail=null,this.length=0}},{key:\"join\",value:function(r){if(this.length===0)return\"\";for(var i=this.head,n=\"\"+i.data;i=i.next;)n+=r+i.data;return n}},{key:\"concat\",value:function(r){if(this.length===0)return mb.alloc(0);for(var i=mb.allocUnsafe(r>>>0),n=this.head,s=0;n;)o5e(n.data,i,s),s+=n.data.length,n=n.next;return i}},{key:\"consume\",value:function(r,i){var n;return r<this.head.data.length?(n=this.head.data.slice(0,r),this.head.data=this.head.data.slice(r)):r===this.head.data.length?n=this.shift():n=i?this._getString(r):this._getBuffer(r),n}},{key:\"first\",value:function(){return this.head.data}},{key:\"_getString\",value:function(r){var i=this.head,n=1,s=i.data;for(r-=s.length;i=i.next;){var o=i.data,a=r>o.length?o.length:r;if(a===o.length?s+=o:s+=o.slice(0,r),r-=a,r===0){a===o.length?(++n,i.next?this.head=i.next:this.head=this.tail=null):(this.head=i,i.data=o.slice(a));break}++n}return this.length-=n,s}},{key:\"_getBuffer\",value:function(r){var i=mb.allocUnsafe(r),n=this.head,s=1;for(n.data.copy(i),r-=n.data.length;n=n.next;){var o=n.data,a=r>o.length?o.length:r;if(o.copy(i,i.length-r,0,a),r-=a,r===0){a===o.length?(++s,n.next?this.head=n.next:this.head=this.tail=null):(this.head=n,n.data=o.slice(a));break}++s}return this.length-=s,i}},{key:s5e,value:function(r,i){return yT(this,e5e({},i,{depth:0,customInspect:!1}))}}]),t}()});var BT=w((Hkt,wce)=>{\"use strict\";function a5e(t,e){var r=this,i=this._readableState&&this._readableState.destroyed,n=this._writableState&&this._writableState.destroyed;return i||n?(e?e(t):t&&(this._writableState?this._writableState.errorEmitted||(this._writableState.errorEmitted=!0,process.nextTick(wT,this,t)):process.nextTick(wT,this,t)),this):(this._readableState&&(this._readableState.destroyed=!0),this._writableState&&(this._writableState.destroyed=!0),this._destroy(t||null,function(s){!e&&s?r._writableState?r._writableState.errorEmitted?process.nextTick(Eb,r):(r._writableState.errorEmitted=!0,process.nextTick(Bce,r,s)):process.nextTick(Bce,r,s):e?(process.nextTick(Eb,r),e(s)):process.nextTick(Eb,r)}),this)}function Bce(t,e){wT(t,e),Eb(t)}function Eb(t){t._writableState&&!t._writableState.emitClose||t._readableState&&!t._readableState.emitClose||t.emit(\"close\")}function A5e(){this._readableState&&(this._readableState.destroyed=!1,this._readableState.reading=!1,this._readableState.ended=!1,this._readableState.endEmitted=!1),this._writableState&&(this._writableState.destroyed=!1,this._writableState.ended=!1,this._writableState.ending=!1,this._writableState.finalCalled=!1,this._writableState.prefinished=!1,this._writableState.finished=!1,this._writableState.errorEmitted=!1)}function wT(t,e){t.emit(\"error\",e)}function l5e(t,e){var r=t._readableState,i=t._writableState;r&&r.autoDestroy||i&&i.autoDestroy?t.destroy(e):t.emit(\"error\",e)}wce.exports={destroy:a5e,undestroy:A5e,errorOrDestroy:l5e}});var Nl=w((jkt,bce)=>{\"use strict\";var Qce={};function Ws(t,e,r){r||(r=Error);function i(s,o,a){return typeof e==\"string\"?e:e(s,o,a)}class n extends r{constructor(o,a,l){super(i(o,a,l))}}n.prototype.name=r.name,n.prototype.code=t,Qce[t]=n}function vce(t,e){if(Array.isArray(t)){let r=t.length;return t=t.map(i=>String(i)),r>2?`one of ${e} ${t.slice(0,r-1).join(\", 
\")}, or `+t[r-1]:r===2?`one of ${e} ${t[0]} or ${t[1]}`:`of ${e} ${t[0]}`}else return`of ${e} ${String(t)}`}function c5e(t,e,r){return t.substr(!r||r<0?0:+r,e.length)===e}function u5e(t,e,r){return(r===void 0||r>t.length)&&(r=t.length),t.substring(r-e.length,r)===e}function g5e(t,e,r){return typeof r!=\"number\"&&(r=0),r+e.length>t.length?!1:t.indexOf(e,r)!==-1}Ws(\"ERR_INVALID_OPT_VALUE\",function(t,e){return'The value \"'+e+'\" is invalid for option \"'+t+'\"'},TypeError);Ws(\"ERR_INVALID_ARG_TYPE\",function(t,e,r){let i;typeof e==\"string\"&&c5e(e,\"not \")?(i=\"must not be\",e=e.replace(/^not /,\"\")):i=\"must be\";let n;if(u5e(t,\" argument\"))n=`The ${t} ${i} ${vce(e,\"type\")}`;else{let s=g5e(t,\".\")?\"property\":\"argument\";n=`The \"${t}\" ${s} ${i} ${vce(e,\"type\")}`}return n+=`. Received type ${typeof r}`,n},TypeError);Ws(\"ERR_STREAM_PUSH_AFTER_EOF\",\"stream.push() after EOF\");Ws(\"ERR_METHOD_NOT_IMPLEMENTED\",function(t){return\"The \"+t+\" method is not implemented\"});Ws(\"ERR_STREAM_PREMATURE_CLOSE\",\"Premature close\");Ws(\"ERR_STREAM_DESTROYED\",function(t){return\"Cannot call \"+t+\" after a stream was destroyed\"});Ws(\"ERR_MULTIPLE_CALLBACK\",\"Callback called multiple times\");Ws(\"ERR_STREAM_CANNOT_PIPE\",\"Cannot pipe, not readable\");Ws(\"ERR_STREAM_WRITE_AFTER_END\",\"write after end\");Ws(\"ERR_STREAM_NULL_VALUES\",\"May not write null values to stream\",TypeError);Ws(\"ERR_UNKNOWN_ENCODING\",function(t){return\"Unknown encoding: \"+t},TypeError);Ws(\"ERR_STREAM_UNSHIFT_AFTER_END_EVENT\",\"stream.unshift() after end event\");bce.exports.codes=Qce});var bT=w((Gkt,Sce)=>{\"use strict\";var f5e=Nl().codes.ERR_INVALID_OPT_VALUE;function h5e(t,e,r){return t.highWaterMark!=null?t.highWaterMark:e?t[r]:null}function p5e(t,e,r,i){var n=h5e(e,i,r);if(n!=null){if(!(isFinite(n)&&Math.floor(n)===n)||n<0){var s=i?r:\"highWaterMark\";throw new f5e(s,n)}return Math.floor(n)}return t.objectMode?16:16*1024}Sce.exports={getHighWaterMark:p5e}});var kce=w((Ykt,QT)=>{typeof Object.create==\"function\"?QT.exports=function(e,r){r&&(e.super_=r,e.prototype=Object.create(r.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}))}:QT.exports=function(e,r){if(r){e.super_=r;var i=function(){};i.prototype=r.prototype,e.prototype=new i,e.prototype.constructor=e}}});var Ll=w((qkt,vT)=>{try{if(ST=require(\"util\"),typeof ST.inherits!=\"function\")throw\"\";vT.exports=ST.inherits}catch(t){vT.exports=kce()}var ST});var Pce=w((Jkt,xce)=>{xce.exports=require(\"util\").deprecate});var PT=w((Wkt,Dce)=>{\"use strict\";Dce.exports=jr;function Rce(t){var e=this;this.next=null,this.entry=null,this.finish=function(){d5e(e,t)}}var lh;jr.WritableState=_m;var C5e={deprecate:Pce()},Fce=IT(),Ib=require(\"buffer\").Buffer,m5e=global.Uint8Array||function(){};function E5e(t){return Ib.from(t)}function I5e(t){return Ib.isBuffer(t)||t instanceof m5e}var kT=BT(),y5e=bT(),w5e=y5e.getHighWaterMark,Tl=Nl().codes,B5e=Tl.ERR_INVALID_ARG_TYPE,b5e=Tl.ERR_METHOD_NOT_IMPLEMENTED,Q5e=Tl.ERR_MULTIPLE_CALLBACK,v5e=Tl.ERR_STREAM_CANNOT_PIPE,S5e=Tl.ERR_STREAM_DESTROYED,k5e=Tl.ERR_STREAM_NULL_VALUES,x5e=Tl.ERR_STREAM_WRITE_AFTER_END,P5e=Tl.ERR_UNKNOWN_ENCODING,ch=kT.errorOrDestroy;Ll()(jr,Fce);function D5e(){}function _m(t,e,r){lh=lh||vu(),t=t||{},typeof r!=\"boolean\"&&(r=e instanceof 
lh),this.objectMode=!!t.objectMode,r&&(this.objectMode=this.objectMode||!!t.writableObjectMode),this.highWaterMark=w5e(this,t,\"writableHighWaterMark\",r),this.finalCalled=!1,this.needDrain=!1,this.ending=!1,this.ended=!1,this.finished=!1,this.destroyed=!1;var i=t.decodeStrings===!1;this.decodeStrings=!i,this.defaultEncoding=t.defaultEncoding||\"utf8\",this.length=0,this.writing=!1,this.corked=0,this.sync=!0,this.bufferProcessing=!1,this.onwrite=function(n){R5e(e,n)},this.writecb=null,this.writelen=0,this.bufferedRequest=null,this.lastBufferedRequest=null,this.pendingcb=0,this.prefinished=!1,this.errorEmitted=!1,this.emitClose=t.emitClose!==!1,this.autoDestroy=!!t.autoDestroy,this.bufferedRequestCount=0,this.corkedRequestsFree=new Rce(this)}_m.prototype.getBuffer=function(){for(var e=this.bufferedRequest,r=[];e;)r.push(e),e=e.next;return r};(function(){try{Object.defineProperty(_m.prototype,\"buffer\",{get:C5e.deprecate(function(){return this.getBuffer()},\"_writableState.buffer is deprecated. Use _writableState.getBuffer instead.\",\"DEP0003\")})}catch(t){}})();var yb;typeof Symbol==\"function\"&&Symbol.hasInstance&&typeof Function.prototype[Symbol.hasInstance]==\"function\"?(yb=Function.prototype[Symbol.hasInstance],Object.defineProperty(jr,Symbol.hasInstance,{value:function(e){return yb.call(this,e)?!0:this!==jr?!1:e&&e._writableState instanceof _m}})):yb=function(e){return e instanceof this};function jr(t){lh=lh||vu();var e=this instanceof lh;if(!e&&!yb.call(jr,this))return new jr(t);this._writableState=new _m(t,this,e),this.writable=!0,t&&(typeof t.write==\"function\"&&(this._write=t.write),typeof t.writev==\"function\"&&(this._writev=t.writev),typeof t.destroy==\"function\"&&(this._destroy=t.destroy),typeof t.final==\"function\"&&(this._final=t.final)),Fce.call(this)}jr.prototype.pipe=function(){ch(this,new v5e)};function F5e(t,e){var r=new x5e;ch(t,r),process.nextTick(e,r)}function N5e(t,e,r,i){var n;return r===null?n=new k5e:typeof r!=\"string\"&&!e.objectMode&&(n=new B5e(\"chunk\",[\"string\",\"Buffer\"],r)),n?(ch(t,n),process.nextTick(i,n),!1):!0}jr.prototype.write=function(t,e,r){var i=this._writableState,n=!1,s=!i.objectMode&&I5e(t);return s&&!Ib.isBuffer(t)&&(t=E5e(t)),typeof e==\"function\"&&(r=e,e=null),s?e=\"buffer\":e||(e=i.defaultEncoding),typeof r!=\"function\"&&(r=D5e),i.ending?F5e(this,r):(s||N5e(this,i,t,r))&&(i.pendingcb++,n=L5e(this,i,s,t,e,r)),n};jr.prototype.cork=function(){this._writableState.corked++};jr.prototype.uncork=function(){var t=this._writableState;t.corked&&(t.corked--,!t.writing&&!t.corked&&!t.bufferProcessing&&t.bufferedRequest&&Nce(this,t))};jr.prototype.setDefaultEncoding=function(e){if(typeof e==\"string\"&&(e=e.toLowerCase()),!([\"hex\",\"utf8\",\"utf-8\",\"ascii\",\"binary\",\"base64\",\"ucs2\",\"ucs-2\",\"utf16le\",\"utf-16le\",\"raw\"].indexOf((e+\"\").toLowerCase())>-1))throw new P5e(e);return this._writableState.defaultEncoding=e,this};Object.defineProperty(jr.prototype,\"writableBuffer\",{enumerable:!1,get:function(){return this._writableState&&this._writableState.getBuffer()}});function T5e(t,e,r){return!t.objectMode&&t.decodeStrings!==!1&&typeof e==\"string\"&&(e=Ib.from(e,r)),e}Object.defineProperty(jr.prototype,\"writableHighWaterMark\",{enumerable:!1,get:function(){return this._writableState.highWaterMark}});function L5e(t,e,r,i,n,s){if(!r){var o=T5e(e,i,n);i!==o&&(r=!0,n=\"buffer\",i=o)}var a=e.objectMode?1:i.length;e.length+=a;var l=e.length<e.highWaterMark;if(l||(e.needDrain=!0),e.writing||e.corked){var 
c=e.lastBufferedRequest;e.lastBufferedRequest={chunk:i,encoding:n,isBuf:r,callback:s,next:null},c?c.next=e.lastBufferedRequest:e.bufferedRequest=e.lastBufferedRequest,e.bufferedRequestCount+=1}else xT(t,e,!1,a,i,n,s);return l}function xT(t,e,r,i,n,s,o){e.writelen=i,e.writecb=o,e.writing=!0,e.sync=!0,e.destroyed?e.onwrite(new S5e(\"write\")):r?t._writev(n,e.onwrite):t._write(n,s,e.onwrite),e.sync=!1}function O5e(t,e,r,i,n){--e.pendingcb,r?(process.nextTick(n,i),process.nextTick(Vm,t,e),t._writableState.errorEmitted=!0,ch(t,i)):(n(i),t._writableState.errorEmitted=!0,ch(t,i),Vm(t,e))}function M5e(t){t.writing=!1,t.writecb=null,t.length-=t.writelen,t.writelen=0}function R5e(t,e){var r=t._writableState,i=r.sync,n=r.writecb;if(typeof n!=\"function\")throw new Q5e;if(M5e(r),e)O5e(t,r,i,e,n);else{var s=Tce(r)||t.destroyed;!s&&!r.corked&&!r.bufferProcessing&&r.bufferedRequest&&Nce(t,r),i?process.nextTick(Lce,t,r,s,n):Lce(t,r,s,n)}}function Lce(t,e,r,i){r||U5e(t,e),e.pendingcb--,i(),Vm(t,e)}function U5e(t,e){e.length===0&&e.needDrain&&(e.needDrain=!1,t.emit(\"drain\"))}function Nce(t,e){e.bufferProcessing=!0;var r=e.bufferedRequest;if(t._writev&&r&&r.next){var i=e.bufferedRequestCount,n=new Array(i),s=e.corkedRequestsFree;s.entry=r;for(var o=0,a=!0;r;)n[o]=r,r.isBuf||(a=!1),r=r.next,o+=1;n.allBuffers=a,xT(t,e,!0,e.length,n,\"\",s.finish),e.pendingcb++,e.lastBufferedRequest=null,s.next?(e.corkedRequestsFree=s.next,s.next=null):e.corkedRequestsFree=new Rce(e),e.bufferedRequestCount=0}else{for(;r;){var l=r.chunk,c=r.encoding,u=r.callback,g=e.objectMode?1:l.length;if(xT(t,e,!1,g,l,c,u),r=r.next,e.bufferedRequestCount--,e.writing)break}r===null&&(e.lastBufferedRequest=null)}e.bufferedRequest=r,e.bufferProcessing=!1}jr.prototype._write=function(t,e,r){r(new b5e(\"_write()\"))};jr.prototype._writev=null;jr.prototype.end=function(t,e,r){var i=this._writableState;return typeof t==\"function\"?(r=t,t=null,e=null):typeof e==\"function\"&&(r=e,e=null),t!=null&&this.write(t,e),i.corked&&(i.corked=1,this.uncork()),i.ending||K5e(this,i,r),this};Object.defineProperty(jr.prototype,\"writableLength\",{enumerable:!1,get:function(){return this._writableState.length}});function Tce(t){return t.ending&&t.length===0&&t.bufferedRequest===null&&!t.finished&&!t.writing}function H5e(t,e){t._final(function(r){e.pendingcb--,r&&ch(t,r),e.prefinished=!0,t.emit(\"prefinish\"),Vm(t,e)})}function j5e(t,e){!e.prefinished&&!e.finalCalled&&(typeof t._final==\"function\"&&!e.destroyed?(e.pendingcb++,e.finalCalled=!0,process.nextTick(H5e,t,e)):(e.prefinished=!0,t.emit(\"prefinish\")))}function Vm(t,e){var r=Tce(e);if(r&&(j5e(t,e),e.pendingcb===0&&(e.finished=!0,t.emit(\"finish\"),e.autoDestroy))){var i=t._readableState;(!i||i.autoDestroy&&i.endEmitted)&&t.destroy()}return r}function K5e(t,e,r){e.ending=!0,Vm(t,e),r&&(e.finished?process.nextTick(r):t.once(\"finish\",r)),e.ended=!0,t.writable=!1}function d5e(t,e,r){var i=t.entry;for(t.entry=null;i;){var n=i.callback;e.pendingcb--,n(r),i=i.next}e.corkedRequestsFree.next=t}Object.defineProperty(jr.prototype,\"destroyed\",{enumerable:!1,get:function(){return this._writableState===void 0?!1:this._writableState.destroyed},set:function(e){!this._writableState||(this._writableState.destroyed=e)}});jr.prototype.destroy=kT.destroy;jr.prototype._undestroy=kT.undestroy;jr.prototype._destroy=function(t,e){e(t)}});var vu=w((zkt,Oce)=>{\"use strict\";var G5e=Object.keys||function(t){var e=[];for(var r in t)e.push(r);return e};Oce.exports=pa;var 
Mce=DT(),RT=PT();Ll()(pa,Mce);for(FT=G5e(RT.prototype),wb=0;wb<FT.length;wb++)Bb=FT[wb],pa.prototype[Bb]||(pa.prototype[Bb]=RT.prototype[Bb]);var FT,Bb,wb;function pa(t){if(!(this instanceof pa))return new pa(t);Mce.call(this,t),RT.call(this,t),this.allowHalfOpen=!0,t&&(t.readable===!1&&(this.readable=!1),t.writable===!1&&(this.writable=!1),t.allowHalfOpen===!1&&(this.allowHalfOpen=!1,this.once(\"end\",Y5e)))}Object.defineProperty(pa.prototype,\"writableHighWaterMark\",{enumerable:!1,get:function(){return this._writableState.highWaterMark}});Object.defineProperty(pa.prototype,\"writableBuffer\",{enumerable:!1,get:function(){return this._writableState&&this._writableState.getBuffer()}});Object.defineProperty(pa.prototype,\"writableLength\",{enumerable:!1,get:function(){return this._writableState.length}});function Y5e(){this._writableState.ended||process.nextTick(q5e,this)}function q5e(t){t.end()}Object.defineProperty(pa.prototype,\"destroyed\",{enumerable:!1,get:function(){return this._readableState===void 0||this._writableState===void 0?!1:this._readableState.destroyed&&this._writableState.destroyed},set:function(e){this._readableState===void 0||this._writableState===void 0||(this._readableState.destroyed=e,this._writableState.destroyed=e)}})});var Hce=w((NT,Uce)=>{var bb=require(\"buffer\"),BA=bb.Buffer;function Kce(t,e){for(var r in t)e[r]=t[r]}BA.from&&BA.alloc&&BA.allocUnsafe&&BA.allocUnsafeSlow?Uce.exports=bb:(Kce(bb,NT),NT.Buffer=uh);function uh(t,e,r){return BA(t,e,r)}Kce(BA,uh);uh.from=function(t,e,r){if(typeof t==\"number\")throw new TypeError(\"Argument must not be a number\");return BA(t,e,r)};uh.alloc=function(t,e,r){if(typeof t!=\"number\")throw new TypeError(\"Argument must be a number\");var i=BA(t);return e!==void 0?typeof r==\"string\"?i.fill(e,r):i.fill(e):i.fill(0),i};uh.allocUnsafe=function(t){if(typeof t!=\"number\")throw new TypeError(\"Argument must be a number\");return BA(t)};uh.allocUnsafeSlow=function(t){if(typeof t!=\"number\")throw new TypeError(\"Argument must be a number\");return bb.SlowBuffer(t)}});var OT=w(jce=>{\"use strict\";var LT=Hce().Buffer,Gce=LT.isEncoding||function(t){switch(t=\"\"+t,t&&t.toLowerCase()){case\"hex\":case\"utf8\":case\"utf-8\":case\"ascii\":case\"binary\":case\"base64\":case\"ucs2\":case\"ucs-2\":case\"utf16le\":case\"utf-16le\":case\"raw\":return!0;default:return!1}};function J5e(t){if(!t)return\"utf8\";for(var e;;)switch(t){case\"utf8\":case\"utf-8\":return\"utf8\";case\"ucs2\":case\"ucs-2\":case\"utf16le\":case\"utf-16le\":return\"utf16le\";case\"latin1\":case\"binary\":return\"latin1\";case\"base64\":case\"ascii\":case\"hex\":return t;default:if(e)return;t=(\"\"+t).toLowerCase(),e=!0}}function W5e(t){var e=J5e(t);if(typeof e!=\"string\"&&(LT.isEncoding===Gce||!Gce(t)))throw new Error(\"Unknown encoding: \"+t);return e||t}jce.StringDecoder=Xm;function Xm(t){this.encoding=W5e(t);var e;switch(this.encoding){case\"utf16le\":this.text=_5e,this.end=V5e,e=4;break;case\"utf8\":this.fillLast=z5e,e=4;break;case\"base64\":this.text=X5e,this.end=Z5e,e=3;break;default:this.write=$5e,this.end=e6e;return}this.lastNeed=0,this.lastTotal=0,this.lastChar=LT.allocUnsafe(e)}Xm.prototype.write=function(t){if(t.length===0)return\"\";var e,r;if(this.lastNeed){if(e=this.fillLast(t),e===void 0)return\"\";r=this.lastNeed,this.lastNeed=0}else r=0;return r<t.length?e?e+this.text(t,r):this.text(t,r):e||\"\"};Xm.prototype.end=t6e;Xm.prototype.text=r6e;Xm.prototype.fillLast=function(t){if(this.lastNeed<=t.length)return 
t.copy(this.lastChar,this.lastTotal-this.lastNeed,0,this.lastNeed),this.lastChar.toString(this.encoding,0,this.lastTotal);t.copy(this.lastChar,this.lastTotal-this.lastNeed,0,t.length),this.lastNeed-=t.length};function TT(t){return t<=127?0:t>>5==6?2:t>>4==14?3:t>>3==30?4:t>>6==2?-1:-2}function i6e(t,e,r){var i=e.length-1;if(i<r)return 0;var n=TT(e[i]);return n>=0?(n>0&&(t.lastNeed=n-1),n):--i<r||n===-2?0:(n=TT(e[i]),n>=0?(n>0&&(t.lastNeed=n-2),n):--i<r||n===-2?0:(n=TT(e[i]),n>=0?(n>0&&(n===2?n=0:t.lastNeed=n-3),n):0))}function n6e(t,e,r){if((e[0]&192)!=128)return t.lastNeed=0,\"\\uFFFD\";if(t.lastNeed>1&&e.length>1){if((e[1]&192)!=128)return t.lastNeed=1,\"\\uFFFD\";if(t.lastNeed>2&&e.length>2&&(e[2]&192)!=128)return t.lastNeed=2,\"\\uFFFD\"}}function z5e(t){var e=this.lastTotal-this.lastNeed,r=n6e(this,t,e);if(r!==void 0)return r;if(this.lastNeed<=t.length)return t.copy(this.lastChar,e,0,this.lastNeed),this.lastChar.toString(this.encoding,0,this.lastTotal);t.copy(this.lastChar,e,0,t.length),this.lastNeed-=t.length}function r6e(t,e){var r=i6e(this,t,e);if(!this.lastNeed)return t.toString(\"utf8\",e);this.lastTotal=r;var i=t.length-(r-this.lastNeed);return t.copy(this.lastChar,0,i),t.toString(\"utf8\",e,i)}function t6e(t){var e=t&&t.length?this.write(t):\"\";return this.lastNeed?e+\"\\uFFFD\":e}function _5e(t,e){if((t.length-e)%2==0){var r=t.toString(\"utf16le\",e);if(r){var i=r.charCodeAt(r.length-1);if(i>=55296&&i<=56319)return this.lastNeed=2,this.lastTotal=4,this.lastChar[0]=t[t.length-2],this.lastChar[1]=t[t.length-1],r.slice(0,-1)}return r}return this.lastNeed=1,this.lastTotal=2,this.lastChar[0]=t[t.length-1],t.toString(\"utf16le\",e,t.length-1)}function V5e(t){var e=t&&t.length?this.write(t):\"\";if(this.lastNeed){var r=this.lastTotal-this.lastNeed;return e+this.lastChar.toString(\"utf16le\",0,r)}return e}function X5e(t,e){var r=(t.length-e)%3;return r===0?t.toString(\"base64\",e):(this.lastNeed=3-r,this.lastTotal=3,r===1?this.lastChar[0]=t[t.length-1]:(this.lastChar[0]=t[t.length-2],this.lastChar[1]=t[t.length-1]),t.toString(\"base64\",e,t.length-r))}function Z5e(t){var e=t&&t.length?this.write(t):\"\";return this.lastNeed?e+this.lastChar.toString(\"base64\",0,3-this.lastNeed):e}function $5e(t){return t.toString(this.encoding)}function e6e(t){return t&&t.length?this.write(t):\"\"}});var Qb=w((Vkt,Yce)=>{\"use strict\";var qce=Nl().codes.ERR_STREAM_PREMATURE_CLOSE;function s6e(t){var e=!1;return function(){if(!e){e=!0;for(var r=arguments.length,i=new Array(r),n=0;n<r;n++)i[n]=arguments[n];t.apply(this,i)}}}function o6e(){}function a6e(t){return t.setHeader&&typeof t.abort==\"function\"}function Jce(t,e,r){if(typeof e==\"function\")return Jce(t,null,e);e||(e={}),r=s6e(r||o6e);var i=e.readable||e.readable!==!1&&t.readable,n=e.writable||e.writable!==!1&&t.writable,s=function(){t.writable||a()},o=t._writableState&&t._writableState.finished,a=function(){n=!1,o=!0,i||r.call(t)},l=t._readableState&&t._readableState.endEmitted,c=function(){i=!1,l=!0,n||r.call(t)},u=function(p){r.call(t,p)},g=function(){var p;if(i&&!l)return(!t._readableState||!t._readableState.ended)&&(p=new qce),r.call(t,p);if(n&&!o)return(!t._writableState||!t._writableState.ended)&&(p=new qce),r.call(t,p)},f=function(){t.req.on(\"finish\",a)};return 
a6e(t)?(t.on(\"complete\",a),t.on(\"abort\",g),t.req?f():t.on(\"request\",f)):n&&!t._writableState&&(t.on(\"end\",s),t.on(\"close\",s)),t.on(\"end\",c),t.on(\"finish\",a),e.error!==!1&&t.on(\"error\",u),t.on(\"close\",g),function(){t.removeListener(\"complete\",a),t.removeListener(\"abort\",g),t.removeListener(\"request\",f),t.req&&t.req.removeListener(\"finish\",a),t.removeListener(\"end\",s),t.removeListener(\"close\",s),t.removeListener(\"finish\",a),t.removeListener(\"end\",c),t.removeListener(\"error\",u),t.removeListener(\"close\",g)}}Yce.exports=Jce});var zce=w((Xkt,Wce)=>{\"use strict\";var vb;function Ol(t,e,r){return e in t?Object.defineProperty(t,e,{value:r,enumerable:!0,configurable:!0,writable:!0}):t[e]=r,t}var A6e=Qb(),Ml=Symbol(\"lastResolve\"),Su=Symbol(\"lastReject\"),Zm=Symbol(\"error\"),Sb=Symbol(\"ended\"),ku=Symbol(\"lastPromise\"),MT=Symbol(\"handlePromise\"),xu=Symbol(\"stream\");function Ul(t,e){return{value:t,done:e}}function l6e(t){var e=t[Ml];if(e!==null){var r=t[xu].read();r!==null&&(t[ku]=null,t[Ml]=null,t[Su]=null,e(Ul(r,!1)))}}function c6e(t){process.nextTick(l6e,t)}function u6e(t,e){return function(r,i){t.then(function(){if(e[Sb]){r(Ul(void 0,!0));return}e[MT](r,i)},i)}}var g6e=Object.getPrototypeOf(function(){}),f6e=Object.setPrototypeOf((vb={get stream(){return this[xu]},next:function(){var e=this,r=this[Zm];if(r!==null)return Promise.reject(r);if(this[Sb])return Promise.resolve(Ul(void 0,!0));if(this[xu].destroyed)return new Promise(function(o,a){process.nextTick(function(){e[Zm]?a(e[Zm]):o(Ul(void 0,!0))})});var i=this[ku],n;if(i)n=new Promise(u6e(i,this));else{var s=this[xu].read();if(s!==null)return Promise.resolve(Ul(s,!1));n=new Promise(this[MT])}return this[ku]=n,n}},Ol(vb,Symbol.asyncIterator,function(){return this}),Ol(vb,\"return\",function(){var e=this;return new Promise(function(r,i){e[xu].destroy(null,function(n){if(n){i(n);return}r(Ul(void 0,!0))})})}),vb),g6e),h6e=function(e){var r,i=Object.create(f6e,(r={},Ol(r,xu,{value:e,writable:!0}),Ol(r,Ml,{value:null,writable:!0}),Ol(r,Su,{value:null,writable:!0}),Ol(r,Zm,{value:null,writable:!0}),Ol(r,Sb,{value:e._readableState.endEmitted,writable:!0}),Ol(r,MT,{value:function(s,o){var a=i[xu].read();a?(i[ku]=null,i[Ml]=null,i[Su]=null,s(Ul(a,!1))):(i[Ml]=s,i[Su]=o)},writable:!0}),r));return i[ku]=null,A6e(e,function(n){if(n&&n.code!==\"ERR_STREAM_PREMATURE_CLOSE\"){var s=i[Su];s!==null&&(i[ku]=null,i[Ml]=null,i[Su]=null,s(n)),i[Zm]=n;return}var o=i[Ml];o!==null&&(i[ku]=null,i[Ml]=null,i[Su]=null,o(Ul(void 0,!0))),i[Sb]=!0}),e.on(\"readable\",c6e.bind(null,i)),i};Wce.exports=h6e});var Zce=w((Zkt,_ce)=>{\"use strict\";function Vce(t,e,r,i,n,s,o){try{var a=t[s](o),l=a.value}catch(c){r(c);return}a.done?e(l):Promise.resolve(l).then(i,n)}function p6e(t){return function(){var e=this,r=arguments;return new Promise(function(i,n){var s=t.apply(e,r);function o(l){Vce(s,i,n,o,a,\"next\",l)}function a(l){Vce(s,i,n,o,a,\"throw\",l)}o(void 0)})}}function Xce(t,e){var r=Object.keys(t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(t);e&&(i=i.filter(function(n){return Object.getOwnPropertyDescriptor(t,n).enumerable})),r.push.apply(r,i)}return r}function C6e(t){for(var e=1;e<arguments.length;e++){var r=arguments[e]!=null?arguments[e]:{};e%2?Xce(Object(r),!0).forEach(function(i){d6e(t,i,r[i])}):Object.getOwnPropertyDescriptors?Object.defineProperties(t,Object.getOwnPropertyDescriptors(r)):Xce(Object(r)).forEach(function(i){Object.defineProperty(t,i,Object.getOwnPropertyDescriptor(r,i))})}return 
t}function d6e(t,e,r){return e in t?Object.defineProperty(t,e,{value:r,enumerable:!0,configurable:!0,writable:!0}):t[e]=r,t}var m6e=Nl().codes.ERR_INVALID_ARG_TYPE;function E6e(t,e,r){var i;if(e&&typeof e.next==\"function\")i=e;else if(e&&e[Symbol.asyncIterator])i=e[Symbol.asyncIterator]();else if(e&&e[Symbol.iterator])i=e[Symbol.iterator]();else throw new m6e(\"iterable\",[\"Iterable\"],e);var n=new t(C6e({objectMode:!0},r)),s=!1;n._read=function(){s||(s=!0,o())};function o(){return a.apply(this,arguments)}function a(){return a=p6e(function*(){try{var l=yield i.next(),c=l.value,u=l.done;u?n.push(null):n.push(yield c)?o():s=!1}catch(g){n.destroy(g)}}),a.apply(this,arguments)}return n}_ce.exports=E6e});var DT=w((ext,$ce)=>{\"use strict\";$ce.exports=Kt;var gh;Kt.ReadableState=eue;var $kt=require(\"events\").EventEmitter,tue=function(e,r){return e.listeners(r).length},$m=IT(),kb=require(\"buffer\").Buffer,I6e=global.Uint8Array||function(){};function y6e(t){return kb.from(t)}function w6e(t){return kb.isBuffer(t)||t instanceof I6e}var UT=require(\"util\"),xt;UT&&UT.debuglog?xt=UT.debuglog(\"stream\"):xt=function(){};var B6e=yce(),KT=BT(),b6e=bT(),Q6e=b6e.getHighWaterMark,xb=Nl().codes,v6e=xb.ERR_INVALID_ARG_TYPE,S6e=xb.ERR_STREAM_PUSH_AFTER_EOF,k6e=xb.ERR_METHOD_NOT_IMPLEMENTED,x6e=xb.ERR_STREAM_UNSHIFT_AFTER_END_EVENT,fh,HT,jT;Ll()(Kt,$m);var eE=KT.errorOrDestroy,GT=[\"error\",\"close\",\"destroy\",\"pause\",\"resume\"];function P6e(t,e,r){if(typeof t.prependListener==\"function\")return t.prependListener(e,r);!t._events||!t._events[e]?t.on(e,r):Array.isArray(t._events[e])?t._events[e].unshift(r):t._events[e]=[r,t._events[e]]}function eue(t,e,r){gh=gh||vu(),t=t||{},typeof r!=\"boolean\"&&(r=e instanceof gh),this.objectMode=!!t.objectMode,r&&(this.objectMode=this.objectMode||!!t.readableObjectMode),this.highWaterMark=Q6e(this,t,\"readableHighWaterMark\",r),this.buffer=new B6e,this.length=0,this.pipes=null,this.pipesCount=0,this.flowing=null,this.ended=!1,this.endEmitted=!1,this.reading=!1,this.sync=!0,this.needReadable=!1,this.emittedReadable=!1,this.readableListening=!1,this.resumeScheduled=!1,this.paused=!0,this.emitClose=t.emitClose!==!1,this.autoDestroy=!!t.autoDestroy,this.destroyed=!1,this.defaultEncoding=t.defaultEncoding||\"utf8\",this.awaitDrain=0,this.readingMore=!1,this.decoder=null,this.encoding=null,t.encoding&&(fh||(fh=OT().StringDecoder),this.decoder=new fh(t.encoding),this.encoding=t.encoding)}function Kt(t){if(gh=gh||vu(),!(this instanceof Kt))return new Kt(t);var e=this instanceof gh;this._readableState=new eue(t,this,e),this.readable=!0,t&&(typeof t.read==\"function\"&&(this._read=t.read),typeof t.destroy==\"function\"&&(this._destroy=t.destroy)),$m.call(this)}Object.defineProperty(Kt.prototype,\"destroyed\",{enumerable:!1,get:function(){return this._readableState===void 0?!1:this._readableState.destroyed},set:function(e){!this._readableState||(this._readableState.destroyed=e)}});Kt.prototype.destroy=KT.destroy;Kt.prototype._undestroy=KT.undestroy;Kt.prototype._destroy=function(t,e){e(t)};Kt.prototype.push=function(t,e){var r=this._readableState,i;return r.objectMode?i=!0:typeof t==\"string\"&&(e=e||r.defaultEncoding,e!==r.encoding&&(t=kb.from(t,e),e=\"\"),i=!0),rue(this,t,e,!1,i)};Kt.prototype.unshift=function(t){return rue(this,t,null,!0,!1)};function rue(t,e,r,i,n){xt(\"readableAddChunk\",e);var s=t._readableState;if(e===null)s.reading=!1,R6e(t,s);else{var o;if(n||(o=D6e(s,e)),o)eE(t,o);else if(s.objectMode||e&&e.length>0)if(typeof 
e!=\"string\"&&!s.objectMode&&Object.getPrototypeOf(e)!==kb.prototype&&(e=y6e(e)),i)s.endEmitted?eE(t,new x6e):YT(t,s,e,!0);else if(s.ended)eE(t,new S6e);else{if(s.destroyed)return!1;s.reading=!1,s.decoder&&!r?(e=s.decoder.write(e),s.objectMode||e.length!==0?YT(t,s,e,!1):qT(t,s)):YT(t,s,e,!1)}else i||(s.reading=!1,qT(t,s))}return!s.ended&&(s.length<s.highWaterMark||s.length===0)}function YT(t,e,r,i){e.flowing&&e.length===0&&!e.sync?(e.awaitDrain=0,t.emit(\"data\",r)):(e.length+=e.objectMode?1:r.length,i?e.buffer.unshift(r):e.buffer.push(r),e.needReadable&&Pb(t)),qT(t,e)}function D6e(t,e){var r;return!w6e(e)&&typeof e!=\"string\"&&e!==void 0&&!t.objectMode&&(r=new v6e(\"chunk\",[\"string\",\"Buffer\",\"Uint8Array\"],e)),r}Kt.prototype.isPaused=function(){return this._readableState.flowing===!1};Kt.prototype.setEncoding=function(t){fh||(fh=OT().StringDecoder);var e=new fh(t);this._readableState.decoder=e,this._readableState.encoding=this._readableState.decoder.encoding;for(var r=this._readableState.buffer.head,i=\"\";r!==null;)i+=e.write(r.data),r=r.next;return this._readableState.buffer.clear(),i!==\"\"&&this._readableState.buffer.push(i),this._readableState.length=i.length,this};var iue=1073741824;function F6e(t){return t>=iue?t=iue:(t--,t|=t>>>1,t|=t>>>2,t|=t>>>4,t|=t>>>8,t|=t>>>16,t++),t}function nue(t,e){return t<=0||e.length===0&&e.ended?0:e.objectMode?1:t!==t?e.flowing&&e.length?e.buffer.head.data.length:e.length:(t>e.highWaterMark&&(e.highWaterMark=F6e(t)),t<=e.length?t:e.ended?e.length:(e.needReadable=!0,0))}Kt.prototype.read=function(t){xt(\"read\",t),t=parseInt(t,10);var e=this._readableState,r=t;if(t!==0&&(e.emittedReadable=!1),t===0&&e.needReadable&&((e.highWaterMark!==0?e.length>=e.highWaterMark:e.length>0)||e.ended))return xt(\"read: emitReadable\",e.length,e.ended),e.length===0&&e.ended?JT(this):Pb(this),null;if(t=nue(t,e),t===0&&e.ended)return e.length===0&&JT(this),null;var i=e.needReadable;xt(\"need readable\",i),(e.length===0||e.length-t<e.highWaterMark)&&(i=!0,xt(\"length less than watermark\",i)),e.ended||e.reading?(i=!1,xt(\"reading or ended\",i)):i&&(xt(\"do read\"),e.reading=!0,e.sync=!0,e.length===0&&(e.needReadable=!0),this._read(e.highWaterMark),e.sync=!1,e.reading||(t=nue(r,e)));var n;return t>0?n=sue(t,e):n=null,n===null?(e.needReadable=e.length<=e.highWaterMark,t=0):(e.length-=t,e.awaitDrain=0),e.length===0&&(e.ended||(e.needReadable=!0),r!==t&&e.ended&&JT(this)),n!==null&&this.emit(\"data\",n),n};function R6e(t,e){if(xt(\"onEofChunk\"),!e.ended){if(e.decoder){var r=e.decoder.end();r&&r.length&&(e.buffer.push(r),e.length+=e.objectMode?1:r.length)}e.ended=!0,e.sync?Pb(t):(e.needReadable=!1,e.emittedReadable||(e.emittedReadable=!0,oue(t)))}}function Pb(t){var e=t._readableState;xt(\"emitReadable\",e.needReadable,e.emittedReadable),e.needReadable=!1,e.emittedReadable||(xt(\"emitReadable\",e.flowing),e.emittedReadable=!0,process.nextTick(oue,t))}function oue(t){var e=t._readableState;xt(\"emitReadable_\",e.destroyed,e.length,e.ended),!e.destroyed&&(e.length||e.ended)&&(t.emit(\"readable\"),e.emittedReadable=!1),e.needReadable=!e.flowing&&!e.ended&&e.length<=e.highWaterMark,WT(t)}function qT(t,e){e.readingMore||(e.readingMore=!0,process.nextTick(N6e,t,e))}function N6e(t,e){for(;!e.reading&&!e.ended&&(e.length<e.highWaterMark||e.flowing&&e.length===0);){var r=e.length;if(xt(\"maybeReadMore read 0\"),t.read(0),r===e.length)break}e.readingMore=!1}Kt.prototype._read=function(t){eE(this,new k6e(\"_read()\"))};Kt.prototype.pipe=function(t,e){var 
r=this,i=this._readableState;switch(i.pipesCount){case 0:i.pipes=t;break;case 1:i.pipes=[i.pipes,t];break;default:i.pipes.push(t);break}i.pipesCount+=1,xt(\"pipe count=%d opts=%j\",i.pipesCount,e);var n=(!e||e.end!==!1)&&t!==process.stdout&&t!==process.stderr,s=n?a:m;i.endEmitted?process.nextTick(s):r.once(\"end\",s),t.on(\"unpipe\",o);function o(y,Q){xt(\"onunpipe\"),y===r&&Q&&Q.hasUnpiped===!1&&(Q.hasUnpiped=!0,u())}function a(){xt(\"onend\"),t.end()}var l=L6e(r);t.on(\"drain\",l);var c=!1;function u(){xt(\"cleanup\"),t.removeListener(\"close\",h),t.removeListener(\"finish\",p),t.removeListener(\"drain\",l),t.removeListener(\"error\",f),t.removeListener(\"unpipe\",o),r.removeListener(\"end\",a),r.removeListener(\"end\",m),r.removeListener(\"data\",g),c=!0,i.awaitDrain&&(!t._writableState||t._writableState.needDrain)&&l()}r.on(\"data\",g);function g(y){xt(\"ondata\");var Q=t.write(y);xt(\"dest.write\",Q),Q===!1&&((i.pipesCount===1&&i.pipes===t||i.pipesCount>1&&aue(i.pipes,t)!==-1)&&!c&&(xt(\"false write response, pause\",i.awaitDrain),i.awaitDrain++),r.pause())}function f(y){xt(\"onerror\",y),m(),t.removeListener(\"error\",f),tue(t,\"error\")===0&&eE(t,y)}P6e(t,\"error\",f);function h(){t.removeListener(\"finish\",p),m()}t.once(\"close\",h);function p(){xt(\"onfinish\"),t.removeListener(\"close\",h),m()}t.once(\"finish\",p);function m(){xt(\"unpipe\"),r.unpipe(t)}return t.emit(\"pipe\",r),i.flowing||(xt(\"pipe resume\"),r.resume()),t};function L6e(t){return function(){var r=t._readableState;xt(\"pipeOnDrain\",r.awaitDrain),r.awaitDrain&&r.awaitDrain--,r.awaitDrain===0&&tue(t,\"data\")&&(r.flowing=!0,WT(t))}}Kt.prototype.unpipe=function(t){var e=this._readableState,r={hasUnpiped:!1};if(e.pipesCount===0)return this;if(e.pipesCount===1)return t&&t!==e.pipes?this:(t||(t=e.pipes),e.pipes=null,e.pipesCount=0,e.flowing=!1,t&&t.emit(\"unpipe\",this,r),this);if(!t){var i=e.pipes,n=e.pipesCount;e.pipes=null,e.pipesCount=0,e.flowing=!1;for(var s=0;s<n;s++)i[s].emit(\"unpipe\",this,{hasUnpiped:!1});return this}var o=aue(e.pipes,t);return o===-1?this:(e.pipes.splice(o,1),e.pipesCount-=1,e.pipesCount===1&&(e.pipes=e.pipes[0]),t.emit(\"unpipe\",this,r),this)};Kt.prototype.on=function(t,e){var r=$m.prototype.on.call(this,t,e),i=this._readableState;return t===\"data\"?(i.readableListening=this.listenerCount(\"readable\")>0,i.flowing!==!1&&this.resume()):t===\"readable\"&&!i.endEmitted&&!i.readableListening&&(i.readableListening=i.needReadable=!0,i.flowing=!1,i.emittedReadable=!1,xt(\"on readable\",i.length,i.reading),i.length?Pb(this):i.reading||process.nextTick(T6e,this)),r};Kt.prototype.addListener=Kt.prototype.on;Kt.prototype.removeListener=function(t,e){var r=$m.prototype.removeListener.call(this,t,e);return t===\"readable\"&&process.nextTick(Aue,this),r};Kt.prototype.removeAllListeners=function(t){var e=$m.prototype.removeAllListeners.apply(this,arguments);return(t===\"readable\"||t===void 0)&&process.nextTick(Aue,this),e};function Aue(t){var e=t._readableState;e.readableListening=t.listenerCount(\"readable\")>0,e.resumeScheduled&&!e.paused?e.flowing=!0:t.listenerCount(\"data\")>0&&t.resume()}function T6e(t){xt(\"readable nexttick read 0\"),t.read(0)}Kt.prototype.resume=function(){var t=this._readableState;return t.flowing||(xt(\"resume\"),t.flowing=!t.readableListening,O6e(this,t)),t.paused=!1,this};function O6e(t,e){e.resumeScheduled||(e.resumeScheduled=!0,process.nextTick(M6e,t,e))}function 
M6e(t,e){xt(\"resume\",e.reading),e.reading||t.read(0),e.resumeScheduled=!1,t.emit(\"resume\"),WT(t),e.flowing&&!e.reading&&t.read(0)}Kt.prototype.pause=function(){return xt(\"call pause flowing=%j\",this._readableState.flowing),this._readableState.flowing!==!1&&(xt(\"pause\"),this._readableState.flowing=!1,this.emit(\"pause\")),this._readableState.paused=!0,this};function WT(t){var e=t._readableState;for(xt(\"flow\",e.flowing);e.flowing&&t.read()!==null;);}Kt.prototype.wrap=function(t){var e=this,r=this._readableState,i=!1;t.on(\"end\",function(){if(xt(\"wrapped end\"),r.decoder&&!r.ended){var o=r.decoder.end();o&&o.length&&e.push(o)}e.push(null)}),t.on(\"data\",function(o){if(xt(\"wrapped data\"),r.decoder&&(o=r.decoder.write(o)),!(r.objectMode&&o==null)&&!(!r.objectMode&&(!o||!o.length))){var a=e.push(o);a||(i=!0,t.pause())}});for(var n in t)this[n]===void 0&&typeof t[n]==\"function\"&&(this[n]=function(a){return function(){return t[a].apply(t,arguments)}}(n));for(var s=0;s<GT.length;s++)t.on(GT[s],this.emit.bind(this,GT[s]));return this._read=function(o){xt(\"wrapped _read\",o),i&&(i=!1,t.resume())},this};typeof Symbol==\"function\"&&(Kt.prototype[Symbol.asyncIterator]=function(){return HT===void 0&&(HT=zce()),HT(this)});Object.defineProperty(Kt.prototype,\"readableHighWaterMark\",{enumerable:!1,get:function(){return this._readableState.highWaterMark}});Object.defineProperty(Kt.prototype,\"readableBuffer\",{enumerable:!1,get:function(){return this._readableState&&this._readableState.buffer}});Object.defineProperty(Kt.prototype,\"readableFlowing\",{enumerable:!1,get:function(){return this._readableState.flowing},set:function(e){this._readableState&&(this._readableState.flowing=e)}});Kt._fromList=sue;Object.defineProperty(Kt.prototype,\"readableLength\",{enumerable:!1,get:function(){return this._readableState.length}});function sue(t,e){if(e.length===0)return null;var r;return e.objectMode?r=e.buffer.shift():!t||t>=e.length?(e.decoder?r=e.buffer.join(\"\"):e.buffer.length===1?r=e.buffer.first():r=e.buffer.concat(e.length),e.buffer.clear()):r=e.buffer.consume(t,e.decoder),r}function JT(t){var e=t._readableState;xt(\"endReadable\",e.endEmitted),e.endEmitted||(e.ended=!0,process.nextTick(U6e,e,t))}function U6e(t,e){if(xt(\"endReadableNT\",t.endEmitted,t.length),!t.endEmitted&&t.length===0&&(t.endEmitted=!0,e.readable=!1,e.emit(\"end\"),t.autoDestroy)){var r=e._writableState;(!r||r.autoDestroy&&r.finished)&&e.destroy()}}typeof Symbol==\"function\"&&(Kt.from=function(t,e){return jT===void 0&&(jT=Zce()),jT(Kt,t,e)});function aue(t,e){for(var r=0,i=t.length;r<i;r++)if(t[r]===e)return r;return-1}});var zT=w((txt,lue)=>{\"use strict\";lue.exports=bA;var Db=Nl().codes,K6e=Db.ERR_METHOD_NOT_IMPLEMENTED,H6e=Db.ERR_MULTIPLE_CALLBACK,j6e=Db.ERR_TRANSFORM_ALREADY_TRANSFORMING,G6e=Db.ERR_TRANSFORM_WITH_LENGTH_0,Rb=vu();Ll()(bA,Rb);function Y6e(t,e){var r=this._transformState;r.transforming=!1;var i=r.writecb;if(i===null)return this.emit(\"error\",new H6e);r.writechunk=null,r.writecb=null,e!=null&&this.push(e),i(t);var n=this._readableState;n.reading=!1,(n.needReadable||n.length<n.highWaterMark)&&this._read(n.highWaterMark)}function bA(t){if(!(this instanceof bA))return new bA(t);Rb.call(this,t),this._transformState={afterTransform:Y6e.bind(this),needTransform:!1,transforming:!1,writecb:null,writechunk:null,writeencoding:null},this._readableState.needReadable=!0,this._readableState.sync=!1,t&&(typeof t.transform==\"function\"&&(this._transform=t.transform),typeof 
t.flush==\"function\"&&(this._flush=t.flush)),this.on(\"prefinish\",q6e)}function q6e(){var t=this;typeof this._flush==\"function\"&&!this._readableState.destroyed?this._flush(function(e,r){cue(t,e,r)}):cue(this,null,null)}bA.prototype.push=function(t,e){return this._transformState.needTransform=!1,Rb.prototype.push.call(this,t,e)};bA.prototype._transform=function(t,e,r){r(new K6e(\"_transform()\"))};bA.prototype._write=function(t,e,r){var i=this._transformState;if(i.writecb=r,i.writechunk=t,i.writeencoding=e,!i.transforming){var n=this._readableState;(i.needTransform||n.needReadable||n.length<n.highWaterMark)&&this._read(n.highWaterMark)}};bA.prototype._read=function(t){var e=this._transformState;e.writechunk!==null&&!e.transforming?(e.transforming=!0,this._transform(e.writechunk,e.writeencoding,e.afterTransform)):e.needTransform=!0};bA.prototype._destroy=function(t,e){Rb.prototype._destroy.call(this,t,function(r){e(r)})};function cue(t,e,r){if(e)return t.emit(\"error\",e);if(r!=null&&t.push(r),t._writableState.length)throw new G6e;if(t._transformState.transforming)throw new j6e;return t.push(null)}});var fue=w((rxt,uue)=>{\"use strict\";uue.exports=tE;var gue=zT();Ll()(tE,gue);function tE(t){if(!(this instanceof tE))return new tE(t);gue.call(this,t)}tE.prototype._transform=function(t,e,r){r(null,t)}});var mue=w((ixt,hue)=>{\"use strict\";var _T;function J6e(t){var e=!1;return function(){e||(e=!0,t.apply(void 0,arguments))}}var pue=Nl().codes,W6e=pue.ERR_MISSING_ARGS,z6e=pue.ERR_STREAM_DESTROYED;function due(t){if(t)throw t}function _6e(t){return t.setHeader&&typeof t.abort==\"function\"}function V6e(t,e,r,i){i=J6e(i);var n=!1;t.on(\"close\",function(){n=!0}),_T===void 0&&(_T=Qb()),_T(t,{readable:e,writable:r},function(o){if(o)return i(o);n=!0,i()});var s=!1;return function(o){if(!n&&!s){if(s=!0,_6e(t))return t.abort();if(typeof t.destroy==\"function\")return t.destroy();i(o||new z6e(\"pipe\"))}}}function Cue(t){t()}function X6e(t,e){return t.pipe(e)}function Z6e(t){return!t.length||typeof t[t.length-1]!=\"function\"?due:t.pop()}function $6e(){for(var t=arguments.length,e=new Array(t),r=0;r<t;r++)e[r]=arguments[r];var i=Z6e(e);if(Array.isArray(e[0])&&(e=e[0]),e.length<2)throw new W6e(\"streams\");var n,s=e.map(function(o,a){var l=a<e.length-1,c=a>0;return V6e(o,l,c,function(u){n||(n=u),u&&s.forEach(Cue),!l&&(s.forEach(Cue),i(n))})});return e.reduce(X6e)}hue.exports=$6e});var hh=w((zs,rE)=>{var iE=require(\"stream\");process.env.READABLE_STREAM===\"disable\"&&iE?(rE.exports=iE.Readable,Object.assign(rE.exports,iE),rE.exports.Stream=iE):(zs=rE.exports=DT(),zs.Stream=iE||zs,zs.Readable=zs,zs.Writable=PT(),zs.Duplex=vu(),zs.Transform=zT(),zs.PassThrough=fue(),zs.finished=Qb(),zs.pipeline=mue())});var yue=w((nxt,Eue)=>{\"use strict\";var{Buffer:Qo}=require(\"buffer\"),Iue=Symbol.for(\"BufferList\");function mr(t){if(!(this instanceof mr))return new mr(t);mr._init.call(this,t)}mr._init=function(e){Object.defineProperty(this,Iue,{value:!0}),this._bufs=[],this.length=0,e&&this.append(e)};mr.prototype._new=function(e){return new mr(e)};mr.prototype._offset=function(e){if(e===0)return[0,0];let r=0;for(let i=0;i<this._bufs.length;i++){let n=r+this._bufs[i].length;if(e<n||i===this._bufs.length-1)return[i,e-r];r=n}};mr.prototype._reverseOffset=function(t){let e=t[0],r=t[1];for(let i=0;i<e;i++)r+=this._bufs[i].length;return r};mr.prototype.get=function(e){if(e>this.length||e<0)return;let r=this._offset(e);return this._bufs[r[0]][r[1]]};mr.prototype.slice=function(e,r){return typeof 
e==\"number\"&&e<0&&(e+=this.length),typeof r==\"number\"&&r<0&&(r+=this.length),this.copy(null,0,e,r)};mr.prototype.copy=function(e,r,i,n){if((typeof i!=\"number\"||i<0)&&(i=0),(typeof n!=\"number\"||n>this.length)&&(n=this.length),i>=this.length||n<=0)return e||Qo.alloc(0);let s=!!e,o=this._offset(i),a=n-i,l=a,c=s&&r||0,u=o[1];if(i===0&&n===this.length){if(!s)return this._bufs.length===1?this._bufs[0]:Qo.concat(this._bufs,this.length);for(let g=0;g<this._bufs.length;g++)this._bufs[g].copy(e,c),c+=this._bufs[g].length;return e}if(l<=this._bufs[o[0]].length-u)return s?this._bufs[o[0]].copy(e,r,u,u+l):this._bufs[o[0]].slice(u,u+l);s||(e=Qo.allocUnsafe(a));for(let g=o[0];g<this._bufs.length;g++){let f=this._bufs[g].length-u;if(l>f)this._bufs[g].copy(e,c,u),c+=f;else{this._bufs[g].copy(e,c,u,u+l),c+=f;break}l-=f,u&&(u=0)}return e.length>c?e.slice(0,c):e};mr.prototype.shallowSlice=function(e,r){if(e=e||0,r=typeof r!=\"number\"?this.length:r,e<0&&(e+=this.length),r<0&&(r+=this.length),e===r)return this._new();let i=this._offset(e),n=this._offset(r),s=this._bufs.slice(i[0],n[0]+1);return n[1]===0?s.pop():s[s.length-1]=s[s.length-1].slice(0,n[1]),i[1]!==0&&(s[0]=s[0].slice(i[1])),this._new(s)};mr.prototype.toString=function(e,r,i){return this.slice(r,i).toString(e)};mr.prototype.consume=function(e){if(e=Math.trunc(e),Number.isNaN(e)||e<=0)return this;for(;this._bufs.length;)if(e>=this._bufs[0].length)e-=this._bufs[0].length,this.length-=this._bufs[0].length,this._bufs.shift();else{this._bufs[0]=this._bufs[0].slice(e),this.length-=e;break}return this};mr.prototype.duplicate=function(){let e=this._new();for(let r=0;r<this._bufs.length;r++)e.append(this._bufs[r]);return e};mr.prototype.append=function(e){if(e==null)return this;if(e.buffer)this._appendBuffer(Qo.from(e.buffer,e.byteOffset,e.byteLength));else if(Array.isArray(e))for(let r=0;r<e.length;r++)this.append(e[r]);else if(this._isBufferList(e))for(let r=0;r<e._bufs.length;r++)this.append(e._bufs[r]);else typeof e==\"number\"&&(e=e.toString()),this._appendBuffer(Qo.from(e));return this};mr.prototype._appendBuffer=function(e){this._bufs.push(e),this.length+=e.length};mr.prototype.indexOf=function(t,e,r){if(r===void 0&&typeof e==\"string\"&&(r=e,e=void 0),typeof t==\"function\"||Array.isArray(t))throw new TypeError('The \"value\" argument must be one of type string, Buffer, BufferList, or Uint8Array.');if(typeof t==\"number\"?t=Qo.from([t]):typeof t==\"string\"?t=Qo.from(t,r):this._isBufferList(t)?t=t.slice():Array.isArray(t.buffer)?t=Qo.from(t.buffer,t.byteOffset,t.byteLength):Qo.isBuffer(t)||(t=Qo.from(t)),e=Number(e||0),isNaN(e)&&(e=0),e<0&&(e=this.length+e),e<0&&(e=0),t.length===0)return e>this.length?this.length:e;let i=this._offset(e),n=i[0],s=i[1];for(;n<this._bufs.length;n++){let o=this._bufs[n];for(;s<o.length;)if(o.length-s>=t.length){let l=o.indexOf(t,s);if(l!==-1)return this._reverseOffset([n,l]);s=o.length-t.length+1}else{let l=this._reverseOffset([n,s]);if(this._match(l,t))return l;s++}s=0}return-1};mr.prototype._match=function(t,e){if(this.length-t<e.length)return!1;for(let r=0;r<e.length;r++)if(this.get(t+r)!==e[r])return!1;return!0};(function(){let t={readDoubleBE:8,readDoubleLE:8,readFloatBE:4,readFloatLE:4,readInt32BE:4,readInt32LE:4,readUInt32BE:4,readUInt32LE:4,readInt16BE:2,readInt16LE:2,readUInt16BE:2,readUInt16LE:2,readInt8:1,readUInt8:1,readIntBE:null,readIntLE:null,readUIntBE:null,readUIntLE:null};for(let e in t)(function(r){t[r]===null?mr.prototype[r]=function(i,n){return 
this.slice(i,i+n)[r](0,n)}:mr.prototype[r]=function(i=0){return this.slice(i,i+t[r])[r](0)}})(e)})();mr.prototype._isBufferList=function(e){return e instanceof mr||mr.isBufferList(e)};mr.isBufferList=function(e){return e!=null&&e[Iue]};Eue.exports=mr});var wue=w((sxt,Fb)=>{\"use strict\";var VT=hh().Duplex,eVe=Ll(),nE=yue();function Zi(t){if(!(this instanceof Zi))return new Zi(t);if(typeof t==\"function\"){this._callback=t;let e=function(i){this._callback&&(this._callback(i),this._callback=null)}.bind(this);this.on(\"pipe\",function(i){i.on(\"error\",e)}),this.on(\"unpipe\",function(i){i.removeListener(\"error\",e)}),t=null}nE._init.call(this,t),VT.call(this)}eVe(Zi,VT);Object.assign(Zi.prototype,nE.prototype);Zi.prototype._new=function(e){return new Zi(e)};Zi.prototype._write=function(e,r,i){this._appendBuffer(e),typeof i==\"function\"&&i()};Zi.prototype._read=function(e){if(!this.length)return this.push(null);e=Math.min(e,this.length),this.push(this.slice(0,e)),this.consume(e)};Zi.prototype.end=function(e){VT.prototype.end.call(this,e),this._callback&&(this._callback(null,this.slice()),this._callback=null)};Zi.prototype._destroy=function(e,r){this._bufs.length=0,this.length=0,r(e)};Zi.prototype._isBufferList=function(e){return e instanceof Zi||e instanceof nE||Zi.isBufferList(e)};Zi.isBufferList=nE.isBufferList;Fb.exports=Zi;Fb.exports.BufferListStream=Zi;Fb.exports.BufferList=nE});var $T=w(ph=>{var tVe=Buffer.alloc,rVe=\"0000000000000000000\",iVe=\"7777777777777777777\",Bue=\"0\".charCodeAt(0),bue=Buffer.from(\"ustar\\0\",\"binary\"),nVe=Buffer.from(\"00\",\"binary\"),sVe=Buffer.from(\"ustar \",\"binary\"),oVe=Buffer.from(\" \\0\",\"binary\"),aVe=parseInt(\"7777\",8),sE=257,XT=263,AVe=function(t,e,r){return typeof t!=\"number\"?r:(t=~~t,t>=e?e:t>=0||(t+=e,t>=0)?t:0)},lVe=function(t){switch(t){case 0:return\"file\";case 1:return\"link\";case 2:return\"symlink\";case 3:return\"character-device\";case 4:return\"block-device\";case 5:return\"directory\";case 6:return\"fifo\";case 7:return\"contiguous-file\";case 72:return\"pax-header\";case 55:return\"pax-global-header\";case 27:return\"gnu-long-link-path\";case 28:case 30:return\"gnu-long-path\"}return null},cVe=function(t){switch(t){case\"file\":return 0;case\"link\":return 1;case\"symlink\":return 2;case\"character-device\":return 3;case\"block-device\":return 4;case\"directory\":return 5;case\"fifo\":return 6;case\"contiguous-file\":return 7;case\"pax-header\":return 72}return 0},Que=function(t,e,r,i){for(;r<i;r++)if(t[r]===e)return r;return i},vue=function(t){for(var e=8*32,r=0;r<148;r++)e+=t[r];for(var i=156;i<512;i++)e+=t[i];return e},Kl=function(t,e){return t=t.toString(8),t.length>e?iVe.slice(0,e)+\" \":rVe.slice(0,e-t.length)+t+\" \"};function uVe(t){var e;if(t[0]===128)e=!0;else if(t[0]===255)e=!1;else return null;for(var r=[],i=t.length-1;i>0;i--){var n=t[i];e?r.push(n):r.push(255-n)}var s=0,o=r.length;for(i=0;i<o;i++)s+=r[i]*Math.pow(256,i);return e?s:-1*s}var Hl=function(t,e,r){if(t=t.slice(e,e+r),e=0,t[e]&128)return uVe(t);for(;e<t.length&&t[e]===32;)e++;for(var i=AVe(Que(t,32,e,t.length),t.length,t.length);e<i&&t[e]===0;)e++;return i===e?0:parseInt(t.slice(e,i).toString(),8)},dh=function(t,e,r,i){return t.slice(e,Que(t,0,e,e+r)).toString(i)},ZT=function(t){var e=Buffer.byteLength(t),r=Math.floor(Math.log(e)/Math.log(10))+1;return e+r>=Math.pow(10,r)&&r++,e+r+t};ph.decodeLongPath=function(t,e){return dh(t,0,t.length,e)};ph.encodePax=function(t){var e=\"\";t.name&&(e+=ZT(\" path=\"+t.name+`\n`)),t.linkname&&(e+=ZT(\" 
linkpath=\"+t.linkname+`\n`));var r=t.pax;if(r)for(var i in r)e+=ZT(\" \"+i+\"=\"+r[i]+`\n`);return Buffer.from(e)};ph.decodePax=function(t){for(var e={};t.length;){for(var r=0;r<t.length&&t[r]!==32;)r++;var i=parseInt(t.slice(0,r).toString(),10);if(!i)return e;var n=t.slice(r+1,i-1).toString(),s=n.indexOf(\"=\");if(s===-1)return e;e[n.slice(0,s)]=n.slice(s+1),t=t.slice(i)}return e};ph.encode=function(t){var e=tVe(512),r=t.name,i=\"\";if(t.typeflag===5&&r[r.length-1]!==\"/\"&&(r+=\"/\"),Buffer.byteLength(r)!==r.length)return null;for(;Buffer.byteLength(r)>100;){var n=r.indexOf(\"/\");if(n===-1)return null;i+=i?\"/\"+r.slice(0,n):r.slice(0,n),r=r.slice(n+1)}return Buffer.byteLength(r)>100||Buffer.byteLength(i)>155||t.linkname&&Buffer.byteLength(t.linkname)>100?null:(e.write(r),e.write(Kl(t.mode&aVe,6),100),e.write(Kl(t.uid,6),108),e.write(Kl(t.gid,6),116),e.write(Kl(t.size,11),124),e.write(Kl(t.mtime.getTime()/1e3|0,11),136),e[156]=Bue+cVe(t.type),t.linkname&&e.write(t.linkname,157),bue.copy(e,sE),nVe.copy(e,XT),t.uname&&e.write(t.uname,265),t.gname&&e.write(t.gname,297),e.write(Kl(t.devmajor||0,6),329),e.write(Kl(t.devminor||0,6),337),i&&e.write(i,345),e.write(Kl(vue(e),6),148),e)};ph.decode=function(t,e,r){var i=t[156]===0?0:t[156]-Bue,n=dh(t,0,100,e),s=Hl(t,100,8),o=Hl(t,108,8),a=Hl(t,116,8),l=Hl(t,124,12),c=Hl(t,136,12),u=lVe(i),g=t[157]===0?null:dh(t,157,100,e),f=dh(t,265,32),h=dh(t,297,32),p=Hl(t,329,8),m=Hl(t,337,8),y=vue(t);if(y===8*32)return null;if(y!==Hl(t,148,8))throw new Error(\"Invalid tar header. Maybe the tar is corrupted or it needs to be gunzipped?\");if(bue.compare(t,sE,sE+6)===0)t[345]&&(n=dh(t,345,155,e)+\"/\"+n);else if(!(sVe.compare(t,sE,sE+6)===0&&oVe.compare(t,XT,XT+2)===0)){if(!r)throw new Error(\"Invalid tar header: unknown format.\")}return i===0&&n&&n[n.length-1]===\"/\"&&(i=5),{name:n,mode:s,uid:o,gid:a,size:l,mtime:new Date(1e3*c),type:u,linkname:g,uname:f,gname:h,devmajor:p,devminor:m}}});var Fue=w((axt,Sue)=>{var kue=require(\"util\"),gVe=wue(),oE=$T(),xue=hh().Writable,Pue=hh().PassThrough,Due=function(){},Rue=function(t){return t&=511,t&&512-t},fVe=function(t,e){var r=new Nb(t,e);return r.end(),r},hVe=function(t,e){return e.path&&(t.name=e.path),e.linkpath&&(t.linkname=e.linkpath),e.size&&(t.size=parseInt(e.size,10)),t.pax=e,t},Nb=function(t,e){this._parent=t,this.offset=e,Pue.call(this,{autoDestroy:!1})};kue.inherits(Nb,Pue);Nb.prototype.destroy=function(t){this._parent.destroy(t)};var QA=function(t){if(!(this instanceof QA))return new QA(t);xue.call(this,t),t=t||{},this._offset=0,this._buffer=gVe(),this._missing=0,this._partial=!1,this._onparse=Due,this._header=null,this._stream=null,this._overflow=null,this._cb=null,this._locked=!1,this._destroyed=!1,this._pax=null,this._paxGlobal=null,this._gnuLongPath=null,this._gnuLongLinkPath=null;var e=this,r=e._buffer,i=function(){e._continue()},n=function(f){if(e._locked=!1,f)return e.destroy(f);e._stream||i()},s=function(){e._stream=null;var f=Rue(e._header.size);f?e._parse(f,o):e._parse(512,g),e._locked||i()},o=function(){e._buffer.consume(Rue(e._header.size)),e._parse(512,g),i()},a=function(){var f=e._header.size;e._paxGlobal=oE.decodePax(r.slice(0,f)),r.consume(f),s()},l=function(){var f=e._header.size;e._pax=oE.decodePax(r.slice(0,f)),e._paxGlobal&&(e._pax=Object.assign({},e._paxGlobal,e._pax)),r.consume(f),s()},c=function(){var f=e._header.size;this._gnuLongPath=oE.decodeLongPath(r.slice(0,f),t.filenameEncoding),r.consume(f),s()},u=function(){var 
f=e._header.size;this._gnuLongLinkPath=oE.decodeLongPath(r.slice(0,f),t.filenameEncoding),r.consume(f),s()},g=function(){var f=e._offset,h;try{h=e._header=oE.decode(r.slice(0,512),t.filenameEncoding,t.allowUnknownFormat)}catch(p){e.emit(\"error\",p)}if(r.consume(512),!h){e._parse(512,g),i();return}if(h.type===\"gnu-long-path\"){e._parse(h.size,c),i();return}if(h.type===\"gnu-long-link-path\"){e._parse(h.size,u),i();return}if(h.type===\"pax-global-header\"){e._parse(h.size,a),i();return}if(h.type===\"pax-header\"){e._parse(h.size,l),i();return}if(e._gnuLongPath&&(h.name=e._gnuLongPath,e._gnuLongPath=null),e._gnuLongLinkPath&&(h.linkname=e._gnuLongLinkPath,e._gnuLongLinkPath=null),e._pax&&(e._header=h=hVe(h,e._pax),e._pax=null),e._locked=!0,!h.size||h.type===\"directory\"){e._parse(512,g),e.emit(\"entry\",h,fVe(e,f),n);return}e._stream=new Nb(e,f),e.emit(\"entry\",h,e._stream,n),e._parse(h.size,s),i()};this._onheader=g,this._parse(512,g)};kue.inherits(QA,xue);QA.prototype.destroy=function(t){this._destroyed||(this._destroyed=!0,t&&this.emit(\"error\",t),this.emit(\"close\"),this._stream&&this._stream.emit(\"close\"))};QA.prototype._parse=function(t,e){this._destroyed||(this._offset+=t,this._missing=t,e===this._onheader&&(this._partial=!1),this._onparse=e)};QA.prototype._continue=function(){if(!this._destroyed){var t=this._cb;this._cb=Due,this._overflow?this._write(this._overflow,void 0,t):t()}};QA.prototype._write=function(t,e,r){if(!this._destroyed){var i=this._stream,n=this._buffer,s=this._missing;if(t.length&&(this._partial=!0),t.length<s)return this._missing-=t.length,this._overflow=null,i?i.write(t,r):(n.append(t),r());this._cb=r,this._missing=0;var o=null;t.length>s&&(o=t.slice(s),t=t.slice(0,s)),i?i.end(t):n.append(t),this._overflow=o,this._onparse()}};QA.prototype._final=function(t){if(this._partial)return this.destroy(new Error(\"Unexpected end of data\"));t()};Sue.exports=QA});var Lue=w((Axt,Nue)=>{Nue.exports=require(\"fs\").constants||require(\"constants\")});var Kue=w((lxt,Tue)=>{var Ch=Lue(),Oue=Wx(),Lb=Ll(),pVe=Buffer.alloc,Mue=hh().Readable,mh=hh().Writable,dVe=require(\"string_decoder\").StringDecoder,Tb=$T(),CVe=parseInt(\"755\",8),mVe=parseInt(\"644\",8),Uue=pVe(1024),eO=function(){},tO=function(t,e){e&=511,e&&t.push(Uue.slice(0,512-e))};function EVe(t){switch(t&Ch.S_IFMT){case Ch.S_IFBLK:return\"block-device\";case Ch.S_IFCHR:return\"character-device\";case Ch.S_IFDIR:return\"directory\";case Ch.S_IFIFO:return\"fifo\";case Ch.S_IFLNK:return\"symlink\"}return\"file\"}var Ob=function(t){mh.call(this),this.written=0,this._to=t,this._destroyed=!1};Lb(Ob,mh);Ob.prototype._write=function(t,e,r){if(this.written+=t.length,this._to.push(t))return r();this._to._drain=r};Ob.prototype.destroy=function(){this._destroyed||(this._destroyed=!0,this.emit(\"close\"))};var Mb=function(){mh.call(this),this.linkname=\"\",this._decoder=new dVe(\"utf-8\"),this._destroyed=!1};Lb(Mb,mh);Mb.prototype._write=function(t,e,r){this.linkname+=this._decoder.write(t),r()};Mb.prototype.destroy=function(){this._destroyed||(this._destroyed=!0,this.emit(\"close\"))};var aE=function(){mh.call(this),this._destroyed=!1};Lb(aE,mh);aE.prototype._write=function(t,e,r){r(new Error(\"No body allowed for this entry\"))};aE.prototype.destroy=function(){this._destroyed||(this._destroyed=!0,this.emit(\"close\"))};var da=function(t){if(!(this instanceof da))return new 
da(t);Mue.call(this,t),this._drain=eO,this._finalized=!1,this._finalizing=!1,this._destroyed=!1,this._stream=null};Lb(da,Mue);da.prototype.entry=function(t,e,r){if(this._stream)throw new Error(\"already piping an entry\");if(!(this._finalized||this._destroyed)){typeof e==\"function\"&&(r=e,e=null),r||(r=eO);var i=this;if((!t.size||t.type===\"symlink\")&&(t.size=0),t.type||(t.type=EVe(t.mode)),t.mode||(t.mode=t.type===\"directory\"?CVe:mVe),t.uid||(t.uid=0),t.gid||(t.gid=0),t.mtime||(t.mtime=new Date),typeof e==\"string\"&&(e=Buffer.from(e)),Buffer.isBuffer(e)){t.size=e.length,this._encode(t);var n=this.push(e);return tO(i,t.size),n?process.nextTick(r):this._drain=r,new aE}if(t.type===\"symlink\"&&!t.linkname){var s=new Mb;return Oue(s,function(a){if(a)return i.destroy(),r(a);t.linkname=s.linkname,i._encode(t),r()}),s}if(this._encode(t),t.type!==\"file\"&&t.type!==\"contiguous-file\")return process.nextTick(r),new aE;var o=new Ob(this);return this._stream=o,Oue(o,function(a){if(i._stream=null,a)return i.destroy(),r(a);if(o.written!==t.size)return i.destroy(),r(new Error(\"size mismatch\"));tO(i,t.size),i._finalizing&&i.finalize(),r()}),o}};da.prototype.finalize=function(){if(this._stream){this._finalizing=!0;return}this._finalized||(this._finalized=!0,this.push(Uue),this.push(null))};da.prototype.destroy=function(t){this._destroyed||(this._destroyed=!0,t&&this.emit(\"error\",t),this.emit(\"close\"),this._stream&&this._stream.destroy&&this._stream.destroy())};da.prototype._encode=function(t){if(!t.pax){var e=Tb.encode(t);if(e){this.push(e);return}}this._encodePax(t)};da.prototype._encodePax=function(t){var e=Tb.encodePax({name:t.name,linkname:t.linkname,pax:t.pax}),r={name:\"PaxHeader\",mode:t.mode,uid:t.uid,gid:t.gid,size:e.length,mtime:t.mtime,type:\"pax-header\",linkname:t.linkname&&\"PaxHeader\",uname:t.uname,gname:t.gname,devmajor:t.devmajor,devminor:t.devminor};this.push(Tb.encode(r)),this.push(e),tO(this,e.length),r.size=t.size,r.type=t.type,this.push(Tb.encode(r))};da.prototype._read=function(t){var e=this._drain;this._drain=eO,e()};Tue.exports=da});var Hue=w(rO=>{rO.extract=Fue();rO.pack=Kue()});var ege=w((Rxt,Vue)=>{\"use strict\";var Eh=class{constructor(e,r,i){this.__specs=e||{},Object.keys(this.__specs).forEach(n=>{if(typeof this.__specs[n]==\"string\"){let s=this.__specs[n],o=this.__specs[s];if(o){let a=o.aliases||[];a.push(n,s),o.aliases=[...new Set(a)],this.__specs[n]=o}else throw new Error(`Alias refers to invalid key: ${s} -> ${n}`)}}),this.__opts=r||{},this.__providers=Zue(i.filter(n=>n!=null&&typeof n==\"object\")),this.__isFiggyPudding=!0}get(e){return AO(this,e,!0)}get[Symbol.toStringTag](){return\"FiggyPudding\"}forEach(e,r=this){for(let[i,n]of this.entries())e.call(r,n,i,this)}toJSON(){let e={};return this.forEach((r,i)=>{e[i]=r}),e}*entries(e){for(let i of Object.keys(this.__specs))yield[i,this.get(i)];let r=e||this.__opts.other;if(r){let i=new Set;for(let n of this.__providers){let s=n.entries?n.entries(r):RVe(n);for(let[o,a]of s)r(o)&&!i.has(o)&&(i.add(o),yield[o,a])}}}*[Symbol.iterator](){for(let[e,r]of this.entries())yield[e,r]}*keys(){for(let[e]of this.entries())yield e}*values(){for(let[,e]of this.entries())yield e}concat(...e){return new Proxy(new Eh(this.__specs,this.__opts,Zue(this.__providers).concat(e)),Xue)}};try{let t=require(\"util\");Eh.prototype[t.inspect.custom]=function(e,r){return this[Symbol.toStringTag]+\" \"+t.inspect(this.toJSON(),r)}}catch(t){}function FVe(t){throw Object.assign(new Error(`invalid config key requested: 
${t}`),{code:\"EBADKEY\"})}function AO(t,e,r){let i=t.__specs[e];if(r&&!i&&(!t.__opts.other||!t.__opts.other(e)))FVe(e);else{i||(i={});let n;for(let s of t.__providers){if(n=$ue(e,s),n===void 0&&i.aliases&&i.aliases.length){for(let o of i.aliases)if(o!==e&&(n=$ue(o,s),n!==void 0))break}if(n!==void 0)break}return n===void 0&&i.default!==void 0?typeof i.default==\"function\"?i.default(t):i.default:n}}function $ue(t,e){let r;return e.__isFiggyPudding?r=AO(e,t,!1):typeof e.get==\"function\"?r=e.get(t):r=e[t],r}var Xue={has(t,e){return e in t.__specs&&AO(t,e,!1)!==void 0},ownKeys(t){return Object.keys(t.__specs)},get(t,e){return typeof e==\"symbol\"||e.slice(0,2)===\"__\"||e in Eh.prototype?t[e]:t.get(e)},set(t,e,r){if(typeof e==\"symbol\"||e.slice(0,2)===\"__\")return t[e]=r,!0;throw new Error(\"figgyPudding options cannot be modified. Use .concat() instead.\")},deleteProperty(){throw new Error(\"figgyPudding options cannot be deleted. Use .concat() and shadow them instead.\")}};Vue.exports=NVe;function NVe(t,e){function r(...i){return new Proxy(new Eh(t,e,i),Xue)}return r}function Zue(t){let e=[];return t.forEach(r=>e.unshift(r)),e}function RVe(t){return Object.keys(t).map(e=>[e,t[e]])}});var ige=w((Fxt,Ca)=>{\"use strict\";var lE=require(\"crypto\"),LVe=ege(),TVe=require(\"stream\").Transform,tge=[\"sha256\",\"sha384\",\"sha512\"],OVe=/^[a-z0-9+/]+(?:=?=?)$/i,MVe=/^([^-]+)-([^?]+)([?\\S*]*)$/,UVe=/^([^-]+)-([A-Za-z0-9+/=]{44,88})(\\?[\\x21-\\x7E]*)*$/,KVe=/^[\\x21-\\x7E]+$/,Cn=LVe({algorithms:{default:[\"sha512\"]},error:{default:!1},integrity:{},options:{default:[]},pickAlgorithm:{default:()=>HVe},Promise:{default:()=>Promise},sep:{default:\" \"},single:{default:!1},size:{},strict:{default:!1}}),Pu=class{get isHash(){return!0}constructor(e,r){r=Cn(r);let i=!!r.strict;this.source=e.trim();let n=this.source.match(i?UVe:MVe);if(!n||i&&!tge.some(o=>o===n[1]))return;this.algorithm=n[1],this.digest=n[2];let s=n[3];this.options=s?s.slice(1).split(\"?\"):[]}hexDigest(){return this.digest&&Buffer.from(this.digest,\"base64\").toString(\"hex\")}toJSON(){return this.toString()}toString(e){if(e=Cn(e),e.strict&&!(tge.some(i=>i===this.algorithm)&&this.digest.match(OVe)&&(this.options||[]).every(i=>i.match(KVe))))return\"\";let r=this.options&&this.options.length?`?${this.options.join(\"?\")}`:\"\";return`${this.algorithm}-${this.digest}${r}`}},Ih=class{get isIntegrity(){return!0}toJSON(){return this.toString()}toString(e){e=Cn(e);let r=e.sep||\" \";return e.strict&&(r=r.replace(/\\S+/g,\" \")),Object.keys(this).map(i=>this[i].map(n=>Pu.prototype.toString.call(n,e)).filter(n=>n.length).join(r)).filter(i=>i.length).join(r)}concat(e,r){r=Cn(r);let i=typeof e==\"string\"?e:cE(e,r);return ma(`${this.toString(r)} ${i}`,r)}hexDigest(){return ma(this,{single:!0}).hexDigest()}match(e,r){r=Cn(r);let i=ma(e,r),n=i.pickAlgorithm(r);return this[n]&&i[n]&&this[n].find(s=>i[n].find(o=>s.digest===o.digest))||!1}pickAlgorithm(e){e=Cn(e);let r=e.pickAlgorithm,i=Object.keys(this);if(!i.length)throw new Error(`No algorithms available for ${JSON.stringify(this.toString())}`);return i.reduce((n,s)=>r(n,s)||n)}};Ca.exports.parse=ma;function ma(t,e){if(e=Cn(e),typeof t==\"string\")return lO(t,e);if(t.algorithm&&t.digest){let r=new Ih;return r[t.algorithm]=[t],lO(cE(r,e),e)}else return lO(cE(t,e),e)}function lO(t,e){return e.single?new Pu(t,e):t.trim().split(/\\s+/).reduce((r,i)=>{let n=new Pu(i,e);if(n.algorithm&&n.digest){let s=n.algorithm;r[s]||(r[s]=[]),r[s].push(n)}return r},new Ih)}Ca.exports.stringify=cE;function 
cE(t,e){return e=Cn(e),t.algorithm&&t.digest?Pu.prototype.toString.call(t,e):typeof t==\"string\"?cE(ma(t,e),e):Ih.prototype.toString.call(t,e)}Ca.exports.fromHex=jVe;function jVe(t,e,r){r=Cn(r);let i=r.options&&r.options.length?`?${r.options.join(\"?\")}`:\"\";return ma(`${e}-${Buffer.from(t,\"hex\").toString(\"base64\")}${i}`,r)}Ca.exports.fromData=GVe;function GVe(t,e){e=Cn(e);let r=e.algorithms,i=e.options&&e.options.length?`?${e.options.join(\"?\")}`:\"\";return r.reduce((n,s)=>{let o=lE.createHash(s).update(t).digest(\"base64\"),a=new Pu(`${s}-${o}${i}`,e);if(a.algorithm&&a.digest){let l=a.algorithm;n[l]||(n[l]=[]),n[l].push(a)}return n},new Ih)}Ca.exports.fromStream=YVe;function YVe(t,e){e=Cn(e);let r=e.Promise||Promise,i=cO(e);return new r((n,s)=>{t.pipe(i),t.on(\"error\",s),i.on(\"error\",s);let o;i.on(\"integrity\",a=>{o=a}),i.on(\"end\",()=>n(o)),i.on(\"data\",()=>{})})}Ca.exports.checkData=qVe;function qVe(t,e,r){if(r=Cn(r),e=ma(e,r),!Object.keys(e).length){if(r.error)throw Object.assign(new Error(\"No valid integrity hashes to check against\"),{code:\"EINTEGRITY\"});return!1}let i=e.pickAlgorithm(r),n=lE.createHash(i).update(t).digest(\"base64\"),s=ma({algorithm:i,digest:n}),o=s.match(e,r);if(o||!r.error)return o;if(typeof r.size==\"number\"&&t.length!==r.size){let a=new Error(`data size mismatch when checking ${e}.\n  Wanted: ${r.size}\n  Found: ${t.length}`);throw a.code=\"EBADSIZE\",a.found=t.length,a.expected=r.size,a.sri=e,a}else{let a=new Error(`Integrity checksum failed when using ${i}: Wanted ${e}, but got ${s}. (${t.length} bytes)`);throw a.code=\"EINTEGRITY\",a.found=s,a.expected=e,a.algorithm=i,a.sri=e,a}}Ca.exports.checkStream=JVe;function JVe(t,e,r){r=Cn(r);let i=r.Promise||Promise,n=cO(r.concat({integrity:e}));return new i((s,o)=>{t.pipe(n),t.on(\"error\",o),n.on(\"error\",o);let a;n.on(\"verified\",l=>{a=l}),n.on(\"end\",()=>s(a)),n.on(\"data\",()=>{})})}Ca.exports.integrityStream=cO;function cO(t){t=Cn(t);let e=t.integrity&&ma(t.integrity,t),r=e&&Object.keys(e).length,i=r&&e.pickAlgorithm(t),n=r&&e[i],s=Array.from(new Set(t.algorithms.concat(i?[i]:[]))),o=s.map(lE.createHash),a=0,l=new TVe({transform(c,u,g){a+=c.length,o.forEach(f=>f.update(c,u)),g(null,c,u)}}).on(\"end\",()=>{let c=t.options&&t.options.length?`?${t.options.join(\"?\")}`:\"\",u=ma(o.map((f,h)=>`${s[h]}-${f.digest(\"base64\")}${c}`).join(\" \"),t),g=r&&u.match(e,t);if(typeof t.size==\"number\"&&a!==t.size){let f=new Error(`stream size mismatch when checking ${e}.\n  Wanted: ${t.size}\n  Found: ${a}`);f.code=\"EBADSIZE\",f.found=a,f.expected=t.size,f.sri=e,l.emit(\"error\",f)}else if(t.integrity&&!g){let f=new Error(`${e} integrity checksum failed when using ${i}: wanted ${n} but got ${u}. 
(${a} bytes)`);f.code=\"EINTEGRITY\",f.found=u,f.expected=n,f.algorithm=i,f.sri=e,l.emit(\"error\",f)}else l.emit(\"size\",a),l.emit(\"integrity\",u),g&&l.emit(\"verified\",g)});return l}Ca.exports.create=WVe;function WVe(t){t=Cn(t);let e=t.algorithms,r=t.options.length?`?${t.options.join(\"?\")}`:\"\",i=e.map(lE.createHash);return{update:function(n,s){return i.forEach(o=>o.update(n,s)),this},digest:function(n){return e.reduce((o,a)=>{let l=i.shift().digest(\"base64\"),c=new Pu(`${a}-${l}${r}`,t);if(c.algorithm&&c.digest){let u=c.algorithm;o[u]||(o[u]=[]),o[u].push(c)}return o},new Ih)}}}var zVe=new Set(lE.getHashes()),rge=[\"md5\",\"whirlpool\",\"sha1\",\"sha224\",\"sha256\",\"sha384\",\"sha512\",\"sha3\",\"sha3-256\",\"sha3-384\",\"sha3-512\",\"sha3_256\",\"sha3_384\",\"sha3_512\"].filter(t=>zVe.has(t));function HVe(t,e){return rge.indexOf(t.toLowerCase())>=rge.indexOf(e.toLowerCase())?t:e}});var QC={};ft(QC,{BuildType:()=>As,Cache:()=>Nt,Configuration:()=>ye,DEFAULT_LOCK_FILENAME:()=>wx,DEFAULT_RC_FILENAME:()=>yx,FormatType:()=>Di,InstallMode:()=>di,LightReport:()=>uA,LinkType:()=>Qt,Manifest:()=>At,MessageName:()=>$,MultiFetcher:()=>yd,PackageExtensionStatus:()=>qi,PackageExtensionType:()=>yi,Project:()=>ze,ProjectLookup:()=>ol,Report:()=>Ji,ReportError:()=>ct,SettingsType:()=>Ie,StreamReport:()=>Je,TAG_REGEXP:()=>Gg,TelemetryManager:()=>bC,ThrowReport:()=>pi,VirtualFetcher:()=>Bd,Workspace:()=>BC,WorkspaceFetcher:()=>bd,WorkspaceResolver:()=>si,YarnVersion:()=>Ur,execUtils:()=>Fr,folderUtils:()=>hx,formatUtils:()=>ae,hashUtils:()=>Dn,httpUtils:()=>ir,miscUtils:()=>Se,nodeUtils:()=>qg,parseMessageName:()=>BI,scriptUtils:()=>Zt,semverUtils:()=>Wt,stringifyMessageName:()=>YA,structUtils:()=>P,tgzUtils:()=>wi,treeUtils:()=>as});var Fr={};ft(Fr,{EndStrategy:()=>is,ExecError:()=>xx,PipeError:()=>Qw,execvp:()=>Eke,pipevp:()=>$o});var Zh={};ft(Zh,{AliasFS:()=>Pa,CwdFS:()=>_t,DEFAULT_COMPRESSION_LEVEL:()=>ic,FakeFS:()=>KA,Filename:()=>Pt,JailFS:()=>Da,LazyFS:()=>zh,LinkStrategy:()=>jh,NoFS:()=>zE,NodeFS:()=>ar,PortablePath:()=>Me,PosixFS:()=>_h,ProxiedFS:()=>bi,VirtualFS:()=>Jr,ZipFS:()=>Ai,ZipOpenFS:()=>ms,constants:()=>Dr,extendFs:()=>VE,normalizeLineEndings:()=>$l,npath:()=>j,opendir:()=>qE,patchFs:()=>bQ,ppath:()=>k,statUtils:()=>uQ,toFilename:()=>qr,xfs:()=>K});var Dr={};ft(Dr,{SAFE_TIME:()=>cQ,S_IFDIR:()=>Sa,S_IFLNK:()=>xa,S_IFMT:()=>zn,S_IFREG:()=>ka});var zn=61440,Sa=16384,ka=32768,xa=40960,cQ=456789e3;var uQ={};ft(uQ,{BigIntStatsEntry:()=>Uh,DEFAULT_MODE:()=>Mh,DirEntry:()=>sM,StatEntry:()=>MA,areStatsEqual:()=>fQ,clearStats:()=>ME,convertToBigIntStats:()=>UE,makeDefaultStats:()=>Kh,makeEmptyStats:()=>dfe});var gQ=ge(require(\"util\"));var Mh=ka|420,sM=class{constructor(){this.name=\"\";this.mode=0}isBlockDevice(){return!1}isCharacterDevice(){return!1}isDirectory(){return(this.mode&zn)===Sa}isFIFO(){return!1}isFile(){return(this.mode&zn)===ka}isSocket(){return!1}isSymbolicLink(){return(this.mode&zn)===xa}},MA=class{constructor(){this.uid=0;this.gid=0;this.size=0;this.blksize=0;this.atimeMs=0;this.mtimeMs=0;this.ctimeMs=0;this.birthtimeMs=0;this.atime=new Date(0);this.mtime=new Date(0);this.ctime=new Date(0);this.birthtime=new 
Date(0);this.dev=0;this.ino=0;this.mode=Mh;this.nlink=1;this.rdev=0;this.blocks=1}isBlockDevice(){return!1}isCharacterDevice(){return!1}isDirectory(){return(this.mode&zn)===Sa}isFIFO(){return!1}isFile(){return(this.mode&zn)===ka}isSocket(){return!1}isSymbolicLink(){return(this.mode&zn)===xa}},Uh=class{constructor(){this.uid=BigInt(0);this.gid=BigInt(0);this.size=BigInt(0);this.blksize=BigInt(0);this.atimeMs=BigInt(0);this.mtimeMs=BigInt(0);this.ctimeMs=BigInt(0);this.birthtimeMs=BigInt(0);this.atimeNs=BigInt(0);this.mtimeNs=BigInt(0);this.ctimeNs=BigInt(0);this.birthtimeNs=BigInt(0);this.atime=new Date(0);this.mtime=new Date(0);this.ctime=new Date(0);this.birthtime=new Date(0);this.dev=BigInt(0);this.ino=BigInt(0);this.mode=BigInt(Mh);this.nlink=BigInt(1);this.rdev=BigInt(0);this.blocks=BigInt(1)}isBlockDevice(){return!1}isCharacterDevice(){return!1}isDirectory(){return(this.mode&BigInt(zn))===BigInt(Sa)}isFIFO(){return!1}isFile(){return(this.mode&BigInt(zn))===BigInt(ka)}isSocket(){return!1}isSymbolicLink(){return(this.mode&BigInt(zn))===BigInt(xa)}};function Kh(){return new MA}function dfe(){return ME(Kh())}function ME(t){for(let e in t)if(Object.prototype.hasOwnProperty.call(t,e)){let r=t[e];typeof r==\"number\"?t[e]=0:typeof r==\"bigint\"?t[e]=BigInt(0):gQ.types.isDate(r)&&(t[e]=new Date(0))}return t}function UE(t){let e=new Uh;for(let r in t)if(Object.prototype.hasOwnProperty.call(t,r)){let i=t[r];typeof i==\"number\"?e[r]=BigInt(i):gQ.types.isDate(i)&&(e[r]=new Date(i))}return e.atimeNs=e.atimeMs*BigInt(1e6),e.mtimeNs=e.mtimeMs*BigInt(1e6),e.ctimeNs=e.ctimeMs*BigInt(1e6),e.birthtimeNs=e.birthtimeMs*BigInt(1e6),e}function fQ(t,e){if(t.atimeMs!==e.atimeMs||t.birthtimeMs!==e.birthtimeMs||t.blksize!==e.blksize||t.blocks!==e.blocks||t.ctimeMs!==e.ctimeMs||t.dev!==e.dev||t.gid!==e.gid||t.ino!==e.ino||t.isBlockDevice()!==e.isBlockDevice()||t.isCharacterDevice()!==e.isCharacterDevice()||t.isDirectory()!==e.isDirectory()||t.isFIFO()!==e.isFIFO()||t.isFile()!==e.isFile()||t.isSocket()!==e.isSocket()||t.isSymbolicLink()!==e.isSymbolicLink()||t.mode!==e.mode||t.mtimeMs!==e.mtimeMs||t.nlink!==e.nlink||t.rdev!==e.rdev||t.size!==e.size||t.uid!==e.uid)return!1;let r=t,i=e;return!(r.atimeNs!==i.atimeNs||r.mtimeNs!==i.mtimeNs||r.ctimeNs!==i.ctimeNs||r.birthtimeNs!==i.birthtimeNs)}var HE=ge(require(\"fs\"));var Hh=ge(require(\"path\")),oM;(function(i){i[i.File=0]=\"File\",i[i.Portable=1]=\"Portable\",i[i.Native=2]=\"Native\"})(oM||(oM={}));var Me={root:\"/\",dot:\".\"},Pt={nodeModules:\"node_modules\",manifest:\"package.json\",lockfile:\"yarn.lock\",virtual:\"__virtual__\",pnpJs:\".pnp.js\",pnpCjs:\".pnp.cjs\",rc:\".yarnrc.yml\"},j=Object.create(Hh.default),k=Object.create(Hh.default.posix);j.cwd=()=>process.cwd();k.cwd=()=>hQ(process.cwd());k.resolve=(...t)=>t.length>0&&k.isAbsolute(t[0])?Hh.default.posix.resolve(...t):Hh.default.posix.resolve(k.cwd(),...t);var aM=function(t,e,r){return e=t.normalize(e),r=t.normalize(r),e===r?\".\":(e.endsWith(t.sep)||(e=e+t.sep),r.startsWith(e)?r.slice(e.length):null)};j.fromPortablePath=AM;j.toPortablePath=hQ;j.contains=(t,e)=>aM(j,t,e);k.contains=(t,e)=>aM(k,t,e);var Cfe=/^([a-zA-Z]:.*)$/,mfe=/^\\/\\/(\\.\\/)?(.*)$/,Efe=/^\\/([a-zA-Z]:.*)$/,Ife=/^\\/unc\\/(\\.dot\\/)?(.*)$/;function AM(t){if(process.platform!==\"win32\")return t;let e,r;if(e=t.match(Efe))t=e[1];else if(r=t.match(Ife))t=`\\\\\\\\${r[1]?\".\\\\\":\"\"}${r[2]}`;else return t;return t.replace(/\\//g,\"\\\\\")}function hQ(t){if(process.platform!==\"win32\")return t;t=t.replace(/\\\\/g,\"/\");let 
e,r;return(e=t.match(Cfe))?t=`/${e[1]}`:(r=t.match(mfe))&&(t=`/unc/${r[1]?\".dot/\":\"\"}${r[2]}`),t}function KE(t,e){return t===j?AM(e):hQ(e)}function qr(t){if(j.parse(t).dir!==\"\"||k.parse(t).dir!==\"\")throw new Error(`Invalid filename: \"${t}\"`);return t}var jE=new Date(cQ*1e3),jh;(function(r){r.Allow=\"allow\",r.ReadOnly=\"readOnly\"})(jh||(jh={}));async function lM(t,e,r,i,n){let s=t.pathUtils.normalize(e),o=r.pathUtils.normalize(i),a=[],l=[],{atime:c,mtime:u}=n.stableTime?{atime:jE,mtime:jE}:await r.lstatPromise(o);await t.mkdirpPromise(t.pathUtils.dirname(e),{utimes:[c,u]});let g=typeof t.lutimesPromise==\"function\"?t.lutimesPromise.bind(t):t.utimesPromise.bind(t);await pQ(a,l,g,t,s,r,o,te(N({},n),{didParentExist:!0}));for(let f of a)await f();await Promise.all(l.map(f=>f()))}async function pQ(t,e,r,i,n,s,o,a){var h,p;let l=a.didParentExist?await yfe(i,n):null,c=await s.lstatPromise(o),{atime:u,mtime:g}=a.stableTime?{atime:jE,mtime:jE}:c,f;switch(!0){case c.isDirectory():f=await wfe(t,e,r,i,n,l,s,o,c,a);break;case c.isFile():f=await Bfe(t,e,r,i,n,l,s,o,c,a);break;case c.isSymbolicLink():f=await bfe(t,e,r,i,n,l,s,o,c,a);break;default:throw new Error(`Unsupported file type (${c.mode})`)}return(f||((h=l==null?void 0:l.mtime)==null?void 0:h.getTime())!==g.getTime()||((p=l==null?void 0:l.atime)==null?void 0:p.getTime())!==u.getTime())&&(e.push(()=>r(n,u,g)),f=!0),(l===null||(l.mode&511)!=(c.mode&511))&&(e.push(()=>i.chmodPromise(n,c.mode&511)),f=!0),f}async function yfe(t,e){try{return await t.lstatPromise(e)}catch(r){return null}}async function wfe(t,e,r,i,n,s,o,a,l,c){if(s!==null&&!s.isDirectory())if(c.overwrite)t.push(async()=>i.removePromise(n)),s=null;else return!1;let u=!1;s===null&&(t.push(async()=>{try{await i.mkdirPromise(n,{mode:l.mode})}catch(h){if(h.code!==\"EEXIST\")throw h}}),u=!0);let g=await o.readdirPromise(a),f=c.didParentExist&&!s?te(N({},c),{didParentExist:!1}):c;if(c.stableSort)for(let h of g.sort())await pQ(t,e,r,i,i.pathUtils.join(n,h),o,o.pathUtils.join(a,h),f)&&(u=!0);else(await Promise.all(g.map(async p=>{await pQ(t,e,r,i,i.pathUtils.join(n,p),o,o.pathUtils.join(a,p),f)}))).some(p=>p)&&(u=!0);return u}var dQ=new WeakMap;function CQ(t,e,r,i,n){return async()=>{await t.linkPromise(r,e),n===jh.ReadOnly&&(i.mode&=~146,await t.chmodPromise(e,i.mode))}}function Qfe(t,e,r,i,n){let s=dQ.get(t);return typeof s==\"undefined\"?async()=>{try{await t.copyFilePromise(r,e,HE.default.constants.COPYFILE_FICLONE_FORCE),dQ.set(t,!0)}catch(o){if(o.code===\"ENOSYS\"||o.code===\"ENOTSUP\")dQ.set(t,!1),await CQ(t,e,r,i,n)();else throw o}}:s?async()=>t.copyFilePromise(r,e,HE.default.constants.COPYFILE_FICLONE_FORCE):CQ(t,e,r,i,n)}async function Bfe(t,e,r,i,n,s,o,a,l,c){var f;if(s!==null)if(c.overwrite)t.push(async()=>i.removePromise(n)),s=null;else return!1;let u=(f=c.linkStrategy)!=null?f:null,g=i===o?u!==null?Qfe(i,n,a,l,u):async()=>i.copyFilePromise(a,n,HE.default.constants.COPYFILE_FICLONE):u!==null?CQ(i,n,a,l,u):async()=>i.writeFilePromise(n,await o.readFilePromise(a));return t.push(async()=>g()),!0}async function bfe(t,e,r,i,n,s,o,a,l,c){if(s!==null)if(c.overwrite)t.push(async()=>i.removePromise(n)),s=null;else return!1;return t.push(async()=>{await i.symlinkPromise(KE(i.pathUtils,await o.readlinkPromise(a)),n)}),!0}function Cs(t,e){return Object.assign(new Error(`${t}: ${e}`),{code:t})}function GE(t){return Cs(\"EBUSY\",t)}function Gh(t,e){return Cs(\"ENOSYS\",`${t}, ${e}`)}function UA(t){return Cs(\"EINVAL\",`invalid argument, ${t}`)}function en(t){return Cs(\"EBADF\",`bad 
file descriptor, ${t}`)}function to(t){return Cs(\"ENOENT\",`no such file or directory, ${t}`)}function Do(t){return Cs(\"ENOTDIR\",`not a directory, ${t}`)}function Yh(t){return Cs(\"EISDIR\",`illegal operation on a directory, ${t}`)}function YE(t){return Cs(\"EEXIST\",`file already exists, ${t}`)}function In(t){return Cs(\"EROFS\",`read-only filesystem, ${t}`)}function cM(t){return Cs(\"ENOTEMPTY\",`directory not empty, ${t}`)}function uM(t){return Cs(\"EOPNOTSUPP\",`operation not supported, ${t}`)}function gM(){return Cs(\"ERR_DIR_CLOSED\",\"Directory handle was closed\")}var mQ=class extends Error{constructor(e,r){super(e);this.name=\"Libzip Error\",this.code=r}};var fM=class{constructor(e,r,i={}){this.path=e;this.nextDirent=r;this.opts=i;this.closed=!1}throwIfClosed(){if(this.closed)throw gM()}async*[Symbol.asyncIterator](){try{let e;for(;(e=await this.read())!==null;)yield e}finally{await this.close()}}read(e){let r=this.readSync();return typeof e!=\"undefined\"?e(null,r):Promise.resolve(r)}readSync(){return this.throwIfClosed(),this.nextDirent()}close(e){return this.closeSync(),typeof e!=\"undefined\"?e(null):Promise.resolve()}closeSync(){var e,r;this.throwIfClosed(),(r=(e=this.opts).onClose)==null||r.call(e),this.closed=!0}};function qE(t,e,r,i){let n=()=>{let s=r.shift();return typeof s==\"undefined\"?null:Object.assign(t.statSync(t.pathUtils.join(e,s)),{name:s})};return new fM(e,n,i)}var hM=ge(require(\"os\"));var KA=class{constructor(e){this.pathUtils=e}async*genTraversePromise(e,{stableSort:r=!1}={}){let i=[e];for(;i.length>0;){let n=i.shift();if((await this.lstatPromise(n)).isDirectory()){let o=await this.readdirPromise(n);if(r)for(let a of o.sort())i.push(this.pathUtils.join(n,a));else throw new Error(\"Not supported\")}else yield n}}async removePromise(e,{recursive:r=!0,maxRetries:i=5}={}){let n;try{n=await this.lstatPromise(e)}catch(s){if(s.code===\"ENOENT\")return;throw s}if(n.isDirectory()){if(r){let s=await this.readdirPromise(e);await Promise.all(s.map(o=>this.removePromise(this.pathUtils.resolve(e,o))))}for(let s=0;s<=i;s++)try{await this.rmdirPromise(e);break}catch(o){if(o.code!==\"EBUSY\"&&o.code!==\"ENOTEMPTY\")throw o;s<i&&await new Promise(a=>setTimeout(a,s*100))}}else await this.unlinkPromise(e)}removeSync(e,{recursive:r=!0}={}){let i;try{i=this.lstatSync(e)}catch(n){if(n.code===\"ENOENT\")return;throw n}if(i.isDirectory()){if(r)for(let n of this.readdirSync(e))this.removeSync(this.pathUtils.resolve(e,n));this.rmdirSync(e)}else this.unlinkSync(e)}async mkdirpPromise(e,{chmod:r,utimes:i}={}){if(e=this.resolve(e),e===this.pathUtils.dirname(e))return;let n=e.split(this.pathUtils.sep);for(let s=2;s<=n.length;++s){let o=n.slice(0,s).join(this.pathUtils.sep);if(!this.existsSync(o)){try{await this.mkdirPromise(o)}catch(a){if(a.code===\"EEXIST\")continue;throw a}if(r!=null&&await this.chmodPromise(o,r),i!=null)await this.utimesPromise(o,i[0],i[1]);else{let a=await this.statPromise(this.pathUtils.dirname(o));await this.utimesPromise(o,a.atime,a.mtime)}}}}mkdirpSync(e,{chmod:r,utimes:i}={}){if(e=this.resolve(e),e===this.pathUtils.dirname(e))return;let n=e.split(this.pathUtils.sep);for(let s=2;s<=n.length;++s){let o=n.slice(0,s).join(this.pathUtils.sep);if(!this.existsSync(o)){try{this.mkdirSync(o)}catch(a){if(a.code===\"EEXIST\")continue;throw a}if(r!=null&&this.chmodSync(o,r),i!=null)this.utimesSync(o,i[0],i[1]);else{let a=this.statSync(this.pathUtils.dirname(o));this.utimesSync(o,a.atime,a.mtime)}}}}async 
copyPromise(e,r,{baseFs:i=this,overwrite:n=!0,stableSort:s=!1,stableTime:o=!1,linkStrategy:a=null}={}){return await lM(this,e,i,r,{overwrite:n,stableSort:s,stableTime:o,linkStrategy:a})}copySync(e,r,{baseFs:i=this,overwrite:n=!0}={}){let s=i.lstatSync(r),o=this.existsSync(e);if(s.isDirectory()){this.mkdirpSync(e);let l=i.readdirSync(r);for(let c of l)this.copySync(this.pathUtils.join(e,c),i.pathUtils.join(r,c),{baseFs:i,overwrite:n})}else if(s.isFile()){if(!o||n){o&&this.removeSync(e);let l=i.readFileSync(r);this.writeFileSync(e,l)}}else if(s.isSymbolicLink()){if(!o||n){o&&this.removeSync(e);let l=i.readlinkSync(r);this.symlinkSync(KE(this.pathUtils,l),e)}}else throw new Error(`Unsupported file type (file: ${r}, mode: 0o${s.mode.toString(8).padStart(6,\"0\")})`);let a=s.mode&511;this.chmodSync(e,a)}async changeFilePromise(e,r,i={}){return Buffer.isBuffer(r)?this.changeFileBufferPromise(e,r,i):this.changeFileTextPromise(e,r,i)}async changeFileBufferPromise(e,r,{mode:i}={}){let n=Buffer.alloc(0);try{n=await this.readFilePromise(e)}catch(s){}Buffer.compare(n,r)!==0&&await this.writeFilePromise(e,r,{mode:i})}async changeFileTextPromise(e,r,{automaticNewlines:i,mode:n}={}){let s=\"\";try{s=await this.readFilePromise(e,\"utf8\")}catch(a){}let o=i?$l(s,r):r;s!==o&&await this.writeFilePromise(e,o,{mode:n})}changeFileSync(e,r,i={}){return Buffer.isBuffer(r)?this.changeFileBufferSync(e,r,i):this.changeFileTextSync(e,r,i)}changeFileBufferSync(e,r,{mode:i}={}){let n=Buffer.alloc(0);try{n=this.readFileSync(e)}catch(s){}Buffer.compare(n,r)!==0&&this.writeFileSync(e,r,{mode:i})}changeFileTextSync(e,r,{automaticNewlines:i=!1,mode:n}={}){let s=\"\";try{s=this.readFileSync(e,\"utf8\")}catch(a){}let o=i?$l(s,r):r;s!==o&&this.writeFileSync(e,o,{mode:n})}async movePromise(e,r){try{await this.renamePromise(e,r)}catch(i){if(i.code===\"EXDEV\")await this.copyPromise(r,e),await this.removePromise(e);else throw i}}moveSync(e,r){try{this.renameSync(e,r)}catch(i){if(i.code===\"EXDEV\")this.copySync(r,e),this.removeSync(e);else throw i}}async lockPromise(e,r){let i=`${e}.flock`,n=1e3/60,s=Date.now(),o=null,a=async()=>{let l;try{[l]=await this.readJsonPromise(i)}catch(c){return Date.now()-s<500}try{return process.kill(l,0),!0}catch(c){return!1}};for(;o===null;)try{o=await this.openPromise(i,\"wx\")}catch(l){if(l.code===\"EEXIST\"){if(!await a())try{await this.unlinkPromise(i);continue}catch(c){}if(Date.now()-s<60*1e3)await new Promise(c=>setTimeout(c,n));else throw new Error(`Couldn't acquire a lock in a reasonable time (via ${i})`)}else throw l}await this.writePromise(o,JSON.stringify([process.pid]));try{return await r()}finally{try{await this.closePromise(o),await this.unlinkPromise(i)}catch(l){}}}async readJsonPromise(e){let r=await this.readFilePromise(e,\"utf8\");try{return JSON.parse(r)}catch(i){throw i.message+=` (in ${e})`,i}}readJsonSync(e){let r=this.readFileSync(e,\"utf8\");try{return JSON.parse(r)}catch(i){throw i.message+=` (in ${e})`,i}}async writeJsonPromise(e,r){return await this.writeFilePromise(e,`${JSON.stringify(r,null,2)}\n`)}writeJsonSync(e,r){return this.writeFileSync(e,`${JSON.stringify(r,null,2)}\n`)}async preserveTimePromise(e,r){let i=await this.lstatPromise(e),n=await r();typeof n!=\"undefined\"&&(e=n),this.lutimesPromise?await this.lutimesPromise(e,i.atime,i.mtime):i.isSymbolicLink()||await this.utimesPromise(e,i.atime,i.mtime)}async preserveTimeSync(e,r){let i=this.lstatSync(e),n=r();typeof 
n!=\"undefined\"&&(e=n),this.lutimesSync?this.lutimesSync(e,i.atime,i.mtime):i.isSymbolicLink()||this.utimesSync(e,i.atime,i.mtime)}},ec=class extends KA{constructor(){super(k)}};function vfe(t){let e=t.match(/\\r?\\n/g);if(e===null)return hM.EOL;let r=e.filter(n=>n===`\\r\n`).length,i=e.length-r;return r>i?`\\r\n`:`\n`}function $l(t,e){return e.replace(/\\r?\\n/g,vfe(t))}var qu=ge(require(\"fs\")),EQ=ge(require(\"stream\")),mM=ge(require(\"util\")),IQ=ge(require(\"zlib\"));var pM=ge(require(\"fs\"));var ar=class extends ec{constructor(e=pM.default){super();this.realFs=e,typeof this.realFs.lutimes!=\"undefined\"&&(this.lutimesPromise=this.lutimesPromiseImpl,this.lutimesSync=this.lutimesSyncImpl)}getExtractHint(){return!1}getRealPath(){return Me.root}resolve(e){return k.resolve(e)}async openPromise(e,r,i){return await new Promise((n,s)=>{this.realFs.open(j.fromPortablePath(e),r,i,this.makeCallback(n,s))})}openSync(e,r,i){return this.realFs.openSync(j.fromPortablePath(e),r,i)}async opendirPromise(e,r){return await new Promise((i,n)=>{typeof r!=\"undefined\"?this.realFs.opendir(j.fromPortablePath(e),r,this.makeCallback(i,n)):this.realFs.opendir(j.fromPortablePath(e),this.makeCallback(i,n))}).then(i=>Object.defineProperty(i,\"path\",{value:e,configurable:!0,writable:!0}))}opendirSync(e,r){let i=typeof r!=\"undefined\"?this.realFs.opendirSync(j.fromPortablePath(e),r):this.realFs.opendirSync(j.fromPortablePath(e));return Object.defineProperty(i,\"path\",{value:e,configurable:!0,writable:!0})}async readPromise(e,r,i=0,n=0,s=-1){return await new Promise((o,a)=>{this.realFs.read(e,r,i,n,s,(l,c)=>{l?a(l):o(c)})})}readSync(e,r,i,n,s){return this.realFs.readSync(e,r,i,n,s)}async writePromise(e,r,i,n,s){return await new Promise((o,a)=>typeof r==\"string\"?this.realFs.write(e,r,i,this.makeCallback(o,a)):this.realFs.write(e,r,i,n,s,this.makeCallback(o,a)))}writeSync(e,r,i,n,s){return typeof r==\"string\"?this.realFs.writeSync(e,r,i):this.realFs.writeSync(e,r,i,n,s)}async closePromise(e){await new Promise((r,i)=>{this.realFs.close(e,this.makeCallback(r,i))})}closeSync(e){this.realFs.closeSync(e)}createReadStream(e,r){let i=e!==null?j.fromPortablePath(e):e;return this.realFs.createReadStream(i,r)}createWriteStream(e,r){let i=e!==null?j.fromPortablePath(e):e;return this.realFs.createWriteStream(i,r)}async realpathPromise(e){return await new Promise((r,i)=>{this.realFs.realpath(j.fromPortablePath(e),{},this.makeCallback(r,i))}).then(r=>j.toPortablePath(r))}realpathSync(e){return j.toPortablePath(this.realFs.realpathSync(j.fromPortablePath(e),{}))}async existsPromise(e){return await new Promise(r=>{this.realFs.exists(j.fromPortablePath(e),r)})}accessSync(e,r){return this.realFs.accessSync(j.fromPortablePath(e),r)}async accessPromise(e,r){return await new Promise((i,n)=>{this.realFs.access(j.fromPortablePath(e),r,this.makeCallback(i,n))})}existsSync(e){return this.realFs.existsSync(j.fromPortablePath(e))}async statPromise(e,r){return await new Promise((i,n)=>{r?this.realFs.stat(j.fromPortablePath(e),r,this.makeCallback(i,n)):this.realFs.stat(j.fromPortablePath(e),this.makeCallback(i,n))})}statSync(e,r){return r?this.realFs.statSync(j.fromPortablePath(e),r):this.realFs.statSync(j.fromPortablePath(e))}async fstatPromise(e,r){return await new Promise((i,n)=>{r?this.realFs.fstat(e,r,this.makeCallback(i,n)):this.realFs.fstat(e,this.makeCallback(i,n))})}fstatSync(e,r){return r?this.realFs.fstatSync(e,r):this.realFs.fstatSync(e)}async lstatPromise(e,r){return await new 
Promise((i,n)=>{r?this.realFs.lstat(j.fromPortablePath(e),r,this.makeCallback(i,n)):this.realFs.lstat(j.fromPortablePath(e),this.makeCallback(i,n))})}lstatSync(e,r){return r?this.realFs.lstatSync(j.fromPortablePath(e),r):this.realFs.lstatSync(j.fromPortablePath(e))}async chmodPromise(e,r){return await new Promise((i,n)=>{this.realFs.chmod(j.fromPortablePath(e),r,this.makeCallback(i,n))})}chmodSync(e,r){return this.realFs.chmodSync(j.fromPortablePath(e),r)}async chownPromise(e,r,i){return await new Promise((n,s)=>{this.realFs.chown(j.fromPortablePath(e),r,i,this.makeCallback(n,s))})}chownSync(e,r,i){return this.realFs.chownSync(j.fromPortablePath(e),r,i)}async renamePromise(e,r){return await new Promise((i,n)=>{this.realFs.rename(j.fromPortablePath(e),j.fromPortablePath(r),this.makeCallback(i,n))})}renameSync(e,r){return this.realFs.renameSync(j.fromPortablePath(e),j.fromPortablePath(r))}async copyFilePromise(e,r,i=0){return await new Promise((n,s)=>{this.realFs.copyFile(j.fromPortablePath(e),j.fromPortablePath(r),i,this.makeCallback(n,s))})}copyFileSync(e,r,i=0){return this.realFs.copyFileSync(j.fromPortablePath(e),j.fromPortablePath(r),i)}async appendFilePromise(e,r,i){return await new Promise((n,s)=>{let o=typeof e==\"string\"?j.fromPortablePath(e):e;i?this.realFs.appendFile(o,r,i,this.makeCallback(n,s)):this.realFs.appendFile(o,r,this.makeCallback(n,s))})}appendFileSync(e,r,i){let n=typeof e==\"string\"?j.fromPortablePath(e):e;i?this.realFs.appendFileSync(n,r,i):this.realFs.appendFileSync(n,r)}async writeFilePromise(e,r,i){return await new Promise((n,s)=>{let o=typeof e==\"string\"?j.fromPortablePath(e):e;i?this.realFs.writeFile(o,r,i,this.makeCallback(n,s)):this.realFs.writeFile(o,r,this.makeCallback(n,s))})}writeFileSync(e,r,i){let n=typeof e==\"string\"?j.fromPortablePath(e):e;i?this.realFs.writeFileSync(n,r,i):this.realFs.writeFileSync(n,r)}async unlinkPromise(e){return await new Promise((r,i)=>{this.realFs.unlink(j.fromPortablePath(e),this.makeCallback(r,i))})}unlinkSync(e){return this.realFs.unlinkSync(j.fromPortablePath(e))}async utimesPromise(e,r,i){return await new Promise((n,s)=>{this.realFs.utimes(j.fromPortablePath(e),r,i,this.makeCallback(n,s))})}utimesSync(e,r,i){this.realFs.utimesSync(j.fromPortablePath(e),r,i)}async lutimesPromiseImpl(e,r,i){let n=this.realFs.lutimes;if(typeof n==\"undefined\")throw Gh(\"unavailable Node binding\",`lutimes '${e}'`);return await new Promise((s,o)=>{n.call(this.realFs,j.fromPortablePath(e),r,i,this.makeCallback(s,o))})}lutimesSyncImpl(e,r,i){let n=this.realFs.lutimesSync;if(typeof n==\"undefined\")throw Gh(\"unavailable Node binding\",`lutimes '${e}'`);n.call(this.realFs,j.fromPortablePath(e),r,i)}async mkdirPromise(e,r){return await new Promise((i,n)=>{this.realFs.mkdir(j.fromPortablePath(e),r,this.makeCallback(i,n))})}mkdirSync(e,r){return this.realFs.mkdirSync(j.fromPortablePath(e),r)}async rmdirPromise(e,r){return await new Promise((i,n)=>{r?this.realFs.rmdir(j.fromPortablePath(e),r,this.makeCallback(i,n)):this.realFs.rmdir(j.fromPortablePath(e),this.makeCallback(i,n))})}rmdirSync(e,r){return this.realFs.rmdirSync(j.fromPortablePath(e),r)}async linkPromise(e,r){return await new Promise((i,n)=>{this.realFs.link(j.fromPortablePath(e),j.fromPortablePath(r),this.makeCallback(i,n))})}linkSync(e,r){return this.realFs.linkSync(j.fromPortablePath(e),j.fromPortablePath(r))}async symlinkPromise(e,r,i){return await new 
Promise((n,s)=>{this.realFs.symlink(j.fromPortablePath(e.replace(/\\/+$/,\"\")),j.fromPortablePath(r),i,this.makeCallback(n,s))})}symlinkSync(e,r,i){return this.realFs.symlinkSync(j.fromPortablePath(e.replace(/\\/+$/,\"\")),j.fromPortablePath(r),i)}async readFilePromise(e,r){return await new Promise((i,n)=>{let s=typeof e==\"string\"?j.fromPortablePath(e):e;this.realFs.readFile(s,r,this.makeCallback(i,n))})}readFileSync(e,r){let i=typeof e==\"string\"?j.fromPortablePath(e):e;return this.realFs.readFileSync(i,r)}async readdirPromise(e,r){return await new Promise((i,n)=>{(r==null?void 0:r.withFileTypes)?this.realFs.readdir(j.fromPortablePath(e),{withFileTypes:!0},this.makeCallback(i,n)):this.realFs.readdir(j.fromPortablePath(e),this.makeCallback(s=>i(s),n))})}readdirSync(e,r){return(r==null?void 0:r.withFileTypes)?this.realFs.readdirSync(j.fromPortablePath(e),{withFileTypes:!0}):this.realFs.readdirSync(j.fromPortablePath(e))}async readlinkPromise(e){return await new Promise((r,i)=>{this.realFs.readlink(j.fromPortablePath(e),this.makeCallback(r,i))}).then(r=>j.toPortablePath(r))}readlinkSync(e){return j.toPortablePath(this.realFs.readlinkSync(j.fromPortablePath(e)))}async truncatePromise(e,r){return await new Promise((i,n)=>{this.realFs.truncate(j.fromPortablePath(e),r,this.makeCallback(i,n))})}truncateSync(e,r){return this.realFs.truncateSync(j.fromPortablePath(e),r)}watch(e,r,i){return this.realFs.watch(j.fromPortablePath(e),r,i)}watchFile(e,r,i){return this.realFs.watchFile(j.fromPortablePath(e),r,i)}unwatchFile(e,r){return this.realFs.unwatchFile(j.fromPortablePath(e),r)}makeCallback(e,r){return(i,n)=>{i?r(i):e(n)}}};var dM=ge(require(\"events\"));var tc;(function(r){r.Change=\"change\",r.Stop=\"stop\"})(tc||(tc={}));var rc;(function(i){i.Ready=\"ready\",i.Running=\"running\",i.Stopped=\"stopped\"})(rc||(rc={}));function CM(t,e){if(t!==e)throw new Error(`Invalid StatWatcher status: expected '${e}', got '${t}'`)}var qh=class extends dM.EventEmitter{constructor(e,r,{bigint:i=!1}={}){super();this.status=rc.Ready;this.changeListeners=new Map;this.startTimeout=null;this.fakeFs=e,this.path=r,this.bigint=i,this.lastStats=this.stat()}static create(e,r,i){let n=new qh(e,r,i);return n.start(),n}start(){CM(this.status,rc.Ready),this.status=rc.Running,this.startTimeout=setTimeout(()=>{this.startTimeout=null,this.fakeFs.existsSync(this.path)||this.emit(tc.Change,this.lastStats,this.lastStats)},3)}stop(){CM(this.status,rc.Running),this.status=rc.Stopped,this.startTimeout!==null&&(clearTimeout(this.startTimeout),this.startTimeout=null),this.emit(tc.Stop)}stat(){try{return this.fakeFs.statSync(this.path,{bigint:this.bigint})}catch(e){let r=this.bigint?new Uh:new MA;return ME(r)}}makeInterval(e){let r=setInterval(()=>{let i=this.stat(),n=this.lastStats;fQ(i,n)||(this.lastStats=i,this.emit(tc.Change,i,n))},e.interval);return e.persistent?r:r.unref()}registerChangeListener(e,r){this.addListener(tc.Change,e),this.changeListeners.set(e,this.makeInterval(r))}unregisterChangeListener(e){this.removeListener(tc.Change,e);let r=this.changeListeners.get(e);typeof r!=\"undefined\"&&clearInterval(r),this.changeListeners.delete(e)}unregisterAllChangeListeners(){for(let e of this.changeListeners.keys())this.unregisterChangeListener(e)}hasChangeListeners(){return this.changeListeners.size>0}ref(){for(let e of this.changeListeners.values())e.ref();return this}unref(){for(let e of this.changeListeners.values())e.unref();return this}};var JE=new WeakMap;function WE(t,e,r,i){let n,s,o,a;switch(typeof 
r){case\"function\":n=!1,s=!0,o=5007,a=r;break;default:({bigint:n=!1,persistent:s=!0,interval:o=5007}=r),a=i;break}let l=JE.get(t);typeof l==\"undefined\"&&JE.set(t,l=new Map);let c=l.get(e);return typeof c==\"undefined\"&&(c=qh.create(t,e,{bigint:n}),l.set(e,c)),c.registerChangeListener(a,{persistent:s,interval:o}),c}function Jh(t,e,r){let i=JE.get(t);if(typeof i==\"undefined\")return;let n=i.get(e);typeof n!=\"undefined\"&&(typeof r==\"undefined\"?n.unregisterAllChangeListeners():n.unregisterChangeListener(r),n.hasChangeListeners()||(n.stop(),i.delete(e)))}function Wh(t){let e=JE.get(t);if(typeof e!=\"undefined\")for(let r of e.keys())Jh(t,r)}var ic=\"mixed\";function Sfe(t){if(typeof t==\"string\"&&String(+t)===t)return+t;if(Number.isFinite(t))return t<0?Date.now()/1e3:t;if(mM.types.isDate(t))return t.getTime()/1e3;throw new Error(\"Invalid time\")}function EM(){return Buffer.from([80,75,5,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])}var Ai=class extends ec{constructor(e,r){super();this.lzSource=null;this.listings=new Map;this.entries=new Map;this.fileSources=new Map;this.fds=new Map;this.nextFd=0;this.ready=!1;this.readOnly=!1;this.libzip=r.libzip;let i=r;if(this.level=typeof i.level!=\"undefined\"?i.level:ic,e!=null||(e=EM()),typeof e==\"string\"){let{baseFs:o=new ar}=i;this.baseFs=o,this.path=e}else this.path=null,this.baseFs=null;if(r.stats)this.stats=r.stats;else if(typeof e==\"string\")try{this.stats=this.baseFs.statSync(e)}catch(o){if(o.code===\"ENOENT\"&&i.create)this.stats=Kh();else throw o}else this.stats=Kh();let n=this.libzip.malloc(4);try{let o=0;if(typeof e==\"string\"&&i.create&&(o|=this.libzip.ZIP_CREATE|this.libzip.ZIP_TRUNCATE),r.readOnly&&(o|=this.libzip.ZIP_RDONLY,this.readOnly=!0),typeof e==\"string\")this.zip=this.libzip.open(j.fromPortablePath(e),o,n);else{let a=this.allocateUnattachedSource(e);try{this.zip=this.libzip.openFromSource(a,o,n),this.lzSource=a}catch(l){throw this.libzip.source.free(a),l}}if(this.zip===0){let a=this.libzip.struct.errorS();throw this.libzip.error.initWithCode(a,this.libzip.getValue(n,\"i32\")),this.makeLibzipError(a)}}finally{this.libzip.free(n)}this.listings.set(Me.root,new Set);let s=this.libzip.getNumEntries(this.zip,0);for(let o=0;o<s;++o){let a=this.libzip.getName(this.zip,o,0);if(k.isAbsolute(a))continue;let l=k.resolve(Me.root,a);this.registerEntry(l,o),a.endsWith(\"/\")&&this.registerListing(l)}if(this.symlinkCount=this.libzip.ext.countSymlinks(this.zip),this.symlinkCount===-1)throw this.makeLibzipError(this.libzip.getError(this.zip));this.ready=!0}makeLibzipError(e){let r=this.libzip.struct.errorCodeZip(e),i=this.libzip.error.strerror(e),n=new mQ(i,this.libzip.errors[r]);if(r===this.libzip.errors.ZIP_ER_CHANGED)throw new Error(`Assertion failed: Unexpected libzip error: ${n.message}`);return n}getExtractHint(e){for(let r of this.entries.keys()){let i=this.pathUtils.extname(r);if(e.relevantExtensions.has(i))return!0}return!1}getAllFiles(){return Array.from(this.entries.keys())}getRealPath(){if(!this.path)throw new Error(\"ZipFS don't have real paths when loaded from a buffer\");return this.path}getBufferAndClose(){if(this.prepareClose(),!this.lzSource)throw new Error(\"ZipFS was not created from a Buffer\");try{if(this.libzip.source.keep(this.lzSource),this.libzip.close(this.zip)===-1)throw this.makeLibzipError(this.libzip.getError(this.zip));if(this.libzip.source.open(this.lzSource)===-1)throw this.makeLibzipError(this.libzip.source.error(this.lzSource));if(this.libzip.source.seek(this.lzSource,0,0,this.libzip.SEEK_END)===-1)throw 
this.makeLibzipError(this.libzip.source.error(this.lzSource));let e=this.libzip.source.tell(this.lzSource);if(e===-1)throw this.makeLibzipError(this.libzip.source.error(this.lzSource));if(this.libzip.source.seek(this.lzSource,0,0,this.libzip.SEEK_SET)===-1)throw this.makeLibzipError(this.libzip.source.error(this.lzSource));let r=this.libzip.malloc(e);if(!r)throw new Error(\"Couldn't allocate enough memory\");try{let i=this.libzip.source.read(this.lzSource,r,e);if(i===-1)throw this.makeLibzipError(this.libzip.source.error(this.lzSource));if(i<e)throw new Error(\"Incomplete read\");if(i>e)throw new Error(\"Overread\");let n=this.libzip.HEAPU8.subarray(r,r+e);return Buffer.from(n)}finally{this.libzip.free(r)}}finally{this.libzip.source.close(this.lzSource),this.libzip.source.free(this.lzSource),this.ready=!1}}prepareClose(){if(!this.ready)throw GE(\"archive closed, close\");Wh(this)}saveAndClose(){if(!this.path||!this.baseFs)throw new Error(\"ZipFS cannot be saved and must be discarded when loaded from a buffer\");if(this.prepareClose(),this.readOnly){this.discardAndClose();return}let e=this.baseFs.existsSync(this.path)||this.stats.mode===Mh?void 0:this.stats.mode;if(this.entries.size===0)this.discardAndClose(),this.baseFs.writeFileSync(this.path,EM(),{mode:e});else{if(this.libzip.close(this.zip)===-1)throw this.makeLibzipError(this.libzip.getError(this.zip));typeof e!=\"undefined\"&&this.baseFs.chmodSync(this.path,e)}this.ready=!1}discardAndClose(){this.prepareClose(),this.libzip.discard(this.zip),this.ready=!1}resolve(e){return k.resolve(Me.root,e)}async openPromise(e,r,i){return this.openSync(e,r,i)}openSync(e,r,i){let n=this.nextFd++;return this.fds.set(n,{cursor:0,p:e}),n}hasOpenFileHandles(){return!!this.fds.size}async opendirPromise(e,r){return this.opendirSync(e,r)}opendirSync(e,r={}){let i=this.resolveFilename(`opendir '${e}'`,e);if(!this.entries.has(i)&&!this.listings.has(i))throw to(`opendir '${e}'`);let n=this.listings.get(i);if(!n)throw Do(`opendir '${e}'`);let s=[...n],o=this.openSync(i,\"r\");return qE(this,i,s,{onClose:()=>{this.closeSync(o)}})}async readPromise(e,r,i,n,s){return this.readSync(e,r,i,n,s)}readSync(e,r,i=0,n=r.byteLength,s=-1){let o=this.fds.get(e);if(typeof o==\"undefined\")throw en(\"read\");let a=s===-1||s===null?o.cursor:s,l=this.readFileSync(o.p);l.copy(r,i,a,a+n);let c=Math.max(0,Math.min(l.length-a,n));return(s===-1||s===null)&&(o.cursor+=c),c}async writePromise(e,r,i,n,s){return typeof r==\"string\"?this.writeSync(e,r,s):this.writeSync(e,r,i,n,s)}writeSync(e,r,i,n,s){throw typeof this.fds.get(e)==\"undefined\"?en(\"read\"):new Error(\"Unimplemented\")}async closePromise(e){return this.closeSync(e)}closeSync(e){if(typeof this.fds.get(e)==\"undefined\")throw en(\"read\");this.fds.delete(e)}createReadStream(e,{encoding:r}={}){if(e===null)throw new Error(\"Unimplemented\");let i=this.openSync(e,\"r\"),n=Object.assign(new EQ.PassThrough({emitClose:!0,autoDestroy:!0,destroy:(o,a)=>{clearImmediate(s),this.closeSync(i),a(o)}}),{close(){n.destroy()},bytesRead:0,path:e}),s=setImmediate(async()=>{try{let o=await this.readFilePromise(e,r);n.bytesRead=o.length,n.end(o)}catch(o){n.destroy(o)}});return n}createWriteStream(e,{encoding:r}={}){if(this.readOnly)throw In(`open '${e}'`);if(e===null)throw new Error(\"Unimplemented\");let i=[],n=this.openSync(e,\"w\"),s=Object.assign(new 
EQ.PassThrough({autoDestroy:!0,emitClose:!0,destroy:(o,a)=>{try{o?a(o):(this.writeFileSync(e,Buffer.concat(i),r),a(null))}catch(l){a(l)}finally{this.closeSync(n)}}}),{bytesWritten:0,path:e,close(){s.destroy()}});return s.on(\"data\",o=>{let a=Buffer.from(o);s.bytesWritten+=a.length,i.push(a)}),s}async realpathPromise(e){return this.realpathSync(e)}realpathSync(e){let r=this.resolveFilename(`lstat '${e}'`,e);if(!this.entries.has(r)&&!this.listings.has(r))throw to(`lstat '${e}'`);return r}async existsPromise(e){return this.existsSync(e)}existsSync(e){if(!this.ready)throw GE(`archive closed, existsSync '${e}'`);if(this.symlinkCount===0){let i=k.resolve(Me.root,e);return this.entries.has(i)||this.listings.has(i)}let r;try{r=this.resolveFilename(`stat '${e}'`,e)}catch(i){return!1}return this.entries.has(r)||this.listings.has(r)}async accessPromise(e,r){return this.accessSync(e,r)}accessSync(e,r=qu.constants.F_OK){let i=this.resolveFilename(`access '${e}'`,e);if(!this.entries.has(i)&&!this.listings.has(i))throw to(`access '${e}'`);if(this.readOnly&&r&qu.constants.W_OK)throw In(`access '${e}'`)}async statPromise(e,r){return this.statSync(e,r)}statSync(e,r){let i=this.resolveFilename(`stat '${e}'`,e);if(!this.entries.has(i)&&!this.listings.has(i))throw to(`stat '${e}'`);if(e[e.length-1]===\"/\"&&!this.listings.has(i))throw Do(`stat '${e}'`);return this.statImpl(`stat '${e}'`,i,r)}async fstatPromise(e,r){return this.fstatSync(e,r)}fstatSync(e,r){let i=this.fds.get(e);if(typeof i==\"undefined\")throw en(\"fstatSync\");let{p:n}=i,s=this.resolveFilename(`stat '${n}'`,n);if(!this.entries.has(s)&&!this.listings.has(s))throw to(`stat '${n}'`);if(n[n.length-1]===\"/\"&&!this.listings.has(s))throw Do(`stat '${n}'`);return this.statImpl(`fstat '${n}'`,s,r)}async lstatPromise(e,r){return this.lstatSync(e,r)}lstatSync(e,r){let i=this.resolveFilename(`lstat '${e}'`,e,!1);if(!this.entries.has(i)&&!this.listings.has(i))throw to(`lstat '${e}'`);if(e[e.length-1]===\"/\"&&!this.listings.has(i))throw Do(`lstat '${e}'`);return this.statImpl(`lstat '${e}'`,i,r)}statImpl(e,r,i={}){let n=this.entries.get(r);if(typeof n!=\"undefined\"){let s=this.libzip.struct.statS();if(this.libzip.statIndex(this.zip,n,0,0,s)===-1)throw this.makeLibzipError(this.libzip.getError(this.zip));let a=this.stats.uid,l=this.stats.gid,c=this.libzip.struct.statSize(s)>>>0,u=512,g=Math.ceil(c/u),f=(this.libzip.struct.statMtime(s)>>>0)*1e3,h=f,p=f,m=f,y=new Date(h),Q=new Date(p),S=new Date(m),x=new Date(f),M=this.listings.has(r)?Sa:this.isSymbolicLink(n)?xa:ka,Y=M===Sa?493:420,U=M|this.getUnixMode(n,Y)&511,J=this.libzip.struct.statCrc(s),W=Object.assign(new MA,{uid:a,gid:l,size:c,blksize:u,blocks:g,atime:y,birthtime:Q,ctime:S,mtime:x,atimeMs:h,birthtimeMs:p,ctimeMs:m,mtimeMs:f,mode:U,crc:J});return i.bigint===!0?UE(W):W}if(this.listings.has(r)){let s=this.stats.uid,o=this.stats.gid,a=0,l=512,c=0,u=this.stats.mtimeMs,g=this.stats.mtimeMs,f=this.stats.mtimeMs,h=this.stats.mtimeMs,p=new Date(u),m=new Date(g),y=new Date(f),Q=new Date(h),S=Sa|493,x=0,M=Object.assign(new MA,{uid:s,gid:o,size:a,blksize:l,blocks:c,atime:p,birthtime:m,ctime:y,mtime:Q,atimeMs:u,birthtimeMs:g,ctimeMs:f,mtimeMs:h,mode:S,crc:x});return i.bigint===!0?UE(M):M}throw new Error(\"Unreachable\")}getUnixMode(e,r){if(this.libzip.file.getExternalAttributes(this.zip,e,0,0,this.libzip.uint08S,this.libzip.uint32S)===-1)throw this.makeLibzipError(this.libzip.getError(this.zip));return 
this.libzip.getValue(this.libzip.uint08S,\"i8\")>>>0!==this.libzip.ZIP_OPSYS_UNIX?r:this.libzip.getValue(this.libzip.uint32S,\"i32\")>>>16}registerListing(e){let r=this.listings.get(e);if(r)return r;this.registerListing(k.dirname(e)).add(k.basename(e));let n=new Set;return this.listings.set(e,n),n}registerEntry(e,r){this.registerListing(k.dirname(e)).add(k.basename(e)),this.entries.set(e,r)}unregisterListing(e){this.listings.delete(e);let r=this.listings.get(k.dirname(e));r==null||r.delete(k.basename(e))}unregisterEntry(e){this.unregisterListing(e);let r=this.entries.get(e);this.entries.delete(e),typeof r!=\"undefined\"&&(this.fileSources.delete(r),this.isSymbolicLink(r)&&this.symlinkCount--)}deleteEntry(e,r){if(this.unregisterEntry(e),this.libzip.delete(this.zip,r)===-1)throw this.makeLibzipError(this.libzip.getError(this.zip))}resolveFilename(e,r,i=!0){if(!this.ready)throw GE(`archive closed, ${e}`);let n=k.resolve(Me.root,r);if(n===\"/\")return Me.root;let s=this.entries.get(n);if(i&&s!==void 0)if(this.symlinkCount!==0&&this.isSymbolicLink(s)){let o=this.getFileSource(s).toString();return this.resolveFilename(e,k.resolve(k.dirname(n),o),!0)}else return n;for(;;){let o=this.resolveFilename(e,k.dirname(n),!0),a=this.listings.has(o),l=this.entries.has(o);if(!a&&!l)throw to(e);if(!a)throw Do(e);if(n=k.resolve(o,k.basename(n)),!i||this.symlinkCount===0)break;let c=this.libzip.name.locate(this.zip,n.slice(1));if(c===-1)break;if(this.isSymbolicLink(c)){let u=this.getFileSource(c).toString();n=k.resolve(k.dirname(n),u)}else break}return n}allocateBuffer(e){Buffer.isBuffer(e)||(e=Buffer.from(e));let r=this.libzip.malloc(e.byteLength);if(!r)throw new Error(\"Couldn't allocate enough memory\");return new Uint8Array(this.libzip.HEAPU8.buffer,r,e.byteLength).set(e),{buffer:r,byteLength:e.byteLength}}allocateUnattachedSource(e){let r=this.libzip.struct.errorS(),{buffer:i,byteLength:n}=this.allocateBuffer(e),s=this.libzip.source.fromUnattachedBuffer(i,n,0,!0,r);if(s===0)throw this.libzip.free(r),this.makeLibzipError(r);return s}allocateSource(e){let{buffer:r,byteLength:i}=this.allocateBuffer(e),n=this.libzip.source.fromBuffer(this.zip,r,i,0,!0);if(n===0)throw this.libzip.free(r),this.makeLibzipError(this.libzip.getError(this.zip));return n}setFileSource(e,r){let i=Buffer.isBuffer(r)?r:Buffer.from(r),n=k.relative(Me.root,e),s=this.allocateSource(r);try{let o=this.libzip.file.add(this.zip,n,s,this.libzip.ZIP_FL_OVERWRITE);if(o===-1)throw this.makeLibzipError(this.libzip.getError(this.zip));if(this.level!==\"mixed\"){let a=this.level===0?this.libzip.ZIP_CM_STORE:this.libzip.ZIP_CM_DEFLATE;if(this.libzip.file.setCompression(this.zip,o,0,a,this.level)===-1)throw this.makeLibzipError(this.libzip.getError(this.zip))}return this.fileSources.set(o,i),o}catch(o){throw this.libzip.source.free(s),o}}isSymbolicLink(e){if(this.symlinkCount===0)return!1;if(this.libzip.file.getExternalAttributes(this.zip,e,0,0,this.libzip.uint08S,this.libzip.uint32S)===-1)throw this.makeLibzipError(this.libzip.getError(this.zip));return this.libzip.getValue(this.libzip.uint08S,\"i8\")>>>0!==this.libzip.ZIP_OPSYS_UNIX?!1:(this.libzip.getValue(this.libzip.uint32S,\"i32\")>>>16&zn)===xa}getFileSource(e,r={asyncDecompress:!1}){let i=this.fileSources.get(e);if(typeof i!=\"undefined\")return i;let n=this.libzip.struct.statS();if(this.libzip.statIndex(this.zip,e,0,0,n)===-1)throw this.makeLibzipError(this.libzip.getError(this.zip));let 
o=this.libzip.struct.statCompSize(n),a=this.libzip.struct.statCompMethod(n),l=this.libzip.malloc(o);try{let c=this.libzip.fopenIndex(this.zip,e,0,this.libzip.ZIP_FL_COMPRESSED);if(c===0)throw this.makeLibzipError(this.libzip.getError(this.zip));try{let u=this.libzip.fread(c,l,o,0);if(u===-1)throw this.makeLibzipError(this.libzip.file.getError(c));if(u<o)throw new Error(\"Incomplete read\");if(u>o)throw new Error(\"Overread\");let g=this.libzip.HEAPU8.subarray(l,l+o),f=Buffer.from(g);if(a===0)return this.fileSources.set(e,f),f;if(r.asyncDecompress)return new Promise((h,p)=>{IQ.default.inflateRaw(f,(m,y)=>{m?p(m):(this.fileSources.set(e,y),h(y))})});{let h=IQ.default.inflateRawSync(f);return this.fileSources.set(e,h),h}}finally{this.libzip.fclose(c)}}finally{this.libzip.free(l)}}async chmodPromise(e,r){return this.chmodSync(e,r)}chmodSync(e,r){if(this.readOnly)throw In(`chmod '${e}'`);r&=493;let i=this.resolveFilename(`chmod '${e}'`,e,!1),n=this.entries.get(i);if(typeof n==\"undefined\")throw new Error(`Assertion failed: The entry should have been registered (${i})`);let o=this.getUnixMode(n,ka|0)&~511|r;if(this.libzip.file.setExternalAttributes(this.zip,n,0,0,this.libzip.ZIP_OPSYS_UNIX,o<<16)===-1)throw this.makeLibzipError(this.libzip.getError(this.zip))}async chownPromise(e,r,i){return this.chownSync(e,r,i)}chownSync(e,r,i){throw new Error(\"Unimplemented\")}async renamePromise(e,r){return this.renameSync(e,r)}renameSync(e,r){throw new Error(\"Unimplemented\")}async copyFilePromise(e,r,i){let{indexSource:n,indexDest:s,resolvedDestP:o}=this.prepareCopyFile(e,r,i),a=await this.getFileSource(n,{asyncDecompress:!0}),l=this.setFileSource(o,a);l!==s&&this.registerEntry(o,l)}copyFileSync(e,r,i=0){let{indexSource:n,indexDest:s,resolvedDestP:o}=this.prepareCopyFile(e,r,i),a=this.getFileSource(n),l=this.setFileSource(o,a);l!==s&&this.registerEntry(o,l)}prepareCopyFile(e,r,i=0){if(this.readOnly)throw In(`copyfile '${e} -> '${r}'`);if((i&qu.constants.COPYFILE_FICLONE_FORCE)!=0)throw Gh(\"unsupported clone operation\",`copyfile '${e}' -> ${r}'`);let n=this.resolveFilename(`copyfile '${e} -> ${r}'`,e),s=this.entries.get(n);if(typeof s==\"undefined\")throw UA(`copyfile '${e}' -> '${r}'`);let o=this.resolveFilename(`copyfile '${e}' -> ${r}'`,r),a=this.entries.get(o);if((i&(qu.constants.COPYFILE_EXCL|qu.constants.COPYFILE_FICLONE_FORCE))!=0&&typeof a!=\"undefined\")throw YE(`copyfile '${e}' -> '${r}'`);return{indexSource:s,resolvedDestP:o,indexDest:a}}async appendFilePromise(e,r,i){if(this.readOnly)throw In(`open '${e}'`);return typeof i==\"undefined\"?i={flag:\"a\"}:typeof i==\"string\"?i={flag:\"a\",encoding:i}:typeof i.flag==\"undefined\"&&(i=N({flag:\"a\"},i)),this.writeFilePromise(e,r,i)}appendFileSync(e,r,i={}){if(this.readOnly)throw In(`open '${e}'`);return typeof i==\"undefined\"?i={flag:\"a\"}:typeof i==\"string\"?i={flag:\"a\",encoding:i}:typeof i.flag==\"undefined\"&&(i=N({flag:\"a\"},i)),this.writeFileSync(e,r,i)}fdToPath(e,r){var n;let i=(n=this.fds.get(e))==null?void 0:n.p;if(typeof i==\"undefined\")throw en(r);return i}async writeFilePromise(e,r,i){let{encoding:n,mode:s,index:o,resolvedP:a}=this.prepareWriteFile(e,i);o!==void 0&&typeof i==\"object\"&&i.flag&&i.flag.includes(\"a\")&&(r=Buffer.concat([await this.getFileSource(o,{asyncDecompress:!0}),Buffer.from(r)])),n!==null&&(r=r.toString(n));let l=this.setFileSource(a,r);l!==o&&this.registerEntry(a,l),s!==null&&await this.chmodPromise(a,s)}writeFileSync(e,r,i){let{encoding:n,mode:s,index:o,resolvedP:a}=this.prepareWriteFile(e,i);o!==void 
0&&typeof i==\"object\"&&i.flag&&i.flag.includes(\"a\")&&(r=Buffer.concat([this.getFileSource(o),Buffer.from(r)])),n!==null&&(r=r.toString(n));let l=this.setFileSource(a,r);l!==o&&this.registerEntry(a,l),s!==null&&this.chmodSync(a,s)}prepareWriteFile(e,r){if(typeof e==\"number\"&&(e=this.fdToPath(e,\"read\")),this.readOnly)throw In(`open '${e}'`);let i=this.resolveFilename(`open '${e}'`,e);if(this.listings.has(i))throw Yh(`open '${e}'`);let n=null,s=null;typeof r==\"string\"?n=r:typeof r==\"object\"&&({encoding:n=null,mode:s=null}=r);let o=this.entries.get(i);return{encoding:n,mode:s,resolvedP:i,index:o}}async unlinkPromise(e){return this.unlinkSync(e)}unlinkSync(e){if(this.readOnly)throw In(`unlink '${e}'`);let r=this.resolveFilename(`unlink '${e}'`,e);if(this.listings.has(r))throw Yh(`unlink '${e}'`);let i=this.entries.get(r);if(typeof i==\"undefined\")throw UA(`unlink '${e}'`);this.deleteEntry(r,i)}async utimesPromise(e,r,i){return this.utimesSync(e,r,i)}utimesSync(e,r,i){if(this.readOnly)throw In(`utimes '${e}'`);let n=this.resolveFilename(`utimes '${e}'`,e);this.utimesImpl(n,i)}async lutimesPromise(e,r,i){return this.lutimesSync(e,r,i)}lutimesSync(e,r,i){if(this.readOnly)throw In(`lutimes '${e}'`);let n=this.resolveFilename(`utimes '${e}'`,e,!1);this.utimesImpl(n,i)}utimesImpl(e,r){this.listings.has(e)&&(this.entries.has(e)||this.hydrateDirectory(e));let i=this.entries.get(e);if(i===void 0)throw new Error(\"Unreachable\");if(this.libzip.file.setMtime(this.zip,i,0,Sfe(r),0)===-1)throw this.makeLibzipError(this.libzip.getError(this.zip))}async mkdirPromise(e,r){return this.mkdirSync(e,r)}mkdirSync(e,{mode:r=493,recursive:i=!1}={}){if(i){this.mkdirpSync(e,{chmod:r});return}if(this.readOnly)throw In(`mkdir '${e}'`);let n=this.resolveFilename(`mkdir '${e}'`,e);if(this.entries.has(n)||this.listings.has(n))throw YE(`mkdir '${e}'`);this.hydrateDirectory(n),this.chmodSync(n,r)}async rmdirPromise(e,r){return this.rmdirSync(e,r)}rmdirSync(e,{recursive:r=!1}={}){if(this.readOnly)throw In(`rmdir '${e}'`);if(r){this.removeSync(e);return}let i=this.resolveFilename(`rmdir '${e}'`,e),n=this.listings.get(i);if(!n)throw Do(`rmdir '${e}'`);if(n.size>0)throw cM(`rmdir '${e}'`);let s=this.entries.get(i);if(typeof s==\"undefined\")throw UA(`rmdir '${e}'`);this.deleteEntry(e,s)}hydrateDirectory(e){let r=this.libzip.dir.add(this.zip,k.relative(Me.root,e));if(r===-1)throw this.makeLibzipError(this.libzip.getError(this.zip));return this.registerListing(e),this.registerEntry(e,r),r}async linkPromise(e,r){return this.linkSync(e,r)}linkSync(e,r){throw uM(`link '${e}' -> '${r}'`)}async symlinkPromise(e,r){return this.symlinkSync(e,r)}symlinkSync(e,r){if(this.readOnly)throw In(`symlink '${e}' -> '${r}'`);let i=this.resolveFilename(`symlink '${e}' -> '${r}'`,r);if(this.listings.has(i))throw Yh(`symlink '${e}' -> '${r}'`);if(this.entries.has(i))throw YE(`symlink '${e}' -> '${r}'`);let n=this.setFileSource(i,e);if(this.registerEntry(i,n),this.libzip.file.setExternalAttributes(this.zip,n,0,0,this.libzip.ZIP_OPSYS_UNIX,(xa|511)<<16)===-1)throw this.makeLibzipError(this.libzip.getError(this.zip));this.symlinkCount+=1}async readFilePromise(e,r){typeof r==\"object\"&&(r=r?r.encoding:void 0);let i=await this.readFileBuffer(e,{asyncDecompress:!0});return r?i.toString(r):i}readFileSync(e,r){typeof r==\"object\"&&(r=r?r.encoding:void 0);let i=this.readFileBuffer(e);return r?i.toString(r):i}readFileBuffer(e,r={asyncDecompress:!1}){typeof e==\"number\"&&(e=this.fdToPath(e,\"read\"));let i=this.resolveFilename(`open 
'${e}'`,e);if(!this.entries.has(i)&&!this.listings.has(i))throw to(`open '${e}'`);if(e[e.length-1]===\"/\"&&!this.listings.has(i))throw Do(`open '${e}'`);if(this.listings.has(i))throw Yh(\"read\");let n=this.entries.get(i);if(n===void 0)throw new Error(\"Unreachable\");return this.getFileSource(n,r)}async readdirPromise(e,r){return this.readdirSync(e,r)}readdirSync(e,r){let i=this.resolveFilename(`scandir '${e}'`,e);if(!this.entries.has(i)&&!this.listings.has(i))throw to(`scandir '${e}'`);let n=this.listings.get(i);if(!n)throw Do(`scandir '${e}'`);let s=[...n];return(r==null?void 0:r.withFileTypes)?s.map(o=>Object.assign(this.statImpl(\"lstat\",k.join(e,o)),{name:o})):s}async readlinkPromise(e){let r=this.prepareReadlink(e);return(await this.getFileSource(r,{asyncDecompress:!0})).toString()}readlinkSync(e){let r=this.prepareReadlink(e);return this.getFileSource(r).toString()}prepareReadlink(e){let r=this.resolveFilename(`readlink '${e}'`,e,!1);if(!this.entries.has(r)&&!this.listings.has(r))throw to(`readlink '${e}'`);if(e[e.length-1]===\"/\"&&!this.listings.has(r))throw Do(`open '${e}'`);if(this.listings.has(r))throw UA(`readlink '${e}'`);let i=this.entries.get(r);if(i===void 0)throw new Error(\"Unreachable\");if(!this.isSymbolicLink(i))throw UA(`readlink '${e}'`);return i}async truncatePromise(e,r=0){let i=this.resolveFilename(`open '${e}'`,e),n=this.entries.get(i);if(typeof n==\"undefined\")throw UA(`open '${e}'`);let s=await this.getFileSource(n,{asyncDecompress:!0}),o=Buffer.alloc(r,0);return s.copy(o),await this.writeFilePromise(e,o)}truncateSync(e,r=0){let i=this.resolveFilename(`open '${e}'`,e),n=this.entries.get(i);if(typeof n==\"undefined\")throw UA(`open '${e}'`);let s=this.getFileSource(n),o=Buffer.alloc(r,0);return s.copy(o),this.writeFileSync(e,o)}watch(e,r,i){let n;switch(typeof r){case\"function\":case\"string\":case\"undefined\":n=!0;break;default:({persistent:n=!0}=r);break}if(!n)return{on:()=>{},close:()=>{}};let s=setInterval(()=>{},24*60*60*1e3);return{on:()=>{},close:()=>{clearInterval(s)}}}watchFile(e,r,i){let n=k.resolve(Me.root,e);return WE(this,n,r,i)}unwatchFile(e,r){let i=k.resolve(Me.root,e);return Jh(this,i,r)}};var bi=class extends KA{getExtractHint(e){return this.baseFs.getExtractHint(e)}resolve(e){return this.mapFromBase(this.baseFs.resolve(this.mapToBase(e)))}getRealPath(){return this.mapFromBase(this.baseFs.getRealPath())}async openPromise(e,r,i){return this.baseFs.openPromise(this.mapToBase(e),r,i)}openSync(e,r,i){return this.baseFs.openSync(this.mapToBase(e),r,i)}async opendirPromise(e,r){return Object.assign(await this.baseFs.opendirPromise(this.mapToBase(e),r),{path:e})}opendirSync(e,r){return Object.assign(this.baseFs.opendirSync(this.mapToBase(e),r),{path:e})}async readPromise(e,r,i,n,s){return await this.baseFs.readPromise(e,r,i,n,s)}readSync(e,r,i,n,s){return this.baseFs.readSync(e,r,i,n,s)}async writePromise(e,r,i,n,s){return typeof r==\"string\"?await this.baseFs.writePromise(e,r,i):await this.baseFs.writePromise(e,r,i,n,s)}writeSync(e,r,i,n,s){return typeof r==\"string\"?this.baseFs.writeSync(e,r,i):this.baseFs.writeSync(e,r,i,n,s)}async closePromise(e){return this.baseFs.closePromise(e)}closeSync(e){this.baseFs.closeSync(e)}createReadStream(e,r){return this.baseFs.createReadStream(e!==null?this.mapToBase(e):e,r)}createWriteStream(e,r){return this.baseFs.createWriteStream(e!==null?this.mapToBase(e):e,r)}async realpathPromise(e){return this.mapFromBase(await this.baseFs.realpathPromise(this.mapToBase(e)))}realpathSync(e){return 
this.mapFromBase(this.baseFs.realpathSync(this.mapToBase(e)))}async existsPromise(e){return this.baseFs.existsPromise(this.mapToBase(e))}existsSync(e){return this.baseFs.existsSync(this.mapToBase(e))}accessSync(e,r){return this.baseFs.accessSync(this.mapToBase(e),r)}async accessPromise(e,r){return this.baseFs.accessPromise(this.mapToBase(e),r)}async statPromise(e,r){return this.baseFs.statPromise(this.mapToBase(e),r)}statSync(e,r){return this.baseFs.statSync(this.mapToBase(e),r)}async fstatPromise(e,r){return this.baseFs.fstatPromise(e,r)}fstatSync(e,r){return this.baseFs.fstatSync(e,r)}async lstatPromise(e,r){return this.baseFs.lstatPromise(this.mapToBase(e),r)}lstatSync(e,r){return this.baseFs.lstatSync(this.mapToBase(e),r)}async chmodPromise(e,r){return this.baseFs.chmodPromise(this.mapToBase(e),r)}chmodSync(e,r){return this.baseFs.chmodSync(this.mapToBase(e),r)}async chownPromise(e,r,i){return this.baseFs.chownPromise(this.mapToBase(e),r,i)}chownSync(e,r,i){return this.baseFs.chownSync(this.mapToBase(e),r,i)}async renamePromise(e,r){return this.baseFs.renamePromise(this.mapToBase(e),this.mapToBase(r))}renameSync(e,r){return this.baseFs.renameSync(this.mapToBase(e),this.mapToBase(r))}async copyFilePromise(e,r,i=0){return this.baseFs.copyFilePromise(this.mapToBase(e),this.mapToBase(r),i)}copyFileSync(e,r,i=0){return this.baseFs.copyFileSync(this.mapToBase(e),this.mapToBase(r),i)}async appendFilePromise(e,r,i){return this.baseFs.appendFilePromise(this.fsMapToBase(e),r,i)}appendFileSync(e,r,i){return this.baseFs.appendFileSync(this.fsMapToBase(e),r,i)}async writeFilePromise(e,r,i){return this.baseFs.writeFilePromise(this.fsMapToBase(e),r,i)}writeFileSync(e,r,i){return this.baseFs.writeFileSync(this.fsMapToBase(e),r,i)}async unlinkPromise(e){return this.baseFs.unlinkPromise(this.mapToBase(e))}unlinkSync(e){return this.baseFs.unlinkSync(this.mapToBase(e))}async utimesPromise(e,r,i){return this.baseFs.utimesPromise(this.mapToBase(e),r,i)}utimesSync(e,r,i){return this.baseFs.utimesSync(this.mapToBase(e),r,i)}async mkdirPromise(e,r){return this.baseFs.mkdirPromise(this.mapToBase(e),r)}mkdirSync(e,r){return this.baseFs.mkdirSync(this.mapToBase(e),r)}async rmdirPromise(e,r){return this.baseFs.rmdirPromise(this.mapToBase(e),r)}rmdirSync(e,r){return this.baseFs.rmdirSync(this.mapToBase(e),r)}async linkPromise(e,r){return this.baseFs.linkPromise(this.mapToBase(e),this.mapToBase(r))}linkSync(e,r){return this.baseFs.linkSync(this.mapToBase(e),this.mapToBase(r))}async symlinkPromise(e,r,i){let n=this.mapToBase(r);if(this.pathUtils.isAbsolute(e))return this.baseFs.symlinkPromise(this.mapToBase(e),n,i);let s=this.mapToBase(this.pathUtils.join(this.pathUtils.dirname(r),e)),o=this.baseFs.pathUtils.relative(this.baseFs.pathUtils.dirname(n),s);return this.baseFs.symlinkPromise(o,n,i)}symlinkSync(e,r,i){let n=this.mapToBase(r);if(this.pathUtils.isAbsolute(e))return this.baseFs.symlinkSync(this.mapToBase(e),n,i);let s=this.mapToBase(this.pathUtils.join(this.pathUtils.dirname(r),e)),o=this.baseFs.pathUtils.relative(this.baseFs.pathUtils.dirname(n),s);return this.baseFs.symlinkSync(o,n,i)}async readFilePromise(e,r){return r===\"utf8\"?this.baseFs.readFilePromise(this.fsMapToBase(e),r):this.baseFs.readFilePromise(this.fsMapToBase(e),r)}readFileSync(e,r){return r===\"utf8\"?this.baseFs.readFileSync(this.fsMapToBase(e),r):this.baseFs.readFileSync(this.fsMapToBase(e),r)}async readdirPromise(e,r){return this.baseFs.readdirPromise(this.mapToBase(e),r)}readdirSync(e,r){return 
this.baseFs.readdirSync(this.mapToBase(e),r)}async readlinkPromise(e){return this.mapFromBase(await this.baseFs.readlinkPromise(this.mapToBase(e)))}readlinkSync(e){return this.mapFromBase(this.baseFs.readlinkSync(this.mapToBase(e)))}async truncatePromise(e,r){return this.baseFs.truncatePromise(this.mapToBase(e),r)}truncateSync(e,r){return this.baseFs.truncateSync(this.mapToBase(e),r)}watch(e,r,i){return this.baseFs.watch(this.mapToBase(e),r,i)}watchFile(e,r,i){return this.baseFs.watchFile(this.mapToBase(e),r,i)}unwatchFile(e,r){return this.baseFs.unwatchFile(this.mapToBase(e),r)}fsMapToBase(e){return typeof e==\"number\"?e:this.mapToBase(e)}};var Pa=class extends bi{constructor(e,{baseFs:r,pathUtils:i}){super(i);this.target=e,this.baseFs=r}getRealPath(){return this.target}getBaseFs(){return this.baseFs}mapFromBase(e){return e}mapToBase(e){return e}};var _t=class extends bi{constructor(e,{baseFs:r=new ar}={}){super(k);this.target=this.pathUtils.normalize(e),this.baseFs=r}getRealPath(){return this.pathUtils.resolve(this.baseFs.getRealPath(),this.target)}resolve(e){return this.pathUtils.isAbsolute(e)?k.normalize(e):this.baseFs.resolve(k.join(this.target,e))}mapFromBase(e){return e}mapToBase(e){return this.pathUtils.isAbsolute(e)?e:this.pathUtils.join(this.target,e)}};var IM=Me.root,Da=class extends bi{constructor(e,{baseFs:r=new ar}={}){super(k);this.target=this.pathUtils.resolve(Me.root,e),this.baseFs=r}getRealPath(){return this.pathUtils.resolve(this.baseFs.getRealPath(),this.pathUtils.relative(Me.root,this.target))}getTarget(){return this.target}getBaseFs(){return this.baseFs}mapToBase(e){let r=this.pathUtils.normalize(e);if(this.pathUtils.isAbsolute(e))return this.pathUtils.resolve(this.target,this.pathUtils.relative(IM,e));if(r.match(/^\\.\\.\\/?/))throw new Error(`Resolving this path (${e}) would escape the jail`);return this.pathUtils.resolve(this.target,e)}mapFromBase(e){return this.pathUtils.resolve(IM,this.pathUtils.relative(this.target,e))}};var zh=class extends bi{constructor(e,r){super(r);this.instance=null;this.factory=e}get baseFs(){return this.instance||(this.instance=this.factory()),this.instance}set baseFs(e){this.instance=e}mapFromBase(e){return e}mapToBase(e){return e}};var st=()=>Object.assign(new Error(\"ENOSYS: unsupported filesystem access\"),{code:\"ENOSYS\"}),yQ=class extends KA{constructor(){super(k)}getExtractHint(){throw st()}getRealPath(){throw st()}resolve(){throw st()}async openPromise(){throw st()}openSync(){throw st()}async opendirPromise(){throw st()}opendirSync(){throw st()}async readPromise(){throw st()}readSync(){throw st()}async writePromise(){throw st()}writeSync(){throw st()}async closePromise(){throw st()}closeSync(){throw st()}createWriteStream(){throw st()}createReadStream(){throw st()}async realpathPromise(){throw st()}realpathSync(){throw st()}async readdirPromise(){throw st()}readdirSync(){throw st()}async existsPromise(e){throw st()}existsSync(e){throw st()}async accessPromise(){throw st()}accessSync(){throw st()}async statPromise(){throw st()}statSync(){throw st()}async fstatPromise(e){throw st()}fstatSync(e){throw st()}async lstatPromise(e){throw st()}lstatSync(e){throw st()}async chmodPromise(){throw st()}chmodSync(){throw st()}async chownPromise(){throw st()}chownSync(){throw st()}async mkdirPromise(){throw st()}mkdirSync(){throw st()}async rmdirPromise(){throw st()}rmdirSync(){throw st()}async linkPromise(){throw st()}linkSync(){throw st()}async symlinkPromise(){throw st()}symlinkSync(){throw st()}async renamePromise(){throw 
st()}renameSync(){throw st()}async copyFilePromise(){throw st()}copyFileSync(){throw st()}async appendFilePromise(){throw st()}appendFileSync(){throw st()}async writeFilePromise(){throw st()}writeFileSync(){throw st()}async unlinkPromise(){throw st()}unlinkSync(){throw st()}async utimesPromise(){throw st()}utimesSync(){throw st()}async readFilePromise(){throw st()}readFileSync(){throw st()}async readlinkPromise(){throw st()}readlinkSync(){throw st()}async truncatePromise(){throw st()}truncateSync(){throw st()}watch(){throw st()}watchFile(){throw st()}unwatchFile(){throw st()}},zE=yQ;zE.instance=new yQ;var _h=class extends bi{constructor(e){super(j);this.baseFs=e}mapFromBase(e){return j.fromPortablePath(e)}mapToBase(e){return j.toPortablePath(e)}};var kfe=/^[0-9]+$/,wQ=/^(\\/(?:[^/]+\\/)*?(?:\\$\\$virtual|__virtual__))((?:\\/((?:[^/]+-)?[a-f0-9]+)(?:\\/([^/]+))?)?((?:\\/.*)?))$/,xfe=/^([^/]+-)?[a-f0-9]+$/,Jr=class extends bi{static makeVirtualPath(e,r,i){if(k.basename(e)!==\"__virtual__\")throw new Error('Assertion failed: Virtual folders must be named \"__virtual__\"');if(!k.basename(r).match(xfe))throw new Error(\"Assertion failed: Virtual components must be ended by an hexadecimal hash\");let s=k.relative(k.dirname(e),i).split(\"/\"),o=0;for(;o<s.length&&s[o]===\"..\";)o+=1;let a=s.slice(o);return k.join(e,r,String(o),...a)}static resolveVirtual(e){let r=e.match(wQ);if(!r||!r[3]&&r[5])return e;let i=k.dirname(r[1]);if(!r[3]||!r[4])return i;if(!kfe.test(r[4]))return e;let s=Number(r[4]),o=\"../\".repeat(s),a=r[5]||\".\";return Jr.resolveVirtual(k.join(i,o,a))}constructor({baseFs:e=new ar}={}){super(k);this.baseFs=e}getExtractHint(e){return this.baseFs.getExtractHint(e)}getRealPath(){return this.baseFs.getRealPath()}realpathSync(e){let r=e.match(wQ);if(!r)return this.baseFs.realpathSync(e);if(!r[5])return e;let i=this.baseFs.realpathSync(this.mapToBase(e));return Jr.makeVirtualPath(r[1],r[3],i)}async realpathPromise(e){let r=e.match(wQ);if(!r)return await this.baseFs.realpathPromise(e);if(!r[5])return e;let i=await this.baseFs.realpathPromise(this.mapToBase(e));return Jr.makeVirtualPath(r[1],r[3],i)}mapToBase(e){if(e===\"\")return e;if(this.pathUtils.isAbsolute(e))return Jr.resolveVirtual(e);let r=Jr.resolveVirtual(this.baseFs.resolve(Me.dot)),i=Jr.resolveVirtual(this.baseFs.resolve(e));return k.relative(r,i)||Me.dot}mapFromBase(e){return e}};var Vh=ge(require(\"fs\"));var Ra=2147483648,yM=(t,e)=>{let r=t.indexOf(e);if(r<=0)return null;let i=r;for(;r>=0&&(i=r+e.length,t[i]!==k.sep);){if(t[r-1]===k.sep)return null;r=t.indexOf(e,i)}return t.length>i&&t[i]!==k.sep?null:t.slice(0,i)},ms=class extends ec{constructor({libzip:e,baseFs:r=new ar,filter:i=null,maxOpenFiles:n=Infinity,readOnlyArchives:s=!1,useCache:o=!0,maxAge:a=5e3,fileExtensions:l=null}){super();this.fdMap=new Map;this.nextFd=3;this.isZip=new Set;this.notZip=new Set;this.realPaths=new Map;this.limitOpenFilesTimeout=null;this.libzipFactory=typeof e!=\"function\"?()=>e:e,this.baseFs=r,this.zipInstances=o?new Map:null,this.filter=i,this.maxOpenFiles=n,this.readOnlyArchives=s,this.maxAge=a,this.fileExtensions=l}static async openPromise(e,r){let i=new ms(r);try{return await e(i)}finally{i.saveAndClose()}}get libzip(){return typeof this.libzipInstance==\"undefined\"&&(this.libzipInstance=this.libzipFactory()),this.libzipInstance}getExtractHint(e){return this.baseFs.getExtractHint(e)}getRealPath(){return this.baseFs.getRealPath()}saveAndClose(){if(Wh(this),this.zipInstances)for(let[e,{zipFs:r}]of 
this.zipInstances.entries())r.saveAndClose(),this.zipInstances.delete(e)}discardAndClose(){if(Wh(this),this.zipInstances)for(let[e,{zipFs:r}]of this.zipInstances.entries())r.discardAndClose(),this.zipInstances.delete(e)}resolve(e){return this.baseFs.resolve(e)}remapFd(e,r){let i=this.nextFd++|Ra;return this.fdMap.set(i,[e,r]),i}async openPromise(e,r,i){return await this.makeCallPromise(e,async()=>await this.baseFs.openPromise(e,r,i),async(n,{subPath:s})=>this.remapFd(n,await n.openPromise(s,r,i)))}openSync(e,r,i){return this.makeCallSync(e,()=>this.baseFs.openSync(e,r,i),(n,{subPath:s})=>this.remapFd(n,n.openSync(s,r,i)))}async opendirPromise(e,r){return await this.makeCallPromise(e,async()=>await this.baseFs.opendirPromise(e,r),async(i,{subPath:n})=>await i.opendirPromise(n,r),{requireSubpath:!1})}opendirSync(e,r){return this.makeCallSync(e,()=>this.baseFs.opendirSync(e,r),(i,{subPath:n})=>i.opendirSync(n,r),{requireSubpath:!1})}async readPromise(e,r,i,n,s){if((e&Ra)==0)return await this.baseFs.readPromise(e,r,i,n,s);let o=this.fdMap.get(e);if(typeof o==\"undefined\")throw en(\"read\");let[a,l]=o;return await a.readPromise(l,r,i,n,s)}readSync(e,r,i,n,s){if((e&Ra)==0)return this.baseFs.readSync(e,r,i,n,s);let o=this.fdMap.get(e);if(typeof o==\"undefined\")throw en(\"readSync\");let[a,l]=o;return a.readSync(l,r,i,n,s)}async writePromise(e,r,i,n,s){if((e&Ra)==0)return typeof r==\"string\"?await this.baseFs.writePromise(e,r,i):await this.baseFs.writePromise(e,r,i,n,s);let o=this.fdMap.get(e);if(typeof o==\"undefined\")throw en(\"write\");let[a,l]=o;return typeof r==\"string\"?await a.writePromise(l,r,i):await a.writePromise(l,r,i,n,s)}writeSync(e,r,i,n,s){if((e&Ra)==0)return typeof r==\"string\"?this.baseFs.writeSync(e,r,i):this.baseFs.writeSync(e,r,i,n,s);let o=this.fdMap.get(e);if(typeof o==\"undefined\")throw en(\"writeSync\");let[a,l]=o;return typeof r==\"string\"?a.writeSync(l,r,i):a.writeSync(l,r,i,n,s)}async closePromise(e){if((e&Ra)==0)return await this.baseFs.closePromise(e);let r=this.fdMap.get(e);if(typeof r==\"undefined\")throw en(\"close\");this.fdMap.delete(e);let[i,n]=r;return await i.closePromise(n)}closeSync(e){if((e&Ra)==0)return this.baseFs.closeSync(e);let r=this.fdMap.get(e);if(typeof r==\"undefined\")throw en(\"closeSync\");this.fdMap.delete(e);let[i,n]=r;return i.closeSync(n)}createReadStream(e,r){return e===null?this.baseFs.createReadStream(e,r):this.makeCallSync(e,()=>this.baseFs.createReadStream(e,r),(i,{archivePath:n,subPath:s})=>{let o=i.createReadStream(s,r);return o.path=j.fromPortablePath(this.pathUtils.join(n,s)),o})}createWriteStream(e,r){return e===null?this.baseFs.createWriteStream(e,r):this.makeCallSync(e,()=>this.baseFs.createWriteStream(e,r),(i,{subPath:n})=>i.createWriteStream(n,r))}async realpathPromise(e){return await this.makeCallPromise(e,async()=>await this.baseFs.realpathPromise(e),async(r,{archivePath:i,subPath:n})=>{let s=this.realPaths.get(i);return typeof s==\"undefined\"&&(s=await this.baseFs.realpathPromise(i),this.realPaths.set(i,s)),this.pathUtils.join(s,this.pathUtils.relative(Me.root,await r.realpathPromise(n)))})}realpathSync(e){return this.makeCallSync(e,()=>this.baseFs.realpathSync(e),(r,{archivePath:i,subPath:n})=>{let s=this.realPaths.get(i);return typeof s==\"undefined\"&&(s=this.baseFs.realpathSync(i),this.realPaths.set(i,s)),this.pathUtils.join(s,this.pathUtils.relative(Me.root,r.realpathSync(n)))})}async existsPromise(e){return await this.makeCallPromise(e,async()=>await this.baseFs.existsPromise(e),async(r,{subPath:i})=>await 
r.existsPromise(i))}existsSync(e){return this.makeCallSync(e,()=>this.baseFs.existsSync(e),(r,{subPath:i})=>r.existsSync(i))}async accessPromise(e,r){return await this.makeCallPromise(e,async()=>await this.baseFs.accessPromise(e,r),async(i,{subPath:n})=>await i.accessPromise(n,r))}accessSync(e,r){return this.makeCallSync(e,()=>this.baseFs.accessSync(e,r),(i,{subPath:n})=>i.accessSync(n,r))}async statPromise(e,r){return await this.makeCallPromise(e,async()=>await this.baseFs.statPromise(e,r),async(i,{subPath:n})=>await i.statPromise(n,r))}statSync(e,r){return this.makeCallSync(e,()=>this.baseFs.statSync(e,r),(i,{subPath:n})=>i.statSync(n,r))}async fstatPromise(e,r){if((e&Ra)==0)return this.baseFs.fstatPromise(e,r);let i=this.fdMap.get(e);if(typeof i==\"undefined\")throw en(\"fstat\");let[n,s]=i;return n.fstatPromise(s,r)}fstatSync(e,r){if((e&Ra)==0)return this.baseFs.fstatSync(e,r);let i=this.fdMap.get(e);if(typeof i==\"undefined\")throw en(\"fstatSync\");let[n,s]=i;return n.fstatSync(s,r)}async lstatPromise(e,r){return await this.makeCallPromise(e,async()=>await this.baseFs.lstatPromise(e,r),async(i,{subPath:n})=>await i.lstatPromise(n,r))}lstatSync(e,r){return this.makeCallSync(e,()=>this.baseFs.lstatSync(e,r),(i,{subPath:n})=>i.lstatSync(n,r))}async chmodPromise(e,r){return await this.makeCallPromise(e,async()=>await this.baseFs.chmodPromise(e,r),async(i,{subPath:n})=>await i.chmodPromise(n,r))}chmodSync(e,r){return this.makeCallSync(e,()=>this.baseFs.chmodSync(e,r),(i,{subPath:n})=>i.chmodSync(n,r))}async chownPromise(e,r,i){return await this.makeCallPromise(e,async()=>await this.baseFs.chownPromise(e,r,i),async(n,{subPath:s})=>await n.chownPromise(s,r,i))}chownSync(e,r,i){return this.makeCallSync(e,()=>this.baseFs.chownSync(e,r,i),(n,{subPath:s})=>n.chownSync(s,r,i))}async renamePromise(e,r){return await this.makeCallPromise(e,async()=>await this.makeCallPromise(r,async()=>await this.baseFs.renamePromise(e,r),async()=>{throw Object.assign(new Error(\"EEXDEV: cross-device link not permitted\"),{code:\"EEXDEV\"})}),async(i,{subPath:n})=>await this.makeCallPromise(r,async()=>{throw Object.assign(new Error(\"EEXDEV: cross-device link not permitted\"),{code:\"EEXDEV\"})},async(s,{subPath:o})=>{if(i!==s)throw Object.assign(new Error(\"EEXDEV: cross-device link not permitted\"),{code:\"EEXDEV\"});return await i.renamePromise(n,o)}))}renameSync(e,r){return this.makeCallSync(e,()=>this.makeCallSync(r,()=>this.baseFs.renameSync(e,r),()=>{throw Object.assign(new Error(\"EEXDEV: cross-device link not permitted\"),{code:\"EEXDEV\"})}),(i,{subPath:n})=>this.makeCallSync(r,()=>{throw Object.assign(new Error(\"EEXDEV: cross-device link not permitted\"),{code:\"EEXDEV\"})},(s,{subPath:o})=>{if(i!==s)throw Object.assign(new Error(\"EEXDEV: cross-device link not permitted\"),{code:\"EEXDEV\"});return i.renameSync(n,o)}))}async copyFilePromise(e,r,i=0){let n=async(s,o,a,l)=>{if((i&Vh.constants.COPYFILE_FICLONE_FORCE)!=0)throw Object.assign(new Error(`EXDEV: cross-device clone not permitted, copyfile '${o}' -> ${l}'`),{code:\"EXDEV\"});if(i&Vh.constants.COPYFILE_EXCL&&await this.existsPromise(o))throw Object.assign(new Error(`EEXIST: file already exists, copyfile '${o}' -> '${l}'`),{code:\"EEXIST\"});let c;try{c=await s.readFilePromise(o)}catch(u){throw Object.assign(new Error(`EINVAL: invalid argument, copyfile '${o}' -> '${l}'`),{code:\"EINVAL\"})}await a.writeFilePromise(l,c)};return await this.makeCallPromise(e,async()=>await this.makeCallPromise(r,async()=>await 
this.baseFs.copyFilePromise(e,r,i),async(s,{subPath:o})=>await n(this.baseFs,e,s,o)),async(s,{subPath:o})=>await this.makeCallPromise(r,async()=>await n(s,o,this.baseFs,r),async(a,{subPath:l})=>s!==a?await n(s,o,a,l):await s.copyFilePromise(o,l,i)))}copyFileSync(e,r,i=0){let n=(s,o,a,l)=>{if((i&Vh.constants.COPYFILE_FICLONE_FORCE)!=0)throw Object.assign(new Error(`EXDEV: cross-device clone not permitted, copyfile '${o}' -> ${l}'`),{code:\"EXDEV\"});if(i&Vh.constants.COPYFILE_EXCL&&this.existsSync(o))throw Object.assign(new Error(`EEXIST: file already exists, copyfile '${o}' -> '${l}'`),{code:\"EEXIST\"});let c;try{c=s.readFileSync(o)}catch(u){throw Object.assign(new Error(`EINVAL: invalid argument, copyfile '${o}' -> '${l}'`),{code:\"EINVAL\"})}a.writeFileSync(l,c)};return this.makeCallSync(e,()=>this.makeCallSync(r,()=>this.baseFs.copyFileSync(e,r,i),(s,{subPath:o})=>n(this.baseFs,e,s,o)),(s,{subPath:o})=>this.makeCallSync(r,()=>n(s,o,this.baseFs,r),(a,{subPath:l})=>s!==a?n(s,o,a,l):s.copyFileSync(o,l,i)))}async appendFilePromise(e,r,i){return await this.makeCallPromise(e,async()=>await this.baseFs.appendFilePromise(e,r,i),async(n,{subPath:s})=>await n.appendFilePromise(s,r,i))}appendFileSync(e,r,i){return this.makeCallSync(e,()=>this.baseFs.appendFileSync(e,r,i),(n,{subPath:s})=>n.appendFileSync(s,r,i))}async writeFilePromise(e,r,i){return await this.makeCallPromise(e,async()=>await this.baseFs.writeFilePromise(e,r,i),async(n,{subPath:s})=>await n.writeFilePromise(s,r,i))}writeFileSync(e,r,i){return this.makeCallSync(e,()=>this.baseFs.writeFileSync(e,r,i),(n,{subPath:s})=>n.writeFileSync(s,r,i))}async unlinkPromise(e){return await this.makeCallPromise(e,async()=>await this.baseFs.unlinkPromise(e),async(r,{subPath:i})=>await r.unlinkPromise(i))}unlinkSync(e){return this.makeCallSync(e,()=>this.baseFs.unlinkSync(e),(r,{subPath:i})=>r.unlinkSync(i))}async utimesPromise(e,r,i){return await this.makeCallPromise(e,async()=>await this.baseFs.utimesPromise(e,r,i),async(n,{subPath:s})=>await n.utimesPromise(s,r,i))}utimesSync(e,r,i){return this.makeCallSync(e,()=>this.baseFs.utimesSync(e,r,i),(n,{subPath:s})=>n.utimesSync(s,r,i))}async mkdirPromise(e,r){return await this.makeCallPromise(e,async()=>await this.baseFs.mkdirPromise(e,r),async(i,{subPath:n})=>await i.mkdirPromise(n,r))}mkdirSync(e,r){return this.makeCallSync(e,()=>this.baseFs.mkdirSync(e,r),(i,{subPath:n})=>i.mkdirSync(n,r))}async rmdirPromise(e,r){return await this.makeCallPromise(e,async()=>await this.baseFs.rmdirPromise(e,r),async(i,{subPath:n})=>await i.rmdirPromise(n,r))}rmdirSync(e,r){return this.makeCallSync(e,()=>this.baseFs.rmdirSync(e,r),(i,{subPath:n})=>i.rmdirSync(n,r))}async linkPromise(e,r){return await this.makeCallPromise(r,async()=>await this.baseFs.linkPromise(e,r),async(i,{subPath:n})=>await i.linkPromise(e,n))}linkSync(e,r){return this.makeCallSync(r,()=>this.baseFs.linkSync(e,r),(i,{subPath:n})=>i.linkSync(e,n))}async symlinkPromise(e,r,i){return await this.makeCallPromise(r,async()=>await this.baseFs.symlinkPromise(e,r,i),async(n,{subPath:s})=>await n.symlinkPromise(e,s))}symlinkSync(e,r,i){return this.makeCallSync(r,()=>this.baseFs.symlinkSync(e,r,i),(n,{subPath:s})=>n.symlinkSync(e,s))}async readFilePromise(e,r){return this.makeCallPromise(e,async()=>{switch(r){case\"utf8\":return await this.baseFs.readFilePromise(e,r);default:return await this.baseFs.readFilePromise(e,r)}},async(i,{subPath:n})=>await i.readFilePromise(n,r))}readFileSync(e,r){return this.makeCallSync(e,()=>{switch(r){case\"utf8\":return 
this.baseFs.readFileSync(e,r);default:return this.baseFs.readFileSync(e,r)}},(i,{subPath:n})=>i.readFileSync(n,r))}async readdirPromise(e,r){return await this.makeCallPromise(e,async()=>await this.baseFs.readdirPromise(e,r),async(i,{subPath:n})=>await i.readdirPromise(n,r),{requireSubpath:!1})}readdirSync(e,r){return this.makeCallSync(e,()=>this.baseFs.readdirSync(e,r),(i,{subPath:n})=>i.readdirSync(n,r),{requireSubpath:!1})}async readlinkPromise(e){return await this.makeCallPromise(e,async()=>await this.baseFs.readlinkPromise(e),async(r,{subPath:i})=>await r.readlinkPromise(i))}readlinkSync(e){return this.makeCallSync(e,()=>this.baseFs.readlinkSync(e),(r,{subPath:i})=>r.readlinkSync(i))}async truncatePromise(e,r){return await this.makeCallPromise(e,async()=>await this.baseFs.truncatePromise(e,r),async(i,{subPath:n})=>await i.truncatePromise(n,r))}truncateSync(e,r){return this.makeCallSync(e,()=>this.baseFs.truncateSync(e,r),(i,{subPath:n})=>i.truncateSync(n,r))}watch(e,r,i){return this.makeCallSync(e,()=>this.baseFs.watch(e,r,i),(n,{subPath:s})=>n.watch(s,r,i))}watchFile(e,r,i){return this.makeCallSync(e,()=>this.baseFs.watchFile(e,r,i),()=>WE(this,e,r,i))}unwatchFile(e,r){return this.makeCallSync(e,()=>this.baseFs.unwatchFile(e,r),()=>Jh(this,e,r))}async makeCallPromise(e,r,i,{requireSubpath:n=!0}={}){if(typeof e!=\"string\")return await r();let s=this.resolve(e),o=this.findZip(s);return o?n&&o.subPath===\"/\"?await r():await this.getZipPromise(o.archivePath,async a=>await i(a,o)):await r()}makeCallSync(e,r,i,{requireSubpath:n=!0}={}){if(typeof e!=\"string\")return r();let s=this.resolve(e),o=this.findZip(s);return!o||n&&o.subPath===\"/\"?r():this.getZipSync(o.archivePath,a=>i(a,o))}findZip(e){if(this.filter&&!this.filter.test(e))return null;let r=\"\";for(;;){let i=e.substring(r.length),n;if(!this.fileExtensions)n=yM(i,\".zip\");else for(let s of this.fileExtensions)if(n=yM(i,s),n)break;if(!n)return null;if(r=this.pathUtils.join(r,n),this.isZip.has(r)===!1){if(this.notZip.has(r))continue;try{if(!this.baseFs.lstatSync(r).isFile()){this.notZip.add(r);continue}}catch{return null}this.isZip.add(r)}return{archivePath:r,subPath:this.pathUtils.join(Me.root,e.substring(r.length))}}}limitOpenFiles(e){if(this.zipInstances===null)return;let r=Date.now(),i=r+this.maxAge,n=e===null?0:this.zipInstances.size-e;for(let[s,{zipFs:o,expiresAt:a,refCount:l}]of this.zipInstances.entries())if(!(l!==0||o.hasOpenFileHandles())){if(r>=a){o.saveAndClose(),this.zipInstances.delete(s),n-=1;continue}else if(e===null||n<=0){i=a;break}o.saveAndClose(),this.zipInstances.delete(s),n-=1}this.limitOpenFilesTimeout===null&&(e===null&&this.zipInstances.size>0||e!==null)&&(this.limitOpenFilesTimeout=setTimeout(()=>{this.limitOpenFilesTimeout=null,this.limitOpenFiles(null)},i-r).unref())}async getZipPromise(e,r){let i=async()=>({baseFs:this.baseFs,libzip:this.libzip,readOnly:this.readOnlyArchives,stats:await this.baseFs.statPromise(e)});if(this.zipInstances){let n=this.zipInstances.get(e);if(!n){let s=await i();n=this.zipInstances.get(e),n||(n={zipFs:new Ai(e,s),expiresAt:0,refCount:0})}this.zipInstances.delete(e),this.limitOpenFiles(this.maxOpenFiles-1),this.zipInstances.set(e,n),n.expiresAt=Date.now()+this.maxAge,n.refCount+=1;try{return await r(n.zipFs)}finally{n.refCount-=1}}else{let n=new Ai(e,await i());try{return await r(n)}finally{n.saveAndClose()}}}getZipSync(e,r){let i=()=>({baseFs:this.baseFs,libzip:this.libzip,readOnly:this.readOnlyArchives,stats:this.baseFs.statSync(e)});if(this.zipInstances){let 
n=this.zipInstances.get(e);return n||(n={zipFs:new Ai(e,i()),expiresAt:0,refCount:0}),this.zipInstances.delete(e),this.limitOpenFiles(this.maxOpenFiles-1),this.zipInstances.set(e,n),n.expiresAt=Date.now()+this.maxAge,r(n.zipFs)}else{let n=new Ai(e,i());try{return r(n)}finally{n.saveAndClose()}}}};var Xh=ge(require(\"util\"));var _E=ge(require(\"url\"));var BQ=class extends bi{constructor(e){super(j);this.baseFs=e}mapFromBase(e){return e}mapToBase(e){return e instanceof _E.URL?(0,_E.fileURLToPath)(e):e}};var Pfe=new Set([\"accessSync\",\"appendFileSync\",\"createReadStream\",\"createWriteStream\",\"chmodSync\",\"chownSync\",\"closeSync\",\"copyFileSync\",\"linkSync\",\"lstatSync\",\"fstatSync\",\"lutimesSync\",\"mkdirSync\",\"openSync\",\"opendirSync\",\"readlinkSync\",\"readFileSync\",\"readdirSync\",\"readlinkSync\",\"realpathSync\",\"renameSync\",\"rmdirSync\",\"statSync\",\"symlinkSync\",\"truncateSync\",\"unlinkSync\",\"unwatchFile\",\"utimesSync\",\"watch\",\"watchFile\",\"writeFileSync\",\"writeSync\"]),wM=new Set([\"accessPromise\",\"appendFilePromise\",\"chmodPromise\",\"chownPromise\",\"closePromise\",\"copyFilePromise\",\"linkPromise\",\"fstatPromise\",\"lstatPromise\",\"lutimesPromise\",\"mkdirPromise\",\"openPromise\",\"opendirPromise\",\"readdirPromise\",\"realpathPromise\",\"readFilePromise\",\"readdirPromise\",\"readlinkPromise\",\"renamePromise\",\"rmdirPromise\",\"statPromise\",\"symlinkPromise\",\"truncatePromise\",\"unlinkPromise\",\"utimesPromise\",\"writeFilePromise\",\"writeSync\"]),Dfe=new Set([\"appendFilePromise\",\"chmodPromise\",\"chownPromise\",\"closePromise\",\"readPromise\",\"readFilePromise\",\"statPromise\",\"truncatePromise\",\"utimesPromise\",\"writePromise\",\"writeFilePromise\"]);function bQ(t,e){e=new BQ(e);let r=(i,n,s)=>{let o=i[n];i[n]=s,typeof(o==null?void 0:o[Xh.promisify.custom])!=\"undefined\"&&(s[Xh.promisify.custom]=o[Xh.promisify.custom])};{r(t,\"exists\",(i,...n)=>{let o=typeof n[n.length-1]==\"function\"?n.pop():()=>{};process.nextTick(()=>{e.existsPromise(i).then(a=>{o(a)},()=>{o(!1)})})}),r(t,\"read\",(...i)=>{let[n,s,o,a,l,c]=i;if(i.length<=3){let u={};i.length<3?c=i[1]:(u=i[1],c=i[2]),{buffer:s=Buffer.alloc(16384),offset:o=0,length:a=s.byteLength,position:l}=u}if(o==null&&(o=0),a|=0,a===0){process.nextTick(()=>{c(null,0,s)});return}l==null&&(l=-1),process.nextTick(()=>{e.readPromise(n,s,o,a,l).then(u=>{c(null,u,s)},u=>{c(u,0,s)})})});for(let i of wM){let n=i.replace(/Promise$/,\"\");if(typeof t[n]==\"undefined\")continue;let s=e[i];if(typeof s==\"undefined\")continue;r(t,n,(...a)=>{let c=typeof a[a.length-1]==\"function\"?a.pop():()=>{};process.nextTick(()=>{s.apply(e,a).then(u=>{c(null,u)},u=>{c(u)})})})}t.realpath.native=t.realpath}{r(t,\"existsSync\",i=>{try{return e.existsSync(i)}catch(n){return!1}}),r(t,\"readSync\",(...i)=>{let[n,s,o,a,l]=i;return i.length<=3&&({offset:o=0,length:a=s.byteLength,position:l}=i[2]||{}),o==null&&(o=0),a|=0,a===0?0:(l==null&&(l=-1),e.readSync(n,s,o,a,l))});for(let i of Pfe){let n=i;if(typeof t[n]==\"undefined\")continue;let s=e[i];typeof s!=\"undefined\"&&r(t,n,s.bind(e))}t.realpathSync.native=t.realpathSync}{let i=process.emitWarning;process.emitWarning=()=>{};let n;try{n=t.promises}finally{process.emitWarning=i}if(typeof n!=\"undefined\"){for(let o of wM){let a=o.replace(/Promise$/,\"\");if(typeof n[a]==\"undefined\")continue;let l=e[o];typeof l!=\"undefined\"&&o!==\"open\"&&r(n,a,l.bind(e))}class s{constructor(a){this.fd=a}}for(let o of Dfe){let a=o.replace(/Promise$/,\"\"),l=e[o];typeof 
l!=\"undefined\"&&r(s.prototype,a,function(...c){return l.call(e,this.fd,...c)})}r(n,\"open\",async(...o)=>{let a=await e.openPromise(...o);return new s(a)})}}t.read[Xh.promisify.custom]=async(i,n,...s)=>({bytesRead:await e.readPromise(i,n,...s),buffer:n})}function VE(t,e){let r=Object.create(t);return bQ(r,e),r}var BM=ge(require(\"os\"));function bM(t){let e=Math.ceil(Math.random()*4294967296).toString(16).padStart(8,\"0\");return`${t}${e}`}var ro=new Set,QQ=null;function QM(){if(QQ)return QQ;let t=j.toPortablePath(BM.default.tmpdir()),e=K.realpathSync(t);return process.once(\"exit\",()=>{K.rmtempSync()}),QQ={tmpdir:t,realTmpdir:e}}var K=Object.assign(new ar,{detachTemp(t){ro.delete(t)},mktempSync(t){let{tmpdir:e,realTmpdir:r}=QM();for(;;){let i=bM(\"xfs-\");try{this.mkdirSync(k.join(e,i))}catch(s){if(s.code===\"EEXIST\")continue;throw s}let n=k.join(r,i);if(ro.add(n),typeof t==\"undefined\")return n;try{return t(n)}finally{if(ro.has(n)){ro.delete(n);try{this.removeSync(n)}catch{}}}}},async mktempPromise(t){let{tmpdir:e,realTmpdir:r}=QM();for(;;){let i=bM(\"xfs-\");try{await this.mkdirPromise(k.join(e,i))}catch(s){if(s.code===\"EEXIST\")continue;throw s}let n=k.join(r,i);if(ro.add(n),typeof t==\"undefined\")return n;try{return await t(n)}finally{if(ro.has(n)){ro.delete(n);try{await this.removePromise(n)}catch{}}}}},async rmtempPromise(){await Promise.all(Array.from(ro.values()).map(async t=>{try{await K.removePromise(t,{maxRetries:0}),ro.delete(t)}catch{}}))},rmtempSync(){for(let t of ro)try{K.removeSync(t),ro.delete(t)}catch{}}});var Sx=ge(LQ());var op={};ft(op,{parseResolution:()=>rI,parseShell:()=>ZE,parseSyml:()=>Qi,stringifyArgument:()=>UQ,stringifyArgumentSegment:()=>KQ,stringifyArithmeticExpression:()=>tI,stringifyCommand:()=>MQ,stringifyCommandChain:()=>_u,stringifyCommandChainThen:()=>OQ,stringifyCommandLine:()=>$E,stringifyCommandLineThen:()=>TQ,stringifyEnvSegment:()=>eI,stringifyRedirectArgument:()=>$h,stringifyResolution:()=>iI,stringifyShell:()=>zu,stringifyShellLine:()=>zu,stringifySyml:()=>Na,stringifyValueArgument:()=>sc});var p1=ge(h1());function ZE(t,e={isGlobPattern:()=>!1}){try{return(0,p1.parse)(t,e)}catch(r){throw r.location&&(r.message=r.message.replace(/(\\.)?$/,` (line ${r.location.start.line}, column ${r.location.start.column})$1`)),r}}function zu(t,{endSemicolon:e=!1}={}){return t.map(({command:r,type:i},n)=>`${$E(r)}${i===\";\"?n!==t.length-1||e?\";\":\"\":\" &\"}`).join(\" \")}function $E(t){return`${_u(t.chain)}${t.then?` ${TQ(t.then)}`:\"\"}`}function TQ(t){return`${t.type} ${$E(t.line)}`}function _u(t){return`${MQ(t)}${t.then?` ${OQ(t.then)}`:\"\"}`}function OQ(t){return`${t.type} ${_u(t.chain)}`}function MQ(t){switch(t.type){case\"command\":return`${t.envs.length>0?`${t.envs.map(e=>eI(e)).join(\" \")} `:\"\"}${t.args.map(e=>UQ(e)).join(\" \")}`;case\"subshell\":return`(${zu(t.subshell)})${t.args.length>0?` ${t.args.map(e=>$h(e)).join(\" \")}`:\"\"}`;case\"group\":return`{ ${zu(t.group,{endSemicolon:!0})} }${t.args.length>0?` ${t.args.map(e=>$h(e)).join(\" \")}`:\"\"}`;case\"envs\":return t.envs.map(e=>eI(e)).join(\" \");default:throw new Error(`Unsupported command type:  \"${t.type}\"`)}}function eI(t){return`${t.name}=${t.args[0]?sc(t.args[0]):\"\"}`}function UQ(t){switch(t.type){case\"redirection\":return $h(t);case\"argument\":return sc(t);default:throw new Error(`Unsupported argument type: \"${t.type}\"`)}}function $h(t){return`${t.subtype} ${t.args.map(e=>sc(e)).join(\" \")}`}function sc(t){return t.segments.map(e=>KQ(e)).join(\"\")}function 
KQ(t){let e=(i,n)=>n?`\"${i}\"`:i,r=i=>i===\"\"?'\"\"':i.match(/[(){}<>$|&; \\t\"']/)?`$'${i.replace(/\\\\/g,\"\\\\\\\\\").replace(/'/g,\"\\\\'\").replace(/\\f/g,\"\\\\f\").replace(/\\n/g,\"\\\\n\").replace(/\\r/g,\"\\\\r\").replace(/\\t/g,\"\\\\t\").replace(/\\v/g,\"\\\\v\").replace(/\\0/g,\"\\\\0\")}'`:i;switch(t.type){case\"text\":return r(t.text);case\"glob\":return t.pattern;case\"shell\":return e(`\\${${zu(t.shell)}}`,t.quoted);case\"variable\":return e(typeof t.defaultValue==\"undefined\"?typeof t.alternativeValue==\"undefined\"?`\\${${t.name}}`:t.alternativeValue.length===0?`\\${${t.name}:+}`:`\\${${t.name}:+${t.alternativeValue.map(i=>sc(i)).join(\" \")}}`:t.defaultValue.length===0?`\\${${t.name}:-}`:`\\${${t.name}:-${t.defaultValue.map(i=>sc(i)).join(\" \")}}`,t.quoted);case\"arithmetic\":return`$(( ${tI(t.arithmetic)} ))`;default:throw new Error(`Unsupported argument segment type: \"${t.type}\"`)}}function tI(t){let e=n=>{switch(n){case\"addition\":return\"+\";case\"subtraction\":return\"-\";case\"multiplication\":return\"*\";case\"division\":return\"/\";default:throw new Error(`Can't extract operator from arithmetic expression of type \"${n}\"`)}},r=(n,s)=>s?`( ${n} )`:n,i=n=>r(tI(n),![\"number\",\"variable\"].includes(n.type));switch(t.type){case\"number\":return String(t.value);case\"variable\":return t.name;default:return`${i(t.left)} ${e(t.type)} ${i(t.right)}`}}var m1=ge(C1());function rI(t){let e=t.match(/^\\*{1,2}\\/(.*)/);if(e)throw new Error(`The override for '${t}' includes a glob pattern. Glob patterns have been removed since their behaviours don't match what you'd expect. Set the override to '${e[1]}' instead.`);try{return(0,m1.parse)(t)}catch(r){throw r.location&&(r.message=r.message.replace(/(\\.)?$/,` (line ${r.location.start.line}, column ${r.location.start.column})$1`)),r}}function iI(t){let e=\"\";return t.from&&(e+=t.from.fullName,t.from.description&&(e+=`@${t.from.description}`),e+=\"/\"),e+=t.descriptor.fullName,t.descriptor.description&&(e+=`@${t.descriptor.description}`),e}var hI=ge(AK()),uK=ge(cK()),Kde=/^(?![-?:,\\][{}#&*!|>'\"%@` \\t\\r\\n]).([ \\t]*(?![,\\][{}:# \\t\\r\\n]).)*$/,gK=[\"__metadata\",\"version\",\"resolution\",\"dependencies\",\"peerDependencies\",\"dependenciesMeta\",\"peerDependenciesMeta\",\"binaries\"],$Q=class{constructor(e){this.data=e}};function fK(t){return t.match(Kde)?t:JSON.stringify(t)}function hK(t){return typeof t==\"undefined\"?!0:typeof t==\"object\"&&t!==null?Object.keys(t).every(e=>hK(t[e])):!1}function ev(t,e,r){if(t===null)return`null\n`;if(typeof t==\"number\"||typeof t==\"boolean\")return`${t.toString()}\n`;if(typeof t==\"string\")return`${fK(t)}\n`;if(Array.isArray(t)){if(t.length===0)return`[]\n`;let i=\"  \".repeat(e);return`\n${t.map(s=>`${i}- ${ev(s,e+1,!1)}`).join(\"\")}`}if(typeof t==\"object\"&&t){let i,n;t instanceof $Q?(i=t.data,n=!1):(i=t,n=!0);let s=\"  \".repeat(e),o=Object.keys(i);n&&o.sort((l,c)=>{let u=gK.indexOf(l),g=gK.indexOf(c);return u===-1&&g===-1?l<c?-1:l>c?1:0:u!==-1&&g===-1?-1:u===-1&&g!==-1?1:u-g});let a=o.filter(l=>!hK(i[l])).map((l,c)=>{let u=i[l],g=fK(l),f=ev(u,e+1,!0),h=c>0||r?s:\"\";return f.startsWith(`\n`)?`${h}${g}:${f}`:`${h}${g}: ${f}`}).join(e===0?`\n`:\"\")||`\n`;return r?`\n${a}`:`${a}`}throw new Error(`Unsupported value type (${t})`)}function Na(t){try{let e=ev(t,0,!1);return e!==`\n`?e:\"\"}catch(e){throw e.location&&(e.message=e.message.replace(/(\\.)?$/,` (line ${e.location.start.line}, column ${e.location.start.column})$1`)),e}}Na.PreserveOrdering=$Q;function Hde(t){return 
t.endsWith(`\n`)||(t+=`\n`),(0,uK.parse)(t)}var jde=/^(#.*(\\r?\\n))*?#\\s+yarn\\s+lockfile\\s+v1\\r?\\n/i;function Gde(t){if(jde.test(t))return Hde(t);let e=(0,hI.safeLoad)(t,{schema:hI.FAILSAFE_SCHEMA,json:!0});if(e==null)return{};if(typeof e!=\"object\")throw new Error(`Expected an indexed object, got a ${typeof e} instead. Does your file follow Yaml's rules?`);if(Array.isArray(e))throw new Error(\"Expected an indexed object, got an array instead. Does your file follow Yaml's rules?\");return e}function Qi(t){return Gde(t)}var rz=ge(dK()),yw=ge(hc());var Cp={};ft(Cp,{Builtins:()=>pv,Cli:()=>Is,Command:()=>Re,Option:()=>z,UsageError:()=>Pe,formatMarkdownish:()=>Ki});var pc=0,ap=1,tn=2,rv=\"\u0001\",vi=\"\\0\",ng=-1,iv=/^(-h|--help)(?:=([0-9]+))?$/,pI=/^(--[a-z]+(?:-[a-z]+)*|-[a-zA-Z]+)$/,yK=/^-[a-zA-Z]{2,}$/,nv=/^([^=]+)=([\\s\\S]*)$/,sv=process.env.DEBUG_CLI===\"1\";var Pe=class extends Error{constructor(e){super(e);this.clipanion={type:\"usage\"},this.name=\"UsageError\"}},Ap=class extends Error{constructor(e,r){super();if(this.input=e,this.candidates=r,this.clipanion={type:\"none\"},this.name=\"UnknownSyntaxError\",this.candidates.length===0)this.message=\"Command not found, but we're not sure what's the alternative.\";else if(this.candidates.every(i=>i.reason!==null&&i.reason===r[0].reason)){let[{reason:i}]=this.candidates;this.message=`${i}\n\n${this.candidates.map(({usage:n})=>`$ ${n}`).join(`\n`)}`}else if(this.candidates.length===1){let[{usage:i}]=this.candidates;this.message=`Command not found; did you mean:\n\n$ ${i}\n${ov(e)}`}else this.message=`Command not found; did you mean one of:\n\n${this.candidates.map(({usage:i},n)=>`${`${n}.`.padStart(4)} ${i}`).join(`\n`)}\n\n${ov(e)}`}},av=class extends Error{constructor(e,r){super();this.input=e,this.usages=r,this.clipanion={type:\"none\"},this.name=\"AmbiguousSyntaxError\",this.message=`Cannot find which to pick amongst the following alternatives:\n\n${this.usages.map((i,n)=>`${`${n}.`.padStart(4)} ${i}`).join(`\n`)}\n\n${ov(e)}`}},ov=t=>`While running ${t.filter(e=>e!==vi).map(e=>{let r=JSON.stringify(e);return e.match(/\\s/)||e.length===0||r!==`\"${e}\"`?r:e}).join(\" \")}`;var lp=Symbol(\"clipanion/isOption\");function rn(t){return te(N({},t),{[lp]:!0})}function No(t,e){return typeof t==\"undefined\"?[t,e]:typeof t==\"object\"&&t!==null&&!Array.isArray(t)?[void 0,t]:[t,e]}function dI(t,e=!1){let r=t.replace(/^\\.: /,\"\");return e&&(r=r[0].toLowerCase()+r.slice(1)),r}function cp(t,e){return e.length===1?new Pe(`${t}: ${dI(e[0],!0)}`):new Pe(`${t}:\n${e.map(r=>`\n- ${dI(r)}`).join(\"\")}`)}function up(t,e,r){if(typeof r==\"undefined\")return e;let i=[],n=[],s=a=>{let l=e;return e=a,s.bind(null,l)};if(!r(e,{errors:i,coercions:n,coercion:s}))throw cp(`Invalid value for ${t}`,i);for(let[,a]of n)a();return e}var Re=class{constructor(){this.help=!1}static Usage(e){return e}async catch(e){throw e}async validateAndExecute(){let r=this.constructor.schema;if(Array.isArray(r)){let{isDict:n,isUnknown:s,applyCascade:o}=await Promise.resolve().then(()=>(Es(),sg)),a=o(n(s()),r),l=[],c=[];if(!a(this,{errors:l,coercions:c}))throw cp(\"Invalid option schema\",l);for(let[,g]of c)g()}else if(r!=null)throw new Error(\"Invalid command schema\");let i=await this.execute();return typeof i!=\"undefined\"?i:0}};Re.isOption=lp;Re.Default=[];var DK=80,cv=Array(DK).fill(\"\\u2501\");for(let t=0;t<=24;++t)cv[cv.length-t]=`\u001b[38;5;${232+t}m\\u2501`;var uv={header:t=>`\u001b[1m\\u2501\\u2501\\u2501 ${t}${t.length<DK-5?` 
${cv.slice(t.length+5).join(\"\")}`:\":\"}\u001b[0m`,bold:t=>`\u001b[1m${t}\u001b[22m`,error:t=>`\u001b[31m\u001b[1m${t}\u001b[22m\u001b[39m`,code:t=>`\u001b[36m${t}\u001b[39m`},RK={header:t=>t,bold:t=>t,error:t=>t,code:t=>t};function QCe(t){let e=t.split(`\n`),r=e.filter(n=>n.match(/\\S/)),i=r.length>0?r.reduce((n,s)=>Math.min(n,s.length-s.trimStart().length),Number.MAX_VALUE):0;return e.map(n=>n.slice(i).trimRight()).join(`\n`)}function Ki(t,{format:e,paragraphs:r}){return t=t.replace(/\\r\\n?/g,`\n`),t=QCe(t),t=t.replace(/^\\n+|\\n+$/g,\"\"),t=t.replace(/^(\\s*)-([^\\n]*?)\\n+/gm,`$1-$2\n\n`),t=t.replace(/\\n(\\n)?\\n*/g,\"$1\"),r&&(t=t.split(/\\n/).map(i=>{let n=i.match(/^\\s*[*-][\\t ]+(.*)/);if(!n)return i.match(/(.{1,80})(?: |$)/g).join(`\n`);let s=i.length-i.trimStart().length;return n[1].match(new RegExp(`(.{1,${78-s}})(?: |$)`,\"g\")).map((o,a)=>\" \".repeat(s)+(a===0?\"- \":\"  \")+o).join(`\n`)}).join(`\n\n`)),t=t.replace(/(`+)((?:.|[\\n])*?)\\1/g,(i,n,s)=>e.code(n+s+n)),t=t.replace(/(\\*\\*)((?:.|[\\n])*?)\\1/g,(i,n,s)=>e.bold(n+s+n)),t?`${t}\n`:\"\"}var hv=ge(require(\"tty\"));function wn(t){sv&&console.log(t)}var FK={candidateUsage:null,requiredOptions:[],errorMessage:null,ignoreOptions:!1,path:[],positionals:[],options:[],remainder:null,selectedIndex:ng};function NK(){return{nodes:[sn(),sn(),sn()]}}function SCe(t){let e=NK(),r=[],i=e.nodes.length;for(let n of t){r.push(i);for(let s=0;s<n.nodes.length;++s)LK(s)||e.nodes.push(vCe(n.nodes[s],i));i+=n.nodes.length-2}for(let n of r)og(e,pc,n);return e}function io(t,e){return t.nodes.push(e),t.nodes.length-1}function kCe(t){let e=new Set,r=i=>{if(e.has(i))return;e.add(i);let n=t.nodes[i];for(let o of Object.values(n.statics))for(let{to:a}of o)r(a);for(let[,{to:o}]of n.dynamics)r(o);for(let{to:o}of n.shortcuts)r(o);let s=new Set(n.shortcuts.map(({to:o})=>o));for(;n.shortcuts.length>0;){let{to:o}=n.shortcuts.shift(),a=t.nodes[o];for(let[l,c]of Object.entries(a.statics)){let u=Object.prototype.hasOwnProperty.call(n.statics,l)?n.statics[l]:n.statics[l]=[];for(let g of c)u.some(({to:f})=>g.to===f)||u.push(g)}for(let[l,c]of a.dynamics)n.dynamics.some(([u,{to:g}])=>l===u&&c.to===g)||n.dynamics.push([l,c]);for(let l of a.shortcuts)s.has(l.to)||(n.shortcuts.push(l),s.add(l.to))}};r(pc)}function xCe(t,{prefix:e=\"\"}={}){if(sv){wn(`${e}Nodes are:`);for(let r=0;r<t.nodes.length;++r)wn(`${e}  ${r}: ${JSON.stringify(t.nodes[r])}`)}}function TK(t,e,r=!1){wn(`Running a vm on ${JSON.stringify(e)}`);let i=[{node:pc,state:{candidateUsage:null,requiredOptions:[],errorMessage:null,ignoreOptions:!1,options:[],path:[],positionals:[],remainder:null,selectedIndex:null}}];xCe(t,{prefix:\"  \"});let n=[rv,...e];for(let s=0;s<n.length;++s){let o=n[s];wn(`  Processing ${JSON.stringify(o)}`);let a=[];for(let{node:l,state:c}of i){wn(`    Current node is ${l}`);let u=t.nodes[l];if(l===tn){a.push({node:l,state:c});continue}console.assert(u.shortcuts.length===0,\"Shortcuts should have been eliminated by now\");let g=Object.prototype.hasOwnProperty.call(u.statics,o);if(!r||s<n.length-1||g)if(g){let f=u.statics[o];for(let{to:h,reducer:p}of f)a.push({node:h,state:typeof p!=\"undefined\"?mI(gv,p,c,o):c}),wn(`      Static transition to ${h} found`)}else wn(\"      No static transition found\");else{let f=!1;for(let h of Object.keys(u.statics))if(!!h.startsWith(o)){if(o===h)for(let{to:p,reducer:m}of u.statics[h])a.push({node:p,state:typeof m!=\"undefined\"?mI(gv,m,c,o):c}),wn(`      Static transition to ${p} found`);else for(let{to:p}of 
u.statics[h])a.push({node:p,state:te(N({},c),{remainder:h.slice(o.length)})}),wn(`      Static transition to ${p} found (partial match)`);f=!0}f||wn(\"      No partial static transition found\")}if(o!==vi)for(let[f,{to:h,reducer:p}]of u.dynamics)mI(EI,f,c,o)&&(a.push({node:h,state:typeof p!=\"undefined\"?mI(gv,p,c,o):c}),wn(`      Dynamic transition to ${h} found (via ${f})`))}if(a.length===0&&o===vi&&e.length===1)return[{node:pc,state:FK}];if(a.length===0)throw new Ap(e,i.filter(({node:l})=>l!==tn).map(({state:l})=>({usage:l.candidateUsage,reason:null})));if(a.every(({node:l})=>l===tn))throw new Ap(e,a.map(({state:l})=>({usage:l.candidateUsage,reason:l.errorMessage})));i=PCe(a)}if(i.length>0){wn(\"  Results:\");for(let s of i)wn(`    - ${s.node} -> ${JSON.stringify(s.state)}`)}else wn(\"  No results\");return i}function DCe(t,e){if(e.selectedIndex!==null)return!0;if(Object.prototype.hasOwnProperty.call(t.statics,vi)){for(let{to:r}of t.statics[vi])if(r===ap)return!0}return!1}function FCe(t,e,r){let i=r&&e.length>0?[\"\"]:[],n=TK(t,e,r),s=[],o=new Set,a=(l,c,u=!0)=>{let g=[c];for(;g.length>0;){let h=g;g=[];for(let p of h){let m=t.nodes[p],y=Object.keys(m.statics);for(let Q of Object.keys(m.statics)){let S=y[0];for(let{to:x,reducer:M}of m.statics[S])M===\"pushPath\"&&(u||l.push(S),g.push(x))}}u=!1}let f=JSON.stringify(l);o.has(f)||(s.push(l),o.add(f))};for(let{node:l,state:c}of n){if(c.remainder!==null){a([c.remainder],l);continue}let u=t.nodes[l],g=DCe(u,c);for(let[f,h]of Object.entries(u.statics))(g&&f!==vi||!f.startsWith(\"-\")&&h.some(({reducer:p})=>p===\"pushPath\"))&&a([...i,f],l);if(!!g)for(let[f,{to:h}]of u.dynamics){if(h===tn)continue;let p=RCe(f,c);if(p!==null)for(let m of p)a([...i,m],l)}}return[...s].sort()}function LCe(t,e){let r=TK(t,[...e,vi]);return NCe(e,r.map(({state:i})=>i))}function PCe(t){let e=0;for(let{state:r}of t)r.path.length>e&&(e=r.path.length);return t.filter(({state:r})=>r.path.length===e)}function NCe(t,e){let r=e.filter(g=>g.selectedIndex!==null);if(r.length===0)throw new Error;let i=r.filter(g=>g.requiredOptions.every(f=>f.some(h=>g.options.find(p=>p.name===h))));if(i.length===0)throw new Ap(t,r.map(g=>({usage:g.candidateUsage,reason:null})));let n=0;for(let g of i)g.path.length>n&&(n=g.path.length);let s=i.filter(g=>g.path.length===n),o=g=>g.positionals.filter(({extra:f})=>!f).length+g.options.length,a=s.map(g=>({state:g,positionalCount:o(g)})),l=0;for(let{positionalCount:g}of a)g>l&&(l=g);let c=a.filter(({positionalCount:g})=>g===l).map(({state:g})=>g),u=TCe(c);if(u.length>1)throw new av(t,u.map(g=>g.candidateUsage));return u[0]}function TCe(t){let e=[],r=[];for(let i of t)i.selectedIndex===ng?r.push(i):e.push(i);return r.length>0&&e.push(te(N({},FK),{path:OK(...r.map(i=>i.path)),options:r.reduce((i,n)=>i.concat(n.options),[])})),e}function OK(t,e,...r){return e===void 0?Array.from(t):OK(t.filter((i,n)=>i===e[n]),...r)}function sn(){return{dynamics:[],shortcuts:[],statics:{}}}function LK(t){return t===ap||t===tn}function fv(t,e=0){return{to:LK(t.to)?t.to:t.to>2?t.to+e-2:t.to+e,reducer:t.reducer}}function vCe(t,e=0){let r=sn();for(let[i,n]of t.dynamics)r.dynamics.push([i,fv(n,e)]);for(let i of t.shortcuts)r.shortcuts.push(fv(i,e));for(let[i,n]of Object.entries(t.statics))r.statics[i]=n.map(s=>fv(s,e));return r}function Si(t,e,r,i,n){t.nodes[e].dynamics.push([r,{to:i,reducer:n}])}function og(t,e,r,i){t.nodes[e].shortcuts.push({to:r,reducer:i})}function 
La(t,e,r,i,n){(Object.prototype.hasOwnProperty.call(t.nodes[e].statics,r)?t.nodes[e].statics[r]:t.nodes[e].statics[r]=[]).push({to:i,reducer:n})}function mI(t,e,r,i){if(Array.isArray(e)){let[n,...s]=e;return t[n](r,i,...s)}else return t[e](r,i)}function RCe(t,e){let r=Array.isArray(t)?EI[t[0]]:EI[t];if(typeof r.suggest==\"undefined\")return null;let i=Array.isArray(t)?t.slice(1):[];return r.suggest(e,...i)}var EI={always:()=>!0,isOptionLike:(t,e)=>!t.ignoreOptions&&e!==\"-\"&&e.startsWith(\"-\"),isNotOptionLike:(t,e)=>t.ignoreOptions||e===\"-\"||!e.startsWith(\"-\"),isOption:(t,e,r,i)=>!t.ignoreOptions&&e===r,isBatchOption:(t,e,r)=>!t.ignoreOptions&&yK.test(e)&&[...e.slice(1)].every(i=>r.includes(`-${i}`)),isBoundOption:(t,e,r,i)=>{let n=e.match(nv);return!t.ignoreOptions&&!!n&&pI.test(n[1])&&r.includes(n[1])&&i.filter(s=>s.names.includes(n[1])).every(s=>s.allowBinding)},isNegatedOption:(t,e,r)=>!t.ignoreOptions&&e===`--no-${r.slice(2)}`,isHelp:(t,e)=>!t.ignoreOptions&&iv.test(e),isUnsupportedOption:(t,e,r)=>!t.ignoreOptions&&e.startsWith(\"-\")&&pI.test(e)&&!r.includes(e),isInvalidOption:(t,e)=>!t.ignoreOptions&&e.startsWith(\"-\")&&!pI.test(e)};EI.isOption.suggest=(t,e,r=!0)=>r?null:[e];var gv={setCandidateState:(t,e,r)=>N(N({},t),r),setSelectedIndex:(t,e,r)=>te(N({},t),{selectedIndex:r}),pushBatch:(t,e)=>te(N({},t),{options:t.options.concat([...e.slice(1)].map(r=>({name:`-${r}`,value:!0})))}),pushBound:(t,e)=>{let[,r,i]=e.match(nv);return te(N({},t),{options:t.options.concat({name:r,value:i})})},pushPath:(t,e)=>te(N({},t),{path:t.path.concat(e)}),pushPositional:(t,e)=>te(N({},t),{positionals:t.positionals.concat({value:e,extra:!1})}),pushExtra:(t,e)=>te(N({},t),{positionals:t.positionals.concat({value:e,extra:!0})}),pushExtraNoLimits:(t,e)=>te(N({},t),{positionals:t.positionals.concat({value:e,extra:Vn})}),pushTrue:(t,e,r=e)=>te(N({},t),{options:t.options.concat({name:e,value:!0})}),pushFalse:(t,e,r=e)=>te(N({},t),{options:t.options.concat({name:r,value:!1})}),pushUndefined:(t,e)=>te(N({},t),{options:t.options.concat({name:e,value:void 0})}),pushStringValue:(t,e)=>{var r;let i=te(N({},t),{options:[...t.options]}),n=t.options[t.options.length-1];return n.value=((r=n.value)!==null&&r!==void 0?r:[]).concat([e]),i},setStringValue:(t,e)=>{let r=te(N({},t),{options:[...t.options]}),i=t.options[t.options.length-1];return i.value=e,r},inhibateOptions:t=>te(N({},t),{ignoreOptions:!0}),useHelp:(t,e,r)=>{let[,,i]=e.match(iv);return typeof i!=\"undefined\"?te(N({},t),{options:[{name:\"-c\",value:String(r)},{name:\"-i\",value:i}]}):te(N({},t),{options:[{name:\"-c\",value:String(r)}]})},setError:(t,e,r)=>e===vi?te(N({},t),{errorMessage:`${r}.`}):te(N({},t),{errorMessage:`${r} (\"${e}\").`}),setOptionArityError:(t,e)=>{let r=t.options[t.options.length-1];return te(N({},t),{errorMessage:`Not enough arguments to option ${r.name}.`})}},Vn=Symbol(),MK=class{constructor(e,r){this.allOptionNames=[],this.arity={leading:[],trailing:[],extra:[],proxy:!1},this.options=[],this.paths=[],this.cliIndex=e,this.cliOpts=r}addPath(e){this.paths.push(e)}setArity({leading:e=this.arity.leading,trailing:r=this.arity.trailing,extra:i=this.arity.extra,proxy:n=this.arity.proxy}){Object.assign(this.arity,{leading:e,trailing:r,extra:i,proxy:n})}addPositional({name:e=\"arg\",required:r=!0}={}){if(!r&&this.arity.extra===Vn)throw new Error(\"Optional parameters cannot be declared when using .rest() or .proxy()\");if(!r&&this.arity.trailing.length>0)throw new Error(\"Optional parameters cannot be declared after the required 
trailing positional arguments\");!r&&this.arity.extra!==Vn?this.arity.extra.push(e):this.arity.extra!==Vn&&this.arity.extra.length===0?this.arity.leading.push(e):this.arity.trailing.push(e)}addRest({name:e=\"arg\",required:r=0}={}){if(this.arity.extra===Vn)throw new Error(\"Infinite lists cannot be declared multiple times in the same command\");if(this.arity.trailing.length>0)throw new Error(\"Infinite lists cannot be declared after the required trailing positional arguments\");for(let i=0;i<r;++i)this.addPositional({name:e});this.arity.extra=Vn}addProxy({required:e=0}={}){this.addRest({required:e}),this.arity.proxy=!0}addOption({names:e,description:r,arity:i=0,hidden:n=!1,required:s=!1,allowBinding:o=!0}){if(!o&&i>1)throw new Error(\"The arity cannot be higher than 1 when the option only supports the --arg=value syntax\");if(!Number.isInteger(i))throw new Error(`The arity must be an integer, got ${i}`);if(i<0)throw new Error(`The arity must be positive, got ${i}`);this.allOptionNames.push(...e),this.options.push({names:e,description:r,arity:i,hidden:n,required:s,allowBinding:o})}setContext(e){this.context=e}usage({detailed:e=!0,inlineOptions:r=!0}={}){let i=[this.cliOpts.binaryName],n=[];if(this.paths.length>0&&i.push(...this.paths[0]),e){for(let{names:o,arity:a,hidden:l,description:c,required:u}of this.options){if(l)continue;let g=[];for(let h=0;h<a;++h)g.push(` #${h}`);let f=`${o.join(\",\")}${g.join(\"\")}`;!r&&c?n.push({definition:f,description:c,required:u}):i.push(u?`<${f}>`:`[${f}]`)}i.push(...this.arity.leading.map(o=>`<${o}>`)),this.arity.extra===Vn?i.push(\"...\"):i.push(...this.arity.extra.map(o=>`[${o}]`)),i.push(...this.arity.trailing.map(o=>`<${o}>`))}return{usage:i.join(\" \"),options:n}}compile(){if(typeof this.context==\"undefined\")throw new Error(\"Assertion failed: No context attached\");let e=NK(),r=pc,i=this.usage().usage,n=this.options.filter(a=>a.required).map(a=>a.names);r=io(e,sn()),La(e,pc,rv,r,[\"setCandidateState\",{candidateUsage:i,requiredOptions:n}]);let s=this.arity.proxy?\"always\":\"isNotOptionLike\",o=this.paths.length>0?this.paths:[[]];for(let a of o){let l=r;if(a.length>0){let f=io(e,sn());og(e,l,f),this.registerOptions(e,f),l=f}for(let f=0;f<a.length;++f){let h=io(e,sn());La(e,l,a[f],h,\"pushPath\"),l=h}if(this.arity.leading.length>0||!this.arity.proxy){let f=io(e,sn());Si(e,l,\"isHelp\",f,[\"useHelp\",this.cliIndex]),La(e,f,vi,ap,[\"setSelectedIndex\",ng]),this.registerOptions(e,l)}this.arity.leading.length>0&&La(e,l,vi,tn,[\"setError\",\"Not enough positional arguments\"]);let c=l;for(let f=0;f<this.arity.leading.length;++f){let h=io(e,sn());this.arity.proxy||this.registerOptions(e,h),(this.arity.trailing.length>0||f+1!==this.arity.leading.length)&&La(e,h,vi,tn,[\"setError\",\"Not enough positional arguments\"]),Si(e,c,\"isNotOptionLike\",h,\"pushPositional\"),c=h}let u=c;if(this.arity.extra===Vn||this.arity.extra.length>0){let f=io(e,sn());if(og(e,c,f),this.arity.extra===Vn){let h=io(e,sn());this.arity.proxy||this.registerOptions(e,h),Si(e,c,s,h,\"pushExtraNoLimits\"),Si(e,h,s,h,\"pushExtraNoLimits\"),og(e,h,f)}else for(let h=0;h<this.arity.extra.length;++h){let p=io(e,sn());this.arity.proxy||this.registerOptions(e,p),Si(e,u,s,p,\"pushExtra\"),og(e,p,f),u=p}u=f}this.arity.trailing.length>0&&La(e,u,vi,tn,[\"setError\",\"Not enough positional arguments\"]);let g=u;for(let f=0;f<this.arity.trailing.length;++f){let h=io(e,sn());this.arity.proxy||this.registerOptions(e,h),f+1<this.arity.trailing.length&&La(e,h,vi,tn,[\"setError\",\"Not enough 
positional arguments\"]),Si(e,g,\"isNotOptionLike\",h,\"pushPositional\"),g=h}Si(e,g,s,tn,[\"setError\",\"Extraneous positional argument\"]),La(e,g,vi,ap,[\"setSelectedIndex\",this.cliIndex])}return{machine:e,context:this.context}}registerOptions(e,r){Si(e,r,[\"isOption\",\"--\"],r,\"inhibateOptions\"),Si(e,r,[\"isBatchOption\",this.allOptionNames],r,\"pushBatch\"),Si(e,r,[\"isBoundOption\",this.allOptionNames,this.options],r,\"pushBound\"),Si(e,r,[\"isUnsupportedOption\",this.allOptionNames],tn,[\"setError\",\"Unsupported option name\"]),Si(e,r,[\"isInvalidOption\"],tn,[\"setError\",\"Invalid option name\"]);for(let i of this.options){let n=i.names.reduce((s,o)=>o.length>s.length?o:s,\"\");if(i.arity===0)for(let s of i.names)Si(e,r,[\"isOption\",s,i.hidden||s!==n],r,\"pushTrue\"),s.startsWith(\"--\")&&!s.startsWith(\"--no-\")&&Si(e,r,[\"isNegatedOption\",s],r,[\"pushFalse\",s]);else{let s=io(e,sn());for(let o of i.names)Si(e,r,[\"isOption\",o,i.hidden||o!==n],s,\"pushUndefined\");for(let o=0;o<i.arity;++o){let a=io(e,sn());La(e,s,vi,tn,\"setOptionArityError\"),Si(e,s,\"isOptionLike\",tn,\"setOptionArityError\");let l=i.arity===1?\"setStringValue\":\"pushStringValue\";Si(e,s,\"isNotOptionLike\",a,l),s=a}og(e,s,r)}}}},pp=class{constructor({binaryName:e=\"...\"}={}){this.builders=[],this.opts={binaryName:e}}static build(e,r={}){return new pp(r).commands(e).compile()}getBuilderByIndex(e){if(!(e>=0&&e<this.builders.length))throw new Error(`Assertion failed: Out-of-bound command index (${e})`);return this.builders[e]}commands(e){for(let r of e)r(this.command());return this}command(){let e=new MK(this.builders.length,this.opts);return this.builders.push(e),e}compile(){let e=[],r=[];for(let n of this.builders){let{machine:s,context:o}=n.compile();e.push(s),r.push(o)}let i=SCe(e);return kCe(i),{machine:i,contexts:r,process:n=>LCe(i,n),suggest:(n,s)=>FCe(i,n,s)}}};var dp=class extends Re{constructor(e){super();this.contexts=e,this.commands=[]}static from(e,r){let i=new dp(r);i.path=e.path;for(let n of e.options)switch(n.name){case\"-c\":i.commands.push(Number(n.value));break;case\"-i\":i.index=Number(n.value);break}return i}async execute(){let e=this.commands;if(typeof this.index!=\"undefined\"&&this.index>=0&&this.index<e.length&&(e=[e[this.index]]),e.length===0)this.context.stdout.write(this.cli.usage());else if(e.length===1)this.context.stdout.write(this.cli.usage(this.contexts[e[0]].commandClass,{detailed:!0}));else if(e.length>1){this.context.stdout.write(`Multiple commands match your selection:\n`),this.context.stdout.write(`\n`);let r=0;for(let i of this.commands)this.context.stdout.write(this.cli.usage(this.contexts[i].commandClass,{prefix:`${r++}. 
`.padStart(5)}));this.context.stdout.write(`\n`),this.context.stdout.write(`Run again with -h=<index> to see the longer details of any of those commands.\n`)}}};var UK=Symbol(\"clipanion/errorCommand\");function OCe(){return process.env.FORCE_COLOR===\"0\"?1:process.env.FORCE_COLOR===\"1\"||typeof process.stdout!=\"undefined\"&&process.stdout.isTTY?8:1}var Is=class{constructor({binaryLabel:e,binaryName:r=\"...\",binaryVersion:i,enableCapture:n=!1,enableColors:s}={}){this.registrations=new Map,this.builder=new pp({binaryName:r}),this.binaryLabel=e,this.binaryName=r,this.binaryVersion=i,this.enableCapture=n,this.enableColors=s}static from(e,r={}){let i=new Is(r);for(let n of e)i.register(n);return i}register(e){var r;let i=new Map,n=new e;for(let l in n){let c=n[l];typeof c==\"object\"&&c!==null&&c[Re.isOption]&&i.set(l,c)}let s=this.builder.command(),o=s.cliIndex,a=(r=e.paths)!==null&&r!==void 0?r:n.paths;if(typeof a!=\"undefined\")for(let l of a)s.addPath(l);this.registrations.set(e,{specs:i,builder:s,index:o});for(let[l,{definition:c}]of i.entries())c(s,l);s.setContext({commandClass:e})}process(e){let{contexts:r,process:i}=this.builder.compile(),n=i(e);switch(n.selectedIndex){case ng:return dp.from(n,r);default:{let{commandClass:s}=r[n.selectedIndex],o=this.registrations.get(s);if(typeof o==\"undefined\")throw new Error(\"Assertion failed: Expected the command class to have been registered.\");let a=new s;a.path=n.path;try{for(let[l,{transformer:c}]of o.specs.entries())a[l]=c(o.builder,l,n);return a}catch(l){throw l[UK]=a,l}}break}}async run(e,r){var i;let n,s=N(N({},Is.defaultContext),r),o=(i=this.enableColors)!==null&&i!==void 0?i:s.colorDepth>1;if(!Array.isArray(e))n=e;else try{n=this.process(e)}catch(c){return s.stdout.write(this.error(c,{colored:o})),1}if(n.help)return s.stdout.write(this.usage(n,{colored:o,detailed:!0})),0;n.context=s,n.cli={binaryLabel:this.binaryLabel,binaryName:this.binaryName,binaryVersion:this.binaryVersion,enableCapture:this.enableCapture,enableColors:this.enableColors,definitions:()=>this.definitions(),error:(c,u)=>this.error(c,u),format:c=>this.format(c),process:c=>this.process(c),run:(c,u)=>this.run(c,N(N({},s),u)),usage:(c,u)=>this.usage(c,u)};let a=this.enableCapture?MCe(s):KK,l;try{l=await a(()=>n.validateAndExecute().catch(c=>n.catch(c).then(()=>0)))}catch(c){return s.stdout.write(this.error(c,{colored:o,command:n})),1}return l}async runExit(e,r){process.exitCode=await this.run(e,r)}suggest(e,r){let{suggest:i}=this.builder.compile();return i(e,r)}definitions({colored:e=!1}={}){let r=[];for(let[i,{index:n}]of this.registrations){if(typeof i.usage==\"undefined\")continue;let{usage:s}=this.getUsageByIndex(n,{detailed:!1}),{usage:o,options:a}=this.getUsageByIndex(n,{detailed:!0,inlineOptions:!1}),l=typeof i.usage.category!=\"undefined\"?Ki(i.usage.category,{format:this.format(e),paragraphs:!1}):void 0,c=typeof i.usage.description!=\"undefined\"?Ki(i.usage.description,{format:this.format(e),paragraphs:!1}):void 0,u=typeof i.usage.details!=\"undefined\"?Ki(i.usage.details,{format:this.format(e),paragraphs:!0}):void 0,g=typeof i.usage.examples!=\"undefined\"?i.usage.examples.map(([f,h])=>[Ki(f,{format:this.format(e),paragraphs:!1}),h.replace(/\\$0/g,this.binaryName)]):void 0;r.push({path:s,usage:o,category:l,description:c,details:u,examples:g,options:a})}return r}usage(e=null,{colored:r,detailed:i=!1,prefix:n=\"$ \"}={}){var s;if(e===null){for(let l of this.registrations.keys()){let c=l.paths,u=typeof 
l.usage!=\"undefined\";if(!c||c.length===0||c.length===1&&c[0].length===0||((s=c==null?void 0:c.some(h=>h.length===0))!==null&&s!==void 0?s:!1))if(e){e=null;break}else e=l;else if(u){e=null;continue}}e&&(i=!0)}let o=e!==null&&e instanceof Re?e.constructor:e,a=\"\";if(o)if(i){let{description:l=\"\",details:c=\"\",examples:u=[]}=o.usage||{};l!==\"\"&&(a+=Ki(l,{format:this.format(r),paragraphs:!1}).replace(/^./,h=>h.toUpperCase()),a+=`\n`),(c!==\"\"||u.length>0)&&(a+=`${this.format(r).header(\"Usage\")}\n`,a+=`\n`);let{usage:g,options:f}=this.getUsageByRegistration(o,{inlineOptions:!1});if(a+=`${this.format(r).bold(n)}${g}\n`,f.length>0){a+=`\n`,a+=`${uv.header(\"Options\")}\n`;let h=f.reduce((p,m)=>Math.max(p,m.definition.length),0);a+=`\n`;for(let{definition:p,description:m}of f)a+=`  ${this.format(r).bold(p.padEnd(h))}    ${Ki(m,{format:this.format(r),paragraphs:!1})}`}if(c!==\"\"&&(a+=`\n`,a+=`${this.format(r).header(\"Details\")}\n`,a+=`\n`,a+=Ki(c,{format:this.format(r),paragraphs:!0})),u.length>0){a+=`\n`,a+=`${this.format(r).header(\"Examples\")}\n`;for(let[h,p]of u)a+=`\n`,a+=Ki(h,{format:this.format(r),paragraphs:!1}),a+=`${p.replace(/^/m,`  ${this.format(r).bold(n)}`).replace(/\\$0/g,this.binaryName)}\n`}}else{let{usage:l}=this.getUsageByRegistration(o);a+=`${this.format(r).bold(n)}${l}\n`}else{let l=new Map;for(let[f,{index:h}]of this.registrations.entries()){if(typeof f.usage==\"undefined\")continue;let p=typeof f.usage.category!=\"undefined\"?Ki(f.usage.category,{format:this.format(r),paragraphs:!1}):null,m=l.get(p);typeof m==\"undefined\"&&l.set(p,m=[]);let{usage:y}=this.getUsageByIndex(h);m.push({commandClass:f,usage:y})}let c=Array.from(l.keys()).sort((f,h)=>f===null?-1:h===null?1:f.localeCompare(h,\"en\",{usage:\"sort\",caseFirst:\"upper\"})),u=typeof this.binaryLabel!=\"undefined\",g=typeof this.binaryVersion!=\"undefined\";u||g?(u&&g?a+=`${this.format(r).header(`${this.binaryLabel} - ${this.binaryVersion}`)}\n\n`:u?a+=`${this.format(r).header(`${this.binaryLabel}`)}\n`:a+=`${this.format(r).header(`${this.binaryVersion}`)}\n`,a+=`  ${this.format(r).bold(n)}${this.binaryName} <command>\n`):a+=`${this.format(r).bold(n)}${this.binaryName} <command>\n`;for(let f of c){let h=l.get(f).slice().sort((m,y)=>m.usage.localeCompare(y.usage,\"en\",{usage:\"sort\",caseFirst:\"upper\"})),p=f!==null?f.trim():\"General commands\";a+=`\n`,a+=`${this.format(r).header(`${p}`)}\n`;for(let{commandClass:m,usage:y}of h){let Q=m.usage.description||\"undocumented\";a+=`\n`,a+=`  ${this.format(r).bold(y)}\n`,a+=`    ${Ki(Q,{format:this.format(r),paragraphs:!1})}`}}a+=`\n`,a+=Ki(\"You can also print more details about any of these commands by calling them with the `-h,--help` flag right after the command name.\",{format:this.format(r),paragraphs:!0})}return a}error(e,r){var i,{colored:n,command:s=(i=e[UK])!==null&&i!==void 0?i:null}=r===void 0?{}:r;e instanceof Error||(e=new Error(`Execution failed with a non-error rejection (rejected value: ${JSON.stringify(e)})`));let o=\"\",a=e.name.replace(/([a-z])([A-Z])/g,\"$1 $2\");a===\"Error\"&&(a=\"Internal Error\"),o+=`${this.format(n).error(a)}: ${e.message}\n`;let l=e.clipanion;return typeof l!=\"undefined\"?l.type===\"usage\"&&(o+=`\n`,o+=this.usage(s)):e.stack&&(o+=`${e.stack.replace(/^.*\\n/,\"\")}\n`),o}format(e){var r;return((r=e!=null?e:this.enableColors)!==null&&r!==void 0?r:Is.defaultContext.colorDepth>1)?uv:RK}getUsageByRegistration(e,r){let i=this.registrations.get(e);if(typeof i==\"undefined\")throw new Error(\"Assertion failed: Unregistered 
command\");return this.getUsageByIndex(i.index,r)}getUsageByIndex(e,r){return this.builder.getBuilderByIndex(e).usage(r)}};Is.defaultContext={stdin:process.stdin,stdout:process.stdout,stderr:process.stderr,colorDepth:\"getColorDepth\"in hv.default.WriteStream.prototype?hv.default.WriteStream.prototype.getColorDepth():OCe()};var HK;function MCe(t){let e=HK;if(typeof e==\"undefined\"){if(t.stdout===process.stdout&&t.stderr===process.stderr)return KK;let{AsyncLocalStorage:r}=require(\"async_hooks\");e=HK=new r;let i=process.stdout._write;process.stdout._write=function(s,o,a){let l=e.getStore();return typeof l==\"undefined\"?i.call(this,s,o,a):l.stdout.write(s,o,a)};let n=process.stderr._write;process.stderr._write=function(s,o,a){let l=e.getStore();return typeof l==\"undefined\"?n.call(this,s,o,a):l.stderr.write(s,o,a)}}return r=>e.run(t,r)}function KK(t){return t()}var pv={};ft(pv,{DefinitionsCommand:()=>II,HelpCommand:()=>yI,VersionCommand:()=>wI});var II=class extends Re{async execute(){this.context.stdout.write(`${JSON.stringify(this.cli.definitions(),null,2)}\n`)}};II.paths=[[\"--clipanion=definitions\"]];var yI=class extends Re{async execute(){this.context.stdout.write(this.cli.usage())}};yI.paths=[[\"-h\"],[\"--help\"]];var wI=class extends Re{async execute(){var e;this.context.stdout.write(`${(e=this.cli.binaryVersion)!==null&&e!==void 0?e:\"<unknown>\"}\n`)}};wI.paths=[[\"-v\"],[\"--version\"]];var z={};ft(z,{Array:()=>jK,Boolean:()=>GK,Counter:()=>YK,Proxy:()=>qK,Rest:()=>JK,String:()=>WK,applyValidator:()=>up,cleanValidationError:()=>dI,formatError:()=>cp,isOptionSymbol:()=>lp,makeCommandOption:()=>rn,rerouteArguments:()=>No});function jK(t,e,r){let[i,n]=No(e,r!=null?r:{}),{arity:s=1}=n,o=t.split(\",\"),a=new Set(o);return rn({definition(l){l.addOption({names:o,arity:s,hidden:n==null?void 0:n.hidden,description:n==null?void 0:n.description,required:n.required})},transformer(l,c,u){let g=typeof i!=\"undefined\"?[...i]:void 0;for(let{name:f,value:h}of u.options)!a.has(f)||(g=g!=null?g:[],g.push(h));return g}})}function GK(t,e,r){let[i,n]=No(e,r!=null?r:{}),s=t.split(\",\"),o=new Set(s);return rn({definition(a){a.addOption({names:s,allowBinding:!1,arity:0,hidden:n.hidden,description:n.description,required:n.required})},transformer(a,l,c){let u=i;for(let{name:g,value:f}of c.options)!o.has(g)||(u=f);return u}})}function YK(t,e,r){let[i,n]=No(e,r!=null?r:{}),s=t.split(\",\"),o=new Set(s);return rn({definition(a){a.addOption({names:s,allowBinding:!1,arity:0,hidden:n.hidden,description:n.description,required:n.required})},transformer(a,l,c){let u=i;for(let{name:g,value:f}of c.options)!o.has(g)||(u!=null||(u=0),f?u+=1:u=0);return u}})}function qK(t={}){return rn({definition(e,r){var i;e.addProxy({name:(i=t.name)!==null&&i!==void 0?i:r,required:t.required})},transformer(e,r,i){return i.positionals.map(({value:n})=>n)}})}function JK(t={}){return rn({definition(e,r){var i;e.addRest({name:(i=t.name)!==null&&i!==void 0?i:r,required:t.required})},transformer(e,r,i){let n=o=>{let a=i.positionals[o];return a.extra===Vn||a.extra===!1&&o<e.arity.leading.length},s=0;for(;s<i.positionals.length&&n(s);)s+=1;return i.positionals.splice(0,s).map(({value:o})=>o)}})}function UCe(t,e,r){let[i,n]=No(e,r!=null?r:{}),{arity:s=1}=n,o=t.split(\",\"),a=new Set(o);return rn({definition(l){l.addOption({names:o,arity:n.tolerateBoolean?0:s,hidden:n.hidden,description:n.description,required:n.required})},transformer(l,c,u){let g,f=i;for(let{name:h,value:p}of u.options)!a.has(h)||(g=h,f=p);return typeof 
f==\"string\"?up(g!=null?g:c,f,n.validator):f}})}function KCe(t={}){let{required:e=!0}=t;return rn({definition(r,i){var n;r.addPositional({name:(n=t.name)!==null&&n!==void 0?n:i,required:t.required})},transformer(r,i,n){var s;for(let o=0;o<n.positionals.length;++o){if(n.positionals[o].extra===Vn||e&&n.positionals[o].extra===!0||!e&&n.positionals[o].extra===!1)continue;let[a]=n.positionals.splice(o,1);return up((s=t.name)!==null&&s!==void 0?s:i,a.value,t.validator)}}})}function WK(t,...e){return typeof t==\"string\"?UCe(t,...e):KCe(t)}var iz=ge(ag()),Ix=ge(require(\"stream\"));var $;(function(oe){oe[oe.UNNAMED=0]=\"UNNAMED\",oe[oe.EXCEPTION=1]=\"EXCEPTION\",oe[oe.MISSING_PEER_DEPENDENCY=2]=\"MISSING_PEER_DEPENDENCY\",oe[oe.CYCLIC_DEPENDENCIES=3]=\"CYCLIC_DEPENDENCIES\",oe[oe.DISABLED_BUILD_SCRIPTS=4]=\"DISABLED_BUILD_SCRIPTS\",oe[oe.BUILD_DISABLED=5]=\"BUILD_DISABLED\",oe[oe.SOFT_LINK_BUILD=6]=\"SOFT_LINK_BUILD\",oe[oe.MUST_BUILD=7]=\"MUST_BUILD\",oe[oe.MUST_REBUILD=8]=\"MUST_REBUILD\",oe[oe.BUILD_FAILED=9]=\"BUILD_FAILED\",oe[oe.RESOLVER_NOT_FOUND=10]=\"RESOLVER_NOT_FOUND\",oe[oe.FETCHER_NOT_FOUND=11]=\"FETCHER_NOT_FOUND\",oe[oe.LINKER_NOT_FOUND=12]=\"LINKER_NOT_FOUND\",oe[oe.FETCH_NOT_CACHED=13]=\"FETCH_NOT_CACHED\",oe[oe.YARN_IMPORT_FAILED=14]=\"YARN_IMPORT_FAILED\",oe[oe.REMOTE_INVALID=15]=\"REMOTE_INVALID\",oe[oe.REMOTE_NOT_FOUND=16]=\"REMOTE_NOT_FOUND\",oe[oe.RESOLUTION_PACK=17]=\"RESOLUTION_PACK\",oe[oe.CACHE_CHECKSUM_MISMATCH=18]=\"CACHE_CHECKSUM_MISMATCH\",oe[oe.UNUSED_CACHE_ENTRY=19]=\"UNUSED_CACHE_ENTRY\",oe[oe.MISSING_LOCKFILE_ENTRY=20]=\"MISSING_LOCKFILE_ENTRY\",oe[oe.WORKSPACE_NOT_FOUND=21]=\"WORKSPACE_NOT_FOUND\",oe[oe.TOO_MANY_MATCHING_WORKSPACES=22]=\"TOO_MANY_MATCHING_WORKSPACES\",oe[oe.CONSTRAINTS_MISSING_DEPENDENCY=23]=\"CONSTRAINTS_MISSING_DEPENDENCY\",oe[oe.CONSTRAINTS_INCOMPATIBLE_DEPENDENCY=24]=\"CONSTRAINTS_INCOMPATIBLE_DEPENDENCY\",oe[oe.CONSTRAINTS_EXTRANEOUS_DEPENDENCY=25]=\"CONSTRAINTS_EXTRANEOUS_DEPENDENCY\",oe[oe.CONSTRAINTS_INVALID_DEPENDENCY=26]=\"CONSTRAINTS_INVALID_DEPENDENCY\",oe[oe.CANT_SUGGEST_RESOLUTIONS=27]=\"CANT_SUGGEST_RESOLUTIONS\",oe[oe.FROZEN_LOCKFILE_EXCEPTION=28]=\"FROZEN_LOCKFILE_EXCEPTION\",oe[oe.CROSS_DRIVE_VIRTUAL_LOCAL=29]=\"CROSS_DRIVE_VIRTUAL_LOCAL\",oe[oe.FETCH_FAILED=30]=\"FETCH_FAILED\",oe[oe.DANGEROUS_NODE_MODULES=31]=\"DANGEROUS_NODE_MODULES\",oe[oe.NODE_GYP_INJECTED=32]=\"NODE_GYP_INJECTED\",oe[oe.AUTHENTICATION_NOT_FOUND=33]=\"AUTHENTICATION_NOT_FOUND\",oe[oe.INVALID_CONFIGURATION_KEY=34]=\"INVALID_CONFIGURATION_KEY\",oe[oe.NETWORK_ERROR=35]=\"NETWORK_ERROR\",oe[oe.LIFECYCLE_SCRIPT=36]=\"LIFECYCLE_SCRIPT\",oe[oe.CONSTRAINTS_MISSING_FIELD=37]=\"CONSTRAINTS_MISSING_FIELD\",oe[oe.CONSTRAINTS_INCOMPATIBLE_FIELD=38]=\"CONSTRAINTS_INCOMPATIBLE_FIELD\",oe[oe.CONSTRAINTS_EXTRANEOUS_FIELD=39]=\"CONSTRAINTS_EXTRANEOUS_FIELD\",oe[oe.CONSTRAINTS_INVALID_FIELD=40]=\"CONSTRAINTS_INVALID_FIELD\",oe[oe.AUTHENTICATION_INVALID=41]=\"AUTHENTICATION_INVALID\",oe[oe.PROLOG_UNKNOWN_ERROR=42]=\"PROLOG_UNKNOWN_ERROR\",oe[oe.PROLOG_SYNTAX_ERROR=43]=\"PROLOG_SYNTAX_ERROR\",oe[oe.PROLOG_EXISTENCE_ERROR=44]=\"PROLOG_EXISTENCE_ERROR\",oe[oe.STACK_OVERFLOW_RESOLUTION=45]=\"STACK_OVERFLOW_RESOLUTION\",oe[oe.AUTOMERGE_FAILED_TO_PARSE=46]=\"AUTOMERGE_FAILED_TO_PARSE\",oe[oe.AUTOMERGE_IMMUTABLE=47]=\"AUTOMERGE_IMMUTABLE\",oe[oe.AUTOMERGE_SUCCESS=48]=\"AUTOMERGE_SUCCESS\",oe[oe.AUTOMERGE_REQUIRED=49]=\"AUTOMERGE_REQUIRED\",oe[oe.DEPRECATED_CLI_SETTINGS=50]=\"DEPRECATED_CLI_SETTINGS\",oe[oe.PLUGIN_NAME_NOT_FOUND=51]=\"PLUGIN_NAME_NOT_FOUND\",oe[oe.INVALID_PLUGIN_REF
ERENCE=52]=\"INVALID_PLUGIN_REFERENCE\",oe[oe.CONSTRAINTS_AMBIGUITY=53]=\"CONSTRAINTS_AMBIGUITY\",oe[oe.CACHE_OUTSIDE_PROJECT=54]=\"CACHE_OUTSIDE_PROJECT\",oe[oe.IMMUTABLE_INSTALL=55]=\"IMMUTABLE_INSTALL\",oe[oe.IMMUTABLE_CACHE=56]=\"IMMUTABLE_CACHE\",oe[oe.INVALID_MANIFEST=57]=\"INVALID_MANIFEST\",oe[oe.PACKAGE_PREPARATION_FAILED=58]=\"PACKAGE_PREPARATION_FAILED\",oe[oe.INVALID_RANGE_PEER_DEPENDENCY=59]=\"INVALID_RANGE_PEER_DEPENDENCY\",oe[oe.INCOMPATIBLE_PEER_DEPENDENCY=60]=\"INCOMPATIBLE_PEER_DEPENDENCY\",oe[oe.DEPRECATED_PACKAGE=61]=\"DEPRECATED_PACKAGE\",oe[oe.INCOMPATIBLE_OS=62]=\"INCOMPATIBLE_OS\",oe[oe.INCOMPATIBLE_CPU=63]=\"INCOMPATIBLE_CPU\",oe[oe.FROZEN_ARTIFACT_EXCEPTION=64]=\"FROZEN_ARTIFACT_EXCEPTION\",oe[oe.TELEMETRY_NOTICE=65]=\"TELEMETRY_NOTICE\",oe[oe.PATCH_HUNK_FAILED=66]=\"PATCH_HUNK_FAILED\",oe[oe.INVALID_CONFIGURATION_VALUE=67]=\"INVALID_CONFIGURATION_VALUE\",oe[oe.UNUSED_PACKAGE_EXTENSION=68]=\"UNUSED_PACKAGE_EXTENSION\",oe[oe.REDUNDANT_PACKAGE_EXTENSION=69]=\"REDUNDANT_PACKAGE_EXTENSION\",oe[oe.AUTO_NM_SUCCESS=70]=\"AUTO_NM_SUCCESS\",oe[oe.NM_CANT_INSTALL_EXTERNAL_SOFT_LINK=71]=\"NM_CANT_INSTALL_EXTERNAL_SOFT_LINK\",oe[oe.NM_PRESERVE_SYMLINKS_REQUIRED=72]=\"NM_PRESERVE_SYMLINKS_REQUIRED\",oe[oe.UPDATE_LOCKFILE_ONLY_SKIP_LINK=73]=\"UPDATE_LOCKFILE_ONLY_SKIP_LINK\",oe[oe.NM_HARDLINKS_MODE_DOWNGRADED=74]=\"NM_HARDLINKS_MODE_DOWNGRADED\",oe[oe.PROLOG_INSTANTIATION_ERROR=75]=\"PROLOG_INSTANTIATION_ERROR\",oe[oe.INCOMPATIBLE_ARCHITECTURE=76]=\"INCOMPATIBLE_ARCHITECTURE\",oe[oe.GHOST_ARCHITECTURE=77]=\"GHOST_ARCHITECTURE\"})($||($={}));function YA(t){return`YN${t.toString(10).padStart(4,\"0\")}`}function BI(t){let e=Number(t.slice(2));if(typeof $[e]==\"undefined\")throw new Error(`Unknown message name: \"${t}\"`);return e}var P={};ft(P,{areDescriptorsEqual:()=>c8,areIdentsEqual:()=>fd,areLocatorsEqual:()=>hd,areVirtualPackagesEquivalent:()=>uSe,bindDescriptor:()=>lSe,bindLocator:()=>cSe,convertDescriptorToLocator:()=>uw,convertLocatorToDescriptor:()=>nx,convertPackageToLocator:()=>ASe,convertToIdent:()=>aSe,convertToManifestRange:()=>hSe,copyPackage:()=>cd,devirtualizeDescriptor:()=>ud,devirtualizeLocator:()=>gd,getIdentVendorPath:()=>lx,isPackageCompatible:()=>pw,isVirtualDescriptor:()=>il,isVirtualLocator:()=>Xo,makeDescriptor:()=>rr,makeIdent:()=>Vo,makeLocator:()=>cn,makeRange:()=>fw,parseDescriptor:()=>nl,parseFileStyleRange:()=>gSe,parseIdent:()=>An,parseLocator:()=>Mc,parseRange:()=>Kg,prettyDependent:()=>YS,prettyDescriptor:()=>sr,prettyIdent:()=>gi,prettyLocator:()=>Bt,prettyLocatorNoColors:()=>Ax,prettyRange:()=>cw,prettyReference:()=>dd,prettyResolution:()=>qS,prettyWorkspace:()=>Cd,renamePackage:()=>ld,slugifyIdent:()=>ax,slugifyLocator:()=>Hg,sortDescriptors:()=>jg,stringifyDescriptor:()=>Pn,stringifyIdent:()=>Ot,stringifyLocator:()=>Ps,tryParseDescriptor:()=>pd,tryParseIdent:()=>u8,tryParseLocator:()=>gw,virtualizeDescriptor:()=>sx,virtualizePackage:()=>ox});var Ug=ge(require(\"querystring\")),a8=ge(ti()),A8=ge(bY());var ae={};ft(ae,{LogLevel:()=>go,Style:()=>Pc,Type:()=>Ge,addLogFilterSupport:()=>nd,applyColor:()=>rs,applyHyperlink:()=>Fg,applyStyle:()=>Ly,json:()=>Dc,jsonOrPretty:()=>G0e,mark:()=>VS,pretty:()=>et,prettyField:()=>Jo,prettyList:()=>_S,supportsColor:()=>Fy,supportsHyperlinks:()=>WS,tuple:()=>uo});var rd=ge(IS()),id=ge(hc());var QJ=ge(ts()),vJ=ge(gJ());var 
Se={};ft(Se,{AsyncActions:()=>EJ,BufferStream:()=>mJ,CachingStrategy:()=>xc,DefaultStream:()=>IJ,allSettledSafe:()=>co,assertNever:()=>US,bufferStream:()=>Dg,buildIgnorePattern:()=>U0e,convertMapsToIndexableObjects:()=>Ry,dynamicRequire:()=>Rg,escapeRegExp:()=>N0e,getArrayWithDefault:()=>kg,getFactoryWithDefault:()=>qa,getMapWithDefault:()=>xg,getSetWithDefault:()=>kc,isIndexableObject:()=>KS,isPathLike:()=>K0e,isTaggedYarnVersion:()=>F0e,mapAndFilter:()=>qo,mapAndFind:()=>$p,overrideType:()=>MS,parseBoolean:()=>td,parseOptionalBoolean:()=>bJ,prettifyAsyncErrors:()=>Pg,prettifySyncErrors:()=>HS,releaseAfterUseAsync:()=>T0e,replaceEnvVariables:()=>jS,sortMap:()=>xn,tryParseOptionalBoolean:()=>GS,validateEnum:()=>L0e});var fJ=ge(ts()),hJ=ge(ag()),pJ=ge(ti()),OS=ge(require(\"stream\"));function F0e(t){return!!(pJ.default.valid(t)&&t.match(/^[^-]+(-rc\\.[0-9]+)?$/))}function N0e(t){return t.replace(/[.*+?^${}()|[\\]\\\\]/g,\"\\\\$&\")}function MS(t){}function US(t){throw new Error(`Assertion failed: Unexpected object '${t}'`)}function L0e(t,e){let r=Object.values(t);if(!r.includes(e))throw new Pe(`Invalid value for enumeration: ${JSON.stringify(e)} (expected one of ${r.map(i=>JSON.stringify(i)).join(\", \")})`);return e}function qo(t,e){let r=[];for(let i of t){let n=e(i);n!==dJ&&r.push(n)}return r}var dJ=Symbol();qo.skip=dJ;function $p(t,e){for(let r of t){let i=e(r);if(i!==CJ)return i}}var CJ=Symbol();$p.skip=CJ;function KS(t){return typeof t==\"object\"&&t!==null}async function co(t){let e=await Promise.allSettled(t),r=[];for(let i of e){if(i.status===\"rejected\")throw i.reason;r.push(i.value)}return r}function Ry(t){if(t instanceof Map&&(t=Object.fromEntries(t)),KS(t))for(let e of Object.keys(t)){let r=t[e];KS(r)&&(t[e]=Ry(r))}return t}function qa(t,e,r){let i=t.get(e);return typeof i==\"undefined\"&&t.set(e,i=r()),i}function kg(t,e){let r=t.get(e);return typeof r==\"undefined\"&&t.set(e,r=[]),r}function kc(t,e){let r=t.get(e);return typeof r==\"undefined\"&&t.set(e,r=new Set),r}function xg(t,e){let r=t.get(e);return typeof r==\"undefined\"&&t.set(e,r=new Map),r}async function T0e(t,e){if(e==null)return await t();try{return await t()}finally{await e()}}async function Pg(t,e){try{return await t()}catch(r){throw r.message=e(r.message),r}}function HS(t,e){try{return t()}catch(r){throw r.message=e(r.message),r}}async function Dg(t){return await new Promise((e,r)=>{let i=[];t.on(\"error\",n=>{r(n)}),t.on(\"data\",n=>{i.push(n)}),t.on(\"end\",()=>{e(Buffer.concat(i))})})}var mJ=class extends OS.Transform{constructor(){super(...arguments);this.chunks=[]}_transform(e,r,i){if(r!==\"buffer\"||!Buffer.isBuffer(e))throw new Error(\"Assertion failed: BufferStream only accept buffers\");this.chunks.push(e),i(null,null)}_flush(e){e(null,Buffer.concat(this.chunks))}};function O0e(){let t,e;return{promise:new Promise((i,n)=>{t=i,e=n}),resolve:t,reject:e}}var EJ=class{constructor(e){this.deferred=new Map;this.promises=new Map;this.limit=(0,hJ.default)(e)}set(e,r){let i=this.deferred.get(e);typeof i==\"undefined\"&&this.deferred.set(e,i=O0e());let n=this.limit(()=>r());return this.promises.set(e,n),n.then(()=>{this.promises.get(e)===n&&i.resolve()},s=>{this.promises.get(e)===n&&i.reject(s)}),i.promise}reduce(e,r){var n;let i=(n=this.promises.get(e))!=null?n:Promise.resolve();this.set(e,()=>r(i))}async wait(){await Promise.all(this.promises.values())}},IJ=class extends 
OS.Transform{constructor(e=Buffer.alloc(0)){super();this.active=!0;this.ifEmpty=e}_transform(e,r,i){if(r!==\"buffer\"||!Buffer.isBuffer(e))throw new Error(\"Assertion failed: DefaultStream only accept buffers\");this.active=!1,i(null,e)}_flush(e){this.active&&this.ifEmpty.length>0?e(null,this.ifEmpty):e(null)}},ed=eval(\"require\");function yJ(t){return ed(j.fromPortablePath(t))}function wJ(path){let physicalPath=j.fromPortablePath(path),currentCacheEntry=ed.cache[physicalPath];delete ed.cache[physicalPath];let result;try{result=yJ(physicalPath);let freshCacheEntry=ed.cache[physicalPath],dynamicModule=eval(\"module\"),freshCacheIndex=dynamicModule.children.indexOf(freshCacheEntry);freshCacheIndex!==-1&&dynamicModule.children.splice(freshCacheIndex,1)}finally{ed.cache[physicalPath]=currentCacheEntry}return result}var BJ=new Map;function M0e(t){let e=BJ.get(t),r=K.statSync(t);if((e==null?void 0:e.mtime)===r.mtimeMs)return e.instance;let i=wJ(t);return BJ.set(t,{mtime:r.mtimeMs,instance:i}),i}var xc;(function(i){i[i.NoCache=0]=\"NoCache\",i[i.FsTime=1]=\"FsTime\",i[i.Node=2]=\"Node\"})(xc||(xc={}));function Rg(t,{cachingStrategy:e=2}={}){switch(e){case 0:return wJ(t);case 1:return M0e(t);case 2:return yJ(t);default:throw new Error(\"Unsupported caching strategy\")}}function xn(t,e){let r=Array.from(t);Array.isArray(e)||(e=[e]);let i=[];for(let s of e)i.push(r.map(o=>s(o)));let n=r.map((s,o)=>o);return n.sort((s,o)=>{for(let a of i){let l=a[s]<a[o]?-1:a[s]>a[o]?1:0;if(l!==0)return l}return 0}),n.map(s=>r[s])}function U0e(t){return t.length===0?null:t.map(e=>`(${fJ.default.makeRe(e,{windows:!1,dot:!0}).source})`).join(\"|\")}function jS(t,{env:e}){let r=/\\${(?<variableName>[\\d\\w_]+)(?<colon>:)?(?:-(?<fallback>[^}]*))?}/g;return t.replace(r,(...i)=>{let{variableName:n,colon:s,fallback:o}=i[i.length-1],a=Object.prototype.hasOwnProperty.call(e,n),l=e[n];if(l||a&&!s)return l;if(o!=null)return o;throw new Pe(`Environment variable not found (${n})`)})}function td(t){switch(t){case\"true\":case\"1\":case 1:case!0:return!0;case\"false\":case\"0\":case 0:case!1:return!1;default:throw new Error(`Couldn't parse \"${t}\" as a boolean`)}}function bJ(t){return typeof t==\"undefined\"?t:td(t)}function GS(t){try{return bJ(t)}catch{return null}}function K0e(t){return!!(j.isAbsolute(t)||t.match(/^(\\.{1,2}|~)\\//))}var Qt;(function(r){r.HARD=\"HARD\",r.SOFT=\"SOFT\"})(Qt||(Qt={}));var yi;(function(i){i.Dependency=\"Dependency\",i.PeerDependency=\"PeerDependency\",i.PeerDependencyMeta=\"PeerDependencyMeta\"})(yi||(yi={}));var qi;(function(i){i.Inactive=\"inactive\",i.Redundant=\"redundant\",i.Active=\"active\"})(qi||(qi={}));var Ge={NO_HINT:\"NO_HINT\",NULL:\"NULL\",SCOPE:\"SCOPE\",NAME:\"NAME\",RANGE:\"RANGE\",REFERENCE:\"REFERENCE\",NUMBER:\"NUMBER\",PATH:\"PATH\",URL:\"URL\",ADDED:\"ADDED\",REMOVED:\"REMOVED\",CODE:\"CODE\",DURATION:\"DURATION\",SIZE:\"SIZE\",IDENT:\"IDENT\",DESCRIPTOR:\"DESCRIPTOR\",LOCATOR:\"LOCATOR\",RESOLUTION:\"RESOLUTION\",DEPENDENT:\"DEPENDENT\",PACKAGE_EXTENSION:\"PACKAGE_EXTENSION\",SETTING:\"SETTING\",MARKDOWN:\"MARKDOWN\"},Pc;(function(e){e[e.BOLD=2]=\"BOLD\"})(Pc||(Pc={}));var JS=id.default.GITHUB_ACTIONS?{level:2}:rd.default.supportsColor?{level:rd.default.supportsColor.level}:{level:0},Fy=JS.level!==0,WS=Fy&&!id.default.GITHUB_ACTIONS&&!id.default.CIRCLE&&!id.default.GITLAB,zS=new rd.default.Instance(JS),H0e=new 
Map([[Ge.NO_HINT,null],[Ge.NULL,[\"#a853b5\",129]],[Ge.SCOPE,[\"#d75f00\",166]],[Ge.NAME,[\"#d7875f\",173]],[Ge.RANGE,[\"#00afaf\",37]],[Ge.REFERENCE,[\"#87afff\",111]],[Ge.NUMBER,[\"#ffd700\",220]],[Ge.PATH,[\"#d75fd7\",170]],[Ge.URL,[\"#d75fd7\",170]],[Ge.ADDED,[\"#5faf00\",70]],[Ge.REMOVED,[\"#d70000\",160]],[Ge.CODE,[\"#87afff\",111]],[Ge.SIZE,[\"#ffd700\",220]]]),Ds=t=>t,Ny={[Ge.NUMBER]:Ds({pretty:(t,e)=>`${e}`,json:t=>t}),[Ge.IDENT]:Ds({pretty:(t,e)=>gi(t,e),json:t=>Ot(t)}),[Ge.LOCATOR]:Ds({pretty:(t,e)=>Bt(t,e),json:t=>Ps(t)}),[Ge.DESCRIPTOR]:Ds({pretty:(t,e)=>sr(t,e),json:t=>Pn(t)}),[Ge.RESOLUTION]:Ds({pretty:(t,{descriptor:e,locator:r})=>qS(t,e,r),json:({descriptor:t,locator:e})=>({descriptor:Pn(t),locator:e!==null?Ps(e):null})}),[Ge.DEPENDENT]:Ds({pretty:(t,{locator:e,descriptor:r})=>YS(t,e,r),json:({locator:t,descriptor:e})=>({locator:Ps(t),descriptor:Pn(e)})}),[Ge.PACKAGE_EXTENSION]:Ds({pretty:(t,e)=>{switch(e.type){case yi.Dependency:return`${gi(t,e.parentDescriptor)} \\u27A4 ${rs(t,\"dependencies\",Ge.CODE)} \\u27A4 ${gi(t,e.descriptor)}`;case yi.PeerDependency:return`${gi(t,e.parentDescriptor)} \\u27A4 ${rs(t,\"peerDependencies\",Ge.CODE)} \\u27A4 ${gi(t,e.descriptor)}`;case yi.PeerDependencyMeta:return`${gi(t,e.parentDescriptor)} \\u27A4 ${rs(t,\"peerDependenciesMeta\",Ge.CODE)} \\u27A4 ${gi(t,An(e.selector))} \\u27A4 ${rs(t,e.key,Ge.CODE)}`;default:throw new Error(`Assertion failed: Unsupported package extension type: ${e.type}`)}},json:t=>{switch(t.type){case yi.Dependency:return`${Ot(t.parentDescriptor)} > ${Ot(t.descriptor)}`;case yi.PeerDependency:return`${Ot(t.parentDescriptor)} >> ${Ot(t.descriptor)}`;case yi.PeerDependencyMeta:return`${Ot(t.parentDescriptor)} >> ${t.selector} / ${t.key}`;default:throw new Error(`Assertion failed: Unsupported package extension type: ${t.type}`)}}}),[Ge.SETTING]:Ds({pretty:(t,e)=>(t.get(e),Fg(t,rs(t,e,Ge.CODE),`https://yarnpkg.com/configuration/yarnrc#${e}`)),json:t=>t}),[Ge.DURATION]:Ds({pretty:(t,e)=>{if(e>1e3*60){let r=Math.floor(e/1e3/60),i=Math.ceil((e-r*60*1e3)/1e3);return i===0?`${r}m`:`${r}m ${i}s`}else{let r=Math.floor(e/1e3),i=e-r*1e3;return i===0?`${r}s`:`${r}s ${i}ms`}},json:t=>t}),[Ge.SIZE]:Ds({pretty:(t,e)=>{let r=[\"KB\",\"MB\",\"GB\",\"TB\"],i=r.length;for(;i>1&&e<1024**i;)i-=1;let n=1024**i,s=Math.floor(e*100/n)/100;return rs(t,`${s} ${r[i-1]}`,Ge.NUMBER)},json:t=>t}),[Ge.PATH]:Ds({pretty:(t,e)=>rs(t,j.fromPortablePath(e),Ge.PATH),json:t=>j.fromPortablePath(t)}),[Ge.MARKDOWN]:Ds({pretty:(t,{text:e,format:r,paragraphs:i})=>Ki(e,{format:r,paragraphs:i}),json:({text:t})=>t})};function uo(t,e){return[e,t]}function Ly(t,e,r){return t.get(\"enableColors\")&&r&2&&(e=rd.default.bold(e)),e}function rs(t,e,r){if(!t.get(\"enableColors\"))return e;let i=H0e.get(r);if(i===null)return e;let n=typeof i==\"undefined\"?r:JS.level>=3?i[0]:i[1],s=typeof n==\"number\"?zS.ansi256(n):n.startsWith(\"#\")?zS.hex(n):zS[n];if(typeof s!=\"function\")throw new Error(`Invalid format type ${n}`);return s(e)}var j0e=!!process.env.KONSOLE_VERSION;function Fg(t,e,r){return t.get(\"enableHyperlinks\")?j0e?`\u001b]8;;${r}\u001b\\\\${e}\u001b]8;;\u001b\\\\`:`\u001b]8;;${r}\\x07${e}\u001b]8;;\\x07`:e}function et(t,e,r){if(e===null)return rs(t,\"null\",Ge.NULL);if(Object.prototype.hasOwnProperty.call(Ny,r))return Ny[r].pretty(t,e);if(typeof e!=\"string\")throw new Error(`Assertion failed: Expected the value to be a string, got ${typeof e}`);return rs(t,e,r)}function _S(t,e,r,{separator:i=\", \"}={}){return[...e].map(n=>et(t,n,r)).join(i)}function 
Dc(t,e){if(t===null)return null;if(Object.prototype.hasOwnProperty.call(Ny,e))return MS(e),Ny[e].json(t);if(typeof t!=\"string\")throw new Error(`Assertion failed: Expected the value to be a string, got ${typeof t}`);return t}function G0e(t,e,[r,i]){return t?Dc(r,i):et(e,r,i)}function VS(t){return{Check:rs(t,\"\\u2713\",\"green\"),Cross:rs(t,\"\\u2718\",\"red\"),Question:rs(t,\"?\",\"cyan\")}}function Jo(t,{label:e,value:[r,i]}){return`${et(t,e,Ge.CODE)}: ${et(t,r,i)}`}var go;(function(n){n.Error=\"error\",n.Warning=\"warning\",n.Info=\"info\",n.Discard=\"discard\"})(go||(go={}));function nd(t,{configuration:e}){let r=e.get(\"logFilters\"),i=new Map,n=new Map,s=[];for(let g of r){let f=g.get(\"level\");if(typeof f==\"undefined\")continue;let h=g.get(\"code\");typeof h!=\"undefined\"&&i.set(h,f);let p=g.get(\"text\");typeof p!=\"undefined\"&&n.set(p,f);let m=g.get(\"pattern\");typeof m!=\"undefined\"&&s.push([QJ.default.matcher(m,{contains:!0}),f])}s.reverse();let o=(g,f,h)=>{if(g===null||g===$.UNNAMED)return h;let p=n.size>0||s.length>0?(0,vJ.default)(f):f;if(n.size>0){let m=n.get(p);if(typeof m!=\"undefined\")return m!=null?m:h}if(s.length>0){for(let[m,y]of s)if(m(p))return y!=null?y:h}if(i.size>0){let m=i.get(YA(g));if(typeof m!=\"undefined\")return m!=null?m:h}return h},a=t.reportInfo,l=t.reportWarning,c=t.reportError,u=function(g,f,h,p){switch(o(f,h,p)){case go.Info:a.call(g,f,h);break;case go.Warning:l.call(g,f!=null?f:$.UNNAMED,h);break;case go.Error:c.call(g,f!=null?f:$.UNNAMED,h);break}};t.reportInfo=function(...g){return u(this,...g,go.Info)},t.reportWarning=function(...g){return u(this,...g,go.Warning)},t.reportError=function(...g){return u(this,...g,go.Error)}}var Dn={};ft(Dn,{checksumFile:()=>Aw,checksumPattern:()=>lw,makeHash:()=>ln});var aw=ge(require(\"crypto\")),ix=ge(rx());function ln(...t){let e=(0,aw.createHash)(\"sha512\"),r=\"\";for(let i of t)typeof i==\"string\"?r+=i:i&&(r&&(e.update(r),r=\"\"),e.update(i));return r&&e.update(r),e.digest(\"hex\")}async function Aw(t,{baseFs:e,algorithm:r}={baseFs:K,algorithm:\"sha512\"}){let i=await e.openPromise(t,\"r\");try{let n=65536,s=Buffer.allocUnsafeSlow(n),o=(0,aw.createHash)(r),a=0;for(;(a=await e.readPromise(i,s,0,n))!==0;)o.update(a===n?s:s.slice(0,a));return o.digest(\"hex\")}finally{await e.closePromise(i)}}async function lw(t,{cwd:e}){let i=(await(0,ix.default)(t,{cwd:j.fromPortablePath(e),expandDirectories:!1,onlyDirectories:!0,unique:!0})).map(a=>`${a}/**/*`),n=await(0,ix.default)([t,...i],{cwd:j.fromPortablePath(e),expandDirectories:!1,onlyFiles:!1,unique:!0});n.sort();let s=await Promise.all(n.map(async a=>{let l=[Buffer.from(a)],c=j.toPortablePath(a),u=await K.lstatPromise(c);return u.isSymbolicLink()?l.push(Buffer.from(await K.readlinkPromise(c))):u.isFile()&&l.push(await K.readFilePromise(c)),l.join(\"\\0\")})),o=(0,aw.createHash)(\"sha512\");for(let a of s)o.update(a);return o.digest(\"hex\")}var Ad=\"virtual:\",sSe=5,l8=/(os|cpu|libc)=([a-z0-9_-]+)/,oSe=(0,A8.makeParser)(l8);function Vo(t,e){if(t==null?void 0:t.startsWith(\"@\"))throw new Error(\"Invalid scope: don't prefix it with '@'\");return{identHash:ln(t,e),scope:t,name:e}}function rr(t,e){return{identHash:t.identHash,scope:t.scope,name:t.name,descriptorHash:ln(t.identHash,e),range:e}}function cn(t,e){return{identHash:t.identHash,scope:t.scope,name:t.name,locatorHash:ln(t.identHash,e),reference:e}}function aSe(t){return{identHash:t.identHash,scope:t.scope,name:t.name}}function 
uw(t){return{identHash:t.identHash,scope:t.scope,name:t.name,locatorHash:t.descriptorHash,reference:t.range}}function nx(t){return{identHash:t.identHash,scope:t.scope,name:t.name,descriptorHash:t.locatorHash,range:t.reference}}function ASe(t){return{identHash:t.identHash,scope:t.scope,name:t.name,locatorHash:t.locatorHash,reference:t.reference}}function ld(t,e){return{identHash:e.identHash,scope:e.scope,name:e.name,locatorHash:e.locatorHash,reference:e.reference,version:t.version,languageName:t.languageName,linkType:t.linkType,conditions:t.conditions,dependencies:new Map(t.dependencies),peerDependencies:new Map(t.peerDependencies),dependenciesMeta:new Map(t.dependenciesMeta),peerDependenciesMeta:new Map(t.peerDependenciesMeta),bin:new Map(t.bin)}}function cd(t){return ld(t,t)}function sx(t,e){if(e.includes(\"#\"))throw new Error(\"Invalid entropy\");return rr(t,`virtual:${e}#${t.range}`)}function ox(t,e){if(e.includes(\"#\"))throw new Error(\"Invalid entropy\");return ld(t,cn(t,`virtual:${e}#${t.reference}`))}function il(t){return t.range.startsWith(Ad)}function Xo(t){return t.reference.startsWith(Ad)}function ud(t){if(!il(t))throw new Error(\"Not a virtual descriptor\");return rr(t,t.range.replace(/^[^#]*#/,\"\"))}function gd(t){if(!Xo(t))throw new Error(\"Not a virtual descriptor\");return cn(t,t.reference.replace(/^[^#]*#/,\"\"))}function lSe(t,e){return t.range.includes(\"::\")?t:rr(t,`${t.range}::${Ug.default.stringify(e)}`)}function cSe(t,e){return t.reference.includes(\"::\")?t:cn(t,`${t.reference}::${Ug.default.stringify(e)}`)}function fd(t,e){return t.identHash===e.identHash}function c8(t,e){return t.descriptorHash===e.descriptorHash}function hd(t,e){return t.locatorHash===e.locatorHash}function uSe(t,e){if(!Xo(t))throw new Error(\"Invalid package type\");if(!Xo(e))throw new Error(\"Invalid package type\");if(!fd(t,e)||t.dependencies.size!==e.dependencies.size)return!1;for(let r of t.dependencies.values()){let i=e.dependencies.get(r.identHash);if(!i||!c8(r,i))return!1}return!0}function An(t){let e=u8(t);if(!e)throw new Error(`Invalid ident (${t})`);return e}function u8(t){let e=t.match(/^(?:@([^/]+?)\\/)?([^/]+)$/);if(!e)return null;let[,r,i]=e,n=typeof r!=\"undefined\"?r:null;return Vo(n,i)}function nl(t,e=!1){let r=pd(t,e);if(!r)throw new Error(`Invalid descriptor (${t})`);return r}function pd(t,e=!1){let r=e?t.match(/^(?:@([^/]+?)\\/)?([^/]+?)(?:@(.+))$/):t.match(/^(?:@([^/]+?)\\/)?([^/]+?)(?:@(.+))?$/);if(!r)return null;let[,i,n,s]=r;if(s===\"unknown\")throw new Error(`Invalid range (${t})`);let o=typeof i!=\"undefined\"?i:null,a=typeof s!=\"undefined\"?s:\"unknown\";return rr(Vo(o,n),a)}function Mc(t,e=!1){let r=gw(t,e);if(!r)throw new Error(`Invalid locator (${t})`);return r}function gw(t,e=!1){let r=e?t.match(/^(?:@([^/]+?)\\/)?([^/]+?)(?:@(.+))$/):t.match(/^(?:@([^/]+?)\\/)?([^/]+?)(?:@(.+))?$/);if(!r)return null;let[,i,n,s]=r;if(s===\"unknown\")throw new Error(`Invalid reference (${t})`);let o=typeof i!=\"undefined\"?i:null,a=typeof s!=\"undefined\"?s:\"unknown\";return cn(Vo(o,n),a)}function Kg(t,e){let r=t.match(/^([^#:]*:)?((?:(?!::)[^#])*)(?:#((?:(?!::).)*))?(?:::(.*))?$/);if(r===null)throw new Error(`Invalid range (${t})`);let i=typeof r[1]!=\"undefined\"?r[1]:null;if(typeof(e==null?void 0:e.requireProtocol)==\"string\"&&i!==e.requireProtocol)throw new Error(`Invalid protocol (${i})`);if((e==null?void 0:e.requireProtocol)&&i===null)throw new Error(`Missing protocol (${i})`);let n=typeof r[3]!=\"undefined\"?decodeURIComponent(r[2]):null;if((e==null?void 
0:e.requireSource)&&n===null)throw new Error(`Missing source (${t})`);let s=typeof r[3]!=\"undefined\"?decodeURIComponent(r[3]):decodeURIComponent(r[2]),o=(e==null?void 0:e.parseSelector)?Ug.default.parse(s):s,a=typeof r[4]!=\"undefined\"?Ug.default.parse(r[4]):null;return{protocol:i,source:n,selector:o,params:a}}function gSe(t,{protocol:e}){let{selector:r,params:i}=Kg(t,{requireProtocol:e,requireBindings:!0});if(typeof i.locator!=\"string\")throw new Error(`Assertion failed: Invalid bindings for ${t}`);return{parentLocator:Mc(i.locator,!0),path:r}}function g8(t){return t=t.replace(/%/g,\"%25\"),t=t.replace(/:/g,\"%3A\"),t=t.replace(/#/g,\"%23\"),t}function fSe(t){return t===null?!1:Object.entries(t).length>0}function fw({protocol:t,source:e,selector:r,params:i}){let n=\"\";return t!==null&&(n+=`${t}`),e!==null&&(n+=`${g8(e)}#`),n+=g8(r),fSe(i)&&(n+=`::${Ug.default.stringify(i)}`),n}function hSe(t){let{params:e,protocol:r,source:i,selector:n}=Kg(t);for(let s in e)s.startsWith(\"__\")&&delete e[s];return fw({protocol:r,source:i,params:e,selector:n})}function Ot(t){return t.scope?`@${t.scope}/${t.name}`:`${t.name}`}function Pn(t){return t.scope?`@${t.scope}/${t.name}@${t.range}`:`${t.name}@${t.range}`}function Ps(t){return t.scope?`@${t.scope}/${t.name}@${t.reference}`:`${t.name}@${t.reference}`}function ax(t){return t.scope!==null?`@${t.scope}-${t.name}`:t.name}function Hg(t){let{protocol:e,selector:r}=Kg(t.reference),i=e!==null?e.replace(/:$/,\"\"):\"exotic\",n=a8.default.valid(r),s=n!==null?`${i}-${n}`:`${i}`,o=10,a=t.scope?`${ax(t)}-${s}-${t.locatorHash.slice(0,o)}`:`${ax(t)}-${s}-${t.locatorHash.slice(0,o)}`;return qr(a)}function gi(t,e){return e.scope?`${et(t,`@${e.scope}/`,Ge.SCOPE)}${et(t,e.name,Ge.NAME)}`:`${et(t,e.name,Ge.NAME)}`}function hw(t){if(t.startsWith(Ad)){let e=hw(t.substring(t.indexOf(\"#\")+1)),r=t.substring(Ad.length,Ad.length+sSe);return`${e} [${r}]`}else return t.replace(/\\?.*/,\"?[...]\")}function cw(t,e){return`${et(t,hw(e),Ge.RANGE)}`}function sr(t,e){return`${gi(t,e)}${et(t,\"@\",Ge.RANGE)}${cw(t,e.range)}`}function dd(t,e){return`${et(t,hw(e),Ge.REFERENCE)}`}function Bt(t,e){return`${gi(t,e)}${et(t,\"@\",Ge.REFERENCE)}${dd(t,e.reference)}`}function Ax(t){return`${Ot(t)}@${hw(t.reference)}`}function jg(t){return xn(t,[e=>Ot(e),e=>e.range])}function Cd(t,e){return gi(t,e.locator)}function qS(t,e,r){let i=il(e)?ud(e):e;return r===null?`${sr(t,i)} \\u2192 ${VS(t).Cross}`:i.identHash===r.identHash?`${sr(t,i)} \\u2192 ${dd(t,r.reference)}`:`${sr(t,i)} \\u2192 ${Bt(t,r)}`}function YS(t,e,r){return r===null?`${Bt(t,e)}`:`${Bt(t,e)} (via ${cw(t,r.range)})`}function lx(t){return`node_modules/${Ot(t)}`}function pw(t,e){return t.conditions?oSe(t.conditions,r=>{let[,i,n]=r.match(l8),s=e[i];return s?s.includes(n):!0}):!0}var f8={hooks:{reduceDependency:(t,e,r,i,{resolver:n,resolveOptions:s})=>{for(let{pattern:o,reference:a}of e.topLevelWorkspace.manifest.resolutions){if(o.from&&o.from.fullName!==Ot(r)||o.from&&o.from.description&&o.from.description!==r.reference||o.descriptor.fullName!==Ot(t)||o.descriptor.description&&o.descriptor.description!==t.range)continue;return n.bindDescriptor(rr(t,a),e.topLevelWorkspace.anchoredLocator,s)}return t},validateProject:async(t,e)=>{for(let r of t.workspaces){let i=Cd(t.configuration,r);await t.configuration.triggerHook(n=>n.validateWorkspace,r,{reportWarning:(n,s)=>e.reportWarning(n,`${i}: ${s}`),reportError:(n,s)=>e.reportError(n,`${i}: 
${s}`)})}},validateWorkspace:async(t,e)=>{let{manifest:r}=t;r.resolutions.length&&t.cwd!==t.project.cwd&&r.errors.push(new Error(\"Resolutions field will be ignored\"));for(let i of r.errors)e.reportWarning($.INVALID_MANIFEST,i.message)}}};var C8=ge(ti());var md=class{supportsDescriptor(e,r){return!!(e.range.startsWith(md.protocol)||r.project.tryWorkspaceByDescriptor(e)!==null)}supportsLocator(e,r){return!!e.reference.startsWith(md.protocol)}shouldPersistResolution(e,r){return!1}bindDescriptor(e,r,i){return e}getResolutionDependencies(e,r){return[]}async getCandidates(e,r,i){return[i.project.getWorkspaceByDescriptor(e).anchoredLocator]}async getSatisfying(e,r,i){return null}async resolve(e,r){let i=r.project.getWorkspaceByCwd(e.reference.slice(md.protocol.length));return te(N({},e),{version:i.manifest.version||\"0.0.0\",languageName:\"unknown\",linkType:Qt.SOFT,conditions:null,dependencies:new Map([...i.manifest.dependencies,...i.manifest.devDependencies]),peerDependencies:new Map([...i.manifest.peerDependencies]),dependenciesMeta:i.manifest.dependenciesMeta,peerDependenciesMeta:i.manifest.peerDependenciesMeta,bin:i.manifest.bin})}},si=md;si.protocol=\"workspace:\";var Wt={};ft(Wt,{SemVer:()=>h8.SemVer,clean:()=>dSe,satisfiesWithPrereleases:()=>Uc,validRange:()=>fo});var dw=ge(ti()),h8=ge(ti()),p8=new Map;function Uc(t,e,r=!1){if(!t)return!1;let i=`${e}${r}`,n=p8.get(i);if(typeof n==\"undefined\")try{n=new dw.default.Range(e,{includePrerelease:!0,loose:r})}catch{return!1}finally{p8.set(i,n||null)}else if(n===null)return!1;let s;try{s=new dw.default.SemVer(t,n)}catch(o){return!1}return n.test(s)?!0:(s.prerelease&&(s.prerelease=[]),n.set.some(o=>{for(let a of o)a.semver.prerelease&&(a.semver.prerelease=[]);return o.every(a=>a.test(s))}))}var d8=new Map;function fo(t){if(t.indexOf(\":\")!==-1)return null;let e=d8.get(t);if(typeof e!=\"undefined\")return e;try{e=new dw.default.Range(t)}catch{e=null}return d8.set(t,e),e}var pSe=/^(?:[\\sv=]*?)((0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?)(?:\\s*)$/;function dSe(t){let e=pSe.exec(t);return e?e[1]:null}var sl=class{constructor(){this.indent=\"  \";this.name=null;this.version=null;this.os=null;this.cpu=null;this.libc=null;this.type=null;this.packageManager=null;this.private=!1;this.license=null;this.main=null;this.module=null;this.browser=null;this.languageName=null;this.bin=new Map;this.scripts=new Map;this.dependencies=new Map;this.devDependencies=new Map;this.peerDependencies=new Map;this.workspaceDefinitions=[];this.dependenciesMeta=new Map;this.peerDependenciesMeta=new Map;this.resolutions=[];this.files=null;this.publishConfig=null;this.installConfig=null;this.preferUnplugged=null;this.raw={};this.errors=[]}static async tryFind(e,{baseFs:r=new ar}={}){let i=k.join(e,\"package.json\");try{return await sl.fromFile(i,{baseFs:r})}catch(n){if(n.code===\"ENOENT\")return null;throw n}}static async find(e,{baseFs:r}={}){let i=await sl.tryFind(e,{baseFs:r});if(i===null)throw new Error(\"Manifest not found\");return i}static async fromFile(e,{baseFs:r=new ar}={}){let i=new sl;return await i.loadFile(e,{baseFs:r}),i}static fromText(e){let r=new sl;return r.loadFromText(e),r}static isManifestFieldCompatible(e,r){if(e===null)return!0;let i=!0,n=!1;for(let s of e)if(s[0]===\"!\"){if(n=!0,r===s.slice(1))return!1}else if(i=!1,s===r)return!0;return n&&i}loadFromText(e){let 
r;try{r=JSON.parse(E8(e)||\"{}\")}catch(i){throw i.message+=` (when parsing ${e})`,i}this.load(r),this.indent=m8(e)}async loadFile(e,{baseFs:r=new ar}){let i=await r.readFilePromise(e,\"utf8\"),n;try{n=JSON.parse(E8(i)||\"{}\")}catch(s){throw s.message+=` (when parsing ${e})`,s}this.load(n),this.indent=m8(i)}load(e,{yamlCompatibilityMode:r=!1}={}){if(typeof e!=\"object\"||e===null)throw new Error(`Utterly invalid manifest data (${e})`);this.raw=e;let i=[];if(this.name=null,typeof e.name==\"string\")try{this.name=An(e.name)}catch(s){i.push(new Error(\"Parsing failed for the 'name' field\"))}if(typeof e.version==\"string\"?this.version=e.version:this.version=null,Array.isArray(e.os)){let s=[];this.os=s;for(let o of e.os)typeof o!=\"string\"?i.push(new Error(\"Parsing failed for the 'os' field\")):s.push(o)}else this.os=null;if(Array.isArray(e.cpu)){let s=[];this.cpu=s;for(let o of e.cpu)typeof o!=\"string\"?i.push(new Error(\"Parsing failed for the 'cpu' field\")):s.push(o)}else this.cpu=null;if(Array.isArray(e.libc)){let s=[];this.libc=s;for(let o of e.libc)typeof o!=\"string\"?i.push(new Error(\"Parsing failed for the 'libc' field\")):s.push(o)}else this.libc=null;if(typeof e.type==\"string\"?this.type=e.type:this.type=null,typeof e.packageManager==\"string\"?this.packageManager=e.packageManager:this.packageManager=null,typeof e.private==\"boolean\"?this.private=e.private:this.private=!1,typeof e.license==\"string\"?this.license=e.license:this.license=null,typeof e.languageName==\"string\"?this.languageName=e.languageName:this.languageName=null,typeof e.main==\"string\"?this.main=un(e.main):this.main=null,typeof e.module==\"string\"?this.module=un(e.module):this.module=null,e.browser!=null)if(typeof e.browser==\"string\")this.browser=un(e.browser);else{this.browser=new Map;for(let[s,o]of Object.entries(e.browser))this.browser.set(un(s),typeof o==\"string\"?un(o):o)}else this.browser=null;if(this.bin=new Map,typeof e.bin==\"string\")this.name!==null?this.bin.set(this.name.name,un(e.bin)):i.push(new Error(\"String bin field, but no attached package name\"));else if(typeof e.bin==\"object\"&&e.bin!==null)for(let[s,o]of Object.entries(e.bin)){if(typeof o!=\"string\"){i.push(new Error(`Invalid bin definition for '${s}'`));continue}let a=An(s);this.bin.set(a.name,un(o))}if(this.scripts=new Map,typeof e.scripts==\"object\"&&e.scripts!==null)for(let[s,o]of Object.entries(e.scripts)){if(typeof o!=\"string\"){i.push(new Error(`Invalid script definition for '${s}'`));continue}this.scripts.set(s,o)}if(this.dependencies=new Map,typeof e.dependencies==\"object\"&&e.dependencies!==null)for(let[s,o]of Object.entries(e.dependencies)){if(typeof o!=\"string\"){i.push(new Error(`Invalid dependency range for '${s}'`));continue}let a;try{a=An(s)}catch(c){i.push(new Error(`Parsing failed for the dependency name '${s}'`));continue}let l=rr(a,o);this.dependencies.set(l.identHash,l)}if(this.devDependencies=new Map,typeof e.devDependencies==\"object\"&&e.devDependencies!==null)for(let[s,o]of Object.entries(e.devDependencies)){if(typeof o!=\"string\"){i.push(new Error(`Invalid dependency range for '${s}'`));continue}let a;try{a=An(s)}catch(c){i.push(new Error(`Parsing failed for the dependency name '${s}'`));continue}let l=rr(a,o);this.devDependencies.set(l.identHash,l)}if(this.peerDependencies=new Map,typeof e.peerDependencies==\"object\"&&e.peerDependencies!==null)for(let[s,o]of Object.entries(e.peerDependencies)){let a;try{a=An(s)}catch(c){i.push(new Error(`Parsing failed for the dependency name 
'${s}'`));continue}(typeof o!=\"string\"||!o.startsWith(si.protocol)&&!fo(o))&&(i.push(new Error(`Invalid dependency range for '${s}'`)),o=\"*\");let l=rr(a,o);this.peerDependencies.set(l.identHash,l)}typeof e.workspaces==\"object\"&&e.workspaces!==null&&e.workspaces.nohoist&&i.push(new Error(\"'nohoist' is deprecated, please use 'installConfig.hoistingLimits' instead\"));let n=Array.isArray(e.workspaces)?e.workspaces:typeof e.workspaces==\"object\"&&e.workspaces!==null&&Array.isArray(e.workspaces.packages)?e.workspaces.packages:[];this.workspaceDefinitions=[];for(let s of n){if(typeof s!=\"string\"){i.push(new Error(`Invalid workspace definition for '${s}'`));continue}this.workspaceDefinitions.push({pattern:s})}if(this.dependenciesMeta=new Map,typeof e.dependenciesMeta==\"object\"&&e.dependenciesMeta!==null)for(let[s,o]of Object.entries(e.dependenciesMeta)){if(typeof o!=\"object\"||o===null){i.push(new Error(`Invalid meta field for '${s}`));continue}let a=nl(s),l=this.ensureDependencyMeta(a),c=Cw(o.built,{yamlCompatibilityMode:r});if(c===null){i.push(new Error(`Invalid built meta field for '${s}'`));continue}let u=Cw(o.optional,{yamlCompatibilityMode:r});if(u===null){i.push(new Error(`Invalid optional meta field for '${s}'`));continue}let g=Cw(o.unplugged,{yamlCompatibilityMode:r});if(g===null){i.push(new Error(`Invalid unplugged meta field for '${s}'`));continue}Object.assign(l,{built:c,optional:u,unplugged:g})}if(this.peerDependenciesMeta=new Map,typeof e.peerDependenciesMeta==\"object\"&&e.peerDependenciesMeta!==null)for(let[s,o]of Object.entries(e.peerDependenciesMeta)){if(typeof o!=\"object\"||o===null){i.push(new Error(`Invalid meta field for '${s}'`));continue}let a=nl(s),l=this.ensurePeerDependencyMeta(a),c=Cw(o.optional,{yamlCompatibilityMode:r});if(c===null){i.push(new Error(`Invalid optional meta field for '${s}'`));continue}Object.assign(l,{optional:c})}if(this.resolutions=[],typeof e.resolutions==\"object\"&&e.resolutions!==null)for(let[s,o]of Object.entries(e.resolutions)){if(typeof o!=\"string\"){i.push(new Error(`Invalid resolution entry for '${s}'`));continue}try{this.resolutions.push({pattern:rI(s),reference:o})}catch(a){i.push(a);continue}}if(Array.isArray(e.files)){this.files=new Set;for(let s of e.files){if(typeof s!=\"string\"){i.push(new Error(`Invalid files entry for '${s}'`));continue}this.files.add(s)}}else this.files=null;if(typeof e.publishConfig==\"object\"&&e.publishConfig!==null){if(this.publishConfig={},typeof e.publishConfig.access==\"string\"&&(this.publishConfig.access=e.publishConfig.access),typeof e.publishConfig.main==\"string\"&&(this.publishConfig.main=un(e.publishConfig.main)),typeof e.publishConfig.module==\"string\"&&(this.publishConfig.module=un(e.publishConfig.module)),e.publishConfig.browser!=null)if(typeof e.publishConfig.browser==\"string\")this.publishConfig.browser=un(e.publishConfig.browser);else{this.publishConfig.browser=new Map;for(let[s,o]of Object.entries(e.publishConfig.browser))this.publishConfig.browser.set(un(s),typeof o==\"string\"?un(o):o)}if(typeof e.publishConfig.registry==\"string\"&&(this.publishConfig.registry=e.publishConfig.registry),typeof e.publishConfig.bin==\"string\")this.name!==null?this.publishConfig.bin=new Map([[this.name.name,un(e.publishConfig.bin)]]):i.push(new Error(\"String bin field, but no attached package name\"));else if(typeof e.publishConfig.bin==\"object\"&&e.publishConfig.bin!==null){this.publishConfig.bin=new Map;for(let[s,o]of Object.entries(e.publishConfig.bin)){if(typeof 
o!=\"string\"){i.push(new Error(`Invalid bin definition for '${s}'`));continue}this.publishConfig.bin.set(s,un(o))}}if(Array.isArray(e.publishConfig.executableFiles)){this.publishConfig.executableFiles=new Set;for(let s of e.publishConfig.executableFiles){if(typeof s!=\"string\"){i.push(new Error(\"Invalid executable file definition\"));continue}this.publishConfig.executableFiles.add(un(s))}}}else this.publishConfig=null;if(typeof e.installConfig==\"object\"&&e.installConfig!==null){this.installConfig={};for(let s of Object.keys(e.installConfig))s===\"hoistingLimits\"?typeof e.installConfig.hoistingLimits==\"string\"?this.installConfig.hoistingLimits=e.installConfig.hoistingLimits:i.push(new Error(\"Invalid hoisting limits definition\")):s==\"selfReferences\"?typeof e.installConfig.selfReferences==\"boolean\"?this.installConfig.selfReferences=e.installConfig.selfReferences:i.push(new Error(\"Invalid selfReferences definition, must be a boolean value\")):i.push(new Error(`Unrecognized installConfig key: ${s}`))}else this.installConfig=null;if(typeof e.optionalDependencies==\"object\"&&e.optionalDependencies!==null)for(let[s,o]of Object.entries(e.optionalDependencies)){if(typeof o!=\"string\"){i.push(new Error(`Invalid dependency range for '${s}'`));continue}let a;try{a=An(s)}catch(g){i.push(new Error(`Parsing failed for the dependency name '${s}'`));continue}let l=rr(a,o);this.dependencies.set(l.identHash,l);let c=rr(a,\"unknown\"),u=this.ensureDependencyMeta(c);Object.assign(u,{optional:!0})}typeof e.preferUnplugged==\"boolean\"?this.preferUnplugged=e.preferUnplugged:this.preferUnplugged=null,this.errors=i}getForScope(e){switch(e){case\"dependencies\":return this.dependencies;case\"devDependencies\":return this.devDependencies;case\"peerDependencies\":return this.peerDependencies;default:throw new Error(`Unsupported value (\"${e}\")`)}}hasConsumerDependency(e){return!!(this.dependencies.has(e.identHash)||this.peerDependencies.has(e.identHash))}hasHardDependency(e){return!!(this.dependencies.has(e.identHash)||this.devDependencies.has(e.identHash))}hasSoftDependency(e){return!!this.peerDependencies.has(e.identHash)}hasDependency(e){return!!(this.hasHardDependency(e)||this.hasSoftDependency(e))}getConditions(){let e=[];return this.os&&this.os.length>0&&e.push(cx(\"os\",this.os)),this.cpu&&this.cpu.length>0&&e.push(cx(\"cpu\",this.cpu)),this.libc&&this.libc.length>0&&e.push(cx(\"libc\",this.libc)),e.length>0?e.join(\" & \"):null}isCompatibleWithOS(e){return sl.isManifestFieldCompatible(this.os,e)}isCompatibleWithCPU(e){return sl.isManifestFieldCompatible(this.cpu,e)}ensureDependencyMeta(e){if(e.range!==\"unknown\"&&!C8.default.valid(e.range))throw new Error(`Invalid meta field range for '${Pn(e)}'`);let r=Ot(e),i=e.range!==\"unknown\"?e.range:null,n=this.dependenciesMeta.get(r);n||this.dependenciesMeta.set(r,n=new Map);let s=n.get(i);return s||n.set(i,s={}),s}ensurePeerDependencyMeta(e){if(e.range!==\"unknown\")throw new Error(`Invalid meta field range for '${Pn(e)}'`);let r=Ot(e),i=this.peerDependenciesMeta.get(r);return i||this.peerDependenciesMeta.set(r,i={}),i}setRawField(e,r,{after:i=[]}={}){let n=new Set(i.filter(s=>Object.prototype.hasOwnProperty.call(this.raw,s)));if(n.size===0||Object.prototype.hasOwnProperty.call(this.raw,e))this.raw[e]=r;else{let s=this.raw,o=this.raw={},a=!1;for(let l of Object.keys(s))o[l]=s[l],a||(n.delete(l),n.size===0&&(o[e]=r,a=!0))}}exportTo(e,{compatibilityMode:r=!0}={}){var s;if(Object.assign(e,this.raw),this.name!==null?e.name=Ot(this.name):delete 
e.name,this.version!==null?e.version=this.version:delete e.version,this.os!==null?e.os=this.os:delete e.os,this.cpu!==null?e.cpu=this.cpu:delete e.cpu,this.type!==null?e.type=this.type:delete e.type,this.packageManager!==null?e.packageManager=this.packageManager:delete e.packageManager,this.private?e.private=!0:delete e.private,this.license!==null?e.license=this.license:delete e.license,this.languageName!==null?e.languageName=this.languageName:delete e.languageName,this.main!==null?e.main=this.main:delete e.main,this.module!==null?e.module=this.module:delete e.module,this.browser!==null){let o=this.browser;typeof o==\"string\"?e.browser=o:o instanceof Map&&(e.browser=Object.assign({},...Array.from(o.keys()).sort().map(a=>({[a]:o.get(a)}))))}else delete e.browser;this.bin.size===1&&this.name!==null&&this.bin.has(this.name.name)?e.bin=this.bin.get(this.name.name):this.bin.size>0?e.bin=Object.assign({},...Array.from(this.bin.keys()).sort().map(o=>({[o]:this.bin.get(o)}))):delete e.bin,this.workspaceDefinitions.length>0?this.raw.workspaces&&!Array.isArray(this.raw.workspaces)?e.workspaces=te(N({},this.raw.workspaces),{packages:this.workspaceDefinitions.map(({pattern:o})=>o)}):e.workspaces=this.workspaceDefinitions.map(({pattern:o})=>o):this.raw.workspaces&&!Array.isArray(this.raw.workspaces)&&Object.keys(this.raw.workspaces).length>0?e.workspaces=this.raw.workspaces:delete e.workspaces;let i=[],n=[];for(let o of this.dependencies.values()){let a=this.dependenciesMeta.get(Ot(o)),l=!1;if(r&&a){let c=a.get(null);c&&c.optional&&(l=!0)}l?n.push(o):i.push(o)}i.length>0?e.dependencies=Object.assign({},...jg(i).map(o=>({[Ot(o)]:o.range}))):delete e.dependencies,n.length>0?e.optionalDependencies=Object.assign({},...jg(n).map(o=>({[Ot(o)]:o.range}))):delete e.optionalDependencies,this.devDependencies.size>0?e.devDependencies=Object.assign({},...jg(this.devDependencies.values()).map(o=>({[Ot(o)]:o.range}))):delete e.devDependencies,this.peerDependencies.size>0?e.peerDependencies=Object.assign({},...jg(this.peerDependencies.values()).map(o=>({[Ot(o)]:o.range}))):delete e.peerDependencies,e.dependenciesMeta={};for(let[o,a]of xn(this.dependenciesMeta.entries(),([l,c])=>l))for(let[l,c]of xn(a.entries(),([u,g])=>u!==null?`0${u}`:\"1\")){let u=l!==null?Pn(rr(An(o),l)):o,g=N({},c);r&&l===null&&delete g.optional,Object.keys(g).length!==0&&(e.dependenciesMeta[u]=g)}if(Object.keys(e.dependenciesMeta).length===0&&delete e.dependenciesMeta,this.peerDependenciesMeta.size>0?e.peerDependenciesMeta=Object.assign({},...xn(this.peerDependenciesMeta.entries(),([o,a])=>o).map(([o,a])=>({[o]:a}))):delete e.peerDependenciesMeta,this.resolutions.length>0?e.resolutions=Object.assign({},...this.resolutions.map(({pattern:o,reference:a})=>({[iI(o)]:a}))):delete e.resolutions,this.files!==null?e.files=Array.from(this.files):delete e.files,this.preferUnplugged!==null?e.preferUnplugged=this.preferUnplugged:delete e.preferUnplugged,this.scripts!==null&&this.scripts.size>0){(s=e.scripts)!=null||(e.scripts={});for(let o of Object.keys(e.scripts))this.scripts.has(o)||delete e.scripts[o];for(let[o,a]of this.scripts.entries())e.scripts[o]=a}else delete e.scripts;return e}},At=sl;At.fileName=\"package.json\",At.allDependencies=[\"dependencies\",\"devDependencies\",\"peerDependencies\"],At.hardDependencies=[\"dependencies\",\"devDependencies\"];function m8(t){let e=t.match(/^[ \\t]+/m);return e?e[0]:\"  \"}function E8(t){return t.charCodeAt(0)===65279?t.slice(1):t}function un(t){return t.replace(/\\\\/g,\"/\")}function 
Cw(t,{yamlCompatibilityMode:e}){return e?GS(t):typeof t==\"undefined\"||typeof t==\"boolean\"?t:null}function I8(t,e){let r=e.search(/[^!]/);if(r===-1)return\"invalid\";let i=r%2==0?\"\":\"!\",n=e.slice(r);return`${i}${t}=${n}`}function cx(t,e){return e.length===1?I8(t,e[0]):`(${e.map(r=>I8(t,r)).join(\" | \")})`}var X8=ge(V8()),Z8=ge(require(\"stream\")),$8=ge(require(\"string_decoder\"));var lke=15,ct=class extends Error{constructor(e,r,i){super(r);this.reportExtra=i;this.reportCode=e}};function cke(t){return typeof t.reportCode!=\"undefined\"}var Ji=class{constructor(){this.reportedInfos=new Set;this.reportedWarnings=new Set;this.reportedErrors=new Set}static progressViaCounter(e){let r=0,i,n=new Promise(l=>{i=l}),s=l=>{let c=i;n=new Promise(u=>{i=u}),r=l,c()},o=(l=0)=>{s(r+1)},a=async function*(){for(;r<e;)await n,yield{progress:r/e}}();return{[Symbol.asyncIterator](){return a},hasProgress:!0,hasTitle:!1,set:s,tick:o}}static progressViaTitle(){let e,r,i=new Promise(o=>{r=o}),n=(0,X8.default)(o=>{let a=r;i=new Promise(l=>{r=l}),e=o,a()},1e3/lke),s=async function*(){for(;;)await i,yield{title:e}}();return{[Symbol.asyncIterator](){return s},hasProgress:!1,hasTitle:!0,setTitle:n}}async startProgressPromise(e,r){let i=this.reportProgress(e);try{return await r(e)}finally{i.stop()}}startProgressSync(e,r){let i=this.reportProgress(e);try{return r(e)}finally{i.stop()}}reportInfoOnce(e,r,i){var s;let n=i&&i.key?i.key:r;this.reportedInfos.has(n)||(this.reportedInfos.add(n),this.reportInfo(e,r),(s=i==null?void 0:i.reportExtra)==null||s.call(i,this))}reportWarningOnce(e,r,i){var s;let n=i&&i.key?i.key:r;this.reportedWarnings.has(n)||(this.reportedWarnings.add(n),this.reportWarning(e,r),(s=i==null?void 0:i.reportExtra)==null||s.call(i,this))}reportErrorOnce(e,r,i){var s;let n=i&&i.key?i.key:r;this.reportedErrors.has(n)||(this.reportedErrors.add(n),this.reportError(e,r),(s=i==null?void 0:i.reportExtra)==null||s.call(i,this))}reportExceptionOnce(e){cke(e)?this.reportErrorOnce(e.reportCode,e.message,{key:e,reportExtra:e.reportExtra}):this.reportErrorOnce($.EXCEPTION,e.stack||e.message,{key:e})}createStreamReporter(e=null){let r=new Z8.PassThrough,i=new $8.StringDecoder,n=\"\";return r.on(\"data\",s=>{let o=i.write(s),a;do if(a=o.indexOf(`\n`),a!==-1){let l=n+o.substring(0,a);o=o.substring(a+1),n=\"\",e!==null?this.reportInfo(null,`${e} ${l}`):this.reportInfo(null,l)}while(a!==-1);n+=o}),r.on(\"end\",()=>{let s=i.end();s!==\"\"&&(e!==null?this.reportInfo(null,`${e} ${s}`):this.reportInfo(null,s))}),r}};var yd=class{constructor(e){this.fetchers=e}supports(e,r){return!!this.tryFetcher(e,r)}getLocalPath(e,r){return this.getFetcher(e,r).getLocalPath(e,r)}async fetch(e,r){return await this.getFetcher(e,r).fetch(e,r)}tryFetcher(e,r){let i=this.fetchers.find(n=>n.supports(e,r));return i||null}getFetcher(e,r){let i=this.fetchers.find(n=>n.supports(e,r));if(!i)throw new ct($.FETCHER_NOT_FOUND,`${Bt(r.project.configuration,e)} isn't supported by any available fetcher`);return i}};var wd=class{constructor(e){this.resolvers=e.filter(r=>r)}supportsDescriptor(e,r){return!!this.tryResolverByDescriptor(e,r)}supportsLocator(e,r){return!!this.tryResolverByLocator(e,r)}shouldPersistResolution(e,r){return this.getResolverByLocator(e,r).shouldPersistResolution(e,r)}bindDescriptor(e,r,i){return this.getResolverByDescriptor(e,i).bindDescriptor(e,r,i)}getResolutionDependencies(e,r){return this.getResolverByDescriptor(e,r).getResolutionDependencies(e,r)}async getCandidates(e,r,i){return await 
this.getResolverByDescriptor(e,i).getCandidates(e,r,i)}async getSatisfying(e,r,i){return this.getResolverByDescriptor(e,i).getSatisfying(e,r,i)}async resolve(e,r){return await this.getResolverByLocator(e,r).resolve(e,r)}tryResolverByDescriptor(e,r){let i=this.resolvers.find(n=>n.supportsDescriptor(e,r));return i||null}getResolverByDescriptor(e,r){let i=this.resolvers.find(n=>n.supportsDescriptor(e,r));if(!i)throw new Error(`${sr(r.project.configuration,e)} isn't supported by any available resolver`);return i}tryResolverByLocator(e,r){let i=this.resolvers.find(n=>n.supportsLocator(e,r));return i||null}getResolverByLocator(e,r){let i=this.resolvers.find(n=>n.supportsLocator(e,r));if(!i)throw new Error(`${Bt(r.project.configuration,e)} isn't supported by any available resolver`);return i}};var ez=ge(ti());var Gg=/^(?!v)[a-z0-9._-]+$/i,fx=class{supportsDescriptor(e,r){return!!(fo(e.range)||Gg.test(e.range))}supportsLocator(e,r){return!!(ez.default.valid(e.reference)||Gg.test(e.reference))}shouldPersistResolution(e,r){return r.resolver.shouldPersistResolution(this.forwardLocator(e,r),r)}bindDescriptor(e,r,i){return i.resolver.bindDescriptor(this.forwardDescriptor(e,i),r,i)}getResolutionDependencies(e,r){return r.resolver.getResolutionDependencies(this.forwardDescriptor(e,r),r)}async getCandidates(e,r,i){return await i.resolver.getCandidates(this.forwardDescriptor(e,i),r,i)}async getSatisfying(e,r,i){return await i.resolver.getSatisfying(this.forwardDescriptor(e,i),r,i)}async resolve(e,r){let i=await r.resolver.resolve(this.forwardLocator(e,r),r);return ld(i,e)}forwardDescriptor(e,r){return rr(e,`${r.project.configuration.get(\"defaultProtocol\")}${e.range}`)}forwardLocator(e,r){return cn(e,`${r.project.configuration.get(\"defaultProtocol\")}${e.reference}`)}};var Bd=class{supports(e){return!!e.reference.startsWith(\"virtual:\")}getLocalPath(e,r){let i=e.reference.indexOf(\"#\");if(i===-1)throw new Error(\"Invalid virtual package reference\");let n=e.reference.slice(i+1),s=cn(e,n);return r.fetcher.getLocalPath(s,r)}async fetch(e,r){let i=e.reference.indexOf(\"#\");if(i===-1)throw new Error(\"Invalid virtual package reference\");let n=e.reference.slice(i+1),s=cn(e,n),o=await r.fetcher.fetch(s,r);return await this.ensureVirtualLink(e,o,r)}getLocatorFilename(e){return Hg(e)}async ensureVirtualLink(e,r,i){let n=r.packageFs.getRealPath(),s=i.project.configuration.get(\"virtualFolder\"),o=this.getLocatorFilename(e),a=Jr.makeVirtualPath(s,o,n),l=new Pa(a,{baseFs:r.packageFs,pathUtils:k});return te(N({},r),{packageFs:l})}};var Yg=class{static isVirtualDescriptor(e){return!!e.range.startsWith(Yg.protocol)}static isVirtualLocator(e){return!!e.reference.startsWith(Yg.protocol)}supportsDescriptor(e,r){return Yg.isVirtualDescriptor(e)}supportsLocator(e,r){return Yg.isVirtualLocator(e)}shouldPersistResolution(e,r){return!1}bindDescriptor(e,r,i){throw new Error('Assertion failed: calling \"bindDescriptor\" on a virtual descriptor is unsupported')}getResolutionDependencies(e,r){throw new Error('Assertion failed: calling \"getResolutionDependencies\" on a virtual descriptor is unsupported')}async getCandidates(e,r,i){throw new Error('Assertion failed: calling \"getCandidates\" on a virtual descriptor is unsupported')}async getSatisfying(e,r,i){throw new Error('Assertion failed: calling \"getSatisfying\" on a virtual descriptor is unsupported')}async resolve(e,r){throw new Error('Assertion failed: calling \"resolve\" on a virtual locator is unsupported')}},mw=Yg;mw.protocol=\"virtual:\";var 
bd=class{supports(e){return!!e.reference.startsWith(si.protocol)}getLocalPath(e,r){return this.getWorkspace(e,r).cwd}async fetch(e,r){let i=this.getWorkspace(e,r).cwd;return{packageFs:new _t(i),prefixPath:Me.dot,localPath:i}}getWorkspace(e,r){return r.project.getWorkspaceByCwd(e.reference.slice(si.protocol.length))}};var hx={};ft(hx,{getDefaultGlobalFolder:()=>dx,getHomeFolder:()=>Qd,isFolderInside:()=>Cx});var px=ge(require(\"os\"));function dx(){if(process.platform===\"win32\"){let t=j.toPortablePath(process.env.LOCALAPPDATA||j.join((0,px.homedir)(),\"AppData\",\"Local\"));return k.resolve(t,\"Yarn/Berry\")}if(process.env.XDG_DATA_HOME){let t=j.toPortablePath(process.env.XDG_DATA_HOME);return k.resolve(t,\"yarn/berry\")}return k.resolve(Qd(),\".yarn/berry\")}function Qd(){return j.toPortablePath((0,px.homedir)()||\"/usr/local/share\")}function Cx(t,e){let r=k.relative(e,t);return r&&!r.startsWith(\"..\")&&!k.isAbsolute(r)}var qg={};ft(qg,{builtinModules:()=>mx,getArchitecture:()=>vd,getArchitectureName:()=>gke,getArchitectureSet:()=>Ex});var tz=ge(require(\"module\"));function mx(){return new Set(tz.default.builtinModules||Object.keys(process.binding(\"natives\")))}function uke(){var i,n,s,o;if(process.platform===\"win32\")return null;let e=(s=((n=(i=process.report)==null?void 0:i.getReport())!=null?n:{}).sharedObjects)!=null?s:[],r=/\\/(?:(ld-linux-|[^/]+-linux-gnu\\/)|(libc.musl-|ld-musl-))/;return(o=$p(e,a=>{let l=a.match(r);if(!l)return $p.skip;if(l[1])return\"glibc\";if(l[2])return\"musl\";throw new Error(\"Assertion failed: Expected the libc variant to have been detected\")}))!=null?o:null}var Ew,Iw;function vd(){return Ew=Ew!=null?Ew:{os:process.platform,cpu:process.arch,libc:uke()}}function gke(t=vd()){return t.libc?`${t.os}-${t.cpu}-${t.libc}`:`${t.os}-${t.cpu}`}function Ex(){let t=vd();return Iw=Iw!=null?Iw:{os:[t.os],cpu:[t.cpu],libc:t.libc?[t.libc]:[]}}var fke=new Set([\"binFolder\",\"version\",\"flags\",\"profile\",\"gpg\",\"ignoreNode\",\"wrapOutput\",\"home\",\"confDir\"]),ww=\"yarn_\",yx=\".yarnrc.yml\",wx=\"yarn.lock\",hke=\"********\",Ie;(function(u){u.ANY=\"ANY\",u.BOOLEAN=\"BOOLEAN\",u.ABSOLUTE_PATH=\"ABSOLUTE_PATH\",u.LOCATOR=\"LOCATOR\",u.LOCATOR_LOOSE=\"LOCATOR_LOOSE\",u.NUMBER=\"NUMBER\",u.STRING=\"STRING\",u.SECRET=\"SECRET\",u.SHAPE=\"SHAPE\",u.MAP=\"MAP\"})(Ie||(Ie={}));var Di=Ge,Bx={lastUpdateCheck:{description:\"Last timestamp we checked whether new Yarn versions were available\",type:Ie.STRING,default:null},yarnPath:{description:\"Path to the local executable that must be used over the global one\",type:Ie.ABSOLUTE_PATH,default:null},ignorePath:{description:\"If true, the local executable will be ignored when using the global one\",type:Ie.BOOLEAN,default:!1},ignoreCwd:{description:\"If true, the `--cwd` flag will be ignored\",type:Ie.BOOLEAN,default:!1},cacheKeyOverride:{description:\"A global cache key override; used only for test purposes\",type:Ie.STRING,default:null},globalFolder:{description:\"Folder where all system-global files are stored\",type:Ie.ABSOLUTE_PATH,default:dx()},cacheFolder:{description:\"Folder where the cache files must be written\",type:Ie.ABSOLUTE_PATH,default:\"./.yarn/cache\"},compressionLevel:{description:\"Zip files compression level, from 0 to 9 or mixed (a variant of 9, which stores some files uncompressed, when compression doesn't yield good results)\",type:Ie.NUMBER,values:[\"mixed\",0,1,2,3,4,5,6,7,8,9],default:ic},virtualFolder:{description:\"Folder where the virtual packages (cf doc) will be mapped on the disk (must be 
named __virtual__)\",type:Ie.ABSOLUTE_PATH,default:\"./.yarn/__virtual__\"},lockfileFilename:{description:\"Name of the files where the Yarn dependency tree entries must be stored\",type:Ie.STRING,default:wx},installStatePath:{description:\"Path of the file where the install state will be persisted\",type:Ie.ABSOLUTE_PATH,default:\"./.yarn/install-state.gz\"},immutablePatterns:{description:\"Array of glob patterns; files matching them won't be allowed to change during immutable installs\",type:Ie.STRING,default:[],isArray:!0},rcFilename:{description:\"Name of the files where the configuration can be found\",type:Ie.STRING,default:Bw()},enableGlobalCache:{description:\"If true, the system-wide cache folder will be used regardless of `cache-folder`\",type:Ie.BOOLEAN,default:!1},enableColors:{description:\"If true, the CLI is allowed to use colors in its output\",type:Ie.BOOLEAN,default:Fy,defaultText:\"<dynamic>\"},enableHyperlinks:{description:\"If true, the CLI is allowed to use hyperlinks in its output\",type:Ie.BOOLEAN,default:WS,defaultText:\"<dynamic>\"},enableInlineBuilds:{description:\"If true, the CLI will print the build output on the command line\",type:Ie.BOOLEAN,default:yw.isCI,defaultText:\"<dynamic>\"},enableMessageNames:{description:\"If true, the CLI will prefix most messages with codes suitable for search engines\",type:Ie.BOOLEAN,default:!0},enableProgressBars:{description:\"If true, the CLI is allowed to show a progress bar for long-running events\",type:Ie.BOOLEAN,default:!yw.isCI,defaultText:\"<dynamic>\"},enableTimers:{description:\"If true, the CLI is allowed to print the time spent executing commands\",type:Ie.BOOLEAN,default:!0},preferAggregateCacheInfo:{description:\"If true, the CLI will only print a one-line report of any cache changes\",type:Ie.BOOLEAN,default:yw.isCI},preferInteractive:{description:\"If true, the CLI will automatically use the interactive mode when called from a TTY\",type:Ie.BOOLEAN,default:!1},preferTruncatedLines:{description:\"If true, the CLI will truncate lines that would go beyond the size of the terminal\",type:Ie.BOOLEAN,default:!1},progressBarStyle:{description:\"Which style of progress bar should be used (only when progress bars are enabled)\",type:Ie.STRING,default:void 0,defaultText:\"<dynamic>\"},defaultLanguageName:{description:\"Default language mode that should be used when a package doesn't offer any insight\",type:Ie.STRING,default:\"node\"},defaultProtocol:{description:\"Default resolution protocol used when resolving pure semver and tag ranges\",type:Ie.STRING,default:\"npm:\"},enableTransparentWorkspaces:{description:\"If false, Yarn won't automatically resolve workspace dependencies unless they use the `workspace:` protocol\",type:Ie.BOOLEAN,default:!0},supportedArchitectures:{description:\"Architectures that Yarn will fetch and inject into the resolver\",type:Ie.SHAPE,properties:{os:{description:\"Array of supported process.platform strings, or null to target them all\",type:Ie.STRING,isArray:!0,isNullable:!0,default:[\"current\"]},cpu:{description:\"Array of supported process.arch strings, or null to target them all\",type:Ie.STRING,isArray:!0,isNullable:!0,default:[\"current\"]},libc:{description:\"Array of supported libc libraries, or null to target them all\",type:Ie.STRING,isArray:!0,isNullable:!0,default:[\"current\"]}}},enableMirror:{description:\"If true, the downloaded packages will be retrieved and stored in both the local and global folders\",type:Ie.BOOLEAN,default:!0},enableNetwork:{description:\"If false, 
the package manager will refuse to use the network if required to\",type:Ie.BOOLEAN,default:!0},httpProxy:{description:\"URL of the http proxy that must be used for outgoing http requests\",type:Ie.STRING,default:null},httpsProxy:{description:\"URL of the http proxy that must be used for outgoing https requests\",type:Ie.STRING,default:null},unsafeHttpWhitelist:{description:\"List of the hostnames for which http queries are allowed (glob patterns are supported)\",type:Ie.STRING,default:[],isArray:!0},httpTimeout:{description:\"Timeout of each http request in milliseconds\",type:Ie.NUMBER,default:6e4},httpRetry:{description:\"Retry times on http failure\",type:Ie.NUMBER,default:3},networkConcurrency:{description:\"Maximal number of concurrent requests\",type:Ie.NUMBER,default:50},networkSettings:{description:\"Network settings per hostname (glob patterns are supported)\",type:Ie.MAP,valueDefinition:{description:\"\",type:Ie.SHAPE,properties:{caFilePath:{description:\"Path to file containing one or multiple Certificate Authority signing certificates\",type:Ie.ABSOLUTE_PATH,default:null},enableNetwork:{description:\"If false, the package manager will refuse to use the network if required to\",type:Ie.BOOLEAN,default:null},httpProxy:{description:\"URL of the http proxy that must be used for outgoing http requests\",type:Ie.STRING,default:null},httpsProxy:{description:\"URL of the http proxy that must be used for outgoing https requests\",type:Ie.STRING,default:null},httpsKeyFilePath:{description:\"Path to file containing private key in PEM format\",type:Ie.ABSOLUTE_PATH,default:null},httpsCertFilePath:{description:\"Path to file containing certificate chain in PEM format\",type:Ie.ABSOLUTE_PATH,default:null}}}},caFilePath:{description:\"A path to a file containing one or multiple Certificate Authority signing certificates\",type:Ie.ABSOLUTE_PATH,default:null},httpsKeyFilePath:{description:\"Path to file containing private key in PEM format\",type:Ie.ABSOLUTE_PATH,default:null},httpsCertFilePath:{description:\"Path to file containing certificate chain in PEM format\",type:Ie.ABSOLUTE_PATH,default:null},enableStrictSsl:{description:\"If false, SSL certificate errors will be ignored\",type:Ie.BOOLEAN,default:!0},logFilters:{description:\"Overrides for log levels\",type:Ie.SHAPE,isArray:!0,concatenateValues:!0,properties:{code:{description:\"Code of the messages covered by this override\",type:Ie.STRING,default:void 0},text:{description:\"Code of the texts covered by this override\",type:Ie.STRING,default:void 0},pattern:{description:\"Code of the patterns covered by this override\",type:Ie.STRING,default:void 0},level:{description:\"Log level override, set to null to remove override\",type:Ie.STRING,values:Object.values(go),isNullable:!0,default:void 0}}},enableTelemetry:{description:\"If true, telemetry will be periodically sent, following the rules in https://yarnpkg.com/advanced/telemetry\",type:Ie.BOOLEAN,default:!0},telemetryInterval:{description:\"Minimal amount of time between two telemetry uploads, in days\",type:Ie.NUMBER,default:7},telemetryUserId:{description:\"If you desire to tell us which project you are, you can set this field. 
Completely optional and opt-in.\",type:Ie.STRING,default:null},enableScripts:{description:\"If true, packages are allowed to have install scripts by default\",type:Ie.BOOLEAN,default:!0},enableStrictSettings:{description:\"If true, unknown settings will cause Yarn to abort\",type:Ie.BOOLEAN,default:!0},enableImmutableCache:{description:\"If true, the cache is reputed immutable and actions that would modify it will throw\",type:Ie.BOOLEAN,default:!1},checksumBehavior:{description:\"Enumeration defining what to do when a checksum doesn't match expectations\",type:Ie.STRING,default:\"throw\"},packageExtensions:{description:\"Map of package corrections to apply on the dependency tree\",type:Ie.MAP,valueDefinition:{description:\"The extension that will be applied to any package whose version matches the specified range\",type:Ie.SHAPE,properties:{dependencies:{description:\"The set of dependencies that must be made available to the current package in order for it to work properly\",type:Ie.MAP,valueDefinition:{description:\"A range\",type:Ie.STRING}},peerDependencies:{description:\"Inherited dependencies - the consumer of the package will be tasked to provide them\",type:Ie.MAP,valueDefinition:{description:\"A semver range\",type:Ie.STRING}},peerDependenciesMeta:{description:\"Extra information related to the dependencies listed in the peerDependencies field\",type:Ie.MAP,valueDefinition:{description:\"The peerDependency meta\",type:Ie.SHAPE,properties:{optional:{description:\"If true, the selected peer dependency will be marked as optional by the package manager and the consumer omitting it won't be reported as an error\",type:Ie.BOOLEAN,default:!1}}}}}}}};function Qx(t,e,r,i,n){if(i.isArray||i.type===Ie.ANY&&Array.isArray(r))return Array.isArray(r)?r.map((s,o)=>bx(t,`${e}[${o}]`,s,i,n)):String(r).split(/,/).map(s=>bx(t,e,s,i,n));if(Array.isArray(r))throw new Error(`Non-array configuration settings \"${e}\" cannot be an array`);return bx(t,e,r,i,n)}function bx(t,e,r,i,n){var a;switch(i.type){case Ie.ANY:return r;case Ie.SHAPE:return pke(t,e,r,i,n);case Ie.MAP:return dke(t,e,r,i,n)}if(r===null&&!i.isNullable&&i.default!==null)throw new Error(`Non-nullable configuration settings \"${e}\" cannot be set to null`);if((a=i.values)==null?void 0:a.includes(r))return r;let o=(()=>{if(i.type===Ie.BOOLEAN&&typeof r!=\"string\")return td(r);if(typeof r!=\"string\")throw new Error(`Expected value (${r}) to be a string`);let l=jS(r,{env:process.env});switch(i.type){case Ie.ABSOLUTE_PATH:return k.resolve(n,j.toPortablePath(l));case Ie.LOCATOR_LOOSE:return Mc(l,!1);case Ie.NUMBER:return parseInt(l);case Ie.LOCATOR:return Mc(l);case Ie.BOOLEAN:return td(l);default:return l}})();if(i.values&&!i.values.includes(o))throw new Error(`Invalid value, expected one of ${i.values.join(\", \")}`);return o}function pke(t,e,r,i,n){if(typeof r!=\"object\"||Array.isArray(r))throw new Pe(`Object configuration settings \"${e}\" must be an object`);let s=vx(t,i,{ignoreArrays:!0});if(r===null)return s;for(let[o,a]of Object.entries(r)){let l=`${e}.${o}`;if(!i.properties[o])throw new Pe(`Unrecognized configuration settings found: ${e}.${o} - run \"yarn config -v\" to see the list of settings supported in Yarn`);s.set(o,Qx(t,l,a,i.properties[o],n))}return s}function dke(t,e,r,i,n){let s=new Map;if(typeof r!=\"object\"||Array.isArray(r))throw new Pe(`Map configuration settings \"${e}\" must be an object`);if(r===null)return s;for(let[o,a]of Object.entries(r)){let 
l=i.normalizeKeys?i.normalizeKeys(o):o,c=`${e}['${l}']`,u=i.valueDefinition;s.set(l,Qx(t,c,a,u,n))}return s}function vx(t,e,{ignoreArrays:r=!1}={}){switch(e.type){case Ie.SHAPE:{if(e.isArray&&!r)return[];let i=new Map;for(let[n,s]of Object.entries(e.properties))i.set(n,vx(t,s));return i}break;case Ie.MAP:return e.isArray&&!r?[]:new Map;case Ie.ABSOLUTE_PATH:return e.default===null?null:t.projectCwd===null?k.isAbsolute(e.default)?k.normalize(e.default):e.isNullable?null:void 0:Array.isArray(e.default)?e.default.map(i=>k.resolve(t.projectCwd,i)):k.resolve(t.projectCwd,e.default);default:return e.default}}function bw(t,e,r){if(e.type===Ie.SECRET&&typeof t==\"string\"&&r.hideSecrets)return hke;if(e.type===Ie.ABSOLUTE_PATH&&typeof t==\"string\"&&r.getNativePaths)return j.fromPortablePath(t);if(e.isArray&&Array.isArray(t)){let i=[];for(let n of t)i.push(bw(n,e,r));return i}if(e.type===Ie.MAP&&t instanceof Map){let i=new Map;for(let[n,s]of t.entries())i.set(n,bw(s,e.valueDefinition,r));return i}if(e.type===Ie.SHAPE&&t instanceof Map){let i=new Map;for(let[n,s]of t.entries()){let o=e.properties[n];i.set(n,bw(s,o,r))}return i}return t}function Cke(){let t={};for(let[e,r]of Object.entries(process.env))e=e.toLowerCase(),!!e.startsWith(ww)&&(e=(0,rz.default)(e.slice(ww.length)),t[e]=r);return t}function Bw(){let t=`${ww}rc_filename`;for(let[e,r]of Object.entries(process.env))if(e.toLowerCase()===t&&typeof r==\"string\")return r;return yx}var ol;(function(i){i[i.LOCKFILE=0]=\"LOCKFILE\",i[i.MANIFEST=1]=\"MANIFEST\",i[i.NONE=2]=\"NONE\"})(ol||(ol={}));var Xa=class{constructor(e){this.projectCwd=null;this.plugins=new Map;this.settings=new Map;this.values=new Map;this.sources=new Map;this.invalid=new Map;this.packageExtensions=new Map;this.limits=new Map;this.startingCwd=e}static create(e,r,i){let n=new Xa(e);typeof r!=\"undefined\"&&!(r instanceof Map)&&(n.projectCwd=r),n.importSettings(Bx);let s=typeof i!=\"undefined\"?i:r instanceof Map?r:new Map;for(let[o,a]of s)n.activatePlugin(o,a);return n}static async find(e,r,{lookup:i=0,strict:n=!0,usePath:s=!1,useRc:o=!0}={}){let a=Cke();delete a.rcFilename;let l=await Xa.findRcFiles(e),c=await Xa.findHomeRcFile();if(c){let Q=l.find(S=>S.path===c.path);Q?Q.strict=!1:l.push(te(N({},c),{strict:!1}))}let u=({ignoreCwd:Q,yarnPath:S,ignorePath:x,lockfileFilename:M})=>({ignoreCwd:Q,yarnPath:S,ignorePath:x,lockfileFilename:M}),g=U=>{var J=U,{ignoreCwd:Q,yarnPath:S,ignorePath:x,lockfileFilename:M}=J,Y=Tr(J,[\"ignoreCwd\",\"yarnPath\",\"ignorePath\",\"lockfileFilename\"]);return Y},f=new Xa(e);f.importSettings(u(Bx)),f.useWithSource(\"<environment>\",u(a),e,{strict:!1});for(let{path:Q,cwd:S,data:x}of l)f.useWithSource(Q,u(x),S,{strict:!1});if(s){let Q=f.get(\"yarnPath\"),S=f.get(\"ignorePath\");if(Q!==null&&!S)return f}let h=f.get(\"lockfileFilename\"),p;switch(i){case 0:p=await Xa.findProjectCwd(e,h);break;case 1:p=await Xa.findProjectCwd(e,null);break;case 2:K.existsSync(k.join(e,\"package.json\"))?p=k.resolve(e):p=null;break}f.startingCwd=e,f.projectCwd=p,f.importSettings(g(Bx));let m=new Map([[\"@@core\",f8]]),y=Q=>\"default\"in Q?Q.default:Q;if(r!==null){for(let M of r.plugins.keys())m.set(M,y(r.modules.get(M)));let Q=new Map;for(let M of mx())Q.set(M,()=>Rg(M));for(let[M,Y]of r.modules)Q.set(M,()=>Y);let S=new Set,x=async(M,Y)=>{let{factory:U,name:J}=Rg(M);if(S.has(J))return;let W=new Map(Q),ee=A=>{if(W.has(A))return W.get(A)();throw new Pe(`This plugin cannot access the package referenced via ${A} which is neither a builtin, nor an exposed entry`)},Z=await 
Pg(async()=>y(await U(ee)),A=>`${A} (when initializing ${J}, defined in ${Y})`);Q.set(J,()=>Z),S.add(J),m.set(J,Z)};if(a.plugins)for(let M of a.plugins.split(\";\")){let Y=k.resolve(e,j.toPortablePath(M));await x(Y,\"<environment>\")}for(let{path:M,cwd:Y,data:U}of l)if(!!o&&!!Array.isArray(U.plugins))for(let J of U.plugins){let W=typeof J!=\"string\"?J.path:J,ee=k.resolve(Y,j.toPortablePath(W));await x(ee,M)}}for(let[Q,S]of m)f.activatePlugin(Q,S);f.useWithSource(\"<environment>\",g(a),e,{strict:n});for(let{path:Q,cwd:S,data:x,strict:M}of l)f.useWithSource(Q,g(x),S,{strict:M!=null?M:n});return f.get(\"enableGlobalCache\")&&(f.values.set(\"cacheFolder\",`${f.get(\"globalFolder\")}/cache`),f.sources.set(\"cacheFolder\",\"<internal>\")),await f.refreshPackageExtensions(),f}static async findRcFiles(e){let r=Bw(),i=[],n=e,s=null;for(;n!==s;){s=n;let o=k.join(s,r);if(K.existsSync(o)){let a=await K.readFilePromise(o,\"utf8\"),l;try{l=Qi(a)}catch(c){let u=\"\";throw a.match(/^\\s+(?!-)[^:]+\\s+\\S+/m)&&(u=\" (in particular, make sure you list the colons after each key name)\"),new Pe(`Parse error when loading ${o}; please check it's proper Yaml${u}`)}i.push({path:o,cwd:s,data:l})}n=k.dirname(s)}return i}static async findHomeRcFile(){let e=Bw(),r=Qd(),i=k.join(r,e);if(K.existsSync(i)){let n=await K.readFilePromise(i,\"utf8\"),s=Qi(n);return{path:i,cwd:r,data:s}}return null}static async findProjectCwd(e,r){let i=null,n=e,s=null;for(;n!==s;){if(s=n,K.existsSync(k.join(s,\"package.json\"))&&(i=s),r!==null){if(K.existsSync(k.join(s,r))){i=s;break}}else if(i!==null)break;n=k.dirname(s)}return i}static async updateConfiguration(e,r){let i=Bw(),n=k.join(e,i),s=K.existsSync(n)?Qi(await K.readFilePromise(n,\"utf8\")):{},o=!1,a;if(typeof r==\"function\"){try{a=r(s)}catch{a=r({})}if(a===s)return}else{a=s;for(let l of Object.keys(r)){let c=s[l],u=r[l],g;if(typeof u==\"function\")try{g=u(c)}catch{g=u(void 0)}else g=u;c!==g&&(a[l]=g,o=!0)}if(!o)return}await K.changeFilePromise(n,Na(a),{automaticNewlines:!0})}static async updateHomeConfiguration(e){let r=Qd();return await Xa.updateConfiguration(r,e)}activatePlugin(e,r){this.plugins.set(e,r),typeof r.configuration!=\"undefined\"&&this.importSettings(r.configuration)}importSettings(e){for(let[r,i]of Object.entries(e))if(i!=null){if(this.settings.has(r))throw new Error(`Cannot redefine settings \"${r}\"`);this.settings.set(r,i),this.values.set(r,vx(this,i))}}useWithSource(e,r,i,n){try{this.use(e,r,i,n)}catch(s){throw s.message+=` (in ${et(this,e,Ge.PATH)})`,s}}use(e,r,i,{strict:n=!0,overwrite:s=!1}={}){n=n&&this.get(\"enableStrictSettings\");for(let o of[\"enableStrictSettings\",...Object.keys(r)]){if(typeof r[o]==\"undefined\"||o===\"plugins\"||e===\"<environment>\"&&fke.has(o))continue;if(o===\"rcFilename\")throw new Pe(`The rcFilename settings can only be set via ${`${ww}RC_FILENAME`.toUpperCase()}, not via a rc file`);let l=this.settings.get(o);if(!l){if(n)throw new Pe(`Unrecognized or legacy configuration settings found: ${o} - run \"yarn config -v\" to see the list of settings supported in Yarn`);this.invalid.set(o,e);continue}if(this.sources.has(o)&&!(s||l.type===Ie.MAP||l.isArray&&l.concatenateValues))continue;let c;try{c=Qx(this,o,r[o],l,i)}catch(u){throw u.message+=` in ${et(this,e,Ge.PATH)}`,u}if(o===\"enableStrictSettings\"&&e!==\"<environment>\"){n=c;continue}if(l.type===Ie.MAP){let u=this.values.get(o);this.values.set(o,new Map(s?[...u,...c]:[...c,...u])),this.sources.set(o,`${this.sources.get(o)}, ${e}`)}else if(l.isArray&&l.concatenateValues){let 
u=this.values.get(o);this.values.set(o,s?[...u,...c]:[...c,...u]),this.sources.set(o,`${this.sources.get(o)}, ${e}`)}else this.values.set(o,c),this.sources.set(o,e)}}get(e){if(!this.values.has(e))throw new Error(`Invalid configuration key \"${e}\"`);return this.values.get(e)}getSpecial(e,{hideSecrets:r=!1,getNativePaths:i=!1}){let n=this.get(e),s=this.settings.get(e);if(typeof s==\"undefined\")throw new Pe(`Couldn't find a configuration settings named \"${e}\"`);return bw(n,s,{hideSecrets:r,getNativePaths:i})}getSubprocessStreams(e,{header:r,prefix:i,report:n}){let s,o,a=K.createWriteStream(e);if(this.get(\"enableInlineBuilds\")){let l=n.createStreamReporter(`${i} ${et(this,\"STDOUT\",\"green\")}`),c=n.createStreamReporter(`${i} ${et(this,\"STDERR\",\"red\")}`);s=new Ix.PassThrough,s.pipe(l),s.pipe(a),o=new Ix.PassThrough,o.pipe(c),o.pipe(a)}else s=a,o=a,typeof r!=\"undefined\"&&s.write(`${r}\n`);return{stdout:s,stderr:o}}makeResolver(){let e=[];for(let r of this.plugins.values())for(let i of r.resolvers||[])e.push(new i);return new wd([new mw,new si,new fx,...e])}makeFetcher(){let e=[];for(let r of this.plugins.values())for(let i of r.fetchers||[])e.push(new i);return new yd([new Bd,new bd,...e])}getLinkers(){let e=[];for(let r of this.plugins.values())for(let i of r.linkers||[])e.push(new i);return e}getSupportedArchitectures(){let e=vd(),r=this.get(\"supportedArchitectures\"),i=r.get(\"os\");i!==null&&(i=i.map(o=>o===\"current\"?e.os:o));let n=r.get(\"cpu\");n!==null&&(n=n.map(o=>o===\"current\"?e.cpu:o));let s=r.get(\"libc\");return s!==null&&(s=qo(s,o=>{var a;return o===\"current\"?(a=e.libc)!=null?a:qo.skip:o})),{os:i,cpu:n,libc:s}}async refreshPackageExtensions(){this.packageExtensions=new Map;let e=this.packageExtensions,r=(i,n,{userProvided:s=!1}={})=>{if(!fo(i.range))throw new Error(\"Only semver ranges are allowed as keys for the packageExtensions setting\");let o=new At;o.load(n,{yamlCompatibilityMode:!0});let a=kg(e,i.identHash),l=[];a.push([i.range,l]);let c={status:qi.Inactive,userProvided:s,parentDescriptor:i};for(let u of o.dependencies.values())l.push(te(N({},c),{type:yi.Dependency,descriptor:u}));for(let u of o.peerDependencies.values())l.push(te(N({},c),{type:yi.PeerDependency,descriptor:u}));for(let[u,g]of o.peerDependenciesMeta)for(let[f,h]of Object.entries(g))l.push(te(N({},c),{type:yi.PeerDependencyMeta,selector:u,key:f,value:h}))};await this.triggerHook(i=>i.registerPackageExtensions,this,r);for(let[i,n]of this.get(\"packageExtensions\"))r(nl(i,!0),Ry(n),{userProvided:!0})}normalizePackage(e){let r=cd(e);if(this.packageExtensions==null)throw new Error(\"refreshPackageExtensions has to be called before normalizing packages\");let i=this.packageExtensions.get(e.identHash);if(typeof i!=\"undefined\"){let s=e.version;if(s!==null){for(let[o,a]of i)if(!!Uc(s,o))for(let l of a)switch(l.status===qi.Inactive&&(l.status=qi.Redundant),l.type){case yi.Dependency:typeof r.dependencies.get(l.descriptor.identHash)==\"undefined\"&&(l.status=qi.Active,r.dependencies.set(l.descriptor.identHash,l.descriptor));break;case yi.PeerDependency:typeof r.peerDependencies.get(l.descriptor.identHash)==\"undefined\"&&(l.status=qi.Active,r.peerDependencies.set(l.descriptor.identHash,l.descriptor));break;case yi.PeerDependencyMeta:{let c=r.peerDependenciesMeta.get(l.selector);(typeof c==\"undefined\"||!Object.prototype.hasOwnProperty.call(c,l.key)||c[l.key]!==l.value)&&(l.status=qi.Active,qa(r.peerDependenciesMeta,l.selector,()=>({}))[l.key]=l.value)}break;default:US(l);break}}}let 
n=s=>s.scope?`${s.scope}__${s.name}`:`${s.name}`;for(let s of r.peerDependenciesMeta.keys()){let o=An(s);r.peerDependencies.has(o.identHash)||r.peerDependencies.set(o.identHash,rr(o,\"*\"))}for(let s of r.peerDependencies.values()){if(s.scope===\"types\")continue;let o=n(s),a=Vo(\"types\",o),l=Ot(a);r.peerDependencies.has(a.identHash)||r.peerDependenciesMeta.has(l)||(r.peerDependencies.set(a.identHash,rr(a,\"*\")),r.peerDependenciesMeta.set(l,{optional:!0}))}return r.dependencies=new Map(xn(r.dependencies,([,s])=>Pn(s))),r.peerDependencies=new Map(xn(r.peerDependencies,([,s])=>Pn(s))),r}getLimit(e){return qa(this.limits,e,()=>(0,iz.default)(this.get(e)))}async triggerHook(e,...r){for(let i of this.plugins.values()){let n=i.hooks;if(!n)continue;let s=e(n);!s||await s(...r)}}async triggerMultipleHooks(e,r){for(let i of r)await this.triggerHook(e,...i)}async reduceHook(e,r,...i){let n=r;for(let s of this.plugins.values()){let o=s.hooks;if(!o)continue;let a=e(o);!a||(n=await a(n,...i))}return n}async firstHook(e,...r){for(let i of this.plugins.values()){let n=i.hooks;if(!n)continue;let s=e(n);if(!s)continue;let o=await s(...r);if(typeof o!=\"undefined\")return o}return null}},ye=Xa;ye.telemetry=null;var is;(function(i){i[i.Never=0]=\"Never\",i[i.ErrorCode=1]=\"ErrorCode\",i[i.Always=2]=\"Always\"})(is||(is={}));var Qw=class extends ct{constructor({fileName:e,code:r,signal:i}){let n=ye.create(k.cwd()),s=et(n,e,Ge.PATH);super($.EXCEPTION,`Child ${s} reported an error`,o=>{mke(r,i,{configuration:n,report:o})});this.code=kx(r,i)}},xx=class extends Qw{constructor({fileName:e,code:r,signal:i,stdout:n,stderr:s}){super({fileName:e,code:r,signal:i});this.stdout=n,this.stderr=s}};function jc(t){return t!==null&&typeof t.fd==\"number\"}var Gc=new Set;function Px(){}function Dx(){for(let t of Gc)t.kill()}async function $o(t,e,{cwd:r,env:i=process.env,strict:n=!1,stdin:s=null,stdout:o,stderr:a,end:l=2}){let c=[\"pipe\",\"pipe\",\"pipe\"];s===null?c[0]=\"ignore\":jc(s)&&(c[0]=s),jc(o)&&(c[1]=o),jc(a)&&(c[2]=a);let u=(0,Sx.default)(t,e,{cwd:j.fromPortablePath(r),env:te(N({},i),{PWD:j.fromPortablePath(r)}),stdio:c});Gc.add(u),Gc.size===1&&(process.on(\"SIGINT\",Px),process.on(\"SIGTERM\",Dx)),!jc(s)&&s!==null&&s.pipe(u.stdin),jc(o)||u.stdout.pipe(o,{end:!1}),jc(a)||u.stderr.pipe(a,{end:!1});let g=()=>{for(let f of new Set([o,a]))jc(f)||f.end()};return new Promise((f,h)=>{u.on(\"error\",p=>{Gc.delete(u),Gc.size===0&&(process.off(\"SIGINT\",Px),process.off(\"SIGTERM\",Dx)),(l===2||l===1)&&g(),h(p)}),u.on(\"close\",(p,m)=>{Gc.delete(u),Gc.size===0&&(process.off(\"SIGINT\",Px),process.off(\"SIGTERM\",Dx)),(l===2||l===1&&p>0)&&g(),p===0||!n?f({code:kx(p,m)}):h(new Qw({fileName:t,code:p,signal:m}))})})}async function Eke(t,e,{cwd:r,env:i=process.env,encoding:n=\"utf8\",strict:s=!1}){let o=[\"ignore\",\"pipe\",\"pipe\"],a=[],l=[],c=j.fromPortablePath(r);typeof i.PWD!=\"undefined\"&&(i=te(N({},i),{PWD:c}));let u=(0,Sx.default)(t,e,{cwd:c,env:i,stdio:o});return u.stdout.on(\"data\",g=>{a.push(g)}),u.stderr.on(\"data\",g=>{l.push(g)}),await new Promise((g,f)=>{u.on(\"error\",h=>{let p=ye.create(r),m=et(p,t,Ge.PATH);f(new ct($.EXCEPTION,`Process ${m} failed to spawn`,y=>{y.reportError($.EXCEPTION,`  ${Jo(p,{label:\"Thrown Error\",value:uo(Ge.NO_HINT,h.message)})}`)}))}),u.on(\"close\",(h,p)=>{let m=n===\"buffer\"?Buffer.concat(a):Buffer.concat(a).toString(n),y=n===\"buffer\"?Buffer.concat(l):Buffer.concat(l).toString(n);h===0||!s?g({code:kx(h,p),stdout:m,stderr:y}):f(new 
xx({fileName:t,code:h,signal:p,stdout:m,stderr:y}))})})}var Ike=new Map([[\"SIGINT\",2],[\"SIGQUIT\",3],[\"SIGKILL\",9],[\"SIGTERM\",15]]);function kx(t,e){let r=Ike.get(e);return typeof r!=\"undefined\"?128+r:t!=null?t:1}function mke(t,e,{configuration:r,report:i}){i.reportError($.EXCEPTION,`  ${Jo(r,t!==null?{label:\"Exit Code\",value:uo(Ge.NUMBER,t)}:{label:\"Exit Signal\",value:uo(Ge.CODE,e)})}`)}var ir={};ft(ir,{Method:()=>gl,RequestError:()=>j_.RequestError,del:()=>RDe,get:()=>PDe,getNetworkSettings:()=>J_,post:()=>VP,put:()=>DDe,request:()=>Od});var U_=ge(Gw()),K_=ge(require(\"https\")),H_=ge(require(\"http\")),WP=ge(ts()),zP=ge(M_()),Yw=ge(require(\"url\"));var j_=ge(Gw()),G_=new Map,Y_=new Map,vDe=new H_.Agent({keepAlive:!0}),SDe=new K_.Agent({keepAlive:!0});function q_(t){let e=new Yw.URL(t),r={host:e.hostname,headers:{}};return e.port&&(r.port=Number(e.port)),{proxy:r}}async function _P(t){return qa(Y_,t,()=>K.readFilePromise(t).then(e=>(Y_.set(t,e),e)))}function kDe({statusCode:t,statusMessage:e},r){let i=et(r,t,Ge.NUMBER),n=`https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/${t}`;return Fg(r,`${i}${e?` (${e})`:\"\"}`,n)}async function qw(t,{configuration:e,customErrorMessage:r}){var i,n;try{return await t}catch(s){if(s.name!==\"HTTPError\")throw s;let o=(n=r==null?void 0:r(s))!=null?n:(i=s.response.body)==null?void 0:i.error;o==null&&(s.message.startsWith(\"Response code\")?o=\"The remote server failed to provide the requested resource\":o=s.message),s instanceof U_.TimeoutError&&s.event===\"socket\"&&(o+=`(can be increased via ${et(e,\"httpTimeout\",Ge.SETTING)})`);let a=new ct($.NETWORK_ERROR,o,l=>{s.response&&l.reportError($.NETWORK_ERROR,`  ${Jo(e,{label:\"Response Code\",value:uo(Ge.NO_HINT,kDe(s.response,e))})}`),s.request&&(l.reportError($.NETWORK_ERROR,`  ${Jo(e,{label:\"Request Method\",value:uo(Ge.NO_HINT,s.request.options.method)})}`),l.reportError($.NETWORK_ERROR,`  ${Jo(e,{label:\"Request URL\",value:uo(Ge.URL,s.request.requestUrl)})}`)),s.request.redirects.length>0&&l.reportError($.NETWORK_ERROR,`  ${Jo(e,{label:\"Request Redirects\",value:uo(Ge.NO_HINT,_S(e,s.request.redirects,Ge.URL))})}`),s.request.retryCount===s.request.options.retry.limit&&l.reportError($.NETWORK_ERROR,`  ${Jo(e,{label:\"Request Retry Count\",value:uo(Ge.NO_HINT,`${et(e,s.request.retryCount,Ge.NUMBER)} (can be increased via ${et(e,\"httpRetry\",Ge.SETTING)})`)})}`)});throw a.originalError=s,a}}function J_(t,e){let r=[...e.configuration.get(\"networkSettings\")].sort(([o],[a])=>a.length-o.length),i={enableNetwork:void 0,caFilePath:void 0,httpProxy:void 0,httpsProxy:void 0,httpsKeyFilePath:void 0,httpsCertFilePath:void 0},n=Object.keys(i),s=typeof t==\"string\"?new Yw.URL(t):t;for(let[o,a]of r)if(WP.default.isMatch(s.hostname,o))for(let l of n){let c=a.get(l);c!==null&&typeof i[l]==\"undefined\"&&(i[l]=c)}for(let o of n)typeof i[o]==\"undefined\"&&(i[o]=e.configuration.get(o));return i}var gl;(function(n){n.GET=\"GET\",n.PUT=\"PUT\",n.POST=\"POST\",n.DELETE=\"DELETE\"})(gl||(gl={}));async function Od(t,e,{configuration:r,headers:i,jsonRequest:n,jsonResponse:s,method:o=gl.GET}){let a=async()=>await xDe(t,e,{configuration:r,headers:i,jsonRequest:n,jsonResponse:s,method:o});return await(await r.reduceHook(c=>c.wrapNetworkRequest,a,{target:t,body:e,configuration:r,headers:i,jsonRequest:n,jsonResponse:s,method:o}))()}async function PDe(t,n){var s=n,{configuration:e,jsonResponse:r}=s,i=Tr(s,[\"configuration\",\"jsonResponse\"]);let 
o=qa(G_,t,()=>qw(Od(t,null,N({configuration:e},i)),{configuration:e}).then(a=>(G_.set(t,a.body),a.body)));return Buffer.isBuffer(o)===!1&&(o=await o),r?JSON.parse(o.toString()):o}async function DDe(t,e,n){var s=n,{customErrorMessage:r}=s,i=Tr(s,[\"customErrorMessage\"]);return(await qw(Od(t,e,te(N({},i),{method:gl.PUT})),i)).body}async function VP(t,e,n){var s=n,{customErrorMessage:r}=s,i=Tr(s,[\"customErrorMessage\"]);return(await qw(Od(t,e,te(N({},i),{method:gl.POST})),i)).body}async function RDe(t,i){var n=i,{customErrorMessage:e}=n,r=Tr(n,[\"customErrorMessage\"]);return(await qw(Od(t,null,te(N({},r),{method:gl.DELETE})),r)).body}async function xDe(t,e,{configuration:r,headers:i,jsonRequest:n,jsonResponse:s,method:o=gl.GET}){let a=typeof t==\"string\"?new Yw.URL(t):t,l=J_(a,{configuration:r});if(l.enableNetwork===!1)throw new Error(`Request to '${a.href}' has been blocked because of your configuration settings`);if(a.protocol===\"http:\"&&!WP.default.isMatch(a.hostname,r.get(\"unsafeHttpWhitelist\")))throw new Error(`Unsafe http requests must be explicitly whitelisted in your configuration (${a.hostname})`);let u={agent:{http:l.httpProxy?zP.default.httpOverHttp(q_(l.httpProxy)):vDe,https:l.httpsProxy?zP.default.httpsOverHttp(q_(l.httpsProxy)):SDe},headers:i,method:o};u.responseType=s?\"json\":\"buffer\",e!==null&&(Buffer.isBuffer(e)||!n&&typeof e==\"string\"?u.body=e:u.json=e);let g=r.get(\"httpTimeout\"),f=r.get(\"httpRetry\"),h=r.get(\"enableStrictSsl\"),p=l.caFilePath,m=l.httpsCertFilePath,y=l.httpsKeyFilePath,{default:Q}=await Promise.resolve().then(()=>ge(Gw())),S=p?await _P(p):void 0,x=m?await _P(m):void 0,M=y?await _P(y):void 0,Y=Q.extend(N({timeout:{socket:g},retry:f,https:{rejectUnauthorized:h,certificateAuthority:S,certificate:x,key:M}},u));return r.getLimit(\"networkConcurrency\")(()=>Y(a))}var Zt={};ft(Zt,{PackageManager:()=>hn,detectPackageManager:()=>s6,executePackageAccessibleBinary:()=>c6,executePackageScript:()=>aB,executePackageShellcode:()=>uD,executeWorkspaceAccessibleBinary:()=>XRe,executeWorkspaceLifecycleScript:()=>l6,executeWorkspaceScript:()=>A6,getPackageAccessibleBinaries:()=>AB,getWorkspaceAccessibleBinaries:()=>a6,hasPackageScript:()=>zRe,hasWorkspaceScript:()=>cD,makeScriptEnv:()=>Yd,maybeExecuteWorkspaceLifecycleScript:()=>VRe,prepareExternalProject:()=>WRe});var Md={};ft(Md,{getLibzipPromise:()=>fn,getLibzipSync:()=>X_});var V_=ge(z_());var 
fl=[\"number\",\"number\"],$P;(function(L){L[L.ZIP_ER_OK=0]=\"ZIP_ER_OK\",L[L.ZIP_ER_MULTIDISK=1]=\"ZIP_ER_MULTIDISK\",L[L.ZIP_ER_RENAME=2]=\"ZIP_ER_RENAME\",L[L.ZIP_ER_CLOSE=3]=\"ZIP_ER_CLOSE\",L[L.ZIP_ER_SEEK=4]=\"ZIP_ER_SEEK\",L[L.ZIP_ER_READ=5]=\"ZIP_ER_READ\",L[L.ZIP_ER_WRITE=6]=\"ZIP_ER_WRITE\",L[L.ZIP_ER_CRC=7]=\"ZIP_ER_CRC\",L[L.ZIP_ER_ZIPCLOSED=8]=\"ZIP_ER_ZIPCLOSED\",L[L.ZIP_ER_NOENT=9]=\"ZIP_ER_NOENT\",L[L.ZIP_ER_EXISTS=10]=\"ZIP_ER_EXISTS\",L[L.ZIP_ER_OPEN=11]=\"ZIP_ER_OPEN\",L[L.ZIP_ER_TMPOPEN=12]=\"ZIP_ER_TMPOPEN\",L[L.ZIP_ER_ZLIB=13]=\"ZIP_ER_ZLIB\",L[L.ZIP_ER_MEMORY=14]=\"ZIP_ER_MEMORY\",L[L.ZIP_ER_CHANGED=15]=\"ZIP_ER_CHANGED\",L[L.ZIP_ER_COMPNOTSUPP=16]=\"ZIP_ER_COMPNOTSUPP\",L[L.ZIP_ER_EOF=17]=\"ZIP_ER_EOF\",L[L.ZIP_ER_INVAL=18]=\"ZIP_ER_INVAL\",L[L.ZIP_ER_NOZIP=19]=\"ZIP_ER_NOZIP\",L[L.ZIP_ER_INTERNAL=20]=\"ZIP_ER_INTERNAL\",L[L.ZIP_ER_INCONS=21]=\"ZIP_ER_INCONS\",L[L.ZIP_ER_REMOVE=22]=\"ZIP_ER_REMOVE\",L[L.ZIP_ER_DELETED=23]=\"ZIP_ER_DELETED\",L[L.ZIP_ER_ENCRNOTSUPP=24]=\"ZIP_ER_ENCRNOTSUPP\",L[L.ZIP_ER_RDONLY=25]=\"ZIP_ER_RDONLY\",L[L.ZIP_ER_NOPASSWD=26]=\"ZIP_ER_NOPASSWD\",L[L.ZIP_ER_WRONGPASSWD=27]=\"ZIP_ER_WRONGPASSWD\",L[L.ZIP_ER_OPNOTSUPP=28]=\"ZIP_ER_OPNOTSUPP\",L[L.ZIP_ER_INUSE=29]=\"ZIP_ER_INUSE\",L[L.ZIP_ER_TELL=30]=\"ZIP_ER_TELL\",L[L.ZIP_ER_COMPRESSED_DATA=31]=\"ZIP_ER_COMPRESSED_DATA\"})($P||($P={}));var __=t=>({get HEAP8(){return t.HEAP8},get HEAPU8(){return t.HEAPU8},errors:$P,SEEK_SET:0,SEEK_CUR:1,SEEK_END:2,ZIP_CHECKCONS:4,ZIP_CREATE:1,ZIP_EXCL:2,ZIP_TRUNCATE:8,ZIP_RDONLY:16,ZIP_FL_OVERWRITE:8192,ZIP_FL_COMPRESSED:4,ZIP_OPSYS_DOS:0,ZIP_OPSYS_AMIGA:1,ZIP_OPSYS_OPENVMS:2,ZIP_OPSYS_UNIX:3,ZIP_OPSYS_VM_CMS:4,ZIP_OPSYS_ATARI_ST:5,ZIP_OPSYS_OS_2:6,ZIP_OPSYS_MACINTOSH:7,ZIP_OPSYS_Z_SYSTEM:8,ZIP_OPSYS_CPM:9,ZIP_OPSYS_WINDOWS_NTFS:10,ZIP_OPSYS_MVS:11,ZIP_OPSYS_VSE:12,ZIP_OPSYS_ACORN_RISC:13,ZIP_OPSYS_VFAT:14,ZIP_OPSYS_ALTERNATE_MVS:15,ZIP_OPSYS_BEOS:16,ZIP_OPSYS_TANDEM:17,ZIP_OPSYS_OS_400:18,ZIP_OPSYS_OS_X:19,ZIP_CM_DEFAULT:-1,ZIP_CM_STORE:0,ZIP_CM_DEFLATE:8,uint08S:t._malloc(1),uint16S:t._malloc(2),uint32S:t._malloc(4),uint64S:t._malloc(8),malloc:t._malloc,free:t._free,getValue:t.getValue,open:t.cwrap(\"zip_open\",\"number\",[\"string\",\"number\",\"number\"]),openFromSource:t.cwrap(\"zip_open_from_source\",\"number\",[\"number\",\"number\",\"number\"]),close:t.cwrap(\"zip_close\",\"number\",[\"number\"]),discard:t.cwrap(\"zip_discard\",null,[\"number\"]),getError:t.cwrap(\"zip_get_error\",\"number\",[\"number\"]),getName:t.cwrap(\"zip_get_name\",\"string\",[\"number\",\"number\",\"number\"]),getNumEntries:t.cwrap(\"zip_get_num_entries\",\"number\",[\"number\",\"number\"]),delete:t.cwrap(\"zip_delete\",\"number\",[\"number\",\"number\"]),stat:t.cwrap(\"zip_stat\",\"number\",[\"number\",\"string\",\"number\",\"number\"]),statIndex:t.cwrap(\"zip_stat_index\",\"number\",[\"number\",...fl,\"number\",\"number\"]),fopen:t.cwrap(\"zip_fopen\",\"number\",[\"number\",\"string\",\"number\"]),fopenIndex:t.cwrap(\"zip_fopen_index\",\"number\",[\"number\",...fl,\"number\"]),fread:t.cwrap(\"zip_fread\",\"number\",[\"number\",\"number\",\"number\",\"number\"]),fclose:t.cwrap(\"zip_fclose\",\"number\",[\"number\"]),dir:{add:t.cwrap(\"zip_dir_add\",\"number\",[\"number\",\"string\"])},file:{add:t.cwrap(\"zip_file_add\",\"number\",[\"number\",\"string\",\"number\",\"number\"]),getError:t.cwrap(\"zip_file_get_error\",\"number\",[\"number\"]),getExternalAttributes:t.cwrap(\"zip_file_get_external_attributes\",\"number\",[\"number\",...fl,\"number\",\"number\",\"number\"]),setExtern
alAttributes:t.cwrap(\"zip_file_set_external_attributes\",\"number\",[\"number\",...fl,\"number\",\"number\",\"number\"]),setMtime:t.cwrap(\"zip_file_set_mtime\",\"number\",[\"number\",...fl,\"number\",\"number\"]),setCompression:t.cwrap(\"zip_set_file_compression\",\"number\",[\"number\",...fl,\"number\",\"number\"])},ext:{countSymlinks:t.cwrap(\"zip_ext_count_symlinks\",\"number\",[\"number\"])},error:{initWithCode:t.cwrap(\"zip_error_init_with_code\",null,[\"number\",\"number\"]),strerror:t.cwrap(\"zip_error_strerror\",\"string\",[\"number\"])},name:{locate:t.cwrap(\"zip_name_locate\",\"number\",[\"number\",\"string\",\"number\"])},source:{fromUnattachedBuffer:t.cwrap(\"zip_source_buffer_create\",\"number\",[\"number\",\"number\",\"number\",\"number\"]),fromBuffer:t.cwrap(\"zip_source_buffer\",\"number\",[\"number\",\"number\",...fl,\"number\"]),free:t.cwrap(\"zip_source_free\",null,[\"number\"]),keep:t.cwrap(\"zip_source_keep\",null,[\"number\"]),open:t.cwrap(\"zip_source_open\",\"number\",[\"number\"]),close:t.cwrap(\"zip_source_close\",\"number\",[\"number\"]),seek:t.cwrap(\"zip_source_seek\",\"number\",[\"number\",...fl,\"number\"]),tell:t.cwrap(\"zip_source_tell\",\"number\",[\"number\"]),read:t.cwrap(\"zip_source_read\",\"number\",[\"number\",\"number\",\"number\"]),error:t.cwrap(\"zip_source_error\",\"number\",[\"number\"]),setMtime:t.cwrap(\"zip_source_set_mtime\",\"number\",[\"number\",\"number\"])},struct:{stat:t.cwrap(\"zipstruct_stat\",\"number\",[]),statS:t.cwrap(\"zipstruct_statS\",\"number\",[]),statName:t.cwrap(\"zipstruct_stat_name\",\"string\",[\"number\"]),statIndex:t.cwrap(\"zipstruct_stat_index\",\"number\",[\"number\"]),statSize:t.cwrap(\"zipstruct_stat_size\",\"number\",[\"number\"]),statCompSize:t.cwrap(\"zipstruct_stat_comp_size\",\"number\",[\"number\"]),statCompMethod:t.cwrap(\"zipstruct_stat_comp_method\",\"number\",[\"number\"]),statMtime:t.cwrap(\"zipstruct_stat_mtime\",\"number\",[\"number\"]),statCrc:t.cwrap(\"zipstruct_stat_crc\",\"number\",[\"number\"]),error:t.cwrap(\"zipstruct_error\",\"number\",[]),errorS:t.cwrap(\"zipstruct_errorS\",\"number\",[]),errorCodeZip:t.cwrap(\"zipstruct_error_code_zip\",\"number\",[\"number\"])}});var eD=null;function X_(){return eD===null&&(eD=__((0,V_.default)())),eD}async function fn(){return X_()}var Kd={};ft(Kd,{ShellError:()=>Ts,execute:()=>eB,globUtils:()=>Ww});var l5=ge(IS()),c5=ge(require(\"os\")),ns=ge(require(\"stream\")),u5=ge(require(\"util\"));var Ts=class extends Error{constructor(e){super(e);this.name=\"ShellError\"}};var Ww={};ft(Ww,{fastGlobOptions:()=>e5,isBraceExpansion:()=>t5,isGlobPattern:()=>FDe,match:()=>NDe,micromatchOptions:()=>_w});var Z_=ge(tw()),$_=ge(require(\"fs\")),zw=ge(ts()),_w={strictBrackets:!0},e5={onlyDirectories:!1,onlyFiles:!1};function FDe(t){if(!zw.default.scan(t,_w).isGlob)return!1;try{zw.default.parse(t,_w)}catch{return!1}return!0}function NDe(t,{cwd:e,baseFs:r}){return(0,Z_.default)(t,te(N({},e5),{cwd:j.fromPortablePath(e),fs:VE($_.default,new _h(r))}))}function t5(t){return zw.default.scan(t,_w).isBrace}var r5=ge(LQ()),ta=ge(require(\"stream\")),i5=ge(require(\"string_decoder\")),Fn;(function(i){i[i.STDIN=0]=\"STDIN\",i[i.STDOUT=1]=\"STDOUT\",i[i.STDERR=2]=\"STDERR\"})(Fn||(Fn={}));var qc=new Set;function tD(){}function rD(){for(let t of qc)t.kill()}function n5(t,e,r,i){return n=>{let s=n[0]instanceof ta.Transform?\"pipe\":n[0],o=n[1]instanceof ta.Transform?\"pipe\":n[1],a=n[2]instanceof ta.Transform?\"pipe\":n[2],l=(0,r5.default)(t,e,te(N({},i),{stdio:[s,o,a]}));return 
qc.add(l),qc.size===1&&(process.on(\"SIGINT\",tD),process.on(\"SIGTERM\",rD)),n[0]instanceof ta.Transform&&n[0].pipe(l.stdin),n[1]instanceof ta.Transform&&l.stdout.pipe(n[1],{end:!1}),n[2]instanceof ta.Transform&&l.stderr.pipe(n[2],{end:!1}),{stdin:l.stdin,promise:new Promise(c=>{l.on(\"error\",u=>{switch(qc.delete(l),qc.size===0&&(process.off(\"SIGINT\",tD),process.off(\"SIGTERM\",rD)),u.code){case\"ENOENT\":n[2].write(`command not found: ${t}\n`),c(127);break;case\"EACCES\":n[2].write(`permission denied: ${t}\n`),c(128);break;default:n[2].write(`uncaught error: ${u.message}\n`),c(1);break}}),l.on(\"exit\",u=>{qc.delete(l),qc.size===0&&(process.off(\"SIGINT\",tD),process.off(\"SIGTERM\",rD)),c(u!==null?u:129)})})}}}function s5(t){return e=>{let r=e[0]===\"pipe\"?new ta.PassThrough:e[0];return{stdin:r,promise:Promise.resolve().then(()=>t({stdin:r,stdout:e[1],stderr:e[2]}))}}}var po=class{constructor(e){this.stream=e}close(){}get(){return this.stream}},o5=class{constructor(){this.stream=null}close(){if(this.stream===null)throw new Error(\"Assertion failed: No stream attached\");this.stream.end()}attach(e){this.stream=e}get(){if(this.stream===null)throw new Error(\"Assertion failed: No stream attached\");return this.stream}},Ud=class{constructor(e,r){this.stdin=null;this.stdout=null;this.stderr=null;this.pipe=null;this.ancestor=e,this.implementation=r}static start(e,{stdin:r,stdout:i,stderr:n}){let s=new Ud(null,e);return s.stdin=r,s.stdout=i,s.stderr=n,s}pipeTo(e,r=1){let i=new Ud(this,e),n=new o5;return i.pipe=n,i.stdout=this.stdout,i.stderr=this.stderr,(r&1)==1?this.stdout=n:this.ancestor!==null&&(this.stderr=this.ancestor.stdout),(r&2)==2?this.stderr=n:this.ancestor!==null&&(this.stderr=this.ancestor.stderr),i}async exec(){let e=[\"ignore\",\"ignore\",\"ignore\"];if(this.pipe)e[0]=\"pipe\";else{if(this.stdin===null)throw new Error(\"Assertion failed: No input stream registered\");e[0]=this.stdin.get()}let r;if(this.stdout===null)throw new Error(\"Assertion failed: No output stream registered\");r=this.stdout,e[1]=r.get();let i;if(this.stderr===null)throw new Error(\"Assertion failed: No error stream registered\");i=this.stderr,e[2]=i.get();let n=this.implementation(e);return this.pipe&&this.pipe.attach(n.stdin),await n.promise.then(s=>(r.close(),i.close(),s))}async run(){let e=[];for(let i=this;i;i=i.ancestor)e.push(i.exec());return(await Promise.all(e))[0]}};function Vw(t,e){return Ud.start(t,e)}function a5(t,e=null){let r=new ta.PassThrough,i=new i5.StringDecoder,n=\"\";return r.on(\"data\",s=>{let o=i.write(s),a;do if(a=o.indexOf(`\n`),a!==-1){let l=n+o.substring(0,a);o=o.substring(a+1),n=\"\",t(e!==null?`${e} ${l}`:l)}while(a!==-1);n+=o}),r.on(\"end\",()=>{let s=i.end();s!==\"\"&&t(e!==null?`${e} ${s}`:s)}),r}function A5(t,{prefix:e}){return{stdout:a5(r=>t.stdout.write(`${r}\n`),t.stdout.isTTY?e:null),stderr:a5(r=>t.stderr.write(`${r}\n`),t.stderr.isTTY?e:null)}}var LDe=(0,u5.promisify)(setTimeout);var zi;(function(r){r[r.Readable=1]=\"Readable\",r[r.Writable=2]=\"Writable\"})(zi||(zi={}));function g5(t,e,r){let i=new ns.PassThrough({autoDestroy:!0});switch(t){case Fn.STDIN:(e&1)==1&&r.stdin.pipe(i,{end:!1}),(e&2)==2&&r.stdin instanceof ns.Writable&&i.pipe(r.stdin,{end:!1});break;case Fn.STDOUT:(e&1)==1&&r.stdout.pipe(i,{end:!1}),(e&2)==2&&i.pipe(r.stdout,{end:!1});break;case Fn.STDERR:(e&1)==1&&r.stderr.pipe(i,{end:!1}),(e&2)==2&&i.pipe(r.stderr,{end:!1});break;default:throw new Ts(`Bad file descriptor: \"${t}\"`)}return i}function Xw(t,e={}){let r=N(N({},t),e);return 
r.environment=N(N({},t.environment),e.environment),r.variables=N(N({},t.variables),e.variables),r}var TDe=new Map([[\"cd\",async([t=(0,c5.homedir)(),...e],r,i)=>{let n=k.resolve(i.cwd,j.toPortablePath(t));if(!(await r.baseFs.statPromise(n).catch(o=>{throw o.code===\"ENOENT\"?new Ts(`cd: no such file or directory: ${t}`):o})).isDirectory())throw new Ts(`cd: not a directory: ${t}`);return i.cwd=n,0}],[\"pwd\",async(t,e,r)=>(r.stdout.write(`${j.fromPortablePath(r.cwd)}\n`),0)],[\":\",async(t,e,r)=>0],[\"true\",async(t,e,r)=>0],[\"false\",async(t,e,r)=>1],[\"exit\",async([t,...e],r,i)=>i.exitCode=parseInt(t!=null?t:i.variables[\"?\"],10)],[\"echo\",async(t,e,r)=>(r.stdout.write(`${t.join(\" \")}\n`),0)],[\"sleep\",async([t],e,r)=>{if(typeof t==\"undefined\")throw new Ts(\"sleep: missing operand\");let i=Number(t);if(Number.isNaN(i))throw new Ts(`sleep: invalid time interval '${t}'`);return await LDe(1e3*i,0)}],[\"__ysh_run_procedure\",async(t,e,r)=>{let i=r.procedures[t[0]];return await Vw(i,{stdin:new po(r.stdin),stdout:new po(r.stdout),stderr:new po(r.stderr)}).run()}],[\"__ysh_set_redirects\",async(t,e,r)=>{let i=r.stdin,n=r.stdout,s=r.stderr,o=[],a=[],l=[],c=0;for(;t[c]!==\"--\";){let g=t[c++],{type:f,fd:h}=JSON.parse(g),p=S=>{switch(h){case null:case 0:o.push(S);break;default:throw new Error(`Unsupported file descriptor: \"${h}\"`)}},m=S=>{switch(h){case null:case 1:a.push(S);break;case 2:l.push(S);break;default:throw new Error(`Unsupported file descriptor: \"${h}\"`)}},y=Number(t[c++]),Q=c+y;for(let S=c;S<Q;++c,++S)switch(f){case\"<\":p(()=>e.baseFs.createReadStream(k.resolve(r.cwd,j.toPortablePath(t[S]))));break;case\"<<<\":p(()=>{let x=new ns.PassThrough;return process.nextTick(()=>{x.write(`${t[S]}\n`),x.end()}),x});break;case\"<&\":p(()=>g5(Number(t[S]),1,r));break;case\">\":case\">>\":{let x=k.resolve(r.cwd,j.toPortablePath(t[S]));m(x===\"/dev/null\"?new ns.Writable({autoDestroy:!0,emitClose:!0,write(M,Y,U){setImmediate(U)}}):e.baseFs.createWriteStream(x,f===\">>\"?{flags:\"a\"}:void 0))}break;case\">&\":m(g5(Number(t[S]),2,r));break;default:throw new Error(`Assertion failed: Unsupported redirection type: \"${f}\"`)}}if(o.length>0){let g=new ns.PassThrough;i=g;let f=h=>{if(h===o.length)g.end();else{let p=o[h]();p.pipe(g,{end:!1}),p.on(\"end\",()=>{f(h+1)})}};f(0)}if(a.length>0){let g=new ns.PassThrough;n=g;for(let f of a)g.pipe(f)}if(l.length>0){let g=new ns.PassThrough;s=g;for(let f of l)g.pipe(f)}let u=await Vw(Hd(t.slice(c+1),e,r),{stdin:new po(i),stdout:new po(n),stderr:new po(s)}).run();return await Promise.all(a.map(g=>new Promise((f,h)=>{g.on(\"error\",p=>{h(p)}),g.on(\"close\",()=>{f()}),g.end()}))),await Promise.all(l.map(g=>new Promise((f,h)=>{g.on(\"error\",p=>{h(p)}),g.on(\"close\",()=>{f()}),g.end()}))),u}]]);async function ODe(t,e,r){let i=[],n=new ns.PassThrough;return n.on(\"data\",s=>i.push(s)),await Zw(t,e,Xw(r,{stdout:n})),Buffer.concat(i).toString().replace(/[\\r\\n]+$/,\"\")}async function f5(t,e,r){let i=t.map(async s=>{let o=await iA(s.args,e,r);return{name:s.name,value:o.join(\" \")}});return(await Promise.all(i)).reduce((s,o)=>(s[o.name]=o.value,s),{})}function $w(t){return t.match(/[^ \\r\\n\\t]+/g)||[]}async function h5(t,e,r,i,n=i){switch(t.name){case\"$\":i(String(process.pid));break;case\"#\":i(String(e.args.length));break;case\"@\":if(t.quoted)for(let s of e.args)n(s);else for(let s of e.args){let o=$w(s);for(let a=0;a<o.length-1;++a)n(o[a]);i(o[o.length-1])}break;case\"*\":{let s=e.args.join(\" \");if(t.quoted)i(s);else for(let o of 
$w(s))n(o)}break;case\"PPID\":i(String(process.ppid));break;case\"RANDOM\":i(String(Math.floor(Math.random()*32768)));break;default:{let s=parseInt(t.name,10),o;if(Number.isFinite(s))if(s>=0&&s<e.args.length)o=e.args[s];else if(t.defaultValue)o=(await iA(t.defaultValue,e,r)).join(\" \");else if(t.alternativeValue)o=(await iA(t.alternativeValue,e,r)).join(\" \");else throw new Ts(`Unbound argument #${s}`);else if(Object.prototype.hasOwnProperty.call(r.variables,t.name))o=r.variables[t.name];else if(Object.prototype.hasOwnProperty.call(r.environment,t.name))o=r.environment[t.name];else if(t.defaultValue)o=(await iA(t.defaultValue,e,r)).join(\" \");else throw new Ts(`Unbound variable \"${t.name}\"`);if(typeof o!=\"undefined\"&&t.alternativeValue&&(o=(await iA(t.alternativeValue,e,r)).join(\" \")),t.quoted)i(o);else{let a=$w(o);for(let c=0;c<a.length-1;++c)n(a[c]);let l=a[a.length-1];typeof l!=\"undefined\"&&i(l)}}break}}var MDe={addition:(t,e)=>t+e,subtraction:(t,e)=>t-e,multiplication:(t,e)=>t*e,division:(t,e)=>Math.trunc(t/e)};async function jd(t,e,r){if(t.type===\"number\"){if(Number.isInteger(t.value))return t.value;throw new Error(`Invalid number: \"${t.value}\", only integers are allowed`)}else if(t.type===\"variable\"){let i=[];await h5(te(N({},t),{quoted:!0}),e,r,s=>i.push(s));let n=Number(i.join(\" \"));return Number.isNaN(n)?jd({type:\"variable\",name:i.join(\" \")},e,r):jd({type:\"number\",value:n},e,r)}else return MDe[t.type](await jd(t.left,e,r),await jd(t.right,e,r))}async function iA(t,e,r){let i=new Map,n=[],s=[],o=u=>{s.push(u)},a=()=>{s.length>0&&n.push(s.join(\"\")),s=[]},l=u=>{o(u),a()},c=(u,g,f)=>{let h=JSON.stringify({type:u,fd:g}),p=i.get(h);typeof p==\"undefined\"&&i.set(h,p=[]),p.push(f)};for(let u of t){let g=!1;switch(u.type){case\"redirection\":{let f=await iA(u.args,e,r);for(let h of f)c(u.subtype,u.fd,h)}break;case\"argument\":for(let f of u.segments)switch(f.type){case\"text\":o(f.text);break;case\"glob\":o(f.pattern),g=!0;break;case\"shell\":{let h=await ODe(f.shell,e,r);if(f.quoted)o(h);else{let p=$w(h);for(let m=0;m<p.length-1;++m)l(p[m]);o(p[p.length-1])}}break;case\"variable\":await h5(f,e,r,o,l);break;case\"arithmetic\":o(String(await jd(f.arithmetic,e,r)));break}break}if(a(),g){let f=n.pop();if(typeof f==\"undefined\")throw new Error(\"Assertion failed: Expected a glob pattern to have been set\");let h=await e.glob.match(f,{cwd:r.cwd,baseFs:e.baseFs});if(h.length===0){let p=t5(f)?\". Note: Brace expansion of arbitrary strings isn't currently supported. 
For more details, please read this issue: https://github.com/yarnpkg/berry/issues/22\":\"\";throw new Ts(`No matches found: \"${f}\"${p}`)}for(let p of h.sort())l(p)}}if(i.size>0){let u=[];for(let[g,f]of i.entries())u.splice(u.length,0,g,String(f.length),...f);n.splice(0,0,\"__ysh_set_redirects\",...u,\"--\")}return n}function Hd(t,e,r){e.builtins.has(t[0])||(t=[\"command\",...t]);let i=j.fromPortablePath(r.cwd),n=r.environment;typeof n.PWD!=\"undefined\"&&(n=te(N({},n),{PWD:i}));let[s,...o]=t;if(s===\"command\")return n5(o[0],o.slice(1),e,{cwd:i,env:n});let a=e.builtins.get(s);if(typeof a==\"undefined\")throw new Error(`Assertion failed: A builtin should exist for \"${s}\"`);return s5(async({stdin:l,stdout:c,stderr:u})=>{let{stdin:g,stdout:f,stderr:h}=r;r.stdin=l,r.stdout=c,r.stderr=u;try{return await a(o,e,r)}finally{r.stdin=g,r.stdout=f,r.stderr=h}})}function UDe(t,e,r){return i=>{let n=new ns.PassThrough,s=Zw(t,e,Xw(r,{stdin:n}));return{stdin:n,promise:s}}}function KDe(t,e,r){return i=>{let n=new ns.PassThrough,s=Zw(t,e,r);return{stdin:n,promise:s}}}function p5(t,e,r,i){if(e.length===0)return t;{let n;do n=String(Math.random());while(Object.prototype.hasOwnProperty.call(i.procedures,n));return i.procedures=N({},i.procedures),i.procedures[n]=t,Hd([...e,\"__ysh_run_procedure\",n],r,i)}}async function d5(t,e,r){let i=t,n=null,s=null;for(;i;){let o=i.then?N({},r):r,a;switch(i.type){case\"command\":{let l=await iA(i.args,e,r),c=await f5(i.envs,e,r);a=i.envs.length?Hd(l,e,Xw(o,{environment:c})):Hd(l,e,o)}break;case\"subshell\":{let l=await iA(i.args,e,r),c=UDe(i.subshell,e,o);a=p5(c,l,e,o)}break;case\"group\":{let l=await iA(i.args,e,r),c=KDe(i.group,e,o);a=p5(c,l,e,o)}break;case\"envs\":{let l=await f5(i.envs,e,r);o.environment=N(N({},o.environment),l),a=Hd([\"true\"],e,o)}break}if(typeof a==\"undefined\")throw new Error(\"Assertion failed: An action should have been generated\");if(n===null)s=Vw(a,{stdin:new po(o.stdin),stdout:new po(o.stdout),stderr:new po(o.stderr)});else{if(s===null)throw new Error(\"Assertion failed: The execution pipeline should have been setup\");switch(n){case\"|\":s=s.pipeTo(a,Fn.STDOUT);break;case\"|&\":s=s.pipeTo(a,Fn.STDOUT|Fn.STDERR);break}}i.then?(n=i.then.type,i=i.then.chain):i=null}if(s===null)throw new Error(\"Assertion failed: The execution pipeline should have been setup\");return await s.run()}async function HDe(t,e,r,{background:i=!1}={}){function n(s){let o=[\"#2E86AB\",\"#A23B72\",\"#F18F01\",\"#C73E1D\",\"#CCE2A3\"],a=o[s%o.length];return l5.default.hex(a)}if(i){let s=r.nextBackgroundJobIndex++,o=n(s),a=`[${s}]`,l=o(a),{stdout:c,stderr:u}=A5(r,{prefix:l});return r.backgroundJobs.push(d5(t,e,Xw(r,{stdout:c,stderr:u})).catch(g=>u.write(`${g.message}\n`)).finally(()=>{r.stdout.isTTY&&r.stdout.write(`Job ${l}, '${o(_u(t))}' has ended\n`)})),0}return await d5(t,e,r)}async function jDe(t,e,r,{background:i=!1}={}){let n,s=a=>{n=a,r.variables[\"?\"]=String(a)},o=async a=>{try{return await HDe(a.chain,e,r,{background:i&&typeof a.then==\"undefined\"})}catch(l){if(!(l instanceof Ts))throw l;return r.stderr.write(`${l.message}\n`),1}};for(s(await o(t));t.then;){if(r.exitCode!==null)return r.exitCode;switch(t.then.type){case\"&&\":n===0&&s(await o(t.then.line));break;case\"||\":n!==0&&s(await o(t.then.line));break;default:throw new Error(`Assertion failed: Unsupported command type: \"${t.then.type}\"`)}t=t.then.line}return n}async function Zw(t,e,r){let i=r.backgroundJobs;r.backgroundJobs=[];let n=0;for(let{command:s,type:o}of t){if(n=await 
jDe(s,e,r,{background:o===\"&\"}),r.exitCode!==null)return r.exitCode;r.variables[\"?\"]=String(n)}return await Promise.all(r.backgroundJobs),r.backgroundJobs=i,n}function C5(t){switch(t.type){case\"variable\":return t.name===\"@\"||t.name===\"#\"||t.name===\"*\"||Number.isFinite(parseInt(t.name,10))||\"defaultValue\"in t&&!!t.defaultValue&&t.defaultValue.some(e=>Gd(e))||\"alternativeValue\"in t&&!!t.alternativeValue&&t.alternativeValue.some(e=>Gd(e));case\"arithmetic\":return iD(t.arithmetic);case\"shell\":return nD(t.shell);default:return!1}}function Gd(t){switch(t.type){case\"redirection\":return t.args.some(e=>Gd(e));case\"argument\":return t.segments.some(e=>C5(e));default:throw new Error(`Assertion failed: Unsupported argument type: \"${t.type}\"`)}}function iD(t){switch(t.type){case\"variable\":return C5(t);case\"number\":return!1;default:return iD(t.left)||iD(t.right)}}function nD(t){return t.some(({command:e})=>{for(;e;){let r=e.chain;for(;r;){let i;switch(r.type){case\"subshell\":i=nD(r.subshell);break;case\"command\":i=r.envs.some(n=>n.args.some(s=>Gd(s)))||r.args.some(n=>Gd(n));break}if(i)return!0;if(!r.then)break;r=r.then.chain}if(!e.then)break;e=e.then.line}return!1})}async function eB(t,e=[],{baseFs:r=new ar,builtins:i={},cwd:n=j.toPortablePath(process.cwd()),env:s=process.env,stdin:o=process.stdin,stdout:a=process.stdout,stderr:l=process.stderr,variables:c={},glob:u=Ww}={}){let g={};for(let[p,m]of Object.entries(s))typeof m!=\"undefined\"&&(g[p]=m);let f=new Map(TDe);for(let[p,m]of Object.entries(i))f.set(p,m);o===null&&(o=new ns.PassThrough,o.end());let h=ZE(t,u);if(!nD(h)&&h.length>0&&e.length>0){let{command:p}=h[h.length-1];for(;p.then;)p=p.then.line;let m=p.chain;for(;m.then;)m=m.then.chain;m.type===\"command\"&&(m.args=m.args.concat(e.map(y=>({type:\"argument\",segments:[{type:\"text\",text:y}]}))))}return await Zw(h,{args:e,baseFs:r,builtins:f,initialStdin:o,initialStdout:a,initialStderr:l,glob:u},{cwd:n,environment:g,exitCode:null,procedures:{},stdin:o,stdout:a,stderr:l,variables:Object.assign({},c,{[\"?\"]:0}),nextBackgroundJobIndex:1,backgroundJobs:[]})}var i6=ge(tB()),n6=ge(ag()),Jc=ge(require(\"stream\"));var Z5=ge(X5()),nB=ge(hc());var $5=[\"\\u280B\",\"\\u2819\",\"\\u2839\",\"\\u2838\",\"\\u283C\",\"\\u2834\",\"\\u2826\",\"\\u2827\",\"\\u2807\",\"\\u280F\"],e6=80,KRe=new Set([$.FETCH_NOT_CACHED,$.UNUSED_CACHE_ENTRY]),HRe=5,sB=nB.default.GITHUB_ACTIONS?{start:t=>`::group::${t}\n`,end:t=>`::endgroup::\n`}:nB.default.TRAVIS?{start:t=>`travis_fold:start:${t}\n`,end:t=>`travis_fold:end:${t}\n`}:nB.default.GITLAB?{start:t=>`section_start:${Math.floor(Date.now()/1e3)}:${t.toLowerCase().replace(/\\W+/g,\"_\")}[collapsed=true]\\r\u001b[0K${t}\n`,end:t=>`section_end:${Math.floor(Date.now()/1e3)}:${t.toLowerCase().replace(/\\W+/g,\"_\")}\\r\u001b[0K`}:null,t6=new Date,jRe=[\"iTerm.app\",\"Apple_Terminal\"].includes(process.env.TERM_PROGRAM)||!!process.env.WT_SESSION,GRe=t=>t,oB=GRe({patrick:{date:[17,3],chars:[\"\\u{1F340}\",\"\\u{1F331}\"],size:40},simba:{date:[19,7],chars:[\"\\u{1F981}\",\"\\u{1F334}\"],size:40},jack:{date:[31,10],chars:[\"\\u{1F383}\",\"\\u{1F987}\"],size:40},hogsfather:{date:[31,12],chars:[\"\\u{1F389}\",\"\\u{1F384}\"],size:40},default:{chars:[\"=\",\"-\"],size:80}}),YRe=jRe&&Object.keys(oB).find(t=>{let e=oB[t];return!(e.date&&(e.date[0]!==t6.getDate()||e.date[1]!==t6.getMonth()+1))})||\"default\";function r6(t,{configuration:e,json:r}){if(!e.get(\"enableMessageNames\"))return\"\";let n=YA(t===null?0:t);return!r&&t===null?et(e,n,\"grey\"):n}function 
lD(t,{configuration:e,json:r}){let i=r6(t,{configuration:e,json:r});if(!i||t===null||t===$.UNNAMED)return i;let n=$[t],s=`https://yarnpkg.com/advanced/error-codes#${i}---${n}`.toLowerCase();return Fg(e,i,s)}var Je=class extends Ji{constructor({configuration:e,stdout:r,json:i=!1,includeFooter:n=!0,includeLogs:s=!i,includeInfos:o=s,includeWarnings:a=s,forgettableBufferSize:l=HRe,forgettableNames:c=new Set}){super();this.uncommitted=new Set;this.cacheHitCount=0;this.cacheMissCount=0;this.lastCacheMiss=null;this.warningCount=0;this.errorCount=0;this.startTime=Date.now();this.indent=0;this.progress=new Map;this.progressTime=0;this.progressFrame=0;this.progressTimeout=null;this.progressStyle=null;this.progressMaxScaledSize=null;this.forgettableLines=[];if(nd(this,{configuration:e}),this.configuration=e,this.forgettableBufferSize=l,this.forgettableNames=new Set([...c,...KRe]),this.includeFooter=n,this.includeInfos=o,this.includeWarnings=a,this.json=i,this.stdout=r,e.get(\"enableProgressBars\")&&!i&&r.isTTY&&r.columns>22){let u=e.get(\"progressBarStyle\")||YRe;if(!Object.prototype.hasOwnProperty.call(oB,u))throw new Error(\"Assertion failed: Invalid progress bar style\");this.progressStyle=oB[u];let g=\"\\u27A4 YN0000: \\u250C \".length,f=Math.max(0,Math.min(r.columns-g,80));this.progressMaxScaledSize=Math.floor(this.progressStyle.size*f/80)}}static async start(e,r){let i=new this(e),n=process.emitWarning;process.emitWarning=(s,o)=>{if(typeof s!=\"string\"){let l=s;s=l.message,o=o!=null?o:l.name}let a=typeof o!=\"undefined\"?`${o}: ${s}`:s;i.reportWarning($.UNNAMED,a)};try{await r(i)}catch(s){i.reportExceptionOnce(s)}finally{await i.finalize(),process.emitWarning=n}return i}hasErrors(){return this.errorCount>0}exitCode(){return this.hasErrors()?1:0}reportCacheHit(e){this.cacheHitCount+=1}reportCacheMiss(e,r){this.lastCacheMiss=e,this.cacheMissCount+=1,typeof r!=\"undefined\"&&!this.configuration.get(\"preferAggregateCacheInfo\")&&this.reportInfo($.FETCH_NOT_CACHED,r)}startSectionSync({reportHeader:e,reportFooter:r,skipIfEmpty:i},n){let s={committed:!1,action:()=>{e==null||e()}};i?this.uncommitted.add(s):(s.action(),s.committed=!0);let o=Date.now();try{return n()}catch(a){throw this.reportExceptionOnce(a),a}finally{let a=Date.now();this.uncommitted.delete(s),s.committed&&(r==null||r(a-o))}}async startSectionPromise({reportHeader:e,reportFooter:r,skipIfEmpty:i},n){let s={committed:!1,action:()=>{e==null||e()}};i?this.uncommitted.add(s):(s.action(),s.committed=!0);let o=Date.now();try{return await n()}catch(a){throw this.reportExceptionOnce(a),a}finally{let a=Date.now();this.uncommitted.delete(s),s.committed&&(r==null||r(a-o))}}startTimerImpl(e,r,i){let n=typeof r==\"function\"?{}:r;return{cb:typeof r==\"function\"?r:i,reportHeader:()=>{this.reportInfo(null,`\\u250C ${e}`),this.indent+=1,sB!==null&&!this.json&&this.includeInfos&&this.stdout.write(sB.start(e))},reportFooter:o=>{this.indent-=1,sB!==null&&!this.json&&this.includeInfos&&this.stdout.write(sB.end(e)),this.configuration.get(\"enableTimers\")&&o>200?this.reportInfo(null,`\\u2514 Completed in ${et(this.configuration,o,Ge.DURATION)}`):this.reportInfo(null,\"\\u2514 Completed\")},skipIfEmpty:n.skipIfEmpty}}startTimerSync(e,r,i){let o=this.startTimerImpl(e,r,i),{cb:n}=o,s=Tr(o,[\"cb\"]);return this.startSectionSync(s,n)}async startTimerPromise(e,r,i){let o=this.startTimerImpl(e,r,i),{cb:n}=o,s=Tr(o,[\"cb\"]);return this.startSectionPromise(s,n)}async startCacheReport(e){let 
r=this.configuration.get(\"preferAggregateCacheInfo\")?{cacheHitCount:this.cacheHitCount,cacheMissCount:this.cacheMissCount}:null;try{return await e()}catch(i){throw this.reportExceptionOnce(i),i}finally{r!==null&&this.reportCacheChanges(r)}}reportSeparator(){this.indent===0?this.writeLineWithForgettableReset(\"\"):this.reportInfo(null,\"\")}reportInfo(e,r){if(!this.includeInfos)return;this.commit();let i=this.formatNameWithHyperlink(e),n=i?`${i}: `:\"\",s=`${et(this.configuration,\"\\u27A4\",\"blueBright\")} ${n}${this.formatIndent()}${r}`;if(this.json)this.reportJson({type:\"info\",name:e,displayName:this.formatName(e),indent:this.formatIndent(),data:r});else if(this.forgettableNames.has(e))if(this.forgettableLines.push(s),this.forgettableLines.length>this.forgettableBufferSize){for(;this.forgettableLines.length>this.forgettableBufferSize;)this.forgettableLines.shift();this.writeLines(this.forgettableLines,{truncate:!0})}else this.writeLine(s,{truncate:!0});else this.writeLineWithForgettableReset(s)}reportWarning(e,r){if(this.warningCount+=1,!this.includeWarnings)return;this.commit();let i=this.formatNameWithHyperlink(e),n=i?`${i}: `:\"\";this.json?this.reportJson({type:\"warning\",name:e,displayName:this.formatName(e),indent:this.formatIndent(),data:r}):this.writeLineWithForgettableReset(`${et(this.configuration,\"\\u27A4\",\"yellowBright\")} ${n}${this.formatIndent()}${r}`)}reportError(e,r){this.errorCount+=1,this.commit();let i=this.formatNameWithHyperlink(e),n=i?`${i}: `:\"\";this.json?this.reportJson({type:\"error\",name:e,displayName:this.formatName(e),indent:this.formatIndent(),data:r}):this.writeLineWithForgettableReset(`${et(this.configuration,\"\\u27A4\",\"redBright\")} ${n}${this.formatIndent()}${r}`,{truncate:!1})}reportProgress(e){if(this.progressStyle===null)return te(N({},Promise.resolve()),{stop:()=>{}});if(e.hasProgress&&e.hasTitle)throw new Error(\"Unimplemented: Progress bars can't have both progress and titles.\");let r=!1,i=Promise.resolve().then(async()=>{let s={progress:e.hasProgress?0:void 0,title:e.hasTitle?\"\":void 0};this.progress.set(e,{definition:s,lastScaledSize:e.hasProgress?-1:void 0,lastTitle:void 0}),this.refreshProgress({delta:-1});for await(let{progress:o,title:a}of e)r||s.progress===o&&s.title===a||(s.progress=o,s.title=a,this.refreshProgress());n()}),n=()=>{r||(r=!0,this.progress.delete(e),this.refreshProgress({delta:1}))};return te(N({},i),{stop:n})}reportJson(e){this.json&&this.writeLineWithForgettableReset(`${JSON.stringify(e)}`)}async finalize(){if(!this.includeFooter)return;let e=\"\";this.errorCount>0?e=\"Failed with errors\":this.warningCount>0?e=\"Done with warnings\":e=\"Done\";let r=et(this.configuration,Date.now()-this.startTime,Ge.DURATION),i=this.configuration.get(\"enableTimers\")?`${e} in ${r}`:e;this.errorCount>0?this.reportError($.UNNAMED,i):this.warningCount>0?this.reportWarning($.UNNAMED,i):this.reportInfo($.UNNAMED,i)}writeLine(e,{truncate:r}={}){this.clearProgress({clear:!0}),this.stdout.write(`${this.truncate(e,{truncate:r})}\n`),this.writeProgress()}writeLineWithForgettableReset(e,{truncate:r}={}){this.forgettableLines=[],this.writeLine(e,{truncate:r})}writeLines(e,{truncate:r}={}){this.clearProgress({delta:e.length});for(let i of e)this.stdout.write(`${this.truncate(i,{truncate:r})}\n`);this.writeProgress()}reportCacheChanges({cacheHitCount:e,cacheMissCount:r}){let i=this.cacheHitCount-e,n=this.cacheMissCount-r;if(i===0&&n===0)return;let s=\"\";this.cacheHitCount>1?s+=`${this.cacheHitCount} packages were already 
cached`:this.cacheHitCount===1?s+=\" - one package was already cached\":s+=\"No packages were cached\",this.cacheHitCount>0?this.cacheMissCount>1?s+=`, ${this.cacheMissCount} had to be fetched`:this.cacheMissCount===1&&(s+=`, one had to be fetched (${Bt(this.configuration,this.lastCacheMiss)})`):this.cacheMissCount>1?s+=` - ${this.cacheMissCount} packages had to be fetched`:this.cacheMissCount===1&&(s+=` - one package had to be fetched (${Bt(this.configuration,this.lastCacheMiss)})`),this.reportInfo($.FETCH_NOT_CACHED,s)}commit(){let e=this.uncommitted;this.uncommitted=new Set;for(let r of e)r.committed=!0,r.action()}clearProgress({delta:e=0,clear:r=!1}){this.progressStyle!==null&&this.progress.size+e>0&&(this.stdout.write(`\u001b[${this.progress.size+e}A`),(e>0||r)&&this.stdout.write(\"\u001b[0J\"))}writeProgress(){if(this.progressStyle===null||(this.progressTimeout!==null&&clearTimeout(this.progressTimeout),this.progressTimeout=null,this.progress.size===0))return;let e=Date.now();e-this.progressTime>e6&&(this.progressFrame=(this.progressFrame+1)%$5.length,this.progressTime=e);let r=$5[this.progressFrame];for(let i of this.progress.values()){let n=\"\";if(typeof i.lastScaledSize!=\"undefined\"){let l=this.progressStyle.chars[0].repeat(i.lastScaledSize),c=this.progressStyle.chars[1].repeat(this.progressMaxScaledSize-i.lastScaledSize);n=` ${l}${c}`}let s=this.formatName(null),o=s?`${s}: `:\"\",a=i.definition.title?` ${i.definition.title}`:\"\";this.stdout.write(`${et(this.configuration,\"\\u27A4\",\"blueBright\")} ${o}${r}${n}${a}\n`)}this.progressTimeout=setTimeout(()=>{this.refreshProgress({force:!0})},e6)}refreshProgress({delta:e=0,force:r=!1}={}){let i=!1,n=!1;if(r||this.progress.size===0)i=!0;else for(let s of this.progress.values()){let o=typeof s.definition.progress!=\"undefined\"?Math.trunc(this.progressMaxScaledSize*s.definition.progress):void 0,a=s.lastScaledSize;s.lastScaledSize=o;let l=s.lastTitle;if(s.lastTitle=s.definition.title,o!==a||(n=l!==s.definition.title)){i=!0;break}}i&&(this.clearProgress({delta:e,clear:n}),this.writeProgress())}truncate(e,{truncate:r}={}){return this.progressStyle===null&&(r=!1),typeof r==\"undefined\"&&(r=this.configuration.get(\"preferTruncatedLines\")),r&&(e=(0,Z5.default)(e,0,this.stdout.columns-1)),e}formatName(e){return r6(e,{configuration:this.configuration,json:this.json})}formatNameWithHyperlink(e){return lD(e,{configuration:this.configuration,json:this.json})}formatIndent(){return\"\\u2502 \".repeat(this.indent)}};var Ur=\"3.2.0\";var hn;(function(n){n.Yarn1=\"Yarn Classic\",n.Yarn2=\"Yarn\",n.Npm=\"npm\",n.Pnpm=\"pnpm\"})(hn||(hn={}));async function nA(t,e,r,i=[]){if(process.platform===\"win32\"){let n=`@goto #_undefined_# 2>NUL || @title %COMSPEC% & @setlocal & @\"${r}\" ${i.map(s=>`\"${s.replace('\"','\"\"')}\"`).join(\" \")} %*`;await K.writeFilePromise(k.format({dir:t,name:e,ext:\".cmd\"}),n)}await K.writeFilePromise(k.join(t,e),`#!/bin/sh\nexec \"${r}\" ${i.map(n=>`'${n.replace(/'/g,`'\"'\"'`)}'`).join(\" \")} \"$@\"\n`,{mode:493})}async function s6(t){let e=await At.tryFind(t);if(e==null?void 0:e.packageManager){let i=gw(e.packageManager);if(i==null?void 0:i.name){let n=`found ${JSON.stringify({packageManager:e.packageManager})} in manifest`,[s]=i.reference.split(\".\");switch(i.name){case\"yarn\":return{packageManager:Number(s)===1?hn.Yarn1:hn.Yarn2,reason:n};case\"npm\":return{packageManager:hn.Npm,reason:n};case\"pnpm\":return{packageManager:hn.Pnpm,reason:n}}}}let r;try{r=await 
K.readFilePromise(k.join(t,Pt.lockfile),\"utf8\")}catch{}return r!==void 0?r.match(/^__metadata:$/m)?{packageManager:hn.Yarn2,reason:'\"__metadata\" key found in yarn.lock'}:{packageManager:hn.Yarn1,reason:'\"__metadata\" key not found in yarn.lock, must be a Yarn classic lockfile'}:K.existsSync(k.join(t,\"package-lock.json\"))?{packageManager:hn.Npm,reason:`found npm's \"package-lock.json\" lockfile`}:K.existsSync(k.join(t,\"pnpm-lock.yaml\"))?{packageManager:hn.Pnpm,reason:`found pnpm's \"pnpm-lock.yaml\" lockfile`}:null}async function Yd({project:t,locator:e,binFolder:r,lifecycleScript:i}){var l,c;let n={};for(let[u,g]of Object.entries(process.env))typeof g!=\"undefined\"&&(n[u.toLowerCase()!==\"path\"?u:\"PATH\"]=g);let s=j.fromPortablePath(r);n.BERRY_BIN_FOLDER=j.fromPortablePath(s);let o=process.env.COREPACK_ROOT?j.join(process.env.COREPACK_ROOT,\"dist/yarn.js\"):process.argv[1];if(await Promise.all([nA(r,\"node\",process.execPath),...Ur!==null?[nA(r,\"run\",process.execPath,[o,\"run\"]),nA(r,\"yarn\",process.execPath,[o]),nA(r,\"yarnpkg\",process.execPath,[o]),nA(r,\"node-gyp\",process.execPath,[o,\"run\",\"--top-level\",\"node-gyp\"])]:[]]),t&&(n.INIT_CWD=j.fromPortablePath(t.configuration.startingCwd),n.PROJECT_CWD=j.fromPortablePath(t.cwd)),n.PATH=n.PATH?`${s}${j.delimiter}${n.PATH}`:`${s}`,n.npm_execpath=`${s}${j.sep}yarn`,n.npm_node_execpath=`${s}${j.sep}node`,e){if(!t)throw new Error(\"Assertion failed: Missing project\");let u=t.tryWorkspaceByLocator(e),g=u?(l=u.manifest.version)!=null?l:\"\":(c=t.storedPackages.get(e.locatorHash).version)!=null?c:\"\";n.npm_package_name=Ot(e),n.npm_package_version=g}let a=Ur!==null?`yarn/${Ur}`:`yarn/${Rg(\"@yarnpkg/core\").version}-core`;return n.npm_config_user_agent=`${a} npm/? node/${process.version} ${process.platform} ${process.arch}`,i&&(n.npm_lifecycle_event=i),t&&await t.configuration.triggerHook(u=>u.setupScriptEnvironment,t,n,async(u,g,f)=>await nA(r,qr(u),g,f)),n}var qRe=2,JRe=(0,n6.default)(qRe);async function WRe(t,e,{configuration:r,report:i,workspace:n=null,locator:s=null}){await JRe(async()=>{await K.mktempPromise(async o=>{let a=k.join(o,\"pack.log\"),l=null,{stdout:c,stderr:u}=r.getSubprocessStreams(a,{prefix:j.fromPortablePath(t),report:i}),g=s&&Xo(s)?gd(s):s,f=g?Ps(g):\"an external project\";c.write(`Packing ${f} from sources\n`);let h=await s6(t),p;h!==null?(c.write(`Using ${h.packageManager} for bootstrap. 
Reason: ${h.reason}\n\n`),p=h.packageManager):(c.write(`No package manager configuration detected; defaulting to Yarn\n\n`),p=hn.Yarn2),await K.mktempPromise(async m=>{let y=await Yd({binFolder:m}),S=new Map([[hn.Yarn1,async()=>{let M=n!==null?[\"workspace\",n]:[],Y=await $o(\"yarn\",[\"set\",\"version\",\"classic\",\"--only-if-needed\"],{cwd:t,env:y,stdin:l,stdout:c,stderr:u,end:is.ErrorCode});if(Y.code!==0)return Y.code;await K.appendFilePromise(k.join(t,\".npmignore\"),`/.yarn\n`),c.write(`\n`);let U=await $o(\"yarn\",[\"install\"],{cwd:t,env:y,stdin:l,stdout:c,stderr:u,end:is.ErrorCode});if(U.code!==0)return U.code;c.write(`\n`);let J=await $o(\"yarn\",[...M,\"pack\",\"--filename\",j.fromPortablePath(e)],{cwd:t,env:y,stdin:l,stdout:c,stderr:u});return J.code!==0?J.code:0}],[hn.Yarn2,async()=>{let M=n!==null?[\"workspace\",n]:[];y.YARN_ENABLE_INLINE_BUILDS=\"1\";let Y=k.join(t,Pt.lockfile);await K.existsPromise(Y)||await K.writeFilePromise(Y,\"\");let U=await $o(\"yarn\",[...M,\"pack\",\"--install-if-needed\",\"--filename\",j.fromPortablePath(e)],{cwd:t,env:y,stdin:l,stdout:c,stderr:u});return U.code!==0?U.code:0}],[hn.Npm,async()=>{if(n!==null){let A=new Jc.PassThrough,ne=Dg(A);A.pipe(c,{end:!1});let le=await $o(\"npm\",[\"--version\"],{cwd:t,env:y,stdin:l,stdout:A,stderr:u,end:is.Never});if(A.end(),le.code!==0)return c.end(),u.end(),le.code;let Ae=(await ne).toString().trim();if(!Uc(Ae,\">=7.x\")){let T=Vo(null,\"npm\"),L=rr(T,Ae),Ee=rr(T,\">=7.x\");throw new Error(`Workspaces aren't supported by ${sr(r,L)}; please upgrade to ${sr(r,Ee)} (npm has been detected as the primary package manager for ${et(r,t,Ge.PATH)})`)}}let M=n!==null?[\"--workspace\",n]:[];delete y.npm_config_user_agent;let Y=await $o(\"npm\",[\"install\"],{cwd:t,env:y,stdin:l,stdout:c,stderr:u,end:is.ErrorCode});if(Y.code!==0)return Y.code;let U=new Jc.PassThrough,J=Dg(U);U.pipe(c);let W=await $o(\"npm\",[\"pack\",\"--silent\",...M],{cwd:t,env:y,stdin:l,stdout:U,stderr:u});if(W.code!==0)return W.code;let ee=(await J).toString().trim().replace(/^.*\\n/s,\"\"),Z=k.resolve(t,j.toPortablePath(ee));return await K.renamePromise(Z,e),0}]]).get(p);if(typeof S==\"undefined\")throw new Error(\"Assertion failed: Unsupported workflow\");let x=await S();if(!(x===0||typeof x==\"undefined\"))throw K.detachTemp(o),new ct($.PACKAGE_PREPARATION_FAILED,`Packing the package failed (exit code ${x}, logs can be found here: ${et(r,a,Ge.PATH)})`)})})})}async function zRe(t,e,{project:r}){let i=r.tryWorkspaceByLocator(t);if(i!==null)return cD(i,e);let n=r.storedPackages.get(t.locatorHash);if(!n)throw new Error(`Package for ${Bt(r.configuration,t)} not found in the project`);return await ms.openPromise(async s=>{let o=r.configuration,a=r.configuration.getLinkers(),l={project:r,report:new Je({stdout:new Jc.PassThrough,configuration:o})},c=a.find(h=>h.supportsPackage(n,l));if(!c)throw new Error(`The package ${Bt(r.configuration,n)} isn't supported by any of the available linkers`);let u=await c.findPackageLocation(n,l),g=new _t(u,{baseFs:s});return(await At.find(Me.dot,{baseFs:g})).scripts.has(e)},{libzip:await fn()})}async function aB(t,e,r,{cwd:i,project:n,stdin:s,stdout:o,stderr:a}){return await K.mktempPromise(async l=>{let{manifest:c,env:u,cwd:g}=await o6(t,{project:n,binFolder:l,cwd:i,lifecycleScript:e}),f=c.scripts.get(e);if(typeof f==\"undefined\")return 1;let h=async()=>await eB(f,r,{cwd:g,env:u,stdin:s,stdout:o,stderr:a});return await(await 
n.configuration.reduceHook(m=>m.wrapScriptExecution,h,n,t,e,{script:f,args:r,cwd:g,env:u,stdin:s,stdout:o,stderr:a}))()})}async function uD(t,e,r,{cwd:i,project:n,stdin:s,stdout:o,stderr:a}){return await K.mktempPromise(async l=>{let{env:c,cwd:u}=await o6(t,{project:n,binFolder:l,cwd:i});return await eB(e,r,{cwd:u,env:c,stdin:s,stdout:o,stderr:a})})}async function _Re(t,{binFolder:e,cwd:r,lifecycleScript:i}){let n=await Yd({project:t.project,locator:t.anchoredLocator,binFolder:e,lifecycleScript:i});return await Promise.all(Array.from(await a6(t),([s,[,o]])=>nA(e,qr(s),process.execPath,[o]))),typeof r==\"undefined\"&&(r=k.dirname(await K.realpathPromise(k.join(t.cwd,\"package.json\")))),{manifest:t.manifest,binFolder:e,env:n,cwd:r}}async function o6(t,{project:e,binFolder:r,cwd:i,lifecycleScript:n}){let s=e.tryWorkspaceByLocator(t);if(s!==null)return _Re(s,{binFolder:r,cwd:i,lifecycleScript:n});let o=e.storedPackages.get(t.locatorHash);if(!o)throw new Error(`Package for ${Bt(e.configuration,t)} not found in the project`);return await ms.openPromise(async a=>{let l=e.configuration,c=e.configuration.getLinkers(),u={project:e,report:new Je({stdout:new Jc.PassThrough,configuration:l})},g=c.find(y=>y.supportsPackage(o,u));if(!g)throw new Error(`The package ${Bt(e.configuration,o)} isn't supported by any of the available linkers`);let f=await Yd({project:e,locator:t,binFolder:r,lifecycleScript:n});await Promise.all(Array.from(await AB(t,{project:e}),([y,[,Q]])=>nA(r,qr(y),process.execPath,[Q])));let h=await g.findPackageLocation(o,u),p=new _t(h,{baseFs:a}),m=await At.find(Me.dot,{baseFs:p});return typeof i==\"undefined\"&&(i=h),{manifest:m,binFolder:r,env:f,cwd:i}},{libzip:await fn()})}async function A6(t,e,r,{cwd:i,stdin:n,stdout:s,stderr:o}){return await aB(t.anchoredLocator,e,r,{cwd:i,project:t.project,stdin:n,stdout:s,stderr:o})}function cD(t,e){return t.manifest.scripts.has(e)}async function l6(t,e,{cwd:r,report:i}){let{configuration:n}=t.project,s=null;await K.mktempPromise(async o=>{let a=k.join(o,`${e}.log`),l=`# This file contains the result of Yarn calling the \"${e}\" lifecycle script inside a workspace (\"${j.fromPortablePath(t.cwd)}\")\n`,{stdout:c,stderr:u}=n.getSubprocessStreams(a,{report:i,prefix:Bt(n,t.anchoredLocator),header:l});i.reportInfo($.LIFECYCLE_SCRIPT,`Calling the \"${e}\" lifecycle script`);let g=await A6(t,e,[],{cwd:r,stdin:s,stdout:c,stderr:u});if(c.end(),u.end(),g!==0)throw K.detachTemp(o),new ct($.LIFECYCLE_SCRIPT,`${(0,i6.default)(e)} script failed (exit code ${et(n,g,Ge.NUMBER)}, logs can be found here: ${et(n,a,Ge.PATH)}); run ${et(n,`yarn ${e}`,Ge.CODE)} to investigate`)})}async function VRe(t,e,r){cD(t,e)&&await l6(t,e,r)}async function AB(t,{project:e}){let r=e.configuration,i=new Map,n=e.storedPackages.get(t.locatorHash);if(!n)throw new Error(`Package for ${Bt(r,t)} not found in the project`);let s=new Jc.Writable,o=r.getLinkers(),a={project:e,report:new Je({configuration:r,stdout:s})},l=new Set([t.locatorHash]);for(let u of n.dependencies.values()){let g=e.storedResolutions.get(u.descriptorHash);if(!g)throw new Error(`Assertion failed: The resolution (${sr(r,u)}) should have been registered`);l.add(g)}let c=await Promise.all(Array.from(l,async u=>{let g=e.storedPackages.get(u);if(!g)throw new Error(`Assertion failed: The package (${u}) should have been registered`);if(g.bin.size===0)return qo.skip;let f=o.find(p=>p.supportsPackage(g,a));if(!f)return qo.skip;let h=null;try{h=await 
f.findPackageLocation(g,a)}catch(p){if(p.code===\"LOCATOR_NOT_INSTALLED\")return qo.skip;throw p}return{dependency:g,packageLocation:h}}));for(let u of c){if(u===qo.skip)continue;let{dependency:g,packageLocation:f}=u;for(let[h,p]of g.bin)i.set(h,[g,j.fromPortablePath(k.resolve(f,p))])}return i}async function a6(t){return await AB(t.anchoredLocator,{project:t.project})}async function c6(t,e,r,{cwd:i,project:n,stdin:s,stdout:o,stderr:a,nodeArgs:l=[],packageAccessibleBinaries:c}){c!=null||(c=await AB(t,{project:n}));let u=c.get(e);if(!u)throw new Error(`Binary not found (${e}) for ${Bt(n.configuration,t)}`);return await K.mktempPromise(async g=>{let[,f]=u,h=await Yd({project:n,locator:t,binFolder:g});await Promise.all(Array.from(c,([m,[,y]])=>nA(h.BERRY_BIN_FOLDER,qr(m),process.execPath,[y])));let p;try{p=await $o(process.execPath,[...l,f,...r],{cwd:i,env:h,stdin:s,stdout:o,stderr:a})}finally{await K.removePromise(h.BERRY_BIN_FOLDER)}return p.code})}async function XRe(t,e,r,{cwd:i,stdin:n,stdout:s,stderr:o,packageAccessibleBinaries:a}){return await c6(t.anchoredLocator,e,r,{project:t.project,cwd:i,stdin:n,stdout:s,stderr:o,packageAccessibleBinaries:a})}var wi={};ft(wi,{convertToZip:()=>aLe,extractArchiveTo:()=>lLe,makeArchiveFromDirectory:()=>oLe});var r7=ge(require(\"stream\")),i7=ge(V9());var X9=ge(require(\"os\")),Z9=ge(ag()),$9=ge(require(\"worker_threads\")),Ql=Symbol(\"kTaskInfo\"),bR=class{constructor(e){this.source=e;this.workers=[];this.limit=(0,Z9.default)(Math.max(1,(0,X9.cpus)().length));this.cleanupInterval=setInterval(()=>{if(this.limit.pendingCount===0&&this.limit.activeCount===0){let r=this.workers.pop();r?r.terminate():clearInterval(this.cleanupInterval)}},5e3).unref()}createWorker(){this.cleanupInterval.refresh();let e=new $9.Worker(this.source,{eval:!0,execArgv:[...process.execArgv,\"--unhandled-rejections=strict\"]});return e.on(\"message\",r=>{if(!e[Ql])throw new Error(\"Assertion failed: Worker sent a result without having a task assigned\");e[Ql].resolve(r),e[Ql]=null,e.unref(),this.workers.push(e)}),e.on(\"error\",r=>{var i;(i=e[Ql])==null||i.reject(r),e[Ql]=null}),e.on(\"exit\",r=>{var i;r!==0&&((i=e[Ql])==null||i.reject(new Error(`Worker exited with code ${r}`))),e[Ql]=null}),e}run(e){return this.limit(()=>{var i;let r=(i=this.workers.pop())!=null?i:this.createWorker();return r.ref(),new Promise((n,s)=>{r[Ql]={resolve:n,reject:s},r.postMessage(e)})})}};var n7=ge(t7());async function oLe(t,{baseFs:e=new ar,prefixPath:r=Me.root,compressionLevel:i,inMemory:n=!1}={}){let s=await fn(),o;if(n)o=new Ai(null,{libzip:s,level:i});else{let l=await K.mktempPromise(),c=k.join(l,\"archive.zip\");o=new Ai(c,{create:!0,libzip:s,level:i})}let a=k.resolve(Me.root,r);return await o.copyPromise(a,t,{baseFs:e,stableTime:!0,stableSort:!0}),o}var s7;async function aLe(t,e){let r=await K.mktempPromise(),i=k.join(r,\"archive.zip\");return s7||(s7=new bR((0,n7.getContent)())),await s7.run({tmpFile:i,tgz:t,opts:e}),new Ai(i,{libzip:await fn(),level:e.compressionLevel})}async function*ALe(t){let e=new i7.default.Parse,r=new r7.PassThrough({objectMode:!0,autoDestroy:!0,emitClose:!0});e.on(\"entry\",i=>{r.write(i)}),e.on(\"error\",i=>{r.destroy(i)}),e.on(\"close\",()=>{r.destroyed||r.end()}),e.end(t);for await(let i of r){let n=i;yield n,n.resume()}}async function lLe(t,e,{stripComponents:r=0,prefixPath:i=Me.dot}={}){var s,o;function n(a){if(a.path[0]===\"/\")return!0;let l=a.path.split(/\\//g);return!!(l.some(c=>c===\"..\")||l.length<=r)}for await(let a of ALe(t)){if(n(a))continue;let 
l=k.normalize(j.toPortablePath(a.path)).replace(/\\/$/,\"\").split(/\\//g);if(l.length<=r)continue;let c=l.slice(r).join(\"/\"),u=k.join(i,c),g=420;switch((a.type===\"Directory\"||(((s=a.mode)!=null?s:0)&73)!=0)&&(g|=73),a.type){case\"Directory\":e.mkdirpSync(k.dirname(u),{chmod:493,utimes:[Dr.SAFE_TIME,Dr.SAFE_TIME]}),e.mkdirSync(u,{mode:g}),e.utimesSync(u,Dr.SAFE_TIME,Dr.SAFE_TIME);break;case\"OldFile\":case\"File\":e.mkdirpSync(k.dirname(u),{chmod:493,utimes:[Dr.SAFE_TIME,Dr.SAFE_TIME]}),e.writeFileSync(u,await Dg(a),{mode:g}),e.utimesSync(u,Dr.SAFE_TIME,Dr.SAFE_TIME);break;case\"SymbolicLink\":e.mkdirpSync(k.dirname(u),{chmod:493,utimes:[Dr.SAFE_TIME,Dr.SAFE_TIME]}),e.symlinkSync(a.linkpath,u),(o=e.lutimesSync)==null||o.call(e,u,Dr.SAFE_TIME,Dr.SAFE_TIME);break}}return e}var as={};ft(as,{emitList:()=>cLe,emitTree:()=>u7,treeNodeToJson:()=>c7,treeNodeToTreeify:()=>l7});var A7=ge(a7());function l7(t,{configuration:e}){let r={},i=(n,s)=>{let o=Array.isArray(n)?n.entries():Object.entries(n);for(let[a,{label:l,value:c,children:u}]of o){let g=[];typeof l!=\"undefined\"&&g.push(Ly(e,l,Pc.BOLD)),typeof c!=\"undefined\"&&g.push(et(e,c[0],c[1])),g.length===0&&g.push(Ly(e,`${a}`,Pc.BOLD));let f=g.join(\": \"),h=s[f]={};typeof u!=\"undefined\"&&i(u,h)}};if(typeof t.children==\"undefined\")throw new Error(\"The root node must only contain children\");return i(t.children,r),r}function c7(t){let e=r=>{var s;if(typeof r.children==\"undefined\"){if(typeof r.value==\"undefined\")throw new Error(\"Assertion failed: Expected a value to be set if the children are missing\");return Dc(r.value[0],r.value[1])}let i=Array.isArray(r.children)?r.children.entries():Object.entries((s=r.children)!=null?s:{}),n=Array.isArray(r.children)?[]:{};for(let[o,a]of i)n[o]=e(a);return typeof r.value==\"undefined\"?n:{value:Dc(r.value[0],r.value[1]),children:n}};return e(t)}function cLe(t,{configuration:e,stdout:r,json:i}){let n=t.map(s=>({value:s}));u7({children:n},{configuration:e,stdout:r,json:i})}function u7(t,{configuration:e,stdout:r,json:i,separators:n=0}){var o;if(i){let a=Array.isArray(t.children)?t.children.values():Object.values((o=t.children)!=null?o:{});for(let l of a)r.write(`${JSON.stringify(c7(l))}\n`);return}let s=(0,A7.asTree)(l7(t,{configuration:e}),!1,!1);if(n>=1&&(s=s.replace(/^([├└]─)/gm,`\\u2502\n$1`).replace(/^│\\n/,\"\")),n>=2)for(let a=0;a<2;++a)s=s.replace(/^([│ ].{2}[├│ ].{2}[^\\n]+\\n)(([│ ]).{2}[├└].{2}[^\\n]*\\n[│ ].{2}[│ ].{2}[├└]─)/gm,`$1$3  \\u2502\n$2`).replace(/^│\\n/,\"\");if(n>=3)throw new Error(\"Only the first two levels are accepted by treeUtils.emitTree\");r.write(s)}var g7=ge(require(\"crypto\")),SR=ge(require(\"fs\"));var uLe=8,Nt=class{constructor(e,{configuration:r,immutable:i=r.get(\"enableImmutableCache\"),check:n=!1}){this.markedFiles=new Set;this.mutexes=new Map;this.cacheId=`-${(0,g7.randomBytes)(8).toString(\"hex\")}.tmp`;this.configuration=r,this.cwd=e,this.immutable=i,this.check=n;let s=r.get(\"cacheKeyOverride\");if(s!==null)this.cacheKey=`${s}`;else{let o=r.get(\"compressionLevel\"),a=o!==ic?`c${o}`:\"\";this.cacheKey=[uLe,a].join(\"\")}}static async find(e,{immutable:r,check:i}={}){let n=new Nt(e.get(\"cacheFolder\"),{configuration:e,immutable:r,check:i});return await n.setup(),n}get mirrorCwd(){if(!this.configuration.get(\"enableMirror\"))return null;let e=`${this.configuration.get(\"globalFolder\")}/cache`;return e!==this.cwd?e:null}getVersionFilename(e){return`${Hg(e)}-${this.cacheKey}.zip`}getChecksumFilename(e,r){let 
n=gLe(r).slice(0,10);return`${Hg(e)}-${n}.zip`}getLocatorPath(e,r,i={}){var s;return this.mirrorCwd===null||((s=i.unstablePackages)==null?void 0:s.has(e.locatorHash))?k.resolve(this.cwd,this.getVersionFilename(e)):r===null||kR(r)!==this.cacheKey?null:k.resolve(this.cwd,this.getChecksumFilename(e,r))}getLocatorMirrorPath(e){let r=this.mirrorCwd;return r!==null?k.resolve(r,this.getVersionFilename(e)):null}async setup(){if(!this.configuration.get(\"enableGlobalCache\"))if(this.immutable){if(!await K.existsPromise(this.cwd))throw new ct($.IMMUTABLE_CACHE,\"Cache path does not exist.\")}else{await K.mkdirPromise(this.cwd,{recursive:!0});let e=k.resolve(this.cwd,\".gitignore\");await K.changeFilePromise(e,`/.gitignore\n*.flock\n*.tmp\n`)}(this.mirrorCwd||!this.immutable)&&await K.mkdirPromise(this.mirrorCwd||this.cwd,{recursive:!0})}async fetchPackageFromCache(e,r,a){var l=a,{onHit:i,onMiss:n,loader:s}=l,o=Tr(l,[\"onHit\",\"onMiss\",\"loader\"]);var A;let c=this.getLocatorMirrorPath(e),u=new ar,g=()=>{let ne=new Ai(null,{libzip:Y}),le=k.join(Me.root,lx(e));return ne.mkdirSync(le,{recursive:!0}),ne.writeJsonSync(k.join(le,Pt.manifest),{name:Ot(e),mocked:!0}),ne},f=async(ne,le=null)=>{var T;if(le===null&&((T=o.unstablePackages)==null?void 0:T.has(e.locatorHash)))return null;let Ae=!o.skipIntegrityCheck||!r?`${this.cacheKey}/${await Aw(ne)}`:r;if(le!==null){let L=!o.skipIntegrityCheck||!r?`${this.cacheKey}/${await Aw(le)}`:r;if(Ae!==L)throw new ct($.CACHE_CHECKSUM_MISMATCH,\"The remote archive doesn't match the local checksum - has the local cache been corrupted?\")}if(r!==null&&Ae!==r){let L;switch(this.check?L=\"throw\":kR(r)!==kR(Ae)?L=\"update\":L=this.configuration.get(\"checksumBehavior\"),L){case\"ignore\":return r;case\"update\":return Ae;default:case\"throw\":throw new ct($.CACHE_CHECKSUM_MISMATCH,\"The remote archive doesn't match the expected checksum\")}}return Ae},h=async ne=>{if(!s)throw new Error(`Cache check required but no loader configured for ${Bt(this.configuration,e)}`);let le=await s(),Ae=le.getRealPath();return le.saveAndClose(),await K.chmodPromise(Ae,420),await f(ne,Ae)},p=async()=>{if(c===null||!await K.existsPromise(c)){let ne=await s(),le=ne.getRealPath();return ne.saveAndClose(),{source:\"loader\",path:le}}return{source:\"mirror\",path:c}},m=async()=>{if(!s)throw new Error(`Cache entry required but missing for ${Bt(this.configuration,e)}`);if(this.immutable)throw new ct($.IMMUTABLE_CACHE,`Cache entry required but missing for ${Bt(this.configuration,e)}`);let{path:ne,source:le}=await p(),Ae=await f(ne),T=this.getLocatorPath(e,Ae,o);if(!T)throw new Error(\"Assertion failed: Expected the cache path to be available\");let L=[];le!==\"mirror\"&&c!==null&&L.push(async()=>{let we=`${c}${this.cacheId}`;await K.copyFilePromise(ne,we,SR.default.constants.COPYFILE_FICLONE),await K.chmodPromise(we,420),await K.renamePromise(we,c)}),(!o.mirrorWriteOnly||c===null)&&L.push(async()=>{let we=`${T}${this.cacheId}`;await K.copyFilePromise(ne,we,SR.default.constants.COPYFILE_FICLONE),await K.chmodPromise(we,420),await K.renamePromise(we,T)});let Ee=o.mirrorWriteOnly&&c!=null?c:T;return await Promise.all(L.map(we=>we())),[!1,Ee,Ae]},y=async()=>{let le=(async()=>{var qe;let Ae=this.getLocatorPath(e,r,o),T=Ae!==null?await u.existsPromise(Ae):!1,L=!!((qe=o.mockedPackages)==null?void 0:qe.has(e.locatorHash))&&(!this.check||!T),Ee=L||T,we=Ee?i:n;if(we&&we(),Ee){let re=null,se=Ae;return L||(re=this.check?await h(se):await f(se)),[L,se,re]}else return 
m()})();this.mutexes.set(e.locatorHash,le);try{return await le}finally{this.mutexes.delete(e.locatorHash)}};for(let ne;ne=this.mutexes.get(e.locatorHash);)await ne;let[Q,S,x]=await y();this.markedFiles.add(S);let M,Y=await fn(),U=Q?()=>g():()=>new Ai(S,{baseFs:u,libzip:Y,readOnly:!0}),J=new zh(()=>HS(()=>M=U(),ne=>`Failed to open the cache entry for ${Bt(this.configuration,e)}: ${ne}`),k),W=new Pa(S,{baseFs:J,pathUtils:k}),ee=()=>{M==null||M.discardAndClose()},Z=((A=o.unstablePackages)==null?void 0:A.has(e.locatorHash))?null:x;return[W,ee,Z]}};function kR(t){let e=t.indexOf(\"/\");return e!==-1?t.slice(0,e):null}function gLe(t){let e=t.indexOf(\"/\");return e!==-1?t.slice(e+1):t}var As;(function(r){r[r.SCRIPT=0]=\"SCRIPT\",r[r.SHELLCODE=1]=\"SHELLCODE\"})(As||(As={}));var uA=class extends Ji{constructor({configuration:e,stdout:r,suggestInstall:i=!0}){super();this.errorCount=0;nd(this,{configuration:e}),this.configuration=e,this.stdout=r,this.suggestInstall=i}static async start(e,r){let i=new this(e);try{await r(i)}catch(n){i.reportExceptionOnce(n)}finally{await i.finalize()}return i}hasErrors(){return this.errorCount>0}exitCode(){return this.hasErrors()?1:0}reportCacheHit(e){}reportCacheMiss(e){}startSectionSync(e,r){return r()}async startSectionPromise(e,r){return await r()}startTimerSync(e,r,i){return(typeof r==\"function\"?r:i)()}async startTimerPromise(e,r,i){return await(typeof r==\"function\"?r:i)()}async startCacheReport(e){return await e()}reportSeparator(){}reportInfo(e,r){}reportWarning(e,r){}reportError(e,r){this.errorCount+=1,this.stdout.write(`${et(this.configuration,\"\\u27A4\",\"redBright\")} ${this.formatNameWithHyperlink(e)}: ${r}\n`)}reportProgress(e){let r=Promise.resolve().then(async()=>{for await(let{}of e);}),i=()=>{};return te(N({},r),{stop:i})}reportJson(e){}async finalize(){this.errorCount>0&&(this.stdout.write(`\n`),this.stdout.write(`${et(this.configuration,\"\\u27A4\",\"redBright\")} Errors happened when preparing the environment required to run this command.\n`),this.suggestInstall&&this.stdout.write(`${et(this.configuration,\"\\u27A4\",\"redBright\")} This might be caused by packages being missing from the lockfile, in which case running \"yarn install\" might help.\n`))}formatNameWithHyperlink(e){return lD(e,{configuration:this.configuration,json:!1})}};var h0=ge(require(\"crypto\")),i$=ge(_7()),p0=ge(t$()),n$=ge(ag()),s$=ge(ti()),rF=ge(require(\"util\")),iF=ge(require(\"v8\")),nF=ge(require(\"zlib\"));var 
z1e=[[/^(git(?:\\+(?:https|ssh))?:\\/\\/.*(?:\\.git)?)#(.*)$/,(t,e,r,i)=>`${r}#commit=${i}`],[/^https:\\/\\/((?:[^/]+?)@)?codeload\\.github\\.com\\/([^/]+\\/[^/]+)\\/tar\\.gz\\/([0-9a-f]+)$/,(t,e,r=\"\",i,n)=>`https://${r}github.com/${i}.git#commit=${n}`],[/^https:\\/\\/((?:[^/]+?)@)?github\\.com\\/([^/]+\\/[^/]+?)(?:\\.git)?#([0-9a-f]+)$/,(t,e,r=\"\",i,n)=>`https://${r}github.com/${i}.git#commit=${n}`],[/^https?:\\/\\/[^/]+\\/(?:[^/]+\\/)*(?:@.+(?:\\/|(?:%2f)))?([^/]+)\\/(?:-|download)\\/\\1-[^/]+\\.tgz(?:#|$)/,t=>`npm:${t}`],[/^https:\\/\\/npm\\.pkg\\.github\\.com\\/download\\/(?:@[^/]+)\\/(?:[^/]+)\\/(?:[^/]+)\\/(?:[0-9a-f]+)(?:#|$)/,t=>`npm:${t}`],[/^https:\\/\\/npm\\.fontawesome\\.com\\/(?:@[^/]+)\\/([^/]+)\\/-\\/([^/]+)\\/\\1-\\2.tgz(?:#|$)/,t=>`npm:${t}`],[/^https?:\\/\\/(?:[^\\\\.]+)\\.jfrog\\.io\\/.*\\/(@[^/]+)\\/([^/]+)\\/-\\/\\1\\/\\2-(?:[.\\d\\w-]+)\\.tgz(?:#|$)/,(t,e)=>fw({protocol:\"npm:\",source:null,selector:t,params:{__archiveUrl:e}})],[/^[^/]+\\.tgz#[0-9a-f]+$/,t=>`npm:${t}`]],$R=class{constructor(e){this.resolver=e;this.resolutions=null}async setup(e,{report:r}){let i=k.join(e.cwd,e.configuration.get(\"lockfileFilename\"));if(!K.existsSync(i))return;let n=await K.readFilePromise(i,\"utf8\"),s=Qi(n);if(Object.prototype.hasOwnProperty.call(s,\"__metadata\"))return;let o=this.resolutions=new Map;for(let a of Object.keys(s)){let l=pd(a);if(!l){r.reportWarning($.YARN_IMPORT_FAILED,`Failed to parse the string \"${a}\" into a proper descriptor`);continue}fo(l.range)&&(l=rr(l,`npm:${l.range}`));let{version:c,resolved:u}=s[a];if(!u)continue;let g;for(let[h,p]of z1e){let m=u.match(h);if(m){g=p(c,...m);break}}if(!g){r.reportWarning($.YARN_IMPORT_FAILED,`${sr(e.configuration,l)}: Only some patterns can be imported from legacy lockfiles (not \"${u}\")`);continue}let f=l;try{let h=Kg(l.range),p=pd(h.selector,!0);p&&(f=p)}catch{}o.set(l.descriptorHash,cn(f,g))}}supportsDescriptor(e,r){return this.resolutions?this.resolutions.has(e.descriptorHash):!1}supportsLocator(e,r){return!1}shouldPersistResolution(e,r){throw new Error(\"Assertion failed: This resolver doesn't support resolving locators to packages\")}bindDescriptor(e,r,i){return e}getResolutionDependencies(e,r){return[]}async getCandidates(e,r,i){if(!this.resolutions)throw new Error(\"Assertion failed: The resolution store should have been setup\");let n=this.resolutions.get(e.descriptorHash);if(!n)throw new Error(\"Assertion failed: The resolution should have been registered\");return await this.resolver.getCandidates(nx(n),r,i)}async getSatisfying(e,r,i){return null}async resolve(e,r){throw new Error(\"Assertion failed: This resolver doesn't support resolving locators to packages\")}};var eF=class{constructor(e){this.resolver=e}supportsDescriptor(e,r){return!!(r.project.storedResolutions.get(e.descriptorHash)||r.project.originalPackages.has(uw(e).locatorHash))}supportsLocator(e,r){return!!(r.project.originalPackages.has(e.locatorHash)&&!r.project.lockfileNeedsRefresh)}shouldPersistResolution(e,r){throw new Error(\"The shouldPersistResolution method shouldn't be called on the lockfile resolver, which would always answer yes\")}bindDescriptor(e,r,i){return e}getResolutionDependencies(e,r){return this.resolver.getResolutionDependencies(e,r)}async getCandidates(e,r,i){let n=i.project.originalPackages.get(uw(e).locatorHash);if(n)return[n];let s=i.project.storedResolutions.get(e.descriptorHash);if(!s)throw new Error(\"Expected the resolution to have been successful - resolution not 
found\");if(n=i.project.originalPackages.get(s),!n)throw new Error(\"Expected the resolution to have been successful - package not found\");return[n]}async getSatisfying(e,r,i){return null}async resolve(e,r){let i=r.project.originalPackages.get(e.locatorHash);if(!i)throw new Error(\"The lockfile resolver isn't meant to resolve packages - they should already have been stored into a cache\");return i}};var tF=class{constructor(e){this.resolver=e}supportsDescriptor(e,r){return this.resolver.supportsDescriptor(e,r)}supportsLocator(e,r){return this.resolver.supportsLocator(e,r)}shouldPersistResolution(e,r){return this.resolver.shouldPersistResolution(e,r)}bindDescriptor(e,r,i){return this.resolver.bindDescriptor(e,r,i)}getResolutionDependencies(e,r){return this.resolver.getResolutionDependencies(e,r)}async getCandidates(e,r,i){throw new ct($.MISSING_LOCKFILE_ENTRY,`This package doesn't seem to be present in your lockfile; run \"yarn install\" to update the lockfile`)}async getSatisfying(e,r,i){throw new ct($.MISSING_LOCKFILE_ENTRY,`This package doesn't seem to be present in your lockfile; run \"yarn install\" to update the lockfile`)}async resolve(e,r){throw new ct($.MISSING_LOCKFILE_ENTRY,`This package doesn't seem to be present in your lockfile; run \"yarn install\" to update the lockfile`)}};var pi=class extends Ji{reportCacheHit(e){}reportCacheMiss(e){}startSectionSync(e,r){return r()}async startSectionPromise(e,r){return await r()}startTimerSync(e,r,i){return(typeof r==\"function\"?r:i)()}async startTimerPromise(e,r,i){return await(typeof r==\"function\"?r:i)()}async startCacheReport(e){return await e()}reportSeparator(){}reportInfo(e,r){}reportWarning(e,r){}reportError(e,r){}reportProgress(e){let r=Promise.resolve().then(async()=>{for await(let{}of e);}),i=()=>{};return te(N({},r),{stop:i})}reportJson(e){}async finalize(){}};var r$=ge(rx());var BC=class{constructor(e,{project:r}){this.workspacesCwds=new Set;this.dependencies=new Map;this.project=r,this.cwd=e}async setup(){var s;this.manifest=(s=await At.tryFind(this.cwd))!=null?s:new At,this.relativeCwd=k.relative(this.project.cwd,this.cwd)||Me.dot;let e=this.manifest.name?this.manifest.name:Vo(null,`${this.computeCandidateName()}-${ln(this.relativeCwd).substring(0,6)}`),r=this.manifest.version?this.manifest.version:\"0.0.0\";this.locator=cn(e,r),this.anchoredDescriptor=rr(this.locator,`${si.protocol}${this.relativeCwd}`),this.anchoredLocator=cn(this.locator,`${si.protocol}${this.relativeCwd}`);let i=this.manifest.workspaceDefinitions.map(({pattern:o})=>o),n=await(0,r$.default)(i,{cwd:j.fromPortablePath(this.cwd),expandDirectories:!1,onlyDirectories:!0,onlyFiles:!1,ignore:[\"**/node_modules\",\"**/.git\",\"**/.yarn\"]});n.sort();for(let o of n){let a=k.resolve(this.cwd,j.toPortablePath(o));K.existsSync(k.join(a,\"package.json\"))&&this.workspacesCwds.add(a)}}accepts(e){var o;let r=e.indexOf(\":\"),i=r!==-1?e.slice(0,r+1):null,n=r!==-1?e.slice(r+1):e;if(i===si.protocol&&k.normalize(n)===this.relativeCwd||i===si.protocol&&(n===\"*\"||n===\"^\"||n===\"~\"))return!0;let s=fo(n);return s?i===si.protocol?s.test((o=this.manifest.version)!=null?o:\"0.0.0\"):this.project.configuration.get(\"enableTransparentWorkspaces\")&&this.manifest.version!==null?s.test(this.manifest.version):!1:!1}computeCandidateName(){return this.cwd===this.project.cwd?\"root-workspace\":`${k.basename(this.cwd)}`||\"unnamed-workspace\"}getRecursiveWorkspaceDependencies({dependencies:e=At.hardDependencies}={}){let r=new Set,i=n=>{for(let s of e)for(let o of 
n.manifest[s].values()){let a=this.project.tryWorkspaceByDescriptor(o);a===null||r.has(a)||(r.add(a),i(a))}};return i(this),r}getRecursiveWorkspaceDependents({dependencies:e=At.hardDependencies}={}){let r=new Set,i=n=>{for(let s of this.project.workspaces)e.some(a=>[...s.manifest[a].values()].some(l=>{let c=this.project.tryWorkspaceByDescriptor(l);return c!==null&&hd(c.anchoredLocator,n.anchoredLocator)}))&&!r.has(s)&&(r.add(s),i(s))};return i(this),r}getRecursiveWorkspaceChildren(){let e=[];for(let r of this.workspacesCwds){let i=this.project.workspacesByCwd.get(r);i&&e.push(i,...i.getRecursiveWorkspaceChildren())}return e}async persistManifest(){let e={};this.manifest.exportTo(e);let r=k.join(this.cwd,At.fileName),i=`${JSON.stringify(e,null,this.manifest.indent)}\n`;await K.changeFilePromise(r,i,{automaticNewlines:!0}),this.manifest.raw=e}};var o$=6,_1e=1,V1e=/ *, */g,a$=/\\/$/,X1e=32,Z1e=(0,rF.promisify)(nF.default.gzip),$1e=(0,rF.promisify)(nF.default.gunzip),di;(function(r){r.UpdateLockfile=\"update-lockfile\",r.SkipBuild=\"skip-build\"})(di||(di={}));var sF={restoreInstallersCustomData:[\"installersCustomData\"],restoreResolutions:[\"accessibleLocators\",\"conditionalLocators\",\"disabledLocators\",\"optionalBuilds\",\"storedDescriptors\",\"storedResolutions\",\"storedPackages\",\"lockFileChecksum\"],restoreBuildState:[\"storedBuildState\"]},A$=t=>ln(`${_1e}`,t),ze=class{constructor(e,{configuration:r}){this.resolutionAliases=new Map;this.workspaces=[];this.workspacesByCwd=new Map;this.workspacesByIdent=new Map;this.storedResolutions=new Map;this.storedDescriptors=new Map;this.storedPackages=new Map;this.storedChecksums=new Map;this.storedBuildState=new Map;this.accessibleLocators=new Set;this.conditionalLocators=new Set;this.disabledLocators=new Set;this.originalPackages=new Map;this.optionalBuilds=new Set;this.lockfileNeedsRefresh=!1;this.peerRequirements=new Map;this.installersCustomData=new Map;this.lockFileChecksum=null;this.installStateChecksum=null;this.configuration=r,this.cwd=e}static async find(e,r){var p,m,y;if(!e.projectCwd)throw new Pe(`No project found in ${r}`);let i=e.projectCwd,n=r,s=null;for(;s!==e.projectCwd;){if(s=n,K.existsSync(k.join(s,Pt.manifest))){i=s;break}n=k.dirname(s)}let o=new ze(e.projectCwd,{configuration:e});(p=ye.telemetry)==null||p.reportProject(o.cwd),await o.setupResolutions(),await o.setupWorkspaces(),(m=ye.telemetry)==null||m.reportWorkspaceCount(o.workspaces.length),(y=ye.telemetry)==null||y.reportDependencyCount(o.workspaces.reduce((Q,S)=>Q+S.manifest.dependencies.size+S.manifest.devDependencies.size,0));let a=o.tryWorkspaceByCwd(i);if(a)return{project:o,workspace:a,locator:a.anchoredLocator};let l=await o.findLocatorForLocation(`${i}/`,{strict:!0});if(l)return{project:o,locator:l,workspace:null};let c=et(e,o.cwd,Ge.PATH),u=et(e,k.relative(o.cwd,i),Ge.PATH),g=`- If ${c} isn't intended to be a project, remove any yarn.lock and/or package.json file there.`,f=`- If ${c} is intended to be a project, it might be that you forgot to list ${u} in its workspace configuration.`,h=`- Finally, if ${c} is fine and you intend ${u} to be treated as a completely separate project (not even a workspace), create an empty yarn.lock file in it.`;throw new Pe(`The nearest package directory (${et(e,i,Ge.PATH)}) doesn't seem to be part of the project declared in ${et(e,o.cwd,Ge.PATH)}.\n\n${[g,f,h].join(`\n`)}`)}async setupResolutions(){var i;this.storedResolutions=new Map,this.storedDescriptors=new Map,this.storedPackages=new Map,this.lockFileChecksum=null;let 
e=k.join(this.cwd,this.configuration.get(\"lockfileFilename\")),r=this.configuration.get(\"defaultLanguageName\");if(K.existsSync(e)){let n=await K.readFilePromise(e,\"utf8\");this.lockFileChecksum=A$(n);let s=Qi(n);if(s.__metadata){let o=s.__metadata.version,a=s.__metadata.cacheKey;this.lockfileNeedsRefresh=o<o$;for(let l of Object.keys(s)){if(l===\"__metadata\")continue;let c=s[l];if(typeof c.resolution==\"undefined\")throw new Error(`Assertion failed: Expected the lockfile entry to have a resolution field (${l})`);let u=Mc(c.resolution,!0),g=new At;g.load(c,{yamlCompatibilityMode:!0});let f=g.version,h=g.languageName||r,p=c.linkType.toUpperCase(),m=(i=c.conditions)!=null?i:null,y=g.dependencies,Q=g.peerDependencies,S=g.dependenciesMeta,x=g.peerDependenciesMeta,M=g.bin;if(c.checksum!=null){let U=typeof a!=\"undefined\"&&!c.checksum.includes(\"/\")?`${a}/${c.checksum}`:c.checksum;this.storedChecksums.set(u.locatorHash,U)}let Y=te(N({},u),{version:f,languageName:h,linkType:p,conditions:m,dependencies:y,peerDependencies:Q,dependenciesMeta:S,peerDependenciesMeta:x,bin:M});this.originalPackages.set(Y.locatorHash,Y);for(let U of l.split(V1e)){let J=nl(U);this.storedDescriptors.set(J.descriptorHash,J),this.storedResolutions.set(J.descriptorHash,u.locatorHash)}}}}}async setupWorkspaces(){this.workspaces=[],this.workspacesByCwd=new Map,this.workspacesByIdent=new Map;let e=[this.cwd];for(;e.length>0;){let r=e;e=[];for(let i of r){if(this.workspacesByCwd.has(i))continue;let n=await this.addWorkspace(i),s=this.storedPackages.get(n.anchoredLocator.locatorHash);s&&(n.dependencies=s.dependencies);for(let o of n.workspacesCwds)e.push(o)}}}async addWorkspace(e){let r=new BC(e,{project:this});await r.setup();let i=this.workspacesByIdent.get(r.locator.identHash);if(typeof i!=\"undefined\")throw new Error(`Duplicate workspace name ${gi(this.configuration,r.locator)}: ${j.fromPortablePath(e)} conflicts with ${j.fromPortablePath(i.cwd)}`);return this.workspaces.push(r),this.workspacesByCwd.set(e,r),this.workspacesByIdent.set(r.locator.identHash,r),r}get topLevelWorkspace(){return this.getWorkspaceByCwd(this.cwd)}tryWorkspaceByCwd(e){k.isAbsolute(e)||(e=k.resolve(this.cwd,e)),e=k.normalize(e).replace(/\\/+$/,\"\");let r=this.workspacesByCwd.get(e);return r||null}getWorkspaceByCwd(e){let r=this.tryWorkspaceByCwd(e);if(!r)throw new Error(`Workspace not found (${e})`);return r}tryWorkspaceByFilePath(e){let r=null;for(let i of this.workspaces)k.relative(i.cwd,e).startsWith(\"../\")||r&&r.cwd.length>=i.cwd.length||(r=i);return r||null}getWorkspaceByFilePath(e){let r=this.tryWorkspaceByFilePath(e);if(!r)throw new Error(`Workspace not found (${e})`);return r}tryWorkspaceByIdent(e){let r=this.workspacesByIdent.get(e.identHash);return typeof r==\"undefined\"?null:r}getWorkspaceByIdent(e){let r=this.tryWorkspaceByIdent(e);if(!r)throw new Error(`Workspace not found (${gi(this.configuration,e)})`);return r}tryWorkspaceByDescriptor(e){let r=this.tryWorkspaceByIdent(e);return r===null||(il(e)&&(e=ud(e)),!r.accepts(e.range))?null:r}getWorkspaceByDescriptor(e){let r=this.tryWorkspaceByDescriptor(e);if(r===null)throw new Error(`Workspace not found (${sr(this.configuration,e)})`);return r}tryWorkspaceByLocator(e){let r=this.tryWorkspaceByIdent(e);return r===null||(Xo(e)&&(e=gd(e)),r.locator.locatorHash!==e.locatorHash&&r.anchoredLocator.locatorHash!==e.locatorHash)?null:r}getWorkspaceByLocator(e){let r=this.tryWorkspaceByLocator(e);if(!r)throw new Error(`Workspace not found (${Bt(this.configuration,e)})`);return 
r}refreshWorkspaceDependencies(){for(let e of this.workspaces){let r=this.storedPackages.get(e.anchoredLocator.locatorHash);if(!r)throw new Error(`Assertion failed: Expected workspace ${Cd(this.configuration,e)} (${et(this.configuration,k.join(e.cwd,Pt.manifest),Ge.PATH)}) to have been resolved. Run \"yarn install\" to update the lockfile`);e.dependencies=new Map(r.dependencies)}}forgetResolution(e){let r=n=>{this.storedResolutions.delete(n),this.storedDescriptors.delete(n)},i=n=>{this.originalPackages.delete(n),this.storedPackages.delete(n),this.accessibleLocators.delete(n)};if(\"descriptorHash\"in e){let n=this.storedResolutions.get(e.descriptorHash);r(e.descriptorHash);let s=new Set(this.storedResolutions.values());typeof n!=\"undefined\"&&!s.has(n)&&i(n)}if(\"locatorHash\"in e){i(e.locatorHash);for(let[n,s]of this.storedResolutions)s===e.locatorHash&&r(n)}}forgetTransientResolutions(){let e=this.configuration.makeResolver();for(let r of this.originalPackages.values()){let i;try{i=e.shouldPersistResolution(r,{project:this,resolver:e})}catch{i=!1}i||this.forgetResolution(r)}}forgetVirtualResolutions(){for(let e of this.storedPackages.values())for(let[r,i]of e.dependencies)il(i)&&e.dependencies.set(r,ud(i))}getDependencyMeta(e,r){let i={},s=this.topLevelWorkspace.manifest.dependenciesMeta.get(Ot(e));if(!s)return i;let o=s.get(null);if(o&&Object.assign(i,o),r===null||!s$.default.valid(r))return i;for(let[a,l]of s)a!==null&&a===r&&Object.assign(i,l);return i}async findLocatorForLocation(e,{strict:r=!1}={}){let i=new pi,n=this.configuration.getLinkers(),s={project:this,report:i};for(let o of n){let a=await o.findPackageLocator(e,s);if(a){if(r&&(await o.findPackageLocation(a,s)).replace(a$,\"\")!==e.replace(a$,\"\"))continue;return a}}return null}async resolveEverything(e){if(!this.workspacesByCwd||!this.workspacesByIdent)throw new Error(\"Workspaces must have been setup before calling this function\");this.forgetVirtualResolutions(),e.lockfileOnly||this.forgetTransientResolutions();let r=e.resolver||this.configuration.makeResolver(),i=new $R(r);await i.setup(this,{report:e.report});let n=e.lockfileOnly?[new tF(r)]:[i,r],s=new wd([new eF(r),...n]),o=this.configuration.makeFetcher(),a=e.lockfileOnly?{project:this,report:e.report,resolver:s}:{project:this,report:e.report,resolver:s,fetchOptions:{project:this,cache:e.cache,checksums:this.storedChecksums,report:e.report,fetcher:o,cacheOptions:{mirrorWriteOnly:!0}}},l=new Map,c=new Map,u=new Map,g=new Map,f=new Map,h=new Map,p=this.topLevelWorkspace.anchoredLocator,m=new Set,y=[],Q=Ex(),S=this.configuration.getSupportedArchitectures();await e.report.startProgressPromise(Ji.progressViaTitle(),async ee=>{let Z=async T=>{let L=await Pg(async()=>await s.resolve(T,a),qe=>`${Bt(this.configuration,T)}: ${qe}`);if(!hd(T,L))throw new Error(`Assertion failed: The locator cannot be changed by the resolver (went from ${Bt(this.configuration,T)} to ${Bt(this.configuration,L)})`);g.set(L.locatorHash,L);let Ee=this.configuration.normalizePackage(L);for(let[qe,re]of Ee.dependencies){let se=await this.configuration.reduceHook(he=>he.reduceDependency,re,this,Ee,re,{resolver:s,resolveOptions:a});if(!fd(re,se))throw new Error(\"Assertion failed: The descriptor ident cannot be changed through aliases\");let Qe=s.bindDescriptor(se,T,a);Ee.dependencies.set(qe,Qe)}let we=co([...Ee.dependencies.values()].map(qe=>Ae(qe)));return y.push(we),we.catch(()=>{}),c.set(Ee.locatorHash,Ee),Ee},A=async T=>{let L=f.get(T.locatorHash);if(typeof L!=\"undefined\")return L;let 
Ee=Promise.resolve().then(()=>Z(T));return f.set(T.locatorHash,Ee),Ee},ne=async(T,L)=>{let Ee=await Ae(L);return l.set(T.descriptorHash,T),u.set(T.descriptorHash,Ee.locatorHash),Ee},le=async T=>{ee.setTitle(sr(this.configuration,T));let L=this.resolutionAliases.get(T.descriptorHash);if(typeof L!=\"undefined\")return ne(T,this.storedDescriptors.get(L));let Ee=s.getResolutionDependencies(T,a),we=new Map(await co(Ee.map(async se=>{let Qe=s.bindDescriptor(se,p,a),he=await Ae(Qe);return m.add(he.locatorHash),[se.descriptorHash,he]}))),re=(await Pg(async()=>await s.getCandidates(T,we,a),se=>`${sr(this.configuration,T)}: ${se}`))[0];if(typeof re==\"undefined\")throw new Error(`${sr(this.configuration,T)}: No candidates found`);return l.set(T.descriptorHash,T),u.set(T.descriptorHash,re.locatorHash),A(re)},Ae=T=>{let L=h.get(T.descriptorHash);if(typeof L!=\"undefined\")return L;l.set(T.descriptorHash,T);let Ee=Promise.resolve().then(()=>le(T));return h.set(T.descriptorHash,Ee),Ee};for(let T of this.workspaces){let L=T.anchoredDescriptor;y.push(Ae(L))}for(;y.length>0;){let T=[...y];y.length=0,await co(T)}});let x=new Set(this.resolutionAliases.values()),M=new Set(c.keys()),Y=new Set,U=new Map;eUe({project:this,report:e.report,accessibleLocators:Y,volatileDescriptors:x,optionalBuilds:M,peerRequirements:U,allDescriptors:l,allResolutions:u,allPackages:c});for(let ee of m)M.delete(ee);for(let ee of x)l.delete(ee),u.delete(ee);let J=new Set,W=new Set;for(let ee of c.values())ee.conditions!=null&&(!M.has(ee.locatorHash)||(pw(ee,S)||(pw(ee,Q)&&e.report.reportWarningOnce($.GHOST_ARCHITECTURE,`${Bt(this.configuration,ee)}: Your current architecture (${process.platform}-${process.arch}) is supported by this package, but is missing from the ${et(this.configuration,\"supportedArchitectures\",Di.SETTING)} setting`),W.add(ee.locatorHash)),J.add(ee.locatorHash)));this.storedResolutions=u,this.storedDescriptors=l,this.storedPackages=c,this.accessibleLocators=Y,this.conditionalLocators=J,this.disabledLocators=W,this.originalPackages=g,this.optionalBuilds=M,this.peerRequirements=U,this.refreshWorkspaceDependencies()}async fetchEverything({cache:e,report:r,fetcher:i,mode:n}){let s={mockedPackages:this.disabledLocators,unstablePackages:this.conditionalLocators},o=i||this.configuration.makeFetcher(),a={checksums:this.storedChecksums,project:this,cache:e,fetcher:o,report:r,cacheOptions:s},l=Array.from(new Set(xn(this.storedResolutions.values(),[f=>{let h=this.storedPackages.get(f);if(!h)throw new Error(\"Assertion failed: The locator should have been registered\");return Ps(h)}])));n===di.UpdateLockfile&&(l=l.filter(f=>!this.storedChecksums.has(f)));let c=!1,u=Ji.progressViaCounter(l.length);r.reportProgress(u);let g=(0,n$.default)(X1e);if(await r.startCacheReport(async()=>{await co(l.map(f=>g(async()=>{let h=this.storedPackages.get(f);if(!h)throw new Error(\"Assertion failed: The locator should have been registered\");if(Xo(h))return;let p;try{p=await o.fetch(h,a)}catch(m){m.message=`${Bt(this.configuration,h)}: ${m.message}`,r.reportExceptionOnce(m),c=m;return}p.checksum!=null?this.storedChecksums.set(h.locatorHash,p.checksum):this.storedChecksums.delete(h.locatorHash),p.releaseFs&&p.releaseFs()}).finally(()=>{u.tick()})))}),c)throw c}async linkEverything({cache:e,report:r,fetcher:i,mode:n}){var A,ne,le;let 
s={mockedPackages:this.disabledLocators,unstablePackages:this.conditionalLocators,skipIntegrityCheck:!0},o=i||this.configuration.makeFetcher(),a={checksums:this.storedChecksums,project:this,cache:e,fetcher:o,report:r,skipIntegrityCheck:!0,cacheOptions:s},l=this.configuration.getLinkers(),c={project:this,report:r},u=new Map(l.map(Ae=>{let T=Ae.makeInstaller(c),L=T.getCustomDataKey(),Ee=this.installersCustomData.get(L);return typeof Ee!=\"undefined\"&&T.attachCustomData(Ee),[Ae,T]})),g=new Map,f=new Map,h=new Map,p=new Map(await co([...this.accessibleLocators].map(async Ae=>{let T=this.storedPackages.get(Ae);if(!T)throw new Error(\"Assertion failed: The locator should have been registered\");return[Ae,await o.fetch(T,a)]}))),m=[];for(let Ae of this.accessibleLocators){let T=this.storedPackages.get(Ae);if(typeof T==\"undefined\")throw new Error(\"Assertion failed: The locator should have been registered\");let L=p.get(T.locatorHash);if(typeof L==\"undefined\")throw new Error(\"Assertion failed: The fetch result should have been registered\");let Ee=[],we=re=>{Ee.push(re)},qe=this.tryWorkspaceByLocator(T);if(qe!==null){let re=[],{scripts:se}=qe.manifest;for(let he of[\"preinstall\",\"install\",\"postinstall\"])se.has(he)&&re.push([As.SCRIPT,he]);try{for(let[he,Fe]of u)if(he.supportsPackage(T,c)&&(await Fe.installPackage(T,L,{holdFetchResult:we})).buildDirective!==null)throw new Error(\"Assertion failed: Linkers can't return build directives for workspaces; this responsibility befalls to the Yarn core\")}finally{Ee.length===0?(A=L.releaseFs)==null||A.call(L):m.push(co(Ee).catch(()=>{}).then(()=>{var he;(he=L.releaseFs)==null||he.call(L)}))}let Qe=k.join(L.packageFs.getRealPath(),L.prefixPath);f.set(T.locatorHash,Qe),!Xo(T)&&re.length>0&&h.set(T.locatorHash,{directives:re,buildLocations:[Qe]})}else{let re=l.find(he=>he.supportsPackage(T,c));if(!re)throw new ct($.LINKER_NOT_FOUND,`${Bt(this.configuration,T)} isn't supported by any available linker`);let se=u.get(re);if(!se)throw new Error(\"Assertion failed: The installer should have been registered\");let Qe;try{Qe=await se.installPackage(T,L,{holdFetchResult:we})}finally{Ee.length===0?(ne=L.releaseFs)==null||ne.call(L):m.push(co(Ee).then(()=>{}).then(()=>{var he;(he=L.releaseFs)==null||he.call(L)}))}g.set(T.locatorHash,re),f.set(T.locatorHash,Qe.packageLocation),Qe.buildDirective&&Qe.buildDirective.length>0&&Qe.packageLocation&&h.set(T.locatorHash,{directives:Qe.buildDirective,buildLocations:[Qe.packageLocation]})}}let y=new Map;for(let Ae of this.accessibleLocators){let T=this.storedPackages.get(Ae);if(!T)throw new Error(\"Assertion failed: The locator should have been registered\");let L=this.tryWorkspaceByLocator(T)!==null,Ee=async(we,qe)=>{let re=f.get(T.locatorHash);if(typeof re==\"undefined\")throw new Error(`Assertion failed: The package (${Bt(this.configuration,T)}) should have been registered`);let se=[];for(let Qe of T.dependencies.values()){let he=this.storedResolutions.get(Qe.descriptorHash);if(typeof he==\"undefined\")throw new Error(`Assertion failed: The resolution (${sr(this.configuration,Qe)}, from ${Bt(this.configuration,T)})should have been registered`);let Fe=this.storedPackages.get(he);if(typeof Fe==\"undefined\")throw new Error(`Assertion failed: The package (${he}, resolved from ${sr(this.configuration,Qe)}) should have been registered`);let Ue=this.tryWorkspaceByLocator(Fe)===null?g.get(he):null;if(typeof Ue==\"undefined\")throw new Error(`Assertion failed: The package (${he}, resolved from ${sr(this.configuration,Qe)}) 
should have been registered`);Ue===we||Ue===null?f.get(Fe.locatorHash)!==null&&se.push([Qe,Fe]):!L&&re!==null&&kg(y,he).push(re)}re!==null&&await qe.attachInternalDependencies(T,se)};if(L)for(let[we,qe]of u)we.supportsPackage(T,c)&&await Ee(we,qe);else{let we=g.get(T.locatorHash);if(!we)throw new Error(\"Assertion failed: The linker should have been found\");let qe=u.get(we);if(!qe)throw new Error(\"Assertion failed: The installer should have been registered\");await Ee(we,qe)}}for(let[Ae,T]of y){let L=this.storedPackages.get(Ae);if(!L)throw new Error(\"Assertion failed: The package should have been registered\");let Ee=g.get(L.locatorHash);if(!Ee)throw new Error(\"Assertion failed: The linker should have been found\");let we=u.get(Ee);if(!we)throw new Error(\"Assertion failed: The installer should have been registered\");await we.attachExternalDependents(L,T)}let Q=new Map;for(let Ae of u.values()){let T=await Ae.finalizeInstall();for(let L of(le=T==null?void 0:T.records)!=null?le:[])h.set(L.locatorHash,{directives:L.buildDirective,buildLocations:L.buildLocations});typeof(T==null?void 0:T.customData)!=\"undefined\"&&Q.set(Ae.getCustomDataKey(),T.customData)}if(this.installersCustomData=Q,await co(m),n===di.SkipBuild)return;let S=new Set(this.storedPackages.keys()),x=new Set(h.keys());for(let Ae of x)S.delete(Ae);let M=(0,h0.createHash)(\"sha512\");M.update(process.versions.node),await this.configuration.triggerHook(Ae=>Ae.globalHashGeneration,this,Ae=>{M.update(\"\\0\"),M.update(Ae)});let Y=M.digest(\"hex\"),U=new Map,J=Ae=>{let T=U.get(Ae.locatorHash);if(typeof T!=\"undefined\")return T;let L=this.storedPackages.get(Ae.locatorHash);if(typeof L==\"undefined\")throw new Error(\"Assertion failed: The package should have been registered\");let Ee=(0,h0.createHash)(\"sha512\");Ee.update(Ae.locatorHash),U.set(Ae.locatorHash,\"<recursive>\");for(let we of L.dependencies.values()){let qe=this.storedResolutions.get(we.descriptorHash);if(typeof qe==\"undefined\")throw new Error(`Assertion failed: The resolution (${sr(this.configuration,we)}) should have been registered`);let re=this.storedPackages.get(qe);if(typeof re==\"undefined\")throw new Error(\"Assertion failed: The package should have been registered\");Ee.update(J(re))}return T=Ee.digest(\"hex\"),U.set(Ae.locatorHash,T),T},W=(Ae,T)=>{let L=(0,h0.createHash)(\"sha512\");L.update(Y),L.update(J(Ae));for(let Ee of T)L.update(Ee);return L.digest(\"hex\")},ee=new Map,Z=!1;for(;x.size>0;){let Ae=x.size,T=[];for(let L of x){let Ee=this.storedPackages.get(L);if(!Ee)throw new Error(\"Assertion failed: The package should have been registered\");let we=!0;for(let se of Ee.dependencies.values()){let Qe=this.storedResolutions.get(se.descriptorHash);if(!Qe)throw new Error(`Assertion failed: The resolution (${sr(this.configuration,se)}) should have been registered`);if(x.has(Qe)){we=!1;break}}if(!we)continue;x.delete(L);let qe=h.get(Ee.locatorHash);if(!qe)throw new Error(\"Assertion failed: The build directive should have been registered\");let re=W(Ee,qe.buildLocations);if(this.storedBuildState.get(Ee.locatorHash)===re){ee.set(Ee.locatorHash,re);continue}Z||(await this.persistInstallStateFile(),Z=!0),this.storedBuildState.has(Ee.locatorHash)?r.reportInfo($.MUST_REBUILD,`${Bt(this.configuration,Ee)} must be rebuilt because its dependency tree changed`):r.reportInfo($.MUST_BUILD,`${Bt(this.configuration,Ee)} must be built because it never has been before or the last one failed`);for(let se of qe.buildLocations){if(!k.isAbsolute(se))throw new 
Error(`Assertion failed: Expected the build location to be absolute (not ${se})`);T.push((async()=>{for(let[Qe,he]of qe.directives){let Fe=`# This file contains the result of Yarn building a package (${Ps(Ee)})\n`;switch(Qe){case As.SCRIPT:Fe+=`# Script name: ${he}\n`;break;case As.SHELLCODE:Fe+=`# Script code: ${he}\n`;break}let Ue=null;if(!await K.mktempPromise(async ve=>{let pe=k.join(ve,\"build.log\"),{stdout:X,stderr:be}=this.configuration.getSubprocessStreams(pe,{header:Fe,prefix:Bt(this.configuration,Ee),report:r}),ce;try{switch(Qe){case As.SCRIPT:ce=await aB(Ee,he,[],{cwd:se,project:this,stdin:Ue,stdout:X,stderr:be});break;case As.SHELLCODE:ce=await uD(Ee,he,[],{cwd:se,project:this,stdin:Ue,stdout:X,stderr:be});break}}catch(gt){be.write(gt.stack),ce=1}if(X.end(),be.end(),ce===0)return ee.set(Ee.locatorHash,re),!0;K.detachTemp(ve);let fe=`${Bt(this.configuration,Ee)} couldn't be built successfully (exit code ${et(this.configuration,ce,Ge.NUMBER)}, logs can be found here: ${et(this.configuration,pe,Ge.PATH)})`;return this.optionalBuilds.has(Ee.locatorHash)?(r.reportInfo($.BUILD_FAILED,fe),ee.set(Ee.locatorHash,re),!0):(r.reportError($.BUILD_FAILED,fe),!1)}))return}})())}}if(await co(T),Ae===x.size){let L=Array.from(x).map(Ee=>{let we=this.storedPackages.get(Ee);if(!we)throw new Error(\"Assertion failed: The package should have been registered\");return Bt(this.configuration,we)}).join(\", \");r.reportError($.CYCLIC_DEPENDENCIES,`Some packages have circular dependencies that make their build order unsatisfiable - as a result they won't be built (affected packages are: ${L})`);break}}this.storedBuildState=ee}async install(e){var a,l;let r=this.configuration.get(\"nodeLinker\");(a=ye.telemetry)==null||a.reportInstall(r),await e.report.startTimerPromise(\"Project validation\",{skipIfEmpty:!0},async()=>{await this.configuration.triggerHook(c=>c.validateProject,this,{reportWarning:e.report.reportWarning.bind(e.report),reportError:e.report.reportError.bind(e.report)})});for(let c of this.configuration.packageExtensions.values())for(let[,u]of c)for(let g of u)g.status=qi.Inactive;let i=k.join(this.cwd,this.configuration.get(\"lockfileFilename\")),n=null;if(e.immutable)try{n=await K.readFilePromise(i,\"utf8\")}catch(c){throw c.code===\"ENOENT\"?new ct($.FROZEN_LOCKFILE_EXCEPTION,\"The lockfile would have been created by this install, which is explicitly forbidden.\"):c}await e.report.startTimerPromise(\"Resolution step\",async()=>{await this.resolveEverything(e)}),await e.report.startTimerPromise(\"Post-resolution validation\",{skipIfEmpty:!0},async()=>{for(let[,c]of this.configuration.packageExtensions)for(let[,u]of c)for(let g of u)if(g.userProvided){let f=et(this.configuration,g,Ge.PACKAGE_EXTENSION);switch(g.status){case qi.Inactive:e.report.reportWarning($.UNUSED_PACKAGE_EXTENSION,`${f}: No matching package in the dependency tree; you may not need this rule anymore.`);break;case qi.Redundant:e.report.reportWarning($.REDUNDANT_PACKAGE_EXTENSION,`${f}: This rule seems redundant when applied on the original package; the extension may have been applied upstream.`);break}}if(n!==null){let c=$l(n,this.generateLockfile());if(c!==n){let u=(0,i$.structuredPatch)(i,i,n,c);e.report.reportSeparator();for(let g of u.hunks){e.report.reportInfo(null,`@@ -${g.oldStart},${g.oldLines} +${g.newStart},${g.newLines} @@`);for(let f of 
g.lines)f.startsWith(\"+\")?e.report.reportError($.FROZEN_LOCKFILE_EXCEPTION,et(this.configuration,f,Ge.ADDED)):f.startsWith(\"-\")?e.report.reportError($.FROZEN_LOCKFILE_EXCEPTION,et(this.configuration,f,Ge.REMOVED)):e.report.reportInfo(null,et(this.configuration,f,\"grey\"))}throw e.report.reportSeparator(),new ct($.FROZEN_LOCKFILE_EXCEPTION,\"The lockfile would have been modified by this install, which is explicitly forbidden.\")}}});for(let c of this.configuration.packageExtensions.values())for(let[,u]of c)for(let g of u)g.userProvided&&g.status===qi.Active&&((l=ye.telemetry)==null||l.reportPackageExtension(Dc(g,Ge.PACKAGE_EXTENSION)));await e.report.startTimerPromise(\"Fetch step\",async()=>{await this.fetchEverything(e),(typeof e.persistProject==\"undefined\"||e.persistProject)&&e.mode!==di.UpdateLockfile&&await this.cacheCleanup(e)});let s=e.immutable?[...new Set(this.configuration.get(\"immutablePatterns\"))].sort():[],o=await Promise.all(s.map(async c=>lw(c,{cwd:this.cwd})));(typeof e.persistProject==\"undefined\"||e.persistProject)&&await this.persist(),await e.report.startTimerPromise(\"Link step\",async()=>{if(e.mode===di.UpdateLockfile){e.report.reportWarning($.UPDATE_LOCKFILE_ONLY_SKIP_LINK,`Skipped due to ${et(this.configuration,\"mode=update-lockfile\",Ge.CODE)}`);return}await this.linkEverything(e);let c=await Promise.all(s.map(async u=>lw(u,{cwd:this.cwd})));for(let u=0;u<s.length;++u)o[u]!==c[u]&&e.report.reportError($.FROZEN_ARTIFACT_EXCEPTION,`The checksum for ${s[u]} has been modified by this install, which is explicitly forbidden.`)}),await this.persistInstallStateFile(),await this.configuration.triggerHook(c=>c.afterAllInstalled,this,e)}generateLockfile(){let e=new Map;for(let[n,s]of this.storedResolutions.entries()){let o=e.get(s);o||e.set(s,o=new Set),o.add(n)}let r={};r.__metadata={version:o$,cacheKey:void 0};for(let[n,s]of e.entries()){let o=this.originalPackages.get(n);if(!o)continue;let a=[];for(let f of s){let h=this.storedDescriptors.get(f);if(!h)throw new Error(\"Assertion failed: The descriptor should have been registered\");a.push(h)}let l=a.map(f=>Pn(f)).sort().join(\", \"),c=new At;c.version=o.linkType===Qt.HARD?o.version:\"0.0.0-use.local\",c.languageName=o.languageName,c.dependencies=new Map(o.dependencies),c.peerDependencies=new Map(o.peerDependencies),c.dependenciesMeta=new Map(o.dependenciesMeta),c.peerDependenciesMeta=new Map(o.peerDependenciesMeta),c.bin=new Map(o.bin);let u,g=this.storedChecksums.get(o.locatorHash);if(typeof g!=\"undefined\"){let f=g.indexOf(\"/\");if(f===-1)throw new Error(\"Assertion failed: Expected the checksum to reference its cache key\");let h=g.slice(0,f),p=g.slice(f+1);typeof r.__metadata.cacheKey==\"undefined\"&&(r.__metadata.cacheKey=h),h===r.__metadata.cacheKey?u=p:u=g}r[l]=te(N({},c.exportTo({},{compatibilityMode:!1})),{linkType:o.linkType.toLowerCase(),resolution:Ps(o),checksum:u,conditions:o.conditions||void 0})}return`${[`# This file is generated by running \"yarn install\" inside your project.\n`,`# Manual changes might be lost - proceed with caution!\n`].join(\"\")}\n`+Na(r)}async persistLockfile(){let e=k.join(this.cwd,this.configuration.get(\"lockfileFilename\")),r=\"\";try{r=await K.readFilePromise(e,\"utf8\")}catch(s){}let i=this.generateLockfile(),n=$l(r,i);n!==r&&(await K.writeFilePromise(e,n),this.lockFileChecksum=A$(n),this.lockfileNeedsRefresh=!1)}async persistInstallStateFile(){let e=[];for(let o of Object.values(sF))e.push(...o);let 
r=(0,p0.default)(this,e),i=iF.default.serialize(r),n=ln(i);if(this.installStateChecksum===n)return;let s=this.configuration.get(\"installStatePath\");await K.mkdirPromise(k.dirname(s),{recursive:!0}),await K.writeFilePromise(s,await Z1e(i)),this.installStateChecksum=n}async restoreInstallState({restoreInstallersCustomData:e=!0,restoreResolutions:r=!0,restoreBuildState:i=!0}={}){let n=this.configuration.get(\"installStatePath\"),s;try{let o=await $1e(await K.readFilePromise(n));s=iF.default.deserialize(o),this.installStateChecksum=ln(o)}catch{r&&await this.applyLightResolution();return}e&&typeof s.installersCustomData!=\"undefined\"&&(this.installersCustomData=s.installersCustomData),i&&Object.assign(this,(0,p0.default)(s,sF.restoreBuildState)),r&&(s.lockFileChecksum===this.lockFileChecksum?(Object.assign(this,(0,p0.default)(s,sF.restoreResolutions)),this.refreshWorkspaceDependencies()):await this.applyLightResolution())}async applyLightResolution(){await this.resolveEverything({lockfileOnly:!0,report:new pi}),await this.persistInstallStateFile()}async persist(){await this.persistLockfile();for(let e of this.workspacesByCwd.values())await e.persistManifest()}async cacheCleanup({cache:e,report:r}){let i=new Set([\".gitignore\"]);if(!Cx(e.cwd,this.cwd)||!await K.existsPromise(e.cwd))return;let n=this.configuration.get(\"preferAggregateCacheInfo\"),s=0,o=null;for(let a of await K.readdirPromise(e.cwd)){if(i.has(a))continue;let l=k.resolve(e.cwd,a);e.markedFiles.has(l)||(o=a,e.immutable?r.reportError($.IMMUTABLE_CACHE,`${et(this.configuration,k.basename(l),\"magenta\")} appears to be unused and would be marked for deletion, but the cache is immutable`):(n?s+=1:r.reportInfo($.UNUSED_CACHE_ENTRY,`${et(this.configuration,k.basename(l),\"magenta\")} appears to be unused - removing`),await K.removePromise(l)))}n&&s!==0&&r.reportInfo($.UNUSED_CACHE_ENTRY,s>1?`${s} packages appeared to be unused and were removed`:`${o} appeared to be unused and was removed`),e.markedFiles.clear()}};function eUe({project:t,allDescriptors:e,allResolutions:r,allPackages:i,accessibleLocators:n=new Set,optionalBuilds:s=new Set,peerRequirements:o=new Map,volatileDescriptors:a=new Set,report:l,tolerateMissingPackages:c=!1}){var ee;let u=new Map,g=[],f=new Map,h=new Map,p=new Map,m=new Map,y=new Map,Q=new Map(t.workspaces.map(Z=>{let A=Z.anchoredLocator.locatorHash,ne=i.get(A);if(typeof ne==\"undefined\"){if(c)return[A,null];throw new Error(\"Assertion failed: The workspace should have an associated package\")}return[A,cd(ne)]})),S=()=>{let Z=K.mktempSync(),A=k.join(Z,\"stacktrace.log\"),ne=String(g.length+1).length,le=g.map((Ae,T)=>`${`${T+1}.`.padStart(ne,\" \")} ${Ps(Ae)}\n`).join(\"\");throw K.writeFileSync(A,le),K.detachTemp(Z),new ct($.STACK_OVERFLOW_RESOLUTION,`Encountered a stack overflow when resolving peer dependencies; cf ${j.fromPortablePath(A)}`)},x=Z=>{let A=r.get(Z.descriptorHash);if(typeof A==\"undefined\")throw new Error(\"Assertion failed: The resolution should have been registered\");let ne=i.get(A);if(!ne)throw new Error(\"Assertion failed: The package could not be found\");return ne},M=(Z,A,ne,{top:le,optional:Ae})=>{g.length>1e3&&S(),g.push(A);let T=Y(Z,A,ne,{top:le,optional:Ae});return g.pop(),T},Y=(Z,A,ne,{top:le,optional:Ae})=>{if(n.has(A.locatorHash))return;n.add(A.locatorHash),Ae||s.delete(A.locatorHash);let T=i.get(A.locatorHash);if(!T){if(c)return;throw new Error(`Assertion failed: The package (${Bt(t.configuration,A)}) should have been registered`)}let L=[],Ee=[],we=[],qe=[],re=[];for(let Qe of 
Array.from(T.dependencies.values())){if(T.peerDependencies.has(Qe.identHash)&&T.locatorHash!==le)continue;if(il(Qe))throw new Error(\"Assertion failed: Virtual packages shouldn't be encountered when virtualizing a branch\");a.delete(Qe.descriptorHash);let he=Ae;if(!he){let be=T.dependenciesMeta.get(Ot(Qe));if(typeof be!=\"undefined\"){let ce=be.get(null);typeof ce!=\"undefined\"&&ce.optional&&(he=!0)}}let Fe=r.get(Qe.descriptorHash);if(!Fe){if(c)continue;throw new Error(`Assertion failed: The resolution (${sr(t.configuration,Qe)}) should have been registered`)}let Ue=Q.get(Fe)||i.get(Fe);if(!Ue)throw new Error(`Assertion failed: The package (${Fe}, resolved from ${sr(t.configuration,Qe)}) should have been registered`);if(Ue.peerDependencies.size===0){M(Qe,Ue,new Map,{top:le,optional:he});continue}let xe,ve,pe=new Set,X;Ee.push(()=>{xe=sx(Qe,A.locatorHash),ve=ox(Ue,A.locatorHash),T.dependencies.delete(Qe.identHash),T.dependencies.set(xe.identHash,xe),r.set(xe.descriptorHash,ve.locatorHash),e.set(xe.descriptorHash,xe),i.set(ve.locatorHash,ve),L.push([Ue,xe,ve])}),we.push(()=>{var be;X=new Map;for(let ce of ve.peerDependencies.values()){let fe=T.dependencies.get(ce.identHash);if(!fe&&fd(A,ce)&&(Z.identHash===A.identHash?fe=Z:(fe=rr(A,Z.range),e.set(fe.descriptorHash,fe),r.set(fe.descriptorHash,A.locatorHash),a.delete(fe.descriptorHash))),(!fe||fe.range===\"missing:\")&&ve.dependencies.has(ce.identHash)){ve.peerDependencies.delete(ce.identHash);continue}fe||(fe=rr(ce,\"missing:\")),ve.dependencies.set(fe.identHash,fe),il(fe)&&kc(p,fe.descriptorHash).add(ve.locatorHash),f.set(fe.identHash,fe),fe.range===\"missing:\"&&pe.add(fe.identHash),X.set(ce.identHash,(be=ne.get(ce.identHash))!=null?be:ve.locatorHash)}ve.dependencies=new Map(xn(ve.dependencies,([ce,fe])=>Ot(fe)))}),qe.push(()=>{if(!i.has(ve.locatorHash))return;let be=u.get(Ue.locatorHash);typeof be==\"number\"&&be>=2&&S();let ce=u.get(Ue.locatorHash),fe=typeof ce!=\"undefined\"?ce+1:1;u.set(Ue.locatorHash,fe),M(xe,ve,X,{top:le,optional:he}),u.set(Ue.locatorHash,fe-1)}),re.push(()=>{let be=T.dependencies.get(Qe.identHash);if(typeof be==\"undefined\")throw new Error(\"Assertion failed: Expected the peer dependency to have been turned into a dependency\");let ce=r.get(be.descriptorHash);if(typeof ce==\"undefined\")throw new Error(\"Assertion failed: Expected the descriptor to be registered\");if(kc(y,ce).add(A.locatorHash),!!i.has(ve.locatorHash)){for(let fe of ve.peerDependencies.values()){let gt=X.get(fe.identHash);if(typeof gt==\"undefined\")throw new Error(\"Assertion failed: Expected the peer dependency ident to be registered\");kg(xg(m,gt),Ot(fe)).push(ve.locatorHash)}for(let fe of pe)ve.dependencies.delete(fe)}})}for(let Qe of[...Ee,...we])Qe();let se;do{se=!0;for(let[Qe,he,Fe]of L){let Ue=xg(h,Qe.locatorHash),xe=ln(...[...Fe.dependencies.values()].map(be=>{let ce=be.range!==\"missing:\"?r.get(be.descriptorHash):\"missing:\";if(typeof ce==\"undefined\")throw new Error(`Assertion failed: Expected the resolution for ${sr(t.configuration,be)} to have been registered`);return ce===le?`${ce} (top)`:ce}),he.identHash),ve=Ue.get(xe);if(typeof ve==\"undefined\"){Ue.set(xe,he);continue}if(ve===he)continue;i.delete(Fe.locatorHash),e.delete(he.descriptorHash),r.delete(he.descriptorHash),n.delete(Fe.locatorHash);let pe=p.get(he.descriptorHash)||[],X=[T.locatorHash,...pe];p.delete(he.descriptorHash);for(let be of X){let ce=i.get(be);typeof 
ce!=\"undefined\"&&(ce.dependencies.get(he.identHash).descriptorHash!==ve.descriptorHash&&(se=!1),ce.dependencies.set(he.identHash,ve))}}}while(!se);for(let Qe of[...qe,...re])Qe()};for(let Z of t.workspaces){let A=Z.anchoredLocator;a.delete(Z.anchoredDescriptor.descriptorHash),M(Z.anchoredDescriptor,A,new Map,{top:A.locatorHash,optional:!1})}var U;(function(ne){ne[ne.NotProvided=0]=\"NotProvided\",ne[ne.NotCompatible=1]=\"NotCompatible\"})(U||(U={}));let J=[];for(let[Z,A]of y){let ne=i.get(Z);if(typeof ne==\"undefined\")throw new Error(\"Assertion failed: Expected the root to be registered\");let le=m.get(Z);if(typeof le!=\"undefined\")for(let Ae of A){let T=i.get(Ae);if(typeof T!=\"undefined\")for(let[L,Ee]of le){let we=An(L);if(T.peerDependencies.has(we.identHash))continue;let qe=`p${ln(Ae,L,Z).slice(0,5)}`;o.set(qe,{subject:Ae,requested:we,rootRequester:Z,allRequesters:Ee});let re=ne.dependencies.get(we.identHash);if(typeof re!=\"undefined\"){let se=x(re),Qe=(ee=se.version)!=null?ee:\"0.0.0\",he=new Set;for(let Ue of Ee){let xe=i.get(Ue);if(typeof xe==\"undefined\")throw new Error(\"Assertion failed: Expected the link to be registered\");let ve=xe.peerDependencies.get(we.identHash);if(typeof ve==\"undefined\")throw new Error(\"Assertion failed: Expected the ident to be registered\");he.add(ve.range)}[...he].every(Ue=>{if(Ue.startsWith(si.protocol)){if(!t.tryWorkspaceByLocator(se))return!1;Ue=Ue.slice(si.protocol.length),(Ue===\"^\"||Ue===\"~\")&&(Ue=\"*\")}return Uc(Qe,Ue)})||J.push({type:1,subject:T,requested:we,requester:ne,version:Qe,hash:qe,requirementCount:Ee.length})}else{let se=ne.peerDependenciesMeta.get(L);(se==null?void 0:se.optional)||J.push({type:0,subject:T,requested:we,requester:ne,hash:qe})}}}}let W=[Z=>Ax(Z.subject),Z=>Ot(Z.requested),Z=>`${Z.type}`];l==null||l.startSectionSync({reportFooter:()=>{l.reportWarning($.UNNAMED,`Some peer dependencies are incorrectly met; run ${et(t.configuration,\"yarn explain peer-requirements <hash>\",Ge.CODE)} for details, where ${et(t.configuration,\"<hash>\",Ge.CODE)} is the six-letter p-prefixed code`)},skipIfEmpty:!0},()=>{for(let Z of xn(J,W))switch(Z.type){case 0:l.reportWarning($.MISSING_PEER_DEPENDENCY,`${Bt(t.configuration,Z.subject)} doesn't provide ${gi(t.configuration,Z.requested)} (${et(t.configuration,Z.hash,Ge.CODE)}), requested by ${gi(t.configuration,Z.requester)}`);break;case 1:{let A=Z.requirementCount>1?\"and some of its descendants request\":\"requests\";l.reportWarning($.INCOMPATIBLE_PEER_DEPENDENCY,`${Bt(t.configuration,Z.subject)} provides ${gi(t.configuration,Z.requested)} (${et(t.configuration,Z.hash,Ge.CODE)}) with version ${dd(t.configuration,Z.version)}, which doesn't satisfy what ${gi(t.configuration,Z.requester)} ${A}`)}break}})}var aa;(function(l){l.VERSION=\"version\",l.COMMAND_NAME=\"commandName\",l.PLUGIN_NAME=\"pluginName\",l.INSTALL_COUNT=\"installCount\",l.PROJECT_COUNT=\"projectCount\",l.WORKSPACE_COUNT=\"workspaceCount\",l.DEPENDENCY_COUNT=\"dependencyCount\",l.EXTENSION=\"packageExtension\"})(aa||(aa={}));var bC=class{constructor(e,r){this.values=new Map;this.hits=new Map;this.enumerators=new Map;this.configuration=e;let 
i=this.getRegistryPath();this.isNew=!K.existsSync(i),this.sendReport(r),this.startBuffer()}reportVersion(e){this.reportValue(aa.VERSION,e.replace(/-git\\..*/,\"-git\"))}reportCommandName(e){this.reportValue(aa.COMMAND_NAME,e||\"<none>\")}reportPluginName(e){this.reportValue(aa.PLUGIN_NAME,e)}reportProject(e){this.reportEnumerator(aa.PROJECT_COUNT,e)}reportInstall(e){this.reportHit(aa.INSTALL_COUNT,e)}reportPackageExtension(e){this.reportValue(aa.EXTENSION,e)}reportWorkspaceCount(e){this.reportValue(aa.WORKSPACE_COUNT,String(e))}reportDependencyCount(e){this.reportValue(aa.DEPENDENCY_COUNT,String(e))}reportValue(e,r){kc(this.values,e).add(r)}reportEnumerator(e,r){kc(this.enumerators,e).add(ln(r))}reportHit(e,r=\"*\"){let i=xg(this.hits,e),n=qa(i,r,()=>0);i.set(r,n+1)}getRegistryPath(){let e=this.configuration.get(\"globalFolder\");return k.join(e,\"telemetry.json\")}sendReport(e){var u,g,f;let r=this.getRegistryPath(),i;try{i=K.readJsonSync(r)}catch{i={}}let n=Date.now(),s=this.configuration.get(\"telemetryInterval\")*24*60*60*1e3,a=((u=i.lastUpdate)!=null?u:n+s+Math.floor(s*Math.random()))+s;if(a>n&&i.lastUpdate!=null)return;try{K.mkdirSync(k.dirname(r),{recursive:!0}),K.writeJsonSync(r,{lastUpdate:n})}catch{return}if(a>n||!i.blocks)return;let l=`https://browser-http-intake.logs.datadoghq.eu/v1/input/${e}?ddsource=yarn`,c=h=>VP(l,h,{configuration:this.configuration}).catch(()=>{});for(let[h,p]of Object.entries((g=i.blocks)!=null?g:{})){if(Object.keys(p).length===0)continue;let m=p;m.userId=h,m.reportType=\"primary\";for(let S of Object.keys((f=m.enumerators)!=null?f:{}))m.enumerators[S]=m.enumerators[S].length;c(m);let y=new Map,Q=20;for(let[S,x]of Object.entries(m.values))x.length>0&&y.set(S,x.slice(0,Q));for(;y.size>0;){let S={};S.userId=h,S.reportType=\"secondary\",S.metrics={};for(let[x,M]of y)S.metrics[x]=M.shift(),M.length===0&&y.delete(x);c(S)}}}applyChanges(){var o,a,l,c,u,g,f,h,p;let e=this.getRegistryPath(),r;try{r=K.readJsonSync(e)}catch{r={}}let i=(o=this.configuration.get(\"telemetryUserId\"))!=null?o:\"*\",n=r.blocks=(a=r.blocks)!=null?a:{},s=n[i]=(l=n[i])!=null?l:{};for(let m of this.hits.keys()){let y=s.hits=(c=s.hits)!=null?c:{},Q=y[m]=(u=y[m])!=null?u:{};for(let[S,x]of this.hits.get(m))Q[S]=((g=Q[S])!=null?g:0)+x}for(let m of[\"values\",\"enumerators\"])for(let y of this[m].keys()){let Q=s[m]=(f=s[m])!=null?f:{};Q[y]=[...new Set([...(h=Q[y])!=null?h:[],...(p=this[m].get(y))!=null?p:[]])]}K.mkdirSync(k.dirname(e),{recursive:!0}),K.writeJsonSync(e,r)}startBuffer(){process.on(\"exit\",()=>{try{this.applyChanges()}catch{}})}};var oF=ge(require(\"child_process\")),l$=ge(hc());var aF=ge(require(\"fs\"));var Nf=new Map([[\"constraints\",[[\"constraints\",\"query\"],[\"constraints\",\"source\"],[\"constraints\"]]],[\"exec\",[]],[\"interactive-tools\",[[\"search\"],[\"upgrade-interactive\"]]],[\"stage\",[[\"stage\"]]],[\"typescript\",[]],[\"version\",[[\"version\",\"apply\"],[\"version\",\"check\"],[\"version\"]]],[\"workspace-tools\",[[\"workspaces\",\"focus\"],[\"workspaces\",\"foreach\"]]]]);function tUe(t){let e=j.fromPortablePath(t);process.on(\"SIGINT\",()=>{}),e?(0,oF.execFileSync)(process.execPath,[e,...process.argv.slice(2)],{stdio:\"inherit\",env:te(N({},process.env),{YARN_IGNORE_PATH:\"1\",YARN_IGNORE_CWD:\"1\"})}):(0,oF.execFileSync)(e,process.argv.slice(2),{stdio:\"inherit\",env:te(N({},process.env),{YARN_IGNORE_PATH:\"1\",YARN_IGNORE_CWD:\"1\"})})}async function d0({binaryVersion:t,pluginConfiguration:e}){async function r(){let n=new Is({binaryLabel:\"Yarn Package 
Manager\",binaryName:\"yarn\",binaryVersion:t});try{await i(n)}catch(s){process.stdout.write(n.error(s)),process.exitCode=1}}async function i(n){var m,y,Q,S,x;let s=process.versions.node,o=\">=12 <14 || 14.2 - 14.9 || >14.10.0\";if(!Se.parseOptionalBoolean(process.env.YARN_IGNORE_NODE)&&!Wt.satisfiesWithPrereleases(s,o))throw new Pe(`This tool requires a Node version compatible with ${o} (got ${s}). Upgrade Node, or set \\`YARN_IGNORE_NODE=1\\` in your environment.`);let l=await ye.find(j.toPortablePath(process.cwd()),e,{usePath:!0,strict:!1}),c=l.get(\"yarnPath\"),u=l.get(\"ignorePath\"),g=l.get(\"ignoreCwd\"),f=j.toPortablePath(j.resolve(process.argv[1])),h=M=>K.readFilePromise(M).catch(()=>Buffer.of());if(!u&&!g&&await(async()=>c===f||Buffer.compare(...await Promise.all([h(c),h(f)]))===0)()){process.env.YARN_IGNORE_PATH=\"1\",process.env.YARN_IGNORE_CWD=\"1\",await i(n);return}else if(c!==null&&!u)if(!K.existsSync(c))process.stdout.write(n.error(new Error(`The \"yarn-path\" option has been set (in ${l.sources.get(\"yarnPath\")}), but the specified location doesn't exist (${c}).`))),process.exitCode=1;else try{tUe(c)}catch(M){process.exitCode=M.code||1}else{u&&delete process.env.YARN_IGNORE_PATH,l.get(\"enableTelemetry\")&&!l$.isCI&&process.stdout.isTTY&&(ye.telemetry=new bC(l,\"puba9cdc10ec5790a2cf4969dd413a47270\")),(m=ye.telemetry)==null||m.reportVersion(t);for(let[J,W]of l.plugins.entries()){Nf.has((Q=(y=J.match(/^@yarnpkg\\/plugin-(.*)$/))==null?void 0:y[1])!=null?Q:\"\")&&((S=ye.telemetry)==null||S.reportPluginName(J));for(let ee of W.commands||[])n.register(ee)}let Y=n.process(process.argv.slice(2));Y.help||(x=ye.telemetry)==null||x.reportCommandName(Y.path.join(\" \"));let U=Y.cwd;if(typeof U!=\"undefined\"&&!g){let J=(0,aF.realpathSync)(process.cwd()),W=(0,aF.realpathSync)(U);if(J!==W){process.chdir(U),await r();return}}await n.runExit(Y,{cwd:j.toPortablePath(process.cwd()),plugins:e,quiet:!1,stdin:process.stdin,stdout:process.stdout,stderr:process.stderr})}}return r().catch(n=>{process.stdout.write(n.stack||n.message),process.exitCode=1}).finally(()=>K.rmtempPromise())}function c$(t){t.Command.Path=(...e)=>r=>{r.paths=r.paths||[],r.paths.push(e)};for(let e of[\"Array\",\"Boolean\",\"String\",\"Proxy\",\"Rest\",\"Counter\"])t.Command[e]=(...r)=>(i,n)=>{let s=t.Option[e](...r);Object.defineProperty(i,`__${n}`,{configurable:!1,enumerable:!0,get(){return s},set(o){this[n]=o}})};return t}var _C={};ft(_C,{BaseCommand:()=>Le,WorkspaceRequiredError:()=>ht,getDynamicLibs:()=>bie,getPluginConfiguration:()=>J0,main:()=>d0,openWorkspace:()=>Jf,pluginCommands:()=>Nf});var Le=class extends Re{constructor(){super(...arguments);this.cwd=z.String(\"--cwd\",{hidden:!0})}};var ht=class extends Pe{constructor(e,r){let i=k.relative(e,r),n=k.join(e,At.fileName);super(`This command can only be run from within a workspace of your project (${i} isn't a workspace of ${n}).`)}};var aqe=ge(ti());Es();var Aqe=ge(sN()),bie=()=>new Map([[\"@yarnpkg/cli\",_C],[\"@yarnpkg/core\",QC],[\"@yarnpkg/fslib\",Zh],[\"@yarnpkg/libzip\",Md],[\"@yarnpkg/parsers\",op],[\"@yarnpkg/shell\",Kd],[\"clipanion\",Cp],[\"semver\",aqe],[\"typanion\",sg],[\"yup\",Aqe]]);async function Jf(t,e){let{project:r,workspace:i}=await ze.find(t,e);if(!i)throw new ht(r.cwd,e);return i}var S9e=ge(ti());Es();var k9e=ge(sN());var AL={};ft(AL,{dedupeUtils:()=>HN,default:()=>Ize,suggestUtils:()=>kN});var vAe=ge(hc());var Fse=ge($C());Es();var 
kN={};ft(kN,{Modifier:()=>ga,Strategy:()=>_r,Target:()=>Hr,WorkspaceModifier:()=>Vf,applyModifier:()=>xse,extractDescriptorFromPath:()=>DN,extractRangeModifier:()=>kse,fetchDescriptorFrom:()=>PN,findProjectDescriptors:()=>Rse,getModifier:()=>em,getSuggestedDescriptors:()=>tm,makeWorkspaceDescriptor:()=>Dse,toWorkspaceModifier:()=>Pse});var xN=ge(ti()),vJe=\"workspace:\",Hr;(function(i){i.REGULAR=\"dependencies\",i.DEVELOPMENT=\"devDependencies\",i.PEER=\"peerDependencies\"})(Hr||(Hr={}));var ga;(function(i){i.CARET=\"^\",i.TILDE=\"~\",i.EXACT=\"\"})(ga||(ga={}));var Vf;(function(i){i.CARET=\"^\",i.TILDE=\"~\",i.EXACT=\"*\"})(Vf||(Vf={}));var _r;(function(s){s.KEEP=\"keep\",s.REUSE=\"reuse\",s.PROJECT=\"project\",s.LATEST=\"latest\",s.CACHE=\"cache\"})(_r||(_r={}));function em(t,e){return t.exact?ga.EXACT:t.caret?ga.CARET:t.tilde?ga.TILDE:e.configuration.get(\"defaultSemverRangePrefix\")}var SJe=/^([\\^~]?)[0-9]+(?:\\.[0-9]+){0,2}(?:-\\S+)?$/;function kse(t,{project:e}){let r=t.match(SJe);return r?r[1]:e.configuration.get(\"defaultSemverRangePrefix\")}function xse(t,e){let{protocol:r,source:i,params:n,selector:s}=P.parseRange(t.range);return xN.default.valid(s)&&(s=`${e}${t.range}`),P.makeDescriptor(t,P.makeRange({protocol:r,source:i,params:n,selector:s}))}function Pse(t){switch(t){case ga.CARET:return Vf.CARET;case ga.TILDE:return Vf.TILDE;case ga.EXACT:return Vf.EXACT;default:throw new Error(`Assertion failed: Unknown modifier: \"${t}\"`)}}function Dse(t,e){return P.makeDescriptor(t.anchoredDescriptor,`${vJe}${Pse(e)}`)}async function Rse(t,{project:e,target:r}){let i=new Map,n=s=>{let o=i.get(s.descriptorHash);return o||i.set(s.descriptorHash,o={descriptor:s,locators:[]}),o};for(let s of e.workspaces)if(r===Hr.PEER){let o=s.manifest.peerDependencies.get(t.identHash);o!==void 0&&n(o).locators.push(s.locator)}else{let o=s.manifest.dependencies.get(t.identHash),a=s.manifest.devDependencies.get(t.identHash);r===Hr.DEVELOPMENT?a!==void 0?n(a).locators.push(s.locator):o!==void 0&&n(o).locators.push(s.locator):o!==void 0?n(o).locators.push(s.locator):a!==void 0&&n(a).locators.push(s.locator)}return i}async function DN(t,{cwd:e,workspace:r}){return await kJe(async i=>{k.isAbsolute(t)||(t=k.relative(r.cwd,k.resolve(e,t)),t.match(/^\\.{0,2}\\//)||(t=`./${t}`));let{project:n}=r,s=await PN(P.makeIdent(null,\"archive\"),t,{project:r.project,cache:i,workspace:r});if(!s)throw new Error(\"Assertion failed: The descriptor should have been found\");let o=new pi,a=n.configuration.makeResolver(),l=n.configuration.makeFetcher(),c={checksums:n.storedChecksums,project:n,cache:i,fetcher:l,report:o,resolver:a},u=a.bindDescriptor(s,r.anchoredLocator,c),g=P.convertDescriptorToLocator(u),f=await l.fetch(g,c),h=await At.find(f.prefixPath,{baseFs:f.packageFs});if(!h.name)throw new Error(\"Target path doesn't have a name\");return P.makeDescriptor(h.name,t)})}async function tm(t,{project:e,workspace:r,cache:i,target:n,modifier:s,strategies:o,maxResults:a=Infinity}){if(!(a>=0))throw new Error(`Invalid maxResults (${a})`);if(t.range!==\"unknown\")return{suggestions:[{descriptor:t,name:`Use ${P.prettyDescriptor(e.configuration,t)}`,reason:\"(unambiguous explicit request)\"}],rejections:[]};let l=typeof r!=\"undefined\"&&r!==null&&r.manifest[n].get(t.identHash)||null,c=[],u=[],g=async f=>{try{await f()}catch(h){u.push(h)}};for(let f of o){if(c.length>=a)break;switch(f){case _r.KEEP:await g(async()=>{l&&c.push({descriptor:l,name:`Keep ${P.prettyDescriptor(e.configuration,l)}`,reason:\"(no changes)\"})});break;case 
_r.REUSE:await g(async()=>{for(let{descriptor:h,locators:p}of(await Rse(t,{project:e,target:n})).values()){if(p.length===1&&p[0].locatorHash===r.anchoredLocator.locatorHash&&o.includes(_r.KEEP))continue;let m=`(originally used by ${P.prettyLocator(e.configuration,p[0])}`;m+=p.length>1?` and ${p.length-1} other${p.length>2?\"s\":\"\"})`:\")\",c.push({descriptor:h,name:`Reuse ${P.prettyDescriptor(e.configuration,h)}`,reason:m})}});break;case _r.CACHE:await g(async()=>{for(let h of e.storedDescriptors.values())h.identHash===t.identHash&&c.push({descriptor:h,name:`Reuse ${P.prettyDescriptor(e.configuration,h)}`,reason:\"(already used somewhere in the lockfile)\"})});break;case _r.PROJECT:await g(async()=>{if(r.manifest.name!==null&&t.identHash===r.manifest.name.identHash)return;let h=e.tryWorkspaceByIdent(t);if(h===null)return;let p=Dse(h,s);c.push({descriptor:p,name:`Attach ${P.prettyDescriptor(e.configuration,p)}`,reason:`(local workspace at ${ae.pretty(e.configuration,h.relativeCwd,ae.Type.PATH)})`})});break;case _r.LATEST:await g(async()=>{if(t.range!==\"unknown\")c.push({descriptor:t,name:`Use ${P.prettyRange(e.configuration,t.range)}`,reason:\"(explicit range requested)\"});else if(n===Hr.PEER)c.push({descriptor:P.makeDescriptor(t,\"*\"),name:\"Use *\",reason:\"(catch-all peer dependency pattern)\"});else if(!e.configuration.get(\"enableNetwork\"))c.push({descriptor:null,name:\"Resolve from latest\",reason:ae.pretty(e.configuration,\"(unavailable because enableNetwork is toggled off)\",\"grey\")});else{let h=await PN(t,\"latest\",{project:e,cache:i,workspace:r,preserveModifier:!1});h&&(h=xse(h,s),c.push({descriptor:h,name:`Use ${P.prettyDescriptor(e.configuration,h)}`,reason:\"(resolved from latest)\"}))}});break}}return{suggestions:c.slice(0,a),rejections:u.slice(0,a)}}async function PN(t,e,{project:r,cache:i,workspace:n,preserveModifier:s=!0}){let o=P.makeDescriptor(t,e),a=new pi,l=r.configuration.makeFetcher(),c=r.configuration.makeResolver(),u={project:r,fetcher:l,cache:i,checksums:r.storedChecksums,report:a,cacheOptions:{skipIntegrityCheck:!0},skipIntegrityCheck:!0},g=te(N({},u),{resolver:c,fetchOptions:u}),f=c.bindDescriptor(o,n.anchoredLocator,g),h=await c.getCandidates(f,new Map,g);if(h.length===0)return null;let p=h[0],{protocol:m,source:y,params:Q,selector:S}=P.parseRange(P.convertToManifestRange(p.reference));if(m===r.configuration.get(\"defaultProtocol\")&&(m=null),xN.default.valid(S)&&s!==!1){let x=typeof s==\"string\"?s:o.range;S=kse(x,{project:r})+S}return P.makeDescriptor(p,P.makeRange({protocol:m,source:y,params:Q,selector:S}))}async function kJe(t){return await K.mktempPromise(async e=>{let r=ye.create(e);return r.useWithSource(e,{enableMirror:!1,compressionLevel:0},e,{overwrite:!0}),await t(new Nt(e,{configuration:r,check:!1,immutable:!1}))})}var rm=class extends Le{constructor(){super(...arguments);this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"});this.exact=z.Boolean(\"-E,--exact\",!1,{description:\"Don't use any semver modifier on the resolved range\"});this.tilde=z.Boolean(\"-T,--tilde\",!1,{description:\"Use the `~` semver modifier on the resolved range\"});this.caret=z.Boolean(\"-C,--caret\",!1,{description:\"Use the `^` semver modifier on the resolved range\"});this.dev=z.Boolean(\"-D,--dev\",!1,{description:\"Add a package as a dev dependency\"});this.peer=z.Boolean(\"-P,--peer\",!1,{description:\"Add a package as a peer dependency\"});this.optional=z.Boolean(\"-O,--optional\",!1,{description:\"Add / upgrade a package to 
an optional regular / peer dependency\"});this.preferDev=z.Boolean(\"--prefer-dev\",!1,{description:\"Add / upgrade a package to a dev dependency\"});this.interactive=z.Boolean(\"-i,--interactive\",{description:\"Reuse the specified package from other workspaces in the project\"});this.cached=z.Boolean(\"--cached\",!1,{description:\"Reuse the highest version already used somewhere within the project\"});this.mode=z.String(\"--mode\",{description:\"Change what artifacts installs generate\",validator:nn(di)});this.silent=z.Boolean(\"--silent\",{hidden:!0});this.packages=z.Rest()}async execute(){var m;let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd),n=await Nt.find(e);if(!i)throw new ht(r.cwd,this.context.cwd);await r.restoreInstallState({restoreResolutions:!1});let s=(m=this.interactive)!=null?m:e.get(\"preferInteractive\"),o=em(this,r),a=[...s?[_r.REUSE]:[],_r.PROJECT,...this.cached?[_r.CACHE]:[],_r.LATEST],l=s?Infinity:1,c=await Promise.all(this.packages.map(async y=>{let Q=y.match(/^\\.{0,2}\\//)?await DN(y,{cwd:this.context.cwd,workspace:i}):P.tryParseDescriptor(y),S=y.match(/^(https?:|git@github)/);if(S)throw new Pe(`It seems you are trying to add a package using a ${ae.pretty(e,`${S[0]}...`,Di.RANGE)} url; we now require package names to be explicitly specified.\nTry running the command again with the package name prefixed: ${ae.pretty(e,\"yarn add\",Di.CODE)} ${ae.pretty(e,P.makeDescriptor(P.makeIdent(null,\"my-package\"),`${S[0]}...`),Di.DESCRIPTOR)}`);if(!Q)throw new Pe(`The ${ae.pretty(e,y,Di.CODE)} string didn't match the required format (package-name@range). Did you perhaps forget to explicitly reference the package name?`);let x=xJe(i,Q,{dev:this.dev,peer:this.peer,preferDev:this.preferDev,optional:this.optional}),M=await tm(Q,{project:r,workspace:i,cache:n,target:x,modifier:o,strategies:a,maxResults:l});return[Q,M,x]})),u=await uA.start({configuration:e,stdout:this.context.stdout,suggestInstall:!1},async y=>{for(let[Q,{suggestions:S,rejections:x}]of c)if(S.filter(Y=>Y.descriptor!==null).length===0){let[Y]=x;if(typeof Y==\"undefined\")throw new Error(\"Assertion failed: Expected an error to have been set\");r.configuration.get(\"enableNetwork\")?y.reportError($.CANT_SUGGEST_RESOLUTIONS,`${P.prettyDescriptor(e,Q)} can't be resolved to a satisfying range`):y.reportError($.CANT_SUGGEST_RESOLUTIONS,`${P.prettyDescriptor(e,Q)} can't be resolved to a satisfying range (note: network resolution has been disabled)`),y.reportSeparator(),y.reportExceptionOnce(Y)}});if(u.hasErrors())return u.exitCode();let g=!1,f=[],h=[];for(let[,{suggestions:y},Q]of c){let S,x=y.filter(J=>J.descriptor!==null),M=x[0].descriptor,Y=x.every(J=>P.areDescriptorsEqual(J.descriptor,M));x.length===1||Y?S=M:(g=!0,{answer:S}=await(0,Fse.prompt)({type:\"select\",name:\"answer\",message:\"Which range do you want to use?\",choices:y.map(({descriptor:J,name:W,reason:ee})=>J?{name:W,hint:ee,descriptor:J}:{name:W,hint:ee,disabled:!0}),onCancel:()=>process.exit(130),result(J){return this.find(J,\"descriptor\")},stdin:this.context.stdin,stdout:this.context.stdout}));let U=i.manifest[Q].get(S.identHash);(typeof U==\"undefined\"||U.descriptorHash!==S.descriptorHash)&&(i.manifest[Q].set(S.identHash,S),this.optional&&(Q===\"dependencies\"?i.manifest.ensureDependencyMeta(te(N({},S),{range:\"unknown\"})).optional=!0:Q===\"peerDependencies\"&&(i.manifest.ensurePeerDependencyMeta(te(N({},S),{range:\"unknown\"})).optional=!0)),typeof 
U==\"undefined\"?f.push([i,Q,S,a]):h.push([i,Q,U,S]))}return await e.triggerMultipleHooks(y=>y.afterWorkspaceDependencyAddition,f),await e.triggerMultipleHooks(y=>y.afterWorkspaceDependencyReplacement,h),g&&this.context.stdout.write(`\n`),(await Je.start({configuration:e,json:this.json,stdout:this.context.stdout,includeLogs:!this.context.quiet},async y=>{await r.install({cache:n,report:y,mode:this.mode})})).exitCode()}};rm.paths=[[\"add\"]],rm.usage=Re.Usage({description:\"add dependencies to the project\",details:\"\\n      This command adds a package to the package.json for the nearest workspace.\\n\\n      - If it didn't exist before, the package will by default be added to the regular `dependencies` field, but this behavior can be overriden thanks to the `-D,--dev` flag (which will cause the dependency to be added to the `devDependencies` field instead) and the `-P,--peer` flag (which will do the same but for `peerDependencies`).\\n\\n      - If the package was already listed in your dependencies, it will by default be upgraded whether it's part of your `dependencies` or `devDependencies` (it won't ever update `peerDependencies`, though).\\n\\n      - If set, the `--prefer-dev` flag will operate as a more flexible `-D,--dev` in that it will add the package to your `devDependencies` if it isn't already listed in either `dependencies` or `devDependencies`, but it will also happily upgrade your `dependencies` if that's what you already use (whereas `-D,--dev` would throw an exception).\\n\\n      - If set, the `-O,--optional` flag will add the package to the `optionalDependencies` field and, in combination with the `-P,--peer` flag, it will add the package as an optional peer dependency. If the package was already listed in your `dependencies`, it will be upgraded to `optionalDependencies`. If the package was already listed in your `peerDependencies`, in combination with the `-P,--peer` flag, it will be upgraded to an optional peer dependency: `\\\"peerDependenciesMeta\\\": { \\\"<package>\\\": { \\\"optional\\\": true } }`\\n\\n      - If the added package doesn't specify a range at all its `latest` tag will be resolved and the returned version will be used to generate a new semver range (using the `^` modifier by default unless otherwise configured via the `defaultSemverRangePrefix` configuration, or the `~` modifier if `-T,--tilde` is specified, or no modifier at all if `-E,--exact` is specified). Two exceptions to this rule: the first one is that if the package is a workspace then its local version will be used, and the second one is that if you use `-P,--peer` the default range will be `*` and won't be resolved at all.\\n\\n      - If the added package specifies a range (such as `^1.0.0`, `latest`, or `rc`), Yarn will add this range as-is in the resulting package.json entry (in particular, tags such as `rc` will be encoded as-is rather than being converted into a semver range).\\n\\n      If the `--cached` option is used, Yarn will preferably reuse the highest version already used somewhere within the project, even if through a transitive dependency.\\n\\n      If the `-i,--interactive` option is used (or if the `preferInteractive` settings is toggled on) the command will first try to check whether other workspaces in the project use the specified package and, if so, will offer to reuse them.\\n\\n      If the `--mode=<mode>` option is set, Yarn will change which artifacts are generated. 
The modes currently supported are:\\n\\n      - `skip-build` will not run the build scripts at all. Note that this is different from setting `enableScripts` to false because the later will disable build scripts, and thus affect the content of the artifacts generated on disk, whereas the former will just disable the build step - but not the scripts themselves, which just won't run.\\n\\n      - `update-lockfile` will skip the link step altogether, and only fetch packages that are missing from the lockfile (or that have no associated checksums). This mode is typically used by tools like Renovate or Dependabot to keep a lockfile up-to-date without incurring the full install cost.\\n\\n      For a compilation of all the supported protocols, please consult the dedicated page from our website: https://yarnpkg.com/features/protocols.\\n    \",examples:[[\"Add a regular package to the current workspace\",\"$0 add lodash\"],[\"Add a specific version for a package to the current workspace\",\"$0 add lodash@1.2.3\"],[\"Add a package from a GitHub repository (the master branch) to the current workspace using a URL\",\"$0 add lodash@https://github.com/lodash/lodash\"],[\"Add a package from a GitHub repository (the master branch) to the current workspace using the GitHub protocol\",\"$0 add lodash@github:lodash/lodash\"],[\"Add a package from a GitHub repository (the master branch) to the current workspace using the GitHub protocol (shorthand)\",\"$0 add lodash@lodash/lodash\"],[\"Add a package from a specific branch of a GitHub repository to the current workspace using the GitHub protocol (shorthand)\",\"$0 add lodash-es@lodash/lodash#es\"]]});var Nse=rm;function xJe(t,e,{dev:r,peer:i,preferDev:n,optional:s}){let o=t.manifest[Hr.REGULAR].has(e.identHash),a=t.manifest[Hr.DEVELOPMENT].has(e.identHash),l=t.manifest[Hr.PEER].has(e.identHash);if((r||i)&&o)throw new Pe(`Package \"${P.prettyIdent(t.project.configuration,e)}\" is already listed as a regular dependency - remove the -D,-P flags or remove it from your dependencies first`);if(!r&&!i&&l)throw new Pe(`Package \"${P.prettyIdent(t.project.configuration,e)}\" is already listed as a peer dependency - use either of -D or -P, or remove it from your peer dependencies first`);if(s&&a)throw new Pe(`Package \"${P.prettyIdent(t.project.configuration,e)}\" is already listed as a dev dependency - remove the -O flag or remove it from your dev dependencies first`);if(s&&!i&&l)throw new Pe(`Package \"${P.prettyIdent(t.project.configuration,e)}\" is already listed as a peer dependency - remove the -O flag or add the -P flag or remove it from your peer dependencies first`);if((r||n)&&s)throw new Pe(`Package \"${P.prettyIdent(t.project.configuration,e)}\" cannot simultaneously be a dev dependency and an optional dependency`);return i?Hr.PEER:r||n?Hr.DEVELOPMENT:o?Hr.REGULAR:a?Hr.DEVELOPMENT:Hr.REGULAR}var im=class extends Le{constructor(){super(...arguments);this.verbose=z.Boolean(\"-v,--verbose\",!1,{description:\"Print both the binary name and the locator of the package that provides the binary\"});this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"});this.name=z.String({required:!1})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,locator:i}=await ze.find(e,this.context.cwd);if(await r.restoreInstallState(),this.name){let o=(await Zt.getPackageAccessibleBinaries(i,{project:r})).get(this.name);if(!o)throw new Pe(`Couldn't find a binary named \"${this.name}\" for package 
\"${P.prettyLocator(e,i)}\"`);let[,a]=o;return this.context.stdout.write(`${a}\n`),0}return(await Je.start({configuration:e,json:this.json,stdout:this.context.stdout},async s=>{let o=await Zt.getPackageAccessibleBinaries(i,{project:r}),l=Array.from(o.keys()).reduce((c,u)=>Math.max(c,u.length),0);for(let[c,[u,g]]of o)s.reportJson({name:c,source:P.stringifyIdent(u),path:g});if(this.verbose)for(let[c,[u]]of o)s.reportInfo(null,`${c.padEnd(l,\" \")}   ${P.prettyLocator(e,u)}`);else for(let c of o.keys())s.reportInfo(null,c)})).exitCode()}};im.paths=[[\"bin\"]],im.usage=Re.Usage({description:\"get the path to a binary script\",details:`\n      When used without arguments, this command will print the list of all the binaries available in the current workspace. Adding the \\`-v,--verbose\\` flag will cause the output to contain both the binary name and the locator of the package that provides the binary.\n\n      When an argument is specified, this command will just print the path to the binary on the standard output and exit. Note that the reported path may be stored within a zip archive.\n    `,examples:[[\"List all the available binaries\",\"$0 bin\"],[\"Print the path to a specific binary\",\"$0 bin eslint\"]]});var Lse=im;var nm=class extends Le{constructor(){super(...arguments);this.mirror=z.Boolean(\"--mirror\",!1,{description:\"Remove the global cache files instead of the local cache files\"});this.all=z.Boolean(\"--all\",!1,{description:\"Remove both the global cache files and the local cache files of the current project\"})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),r=await Nt.find(e);return(await Je.start({configuration:e,stdout:this.context.stdout},async()=>{let n=(this.all||this.mirror)&&r.mirrorCwd!==null,s=!this.mirror;n&&(await K.removePromise(r.mirrorCwd),await e.triggerHook(o=>o.cleanGlobalArtifacts,e)),s&&await K.removePromise(r.cwd)})).exitCode()}};nm.paths=[[\"cache\",\"clean\"],[\"cache\",\"clear\"]],nm.usage=Re.Usage({description:\"remove the shared cache files\",details:`\n      This command will remove all the files from the cache.\n    `,examples:[[\"Remove all the local archives\",\"$0 cache clean\"],[\"Remove all the archives stored in the ~/.yarn directory\",\"$0 cache clean --mirror\"]]});var Tse=nm;var Ose=ge(k0()),RN=ge(require(\"util\")),sm=class extends Le{constructor(){super(...arguments);this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"});this.unsafe=z.Boolean(\"--no-redacted\",!1,{description:\"Don't redact secrets (such as tokens) from the output\"});this.name=z.String()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),r=this.name.replace(/[.[].*$/,\"\"),i=this.name.replace(/^[^.[]*/,\"\");if(typeof e.settings.get(r)==\"undefined\")throw new Pe(`Couldn't find a configuration settings named \"${r}\"`);let s=e.getSpecial(r,{hideSecrets:!this.unsafe,getNativePaths:!0}),o=Se.convertMapsToIndexableObjects(s),a=i?(0,Ose.default)(o,i):o,l=await Je.start({configuration:e,includeFooter:!1,json:this.json,stdout:this.context.stdout},async c=>{c.reportJson(a)});if(!this.json){if(typeof a==\"string\")return this.context.stdout.write(`${a}\n`),l.exitCode();RN.inspect.styles.name=\"cyan\",this.context.stdout.write(`${(0,RN.inspect)(a,{depth:Infinity,colors:e.get(\"enableColors\"),compact:!1})}\n`)}return l.exitCode()}};sm.paths=[[\"config\",\"get\"]],sm.usage=Re.Usage({description:\"read a configuration settings\",details:`\n      This command will print a configuration 
setting.\n\n      Secrets (such as tokens) will be redacted from the output by default. If this behavior isn't desired, set the \\`--no-redacted\\` to get the untransformed value.\n    `,examples:[[\"Print a simple configuration setting\",\"yarn config get yarnPath\"],[\"Print a complex configuration setting\",\"yarn config get packageExtensions\"],[\"Print a nested field from the configuration\",`yarn config get 'npmScopes[\"my-company\"].npmRegistryServer'`],[\"Print a token from the configuration\",\"yarn config get npmAuthToken --no-redacted\"],[\"Print a configuration setting as JSON\",\"yarn config get packageExtensions --json\"]]});var Mse=sm;var Voe=ge(MN()),Xoe=ge(k0()),Zoe=ge(_oe()),UN=ge(require(\"util\")),am=class extends Le{constructor(){super(...arguments);this.json=z.Boolean(\"--json\",!1,{description:\"Set complex configuration settings to JSON values\"});this.home=z.Boolean(\"-H,--home\",!1,{description:\"Update the home configuration instead of the project configuration\"});this.name=z.String();this.value=z.String()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),r=()=>{if(!e.projectCwd)throw new Pe(\"This command must be run from within a project folder\");return e.projectCwd},i=this.name.replace(/[.[].*$/,\"\"),n=this.name.replace(/^[^.[]*\\.?/,\"\");if(typeof e.settings.get(i)==\"undefined\")throw new Pe(`Couldn't find a configuration settings named \"${i}\"`);if(i===\"enableStrictSettings\")throw new Pe(\"This setting only affects the file it's in, and thus cannot be set from the CLI\");let o=this.json?JSON.parse(this.value):this.value;await(this.home?h=>ye.updateHomeConfiguration(h):h=>ye.updateConfiguration(r(),h))(h=>{if(n){let p=(0,Voe.default)(h);return(0,Zoe.default)(p,this.name,o),p}else return te(N({},h),{[i]:o})});let c=(await ye.find(this.context.cwd,this.context.plugins)).getSpecial(i,{hideSecrets:!0,getNativePaths:!0}),u=Se.convertMapsToIndexableObjects(c),g=n?(0,Xoe.default)(u,n):u;return(await Je.start({configuration:e,includeFooter:!1,stdout:this.context.stdout},async h=>{UN.inspect.styles.name=\"cyan\",h.reportInfo($.UNNAMED,`Successfully set ${this.name} to ${(0,UN.inspect)(g,{depth:Infinity,colors:e.get(\"enableColors\"),compact:!1})}`)})).exitCode()}};am.paths=[[\"config\",\"set\"]],am.usage=Re.Usage({description:\"change a configuration settings\",details:`\n      This command will set a configuration setting.\n\n      When used without the \\`--json\\` flag, it can only set a simple configuration setting (a string, a number, or a boolean).\n\n      When used with the \\`--json\\` flag, it can set both simple and complex configuration settings, including Arrays and Objects.\n    `,examples:[[\"Set a simple configuration setting (a string, a number, or a boolean)\",\"yarn config set initScope myScope\"],[\"Set a simple configuration setting (a string, a number, or a boolean) using the `--json` flag\",'yarn config set initScope --json \\\\\"myScope\\\\\"'],[\"Set a complex configuration setting (an Array) using the `--json` flag\",`yarn config set unsafeHttpWhitelist --json '[\"*.example.com\", \"example.com\"]'`],[\"Set a complex configuration setting (an Object) using the `--json` flag\",`yarn config set packageExtensions --json '{ \"@babel/parser@*\": { \"dependencies\": { \"@babel/types\": \"*\" } } }'`],[\"Set a nested configuration setting\",'yarn config set npmScopes.company.npmRegistryServer \"https://npm.example.com\"'],[\"Set a nested configuration setting using indexed access for non-simple keys\",`yarn config 
set 'npmRegistries[\"//npm.example.com\"].npmAuthToken' \"ffffffff-ffff-ffff-ffff-ffffffffffff\"`]]});var $oe=am;var Aae=ge(MN()),lae=ge(SC()),cae=ge(aae()),Am=class extends Le{constructor(){super(...arguments);this.home=z.Boolean(\"-H,--home\",!1,{description:\"Update the home configuration instead of the project configuration\"});this.name=z.String()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),r=()=>{if(!e.projectCwd)throw new Pe(\"This command must be run from within a project folder\");return e.projectCwd},i=this.name.replace(/[.[].*$/,\"\"),n=this.name.replace(/^[^.[]*\\.?/,\"\");if(typeof e.settings.get(i)==\"undefined\")throw new Pe(`Couldn't find a configuration settings named \"${i}\"`);let o=this.home?l=>ye.updateHomeConfiguration(l):l=>ye.updateConfiguration(r(),l);return(await Je.start({configuration:e,includeFooter:!1,stdout:this.context.stdout},async l=>{let c=!1;await o(u=>{if(!(0,lae.default)(u,this.name))return l.reportWarning($.UNNAMED,`Configuration doesn't contain setting ${this.name}; there is nothing to unset`),c=!0,u;let g=n?(0,Aae.default)(u):N({},u);return(0,cae.default)(g,this.name),g}),c||l.reportInfo($.UNNAMED,`Successfully unset ${this.name}`)})).exitCode()}};Am.paths=[[\"config\",\"unset\"]],Am.usage=Re.Usage({description:\"unset a configuration setting\",details:`\n      This command will unset a configuration setting.\n    `,examples:[[\"Unset a simple configuration setting\",\"yarn config unset initScope\"],[\"Unset a complex configuration setting\",\"yarn config unset packageExtensions\"],[\"Unset a nested configuration setting\",\"yarn config unset npmScopes.company.npmRegistryServer\"]]});var uae=Am;var KN=ge(require(\"util\")),lm=class extends Le{constructor(){super(...arguments);this.verbose=z.Boolean(\"-v,--verbose\",!1,{description:\"Print the setting description on top of the regular key/value information\"});this.why=z.Boolean(\"--why\",!1,{description:\"Print the reason why a setting is set a particular way\"});this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins,{strict:!1});return(await Je.start({configuration:e,json:this.json,stdout:this.context.stdout},async i=>{if(e.invalid.size>0&&!this.json){for(let[n,s]of e.invalid)i.reportError($.INVALID_CONFIGURATION_KEY,`Invalid configuration key \"${n}\" in ${s}`);i.reportSeparator()}if(this.json){let n=Se.sortMap(e.settings.keys(),s=>s);for(let s of n){let o=e.settings.get(s),a=e.getSpecial(s,{hideSecrets:!0,getNativePaths:!0}),l=e.sources.get(s);this.verbose?i.reportJson({key:s,effective:a,source:l}):i.reportJson(N({key:s,effective:a,source:l},o))}}else{let n=Se.sortMap(e.settings.keys(),a=>a),s=n.reduce((a,l)=>Math.max(a,l.length),0),o={breakLength:Infinity,colors:e.get(\"enableColors\"),maxArrayLength:2};if(this.why||this.verbose){let a=n.map(c=>{let u=e.settings.get(c);if(!u)throw new Error(`Assertion failed: This settings (\"${c}\") should have been registered`);let g=this.why?e.sources.get(c)||\"<default>\":u.description;return[c,g]}),l=a.reduce((c,[,u])=>Math.max(c,u.length),0);for(let[c,u]of a)i.reportInfo(null,`${c.padEnd(s,\" \")}   ${u.padEnd(l,\" \")}   ${(0,KN.inspect)(e.getSpecial(c,{hideSecrets:!0,getNativePaths:!0}),o)}`)}else for(let a of n)i.reportInfo(null,`${a.padEnd(s,\" \")}   ${(0,KN.inspect)(e.getSpecial(a,{hideSecrets:!0,getNativePaths:!0}),o)}`)}})).exitCode()}};lm.paths=[[\"config\"]],lm.usage=Re.Usage({description:\"display the 
current configuration\",details:`\n      This command prints the current active configuration settings.\n    `,examples:[[\"Print the active configuration settings\",\"$0 config\"]]});var gae=lm;Es();var HN={};ft(HN,{Strategy:()=>Iu,acceptedStrategies:()=>R8e,dedupe:()=>jN});var fae=ge(ts()),Iu;(function(e){e.HIGHEST=\"highest\"})(Iu||(Iu={}));var R8e=new Set(Object.values(Iu)),F8e={highest:async(t,e,{resolver:r,fetcher:i,resolveOptions:n,fetchOptions:s})=>{let o=new Map;for(let[a,l]of t.storedResolutions){let c=t.storedDescriptors.get(a);if(typeof c==\"undefined\")throw new Error(`Assertion failed: The descriptor (${a}) should have been registered`);Se.getSetWithDefault(o,c.identHash).add(l)}return Array.from(t.storedDescriptors.values(),async a=>{if(e.length&&!fae.default.isMatch(P.stringifyIdent(a),e))return null;let l=t.storedResolutions.get(a.descriptorHash);if(typeof l==\"undefined\")throw new Error(`Assertion failed: The resolution (${a.descriptorHash}) should have been registered`);let c=t.originalPackages.get(l);if(typeof c==\"undefined\"||!r.shouldPersistResolution(c,n))return null;let u=o.get(a.identHash);if(typeof u==\"undefined\")throw new Error(`Assertion failed: The resolutions (${a.identHash}) should have been registered`);if(u.size===1)return null;let g=[...u].map(y=>{let Q=t.originalPackages.get(y);if(typeof Q==\"undefined\")throw new Error(`Assertion failed: The package (${y}) should have been registered`);return Q.reference}),f=await r.getSatisfying(a,g,n),h=f==null?void 0:f[0];if(typeof h==\"undefined\")return null;let p=h.locatorHash,m=t.originalPackages.get(p);if(typeof m==\"undefined\")throw new Error(`Assertion failed: The package (${p}) should have been registered`);return p===l?null:{descriptor:a,currentPackage:c,updatedPackage:m}})}};async function jN(t,{strategy:e,patterns:r,cache:i,report:n}){let{configuration:s}=t,o=new pi,a=s.makeResolver(),l=s.makeFetcher(),c={cache:i,checksums:t.storedChecksums,fetcher:l,project:t,report:o,skipIntegrityCheck:!0,cacheOptions:{skipIntegrityCheck:!0}},u={project:t,resolver:a,report:o,fetchOptions:c};return await n.startTimerPromise(\"Deduplication step\",async()=>{let f=await F8e[e](t,r,{resolver:a,resolveOptions:u,fetcher:l,fetchOptions:c}),h=Ji.progressViaCounter(f.length);n.reportProgress(h);let p=0;await Promise.all(f.map(Q=>Q.then(S=>{if(S===null)return;p++;let{descriptor:x,currentPackage:M,updatedPackage:Y}=S;n.reportInfo($.UNNAMED,`${P.prettyDescriptor(s,x)} can be deduped from ${P.prettyLocator(s,M)} to ${P.prettyLocator(s,Y)}`),n.reportJson({descriptor:P.stringifyDescriptor(x),currentResolution:P.stringifyLocator(M),updatedResolution:P.stringifyLocator(Y)}),t.storedResolutions.set(x.descriptorHash,Y.locatorHash)}).finally(()=>h.tick())));let m;switch(p){case 0:m=\"No packages\";break;case 1:m=\"One package\";break;default:m=`${p} packages`}let y=ae.pretty(s,e,ae.Type.CODE);return n.reportInfo($.UNNAMED,`${m} can be deduped using the ${y} strategy`),p})}var cm=class extends Le{constructor(){super(...arguments);this.strategy=z.String(\"-s,--strategy\",Iu.HIGHEST,{description:\"The strategy to use when deduping dependencies\",validator:nn(Iu)});this.check=z.Boolean(\"-c,--check\",!1,{description:\"Exit with exit code 1 when duplicates are found, without persisting the dependency tree\"});this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"});this.mode=z.String(\"--mode\",{description:\"Change what artifacts installs generate\",validator:nn(di)});this.patterns=z.Rest()}async 
execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r}=await ze.find(e,this.context.cwd),i=await Nt.find(e);await r.restoreInstallState({restoreResolutions:!1});let n=0,s=await Je.start({configuration:e,includeFooter:!1,stdout:this.context.stdout,json:this.json},async o=>{n=await jN(r,{strategy:this.strategy,patterns:this.patterns,cache:i,report:o})});return s.hasErrors()?s.exitCode():this.check?n?1:0:(await Je.start({configuration:e,stdout:this.context.stdout,json:this.json},async a=>{await r.install({cache:i,report:a,mode:this.mode})})).exitCode()}};cm.paths=[[\"dedupe\"]],cm.usage=Re.Usage({description:\"deduplicate dependencies with overlapping ranges\",details:\"\\n      Duplicates are defined as descriptors with overlapping ranges being resolved and locked to different locators. They are a natural consequence of Yarn's deterministic installs, but they can sometimes pile up and unnecessarily increase the size of your project.\\n\\n      This command dedupes dependencies in the current project using different strategies (only one is implemented at the moment):\\n\\n      - `highest`: Reuses (where possible) the locators with the highest versions. This means that dependencies can only be upgraded, never downgraded. It's also guaranteed that it never takes more than a single pass to dedupe the entire dependency tree.\\n\\n      **Note:** Even though it never produces a wrong dependency tree, this command should be used with caution, as it modifies the dependency tree, which can sometimes cause problems when packages don't strictly follow semver recommendations. Because of this, it is recommended to also review the changes manually.\\n\\n      If set, the `-c,--check` flag will only report the found duplicates, without persisting the modified dependency tree. If changes are found, the command will exit with a non-zero exit code, making it suitable for CI purposes.\\n\\n      If the `--mode=<mode>` option is set, Yarn will change which artifacts are generated. The modes currently supported are:\\n\\n      - `skip-build` will not run the build scripts at all. Note that this is different from setting `enableScripts` to false because the later will disable build scripts, and thus affect the content of the artifacts generated on disk, whereas the former will just disable the build step - but not the scripts themselves, which just won't run.\\n\\n      - `update-lockfile` will skip the link step altogether, and only fetch packages that are missing from the lockfile (or that have no associated checksums). This mode is typically used by tools like Renovate or Dependabot to keep a lockfile up-to-date without incurring the full install cost.\\n\\n      This command accepts glob patterns as arguments (if valid Idents and supported by [micromatch](https://github.com/micromatch/micromatch)). Make sure to escape the patterns, to prevent your own shell from trying to expand them.\\n\\n      ### In-depth explanation:\\n\\n      Yarn doesn't deduplicate dependencies by default, otherwise installs wouldn't be deterministic and the lockfile would be useless. 
What it actually does is that it tries to not duplicate dependencies in the first place.\\n\\n      **Example:** If `foo@^2.3.4` (a dependency of a dependency) has already been resolved to `foo@2.3.4`, running `yarn add foo@*`will cause Yarn to reuse `foo@2.3.4`, even if the latest `foo` is actually `foo@2.10.14`, thus preventing unnecessary duplication.\\n\\n      Duplication happens when Yarn can't unlock dependencies that have already been locked inside the lockfile.\\n\\n      **Example:** If `foo@^2.3.4` (a dependency of a dependency) has already been resolved to `foo@2.3.4`, running `yarn add foo@2.10.14` will cause Yarn to install `foo@2.10.14` because the existing resolution doesn't satisfy the range `2.10.14`. This behavior can lead to (sometimes) unwanted duplication, since now the lockfile contains 2 separate resolutions for the 2 `foo` descriptors, even though they have overlapping ranges, which means that the lockfile can be simplified so that both descriptors resolve to `foo@2.10.14`.\\n    \",examples:[[\"Dedupe all packages\",\"$0 dedupe\"],[\"Dedupe all packages using a specific strategy\",\"$0 dedupe --strategy highest\"],[\"Dedupe a specific package\",\"$0 dedupe lodash\"],[\"Dedupe all packages with the `@babel/*` scope\",\"$0 dedupe '@babel/*'\"],[\"Check for duplicates (can be used as a CI step)\",\"$0 dedupe --check\"]]});var hae=cm;var ib=class extends Le{async execute(){let{plugins:e}=await ye.find(this.context.cwd,this.context.plugins),r=[];for(let o of e){let{commands:a}=o[1];if(a){let c=Is.from(a).definitions();r.push([o[0],c])}}let i=this.cli.definitions(),n=(o,a)=>o.split(\" \").slice(1).join()===a.split(\" \").slice(1).join(),s=dae()[\"@yarnpkg/builder\"].bundles.standard;for(let o of r){let a=o[1];for(let l of a)i.find(c=>n(c.path,l.path)).plugin={name:o[0],useAlts:s.includes(o[0])}}this.context.stdout.write(`${JSON.stringify(i,null,2)}\n`)}};ib.paths=[[\"--clipanion=definitions\"]];var Cae=ib;var nb=class extends Le{async execute(){this.context.stdout.write(this.cli.usage(null))}};nb.paths=[[\"help\"],[\"--help\"],[\"-h\"]];var mae=nb;var GN=class extends Le{constructor(){super(...arguments);this.leadingArgument=z.String();this.args=z.Proxy()}async execute(){if(this.leadingArgument.match(/[\\\\/]/)&&!P.tryParseIdent(this.leadingArgument)){let e=k.resolve(this.context.cwd,j.toPortablePath(this.leadingArgument));return await this.cli.run(this.args,{cwd:e})}else return await this.cli.run([\"run\",this.leadingArgument,...this.args])}},Eae=GN;var sb=class extends Le{async execute(){this.context.stdout.write(`${Ur||\"<unknown>\"}\n`)}};sb.paths=[[\"-v\"],[\"--version\"]];var Iae=sb;var um=class extends Le{constructor(){super(...arguments);this.commandName=z.String();this.args=z.Proxy()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,locator:i}=await ze.find(e,this.context.cwd);return await r.restoreInstallState(),await Zt.executePackageShellcode(i,this.commandName,this.args,{cwd:this.context.cwd,stdin:this.context.stdin,stdout:this.context.stdout,stderr:this.context.stderr,project:r})}};um.paths=[[\"exec\"]],um.usage=Re.Usage({description:\"execute a shell script\",details:`\n      This command simply executes a shell script within the context of the root directory of the active workspace using the portable shell.\n\n      It also makes sure to call it in a way that's compatible with the current project (for example, on PnP projects the environment will be setup in such a way that PnP will be correctly injected into the 
environment).\n    `,examples:[[\"Execute a single shell command\",\"$0 exec echo Hello World\"],[\"Execute a shell script\",'$0 exec \"tsc & babel src --out-dir lib\"']]});var yae=um;Es();var gm=class extends Le{constructor(){super(...arguments);this.hash=z.String({required:!1,validator:fp(gp(),[hp(/^p[0-9a-f]{5}$/)])})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r}=await ze.find(e,this.context.cwd);return await r.restoreInstallState({restoreResolutions:!1}),await r.applyLightResolution(),typeof this.hash!=\"undefined\"?await N8e(this.hash,r,{stdout:this.context.stdout}):(await Je.start({configuration:e,stdout:this.context.stdout,includeFooter:!1},async n=>{var o;let s=[([,a])=>P.stringifyLocator(r.storedPackages.get(a.subject)),([,a])=>P.stringifyIdent(a.requested)];for(let[a,l]of Se.sortMap(r.peerRequirements,s)){let c=r.storedPackages.get(l.subject);if(typeof c==\"undefined\")throw new Error(\"Assertion failed: Expected the subject package to have been registered\");let u=r.storedPackages.get(l.rootRequester);if(typeof u==\"undefined\")throw new Error(\"Assertion failed: Expected the root package to have been registered\");let g=(o=c.dependencies.get(l.requested.identHash))!=null?o:null,f=ae.pretty(e,a,ae.Type.CODE),h=P.prettyLocator(e,c),p=P.prettyIdent(e,l.requested),m=P.prettyIdent(e,u),y=l.allRequesters.length-1,Q=`descendant${y===1?\"\":\"s\"}`,S=y>0?` and ${y} ${Q}`:\"\",x=g!==null?\"provides\":\"doesn't provide\";n.reportInfo(null,`${f} \\u2192 ${h} ${x} ${p} to ${m}${S}`)}})).exitCode()}};gm.paths=[[\"explain\",\"peer-requirements\"]],gm.usage=Re.Usage({description:\"explain a set of peer requirements\",details:`\n      A set of peer requirements represents all peer requirements that a dependent must satisfy when providing a given peer request to a requester and its descendants.\n\n      When the hash argument is specified, this command prints a detailed explanation of all requirements of the set corresponding to the hash and whether they're satisfied or not.\n\n      When used without arguments, this command lists all sets of peer requirements and the corresponding hash that can be used to get detailed information about a given set.\n\n      **Note:** A hash is a six-letter p-prefixed code that can be obtained from peer dependency warnings or from the list of all peer requirements (\\`yarn explain peer-requirements\\`).\n    `,examples:[[\"Explain the corresponding set of peer requirements for a hash\",\"$0 explain peer-requirements p1a4ed\"],[\"List all sets of peer requirements\",\"$0 explain peer-requirements\"]]});var wae=gm;async function N8e(t,e,r){let{configuration:i}=e,n=e.peerRequirements.get(t);if(typeof n==\"undefined\")throw new Error(`No peerDependency requirements found for hash: \"${t}\"`);return(await Je.start({configuration:i,stdout:r.stdout,includeFooter:!1},async o=>{var Q,S;let a=e.storedPackages.get(n.subject);if(typeof a==\"undefined\")throw new Error(\"Assertion failed: Expected the subject package to have been registered\");let l=e.storedPackages.get(n.rootRequester);if(typeof l==\"undefined\")throw new Error(\"Assertion failed: Expected the root package to have been registered\");let c=(Q=a.dependencies.get(n.requested.identHash))!=null?Q:null,u=c!==null?e.storedResolutions.get(c.descriptorHash):null;if(typeof u==\"undefined\")throw new Error(\"Assertion failed: Expected the resolution to have been registered\");let g=u!==null?e.storedPackages.get(u):null;if(typeof g==\"undefined\")throw new Error(\"Assertion 
failed: Expected the provided package to have been registered\");let f=[...n.allRequesters.values()].map(x=>{let M=e.storedPackages.get(x);if(typeof M==\"undefined\")throw new Error(\"Assertion failed: Expected the package to be registered\");let Y=P.devirtualizeLocator(M),U=e.storedPackages.get(Y.locatorHash);if(typeof U==\"undefined\")throw new Error(\"Assertion failed: Expected the package to be registered\");let J=U.peerDependencies.get(n.requested.identHash);if(typeof J==\"undefined\")throw new Error(\"Assertion failed: Expected the peer dependency to be registered\");return{pkg:M,peerDependency:J}});if(g!==null){let x=f.every(({peerDependency:M})=>Wt.satisfiesWithPrereleases(g.version,M.range));o.reportInfo($.UNNAMED,`${P.prettyLocator(i,a)} provides ${P.prettyLocator(i,g)} with version ${P.prettyReference(i,(S=g.version)!=null?S:\"<missing>\")}, which ${x?\"satisfies\":\"doesn't satisfy\"} the following requirements:`)}else o.reportInfo($.UNNAMED,`${P.prettyLocator(i,a)} doesn't provide ${P.prettyIdent(i,n.requested)}, breaking the following requirements:`);o.reportSeparator();let h=ae.mark(i),p=[];for(let{pkg:x,peerDependency:M}of Se.sortMap(f,Y=>P.stringifyLocator(Y.pkg))){let U=(g!==null?Wt.satisfiesWithPrereleases(g.version,M.range):!1)?h.Check:h.Cross;p.push({stringifiedLocator:P.stringifyLocator(x),prettyLocator:P.prettyLocator(i,x),prettyRange:P.prettyRange(i,M.range),mark:U})}let m=Math.max(...p.map(({stringifiedLocator:x})=>x.length)),y=Math.max(...p.map(({prettyRange:x})=>x.length));for(let{stringifiedLocator:x,prettyLocator:M,prettyRange:Y,mark:U}of Se.sortMap(p,({stringifiedLocator:J})=>J))o.reportInfo(null,`${M.padEnd(m+(M.length-x.length),\" \")} \\u2192 ${Y.padEnd(y,\" \")} ${U}`);p.length>1&&(o.reportSeparator(),o.reportInfo($.UNNAMED,`Note: these requirements start with ${P.prettyLocator(e.configuration,l)}`))})).exitCode()}Es();var Bae=ge(ti()),fm=class extends Le{constructor(){super(...arguments);this.onlyIfNeeded=z.Boolean(\"--only-if-needed\",!1,{description:\"Only lock the Yarn version if it isn't already locked\"});this.version=z.String()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins);if(e.get(\"yarnPath\")&&this.onlyIfNeeded)return 0;let r=()=>{if(typeof Ur==\"undefined\")throw new Pe(\"The --install flag can only be used without explicit version specifier from the Yarn CLI\");return`file://${process.argv[1]}`},i;if(this.version===\"self\")i=r();else if(this.version===\"latest\"||this.version===\"berry\"||this.version===\"stable\")i=`https://repo.yarnpkg.com/${await hm(e,\"stable\")}/packages/yarnpkg-cli/bin/yarn.js`;else if(this.version===\"canary\")i=`https://repo.yarnpkg.com/${await hm(e,\"canary\")}/packages/yarnpkg-cli/bin/yarn.js`;else if(this.version===\"classic\")i=\"https://nightly.yarnpkg.com/latest.js\";else if(this.version.match(/^https?:/))i=this.version;else if(this.version.match(/^\\.{0,2}[\\\\/]/)||j.isAbsolute(this.version))i=`file://${j.resolve(this.version)}`;else if(Wt.satisfiesWithPrereleases(this.version,\">=2.0.0\"))i=`https://repo.yarnpkg.com/${this.version}/packages/yarnpkg-cli/bin/yarn.js`;else if(Wt.satisfiesWithPrereleases(this.version,\"^0.x || ^1.x\"))i=`https://github.com/yarnpkg/yarn/releases/download/v${this.version}/yarn-${this.version}.js`;else if(Wt.validRange(this.version))i=`https://repo.yarnpkg.com/${await L8e(e,this.version)}/packages/yarnpkg-cli/bin/yarn.js`;else throw new Pe(`Invalid version descriptor \"${this.version}\"`);return(await 
Je.start({configuration:e,stdout:this.context.stdout,includeLogs:!this.context.quiet},async s=>{let o=\"file://\",a;i.startsWith(o)?(s.reportInfo($.UNNAMED,`Downloading ${ae.pretty(e,i,Di.URL)}`),a=await K.readFilePromise(j.toPortablePath(i.slice(o.length)))):(s.reportInfo($.UNNAMED,`Retrieving ${ae.pretty(e,i,Di.PATH)}`),a=await ir.get(i,{configuration:e})),await YN(e,null,a,{report:s})})).exitCode()}};fm.paths=[[\"set\",\"version\"]],fm.usage=Re.Usage({description:\"lock the Yarn version used by the project\",details:\"\\n      This command will download a specific release of Yarn directly from the Yarn GitHub repository, will store it inside your project, and will change the `yarnPath` settings from your project `.yarnrc.yml` file to point to the new file.\\n\\n      A very good use case for this command is to enforce the version of Yarn used by the any single member of your team inside a same project - by doing this you ensure that you have control on Yarn upgrades and downgrades (including on your deployment servers), and get rid of most of the headaches related to someone using a slightly different version and getting a different behavior than you.\\n\\n      The version specifier can be:\\n\\n      - a tag:\\n        - `latest` / `berry` / `stable` -> the most recent stable berry (`>=2.0.0`) release\\n        - `canary` -> the most recent canary (release candidate) berry (`>=2.0.0`) release\\n        - `classic` -> the most recent classic (`^0.x || ^1.x`) release\\n\\n      - a semver range (e.g. `2.x`) -> the most recent version satisfying the range (limited to berry releases)\\n\\n      - a semver version (e.g. `2.4.1`, `1.22.1`)\\n\\n      - a local file referenced through either a relative or absolute path\\n\\n      - `self` -> the version used to invoke the command\\n    \",examples:[[\"Download the latest release from the Yarn repository\",\"$0 set version latest\"],[\"Download the latest canary release from the Yarn repository\",\"$0 set version canary\"],[\"Download the latest classic release from the Yarn repository\",\"$0 set version classic\"],[\"Download the most recent Yarn 3 build\",\"$0 set version 3.x\"],[\"Download a specific Yarn 2 build\",\"$0 set version 2.0.0-rc.30\"],[\"Switch back to a specific Yarn 1 release\",\"$0 set version 1.22.1\"],[\"Use a release from the local filesystem\",\"$0 set version ./yarn.cjs\"],[\"Use a release from a URL\",\"$0 set version https://repo.yarnpkg.com/3.1.0/packages/yarnpkg-cli/bin/yarn.js\"],[\"Download the version used to invoke the command\",\"$0 set version self\"]]});var bae=fm;async function L8e(t,e){let i=(await ir.get(\"https://repo.yarnpkg.com/tags\",{configuration:t,jsonResponse:!0})).tags.filter(n=>Wt.satisfiesWithPrereleases(n,e));if(i.length===0)throw new Pe(`No matching release found for range ${ae.pretty(t,e,ae.Type.RANGE)}.`);return i[0]}async function hm(t,e){let r=await ir.get(\"https://repo.yarnpkg.com/tags\",{configuration:t,jsonResponse:!0});if(!r.latest[e])throw new Pe(`Tag ${ae.pretty(t,e,ae.Type.RANGE)} not found`);return r.latest[e]}async function YN(t,e,r,{report:i}){var g;e===null&&await K.mktempPromise(async f=>{let h=k.join(f,\"yarn.cjs\");await K.writeFilePromise(h,r);let{stdout:p}=await Fr.execvp(process.execPath,[j.fromPortablePath(h),\"--version\"],{cwd:f,env:te(N({},process.env),{YARN_IGNORE_PATH:\"1\"})});if(e=p.trim(),!Bae.default.valid(e))throw new Error(`Invalid semver version. 
${ae.pretty(t,\"yarn --version\",ae.Type.CODE)} returned:\n${e}`)});let n=(g=t.projectCwd)!=null?g:t.startingCwd,s=k.resolve(n,\".yarn/releases\"),o=k.resolve(s,`yarn-${e}.cjs`),a=k.relative(t.startingCwd,o),l=k.relative(n,o),c=t.get(\"yarnPath\"),u=c===null||c.startsWith(`${s}/`);if(i.reportInfo($.UNNAMED,`Saving the new release in ${ae.pretty(t,a,\"magenta\")}`),await K.removePromise(k.dirname(o)),await K.mkdirPromise(k.dirname(o),{recursive:!0}),await K.writeFilePromise(o,r,{mode:493}),u){await ye.updateConfiguration(n,{yarnPath:l});let f=await At.tryFind(n)||new At;f.packageManager=`yarn@${e&&Se.isTaggedYarnVersion(e)?e:await hm(t,\"stable\")}`;let h={};f.exportTo(h);let p=k.join(n,At.fileName),m=`${JSON.stringify(h,null,f.indent)}\n`;await K.changeFilePromise(p,m,{automaticNewlines:!0})}}function Qae(t){return $[BI(t)]}var T8e=/## (?<code>YN[0-9]{4}) - `(?<name>[A-Z_]+)`\\n\\n(?<details>(?:.(?!##))+)/gs;async function O8e(t){let r=`https://repo.yarnpkg.com/${Se.isTaggedYarnVersion(Ur)?Ur:await hm(t,\"canary\")}/packages/gatsby/content/advanced/error-codes.md`,i=await ir.get(r,{configuration:t});return new Map(Array.from(i.toString().matchAll(T8e),({groups:n})=>{if(!n)throw new Error(\"Assertion failed: Expected the match to have been successful\");let s=Qae(n.code);if(n.name!==s)throw new Error(`Assertion failed: Invalid error code data: Expected \"${n.name}\" to be named \"${s}\"`);return[n.code,n.details]}))}var pm=class extends Le{constructor(){super(...arguments);this.code=z.String({required:!1,validator:fp(gp(),[hp(/^YN[0-9]{4}$/)])});this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins);if(typeof this.code!=\"undefined\"){let r=Qae(this.code),i=ae.pretty(e,r,ae.Type.CODE),n=this.cli.format().header(`${this.code} - ${i}`),o=(await O8e(e)).get(this.code),a=typeof o!=\"undefined\"?ae.jsonOrPretty(this.json,e,ae.tuple(ae.Type.MARKDOWN,{text:o,format:this.cli.format(),paragraphs:!0})):`This error code does not have a description.\n\nYou can help us by editing this page on GitHub \\u{1F642}:\n${ae.jsonOrPretty(this.json,e,ae.tuple(ae.Type.URL,\"https://github.com/yarnpkg/berry/blob/master/packages/gatsby/content/advanced/error-codes.md\"))}\n`;this.json?this.context.stdout.write(`${JSON.stringify({code:this.code,name:r,details:a})}\n`):this.context.stdout.write(`${n}\n\n${a}\n`)}else{let r={children:Se.mapAndFilter(Object.entries($),([i,n])=>Number.isNaN(Number(i))?Se.mapAndFilter.skip:{label:YA(Number(i)),value:ae.tuple(ae.Type.CODE,n)})};as.emitTree(r,{configuration:e,stdout:this.context.stdout,json:this.json})}}};pm.paths=[[\"explain\"]],pm.usage=Re.Usage({description:\"explain an error code\",details:`\n      When the code argument is specified, this command prints its name and its details.\n\n      When used without arguments, this command lists all error codes and their names.\n    `,examples:[[\"Explain an error code\",\"$0 explain YN0006\"],[\"List all error codes\",\"$0 explain\"]]});var vae=pm;var Sae=ge(ts()),dm=class extends Le{constructor(){super(...arguments);this.all=z.Boolean(\"-A,--all\",!1,{description:\"Print versions of a package from the whole project\"});this.recursive=z.Boolean(\"-R,--recursive\",!1,{description:\"Print information for all packages, including transitive dependencies\"});this.extra=z.Array(\"-X,--extra\",[],{description:\"An array of requests of extra data provided by plugins\"});this.cache=z.Boolean(\"--cache\",!1,{description:\"Print 
information about the cache entry of a package (path, size, checksum)\"});this.dependents=z.Boolean(\"--dependents\",!1,{description:\"Print all dependents for each matching package\"});this.manifest=z.Boolean(\"--manifest\",!1,{description:\"Print data obtained by looking at the package archive (license, homepage, ...)\"});this.nameOnly=z.Boolean(\"--name-only\",!1,{description:\"Only print the name for the matching packages\"});this.virtuals=z.Boolean(\"--virtuals\",!1,{description:\"Print each instance of the virtual packages\"});this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"});this.patterns=z.Rest()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd),n=await Nt.find(e);if(!i&&!this.all)throw new ht(r.cwd,this.context.cwd);await r.restoreInstallState();let s=new Set(this.extra);this.cache&&s.add(\"cache\"),this.dependents&&s.add(\"dependents\"),this.manifest&&s.add(\"manifest\");let o=(x,{recursive:M})=>{let Y=x.anchoredLocator.locatorHash,U=new Map,J=[Y];for(;J.length>0;){let W=J.shift();if(U.has(W))continue;let ee=r.storedPackages.get(W);if(typeof ee==\"undefined\")throw new Error(\"Assertion failed: Expected the package to be registered\");if(U.set(W,ee),P.isVirtualLocator(ee)&&J.push(P.devirtualizeLocator(ee).locatorHash),!(!M&&W!==Y))for(let Z of ee.dependencies.values()){let A=r.storedResolutions.get(Z.descriptorHash);if(typeof A==\"undefined\")throw new Error(\"Assertion failed: Expected the resolution to be registered\");J.push(A)}}return U.values()},a=({recursive:x})=>{let M=new Map;for(let Y of r.workspaces)for(let U of o(Y,{recursive:x}))M.set(U.locatorHash,U);return M.values()},l=({all:x,recursive:M})=>x&&M?r.storedPackages.values():x?a({recursive:M}):o(i,{recursive:M}),c=({all:x,recursive:M})=>{let Y=l({all:x,recursive:M}),U=this.patterns.map(ee=>{let Z=P.parseLocator(ee),A=Sae.default.makeRe(P.stringifyIdent(Z)),ne=P.isVirtualLocator(Z),le=ne?P.devirtualizeLocator(Z):Z;return Ae=>{let T=P.stringifyIdent(Ae);if(!A.test(T))return!1;if(Z.reference===\"unknown\")return!0;let L=P.isVirtualLocator(Ae),Ee=L?P.devirtualizeLocator(Ae):Ae;return!(ne&&L&&Z.reference!==Ae.reference||le.reference!==Ee.reference)}}),J=Se.sortMap([...Y],ee=>P.stringifyLocator(ee));return{selection:J.filter(ee=>U.length===0||U.some(Z=>Z(ee))),sortedLookup:J}},{selection:u,sortedLookup:g}=c({all:this.all,recursive:this.recursive});if(u.length===0)throw new Pe(\"No package matched your request\");let f=new Map;if(this.dependents)for(let x of g)for(let M of x.dependencies.values()){let Y=r.storedResolutions.get(M.descriptorHash);if(typeof Y==\"undefined\")throw new Error(\"Assertion failed: Expected the resolution to be registered\");Se.getArrayWithDefault(f,Y).push(x)}let h=new Map;for(let x of g){if(!P.isVirtualLocator(x))continue;let M=P.devirtualizeLocator(x);Se.getArrayWithDefault(h,M.locatorHash).push(x)}let p={},m={children:p},y=e.makeFetcher(),Q={project:r,fetcher:y,cache:n,checksums:r.storedChecksums,report:new pi,cacheOptions:{skipIntegrityCheck:!0},skipIntegrityCheck:!0},S=[async(x,M,Y)=>{var W,ee;if(!M.has(\"manifest\"))return;let U=await y.fetch(x,Q),J;try{J=await At.find(U.prefixPath,{baseFs:U.packageFs})}finally{(W=U.releaseFs)==null||W.call(U)}Y(\"Manifest\",{License:ae.tuple(ae.Type.NO_HINT,J.license),Homepage:ae.tuple(ae.Type.URL,(ee=J.raw.homepage)!=null?ee:null)})},async(x,M,Y)=>{var A;if(!M.has(\"cache\"))return;let 
U={mockedPackages:r.disabledLocators,unstablePackages:r.conditionalLocators},J=(A=r.storedChecksums.get(x.locatorHash))!=null?A:null,W=n.getLocatorPath(x,J,U),ee;if(W!==null)try{ee=K.statSync(W)}catch{}let Z=typeof ee!=\"undefined\"?[ee.size,ae.Type.SIZE]:void 0;Y(\"Cache\",{Checksum:ae.tuple(ae.Type.NO_HINT,J),Path:ae.tuple(ae.Type.PATH,W),Size:Z})}];for(let x of u){let M=P.isVirtualLocator(x);if(!this.virtuals&&M)continue;let Y={},U={value:[x,ae.Type.LOCATOR],children:Y};if(p[P.stringifyLocator(x)]=U,this.nameOnly){delete U.children;continue}let J=h.get(x.locatorHash);typeof J!=\"undefined\"&&(Y.Instances={label:\"Instances\",value:ae.tuple(ae.Type.NUMBER,J.length)}),Y.Version={label:\"Version\",value:ae.tuple(ae.Type.NO_HINT,x.version)};let W=(Z,A)=>{let ne={};if(Y[Z]=ne,Array.isArray(A))ne.children=A.map(le=>({value:le}));else{let le={};ne.children=le;for(let[Ae,T]of Object.entries(A))typeof T!=\"undefined\"&&(le[Ae]={label:Ae,value:T})}};if(!M){for(let Z of S)await Z(x,s,W);await e.triggerHook(Z=>Z.fetchPackageInfo,x,s,W)}x.bin.size>0&&!M&&W(\"Exported Binaries\",[...x.bin.keys()].map(Z=>ae.tuple(ae.Type.PATH,Z)));let ee=f.get(x.locatorHash);typeof ee!=\"undefined\"&&ee.length>0&&W(\"Dependents\",ee.map(Z=>ae.tuple(ae.Type.LOCATOR,Z))),x.dependencies.size>0&&!M&&W(\"Dependencies\",[...x.dependencies.values()].map(Z=>{var le;let A=r.storedResolutions.get(Z.descriptorHash),ne=typeof A!=\"undefined\"&&(le=r.storedPackages.get(A))!=null?le:null;return ae.tuple(ae.Type.RESOLUTION,{descriptor:Z,locator:ne})})),x.peerDependencies.size>0&&M&&W(\"Peer dependencies\",[...x.peerDependencies.values()].map(Z=>{var Ae,T;let A=x.dependencies.get(Z.identHash),ne=typeof A!=\"undefined\"&&(Ae=r.storedResolutions.get(A.descriptorHash))!=null?Ae:null,le=ne!==null&&(T=r.storedPackages.get(ne))!=null?T:null;return ae.tuple(ae.Type.RESOLUTION,{descriptor:Z,locator:le})}))}as.emitTree(m,{configuration:e,json:this.json,stdout:this.context.stdout,separators:this.nameOnly?0:2})}};dm.paths=[[\"info\"]],dm.usage=Re.Usage({description:\"see information related to packages\",details:\"\\n      This command prints various information related to the specified packages, accepting glob patterns.\\n\\n      By default, if the locator reference is missing, Yarn will default to print the information about all the matching direct dependencies of the package for the active workspace. To instead print all versions of the package that are direct dependencies of any of your workspaces, use the `-A,--all` flag. Adding the `-R,--recursive` flag will also report transitive dependencies.\\n\\n      Some fields will be hidden by default in order to keep the output readable, but can be selectively displayed by using additional options (`--dependents`, `--manifest`, `--virtuals`, ...) 
described in the option descriptions.\\n\\n      Note that this command will only print the information directly related to the selected packages - if you wish to know why the package is there in the first place, use `yarn why` which will do just that (it also provides a `-R,--recursive` flag that may be of some help).\\n    \",examples:[[\"Show information about Lodash\",\"$0 info lodash\"]]});var kae=dm;var ob=ge(hc());Es();var Cm=class extends Le{constructor(){super(...arguments);this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"});this.immutable=z.Boolean(\"--immutable\",{description:\"Abort with an error exit code if the lockfile was to be modified\"});this.immutableCache=z.Boolean(\"--immutable-cache\",{description:\"Abort with an error exit code if the cache folder was to be modified\"});this.checkCache=z.Boolean(\"--check-cache\",!1,{description:\"Always refetch the packages and ensure that their checksums are consistent\"});this.inlineBuilds=z.Boolean(\"--inline-builds\",{description:\"Verbosely print the output of the build steps of dependencies\"});this.mode=z.String(\"--mode\",{description:\"Change what artifacts installs generate\",validator:nn(di)});this.cacheFolder=z.String(\"--cache-folder\",{hidden:!0});this.frozenLockfile=z.Boolean(\"--frozen-lockfile\",{hidden:!0});this.ignoreEngines=z.Boolean(\"--ignore-engines\",{hidden:!0});this.nonInteractive=z.Boolean(\"--non-interactive\",{hidden:!0});this.preferOffline=z.Boolean(\"--prefer-offline\",{hidden:!0});this.production=z.Boolean(\"--production\",{hidden:!0});this.registry=z.String(\"--registry\",{hidden:!0});this.silent=z.Boolean(\"--silent\",{hidden:!0});this.networkTimeout=z.String(\"--network-timeout\",{hidden:!0})}async execute(){var g;let e=await ye.find(this.context.cwd,this.context.plugins);typeof this.inlineBuilds!=\"undefined\"&&e.useWithSource(\"<cli>\",{enableInlineBuilds:this.inlineBuilds},e.startingCwd,{overwrite:!0});let r=!!process.env.FUNCTION_TARGET||!!process.env.GOOGLE_RUNTIME,i=async(f,{error:h})=>{let p=await Je.start({configuration:e,stdout:this.context.stdout,includeFooter:!1},async m=>{h?m.reportError($.DEPRECATED_CLI_SETTINGS,f):m.reportWarning($.DEPRECATED_CLI_SETTINGS,f)});return p.hasErrors()?p.exitCode():null};if(typeof this.ignoreEngines!=\"undefined\"){let f=await i(\"The --ignore-engines option is deprecated; engine checking isn't a core feature anymore\",{error:!ob.default.VERCEL});if(f!==null)return f}if(typeof this.registry!=\"undefined\"){let f=await i(\"The --registry option is deprecated; prefer setting npmRegistryServer in your .yarnrc.yml file\",{error:!1});if(f!==null)return f}if(typeof this.preferOffline!=\"undefined\"){let f=await i(\"The --prefer-offline flag is deprecated; use the --cached flag with 'yarn add' instead\",{error:!ob.default.VERCEL});if(f!==null)return f}if(typeof this.production!=\"undefined\"){let f=await i(\"The --production option is deprecated on 'install'; use 'yarn workspaces focus' instead\",{error:!0});if(f!==null)return f}if(typeof this.nonInteractive!=\"undefined\"){let f=await i(\"The --non-interactive option is deprecated\",{error:!r});if(f!==null)return f}if(typeof this.frozenLockfile!=\"undefined\"&&(await i(\"The --frozen-lockfile option is deprecated; use --immutable and/or --immutable-cache instead\",{error:!1}),this.immutable=this.frozenLockfile),typeof this.cacheFolder!=\"undefined\"){let f=await i(\"The cache-folder option has been deprecated; use rc settings 
instead\",{error:!ob.default.NETLIFY});if(f!==null)return f}let n=this.mode===di.UpdateLockfile;if(n&&(this.immutable||this.immutableCache))throw new Pe(`${ae.pretty(e,\"--immutable\",ae.Type.CODE)} and ${ae.pretty(e,\"--immutable-cache\",ae.Type.CODE)} cannot be used with ${ae.pretty(e,\"--mode=update-lockfile\",ae.Type.CODE)}`);let s=((g=this.immutable)!=null?g:e.get(\"enableImmutableInstalls\"))&&!n,o=this.immutableCache&&!n;if(e.projectCwd!==null){let f=await Je.start({configuration:e,json:this.json,stdout:this.context.stdout,includeFooter:!1},async h=>{await M8e(e,s)&&(h.reportInfo($.AUTOMERGE_SUCCESS,\"Automatically fixed merge conflicts \\u{1F44D}\"),h.reportSeparator())});if(f.hasErrors())return f.exitCode()}if(e.projectCwd!==null&&typeof e.sources.get(\"nodeLinker\")==\"undefined\"){let f=e.projectCwd,h;try{h=await K.readFilePromise(k.join(f,Pt.lockfile),\"utf8\")}catch{}if(h==null?void 0:h.includes(\"yarn lockfile v1\")){let p=await Je.start({configuration:e,json:this.json,stdout:this.context.stdout,includeFooter:!1},async m=>{m.reportInfo($.AUTO_NM_SUCCESS,\"Migrating from Yarn 1; automatically enabling the compatibility node-modules linker \\u{1F44D}\"),m.reportSeparator(),e.use(\"<compat>\",{nodeLinker:\"node-modules\"},f,{overwrite:!0}),await ye.updateConfiguration(f,{nodeLinker:\"node-modules\"})});if(p.hasErrors())return p.exitCode()}}if(e.projectCwd!==null){let f=await Je.start({configuration:e,json:this.json,stdout:this.context.stdout,includeFooter:!1},async h=>{var p;((p=ye.telemetry)==null?void 0:p.isNew)&&(h.reportInfo($.TELEMETRY_NOTICE,\"Yarn will periodically gather anonymous telemetry: https://yarnpkg.com/advanced/telemetry\"),h.reportInfo($.TELEMETRY_NOTICE,`Run ${ae.pretty(e,\"yarn config set --home enableTelemetry 0\",ae.Type.CODE)} to disable`),h.reportSeparator())});if(f.hasErrors())return f.exitCode()}let{project:a,workspace:l}=await ze.find(e,this.context.cwd),c=await Nt.find(e,{immutable:o,check:this.checkCache});if(!l)throw new ht(a.cwd,this.context.cwd);return await a.restoreInstallState({restoreResolutions:!1}),(await Je.start({configuration:e,json:this.json,stdout:this.context.stdout,includeLogs:!0},async f=>{await a.install({cache:c,report:f,immutable:s,mode:this.mode})})).exitCode()}};Cm.paths=[[\"install\"],Re.Default],Cm.usage=Re.Usage({description:\"install the project dependencies\",details:`\n      This command sets up your project if needed. The installation is split into four different steps that each have their own characteristics:\n\n      - **Resolution:** First the package manager will resolve your dependencies. The exact way a dependency version is privileged over another isn't standardized outside of the regular semver guarantees. 
If a package doesn't resolve to what you would expect, check that all dependencies are correctly declared (also check our website for more information: ).\n\n      - **Fetch:** Then we download all the dependencies if needed, and make sure that they're all stored within our cache (check the value of \\`cacheFolder\\` in \\`yarn config\\` to see where the cache files are stored).\n\n      - **Link:** Then we send the dependency tree information to internal plugins tasked with writing them on the disk in some form (for example by generating the .pnp.cjs file you might know).\n\n      - **Build:** Once the dependency tree has been written on the disk, the package manager will now be free to run the build scripts for all packages that might need it, in a topological order compatible with the way they depend on one another. See https://yarnpkg.com/advanced/lifecycle-scripts for detail.\n\n      Note that running this command is not part of the recommended workflow. Yarn supports zero-installs, which means that as long as you store your cache and your .pnp.cjs file inside your repository, everything will work without requiring any install right after cloning your repository or switching branches.\n\n      If the \\`--immutable\\` option is set (defaults to true on CI), Yarn will abort with an error exit code if the lockfile was to be modified (other paths can be added using the \\`immutablePatterns\\` configuration setting). For backward compatibility we offer an alias under the name of \\`--frozen-lockfile\\`, but it will be removed in a later release.\n\n      If the \\`--immutable-cache\\` option is set, Yarn will abort with an error exit code if the cache folder was to be modified (either because files would be added, or because they'd be removed).\n\n      If the \\`--check-cache\\` option is set, Yarn will always refetch the packages and will ensure that their checksum matches what's 1/ described in the lockfile 2/ inside the existing cache files (if present). This is recommended as part of your CI workflow if you're both following the Zero-Installs model and accepting PRs from third-parties, as they'd otherwise have the ability to alter the checked-in packages before submitting them.\n\n      If the \\`--inline-builds\\` option is set, Yarn will verbosely print the output of the build steps of your dependencies (instead of writing them into individual files). This is likely useful mostly for debug purposes only when using Docker-like environments.\n\n      If the \\`--mode=<mode>\\` option is set, Yarn will change which artifacts are generated. The modes currently supported are:\n\n      - \\`skip-build\\` will not run the build scripts at all. Note that this is different from setting \\`enableScripts\\` to false because the later will disable build scripts, and thus affect the content of the artifacts generated on disk, whereas the former will just disable the build step - but not the scripts themselves, which just won't run.\n\n      - \\`update-lockfile\\` will skip the link step altogether, and only fetch packages that are missing from the lockfile (or that have no associated checksums). 
This mode is typically used by tools like Renovate or Dependabot to keep a lockfile up-to-date without incurring the full install cost.\n    `,examples:[[\"Install the project\",\"$0 install\"],[\"Validate a project when using Zero-Installs\",\"$0 install --immutable --immutable-cache\"],[\"Validate a project when using Zero-Installs (slightly safer if you accept external PRs)\",\"$0 install --immutable --immutable-cache --check-cache\"]]});var xae=Cm,U8e=\"|||||||\",K8e=\">>>>>>>\",H8e=\"=======\",Pae=\"<<<<<<<\";async function M8e(t,e){if(!t.projectCwd)return!1;let r=k.join(t.projectCwd,t.get(\"lockfileFilename\"));if(!await K.existsPromise(r))return!1;let i=await K.readFilePromise(r,\"utf8\");if(!i.includes(Pae))return!1;if(e)throw new ct($.AUTOMERGE_IMMUTABLE,\"Cannot autofix a lockfile when running an immutable install\");let[n,s]=j8e(i),o,a;try{o=Qi(n),a=Qi(s)}catch(c){throw new ct($.AUTOMERGE_FAILED_TO_PARSE,\"The individual variants of the lockfile failed to parse\")}let l=N(N({},o),a);for(let[c,u]of Object.entries(l))typeof u==\"string\"&&delete l[c];return await K.changeFilePromise(r,Na(l),{automaticNewlines:!0}),!0}function j8e(t){let e=[[],[]],r=t.split(/\\r?\\n/g),i=!1;for(;r.length>0;){let n=r.shift();if(typeof n==\"undefined\")throw new Error(\"Assertion failed: Some lines should remain\");if(n.startsWith(Pae)){for(;r.length>0;){let s=r.shift();if(typeof s==\"undefined\")throw new Error(\"Assertion failed: Some lines should remain\");if(s===H8e){i=!1;break}else if(i||s.startsWith(U8e)){i=!0;continue}else e[0].push(s)}for(;r.length>0;){let s=r.shift();if(typeof s==\"undefined\")throw new Error(\"Assertion failed: Some lines should remain\");if(s.startsWith(K8e))break;e[1].push(s)}}else e[0].push(n),e[1].push(n)}return[e[0].join(`\n`),e[1].join(`\n`)]}var mm=class extends Le{constructor(){super(...arguments);this.all=z.Boolean(\"-A,--all\",!1,{description:\"Link all workspaces belonging to the target project to the current one\"});this.private=z.Boolean(\"-p,--private\",!1,{description:\"Also link private workspaces belonging to the target project to the current one\"});this.relative=z.Boolean(\"-r,--relative\",!1,{description:\"Link workspaces using relative paths instead of absolute paths\"});this.destination=z.String()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd),n=await Nt.find(e);if(!i)throw new ht(r.cwd,this.context.cwd);await r.restoreInstallState({restoreResolutions:!1});let s=k.resolve(this.context.cwd,j.toPortablePath(this.destination)),o=await ye.find(s,this.context.plugins,{useRc:!1,strict:!1}),{project:a,workspace:l}=await ze.find(o,s);if(r.cwd===a.cwd)throw new Pe(\"Invalid destination; Can't link the project to itself\");if(!l)throw new ht(a.cwd,s);let c=r.topLevelWorkspace,u=[];if(this.all){for(let f of a.workspaces)f.manifest.name&&(!f.manifest.private||this.private)&&u.push(f);if(u.length===0)throw new Pe(\"No workspace found to be linked in the target project\")}else{if(!l.manifest.name)throw new Pe(\"The target workspace doesn't have a name and thus cannot be linked\");if(l.manifest.private&&!this.private)throw new Pe(\"The target workspace is marked private - use the --private flag to link it anyway\");u.push(l)}for(let f of u){let h=P.stringifyIdent(f.locator),p=this.relative?k.relative(r.cwd,f.cwd):f.cwd;c.manifest.resolutions.push({pattern:{descriptor:{fullName:h}},reference:`portal:${p}`})}return(await Je.start({configuration:e,stdout:this.context.stdout},async 
f=>{await r.install({cache:n,report:f})})).exitCode()}};mm.paths=[[\"link\"]],mm.usage=Re.Usage({description:\"connect the local project to another one\",details:\"\\n      This command will set a new `resolutions` field in the project-level manifest and point it to the workspace at the specified location (even if part of another project).\\n    \",examples:[[\"Register a remote workspace for use in the current project\",\"$0 link ~/ts-loader\"],[\"Register all workspaces from a remote project for use in the current project\",\"$0 link ~/jest --all\"]]});var Dae=mm;var Em=class extends Le{constructor(){super(...arguments);this.args=z.Proxy()}async execute(){return this.cli.run([\"exec\",\"node\",...this.args])}};Em.paths=[[\"node\"]],Em.usage=Re.Usage({description:\"run node with the hook already setup\",details:`\n      This command simply runs Node. It also makes sure to call it in a way that's compatible with the current project (for example, on PnP projects the environment will be setup in such a way that PnP will be correctly injected into the environment).\n\n      The Node process will use the exact same version of Node as the one used to run Yarn itself, which might be a good way to ensure that your commands always use a consistent Node version.\n    `,examples:[[\"Run a Node script\",\"$0 node ./my-script.js\"]]});var Rae=Em;var Hae=ge(require(\"os\"));var Nae=ge(require(\"os\"));var G8e=\"https://raw.githubusercontent.com/yarnpkg/berry/master/plugins.yml\";async function yu(t){let e=await ir.get(G8e,{configuration:t});return Qi(e.toString())}var Im=class extends Le{constructor(){super(...arguments);this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins);return(await Je.start({configuration:e,json:this.json,stdout:this.context.stdout},async i=>{let n=await yu(e);for(let s of Object.entries(n)){let[l,o]=s,a=o,{experimental:c}=a,u=Tr(a,[\"experimental\"]);let g=l;c&&(g+=\" [experimental]\"),i.reportJson(N({name:l,experimental:c},u)),i.reportInfo(null,g)}})).exitCode()}};Im.paths=[[\"plugin\",\"list\"]],Im.usage=Re.Usage({category:\"Plugin-related commands\",description:\"list the available official plugins\",details:\"\\n      This command prints the plugins available directly from the Yarn repository. 
Only those plugins can be referenced by name in `yarn plugin import`.\\n    \",examples:[[\"List the official plugins\",\"$0 plugin list\"]]});var Fae=Im;var Y8e=/^[0-9]+$/;function Lae(t){return Y8e.test(t)?`pull/${t}/head`:t}var q8e=({repository:t,branch:e},r)=>[[\"git\",\"init\",j.fromPortablePath(r)],[\"git\",\"remote\",\"add\",\"origin\",t],[\"git\",\"fetch\",\"origin\",\"--depth=1\",Lae(e)],[\"git\",\"reset\",\"--hard\",\"FETCH_HEAD\"]],J8e=({branch:t})=>[[\"git\",\"fetch\",\"origin\",\"--depth=1\",Lae(t),\"--force\"],[\"git\",\"reset\",\"--hard\",\"FETCH_HEAD\"],[\"git\",\"clean\",\"-dfx\"]],W8e=({plugins:t,noMinify:e},r)=>[[\"yarn\",\"build:cli\",...new Array().concat(...t.map(i=>[\"--plugin\",k.resolve(r,i)])),...e?[\"--no-minify\"]:[],\"|\"]],ym=class extends Le{constructor(){super(...arguments);this.installPath=z.String(\"--path\",{description:\"The path where the repository should be cloned to\"});this.repository=z.String(\"--repository\",\"https://github.com/yarnpkg/berry.git\",{description:\"The repository that should be cloned\"});this.branch=z.String(\"--branch\",\"master\",{description:\"The branch of the repository that should be cloned\"});this.plugins=z.Array(\"--plugin\",[],{description:\"An array of additional plugins that should be included in the bundle\"});this.noMinify=z.Boolean(\"--no-minify\",!1,{description:\"Build a bundle for development (debugging) - non-minified and non-mangled\"});this.force=z.Boolean(\"-f,--force\",!1,{description:\"Always clone the repository instead of trying to fetch the latest commits\"});this.skipPlugins=z.Boolean(\"--skip-plugins\",!1,{description:\"Skip updating the contrib plugins\"})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r}=await ze.find(e,this.context.cwd),i=typeof this.installPath!=\"undefined\"?k.resolve(this.context.cwd,j.toPortablePath(this.installPath)):k.resolve(j.toPortablePath((0,Nae.tmpdir)()),\"yarnpkg-sources\",Dn.makeHash(this.repository).slice(0,6));return(await Je.start({configuration:e,stdout:this.context.stdout},async s=>{await JN(this,{configuration:e,report:s,target:i}),s.reportSeparator(),s.reportInfo($.UNNAMED,\"Building a fresh bundle\"),s.reportSeparator(),await wm(W8e(this,i),{configuration:e,context:this.context,target:i}),s.reportSeparator();let o=k.resolve(i,\"packages/yarnpkg-cli/bundles/yarn.js\"),a=await K.readFilePromise(o);await YN(e,\"sources\",a,{report:s}),this.skipPlugins||await z8e(this,{project:r,report:s,target:i})})).exitCode()}};ym.paths=[[\"set\",\"version\",\"from\",\"sources\"]],ym.usage=Re.Usage({description:\"build Yarn from master\",details:`\n      This command will clone the Yarn repository into a temporary folder, then build it. The resulting bundle will then be copied into the local project.\n\n      By default, it also updates all contrib plugins to the same commit the bundle is built from. 
This behavior can be disabled by using the \\`--skip-plugins\\` flag.\n    `,examples:[[\"Build Yarn from master\",\"$0 set version from sources\"]]});var Tae=ym;async function wm(t,{configuration:e,context:r,target:i}){for(let[n,...s]of t){let o=s[s.length-1]===\"|\";if(o&&s.pop(),o)await Fr.pipevp(n,s,{cwd:i,stdin:r.stdin,stdout:r.stdout,stderr:r.stderr,strict:!0});else{r.stdout.write(`${ae.pretty(e,`  $ ${[n,...s].join(\" \")}`,\"grey\")}\n`);try{await Fr.execvp(n,s,{cwd:i,strict:!0})}catch(a){throw r.stdout.write(a.stdout||a.stack),a}}}}async function JN(t,{configuration:e,report:r,target:i}){let n=!1;if(!t.force&&K.existsSync(k.join(i,\".git\"))){r.reportInfo($.UNNAMED,\"Fetching the latest commits\"),r.reportSeparator();try{await wm(J8e(t),{configuration:e,context:t.context,target:i}),n=!0}catch(s){r.reportSeparator(),r.reportWarning($.UNNAMED,\"Repository update failed; we'll try to regenerate it\")}}n||(r.reportInfo($.UNNAMED,\"Cloning the remote repository\"),r.reportSeparator(),await K.removePromise(i),await K.mkdirPromise(i,{recursive:!0}),await wm(q8e(t,i),{configuration:e,context:t.context,target:i}))}async function z8e(t,{project:e,report:r,target:i}){let n=await yu(e.configuration),s=new Set(Object.keys(n));for(let o of e.configuration.plugins.keys())!s.has(o)||await qN(o,t,{project:e,report:r,target:i})}var Oae=ge(ti()),Mae=ge(require(\"url\")),Uae=ge(require(\"vm\"));var Bm=class extends Le{constructor(){super(...arguments);this.name=z.String()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins);return(await Je.start({configuration:e,stdout:this.context.stdout},async i=>{let{project:n}=await ze.find(e,this.context.cwd),s,o;if(this.name.match(/^\\.{0,2}[\\\\/]/)||j.isAbsolute(this.name)){let a=k.resolve(this.context.cwd,j.toPortablePath(this.name));i.reportInfo($.UNNAMED,`Reading ${ae.pretty(e,a,ae.Type.PATH)}`),s=k.relative(n.cwd,a),o=await K.readFilePromise(a)}else{let a;if(this.name.match(/^https?:/)){try{new Mae.URL(this.name)}catch{throw new ct($.INVALID_PLUGIN_REFERENCE,`Plugin specifier \"${this.name}\" is neither a plugin name nor a valid url`)}s=this.name,a=this.name}else{let l=P.parseLocator(this.name.replace(/^((@yarnpkg\\/)?plugin-)?/,\"@yarnpkg/plugin-\"));if(l.reference!==\"unknown\"&&!Oae.default.valid(l.reference))throw new ct($.UNNAMED,\"Official plugins only accept strict version references. Use an explicit URL if you wish to download them from another location.\");let c=P.stringifyIdent(l),u=await yu(e);if(!Object.prototype.hasOwnProperty.call(u,c))throw new ct($.PLUGIN_NAME_NOT_FOUND,`Couldn't find a plugin named \"${c}\" on the remote registry. 
Note that only the plugins referenced on our website (https://github.com/yarnpkg/berry/blob/master/plugins.yml) can be referenced by their name; any other plugin will have to be referenced through its public url (for example https://github.com/yarnpkg/berry/raw/master/packages/plugin-typescript/bin/%40yarnpkg/plugin-typescript.js).`);s=c,a=u[c].url,l.reference!==\"unknown\"?a=a.replace(/\\/master\\//,`/${c}/${l.reference}/`):Ur!==null&&(a=a.replace(/\\/master\\//,`/@yarnpkg/cli/${Ur}/`))}i.reportInfo($.UNNAMED,`Downloading ${ae.pretty(e,a,\"green\")}`),o=await ir.get(a,{configuration:e})}await WN(s,o,{project:n,report:i})})).exitCode()}};Bm.paths=[[\"plugin\",\"import\"]],Bm.usage=Re.Usage({category:\"Plugin-related commands\",description:\"download a plugin\",details:`\n      This command downloads the specified plugin from its remote location and updates the configuration to reference it in further CLI invocations.\n\n      Three types of plugin references are accepted:\n\n      - If the plugin is stored within the Yarn repository, it can be referenced by name.\n      - Third-party plugins can be referenced directly through their public urls.\n      - Local plugins can be referenced by their path on the disk.\n\n      Plugins cannot be downloaded from the npm registry, and aren't allowed to have dependencies (they need to be bundled into a single file, possibly thanks to the \\`@yarnpkg/builder\\` package).\n    `,examples:[['Download and activate the \"@yarnpkg/plugin-exec\" plugin',\"$0 plugin import @yarnpkg/plugin-exec\"],['Download and activate the \"@yarnpkg/plugin-exec\" plugin (shorthand)',\"$0 plugin import exec\"],[\"Download and activate a community plugin\",\"$0 plugin import https://example.org/path/to/plugin.js\"],[\"Activate a local plugin\",\"$0 plugin import ./path/to/plugin.js\"]]});var Kae=Bm;async function WN(t,e,{project:r,report:i}){let{configuration:n}=r,s={},o={exports:s};(0,Uae.runInNewContext)(e.toString(),{module:o,exports:s});let a=o.exports.name,l=`.yarn/plugins/${a}.cjs`,c=k.resolve(r.cwd,l);i.reportInfo($.UNNAMED,`Saving the new plugin in ${ae.pretty(n,l,\"magenta\")}`),await K.mkdirPromise(k.dirname(c),{recursive:!0}),await K.writeFilePromise(c,e);let u={path:l,spec:t};await ye.updateConfiguration(r.cwd,g=>{let f=[],h=!1;for(let p of g.plugins||[]){let m=typeof p!=\"string\"?p.path:p,y=k.resolve(r.cwd,j.toPortablePath(m)),{name:Q}=Se.dynamicRequire(y);Q!==a?f.push(p):(f.push(u),h=!0)}return h||f.push(u),te(N({},g),{plugins:f})})}var _8e=({pluginName:t,noMinify:e},r)=>[[\"yarn\",`build:${t}`,...e?[\"--no-minify\"]:[],\"|\"]],bm=class extends Le{constructor(){super(...arguments);this.installPath=z.String(\"--path\",{description:\"The path where the repository should be cloned to\"});this.repository=z.String(\"--repository\",\"https://github.com/yarnpkg/berry.git\",{description:\"The repository that should be cloned\"});this.branch=z.String(\"--branch\",\"master\",{description:\"The branch of the repository that should be cloned\"});this.noMinify=z.Boolean(\"--no-minify\",!1,{description:\"Build a plugin for development (debugging) - non-minified and non-mangled\"});this.force=z.Boolean(\"-f,--force\",!1,{description:\"Always clone the repository instead of trying to fetch the latest commits\"});this.name=z.String()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),r=typeof 
this.installPath!=\"undefined\"?k.resolve(this.context.cwd,j.toPortablePath(this.installPath)):k.resolve(j.toPortablePath((0,Hae.tmpdir)()),\"yarnpkg-sources\",Dn.makeHash(this.repository).slice(0,6));return(await Je.start({configuration:e,stdout:this.context.stdout},async n=>{let{project:s}=await ze.find(e,this.context.cwd),o=P.parseIdent(this.name.replace(/^((@yarnpkg\\/)?plugin-)?/,\"@yarnpkg/plugin-\")),a=P.stringifyIdent(o),l=await yu(e);if(!Object.prototype.hasOwnProperty.call(l,a))throw new ct($.PLUGIN_NAME_NOT_FOUND,`Couldn't find a plugin named \"${a}\" on the remote registry. Note that only the plugins referenced on our website (https://github.com/yarnpkg/berry/blob/master/plugins.yml) can be built and imported from sources.`);let c=a;await JN(this,{configuration:e,report:n,target:r}),await qN(c,this,{project:s,report:n,target:r})})).exitCode()}};bm.paths=[[\"plugin\",\"import\",\"from\",\"sources\"]],bm.usage=Re.Usage({category:\"Plugin-related commands\",description:\"build a plugin from sources\",details:`\n      This command clones the Yarn repository into a temporary folder, builds the specified contrib plugin and updates the configuration to reference it in further CLI invocations.\n\n      The plugins can be referenced by their short name if sourced from the official Yarn repository.\n    `,examples:[['Build and activate the \"@yarnpkg/plugin-exec\" plugin',\"$0 plugin import from sources @yarnpkg/plugin-exec\"],['Build and activate the \"@yarnpkg/plugin-exec\" plugin (shorthand)',\"$0 plugin import from sources exec\"]]});var jae=bm;async function qN(t,{context:e,noMinify:r},{project:i,report:n,target:s}){let o=t.replace(/@yarnpkg\\//,\"\"),{configuration:a}=i;n.reportSeparator(),n.reportInfo($.UNNAMED,`Building a fresh ${o}`),n.reportSeparator(),await wm(_8e({pluginName:o,noMinify:r},s),{configuration:a,context:e,target:s}),n.reportSeparator();let l=k.resolve(s,`packages/${o}/bundles/${t}.js`),c=await K.readFilePromise(l);await WN(t,c,{project:i,report:n})}var Qm=class extends Le{constructor(){super(...arguments);this.name=z.String()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r}=await ze.find(e,this.context.cwd);return(await Je.start({configuration:e,stdout:this.context.stdout},async n=>{let s=this.name,o=P.parseIdent(s);if(!e.plugins.has(s))throw new Pe(`${P.prettyIdent(e,o)} isn't referenced by the current configuration`);let a=`.yarn/plugins/${s}.cjs`,l=k.resolve(r.cwd,a);K.existsSync(l)&&(n.reportInfo($.UNNAMED,`Removing ${ae.pretty(e,a,ae.Type.PATH)}...`),await K.removePromise(l)),n.reportInfo($.UNNAMED,\"Updating the configuration...\"),await ye.updateConfiguration(r.cwd,c=>{if(!Array.isArray(c.plugins))return c;let u=c.plugins.filter(g=>g.path!==a);return c.plugins.length===u.length?c:te(N({},c),{plugins:u})})})).exitCode()}};Qm.paths=[[\"plugin\",\"remove\"]],Qm.usage=Re.Usage({category:\"Plugin-related commands\",description:\"remove a plugin\",details:`\n      This command deletes the specified plugin from the .yarn/plugins folder and removes it from the configuration.\n\n      **Note:** The plugins have to be referenced by their name property, which can be obtained using the \\`yarn plugin runtime\\` command. 
Shorthands are not allowed.\n   `,examples:[[\"Remove a plugin imported from the Yarn repository\",\"$0 plugin remove @yarnpkg/plugin-typescript\"],[\"Remove a plugin imported from a local file\",\"$0 plugin remove my-local-plugin\"]]});var Gae=Qm;var vm=class extends Le{constructor(){super(...arguments);this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins);return(await Je.start({configuration:e,json:this.json,stdout:this.context.stdout},async i=>{for(let n of e.plugins.keys()){let s=this.context.plugins.plugins.has(n),o=n;s&&(o+=\" [builtin]\"),i.reportJson({name:n,builtin:s}),i.reportInfo(null,`${o}`)}})).exitCode()}};vm.paths=[[\"plugin\",\"runtime\"]],vm.usage=Re.Usage({category:\"Plugin-related commands\",description:\"list the active plugins\",details:`\n      This command prints the currently active plugins. Will be displayed both builtin plugins and external plugins.\n    `,examples:[[\"List the currently active plugins\",\"$0 plugin runtime\"]]});var Yae=vm;var Sm=class extends Le{constructor(){super(...arguments);this.idents=z.Rest()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd),n=await Nt.find(e);if(!i)throw new ht(r.cwd,this.context.cwd);let s=new Set;for(let a of this.idents)s.add(P.parseIdent(a).identHash);if(await r.restoreInstallState({restoreResolutions:!1}),await r.resolveEverything({cache:n,report:new pi}),s.size>0)for(let a of r.storedPackages.values())s.has(a.identHash)&&r.storedBuildState.delete(a.locatorHash);else r.storedBuildState.clear();return(await Je.start({configuration:e,stdout:this.context.stdout,includeLogs:!this.context.quiet},async a=>{await r.install({cache:n,report:a})})).exitCode()}};Sm.paths=[[\"rebuild\"]],Sm.usage=Re.Usage({description:\"rebuild the project's native packages\",details:`\n      This command will automatically cause Yarn to forget about previous compilations of the given packages and to run them again.\n\n      Note that while Yarn forgets the compilation, the previous artifacts aren't erased from the filesystem and may affect the next builds (in good or bad). 
To avoid this, you may remove the .yarn/unplugged folder, or any other relevant location where packages might have been stored (Yarn may offer a way to do that automatically in the future).\n\n      By default all packages will be rebuilt, but you can filter the list by specifying the names of the packages you want to clear from memory.\n    `,examples:[[\"Rebuild all packages\",\"$0 rebuild\"],[\"Rebuild fsevents only\",\"$0 rebuild fsevents\"]]});var qae=Sm;var zN=ge(ts());Es();var km=class extends Le{constructor(){super(...arguments);this.all=z.Boolean(\"-A,--all\",!1,{description:\"Apply the operation to all workspaces from the current project\"});this.mode=z.String(\"--mode\",{description:\"Change what artifacts installs generate\",validator:nn(di)});this.patterns=z.Rest()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd),n=await Nt.find(e);if(!i)throw new ht(r.cwd,this.context.cwd);await r.restoreInstallState({restoreResolutions:!1});let s=this.all?r.workspaces:[i],o=[Hr.REGULAR,Hr.DEVELOPMENT,Hr.PEER],a=[],l=!1,c=[];for(let h of this.patterns){let p=!1,m=P.parseIdent(h);for(let y of s){let Q=[...y.manifest.peerDependenciesMeta.keys()];for(let S of(0,zN.default)(Q,h))y.manifest.peerDependenciesMeta.delete(S),l=!0,p=!0;for(let S of o){let x=y.manifest.getForScope(S),M=[...x.values()].map(Y=>P.stringifyIdent(Y));for(let Y of(0,zN.default)(M,P.stringifyIdent(m))){let{identHash:U}=P.parseIdent(Y),J=x.get(U);if(typeof J==\"undefined\")throw new Error(\"Assertion failed: Expected the descriptor to be registered\");y.manifest[S].delete(U),c.push([y,S,J]),l=!0,p=!0}}}p||a.push(h)}let u=a.length>1?\"Patterns\":\"Pattern\",g=a.length>1?\"don't\":\"doesn't\",f=this.all?\"any\":\"this\";if(a.length>0)throw new Pe(`${u} ${ae.prettyList(e,a,Di.CODE)} ${g} match any packages referenced by ${f} workspace`);return l?(await e.triggerMultipleHooks(p=>p.afterWorkspaceDependencyRemoval,c),(await Je.start({configuration:e,stdout:this.context.stdout},async p=>{await r.install({cache:n,report:p,mode:this.mode})})).exitCode()):0}};km.paths=[[\"remove\"]],km.usage=Re.Usage({description:\"remove dependencies from the project\",details:`\n      This command will remove the packages matching the specified patterns from the current workspace.\n\n      If the \\`--mode=<mode>\\` option is set, Yarn will change which artifacts are generated. The modes currently supported are:\n\n      - \\`skip-build\\` will not run the build scripts at all. Note that this is different from setting \\`enableScripts\\` to false because the later will disable build scripts, and thus affect the content of the artifacts generated on disk, whereas the former will just disable the build step - but not the scripts themselves, which just won't run.\n\n      - \\`update-lockfile\\` will skip the link step altogether, and only fetch packages that are missing from the lockfile (or that have no associated checksums). This mode is typically used by tools like Renovate or Dependabot to keep a lockfile up-to-date without incurring the full install cost.\n\n      This command accepts glob patterns as arguments (if valid Idents and supported by [micromatch](https://github.com/micromatch/micromatch)). 
Make sure to escape the patterns, to prevent your own shell from trying to expand them.\n    `,examples:[[\"Remove a dependency from the current project\",\"$0 remove lodash\"],[\"Remove a dependency from all workspaces at once\",\"$0 remove lodash --all\"],[\"Remove all dependencies starting with `eslint-`\",\"$0 remove 'eslint-*'\"],[\"Remove all dependencies with the `@babel` scope\",\"$0 remove '@babel/*'\"],[\"Remove all dependencies matching `react-dom` or `react-helmet`\",\"$0 remove 'react-{dom,helmet}'\"]]});var Jae=km;var Wae=ge(require(\"util\")),ab=class extends Le{async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd);if(!i)throw new ht(r.cwd,this.context.cwd);return(await Je.start({configuration:e,stdout:this.context.stdout},async s=>{let o=i.manifest.scripts,a=Se.sortMap(o.keys(),u=>u),l={breakLength:Infinity,colors:e.get(\"enableColors\"),maxArrayLength:2},c=a.reduce((u,g)=>Math.max(u,g.length),0);for(let[u,g]of o.entries())s.reportInfo(null,`${u.padEnd(c,\" \")}   ${(0,Wae.inspect)(g,l)}`)})).exitCode()}};ab.paths=[[\"run\"]];var zae=ab;var xm=class extends Le{constructor(){super(...arguments);this.inspect=z.String(\"--inspect\",!1,{tolerateBoolean:!0,description:\"Forwarded to the underlying Node process when executing a binary\"});this.inspectBrk=z.String(\"--inspect-brk\",!1,{tolerateBoolean:!0,description:\"Forwarded to the underlying Node process when executing a binary\"});this.topLevel=z.Boolean(\"-T,--top-level\",!1,{description:\"Check the root workspace for scripts and/or binaries instead of the current one\"});this.binariesOnly=z.Boolean(\"-B,--binaries-only\",!1,{description:\"Ignore any user defined scripts and only check for binaries\"});this.silent=z.Boolean(\"--silent\",{hidden:!0});this.scriptName=z.String();this.args=z.Proxy()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i,locator:n}=await ze.find(e,this.context.cwd);await r.restoreInstallState();let s=this.topLevel?r.topLevelWorkspace.anchoredLocator:n;if(!this.binariesOnly&&await Zt.hasPackageScript(s,this.scriptName,{project:r}))return await Zt.executePackageScript(s,this.scriptName,this.args,{project:r,stdin:this.context.stdin,stdout:this.context.stdout,stderr:this.context.stderr});let o=await Zt.getPackageAccessibleBinaries(s,{project:r});if(o.get(this.scriptName)){let l=[];return this.inspect&&(typeof this.inspect==\"string\"?l.push(`--inspect=${this.inspect}`):l.push(\"--inspect\")),this.inspectBrk&&(typeof this.inspectBrk==\"string\"?l.push(`--inspect-brk=${this.inspectBrk}`):l.push(\"--inspect-brk\")),await Zt.executePackageAccessibleBinary(s,this.scriptName,this.args,{cwd:this.context.cwd,project:r,stdin:this.context.stdin,stdout:this.context.stdout,stderr:this.context.stderr,nodeArgs:l,packageAccessibleBinaries:o})}if(!this.topLevel&&!this.binariesOnly&&i&&this.scriptName.includes(\":\")){let c=(await Promise.all(r.workspaces.map(async u=>u.manifest.scripts.has(this.scriptName)?u:null))).filter(u=>u!==null);if(c.length===1)return await Zt.executeWorkspaceScript(c[0],this.scriptName,this.args,{stdin:this.context.stdin,stdout:this.context.stdout,stderr:this.context.stderr})}if(this.topLevel)throw this.scriptName===\"node-gyp\"?new Pe(`Couldn't find a script name \"${this.scriptName}\" in the top-level (used by ${P.prettyLocator(e,n)}). This typically happens because some package depends on \"node-gyp\" to build itself, but didn't list it in their dependencies. 
To fix that, please run \"yarn add node-gyp\" into your top-level workspace. You also can open an issue on the repository of the specified package to suggest them to use an optional peer dependency.`):new Pe(`Couldn't find a script name \"${this.scriptName}\" in the top-level (used by ${P.prettyLocator(e,n)}).`);{if(this.scriptName===\"global\")throw new Pe(\"The 'yarn global' commands have been removed in 2.x - consider using 'yarn dlx' or a third-party plugin instead\");let l=[this.scriptName].concat(this.args);for(let[c,u]of Nf)for(let g of u)if(l.length>=g.length&&JSON.stringify(l.slice(0,g.length))===JSON.stringify(g))throw new Pe(`Couldn't find a script named \"${this.scriptName}\", but a matching command can be found in the ${c} plugin. You can install it with \"yarn plugin import ${c}\".`);throw new Pe(`Couldn't find a script named \"${this.scriptName}\".`)}}};xm.paths=[[\"run\"]],xm.usage=Re.Usage({description:\"run a script defined in the package.json\",details:`\n      This command will run a tool. The exact tool that will be executed will depend on the current state of your workspace:\n\n      - If the \\`scripts\\` field from your local package.json contains a matching script name, its definition will get executed.\n\n      - Otherwise, if one of the local workspace's dependencies exposes a binary with a matching name, this binary will get executed.\n\n      - Otherwise, if the specified name contains a colon character and if one of the workspaces in the project contains exactly one script with a matching name, then this script will get executed.\n\n      Whatever happens, the cwd of the spawned process will be the workspace that declares the script (which makes it possible to call commands cross-workspaces using the third syntax).\n    `,examples:[[\"Run the tests from the local workspace\",\"$0 run test\"],['Same thing, but without the \"run\" keyword',\"$0 test\"],[\"Inspect Webpack while running\",\"$0 run --inspect-brk webpack\"]]});var _ae=xm;var Pm=class extends Le{constructor(){super(...arguments);this.save=z.Boolean(\"-s,--save\",!1,{description:\"Persist the resolution inside the top-level manifest\"});this.descriptor=z.String();this.resolution=z.String()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd),n=await Nt.find(e);if(await r.restoreInstallState({restoreResolutions:!1}),!i)throw new ht(r.cwd,this.context.cwd);let s=P.parseDescriptor(this.descriptor,!0),o=P.makeDescriptor(s,this.resolution);return r.storedDescriptors.set(s.descriptorHash,s),r.storedDescriptors.set(o.descriptorHash,o),r.resolutionAliases.set(s.descriptorHash,o.descriptorHash),(await Je.start({configuration:e,stdout:this.context.stdout},async l=>{await r.install({cache:n,report:l})})).exitCode()}};Pm.paths=[[\"set\",\"resolution\"]],Pm.usage=Re.Usage({description:\"enforce a package resolution\",details:'\\n      This command updates the resolution table so that `descriptor` is resolved by `resolution`.\\n\\n      Note that by default this command only affect the current resolution table - meaning that this \"manual override\" will disappear if you remove the lockfile, or if the package disappear from the table. 
If you wish to make the enforced resolution persist whatever happens, add the `-s,--save` flag which will also edit the `resolutions` field from your top-level manifest.\\n\\n      Note that no attempt is made at validating that `resolution` is a valid resolution entry for `descriptor`.\\n    ',examples:[[\"Force all instances of lodash@npm:^1.2.3 to resolve to 1.5.0\",\"$0 set resolution lodash@npm:^1.2.3 1.5.0\"]]});var Vae=Pm;var Xae=ge(ts()),Dm=class extends Le{constructor(){super(...arguments);this.all=z.Boolean(\"-A,--all\",!1,{description:\"Unlink all workspaces belonging to the target project from the current one\"});this.leadingArguments=z.Rest()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd),n=await Nt.find(e);if(!i)throw new ht(r.cwd,this.context.cwd);let s=r.topLevelWorkspace,o=new Set;if(this.leadingArguments.length===0&&this.all)for(let{pattern:l,reference:c}of s.manifest.resolutions)c.startsWith(\"portal:\")&&o.add(l.descriptor.fullName);if(this.leadingArguments.length>0)for(let l of this.leadingArguments){let c=k.resolve(this.context.cwd,j.toPortablePath(l));if(Se.isPathLike(l)){let u=await ye.find(c,this.context.plugins,{useRc:!1,strict:!1}),{project:g,workspace:f}=await ze.find(u,c);if(!f)throw new ht(g.cwd,c);if(this.all){for(let h of g.workspaces)h.manifest.name&&o.add(P.stringifyIdent(h.locator));if(o.size===0)throw new Pe(\"No workspace found to be unlinked in the target project\")}else{if(!f.manifest.name)throw new Pe(\"The target workspace doesn't have a name and thus cannot be unlinked\");o.add(P.stringifyIdent(f.locator))}}else{let u=[...s.manifest.resolutions.map(({pattern:g})=>g.descriptor.fullName)];for(let g of(0,Xae.default)(u,l))o.add(g)}}return s.manifest.resolutions=s.manifest.resolutions.filter(({pattern:l})=>!o.has(l.descriptor.fullName)),(await Je.start({configuration:e,stdout:this.context.stdout},async l=>{await r.install({cache:n,report:l})})).exitCode()}};Dm.paths=[[\"unlink\"]],Dm.usage=Re.Usage({description:\"disconnect the local project from another one\",details:`\n      This command will remove any resolutions in the project-level manifest that would have been added via a yarn link with similar arguments.\n    `,examples:[[\"Unregister a remote workspace in the current project\",\"$0 unlink ~/ts-loader\"],[\"Unregister all workspaces from a remote project in the current project\",\"$0 unlink ~/jest --all\"],[\"Unregister all previously linked workspaces\",\"$0 unlink --all\"],[\"Unregister all workspaces matching a glob\",\"$0 unlink '@babel/*' 'pkg-{a,b}'\"]]});var Zae=Dm;var $ae=ge($C()),_N=ge(ts());Es();var eh=class extends Le{constructor(){super(...arguments);this.interactive=z.Boolean(\"-i,--interactive\",{description:\"Offer various choices, depending on the detected upgrade paths\"});this.exact=z.Boolean(\"-E,--exact\",!1,{description:\"Don't use any semver modifier on the resolved range\"});this.tilde=z.Boolean(\"-T,--tilde\",!1,{description:\"Use the `~` semver modifier on the resolved range\"});this.caret=z.Boolean(\"-C,--caret\",!1,{description:\"Use the `^` semver modifier on the resolved range\"});this.recursive=z.Boolean(\"-R,--recursive\",!1,{description:\"Resolve again ALL resolutions for those packages\"});this.mode=z.String(\"--mode\",{description:\"Change what artifacts installs generate\",validator:nn(di)});this.patterns=z.Rest()}async execute(){return this.recursive?await this.executeUpRecursive():await this.executeUpClassic()}async 
executeUpRecursive(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd),n=await Nt.find(e);if(!i)throw new ht(r.cwd,this.context.cwd);await r.restoreInstallState({restoreResolutions:!1});let s=[...r.storedDescriptors.values()],o=s.map(u=>P.stringifyIdent(u)),a=new Set;for(let u of this.patterns){if(P.parseDescriptor(u).range!==\"unknown\")throw new Pe(\"Ranges aren't allowed when using --recursive\");for(let g of(0,_N.default)(o,u)){let f=P.parseIdent(g);a.add(f.identHash)}}let l=s.filter(u=>a.has(u.identHash));for(let u of l)r.storedDescriptors.delete(u.descriptorHash),r.storedResolutions.delete(u.descriptorHash);return(await Je.start({configuration:e,stdout:this.context.stdout},async u=>{await r.install({cache:n,report:u})})).exitCode()}async executeUpClassic(){var m;let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd),n=await Nt.find(e);if(!i)throw new ht(r.cwd,this.context.cwd);await r.restoreInstallState({restoreResolutions:!1});let s=(m=this.interactive)!=null?m:e.get(\"preferInteractive\"),o=em(this,r),a=s?[_r.KEEP,_r.REUSE,_r.PROJECT,_r.LATEST]:[_r.PROJECT,_r.LATEST],l=[],c=[];for(let y of this.patterns){let Q=!1,S=P.parseDescriptor(y);for(let x of r.workspaces)for(let M of[Hr.REGULAR,Hr.DEVELOPMENT]){let U=[...x.manifest.getForScope(M).values()].map(J=>P.stringifyIdent(J));for(let J of(0,_N.default)(U,P.stringifyIdent(S))){let W=P.parseIdent(J),ee=x.manifest[M].get(W.identHash);if(typeof ee==\"undefined\")throw new Error(\"Assertion failed: Expected the descriptor to be registered\");let Z=P.makeDescriptor(W,S.range);l.push(Promise.resolve().then(async()=>[x,M,ee,await tm(Z,{project:r,workspace:x,cache:n,target:M,modifier:o,strategies:a})])),Q=!0}}Q||c.push(y)}if(c.length>1)throw new Pe(`Patterns ${ae.prettyList(e,c,Di.CODE)} don't match any packages referenced by any workspace`);if(c.length>0)throw new Pe(`Pattern ${ae.prettyList(e,c,Di.CODE)} doesn't match any packages referenced by any workspace`);let u=await Promise.all(l),g=await uA.start({configuration:e,stdout:this.context.stdout,suggestInstall:!1},async y=>{for(let[,,Q,{suggestions:S,rejections:x}]of u){let M=S.filter(Y=>Y.descriptor!==null);if(M.length===0){let[Y]=x;if(typeof Y==\"undefined\")throw new Error(\"Assertion failed: Expected an error to have been set\");let U=this.cli.error(Y);r.configuration.get(\"enableNetwork\")?y.reportError($.CANT_SUGGEST_RESOLUTIONS,`${P.prettyDescriptor(e,Q)} can't be resolved to a satisfying range\n\n${U}`):y.reportError($.CANT_SUGGEST_RESOLUTIONS,`${P.prettyDescriptor(e,Q)} can't be resolved to a satisfying range (note: network resolution has been disabled)\n\n${U}`)}else M.length>1&&!s&&y.reportError($.CANT_SUGGEST_RESOLUTIONS,`${P.prettyDescriptor(e,Q)} has multiple possible upgrade strategies; use -i to disambiguate manually`)}});if(g.hasErrors())return g.exitCode();let f=!1,h=[];for(let[y,Q,,{suggestions:S}]of u){let x,M=S.filter(W=>W.descriptor!==null),Y=M[0].descriptor,U=M.every(W=>P.areDescriptorsEqual(W.descriptor,Y));M.length===1||U?x=Y:(f=!0,{answer:x}=await(0,$ae.prompt)({type:\"select\",name:\"answer\",message:`Which range to you want to use in ${P.prettyWorkspace(e,y)} \\u276F ${Q}?`,choices:S.map(({descriptor:W,name:ee,reason:Z})=>W?{name:ee,hint:Z,descriptor:W}:{name:ee,hint:Z,disabled:!0}),onCancel:()=>process.exit(130),result(W){return this.find(W,\"descriptor\")},stdin:this.context.stdin,stdout:this.context.stdout}));let 
J=y.manifest[Q].get(x.identHash);if(typeof J==\"undefined\")throw new Error(\"Assertion failed: This descriptor should have a matching entry\");if(J.descriptorHash!==x.descriptorHash)y.manifest[Q].set(x.identHash,x),h.push([y,Q,J,x]);else{let W=e.makeResolver(),ee={project:r,resolver:W},Z=W.bindDescriptor(J,y.anchoredLocator,ee);r.forgetResolution(Z)}}return await e.triggerMultipleHooks(y=>y.afterWorkspaceDependencyReplacement,h),f&&this.context.stdout.write(`\n`),(await Je.start({configuration:e,stdout:this.context.stdout},async y=>{await r.install({cache:n,report:y,mode:this.mode})})).exitCode()}};eh.paths=[[\"up\"]],eh.usage=Re.Usage({description:\"upgrade dependencies across the project\",details:\"\\n      This command upgrades the packages matching the list of specified patterns to their latest available version across the whole project (regardless of whether they're part of `dependencies` or `devDependencies` - `peerDependencies` won't be affected). This is a project-wide command: all workspaces will be upgraded in the process.\\n\\n      If `-R,--recursive` is set the command will change behavior and no other switch will be allowed. When operating under this mode `yarn up` will force all ranges matching the selected packages to be resolved again (often to the highest available versions) before being stored in the lockfile. It however won't touch your manifests anymore, so depending on your needs you might want to run both `yarn up` and `yarn up -R` to cover all bases.\\n\\n      If `-i,--interactive` is set (or if the `preferInteractive` settings is toggled on) the command will offer various choices, depending on the detected upgrade paths. Some upgrades require this flag in order to resolve ambiguities.\\n\\n      The, `-C,--caret`, `-E,--exact` and  `-T,--tilde` options have the same meaning as in the `add` command (they change the modifier used when the range is missing or a tag, and are ignored when the range is explicitly set).\\n\\n      If the `--mode=<mode>` option is set, Yarn will change which artifacts are generated. The modes currently supported are:\\n\\n      - `skip-build` will not run the build scripts at all. Note that this is different from setting `enableScripts` to false because the later will disable build scripts, and thus affect the content of the artifacts generated on disk, whereas the former will just disable the build step - but not the scripts themselves, which just won't run.\\n\\n      - `update-lockfile` will skip the link step altogether, and only fetch packages that are missing from the lockfile (or that have no associated checksums). This mode is typically used by tools like Renovate or Dependabot to keep a lockfile up-to-date without incurring the full install cost.\\n\\n      Generally you can see `yarn up` as a counterpart to what was `yarn upgrade --latest` in Yarn 1 (ie it ignores the ranges previously listed in your manifests), but unlike `yarn upgrade` which only upgraded dependencies in the current workspace, `yarn up` will upgrade all workspaces at the same time.\\n\\n      This command accepts glob patterns as arguments (if valid Descriptors and supported by [micromatch](https://github.com/micromatch/micromatch)). 
Make sure to escape the patterns, to prevent your own shell from trying to expand them.\\n\\n      **Note:** The ranges have to be static, only the package scopes and names can contain glob patterns.\\n    \",examples:[[\"Upgrade all instances of lodash to the latest release\",\"$0 up lodash\"],[\"Upgrade all instances of lodash to the latest release, but ask confirmation for each\",\"$0 up lodash -i\"],[\"Upgrade all instances of lodash to 1.2.3\",\"$0 up lodash@1.2.3\"],[\"Upgrade all instances of packages with the `@babel` scope to the latest release\",\"$0 up '@babel/*'\"],[\"Upgrade all instances of packages containing the word `jest` to the latest release\",\"$0 up '*jest*'\"],[\"Upgrade all instances of packages with the `@babel` scope to 7.0.0\",\"$0 up '@babel/*@7.0.0'\"]]}),eh.schema=[lv(\"recursive\",Cc.Forbids,[\"interactive\",\"exact\",\"tilde\",\"caret\"],{ignore:[void 0,!1]})];var eAe=eh;var Rm=class extends Le{constructor(){super(...arguments);this.recursive=z.Boolean(\"-R,--recursive\",!1,{description:\"List, for each workspace, what are all the paths that lead to the dependency\"});this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"});this.peers=z.Boolean(\"--peers\",!1,{description:\"Also print the peer dependencies that match the specified name\"});this.package=z.String()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd);if(!i)throw new ht(r.cwd,this.context.cwd);await r.restoreInstallState();let n=P.parseIdent(this.package).identHash,s=this.recursive?X8e(r,n,{configuration:e,peers:this.peers}):V8e(r,n,{configuration:e,peers:this.peers});as.emitTree(s,{configuration:e,stdout:this.context.stdout,json:this.json,separators:1})}};Rm.paths=[[\"why\"]],Rm.usage=Re.Usage({description:\"display the reason why a package is needed\",details:`\n      This command prints the exact reasons why a package appears in the dependency tree.\n\n      If \\`-R,--recursive\\` is set, the listing will go in depth and will list, for each workspaces, what are all the paths that lead to the dependency. 
Note that the display is somewhat optimized in that it will not print the package listing twice for a single package, so if you see a leaf named \"Foo\" when looking for \"Bar\", it means that \"Foo\" already got printed higher in the tree.\n    `,examples:[[\"Explain why lodash is used in your project\",\"$0 why lodash\"]]});var tAe=Rm;function V8e(t,e,{configuration:r,peers:i}){let n=Se.sortMap(t.storedPackages.values(),a=>P.stringifyLocator(a)),s={},o={children:s};for(let a of n){let l={},c=null;for(let u of a.dependencies.values()){if(!i&&a.peerDependencies.has(u.identHash))continue;let g=t.storedResolutions.get(u.descriptorHash);if(!g)throw new Error(\"Assertion failed: The resolution should have been registered\");let f=t.storedPackages.get(g);if(!f)throw new Error(\"Assertion failed: The package should have been registered\");if(f.identHash!==e)continue;if(c===null){let p=P.stringifyLocator(a);s[p]={value:[a,ae.Type.LOCATOR],children:l}}let h=P.stringifyLocator(f);l[h]={value:[{descriptor:u,locator:f},ae.Type.DEPENDENT]}}}return o}function X8e(t,e,{configuration:r,peers:i}){let n=Se.sortMap(t.workspaces,f=>P.stringifyLocator(f.anchoredLocator)),s=new Set,o=new Set,a=f=>{if(s.has(f.locatorHash))return o.has(f.locatorHash);if(s.add(f.locatorHash),f.identHash===e)return o.add(f.locatorHash),!0;let h=!1;f.identHash===e&&(h=!0);for(let p of f.dependencies.values()){if(!i&&f.peerDependencies.has(p.identHash))continue;let m=t.storedResolutions.get(p.descriptorHash);if(!m)throw new Error(\"Assertion failed: The resolution should have been registered\");let y=t.storedPackages.get(m);if(!y)throw new Error(\"Assertion failed: The package should have been registered\");a(y)&&(h=!0)}return h&&o.add(f.locatorHash),h};for(let f of n){let h=t.storedPackages.get(f.anchoredLocator.locatorHash);if(!h)throw new Error(\"Assertion failed: The package should have been registered\");a(h)}let l=new Set,c={},u={children:c},g=(f,h,p)=>{if(!o.has(f.locatorHash))return;let m=p!==null?ae.tuple(ae.Type.DEPENDENT,{locator:f,descriptor:p}):ae.tuple(ae.Type.LOCATOR,f),y={},Q={value:m,children:y},S=P.stringifyLocator(f);if(h[S]=Q,!l.has(f.locatorHash)&&(l.add(f.locatorHash),!(p!==null&&t.tryWorkspaceByLocator(f))))for(let x of f.dependencies.values()){if(!i&&f.peerDependencies.has(x.identHash))continue;let M=t.storedResolutions.get(x.descriptorHash);if(!M)throw new Error(\"Assertion failed: The resolution should have been registered\");let Y=t.storedPackages.get(M);if(!Y)throw new Error(\"Assertion failed: The package should have been registered\");g(Y,y,x)}};for(let f of n){let h=t.storedPackages.get(f.anchoredLocator.locatorHash);if(!h)throw new Error(\"Assertion failed: The package should have been registered\");g(h,c,null)}return u}var aL={};ft(aL,{default:()=>mze,gitUtils:()=>wu});var wu={};ft(wu,{TreeishProtocols:()=>On,clone:()=>nL,fetchBase:()=>wAe,fetchChangedFiles:()=>BAe,fetchChangedWorkspaces:()=>dze,fetchRoot:()=>yAe,isGitUrl:()=>rh,lsRemote:()=>IAe,normalizeLocator:()=>tL,normalizeRepoUrl:()=>Fm,resolveUrl:()=>iL,splitRepoUrl:()=>Nm});var $N=ge(dAe()),CAe=ge(tB()),th=ge(require(\"querystring\")),eL=ge(ti()),mAe=ge(require(\"url\"));function EAe(){return te(N({},process.env),{GIT_SSH_COMMAND:\"ssh -o BatchMode=yes\"})}var 
pze=[/^ssh:/,/^git(?:\\+[^:]+)?:/,/^(?:git\\+)?https?:[^#]+\\/[^#]+(?:\\.git)(?:#.*)?$/,/^git@[^#]+\\/[^#]+\\.git(?:#.*)?$/,/^(?:github:|https:\\/\\/github\\.com\\/)?(?!\\.{1,2}\\/)([a-zA-Z._0-9-]+)\\/(?!\\.{1,2}(?:#|$))([a-zA-Z._0-9-]+?)(?:\\.git)?(?:#.*)?$/,/^https:\\/\\/github\\.com\\/(?!\\.{1,2}\\/)([a-zA-Z0-9._-]+)\\/(?!\\.{1,2}(?:#|$))([a-zA-Z0-9._-]+?)\\/tarball\\/(.+)?$/],On;(function(n){n.Commit=\"commit\",n.Head=\"head\",n.Tag=\"tag\",n.Semver=\"semver\"})(On||(On={}));function rh(t){return t?pze.some(e=>!!t.match(e)):!1}function Nm(t){t=Fm(t);let e=t.indexOf(\"#\");if(e===-1)return{repo:t,treeish:{protocol:On.Head,request:\"HEAD\"},extra:{}};let r=t.slice(0,e),i=t.slice(e+1);if(i.match(/^[a-z]+=/)){let n=th.default.parse(i);for(let[l,c]of Object.entries(n))if(typeof c!=\"string\")throw new Error(`Assertion failed: The ${l} parameter must be a literal string`);let s=Object.values(On).find(l=>Object.prototype.hasOwnProperty.call(n,l)),o,a;typeof s!=\"undefined\"?(o=s,a=n[s]):(o=On.Head,a=\"HEAD\");for(let l of Object.values(On))delete n[l];return{repo:r,treeish:{protocol:o,request:a},extra:n}}else{let n=i.indexOf(\":\"),s,o;return n===-1?(s=null,o=i):(s=i.slice(0,n),o=i.slice(n+1)),{repo:r,treeish:{protocol:s,request:o},extra:{}}}}function Fm(t,{git:e=!1}={}){var r;if(t=t.replace(/^git\\+https:/,\"https:\"),t=t.replace(/^(?:github:|https:\\/\\/github\\.com\\/)?(?!\\.{1,2}\\/)([a-zA-Z0-9._-]+)\\/(?!\\.{1,2}(?:#|$))([a-zA-Z0-9._-]+?)(?:\\.git)?(#.*)?$/,\"https://github.com/$1/$2.git$3\"),t=t.replace(/^https:\\/\\/github\\.com\\/(?!\\.{1,2}\\/)([a-zA-Z0-9._-]+)\\/(?!\\.{1,2}(?:#|$))([a-zA-Z0-9._-]+?)\\/tarball\\/(.+)?$/,\"https://github.com/$1/$2.git#$3\"),e){t=t.replace(/^git\\+([^:]+):/,\"$1:\");let i;try{i=mAe.default.parse(t)}catch{i=null}i&&i.protocol===\"ssh:\"&&((r=i.path)==null?void 0:r.startsWith(\"/:\"))&&(t=t.replace(/^ssh:\\/\\//,\"\"))}return t}function tL(t){return P.makeLocator(t,Fm(t.reference))}async function IAe(t,e){let r=Fm(t,{git:!0});if(!ir.getNetworkSettings(`https://${(0,$N.default)(r).resource}`,{configuration:e}).enableNetwork)throw new Error(`Request to '${r}' has been blocked because of your configuration settings`);let n=await rL(\"listing refs\",[\"ls-remote\",r],{cwd:e.startingCwd,env:EAe()},{configuration:e,normalizedRepoUrl:r}),s=new Map,o=/^([a-f0-9]{40})\\t([^\\n]+)/gm,a;for(;(a=o.exec(n.stdout))!==null;)s.set(a[2],a[1]);return s}async function iL(t,e){let{repo:r,treeish:{protocol:i,request:n},extra:s}=Nm(t),o=await IAe(r,e),a=(c,u)=>{switch(c){case On.Commit:{if(!u.match(/^[a-f0-9]{40}$/))throw new Error(\"Invalid commit hash\");return th.default.stringify(te(N({},s),{commit:u}))}case On.Head:{let g=o.get(u===\"HEAD\"?u:`refs/heads/${u}`);if(typeof g==\"undefined\")throw new Error(`Unknown head (\"${u}\")`);return th.default.stringify(te(N({},s),{commit:g}))}case On.Tag:{let g=o.get(`refs/tags/${u}`);if(typeof g==\"undefined\")throw new Error(`Unknown tag (\"${u}\")`);return th.default.stringify(te(N({},s),{commit:g}))}case On.Semver:{let g=Wt.validRange(u);if(!g)throw new Error(`Invalid range (\"${u}\")`);let f=new Map([...o.entries()].filter(([p])=>p.startsWith(\"refs/tags/\")).map(([p,m])=>[eL.default.parse(p.slice(10)),m]).filter(p=>p[0]!==null)),h=eL.default.maxSatisfying([...f.keys()],g);if(h===null)throw new Error(`No matching range (\"${u}\")`);return th.default.stringify(te(N({},s),{commit:f.get(h)}))}case null:{let g;if((g=l(On.Commit,u))!==null||(g=l(On.Tag,u))!==null||(g=l(On.Head,u))!==null)return g;throw u.match(/^[a-f0-9]+$/)?new 
Error(`Couldn't resolve \"${u}\" as either a commit, a tag, or a head - if a commit, use the 40-characters commit hash`):new Error(`Couldn't resolve \"${u}\" as either a commit, a tag, or a head`)}default:throw new Error(`Invalid Git resolution protocol (\"${c}\")`)}},l=(c,u)=>{try{return a(c,u)}catch(g){return null}};return`${r}#${a(i,n)}`}async function nL(t,e){return await e.getLimit(\"cloneConcurrency\")(async()=>{let{repo:r,treeish:{protocol:i,request:n}}=Nm(t);if(i!==\"commit\")throw new Error(\"Invalid treeish protocol when cloning\");let s=Fm(r,{git:!0});if(ir.getNetworkSettings(`https://${(0,$N.default)(s).resource}`,{configuration:e}).enableNetwork===!1)throw new Error(`Request to '${s}' has been blocked because of your configuration settings`);let o=await K.mktempPromise(),a={cwd:o,env:EAe()};return await rL(\"cloning the repository\",[\"clone\",\"-c core.autocrlf=false\",s,j.fromPortablePath(o)],a,{configuration:e,normalizedRepoUrl:s}),await rL(\"switching branch\",[\"checkout\",`${n}`],a,{configuration:e,normalizedRepoUrl:s}),o})}async function yAe(t){let e=null,r,i=t;do r=i,await K.existsPromise(k.join(r,\".git\"))&&(e=r),i=k.dirname(r);while(e===null&&i!==r);return e}async function wAe(t,{baseRefs:e}){if(e.length===0)throw new Pe(\"Can't run this command with zero base refs specified.\");let r=[];for(let a of e){let{code:l}=await Fr.execvp(\"git\",[\"merge-base\",a,\"HEAD\"],{cwd:t});l===0&&r.push(a)}if(r.length===0)throw new Pe(`No ancestor could be found between any of HEAD and ${e.join(\", \")}`);let{stdout:i}=await Fr.execvp(\"git\",[\"merge-base\",\"HEAD\",...r],{cwd:t,strict:!0}),n=i.trim(),{stdout:s}=await Fr.execvp(\"git\",[\"show\",\"--quiet\",\"--pretty=format:%s\",n],{cwd:t,strict:!0}),o=s.trim();return{hash:n,title:o}}async function BAe(t,{base:e,project:r}){let i=Se.buildIgnorePattern(r.configuration.get(\"changesetIgnorePatterns\")),{stdout:n}=await Fr.execvp(\"git\",[\"diff\",\"--name-only\",`${e}`],{cwd:t,strict:!0}),s=n.split(/\\r\\n|\\r|\\n/).filter(c=>c.length>0).map(c=>k.resolve(t,j.toPortablePath(c))),{stdout:o}=await Fr.execvp(\"git\",[\"ls-files\",\"--others\",\"--exclude-standard\"],{cwd:t,strict:!0}),a=o.split(/\\r\\n|\\r|\\n/).filter(c=>c.length>0).map(c=>k.resolve(t,j.toPortablePath(c))),l=[...new Set([...s,...a].sort())];return i?l.filter(c=>!k.relative(r.cwd,c).match(i)):l}async function dze({ref:t,project:e}){if(e.configuration.projectCwd===null)throw new Pe(\"This command can only be run from within a Yarn project\");let r=[k.resolve(e.cwd,e.configuration.get(\"cacheFolder\")),k.resolve(e.cwd,e.configuration.get(\"installStatePath\")),k.resolve(e.cwd,e.configuration.get(\"lockfileFilename\")),k.resolve(e.cwd,e.configuration.get(\"virtualFolder\"))];await e.configuration.triggerHook(o=>o.populateYarnPaths,e,o=>{o!=null&&r.push(o)});let i=await yAe(e.configuration.projectCwd);if(i==null)throw new Pe(\"This command can only be run on Git repositories\");let n=await wAe(i,{baseRefs:typeof t==\"string\"?[t]:e.configuration.get(\"changesetBaseRefs\")}),s=await BAe(i,{base:n.hash,project:e});return new Set(Se.mapAndFilter(s,o=>{let a=e.tryWorkspaceByFilePath(o);return a===null?Se.mapAndFilter.skip:r.some(l=>o.startsWith(l))?Se.mapAndFilter.skip:a}))}async function rL(t,e,r,{configuration:i,normalizedRepoUrl:n}){try{return await Fr.execvp(\"git\",e,te(N({},r),{strict:!0}))}catch(s){if(!(s instanceof Fr.ExecError))throw s;let o=s.reportExtra,a=s.stderr.toString();throw new ct($.EXCEPTION,`Failed ${t}`,l=>{l.reportError($.EXCEPTION,`  
${ae.prettyField(i,{label:\"Repository URL\",value:ae.tuple(ae.Type.URL,n)})}`);for(let c of a.matchAll(/^(.+?): (.*)$/gm)){let[,u,g]=c;u=u.toLowerCase();let f=u===\"error\"?\"Error\":`${(0,CAe.default)(u)} Error`;l.reportError($.EXCEPTION,`  ${ae.prettyField(i,{label:f,value:ae.tuple(ae.Type.NO_HINT,g)})}`)}o==null||o(l)})}}var sL=class{supports(e,r){return rh(e.reference)}getLocalPath(e,r){return null}async fetch(e,r){let i=r.checksums.get(e.locatorHash)||null,n=tL(e),s=new Map(r.checksums);s.set(n.locatorHash,i);let o=te(N({},r),{checksums:s}),a=await this.downloadHosted(n,o);if(a!==null)return a;let[l,c,u]=await r.cache.fetchPackageFromCache(e,i,N({onHit:()=>r.report.reportCacheHit(e),onMiss:()=>r.report.reportCacheMiss(e,`${P.prettyLocator(r.project.configuration,e)} can't be found in the cache and will be fetched from the remote repository`),loader:()=>this.cloneFromRemote(n,o),skipIntegrityCheck:r.skipIntegrityCheck},r.cacheOptions));return{packageFs:l,releaseFs:c,prefixPath:P.getIdentVendorPath(e),checksum:u}}async downloadHosted(e,r){return r.project.configuration.reduceHook(i=>i.fetchHostedRepository,null,e,r)}async cloneFromRemote(e,r){let i=await nL(e.reference,r.project.configuration),n=Nm(e.reference),s=k.join(i,\"package.tgz\");await Zt.prepareExternalProject(i,s,{configuration:r.project.configuration,report:r.report,workspace:n.extra.workspace,locator:e});let o=await K.readFilePromise(s);return await Se.releaseAfterUseAsync(async()=>await wi.convertToZip(o,{compressionLevel:r.project.configuration.get(\"compressionLevel\"),prefixPath:P.getIdentVendorPath(e),stripComponents:1}))}};var oL=class{supportsDescriptor(e,r){return rh(e.range)}supportsLocator(e,r){return rh(e.reference)}shouldPersistResolution(e,r){return!0}bindDescriptor(e,r,i){return e}getResolutionDependencies(e,r){return[]}async getCandidates(e,r,i){let n=await iL(e.range,i.project.configuration);return[P.makeLocator(e,n)]}async getSatisfying(e,r,i){return null}async resolve(e,r){if(!r.fetchOptions)throw new Error(\"Assertion failed: This resolver cannot be used unless a fetcher is configured\");let i=await r.fetchOptions.fetcher.fetch(e,r.fetchOptions),n=await Se.releaseAfterUseAsync(async()=>await At.find(i.prefixPath,{baseFs:i.packageFs}),i.releaseFs);return te(N({},e),{version:n.version||\"0.0.0\",languageName:n.languageName||r.project.configuration.get(\"defaultLanguageName\"),linkType:Qt.HARD,conditions:n.getConditions(),dependencies:n.dependencies,peerDependencies:n.peerDependencies,dependenciesMeta:n.dependenciesMeta,peerDependenciesMeta:n.peerDependenciesMeta,bin:n.bin})}};var Cze={configuration:{changesetBaseRefs:{description:\"The base git refs that the current HEAD is compared against when detecting changes. 
Supports git branches, tags, and commits.\",type:Ie.STRING,isArray:!0,isNullable:!1,default:[\"master\",\"origin/master\",\"upstream/master\",\"main\",\"origin/main\",\"upstream/main\"]},changesetIgnorePatterns:{description:\"Array of glob patterns; files matching them will be ignored when fetching the changed files\",type:Ie.STRING,default:[],isArray:!0},cloneConcurrency:{description:\"Maximal number of concurrent clones\",type:Ie.NUMBER,default:2}},fetchers:[sL],resolvers:[oL]};var mze=Cze;var Lm=class extends Le{constructor(){super(...arguments);this.since=z.String(\"--since\",{description:\"Only include workspaces that have been changed since the specified ref.\",tolerateBoolean:!0});this.recursive=z.Boolean(\"-R,--recursive\",!1,{description:\"Find packages via dependencies/devDependencies instead of using the workspaces field\"});this.verbose=z.Boolean(\"-v,--verbose\",!1,{description:\"Also return the cross-dependencies between workspaces\"});this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r}=await ze.find(e,this.context.cwd);return(await Je.start({configuration:e,json:this.json,stdout:this.context.stdout},async n=>{let s=this.since?await wu.fetchChangedWorkspaces({ref:this.since,project:r}):r.workspaces,o=new Set(s);if(this.recursive)for(let a of[...s].map(l=>l.getRecursiveWorkspaceDependents()))for(let l of a)o.add(l);for(let a of o){let{manifest:l}=a,c;if(this.verbose){let u=new Set,g=new Set;for(let f of At.hardDependencies)for(let[h,p]of l.getForScope(f)){let m=r.tryWorkspaceByDescriptor(p);m===null?r.workspacesByIdent.has(h)&&g.add(p):u.add(m)}c={workspaceDependencies:Array.from(u).map(f=>f.relativeCwd),mismatchedWorkspaceDependencies:Array.from(g).map(f=>P.stringifyDescriptor(f))}}n.reportInfo(null,`${a.relativeCwd}`),n.reportJson(N({location:a.relativeCwd,name:l.name?P.stringifyIdent(l.name):null},c))}})).exitCode()}};Lm.paths=[[\"workspaces\",\"list\"]],Lm.usage=Re.Usage({category:\"Workspace-related commands\",description:\"list all available workspaces\",details:\"\\n      This command will print the list of all workspaces in the project.\\n\\n      - If `--since` is set, Yarn will only list workspaces that have been modified since the specified ref. By default Yarn will use the refs specified by the `changesetBaseRefs` configuration option.\\n\\n      - If `-R,--recursive` is set, Yarn will find workspaces to run the command on by recursively evaluating `dependencies` and `devDependencies` fields, instead of looking at the `workspaces` fields.\\n\\n      - If both the `-v,--verbose` and `--json` options are set, Yarn will also return the cross-dependencies between each workspaces (useful when you wish to automatically generate Buck / Bazel rules).\\n    \"});var bAe=Lm;var Tm=class extends Le{constructor(){super(...arguments);this.workspaceName=z.String();this.commandName=z.String();this.args=z.Proxy()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd);if(!i)throw new ht(r.cwd,this.context.cwd);let n=r.workspaces,s=new Map(n.map(a=>{let l=P.convertToIdent(a.locator);return[P.stringifyIdent(l),a]})),o=s.get(this.workspaceName);if(o===void 0){let a=Array.from(s.keys()).sort();throw new Pe(`Workspace '${this.workspaceName}' not found. 
Did you mean any of the following:\n  - ${a.join(`\n  - `)}?`)}return this.cli.run([this.commandName,...this.args],{cwd:o.cwd})}};Tm.paths=[[\"workspace\"]],Tm.usage=Re.Usage({category:\"Workspace-related commands\",description:\"run a command within the specified workspace\",details:`\n      This command will run a given sub-command on a single workspace.\n    `,examples:[[\"Add a package to a single workspace\",\"yarn workspace components add -D react\"],[\"Run build script on a single workspace\",\"yarn workspace components run build\"]]});var QAe=Tm;var Eze={configuration:{enableImmutableInstalls:{description:\"If true (the default on CI), prevents the install command from modifying the lockfile\",type:Ie.BOOLEAN,default:vAe.isCI},defaultSemverRangePrefix:{description:\"The default save prefix: '^', '~' or ''\",type:Ie.STRING,values:[\"^\",\"~\",\"\"],default:ga.CARET}},commands:[Tse,Mse,$oe,uae,Vae,Tae,bae,bAe,Cae,mae,Eae,Iae,Nse,Lse,gae,hae,yae,wae,vae,kae,xae,Dae,Zae,Rae,jae,Kae,Gae,Fae,Yae,qae,Jae,zae,_ae,eAe,tAe,QAe]},Ize=Eze;var gL={};ft(gL,{default:()=>wze});var Ye={optional:!0},SAe=[[\"@tailwindcss/aspect-ratio@<0.2.1\",{peerDependencies:{tailwindcss:\"^2.0.2\"}}],[\"@tailwindcss/line-clamp@<0.2.1\",{peerDependencies:{tailwindcss:\"^2.0.2\"}}],[\"@fullhuman/postcss-purgecss@3.1.3 || 3.1.3-alpha.0\",{peerDependencies:{postcss:\"^8.0.0\"}}],[\"@samverschueren/stream-to-observable@<0.3.1\",{peerDependenciesMeta:{rxjs:Ye,zenObservable:Ye}}],[\"any-observable@<0.5.1\",{peerDependenciesMeta:{rxjs:Ye,zenObservable:Ye}}],[\"@pm2/agent@<1.0.4\",{dependencies:{debug:\"*\"}}],[\"debug@<4.2.0\",{peerDependenciesMeta:{[\"supports-color\"]:Ye}}],[\"got@<11\",{dependencies:{[\"@types/responselike\"]:\"^1.0.0\",[\"@types/keyv\"]:\"^3.1.1\"}}],[\"cacheable-lookup@<4.1.2\",{dependencies:{[\"@types/keyv\"]:\"^3.1.1\"}}],[\"http-link-dataloader@*\",{peerDependencies:{graphql:\"^0.13.1 || ^14.0.0\"}}],[\"typescript-language-server@*\",{dependencies:{[\"vscode-jsonrpc\"]:\"^5.0.1\",[\"vscode-languageserver-protocol\"]:\"^3.15.0\"}}],[\"postcss-syntax@*\",{peerDependenciesMeta:{[\"postcss-html\"]:Ye,[\"postcss-jsx\"]:Ye,[\"postcss-less\"]:Ye,[\"postcss-markdown\"]:Ye,[\"postcss-scss\"]:Ye}}],[\"jss-plugin-rule-value-function@<=10.1.1\",{dependencies:{[\"tiny-warning\"]:\"^1.0.2\"}}],[\"ink-select-input@<4.1.0\",{peerDependencies:{react:\"^16.8.2\"}}],[\"license-webpack-plugin@<2.3.18\",{peerDependenciesMeta:{webpack:Ye}}],[\"snowpack@>=3.3.0\",{dependencies:{[\"node-gyp\"]:\"^7.1.0\"}}],[\"promise-inflight@*\",{peerDependenciesMeta:{bluebird:Ye}}],[\"reactcss@*\",{peerDependencies:{react:\"*\"}}],[\"react-color@<=2.19.0\",{peerDependencies:{react:\"*\"}}],[\"gatsby-plugin-i18n@*\",{dependencies:{ramda:\"^0.24.1\"}}],[\"useragent@^2.0.0\",{dependencies:{request:\"^2.88.0\",yamlparser:\"0.0.x\",semver:\"5.5.x\"}}],[\"@apollographql/apollo-tools@*\",{peerDependencies:{graphql:\"^14.2.1 || ^15.0.0\"}}],[\"material-table@^2.0.0\",{dependencies:{\"@babel/runtime\":\"^7.11.2\"}}],[\"@babel/parser@*\",{dependencies:{\"@babel/types\":\"^7.8.3\"}}],[\"fork-ts-checker-webpack-plugin@<=6.3.4\",{peerDependencies:{eslint:\">= 6\",typescript:\">= 2.7\",webpack:\">= 4\",\"vue-template-compiler\":\"*\"},peerDependenciesMeta:{eslint:Ye,\"vue-template-compiler\":Ye}}],[\"rc-animate@<=3.1.1\",{peerDependencies:{react:\">=16.9.0\",\"react-dom\":\">=16.9.0\"}}],[\"react-bootstrap-table2-paginator@*\",{dependencies:{classnames:\"^2.2.6\"}}],[\"react-draggable@<=4.4.3\",{peerDependencies:{react:\">= 
16.3.0\",\"react-dom\":\">= 16.3.0\"}}],[\"apollo-upload-client@<14\",{peerDependencies:{graphql:\"14 - 15\"}}],[\"react-instantsearch-core@<=6.7.0\",{peerDependencies:{algoliasearch:\">= 3.1 < 5\"}}],[\"react-instantsearch-dom@<=6.7.0\",{dependencies:{\"react-fast-compare\":\"^3.0.0\"}}],[\"ws@<7.2.1\",{peerDependencies:{bufferutil:\"^4.0.1\",\"utf-8-validate\":\"^5.0.2\"},peerDependenciesMeta:{bufferutil:Ye,\"utf-8-validate\":Ye}}],[\"react-portal@*\",{peerDependencies:{\"react-dom\":\"^15.0.0-0 || ^16.0.0-0 || ^17.0.0-0\"}}],[\"react-scripts@<=4.0.1\",{peerDependencies:{react:\"*\"}}],[\"testcafe@<=1.10.1\",{dependencies:{\"@babel/plugin-transform-for-of\":\"^7.12.1\",\"@babel/runtime\":\"^7.12.5\"}}],[\"testcafe-legacy-api@<=4.2.0\",{dependencies:{\"testcafe-hammerhead\":\"^17.0.1\",\"read-file-relative\":\"^1.2.0\"}}],[\"@google-cloud/firestore@<=4.9.3\",{dependencies:{protobufjs:\"^6.8.6\"}}],[\"gatsby-source-apiserver@*\",{dependencies:{[\"babel-polyfill\"]:\"^6.26.0\"}}],[\"@webpack-cli/package-utils@<=1.0.1-alpha.4\",{dependencies:{[\"cross-spawn\"]:\"^7.0.3\"}}],[\"gatsby-remark-prismjs@<3.3.28\",{dependencies:{lodash:\"^4\"}}],[\"gatsby-plugin-favicon@*\",{peerDependencies:{webpack:\"*\"}}],[\"gatsby-plugin-sharp@<=4.6.0-next.3\",{dependencies:{debug:\"^4.3.1\"}}],[\"gatsby-react-router-scroll@<=5.6.0-next.0\",{dependencies:{[\"prop-types\"]:\"^15.7.2\"}}],[\"@rebass/forms@*\",{dependencies:{[\"@styled-system/should-forward-prop\"]:\"^5.0.0\"},peerDependencies:{react:\"^16.8.6\"}}],[\"rebass@*\",{peerDependencies:{react:\"^16.8.6\"}}],[\"@ant-design/react-slick@<=0.28.3\",{peerDependencies:{react:\">=16.0.0\"}}],[\"mqtt@<4.2.7\",{dependencies:{duplexify:\"^4.1.1\"}}],[\"vue-cli-plugin-vuetify@<=2.0.3\",{dependencies:{semver:\"^6.3.0\"},peerDependenciesMeta:{\"sass-loader\":Ye,\"vuetify-loader\":Ye}}],[\"vue-cli-plugin-vuetify@<=2.0.4\",{dependencies:{\"null-loader\":\"^3.0.0\"}}],[\"@vuetify/cli-plugin-utils@<=0.0.4\",{dependencies:{semver:\"^6.3.0\"},peerDependenciesMeta:{\"sass-loader\":Ye}}],[\"@vue/cli-plugin-typescript@<=5.0.0-alpha.0\",{dependencies:{\"babel-loader\":\"^8.1.0\"}}],[\"@vue/cli-plugin-typescript@<=5.0.0-beta.0\",{dependencies:{\"@babel/core\":\"^7.12.16\"},peerDependencies:{\"vue-template-compiler\":\"^2.0.0\"},peerDependenciesMeta:{\"vue-template-compiler\":Ye}}],[\"cordova-ios@<=6.3.0\",{dependencies:{underscore:\"^1.9.2\"}}],[\"cordova-lib@<=10.0.1\",{dependencies:{underscore:\"^1.9.2\"}}],[\"git-node-fs@*\",{peerDependencies:{\"js-git\":\"^0.7.8\"},peerDependenciesMeta:{\"js-git\":Ye}}],[\"consolidate@*\",{peerDependencies:{velocityjs:\"^2.0.1\",tinyliquid:\"^0.2.34\",\"liquid-node\":\"^3.0.1\",jade:\"^1.11.0\",\"then-jade\":\"*\",dust:\"^0.3.0\",\"dustjs-helpers\":\"^1.7.4\",\"dustjs-linkedin\":\"^2.7.5\",swig:\"^1.4.2\",\"swig-templates\":\"^2.0.3\",\"razor-tmpl\":\"^1.3.1\",atpl:\">=0.7.6\",liquor:\"^0.0.5\",twig:\"^1.15.2\",ejs:\"^3.1.5\",eco:\"^1.1.0-rc-3\",jazz:\"^0.0.18\",jqtpl:\"~1.1.0\",hamljs:\"^0.6.2\",hamlet:\"^0.3.3\",whiskers:\"^0.4.0\",\"haml-coffee\":\"^1.14.1\",\"hogan.js\":\"^3.0.2\",templayed:\">=0.2.3\",handlebars:\"^4.7.6\",underscore:\"^1.11.0\",lodash:\"^4.17.20\",pug:\"^3.0.0\",\"then-pug\":\"*\",qejs:\"^3.0.5\",walrus:\"^0.10.1\",mustache:\"^4.0.1\",just:\"^0.1.8\",ect:\"^0.5.9\",mote:\"^0.2.0\",toffee:\"^0.3.6\",dot:\"^1.1.3\",\"bracket-template\":\"^1.1.5\",ractive:\"^1.3.12\",nunjucks:\"^3.2.2\",htmling:\"^0.0.8\",\"babel-core\":\"^6.26.3\",plates:\"~0.4.11\",\"react-dom\":\"^16.13.1\",react:\"^16.13.1\",\"arc-templates\":\"^0.
5.3\",vash:\"^0.13.0\",slm:\"^2.0.0\",marko:\"^3.14.4\",teacup:\"^2.0.0\",\"coffee-script\":\"^1.12.7\",squirrelly:\"^5.1.0\",twing:\"^5.0.2\"},peerDependenciesMeta:{velocityjs:Ye,tinyliquid:Ye,\"liquid-node\":Ye,jade:Ye,\"then-jade\":Ye,dust:Ye,\"dustjs-helpers\":Ye,\"dustjs-linkedin\":Ye,swig:Ye,\"swig-templates\":Ye,\"razor-tmpl\":Ye,atpl:Ye,liquor:Ye,twig:Ye,ejs:Ye,eco:Ye,jazz:Ye,jqtpl:Ye,hamljs:Ye,hamlet:Ye,whiskers:Ye,\"haml-coffee\":Ye,\"hogan.js\":Ye,templayed:Ye,handlebars:Ye,underscore:Ye,lodash:Ye,pug:Ye,\"then-pug\":Ye,qejs:Ye,walrus:Ye,mustache:Ye,just:Ye,ect:Ye,mote:Ye,toffee:Ye,dot:Ye,\"bracket-template\":Ye,ractive:Ye,nunjucks:Ye,htmling:Ye,\"babel-core\":Ye,plates:Ye,\"react-dom\":Ye,react:Ye,\"arc-templates\":Ye,vash:Ye,slm:Ye,marko:Ye,teacup:Ye,\"coffee-script\":Ye,squirrelly:Ye,twing:Ye}}],[\"vue-loader@<=16.3.1\",{peerDependencies:{\"@vue/compiler-sfc\":\"^3.0.8\",webpack:\"^4.1.0 || ^5.0.0-0\"}}],[\"scss-parser@*\",{dependencies:{lodash:\"^4.17.21\"}}],[\"query-ast@*\",{dependencies:{lodash:\"^4.17.21\"}}],[\"redux-thunk@<=2.3.0\",{peerDependencies:{redux:\"^4.0.0\"}}],[\"skypack@<=0.3.2\",{dependencies:{tar:\"^6.1.0\"}}],[\"@npmcli/metavuln-calculator@<2.0.0\",{dependencies:{\"json-parse-even-better-errors\":\"^2.3.1\"}}],[\"bin-links@<2.3.0\",{dependencies:{\"mkdirp-infer-owner\":\"^1.0.2\"}}],[\"rollup-plugin-polyfill-node@<=0.8.0\",{peerDependencies:{rollup:\"^1.20.0 || ^2.0.0\"}}],[\"snowpack@<3.8.6\",{dependencies:{\"magic-string\":\"^0.25.7\"}}],[\"elm-webpack-loader@*\",{dependencies:{temp:\"^0.9.4\"}}],[\"winston-transport@<=4.4.0\",{dependencies:{logform:\"^2.2.0\"}}],[\"jest-vue-preprocessor@*\",{dependencies:{\"@babel/core\":\"7.8.7\",\"@babel/template\":\"7.8.6\"},peerDependencies:{pug:\"^2.0.4\"},peerDependenciesMeta:{pug:Ye}}],[\"redux-persist@*\",{peerDependencies:{react:\">=16\"},peerDependenciesMeta:{react:Ye}}],[\"sodium@>=3\",{dependencies:{\"node-gyp\":\"^3.8.0\"}}],[\"babel-plugin-graphql-tag@<=3.1.0\",{peerDependencies:{graphql:\"^14.0.0 || 
^15.0.0\"}}],[\"@playwright/test@<=1.14.1\",{dependencies:{\"jest-matcher-utils\":\"^26.4.2\"}}],...[\"babel-plugin-remove-graphql-queries@<3.14.0-next.1\",\"babel-preset-gatsby-package@<1.14.0-next.1\",\"create-gatsby@<1.14.0-next.1\",\"gatsby-admin@<0.24.0-next.1\",\"gatsby-cli@<3.14.0-next.1\",\"gatsby-core-utils@<2.14.0-next.1\",\"gatsby-design-tokens@<3.14.0-next.1\",\"gatsby-legacy-polyfills@<1.14.0-next.1\",\"gatsby-plugin-benchmark-reporting@<1.14.0-next.1\",\"gatsby-plugin-graphql-config@<0.23.0-next.1\",\"gatsby-plugin-image@<1.14.0-next.1\",\"gatsby-plugin-mdx@<2.14.0-next.1\",\"gatsby-plugin-netlify-cms@<5.14.0-next.1\",\"gatsby-plugin-no-sourcemaps@<3.14.0-next.1\",\"gatsby-plugin-page-creator@<3.14.0-next.1\",\"gatsby-plugin-preact@<5.14.0-next.1\",\"gatsby-plugin-preload-fonts@<2.14.0-next.1\",\"gatsby-plugin-schema-snapshot@<2.14.0-next.1\",\"gatsby-plugin-styletron@<6.14.0-next.1\",\"gatsby-plugin-subfont@<3.14.0-next.1\",\"gatsby-plugin-utils@<1.14.0-next.1\",\"gatsby-recipes@<0.25.0-next.1\",\"gatsby-source-shopify@<5.6.0-next.1\",\"gatsby-source-wikipedia@<3.14.0-next.1\",\"gatsby-transformer-screenshot@<3.14.0-next.1\",\"gatsby-worker@<0.5.0-next.1\"].map(t=>[t,{dependencies:{\"@babel/runtime\":\"^7.14.8\"}}]),[\"gatsby-core-utils@<2.14.0-next.1\",{dependencies:{got:\"8.3.2\"}}],[\"gatsby-plugin-gatsby-cloud@<=3.1.0-next.0\",{dependencies:{\"gatsby-core-utils\":\"^2.13.0-next.0\"}}],[\"gatsby-plugin-gatsby-cloud@<=3.2.0-next.1\",{peerDependencies:{webpack:\"*\"}}],[\"babel-plugin-remove-graphql-queries@<=3.14.0-next.1\",{dependencies:{\"gatsby-core-utils\":\"^2.8.0-next.1\"}}],[\"gatsby-plugin-netlify@3.13.0-next.1\",{dependencies:{\"gatsby-core-utils\":\"^2.13.0-next.0\"}}],[\"clipanion-v3-codemod@<=0.2.0\",{peerDependencies:{jscodeshift:\"^0.11.0\"}}],[\"react-live@*\",{peerDependencies:{\"react-dom\":\"*\",react:\"*\"}}],[\"webpack@<4.44.1\",{peerDependenciesMeta:{\"webpack-cli\":Ye,\"webpack-command\":Ye}}],[\"webpack@<5.0.0-beta.23\",{peerDependenciesMeta:{\"webpack-cli\":Ye}}],[\"webpack-dev-server@<3.10.2\",{peerDependenciesMeta:{\"webpack-cli\":Ye}}],[\"@docusaurus/responsive-loader@<1.5.0\",{peerDependenciesMeta:{sharp:Ye,jimp:Ye}}],[\"eslint-module-utils@*\",{peerDependenciesMeta:{\"eslint-import-resolver-node\":Ye,\"eslint-import-resolver-typescript\":Ye,\"eslint-import-resolver-webpack\":Ye,\"@typescript-eslint/parser\":Ye}}],[\"eslint-plugin-import@*\",{peerDependenciesMeta:{\"@typescript-eslint/parser\":Ye}}],[\"critters-webpack-plugin@<3.0.2\",{peerDependenciesMeta:{\"html-webpack-plugin\":Ye}}],[\"terser@<=5.10.0\",{dependencies:{acorn:\"^8.5.0\"}}],[\"babel-preset-react-app@10.0.x\",{dependencies:{\"@babel/plugin-proposal-private-property-in-object\":\"^7.16.0\"}}],[\"eslint-config-react-app@*\",{peerDependenciesMeta:{typescript:Ye}}],[\"@vue/eslint-config-typescript@*\",{peerDependenciesMeta:{typescript:Ye}}],[\"unplugin-vue2-script-setup@<0.9.1\",{peerDependencies:{\"@vue/composition-api\":\"^1.4.3\",\"@vue/runtime-dom\":\"^3.2.26\"}}]];var lL;function kAe(){return typeof 
lL==\"undefined\"&&(lL=require(\"zlib\").brotliDecompressSync(Buffer.from(\"G7weAByFTVk3Vs7UfHhq4yykgEM7pbW7TI43SG2S5tvGrwHBAzdz+s/npQ6tgEvobvxisrPIadkXeUAJotBn5bDZ5kAhcRqsIHe3F75Walet5hNalwgFDtxb0BiDUjiUQkjG0yW2hto9HPgiCkm316d6bC0kST72YN7D7rfkhCE9x4J0XwB0yavalxpUu2t9xszHrmtwalOxT7VslsxWcB1qpqZwERUra4psWhTV8BgwWeizurec82Caf1ABL11YMfbf8FJ9JBceZOkgmvrQPbC9DUldX/yMbmX06UQluCEjSwUoyO+EZPIjofr+/oAZUck2enraRD+oWLlnlYnj8xB+gwSo9lmmks4fXv574qSqcWA6z21uYkzMu3EWj+K23RxeQlLqiE35/rC8GcS4CGkKHKKq+zAIQwD9iRDNfiAqueLLpicFFrNsAI4zeTD/eO9MHcnRa5m8UT+M2+V+AkFST4BlKneiAQRSdST8KEAIyFlULt6wa9EBd0Ds28VmpaxquJdVt+nwdEs5xUskI13OVtFyY0UrQIRAlCuvvWivvlSKQfTO+2Q8OyUR1W5RvetaPz4jD27hdtwHFFA1Ptx6Ee/t2cY2rg2G46M1pNDRf2pWhvpy8pqMnuI3++4OF3+7OFIWXGjh+o7Nr2jNvbiYcQdQS1h903/jVFgOpA0yJ78z+x759bFA0rq+6aY5qPB4FzS3oYoLupDUhD9nDz6F6H7hpnlMf18KNKDu4IKjTWwrAnY6MFQw1W6ymOALHlFyCZmQhldg1MQHaMVVQTVgDC60TfaBqG++Y8PEoFhN/PBTZT175KNP/BlHDYGOOBmnBdzqJKplZ/ljiVG0ZBzfqeBRrrUkn6rA54462SgiliKoYVnbeptMdXNfAuaupIEi0bApF10TlgHfmEJAPUVidRVFyDupSem5po5vErPqWKhKbUIp0LozpYsIKK57dM/HKr+nguF+7924IIWMICkQ8JUigs9D+W+c4LnNoRtPPKNRUiCYmP+Jfo2lfKCKw8qpraEeWU3uiNRO6zcyKQoXPR5htmzzLznke7b4YbXW3I1lIRzmgG02Udb58U+7TpwyN7XymCgH+wuPDthZVQvRZuEP+SnLtMicz9m5zASWOBiAcLmkuFlTKuHspSIhCBD0yUPKcxu81A+4YD78rA2vtwsUEday9WNyrShyrl60rWmA+SmbYZkQOwFJWArxRYYc5jGhA5ikxYw1rx3ei4NmeX/lKiwpZ9Ln1tV2Ae7sArvxuVLbJjqJRjW1vFXAyHpvLG+8MJ6T2Ubx5M2KDa2SN6vuIGxJ9WQM9Mk3Q7aCNiZONXllhqq24DmoLbQfW2rYWsOgHWjtOmIQMyMKdiHZDjoyIq5+U700nZ6odJAoYXPQBvFNiQ78d5jaXliBqLTJEqUCwi+LiH2mx92EmNKDsJL74Z613+3lf20pxkV1+erOrjj8pW00vsPaahKUM+05ssd5uwM7K482KWEf3TCwlg/o3e5ngto7qSMz7YteIgCsF1UOcsLk7F7MxWbvrPMY473ew0G+noVL8EPbkmEMftMSeL6HFub/zy+2JQ==\",\"base64\")).toString()),lL}var cL;function xAe(){return typeof cL==\"undefined\"&&(cL=require(\"zlib\").brotliDecompressSync(Buffer.from(\"G8MSIIzURnVBnObTcvb3XE6v2S9Qgc2K801Oa5otNKEtK8BINZNcaQHy+9/vf/WXBimwutXC33P2DPc64pps5rz7NGGWaOKNSPL4Y2KRE8twut2lFOIN+OXPtRmPMRhMTILib2bEQx43az2I5d3YS8Roa5UZpF/ujHb3Djd3GDvYUfvFYSUQ39vb2cmifp/rgB4J/65JK3wRBTvMBoNBmn3mbXC63/gbBkW/2IRPri0O8bcsRBsmarF328pAln04nyJFkwUAvNu934supAqLtyerZZpJ8I8suJHhf/ocMV+scKwa8NOiDKIPXw6Ex/EEZD6TEGaW8N5zvNHYF10l6Lfooj7D5W2k3dgvQSbp2Wv8TGOayS978gxlOLVjTGXs66ozewbrjwElLtyrYNnWTfzzdEutgROUFPVMhnMoy8EjJLLlWwIEoySxliim9kYW30JUHiPVyjt0iAw/ZpPmCbUCltYPnq6ZNblIKhTNhqS/oqC9iya5sGKZTOVsTEg34n92uZTf2iPpcZih8rPW8CzA+adIGmyCPcKdLMsBLShd+zuEbTrqpwuh+DLmracZcjPC5Sdf5odDAhKpFuOsQS67RT+1VgWWygSv3YwxDnylc04/PYuaMeIzhBkLrvs7e/OUzRTF56MmfY6rI63QtEjEQzq637zQqJ39nNhu3NmoRRhW/086bHGBUtx0PE0j3aEGvkdh9WJC8y8j8mqqke9/dQ5la+Q3ba4RlhvTbnfQhPDDab3tUifkjKuOsp13mXEmO00Mu88F/M67R7LXfoFDFLNtgCSWjWX+3Jn1371pJTK9xPBiMJafvDjtFyAzu8rxeQ0TKMQXNPs5xxiBOd+BRJP8KP88XPtJIbZKh/cdW8KvBUkpqKpGoiIaA32c3/JnQr4efXt85mXvidOvn/eU3Pase1typLYBalJ14mCso9h79nuMOuCa/kZAOkJHmTjP5RM2WNoPasZUAnT1TAE/NH25hUxcQv6hQWR/m1PKk4ooXMcM4SR1iYU3fUohvqk4RY2hbmTVVIXv6TvqO+0doOjgeVFAcom+RlwJQmOVH7pr1Q9LoJT6n1DeQEB+NHygsATbIwTcOKZlJsY8G4+suX1uQLjUWwLjjs0mvSvZcLTpIGAekeR7GCgl8eo3ndAqEe2XCav4huliHjdbIPBsGJuPX7lrO9HX1UbXRH5opOe1x6JsOSgHZR+EaxuXVhpLLxm6jk1LJtZfHSc6BKPun3CpYYVMJGwEUyk8MTGG0XL5MfEwaXpnc9TKnBmlGn6nHiGREc3ysn47XIBDzA+YvFdjZzVIEDcKGpS6PbUJehFRjEne8D0lVU1XuRtlgszq6pTNlQ/3MzNOEgCWPyTct22V2mEi2krizn5VDo9B19/X2DB3hCGRMM7ONbtnAcIx/OWB1u5uPbW1gsH8irXxT/IzG0PoXWYjhbMsH3KTuoOl5o17PulcgvsfTSnKFM354GWI8luqZnrswWjiXy3G+Vbyo1KMopFmmvBwNELgaS8z8dNZchx/Cl/xjddxhMcyqtzFyONb2Zdu90NkI8pAeufe7YlXrp53v8Dj/l8vWeVspRKBGXScBBPI/HinSTGmLDOGGOCIyH0JFdOZx0gWsacNlQLJMIrBhqRxXxHF/5pseWwejlAAvZ3klZSDSYY8mkToaWejXhgNomeGtx1DTLEUFMRkgF5yFB22WYdJnaWN14r1YJj81hGi45+jrADS5nYRhCiSlCJJ1nL8pYX+HDSM
hdTEWyRcgHVp/IsUIZYMfT+YYncUQPgcxNGCHfZ88vDdrcUuaGIl6zhAsiaq7R5dfqrqXH/JcBhfjT8D0azayIyEz75Nxp6YkcyDxlJq3EXnJUpqDohJJOysL1t1uNiHESlvsxPb5cpbW0+ICZqJmUZus1BMW0F5IVBODLIo2zHHjA0=\",\"base64\")).toString()),cL}var uL;function PAe(){return typeof uL==\"undefined\"&&(uL=require(\"zlib\").brotliDecompressSync(Buffer.from(\"mwO6FaORsdsGcONiBdm+GYlI5y201PzTeQMV083BKbeT8BMrpQ2odQF3mN44LvRLbJSkDh9Bd6X8rqsk+kHglydMzGrLobOUPr9if9TuCkpvD49xOQ2jFKLUMYtdBtgubYZIkGwockj/2RiwVsFP7EeybqGwU0xzBaTFL19N//Xz9dx3z0vIJZVM47XKseSURhWAFBwK4uxCl5En+fOn2X9+voTShFj2IeNtTOSS1q4V21GeEHAa6oB83BxWg/TMKLT89c7XqJcqxOSyO7PfoCwNoeQwDvh+TlS2rsxUDaYndGWZuMLBDrI8yF9EbGCtChNvD6gPslDb0ep9X3tTEN8Yg/ctKBttEKK4NDLOZMrk8fqcOYN9DivgAagFCK7hcr+hkXVnzplz73MAvUTwG2MiGWOTSEnqc/ksvs9xHSnLlSVlwZXzkfSDIBGEeQol0hLEZe11gycawZoK+2hXyoX2v6qV7ZgXkogNwtcGQPds8lKJaRqp6BxyUxxtAKPZdS5jmHOcdShdTSLoXKXYu3cnuWg6d7U0IkUHceUAkA4A6SBynEJr4tl+yKlCs9rCl/W3ZSyretvOPn+tCCGEEDFQ5dL9dJia6N12XttOtxMBpgNipuhdvKXysHTwUFV63gZI/GdMTS9NeyvdKiBLQPgiFHwmQ5f6c7XspBDKqKJ/hXpE1ztLSYWkQxf8O8qzZNay7F/9Sdchjqtk4LTJaupq7T4V9NfqJNSfE5jX3cn+3fJ15c9Mwt6rJsKsIh7m7WNQND34NYz5gM33v3hepFc+ijgO4ZVMoigK5SfY7Ui8YnaBfqK5m2LOEtLjmKtq/XCSn9z9Sq5c76wFBbxDzetVGaRl8Gxyh044pMzLb3XCKDc/RXq9UcbyyPrQlbNdLG3Y/OCIgH4rwRI++AXbb5R0uURINen6wdWIIUAVLBpQSwdPtY7RVuO/cFd/zuLVw0p3yw5BOvjhsL3aocplq3XKufm8hn8DrVsSW4yfQQxfG9x+bZ5FmyMsgKgh/OeF9K53E1++eliOXpcI1wEAD9790MJQ8EouF8IQ0mTmGc4Xxp438Yega3nt7pOZWY9IOF6d3YrUYx/t9zBq4qisUJfNtr0dfiqE7joopDhJGn8xmz7QNucKpuovsPYSDy5ePXQVji6I68Fa/GwwFBuDdVs2bA4KAguR8dcb+rgcI2QkXmk9WnJm2kiVz/JuV8DRUlB3cbXb5eGg3mlEhAIN0K748zr1EJIPaR2D+Qwg7Cf/NFEJgNOxW1+fI9mBGSFsuKqoyH7pBEch5JPXYwRIYv6TaYKshlyte8OLjQWSV67s6yjJTCATurzd9oeyfzU3Ld+6SV9J89pdNm2zoFWAyZSB9ezAPGDs+gzFEWpsD3xGfBIergYyTQQGKghYw0StfkgxiXd63IQuIRUJGEtIV8wnJWIQAk5O9iWRPdjCiwnMEHfIowZPMrfwrJI1PNCDRbBeRln/UVOaW7HIPKj+YzxZlo+xXMyeJTGiDtWVwHBi6+oGCTe3nspFKv7UVEr2M/mpYX7OizddmH0RF2YsEsPCN95asJ/fdHwM5Bah1wtPxMUJJZMoZzEFcnYyNnvC5SgnOERN0hF9fcna/ME6ecqol7lVEQQJaC+sNvT1Tdc29hov4xg6uwOqImvzuiQr24LkmCsXTUvdEhEQ5DT2tTiMl+sMgpWZGx8AgdBtQ4DKPNobnBSMbj0Jxxt5yNXT/3T6zOqEJFneZeWbzxn18aHzv38/SqtcT0ffnKMziwH+LNNcYrSCRTB/CP+wuiUOIIcF03ah/L4HVH9EXAvoPWaWTtErU71MLBR44jFzXDbpWQm10ZQr35MshC2HN78J52fkLsEdkFuuN+OwRiznJ3PlToN92lmOa3N9z5L4uNom38JGWytRLrblRnKt3ea8qS3zKFJiqRUopRTwUqzlj6f168546DrxE5d96flW5OaHFAxTiPX6hZPpN8ds+SvYLzYoo1zMgxAUro9Ces5Nz7VZR1AT3N02w+oekcSr35LqgqB2O2pB4wXI2TMOmD/GHEtBjzW6Pla6rM1WW12DzC79x4Ptwc0dfhJkQI5eXEwtP2Tgs+47kFdw4QQFmdXrFb94bkyjPsPpi0iWX851zPjTA8gHjnFtFkJY5z1m+7dk+VQvnTzLI8ObrFLkDGYBhLcrdOzK7mlE1sc+JnHQR97si3pG9rpdo9/P5Cl/Qx4o7yQ2+/mMpHQdnYuAkWnjcqaEtOEExnV+S89r+xLE4MPi7dEDQhYlbtM+cD0m21t6es+b2WYf7lamENzePS4gzOaFD468FrnNynUWLc1/IpDufNMkYeG4ZWhrx25ym0p0ATV7iE9+FMxWdLtDGriqXYQ5RcWCB3b5yIkuFQGgjagMJBxwgHyr2xomD7dLx0sqN6ELvIZrncK+WxQ0WeiVCwdUXofKJclDuuLcoW3AeAyD23vsAUc9tJIi/9QKFQg/wq8PWQRQELLdg4BNT5VbFlTjtS5dXZb75oTMkb1XgYUpIg6I8CadMiKxjlAIf8uWSvSUwmTBgahJI04eupXT62YQvPNQUeEQLnF5lXg9wwHPVDe9WOjtC5VEXlEeyInvIMslxPgdSxJ5JE8WY2F67PrGr/zO/TctJ9k3vvH9IWfoHqByiq2YfN3uNTBONHRSMuXB8Ey2HzgfOEw4+dn2EnCib2h5sfFSm+4Qm0cswqDr60h5eDOcz00ugS+COOcF5XogBbc698qAfrLny0Eg+3QSLdmsUW8cnv4PNI59v3F01QKAL+/lX0+zQdj7OtUi3OTANngO4TLPl76LlZ73+iEASGFDTLlwxFgptDMqhR7WakfBbdWn/eNahJmop1V3yrC1RU275VKl6vQ/K7aKmERaRh9w3sCqlw2zu/9Uwq2qX0Xzc7X5LVl7h3S5tWJ1ONeIyiy7Beg9skKO54SLZ9cWh//NEs+DwG6B+9iTHczdyfwLcZ6wCme14SRcvXmt6B6puG2J3AwWge+mPoHDPybEdlpfpuJA1mV2TvYznTe1yF7LvqzF1G782L36fa4A1bkH4aj80bqssbn58j/2HzcHXo+iVqxCOVO7c2yHneecn30Fm8i4YtEjGD5kvjrOZMLSxh6ssCx7weZPq3jPWfo6/iKP71N0ZGA6oE+PrarJrhpo7RpoQ35ldn7HDOlVXjXfWBLb+F9iHXd7Q5CNqyZ+ETIDDMSLyR/WSoBOKz62X4/ZDrNfadj+
voq/W338GysDzhBlitnK34YVDhNPV8oKuwUbO0MGN3azgKf98gBk56qIKmnUQt/S+UC+J/8p4YCB3G2Fi2n5DzqaY3wXgN9O28bJqX30rXYENB9mdGoNdLGB8+vLz0dAEOd2oRo2+eTwjS1XLnV0MtkflgWrPJ8IuWzvDIrLm8jTQx8vKDEUAMfszwDVrMcMFwA4omxxhK9wRDShXUK5mmHXCABIzl1al2YgLPNjPASzVdlnN5+1++bBp7checMtz6dBK+tcigsAzEGAKOyGdGI7A2lljwAAF0Ro5tHLTIdmtg/ox+5BP/CDqRdPUcYK3cw8r2AFAO18//LnT/89GD3aYHanQjlbd79pIV+rfYkbwpuXQOfPQadJhgsAJKjZ/QvnGgKHdhzKrkfYNQIAJ2shHNurIt01j4nfyQb8vWB2JGZfFCfSEvaGchkAALkVBPL/5vK//uz8/4PQjUmbaX+2Q5Cpdpt9igIZurrrhzdg49l3f5qzHTB0HwiGJ34iVmlvKH8LAJCTiLJ0ImRjYtfZCRuPnCZbISS112ZP94FEm4syAYB4CiwvPOTNNQtC4GctRyonmCTFjIqVtgBAEfdvYdi4fzPtoRoL93nTJQDQwpTmyLv3TBA639r2Q5tAIuZhbyjHFgCgk0EXU3gEPym0VyOmnB9tagQASpII0ZgF/AFFP5oFF+6MBr2cXMrzKgDoZrWhGe/wh9FM2lp+BejvPHp0E0pHaeYHmGcLADjHRrrC8OD+ZNufKivu/uQ3AgByppCMWTo6ZuL7dHfC6PglbzvmbSQhU7i9oVwGAECW0ifUGDfgfrYL7xhVZMZmuwD/VGJ/C3ad3kz10dmde0u6XyB64nckld4byt8CACRleT6NcUdu8BTwjlgBGhvtXvS3JcLGTADghF3RnGe+0/Y/nceN45aTfTqNUaUKFwCoAKfjCGcK/ViiFtpX+tsSNlwCACu70d9Sck1T/nBh/LD55ST+UakKFwAooZ+2qAXhk35ModAe6W9T2LAJAERYT9gMm9/Uq7UmtZs/Te4/F6wAYJ3Vyi7TT0LTfXyu0+z7iP2d9HNp3C+XHMrHBQA8Zt9nY7gP97PgHtzPTY0AQLBkyQIyhYrPnUOrK/sms1du8XJcErivqgSTSXpIrs7/wqKp14KS0Tc5+71IsD0JoWyHgyS1poEBU9LhF8KTrNgVCRYdLLToCjIRX7KxD6dCx6E4w9XAkV9/STonvadnooaXvURfyVOvF/o+8aqLmPFZ3E/BIv1CgZHsm9v+ahf9gK/ZfMlxl3cg4AtpHIDzE6vtIkUf+tfofPKt2WK8fRexPz2UeVGmLqSRwJX0fTLajpHcImqCvoVZQ9sEI/gu8GYMFxAennjv8xrd+9Yg59S2RhLG7YXX6mRMr+rrN0z5PP/RmL74XpPXaU7IKEC6sOeVrLyy37HbjcOQniJCkera9L4x0jvCCIqRssw458LO7k5WamuJ6Y7JHKBb5r2sUFbj6hifC7uMvcvWK2f7vVlcDh5y0RBdWhPrFWNJwToPLbBIi+3oqnaIblojGfs49MUQhEcdD11/QH/CdtBYYBFRn5244nkVMt7nwZYgawsyVaQrD5rwh4Zg5jKI4S4cvDXdsdq8xbGxRyy/mRJihy89Xjg822fPbl0pdMjlh+f8ymBDWH7j3NNDJmknwpRv3TSLCV4kUV2yjyaXITt3PkHLh+ijNEQCPQqUFU773ivEBLyPx0npSc8cfsiwLFQ7rADIwI5lWm9ayj2RsX2IHG9iDkp6Cnzwh3vUTLcgIqLmawgd34X1vf5izXvR80if1m6szyfB3P8Dx13l4x0CkF+/xfnk1azRxKGB13IxeRLyIKhB22/qJADC6I9Uv7RF7jiooVEaQ85qJXcL00aJ2cDHREH9Dka6jObUby5gcCDwnlF9XJzA6XiNbUz3kW9fHtq5JHIvA1KL7AKT41kC4fW5Dyfu/4o8nNmvLrW2sFw0tGu8gXKmdl555bnXTT7aj9B/ZwesRYWNq5/KNjMmlM8xsR+9vhxHXffD5ZfgQdjvU9huqpYvrYu6rcalR5u9dl53ewebrWjSLn+hiQ77GPNtH/eKhc1Of+0wAPnQbeytW4MREvrpSdZv+cLZr4aWBWDRZL6lKjszqSNrdoQ3nQ+h+cbk+aURs6N6mD2zpgsYbxrlIE+TZYhVwOOEtS8S03nYOqg9s2UyNmQypTAA4jig2aNruMtvywS1HXA/kWBYCxxToeruiwl+qsdiGRlMwepFCvW0j/hYmFuxnv4RH7O/xepKeMndD2QEO5tYh5IeprKKxad2JvGZVV6nN9mhHBtr/RbEd35bXxi0JFZzBiZmTFwPhqBfRwZFhXO7C29+X6STk3DJ7hnv9KqWt+A96xEn2PVdvfISX0ydLCJgRcum435cACJ4DROEU8a8PgQwpLxCibLENZtfiSNhnC/oeUVe9lGN8GGGa9g3HcJ6XXJwVZv0WExGOdmhU68/khAXP6+NSx3dHWAsxGf1Rm8+1iTA+i6xcRtQLDuALXEin90Q1WUpEFu7Juwgl1C5F+4WJl76ybWx2eMKk4NTqOtj3bjbhWI4saBlMjktS0y6s9fnY0FANRmV6rymMly5ZZRK7udv+nDljBLSiNvNmp3CP2lDX93r3qg43Kg/Z+14I3gLo9zlBy50tvS611GlpjBW0NeV902hh+VjT4EYthK3eelxtKcshOFFB/Z8rqqm9v1FrL0VG7osUKYVnzdzkgRLafWXdNrWJhjv8NeHkn9BoYcmXrNgmau1/tjFOVblePjKFym3YGkjF3Wx3zbNCaMGW79EPNRUvhgetV/jaGqzMlhLZA0OXTKhIhl6jMJoXgOex/suHS9jO9mUpjejWWe2w7Een8llBgA82jQ0ReH1iIV2e9QNmgCA9X26PIcflAns8Ci5qenTmiF5V9PkFoob2Sgaaj+ueNLLqE82m+nVadY60Fw1MwAAcqQpkgrHR6FdyeQ7aoUmACAf/kaXb+FPlwr0ciku7YtAu2VPRZJp1K1oJmeYHzov47tcTAVgWU1ijQgXTZ7hb/3My3ju9nKaXkmzlkBJJTMAkOVYU8tlhb2t0c7yiBWaAMCVvqDLDXylHMBGVW5ZdrP1n0h+EQn4F+8hdu65wX91E6yR99MQCS4nUqUZALAjYQV2gl11Z07P9UmznYRw1535vCFvM+vxWP4f2JpLICjOpBNjAgTUFSvCLTdNNztYGBk1ll0R+p7swo+LCQPdZiZ3+SO7o4qKivGVNtqcH6Q+Lgwx+aY+TEYOms4FN7lHLOtaM85ealgmZCAoD6iMN7+YEHTR8l+xU/yWMB6dioBCWsCtzdlHKlZiWtTGUTWiL37ZsUHxwVdcTp9knFCDTmv6+AoOjwUNw4TWS2RuH4oAhfq/7rmAWnZDuvsBSukq04dzZQ4chMqjhyom46GpDzApIVH35nr3fDRqLh582cw5E8jSYxih6owoy+Odsm8wBLOG1VBnlt7jpupMP9hJDFAQ5ngoG4vGiibIKhL6pQPQ5hfYI3pp1i5d2S8P4Snqjpt
feeVHJzQlKyT0Zx6d1Nf59CGGAw3TPCpw+3KodeLcOmsWoAIxGIY82+E3dra1+W6H4c036XbmVph3wX3UYcUlQPVaEJZAl9/S4atlmxAnIVs1QQqvnx8HRu4Yubu4MPA5YIa9kEJIoGkc+4Y85Y/BcWfi2ywehXHNji24QdlN8pVIcaD3GeDYKazFFwp7XdBHK+4lW2qayBgEscBUdsB6epapfy9HcPSB48P2rRHzVFVyrJFzm5gBgEhOZUf4xKQX37zbtNlRNbE2AxSXcv6tTAAApRGHurgdJn/4P9xd3Ds4xNsASxqxBNDG6v02zRcgzMkthtdlxnNNzI11+PytDgzXdIdg0EARfCmwvzSmljlZvn0wGLsWi1t3BW54YN9cdfVLgzYJR+b70AZU+o8mQPncULt/lbQeuelV2k5pkpoXFqm9it14ZjfWwIbW9y73Z1Ore70Eh1OowKHw/+5q+JxIWGwOuI8CEoe+e8TDKCeZjc0If33uIBSQ28rwJkaq3lJFs58dTaXYGbSlJTUgkxwV0dqdjPrD6h2dpZ4qwNlKDAkpsvojrVNYKdbfMra3Tq+BC4uSuWITbjK+HUtoYMviymTjRs4y+NKZjc7Sfxmfj8IbGqKmn9WRocB7zZ5eWktv+Iw6QfyM8x/fkJ5cOqzlcFwVuFeAmjm5iSrLyprLGvyFMG+cPr+9+hIgUkSi9rdGs27FjUBSNTMAIOFbBWEXSukA1dBs7+AbVZoAgAM61d2rCLptRgkA1MUgrJrJzQDR1DRdNSqyy8c+mJoHW2cPtj9e2h0tIr65VQ5AjAiFqdRkx+NXbyenj0NevhMxI5Izf8oBiCGhNm+47LO8c2Rv+u+R7SRS/IiEvWq2vJ+u5jMCcN3IDADgDPBUEEarKHJ+bLY38YMKTQCgybWcyu53uTSuNgkAGFRls5rdofscu59+bevmF3OV/2okfdVseT8FMUTJlF83mQEAMUBZK0ZK0WTcqfkrQNesKAa7YYBmmqvNV51vw1TRX0T5Eha98Dc5dygZLStu6JYQUstia5+pwda7hn8oqFDY7n7hX5CSX+ykAphOoABoXh4gaGoRvgCn3ThNrjAJn/uax8lVs2QyFXPIFyCq+tUW66pVEUPoPzxuEZKbhQw5AmU/oxsLpAEgewIiyZZWCe3Ai7bkI2imueF/tKMOrelgWvpEhaug3XWSdZ97P1x76v2o3m/mQGqeEWj2QGBcFxV+TUWHFoq+W+EEarxocK7/FoKYiGYDR+5HzgG6vnHivLIPn6M69dhDOToeL+qLOR/0J1rYfTOC0mE0y30chzHyjg1tQ47UHz2PkbUVj/bPeHbuxdrRFSCKVM+idJgT+sICGSnhfOiaQuJkGyYTsGkFjGkIMLjJLLHp/eWpDc3Q6HdTAMDXamMmjcW9UWqsQxzl56/DFx5WZtd8R4ZqteSVHyTDtb6n/KHrD9wrHaGAkLFeMb5mkJ9uWPMJDyxhiF1l0HoyD7E/0QaTyC8StsUokg+Okv7LVbxsaIGRkA4mCfqgheYR5ws7mBjD50JtuFZHOC22JXhya7A6DXdMcc69Z1NOhvTgSa63wnH9Ezbm413L0LO2AXB0ckzSYqDNnITFygAjxSVHUhZ0TpJi+ZmW4zIcNluBEHZ3rJfqE8UziCEXBg6aaK+u3mnQDla1bvbddtqHLiiVBgX8QgSdKqHnOnSfV1HtjtCFjLi7F9pghoEIuKWgSiF7BKaCuejPlQfdcQHI+Cd2rV6uTyuhNeVJn4MrNkyxgR2yxXrYqN4xq+1Dgk2yrn67UFotEJ0IC76wIKsVU/uEEKcYIIb7SrdURGduRdfZBI2wQcoaHvEM99ezNh86QEq0lbYYvO0UN4dKIkirpGX7MWWglTjjwf3xT6Al5dlVpn2+ZWq8Z70i4KR/FI6i/C5oFcW8vVxqOkpnCwoJiv4sWuT7x16sxdbyYGxYQg2agBFpbvs4ut0eMiC2DFYEGeScWNDZzuLLiojGCb87vcEO7U18b+YSzicy+OEhlZQDVbRsguJmHt6YxW7ebeQvz1R/bf6CxtXkFqYVVRVnglFKU6KEC4lQnUT6U6aAIWOgIuQMb4iB4j6jpeO9rceiXmyW1WjbtLGUo+LKZgKGBAU8u/fzTLlo1zqJiOLKeERUPcF9ZI/f6VYDn7yYHyc+9yMzMislAMJiWbTEh/Sh5bRJKy1eLIaTK2DI3H6dYz69KqUOi2l/xlSPXTrReceDZxfbnNEPtsa5onKjhdG8RovdlW9/G1JeDi3Kn3n89aiKsxoXrkh8+Gqjtbul6q0N9LZJT1Wp2M9Swga/TGEi9POI1gspNsg/FzriiI0uZEb3yTJ7n4nEU9C81w9MTN+i4sGr5cY+aCEP7zLaCdG4uZaT94d0+ChKBE3SPFvGW89fCfD0K39jab57mati+4fSqofuNqNpPHIujxkAkKTJiJFSzGtcpuIVpDslUQrtXFRNNjVcVSYAQByfJKpTdaMY5jqdXGqnFr/xPQmsFoBrPWgtXh83Rq8+hxuKFon7gKjDQDxHUz3Jr+cJXxe6jb/0hAeqyxVzxFDTFYBYdWzr3zQmTGmmJvxENNp5JxDqAxJcsErFBsMQocRRdBYBlCSxcCk6/2cxvn6hm/mH5PSC9gCgt2xsc3OJKwD4oMu4axx6XoM1nIFGFGmGhLE+I1mxgW3Pps12Drn9LSDRWJreRONTE/YlYDoM6oGJl9JOsTrEGJsbzfh7BR9GGD1xjP2M4PyR2tGsh/qvr0Xu1d0/iZS/RnndlqwDEPRhF32uOL5/EkZX8qj0UAKSllivXBaOP8Abj1afevcjy8zF1n1risc8mOQ2PWMgiUS/HksYHoTi0X8HJl8K+Nj3rkeV4KA9k6BPs4ratcntolYP5XQVgddv4LBHmVe8U/kwvHMU4GA8Ge9fEkodSiB0v0jQ3cHEv9M8Hl8jnrMaUK6B5jifzrQgWHeLsvf0ZUi9SF27HtHn41rvbADtxpWNdWu3VDuvgrkmGda1SFlrVKOeafwsfyx+Y/0fLnCzU42htaaoayN0o1knvJMkGA1nnM21Vw/IiZAPXcuiv3BZL+lmHTVxWLCXK5o1G8PpjUs4p9N97U2nq5N45K/NqB5WzXmNmESGTenNVw+1tyths6JcTQEdpggp646XNdoabjKYwoZ4fWP6AqvYHB444HnnjPfYKmiCmHrRPEVmnm8ZUT/uJJzgGgrsnbRGf0BXYQasDX2XwsgmUYgWRXmcrDlN+PG9ca6YGg46Dyqmu1jFwDARXBh23BBbmLUsIArdKVkKnmsqVqPAQXMjTbcbLM5Gy5LI7/jX8yf6xiYoODUmqpUReyl9eZfTh7uOPuyRVt2neg52NBT+K8/THy/p4kX7+VkIU6ncQ0yYTciYyzKd6IyMgloXWfgCZ+1zozNnYgOoalwvnrw5rifCJobrXZhWWHNMMCWhPnX4YvQLqV8tA2+ZUliYgTdCA8vWX+bE7MouekLMb9P1JfR3hE3Zq5REu+MRWxHnNY3dgVxb4W12EugrVzRy5Vr8y0d7INrT0y
r6mqZjncVdMk2fLXTBb0yrNqjibUosn+0fN2SL2m2U0EknHGTfTMAl0iV6+ktEKWdLUgJdBviUVHwF+No0aoKYlsBLH8qEnVZ4JSbqGY9jxmYHDyktRsLVlaguqG7zRuprEpcpTwKnnQCDPS5vE7s2JGGHI458lpyIJi2kl+IcnRHKYWgg3QH556yNLj2EXqxyXW9uoNLjnBAsEe8Wt6pSbghJWGE62BGU0JM4bi0w7bpS1zA943xYlkmeonBvXnWa6DOul0ojlLQ0QIhtwY2XzF49Z8TQVHiyu7mou6+j9+LdMqfE2W5Ni2KG3bSX8kWRcAwBAmYh9l9fFMevqMXhAftuszsNrz0xBYStmBOufOMlpZDRbeTF8kd+wmz6uO59BN8PfK+/u9wO452/JfbhL62YLfQQV2I+1h4y893md7wTzokiz24Gbwy+tiacjKUIwYB4aQcTXQgYQ90VyUXwq+yw4Phby3pAiTowBmFHJ3Ss7tLwMtudyNW5UuuDRqlB4kzC8t65Hz+7Rqm0UeHuGbxMV3dKO1aAEF/GZJdQbrlZkkHmOGBezKGQ2LDcgHfyR0GS636vwN7o1oPvnvlIf1MGfJLu0XvNPdcI7us7TX2UpRDZpvy6gRkAOAPkesVYT+hzUyhjsesDxp3JTszYHof3XJkJABz6GVmAPTo2twFyVaO3KwbbauoHAA4CSLtQCgD1WT6rAYG22ClAlPPnHSCESbB7O5sCRKu/r8594a66JdkIlFXTpcS2Ae9as80AWFoL+QBWu9naAJzYbzcAUHVFAlBR+Da2ltcG02ew6rCPYJ+H+OYaSACR++djPCY+Jdv+7g2OzGbk5KcjfpecuQsSQIhEtWVLZdH72R3uGWgpV0kZPU7Y/vVp1etRh6GIS6RNzABAS5C0GMVgGmD7yJnsg69HjkOsrzN8XKEJAGAYeM+02QGtyvS2uNRV2Q8AoEBsI8oLauo+GjSnjS0Qy/xLQFjDIMIXrLfPqKrKMdsejL+I0Vwi/2AnvB7F3CR3xDwNmS8nih6Kc6MZACAJGhSj+BkDdNqCwa42wLROKM2/+HY6ZQ5uuTDcqhyTi/mHdUsgJfHloxbHMnIZiNF4UrhuXNLmOVYLoo/u5bruxi/ydu+EPaFe/8P/4SVPkC+CixOWkiTwy7x+V+pX6XjvT6zFvXWYD1/BdeXBeaKnNsfBFSei6TzA4IoczgpL4zNX1JVhjBkGV57lwSEIzsMrE9eXmeCS9fPuTuZng5+OL/Kz2DW+N1O6NWuGFDbEskfO5TEDAI1qSbBqMQrCklxVcUV2jO1igOMt15wJAAh3vBFI7m+7BlTv3k9vEnrILNZi/+Vxz36JlmzZ+HXNYx2Ol9eoC3e6DjW0G9ePk59UnbnFAECtjhc8RslB192+rLy7T2Xt4R92VTfvWq8Zp0QmKR2/sh72KWkDIYZfuId/2DUQ7w4BMkoKQyrtfXDv2jmENK7XQLekHEAlbEmS8nHctGeoXvnvZZpiQSFlW4FwoFDYykZcULhq642sodRMVxHThfoKHbMa7fzxeTTJC9rpr9djn6tW5P3UStaf9qe0+eFkRctpXyfxvZkR5LtUVbou87OKGQDolmy67MSGt8OXrZBcHwvu99SGO3fP7FyOJgDQk0UU0rRsYGrvB2C6Vk42A1U3FwQAnubxrsYYUKPOLOJgSnoPkIOZ0sSQHAza9IPlgHNGgSwHJHW51oHraiY9BNxVo+Qxh5pSac0GBKK2FgcD8brZJwI52u/bAtKuK7oiKup0zM7dom5+EUkw3770enSTgyABxO+fj2ods5ovGn+a0Zif0A+fej2+yWmQAAIlKv5ah2DeW/XdxTN8Wp1MyRGC+p/3p7iZP9asXFD1JmYAoMYMV/BLxWB6iL09C+3Tn+Wcv6nSBABmQYxpwwLZpXJurs250iAAaIMmRpQIqtJ9GFBLG6tBY/PPAA0NAzcIp713UFW5491+Yk+/iUiGNtAbb0aCnkePeIAcLudcmB3O8l0jEasuvq9DCVyA2uayRJIOlqNiynP9kvgGrpwVcVoZI/7MX2Us6rPQd955OMl6/mrHUvnHeKb1xJLmRWLjcedAPGGkENQuzgVYfhGkZJkIa7vtM/HReqY6loZ37OCquPnclJn2mabMWXjEHDyhjF85tFuS3T0a7ygyxQu8sXok4vxhmNtxaimoJBjPJ8qFEJqlZSe0TWbr+1fqajsM61yXlUxxLkCZdpcyC2AU0ckXF1K6+zK+byiYL5A8x+XFXsdGorIIQUTWWyWP/a/8esFbNYo3dShnOXCyLNq8eKfVjwmtxM/GAmEFPRA+D2Q4IxlVthWgKeXTTpBylqlQyO56egX27MqrQhuCIOWjG78Bm5qcUVAxRQoq0CmSkGG45PxvieILICDdoV+EyLQYwyNk2jcoLXMfBXnSlZd+26LLb66TKRBOmT9gL/txfUITA7eQ190uKnvyycDzXYmJzkmEFMSOYIVSOokx4Ytwz+RtiBQYjZLvCNtKicC7QghmZol2QZDjDK0Eucbp+lxVnZWG8XbnUmFPzv42/fuH3SkEAJmIk1CCcQxbzSZJcrj6hazuntXiZgmZG7Gsog+sTHHixoMdiWUnYllV9v1P1inG4kC5MFd4KOrjXRgc4QlckefPr/YomTnjJWPi34jtsu+pVvr4XzCepA2G5brPnqJnitvLFjVYWDTjsJVKx69PmIoKGMQmI7Memlc+nygWQXF2uHYKrWFQ9M3CHfLFUdWWTvVyX/I+liX74K/27Bwd1wrqSEF3cSZ/ZWPgom/HGwRXrrWsPXb2NH290Y3XfncdACDj1Oj8J3TSg7JpMQ4p1as9ciinxlwqVntqbP5tc7IcFOFTSY0dWrNovKFDR58VTMSqUntqvNgenq0MB8anMzjNsd6RzPraBHPHgcwu2wmo1sCmD6QnejQZ+j4Df70ZMXpDvToc6BX6+hV8NwQPSdJyWaDcl6TDRGHSuithTSpGTWYZz5cJvoauuwYMG+kOJZufktBdEZJzn747g4mNjAWtG3LHaQnfLQffyyL8jyZ8AGqQvjUvEJ0tB4+4jysCfRrJc4P/1qYlu6mWTuf+/bq4JqRDttVTs7H/nL42s6ZuQxUYWKFTDj6FNTQRVZnxd9Caifdrk4PcQH27YYomjszkwX4ERSIvL0SvoPBmiC+uLgS2qGzFKB7qyMBBOsB1PgQMz74Q01x5gTbjWMoBkpKwhDPvRzzB5F00BNBh/oAzR7QH1tNT46+re9JXQ3QqG0wjdRJLYwnQpGQskSpTSv6VSmL5agKpa639y7Q5oO/A7o1LmkYpMGJn+tiKz/FF6lOZfwAi1wi2lvoOSxfSgTyf1M5/R+ysXyhCTgXpThLH4c8Wcmd36p7dvaxLqTlcOLxNavd7Xbfv7FHVtS1jhFhcYpaWSqNoe23SpgsHRSwdX7ksjHtgG7Vc0hv39VNnYqk0W7iaYI4/mTvQYY/uvXiSxxkCL8G6P5OPgHSpD1ZrlW7Exx5b6xKY5w8QOXBYaOigseEwAGhlCXTXI5MQeUcork3ji
Hi4sAeM+JA+Ich/O3xTRnj2UHERIXon09SY691F3s2FtcwTgERqihKVsj758BYTniR7qEADSnnOMGgp/785KJQOVECcljf6URtIXupuqg3L7WYzmWkbqc6K5UwOp7lYGolJIsIWmtKJOPa1RKkK8zKn0PtxONE5ReDQHyu8jueTWLVcbnPTyvcfTN1ab3VsVsU9beGrfUQQUq019W6qP0uYGnIMXS1aaHGiwO9S+bDrfG+YzPXkCWxmk0AJaa1O5hgUPjM3C1ohDNaMGzQGhaiKnYtC67kVHK5WWaNKGgm4J+qnuLFHdFPaa2bzDRkx3MLkRyQ2bHbqIgG+6Tg0hN/fIi1U93TptPJW4L+pMhs/Cr5SL8/U9QILXZ2Yw+4ogrFPzTnxn626+MoAvbsr3ZM2cPGLe5ivhXfoDRL3g8y+KibNjOKBZi+aKHYY9YLfClENu7FdpRPQvm6ub5fS1WAzbSOSKKQ+lUyjlSBVeb1pmY7vOYuFvDvmOsn1UCm6eYhVAtL58jik77MqK3hWxH1It3h86XBbMra9JT9RK8YKBqbYOKXEf7Z/tHpbkkdcY0p/1hUP7inuoQb5vFu6okFs4EkKDYFaSJB0O2rNRPwQ98ocohsI0HXAgOtFSFkJ4CIIkz1UzbTca3QrrKNt9Pz+YmALDs14R6KrzCyKRXdHP1sJvrFtcMDi/3fx96c8HI9w+u94mZ38qNdLAv7TNjvC5UnIlL6f3eEbnF4fIfzxi7nQt5Oza34Jn4M86s+GDMbfIJShebntIsT644lRqzxSmWJMs5plLLTTQor1tj/ES1L4Qtto5noprV4Vvy3nbAx4p/EWz+RAgDuhfdMzNpAsib6AIpKXux1CY2fJNYrkvmiWijoWb3y2lFl250Q5d0kVd5Ra4Wgny5KTK+DGbVIxN3Az5b+RmzRIN3Tpqs26Kmfn0cZ8i68m38CyNtjkQ2etjpVz0Xt8lwTMm3SHkQMsNBr0eHhkxBBhXWfr0LQkaDfDrhplWXiQjmvvMco2XAR61i4f6FM6ZKiG5spQZtCBFp3w6TzgSJTz7bk1U4RAvLLJIVWJBqvplwkj3O9vfg98745VELQoY2qyZ9gK37RObfwMCFeI0VR01cM73WYHmDZBcDYDcyxHDL2iCFIMyczLtxi+1QEtD8KatsLgmPTJa7SaXWnNO/xJkzhDPxAnT4l/Aw/ShN6BukWwtoiQF7NUtT8vguChjS1bFzeMPBS5qJsTkYoUXT+4ztlQKpXzQfkDTZAWe8CvO5nPMDPyJDNxVJlL5W68PI2D5hjq6qe4K6CwJihJ6h9UbFZpiiaLGWV7oQLpJmmibBQK0T+m6UHyN1H5b7jthNgGM1lY8tgSdpuxU1Pad8miGQrsrsI3RhUo1/xhZJ/5ogS6q0L/pmbEaHzuWl01ran0bQDNZFfQ6ZYda7jMI0R03cjtzVjFB6PU5IrL6WG5+tFSHXCJPX3w6sTQcbIx3VDb6orQp0pwfry2u2Om5Og297S8+NcRZwtndjtnj8YahZjHBOWJRdZyvV+cLbd3Dp6TY24jow5X3ADoMve7ZpB4Ic6CunffNd0STlE6JKFnvDdaq3gYDx3Un2aLPUmrznGKR4Kwvkla8bIn7NEh0h8vtVZ8eo/5FgxZ5jryKsWaXnU61c8uaaqFKRhHOqmp04drwcp/s/F2PeZks0d/2ujOCn01gwObQHJVwCSRC2kHaYC8v7Ee4z4C/yG94uCg03XMHkUNoHjuCIcQ3PK0JGC6y5GGM8RxCZXl0wI9MbPeT7uNhyDgzL5vCyWNDLBlwoNzYgyEyaRjd9kBvN8YCjWpllLKI/htwX6vsQNw8RZBbyWewcYRhYyq2JQyDew9FWFoYRjAPlK7llgdu07Cfemszn5Is9MdGJ7Jh85JpOHnsKZUbItFzhBrYeX7qKOrC5B/hhCS5q+nYC2XNzb2CdJlHxi9wC0CwzIzVpbFX4FiSVhbQ/g8iFieLDFu/PJ1KcVaJRLdOoaRWYOlYri3ACNeben3+t1lvSBSEvD2ETSsim3ROzpS0M/arAOPNfMWEiZowiHZxApYwfcmrIMoOffCbQIDT9W2hMmxgbId6QPBvCN/Nay866IBw8V2YisaRH6nk8OXttFdGTADdlIf8Yd1Q1rRxNuN+3ESW0f6j6SYZWBNIjvW2ZRKkS2d7ZTTgN+92hdK2inAx/mkKEfRYHXHPHtJ6hK64GKrY16Z3dJOJAYbb5yAfQVnDzbFlUDrHI80QyJuFf3qpDfLqBd6ItNxk+fV2SCAspKBiGuB/tuiKnrxV3pHhgOl4rDqkK2MV6QXvxjcG4uQn8p9dfab8Qcmv77baiRXi+8o7oE1Af3D40PIsgLtpeHd/6GHka15B3L6ZZVO4yijlemhWobhKFqXZmElUTXneHzGLyXMny1Z14qdaDT2w2m3BfDYTCbzZ0s0w+6sjpUyYPs0AKjEo3fvkQk68GtLl+5d7uGu83DR7aEUNP+0RgYJuOyy6dkAsmn3/3LRYaI/BNgNnR7Rqy/H0s7hE7VS/QFGwXMnCJA/mc/UWrYrZpY7nesskO/OqoLsR2aHg/PaEQjwrT1PIrH4SjRaHv0qwDhpZwpgbOfpkpm9yhKbtmCa9R8P4NujsIGfPSLltXJEKRMNyapS6+PrX0xU7z7V0x+2qJRkXvkSC8StOFZvbDSum8V9giqGb4PnxbiLXUz3zJXKSRc5gNY2siAL59t0QRiQvzd/tbPArj03SteUo0IDhxMrByI31pvQVhswB/molLMR7+y6AnHNgykH7xZMTX5OevpJ4n6UlBvTVck8PfRdglYstBaEAvyVrbV61H4kU3GXJWEx+aqEzVMESdff7A79h2yDWIns1J/2WQcI23QrN19+BXdksmiOul0iiofnRESDGa21oi3WcCRUAVxvg0x7as8kIck5R0cV3BtkTjW7BMBuB3REVYgfnpHr3tmdUv2u+3NH/Jn3ucLVJPe1nr6uKHyuHGGXnJmfTJaRjOOFyu1Op+dFll4R2nmRpXdLmhbI4fqRgZP+OGXpJNPsBDmWfCbvlVdfmllIyX10+HwdP5pR5E/M313l7nvi8yT7dC+QwO+RepWE3owu6gYAblCZF8OY03SwnUjSc+FtaLvVwGGQrVDAaliE0oqsHmO1WqtPFcsv0yCMbiMCriLkVroLXGUSpGzXstiCDWr/bgu4woX/AMAUCPjo/CwaiM+6XReQXT18qLG3JG2gO+OPWuC7tArQBBTV0xJAm8AOLO/lgIXcsbsBr/pftgMAU1VL/4y/9001lv6lZaKW2zEa3tR2Xx5uoDdTHU3HKfDRbZW0zVB1acwDANABMrZxtwXQtDPpA7jjZgcBVOs2xAC4qYEKyZn9RvVNzGfu52OZZPqRVvtdn5v+/VvoTsD+cPzQz2CsgUxB1phUWOqKDcX94v1m9nNlKtMkiZ9duWobQDaYYpub71s0n7mfj3WFmeVUfOekvTLZ1CG1f8eP5ewW58smdJVZob//WCuIzy1xNFp9f0L4eDM+5L5tAF1tVjwyd2Eu4RnaWYbn
+a/5rnO16p1W2hvo1QHkakrObeoGAPT5GuAeruu6yaeZngu7dn3n5qBmtkIB7tAilFYECnBHC2USZ+EuGxGtGjFhpbsAJ5NAZWrXxRbYW/uXCPDthf8AQO8cwD3nx2NApnVLCdDCFmcW0NH5Cwucm1bBHBY6W9n2z+lQZXxXNvinEu07ouKst6UeRsP7bi1ykNFazm3qAgBOExexL4b/XfGknncTml8/cvHz+CN/JmjanUfvJ+sZANSJUBQ+LTdLAgYtZ2gKAvdcUAAAwB7IFbDFvx6y/8TbfP5OqGHXCt2GUuqLdQAA2oBothwX4aKXJCIiqP+EFNMYhx6bMt+LCCtrxHcTf4gjtmuNwVijUl549nnhhMfd5v9rQSDYT4SFLpZ88uoLWlti7+HFg3//22nlUMs/tm8vVSMHoIOT8oOmSeAbWus/etI6fenv9Ea9e15ZjZo94hvNrNaw/ZGHEbMP7HoVGWUKWVWQc5u5AYAVYTVEWDMkyk0K3yMn5jJSTsDyamvHyoxL8bIVCtiFEeHuFTHOK2EXhbtJnHBPcZsQAQCLLmpdgmWPgokWWpQ+df1xaxgyo4v+AQCuDJWGzo9s/1lQuWZ7qRBB5WSCU/CVdXFM9goyLb1yxIg5t+TlsolqKbdAd8OOENW67ImrWVocbHEw3fAd5/XpWbGus+UHV071cxY1s+MX2TBqtTuiwNfB/3zJzRe9Jc57dVXfFDI4rwIGy2h4hS24amELFdw3BQoefIUJ7rvmFVSpm/x4Gu2iffL5fVeAYLCF3AoOnFxo4NwLDJxrYYEzfa1eciGBEwoIDPTm/mnmF+yTzM+4QMCIT4HvU5MLAZxPAYDj74RIOOT879O1scC+zP/k8d+f0/RUT+oSsr1TcmU629rqoiquCwlmdiJJwhSb0uMzS7QgUDCbpgAA8NjMSds9tM3jM2d02qEj3qeiuiYgkqviyDoV9Fq6KA8AOESQi2cL56r2fGvo1vrvMe5lc8ft4jVhYusUuI2SnMBkWCn0uNIF5Vlbp2IiGigtnxiExp12VfYW1vD0Nbt3NPBkHdech7Gzny55PzFN+rvUIlq/dXICUjoHuDK3x7MvJpqJ6zcu9DBW9oXRqkIzeCJk9eVfdQMArKnJgBfGrNsMC7y59d21q4CzFsIzrrEYL3jYZRJz5m/FzWpCBACagq5qXIJ1jQ2/FnsBu1f6dzfUGg143fgPAKgEVXpbP1pJkLPVPATY2TXxjdZlBVicpr81iutSK+rCOQXurW69D9slSp9MaYnRINX1c6sAadQ97IohsLS7ihkCZ2dMezwK9jS1AwAuMYSGli8Qj0KYeruQ0K5ha6WHBMBrzAMA5ANO1nDTvmXlcLaszBQGuCIO59bJnpI4vEOnYSri8FZ3QKG0JOdbVrbC3pz0Lpr0h3yH1sMomWHI3pEKF7OyhnoP+Y+jwadb1P3xMDrmYGwAWTmaH++t7qYR+Rr0O5LfSp/x28M4mYXosrDKWraSeT8fwI9G458xJH/pYXzM6dgAujysuJY+ZaJrXZ8XzXY3lQg9jJckmFhVTCB3N3JuUzcAkL7zj3vgNrcL7FDbpVvjJVkL4ZouryhbR5XDBZdJ1Ap3XxMiANBD6M7FJVjWuMtzsRfgZe1froE0L/wHALgUoIbzYxWga6sZDOCBLo5tpgoyLL2yLLWiWtAs0DV6Hw5by9MapbR8SGHbDp0R0nuwdEoy7i/5U+8pevxiSDcZrbyfgigEcCW/brYBAGA+1Um9bXrbgqq3/uPeSVrOjKhKoG7LAPGTwswMCJ/ozt8VhON0ZRBlCGe75UzMQshpHqoJAIC0guuqEdbM1xJUOj8WxPbWrjiugFnBvoYdAMBbkBDlKEUUjspmxgOOYIljgSPi2EsCcgfHJOLAjf++7O6UcQrUwp+kVCZT6otiX1WSTFyCrEYwgR+5hCP7nc1GvVhb2WX4eDSe68wn2OOI1aM0KwWcU2UIAFx6gsRZgtqkC3l2Il23lqEbcXKyJCJtwZ1vn+HjP8KpKnVXTHEgU60OiSkLPJ0GRABgZgi71Me7Q+ShaGqv/C/tpolL6ep/ACADzQ3Nny0rq6ItvbkN2tLG26CtX9BtsiVR686BnD7RHY1bsLhBzAd2bP2XzX4Z5EPvvvLyS76StbIbBIXP/7vAapxAYlnpGnVW08WonHxvQj0cIzAxItmcc+5QFcvt37OazTlnC9VjpZuGeV7Ib0aRUNzp+PG10mi3u2VeQb/ijyxi/H/FQ7UVm/gs0NtB7MBCLB4Zbr//Lx8etjIXrMVNTgRh0LSG5rz/0QUTMIpUdlfearIsHKm3NTZK8fhIoAuhNPlTAIBpU6lMWz4t9fjIYr2dVnxUWLXaKSwP0norL2ny8gCASno1NMbLk4VTY0nVU1sZwYaqsa1sYQlVm60GsQvq1nZFmWXCdM1xhs/ckqZnObdKbrEa/XX4OEbz8MHQH4THsetqdPJLNsO0qaxeYg+9iuiN8Nidx7GqZ6WsbV3nkN1uOZdnCADUNfjpSLaz8Ny4zZgDL2PO745aNzjcR1N4ULy7pUN46uokRgdGyojgjHOFV7oL0nCJtheYRekfrhIsGQTd+A8AhIRu95o/OwZORdt6cwHa0cYL0M6Di0zpAkibBLpCVApAtz5xb5cXUU6q1UWOoZxsZyAJQl27211PGuquao15lgzd7YvaAQCjCuqZ3fKZKIYeuNvOxDEkgcKKrYrK4aWb8gAAlKpXVQ7osXGpAGjaGbkAYNxsZQFo3QYXAWjKoFz1wn1yC1ZezVf0PdGc4vy9jfY4SmUYMjWkwqqorAg9F/yime13mrgeR6fcag0gA0PzY9BTtmV1jfhEg78T/BsZj+NUFqJ7ugrFErztWukvGvNI/fDzOD7lamsA3dtVa0tTNm0dkzdG03zirbXH8QqH1KokIetkkarAAQB3/8fHtXGbewg2R9t52/plGQvRvr0ai0KDq1gnUSK4LSNieoz/v9JdoNpCkWtuL5rK1v6lt2ZDHaSr8R8AyACaa5g/WxY1RTm9uQ3a0sbboK0HtzOlSxi2W4ANMLLtZy2qjG3KluvUXzCjySjku3veqn/zfiTGibVIjDPfU7hB9quFuR0cAxPdPg66Xl59+sYLwYNbzxamfxW5BWDOlsezmRnS7ha9Qro3jsAILBL28xpQKNtZrJoOsw+kPnlcjuVf7e0T0XPvpr0Fh2aro68OiVEqhk8vni5ROiE8Hgv7CJ2bkcxHUTmGF9Vt+HHl6RPXvsxw5RD6oU7pF0rGLgMbEr16ukSaavUTQTKJfw4qy56meJthEBTZIJ6Yc46eln5DLtQZiq76O8Ja4o0kHtQug8HHyd6q3otq+PKfSSp76yvx4hqGyjaQfnn0ZbkHkq+pgV0mguRLf5AGN0tPUoPOT6f1wP6ibubUQ/VZpgq9JOzmWtm4IfvB3WckhThbT1XjqPFPhGo7NU4YRAc/OWinJtmGulIIOMs3zWHFJ76tZBskLnhtuzSqSQKZBJOj2VIvcJ1lAvTT16PoT9DxtbnGnxsp+vl0Sr28EqBBWwr
AezAfuck+gD/oY2HxWYseiD2nRW9fAVw+oBTt12sjg/BX2ot65DtYnKUlkeHYN6hTk7prWCZ3TK3S2+VrD97X4nZ/oa/Dx20chFpal/8CsE2zm5LmVMuMNNepyWAk8LswObgdpWMgR4zNPAly9Nrh07VLIw4ejkkZbqJrFuRsiZ4lGNNny7+y491b+mR7cy5/woXqPYTKGjDjs7pz1jHflT/CLt5M/G1fLauqFk9tXL965dyOi73RfjUR5Tf+huu8ONF7eHktn4VJ8n3mHA6jAJFYHbSOH+/nHM85VtocOGgVhn8lsMaOh2wUZsoq2UBIS2/es2651+G+ZP5CJQXrWUkE3XgnXE3g0UiC9/HhcDdwNfwGUzoB+H168JKNBvlJ3J27fmefHr5jMg36jGgBfJqZ+YIOf8DTveHc1c1Uh7HBefDPNxlVtkNUAc5dzc2smRB3udrZNh72G7FG06Vud5//dO8rVyhvKvTTOgBZUdKBBQDaGYoqbl4lBv6FLQMN9ZbBJFP3nbmpxIqWRc9SWn5uB0bz7P7MzLWNirmZb1PYo7buqgj+NLZhN9UpFO6qNtRV3F4jowhkowY/UFxYI2hZSj6/SsPL1UTjsrVq6VB9dcSxPUaREiNt/LelX7KzD7iJ/mP2AIBMjr/6I0jlSCyQ4rqYyuX3/9giplDcpuT5C0T95CEuY641PTW5beIpTkvObGKWr9LGfUixi+kpVV1TMGVaXHV6Sq1rBKZsSq7JNV3uFSUXtL3lPhB9i8SFLcyuWsqKV3i3QClrXz22JZ6R3Xncli91OC41WlNwS1EMDAer5u5e1cydq9qHrlVN7ysYrqfUu73na4xWTP3f3zHcY0QZWDk1k3OHmlh8gKTkbgF+fGf83l6/KXbysu9jGoPeyau5+6VizMQDFaXccZtvp5pnbNXcv91OF6V06VqVYJqSDrrfPbcaxQvsiuKGipRjGaZ8UdEE3g4UwHP4/l7j0HQG8C5xr173spf7aF47bam34bzVlJaFbZm1SGk5fU32OkNpyfzteH2L++I265Y0wCZiNVxo2kf2s30+XowJWWc0IWfLVTyQ1ug7WObs/lXF4xG0yAGmQzSfHWr3okeTw116sntYJ+gEzfg+vkbWc+iDs+5XdV/fBSuA1Q6yaoS7TWn1A6SvkXEe3YZUnrTCMKj807fz7WpR0vYopiERkGjRVJqxbPPEDt0sB/wroo/k40v9tp+69WomSU+5d2iVqy9HWFdgVxpMAAB3gzNV+MhAS51GCWPX/SMKguJ1naEOEJSnXYBuQlBhGwk9Cx7AaJvto14rtQW+NolyrmGlx1OH4LmogYCqPQDArnntU2cBdM4PbwEiVlNbAANN7Jl+KuCAS6fUGMBHo2DpIFYOUmcIrEXtunaX4wjcqSFMxHAJa16Pl1jpkuuOrcEzS1PNDwGAVQxXse3LgoGWNXe5AbxxVa7A0D1XvgfWfPbZd0EoG3cnQBc7s1zALTd7bcBsB2JvAEM1YrtLQpOuDgyEXR+l2YRnbxVP2TCKv80P3QAyXzQfBlRlqxbrFfuM4uWP2vFDc1a/zTfdALrIq9azrawifxJvsVmXL2bqxHWNV/22r6JWVU2Gid1hY8tiGKbwCr+mygQAKOO3ih6qkcFSKfkLE3CET5af7eIrpkEdvs9IGuDPFtq2arFYrS1/XSjQtHffYgvmqk4Qd1yarQIIANz4ovszBxBGb0Yg75q4J/BJBJQzSicMuFcogKVgBVQsUki7WzZmtDsSCv5sfX5A/K//0/6pPzfrkWmgARzginLJrgK+QQ35jSEslGVg2c/9GQdhXRxAA+A5aUfgc/XM9gmj7BRAA0bHlsMCU2r4AABUhIBbLRYoBT7y1/QEZG1Y+vPnZC238m//91MrhLpsmdtg4ZgysFNDkTWbpP36GW9ewtzcqvqyaY3HcSmkHY+ZgGdgJa0DAJ1JVcnOYRV8W+Iq3hSshbzeShsukQrOgZU+TPLcLtebHNDrvwnm8XXfpq9tBHv03jJU8LJ29UWGiZde9ueJw91ONP6uPkkHe1merPvKpvyjWdWzS8sP1VHGdYm/uemttaveqvpX39FaucKtxXdVmQAALnA6bnoUMLvcP2ogaE/XGaYMQUdtAswESWH+xkRiC4NuEAUAWA2A4xBBIJwC/ZwpSuliOLHk/4wfiywrn+2ucApLEZu0AQB1NJ8BC8PpAR+czvxknQsabTWbAhq0xc0DGnKlWoXyoIXhtOKt63SmLQusUJJo0PX6hA1U4OVl+Nml6bPbFahuZ6WZouUKzOoNq1zA1Vjemwau4I7dDIMrNUAEADcErm37DmFwnZq7w0J3dcbKVQMhIHS20b3nrRlH2WbeF3Q5t31b5whTs/33mJQw3EnN3TVhYqsUpjDiyXlQrDCNPq5GTBlN7uWp+9yGM1Pmd6Wm3OZzINi7wXe/nlmlj/Cc8ssJl/pNv2rWqzU/RYFDiSmcxc9VmQAAW+DrNj1OTViTNErYRQPQHJiN3g/Iqj1jFRg+XckZhiEXUhjbLhIhsFhGhG6O6UIeAgAggMchQobvmRIH1ZS7JV6+uAbKVmoDAOle0TWvAs4aLnIhPuMKVLaa5gHiaGJvm+KEAwmmVKoyxk9AkJR2vR56jNQKsjIEDgOEEWTL7coZyHYdYS4cnBFZ3p0BzHPHFmJgqwEiAFg1kMT23ZJBVvV2d2B4TJkq3rYG63Vv1UR34ZtGknT2XeCBhttU2PgcMNIyGXHAg5EdigP1ORBJAWrUiMUmGfu4suD7HoevTr/oJDuDMzKkbpMvtwFkX2hO5uPlMfbF97Wb6Te//qN16IZtcr/l320A3bRVPLOQkMEft2e86euv5TKhu2eb7G+vdz+It6vOVTmt72zFwKymsIKvqzIBAHTglA31sH4Uw7hzAcm2WkVPXX/WJ3zTNGjjx0YSih5ktOFalIartYXaBaIxMKuAIH5ZmahLwUZtAKCqWou5FRoUXDwRYBRbqFpvkiBEGy/xuW4bzyBr7dqTSgyKhbbW/pj6YQA=\",\"base64\")).toString()),uL}var DAe=new Map([[P.makeIdent(null,\"fsevents\").identHash,kAe],[P.makeIdent(null,\"resolve\").identHash,xAe],[P.makeIdent(null,\"typescript\").identHash,PAe]]),yze={hooks:{registerPackageExtensions:async(t,e)=>{for(let[r,i]of SAe)e(P.parseDescriptor(r,!0),i)},getBuiltinPatch:async(t,e)=>{var s;let r=\"compat/\";if(!e.startsWith(r))return;let i=P.parseIdent(e.slice(r.length)),n=(s=DAe.get(i.identHash))==null?void 0:s();return typeof 
n!=\"undefined\"?n:null},reduceDependency:async(t,e,r,i)=>typeof DAe.get(t.identHash)==\"undefined\"?t:P.makeDescriptor(t,P.makeRange({protocol:\"patch:\",source:P.stringifyDescriptor(t),selector:`~builtin<compat/${P.stringifyIdent(t)}>`,params:null}))}},wze=yze;var fL={};ft(fL,{default:()=>bze});var Ab=class extends Le{constructor(){super(...arguments);this.pkg=z.String(\"-p,--package\",{description:\"The package to run the provided command from\"});this.quiet=z.Boolean(\"-q,--quiet\",!1,{description:\"Only report critical errors instead of printing the full install logs\"});this.command=z.String();this.args=z.Proxy()}async execute(){let e=[];this.pkg&&e.push(\"--package\",this.pkg),this.quiet&&e.push(\"--quiet\");let r=P.parseIdent(this.command),i=P.makeIdent(r.scope,`create-${r.name}`);return this.cli.run([\"dlx\",...e,P.stringifyIdent(i),...this.args])}};Ab.paths=[[\"create\"]];var RAe=Ab;var Om=class extends Le{constructor(){super(...arguments);this.packages=z.Array(\"-p,--package\",{description:\"The package(s) to install before running the command\"});this.quiet=z.Boolean(\"-q,--quiet\",!1,{description:\"Only report critical errors instead of printing the full install logs\"});this.command=z.String();this.args=z.Proxy()}async execute(){return ye.telemetry=null,await K.mktempPromise(async e=>{var p;let r=k.join(e,`dlx-${process.pid}`);await K.mkdirPromise(r),await K.writeFilePromise(k.join(r,\"package.json\"),`{}\n`),await K.writeFilePromise(k.join(r,\"yarn.lock\"),\"\");let i=k.join(r,\".yarnrc.yml\"),n=await ye.findProjectCwd(this.context.cwd,Pt.lockfile),s=!(await ye.find(this.context.cwd,null,{strict:!1})).get(\"enableGlobalCache\"),o=n!==null?k.join(n,\".yarnrc.yml\"):null;o!==null&&K.existsSync(o)?(await K.copyFilePromise(o,i),await ye.updateConfiguration(r,m=>{let y=te(N({},m),{enableGlobalCache:s,enableTelemetry:!1});return Array.isArray(m.plugins)&&(y.plugins=m.plugins.map(Q=>{let S=typeof Q==\"string\"?Q:Q.path,x=j.isAbsolute(S)?S:j.resolve(j.fromPortablePath(n),S);return typeof Q==\"string\"?x:{path:x,spec:Q.spec}})),y})):await K.writeFilePromise(i,`enableGlobalCache: ${s}\nenableTelemetry: false\n`);let a=(p=this.packages)!=null?p:[this.command],l=P.parseDescriptor(this.command).name,c=await this.cli.run([\"add\",\"--\",...a],{cwd:r,quiet:this.quiet});if(c!==0)return c;this.quiet||this.context.stdout.write(`\n`);let u=await ye.find(r,this.context.plugins),{project:g,workspace:f}=await ze.find(u,r);if(f===null)throw new ht(g.cwd,r);await g.restoreInstallState();let h=await Zt.getWorkspaceAccessibleBinaries(f);return h.has(l)===!1&&h.size===1&&typeof this.packages==\"undefined\"&&(l=Array.from(h)[0][0]),await Zt.executeWorkspaceAccessibleBinary(f,l,this.args,{packageAccessibleBinaries:h,cwd:this.context.cwd,stdin:this.context.stdin,stdout:this.context.stdout,stderr:this.context.stderr})})}};Om.paths=[[\"dlx\"]],Om.usage=Re.Usage({description:\"run a package in a temporary environment\",details:\"\\n      This command will install a package within a temporary environment, and run its binary script if it contains any. 
The binary will run within the current cwd.\\n\\n      By default Yarn will download the package named `command`, but this can be changed through the use of the `-p,--package` flag which will instruct Yarn to still run the same command but from a different package.\\n\\n      Using `yarn dlx` as a replacement of `yarn add` isn't recommended, as it makes your project non-deterministic (Yarn doesn't keep track of the packages installed through `dlx` - neither their name, nor their version).\\n    \",examples:[[\"Use create-react-app to create a new React app\",\"yarn dlx create-react-app ./my-app\"],[\"Install multiple packages for a single command\",`yarn dlx -p typescript -p ts-node ts-node --transpile-only -e \"console.log('hello!')\"`]]});var FAe=Om;var Bze={commands:[RAe,FAe]},bze=Bze;var wL={};ft(wL,{default:()=>Sze,fileUtils:()=>hL});var ih=/^(?:[a-zA-Z]:[\\\\/]|\\.{0,2}\\/)/,Mm=/^[^?]*\\.(?:tar\\.gz|tgz)(?:::.*)?$/,Vr=\"file:\";var hL={};ft(hL,{makeArchiveFromLocator:()=>lb,makeBufferFromLocator:()=>CL,makeLocator:()=>dL,makeSpec:()=>NAe,parseSpec:()=>pL});function pL(t){let{params:e,selector:r}=P.parseRange(t),i=j.toPortablePath(r);return{parentLocator:e&&typeof e.locator==\"string\"?P.parseLocator(e.locator):null,path:i}}function NAe({parentLocator:t,path:e,folderHash:r,protocol:i}){let n=t!==null?{locator:P.stringifyLocator(t)}:{},s=typeof r!=\"undefined\"?{hash:r}:{};return P.makeRange({protocol:i,source:e,selector:e,params:N(N({},s),n)})}function dL(t,{parentLocator:e,path:r,folderHash:i,protocol:n}){return P.makeLocator(t,NAe({parentLocator:e,path:r,folderHash:i,protocol:n}))}async function lb(t,{protocol:e,fetchOptions:r,inMemory:i=!1}){let{parentLocator:n,path:s}=P.parseFileStyleRange(t.reference,{protocol:e}),o=k.isAbsolute(s)?{packageFs:new _t(Me.root),prefixPath:Me.dot,localPath:Me.root}:await r.fetcher.fetch(n,r),a=o.localPath?{packageFs:new _t(Me.root),prefixPath:k.relative(Me.root,o.localPath)}:o;o!==a&&o.releaseFs&&o.releaseFs();let l=a.packageFs,c=k.join(a.prefixPath,s);return await Se.releaseAfterUseAsync(async()=>await wi.makeArchiveFromDirectory(c,{baseFs:l,prefixPath:P.getIdentVendorPath(t),compressionLevel:r.project.configuration.get(\"compressionLevel\"),inMemory:i}),a.releaseFs)}async function CL(t,{protocol:e,fetchOptions:r}){return(await lb(t,{protocol:e,fetchOptions:r,inMemory:!0})).getBufferAndClose()}var mL=class{supports(e,r){return!!e.reference.startsWith(Vr)}getLocalPath(e,r){let{parentLocator:i,path:n}=P.parseFileStyleRange(e.reference,{protocol:Vr});if(k.isAbsolute(n))return n;let s=r.fetcher.getLocalPath(i,r);return s===null?null:k.resolve(s,n)}async fetch(e,r){let i=r.checksums.get(e.locatorHash)||null,[n,s,o]=await r.cache.fetchPackageFromCache(e,i,N({onHit:()=>r.report.reportCacheHit(e),onMiss:()=>r.report.reportCacheMiss(e,`${P.prettyLocator(r.project.configuration,e)} can't be found in the cache and will be fetched from the disk`),loader:()=>this.fetchFromDisk(e,r),skipIntegrityCheck:r.skipIntegrityCheck},r.cacheOptions));return{packageFs:n,releaseFs:s,prefixPath:P.getIdentVendorPath(e),localPath:this.getLocalPath(e,r),checksum:o}}async fetchFromDisk(e,r){return lb(e,{protocol:Vr,fetchOptions:r})}};var Qze=2,EL=class{supportsDescriptor(e,r){return e.range.match(ih)?!0:!!e.range.startsWith(Vr)}supportsLocator(e,r){return!!e.reference.startsWith(Vr)}shouldPersistResolution(e,r){return!1}bindDescriptor(e,r,i){return 
ih.test(e.range)&&(e=P.makeDescriptor(e,`${Vr}${e.range}`)),P.bindDescriptor(e,{locator:P.stringifyLocator(r)})}getResolutionDependencies(e,r){return[]}async getCandidates(e,r,i){if(!i.fetchOptions)throw new Error(\"Assertion failed: This resolver cannot be used unless a fetcher is configured\");let{path:n,parentLocator:s}=pL(e.range);if(s===null)throw new Error(\"Assertion failed: The descriptor should have been bound\");let o=await CL(P.makeLocator(e,P.makeRange({protocol:Vr,source:n,selector:n,params:{locator:P.stringifyLocator(s)}})),{protocol:Vr,fetchOptions:i.fetchOptions}),a=Dn.makeHash(`${Qze}`,o).slice(0,6);return[dL(e,{parentLocator:s,path:n,folderHash:a,protocol:Vr})]}async getSatisfying(e,r,i){return null}async resolve(e,r){if(!r.fetchOptions)throw new Error(\"Assertion failed: This resolver cannot be used unless a fetcher is configured\");let i=await r.fetchOptions.fetcher.fetch(e,r.fetchOptions),n=await Se.releaseAfterUseAsync(async()=>await At.find(i.prefixPath,{baseFs:i.packageFs}),i.releaseFs);return te(N({},e),{version:n.version||\"0.0.0\",languageName:n.languageName||r.project.configuration.get(\"defaultLanguageName\"),linkType:Qt.HARD,conditions:n.getConditions(),dependencies:n.dependencies,peerDependencies:n.peerDependencies,dependenciesMeta:n.dependenciesMeta,peerDependenciesMeta:n.peerDependenciesMeta,bin:n.bin})}};var IL=class{supports(e,r){return Mm.test(e.reference)?!!e.reference.startsWith(Vr):!1}getLocalPath(e,r){return null}async fetch(e,r){let i=r.checksums.get(e.locatorHash)||null,[n,s,o]=await r.cache.fetchPackageFromCache(e,i,N({onHit:()=>r.report.reportCacheHit(e),onMiss:()=>r.report.reportCacheMiss(e,`${P.prettyLocator(r.project.configuration,e)} can't be found in the cache and will be fetched from the disk`),loader:()=>this.fetchFromDisk(e,r),skipIntegrityCheck:r.skipIntegrityCheck},r.cacheOptions));return{packageFs:n,releaseFs:s,prefixPath:P.getIdentVendorPath(e),checksum:o}}async fetchFromDisk(e,r){let{parentLocator:i,path:n}=P.parseFileStyleRange(e.reference,{protocol:Vr}),s=k.isAbsolute(n)?{packageFs:new _t(Me.root),prefixPath:Me.dot,localPath:Me.root}:await r.fetcher.fetch(i,r),o=s.localPath?{packageFs:new _t(Me.root),prefixPath:k.relative(Me.root,s.localPath)}:s;s!==o&&s.releaseFs&&s.releaseFs();let a=o.packageFs,l=k.join(o.prefixPath,n),c=await a.readFilePromise(l);return await Se.releaseAfterUseAsync(async()=>await wi.convertToZip(c,{compressionLevel:r.project.configuration.get(\"compressionLevel\"),prefixPath:P.getIdentVendorPath(e),stripComponents:1}),o.releaseFs)}};var yL=class{supportsDescriptor(e,r){return Mm.test(e.range)?!!(e.range.startsWith(Vr)||ih.test(e.range)):!1}supportsLocator(e,r){return Mm.test(e.reference)?!!e.reference.startsWith(Vr):!1}shouldPersistResolution(e,r){return!0}bindDescriptor(e,r,i){return ih.test(e.range)&&(e=P.makeDescriptor(e,`${Vr}${e.range}`)),P.bindDescriptor(e,{locator:P.stringifyLocator(r)})}getResolutionDependencies(e,r){return[]}async getCandidates(e,r,i){let n=e.range;return n.startsWith(Vr)&&(n=n.slice(Vr.length)),[P.makeLocator(e,`${Vr}${j.toPortablePath(n)}`)]}async getSatisfying(e,r,i){return null}async resolve(e,r){if(!r.fetchOptions)throw new Error(\"Assertion failed: This resolver cannot be used unless a fetcher is configured\");let i=await r.fetchOptions.fetcher.fetch(e,r.fetchOptions),n=await Se.releaseAfterUseAsync(async()=>await At.find(i.prefixPath,{baseFs:i.packageFs}),i.releaseFs);return 
te(N({},e),{version:n.version||\"0.0.0\",languageName:n.languageName||r.project.configuration.get(\"defaultLanguageName\"),linkType:Qt.HARD,conditions:n.getConditions(),dependencies:n.dependencies,peerDependencies:n.peerDependencies,dependenciesMeta:n.dependenciesMeta,peerDependenciesMeta:n.peerDependenciesMeta,bin:n.bin})}};var vze={fetchers:[IL,mL],resolvers:[yL,EL]},Sze=vze;var bL={};ft(bL,{default:()=>Pze});var LAe=ge(require(\"querystring\")),TAe=[/^https?:\\/\\/(?:([^/]+?)@)?github.com\\/([^/#]+)\\/([^/#]+)\\/tarball\\/([^/#]+)(?:#(.*))?$/,/^https?:\\/\\/(?:([^/]+?)@)?github.com\\/([^/#]+)\\/([^/#]+?)(?:\\.git)?(?:#(.*))?$/];function OAe(t){return t?TAe.some(e=>!!t.match(e)):!1}function MAe(t){let e;for(let a of TAe)if(e=t.match(a),e)break;if(!e)throw new Error(kze(t));let[,r,i,n,s=\"master\"]=e,{commit:o}=LAe.default.parse(s);return s=o||s.replace(/[^:]*:/,\"\"),{auth:r,username:i,reponame:n,treeish:s}}function kze(t){return`Input cannot be parsed as a valid GitHub URL ('${t}').`}var BL=class{supports(e,r){return!!OAe(e.reference)}getLocalPath(e,r){return null}async fetch(e,r){let i=r.checksums.get(e.locatorHash)||null,[n,s,o]=await r.cache.fetchPackageFromCache(e,i,N({onHit:()=>r.report.reportCacheHit(e),onMiss:()=>r.report.reportCacheMiss(e,`${P.prettyLocator(r.project.configuration,e)} can't be found in the cache and will be fetched from GitHub`),loader:()=>this.fetchFromNetwork(e,r),skipIntegrityCheck:r.skipIntegrityCheck},r.cacheOptions));return{packageFs:n,releaseFs:s,prefixPath:P.getIdentVendorPath(e),checksum:o}}async fetchFromNetwork(e,r){let i=await ir.get(this.getLocatorUrl(e,r),{configuration:r.project.configuration});return await K.mktempPromise(async n=>{let s=new _t(n);await wi.extractArchiveTo(i,s,{stripComponents:1});let o=wu.splitRepoUrl(e.reference),a=k.join(n,\"package.tgz\");await Zt.prepareExternalProject(n,a,{configuration:r.project.configuration,report:r.report,workspace:o.extra.workspace,locator:e});let l=await K.readFilePromise(a);return await wi.convertToZip(l,{compressionLevel:r.project.configuration.get(\"compressionLevel\"),prefixPath:P.getIdentVendorPath(e),stripComponents:1})})}getLocatorUrl(e,r){let{auth:i,username:n,reponame:s,treeish:o}=MAe(e.reference);return`https://${i?`${i}@`:\"\"}github.com/${n}/${s}/archive/${o}.tar.gz`}};var xze={hooks:{async fetchHostedRepository(t,e,r){if(t!==null)return t;let i=new BL;if(!i.supports(e,r))return null;try{return await i.fetch(e,r)}catch(n){return null}}}},Pze=xze;var SL={};ft(SL,{default:()=>Rze});var Um=/^[^?]*\\.(?:tar\\.gz|tgz)(?:\\?.*)?$/,Km=/^https?:/;var QL=class{supports(e,r){return Um.test(e.reference)?!!Km.test(e.reference):!1}getLocalPath(e,r){return null}async fetch(e,r){let i=r.checksums.get(e.locatorHash)||null,[n,s,o]=await r.cache.fetchPackageFromCache(e,i,N({onHit:()=>r.report.reportCacheHit(e),onMiss:()=>r.report.reportCacheMiss(e,`${P.prettyLocator(r.project.configuration,e)} can't be found in the cache and will be fetched from the remote server`),loader:()=>this.fetchFromNetwork(e,r),skipIntegrityCheck:r.skipIntegrityCheck},r.cacheOptions));return{packageFs:n,releaseFs:s,prefixPath:P.getIdentVendorPath(e),checksum:o}}async fetchFromNetwork(e,r){let i=await ir.get(e.reference,{configuration:r.project.configuration});return await wi.convertToZip(i,{compressionLevel:r.project.configuration.get(\"compressionLevel\"),prefixPath:P.getIdentVendorPath(e),stripComponents:1})}};var vL=class{supportsDescriptor(e,r){return Um.test(e.range)?!!Km.test(e.range):!1}supportsLocator(e,r){return 
Um.test(e.reference)?!!Km.test(e.reference):!1}shouldPersistResolution(e,r){return!0}bindDescriptor(e,r,i){return e}getResolutionDependencies(e,r){return[]}async getCandidates(e,r,i){return[P.convertDescriptorToLocator(e)]}async getSatisfying(e,r,i){return null}async resolve(e,r){if(!r.fetchOptions)throw new Error(\"Assertion failed: This resolver cannot be used unless a fetcher is configured\");let i=await r.fetchOptions.fetcher.fetch(e,r.fetchOptions),n=await Se.releaseAfterUseAsync(async()=>await At.find(i.prefixPath,{baseFs:i.packageFs}),i.releaseFs);return te(N({},e),{version:n.version||\"0.0.0\",languageName:n.languageName||r.project.configuration.get(\"defaultLanguageName\"),linkType:Qt.HARD,conditions:n.getConditions(),dependencies:n.dependencies,peerDependencies:n.peerDependencies,dependenciesMeta:n.dependenciesMeta,peerDependenciesMeta:n.peerDependenciesMeta,bin:n.bin})}};var Dze={fetchers:[QL],resolvers:[vL]},Rze=Dze;var DL={};ft(DL,{default:()=>F4e});var ule=ge(cle()),PL=ge(require(\"util\")),Hm=class extends Le{constructor(){super(...arguments);this.private=z.Boolean(\"-p,--private\",!1,{description:\"Initialize a private package\"});this.workspace=z.Boolean(\"-w,--workspace\",!1,{description:\"Initialize a workspace root with a `packages/` directory\"});this.install=z.String(\"-i,--install\",!1,{tolerateBoolean:!0,description:\"Initialize a package with a specific bundle that will be locked in the project\"});this.usev2=z.Boolean(\"-2\",!1,{hidden:!0});this.yes=z.Boolean(\"-y,--yes\",{hidden:!0});this.assumeFreshProject=z.Boolean(\"--assume-fresh-project\",!1,{hidden:!0})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),r=typeof this.install==\"string\"?this.install:this.usev2||this.install===!0?\"latest\":null;return r!==null?await this.executeProxy(e,r):await this.executeRegular(e)}async executeProxy(e,r){if(e.projectCwd!==null&&e.projectCwd!==this.context.cwd)throw new Pe(\"Cannot use the --install flag from within a project subdirectory\");K.existsSync(this.context.cwd)||await K.mkdirPromise(this.context.cwd,{recursive:!0});let i=k.join(this.context.cwd,e.get(\"lockfileFilename\"));K.existsSync(i)||await K.writeFilePromise(i,\"\");let n=await this.cli.run([\"set\",\"version\",r],{quiet:!0});if(n!==0)return n;let s=[];return this.private&&s.push(\"-p\"),this.workspace&&s.push(\"-w\"),this.yes&&s.push(\"-y\"),await K.mktempPromise(async o=>{let{code:a}=await Fr.pipevp(\"yarn\",[\"init\",...s],{cwd:this.context.cwd,stdin:this.context.stdin,stdout:this.context.stdout,stderr:this.context.stderr,env:await Zt.makeScriptEnv({binFolder:o})});return a})}async executeRegular(e){var l;let r=null;try{r=(await ze.find(e,this.context.cwd)).project}catch{r=null}K.existsSync(this.context.cwd)||await K.mkdirPromise(this.context.cwd,{recursive:!0});let i=await At.tryFind(this.context.cwd)||new At,n=Object.fromEntries(e.get(\"initFields\").entries());i.load(n),i.name=(l=i.name)!=null?l:P.makeIdent(e.get(\"initScope\"),k.basename(this.context.cwd)),i.packageManager=Ur&&Se.isTaggedYarnVersion(Ur)?`yarn@${Ur}`:null,typeof i.raw.private==\"undefined\"&&(this.private||this.workspace&&i.workspaceDefinitions.length===0)&&(i.private=!0),this.workspace&&i.workspaceDefinitions.length===0&&(await K.mkdirPromise(k.join(this.context.cwd,\"packages\"),{recursive:!0}),i.workspaceDefinitions=[{pattern:\"packages/*\"}]);let s={};i.exportTo(s),PL.inspect.styles.name=\"cyan\",this.context.stdout.write(`${(0,PL.inspect)(s,{depth:Infinity,colors:!0,compact:!1})}\n`);let 
o=k.join(this.context.cwd,At.fileName);await K.changeFilePromise(o,`${JSON.stringify(s,null,2)}\n`,{automaticNewlines:!0});let a=k.join(this.context.cwd,\"README.md\");if(K.existsSync(a)||await K.writeFilePromise(a,`# ${P.stringifyIdent(i.name)}\n`),!r||r.cwd===this.context.cwd){let c=k.join(this.context.cwd,Pt.lockfile);K.existsSync(c)||await K.writeFilePromise(c,\"\");let g=[\".yarn/*\",\"!.yarn/patches\",\"!.yarn/plugins\",\"!.yarn/releases\",\"!.yarn/sdks\",\"!.yarn/versions\",\"\",\"# Swap the comments on the following lines if you don't wish to use zero-installs\",\"# Documentation here: https://yarnpkg.com/features/zero-installs\",\"!.yarn/cache\",\"#.pnp.*\"].map(y=>`${y}\n`).join(\"\"),f=k.join(this.context.cwd,\".gitignore\");K.existsSync(f)||await K.writeFilePromise(f,g);let h={[\"*\"]:{endOfLine:\"lf\",insertFinalNewline:!0},[\"*.{js,json,yml}\"]:{charset:\"utf-8\",indentStyle:\"space\",indentSize:2}};(0,ule.default)(h,e.get(\"initEditorConfig\"));let p=`root = true\n`;for(let[y,Q]of Object.entries(h)){p+=`\n[${y}]\n`;for(let[S,x]of Object.entries(Q))p+=`${S.replace(/[A-Z]/g,Y=>`_${Y.toLowerCase()}`)} = ${x}\n`}let m=k.join(this.context.cwd,\".editorconfig\");K.existsSync(m)||await K.writeFilePromise(m,p),K.existsSync(k.join(this.context.cwd,\".git\"))||await Fr.execvp(\"git\",[\"init\"],{cwd:this.context.cwd})}}};Hm.paths=[[\"init\"]],Hm.usage=Re.Usage({description:\"create a new package\",details:\"\\n      This command will setup a new package in your local directory.\\n\\n      If the `-p,--private` or `-w,--workspace` options are set, the package will be private by default.\\n\\n      If the `-w,--workspace` option is set, the package will be configured to accept a set of workspaces in the `packages/` directory.\\n\\n      If the `-i,--install` option is given a value, Yarn will first download it using `yarn set version` and only then forward the init call to the newly downloaded bundle. Without arguments, the downloaded bundle will be `latest`.\\n\\n      The initial settings of the manifest can be changed by using the `initScope` and `initFields` configuration values. 
Additionally, Yarn will generate an EditorConfig file whose rules can be altered via `initEditorConfig`, and will initialize a Git repository in the current directory.\\n    \",examples:[[\"Create a new package in the local directory\",\"yarn init\"],[\"Create a new private package in the local directory\",\"yarn init -p\"],[\"Create a new package and store the Yarn release inside\",\"yarn init -i=latest\"],[\"Create a new private package and defines it as a workspace root\",\"yarn init -w\"]]});var gle=Hm;var R4e={configuration:{initScope:{description:\"Scope used when creating packages via the init command\",type:Ie.STRING,default:null},initFields:{description:\"Additional fields to set when creating packages via the init command\",type:Ie.MAP,valueDefinition:{description:\"\",type:Ie.ANY}},initEditorConfig:{description:\"Extra rules to define in the generator editorconfig\",type:Ie.MAP,valueDefinition:{description:\"\",type:Ie.ANY}}},commands:[gle]},F4e=R4e;var TL={};ft(TL,{default:()=>L4e});var mA=\"portal:\",EA=\"link:\";var RL=class{supports(e,r){return!!e.reference.startsWith(mA)}getLocalPath(e,r){let{parentLocator:i,path:n}=P.parseFileStyleRange(e.reference,{protocol:mA});if(k.isAbsolute(n))return n;let s=r.fetcher.getLocalPath(i,r);return s===null?null:k.resolve(s,n)}async fetch(e,r){var c;let{parentLocator:i,path:n}=P.parseFileStyleRange(e.reference,{protocol:mA}),s=k.isAbsolute(n)?{packageFs:new _t(Me.root),prefixPath:Me.dot,localPath:Me.root}:await r.fetcher.fetch(i,r),o=s.localPath?{packageFs:new _t(Me.root),prefixPath:k.relative(Me.root,s.localPath),localPath:Me.root}:s;s!==o&&s.releaseFs&&s.releaseFs();let a=o.packageFs,l=k.resolve((c=o.localPath)!=null?c:o.packageFs.getRealPath(),o.prefixPath,n);return s.localPath?{packageFs:new _t(l,{baseFs:a}),releaseFs:o.releaseFs,prefixPath:Me.dot,localPath:l}:{packageFs:new Da(l,{baseFs:a}),releaseFs:o.releaseFs,prefixPath:Me.dot}}};var FL=class{supportsDescriptor(e,r){return!!e.range.startsWith(mA)}supportsLocator(e,r){return!!e.reference.startsWith(mA)}shouldPersistResolution(e,r){return!1}bindDescriptor(e,r,i){return P.bindDescriptor(e,{locator:P.stringifyLocator(r)})}getResolutionDependencies(e,r){return[]}async getCandidates(e,r,i){let n=e.range.slice(mA.length);return[P.makeLocator(e,`${mA}${j.toPortablePath(n)}`)]}async getSatisfying(e,r,i){return null}async resolve(e,r){if(!r.fetchOptions)throw new Error(\"Assertion failed: This resolver cannot be used unless a fetcher is configured\");let i=await r.fetchOptions.fetcher.fetch(e,r.fetchOptions),n=await Se.releaseAfterUseAsync(async()=>await At.find(i.prefixPath,{baseFs:i.packageFs}),i.releaseFs);return te(N({},e),{version:n.version||\"0.0.0\",languageName:n.languageName||r.project.configuration.get(\"defaultLanguageName\"),linkType:Qt.SOFT,conditions:n.getConditions(),dependencies:new Map([...n.dependencies]),peerDependencies:n.peerDependencies,dependenciesMeta:n.dependenciesMeta,peerDependenciesMeta:n.peerDependenciesMeta,bin:n.bin})}};var NL=class{supports(e,r){return!!e.reference.startsWith(EA)}getLocalPath(e,r){let{parentLocator:i,path:n}=P.parseFileStyleRange(e.reference,{protocol:EA});if(k.isAbsolute(n))return n;let s=r.fetcher.getLocalPath(i,r);return s===null?null:k.resolve(s,n)}async fetch(e,r){var c;let{parentLocator:i,path:n}=P.parseFileStyleRange(e.reference,{protocol:EA}),s=k.isAbsolute(n)?{packageFs:new _t(Me.root),prefixPath:Me.dot,localPath:Me.root}:await r.fetcher.fetch(i,r),o=s.localPath?{packageFs:new 
_t(Me.root),prefixPath:k.relative(Me.root,s.localPath),localPath:Me.root}:s;s!==o&&s.releaseFs&&s.releaseFs();let a=o.packageFs,l=k.resolve((c=o.localPath)!=null?c:o.packageFs.getRealPath(),o.prefixPath,n);return s.localPath?{packageFs:new _t(l,{baseFs:a}),releaseFs:o.releaseFs,prefixPath:Me.dot,discardFromLookup:!0,localPath:l}:{packageFs:new Da(l,{baseFs:a}),releaseFs:o.releaseFs,prefixPath:Me.dot,discardFromLookup:!0}}};var LL=class{supportsDescriptor(e,r){return!!e.range.startsWith(EA)}supportsLocator(e,r){return!!e.reference.startsWith(EA)}shouldPersistResolution(e,r){return!1}bindDescriptor(e,r,i){return P.bindDescriptor(e,{locator:P.stringifyLocator(r)})}getResolutionDependencies(e,r){return[]}async getCandidates(e,r,i){let n=e.range.slice(EA.length);return[P.makeLocator(e,`${EA}${j.toPortablePath(n)}`)]}async getSatisfying(e,r,i){return null}async resolve(e,r){return te(N({},e),{version:\"0.0.0\",languageName:r.project.configuration.get(\"defaultLanguageName\"),linkType:Qt.SOFT,conditions:null,dependencies:new Map,peerDependencies:new Map,dependenciesMeta:new Map,peerDependenciesMeta:new Map,bin:new Map})}};var N4e={fetchers:[NL,RL],resolvers:[LL,FL]},L4e=N4e;var gT={};ft(gT,{default:()=>Y_e});var ls;(function(i){i[i.REGULAR=0]=\"REGULAR\",i[i.WORKSPACE=1]=\"WORKSPACE\",i[i.EXTERNAL_SOFT_LINK=2]=\"EXTERNAL_SOFT_LINK\"})(ls||(ls={}));var IA;(function(i){i[i.YES=0]=\"YES\",i[i.NO=1]=\"NO\",i[i.DEPENDS=2]=\"DEPENDS\"})(IA||(IA={}));var OL=(t,e)=>`${t}@${e}`,fle=(t,e)=>{let r=e.indexOf(\"#\"),i=r>=0?e.substring(r+1):e;return OL(t,i)},Eo;(function(s){s[s.NONE=-1]=\"NONE\",s[s.PERF=0]=\"PERF\",s[s.CHECK=1]=\"CHECK\",s[s.REASONS=2]=\"REASONS\",s[s.INTENSIVE_CHECK=9]=\"INTENSIVE_CHECK\"})(Eo||(Eo={}));var ple=(t,e={})=>{let r=e.debugLevel||Number(process.env.NM_DEBUG_LEVEL||-1),i=e.check||r>=9,n=e.hoistingLimits||new Map,s={check:i,debugLevel:r,hoistingLimits:n,fastLookupPossible:!0},o;s.debugLevel>=0&&(o=Date.now());let a=T4e(t,s),l=!1,c=0;do l=ML(a,[a],new Set([a.locator]),new Map,s).anotherRoundNeeded,s.fastLookupPossible=!1,c++;while(l);if(s.debugLevel>=0&&console.log(`hoist time: ${Date.now()-o}ms, rounds: ${c}`),s.debugLevel>=1){let u=jm(a);if(ML(a,[a],new Set([a.locator]),new Map,s).isGraphChanged)throw new Error(`The hoisting result is not terminal, prev tree:\n${u}, next tree:\n${jm(a)}`);let f=hle(a);if(f)throw new Error(`${f}, after hoisting finished:\n${jm(a)}`)}return s.debugLevel>=2&&console.log(jm(a)),O4e(a)},M4e=t=>{let e=t[t.length-1],r=new Map,i=new Set,n=s=>{if(!i.has(s)){i.add(s);for(let o of s.hoistedDependencies.values())r.set(o.name,o);for(let o of s.dependencies.values())s.peerNames.has(o.name)||n(o)}};return n(e),r},U4e=t=>{let e=t[t.length-1],r=new Map,i=new Set,n=new Set,s=(o,a)=>{if(i.has(o))return;i.add(o);for(let c of o.hoistedDependencies.values())if(!a.has(c.name)){let u;for(let g of t)u=g.dependencies.get(c.name),u&&r.set(u.name,u)}let l=new Set;for(let c of o.dependencies.values())l.add(c.name);for(let c of o.dependencies.values())o.peerNames.has(c.name)||s(c,l)};return s(e,n),r},dle=(t,e)=>{if(e.decoupled)return e;let{name:r,references:i,ident:n,locator:s,dependencies:o,originalDependencies:a,hoistedDependencies:l,peerNames:c,reasons:u,isHoistBorder:g,hoistPriority:f,dependencyKind:h,hoistedFrom:p,hoistedTo:m}=e,y={name:r,references:new Set(i),ident:n,locator:s,dependencies:new Map(o),originalDependencies:new Map(a),hoistedDependencies:new Map(l),peerNames:new Set(c),reasons:new 
Map(u),decoupled:!0,isHoistBorder:g,hoistPriority:f,dependencyKind:h,hoistedFrom:new Map(p),hoistedTo:new Map(m)},Q=y.dependencies.get(r);return Q&&Q.ident==y.ident&&y.dependencies.set(r,y),t.dependencies.set(y.name,y),y},K4e=(t,e)=>{let r=new Map([[t.name,[t.ident]]]);for(let n of t.dependencies.values())t.peerNames.has(n.name)||r.set(n.name,[n.ident]);let i=Array.from(e.keys());i.sort((n,s)=>{let o=e.get(n),a=e.get(s);return a.hoistPriority!==o.hoistPriority?a.hoistPriority-o.hoistPriority:a.peerDependents.size!==o.peerDependents.size?a.peerDependents.size-o.peerDependents.size:a.dependents.size-o.dependents.size});for(let n of i){let s=n.substring(0,n.indexOf(\"@\",1)),o=n.substring(s.length+1);if(!t.peerNames.has(s)){let a=r.get(s);a||(a=[],r.set(s,a)),a.indexOf(o)<0&&a.push(o)}}return r},UL=t=>{let e=new Set,r=(i,n=new Set)=>{if(!n.has(i)){n.add(i);for(let s of i.peerNames)if(!t.peerNames.has(s)){let o=t.dependencies.get(s);o&&!e.has(o)&&r(o,n)}e.add(i)}};for(let i of t.dependencies.values())t.peerNames.has(i.name)||r(i);return e},ML=(t,e,r,i,n,s=new Set)=>{let o=e[e.length-1];if(s.has(o))return{anotherRoundNeeded:!1,isGraphChanged:!1};s.add(o);let a=j4e(o),l=K4e(o,a),c=t==o?new Map:n.fastLookupPossible?M4e(e):U4e(e),u,g=!1,f=!1,h=new Map(Array.from(l.entries()).map(([m,y])=>[m,y[0]])),p=new Map;do{let m=H4e(t,e,r,c,h,l,i,p,n);m.isGraphChanged&&(f=!0),m.anotherRoundNeeded&&(g=!0),u=!1;for(let[y,Q]of l)Q.length>1&&!o.dependencies.has(y)&&(h.delete(y),Q.shift(),h.set(y,Q[0]),u=!0)}while(u);for(let m of o.dependencies.values())if(!o.peerNames.has(m.name)&&!r.has(m.locator)){r.add(m.locator);let y=ML(t,[...e,m],r,p,n);y.isGraphChanged&&(f=!0),y.anotherRoundNeeded&&(g=!0),r.delete(m.locator)}return{anotherRoundNeeded:g,isGraphChanged:f}},G4e=(t,e,r,i,n,s,o,a,{outputReason:l,fastLookupPossible:c})=>{let u,g=null,f=new Set;l&&(u=`${Array.from(e).map(y=>Ni(y)).join(\"\\u2192\")}`);let h=r[r.length-1],m=!(i.ident===h.ident);if(l&&!m&&(g=\"- self-reference\"),m&&(m=i.dependencyKind!==1,l&&!m&&(g=\"- workspace\")),m&&(m=i.dependencyKind!==2||i.dependencies.size===0,l&&!m&&(g=\"- external soft link with unhoisted dependencies\")),m&&(m=h.dependencyKind!==1||h.hoistedFrom.has(i.name)||e.size===1,l&&!m&&(g=h.reasons.get(i.name))),m&&(m=!t.peerNames.has(i.name),l&&!m&&(g=`- cannot shadow peer: ${Ni(t.originalDependencies.get(i.name).locator)} at ${u}`)),m){let y=!1,Q=n.get(i.name);if(y=!Q||Q.ident===i.ident,l&&!y&&(g=`- filled by: ${Ni(Q.locator)} at ${u}`),y)for(let S=r.length-1;S>=1;S--){let M=r[S].dependencies.get(i.name);if(M&&M.ident!==i.ident){y=!1;let Y=a.get(h);Y||(Y=new Set,a.set(h,Y)),Y.add(i.name),l&&(g=`- filled by ${Ni(M.locator)} at ${r.slice(0,S).map(U=>Ni(U.locator)).join(\"\\u2192\")}`);break}}m=y}if(m&&(m=s.get(i.name)===i.ident,l&&!m&&(g=`- filled by: ${Ni(o.get(i.name)[0])} at ${u}`)),m){let y=!0,Q=new Set(i.peerNames);for(let S=r.length-1;S>=1;S--){let x=r[S];for(let M of Q){if(x.peerNames.has(M)&&x.originalDependencies.has(M))continue;let Y=x.dependencies.get(M);Y&&t.dependencies.get(M)!==Y&&(S===r.length-1?f.add(Y):(f=null,y=!1,l&&(g=`- peer dependency ${Ni(Y.locator)} from parent ${Ni(x.locator)} was not hoisted to ${u}`))),Q.delete(M)}if(!y)break}m=y}if(m&&!c)for(let y of i.hoistedDependencies.values()){let Q=n.get(y.name);if(!Q||y.ident!==Q.ident){m=!1,l&&(g=`- previously hoisted dependency mismatch, needed: ${Ni(y.locator)}, available: ${Ni(Q==null?void 0:Q.locator)}`);break}}return 
f!==null&&f.size>0?{isHoistable:2,dependsOn:f,reason:g}:{isHoistable:m?0:1,reason:g}},H4e=(t,e,r,i,n,s,o,a,l)=>{let c=e[e.length-1],u=new Set,g=!1,f=!1,h=(y,Q,S,x)=>{if(u.has(S))return;let M=[...Q,S.locator],Y=new Map,U=new Map;for(let Z of UL(S)){let A=G4e(c,r,[c,...y,S],Z,i,n,s,a,{outputReason:l.debugLevel>=2,fastLookupPossible:l.fastLookupPossible});if(U.set(Z,A),A.isHoistable===2)for(let ne of A.dependsOn){let le=Y.get(ne.name)||new Set;le.add(Z.name),Y.set(ne.name,le)}}let J=new Set,W=(Z,A,ne)=>{if(!J.has(Z)){J.add(Z),U.set(Z,{isHoistable:1,reason:ne});for(let le of Y.get(Z.name)||[])W(S.dependencies.get(le),A,l.debugLevel>=2?`- peer dependency ${Ni(Z.locator)} from parent ${Ni(S.locator)} was not hoisted`:\"\")}};for(let[Z,A]of U)A.isHoistable===1&&W(Z,A,A.reason);for(let Z of U.keys())if(!J.has(Z)){f=!0;let A=o.get(S);A&&A.has(Z.name)&&(g=!0),S.dependencies.delete(Z.name),S.hoistedDependencies.set(Z.name,Z),S.reasons.delete(Z.name);let ne=c.dependencies.get(Z.name);if(l.debugLevel>=2){let le=Array.from(Q).concat([S.locator]).map(T=>Ni(T)).join(\"\\u2192\"),Ae=c.hoistedFrom.get(Z.name);Ae||(Ae=[],c.hoistedFrom.set(Z.name,Ae)),Ae.push(le),S.hoistedTo.set(Z.name,Array.from(e).map(T=>Ni(T.locator)).join(\"\\u2192\"))}if(!ne)c.ident!==Z.ident&&(c.dependencies.set(Z.name,Z),x.add(Z));else for(let le of Z.references)ne.references.add(le)}if(l.check){let Z=hle(t);if(Z)throw new Error(`${Z}, after hoisting dependencies of ${[c,...y,S].map(A=>Ni(A.locator)).join(\"\\u2192\")}:\n${jm(t)}`)}let ee=UL(S);for(let Z of ee)if(J.has(Z)){let A=U.get(Z);if((n.get(Z.name)===Z.ident||!S.reasons.has(Z.name))&&A.isHoistable!==0&&S.reasons.set(Z.name,A.reason),!Z.isHoistBorder&&M.indexOf(Z.locator)<0){u.add(S);let le=dle(S,Z);h([...y,S],[...Q,S.locator],le,m),u.delete(S)}}},p,m=new Set(UL(c));do{p=m,m=new Set;for(let y of p){if(y.locator===c.locator||y.isHoistBorder)continue;let Q=dle(c,y);h([],Array.from(r),Q,m)}}while(m.size>0);return{anotherRoundNeeded:g,isGraphChanged:f}},hle=t=>{let e=[],r=new Set,i=new Set,n=(s,o,a)=>{if(r.has(s)||(r.add(s),i.has(s)))return;let l=new Map(o);for(let c of s.dependencies.values())s.peerNames.has(c.name)||l.set(c.name,c);for(let c of s.originalDependencies.values()){let u=l.get(c.name),g=()=>`${Array.from(i).concat([s]).map(f=>Ni(f.locator)).join(\"\\u2192\")}`;if(s.peerNames.has(c.name)){let f=o.get(c.name);(f!==u||!f||f.ident!==c.ident)&&e.push(`${g()} - broken peer promise: expected ${c.ident} but found ${f&&f.ident}`)}else{let f=a.hoistedFrom.get(s.name),h=s.hoistedTo.get(c.name),p=`${f?` hoisted from ${f.join(\", \")}`:\"\"}`,m=`${h?` hoisted to ${h}`:\"\"}`,y=`${g()}${p}`;u?u.ident!==c.ident&&e.push(`${y} - broken require promise for ${c.name}${m}: expected ${c.ident}, but found: ${u.ident}`):e.push(`${y} - broken require promise: no required dependency ${c.name}${m} found`)}}i.add(s);for(let c of s.dependencies.values())s.peerNames.has(c.name)||n(c,l,s);i.delete(s)};return n(t,t.dependencies,t),e.join(`\n`)},T4e=(t,e)=>{let{identName:r,name:i,reference:n,peerNames:s}=t,o={name:i,references:new Set([n]),locator:OL(r,n),ident:fle(r,n),dependencies:new Map,originalDependencies:new Map,hoistedDependencies:new Map,peerNames:new Set(s),reasons:new Map,decoupled:!0,isHoistBorder:!0,hoistPriority:0,dependencyKind:1,hoistedFrom:new Map,hoistedTo:new Map},a=new Map([[t,o]]),l=(c,u)=>{let g=a.get(c),f=!!g;if(!g){let{name:h,identName:p,reference:m,peerNames:y,hoistPriority:Q,dependencyKind:S}=c,x=e.hoistingLimits.get(u.locator);g={name:h,references:new 
Set([m]),locator:OL(p,m),ident:fle(p,m),dependencies:new Map,originalDependencies:new Map,hoistedDependencies:new Map,peerNames:new Set(y),reasons:new Map,decoupled:!0,isHoistBorder:x?x.has(h):!1,hoistPriority:Q||0,dependencyKind:S||0,hoistedFrom:new Map,hoistedTo:new Map},a.set(c,g)}if(u.dependencies.set(c.name,g),u.originalDependencies.set(c.name,g),f){let h=new Set,p=m=>{if(!h.has(m)){h.add(m),m.decoupled=!1;for(let y of m.dependencies.values())m.peerNames.has(y.name)||p(y)}};p(g)}else for(let h of c.dependencies)l(h,g)};for(let c of t.dependencies)l(c,o);return o},KL=t=>t.substring(0,t.indexOf(\"@\",1)),O4e=t=>{let e={name:t.name,identName:KL(t.locator),references:new Set(t.references),dependencies:new Set},r=new Set([t]),i=(n,s,o)=>{let a=r.has(n),l;if(s===n)l=o;else{let{name:c,references:u,locator:g}=n;l={name:c,identName:KL(g),references:u,dependencies:new Set}}if(o.dependencies.add(l),!a){r.add(n);for(let c of n.dependencies.values())n.peerNames.has(c.name)||i(c,n,l);r.delete(n)}};for(let n of t.dependencies.values())i(n,t,e);return e},j4e=t=>{let e=new Map,r=new Set([t]),i=o=>`${o.name}@${o.ident}`,n=o=>{let a=i(o),l=e.get(a);return l||(l={dependents:new Set,peerDependents:new Set,hoistPriority:0},e.set(a,l)),l},s=(o,a)=>{let l=!!r.has(a);if(n(a).dependents.add(o.ident),!l){r.add(a);for(let u of a.dependencies.values()){let g=n(u);g.hoistPriority=Math.max(g.hoistPriority,u.hoistPriority),a.peerNames.has(u.name)?g.peerDependents.add(a.ident):s(a,u)}}};for(let o of t.dependencies.values())t.peerNames.has(o.name)||s(t,o);return e},Ni=t=>{if(!t)return\"none\";let e=t.indexOf(\"@\",1),r=t.substring(0,e);r.endsWith(\"$wsroot$\")&&(r=`wh:${r.replace(\"$wsroot$\",\"\")}`);let i=t.substring(e+1);if(i===\"workspace:.\")return\".\";if(i){let n=(i.indexOf(\"#\")>0?i.split(\"#\")[1]:i).replace(\"npm:\",\"\");return i.startsWith(\"virtual\")&&(r=`v:${r}`),n.startsWith(\"workspace\")&&(r=`w:${r}`,n=\"\"),`${r}${n?`@${n}`:\"\"}`}else return`${r}`},Cle=5e4,jm=t=>{let e=0,r=(n,s,o=\"\")=>{if(e>Cle||s.has(n))return\"\";e++;let a=Array.from(n.dependencies.values()).sort((c,u)=>c.name===u.name?0:c.name>u.name?1:-1),l=\"\";s.add(n);for(let c=0;c<a.length;c++){let u=a[c];if(!n.peerNames.has(u.name)&&u!==n){let g=n.reasons.get(u.name),f=KL(u.locator),h=n.hoistedFrom.get(u.name)||[];l+=`${o}${c<a.length-1?\"\\u251C\\u2500\":\"\\u2514\\u2500\"}${(s.has(u)?\">\":\"\")+(f!==u.name?`a:${u.name}:`:\"\")+Ni(u.locator)+(g?` ${g}`:\"\")+(u!==n&&h.length>0?`, hoisted from: ${h.join(\", \")}`:\"\")}\n`,l+=r(u,s,`${o}${c<a.length-1?\"\\u2502 \":\"  \"}`)}}return s.delete(n),l};return r(t,new Set)+(e>Cle?`\nTree is too large, part of the tree has been dunped\n`:\"\")};var Io;(function(r){r.HARD=\"HARD\",r.SOFT=\"SOFT\"})(Io||(Io={}));var Mn;(function(i){i.WORKSPACES=\"workspaces\",i.DEPENDENCIES=\"dependencies\",i.NONE=\"none\"})(Mn||(Mn={}));var mle=\"node_modules\",Bu=\"$wsroot$\";var Gm=(t,e)=>{let{packageTree:r,hoistingLimits:i,errors:n,preserveSymlinksRequired:s}=Y4e(t,e),o=null;if(n.length===0){let a=ple(r,{hoistingLimits:i});o=q4e(t,a,e)}return{tree:o,errors:n,preserveSymlinksRequired:s}},fa=t=>`${t.name}@${t.reference}`,HL=t=>{let e=new Map;for(let[r,i]of t.entries())if(!i.dirList){let n=e.get(i.locator);n||(n={target:i.target,linkType:i.linkType,locations:[],aliases:i.aliases},e.set(i.locator,n)),n.locations.push(r)}for(let r of e.values())r.locations=r.locations.sort((i,n)=>{let s=i.split(k.delimiter).length,o=n.split(k.delimiter).length;return n===i?0:s!==o?o-s:n>i?1:-1});return e},Ele=(t,e)=>{let 
r=P.isVirtualLocator(t)?P.devirtualizeLocator(t):t,i=P.isVirtualLocator(e)?P.devirtualizeLocator(e):e;return P.areLocatorsEqual(r,i)},jL=(t,e,r,i)=>{if(t.linkType!==Io.SOFT)return!1;let n=j.toPortablePath(r.resolveVirtual&&e.reference&&e.reference.startsWith(\"virtual:\")?r.resolveVirtual(t.packageLocation):t.packageLocation);return k.contains(i,n)===null},J4e=t=>{let e=t.getPackageInformation(t.topLevel);if(e===null)throw new Error(\"Assertion failed: Expected the top-level package to have been registered\");if(t.findPackageLocator(e.packageLocation)===null)throw new Error(\"Assertion failed: Expected the top-level package to have a physical locator\");let i=j.toPortablePath(e.packageLocation.slice(0,-1)),n=new Map,s={children:new Map},o=t.getDependencyTreeRoots(),a=new Map,l=new Set,c=(f,h)=>{let p=fa(f);if(l.has(p))return;l.add(p);let m=t.getPackageInformation(f);if(m){let y=h?fa(h):\"\";if(fa(f)!==y&&m.linkType===Io.SOFT&&!jL(m,f,t,i)){let Q=Ile(m,f,t);(!a.get(Q)||f.reference.startsWith(\"workspace:\"))&&a.set(Q,f)}for(let[Q,S]of m.packageDependencies)S!==null&&(m.packagePeers.has(Q)||c(t.getLocator(Q,S),f))}};for(let f of o)c(f,null);let u=i.split(k.sep);for(let f of a.values()){let h=t.getPackageInformation(f),m=j.toPortablePath(h.packageLocation.slice(0,-1)).split(k.sep).slice(u.length),y=s;for(let Q of m){let S=y.children.get(Q);S||(S={children:new Map},y.children.set(Q,S)),y=S}y.workspaceLocator=f}let g=(f,h)=>{if(f.workspaceLocator){let p=fa(h),m=n.get(p);m||(m=new Set,n.set(p,m)),m.add(f.workspaceLocator)}for(let p of f.children.values())g(p,f.workspaceLocator||h)};for(let f of s.children.values())g(f,s.workspaceLocator);return n},Y4e=(t,e)=>{let r=[],i=!1,n=new Map,s=J4e(t),o=t.getPackageInformation(t.topLevel);if(o===null)throw new Error(\"Assertion failed: Expected the top-level package to have been registered\");let a=t.findPackageLocator(o.packageLocation);if(a===null)throw new Error(\"Assertion failed: Expected the top-level package to have a physical locator\");let l=j.toPortablePath(o.packageLocation.slice(0,-1)),c={name:a.name,identName:a.name,reference:a.reference,peerNames:o.packagePeers,dependencies:new Set,dependencyKind:ls.WORKSPACE},u=new Map,g=(h,p)=>`${fa(p)}:${h}`,f=(h,p,m,y,Q,S,x,M)=>{var Ae,T;let Y=g(h,m),U=u.get(Y),J=!!U;!J&&m.name===a.name&&m.reference===a.reference&&(U=c,u.set(Y,c));let W=jL(p,m,t,l);if(!U){let L=ls.REGULAR;W?L=ls.EXTERNAL_SOFT_LINK:p.linkType===Io.SOFT&&m.name.endsWith(Bu)&&(L=ls.WORKSPACE),U={name:h,identName:m.name,reference:m.reference,dependencies:new Set,peerNames:L===ls.WORKSPACE?new Set:p.packagePeers,dependencyKind:L},u.set(Y,U)}let ee;if(W?ee=2:Q.linkType===Io.SOFT?ee=1:ee=0,U.hoistPriority=Math.max(U.hoistPriority||0,ee),M&&!W){let L=fa({name:y.identName,reference:y.reference}),Ee=n.get(L)||new Set;n.set(L,Ee),Ee.add(U.name)}let Z=new Map(p.packageDependencies);if(e.project){let L=e.project.workspacesByCwd.get(j.toPortablePath(p.packageLocation.slice(0,-1)));if(L){let Ee=new Set([...Array.from(L.manifest.peerDependencies.values(),we=>P.stringifyIdent(we)),...Array.from(L.manifest.peerDependenciesMeta.keys())]);for(let we of Ee)Z.has(we)||(Z.set(we,S.get(we)||null),U.peerNames.add(we))}}let A=fa({name:m.name.replace(Bu,\"\"),reference:m.reference}),ne=s.get(A);if(ne)for(let L of ne)Z.set(`${L.name}${Bu}`,L.reference);(p!==Q||p.linkType!==Io.SOFT||!e.selfReferencesByCwd||e.selfReferencesByCwd.get(x))&&y.dependencies.add(U);let le=m!==a&&p.linkType===Io.SOFT&&!m.name.endsWith(Bu)&&!W;if(!J&&!le){let L=new Map;for(let[Ee,we]of 
Z)if(we!==null){let qe=t.getLocator(Ee,we),re=t.getLocator(Ee.replace(Bu,\"\"),we),se=t.getPackageInformation(re);if(se===null)throw new Error(\"Assertion failed: Expected the package to have been registered\");let Qe=jL(se,qe,t,l);if(e.validateExternalSoftLinks&&e.project&&Qe){se.packageDependencies.size>0&&(i=!0);for(let[ve,pe]of se.packageDependencies)if(pe!==null){let X=P.parseLocator(Array.isArray(pe)?`${pe[0]}@${pe[1]}`:`${ve}@${pe}`);if(fa(X)!==fa(qe)){let be=Z.get(ve);if(be){let ce=P.parseLocator(Array.isArray(be)?`${be[0]}@${be[1]}`:`${ve}@${be}`);Ele(ce,X)||r.push({messageName:$.NM_CANT_INSTALL_EXTERNAL_SOFT_LINK,text:`Cannot link ${P.prettyIdent(e.project.configuration,P.parseIdent(qe.name))} into ${P.prettyLocator(e.project.configuration,P.parseLocator(`${m.name}@${m.reference}`))} dependency ${P.prettyLocator(e.project.configuration,X)} conflicts with parent dependency ${P.prettyLocator(e.project.configuration,ce)}`})}else{let ce=L.get(ve);if(ce){let fe=ce.target,gt=P.parseLocator(Array.isArray(fe)?`${fe[0]}@${fe[1]}`:`${ve}@${fe}`);Ele(gt,X)||r.push({messageName:$.NM_CANT_INSTALL_EXTERNAL_SOFT_LINK,text:`Cannot link ${P.prettyIdent(e.project.configuration,P.parseIdent(qe.name))} into ${P.prettyLocator(e.project.configuration,P.parseLocator(`${m.name}@${m.reference}`))} dependency ${P.prettyLocator(e.project.configuration,X)} conflicts with dependency ${P.prettyLocator(e.project.configuration,gt)} from sibling portal ${P.prettyIdent(e.project.configuration,P.parseIdent(ce.portal.name))}`})}else L.set(ve,{target:X.reference,portal:qe})}}}}let he=(Ae=e.hoistingLimitsByCwd)==null?void 0:Ae.get(x),Fe=Qe?x:k.relative(l,j.toPortablePath(se.packageLocation))||Me.dot,Ue=(T=e.hoistingLimitsByCwd)==null?void 0:T.get(Fe),xe=he===Mn.DEPENDENCIES||Ue===Mn.DEPENDENCIES||Ue===Mn.WORKSPACES;f(Ee,se,qe,U,p,Z,Fe,xe)}}};return f(a.name,o,a,c,o,o.packageDependencies,Me.dot,!1),{packageTree:c,hoistingLimits:n,errors:r,preserveSymlinksRequired:i}};function Ile(t,e,r){let i=r.resolveVirtual&&e.reference&&e.reference.startsWith(\"virtual:\")?r.resolveVirtual(t.packageLocation):t.packageLocation;return j.toPortablePath(i||t.packageLocation)}function W4e(t,e,r){let i=e.getLocator(t.name.replace(Bu,\"\"),t.reference),n=e.getPackageInformation(i);if(n===null)throw new Error(\"Assertion failed: Expected the package to be registered\");let s,o;return r.pnpifyFs?(o=j.toPortablePath(n.packageLocation),s=Io.SOFT):(o=Ile(n,t,e),s=n.linkType),{linkType:s,target:o}}var q4e=(t,e,r)=>{let i=new Map,n=(u,g,f)=>{let{linkType:h,target:p}=W4e(u,t,r);return{locator:fa(u),nodePath:g,target:p,linkType:h,aliases:f}},s=u=>{let[g,f]=u.split(\"/\");return f?{scope:qr(g),name:qr(f)}:{scope:null,name:qr(g)}},o=new Set,a=(u,g,f)=>{if(!o.has(u)){o.add(u);for(let h of u.dependencies){if(h===u)continue;let p=Array.from(h.references).sort(),m={name:h.identName,reference:p[0]},{name:y,scope:Q}=s(h.name),S=Q?[Q,y]:[y],x=k.join(g,mle),M=k.join(x,...S),Y=`${f}/${m.name}`,U=n(m,f,p.slice(1)),J=!1;if(U.linkType===Io.SOFT&&r.project){let W=r.project.workspacesByCwd.get(U.target.slice(0,-1));J=!!(W&&!W.manifest.name)}if(!h.name.endsWith(Bu)&&!J){let W=i.get(M);if(W){if(W.dirList)throw new Error(`Assertion failed: ${M} cannot merge dir node with leaf node`);{let ne=P.parseLocator(W.locator),le=P.parseLocator(U.locator);if(W.linkType!==U.linkType)throw new Error(`Assertion failed: ${M} cannot merge nodes with different link types ${W.nodePath}/${P.stringifyLocator(ne)} and ${f}/${P.stringifyLocator(le)}`);if(ne.identHash!==le.identHash)throw 
new Error(`Assertion failed: ${M} cannot merge nodes with different idents ${W.nodePath}/${P.stringifyLocator(ne)} and ${f}/${P.stringifyLocator(le)}`);U.aliases=[...U.aliases,...W.aliases,P.parseLocator(W.locator).reference]}}i.set(M,U);let ee=M.split(\"/\"),Z=ee.indexOf(mle),A=ee.length-1;for(;Z>=0&&A>Z;){let ne=j.toPortablePath(ee.slice(0,A).join(k.sep)),le=qr(ee[A]),Ae=i.get(ne);if(!Ae)i.set(ne,{dirList:new Set([le])});else if(Ae.dirList){if(Ae.dirList.has(le))break;Ae.dirList.add(le)}A--}}a(h,U.linkType===Io.SOFT?U.target:M,Y)}}},l=n({name:e.name,reference:Array.from(e.references)[0]},\"\",[]),c=l.target;return i.set(c,l),a(e,c,\"\"),i};var eT={};ft(eT,{PnpInstaller:()=>sh,PnpLinker:()=>Qu,default:()=>m_e,getPnpPath:()=>Pl,jsInstallUtils:()=>wo,pnpUtils:()=>ZL,quotePathIfNeeded:()=>Jle});var Yle=ge(ti()),qle=ge(require(\"url\"));var yle;(function(r){r.HARD=\"HARD\",r.SOFT=\"SOFT\"})(yle||(yle={}));var er;(function(f){f.DEFAULT=\"DEFAULT\",f.TOP_LEVEL=\"TOP_LEVEL\",f.FALLBACK_EXCLUSION_LIST=\"FALLBACK_EXCLUSION_LIST\",f.FALLBACK_EXCLUSION_ENTRIES=\"FALLBACK_EXCLUSION_ENTRIES\",f.FALLBACK_EXCLUSION_DATA=\"FALLBACK_EXCLUSION_DATA\",f.PACKAGE_REGISTRY_DATA=\"PACKAGE_REGISTRY_DATA\",f.PACKAGE_REGISTRY_ENTRIES=\"PACKAGE_REGISTRY_ENTRIES\",f.PACKAGE_STORE_DATA=\"PACKAGE_STORE_DATA\",f.PACKAGE_STORE_ENTRIES=\"PACKAGE_STORE_ENTRIES\",f.PACKAGE_INFORMATION_DATA=\"PACKAGE_INFORMATION_DATA\",f.PACKAGE_DEPENDENCIES=\"PACKAGE_DEPENDENCIES\",f.PACKAGE_DEPENDENCY=\"PACKAGE_DEPENDENCY\"})(er||(er={}));var wle={[er.DEFAULT]:{collapsed:!1,next:{[\"*\"]:er.DEFAULT}},[er.TOP_LEVEL]:{collapsed:!1,next:{fallbackExclusionList:er.FALLBACK_EXCLUSION_LIST,packageRegistryData:er.PACKAGE_REGISTRY_DATA,[\"*\"]:er.DEFAULT}},[er.FALLBACK_EXCLUSION_LIST]:{collapsed:!1,next:{[\"*\"]:er.FALLBACK_EXCLUSION_ENTRIES}},[er.FALLBACK_EXCLUSION_ENTRIES]:{collapsed:!0,next:{[\"*\"]:er.FALLBACK_EXCLUSION_DATA}},[er.FALLBACK_EXCLUSION_DATA]:{collapsed:!0,next:{[\"*\"]:er.DEFAULT}},[er.PACKAGE_REGISTRY_DATA]:{collapsed:!1,next:{[\"*\"]:er.PACKAGE_REGISTRY_ENTRIES}},[er.PACKAGE_REGISTRY_ENTRIES]:{collapsed:!0,next:{[\"*\"]:er.PACKAGE_STORE_DATA}},[er.PACKAGE_STORE_DATA]:{collapsed:!1,next:{[\"*\"]:er.PACKAGE_STORE_ENTRIES}},[er.PACKAGE_STORE_ENTRIES]:{collapsed:!0,next:{[\"*\"]:er.PACKAGE_INFORMATION_DATA}},[er.PACKAGE_INFORMATION_DATA]:{collapsed:!1,next:{packageDependencies:er.PACKAGE_DEPENDENCIES,[\"*\"]:er.DEFAULT}},[er.PACKAGE_DEPENDENCIES]:{collapsed:!1,next:{[\"*\"]:er.PACKAGE_DEPENDENCY}},[er.PACKAGE_DEPENDENCY]:{collapsed:!0,next:{[\"*\"]:er.DEFAULT}}};function z4e(t,e,r){let i=\"\";i+=\"[\";for(let n=0,s=t.length;n<s;++n)i+=cb(String(n),t[n],e,r).replace(/^ +/g,\"\"),n+1<s&&(i+=\", \");return i+=\"]\",i}function _4e(t,e,r){let i=`${r}  `,n=\"\";n+=r,n+=`[\n`;for(let s=0,o=t.length;s<o;++s)n+=i+cb(String(s),t[s],e,i).replace(/^ +/,\"\"),s+1<o&&(n+=\",\"),n+=`\n`;return n+=r,n+=\"]\",n}function V4e(t,e,r){let i=Object.keys(t),n=\"\";n+=\"{\";for(let s=0,o=i.length,a=0;s<o;++s){let l=i[s],c=t[l];typeof c!=\"undefined\"&&(a!==0&&(n+=\", \"),n+=JSON.stringify(l),n+=\": \",n+=cb(l,c,e,r).replace(/^ +/g,\"\"),a+=1)}return n+=\"}\",n}function X4e(t,e,r){let i=Object.keys(t),n=`${r}  `,s=\"\";s+=r,s+=`{\n`;let o=0;for(let a=0,l=i.length;a<l;++a){let c=i[a],u=t[c];typeof u!=\"undefined\"&&(o!==0&&(s+=\",\",s+=`\n`),s+=n,s+=JSON.stringify(c),s+=\": \",s+=cb(c,u,e,n).replace(/^ +/g,\"\"),o+=1)}return o!==0&&(s+=`\n`),s+=r,s+=\"}\",s}function cb(t,e,r,i){let{next:n}=wle[r],s=n[t]||n[\"*\"];return Ble(e,s,i)}function 
Ble(t,e,r){let{collapsed:i}=wle[e];return Array.isArray(t)?i?z4e(t,e,r):_4e(t,e,r):typeof t==\"object\"&&t!==null?i?V4e(t,e,r):X4e(t,e,r):JSON.stringify(t)}function ble(t){return Ble(t,er.TOP_LEVEL,\"\")}function Ym(t,e){let r=Array.from(t);Array.isArray(e)||(e=[e]);let i=[];for(let s of e)i.push(r.map(o=>s(o)));let n=r.map((s,o)=>o);return n.sort((s,o)=>{for(let a of i){let l=a[s]<a[o]?-1:a[s]>a[o]?1:0;if(l!==0)return l}return 0}),n.map(s=>r[s])}function Z4e(t){let e=new Map,r=Ym(t.fallbackExclusionList||[],[({name:i,reference:n})=>i,({name:i,reference:n})=>n]);for(let{name:i,reference:n}of r){let s=e.get(i);typeof s==\"undefined\"&&e.set(i,s=new Set),s.add(n)}return Array.from(e).map(([i,n])=>[i,Array.from(n)])}function $4e(t){return Ym(t.fallbackPool||[],([e])=>e)}function e_e(t){let e=[];for(let[r,i]of Ym(t.packageRegistry,([n])=>n===null?\"0\":`1${n}`)){let n=[];e.push([r,n]);for(let[s,{packageLocation:o,packageDependencies:a,packagePeers:l,linkType:c,discardFromLookup:u}]of Ym(i,([g])=>g===null?\"0\":`1${g}`)){let g=[];r!==null&&s!==null&&!a.has(r)&&g.push([r,s]);for(let[p,m]of Ym(a.entries(),([y])=>y))g.push([p,m]);let f=l&&l.size>0?Array.from(l):void 0,h=u||void 0;n.push([s,{packageLocation:o,packageDependencies:g,packagePeers:f,linkType:c,discardFromLookup:h}])}}return e}function qm(t){return{__info:[\"This file is automatically generated. Do not touch it, or risk\",\"your modifications being lost. We also recommend you not to read\",\"it either without using the @yarnpkg/pnp package, as the data layout\",\"is entirely unspecified and WILL change from a version to another.\"],dependencyTreeRoots:t.dependencyTreeRoots,enableTopLevelFallback:t.enableTopLevelFallback||!1,ignorePatternData:t.ignorePattern||null,fallbackExclusionList:Z4e(t),fallbackPool:$4e(t),packageRegistryData:e_e(t)}}var Sle=ge(vle());function kle(t,e){return[t?`${t}\n`:\"\",`/* eslint-disable */\n\n`,`try {\n`,`  Object.freeze({}).detectStrictMode = true;\n`,`} catch (error) {\n`,\"  throw new Error(`The whole PnP file got strict-mode-ified, which is known to break (Emscripten libraries aren't strict mode). 
This usually happens when the file goes through Babel.`);\\n\",`}\n`,`\n`,`function $$SETUP_STATE(hydrateRuntimeState, basePath) {\n`,e.replace(/^/gm,\"  \"),`}\n`,`\n`,(0,Sle.default)()].join(\"\")}function t_e(t){return JSON.stringify(t,null,2)}function r_e(t){return`'${t.replace(/\\\\/g,\"\\\\\\\\\").replace(/'/g,\"\\\\'\").replace(/\\n/g,`\\\\\n`)}'`}function i_e(t){return[`return hydrateRuntimeState(JSON.parse(${r_e(ble(t))}), {basePath: basePath || __dirname});\n`].join(\"\")}function n_e(t){return[`var path = require('path');\n`,`var dataLocation = path.resolve(__dirname, ${JSON.stringify(t)});\n`,`return hydrateRuntimeState(require(dataLocation), {basePath: basePath || path.dirname(dataLocation)});\n`].join(\"\")}function xle(t){let e=qm(t),r=i_e(e);return kle(t.shebang,r)}function Ple(t){let e=qm(t),r=n_e(t.dataLocation),i=kle(t.shebang,r);return{dataFile:t_e(e),loaderFile:i}}var Lle=ge(require(\"fs\")),u_e=ge(require(\"path\")),Tle=ge(require(\"util\"));function YL(t,{basePath:e}){let r=j.toPortablePath(e),i=k.resolve(r),n=t.ignorePatternData!==null?new RegExp(t.ignorePatternData):null,s=new Map,o=new Map(t.packageRegistryData.map(([g,f])=>[g,new Map(f.map(([h,p])=>{var x;if(g===null!=(h===null))throw new Error(\"Assertion failed: The name and reference should be null, or neither should\");let m=(x=p.discardFromLookup)!=null?x:!1,y={name:g,reference:h},Q=s.get(p.packageLocation);Q?(Q.discardFromLookup=Q.discardFromLookup&&m,m||(Q.locator=y)):s.set(p.packageLocation,{locator:y,discardFromLookup:m});let S=null;return[h,{packageDependencies:new Map(p.packageDependencies),packagePeers:new Set(p.packagePeers),linkType:p.linkType,discardFromLookup:m,get packageLocation(){return S||(S=k.join(i,p.packageLocation))}}]}))])),a=new Map(t.fallbackExclusionList.map(([g,f])=>[g,new Set(f)])),l=new Map(t.fallbackPool),c=t.dependencyTreeRoots,u=t.enableTopLevelFallback;return{basePath:r,dependencyTreeRoots:c,enableTopLevelFallback:u,fallbackExclusionList:a,fallbackPool:l,ignorePattern:n,packageLocatorsByLocations:s,packageRegistry:o}}var Jm=ge(require(\"module\")),Nle=ge(Rle()),JL=ge(require(\"util\"));var ur;(function(c){c.API_ERROR=\"API_ERROR\",c.BUILTIN_NODE_RESOLUTION_FAILED=\"BUILTIN_NODE_RESOLUTION_FAILED\",c.EXPORTS_RESOLUTION_FAILED=\"EXPORTS_RESOLUTION_FAILED\",c.MISSING_DEPENDENCY=\"MISSING_DEPENDENCY\",c.MISSING_PEER_DEPENDENCY=\"MISSING_PEER_DEPENDENCY\",c.QUALIFIED_PATH_RESOLUTION_FAILED=\"QUALIFIED_PATH_RESOLUTION_FAILED\",c.INTERNAL=\"INTERNAL\",c.UNDECLARED_DEPENDENCY=\"UNDECLARED_DEPENDENCY\",c.UNSUPPORTED=\"UNSUPPORTED\"})(ur||(ur={}));var a_e=new Set([ur.BUILTIN_NODE_RESOLUTION_FAILED,ur.MISSING_DEPENDENCY,ur.MISSING_PEER_DEPENDENCY,ur.QUALIFIED_PATH_RESOLUTION_FAILED,ur.UNDECLARED_DEPENDENCY]);function oi(t,e,r={},i){i!=null||(i=a_e.has(t)?\"MODULE_NOT_FOUND\":t);let n={configurable:!0,writable:!0,enumerable:!1};return Object.defineProperties(new Error(e),{code:te(N({},n),{value:i}),pnpCode:te(N({},n),{value:t}),data:te(N({},n),{value:r})})}function yo(t){return j.normalize(j.fromPortablePath(t))}var A_e=ge(require(\"fs\")),Fle=ge(require(\"module\")),l_e=ge(require(\"path\")),c_e=new Set(Fle.Module.builtinModules||Object.keys(process.binding(\"natives\"))),ub=t=>t.startsWith(\"node:\")||c_e.has(t);function WL(t,e){let 
r=Number(process.env.PNP_ALWAYS_WARN_ON_FALLBACK)>0,i=Number(process.env.PNP_DEBUG_LEVEL),n=/^(?![a-zA-Z]:[\\\\/]|\\\\\\\\|\\.{0,2}(?:\\/|$))((?:node:)?(?:@[^/]+\\/)?[^/]+)\\/*(.*|)$/,s=/^(\\/|\\.{1,2}(\\/|$))/,o=/\\/$/,a=/^\\.{0,2}\\//,l={name:null,reference:null},c=[],u=new Set;if(t.enableTopLevelFallback===!0&&c.push(l),e.compatibilityMode!==!1)for(let re of[\"react-scripts\",\"gatsby\"]){let se=t.packageRegistry.get(re);if(se)for(let Qe of se.keys()){if(Qe===null)throw new Error(\"Assertion failed: This reference shouldn't be null\");c.push({name:re,reference:Qe})}}let{ignorePattern:g,packageRegistry:f,packageLocatorsByLocations:h}=t;function p(re,se){return{fn:re,args:se,error:null,result:null}}function m(re){var Ue,xe,ve,pe,X,be;let se=(ve=(xe=(Ue=process.stderr)==null?void 0:Ue.hasColors)==null?void 0:xe.call(Ue))!=null?ve:process.stdout.isTTY,Qe=(ce,fe)=>`\u001b[${ce}m${fe}\u001b[0m`,he=re.error;console.error(he?Qe(\"31;1\",`\\u2716 ${(pe=re.error)==null?void 0:pe.message.replace(/\\n.*/s,\"\")}`):Qe(\"33;1\",\"\\u203C Resolution\")),re.args.length>0&&console.error();for(let ce of re.args)console.error(`  ${Qe(\"37;1\",\"In \\u2190\")} ${(0,JL.inspect)(ce,{colors:se,compact:!0})}`);re.result&&(console.error(),console.error(`  ${Qe(\"37;1\",\"Out \\u2192\")} ${(0,JL.inspect)(re.result,{colors:se,compact:!0})}`));let Fe=(be=(X=new Error().stack.match(/(?<=^ +)at.*/gm))==null?void 0:X.slice(2))!=null?be:[];if(Fe.length>0){console.error();for(let ce of Fe)console.error(`  ${Qe(\"38;5;244\",ce)}`)}console.error()}function y(re,se){if(e.allowDebug===!1)return se;if(Number.isFinite(i)){if(i>=2)return(...Qe)=>{let he=p(re,Qe);try{return he.result=se(...Qe)}catch(Fe){throw he.error=Fe}finally{m(he)}};if(i>=1)return(...Qe)=>{try{return se(...Qe)}catch(he){let Fe=p(re,Qe);throw Fe.error=he,m(Fe),he}}}return se}function Q(re){let se=A(re);if(!se)throw oi(ur.INTERNAL,\"Couldn't find a matching entry in the dependency tree for the specified parent (this is probably an internal error)\");return se}function S(re){if(re.name===null)return!0;for(let se of t.dependencyTreeRoots)if(se.name===re.name&&se.reference===re.reference)return!0;return!1}let x=new Set([\"default\",\"node\",\"require\"]);function M(re,se=x){let Qe=Ae(k.join(re,\"internal.js\"),{resolveIgnored:!0,includeDiscardFromLookup:!0});if(Qe===null)throw oi(ur.INTERNAL,`The locator that owns the \"${re}\" path can't be found inside the dependency tree (this is probably an internal error)`);let{packageLocation:he}=Q(Qe),Fe=k.join(he,Pt.manifest);if(!e.fakeFs.existsSync(Fe))return null;let Ue=JSON.parse(e.fakeFs.readFileSync(Fe,\"utf8\")),xe=k.contains(he,re);if(xe===null)throw oi(ur.INTERNAL,\"unqualifiedPath doesn't contain the packageLocation (this is probably an internal error)\");a.test(xe)||(xe=`./${xe}`);let ve;try{ve=(0,Nle.resolve)(Ue,k.normalize(xe),{conditions:se,unsafe:!0})}catch(pe){throw oi(ur.EXPORTS_RESOLUTION_FAILED,pe.message,{unqualifiedPath:yo(re),locator:Qe,pkgJson:Ue,subpath:yo(xe),conditions:se},\"ERR_PACKAGE_PATH_NOT_EXPORTED\")}return typeof ve==\"string\"?k.join(he,ve):null}function Y(re,se,{extensions:Qe}){let he;try{se.push(re),he=e.fakeFs.statSync(re)}catch(Fe){}if(he&&!he.isDirectory())return e.fakeFs.realpathSync(re);if(he&&he.isDirectory()){let Fe;try{Fe=JSON.parse(e.fakeFs.readFileSync(k.join(re,Pt.manifest),\"utf8\"))}catch(xe){}let Ue;if(Fe&&Fe.main&&(Ue=k.resolve(re,Fe.main)),Ue&&Ue!==re){let xe=Y(Ue,se,{extensions:Qe});if(xe!==null)return xe}}for(let Fe=0,Ue=Qe.length;Fe<Ue;Fe++){let 
xe=`${re}${Qe[Fe]}`;if(se.push(xe),e.fakeFs.existsSync(xe))return xe}if(he&&he.isDirectory())for(let Fe=0,Ue=Qe.length;Fe<Ue;Fe++){let xe=k.format({dir:re,name:\"index\",ext:Qe[Fe]});if(se.push(xe),e.fakeFs.existsSync(xe))return xe}return null}function U(re){let se=new Jm.Module(re,null);return se.filename=re,se.paths=Jm.Module._nodeModulePaths(re),se}function J(re,se){return se.endsWith(\"/\")&&(se=k.join(se,\"internal.js\")),Jm.Module._resolveFilename(j.fromPortablePath(re),U(j.fromPortablePath(se)),!1,{plugnplay:!1})}function W(re){if(g===null)return!1;let se=k.contains(t.basePath,re);return se===null?!1:!!g.test(se.replace(/\\/$/,\"\"))}let ee={std:3,resolveVirtual:1,getAllLocators:1},Z=l;function A({name:re,reference:se}){let Qe=f.get(re);if(!Qe)return null;let he=Qe.get(se);return he||null}function ne({name:re,reference:se}){let Qe=[];for(let[he,Fe]of f)if(he!==null)for(let[Ue,xe]of Fe)Ue===null||xe.packageDependencies.get(re)!==se||he===re&&Ue===se||Qe.push({name:he,reference:Ue});return Qe}function le(re,se){let Qe=new Map,he=new Set,Fe=xe=>{let ve=JSON.stringify(xe.name);if(he.has(ve))return;he.add(ve);let pe=ne(xe);for(let X of pe)if(Q(X).packagePeers.has(re))Fe(X);else{let ce=Qe.get(X.name);typeof ce==\"undefined\"&&Qe.set(X.name,ce=new Set),ce.add(X.reference)}};Fe(se);let Ue=[];for(let xe of[...Qe.keys()].sort())for(let ve of[...Qe.get(xe)].sort())Ue.push({name:xe,reference:ve});return Ue}function Ae(re,{resolveIgnored:se=!1,includeDiscardFromLookup:Qe=!1}={}){if(W(re)&&!se)return null;let he=k.relative(t.basePath,re);he.match(s)||(he=`./${he}`),he.endsWith(\"/\")||(he=`${he}/`);do{let Fe=h.get(he);if(typeof Fe==\"undefined\"||Fe.discardFromLookup&&!Qe){he=he.substring(0,he.lastIndexOf(\"/\",he.length-2)+1);continue}return Fe.locator}while(he!==\"\");return null}function T(re,se,{considerBuiltins:Qe=!0}={}){if(re===\"pnpapi\")return j.toPortablePath(e.pnpapiResolution);if(Qe&&ub(re))return null;let he=yo(re),Fe=se&&yo(se);if(se&&W(se)&&(!k.isAbsolute(re)||Ae(re)===null)){let ve=J(re,se);if(ve===!1)throw oi(ur.BUILTIN_NODE_RESOLUTION_FAILED,`The builtin node resolution algorithm was unable to resolve the requested module (it didn't go through the pnp resolver because the issuer was explicitly ignored by the regexp)\n\nRequire request: \"${he}\"\nRequired by: ${Fe}\n`,{request:he,issuer:Fe});return j.toPortablePath(ve)}let Ue,xe=re.match(n);if(xe){if(!se)throw oi(ur.API_ERROR,\"The resolveToUnqualified function must be called with a valid issuer when the path isn't a builtin nor absolute\",{request:he,issuer:Fe});let[,ve,pe]=xe,X=Ae(se);if(!X){let jt=J(re,se);if(jt===!1)throw oi(ur.BUILTIN_NODE_RESOLUTION_FAILED,`The builtin node resolution algorithm was unable to resolve the requested module (it didn't go through the pnp resolver because the issuer doesn't seem to be part of the Yarn-managed dependency tree).\n\nRequire path: \"${he}\"\nRequired by: ${Fe}\n`,{request:he,issuer:Fe});return j.toPortablePath(jt)}let ce=Q(X).packageDependencies.get(ve),fe=null;if(ce==null&&X.name!==null){let jt=t.fallbackExclusionList.get(X.name);if(!jt||!jt.has(X.reference)){for(let Ti=0,_s=c.length;Ti<_s;++Ti){let Kn=Q(c[Ti]).packageDependencies.get(ve);if(Kn!=null){r?fe=Kn:ce=Kn;break}}if(t.enableTopLevelFallback&&ce==null&&fe===null){let Ti=t.fallbackPool.get(ve);Ti!=null&&(fe=Ti)}}}let gt=null;if(ce===null)if(S(X))gt=oi(ur.MISSING_PEER_DEPENDENCY,`Your application tried to access ${ve} (a peer dependency); this isn't allowed as there is no ancestor to satisfy the requirement. 
Use a devDependency if needed.\n\nRequired package: ${ve}${ve!==he?` (via \"${he}\")`:\"\"}\nRequired by: ${Fe}\n`,{request:he,issuer:Fe,dependencyName:ve});else{let jt=le(ve,X);jt.every(Qr=>S(Qr))?gt=oi(ur.MISSING_PEER_DEPENDENCY,`${X.name} tried to access ${ve} (a peer dependency) but it isn't provided by your application; this makes the require call ambiguous and unsound.\n\nRequired package: ${ve}${ve!==he?` (via \"${he}\")`:\"\"}\nRequired by: ${X.name}@${X.reference} (via ${Fe})\n${jt.map(Qr=>`Ancestor breaking the chain: ${Qr.name}@${Qr.reference}\n`).join(\"\")}\n`,{request:he,issuer:Fe,issuerLocator:Object.assign({},X),dependencyName:ve,brokenAncestors:jt}):gt=oi(ur.MISSING_PEER_DEPENDENCY,`${X.name} tried to access ${ve} (a peer dependency) but it isn't provided by its ancestors; this makes the require call ambiguous and unsound.\n\nRequired package: ${ve}${ve!==he?` (via \"${he}\")`:\"\"}\nRequired by: ${X.name}@${X.reference} (via ${Fe})\n\n${jt.map(Qr=>`Ancestor breaking the chain: ${Qr.name}@${Qr.reference}\n`).join(\"\")}\n`,{request:he,issuer:Fe,issuerLocator:Object.assign({},X),dependencyName:ve,brokenAncestors:jt})}else ce===void 0&&(!Qe&&ub(re)?S(X)?gt=oi(ur.UNDECLARED_DEPENDENCY,`Your application tried to access ${ve}. While this module is usually interpreted as a Node builtin, your resolver is running inside a non-Node resolution context where such builtins are ignored. Since ${ve} isn't otherwise declared in your dependencies, this makes the require call ambiguous and unsound.\n\nRequired package: ${ve}${ve!==he?` (via \"${he}\")`:\"\"}\nRequired by: ${Fe}\n`,{request:he,issuer:Fe,dependencyName:ve}):gt=oi(ur.UNDECLARED_DEPENDENCY,`${X.name} tried to access ${ve}. While this module is usually interpreted as a Node builtin, your resolver is running inside a non-Node resolution context where such builtins are ignored. Since ${ve} isn't otherwise declared in ${X.name}'s dependencies, this makes the require call ambiguous and unsound.\n\nRequired package: ${ve}${ve!==he?` (via \"${he}\")`:\"\"}\nRequired by: ${Fe}\n`,{request:he,issuer:Fe,issuerLocator:Object.assign({},X),dependencyName:ve}):S(X)?gt=oi(ur.UNDECLARED_DEPENDENCY,`Your application tried to access ${ve}, but it isn't declared in your dependencies; this makes the require call ambiguous and unsound.\n\nRequired package: ${ve}${ve!==he?` (via \"${he}\")`:\"\"}\nRequired by: ${Fe}\n`,{request:he,issuer:Fe,dependencyName:ve}):gt=oi(ur.UNDECLARED_DEPENDENCY,`${X.name} tried to access ${ve}, but it isn't declared in its dependencies; this makes the require call ambiguous and unsound.\n\nRequired package: ${ve}${ve!==he?` (via \"${he}\")`:\"\"}\nRequired by: ${X.name}@${X.reference} (via ${Fe})\n`,{request:he,issuer:Fe,issuerLocator:Object.assign({},X),dependencyName:ve}));if(ce==null){if(fe===null||gt===null)throw gt||new Error(\"Assertion failed: Expected an error to have been set\");ce=fe;let jt=gt.message.replace(/\\n.*/g,\"\");gt.message=jt,!u.has(jt)&&i!==0&&(u.add(jt),process.emitWarning(gt))}let Ht=Array.isArray(ce)?{name:ce[0],reference:ce[1]}:{name:ve,reference:ce},Mt=Q(Ht);if(!Mt.packageLocation)throw oi(ur.MISSING_DEPENDENCY,`A dependency seems valid but didn't get installed for some reason. 
This might be caused by a partial install, such as dev vs prod.\n\nRequired package: ${Ht.name}@${Ht.reference}${Ht.name!==he?` (via \"${he}\")`:\"\"}\nRequired by: ${X.name}@${X.reference} (via ${Fe})\n`,{request:he,issuer:Fe,dependencyLocator:Object.assign({},Ht)});let mi=Mt.packageLocation;pe?Ue=k.join(mi,pe):Ue=mi}else if(k.isAbsolute(re))Ue=k.normalize(re);else{if(!se)throw oi(ur.API_ERROR,\"The resolveToUnqualified function must be called with a valid issuer when the path isn't a builtin nor absolute\",{request:he,issuer:Fe});let ve=k.resolve(se);se.match(o)?Ue=k.normalize(k.join(ve,re)):Ue=k.normalize(k.join(k.dirname(ve),re))}return k.normalize(Ue)}function L(re,se,Qe=x){if(s.test(re))return se;let he=M(se,Qe);return he?k.normalize(he):se}function Ee(re,{extensions:se=Object.keys(Jm.Module._extensions)}={}){var Fe,Ue;let Qe=[],he=Y(re,Qe,{extensions:se});if(he)return k.normalize(he);{let xe=yo(re),ve=Ae(re);if(ve){let{packageLocation:pe}=Q(ve),X=!0;try{e.fakeFs.accessSync(pe)}catch(be){if((be==null?void 0:be.code)===\"ENOENT\")X=!1;else{let ce=((Ue=(Fe=be==null?void 0:be.message)!=null?Fe:be)!=null?Ue:\"empty exception thrown\").replace(/^[A-Z]/,fe=>fe.toLowerCase());throw oi(ur.QUALIFIED_PATH_RESOLUTION_FAILED,`Required package exists but could not be accessed (${ce}).\n\nMissing package: ${ve.name}@${ve.reference}\nExpected package location: ${yo(pe)}\n`,{unqualifiedPath:xe,extensions:se})}}if(!X){let be=pe.includes(\"/unplugged/\")?\"Required unplugged package missing from disk. This may happen when switching branches without running installs (unplugged packages must be fully materialized on disk to work).\":\"Required package missing from disk. If you keep your packages inside your repository then restarting the Node process may be enough. 
Otherwise, try to run an install first.\";throw oi(ur.QUALIFIED_PATH_RESOLUTION_FAILED,`${be}\n\nMissing package: ${ve.name}@${ve.reference}\nExpected package location: ${yo(pe)}\n`,{unqualifiedPath:xe,extensions:se})}}throw oi(ur.QUALIFIED_PATH_RESOLUTION_FAILED,`Qualified path resolution failed: we looked for the following paths, but none could be accessed.\n\nSource path: ${xe}\n${Qe.map(pe=>`Not found: ${yo(pe)}\n`).join(\"\")}`,{unqualifiedPath:xe,extensions:se})}}function we(re,se,{considerBuiltins:Qe,extensions:he,conditions:Fe}={}){try{let Ue=T(re,se,{considerBuiltins:Qe});if(re===\"pnpapi\")return Ue;if(Ue===null)return null;let xe=()=>se!==null?W(se):!1,ve=(!Qe||!ub(re))&&!xe()?L(re,Ue,Fe):Ue;return Ee(ve,{extensions:he})}catch(Ue){throw Object.prototype.hasOwnProperty.call(Ue,\"pnpCode\")&&Object.assign(Ue.data,{request:yo(re),issuer:se&&yo(se)}),Ue}}function qe(re){let se=k.normalize(re),Qe=Jr.resolveVirtual(se);return Qe!==se?Qe:null}return{VERSIONS:ee,topLevel:Z,getLocator:(re,se)=>Array.isArray(se)?{name:se[0],reference:se[1]}:{name:re,reference:se},getDependencyTreeRoots:()=>[...t.dependencyTreeRoots],getAllLocators(){let re=[];for(let[se,Qe]of f)for(let he of Qe.keys())se!==null&&he!==null&&re.push({name:se,reference:he});return re},getPackageInformation:re=>{let se=A(re);if(se===null)return null;let Qe=j.fromPortablePath(se.packageLocation);return te(N({},se),{packageLocation:Qe})},findPackageLocator:re=>Ae(j.toPortablePath(re)),resolveToUnqualified:y(\"resolveToUnqualified\",(re,se,Qe)=>{let he=se!==null?j.toPortablePath(se):null,Fe=T(j.toPortablePath(re),he,Qe);return Fe===null?null:j.fromPortablePath(Fe)}),resolveUnqualified:y(\"resolveUnqualified\",(re,se)=>j.fromPortablePath(Ee(j.toPortablePath(re),se))),resolveRequest:y(\"resolveRequest\",(re,se,Qe)=>{let he=se!==null?j.toPortablePath(se):null,Fe=we(j.toPortablePath(re),he,Qe);return Fe===null?null:j.fromPortablePath(Fe)}),resolveVirtual:y(\"resolveVirtual\",re=>{let se=qe(j.toPortablePath(re));return se!==null?j.fromPortablePath(se):null})}}var YQt=(0,Tle.promisify)(Lle.readFile);var Ole=(t,e,r)=>{let i=qm(t),n=YL(i,{basePath:e}),s=j.join(e,Pt.pnpCjs);return WL(n,{fakeFs:r,pnpapiResolution:s})};var _L=ge(Ule());var wo={};ft(wo,{checkAndReportManifestCompatibility:()=>Hle,checkManifestCompatibility:()=>Kle,extractBuildScripts:()=>gb,getExtractHint:()=>VL,hasBindingGyp:()=>XL});function Kle(t){return P.isPackageCompatible(t,qg.getArchitectureSet())}function Hle(t,e,{configuration:r,report:i}){return Kle(t)?!0:(i==null||i.reportWarningOnce($.INCOMPATIBLE_ARCHITECTURE,`${P.prettyLocator(r,t)} The ${qg.getArchitectureName()} architecture is incompatible with this package, ${e} skipped.`),!1)}function gb(t,e,r,{configuration:i,report:n}){let s=[];for(let a of[\"preinstall\",\"install\",\"postinstall\"])e.manifest.scripts.has(a)&&s.push([As.SCRIPT,a]);return!e.manifest.scripts.has(\"install\")&&e.misc.hasBindingGyp&&s.push([As.SHELLCODE,\"node-gyp rebuild\"]),s.length===0?[]:t.linkType!==Qt.HARD?(n==null||n.reportWarningOnce($.SOFT_LINK_BUILD,`${P.prettyLocator(i,t)} lists build scripts, but is referenced through a soft link. 
Soft links don't support build scripts, so they'll be ignored.`),[]):r&&r.built===!1?(n==null||n.reportInfoOnce($.BUILD_DISABLED,`${P.prettyLocator(i,t)} lists build scripts, but its build has been explicitly disabled through configuration.`),[]):!i.get(\"enableScripts\")&&!r.built?(n==null||n.reportWarningOnce($.DISABLED_BUILD_SCRIPTS,`${P.prettyLocator(i,t)} lists build scripts, but all build scripts have been disabled.`),[]):Hle(t,\"build\",{configuration:i,report:n})?s:[]}var g_e=new Set([\".exe\",\".h\",\".hh\",\".hpp\",\".c\",\".cc\",\".cpp\",\".java\",\".jar\",\".node\"]);function VL(t){return t.packageFs.getExtractHint({relevantExtensions:g_e})}function XL(t){let e=k.join(t.prefixPath,\"binding.gyp\");return t.packageFs.existsSync(e)}var ZL={};ft(ZL,{getUnpluggedPath:()=>Wm});function Wm(t,{configuration:e}){return k.resolve(e.get(\"pnpUnpluggedFolder\"),P.slugifyLocator(t))}var f_e=new Set([P.makeIdent(null,\"nan\").identHash,P.makeIdent(null,\"node-gyp\").identHash,P.makeIdent(null,\"node-pre-gyp\").identHash,P.makeIdent(null,\"node-addon-api\").identHash,P.makeIdent(null,\"fsevents\").identHash]),Qu=class{constructor(){this.mode=\"strict\";this.pnpCache=new Map}supportsPackage(e,r){return this.isEnabled(r)}async findPackageLocation(e,r){if(!this.isEnabled(r))throw new Error(\"Assertion failed: Expected the PnP linker to be enabled\");let i=Pl(r.project).cjs;if(!K.existsSync(i))throw new Pe(`The project in ${ae.pretty(r.project.configuration,`${r.project.cwd}/package.json`,ae.Type.PATH)} doesn't seem to have been installed - running an install there might help`);let n=Se.getFactoryWithDefault(this.pnpCache,i,()=>Se.dynamicRequire(i,{cachingStrategy:Se.CachingStrategy.FsTime})),s={name:P.stringifyIdent(e),reference:e.reference},o=n.getPackageInformation(s);if(!o)throw new Pe(`Couldn't find ${P.prettyLocator(r.project.configuration,e)} in the currently installed PnP map - running an install might help`);return j.toPortablePath(o.packageLocation)}async findPackageLocator(e,r){if(!this.isEnabled(r))return null;let i=Pl(r.project).cjs;if(!K.existsSync(i))return null;let s=Se.getFactoryWithDefault(this.pnpCache,i,()=>Se.dynamicRequire(i,{cachingStrategy:Se.CachingStrategy.FsTime})).findPackageLocator(j.fromPortablePath(e));return s?P.makeLocator(P.parseIdent(s.name),s.reference):null}makeInstaller(e){return new sh(e)}isEnabled(e){return!(e.project.configuration.get(\"nodeLinker\")!==\"pnp\"||e.project.configuration.get(\"pnpMode\")!==this.mode)}},sh=class{constructor(e){this.opts=e;this.mode=\"strict\";this.asyncActions=new Se.AsyncActions(10);this.packageRegistry=new Map;this.virtualTemplates=new Map;this.isESMLoaderRequired=!1;this.customData={store:new Map};this.unpluggedPaths=new Set;this.opts=e}getCustomDataKey(){return JSON.stringify({name:\"PnpInstaller\",version:2})}attachCustomData(e){this.customData=e}async installPackage(e,r,i){let n=P.stringifyIdent(e),s=e.reference,o=!!this.opts.project.tryWorkspaceByLocator(e),a=P.isVirtualLocator(e),l=e.peerDependencies.size>0&&!a,c=!l&&!o,u=!l&&e.linkType!==Qt.SOFT,g,f;if(c||u){let x=a?P.devirtualizeLocator(e):e;g=this.customData.store.get(x.locatorHash),typeof g==\"undefined\"&&(g=await h_e(r),e.linkType===Qt.HARD&&this.customData.store.set(x.locatorHash,g)),g.manifest.type===\"module\"&&(this.isESMLoaderRequired=!0),f=this.opts.project.getDependencyMeta(x,e.version)}let h=c?gb(e,g,f,{configuration:this.opts.project.configuration,report:this.opts.report}):[],p=u?await 
this.unplugPackageIfNeeded(e,g,r,f,i):r.packageFs;if(k.isAbsolute(r.prefixPath))throw new Error(`Assertion failed: Expected the prefix path (${r.prefixPath}) to be relative to the parent`);let m=k.resolve(p.getRealPath(),r.prefixPath),y=$L(this.opts.project.cwd,m),Q=new Map,S=new Set;if(a){for(let x of e.peerDependencies.values())Q.set(P.stringifyIdent(x),null),S.add(P.stringifyIdent(x));if(!o){let x=P.devirtualizeLocator(e);this.virtualTemplates.set(x.locatorHash,{location:$L(this.opts.project.cwd,Jr.resolveVirtual(m)),locator:x})}}return Se.getMapWithDefault(this.packageRegistry,n).set(s,{packageLocation:y,packageDependencies:Q,packagePeers:S,linkType:e.linkType,discardFromLookup:r.discardFromLookup||!1}),{packageLocation:m,buildDirective:h.length>0?h:null}}async attachInternalDependencies(e,r){let i=this.getPackageInformation(e);for(let[n,s]of r){let o=P.areIdentsEqual(n,s)?s.reference:[P.stringifyIdent(s),s.reference];i.packageDependencies.set(P.stringifyIdent(n),o)}}async attachExternalDependents(e,r){for(let i of r)this.getDiskInformation(i).packageDependencies.set(P.stringifyIdent(e),e.reference)}async finalizeInstall(){if(this.opts.project.configuration.get(\"pnpMode\")!==this.mode)return;let e=Pl(this.opts.project);if(K.existsSync(e.cjsLegacy)&&(this.opts.report.reportWarning($.UNNAMED,`Removing the old ${ae.pretty(this.opts.project.configuration,Pt.pnpJs,ae.Type.PATH)} file. You might need to manually update existing references to reference the new ${ae.pretty(this.opts.project.configuration,Pt.pnpCjs,ae.Type.PATH)} file. If you use Editor SDKs, you'll have to rerun ${ae.pretty(this.opts.project.configuration,\"yarn sdks\",ae.Type.CODE)}.`),await K.removePromise(e.cjsLegacy)),this.isEsmEnabled()||await K.removePromise(e.esmLoader),this.opts.project.configuration.get(\"nodeLinker\")!==\"pnp\"){await K.removePromise(e.cjs),await K.removePromise(this.opts.project.configuration.get(\"pnpDataPath\")),await K.removePromise(e.esmLoader);return}for(let{locator:u,location:g}of this.virtualTemplates.values())Se.getMapWithDefault(this.packageRegistry,P.stringifyIdent(u)).set(u.reference,{packageLocation:g,packageDependencies:new Map,packagePeers:new Set,linkType:Qt.SOFT,discardFromLookup:!1});this.packageRegistry.set(null,new Map([[null,this.getPackageInformation(this.opts.project.topLevelWorkspace.anchoredLocator)]]));let r=this.opts.project.configuration.get(\"pnpFallbackMode\"),i=this.opts.project.workspaces.map(({anchoredLocator:u})=>({name:P.stringifyIdent(u),reference:u.reference})),n=r!==\"none\",s=[],o=new Map,a=Se.buildIgnorePattern([\".yarn/sdks/**\",...this.opts.project.configuration.get(\"pnpIgnorePatterns\")]),l=this.packageRegistry,c=this.opts.project.configuration.get(\"pnpShebang\");if(r===\"dependencies-only\")for(let u of this.opts.project.storedPackages.values())this.opts.project.tryWorkspaceByLocator(u)&&s.push({name:P.stringifyIdent(u),reference:u.reference});return await this.finalizeInstallWithPnp({dependencyTreeRoots:i,enableTopLevelFallback:n,fallbackExclusionList:s,fallbackPool:o,ignorePattern:a,packageRegistry:l,shebang:c}),await this.asyncActions.wait(),{customData:this.customData}}async transformPnpSettings(e){}isEsmEnabled(){if(this.opts.project.configuration.sources.has(\"pnpEnableEsmLoader\"))return this.opts.project.configuration.get(\"pnpEnableEsmLoader\");if(this.isESMLoaderRequired)return!0;for(let e of this.opts.project.workspaces)if(e.manifest.type===\"module\")return!0;return!1}async finalizeInstallWithPnp(e){let 
r=Pl(this.opts.project),i=this.opts.project.configuration.get(\"pnpDataPath\"),n=await this.locateNodeModules(e.ignorePattern);if(n.length>0){this.opts.report.reportWarning($.DANGEROUS_NODE_MODULES,\"One or more node_modules have been detected and will be removed. This operation may take some time.\");for(let o of n)await K.removePromise(o)}if(await this.transformPnpSettings(e),this.opts.project.configuration.get(\"pnpEnableInlining\")){let o=xle(e);await K.changeFilePromise(r.cjs,o,{automaticNewlines:!0,mode:493}),await K.removePromise(i)}else{let o=k.relative(k.dirname(r.cjs),i),{dataFile:a,loaderFile:l}=Ple(te(N({},e),{dataLocation:o}));await K.changeFilePromise(r.cjs,l,{automaticNewlines:!0,mode:493}),await K.changeFilePromise(i,a,{automaticNewlines:!0,mode:420})}this.isEsmEnabled()&&(this.opts.report.reportWarning($.UNNAMED,\"ESM support for PnP uses the experimental loader API and is therefore experimental\"),await K.changeFilePromise(r.esmLoader,(0,_L.default)(),{automaticNewlines:!0,mode:420}));let s=this.opts.project.configuration.get(\"pnpUnpluggedFolder\");if(this.unpluggedPaths.size===0)await K.removePromise(s);else for(let o of await K.readdirPromise(s)){let a=k.resolve(s,o);this.unpluggedPaths.has(a)||await K.removePromise(a)}}async locateNodeModules(e){let r=[],i=e?new RegExp(e):null;for(let n of this.opts.project.workspaces){let s=k.join(n.cwd,\"node_modules\");if(i&&i.test(k.relative(this.opts.project.cwd,n.cwd))||!K.existsSync(s))continue;let o=await K.readdirPromise(s,{withFileTypes:!0}),a=o.filter(l=>!l.isDirectory()||l.name===\".bin\"||!l.name.startsWith(\".\"));if(a.length===o.length)r.push(s);else for(let l of a)r.push(k.join(s,l.name))}return r}async unplugPackageIfNeeded(e,r,i,n,s){return this.shouldBeUnplugged(e,r,n)?this.unplugPackage(e,i,s):i.packageFs}shouldBeUnplugged(e,r,i){return typeof i.unplugged!=\"undefined\"?i.unplugged:f_e.has(e.identHash)||e.conditions!=null?!0:r.manifest.preferUnplugged!==null?r.manifest.preferUnplugged:!!(gb(e,r,i,{configuration:this.opts.project.configuration}).length>0||r.misc.extractHint)}async unplugPackage(e,r,i){let n=Wm(e,{configuration:this.opts.project.configuration});return this.opts.project.disabledLocators.has(e.locatorHash)?new Pa(n,{baseFs:r.packageFs,pathUtils:k}):(this.unpluggedPaths.add(n),i.holdFetchResult(this.asyncActions.set(e.locatorHash,async()=>{let s=k.join(n,r.prefixPath,\".ready\");await K.existsPromise(s)||(this.opts.project.storedBuildState.delete(e.locatorHash),await K.mkdirPromise(n,{recursive:!0}),await K.copyPromise(n,Me.dot,{baseFs:r.packageFs,overwrite:!1}),await K.writeFilePromise(s,\"\"))})),new _t(n))}getPackageInformation(e){let r=P.stringifyIdent(e),i=e.reference,n=this.packageRegistry.get(r);if(!n)throw new Error(`Assertion failed: The package information store should have been available (for ${P.prettyIdent(this.opts.project.configuration,e)})`);let s=n.get(i);if(!s)throw new Error(`Assertion failed: The package information should have been available (for ${P.prettyLocator(this.opts.project.configuration,e)})`);return s}getDiskInformation(e){let r=Se.getMapWithDefault(this.packageRegistry,\"@@disk\"),i=$L(this.opts.project.cwd,e);return Se.getFactoryWithDefault(r,i,()=>({packageLocation:i,packageDependencies:new Map,packagePeers:new Set,linkType:Qt.SOFT,discardFromLookup:!1}))}};function $L(t,e){let r=k.relative(t,e);return r.match(/^\\.{0,2}\\//)||(r=`./${r}`),r.replace(/\\/?$/,\"/\")}async function h_e(t){var i;let e=(i=await At.tryFind(t.prefixPath,{baseFs:t.packageFs}))!=null?i:new 
At,r=new Set([\"preinstall\",\"install\",\"postinstall\"]);for(let n of e.scripts.keys())r.has(n)||e.scripts.delete(n);return{manifest:{scripts:e.scripts,preferUnplugged:e.preferUnplugged,type:e.type},misc:{extractHint:VL(t),hasBindingGyp:XL(t)}}}var jle=ge(ts());var zm=class extends Le{constructor(){super(...arguments);this.all=z.Boolean(\"-A,--all\",!1,{description:\"Unplug direct dependencies from the entire project\"});this.recursive=z.Boolean(\"-R,--recursive\",!1,{description:\"Unplug both direct and transitive dependencies\"});this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"});this.patterns=z.Rest()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd),n=await Nt.find(e);if(!i)throw new ht(r.cwd,this.context.cwd);if(e.get(\"nodeLinker\")!==\"pnp\")throw new Pe(\"This command can only be used if the `nodeLinker` option is set to `pnp`\");await r.restoreInstallState();let s=new Set(this.patterns),o=this.patterns.map(f=>{let h=P.parseDescriptor(f),p=h.range!==\"unknown\"?h:P.makeDescriptor(h,\"*\");if(!Wt.validRange(p.range))throw new Pe(`The range of the descriptor patterns must be a valid semver range (${P.prettyDescriptor(e,p)})`);return m=>{let y=P.stringifyIdent(m);return!jle.default.isMatch(y,P.stringifyIdent(p))||m.version&&!Wt.satisfiesWithPrereleases(m.version,p.range)?!1:(s.delete(f),!0)}}),a=()=>{let f=[];for(let h of r.storedPackages.values())!r.tryWorkspaceByLocator(h)&&!P.isVirtualLocator(h)&&o.some(p=>p(h))&&f.push(h);return f},l=f=>{let h=new Set,p=[],m=(y,Q)=>{if(!h.has(y.locatorHash)&&(h.add(y.locatorHash),!r.tryWorkspaceByLocator(y)&&o.some(S=>S(y))&&p.push(y),!(Q>0&&!this.recursive)))for(let S of y.dependencies.values()){let x=r.storedResolutions.get(S.descriptorHash);if(!x)throw new Error(\"Assertion failed: The resolution should have been registered\");let M=r.storedPackages.get(x);if(!M)throw new Error(\"Assertion failed: The package should have been registered\");m(M,Q+1)}};for(let y of f){let Q=r.storedPackages.get(y.anchoredLocator.locatorHash);if(!Q)throw new Error(\"Assertion failed: The package should have been registered\");m(Q,0)}return p},c,u;if(this.all&&this.recursive?(c=a(),u=\"the project\"):this.all?(c=l(r.workspaces),u=\"any workspace\"):(c=l([i]),u=\"this workspace\"),s.size>1)throw new Pe(`Patterns ${ae.prettyList(e,s,ae.Type.CODE)} don't match any packages referenced by ${u}`);if(s.size>0)throw new Pe(`Pattern ${ae.prettyList(e,s,ae.Type.CODE)} doesn't match any packages referenced by ${u}`);return c=Se.sortMap(c,f=>P.stringifyLocator(f)),(await Je.start({configuration:e,stdout:this.context.stdout,json:this.json},async f=>{var h;for(let p of c){let m=(h=p.version)!=null?h:\"unknown\",y=r.topLevelWorkspace.manifest.ensureDependencyMeta(P.makeDescriptor(p,m));y.unplugged=!0,f.reportInfo($.UNNAMED,`Will unpack ${P.prettyLocator(e,p)} to ${ae.pretty(e,Wm(p,{configuration:e}),ae.Type.PATH)}`),f.reportJson({locator:P.stringifyLocator(p),version:m})}await r.topLevelWorkspace.persistManifest(),f.reportSeparator(),await r.install({cache:n,report:f})})).exitCode()}};zm.paths=[[\"unplug\"]],zm.usage=Re.Usage({description:\"force the unpacking of a list of packages\",details:\"\\n      This command will add the selectors matching the specified patterns to the list of packages that must be unplugged when installed.\\n\\n      A package being unplugged means that instead of being referenced directly through its archive, it will be unpacked at 
install time in the directory configured via `pnpUnpluggedFolder`. Note that unpacking packages this way is generally not recommended because it'll make it harder to store your packages within the repository. However, it's a good approach to quickly and safely debug some packages, and can even sometimes be required depending on the context (for example when the package contains shellscripts).\\n\\n      Running the command will set a persistent flag inside your top-level `package.json`, in the `dependenciesMeta` field. As such, to undo its effects, you'll need to revert the changes made to the manifest and run `yarn install` to apply the modification.\\n\\n      By default, only direct dependencies from the current workspace are affected. If `-A,--all` is set, direct dependencies from the entire project are affected. Using the `-R,--recursive` flag will affect transitive dependencies as well as direct ones.\\n\\n      This command accepts glob patterns inside the scope and name components (not the range). Make sure to escape the patterns to prevent your own shell from trying to expand them.\\n    \",examples:[[\"Unplug the lodash dependency from the active workspace\",\"yarn unplug lodash\"],[\"Unplug all instances of lodash referenced by any workspace\",\"yarn unplug lodash -A\"],[\"Unplug all instances of lodash referenced by the active workspace and its dependencies\",\"yarn unplug lodash -R\"],[\"Unplug all instances of lodash, anywhere\",\"yarn unplug lodash -AR\"],[\"Unplug one specific version of lodash\",\"yarn unplug lodash@1.2.3\"],[\"Unplug all packages with the `@babel` scope\",\"yarn unplug '@babel/*'\"],[\"Unplug all packages (only for testing, not recommended)\",\"yarn unplug -R '*'\"]]});var Gle=zm;var Pl=t=>({cjs:k.join(t.cwd,Pt.pnpCjs),cjsLegacy:k.join(t.cwd,Pt.pnpJs),esmLoader:k.join(t.cwd,\".pnp.loader.mjs\")}),Jle=t=>/\\s/.test(t)?JSON.stringify(t):t;async function p_e(t,e,r){let i=Pl(t),n=`--require ${Jle(j.fromPortablePath(i.cjs))}`;if(K.existsSync(i.esmLoader)&&(n=`${n} --experimental-loader ${(0,qle.pathToFileURL)(j.fromPortablePath(i.esmLoader)).href}`),i.cjs.includes(\" \")&&Yle.default.lt(process.versions.node,\"12.0.0\"))throw new Error(`Expected the build location to not include spaces when using Node < 12.0.0 (${process.versions.node})`);if(K.existsSync(i.cjs)){let s=e.NODE_OPTIONS||\"\",o=/\\s*--require\\s+\\S*\\.pnp\\.c?js\\s*/g,a=/\\s*--experimental-loader\\s+\\S*\\.pnp\\.loader\\.mjs\\s*/;s=s.replace(o,\" \").replace(a,\" \").trim(),s=s?`${n} ${s}`:n,e.NODE_OPTIONS=s}}async function d_e(t,e){let r=Pl(t);e(r.cjs),e(r.esmLoader),e(t.configuration.get(\"pnpDataPath\")),e(t.configuration.get(\"pnpUnpluggedFolder\"))}var C_e={hooks:{populateYarnPaths:d_e,setupScriptEnvironment:p_e},configuration:{nodeLinker:{description:'The linker used for installing Node packages, one of: \"pnp\", \"node-modules\"',type:Ie.STRING,default:\"pnp\"},pnpMode:{description:\"If 'strict', generates standard PnP maps. If 'loose', merges them with the n_m resolution.\",type:Ie.STRING,default:\"strict\"},pnpShebang:{description:\"String to prepend to the generated PnP script\",type:Ie.STRING,default:\"#!/usr/bin/env node\"},pnpIgnorePatterns:{description:\"Array of glob patterns; files matching them will use the classic resolution\",type:Ie.STRING,default:[],isArray:!0},pnpEnableEsmLoader:{description:\"If true, Yarn will generate an ESM loader (`.pnp.loader.mjs`). 
If this is not explicitly set, Yarn tries to automatically detect whether ESM support is required.\",type:Ie.BOOLEAN,default:!1},pnpEnableInlining:{description:\"If true, the PnP data will be inlined along with the generated loader\",type:Ie.BOOLEAN,default:!0},pnpFallbackMode:{description:\"If true, the generated PnP loader will follow the top-level fallback rule\",type:Ie.STRING,default:\"dependencies-only\"},pnpUnpluggedFolder:{description:\"Folder where the unplugged packages must be stored\",type:Ie.ABSOLUTE_PATH,default:\"./.yarn/unplugged\"},pnpDataPath:{description:\"Path of the file where the PnP data (used by the loader) must be written\",type:Ie.ABSOLUTE_PATH,default:\"./.pnp.data.json\"}},linkers:[Qu],commands:[Gle]},m_e=C_e;var Zle=ge(Xle());var sT=ge(require(\"crypto\")),$le=ge(require(\"fs\")),ece=1,ai=\"node_modules\",oT=\".bin\",tce=\".yarn-state.yml\",Li;(function(i){i.CLASSIC=\"classic\",i.HARDLINKS_LOCAL=\"hardlinks-local\",i.HARDLINKS_GLOBAL=\"hardlinks-global\"})(Li||(Li={}));var aT=class{constructor(){this.installStateCache=new Map}supportsPackage(e,r){return this.isEnabled(r)}async findPackageLocation(e,r){if(!this.isEnabled(r))throw new Error(\"Assertion failed: Expected the node-modules linker to be enabled\");let i=r.project.tryWorkspaceByLocator(e);if(i)return i.cwd;let n=await Se.getFactoryWithDefault(this.installStateCache,r.project.cwd,async()=>await AT(r.project,{unrollAliases:!0}));if(n===null)throw new Pe(\"Couldn't find the node_modules state file - running an install might help (findPackageLocation)\");let s=n.locatorMap.get(P.stringifyLocator(e));if(!s){let a=new Pe(`Couldn't find ${P.prettyLocator(r.project.configuration,e)} in the currently installed node_modules map - running an install might help`);throw a.code=\"LOCATOR_NOT_INSTALLED\",a}let o=r.project.configuration.startingCwd;return s.locations.find(a=>k.contains(o,a))||s.locations[0]}async findPackageLocator(e,r){if(!this.isEnabled(r))return null;let i=await Se.getFactoryWithDefault(this.installStateCache,r.project.cwd,async()=>await AT(r.project,{unrollAliases:!0}));if(i===null)return null;let{locationRoot:n,segments:s}=fb(k.resolve(e),{skipPrefix:r.project.cwd}),o=i.locationTree.get(n);if(!o)return null;let a=o.locator;for(let l of s){if(o=o.children.get(l),!o)break;a=o.locator||a}return P.parseLocator(a)}makeInstaller(e){return new rce(e)}isEnabled(e){return e.project.configuration.get(\"nodeLinker\")===\"node-modules\"}},rce=class{constructor(e){this.opts=e;this.localStore=new Map;this.realLocatorChecksums=new Map;this.customData={store:new Map}}getCustomDataKey(){return JSON.stringify({name:\"NodeModulesInstaller\",version:2})}attachCustomData(e){this.customData=e}async installPackage(e,r){var u;let i=k.resolve(r.packageFs.getRealPath(),r.prefixPath),n=this.customData.store.get(e.locatorHash);if(typeof n==\"undefined\"&&(n=await L_e(e,r),e.linkType===Qt.HARD&&this.customData.store.set(e.locatorHash,n)),!wo.checkManifestCompatibility(e))return{packageLocation:null,buildDirective:null};let s=new Map,o=new Set;s.has(P.stringifyIdent(e))||s.set(P.stringifyIdent(e),e.reference);let a=e;if(P.isVirtualLocator(e)){a=P.devirtualizeLocator(e);for(let g of e.peerDependencies.values())s.set(P.stringifyIdent(g),null),o.add(P.stringifyIdent(g))}let 
l={packageLocation:`${j.fromPortablePath(i)}/`,packageDependencies:s,packagePeers:o,linkType:e.linkType,discardFromLookup:(u=r.discardFromLookup)!=null?u:!1};this.localStore.set(e.locatorHash,{pkg:e,customPackageData:n,dependencyMeta:this.opts.project.getDependencyMeta(e,e.version),pnpNode:l});let c=r.checksum?r.checksum.substring(r.checksum.indexOf(\"/\")+1):null;return this.realLocatorChecksums.set(a.locatorHash,c),{packageLocation:i,buildDirective:null}}async attachInternalDependencies(e,r){let i=this.localStore.get(e.locatorHash);if(typeof i==\"undefined\")throw new Error(\"Assertion failed: Expected information object to have been registered\");for(let[n,s]of r){let o=P.areIdentsEqual(n,s)?s.reference:[P.stringifyIdent(s),s.reference];i.pnpNode.packageDependencies.set(P.stringifyIdent(n),o)}}async attachExternalDependents(e,r){throw new Error(\"External dependencies haven't been implemented for the node-modules linker\")}async finalizeInstall(){if(this.opts.project.configuration.get(\"nodeLinker\")!==\"node-modules\")return;let e=new Jr({baseFs:new ms({libzip:await fn(),maxOpenFiles:80,readOnlyArchives:!0})}),r=await AT(this.opts.project),i=this.opts.project.configuration.get(\"nmMode\");(r===null||i!==r.nmMode)&&(this.opts.project.storedBuildState.clear(),r={locatorMap:new Map,binSymlinks:new Map,locationTree:new Map,nmMode:i});let n=new Map(this.opts.project.workspaces.map(f=>{var p,m;let h=this.opts.project.configuration.get(\"nmHoistingLimits\");try{h=Se.validateEnum(Mn,(m=(p=f.manifest.installConfig)==null?void 0:p.hoistingLimits)!=null?m:h)}catch(y){let Q=P.prettyWorkspace(this.opts.project.configuration,f);this.opts.report.reportWarning($.INVALID_MANIFEST,`${Q}: Invalid 'installConfig.hoistingLimits' value. Expected one of ${Object.values(Mn).join(\", \")}, using default: \"${h}\"`)}return[f.relativeCwd,h]})),s=new Map(this.opts.project.workspaces.map(f=>{var p,m;let h=this.opts.project.configuration.get(\"nmSelfReferences\");return h=(m=(p=f.manifest.installConfig)==null?void 0:p.selfReferences)!=null?m:h,[f.relativeCwd,h]})),o={VERSIONS:{std:1},topLevel:{name:null,reference:null},getLocator:(f,h)=>Array.isArray(h)?{name:h[0],reference:h[1]}:{name:f,reference:h},getDependencyTreeRoots:()=>this.opts.project.workspaces.map(f=>{let h=f.anchoredLocator;return{name:P.stringifyIdent(f.locator),reference:h.reference}}),getPackageInformation:f=>{let h=f.reference===null?this.opts.project.topLevelWorkspace.anchoredLocator:P.makeLocator(P.parseIdent(f.name),f.reference),p=this.localStore.get(h.locatorHash);if(typeof p==\"undefined\")throw new Error(\"Assertion failed: Expected the package reference to have been registered\");return p.pnpNode},findPackageLocator:f=>{let h=this.opts.project.tryWorkspaceByCwd(j.toPortablePath(f));if(h!==null){let p=h.anchoredLocator;return{name:P.stringifyIdent(p),reference:p.reference}}throw new Error(\"Assertion failed: Unimplemented\")},resolveToUnqualified:()=>{throw new Error(\"Assertion failed: Unimplemented\")},resolveUnqualified:()=>{throw new Error(\"Assertion failed: Unimplemented\")},resolveRequest:()=>{throw new Error(\"Assertion failed: Unimplemented\")},resolveVirtual:f=>j.fromPortablePath(Jr.resolveVirtual(j.toPortablePath(f)))},{tree:a,errors:l,preserveSymlinksRequired:c}=Gm(o,{pnpifyFs:!1,validateExternalSoftLinks:!0,hoistingLimitsByCwd:n,project:this.opts.project,selfReferencesByCwd:s});if(!a){for(let{messageName:f,text:h}of l)this.opts.report.reportError(f,h);return}let u=HL(a);await 
T_e(r,u,{baseFs:e,project:this.opts.project,report:this.opts.report,realLocatorChecksums:this.realLocatorChecksums,loadManifest:async f=>{let h=P.parseLocator(f),p=this.localStore.get(h.locatorHash);if(typeof p==\"undefined\")throw new Error(\"Assertion failed: Expected the slot to exist\");return p.customPackageData.manifest}});let g=[];for(let[f,h]of u.entries()){if(ice(f))continue;let p=P.parseLocator(f),m=this.localStore.get(p.locatorHash);if(typeof m==\"undefined\")throw new Error(\"Assertion failed: Expected the slot to exist\");if(this.opts.project.tryWorkspaceByLocator(m.pkg))continue;let y=wo.extractBuildScripts(m.pkg,m.customPackageData,m.dependencyMeta,{configuration:this.opts.project.configuration,report:this.opts.report});y.length!==0&&g.push({buildLocations:h.locations,locatorHash:p.locatorHash,buildDirective:y})}return c&&this.opts.report.reportWarning($.NM_PRESERVE_SYMLINKS_REQUIRED,`The application uses portals and that's why ${ae.pretty(this.opts.project.configuration,\"--preserve-symlinks\",ae.Type.CODE)} Node option is required for launching it`),{customData:this.customData,records:g}}};async function L_e(t,e){var n;let r=(n=await At.tryFind(e.prefixPath,{baseFs:e.packageFs}))!=null?n:new At,i=new Set([\"preinstall\",\"install\",\"postinstall\"]);for(let s of r.scripts.keys())i.has(s)||r.scripts.delete(s);return{manifest:{bin:r.bin,scripts:r.scripts},misc:{extractHint:wo.getExtractHint(e),hasBindingGyp:wo.hasBindingGyp(e)}}}async function O_e(t,e,r,i){let n=\"\";n+=`# Warning: This file is automatically generated. Removing it is fine, but will\n`,n+=`# cause your node_modules installation to become invalidated.\n`,n+=`\n`,n+=`__metadata:\n`,n+=`  version: ${ece}\n`,n+=`  nmMode: ${i.value}\n`;let s=Array.from(e.keys()).sort(),o=P.stringifyLocator(t.topLevelWorkspace.anchoredLocator);for(let c of s){let u=e.get(c);n+=`\n`,n+=`${JSON.stringify(c)}:\n`,n+=`  locations:\n`;for(let g of u.locations){let f=k.contains(t.cwd,g);if(f===null)throw new Error(`Assertion failed: Expected the path to be within the project (${g})`);n+=`    - ${JSON.stringify(f)}\n`}if(u.aliases.length>0){n+=`  aliases:\n`;for(let g of u.aliases)n+=`    - ${JSON.stringify(g)}\n`}if(c===o&&r.size>0){n+=`  bin:\n`;for(let[g,f]of r){let h=k.contains(t.cwd,g);if(h===null)throw new Error(`Assertion failed: Expected the path to be within the project (${g})`);n+=`    ${JSON.stringify(h)}:\n`;for(let[p,m]of f){let y=k.relative(k.join(g,ai),m);n+=`      ${JSON.stringify(p)}: ${JSON.stringify(y)}\n`}}}}let a=t.cwd,l=k.join(a,ai,tce);await K.changeFilePromise(l,n,{automaticNewlines:!0})}async function AT(t,{unrollAliases:e=!1}={}){let r=t.cwd,i=k.join(r,ai,tce);if(!K.existsSync(i))return null;let n=Qi(await K.readFilePromise(i,\"utf8\"));if(n.__metadata.version>ece)return null;let s=n.__metadata.nmMode||Li.CLASSIC,o=new Map,a=new Map;delete n.__metadata;for(let[l,c]of Object.entries(n)){let u=c.locations.map(f=>k.join(r,f)),g=c.bin;if(g)for(let[f,h]of Object.entries(g)){let p=k.join(r,j.toPortablePath(f)),m=Se.getMapWithDefault(a,p);for(let[y,Q]of Object.entries(h))m.set(qr(y),j.toPortablePath([p,ai,Q].join(k.delimiter)))}if(o.set(l,{target:Me.dot,linkType:Qt.HARD,locations:u,aliases:c.aliases||[]}),e&&c.aliases)for(let f of c.aliases){let{scope:h,name:p}=P.parseLocator(l),m=P.makeLocator(P.makeIdent(h,p),f),y=P.stringifyLocator(m);o.set(y,{target:Me.dot,linkType:Qt.HARD,locations:u,aliases:[]})}}return{locatorMap:o,binSymlinks:a,locationTree:nce(o,{skipPrefix:t.cwd}),nmMode:s}}var 
ah=async(t,e)=>{if(t.split(k.sep).indexOf(ai)<0)throw new Error(`Assertion failed: trying to remove dir that doesn't contain node_modules: ${t}`);try{if(!e.innerLoop){let i=e.allowSymlink?await K.statPromise(t):await K.lstatPromise(t);if(e.allowSymlink&&!i.isDirectory()||!e.allowSymlink&&i.isSymbolicLink()){await K.unlinkPromise(t);return}}let r=await K.readdirPromise(t,{withFileTypes:!0});for(let i of r){let n=k.join(t,qr(i.name));i.isDirectory()?(i.name!==ai||e&&e.innerLoop)&&await ah(n,{innerLoop:!0,contentsOnly:!1}):await K.unlinkPromise(n)}e.contentsOnly||await K.rmdirPromise(t)}catch(r){if(r.code!==\"ENOENT\"&&r.code!==\"ENOTEMPTY\")throw r}},sce=4,fb=(t,{skipPrefix:e})=>{let r=k.contains(e,t);if(r===null)throw new Error(`Assertion failed: Writing attempt prevented to ${t} which is outside project root: ${e}`);let i=r.split(k.sep).filter(l=>l!==\"\"),n=i.indexOf(ai),s=i.slice(0,n).join(k.sep),o=k.join(e,s),a=i.slice(n);return{locationRoot:o,segments:a}},nce=(t,{skipPrefix:e})=>{let r=new Map;if(t===null)return r;let i=()=>({children:new Map,linkType:Qt.HARD});for(let[n,s]of t.entries()){if(s.linkType===Qt.SOFT&&k.contains(e,s.target)!==null){let a=Se.getFactoryWithDefault(r,s.target,i);a.locator=n,a.linkType=s.linkType}for(let o of s.locations){let{locationRoot:a,segments:l}=fb(o,{skipPrefix:e}),c=Se.getFactoryWithDefault(r,a,i);for(let u=0;u<l.length;++u){let g=l[u];if(g!==\".\"){let f=Se.getFactoryWithDefault(c.children,g,i);c.children.set(g,f),c=f}u===l.length-1&&(c.locator=n,c.linkType=s.linkType)}}}return r},lT=async(t,e)=>{let r;try{process.platform===\"win32\"&&(r=await K.lstatPromise(t))}catch(i){}process.platform==\"win32\"&&(!r||r.isDirectory())?await K.symlinkPromise(t,e,\"junction\"):await K.symlinkPromise(k.relative(k.dirname(e),t),e)};async function oce(t,e,r){let i=k.join(t,qr(`${sT.default.randomBytes(16).toString(\"hex\")}.tmp`));try{await K.writeFilePromise(i,r);try{await K.linkPromise(i,e)}catch(n){}}finally{await K.unlinkPromise(i)}}async function M_e({srcPath:t,dstPath:e,srcMode:r,globalHardlinksStore:i,baseFs:n,nmMode:s,digest:o}){if(s.value===Li.HARDLINKS_GLOBAL&&i&&o){let l=k.join(i,o.substring(0,2),`${o.substring(2)}.dat`),c;try{if(await Dn.checksumFile(l,{baseFs:K,algorithm:\"sha1\"})!==o){let g=k.join(i,qr(`${sT.default.randomBytes(16).toString(\"hex\")}.tmp`));await K.renamePromise(l,g);let f=await n.readFilePromise(t);await K.writeFilePromise(g,f);try{await K.linkPromise(g,l),await K.unlinkPromise(g)}catch(h){}}await K.linkPromise(l,e),c=!0}catch(u){c=!1}if(!c){let u=await n.readFilePromise(t);await oce(i,l,u);try{await K.linkPromise(l,e)}catch(g){g&&g.code&&g.code==\"EXDEV\"&&(s.value=Li.HARDLINKS_LOCAL,await n.copyFilePromise(t,e))}}}else await n.copyFilePromise(t,e);let a=r&511;a!==420&&await K.chmodPromise(e,a)}var Dl;(function(i){i.FILE=\"file\",i.DIRECTORY=\"directory\",i.SYMLINK=\"symlink\"})(Dl||(Dl={}));var U_e=async(t,e,{baseFs:r,globalHardlinksStore:i,nmMode:n,packageChecksum:s})=>{await K.mkdirPromise(t,{recursive:!0});let o=async(l=Me.dot)=>{let c=k.join(e,l),u=await r.readdirPromise(c,{withFileTypes:!0}),g=new Map;for(let f of u){let h=k.join(l,f.name),p,m=k.join(c,f.name);if(f.isFile()){if(p={kind:Dl.FILE,mode:(await r.lstatPromise(m)).mode},n.value===Li.HARDLINKS_GLOBAL){let y=await Dn.checksumFile(m,{baseFs:r,algorithm:\"sha1\"});p.digest=y}}else if(f.isDirectory())p={kind:Dl.DIRECTORY};else if(f.isSymbolicLink())p={kind:Dl.SYMLINK,symlinkTo:await r.readlinkPromise(m)};else throw new Error(`Unsupported file type (file: ${m}, mode: 
0o${await r.statSync(m).mode.toString(8).padStart(6,\"0\")})`);if(g.set(h,p),f.isDirectory()&&h!==ai){let y=await o(h);for(let[Q,S]of y)g.set(Q,S)}}return g},a;if(n.value===Li.HARDLINKS_GLOBAL&&i&&s){let l=k.join(i,s.substring(0,2),`${s.substring(2)}.json`);try{a=new Map(Object.entries(JSON.parse(await K.readFilePromise(l,\"utf8\"))))}catch(c){a=await o(),await oce(i,l,Buffer.from(JSON.stringify(Object.fromEntries(a))))}}else a=await o();for(let[l,c]of a){let u=k.join(e,l),g=k.join(t,l);c.kind===Dl.DIRECTORY?await K.mkdirPromise(g,{recursive:!0}):c.kind===Dl.FILE?await M_e({srcPath:u,dstPath:g,srcMode:c.mode,digest:c.digest,nmMode:n,baseFs:r,globalHardlinksStore:i}):c.kind===Dl.SYMLINK&&await lT(k.resolve(k.dirname(g),c.symlinkTo),g)}};function K_e(t,e){let r=new Map([...t]),i=new Map([...e]);for(let[n,s]of t){let o=k.join(n,ai);if(!K.existsSync(o)){s.children.delete(ai);for(let a of i.keys())k.contains(o,a)!==null&&i.delete(a)}}return{locationTree:r,binSymlinks:i}}function ice(t){let e=P.parseDescriptor(t);return P.isVirtualDescriptor(e)&&(e=P.devirtualizeDescriptor(e)),e.range.startsWith(\"link:\")}async function H_e(t,e,r,{loadManifest:i}){let n=new Map;for(let[a,{locations:l}]of t){let c=ice(a)?null:await i(a,l[0]),u=new Map;if(c)for(let[g,f]of c.bin){let h=k.join(l[0],f);f!==\"\"&&K.existsSync(h)&&u.set(g,f)}n.set(a,u)}let s=new Map,o=(a,l,c)=>{let u=new Map,g=k.contains(r,a);if(c.locator&&g!==null){let f=n.get(c.locator);for(let[h,p]of f){let m=k.join(a,j.toPortablePath(p));u.set(qr(h),m)}for(let[h,p]of c.children){let m=k.join(a,h),y=o(m,m,p);y.size>0&&s.set(a,new Map([...s.get(a)||new Map,...y]))}}else for(let[f,h]of c.children){let p=o(k.join(a,f),l,h);for(let[m,y]of p)u.set(m,y)}return u};for(let[a,l]of e){let c=o(a,a,l);c.size>0&&s.set(a,new Map([...s.get(a)||new Map,...c]))}return s}var ace=(t,e)=>{if(!t||!e)return t===e;let r=P.parseLocator(t);P.isVirtualLocator(r)&&(r=P.devirtualizeLocator(r));let i=P.parseLocator(e);return P.isVirtualLocator(i)&&(i=P.devirtualizeLocator(i)),P.areLocatorsEqual(r,i)};function cT(t){return k.join(t.get(\"globalFolder\"),\"store\")}async function T_e(t,e,{baseFs:r,project:i,report:n,loadManifest:s,realLocatorChecksums:o}){let a=k.join(i.cwd,ai),{locationTree:l,binSymlinks:c}=K_e(t.locationTree,t.binSymlinks),u=nce(e,{skipPrefix:i.cwd}),g=[],f=async({srcDir:U,dstDir:J,linkType:W,globalHardlinksStore:ee,nmMode:Z,packageChecksum:A})=>{let ne=(async()=>{try{W===Qt.SOFT?(await K.mkdirPromise(k.dirname(J),{recursive:!0}),await lT(k.resolve(U),J)):await U_e(J,U,{baseFs:r,globalHardlinksStore:ee,nmMode:Z,packageChecksum:A})}catch(le){throw le.message=`While persisting ${U} -> ${J} ${le.message}`,le}finally{S.tick()}})().then(()=>g.splice(g.indexOf(ne),1));g.push(ne),g.length>sce&&await Promise.race(g)},h=async(U,J,W)=>{let ee=(async()=>{let Z=async(A,ne,le)=>{try{le.innerLoop||await K.mkdirPromise(ne,{recursive:!0});let Ae=await K.readdirPromise(A,{withFileTypes:!0});for(let T of Ae){if(!le.innerLoop&&T.name===oT)continue;let L=k.join(A,T.name),Ee=k.join(ne,T.name);T.isDirectory()?(T.name!==ai||le&&le.innerLoop)&&(await K.mkdirPromise(Ee,{recursive:!0}),await Z(L,Ee,te(N({},le),{innerLoop:!0}))):Y.value===Li.HARDLINKS_LOCAL||Y.value===Li.HARDLINKS_GLOBAL?await K.linkPromise(L,Ee):await K.copyFilePromise(L,Ee,$le.default.constants.COPYFILE_FICLONE)}}catch(Ae){throw le.innerLoop||(Ae.message=`While cloning ${A} -> ${ne} ${Ae.message}`),Ae}finally{le.innerLoop||S.tick()}};await Z(U,J,W)})().then(()=>g.splice(g.indexOf(ee),1));g.push(ee),g.length>sce&&await 
Promise.race(g)},p=async(U,J,W)=>{if(W)for(let[ee,Z]of J.children){let A=W.children.get(ee);await p(k.join(U,ee),Z,A)}else{J.children.has(ai)&&await ah(k.join(U,ai),{contentsOnly:!1});let ee=k.basename(U)===ai&&u.has(k.join(k.dirname(U),k.sep));await ah(U,{contentsOnly:U===a,allowSymlink:ee})}};for(let[U,J]of l){let W=u.get(U);for(let[ee,Z]of J.children){if(ee===\".\")continue;let A=W&&W.children.get(ee),ne=k.join(U,ee);await p(ne,Z,A)}}let m=async(U,J,W)=>{if(W){ace(J.locator,W.locator)||await ah(U,{contentsOnly:J.linkType===Qt.HARD});for(let[ee,Z]of J.children){let A=W.children.get(ee);await m(k.join(U,ee),Z,A)}}else{J.children.has(ai)&&await ah(k.join(U,ai),{contentsOnly:!0});let ee=k.basename(U)===ai&&u.has(k.join(k.dirname(U),k.sep));await ah(U,{contentsOnly:J.linkType===Qt.HARD,allowSymlink:ee})}};for(let[U,J]of u){let W=l.get(U);for(let[ee,Z]of J.children){if(ee===\".\")continue;let A=W&&W.children.get(ee);await m(k.join(U,ee),Z,A)}}let y=new Map,Q=[];for(let[U,{locations:J}]of t.locatorMap.entries())for(let W of J){let{locationRoot:ee,segments:Z}=fb(W,{skipPrefix:i.cwd}),A=u.get(ee),ne=ee;if(A){for(let le of Z)if(ne=k.join(ne,le),A=A.children.get(le),!A)break;if(A){let le=ace(A.locator,U),Ae=e.get(A.locator),T=Ae.target,L=ne,Ee=Ae.linkType;if(le)y.has(T)||y.set(T,L);else if(T!==L){let we=P.parseLocator(A.locator);P.isVirtualLocator(we)&&(we=P.devirtualizeLocator(we)),Q.push({srcDir:T,dstDir:L,linkType:Ee,realLocatorHash:we.locatorHash})}}}}for(let[U,{locations:J}]of e.entries())for(let W of J){let{locationRoot:ee,segments:Z}=fb(W,{skipPrefix:i.cwd}),A=l.get(ee),ne=u.get(ee),le=ee,Ae=e.get(U),T=P.parseLocator(U);P.isVirtualLocator(T)&&(T=P.devirtualizeLocator(T));let L=T.locatorHash,Ee=Ae.target,we=W;if(Ee===we)continue;let qe=Ae.linkType;for(let re of Z)ne=ne.children.get(re);if(!A)Q.push({srcDir:Ee,dstDir:we,linkType:qe,realLocatorHash:L});else for(let re of Z)if(le=k.join(le,re),A=A.children.get(re),!A){Q.push({srcDir:Ee,dstDir:we,linkType:qe,realLocatorHash:L});break}}let S=Ji.progressViaCounter(Q.length),x=n.reportProgress(S),M=i.configuration.get(\"nmMode\"),Y={value:M};try{let U=Y.value===Li.HARDLINKS_GLOBAL?`${cT(i.configuration)}/v1`:null;if(U&&!await K.existsPromise(U)){await K.mkdirpPromise(U);for(let W=0;W<256;W++)await K.mkdirPromise(k.join(U,W.toString(16).padStart(2,\"0\")))}for(let W of Q)(W.linkType===Qt.SOFT||!y.has(W.srcDir))&&(y.set(W.srcDir,W.dstDir),await f(te(N({},W),{globalHardlinksStore:U,nmMode:Y,packageChecksum:o.get(W.realLocatorHash)||null})));await Promise.all(g),g.length=0;for(let W of Q){let ee=y.get(W.srcDir);W.linkType!==Qt.SOFT&&W.dstDir!==ee&&await h(ee,W.dstDir,{nmMode:Y})}await Promise.all(g),await K.mkdirPromise(a,{recursive:!0});let J=await H_e(e,u,i.cwd,{loadManifest:s});await j_e(c,J,i.cwd),await O_e(i,e,J,Y),M==Li.HARDLINKS_GLOBAL&&Y.value==Li.HARDLINKS_LOCAL&&n.reportWarningOnce($.NM_HARDLINKS_MODE_DOWNGRADED,\"'nmMode' has been downgraded to 'hardlinks-local' due to global cache and install folder being on different devices\")}finally{x.stop()}}async function j_e(t,e,r){for(let i of t.keys()){if(k.contains(r,i)===null)throw new Error(`Assertion failed. Excepted bin symlink location to be inside project dir, instead it was at ${i}`);if(!e.has(i)){let n=k.join(i,ai,oT);await K.removePromise(n)}}for(let[i,n]of e){if(k.contains(r,i)===null)throw new Error(`Assertion failed. 
Excepted bin symlink location to be inside project dir, instead it was at ${i}`);let s=k.join(i,ai,oT),o=t.get(i)||new Map;await K.mkdirPromise(s,{recursive:!0});for(let a of o.keys())n.has(a)||(await K.removePromise(k.join(s,a)),process.platform===\"win32\"&&await K.removePromise(k.join(s,qr(`${a}.cmd`))));for(let[a,l]of n){let c=o.get(a),u=k.join(s,a);c!==l&&(process.platform===\"win32\"?await(0,Zle.default)(j.fromPortablePath(l),j.fromPortablePath(u),{createPwshFile:!1}):(await K.removePromise(u),await lT(l,u),k.contains(r,await K.realpathPromise(l))!==null&&await K.chmodPromise(l,493)))}}}var uT=class extends Qu{constructor(){super(...arguments);this.mode=\"loose\"}makeInstaller(e){return new Ace(e)}},Ace=class extends sh{constructor(){super(...arguments);this.mode=\"loose\"}async transformPnpSettings(e){let r=new Jr({baseFs:new ms({libzip:await fn(),maxOpenFiles:80,readOnlyArchives:!0})}),i=Ole(e,this.opts.project.cwd,r),{tree:n,errors:s}=Gm(i,{pnpifyFs:!1,project:this.opts.project});if(!n){for(let{messageName:u,text:g}of s)this.opts.report.reportError(u,g);return}let o=new Map;e.fallbackPool=o;let a=(u,g)=>{let f=P.parseLocator(g.locator),h=P.stringifyIdent(f);h===u?o.set(u,f.reference):o.set(u,[h,f.reference])},l=k.join(this.opts.project.cwd,Pt.nodeModules),c=n.get(l);if(typeof c!=\"undefined\"){if(\"target\"in c)throw new Error(\"Assertion failed: Expected the root junction point to be a directory\");for(let u of c.dirList){let g=k.join(l,u),f=n.get(g);if(typeof f==\"undefined\")throw new Error(\"Assertion failed: Expected the child to have been registered\");if(\"target\"in f)a(u,f);else for(let h of f.dirList){let p=k.join(g,h),m=n.get(p);if(typeof m==\"undefined\")throw new Error(\"Assertion failed: Expected the subchild to have been registered\");if(\"target\"in m)a(`${u}/${h}`,m);else throw new Error(\"Assertion failed: Expected the leaf junction to be a package\")}}}}};var G_e={hooks:{cleanGlobalArtifacts:async t=>{let e=cT(t);await K.removePromise(e)}},configuration:{nmHoistingLimits:{description:\"Prevent packages to be hoisted past specific levels\",type:Ie.STRING,values:[Mn.WORKSPACES,Mn.DEPENDENCIES,Mn.NONE],default:Mn.NONE},nmMode:{description:'If set to \"hardlinks-local\" Yarn will utilize hardlinks to reduce disk space consumption inside \"node_modules\" directories. 
With \"hardlinks-global\" Yarn will use global content addressable storage to reduce \"node_modules\" size across all the projects using this option.',type:Ie.STRING,values:[Li.CLASSIC,Li.HARDLINKS_LOCAL,Li.HARDLINKS_GLOBAL],default:Li.CLASSIC},nmSelfReferences:{description:\"If set to 'false' the workspace will not be allowed to require itself and corresponding self-referencing symlink will not be created\",type:Ie.BOOLEAN,default:!0}},linkers:[aT,uT]},Y_e=G_e;var uO={};ft(uO,{default:()=>ZVe,npmConfigUtils:()=>br,npmHttpUtils:()=>zt,npmPublishUtils:()=>yh});var fce=ge(ti());var Cr=\"npm:\";var zt={};ft(zt,{AuthType:()=>cs,customPackageError:()=>W_e,del:()=>V_e,get:()=>Bo,getIdentUrl:()=>Fl,handleInvalidAuthenticationError:()=>Rl,post:()=>z_e,put:()=>__e});var uce=ge($C()),gce=ge(require(\"url\"));var br={};ft(br,{RegistryType:()=>yA,getAuditRegistry:()=>q_e,getAuthConfiguration:()=>hT,getDefaultRegistry:()=>hb,getPublishRegistry:()=>lce,getRegistryConfiguration:()=>cce,getScopeConfiguration:()=>fT,getScopeRegistry:()=>wA,normalizeRegistry:()=>ha});var yA;(function(i){i.AUDIT_REGISTRY=\"npmAuditRegistry\",i.FETCH_REGISTRY=\"npmRegistryServer\",i.PUBLISH_REGISTRY=\"npmPublishRegistry\"})(yA||(yA={}));function ha(t){return t.replace(/\\/$/,\"\")}function q_e(t,{configuration:e}){let r=e.get(yA.AUDIT_REGISTRY);return r!==null?ha(r):lce(t,{configuration:e})}function lce(t,{configuration:e}){var r;return((r=t.publishConfig)==null?void 0:r.registry)?ha(t.publishConfig.registry):t.name?wA(t.name.scope,{configuration:e,type:yA.PUBLISH_REGISTRY}):hb({configuration:e,type:yA.PUBLISH_REGISTRY})}function wA(t,{configuration:e,type:r=yA.FETCH_REGISTRY}){let i=fT(t,{configuration:e});if(i===null)return hb({configuration:e,type:r});let n=i.get(r);return n===null?hb({configuration:e,type:r}):ha(n)}function hb({configuration:t,type:e=yA.FETCH_REGISTRY}){let r=t.get(e);return ha(r!==null?r:t.get(yA.FETCH_REGISTRY))}function cce(t,{configuration:e}){let r=e.get(\"npmRegistries\"),i=ha(t),n=r.get(i);if(typeof n!=\"undefined\")return n;let s=r.get(i.replace(/^[a-z]+:/,\"\"));return typeof s!=\"undefined\"?s:null}function fT(t,{configuration:e}){if(t===null)return null;let i=e.get(\"npmScopes\").get(t);return i||null}function hT(t,{configuration:e,ident:r}){let i=r&&fT(r.scope,{configuration:e});return(i==null?void 0:i.get(\"npmAuthIdent\"))||(i==null?void 0:i.get(\"npmAuthToken\"))?i:cce(t,{configuration:e})||e}var cs;(function(n){n[n.NO_AUTH=0]=\"NO_AUTH\",n[n.BEST_EFFORT=1]=\"BEST_EFFORT\",n[n.CONFIGURATION=2]=\"CONFIGURATION\",n[n.ALWAYS_AUTH=3]=\"ALWAYS_AUTH\"})(cs||(cs={}));async function Rl(t,{attemptedAs:e,registry:r,headers:i,configuration:n}){var s,o;if(pb(t))throw new ct($.AUTHENTICATION_INVALID,\"Invalid OTP token\");if(((s=t.originalError)==null?void 0:s.name)===\"HTTPError\"&&((o=t.originalError)==null?void 0:o.response.statusCode)===401)throw new ct($.AUTHENTICATION_INVALID,`Invalid authentication (${typeof e!=\"string\"?`as ${await J_e(r,i,{configuration:n})}`:`attempted as ${e}`})`)}function W_e(t){var e;return((e=t.response)==null?void 0:e.statusCode)===404?\"Package not found\":null}function Fl(t){return t.scope?`/@${t.scope}%2f${t.name}`:`/${t.name}`}async function Bo(t,a){var l=a,{configuration:e,headers:r,ident:i,authType:n,registry:s}=l,o=Tr(l,[\"configuration\",\"headers\",\"ident\",\"authType\",\"registry\"]);if(i&&typeof s==\"undefined\"&&(s=wA(i.scope,{configuration:e})),i&&i.scope&&typeof n==\"undefined\"&&(n=1),typeof s!=\"string\")throw new Error(\"Assertion failed: The registry 
should be a string\");let c=await db(s,{authType:n,configuration:e,ident:i});c&&(r=te(N({},r),{authorization:c}));try{return await ir.get(t.charAt(0)===\"/\"?`${s}${t}`:t,N({configuration:e,headers:r},o))}catch(u){throw await Rl(u,{registry:s,configuration:e,headers:r}),u}}async function z_e(t,e,u){var g=u,{attemptedAs:r,configuration:i,headers:n,ident:s,authType:o=3,registry:a,otp:l}=g,c=Tr(g,[\"attemptedAs\",\"configuration\",\"headers\",\"ident\",\"authType\",\"registry\",\"otp\"]);if(s&&typeof a==\"undefined\"&&(a=wA(s.scope,{configuration:i})),typeof a!=\"string\")throw new Error(\"Assertion failed: The registry should be a string\");let f=await db(a,{authType:o,configuration:i,ident:s});f&&(n=te(N({},n),{authorization:f})),l&&(n=N(N({},n),Ah(l)));try{return await ir.post(a+t,e,N({configuration:i,headers:n},c))}catch(h){if(!pb(h)||l)throw await Rl(h,{attemptedAs:r,registry:a,configuration:i,headers:n}),h;l=await pT();let p=N(N({},n),Ah(l));try{return await ir.post(`${a}${t}`,e,N({configuration:i,headers:p},c))}catch(m){throw await Rl(m,{attemptedAs:r,registry:a,configuration:i,headers:n}),m}}}async function __e(t,e,u){var g=u,{attemptedAs:r,configuration:i,headers:n,ident:s,authType:o=3,registry:a,otp:l}=g,c=Tr(g,[\"attemptedAs\",\"configuration\",\"headers\",\"ident\",\"authType\",\"registry\",\"otp\"]);if(s&&typeof a==\"undefined\"&&(a=wA(s.scope,{configuration:i})),typeof a!=\"string\")throw new Error(\"Assertion failed: The registry should be a string\");let f=await db(a,{authType:o,configuration:i,ident:s});f&&(n=te(N({},n),{authorization:f})),l&&(n=N(N({},n),Ah(l)));try{return await ir.put(a+t,e,N({configuration:i,headers:n},c))}catch(h){if(!pb(h))throw await Rl(h,{attemptedAs:r,registry:a,configuration:i,headers:n}),h;l=await pT();let p=N(N({},n),Ah(l));try{return await ir.put(`${a}${t}`,e,N({configuration:i,headers:p},c))}catch(m){throw await Rl(m,{attemptedAs:r,registry:a,configuration:i,headers:n}),m}}}async function V_e(t,c){var u=c,{attemptedAs:e,configuration:r,headers:i,ident:n,authType:s=3,registry:o,otp:a}=u,l=Tr(u,[\"attemptedAs\",\"configuration\",\"headers\",\"ident\",\"authType\",\"registry\",\"otp\"]);if(n&&typeof o==\"undefined\"&&(o=wA(n.scope,{configuration:r})),typeof o!=\"string\")throw new Error(\"Assertion failed: The registry should be a string\");let g=await db(o,{authType:s,configuration:r,ident:n});g&&(i=te(N({},i),{authorization:g})),a&&(i=N(N({},i),Ah(a)));try{return await ir.del(o+t,N({configuration:r,headers:i},l))}catch(f){if(!pb(f)||a)throw await Rl(f,{attemptedAs:e,registry:o,configuration:r,headers:i}),f;a=await pT();let h=N(N({},i),Ah(a));try{return await ir.del(`${o}${t}`,N({configuration:r,headers:h},l))}catch(p){throw await Rl(p,{attemptedAs:e,registry:o,configuration:r,headers:i}),p}}}async function db(t,{authType:e=2,configuration:r,ident:i}){let n=hT(t,{configuration:r,ident:i}),s=X_e(n,e);if(!s)return null;let o=await r.reduceHook(a=>a.getNpmAuthenticationHeader,void 0,t,{configuration:r,ident:i});if(o)return o;if(n.get(\"npmAuthToken\"))return`Bearer ${n.get(\"npmAuthToken\")}`;if(n.get(\"npmAuthIdent\")){let a=n.get(\"npmAuthIdent\");return a.includes(\":\")?`Basic ${Buffer.from(a).toString(\"base64\")}`:`Basic ${a}`}if(s&&e!==1)throw new ct($.AUTHENTICATION_NOT_FOUND,\"No authentication configured for request\");return null}function X_e(t,e){switch(e){case 2:return t.get(\"npmAlwaysAuth\");case 1:case 3:return!0;case 0:return!1;default:throw new Error(\"Unreachable\")}}async function J_e(t,e,{configuration:r}){var i;if(typeof 
e==\"undefined\"||typeof e.authorization==\"undefined\")return\"an anonymous user\";try{return(i=(await ir.get(new gce.URL(`${t}/-/whoami`).href,{configuration:r,headers:e,jsonResponse:!0})).username)!=null?i:\"an unknown user\"}catch{return\"an unknown user\"}}async function pT(){if(process.env.TEST_ENV)return process.env.TEST_NPM_2FA_TOKEN||\"\";let{otp:t}=await(0,uce.prompt)({type:\"password\",name:\"otp\",message:\"One-time password:\",required:!0,onCancel:()=>process.exit(130)});return t}function pb(t){var e,r;if(((e=t.originalError)==null?void 0:e.name)!==\"HTTPError\")return!1;try{return((r=t.originalError)==null?void 0:r.response.headers[\"www-authenticate\"].split(/,\\s*/).map(n=>n.toLowerCase())).includes(\"otp\")}catch(i){return!1}}function Ah(t){return{[\"npm-otp\"]:t}}var dT=class{supports(e,r){if(!e.reference.startsWith(Cr))return!1;let{selector:i,params:n}=P.parseRange(e.reference);return!(!fce.default.valid(i)||n===null||typeof n.__archiveUrl!=\"string\")}getLocalPath(e,r){return null}async fetch(e,r){let i=r.checksums.get(e.locatorHash)||null,[n,s,o]=await r.cache.fetchPackageFromCache(e,i,N({onHit:()=>r.report.reportCacheHit(e),onMiss:()=>r.report.reportCacheMiss(e,`${P.prettyLocator(r.project.configuration,e)} can't be found in the cache and will be fetched from the remote server`),loader:()=>this.fetchFromNetwork(e,r),skipIntegrityCheck:r.skipIntegrityCheck},r.cacheOptions));return{packageFs:n,releaseFs:s,prefixPath:P.getIdentVendorPath(e),checksum:o}}async fetchFromNetwork(e,r){let{params:i}=P.parseRange(e.reference);if(i===null||typeof i.__archiveUrl!=\"string\")throw new Error(\"Assertion failed: The archiveUrl querystring parameter should have been available\");let n=await Bo(i.__archiveUrl,{configuration:r.project.configuration,ident:e});return await wi.convertToZip(n,{compressionLevel:r.project.configuration.get(\"compressionLevel\"),prefixPath:P.getIdentVendorPath(e),stripComponents:1})}};var CT=class{supportsDescriptor(e,r){return!(!e.range.startsWith(Cr)||!P.tryParseDescriptor(e.range.slice(Cr.length),!0))}supportsLocator(e,r){return!1}shouldPersistResolution(e,r){throw new Error(\"Unreachable\")}bindDescriptor(e,r,i){return e}getResolutionDependencies(e,r){let i=P.parseDescriptor(e.range.slice(Cr.length),!0);return r.resolver.getResolutionDependencies(i,r)}async getCandidates(e,r,i){let n=P.parseDescriptor(e.range.slice(Cr.length),!0);return await i.resolver.getCandidates(n,r,i)}async getSatisfying(e,r,i){let n=P.parseDescriptor(e.range.slice(Cr.length),!0);return i.resolver.getSatisfying(n,r,i)}resolve(e,r){throw new Error(\"Unreachable\")}};var hce=ge(ti()),pce=ge(require(\"url\"));var bo=class{supports(e,r){if(!e.reference.startsWith(Cr))return!1;let i=new pce.URL(e.reference);return!(!hce.default.valid(i.pathname)||i.searchParams.has(\"__archiveUrl\"))}getLocalPath(e,r){return null}async fetch(e,r){let i=r.checksums.get(e.locatorHash)||null,[n,s,o]=await r.cache.fetchPackageFromCache(e,i,N({onHit:()=>r.report.reportCacheHit(e),onMiss:()=>r.report.reportCacheMiss(e,`${P.prettyLocator(r.project.configuration,e)} can't be found in the cache and will be fetched from the remote registry`),loader:()=>this.fetchFromNetwork(e,r),skipIntegrityCheck:r.skipIntegrityCheck},r.cacheOptions));return{packageFs:n,releaseFs:s,prefixPath:P.getIdentVendorPath(e),checksum:o}}async fetchFromNetwork(e,r){let i;try{i=await Bo(bo.getLocatorUrl(e),{configuration:r.project.configuration,ident:e})}catch(n){i=await 
Bo(bo.getLocatorUrl(e).replace(/%2f/g,\"/\"),{configuration:r.project.configuration,ident:e})}return await wi.convertToZip(i,{compressionLevel:r.project.configuration.get(\"compressionLevel\"),prefixPath:P.getIdentVendorPath(e),stripComponents:1})}static isConventionalTarballUrl(e,r,{configuration:i}){let n=wA(e.scope,{configuration:i}),s=bo.getLocatorUrl(e);return r=r.replace(/^https?:(\\/\\/(?:[^/]+\\.)?npmjs.org(?:$|\\/))/,\"https:$1\"),n=n.replace(/^https:\\/\\/registry\\.npmjs\\.org($|\\/)/,\"https://registry.yarnpkg.com$1\"),r=r.replace(/^https:\\/\\/registry\\.npmjs\\.org($|\\/)/,\"https://registry.yarnpkg.com$1\"),r===n+s||r===n+s.replace(/%2f/g,\"/\")}static getLocatorUrl(e){let r=Wt.clean(e.reference.slice(Cr.length));if(r===null)throw new ct($.RESOLVER_NOT_FOUND,\"The npm semver resolver got selected, but the version isn't semver\");return`${Fl(e)}/-/${e.name}-${r}.tgz`}};var dce=ge(ti());var Cb=P.makeIdent(null,\"node-gyp\"),Z_e=/\\b(node-gyp|prebuild-install)\\b/,mT=class{supportsDescriptor(e,r){return e.range.startsWith(Cr)?!!Wt.validRange(e.range.slice(Cr.length)):!1}supportsLocator(e,r){if(!e.reference.startsWith(Cr))return!1;let{selector:i}=P.parseRange(e.reference);return!!dce.default.valid(i)}shouldPersistResolution(e,r){return!0}bindDescriptor(e,r,i){return e}getResolutionDependencies(e,r){return[]}async getCandidates(e,r,i){let n=Wt.validRange(e.range.slice(Cr.length));if(n===null)throw new Error(`Expected a valid range, got ${e.range.slice(Cr.length)}`);let s=await Bo(Fl(e),{configuration:i.project.configuration,ident:e,jsonResponse:!0}),o=Se.mapAndFilter(Object.keys(s.versions),c=>{try{let u=new Wt.SemVer(c);if(n.test(u))return u}catch{}return Se.mapAndFilter.skip}),a=o.filter(c=>!s.versions[c.raw].deprecated),l=a.length>0?a:o;return l.sort((c,u)=>-c.compare(u)),l.map(c=>{let u=P.makeLocator(e,`${Cr}${c.raw}`),g=s.versions[c.raw].dist.tarball;return bo.isConventionalTarballUrl(u,g,{configuration:i.project.configuration})?u:P.bindLocator(u,{__archiveUrl:g})})}async getSatisfying(e,r,i){let n=Wt.validRange(e.range.slice(Cr.length));if(n===null)throw new Error(`Expected a valid range, got ${e.range.slice(Cr.length)}`);return Se.mapAndFilter(r,s=>{try{let{selector:o}=P.parseRange(s,{requireProtocol:Cr}),a=new Wt.SemVer(o);if(n.test(a))return{reference:s,version:a}}catch{}return Se.mapAndFilter.skip}).sort((s,o)=>-s.version.compare(o.version)).map(({reference:s})=>P.makeLocator(e,s))}async resolve(e,r){let{selector:i}=P.parseRange(e.reference),n=Wt.clean(i);if(n===null)throw new ct($.RESOLVER_NOT_FOUND,\"The npm semver resolver got selected, but the version isn't semver\");let s=await Bo(Fl(e),{configuration:r.project.configuration,ident:e,jsonResponse:!0});if(!Object.prototype.hasOwnProperty.call(s,\"versions\"))throw new ct($.REMOTE_INVALID,'Registry returned invalid data for - missing \"versions\" field');if(!Object.prototype.hasOwnProperty.call(s.versions,n))throw new ct($.REMOTE_NOT_FOUND,`Registry failed to return reference \"${n}\"`);let o=new At;if(o.load(s.versions[n]),!o.dependencies.has(Cb.identHash)&&!o.peerDependencies.has(Cb.identHash)){for(let a of o.scripts.values())if(a.match(Z_e)){o.dependencies.set(Cb.identHash,P.makeDescriptor(Cb,\"latest\")),r.report.reportWarningOnce($.NODE_GYP_INJECTED,`${P.prettyLocator(r.project.configuration,e)}: Implicit dependencies on node-gyp are discouraged`);break}}if(typeof o.raw.deprecated==\"string\"&&o.raw.deprecated!==\"\"){let a=P.prettyLocator(r.project.configuration,e),l=o.raw.deprecated.match(/\\S/)?`${a} is 
deprecated: ${o.raw.deprecated}`:`${a} is deprecated`;r.report.reportWarningOnce($.DEPRECATED_PACKAGE,l)}return te(N({},e),{version:n,languageName:\"node\",linkType:Qt.HARD,conditions:o.getConditions(),dependencies:o.dependencies,peerDependencies:o.peerDependencies,dependenciesMeta:o.dependenciesMeta,peerDependenciesMeta:o.peerDependenciesMeta,bin:o.bin})}};var ET=class{supportsDescriptor(e,r){return!(!e.range.startsWith(Cr)||!Gg.test(e.range.slice(Cr.length)))}supportsLocator(e,r){return!1}shouldPersistResolution(e,r){throw new Error(\"Unreachable\")}bindDescriptor(e,r,i){return e}getResolutionDependencies(e,r){return[]}async getCandidates(e,r,i){let n=e.range.slice(Cr.length),s=await Bo(Fl(e),{configuration:i.project.configuration,ident:e,jsonResponse:!0});if(!Object.prototype.hasOwnProperty.call(s,\"dist-tags\"))throw new ct($.REMOTE_INVALID,'Registry returned invalid data - missing \"dist-tags\" field');let o=s[\"dist-tags\"];if(!Object.prototype.hasOwnProperty.call(o,n))throw new ct($.REMOTE_NOT_FOUND,`Registry failed to return tag \"${n}\"`);let a=o[n],l=P.makeLocator(e,`${Cr}${a}`),c=s.versions[a].dist.tarball;return bo.isConventionalTarballUrl(l,c,{configuration:i.project.configuration})?[l]:[P.bindLocator(l,{__archiveUrl:c})]}async getSatisfying(e,r,i){return null}async resolve(e,r){throw new Error(\"Unreachable\")}};var yh={};ft(yh,{getGitHead:()=>VVe,makePublishBody:()=>_Ve});var aO={};ft(aO,{default:()=>DVe,packUtils:()=>vA});var vA={};ft(vA,{genPackList:()=>Ub,genPackStream:()=>oO,genPackageManifest:()=>Yue,hasPackScripts:()=>nO,prepareForPack:()=>sO});var iO=ge(ts()),jue=ge(Hue()),Gue=ge(require(\"zlib\")),IVe=[\"/package.json\",\"/readme\",\"/readme.*\",\"/license\",\"/license.*\",\"/licence\",\"/licence.*\",\"/changelog\",\"/changelog.*\"],yVe=[\"/package.tgz\",\".github\",\".git\",\".hg\",\"node_modules\",\".npmignore\",\".gitignore\",\".#*\",\".DS_Store\"];async function nO(t){return!!(Zt.hasWorkspaceScript(t,\"prepack\")||Zt.hasWorkspaceScript(t,\"postpack\"))}async function sO(t,{report:e},r){await Zt.maybeExecuteWorkspaceLifecycleScript(t,\"prepack\",{report:e});try{let i=k.join(t.cwd,At.fileName);await K.existsPromise(i)&&await t.manifest.loadFile(i,{baseFs:K}),await r()}finally{await Zt.maybeExecuteWorkspaceLifecycleScript(t,\"postpack\",{report:e})}}async function oO(t,e){var s,o;typeof e==\"undefined\"&&(e=await Ub(t));let r=new Set;for(let a of(o=(s=t.manifest.publishConfig)==null?void 0:s.executableFiles)!=null?o:new Set)r.add(k.normalize(a));for(let a of t.manifest.bin.values())r.add(k.normalize(a));let i=jue.default.pack();process.nextTick(async()=>{for(let a of e){let l=k.normalize(a),c=k.resolve(t.cwd,l),u=k.join(\"package\",l),g=await K.lstatPromise(c),f={name:u,mtime:new Date(Dr.SAFE_TIME*1e3)},h=r.has(l)?493:420,p,m,y=new Promise((S,x)=>{p=S,m=x}),Q=S=>{S?m(S):p()};if(g.isFile()){let S;l===\"package.json\"?S=Buffer.from(JSON.stringify(await Yue(t),null,2)):S=await K.readFilePromise(c),i.entry(te(N({},f),{mode:h,type:\"file\"}),S,Q)}else g.isSymbolicLink()?i.entry(te(N({},f),{mode:h,type:\"symlink\",linkname:await K.readlinkPromise(c)}),Q):Q(new Error(`Unsupported file type ${g.mode} for ${j.fromPortablePath(l)}`));await y}i.finalize()});let n=(0,Gue.createGzip)();return i.pipe(n),n}async function Yue(t){let e=JSON.parse(JSON.stringify(t.manifest.raw));return await t.project.configuration.triggerHook(r=>r.beforeWorkspacePacking,t,e),e}async function Ub(t){var g,f,h,p,m,y,Q,S;let e=t.project,r=e.configuration,i={accept:[],reject:[]};for(let x of 
yVe)i.reject.push(x);for(let x of IVe)i.accept.push(x);i.reject.push(r.get(\"rcFilename\"));let n=x=>{if(x===null||!x.startsWith(`${t.cwd}/`))return;let M=k.relative(t.cwd,x),Y=k.resolve(Me.root,M);i.reject.push(Y)};n(k.resolve(e.cwd,r.get(\"lockfileFilename\"))),n(r.get(\"cacheFolder\")),n(r.get(\"globalFolder\")),n(r.get(\"installStatePath\")),n(r.get(\"virtualFolder\")),n(r.get(\"yarnPath\")),await r.triggerHook(x=>x.populateYarnPaths,e,x=>{n(x)});for(let x of e.workspaces){let M=k.relative(t.cwd,x.cwd);M!==\"\"&&!M.match(/^(\\.\\.)?\\//)&&i.reject.push(`/${M}`)}let s={accept:[],reject:[]},o=(f=(g=t.manifest.publishConfig)==null?void 0:g.main)!=null?f:t.manifest.main,a=(p=(h=t.manifest.publishConfig)==null?void 0:h.module)!=null?p:t.manifest.module,l=(y=(m=t.manifest.publishConfig)==null?void 0:m.browser)!=null?y:t.manifest.browser,c=(S=(Q=t.manifest.publishConfig)==null?void 0:Q.bin)!=null?S:t.manifest.bin;o!=null&&s.accept.push(k.resolve(Me.root,o)),a!=null&&s.accept.push(k.resolve(Me.root,a)),typeof l==\"string\"&&s.accept.push(k.resolve(Me.root,l));for(let x of c.values())s.accept.push(k.resolve(Me.root,x));if(l instanceof Map)for(let[x,M]of l.entries())s.accept.push(k.resolve(Me.root,x)),typeof M==\"string\"&&s.accept.push(k.resolve(Me.root,M));let u=t.manifest.files!==null;if(u){s.reject.push(\"/*\");for(let x of t.manifest.files)que(s.accept,x,{cwd:Me.root})}return await wVe(t.cwd,{hasExplicitFileList:u,globalList:i,ignoreList:s})}async function wVe(t,{hasExplicitFileList:e,globalList:r,ignoreList:i}){let n=[],s=new Da(t),o=[[Me.root,[i]]];for(;o.length>0;){let[a,l]=o.pop(),c=await s.lstatPromise(a);if(!Wue(a,{globalList:r,ignoreLists:c.isDirectory()?null:l}))if(c.isDirectory()){let u=await s.readdirPromise(a),g=!1,f=!1;if(!e||a!==Me.root)for(let m of u)g=g||m===\".gitignore\",f=f||m===\".npmignore\";let h=f?await Jue(s,a,\".npmignore\"):g?await Jue(s,a,\".gitignore\"):null,p=h!==null?[h].concat(l):l;Wue(a,{globalList:r,ignoreLists:l})&&(p=[...l,{accept:[],reject:[\"**/*\"]}]);for(let m of u)o.push([k.resolve(a,m),p])}else(c.isFile()||c.isSymbolicLink())&&n.push(k.relative(Me.root,a))}return n.sort()}async function Jue(t,e,r){let i={accept:[],reject:[]},n=await t.readFilePromise(k.join(e,r),\"utf8\");for(let s of n.split(/\\n/g))que(i.reject,s,{cwd:e});return i}function BVe(t,{cwd:e}){let r=t[0]===\"!\";return r&&(t=t.slice(1)),t.match(/\\.{0,1}\\//)&&(t=k.resolve(e,t)),r&&(t=`!${t}`),t}function que(t,e,{cwd:r}){let i=e.trim();i===\"\"||i[0]===\"#\"||t.push(BVe(i,{cwd:r}))}function Wue(t,{globalList:e,ignoreLists:r}){if(Kb(t,e.accept))return!1;if(Kb(t,e.reject))return!0;if(r!==null)for(let i of r){if(Kb(t,i.accept))return!1;if(Kb(t,i.reject))return!0}return!1}function Kb(t,e){let r=e,i=[];for(let n=0;n<e.length;++n)e[n][0]!==\"!\"?r!==e&&r.push(e[n]):(r===e&&(r=e.slice(0,n)),i.push(e[n].slice(1)));return zue(t,i)?!1:!!zue(t,r)}function zue(t,e){let r=e,i=[];for(let n=0;n<e.length;++n)e[n].includes(\"/\")?r!==e&&r.push(e[n]):(r===e&&(r=e.slice(0,n)),i.push(e[n]));return!!(iO.default.isMatch(t,r,{dot:!0,nocase:!0})||iO.default.isMatch(t,i,{dot:!0,basename:!0,nocase:!0}))}var AE=class extends Le{constructor(){super(...arguments);this.installIfNeeded=z.Boolean(\"--install-if-needed\",!1,{description:\"Run a preliminary `yarn install` if the package contains build scripts\"});this.dryRun=z.Boolean(\"-n,--dry-run\",!1,{description:\"Print the file paths without actually generating the package archive\"});this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON 
stream\"});this.out=z.String(\"-o,--out\",{description:\"Create the archive at the specified path\"});this.filename=z.String(\"--filename\",{hidden:!0})}async execute(){var a;let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd);if(!i)throw new ht(r.cwd,this.context.cwd);await nO(i)&&(this.installIfNeeded?await r.install({cache:await Nt.find(e),report:new pi}):await r.restoreInstallState());let n=(a=this.out)!=null?a:this.filename,s=typeof n!=\"undefined\"?k.resolve(this.context.cwd,bVe(n,{workspace:i})):k.resolve(i.cwd,\"package.tgz\");return(await Je.start({configuration:e,stdout:this.context.stdout,json:this.json},async l=>{await sO(i,{report:l},async()=>{l.reportJson({base:j.fromPortablePath(i.cwd)});let c=await Ub(i);for(let u of c)l.reportInfo(null,j.fromPortablePath(u)),l.reportJson({location:j.fromPortablePath(u)});if(!this.dryRun){let u=await oO(i,c),g=K.createWriteStream(s);u.pipe(g),await new Promise(f=>{g.on(\"finish\",f)})}}),this.dryRun||(l.reportInfo($.UNNAMED,`Package archive generated in ${ae.pretty(e,s,ae.Type.PATH)}`),l.reportJson({output:j.fromPortablePath(s)}))})).exitCode()}};AE.paths=[[\"pack\"]],AE.usage=Re.Usage({description:\"generate a tarball from the active workspace\",details:\"\\n      This command will turn the active workspace into a compressed archive suitable for publishing. The archive will by default be stored at the root of the workspace (`package.tgz`).\\n\\n      If the `-o,---out` is set the archive will be created at the specified path. The `%s` and `%v` variables can be used within the path and will be respectively replaced by the package name and version.\\n    \",examples:[[\"Create an archive from the active workspace\",\"yarn pack\"],[\"List the files that would be made part of the workspace's archive\",\"yarn pack --dry-run\"],[\"Name and output the archive in a dedicated folder\",\"yarn pack --out /artifacts/%s-%v.tgz\"]]});var _ue=AE;function bVe(t,{workspace:e}){let r=t.replace(\"%s\",QVe(e)).replace(\"%v\",vVe(e));return j.toPortablePath(r)}function QVe(t){return t.manifest.name!==null?P.slugifyIdent(t.manifest.name):\"package\"}function vVe(t){return t.manifest.version!==null?t.manifest.version:\"unknown\"}var SVe=[\"dependencies\",\"devDependencies\",\"peerDependencies\"],kVe=\"workspace:\",xVe=(t,e)=>{var i,n;e.publishConfig&&(e.publishConfig.main&&(e.main=e.publishConfig.main),e.publishConfig.browser&&(e.browser=e.publishConfig.browser),e.publishConfig.module&&(e.module=e.publishConfig.module),e.publishConfig.browser&&(e.browser=e.publishConfig.browser),e.publishConfig.exports&&(e.exports=e.publishConfig.exports),e.publishConfig.bin&&(e.bin=e.publishConfig.bin));let r=t.project;for(let s of SVe)for(let o of t.manifest.getForScope(s).values()){let a=r.tryWorkspaceByDescriptor(o),l=P.parseRange(o.range);if(l.protocol===kVe)if(a===null){if(r.tryWorkspaceByIdent(o)===null)throw new ct($.WORKSPACE_NOT_FOUND,`${P.prettyDescriptor(r.configuration,o)}: No local workspace found for this range`)}else{let c;P.areDescriptorsEqual(o,a.anchoredDescriptor)||l.selector===\"*\"?c=(i=a.manifest.version)!=null?i:\"0.0.0\":l.selector===\"~\"||l.selector===\"^\"?c=`${l.selector}${(n=a.manifest.version)!=null?n:\"0.0.0\"}`:c=l.selector;let u=s===\"dependencies\"?P.makeDescriptor(o,\"unknown\"):null,g=u!==null&&t.manifest.ensureDependencyMeta(u).optional?\"optionalDependencies\":s;e[g][P.stringifyIdent(o)]=c}}},PVe={hooks:{beforeWorkspacePacking:xVe},commands:[_ue]},DVe=PVe;var 
nge=ge(require(\"crypto\")),sge=ge(ige()),oge=ge(require(\"url\"));async function _Ve(t,e,{access:r,tag:i,registry:n,gitHead:s}){let o=t.project.configuration,a=t.manifest.name,l=t.manifest.version,c=P.stringifyIdent(a),u=(0,nge.createHash)(\"sha1\").update(e).digest(\"hex\"),g=sge.default.fromData(e).toString();typeof r==\"undefined\"&&(t.manifest.publishConfig&&typeof t.manifest.publishConfig.access==\"string\"?r=t.manifest.publishConfig.access:o.get(\"npmPublishAccess\")!==null?r=o.get(\"npmPublishAccess\"):a.scope?r=\"restricted\":r=\"public\");let f=await vA.genPackageManifest(t),h=`${c}-${l}.tgz`,p=new oge.URL(`${ha(n)}/${c}/-/${h}`);return{_id:c,_attachments:{[h]:{content_type:\"application/octet-stream\",data:e.toString(\"base64\"),length:e.length}},name:c,access:r,[\"dist-tags\"]:{[i]:l},versions:{[l]:te(N({},f),{_id:`${c}@${l}`,name:c,version:l,gitHead:s,dist:{shasum:u,integrity:g,tarball:p.toString()}})}}}async function VVe(t){try{let{stdout:e}=await Fr.execvp(\"git\",[\"rev-parse\",\"--revs-only\",\"HEAD\"],{cwd:t});return e.trim()===\"\"?void 0:e.trim()}catch{return}}var gO={npmAlwaysAuth:{description:\"URL of the selected npm registry (note: npm enterprise isn't supported)\",type:Ie.BOOLEAN,default:!1},npmAuthIdent:{description:\"Authentication identity for the npm registry (_auth in npm and yarn v1)\",type:Ie.SECRET,default:null},npmAuthToken:{description:\"Authentication token for the npm registry (_authToken in npm and yarn v1)\",type:Ie.SECRET,default:null}},age={npmAuditRegistry:{description:\"Registry to query for audit reports\",type:Ie.STRING,default:null},npmPublishRegistry:{description:\"Registry to push packages to\",type:Ie.STRING,default:null},npmRegistryServer:{description:\"URL of the selected npm registry (note: npm enterprise isn't supported)\",type:Ie.STRING,default:\"https://registry.yarnpkg.com\"}},XVe={configuration:te(N(N({},gO),age),{npmScopes:{description:\"Settings per package scope\",type:Ie.MAP,valueDefinition:{description:\"\",type:Ie.SHAPE,properties:N(N({},gO),age)}},npmRegistries:{description:\"Settings per registry\",type:Ie.MAP,normalizeKeys:ha,valueDefinition:{description:\"\",type:Ie.SHAPE,properties:N({},gO)}}}),fetchers:[dT,bo],resolvers:[CT,mT,ET]},ZVe=XVe;var dO={};ft(dO,{default:()=>a9e});Es();var Ea;(function(i){i.All=\"all\",i.Production=\"production\",i.Development=\"development\"})(Ea||(Ea={}));var vo;(function(s){s.Info=\"info\",s.Low=\"low\",s.Moderate=\"moderate\",s.High=\"high\",s.Critical=\"critical\"})(vo||(vo={}));var Hb=[vo.Info,vo.Low,vo.Moderate,vo.High,vo.Critical];function Age(t,e){let r=[],i=new Set,n=o=>{i.has(o)||(i.add(o),r.push(o))};for(let o of e)n(o);let s=new Set;for(;r.length>0;){let o=r.shift(),a=t.storedResolutions.get(o);if(typeof a==\"undefined\")throw new Error(\"Assertion failed: Expected the resolution to have been registered\");let l=t.storedPackages.get(a);if(!!l){s.add(o);for(let c of l.dependencies.values())n(c.descriptorHash)}}return s}function $Ve(t,e){return new Set([...t].filter(r=>!e.has(r)))}function e9e(t,e,{all:r}){let i=r?t.workspaces:[e],n=i.map(f=>f.manifest),s=new Set(n.map(f=>[...f.dependencies].map(([h,p])=>h)).flat()),o=new Set(n.map(f=>[...f.devDependencies].map(([h,p])=>h)).flat()),a=i.map(f=>[...f.dependencies.values()]).flat(),l=a.filter(f=>s.has(f.identHash)).map(f=>f.descriptorHash),c=a.filter(f=>o.has(f.identHash)).map(f=>f.descriptorHash),u=Age(t,l),g=Age(t,c);return $Ve(g,u)}function lge(t){let e={};for(let r of t)e[P.stringifyIdent(r)]=P.parseRange(r.range).selector;return 
e}function cge(t){if(typeof t==\"undefined\")return new Set;let e=Hb.indexOf(t),r=Hb.slice(e);return new Set(r)}function t9e(t,e){let r=cge(e),i={};for(let n of r)i[n]=t[n];return i}function uge(t,e){var i;let r=t9e(t,e);for(let n of Object.keys(r))if((i=r[n])!=null?i:0>0)return!0;return!1}function gge(t,e){var s;let r={},i={children:r},n=Object.values(t.advisories);if(e!=null){let o=cge(e);n=n.filter(a=>o.has(a.severity))}for(let o of Se.sortMap(n,a=>a.module_name))r[o.module_name]={label:o.module_name,value:ae.tuple(ae.Type.RANGE,o.findings.map(a=>a.version).join(\", \")),children:{Issue:{label:\"Issue\",value:ae.tuple(ae.Type.NO_HINT,o.title)},URL:{label:\"URL\",value:ae.tuple(ae.Type.URL,o.url)},Severity:{label:\"Severity\",value:ae.tuple(ae.Type.NO_HINT,o.severity)},[\"Vulnerable Versions\"]:{label:\"Vulnerable Versions\",value:ae.tuple(ae.Type.RANGE,o.vulnerable_versions)},[\"Patched Versions\"]:{label:\"Patched Versions\",value:ae.tuple(ae.Type.RANGE,o.patched_versions)},Via:{label:\"Via\",value:ae.tuple(ae.Type.NO_HINT,Array.from(new Set(o.findings.map(a=>a.paths).flat().map(a=>a.split(\">\")[0]))).join(\", \"))},Recommendation:{label:\"Recommendation\",value:ae.tuple(ae.Type.NO_HINT,(s=o.recommendation)==null?void 0:s.replace(/\\n/g,\" \"))}}};return i}function fge(t,e,{all:r,environment:i}){let n=r?t.workspaces:[e],s=[Ea.All,Ea.Production].includes(i),o=[];if(s)for(let c of n)for(let u of c.manifest.dependencies.values())o.push(u);let a=[Ea.All,Ea.Development].includes(i),l=[];if(a)for(let c of n)for(let u of c.manifest.devDependencies.values())l.push(u);return lge([...o,...l].filter(c=>P.parseRange(c.range).protocol===null))}function hge(t,e,{all:r}){var s;let i=e9e(t,e,{all:r}),n={};for(let o of t.storedPackages.values())n[P.stringifyIdent(o)]={version:(s=o.version)!=null?s:\"0.0.0\",integrity:o.identHash,requires:lge(o.dependencies.values()),dev:i.has(P.convertLocatorToDescriptor(o).descriptorHash)};return n}var uE=class extends Le{constructor(){super(...arguments);this.all=z.Boolean(\"-A,--all\",!1,{description:\"Audit dependencies from all workspaces\"});this.recursive=z.Boolean(\"-R,--recursive\",!1,{description:\"Audit transitive dependencies as well\"});this.environment=z.String(\"--environment\",Ea.All,{description:\"Which environments to cover\",validator:nn(Ea)});this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"});this.severity=z.String(\"--severity\",vo.Info,{description:\"Minimal severity requested for packages to be displayed\",validator:nn(vo)})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd);if(!i)throw new ht(r.cwd,this.context.cwd);await r.restoreInstallState();let n=fge(r,i,{all:this.all,environment:this.environment}),s=hge(r,i,{all:this.all});if(!this.recursive)for(let f of Object.keys(s))Object.prototype.hasOwnProperty.call(n,f)?s[f].requires={}:delete s[f];let o={requires:n,dependencies:s},a=br.getAuditRegistry(i.manifest,{configuration:e}),l,c=await uA.start({configuration:e,stdout:this.context.stdout},async()=>{l=await zt.post(\"/-/npm/v1/security/audits/quick\",o,{authType:zt.AuthType.BEST_EFFORT,configuration:e,jsonResponse:!0,registry:a})});if(c.hasErrors())return c.exitCode();let u=uge(l.metadata.vulnerabilities,this.severity);return!this.json&&u?(as.emitTree(gge(l,this.severity),{configuration:e,json:this.json,stdout:this.context.stdout,separators:2}),1):(await 
Je.start({configuration:e,includeFooter:!1,json:this.json,stdout:this.context.stdout},async f=>{f.reportJson(l),u||f.reportInfo($.EXCEPTION,\"No audit suggestions\")})).exitCode()}};uE.paths=[[\"npm\",\"audit\"]],uE.usage=Re.Usage({description:\"perform a vulnerability audit against the installed packages\",details:`\n      This command checks for known security reports on the packages you use. The reports are by default extracted from the npm registry, and may or may not be relevant to your actual program (not all vulnerabilities affect all code paths).\n\n      For consistency with our other commands the default is to only check the direct dependencies for the active workspace. To extend this search to all workspaces, use \\`-A,--all\\`. To extend this search to both direct and transitive dependencies, use \\`-R,--recursive\\`.\n\n      Applying the \\`--severity\\` flag will limit the audit table to vulnerabilities of the corresponding severity and above. Valid values are ${Hb.map(e=>`\\`${e}\\``).join(\", \")}.\n\n      If the \\`--json\\` flag is set, Yarn will print the output exactly as received from the registry. Regardless of this flag, the process will exit with a non-zero exit code if a report is found for the selected packages.\n\n      To understand the dependency tree requiring vulnerable packages, check the raw report with the \\`--json\\` flag or use \\`yarn why <package>\\` to get more information as to who depends on them.\n    `,examples:[[\"Checks for known security issues with the installed packages. The output is a list of known issues.\",\"yarn npm audit\"],[\"Audit dependencies in all workspaces\",\"yarn npm audit --all\"],[\"Limit auditing to `dependencies` (excludes `devDependencies`)\",\"yarn npm audit --environment production\"],[\"Show audit report as valid JSON\",\"yarn npm audit --json\"],[\"Audit all direct and transitive dependencies\",\"yarn npm audit --recursive\"],[\"Output moderate (or more severe) vulnerabilities\",\"yarn npm audit --severity moderate\"]]});var pge=uE;var fO=ge(ti()),hO=ge(require(\"util\")),gE=class extends Le{constructor(){super(...arguments);this.fields=z.String(\"-f,--fields\",{description:\"A comma-separated list of manifest fields that should be displayed\"});this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"});this.packages=z.Rest()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r}=await ze.find(e,this.context.cwd),i=typeof this.fields!=\"undefined\"?new Set([\"name\",...this.fields.split(/\\s*,\\s*/)]):null,n=[],s=!1,o=await Je.start({configuration:e,includeFooter:!1,json:this.json,stdout:this.context.stdout},async a=>{for(let l of this.packages){let c;if(l===\".\"){let x=r.topLevelWorkspace;if(!x.manifest.name)throw new Pe(`Missing ${ae.pretty(e,\"name\",ae.Type.CODE)} field in ${j.fromPortablePath(k.join(x.cwd,Pt.manifest))}`);c=P.makeDescriptor(x.manifest.name,\"unknown\")}else c=P.parseDescriptor(l);let u=zt.getIdentUrl(c),g=pO(await zt.get(u,{configuration:e,ident:c,jsonResponse:!0,customErrorMessage:zt.customPackageError})),f=Object.keys(g.versions).sort(fO.default.compareLoose),p=g[\"dist-tags\"].latest||f[f.length-1],m=Wt.validRange(c.range);if(m){let x=fO.default.maxSatisfying(f,m);x!==null?p=x:(a.reportWarning($.UNNAMED,`Unmet range ${P.prettyRange(e,c.range)}; falling back to the latest version`),s=!0)}else 
Object.prototype.hasOwnProperty.call(g[\"dist-tags\"],c.range)?p=g[\"dist-tags\"][c.range]:c.range!==\"unknown\"&&(a.reportWarning($.UNNAMED,`Unknown tag ${P.prettyRange(e,c.range)}; falling back to the latest version`),s=!0);let y=g.versions[p],Q=te(N(N({},g),y),{version:p,versions:f}),S;if(i!==null){S={};for(let x of i){let M=Q[x];if(typeof M!=\"undefined\")S[x]=M;else{a.reportWarning($.EXCEPTION,`The ${ae.pretty(e,x,ae.Type.CODE)} field doesn't exist inside ${P.prettyIdent(e,c)}'s information`),s=!0;continue}}}else this.json||(delete Q.dist,delete Q.readme,delete Q.users),S=Q;a.reportJson(S),this.json||n.push(S)}});hO.inspect.styles.name=\"cyan\";for(let a of n)(a!==n[0]||s)&&this.context.stdout.write(`\n`),this.context.stdout.write(`${(0,hO.inspect)(a,{depth:Infinity,colors:!0,compact:!1})}\n`);return o.exitCode()}};gE.paths=[[\"npm\",\"info\"]],gE.usage=Re.Usage({category:\"Npm-related commands\",description:\"show information about a package\",details:\"\\n      This command fetches information about a package from the npm registry and prints it in a tree format.\\n\\n      The package does not have to be installed locally, but needs to have been published (in particular, local changes will be ignored even for workspaces).\\n\\n      Append `@<range>` to the package argument to provide information specific to the latest version that satisfies the range or to the corresponding tagged version. If the range is invalid or if there is no version satisfying the range, the command will print a warning and fall back to the latest version.\\n\\n      If the `-f,--fields` option is set, it's a comma-separated list of fields which will be used to only display part of the package information.\\n\\n      By default, this command won't return the `dist`, `readme`, and `users` fields, since they are often very long. 
To explicitly request those fields, explicitly list them with the `--fields` flag or request the output in JSON mode.\\n    \",examples:[[\"Show all available information about react (except the `dist`, `readme`, and `users` fields)\",\"yarn npm info react\"],[\"Show all available information about react as valid JSON (including the `dist`, `readme`, and `users` fields)\",\"yarn npm info react --json\"],[\"Show all available information about react@16.12.0\",\"yarn npm info react@16.12.0\"],[\"Show all available information about react@next\",\"yarn npm info react@next\"],[\"Show the description of react\",\"yarn npm info react --fields description\"],[\"Show all available versions of react\",\"yarn npm info react --fields versions\"],[\"Show the readme of react\",\"yarn npm info react --fields readme\"],[\"Show a few fields of react\",\"yarn npm info react --fields homepage,repository\"]]});var dge=gE;function pO(t){if(Array.isArray(t)){let e=[];for(let r of t)r=pO(r),r&&e.push(r);return e}else if(typeof t==\"object\"&&t!==null){let e={};for(let r of Object.keys(t)){if(r.startsWith(\"_\"))continue;let i=pO(t[r]);i&&(e[r]=i)}return e}else return t||null}var Cge=ge($C()),fE=class extends Le{constructor(){super(...arguments);this.scope=z.String(\"-s,--scope\",{description:\"Login to the registry configured for a given scope\"});this.publish=z.Boolean(\"--publish\",!1,{description:\"Login to the publish registry\"})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),r=await jb({configuration:e,cwd:this.context.cwd,publish:this.publish,scope:this.scope});return(await Je.start({configuration:e,stdout:this.context.stdout},async n=>{let s=await i9e({registry:r,report:n,stdin:this.context.stdin,stdout:this.context.stdout}),o=`/-/user/org.couchdb.user:${encodeURIComponent(s.name)}`,a=await zt.put(o,s,{attemptedAs:s.name,configuration:e,registry:r,jsonResponse:!0,authType:zt.AuthType.NO_AUTH});return await r9e(r,a.token,{configuration:e,scope:this.scope}),n.reportInfo($.UNNAMED,\"Successfully logged in\")})).exitCode()}};fE.paths=[[\"npm\",\"login\"]],fE.usage=Re.Usage({category:\"Npm-related commands\",description:\"store new login info to access the npm registry\",details:\"\\n      This command will ask you for your username, password, and 2FA One-Time-Password (when it applies). 
It will then modify your local configuration (in your home folder, never in the project itself) to reference the new tokens thus generated.\\n\\n      Adding the `-s,--scope` flag will cause the authentication to be done against whatever registry is configured for the associated scope (see also `npmScopes`).\\n\\n      Adding the `--publish` flag will cause the authentication to be done against the registry used when publishing the package (see also `publishConfig.registry` and `npmPublishRegistry`).\\n    \",examples:[[\"Login to the default registry\",\"yarn npm login\"],[\"Login to the registry linked to the @my-scope registry\",\"yarn npm login --scope my-scope\"],[\"Login to the publish registry for the current package\",\"yarn npm login --publish\"]]});var mge=fE;async function jb({scope:t,publish:e,configuration:r,cwd:i}){return t&&e?br.getScopeRegistry(t,{configuration:r,type:br.RegistryType.PUBLISH_REGISTRY}):t?br.getScopeRegistry(t,{configuration:r}):e?br.getPublishRegistry((await Jf(r,i)).manifest,{configuration:r}):br.getDefaultRegistry({configuration:r})}async function r9e(t,e,{configuration:r,scope:i}){let n=o=>a=>{let l=Se.isIndexableObject(a)?a:{},c=l[o],u=Se.isIndexableObject(c)?c:{};return te(N({},l),{[o]:te(N({},u),{npmAuthToken:e})})},s=i?{npmScopes:n(i)}:{npmRegistries:n(t)};return await ye.updateHomeConfiguration(s)}async function i9e({registry:t,report:e,stdin:r,stdout:i}){if(process.env.TEST_ENV)return{name:process.env.TEST_NPM_USER||\"\",password:process.env.TEST_NPM_PASSWORD||\"\"};e.reportInfo($.UNNAMED,`Logging in to ${t}`);let n=!1;t.match(/^https:\\/\\/npm\\.pkg\\.github\\.com(\\/|$)/)&&(e.reportInfo($.UNNAMED,\"You seem to be using the GitHub Package Registry. Tokens must be generated with the 'repo', 'write:packages', and 'read:packages' permissions.\"),n=!0),e.reportSeparator();let{username:s,password:o}=await(0,Cge.prompt)([{type:\"input\",name:\"username\",message:\"Username:\",required:!0,onCancel:()=>process.exit(130),stdin:r,stdout:i},{type:\"password\",name:\"password\",message:n?\"Token:\":\"Password:\",required:!0,onCancel:()=>process.exit(130),stdin:r,stdout:i}]);return e.reportSeparator(),{name:s,password:o}}var wh=new Set([\"npmAuthIdent\",\"npmAuthToken\"]),hE=class extends Le{constructor(){super(...arguments);this.scope=z.String(\"-s,--scope\",{description:\"Logout of the registry configured for a given scope\"});this.publish=z.Boolean(\"--publish\",!1,{description:\"Logout of the publish registry\"});this.all=z.Boolean(\"-A,--all\",!1,{description:\"Logout of all registries\"})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),r=async()=>{var l;let n=await jb({configuration:e,cwd:this.context.cwd,publish:this.publish,scope:this.scope}),s=await ye.find(this.context.cwd,this.context.plugins),o=P.makeIdent((l=this.scope)!=null?l:null,\"pkg\");return!br.getAuthConfiguration(n,{configuration:s,ident:o}).get(\"npmAuthToken\")};return(await Je.start({configuration:e,stdout:this.context.stdout},async n=>{if(this.all&&(await n9e(),n.reportInfo($.UNNAMED,\"Successfully logged out from everything\")),this.scope){await Ege(\"npmScopes\",this.scope),await r()?n.reportInfo($.UNNAMED,`Successfully logged out from ${this.scope}`):n.reportWarning($.UNNAMED,\"Scope authentication settings removed, but some other ones settings still apply to it\");return}let s=await jb({configuration:e,cwd:this.context.cwd,publish:this.publish});await Ege(\"npmRegistries\",s),await r()?n.reportInfo($.UNNAMED,`Successfully logged out from 
${s}`):n.reportWarning($.UNNAMED,\"Registry authentication settings removed, but some other ones settings still apply to it\")})).exitCode()}};hE.paths=[[\"npm\",\"logout\"]],hE.usage=Re.Usage({category:\"Npm-related commands\",description:\"logout of the npm registry\",details:\"\\n      This command will log you out by modifying your local configuration (in your home folder, never in the project itself) to delete all credentials linked to a registry.\\n\\n      Adding the `-s,--scope` flag will cause the deletion to be done against whatever registry is configured for the associated scope (see also `npmScopes`).\\n\\n      Adding the `--publish` flag will cause the deletion to be done against the registry used when publishing the package (see also `publishConfig.registry` and `npmPublishRegistry`).\\n\\n      Adding the `-A,--all` flag will cause the deletion to be done against all registries and scopes.\\n    \",examples:[[\"Logout of the default registry\",\"yarn npm logout\"],[\"Logout of the @my-scope scope\",\"yarn npm logout --scope my-scope\"],[\"Logout of the publish registry for the current package\",\"yarn npm logout --publish\"],[\"Logout of all registries\",\"yarn npm logout --all\"]]});var Ige=hE;function s9e(t,e){let r=t[e];if(!Se.isIndexableObject(r))return!1;let i=new Set(Object.keys(r));if([...wh].every(s=>!i.has(s)))return!1;for(let s of wh)i.delete(s);if(i.size===0)return t[e]=void 0,!0;let n=N({},r);for(let s of wh)delete n[s];return t[e]=n,!0}async function n9e(){let t=e=>{let r=!1,i=Se.isIndexableObject(e)?N({},e):{};i.npmAuthToken&&(delete i.npmAuthToken,r=!0);for(let n of Object.keys(i))s9e(i,n)&&(r=!0);if(Object.keys(i).length!==0)return r?i:e};return await ye.updateHomeConfiguration({npmRegistries:t,npmScopes:t})}async function Ege(t,e){return await ye.updateHomeConfiguration({[t]:r=>{let i=Se.isIndexableObject(r)?r:{};if(!Object.prototype.hasOwnProperty.call(i,e))return r;let n=i[e],s=Se.isIndexableObject(n)?n:{},o=new Set(Object.keys(s));if([...wh].every(l=>!o.has(l)))return r;for(let l of wh)o.delete(l);if(o.size===0)return Object.keys(i).length===1?void 0:te(N({},i),{[e]:void 0});let a={};for(let l of wh)a[l]=void 0;return te(N({},i),{[e]:N(N({},s),a)})}})}var pE=class extends Le{constructor(){super(...arguments);this.access=z.String(\"--access\",{description:\"The access for the published package (public or restricted)\"});this.tag=z.String(\"--tag\",\"latest\",{description:\"The tag on the registry that the package should be attached to\"});this.tolerateRepublish=z.Boolean(\"--tolerate-republish\",!1,{description:\"Warn and exit when republishing an already existing version of a package\"});this.otp=z.String(\"--otp\",{description:\"The OTP token to use with the command\"})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd);if(!i)throw new ht(r.cwd,this.context.cwd);if(i.manifest.private)throw new Pe(\"Private workspaces cannot be published\");if(i.manifest.name===null||i.manifest.version===null)throw new Pe(\"Workspaces must have valid names and versions to be published on an external registry\");await r.restoreInstallState();let n=i.manifest.name,s=i.manifest.version,o=br.getPublishRegistry(i.manifest,{configuration:e});return(await Je.start({configuration:e,stdout:this.context.stdout},async l=>{var c,u;if(this.tolerateRepublish)try{let g=await 
zt.get(zt.getIdentUrl(n),{configuration:e,registry:o,ident:n,jsonResponse:!0});if(!Object.prototype.hasOwnProperty.call(g,\"versions\"))throw new ct($.REMOTE_INVALID,'Registry returned invalid data for - missing \"versions\" field');if(Object.prototype.hasOwnProperty.call(g.versions,s)){l.reportWarning($.UNNAMED,`Registry already knows about version ${s}; skipping.`);return}}catch(g){if(((u=(c=g.originalError)==null?void 0:c.response)==null?void 0:u.statusCode)!==404)throw g}await Zt.maybeExecuteWorkspaceLifecycleScript(i,\"prepublish\",{report:l}),await vA.prepareForPack(i,{report:l},async()=>{let g=await vA.genPackList(i);for(let y of g)l.reportInfo(null,y);let f=await vA.genPackStream(i,g),h=await Se.bufferStream(f),p=await yh.getGitHead(i.cwd),m=await yh.makePublishBody(i,h,{access:this.access,tag:this.tag,registry:o,gitHead:p});await zt.put(zt.getIdentUrl(n),m,{configuration:e,registry:o,ident:n,otp:this.otp,jsonResponse:!0})}),l.reportInfo($.UNNAMED,\"Package archive published\")})).exitCode()}};pE.paths=[[\"npm\",\"publish\"]],pE.usage=Re.Usage({category:\"Npm-related commands\",description:\"publish the active workspace to the npm registry\",details:'\\n      This command will pack the active workspace into a fresh archive and upload it to the npm registry.\\n\\n      The package will by default be attached to the `latest` tag on the registry, but this behavior can be overriden by using the `--tag` option.\\n\\n      Note that for legacy reasons scoped packages are by default published with an access set to `restricted` (aka \"private packages\"). This requires you to register for a paid npm plan. In case you simply wish to publish a public scoped package to the registry (for free), just add the `--access public` flag. This behavior can be enabled by default through the `npmPublishAccess` settings.\\n    ',examples:[[\"Publish the active workspace\",\"yarn npm publish\"]]});var yge=pE;var Bge=ge(ti());var dE=class extends Le{constructor(){super(...arguments);this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"});this.package=z.String({required:!1})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd),n;if(typeof this.package!=\"undefined\")n=P.parseIdent(this.package);else{if(!i)throw new ht(r.cwd,this.context.cwd);if(!i.manifest.name)throw new Pe(`Missing 'name' field in ${j.fromPortablePath(k.join(i.cwd,Pt.manifest))}`);n=i.manifest.name}let s=await CE(n,e),a={children:Se.sortMap(Object.entries(s),([l])=>l).map(([l,c])=>({value:ae.tuple(ae.Type.RESOLUTION,{descriptor:P.makeDescriptor(n,l),locator:P.makeLocator(n,c)})}))};return as.emitTree(a,{configuration:e,json:this.json,stdout:this.context.stdout})}};dE.paths=[[\"npm\",\"tag\",\"list\"]],dE.usage=Re.Usage({category:\"Npm-related commands\",description:\"list all dist-tags of a package\",details:`\n      This command will list all tags of a package from the npm registry.\n\n      If the package is not specified, Yarn will default to the current workspace.\n    `,examples:[[\"List all tags of package `my-pkg`\",\"yarn npm tag list my-pkg\"]]});var wge=dE;async function CE(t,e){let r=`/-/package${zt.getIdentUrl(t)}/dist-tags`;return zt.get(r,{configuration:e,ident:t,jsonResponse:!0,customErrorMessage:zt.customPackageError})}var mE=class extends Le{constructor(){super(...arguments);this.package=z.String();this.tag=z.String()}async execute(){let e=await 
ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd);if(!i)throw new ht(r.cwd,this.context.cwd);let n=P.parseDescriptor(this.package,!0),s=n.range;if(!Bge.default.valid(s))throw new Pe(`The range ${ae.pretty(e,n.range,ae.Type.RANGE)} must be a valid semver version`);let o=br.getPublishRegistry(i.manifest,{configuration:e}),a=ae.pretty(e,n,ae.Type.IDENT),l=ae.pretty(e,s,ae.Type.RANGE),c=ae.pretty(e,this.tag,ae.Type.CODE);return(await Je.start({configuration:e,stdout:this.context.stdout},async g=>{let f=await CE(n,e);Object.prototype.hasOwnProperty.call(f,this.tag)&&f[this.tag]===s&&g.reportWarning($.UNNAMED,`Tag ${c} is already set to version ${l}`);let h=`/-/package${zt.getIdentUrl(n)}/dist-tags/${encodeURIComponent(this.tag)}`;await zt.put(h,s,{configuration:e,registry:o,ident:n,jsonRequest:!0,jsonResponse:!0}),g.reportInfo($.UNNAMED,`Tag ${c} added to version ${l} of package ${a}`)})).exitCode()}};mE.paths=[[\"npm\",\"tag\",\"add\"]],mE.usage=Re.Usage({category:\"Npm-related commands\",description:\"add a tag for a specific version of a package\",details:`\n      This command will add a tag to the npm registry for a specific version of a package. If the tag already exists, it will be overwritten.\n    `,examples:[[\"Add a `beta` tag for version `2.3.4-beta.4` of package `my-pkg`\",\"yarn npm tag add my-pkg@2.3.4-beta.4 beta\"]]});var bge=mE;var EE=class extends Le{constructor(){super(...arguments);this.package=z.String();this.tag=z.String()}async execute(){if(this.tag===\"latest\")throw new Pe(\"The 'latest' tag cannot be removed.\");let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd);if(!i)throw new ht(r.cwd,this.context.cwd);let n=P.parseIdent(this.package),s=br.getPublishRegistry(i.manifest,{configuration:e}),o=ae.pretty(e,this.tag,ae.Type.CODE),a=ae.pretty(e,n,ae.Type.IDENT),l=await CE(n,e);if(!Object.prototype.hasOwnProperty.call(l,this.tag))throw new Pe(`${o} is not a tag of package ${a}`);return(await Je.start({configuration:e,stdout:this.context.stdout},async u=>{let g=`/-/package${zt.getIdentUrl(n)}/dist-tags/${encodeURIComponent(this.tag)}`;await zt.del(g,{configuration:e,registry:s,ident:n,jsonResponse:!0}),u.reportInfo($.UNNAMED,`Tag ${o} removed from package ${a}`)})).exitCode()}};EE.paths=[[\"npm\",\"tag\",\"remove\"]],EE.usage=Re.Usage({category:\"Npm-related commands\",description:\"remove a tag from a package\",details:`\n      This command will remove a tag from a package from the npm registry.\n    `,examples:[[\"Remove the `beta` tag from package `my-pkg`\",\"yarn npm tag remove my-pkg beta\"]]});var Qge=EE;var IE=class extends Le{constructor(){super(...arguments);this.scope=z.String(\"-s,--scope\",{description:\"Print username for the registry configured for a given scope\"});this.publish=z.Boolean(\"--publish\",!1,{description:\"Print username for the publish registry\"})}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),r;return this.scope&&this.publish?r=br.getScopeRegistry(this.scope,{configuration:e,type:br.RegistryType.PUBLISH_REGISTRY}):this.scope?r=br.getScopeRegistry(this.scope,{configuration:e}):this.publish?r=br.getPublishRegistry((await Jf(e,this.context.cwd)).manifest,{configuration:e}):r=br.getDefaultRegistry({configuration:e}),(await Je.start({configuration:e,stdout:this.context.stdout},async n=>{var o,a;let s;try{s=await 
zt.get(\"/-/whoami\",{configuration:e,registry:r,authType:zt.AuthType.ALWAYS_AUTH,jsonResponse:!0,ident:this.scope?P.makeIdent(this.scope,\"\"):void 0})}catch(l){if(((o=l.response)==null?void 0:o.statusCode)===401||((a=l.response)==null?void 0:a.statusCode)===403){n.reportError($.AUTHENTICATION_INVALID,\"Authentication failed - your credentials may have expired\");return}else throw l}n.reportInfo($.UNNAMED,s.username)})).exitCode()}};IE.paths=[[\"npm\",\"whoami\"]],IE.usage=Re.Usage({category:\"Npm-related commands\",description:\"display the name of the authenticated user\",details:\"\\n      Print the username associated with the current authentication settings to the standard output.\\n\\n      When using `-s,--scope`, the username printed will be the one that matches the authentication settings of the registry associated with the given scope (those settings can be overriden using the `npmRegistries` map, and the registry associated with the scope is configured via the `npmScopes` map).\\n\\n      When using `--publish`, the registry we'll select will by default be the one used when publishing packages (`publishConfig.registry` or `npmPublishRegistry` if available, otherwise we'll fallback to the regular `npmRegistryServer`).\\n    \",examples:[[\"Print username for the default registry\",\"yarn npm whoami\"],[\"Print username for the registry on a given scope\",\"yarn npm whoami --scope company\"]]});var vge=IE;var o9e={configuration:{npmPublishAccess:{description:\"Default access of the published packages\",type:Ie.STRING,default:null}},commands:[pge,dge,mge,Ige,yge,bge,wge,Qge,vge]},a9e=o9e;var bO={};ft(bO,{default:()=>B9e,patchUtils:()=>CO});var CO={};ft(CO,{applyPatchFile:()=>qb,diffFolders:()=>yO,extractPackageToDisk:()=>IO,extractPatchFlags:()=>Nge,isParentRequired:()=>EO,loadPatchFiles:()=>bE,makeDescriptor:()=>I9e,makeLocator:()=>mO,parseDescriptor:()=>wE,parseLocator:()=>BE,parsePatchFile:()=>Yb});var yE=class extends Error{constructor(e,r){super(`Cannot apply hunk #${e+1}`);this.hunk=r}};var A9e=/^@@ -(\\d+)(,(\\d+))? \\+(\\d+)(,(\\d+))? @@.*/;function Bh(t){return k.relative(Me.root,k.resolve(Me.root,j.toPortablePath(t)))}function l9e(t){let e=t.trim().match(A9e);if(!e)throw new Error(`Bad header line: '${t}'`);return{original:{start:Math.max(Number(e[1]),1),length:Number(e[3]||1)},patched:{start:Math.max(Number(e[4]),1),length:Number(e[6]||1)}}}var c9e=420,u9e=493,Xr;(function(i){i.Context=\"context\",i.Insertion=\"insertion\",i.Deletion=\"deletion\"})(Xr||(Xr={}));var Sge=()=>({semverExclusivity:null,diffLineFromPath:null,diffLineToPath:null,oldMode:null,newMode:null,deletedFileMode:null,newFileMode:null,renameFrom:null,renameTo:null,beforeHash:null,afterHash:null,fromPath:null,toPath:null,hunks:null}),g9e=t=>({header:l9e(t),parts:[]}),f9e={[\"@\"]:\"header\",[\"-\"]:Xr.Deletion,[\"+\"]:Xr.Insertion,[\" \"]:Xr.Context,[\"\\\\\"]:\"pragma\",undefined:Xr.Context};function p9e(t){let e=[],r=Sge(),i=\"parsing header\",n=null,s=null;function o(){n&&(s&&(n.parts.push(s),s=null),r.hunks.push(n),n=null)}function a(){o(),e.push(r),r=Sge()}for(let l=0;l<t.length;l++){let c=t[l];if(i===\"parsing header\")if(c.startsWith(\"@@\"))i=\"parsing hunks\",r.hunks=[],l-=1;else if(c.startsWith(\"diff --git \")){r&&r.diffLineFromPath&&a();let u=c.match(/^diff --git a\\/(.*?) 
b\\/(.*?)\\s*$/);if(!u)throw new Error(`Bad diff line: ${c}`);r.diffLineFromPath=u[1],r.diffLineToPath=u[2]}else if(c.startsWith(\"old mode \"))r.oldMode=c.slice(\"old mode \".length).trim();else if(c.startsWith(\"new mode \"))r.newMode=c.slice(\"new mode \".length).trim();else if(c.startsWith(\"deleted file mode \"))r.deletedFileMode=c.slice(\"deleted file mode \".length).trim();else if(c.startsWith(\"new file mode \"))r.newFileMode=c.slice(\"new file mode \".length).trim();else if(c.startsWith(\"rename from \"))r.renameFrom=c.slice(\"rename from \".length).trim();else if(c.startsWith(\"rename to \"))r.renameTo=c.slice(\"rename to \".length).trim();else if(c.startsWith(\"index \")){let u=c.match(/(\\w+)\\.\\.(\\w+)/);if(!u)continue;r.beforeHash=u[1],r.afterHash=u[2]}else c.startsWith(\"semver exclusivity \")?r.semverExclusivity=c.slice(\"semver exclusivity \".length).trim():c.startsWith(\"--- \")?r.fromPath=c.slice(\"--- a/\".length).trim():c.startsWith(\"+++ \")&&(r.toPath=c.slice(\"+++ b/\".length).trim());else{let u=f9e[c[0]]||null;switch(u){case\"header\":o(),n=g9e(c);break;case null:i=\"parsing header\",a(),l-=1;break;case\"pragma\":{if(!c.startsWith(\"\\\\ No newline at end of file\"))throw new Error(`Unrecognized pragma in patch file: ${c}`);if(!s)throw new Error(\"Bad parser state: No newline at EOF pragma encountered without context\");s.noNewlineAtEndOfFile=!0}break;case Xr.Context:case Xr.Deletion:case Xr.Insertion:{if(!n)throw new Error(\"Bad parser state: Hunk lines encountered before hunk header\");s&&s.type!==u&&(n.parts.push(s),s=null),s||(s={type:u,lines:[],noNewlineAtEndOfFile:!1}),s.lines.push(c.slice(1))}break;default:Se.assertNever(u);break}}}a();for(let{hunks:l}of e)if(l)for(let c of l)h9e(c);return e}function d9e(t){let e=[];for(let r of t){let{semverExclusivity:i,diffLineFromPath:n,diffLineToPath:s,oldMode:o,newMode:a,deletedFileMode:l,newFileMode:c,renameFrom:u,renameTo:g,beforeHash:f,afterHash:h,fromPath:p,toPath:m,hunks:y}=r,Q=u?\"rename\":l?\"file deletion\":c?\"file creation\":y&&y.length>0?\"patch\":\"mode change\",S=null;switch(Q){case\"rename\":{if(!u||!g)throw new Error(\"Bad parser state: rename from & to not given\");e.push({type:\"rename\",semverExclusivity:i,fromPath:Bh(u),toPath:Bh(g)}),S=g}break;case\"file deletion\":{let x=n||p;if(!x)throw new Error(\"Bad parse state: no path given for file deletion\");e.push({type:\"file deletion\",semverExclusivity:i,hunk:y&&y[0]||null,path:Bh(x),mode:Gb(l),hash:f})}break;case\"file creation\":{let x=s||m;if(!x)throw new Error(\"Bad parse state: no path given for file creation\");e.push({type:\"file creation\",semverExclusivity:i,hunk:y&&y[0]||null,path:Bh(x),mode:Gb(c),hash:h})}break;case\"patch\":case\"mode change\":S=m||s;break;default:Se.assertNever(Q);break}S&&o&&a&&o!==a&&e.push({type:\"mode change\",semverExclusivity:i,path:Bh(S),oldMode:Gb(o),newMode:Gb(a)}),S&&y&&y.length&&e.push({type:\"patch\",semverExclusivity:i,path:Bh(S),hunks:y,beforeHash:f,afterHash:h})}if(e.length===0)throw new Error(\"Unable to parse patch file: No changes found. 
Make sure the patch is a valid UTF8 encoded string\");return e}function Gb(t){let e=parseInt(t,8)&511;if(e!==c9e&&e!==u9e)throw new Error(`Unexpected file mode string: ${t}`);return e}function Yb(t){let e=t.split(/\\n/g);return e[e.length-1]===\"\"&&e.pop(),d9e(p9e(e))}function h9e(t){let e=0,r=0;for(let{type:i,lines:n}of t.parts)switch(i){case Xr.Context:r+=n.length,e+=n.length;break;case Xr.Deletion:e+=n.length;break;case Xr.Insertion:r+=n.length;break;default:Se.assertNever(i);break}if(e!==t.header.original.length||r!==t.header.patched.length){let i=n=>n<0?n:`+${n}`;throw new Error(`hunk header integrity check failed (expected @@ ${i(t.header.original.length)} ${i(t.header.patched.length)} @@, got @@ ${i(e)} ${i(r)} @@)`)}}async function bh(t,e,r){let i=await t.lstatPromise(e),n=await r();if(typeof n!=\"undefined\"&&(e=n),t.lutimesPromise)await t.lutimesPromise(e,i.atime,i.mtime);else if(!i.isSymbolicLink())await t.utimesPromise(e,i.atime,i.mtime);else throw new Error(\"Cannot preserve the time values of a symlink\")}async function qb(t,{baseFs:e=new ar,dryRun:r=!1,version:i=null}={}){for(let n of t)if(!(n.semverExclusivity!==null&&i!==null&&!Wt.satisfiesWithPrereleases(i,n.semverExclusivity)))switch(n.type){case\"file deletion\":if(r){if(!e.existsSync(n.path))throw new Error(`Trying to delete a file that doesn't exist: ${n.path}`)}else await bh(e,k.dirname(n.path),async()=>{await e.unlinkPromise(n.path)});break;case\"rename\":if(r){if(!e.existsSync(n.fromPath))throw new Error(`Trying to move a file that doesn't exist: ${n.fromPath}`)}else await bh(e,k.dirname(n.fromPath),async()=>{await bh(e,k.dirname(n.toPath),async()=>{await bh(e,n.fromPath,async()=>(await e.movePromise(n.fromPath,n.toPath),n.toPath))})});break;case\"file creation\":if(r){if(e.existsSync(n.path))throw new Error(`Trying to create a file that already exists: ${n.path}`)}else{let s=n.hunk?n.hunk.parts[0].lines.join(`\n`)+(n.hunk.parts[0].noNewlineAtEndOfFile?\"\":`\n`):\"\";await e.mkdirpPromise(k.dirname(n.path),{chmod:493,utimes:[Dr.SAFE_TIME,Dr.SAFE_TIME]}),await e.writeFilePromise(n.path,s,{mode:n.mode}),await e.utimesPromise(n.path,Dr.SAFE_TIME,Dr.SAFE_TIME)}break;case\"patch\":await bh(e,n.path,async()=>{await C9e(n,{baseFs:e,dryRun:r})});break;case\"mode change\":{let o=(await e.statPromise(n.path)).mode;if(kge(n.newMode)!==kge(o))continue;await bh(e,n.path,async()=>{await e.chmodPromise(n.path,n.newMode)})}break;default:Se.assertNever(n);break}}function kge(t){return(t&64)>0}function xge(t){return t.replace(/\\s+$/,\"\")}function m9e(t,e){return xge(t)===xge(e)}async function C9e({hunks:t,path:e},{baseFs:r,dryRun:i=!1}){let n=await r.statSync(e).mode,o=(await r.readFileSync(e,\"utf8\")).split(/\\n/),a=[],l=0,c=0;for(let g of t){let f=Math.max(c,g.header.patched.start+l),h=Math.max(0,f-c),p=Math.max(0,o.length-f-g.header.original.length),m=Math.max(h,p),y=0,Q=0,S=null;for(;y<=m;){if(y<=h&&(Q=f-y,S=Pge(g,o,Q),S!==null)){y=-y;break}if(y<=p&&(Q=f+y,S=Pge(g,o,Q),S!==null))break;y+=1}if(S===null)throw new yE(t.indexOf(g),g);a.push(S),l+=y,c=Q+g.header.original.length}if(i)return;let u=0;for(let g of a)for(let f of g)switch(f.type){case\"splice\":{let h=f.index+u;o.splice(h,f.numToDelete,...f.linesToInsert),u+=f.linesToInsert.length-f.numToDelete}break;case\"pop\":o.pop();break;case\"push\":o.push(f.line);break;default:Se.assertNever(f);break}await r.writeFilePromise(e,o.join(`\n`),{mode:n})}function Pge(t,e,r){let i=[];for(let n of t.parts)switch(n.type){case Xr.Context:case Xr.Deletion:{for(let s of n.lines){let 
o=e[r];if(o==null||!m9e(o,s))return null;r+=1}n.type===Xr.Deletion&&(i.push({type:\"splice\",index:r-n.lines.length,numToDelete:n.lines.length,linesToInsert:[]}),n.noNewlineAtEndOfFile&&i.push({type:\"push\",line:\"\"}))}break;case Xr.Insertion:i.push({type:\"splice\",index:r,numToDelete:0,linesToInsert:n.lines}),n.noNewlineAtEndOfFile&&i.push({type:\"pop\"});break;default:Se.assertNever(n.type);break}return i}var E9e=/^builtin<([^>]+)>$/;function Dge(t,e){let{source:r,selector:i,params:n}=P.parseRange(t);if(r===null)throw new Error(\"Patch locators must explicitly define their source\");let s=i?i.split(/&/).map(c=>j.toPortablePath(c)):[],o=n&&typeof n.locator==\"string\"?P.parseLocator(n.locator):null,a=n&&typeof n.version==\"string\"?n.version:null,l=e(r);return{parentLocator:o,sourceItem:l,patchPaths:s,sourceVersion:a}}function wE(t){let i=Dge(t.range,P.parseDescriptor),{sourceItem:e}=i,r=Tr(i,[\"sourceItem\"]);return te(N({},r),{sourceDescriptor:e})}function BE(t){let i=Dge(t.reference,P.parseLocator),{sourceItem:e}=i,r=Tr(i,[\"sourceItem\"]);return te(N({},r),{sourceLocator:e})}function Rge({parentLocator:t,sourceItem:e,patchPaths:r,sourceVersion:i,patchHash:n},s){let o=t!==null?{locator:P.stringifyLocator(t)}:{},a=typeof i!=\"undefined\"?{version:i}:{},l=typeof n!=\"undefined\"?{hash:n}:{};return P.makeRange({protocol:\"patch:\",source:s(e),selector:r.join(\"&\"),params:N(N(N({},a),l),o)})}function I9e(t,{parentLocator:e,sourceDescriptor:r,patchPaths:i}){return P.makeLocator(t,Rge({parentLocator:e,sourceItem:r,patchPaths:i},P.stringifyDescriptor))}function mO(t,{parentLocator:e,sourcePackage:r,patchPaths:i,patchHash:n}){return P.makeLocator(t,Rge({parentLocator:e,sourceItem:r,sourceVersion:r.version,patchPaths:i,patchHash:n},P.stringifyLocator))}function Fge({onAbsolute:t,onRelative:e,onBuiltin:r},i){i.startsWith(\"~\")&&(i=i.slice(1));let s=i.match(E9e);return s!==null?r(s[1]):k.isAbsolute(i)?t(i):e(i)}function Nge(t){let e=t.startsWith(\"~\");return e&&(t=t.slice(1)),{optional:e}}function EO(t){return Fge({onAbsolute:()=>!1,onRelative:()=>!0,onBuiltin:()=>!1},t)}async function bE(t,e,r){let i=t!==null?await r.fetcher.fetch(t,r):null,n=i&&i.localPath?{packageFs:new _t(Me.root),prefixPath:k.relative(Me.root,i.localPath)}:i;i&&i!==n&&i.releaseFs&&i.releaseFs();let s=await Se.releaseAfterUseAsync(async()=>await Promise.all(e.map(async o=>{let a=Nge(o),l=await Fge({onAbsolute:async()=>await K.readFilePromise(o,\"utf8\"),onRelative:async()=>{if(n===null)throw new Error(\"Assertion failed: The parent locator should have been fetched\");return await n.packageFs.readFilePromise(k.join(n.prefixPath,o),\"utf8\")},onBuiltin:async c=>await r.project.configuration.firstHook(u=>u.getBuiltinPatch,r.project,c)},o);return te(N({},a),{source:l})})));for(let o of s)typeof o.source==\"string\"&&(o.source=o.source.replace(/\\r\\n?/g,`\n`));return s}async function IO(t,{cache:e,project:r}){let i=r.storedPackages.get(t.locatorHash);if(typeof i==\"undefined\")throw new Error(\"Assertion failed: Expected the package to be registered\");let n=r.storedChecksums,s=new pi,o=r.configuration.makeFetcher(),a=await o.fetch(t,{cache:e,project:r,fetcher:o,checksums:n,report:s}),l=await K.mktempPromise(),c=k.join(l,\"source\"),u=k.join(l,\"user\"),g=k.join(l,\".yarn-patch.json\");return await Promise.all([K.copyPromise(c,a.prefixPath,{baseFs:a.packageFs}),K.copyPromise(u,a.prefixPath,{baseFs:a.packageFs}),K.writeJsonPromise(g,{locator:P.stringifyLocator(t),version:i.version})]),K.detachTemp(l),u}async function 
yO(t,e){let r=j.fromPortablePath(t).replace(/\\\\/g,\"/\"),i=j.fromPortablePath(e).replace(/\\\\/g,\"/\"),{stdout:n,stderr:s}=await Fr.execvp(\"git\",[\"-c\",\"core.safecrlf=false\",\"diff\",\"--src-prefix=a/\",\"--dst-prefix=b/\",\"--ignore-cr-at-eol\",\"--full-index\",\"--no-index\",\"--text\",r,i],{cwd:j.toPortablePath(process.cwd()),env:te(N({},process.env),{GIT_CONFIG_NOSYSTEM:\"1\",HOME:\"\",XDG_CONFIG_HOME:\"\",USERPROFILE:\"\"})});if(s.length>0)throw new Error(`Unable to diff directories. Make sure you have a recent version of 'git' available in PATH.\nThe following error was reported by 'git':\n${s}`);let o=r.startsWith(\"/\")?a=>a.slice(1):a=>a;return n.replace(new RegExp(`(a|b)(${Se.escapeRegExp(`/${o(r)}/`)})`,\"g\"),\"$1/\").replace(new RegExp(`(a|b)${Se.escapeRegExp(`/${o(i)}/`)}`,\"g\"),\"$1/\").replace(new RegExp(Se.escapeRegExp(`${r}/`),\"g\"),\"\").replace(new RegExp(Se.escapeRegExp(`${i}/`),\"g\"),\"\")}function Lge(t,{configuration:e,report:r}){for(let i of t.parts)for(let n of i.lines)switch(i.type){case Xr.Context:r.reportInfo(null,`  ${ae.pretty(e,n,\"grey\")}`);break;case Xr.Deletion:r.reportError($.FROZEN_LOCKFILE_EXCEPTION,`- ${ae.pretty(e,n,ae.Type.REMOVED)}`);break;case Xr.Insertion:r.reportError($.FROZEN_LOCKFILE_EXCEPTION,`+ ${ae.pretty(e,n,ae.Type.ADDED)}`);break;default:Se.assertNever(i.type)}}var wO=class{supports(e,r){return!!e.reference.startsWith(\"patch:\")}getLocalPath(e,r){return null}async fetch(e,r){let i=r.checksums.get(e.locatorHash)||null,[n,s,o]=await r.cache.fetchPackageFromCache(e,i,N({onHit:()=>r.report.reportCacheHit(e),onMiss:()=>r.report.reportCacheMiss(e,`${P.prettyLocator(r.project.configuration,e)} can't be found in the cache and will be fetched from the disk`),loader:()=>this.patchPackage(e,r),skipIntegrityCheck:r.skipIntegrityCheck},r.cacheOptions));return{packageFs:n,releaseFs:s,prefixPath:P.getIdentVendorPath(e),localPath:this.getLocalPath(e,r),checksum:o}}async patchPackage(e,r){let{parentLocator:i,sourceLocator:n,sourceVersion:s,patchPaths:o}=BE(e),a=await bE(i,o,r),l=await K.mktempPromise(),c=k.join(l,\"current.zip\"),u=await r.fetcher.fetch(n,r),g=P.getIdentVendorPath(e),f=await fn(),h=new Ai(c,{libzip:f,create:!0,level:r.project.configuration.get(\"compressionLevel\")});await Se.releaseAfterUseAsync(async()=>{await h.copyPromise(g,u.prefixPath,{baseFs:u.packageFs,stableSort:!0})},u.releaseFs),h.saveAndClose();for(let{source:p,optional:m}of a){if(p===null)continue;let y=new Ai(c,{libzip:f,level:r.project.configuration.get(\"compressionLevel\")}),Q=new _t(k.resolve(Me.root,g),{baseFs:y});try{await qb(Yb(p),{baseFs:Q,version:s})}catch(S){if(!(S instanceof yE))throw S;let x=r.project.configuration.get(\"enableInlineHunks\"),M=!x&&!m?\" (set enableInlineHunks for details)\":\"\",Y=`${P.prettyLocator(r.project.configuration,e)}: ${S.message}${M}`,U=J=>{!x||Lge(S.hunk,{configuration:r.project.configuration,report:J})};if(y.discardAndClose(),m){r.report.reportWarningOnce($.PATCH_HUNK_FAILED,Y,{reportExtra:U});continue}else throw new ct($.PATCH_HUNK_FAILED,Y,U)}y.saveAndClose()}return new Ai(c,{libzip:f,level:r.project.configuration.get(\"compressionLevel\")})}};var y9e=3,BO=class{supportsDescriptor(e,r){return!!e.range.startsWith(\"patch:\")}supportsLocator(e,r){return!!e.reference.startsWith(\"patch:\")}shouldPersistResolution(e,r){return!1}bindDescriptor(e,r,i){let{patchPaths:n}=wE(e);return 
n.every(s=>!EO(s))?e:P.bindDescriptor(e,{locator:P.stringifyLocator(r)})}getResolutionDependencies(e,r){let{sourceDescriptor:i}=wE(e);return[i]}async getCandidates(e,r,i){if(!i.fetchOptions)throw new Error(\"Assertion failed: This resolver cannot be used unless a fetcher is configured\");let{parentLocator:n,sourceDescriptor:s,patchPaths:o}=wE(e),a=await bE(n,o,i.fetchOptions),l=r.get(s.descriptorHash);if(typeof l==\"undefined\")throw new Error(\"Assertion failed: The dependency should have been resolved\");let c=Dn.makeHash(`${y9e}`,...a.map(u=>JSON.stringify(u))).slice(0,6);return[mO(e,{parentLocator:n,sourcePackage:l,patchPaths:o,patchHash:c})]}async getSatisfying(e,r,i){return null}async resolve(e,r){let{sourceLocator:i}=BE(e),n=await r.resolver.resolve(i,r);return N(N({},n),e)}};var QE=class extends Le{constructor(){super(...arguments);this.save=z.Boolean(\"-s,--save\",!1,{description:\"Add the patch to your resolution entries\"});this.patchFolder=z.String()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd);if(!i)throw new ht(r.cwd,this.context.cwd);await r.restoreInstallState();let n=k.resolve(this.context.cwd,j.toPortablePath(this.patchFolder)),s=k.join(n,\"../source\"),o=k.join(n,\"../.yarn-patch.json\");if(!K.existsSync(s))throw new Pe(\"The argument folder didn't get created by 'yarn patch'\");let a=await yO(s,n),l=await K.readJsonPromise(o),c=P.parseLocator(l.locator,!0);if(!r.storedPackages.has(c.locatorHash))throw new Pe(\"No package found in the project for the given locator\");if(!this.save){this.context.stdout.write(a);return}let u=e.get(\"patchFolder\"),g=k.join(u,`${P.slugifyLocator(c)}.patch`);await K.mkdirPromise(u,{recursive:!0}),await K.writeFilePromise(g,a);let f=k.relative(r.cwd,g);r.topLevelWorkspace.manifest.resolutions.push({pattern:{descriptor:{fullName:P.stringifyIdent(c),description:l.version}},reference:`patch:${P.stringifyLocator(c)}#${f}`}),await r.persist()}};QE.paths=[[\"patch-commit\"]],QE.usage=Re.Usage({description:\"generate a patch out of a directory\",details:\"\\n      By default, this will print a patchfile on stdout based on the diff between the folder passed in and the original version of the package. Such file is suitable for consumption with the `patch:` protocol.\\n\\n      With the `-s,--save` option set, the patchfile won't be printed on stdout anymore and will instead be stored within a local file (by default kept within `.yarn/patches`, but configurable via the `patchFolder` setting). 
A `resolutions` entry will also be added to your top-level manifest, referencing the patched package via the `patch:` protocol.\\n\\n      Note that only folders generated by `yarn patch` are accepted as valid input for `yarn patch-commit`.\\n    \"});var Tge=QE;var vE=class extends Le{constructor(){super(...arguments);this.json=z.Boolean(\"--json\",!1,{description:\"Format the output as an NDJSON stream\"});this.package=z.String()}async execute(){let e=await ye.find(this.context.cwd,this.context.plugins),{project:r,workspace:i}=await ze.find(e,this.context.cwd),n=await Nt.find(e);if(!i)throw new ht(r.cwd,this.context.cwd);await r.restoreInstallState();let s=P.parseLocator(this.package);if(s.reference===\"unknown\"){let o=Se.mapAndFilter([...r.storedPackages.values()],a=>a.identHash!==s.identHash?Se.mapAndFilter.skip:P.isVirtualLocator(a)?Se.mapAndFilter.skip:a);if(o.length===0)throw new Pe(\"No package found in the project for the given locator\");if(o.length>1)throw new Pe(`Multiple candidate packages found; explicitly choose one of them (use \\`yarn why <package>\\` to get more information as to who depends on them):\n${o.map(a=>`\n- ${P.prettyLocator(e,a)}`).join(\"\")}`);s=o[0]}if(!r.storedPackages.has(s.locatorHash))throw new Pe(\"No package found in the project for the given locator\");await Je.start({configuration:e,json:this.json,stdout:this.context.stdout},async o=>{let a=await IO(s,{cache:n,project:r});o.reportJson({locator:P.stringifyLocator(s),path:j.fromPortablePath(a)}),o.reportInfo($.UNNAMED,`Package ${P.prettyLocator(e,s)} got extracted with success!`),o.reportInfo($.UNNAMED,`You can now edit the following folder: ${ae.pretty(e,j.fromPortablePath(a),\"magenta\")}`),o.reportInfo($.UNNAMED,`Once you are done run ${ae.pretty(e,`yarn patch-commit -s ${process.platform===\"win32\"?'\"':\"\"}${j.fromPortablePath(a)}${process.platform===\"win32\"?'\"':\"\"}`,\"cyan\")} and Yarn will store a patchfile based on your changes.`)})}};vE.paths=[[\"patch\"]],vE.usage=Re.Usage({description:\"prepare a package for patching\",details:\"\\n      This command will cause a package to be extracted in a temporary directory intended to be editable at will.\\n      \\n      Once you're done with your changes, run `yarn patch-commit -s <path>` (with `<path>` being the temporary directory you received) to generate a patchfile and register it into your top-level manifest via the `patch:` protocol. 
Run `yarn patch-commit -h` for more details.\\n    \"});var Oge=vE;var w9e={configuration:{enableInlineHunks:{description:\"If true, the installs will print unmatched patch hunks\",type:Ie.BOOLEAN,default:!1},patchFolder:{description:\"Folder where the patch files must be written\",type:Ie.ABSOLUTE_PATH,default:\"./.yarn/patches\"}},commands:[Tge,Oge],fetchers:[wO],resolvers:[BO]},B9e=w9e;var kO={};ft(kO,{default:()=>v9e});var QO=class{supportsPackage(e,r){return this.isEnabled(r)}async findPackageLocation(e,r){if(!this.isEnabled(r))throw new Error(\"Assertion failed: Expected the pnpm linker to be enabled\");let i=vO(),n=r.project.installersCustomData.get(i);if(!n)throw new Pe(`The project in ${ae.pretty(r.project.configuration,`${r.project.cwd}/package.json`,ae.Type.PATH)} doesn't seem to have been installed - running an install there might help`);let s=n.pathByLocator.get(e.locatorHash);if(typeof s==\"undefined\")throw new Pe(`Couldn't find ${P.prettyLocator(r.project.configuration,e)} in the currently installed pnpm map - running an install might help`);return s}async findPackageLocator(e,r){if(!this.isEnabled(r))return null;let i=vO(),n=r.project.installersCustomData.get(i);if(!n)throw new Pe(`The project in ${ae.pretty(r.project.configuration,`${r.project.cwd}/package.json`,ae.Type.PATH)} doesn't seem to have been installed - running an install there might help`);let s=e.match(/(^.*\\/node_modules\\/(@[^/]*\\/)?[^/]+)(\\/.*$)/);if(s){let l=n.locatorByPath.get(s[1]);if(l)return l}let o=e,a=e;do{a=o,o=k.dirname(a);let l=n.locatorByPath.get(a);if(l)return l}while(o!==a);return null}makeInstaller(e){return new Mge(e)}isEnabled(e){return e.project.configuration.get(\"nodeLinker\")===\"pnpm\"}},Mge=class{constructor(e){this.opts=e;this.asyncActions=new Se.AsyncActions(10);this.customData={pathByLocator:new Map,locatorByPath:new Map}}getCustomDataKey(){return vO()}attachCustomData(e){}async installPackage(e,r,i){switch(e.linkType){case Qt.SOFT:return this.installPackageSoft(e,r,i);case Qt.HARD:return this.installPackageHard(e,r,i)}throw new Error(\"Assertion failed: Unsupported package link type\")}async installPackageSoft(e,r,i){let n=k.resolve(r.packageFs.getRealPath(),r.prefixPath);return this.customData.pathByLocator.set(e.locatorHash,n),{packageLocation:n,buildDirective:null}}async installPackageHard(e,r,i){var u;let n=b9e(e,{project:this.opts.project});this.customData.locatorByPath.set(n,P.stringifyLocator(e)),this.customData.pathByLocator.set(e.locatorHash,n),i.holdFetchResult(this.asyncActions.set(e.locatorHash,async()=>{await K.mkdirPromise(n,{recursive:!0}),await K.copyPromise(n,r.prefixPath,{baseFs:r.packageFs,overwrite:!1})}));let o=P.isVirtualLocator(e)?P.devirtualizeLocator(e):e,a={manifest:(u=await At.tryFind(r.prefixPath,{baseFs:r.packageFs}))!=null?u:new At,misc:{hasBindingGyp:wo.hasBindingGyp(r)}},l=this.opts.project.getDependencyMeta(o,e.version),c=wo.extractBuildScripts(e,a,l,{configuration:this.opts.project.configuration,report:this.opts.report});return{packageLocation:n,buildDirective:c}}async attachInternalDependencies(e,r){this.opts.project.configuration.get(\"nodeLinker\")===\"pnpm\"&&(!Hge(e,{project:this.opts.project})||this.asyncActions.reduce(e.locatorHash,async i=>{await i;let n=this.customData.pathByLocator.get(e.locatorHash);if(typeof n==\"undefined\")throw new Error(`Assertion failed: Expected the package to have been registered (${P.stringifyLocator(e)})`);let s=k.join(n,Pt.nodeModules),o=[],a=await jge(s);for(let[l,c]of r){let 
u=c;Hge(c,{project:this.opts.project})||(this.opts.report.reportWarning($.UNNAMED,\"The pnpm linker doesn't support providing different versions to workspaces' peer dependencies\"),u=P.devirtualizeLocator(c));let g=this.customData.pathByLocator.get(u.locatorHash);if(typeof g==\"undefined\")throw new Error(`Assertion failed: Expected the package to have been registered (${P.stringifyLocator(c)})`);let f=P.stringifyIdent(l),h=k.join(s,f),p=k.relative(k.dirname(h),g),m=a.get(f);a.delete(f),o.push(Promise.resolve().then(async()=>{if(m){if(m.isSymbolicLink()&&await K.readlinkPromise(h)===p)return;await K.removePromise(h)}await K.mkdirpPromise(k.dirname(h)),process.platform==\"win32\"?await K.symlinkPromise(g,h,\"junction\"):await K.symlinkPromise(p,h)}))}o.push(Gge(s,a)),await Promise.all(o)}))}async attachExternalDependents(e,r){throw new Error(\"External dependencies haven't been implemented for the pnpm linker\")}async finalizeInstall(){let e=Kge(this.opts.project);if(this.opts.project.configuration.get(\"nodeLinker\")!==\"pnpm\")await K.removePromise(e);else{let r=[],i=new Set;for(let s of this.customData.pathByLocator.values()){let o=k.contains(e,s);if(o!==null){let[a,,...l]=o.split(k.sep);i.add(a);let c=k.join(e,a);r.push(K.readdirPromise(c).then(u=>Promise.all(u.map(async g=>{let f=k.join(c,g);if(g===Pt.nodeModules){let h=await jge(f);return h.delete(l.join(k.sep)),Gge(f,h)}else return K.removePromise(f)}))).catch(u=>{if(u.code!==\"ENOENT\")throw u}))}}let n;try{n=await K.readdirPromise(e)}catch{n=[]}for(let s of n)i.has(s)||r.push(K.removePromise(k.join(e,s)));await Promise.all(r)}return await this.asyncActions.wait(),await SO(e),await SO(Uge(this.opts.project)),{customData:this.customData}}};function vO(){return JSON.stringify({name:\"PnpmInstaller\",version:2})}function Uge(t){return k.join(t.cwd,Pt.nodeModules)}function Kge(t){return k.join(Uge(t),\".store\")}function b9e(t,{project:e}){let r=P.slugifyLocator(t),i=P.getIdentVendorPath(t);return k.join(Kge(e),r,i)}function Hge(t,{project:e}){return!P.isVirtualLocator(t)||!e.tryWorkspaceByLocator(t)}async function jge(t){let e=new Map,r=[];try{r=await K.readdirPromise(t,{withFileTypes:!0})}catch(i){if(i.code!==\"ENOENT\")throw i}try{for(let i of r)if(!i.name.startsWith(\".\"))if(i.name.startsWith(\"@\")){let n=await K.readdirPromise(k.join(t,i.name),{withFileTypes:!0});if(n.length===0)e.set(i.name,i);else for(let s of n)e.set(`${i.name}/${s.name}`,s)}else e.set(i.name,i)}catch(i){if(i.code!==\"ENOENT\")throw i}return e}async function Gge(t,e){var n;let r=[],i=new Set;for(let s of e.keys()){r.push(K.removePromise(k.join(t,s)));let o=(n=P.tryParseIdent(s))==null?void 0:n.scope;o&&i.add(`@${o}`)}return Promise.all(r).then(()=>Promise.all([...i].map(s=>SO(k.join(t,s)))))}async function SO(t){try{await K.rmdirPromise(t)}catch(e){if(e.code!==\"ENOENT\"&&e.code!==\"ENOTEMPTY\")throw e}}var Q9e={linkers:[QO]},v9e=Q9e;var J0=()=>({modules:new 
Map([[\"@yarnpkg/cli\",_C],[\"@yarnpkg/core\",QC],[\"@yarnpkg/fslib\",Zh],[\"@yarnpkg/libzip\",Md],[\"@yarnpkg/parsers\",op],[\"@yarnpkg/shell\",Kd],[\"clipanion\",c$(Cp)],[\"semver\",S9e],[\"typanion\",sg],[\"yup\",k9e],[\"@yarnpkg/plugin-essentials\",AL],[\"@yarnpkg/plugin-compat\",gL],[\"@yarnpkg/plugin-dlx\",fL],[\"@yarnpkg/plugin-file\",wL],[\"@yarnpkg/plugin-git\",aL],[\"@yarnpkg/plugin-github\",bL],[\"@yarnpkg/plugin-http\",SL],[\"@yarnpkg/plugin-init\",DL],[\"@yarnpkg/plugin-link\",TL],[\"@yarnpkg/plugin-nm\",gT],[\"@yarnpkg/plugin-npm\",uO],[\"@yarnpkg/plugin-npm-cli\",dO],[\"@yarnpkg/plugin-pack\",aO],[\"@yarnpkg/plugin-patch\",bO],[\"@yarnpkg/plugin-pnp\",eT],[\"@yarnpkg/plugin-pnpm\",kO]]),plugins:new Set([\"@yarnpkg/plugin-essentials\",\"@yarnpkg/plugin-compat\",\"@yarnpkg/plugin-dlx\",\"@yarnpkg/plugin-file\",\"@yarnpkg/plugin-git\",\"@yarnpkg/plugin-github\",\"@yarnpkg/plugin-http\",\"@yarnpkg/plugin-init\",\"@yarnpkg/plugin-link\",\"@yarnpkg/plugin-nm\",\"@yarnpkg/plugin-npm\",\"@yarnpkg/plugin-npm-cli\",\"@yarnpkg/plugin-pack\",\"@yarnpkg/plugin-patch\",\"@yarnpkg/plugin-pnp\",\"@yarnpkg/plugin-pnpm\"])});d0({binaryVersion:Ur||\"<unknown>\",pluginConfiguration:J0()});})();\n/*!\n * buildToken\n * Builds OAuth token prefix (helper function)\n *\n * @name buildToken\n * @function\n * @param {GitUrl} obj The parsed Git url object.\n * @return {String} token prefix\n */\n/*!\n * fill-range <https://github.com/jonschlinkert/fill-range>\n *\n * Copyright (c) 2014-present, Jon Schlinkert.\n * Licensed under the MIT License.\n */\n/*!\n * is-extglob <https://github.com/jonschlinkert/is-extglob>\n *\n * Copyright (c) 2014-2016, Jon Schlinkert.\n * Licensed under the MIT License.\n */\n/*!\n * is-glob <https://github.com/jonschlinkert/is-glob>\n *\n * Copyright (c) 2014-2017, Jon Schlinkert.\n * Released under the MIT License.\n */\n/*!\n * is-number <https://github.com/jonschlinkert/is-number>\n *\n * Copyright (c) 2014-present, Jon Schlinkert.\n * Released under the MIT License.\n */\n/*!\n * is-windows <https://github.com/jonschlinkert/is-windows>\n *\n * Copyright © 2015-2018, Jon Schlinkert.\n * Released under the MIT License.\n */\n/*!\n * to-regex-range <https://github.com/micromatch/to-regex-range>\n *\n * Copyright (c) 2015-present, Jon Schlinkert.\n * Released under the MIT License.\n */\n"
  },
  {
    "path": "services/workbench2/.yarnrc",
    "content": "save-prefix false\n"
  },
  {
    "path": "services/workbench2/.yarnrc.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nyarnPath: .yarn/releases/yarn-3.2.0.cjs\nnodeLinker: node-modules\n"
  },
  {
    "path": "services/workbench2/AUTHORS",
    "content": "# Names should be added to this file with this pattern:\n#\n# For individuals:\n#   Name <email address>\n#\n# For organizations:\n#   Organization <fnmatch pattern>\n#\n# See python fnmatch module documentation for more information.\n\nCuroverse, Inc. <*@curoverse.com>\nAdam Savitzky <adam.savitzky@gmail.com>\nColin Nolan <colin.nolan@sanger.ac.uk>\nDavid <davide.fiorentino.loregio@gmail.com>\nGuillermo Carrasco <guille.ch.88@gmail.com>\nJoshua Randall <joshua.randall@sanger.ac.uk>\nPresident and Fellows of Harvard College <*@harvard.edu>\nThomas Mooney <tmooney@genome.wustl.edu>\nChen Chen <aflyhorse@gmail.com>\nVeritas Genetics, Inc. <*@veritasgenetics.com>\n"
  },
  {
    "path": "services/workbench2/COPYING",
    "content": "Unless indicated otherwise in the header of the file, the files in this\nrepository are distributed under one of three different licenses: AGPL-3.0,\nApache-2.0 or CC-BY-SA-3.0.\n\nIndividual files contain an SPDX tag that indicates the license for the file.\nThese are the three tags in use:\n\n    SPDX-License-Identifier: AGPL-3.0\n    SPDX-License-Identifier: Apache-2.0\n    SPDX-License-Identifier: CC-BY-SA-3.0\n\nThis enables machine processing of license information based on the SPDX\nLicense Identifiers that are available here: http://spdx.org/licenses/\n\nThe full license text for each license is available in this directory:\n\n  AGPL-3.0:     agpl-3.0.txt\n  Apache-2.0:   apache-2.0.txt\n  CC-BY-SA-3.0: cc-by-sa-3.0.txt\n"
  },
  {
    "path": "services/workbench2/Makefile",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Use bash, and run all lines in each recipe as one shell command\nSHELL := /bin/bash\n.ONESHELL:\n\n# IMPORTANT BACKGROUND: Because Workbench used to be a separate project, its\n# build tools usually expect $WORKSPACE to be the Workbench source.\n# $ARVADOS_DIRECTORY is the path to the full Arvados source tree.\n# When you call other Arvados build tools, you'll probably need to set\n# WORKSPACE=$(ARVADOS_DIRECTORY).\nexport WORKSPACE=$(PWD)\nexport ARVADOS_DIRECTORY=$(shell git rev-parse --show-toplevel)\n\nAPP_NAME?=arvados-workbench2\nDOCKER_IMAGE=arvados/workbench\nDOCKER_VOLUME=arvados-workbench\n\n# Cypress test file that can be passed to the integration-test target\nSPECFILE?=ALL\n\n# VERSION uses all the above to produce X.Y.Z.timestamp\n# something in the lines of 1.2.0.20180612145021, this will be the package version\nVERSION?=$(shell ./version-at-commit.sh HEAD)\n# We don't use BUILD_NUMBER at the moment, but it needs to be defined\nBUILD_NUMBER?=0\nGIT_COMMIT?=$(shell git rev-parse --short HEAD)\n\nexport CYPRESS_FAIL_FAST_ENABLED?=false\n\nifndef ci\n\tTI=-ti\nelse\n\tTI=\nendif\n\n.PHONY: help clean* *-install test build *-tests* workbench-docker-*\n\nhelp:\n\t@echo >&2\n\t@echo >&2 \"There is no default make target here.  Did you mean 'make test'?\"\n\t@echo >&2\n\t@echo >&2 \"More info:\"\n\t@echo >&2 \"  Installing              --> http://doc.arvados.org/install\"\n\t@echo >&2 \"  Developing/contributing --> https://github.com/arvados/arvados\"\n\t@echo >&2 \"  Project home            --> https://arvados.org\"\n\t@echo >&2\n\t@false\n\nclean-node-modules:\n\trm -rf $(WORKSPACE)/node_modules\n\nclean-docker-image:\n\tif docker image inspect \"$(DOCKER_IMAGE)\" >/dev/null; \\\n\tthen docker image rm \"$(DOCKER_IMAGE)\"; \\\n\telse :; fi\n\nclean-docker-volume:\n\tif docker volume inspect \"$(DOCKER_VOLUME)\" >/dev/null; \\\n\tthen docker volume rm \"$(DOCKER_VOLUME)\"; \\\n\telse :; fi\n\nclean: clean-docker-image clean-docker-volume clean-node-modules\n\nyarn-install:\n\tyarn install\n\ncypress-install: yarn-install\n\tyarn run cypress install && yarn run cypress verify\n\nunit-tests: yarn-install\n\tyarn test\n\nintegration-tests: cypress-install\nifeq ($(SPECFILE), ALL)\n\t$(WORKSPACE)/tools/run-integration-tests.sh $(INTERACTIVE)\nelse\n\t$(WORKSPACE)/tools/run-integration-tests.sh $(INTERACTIVE) -- --spec $(SPECFILE)\nendif\n\ninteractive-tests-in-docker: workbench-docker-volume\n\txhost +local:\n\tdocker run --interactive --tty --rm \\\n\t\t--env=DISPLAY \\\n\t\t--mount=\"type=volume,src=$(DOCKER_VOLUME),dst=/home\" \\\n\t\t--mount=\"type=bind,src=$(ARVADOS_DIRECTORY),dst=/opt/arvados\" \\\n\t\t--mount=\"type=bind,src=/tmp/.X11-unix,dst=/tmp/.X11-unix\" \\\n\t\t--workdir=/opt/arvados/services/workbench2 \\\n\t\t\"$(DOCKER_IMAGE)\" \\\n\t\tbash tools/run-integration-tests.sh -i\n\nintegration-tests-in-docker: workbench-docker-volume\n\tdocker run $(TI) --rm \\\n\t\t--mount=\"type=volume,src=$(DOCKER_VOLUME),dst=/home\" \\\n\t\t--mount=\"type=bind,src=$(ARVADOS_DIRECTORY),dst=/opt/arvados\" \\\n\t\t--workdir=/opt/arvados/services/workbench2 \\\n\t\t\"$(DOCKER_IMAGE)\" \\\n\t\tbash tools/run-integration-tests.sh\n\nunit-tests-in-docker: workbench-docker-volume\n\tdocker run $(TI) --rm \\\n\t\t--mount=\"type=volume,src=$(DOCKER_VOLUME),dst=/home\" \\\n\t\t--mount=\"type=bind,src=$(ARVADOS_DIRECTORY),dst=/opt/arvados\" 
\\\n\t\t--workdir=/opt/arvados/services/workbench2 \\\n\t\t\"$(DOCKER_IMAGE)\" \\\n\t\tyarn test\n\ntests-in-docker: unit-tests-in-docker integration-tests-in-docker\n\nshell-in-docker: workbench-docker-image\n\tdocker run $(TI) --rm \\\n\t\t--mount=\"type=volume,src=$(DOCKER_VOLUME),dst=/home\" \\\n\t\t--mount=\"type=bind,src=$(ARVADOS_DIRECTORY),dst=/opt/arvados\" \\\n\t\t--workdir=/opt/arvados/services/workbench2 \\\n\t\t\"$(DOCKER_IMAGE)\"\n\ntest: unit-tests integration-tests\n\nbuild: yarn-install\n\tVERSION=$(VERSION) BUILD_NUMBER=$(BUILD_NUMBER) GIT_COMMIT=$(GIT_COMMIT) yarn build\n\nworkbench-docker-image: yarn-install\n\tif ! docker image inspect \"$(DOCKER_IMAGE)\" >/dev/null; then \\\n\t\tenv -C \"$(ARVADOS_DIRECTORY)/tools/ansible\" \\\n\t\t\tWORKSPACE=\"$(ARVADOS_DIRECTORY)\" \\\n\t\t\tansible-playbook \\\n\t\t\t--extra-vars=arvados_build_playbook=install-dev-tools.yml \\\n\t\t\t--inventory=files/development-docker-images.yml \\\n\t\t\t--limit=arvados_workbench \\\n\t\t\tbuild-docker-image.yml; \\\n\tfi\n\nworkbench-docker-volume: workbench-docker-image\n\tif docker volume inspect \"$(DOCKER_VOLUME)\" >/dev/null; then exit; fi; \\\n\tdocker volume create \"$(DOCKER_VOLUME)\" || exit; \\\n\tif ! docker run --interactive --rm \\\n\t\t--mount=\"type=bind,src=$(ARVADOS_DIRECTORY),dst=/opt/arvados\" \\\n\t\t--mount=\"type=volume,src=$(DOCKER_VOLUME),dst=/mnt\" \\\n\t\t--user=root \\\n\t\t\"$(DOCKER_IMAGE)\" \\\n\t\tbash /opt/arvados/services/workbench2/tools/setup-docker-volume.sh </dev/null; \\\n\tthen docker volume rm \"$(DOCKER_VOLUME)\"; exit 73; fi\n"
  },
  {
    "path": "services/workbench2/README.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Arvados Workbench 2\n\n## Setup\n```\nnpm install yarn\nyarn install\n```\n\nInstall [redux-devtools-extension](https://chrome.google.com/webstore/detail/redux-devtools/lmhkpmbekcpmknklioeibfkpmmfibljd)\n\n## Start project for development\n```\nyarn start\n```\n\n## Start project for development inside Docker container\n\n```\nmake workbench2-build-image\n# (create public/config.json, see \"Run time configuration\" below)\ndocker run -ti -v$PWD:$PWD -p 3000:3000 -w$PWD workbench2-build /bin/bash\n# (inside docker container)\nyarn install\nyarn start\n```\n\n## Run unit tests\n```\nmake unit-tests\n```\n\n## Run end-to-end tests\n\n```\nmake integration-tests\n```\n\n## Run end-to-end tests in a Docker container\n\n```\nmake integration-tests-in-docker\n```\n\n## Run tests interactively in container\n\n```\nxhost +local:root\ndocker run -ti -v$PWD:$PWD -v$(realpath ../..):/usr/src/arvados -w$PWD --env=\"DISPLAY\" --volume=\"/tmp/.X11-unix:/tmp/.X11-unix:rw\" workbench2-build /bin/bash\n(inside container)\nyarn run cypress install\ntools/run-integration-tests.sh -i -a /usr/src/arvados\n```\n\n## Production build\n```\nyarn build\n```\n\n## Package build\n```\nmake packages\n```\n\n## Build time configuration\nYou can customize project global variables using env variables. Default values are placed in the `.env` file.\n\nExample:\n```\nREACT_APP_ARVADOS_CONFIG_URL=config.json yarn build\n```\n\n## Run time configuration\nThe app will fetch runtime configuration when starting. By default it will try to fetch `/config.json`.  In development mode, this can be found in the `public` directory.\nYou can customize this url using build time configuration.\n\nCurrently this configuration schema is supported:\n```\n{\n    \"API_HOST\": \"string\",\n    \"FILE_VIEWERS_CONFIG_URL\": \"string\",\n}\n```\n\n### API_HOST\n\nThe Arvados base URL.\n\nThe `REACT_APP_ARVADOS_API_HOST` environment variable can be used to set the default URL if the run time configuration is unreachable.\n\n## FILE_VIEWERS_CONFIG_URL\nLocal path, or any URL that allows cross-origin requests. See:\n\n[File viewers config file example](public/file-viewers-example.json)\n\n[File viewers config scheme](src/models/file-viewers-config.ts)\n\nTo use the URL defined in the Arvados cluster configuration, remove the entire `FILE_VIEWERS_CONFIG_URL` entry from the runtime configuration. Found in `/config.json` by default.\n\n## Plugin support\n\nWorkbench supports plugins to add new functionality to the user\ninterface.  For information about installing plugins, the provided\nexample plugins, see [src/plugins/README.md](src/plugins/README.md).\n\n\n## Licensing\n\nArvados is Free Software. See COPYING for information about Arvados Free\nSoftware licenses.\n"
  },
  {
    "path": "services/workbench2/__mocks__/popper.js.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport default class Popper {\n    static placements = [\n        'auto',\n        'auto-end',\n        'auto-start',\n        'bottom',\n        'bottom-end',\n        'bottom-start',\n        'left',\n        'left-end',\n        'left-start',\n        'right',\n        'right-end',\n        'right-start',\n        'top',\n        'top-end',\n        'top-start'\n    ];\n\n    constructor() {\n        return {\n            destroy: jest.fn(),\n            scheduleUpdate: jest.fn()\n        };\n    }\n}"
  },
  {
    "path": "services/workbench2/agpl-3.0.txt",
    "content": "                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU Affero General Public License is a free, copyleft license for\nsoftware and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nour General Public Licenses are intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.\n\n  A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.\n\n  The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.\n\n  An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU Affero General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  
\"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. 
Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  
This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  
But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  
If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  
If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  
\"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Remote Network Interaction; Use with the GNU General Public License.\n\n  Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  
This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the work with which it is combined will remain governed by version\n3 of the GNU General Public License.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new versions\nwill be similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU Affero General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU Affero General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. 
Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a \"Source\" link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n<http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "services/workbench2/apache-2.0.txt",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "services/workbench2/cc-by-sa-3.0.txt",
    "content": "Creative Commons Legal Code\n\nAttribution-ShareAlike 3.0 United States\n\nLicense\n\nTHE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE\nCOMMONS PUBLIC LICENSE (\"CCPL\" OR \"LICENSE\"). THE WORK IS PROTECTED BY\nCOPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS\nAUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.\n\nBY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE\nBOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE\nCONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE\nIN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.\n\n1. Definitions\n\n a. \"Collective Work\" means a work, such as a periodical issue, anthology or\n    encyclopedia, in which the Work in its entirety in unmodified form, along\n    with one or more other contributions, constituting separate and independent\n    works in themselves, are assembled into a collective whole. A work that\n    constitutes a Collective Work will not be considered a Derivative Work (as\n    defined below) for the purposes of this License.\n\n b. \"Creative Commons Compatible License\" means a license that is listed at\n    http://creativecommons.org/compatiblelicenses that has been approved by\n    Creative Commons as being essentially equivalent to this License,\n    including, at a minimum, because that license: (i) contains terms that have\n    the same purpose, meaning and effect as the License Elements of this\n    License; and, (ii) explicitly permits the relicensing of derivatives of\n    works made available under that license under this License or either a\n    Creative Commons unported license or a Creative Commons jurisdiction\n    license with the same License Elements as this License.\n\n c. \"Derivative Work\" means a work based upon the Work or upon the Work and\n    other pre-existing works, such as a translation, musical arrangement,\n    dramatization, fictionalization, motion picture version, sound recording,\n    art reproduction, abridgment, condensation, or any other form in which the\n    Work may be recast, transformed, or adapted, except that a work that\n    constitutes a Collective Work will not be considered a Derivative Work for\n    the purpose of this License. For the avoidance of doubt, where the Work is\n    a musical composition or sound recording, the synchronization of the Work\n    in timed-relation with a moving image (\"synching\") will be considered a\n    Derivative Work for the purpose of this License.\n\n d. \"License Elements\" means the following high-level license attributes as\n    selected by Licensor and indicated in the title of this License:\n    Attribution, ShareAlike.\n\n e. \"Licensor\" means the individual, individuals, entity or entities that\n    offers the Work under the terms of this License.\n\n f. \"Original Author\" means the individual, individuals, entity or entities who\n    created the Work.\n\n g. \"Work\" means the copyrightable work of authorship offered under the terms\n    of this License.\n\n    h. \"You\" means an individual or entity exercising rights under this License\n    who has not previously violated the terms of this License with respect to\n    the Work, or who has received express permission from the Licensor to\n    exercise rights under this License despite a previous violation.\n\n2. Fair Use Rights. 
Nothing in this license is intended to reduce, limit, or\nrestrict any rights arising from fair use, first sale or other limitations on\nthe exclusive rights of the copyright owner under copyright law or other\napplicable laws.\n\n3. License Grant. Subject to the terms and conditions of this License, Licensor\nhereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the\nduration of the applicable copyright) license to exercise the rights in the\nWork as stated below:\n\n a. to reproduce the Work, to incorporate the Work into one or more Collective\n    Works, and to reproduce the Work as incorporated in the Collective Works;\n\n b. to create and reproduce Derivative Works provided that any such\n    Derivative Work, including any translation in any medium, takes reasonable\n    steps to clearly label, demarcate or otherwise identify that changes were\n    made to the original Work. For example, a translation could be marked \"The\n    original work was translated from English to Spanish,\" or a modification\n    could indicate \"The original work has been modified.\";\n\n c. to distribute copies or phonorecords of, display publicly, perform\n    publicly, and perform publicly by means of a digital audio transmission the\n    Work including as incorporated in Collective Works;\n\n d. to distribute copies or phonorecords of, display publicly, perform\n    publicly, and perform publicly by means of a digital audio transmission\n    Derivative Works.\n\n e. For the avoidance of doubt, where the Work is a musical composition:\n\n     i. Performance Royalties Under Blanket Licenses. Licensor waives the\n        exclusive right to collect, whether individually or, in the event that\n        Licensor is a member of a performance rights society (e.g. ASCAP, BMI,\n        SESAC), via that society, royalties for the public performance or\n        public digital performance (e.g. webcast) of the Work.\n\n    ii. Mechanical Rights and Statutory Royalties. Licensor waives the\n        exclusive right to collect, whether individually or via a music rights\n        agency or designated agent (e.g. Harry Fox Agency), royalties for any\n        phonorecord You create from the Work (\"cover version\") and distribute,\n        subject to the compulsory license created by 17 USC Section 115 of the\n        US Copyright Act (or the equivalent in other jurisdictions).\n\n f. Webcasting Rights and Statutory Royalties. For the avoidance of doubt,\n    where the Work is a sound recording, Licensor waives the exclusive right to\n    collect, whether individually or via a performance-rights society\n    (e.g. SoundExchange), royalties for the public digital performance\n    (e.g. webcast) of the Work, subject to the compulsory license created by 17\n    USC Section 114 of the US Copyright Act (or the equivalent in other\n    jurisdictions).\n\nThe above rights may be exercised in all media and formats whether now known or\nhereafter devised. The above rights include the right to make such\nmodifications as are technically necessary to exercise the rights in other\nmedia and formats. All rights not expressly granted by Licensor are hereby\nreserved.\n\n4. Restrictions. The license granted in Section 3 above is expressly made subject to and limited by the following restrictions:\n\n a. 
You may distribute, publicly display, publicly perform, or publicly\n    digitally perform the Work only under the terms of this License, and You\n    must include a copy of, or the Uniform Resource Identifier for, this\n    License with every copy or phonorecord of the Work You distribute, publicly\n    display, publicly perform, or publicly digitally perform. You may not offer\n    or impose any terms on the Work that restrict the terms of this License or\n    the ability of a recipient of the Work to exercise of the rights granted to\n    that recipient under the terms of the License. You may not sublicense the\n    Work. You must keep intact all notices that refer to this License and to\n    the disclaimer of warranties. When You distribute, publicly display,\n    publicly perform, or publicly digitally perform the Work, You may not\n    impose any technological measures on the Work that restrict the ability of\n    a recipient of the Work from You to exercise of the rights granted to that\n    recipient under the terms of the License. This Section 4(a) applies to the\n    Work as incorporated in a Collective Work, but this does not require the\n    Collective Work apart from the Work itself to be made subject to the terms\n    of this License. If You create a Collective Work, upon notice from any\n    Licensor You must, to the extent practicable, remove from the Collective\n    Work any credit as required by Section 4(c), as requested. If You create a\n    Derivative Work, upon notice from any Licensor You must, to the extent\n    practicable, remove from the Derivative Work any credit as required by\n    Section 4(c), as requested.\n\n b. You may distribute, publicly display, publicly perform, or publicly\n    digitally perform a Derivative Work only under: (i) the terms of this\n    License; (ii) a later version of this License with the same License\n    Elements as this License; (iii) either the Creative Commons (Unported)\n    license or a Creative Commons jurisdiction license (either this or a later\n    license version) that contains the same License Elements as this License\n    (e.g. Attribution-ShareAlike 3.0 (Unported)); (iv) a Creative Commons\n    Compatible License. If you license the Derivative Work under one of the\n    licenses mentioned in (iv), you must comply with the terms of that\n    license. 
If you license the Derivative Work under the terms of any of the\n    licenses mentioned in (i), (ii) or (iii) (the \"Applicable License\"), you\n    must comply with the terms of the Applicable License generally and with the\n    following provisions: (I) You must include a copy of, or the Uniform\n    Resource Identifier for, the Applicable License with every copy or\n    phonorecord of each Derivative Work You distribute, publicly display,\n    publicly perform, or publicly digitally perform; (II) You may not offer or\n    impose any terms on the Derivative Works that restrict the terms of the\n    Applicable License or the ability of a recipient of the Work to exercise\n    the rights granted to that recipient under the terms of the Applicable\n    License; (III) You must keep intact all notices that refer to the\n    Applicable License and to the disclaimer of warranties; and, (IV) when You\n    distribute, publicly display, publicly perform, or publicly digitally\n    perform the Work, You may not impose any technological measures on the\n    Derivative Work that restrict the ability of a recipient of the Derivative\n    Work from You to exercise the rights granted to that recipient under the\n    terms of the Applicable License. This Section 4(b) applies to the\n    Derivative Work as incorporated in a Collective Work, but this does not\n    require the Collective Work apart from the Derivative Work itself to be\n    made subject to the terms of the Applicable License.\n\n c. If You distribute, publicly display, publicly perform, or publicly\n    digitally perform the Work (as defined in Section 1 above) or any\n    Derivative Works (as defined in Section 1 above) or Collective Works (as\n    defined in Section 1 above), You must, unless a request has been made\n    pursuant to Section 4(a), keep intact all copyright notices for the Work\n    and provide, reasonable to the medium or means You are utilizing: (i) the\n    name of the Original Author (or pseudonym, if applicable) if supplied,\n    and/or (ii) if the Original Author and/or Licensor designate another party\n    or parties (e.g. a sponsor institute, publishing entity, journal) for\n    attribution (\"Attribution Parties\") in Licensor's copyright notice, terms\n    of service or by other reasonable means, the name of such party or parties;\n    the title of the Work if supplied; to the extent reasonably practicable,\n    the Uniform Resource Identifier, if any, that Licensor specifies to be\n    associated with the Work, unless such URI does not refer to the copyright\n    notice or licensing information for the Work; and, consistent with Section\n    3(b) in the case of a Derivative Work, a credit identifying the use of the\n    Work in the Derivative Work (e.g., \"French translation of the Work by\n    Original Author,\" or \"Screenplay based on original Work by Original\n    Author\"). The credit required by this Section 4(c) may be implemented in\n    any reasonable manner; provided, however, that in the case of a Derivative\n    Work or Collective Work, at a minimum such credit will appear, if a credit\n    for all contributing authors of the Derivative Work or Collective Work\n    appears, then as part of these credits and in a manner at least as\n    prominent as the credits for the other contributing authors. 
For the\n    avoidance of doubt, You may only use the credit required by this Section\n    for the purpose of attribution in the manner set out above and, by\n    exercising Your rights under this License, You may not implicitly or\n    explicitly assert or imply any connection with, sponsorship or endorsement\n    by the Original Author, Licensor and/or Attribution Parties, as\n    appropriate, of You or Your use of the Work, without the separate, express\n    prior written permission of the Original Author, Licensor and/or\n    Attribution Parties.\n\n\n5. Representations, Warranties and Disclaimer\n\nUNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS\nTHE WORK AS-IS AND ONLY TO THE EXTENT OF ANY RIGHTS HELD IN THE LICENSED WORK\nBY THE LICENSOR. THE LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY\nKIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING,\nWITHOUT LIMITATION, WARRANTIES OF TITLE, MARKETABILITY, MERCHANTIBILITY,\nFITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR\nOTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT\nDISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED\nWARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.\n\n6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN\nNO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL,\nINCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS\nLICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGES.\n\n7. Termination\n\n a. This License and the rights granted hereunder will terminate automatically\n    upon any breach by You of the terms of this License. Individuals or\n    entities who have received Derivative Works or Collective Works from You\n    under this License, however, will not have their licenses terminated\n    provided such individuals or entities remain in full compliance with those\n    licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of\n    this License.\n\n b. Subject to the above terms and conditions, the license granted here is\n    perpetual (for the duration of the applicable copyright in the\n    Work). Notwithstanding the above, Licensor reserves the right to release\n    the Work under different license terms or to stop distributing the Work at\n    any time; provided, however that any such election will not serve to\n    withdraw this License (or any other license that has been, or is required\n    to be, granted under the terms of this License), and this License will\n    continue in full force and effect unless terminated as stated above.\n\n8. Miscellaneous\n\n a. Each time You distribute or publicly digitally perform the Work (as defined\n    in Section 1 above) or a Collective Work (as defined in Section 1 above),\n    the Licensor offers to the recipient a license to the Work on the same\n    terms and conditions as the license granted to You under this License.\n\n b. Each time You distribute or publicly digitally perform a Derivative Work,\n    Licensor offers to the recipient a license to the original Work on the same\n    terms and conditions as the license granted to You under this License.\n\n c. 
If any provision of this License is invalid or unenforceable under\n    applicable law, it shall not affect the validity or enforceability of the\n    remainder of the terms of this License, and without further action by the\n    parties to this agreement, such provision shall be reformed to the minimum\n    extent necessary to make such provision valid and enforceable.\n\n d. No term or provision of this License shall be deemed waived and no breach\n    consented to unless such waiver or consent shall be in writing and signed\n    by the party to be charged with such waiver or consent.\n\n e. This License constitutes the entire agreement between the parties with\n    respect to the Work licensed here. There are no understandings, agreements\n    or representations with respect to the Work not specified here. Licensor\n    shall not be bound by any additional provisions that may appear in any\n    communication from You. This License may not be modified without the mutual\n    written agreement of the Licensor and You.\n\nCreative Commons Notice\n\n    Creative Commons is not a party to this License, and makes no warranty\n    whatsoever in connection with the Work. Creative Commons will not be liable\n    to You or any party on any legal theory for any damages whatsoever,\n    including without limitation any general, special, incidental or\n    consequential damages arising in connection to this\n    license. Notwithstanding the foregoing two (2) sentences, if Creative\n    Commons has expressly identified itself as the Licensor hereunder, it shall\n    have all rights and obligations of Licensor.\n\n    Except for the limited purpose of indicating to the public that the Work is\n    licensed under the CCPL, Creative Commons does not authorize the use by\n    either party of the trademark \"Creative Commons\" or any related trademark\n    or logo of Creative Commons without the prior written consent of Creative\n    Commons. Any permitted use will be in compliance with Creative Commons'\n    then-current trademark usage guidelines, as may be published on its website\n    or otherwise made available upon request from time to time. For the\n    avoidance of doubt, this trademark restriction does not form part of this\n    License.\n\n    Creative Commons may be contacted at http://creativecommons.org/.\n"
  },
  {
    "path": "services/workbench2/config/env.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n'use strict';\n\nconst fs = require('fs');\nconst path = require('path');\nconst paths = require('./paths');\n\n// Make sure that including paths.js after env.js will read .env variables.\ndelete require.cache[require.resolve('./paths')];\n\nconst NODE_ENV = process.env.NODE_ENV;\nif (!NODE_ENV) {\n  throw new Error(\n    'The NODE_ENV environment variable is required but was not specified.'\n  );\n}\n\n// https://github.com/bkeepers/dotenv#what-other-env-files-can-i-use\nconst dotenvFiles = [\n  `${paths.dotenv}.${NODE_ENV}.local`,\n  // Don't include `.env.local` for `test` environment\n  // since normally you expect tests to produce the same\n  // results for everyone\n  NODE_ENV !== 'test' && `${paths.dotenv}.local`,\n  `${paths.dotenv}.${NODE_ENV}`,\n  paths.dotenv,\n].filter(Boolean);\n\n// Load environment variables from .env* files. Suppress warnings using silent\n// if this file is missing. dotenv will never modify any environment variables\n// that have already been set.  Variable expansion is supported in .env files.\n// https://github.com/motdotla/dotenv\n// https://github.com/motdotla/dotenv-expand\ndotenvFiles.forEach(dotenvFile => {\n  if (fs.existsSync(dotenvFile)) {\n    require('dotenv-expand')(\n      require('dotenv').config({\n        path: dotenvFile,\n      })\n    );\n  }\n});\n\n// We support resolving modules according to `NODE_PATH`.\n// This lets you use absolute paths in imports inside large monorepos:\n// https://github.com/facebook/create-react-app/issues/253.\n// It works similar to `NODE_PATH` in Node itself:\n// https://nodejs.org/api/modules.html#modules_loading_from_the_global_folders\n// Note that unlike in Node, only *relative* paths from `NODE_PATH` are honored.\n// Otherwise, we risk importing Node.js core modules into an app instead of webpack shims.\n// https://github.com/facebook/create-react-app/issues/1023#issuecomment-265344421\n// We also resolve them to make sure all tools using them work consistently.\nconst appDirectory = fs.realpathSync(process.cwd());\nprocess.env.NODE_PATH = (process.env.NODE_PATH || '')\n  .split(path.delimiter)\n  .filter(folder => folder && !path.isAbsolute(folder))\n  .map(folder => path.resolve(appDirectory, folder))\n  .join(path.delimiter);\n\n// Grab NODE_ENV and REACT_APP_* environment variables and prepare them to be\n// injected into the application via DefinePlugin in webpack configuration.\nconst REACT_APP = /^REACT_APP_/i;\n\nfunction getClientEnvironment(publicUrl) {\n  const raw = Object.keys(process.env)\n    .filter(key => REACT_APP.test(key))\n    .reduce(\n      (env, key) => {\n        env[key] = process.env[key];\n        return env;\n      },\n      {\n        // Useful for determining whether we’re running in production mode.\n        // Most importantly, it switches React into the correct mode.\n        NODE_ENV: process.env.NODE_ENV || 'development',\n        // Useful for resolving the correct path to static assets in `public`.\n        // For example, <img src={process.env.PUBLIC_URL + '/img/logo.png'} />.\n        // This should only be used as an escape hatch. 
Normally you would put\n        // images into the `src` and `import` them in code to get their paths.\n        PUBLIC_URL: publicUrl,\n        // We support configuring the sockjs pathname during development.\n        // These settings let a developer run multiple simultaneous projects.\n        // They are used as the connection `hostname`, `pathname` and `port`\n        // in webpackHotDevClient. They are used as the `sockHost`, `sockPath`\n        // and `sockPort` options in webpack-dev-server.\n        WDS_SOCKET_HOST: process.env.WDS_SOCKET_HOST,\n        WDS_SOCKET_PATH: process.env.WDS_SOCKET_PATH,\n        WDS_SOCKET_PORT: process.env.WDS_SOCKET_PORT,\n        // Whether or not react-refresh is enabled.\n        // It is defined here so it is available in the webpackHotDevClient.\n        FAST_REFRESH: process.env.FAST_REFRESH !== 'false',\n      }\n    );\n  // Stringify all values so we can feed into webpack DefinePlugin\n  const stringified = {\n    'process.env': Object.keys(raw).reduce((env, key) => {\n      env[key] = JSON.stringify(raw[key]);\n      return env;\n    }, {}),\n  };\n\n  return { raw, stringified };\n}\n\nmodule.exports = getClientEnvironment;\n"
  },
  {
    "path": "services/workbench2/config/getHttpsConfig.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n'use strict';\n\nconst fs = require('fs');\nconst path = require('path');\nconst crypto = require('crypto');\nconst chalk = require('react-dev-utils/chalk');\nconst paths = require('./paths');\n\n// Ensure the certificate and key provided are valid and if not\n// throw an easy to debug error\nfunction validateKeyAndCerts({ cert, key, keyFile, crtFile }) {\n  let encrypted;\n  try {\n    // publicEncrypt will throw an error with an invalid cert\n    encrypted = crypto.publicEncrypt(cert, Buffer.from('test'));\n  } catch (err) {\n    throw new Error(\n      `The certificate \"${chalk.yellow(crtFile)}\" is invalid.\\n${err.message}`\n    );\n  }\n\n  try {\n    // privateDecrypt will throw an error with an invalid key\n    crypto.privateDecrypt(key, encrypted);\n  } catch (err) {\n    throw new Error(\n      `The certificate key \"${chalk.yellow(keyFile)}\" is invalid.\\n${\n        err.message\n      }`\n    );\n  }\n}\n\n// Read file and throw an error if it doesn't exist\nfunction readEnvFile(file, type) {\n  if (!fs.existsSync(file)) {\n    throw new Error(\n      `You specified ${chalk.cyan(\n        type\n      )} in your env, but the file \"${chalk.yellow(file)}\" can't be found.`\n    );\n  }\n  return fs.readFileSync(file);\n}\n\n// Get the https config\n// Return cert files if provided in env, otherwise just true or false\nfunction getHttpsConfig() {\n  const { SSL_CRT_FILE, SSL_KEY_FILE, HTTPS } = process.env;\n  const isHttps = HTTPS === 'true';\n\n  if (isHttps && SSL_CRT_FILE && SSL_KEY_FILE) {\n    const crtFile = path.resolve(paths.appPath, SSL_CRT_FILE);\n    const keyFile = path.resolve(paths.appPath, SSL_KEY_FILE);\n    const config = {\n      cert: readEnvFile(crtFile, 'SSL_CRT_FILE'),\n      key: readEnvFile(keyFile, 'SSL_KEY_FILE'),\n    };\n\n    validateKeyAndCerts({ ...config, keyFile, crtFile });\n    return config;\n  }\n  return isHttps;\n}\n\nmodule.exports = getHttpsConfig;\n"
  },
  {
    "path": "services/workbench2/config/jest/babelTransform.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n'use strict';\n\nconst babelJest = require('babel-jest').default;\n\nconst hasJsxRuntime = (() => {\n  if (process.env.DISABLE_NEW_JSX_TRANSFORM === 'true') {\n    return false;\n  }\n\n  try {\n    require.resolve('react/jsx-runtime');\n    return true;\n  } catch (e) {\n    return false;\n  }\n})();\n\nmodule.exports = babelJest.createTransformer({\n  presets: [\n    [\n      require.resolve('babel-preset-react-app'),\n      {\n        runtime: hasJsxRuntime ? 'automatic' : 'classic',\n      },\n    ],\n  ],\n  babelrc: false,\n  configFile: false,\n});\n"
  },
  {
    "path": "services/workbench2/config/jest/cssTransform.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n'use strict';\n\n// This is a custom Jest transformer turning style imports into empty objects.\n// http://facebook.github.io/jest/docs/en/webpack.html\n\nmodule.exports = {\n  process() {\n    return 'module.exports = {};';\n  },\n  getCacheKey() {\n    // The output is always the same.\n    return 'cssTransform';\n  },\n};\n"
  },
  {
    "path": "services/workbench2/config/jest/fileTransform.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n'use strict';\n\nconst path = require('path');\nconst camelcase = require('camelcase');\n\n// This is a custom Jest transformer turning file imports into filenames.\n// http://facebook.github.io/jest/docs/en/webpack.html\n\nmodule.exports = {\n  process(src, filename) {\n    const assetFilename = JSON.stringify(path.basename(filename));\n\n    if (filename.match(/\\.svg$/)) {\n      // Based on how SVGR generates a component name:\n      // https://github.com/smooth-code/svgr/blob/01b194cf967347d43d4cbe6b434404731b87cf27/packages/core/src/state.js#L6\n      const pascalCaseFilename = camelcase(path.parse(filename).name, {\n        pascalCase: true,\n      });\n      const componentName = `Svg${pascalCaseFilename}`;\n      return `const React = require('react');\n      module.exports = {\n        __esModule: true,\n        default: ${assetFilename},\n        ReactComponent: React.forwardRef(function ${componentName}(props, ref) {\n          return {\n            $$typeof: Symbol.for('react.element'),\n            type: 'svg',\n            ref: ref,\n            key: null,\n            props: Object.assign({}, props, {\n              children: ${assetFilename}\n            })\n          };\n        }),\n      };`;\n    }\n\n    return `module.exports = ${assetFilename};`;\n  },\n};\n"
  },
  {
    "path": "services/workbench2/config/modules.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n'use strict';\n\nconst fs = require('fs');\nconst path = require('path');\nconst paths = require('./paths');\nconst chalk = require('react-dev-utils/chalk');\nconst resolve = require('resolve');\n\n/**\n * Get additional module paths based on the baseUrl of a compilerOptions object.\n *\n * @param {Object} options\n */\nfunction getAdditionalModulePaths(options = {}) {\n  const baseUrl = options.baseUrl;\n\n  if (!baseUrl) {\n    return '';\n  }\n\n  const baseUrlResolved = path.resolve(paths.appPath, baseUrl);\n\n  // We don't need to do anything if `baseUrl` is set to `node_modules`. This is\n  // the default behavior.\n  if (path.relative(paths.appNodeModules, baseUrlResolved) === '') {\n    return null;\n  }\n\n  // Allow the user set the `baseUrl` to `appSrc`.\n  if (path.relative(paths.appSrc, baseUrlResolved) === '') {\n    return [paths.appSrc];\n  }\n\n  // If the path is equal to the root directory we ignore it here.\n  // We don't want to allow importing from the root directly as source files are\n  // not transpiled outside of `src`. We do allow importing them with the\n  // absolute path (e.g. `src/Components/Button.js`) but we set that up with\n  // an alias.\n  if (path.relative(paths.appPath, baseUrlResolved) === '') {\n    return null;\n  }\n\n  // Otherwise, throw an error.\n  throw new Error(\n    chalk.red.bold(\n      \"Your project's `baseUrl` can only be set to `src` or `node_modules`.\" +\n        ' Create React App does not support other values at this time.'\n    )\n  );\n}\n\n/**\n * Get webpack aliases based on the baseUrl of a compilerOptions object.\n *\n * @param {*} options\n */\nfunction getWebpackAliases(options = {}) {\n  const baseUrl = options.baseUrl;\n\n  if (!baseUrl) {\n    return {};\n  }\n\n  const baseUrlResolved = path.resolve(paths.appPath, baseUrl);\n\n  if (path.relative(paths.appPath, baseUrlResolved) === '') {\n    return {\n      src: paths.appSrc,\n    };\n  }\n}\n\n/**\n * Get jest aliases based on the baseUrl of a compilerOptions object.\n *\n * @param {*} options\n */\nfunction getJestAliases(options = {}) {\n  const baseUrl = options.baseUrl;\n\n  if (!baseUrl) {\n    return {};\n  }\n\n  const baseUrlResolved = path.resolve(paths.appPath, baseUrl);\n\n  if (path.relative(paths.appPath, baseUrlResolved) === '') {\n    return {\n      '^src/(.*)$': '<rootDir>/src/$1',\n    };\n  }\n}\n\nfunction getModules() {\n  // Check if TypeScript is setup\n  const hasTsConfig = fs.existsSync(paths.appTsConfig);\n  const hasJsConfig = fs.existsSync(paths.appJsConfig);\n\n  if (hasTsConfig && hasJsConfig) {\n    throw new Error(\n      'You have both a tsconfig.json and a jsconfig.json. 
If you are using TypeScript please remove your jsconfig.json file.'\n    );\n  }\n\n  let config;\n\n  // If there's a tsconfig.json we assume it's a\n  // TypeScript project and set up the config\n  // based on tsconfig.json\n  if (hasTsConfig) {\n    const ts = require(resolve.sync('typescript', {\n      basedir: paths.appNodeModules,\n    }));\n    config = ts.readConfigFile(paths.appTsConfig, ts.sys.readFile).config;\n    // Otherwise we'll check if there is jsconfig.json\n    // for non TS projects.\n  } else if (hasJsConfig) {\n    config = require(paths.appJsConfig);\n  }\n\n  config = config || {};\n  const options = config.compilerOptions || {};\n\n  const additionalModulePaths = getAdditionalModulePaths(options);\n\n  return {\n    additionalModulePaths: additionalModulePaths,\n    webpackAliases: getWebpackAliases(options),\n    jestAliases: getJestAliases(options),\n    hasTsConfig,\n  };\n}\n\nmodule.exports = getModules();\n"
  },
  {
    "path": "services/workbench2/config/paths.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n'use strict';\n\nconst path = require('path');\nconst fs = require('fs');\nconst getPublicUrlOrPath = require('react-dev-utils/getPublicUrlOrPath');\n\n// Make sure any symlinks in the project folder are resolved:\n// https://github.com/facebook/create-react-app/issues/637\nconst appDirectory = fs.realpathSync(process.cwd());\nconst resolveApp = relativePath => path.resolve(appDirectory, relativePath);\n\n// We use `PUBLIC_URL` environment variable or \"homepage\" field to infer\n// \"public path\" at which the app is served.\n// webpack needs to know it to put the right <script> hrefs into HTML even in\n// single-page apps that may serve index.html for nested URLs like /todos/42.\n// We can't use a relative path in HTML because we don't want to load something\n// like /todos/42/static/js/bundle.7289d.js. We have to know the root.\nconst publicUrlOrPath = getPublicUrlOrPath(\n  process.env.NODE_ENV === 'development',\n  require(resolveApp('package.json')).homepage,\n  process.env.PUBLIC_URL\n);\n\nconst buildPath = process.env.BUILD_PATH || 'build';\n\nconst moduleFileExtensions = [\n  'web.mjs',\n  'mjs',\n  'web.js',\n  'js',\n  'web.ts',\n  'ts',\n  'web.tsx',\n  'tsx',\n  'json',\n  'web.jsx',\n  'jsx',\n];\n\n// Resolve file paths in the same order as webpack\nconst resolveModule = (resolveFn, filePath) => {\n  const extension = moduleFileExtensions.find(extension =>\n    fs.existsSync(resolveFn(`${filePath}.${extension}`))\n  );\n\n  if (extension) {\n    return resolveFn(`${filePath}.${extension}`);\n  }\n\n  return resolveFn(`${filePath}.js`);\n};\n\n// config after eject: we're in ./config/\nmodule.exports = {\n  dotenv: resolveApp('.env'),\n  appPath: resolveApp('.'),\n  appBuild: resolveApp(buildPath),\n  appPublic: resolveApp('public'),\n  appHtml: resolveApp('public/index.html'),\n  appIndexJs: resolveModule(resolveApp, 'src/index'),\n  appPackageJson: resolveApp('package.json'),\n  appSrc: resolveApp('src'),\n  appTsConfig: resolveApp('tsconfig.json'),\n  appJsConfig: resolveApp('jsconfig.json'),\n  yarnLockFile: resolveApp('yarn.lock'),\n  testsSetup: resolveModule(resolveApp, 'src/setupTests'),\n  proxySetup: resolveApp('src/setupProxy.js'),\n  appNodeModules: resolveApp('node_modules'),\n  appWebpackCache: resolveApp('node_modules/.cache'),\n  appTsBuildInfoFile: resolveApp('node_modules/.cache/tsconfig.tsbuildinfo'),\n  swSrc: resolveModule(resolveApp, 'src/service-worker'),\n  publicUrlOrPath,\n};\n\n\n\nmodule.exports.moduleFileExtensions = moduleFileExtensions;\n"
  },
  {
    "path": "services/workbench2/config/webpack/persistentCache/createEnvironmentHash.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n'use strict';\nconst { createHash } = require('crypto');\n\nmodule.exports = env => {\n  const hash = createHash('md5');\n  hash.update(JSON.stringify(env));\n\n  return hash.digest('hex');\n};\n"
  },
  {
    "path": "services/workbench2/config/webpack.config.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n'use strict';\n\n// Do this as the first thing so that any code reading it knows the right env.\nprocess.env.BABEL_ENV = 'development';\nprocess.env.NODE_ENV = 'development';\n\nconst fs = require('fs');\nconst path = require('path');\nconst webpack = require('webpack');\nconst resolve = require('resolve');\nconst HtmlWebpackPlugin = require('html-webpack-plugin');\nconst CaseSensitivePathsPlugin = require('case-sensitive-paths-webpack-plugin');\nconst InlineChunkHtmlPlugin = require('react-dev-utils/InlineChunkHtmlPlugin');\nconst TerserPlugin = require('terser-webpack-plugin');\nconst MiniCssExtractPlugin = require('mini-css-extract-plugin');\nconst CssMinimizerPlugin = require('css-minimizer-webpack-plugin');\nconst { WebpackManifestPlugin } = require('webpack-manifest-plugin');\nconst InterpolateHtmlPlugin = require('react-dev-utils/InterpolateHtmlPlugin');\nconst WorkboxWebpackPlugin = require('workbox-webpack-plugin');\nconst ModuleScopePlugin = require('react-dev-utils/ModuleScopePlugin');\nconst getCSSModuleLocalIdent = require('react-dev-utils/getCSSModuleLocalIdent');\nconst ESLintPlugin = require('eslint-webpack-plugin');\nconst paths = require('./paths');\nconst modules = require('./modules');\nconst getClientEnvironment = require('./env');\nconst ModuleNotFoundPlugin = require('react-dev-utils/ModuleNotFoundPlugin');\nconst ForkTsCheckerWebpackPlugin =\n  process.env.TSC_COMPILE_ON_ERROR === 'true'\n    ? require('react-dev-utils/ForkTsCheckerWarningWebpackPlugin')\n    : require('react-dev-utils/ForkTsCheckerWebpackPlugin');\nconst ReactRefreshWebpackPlugin = require('@pmmmwh/react-refresh-webpack-plugin');\n\nconst createEnvironmentHash = require('./webpack/persistentCache/createEnvironmentHash');\n\n// Source maps are resource heavy and can cause out of memory issue for large source files.\nconst shouldUseSourceMap = process.env.GENERATE_SOURCEMAP !== 'false';\n\nconst reactRefreshRuntimeEntry = require.resolve('react-refresh/runtime');\nconst reactRefreshWebpackPluginRuntimeEntry = require.resolve(\n  '@pmmmwh/react-refresh-webpack-plugin'\n);\nconst babelRuntimeEntry = require.resolve('babel-preset-react-app');\nconst babelRuntimeEntryHelpers = require.resolve(\n  '@babel/runtime/helpers/esm/assertThisInitialized',\n  { paths: [babelRuntimeEntry] }\n);\nconst babelRuntimeRegenerator = require.resolve('@babel/runtime/regenerator', {\n  paths: [babelRuntimeEntry],\n});\n\n// Some apps do not need the benefits of saving a web request, so not inlining the chunk\n// makes for a smoother build process.\nconst shouldInlineRuntimeChunk = process.env.INLINE_RUNTIME_CHUNK !== 'false';\n\nconst emitErrorsAsWarnings = process.env.ESLINT_NO_DEV_ERRORS === 'true';\nconst disableESLintPlugin = process.env.DISABLE_ESLINT_PLUGIN === 'true';\n\nconst imageInlineSizeLimit = parseInt(\n  process.env.IMAGE_INLINE_SIZE_LIMIT || '10000'\n);\n\n// Check if TypeScript is setup\nconst useTypeScript = fs.existsSync(paths.appTsConfig);\n\n// Check if Tailwind config exists\nconst useTailwind = fs.existsSync(\n  path.join(paths.appPath, 'tailwind.config.js')\n);\n\n// Get the path to the uncompiled service worker (if it exists).\nconst swSrc = paths.swSrc;\n\n// style files regexes\nconst cssRegex = /\\.css$/;\nconst cssModuleRegex = /\\.module\\.css$/;\nconst sassRegex = /\\.(scss|sass)$/;\nconst sassModuleRegex = /\\.module\\.(scss|sass)$/;\n\nconst hasJsxRuntime = (() => {\n  if 
(process.env.DISABLE_NEW_JSX_TRANSFORM === 'true') {\n    return false;\n  }\n\n  try {\n    require.resolve('react/jsx-runtime');\n    return true;\n  } catch (e) {\n    return false;\n  }\n})();\n\n// This is the production and development configuration.\n// It is focused on developer experience, fast rebuilds, and a minimal bundle.\nmodule.exports = function (webpackEnv) {\n  const isEnvTest = webpackEnv === 'test';\n  const isEnvDevelopment = webpackEnv === 'development';\n  const isEnvProduction = webpackEnv === 'production';\n\n  // Variable used for enabling profiling in Production\n  // passed into alias object. Uses a flag if passed into the build command\n  const isEnvProductionProfile =\n    isEnvProduction && process.argv.includes('--profile');\n\n  // We will provide `paths.publicUrlOrPath` to our app\n  // as %PUBLIC_URL% in `index.html` and `process.env.PUBLIC_URL` in JavaScript.\n  // Omit trailing slash as %PUBLIC_URL%/xyz looks better than %PUBLIC_URL%xyz.\n  // Get environment variables to inject into our app.\n  const env = getClientEnvironment(paths.publicUrlOrPath.slice(0, -1));\n\n  const shouldUseReactRefresh = env.raw.FAST_REFRESH;\n\n  // common function to get style loaders\n  const getStyleLoaders = (cssOptions, preProcessor) => {\n    const loaders = [\n      isEnvDevelopment && require.resolve('style-loader'),\n      isEnvProduction && {\n        loader: MiniCssExtractPlugin.loader,\n        // css is located in `static/css`, use '../../' to locate index.html folder\n        // in production `paths.publicUrlOrPath` can be a relative path\n        options: paths.publicUrlOrPath.startsWith('.')\n          ? { publicPath: '../../' }\n          : {},\n      },\n      {\n        loader: require.resolve('css-loader'),\n        options: cssOptions,\n      },\n      {\n        // Options for PostCSS as we reference these options twice\n        // Adds vendor prefixing based on your specified browser support in\n        // package.json\n        loader: require.resolve('postcss-loader'),\n        options: {\n          postcssOptions: {\n            // Necessary for external CSS imports to work\n            // https://github.com/facebook/create-react-app/issues/2677\n            ident: 'postcss',\n            config: false,\n            plugins: !useTailwind\n              ? [\n                  'postcss-flexbugs-fixes',\n                  [\n                    'postcss-preset-env',\n                    {\n                      autoprefixer: {\n                        flexbox: 'no-2009',\n                      },\n                      stage: 3,\n                    },\n                  ],\n                  // Adds PostCSS Normalize as the reset css with default options,\n                  // so that it honors browserslist config in package.json\n                  // which in turn let's users customize the target behavior as per their needs.\n                  'postcss-normalize',\n                ]\n              : [\n                  'tailwindcss',\n                  'postcss-flexbugs-fixes',\n                  [\n                    'postcss-preset-env',\n                    {\n                      autoprefixer: {\n                        flexbox: 'no-2009',\n                      },\n                      stage: 3,\n                    },\n                  ],\n                ],\n          },\n          sourceMap: isEnvProduction ? 
shouldUseSourceMap : isEnvDevelopment,\n        },\n      },\n    ].filter(Boolean);\n    if (preProcessor) {\n      loaders.push(\n        {\n          loader: require.resolve('resolve-url-loader'),\n          options: {\n            sourceMap: isEnvProduction ? shouldUseSourceMap : isEnvDevelopment,\n            root: paths.appSrc,\n          },\n        },\n        {\n          loader: require.resolve(preProcessor),\n          options: {\n            sourceMap: true,\n          },\n        }\n      );\n    }\n    return loaders;\n  };\n\n  return {\n    target: ['browserslist'],\n    mode: isEnvProduction ? 'production' : isEnvDevelopment && 'development',\n    // Stop compilation early in production\n    bail: isEnvProduction,\n    devtool: isEnvProduction\n      ? shouldUseSourceMap\n        ? 'source-map'\n        : false\n      : isEnvDevelopment && 'cheap-module-source-map',\n    // These are the \"entry points\" to our application.\n    // This means they will be the \"root\" imports that are included in JS bundle.\n    entry: paths.appIndexJs,\n    output: {\n      // The build folder.\n      path: paths.appBuild,\n      // Add /* filename */ comments to generated require()s in the output.\n      pathinfo: isEnvDevelopment,\n      // There will be one main bundle, and one file per asynchronous chunk.\n      // In development, it does not produce real files.\n      filename: isEnvProduction\n        ? 'static/js/[name].[contenthash:8].js'\n        : isEnvDevelopment && 'static/js/bundle.js',\n      // There are also additional JS chunk files if you use code splitting.\n      chunkFilename: isEnvProduction\n        ? 'static/js/[name].[contenthash:8].chunk.js'\n        : isEnvDevelopment && 'static/js/[name].chunk.js',\n      assetModuleFilename: 'static/media/[name].[hash][ext]',\n      // webpack uses `publicPath` to determine where the app is being served from.\n      // It requires a trailing slash, or the file assets will get an incorrect path.\n      // We inferred the \"public path\" (such as / or /my-project) from homepage.\n      publicPath: paths.publicUrlOrPath,\n      // Point sourcemap entries to original disk location (format as URL on Windows)\n      devtoolModuleFilenameTemplate: isEnvProduction\n        ? info =>\n            path\n              .relative(paths.appSrc, info.absoluteResourcePath)\n              .replace(/\\\\/g, '/')\n        : isEnvDevelopment || isEnvTest ?\n          (info => path.resolve(info.absoluteResourcePath).replace(/\\\\/g, '/')) : ()=>null,\n    },\n    cache: {\n      type: 'filesystem',\n      version: createEnvironmentHash(env.raw),\n      cacheDirectory: paths.appWebpackCache,\n      store: 'pack',\n      buildDependencies: {\n        defaultWebpack: ['webpack/lib/'],\n        config: [__filename],\n        tsconfig: [paths.appTsConfig, paths.appJsConfig].filter(f =>\n          fs.existsSync(f)\n        ),\n      },\n    },\n    infrastructureLogging: {\n      level: 'none',\n    },\n    optimization: {\n      minimize: isEnvProduction,\n      minimizer: [\n        // This is only used in production mode\n        new TerserPlugin({\n          terserOptions: {\n            parse: {\n              // We want terser to parse ecma 8 code. However, we don't want it\n              // to apply any minification steps that turns valid ecma 5 code\n              // into invalid ecma 5 code. 
This is why the 'compress' and 'output'\n              // sections only apply transformations that are ecma 5 safe\n              // https://github.com/facebook/create-react-app/pull/4234\n              ecma: 8,\n            },\n            compress: {\n              ecma: 5,\n              warnings: false,\n              // Disabled because of an issue with Uglify breaking seemingly valid code:\n              // https://github.com/facebook/create-react-app/issues/2376\n              // Pending further investigation:\n              // https://github.com/mishoo/UglifyJS2/issues/2011\n              comparisons: false,\n              // Disabled because of an issue with Terser breaking valid code:\n              // https://github.com/facebook/create-react-app/issues/5250\n              // Pending further investigation:\n              // https://github.com/terser-js/terser/issues/120\n              inline: 2,\n            },\n            mangle: {\n              safari10: true,\n            },\n            // Added for profiling in devtools\n            keep_classnames: isEnvProductionProfile,\n            keep_fnames: isEnvProductionProfile,\n            output: {\n              ecma: 5,\n              comments: false,\n              // Turned on because emoji and regex is not minified properly using default\n              // https://github.com/facebook/create-react-app/issues/2488\n              ascii_only: true,\n            },\n          },\n        }),\n        // This is only used in production mode\n        new CssMinimizerPlugin(),\n      ],\n    },\n    resolve: {\n      // This allows you to set a fallback for where webpack should look for modules.\n      // We placed these paths second because we want `node_modules` to \"win\"\n      // if there are any conflicts. 
This matches Node resolution mechanism.\n      // https://github.com/facebook/create-react-app/issues/253\n      modules: ['node_modules', paths.appNodeModules].concat(\n        modules.additionalModulePaths || []\n      ),\n      fallback: { \"path\": require.resolve(\"path-browserify\") },\n      // These are the reasonable defaults supported by the Node ecosystem.\n      // We also include JSX as a common component filename extension to support\n      // some tools, although we do not recommend using it, see:\n      // https://github.com/facebook/create-react-app/issues/290\n      // `web` extension prefixes have been added for better support\n      // for React Native Web.\n      extensions: paths.moduleFileExtensions\n        .map(ext => `.${ext}`)\n        .filter(ext => useTypeScript || !ext.includes('ts')),\n      alias: {\n        // Support React Native Web\n        // https://www.smashingmagazine.com/2016/08/a-glimpse-into-the-future-with-react-native-for-web/\n        'react-native': 'react-native-web',\n        // Allows for better profiling with ReactDevTools\n        ...(isEnvProductionProfile && {\n          'react-dom$': 'react-dom/profiling',\n          'scheduler/tracing': 'scheduler/tracing-profiling',\n        }),\n        ...(modules.webpackAliases || {}),\n      },\n      plugins: [\n        // Prevents users from importing files from outside of src/ (or node_modules/).\n        // This often causes confusion because we only process files within src/ with babel.\n        // To fix this, we prevent you from importing files out of src/ -- if you'd like to,\n        // please link the files into your node_modules/ and let module-resolution kick in.\n        // Make sure your source files are compiled, as they will not be processed in any way.\n        new ModuleScopePlugin(paths.appSrc, [\n          paths.appPackageJson,\n          reactRefreshRuntimeEntry,\n          reactRefreshWebpackPluginRuntimeEntry,\n          babelRuntimeEntry,\n          babelRuntimeEntryHelpers,\n          babelRuntimeRegenerator,\n        ]),\n      ],\n    },\n    module: {\n      strictExportPresence: true,\n      rules: [\n        // Handle node_modules packages that contain sourcemaps\n        shouldUseSourceMap && {\n          enforce: 'pre',\n          exclude: /@babel(?:\\/|\\\\{1,2})runtime/,\n          test: /\\.(js|mjs|jsx|ts|tsx|css)$/,\n          loader: require.resolve('source-map-loader'),\n        },\n        {\n          // \"oneOf\" will traverse all following loaders until one will\n          // match the requirements. 
When no loader matches it will fall\n          // back to the \"file\" loader at the end of the loader list.\n          oneOf: [\n            // TODO: Merge this config once `image/avif` is in the mime-db\n            // https://github.com/jshttp/mime-db\n            {\n              test: [/\\.avif$/],\n              type: 'asset',\n              mimetype: 'image/avif',\n              parser: {\n                dataUrlCondition: {\n                  maxSize: imageInlineSizeLimit,\n                },\n              },\n            },\n            // \"url\" loader works like \"file\" loader except that it embeds assets\n            // smaller than specified limit in bytes as data URLs to avoid requests.\n            // A missing `test` is equivalent to a match.\n            {\n              test: [/\\.bmp$/, /\\.gif$/, /\\.jpe?g$/, /\\.png$/],\n              type: 'asset',\n              parser: {\n                dataUrlCondition: {\n                  maxSize: imageInlineSizeLimit,\n                },\n              },\n            },\n            {\n              test: /\\.svg$/,\n              use: [\n                {\n                  loader: require.resolve('@svgr/webpack'),\n                  options: {\n                    prettier: false,\n                    svgo: false,\n                    svgoConfig: {\n                      plugins: [{ removeViewBox: false }],\n                    },\n                    titleProp: true,\n                    ref: true,\n                  },\n                },\n                {\n                  loader: require.resolve('file-loader'),\n                  options: {\n                    name: 'static/media/[name].[hash].[ext]',\n                  },\n                },\n              ],\n              issuer: {\n                and: [/\\.(ts|tsx|js|jsx|md|mdx)$/],\n              },\n            },\n            // Process application JS with Babel.\n            // The preset includes JSX, Flow, TypeScript, and some ESnext features.\n            {\n              test: /\\.(js|mjs|jsx|ts|tsx)$/,\n              include: paths.appSrc,\n              loader: require.resolve('babel-loader'),\n              options: {\n                customize: require.resolve(\n                  'babel-preset-react-app/webpack-overrides'\n                ),\n                presets: [\n                  [\n                    require.resolve('babel-preset-react-app'),\n                    {\n                      runtime: hasJsxRuntime ? 
'automatic' : 'classic',\n                    },\n                  ],\n                ],\n                \n                plugins: [\n                  isEnvDevelopment &&\n                    shouldUseReactRefresh &&\n                    require.resolve('react-refresh/babel'),\n                ].filter(Boolean),\n                // This is a feature of `babel-loader` for webpack (not Babel itself).\n                // It enables caching results in ./node_modules/.cache/babel-loader/\n                // directory for faster rebuilds.\n                cacheDirectory: true,\n                // See #6846 for context on why cacheCompression is disabled\n                cacheCompression: false,\n                compact: isEnvProduction,\n              },\n            },\n            // Process any JS outside of the app with Babel.\n            // Unlike the application JS, we only compile the standard ES features.\n            {\n              test: /\\.(js|mjs)$/,\n              exclude: /@babel(?:\\/|\\\\{1,2})runtime/,\n              loader: require.resolve('babel-loader'),\n              options: {\n                babelrc: false,\n                configFile: false,\n                compact: false,\n                presets: [\n                  [\n                    require.resolve('babel-preset-react-app/dependencies'),\n                    { helpers: true },\n                  ],\n                ],\n                cacheDirectory: true,\n                // See #6846 for context on why cacheCompression is disabled\n                cacheCompression: false,\n                \n                // Babel sourcemaps are needed for debugging into node_modules\n                // code.  Without the options below, debuggers like VSCode\n                // show incorrect code and set breakpoints on the wrong lines.\n                sourceMaps: shouldUseSourceMap,\n                inputSourceMap: shouldUseSourceMap,\n              },\n            },\n            // \"postcss\" loader applies autoprefixer to our CSS.\n            // \"css\" loader resolves paths in CSS and adds assets as dependencies.\n            // \"style\" loader turns CSS into JS modules that inject <style> tags.\n            // In production, we use MiniCSSExtractPlugin to extract that CSS\n            // to a file, but in development \"style\" loader enables hot editing\n            // of CSS.\n            // By default we support CSS Modules with the extension .module.css\n            {\n              test: cssRegex,\n              exclude: cssModuleRegex,\n              use: getStyleLoaders({\n                importLoaders: 1,\n                sourceMap: isEnvProduction\n                  ? shouldUseSourceMap\n                  : isEnvDevelopment,\n                modules: {\n                  mode: 'icss',\n                },\n              }),\n              // Don't consider CSS imports dead code even if the\n              // containing package claims to have no side effects.\n              // Remove this when webpack adds a warning or an error for this.\n              // See https://github.com/webpack/webpack/issues/6571\n              sideEffects: true,\n            },\n            // Adds support for CSS Modules (https://github.com/css-modules/css-modules)\n            // using the extension .module.css\n            {\n              test: cssModuleRegex,\n              use: getStyleLoaders({\n                importLoaders: 1,\n                sourceMap: isEnvProduction\n                  ? 
shouldUseSourceMap\n                  : isEnvDevelopment,\n                modules: {\n                  mode: 'local',\n                  getLocalIdent: getCSSModuleLocalIdent,\n                },\n              }),\n            },\n            // Opt-in support for SASS (using .scss or .sass extensions).\n            // By default we support SASS Modules with the\n            // extensions .module.scss or .module.sass\n            {\n              test: sassRegex,\n              exclude: sassModuleRegex,\n              use: getStyleLoaders(\n                {\n                  importLoaders: 3,\n                  sourceMap: isEnvProduction\n                    ? shouldUseSourceMap\n                    : isEnvDevelopment,\n                  modules: {\n                    mode: 'icss',\n                  },\n                },\n                'sass-loader'\n              ),\n              // Don't consider CSS imports dead code even if the\n              // containing package claims to have no side effects.\n              // Remove this when webpack adds a warning or an error for this.\n              // See https://github.com/webpack/webpack/issues/6571\n              sideEffects: true,\n            },\n            // Adds support for CSS Modules, but using SASS\n            // using the extension .module.scss or .module.sass\n            {\n              test: sassModuleRegex,\n              use: getStyleLoaders(\n                {\n                  importLoaders: 3,\n                  sourceMap: isEnvProduction\n                    ? shouldUseSourceMap\n                    : isEnvDevelopment,\n                  modules: {\n                    mode: 'local',\n                    getLocalIdent: getCSSModuleLocalIdent,\n                  },\n                },\n                'sass-loader'\n              ),\n            },\n            // \"file\" loader makes sure those assets get served by WebpackDevServer.\n            // When you `import` an asset, you get its (virtual) filename.\n            // In production, they would get copied to the `build` folder.\n            // This loader doesn't use a \"test\" so it will catch all modules\n            // that fall through the other loaders.\n            {\n              // Exclude `js` files to keep \"css\" loader working as it injects\n              // its runtime that would otherwise be processed through \"file\" loader.\n              // Also exclude `html` and `json` extensions so they get processed\n              // by webpacks internal loaders.\n              exclude: [/^$/, /\\.(js|mjs|jsx|ts|tsx)$/, /\\.html$/, /\\.json$/],\n              type: 'asset/resource',\n            },\n            // ** STOP ** Are you adding a new loader?\n            // Make sure to add the new loader(s) before the \"file\" loader.\n          ],\n        },\n      ].filter(Boolean),\n    },\n    plugins: [\n      // Generates an `index.html` file with the <script> injected.\n      new HtmlWebpackPlugin(\n        Object.assign(\n          {},\n          {\n            inject: true,\n            template: paths.appHtml,\n          },\n          isEnvProduction\n            ? 
{\n                minify: {\n                  removeComments: true,\n                  collapseWhitespace: true,\n                  removeRedundantAttributes: true,\n                  useShortDoctype: true,\n                  removeEmptyAttributes: true,\n                  removeStyleLinkTypeAttributes: true,\n                  keepClosingSlash: true,\n                  minifyJS: true,\n                  minifyCSS: true,\n                  minifyURLs: true,\n                },\n              }\n            : undefined\n        )\n      ),\n      // Inlines the webpack runtime script. This script is too small to warrant\n      // a network request.\n      // https://github.com/facebook/create-react-app/issues/5358\n      isEnvProduction &&\n        shouldInlineRuntimeChunk &&\n        new InlineChunkHtmlPlugin(HtmlWebpackPlugin, [/runtime-.+[.]js/]),\n      // Makes some environment variables available in index.html.\n      // The public URL is available as %PUBLIC_URL% in index.html, e.g.:\n      // <link rel=\"icon\" href=\"%PUBLIC_URL%/favicon.ico\">\n      // It will be an empty string unless you specify \"homepage\"\n      // in `package.json`, in which case it will be the pathname of that URL.\n      new InterpolateHtmlPlugin(HtmlWebpackPlugin, env.raw),\n      // This gives some necessary context to module not found errors, such as\n      // the requesting resource.\n      new ModuleNotFoundPlugin(paths.appPath),\n      // Makes some environment variables available to the JS code, for example:\n      // if (process.env.NODE_ENV === 'production') { ... }. See `./env.js`.\n      // It is absolutely essential that NODE_ENV is set to production\n      // during a production build.\n      // Otherwise React will be compiled in the very slow development mode.\n      new webpack.DefinePlugin(env.stringified),\n      // Experimental hot reloading for React .\n      // https://github.com/facebook/react/tree/main/packages/react-refresh\n      isEnvDevelopment &&\n        shouldUseReactRefresh &&\n        new ReactRefreshWebpackPlugin({\n          overlay: false,\n        }),\n      // Watcher doesn't work well if you mistype casing in a path so we use\n      // a plugin that prints an error when you attempt to do this.\n      // See https://github.com/facebook/create-react-app/issues/240\n      isEnvDevelopment && new CaseSensitivePathsPlugin(),\n      isEnvProduction &&\n        new MiniCssExtractPlugin({\n          // Options similar to the same options in webpackOptions.output\n          // both options are optional\n          filename: 'static/css/[name].[contenthash:8].css',\n          chunkFilename: 'static/css/[name].[contenthash:8].chunk.css',\n        }),\n      // Generate an asset manifest file with the following content:\n      // - \"files\" key: Mapping of all asset filenames to their corresponding\n      //   output file so that tools can pick it up without having to parse\n      //   `index.html`\n      // - \"entrypoints\" key: Array of files which are included in `index.html`,\n      //   can be used to reconstruct the HTML if necessary\n      new WebpackManifestPlugin({\n        fileName: 'asset-manifest.json',\n        publicPath: paths.publicUrlOrPath,\n        generate: (seed, files, entrypoints) => {\n          const manifestFiles = files.reduce((manifest, file) => {\n            manifest[file.name] = file.path;\n            return manifest;\n          }, seed);\n          const entrypointFiles = entrypoints.main.filter(\n            fileName => 
!fileName.endsWith('.map')\n          );\n\n          return {\n            files: manifestFiles,\n            entrypoints: entrypointFiles,\n          };\n        },\n      }),\n      // Moment.js is an extremely popular library that bundles large locale files\n      // by default due to how webpack interprets its code. This is a practical\n      // solution that requires the user to opt into importing specific locales.\n      // https://github.com/jmblog/how-to-optimize-momentjs-with-webpack\n      // You can remove this if you don't use Moment.js:\n      new webpack.IgnorePlugin({\n        resourceRegExp: /^\\.\\/locale$/,\n        contextRegExp: /moment$/,\n      }),\n      // Generate a service worker script that will precache, and keep up to date,\n      // the HTML & assets that are part of the webpack build.\n      isEnvProduction &&\n        fs.existsSync(swSrc) &&\n        new WorkboxWebpackPlugin.InjectManifest({\n          swSrc,\n          dontCacheBustURLsMatching: /\\.[0-9a-f]{8}\\./,\n          exclude: [/\\.map$/, /asset-manifest\\.json$/, /LICENSE/],\n          // Bump up the default maximum size (2mb) that's precached,\n          // to make lazy-loading failure scenarios less likely.\n          // See https://github.com/cra-template/pwa/issues/13#issuecomment-722667270\n          maximumFileSizeToCacheInBytes: 5 * 1024 * 1024,\n        }),\n      // TypeScript type checking\n      useTypeScript &&\n        new ForkTsCheckerWebpackPlugin({\n          async: isEnvDevelopment,\n          typescript: {\n            typescriptPath: resolve.sync('typescript', {\n              basedir: paths.appNodeModules,\n            }),\n            configOverwrite: {\n              compilerOptions: {\n                sourceMap: isEnvProduction\n                  ? 
shouldUseSourceMap\n                  : isEnvDevelopment,\n                skipLibCheck: true,\n                inlineSourceMap: false,\n                declarationMap: false,\n                noEmit: true,\n                incremental: true,\n                tsBuildInfoFile: paths.appTsBuildInfoFile,\n              },\n            },\n            context: paths.appPath,\n            diagnosticOptions: {\n              syntactic: true,\n            },\n            mode: 'write-references',\n            // profile: true,\n          },\n          issue: {\n            // This one is specifically to match during CI tests,\n            // as micromatch doesn't match\n            // '../cra-template-typescript/template/src/App.tsx'\n            // otherwise.\n            include: [\n              { file: '../**/src/**/*.{ts,tsx}' },\n              { file: '**/src/**/*.{ts,tsx}' },\n            ],\n            exclude: [\n              { file: '**/src/**/__tests__/**' },\n              { file: '**/src/**/?(*.){spec|test}.*' },\n              { file: '**/src/setupProxy.*' },\n              { file: '**/src/setupTests.*' },\n            ],\n          },\n          logger: {\n            infrastructure: 'silent',\n          },\n        }),\n      !disableESLintPlugin &&\n        new ESLintPlugin({\n          // Plugin options\n          extensions: ['js', 'mjs', 'jsx', 'ts', 'tsx'],\n          formatter: require.resolve('react-dev-utils/eslintFormatter'),\n          eslintPath: require.resolve('eslint'),\n          failOnError: !(isEnvDevelopment && emitErrorsAsWarnings),\n          context: paths.appSrc,\n          cache: true,\n          cacheLocation: path.resolve(\n            paths.appNodeModules,\n            '.cache/.eslintcache'\n          ),\n          // ESLint class options\n          cwd: paths.appPath,\n          resolvePluginsRelativeTo: __dirname,\n          baseConfig: {\n            extends: [require.resolve('eslint-config-react-app/base')],\n            rules: {\n              ...(!hasJsxRuntime && {\n                'react/react-in-jsx-scope': 'error',\n              }),\n            },\n            ignorePatterns: ['**/*.cy.js'],\n          },\n        }),\n    ].filter(Boolean),\n    // Turn off performance processing because we utilize\n    // our own hints via the FileSizeReporter\n    performance: false,\n  };\n};\n"
  },
  {
    "path": "services/workbench2/config/webpackDevServer.config.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n'use strict';\n\nconst fs = require('fs');\nconst evalSourceMapMiddleware = require('react-dev-utils/evalSourceMapMiddleware');\nconst noopServiceWorkerMiddleware = require('react-dev-utils/noopServiceWorkerMiddleware');\nconst ignoredFiles = require('react-dev-utils/ignoredFiles');\nconst redirectServedPath = require('react-dev-utils/redirectServedPathMiddleware');\nconst paths = require('./paths');\nconst getHttpsConfig = require('./getHttpsConfig');\n\nconst host = process.env.HOST || '0.0.0.0';\nconst sockHost = process.env.WDS_SOCKET_HOST;\nconst sockPath = process.env.WDS_SOCKET_PATH; // default: '/ws'\nconst sockPort = process.env.WDS_SOCKET_PORT;\n\nmodule.exports = function (proxy, allowedHost) {\n  const disableFirewall =\n    !proxy || process.env.DANGEROUSLY_DISABLE_HOST_CHECK === 'true';\n  return {\n    // WebpackDevServer 2.4.3 introduced a security fix that prevents remote\n    // websites from potentially accessing local content through DNS rebinding:\n    // https://github.com/webpack/webpack-dev-server/issues/887\n    // https://medium.com/webpack/webpack-dev-server-middleware-security-issues-1489d950874a\n    // However, it made several existing use cases such as development in cloud\n    // environment or subdomains in development significantly more complicated:\n    // https://github.com/facebook/create-react-app/issues/2271\n    // https://github.com/facebook/create-react-app/issues/2233\n    // While we're investigating better solutions, for now we will take a\n    // compromise. Since our WDS configuration only serves files in the `public`\n    // folder we won't consider accessing them a vulnerability. However, if you\n    // use the `proxy` feature, it gets more dangerous because it can expose\n    // remote code execution vulnerabilities in backends like Django and Rails.\n    // So we will disable the host check normally, but enable it if you have\n    // specified the `proxy` setting. Finally, we let you override it if you\n    // really know what you're doing with a special environment variable.\n    // Note: [\"localhost\", \".localhost\"] will support subdomains - but we might\n    // want to allow setting the allowedHosts manually for more complex setups\n    allowedHosts: disableFirewall ? 'all' : [allowedHost],\n    headers: {\n      'Access-Control-Allow-Origin': '*',\n      'Access-Control-Allow-Methods': '*',\n      'Access-Control-Allow-Headers': '*',\n    },\n    // Enable gzip compression of generated files.\n    compress: true,\n    static: {\n      // By default WebpackDevServer serves physical files from current directory\n      // in addition to all the virtual build products that it serves from memory.\n      // This is confusing because those files won’t automatically be available in\n      // production build folder unless we copy them. However, copying the whole\n      // project directory is dangerous because we may expose sensitive files.\n      // Instead, we establish a convention that only files in `public` directory\n      // get served. 
Our build script will copy `public` into the `build` folder.\n      // In `index.html`, you can get URL of `public` folder with %PUBLIC_URL%:\n      // <link rel=\"icon\" href=\"%PUBLIC_URL%/favicon.ico\">\n      // In JavaScript code, you can access it with `process.env.PUBLIC_URL`.\n      // Note that we only recommend to use `public` folder as an escape hatch\n      // for files like `favicon.ico`, `manifest.json`, and libraries that are\n      // for some reason broken when imported through webpack. If you just want to\n      // use an image, put it in `src` and `import` it from JavaScript instead.\n      directory: paths.appPublic,\n      publicPath: [paths.publicUrlOrPath],\n      // By default files from `contentBase` will not trigger a page reload.\n      watch: {\n        // Reportedly, this avoids CPU overload on some systems.\n        // https://github.com/facebook/create-react-app/issues/293\n        // src/node_modules is not ignored to support absolute imports\n        // https://github.com/facebook/create-react-app/issues/1065\n        ignored: ignoredFiles(paths.appSrc),\n      },\n    },\n    client: {\n      webSocketURL: {\n        // Enable custom sockjs pathname for websocket connection to hot reloading server.\n        // Enable custom sockjs hostname, pathname and port for websocket connection\n        // to hot reloading server.\n        hostname: sockHost,\n        pathname: sockPath,\n        port: sockPort,\n      },\n      overlay: {\n        errors: true,\n        warnings: false,\n      },\n    },\n    devMiddleware: {\n      // It is important to tell WebpackDevServer to use the same \"publicPath\" path as\n      // we specified in the webpack config. When homepage is '.', default to serving\n      // from the root.\n      // remove last slash so user can land on `/test` instead of `/test/`\n      publicPath: paths.publicUrlOrPath.slice(0, -1),\n    },\n\n    https: getHttpsConfig(),\n    host,\n    historyApiFallback: {\n      // Paths with dots should still use the history fallback.\n      // See https://github.com/facebook/create-react-app/issues/387.\n      disableDotRule: true,\n      index: paths.publicUrlOrPath,\n    },\n    // `proxy` is run between `before` and `after` `webpack-dev-server` hooks\n    proxy,\n    onBeforeSetupMiddleware(devServer) {\n      // Keep `evalSourceMapMiddleware`\n      // middlewares before `redirectServedPath` otherwise will not have any effect\n      // This lets us fetch source contents from webpack for the error overlay\n      devServer.app.use(evalSourceMapMiddleware(devServer));\n\n      if (fs.existsSync(paths.proxySetup)) {\n        // This registers user provided middleware for proxy reasons\n        require(paths.proxySetup)(devServer.app);\n      }\n    },\n    onAfterSetupMiddleware(devServer) {\n      // Redirect to `PUBLIC_URL` or `homepage` from `package.json` if url not match\n      devServer.app.use(redirectServedPath(paths.publicUrlOrPath));\n\n      // This service worker file is effectively a 'no-op' that will reset any\n      // previous service worker registered for the same host:port combination.\n      // We do this in development to avoid hitting the production cache if\n      // it used the same host and port.\n      // https://github.com/facebook/create-react-app/issues/2272#issuecomment-302832432\n      devServer.app.use(noopServiceWorkerMiddleware(paths.publicUrlOrPath));\n    },\n  };\n};\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/auth-action.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// import { getNewExtraToken, initAuth } from \"../../src/store/auth/auth-action\";\nimport { API_TOKEN_KEY } from 'services/auth-service/auth-service';\n\n// import 'jest-localstorage-mock';\n// import { ServiceRepository, createServices } from \"services/services\";\n// import { configureStore, RootStore } from \"../../src/store/store\"; //causes tuppy failure\n// import { createBrowserHistory } from \"history\";\n// import { mockConfig } from 'common/config';\n// import { ApiActions } from \"services/api/api-actions\";\nimport { ACCOUNT_LINK_STATUS_KEY } from 'services/link-account-service/link-account-service';\nimport Axios, { AxiosInstance } from 'axios';\n// import MockAdapter from \"axios-mock-adapter\";\n// import { ImportMock } from 'ts-mock-imports';\n// import * as servicesModule from \"services/services\";\n// import * as authActionSessionModule from \"../../src/store/auth/auth-action-session\";\n// import { SessionStatus } from \"models/session\";\nimport { getRemoteHostConfig } from '../../src/store/auth/auth-action-session';\n\ndescribe('auth-actions', () => {\n    // let axiosInst;\n    // let axiosMock;\n\n    // let store;\n    // let services;\n    // const config = {};\n    // const actions = {\n    //     progressFn: (id, working) => { },\n    //     errorFn: (id, message) => { }\n    // };\n    // let importMocks;\n\n    const localClusterTokenExpiration = '2020-01-01T00:00:00.000Z';\n    const loginClusterTokenExpiration = '2140-01-01T00:00:00.000Z';\n\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. 
Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser')\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    it('creates an extra token', () => {\n        let firstToken;\n        let firstExtraToken;\n\n        cy.loginAs(activeUser);\n        cy.waitForDom();\n        cy.waitForLocalStorage('arvadosStore').then((storedStore) => {\n            const store = JSON.parse(storedStore);\n\n            //check that no extra token was requested\n            expect(store.auth.apiToken).to.not.be.undefined;\n            expect(store.auth.extraApiToken).to.be.undefined;\n            firstToken = store.auth.apiToken;\n        });\n\n        //ask for an extra token\n        cy.get('[aria-label=\"Account Management\"]').click();\n        cy.contains('Get API token').click();\n        cy.contains('GET NEW TOKEN').click();\n        cy.waitForLocalStorage('arvadosStore').then((storedStore) => {\n            const store = JSON.parse(storedStore);\n\n            // check that cached token is used\n            expect(store.auth.apiToken).to.equal(firstToken);\n            cy.waitForLocalStorageUpdate('arvadosStore');\n\n            //check that an extra token was requested\n            expect(store.auth.extraApiToken).to.not.be.undefined;\n            firstExtraToken = store.auth.extraApiToken;\n        });\n        //check that another request generates a new token\n        cy.contains('GET NEW TOKEN').click();\n        cy.waitForLocalStorageUpdate('arvadosStore');\n        cy.waitForLocalStorage('arvadosStore').then((storedStore) => {\n            const store = JSON.parse(storedStore);\n\n            expect(store.auth.apiToken).to.not.be.undefined;\n            expect(store.auth.extraApiToken).to.not.be.undefined;\n            expect(store.auth.extraApiToken).to.not.equal(firstExtraToken);\n        });\n    });\n\n    it('requests remote token and token expiration', () => {\n        cy.loginAs(adminUser);\n        cy.waitForLocalStorage('arvadosStore').then((storedStore) => {\n            const store = JSON.parse(storedStore);\n\n            // verify that the token is cached\n            expect(store.auth.apiToken).to.not.be.undefined;\n            expect(localClusterTokenExpiration).to.not.equal(loginClusterTokenExpiration);\n\n            const now = new Date();\n            const expiration = new Date(store.auth.apiTokenExpiration);\n            const expectedExpiration = new Date(now.getTime() + 24 * 60 * 60 * 1000 + 2000);\n            const timeDiff = Math.abs(expectedExpiration.getMilliseconds() - expiration.getMilliseconds());\n\n            // verify that the token expiration is ~24 hours from now (with a 2 second buffer)\n            expect(timeDiff).to.be.lessThan(2000);\n        });\n    });\n\n    //TODO: finish this test, maybe convert back to component test?\n\n    // it('should initialise state with user and api token from local storage', () => {\n    //     let apiToken;\n    //     cy.loginAs(activeUser);\n\n    //     cy.waitForLocalStorage('apiToken').then((storedToken) => {\n    //         apiToken = storedToken;\n\n    //         // logout\n  
  //         cy.get('[aria-label=\"Account Management\"]').click();\n    //         cy.get('[data-cy=logout-menuitem]').click();\n\n    //         // verify logout\n    //         cy.window().then((win) => {\n    //             cy.contains('Please log in.').should('exist');\n    //             expect(win.localStorage.getItem('apiToken')).to.be.null;\n    //         });\n    //     });\n\n    //     cy.visit('/');\n    //     cy.waitForLocalStorage('arvadosStore').then((storedStore) => {\n    //         const store = JSON.parse(storedStore);\n    //         console.log(store);\n    //         const auth = store.auth;\n    //         console.log(JSON.stringify(auth.user));\n    //         expect(auth.user).to.deep.equal({\n    //             email: 'user@example.local',\n    //             firstName: 'Active',\n    //             lastName: 'User',\n    //             uuid: 'zzzzz-tpzed-wlme7goukc5495r',\n    //             ownerUuid: 'zzzzz-tpzed-000000000000000',\n    //             isAdmin: false,\n    //             isActive: true,\n    //             username: 'user',\n    //             canWrite: true,\n    //             canManage: true,\n    //             prefs: { profile: {} },\n    //         });\n    //     });\n    // });\n\n    // TODO: Add remaining action tests\n    /*\n       it('should fire external url to login', () => {\n       const initialState = undefined;\n       window.location.assign = jest.fn();\n       reducer(initialState, authActions.LOGIN());\n       expect(window.location.assign).toBeCalledWith(\n       `/login?return_to=${window.location.protocol}//${window.location.host}/token`\n       );\n       });\n\n       it('should fire external url to logout', () => {\n       const initialState = undefined;\n       window.location.assign = jest.fn();\n       reducer(initialState, authActions.LOGOUT());\n       expect(window.location.assign).toBeCalledWith(\n       `/logout?return_to=${location.protocol}//${location.host}`\n       );\n       });\n     */\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/auth-middleware.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe(\"AuthMiddleware\", () => {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser(\"admin\", \"Admin\", \"User\", true, true)\n            .as(\"adminUser\")\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser(\"user\", \"Active\", \"User\", false, true)\n            .as(\"activeUser\")\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    it(\"handles LOGOUT action\", () => {\n        cy.loginAs(activeUser);\n        cy.waitForDom();\n        // verify that the token is stored in localStorage\n        cy.window().then(win => {\n            expect(win.localStorage.getItem('apiToken')).to.equal(activeUser.token);\n        });\n\n            // logout\n            cy.get('[aria-label=\"Account Management\"]').click();\n            cy.get('[data-cy=logout-menuitem]').click();\n\n            cy.window().then(win => {\n                // verify that logout has been successful\n                cy.contains(\"Please log in.\").should(\"exist\"); \n                expect(win.localStorage.getItem('apiToken')).to.be.null;\n            });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/banner-tooltip.cy.js",
    "content": "\n// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe('Banner / tooltip tests', function () {\n    let adminUser;\n    let collectionUUID;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser').then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getAll('@adminUser').then(([adminUser]) => {\n            // This collection will not be deleted after each test, we'll\n            // clean it up manually.\n            cy.createCollection(adminUser.token, {\n                name: `BannerTooltipTest${Math.floor(Math.random() * 999999)}`,\n                owner_uuid: adminUser.user.uuid,\n            }, true).as('bannerCollection');\n        });\n\n        cy.getAll('@bannerCollection').then(function ([bannerCollection]) {\n            collectionUUID = bannerCollection.uuid;\n\n            cy.loginAs(adminUser);\n\n            cy.goToPath(`/collections/${bannerCollection.uuid}`);\n\n            cy.doMPVTabSelect(\"Files\");\n            cy.waitForDom();\n            cy.get('[data-cy=upload-button]').click();\n\n            cy.fixture('files/banner.html').as('banner');\n            cy.fixture('files/tooltips.txt').as('tooltips');\n\n            cy.getAll('@banner', '@tooltips').then(([banner, tooltips]) => {\n                cy.get('[data-cy=drag-and-drop]').upload(banner, 'banner.html', false);\n                cy.get('[data-cy=drag-and-drop]').upload(tooltips, 'tooltips.json', false);\n            });\n\n            cy.get('[data-cy=form-submit-btn]').click();\n            cy.get('[data-cy=form-submit-btn]').should('not.exist');\n            cy.get('[data-cy=collection-files-right-panel]')\n                .should('contain', 'banner.html');\n            cy.get('[data-cy=collection-files-right-panel]')\n                .should('contain', 'tooltips.json');\n        });\n    });\n\n    beforeEach(function () {\n        cy.on('uncaught:exception', (err, runnable, promise) => {\n            Cypress.log({ message: `Application Error: ${err}`});\n            if (promise) {\n                return false;\n            }\n        });\n\n        //login here instead of in specific tests to preserve localStorage and intercept listener\n        cy.loginAs(adminUser);\n\n        //must be in localstorage to have banner option in notifications menu\n        //it doesn't matter what the value is, as long as it's truthy\n        window.localStorage.setItem('bannerFileData', 'foo');\n\n        cy.intercept({ method: 'GET', url: '**/arvados/v1/config?nocache=*' }, (req) => {\n            req.on('response', (res) => {\n                res.body.Workbench.BannerUUID = collectionUUID;\n            });\n        });\n    });\n\n    after(function () {\n        // Delete banner collection after all test used it.\n        cy.deleteResource(adminUser.token, \"collections\", collectionUUID);\n    });\n\n    it('should re-show the banner', () => {\n        //reload instead of cy.loginAs() to preserve localStorage and intercept listener\n        //logged in as adminUser\n        cy.reload();\n        cy.waitForDom();\n\n        //check that banner 
appears on reload\n        cy.waitForDom().get('[data-cy=confirmation-dialog]', {timeout: 10000}).should('be.visible');\n        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n        cy.waitForDom().get('[data-cy=confirmation-dialog]', {timeout: 10000}).should('not.exist');\n\n        //check that banner appears on toggle\n        cy.get('[data-cy=\"notifications-menu\"]').click();\n        cy.get('li').contains('Restore Banner').click();\n\n        cy.waitForDom().get('[data-cy=confirmation-dialog-ok-btn]', {timeout: 10000}).should('be.visible');\n        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n        cy.waitForDom().get('[data-cy=confirmation-dialog]', {timeout: 10000}).should('not.exist');\n    });\n\n    it('should show tooltips and remove them when tooltips are disabled', () => {\n        //reload instead of cy.loginAs() to preserve localStorage and intercept listener\n        //logged in as adminUser\n        cy.reload();\n        cy.waitForDom();\n\n        //banner appears on reload\n        cy.waitForDom().get('[data-cy=confirmation-dialog]', {timeout: 10000}).should('be.visible');\n        cy.get('[data-cy=confirmation-dialog-ok-btn]').click({force: true});\n        cy.waitForDom().get('[data-cy=confirmation-dialog]', {timeout: 10000}).should('not.exist');\n\n        cy.contains('This allows you to navigate through the app').should('not.exist'); // This content comes from tooltips.txt\n        cy.get('[data-cy=side-panel-tree]').trigger('mouseover');\n        cy.get('[data-cy=side-panel-tree]').trigger('mouseenter');\n        cy.contains('This allows you to navigate through the app').should('be.visible');\n\n        cy.get('[data-cy=\"notifications-menu\"]').click();\n        cy.get('li').contains('Disable tooltips').click();\n\n        cy.contains('This allows you to navigate through the app').should('not.exist');\n        cy.get('[data-cy=side-panel-tree]').trigger('mouseover');\n        cy.get('[data-cy=side-panel-tree]').trigger('mouseenter');\n        cy.contains('This allows you to navigate through the app').should('not.exist');\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/collection.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nconst path = require(\"path\");\nrequire('cypress-plugin-tab')\n\ndescribe(\"Collection panel tests\", function () {\n    let activeUser;\n    let adminUser;\n    let downloadsFolder;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser(\"admin\", \"Admin\", \"User\", true, true)\n            .as(\"adminUser\")\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser(\"collectionuser1\", \"Collection\", \"User\", false, true)\n            .as(\"activeUser\")\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n        downloadsFolder = Cypress.config(\"downloadsFolder\");\n    });\n\n    it(\"allows to download mountain duck config for a collection\", () => {\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        })\n            .as(\"testCollection\")\n            .then(function (testCollection) {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${testCollection.uuid}`);\n\n                cy.get('[data-title=\"Open with 3rd party client\"]').click();\n                cy.get(\"[data-cy=download-button\").click();\n\n                const filename = path.join(downloadsFolder, `${testCollection.name}.duck`);\n\n                cy.readFile(filename, { timeout: 15000 })\n                    .then(body => {\n                        const childrenCollection = Array.prototype.slice.call(Cypress.$(body).find(\"dict\")[0].children);\n                        const map = {};\n                        let i,\n                            j = 2;\n\n                        for (i = 0; i < childrenCollection.length; i += j) {\n                            map[childrenCollection[i].outerText] = childrenCollection[i + 1].outerText;\n                        }\n\n                        cy.get(\"#simple-tabpanel-0\")\n                            .find(\"a\")\n                            .then(a => {\n                                const [host, port] = a.text().split(\"@\")[1].split(\"/\")[0].split(\":\");\n                                expect(map[\"Protocol\"]).to.equal(\"davs\");\n                                expect(map[\"UUID\"]).to.equal(testCollection.uuid);\n                                expect(map[\"Username\"]).to.equal(activeUser.user.username);\n                                expect(map[\"Port\"]).to.equal(port);\n                                expect(map[\"Hostname\"]).to.equal(host);\n                                if (map[\"Path\"]) {\n                                    expect(map[\"Path\"]).to.equal(`/c=${testCollection.uuid}`);\n                                }\n                            });\n                    })\n                    .then(() => cy.task(\"clearDownload\", { filename }));\n            });\n    });\n\n    it(\"attempts to use a preexisting name creating or updating a collection\", function () {\n        const name 
= `Test collection ${Math.floor(Math.random() * 999999)}`;\n        cy.createCollection(adminUser.token, {\n            name: name,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        });\n        cy.loginAs(activeUser);\n        cy.goToPath(`/projects/${activeUser.user.uuid}`);\n        cy.get(\"[data-cy=breadcrumb-first]\").should(\"contain\", \"Projects\");\n        cy.get(\"[data-cy=breadcrumb-last]\").should(\"not.exist\");\n        // Attempt to create new collection with a duplicate name\n        cy.get(\"[data-cy=side-panel-button]\").click();\n        cy.get(\"[data-cy=side-panel-new-collection]\").click();\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"contain\", \"New collection\")\n            .within(() => {\n                cy.get(\"[data-cy=name-field]\").within(() => {\n                    cy.get(\"input\").type(name);\n                });\n                cy.get(\"[data-cy=form-submit-btn]\").click();\n            });\n        // Error message should display, allowing editing the name\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"exist\")\n            .and(\"contain\", \"Collection with the same name already exists\")\n            .within(() => {\n                cy.get(\"[data-cy=name-field]\").within(() => {\n                    cy.get(\"input\").type(\" renamed\");\n                });\n                cy.get(\"[data-cy=form-submit-btn]\").click({timeout: 10000});\n            });\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n        // Attempt to rename the collection with the duplicate name\n        cy.get('[data-title=\"Edit collection\"]').click();\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"contain\", \"Edit Collection\")\n            .within(() => {\n                cy.get(\"[data-cy=name-field]\").within(() => {\n                    cy.get(\"input\").type(\"{selectall}{backspace}\").type(name);\n                });\n                cy.get(\"[data-cy=form-submit-btn]\").click();\n            });\n        cy.get(\"[data-cy=form-dialog]\").should(\"exist\").and(\"contain\", \"Collection with the same name already exists\");\n    });\n\n    it(\"uses the property editor (from edit dialog) with vocabulary terms\", function () {\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        })\n            .as(\"testCollection\")\n            .then(function () {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${this.testCollection.uuid}`);\n\n                // Verify collection name\n                cy.get(\"[data-cy=collection-details-card]\").should(\"contain\", this.testCollection.name);\n                // Open overview tab\n                cy.doMPVTabSelect(\"Overview\");\n                // Verify property not present\n                cy.get(\"[data-cy=resource-properties]\").should(\"not.contain\", \"Color: Magenta\");\n\n                cy.get('[data-title=\"Edit collection\"]').click();\n                cy.get(\"[data-cy=form-dialog]\").should(\"contain\", \"Properties\");\n\n                // Key: Color (IDTAGCOLORS) - Value: Magenta (IDVALCOLORS3)\n                cy.get(\"[data-cy=resource-properties-form]\").within(() => {\n                    cy.get(\"[data-cy=property-field-key]\").within(() => {\n                        cy.get(\"input\").type(\"Color\");\n                    });\n                    cy.get(\"[data-cy=property-field-value]\").click().within(() => {\n                        cy.get(\"input\").type(\"Magenta\");\n                    });\n                    cy.get(\"[data-cy=property-add-btn]\").click();\n                });\n                // Confirm proper vocabulary labels are displayed on the UI.\n                cy.get(\"[data-cy=form-dialog]\").should(\"contain\", \"Color: Magenta\");\n                cy.get(\"[data-cy=form-dialog]\").contains(\"Save\").click();\n                cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n                // Confirm proper vocabulary IDs were saved on the backend.\n                cy.doRequest(\"GET\", `/arvados/v1/collections/${this.testCollection.uuid}`)\n                    .its(\"body\")\n                    .as(\"collection\")\n                    .then(function () {\n                        expect(this.collection.properties.IDTAGCOLORS).to.deep.equal([\"IDVALCOLORS3\"]);\n                    });\n                // Confirm the property is displayed on the UI.\n                cy.get(\"[data-cy=resource-properties]\").should(\"contain\", \"Color: Magenta\");\n            });\n    });\n\n    it(\"uses the editor (from details panel) with vocabulary terms\", function () {\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        })\n            .as(\"testCollection\")\n            .then(function () {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${this.testCollection.uuid}`);\n\n                // Verify collection name\n                cy.get(\"[data-cy=collection-details-card]\")\n                    .should(\"contain\", this.testCollection.name);\n\n                // Open overview tab\n                cy.doMPVTabSelect(\"Overview\");\n\n                // Verify properties not present\n                cy.get(\"[data-cy=resource-properties]\")\n                    .and(\"not.contain\", \"Color: Magenta\")\n                    .and(\"not.contain\", \"Size: S\");\n                cy.get(\"[data-title='View details']\").click();\n\n                cy.get(\"[data-cy=details-panel]\").within(() => {\n                    cy.get(\"[data-cy=details-panel-edit-btn]\").click();\n                });\n                cy.get(\"[data-cy=form-dialog]\").contains(\"Edit Collection\");\n\n                // Key: Color (IDTAGCOLORS) - Value: Magenta (IDVALCOLORS3)\n                cy.get(\"[data-cy=resource-properties-form]\").within(() => {\n                    cy.get(\"[data-cy=property-field-key]\").within(() => {\n                        cy.get(\"input\").type(\"Color\");\n                    });\n                    cy.get(\"[data-cy=property-field-value]\").click().within(() => {\n                        cy.get(\"input\").type(\"Magenta\");\n                    });\n                    cy.get(\"[data-cy=property-add-btn]\").click();\n                });\n                // Confirm proper vocabulary labels are displayed on the UI.\n                cy.get(\"[data-cy=form-dialog]\").should(\"contain\", \"Color: Magenta\");\n\n                // Case-insensitive on-blur auto-selection test\n                // Key: Size (IDTAGSIZES) - Value: Small (IDVALSIZES2)\n                cy.get(\"[data-cy=resource-properties-form]\").within(() => {\n                    cy.get(\"[data-cy=property-field-key]\").within(() => {\n                        cy.get(\"input\").type(\"sIzE\");\n                    });\n                    cy.get(\"[data-cy=property-field-value]\").click().within(() => {\n                        cy.get(\"input\").type(\"sMaLL{enter}\");\n                    });\n                    cy.get(\"[data-cy=property-add-btn]\").click();\n                    cy.waitForDom();\n                });\n                // Confirm proper vocabulary labels are displayed on the UI.\n                cy.get(\"[data-cy=form-dialog]\").should(\"contain\", \"Size: S\");\n\n                cy.get(\"[data-cy=form-dialog]\").contains(\"Save\").click();\n                cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n\n                // Confirm proper vocabulary IDs were saved on the backend.\n                cy.doRequest(\"GET\", `/arvados/v1/collections/${this.testCollection.uuid}`)\n                    .its(\"body\")\n                    .as(\"collection\")\n                    .then(function () {\n                        expect(this.collection.properties.IDTAGCOLORS).to.deep.equal([\"IDVALCOLORS3\"]);\n                        expect(this.collection.properties.IDTAGSIZES).to.deep.equal([\"IDVALSIZES2\"]);\n                    });\n\n                // Confirm properties display on the UI.\n                cy.get(\"[data-cy=resource-properties]\")\n                    .should(\"contain\", \"Color: Magenta\")\n                    
.and(\"contain\", \"Size: S\");\n            });\n    });\n\n    it(\"shows collection by URL\", function () {\n        cy.loginAs(activeUser);\n        [true, false].map(function (isWritable) {\n            // Using different file names to avoid test flakyness: the second iteration\n            // on this loop may pass an assertion from the first iteration by looking\n            // for the same file name.\n            const fileName = isWritable ? \"bar\" : \"foo\";\n            const subDirName = \"subdir\";\n            cy.createGroup(adminUser.token, {\n                name: \"Shared project\",\n                group_class: \"project\",\n            })\n                .as(\"sharedGroup\")\n                .then(function () {\n                    // Creates the collection using the admin token so we can set up\n                    // a bogus manifest text without block signatures.\n                    cy.doRequest(\"GET\", \"/arvados/v1/config\", null, null)\n                        .its(\"body\")\n                        .should(clusterConfig => {\n                            expect(clusterConfig.Collections, \"clusterConfig\").to.have.property(\"TrustAllContent\", true);\n                            expect(clusterConfig.Services, \"clusterConfig\").to.have.property(\"WebDAV\").have.property(\"ExternalURL\");\n                            expect(clusterConfig.Services, \"clusterConfig\").to.have.property(\"WebDAVDownload\").have.property(\"ExternalURL\");\n                            const inlineUrl =\n                                clusterConfig.Services.WebDAV.ExternalURL !== \"\"\n                                    ? clusterConfig.Services.WebDAV.ExternalURL\n                                    : clusterConfig.Services.WebDAVDownload.ExternalURL;\n                            expect(inlineUrl).to.not.contain(\"*\");\n                        })\n                        .createCollection(adminUser.token, {\n                            name: \"Test collection\",\n                            owner_uuid: this.sharedGroup.uuid,\n                            properties: { someKey: \"someValue\" },\n                            manifest_text: `. 37b51d194a7513e45b56f6524f2d51f2+3 0:3:${fileName}\\n./${subDirName} 37b51d194a7513e45b56f6524f2d51f2+3 0:3:${fileName}\\n`,\n                        })\n                        .as(\"testCollection\")\n                        .then(function () {\n                            // Share the group with active user.\n                            cy.createLink(adminUser.token, {\n                                name: isWritable ? 
\"can_write\" : \"can_read\",\n                                link_class: \"permission\",\n                                head_uuid: this.sharedGroup.uuid,\n                                tail_uuid: activeUser.user.uuid,\n                            });\n                            cy.goToPath(`/collections/${this.testCollection.uuid}`);\n\n                            // Verify collection name\n                            cy.get(\"[data-cy=collection-details-card]\")\n                                .should(\"contain\", this.testCollection.name);\n\n                            // Open overview tab\n                            cy.doMPVTabSelect(\"Overview\");\n\n                            // Verify collection uuid\n                            cy.get(\"[data-cy=details-element]\")\n                                .should(\"contain\", this.testCollection.uuid)\n                                .and(\"not.contain\", \"This is an old version\");\n\n                            // Check for the read-only icon\n                            cy.get(\"[data-cy=read-only-icon]\").should(`${isWritable ? \"not.\" : \"\"}exist`);\n                            // Check that both read and write operations are available on\n                            // the 'More options' menu.\n                            cy.get(\"[data-cy=collection-details-card]\").within(() => {\n                                cy.get(\"[data-targetid='Add to favorites']\");\n                                if (isWritable) {\n                                    cy.get(\"[data-targetid='Edit collection']\");\n                                } else {\n                                    cy.get(\"[data-targetid='Edit collection']\").should(\"not.exist\");\n                                }\n                            });\n                            cy.get(\"body\").click(); // Collapse the menu avoiding details panel expansion\n                            cy.get(\"[data-cy=resource-properties]\")\n                                .should(\"contain\", \"someKey: someValue\")\n                                .and(\"not.contain\", \"anotherKey: anotherValue\");\n                            // Check that the file listing show both read & write operations\n                            cy.waitForDom();\n                            cy.doMPVTabSelect(\"Files\");\n                            cy.get(\"[data-cy=collection-files-right-panel]\", { timeout: 5000 }).should(\"contain\", fileName);\n                            if (isWritable) {\n                                cy.get(\"[data-cy=upload-button]\").should(`${isWritable ? \"\" : \"not.\"}contain`, \"Upload data\");\n                            }\n                            // Test context menus\n                            cy.get(\"[data-cy=collection-files-panel]\").contains(fileName).rightclick();\n                            cy.get(\"[data-cy=context-menu]\")\n                                .should(\"contain\", \"Download\")\n                                .and(\"contain\", \"Open in new tab\")\n                                .and(\"contain\", \"Copy link to latest version\")\n                                .and(\"contain\", \"Copy link to immutable version\")\n                                .and(`${isWritable ? \"\" : \"not.\"}contain`, \"Rename\")\n                                .and(`${isWritable ? 
\"\" : \"not.\"}contain`, \"Remove\");\n                            cy.get(\"body\").click(); // Collapse the menu\n                            cy.get(\"[data-cy=collection-files-panel]\").contains(subDirName).rightclick();\n                            cy.get(\"[data-cy=context-menu]\")\n                                .should(\"not.contain\", \"Download\")\n                                .and(\"contain\", \"Open in new tab\")\n                                .and(\"contain\", \"Copy link to latest version\")\n                                .and(\"contain\", \"Copy link to immutable version\")\n                                .and(`${isWritable ? \"\" : \"not.\"}contain`, \"Rename\")\n                                .and(`${isWritable ? \"\" : \"not.\"}contain`, \"Remove\");\n                            cy.get(\"body\").click(); // Collapse the menu\n                            // File/dir item 'more options' button\n                            cy.get(\"[data-cy=file-item-options-btn\").first().click();\n                            cy.get(\"[data-cy=context-menu]\").should(`${isWritable ? \"\" : \"not.\"}contain`, \"Remove\");\n                            cy.get(\"body\").click(); // Collapse the menu\n                            // Hamburger 'more options' menu button\n                            cy.doCollectionPanelOptionsAction(\"Select all\");\n                            cy.get(\"[data-cy=collection-files-panel-options-btn]\").click();\n                            cy.get(\"[data-cy=context-menu]\").should(`${isWritable ? \"\" : \"not.\"}contain`, \"Remove selected\");\n                            cy.get(\"body\").click(); // Collapse the menu\n                        });\n                });\n        });\n    });\n\n    it(\"renames a file using valid names\", function () {\n        function eachPair(lst, func) {\n            for (var i = 0; i < lst.length - 1; i++) {\n                func(lst[i], lst[i + 1]);\n            }\n        }\n        // Creates the collection using the admin token so we can set up\n        // a bogus manifest text without block signatures.\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        })\n            .as(\"testCollection\")\n            .then(function () {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${this.testCollection.uuid}`);\n\n                const names = [\n                    \"bar\", // initial name already set\n                    \"&\",\n                    \"foo\",\n                    \"&amp;\",\n                    \"I ❤️ ⛵️\",\n                    \"...\",\n                    \"#..\",\n                    \"some name with whitespaces\",\n                    \"some name with #2\",\n                    \"is this name legal? 
I hope it is\",\n                    \"some_file.pdf#\",\n                    \"some_file.pdf?\",\n                    \"?some_file.pdf\",\n                    \"some%file.pdf\",\n                    \"some%2Ffile.pdf\",\n                    \"some%22file.pdf\",\n                    \"some%20file.pdf\",\n                    \"G%C3%BCnter's%20file.pdf\",\n                    \"table%&?*2\",\n                    \"bar\", // make sure we can go back to the original name as a last step\n                ];\n                cy.intercept({ method: \"PUT\", url: \"**/arvados/v1/collections/*\" }).as(\"renameRequest\");\n                cy.doMPVTabSelect(\"Files\");\n                eachPair(names, (from, to) => {\n                    cy.waitForDom().get(\"[data-cy=collection-files-panel]\").contains(`${from}`).rightclick();\n                    cy.get(\"[data-cy=context-menu]\").contains(\"Rename\").click();\n                    cy.get(\"[data-cy=form-dialog]\")\n                        .should(\"contain\", \"Rename\")\n                        .within(() => {\n                            cy.get(\"input\").type(\"{selectall}{backspace}\").type(to, { parseSpecialCharSequences: false });\n                        });\n                    cy.get(\"[data-cy=form-submit-btn]\").click();\n                    cy.wait(\"@renameRequest\");\n                    cy.get(\"[data-cy=collection-files-panel]\").should(\"not.contain\", `${from}`).and(\"contain\", `${to}`);\n                });\n            });\n    });\n\n    it(\"renames a file to a different directory\", function () {\n        // Creates the collection using the admin token so we can set up\n        // a bogus manifest text without block signatures.\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        })\n            .as(\"testCollection\")\n            .then(function () {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${this.testCollection.uuid}`);\n\n                cy.doMPVTabSelect(\"Files\");\n                [\"subdir\", \"G%C3%BCnter's%20file\", \"table%&?*2\"].forEach(subdir => {\n                    cy.waitForDom().get(\"[data-cy=collection-files-panel]\").contains(\"bar\").rightclick();\n                    cy.get(\"[data-cy=context-menu]\").contains(\"Rename\").click();\n                    cy.get(\"[data-cy=form-dialog]\")\n                        .should(\"contain\", \"Rename\")\n                        .within(() => {\n                            cy.get(\"input\").type(`{selectall}{backspace}${subdir}/foo`);\n                        });\n                    cy.get(\"[data-cy=form-submit-btn]\").click();\n                    cy.waitForDom();\n                    cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n                    cy.get(\"[data-cy=collection-files-panel]\").should(\"not.contain\", \"bar\").and(\"contain\", subdir);\n                    cy.get(\"[data-cy=collection-files-panel]\").contains(subdir).click();\n\n                    // Rename 'subdir/foo' to 'bar'\n                    cy.waitForDom();\n                    cy.get(\"[data-cy=collection-files-panel]\").contains(\"foo\").rightclick();\n                    cy.get(\"[data-cy=context-menu]\").contains(\"Rename\").click();\n                    cy.get(\"[data-cy=form-dialog]\")\n                        .should(\"contain\", \"Rename\")\n                        .within(() => {\n                            cy.get(\"input\").should(\"have.value\", `${subdir}/foo`).type(`{selectall}{backspace}bar`);\n                        });\n                    cy.get(\"[data-cy=form-submit-btn]\").click({ force: true });\n\n                    // need to wait for dialog to dismiss\n                    cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n\n                    cy.waitForDom().get(\"[data-cy=collection-files-panel]\").contains(\"Home\").click();\n\n                    cy.waitForDom();\n                    cy.get(\"[data-cy=collection-files-panel]\").contains(subdir).click();\n                    cy.get(\"[data-cy=collection-files-panel]\")\n                        .should(\"contain\", subdir) // empty dir kept\n                        .and(\"contain\", \"bar\");\n\n                    // once this element exists, the DOM has actually finished loading\n                    cy.get(\"[data-cy=file-item-options-btn]\", { timeout: 20000 }).first().should('exist');\n\n                    cy.get(\"[data-cy=collection-files-panel-content]\").contains(subdir).rightclick();\n                    cy.get(\"[data-cy=context-menu]\").contains(\"Remove\").click();\n                    cy.get(\"[data-cy=confirmation-dialog-ok-btn]\").click();\n                    cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n                });\n            });\n    });\n\n    it(\"shows collection owner\", () => {\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        })\n            .as(\"testCollection\")\n            .then(testCollection => {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${testCollection.uuid}`);\n                cy.wait(5000);\n                cy.doMPVTabSelect(\"Overview\");\n                cy.get(\"[data-cy=details-element]\").contains(`Collection User`);\n            });\n    });\n\n    it(\"tries to rename a file with illegal names\", function () {\n        // Creates the collection using the admin token so we can set up\n        // a bogus manifest text without block signatures.\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        })\n            .as(\"testCollection\")\n            .then(function () {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${this.testCollection.uuid}`);\n\n                const illegalNamesFromUI = [\n                    [\".\", \"Name cannot be '.' or '..'\"],\n                    [\"..\", \"Name cannot be '.' or '..'\"],\n                    [\"\", \"This field is required\"],\n                    [\" \", \"Leading/trailing whitespaces not allowed\"],\n                    [\" foo\", \"Leading/trailing whitespaces not allowed\"],\n                    [\"foo \", \"Leading/trailing whitespaces not allowed\"],\n                    [\"//foo\", \"Empty dir name not allowed\"],\n                ];\n                cy.doMPVTabSelect(\"Files\");\n                illegalNamesFromUI.forEach(([name, errMsg]) => {\n                    cy.get(\"[data-cy=collection-files-panel]\").contains(\"bar\").rightclick();\n                    cy.get(\"[data-cy=context-menu]\").contains(\"Rename\").click();\n                    cy.get(\"[data-cy=form-dialog]\")\n                        .should(\"contain\", \"Rename\")\n                        .within(() => {\n                            cy.get(\"input\").type(`{selectall}{backspace}${name}`);\n                        });\n                    cy.get(\"[data-cy=form-dialog]\")\n                        .should(\"contain\", \"Rename\")\n                        .within(() => {\n                            cy.contains(`${errMsg}`);\n                        });\n                    cy.get(\"[data-cy=form-cancel-btn]\").click();\n                });\n            });\n    });\n\n    it(\"can correctly display old versions\", function () {\n        const colName = `Versioned Collection ${Math.floor(Math.random() * 999999)}`;\n        let colUuid = \"\";\n        let oldVersionUuid = \"\";\n        // Make sure no other collections with this name exist\n        cy.doRequest(\"GET\", \"/arvados/v1/collections\", null, {\n            filters: `[[\"name\", \"=\", \"${colName}\"]]`,\n            include_old_versions: true,\n        })\n            .its(\"body.items\")\n            .as(\"collections\")\n            .then(function () {\n                expect(this.collections).to.be.empty;\n            });\n        // Creates the collection using the admin token so we can set up\n        // a bogus manifest text without block signatures.\n        cy.createCollection(adminUser.token, {\n            name: colName,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        })\n            .as(\"originalVersion\")\n            .then(function () {\n                // Change the file name to create a new version.\n                cy.updateCollection(adminUser.token, this.originalVersion.uuid, {\n                    manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo\\n\",\n                });\n                colUuid = this.originalVersion.uuid;\n            });\n        // Confirm that there are 2 versions of the collection\n        cy.doRequest(\"GET\", \"/arvados/v1/collections\", null, {\n            filters: `[[\"name\", \"=\", \"${colName}\"]]`,\n            include_old_versions: true,\n        })\n            .its(\"body.items\")\n            .as(\"collections\")\n            .then(function () {\n                expect(this.collections).to.have.lengthOf(2);\n                this.collections.map(function (aCollection) {\n                    expect(aCollection.current_version_uuid).to.equal(colUuid);\n                    if (aCollection.uuid !== aCollection.current_version_uuid) {\n                        oldVersionUuid = aCollection.uuid;\n                    }\n                });\n                // Check the old version displays as what it is.\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${oldVersionUuid}`);\n                cy.doMPVTabSelect(\"Overview\");\n\n                cy.get(\"[data-cy=details-element]\").should(\"contain\", \"This is an old version\");\n                cy.get(\"[data-cy=read-only-icon]\").should(\"exist\");\n                cy.get(\"[data-cy=collection-details-card]\").should(\"contain\", colName);\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=collection-files-panel]\").should(\"contain\", \"bar\");\n            });\n    });\n\n    it(\"views & edits storage classes data\", function () {\n        const colName = `Test Collection ${Math.floor(Math.random() * 999999)}`;\n        cy.createCollection(adminUser.token, {\n            name: colName,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:some-file\\n\",\n        })\n            .as(\"collection\")\n            .then(function () {\n                expect(this.collection.storage_classes_desired).to.deep.equal([\"default\"]);\n\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${this.collection.uuid}`);\n                cy.doMPVTabSelect(\"Overview\");\n\n                // Initial check: it should show the 'default' storage class\n                cy.get(\"[data-cy=details-element]\")\n                    .should(\"contain\", \"Storage classes\")\n                    .and(\"contain\", \"default\")\n                    .and(\"not.contain\", \"foo\")\n                    .and(\"not.contain\", \"bar\");\n                // Edit collection: add storage class 'foo'\n                cy.get('[data-title=\"Edit collection\"]').click();\n                cy.get(\"[data-cy=form-dialog]\")\n                    .should(\"contain\", \"Edit Collection\")\n                    .and(\"contain\", \"Storage classes\")\n                    .and(\"contain\", \"default\")\n                    .and(\"contain\", \"foo\")\n                    .and(\"contain\", \"bar\")\n                    .within(() => {\n                        cy.get(\"[data-cy=checkbox-foo]\").click();\n                    });\n                cy.get(\"[data-cy=form-submit-btn]\").click();\n                cy.get(\"[data-cy=details-element]\").should(\"contain\", \"default\").and(\"contain\", \"foo\").and(\"not.contain\", \"bar\");\n                cy.doRequest(\"GET\", `/arvados/v1/collections/${this.collection.uuid}`)\n                    .its(\"body\")\n                    .as(\"updatedCollection\")\n                    .then(function () {\n                        expect(this.updatedCollection.storage_classes_desired).to.deep.equal([\"default\", \"foo\"]);\n                    });\n                // Edit collection: remove storage class 'default'\n                cy.get('[data-title=\"Edit collection\"]').click();\n                cy.get(\"[data-cy=form-dialog]\")\n                    .should(\"contain\", \"Edit Collection\")\n                    .and(\"contain\", \"Storage classes\")\n                    .and(\"contain\", \"default\")\n                    .and(\"contain\", \"foo\")\n                    .and(\"contain\", \"bar\")\n                    .within(() => {\n                        cy.get(\"[data-cy=checkbox-default]\").click();\n                    });\n                cy.get(\"[data-cy=form-submit-btn]\").click();\n                cy.get(\"[data-cy=details-element]\").should(\"not.contain\", \"default\").and(\"contain\", \"foo\").and(\"not.contain\", \"bar\");\n                cy.doRequest(\"GET\", `/arvados/v1/collections/${this.collection.uuid}`)\n                    .its(\"body\")\n                    .as(\"updatedCollection\")\n                    .then(function () {\n                        expect(this.updatedCollection.storage_classes_desired).to.deep.equal([\"foo\"]);\n                    });\n            });\n    });\n\n    it(\"moves a collection to a different project\", function () {\n        const collName = `Test Collection ${Math.floor(Math.random() * 999999)}`;\n        const projName = `Test Project ${Math.floor(Math.random() * 999999)}`;\n        const fileName = `Test_File_${Math.floor(Math.random() * 999999)}`;\n\n        cy.createCollection(adminUser.token, {\n            name: collName,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: `. 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:${fileName}\\n`,\n        }).as(\"testCollection\");\n        cy.createGroup(adminUser.token, {\n            name: projName,\n            group_class: \"project\",\n            owner_uuid: activeUser.user.uuid,\n        }).as(\"testProject\");\n\n        cy.getAll(\"@testCollection\", \"@testProject\").then(function ([testCollection, testProject]) {\n            cy.loginAs(activeUser);\n            cy.goToPath(`/collections/${testCollection.uuid}`);\n            cy.doMPVTabSelect(\"Files\");\n            cy.get(\"[data-cy=collection-files-panel]\").should(\"contain\", fileName);\n            cy.get(\"[data-cy=collection-details-card]\").should(\"not.contain\", projName).and(\"not.contain\", testProject.uuid);\n            cy.get('[data-title=\"Move to\"]').click();\n            cy.get(\"[data-cy=form-dialog]\")\n                .should(\"contain\", \"Move to\")\n                .within(() => {\n                    // must use .then to avoid selecting instead of expanding https://github.com/cypress-io/cypress/issues/5529\n                    cy.get(\"[data-cy=projects-tree-home-tree-picker]\")\n                        .find(\"i\")\n                        .then(el => el.click());\n                    cy.get(\"[data-cy=projects-tree-home-tree-picker]\").contains(projName).click();\n                });\n            cy.get(\"[data-cy=form-submit-btn]\").click();\n            cy.get(\"[data-cy=snackbar]\").contains(\"Collection has been moved\");\n            cy.get(\"button\").contains(projName);\n            // Double check that the collection is in the project\n            cy.goToPath(`/projects/${testProject.uuid}`);\n            cy.doMPVTabSelect(\"Data\");\n            cy.waitForDom().get(\"[data-cy=project-panel]\").should(\"contain\", collName);\n        });\n    });\n\n    it(\"automatically updates the collection UI contents without using the Refresh button\", function () {\n        const collName = `Test Collection ${Math.floor(Math.random() * 999999)}`;\n        cy.clock();\n\n        cy.createCollection(adminUser.token, {\n            name: collName,\n            owner_uuid: activeUser.user.uuid,\n        }).as(\"testCollection\");\n\n        cy.getAll(\"@testCollection\").then(function ([testCollection]) {\n            cy.loginAs(activeUser);\n\n            const files = [\"foobar\", \"anotherFile\", \"\", \"finalName\"];\n\n            cy.goToPath(`/collections/${testCollection.uuid}`);\n            cy.doMPVTabSelect(\"Files\");\n            cy.get(\"[data-cy=collection-files-panel]\").should(\"contain\", \"This collection is empty\");\n            cy.get(\"[data-cy=collection-files-panel]\").should(\"not.contain\", files[0]);\n            cy.get(\"[data-cy=collection-details-card]\").should(\"contain\", collName);\n\n            files.map((fileName, i, files) => {\n                cy.updateCollection(adminUser.token, testCollection.uuid, {\n                    name: `${collName + \" updated\"}`,\n                    manifest_text: fileName ? `. 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:${fileName}\\n` : \"\",\n                }).as(\"updatedCollection\");\n\n                // Fast forward 15 seconds for the websocket throttle\n                cy.tick(15000);\n\n                cy.getAll(\"@updatedCollection\").then(function ([updatedCollection]) {\n                    expect(updatedCollection.name).to.equal(`${collName + \" updated\"}`);\n                    cy.get(\"[data-cy=collection-details-card]\").should(\"contain\", updatedCollection.name);\n                    fileName\n                        ? cy.get(\"[data-cy=collection-files-panel]\").should(\"contain\", fileName)\n                        : cy.get(\"[data-cy=collection-files-panel]\").should(\"not.contain\", files[i - 1]);\n                });\n            });\n        });\n    });\n\n    it(\"makes a copy of an existing collection\", function () {\n        const collName = `Test Collection ${Math.floor(Math.random() * 999999)}`;\n        const copyName = `Copy of: ${collName}`;\n\n        cy.createCollection(adminUser.token, {\n            name: collName,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:some-file\\n\",\n        })\n            .as(\"collection\")\n            .then(function () {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${this.collection.uuid}`);\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=collection-files-panel]\").should(\"contain\", \"some-file\");\n                cy.get('[data-title=\"Make a copy\"]').click();\n                cy.get(\"[data-cy=form-dialog]\")\n                    .should(\"contain\", \"Make a copy\")\n                    .within(() => {\n                        cy.get(\"[data-cy=projects-tree-home-tree-picker]\").contains(\"Projects\").click();\n                        cy.get(\"[data-cy=form-submit-btn]\").click();\n                    });\n                cy.get(\"[data-cy=snackbar]\").contains(\"Collection has been copied.\");\n                cy.get(\"[data-cy=tree-li]\").contains(\"Home Projects\").click();\n                cy.doMPVTabSelect(\"Data\");\n                cy.get(\"[data-cy=project-panel]\").contains(copyName).click();\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=collection-files-panel]\").should(\"contain\", \"some-file\");\n            });\n    });\n\n    it(\"uses the collection version browser to view a previous version\", function () {\n        const colName = `Test Collection ${Math.floor(Math.random() * 999999)}`;\n\n        // Creates the collection using the admin token so we can set up\n        // a bogus manifest text without block signatures.\n        cy.createCollection(adminUser.token, {\n            name: colName,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo 0:3:bar\\n\",\n        })\n            .as(\"collection\")\n            .then(function () {\n                // Visit collection, check basic information\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${this.collection.uuid}`);\n                cy.doMPVTabSelect(\"Overview\");\n\n                cy.get(\"[data-cy=details-element]\").should(\"not.contain\", \"This is an old version\");\n                cy.get(\"[data-cy=read-only-icon]\").should(\"not.exist\");\n                cy.get(\"[data-cy=collection-version-number]\").should(\"contain\", \"1\");\n                cy.get(\"[data-cy=collection-details-card]\").should(\"contain\", colName);\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=collection-files-panel]\").should(\"contain\", \"foo\").and(\"contain\", \"bar\");\n\n                // Modify collection, expect version number change\n                cy.get(\"[data-cy=collection-files-panel]\").contains(\"foo\").rightclick();\n                cy.get(\"[data-cy=context-menu]\").contains(\"Remove\").click();\n                cy.get(\"[data-cy=confirmation-dialog]\").should(\"contain\", \"Removing file\");\n                cy.get(\"[data-cy=confirmation-dialog-ok-btn]\").click();\n                cy.get(\"[data-cy=collection-files-panel]\").should(\"not.contain\", \"foo\").and(\"contain\", \"bar\");\n                cy.doMPVTabSelect(\"Overview\");\n                cy.get(\"[data-cy=collection-version-number]\").should(\"contain\", \"2\");\n\n                // Click on version number, check version browser. Click on past version.\n                cy.get(\"[data-cy=collection-version-browser]\").should(\"not.exist\");\n                cy.get(\"[data-cy=collection-version-number]\").contains(\"2\").click();\n                cy.get(\"[data-cy=collection-version-browser]\")\n                    .should(\"contain\", \"Nr\")\n                    .and(\"contain\", \"Size\")\n                    .and(\"contain\", \"Date\")\n                    .within(() => {\n                        // Version 1: 6 bytes in size\n                        cy.get(\"[data-cy=collection-version-browser-select-1]\")\n                            .should(\"contain\", \"1\")\n                            .and(\"contain\", \"6 B\")\n                            .and(\"contain\", adminUser.user.full_name);\n                        // Version 2: 3 bytes in size (one file removed)\n                        cy.get(\"[data-cy=collection-version-browser-select-2]\")\n                            .should(\"contain\", \"2\")\n                            .and(\"contain\", \"3 B\")\n                            .and(\"contain\", activeUser.user.full_name);\n                        cy.get(\"[data-cy=collection-version-browser-select-3]\").should(\"not.exist\");\n                        cy.get(\"[data-cy=collection-version-browser-select-1]\").click();\n                    });\n                // Navigate back to overview tab\n                cy.doMPVTabSelect(\"Overview\");\n                cy.get(\"[data-cy=details-element]\").should(\"contain\", \"This is an old version\");\n                cy.get(\"[data-cy=read-only-icon]\").should(\"exist\");\n                cy.get(\"[data-cy=collection-version-number]\").should(\"contain\", \"1\");\n                cy.get(\"[data-cy=collection-details-card]\").should(\"contain\", colName);\n                cy.doMPVTabSelect(\"Files\");\n                
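// Version 1 should still contain both of the original files\n                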
cy.get(\"[data-cy=collection-files-panel]\").should(\"contain\", \"foo\").and(\"contain\", \"bar\");\n\n                // Check that only old collection action are available on toolbar\n                cy.get('[data-title=\"Restore version\"]').should('exist');\n                cy.get('[data-title=\"Add to favorites\"]').should('not.exist');\n\n                // Click on \"head version\" link, confirm that it's the latest version.\n                cy.doMPVTabSelect(\"Overview\");;\n                cy.get(\"[data-cy=details-element]\").contains(\"head version\").click();\n                // Navigate back to overview after changing versions\n                cy.doMPVTabSelect(\"Overview\");\n                cy.get(\"[data-cy=details-element]\").should(\"not.contain\", \"This is an old version\");\n                cy.get(\"[data-cy=read-only-icon]\").should(\"not.exist\");\n                cy.get(\"[data-cy=collection-version-number]\").should(\"contain\", \"2\");\n                cy.get(\"[data-cy=collection-details-card]\").should(\"contain\", colName);\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=collection-files-panel]\").should(\"not.contain\", \"foo\").and(\"contain\", \"bar\");\n\n                // Check that old collection action isn't available on context menu\n                cy.get('[data-title=\"Restore version\"]').should('not.exist');\n\n                // Make another change, confirm new version.\n                cy.get('[data-title=\"Edit collection\"]').click();\n                cy.get(\"[data-cy=form-dialog]\")\n                    .should(\"contain\", \"Edit Collection\")\n                    .within(() => {\n                        // appends some text\n                        cy.get(\"input\").first().type(\" renamed\");\n                    });\n                cy.get(\"[data-cy=form-submit-btn]\").click();\n                cy.doMPVTabSelect(\"Overview\");;\n                cy.get(\"[data-cy=details-element]\").should(\"not.contain\", \"This is an old version\");\n                cy.get(\"[data-cy=read-only-icon]\").should(\"not.exist\");\n                cy.get(\"[data-cy=collection-version-number]\").should(\"contain\", \"3\");\n                cy.get(\"[data-cy=collection-details-card]\").should(\"contain\", colName + \" renamed\");\n                cy.get(\"[data-cy=collection-version-browser-select-3]\").should(\"contain\", \"3\").and(\"contain\", \"3 B\");\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=collection-files-panel]\").should(\"not.contain\", \"foo\").and(\"contain\", \"bar\");\n\n                // Check context menus on version browser\n                cy.waitForDom();\n                cy.waitForDom();\n                cy.get(\"[data-cy=collection-version-browser-select-3]\").rightclick();\n                cy.get(\"[data-cy=context-menu]\")\n                    .should(\"contain\", \"Add to favorites\")\n                    .and(\"contain\", \"Make a copy\")\n                    .and(\"contain\", \"Edit collection\");\n                cy.get(\"body\").click();\n                // (and now an old version...)\n                cy.get(\"[data-cy=collection-version-browser-select-1]\").rightclick();\n                cy.get(\"[data-cy=context-menu]\")\n                    .should(\"not.contain\", \"Add to favorites\")\n                    .and(\"contain\", \"Make a copy\")\n                    .and(\"not.contain\", \"Edit collection\");\n                cy.get(\"body\").click();\n\n\n   
             // Restore first version\n                cy.get(\"[data-cy=collection-version-browser]\").within(() => {\n                    cy.get(\"[data-cy=collection-version-browser-select-1]\").click();\n                });\n                cy.get('[data-title=\"Restore version\"]').click();\n                cy.get(\"[data-cy=confirmation-dialog]\").should(\"contain\", \"Restore version\");\n                cy.get(\"[data-cy=confirmation-dialog-ok-btn]\").click();\n                // Navigate back to overview after changing versions\n                cy.doMPVTabSelect(\"Overview\");\n                cy.get(\"[data-cy=details-element]\").should(\"not.contain\", \"This is an old version\");\n                cy.get(\"[data-cy=collection-version-number]\").should(\"contain\", \"4\");\n                cy.get(\"[data-cy=collection-details-card]\").should(\"contain\", colName);\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=collection-files-panel]\").should(\"contain\", \"foo\").and(\"contain\", \"bar\");\n            });\n    });\n\n    it(\"copies selected files into new collection\", () => {\n        cy.createCollection(adminUser.token, {\n            name: `Test Collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo 0:3:bar\\n\",\n        })\n            .as(\"collection\")\n            .then(function () {\n                // Visit collection, check basic information\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${this.collection.uuid}`);\n\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=collection-files-panel]\").within(() => {\n                    cy.get(\"input[type=checkbox]\").first().click();\n                });\n\n                cy.get(\"[data-cy=collection-files-panel-options-btn]\").click();\n                cy.get(\"[data-cy=context-menu]\").contains(\"Copy selected into new collection\").click();\n\n                cy.get(\"[data-cy=form-dialog]\").contains(\"Projects\").click();\n\n                cy.get(\"[data-cy=form-submit-btn]\").click();\n\n                cy.waitForDom().get(\".layout-pane-primary\", { timeout: 12000 }).contains(\"Projects\").click();\n\n                cy.doMPVTabSelect(\"Data\");\n                cy.waitForDom().get(\"main\").contains(`Files extracted from: ${this.collection.name}`).click();\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=collection-files-panel]\").and(\"contain\", \"bar\");\n            });\n    });\n\n    it(\"copies selected files into existing collection\", () => {\n        cy.createCollection(adminUser.token, {\n            name: `Test Collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo 0:3:bar\\n\",\n        }).as(\"sourceCollection\");\n\n        cy.createCollection(adminUser.token, {\n            name: `Destination Collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \"\",\n        }).as(\"destinationCollection\");\n\n        cy.getAll(\"@sourceCollection\", \"@destinationCollection\").then(function ([sourceCollection, destinationCollection]) {\n            // Visit collection, check basic information\n            cy.loginAs(activeUser);\n            cy.goToPath(`/collections/${sourceCollection.uuid}`);\n\n            cy.doMPVTabSelect(\"Files\");\n            cy.get(\"[data-cy=collection-files-panel]\").within(() => {\n                cy.get(\"input[type=checkbox]\").first().click();\n            });\n\n            cy.get(\"[data-cy=collection-files-panel-options-btn]\").click();\n            cy.get(\"[data-cy=context-menu]\").contains(\"Copy selected into existing collection\").click();\n\n            cy.get(\"[data-cy=form-dialog]\").contains(destinationCollection.name).click();\n\n            cy.get(\"[data-cy=form-submit-btn]\").click();\n            cy.wait(2000);\n\n            cy.goToPath(`/collections/${destinationCollection.uuid}`);\n\n            cy.doMPVTabSelect(\"Overview\");\n            cy.get(\"main\").contains(destinationCollection.name).should(\"exist\");\n            cy.doMPVTabSelect(\"Files\");\n            cy.get(\"[data-cy=collection-files-panel]\").and(\"contain\", \"bar\");\n        });\n    });\n\n    it(\"copies selected files into separate collections\", () => {\n        cy.createCollection(adminUser.token, {\n            name: `Test Collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo 0:3:bar\\n\",\n        }).as(\"sourceCollection\");\n\n        cy.getAll(\"@sourceCollection\").then(function ([sourceCollection]) {\n            // Visit collection, check basic information\n            cy.loginAs(activeUser);\n            cy.goToPath(`/collections/${sourceCollection.uuid}`);\n\n            // Select both files\n            cy.waitForDom()\n            cy.doMPVTabSelect(\"Files\");\n            cy.get(\"[data-cy=collection-files-panel]\")\n                .within(() => {\n                    cy.get(\"input[type=checkbox]\").first().click();\n                    cy.get(\"input[type=checkbox]\").last().click();\n                });\n\n            // Copy to separate collections\n            cy.get(\"[data-cy=collection-files-panel-options-btn]\").click();\n            cy.get(\"[data-cy=context-menu]\").contains(\"Copy selected into separate collections\").click();\n            cy.get(\"[data-cy=form-dialog]\").contains(\"Projects\").click();\n            cy.get(\"[data-cy=form-submit-btn]\").click();\n\n            // Verify created collections\n            cy.waitForDom().get(\".layout-pane-primary\", { timeout: 12000 }).contains(\"Projects\").click();\n            cy.doMPVTabSelect(\"Data\");\n            cy.get(\"main\").contains(`File copied from collection ${sourceCollection.name}/foo`).click();\n            cy.doMPVTabSelect(\"Files\");\n            cy.get(\"[data-cy=collection-files-panel]\").and(\"contain\", \"foo\");\n            cy.get(\".layout-pane-primary\").contains(\"Projects\").click();\n            cy.doMPVTabSelect(\"Data\");\n            cy.get(\"main\").contains(`File copied from collection ${sourceCollection.name}/bar`).click();\n            cy.doMPVTabSelect(\"Files\");\n            cy.get(\"[data-cy=collection-files-panel]\").and(\"contain\", \"bar\");\n\n            // Verify separate collection menu items not present when single file selected\n            // Wait for dom for collection to re-render\n            cy.waitForDom()\n            cy.doMPVTabSelect(\"Files\");\n            cy.get(\"[data-cy=collection-files-panel]\")\n                .within(() => {\n                    cy.get(\"input[type=checkbox]\").first().click();\n                });\n            cy.get(\"[data-cy=collection-files-panel-options-btn]\").click();\n            cy.get(\"[data-cy=context-menu]\").should(\"not.contain\", \"Copy selected into separate collections\");\n            cy.get(\"[data-cy=context-menu]\").should(\"not.contain\", \"Move selected into separate collections\");\n        });\n    });\n\n    it(\"moves selected files into new collection\", () => {\n        cy.createCollection(adminUser.token, {\n            name: `Test Collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo 0:3:bar\\n\",\n        })\n            .as(\"collection\")\n            .then(function () {\n                // Visit collection, check basic information\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${this.collection.uuid}`);\n\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=collection-files-panel]\").within(() => {\n                    cy.get(\"input[type=checkbox]\").first().click();\n                });\n\n                cy.get(\"[data-cy=collection-files-panel-options-btn]\").click();\n                cy.get(\"[data-cy=context-menu]\").contains(\"Move selected into new collection\").click();\n\n                cy.get(\"[data-cy=form-dialog]\").contains(\"Projects\").click();\n\n                cy.get(\"[data-cy=form-submit-btn]\").click();\n\n                cy.waitForDom().get(\".layout-pane-primary\", { timeout: 12000 }).contains(\"Projects\").click();\n\n                cy.doMPVTabSelect(\"Data\");\n                cy.get(\"main\").contains(`Files moved from: ${this.collection.name}`).click();\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=collection-files-panel]\").and(\"contain\", \"bar\");\n            });\n    });\n\n    it(\"moves selected files into existing collection\", () => {\n        cy.createCollection(adminUser.token, {\n            name: `Test Collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo 0:3:bar\\n\",\n        }).as(\"sourceCollection\");\n\n        cy.createCollection(adminUser.token, {\n            name: `Destination Collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \"\",\n        }).as(\"destinationCollection\");\n\n        cy.getAll(\"@sourceCollection\", \"@destinationCollection\").then(function ([sourceCollection, destinationCollection]) {\n            // Visit collection, check basic information\n            cy.loginAs(activeUser);\n            cy.goToPath(`/collections/${sourceCollection.uuid}`);\n\n            cy.doMPVTabSelect(\"Files\");\n            cy.get(\"[data-cy=collection-files-panel]\").within(() => {\n                cy.get(\"input[type=checkbox]\").first().click();\n            });\n\n            cy.get(\"[data-cy=collection-files-panel-options-btn]\").click();\n            cy.get(\"[data-cy=context-menu]\").contains(\"Move selected into existing collection\").click();\n\n            cy.get(\"[data-cy=form-dialog]\").contains(destinationCollection.name).click();\n\n            cy.get(\"[data-cy=form-submit-btn]\").click();\n            cy.wait(2000);\n\n            cy.goToPath(`/collections/${destinationCollection.uuid}`);\n\n            cy.get(\"main\").contains(destinationCollection.name).should(\"exist\");\n            cy.doMPVTabSelect(\"Files\");\n            cy.get(\"[data-cy=collection-files-panel]\").and(\"contain\", \"bar\");\n        });\n    });\n\n    it(\"moves selected files into separate collections\", () => {\n        cy.createCollection(adminUser.token, {\n            name: `Test Collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo 0:3:bar\\n\",\n        }).as(\"sourceCollection\");\n\n        cy.getAll(\"@sourceCollection\").then(function ([sourceCollection]) {\n            // Visit collection, check basic information\n            cy.loginAs(activeUser);\n            cy.goToPath(`/collections/${sourceCollection.uuid}`);\n            cy.doMPVTabSelect(\"Files\");\n\n            // Select both files\n            cy.get(\"[data-cy=collection-files-panel]\").within(() => {\n                cy.get(\"input[type=checkbox]\").first().click();\n                cy.get(\"input[type=checkbox]\").last().click();\n            });\n\n            // Move to separate collections\n            cy.get(\"[data-cy=collection-files-panel-options-btn]\").click();\n            cy.get(\"[data-cy=context-menu]\").contains(\"Move selected into separate collections\").click();\n            cy.get(\"[data-cy=form-dialog]\").contains(\"Projects\").click();\n            cy.get(\"[data-cy=form-submit-btn]\").click();\n            cy.get(\"[data-cy=form-dialog]\", { timeout: 10000 }).should(\"not.exist\");\n\n            // Verify created collections\n            cy.waitForDom().get(\".layout-pane-primary\", { timeout: 12000 }).contains(\"Projects\").click();\n            cy.doMPVTabSelect(\"Data\");\n            cy.get(\"main\").contains(`File moved from collection ${sourceCollection.name}/foo`).click();\n            cy.doMPVTabSelect(\"Files\");\n            cy.get(\"[data-cy=collection-files-panel]\").and(\"contain\", \"foo\");\n            cy.get(\".layout-pane-primary\").contains(\"Projects\").click();\n            cy.doMPVTabSelect(\"Data\");\n            cy.get(\"main\").contains(`File moved from collection ${sourceCollection.name}/bar`).click();\n            cy.doMPVTabSelect(\"Files\");\n            cy.get(\"[data-cy=collection-files-panel]\").and(\"contain\", \"bar\");\n        });\n    });\n\n    it(\"creates new collection with properties on home project\", function () {\n        cy.loginAs(activeUser);\n        cy.goToPath(`/projects/${activeUser.user.uuid}`);\n        cy.get(\"[data-cy=breadcrumb-first]\").should(\"contain\", \"Projects\");\n        cy.get(\"[data-cy=breadcrumb-last]\").should(\"not.exist\");\n        // Create new collection\n        cy.get(\"[data-cy=side-panel-button]\").click();\n        cy.get(\"[data-cy=side-panel-new-collection]\").click();\n        // Name between brackets tests bugfix #17582\n        const collName = `[Test collection (${Math.floor(999999 * Math.random())})]`;\n\n        // Select a storage class.\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"contain\", \"New collection\")\n            .and(\"contain\", \"Storage classes\")\n            .and(\"contain\", \"default\")\n            .and(\"contain\", \"foo\")\n            .and(\"contain\", \"bar\")\n            .within(() => {\n                cy.get(\"[data-cy=parent-field]\").within(() => {\n                    cy.get(\"input\").should(\"have.value\", \"Home project\");\n                });\n                cy.get(\"[data-cy=name-field]\").within(() => {\n                    cy.get(\"input\").type(collName);\n                });\n                cy.get(\"[data-cy=checkbox-foo]\").click();\n            });\n\n        // Add a property.\n        // Key: Color (IDTAGCOLORS) - Value: Magenta (IDVALCOLORS3)\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.contain\", \"Color: Magenta\");\n        cy.get(\"[data-cy=resource-properties-form]\").within(() => {\n            
cy.get(\"[data-cy=property-field-key]\").within(() => {\n                cy.get(\"input\").type(\"Color\");\n            });\n            cy.get(\"[data-cy=property-field-value]\").click().within(() => {\n                cy.get(\"input\").type(\"Magenta\");\n            });\n            cy.get(\"[data-cy=property-add-btn]\").click();\n        });\n        // Confirm proper vocabulary labels are displayed on the UI.\n        cy.get(\"[data-cy=form-dialog]\").should(\"contain\", \"Color: Magenta\");\n\n        // Value field should not complain about being required just after\n        // adding a new property. See #19732\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.contain\", \"This field is required\");\n\n        cy.get(\"[data-cy=form-submit-btn]\").click();\n        // Confirm that the user was taken to the newly created collection\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n        cy.get(\"[data-cy=breadcrumb-first]\").should(\"contain\", \"Projects\");\n        cy.waitForDom();\n        cy.get(\"[data-cy=breadcrumb-last]\").should('exist', { timeout: 10000 });\n        cy.get(\"[data-cy=breadcrumb-last]\").should(\"contain\", collName);\n\n        // Navigate to Overview tab\n        cy.doMPVTabSelect(\"Overview\");\n\n        // Verify details\n        cy.get(\"[data-cy=details-element]\")\n            .should(\"contain\", \"default\")\n            .and(\"contain\", \"foo\")\n        cy.get(\"[data-cy=resource-properties]\")\n            .should(\"contain\", \"Color: Magenta\")\n            .and(\"not.contain\", \"bar\");\n        // Confirm that the collection's properties has the real values.\n        cy.doRequest(\"GET\", \"/arvados/v1/collections\", null, {\n            filters: `[[\"name\", \"=\", \"${collName}\"]]`,\n        })\n            .its(\"body.items\")\n            .as(\"collections\")\n            .then(function () {\n                expect(this.collections).to.have.lengthOf(1);\n                expect(this.collections[0].properties[\"IDTAGCOLORS\"]).to.deep.equal([\"IDVALCOLORS3\"]);\n            });\n    });\n\n    it(\"shows responsible person for collection if available\", () => {\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        }).as(\"testCollection1\");\n\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: adminUser.user.uuid,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        })\n            .as(\"testCollection2\")\n            .then(function (testCollection2) {\n                cy.shareWith(adminUser.token, activeUser.user.uuid, testCollection2.uuid, \"can_write\");\n            });\n\n        cy.getAll(\"@testCollection1\", \"@testCollection2\").then(function ([testCollection1, testCollection2]) {\n            cy.loginAs(activeUser);\n\n            cy.goToPath(`/collections/${testCollection1.uuid}`);\n            // Navigate to Overview tab\n            cy.doMPVTabSelect(\"Overview\");\n            cy.get(\"[data-cy=responsible-person-wrapper]\").contains(activeUser.user.uuid);\n\n            cy.goToPath(`/collections/${testCollection2.uuid}`);\n            // Navigate to Overview tab\n            cy.doMPVTabSelect(\"Overview\");\n            cy.get(\"[data-cy=responsible-person-wrapper]\").contains(adminUser.user.uuid);\n        });\n    });\n\n    describe(\"file upload\", () => {\n        beforeEach(() => {\n            cy.createCollection(adminUser.token, {\n                name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n                owner_uuid: activeUser.user.uuid,\n                manifest_text: \"./subdir 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo\\n. 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n            }).as(\"testCollection1\");\n        });\n\n        it(\"uploads a file and checks the collection UI to be fresh\", () => {\n            cy.getAll(\"@testCollection1\").then(function ([testCollection1]) {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${testCollection1.uuid}`);\n                // Navigate to Overview tab\n                cy.doMPVTabSelect(\"Overview\");\n                cy.get(\"[data-cy=collection-file-count]\").should(\"contain\", \"2\");\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=upload-button]\").click();\n                cy.get(\"[data-cy=collection-files-panel]\").contains(\"5mb_a.bin\").should(\"not.exist\");\n                cy.fixture(\"files/5mb.bin\", \"base64\").then(content => {\n                    cy.get(\"[data-cy=drag-and-drop]\").upload(content, \"5mb_a.bin\");\n                    cy.get(\"[data-cy=form-submit-btn]\").click();\n                    cy.get(\"[data-cy=form-submit-btn]\").should(\"not.exist\");\n                    cy.get(\"[data-cy=collection-files-panel]\").contains(\"5mb_a.bin\").should(\"exist\");\n                    cy.doMPVTabSelect(\"Overview\");\n                    cy.get(\"[data-cy=collection-file-count]\").should(\"contain\", \"3\");\n\n                    cy.doMPVTabSelect(\"Files\");\n                    cy.get(\"[data-cy=collection-files-panel]\").contains(\"subdir\").click();\n                    cy.get(\"[data-cy=upload-button]\").click();\n                    cy.fixture(\"files/5mb.bin\", \"base64\").then(content => {\n                        cy.get(\"[data-cy=drag-and-drop]\").upload(content, \"5mb_b.bin\");\n                        cy.get(\"[data-cy=form-submit-btn]\").click();\n                        cy.waitForDom().get(\"[data-cy=form-submit-btn]\").should(\"not.exist\");\n                        // subdir gets unselected, I think this is a bug but\n                        // for the time being let's just make sure the test works.\n                        cy.get(\"[data-cy=collection-files-panel]\").contains(\"subdir\").click();\n                        
cy.waitForDom().get(\"[data-cy=collection-files-right-panel]\").contains(\"5mb_b.bin\").should(\"exist\");\n                    });\n                });\n            });\n        });\n\n        it('uploads and maintains nested folder structure', () => {\n            cy.getAll('@testCollection1').then(function ([testCollection1]) {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${testCollection1.uuid}`);\n                cy.doMPVTabSelect(\"Files\");\n                cy.get('[data-cy=upload-button]').click();\n                cy.fixture('files/5mb.bin', 'base64').then((content) => {\n                    cy.get('[data-cy=drag-and-drop]').upload(content, 'foo/bar/baz/qux');\n                    cy.get(\"[data-cy=form-submit-btn]\").click();\n                    cy.get(\"[data-cy=form-submit-btn]\").should(\"not.exist\");\n                    cy.waitForDom().get(\"[data-cy=collection-files-panel]\").contains(\"foo\").should(\"exist\").click();\n                    cy.get('[data-subfolder-path=\"bar\"]').should('exist').click();\n                    cy.get('[data-subfolder-path=\"baz\"]').should('exist').click();\n                })\n            });\n        });\n\n        it(\"allows to cancel running upload\", () => {\n            cy.getAll(\"@testCollection1\").then(function ([testCollection1]) {\n                cy.loginAs(activeUser);\n\n                cy.goToPath(`/collections/${testCollection1.uuid}`);\n\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=upload-button]\").click();\n\n                cy.fixture(\"files/5mb.bin\", \"base64\").then(content => {\n                    cy.get(\"[data-cy=drag-and-drop]\").upload(content, \"5mb_a.bin\");\n                    cy.get(\"[data-cy=drag-and-drop]\").upload(content, \"5mb_b.bin\");\n\n                    cy.get(\"[data-cy=form-submit-btn]\").click();\n\n                    cy.get(\"button\").contains(\"Cancel\").click();\n\n                    cy.get(\"[data-cy=form-submit-btn]\").should(\"not.exist\");\n                });\n            });\n        });\n\n        it(\"allows to cancel single file from the running upload\", () => {\n            cy.getAll(\"@testCollection1\").then(function ([testCollection1]) {\n                cy.loginAs(activeUser);\n\n                cy.goToPath(`/collections/${testCollection1.uuid}`);\n\n                cy.doMPVTabSelect(\"Files\");\n                cy.get(\"[data-cy=upload-button]\").click();\n\n                cy.fixture(\"files/5mb.bin\", \"base64\").then(content => {\n                    cy.get(\"[data-cy=drag-and-drop]\").upload(content, \"5mb_a.bin\");\n                    cy.get(\"[data-cy=drag-and-drop]\").upload(content, \"5mb_b.bin\");\n\n                    cy.get(\"[data-cy=form-submit-btn]\").click();\n\n                    cy.get(\"button[aria-label=Remove]\").eq(1).click();\n\n                    cy.get(\"[data-cy=form-submit-btn]\").should(\"not.exist\");\n\n                    cy.get(\"[data-cy=collection-files-panel]\").contains(\"5mb_a.bin\").should(\"exist\");\n                });\n            });\n        });\n\n        it(\"allows to cancel all files from the running upload\", () => {\n            cy.getAll(\"@testCollection1\").then(function ([testCollection1]) {\n                cy.loginAs(activeUser);\n\n                cy.goToPath(`/collections/${testCollection1.uuid}`);\n                cy.doMPVTabSelect(\"Files\");\n\n                // Confirm initial collection state.\n                
cy.get(\"[data-cy=collection-files-panel]\").contains(\"bar\").should(\"exist\");\n                cy.get(\"[data-cy=collection-files-panel]\").contains(\"15mb_a.bin\").should(\"not.exist\");\n                cy.get(\"[data-cy=collection-files-panel]\").contains(\"15mb_b.bin\").should(\"not.exist\");\n\n                cy.get(\"[data-cy=upload-button]\").click();\n\n                cy.fixture(\"files/15mb.bin\", \"base64\").then(content => {\n                    cy.get(\"[data-cy=drag-and-drop]\").upload(content, \"15mb_a.bin\");\n                    cy.get(\"[data-cy=drag-and-drop]\").upload(content, \"15mb_b.bin\");\n\n                    cy.get(\"[data-cy=form-submit-btn]\").click();\n                    cy.get(\"button[aria-label=Remove]\").should(\"exist\").click({ multiple: true});\n                    cy.get(\"[data-cy=form-submit-btn]\").should(\"not.exist\");\n\n                    // Confirm final collection state.\n                    cy.get(\"[data-cy=collection-files-panel]\").contains(\"bar\").should(\"exist\");\n                    // The following fails, but doesn't seem to happen\n                    // in the real world. Maybe there's a race between\n                    // the PUT request finishing and the 'Remove' button\n                    // dissapearing, because sometimes just one of the 2\n                    // files gets uploaded.\n                    // Maybe this will be needed to simulate a slow network:\n                    // https://docs.cypress.io/api/commands/intercept#Convenience-functions-1\n                    // cy.get('[data-cy=collection-files-panel]')\n                    //     .contains('5mb_a.bin').should('not.exist');\n                    // cy.get('[data-cy=collection-files-panel]')\n                    //     .contains('5mb_b.bin').should('not.exist');\n                });\n            });\n        });\n\n        it('displays the correct breadcrumbs after moving a collection to trash', () => {\n            const breadcrumbTestCollectionName = `Breadcrumb Test Collection ${Math.floor(Math.random() * 999999)}`;\n            cy.loginAs(activeUser);\n            cy.goToPath(`/projects/${activeUser.user.uuid}`);\n\n            cy.get(\"[data-cy=side-panel-button]\").click();\n            cy.get(\"[data-cy=side-panel-new-collection]\").click();\n            cy.get(\"[data-cy=form-dialog]\")\n                .should(\"contain\", \"New collection\")\n                .within(() => {\n                    cy.get(\"[data-cy=name-field]\").within(() => {\n                        cy.get(\"input\").type(breadcrumbTestCollectionName);\n                    });\n                    cy.get(\"[data-cy=form-submit-btn]\").click();\n                });\n            cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n\n            cy.get(\"[data-cy=side-panel-tree]\").contains(\"Home Projects\").click();\n            cy.waitForDom()\n            cy.get('[data-cy=data-table-row]').contains(breadcrumbTestCollectionName).should('exist').rightclick();\n            cy.get('[data-cy=context-menu]').should('exist');\n            cy.get('[data-cy=context-move-to-trash]').click();\n\n            cy.waitForDom();\n            cy.get(\"[data-cy=breadcrumb-first]\").should(\"contain\", \"Home Projects\");\n        });\n    });\n\n    describe(\"zip download\", () => {\n        beforeEach(() => {\n            cy.createCollection(adminUser.token, {\n                name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n                owner_uuid: 
activeUser.user.uuid,\n                manifest_text: \"./subdir 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo\\n. 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n            }).as(\"testCollection1\");\n        });\n\n        it('all files', () => {\n            cy.getAll(\"@testCollection1\").then(function ([testCollection1]) {\n                const downloadName = `${testCollection1.name}.zip`;\n\n                cy.intercept({ method: \"GET\", url: `**/c=${testCollection1.uuid}*`, times: 1, query: {\n                    accept: \"application/zip\",\n                    disposition: \"attachment\",\n                    download_filename: downloadName,\n                }}, (req) => {\n                    const url = new URL(req.url);\n                    const files = url.searchParams.get(\"files\");\n                    // Cannot assert on null so we assert the comparison\n                    expect(files === null).to.equal(true);\n                }).as('downloadQuery');\n\n                cy.loginAs(activeUser);\n                cy.goToPath(`/projects/${activeUser.user.uuid}`);\n\n                // Navigate to collection files\n                cy.doDataExplorerNavigate(testCollection1.name);\n                cy.doMPVTabSelect(\"Files\");\n\n                // Click download entire collection as zip\n                cy.doCollectionPanelOptionsAction(\"Download entire collection as zip\");\n                cy.waitForDom();\n\n                // Verify filename\n                cy.get(\"[data-cy=form-dialog]\").within(() => {\n                    cy.get('h2').contains(\"Download\");\n                    cy.get('input[name=fileName]').should('have.value', downloadName);\n                    cy.get('button[data-cy=form-submit-btn]').click();\n                });\n                // Wait for download request to match\n                cy.wait('@downloadQuery');\n            });\n        });\n\n        it('one file', () => {\n            cy.getAll(\"@testCollection1\").then(function ([testCollection1]) {\n                const downloadName = `${testCollection1.name} - bar.zip`;\n\n                cy.intercept({ method: \"GET\", url: `**/c=${testCollection1.uuid}*`, times: 1, query: {\n                    accept: \"application/zip\",\n                    disposition: \"attachment\",\n                    download_filename: downloadName,\n                }}, (req) => {\n                    const url = new URL(req.url);\n                    const files = url.searchParams.toString()\n                        .split(\"&\")\n                        .filter(param => param.startsWith(\"files=\"))\n                        .join(\"&\");\n\n                    expect(files).to.equal(\"files=bar\");\n                }).as('downloadQuery');\n\n                cy.loginAs(activeUser);\n                cy.goToPath(`/projects/${activeUser.user.uuid}`);\n\n                // Navigate to collection files\n                cy.doDataExplorerNavigate(testCollection1.name);\n                cy.doMPVTabSelect(\"Files\");\n\n                // Select one file\n                cy.doCollectionFileSelect(\"bar\");\n\n                // Click download selected files as zip\n                cy.doCollectionPanelOptionsAction(\"Download selected files as zip\");\n                cy.waitForDom();\n\n                // Verify filename\n                cy.get(\"[data-cy=form-dialog]\").within(() => {\n                    cy.get('h2').contains(\"Download\");\n                    cy.get('input[name=fileName]').should('have.value', 
downloadName);\n                    cy.get('button[data-cy=form-submit-btn]').click();\n                });\n                // Wait for download request to match\n                cy.wait('@downloadQuery');\n            });\n        });\n\n        it('multi file', () => {\n            cy.getAll(\"@testCollection1\").then(function ([testCollection1]) {\n                const downloadName = `${testCollection1.name} - 2 files.zip`;\n\n                cy.intercept({ method: \"GET\", url: `**/c=${testCollection1.uuid}*`, times: 1, query: {\n                    accept: \"application/zip\",\n                    disposition: \"attachment\",\n                    download_filename: downloadName,\n                }}, (req) => {\n                    const url = new URL(req.url);\n                    const files = url.searchParams.toString()\n                        .split(\"&\")\n                        .filter(param => param.startsWith(\"files=\"))\n                        .join(\"&\");\n\n                    expect(files).to.equal(\"files=subdir&files=bar\");\n                }).as('downloadQuery');\n\n                cy.loginAs(activeUser);\n                cy.goToPath(`/projects/${activeUser.user.uuid}`);\n\n                // Navigate to collection files\n                cy.doDataExplorerNavigate(testCollection1.name);\n                cy.doMPVTabSelect(\"Files\");\n\n                // Select multiple files\n                cy.doCollectionFileSelect(\"subdir\");\n                cy.doCollectionFileSelect(\"bar\");\n\n                // Click download selected files as zip\n                cy.doCollectionPanelOptionsAction(\"Download selected files as zip\");\n                cy.waitForDom();\n\n                // Verify filename\n                cy.get(\"[data-cy=form-dialog]\").within(() => {\n                    cy.get('h2').contains(\"Download\");\n                    cy.get('input[name=fileName]').should('have.value', downloadName);\n                    cy.get('button[data-cy=form-submit-btn]').click();\n                });\n                // Wait for download request to match\n                cy.wait('@downloadQuery');\n            });\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/context-menu.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { projectOrder, collectionOrder, workflowOrder } from 'views-components/context-menu/menu-item-sort';\nimport { ContextMenuActionNames } from 'views-components/context-menu/context-menu-action-set';\n\ndescribe('ContextMenu', () => {\n    let adminUser;\n    let activeUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser')\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    describe('Basic context menu tests', () => {\n        it('opens context menu on right click and closes on click outside', () => {\n            cy.createGroup(adminUser.token, {\n                name: `my-context-menu-project-1`,\n                group_class: 'project',\n            });\n\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n\n            cy.doMPVTabSelect(\"Data\");\n\n            cy.get('[data-cy=\"data-table-row\"]').contains('my-context-menu-project-1').rightclick();\n            cy.get('[data-cy=\"context-menu\"]').then(($el) => {\n                // Click outside the context menu\n                const rect = $el[0].getBoundingClientRect();\n                const x = rect.right + 300;\n                const y = rect.top + rect.height / 2; // Vertically centered\n\n                cy.get('body').click(x, y);\n                cy.get('[data-cy=\"context-menu\"]').should('not.exist');\n            });\n        });\n\n        it('executes menu item action when clicked', () => {\n            cy.createGroup(adminUser.token, {\n                name: `my-context-menu-project-2`,\n                group_class: 'project',\n            });\n\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n            cy.doMPVTabSelect(\"Data\");\n\n            cy.get('[data-cy=\"data-table-row\"]').contains('my-context-menu-project-2').rightclick();\n            cy.get('[data-cy=\"context-menu\"]').contains('Share').click();\n\n            // Verify the menu closed\n            cy.get('[data-cy=\"context-menu\"]').should('not.exist');\n            // Verify the sharing dialog opened\n            cy.get('[data-cy=\"sharing-dialog\"]').should('be.visible');\n        });\n    });\n\n    describe('Shows correct menu items', () => {\n        it('shows correct Project menu items', () => {\n            cy.createGroup(adminUser.token, {\n                name: `my-context-menu-project-3`,\n                group_class: 'project',\n            });\n\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n            cy.doMPVTabSelect(\"Data\");\n\n            // Right click on a project\n            cy.get('[data-cy=\"data-table-row\"]').contains('my-context-menu-project-3').rightclick();\n            // Check for project-specific menu items\n            
cy.get('[data-cy=\"context-menu\"]').within(() => {\n                projectOrder.forEach((name) => {\n                    if (name === ContextMenuActionNames.DIVIDER) return;\n                    cy.contains(name).should('exist');\n                });\n            });\n        });\n\n        it('filters menu items based on user permissions', () => {\n            cy.createGroup(activeUser.token, {\n                name: `my-context-menu-project-4`,\n                group_class: 'project',\n            });\n\n            // Test as non-admin user\n            cy.loginAs(activeUser);\n            cy.doSidePanelNavigation('Home Projects');\n            cy.doMPVTabSelect(\"Data\");\n\n            cy.get('[data-cy=\"data-table-row\"]').contains('my-context-menu-project-4').rightclick();\n            cy.get('[data-cy=\"context-menu\"]').within(() => {\n                // Admin-only options should not be visible\n                cy.contains('Add to public favorites').should('not.exist');\n            });\n        });\n\n        it('shows correct Collection menu items', () => {\n            cy.createCollection(adminUser.token, {\n                name: `my-context-menu-collection`,\n                owner_uuid: adminUser.uuid,\n                manifest_text: '. 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n',\n            });\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n            cy.doMPVTabSelect(\"Data\");\n\n            // Right click on a project\n            cy.get('[data-cy=\"data-table-row\"]').contains('my-context-menu-collection').rightclick();\n            // Check for project-specific menu items\n            cy.get('[data-cy=\"context-menu\"]').within(() => {\n                collectionOrder.forEach((name) => {\n                    if (name === ContextMenuActionNames.DIVIDER) return;\n                    cy.contains(name).should('exist');\n                });\n            });\n        });\n\n        it('shows correct Workflow menu items', () => {\n            cy.createWorkflow(adminUser.token, {\n                name: `my-context-menu-workflow.cwl`,\n                definition:\n                    '{\\n    \"$graph\": [\\n        {\\n            \"class\": \"Workflow\",\\n            \"doc\": \"Reverse the lines in a document, then sort those lines.\",\\n            \"hints\": [\\n                {\\n                    \"acrContainerImage\": \"99b0201f4cade456b4c9d343769a3b70+261\",\\n                    \"class\": \"http://arvados.org/cwl#WorkflowRunnerResources\"\\n                }\\n            ],\\n            \"id\": \"#main\",\\n            \"inputs\": [\\n                {\\n                    \"default\": null,\\n                    \"doc\": \"The input file to be processed.\",\\n                    \"id\": \"#main/input\",\\n                    \"type\": \"File\"\\n                },\\n                {\\n                    \"default\": true,\\n                    \"doc\": \"If true, reverse (decending) sort\",\\n                    \"id\": \"#main/reverse_sort\",\\n                    \"type\": \"boolean\"\\n                }\\n            ],\\n            \"outputs\": [\\n                {\\n                    \"doc\": \"The output with the lines reversed and sorted.\",\\n                    \"id\": \"#main/output\",\\n                    \"outputSource\": \"#main/sorted/output\",\\n                    \"type\": \"File\"\\n                }\\n            ],\\n            \"steps\": [\\n                {\\n                   
 \"id\": \"#main/rev\",\\n                    \"in\": [\\n                        {\\n                            \"id\": \"#main/rev/input\",\\n                            \"source\": \"#main/input\"\\n                        }\\n                    ],\\n                    \"out\": [\\n                        \"#main/rev/output\"\\n                    ],\\n                    \"run\": \"#revtool.cwl\"\\n                },\\n                {\\n                    \"id\": \"#main/sorted\",\\n                    \"in\": [\\n                        {\\n                            \"id\": \"#main/sorted/input\",\\n                            \"source\": \"#main/rev/output\"\\n                        },\\n                        {\\n                            \"id\": \"#main/sorted/reverse\",\\n                            \"source\": \"#main/reverse_sort\"\\n                        }\\n                    ],\\n                    \"out\": [\\n                        \"#main/sorted/output\"\\n                    ],\\n                    \"run\": \"#sorttool.cwl\"\\n                }\\n            ]\\n        },\\n        {\\n            \"baseCommand\": \"rev\",\\n            \"class\": \"CommandLineTool\",\\n            \"doc\": \"Reverse each line using the `rev` command\",\\n            \"hints\": [\\n                {\\n                    \"class\": \"ResourceRequirement\",\\n                    \"ramMin\": 8\\n                }\\n            ],\\n            \"id\": \"#revtool.cwl\",\\n            \"inputs\": [\\n                {\\n                    \"id\": \"#revtool.cwl/input\",\\n                    \"inputBinding\": {},\\n                    \"type\": \"File\"\\n                }\\n            ],\\n            \"outputs\": [\\n                {\\n                    \"id\": \"#revtool.cwl/output\",\\n                    \"outputBinding\": {\\n                        \"glob\": \"output.txt\"\\n                    },\\n                    \"type\": \"File\"\\n                }\\n            ],\\n            \"stdout\": \"output.txt\"\\n        },\\n        {\\n            \"baseCommand\": \"sort\",\\n            \"class\": \"CommandLineTool\",\\n            \"doc\": \"Sort lines using the `sort` command\",\\n            \"hints\": [\\n                {\\n                    \"class\": \"ResourceRequirement\",\\n                    \"ramMin\": 8\\n                }\\n            ],\\n            \"id\": \"#sorttool.cwl\",\\n            \"inputs\": [\\n                {\\n                    \"id\": \"#sorttool.cwl/reverse\",\\n                    \"inputBinding\": {\\n                        \"position\": 1,\\n                        \"prefix\": \"-r\"\\n                    },\\n                    \"type\": \"boolean\"\\n                },\\n                {\\n                    \"id\": \"#sorttool.cwl/input\",\\n                    \"inputBinding\": {\\n                        \"position\": 2\\n                    },\\n                    \"type\": \"File\"\\n                }\\n            ],\\n            \"outputs\": [\\n                {\\n                    \"id\": \"#sorttool.cwl/output\",\\n                    \"outputBinding\": {\\n                        \"glob\": \"output.txt\"\\n                    },\\n                    \"type\": \"File\"\\n                }\\n            ],\\n            \"stdout\": \"output.txt\"\\n        }\\n    ],\\n    \"cwlVersion\": \"v1.0\"\\n}',\n                owner_uuid: adminUser.uuid,\n            });\n\n            
cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n            cy.doMPVTabSelect(\"Data\");\n\n            // Right click on a workflow\n            cy.get('[data-cy=\"data-table-row\"]').contains('my-context-menu-workflow.cwl').rightclick();\n            // Check for workflow-specific menu items\n            cy.get('[data-cy=\"context-menu\"]').within(() => {\n                workflowOrder.forEach((name) => {\n                    if (name === ContextMenuActionNames.DIVIDER) return;\n                    cy.contains(name).should('exist');\n                });\n            });\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/create-workflow.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nconst testWFDefinition = \"{\\n    \\\"$graph\\\": [\\n        {\\n            \\\"class\\\": \\\"Workflow\\\",\\n            \\\"doc\\\": \\\"Reverse the lines in a document, then sort those lines.\\\",\\n            \\\"hints\\\": [\\n                {\\n                    \\\"acrContainerImage\\\": \\\"99b0201f4cade456b4c9d343769a3b70+261\\\",\\n                    \\\"class\\\": \\\"http://arvados.org/cwl#WorkflowRunnerResources\\\"\\n                }\\n            ],\\n            \\\"id\\\": \\\"#main\\\",\\n            \\\"inputs\\\": [\\n                {\\n                    \\\"default\\\": null,\\n                    \\\"doc\\\": \\\"The input file to be processed.\\\",\\n                    \\\"id\\\": \\\"#main/input\\\",\\n                    \\\"type\\\": \\\"File\\\"\\n                },\\n                {\\n                    \\\"default\\\": true,\\n                    \\\"doc\\\": \\\"If true, reverse (decending) sort\\\",\\n                    \\\"id\\\": \\\"#main/reverse_sort\\\",\\n                    \\\"type\\\": \\\"boolean\\\"\\n                }\\n            ],\\n            \\\"outputs\\\": [\\n                {\\n                    \\\"doc\\\": \\\"The output with the lines reversed and sorted.\\\",\\n                    \\\"id\\\": \\\"#main/output\\\",\\n                    \\\"outputSource\\\": \\\"#main/sorted/output\\\",\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"steps\\\": [\\n                {\\n                    \\\"id\\\": \\\"#main/rev\\\",\\n                    \\\"in\\\": [\\n                        {\\n                            \\\"id\\\": \\\"#main/rev/input\\\",\\n                            \\\"source\\\": \\\"#main/input\\\"\\n                        }\\n                    ],\\n                    \\\"out\\\": [\\n                        \\\"#main/rev/output\\\"\\n                    ],\\n                    \\\"run\\\": \\\"#revtool.cwl\\\"\\n                },\\n                {\\n                    \\\"id\\\": \\\"#main/sorted\\\",\\n                    \\\"in\\\": [\\n                        {\\n                            \\\"id\\\": \\\"#main/sorted/input\\\",\\n                            \\\"source\\\": \\\"#main/rev/output\\\"\\n                        },\\n                        {\\n                            \\\"id\\\": \\\"#main/sorted/reverse\\\",\\n                            \\\"source\\\": \\\"#main/reverse_sort\\\"\\n                        }\\n                    ],\\n                    \\\"out\\\": [\\n                        \\\"#main/sorted/output\\\"\\n                    ],\\n                    \\\"run\\\": \\\"#sorttool.cwl\\\"\\n                }\\n            ]\\n        },\\n        {\\n            \\\"baseCommand\\\": \\\"rev\\\",\\n            \\\"class\\\": \\\"CommandLineTool\\\",\\n            \\\"doc\\\": \\\"Reverse each line using the `rev` command\\\",\\n            \\\"hints\\\": [\\n                {\\n                    \\\"class\\\": \\\"ResourceRequirement\\\",\\n                    \\\"ramMin\\\": 8\\n                }\\n            ],\\n            \\\"id\\\": \\\"#revtool.cwl\\\",\\n            \\\"inputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#revtool.cwl/input\\\",\\n                    \\\"inputBinding\\\": {},\\n                    \\\"type\\\": \\\"File\\\"\\n               
 }\\n            ],\\n            \\\"outputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#revtool.cwl/output\\\",\\n                    \\\"outputBinding\\\": {\\n                        \\\"glob\\\": \\\"output.txt\\\"\\n                    },\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"stdout\\\": \\\"output.txt\\\"\\n        },\\n        {\\n            \\\"baseCommand\\\": \\\"sort\\\",\\n            \\\"class\\\": \\\"CommandLineTool\\\",\\n            \\\"doc\\\": \\\"Sort lines using the `sort` command\\\",\\n            \\\"hints\\\": [\\n                {\\n                    \\\"class\\\": \\\"ResourceRequirement\\\",\\n                    \\\"ramMin\\\": 8\\n                }\\n            ],\\n            \\\"id\\\": \\\"#sorttool.cwl\\\",\\n            \\\"inputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#sorttool.cwl/reverse\\\",\\n                    \\\"inputBinding\\\": {\\n                        \\\"position\\\": 1,\\n                        \\\"prefix\\\": \\\"-r\\\"\\n                    },\\n                    \\\"type\\\": \\\"boolean\\\"\\n                },\\n                {\\n                    \\\"id\\\": \\\"#sorttool.cwl/input\\\",\\n                    \\\"inputBinding\\\": {\\n                        \\\"position\\\": 2\\n                    },\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"outputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#sorttool.cwl/output\\\",\\n                    \\\"outputBinding\\\": {\\n                        \\\"glob\\\": \\\"output.txt\\\"\\n                    },\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"stdout\\\": \\\"output.txt\\\"\\n        }\\n    ],\\n    \\\"cwlVersion\\\": \\\"v1.0\\\"\\n}\"\n\ndescribe('Create workflow tests', function () {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser').then(function () {\n                adminUser = this.adminUser;\n            }\n            );\n        cy.getUser('activeuser', 'Active', 'User', false, true)\n            .as('activeUser').then(function () {\n                activeUser = this.activeUser;\n            }\n            );\n    });\n\n    function createNestedHelper(testRemainder) {\n        cy.createGroup(adminUser.token, {\n            group_class: \"project\",\n            name: `Test project (${Math.floor(Math.random() * 999999)})`,\n        }).as('project1');\n\n        cy.get('@project1').then(() => {\n            cy.createGroup(adminUser.token, {\n                group_class: \"project\",\n                name: `Test project (${Math.floor(Math.random() * 999999)})`,\n                owner_uuid: this.project1.uuid,\n            }).as('project2');\n        })\n\n        cy.get('@project2').then(() => {\n            cy.createGroup(adminUser.token, {\n                group_class: \"project\",\n                name: `Test project (${Math.floor(Math.random() * 999999)})`,\n                owner_uuid: this.project2.uuid,\n            }).as('project3');\n        });\n\n        cy.get('@project3').then(() => {\n            cy.createWorkflow(adminUser.token, {\n                name: `TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n                definition: testWFDefinition,\n            })\n                
.as('testWorkflow');\n\n            cy.createCollection(adminUser.token, {\n                name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n                owner_uuid: this.project3.uuid,\n                manifest_text: \"./subdir 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo\\n. 37b51d194a7513e45b56f6524f2d51f2+3 0:3:baz\\n\"\n            })\n                .as('testCollection');\n        });\n\n        cy.get('@testWorkflow').then(() => {\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n\n            cy.get('[data-cy=side-panel-button]').click();\n            cy.get('[data-cy=side-panel-run-process]').click();\n\n            cy.get('.layout-pane')\n                .contains(this.testWorkflow.name)\n                .click();\n\n            cy.get('[data-cy=run-process-next-button]').click();\n\n            cy.get('[data-cy=new-process-panel]').contains('Run workflow').should('be.disabled');\n            cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n\n            cy.get('[data-cy=new-process-panel]')\n                .within(() => {\n                    cy.get('[name=name]').type(`Workflow name (${Math.floor(Math.random() * 999999)})`);\n                    cy.contains('input').next().click();\n                });\n\n            testRemainder();\n\n            cy.get('[data-cy=new-process-panel]')\n                .find('button').contains('Run workflow').should('not.be.disabled');\n        });\n    }\n\n    it('can create project with nested data', function () {\n        this.createNestedHelper = createNestedHelper;\n        this.createNestedHelper(() => {\n            cy.get('[data-cy=choose-a-file-dialog]').as('chooseFileDialog');\n            cy.get('@chooseFileDialog').contains('Home Projects')\n                .parents('[data-cy=tree-li]')\n                .find('[data-cy=side-panel-arrow-icon]')\n                .click();\n\n            cy.get('@project1').then((project1) => {\n                cy.get('@chooseFileDialog').find(`[data-id=${project1.uuid}]`);\n                cy.get('@chooseFileDialog')\n                    .find(`[data-id=${project1.uuid}]`)\n                    .find('[data-action=TOGGLE_ACTIVE]')\n                    .click();\n                cy.get('[data-cy=picker-dialog-details]')\n                    .contains(\"Project\");\n                cy.get('[data-cy=picker-dialog-details]')\n                    .contains(project1.uuid);\n                cy.get('@chooseFileDialog')\n                    .find(`[data-id=${project1.uuid}]`)\n                    .find('[data-action=TOGGLE_OPEN]')\n                    .click();\n            });\n\n            cy.get('@project2').then((project2) => {\n                cy.get('@chooseFileDialog').find(`[data-id=${project2.uuid}]`);\n                cy.get('@chooseFileDialog')\n                    .find(`[data-id=${project2.uuid}]`)\n                    .find('[data-action=TOGGLE_ACTIVE]')\n                    .click();\n                cy.get('[data-cy=picker-dialog-details]')\n                    .contains(\"Project\");\n                cy.get('[data-cy=picker-dialog-details]')\n                    .contains(project2.uuid);\n                cy.get('@chooseFileDialog')\n                    .find(`[data-id=${project2.uuid}]`)\n                    .find('[data-action=TOGGLE_OPEN]')\n                    .click();\n            });\n\n            cy.get('@project3').then((project3) => {\n                
cy.get('@chooseFileDialog').find(`[data-id=${project3.uuid}]`);\n                cy.get('@chooseFileDialog')\n                    .find(`[data-id=${project3.uuid}]`)\n                    .find('[data-action=TOGGLE_ACTIVE]')\n                    .click();\n                cy.get('[data-cy=picker-dialog-details]')\n                    .contains(\"Project\");\n                cy.get('[data-cy=picker-dialog-details]')\n                    .contains(project3.uuid);\n                cy.get('@chooseFileDialog')\n                    .find(`[data-id=${project3.uuid}]`)\n                    .find('[data-action=TOGGLE_OPEN]')\n                    .click();\n            });\n\n            cy.get('@testCollection').then((testCollection) => {\n                cy.get('@chooseFileDialog').find(`[data-id=${testCollection.uuid}]`);\n                cy.get('@chooseFileDialog')\n                    .find(`[data-id=${testCollection.uuid}]`)\n                    .find('[data-action=TOGGLE_ACTIVE]')\n                    .click();\n                cy.get('[data-cy=picker-dialog-details]')\n                    .contains(\"Collection\");\n                cy.get('[data-cy=picker-dialog-details]')\n                    .contains(testCollection.uuid);\n                cy.get('@chooseFileDialog')\n                    .find(`[data-id=${testCollection.uuid}]`)\n                    .find('[data-action=TOGGLE_OPEN]')\n                    .click();\n            });\n\n            cy.get('@chooseFileDialog').contains('baz').click();\n            cy.get('[data-cy=picker-dialog-details]')\n                .contains(\"File\");\n\n            cy.get('@chooseFileDialog').find('button').contains('Ok').click();\n        });\n    });\n\n    it('can search for nested project by name', function () {\n        this.createNestedHelper = createNestedHelper;\n        this.createNestedHelper(() => {\n            cy.get('[data-cy=choose-a-file-dialog]').as('chooseFileDialog');\n\n            cy.get('@project3').then((project3) => {\n                cy.get('[data-cy=picker-dialog-project-search]')\n                    .find('[data-cy=search-input]')\n                    .type(project3.name)\n\n                cy.waitForDom();\n\n                cy.get('@chooseFileDialog')\n                    .find(`[data-id=${project3.uuid}]`)\n                    .find('[data-action=TOGGLE_OPEN]')\n                    .click();\n\n                cy.get('@testCollection').then((testCollection) => {\n                    cy.get('@chooseFileDialog').find(`[data-id=${testCollection.uuid}]`).find('i').click();\n                });\n\n                cy.get('@chooseFileDialog').contains('baz').click();\n\n                cy.get('@chooseFileDialog').find('button').contains('Ok').click();\n            });\n        });\n    });\n\n    it('can search for nested project by uuid', function () {\n        this.createNestedHelper = createNestedHelper;\n        this.createNestedHelper(() => {\n            cy.get('[data-cy=choose-a-file-dialog]').as('chooseFileDialog');\n\n            cy.get('@project3').then((project3) => {\n                cy.get('[data-cy=picker-dialog-project-search]')\n                    .find('[data-cy=search-input]')\n                    .type(project3.uuid)\n\n                cy.waitForDom();\n\n                cy.get('@chooseFileDialog')\n                    .find(`[data-id=${project3.uuid}]`)\n                    .find('[data-action=TOGGLE_OPEN]')\n                    .click();\n\n                cy.get('@testCollection').then((testCollection) => 
{\n                    cy.get('@chooseFileDialog').find(`[data-id=${testCollection.uuid}]`).find('i').click();\n                });\n\n                cy.get('@chooseFileDialog').contains('baz').click();\n\n                cy.get('@chooseFileDialog').find('button').contains('Ok').click();\n            });\n        });\n    });\n\n\n    it('can search for collection by name', function () {\n        this.createNestedHelper = createNestedHelper;\n        this.createNestedHelper(() => {\n            cy.get('[data-cy=choose-a-file-dialog]').as('chooseFileDialog');\n\n            cy.get('@testCollection').then((testCollection) => {\n                cy.get('[data-cy=picker-dialog-collection-search]')\n                    .find('[data-cy=search-input]')\n                    .type(testCollection.name)\n\n                cy.waitForDom();\n\n                cy.get('@testCollection').then((testCollection) => {\n                    cy.get('@chooseFileDialog')\n                        .find(`[data-id=${testCollection.uuid}]`)\n                        .find('[data-action=TOGGLE_OPEN]')\n                        .click();\n                });\n\n                cy.get('@chooseFileDialog').contains('baz').click();\n\n                cy.get('@chooseFileDialog').find('button').contains('Ok').click();\n            });\n        });\n    });\n\n    it('can search for collection by uuid', function () {\n        this.createNestedHelper = createNestedHelper;\n        this.createNestedHelper(() => {\n            cy.get('[data-cy=choose-a-file-dialog]').as('chooseFileDialog');\n\n            cy.get('@testCollection').then((testCollection) => {\n                cy.get('[data-cy=picker-dialog-collection-search]')\n                    .find('[data-cy=search-input]')\n                    .type(testCollection.uuid)\n\n                cy.waitForDom();\n\n                cy.get('@testCollection').then((testCollection) => {\n                    cy.get('@chooseFileDialog')\n                        .find(`[data-id=${testCollection.uuid}]`)\n                        .find('[data-action=TOGGLE_OPEN]')\n                        .click();\n                });\n\n                cy.get('@chooseFileDialog').contains('baz').click();\n\n                cy.get('@chooseFileDialog').find('button').contains('Ok').click();\n            });\n        });\n    });\n\n    it('can search for collection by PDH', function () {\n        this.createNestedHelper = createNestedHelper;\n        this.createNestedHelper(() => {\n            cy.get('[data-cy=choose-a-file-dialog]').as('chooseFileDialog');\n\n            cy.get('@testCollection').then((testCollection) => {\n                cy.get('[data-cy=picker-dialog-collection-search]')\n                    .find('[data-cy=search-input]')\n                    .type(testCollection.portable_data_hash)\n\n                cy.waitForDom();\n\n                cy.get('@testCollection').then((testCollection) => {\n                    cy.get('@chooseFileDialog')\n                        .find(`[data-id=${testCollection.uuid}]`)\n                        .find('[data-action=TOGGLE_OPEN]')\n                        .click();\n                });\n\n                cy.get('@chooseFileDialog').contains('baz').click();\n\n                cy.get('@chooseFileDialog').find('button').contains('Ok').click();\n            });\n        });\n    });\n\n    it('can pick a parent project from the project picker when starting from +NEW button', function () {\n        cy.createGroup(adminUser.token, {\n            group_class: 
'project',\n            name: `Test project (${Math.floor(Math.random() * 999999)})`,\n        }).as('project1');\n\n        cy.createGroup(adminUser.token, {\n            group_class: 'project',\n            name: `Test project (${Math.floor(Math.random() * 999999)})`,\n        }).as('project2');\n\n        cy.get('@project1').then(() => {\n            cy.createWorkflow(adminUser.token, {\n                name: `TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n                definition: testWFDefinition,\n            }).as('testWorkflow');\n        });\n\n        cy.getAll('@project1', '@project2', '@testWorkflow').then(([project1, project2, testWorkflow]) => {\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n\n            cy.get('[data-cy=side-panel-button]').click();\n            cy.get('[data-cy=side-panel-run-process]').click();\n\n            cy.get('.layout-pane').contains(testWorkflow.name).click();\n\n            cy.get('[data-cy=run-process-next-button]').click();\n\n            cy.get('[data-cy=new-process-panel]').contains('Run workflow').should('be.disabled');\n            cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n\n            cy.get('[data-cy=new-process-panel]').within(() => {\n                cy.get('[name=name]').type(`Workflow name (${Math.floor(Math.random() * 999999)})`);\n            });\n\n            //check that the default owner project is correct\n            cy.get(`input[value=\"Admin User (root project)\"]`).should('exist');\n            cy.get('[data-cy=run-wf-project-input]').click();\n            cy.get('[data-cy=project-picker-details]').contains('Admin User (root project)');\n            //selecting a project should update the details element\n            cy.get('[data-cy=projects-tree-home-tree-picker]').contains(project1.name).click();\n            cy.get('[data-cy=project-picker-details]').contains(project1.name);\n            cy.get('[data-cy=projects-tree-home-tree-picker]').contains(project2.name).click();\n            cy.get('[data-cy=project-picker-details]').contains(project2.name);\n            cy.get('[data-cy=projects-tree-home-tree-picker]').contains(project1.name).click();\n            cy.get('[data-cy=project-picker-details]').contains(project1.name);\n            //canceling should reset the details element\n            cy.get('[data-cy=run-wf-project-picker-cancel-button]').click();\n            cy.get(`input[value=\"Admin User (root project)\"]`).should('exist');\n            //we should be able to change the selection with the 'OK' button\n            cy.get('[data-cy=run-wf-project-input]').click();\n            cy.get('[data-cy=projects-tree-home-tree-picker]').contains(project1.name).click();\n            cy.get('[data-cy=project-picker-details]').contains(project1.name);\n            cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n            cy.get(`input[value=\"${project1.name}\"]`).should('exist');\n            cy.get('[data-cy=run-wf-project-input]').click();\n            cy.get('[data-cy=projects-tree-home-tree-picker]').contains(project2.name).click();\n            cy.get('[data-cy=project-picker-details]').contains(project2.name);\n            cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n            cy.get(`input[value=\"${project2.name}\"]`).should('exist');\n            //should be able to re-select root project\n            cy.get('[data-cy=run-wf-project-input]').click();\n            
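// re-selecting the root takes two clicks: the first expands the \"Home Projects\" tree node, the second selects it\n            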
cy.get('[data-cy=projects-tree-home-tree-picker]').contains(\"Home Projects\").click();\n            // wait for tree node to expand\n            cy.waitForDom();\n            cy.wait(1000);\n            cy.get('[data-cy=projects-tree-home-tree-picker]').contains(\"Home Projects\").should('exist', {timeout: 10000}).click();\n            cy.get('[data-cy=project-picker-details]').contains('Admin User (root project)');\n            cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n            cy.get(`input[value=\"Admin User (root project)\"]`).should('exist');\n        });\n    });\n\n    it('can pick a parent project from the project picker starting from toolbar or context menu', function () {\n        cy.createGroup(adminUser.token, {\n            group_class: 'project',\n            name: `Test project (${Math.floor(Math.random() * 999999)})`,\n        }).as('project1');\n\n        cy.createGroup(adminUser.token, {\n            group_class: 'project',\n            name: `Test project (${Math.floor(Math.random() * 999999)})`,\n        }).as('project2');\n\n        cy.get('@project1').then(() => {\n            cy.createWorkflow(adminUser.token, {\n                name: `TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n                definition: testWFDefinition,\n            }).as('testWorkflow');\n        });\n\n        cy.getAll('@project1', '@project2', '@testWorkflow').then(([project1, project2, testWorkflow]) => {\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n\n            cy.doMPVTabSelect(\"Data\");\n            cy.get('.layout-pane').contains(this.testWorkflow.name).rightclick();\n            cy.get('[data-cy=context-menu]').within(() => {\n                cy.contains('Run Workflow').click();\n            });\n\n            //check that the default owner project is correct\n            cy.get('[data-cy=project-picker-details]').contains('Admin User (root project)');\n            //selecting a project should update the details element\n            cy.get('[data-cy=projects-tree-home-tree-picker]').contains(project1.name).click();\n            cy.get('[data-cy=project-picker-details]').contains(project1.name);\n            cy.get('[data-cy=projects-tree-home-tree-picker]').contains(project2.name).click();\n            cy.get('[data-cy=project-picker-details]').contains(project2.name);\n            cy.get('[data-cy=projects-tree-home-tree-picker]').contains(project1.name).click();\n            cy.get('[data-cy=project-picker-details]').contains(project1.name);\n            //canceling should reset the details element\n            cy.get('[data-cy=run-wf-project-picker-cancel-button]').click();\n            cy.get(`input[value=\"Admin User (root project)\"]`).should('exist');\n            //we should be able to change the selection with the 'OK' button\n            cy.get('[data-cy=run-wf-project-input]').click();\n            cy.get('[data-cy=projects-tree-home-tree-picker]').contains(project1.name).click();\n            cy.get('[data-cy=project-picker-details]').contains(project1.name);\n            cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n            cy.get(`input[value=\"${project1.name}\"]`).should('exist');\n            cy.get('[data-cy=run-wf-project-input]').click();\n            cy.get('[data-cy=projects-tree-home-tree-picker]').contains(project2.name).click();\n            cy.get('[data-cy=project-picker-details]').contains(project2.name);\n            
cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n            cy.get(`input[value=\"${project2.name}\"]`).should('exist');\n            //should be able to re-select root project\n            cy.get('[data-cy=run-wf-project-input]').click();\n            cy.get('[data-cy=projects-tree-home-tree-picker]').contains(\"Home Projects\").click();\n            // wait for tree node to expand\n            cy.waitForDom();\n            cy.wait(1000);\n            cy.get('[data-cy=projects-tree-home-tree-picker]').contains(\"Home Projects\", { timeout: 10000 }).should('exist').click();\n            cy.get('[data-cy=project-picker-details]').contains('Admin User (root project)');\n            cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n            cy.get(`input[value=\"Admin User (root project)\"]`).should('exist');\n        });\n    });\n\n    it('respects write permissions in the project picker', function () {\n        cy.loginAs(adminUser);\n        cy.doSidePanelNavigation('Home Projects');\n\n        cy.createGroup(adminUser.token, {\n            name: `my-shared-writable-project ${Math.floor(Math.random() * 999999)}`,\n            group_class: 'project',\n        }).as('mySharedWritableProject').then(function (mySharedWritableProject) {\n            cy.createWorkflow(adminUser.token, {\n                name: `TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n                definition: testWFDefinition,\n                owner_uuid: mySharedWritableProject.uuid,\n                }).as('parentWritableWF');\n            cy.contains('Refresh').click();\n            cy.doMPVTabSelect(\"Data\");\n            cy.get('main').contains(mySharedWritableProject.name).rightclick();\n            cy.get('[data-cy=context-menu]').within(() => {\n                cy.contains('Share').click({ waitForAnimations: false });\n            });\n            cy.get('[data-cy=permission-select]').as('permissionSelect');\n            cy.get('@permissionSelect').click();\n            cy.contains('Write').click();\n            cy.get('.sharing-dialog').as('sharingDialog');\n            cy.get('[data-cy=invite-people-field]').find('input').type(activeUser.user.email);\n            cy.get('[data-cy=\"loading-spinner\"]').should('not.exist');\n            cy.get('[data-cy=\"users-tab-label\"]').click();\n            cy.get('[data-cy=sharing-suggestion]').click();\n            cy.get('@sharingDialog').within(() => {\n                cy.get('[data-cy=add-invited-people]').click();\n                cy.contains('Close').click({ waitForAnimations: false });\n            });\n        });\n\n        cy.createGroup(adminUser.token, {\n            name: `my-shared-readonly-project ${Math.floor(Math.random() * 999999)}`,\n            group_class: 'project',\n        }).as('mySharedReadonlyProject').then(function (mySharedReadonlyProject) {\n            cy.createWorkflow(adminUser.token, {\n                name: `(readonly) TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n                definition: testWFDefinition,\n                owner_uuid: mySharedReadonlyProject.uuid,\n                }).as('parentReadonlyWF');\n            cy.contains('Refresh').click();\n            cy.get('main').contains(mySharedReadonlyProject.name).rightclick();\n            cy.get('[data-cy=context-menu]').within(() => {\n                cy.contains('Share').click({ waitForAnimations: false });\n            });\n            cy.get('.sharing-dialog').as('sharingDialog');\n            
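// unlike the writable project above, no permission level is selected here, so the dialog's default (read access) applies\n            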
cy.get('[data-cy=invite-people-field]').find('input').type(activeUser.user.email);\n            cy.get('[data-cy=\"loading-spinner\"]').should('not.exist');\n            cy.get('[data-cy=\"users-tab-label\"]').click();\n            cy.get('[data-cy=sharing-suggestion]').click();\n            cy.get('@sharingDialog').within(() => {\n                cy.get('[data-cy=add-invited-people]').click();\n                cy.contains('Close').click({ waitForAnimations: false });\n            });\n        });\n\n        cy.loginAs(activeUser);\n        cy.doSidePanelNavigation('Home Projects');\n        cy.createGroup(activeUser.token, {\n            name: `non-admin-readonly-project ${Math.floor(Math.random() * 999999)}`,\n            group_class: 'project',\n        }).as('nonAdminReadonlyProject').then(function (nonAdminReadonlyProject) {\n            cy.createWorkflow(activeUser.token, {\n                name: `(non-admin, readonly) TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n                definition: testWFDefinition,\n                owner_uuid: nonAdminReadonlyProject.uuid,\n                }).as('nonAdminReadonlyWF');\n            cy.contains('Refresh').click();\n            cy.doMPVTabSelect(\"Data\");\n            cy.get('main').contains(nonAdminReadonlyProject.name).rightclick();\n            cy.get('[data-cy=context-menu]').within(() => {\n                cy.contains('Share').click({ waitForAnimations: false });\n            });\n            cy.get('.sharing-dialog').as('sharingDialog');\n            cy.get('[data-cy=invite-people-field]').find('input').type(adminUser.user.email);\n            cy.get('[data-cy=\"loading-spinner\"]').should('not.exist');\n            cy.get('[data-cy=\"users-tab-label\"]').click();\n            cy.get('[data-cy=sharing-suggestion]').click();\n            cy.get('@sharingDialog').within(() => {\n                cy.get('[data-cy=add-invited-people]').click();\n                cy.contains('Close').click({ waitForAnimations: false });\n            });\n        });\n\n        cy.getAll('@parentWritableWF', '@parentReadonlyWF', '@mySharedWritableProject', '@mySharedReadonlyProject', '@nonAdminReadonlyProject', '@nonAdminReadonlyWF')\n        .then(([parentWritableWF, parentReadonlyWF, mySharedWritableProject, mySharedReadonlyProject, nonAdminReadonlyProject, nonAdminReadonlyWF]) => {\n            // already logged in as activeUser from previous step\n\n            // a non-admin can run a wf in a writable project\n            cy.contains('Shared with me').click();\n            cy.contains(mySharedWritableProject.name).click();\n            cy.waitForDom();\n            cy.doMPVTabSelect(\"Data\");\n            cy.get('[data-cy=data-table]').should('exist');\n            cy.contains(parentWritableWF.name).click();\n            cy.get('[data-title=\"Run Workflow\"]').click();\n            cy.get('[data-cy=project-picker-details]').contains(mySharedWritableProject.name);\n            cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n            cy.get(`input[value=\"${mySharedWritableProject.name}\"]`).should('exist');\n\n            // a non-admin cannot run a wf in a non-writable project, it defaults to the user's root project instead\n            cy.contains('Shared with me').click();\n            cy.contains(mySharedReadonlyProject.name).click();\n            cy.doMPVTabSelect(\"Data\");\n            cy.contains(parentReadonlyWF.name).click();\n            cy.get('[data-title=\"Run Workflow\"]').click();\n            
cy.get('[data-cy=project-picker-details]').contains(\"Active User (root project)\");\n            cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n            cy.get(`input[value=\"Active User (root project)\"]`).should('exist');\n\n            //using +NEW button in Home Projects should default to user's root project\n            cy.contains('Home Projects').click();\n            cy.get('[data-cy=side-panel-button]').click();\n            cy.get('[data-cy=side-panel-run-process]').click();\n            cy.contains(parentWritableWF.name).click();\n            cy.get('[data-cy=run-process-next-button]').click();\n            cy.get('[data-cy=project-picker-details]').contains(\"Active User (root project)\");\n            cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n\n            //using +NEW button to run a wf in a writable project should default to that writable project\n            cy.contains('Shared with me').click();\n            cy.contains(mySharedWritableProject.name).click();\n            cy.get('[data-cy=side-panel-button]').click();\n            cy.get('[data-cy=side-panel-run-process]').click();\n            cy.contains(parentWritableWF.name).click();\n            cy.get('[data-cy=run-process-next-button]').click();\n            cy.get('[data-cy=project-picker-details]').contains(mySharedWritableProject.name);\n            cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n\n            // admin should be able to launch wf in shared readonly project\n            cy.loginAs(adminUser);\n            cy.contains('Shared with me').click();\n            cy.contains(nonAdminReadonlyProject.name).click();\n            cy.get('[data-cy=side-panel-button]').click();\n            cy.get('[data-cy=side-panel-run-process]').click();\n            cy.contains(nonAdminReadonlyWF.name).click();\n            cy.get('[data-cy=run-process-next-button]').click();\n            cy.get('[data-cy=project-picker-details]').contains(nonAdminReadonlyProject.name);\n            cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n        });\n    });\n\n    ['workflow_with_array_fields.yaml', 'workflow_with_default_array_fields.yaml'].forEach((yamlfile) =>\n    it('can select multi files when creating workflow '+yamlfile, () => {\n        cy.createProject({\n            owningUser: activeUser,\n            projectName: 'myProject1',\n            addToFavorites: true\n        });\n\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n. 37b51d194a7513e45b56f6524f2d51f2+3 0:3:baz\\n\"\n        })\n            .as('testCollection');\n\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: `. 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:buz\\n`\n        })\n            .as('testCollection2');\n\n        cy.getAll('@myProject1', '@testCollection', '@testCollection2')\n            .then(function ([myProject1, testCollection, testCollection2]) {\n                cy.readFile('cypress/fixtures/'+yamlfile).then(workflow => {\n                    cy.createWorkflow(adminUser.token, {\n                        name: `TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n                        definition: workflow,\n                        owner_uuid: myProject1.uuid,\n                    })\n                        .as('testWorkflow');\n                });\n\n                cy.loginAs(activeUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                cy.doMPVTabSelect(\"Data\");\n\n                cy.get('main').contains(myProject1.name).click();\n\n                cy.waitForDom();\n\n                cy.get('[data-cy=side-panel-button]').click();\n\n                cy.get('#aside-menu-list').contains('Run a workflow').click();\n\n                cy.get('@testWorkflow')\n                    .then((testWorkflow) => {\n                        cy.get('main').contains(testWorkflow.name).click();\n                        cy.get('[data-cy=run-process-next-button]').click();\n                        cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n\n                        cy.get('label').contains('foo').parent('div').find('input').click();\n                        cy.get('div[role=dialog]')\n                            .within(() => {\n                                // must use .then to avoid selecting instead of expanding https://github.com/cypress-io/cypress/issues/5529\n                                cy.get('p').contains('Home Projects').closest('ul')\n                                    .find('i')\n                                    .then(el => el.click());\n\n                                cy.get(`[data-id=${testCollection.uuid}]`)\n                                    .find('i').click();\n\n                                cy.wait(1000);\n                                cy.contains('bar').closest('[data-action=TOGGLE_ACTIVE]').parent().find('input[type=checkbox]').click();\n                                cy.contains('baz').closest('[data-action=TOGGLE_ACTIVE]').parent().find('input[type=checkbox]').click();\n\n                                cy.get('[data-cy=ok-button]').click();\n                            });\n\n                        cy.get('label').contains('bar').parent('div').find('input').click();\n                        cy.get('div[role=dialog]')\n                            .within(() => {\n                                // must use .then to avoid selecting instead of expanding https://github.com/cypress-io/cypress/issues/5529\n                                cy.get('p').contains('Home Projects').closest('ul')\n                                    .find('i')\n                                    .then(el => el.click());\n\n                                cy.get(`[data-id=${testCollection.uuid}]`)\n                                    .find('input[type=checkbox]').click();\n\n                                cy.get(`[data-id=${testCollection2.uuid}]`)\n                                    .find('input[type=checkbox]').click();\n\n                                cy.get('[data-cy=ok-button]').click();\n                            });\n                    });\n\n                cy.get('label').contains('foo').parent('div')\n                    
.within(() => {\n                        cy.contains('baz');\n                        cy.contains('bar');\n                    });\n\n                cy.get('label').contains('bar').parent('div')\n                    .within(() => {\n                        cy.contains(testCollection.name);\n                        cy.contains(testCollection2.name);\n                    });\n            });\n    }));\n\n    it('allows selecting collection subdirectories and reselects existing selections', () => {\n        cy.createProject({\n            owningUser: activeUser,\n            projectName: 'myProject1',\n            addToFavorites: true\n        });\n\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \"./subdir/dir1 d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\056\\n./subdir/dir2 d41d8cd98f00b204e9800998ecf8427e+0 0:0:\\\\056\\n\"\n        })\n            .as('testCollection');\n\n        cy.getAll('@myProject1', '@testCollection')\n            .then(function ([myProject1, testCollection]) {\n                cy.readFile('cypress/fixtures/workflow_directory_array.yaml').then(workflow => {\n                    cy.createWorkflow(adminUser.token, {\n                        name: `TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n                        definition: workflow,\n                        owner_uuid: myProject1.uuid,\n                    })\n                        .as('testWorkflow');\n                });\n\n                cy.loginAs(activeUser);\n                cy.doSidePanelNavigation('Home Projects');\n                cy.doMPVTabSelect(\"Data\");\n\n                cy.get('main').contains(myProject1.name).click();\n\n                cy.waitForDom();\n\n                cy.get('[data-cy=side-panel-button]').click();\n\n                cy.get('#aside-menu-list').contains('Run a workflow').click();\n\n                cy.get('@testWorkflow')\n                    .then((testWorkflow) => {\n                        cy.get('main').contains(testWorkflow.name).click();\n                        cy.get('[data-cy=run-process-next-button]').click();\n                        cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n\n                        cy.get('label').contains('directoryInputName').parent('div').find('input').click();\n                        cy.get('div[role=dialog]')\n                            .within(() => {\n                                // must use .then to avoid selecting instead of expanding https://github.com/cypress-io/cypress/issues/5529\n                                cy.get('p').contains('Home Projects').closest('ul')\n                                    .find('i')\n                                    .then(el => el.click());\n\n                                cy.get(`[data-id=${testCollection.uuid}]`)\n                                    .find('i').click();\n\n                                cy.get(`[data-id=\"${testCollection.uuid}/subdir\"]`)\n                                    .find('i').click();\n\n                                cy.contains('dir1').closest('[data-action=TOGGLE_ACTIVE]').parent().find('input[type=checkbox]').click();\n                                cy.contains('dir2').closest('[data-action=TOGGLE_ACTIVE]').parent().find('input[type=checkbox]').click();\n\n                                cy.get('[data-cy=ok-button]').click();\n                            });\n\n                     
   // Verify subdirectories were selected\n                        cy.get('label').contains('directoryInputName').parent('div')\n                            .within(() => {\n                                cy.contains('dir1');\n                                cy.contains('dir2');\n                            });\n\n                        // Reopen tree picker and verify subdirectories are preselected\n                        cy.get('label').contains('directoryInputName').parent('div').find('input').click();\n                        cy.waitForDom().get('div[role=dialog]')\n                            .within(() => {\n                                cy.contains('dir1').closest('[data-action=TOGGLE_ACTIVE]').parent().find('input[type=checkbox]').should('be.checked');\n                                cy.contains('dir2').closest('[data-action=TOGGLE_ACTIVE]').parent().find('input[type=checkbox]').should('be.checked');\n                            });\n                    });\n\n            });\n    })\n\n    it('handles secret inputs', () => {\n        cy.createProject({\n            owningUser: activeUser,\n            projectName: 'myProject1',\n            addToFavorites: true\n        });\n\n        cy.setupDockerImage(\"arvados/jobs\").as(\"dockerImg\");\n\n        cy.getAll('@myProject1').then(function ([myProject1]) {\n                cy.readFile('cypress/fixtures/workflow_with_secret_input.yaml').then(workflow => {\n                    cy.createWorkflow(adminUser.token, {\n                        name: `TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n                        definition: workflow,\n                        owner_uuid: myProject1.uuid,\n                    })\n                        .as('testWorkflow');\n                });\n\n                cy.loginAs(activeUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                cy.doMPVTabSelect(\"Data\");\n\n                cy.get('main').contains(myProject1.name).click();\n\n                cy.get('[data-cy=side-panel-button]').click();\n\n                cy.get('#aside-menu-list').contains('Run a workflow').click();\n\n                cy.get('@testWorkflow')\n                    .then((testWorkflow) => {\n                        cy.get('main').contains(testWorkflow.name).click();\n                        cy.get('[data-cy=run-process-next-button]').click();\n                        cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n\n                        var foo = cy.get('label').contains('foo').parent('div').find('input');\n                        foo.type(\"secret_value_xyz\");\n                        foo.should('have.attr', 'type').and('equal', 'password');\n\n                        var bar = cy.get('label').contains('bar').parent('div').find('input');\n                        bar.type(\"exposed_value_xyz\");\n                        bar.should('have.attr', 'type').and('equal', 'text');\n                    });\n            cy.get('[data-cy=new-process-panel]').contains('Run workflow').click();\n\n            cy.doMPVTabSelect(\"Inputs\");\n            cy.get('[data-cy=process-io-card]').should('contain', 'exposed_value_xyz');\n            cy.get('[data-cy=process-io-card]').should('contain', 'Cannot display secret');\n            cy.get('[data-cy=process-io-card]').should('not.contain', 'secret_value_xyz');\n\n            cy.url().then((url) => {\n                let uuid = url.split('/').pop();\n                cy.getResource(activeUser.token, \"container_requests\", 
uuid).then((res) => {\n                    expect(res.mounts[\"/var/lib/cwl/cwl.input.json\"].content.bar).to.equal('exposed_value_xyz');\n                    expect(res.mounts[\"/var/lib/cwl/cwl.input.json\"].content.foo).to.deep.equal({$include: '/secrets/s0'});\n                });\n            });\n\n        });\n    });\n\n    it('handles optional inputs', () => {\n        cy.intercept({ method: \"POST\", url: \"**/arvados/v1/container_requests\" }, (req) => {\n            const inputs = req.body.container_request.mounts[\"/var/lib/cwl/cwl.input.json\"].content;\n            expect(inputs).to.deep.equal({\n                int_input: null,\n                empty_string_input: null,\n                string_input: \"foo\"\n            });\n\n            // stub a success response so the request never reaches the real API, which would reject it with a 422\n            req.reply({\n                statusCode: 200,\n                body: { message: 'Expected 422 error' },\n            });\n        }).as(\"mockedRequest\");\n\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'myProject1',\n        });\n\n        cy.setupDockerImage(\"arvados/jobs\").as(\"dockerImg\");\n\n        cy.getAll('@myProject1').then(function ([myProject1]) {\n                cy.readFile('cypress/fixtures/workflow-with-optional-inputs.yaml').then(workflow => {\n                    cy.createWorkflow(adminUser.token, {\n                        name: `TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n                        definition: workflow,\n                        owner_uuid: myProject1.uuid,\n                    })\n                        .as('testWorkflow');\n                });\n\n                cy.loginAs(adminUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                cy.doMPVTabSelect(\"Data\");\n\n                cy.get('main').contains(myProject1.name).click();\n\n                cy.get('[data-cy=side-panel-button]').click();\n\n                cy.get('#aside-menu-list').contains('Run a workflow').click();\n\n                cy.get('@testWorkflow')\n                    .then((testWorkflow) => {\n                        cy.get('main').contains(testWorkflow.name).click();\n                        cy.get('[data-cy=run-process-next-button]').click();\n                        cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n\n                        var int_input = cy.get('label').contains('int_input').parent('div').find('input');\n                        var string_input = cy.get('label').contains('string_input').parent('div').find('input');\n                        var empty_string_input = cy.get('label').contains('empty_string_input').parent('div').find('input');\n\n                        string_input.type(\"foo\");\n\n                        //both inputs are optional, so they should be null instead of empty strings\n                        int_input.type(\"123{backspace}{backspace}{backspace}\");\n                        empty_string_input.type(\"bar{backspace}{backspace}{backspace}\");\n                    });\n\n                cy.get('[data-cy=new-process-panel]').contains('Run workflow').click();\n\n                cy.wait('@mockedRequest');\n        });\n    });\n})\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/dashboard.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe('Main Dashboard', () => {\n    let activeUser;\n    let adminUser;\n\n    const sectionsTitles = [\n        'Favorites',\n        'Recently Visited',\n        'Recent Workflow Runs',\n    ];\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser')\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    it('displays the appropriate sections', () => {\n        cy.loginAs(activeUser);\n        cy.get('[data-cy=tree-top-level-item]').contains('Dashboard').click();\n        cy.get('[data-cy=dashboard-root]').should('exist');\n        cy.get('[data-cy=breadcrumbs]').contains('Dashboard');\n        cy.get('[data-cy=dashboard-root] [data-cy=dashboard-section]').should('have.length', sectionsTitles.length);\n        sectionsTitles.forEach(title => {\n            cy.get('[data-cy=dashboard-section]').contains(title).should('exist');\n        });\n    });\n});\n\ndescribe('Favorites section', () => {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. 
Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser')\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    it('displays the favorites section', () => {\n        cy.loginAs(activeUser);\n        cy.get('[data-cy=tree-top-level-item]').contains('Dashboard').click();\n        cy.get('[data-cy=dashboard-section]').contains('Favorites').should('exist');\n    });\n\n    it('handles favorite pins operations', () => {\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject1',\n            addToFavorites: true,\n        }).as('testProject1');\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject2',\n            addToFavorites: true,\n        }).as('testProject2');\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject3',\n            addToFavorites: true,\n        }).as('testProject3');\n        cy.getAll('@testProject1', '@testProject2', '@testProject3').then(\n            ([testProject1, testProject2, testProject3]) => {\n                cy.loginAs(adminUser);\n                cy.get('[data-cy=tree-top-level-item]').contains('Dashboard').click();\n                cy.get('[data-cy=dashboard-section]').contains('Favorites').should('exist');\n\n                //verify favorite pins\n                cy.get('[data-cy=favorite-pin]').should('have.length', 3);\n                cy.get('[data-cy=favorite-pin]').eq(0).contains('TestProject3')\n                cy.get('[data-cy=favorite-pin]').eq(1).contains('TestProject2')\n                cy.get('[data-cy=favorite-pin]').eq(2).contains('TestProject1')\n\n                //remove favorite pin\n                cy.get(`[data-cy=${testProject1.head_uuid}-star]`).click();\n                cy.get('[data-cy=favorite-pin]').should('have.length', 2);\n                cy.get('[data-cy=favorite-pin]').contains('TestProject1').should('not.exist');\n\n                //add favorite pin\n                cy.doSidePanelNavigation('Home Projects');\n                cy.doMPVTabSelect(\"Data\");\n                cy.get('[data-cy=data-table-row]').contains('TestProject1').rightclick();\n                cy.get('[data-cy=context-menu]').contains('Add to favorites').click();\n                cy.doSidePanelNavigation('Dashboard');\n                cy.get('[data-cy=favorite-pin]').should('have.length', 3);\n                cy.get('[data-cy=favorite-pin]').contains('TestProject1');\n\n                //verify ordered by last favorited\n                cy.get('[data-cy=favorite-pin]').eq(0).contains('TestProject1');\n                cy.get('[data-cy=favorite-pin]').eq(1).contains('TestProject3');\n                cy.get('[data-cy=favorite-pin]').eq(2).contains('TestProject2');\n\n                //opens context menu\n                cy.get('[data-cy=favorite-pin]').contains('TestProject1').rightclick();\n                cy.get('[data-cy=context-menu]').contains('TestProject1');\n                cy.get('body').click();\n\n                //navs to item\n                
cy.get('[data-cy=favorite-pin]').contains('TestProject1').click();\n                cy.get('[data-cy=project-details-card]').contains('TestProject1').should('exist');\n            });\n        });\n});\n\ndescribe('Recently Visited section', () => {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser')\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    it('displays the recently visited section', () => {\n        cy.loginAs(activeUser);\n        cy.get('[data-cy=tree-top-level-item]').contains('Dashboard').click();\n        cy.get('[data-cy=dashboard-section]').contains('Recently Visited').should('exist');\n    });\n\n    it('handles recently visited operations', () => {\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject1',\n        }).as('testProject1');\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject2',\n        }).as('testProject2');\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject3',\n        }).as('testProject3');\n        cy.getAll('@testProject1', '@testProject2', '@testProject3').then(\n            ([testProject1, testProject2, testProject3]) => {\n                cy.loginAs(adminUser);\n\n                // visit some projects\n                cy.doSidePanelNavigation('Home Projects');\n                cy.get('[data-cy=side-panel-tree]').contains(testProject1.name).click();\n                cy.get('[data-cy=project-details-card]').contains(testProject1.name).should('exist');\n                cy.get('[data-cy=side-panel-tree]').contains(testProject2.name).click();\n                cy.get('[data-cy=project-details-card]').contains(testProject2.name).should('exist');\n                cy.get('[data-cy=side-panel-tree]').contains(testProject3.name).click();\n                cy.get('[data-cy=project-details-card]').contains(testProject3.name).should('exist');\n\n                // verify recently visited\n                cy.get('[data-cy=tree-top-level-item]').contains('Dashboard').click();\n                cy.get('[data-cy=dashboard-section]').contains('Recently Visited').should('exist');\n                cy.get('[data-cy=dashboard-item-row]').should('have.length', 3);\n                cy.get('[data-cy=dashboard-item-row]').eq(0).contains(testProject3.name);\n                cy.get('[data-cy=dashboard-item-row]').eq(1).contains(testProject2.name);\n                cy.get('[data-cy=dashboard-item-row]').eq(2).contains(testProject1.name);\n\n                // opens context menu\n                cy.get('[data-cy=dashboard-item-row]').contains(testProject1.name).rightclick();\n                cy.get('[data-cy=context-menu]').contains(testProject1.name);\n                cy.get('body').click();\n\n                // navs to item\n                
cy.get('[data-cy=dashboard-item-row]').contains(testProject1.name).click();\n                cy.get('[data-cy=project-details-card]').contains(testProject1.name).should('exist');\n\n                // verify recently visited order has changed\n                cy.get('[data-cy=tree-top-level-item]').contains('Dashboard').click();\n                cy.get('[data-cy=dashboard-section]').contains('Recently Visited').should('exist');\n                cy.get('[data-cy=dashboard-item-row]').should('have.length', 3);\n                cy.get('[data-cy=dashboard-item-row]').eq(0).contains(testProject1.name);\n                cy.get('[data-cy=dashboard-item-row]').eq(1).contains(testProject3.name);\n                cy.get('[data-cy=dashboard-item-row]').eq(2).contains(testProject2.name);\n            });\n        });\n});\n\ndescribe('Recent Workflow Runs section', () => {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser')\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    it('displays the recent workflow runs section', () => {\n        cy.loginAs(activeUser);\n        cy.get('[data-cy=tree-top-level-item]').contains('Dashboard').click();\n        cy.get('[data-cy=dashboard-section]').contains('Recent Workflow Runs').should('exist');\n    });\n\n    it('handles recent workflow runs operations', () => {\n        cy.setupDockerImage('arvados/jobs')\n            .then((dockerImage) => {\n                cy.createDefaultContainerRequest(\n                    adminUser.token,\n                    dockerImage,\n                    { name: \"test_container_request_1\", state: \"Committed\" },\n                ).as(\"containerRequest1\");\n                cy.createDefaultContainerRequest(\n                    adminUser.token,\n                    dockerImage,\n                    { name: \"test_container_request_2\", state: \"Committed\" },\n                ).as(\"containerRequest2\");\n                cy.createDefaultContainerRequest(\n                    adminUser.token,\n                    dockerImage,\n                    { name: \"test_container_request_3\", state: \"Committed\" },\n                ).as(\"containerRequest3\");\n            });\n        cy.getAll(\"@containerRequest1\", \"@containerRequest2\", \"@containerRequest3\")\n            .then(function ([containerRequest1, containerRequest2, containerRequest3]) {\n                cy.loginAs(adminUser);\n\n                // verify recent workflow runs\n                cy.get('[data-cy=tree-top-level-item]').contains('Dashboard').click();\n                cy.get('[data-cy=dashboard-section]').contains('Recent Workflow Runs').should('exist');\n                cy.get('[data-cy=dashboard-item-row]').should('have.length', 3);\n                cy.get('[data-cy=dashboard-item-row]').eq(0).contains(containerRequest3.name);\n                
cy.get('[data-cy=dashboard-item-row]').eq(1).contains(containerRequest2.name);\n                cy.get('[data-cy=dashboard-item-row]').eq(2).contains(containerRequest1.name);\n\n                // open context menu\n                cy.get('[data-cy=dashboard-item-row]').contains(containerRequest1.name).rightclick();\n                cy.get('[data-cy=context-menu]').contains(containerRequest1.name);\n                cy.get('body').click();\n\n                // navs to item\n                cy.get('[data-cy=dashboard-item-row]').contains(containerRequest1.name).click();\n                cy.get('[data-cy=process-details-card]').contains(containerRequest1.name).should('exist');\n            });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/delete-multiple-files.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe('Multi-file deletion tests', function () {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser').then(function () {\n                adminUser = this.adminUser;\n            }\n            );\n        cy.getUser('collectionuser1', 'Collection', 'User', false, true)\n            .as('activeUser').then(function () {\n                activeUser = this.activeUser;\n            }\n            );\n    });\n\n    it('deletes all files from root dir', function () {\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n. 37b51d194a7513e45b56f6524f2d51f2+3 0:3:baz\\n\"\n        })\n            .as('testCollection').then(function () {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${this.testCollection.uuid}`);\n                cy.doMPVTabSelect(\"Files\");\n\n                cy.get('[data-cy=collection-files-panel]').within(() => {\n                    cy.get('[type=\"checkbox\"]').first().check();\n                    cy.get('[type=\"checkbox\"]').last().check();\n                });\n                cy.get('[data-cy=collection-files-panel-options-btn]').click();\n                cy.get('[data-cy=context-menu] div').contains('Remove selected').click();\n                cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n                cy.wait(1000);\n                cy.get('[data-cy=collection-files-panel]')\n                    .should('not.contain', 'baz')\n                    .and('not.contain', 'bar');\n            });\n    });\n\n    it.skip('deletes all files from non root dir', function () {\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \"./subdir 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo\\n. 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:baz\\n\"\n        })\n            .as('testCollection').then(function () {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${this.testCollection.uuid}`);\n\n                cy.get('[data-cy=virtual-file-tree] > div > i').first().click();\n                cy.get('[data-cy=collection-files-panel]')\n                    .should('contain', 'foo');\n\n                cy.get('[data-cy=collection-files-panel]')\n                    .contains('foo').closest('[data-cy=virtual-file-tree]').find('[type=\"checkbox\"]').click();\n\n                cy.get('[data-cy=collection-files-panel-options-btn]').click();\n                cy.get('[data-cy=context-menu] div').contains('Remove selected').click();\n                cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n\n                cy.get('[data-cy=collection-files-panel]')\n                    .should('not.contain', 'subdir')\n                    .and('contain', 'baz');\n            });\n    });\n\n    it('deletes all files from non root dir', function () {\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \"./subdir 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo\\n. 37b51d194a7513e45b56f6524f2d51f2+3 0:3:baz\\n\"\n        })\n            .as('testCollection').then(function () {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${this.testCollection.uuid}`);\n                cy.doMPVTabSelect(\"Files\");\n\n                cy.get('[data-cy=collection-files-panel]').contains('subdir').click();\n                cy.wait(1000);\n                cy.get('[data-cy=collection-files-panel]')\n                    .should('contain', 'foo');\n\n                cy.get('[data-cy=collection-files-panel]')\n                    .contains('foo').parent().find('[type=\"checkbox\"]').click();\n\n                cy.get('[data-cy=collection-files-panel-options-btn]').click();\n                cy.get('[data-cy=context-menu] div').contains('Remove selected').click();\n                cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n\n                cy.get('[data-cy=collection-files-panel]')\n                    .should('not.contain', 'foo')\n                    .and('contain', 'subdir');\n            });\n    });\n})\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/details-card.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nconst testWFDefinition = \"{\\n    \\\"$graph\\\": [\\n        {\\n            \\\"class\\\": \\\"Workflow\\\",\\n            \\\"doc\\\": \\\"Reverse the lines in a document, then sort those lines.\\\",\\n            \\\"hints\\\": [\\n                {\\n                    \\\"acrContainerImage\\\": \\\"99b0201f4cade456b4c9d343769a3b70+261\\\",\\n                    \\\"class\\\": \\\"http://arvados.org/cwl#WorkflowRunnerResources\\\"\\n                }\\n            ],\\n            \\\"id\\\": \\\"#main\\\",\\n            \\\"inputs\\\": [\\n                {\\n                    \\\"default\\\": null,\\n                    \\\"doc\\\": \\\"The input file to be processed.\\\",\\n                    \\\"id\\\": \\\"#main/input\\\",\\n                    \\\"type\\\": \\\"File\\\"\\n                },\\n                {\\n                    \\\"default\\\": true,\\n                    \\\"doc\\\": \\\"If true, reverse (decending) sort\\\",\\n                    \\\"id\\\": \\\"#main/reverse_sort\\\",\\n                    \\\"type\\\": \\\"boolean\\\"\\n                }\\n            ],\\n            \\\"outputs\\\": [\\n                {\\n                    \\\"doc\\\": \\\"The output with the lines reversed and sorted.\\\",\\n                    \\\"id\\\": \\\"#main/output\\\",\\n                    \\\"outputSource\\\": \\\"#main/sorted/output\\\",\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"steps\\\": [\\n                {\\n                    \\\"id\\\": \\\"#main/rev\\\",\\n                    \\\"in\\\": [\\n                        {\\n                            \\\"id\\\": \\\"#main/rev/input\\\",\\n                            \\\"source\\\": \\\"#main/input\\\"\\n                        }\\n                    ],\\n                    \\\"out\\\": [\\n                        \\\"#main/rev/output\\\"\\n                    ],\\n                    \\\"run\\\": \\\"#revtool.cwl\\\"\\n                },\\n                {\\n                    \\\"id\\\": \\\"#main/sorted\\\",\\n                    \\\"in\\\": [\\n                        {\\n                            \\\"id\\\": \\\"#main/sorted/input\\\",\\n                            \\\"source\\\": \\\"#main/rev/output\\\"\\n                        },\\n                        {\\n                            \\\"id\\\": \\\"#main/sorted/reverse\\\",\\n                            \\\"source\\\": \\\"#main/reverse_sort\\\"\\n                        }\\n                    ],\\n                    \\\"out\\\": [\\n                        \\\"#main/sorted/output\\\"\\n                    ],\\n                    \\\"run\\\": \\\"#sorttool.cwl\\\"\\n                }\\n            ]\\n        },\\n        {\\n            \\\"baseCommand\\\": \\\"rev\\\",\\n            \\\"class\\\": \\\"CommandLineTool\\\",\\n            \\\"doc\\\": \\\"Reverse each line using the `rev` command\\\",\\n            \\\"hints\\\": [\\n                {\\n                    \\\"class\\\": \\\"ResourceRequirement\\\",\\n                    \\\"ramMin\\\": 8\\n                }\\n            ],\\n            \\\"id\\\": \\\"#revtool.cwl\\\",\\n            \\\"inputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#revtool.cwl/input\\\",\\n                    \\\"inputBinding\\\": {},\\n                    \\\"type\\\": \\\"File\\\"\\n               
 }\\n            ],\\n            \\\"outputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#revtool.cwl/output\\\",\\n                    \\\"outputBinding\\\": {\\n                        \\\"glob\\\": \\\"output.txt\\\"\\n                    },\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"stdout\\\": \\\"output.txt\\\"\\n        },\\n        {\\n            \\\"baseCommand\\\": \\\"sort\\\",\\n            \\\"class\\\": \\\"CommandLineTool\\\",\\n            \\\"doc\\\": \\\"Sort lines using the `sort` command\\\",\\n            \\\"hints\\\": [\\n                {\\n                    \\\"class\\\": \\\"ResourceRequirement\\\",\\n                    \\\"ramMin\\\": 8\\n                }\\n            ],\\n            \\\"id\\\": \\\"#sorttool.cwl\\\",\\n            \\\"inputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#sorttool.cwl/reverse\\\",\\n                    \\\"inputBinding\\\": {\\n                        \\\"position\\\": 1,\\n                        \\\"prefix\\\": \\\"-r\\\"\\n                    },\\n                    \\\"type\\\": \\\"boolean\\\"\\n                },\\n                {\\n                    \\\"id\\\": \\\"#sorttool.cwl/input\\\",\\n                    \\\"inputBinding\\\": {\\n                        \\\"position\\\": 2\\n                    },\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"outputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#sorttool.cwl/output\\\",\\n                    \\\"outputBinding\\\": {\\n                        \\\"glob\\\": \\\"output.txt\\\"\\n                    },\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"stdout\\\": \\\"output.txt\\\"\\n        }\\n    ],\\n    \\\"cwlVersion\\\": \\\"v1.0\\\"\\n}\"\nconst ResourceKinds = {\n    USER: 'user',\n    PROJECT: 'project',\n    WORKFLOW: 'workflow',\n    COLLECTION: 'collection',\n    PROCESS: 'process',\n};\n\n\ndescribe('Base Details Card tests', function () {\n    let adminUser;\n    let dockerImage;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. 
Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.on('uncaught:exception', (err, runnable) => {\n            console.error(err);\n        });\n    });\n\n    beforeEach(function () {\n        cy.clearCookies();\n        cy.clearLocalStorage();\n        // Since setupDockerImage uses createCollection\n        // it will be cleaned up after every test even if we\n        // do this in before()\n        cy.setupDockerImage('arvados/jobs')\n            .as('dockerImageAlias')\n            .then((dockerImageAlias) => {\n                dockerImage = dockerImageAlias;\n            });\n    });\n\n    Object.values(ResourceKinds).forEach(resourceKind => {\n        it(`Should display the ${resourceKind} details card`, () => {\n            const { name, createResource, navToResource, extraAssertions } = getCardTestParams(dockerImage, adminUser, resourceKind);\n\n            createResource();\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n            navToResource();\n\n            cy.get(`[data-cy=${resourceKind}-details-card]`).should('be.visible');\n            cy.get(`[data-cy=${resourceKind}-details-card]`).contains(name).should('be.visible');\n            cy.get(`[data-cy=${resourceKind}-details-card]`).within(() => {\n                cy.get('[data-cy=multiselect-toolbar]').should('exist');\n            });\n\n            if (extraAssertions) extraAssertions();\n        });\n    });\n});\n\n\nconst getCardTestParams = (dockerImage, adminUser, resourceKind) => {\n    let name;\n    switch (resourceKind) {\n        case ResourceKinds.USER:\n            return {\n                name: adminUser.user.full_name,\n                createResource: () => {},\n                navToResource: () => {},\n            };\n\n        case ResourceKinds.PROJECT:\n            name = `Test project (${Math.floor(999999 * Math.random())})`;\n            return {\n                name,\n                createResource: () => cy.createProject({ owningUser: adminUser, projectName: name }),\n                navToResource: () => {\n                    cy.doMPVTabSelect(\"Data\");\n                    cy.get('main').contains(name).click()\n                },\n            };\n\n        case ResourceKinds.WORKFLOW:\n            name = `TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`;\n            return {\n                name,\n                createResource: () =>\n                    cy.createWorkflow(adminUser.token, {\n                        name,\n                        definition: testWFDefinition,\n                    }),\n                navToResource: () => {\n                    cy.doMPVTabSelect(\"Data\");\n                    cy.get('main').contains(name).click();\n                },\n            };\n\n        case ResourceKinds.COLLECTION:\n            name = `Test collection ${Math.floor(Math.random() * 999999)}`;\n            return {\n                name,\n                createResource: () =>\n                    cy.createCollection(adminUser.token, {\n                        name,\n                        owner_uuid: adminUser.user.uuid,\n                        manifest_text: '. 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n',\n                    }),\n                navToResource: () => {\n                    cy.doMPVTabSelect(\"Data\");\n                    cy.get('main').contains(name).click();\n                },\n            };\n\n        case ResourceKinds.PROCESS:\n            name = `Test container request ${Math.floor(Math.random() * 999999)}`;\n            return {\n                name,\n                createResource: () => cy.createDefaultContainerRequest(adminUser.token, dockerImage, { name, state: 'Committed' }),\n                navToResource: () => {\n                    cy.doMPVTabSelect(\"Workflow Runs\");\n                    cy.get('main').contains(name).click();\n                },\n                extraAssertions: () => {\n                    cy.get(`[data-cy=process-details-card]`).within(() => {\n                        cy.get('[data-cy=process-cancel-button]').should('exist');\n                        cy.get('[data-cy=process-status-chip]').should('exist');\n                    });\n                },\n            };\n\n        default:\n            throw new Error(`Unknown resource kind: ${resourceKind}`);\n    }\n};\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/details-panel.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe('Details panel', () => {\n  let adminUser;\n\n  before(() => {\n    cy.getUser(\"active\", \"Active\", \"User\", true, true)\n      .as(\"activeUser\")\n      .then((user) => {\n        adminUser = user;\n      });\n  });\n\n  // Add this test to the existing describe block in details-panel.cy.js\n\n  it('displays root project details when no items are selected', () => {\n    cy.loginAs(adminUser);\n\n    // Navigate to the user's root project\n    cy.visit(`/projects/${adminUser.user.uuid}`);\n\n    // Wait for the data table to load\n    cy.get('[data-cy=data-table]').should('be.visible');\n\n    // Ensure no items are selected\n    cy.get('[data-cy=data-table-row] input[type=\"checkbox\"]:checked').should('not.exist');\n\n    // Open the details panel\n    cy.get('[data-cy=details-panel]').should('not.exist');\n    cy.get('[data-testid=InfoIcon]').click();\n    cy.get('[data-cy=details-panel]').should('be.visible');\n\n    // Check if root project details are displayed\n    cy.get('[data-cy=details-panel]').within(() => {\n      cy.contains('Type').should('be.visible');\n      cy.contains('Root Project').should('be.visible');\n      cy.contains('User').should('be.visible');\n      cy.contains('Created at').should('be.visible');\n      cy.contains('UUID').should('be.visible');\n\n      // Verify specific root project details\n      cy.contains(adminUser.user.uuid).should('be.visible');\n    });\n\n    // Verify that the Root Project icon is displayed\n    cy.get('[data-cy=details-panel]').find('[data-testid=InboxIcon]').should('be.visible');\n  });\n});\n\ndescribe('Collection details panel', () => {\n  let adminUser;\n\n  before(() => {\n    cy.getUser(\"active\", \"Active\", \"User\", true, true)\n      .as(\"activeUser\")\n      .then((user) => {\n        adminUser = user;\n      });\n  });\n\n  it('displays appropriate attributes when a collection is selected', () => {\n    cy.loginAs(adminUser);\n\n    // Create a test collection\n    const collectionName = `Test Collection ${Math.floor(Math.random() * 999999)}`;\n    cy.createCollection(adminUser.token, {\n      name: collectionName,\n      owner_uuid: adminUser.user.uuid,\n      manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo\\n\",\n    }).as('testCollection');\n\n    // Navigate to the project containing the collection\n    cy.get('@testCollection').then((collection) => {\n      cy.visit(`/projects/${adminUser.user.uuid}`);\n      cy.waitForDom();\n\n      // Wait for the data table to load\n      cy.doMPVTabSelect(\"Data\");\n      cy.get('[data-cy=data-table]').should('be.visible');\n\n      // Find and check the checkbox for the test collection\n      cy.contains('[data-cy=data-table-row]', collectionName)\n        .find('input[type=\"checkbox\"]')\n        .click();\n\n      // Open the details panel\n      cy.get('[data-cy=details-panel]').should('not.exist');\n      cy.get('[data-testid=InfoIcon]').click();\n      cy.get('[data-cy=details-panel]').should('be.visible');\n\n      // Check if appropriate attributes are displayed\n      cy.get('[data-cy=details-panel]').within(() => {\n        cy.contains('Collection UUID').should('be.visible');\n        cy.contains('Portable data hash').should('be.visible');\n        cy.contains('Owner').should('be.visible');\n        cy.contains('Created at').should('be.visible');\n        cy.contains('Last modified').should('be.visible');\n        cy.contains('Content size').should('be.visible');\n        cy.contains('Number of files').should('be.visible');\n        cy.contains('Properties').should('be.visible');\n      });\n\n      // Verify specific collection details\n      cy.get('[data-cy=details-panel]').within(() => {\n        cy.contains(collection.uuid).should('be.visible');\n        cy.contains(collection.portable_data_hash).should('be.visible');\n        cy.contains(adminUser.user.uuid).should('be.visible');\n        cy.contains('1').should('be.visible'); // Number of files\n        cy.contains('3 B').should('be.visible'); // Content size\n      });\n    });\n  });\n\n  describe('Collection versioning', () => {\n    let adminUser;\n\n    before(() => {\n      cy.getUser(\"active\", \"Active\", \"User\", true, true)\n        .as(\"activeUser\")\n        .then((user) => {\n          adminUser = user;\n        });\n    });\n\n    it('creates a collection, edits it, and verifies version information', () => {\n      cy.loginAs(adminUser);\n\n      // Create a test collection\n      const collectionName = `Test Collection ${Math.floor(Math.random() * 999999)}`;\n      cy.createCollection(adminUser.token, {\n        name: collectionName,\n        owner_uuid: adminUser.user.uuid,\n        manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo\\n\",\n      }).as('testCollection');\n\n      cy.get('@testCollection').then((collection) => {\n        // Navigate to the project containing the collection\n        cy.visit(`/projects/${adminUser.user.uuid}`);\n        cy.waitForDom();\n\n        // Wait for the data table to load\n        cy.doMPVTabSelect(\"Data\");\n        cy.get('[data-cy=data-table]').should('be.visible');\n\n        // Edit the collection\n        cy.doDataExplorerContextAction(collectionName, \"Edit collection\");\n\n        // Change the name in the edit dialog\n        const newName = `${collectionName} (edited)`;\n        cy.get('[data-cy=form-dialog]').within(() => {\n          cy.get('input[name=name]').clear().type(newName);\n          cy.get('[data-cy=form-submit-btn]').click();\n        });\n\n        // Wait for the update to complete\n        cy.assertDataExplorerContains(newName);\n\n        // Open the collection\n        cy.doDataExplorerNavigate(newName);\n\n        // Navigate to overview\n        cy.doMPVTabSelect(\"Overview\");\n\n        // Verify that the version number has increased\n        cy.get('[data-cy=collection-version-number]').should('contain', '2');\n\n        // Click on the version number to open the details panel\n        cy.get('[data-cy=collection-version-number]').click();\n\n        // Verify that the details panel is open and the \"Versions\" tab is selected\n        cy.get('[data-cy=details-panel]').should('be.visible');\n        cy.get('[data-cy=details-panel-tab-Versions]').should('have.attr', 'aria-selected', 'true');\n\n        // Verify that the version number is visible in the details panel\n        cy.get('[data-cy=collection-version-browser]').within(() => {\n          cy.get('[data-cy=collection-version-browser-select-2]').should('be.visible');\n          cy.get('[data-cy=collection-version-browser-select-2]').should('have.class', 'Mui-selected');\n        });\n      });\n    });\n  });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/external-credentials.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport moment from 'moment';\n\ndescribe('External Credentials panel tests', function () {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        // Set up common users\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser')\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    beforeEach(() => {\n        cy.loginAs(adminUser);\n        cy.visit('/external_credentials');\n    });\n\n    it('displays empty state correctly', () => {\n        cy.get('[data-cy=new-credential-button]').should('be.visible');\n        cy.contains('External credentials list empty.').should('be.visible');\n    });\n\n    it('shows all expected columns', () => {\n        const expectedColumns = ['Name', 'Description', 'Credential class', 'External ID', 'Expires at', 'Scopes'];\n\n        expectedColumns.forEach((column) => {\n            cy.get('thead').contains(column).should('be.visible');\n        });\n    });\n\n    it('displays credential details correctly', () => {\n        const expirationDate = moment().add(1, 'year');\n        cy.createExternalCredential(adminUser.token, { expires_at: expirationDate }).then((credential) => {\n            cy.reload();\n\n            cy.contains(credential.name).should('be.visible');\n            cy.contains(credential.description).should('be.visible');\n            cy.contains(credential.credential_class).should('be.visible');\n            cy.contains(credential.external_id).should('be.visible');\n            cy.contains(expirationDate.format('M/D/YYYY')).should('be.visible');\n            cy.get('[data-cy=expired-badge]').should('not.exist');\n            cy.get('[data-cy=expiring-badge]').should('not.exist');\n            cy.contains(credential.scopes[0]).should('be.visible');\n            cy.contains(credential.scopes[1]).should('be.visible');\n        });\n    });\n\n    it('opens context menu on right click', () => {\n        cy.createExternalCredential(adminUser.token, { name: 'Context Menu Test', expires_at: moment().add(1, 'year') }).then((credential) => {\n            cy.reload();\n\n            cy.contains(credential.name).rightclick();\n            cy.get('[data-cy=context-menu]').should('be.visible');\n        });\n    });\n\n    it('displays expired and expiring badges correctly', () => {\n        const expiringDate = moment().add(1, 'month');\n        const expiredDate = moment().subtract(1, 'month');\n\n        cy.createExternalCredential(adminUser.token, { name: 'Expiring Test Credential', expires_at: expiringDate }).then((expiringCredential) => {\n            cy.createExternalCredential(adminUser.token, { name: 'Expired Test Credential', expires_at: expiredDate }).then((expiredCredential) => {\n                cy.reload();\n\n                cy.contains(expiringCredential.name).should('be.visible');\n                cy.get('[data-cy=expiring-badge]').should('be.visible');\n\n                cy.contains(expiredCredential.name).should('be.visible');\n                cy.get('[data-cy=expired-badge]').should('be.visible');\n            });\n        });\n    });\n\n    it('creates new credential with add button', () => {\n        const 
newCredentialName = `Test Credential ${Math.floor(Math.random() * 999999)}`;\n        cy.get('[data-cy=new-credential-button]').click();\n        cy.get('[data-cy=form-dialog]').should('be.visible').and('contain', 'New External Credential');\n        cy.get('[data-cy=form-submit-btn]').should('be.disabled');\n\n        // verify default values\n        cy.get('input[name=credentialClass]').should('have.value', 'arv:aws_access_key');\n        cy.get('[data-cy=date-picker-input]').should('have.value', moment().add(1, 'year').format('MM/DD/YYYY'));\n\n        cy.get('input[name=name]').type(newCredentialName);\n        cy.get('div[role=textbox]').type('Test Description');\n        cy.get('input[name=externalId]').type('Test External ID');\n        cy.get('input[name=string-array-input]').type('scope1{enter}');\n        cy.get('input[name=string-array-input]').type('scope2{enter}');\n        cy.get('input[name=secret]').type('test-secret');\n        cy.get('[data-cy=form-submit-btn]').should('not.be.disabled');\n\n        // modify default values\n        cy.get('input[name=credentialClass]').clear().type('foo');\n        cy.get('[data-cy=date-picker-input]').type('12/25/2099');\n\n        cy.get('[data-cy=form-submit-btn]').should('not.be.disabled');\n        cy.get('[data-cy=form-submit-btn]').click();\n\n        cy.contains(newCredentialName).should('be.visible');\n        cy.contains('Test Description').should('be.visible');\n        cy.contains('foo').should('be.visible');\n        cy.contains('Test External ID').should('be.visible');\n        cy.contains('scope1').should('be.visible');\n        cy.contains('scope2').should('be.visible');\n        cy.contains('12/25/2099').should('be.visible');\n\n        // remove credential\n        cy.contains(newCredentialName).rightclick();\n        cy.get('[data-cy=context-menu]').contains('Remove').click();\n        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n        cy.get('[data-cy=form-dialog]').should('not.exist');\n    });\n\n    it('edits an existing credential', () => {\n        const newCredentialName = `Test Credential ${Math.floor(Math.random() * 999999)}`;\n        const editCredentialName = `Edited Test Credential ${Math.floor(Math.random() * 999999)}`;\n        cy.createExternalCredential(adminUser.token, { name: newCredentialName, expires_at: moment().add(1, 'year') }).then((credential) => {\n            cy.reload();\n            cy.contains(credential.name).rightclick();\n            cy.get('[data-cy=context-menu]').contains('Edit').click();\n            cy.get('[data-cy=form-dialog]').should('be.visible').and('contain', 'Edit External Credential');\n\n            cy.get('input[name=name]').clear().type(editCredentialName);\n            cy.get('div[role=textbox]').clear().type('Edited Description');\n            cy.get('input[name=credentialClass]').clear().type('Edited Credential Class');\n            cy.get('input[name=externalId]').clear().type('Edited External ID');\n            cy.get('[data-cy=date-picker-input]').type('01/01/2100');\n            cy.get('input[name=secret]').should('have.value', '');\n            cy.get('input[name=string-array-input]').type('new scope{enter}');\n            //remove the first scope\n            cy.get('svg[data-testid=\"CancelIcon\"]').eq(0).click();\n\n            cy.get('[data-cy=form-submit-btn]').click();\n\n            cy.get('[data-cy=data-table]').contains(editCredentialName).should('be.visible');\n            cy.contains('Edited Description').should('be.visible');\n        
    cy.contains('Edited Credential Class').should('be.visible');\n            cy.contains('Edited External ID').should('be.visible');\n            cy.contains('new scope').should('be.visible');\n            cy.get('td[data-cy=6]').contains(`${credential.scopes[1]}, new scope`).should('be.visible');\n            cy.contains('1/1/2100').should('be.visible');\n\n            // remove credential\n            cy.contains(editCredentialName).rightclick();\n            cy.get('[data-cy=context-menu]').contains('Remove').click();\n            cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n            cy.get('[data-cy=form-dialog]').should('not.exist');\n        });\n    });\n});"
  },
  {
    "path": "services/workbench2/cypress/e2e/favorites.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nconst kebabCase = require('lodash/kebabCase');\n\ndescribe('Favorites tests', function () {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser').then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser('collectionuser1', 'Collection', 'User', false, true)\n            .as('activeUser').then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    it('creates and removes a public favorite', function () {\n        cy.loginAs(adminUser);\n\n        cy.createGroup(adminUser.token, {\n            name: `my-favorite-project`,\n            group_class: 'project',\n        }).as('myFavoriteProject')\n        cy.get('@myFavoriteProject').then(function (myFavoriteProject) {\n            cy.contains('Refresh').click();\n            cy.doSidePanelNavigation('Home Projects');\n            cy.doMPVTabSelect(\"Data\");\n            cy.get('main').contains(myFavoriteProject.name).rightclick();\n            cy.contains('Add to public favorites').click();\n            cy.contains('Public Favorites').click();\n            cy.get('main').contains(myFavoriteProject.name).rightclick();\n            cy.contains('Remove from public favorites').click();\n            cy.get('main').contains(myFavoriteProject.name).should('not.exist');\n            cy.trashGroup(adminUser.token, this.myFavoriteProject.uuid);\n        });\n    });\n\n    // Disabled while addressing #18587\n    it.skip('can copy selected into the collection', () => {\n        cy.createCollection(adminUser.token, {\n            name: `Test source collection ${Math.floor(Math.random() * 999999)}`,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n        }).as('testSourceCollection').then(function (testSourceCollection) {\n            cy.shareWith(adminUser.token, activeUser.user.uuid, testSourceCollection.uuid, 'can_read');\n        });\n        cy.createCollection(adminUser.token, {\n            name: `Test target collection ${Math.floor(Math.random() * 999999)}`,\n        }).as('testTargetCollection').then(function (testTargetCollection) {\n            cy.shareWith(adminUser.token, activeUser.user.uuid, testTargetCollection.uuid, 'can_write');\n            cy.addToFavorites(activeUser.token, activeUser.user.uuid, testTargetCollection.uuid);\n        });\n\n        cy.getAll('@testSourceCollection', '@testTargetCollection')\n            .then(function ([testSourceCollection, testTargetCollection]) {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/collections/${testSourceCollection.uuid}`);\n                cy.get('[data-cy=collection-files-panel]').contains('bar');\n                cy.get('[data-cy=collection-files-panel]').find('input[type=checkbox]').click();\n                cy.get('[data-cy=collection-files-panel-options-btn]').click();\n                cy.get('[data-cy=context-menu]')\n                    .contains('Copy selected into the collection').click();\n                cy.get('[data-cy=projects-tree-favourites-tree-picker]')\n                    .find('i')\n                    .click();\n                cy.get('[data-cy=projects-tree-favourites-tree-picker]')\n                    .contains(testTargetCollection.name)\n                    .click();\n                cy.get('[data-cy=form-submit-btn]').click();\n                cy.get('.layout-pane-primary').contains('Projects').click();\n                cy.goToPath(`/collections/${testTargetCollection.uuid}`);\n                cy.get('[data-cy=collection-files-panel]').contains('bar');\n            });\n    });\n\n    it('can copy collection to favorites', () => {\n        cy.createProject({\n            owningUser: adminUser,\n            targetUser: activeUser,\n            projectName: 'mySharedWritableProject',\n            canWrite: true,\n            addToFavorites: true\n        });\n        cy.createProject({\n            owningUser: adminUser,\n            targetUser: activeUser,\n            projectName: 'mySharedReadonlyProject',\n            canWrite: false,\n            addToFavorites: true\n        });\n        cy.createProject({\n            owningUser: activeUser,\n            projectName: 'myProject1',\n            addToFavorites: true\n        });\n\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n        })\n            .as('testCollection');\n\n        cy.getAll('@mySharedWritableProject', '@mySharedReadonlyProject', '@myProject1', '@testCollection')\n            .then(function ([mySharedWritableProject, mySharedReadonlyProject, myProject1, testCollection]) {\n                cy.loginAs(activeUser);\n                cy.doSidePanelNavigation('Home Projects');\n                cy.doMPVTabSelect(\"Data\");\n\n                cy.contains(testCollection.name).rightclick();\n                cy.get('[data-cy=\"Move to\"]').click();\n                cy.waitForDom();\n\n                cy.get('[data-cy=form-dialog]').within(function () {\n                    // must use .then to avoid selecting instead of expanding https://github.com/cypress-io/cypress/issues/5529\n                    cy.get('[data-cy=projects-tree-favourites-tree-picker]')\n                        .find('i')\n                        .then(el => el.click());\n                    cy.contains(myProject1.name);\n                    cy.contains(mySharedWritableProject.name);\n                    cy.get('[data-cy=projects-tree-favourites-tree-picker]')\n                        .should('not.contain', mySharedReadonlyProject.name);\n                    cy.contains(mySharedWritableProject.name).click();\n                    cy.get('[data-cy=form-submit-btn]').click();\n                });\n\n                cy.visit(`/projects/${mySharedWritableProject.uuid}`).then(() => {\n                    cy.doMPVTabSelect(\"Data\");\n                    cy.get('main').contains(testCollection.name);\n                });\n            });\n    });\n\n    it('can edit project and collections in favorites', () => {\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'mySharedWritableProject',\n            canWrite: true,\n            addToFavorites: true\n        });\n\n        cy.createCollection(adminUser.token, {\n            owner_uuid: adminUser.user.uuid,\n            name: `Test target collection ${Math.floor(Math.random() * 999999)}`,\n        }).as('testTargetCollection').then(function (testTargetCollection) {\n            cy.addToFavorites(adminUser.token, adminUser.user.uuid, testTargetCollection.uuid);\n        });\n\n        cy.getAll('@mySharedWritableProject', '@testTargetCollection')\n            .then(function ([mySharedWritableProject, testTargetCollection]) {\n                cy.loginAs(adminUser);\n\n                cy.get('[data-cy=side-panel-tree]').contains('My Favorites').click();\n\n                const newProjectName = `New project name ${mySharedWritableProject.name}`;\n                const newProjectDescription = `New project description ${mySharedWritableProject.name}`;\n                const newCollectionName = `New collection name ${testTargetCollection.name}`;\n                const newCollectionDescription = `New collection description ${testTargetCollection.name}`;\n\n                cy.testEditProjectOrCollection('main', mySharedWritableProject.name, newProjectName, newProjectDescription);\n                cy.testEditProjectOrCollection('main', testTargetCollection.name, newCollectionName, newCollectionDescription, false);\n\n                cy.get('[data-cy=side-panel-tree]').contains('Projects').click();\n\n                cy.get('main').contains(newProjectName).rightclick();\n                cy.contains('Add to public favorites').click();\n                cy.get('main').contains(newCollectionName).rightclick();\n          
      cy.contains('Add to public favorites').click();\n\n                cy.get('[data-cy=side-panel-tree]').contains('Public Favorites').click();\n\n                cy.testEditProjectOrCollection('main', newProjectName, mySharedWritableProject.name, 'newProjectDescription');\n                cy.testEditProjectOrCollection('main', newCollectionName, testTargetCollection.name, 'newCollectionDescription', false);\n            });\n    });\n\n    it('can view favorites in workflow', () => {\n        cy.createProject({\n            owningUser: adminUser,\n            targetUser: activeUser,\n            projectName: 'mySharedWritableProject',\n            canWrite: true,\n            addToFavorites: true\n        });\n        cy.createProject({\n            owningUser: adminUser,\n            targetUser: activeUser,\n            projectName: 'mySharedReadonlyProject',\n            canWrite: false,\n            addToFavorites: true\n        });\n        cy.createProject({\n            owningUser: activeUser,\n            projectName: 'myProject1',\n            addToFavorites: true\n        });\n\n        cy.getAll('@mySharedWritableProject', '@mySharedReadonlyProject', '@myProject1')\n            .then(function ([mySharedWritableProject, mySharedReadonlyProject, myProject1]) {\n                cy.createWorkflow(adminUser.token, {\n                    name: `TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n                    definition: \"{\\n    \\\"$graph\\\": [\\n        {\\n            \\\"class\\\": \\\"Workflow\\\",\\n            \\\"doc\\\": \\\"Reverse the lines in a document, then sort those lines.\\\",\\n            \\\"hints\\\": [\\n                {\\n                    \\\"acrContainerImage\\\": \\\"99b0201f4cade456b4c9d343769a3b70+261\\\",\\n                    \\\"class\\\": \\\"http://arvados.org/cwl#WorkflowRunnerResources\\\"\\n                }\\n            ],\\n            \\\"id\\\": \\\"#main\\\",\\n            \\\"inputs\\\": [\\n                {\\n                    \\\"default\\\": null,\\n                    \\\"doc\\\": \\\"The input file to be processed.\\\",\\n                    \\\"id\\\": \\\"#main/input\\\",\\n                    \\\"type\\\": \\\"File\\\"\\n                },\\n                {\\n                    \\\"default\\\": true,\\n                    \\\"doc\\\": \\\"If true, reverse (descending) sort\\\",\\n                    \\\"id\\\": \\\"#main/reverse_sort\\\",\\n                    \\\"type\\\": \\\"boolean\\\"\\n                }\\n            ],\\n            \\\"outputs\\\": [\\n                {\\n                    \\\"doc\\\": \\\"The output with the lines reversed and sorted.\\\",\\n                    \\\"id\\\": \\\"#main/output\\\",\\n                    \\\"outputSource\\\": \\\"#main/sorted/output\\\",\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"steps\\\": [\\n                {\\n                    \\\"id\\\": \\\"#main/rev\\\",\\n                    \\\"in\\\": [\\n                        {\\n                            \\\"id\\\": \\\"#main/rev/input\\\",\\n                            \\\"source\\\": \\\"#main/input\\\"\\n                        }\\n                    ],\\n                    \\\"out\\\": [\\n                        \\\"#main/rev/output\\\"\\n                    ],\\n                    \\\"run\\\": \\\"#revtool.cwl\\\"\\n                },\\n                {\\n                    \\\"id\\\": \\\"#main/sorted\\\",\\n                    
\\\"in\\\": [\\n                        {\\n                            \\\"id\\\": \\\"#main/sorted/input\\\",\\n                            \\\"source\\\": \\\"#main/rev/output\\\"\\n                        },\\n                        {\\n                            \\\"id\\\": \\\"#main/sorted/reverse\\\",\\n                            \\\"source\\\": \\\"#main/reverse_sort\\\"\\n                        }\\n                    ],\\n                    \\\"out\\\": [\\n                        \\\"#main/sorted/output\\\"\\n                    ],\\n                    \\\"run\\\": \\\"#sorttool.cwl\\\"\\n                }\\n            ]\\n        },\\n        {\\n            \\\"baseCommand\\\": \\\"rev\\\",\\n            \\\"class\\\": \\\"CommandLineTool\\\",\\n            \\\"doc\\\": \\\"Reverse each line using the `rev` command\\\",\\n            \\\"hints\\\": [\\n                {\\n                    \\\"class\\\": \\\"ResourceRequirement\\\",\\n                    \\\"ramMin\\\": 8\\n                }\\n            ],\\n            \\\"id\\\": \\\"#revtool.cwl\\\",\\n            \\\"inputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#revtool.cwl/input\\\",\\n                    \\\"inputBinding\\\": {},\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"outputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#revtool.cwl/output\\\",\\n                    \\\"outputBinding\\\": {\\n                        \\\"glob\\\": \\\"output.txt\\\"\\n                    },\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"stdout\\\": \\\"output.txt\\\"\\n        },\\n        {\\n            \\\"baseCommand\\\": \\\"sort\\\",\\n            \\\"class\\\": \\\"CommandLineTool\\\",\\n            \\\"doc\\\": \\\"Sort lines using the `sort` command\\\",\\n            \\\"hints\\\": [\\n                {\\n                    \\\"class\\\": \\\"ResourceRequirement\\\",\\n                    \\\"ramMin\\\": 8\\n                }\\n            ],\\n            \\\"id\\\": \\\"#sorttool.cwl\\\",\\n            \\\"inputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#sorttool.cwl/reverse\\\",\\n                    \\\"inputBinding\\\": {\\n                        \\\"position\\\": 1,\\n                        \\\"prefix\\\": \\\"-r\\\"\\n                    },\\n                    \\\"type\\\": \\\"boolean\\\"\\n                },\\n                {\\n                    \\\"id\\\": \\\"#sorttool.cwl/input\\\",\\n                    \\\"inputBinding\\\": {\\n                        \\\"position\\\": 2\\n                    },\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"outputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#sorttool.cwl/output\\\",\\n                    \\\"outputBinding\\\": {\\n                        \\\"glob\\\": \\\"output.txt\\\"\\n                    },\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"stdout\\\": \\\"output.txt\\\"\\n        }\\n    ],\\n    \\\"cwlVersion\\\": \\\"v1.0\\\"\\n}\",\n                    owner_uuid: myProject1.uuid,\n                })\n                    .as('testWorkflow');\n\n                cy.createWorkflow(adminUser.token, {\n                    name: `TestWorkflow2-${Math.floor(Math.random() * 999999)}.cwl`,\n                    definition: \"{ 
    \\\"$graph\\\": [         {             \\\"$namespaces\\\": {                 \\\"arv\\\": \\\"http://arvados.org/cwl#\\\"             },             \\\"class\\\": \\\"Workflow\\\",             \\\"doc\\\": \\\"Detect blurriness of WSI data.\\\",             \\\"id\\\": \\\"#main\\\",             \\\"inputs\\\": [                 {                     \\\"default\\\": {                         \\\"basename\\\": \\\"3d3cb547725e72ddb442bc620adbc342+2463\\\",                         \\\"class\\\": \\\"Directory\\\",                         \\\"location\\\": \\\"keep:3d3cb547725e72ddb442bc620adbc342+2463\\\"                     },                     \\\"doc\\\": \\\"Collection containing all pipeline input images\\\",                     \\\"id\\\": \\\"#main/image_collection\\\",                     \\\"type\\\": \\\"Directory\\\"                 }             ],             \\\"outputs\\\": [                 {                     \\\"id\\\": \\\"#main/blur_report\\\",                     \\\"outputSource\\\": \\\"#main/blurdetection/report\\\",                     \\\"type\\\": \\\"Any\\\"                 }             ],             \\\"steps\\\": [                 {                     \\\"id\\\": \\\"#main/blurdetection\\\",                     \\\"in\\\": [                         {                             \\\"id\\\": \\\"#main/blurdetection/image_collection\\\",                             \\\"source\\\": \\\"#main/image_collection\\\"                         }                     ],                     \\\"out\\\": [                         \\\"#main/blurdetection/report\\\"                     ],                     \\\"run\\\": \\\"#blurdetection.cwl\\\"                 }             ]         },         {             \\\"arguments\\\": [                 \\\"--num_workers\\\",                 \\\"0\\\",                 \\\"--wsi_dir\\\",                 \\\"$(inputs.image_collection)\\\",                 \\\"--tile_out_dir\\\",                 \\\"$(runtime.outdir)\\\"             ],             \\\"baseCommand\\\": [                 \\\"python3\\\",                 \\\"/updated_blur_on_folder.py\\\"             ],             \\\"class\\\": \\\"CommandLineTool\\\",             \\\"hints\\\": [                 {                     \\\"class\\\": \\\"DockerRequirement\\\",                     \\\"dockerPull\\\": \\\"updated_score_aws:cpu2\\\",                     \\\"http://arvados.org/cwl#dockerCollectionPDH\\\": \\\"0d6702518d1408ce2c471ffec40695cf+4924\\\"                 },                 {                     \\\"class\\\": \\\"ResourceRequirement\\\",                     \\\"coresMin\\\": 8,                     \\\"ramMin\\\": 20000                 },                 {                     \\\"class\\\": \\\"http://arvados.org/cwl#RuntimeConstraints\\\",                     \\\"keep_cache\\\": 2000                 }             ],             \\\"id\\\": \\\"#blurdetection.cwl\\\",             \\\"inputs\\\": [                 {                     \\\"doc\\\": \\\"Collection containing all pipeline input images\\\",                     \\\"id\\\": \\\"#blurdetection.cwl/image_collection\\\",                     \\\"type\\\": \\\"Directory\\\"                 }             ],             \\\"outputs\\\": [                 {                     \\\"id\\\": \\\"#blurdetection.cwl/report\\\",                     \\\"outputBinding\\\": {                         \\\"glob\\\": \\\"*.csv\\\"                     },                     \\\"type\\\": \\\"Any\\\"               
  }             ]         }     ],     \\\"cwlVersion\\\": \\\"v1.0\\\" }\",\n                    owner_uuid: myProject1.uuid,\n                })\n                    .as('testWorkflow2');\n\n                cy.loginAs(activeUser);\n                cy.doSidePanelNavigation('Home Projects');\n                cy.doMPVTabSelect(\"Data\");\n\n                cy.get('main').contains(myProject1.name).click();\n\n                cy.get('[data-cy=side-panel-button]').click();\n\n                cy.get('#aside-menu-list').contains('Run a workflow').click();\n\n                cy.get('@testWorkflow')\n                    .then((testWorkflow) => {\n                        cy.get('main').contains(testWorkflow.name).click();\n                        cy.get('[data-cy=run-process-next-button]').click();\n                        cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n                        cy.get('[data-cy=new-process-panel]')\n                            .within(() => {\n                                cy.contains('input').next().click();\n                            });\n                        cy.get('[data-cy=choose-a-file-dialog]').as('chooseFileDialog');\n                        cy.get('[data-cy=projects-tree-favourites-tree-picker]').contains('Favorites').closest('ul').find('i').click();\n                        cy.get('@chooseFileDialog').find(`[data-id=${mySharedWritableProject.uuid}]`);\n                        cy.get('@chooseFileDialog').find(`[data-id=${mySharedReadonlyProject.uuid}]`);\n                        cy.get('button').contains('Cancel').click();\n                    });\n\n                cy.get('button').contains('Back').click();\n\n                cy.get('@testWorkflow2')\n                    .then((testWorkflow2) => {\n                        cy.get('main').contains(testWorkflow2.name).click();\n                        cy.get('button').contains('Change Workflow').click();\n                        cy.get('[data-cy=run-process-next-button]').click();\n                        cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n                        cy.get('[data-cy=new-process-panel]')\n                            .within(() => {\n                                cy.contains('image_collection').next().click();\n                            });\n                        cy.get('[data-cy=choose-a-directory-dialog]').as('chooseDirectoryDialog');\n                        cy.get('[data-cy=projects-tree-favourites-tree-picker]').contains('Favorites').closest('ul').find('i').click();\n                        cy.get('@chooseDirectoryDialog').find(`[data-id=${mySharedWritableProject.uuid}]`);\n                        cy.get('@chooseDirectoryDialog').find(`[data-id=${mySharedReadonlyProject.uuid}]`);\n                    });\n            });\n    });\n});\n\ndescribe('Favorites-SidePanel tests', function () {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser').then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser('collectionuser1', 'Collection', 'User', false, true)\n            .as('activeUser').then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    it('shows the correct favorites and public favorites in the side panel', () => {\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: `myFavoriteProject1`,\n        });\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: `myFavoriteProject2`,\n        });\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: `myPublicFavoriteProject1`,\n        });\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: `myPublicFavoriteProject2`,\n        });\n        cy.createCollection(adminUser.token, {\n            owner_uuid: adminUser.user.uuid,\n            name: `Test favorite collection ${Math.floor(Math.random() * 999999)}`,\n        }).as('testFavoriteCollection');\n\n        cy.getAll('@myFavoriteProject1', '@myFavoriteProject2', '@myPublicFavoriteProject1', '@myPublicFavoriteProject2')\n        .then(function ([myFavoriteProject1, myFavoriteProject2, myPublicFavoriteProject1, myPublicFavoriteProject2, ]) {\n                cy.loginAs(adminUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                //add two projects and collection to favorites\n                cy.get('[data-cy=side-panel-tree]').contains(myFavoriteProject1.name).rightclick();\n                cy.contains('Add to favorites').click();\n                cy.get('[data-cy=side-panel-tree]').contains(myFavoriteProject2.name).rightclick();\n                cy.contains('Add to favorites').click();\n\n                cy.doMPVTabSelect(\"Data\");\n\n                //add two projects to public favorites\n                cy.get('[data-cy=data-table]').contains(myPublicFavoriteProject1.name).rightclick();\n                cy.contains('Add to public favorites').click();\n                cy.get('[data-cy=data-table]').contains(myPublicFavoriteProject2.name).rightclick();\n                cy.contains('Add to public favorites').click();\n\n                //close \"Home Projects\", which is open by default\n                cy.get(`[data-cy=tree-item-toggle-${kebabCase(adminUser.user.uuid)}]`).click();\n\n                //check if the correct favorites are displayed in the side panel\n                cy.get('span').contains(myFavoriteProject1.name).should('not.exist');\n                cy.get('span').contains(myFavoriteProject2.name).should('not.exist');\n                cy.get(`[data-cy=tree-item-toggle-my-favorites]`).should('exist').click({ force: true });\n                cy.get('span').contains(myFavoriteProject1.name).should('exist');\n                cy.get('span').contains(myFavoriteProject2.name).should('exist');\n                cy.get(`[data-cy=tree-item-toggle-my-favorites]`).click();\n\n                //check if the correct public favorites are displayed in the side panel\n                cy.get('span').contains(myPublicFavoriteProject1.name).should('not.exist');\n                
cy.get('span').contains(myPublicFavoriteProject2.name).should('not.exist');\n                cy.get(`[data-cy=tree-item-toggle-public-favorites]`).click();\n                cy.get('span').contains(myPublicFavoriteProject1.name).should('exist');\n                cy.get('span').contains(myPublicFavoriteProject2.name).should('exist');\n                cy.get(`[data-cy=tree-item-toggle-public-favorites]`).click();\n\n                //double check both sets\n                cy.get('span').contains(myFavoriteProject1.name).should('not.exist');\n                cy.get('span').contains(myFavoriteProject2.name).should('not.exist');\n                cy.get(`[data-cy=tree-item-toggle-my-favorites]`).click();\n                cy.get('span').contains(myFavoriteProject1.name).should('exist');\n                cy.get('span').contains(myFavoriteProject2.name).should('exist');\n                cy.get(`[data-cy=tree-item-toggle-my-favorites]`).click();\n\n                cy.get('span').contains(myPublicFavoriteProject1.name).should('not.exist');\n                cy.get('span').contains(myPublicFavoriteProject2.name).should('not.exist');\n                cy.get(`[data-cy=tree-item-toggle-public-favorites]`).click();\n                cy.get('span').contains(myPublicFavoriteProject1.name).should('exist');\n                cy.get('span').contains(myPublicFavoriteProject2.name).should('exist');\n                cy.get(`[data-cy=tree-item-toggle-public-favorites]`).click();\n\n                // Keep favorites open\n                cy.get(`[data-cy=tree-item-toggle-my-favorites]`).click();\n\n                // Trash favorited project\n                cy.get('[data-cy=data-table]').contains(myFavoriteProject1.name).rightclick();\n                cy.get('[data-cy=context-menu]').contains('Move to trash').click();\n                cy.waitForDom();\n                // Check removed from favorites\n                cy.get('[data-cy=tree-item-toggle-my-favorites]').parents('[data-cy=tree-top-level-item]').should('not.contain', myFavoriteProject1.name);\n                // Untrash favorited project\n                cy.get('[data-cy=side-panel-tree]').contains('Trash').click();\n                cy.get('[data-cy=data-table]').contains(myFavoriteProject1.name).rightclick();\n                cy.get('[data-cy=context-menu]').contains('Restore').click();\n                //navigates to restored project\n                cy.assertDetailsCardTitle(myFavoriteProject1.name);\n                cy.assertBreadcrumbs([\"Home Projects\", myFavoriteProject1.name]);\n                // Check project restored to favorites\n                cy.wait(1000);\n                cy.get('[data-cy=tree-item-toggle-my-favorites]').parents('[data-cy=tree-top-level-item]').should('contain', myFavoriteProject1.name);\n        });\n\n        cy.getAll('@testFavoriteCollection')\n            .then(function ([testFavoriteCollection]) {\n                cy.loginAs(adminUser);\n                cy.get('[data-cy=side-panel-tree]').contains('Home Projects').click().waitForDom();\n                cy.doMPVTabSelect(\"Data\");\n                cy.get('[data-cy=data-table]').contains(testFavoriteCollection.name).rightclick();\n                cy.get('[data-cy=context-menu]').contains('Add to favorites').click();\n                cy.waitForDom()\n                cy.get('[data-cy=data-table]').contains(testFavoriteCollection.name).rightclick();\n                cy.get('[data-cy=context-menu]').contains('Move to trash').click();\n                // Check removed from 
favorites\n                cy.get('[data-cy=tree-item-toggle-my-favorites]').click({ force: true })\n                cy.wait(1000);\n                cy.get('[data-cy=side-panel-tree]').should('not.contain', testFavoriteCollection.name);\n                // Untrash favorited collection\n                cy.get('[data-cy=side-panel-tree]').contains('Trash').click();\n                // collection might not be on first page\n                cy.get('[data-cy=search-input]').type(testFavoriteCollection.name);\n                cy.waitForDom();\n                cy.get('[data-cy=data-table]').contains(testFavoriteCollection.name).rightclick();\n                cy.get('[data-cy=context-menu]').contains('Restore').click();\n                cy.get('[data-cy=data-table]').should('exist', { timeout: 10000 })\n                cy.assertDataExplorerContains(testFavoriteCollection.name, false);\n                // Check collection restored to favorites\n                cy.wait(1000);\n                cy.get('[data-cy=tree-item-toggle-my-favorites]').parents('[data-cy=tree-top-level-item]').should('contain', testFavoriteCollection.name);\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/group-manage.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe('Group manage tests', function() {\n    let activeUser;\n    let adminUser;\n    let otherUser;\n    let userThree;\n    const groupName = `Test group (${Math.floor(999999 * Math.random())})`;\n\n    before(function() {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser').then(function() {\n                adminUser = this.adminUser;\n            }\n        );\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser').then(function() {\n                activeUser = this.activeUser;\n            }\n        );\n        cy.getUser('otheruser', 'Other', 'User', false, true)\n            .as('otherUser').then(function() {\n                otherUser = this.otherUser;\n            }\n        );\n        cy.getUser('userThree', 'User', 'Three', false, true)\n            .as('userThree').then(function() {\n                userThree = this.userThree;\n            }\n        );\n    });\n\n    it('creates a new group, add users to it and changes permission level', function() {\n        cy.loginAs(activeUser);\n\n        // Navigate to Groups\n        cy.get('[data-cy=side-panel-tree]').contains('Groups').click();\n\n        // Create new group\n        cy.get('[data-cy=groups-panel-new-group]').click();\n        cy.get('[data-cy=form-dialog]')\n            .should('contain', 'New Group')\n            .within(() => {\n                cy.get('input[name=name]').type(groupName);\n                cy.get('[data-cy=users-field] input').type(\"three\");\n            });\n        cy.get('[role=tooltip]').click();\n        cy.get('[data-cy=form-dialog]').within(() => {\n            cy.get('[data-cy=form-submit-btn]').click();\n        })\n\n        // Check that the group was created\n        cy.get('[data-cy=groups-panel-data-explorer]').contains(groupName).click();\n        cy.get('[data-cy=group-members-data-explorer]').contains(activeUser.user.full_name);\n        cy.get('[data-cy=group-members-data-explorer]').contains(userThree.user.full_name);\n\n        // Add other user to the group\n        cy.get('[data-cy=group-member-add]').click();\n        cy.get('.sharing-dialog')\n            .should('contain', 'Sharing settings')\n            .within(() => {\n                cy.get('[data-cy=invite-people-field] input').type(\"other\");\n            });\n        cy.get('[data-cy=\"loading-spinner\"]').should('not.exist');\n        cy.get('[data-cy=\"users-tab-label\"]').click();\n        cy.get('[data-cy=sharing-suggestion]').click();\n        // Add admin to the group\n        cy.get('.sharing-dialog')\n            .should('contain', 'Sharing settings')\n            .within(() => {\n                cy.get('[data-cy=invite-people-field] input').clear().type(\"admin\");\n            });\n        cy.get('[data-cy=\"loading-spinner\"]').should('not.exist');\n        cy.wait(1000);\n        cy.get('[data-cy=\"users-tab-label\"]').click();\n        cy.get('[data-cy=sharing-suggestion]').click();\n        cy.get('.sharing-dialog').get('[data-cy=add-invited-people]').click();\n        
cy.get('.sharing-dialog').contains('Close').click();\n\n        // Check that both users are present with appropriate permissions\n        cy.get('[data-cy=group-members-data-explorer]')\n            .contains(otherUser.user.full_name)\n            .parents('tr')\n            .within(() => {\n                cy.contains('Read');\n            });\n        cy.get('[data-cy=group-members-data-explorer] tr')\n            .contains(activeUser.user.full_name)\n            .parents('tr')\n            .within(() => {\n                cy.contains('Manage');\n            });\n\n        // Test change permission level\n        cy.get('[data-cy=group-members-data-explorer]')\n            .contains(otherUser.user.full_name)\n            .parents('tr')\n            .within(() => {\n                cy.contains('Read')\n                    .parents('td')\n                    .within(() => {\n                        cy.get('button').click();\n                    });\n            });\n        cy.get('[data-cy=context-menu]')\n            .contains('Write')\n            .click();\n        cy.get('[data-cy=group-members-data-explorer]')\n            .contains(otherUser.user.full_name)\n            .parents('tr')\n            .within(() => {\n                cy.contains('Write');\n            });\n\n        // Change admin to manage\n        cy.get('[data-cy=group-members-data-explorer]')\n            .contains(adminUser.user.full_name)\n            .parents('tr')\n            .within(() => {\n                cy.contains('Read')\n                    .parents('td')\n                    .within(() => {\n                        cy.get('button').click();\n                    });\n            });\n        cy.get('[data-cy=context-menu]')\n            .contains('Manage')\n            .click();\n        cy.get('[data-cy=group-members-data-explorer]')\n            .contains(adminUser.user.full_name)\n            .parents('tr')\n            .within(() => {\n                cy.contains('Manage');\n            });\n    });\n\n    it('can unhide and re-hide users', function() {\n        // Must use admin user to have manage permission on user\n        cy.loginAs(adminUser);\n        cy.get('[data-cy=side-panel-tree]').contains('Groups').click();\n        cy.get('[data-cy=groups-panel-data-explorer]').contains(groupName).click();\n\n        // Check that other user is hidden\n        cy.get('[data-cy=group-details-permissions-tab]').click();\n        cy.get('[data-cy=group-permissions-data-explorer]')\n            .should('not.contain', otherUser.user.full_name)\n        cy.get('[data-cy=group-details-members-tab]').click();\n\n        // Test unhide\n        cy.get('[data-cy=group-members-data-explorer]')\n            .contains(otherUser.user.full_name)\n            .parents('tr')\n            .within(() => {\n                cy.get('[data-cy=user-visible-checkbox]').click();\n            });\n        // Check that other user is visible\n        cy.get('[data-cy=group-details-permissions-tab]').click();\n        cy.get('[data-cy=group-permissions-data-explorer]')\n            .contains(otherUser.user.full_name)\n            .parents('tr')\n            .within(() => {\n                cy.contains('Read');\n            });\n        // Test re-hide\n        cy.get('[data-cy=group-details-members-tab]').click();\n        cy.get('[data-cy=group-members-data-explorer]')\n            .contains(otherUser.user.full_name)\n            .parents('tr')\n            .within(() => {\n                
cy.get('[data-cy=user-visible-checkbox]').click();\n            });\n        // Check that other user is hidden\n        cy.get('[data-cy=group-details-permissions-tab]').click();\n        cy.get('[data-cy=group-permissions-data-explorer]')\n            .should('not.contain', otherUser.user.full_name)\n    });\n\n    it('displays resources shared with the group', function() {\n        // Switch to activeUser\n        cy.loginAs(activeUser);\n        cy.get('[data-cy=side-panel-tree]').contains('Groups').click();\n\n        // Get groupUuid and create shared project\n        cy.get('[data-cy=groups-panel-data-explorer]')\n            .contains(groupName)\n            .parents('tr')\n            .find('[data-cy=uuid]')\n            .invoke('text')\n            .as('groupUuid')\n            .then((groupUuid) => {\n                cy.createProject({\n                    owningUser: activeUser,\n                    projectName: 'test-project',\n                }).as('testProject').then((testProject) => {\n                    cy.shareWith(activeUser.token, groupUuid, testProject.uuid, 'can_read');\n                });\n            });\n\n        // Check that the project is listed in permissions\n        cy.get('[data-cy=groups-panel-data-explorer]').contains(groupName).click();\n        cy.get('[data-cy=group-details-permissions-tab]').click();\n        cy.get('[data-cy=group-permissions-data-explorer]')\n            .contains('test-project')\n            .parents('tr')\n            .within(() => {\n                cy.contains('Read');\n            });\n    });\n\n    it('removes users from the group', function() {\n        cy.loginAs(activeUser);\n\n        cy.get('[data-cy=side-panel-tree]').contains('Groups').click();\n        cy.get('[data-cy=groups-panel-data-explorer]').contains(groupName).click();\n\n        // Remove other user\n        cy.get('[data-cy=group-members-data-explorer]')\n            .contains(otherUser.user.full_name)\n            .parents('tr')\n            .within(() => {\n                cy.get('[data-cy=resource-delete-button]').click();\n            });\n        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n        cy.get('[data-cy=group-members-data-explorer]')\n            .should('not.contain', otherUser.user.full_name);\n\n        // Remove user three\n        cy.get('[data-cy=group-members-data-explorer]')\n            .contains(userThree.user.full_name)\n            .parents('tr')\n            .within(() => {\n                cy.get('[data-cy=resource-delete-button]').click();\n            });\n        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n        cy.get('[data-cy=group-members-data-explorer]')\n            .should('not.contain', userThree.user.full_name);\n    });\n\n    it('renames the group', function() {\n        cy.loginAs(adminUser);\n        // Navigate to Groups\n        cy.get('[data-cy=side-panel-tree]').contains('Groups').click();\n\n        // Open edit dialog\n        cy.get('[data-cy=groups-panel-data-explorer]')\n            .contains(groupName)\n            .rightclick();\n        cy.get('[data-cy=context-menu]')\n            .contains('Edit group')\n            .click();\n\n        // Rename the group\n        cy.get('[data-cy=form-dialog]')\n            .should('contain', 'Edit Group')\n            .within(() => {\n                cy.get('input[name=name]').clear().type(groupName + ' (renamed)');\n                cy.get('button').contains('Save').click();\n            });\n\n        // Check that the group was 
renamed\n        cy.get('[data-cy=groups-panel-data-explorer]')\n            .contains(groupName + ' (renamed)');\n    });\n\n    it('deletes the group', function() {\n        cy.loginAs(adminUser);\n\n        // Navigate to Groups\n        cy.get('[data-cy=side-panel-tree]').contains('Groups').click();\n\n        // Delete the group\n        cy.get('[data-cy=groups-panel-data-explorer]')\n            .contains(groupName + ' (renamed)')\n            .rightclick();\n        cy.get('[data-cy=context-menu]')\n            .contains('Remove')\n            .click();\n        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n\n        // Check that the group was deleted\n        cy.get('[data-cy=groups-panel-data-explorer]')\n            .should('not.contain', groupName + ' (renamed)');\n    });\n\n    it('disables group-related controls for built-in groups', function() {\n        cy.loginAs(adminUser);\n\n        ['All users', 'Anonymous users', 'System group'].forEach((builtInGroup) => {\n            cy.get('[data-cy=side-panel-tree]').contains('Groups').click();\n            cy.get('[data-cy=groups-panel-data-explorer]').contains(builtInGroup).click();\n\n            // Check group member actions\n            cy.get('[data-cy=group-members-data-explorer]')\n                .within(() => {\n                    cy.get('[data-cy=group-member-add]').should('not.exist');\n                    cy.get('[data-cy=user-visible-checkbox] input').should('be.disabled');\n                    cy.get('[data-cy=resource-delete-button]').should('be.disabled');\n                    cy.get('[data-cy=edit-permission-button]').should('not.exist');\n                });\n\n            // Check permissions actions\n            cy.get('[data-cy=group-details-permissions-tab]').click();\n            cy.get('[data-cy=group-permissions-data-explorer]').within(() => {\n                cy.get('[data-cy=resource-delete-button]').should('be.disabled');\n                cy.get('[data-cy=edit-permission-button]').should('not.exist');\n            });\n        });\n    });\n\n    it('adds properties to groups', () => {\n        const groupWithProperties = `Group with properties (${Math.floor(999999 * Math.random())})`;\n\n        cy.loginAs(activeUser);\n\n        // Navigate to Groups\n        cy.get('[data-cy=side-panel-tree]').contains('Groups').click();\n\n        // Create new group\n        cy.get('[data-cy=groups-panel-new-group]').click();\n        cy.get('[data-cy=form-dialog]')\n            .should('contain', 'New Group')\n            .within(() => {\n                cy.get('input[name=name]').type(groupWithProperties);\n                cy.get('[data-cy=users-field] input').type(\"three\");\n            });\n        // Click suggested user\n        cy.get('[role=tooltip]').click();\n\n        // Add properties\n        cy.get(\"[data-cy=resource-properties-form]\").within(() => {\n            cy.get(\"[data-cy=property-field-key]\").within(() => {\n                cy.get(\"input\").type(\"Color\");\n            });\n            cy.get(\"[data-cy=property-field-value]\").click().within(() => {\n                cy.get(\"input\").type(\"Magenta\");\n            });\n            // Submit new property\n            cy.get(\"[data-cy=property-add-btn]\").click();\n        });\n\n        // Submit new group\n        cy.get('[data-cy=form-dialog]').within(() => {\n            cy.get('[data-cy=form-submit-btn]').click();\n        });\n\n        // Open edit dialog\n        cy.get('[data-cy=groups-panel-data-explorer]')\n  
          .contains(groupWithProperties)\n            .rightclick();\n        cy.get('[data-cy=context-menu]')\n            .contains('Edit group')\n            .click();\n\n        // Verify properties\n        cy.get(\"[data-cy=form-dialog]\").should(\"contain\", \"Color: Magenta\");\n\n        // Edit properties\n        cy.get(\"[data-cy=resource-properties-form]\").within(() => {\n            cy.get(\"[data-cy=property-field-key]\").within(() => {\n                cy.get(\"input\").type(\"Animal\");\n            });\n            cy.get(\"[data-cy=property-field-value]\").click().within(() => {\n                cy.get(\"input\").type(\"Dog\");\n            });\n            // Submit new property\n            cy.get(\"[data-cy=property-add-btn]\").click();\n        });\n        // Submit edited group\n        cy.get('[data-cy=form-dialog]').within(() => {\n            cy.get('[data-cy=form-submit-btn]').click();\n        });\n\n        // Open edit dialog\n        cy.get('[data-cy=groups-panel-data-explorer]')\n            .contains(groupWithProperties)\n            .rightclick();\n        cy.get('[data-cy=context-menu]')\n            .contains('Edit group')\n            .click();\n\n        // Verify properties\n        cy.get(\"[data-cy=form-dialog]\").should(\"contain\", \"Color: Magenta\");\n        cy.get(\"[data-cy=form-dialog]\").should(\"contain\", \"Animal: Dog\");\n\n        // Close dialog\n        cy.get('[data-cy=form-dialog]').within(() => {\n            cy.get('[data-cy=form-cancel-btn]').click();\n        });\n    });\n\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/login.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe('Login tests', function() {\n    let activeUser;\n    let inactiveUser;\n    let adminUser;\n    let randomUser = {};\n\n    before(function() {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser').then(function() {\n                adminUser = this.adminUser;\n            }\n        );\n        cy.getUser('active', 'Active', 'User', false, true)\n            .as('activeUser').then(function() {\n                activeUser = this.activeUser;\n            }\n        );\n        cy.getUser('inactive', 'Inactive', 'User', false, false)\n            .as('inactiveUser').then(function() {\n                inactiveUser = this.inactiveUser;\n            }\n                                    );\n        // Username/password match Login.Test section of arvados_config.yml\n        randomUser.username = 'randomuser1234';\n        randomUser.password = 'topsecret';\n    })\n\n    it('shows login page on first visit', function() {\n        cy.visit('/')\n        cy.get('div#root').should('contain', 'Please log in')\n        cy.url().should('not.contain', '/projects/')\n    })\n\n    it('shows login page with no token', function() {\n        cy.visit('/token/?api_token=')\n        cy.get('div#root').should('contain', 'Please log in')\n        cy.url().should('not.contain', '/projects/')\n    })\n\n    it('shows inactive page to inactive user', function() {\n        cy.visit(`/token/?api_token=${inactiveUser.token}`)\n        cy.get('div#root').should('contain', 'Your account is inactive');\n    })\n\n    it('shows login page with invalid token', function() {\n        cy.visit('/token/?api_token=nope')\n        cy.get('div#root').should('contain', 'Please log in')\n        cy.url().should('not.contain', '/projects/')\n    })\n\n    it('logs in successfully with valid user token', function() {\n        cy.visit(`/token/?api_token=${activeUser.token}`);\n        cy.url().should('contain', '/dashboard');\n        cy.doSidePanelNavigation('Home Projects');\n        cy.get('div#root').should('contain', 'Arvados Workbench (zzzzz)');\n        cy.get('div#root').should('not.contain', 'Your account is inactive');\n        cy.get('button[aria-label=\"Account Management\"]').click();\n        cy.get('ul[role=menu] > li[role=menuitem]').contains(\n            `${activeUser.user.first_name} ${activeUser.user.last_name}`);\n    })\n\n    it('logs out when token no longer valid', function() {\n        cy.createProject({\n            owningUser: activeUser,\n            projectName: `Test Project ${Math.floor(Math.random() * 999999)}`,\n            addToFavorites: false\n        }).as('testProject1');\n        // Log in\n        cy.visit(`/token/?api_token=${activeUser.token}`);\n        cy.doSidePanelNavigation('Home Projects');\n        cy.url().should('contain', '/projects/');\n        cy.get('div#root').should('contain', 'Arvados Workbench (zzzzz)');\n        cy.get('div#root').should('not.contain', 'Your account is inactive');\n        cy.waitForDom();\n\n        // Invalidate own token.\n        const tokenUuid = activeUser.token.split('/')[1];\n        
cy.doRequest('PUT', `/arvados/v1/api_client_authorizations/${tokenUuid}`, {\n            id: tokenUuid,\n            api_client_authorization: JSON.stringify({\n                api_token: `randomToken${Math.floor(Math.random() * 999999)}`\n            })\n        }, null, activeUser.token, true);\n        // Should log the user out.\n\n        cy.getAll('@testProject1').then(([testProject1]) => {\n            cy.doMPVTabSelect(\"Data\");\n            cy.get('main').contains(testProject1.name).click();\n            cy.get('div#root').should('contain', 'Please log in');\n            // Should retain last visited url when auth is invalidated\n            cy.url().should('contain', `/projects/${testProject1.uuid}`);\n        })\n    })\n\n    it('logs in successfully with valid admin token', function() {\n        cy.visit(`/token/?api_token=${adminUser.token}`);\n        cy.doSidePanelNavigation('Home Projects');\n        cy.url().should('contain', '/projects/');\n        cy.get('div#root').should('contain', 'Arvados Workbench (zzzzz)');\n        cy.get('div#root').should('not.contain', 'Your account is inactive');\n        cy.get('button[aria-label=\"Admin Panel\"]').click();\n        cy.get('ul[role=menu] > li[role=menuitem]')\n            .contains('Users')\n            .type('{esc}', {force: true});\n        cy.get('button[aria-label=\"Account Management\"]').click();\n        cy.get('ul[role=menu] > li[role=menuitem]').contains(\n            `${adminUser.user.first_name} ${adminUser.user.last_name}`);\n    })\n\n    it('fails to authenticate using the login form with wrong password', function() {\n        cy.visit('/');\n        cy.get('#username').type(randomUser.username);\n        cy.get('#password').type('wrong password');\n        cy.contains('button', 'Log in').click();\n        cy.get('p#password-helper-text').should('contain', 'authentication failed');\n        cy.url().should('not.contain', '/projects/');\n    })\n\n    it('successfully authenticates using the login form', function() {\n        cy.visit('/');\n        cy.get('#username').type(randomUser.username);\n        cy.get('#password').type(randomUser.password);\n        cy.contains('button', 'Log in').click();\n        cy.url().should('contain', '/dashboard');\n        cy.get('div#root').should('contain', 'Arvados Workbench (zzzzz)');\n        cy.get('div#root').should('contain', 'Your account is inactive');\n        cy.get('button[aria-label=\"Account Management\"]').click();\n        cy.get('ul[role=menu] > li[role=menuitem]').contains(randomUser.username);\n    })\n})\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/multiselect-toolbar.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport moment from 'moment';\nimport { tooltips } from '../support/msToolbarTooltips';\n\nconst testWFDefinition = \"{\\n    \\\"$graph\\\": [\\n        {\\n            \\\"class\\\": \\\"Workflow\\\",\\n            \\\"doc\\\": \\\"Reverse the lines in a document, then sort those lines.\\\",\\n            \\\"hints\\\": [\\n                {\\n                    \\\"acrContainerImage\\\": \\\"99b0201f4cade456b4c9d343769a3b70+261\\\",\\n                    \\\"class\\\": \\\"http://arvados.org/cwl#WorkflowRunnerResources\\\"\\n                }\\n            ],\\n            \\\"id\\\": \\\"#main\\\",\\n            \\\"inputs\\\": [\\n                {\\n                    \\\"default\\\": null,\\n                    \\\"doc\\\": \\\"The input file to be processed.\\\",\\n                    \\\"id\\\": \\\"#main/input\\\",\\n                    \\\"type\\\": \\\"File\\\"\\n                },\\n                {\\n                    \\\"default\\\": true,\\n                    \\\"doc\\\": \\\"If true, reverse (decending) sort\\\",\\n                    \\\"id\\\": \\\"#main/reverse_sort\\\",\\n                    \\\"type\\\": \\\"boolean\\\"\\n                }\\n            ],\\n            \\\"outputs\\\": [\\n                {\\n                    \\\"doc\\\": \\\"The output with the lines reversed and sorted.\\\",\\n                    \\\"id\\\": \\\"#main/output\\\",\\n                    \\\"outputSource\\\": \\\"#main/sorted/output\\\",\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"steps\\\": [\\n                {\\n                    \\\"id\\\": \\\"#main/rev\\\",\\n                    \\\"in\\\": [\\n                        {\\n                            \\\"id\\\": \\\"#main/rev/input\\\",\\n                            \\\"source\\\": \\\"#main/input\\\"\\n                        }\\n                    ],\\n                    \\\"out\\\": [\\n                        \\\"#main/rev/output\\\"\\n                    ],\\n                    \\\"run\\\": \\\"#revtool.cwl\\\"\\n                },\\n                {\\n                    \\\"id\\\": \\\"#main/sorted\\\",\\n                    \\\"in\\\": [\\n                        {\\n                            \\\"id\\\": \\\"#main/sorted/input\\\",\\n                            \\\"source\\\": \\\"#main/rev/output\\\"\\n                        },\\n                        {\\n                            \\\"id\\\": \\\"#main/sorted/reverse\\\",\\n                            \\\"source\\\": \\\"#main/reverse_sort\\\"\\n                        }\\n                    ],\\n                    \\\"out\\\": [\\n                        \\\"#main/sorted/output\\\"\\n                    ],\\n                    \\\"run\\\": \\\"#sorttool.cwl\\\"\\n                }\\n            ]\\n        },\\n        {\\n            \\\"baseCommand\\\": \\\"rev\\\",\\n            \\\"class\\\": \\\"CommandLineTool\\\",\\n            \\\"doc\\\": \\\"Reverse each line using the `rev` command\\\",\\n            \\\"hints\\\": [\\n                {\\n                    \\\"class\\\": \\\"ResourceRequirement\\\",\\n                    \\\"ramMin\\\": 8\\n                }\\n            ],\\n            \\\"id\\\": \\\"#revtool.cwl\\\",\\n            \\\"inputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#revtool.cwl/input\\\",\\n                    
\\\"inputBinding\\\": {},\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"outputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#revtool.cwl/output\\\",\\n                    \\\"outputBinding\\\": {\\n                        \\\"glob\\\": \\\"output.txt\\\"\\n                    },\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"stdout\\\": \\\"output.txt\\\"\\n        },\\n        {\\n            \\\"baseCommand\\\": \\\"sort\\\",\\n            \\\"class\\\": \\\"CommandLineTool\\\",\\n            \\\"doc\\\": \\\"Sort lines using the `sort` command\\\",\\n            \\\"hints\\\": [\\n                {\\n                    \\\"class\\\": \\\"ResourceRequirement\\\",\\n                    \\\"ramMin\\\": 8\\n                }\\n            ],\\n            \\\"id\\\": \\\"#sorttool.cwl\\\",\\n            \\\"inputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#sorttool.cwl/reverse\\\",\\n                    \\\"inputBinding\\\": {\\n                        \\\"position\\\": 1,\\n                        \\\"prefix\\\": \\\"-r\\\"\\n                    },\\n                    \\\"type\\\": \\\"boolean\\\"\\n                },\\n                {\\n                    \\\"id\\\": \\\"#sorttool.cwl/input\\\",\\n                    \\\"inputBinding\\\": {\\n                        \\\"position\\\": 2\\n                    },\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"outputs\\\": [\\n                {\\n                    \\\"id\\\": \\\"#sorttool.cwl/output\\\",\\n                    \\\"outputBinding\\\": {\\n                        \\\"glob\\\": \\\"output.txt\\\"\\n                    },\\n                    \\\"type\\\": \\\"File\\\"\\n                }\\n            ],\\n            \\\"stdout\\\": \\\"output.txt\\\"\\n        }\\n    ],\\n    \\\"cwlVersion\\\": \\\"v1.0\\\"\\n}\"\n\ndescribe('Multiselect Toolbar Baseline Tests', () => {\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. 
Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n    });\n\n    it('exists in DOM in neutral state', () => {\n        cy.loginAs(adminUser);\n        cy.doSidePanelNavigation('Home Projects');\n        //multiselect toolbar should exist in details card and not in data explorer\n        cy.get('[data-cy=user-details-card]')\n            .should('exist')\n            .within(() => {\n                cy.get('[data-cy=multiselect-toolbar]').should('exist');\n            });\n        cy.get('[data-cy=title-wrapper]')\n            .should('exist')\n            .within(() => {\n                cy.get('[data-cy=multiselect-button]').should('not.exist');\n            });\n    });\n\n    it('header checkbox checks/unchecks in response to item selection', () => {\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject1',\n        }).as('testProject1');\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject2',\n        }).as('testProject2');\n        cy.getAll('@testProject1', '@testProject2')\n            .then(([testProject1, testProject2]) => {\n                cy.loginAs(adminUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                cy.doMPVTabSelect('Data');\n                cy.assertDataExplorerContains(testProject1.name, true);\n                cy.assertDataExplorerContains(testProject2.name, true);\n\n                //check header checkbox\n                cy.get('[data-cy=data-table-header-checkbox]').click();\n                cy.assertCheckboxes([testProject1.uuid, testProject2.uuid], true);\n\n                //uncheck header checkbox\n                cy.get('[data-cy=data-table-header-checkbox]').click();\n                cy.assertCheckboxes([testProject1.uuid, testProject2.uuid], false);\n\n                //test checkbox select\n                cy.doDataExplorerSelect(testProject1.name);\n                cy.get('[data-cy=data-table-header-checkbox]').should('be.checked');\n                cy.doDataExplorerSelect(testProject2.name);\n                cy.get('[data-cy=data-table-header-checkbox]').should('be.checked');\n                cy.doDataExplorerSelect(testProject1.name);\n                cy.get('[data-cy=data-table-header-checkbox]').should('be.checked');\n                cy.doDataExplorerSelect(testProject2.name);\n                cy.get('[data-cy=data-table-header-checkbox]').should('not.be.checked');\n\n                //test onRowClick select\n                cy.get('[data-cy=data-table-row]').eq(0).click();\n                cy.get('[data-cy=data-table-header-checkbox]').should('be.checked');\n                cy.get('[data-cy=data-table-row]').eq(1).click();\n                cy.get('[data-cy=data-table-header-checkbox]').should('be.checked');\n                cy.get('[data-cy=data-table-row]').eq(0).click();\n                cy.get('[data-cy=data-table-header-checkbox]').should('be.checked');\n                cy.get('[data-cy=data-table-row]').eq(1).click();\n                cy.get('[data-cy=data-table-header-checkbox]').should('not.be.checked');\n        });\n    });\n\n    it('uses selector popover to select the correct items', () => {\n        
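// Fixtures: three projects exercise the Data tab below; three container\n        // requests exercise the Workflow Runs tab.\n        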
cy.setupDockerImage('arvados/jobs')\n            .as('dockerImage');\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject1',\n        }).as('testProject1');\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject2',\n        }).as('testProject2');\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject3',\n        }).as('testProject3');\n\n        cy.getAll('@dockerImage')\n            .then(([dockerImage]) => {\n                cy.createDefaultContainerRequest(\n                    adminUser.token,\n                    dockerImage,\n                    {\n                        name: `test_container_request_1 ${Math.floor(Math.random() * 999999)}`,\n                        state: \"Committed\",\n                    },\n                ).as('testProcess1');\n                cy.createDefaultContainerRequest(\n                    adminUser.token,\n                    dockerImage,\n                    {\n                        name: `test_container_request_2 ${Math.floor(Math.random() * 999999)}`,\n                        state: \"Committed\",\n                    },\n                ).as('testProcess2');\n                cy.createDefaultContainerRequest(\n                    adminUser.token,\n                    dockerImage,\n                    {\n                        name: `test_container_request_3 ${Math.floor(Math.random() * 999999)}`,\n                        state: \"Committed\",\n                    },\n                ).as('testWorkflow3');\n            });\n\n        cy.getAll('@testProject1', '@testProject2', '@testProject3', '@testProcess1', '@testProcess2', '@testWorkflow3')\n            .then(([testProject1, testProject2, testProject3, testProcess1, testProcess2, testWorkflow3]) => {\n                cy.loginAs(adminUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                // Data tab\n                cy.doMPVTabSelect('Data');\n                cy.assertCheckboxes([testProject1.uuid, testProject2.uuid, testProject3.uuid], false);\n\n                    //check that a thing can be checked\n                    cy.doDataExplorerSelect(testProject1.name);\n                    cy.assertCheckboxes([testProject1.uuid], true);\n                    cy.assertCheckboxes([testProject2.uuid, testProject3.uuid], false);\n\n                    //check invert\n                    cy.get('[data-cy=data-table-multiselect-popover]').click();\n                    cy.get('[data-cy=multiselect-popover-Invert]').click();\n                    cy.assertCheckboxes([testProject1.uuid], false);\n                    cy.assertCheckboxes([testProject2.uuid, testProject3.uuid], true);\n                    //check all\n                    cy.get('[data-cy=data-table-multiselect-popover]').click();\n                    cy.get('[data-cy=multiselect-popover-All]').click();\n                    cy.assertCheckboxes([testProject1.uuid, testProject2.uuid, testProject3.uuid], true);\n                    //check none\n                    cy.get('[data-cy=data-table-multiselect-popover]').click();\n                    cy.get('[data-cy=multiselect-popover-None]').click();\n                    cy.assertCheckboxes([testProject1.uuid, testProject2.uuid, testProject3.uuid], false);\n\n                // Workflow Runs tab\n                cy.doMPVTabSelect('Workflow Runs');\n                cy.assertCheckboxes([testProcess1.uuid], false);\n\n            
        //check that a thing can be checked\n                    cy.doDataExplorerSelect(testProcess1.name);\n                    cy.assertCheckboxes([testProcess1.uuid], true);\n                    cy.assertCheckboxes([testProcess2.uuid, testWorkflow3.uuid], false);\n\n                    //check invert\n                    cy.get('[data-cy=data-table-multiselect-popover]').click();\n                    cy.get('[data-cy=multiselect-popover-Invert]').click();\n                    cy.assertCheckboxes([testProcess1.uuid], false);\n                    cy.assertCheckboxes([testProcess2.uuid, testWorkflow3.uuid], true);\n                    //check all\n                    cy.get('[data-cy=data-table-multiselect-popover]').click();\n                    cy.get('[data-cy=multiselect-popover-All]').click();\n                    cy.assertCheckboxes([testProcess1.uuid, testProcess2.uuid, testWorkflow3.uuid], true);\n                    //check none\n                    cy.get('[data-cy=data-table-multiselect-popover]').click();\n                    cy.get('[data-cy=multiselect-popover-None]').click();\n                    cy.assertCheckboxes([testProcess1.uuid, testProcess2.uuid, testWorkflow3.uuid], false);\n        });\n    });\n\n    it('retains selection and toolbar state when toggling details panel', () => {\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject1',\n        }).as('testProject1');\n\n        cy.getAll('@testProject1')\n            .then(([testProject1]) => {\n                cy.loginAs(adminUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                // Toolbar in user details card\n                cy.get('[data-cy=user-details-card]')\n                    .should('exist')\n                    .within(() => {\n                        cy.get('[data-cy=multiselect-toolbar]').should('exist');\n                    });\n                // Toolbar not in DE\n                cy.get('[data-cy=project-data]')\n                    .should('exist')\n                    .within(() => {\n                        cy.get('[data-cy=multiselect-toolbar]').should('not.exist');\n                    });\n\n                // Select project1\n                cy.doDataExplorerSelect(testProject1.name);\n                cy.assertCheckboxes([testProject1.uuid], true);\n\n                // Toolbar not in user card\n                cy.get('[data-cy=user-details-card]')\n                    .should('exist')\n                    .within(() => {\n                        cy.get('[data-cy=multiselect-toolbar]').should('not.exist');\n                    });\n                // Toolbar in DE\n                cy.get('[data-cy=project-data]')\n                    .should('exist')\n                    .within(() => {\n                        cy.get('[data-cy=multiselect-toolbar]').should('exist');\n                    });\n\n                // Open details panel\n                cy.doToolbarAction(\"View details\");\n\n                // Verify still checked\n                cy.assertCheckboxes([testProject1.uuid], true);\n\n                // Details panel contains project name\n                cy.get('[data-cy=details-panel]')\n                    .should('exist')\n                    .within(() => {\n                        cy.get('h6').contains(testProject1.name);\n                    });\n\n                // Close details panel\n                cy.get('[data-cy=details-panel] button[data-cy=close-details-btn]').click();\n\n              
  // Verify still checked\n                cy.assertCheckboxes([testProject1.uuid], true);\n\n                // Toolbar still in DE, not user card\n                cy.get('[data-cy=user-details-card]')\n                    .should('exist')\n                    .within(() => {\n                        cy.get('[data-cy=multiselect-toolbar]').should('not.exist');\n                    });\n                cy.get('[data-cy=project-data]')\n                    .should('exist')\n                    .within(() => {\n                        cy.get('[data-cy=multiselect-toolbar]').should('exist');\n                    });\n            });\n    });\n});\n\ndescribe('For project resources', () => {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser')\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    it('should behave correctly for a single project', () => {\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject',\n        }).as('testProject');\n        cy.getAll('@testProject').then(([testProject]) => {\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n\n            cy.doMPVTabSelect('Data');\n            cy.doDataExplorerSelect(testProject.name);\n\n            // View details\n            cy.get('[aria-label=\"View details\"]').click();\n            cy.get('[data-cy=details-panel]').contains(testProject.name).should('be.visible');\n            cy.get('[data-cy=close-details-btn]').click();\n\n            cy.window().then((win) => {\n                cy.stub(win, 'open').as('windowOpen');\n            });\n\n            // Open in new tab\n            cy.doToolbarAction(\"Open in new tab\");\n            cy.get('@windowOpen').should('be.called');\n\n            //Share\n            cy.get('[aria-label=\"Share\"]').click();\n            cy.get('.sharing-dialog').should('exist');\n            cy.contains('button', 'Close').click();\n\n            //edit project\n            cy.get('[aria-label=\"Edit project\"]').click();\n            cy.get(\"[data-cy=form-dialog]\").within(() => {\n                cy.contains(\"Edit Project\").should('be.visible');\n                cy.get(\"[data-cy=form-cancel-btn]\").click();\n            });\n\n            //new project\n            cy.get('[aria-label=\"New project\"]').click();\n            cy.get(\"[data-cy=form-dialog]\").within(() => {\n                cy.contains(\"New Project\").should('be.visible');\n                cy.get(\"[data-cy=form-cancel-btn]\").click();\n            });\n\n            //freeze project\n            cy.get('[aria-label=\"Freeze project\"]').click();\n            cy.doDataExplorerSelect(testProject.name);\n            cy.assertToolbarButtons(tooltips.adminFrozenProject);\n\n            //unfreeze project\n            cy.get('[aria-label=\"Unfreeze project\"]').click();\n            
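// Re-select the project so the toolbar reflects the unfrozen state.\n            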
cy.doDataExplorerSelect(testProject.name);\n            cy.assertToolbarButtons(tooltips.adminProject);\n\n            //Add to favorites\n            cy.doToolbarAction(\"Add to favorites\");\n            cy.waitForDom();\n            cy.get('[data-cy=favorite-star]').should('exist')\n                .parents('[data-cy=data-table-row]')\n                .contains(testProject.name)\n\n            //Add to public favorites\n            cy.doToolbarAction(\"Add to public favorites\");\n            cy.waitForDom();\n            cy.get('[data-cy=public-favorite-star]').should('exist')\n                .parents('[data-cy=data-table-row]')\n                .contains(testProject.name)\n\n            //Open with 3rd party client\n            cy.get('[aria-label=\"Open with 3rd party client\"]').click()\n            cy.get('[role=dialog]').contains('Open with 3rd party client')\n            cy.contains('Close').click()\n\n            //API Details\n            cy.get('[aria-label=\"API Details\"]').click()\n            cy.get('[role=dialog]').contains('API Details')\n            cy.contains('Close').click()\n        });\n    });\n\n    // The following test is enabled on Electron only, as Chromium and Firefox\n    // require permissions to access the clipboard.\n    it(\"handles project Copy UUID\", { browser: 'electron' }, () => {\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'ClipboardTestProject',\n        }).as('clipboardTestProject');\n        cy.getAll('@clipboardTestProject').then(([clipboardTestProject]) => {\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n\n            cy.doMPVTabSelect(\"Data\");\n            cy.doDataExplorerSelect(clipboardTestProject.name);\n\n            // Copy UUID\n            cy.get('[aria-label=\"Copy UUID\"]').click()\n            cy.window({ timeout: 10000 }).then(win => {\n                console.log('this is a load-bearing console.log');\n                win.focus();\n                win.navigator.clipboard.readText().then(text => {\n                    expect(text).to.equal(clipboardTestProject.uuid);\n                })}\n            );\n        });\n    });\n\n    // The following test is enabled on Electron only, as Chromium and Firefox\n    // require permissions to access the clipboard.\n    it(\"handles project Copy link to clipboard\", { browser: 'electron' }, () => {\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'ClipboardTestProject',\n        }).as('clipboardTestProject');\n        cy.getAll('@clipboardTestProject').then(([clipboardTestProject]) => {\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n            cy.doMPVTabSelect(\"Data\");\n            cy.doDataExplorerSelect(clipboardTestProject.name);\n\n            // Copy link to clipboard\n            cy.get('[aria-label=\"Copy link to clipboard\"]').click()\n            cy.window({ timeout: 10000 }).then(win => {\n                console.log('this is a load-bearing console.log');\n                win.focus();\n                win.navigator.clipboard.readText().then(text => {\n                    expect(text).to.match(/https\\:\\/\\/127\\.0\\.0\\.1\\:[0-9]+\\/projects\\/[a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}/);\n                })}\n            );\n        });\n    });\n\n    it('should behave correctly for multiple projects', () => {\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 
'TestProject1',\n        }).as('testProject1');\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject2',\n        }).as('testProject2');\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject3',\n        }).as('testProject3');\n        cy.createProject({\n            owningUser: activeUser,\n            projectName: 'TestProject4',\n        }).as('testProject4');\n        cy.createProject({\n            owningUser: activeUser,\n            projectName: 'TestProject5',\n        }).as('testProject5');\n        cy.getAll('@testProject1', '@testProject2', '@testProject3', '@testProject4', '@testProject5').then(\n            ([testProject1, testProject2, testProject3, testProject4, testProject5]) => {\n                //share with active user to test permissions\n                cy.shareWith(adminUser.token, activeUser.user.uuid, testProject1.uuid, 'can_read');\n\n                // non-admin actions\n                cy.loginAs(activeUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                cy.doMPVTabSelect('Data');\n                cy.assertDataExplorerContains(testProject4.name, true);\n                cy.assertDataExplorerContains(testProject5.name, true);\n\n                //assert toolbar buttons\n                cy.doDataExplorerSelect(testProject4.name);\n                cy.assertToolbarButtons(tooltips.nonAdminProject);\n                cy.doDataExplorerSelect(testProject5.name);\n                cy.assertToolbarButtons(tooltips.multiProject);\n\n                //assert read only project toolbar buttons\n                cy.contains('Shared with me').click();\n                cy.doDataExplorerSelect(testProject1.name);\n                cy.assertToolbarButtons(tooltips.readOnlyProject);\n\n                // admin actions\n                cy.loginAs(adminUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                cy.doMPVTabSelect('Data');\n                cy.assertDataExplorerContains(testProject1.name, true);\n                cy.assertDataExplorerContains(testProject2.name, true);\n                cy.assertDataExplorerContains(testProject3.name, true);\n\n                //assert admin project toolbar buttons\n                cy.doDataExplorerSelect(testProject1.name);\n                cy.assertToolbarButtons(tooltips.adminProject);\n                cy.doDataExplorerSelect(testProject2.name);\n                cy.assertToolbarButtons(tooltips.multiProject);\n\n                //check multi-project move to\n                cy.get(`[aria-label=\"Move to\"]`, { timeout: 5000 }).click();\n                cy.get('[data-cy=picker-dialog-project-search]').find('input').type(testProject3.name);\n                cy.get('[data-cy=projects-tree-search-picker]').contains(testProject3.name).click();\n                cy.get('[data-cy=form-submit-btn]').click();\n                cy.doMPVTabSelect('Data');\n\n                cy.assertDataExplorerContains(testProject3.name, true).click();\n                cy.waitForDom()\n                cy.doMPVTabSelect('Data');\n                cy.assertDataExplorerContains(testProject1.name, true);\n                cy.assertDataExplorerContains(testProject2.name, true);\n\n                //check multi-project trash\n                cy.doDataExplorerSelect(testProject1.name);\n                cy.doDataExplorerSelect(testProject2.name);\n                cy.doToolbarAction('Move to trash');\n                
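// Trashed projects should disappear from this listing but appear under Trash.\n                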
cy.assertDataExplorerContains(testProject1.name, false);\n                cy.assertDataExplorerContains(testProject2.name, false);\n                cy.contains('Trash').click();\n                cy.assertDataExplorerContains(testProject1.name, true);\n                cy.assertDataExplorerContains(testProject2.name, true);\n\n                //check multi-project unTrash\n                cy.doDataExplorerSelect(testProject1.name);\n                cy.doDataExplorerSelect(testProject2.name);\n                cy.get(`[aria-label=\"Restore\"]`, { timeout: 5000 }).eq(0).click();\n                cy.waitForDom();\n                cy.assertDataExplorerContains(testProject1.name, false);\n                cy.assertDataExplorerContains(testProject2.name, false);\n                cy.contains(testProject3.name).click();\n                cy.doMPVTabSelect('Data');\n                cy.assertDataExplorerContains(testProject1.name, true);\n                cy.assertDataExplorerContains(testProject2.name, true);\n            }\n        );\n    });\n\n    it('should select all after selecting in filtering view', () => {\n        cy.createProject({\n            owningUser: activeUser,\n            projectName: 'RedFish',\n        }).as('redfish');\n        cy.createProject({\n            owningUser: activeUser,\n            projectName: 'BlueFish',\n        }).as('bluefish');\n\n        cy.getAll('@redfish', '@bluefish').then(\n            ([redfish, bluefish]) => {\n                cy.loginAs(activeUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                cy.doMPVTabSelect('Data');\n\n                // Verify both projects present\n                cy.assertDataExplorerContains(redfish.name);\n                cy.assertDataExplorerContains(bluefish.name);\n\n                // Search redfish\n                cy.doDataExplorerSearch(redfish.name);\n                cy.assertDataExplorerContains(redfish.name);\n                cy.assertDataExplorerContains(bluefish.name, false);\n\n                // Select redfish\n                cy.doDataExplorerSelect(redfish.name);\n                cy.assertCheckboxes([redfish.uuid], true);\n\n                // Clear search\n                cy.doDataExplorerSearch('{selectall}{backspace}');\n                cy.assertDataExplorerContains(redfish.name);\n                cy.assertDataExplorerContains(bluefish.name);\n                // Verify unchecked\n                cy.assertCheckboxes([redfish.uuid], false);\n                cy.assertCheckboxes([bluefish.uuid], false);\n\n                // Check all and verify all checked\n                cy.doDataExplorerSelectAll();\n                cy.assertCheckboxes([redfish.uuid, bluefish.uuid], true);\n            });\n    });\n});\n\ndescribe('For collection resources', () => {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. 
Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser')\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    it('should behave correctly for a single collection', () => {\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: adminUser.user.uuid,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        }).as(\"testCollection\")\n        cy.getAll('@testCollection').then(([testCollection]) => {\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n\n            cy.doMPVTabSelect('Data');\n            cy.doDataExplorerSelect(testCollection.name);\n\n            // View details\n            cy.get('[aria-label=\"View details\"]').click();\n            cy.get('[data-cy=details-panel]').contains(testCollection.name).should('be.visible');\n            cy.get('[data-cy=close-details-btn]').click();\n\n            cy.window().then((win) => {\n                cy.stub(win, 'open').as('windowOpen');\n            });\n\n            // Open in new tab\n            cy.doToolbarAction(\"Open in new tab\");\n            cy.get('@windowOpen').should('be.called');\n\n            //Share\n            cy.get('[aria-label=\"Share\"]').click();\n            cy.get('.sharing-dialog').should('exist');\n            cy.contains('button', 'Close').click();\n\n            //edit collection\n            cy.get('[aria-label=\"Edit collection\"]').click();\n            cy.get(\"[data-cy=form-dialog]\").within(() => {\n                cy.contains(\"Edit Collection\").should('be.visible');\n                cy.get(\"[data-cy=form-cancel-btn]\").click();\n            });\n\n            //Make a copy\n            cy.get('[aria-label=\"Make a copy\"]').click();\n            cy.get(\"[data-cy=form-dialog]\").within(() => {\n                cy.contains(\"Make a copy\").should('be.visible');\n                cy.get(\"[data-cy=form-cancel-btn]\").click();\n            });\n\n            //Add to favorites\n            cy.doToolbarAction(\"Add to favorites\");\n            cy.waitForDom();\n            cy.get('[data-cy=favorite-star]').should('exist')\n                .parents('[data-cy=data-table-row]')\n                .contains(testCollection.name)\n\n            //Add to public favorites\n            cy.doToolbarAction(\"Add to public favorites\");\n            cy.waitForDom();\n            cy.get('[data-cy=public-favorite-star]').should('exist')\n                .parents('[data-cy=data-table-row]')\n                .contains(testCollection.name)\n\n            //Open with 3rd party client\n            cy.get('[aria-label=\"Open with 3rd party client\"]').click()\n            cy.get('[role=dialog]').contains('Open with 3rd party client')\n            cy.contains('Close').click()\n\n            //API Details\n            cy.get('[aria-label=\"API Details\"]').click()\n            cy.get('[role=dialog]').contains('API Details')\n            cy.contains('Close').click()\n        });\n    });\n\n    it('should behave correctly 
for multiple collections', () => {\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject1',\n        }).as('testProject1');\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: adminUser.user.uuid,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        }).as(\"testCollection1\")\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: adminUser.user.uuid,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        }).as(\"testCollection2\")\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: adminUser.user.uuid,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        }).as(\"testCollection3\")\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: adminUser.user.uuid,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        }).as(\"testCollection4\")\n        cy.getAll('@testProject1', '@testCollection1', '@testCollection2', '@testCollection3', '@testCollection4')\n            .then(([testProject1, testCollection1, testCollection2, testCollection3, testCollection4]) => {\n\n                cy.loginAs(adminUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                cy.doMPVTabSelect(\"Data\");\n                cy.assertDataExplorerContains(testProject1.name, true);\n                cy.assertDataExplorerContains(testCollection1.name, true);\n                cy.assertDataExplorerContains(testCollection2.name, true);\n                cy.assertDataExplorerContains(testCollection3.name, true);\n                cy.assertDataExplorerContains(testCollection4.name, true);\n\n                //assert toolbar buttons\n                cy.doDataExplorerSelect(testCollection1.name);\n                cy.assertToolbarButtons(tooltips.adminCollection);\n                cy.doDataExplorerSelect(testCollection2.name);\n                cy.assertToolbarButtons(tooltips.multiCollection);\n\n                //check multi-collection move to\n                cy.get(`[aria-label=\"Move to\"]`, { timeout: 5000 }).click();\n                cy.get('[data-cy=picker-dialog-project-search]').find('input').type(testProject1.name);\n                cy.get('[data-cy=projects-tree-search-picker]').contains(testProject1.name).click();\n                cy.get('[data-cy=form-submit-btn]').click();\n\n                cy.assertDataExplorerContains(testProject1.name, true).click();\n                cy.waitForDom();\n                cy.doMPVTabSelect(\"Data\");\n                cy.assertDataExplorerContains(testCollection1.name, true);\n                cy.assertDataExplorerContains(testCollection2.name, true);\n\n                //check multi-collection trash\n                cy.contains('Home Projects').click();\n                cy.doMPVTabSelect(\"Data\");\n                cy.get('[data-cy=data-table]').should('exist', { timeout: 10000 });\n                cy.doDataExplorerSelect(testCollection3.name);\n                cy.doDataExplorerSelect(testCollection4.name);\n                cy.doToolbarAction('Move to trash');\n                
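// Trashed collections should no longer be listed in the project.\n                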
cy.assertDataExplorerContains(testCollection3.name, false);\n                cy.assertDataExplorerContains(testCollection4.name, false);\n\n                //share with active user to test readonly permissions\n                cy.shareWith(adminUser.token, activeUser.user.uuid, testProject1.uuid, 'can_read');\n\n                //check read only project toolbar buttons\n                cy.loginAs(activeUser);\n\n                cy.contains('Shared with me').click();\n                cy.doDataExplorerSelect(testProject1.name);\n                cy.assertToolbarButtons(tooltips.readOnlyProject);\n                cy.get(\"[data-cy=data-table-row]\").contains(testProject1.name).click();\n                cy.waitForDom();\n                cy.doMPVTabSelect(\"Data\");\n                cy.doDataExplorerSelect(testCollection1.name);\n                cy.assertToolbarButtons(tooltips.readonlyCollection);\n                cy.doDataExplorerSelect(testCollection2.name);\n                cy.assertToolbarButtons(tooltips.readonlyMultiCollection);\n            }\n        );\n    });\n});\n\ndescribe('For process resources', () => {\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n    });\n\n    it('should behave correctly for a single process', () => {\n        cy.setupDockerImage('arvados/jobs')\n            .then((dockerImage) => {\n                cy.createDefaultContainerRequest(\n                    adminUser.token,\n                    dockerImage,\n                    {\n                        name: `test_container_request_1 ${Math.floor(Math.random() * 999999)}`,\n                        state: \"Committed\",\n                    },\n                ).as('testProcess');\n            });\n\n        cy.getAll('@testProcess').then(([testProcess]) => {\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n\n            cy.doMPVTabSelect('Workflow Runs');\n\n            cy.doDataExplorerSelect(testProcess.name);\n            cy.assertToolbarButtons(tooltips.adminRunningProcess);\n\n            //Cancel process first to avoid unnecessary log polling\n            cy.get('[aria-label=\"Cancel\"]').click();\n            cy.get('[data-cy=\"confirmation-dialog-ok-btn\"]').click();\n            cy.assertToolbarButtons(tooltips.adminOnHoldProcess);\n\n            // View details\n            cy.get('[aria-label=\"View details\"]').click();\n            cy.get('[data-cy=details-panel]').contains(testProcess.name).should('be.visible');\n            cy.get('[data-cy=close-details-btn]').click();\n\n            cy.window().then((win) => {\n                cy.stub(win, 'open').as('windowOpen');\n            });\n\n            // Open in new tab\n            cy.doToolbarAction(\"Open in new tab\");\n            cy.get('@windowOpen').should('be.called');\n\n            //Copy and re-run process\n            cy.doToolbarAction(\"Copy and re-run process\");\n            cy.get(\"[data-cy=form-dialog]\").within(() => {\n                cy.contains(\"Choose location for re-run\").should('be.visible');\n  
              cy.get(\"[data-cy=form-cancel-btn]\").click();\n            });\n\n            //edit process\n            cy.doToolbarAction(\"Edit process\");\n            cy.get(\"[data-cy=form-dialog]\").within(() => {\n                cy.contains(\"Edit Process\").should('be.visible');\n                cy.get(\"[data-cy=form-cancel-btn]\").click();\n            });\n\n            //Outputs\n            cy.doToolbarAction(\"Outputs\");\n            cy.contains('Output collection was trashed or deleted').should('exist');\n\n            //API Details\n            cy.doToolbarAction(\"API Details\");\n            cy.get('[role=dialog]').contains('API Details')\n            cy.contains('Close').click()\n\n            //Add to favorites\n            cy.doToolbarAction(\"Add to favorites\");\n            cy.get('[data-cy=favorite-star]').should('exist')\n                .parents('[data-cy=data-table-row]')\n                .contains(testProcess.name)\n\n            //Add to public favorites\n            cy.doToolbarAction(\"Add to public favorites\");\n            cy.get('[data-cy=public-favorite-star]').should('exist')\n                .parents('[data-cy=data-table-row]')\n                .contains(testProcess.name)\n\n            //Remove\n            cy.doToolbarAction(\"Remove\");\n            cy.get('[data-cy=confirmation-dialog]').within(() => {\n                cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n            });\n            cy.assertDataExplorerContains(testProcess.name, false);\n        });\n    });\n\n    it('should behave correctly for multiple processes', () => {\n        cy.setupDockerImage('arvados/jobs')\n            .then((dockerImage) => {\n                cy.createDefaultContainerRequest(\n                    adminUser.token,\n                    dockerImage,\n                    {\n                        name: `test_container_request_1 ${Math.floor(Math.random() * 999999)}`,\n                        state: \"Committed\",\n                    },\n                ).as('testProcess1');\n                cy.createDefaultContainerRequest(\n                    adminUser.token,\n                    dockerImage,\n                    {\n                        name: `test_container_request_2 ${Math.floor(Math.random() * 999999)}`,\n                        state: \"Committed\",\n                    },\n                ).as('testProcess2');\n            });\n\n        cy.getAll('@testProcess1', '@testProcess2').then(([testProcess1, testProcess2]) => {\n\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n\n            cy.doMPVTabSelect('Workflow Runs');\n            cy.assertDataExplorerContains(testProcess1.name, true);\n            cy.assertDataExplorerContains(testProcess2.name, true);\n\n            //assert toolbar buttons\n            cy.doDataExplorerSelect(testProcess1.name);\n            cy.assertToolbarButtons(tooltips.adminRunningProcess);\n            cy.doDataExplorerSelect(testProcess2.name);\n            cy.assertToolbarButtons(tooltips.multiProcess);\n\n            //multiprocess remove\n            cy.doToolbarAction(\"Remove\");\n            cy.get('[data-cy=confirmation-dialog]').within(() => {\n                cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n            });\n            cy.assertDataExplorerContains(testProcess1.name, false);\n            cy.assertDataExplorerContains(testProcess2.name, false);\n        });\n    });\n});\n\ndescribe('For workflow resources', () => {\n    let 
adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n    });\n\n    it('should behave correctly for a single workflow', () => {\n        cy.createWorkflow(adminUser.token, {\n            name: `TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n            definition: testWFDefinition,\n            owner_uuid: adminUser.user.uuid,\n            }).as('testWorkflow');\n        cy.getAll('@testWorkflow').then(function ([testWorkflow]) {\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n\n            cy.doMPVTabSelect('Data');\n            cy.assertDataExplorerContains(testWorkflow.name, true);\n\n            //assert toolbar buttons\n            cy.doDataExplorerSelect(testWorkflow.name);\n            cy.assertToolbarButtons(tooltips.adminWorkflow);\n\n            // View details\n            cy.get('[aria-label=\"View details\"]').click();\n            cy.get('[data-cy=details-panel]').contains(testWorkflow.name).should('be.visible');\n            cy.get('[data-cy=close-details-btn]').click();\n\n            cy.window().then((win) => {\n                cy.stub(win, 'open').as('windowOpen');\n            });\n\n            // Open in new tab\n            cy.doToolbarAction(\"Open in new tab\");\n            cy.get('@windowOpen').should('be.called');\n\n            //Run workflow\n            cy.doToolbarAction(\"Run Workflow\");\n            cy.get('[data-cy=choose-a-project-dialog]').within(() => {\n                cy.contains(\"Choose the project where the workflow will run\").should('be.visible');\n                cy.get('[data-cy=run-wf-project-picker-ok-button]').click();\n            });\n            cy.contains('Home Projects').click();\n            cy.doMPVTabSelect('Data');\n            cy.doDataExplorerSelect(testWorkflow.name);\n\n            //api details\n            cy.doToolbarAction(\"API Details\");\n            cy.get('[role=dialog]').contains('API Details')\n            cy.contains('Close').click()\n\n            //delete workflow\n            cy.doToolbarAction(\"Delete Workflow\");\n            cy.get('[data-cy=confirmation-dialog]').within(() => {\n                cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n            });\n            cy.contains('Removed').should('be.visible');\n            cy.assertDataExplorerContains(testWorkflow.name, false);\n        });\n    });\n\n    it('should behave correctly for multiple workflows', () => {\n        cy.createWorkflow(adminUser.token, {\n            name: `TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n            definition: testWFDefinition,\n            owner_uuid: adminUser.user.uuid,\n            }).as('testWorkflow1');\n        cy.createWorkflow(adminUser.token, {\n            name: `TestWorkflow${Math.floor(Math.random() * 999999)}.cwl`,\n            definition: testWFDefinition,\n            owner_uuid: adminUser.user.uuid,\n            }).as('testWorkflow2');\n        cy.getAll('@testWorkflow1', '@testWorkflow2').then(function ([testWorkflow1, testWorkflow2]) {\n            
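// Both workflows should be listed before exercising the multiselect toolbar.\n            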
cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n\n            cy.doMPVTabSelect('Data');\n            cy.assertDataExplorerContains(testWorkflow1.name, true);\n            cy.assertDataExplorerContains(testWorkflow2.name, true);\n\n            //assert toolbar buttons\n            cy.doDataExplorerSelect(testWorkflow1.name);\n            cy.assertToolbarButtons(tooltips.adminWorkflow);\n            cy.doDataExplorerSelect(testWorkflow2.name);\n            cy.assertToolbarButtons(tooltips.multiWorkflow);\n\n            //multi-workflow remove\n            cy.get('[aria-label=\"Delete Workflow\"]').click();\n            cy.get('[data-cy=confirmation-dialog]').within(() => {\n                cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n            });\n            cy.assertDataExplorerContains(testWorkflow1.name, false);\n            cy.assertDataExplorerContains(testWorkflow2.name, false);\n        });\n    });\n});\n\ndescribe('For groups', () => {\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n    });\n\n    it('should behave correctly for a single group', () => {\n        cy.createGroup(adminUser.token, {\n            group_class: \"role\",\n            name: `Test group ${Math.floor(Math.random() * 999999)}`,\n        }).as('testGroup');\n\n        cy.getAll('@testGroup').then(([testGroup]) => {\n            cy.loginAs(adminUser);\n            cy.contains('Groups').click();\n            cy.doDataExplorerSelect(testGroup.name);\n            cy.assertToolbarButtons(tooltips.nonAdminGroup);\n\n            // View details\n            cy.get('[aria-label=\"View details\"]').click();\n            cy.get('[data-cy=details-panel]').contains(testGroup.name).should('be.visible');\n            cy.get('[data-cy=close-details-btn]').click();\n\n            //API Details\n            cy.doToolbarAction(\"API Details\");\n            cy.get('[role=dialog]').contains('API Details')\n            cy.contains('Close').click()\n\n            //edit group\n            cy.doToolbarAction(\"Edit group\");\n            cy.get('[data-cy=form-dialog]').within(() => {\n                cy.get(\"[data-cy=form-cancel-btn]\").click();\n            });\n\n            //remove group\n            cy.doToolbarAction(\"Remove\");\n            cy.get('[data-cy=confirmation-dialog]').within(() => {\n                cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n            });\n            cy.contains('Removed').should('be.visible');\n            cy.assertDataExplorerContains(testGroup.name, false);\n        });\n    });\n\n    it('should behave correctly for built-in groups', () => {\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Groups');\n\n            cy.doDataExplorerSelect('All users');\n            cy.assertToolbarButtons(tooltips.builtInGroup);\n\n            // View details\n            cy.get('[aria-label=\"View details\"]').click();\n            cy.get('[data-cy=details-panel]').contains('All users').should('be.visible');\n            
cy.get('[data-cy=close-details-btn]').click();\n\n            //API Details\n            cy.doToolbarAction(\"API Details\");\n            cy.get('[role=dialog]').contains('API Details')\n            cy.contains('Close').click()\n\n            cy.doDataExplorerSelect('System group');\n            cy.assertToolbarButtons(tooltips.multiBuiltInGroup);\n    });\n\n    it('should behave correctly for multiple groups', () => {\n        cy.createGroup(adminUser.token, {\n            group_class: \"role\",\n            name: `Test group ${Math.floor(Math.random() * 999999)}`,\n        }).as('testGroup1');\n        cy.createGroup(adminUser.token, {\n            group_class: \"role\",\n            name: `Test group ${Math.floor(Math.random() * 999999)}`,\n        }).as('testGroup2');\n        cy.getAll('@testGroup1', '@testGroup2').then(([testGroup1, testGroup2]) => {\n            cy.loginAs(adminUser);\n            cy.contains('Groups').click();\n            cy.assertDataExplorerContains(testGroup1.name, true);\n            cy.assertDataExplorerContains(testGroup2.name, true);\n\n            //assert toolbar buttons\n            cy.doDataExplorerSelect(testGroup1.name);\n            cy.assertToolbarButtons(tooltips.nonAdminGroup);\n            cy.doDataExplorerSelect(testGroup2.name);\n            cy.assertToolbarButtons(tooltips.multiGroup);\n\n            //multi-group remove\n            cy.get('[aria-label=\"Remove\"]').click();\n            cy.get('[data-cy=confirmation-dialog]').within(() => {\n                cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n            });\n            cy.assertDataExplorerContains(testGroup1.name, false);\n            cy.assertDataExplorerContains(testGroup2.name, false);\n        });\n    });\n});\n\ndescribe('For users', () => {\n    let activeUser;\n    let adminUser;\n    let otherUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. 
Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin_M', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser('user', 'Active_M', 'User', false, true)\n            .as('activeUser')\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n        cy.getUser('otheruser', 'Other_M', 'User', false, true)\n            .as('otherUser').then(function() {\n                otherUser = this.otherUser;\n            });\n    });\n\n    it('should behave correctly for a single user', () => {\n        const groupName = `Test group (${Math.floor(999999 * Math.random())})`\n\n        cy.loginAs(adminUser);\n        cy.get('[data-cy=side-panel-tree]').contains('Groups').click();\n\n        // Create new group\n        cy.get('[data-cy=groups-panel-new-group]').click();\n        cy.get('[data-cy=form-dialog]')\n            .should('contain', 'New Group')\n            .within(() => {\n                cy.get('input[name=name]').type(groupName);\n                cy.get('[data-cy=users-field] input').type(\"active_m\");\n                cy.wait(1000) // wait for the autocomplete to load\n                cy.get('[data-cy=users-field] input').type(\"{enter}\");\n                cy.get('[data-cy=users-field] input').type(\"other_m\");\n                cy.wait(1000) // wait for the autocomplete to load\n                cy.get('[data-cy=users-field] input').type(\"{enter}\");\n            });\n        cy.get('[data-cy=form-dialog]').within(() => {\n            cy.get('[data-cy=form-submit-btn]').click();\n        })\n\n        cy.assertDataExplorerContains(groupName, true).click();\n        cy.assertDataExplorerContains(adminUser.user.full_name, true);\n        cy.assertDataExplorerContains(activeUser.user.full_name, true);\n        cy.assertDataExplorerContains(otherUser.user.full_name, true);\n\n        cy.doDataExplorerSelect(otherUser.user.full_name);\n\n        //API Details\n        cy.doToolbarAction(\"API Details\");\n        cy.get('[role=dialog]').contains('API Details')\n        cy.contains('Close').click()\n\n        //attributes\n        cy.doToolbarAction(\"Attributes\");\n        cy.get('[role=dialog]').contains('Attributes')\n        cy.contains('Close').click()\n\n        //remove\n        cy.get('[aria-label=\"Remove\"]').click();\n        cy.get('[data-cy=confirmation-dialog]').within(() => {\n            cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n        });\n        cy.contains('Removed').should('be.visible');\n        cy.assertDataExplorerContains(groupName, false);\n    });\n\n    it('should behave correctly for multiple users', () => {\n        const groupName = `Test group (${Math.floor(999999 * Math.random())})`\n\n        cy.loginAs(adminUser);\n        cy.get('[data-cy=side-panel-tree]').contains('Groups').click();\n\n        // Create new group\n        cy.get('[data-cy=groups-panel-new-group]').click();\n        cy.get('[data-cy=form-dialog]')\n            .should('contain', 'New Group')\n            .within(() => {\n                cy.get('input[name=name]').type(groupName);\n                cy.get('[data-cy=users-field] input').type(\"active\");\n                cy.wait(1000) // wait for the autocomplete to load\n                cy.get('[data-cy=users-field] 
input').type(\"{enter}\");\n                cy.get('[data-cy=users-field] input').type(\"other\");\n                cy.wait(1000) // wait for the autocomplete to load\n                cy.get('[data-cy=users-field] input').type(\"{enter}\");\n            });\n        cy.get('[data-cy=form-dialog]').within(() => {\n            cy.get('[data-cy=form-submit-btn]').click();\n        })\n\n        cy.assertDataExplorerContains(groupName, true).click();\n        cy.assertDataExplorerContains(adminUser.user.full_name, true);\n        cy.assertDataExplorerContains(activeUser.user.full_name, true);\n        cy.assertDataExplorerContains(otherUser.user.full_name, true);\n\n        // assert toolbar buttons\n        cy.doDataExplorerSelect(activeUser.user.full_name);\n        cy.assertToolbarButtons(tooltips.nonAdminUser);\n        cy.doDataExplorerSelect(otherUser.user.full_name);\n        cy.assertToolbarButtons(tooltips.multiUser);\n    });\n});\n\ndescribe('For external credentials', () => {\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n    });\n\n    it('should behave correctly for a single external credential', () => {\n        cy.createExternalCredential(adminUser.token).as('testExternalCredential');\n        cy.getAll('@testExternalCredential').then(([testExternalCredential]) => {\n            cy.loginAs(adminUser);\n            cy.visit('/external_credentials');\n\n            cy.assertDataExplorerContains(testExternalCredential.name, true);\n\n            //assert toolbar buttons\n            cy.doDataExplorerSelect(testExternalCredential.name);\n            cy.assertToolbarButtons(tooltips.externalCredential);\n\n            //Share\n            cy.get('[aria-label=\"Share\"]').click();\n            cy.get('.sharing-dialog').should('exist');\n            cy.contains('button', 'Close').click();\n\n            //edit credential\n            cy.get('[aria-label=\"Edit credential\"]').click();\n            cy.get(\"[data-cy=form-dialog]\").within(() => {\n                cy.contains(\"Edit External Credential\").should('be.visible');\n                cy.get(\"[data-cy=form-cancel-btn]\").click();\n            });\n\n            //API Details\n            cy.get('[aria-label=\"API Details\"]').click()\n            cy.get('[role=dialog]').contains('API Details')\n            cy.contains('Close').click()\n\n            //remove credential\n            cy.get('[aria-label=\"Remove\"]').click();\n            cy.get('[data-cy=confirmation-dialog]').within(() => {\n                cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n            });\n            cy.assertDataExplorerContains(testExternalCredential.name, false);\n        });\n    });\n\n    it('should behave correctly for multiple external credentials', () => {\n        cy.createExternalCredential(adminUser.token).as('testExternalCredential1');\n        cy.createExternalCredential(adminUser.token).as('testExternalCredential2');\n        cy.getAll('@testExternalCredential1', '@testExternalCredential2').then(([testExternalCredential1, 
testExternalCredential2]) => {\n            cy.loginAs(adminUser);\n            cy.visit('/external_credentials');\n\n            cy.assertDataExplorerContains(testExternalCredential1.name, true);\n            cy.assertDataExplorerContains(testExternalCredential2.name, true);\n\n            //assert toolbar buttons\n            cy.doDataExplorerSelect(testExternalCredential1.name);\n            cy.assertToolbarButtons(tooltips.externalCredential);\n            cy.doDataExplorerSelect(testExternalCredential2.name);\n            cy.assertToolbarButtons(tooltips.multiExternalCredential);\n\n            //multi-external credential remove\n            cy.get('[aria-label=\"Remove\"]').click();\n            cy.get('[data-cy=confirmation-dialog]').within(() => {\n                cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n            });\n            cy.assertDataExplorerContains(testExternalCredential1.name, false);\n            cy.assertDataExplorerContains(testExternalCredential2.name, false);\n        });\n    });\n});\n\ndescribe('For multiple resource types', () => {\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser')\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n    });\n\n    it('shows the appropriate buttons in the multiselect toolbar', () => {\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'TestProject',\n        }).as('testProject');\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: adminUser.user.uuid,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        }).as(\"testCollection\");\n        cy.setupDockerImage('arvados/jobs')\n            .then((dockerImage) => {\n                cy.createDefaultContainerRequest(\n                    adminUser.token,\n                    dockerImage,\n                    {\n                        name: `test_container_request_1 ${Math.floor(Math.random() * 999999)}`,\n                        state: \"Committed\",\n                    },\n                ).as('testProcess');\n            });\n\n        cy.getAll('@testProject', '@testCollection', '@testProcess')\n            .then(([testProject, testCollection,  testProcess]) => {\n\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n\n            cy.doMPVTabSelect('Data');\n            //add resources to favorites so they are all in the same table\n            cy.doDataExplorerSelect(testProject.name);\n            cy.doToolbarAction(\"Add to favorites\");\n            //deselect project\n            cy.doDataExplorerSelect(testProject.name);\n            cy.doDataExplorerSelect(testCollection.name);\n            cy.doToolbarAction(\"Add to favorites\");\n            cy.doMPVTabSelect('Workflow Runs');\n            cy.doDataExplorerSelect(testProcess.name);\n            cy.doToolbarAction(\"Add to favorites\");\n\n            cy.contains('My Favorites').click();\n\n            cy.assertDataExplorerContains(testProject.name, true);\n            cy.assertDataExplorerContains(testCollection.name, true);\n            cy.assertDataExplorerContains(testProcess.name, true);\n\n            cy.doDataExplorerSelect(testProject.name);\n            cy.doDataExplorerSelect(testCollection.name);\n            cy.assertToolbarButtons(tooltips.projectAndCollection);\n\n            cy.get('[data-cy=data-table-multiselect-popover]').click();\n            cy.get('[data-cy=multiselect-popover-None]').click();\n            cy.doDataExplorerSelect(testProcess.name);\n            cy.doDataExplorerSelect(testCollection.name);\n            cy.assertToolbarButtons(tooltips.processAndCollection);\n\n            cy.get('[data-cy=data-table-multiselect-popover]').click();\n            cy.get('[data-cy=multiselect-popover-None]').click();\n            cy.doDataExplorerSelect(testProcess.name);\n            cy.doDataExplorerSelect(testProject.name);\n            cy.assertToolbarButtons(tooltips.processAndProject);\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/page-not-found.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe('Page not found tests', function() {\n    let adminUser;\n\n    before(function() {\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser').then(function() {\n                adminUser = this.adminUser;\n            }\n        );\n    });\n\n    it('shows not found page', function() {\n        // when\n        cy.loginAs(adminUser);\n        cy.goToPath(`/this/is/an/invalid/route`);\n\n        // then\n        cy.get('[data-cy=not-found-page]').should('exist');\n        cy.get('[data-cy=not-found-content]').should('exist');\n    });\n\n\n    it('shows not found popup', function() {\n        // given\n        [\n            '/projects/zzzzz-j7d0g-nonexistingproj',\n            '/projects/zzzzz-tpzed-nonexistinguser',\n        ].forEach(function(path) {\n            // Using de slower loginAs() method to avoid bumping into dialog\n            // dismissal issues that are not related to this test.\n            cy.loginAs(adminUser);\n\n            // when\n            cy.goToPath(path);\n            cy.doMPVTabSelect(\"Data\");\n            cy.waitForDom();\n\n            // then\n            cy.get('[data-cy=default-view]').should('exist');\n        });\n\n        [\n            '/processes/zzzzz-xvhdp-nonexistingproc',\n            '/collections/zzzzz-4zz18-nonexistingcoll'\n        ].forEach(function(path) {\n            cy.loginAs(adminUser);\n\n            cy.goToPath(path);\n\n            cy.get('[data-cy=not-found-view]').should('exist');\n        });\n    });\n\n    it('shows not found popup in workflow tab', function() {\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: 'test-project',\n        })\n        cy.loginAs(adminUser);\n        cy.doSidePanelNavigation('Home Projects');\n        cy.waitForDom();\n\n        cy.doMPVTabSelect(\"Data\");\n        cy.get('[data-cy=project-panel]').contains(\"test-project\").click();\n\n        cy.get('[data-cy=mpv-tabs]').contains(\"Workflow Runs\").click();\n        cy.contains('button[aria-selected=\"true\"]', 'Workflow Runs').should('exist');\n        cy.contains('No workflow runs found').should('exist');\n    });\n});\n\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/process.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContainerState } from \"models/container\";\n\ndescribe(\"Process tests\", function () {\n    let activeUser;\n    let adminUser;\n    let dockerImage;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser(\"admin\", \"Admin\", \"User\", true, true)\n            .as(\"adminUser\")\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser(\"activeuser\", \"Active\", \"User\", false, true)\n            .as(\"activeUser\")\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n        // don't abort on expected errors\n        cy.on('uncaught:exception', (err, runnable) => {\n            console.error(err);\n        });\n    });\n\n    beforeEach(() => {\n        // Since setupDockerImage uses createCollection\n        // it will be cleaned up after every test even if we\n        // do this in before()\n        cy.setupDockerImage('arvados/jobs')\n            .as('dockerImageAlias')\n            .then((dockerImageAlias) => {\n                dockerImage = dockerImageAlias;\n            });\n    });\n\n    describe(\"Details panel\", function () {\n        it(\"shows process details\", function () {\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                {\n                    name: `test_container_request ${Math.floor(Math.random() * 999999)}`,\n                    state: \"Committed\",\n                },\n            ).then(function (containerRequest) {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.get(\"[data-cy=process-details-card]\").should('exist', { timeout: 10000 }).and(\"contain\", containerRequest.name);\n                cy.get(\"[data-cy=process-details-attributes-modifiedby-user]\").contains(`Active User (${activeUser.user.uuid})`);\n                cy.get(\"[data-cy=process-details-attributes-runtime-user]\").should(\"not.exist\");\n            });\n\n            // Fake submitted by another user to test \"runtime user\" field.\n            //\n            // Need to override group contents because we use group\n            // contents to fetch both the container_request and\n            // container record in a single API call.\n            cy.intercept({ method: \"GET\", url: \"**/arvados/v1/groups/contents?*\" }, req => {\n                req.on('response', res => {\n                    if (!res.body.items) {\n                        return;\n                    }\n                    res.body.items.forEach(item => {\n                        item.modified_by_user_uuid = \"zzzzz-tpzed-000000000000000\";\n                    });\n                });\n            });\n\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                {\n                    name: `test_container_request ${Math.floor(Math.random() * 999999)}`,\n                    state: \"Committed\",\n                    properties: {\n                        cwl_input: {foo: 
\"bar\"},\n                        cwl_output: {baz: \"qux\"},\n                        foo: \"bar\"\n                    },\n                },\n            ).then(function (containerRequest) {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.get(\"[data-cy=process-details-card]\").should(\"contain\", containerRequest.name);\n                cy.get(\"[data-cy=process-details-attributes-modifiedby-user]\").contains(`zzzzz-tpzed-000000000000000`);\n                cy.get(\"[data-cy=process-details-attributes-runtime-user]\").contains(`Active User (${activeUser.user.uuid})`);\n                cy.assertPropertyTag(\"foo\", \"bar\");\n                // property values are unimportant here, but [object Object] displays prior to #23063\n                cy.assertPropertyTag(\"cwl_input\", \"[object Object]\", false);\n                cy.assertPropertyTag(\"cwl_output\", \"[object Object]\", false);\n            });\n        });\n\n        it(\"should show runtime status indicators\", function () {\n            // Setup running container with runtime_status error & warning messages\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                {\n                    name: `test_container_request ${Math.floor(Math.random() * 999999)}`,\n                    state: \"Committed\",\n                },\n            ).as(\"containerRequest\")\n                .then(function (containerRequest) {\n                    expect(containerRequest.state).to.equal(\"Committed\");\n                    expect(containerRequest.container_uuid).not.to.be.equal(\"\");\n\n                    cy.getContainer(activeUser.token, containerRequest.container_uuid).then(function (queuedContainer) {\n                        expect(queuedContainer.state).to.be.equal(\"Queued\");\n                    });\n                    cy.updateContainer(adminUser.token, containerRequest.container_uuid, {\n                        state: \"Locked\",\n                    }).then(function (lockedContainer) {\n                        expect(lockedContainer.state).to.be.equal(\"Locked\");\n\n                        cy.updateContainer(adminUser.token, lockedContainer.uuid, {\n                            state: \"Running\",\n                            runtime_status: {\n                                error: \"Something went wrong\",\n                                errorDetail: \"Process exited with status 1\",\n                                warning: \"Free disk space is low\",\n                            },\n                        })\n                            .as(\"runningContainer\")\n                            .then(function (runningContainer) {\n                                expect(runningContainer.state).to.be.equal(\"Running\");\n                                expect(runningContainer.runtime_status).to.be.deep.equal({\n                                    error: \"Something went wrong\",\n                                    errorDetail: \"Process exited with status 1\",\n                                    warning: \"Free disk space is low\",\n                                });\n                            });\n                    });\n                });\n            // Test that the UI shows the error and warning messages\n            cy.getAll(\"@containerRequest\", \"@runningContainer\").then(function ([containerRequest]) {\n                cy.loginAs(activeUser);\n                
cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.get(\"[data-cy=process-runtime-status-error]\")\n                    .should(\"contain\", \"Something went wrong\")\n                    .and(\"contain\", \"Process exited with status 1\");\n                cy.get(\"[data-cy=process-runtime-status-warning]\")\n                    .should(\"contain\", \"Free disk space is low\")\n                    .and(\"contain\", \"No additional warning details available\");\n            });\n\n            // Force container_count for testing\n            let containerCount = 2;\n            cy.intercept({ method: \"GET\", url: \"**/arvados/v1/groups/contents?*\" }, req => {\n                req.on('response', res => {\n                    if (!res.body.items) {\n                        return;\n                    }\n                    res.body.items.forEach(item => {\n                        item.container_count = containerCount;\n                    });\n                });\n            }).as(\"intercept1\");\n\n            cy.getAll(\"@containerRequest\", \"@runningContainer\", \"@intercept1\").then(function ([containerRequest]) {\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.waitForDom();\n                cy.reload();\n                cy.waitForDom();\n                cy.get(\"[data-cy=process-runtime-status-retry-warning]\", { timeout: 7000 }).should(\"contain\", \"Process retried 1 time\")\n            }).as(\"retry1\");\n\n            cy.getAll(\"@containerRequest\", \"@runningContainer\", \"@retry1\").then(function ([containerRequest]) {\n                containerCount = 3;\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.waitForDom();\n                cy.reload();\n                cy.waitForDom();\n                cy.get(\"[data-cy=process-runtime-status-retry-warning]\", { timeout: 7000 }).should(\"contain\", \"Process retried 2 times\");\n            });\n        });\n\n        it(\"allows copying processes\", function () {\n            const crName = \"first_container_request\";\n            const copiedCrName = \"copied_container_request\";\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                { name: crName, state: \"Committed\" },\n            ).then(function (containerRequest) {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.get(\"[data-cy=process-details-card]\").should(\"contain\", crName);\n                cy.get(\"[data-cy=process-details-card]\").within(() => {\n                    cy.get('[data-title=\"Copy and re-run process\"]').click();\n                })\n            });\n\n            cy.get(\"[data-cy=form-dialog]\").within(() => {\n                cy.get(\"input[name=name]\").clear().type(copiedCrName);\n                cy.get(\"[data-cy=projects-tree-home-tree-picker]\").click();\n                cy.get(\"[data-cy=form-submit-btn]\").click();\n            });\n\n            cy.get(\"[data-cy=process-details-card]\").should(\"contain\", copiedCrName);\n            cy.get(\"[data-cy=process-details-card]\").find(\"button\").contains(\"Run\");\n        });\n\n        const getFakeContainer = fakeContainerUuid => ({\n            href: `/containers/${fakeContainerUuid}`,\n            kind: \"arvados#container\",\n            etag: \"ecfosljpnxfari9a8m7e4yv06\",\n            uuid: fakeContainerUuid,\n            
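// Remaining fields are static fixture values approximating a real container record\n            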
owner_uuid: \"zzzzz-tpzed-000000000000000\",\n            created_at: \"2023-02-13T15:55:47.308915000Z\",\n            modified_by_user_uuid: \"zzzzz-tpzed-000000000000000\",\n            modified_at: \"2023-02-15T19:12:45.987086000Z\",\n            command: [\n                \"arvados-cwl-runner\",\n                \"--api=containers\",\n                \"--local\",\n                \"--project-uuid=zzzzz-j7d0g-yr18k784zplfeza\",\n                \"/var/lib/cwl/workflow.json#main\",\n                \"/var/lib/cwl/cwl.input.json\",\n            ],\n            container_image: \"4ad7d11381df349e464694762db14e04+303\",\n            cwd: \"/var/spool/cwl\",\n            environment: {},\n            exit_code: null,\n            finished_at: null,\n            locked_by_uuid: null,\n            log: null,\n            mounts: {\n                '/var/lib/cwl/workflow.json': {\n                    kind: 'json',\n                    content: {},\n                },\n                '/var/spool/cwl': {\n                    kind: 'tmp',\n                    capacity: 1000000,\n                },\n            },\n            output: null,\n            output_path: \"/var/spool/cwl\",\n            progress: null,\n            runtime_constraints: {\n                API: true,\n                cuda: {\n                    device_count: 0,\n                    driver_version: \"\",\n                    hardware_capability: \"\",\n                },\n                keep_cache_disk: 2147483648,\n                keep_cache_ram: 0,\n                ram: 1342177280,\n                vcpus: 1,\n            },\n            runtime_status: {},\n            started_at: null,\n            auth_uuid: null,\n            scheduling_parameters: {\n                max_run_time: 0,\n                partitions: [],\n                preemptible: false,\n            },\n            runtime_user_uuid: \"zzzzz-tpzed-vllbpebicy84rd5\",\n            runtime_auth_scopes: [\"all\"],\n            lock_count: 2,\n            gateway_address: null,\n            interactive_session_started: false,\n            output_storage_classes: [\"default\"],\n            output_properties: {},\n            cost: 0.0,\n            subrequests_cost: 0.0,\n        });\n\n        it(\"shows cancel button when appropriate\", function () {\n            // Ignore collection requests\n            cy.intercept(\n                { method: \"GET\", url: `**/arvados/v1/collections/*` },\n                {\n                    statusCode: 200,\n                    body: {},\n                }\n            );\n\n            // Uncommitted container\n            const crUncommitted = `Test process ${Math.floor(Math.random() * 999999)}`;\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                { name: crUncommitted, state: \"Uncommitted\" },\n            ).then(function (containerRequest) {\n                cy.loginAs(activeUser);\n                // Navigate to process and verify run / cancel button\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.waitForDom();\n                cy.get(\"[data-cy=process-details-card]\").should(\"contain\", crUncommitted);\n                cy.get(\"[data-cy=process-run-button]\").should(\"exist\");\n                cy.get(\"[data-cy=process-cancel-button]\").should(\"not.exist\");\n            });\n\n            // Queued container\n            const crQueued = `Test process ${Math.floor(Math.random() * 
999999)}`;\n            const fakeCtrUuid = \"zzzzz-dz642-000000000000001\";\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                { name: crQueued, state: \"Committed\" },\n            ).then(function (containerRequest) {\n                // Fake container uuid\n                cy.intercept({ method: \"GET\", url: `**/arvados/v1/groups/contents?*${containerRequest.uuid}*` }, req => {\n                    req.on('response', res => {\n                        if (!res.body.items) {\n                            return;\n                        }\n                        res.body.items.forEach(item => {\n                            item.container_uuid = fakeCtrUuid;\n                            item.priority = 500;\n                            item.state = \"Committed\";\n                        });\n                        if (!res.body.included) {\n                            return;\n                        }\n                        const container = getFakeContainer(fakeCtrUuid);\n                        res.body.included = [{ ...container, state: \"Queued\", priority: 500 }];\n                    });\n                });\n\n                // Navigate to process and verify cancel button\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.waitForDom();\n                cy.get(\"[data-cy=process-details-card]\").should(\"contain\", crQueued);\n                cy.get(\"[data-cy=process-cancel-button]\").contains(\"Cancel\");\n            });\n\n            // Locked container\n            const crLocked = `Test process ${Math.floor(Math.random() * 999999)}`;\n            const fakeCtrLockedUuid = \"zzzzz-dz642-000000000000002\";\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                { name: crLocked, state: \"Committed\" },\n            ).then(function (containerRequest) {\n                // Fake container uuid\n                cy.intercept({ method: \"GET\", url: `**/arvados/v1/groups/contents?*${containerRequest.uuid}*` }, req => {\n                    req.on('response', res => {\n                        if (!res.body.items) {\n                            return;\n                        }\n                        res.body.items.forEach(item => {\n                            item.container_uuid = fakeCtrLockedUuid;\n                            item.priority = 500;\n                            item.state = \"Committed\";\n                        });\n                        if (!res.body.included) {\n                            return;\n                        }\n                        const container = getFakeContainer(fakeCtrLockedUuid);\n                        res.body.included = [{ ...container, state: \"Locked\", priority: 500 }];\n                    });\n                });\n\n                // Navigate to process and verify cancel button\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.waitForDom();\n                cy.get(\"[data-cy=process-details-card]\").should(\"contain\", crLocked);\n                cy.get(\"[data-cy=process-cancel-button]\").contains(\"Cancel\");\n            });\n\n            // On Hold container\n            const crOnHold = `Test process ${Math.floor(Math.random() * 999999)}`;\n            const fakeCtrOnHoldUuid = \"zzzzz-dz642-000000000000003\";\n            cy.createDefaultContainerRequest(\n                
activeUser.token,\n                dockerImage,\n                { name: crOnHold, state: \"Committed\" },\n            ).then(function (containerRequest) {\n                // Fake container uuid\n                cy.intercept({ method: \"GET\", url: `**/arvados/v1/groups/contents?*${containerRequest.uuid}*` }, req => {\n                    req.on('response', res => {\n                        if (!res.body.items) {\n                            return;\n                        }\n                        res.body.items.forEach(item => {\n                            item.container_uuid = fakeCtrOnHoldUuid;\n                            item.priority = 0;\n                            item.state = \"Committed\";\n                        });\n                        if (!res.body.included) {\n                            return;\n                        }\n                        const container = getFakeContainer(fakeCtrOnHoldUuid);\n                        res.body.included = [{ ...container, state: \"Queued\", priority: 0 }];\n                    });\n                });\n\n                // Navigate to process and verify cancel button\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.waitForDom();\n                cy.get(\"[data-cy=process-details-card]\").should(\"contain\", crOnHold);\n                cy.get(\"[data-cy=process-run-button]\").should(\"exist\");\n                cy.get(\"[data-cy=process-cancel-button]\").should(\"not.exist\");\n            });\n        });\n\n        it(\"shows service button and opens URL\", () => {\n            const crName = `test_container_request ${Math.floor(Math.random() * 999999)}`;\n\n            // Fake service\n            const services = {\n                \"80\": {\n                access: \"public\",\n                label: \"My Service\",\n                initial_url: \"http://example.com/\",\n                initial_path: \"\",\n            }};\n\n            // Fake container state and services\n            cy.intercept({ method: \"GET\", url: `**/arvados/v1/groups/contents?*` }, req => {\n                req.on('response', res => {\n                    if (!res.body.included || res.body.included.length === 0) {\n                        return;\n                    }\n                    res.body.included[0].state = ContainerState.RUNNING;\n                    res.body.included[0].published_ports = services;\n                });\n            });\n\n            // Create process\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                { name: crName, state: \"Committed\" },\n            ).then(function (containerRequest) {\n                cy.loginAs(activeUser);\n\n                // Navigate to process and verify name\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.get(\"[data-cy=process-details-card]\").should('exist', { timeout: 10000 }).and(\"contain\", containerRequest.name);\n\n                // Stub the global window.open after last cy.visit to avoid being cleared\n                cy.window().then((win) => {\n                    cy.stub(win, 'open').as('open');\n                });\n\n                // Click service button\n                cy.get('[data-cy=service-button]')\n                    .should('have.length', 1)\n                    .should('have.text', `Connect to ${services['80'].label}`)\n                    .click();\n                // Verify correct URL opened\n        
        cy.get('@open').should(\"have.been.calledWith\", services['80'].initial_url);\n            });\n        });\n    });\n\n    describe(\"Logs panel\", function () {\n        it(\"shows live process logs\", function () {\n            // Fake container uuid\n            cy.intercept({ method: \"GET\", url: `**/arvados/v1/groups/contents?*` }, req => {\n                req.on('response', res => {\n                    if (!res.body.included || res.body.included.length === 0) {\n                        return;\n                    }\n                    res.body.included[0].state = ContainerState.RUNNING;\n                });\n            });\n\n            const crName = \"test_container_request\";\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                { name: crName, state: \"Committed\" },\n            ).then(function (containerRequest) {\n                // Create empty log file before loading process page\n                cy.appendLog(adminUser.token, containerRequest.uuid, \"stdout.txt\", [\"\"]);\n\n                cy.loginAs(activeUser);\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.get(\"[data-cy=process-details-card]\").should(\"contain\", crName);\n                cy.doMPVTabSelect(\"Logs\");\n                cy.get(\"[data-cy=process-logs]\").should(\"contain\", \"No logs yet\").and(\"not.contain\", \"hello world\");\n\n                // Append a log line\n                cy.appendLog(adminUser.token, containerRequest.uuid, \"stdout.txt\", [\"2023-07-18T20:14:48.128642814Z hello world\"]).then(() => {\n                    cy.doMPVTabSelect(\"Logs\");\n                    cy.get(\"[data-cy=process-logs]\", { timeout: 7000 }).should(\"not.contain\", \"No logs yet\").and(\"contain\", \"hello world\");\n                });\n\n                // Append new log line to different file\n                cy.appendLog(adminUser.token, containerRequest.uuid, \"stderr.txt\", [\"2023-07-18T20:14:49.128642814Z hello new line\"]).then(() => {\n                    cy.doMPVTabSelect(\"Logs\");\n                    cy.get(\"[data-cy=process-logs]\", { timeout: 7000 }).should(\"not.contain\", \"No logs yet\").and(\"contain\", \"hello new line\");\n                });\n            });\n        });\n\n        it(\"filters process logs by event type\", function () {\n            const nodeInfoLogs = [\n                \"Host Information\",\n                \"Linux compute-99cb150b26149780de44b929577e1aed-19rgca8vobuvc4p 5.4.0-1059-azure #62~18.04.1-Ubuntu SMP Tue Sep 14 17:53:18 UTC 2021 x86_64 x86_64 x86_64 GNU/Linux\",\n                \"CPU Information\",\n                \"processor  : 0\",\n                \"vendor_id  : GenuineIntel\",\n                \"cpu family : 6\",\n                \"model      : 79\",\n                \"model name : Intel(R) Xeon(R) CPU E5-2673 v4 @ 2.30GHz\",\n            ];\n            const crunchRunLogs = [\n                \"2022-03-22T13:56:22.542417997Z using local keepstore process (pid 3733) at http://localhost:46837, writing logs to keepstore.txt in log collection\",\n                \"2022-03-22T13:56:26.237571754Z crunch-run 2.4.0~dev20220321141729 (go1.17.1) started\",\n                \"2022-03-22T13:56:26.244704134Z crunch-run process has uid=0(root) gid=0(root) groups=0(root)\",\n                \"2022-03-22T13:56:26.244862836Z Executing container 'zzzzz-dz642-1wokwvcct9s9du3' using docker runtime\",\n                
\"2022-03-22T13:56:26.245037738Z Executing on host 'compute-99cb150b26149780de44b929577e1aed-19rgca8vobuvc4p'\",\n            ];\n            const stdoutLogs = [\n                \"2022-03-22T13:56:22.542417987Z Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec dui nisi, hendrerit porta sapien a, pretium dignissim purus.\",\n                \"2022-03-22T13:56:22.542417997Z Integer viverra, mauris finibus aliquet ultricies, dui mauris cursus justo, ut venenatis nibh ex eget neque.\",\n                \"2022-03-22T13:56:22.542418007Z In hac habitasse platea dictumst.\",\n                \"2022-03-22T13:56:22.542418027Z Fusce fringilla turpis id accumsan faucibus. Donec congue congue ex non posuere. In semper mi quis tristique rhoncus.\",\n                \"2022-03-22T13:56:22.542418037Z Interdum et malesuada fames ac ante ipsum primis in faucibus.\",\n                \"2022-03-22T13:56:22.542418047Z Quisque fermentum tortor ex, ut suscipit velit feugiat faucibus.\",\n                \"2022-03-22T13:56:22.542418057Z Donec vitae porta risus, at luctus nulla. Mauris gravida iaculis ipsum, id sagittis tortor egestas ac.\",\n                \"2022-03-22T13:56:22.542418067Z Maecenas condimentum volutpat nulla. Integer lacinia maximus risus eu posuere.\",\n                \"2022-03-22T13:56:22.542418077Z Donec vitae leo id augue gravida bibendum.\",\n                \"2022-03-22T13:56:22.542418087Z Nam libero libero, pretium ac faucibus elementum, mattis nec ex.\",\n                \"2022-03-22T13:56:22.542418097Z Nullam id laoreet nibh. Vivamus tellus metus, pretium quis justo ut, bibendum varius metus. Pellentesque vitae accumsan lorem, quis tincidunt augue.\",\n                \"2022-03-22T13:56:22.542418107Z Aliquam viverra nisi nulla, et efficitur dolor mattis in.\",\n                \"2022-03-22T13:56:22.542418117Z Sed at enim sit amet nulla tincidunt mattis. Aenean eget aliquet ex, non ultrices ex. Nulla ex tortor, vestibulum aliquam tempor ac, aliquam vel est.\",\n                \"2022-03-22T13:56:22.542418127Z Fusce auctor faucibus libero id venenatis. Etiam sodales, odio eu cursus efficitur, quam sem blandit ex, quis porttitor enim dui quis lectus. 
In id tincidunt felis.\",\n                \"2022-03-22T13:56:22.542418137Z Phasellus non ex quis arcu tempus faucibus molestie in sapien.\",\n                \"2022-03-22T13:56:22.542418147Z Duis tristique semper dolor, vitae pulvinar risus.\",\n                \"2022-03-22T13:56:22.542418157Z Aliquam tortor elit, luctus nec tortor eget, porta tristique nulla.\",\n                \"2022-03-22T13:56:22.542418167Z Nulla eget mollis ipsum.\",\n            ];\n\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                { name: \"test_container_request\", state: \"Committed\" },\n            ).then(function (containerRequest) {\n                cy.appendLog(adminUser.token, containerRequest.uuid, \"node-info.txt\", nodeInfoLogs).as(\"nodeInfoLogs\");\n                cy.appendLog(adminUser.token, containerRequest.uuid, \"crunch-run.txt\", crunchRunLogs).as(\"crunchRunLogs\");\n                cy.appendLog(adminUser.token, containerRequest.uuid, \"stdout.txt\", stdoutLogs).as(\"stdoutLogs\");\n\n                cy.getAll(\"@stdoutLogs\", \"@nodeInfoLogs\", \"@crunchRunLogs\").then(function () {\n                    cy.loginAs(activeUser);\n                    cy.goToPath(`/processes/${containerRequest.uuid}`);\n                    cy.waitForDom();\n                    cy.doMPVTabSelect(\"Logs\");\n                    // Should show main logs by default\n                    cy.get(\"[data-cy=process-logs-filter]\", { timeout: 7000 }).should(\"contain\", \"Main logs\");\n                    cy.get(\"[data-cy=process-logs]\")\n                        .should(\"contain\", stdoutLogs[Math.floor(Math.random() * stdoutLogs.length)])\n                        .and(\"not.contain\", nodeInfoLogs[Math.floor(Math.random() * nodeInfoLogs.length)])\n                        .and(\"contain\", crunchRunLogs[Math.floor(Math.random() * crunchRunLogs.length)]);\n                    // Select 'All logs'\n                    cy.get(\"[data-cy=process-logs-filter]\").click();\n                    cy.get(\"body\").contains(\"li\", \"All logs\").click();\n                    cy.get(\"[data-cy=process-logs]\")\n                        .should(\"contain\", stdoutLogs[Math.floor(Math.random() * stdoutLogs.length)])\n                        .and(\"contain\", nodeInfoLogs[Math.floor(Math.random() * nodeInfoLogs.length)])\n                        .and(\"contain\", crunchRunLogs[Math.floor(Math.random() * crunchRunLogs.length)]);\n                    // Select 'node-info' logs\n                    cy.get(\"[data-cy=process-logs-filter]\").click();\n                    cy.get(\"body\").contains(\"li\", \"node-info\").click();\n                    cy.get(\"[data-cy=process-logs]\")\n                        .should(\"not.contain\", stdoutLogs[Math.floor(Math.random() * stdoutLogs.length)])\n                        .and(\"contain\", nodeInfoLogs[Math.floor(Math.random() * nodeInfoLogs.length)])\n                        .and(\"not.contain\", crunchRunLogs[Math.floor(Math.random() * crunchRunLogs.length)]);\n                    // Select 'stdout' logs\n                    cy.get(\"[data-cy=process-logs-filter]\").click();\n                    cy.get(\"body\").contains(\"li\", \"stdout\").click();\n                    cy.get(\"[data-cy=process-logs]\")\n                        .should(\"contain\", stdoutLogs[Math.floor(Math.random() * stdoutLogs.length)])\n                        .and(\"not.contain\", nodeInfoLogs[Math.floor(Math.random() * nodeInfoLogs.length)])\n 
                       .and(\"not.contain\", crunchRunLogs[Math.floor(Math.random() * crunchRunLogs.length)]);\n                });\n            });\n        });\n\n        it(\"sorts combined logs\", function () {\n            const crName = \"test_container_request\";\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                { name: crName, state: \"Committed\" },\n            ).then(function (containerRequest) {\n                cy.appendLog(adminUser.token, containerRequest.uuid, \"node-info.txt\", [\n                    \"3: nodeinfo 1\",\n                    \"2: nodeinfo 2\",\n                    \"1: nodeinfo 3\",\n                    \"2: nodeinfo 4\",\n                    \"3: nodeinfo 5\",\n                ]).as(\"node-info\");\n\n                cy.appendLog(adminUser.token, containerRequest.uuid, \"stdout.txt\", [\n                    \"2023-07-18T20:14:48.128642814Z first\",\n                    \"2023-07-18T20:14:49.128642814Z third\",\n                ]).as(\"stdout\");\n\n                cy.appendLog(adminUser.token, containerRequest.uuid, \"stderr.txt\", [\"2023-07-18T20:14:48.528642814Z second\"]).as(\"stderr\");\n\n                cy.loginAs(activeUser);\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.get(\"[data-cy=process-details-card]\").should(\"contain\", crName);\n                cy.doMPVTabSelect(\"Logs\");\n\n                cy.getAll(\"@node-info\", \"@stdout\", \"@stderr\").then(() => {\n                    // Verify sorted main logs\n                    cy.get(\"[data-cy=process-logs] span > p\", { timeout: 7000 }).eq(0).should(\"contain\", \"2023-07-18T20:14:48.128642814Z first\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(1).should(\"contain\", \"2023-07-18T20:14:48.528642814Z second\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(2).should(\"contain\", \"2023-07-18T20:14:49.128642814Z third\");\n\n                    // Switch to All logs\n                    cy.get(\"[data-cy=process-logs-filter]\").click();\n                    cy.get(\"body\").contains(\"li\", \"All logs\").click();\n                    // Verify non-sorted lines were preserved\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(0).should(\"contain\", \"3: nodeinfo 1\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(1).should(\"contain\", \"2: nodeinfo 2\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(2).should(\"contain\", \"1: nodeinfo 3\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(3).should(\"contain\", \"2: nodeinfo 4\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(4).should(\"contain\", \"3: nodeinfo 5\");\n                    // Verify sorted logs\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(5).should(\"contain\", \"2023-07-18T20:14:48.128642814Z first\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(6).should(\"contain\", \"2023-07-18T20:14:48.528642814Z second\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(7).should(\"contain\", \"2023-07-18T20:14:49.128642814Z third\");\n                });\n            });\n        });\n\n        it(\"preserves original ordering of lines within the same log type\", function () {\n            const crName = \"test_container_request\";\n            cy.createDefaultContainerRequest(\n                
activeUser.token,\n                dockerImage,\n                { name: crName, state: \"Committed\" },\n            ).then(function (containerRequest) {\n                cy.appendLog(adminUser.token, containerRequest.uuid, \"stdout.txt\", [\n                    // Should come first\n                    \"2023-07-18T20:14:46.000000000Z A out 1\",\n                    // Comes fourth in a contiguous block\n                    \"2023-07-18T20:14:48.128642814Z A out 2\",\n                    \"2023-07-18T20:14:48.128642814Z X out 3\",\n                    \"2023-07-18T20:14:48.128642814Z A out 4\",\n                ]).as(\"stdout\");\n\n                cy.appendLog(adminUser.token, containerRequest.uuid, \"stderr.txt\", [\n                    // Comes second\n                    \"2023-07-18T20:14:47.000000000Z Z err 1\",\n                    // Comes third in a contiguous block\n                    \"2023-07-18T20:14:48.128642814Z B err 2\",\n                    \"2023-07-18T20:14:48.128642814Z C err 3\",\n                    \"2023-07-18T20:14:48.128642814Z Y err 4\",\n                    \"2023-07-18T20:14:48.128642814Z Z err 5\",\n                    \"2023-07-18T20:14:48.128642814Z A err 6\",\n                ]).as(\"stderr\");\n\n                cy.loginAs(activeUser);\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.get(\"[data-cy=process-details-card]\").should(\"contain\", crName);\n                cy.doMPVTabSelect(\"Logs\");\n\n                cy.getAll(\"@stdout\", \"@stderr\").then(() => {\n                    // Switch to All logs\n                    cy.get(\"[data-cy=process-logs-filter]\").click();\n                    cy.get(\"body\").contains(\"li\", \"All logs\").click();\n                    // Verify sorted logs\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(0).should(\"contain\", \"2023-07-18T20:14:46.000000000Z A out 1\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(1).should(\"contain\", \"2023-07-18T20:14:47.000000000Z Z err 1\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(2).should(\"contain\", \"2023-07-18T20:14:48.128642814Z B err 2\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(3).should(\"contain\", \"2023-07-18T20:14:48.128642814Z C err 3\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(4).should(\"contain\", \"2023-07-18T20:14:48.128642814Z Y err 4\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(5).should(\"contain\", \"2023-07-18T20:14:48.128642814Z Z err 5\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(6).should(\"contain\", \"2023-07-18T20:14:48.128642814Z A err 6\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(7).should(\"contain\", \"2023-07-18T20:14:48.128642814Z A out 2\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(8).should(\"contain\", \"2023-07-18T20:14:48.128642814Z X out 3\");\n                    cy.get(\"[data-cy=process-logs] span > p\").eq(9).should(\"contain\", \"2023-07-18T20:14:48.128642814Z A out 4\");\n                });\n            });\n        });\n\n        it(\"correctly generates sniplines\", function () {\n            const SNIPLINE = `================ ✀ ================ ✀ ========= Some log(s) were skipped ========= ✀ ================ ✀ ================`;\n            const crName = \"test_container_request\";\n            cy.createDefaultContainerRequest(\n                
activeUser.token,\n                dockerImage,\n                { name: crName, state: \"Committed\" },\n            ).then(function (containerRequest) {\n                cy.appendLog(adminUser.token, containerRequest.uuid, \"stdout.txt\", [\n                    \"X\".repeat(63999) + \"_\" + \"O\".repeat(100) + \"_\" + \"X\".repeat(63999),\n                ]).as(\"stdout\");\n\n                cy.loginAs(activeUser);\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.get(\"[data-cy=process-details-card]\").should(\"contain\", crName);\n                cy.doMPVTabSelect(\"Logs\");\n\n                // Switch to stdout since lines are unsortable (no timestamp)\n                cy.get(\"[data-cy=process-logs-filter]\").click();\n                cy.get(\"body\").contains(\"li\", \"stdout\").click();\n\n                cy.getAll(\"@stdout\").then(() => {\n                    // Verify first 64KB and snipline\n                    cy.get(\"[data-cy=process-logs] span > p\", { timeout: 7000 })\n                        .eq(0)\n                        .should(\"contain\", \"X\".repeat(63999) + \"_\\n\" + SNIPLINE);\n                    // Verify last 64KB\n                    cy.get(\"[data-cy=process-logs] span > p\")\n                        .eq(1)\n                        .should(\"contain\", \"_\" + \"X\".repeat(63999));\n                    // Verify none of the Os got through\n                    cy.get(\"[data-cy=process-logs] span > p\").should(\"not.contain\", \"O\");\n                });\n            });\n        });\n\n        it(\"correctly breaks long lines when no obvious line separation exists\", function () {\n            function randomString(length) {\n                const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';\n                let res = '';\n                for (let i = 0; i < length; i++) {\n                    res += chars.charAt(Math.floor(Math.random() * chars.length));\n                }\n                return res;\n            }\n\n            const logLinesQty = 10;\n            const logLines = [];\n            for (let i = 0; i < logLinesQty; i++) {\n                const length = Math.floor(Math.random() * 500) + 500;\n                logLines.push(randomString(length));\n            }\n\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                { name: \"test_container_request\", state: \"Committed\" },\n            ).then(function (containerRequest) {\n                cy.appendLog(adminUser.token, containerRequest.uuid, \"stdout.txt\", logLines).as(\"stdoutLogs\");\n\n                cy.getAll(\"@stdoutLogs\").then(function () {\n                    cy.loginAs(activeUser);\n                    cy.goToPath(`/processes/${containerRequest.uuid}`);\n                    cy.doMPVTabSelect(\"Logs\");\n                    // Select 'stdout' log filter\n                    cy.get(\"[data-cy=process-logs-filter]\").click();\n                    cy.get(\"body\").contains(\"li\", \"stdout\").click();\n                    cy.get(\"[data-cy=process-logs] span > p\")\n                        .should('have.length', logLinesQty)\n                        .each($p => {\n                            expect($p.text().length).to.be.greaterThan(499);\n\n                            // This looks like an ugly hack, but I was not able\n                            // to get [client|scroll]Width attributes through\n                            // the 
usual Cypress methods.\n                            const parentClientWidth = $p[0].parentElement.clientWidth;\n                            const parentScrollWidth = $p[0].parentElement.scrollWidth\n                            // Scrollbar should not be visible\n                            expect(parentClientWidth).to.be.eq(parentScrollWidth);\n                        });\n                });\n            });\n        });\n    });\n\n    describe(\"I/O panel\", function () {\n        const testInputs = [\n            {\n                definition: {\n                    id: \"#main/input_file\",\n                    label: \"Label Description\",\n                    type: \"File\",\n                },\n                input: {\n                    input_file: {\n                        basename: \"input1.tar\",\n                        class: \"File\",\n                        location: \"keep:00000000000000000000000000000000+01/input1.tar\",\n                        secondaryFiles: [\n                            {\n                                basename: \"input1-2.txt\",\n                                class: \"File\",\n                                location: \"keep:00000000000000000000000000000000+01/input1-2.txt\",\n                            },\n                            {\n                                basename: \"input1-3.txt\",\n                                class: \"File\",\n                                location: \"keep:00000000000000000000000000000000+01/input1-3.txt\",\n                            },\n                            {\n                                basename: \"input1-4.txt\",\n                                class: \"File\",\n                                location: \"keep:00000000000000000000000000000000+01/input1-4.txt\",\n                            },\n                        ],\n                    },\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_dir\",\n                    doc: \"Doc Description\",\n                    type: \"Directory\",\n                },\n                input: {\n                    input_dir: {\n                        basename: \"11111111111111111111111111111111+01\",\n                        class: \"Directory\",\n                        location: \"keep:11111111111111111111111111111111+01\",\n                    },\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_bool\",\n                    doc: [\"Doc desc 1\", \"Doc desc 2\"],\n                    type: \"boolean\",\n                },\n                input: {\n                    input_bool: true,\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_int\",\n                    type: \"int\",\n                },\n                input: {\n                    input_int: 1,\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_long\",\n                    type: \"long\",\n                },\n                input: {\n                    input_long: 1,\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_float\",\n                    type: \"float\",\n                },\n                input: {\n                    input_float: 1.5,\n                },\n            },\n            {\n                definition: {\n       
             id: \"#main/input_double\",\n                    type: \"double\",\n                },\n                input: {\n                    input_double: 1.3,\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_string\",\n                    type: \"string\",\n                },\n                input: {\n                    input_string: \"Hello World\",\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_file_array\",\n                    type: {\n                        items: \"File\",\n                        type: \"array\",\n                    },\n                },\n                input: {\n                    input_file_array: [\n                        {\n                            basename: \"input2.tar\",\n                            class: \"File\",\n                            location: \"keep:00000000000000000000000000000000+02/input2.tar\",\n                        },\n                        {\n                            basename: \"input3.tar\",\n                            class: \"File\",\n                            location: \"keep:00000000000000000000000000000000+03/input3.tar\",\n                            secondaryFiles: [\n                                {\n                                    basename: \"input3-2.txt\",\n                                    class: \"File\",\n                                    location: \"keep:00000000000000000000000000000000+03/input3-2.txt\",\n                                },\n                            ],\n                        },\n                        {\n                            $import: \"import_path\",\n                        },\n                    ],\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_dir_array\",\n                    type: {\n                        items: \"Directory\",\n                        type: \"array\",\n                    },\n                },\n                input: {\n                    input_dir_array: [\n                        {\n                            basename: \"11111111111111111111111111111111+02\",\n                            class: \"Directory\",\n                            location: \"keep:11111111111111111111111111111111+02\",\n                        },\n                        {\n                            basename: \"11111111111111111111111111111111+03\",\n                            class: \"Directory\",\n                            location: \"keep:11111111111111111111111111111111+03\",\n                        },\n                        {\n                            $import: \"import_path\",\n                        },\n                    ],\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_int_array\",\n                    type: {\n                        items: \"int\",\n                        type: \"array\",\n                    },\n                },\n                input: {\n                    input_int_array: [\n                        1,\n                        3,\n                        5,\n                        {\n                            $import: \"import_path\",\n                        },\n                    ],\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_long_array\",\n 
                   type: {\n                        items: \"long\",\n                        type: \"array\",\n                    },\n                },\n                input: {\n                    input_long_array: [\n                        10,\n                        20,\n                        {\n                            $import: \"import_path\",\n                        },\n                    ],\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_float_array\",\n                    type: {\n                        items: \"float\",\n                        type: \"array\",\n                    },\n                },\n                input: {\n                    input_float_array: [\n                        10.2,\n                        10.4,\n                        10.6,\n                        {\n                            $import: \"import_path\",\n                        },\n                    ],\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_double_array\",\n                    type: {\n                        items: \"double\",\n                        type: \"array\",\n                    },\n                },\n                input: {\n                    input_double_array: [\n                        20.1,\n                        20.2,\n                        20.3,\n                        {\n                            $import: \"import_path\",\n                        },\n                    ],\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_string_array\",\n                    type: {\n                        items: \"string\",\n                        type: \"array\",\n                    },\n                },\n                input: {\n                    input_string_array: [\n                        \"Hello\",\n                        \"World\",\n                        \"!\",\n                        {\n                            $import: \"import_path\",\n                        },\n                    ],\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_bool_include\",\n                    type: \"boolean\",\n                },\n                input: {\n                    input_bool_include: {\n                        $include: \"include_path\",\n                    },\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_int_include\",\n                    type: \"int\",\n                },\n                input: {\n                    input_int_include: {\n                        $include: \"include_path\",\n                    },\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_float_include\",\n                    type: \"float\",\n                },\n                input: {\n                    input_float_include: {\n                        $include: \"include_path\",\n                    },\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_string_include\",\n                    type: \"string\",\n                },\n                input: {\n                    input_string_include: {\n                        $include: \"include_path\",\n           
         },\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_file_include\",\n                    type: \"File\",\n                },\n                input: {\n                    input_file_include: {\n                        $include: \"include_path\",\n                    },\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_directory_include\",\n                    type: \"Directory\",\n                },\n                input: {\n                    input_directory_include: {\n                        $include: \"include_path\",\n                    },\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/input_file_url\",\n                    type: \"File\",\n                },\n                input: {\n                    input_file_url: {\n                        basename: \"index.html\",\n                        class: \"File\",\n                        location: \"http://example.com/index.html\",\n                    },\n                },\n            },\n        ];\n\n        const testOutputs = [\n            {\n                definition: {\n                    id: \"#main/output_file\",\n                    label: \"Label Description\",\n                    type: \"File\",\n                },\n                output: {\n                    output_file: {\n                        basename: \"cat.png\",\n                        class: \"File\",\n                        location: \"cat.png\",\n                    },\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/output_file_with_secondary\",\n                    doc: \"Doc Description\",\n                    type: \"File\",\n                },\n                output: {\n                    output_file_with_secondary: {\n                        basename: \"main.dat\",\n                        class: \"File\",\n                        location: \"main.dat\",\n                        secondaryFiles: [\n                            {\n                                basename: \"secondary.dat\",\n                                class: \"File\",\n                                location: \"secondary.dat\",\n                            },\n                            {\n                                basename: \"secondary2.dat\",\n                                class: \"File\",\n                                location: \"secondary2.dat\",\n                            },\n                        ],\n                    },\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/output_dir\",\n                    doc: [\"Doc desc 1\", \"Doc desc 2\"],\n                    type: \"Directory\",\n                },\n                output: {\n                    output_dir: {\n                        basename: \"outdir1\",\n                        class: \"Directory\",\n                        location: \"outdir1\",\n                    },\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/output_bool\",\n                    type: \"boolean\",\n                },\n                output: {\n                    output_bool: true,\n                },\n            },\n            {\n                definition: {\n                    id: 
\"#main/output_int\",\n                    type: \"int\",\n                },\n                output: {\n                    output_int: 1,\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/output_long\",\n                    type: \"long\",\n                },\n                output: {\n                    output_long: 1,\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/output_float\",\n                    type: \"float\",\n                },\n                output: {\n                    output_float: 100.5,\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/output_double\",\n                    type: \"double\",\n                },\n                output: {\n                    output_double: 100.3,\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/output_string\",\n                    type: \"string\",\n                },\n                output: {\n                    output_string: \"Hello output\",\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/output_file_array\",\n                    type: {\n                        items: \"File\",\n                        type: \"array\",\n                    },\n                },\n                output: {\n                    output_file_array: [\n                        {\n                            basename: \"output2.tar\",\n                            class: \"File\",\n                            location: \"output2.tar\",\n                        },\n                        {\n                            basename: \"output3.tar\",\n                            class: \"File\",\n                            location: \"output3.tar\",\n                        },\n                    ],\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/output_dir_array\",\n                    type: {\n                        items: \"Directory\",\n                        type: \"array\",\n                    },\n                },\n                output: {\n                    output_dir_array: [\n                        {\n                            basename: \"outdir2\",\n                            class: \"Directory\",\n                            location: \"outdir2\",\n                        },\n                        {\n                            basename: \"outdir3\",\n                            class: \"Directory\",\n                            location: \"outdir3\",\n                        },\n                    ],\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/output_int_array\",\n                    type: {\n                        items: \"int\",\n                        type: \"array\",\n                    },\n                },\n                output: {\n                    output_int_array: [10, 11, 12],\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/output_long_array\",\n                    type: {\n                        items: \"long\",\n                        type: \"array\",\n                    },\n                },\n                output: {\n                    output_long_array: [51, 52],\n         
       },\n            },\n            {\n                definition: {\n                    id: \"#main/output_float_array\",\n                    type: {\n                        items: \"float\",\n                        type: \"array\",\n                    },\n                },\n                output: {\n                    output_float_array: [100.2, 100.4, 100.6],\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/output_double_array\",\n                    type: {\n                        items: \"double\",\n                        type: \"array\",\n                    },\n                },\n                output: {\n                    output_double_array: [100.1, 100.2, 100.3],\n                },\n            },\n            {\n                definition: {\n                    id: \"#main/output_string_array\",\n                    type: {\n                        items: \"string\",\n                        type: \"array\",\n                    },\n                },\n                output: {\n                    output_string_array: [\"Hello\", \"Output\", \"!\"],\n                },\n            },\n        ];\n\n        const verifyIOParameter = (name, label, doc, val, collection, multipleRows) => {\n            cy.get(\"table tr\")\n                .contains(name)\n                .parents(\"tr\")\n                .within($mainRow => {\n                    cy.get($mainRow).scrollIntoView();\n                    label && cy.contains(label);\n\n                    if (multipleRows) {\n                        cy.get($mainRow).nextUntil('[data-cy=\"process-io-param\"]').as(\"secondaryRows\");\n                        if (val) {\n                            if (Array.isArray(val)) {\n                                val.forEach(v => cy.get(\"@secondaryRows\").contains(v));\n                            } else {\n                                cy.get(\"@secondaryRows\").contains(val);\n                            }\n                        }\n                        if (collection) {\n                            cy.get(\"@secondaryRows\").contains(collection);\n                        }\n                    } else {\n                        if (val) {\n                            if (Array.isArray(val)) {\n                                val.forEach(v => cy.contains(v));\n                            } else {\n                                cy.contains(val);\n                            }\n                        }\n                        if (collection) {\n                            cy.contains(collection);\n                        }\n                    }\n                });\n        };\n\n        const verifyIOParameterImage = (name, url) => {\n            cy.get(\"table tr\")\n                .contains(name)\n                .parents(\"tr\")\n                .within(() => {\n                    cy.get('[alt=\"Inline Preview\"]')\n                        .should(\"be.visible\")\n                        .and($img => {\n                            expect($img[0].naturalWidth).to.be.greaterThan(0);\n                            expect($img[0].src).contains(url);\n                        });\n                });\n        };\n\n        it(\"displays IO parameters with keep links and previews\", function () {\n            // Create output collection for real files\n            cy.createCollection(adminUser.token, {\n                name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n                
owner_uuid: activeUser.user.uuid,\n            }).then(testOutputCollection => {\n                cy.loginAs(activeUser);\n\n                cy.goToPath(`/collections/${testOutputCollection.uuid}`);\n                cy.doMPVTabSelect(\"Files\");\n\n                cy.get(\"[data-cy=upload-button]\").click();\n\n                cy.fixture(\"files/cat.png\", \"base64\").then(content => {\n                    cy.get(\"[data-cy=drag-and-drop]\").upload(content, \"cat.png\");\n                    cy.get(\"[data-cy=form-submit-btn]\").click();\n                    cy.waitForDom().get(\"[data-cy=form-submit-btn]\").should(\"not.exist\");\n                    // Confirm final collection state.\n                    cy.get(\"[data-cy=collection-files-panel]\").contains(\"cat.png\").should(\"exist\");\n                });\n\n                cy.getCollection(activeUser.token, testOutputCollection.uuid).as(\"testOutputCollection\");\n            });\n\n            // Get updated collection pdh\n            cy.getAll(\"@testOutputCollection\").then(([testOutputCollection]) => {\n                // Add output uuid and inputs to container request\n                cy.intercept({ method: \"GET\", url: `**/arvados/v1/groups/contents?*` }, req => {\n                    req.on('response', res => {\n                        if (res.body.included && res.body.included.length > 0) {\n                            res.body.included[0].state = ContainerState.RUNNING;\n                        }\n                        const body = res.body.items ? res.body.items[0] : res.body;\n                        if (!body || !body.mounts) {\n                            return;\n                        }\n                        body.output_uuid = testOutputCollection.uuid;\n                        body.mounts[\"/var/lib/cwl/cwl.input.json\"] = {\n                            content: testInputs.map(param => param.input).reduce((acc, val) => Object.assign(acc, val), {}),\n                        };\n                        body.mounts[\"/var/lib/cwl/workflow.json\"] = {\n                            content: {\n                                $graph: [\n                                    {\n                                        id: \"#main\",\n                                        inputs: testInputs.map(input => input.definition),\n                                        outputs: testOutputs.map(output => output.definition),\n                                    },\n                                ],\n                            },\n                        };\n                    });\n                });\n\n                // Stub fake output collection\n                cy.intercept(\n                    { method: \"GET\", url: `**/arvados/v1/collections/${testOutputCollection.uuid}*` },\n                    {\n                        statusCode: 200,\n                        body: {\n                            uuid: testOutputCollection.uuid,\n                            portable_data_hash: testOutputCollection.portable_data_hash,\n                        },\n                    }\n                );\n\n                // Stub fake output json\n                cy.intercept(\n                    { method: \"GET\", url: \"**/c%3Dzzzzz-4zz18-zzzzzzzzzzzzzzz/cwl.output.json\" },\n                    {\n                        statusCode: 200,\n                        body: testOutputs.map(param => param.output).reduce((acc, val) => Object.assign(acc, val), {}),\n                    }\n                );\n\n                // Stub 
webdav response, points to output json\n                cy.intercept(\n                    { method: \"PROPFIND\", url: \"*\" },\n                    {\n                        fixture: \"webdav-propfind-outputs.xml\",\n                    }\n                );\n            });\n\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                { name: \"test_container_request\", state: \"Committed\" },\n            ).as(\"containerRequest\");\n\n            cy.getAll(\"@containerRequest\", \"@testOutputCollection\").then(function ([containerRequest, testOutputCollection]) {\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.doMPVTabSelect(\"Inputs\");\n                cy.get(\"[data-cy=process-io-card] h6\")\n                    .contains(\"Input Parameters\")\n                    .parents(\"[data-cy=process-io-card]\")\n                    .within((ctx) => {\n                        cy.get(ctx).scrollIntoView();\n                        verifyIOParameter(\"input_file\", null, \"Label Description\", \"input1.tar\", \"00000000000000000000000000000000+01\");\n                        verifyIOParameter(\"input_file\", null, \"Label Description\", \"input1-2.txt\", undefined, true);\n                        verifyIOParameter(\"input_file\", null, \"Label Description\", \"input1-3.txt\", undefined, true);\n                        verifyIOParameter(\"input_file\", null, \"Label Description\", \"input1-4.txt\", undefined, true);\n                        verifyIOParameter(\"input_dir\", null, \"Doc Description\", \"/\", \"11111111111111111111111111111111+01\");\n                        verifyIOParameter(\"input_bool\", null, \"Doc desc 1, Doc desc 2\", \"true\");\n                        verifyIOParameter(\"input_int\", null, null, \"1\");\n                        verifyIOParameter(\"input_long\", null, null, \"1\");\n                        verifyIOParameter(\"input_float\", null, null, \"1.5\");\n                        verifyIOParameter(\"input_double\", null, null, \"1.3\");\n                        verifyIOParameter(\"input_string\", null, null, \"Hello World\");\n                        verifyIOParameter(\"input_file_array\", null, null, \"input2.tar\", \"00000000000000000000000000000000+02\");\n                        verifyIOParameter(\"input_file_array\", null, null, \"input3.tar\", undefined, true);\n                        verifyIOParameter(\"input_file_array\", null, null, \"input3-2.txt\", undefined, true);\n                        verifyIOParameter(\"input_file_array\", null, null, \"Cannot display value\", undefined, true);\n                        verifyIOParameter(\"input_dir_array\", null, null, \"/\", \"11111111111111111111111111111111+02\");\n                        verifyIOParameter(\"input_dir_array\", null, null, \"/\", \"11111111111111111111111111111111+03\", true);\n                        verifyIOParameter(\"input_dir_array\", null, null, \"Cannot display value\", undefined, true);\n                        verifyIOParameter(\"input_int_array\", null, null, [\"1\", \"3\", \"5\", \"Cannot display value\"]);\n                        verifyIOParameter(\"input_long_array\", null, null, [\"10\", \"20\", \"Cannot display value\"]);\n                        verifyIOParameter(\"input_float_array\", null, null, [\"10.2\", \"10.4\", \"10.6\", \"Cannot display value\"]);\n                        verifyIOParameter(\"input_double_array\", null, null, [\"20.1\", \"20.2\", \"20.3\", 
\"Cannot display value\"]);\n                        verifyIOParameter(\"input_string_array\", null, null, [\"Hello\", \"World\", \"!\", \"Cannot display value\"]);\n                        verifyIOParameter(\"input_bool_include\", null, null, \"Cannot display value\");\n                        verifyIOParameter(\"input_int_include\", null, null, \"Cannot display value\");\n                        verifyIOParameter(\"input_float_include\", null, null, \"Cannot display value\");\n                        verifyIOParameter(\"input_string_include\", null, null, \"Cannot display value\");\n                        verifyIOParameter(\"input_file_include\", null, null, \"Cannot display value\");\n                        verifyIOParameter(\"input_directory_include\", null, null, \"Cannot display value\");\n                        verifyIOParameter(\"input_file_url\", null, null, \"http://example.com/index.html\");\n                    });\n\n                cy.get('button').contains('Outputs').click();\n                cy.get(\"[data-cy=process-io-card] h6\")\n                    .contains(\"Output Parameters\")\n                    .parents(\"[data-cy=process-io-card]\")\n                    .within(ctx => {\n                        cy.get(ctx).scrollIntoView();\n                        const outPdh = testOutputCollection.portable_data_hash;\n\n                        verifyIOParameter(\"output_file\", null, \"Label Description\", \"cat.png\", `${outPdh}`);\n                        // Disabled until image preview returns\n                        // verifyIOParameterImage(\"output_file\", `/c=${outPdh}/cat.png`);\n                        verifyIOParameter(\"output_file_with_secondary\", null, \"Doc Description\", \"main.dat\", `${outPdh}`);\n                        verifyIOParameter(\"output_file_with_secondary\", null, \"Doc Description\", \"secondary.dat\", undefined, true);\n                        verifyIOParameter(\"output_file_with_secondary\", null, \"Doc Description\", \"secondary2.dat\", undefined, true);\n                        verifyIOParameter(\"output_dir\", null, \"Doc desc 1, Doc desc 2\", \"outdir1\", `${outPdh}`);\n                        verifyIOParameter(\"output_bool\", null, null, \"true\");\n                        verifyIOParameter(\"output_int\", null, null, \"1\");\n                        verifyIOParameter(\"output_long\", null, null, \"1\");\n                        verifyIOParameter(\"output_float\", null, null, \"100.5\");\n                        verifyIOParameter(\"output_double\", null, null, \"100.3\");\n                        verifyIOParameter(\"output_string\", null, null, \"Hello output\");\n                        verifyIOParameter(\"output_file_array\", null, null, \"output2.tar\", `${outPdh}`);\n                        verifyIOParameter(\"output_file_array\", null, null, \"output3.tar\", undefined, true);\n                        verifyIOParameter(\"output_dir_array\", null, null, \"outdir2\", `${outPdh}`);\n                        verifyIOParameter(\"output_dir_array\", null, null, \"outdir3\", undefined, true);\n                        verifyIOParameter(\"output_int_array\", null, null, [\"10\", \"11\", \"12\"]);\n                        verifyIOParameter(\"output_long_array\", null, null, [\"51\", \"52\"]);\n                        verifyIOParameter(\"output_float_array\", null, null, [\"100.2\", \"100.4\", \"100.6\"]);\n                        verifyIOParameter(\"output_double_array\", null, null, [\"100.1\", \"100.2\", \"100.3\"]);\n                     
   verifyIOParameter(\"output_string_array\", null, null, [\"Hello\", \"Output\", \"!\"]);\n                    });\n            });\n        });\n\n        it(\"displays IO parameters with no value\", function () {\n            const fakeOutputUUID = \"zzzzz-4zz18-abcdefghijklmno\";\n            const fakeOutputPDH = \"11111111111111111111111111111111+99/\";\n\n            cy.loginAs(activeUser);\n\n            // Add output uuid and inputs to container request\n            cy.intercept({ method: \"GET\", url: `**/arvados/v1/groups/contents?*` }, req => {\n                req.on('response', res => {\n                    const body = res.body.items ? res.body.items[0] : res.body;\n                    if (!body || !body.mounts) {\n                        return;\n                    }\n                    body.output_uuid = fakeOutputUUID;\n                    body.mounts[\"/var/lib/cwl/cwl.input.json\"] = {\n                        content: {},\n                    };\n                    body.mounts[\"/var/lib/cwl/workflow.json\"] = {\n                        content: {\n                            $graph: [\n                                {\n                                    id: \"#main\",\n                                    inputs: testInputs.map(input => input.definition),\n                                    outputs: testOutputs.map(output => output.definition),\n                                },\n                            ],\n                        },\n                    };\n                });\n            });\n\n            // Stub fake output collection\n            cy.intercept(\n                { method: \"GET\", url: `**/arvados/v1/collections/${fakeOutputUUID}*` },\n                {\n                    statusCode: 200,\n                    body: {\n                        uuid: fakeOutputUUID,\n                        portable_data_hash: fakeOutputPDH,\n                    },\n                }\n            );\n\n            // Stub fake output json\n            cy.intercept(\n                { method: \"GET\", url: `**/c%3D${fakeOutputUUID}/cwl.output.json` },\n                {\n                    statusCode: 200,\n                    body: {},\n                }\n            );\n\n            cy.readFile(\"cypress/fixtures/webdav-propfind-outputs.xml\").then(data => {\n                // Stub webdav response, points to output json\n                cy.intercept(\n                    { method: \"PROPFIND\", url: \"*\" },\n                    {\n                        statusCode: 200,\n                        body: data.replace(/zzzzz-4zz18-zzzzzzzzzzzzzzz/g, fakeOutputUUID),\n                    }\n                );\n            });\n\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                { name: \"test_container_request\", state: \"Committed\" },\n            ).as(\"containerRequest\");\n\n            cy.getAll(\"@containerRequest\").then(function ([containerRequest]) {\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.waitForDom();\n\n                cy.doMPVTabSelect(\"Inputs\");\n                cy.get(\"[data-cy=process-io-card] h6\")\n                    .contains(\"Input Parameters\")\n                    .parents(\"[data-cy=process-io-card]\")\n                    .within((ctx) => {\n                        cy.get(ctx).scrollIntoView();\n                        cy.wait(2000);\n                        cy.waitForDom();\n\n                        
testInputs.map((input) => {\n                            verifyIOParameter(input.definition.id.split('/').slice(-1)[0], null, null, \"No value\");\n                        });\n                    });\n                cy.get('button').contains('Outputs').click();\n                cy.get(\"[data-cy=process-io-card] h6\")\n                    .contains(\"Output Parameters\")\n                    .parents(\"[data-cy=process-io-card]\")\n                    .within((ctx) => {\n                        cy.get(ctx).scrollIntoView();\n\n                        testOutputs.map((output) => {\n                            verifyIOParameter(output.definition.id.split('/').slice(-1)[0], null, null, \"No value\");\n                        });\n                    });\n            });\n        });\n    });\n\n    describe(\"Process operations\", function () {\n        it(\"navigates to parent project when deleting current process\", function () {\n            // Process in home project\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                {\n                    name: `test_container_request ${Math.floor(Math.random() * 999999)}`,\n                    state: \"Committed\",\n                },\n            ).then(function (containerRequest) {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/processes/${containerRequest.uuid}`);\n                cy.get(\"[data-cy=process-details-card]\").should(\"contain\", containerRequest.name);\n\n                // Favorite process\n                cy.get(\"[data-cy=process-details-card]\").find('[data-targetid=\"Add to favorites\"]').click();\n                cy.waitForDom();\n\n                // Verify process in favorites\n                cy.get(`[data-cy=tree-item-toggle-my-favorites]`).click();\n                cy.get('[data-cy=tree-item-toggle-my-favorites]').parents('[data-cy=tree-top-level-item]').should('contain', containerRequest.name);\n\n                // Delete process\n                cy.get(\"[data-cy=process-details-card]\").find('[data-targetid=\"Remove\"]').click();\n                cy.get(\"[data-cy=confirmation-dialog]\").within(() => {\n                    cy.get(\"[data-cy=confirmation-dialog-ok-btn]\").click();\n                });\n\n                // Verify we are in home project\n                cy.get(\"[data-cy=project-panel]\").should('exist');\n                cy.url().should(\"contain\", `/projects/${activeUser.user.uuid}`);\n                // Verify project absent from side panel\n                cy.get('[data-cy=side-panel-tree]').should(\"not.contain\", containerRequest.name);\n                cy.get('[data-cy=tree-item-toggle-my-favorites]').parents('[data-cy=tree-top-level-item]').should('not.contain', containerRequest.name);\n            });\n\n            // Process in subproject\n            cy.createProject({\n                owningUser: activeUser,\n                projectName: 'myProject1',\n                addToFavorites: false\n            }).then(function (subproject) {\n                cy.createDefaultContainerRequest(\n                    activeUser.token,\n                    dockerImage,\n                    {\n                        name: `test_container_request ${Math.floor(Math.random() * 999999)}`,\n                        state: \"Committed\",\n                        owner_uuid: subproject.uuid,\n                    },\n                ).then(function (containerRequest) {\n                    
cy.loginAs(activeUser);\n                    cy.doSidePanelNavigation('Home Projects');\n                    cy.doMPVTabSelect(\"Data\");\n                    cy.waitForDom();\n                    // Navigate to process through subproject\n                    cy.doDataExplorerNavigate(subproject.name);\n                    cy.doMPVTabSelect(\"Workflow Runs\");\n                    cy.get('[data-cy=data-table-row]').contains(containerRequest.name, { timeout: 1000 }).should('exist').click();\n                    cy.waitForDom();\n                    cy.url().should(\"contain\", `/processes/${containerRequest.uuid}`);\n\n                    // Delete process\n                    cy.get(\"[data-cy=process-details-card]\").find('[data-targetid=\"Remove\"]').click();\n                    cy.get(\"[data-cy=confirmation-dialog]\").within(() => {\n                        cy.get(\"[data-cy=confirmation-dialog-ok-btn]\").click();\n                    });\n\n                    // Verify we are in subproject\n                    cy.doMPVTabSelect(\"Data\");\n                    cy.get(\"[data-cy=project-panel]\").should('exist');\n                    cy.url().should(\"contain\", `/projects/${subproject.uuid}`);\n                });\n            });\n        });\n\n        it(\"refreshes project runs tab when deleting process\", function () {\n            cy.createDefaultContainerRequest(\n                activeUser.token,\n                dockerImage,\n                {\n                    name: `test_container_request ${Math.floor(Math.random() * 999999)}`,\n                    state: \"Committed\",\n                },\n            ).as('firstCr')\n            .then(function () {\n                cy.createDefaultContainerRequest(\n                    activeUser.token,\n                    dockerImage,\n                    {\n                        name: `test_container_request ${Math.floor(Math.random() * 999999)}`,\n                        state: \"Committed\",\n                    },\n                ).as('secondCr');\n            });\n\n            cy.getAll(\"@firstCr\", \"@secondCr\").then(function ([firstCr, secondCr]) {\n                cy.loginAs(activeUser);\n                cy.doSidePanelNavigation('Home Projects');\n                cy.get('[data-cy=mpv-tabs]').contains(\"Workflow Runs\").click();\n\n                // Delete firstCr\n                cy.get('[data-cy=data-table-row]').contains(firstCr.name).should('exist').parents('[data-cy=data-table-row]').rightclick();\n                cy.waitForDom();\n                cy.get(\"ul[data-cy=context-menu]\").contains(\"Remove\").click();\n                cy.get(\"[data-cy=confirmation-dialog]\").within(() => {\n                    cy.get(\"[data-cy=confirmation-dialog-ok-btn]\").click();\n                });\n\n                // Data explorer should refresh\n                cy.get('[data-cy=data-table-row]').contains(firstCr.name).should('not.exist');\n                cy.get('[data-cy=data-table-row]').contains(secondCr.name).should('exist');\n\n                // Delete second CR\n                cy.get('[data-cy=data-table-row]').contains(secondCr.name).should('exist').parents('[data-cy=data-table-row]').rightclick();\n                cy.waitForDom();\n                cy.get(\"ul[data-cy=context-menu]\").contains(\"Remove\").click();\n                cy.get(\"[data-cy=confirmation-dialog]\").within(() => {\n                    cy.get(\"[data-cy=confirmation-dialog-ok-btn]\").click();\n                });\n\n                // No CRs\n                cy.get('[data-cy=data-table]').contains(firstCr.name).should('not.exist');\n                cy.get('[data-cy=data-table]').contains(secondCr.name).should('not.exist');\n            });\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/project.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe(\"Project tests\", function () {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser(\"admin\", \"Admin\", \"User\", true, true)\n            .as(\"adminUser\")\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser(\"user\", \"Active\", \"User\", false, true)\n            .as(\"activeUser\")\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    it(\"creates a new project with multiple properties\", function () {\n        const projName = `Test project (${Math.floor(999999 * Math.random())})`;\n        cy.loginAs(activeUser);\n        cy.goToPath(`/projects/${activeUser.user.uuid}`);\n        cy.get(\"[data-cy=side-panel-button]\").click();\n        cy.get(\"[data-cy=side-panel-new-project]\").click();\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"contain\", \"New Project\")\n            .within(() => {\n                cy.get(\"[data-cy=name-field]\").within(() => {\n                    cy.get(\"input\").type(projName);\n                });\n            });\n        // Key: Color (IDTAGCOLORS) - Value: Magenta (IDVALCOLORS3)\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.contain\", \"Color: Magenta\");\n        cy.get(\"[data-cy=resource-properties-form]\").within(() => {\n            cy.get(\"[data-cy=property-field-key]\").within(() => {\n                cy.get(\"input\").type(\"Color\").blur();\n            });\n            cy.get(\"[data-cy=property-field-value]\").within(() => {\n                cy.get(\"input\").type(\"Magenta\").blur();\n            });\n            cy.get(\"[data-cy=property-add-btn]\").click();\n\n            cy.get(\"[data-cy=property-field-value]\").within(() => {\n                cy.get(\"input\").type(\"Pink\").blur();\n            });\n            cy.get(\"[data-cy=property-add-btn]\").click();\n\n            cy.get(\"[data-cy=property-field-value]\").within(() => {\n                cy.get(\"input\").type(\"Yellow\").blur();\n            });\n            cy.get(\"[data-cy=property-add-btn]\").click();\n        });\n        // Confirm proper vocabulary labels are displayed on the UI.\n        cy.get(\"[data-cy=form-dialog]\").should(\"contain\", \"Color: Magenta\");\n        cy.get(\"[data-cy=form-dialog]\").should(\"contain\", \"Color: Pink\");\n        cy.get(\"[data-cy=form-dialog]\").should(\"contain\", \"Color: Yellow\");\n\n        cy.get(\"[data-cy=resource-properties-form]\").within(() => {\n            cy.get(\"[data-cy=property-field-key]\").within(() => {\n                cy.get(\"input\").focus();\n            });\n            cy.get(\"[data-cy=property-field-key]\").should(\"not.contain\", \"Color\");\n        });\n\n        // Create project and confirm the properties' real values.\n        cy.get(\"[data-cy=form-submit-btn]\").click();\n        cy.waitForDom();\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n        cy.get(\"[data-cy=breadcrumb-last]\").should('exist', { timeout: 10000 });\n        
cy.get(\"[data-cy=breadcrumb-last]\").should(\"contain\", projName);\n        cy.doRequest(\"GET\", \"/arvados/v1/groups\", null, {\n            filters: `[[\"name\", \"=\", \"${projName}\"], [\"group_class\", \"=\", \"project\"]]`,\n        })\n            .its(\"body.items\")\n            .as(\"projects\")\n            .then(function () {\n                expect(this.projects).to.have.lengthOf(1);\n                expect(this.projects[0].properties).to.deep.equal(\n                    // Pink is not in the test vocab\n                    { IDTAGCOLORS: [\"IDVALCOLORS3\", \"Pink\", \"IDVALCOLORS1\"] }\n                );\n            });\n\n        // Open project edit via breadcrumbs\n        cy.get(\"[data-cy=breadcrumbs]\").contains(projName).rightclick();\n        cy.get(\"[data-cy=context-menu]\").contains(\"Edit\").click();\n        cy.get(\"[data-cy=form-dialog]\").within(() => {\n            cy.get(\"[data-cy=resource-properties-list]\").within(() => {\n                cy.get(\"div[role=button]\").contains(\"Color: Magenta\");\n                cy.get(\"div[role=button]\").contains(\"Color: Pink\");\n                cy.get(\"div[role=button]\").contains(\"Color: Yellow\");\n            });\n        });\n        // Add another property\n        cy.get(\"[data-cy=resource-properties-form]\").within(() => {\n            cy.get(\"[data-cy=property-field-key]\").within(() => {\n                cy.get(\"input\").type(\"Medium\").blur();\n            });\n            cy.get(\"[data-cy=property-field-value]\").within(() => {\n                cy.get(\"input\").type(\"Egg\").blur();\n            });\n            cy.get(\"[data-cy=property-add-btn]\").click();\n        });\n        cy.get(\"[data-cy=form-submit-btn]\").click();\n        cy.waitForDom();\n        // Reopen edit via breadcrumbs and verify properties\n        cy.get(\"[data-cy=breadcrumbs]\").contains(projName).rightclick();\n        cy.get(\"[data-cy=context-menu]\").contains(\"Edit\").click();\n        cy.get(\"[data-cy=form-dialog]\").within(() => {\n            cy.get(\"[data-cy=resource-properties-list]\").within(() => {\n                cy.get(\"div[role=button]\").contains(\"Color: Magenta\");\n                cy.get(\"div[role=button]\").contains(\"Color: Pink\");\n                cy.get(\"div[role=button]\").contains(\"Color: Yellow\");\n                cy.get(\"div[role=button]\").contains(\"Medium: Egg\");\n            });\n        });\n    });\n\n    it(\"creates a project without and with description\", function () {\n        const projName = `Test project (${Math.floor(999999 * Math.random())})`;\n        cy.loginAs(activeUser);\n        cy.goToPath(`/projects/${activeUser.user.uuid}`);\n\n        // Create project\n        cy.get(\"[data-cy=side-panel-button]\").click();\n        cy.get(\"[data-cy=side-panel-new-project]\").click();\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"contain\", \"New Project\")\n            .within(() => {\n                cy.get(\"[data-cy=name-field]\").within(() => {\n                    cy.get(\"input\").type(projName);\n                });\n            });\n        cy.get(\"[data-cy=form-submit-btn]\").click();\n        cy.waitForDom();\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n\n        const editProjectDescription = (name, type) => {\n            cy.get(\"[data-cy=side-panel-tree]\").contains(\"Home Projects\").click();\n            cy.waitForDom();\n            cy.get(\"[data-cy=project-panel] tbody\").contains(name).rightclick({ 
force: true });\n            cy.get(\"[data-cy=context-menu]\").contains(\"Edit\").click();\n            cy.get(\"[data-cy=form-dialog]\").within(() => {\n                cy.get(\"div[contenteditable=true]\").click().type(type);\n                cy.get(\"[data-cy=form-submit-btn]\").click();\n            });\n            cy.waitForDom();\n        };\n\n        const verifyProjectDescription = (name, description) => {\n            cy.doRequest(\"GET\", \"/arvados/v1/groups\", null, {\n                filters: `[[\"name\", \"=\", \"${name}\"], [\"group_class\", \"=\", \"project\"]]`,\n            })\n                .its(\"body.items\")\n                .as(\"projects\")\n                .then(function () {\n                    expect(this.projects).to.have.lengthOf(1);\n                    expect(this.projects[0].description).to.equal(description);\n                });\n        };\n\n        // Edit description\n        editProjectDescription(projName, \"Test description\");\n\n        // Check description is set\n        verifyProjectDescription(projName, \"<p>Test description</p>\");\n\n        // Clear description\n        editProjectDescription(projName, \"{selectall}{backspace}\");\n\n        // Check description is null\n        verifyProjectDescription(projName, null);\n\n        // Set description to contain whitespace\n        editProjectDescription(projName, \"{selectall}{backspace}    x\");\n        editProjectDescription(projName, \"{backspace}\");\n\n        // Check description is null\n        verifyProjectDescription(projName, null);\n    });\n\n    it(\"creates a project from the context menu in the correct subfolder\", function () {\n        const parentProjName = `Test project (${Math.floor(999999 * Math.random())})`;\n        const childProjName = `Test project (${Math.floor(999999 * Math.random())})`;\n        cy.loginAs(activeUser);\n        cy.goToPath(`/projects/${activeUser.user.uuid}`);\n\n        // Create project\n        cy.get(\"[data-cy=side-panel-button]\").click();\n        cy.get(\"[data-cy=side-panel-new-project]\").click();\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"contain\", \"New Project\")\n            .within(() => {\n                cy.get(\"[data-cy=name-field]\").within(() => {\n                    cy.get(\"input\").type(parentProjName);\n                });\n            });\n        cy.get(\"[data-cy=form-submit-btn]\").click();\n        cy.waitForDom();\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n        cy.get(\"button\").contains('Home Projects').click();\n        cy.waitForDom();\n\n        // Create subproject from context menu\n        cy.get(\"[data-cy=project-panel]\").should('exist', { timeout: 10000 });\n        cy.get(\"[data-cy=project-panel]\").contains(parentProjName).should('exist').parents('td').rightclick();\n        cy.get(\"[data-cy=context-menu]\").contains(\"New project\").click();\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"contain\", \"New Project\")\n            .within(() => {\n                cy.get(\"[data-cy=name-field]\").within(() => {\n                    cy.get(\"input\").type(childProjName);\n                });\n            });\n        cy.get(\"[data-cy=form-submit-btn]\").click();\n        cy.waitForDom();\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n\n        // open details panel and check 'owner' field\n        cy.get('[data-cy=multiselect-button]').eq(0).trigger('mouseover');\n        cy.get('body').contains('View 
details').should('exist')\n        cy.get('[data-cy=multiselect-button]').eq(0).click();\n        cy.waitForDom();\n\n        cy.get(\"[data-cy=details-panel-owner]\").contains(parentProjName).should(\"be.visible\");\n        cy.get(\"[data-cy=close-details-btn]\").click();\n    });\n\n    it(\"creates new project on home project and then a subproject inside it\", function () {\n        const createProject = function (name, parentName) {\n            cy.get(\"[data-cy=side-panel-button]\").click();\n            cy.get(\"[data-cy=side-panel-new-project]\").click();\n            cy.get(\"[data-cy=form-dialog]\")\n                .should(\"contain\", \"New Project\")\n                .within(() => {\n                    cy.get(\"[data-cy=parent-field]\").within(() => {\n                        cy.get(\"input\")\n                            .invoke(\"val\")\n                            .then(val => {\n                                expect(val).to.include(parentName);\n                            });\n                    });\n                    cy.get(\"[data-cy=name-field]\").within(() => {\n                        cy.get(\"input\").type(name);\n                    });\n                });\n            cy.get(\"[data-cy=form-submit-btn]\").click();\n            cy.waitForDom();\n        };\n\n        cy.loginAs(activeUser);\n        cy.goToPath(`/projects/${activeUser.user.uuid}`);\n        cy.get(\"[data-cy=breadcrumb-first]\").should('exist');\n        cy.get(\"[data-cy=breadcrumb-first]\").should(\"contain\", \"Projects\");\n        cy.get(\"[data-cy=breadcrumb-last]\").should(\"not.exist\");\n        // Create new project\n        const projName = `Test project (${Math.floor(999999 * Math.random())})`;\n        createProject(projName, \"Home project\");\n        // Confirm that the user was taken to the newly created project\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n        cy.get(\"[data-cy=breadcrumb-first]\").should(\"contain\", \"Projects\");\n        cy.waitForDom();\n        cy.get(\"[data-cy=breadcrumb-last]\").should('exist', { timeout: 10000 });\n        cy.get(\"[data-cy=breadcrumb-last]\").should(\"contain\", projName);\n        // Create a subproject\n        const subProjName = `Test project (${Math.floor(999999 * Math.random())})`;\n        createProject(subProjName, projName);\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n        cy.get(\"[data-cy=breadcrumb-first]\").should(\"contain\", \"Projects\");\n        cy.waitForDom();\n        cy.get(\"[data-cy=breadcrumb-last]\").should('exist', { timeout: 10000 });\n        cy.get(\"[data-cy=breadcrumb-last]\").should(\"contain\", subProjName);\n    });\n\n    it(\"attempts to create a project with a preexisting name\", function () {\n        const name = `Test project ${Math.floor(Math.random() * 999999)}`;\n        cy.createGroup(activeUser.token, {\n            name: name,\n            group_class: \"project\",\n        });\n        cy.loginAs(activeUser);\n        cy.goToPath(`/projects/${activeUser.user.uuid}`);\n\n        // Attempt to create a new project with a duplicate name\n        cy.get(\"[data-cy=side-panel-button]\").click();\n        cy.get(\"[data-cy=side-panel-new-project]\").click();\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"contain\", \"New Project\")\n            .within(() => {\n                cy.get(\"[data-cy=name-field]\").within(() => {\n                    cy.get(\"input\").type(name);\n                });\n                
cy.get(\"[data-cy=form-submit-btn]\").click();\n            });\n        // Error message should display, allowing editing the name\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"exist\") //here\n            .and(\"contain\", \"Project with the same name already exists\")\n            .within(() => {\n                cy.get(\"[data-cy=name-field]\").within(() => {\n                    cy.get(\"input\").type(\" renamed\");\n                });\n                cy.get(\"[data-cy=form-submit-btn]\").click();\n                cy.waitForDom();\n            });\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n    });\n\n    it(\"navigates to the parent project after trashing the one being displayed\", function () {\n        cy.createGroup(activeUser.token, {\n            name: `Test root project ${Math.floor(Math.random() * 999999)}`,\n            group_class: \"project\",\n        })\n            .as(\"testRootProject\")\n            .then(function () {\n                cy.createGroup(activeUser.token, {\n                    name: `Test subproject ${Math.floor(Math.random() * 999999)}`,\n                    group_class: \"project\",\n                    owner_uuid: this.testRootProject.uuid,\n                }).as(\"testSubProject\");\n            });\n        cy.getAll(\"@testRootProject\", \"@testSubProject\").then(function ([testRootProject, testSubProject]) {\n            cy.loginAs(activeUser);\n\n            // Go to subproject and trash it.\n            cy.goToPath(`/projects/${testSubProject.uuid}`);\n            cy.get(\"[data-cy=side-panel-tree]\").should('exist');\n            cy.get(\"[data-cy=side-panel-tree]\").should(\"contain\", testSubProject.name);\n            cy.waitForDom();\n            cy.get(\"[data-cy=breadcrumb-last]\").should('exist', { timeout: 10000 });\n            cy.get(\"[data-cy=breadcrumb-last]\").should(\"contain\", testSubProject.name).rightclick();\n            cy.get(\"[data-cy=context-menu]\").contains(\"Move to trash\").click();\n\n            // Confirm that the parent project should be displayed.\n            cy.waitForDom();\n            cy.get(\"[data-cy=breadcrumb-last]\").should('exist', { timeout: 10000 });\n            cy.get(\"[data-cy=breadcrumb-last]\").should(\"contain\", testRootProject.name);\n            cy.url().should(\"contain\", `/projects/${testRootProject.uuid}`);\n            cy.get(\"[data-cy=side-panel-tree]\").should(\"not.contain\", testSubProject.name);\n\n            // Checks for bugfix #17637.\n            cy.get(\"[data-cy=not-found-content]\").should(\"not.exist\");\n            cy.get(\"[data-cy=not-found-page]\").should(\"not.exist\");\n        });\n    });\n\n    it(\"resets the search box only when navigating out of the current project\", function () {\n        const fooProjectNameA = `Test foo project ${Math.floor(Math.random() * 999999)}`;\n        const fooProjectNameB = `Test foo project ${Math.floor(Math.random() * 999999)}`;\n        const barProjectNameA = `Test bar project ${Math.floor(Math.random() * 999999)}`;\n\n        [fooProjectNameA, fooProjectNameB, barProjectNameA].forEach(projName => {\n            cy.createGroup(activeUser.token, {\n                name: projName,\n                group_class: \"project\",\n            });\n        });\n\n        cy.loginAs(activeUser);\n        cy.doSidePanelNavigation('Home Projects');\n        cy.get(\"[data-cy=project-panel]\").should(\"contain\", fooProjectNameA).and(\"contain\", fooProjectNameB).and(\"contain\", 
barProjectNameA);\n\n        cy.doDataExplorerSearch(\"foo\");\n        cy.get(\"[data-cy=project-panel]\").should(\"contain\", fooProjectNameA).and(\"contain\", fooProjectNameB).and(\"not.contain\", barProjectNameA);\n\n        // Click on the table row to select it, search should remain the same.\n        cy.doDataExplorerSelect(fooProjectNameA);\n        cy.get(\"[data-cy=search-input] input\").should(\"have.value\", \"foo\");\n\n        // Click to navigate to the project, search should be reset\n        cy.doDataExplorerNavigate(fooProjectNameA);\n        cy.doMPVTabSelect(\"Data\");\n        cy.get(\"[data-cy=search-input] input\").should(\"not.have.value\", \"foo\");\n    });\n\n    it(\"navigates to the root project after trashing the parent of the one being displayed\", function () {\n        cy.createGroup(activeUser.token, {\n            name: `Test root project ${Math.floor(Math.random() * 999999)}`,\n            group_class: \"project\",\n        })\n            .as(\"testRootProject\")\n            .then(function () {\n                cy.createGroup(activeUser.token, {\n                    name: `Test subproject ${Math.floor(Math.random() * 999999)}`,\n                    group_class: \"project\",\n                    owner_uuid: this.testRootProject.uuid,\n                })\n                    .as(\"testSubProject\")\n                    .then(function () {\n                        cy.createGroup(activeUser.token, {\n                            name: `Test sub subproject ${Math.floor(Math.random() * 999999)}`,\n                            group_class: \"project\",\n                            owner_uuid: this.testSubProject.uuid,\n                        }).as(\"testSubSubProject\");\n                    });\n            });\n        cy.getAll(\"@testRootProject\", \"@testSubProject\", \"@testSubSubProject\").then(function ([testRootProject, testSubProject, testSubSubProject]) {\n            cy.loginAs(activeUser);\n\n            // Go to innermost project and trash its parent.\n            cy.goToPath(`/projects/${testSubSubProject.uuid}`);\n            cy.get(\"[data-cy=side-panel-tree]\").should('exist');\n            cy.get(\"[data-cy=side-panel-tree]\").should(\"contain\", testSubSubProject.name);\n            cy.waitForDom();\n            cy.get(\"[data-cy=breadcrumb-last]\").should('exist', { timeout: 10000 });\n            cy.get(\"[data-cy=breadcrumb-last]\").should(\"contain\", testSubSubProject.name);\n            cy.get(\"[data-cy=side-panel-tree]\").contains(testSubProject.name).rightclick();\n            cy.get(\"[data-cy=context-menu]\").contains(\"Move to trash\").click();\n\n            // Confirm that the trashed project's parent is displayed.\n            cy.waitForDom();\n            cy.get(\"[data-cy=breadcrumb-last]\").should('exist', { timeout: 10000 });\n            cy.get(\"[data-cy=breadcrumb-last]\").should(\"contain\", testRootProject.name);\n            cy.url().should(\"contain\", `/projects/${testRootProject.uuid}`);\n            cy.get(\"[data-cy=side-panel-tree]\").should(\"not.contain\", testSubProject.name);\n            cy.get(\"[data-cy=side-panel-tree]\").should(\"not.contain\", testSubSubProject.name);\n\n            // Checks for bugfix #17637.\n            cy.get(\"[data-cy=not-found-content]\").should(\"not.exist\");\n            cy.get(\"[data-cy=not-found-page]\").should(\"not.exist\");\n        });\n    });\n\n    it(\"clears search input when changing project\", () => {\n        cy.createGroup(activeUser.token, {\n       
     name: `Test root project ${Math.floor(Math.random() * 999999)}`,\n            group_class: \"project\",\n        })\n            .as(\"testProject1\")\n            .then(testProject1 => {\n                cy.shareWith(adminUser.token, activeUser.user.uuid, testProject1.uuid, \"can_write\");\n            });\n\n        cy.getAll(\"@testProject1\").then(function ([testProject1]) {\n            cy.loginAs(activeUser);\n            cy.doSidePanelNavigation('Home Projects');\n\n            cy.get(\"[data-cy=side-panel-tree]\").contains(testProject1.name).click();\n            cy.waitForDom();\n            cy.doMPVTabSelect(\"Data\");\n\n            cy.get(\"[data-cy=search-input] input\").type(\"test123\");\n\n            cy.get(\"[data-cy=side-panel-tree]\").contains(\"Projects\").click();\n            cy.get(\"[data-cy=search-input] input\").should('exist');\n            cy.get(\"[data-cy=search-input] input\").should(\"not.have.value\", \"test123\");\n        });\n    });\n\n    it(\"opens advanced popup for project with username\", () => {\n        const projectName = `Test project ${Math.floor(Math.random() * 999999)}`;\n\n        cy.createGroup(adminUser.token, {\n            name: projectName,\n            group_class: \"project\",\n        }).as(\"mainProject\");\n\n        cy.getAll(\"@mainProject\").then(function ([mainProject]) {\n            cy.loginAs(adminUser);\n\n            cy.get(\"[data-cy=side-panel-tree]\").contains(\"Groups\").click();\n\n            cy.get(\"[data-cy=uuid]\")\n                .eq(0)\n                .invoke(\"text\")\n                .then(uuid => {\n                    cy.createLink(adminUser.token, {\n                        name: \"can_write\",\n                        link_class: \"permission\",\n                        head_uuid: mainProject.uuid,\n                        tail_uuid: uuid,\n                    });\n\n                    cy.createLink(adminUser.token, {\n                        name: \"can_write\",\n                        link_class: \"permission\",\n                        head_uuid: mainProject.uuid,\n                        tail_uuid: activeUser.user.uuid,\n                    });\n\n                    cy.get(\"[data-cy=side-panel-tree]\").contains(\"Projects\").click();\n\n                    cy.get(\"main\").contains(projectName).rightclick();\n\n                    cy.get(\"[data-cy=context-menu]\").contains(\"API Details\").click();\n\n                    cy.get(\"[role=tablist]\").contains(\"METADATA\").click();\n\n                    cy.get(\"td\").contains(uuid).should(\"exist\");\n\n                    cy.get(\"td\").contains(activeUser.user.uuid).should(\"exist\");\n                });\n        });\n    });\n\n    describe(\"Frozen projects\", () => {\n        beforeEach(() => {\n            cy.createGroup(activeUser.token, {\n                name: `Main project ${Math.floor(Math.random() * 999999)}`,\n                group_class: \"project\",\n            }).as(\"mainProject\");\n\n            cy.createGroup(adminUser.token, {\n                name: `Admin project ${Math.floor(Math.random() * 999999)}`,\n                group_class: \"project\",\n            })\n                .as(\"adminProject\")\n                .then(adminProject => {\n                    cy.shareWith(adminUser.token, activeUser.user.uuid, adminProject.uuid, \"can_write\");\n                });\n\n            cy.get(\"@mainProject\").then(mainProject => {\n                cy.createGroup(adminUser.token, {\n                    name: `Sub project 
${Math.floor(Math.random() * 999999)}`,\n                    group_class: \"project\",\n                    owner_uuid: mainProject.uuid,\n                }).as(\"subProject\");\n\n                cy.createCollection(adminUser.token, {\n                    name: `Main collection ${Math.floor(Math.random() * 999999)}`,\n                    owner_uuid: mainProject.uuid,\n                    manifest_text: \"./subdir 37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo\\n. 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n                }).as(\"mainCollection\");\n            });\n        });\n\n        it(\"should be able to freeze own project\", () => {\n            cy.getAll(\"@mainProject\").then(([mainProject]) => {\n                cy.loginAs(activeUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                cy.get(\"[data-cy=project-panel]\").contains(mainProject.name).rightclick();\n\n                cy.get(\"[data-cy=context-menu]\").contains(\"Freeze\").click();\n\n                cy.get(\"[data-cy=project-panel]\").contains(mainProject.name).rightclick();\n\n                cy.get(\"[data-cy=context-menu]\").contains(\"Freeze\").should(\"not.exist\");\n            });\n        });\n\n        it(\"should not be able to modify items within the frozen project\", () => {\n            cy.getAll(\"@mainProject\", \"@mainCollection\").then(([mainProject, mainCollection]) => {\n                cy.loginAs(activeUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                cy.get(\"[data-cy=project-panel]\").contains(mainProject.name).rightclick();\n\n                cy.get(\"[data-cy=context-menu]\").contains(\"Freeze\").click();\n\n                cy.get(\"[data-cy=project-panel]\").contains(mainProject.name).click();\n                cy.doMPVTabSelect(\"Data\");\n\n                cy.get('[data-cy=data-table]').should('exist', { timeout: 10000 });\n\n                cy.get(\"[data-cy=project-panel]\").contains(mainCollection.name).rightclick();\n\n                cy.get(\"[data-cy=context-menu]\").contains(\"Move to trash\").should(\"not.exist\");\n            });\n        });\n\n        it(\"should not be able to freeze a not owned project\", () => {\n            cy.getAll(\"@adminProject\").then(([adminProject]) => {\n                cy.loginAs(activeUser);\n\n                cy.get(\"[data-cy=side-panel-tree]\").contains(\"Shared with me\").click();\n\n                cy.get(\"main\").contains(adminProject.name).rightclick();\n\n                cy.get(\"[data-cy=context-menu]\").contains(\"Freeze\").should(\"not.exist\");\n            });\n        });\n\n        it(\"should be able to unfreeze project if user is an admin\", () => {\n            cy.getAll(\"@adminProject\").then(([adminProject]) => {\n                cy.loginAs(adminUser);\n                cy.doSidePanelNavigation('Home Projects');\n\n                cy.get(\"main\").contains(adminProject.name).rightclick();\n\n                cy.get(\"[data-cy=context-menu]\").contains(\"Freeze\").click();\n\n                cy.wait(1000);\n\n                cy.get(\"main\").contains(adminProject.name).rightclick();\n\n                cy.get(\"[data-cy=context-menu]\").contains(\"Unfreeze\").click();\n\n                cy.get(\"main\").contains(adminProject.name).rightclick();\n\n                cy.get(\"[data-cy=context-menu]\").contains(\"Freeze\").should(\"exist\");\n            });\n        });\n    });\n\n    // The following test is enabled on Electron only, as Chromium and Firefox\n    // 
require permissions to access the clipboard.\n    it(\"copies project URL to clipboard\", { browser: 'electron' }, () => {\n        const projectName = `Test project (${Math.floor(999999 * Math.random())})`;\n\n        cy.loginAs(activeUser);\n        cy.goToPath(`/projects/${activeUser.user.uuid}`);\n        cy.get(\"[data-cy=side-panel-button]\").click();\n        cy.get(\"[data-cy=side-panel-new-project]\").click();\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"contain\", \"New Project\")\n            .within(() => {\n                cy.get(\"[data-cy=name-field]\").within(() => {\n                    cy.get(\"input\").type(projectName);\n                });\n                cy.get(\"[data-cy=form-submit-btn]\").click();\n            });\n        cy.contains(\"Project has been successfully created\");\n        cy.waitForDom();\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n        cy.get(\"[data-cy=snackbar]\").should(\"not.exist\");\n        cy.get(\"[data-cy=side-panel-tree]\").contains(\"Projects\").click();\n        cy.waitForDom();\n        cy.get(\"[data-cy=project-panel]\").contains(projectName).should(\"be.visible\").rightclick();\n        cy.get(\"[data-cy=context-menu]\").contains(\"Copy link to clipboard\").click();\n        cy.window({ timeout: 10000 }).then(win => {\n            win.focus();\n            win.navigator.clipboard.readText().then(text => {\n                expect(text).to.match(/https\\:\\/\\/127\\.0\\.0\\.1\\:[0-9]+\\/projects\\/[a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}/);\n            })}\n        );\n    });\n\n    it(\"sorts displayed items correctly\", () => {\n        cy.loginAs(activeUser);\n        cy.doSidePanelNavigation('Home Projects');\n\n        cy.get('[data-cy=project-panel] button[aria-label=\"Select columns\"]').click();\n        cy.get(\"div[role=presentation] ul > div[role=button]\").contains(\"Date Created\").click();\n        cy.get(\"div[role=presentation] ul > div[role=button]\").contains(\"Trash at\").click();\n        cy.get(\"div[role=presentation] ul > div[role=button]\").contains(\"Delete at\").click();\n        cy.get(\"div[role=presentation] > div[aria-hidden=true]\").click();\n\n        cy.intercept({\n            method: \"GET\",\n            url: \"**/arvados/v1/groups/*/contents*\",\n            query: {\n                // Ignore the count=exact items_available request\n                count: \"none\"\n            },\n        }).as(\"filteredQuery\");\n        [\n            {\n                name: \"Name\",\n                asc: \"collections.name asc,groups.name asc,workflows.name asc,created_at desc\",\n                desc: \"collections.name desc,groups.name desc,workflows.name desc,created_at desc\",\n            },\n            {\n                name: \"Last Modified\",\n                asc: \"collections.modified_at asc,groups.modified_at asc,workflows.modified_at asc,created_at desc\",\n                desc: \"collections.modified_at desc,groups.modified_at desc,workflows.modified_at desc,created_at desc\",\n            },\n            {\n                name: \"Date Created\",\n                asc: \"collections.created_at asc,groups.created_at asc,workflows.created_at asc,created_at desc\",\n                desc: \"collections.created_at desc,groups.created_at desc,workflows.created_at desc,created_at desc\",\n            },\n            {\n                name: \"Trash at\",\n                asc: \"collections.trash_at asc,groups.trash_at asc,workflows.trash_at asc,created_at 
desc\",\n                desc: \"collections.trash_at desc,groups.trash_at desc,workflows.trash_at desc,created_at desc\",\n            },\n            {\n                name: \"Delete at\",\n                asc: \"collections.delete_at asc,groups.delete_at asc,workflows.delete_at asc,created_at desc\",\n                desc: \"collections.delete_at desc,groups.delete_at desc,workflows.delete_at desc,created_at desc\",\n            },\n        ].forEach(test => {\n            cy.get(\"[data-cy=project-panel] table thead th\").contains(test.name).click();\n            cy.wait(\"@filteredQuery\").then(interception => {\n                const searchParams = new URLSearchParams(new URL(interception.request.url).search);\n                expect(searchParams.get(\"order\")).to.eq(test.asc);\n            });\n            cy.get(\"[data-cy=project-panel] table thead th\").contains(test.name).click();\n            cy.wait(\"@filteredQuery\").then(interception => {\n                const searchParams = new URLSearchParams(new URL(interception.request.url).search);\n                expect(searchParams.get(\"order\")).to.eq(test.desc);\n            });\n        });\n    });\n\n    it('resets project data pagination when changing location', () => {\n        const mainProjectName = `Main project`;\n        const emptyProjectName = \"Empty Project\"\n\n        // Create main project\n        cy.createProject({\n            owningUser: activeUser,\n            projectName: mainProjectName,\n        }).as('mainProject');\n\n        // Create 15 projects\n        cy.getAll('@mainProject').then(([mainProject]) => {\n            for (var i = 0; i < 15; i++) {\n                cy.createProject({\n                    owningUser: activeUser,\n                    ownerUuid: mainProject.uuid,\n                    projectName: `${emptyProjectName} (${Math.floor(999999 * Math.random())})`,\n                });\n            }\n\n            // Navigate to containing project\n            cy.loginAs(activeUser);\n            cy.doSidePanelNavigation('Home Projects');\n            cy.doDataExplorerNavigate(mainProject.name);\n            cy.doMPVTabSelect(\"Data\");\n\n            // Change page size to 10 and go to next page\n            cy.doDataExplorerPageSize(10);\n            cy.doDataExporerNextPage();\n\n            // Assert correct page size and page\n            cy.assertDataExplorerPageSize(10);\n            cy.assertDataExplorerPage(2);\n\n            // Navigate using breadcrumb back to home\n            cy.doBreadcrumbsNavigation(\"Home Projects\");\n            cy.doMPVTabSelect(\"Data\");\n\n            // Assert we're back on page 1\n            cy.assertDataExplorerPageSize(10);\n            cy.assertDataExplorerPage(1);\n        });\n    });\n\n    it('resets project run pagination when changing location', () => {\n        const mainProjectName = `Main project`;\n        const blankWorkflowName = \"Dummy workflow\"\n\n        // Setup docker image\n        cy.setupDockerImage('arvados/jobs')\n            .as('dockerImage');\n\n        // Create main project\n        cy.createProject({\n            owningUser: activeUser,\n            projectName: mainProjectName,\n        }).as('mainProject');\n\n        // Create 15 runs\n        cy.getAll('@mainProject', '@dockerImage').then(([mainProject, dockerImage]) => {\n            for (var i = 0; i < 15; i++) {\n                cy.createDefaultContainerRequest(\n                    adminUser.token,\n                    dockerImage,\n                    {\n  
                      name: `${blankWorkflowName} (${Math.floor(Math.random() * 999999)})`,\n                        state: \"Committed\",\n                        owner_uuid: mainProject.uuid,\n                    },\n                );\n            }\n\n            // Navigate to containing project\n            cy.loginAs(activeUser);\n            cy.doSidePanelNavigation('Home Projects');\n            cy.doDataExplorerNavigate(mainProject.name);\n            cy.doMPVTabSelect(\"Workflow Runs\");\n\n            // Change page size to 10 and go to next page\n            cy.doDataExplorerPageSize(10);\n            cy.doDataExporerNextPage();\n\n            // Assert correct page size and page\n            cy.assertDataExplorerPageSize(10);\n            cy.assertDataExplorerPage(2);\n\n            // Navigate using breadcrumb back to home\n            cy.doBreadcrumbsNavigation(\"Home Projects\");\n            cy.doMPVTabSelect(\"Workflow Runs\");\n\n            // Assert we're back on page 1\n            cy.assertDataExplorerPageSize(10);\n            cy.assertDataExplorerPage(1);\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/search.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe(\"Search tests\", function () {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser(\"admin\", \"Admin\", \"User\", true, true)\n            .as(\"adminUser\")\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser(\"collectionuser1\", \"Collection\", \"User\", false, true)\n            .as(\"activeUser\")\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    it(\"can search for old collection versions\", function () {\n        const colName = `Versioned Collection ${Math.floor(Math.random() * Math.floor(999999))}`;\n        let colUuid = \"\";\n        let oldVersionUuid = \"\";\n        // Make sure no other collections with this name exist\n        cy.doRequest(\"GET\", \"/arvados/v1/collections\", null, {\n            filters: `[[\"name\", \"=\", \"${colName}\"]]`,\n            include_old_versions: true,\n        })\n            .its(\"body.items\")\n            .as(\"collections\")\n            .then(function () {\n                expect(this.collections).to.be.empty;\n            });\n        // Creates the collection using the admin token so we can set up\n        // a bogus manifest text without block signatures.\n        cy.createCollection(adminUser.token, {\n            name: colName,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        })\n            .as(\"originalVersion\")\n            .then(function () {\n                // Change the file name to create a new version.\n                cy.updateCollection(adminUser.token, this.originalVersion.uuid, {\n                    manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:foo\\n\",\n                });\n                colUuid = this.originalVersion.uuid;\n            });\n        // Confirm that there are 2 versions of the collection\n        cy.doRequest(\"GET\", \"/arvados/v1/collections\", null, {\n            filters: `[[\"name\", \"=\", \"${colName}\"]]`,\n            include_old_versions: true,\n        })\n            .its(\"body.items\")\n            .as(\"collections\")\n            .then(function () {\n                expect(this.collections).to.have.lengthOf(2);\n                this.collections.map(function (aCollection) {\n                    expect(aCollection.current_version_uuid).to.equal(colUuid);\n                    if (aCollection.uuid !== aCollection.current_version_uuid) {\n                        oldVersionUuid = aCollection.uuid;\n                    }\n                });\n                cy.loginAs(activeUser);\n                const searchQuery = `${colName} type:arvados#collection`;\n                // Search for only collection's current version\n                cy.doSearch(`${searchQuery}`);\n                cy.get(\"[data-cy=search-results]\").should(\"contain\", \"head version\");\n                cy.get(\"[data-cy=search-results]\").should(\"not.contain\", \"version 1\");\n                // ...and then, include old versions.\n                cy.doSearch(`${searchQuery} is:pastVersion`);\n                cy.get(\"[data-cy=search-results]\").should(\"contain\", \"head version\");\n                cy.get(\"[data-cy=search-results]\").should(\"contain\", \"version 1\");\n            });\n    });\n\n    it(\"can display path of the selected item\", function () {\n        const colName = `Collection ${Math.floor(Math.random() * Math.floor(999999))}`;\n\n        // Creates the collection using the admin token so we can set up\n        // a bogus manifest text without block signatures.\n        cy.createCollection(adminUser.token, {\n            name: colName,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        }).then(function () {\n            cy.loginAs(activeUser);\n\n            cy.doSearch(colName);\n\n            cy.get(\"[data-cy=search-results]\").should(\"contain\", colName);\n\n            cy.get(\"[data-cy=search-results]\").contains(colName).closest(\"tr\").click();\n\n            cy.get(\"[data-cy=element-path]\").should(\"contain\", `/ Projects / ${colName}`);\n        });\n    });\n\n    it(\"can search items using quotes\", function () {\n        const random = Math.floor(Math.random() * Math.floor(999999));\n        const colName = `Collection ${random}`;\n        const colName2 = `Collection test ${random}`;\n\n        // Creates the collection using the admin token so we can set up\n        // a bogus manifest text without block signatures.\n        cy.createCollection(adminUser.token, {\n            name: colName,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        }).as(\"collection1\");\n\n        cy.createCollection(adminUser.token, {\n            name: colName2,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        }).as(\"collection2\");\n\n        cy.getAll(\"@collection1\", \"@collection2\").then(function () {\n            cy.loginAs(activeUser);\n\n            cy.doSearch(colName);\n            cy.get(\"[data-cy=search-results] table tbody tr\").should(\"have.length\", 2);\n\n            cy.doSearch(`\"${colName}\"`);\n            cy.get(\"[data-cy=search-results] table tbody tr\").should(\"have.length\", 1);\n        });\n    });\n\n    it(\"can display owner of the item\", function () {\n        const colName = `Collection ${Math.floor(Math.random() * Math.floor(999999))}`;\n\n        cy.createCollection(adminUser.token, {\n            name: colName,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        }).then(function () {\n            cy.loginAs(activeUser);\n\n            cy.doSearch(colName);\n\n            cy.get(\"[data-cy=search-results]\").should(\"contain\", colName);\n\n            cy.get(\"[data-cy=search-results]\")\n                .contains(colName)\n                .closest(\"tr\")\n                .within(() => {\n                    cy.get(\"p\").contains(activeUser.user.uuid).should(\"contain\", activeUser.user.full_name);\n                });\n        });\n    });\n\n    // The following test is enabled on Electron only, as Chromium and Firefox\n    // require permissions to access the clipboard.\n    it(\"shows search context menu\", { browser: 'electron' } , function () {\n        const colName = `Home Collection ${Math.floor(Math.random() * Math.floor(999999))}`;\n        const federatedColName = `Federated Collection ${Math.floor(Math.random() * Math.floor(999999))}`;\n        const federatedColUuid = \"xxxxx-4zz18-000000000000000\";\n\n        // Intercept config to insert remote cluster\n        cy.intercept({ method: \"GET\", hostname: \"127.0.0.1\", url: \"**/arvados/v1/config?nocache=*\" }, req => {\n            req.on('response', res => {\n                res.body.RemoteClusters = {\n                    \"*\": res.body.RemoteClusters[\"*\"],\n                    xxxxx: {\n                        ActivateUsers: true,\n                        Host: \"xxxxx.fakecluster.tld\",\n                        Insecure: false,\n                        Proxy: true,\n                        Scheme: \"\",\n                    },\n                };\n            });\n        });\n\n        // Fake remote cluster config\n        cy.intercept(\n            {\n                method: \"GET\",\n                hostname: \"xxxxx.fakecluster.tld\",\n                url: \"**/arvados/v1/config\",\n            },\n            {\n                statusCode: 200,\n                body: {\n                    API: {},\n                    ClusterID: \"xxxxx\",\n                    Collections: {},\n                    Containers: {},\n                    InstanceTypes: {},\n                    Login: {},\n                    RemoteClusters: {\n                        \"*\": {\n                            ActivateUsers: false,\n                            Host: \"\",\n                            Insecure: false,\n                            Proxy: false,\n                            Scheme: \"https\",\n                        },\n                    },\n                    Services: {\n                        Composer: { ExternalURL: \"\" },\n                        Controller: { ExternalURL: 
\"https://xxxxx.fakecluster.tld:34763/\" },\n                        DispatchCloud: { ExternalURL: \"\" },\n                        DispatchLSF: { ExternalURL: \"\" },\n                        DispatchSLURM: { ExternalURL: \"\" },\n                        Health: { ExternalURL: \"https://xxxxx.fakecluster.tld:42915/\" },\n                        Keepbalance: { ExternalURL: \"\" },\n                        Keepproxy: { ExternalURL: \"https://xxxxx.fakecluster.tld:46773/\" },\n                        Keepstore: { ExternalURL: \"\" },\n                        RailsAPI: { ExternalURL: \"\" },\n                        WebDAV: { ExternalURL: \"https://xxxxx.fakecluster.tld:36041/\" },\n                        WebDAVDownload: { ExternalURL: \"https://xxxxx.fakecluster.tld:42957/\" },\n                        WebShell: { ExternalURL: \"\" },\n                        Websocket: { ExternalURL: \"wss://xxxxx.fakecluster.tld:37121/websocket\" },\n                        Workbench1: { ExternalURL: \"https://wb1.xxxxx.fakecluster.tld/\" },\n                        Workbench2: { ExternalURL: \"https://wb2.xxxxx.fakecluster.tld/\" },\n                    },\n                    StorageClasses: {\n                        default: { Default: true, Priority: 0 },\n                    },\n                    Users: { SupportEmailAddress: \"arvados@example.com\" },\n                    Volumes: {},\n                    Workbench: {},\n                },\n            }\n        );\n\n        cy.createCollection(adminUser.token, {\n            name: colName,\n            owner_uuid: activeUser.user.uuid,\n            preserve_version: true,\n            manifest_text: \". 37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\",\n        }).then(function (testCollection) {\n            cy.loginAs(activeUser);\n\n            // Intercept search results to add federated result\n            cy.intercept({ method: \"GET\", url: \"**/arvados/v1/groups/contents?*\" }, req => {\n                req.on('response', res => {\n                    res.body.items = [\n                        res.body.items[0],\n                        {\n                            ...res.body.items[0],\n                            uuid: federatedColUuid,\n                            portable_data_hash: \"00000000000000000000000000000000+0\",\n                            name: federatedColName,\n                        },\n                    ];\n                    res.body.items_available += 1;\n                });\n            });\n\n            cy.doSearch(colName);\n\n            // Stub new window\n            cy.window().then(win => {\n                cy.stub(win, \"open\").as(\"Open\");\n            });\n\n            // Check Copy link to clipboard\n            cy.get(\"[data-cy=search-results]\").contains(colName).rightclick();\n            cy.get(\"[data-cy=context-menu]\").within(ctx => {\n                // Check that there are 5 items in the menu\n                cy.get(ctx).children().should(\"have.length\", 5);\n                cy.contains(\"API Details\");\n                cy.contains(\"Copy UUID\");\n                cy.contains(\"Copy link to clipboard\");\n                cy.contains(\"Open in new tab\");\n                cy.contains(\"View details\");\n\n                cy.contains(\"Copy link to clipboard\").click();\n                cy.waitForDom();\n                cy.window({ timeout: 15000 }).then(win =>\n                    win.navigator.clipboard.readText().then(text => {\n                        expect(text).to.match(new 
RegExp(`/collections/${testCollection.uuid}$`));\n                    })\n                );\n            });\n\n            // Check open in new tab\n            cy.get(\"[data-cy=search-results]\").contains(colName).rightclick();\n            cy.get(\"[data-cy=context-menu]\").within(() => {\n                cy.contains(\"Open in new tab\").click();\n                cy.waitForDom();\n                cy.get(\"@Open\").should(\"have.been.calledOnceWith\", `${window.location.origin}/collections/${testCollection.uuid}`);\n            });\n\n            // Check federated result Copy link to clipboard\n            cy.get(\"[data-cy=search-results]\").contains(federatedColName).rightclick();\n            cy.get(\"[data-cy=context-menu]\").within(() => {\n                cy.contains(\"Copy link to clipboard\").click();\n                cy.waitForDom();\n                cy.window({ timeout: 15000 }).then(win =>\n                    win.navigator.clipboard.readText().then(text => {\n                        expect(text).to.equal(`https://wb2.xxxxx.fakecluster.tld/collections/${federatedColUuid}`);\n                    })\n                );\n            });\n            // Check open in new tab\n            cy.get(\"[data-cy=search-results]\").contains(federatedColName).rightclick();\n            cy.get(\"[data-cy=context-menu]\").within(() => {\n                cy.contains(\"Open in new tab\").click();\n                cy.waitForDom();\n                cy.get(\"@Open\").should(\"have.been.calledWith\", `https://wb2.xxxxx.fakecluster.tld/collections/${federatedColUuid}`);\n            });\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/sharing.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe('Sharing tests', function () {\n    let activeUser;\n    let adminUser;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser').then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser('collectionuser1', 'Collection', 'User', false, true)\n            .as('activeUser').then(function () {\n                activeUser = this.activeUser;\n            });\n    })\n\n    it('can create and delete sharing URLs on collections', () => {\n        const collName = 'shared-collection ' + new Date().getTime();\n        cy.createCollection(adminUser.token, {\n            name: collName,\n            owner_uuid: adminUser.uuid,\n        }).as('sharedCollection').then(function (sharedCollection) {\n            cy.loginAs(adminUser);\n            cy.doSidePanelNavigation('Home Projects');\n            cy.doMPVTabSelect(\"Data\");\n\n            cy.get('main').contains(sharedCollection.name).rightclick();\n            cy.get('[data-cy=context-menu]').within(() => {\n                cy.contains('Share').click({ waitForAnimations: false });\n            });\n            cy.get('.sharing-dialog').within(() => {\n                cy.contains('Sharing URLs').click();\n                cy.contains('Create sharing URL');\n                cy.contains('No sharing URLs');\n                cy.should('not.contain', 'Token');\n                cy.should('not.contain', 'expiring at:');\n\n                cy.contains('Create sharing URL').click();\n                cy.should('not.contain', 'No sharing URLs');\n                cy.contains('Token');\n                cy.contains('expiring at:');\n\n                cy.get('[data-cy=remove-url-btn]').find('button').click();\n                cy.contains('No sharing URLs');\n                cy.should('not.contain', 'Token');\n                cy.should('not.contain', 'expiring at:');\n            })\n        })\n    });\n\n    it('can share projects to other users', () => {\n        cy.loginAs(adminUser);\n        cy.doSidePanelNavigation('Home Projects');\n        cy.doMPVTabSelect(\"Data\");\n\n        cy.createGroup(adminUser.token, {\n            name: `my-shared-writable-project ${Math.floor(Math.random() * 999999)}`,\n            group_class: 'project',\n        }).as('mySharedWritableProject').then(function (mySharedWritableProject) {\n            cy.contains('Refresh').click();\n            cy.get('main').contains(mySharedWritableProject.name).rightclick();\n            cy.get('[data-cy=context-menu]').within(() => {\n                cy.contains('Share').click({ waitForAnimations: false });\n            });\n            cy.get('[data-cy=permission-select]').as('permissionSelect');\n            cy.get('@permissionSelect').click();\n            cy.contains('Write').click();\n            cy.get('.sharing-dialog').as('sharingDialog');\n            cy.get('[data-cy=invite-people-field]').find('input').type(activeUser.user.email);\n            cy.get('[data-cy=\"loading-spinner\"]').should('not.exist');\n            
cy.get('[data-cy=\"users-tab-label\"]').click();\n            cy.get('[data-cy=sharing-suggestion]').click();\n            cy.get('@sharingDialog').within(() => {\n                cy.get('[data-cy=add-invited-people]').click();\n                cy.contains('Close').click({ waitForAnimations: false });\n            });\n        });\n\n        cy.createGroup(adminUser.token, {\n            name: `my-shared-readonly-project ${Math.floor(Math.random() * 999999)}`,\n            group_class: 'project',\n        }).as('mySharedReadonlyProject').then(function (mySharedReadonlyProject) {\n            cy.contains('Refresh').click();\n            cy.get('main').contains(mySharedReadonlyProject.name).rightclick();\n            cy.get('[data-cy=context-menu]').within(() => {\n                cy.contains('Share').click({ waitForAnimations: false });\n            });\n            cy.get('.sharing-dialog').as('sharingDialog');\n            cy.get('[data-cy=invite-people-field]').find('input').type(activeUser.user.email);\n            cy.get('[data-cy=\"loading-spinner\"]').should('not.exist');\n            cy.get('[data-cy=\"users-tab-label\"]').click();\n            cy.get('[data-cy=sharing-suggestion]').click();\n            cy.get('@sharingDialog').within(() => {\n                cy.get('[data-cy=add-invited-people]').click();\n                cy.contains('Close').click({ waitForAnimations: false });\n            });\n        });\n\n        cy.getAll('@mySharedWritableProject', '@mySharedReadonlyProject')\n            .then(function ([mySharedWritableProject, mySharedReadonlyProject]) {\n                cy.loginAs(activeUser);\n\n                cy.contains('Shared with me').click();\n\n                // Test search\n                cy.get('[data-cy=search-input] input').type('readonly');\n                cy.get('main').should('not.contain', mySharedWritableProject.name);\n                cy.get('main').should('contain', mySharedReadonlyProject.name);\n                cy.get('[data-cy=search-input] input').clear();\n\n                // Test filter\n                cy.waitForDom().get('th').contains('Type').click();\n                cy.get('div[role=presentation]').contains('Project').click();\n                cy.waitForDom().get('main table tr td').contains('Project').should('not.exist');\n                cy.get('div[role=presentation]').contains('Project').click();\n                cy.waitForDom().get('div[role=presentation] button').contains('Close').click();\n\n                // Test move to trash\n                cy.get('main').contains(mySharedWritableProject.name).rightclick();\n                cy.get('[data-cy=context-menu]').should('contain', 'Move to trash');\n                cy.get('[data-cy=context-menu]').contains('Move to trash').click({ waitForAnimations: false });\n\n                // GUARD: Let's wait for the above removed project to disappear\n                // before continuing, to avoid intermittent failures.\n                cy.get('main').should('not.contain', mySharedWritableProject.name);\n\n                cy.get('main').contains(mySharedReadonlyProject.name).rightclick();\n                cy.get('[data-cy=context-menu]').should('not.contain', 'Move to trash');\n            });\n    });\n\n    it('can edit project in shared with me', () => {\n        cy.createProject({\n            owningUser: adminUser,\n            targetUser: activeUser,\n            projectName: 'mySharedWritableProject',\n            canWrite: true,\n            addToFavorites: true\n        });\n\n    
    cy.getAll('@mySharedWritableProject')\n            .then(function ([mySharedWritableProject]) {\n                cy.loginAs(activeUser);\n\n                cy.get('[data-cy=side-panel-tree]').contains('Shared with me').click();\n\n                const newProjectName = `New project name ${mySharedWritableProject.name}`;\n                const newProjectDescription = `New project description ${mySharedWritableProject.name}`;\n\n                cy.testEditProjectOrCollection('main', mySharedWritableProject.name, newProjectName, newProjectDescription);\n            });\n    });\n\n    it('can share only when target users are present', () => {\n        const collName = `mySharedCollectionForUsers-${new Date().getTime()}`;\n        cy.createCollection(adminUser.token, {\n            name: collName,\n            owner_uuid: adminUser.uuid,\n        }).as('mySharedCollectionForUsers');\n\n        cy.getAll('@mySharedCollectionForUsers')\n            .then(function () {\n                cy.loginAs(adminUser);\n                cy.doSidePanelNavigation('Home Projects');\n                cy.doMPVTabSelect(\"Data\");\n                cy.get('[data-cy=project-panel]').contains(collName).rightclick();\n                cy.get('[data-cy=context-menu]').within(() => {\n                    cy.get('[data-cy=Share]').click();\n                });\n\n                cy.get('button').get('[data-cy=add-invited-people]').should('be.disabled');\n                cy.get('[data-cy=invite-people-field] input').type('Anonymous');\n                cy.get('[data-cy=loading-spinner]').should('not.exist');\n                cy.get('[data-cy=\"users-tab-label\"]').click();\n                cy.waitForDom();\n                cy.get('[data-cy=sharing-suggestion]').contains('Anonymous').click();\n                cy.get('button').get('[data-cy=add-invited-people]').should('not.be.disabled');\n                cy.get('[data-cy=invite-people-field] div[role=button]').contains('anonymous').parent().find('svg').click();\n                cy.get('button').get('[data-cy=add-invited-people]').should('be.disabled');\n            });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/side-panel.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe('Side panel tests', function() {\n    let activeUser;\n    let adminUser;\n\n    before(function() {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser').then(function() {\n                adminUser = this.adminUser;\n            }\n        );\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser').then(function() {\n                activeUser = this.activeUser;\n            }\n        );\n    })\n\n    it('enables the +NEW side panel button on users home project', function() {\n        cy.loginAs(activeUser);\n        cy.goToPath(`/projects/${activeUser.user.uuid}`);\n        cy.get('[data-cy=side-panel-button]')\n            .should('exist')\n            .and('not.be.disabled');\n    })\n\n    it('disables or enables the +NEW side panel button depending on project permissions', function() {\n        cy.loginAs(activeUser);\n        [true, false].map(function(isWritable) {\n            cy.createGroup(adminUser.token, {\n                name: `Test ${isWritable ? 'writable' : 'read-only'} project`,\n                group_class: 'project',\n            }).as('sharedGroup').then(function() {\n                cy.createLink(adminUser.token, {\n                    name: isWritable ? 'can_write' : 'can_read',\n                    link_class: 'permission',\n                    head_uuid: this.sharedGroup.uuid,\n                    tail_uuid: activeUser.user.uuid\n                })\n                cy.goToPath(`/projects/${this.sharedGroup.uuid}`);\n                cy.get('[data-cy=side-panel-button]')\n                    .should('exist')\n                    .and(`${isWritable ? 'not.' 
: ''}be.disabled`);\n            })\n        })\n    })\n\n    it('disables the +NEW side panel button on appropriate sections', function() {\n        cy.loginAs(activeUser);\n        [\n            {url: '/shared-with-me', label: 'Shared with me'},\n            {url: '/public-favorites', label: 'Public Favorites'},\n            {url: '/favorites', label: 'My Favorites'},\n            {url: '/all_processes', label: 'All Processes'},\n            {url: '/trash', label: 'Trash'},\n            {url: '/virtual-machines-user', label: 'Shell Access'},\n            {url: '/instance-types', label: 'Instance Types'},\n            {url: '/groups', label: 'Groups'},\n            {url: '/dashboard', label: 'Dashboard'},\n        ].map(function(section) {\n            cy.waitForDom().goToPath(section.url);\n            cy.get('[data-cy=breadcrumb-first]')\n                .should('contain', section.label);\n            cy.get('[data-cy=side-panel-button]')\n                .should('exist')\n                .and('be.disabled');\n        })\n    })\n\n    it('disables the +NEW side panel button when viewing filter group', function() {\n        cy.loginAs(adminUser);\n        cy.createGroup(adminUser.token, {\n            name: `my-favorite-filter-group`,\n            group_class: 'filter',\n            properties: {filters: []},\n        }).as('myFavoriteFilterGroup').then(function (myFavoriteFilterGroup) {\n            cy.goToPath(`/projects/${myFavoriteFilterGroup.uuid}`);\n            cy.waitForDom();\n            cy.get(\"[data-cy=breadcrumb-last]\").should('exist', { timeout: 10000 });\n            cy.get('[data-cy=breadcrumb-last]').should('contain', 'my-favorite-filter-group');\n\n            cy.get('[data-cy=side-panel-button]')\n                .should('exist')\n                .and('be.disabled');\n        })\n    })\n\n    it('can edit project in side panel', () => {\n        cy.createProject({\n            owningUser: activeUser,\n            targetUser: activeUser,\n            projectName: 'mySharedWritableProject',\n            canWrite: true,\n            addToFavorites: false\n        });\n\n        cy.getAll('@mySharedWritableProject')\n            .then(function ([mySharedWritableProject]) {\n                cy.loginAs(activeUser);\n\n                cy.get('[data-cy=side-panel-tree]').contains('Projects').click();\n\n                const newProjectName = `New project name ${mySharedWritableProject.name}`;\n                const newProjectDescription = `New project description ${mySharedWritableProject.name}`;\n\n                cy.testEditProjectOrCollection('[data-cy=side-panel-tree]', mySharedWritableProject.name, newProjectName, newProjectDescription);\n            });\n    });\n\n    it('side panel reacts to refresh when project data changes', () => {\n        const project = 'writableProject';\n\n        cy.createProject({\n            owningUser: activeUser,\n            targetUser: activeUser,\n            projectName: project,\n            canWrite: true,\n            addToFavorites: false\n        });\n\n        cy.getAll('@writableProject').then(function ([writableProject]) {\n            cy.loginAs(activeUser);\n            cy.get('[data-cy=side-panel-tree]')\n                .contains('Projects').click();\n            cy.get('[data-cy=side-panel-tree]')\n                .contains(writableProject.name).should('exist');\n            cy.trashGroup(activeUser.token, writableProject.uuid).then(() => {\n                cy.contains('Refresh').click();\n                
cy.contains(writableProject.name).should('not.exist');\n            });\n        });\n    });\n\n    it('collapses and un-collapses', () => {\n\n        cy.loginAs(activeUser)\n        cy.get('[data-cy=side-panel-tree]').should('exist')\n        cy.get('[data-cy=side-panel-toggle]').click()\n        cy.get('[data-cy=side-panel-tree]').should('not.exist')\n        cy.get('[data-cy=side-panel-collapsed]').should('exist')\n        cy.get('[data-cy=side-panel-toggle]').click()\n        cy.get('[data-cy=side-panel-tree]').should('exist')\n        cy.get('[data-cy=side-panel-collapsed]').should('not.exist')\n    })\n\n    it('can navigate from collapsed panel', () => {\n\n        const collapsedCategories = {\n            'shared-with-me': '/shared-with-me',\n            'public-favorites': '/public-favorites',\n            'my-favorites': '/favorites',\n            'groups': '/groups',\n            'all-processes': '/all_processes',\n            'trash': '/trash',\n            'shell-access': '/virtual-machines-user',\n            'home-projects': `/projects/${activeUser.user.uuid}`,\n        }\n\n        cy.loginAs(activeUser)\n        cy.get('[data-cy=side-panel-tree]').should('exist')\n        cy.get('[data-cy=side-panel-toggle]').click()\n        cy.get('[data-cy=side-panel-collapsed]').should('exist')\n\n        for (const cat in collapsedCategories) {\n            cy.get(`[data-cy=collapsed-${cat}]`).should('exist').click()\n            cy.url().should('include', collapsedCategories[cat])\n        }\n    })\n})\n\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/trash.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe('Trash tests', function () {\n    let adminUser;\n\n    before(function () {\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser').then(function () {\n                adminUser = this.adminUser;\n            });\n    });\n\n    it('trashes and untrashes projects', function() {\n        // Create test project\n        cy.createProject({\n            owningUser: adminUser,\n            projectName: `trashTestProject`,\n        }).as('testProject');\n\n        cy.getAll('@testProject')\n            .then(function ([testProject]) {\n                cy.loginAs(adminUser);\n                cy.doSidePanelNavigation('Home Projects');\n                cy.doMPVTabSelect(\"Data\");\n\n                // Project Trash Tests\n\n                // Trash with context menu\n                cy.doDataExplorerContextAction(testProject.name, 'Move to trash');\n\n                // Verify trashed and breadcrumbs correct\n                cy.assertDataExplorerContains(testProject.name, false);\n                cy.assertBreadcrumbs([\"Home Projects\"]);\n\n                // Restore with context menu\n                cy.get('[data-cy=side-panel-tree]').contains('Trash').click();\n                cy.assertBreadcrumbs([\"Trash\"]);\n                cy.doDataExplorerSearch(testProject.name);\n                cy.doDataExplorerContextAction(testProject.name, 'Restore');\n\n                // Verify navigated to project\n                cy.assertBreadcrumbs([\"Home Projects\", testProject.name]);\n                cy.assertUrlPathname(`/projects/${testProject.uuid}`);\n                // Verify present in home project\n                cy.get('[data-cy=side-panel-tree]').contains('Home Projects').click();\n                cy.assertBreadcrumbs([\"Home Projects\"]);\n                cy.doMPVTabSelect(\"Data\");\n                cy.assertDataExplorerContains(testProject.name, true);\n\n                // Test delete from toolbar\n                cy.doDataExplorerSelect(testProject.name);\n                cy.doToolbarAction(\"Move to trash\");\n\n                // Verify trashed and breadcrumbs correct\n                cy.assertDataExplorerContains(testProject.name, false);\n                cy.assertBreadcrumbs([\"Home Projects\"]);\n\n                // Restore with toolbar\n                cy.get('[data-cy=side-panel-tree]').contains('Trash').click();\n                cy.assertBreadcrumbs([\"Trash\"]);\n                cy.doDataExplorerSearch(testProject.name);\n                cy.doDataExplorerSelect(testProject.name);\n                cy.get(`[aria-label=\"Restore\"]`, { timeout: 5000 }).eq(0).click();\n                cy.waitForDom();\n\n                // Verify navigated to project\n                cy.assertBreadcrumbs([\"Home Projects\", testProject.name]);\n                cy.assertUrlPathname(`/projects/${testProject.uuid}`);\n                // Verify present in home project\n                cy.get('[data-cy=side-panel-tree]').contains('Home Projects').click();\n                cy.assertBreadcrumbs([\"Home Projects\"]);\n                cy.assertDataExplorerContains(testProject.name, true);\n            });\n    });\n\n    it(\"trashes and untrashes collections\", function() {\n        // Create test collection\n        cy.createCollection(adminUser.token, {\n            owner_uuid: adminUser.user.uuid,\n            name: `trashTestCollection 
${Math.floor(Math.random() * 999999)}`,\n        }).as('testCollection');\n\n        cy.getAll('@testCollection')\n            .then(function ([testCollection]) {\n                cy.loginAs(adminUser);\n                cy.doSidePanelNavigation('Home Projects');\n                cy.doMPVTabSelect(\"Data\");\n\n                // Collection Trash Tests\n\n                // Trash with context menu\n                cy.doDataExplorerContextAction(testCollection.name, 'Move to trash');\n\n                // Verify trashed and breadcrumbs correct\n                cy.assertDataExplorerContains(testCollection.name, false);\n                cy.assertBreadcrumbs([\"Home Projects\"]);\n\n                // Restore with context menu\n                cy.get('[data-cy=side-panel-tree]').contains('Trash').click();\n                cy.assertBreadcrumbs([\"Trash\"]);\n                cy.doDataExplorerSearch(testCollection.name);\n                cy.doDataExplorerContextAction(testCollection.name, 'Restore');\n\n                // Verify not in trash and in home project\n                cy.assertDataExplorerContains(testCollection.name, false);\n                cy.assertBreadcrumbs([\"Trash\"]);\n                cy.get('[data-cy=side-panel-tree]').contains('Home Projects').click();\n                cy.assertBreadcrumbs([\"Home Projects\"]);\n                cy.assertDataExplorerContains(testCollection.name, true);\n\n                // Test delete from toolbar\n                cy.doDataExplorerSelect(testCollection.name);\n                cy.doToolbarAction(\"Move to trash\");\n\n                // Verify trashed and breadcrumbs correct\n                cy.assertDataExplorerContains(testCollection.name, false);\n                cy.assertBreadcrumbs([\"Home Projects\"]);\n\n                // Restore with toolbar\n                cy.get('[data-cy=side-panel-tree]').contains('Trash').click();\n                cy.assertBreadcrumbs([\"Trash\"]);\n                cy.doDataExplorerSearch(testCollection.name);\n                cy.doDataExplorerSelect(testCollection.name);\n                cy.get(`[aria-label=\"Restore\"]`, { timeout: 5000 }).eq(0).click();\n                cy.waitForDom();\n\n                // Verify not in trash and in home project\n                cy.assertDataExplorerContains(testCollection.name, false);\n                cy.assertBreadcrumbs([\"Trash\"]);\n                cy.get('[data-cy=side-panel-tree]').contains('Home Projects').click();\n                cy.assertBreadcrumbs([\"Home Projects\"]);\n                cy.assertDataExplorerContains(testCollection.name, true);\n            });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/user-preferences.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe('User profile tests', function() {\n    let activeUser;\n    let testProjectName = `mainProject ${Math.floor(Math.random() * 999999)}`;\n\n    before(function() {\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser').then(function() {\n                activeUser = this.activeUser;\n            }\n        );\n    });\n\n    it('respects default project tab user preference', function() {\n        // Create test project\n        cy.createProject({\n            owningUser: activeUser,\n            projectName: testProjectName,\n        });\n\n        cy.loginAs(activeUser);\n        cy.doSidePanelNavigation('Home Projects');\n\n        // Verify default tab on load\n        cy.get('[data-cy=project-data]').should('exist');\n        cy.get('[data-cy=project-run]').should('not.exist');\n\n        // Navigate to project and switch to runs tab\n        cy.assertDataExplorerContains(testProjectName, true).click();\n        cy.get('[data-cy=mpv-tabs]').contains(\"Workflow Runs\").click();\n\n        // Verify tab state\n        cy.get('[data-cy=project-data]').should('not.exist');\n        cy.get('[data-cy=project-run]').should('exist');\n\n        // Navigate back to home\n        cy.doBreadcrumbsNavigation(\"Home Projects\");\n\n        // Verify tabs switched back to data and project visible\n        cy.assertDataExplorerContains(testProjectName, true);\n        cy.get('[data-cy=project-data]').should('exist');\n        cy.get('[data-cy=project-run]').should('not.exist');\n\n        // Change default tab preferecne\n        cy.doAccountMenuAction(\"Preferences\");\n        cy.get('input[type=radio][name=\"prefs.wb.default_project_tab\"][value=\"Workflow Runs\"]').click();\n        cy.get('[data-cy=preferences-form] button[type=submit]').click();\n\n        // Verify new default tab\n        cy.doSidePanelNavigation(\"Home Projects\");\n        cy.get('[data-cy=project-data]').should('not.exist');\n        cy.get('[data-cy=project-run]').should('exist');\n        cy.assertDataExplorerContains(testProjectName, false);\n\n        // Switch to data tab and navigate to project\n        cy.get('[data-cy=mpv-tabs]').contains(\"Data\").click();\n        cy.get('[data-cy=project-data]').should('exist');\n        cy.get('[data-cy=project-run]').should('not.exist');\n        cy.assertDataExplorerContains(testProjectName, true).click();\n\n        // Verify switched back to runs and project absent\n        cy.get('[data-cy=project-data]').should('not.exist');\n        cy.get('[data-cy=project-run]').should('exist');\n        cy.assertDataExplorerContains(testProjectName, false);\n\n        // Change default tab preferecne back\n        cy.doAccountMenuAction(\"Preferences\");\n        cy.get('input[type=radio][name=\"prefs.wb.default_project_tab\"][value=\"Data\"]').click();\n        cy.get('[data-cy=preferences-form] button[type=submit]').click();\n    });\n\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/user-profile.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe('User profile tests', function() {\n    let activeUser;\n    let adminUser;\n    const roleGroupName = `Test role group (${Math.floor(999999 * Math.random())})`;\n    const projectGroupName = `Test project group (${Math.floor(999999 * Math.random())})`;\n\n    before(function() {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser').then(function() {\n                adminUser = this.adminUser;\n            }\n        );\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser').then(function() {\n                activeUser = this.activeUser;\n            }\n        );\n    });\n\n    function assertProfileValues({\n        firstName,\n        lastName,\n        email,\n        username,\n        org,\n        org_email,\n        role,\n        website,\n    }) {\n        cy.get('[data-cy=profile-form] input[name=\"firstName\"]').invoke('val').should('equal', firstName);\n        cy.get('[data-cy=profile-form] input[name=\"lastName\"]').invoke('val').should('equal', lastName);\n        cy.get('[data-cy=profile-form] [data-cy=email] [data-cy=value]').contains(email);\n        cy.get('[data-cy=profile-form] [data-cy=username] [data-cy=value]').contains(username);\n\n        cy.get('[data-cy=profile-form] input[name=\"prefs.profile.organization\"]').invoke('val').should('equal', org);\n        cy.get('[data-cy=profile-form] input[name=\"prefs.profile.organization_email\"]').invoke('val').should('equal', org_email);\n        cy.get('[data-cy=profile-form] select[name=\"prefs.profile.role\"]').invoke('val').should('equal', role);\n        cy.get('[data-cy=profile-form] input[name=\"prefs.profile.website_url\"]').invoke('val').should('equal', website);\n    }\n\n    function enterProfileValues({\n        org,\n        org_email,\n        role,\n        website,\n    }) {\n        cy.get('[data-cy=profile-form] input[name=\"prefs.profile.organization\"]').clear();\n        if (org) {\n            cy.get('[data-cy=profile-form] input[name=\"prefs.profile.organization\"]').type(org);\n        }\n        cy.get('[data-cy=profile-form] input[name=\"prefs.profile.organization_email\"]').clear();\n        if (org_email) {\n            cy.get('[data-cy=profile-form] input[name=\"prefs.profile.organization_email\"]').type(org_email);\n        }\n        cy.get('[data-cy=profile-form] select[name=\"prefs.profile.role\"]').select(role);\n        cy.get('[data-cy=profile-form] input[name=\"prefs.profile.website_url\"]').clear();\n        if (website) {\n            cy.get('[data-cy=profile-form] input[name=\"prefs.profile.website_url\"]').type(website);\n        }\n    }\n\n    function assertContextMenuItems({\n        account,\n        activate,\n        deactivate,\n        login,\n        setup\n    }) {\n        cy.get('[data-cy=user-profile-panel-options-btn]').click();\n        cy.get('[data-cy=context-menu]').within(() => {\n            cy.get('[role=button]').contains('API Details');\n\n            cy.get('[role=button]').should(account ? 
'contain' : 'not.contain', 'Account Settings');\n            cy.get('[role=button]').should(activate ? 'contain' : 'not.contain', 'Activate user');\n            cy.get('[role=button]').should(deactivate ? 'contain' : 'not.contain', 'Deactivate user');\n            cy.get('[role=button]').should(login ? 'contain' : 'not.contain', 'Login as user');\n            cy.get('[role=button]').should(setup ? 'contain' : 'not.contain', 'Setup user');\n        });\n        //de-select the context menu\n        cy.get('body').click();\n    }\n\n    beforeEach(function() {\n        cy.updateResource(adminUser.token, 'users', adminUser.user.uuid, {\n            prefs: {\n                profile: {\n                    organization: '',\n                    organization_email: '',\n                    role: '',\n                    website_url: '',\n                },\n            },\n        });\n        cy.updateResource(adminUser.token, 'users', activeUser.user.uuid, {\n            prefs: {\n                profile: {\n                    organization: '',\n                    organization_email: '',\n                    role: '',\n                    website_url: '',\n                },\n            },\n        });\n    });\n\n    it('non-admin can edit own profile', function() {\n        cy.loginAs(activeUser);\n\n        cy.get('header button[aria-label=\"Account Management\"]').click();\n        cy.get('#account-menu').contains('My account').click();\n\n        // Admin actions should be hidden, no account menu\n        assertContextMenuItems({\n            account: false,\n            activate: false,\n            deactivate: false,\n            login: false,\n            setup: false,\n        });\n\n        // Check initial values\n        assertProfileValues({\n            firstName: 'Active',\n            lastName: 'User',\n            email: 'user@example.local',\n            username: 'user',\n            org: '',\n            org_email: '',\n            role: '',\n            website: '',\n        });\n\n        // Change values\n        enterProfileValues({\n            org: 'Org name',\n            org_email: 'email@example.com',\n            role: 'Data Scientist',\n            website: 'example.com',\n        });\n\n        cy.get('[data-cy=profile-form] button[type=\"submit\"]').should('not.be.disabled');\n\n        // Submit\n        cy.get('[data-cy=profile-form] button[type=\"submit\"]').click();\n\n        // Check new values\n        assertProfileValues({\n            firstName: 'Active',\n            lastName: 'User',\n            email: 'user@example.local',\n            username: 'user',\n            org: 'Org name',\n            org_email: 'email@example.com',\n            role: 'Data Scientist',\n            website: 'example.com',\n        });\n\n        // if it worked, the save button should be disabled.\n        cy.get('[data-cy=profile-form] button[type=\"submit\"]').should('be.disabled');\n    });\n\n    it('non-admin cannot edit other profile', function() {\n        cy.loginAs(activeUser);\n        cy.goToPath('/user/' + adminUser.user.uuid);\n\n        assertProfileValues({\n            firstName: 'Admin',\n            lastName: 'User',\n            email: 'admin@example.local',\n            username: 'admin',\n            org: '',\n            org_email: '',\n            role: '',\n            website: '',\n        });\n\n        // Inputs should be disabled\n        cy.get('[data-cy=profile-form] input[name=\"prefs.profile.organization\"]').should('be.disabled');\n   
     cy.get('[data-cy=profile-form] input[name=\"prefs.profile.organization_email\"]').should('be.disabled');\n        cy.get('[data-cy=profile-form] select[name=\"prefs.profile.role\"]').should('be.disabled');\n        cy.get('[data-cy=profile-form] input[name=\"prefs.profile.website_url\"]').should('be.disabled');\n\n        // Submit should be disabled\n        cy.get('[data-cy=profile-form] button[type=\"submit\"]').should('be.disabled');\n\n        // Admin actions should be hidden, no account menu\n        assertContextMenuItems({\n            account: false,\n            activate: false,\n            deactivate: false,\n            login: false,\n            setup: false,\n        });\n    });\n\n    it('admin can edit own profile', function() {\n        cy.loginAs(adminUser);\n\n        cy.get('header button[aria-label=\"Account Management\"]').click();\n        cy.get('#account-menu').contains('My account').click();\n\n        // Admin actions should be visible, no account menu\n        assertContextMenuItems({\n            account: false,\n            activate: false,\n            deactivate: true,\n            login: false,\n            setup: false,\n        });\n\n        // Check initial values\n        assertProfileValues({\n            firstName: 'Admin',\n            lastName: 'User',\n            email: 'admin@example.local',\n            username: 'admin',\n            org: '',\n            org_email: '',\n            role: '',\n            website: '',\n        });\n\n        // Change values\n        enterProfileValues({\n            org: 'Admin org name',\n            org_email: 'admin@example.com',\n            role: 'Researcher',\n            website: 'admin.local',\n        });\n        cy.get('[data-cy=profile-form] button[type=\"submit\"]').click();\n\n        // Check new values\n        assertProfileValues({\n            firstName: 'Admin',\n            lastName: 'User',\n            email: 'admin@example.local',\n            username: 'admin',\n            org: 'Admin org name',\n            org_email: 'admin@example.com',\n            role: 'Researcher',\n            website: 'admin.local',\n        });\n    });\n\n    it('admin can edit other profile', function() {\n        cy.loginAs(adminUser);\n        cy.goToPath('/user/' + activeUser.user.uuid);\n\n        // Check initial values\n        assertProfileValues({\n            firstName: 'Active',\n            lastName: 'User',\n            email: 'user@example.local',\n            username: 'user',\n            org: '',\n            org_email: '',\n            role: '',\n            website: '',\n        });\n\n        enterProfileValues({\n            org: 'Changed org name',\n            org_email: 'changed@example.com',\n            role: 'Researcher',\n            website: 'changed.local',\n        });\n        cy.get('[data-cy=profile-form] button[type=\"submit\"]').click();\n\n        // Check new values\n        assertProfileValues({\n            firstName: 'Active',\n            lastName: 'User',\n            email: 'user@example.local',\n            username: 'user',\n            org: 'Changed org name',\n            org_email: 'changed@example.com',\n            role: 'Researcher',\n            website: 'changed.local',\n        });\n\n        // Admin actions should be visible, no account menu\n        assertContextMenuItems({\n            account: false,\n            activate: false,\n            deactivate: true,\n            login: true,\n            setup: false,\n        });\n    });\n\n    
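// Role groups should appear on the user's GROUPS tab, while project groups should not\n    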
it('displays role groups on user profile', function() {\n        cy.loginAs(adminUser);\n\n        cy.createGroup(adminUser.token, {\n            name: roleGroupName,\n            group_class: 'role',\n        }).as('roleGroup').then(function() {\n            cy.createLink(adminUser.token, {\n                name: 'can_write',\n                link_class: 'permission',\n                head_uuid: this.roleGroup.uuid,\n                tail_uuid: adminUser.user.uuid\n            });\n            cy.createLink(adminUser.token, {\n                name: 'can_write',\n                link_class: 'permission',\n                head_uuid: this.roleGroup.uuid,\n                tail_uuid: activeUser.user.uuid\n            });\n        });\n\n        cy.createGroup(adminUser.token, {\n            name: projectGroupName,\n            group_class: 'project',\n        }).as('projectGroup').then(function() {\n            cy.createLink(adminUser.token, {\n                name: 'can_write',\n                link_class: 'permission',\n                head_uuid: this.projectGroup.uuid,\n                tail_uuid: adminUser.user.uuid\n            });\n            cy.createLink(adminUser.token, {\n                name: 'can_write',\n                link_class: 'permission',\n                head_uuid: this.projectGroup.uuid,\n                tail_uuid: activeUser.user.uuid\n            });\n        });\n\n        cy.goToPath('/user/' + activeUser.user.uuid);\n        cy.get('div [role=\"tab\"]').contains('GROUPS').click();\n        cy.get('[data-cy=user-profile-groups-data-explorer]').contains(roleGroupName);\n        cy.get('[data-cy=user-profile-groups-data-explorer]').should('not.contain', projectGroupName);\n\n        cy.goToPath('/user/' + adminUser.user.uuid);\n        cy.get('div [role=\"tab\"]').contains('GROUPS').click();\n        cy.get('[data-cy=user-profile-groups-data-explorer]').contains(roleGroupName);\n        cy.get('[data-cy=user-profile-groups-data-explorer]').should('not.contain', projectGroupName);\n    });\n\n    it('allows performing admin functions', function() {\n        cy.loginAs(adminUser);\n        cy.goToPath('/user/' + activeUser.user.uuid);\n\n        // Check that user is active\n        cy.get('[data-cy=account-status]').contains('Active');\n        cy.get('div [role=\"tab\"]').contains('GROUPS').click();\n        cy.get('[data-cy=user-profile-groups-data-explorer]').should('contain', 'All users');\n        cy.get('div [role=\"tab\"]').contains('PROFILE').click();\n        assertContextMenuItems({\n            account: false,\n            activate: false,\n            deactivate: true,\n            login: true,\n            setup: false,\n        });\n\n        // Deactivate user\n        cy.get('[data-cy=user-profile-panel-options-btn]').click();\n        cy.get('[data-cy=context-menu]').contains('Deactivate user').click();\n        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n\n        // Check that user is deactivated\n        cy.get('[data-cy=account-status]').contains('Inactive');\n        cy.get('div [role=\"tab\"]').contains('GROUPS').click();\n        cy.get('[data-cy=user-profile-groups-data-explorer]').should('not.contain', 'All users');\n        cy.get('div [role=\"tab\"]').contains('PROFILE').click();\n        assertContextMenuItems({\n            account: false,\n            activate: true,\n            deactivate: false,\n            login: true,\n            setup: true,\n        });\n\n        // Setup user\n        
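// Setup should move the account from Inactive to Setup status (asserted below)\n        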
cy.get('[data-cy=user-profile-panel-options-btn]').click();\n        cy.get('[data-cy=context-menu]').contains('Setup user').click();\n        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n\n        // Check that user is setup\n        cy.get('[data-cy=account-status]').contains('Setup');\n        cy.get('div [role=\"tab\"]').contains('GROUPS').click();\n        cy.get('[data-cy=user-profile-groups-data-explorer]').should('contain', 'All users');\n        cy.get('div [role=\"tab\"]').contains('PROFILE').click();\n        assertContextMenuItems({\n            account: false,\n            activate: true,\n            deactivate: true,\n            login: true,\n            setup: false,\n        });\n\n        // Activate user\n        cy.get('[data-cy=user-profile-panel-options-btn]').click();\n        cy.get('[data-cy=context-menu]').contains('Activate user').click();\n        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n\n        // Check that user is active\n        cy.get('[data-cy=account-status]').contains('Active');\n        cy.get('div [role=\"tab\"]').contains('GROUPS').click();\n        cy.get('[data-cy=user-profile-groups-data-explorer]').should('contain', 'All users');\n        cy.get('div [role=\"tab\"]').contains('PROFILE').click();\n        assertContextMenuItems({\n            account: false,\n            activate: false,\n            deactivate: true,\n            login: true,\n            setup: false,\n        });\n\n        // Deactivate and activate user skipping setup\n        cy.get('[data-cy=user-profile-panel-options-btn]').click();\n        cy.get('[data-cy=context-menu]').contains('Deactivate user').click();\n        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n        // Check\n        cy.get('[data-cy=account-status]').contains('Inactive');\n        cy.get('div [role=\"tab\"]').contains('GROUPS').click();\n        cy.get('[data-cy=user-profile-groups-data-explorer]').should('not.contain', 'All users');\n        cy.get('div [role=\"tab\"]').contains('PROFILE').click();\n        assertContextMenuItems({\n            account: false,\n            activate: true,\n            deactivate: false,\n            login: true,\n            setup: true,\n        });\n        // reactivate\n        cy.get('[data-cy=user-profile-panel-options-btn]').click();\n        cy.get('[data-cy=context-menu]').contains('Activate user').click();\n        cy.get('[data-cy=confirmation-dialog-ok-btn]').click();\n\n        // Check that user is active\n        cy.get('[data-cy=account-status]').contains('Active');\n        cy.get('div [role=\"tab\"]').contains('GROUPS').click();\n        cy.get('[data-cy=user-profile-groups-data-explorer]').should('contain', 'All users');\n        cy.get('div [role=\"tab\"]').contains('PROFILE').click();\n        assertContextMenuItems({\n            account: false,\n            activate: false,\n            deactivate: true,\n            login: true,\n            setup: false,\n        });\n    });\n\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/virtual-machine-admin.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe(\"Virtual machine login manage tests\", function () {\n    let activeUser;\n    let adminUser;\n\n    const vmHost = `vm-${Math.floor(999999 * Math.random())}.host`;\n\n    before(function () {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser(\"admin\", \"VMAdmin\", \"User\", true, true)\n            .as(\"adminUser\")\n            .then(function () {\n                adminUser = this.adminUser;\n            });\n        cy.getUser(\"user\", \"VMActive\", \"User\", false, true)\n            .as(\"activeUser\")\n            .then(function () {\n                activeUser = this.activeUser;\n            });\n    });\n\n    it(\"adds and removes vm logins\", function () {\n        cy.loginAs(adminUser);\n        cy.createVirtualMachine(adminUser.token, { hostname: vmHost });\n\n        // Navigate to VM admin\n        cy.get('header button[aria-label=\"Admin Panel\"]').click();\n        // Clicking on the menu results in a tooltip for \"Admin Panel\"\n        // that interferes with clicking on the first item in the\n        // menu, which is Shell Access.  This is basically a Cypress\n        // artifact because in the real world, there would be a\n        // mouseout event when the user moves the pointer to actually\n        // click on the first item.  Send a mouseout event to make the\n        // tooltip go away so we can get on with our testing.\n        cy.get('header button[aria-label=\"Admin Panel\"]').trigger('mouseout', {force: true})\n        cy.get(\"[role=tooltip]\").should(\"not.exist\");\n\n        cy.get(\"#admin-menu\").contains(\"Shell Access\").click();\n\n        // Add login permission to admin\n        cy.get(\"[data-cy=vm-admin-table]\")\n            .contains(vmHost)\n            .parents(\"tr\")\n            .within(() => {\n                cy.get('button[aria-label=\"Add Login Permission\"]').click();\n            });\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"contain\", \"Add login permission\")\n            .within(() => {\n                cy.get(\"label\")\n                    .contains(\"Search for user\")\n                    .parent()\n                    .within(() => {\n                        cy.get(\"input\").type(\"VMAdmin\");\n                    });\n            });\n        cy.waitForDom().get(\"[role=tooltip]\").click();\n        cy.get(\"[data-cy=form-dialog]\")\n            .as(\"add-login-dialog\")\n            .should(\"contain\", \"Add login permission\")\n            .within(() => {\n                cy.get(\"label\")\n                    .contains(\"Add groups\")\n                    .parent()\n                    .within(() => {\n                        cy.get(\"input\").type(\"docker \");\n                        // Veryfy submit enabled (form has changed)\n                        cy.get(\"@add-login-dialog\").within(() => {\n                            cy.get(\"[data-cy=form-submit-btn]\").should(\"be.enabled\");\n                        });\n                        cy.get(\"input\").type(\"sudo\");\n                        // Veryfy submit disabled (partial input in chips)\n                        
cy.get(\"@add-login-dialog\").within(() => {\n                            cy.get(\"[data-cy=form-submit-btn]\").should(\"be.disabled\");\n                        });\n                        cy.get(\"input\").type(\"{enter}\");\n                    });\n            });\n        cy.get(\"[data-cy=form-dialog]\").within(() => {\n            cy.get(\"[data-cy=form-submit-btn]\").click();\n        });\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n\n        cy.get(\"[data-cy=vm-admin-table]\")\n            .contains(vmHost)\n            .parents(\"tr\")\n            .within(() => {\n                cy.get(\"td\").contains(\"admin\");\n            });\n\n        // Add login permission to activeUser\n        cy.get(\"[data-cy=vm-admin-table]\")\n            .contains(vmHost)\n            .parents(\"tr\")\n            .within(() => {\n                cy.get('button[aria-label=\"Add Login Permission\"]').click();\n            });\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"contain\", \"Add login permission\")\n            .within(() => {\n                cy.get(\"label\")\n                    .contains(\"Search for user\")\n                    .parent()\n                    .within(() => {\n                        cy.get(\"input\").type(\"VMActive user\");\n                    });\n            });\n        cy.get(\"[role=tooltip]\").click();\n        cy.get(\"[data-cy=form-dialog]\").within(() => {\n            cy.get(\"[data-cy=form-submit-btn]\").click();\n        });\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n\n        cy.get(\"[data-cy=vm-admin-table]\")\n            .contains(vmHost)\n            .parents(\"tr\")\n            .within(() => {\n                cy.get(\"td\").contains(\"user\");\n            });\n\n        // Check admin's vm page for login\n        cy.get(\"[data-cy=side-panel-tree]\").contains(\"Shell Access\").click();\n\n        cy.get(\"[data-cy=vm-user-table]\")\n            .contains(vmHost)\n            .parents(\"tr\")\n            .within(() => {\n                cy.get(\"td\").contains(\"admin\");\n                cy.get(\"td\").contains(\"docker\");\n                cy.get(\"td\").contains(\"sudo\");\n                cy.get(\"td\").contains(\"ssh admin@\" + vmHost);\n            });\n\n        // Check activeUser's vm page for login\n        cy.loginAs(activeUser);\n        cy.get(\"[data-cy=side-panel-tree]\").contains(\"Shell Access\").click();\n\n        cy.get(\"[data-cy=vm-user-table]\")\n            .contains(vmHost)\n            .parents(\"tr\")\n            .within(() => {\n                cy.get(\"td\").contains(\"user\");\n                cy.get(\"td\").should(\"not.contain\", \"docker\");\n                cy.get(\"td\").should(\"not.contain\", \"sudo\");\n                cy.get(\"td\").contains(\"ssh user@\" + vmHost);\n            });\n\n        // Edit login permissions\n        cy.loginAs(adminUser);\n        cy.get('header button[aria-label=\"Admin Panel\"]').click();\n        cy.get('header button[aria-label=\"Admin Panel\"]').trigger('mouseout', {force: true})\n        cy.get(\"[role=tooltip]\").should(\"not.exist\");\n        cy.get(\"#admin-menu\").contains(\"Shell Access\").click();\n\n        cy.get(\"[data-cy=vm-admin-table]\").contains(\"admin\"); // Wait for page to finish\n\n        cy.get(\"[data-cy=vm-admin-table]\").contains(vmHost).parents(\"tr\").contains(\"admin\").click();\n\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"contain\", \"Update login 
permission\")\n            .within(() => {\n                cy.get(\"label\").contains(\"Add groups\").parent().as(\"groupInput\");\n            });\n\n        cy.get(\"@groupInput\").within(() => {\n            cy.get(\"div[role=button]\").contains(\"sudo\").parent().find(\"svg\").click();\n            cy.get(\"div[role=button]\").contains(\"docker\").parent().find(\"svg\").click();\n        });\n\n        cy.get(\"[data-cy=form-dialog]\").within(() => {\n            cy.get(\"[data-cy=form-submit-btn]\").click();\n        });\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n\n        // Wait for page to finish loading\n        cy.get(\"[data-cy=vm-admin-table]\")\n            .contains(vmHost)\n            .parents(\"tr\")\n            .within(() => {\n                cy.get(\"div[role=button]\").parent().first().contains(\"admin\");\n            });\n\n        cy.get(\"[data-cy=vm-admin-table]\").contains(vmHost).parents(\"tr\").contains(\"user\").click();\n\n        cy.get(\"[data-cy=form-dialog]\")\n            .should(\"contain\", \"Update login permission\")\n            .within(() => {\n                cy.get(\"label\")\n                    .contains(\"Add groups\")\n                    .parent()\n                    .within(() => {\n                        cy.get(\"input\").type(\"docker{enter}\");\n                    });\n            });\n\n        cy.get(\"[data-cy=form-dialog]\").within(() => {\n            cy.get(\"[data-cy=form-submit-btn]\").click();\n        });\n        cy.get(\"[data-cy=form-dialog]\").should(\"not.exist\");\n\n        // Verify new login permissions\n        // Check admin's vm page for login\n        cy.get(\"[data-cy=side-panel-tree]\").contains(\"Shell Access\").click();\n\n        cy.get(\"[data-cy=vm-user-table]\")\n            .contains(vmHost)\n            .parents(\"tr\")\n            .within(() => {\n                cy.get(\"td\").contains(\"admin\");\n                cy.get(\"td\").should(\"not.contain\", \"docker\");\n                cy.get(\"td\").should(\"not.contain\", \"sudo\");\n                cy.get(\"td\").contains(\"ssh admin@\" + vmHost);\n            });\n\n        // Verify new login permissions\n        // Check activeUser's vm page for login\n        cy.loginAs(activeUser);\n        cy.get(\"[data-cy=side-panel-tree]\").contains(\"Shell Access\").click();\n\n        cy.get(\"[data-cy=vm-user-table]\")\n            .contains(vmHost)\n            .parents(\"tr\")\n            .within(() => {\n                cy.get(\"td\").contains(\"user\");\n                cy.get(\"td\").contains(\"docker\");\n                cy.get(\"td\").should(\"not.contain\", \"sudo\");\n                cy.get(\"td\").contains(\"ssh user@\" + vmHost);\n            });\n\n        // Remove login permissions\n        cy.loginAs(adminUser);\n        cy.get('header button[aria-label=\"Admin Panel\"]').click();\n        cy.get('header button[aria-label=\"Admin Panel\"]').trigger('mouseout', {force: true})\n        cy.get(\"[role=tooltip]\").should(\"not.exist\");\n        cy.get(\"#admin-menu\").contains(\"Shell Access\").click();\n\n        cy.get(\"[data-cy=vm-admin-table]\").contains(\"user\"); // Wait for page to finish\n\n        cy.get(\"[data-cy=vm-admin-table]\")\n            .contains(vmHost)\n            .parents(\"tr\")\n            .as(\"vmRow\")\n            .contains(\"user\")\n            .parents(\"[role=button]\")\n            .find(\"svg\")\n            .as(\"removeButton\");\n        cy.get(\"@removeButton\").click();\n       
 cy.get(\"[data-cy=confirmation-dialog-ok-btn]\").click();\n\n        cy.get(\"@vmRow\").within(() => {\n            cy.get(\"div[role=button]\").should(\"not.contain\", \"user\");\n            cy.get(\"div[role=button]\").should(\"have.length\", 1);\n        });\n\n        cy.get(\"@vmRow\").find(\"div[role=button]\").contains(\"admin\").parents(\"[role=button]\").find(\"svg\").as(\"removeButton\");\n        cy.get(\"@removeButton\").click();\n        cy.get(\"[data-cy=confirmation-dialog-ok-btn]\").click();\n\n        cy.waitForDom()\n            .get(\"[data-cy=vm-admin-table]\")\n            .contains(vmHost)\n            .parents(\"tr\")\n            .within(() => {\n                cy.get(\"div[role=button]\").should(\"not.exist\");\n            });\n\n        // Check admin's vm page for login\n        cy.get(\"[data-cy=side-panel-tree]\").contains(\"Shell Access\").click();\n\n        cy.get(\"[data-cy=vm-user-panel]\").should(\"not.contain\", vmHost);\n\n        // Check activeUser's vm page for login\n        cy.loginAs(activeUser);\n        cy.get(\"[data-cy=side-panel-tree]\").contains(\"Shell Access\").click();\n\n        cy.get(\"[data-cy=vm-user-panel]\").should(\"not.contain\", vmHost);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/e2e/workflow.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\ndescribe('Registered workflow panel tests', function() {\n    let activeUser;\n    let adminUser;\n\n    before(function() {\n        // Only set up common users once. These aren't set up as aliases because\n        // aliases are cleaned up after every test. Also it doesn't make sense\n        // to set the same users on beforeEach() over and over again, so we\n        // separate a little from Cypress' 'Best Practices' here.\n        cy.getUser('admin', 'Admin', 'User', true, true)\n            .as('adminUser').then(function() {\n                adminUser = this.adminUser;\n            }\n        );\n        cy.getUser('user', 'Active', 'User', false, true)\n            .as('activeUser').then(function() {\n                activeUser = this.activeUser;\n            }\n        );\n    });\n\n    it('should handle null definition', function() {\n        cy.createResource(activeUser.token, \"workflows\", {workflow: {name: \"Test wf\"}})\n            .then(function(workflowResource) {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/workflows/${workflowResource.uuid}`);\n                cy.doMPVTabSelect(\"Overview\");;\n                cy.get('[data-cy=workflow-details-card]').should('contain', workflowResource.name);\n                cy.get('[data-cy=workflow-details-attributes-modifiedby-user]').contains(`Active User (${activeUser.user.uuid})`);\n            });\n    });\n\n    it('should handle malformed definition', function() {\n        cy.createResource(activeUser.token, \"workflows\", {workflow: {name: \"Test wf\", definition: \"zap:\"}})\n            .then(function(workflowResource) {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/workflows/${workflowResource.uuid}`);\n                cy.doMPVTabSelect(\"Overview\");;\n                cy.get('[data-cy=workflow-details-card]').should('contain', workflowResource.name);\n                cy.get('[data-cy=workflow-details-attributes-modifiedby-user]').contains(`Active User (${activeUser.user.uuid})`);\n            });\n    });\n\n    it('should handle malformed run', function() {\n        cy.createResource(activeUser.token, \"workflows\", {workflow: {\n            name: \"Test wf\",\n            definition: JSON.stringify({\n                cwlVersion: \"v1.2\",\n                $graph: [\n                    {\n                        \"class\": \"Workflow\",\n                        \"id\": \"#main\",\n                        \"inputs\": [],\n                        \"outputs\": [],\n                        \"requirements\": [\n                            {\n                                \"class\": \"SubworkflowFeatureRequirement\"\n                            }\n                        ],\n                        \"steps\": [\n                            {\n                                \"id\": \"#main/cat1-testcli.cwl (v1.2.0-109-g9b091ed)\",\n                                \"in\": [],\n                                \"label\": \"cat1-testcli.cwl (v1.2.0-109-g9b091ed)\",\n                                \"out\": [\n                                    {\n                                        \"id\": \"#main/step/args\"\n                                    }\n                                ],\n                                \"run\": `keep:undefined/bar`\n                            }\n                        ]\n                    }\n                ],\n         
       \"cwlVersion\": \"v1.2\",\n                \"http://arvados.org/cwl#gitBranch\": \"1.2.1_proposed\",\n                \"http://arvados.org/cwl#gitCommit\": \"9b091ed7e0bef98b3312e9478c52b89ba25792de\",\n                \"http://arvados.org/cwl#gitCommitter\": \"GitHub <noreply@github.com>\",\n                \"http://arvados.org/cwl#gitDate\": \"Sun, 11 Sep 2022 21:24:42 +0200\",\n                \"http://arvados.org/cwl#gitDescribe\": \"v1.2.0-109-g9b091ed\",\n                \"http://arvados.org/cwl#gitOrigin\": \"git@github.com:common-workflow-language/cwl-v1.2\",\n                \"http://arvados.org/cwl#gitPath\": \"tests/cat1-testcli.cwl\",\n                \"http://arvados.org/cwl#gitStatus\": \"\"\n            })\n        }}).then(function(workflowResource) {\n            cy.loginAs(activeUser);\n            cy.goToPath(`/workflows/${workflowResource.uuid}`);\n            cy.doMPVTabSelect(\"Overview\");;\n            cy.get('[data-cy=workflow-details-card]').should('contain', workflowResource.name);\n            cy.get('[data-cy=workflow-details-attributes-modifiedby-user]').contains(`Active User (${activeUser.user.uuid})`);\n        });\n    });\n\n    const verifyIOParameter = (name, label, doc, val, collection) => {\n        cy.get('table tr').contains(name).parents('tr').within(($mainRow) => {\n            label && cy.contains(label);\n\n            if (val) {\n                if (Array.isArray(val)) {\n                    val.forEach(v => cy.contains(v));\n                } else {\n                    cy.contains(val);\n                }\n            }\n            if (collection) {\n                cy.contains(collection);\n            }\n        });\n    };\n\n    it('shows workflow details', function() {\n        cy.createCollection(adminUser.token, {\n            name: `Test collection ${Math.floor(Math.random() * 999999)}`,\n            owner_uuid: activeUser.user.uuid,\n            manifest_text: \". 
37b51d194a7513e45b56f6524f2d51f2+3 0:3:bar\\n\"\n        })\n            .then(function(collectionResource) {\n                cy.createResource(activeUser.token, \"workflows\", {workflow: {\n                    name: \"Test wf\",\n                    definition: JSON.stringify({\n                        cwlVersion: \"v1.2\",\n                        $graph: [\n                            {\n                                \"class\": \"Workflow\",\n                                \"hints\": [\n                                    {\n                                        \"class\": \"DockerRequirement\",\n                                        \"dockerPull\": \"python:2-slim\"\n                                    }\n                                ],\n                                \"id\": \"#main\",\n                                \"inputs\": [\n                                    {\n                                        \"id\": \"#main/file1\",\n                                        \"type\": \"File\"\n                                    },\n                                    {\n                                        \"id\": \"#main/numbering\",\n                                        \"type\": [\n                                            \"null\",\n                                            \"boolean\"\n                                        ]\n                                    },\n                                    {\n                                        \"default\": {\n                                            \"basename\": \"args.py\",\n                                            \"class\": \"File\",\n                                            \"location\": \"keep:de738550734533c5027997c87dc5488e+53/args.py\",\n                                            \"nameext\": \".py\",\n                                            \"nameroot\": \"args\",\n                                            \"size\": 179\n                                        },\n                                        \"id\": \"#main/args.py\",\n                                        \"type\": \"File\"\n                                    }\n                                ],\n                                \"outputs\": [\n                                    {\n                                        \"id\": \"#main/args\",\n                                        \"outputSource\": \"#main/step/args\",\n                                        \"type\": {\n                                            \"items\": \"string\",\n                                            \"name\": \"_:b0adccc1-502d-476f-8a5b-c8ef7119e2dc\",\n                                            \"type\": \"array\"\n                                        }\n                                    }\n                                ],\n                                \"requirements\": [\n                                    {\n                                        \"class\": \"SubworkflowFeatureRequirement\"\n                                    }\n                                ],\n                                \"steps\": [\n                                    {\n                                        \"id\": \"#main/cat1-testcli.cwl (v1.2.0-109-g9b091ed)\",\n                                        \"in\": [\n                                            {\n                                                \"id\": \"#main/step/file1\",\n                                                \"source\": \"#main/file1\"\n                 
                           },\n                                            {\n                                                \"id\": \"#main/step/numbering\",\n                                                \"source\": \"#main/numbering\"\n                                            },\n                                            {\n                                                \"id\": \"#main/step/args.py\",\n                                                \"source\": \"#main/args.py\"\n                                            }\n                                        ],\n                                        \"label\": \"cat1-testcli.cwl (v1.2.0-109-g9b091ed)\",\n                                        \"out\": [\n                                            {\n                                                \"id\": \"#main/step/args\"\n                                            }\n                                        ],\n                                        \"run\": `keep:${collectionResource.portable_data_hash}/bar`\n                                    }\n                                ]\n                            }\n                        ],\n                        \"cwlVersion\": \"v1.2\",\n                        \"http://arvados.org/cwl#gitBranch\": \"1.2.1_proposed\",\n                        \"http://arvados.org/cwl#gitCommit\": \"9b091ed7e0bef98b3312e9478c52b89ba25792de\",\n                        \"http://arvados.org/cwl#gitCommitter\": \"GitHub <noreply@github.com>\",\n                        \"http://arvados.org/cwl#gitDate\": \"Sun, 11 Sep 2022 21:24:42 +0200\",\n                        \"http://arvados.org/cwl#gitDescribe\": \"v1.2.0-109-g9b091ed\",\n                        \"http://arvados.org/cwl#gitOrigin\": \"git@github.com:common-workflow-language/cwl-v1.2\",\n                        \"http://arvados.org/cwl#gitPath\": \"tests/cat1-testcli.cwl\",\n                        \"http://arvados.org/cwl#gitStatus\": \"\"\n                    })\n                }}).then(function(workflowResource) {\n                    cy.loginAs(activeUser);\n                    cy.goToPath(`/workflows/${workflowResource.uuid}`);\n                    cy.doMPVTabSelect(\"Overview\");\n                    cy.get('[data-cy=workflow-details-card]').should('contain', workflowResource.name);\n                    cy.get('[data-cy=workflow-details-attributes-modifiedby-user]').contains(`Active User (${activeUser.user.uuid})`);\n                    cy.get('[data-cy=registered-workflow-info-panel]')\n                        .should('contain', 'gitCommit: 9b091ed7e0bef98b3312e9478c52b89ba25792de');\n\n                    cy.doMPVTabSelect(\"Inputs\");\n                    cy.get('[data-cy=process-io-card] h6').contains('Input Parameters')\n                        .parents('[data-cy=process-io-card]').within(() => {\n                            verifyIOParameter('file1', null, '', '', '');\n                            verifyIOParameter('numbering', null, '', '', '');\n                            verifyIOParameter('args.py', null, '', 'args.py', 'de738550734533c5027997c87dc5488e+53');\n                        });\n                    cy.get('button').contains('Outputs').click();\n                    cy.get('[data-cy=process-io-card] h6').contains('Output Parameters')\n                        .parents('[data-cy=process-io-card]').within(() => {\n                            verifyIOParameter('args', null, '', '', '');\n                        });\n                    
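// The Definition tab should show the workflow file from the backing collection\n                    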
cy.get('button').contains('Definition').click();\n                    cy.get('[data-cy=collection-files-panel]').within(() => {\n                        cy.get('[data-cy=collection-files-right-panel]', { timeout: 5000 })\n                            .should('contain', 'bar');\n                    });\n                });\n            });\n    });\n\n    it('can delete a workflow', function() {\n        cy.createResource(activeUser.token, \"workflows\", {workflow: {name: \"Test wf\"}})\n            .then(function(workflowResource) {\n                cy.loginAs(activeUser);\n                cy.goToPath(`/projects/${activeUser.user.uuid}`);\n                cy.doMPVTabSelect(\"Data\");\n                cy.get('[data-cy=project-panel] table tbody').contains(workflowResource.name).rightclick();\n                cy.get('[data-cy=context-menu]', { timeout: 10000 }).should('exist');\n                cy.get('[data-cy=\"Delete Workflow\"]').click();\n                cy.get('[data-cy=confirmation-dialog-ok-btn]').should('exist').click();\n                cy.get('[data-cy=project-panel] table tbody').should('not.contain', workflowResource.name);\n            });\n    });\n\n    it('can delete multiple workflows', function() {\n        const { floor, random } = Math;\n        const wfNames = [`Test wf1 ${floor(random() * 999999)}`, `Test wf2 ${floor(random() * 999999)}`, `Test wf3 ${floor(random() * 999999)}`];\n\n        wfNames.forEach((wfName) => {\n            cy.createResource(activeUser.token, \"workflows\", {workflow: {name: wfName}});\n        });\n\n        cy.loginAs(activeUser);\n        cy.doSidePanelNavigation('Home Projects');\n\n        wfNames.forEach((wfName) => {\n            cy.get('tr').contains('td', wfName, { timeout: 10000 }).should('exist').parent('tr').find('input[type=\"checkbox\"]').click();\n        });\n\n        cy.waitForDom().get('[data-cy=multiselect-button]', {timeout: 10000}).should('be.visible');\n        cy.get('[data-cy=multiselect-button]', {timeout: 10000}).should('have.length', 1).trigger('mouseover');\n        cy.get('body').contains('Delete Workflow', {timeout: 10000}).should('exist');\n        cy.get('[data-cy=multiselect-button]').eq(0).click();\n        cy.get('[data-cy=confirmation-dialog-ok-btn]').should('exist').click({force: true});\n        cy.wait(3000);\n\n        wfNames.forEach((wfName) => {\n            cy.get('tr').contains(wfName).should('not.exist');\n        });\n    });\n\n    it('cannot delete readonly workflow', function() {\n        cy.createProject({\n            owningUser: adminUser,\n            targetUser: activeUser,\n            projectName: 'mySharedReadonlyProject',\n            canWrite: false,\n        });\n        cy.getAll('@mySharedReadonlyProject')\n            .then(function ([mySharedReadonlyProject]) {\n                cy.createResource(adminUser.token, \"workflows\", {workflow: {name: \"Test wf\", owner_uuid: mySharedReadonlyProject.uuid}})\n                    .then(function(workflowResource) {\n                        cy.loginAs(activeUser);\n                        cy.goToPath(`/shared-with-me`);\n                        cy.contains(\"mySharedReadonlyProject\").click();\n                        cy.doMPVTabSelect(\"Data\");\n                        cy.get('[data-cy=project-panel] table tbody').contains(workflowResource.name).rightclick();\n                        cy.get('[data-cy=context-menu]').should(\"not.contain\", 'Delete Workflow');\n                    });\n            });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/fixtures/.gitkeep",
    "content": ""
  },
  {
    "path": "services/workbench2/cypress/fixtures/files/banner.html",
    "content": "<div>\n    <h1>Hi there</h1>\n    <h3>This is my amazing</h3>\n    <h5 style=\"color: red\">Banner</h5>\n</div>"
  },
  {
    "path": "services/workbench2/cypress/fixtures/files/tooltips.txt",
    "content": "{\n    \"[data-cy=side-panel-tree]\": \"This allows you to navigate through the app\"\n}"
  },
  {
    "path": "services/workbench2/cypress/fixtures/webdav-propfind-outputs.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<D:multistatus xmlns:D=\"DAV:\">\n  <D:response>\n    <D:href>/c=zzzzz-4zz18-zzzzzzzzzzzzzzz/</D:href>\n    <D:propstat>\n      <D:prop>\n        <D:resourcetype>\n          <D:collection xmlns:D=\"DAV:\" />\n        </D:resourcetype>\n        <D:getlastmodified>Mon, 11 Jul 2022 21:54:20 GMT</D:getlastmodified>\n        <D:supportedlock>\n          <D:lockentry xmlns:D=\"DAV:\">\n            <D:lockscope>\n              <D:exclusive />\n            </D:lockscope>\n            <D:locktype>\n              <D:write />\n            </D:locktype>\n          </D:lockentry>\n        </D:supportedlock>\n        <D:displayname></D:displayname>\n      </D:prop>\n      <D:status>HTTP/1.1 200 OK</D:status>\n    </D:propstat>\n  </D:response>\n  <D:response>\n    <D:href>/c=zzzzz-4zz18-zzzzzzzzzzzzzzz/cwl.output.json</D:href>\n    <D:propstat>\n      <D:prop>\n        <D:displayname>cwl.output.json</D:displayname>\n        <D:getcontentlength>141</D:getcontentlength>\n        <D:getlastmodified>Mon, 11 Jul 2022 21:54:20 GMT</D:getlastmodified>\n        <D:supportedlock>\n          <D:lockentry xmlns:D=\"DAV:\">\n            <D:lockscope>\n              <D:exclusive />\n            </D:lockscope>\n            <D:locktype>\n              <D:write />\n            </D:locktype>\n          </D:lockentry>\n        </D:supportedlock>\n        <D:resourcetype></D:resourcetype>\n        <D:getcontenttype>application/json</D:getcontenttype>\n        <D:getetag>\"000000000000000000\"</D:getetag>\n      </D:prop>\n      <D:status>HTTP/1.1 200 OK</D:status>\n    </D:propstat>\n  </D:response>\n</D:multistatus>\n"
  },
  {
    "path": "services/workbench2/cypress/fixtures/workflow-with-optional-inputs.yaml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n---\n\"$graph\":\n- class: Workflow\n  cwlVersion: v1.2\n  hints:\n  - acrContainerImage: 7009415fdc959d0c2819ee2e9db96561+261\n    class: http://arvados.org/cwl#WorkflowRunnerResources\n  id: \"#main\"\n  inputs:\n  - default: null\n    id: \"#main/int_input\"\n    type: [\"int\", \"null\"]\n  - default: null\n    id: \"#main/string_input\"\n    type: [\"string\", \"null\"]\n  - default: null\n    id: \"#main/empty_string_input\"\n    type: [\"string\", \"null\"]\n  outputs: []\n  steps: []\ncwlVersion: v1.2\n"
  },
  {
    "path": "services/workbench2/cypress/fixtures/workflow_directory_array.yaml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n---\n\"$graph\":\n- class: Workflow\n  cwlVersion: v1.2\n  hints:\n  - acrContainerImage: 7009415fdc959d0c2819ee2e9db96561+261\n    class: http://arvados.org/cwl#WorkflowRunnerResources\n  id: \"#main\"\n  inputs:\n  - id: \"#main/directoryInputName\"\n    type:\n      items: Directory\n      type: array\n  outputs: []\n  steps: []\ncwlVersion: v1.2\n"
  },
  {
    "path": "services/workbench2/cypress/fixtures/workflow_with_array_fields.yaml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n---\n\"$graph\":\n- class: Workflow\n  cwlVersion: v1.2\n  hints:\n  - acrContainerImage: 7009415fdc959d0c2819ee2e9db96561+261\n    class: http://arvados.org/cwl#WorkflowRunnerResources\n  id: \"#main\"\n  inputs:\n  - id: \"#main/bar\"\n    type:\n      items: Directory\n      type: array\n  - id: \"#main/foo\"\n    type:\n      items: File\n      type: array\n  outputs: []\n  steps: []\ncwlVersion: v1.2\n"
  },
  {
    "path": "services/workbench2/cypress/fixtures/workflow_with_default_array_fields.yaml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n---\n\"$graph\":\n- class: Workflow\n  cwlVersion: v1.2\n  hints:\n  - acrContainerImage: 7009415fdc959d0c2819ee2e9db96561+261\n    class: http://arvados.org/cwl#WorkflowRunnerResources\n  id: \"#main\"\n  inputs:\n  - default: []\n    id: \"#main/bar\"\n    type:\n      items: Directory\n      type: array\n  - default: []\n    id: \"#main/foo\"\n    type:\n      items: File\n      type: array\n  outputs: []\n  steps: []\ncwlVersion: v1.2\n"
  },
  {
    "path": "services/workbench2/cypress/fixtures/workflow_with_secret_input.yaml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n---\n\"$graph\":\n- class: Workflow\n  cwlVersion: v1.2\n  hints:\n  - acrContainerImage: d740a57097711e08eb9b2a93518f20ab+174\n    class: http://arvados.org/cwl#WorkflowRunnerResources\n  - secrets: [\"#main/foo\"]\n    class: http://commonwl.org/cwltool#Secrets\n  id: \"#main\"\n  inputs:\n  - id: \"#main/bar\"\n    type: string\n  - id: \"#main/foo\"\n    type: string\n  outputs: []\n  steps: []\ncwlVersion: v1.2\n"
  },
  {
    "path": "services/workbench2/cypress/plugins/index.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n/// <reference types=\"cypress\" />\n// ***********************************************************\n// This example plugins/index.js can be used to load plugins\n//\n// You can change the location of this file or turn off loading\n// the plugins file with the 'pluginsFile' configuration option.\n//\n// You can read more here:\n// https://on.cypress.io/plugins-guide\n// ***********************************************************\n\n// This function is called when a project is opened or re-opened (e.g. due to\n// the project's config changing)\n\nconst fs = require('fs');\nconst path = require('path');\n\n/**\n * @type {Cypress.PluginConfig}\n */\nmodule.exports = (on, config) => {\n  // `on` is used to hook into various events Cypress emits\n  // `config` is the resolved Cypress config\n  on(\"before:browser:launch\", (browser = {}, launchOptions) => {\n    const downloadDirectory = path.join(__dirname, \"..\", \"downloads\");\n    if (browser.family === 'chromium' && browser.name !== 'electron') {\n     launchOptions.preferences.default[\"download\"] = {\n      default_directory: downloadDirectory\n     };\n    }\n    return launchOptions;\n  });\n\n  on('task', {\n    clearDownload({ filename }) {\n      fs.unlinkSync(filename);\n      return null;\n    }\n  });\n}\n"
  },
  {
    "path": "services/workbench2/cypress/support/commands.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// ***********************************************\n// This example commands.js shows you how to\n// create various custom commands and overwrite\n// existing commands.\n//\n// For more comprehensive examples of custom\n// commands please read more here:\n// https://on.cypress.io/custom-commands\n// ***********************************************\n//\n//\n// -- This is a parent command --\n// Cypress.Commands.add(\"login\", (email, password) => { ... })\n//\n//\n// -- This is a child command --\n// Cypress.Commands.add(\"drag\", { prevSubject: 'element'}, (subject, options) => { ... })\n//\n//\n// -- This is a dual command --\n// Cypress.Commands.add(\"dismiss\", { prevSubject: 'optional'}, (subject, options) => { ... })\n//\n//\n// -- This will overwrite an existing command --\n// Cypress.Commands.overwrite(\"visit\", (originalFn, url, options) => { ... })\n\nimport 'cypress-wait-until';\nimport { extractFilesData } from \"services/collection-service/collection-service-files-response\";\nimport _ from 'lodash';\nimport moment from 'moment';\n\nconst controllerURL = Cypress.env(\"controller_url\");\nconst systemToken = Cypress.env(\"system_token\");\nlet createdResources = [];\n\nconst containerLogFolderPrefix = \"log for container \";\n\n// Clean up anything that was created.  You can temporarily add\n// 'return' to the top if you need the resources to hang around to\n// debug a specific test.\nafterEach(function () {\n    if (createdResources.length === 0) {\n        return;\n    }\n    cy.log(`Cleaning ${createdResources.length} previously created resource(s).`);\n    // delete them in FIFO order because later created resources may\n    // be linked to the earlier ones.\n    createdResources.reverse().forEach(function ({ suffix, uuid }) {\n        // Don't fail when a resource isn't already there, some objects may have\n        // been removed, directly or indirectly, from the test that created them.\n        cy.deleteResource(systemToken, suffix, uuid, false);\n    });\n    createdResources = [];\n});\n\nCypress.Commands.add(\n    \"doRequest\",\n    (method = \"GET\", path = \"\", data = null, qs = null, token = systemToken, auth = false, followRedirect = true, failOnStatusCode = true) => {\n        return cy.request({\n            method: method,\n            url: `${controllerURL.replace(/\\/+$/, \"\")}/${path.replace(/^\\/+/, \"\")}`,\n            body: data,\n            qs: auth ? qs : Object.assign({ api_token: token }, qs),\n            auth: auth ? { bearer: `${token}` } : undefined,\n            followRedirect: followRedirect,\n            failOnStatusCode: failOnStatusCode,\n        });\n    }\n);\n\nCypress.Commands.add(\n    \"doWebDAVRequest\",\n    (method = \"GET\", path = \"\", data = null, qs = null, token = systemToken, auth = false, followRedirect = true, failOnStatusCode = true) => {\n        return cy.doRequest(\"GET\", \"/arvados/v1/config\", null, null).then(({ body: config }) => {\n            return cy.request({\n                method: method,\n                url: `${config.Services.WebDAVDownload.ExternalURL.replace(/\\/+$/, \"\")}/${path.replace(/^\\/+/, \"\")}`,\n                body: data,\n                qs: auth ? qs : Object.assign({ api_token: token }, qs),\n                auth: auth ? 
{ bearer: `${token}` } : undefined,\n                followRedirect: followRedirect,\n                failOnStatusCode: failOnStatusCode,\n            });\n        });\n    }\n);\n\nCypress.Commands.add(\"getUser\", (username, first_name = \"\", last_name = \"\", is_admin = false, is_active = true) => {\n    // Create user if not already created\n    return (\n        cy\n            .doRequest(\n                \"POST\",\n                \"/auth/controller/callback\",\n                {\n                    auth_info: JSON.stringify({\n                        email: `${username}@example.local`,\n                        username: username,\n                        first_name: first_name,\n                        last_name: last_name,\n                        alternate_emails: [],\n                    }),\n                    return_to: \",https://controller.api.client.invalid\",\n                },\n                null,\n                systemToken,\n                true,\n                false\n            ) // Don't follow redirects so we can catch the token\n            .its(\"headers.location\")\n            .as(\"location\")\n            // Get its token and set the account up as admin and/or active\n            .then(function () {\n                this.userToken = this.location.split(\"=\")[1];\n                assert.isString(this.userToken);\n                return cy\n                    .doRequest(\"GET\", \"/arvados/v1/users\", null, {\n                        filters: `[[\"username\", \"=\", \"${username}\"]]`,\n                    })\n                    .its(\"body.items.0\")\n                    .as(\"aUser\")\n                    .then(function () {\n                        cy.doRequest(\"PUT\", `/arvados/v1/users/${this.aUser.uuid}`, {\n                            user: {\n                                is_admin: is_admin,\n                                is_active: is_active,\n                            },\n                        })\n                            .its(\"body\")\n                            .as(\"theUser\")\n                            .then(function () {\n                                return { user: this.theUser, token: this.userToken };\n                            });\n                    });\n            })\n    );\n});\n\nCypress.Commands.add(\"createLink\", (token, data) => {\n    return cy.createResource(token, \"links\", {\n        link: JSON.stringify(data),\n    });\n});\n\nCypress.Commands.add(\"createGroup\", (token, data) => {\n    return cy.createResource(token, \"groups\", {\n        group: JSON.stringify(data),\n        ensure_unique_name: true,\n    });\n});\n\nCypress.Commands.add(\"trashGroup\", (token, uuid) => {\n    return cy.deleteResource(token, \"groups\", uuid);\n});\n\nCypress.Commands.add(\"createWorkflow\", (token, data) => {\n    return cy.createResource(token, \"workflows\", {\n        workflow: JSON.stringify(data),\n        ensure_unique_name: true,\n    });\n});\n\nCypress.Commands.add(\"createCollection\", (token, data, keep = false) => {\n    return cy.createResource(token, \"collections\", {\n        collection: JSON.stringify(data),\n        ensure_unique_name: true,\n    }, keep);\n});\n\nCypress.Commands.add(\"getCollection\", (token, uuid) => {\n    return cy.getResource(token, \"collections\", uuid);\n});\n\nCypress.Commands.add(\"updateCollection\", (token, uuid, data) => {\n    return cy.updateResource(token, \"collections\", uuid, {\n        collection: JSON.stringify(data),\n    
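// note: as with the other commands in this file, nested record attributes are\n    // sent to the API as a JSON-encoded string\n    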
});\n});\n\nCypress.Commands.add(\"collectionReplaceFiles\", (token, uuid, data) => {\n    return cy.updateResource(token, \"collections\", uuid, {\n        collection: {\n            preserve_version: true,\n        },\n        replace_files: JSON.stringify(data),\n    });\n});\n\nCypress.Commands.add(\"getContainer\", (token, uuid) => {\n    return cy.getResource(token, \"containers\", uuid);\n});\n\nCypress.Commands.add(\"updateContainer\", (token, uuid, data) => {\n    return cy.updateResource(token, \"containers\", uuid, {\n        container: JSON.stringify(data),\n    });\n});\n\nCypress.Commands.add(\"getContainerRequest\", (token, uuid) => {\n    return cy.getResource(token, \"container_requests\", uuid);\n});\n\nCypress.Commands.add(\"createContainerRequest\", (token, data) => {\n    return cy.createResource(token, \"container_requests\", {\n        container_request: JSON.stringify(data),\n        ensure_unique_name: true,\n    });\n});\n\n/**\n * Creates a pre-made simple CR to avoid repeating this CR everywhere\n *\n * Can be overridden with any modifications, but has sensible defaults\n */\nCypress.Commands.add(\"createDefaultContainerRequest\", (token, dockerImage, data) => (\n    cy.createContainerRequest(token, {\n        name: data.name || `test_container_request ${Math.floor(Math.random() * 999999)}`,\n        command: data.command || ['echo', 'hello world'],\n        container_image: dockerImage.portable_data_hash, // for some reason, docker_image doesn't work here\n        output_path: '/var/spool/cwl',\n        priority: 1,\n        runtime_constraints: {\n            vcpus: 1,\n            ram: 1,\n        },\n        use_existing: data.use_existing || false,\n        state: data.state || \"Uncommitted\",\n        mounts: {\n            '/var/lib/cwl/workflow.json': {\n                kind: 'json',\n                content: {},\n            },\n            '/var/spool/cwl': {\n                kind: 'tmp',\n                capacity: 1000000,\n            },\n        },\n        owner_uuid: data.owner_uuid || undefined,\n        properties: data.properties || undefined,\n    })\n));\n\nCypress.Commands.add(\"updateContainerRequest\", (token, uuid, data) => {\n    return cy.updateResource(token, \"container_requests\", uuid, {\n        container_request: JSON.stringify(data),\n    });\n});\n\n/**\n * Requires an admin token for log_uuid modification to succeed\n */\nCypress.Commands.add(\"appendLog\", (token, crUuid, fileName, lines = []) =>\n    cy.getContainerRequest(token, crUuid).then(containerRequest => {\n        if (containerRequest.log_uuid) {\n            cy.listContainerRequestLogs(token, crUuid).then(logFiles => {\n                const filePath = `${containerRequest.log_uuid}/${containerLogFolderPrefix}${containerRequest.container_uuid}/${fileName}`;\n                if (logFiles.find(file => file.name === fileName)) {\n                    // File exists, fetch and append\n                    return cy\n                        .doWebDAVRequest(\"GET\", `c=${filePath}`, null, null, token)\n                        .then(({ body: contents }) =>\n                            cy.doWebDAVRequest(\"PUT\", `c=${filePath}`, contents.split(\"\\n\").concat(lines).join(\"\\n\"), null, token)\n                        );\n                } else {\n                    // File doesn't exist, put a new file\n                    cy.doWebDAVRequest(\"PUT\", `c=${filePath}`, lines.join(\"\\n\"), null, token);\n                }\n            });\n        } else {\n            // Create 
log collection\n            return cy\n                .createCollection(token, {\n                    name: `Test log collection ${Math.floor(Math.random() * 999999)}`,\n                    owner_uuid: containerRequest.owner_uuid,\n                    manifest_text: \"\",\n                })\n                .then(collection => {\n                    // Update CR log_uuid to fake log collection\n                    cy.updateContainerRequest(token, containerRequest.uuid, {\n                        log_uuid: collection.uuid,\n                    }).then(() =>\n                        // Create empty directory for container uuid\n                        cy\n                            .collectionReplaceFiles(token, collection.uuid, {\n                                [`/${containerLogFolderPrefix}${containerRequest.container_uuid}`]: \"d41d8cd98f00b204e9800998ecf8427e+0\",\n                            })\n                            .then(() =>\n                                // Put new log file with contents into fake log collection\n                                cy.doWebDAVRequest(\n                                    \"PUT\",\n                                    `c=${collection.uuid}/${containerLogFolderPrefix}${containerRequest.container_uuid}/${fileName}`,\n                                    lines.join(\"\\n\"),\n                                    null,\n                                    token\n                                )\n                            )\n                    );\n                });\n        }\n    })\n);\n\nCypress.Commands.add(\"listContainerRequestLogs\", (token, crUuid) =>\n    cy.getContainerRequest(token, crUuid).then(containerRequest =>\n        cy\n            .doWebDAVRequest(\n                \"PROPFIND\",\n                `c=${containerRequest.log_uuid}/${containerLogFolderPrefix}${containerRequest.container_uuid}`,\n                null,\n                null,\n                token\n            )\n            .then(({ body: data }) => {\n                return extractFilesData(new DOMParser().parseFromString(data, \"text/xml\"));\n            })\n    )\n);\n\nCypress.Commands.add(\"createVirtualMachine\", (token, data) => {\n    return cy.createResource(token, \"virtual_machines\", {\n        virtual_machine: JSON.stringify(data),\n        ensure_unique_name: true,\n    });\n});\n\nCypress.Commands.add(\"getResource\", (token, suffix, uuid) => {\n    return cy\n        .doRequest(\"GET\", `/arvados/v1/${suffix}/${uuid}`, null, {}, token)\n        .its(\"body\")\n        .then(function (resource) {\n            return resource;\n        });\n});\n\nCypress.Commands.add(\"createResource\", (token, suffix, data, keep = false) => {\n    return cy\n        .doRequest(\"POST\", \"/arvados/v1/\" + suffix, data, null, token, true)\n        .its(\"body\")\n        .then(function (resource) {\n            if (! 
keep) {\n                createdResources.push({ suffix, uuid: resource.uuid });\n            };\n            return resource;\n        });\n});\n\n\nCypress.Commands.add(\"deleteResource\", (token, suffix, uuid, failOnStatusCode = true) => {\n    return cy\n        .doRequest(\"DELETE\", \"/arvados/v1/\" + suffix + \"/\" + uuid, null, null, token, false, true, failOnStatusCode)\n        .its(\"body\")\n        .then(function (resource) {\n            return resource;\n        });\n});\n\nCypress.Commands.add(\"updateResource\", (token, suffix, uuid, data) => {\n    return cy\n        .doRequest(\"PATCH\", \"/arvados/v1/\" + suffix + \"/\" + uuid, data, null, token, true)\n        .its(\"body\")\n        .then(function (resource) {\n            return resource;\n        });\n});\n\nCypress.Commands.add(\"loginAs\", (user, preserveLocalStorage = false) => {\n    // This shouldn't be necessary unless we need to call loginAs multiple times\n    // in the same test.\n    cy.clearCookies();\n    if(preserveLocalStorage === false) {\n        cy.clearAllLocalStorage();\n        cy.clearAllSessionStorage();\n    }\n    cy.visit(`/token/?api_token=${user.token}`);\n    // Use waitUntil to avoid permafail race conditions with window.location being undefined\n    cy.waitUntil(() => cy.window().then(win =>\n        win?.location?.href &&\n        win.location.href.includes(\"/dashboard\")\n    ), { timeout: 15000 });\n    // Wait for page to settle before getting elements\n    cy.waitForDom();\n    cy.get(\"div#root\").should(\"contain\", \"Arvados Workbench (zzzzz)\");\n    cy.get(\"div#root\").should(\"not.contain\", \"Your account is inactive\");\n});\n\nCypress.Commands.add(\"testEditProjectOrCollection\", (containerElement, oldName, newName, newDescription, isProject = true) => {\n    cy.get(containerElement).contains(oldName).rightclick();\n    cy.get(\"[data-cy=context-menu]\")\n        .contains(isProject ? \"Edit project\" : \"Edit collection\")\n        .click();\n    cy.get(\"[data-cy=form-dialog]\").within(() => {\n        cy.get(\"input[name=name]\").clear().type(newName);\n        cy.get(\"div[contenteditable=true]\")\n            .clear()\n            .type(newDescription);\n        cy.get(\"[data-cy=form-submit-btn]\").click();\n    });\n\n    cy.get(containerElement).contains(newName).rightclick();\n    cy.get(\"[data-cy=context-menu]\")\n        .contains(isProject ? 
\"Edit project\" : \"Edit collection\")\n        .click();\n    cy.get(\"[data-cy=form-dialog]\").within(() => {\n        cy.get(\"input[name=name]\").should(\"have.value\", newName);\n        cy.get(\"span[data-text=true]\").contains(newDescription);\n        cy.get(\"[data-cy=form-cancel-btn]\").click();\n    });\n});\n\nCypress.Commands.add(\"doSearch\", searchTerm => {\n    cy.get(\"[data-cy=searchbar-input-field]\").type(`{selectall}${searchTerm}{enter}`);\n});\n\nCypress.Commands.add(\"goToPath\", path => {\n    return cy.visit(path);\n});\n\nCypress.Commands.add(\"getAll\", (...elements) => {\n    const promise = cy.wrap([], { log: false });\n\n    for (let element of elements) {\n        promise.then(arr => cy.get(element).then(got => cy.wrap([...arr, got])));\n    }\n\n    return promise;\n});\n\nCypress.Commands.add(\"shareWith\", (srcUserToken, targetUserUUID, itemUUID, permission = \"can_write\") => {\n    cy.createLink(srcUserToken, {\n        name: permission,\n        link_class: \"permission\",\n        head_uuid: itemUUID,\n        tail_uuid: targetUserUUID,\n    });\n});\n\nCypress.Commands.add(\"addToFavorites\", (userToken, userUUID, itemUUID) => {\n    cy.createLink(userToken, {\n        head_uuid: itemUUID,\n        link_class: \"star\",\n        name: \"\",\n        owner_uuid: userUUID,\n        tail_uuid: userUUID,\n    });\n});\n\nCypress.Commands.add(\"createProject\", ({ owningUser, targetUser, ownerUuid, projectName, canWrite, addToFavorites }) => {\n    const writePermission = canWrite ? \"can_write\" : \"can_read\";\n\n    cy.createGroup(owningUser.token, {\n        name: `${projectName} ${Math.floor(Math.random() * 999999)}`,\n        group_class: \"project\",\n        ...(ownerUuid ? {owner_uuid: ownerUuid} : {})\n    })\n        .as(`${projectName}`)\n        .then(project => {\n            if (targetUser && targetUser !== owningUser) {\n                cy.shareWith(owningUser.token, targetUser.user.uuid, project.uuid, writePermission);\n            }\n            if (addToFavorites) {\n                const user = targetUser ? targetUser : owningUser;\n                cy.addToFavorites(user.token, user.user.uuid, project.uuid);\n            }\n        });\n});\n\nCypress.Commands.add(\n    \"upload\",\n    {\n        prevSubject: \"element\",\n    },\n    (subject, file, fileName, binaryMode = true) => {\n        cy.window().then(window => {\n            const blob = binaryMode ? 
b64toBlob(file, \"\", 512) : new Blob([file], { type: \"text/plain\" });\n            const testFile = new window.File([blob], fileName);\n\n            const dataTransferFile = new File([testFile], testFile.name, {\n                type: 'text/plain',\n            });\n\n            const data = {\n                dataTransfer: {\n                    files: [dataTransferFile],\n                    items: [\n                        {\n                            kind: 'dataFile',\n                            type: dataTransferFile.type,\n                            getAsFile: () => dataTransferFile,\n                            webkitGetAsEntry: () => ({\n                                isFile: true,\n                                isDirectory: false,\n                                file: (cb) => cb(dataTransferFile),\n                            }),\n                        },\n                    ],\n                    types: ['Files'],\n                },\n            };\n\n            cy.wrap(subject)\n                .trigger('dragenter', data)\n                .trigger('dragover', data)\n                .trigger('drop', data)\n            });\n        }\n    );\n\nfunction b64toBlob(b64Data, contentType = \"\", sliceSize = 512) {\n    const byteCharacters = atob(b64Data);\n    const byteArrays = [];\n\n    for (let offset = 0; offset < byteCharacters.length; offset += sliceSize) {\n        const slice = byteCharacters.slice(offset, offset + sliceSize);\n\n        const byteNumbers = new Array(slice.length);\n        for (let i = 0; i < slice.length; i++) {\n            byteNumbers[i] = slice.charCodeAt(i);\n        }\n\n        const byteArray = new Uint8Array(byteNumbers);\n\n        byteArrays.push(byteArray);\n    }\n\n    const blob = new Blob(byteArrays, { type: contentType });\n    return blob;\n}\n\n// From https://github.com/cypress-io/cypress/issues/7306#issuecomment-1076451070=\n// This command requires the async package (https://www.npmjs.com/package/async)\nCypress.Commands.add(\"waitForDom\", () => {\n    cy.window({ timeout: 10000 }).then(\n        {\n            // Don't timeout before waitForDom finishes\n            timeout: 10000,\n        },\n        win => {\n            let timeElapsed = 0;\n\n            cy.log(\"Waiting for DOM mutations to complete\");\n\n            return new Cypress.Promise(resolve => {\n                // set the required variables\n                let async = require(\"async\");\n                let observerConfig = { attributes: true, childList: true, subtree: true };\n                let items = Array.apply(null, { length: 50 }).map(Number.call, Number);\n                win.mutationCount = 0;\n                win.previousMutationCount = null;\n\n                // create an observer instance\n                let observer = new win.MutationObserver(mutations => {\n                    mutations.forEach(mutation => {\n                        // Only record \"attributes\" type mutations that are not a \"class\" mutation.\n                        // If the mutation is not an \"attributes\" type, then we always record it.\n                        if (mutation.type === \"attributes\" && mutation.attributeName !== \"class\") {\n                            win.mutationCount += 1;\n                        } else if (mutation.type !== \"attributes\") {\n                            win.mutationCount += 1;\n                        }\n                    });\n\n                    // initialize the previousMutationCount\n                    if 
(win.previousMutationCount == null) win.previousMutationCount = 0;\n                });\n\n                // watch the document body for the specified mutations\n                observer.observe(win.document.body, observerConfig);\n\n                // check the DOM for mutations up to 50 times for a maximum time of 5 seconds\n                async.eachSeries(\n                    items,\n                    function iteratee(item, callback) {\n                        // keep track of the elapsed time so we can log it at the end of the command\n                        timeElapsed = timeElapsed + 100;\n\n                        // make each iteration of the loop 100ms apart\n                        setTimeout(() => {\n                            if (win.mutationCount === win.previousMutationCount) {\n                                // pass an argument to the async callback to exit the loop\n                                return callback(\"Resolved - DOM changes complete.\");\n                            } else if (win.previousMutationCount != null) {\n                                // only set the previous count if the observer has checked the DOM at least once\n                                win.previousMutationCount = win.mutationCount;\n                                return callback();\n                            } else if (win.mutationCount === 0 && win.previousMutationCount == null && item === 4) {\n                                // this is an early exit in case nothing is changing in the DOM. That way we only\n                                // wait 500ms instead of the full 5 seconds when no DOM changes are occurring.\n                                return callback(\"Resolved - Exiting early since no DOM changes were detected.\");\n                            } else {\n                                // proceed to the next iteration\n                                return callback();\n                            }\n                        }, 100);\n                    },\n                    function done() {\n                        // Log the total wait time so users can see it\n                        cy.log(`DOM mutations ${timeElapsed >= 5000 ? 
\"did not complete\" : \"completed\"} in ${timeElapsed} ms`);\n\n                        // disconnect the observer and resolve the promise\n                        observer.disconnect();\n                        resolve();\n                    }\n                );\n            });\n        }\n    );\n});\n\nCypress.Commands.add('waitForLocalStorage', (key, options = {}) => {\n    const timeout = options.timeout || 10000;\n    const interval = options.interval || 100;\n\n    cy.log(`Waiting for localStorage key: ${key}`)\n\n    const checkLocalStorage = () => {\n      return new Cypress.Promise((resolve, reject) => {\n        const startTime = Date.now();\n\n        const check = () => {\n          const value = localStorage.getItem(key);\n\n          if (value !== null) {\n            resolve(value);\n          } else if (Date.now() - startTime > timeout) {\n            reject(new Error(`Timed out waiting for localStorage key: ${key}`));\n          } else {\n            setTimeout(check, interval);\n          }\n        };\n\n        check();\n      });\n    };\n\n    return cy.wrap(checkLocalStorage());\n  });\n\n  //pauses test execution until the localStorage key changes\n  Cypress.Commands.add('waitForLocalStorageUpdate', (key, timeout = 10000) => {\n    const checkInterval = 200; // Interval to check the localStorage value\n    let previousValue = localStorage.getItem(key);\n\n    return new Cypress.Promise((resolve, reject) => {\n      const checkValue = () => {\n        const currentValue = localStorage.getItem(key);\n        if (currentValue !== previousValue) {\n          resolve(currentValue);\n        } else if (Date.now() - startTime >= timeout) {\n          reject(new Error(`Timed out waiting for localStorage key \"${key}\" to change`));\n        } else {\n          setTimeout(checkValue, checkInterval);\n        }\n      };\n\n      const startTime = Date.now();\n      checkValue();\n    });\n  });\n\nCypress.Commands.add(\"setupDockerImage\", (image_name) => {\n    // Create a collection that will be used as a docker image for the tests.\n    let activeUser;\n    let adminUser;\n\n    cy.getUser(\"admin\", \"Admin\", \"User\", true, true)\n        .as(\"adminUser\")\n        .then(function () {\n            adminUser = this.adminUser;\n        });\n\n    cy.getUser('activeuser', 'Active', 'User', false, true)\n        .as('activeUser').then(function () {\n            activeUser = this.activeUser;\n        });\n\n    cy.getAll('@activeUser', '@adminUser').then(([activeUser, adminUser]) => {\n\t    cy.createCollection(adminUser.token, {\n            name: \"docker_image\",\n            manifest_text:\n                \". 
d21353cfe035e3e384563ee55eadbb2f+67108864 5c77a43e329b9838cbec18ff42790e57+55605760 0:122714624:sha256:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678.tar\\n\",\n        })\n            .as(\"dockerImage\")\n            .then(function (dockerImage) {\n                // Give read permissions to the active user on the docker image.\n                cy.createLink(adminUser.token, {\n                    link_class: \"permission\",\n                    name: \"can_read\",\n                    tail_uuid: activeUser.user.uuid,\n                    head_uuid: dockerImage.uuid,\n                })\n                    .as(\"dockerImagePermission\")\n                    .then(function () {\n                        // Set-up docker image collection tags\n                        cy.createLink(activeUser.token, {\n                            link_class: \"docker_image_repo+tag\",\n                            name: image_name,\n                            head_uuid: dockerImage.uuid,\n                        }).as(\"dockerImageRepoTag\");\n                        cy.createLink(activeUser.token, {\n                            link_class: \"docker_image_hash\",\n                            name: \"sha256:d8309758b8fe2c81034ffc8a10c36460b77db7bc5e7b448c4e5b684f9d95a678\",\n                            head_uuid: dockerImage.uuid,\n                        }).as(\"dockerImageHash\");\n                    });\n            });\n    });\n    return cy.getAll(\"@dockerImage\", \"@dockerImageRepoTag\", \"@dockerImageHash\", \"@dockerImagePermission\").then(function ([dockerImage]) {\n        return dockerImage;\n    });\n});\n\nCypress.Commands.add('createExternalCredential', (token, testData = {}) => {\n    const credentialData = {\n        name: `Test External Credential ${Math.floor(Math.random() * 999999)}`,\n        expires_at: moment().add(1, 'year').toISOString(),\n        description: `Test Description ${Math.floor(Math.random() * 999999)}`,\n        credential_class: `Test Credential Class ${Math.floor(Math.random() * 999999)}`,\n        external_id: `Test External ID ${Math.floor(Math.random() * 999999)}`,\n        scopes: [`scope1 ${Math.floor(Math.random() * 999999)}`, `scope2 ${Math.floor(Math.random() * 999999)}`],\n        secret: 'test-secret',\n        ...testData\n    };\n    try {\n        return cy.createResource(token, 'credentials', credentialData)\n    } catch (error) {\n        console.error(error);\n    }\n});\n\n/**\n * Asserts the url path exactly matches (ignores host and hash)\n *\n * @returns the path for further use if needed\n */\nCypress.Commands.add(\"assertUrlPathname\", (path) => {\n    cy.waitForDom();\n    return cy.waitUntil(() => cy.location()\n        .then((url) => url.pathname.endsWith(path) ? 
url.pathname : false)\n    , {\n        errorMsg: \"Timed out waiting for URL path to match: \" + path,\n        timeout: 10000,\n    });\n});\n\n/**\n * Performs navigation to the specified account menu item\n */\nCypress.Commands.add(\"doAccountMenuAction\", (name) => {\n    cy.get('[data-cy=dropdown-menu-button][aria-label=\"Account Management\"]').click();\n    cy.get('#account-menu li[role=menuitem]').contains(name).click();\n    cy.waitForDom();\n});\n\n/**\n * Clicks on the first side panel item exactly matching name\n */\nCypress.Commands.add(\"doSidePanelNavigation\", (name) => {\n    cy.waitForDom();\n    cy.get('[data-cy=tree-li]')\n        .contains(new RegExp(\"^\" + name + \"$\", \"g\"))\n        .click();\n    cy.waitForDom();\n});\n\n/**\n * Clicks on the first breadcrumb exactly matching the passed name\n */\nCypress.Commands.add(\"doBreadcrumbsNavigation\", (name) => {\n    cy.waitForDom();\n    cy.get('[data-cy=breadcrumbs] button')\n        .contains(new RegExp(\"^\" + name + \"$\", \"g\"))\n        .click();\n    cy.waitForDom();\n});\n\n/**\n * Assert exact breadcrumb contents\n *\n * @returns the current breadcrumbs as a string array\n */\nCypress.Commands.add(\"assertBreadcrumbs\", (names) => {\n    cy.waitForDom();\n    // waitUntil allows retrying with a non-assert test\n    // This allows doing non-wrapped comparisons and improves retryability\n    return cy.waitUntil(() => cy.get('[data-cy=breadcrumbs] button')\n        .then(crumbs => {\n            // Everything within must not be a chai/cypress assertion\n            // otherwise it will fail and not retry\n            const crumbNames = crumbs.toArray().map(crumb => crumb.innerText);\n            return _.isEqual(crumbNames, names) ? crumbNames : false;\n        })\n    , {\n        errorMsg: `Timed out waiting for breadcrumbs to match: [ ${names.map(str => `\"${str}\"`).join(\", \")} ]`,\n        timeout: 10000,\n    });\n});\n\n/**\n * Asserts whether the DE contains a certain item, defaults to true\n */\nCypress.Commands.add(\"assertDataExplorerContains\", (name, contains = true) => {\n    cy.waitForDom();\n    contains\n        ? 
cy.get('[data-cy=data-table]').contains(name).should('exist')\n        : cy.get('[data-cy=data-table]').contains(name).should('not.exist');\n});\n\n/**\n * Finds the toolbar buttons and clicks the one exactly matching name\n *\n * Does not currently handle specifying which toolbar (DE or details card) or handling collapsed toolbar\n */\nCypress.Commands.add(\"doToolbarAction\", (name) => {\n    // Toolbars have mixed aria-label locations (button vs span) so this is kept generic\n    // and only searches for the aria-label within the toolbar\n    cy.get(`[data-cy=multiselect-toolbar] [aria-label=\"${name}\"]`, { timeout: 5000 }).click();\n});\n\n/**\n * Perform a context menu action on an item in the data explorer\n */\nCypress.Commands.add(\"doDataExplorerContextAction\", (name, action) => {\n    cy.waitForDom();\n    cy.get('[data-cy=data-table]', { timeout: 10000 }).contains(name, { timeout: 10000 }).rightclick();\n    cy.doContextMenuAction(action);\n});\n\n/**\n * Perform a collections panel options menu action (the top-right menu, not right click)\n */\nCypress.Commands.add(\"doCollectionPanelOptionsAction\", (action) => {\n    cy.waitForDom();\n    cy.get(\"[data-cy=collection-files-panel-options-btn]\", { timeout: 10000 }).click();\n    cy.doContextMenuAction(action);\n});\n\n/**\n * Selects data explorer row checkbox by name\n */\nCypress.Commands.add(\"doDataExplorerSelect\", (name) => {\n    cy.waitForDom();\n    cy.get('[data-cy=data-table]', { timeout: 10000 })\n        .contains(name)\n        .parents('[data-cy=data-table-row]')\n        .find('input[type=checkbox]')\n        .first()\n        .click()\n        .then(() => cy.waitForDom());\n});\n\n/**\n * Selects all visible data explorer items using the select all checkbox\n */\nCypress.Commands.add(\"doDataExplorerSelectAll\", () => {\n    cy.waitForDom();\n    cy.get('[data-cy=data-table] [data-cy=data-table-multiselect-popover]', { timeout: 10000 })\n        .parent()\n        .find('> input[type=checkbox]')\n        .click()\n        .then(() => cy.waitForDom());\n});\n\n/**\n * Selects data explorer rows in the collection files panel\n *\n * @param name Name of file to select\n */\nCypress.Commands.add(\"doCollectionFileSelect\", (name) => {\n    cy.waitForDom();\n    cy.get('[data-cy=collection-files-right-panel]', { timeout: 10000 })\n        .contains(name)\n        .parents('[data-item=true]')\n        .find('input[type=checkbox]')\n        .first()\n        .click()\n        .then(() => cy.waitForDom());\n});\n\n/**\n * Navigates to data explorer item by name\n */\nCypress.Commands.add(\"doDataExplorerNavigate\", (name) => {\n    cy.waitForDom();\n    cy.get('[data-cy=data-table]', { timeout: 10000 })\n        .contains(name)\n        .click()\n        .then(() => cy.waitForDom());\n});\n\n/**\n * Inputs value into data explorer search\n *\n * Useful for when there are too many items in a data explorer for the item of interest to be on the first page\n */\nCypress.Commands.add(\"doDataExplorerSearch\", (value) => {\n    cy.waitForDom();\n    cy.get('[data-cy=search-input]').clear().type(value);\n    cy.waitForDom();\n});\n\n/**\n * Changes MPV panel tabs\n */\nCypress.Commands.add(\"doMPVTabSelect\", (tabName) => {\n    cy.waitForDom();\n    cy.get('[data-cy=mpv-tabs] button').contains(tabName).click();\n    cy.waitForDom();\n});\n\n/**\n * asserts the correct toolbar buttons are displayed\n * by mousing over and checking for the tooltips\n *\n * @param tooltips array of strings\n * 
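@example illustrative usage, with one of the tooltip arrays defined in support/msToolbarTooltips.js\n * cy.assertToolbarButtons(tooltips.readonlyCollection)\n * 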
*/\n\nCypress.Commands.add(\"assertToolbarButtons\", (tooltips) => {\n    cy.get('[data-cy=multiselect-button]').should('have.length', tooltips.length)\n        for (let i = 0; i < tooltips.length; i++) {\n            cy.get('[data-cy=multiselect-button]').eq(i).trigger('mouseover');\n            cy.get('body').contains(tooltips[i]).should('exist', {timeout: 10000})\n            cy.get('[data-cy=multiselect-button]').eq(i).trigger('mouseout');\n        }\n});\n\n/**\n * Checks if the checkboxes associated with the given resource uuids are checked or not\n *\n * @param uuids array of uuids\n */\n\nCypress.Commands.add(\"assertCheckboxes\", (uuids, shouldBeChecked) => {\n    uuids.forEach(uuid => {\n        cy.get(`input[data-cy=\"multiselect-checkbox-${uuid}\"]`).should(shouldBeChecked ? 'be.checked' : 'not.be.checked');\n    });\n});\n\n/**\n * Reusable helper that performs a context menu action - assumes the menu is already open\n */\nCypress.Commands.add(\"doContextMenuAction\", (name) => {\n    cy.waitForDom();\n    cy.get(\"[data-cy=context-menu]\", { timeout: 5000 }).contains(name).click();\n    cy.waitForDom();\n});\n\n\n/**\n * Asserts the presence of a property tag\n * If shouldExist is false, only the property name is checked, as the value doesn't matter.\n *\n * @param propertyName name of the property\n * @param propertyValue value of the property\n * @param shouldExist whether the property tag should exist or not\n */\nCypress.Commands.add(\"assertPropertyTag\", (propertyName, propertyValue, shouldExist = true) => {\n    cy.get(\"[data-cy=resource-properties]\").within(() => {\n        cy.get('span').contains(propertyName).should(shouldExist ? 'exist' : 'not.exist');\n        if (shouldExist) {\n            cy.get('span').contains(propertyName).contains(propertyValue).should('exist');\n        }\n    });\n});\n\n/**\n * Asserts the presence of a details card and its name\n *\n * @param resourceName name of the resource\n * @param shouldExist whether the resource card should exist or not\n */\n\nCypress.Commands.add(\"assertDetailsCardTitle\", (resourceName, shouldExist = true) => {\n    cy.get(`[data-cy=user-details-card],\n            [data-cy=project-details-card],\n            [data-cy=collection-details-card],\n            [data-cy=workflow-details-card],\n            [data-cy=process-details-card]`)\n        .contains(resourceName)\n        .should(shouldExist ? 
'exist' : 'not.exist')\n\n});\n\n/**\n * Sets the currently visible data explorer's page size\n *\n * @param size Desired page size, must exactly match the dropdown value\n */\nCypress.Commands.add(\"doDataExplorerPageSize\", (size) => {\n    // prev/next are buttons and the page size is the only input element\n    cy.get(\"[data-cy=table-pagination] input\").parent().click();\n    cy.get(`div[role=presentation] li[data-value=${size}]`).click();\n});\n\n/**\n * Click the currently visible data explorer's next page button\n */\nCypress.Commands.add(\"doDataExporerNextPage\", () => {\n    cy.get(\"[data-cy=table-pagination] button[title='Go to next page']\").click();\n});\n\n/**\n * Click the currently visible data explorer's prev page button\n */\nCypress.Commands.add(\"doDataExporerPrevPage\", () => {\n    cy.get(\"[data-cy=table-pagination] button[title='Go to previous page']\").click();\n});\n\n/**\n * Assert the current data explorer pagination page size\n */\nCypress.Commands.add(\"assertDataExplorerPageSize\", (size) => {\n    cy.get(\"[data-cy=table-pagination] input\").should('have.value', size);\n});\n\n/**\n * Since the actual page number of DE is not displayed, to assert the current DE\n * page, we multiply the expected page number minus 1 by the page size and check\n * that the first displayed item number matches that + 1\n *\n * @param page Expected DE page number, 1 indexed\n */\nCypress.Commands.add(\"assertDataExplorerPage\", (page) => {\n    cy.get(\"[data-cy=table-pagination] input\")\n        .invoke('val')\n        .then(size => {\n            const firstItemNumber = ((page-1) * size) + 1;\n            // Special case for first page, which can start at 0 or 1\n            // Either is valid and reflects being on page 1 so we accept both\n            const expectedDisplayNumber = firstItemNumber === 1 ? \"[0-1]\" : firstItemNumber;\n            cy.get(\"[data-cy=table-pagination]\").contains(new RegExp(`^${expectedDisplayNumber}-[0-9]+ of [0-9]+$`));\n        });\n});\n"
  },
  {
    "path": "services/workbench2/cypress/support/commands.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n/// <reference types=\"cypress\" />\n// ***********************************************\n// This example commands.ts shows you how to\n// create various custom commands and overwrite\n// existing commands.\n//\n// For more comprehensive examples of custom\n// commands please read more here:\n// https://on.cypress.io/custom-commands\n// ***********************************************\n//\n//\n// -- This is a parent command --\n// Cypress.Commands.add('login', (email, password) => { ... })\n//\n//\n// -- This is a child command --\n// Cypress.Commands.add('drag', { prevSubject: 'element'}, (subject, options) => { ... })\n//\n//\n// -- This is a dual command --\n// Cypress.Commands.add('dismiss', { prevSubject: 'optional'}, (subject, options) => { ... })\n//\n//\n// -- This will overwrite an existing command --\n// Cypress.Commands.overwrite('visit', (originalFn, url, options) => { ... })\n//\n// declare global {\n//   namespace Cypress {\n//     interface Chainable {\n//       login(email: string, password: string): Chainable<void>\n//       drag(subject: string, options?: Partial<TypeOptions>): Chainable<Element>\n//       dismiss(subject: string, options?: Partial<TypeOptions>): Chainable<Element>\n//       visit(originalFn: CommandOriginalFn, url: string, options: Partial<VisitOptions>): Chainable<Element>\n//     }\n//   }\n// }"
  },
  {
    "path": "services/workbench2/cypress/support/component-index.html",
    "content": "<!-- Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: AGPL-3.0 -->\n\n<!DOCTYPE html>\n<html>\n  <head>\n    <meta charset=\"utf-8\">\n    <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\">\n    <meta name=\"viewport\" content=\"width=device-width,initial-scale=1.0\">\n    <title>Components App</title>\n  </head>\n  <body>\n    <div data-cy-root></div>\n  </body>\n</html>"
  },
  {
    "path": "services/workbench2/cypress/support/component.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// ***********************************************************\n// This example support/component.ts is processed and\n// loaded automatically before your test files.\n//\n// This is a great place to put global configuration and\n// behavior that modifies Cypress.\n//\n// You can change the location of this file or turn off\n// automatically serving support files with the\n// 'supportFile' configuration option.\n//\n// You can read more here:\n// https://on.cypress.io/configuration\n// ***********************************************************\n\n// Import commands.js using ES2015 syntax:\nimport './commands'\n\n// Alternatively you can use CommonJS syntax:\n// require('./commands')\n\nimport { mount } from 'cypress/react'\n\n// Augment the Cypress namespace to include type definitions for\n// your custom command.\n// Alternatively, can be defined in cypress/support/component.d.ts\n// with a <reference path=\"./component\" /> at the top of your spec.\n// declare global {\n//   namespace Cypress {\n//     interface Chainable {\n//       mount: typeof mount\n//     }\n//   }\n// }\n\nCypress.Commands.add('mount', mount)\n\n/*\n    The following is a workaraound for Arvados Issue #22483 which is known and persists in Cypress v14+:\n    https://github.com/cypress-io/cypress/issues/28644\n    The entire if statement can be removed once the bug is fixed by Cypress.\n*/\nif (window.Cypress) {\n    // Prevent chunk loading errors from failing tests\n    const originalOnError = window.onerror;\n    window.onerror = (msg, source, lineno, colno, err) => {\n        if (err && err.message && err.message.includes('Loading chunk')) {\n            console.warn('Chunk loading error intercepted:', err);\n            return false;\n        }\n        return originalOnError?.(msg, source, lineno, colno, err);\n    };\n\n    window.addEventListener('unhandledrejection', (event) => {\n        if (event.reason && event.reason.message && event.reason.message.includes('Loading chunk')) {\n            event.preventDefault();\n            console.warn('Chunk loading rejection intercepted:', event.reason);\n        }\n    });\n\n    window.addEventListener('error', (event) => {\n        if (event.error?.message?.includes('Loading chunk')) {\n            event.preventDefault();\n            cy.log('Chunk loading error detected - reloading page');\n            window.location.reload();\n        }\n    });\n}\n\n// Example use:\n// cy.mount(<MyComponent />)"
  },
  {
    "path": "services/workbench2/cypress/support/e2e.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// ***********************************************************\n// This example support/index.js is processed and\n// loaded automatically before your test files.\n//\n// This is a great place to put global configuration and\n// behavior that modifies Cypress.\n//\n// You can change the location of this file or turn off\n// automatically serving support files with the\n// 'supportFile' configuration option.\n//\n// You can read more here:\n// https://on.cypress.io/configuration\n// ***********************************************************\n\n// Import commands.js using ES2015 syntax:\nimport './commands'\n\n// Comment out to turn off fail-fast behavior for all tests\n// DO NOT FORGET TO UNCOMMENT THIS LINE BEFORE COMMITTING\nimport \"cypress-fail-fast\";\n\n// Alternatively you can use CommonJS syntax:\n// require('./commands')\n"
  },
  {
    "path": "services/workbench2/cypress/support/index.d.ts",
    "content": "/**\n  * This command tries to ensure that the elements in the DOM are actually visible\n  * and done (re)rendering. This is due to how React re-renders components.\n  *\n  * IMPORTANT NOTES:\n  *    => You should only use this command in instances where a test is failing due\n  *       to detached elements. Cypress will probably give you a warning along the lines\n  *       of, \"Element has an effective width/height of 0\". This warning is not very useful\n  *       in pointing out it is due to the element being detached from the DOM AFTER the\n  *       cy.get command had already retrieved it. This command can save you from that\n  *       by explicitly waiting for the DOM to stop changing.\n  *    => This command can take anywhere from 100ms to 5 seconds to complete\n  *    => This command will exit early (500ms) when no changes are occurring in the DOM.\n  *       We wait a minimum of 500ms because sometimes it can take up to around that time\n  *       for mutations to start occurring.\n  *\n  * GitHub Issues:\n  *    * https://github.com/cypress-io/cypress/issues/695 (Closed - no activity)\n  *    * https://github.com/cypress-io/cypress/issues/7306 (Open - re-get detached elements)\n  *\n  * @example Wait for the DOM to stop changing before retrieving an element\n  * cy.waitForDom().get('#an-elements-id')\n  */\n waitForDom(): Chainable<any>\n"
  },
  {
    "path": "services/workbench2/cypress/support/msToolbarTooltips.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport const tooltips = {\n    // projects\n    multiProject: [\n        'Move to',\n        'Move to trash',\n    ],\n    adminProject: [\n        'View details',\n        'Open in new tab',\n        'Copy UUID',\n        'Share',\n        'Edit project',\n        'Move to trash',\n        'New project',\n        'Move to',\n        'Freeze project',\n        'Add to favorites',\n        'Add to public favorites',\n        'Copy link to clipboard',\n        'Open with 3rd party client',\n        'API Details',\n    ],\n    adminFrozenProject: [\n        'View details',\n        'Open in new tab',\n        'Copy UUID',\n        'Share',\n        'Unfreeze project',\n        'Add to favorites',\n        'Add to public favorites',\n        'Copy link to clipboard',\n        'Open with 3rd party client',\n        'API Details',\n    ],\n    nonAdminProject: [\n        'View details',\n        'Open in new tab',\n        'Copy UUID',\n        'Share',\n        'Edit project',\n        'Move to trash',\n        'New project',\n        'Move to',\n        'Freeze project',\n        'Add to favorites',\n        'Copy link to clipboard',\n        'Open with 3rd party client',\n        'API Details',\n    ],\n    readOnlyProject: [\n        'View details',\n        'Open in new tab',\n        'Copy UUID',\n        'Add to favorites',\n        'Copy link to clipboard',\n        'Open with 3rd party client',\n        'API Details',\n    ],\n    //colloections\n    multiCollection: [\n        'Move to',\n        'Make a copy',\n        'Move to trash',\n    ],\n    adminCollection: [\n        'View details',\n        'Open in new tab',\n        'Copy UUID',\n        'Share',\n        'Edit collection',\n        'Move to trash',\n        'Make a copy',\n        'Move to',\n        'Add to favorites',\n        'Add to public favorites',\n        'Copy link to clipboard',\n        'Open with 3rd party client',\n        'API Details',\n    ],\n    readonlyCollection: [\n        'View details',\n        'Open in new tab',\n        'Copy UUID',\n        'Make a copy',\n        'Add to favorites',\n        'Copy link to clipboard',\n        'Open with 3rd party client',\n        'API Details',\n    ],\n    readonlyMultiCollection: [\n        'Make a copy',\n    ],\n    //processes\n    multiProcess: [\n        'Remove',\n    ],\n    adminRunningProcess: [\n        'View details',\n        'Open in new tab',\n        'Copy UUID',\n        'Copy and re-run process',\n        'Cancel',\n        'Edit process',\n        'Remove',\n        'Outputs',\n        'Add to favorites',\n        'Add to public favorites',\n        'Copy link to clipboard',\n        'API Details',\n    ],\n    adminOnHoldProcess: [\n        'View details',\n        'Open in new tab',\n        'Copy UUID',\n        'Copy and re-run process',\n        'Edit process',\n        'Remove',\n        'Outputs',\n        'Add to favorites',\n        'Add to public favorites',\n        'Copy link to clipboard',\n        'API Details',\n    ],\n    //workflows\n    multiWorkflow: [\n        'Delete Workflow',\n    ],\n    adminWorkflow: [\n        'View details',\n        'Open in new tab',\n        'Copy UUID',\n        'Run Workflow',\n        'Delete Workflow',\n        'Copy link to clipboard',\n        'API Details',\n    ],\n    //groups\n    multiGroup: [\n        'Remove',\n    ],\n    nonAdminGroup: [\n        'View 
details',\n        'Copy UUID',\n        'API Details',\n        'Edit group',\n        'Remove',\n    ],\n    builtInGroup: [\n        'View details',\n        'Copy UUID',\n        'API Details',\n    ],\n    multiBuiltInGroup: [],\n    //users\n    multiUser: [\n        'Remove',\n    ],\n    nonAdminUser: [\n        'Copy UUID',\n        'API Details',\n        'Attributes',\n        'Remove',\n    ],\n    //multiple resource types\n    projectAndCollection: [\n        'Move to',\n        'Move to trash',\n    ],\n    processAndCollection: [],\n    processAndProject: [],\n    //external credentials\n    externalCredential: [\n        'Copy UUID',\n        'Share',\n        'Edit credential',\n        'Remove',\n        'API Details',\n    ],\n    multiExternalCredential: [\n        'Remove',\n    ],\n};\n"
  },
  {
    "path": "services/workbench2/cypress.config.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { defineConfig } from \"cypress\";\nimport baseWebpackConfig from \"./config/webpack.config\";\nimport path from \"path\";\nimport EventEmitter from \"events\";\n\n// Increase the default max listeners to avoid warnings\n// this doesn't matter when running the entire suite,\n// but necessary when running a single test repeatedly\nEventEmitter.defaultMaxListeners = 100;\n\nconst webpackConfig = baseWebpackConfig(\"development\");\n\nexport default defineConfig({\n  chromeWebSecurity: false,\n  viewportWidth: 1920,\n  viewportHeight: 1080,\n  downloadsFolder: \"cypress/downloads\",\n  videoCompression: false,\n\n  // projectId is for use with Cypress Cloud/CI, which we are currently not using\n  // projectId: \"pzrqer\",\n\n  e2e: {\n    // We've imported your old cypress plugins here.\n    // You may want to clean this up later by importing these.\n      setupNodeEvents(on, config) {\n          require(\"cypress-fail-fast/plugin\")(on, config);\n          require('./cypress/plugins/index.js')(on, config)\n          return config;\n      },\n      baseUrl: 'https://localhost:3000/',\n      experimentalRunAllSpecs: true,\n      // The 2 options below make Electron crash a lot less and Firefox behave better\n      experimentalMemoryManagement: true,\n      numTestsKeptInMemory: 2,\n  },\n\n  component: {\n    devServer: {\n      framework: \"react\",\n      bundler: \"webpack\",\n      webpackConfig: {\n        ...webpackConfig,\n        resolve: {\n          ...webpackConfig.resolve,\n          alias: {\n            ...webpackConfig.resolve.alias,\n            // redirect imported modules to the mock files for the cypress tests\n            \"common/service-provider\": path.resolve(\"src/cypress/mocks/service-provider.ts\"),\n          },\n        },\n      },\n    },\n  },\n});\n"
  },
  {
    "path": "services/workbench2/etc/arvados/workbench2/workbench2.example.json",
    "content": "{\n  \"API_HOST\": \"CHANGE.TO.YOUR.ARVADOS.API.HOST\" \n}"
  },
  {
    "path": "services/workbench2/package.json",
    "content": "{\n  \"name\": \"arvados-workbench-2\",\n  \"version\": \"0.1.0\",\n  \"private\": true,\n  \"dependencies\": {\n    \"@babel/core\": \"^7.16.0\",\n    \"@babel/runtime-corejs2\": \"^7.0.0\",\n    \"@coreui/coreui\": \"^4.3.2\",\n    \"@coreui/react\": \"^4.11.0\",\n    \"@date-io/date-fns\": \"1\",\n    \"@emotion/react\": \"^11.11.4\",\n    \"@emotion/styled\": \"^11.11.5\",\n    \"@fortawesome/fontawesome-svg-core\": \"1.2.28\",\n    \"@fortawesome/free-solid-svg-icons\": \"5.13.0\",\n    \"@fortawesome/react-fontawesome\": \"0.1.9\",\n    \"@mui/icons-material\": \"^5.15.20\",\n    \"@mui/material\": \"^5.15.20\",\n    \"@mui/styles\": \"^5.15.20\",\n    \"@mui/x-date-pickers\": \"^7.7.0\",\n    \"@pmmmwh/react-refresh-webpack-plugin\": \"^0.5.3\",\n    \"@svgr/webpack\": \"8.1.0\",\n    \"@types/debounce\": \"3.0.0\",\n    \"@types/dompurify\": \"^3.0.3\",\n    \"@types/file-saver\": \"2.0.0\",\n    \"@types/js-yaml\": \"3.11.2\",\n    \"@types/jssha\": \"0.0.29\",\n    \"@types/jszip\": \"3.1.5\",\n    \"@types/lodash\": \"4.14.116\",\n    \"@types/react\": \"17.0.11\",\n    \"@types/react-copy-to-clipboard\": \"^5.0.7\",\n    \"@types/react-highlight-words\": \"0.12.0\",\n    \"@types/react-virtualized-auto-sizer\": \"1.0.0\",\n    \"@types/react-window\": \"1.8.2\",\n    \"@types/redux-form\": \"7.4.12\",\n    \"@types/shell-escape\": \"^0.2.0\",\n    \"axios\": \"^1.15.0\",\n    \"babel-loader\": \"^8.2.3\",\n    \"babel-plugin-named-asset-import\": \"^0.3.8\",\n    \"babel-preset-react-app\": \"^10.0.1\",\n    \"bfj\": \"^7.0.2\",\n    \"bootstrap\": \"^5.3.2\",\n    \"browserslist\": \"^4.18.1\",\n    \"camelcase\": \"^6.2.1\",\n    \"caniuse-lite\": \"1.0.30001612\",\n    \"case-sensitive-paths-webpack-plugin\": \"^2.4.0\",\n    \"classnames\": \"2.2.6\",\n    \"connected-react-router\": \"^6.9.3\",\n    \"css-loader\": \"^6.5.1\",\n    \"css-minimizer-webpack-plugin\": \"^3.2.0\",\n    \"cwlts\": \"1.15.29\",\n    \"cypress-fail-fast\": \"^7.1.1\",\n    \"cypress-plugin-tab\": \"^1.0.5\",\n    \"date-fns\": \"^2.28.0\",\n    \"debounce\": \"1.2.0\",\n    \"dompurify\": \"^3.4.0\",\n    \"dotenv\": \"^10.0.0\",\n    \"dotenv-expand\": \"^5.1.0\",\n    \"eslint\": \"^8.3.0\",\n    \"eslint-config-react-app\": \"^7.0.0\",\n    \"eslint-webpack-plugin\": \"^3.1.1\",\n    \"file-loader\": \"^6.2.0\",\n    \"file-saver\": \"2.0.1\",\n    \"fs-extra\": \"^10.0.0\",\n    \"fstream\": \"1.0.12\",\n    \"html-webpack-plugin\": \"^5.5.0\",\n    \"identity-obj-proxy\": \"^3.0.0\",\n    \"is-image\": \"3.0.0\",\n    \"js-yaml\": \"^4.1.1\",\n    \"jssha\": \"2.3.1\",\n    \"jszip\": \"^3.10.1\",\n    \"lodash\": \"^4.18.1\",\n    \"lodash-es\": \"^4.18.1\",\n    \"mem\": \"4.0.0\",\n    \"mime\": \"^4.0.3\",\n    \"mini-css-extract-plugin\": \"^2.4.5\",\n    \"moment\": \"^2.29.4\",\n    \"parse-duration\": \"^2.1.3\",\n    \"path-browserify\": \"^1.0.1\",\n    \"popper-max-size-modifier\": \"^0.2.0\",\n    \"postcss\": \"^8.5.10\",\n    \"postcss-flexbugs-fixes\": \"^5.0.2\",\n    \"postcss-loader\": \"^6.2.1\",\n    \"postcss-normalize\": \"^10.0.1\",\n    \"postcss-preset-env\": \"^7.0.1\",\n    \"prompts\": \"^2.4.2\",\n    \"prop-types\": \"15.7.2\",\n    \"query-string\": \"6.9.0\",\n    \"react\": \"^17.0.0\",\n    \"react-app-polyfill\": \"^3.0.0\",\n    \"react-copy-to-clipboard\": \"^5.1.0\",\n    \"react-dev-utils\": \"^12.0.0\",\n    \"react-dnd\": \"5.0.0\",\n    \"react-dnd-html5-backend\": \"5.0.1\",\n    \"react-dom\": \"16.14.0\",\n    
\"react-highlight-words\": \"0.14.0\",\n    \"react-idle-timer\": \"4.3.6\",\n    \"react-loading-skeleton\": \"^3.5.0\",\n    \"react-redux\": \"6.0.1\",\n    \"react-refresh\": \"^0.11.0\",\n    \"react-router\": \"5.3.4\",\n    \"react-router-dom\": \"5.3.4\",\n    \"react-rte\": \"^0.16.5\",\n    \"react-splitter-layout\": \"3.0.1\",\n    \"react-transition-group\": \"2.5.0\",\n    \"react-virtualized-auto-sizer\": \"1.0.2\",\n    \"react-window\": \"1.8.5\",\n    \"redux\": \"4.0.3\",\n    \"redux-devtools-extension\": \"^2.13.9\",\n    \"redux-form\": \"^8.3.10\",\n    \"redux-saga\": \"^1.3.0\",\n    \"redux-thunk\": \"2.3.0\",\n    \"reselect\": \"4.0.0\",\n    \"resolve\": \"^1.20.0\",\n    \"resolve-url-loader\": \"5.0.0\",\n    \"sass-loader\": \"^12.3.0\",\n    \"semver\": \"^7.3.5\",\n    \"set-value\": \"2.0.1\",\n    \"shell-escape\": \"^0.2.0\",\n    \"sinon\": \"7.3\",\n    \"source-map-loader\": \"^3.0.0\",\n    \"style-loader\": \"^3.3.1\",\n    \"styled-components\": \"^6.1.13\",\n    \"tailwindcss\": \"^3.0.2\",\n    \"terser-webpack-plugin\": \"^5.2.5\",\n    \"tippy.js\": \"^6.3.7\",\n    \"unionize\": \"2.1.2\",\n    \"uuid\": \"3.3.2\",\n    \"webpack\": \"5.105.2\",\n    \"webpack-dev-server\": \"^4.6.0\",\n    \"webpack-manifest-plugin\": \"^4.0.2\",\n    \"workbox-webpack-plugin\": \"^6.4.1\"\n  },\n  \"scripts\": {\n    \"start\": \"BROWSER=none node scripts/start.js\",\n    \"build\": \"REACT_APP_VERSION=$VERSION REACT_APP_BUILD_NUMBER=$BUILD_NUMBER REACT_APP_GIT_COMMIT=$GIT_COMMIT node scripts/build.js\",\n    \"build-local\": \"node scripts/build.js\",\n    \"test\": \"yarn cypress run --component\",\n    \"test-path\": \"yarn cypress run --component --spec\",\n    \"lint\": \"tslint src/** -t verbose\",\n    \"build-css\": \"node-sass src/ -o src/\",\n    \"watch-css\": \"npm run build-css && node-sass src/ -o src/ --watch --recursive\"\n  },\n  \"devDependencies\": {\n    \"@sinonjs/fake-timers\": \"^10.3.0\",\n    \"@types/classnames\": \"2.2.6\",\n    \"@types/is-image\": \"3.0.0\",\n    \"@types/node\": \"15.12.4\",\n    \"@types/react-dom\": \"17.0.8\",\n    \"@types/react-redux\": \"6.0.9\",\n    \"@types/react-router\": \"4.0.31\",\n    \"@types/react-router-dom\": \"4.3.1\",\n    \"@types/react-router-redux\": \"5.0.16\",\n    \"@types/redux-devtools\": \"3.0.44\",\n    \"@types/redux-mock-store\": \"1.0.2\",\n    \"@types/sinon\": \"7.5\",\n    \"@types/uuid\": \"3.4.4\",\n    \"axios-mock-adapter\": \"^2.1.0\",\n    \"cypress\": \"^13.6.6\",\n    \"cypress-wait-until\": \"^3.0.1\",\n    \"node-sass\": \"^9.0.0\",\n    \"redux-devtools\": \"3.4.1\",\n    \"redux-mock-store\": \"1.5.4\",\n    \"ts-mock-imports\": \"1.3.7\",\n    \"tslint\": \"5.20.0\",\n    \"tslint-etc\": \"1.6.0\",\n    \"typescript\": \"4.3.4\",\n    \"wait-on\": \"8.0.1\",\n    \"yamljs\": \"0.3.0\"\n  },\n  \"browserslist\": {\n    \"production\": [\n      \">0.2%\",\n      \"not dead\",\n      \"not op_mini all\"\n    ],\n    \"development\": [\n      \"last 1 chrome version\",\n      \"last 1 firefox version\",\n      \"last 1 safari version\"\n    ]\n  },\n  \"packageManager\": \"yarn@3.2.0\",\n  \"eslintConfig\": {\n    \"extends\": [\n      \"react-app\"\n    ],\n    \"ignorePatterns\": [\n      \"**/*.cy.js\",\n      \"**/commands.js\"\n    ],\n    \"rules\": {\n      \"react-hooks/exhaustive-deps\": \"off\"\n    }\n  },\n  \"babel\": {\n    \"presets\": [\n      \"react-app\"\n    ]\n  }\n}\n"
  },
  {
    "path": "services/workbench2/public/file-viewers-example.json",
    "content": "[\n    {\n        \"name\": \"File browser\",\n        \"extensions\": [\n            \".txt\",\n            \".zip\"\n        ],\n        \"url\": \"https://doc.arvados.org\",\n        \"filePathParam\": \"filePath\",\n        \"iconUrl\": \"https://material.io/tools/icons/static/icons/baseline-next_week-24px.svg\"\n    },\n    {\n        \"name\": \"Collection browser\",\n        \"extensions\": [],\n        \"collections\": true,\n        \"url\": \"https://doc.arvados.org\",\n        \"filePathParam\": \"collectionPath\"\n    },\n    {\n        \"name\": \"Universal browser\",\n        \"collections\": true,\n        \"url\": \"https://doc.arvados.org\",\n        \"filePathParam\": \"filePath\"\n    }\n]"
  },
  {
    "path": "services/workbench2/public/index.html",
    "content": "<!DOCTYPE html>\n<html lang=\"en\">\n  <head>\n    <meta charset=\"utf-8\">\n    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, shrink-to-fit=no\">\n    <meta name=\"theme-color\" content=\"#000000\">\n    <!--\n      manifest.json provides metadata used when your web app is added to the\n      homescreen on Android. See https://developers.google.com/web/fundamentals/engage-and-retain/web-app-manifest/\n    -->\n    <link rel=\"manifest\" href=\"%PUBLIC_URL%/manifest.json\">\n    <link rel=\"shortcut icon\" href=\"%PUBLIC_URL%/favicon.ico?v=1\">\n    <link href=\"//netdna.bootstrapcdn.com/font-awesome/3.2.1/css/font-awesome.css\" rel=\"stylesheet\">\n    <!--\n      Notice the use of %PUBLIC_URL% in the tags above.\n      It will be replaced with the URL of the `public` folder during the build.\n      Only files inside the `public` folder can be referenced from the HTML.\n\n      Unlike \"/favicon.ico\" or \"favicon.ico\", \"%PUBLIC_URL%/favicon.ico\" will\n      work correctly both with client-side routing and a non-root public URL.\n      Learn how to configure a non-root public URL by running `npm run build`.\n    -->\n    <title>Arvados Workbench 2</title>\n    <script>FontAwesomeConfig = { autoReplaceSvg: 'nest' }</script>\n    <script defer src=\"https://use.fontawesome.com/releases/v5.0.13/js/all.js\" integrity=\"sha384-xymdQtn1n3lH2wcu0qhcdaOpQwyoarkgLVxC/wZ5q7h9gHtxICrpcaSUfygqZGOe\" crossorigin=\"anonymous\"></script>\n  </head>\n  <body>\n    <noscript>\n      You need to enable JavaScript to run this app.\n    </noscript>\n    <div id=\"root\"></div>\n    <!--\n      This HTML file is a template.\n      If you open it directly in the browser, you will see an empty page.\n\n      You can add webfonts, meta tags, or analytics to this file.\n      The build step will place the bundled scripts into the <body> tag.\n\n      To begin the development, run `npm start` or `yarn start`.\n      To create a production bundle, use `npm run build` or `yarn build`.\n    -->\n  </body>\n</html>\n"
  },
  {
    "path": "services/workbench2/public/manifest.json",
    "content": "{\n  \"short_name\": \"Arvados Workbench 2\",\n  \"name\": \"Arvados Workbench 2\",\n  \"icons\": [\n    {\n      \"src\": \"favicon.ico\",\n      \"sizes\": \"64x64 32x32 24x24 16x16\",\n      \"type\": \"image/x-icon\"\n    }\n  ],\n  \"start_url\": \"./index.html\",\n  \"display\": \"standalone\",\n  \"theme_color\": \"#000000\",\n  \"background_color\": \"#ffffff\"\n}\n"
  },
  {
    "path": "services/workbench2/public/webshell/README",
    "content": "See also\n* VirtualMachinesController#webshell\n* https://code.google.com/p/shellinabox/source/browse/#git%2Fshellinabox\n"
  },
  {
    "path": "services/workbench2/public/webshell/index.html",
    "content": "<!DOCTYPE html>\n    <head>\n    <title></title>\n    <link rel=\"stylesheet\" href=\"styles.css\" type=\"text/css\">\n    <style type=\"text/css\">\n      body {\n        margin: 0px;\n      }\n      #notoken {\n        position: absolute;\n        top: 0;\n        left: 0;\n        right: 0;\n        bottom: 0;\n        text-align: center;\n        vertical-align: middle;\n        line-height: 100vh;\n        z-index: 100;\n        font-family: sans;\n      }\n    </style>\n    <script type=\"text/javascript\"><!--\n      (function() {\n        // We would like to hide overflowing lines as this can lead to\n        // visually jarring results if the browser substitutes oversized\n        // Unicode characters from different fonts. Unfortunately, a bug\n        // in Firefox prevents it from allowing multi-line text\n        // selections whenever we change the \"overflow\" style. So, only\n        // do so for non-Netscape browsers.\n        if (typeof navigator.appName == 'undefined' ||\n            navigator.appName != 'Netscape') {\n          document.write('<style type=\"text/css\">' +\n                         '#vt100 #console div, #vt100 #alt_console div {' +\n                         '  overflow: hidden;' +\n                         '}' +\n                         '</style>');\n        }\n      })();\n      var sh;\n      var urlParams = new URLSearchParams(window.location.search);\n      var token = urlParams.get('token');\n      var user = urlParams.get('login');\n      var host = urlParams.get('host');\n      var timeout = urlParams.get('timeout');\n      urlParams = null;\n\n      var idleTimeoutMs = timeout * 1000;\n\n      function updateIdleTimer() {\n        var currentTime = Date.now();\n        var lastTime = localStorage.getItem('lastActiveTimestamp');\n        if (currentTime - lastTime > 1000) {\n          localStorage.setItem('lastActiveTimestamp', currentTime);\n        }\n      }\n\n      function checkIdleTimer() {\n        var currentTime = Date.now();\n        var lastTime = localStorage.getItem('lastActiveTimestamp');\n        if (currentTime - lastTime > idleTimeoutMs) {\n          //logout\n          sh.reset();\n          sh.sessionClosed(\"Session timed out after \" + timeout + \" seconds.\");\n          document.body.onmousemove = undefined;\n          document.body.onkeydown = undefined;\n        } else {\n          setTimeout(checkIdleTimer, 1000);\n        }\n      }\n\n      function login() {\n        sh = new ShellInABox(host);\n\n        var findText = function(txt) {\n          var a = document.querySelectorAll(\"span.ansi0\");\n          for (var i = 0; i < a.length; i++) {\n            if (a[i].textContent.indexOf(txt) > -1) {\n              return true;\n            }\n          }\n          return false;\n        }\n\n        var trySendToken = function() {\n          // change this text when PAM is reconfigured to present a\n          // password prompt that we can wait for.\n          if (findText(\"assword:\")) {\n             sh.keysPressed(token + \"\\n\");\n             sh.vt100('(sent authentication token)\\n');\n             token = null;\n             if (timeout > 0) {\n               updateIdleTimer();\n               document.body.onmousemove = updateIdleTimer;\n               document.body.onkeydown = updateIdleTimer;\n               setTimeout(checkIdleTimer, 1000);\n             }\n          } else {\n            setTimeout(trySendToken, 200);\n          }\n        };\n\n        var trySendLogin = function() {\n   
       if (findText(\"login:\")) {\n            sh.keysPressed(user + \"\\n\");\n            // Make this wait shorter when PAM is reconfigured to\n            // present a password prompt that we can wait for.\n            setTimeout(trySendToken, 200);\n          } else {\n            setTimeout(trySendLogin, 200);\n          }\n        };\n\n        trySendLogin();\n      }\n\n      function init() {\n        if (token) {\n          history.replaceState(null, \"\", `/webshell/?host=${encodeURIComponent(host)}&timeout=${timeout}&login=${encodeURIComponent(user)}`);\n        } else if (localStorage.getItem('apiToken')) {\n          token = localStorage.getItem('apiToken');\n        } else {\n          document.getElementById(\"notoken\").style.display = \"block\";\n          return;\n        }\n        login();\n      }\n    // -->\n</script>\n    <script type=\"text/javascript\" src=\"shell_in_a_box.js\"></script>\n  </head>\n  <!-- Load ShellInABox from a timer as Konqueror sometimes fails to\n       correctly deal with the enclosing frameset (if any), if we do not\n       do this\n   -->\n<body onload=\"setTimeout(init, 1000)\"\n    scroll=\"no\"><noscript>JavaScript must be enabled for ShellInABox</noscript>\n    <div id=\"notoken\" style=\"display: none;\">\n      Error: No token found. Please return to <a href=\"/virtual-machines-user\">Virtual Machines</a> and try again.\n    </div>\n</body>\n</html>\n"
  },
  {
    "path": "services/workbench2/public/webshell/keyboard.html",
    "content": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\" xmlns:v=\"urn:schemas-microsoft-com:vml\" xml:lang=\"en\" lang=\"en\">\n<head>\n</head>\n<body><pre class=\"box\"><div\n  ><i id=\"27\">Esc</i><i id=\"112\">F1</i><i id=\"113\">F2</i><i id=\"114\">F3</i\n  ><i id=\"115\">F4</i><i id=\"116\">F5</i><i id=\"117\">F6</i><i id=\"118\">F7</i\n  ><i id=\"119\">F8</i><i id=\"120\">F9</i><i id=\"121\">F10</i><i id=\"122\">F11</i\n  ><i id=\"123\">F12</i><br\n  /><b><span class=\"unshifted\">`</span><span class=\"shifted\">~</span></b\n    ><b><span class=\"unshifted\">1</span><span class=\"shifted\">!</span></b\n    ><b><span class=\"unshifted\">2</span><span class=\"shifted\">@</span></b\n    ><b><span class=\"unshifted\">3</span><span class=\"shifted\">#</span></b\n    ><b><span class=\"unshifted\">4</span><span class=\"shifted\">&#36;</span></b\n    ><b><span class=\"unshifted\">5</span><span class=\"shifted\">&#37;</span></b\n    ><b><span class=\"unshifted\">6</span><span class=\"shifted\">^</span></b\n    ><b><span class=\"unshifted\">7</span><span class=\"shifted\">&amp;</span></b\n    ><b><span class=\"unshifted\">8</span><span class=\"shifted\">*</span></b\n    ><b><span class=\"unshifted\">9</span><span class=\"shifted\">(</span></b\n    ><b><span class=\"unshifted\">0</span><span class=\"shifted\">)</span></b\n    ><b><span class=\"unshifted\">-</span><span class=\"shifted\">_</span></b\n    ><b><span class=\"unshifted\">=</span><span class=\"shifted\">+</span></b\n    ><i id=\"8\">&nbsp;&larr;&nbsp;</i\n    ><br\n  /><i id=\"9\">Tab</i\n    ><b>Q</b><b>W</b><b>E</b><b>R</b><b>T</b><b>Y</b><b>U</b><b>I</b><b>O</b\n    ><b>P</b\n    ><b><span class=\"unshifted\">[</span><span class=\"shifted\">{</span></b\n    ><b><span class=\"unshifted\">]</span><span class=\"shifted\">}</span></b\n    ><b><span class=\"unshifted\">&#92;</span><span class=\"shifted\">|</span></b\n    ><br\n  /><u>Tab&nbsp;&nbsp;</u\n    ><b>A</b><b>S</b><b>D</b><b>F</b><b>G</b><b>H</b><b>J</b><b>K</b><b>L</b\n    ><b><span class=\"unshifted\">;</span><span class=\"shifted\">:</span></b\n    ><b><span class=\"unshifted\">&#39;</span><span class=\"shifted\">\"</span></b\n    ><i id=\"13\">Enter</i\n    ><br\n  /><u>&nbsp;&nbsp;</u\n    ><i id=\"16\">Shift</i\n    ><b>Z</b><b>X</b><b>C</b><b>V</b><b>B</b><b>N</b><b>M</b\n    ><b><span class=\"unshifted\">,</span><span class=\"shifted\">&lt;</span></b\n    ><b><span class=\"unshifted\">.</span><span class=\"shifted\">&gt;</span></b\n    ><b><span class=\"unshifted\">/</span><span class=\"shifted\">?</span></b\n    ><i id=\"16\">Shift</i\n    ><br\n  /><u>XXX</u\n    ><i id=\"17\">Ctrl</i\n    ><i id=\"18\">Alt</i\n    ><i style=\"width: 25ex\">&nbsp</i\n  ></div\n  >&nbsp;&nbsp;&nbsp;<div\n    ><i id=\"45\">Ins</i><i id=\"46\">Del</i><i id=\"36\">Home</i><i id=\"35\">End</i\n    ><br\n    /><u>&nbsp;</u><br\n    /><u>&nbsp;</u><br\n    /><u>Ins</u><s>&nbsp;</s><b id=\"38\">&uarr;</b><s>&nbsp;</s><u>&nbsp;</u\n      ><b id=\"33\">&uArr;</b><br\n    /><u>Ins</u><b id=\"37\">&larr;</b><b id=\"40\">&darr;</b\n      ><b id=\"39\">&rarr;</b><u>&nbsp;</u><b id=\"34\">&dArr;</b\n  ></div\n></pre></body></html>\n"
  },
  {
    "path": "services/workbench2/public/webshell/shell_in_a_box.js",
    "content": "// Copyright (C) 2008-2010 Markus Gutschke <markus@shellinabox.com> All rights reserved.\n//\n// SPDX-License-Identifier: GPL-2.0\n\n// This file contains code from shell_in_a_box.js and vt100.js\n\n\n// ShellInABox.js -- Use XMLHttpRequest to provide an AJAX terminal emulator.\n// Copyright (C) 2008-2010 Markus Gutschke <markus@shellinabox.com>\n//\n// This program is free software; you can redistribute it and/or modify\n// it under the terms of the GNU General Public License version 2 as\n// published by the Free Software Foundation.\n//\n// This program is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n// GNU General Public License for more details.\n//\n// You should have received a copy of the GNU General Public License along\n// with this program; if not, write to the Free Software Foundation, Inc.,\n// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n//\n// In addition to these license terms, the author grants the following\n// additional rights:\n//\n// If you modify this program, or any covered work, by linking or\n// combining it with the OpenSSL project's OpenSSL library (or a\n// modified version of that library), containing parts covered by the\n// terms of the OpenSSL or SSLeay licenses, the author\n// grants you additional permission to convey the resulting work.\n// Corresponding Source for a non-source form of such a combination\n// shall include the source code for the parts of OpenSSL used as well\n// as that of the covered work.\n//\n// You may at your option choose to remove this additional permission from\n// the work, or from any part of it.\n//\n// It is possible to build this program in a way that it loads OpenSSL\n// libraries at run-time. If doing so, the following notices are required\n// by the OpenSSL and SSLeay licenses:\n//\n// This product includes software developed by the OpenSSL Project\n// for use in the OpenSSL Toolkit. (http://www.openssl.org/)\n//\n// This product includes cryptographic software written by Eric Young\n// (eay@cryptsoft.com)\n//\n//\n// The most up-to-date version of this program is always available from\n// http://shellinabox.com\n//\n//\n// Notes:\n//\n// The author believes that for the purposes of this license, you meet the\n// requirements for publishing the source code, if your web server publishes\n// the source in unmodified form (i.e. with licensing information, comments,\n// formatting, and identifier names intact). If there are technical reasons\n// that require you to make changes to the source code when serving the\n// JavaScript (e.g to remove pre-processor directives from the source), these\n// changes should be done in a reversible fashion.\n//\n// The author does not consider websites that reference this script in\n// unmodified form, and web servers that serve this script in unmodified form\n// to be derived works. 
As such, they are believed to be outside of the\n// scope of this license and not subject to the rights or restrictions of the\n// GNU General Public License.\n//\n// If in doubt, consult a legal professional familiar with the laws that\n// apply in your country.\n\n// #define XHR_UNITIALIZED 0\n// #define XHR_OPEN        1\n// #define XHR_SENT        2\n// #define XHR_RECEIVING   3\n// #define XHR_LOADED      4\n\n// IE does not define XMLHttpRequest by default, so we provide a suitable\n// wrapper.\nif (typeof XMLHttpRequest == 'undefined') {\n  XMLHttpRequest = function() {\n    try { return new ActiveXObject('Msxml2.XMLHTTP.6.0');} catch (e) { }\n    try { return new ActiveXObject('Msxml2.XMLHTTP.3.0');} catch (e) { }\n    try { return new ActiveXObject('Msxml2.XMLHTTP');    } catch (e) { }\n    try { return new ActiveXObject('Microsoft.XMLHTTP'); } catch (e) { }\n    throw new Error('');\n  };\n}\n\nfunction extend(subClass, baseClass) {\n  function inheritance() { }\n  inheritance.prototype          = baseClass.prototype;\n  subClass.prototype             = new inheritance();\n  subClass.prototype.constructor = subClass;\n  subClass.prototype.superClass  = baseClass.prototype;\n};\n\nfunction ShellInABox(url, container) {\n  if (url == undefined) {\n    this.rooturl    = document.location.href;\n    this.url        = document.location.href.replace(/[?#].*/, '');\n  } else {\n    this.rooturl    = url;\n    this.url        = url;\n  }\n  if (document.location.hash != '') {\n    var hash        = decodeURIComponent(document.location.hash).\n                      replace(/^#/, '');\n    this.nextUrl    = hash.replace(/,.*/, '');\n    this.session    = hash.replace(/[^,]*,/, '');\n  } else {\n    this.nextUrl    = this.url;\n    this.session    = null;\n  }\n  this.pendingKeys  = '';\n  this.keysInFlight = false;\n  this.connected    = false;\n  this.superClass.constructor.call(this, container);\n\n  // We have to initiate the first XMLHttpRequest from a timer. Otherwise,\n  // Chrome never realizes that the page has loaded.\n  setTimeout(function(shellInABox) {\n               return function() {\n                 shellInABox.sendRequest(true);\n               };\n             }(this), 1);\n};\nextend(ShellInABox, VT100);\n\nShellInABox.prototype.sessionClosed = function(msg) {\n  try {\n    this.connected    = false;\n    if (this.session) {\n      this.session    = undefined;\n      if (this.cursorX > 0) {\n        this.vt100('\\r\\n');\n      }\n      this.vt100(msg || 'Session closed.');\n      this.currentRequest.abort();\n    }\n    // Revealing the \"reconnect\" button is commented out until we hook\n    // up the username+token auto-login mechanism to the new session:\n    //this.showReconnect(true);\n  } catch (e) {\n  }\n};\n\nShellInABox.prototype.reconnect = function() {\n  this.showReconnect(false);\n  if (!this.session) {\n    if (document.location.hash != '') {\n      // A shellinaboxd daemon launched from a CGI only allows a single\n      // session. In order to reconnect, we must reload the frame definition\n      // and obtain a new port number. 
As this is a different origin, we\n      // need to get enclosing page to help us.\n      parent.location        = this.nextUrl;\n    } else {\n      if (this.url != this.nextUrl) {\n        document.location.replace(this.nextUrl);\n      } else {\n        this.pendingKeys     = '';\n        this.keysInFlight    = false;\n        this.reset(true);\n        this.sendRequest(true);\n      }\n    }\n  }\n  return false;\n};\n\nShellInABox.prototype.sendRequest = function(init = false, request) {\n  if (request == undefined) {\n    request                  = new XMLHttpRequest();\n  }\n  request.open('POST', this.url + '?', true);\n  request.setRequestHeader('Cache-Control', 'no-cache');\n  request.setRequestHeader('Content-Type',\n                           'application/x-www-form-urlencoded; charset=utf-8');\n  var content                = 'width=' + this.terminalWidth +\n                               '&height=' + this.terminalHeight +\n                               (this.session ? '&session=' +\n                                encodeURIComponent(this.session) : '&rooturl='+\n                                encodeURIComponent(this.rooturl));\n\n  request.onreadystatechange = function(shellInABox) {\n    return function() {\n             try {\n               return shellInABox.onReadyStateChange(request, init);\n             } catch (e) {\n               shellInABox.sessionClosed();\n             }\n           }\n    }(this);\n  ShellInABox.lastRequestSent = Date.now();\n  request.send(content);\n  this.currentRequest = request;\n};\n\nShellInABox.prototype.onReadyStateChange = function(request, init) {\n  if (request.readyState == 4 /* XHR_LOADED */ && (this.connected || init)) {\n    if (request.status == 200) {\n      this.connected = true;\n      var response   = eval('(' + request.responseText + ')');\n      if (response.data) {\n        this.vt100(response.data);\n      }\n\n      if (!response.session ||\n          this.session && this.session != response.session) {\n        this.sessionClosed();\n      } else {\n        this.session = response.session;\n        this.sendRequest(false, request);\n      }\n    } else if (request.status == 0) {\n        if (ShellInABox.lastRequestSent + 2000 < Date.now()) {\n            // Timeout, try again\n            this.sendRequest(false, request);\n        } else {\n            this.vt100('\\r\\n\\r\\nRequest failed.');\n            this.sessionClosed();\n        }\n    } else {\n      this.sessionClosed();\n    }\n  }\n};\n\nShellInABox.prototype.sendKeys = function(keys) {\n  if (!this.connected) {\n    return;\n  }\n  if (this.keysInFlight || this.session == undefined) {\n    this.pendingKeys          += keys;\n  } else {\n    this.keysInFlight          = true;\n    keys                       = this.pendingKeys + keys;\n    this.pendingKeys           = '';\n    var request                = new XMLHttpRequest();\n    request.open('POST', this.url + '?', true);\n    request.setRequestHeader('Cache-Control', 'no-cache');\n    request.setRequestHeader('Content-Type',\n                           'application/x-www-form-urlencoded; charset=utf-8');\n    var content                = 'width=' + this.terminalWidth +\n                                 '&height=' + this.terminalHeight +\n                                 '&session=' +encodeURIComponent(this.session)+\n                                 '&keys=' + encodeURIComponent(keys);\n    request.onreadystatechange = function(shellInABox) {\n      return function() {\n               try {\n            
     return shellInABox.keyPressReadyStateChange(request);\n               } catch (e) {\n               }\n             }\n      }(this);\n    request.send(content);\n  }\n};\n\nShellInABox.prototype.keyPressReadyStateChange = function(request) {\n  if (request.readyState == 4 /* XHR_LOADED */) {\n    this.keysInFlight = false;\n    if (this.pendingKeys) {\n      this.sendKeys('');\n    }\n  }\n};\n\nShellInABox.prototype.keysPressed = function(ch) {\n  var hex = '0123456789ABCDEF';\n  var s   = '';\n  for (var i = 0; i < ch.length; i++) {\n    var c = ch.charCodeAt(i);\n    if (c < 128) {\n      s += hex.charAt(c >> 4) + hex.charAt(c & 0xF);\n    } else if (c < 0x800) {\n      s += hex.charAt(0xC +  (c >> 10)       ) +\n           hex.charAt(       (c >>  6) & 0xF ) +\n           hex.charAt(0x8 + ((c >>  4) & 0x3)) +\n           hex.charAt(        c        & 0xF );\n    } else if (c < 0x10000) {\n      s += 'E'                                 +\n           hex.charAt(       (c >> 12)       ) +\n           hex.charAt(0x8 + ((c >> 10) & 0x3)) +\n           hex.charAt(       (c >>  6) & 0xF ) +\n           hex.charAt(0x8 + ((c >>  4) & 0x3)) +\n           hex.charAt(        c        & 0xF );\n    } else if (c < 0x110000) {\n      s += 'F'                                 +\n           hex.charAt(       (c >> 18)       ) +\n           hex.charAt(0x8 + ((c >> 16) & 0x3)) +\n           hex.charAt(       (c >> 12) & 0xF ) +\n           hex.charAt(0x8 + ((c >> 10) & 0x3)) +\n           hex.charAt(       (c >>  6) & 0xF ) +\n           hex.charAt(0x8 + ((c >>  4) & 0x3)) +\n           hex.charAt(        c        & 0xF );\n    }\n  }\n  this.sendKeys(s);\n};\n\nShellInABox.prototype.resized = function(w, h) {\n  // Do not send a resize request until we are fully initialized.\n  if (this.session) {\n    // sendKeys() always transmits the current terminal size. So, flush all\n    // pending keys.\n    this.sendKeys('');\n  }\n};\n\nShellInABox.prototype.toggleSSL = function() {\n  if (document.location.hash != '') {\n    if (this.nextUrl.match(/\\?plain$/)) {\n      this.nextUrl    = this.nextUrl.replace(/\\?plain$/, '');\n    } else {\n      this.nextUrl    = this.nextUrl.replace(/[?#].*/, '') + '?plain';\n    }\n    if (!this.session) {\n      parent.location = this.nextUrl;\n    }\n  } else {\n    this.nextUrl      = this.nextUrl.match(/^https:/)\n           ? 
this.nextUrl.replace(/^https:/, 'http:').replace(/\\/*$/, '/plain')\n           : this.nextUrl.replace(/^http/, 'https').replace(/\\/*plain$/, '');\n  }\n  if (this.nextUrl.match(/^[:]*:\\/\\/[^/]*$/)) {\n    this.nextUrl     += '/';\n  }\n  if (this.session && this.nextUrl != this.url) {\n    alert('This change will take effect the next time you login.');\n  }\n};\n\nShellInABox.prototype.extendContextMenu = function(entries, actions) {\n  // Modify the entries and actions in place, adding any locally defined\n  // menu entries.\n  var oldActions            = [ ];\n  for (var i = 0; i < actions.length; i++) {\n    oldActions[i]           = actions[i];\n  }\n  for (var node = entries.firstChild, i = 0, j = 0; node;\n       node = node.nextSibling) {\n    if (node.tagName == 'LI') {\n      actions[i++]          = oldActions[j++];\n      if (node.id == \"endconfig\") {\n        node.id             = '';\n        if (typeof serverSupportsSSL != 'undefined' && serverSupportsSSL &&\n            !(typeof disableSSLMenu != 'undefined' && disableSSLMenu)) {\n          // If the server supports both SSL and plain text connections,\n          // provide a menu entry to switch between the two.\n          var newNode       = document.createElement('li');\n          var isSecure;\n          if (document.location.hash != '') {\n            isSecure        = !this.nextUrl.match(/\\?plain$/);\n          } else {\n            isSecure        =  this.nextUrl.match(/^https:/);\n          }\n          newNode.innerHTML = (isSecure ? '&#10004; ' : '') + 'Secure';\n          if (node.nextSibling) {\n            entries.insertBefore(newNode, node.nextSibling);\n          } else {\n            entries.appendChild(newNode);\n          }\n          actions[i++]      = this.toggleSSL;\n          node              = newNode;\n        }\n        node.id             = 'endconfig';\n      }\n    }\n  }\n\n};\n\nShellInABox.prototype.about = function() {\n  alert(\"Shell In A Box version \" + \"2.10 (revision 239)\" +\n        \"\\nCopyright 2008-2010 by Markus Gutschke\\n\" +\n        \"For more information check http://shellinabox.com\" +\n        (typeof serverSupportsSSL != 'undefined' && serverSupportsSSL ?\n         \"\\n\\n\" +\n         \"This product includes software developed by the OpenSSL Project\\n\" +\n         \"for use in the OpenSSL Toolkit. (http://www.openssl.org/)\\n\" +\n         \"\\n\" +\n         \"This product includes cryptographic software written by \" +\n         \"Eric Young\\n(eay@cryptsoft.com)\" :\n         \"\"));\n};\n\n\n// VT100.js -- JavaScript based terminal emulator\n// Copyright (C) 2008-2010 Markus Gutschke <markus@shellinabox.com>\n//\n// This program is free software; you can redistribute it and/or modify\n// it under the terms of the GNU General Public License version 2 as\n// published by the Free Software Foundation.\n//\n// This program is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the\n// GNU General Public License for more details.\n//\n// You should have received a copy of the GNU General Public License along\n// with this program; if not, write to the Free Software Foundation, Inc.,\n// 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n//\n// In addition to these license terms, the author grants the following\n// additional rights:\n//\n// If you modify this program, or any covered work, by linking or\n// combining it with the OpenSSL project's OpenSSL library (or a\n// modified version of that library), containing parts covered by the\n// terms of the OpenSSL or SSLeay licenses, the author\n// grants you additional permission to convey the resulting work.\n// Corresponding Source for a non-source form of such a combination\n// shall include the source code for the parts of OpenSSL used as well\n// as that of the covered work.\n//\n// You may at your option choose to remove this additional permission from\n// the work, or from any part of it.\n//\n// It is possible to build this program in a way that it loads OpenSSL\n// libraries at run-time. If doing so, the following notices are required\n// by the OpenSSL and SSLeay licenses:\n//\n// This product includes software developed by the OpenSSL Project\n// for use in the OpenSSL Toolkit. (http://www.openssl.org/)\n//\n// This product includes cryptographic software written by Eric Young\n// (eay@cryptsoft.com)\n//\n//\n// The most up-to-date version of this program is always available from\n// http://shellinabox.com\n//\n//\n// Notes:\n//\n// The author believes that for the purposes of this license, you meet the\n// requirements for publishing the source code, if your web server publishes\n// the source in unmodified form (i.e. with licensing information, comments,\n// formatting, and identifier names intact). If there are technical reasons\n// that require you to make changes to the source code when serving the\n// JavaScript (e.g to remove pre-processor directives from the source), these\n// changes should be done in a reversible fashion.\n//\n// The author does not consider websites that reference this script in\n// unmodified form, and web servers that serve this script in unmodified form\n// to be derived works. 
As such, they are believed to be outside of the\n// scope of this license and not subject to the rights or restrictions of the\n// GNU General Public License.\n//\n// If in doubt, consult a legal professional familiar with the laws that\n// apply in your country.\n\n// #define ESnormal        0\n// #define ESesc           1\n// #define ESsquare        2\n// #define ESgetpars       3\n// #define ESgotpars       4\n// #define ESdeviceattr    5\n// #define ESfunckey       6\n// #define EShash          7\n// #define ESsetG0         8\n// #define ESsetG1         9\n// #define ESsetG2        10\n// #define ESsetG3        11\n// #define ESbang         12\n// #define ESpercent      13\n// #define ESignore       14\n// #define ESnonstd       15\n// #define ESpalette      16\n// #define EStitle        17\n// #define ESss2          18\n// #define ESss3          19\n\n// #define ATTR_DEFAULT   0x00F0\n// #define ATTR_REVERSE   0x0100\n// #define ATTR_UNDERLINE 0x0200\n// #define ATTR_DIM       0x0400\n// #define ATTR_BRIGHT    0x0800\n// #define ATTR_BLINK     0x1000\n\n// #define MOUSE_DOWN     0\n// #define MOUSE_UP       1\n// #define MOUSE_CLICK    2\n\nfunction VT100(container) {\n  if (typeof linkifyURLs == 'undefined' || linkifyURLs <= 0) {\n    this.urlRE            = null;\n  } else {\n    this.urlRE            = new RegExp(\n    // Known URL protocol are \"http\", \"https\", and \"ftp\".\n    '(?:http|https|ftp)://' +\n\n    // Optionally allow username and passwords.\n    '(?:[^:@/ \\u00A0]*(?::[^@/ \\u00A0]*)?@)?' +\n\n    // Hostname.\n    '(?:[1-9][0-9]{0,2}(?:[.][1-9][0-9]{0,2}){3}|' +\n    '[0-9a-fA-F]{0,4}(?::{1,2}[0-9a-fA-F]{1,4})+|' +\n    '(?!-)[^[!\"#$%&\\'()*+,/:;<=>?@\\\\^_`{|}~\\u0000- \\u007F-\\u00A0]+)' +\n\n    // Port\n    '(?::[1-9][0-9]*)?' +\n\n    // Path.\n    '(?:/(?:(?![/ \\u00A0]|[,.)}\"\\u0027!]+[ \\u00A0]|[,.)}\"\\u0027!]+$).)*)*|' +\n\n    (linkifyURLs <= 1 ? '' :\n    // Also support URLs without a protocol (assume \"http\").\n    // Optional username and password.\n    '(?:[^:@/ \\u00A0]*(?::[^@/ \\u00A0]*)?@)?' +\n\n    // Hostnames must end with a well-known top-level domain or must be\n    // numeric.\n    '(?:[1-9][0-9]{0,2}(?:[.][1-9][0-9]{0,2}){3}|' +\n    'localhost|' +\n    '(?:(?!-)' +\n        '[^.[!\"#$%&\\'()*+,/:;<=>?@\\\\^_`{|}~\\u0000- \\u007F-\\u00A0]+[.]){2,}' +\n    '(?:(?:com|net|org|edu|gov|aero|asia|biz|cat|coop|info|int|jobs|mil|mobi|'+\n    'museum|name|pro|tel|travel|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|' +\n    'au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|' +\n    'ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cu|cv|cx|cy|cz|de|dj|dk|dm|do|' +\n    'dz|ec|ee|eg|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|' +\n    'gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|' +\n    'ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|' +\n    'lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|' +\n    'mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|' +\n    'pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|' +\n    'sj|sk|sl|sm|sn|so|sr|st|su|sv|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|' +\n    'tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|' +\n    'yu|za|zm|zw|arpa)(?![a-zA-Z0-9])|[Xx][Nn]--[-a-zA-Z0-9]+))' +\n\n    // Port\n    '(?::[1-9][0-9]{0,4})?' 
+\n\n    // Path.\n    '(?:/(?:(?![/ \\u00A0]|[,.)}\"\\u0027!]+[ \\u00A0]|[,.)}\"\\u0027!]+$).)*)*|') +\n\n    // In addition, support e-mail address. Optionally, recognize \"mailto:\"\n    '(?:mailto:)' + (linkifyURLs <= 1 ? '' : '?') +\n\n    // Username:\n    '[-_.+a-zA-Z0-9]+@' +\n\n    // Hostname.\n    '(?!-)[-a-zA-Z0-9]+(?:[.](?!-)[-a-zA-Z0-9]+)?[.]' +\n    '(?:(?:com|net|org|edu|gov|aero|asia|biz|cat|coop|info|int|jobs|mil|mobi|'+\n    'museum|name|pro|tel|travel|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|' +\n    'au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|' +\n    'ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cu|cv|cx|cy|cz|de|dj|dk|dm|do|' +\n    'dz|ec|ee|eg|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|' +\n    'gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|' +\n    'ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|' +\n    'lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|' +\n    'mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|' +\n    'pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|' +\n    'sj|sk|sl|sm|sn|so|sr|st|su|sv|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|' +\n    'tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|' +\n    'yu|za|zm|zw|arpa)(?![a-zA-Z0-9])|[Xx][Nn]--[-a-zA-Z0-9]+)' +\n\n    // Optional arguments\n    '(?:[?](?:(?![ \\u00A0]|[,.)}\"\\u0027!]+[ \\u00A0]|[,.)}\"\\u0027!]+$).)*)?');\n  }\n  this.getUserSettings();\n  this.initializeElements(container);\n  this.maxScrollbackLines = 500;\n  this.npar               = 0;\n  this.par                = [ ];\n  this.isQuestionMark     = false;\n  this.savedX             = [ ];\n  this.savedY             = [ ];\n  this.savedAttr          = [ ];\n  this.savedUseGMap       = 0;\n  this.savedGMap          = [ this.Latin1Map, this.VT100GraphicsMap,\n                              this.CodePage437Map, this.DirectToFontMap ];\n  this.savedValid         = [ ];\n  this.respondString      = '';\n  this.titleString        = '';\n  this.internalClipboard  = undefined;\n  this.reset(true);\n}\n\nVT100.prototype.reset = function(clearHistory) {\n  this.isEsc                                         = 0 /* ESnormal */;\n  this.needWrap                                      = false;\n  this.autoWrapMode                                  = true;\n  this.dispCtrl                                      = false;\n  this.toggleMeta                                    = false;\n  this.insertMode                                    = false;\n  this.applKeyMode                                   = false;\n  this.cursorKeyMode                                 = false;\n  this.crLfMode                                      = false;\n  this.offsetMode                                    = false;\n  this.mouseReporting                                = false;\n  this.printing                                      = false;\n  if (typeof this.printWin != 'undefined' &&\n      this.printWin && !this.printWin.closed) {\n    this.printWin.close();\n  }\n  this.printWin                                      = null;\n  this.utfEnabled                                    = this.utfPreferred;\n  this.utfCount                                      = 0;\n  this.utfChar                                       = 0;\n  this.color                                         = 'ansi0 bgAnsi15';\n  this.style                                         = '';\n  this.attr                                          = 0x00F0 /* 
ATTR_DEFAULT */;\n  this.useGMap                                       = 0;\n  this.GMap                                          = [ this.Latin1Map,\n                                                         this.VT100GraphicsMap,\n                                                         this.CodePage437Map,\n                                                         this.DirectToFontMap];\n  this.translate                                     = this.GMap[this.useGMap];\n  this.top                                           = 0;\n  this.bottom                                        = this.terminalHeight;\n  this.lastCharacter                                 = ' ';\n  this.userTabStop                                   = [ ];\n\n  if (clearHistory) {\n    for (var i = 0; i < 2; i++) {\n      while (this.console[i].firstChild) {\n        this.console[i].removeChild(this.console[i].firstChild);\n      }\n    }\n  }\n\n  this.enableAlternateScreen(false);\n\n  var wasCompressed                                  = false;\n  var transform                                      = this.getTransformName();\n  if (transform) {\n    for (var i = 0; i < 2; ++i) {\n      wasCompressed                  |= this.console[i].style[transform] != '';\n      this.console[i].style[transform]               = '';\n    }\n    this.cursor.style[transform]                     = '';\n    this.space.style[transform]                      = '';\n    if (transform == 'filter') {\n      this.console[this.currentScreen].style.width   = '';\n    }\n  }\n  this.scale                                         = 1.0;\n  if (wasCompressed) {\n    this.resizer();\n  }\n\n  this.gotoXY(0, 0);\n  this.showCursor();\n  this.isInverted                                    = false;\n  this.refreshInvertedState();\n  this.clearRegion(0, 0, this.terminalWidth, this.terminalHeight,\n                   this.color, this.style);\n};\n\nVT100.prototype.addListener = function(elem, event, listener) {\n  try {\n    if (elem.addEventListener) {\n      elem.addEventListener(event, listener, false);\n    } else {\n      elem.attachEvent('on' + event, listener);\n    }\n  } catch (e) {\n  }\n};\n\nVT100.prototype.getUserSettings = function() {\n  // Compute hash signature to identify the entries in the userCSS menu.\n  // If the menu is unchanged from last time, default values can be\n  // looked up in a cookie associated with this page.\n  this.signature            = 3;\n  this.utfPreferred         = true;\n  this.visualBell           = typeof suppressAllAudio != 'undefined' &&\n                              suppressAllAudio;\n  this.autoprint            = true;\n  this.softKeyboard         = false;\n  this.blinkingCursor       = true;\n  if (this.visualBell) {\n    this.signature          = Math.floor(16807*this.signature + 1) %\n                                         ((1 << 31) - 1);\n  }\n  if (typeof userCSSList != 'undefined') {\n    for (var i = 0; i < userCSSList.length; ++i) {\n      var label             = userCSSList[i][0];\n      for (var j = 0; j < label.length; ++j) {\n        this.signature      = Math.floor(16807*this.signature+\n                                         label.charCodeAt(j)) %\n                                         ((1 << 31) - 1);\n      }\n      if (userCSSList[i][1]) {\n        this.signature      = Math.floor(16807*this.signature + 1) %\n                                         ((1 << 31) - 1);\n      }\n    }\n  }\n\n  var key                   = 'shellInABox=' + this.signature + ':';\n  var settings              = 
document.cookie.indexOf(key);\n  if (settings >= 0) {\n    settings                = document.cookie.substr(settings + key.length).\n                                                   replace(/([0-1]*).*/, \"$1\");\n    if (settings.length == 5 + (typeof userCSSList == 'undefined' ?\n                                0 : userCSSList.length)) {\n      this.utfPreferred     = settings.charAt(0) != '0';\n      this.visualBell       = settings.charAt(1) != '0';\n      this.autoprint        = settings.charAt(2) != '0';\n      this.softKeyboard     = settings.charAt(3) != '0';\n      this.blinkingCursor   = settings.charAt(4) != '0';\n      if (typeof userCSSList != 'undefined') {\n        for (var i = 0; i < userCSSList.length; ++i) {\n          userCSSList[i][2] = settings.charAt(i + 5) != '0';\n        }\n      }\n    }\n  }\n  this.utfEnabled           = this.utfPreferred;\n};\n\nVT100.prototype.storeUserSettings = function() {\n  var settings  = 'shellInABox=' + this.signature + ':' +\n                  (this.utfEnabled     ? '1' : '0') +\n                  (this.visualBell     ? '1' : '0') +\n                  (this.autoprint      ? '1' : '0') +\n                  (this.softKeyboard   ? '1' : '0') +\n                  (this.blinkingCursor ? '1' : '0');\n  if (typeof userCSSList != 'undefined') {\n    for (var i = 0; i < userCSSList.length; ++i) {\n      settings += userCSSList[i][2] ? '1' : '0';\n    }\n  }\n  var d         = new Date();\n  d.setDate(d.getDate() + 3653);\n  document.cookie = settings + ';expires=' + d.toGMTString();\n};\n\nVT100.prototype.initializeUserCSSStyles = function() {\n  this.usercssActions                    = [];\n  if (typeof userCSSList != 'undefined') {\n    var menu                             = '';\n    var group                            = '';\n    var wasSingleSel                     = 1;\n    var beginOfGroup                     = 0;\n    for (var i = 0; i <= userCSSList.length; ++i) {\n      if (i < userCSSList.length) {\n        var label                        = userCSSList[i][0];\n        var newGroup                     = userCSSList[i][1];\n        var enabled                      = userCSSList[i][2];\n\n        // Add user style sheet to document\n        var style                        = document.createElement('link');\n        var id                           = document.createAttribute('id');\n        id.nodeValue                     = 'usercss-' + i;\n        style.setAttributeNode(id);\n        var rel                          = document.createAttribute('rel');\n        rel.nodeValue                    = 'stylesheet';\n        style.setAttributeNode(rel);\n        var href                         = document.createAttribute('href');\n        href.nodeValue                   = 'usercss-' + i + '.css';\n        style.setAttributeNode(href);\n        var type                         = document.createAttribute('type');\n        type.nodeValue                   = 'text/css';\n        style.setAttributeNode(type);\n        document.getElementsByTagName('head')[0].appendChild(style);\n        style.disabled                   = !enabled;\n      }\n\n      // Add entry to menu\n      if (newGroup || i == userCSSList.length) {\n        if (beginOfGroup != 0 && (i - beginOfGroup > 1 || !wasSingleSel)) {\n          // The last group had multiple entries that are mutually exclusive;\n          // or the previous to last group did. 
In either case, we need to\n          // append a \"<hr />\" before we can add the last group to the menu.\n          menu                          += '<hr />';\n        }\n        wasSingleSel                     = i - beginOfGroup < 1;\n        menu                            += group;\n        group                            = '';\n\n        for (var j = beginOfGroup; j < i; ++j) {\n          this.usercssActions[this.usercssActions.length] =\n            function(vt100, current, begin, count) {\n\n              // Deselect all other entries in the group, then either select\n              // (for multiple entries in group) or toggle (for on/off entry)\n              // the current entry.\n              return function() {\n                var entry                = vt100.getChildById(vt100.menu,\n                                                              'beginusercss');\n                var i                    = -1;\n                var j                    = -1;\n                for (var c = count; c > 0; ++j) {\n                  if (entry.tagName == 'LI') {\n                    if (++i >= begin) {\n                      --c;\n                      var label          = vt100.usercss.childNodes[j];\n\n                      // Restore label to just the text content\n                      if (typeof label.textContent == 'undefined') {\n                        var s            = label.innerText;\n                        label.innerHTML  = '';\n                        label.appendChild(document.createTextNode(s));\n                      } else {\n                        label.textContent= label.textContent;\n                      }\n\n                      // User style sheets are numbered sequentially\n                      var sheet          = document.getElementById(\n                                                               'usercss-' + i);\n                      if (i == current) {\n                        if (count == 1) {\n                          sheet.disabled = !sheet.disabled;\n                        } else {\n                          sheet.disabled = false;\n                        }\n                        if (!sheet.disabled) {\n                          label.innerHTML= '<img src=\"/webshell/enabled.gif\" />' +\n                                           label.innerHTML;\n                        }\n                      } else {\n                        sheet.disabled   = true;\n                      }\n                      userCSSList[i][2]  = !sheet.disabled;\n                    }\n                  }\n                  entry                  = entry.nextSibling;\n                }\n\n                // If the font size changed, adjust cursor and line dimensions\n                this.cursor.style.cssText= '';\n                this.cursorWidth         = this.cursor.clientWidth;\n                this.cursorHeight        = this.lineheight.clientHeight;\n                for (i = 0; i < this.console.length; ++i) {\n                  for (var line = this.console[i].firstChild; line;\n                       line = line.nextSibling) {\n                    line.style.height    = this.cursorHeight + 'px';\n                  }\n                }\n                vt100.resizer();\n              };\n            }(this, j, beginOfGroup, i - beginOfGroup);\n        }\n\n        if (i == userCSSList.length) {\n          break;\n        }\n\n        beginOfGroup                     = i;\n      }\n      // Collect all entries in a group, before attaching them to the menu.\n  
    // This is necessary as we don't know whether this is a group of\n      // mutually exclusive options (which should be separated by \"<hr />\" on\n      // both ends), or whether this is a on/off toggle, which can be grouped\n      // together with other on/off options.\n      group                             +=\n        '<li>' + (enabled ? '<img src=\"/webshell/enabled.gif\" />' : '') +\n                 label +\n        '</li>';\n    }\n    this.usercss.innerHTML               = menu;\n  }\n};\n\nVT100.prototype.resetLastSelectedKey = function(e) {\n  var key                          = this.lastSelectedKey;\n  if (!key) {\n    return false;\n  }\n\n  var position                     = this.mousePosition(e);\n\n  // We don't get all the necessary events to reliably reselect a key\n  // if we moved away from it and then back onto it. We approximate the\n  // behavior by remembering the key until either we release the mouse\n  // button (we might never get this event if the mouse has since left\n  // the window), or until we move away too far.\n  var box                          = this.keyboard.firstChild;\n  if (position[0] <  box.offsetLeft + key.offsetWidth ||\n      position[1] <  box.offsetTop + key.offsetHeight ||\n      position[0] >= box.offsetLeft + box.offsetWidth - key.offsetWidth ||\n      position[1] >= box.offsetTop + box.offsetHeight - key.offsetHeight ||\n      position[0] <  box.offsetLeft + key.offsetLeft - key.offsetWidth ||\n      position[1] <  box.offsetTop + key.offsetTop - key.offsetHeight ||\n      position[0] >= box.offsetLeft + key.offsetLeft + 2*key.offsetWidth ||\n      position[1] >= box.offsetTop + key.offsetTop + 2*key.offsetHeight) {\n    if (this.lastSelectedKey.className) log.console('reset: deselecting');\n    this.lastSelectedKey.className = '';\n    this.lastSelectedKey           = undefined;\n  }\n  return false;\n};\n\nVT100.prototype.showShiftState = function(state) {\n  var style              = document.getElementById('shift_state');\n  if (state) {\n    this.setTextContentRaw(style,\n                           '#vt100 #keyboard .shifted {' +\n                             'display: inline }' +\n                           '#vt100 #keyboard .unshifted {' +\n                             'display: none }');\n  } else {\n    this.setTextContentRaw(style, '');\n  }\n  var elems              = this.keyboard.getElementsByTagName('I');\n  for (var i = 0; i < elems.length; ++i) {\n    if (elems[i].id == '16') {\n      elems[i].className = state ? 'selected' : '';\n    }\n  }\n};\n\nVT100.prototype.showCtrlState = function(state) {\n  var ctrl         = this.getChildById(this.keyboard, '17' /* Ctrl */);\n  if (ctrl) {\n    ctrl.className = state ? 'selected' : '';\n  }\n};\n\nVT100.prototype.showAltState = function(state) {\n  var alt         = this.getChildById(this.keyboard, '18' /* Alt */);\n  if (alt) {\n    alt.className = state ? 
VT100.prototype.showShiftState = function(state) {\n  var style              = document.getElementById('shift_state');\n  if (state) {\n    this.setTextContentRaw(style,\n                           '#vt100 #keyboard .shifted {' +\n                             'display: inline }' +\n                           '#vt100 #keyboard .unshifted {' +\n                             'display: none }');\n  } else {\n    this.setTextContentRaw(style, '');\n  }\n  var elems              = this.keyboard.getElementsByTagName('I');\n  for (var i = 0; i < elems.length; ++i) {\n    if (elems[i].id == '16') {\n      elems[i].className = state ? 'selected' : '';\n    }\n  }\n};\n\nVT100.prototype.showCtrlState = function(state) {\n  var ctrl         = this.getChildById(this.keyboard, '17' /* Ctrl */);\n  if (ctrl) {\n    ctrl.className = state ? 'selected' : '';\n  }\n};\n\nVT100.prototype.showAltState = function(state) {\n  var alt         = this.getChildById(this.keyboard, '18' /* Alt */);\n  if (alt) {\n    alt.className = state ? 'selected' : '';\n  }\n};\n\nVT100.prototype.clickedKeyboard = function(e, elem, ch, key, shift, ctrl, alt){\n  var fake      = [ ];\n  fake.charCode = ch;\n  fake.keyCode  = key;\n  fake.ctrlKey  = ctrl;\n  fake.shiftKey = shift;\n  fake.altKey   = alt;\n  fake.metaKey  = alt;\n  return this.handleKey(fake);\n};\n\nVT100.prototype.addKeyBinding = function(elem, ch, key, CH, KEY) {\n  if (elem == undefined) {\n    return;\n  }\n  if (ch == '\\u00A0') {\n    // &nbsp; should be treated as a regular space character.\n    ch                                  = ' ';\n  }\n  if (ch != undefined && CH == undefined) {\n    // For letter keys, we automatically compute the uppercase character code\n    // from the lowercase one.\n    CH                                  = ch.toUpperCase();\n  }\n  if (KEY == undefined && key != undefined) {\n    // Most keys have identical key codes for both lowercase and uppercase\n    // keypresses. Normally, only function keys would have distinct key codes,\n    // whereas regular keys have character codes.\n    KEY                                 = key;\n  } else if (KEY == undefined && CH != undefined) {\n    // For regular keys, copy the character code to the key code.\n    KEY                                 = CH.charCodeAt(0);\n  }\n  if (key == undefined && ch != undefined) {\n    // For regular keys, copy the character code to the key code.\n    key                                 = ch.charCodeAt(0);\n  }\n  // Convert characters to numeric character codes. If the character code\n  // is undefined (i.e. this is a function key), set it to zero.\n  ch                                    = ch ? ch.charCodeAt(0) : 0;\n  CH                                    = CH ? CH.charCodeAt(0) : 0;\n\n  
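// Example (added note): addKeyBinding(elem, 'a') ends up with ch = 97,\n  // key = 97, CH = 65, and KEY = 65, whereas a function key is registered\n  // by key code only, e.g. addKeyBinding(elem, undefined, 112) for F1,\n  // leaving the character codes at zero.\n\n  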
// Mouse down events highlight the key. We also set lastSelectedKey. This\n  // is needed so that mouseout/mouseover can keep track of the key that\n  // is currently being clicked.\n  this.addListener(elem, 'mousedown',\n    function(vt100, elem, key) { return function(e) {\n      if ((e.which || e.button) == 1) {\n        if (vt100.lastSelectedKey) {\n          vt100.lastSelectedKey.className= '';\n        }\n        // Highlight the key while the mouse button is held down.\n        if (key == 16 /* Shift */) {\n          if (!elem.className != vt100.isShift) {\n            vt100.showShiftState(!vt100.isShift);\n          }\n        } else if (key == 17 /* Ctrl */) {\n          if (!elem.className != vt100.isCtrl) {\n            vt100.showCtrlState(!vt100.isCtrl);\n          }\n        } else if (key == 18 /* Alt */) {\n          if (!elem.className != vt100.isAlt) {\n            vt100.showAltState(!vt100.isAlt);\n          }\n        } else {\n          elem.className                  = 'selected';\n        }\n        vt100.lastSelectedKey             = elem;\n      }\n      return false; }; }(this, elem, key));\n  var clicked                           =\n    // Modifier keys update the state of the keyboard, but do not generate\n    // any key clicks that get forwarded to the application.\n    key >= 16 /* Shift */ && key <= 18 /* Alt */ ?\n    function(vt100, elem) { return function(e) {\n      if (elem == vt100.lastSelectedKey) {\n        if (key == 16 /* Shift */) {\n          // The user clicked the Shift key\n          vt100.isShift                 = !vt100.isShift;\n          vt100.showShiftState(vt100.isShift);\n        } else if (key == 17 /* Ctrl */) {\n          vt100.isCtrl                  = !vt100.isCtrl;\n          vt100.showCtrlState(vt100.isCtrl);\n        } else if (key == 18 /* Alt */) {\n          vt100.isAlt                   = !vt100.isAlt;\n          vt100.showAltState(vt100.isAlt);\n        }\n        vt100.lastSelectedKey           = undefined;\n      }\n      if (vt100.lastSelectedKey) {\n        vt100.lastSelectedKey.className = '';\n        vt100.lastSelectedKey           = undefined;\n      }\n      return false; }; }(this, elem) :\n    // Regular keys generate key clicks, when the mouse button is released or\n    // when a mouse click event is received.\n    function(vt100, elem, ch, key, CH, KEY) { return function(e) {\n      if (vt100.lastSelectedKey) {\n        if (elem == vt100.lastSelectedKey) {\n          // The user clicked a key.\n          if (vt100.isShift) {\n            vt100.clickedKeyboard(e, elem, CH, KEY,\n                                  true, vt100.isCtrl, vt100.isAlt);\n          } else {\n            vt100.clickedKeyboard(e, elem, ch, key,\n                                  false, vt100.isCtrl, vt100.isAlt);\n          }\n          vt100.isShift                 = false;\n          vt100.showShiftState(false);\n          vt100.isCtrl                  = false;\n          vt100.showCtrlState(false);\n          vt100.isAlt                   = false;\n          vt100.showAltState(false);\n        }\n        vt100.lastSelectedKey.className = '';\n        vt100.lastSelectedKey           = undefined;\n      }\n      elem.className                    = '';\n      return false; }; }(this, elem, ch, key, CH, KEY);\n  this.addListener(elem, 'mouseup', clicked);\n  this.addListener(elem, 'click', clicked);\n\n  // When moving the mouse away from a key, check if any keys need to be\n  // deselected.\n  this.addListener(elem, 'mouseout',\n    function(vt100, elem, key) { return function(e) {\n      if (key == 16 /* 
Shift */) {\n        if (!elem.className == vt100.isShift) {\n          vt100.showShiftState(vt100.isShift);\n        }\n      } else if (key == 17 /* Ctrl */) {\n        if (!elem.className == vt100.isCtrl) {\n          vt100.showCtrlState(vt100.isCtrl);\n        }\n      } else if (key == 18 /* Alt */) {\n        if (!elem.className == vt100.isAlt) {\n          vt100.showAltState(vt100.isAlt);\n        }\n      } else if (elem.className) {\n        elem.className                  = '';\n        vt100.lastSelectedKey           = elem;\n      } else if (vt100.lastSelectedKey) {\n        vt100.resetLastSelectedKey(e);\n      }\n      return false; }; }(this, elem, key));\n\n  // When moving the mouse over a key, select it if the user is still holding\n  // the mouse button down (i.e. elem == lastSelectedKey)\n  this.addListener(elem, 'mouseover',\n    function(vt100, elem, key) { return function(e) {\n      if (elem == vt100.lastSelectedKey) {\n        if (key == 16 /* Shift */) {\n          if (!elem.className != vt100.isShift) {\n            vt100.showShiftState(!vt100.isShift);\n          }\n        } else if (key == 17 /* Ctrl */) {\n          if (!elem.className != vt100.isCtrl) {\n            vt100.showCtrlState(!vt100.isCtrl);\n          }\n        } else if (key == 18 /* Alt */) {\n          if (!elem.className != vt100.isAlt) {\n            vt100.showAltState(!vt100.isAlt);\n          }\n        } else if (!elem.className) {\n          elem.className                = 'selected';\n        }\n      } else {\n        vt100.resetLastSelectedKey(e);\n      }\n      return false; }; }(this, elem, key));\n};\n\nVT100.prototype.initializeKeyBindings = function(elem) {\n  if (elem) {\n    if (elem.nodeName == \"I\" || elem.nodeName == \"B\") {\n      if (elem.id) {\n        // Function keys. 
The Javascript keycode is part of the \"id\"\n        var i     = parseInt(elem.id);\n        if (i) {\n          // If the id does not parse as a number, it is not a keycode.\n          this.addKeyBinding(elem, undefined, i);\n        }\n      } else {\n        var child = elem.firstChild;\n        if (child) {\n          if (child.nodeName == \"#text\") {\n            // If the key only has a text node as a child, then it is a letter.\n            // Automatically compute the lower and upper case version of the\n            // key.\n            var text = this.getTextContent(child) ||\n                       this.getTextContent(elem);\n            this.addKeyBinding(elem, text.toLowerCase());\n          } else if (child.nextSibling) {\n            // If the key has two children, they are the lower and upper case\n            // character code, respectively.\n            this.addKeyBinding(elem, this.getTextContent(child), undefined,\n                               this.getTextContent(child.nextSibling));\n          }\n        }\n      }\n    }\n  }\n  // Recursively parse all other child nodes.\n  for (elem = elem.firstChild; elem; elem = elem.nextSibling) {\n    this.initializeKeyBindings(elem);\n  }\n};\n\nVT100.prototype.initializeKeyboardButton = function() {\n  // Configure mouse event handlers for button that displays/hides keyboard\n  this.addListener(this.keyboardImage, 'click',\n    function(vt100) { return function(e) {\n      if (vt100.keyboard.style.display != '') {\n        if (vt100.reconnectBtn.style.visibility != '') {\n          vt100.initializeKeyboard();\n          vt100.showSoftKeyboard();\n        }\n      } else {\n        vt100.hideSoftKeyboard();\n        vt100.input.focus();\n      }\n      return false; }; }(this));\n\n  // Enable button that displays keyboard\n  if (this.softKeyboard) {\n    this.keyboardImage.style.visibility = 'visible';\n  }\n};\n\nVT100.prototype.initializeKeyboard = function() {\n  // Only need to initialize the keyboard the very first time. 
When doing so,\n  // copy the keyboard layout from the iframe.\n  if (this.keyboard.firstChild) {\n    return;\n  }\n  this.keyboard.innerHTML               =\n                                    this.layout.contentDocument.body.innerHTML;\n  var box                               = this.keyboard.firstChild;\n  this.hideSoftKeyboard();\n\n  // Configure mouse event handlers for on-screen keyboard\n  this.addListener(this.keyboard, 'click',\n    function(vt100) { return function(e) {\n      vt100.hideSoftKeyboard();\n      vt100.input.focus();\n      return false; }; }(this));\n  this.addListener(this.keyboard, 'selectstart', this.cancelEvent);\n  this.addListener(box, 'click', this.cancelEvent);\n  this.addListener(box, 'mouseup',\n    function(vt100) { return function(e) {\n      if (vt100.lastSelectedKey) {\n        vt100.lastSelectedKey.className = '';\n        vt100.lastSelectedKey           = undefined;\n      }\n      return false; }; }(this));\n  this.addListener(box, 'mouseout',\n    function(vt100) { return function(e) {\n      return vt100.resetLastSelectedKey(e); }; }(this));\n  this.addListener(box, 'mouseover',\n    function(vt100) { return function(e) {\n      return vt100.resetLastSelectedKey(e); }; }(this));\n\n  // Configure SHIFT key behavior\n  var style                             = document.createElement('style');\n  var id                                = document.createAttribute('id');\n  id.nodeValue                          = 'shift_state';\n  style.setAttributeNode(id);\n  var type                              = document.createAttribute('type');\n  type.nodeValue                        = 'text/css';\n  style.setAttributeNode(type);\n  document.getElementsByTagName('head')[0].appendChild(style);\n\n  // Set up key bindings\n  this.initializeKeyBindings(box);\n};\n\nVT100.prototype.initializeElements = function(container) {\n  // If the necessary objects have not already been defined in the HTML\n  // page, create them now.\n  if (container) {\n    this.container             = container;\n  } else if (!(this.container  = document.getElementById('vt100'))) {\n    this.container             = document.createElement('div');\n    this.container.id          = 'vt100';\n    document.body.appendChild(this.container);\n  }\n\n  if (!this.getChildById(this.container, 'reconnect')   ||\n      !this.getChildById(this.container, 'menu')        ||\n      !this.getChildById(this.container, 'keyboard')    ||\n      !this.getChildById(this.container, 'kbd_button')  ||\n      !this.getChildById(this.container, 'kbd_img')     ||\n      !this.getChildById(this.container, 'layout')      ||\n      !this.getChildById(this.container, 'scrollable')  ||\n      !this.getChildById(this.container, 'console')     ||\n      !this.getChildById(this.container, 'alt_console') ||\n      !this.getChildById(this.container, 'ieprobe')     ||\n      !this.getChildById(this.container, 'padding')     ||\n      !this.getChildById(this.container, 'cursor')      ||\n      !this.getChildById(this.container, 'lineheight')  ||\n      !this.getChildById(this.container, 'usercss')     ||\n      !this.getChildById(this.container, 'space')       ||\n      !this.getChildById(this.container, 'input')       ||\n      !this.getChildById(this.container, 'cliphelper')) {\n    // Only enable the \"embed\" object, if we have a suitable plugin. Otherwise,\n    // we might get a pointless warning that a suitable plugin is not yet\n    // installed. 
If in doubt, we'd rather just stay silent.\n    var embed                  = '';\n    try {\n      if (typeof navigator.mimeTypes[\"audio/x-wav\"].enabledPlugin.name !=\n          'undefined') {\n        embed                  = typeof suppressAllAudio != 'undefined' &&\n                                 suppressAllAudio ? \"\" :\n        '<embed classid=\"clsid:02BF25D5-8C17-4B23-BC80-D3488ABDDC6B\" ' +\n                       'id=\"beep_embed\" ' +\n                       'src=\"beep.wav\" ' +\n                       'autostart=\"false\" ' +\n                       'volume=\"100\" ' +\n                       'enablejavascript=\"true\" ' +\n                       'type=\"audio/x-wav\" ' +\n                       'height=\"16\" ' +\n                       'width=\"200\" ' +\n                       'style=\"position:absolute;left:-1000px;top:-1000px\" />';\n      }\n    } catch (e) {\n    }\n\n    this.container.innerHTML   =\n                       '<div id=\"reconnect\" style=\"visibility: hidden\">' +\n                         '<input type=\"button\" value=\"Connect\" ' +\n                                'onsubmit=\"return false\" />' +\n                       '</div>' +\n                       '<div id=\"cursize\" style=\"visibility: hidden\">' +\n                       '</div>' +\n                       '<div id=\"menu\"></div>' +\n                       '<div id=\"keyboard\" unselectable=\"on\">' +\n                       '</div>' +\n                       '<div id=\"scrollable\">' +\n                         '<table id=\"kbd_button\">' +\n                           '<tr><td width=\"100%\">&nbsp;</td>' +\n                           '<td><img id=\"kbd_img\" src=\"/webshell/keyboard.png\" /></td>' +\n                           '<td>&nbsp;&nbsp;&nbsp;&nbsp;</td></tr>' +\n                         '</table>' +\n                         '<pre id=\"lineheight\">&nbsp;</pre>' +\n                         '<pre id=\"console\">' +\n                           '<pre></pre>' +\n                           '<div id=\"ieprobe\"><span>&nbsp;</span></div>' +\n                         '</pre>' +\n                         '<pre id=\"alt_console\" style=\"display: none\"></pre>' +\n                         '<div id=\"padding\"></div>' +\n                         '<pre id=\"cursor\">&nbsp;</pre>' +\n                       '</div>' +\n                       '<div class=\"hidden\">' +\n                         '<div id=\"usercss\"></div>' +\n                         '<pre><div><span id=\"space\"></span></div></pre>' +\n                         '<input type=\"textfield\" id=\"input\" autocorrect=\"off\" autocapitalize=\"off\" />' +\n                         '<input type=\"textfield\" id=\"cliphelper\" />' +\n                         (typeof suppressAllAudio != 'undefined' &&\n                          suppressAllAudio ? 
\"\" :\n                         embed + '<bgsound id=\"beep_bgsound\" loop=1 />') +\n                          '<iframe id=\"layout\" src=\"/webshell/keyboard.html\" />' +\n                        '</div>';\n  }\n\n  // Find the object used for playing the \"beep\" sound, if any.\n  if (typeof suppressAllAudio != 'undefined' && suppressAllAudio) {\n    this.beeper                = undefined;\n  } else {\n    this.beeper                = this.getChildById(this.container,\n                                                   'beep_embed');\n    if (!this.beeper || !this.beeper.Play) {\n      this.beeper              = this.getChildById(this.container,\n                                                   'beep_bgsound');\n      if (!this.beeper || typeof this.beeper.src == 'undefined') {\n        this.beeper            = undefined;\n      }\n    }\n  }\n\n  // Initialize the variables for finding the text console and the\n  // cursor.\n  this.reconnectBtn            = this.getChildById(this.container,'reconnect');\n  this.curSizeBox              = this.getChildById(this.container, 'cursize');\n  this.menu                    = this.getChildById(this.container, 'menu');\n  this.keyboard                = this.getChildById(this.container, 'keyboard');\n  this.keyboardImage           = this.getChildById(this.container, 'kbd_img');\n  this.layout                  = this.getChildById(this.container, 'layout');\n  this.scrollable              = this.getChildById(this.container,\n                                                                 'scrollable');\n  this.lineheight              = this.getChildById(this.container,\n                                                                 'lineheight');\n  this.console                 =\n                          [ this.getChildById(this.container, 'console'),\n                            this.getChildById(this.container, 'alt_console') ];\n  var ieProbe                  = this.getChildById(this.container, 'ieprobe');\n  this.padding                 = this.getChildById(this.container, 'padding');\n  this.cursor                  = this.getChildById(this.container, 'cursor');\n  this.usercss                 = this.getChildById(this.container, 'usercss');\n  this.space                   = this.getChildById(this.container, 'space');\n  this.input                   = this.getChildById(this.container, 'input');\n  this.cliphelper              = this.getChildById(this.container,\n                                                                 'cliphelper');\n\n  // Add any user selectable style sheets to the menu\n  this.initializeUserCSSStyles();\n\n  // Remember the dimensions of a standard character glyph. We would\n  // expect that we could just check cursor.clientWidth/Height at any time,\n  // but it turns out that browsers sometimes invalidate these values\n  // (e.g. while displaying a print preview screen).\n  this.cursorWidth             = this.cursor.clientWidth;\n  this.cursorHeight            = this.lineheight.clientHeight;\n\n  // IE has a slightly different boxing model, that we need to compensate for\n  this.isIE                    = ieProbe.offsetTop > 1;\n  ieProbe                      = undefined;\n  this.console.innerHTML       = '';\n\n  // Determine if the terminal window is positioned at the beginning of the\n  // page, or if it is embedded somewhere else in the page. 
For full-screen\n  // terminals, automatically resize whenever the browser window changes.\n  var marginTop                = parseInt(this.getCurrentComputedStyle(\n                                          document.body, 'marginTop'));\n  var marginLeft               = parseInt(this.getCurrentComputedStyle(\n                                          document.body, 'marginLeft'));\n  var marginRight              = parseInt(this.getCurrentComputedStyle(\n                                          document.body, 'marginRight'));\n  var x                        = this.container.offsetLeft;\n  var y                        = this.container.offsetTop;\n  for (var parent = this.container; parent = parent.offsetParent; ) {\n    x                         += parent.offsetLeft;\n    y                         += parent.offsetTop;\n  }\n  this.isEmbedded              = marginTop != y ||\n                                 marginLeft != x ||\n                                 (window.innerWidth ||\n                                  document.documentElement.clientWidth ||\n                                  document.body.clientWidth) -\n                                 marginRight != x + this.container.offsetWidth;\n  if (!this.isEmbedded) {\n    // Some browsers generate resize events when the terminal is first\n    // shown. Disable showing the size indicator until a little bit after\n    // the terminal has been rendered the first time.\n    this.indicateSize          = false;\n    setTimeout(function(vt100) {\n      return function() {\n        vt100.indicateSize     = true;\n      };\n    }(this), 100);\n    this.addListener(window, 'resize',\n                     function(vt100) {\n                       return function() {\n                         vt100.hideContextMenu();\n                         vt100.resizer();\n                         vt100.showCurrentSize();\n                        }\n                      }(this));\n\n    // Hide extra scrollbars attached to window\n    document.body.style.margin = '0px';\n    try { document.body.style.overflow ='hidden'; } catch (e) { }\n    try { document.body.oncontextmenu = function() {return false;};} catch(e){}\n  }\n\n  // Set up onscreen soft keyboard\n  this.initializeKeyboardButton();\n\n  // Hide context menu\n  this.hideContextMenu();\n\n  // Add listener to reconnect button\n  this.addListener(this.reconnectBtn.firstChild, 'click',\n                   function(vt100) {\n                     return function() {\n                       var rc = vt100.reconnect();\n                       vt100.input.focus();\n                       return rc;\n                     }\n                   }(this));\n\n  // Add input listeners\n  this.addListener(this.input, 'blur',\n                   function(vt100) {\n                     return function() { vt100.blurCursor(); } }(this));\n  this.addListener(this.input, 'focus',\n                   function(vt100) {\n                     return function() { vt100.focusCursor(); } }(this));\n  this.addListener(this.input, 'keydown',\n                   function(vt100) {\n                     return function(e) {\n                       if (!e) e = window.event;\n                       return vt100.keyDown(e); } }(this));\n  this.addListener(this.input, 'keypress',\n                   function(vt100) {\n                     return function(e) {\n                       if (!e) e = window.event;\n                       return vt100.keyPressed(e); } }(this));\n  this.addListener(this.input, 'keyup',\n                   
function(vt100) {\n                     return function(e) {\n                       if (!e) e = window.event;\n                       return vt100.keyUp(e); } }(this));\n\n  // Attach listeners that move the focus to the <input> field. This way we\n  // can make sure that we can receive keyboard input.\n  var mouseEvent               = function(vt100, type) {\n    return function(e) {\n      if (!e) e = window.event;\n      return vt100.mouseEvent(e, type);\n    };\n  };\n  this.addListener(this.scrollable,'mousedown',mouseEvent(this, 0 /* MOUSE_DOWN */));\n  this.addListener(this.scrollable,'mouseup',  mouseEvent(this, 1 /* MOUSE_UP */));\n  this.addListener(this.scrollable,'click',    mouseEvent(this, 2 /* MOUSE_CLICK */));\n\n  // Check that browser supports drag and drop\n  if ('draggable' in document.createElement('span')) {\n      var dropEvent            = function (vt100) {\n          return function(e) {\n              if (!e) e = window.event;\n              if (e.preventDefault) e.preventDefault();\n              vt100.keysPressed(e.dataTransfer.getData('Text'));\n              return false;\n          };\n      };\n      // Tell the browser that we *can* drop on this target\n      this.addListener(this.scrollable, 'dragover', cancel);\n      this.addListener(this.scrollable, 'dragenter', cancel);\n\n      // Add a listener for the drop event\n      this.addListener(this.scrollable, 'drop', dropEvent(this));\n  }\n\n  // Initialize the blank terminal window.\n  this.currentScreen           = 0;\n  this.cursorX                 = 0;\n  this.cursorY                 = 0;\n  this.numScrollbackLines      = 0;\n  this.top                     = 0;\n  this.bottom                  = 0x7FFFFFFF;\n  this.scale                   = 1.0;\n  this.resizer();\n  this.focusCursor();\n  this.input.focus();\n};\n\nfunction cancel(event) {\n  if (event.preventDefault) {\n    event.preventDefault();\n  }\n  return false;\n}\n\nVT100.prototype.getChildById = function(parent, id) {\n  var nodeList = parent.all || parent.getElementsByTagName('*');\n  if (typeof nodeList.namedItem == 'undefined') {\n    for (var i = 0; i < nodeList.length; i++) {\n      if (nodeList[i].id == id) {\n        return nodeList[i];\n      }\n    }\n    return null;\n  } else {\n    var elem = (parent.all || parent.getElementsByTagName('*')).namedItem(id);\n    return elem ? 
elem[0] || elem : null;\n  }\n};\n\nVT100.prototype.getCurrentComputedStyle = function(elem, style) {\n  if (typeof elem.currentStyle != 'undefined') {\n    return elem.currentStyle[style];\n  } else {\n    return document.defaultView.getComputedStyle(elem, null)[style];\n  }\n};\n\nVT100.prototype.reconnect = function() {\n  return false;\n};\n\nVT100.prototype.showReconnect = function(state) {\n  if (state) {\n    this.hideSoftKeyboard();\n    this.reconnectBtn.style.visibility = '';\n  } else {\n    this.reconnectBtn.style.visibility = 'hidden';\n  }\n};\n\nVT100.prototype.repairElements = function(console) {\n  for (var line = console.firstChild; line; line = line.nextSibling) {\n    if (!line.clientHeight) {\n      var newLine = document.createElement(line.tagName);\n      newLine.style.cssText       = line.style.cssText;\n      newLine.className           = line.className;\n      if (line.tagName == 'DIV') {\n        for (var span = line.firstChild; span; span = span.nextSibling) {\n          var newSpan             = document.createElement(span.tagName);\n          newSpan.style.cssText   = span.style.cssText;\n          newSpan.className       = span.className;\n          this.setTextContent(newSpan, this.getTextContent(span));\n          newLine.appendChild(newSpan);\n        }\n      } else {\n        this.setTextContent(newLine, this.getTextContent(line));\n      }\n      line.parentNode.replaceChild(newLine, line);\n      line                        = newLine;\n    }\n  }\n};\n\nVT100.prototype.resized = function(w, h) {\n};\n\nVT100.prototype.resizer = function() {\n  // Hide onscreen soft keyboard\n  this.hideSoftKeyboard();\n\n  // The cursor can get corrupted if the print-preview is displayed in Firefox.\n  // Recreating it will repair it.\n  var newCursor                = document.createElement('pre');\n  this.setTextContent(newCursor, ' ');\n  newCursor.id                 = 'cursor';\n  newCursor.style.cssText      = this.cursor.style.cssText;\n  this.cursor.parentNode.insertBefore(newCursor, this.cursor);\n  if (!newCursor.clientHeight) {\n    // Things are broken right now. This is probably because we are\n    // displaying the print-preview. Just don't change any of our settings\n    // until the print dialog is closed again.\n    newCursor.parentNode.removeChild(newCursor);\n    return;\n  } else {\n    // Swap the old broken cursor for the newly created one.\n    this.cursor.parentNode.removeChild(this.cursor);\n    this.cursor                = newCursor;\n  }\n\n  // Really horrible things happen if the contents of the terminal change\n  // while the print-preview is showing. We get HTML elements that show up\n  // in the DOM, but that do not take up any space. Find these elements and\n  // try to fix them.\n  this.repairElements(this.console[0]);\n  this.repairElements(this.console[1]);\n\n  // Lock the cursor size to the size of a normal character. This helps with\n  // characters that are taller/shorter than normal. Unfortunately, we will\n  // still get confused if somebody enters a character that is wider/narrower\n  // than normal. 
This can happen if the browser tries to substitute a\n  // character from a different font.\n  this.cursor.style.width      = this.cursorWidth  + 'px';\n  this.cursor.style.height     = this.cursorHeight + 'px';\n\n  // Adjust height for one pixel padding of the #vt100 element.\n  // The latter is necessary to properly display the inactive cursor.\n  var console                  = this.console[this.currentScreen];\n  var height                   = (this.isEmbedded ? this.container.clientHeight\n                                  : (window.innerHeight ||\n                                     document.documentElement.clientHeight ||\n                                     document.body.clientHeight))-1;\n  var partial                  = height % this.cursorHeight;\n  this.scrollable.style.height = (height > 0 ? height : 0) + 'px';\n  this.padding.style.height    = (partial > 0 ? partial : 0) + 'px';\n  var oldTerminalHeight        = this.terminalHeight;\n  this.updateWidth();\n  this.updateHeight();\n\n  // Clip the cursor to the visible screen.\n  var cx                       = this.cursorX;\n  var cy                       = this.cursorY + this.numScrollbackLines;\n\n  // The alternate screen never keeps a scrollback buffer.\n  this.updateNumScrollbackLines();\n  while (this.currentScreen && this.numScrollbackLines > 0) {\n    console.removeChild(console.firstChild);\n    this.numScrollbackLines--;\n  }\n  cy                          -= this.numScrollbackLines;\n  if (cx < 0) {\n    cx                         = 0;\n  } else if (cx > this.terminalWidth) {\n    cx                         = this.terminalWidth - 1;\n    if (cx < 0) {\n      cx                       = 0;\n    }\n  }\n  if (cy < 0) {\n    cy                         = 0;\n  } else if (cy > this.terminalHeight) {\n    cy                         = this.terminalHeight - 1;\n    if (cy < 0) {\n      cy                       = 0;\n    }\n  }\n\n  // Clip the scroll region to the visible screen.\n  if (this.bottom > this.terminalHeight ||\n      this.bottom == oldTerminalHeight) {\n    this.bottom                = this.terminalHeight;\n  }\n  if (this.top >= this.bottom) {\n    this.top                   = this.bottom-1;\n    if (this.top < 0) {\n      this.top                 = 0;\n    }\n  }\n\n  // Truncate lines, if necessary. 
Explicitly reposition cursor (this is\n  // particularly important after changing the screen number), and reset\n  // the scroll region to the default.\n  this.truncateLines(this.terminalWidth);\n  this.putString(cx, cy, '', undefined);\n  this.scrollable.scrollTop    = this.numScrollbackLines *\n                                 this.cursorHeight + 1;\n\n  // Update classNames for lines in the scrollback buffer\n  var line                     = console.firstChild;\n  for (var i = 0; i < this.numScrollbackLines; i++) {\n    line.className             = 'scrollback';\n    line                       = line.nextSibling;\n  }\n  while (line) {\n    line.className             = '';\n    line                       = line.nextSibling;\n  }\n\n  // Reposition the reconnect button\n  this.reconnectBtn.style.left = (this.terminalWidth*this.cursorWidth/\n                                  this.scale -\n                                  this.reconnectBtn.clientWidth)/2 + 'px';\n  this.reconnectBtn.style.top  = (this.terminalHeight*this.cursorHeight-\n                                  this.reconnectBtn.clientHeight)/2 + 'px';\n\n  // Send notification that the window size has been changed\n  this.resized(this.terminalWidth, this.terminalHeight);\n};\n\nVT100.prototype.showCurrentSize = function() {\n  if (!this.indicateSize) {\n    return;\n  }\n  this.curSizeBox.innerHTML             = '' + this.terminalWidth + 'x' +\n                                               this.terminalHeight;\n  this.curSizeBox.style.left            =\n                                      (this.terminalWidth*this.cursorWidth/\n                                       this.scale -\n                                       this.curSizeBox.clientWidth)/2 + 'px';\n  this.curSizeBox.style.top             =\n                                      (this.terminalHeight*this.cursorHeight -\n                                       this.curSizeBox.clientHeight)/2 + 'px';\n  this.curSizeBox.style.visibility      = '';\n  if (this.curSizeTimeout) {\n    clearTimeout(this.curSizeTimeout);\n  }\n\n  // Only show the terminal size for a short amount of time after resizing.\n  // Then hide this information, again. Some browsers generate resize events\n  // throughout the entire resize operation. This is nice, and we will show\n  // the terminal size while the user is dragging the window borders.\n  // Other browsers only generate a single event when the user releases the\n  // mouse. 
In those cases, we can only show the terminal size once at the\n  // end of the resize operation.\n  this.curSizeTimeout                   = setTimeout(function(vt100) {\n    return function() {\n      vt100.curSizeTimeout              = null;\n      vt100.curSizeBox.style.visibility = 'hidden';\n    };\n  }(this), 1000);\n};\n\nVT100.prototype.selection = function() {\n  try {\n    return '' + (window.getSelection && window.getSelection() ||\n                 document.selection && document.selection.type == 'Text' &&\n                 document.selection.createRange().text || '');\n  } catch (e) {\n  }\n  return '';\n};\n\nVT100.prototype.cancelEvent = function(event) {\n  try {\n    // For non-IE browsers\n    event.stopPropagation();\n    event.preventDefault();\n  } catch (e) {\n  }\n  try {\n    // For IE\n    event.cancelBubble = true;\n    event.returnValue  = false;\n    event.button       = 0;\n    event.keyCode      = 0;\n  } catch (e) {\n  }\n  return false;\n};\n\nVT100.prototype.mousePosition = function(event) {\n  var offsetX      = this.container.offsetLeft;\n  var offsetY      = this.container.offsetTop;\n  for (var e = this.container; e = e.offsetParent; ) {\n    offsetX       += e.offsetLeft;\n    offsetY       += e.offsetTop;\n  }\n  return [ event.clientX - offsetX,\n           event.clientY - offsetY ];\n};\n\nVT100.prototype.mouseEvent = function(event, type) {\n  // If any text is currently selected, do not move the focus as that would\n  // invalidate the selection.\n  var selection    = this.selection();\n  if ((type == 1 /* MOUSE_UP */ || type == 2 /* MOUSE_CLICK */) && !selection.length) {\n    this.input.focus();\n  }\n\n  // Compute mouse position in characters.\n  var position     = this.mousePosition(event);\n  var x            = Math.floor(position[0] / this.cursorWidth);\n  var y            = Math.floor((position[1] + this.scrollable.scrollTop) /\n                                this.cursorHeight) - this.numScrollbackLines;\n  var inside       = true;\n  if (x >= this.terminalWidth) {\n    x              = this.terminalWidth - 1;\n    inside         = false;\n  }\n  if (x < 0) {\n    x              = 0;\n    inside         = false;\n  }\n  if (y >= this.terminalHeight) {\n    y              = this.terminalHeight - 1;\n    inside         = false;\n  }\n  if (y < 0) {\n    y              = 0;\n    inside         = false;\n  }\n\n  // Compute button number and modifier keys.\n  var button       = type != 0 /* MOUSE_DOWN */ ? 3 :\n                     typeof event.pageX != 'undefined' ? event.button :\n                     [ undefined, 0, 2, 0, 1, 0, 1, 0  ][event.button];\n  if (button != undefined) {\n    if (event.shiftKey) {\n      button      |= 0x04;\n    }\n    if (event.altKey || event.metaKey) {\n      button      |= 0x08;\n    }\n    if (event.ctrlKey) {\n      button      |= 0x10;\n    }\n  }\n\n  // Report mouse events if they happen inside of the current screen and\n  // with the SHIFT key unpressed. 
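(Added\n  // note: the report built below uses the classic X10/xterm mouse encoding,\n  // ESC [ M followed by three bytes that hold the button plus 32 and the\n  // 1-based column and row each plus 32, which is why 33 gets added to the\n  // 0-based x and y.) 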
Both of these restrictions do not apply\n  // for button releases, as we always want to report those.\n  if (this.mouseReporting && !selection.length &&\n      (type != 0 /* MOUSE_DOWN */ || !event.shiftKey)) {\n    if (inside || type != 0 /* MOUSE_DOWN */) {\n      if (button != undefined) {\n        var report = '\\u001B[M' + String.fromCharCode(button + 32) +\n                                  String.fromCharCode(x      + 33) +\n                                  String.fromCharCode(y      + 33);\n        if (type != 2 /* MOUSE_CLICK */) {\n          this.keysPressed(report);\n        }\n\n        // If we reported the event, stop propagating it (not sure, if this\n        // actually works on most browsers; blocking the global \"oncontextmenu\"\n        // event is still necessary).\n        return this.cancelEvent(event);\n      }\n    }\n  }\n\n  // Bring up context menu.\n  if (button == 2 && !event.shiftKey) {\n    if (type == 0 /* MOUSE_DOWN */) {\n      this.showContextMenu(position[0], position[1]);\n    }\n    return this.cancelEvent(event);\n  }\n\n  if (this.mouseReporting) {\n    try {\n      event.shiftKey         = false;\n    } catch (e) {\n    }\n  }\n\n  return true;\n};\n\nVT100.prototype.replaceChar = function(s, ch, repl) {\n  for (var i = -1;;) {\n    i = s.indexOf(ch, i + 1);\n    if (i < 0) {\n      break;\n    }\n    s = s.substr(0, i) + repl + s.substr(i + 1);\n  }\n  return s;\n};\n\nVT100.prototype.htmlEscape = function(s) {\n  return this.replaceChar(this.replaceChar(this.replaceChar(this.replaceChar(\n                s, '&', '&amp;'), '<', '&lt;'), '\"', '&quot;'), ' ', '\\u00A0');\n};\n\nVT100.prototype.getTextContent = function(elem) {\n  return elem.textContent ||\n         (typeof elem.textContent == 'undefined' ? elem.innerText : '');\n};\n\nVT100.prototype.setTextContentRaw = function(elem, s) {\n  // Updating the content of an element is an expensive operation. It actually\n  // pays off to first check whether the element is still unchanged.\n  if (typeof elem.textContent == 'undefined') {\n    if (elem.innerText != s) {\n      try {\n        elem.innerText = s;\n      } catch (e) {\n        // Very old versions of IE do not allow setting innerText. Instead,\n        // remove all children, by setting innerHTML and then set the text\n        // using DOM methods.\n        elem.innerHTML = '';\n        elem.appendChild(document.createTextNode(\n                                          this.replaceChar(s, ' ', '\\u00A0')));\n      }\n    }\n  } else {\n    if (elem.textContent != s) {\n      elem.textContent = s;\n    }\n  }\n};\n\nVT100.prototype.setTextContent = function(elem, s) {\n  
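// Note (added): the linkification below relies on the legacy RegExp static\n  // properties (RegExp.leftContext, RegExp.lastMatch, RegExp.rightContext),\n  // which are populated as a side effect of the urlRE.test() calls; urlRE is\n  // presumably set up elsewhere in this file when URL detection is enabled.\n  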
// Check if we find any URLs in the text. If so, automatically convert them\n  // to links.\n  if (this.urlRE && this.urlRE.test(s)) {\n    var inner          = '';\n    for (;;) {\n      var consumed = 0;\n      if (RegExp.leftContext != null) {\n        inner         += this.htmlEscape(RegExp.leftContext);\n        consumed      += RegExp.leftContext.length;\n      }\n      var url          = this.htmlEscape(RegExp.lastMatch);\n      var fullUrl      = url;\n\n      // If no protocol was specified, try to guess a reasonable one.\n      if (url.indexOf('http://') < 0 && url.indexOf('https://') < 0 &&\n          url.indexOf('ftp://')  < 0 && url.indexOf('mailto:')  < 0) {\n        var slash      = url.indexOf('/');\n        var at         = url.indexOf('@');\n        var question   = url.indexOf('?');\n        if (at > 0 &&\n            (at < question || question < 0) &&\n            (slash < 0 || (question > 0 && slash > question))) {\n          fullUrl      = 'mailto:' + url;\n        } else {\n          fullUrl      = (url.indexOf('ftp.') == 0 ? 'ftp://' : 'http://') +\n                          url;\n        }\n      }\n\n      inner           += '<a target=\"vt100Link\" href=\"' + fullUrl +\n                         '\">' + url + '</a>';\n      consumed        += RegExp.lastMatch.length;\n      s                = s.substr(consumed);\n      if (!this.urlRE.test(s)) {\n        if (RegExp.rightContext != null) {\n          inner       += this.htmlEscape(RegExp.rightContext);\n        }\n        break;\n      }\n    }\n    elem.innerHTML     = inner;\n    return;\n  }\n\n  this.setTextContentRaw(elem, s);\n};\n\nVT100.prototype.insertBlankLine = function(y, color, style) {\n  // Insert a blank line at position y. This method ignores the scrollback\n  // buffer. The caller has to add the length of the scrollback buffer to\n  // the position, if necessary.\n  // If the position is larger than the number of current lines, this\n  // method just adds a new line right after the last existing one. It does\n  // not add any missing lines in between. It is the caller's responsibility\n  // to do so.\n  if (!color) {\n    color                = 'ansi0 bgAnsi15';\n  }\n  if (!style) {\n    style                = '';\n  }\n  var line;\n  if (color == 'ansi0 bgAnsi15' && !style) {\n    line                 = document.createElement('pre');\n    this.setTextContent(line, '\\n');\n  } else {\n    line                 = document.createElement('div');\n    var span             = document.createElement('span');\n    span.style.cssText   = style;\n    span.className       = color;\n    this.setTextContent(span, this.spaces(this.terminalWidth));\n    line.appendChild(span);\n  }\n  line.style.height      = this.cursorHeight + 'px';\n  var console            = this.console[this.currentScreen];\n  if (console.childNodes.length > y) {\n    console.insertBefore(line, console.childNodes[y]);\n  } else {\n    console.appendChild(line);\n  }\n};\n\nVT100.prototype.updateWidth = function() {\n  this.terminalWidth = Math.floor(this.console[this.currentScreen].offsetWidth/\n                                  this.cursorWidth*this.scale);\n  return this.terminalWidth;\n};\n\n
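// Example (added note): with an 800px wide console, 8px wide glyphs, and\n// scale 1.0, updateWidth() above yields Math.floor(800 / 8 * 1.0) = 100\n// columns; the scale factor compensates for the CSS transform that\n// enableAlternateScreen() may apply when switching column modes.\n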
VT100.prototype.updateHeight = function() {\n  // We want to be able to display either a terminal window that fills the\n  // entire browser window, or a terminal window that is contained in a\n  // <div> which is embedded somewhere in the web page.\n  if (this.isEmbedded) {\n    // Embedded terminal. Use size of the containing <div> (id=\"vt100\").\n    this.terminalHeight = Math.floor((this.container.clientHeight-1) /\n                                     this.cursorHeight);\n  } else {\n    // Use the full browser window.\n    this.terminalHeight = Math.floor(((window.innerHeight ||\n                                       document.documentElement.clientHeight ||\n                                       document.body.clientHeight)-1)/\n                                     this.cursorHeight);\n  }\n  return this.terminalHeight;\n};\n\nVT100.prototype.updateNumScrollbackLines = function() {\n  var scrollback          = Math.floor(\n                                this.console[this.currentScreen].offsetHeight /\n                                this.cursorHeight) -\n                            this.terminalHeight;\n  this.numScrollbackLines = scrollback < 0 ? 0 : scrollback;\n  return this.numScrollbackLines;\n};\n\nVT100.prototype.truncateLines = function(width) {\n  if (width < 0) {\n    width             = 0;\n  }\n  for (var line = this.console[this.currentScreen].firstChild; line;\n       line = line.nextSibling) {\n    if (line.tagName == 'DIV') {\n      var x           = 0;\n\n      // Traverse current line and truncate it once we have seen \"width\" characters\n      for (var span = line.firstChild; span;\n           span = span.nextSibling) {\n        var s         = this.getTextContent(span);\n        var l         = s.length;\n        if (x + l > width) {\n          this.setTextContent(span, s.substr(0, width - x));\n          while (span.nextSibling) {\n            line.removeChild(line.lastChild);\n          }\n          break;\n        }\n        x            += l;\n      }\n      // Prune white space from the end of the current line\n      var span       = line.lastChild;\n      while (span &&\n             span.className == 'ansi0 bgAnsi15' &&\n             !span.style.cssText.length) {\n        // Scan backwards looking for first non-space character\n        var s         = this.getTextContent(span);\n        for (var i = s.length; i--; ) {\n          if (s.charAt(i) != ' ' && s.charAt(i) != '\\u00A0') {\n            if (i+1 != s.length) {\n              this.setTextContent(span, s.substr(0, i+1));\n            }\n            span      = null;\n            break;\n          }\n        }\n        if (span) {\n          var sibling = span;\n          span        = span.previousSibling;\n          if (span) {\n            // Remove blank <span>'s from end of line\n            line.removeChild(sibling);\n          } else {\n            // Remove entire line (i.e. 
<div>), if empty\n            var blank = document.createElement('pre');\n            blank.style.height = this.cursorHeight + 'px';\n            this.setTextContent(blank, '\\n');\n            line.parentNode.replaceChild(blank, line);\n          }\n        }\n      }\n    }\n  }\n};\n\nVT100.prototype.putString = function(x, y, text, color, style) {\n  if (!color) {\n    color                           = 'ansi0 bgAnsi15';\n  }\n  if (!style) {\n    style                           = '';\n  }\n  var yIdx                          = y + this.numScrollbackLines;\n  var line;\n  var sibling;\n  var s;\n  var span;\n  var xPos                          = 0;\n  var console                       = this.console[this.currentScreen];\n  if (!text.length && (yIdx >= console.childNodes.length ||\n                       console.childNodes[yIdx].tagName != 'DIV')) {\n    // Positioning cursor to a blank location\n    span                            = null;\n  } else {\n    // Create missing blank lines at end of page\n    while (console.childNodes.length <= yIdx) {\n      // In order to simplify lookups, we want to make sure that each line\n      // is represented by exactly one element (and possibly a whole bunch of\n      // children).\n      // For non-blank lines, we can create a <div> containing one or more\n      // <span>s. For blank lines, this fails as browsers tend to optimize them\n      // away. But fortunately, a <pre> tag containing a newline character\n      // appears to work for all browsers (a &nbsp; would also work, but then\n      // copying from the browser window would insert superfluous spaces into\n      // the clipboard).\n      this.insertBlankLine(yIdx);\n    }\n    line                            = console.childNodes[yIdx];\n\n    // If necessary, promote blank '\\n' line to a <div> tag\n    if (line.tagName != 'DIV') {\n      var div                       = document.createElement('div');\n      div.style.height              = this.cursorHeight + 'px';\n      div.innerHTML                 = '<span></span>';\n      console.replaceChild(div, line);\n      line                          = div;\n    }\n\n    // Scan through list of <span>'s until we find the one where our text\n    // starts\n    span                            = line.firstChild;\n    var len;\n    while (span.nextSibling && xPos < x) {\n      len                           = this.getTextContent(span).length;\n      if (xPos + len > x) {\n        break;\n      }\n      xPos                         += len;\n      span                          = span.nextSibling;\n    }\n\n    if (text.length) {\n      // If current <span> is not long enough, pad with spaces or add new\n      // span\n      s                             = this.getTextContent(span);\n      var oldColor                  = span.className;\n      var oldStyle                  = span.style.cssText;\n      if (xPos + s.length < x) {\n        if (oldColor != 'ansi0 bgAnsi15' || oldStyle != '') {\n          span                      = document.createElement('span');\n          line.appendChild(span);\n          span.className            = 'ansi0 bgAnsi15';\n          span.style.cssText        = '';\n          oldColor                  = 'ansi0 bgAnsi15';\n          oldStyle                  = '';\n          xPos                     += s.length;\n          s                         = '';\n        }\n        do {\n          s                        += ' ';\n        } while (xPos + s.length < x);\n      }\n\n      // If styles do not match, create a new <span>\n 
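     // Note (added): 'del' is the number of characters by which the new text\n      // extends past the end of the current <span>; the cleanup loop further\n      // below removes that many characters from the following siblings.\n 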
     var del                       = text.length - s.length + x - xPos;\n      if (oldColor != color ||\n          (oldStyle != style && (oldStyle || style))) {\n        if (xPos == x) {\n          // Replacing text at beginning of existing <span>\n          if (text.length >= s.length) {\n            // New text is equal or longer than existing text\n            s                       = text;\n          } else {\n            // Insert new <span> before the current one, then remove leading\n            // part of existing <span>, adjust style of new <span>, and finally\n            // set its contents\n            sibling                 = document.createElement('span');\n            line.insertBefore(sibling, span);\n            this.setTextContent(span, s.substr(text.length));\n            span                    = sibling;\n            s                       = text;\n          }\n        } else {\n          // Replacing text some way into the existing <span>\n          var remainder             = s.substr(x + text.length - xPos);\n          this.setTextContent(span, s.substr(0, x - xPos));\n          xPos                      = x;\n          sibling                   = document.createElement('span');\n          if (span.nextSibling) {\n            line.insertBefore(sibling, span.nextSibling);\n            span                    = sibling;\n            if (remainder.length) {\n              sibling               = document.createElement('span');\n              sibling.className     = oldColor;\n              sibling.style.cssText = oldStyle;\n              this.setTextContent(sibling, remainder);\n              line.insertBefore(sibling, span.nextSibling);\n            }\n          } else {\n            line.appendChild(sibling);\n            span                    = sibling;\n            if (remainder.length) {\n              sibling               = document.createElement('span');\n              sibling.className     = oldColor;\n              sibling.style.cssText = oldStyle;\n              this.setTextContent(sibling, remainder);\n              line.appendChild(sibling);\n            }\n          }\n          s                         = text;\n        }\n        span.className              = color;\n        span.style.cssText          = style;\n      } else {\n        // Overwrite (partial) <span> with new text\n        s                           = s.substr(0, x - xPos) +\n          text +\n          s.substr(x + text.length - xPos);\n      }\n      this.setTextContent(span, s);\n\n\n      // Delete all subsequent <span>'s that have just been overwritten\n      sibling                       = span.nextSibling;\n      while (del > 0 && sibling) {\n        s                           = this.getTextContent(sibling);\n        len                         = s.length;\n        if (len <= del) {\n          line.removeChild(sibling);\n          del                      -= len;\n          sibling                   = span.nextSibling;\n        } else {\n          this.setTextContent(sibling, s.substr(del));\n          break;\n        }\n      }\n\n      // Merge <span> with next sibling, if styles are identical\n      if (sibling && span.className == sibling.className &&\n          span.style.cssText == sibling.style.cssText) {\n        this.setTextContent(span,\n                            this.getTextContent(span) +\n                            this.getTextContent(sibling));\n        line.removeChild(sibling);\n      }\n    }\n  }\n\n  // Position cursor\n  this.cursorX                    
  = x + text.length;\n  if (this.cursorX >= this.terminalWidth) {\n    this.cursorX                    = this.terminalWidth - 1;\n    if (this.cursorX < 0) {\n      this.cursorX                  = 0;\n    }\n  }\n  var pixelX                        = -1;\n  var pixelY                        = -1;\n  if (!this.cursor.style.visibility) {\n    var idx                         = this.cursorX - xPos;\n    if (span) {\n      // If we are in a non-empty line, take the cursor Y position from the\n      // other elements in this line. If dealing with broken, non-proportional\n      // fonts, this is likely to yield better results.\n      pixelY                        = span.offsetTop +\n                                      span.offsetParent.offsetTop;\n      s                             = this.getTextContent(span);\n      var nxtIdx                    = idx - s.length;\n      if (nxtIdx < 0) {\n        this.setTextContent(this.cursor, s.charAt(idx));\n        pixelX                      = span.offsetLeft +\n                                      idx*span.offsetWidth / s.length;\n      } else {\n        if (nxtIdx == 0) {\n          pixelX                    = span.offsetLeft + span.offsetWidth;\n        }\n        if (span.nextSibling) {\n          s                         = this.getTextContent(span.nextSibling);\n          this.setTextContent(this.cursor, s.charAt(nxtIdx));\n          if (pixelX < 0) {\n            pixelX                  = span.nextSibling.offsetLeft +\n                                      nxtIdx*span.offsetWidth / s.length;\n          }\n        } else {\n          this.setTextContent(this.cursor, ' ');\n        }\n      }\n    } else {\n      this.setTextContent(this.cursor, ' ');\n    }\n  }\n  if (pixelX >= 0) {\n    this.cursor.style.left          = (pixelX + (this.isIE ? 
1 : 0))/\n                                      this.scale + 'px';\n  } else {\n    this.setTextContent(this.space, this.spaces(this.cursorX));\n    this.cursor.style.left          = (this.space.offsetWidth +\n                                       console.offsetLeft)/this.scale + 'px';\n  }\n  this.cursorY                      = yIdx - this.numScrollbackLines;\n  if (pixelY >= 0) {\n    this.cursor.style.top           = pixelY + 'px';\n  } else {\n    this.cursor.style.top           = yIdx*this.cursorHeight +\n                                      console.offsetTop + 'px';\n  }\n\n  if (text.length) {\n    // Merge <span> with previous sibling, if styles are identical\n    if ((sibling = span.previousSibling) &&\n        span.className == sibling.className &&\n        span.style.cssText == sibling.style.cssText) {\n      this.setTextContent(span,\n                          this.getTextContent(sibling) +\n                          this.getTextContent(span));\n      line.removeChild(sibling);\n    }\n\n    // Prune white space from the end of the current line\n    span                            = line.lastChild;\n    while (span &&\n           span.className == 'ansi0 bgAnsi15' &&\n           !span.style.cssText.length) {\n      // Scan backwards looking for first non-space character\n      s                             = this.getTextContent(span);\n      for (var i = s.length; i--; ) {\n        if (s.charAt(i) != ' ' && s.charAt(i) != '\\u00A0') {\n          if (i+1 != s.length) {\n            this.setTextContent(span, s.substr(0, i+1));\n          }\n          span                      = null;\n          break;\n        }\n      }\n      if (span) {\n        sibling                     = span;\n        span                        = span.previousSibling;\n        if (span) {\n          // Remove blank <span>'s from end of line\n          line.removeChild(sibling);\n        } else {\n          // Remove entire line (i.e. <div>), if empty\n          var blank                 = document.createElement('pre');\n          blank.style.height        = this.cursorHeight + 'px';\n          this.setTextContent(blank, '\\n');\n          line.parentNode.replaceChild(blank, line);\n        }\n      }\n    }\n  }\n};\n\nVT100.prototype.gotoXY = function(x, y) {\n  if (x >= this.terminalWidth) {\n    x           = this.terminalWidth - 1;\n  }\n  if (x < 0) {\n    x           = 0;\n  }\n  var minY, maxY;\n  if (this.offsetMode) {\n    minY        = this.top;\n    maxY        = this.bottom;\n  } else {\n    minY        = 0;\n    maxY        = this.terminalHeight;\n  }\n  if (y >= maxY) {\n    y           = maxY - 1;\n  }\n  if (y < minY) {\n    y           = minY;\n  }\n  this.putString(x, y, '', undefined);\n  this.needWrap = false;\n};\n\nVT100.prototype.gotoXaY = function(x, y) {\n  this.gotoXY(x, this.offsetMode ? (this.top + y) : y);\n};\n\nVT100.prototype.refreshInvertedState = function() {\n  if (this.isInverted) {\n    this.scrollable.className += ' inverted';\n  } else {\n    this.scrollable.className = this.scrollable.className.\n                                                     replace(/ *inverted/, '');\n  }\n};\n\nVT100.prototype.enableAlternateScreen = function(state) {\n  // Don't do anything, if we are already on the desired screen\n  if ((state ? 1 : 0) == this.currentScreen) {\n    // Calling the resizer is not actually necessary. 
But it is a good way\n    // of resetting state that might have gotten corrupted.\n    this.resizer();\n    return;\n  }\n\n  // We save the full state of the normal screen, when we switch away from it.\n  // But for the alternate screen, no saving is necessary. We always reset\n  // it when we switch to it.\n  if (state) {\n    this.saveCursor();\n  }\n\n  // Display new screen, and initialize state (the resizer does that for us).\n  this.currentScreen                                 = state ? 1 : 0;\n  this.console[1-this.currentScreen].style.display   = 'none';\n  this.console[this.currentScreen].style.display     = '';\n\n  // Select appropriate character pitch.\n  var transform                                      = this.getTransformName();\n  if (transform) {\n    if (state) {\n      // Upon enabling the alternate screen, we switch to 80 column mode. But\n      // upon returning to the regular screen, we restore the mode that was\n      // in effect previously.\n      this.console[1].style[transform]               = '';\n    }\n    var style                                        =\n                             this.console[this.currentScreen].style[transform];\n    this.cursor.style[transform]                     = style;\n    this.space.style[transform]                      = style;\n    this.scale                                       = style == '' ? 1.0:1.65;\n    if (transform == 'filter') {\n       this.console[this.currentScreen].style.width  = style == '' ? '165%':'';\n    }\n  }\n  this.resizer();\n\n  // If we switched to the alternate screen, reset it completely. Otherwise,\n  // restore the saved state.\n  if (state) {\n    this.gotoXY(0, 0);\n    this.clearRegion(0, 0, this.terminalWidth, this.terminalHeight);\n  } else {\n    this.restoreCursor();\n  }\n};\n\nVT100.prototype.hideCursor = function() {\n  var hidden = this.cursor.style.visibility == 'hidden';\n  if (!hidden) {\n    this.cursor.style.visibility = 'hidden';\n    return true;\n  }\n  return false;\n};\n\nVT100.prototype.showCursor = function(x, y) {\n  if (this.cursor.style.visibility) {\n    this.cursor.style.visibility = '';\n    this.putString(x == undefined ? this.cursorX : x,\n                   y == undefined ? this.cursorY : y,\n                   '', undefined);\n    return true;\n  }\n  return false;\n};\n\nVT100.prototype.scrollBack = function() {\n  var i                     = this.scrollable.scrollTop -\n                              this.scrollable.clientHeight;\n  this.scrollable.scrollTop = i < 0 ? 0 : i;\n};\n\nVT100.prototype.scrollFore = function() {\n  var i                     = this.scrollable.scrollTop +\n                              this.scrollable.clientHeight;\n  this.scrollable.scrollTop = i > this.numScrollbackLines *\n                                  this.cursorHeight + 1\n                              ? 
this.numScrollbackLines *\n                                this.cursorHeight + 1\n                              : i;\n};\n\nVT100.prototype.spaces = function(i) {\n  var s = '';\n  while (i-- > 0) {\n    s += ' ';\n  }\n  return s;\n};\n\nVT100.prototype.clearRegion = function(x, y, w, h, color, style) {\n  w         += x;\n  if (x < 0) {\n    x        = 0;\n  }\n  if (w > this.terminalWidth) {\n    w        = this.terminalWidth;\n  }\n  if ((w    -= x) <= 0) {\n    return;\n  }\n  h         += y;\n  if (y < 0) {\n    y        = 0;\n  }\n  if (h > this.terminalHeight) {\n    h        = this.terminalHeight;\n  }\n  if ((h    -= y) <= 0) {\n    return;\n  }\n\n  // Special case the situation where we clear the entire screen, and we do\n  // not have a scrollback buffer. In that case, we should just remove all\n  // child nodes.\n  if (!this.numScrollbackLines &&\n      w == this.terminalWidth && h == this.terminalHeight &&\n      (color == undefined || color == 'ansi0 bgAnsi15') && !style) {\n    var console = this.console[this.currentScreen];\n    while (console.lastChild) {\n      console.removeChild(console.lastChild);\n    }\n    this.putString(this.cursorX, this.cursorY, '', undefined);\n  } else {\n    var hidden = this.hideCursor();\n    var cx     = this.cursorX;\n    var cy     = this.cursorY;\n    var s      = this.spaces(w);\n    for (var i = y+h; i-- > y; ) {\n      this.putString(x, i, s, color, style);\n    }\n    hidden ? this.showCursor(cx, cy) : this.putString(cx, cy, '', undefined);\n  }\n};\n\nVT100.prototype.copyLineSegment = function(dX, dY, sX, sY, w) {\n  var text                            = [ ];\n  var className                       = [ ];\n  var style                           = [ ];\n  var console                         = this.console[this.currentScreen];\n  if (sY >= console.childNodes.length) {\n    text[0]                           = this.spaces(w);\n    className[0]                      = undefined;\n    style[0]                          = undefined;\n  } else {\n    var line = console.childNodes[sY];\n    if (line.tagName != 'DIV' || !line.childNodes.length) {\n      text[0]                         = this.spaces(w);\n      className[0]                    = undefined;\n      style[0]                        = undefined;\n    } else {\n      var x                           = 0;\n      for (var span = line.firstChild; span && w > 0; span = span.nextSibling){\n        var s                         = this.getTextContent(span);\n        var len                       = s.length;\n        if (x + len > sX) {\n          var o                       = sX > x ? 
sX - x : 0;\n          text[text.length]           = s.substr(o, w);\n          className[className.length] = span.className;\n          style[style.length]         = span.style.cssText;\n          w                          -= len - o;\n        }\n        x                            += len;\n      }\n      if (w > 0) {\n        text[text.length]             = this.spaces(w);\n        className[className.length]   = undefined;\n        style[style.length]           = undefined;\n      }\n    }\n  }\n  var hidden                          = this.hideCursor();\n  var cx                              = this.cursorX;\n  var cy                              = this.cursorY;\n  for (var i = 0; i < text.length; i++) {\n    var color;\n    if (className[i]) {\n      color                           = className[i];\n    } else {\n      color                           = 'ansi0 bgAnsi15';\n    }\n    this.putString(dX, dY - this.numScrollbackLines, text[i], color, style[i]);\n    dX                               += text[i].length;\n  }\n  hidden ? this.showCursor(cx, cy) : this.putString(cx, cy, '', undefined);\n};\n\nVT100.prototype.scrollRegion = function(x, y, w, h, incX, incY,\n                                        color, style) {\n  var left             = incX < 0 ? -incX : 0;\n  var right            = incX > 0 ?  incX : 0;\n  var up               = incY < 0 ? -incY : 0;\n  var down             = incY > 0 ?  incY : 0;\n\n  // Clip region against terminal size\n  var dontScroll       = null;\n  w                   += x;\n  if (x < left) {\n    x                  = left;\n  }\n  if (w > this.terminalWidth - right) {\n    w                  = this.terminalWidth - right;\n  }\n  if ((w              -= x) <= 0) {\n    dontScroll         = 1;\n  }\n  h                   += y;\n  if (y < up) {\n    y                  = up;\n  }\n  if (h > this.terminalHeight - down) {\n    h                  = this.terminalHeight - down;\n  }\n  if ((h              -= y) < 0) {\n    dontScroll         = 1;\n  }\n  if (!dontScroll) {\n    if (style && style.indexOf('underline') != -1) {\n      // Different terminal emulators disagree on the attributes that\n      // are used for scrolling. The consensus seems to be, never to\n      // fill with underlined spaces. N.B. this is different from the\n      // cases when the user blanks a region. User-initiated blanking\n      // always fills with all of the current attributes.\n      style            = style.replace(/text-decoration: ?underline;/, '');\n    }\n\n    // Compute current scroll position\n    var scrollPos      = this.numScrollbackLines -\n                      (this.scrollable.scrollTop-1) / this.cursorHeight;\n\n    // Determine original cursor position. Hide cursor temporarily to avoid\n    // visual artifacts.\n    var hidden         = this.hideCursor();\n    var cx             = this.cursorX;\n    var cy             = this.cursorY;\n    var console        = this.console[this.currentScreen];\n\n    if (!incX && !x && w == this.terminalWidth) {\n      // Scrolling entire lines\n      if (incY < 0) {\n        // Scrolling up\n        if (!this.currentScreen && y == -incY &&\n            h == this.terminalHeight + incY) {\n          // Scrolling up with adding to the scrollback buffer. 
This is only\n          // possible if there are at least as many lines in the console,\n          // as the terminal is high\n          while (console.childNodes.length < this.terminalHeight) {\n            this.insertBlankLine(this.terminalHeight);\n          }\n\n          // Add new lines at bottom in order to force scrolling\n          for (var i = 0; i < y; i++) {\n            this.insertBlankLine(console.childNodes.length, color, style);\n          }\n\n          // Adjust the number of lines in the scrollback buffer by\n          // removing excess entries.\n          this.updateNumScrollbackLines();\n          while (this.numScrollbackLines >\n                 (this.currentScreen ? 0 : this.maxScrollbackLines)) {\n            console.removeChild(console.firstChild);\n            this.numScrollbackLines--;\n          }\n\n          // Mark lines in the scrollback buffer, so that they do not get\n          // printed.\n          for (var i = this.numScrollbackLines, j = -incY;\n               i-- > 0 && j-- > 0; ) {\n            console.childNodes[i].className = 'scrollback';\n          }\n        } else {\n          // Scrolling up without adding to the scrollback buffer.\n          for (var i = -incY;\n               i-- > 0 &&\n               console.childNodes.length >\n               this.numScrollbackLines + y + incY; ) {\n            console.removeChild(console.childNodes[\n                                          this.numScrollbackLines + y + incY]);\n          }\n\n          // If we used to have a scrollback buffer, then we must make sure\n          // that we add back blank lines at the bottom of the terminal.\n          // Similarly, if we are scrolling in the middle of the screen,\n          // we must add blank lines to ensure that the bottom of the screen\n          // does not move up.\n          if (this.numScrollbackLines > 0 ||\n              console.childNodes.length > this.numScrollbackLines+y+h+incY) {\n            for (var i = -incY; i-- > 0; ) {\n              this.insertBlankLine(this.numScrollbackLines + y + h + incY,\n                                   color, style);\n            }\n          }\n        }\n      } else {\n        // Scrolling down\n        for (var i = incY;\n             i-- > 0 &&\n             console.childNodes.length > this.numScrollbackLines + y + h; ) {\n          console.removeChild(console.childNodes[this.numScrollbackLines+y+h]);\n        }\n        for (var i = incY; i--; ) {\n          this.insertBlankLine(this.numScrollbackLines + y, color, style);\n        }\n      }\n    } else {\n      // Scrolling partial lines\n      if (incY <= 0) {\n        // Scrolling up or horizontally within a line\n        for (var i = y + this.numScrollbackLines;\n             i < y + this.numScrollbackLines + h;\n             i++) {\n          this.copyLineSegment(x + incX, i + incY, x, i, w);\n        }\n      } else {\n        // Scrolling down\n        for (var i = y + this.numScrollbackLines + h;\n             i-- > y + this.numScrollbackLines; ) {\n          this.copyLineSegment(x + incX, i + incY, x, i, w);\n        }\n      }\n\n      // Clear blank regions\n      if (incX > 0) {\n        this.clearRegion(x, y, incX, h, color, style);\n      } else if (incX < 0) {\n        this.clearRegion(x + w + incX, y, -incX, h, color, style);\n      }\n      if (incY > 0) {\n        this.clearRegion(x, y, w, incY, color, style);\n      } else if (incY < 0) {\n        this.clearRegion(x, y + h + incY, w, -incY, color, style);\n      }\n    }\n\n    // 
Reset scroll position\n    this.scrollable.scrollTop = (this.numScrollbackLines-scrollPos) *\n                                this.cursorHeight + 1;\n\n    // Move cursor back to its original position\n    hidden ? this.showCursor(cx, cy) : this.putString(cx, cy, '', undefined);\n  }\n};\n\nVT100.prototype.copy = function(selection) {\n  if (selection == undefined) {\n    selection                = this.selection();\n  }\n  this.internalClipboard     = undefined;\n  if (selection.length) {\n    try {\n      // IE\n      this.cliphelper.value  = selection;\n      this.cliphelper.select();\n      this.cliphelper.createTextRange().execCommand('copy');\n    } catch (e) {\n      this.internalClipboard = selection;\n    }\n    this.cliphelper.value    = '';\n  }\n};\n\nVT100.prototype.copyLast = function() {\n  // Opening the context menu can remove the selection. We try to prevent this\n  // from happening, but that is not possible for all browsers. So, instead,\n  // we compute the selection before showing the menu.\n  this.copy(this.lastSelection);\n};\n\nVT100.prototype.pasteFnc = function() {\n  var clipboard     = undefined;\n  if (this.internalClipboard != undefined) {\n    clipboard       = this.internalClipboard;\n  } else {\n    try {\n      this.cliphelper.value = '';\n      this.cliphelper.createTextRange().execCommand('paste');\n      clipboard     = this.cliphelper.value;\n    } catch (e) {\n    }\n  }\n  this.cliphelper.value = '';\n  if (clipboard && this.menu.style.visibility == 'hidden') {\n    return function() {\n      this.keysPressed('' + clipboard);\n    };\n  } else {\n    return undefined;\n  }\n};\n\nVT100.prototype.pasteBrowserFnc = function() {\n  var clipboard     = prompt(\"Paste into this box:\",\"\");\n  if (clipboard != undefined) {\n     return this.keysPressed('' + clipboard);\n  }\n};\n\nVT100.prototype.toggleUTF = function() {\n  this.utfEnabled   = !this.utfEnabled;\n\n  // We always persist the last value that the user selected. Not necessarily\n  // the last value that a random program requested.\n  this.utfPreferred = this.utfEnabled;\n};\n\nVT100.prototype.toggleBell = function() {\n  this.visualBell = !this.visualBell;\n};\n\nVT100.prototype.toggleSoftKeyboard = function() {\n  this.softKeyboard = !this.softKeyboard;\n  this.keyboardImage.style.visibility = this.softKeyboard ? 
'visible' : '';\n};\n\nVT100.prototype.deselectKeys = function(elem) {\n  if (elem && elem.className == 'selected') {\n    elem.className = '';\n  }\n  for (elem = elem.firstChild; elem; elem = elem.nextSibling) {\n    this.deselectKeys(elem);\n  }\n};\n\nVT100.prototype.showSoftKeyboard = function() {\n  // Make sure no key is currently selected\n  this.lastSelectedKey           = undefined;\n  this.deselectKeys(this.keyboard);\n  this.isShift                   = false;\n  this.showShiftState(false);\n  this.isCtrl                    = false;\n  this.showCtrlState(false);\n  this.isAlt                     = false;\n  this.showAltState(false);\n\n  this.keyboard.style.left       = '0px';\n  this.keyboard.style.top        = '0px';\n  this.keyboard.style.width      = this.container.offsetWidth  + 'px';\n  this.keyboard.style.height     = this.container.offsetHeight + 'px';\n  this.keyboard.style.visibility = 'hidden';\n  this.keyboard.style.display    = '';\n\n  var kbd                        = this.keyboard.firstChild;\n  var scale                      = 1.0;\n  var transform                  = this.getTransformName();\n  if (transform) {\n    kbd.style[transform]         = '';\n    if (kbd.offsetWidth > 0.9 * this.container.offsetWidth) {\n      scale                      = (kbd.offsetWidth/\n                                    this.container.offsetWidth)/0.9;\n    }\n    if (kbd.offsetHeight > 0.9 * this.container.offsetHeight) {\n      // Keep whichever scale factor is larger, so that the keyboard fits\n      // both horizontally and vertically.\n      scale                      = Math.max(scale, (kbd.offsetHeight/\n                                             this.container.offsetHeight)/0.9);\n    }\n    var style                    = this.getTransformStyle(transform,\n                                              scale > 1.0 ? scale : undefined);\n    kbd.style[transform]         = style;\n  }\n  if (transform == 'filter') {\n    scale                        = 1.0;\n  }\n  kbd.style.left                 = ((this.container.offsetWidth -\n                                     kbd.offsetWidth/scale)/2) + 'px';\n  kbd.style.top                  = ((this.container.offsetHeight -\n                                     kbd.offsetHeight/scale)/2) + 'px';\n\n  this.keyboard.style.visibility = 'visible';\n};\n\nVT100.prototype.hideSoftKeyboard = function() {\n  this.keyboard.style.display    = 'none';\n};\n\nVT100.prototype.toggleCursorBlinking = function() {\n  this.blinkingCursor = !this.blinkingCursor;\n};\n\nVT100.prototype.about = function() {\n  alert(\"VT100 Terminal Emulator \" + \"2.10 (revision 239)\" +\n        \"\\nCopyright 2008-2010 by Markus Gutschke\\n\" +\n        \"For more information check http://shellinabox.com\");\n};\n\nVT100.prototype.hideContextMenu = function() {\n  this.menu.style.visibility = 'hidden';\n  this.menu.style.top        = '-100px';\n  this.menu.style.left       = '-100px';\n  this.menu.style.width      = '0px';\n  this.menu.style.height     = '0px';\n};\n\nVT100.prototype.extendContextMenu = function(entries, actions) {\n};\n\nVT100.prototype.showContextMenu = function(x, y) {\n  this.menu.innerHTML         =\n    '<table class=\"popup\" ' +\n           'cellpadding=\"0\" cellspacing=\"0\">' +\n      '<tr><td>' +\n        '<ul id=\"menuentries\">' +\n          '<li id=\"beginclipboard\">Copy</li>' +\n          '<li id=\"endclipboard\">Paste</li>' +\n          '<li id=\"browserclipboard\">Paste from browser</li>' +\n          '<hr />' +\n          '<li id=\"reset\">Reset</li>' +\n          '<hr />' +\n          '<li id=\"beginconfig\">' +\n             (this.utfEnabled ? 
'<img src=\"/webshell/enabled.gif\" />' : '') +\n             'Unicode</li>' +\n          '<li>' +\n             (this.visualBell ? '<img src=\"/webshell/enabled.gif\" />' : '') +\n             'Visual Bell</li>'+\n          '<li>' +\n             (this.softKeyboard ? '<img src=\"/webshell/enabled.gif\" />' : '') +\n             'Onscreen Keyboard</li>' +\n          '<li id=\"endconfig\">' +\n             (this.blinkingCursor ? '<img src=\"/webshell/enabled.gif\" />' : '') +\n             'Blinking Cursor</li>'+\n          (this.usercss.firstChild ?\n           '<hr id=\"beginusercss\" />' +\n           this.usercss.innerHTML +\n           '<hr id=\"endusercss\" />' :\n           '<hr />') +\n          '<li id=\"about\">About...</li>' +\n        '</ul>' +\n      '</td></tr>' +\n    '</table>';\n\n  var popup                   = this.menu.firstChild;\n  var menuentries             = this.getChildById(popup, 'menuentries');\n\n  // Determine menu entries that should be disabled\n  this.lastSelection          = this.selection();\n  if (!this.lastSelection.length) {\n    menuentries.firstChild.className\n                              = 'disabled';\n  }\n  var p                       = this.pasteFnc();\n  if (!p) {\n    menuentries.childNodes[1].className\n                              = 'disabled';\n  }\n\n  // Actions for default items\n  var actions                 = [ this.copyLast, p, this.pasteBrowserFnc, this.reset,\n                                  this.toggleUTF, this.toggleBell,\n                                  this.toggleSoftKeyboard,\n                                  this.toggleCursorBlinking ];\n\n  // Actions for user CSS styles (if any)\n  for (var i = 0; i < this.usercssActions.length; ++i) {\n    actions[actions.length]   = this.usercssActions[i];\n  }\n  actions[actions.length]     = this.about;\n\n  // Allow subclasses to dynamically add entries to the context menu\n  this.extendContextMenu(menuentries, actions);\n\n  // Hook up event listeners\n  for (var node = menuentries.firstChild, i = 0; node;\n       node = node.nextSibling) {\n    if (node.tagName == 'LI') {\n      if (node.className != 'disabled') {\n        this.addListener(node, 'mouseover',\n                         function(vt100, node) {\n                           return function() {\n                             node.className = 'hover';\n                           }\n                         }(this, node));\n        this.addListener(node, 'mouseout',\n                         function(vt100, node) {\n                           return function() {\n                             node.className = '';\n                           }\n                         }(this, node));\n        this.addListener(node, 'mousedown',\n                         function(vt100, action) {\n                           return function(event) {\n                             vt100.hideContextMenu();\n                             action.call(vt100);\n                             vt100.storeUserSettings();\n                             return vt100.cancelEvent(event || window.event);\n                           }\n                         }(this, actions[i]));\n        this.addListener(node, 'mouseup',\n                         function(vt100) {\n                           return function(event) {\n                             return vt100.cancelEvent(event || window.event);\n                           }\n                         }(this));\n        this.addListener(node, 'mouseclick',\n                         function(vt100) {\n          
                 return function(event) {\n                             return vt100.cancelEvent(event || window.event);\n                           }\n                         }(this));\n      }\n      i++;\n    }\n  }\n\n  // Position menu next to the mouse pointer\n  this.menu.style.left        = '0px';\n  this.menu.style.top         = '0px';\n  this.menu.style.width       =  this.container.offsetWidth  + 'px';\n  this.menu.style.height      =  this.container.offsetHeight + 'px';\n  popup.style.left            = '0px';\n  popup.style.top             = '0px';\n\n  var margin                  = 2;\n  if (x + popup.clientWidth >= this.container.offsetWidth - margin) {\n    x              = this.container.offsetWidth-popup.clientWidth - margin - 1;\n  }\n  if (x < margin) {\n    x                         = margin;\n  }\n  if (y + popup.clientHeight >= this.container.offsetHeight - margin) {\n    y            = this.container.offsetHeight-popup.clientHeight - margin - 1;\n  }\n  if (y < margin) {\n    y                         = margin;\n  }\n  popup.style.left            = x + 'px';\n  popup.style.top             = y + 'px';\n\n  // Block all other interactions with the terminal emulator\n  this.addListener(this.menu, 'click', function(vt100) {\n                                         return function() {\n                                           vt100.hideContextMenu();\n                                         }\n                                       }(this));\n\n  // Show the menu\n  this.menu.style.visibility  = '';\n};\n\nVT100.prototype.keysPressed = function(ch) {\n  for (var i = 0; i < ch.length; i++) {\n    var c = ch.charCodeAt(i);\n    this.vt100(c >= 7 && c <= 15 ||\n               c == 24 || c == 26 || c == 27 || c >= 32\n               ? String.fromCharCode(c) : '<' + c + '>');\n  }\n};\n\nVT100.prototype.applyModifiers = function(ch, event) {\n  if (ch) {\n    if (event.ctrlKey) {\n      if (ch >= 32 && ch <= 127) {\n        // For historic reasons, some control characters are treated specially\n        switch (ch) {\n        case /* 3 */ 51: ch  =  27; break;\n        case /* 4 */ 52: ch  =  28; break;\n        case /* 5 */ 53: ch  =  29; break;\n        case /* 6 */ 54: ch  =  30; break;\n        case /* 7 */ 55: ch  =  31; break;\n        case /* 8 */ 56: ch  = 127; break;\n        case /* ? */ 63: ch  = 127; break;\n        default:         ch &=  31; break;\n        }\n      }\n    }\n    return String.fromCharCode(ch);\n  } else {\n    return undefined;\n  }\n};\n\nVT100.prototype.handleKey = function(event) {\n  // this.vt100('H: c=' + event.charCode + ', k=' + event.keyCode +\n  //            (event.shiftKey || event.ctrlKey || event.altKey ||\n  //             event.metaKey ? ', ' +\n  //             (event.shiftKey ? 'S' : '') + (event.ctrlKey ? 'C' : '') +\n  //             (event.altKey ? 'A' : '') + (event.metaKey ? 'M' : '') : '') +\n  //            '\\r\\n');\n  var ch, key;\n  if (typeof event.charCode != 'undefined') {\n    // non-IE keypress events have a translated charCode value. 
Also, our\n    // fake events generated when receiving keydown events include this data\n    // on all browsers.\n    ch                                = event.charCode;\n    key                               = event.keyCode;\n  } else {\n    // When sending a keypress event, IE includes the translated character\n    // code in the keyCode field.\n    ch                                = event.keyCode;\n    key                               = undefined;\n  }\n\n  // Apply modifier keys (ctrl and shift)\n  if (ch) {\n    key                               = undefined;\n  }\n  ch                                  = this.applyModifiers(ch, event);\n\n  // By this point, \"ch\" is either defined and contains the character code, or\n  // it is undefined and \"key\" defines the code of a function key\n  if (ch != undefined) {\n    this.scrollable.scrollTop         = this.numScrollbackLines *\n                                        this.cursorHeight + 1;\n  } else {\n    if ((event.altKey || event.metaKey) && !event.shiftKey && !event.ctrlKey) {\n      // Many programs have difficulties dealing with parametrized escape\n      // sequences for function keys. Thus, if ALT is the only modifier\n      // key, return Emacs-style keycodes for commonly used keys.\n      switch (key) {\n      case  33: /* Page Up      */ ch = '\\u001B<';                      break;\n      case  34: /* Page Down    */ ch = '\\u001B>';                      break;\n      case  37: /* Left         */ ch = '\\u001Bb';                      break;\n      case  38: /* Up           */ ch = '\\u001Bp';                      break;\n      case  39: /* Right        */ ch = '\\u001Bf';                      break;\n      case  40: /* Down         */ ch = '\\u001Bn';                      break;\n      case  46: /* Delete       */ ch = '\\u001Bd';                      break;\n      default:                                                          break;\n      }\n    } else if (event.shiftKey && !event.ctrlKey &&\n               !event.altKey && !event.metaKey) {\n      switch (key) {\n      case  33: /* Page Up      */ this.scrollBack();                   return;\n      case  34: /* Page Down    */ this.scrollFore();                   return;\n      default:                                                          break;\n      }\n    }\n    if (ch == undefined) {\n      switch (key) {\n      case   8: /* Backspace    */ ch = '\\u007f';                       break;\n      case   9: /* Tab          */ ch = '\\u0009';                       break;\n      case  10: /* Return       */ ch = '\\u000A';                       break;\n      case  13: /* Enter        */ ch = this.crLfMode ?\n                                        '\\r\\n' : '\\r';                  break;\n      case  16: /* Shift        */                                      return;\n      case  17: /* Ctrl         */                                      return;\n      case  18: /* Alt          */                                      return;\n      case  19: /* Break        */                                      return;\n      case  20: /* Caps Lock    */                                      return;\n      case  27: /* Escape       */ ch = '\\u001B';                       break;\n      case  33: /* Page Up      */ ch = '\\u001B[5~';                    break;\n      case  34: /* Page Down    */ ch = '\\u001B[6~';                    break;\n      case  35: /* End          */ ch = '\\u001BOF';                     break;\n      case  36: /* Home         */ ch = '\\u001BOH';           
          break;\n      case  37: /* Left         */ ch = this.cursorKeyMode ?\n                             '\\u001BOD' : '\\u001B[D';                   break;\n      case  38: /* Up           */ ch = this.cursorKeyMode ?\n                             '\\u001BOA' : '\\u001B[A';                   break;\n      case  39: /* Right        */ ch = this.cursorKeyMode ?\n                             '\\u001BOC' : '\\u001B[C';                   break;\n      case  40: /* Down         */ ch = this.cursorKeyMode ?\n                             '\\u001BOB' : '\\u001B[B';                   break;\n      case  45: /* Insert       */ ch = '\\u001B[2~';                    break;\n      case  46: /* Delete       */ ch = '\\u001B[3~';                    break;\n      case  91: /* Left Window  */                                      return;\n      case  92: /* Right Window */                                      return;\n      case  93: /* Select       */                                      return;\n      case  96: /* 0            */ ch = this.applyModifiers(48, event); break;\n      case  97: /* 1            */ ch = this.applyModifiers(49, event); break;\n      case  98: /* 2            */ ch = this.applyModifiers(50, event); break;\n      case  99: /* 3            */ ch = this.applyModifiers(51, event); break;\n      case 100: /* 4            */ ch = this.applyModifiers(52, event); break;\n      case 101: /* 5            */ ch = this.applyModifiers(53, event); break;\n      case 102: /* 6            */ ch = this.applyModifiers(54, event); break;\n      case 103: /* 7            */ ch = this.applyModifiers(55, event); break;\n      case 104: /* 8            */ ch = this.applyModifiers(56, event); break;\n      case 105: /* 9            */ ch = this.applyModifiers(57, event); break;\n      case 106: /* *            */ ch = this.applyModifiers(42, event); break;\n      case 107: /* +            */ ch = this.applyModifiers(43, event); break;\n      case 109: /* -            */ ch = this.applyModifiers(45, event); break;\n      case 110: /* .            
*/ ch = this.applyModifiers(46, event); break;\n      case 111: /* /            */ ch = this.applyModifiers(47, event); break;\n      case 112: /* F1           */ ch = '\\u001BOP';                     break;\n      case 113: /* F2           */ ch = '\\u001BOQ';                     break;\n      case 114: /* F3           */ ch = '\\u001BOR';                     break;\n      case 115: /* F4           */ ch = '\\u001BOS';                     break;\n      case 116: /* F5           */ ch = '\\u001B[15~';                   break;\n      case 117: /* F6           */ ch = '\\u001B[17~';                   break;\n      case 118: /* F7           */ ch = '\\u001B[18~';                   break;\n      case 119: /* F8           */ ch = '\\u001B[19~';                   break;\n      case 120: /* F9           */ ch = '\\u001B[20~';                   break;\n      case 121: /* F10          */ ch = '\\u001B[21~';                   break;\n      case 122: /* F11          */ ch = '\\u001B[23~';                   break;\n      case 123: /* F12          */ ch = '\\u001B[24~';                   break;\n      case 144: /* Num Lock     */                                      return;\n      case 145: /* Scroll Lock  */                                      return;\n      case 186: /* ;            */ ch = this.applyModifiers(59, event); break;\n      case 187: /* =            */ ch = this.applyModifiers(61, event); break;\n      case 188: /* ,            */ ch = this.applyModifiers(44, event); break;\n      case 189: /* -            */ ch = this.applyModifiers(45, event); break;\n      case 173: /* -            */ ch = this.applyModifiers(45, event); break; // FF15 Patch\n      case 190: /* .            */ ch = this.applyModifiers(46, event); break;\n      case 191: /* /            */ ch = this.applyModifiers(47, event); break;\n      // Conflicts with dead key \" on Swiss keyboards\n      //case 192: /* `            */ ch = this.applyModifiers(96, event); break;\n      // Conflicts with dead key \" on Swiss keyboards\n      //case 219: /* [            */ ch = this.applyModifiers(91, event); break;\n      case 220: /* \\            */ ch = this.applyModifiers(92, event); break;\n      // Conflicts with dead key ^ and ` on Swiss keyboards\n      //                         ^ and \" on French keyboards\n      //case 221: /* ]            */ ch = this.applyModifiers(93, event); break;\n      case 222: /* '            */ ch = this.applyModifiers(39, event); break;\n      default:                                                          return;\n      }\n      this.scrollable.scrollTop       = this.numScrollbackLines *\n                                        this.cursorHeight + 1;\n    }\n  }\n\n  // \"ch\" now contains the sequence of keycodes to send. 
But we might still\n  // have to apply the effects of modifier keys.\n  if (event.shiftKey || event.ctrlKey || event.altKey || event.metaKey) {\n    var start, digit, part1, part2;\n    if ((start = ch.substr(0, 2)) == '\\u001B[') {\n      for (part1 = start;\n           part1.length < ch.length &&\n             (digit = ch.charCodeAt(part1.length)) >= 48 && digit <= 57; ) {\n        part1                         = ch.substr(0, part1.length + 1);\n      }\n      part2                           = ch.substr(part1.length);\n      if (part1.length > 2) {\n        part1                        += ';';\n      }\n    } else if (start == '\\u001BO') {\n      part1                           = start;\n      part2                           = ch.substr(2);\n    }\n    if (part1 != undefined) {\n      ch                              = part1                                 +\n                                       ((event.shiftKey             ? 1 : 0)  +\n                                        (event.altKey|event.metaKey ? 2 : 0)  +\n                                        (event.ctrlKey              ? 4 : 0)) +\n                                        part2;\n    } else if (ch.length == 1 && (event.altKey || event.metaKey)) {\n      ch                              = '\\u001B' + ch;\n    }\n  }\n\n  if (this.menu.style.visibility == 'hidden') {\n    // this.vt100('R: c=');\n    // for (var i = 0; i < ch.length; i++)\n    //   this.vt100((i != 0 ? ', ' : '') + ch.charCodeAt(i));\n    // this.vt100('\\r\\n');\n    this.keysPressed(ch);\n  }\n};\n\nVT100.prototype.inspect = function(o, d) {\n  if (d == undefined) {\n    d       = 0;\n  }\n  var rc    = '';\n  if (typeof o == 'object' && ++d < 2) {\n    rc      = '[\\r\\n';\n    for (i in o) {\n      rc   += this.spaces(d * 2) + i + ' -> ';\n      try {\n        rc += this.inspect(o[i], d);\n      } catch (e) {\n        rc += '?' + '?' + '?\\r\\n';\n      }\n    }\n    rc     += ']\\r\\n';\n  } else {\n    rc     += ('' + o).replace(/\\n/g, ' ').replace(/ +/g,' ') + '\\r\\n';\n  }\n  return rc;\n};\n\nVT100.prototype.checkComposedKeys = function(event) {\n  // Composed keys (at least on Linux) do not generate normal events.\n  // Instead, they get entered into the text field. We normally catch\n  // this on the next keyup event.\n  var s              = this.input.value;\n  if (s.length) {\n    this.input.value = '';\n    if (this.menu.style.visibility == 'hidden') {\n      this.keysPressed(s);\n    }\n  }\n};\n\nVT100.prototype.fixEvent = function(event) {\n  // Some browsers report AltGR as a combination of ALT and CTRL. As AltGr\n  // is used as a second-level selector, clear the modifier bits before\n  // handling the event.\n  if (event.ctrlKey && event.altKey) {\n    var fake                = [ ];\n    fake.charCode           = event.charCode;\n    fake.keyCode            = event.keyCode;\n    fake.ctrlKey            = false;\n    fake.shiftKey           = event.shiftKey;\n    fake.altKey             = false;\n    fake.metaKey            = event.metaKey;\n    return fake;\n  }\n\n  // Some browsers fail to translate keys, if both shift and alt/meta is\n  // pressed at the same time. 
We try to translate those cases, but that\n  // only works for US keyboard layouts.\n  if (event.shiftKey) {\n    var u                   = undefined;\n    var s                   = undefined;\n    switch (this.lastNormalKeyDownEvent.keyCode) {\n    case  39: /* ' -> \" */ u = 39; s =  34; break;\n    case  44: /* , -> < */ u = 44; s =  60; break;\n    case  45: /* - -> _ */ u = 45; s =  95; break;\n    case  46: /* . -> > */ u = 46; s =  62; break;\n    case  47: /* / -> ? */ u = 47; s =  63; break;\n\n    case  48: /* 0 -> ) */ u = 48; s =  41; break;\n    case  49: /* 1 -> ! */ u = 49; s =  33; break;\n    case  50: /* 2 -> @ */ u = 50; s =  64; break;\n    case  51: /* 3 -> # */ u = 51; s =  35; break;\n    case  52: /* 4 -> $ */ u = 52; s =  36; break;\n    case  53: /* 5 -> % */ u = 53; s =  37; break;\n    case  54: /* 6 -> ^ */ u = 54; s =  94; break;\n    case  55: /* 7 -> & */ u = 55; s =  38; break;\n    case  56: /* 8 -> * */ u = 56; s =  42; break;\n    case  57: /* 9 -> ( */ u = 57; s =  40; break;\n\n    case  59: /* ; -> : */ u = 59; s =  58; break;\n    case  61: /* = -> + */ u = 61; s =  43; break;\n    case  91: /* [ -> { */ u = 91; s = 123; break;\n    case  92: /* \\ -> | */ u = 92; s = 124; break;\n    case  93: /* ] -> } */ u = 93; s = 125; break;\n    case  96: /* ` -> ~ */ u = 96; s = 126; break;\n\n    case 109: /* - -> _ */ u = 45; s =  95; break;\n    case 111: /* / -> ? */ u = 47; s =  63; break;\n\n    case 186: /* ; -> : */ u = 59; s =  58; break;\n    case 187: /* = -> + */ u = 61; s =  43; break;\n    case 188: /* , -> < */ u = 44; s =  60; break;\n    case 189: /* - -> _ */ u = 45; s =  95; break;\n    case 173: /* - -> _ */ u = 45; s =  95; break; // FF15 Patch\n    case 190: /* . -> > */ u = 46; s =  62; break;\n    case 191: /* / -> ? */ u = 47; s =  63; break;\n    case 192: /* ` -> ~ */ u = 96; s = 126; break;\n    case 219: /* [ -> { */ u = 91; s = 123; break;\n    case 220: /* \\ -> | */ u = 92; s = 124; break;\n    case 221: /* ] -> } */ u = 93; s = 125; break;\n    case 222: /* ' -> \" */ u = 39; s =  34; break;\n    default:                                break;\n    }\n    if (s && (event.charCode == u || event.charCode == 0)) {\n      var fake              = [ ];\n      fake.charCode         = s;\n      fake.keyCode          = event.keyCode;\n      fake.ctrlKey          = event.ctrlKey;\n      fake.shiftKey         = event.shiftKey;\n      fake.altKey           = event.altKey;\n      fake.metaKey          = event.metaKey;\n      return fake;\n    }\n  }\n  return event;\n};\n\nVT100.prototype.keyDown = function(event) {\n  // this.vt100('D: c=' + event.charCode + ', k=' + event.keyCode +\n  //            (event.shiftKey || event.ctrlKey || event.altKey ||\n  //             event.metaKey ? ', ' +\n  //             (event.shiftKey ? 'S' : '') + (event.ctrlKey ? 'C' : '') +\n  //             (event.altKey ? 'A' : '') + (event.metaKey ? 
'M' : '') : '') +\n  //            '\\r\\n');\n  this.checkComposedKeys(event);\n  this.lastKeyPressedEvent      = undefined;\n  this.lastKeyDownEvent         = undefined;\n  this.lastNormalKeyDownEvent   = event;\n\n  // Swiss keyboard conflicts:\n  // [ 59\n  // ] 192\n  // ' 219 (dead key)\n  // { 220\n  // ~ 221 (dead key)\n  // } 223\n  // French keyboard conflicts:\n  // ~ 50 (dead key)\n  // } 107\n  var asciiKey                  =\n    event.keyCode ==  32                         ||\n    event.keyCode >=  48 && event.keyCode <=  57 ||\n    event.keyCode >=  65 && event.keyCode <=  90;\n  var alphNumKey                =\n    asciiKey                                     ||\n    event.keyCode ==  59 ||\n    event.keyCode >=  96 && event.keyCode <= 105 ||\n    event.keyCode == 107 ||\n    event.keyCode == 192 ||\n    event.keyCode >= 219 && event.keyCode <= 221 ||\n    event.keyCode == 223 ||\n    event.keyCode == 226;\n  var normalKey                 =\n    alphNumKey                                   ||\n    event.keyCode ==  61 ||\n    event.keyCode == 106 ||\n    event.keyCode >= 109 && event.keyCode <= 111 ||\n    event.keyCode >= 186 && event.keyCode <= 191 ||\n    event.keyCode == 222 ||\n    event.keyCode == 252;\n  try {\n    if (navigator.appName == 'Konqueror') {\n      normalKey                |= event.keyCode < 128;\n    }\n  } catch (e) {\n  }\n\n  // We normally prefer to look at keypress events, as they perform the\n  // translation from keyCode to charCode. This is important, as the\n  // translation is locale-dependent.\n  // But for some keys, we must intercept them during the keydown event,\n  // as they would otherwise get interpreted by the browser.\n  // Even when doing all of this, there are some keys that we can never\n  // intercept. This applies to some of the menu navigation keys in IE.\n  // In fact, we see them, but we cannot stop IE from seeing them, too.\n  if ((event.charCode || event.keyCode) &&\n      ((alphNumKey && (event.ctrlKey || event.altKey || event.metaKey) &&\n        !event.shiftKey &&\n        // Some browsers signal AltGR as both CTRL and ALT. 
Do not try to\n        // interpret this sequence ourselves, as some keyboard layouts use\n        // it for second-level layouts.\n        !(event.ctrlKey && event.altKey)) ||\n       this.catchModifiersEarly && normalKey && !alphNumKey &&\n       (event.ctrlKey || event.altKey || event.metaKey) ||\n       !normalKey)) {\n    this.lastKeyDownEvent       = event;\n    var fake                    = [ ];\n    fake.ctrlKey                = event.ctrlKey;\n    fake.shiftKey               = event.shiftKey;\n    fake.altKey                 = event.altKey;\n    fake.metaKey                = event.metaKey;\n    if (asciiKey) {\n      fake.charCode             = event.keyCode;\n      fake.keyCode              = 0;\n    } else {\n      fake.charCode             = 0;\n      fake.keyCode              = event.keyCode;\n      if (!alphNumKey && event.shiftKey) {\n        fake                    = this.fixEvent(fake);\n      }\n    }\n\n    this.handleKey(fake);\n    this.lastNormalKeyDownEvent = undefined;\n\n    try {\n      // For non-IE browsers\n      event.stopPropagation();\n      event.preventDefault();\n    } catch (e) {\n    }\n    try {\n      // For IE\n      event.cancelBubble = true;\n      event.returnValue  = false;\n      event.keyCode      = 0;\n    } catch (e) {\n    }\n\n    return false;\n  }\n  return true;\n};\n\nVT100.prototype.keyPressed = function(event) {\n  // this.vt100('P: c=' + event.charCode + ', k=' + event.keyCode +\n  //            (event.shiftKey || event.ctrlKey || event.altKey ||\n  //             event.metaKey ? ', ' +\n  //             (event.shiftKey ? 'S' : '') + (event.ctrlKey ? 'C' : '') +\n  //             (event.altKey ? 'A' : '') + (event.metaKey ? 'M' : '') : '') +\n  //            '\\r\\n');\n  if (this.lastKeyDownEvent) {\n    // If we already processed the key on keydown, do not process it\n    // again here. Ideally, the browser should not even have generated a\n    // keypress event in this case. But that does not appear to always work.\n    this.lastKeyDownEvent     = undefined;\n  } else {\n    this.handleKey(event.altKey || event.metaKey\n                   ? this.fixEvent(event) : event);\n  }\n\n  try {\n    // For non-IE browsers\n    event.preventDefault();\n  } catch (e) {\n  }\n\n  try {\n    // For IE\n    event.cancelBubble = true;\n    event.returnValue  = false;\n    event.keyCode      = 0;\n  } catch (e) {\n  }\n\n  this.lastNormalKeyDownEvent = undefined;\n  this.lastKeyPressedEvent    = event;\n  return false;\n};\n\nVT100.prototype.keyUp = function(event) {\n  // this.vt100('U: c=' + event.charCode + ', k=' + event.keyCode +\n  //            (event.shiftKey || event.ctrlKey || event.altKey ||\n  //             event.metaKey ? ', ' +\n  //             (event.shiftKey ? 'S' : '') + (event.ctrlKey ? 'C' : '') +\n  //             (event.altKey ? 'A' : '') + (event.metaKey ? 'M' : '') : '') +\n  //            '\\r\\n');\n  if (this.lastKeyPressedEvent) {\n    // The compose key on Linux occasionally confuses the browser and keeps\n    // inserting bogus characters into the input field, even if just a regular\n    // key has been pressed. Detect this case and drop the bogus characters.\n    (event.target ||\n     event.srcElement).value      = '';\n  } else {\n    // This is usually where we notice that a key has been composed and\n    // thus failed to generate normal events.\n    this.checkComposedKeys(event);\n\n    // Some browsers don't report keypress events if ctrl or alt is pressed\n    // for non-alphanumerical keys. 
Patch things up for now, but in the\n    // future we will catch these keys earlier (in the keydown handler).\n    if (this.lastNormalKeyDownEvent) {\n      // this.vt100('ENABLING EARLY CATCHING OF MODIFIER KEYS\\r\\n');\n      this.catchModifiersEarly    = true;\n      var asciiKey                =\n        event.keyCode ==  32                         ||\n        // Conflicts with dead key ~ (code 50) on French keyboards\n        //event.keyCode >=  48 && event.keyCode <=  57 ||\n        event.keyCode >=  48 && event.keyCode <=  49 ||\n        event.keyCode >=  51 && event.keyCode <=  57 ||\n        event.keyCode >=  65 && event.keyCode <=  90;\n      var alphNumKey              =\n        asciiKey                                     ||\n        event.keyCode ==  50                         ||\n        event.keyCode >=  96 && event.keyCode <= 105;\n      var normalKey               =\n        alphNumKey                                   ||\n        event.keyCode ==  59 || event.keyCode ==  61 ||\n        event.keyCode == 106 || event.keyCode == 107 ||\n        event.keyCode >= 109 && event.keyCode <= 111 ||\n        event.keyCode >= 186 && event.keyCode <= 192 ||\n        event.keyCode >= 219 && event.keyCode <= 223 ||\n        event.keyCode == 252;\n      var fake                    = [ ];\n      fake.ctrlKey                = event.ctrlKey;\n      fake.shiftKey               = event.shiftKey;\n      fake.altKey                 = event.altKey;\n      fake.metaKey                = event.metaKey;\n      if (asciiKey) {\n        fake.charCode             = event.keyCode;\n        fake.keyCode              = 0;\n      } else {\n        fake.charCode             = 0;\n        fake.keyCode              = event.keyCode;\n        if (!alphNumKey && (event.ctrlKey || event.altKey || event.metaKey)) {\n          fake                    = this.fixEvent(fake);\n        }\n      }\n      this.lastNormalKeyDownEvent = undefined;\n      this.handleKey(fake);\n    }\n  }\n\n  try {\n    // For IE\n    event.cancelBubble            = true;\n    event.returnValue             = false;\n    event.keyCode                 = 0;\n  } catch (e) {\n  }\n\n  this.lastKeyDownEvent           = undefined;\n  this.lastKeyPressedEvent        = undefined;\n  return false;\n};\n\nVT100.prototype.animateCursor = function(inactive) {\n  if (!this.cursorInterval) {\n    this.cursorInterval       = setInterval(\n      function(vt100) {\n        return function() {\n          vt100.animateCursor();\n\n          // Use this opportunity to check whether the user entered a composed\n          // key, or whether somebody pasted text into the textfield.\n          vt100.checkComposedKeys();\n        }\n      }(this), 500);\n  }\n  if (inactive != undefined || this.cursor.className != 'inactive') {\n    if (inactive) {\n      this.cursor.className   = 'inactive';\n    } else {\n      if (this.blinkingCursor) {\n        this.cursor.className = this.cursor.className == 'bright'\n                                ? 
'dim' : 'bright';\n      } else {\n        this.cursor.className = 'bright';\n      }\n    }\n  }\n};\n\nVT100.prototype.blurCursor = function() {\n  this.animateCursor(true);\n};\n\nVT100.prototype.focusCursor = function() {\n  this.animateCursor(false);\n};\n\nVT100.prototype.flashScreen = function() {\n  this.isInverted       = !this.isInverted;\n  this.refreshInvertedState();\n  this.isInverted       = !this.isInverted;\n  setTimeout(function(vt100) {\n               return function() {\n                 vt100.refreshInvertedState();\n               };\n             }(this), 100);\n};\n\nVT100.prototype.beep = function() {\n  if (this.visualBell) {\n    this.flashScreen();\n  } else {\n    try {\n      this.beeper.Play();\n    } catch (e) {\n      try {\n        this.beeper.src = 'beep.wav';\n      } catch (e) {\n      }\n    }\n  }\n};\n\nVT100.prototype.bs = function() {\n  if (this.cursorX > 0) {\n    this.gotoXY(this.cursorX - 1, this.cursorY);\n    this.needWrap = false;\n  }\n};\n\nVT100.prototype.ht = function(count) {\n  if (count == undefined) {\n    count        = 1;\n  }\n  var cx         = this.cursorX;\n  while (count-- > 0) {\n    while (cx++ < this.terminalWidth) {\n      var tabState = this.userTabStop[cx];\n      if (tabState == false) {\n        // Explicitly cleared tab stop\n        continue;\n      } else if (tabState) {\n        // Explicitly set tab stop\n        break;\n      } else {\n        // Default tab stop at each eighth column\n        if (cx % 8 == 0) {\n          break;\n        }\n      }\n    }\n  }\n  if (cx > this.terminalWidth - 1) {\n    cx           = this.terminalWidth - 1;\n  }\n  if (cx != this.cursorX) {\n    this.gotoXY(cx, this.cursorY);\n  }\n};\n\nVT100.prototype.rt = function(count) {\n  if (count == undefined) {\n    count          = 1;\n  }\n  var cx           = this.cursorX;\n  while (count-- > 0) {\n    while (cx-- > 0) {\n      var tabState = this.userTabStop[cx];\n      if (tabState == false) {\n        // Explicitly cleared tab stop\n        continue;\n      } else if (tabState) {\n        // Explicitly set tab stop\n        break;\n      } else {\n        // Default tab stop at each eighth column\n        if (cx % 8 == 0) {\n          break;\n        }\n      }\n    }\n  }\n  if (cx < 0) {\n    cx             = 0;\n  }\n  if (cx != this.cursorX) {\n    this.gotoXY(cx, this.cursorY);\n  }\n};\n\nVT100.prototype.cr = function() {\n  this.gotoXY(0, this.cursorY);\n  this.needWrap = false;\n};\n\nVT100.prototype.lf = function(count) {\n  if (count == undefined) {\n    count    = 1;\n  } else {\n    if (count > this.terminalHeight) {\n      count  = this.terminalHeight;\n    }\n    if (count < 1) {\n      count  = 1;\n    }\n  }\n  while (count-- > 0) {\n    if (this.cursorY == this.bottom - 1) {\n      this.scrollRegion(0, this.top + 1,\n                        this.terminalWidth, this.bottom - this.top - 1,\n                        0, -1, this.color, this.style);\n    } else if (this.cursorY < this.terminalHeight - 1) {\n      this.gotoXY(this.cursorX, this.cursorY + 1);\n    }\n  }\n};\n\nVT100.prototype.ri = function(count) {\n  if (count == undefined) {\n    count   = 1;\n  } else {\n    if (count > this.terminalHeight) {\n      count = this.terminalHeight;\n    }\n    if (count < 1) {\n      count = 1;\n    }\n  }\n  while (count-- > 0) {\n    if (this.cursorY == this.top) {\n      this.scrollRegion(0, this.top,\n                        this.terminalWidth, this.bottom - this.top - 1,\n                
        0, 1, this.color, this.style);\n    } else if (this.cursorY > 0) {\n      this.gotoXY(this.cursorX, this.cursorY - 1);\n    }\n  }\n  this.needWrap = false;\n};\n\nVT100.prototype.respondID = function() {\n  this.respondString += '\\u001B[?6c';\n};\n\nVT100.prototype.respondSecondaryDA = function() {\n  this.respondString += '\\u001B[>0;0;0c';\n};\n\n\nVT100.prototype.updateStyle = function() {\n  this.style   = '';\n  if (this.attr & 0x0200 /* ATTR_UNDERLINE */) {\n    this.style = 'text-decoration: underline;';\n  }\n  var bg       = (this.attr >> 4) & 0xF;\n  var fg       =  this.attr       & 0xF;\n  if (this.attr & 0x0100 /* ATTR_REVERSE */) {\n    var tmp    = bg;\n    bg         = fg;\n    fg         = tmp;\n  }\n  if ((this.attr & (0x0100 /* ATTR_REVERSE */ | 0x0400 /* ATTR_DIM */)) == 0x0400 /* ATTR_DIM */) {\n    fg         = 8; // Dark grey\n  } else if (this.attr & 0x0800 /* ATTR_BRIGHT */) {\n    fg        |= 8;\n    this.style = 'font-weight: bold;';\n  }\n  if (this.attr & 0x1000 /* ATTR_BLINK */) {\n    this.style = 'text-decoration: blink;';\n  }\n  this.color   = 'ansi' + fg + ' bgAnsi' + bg;\n};\n\nVT100.prototype.setAttrColors = function(attr) {\n  if (attr != this.attr) {\n    this.attr = attr;\n    this.updateStyle();\n  }\n};\n\nVT100.prototype.saveCursor = function() {\n  this.savedX[this.currentScreen]     = this.cursorX;\n  this.savedY[this.currentScreen]     = this.cursorY;\n  this.savedAttr[this.currentScreen]  = this.attr;\n  this.savedUseGMap                   = this.useGMap;\n  for (var i = 0; i < 4; i++) {\n    this.savedGMap[i]                 = this.GMap[i];\n  }\n  this.savedValid[this.currentScreen] = true;\n};\n\nVT100.prototype.restoreCursor = function() {\n  if (!this.savedValid[this.currentScreen]) {\n    return;\n  }\n  this.attr      = this.savedAttr[this.currentScreen];\n  this.updateStyle();\n  this.useGMap   = this.savedUseGMap;\n  for (var i = 0; i < 4; i++) {\n    this.GMap[i] = this.savedGMap[i];\n  }\n  this.translate = this.GMap[this.useGMap];\n  this.needWrap  = false;\n  this.gotoXY(this.savedX[this.currentScreen],\n              this.savedY[this.currentScreen]);\n};\n\nVT100.prototype.getTransformName = function() {\n  var styles = [ 'transform', 'WebkitTransform', 'MozTransform', 'filter' ];\n  for (var i = 0; i < styles.length; ++i) {\n    if (typeof this.console[0].style[styles[i]] != 'undefined') {\n      return styles[i];\n    }\n  }\n  return undefined;\n};\n\nVT100.prototype.getTransformStyle = function(transform, scale) {\n  return scale && scale != 1.0\n    ? transform == 'filter'\n      ? 'progid:DXImageTransform.Microsoft.Matrix(' +\n                                 'M11=' + (1.0/scale) + ',M12=0,M21=0,M22=1,' +\n                                 \"sizingMethod='auto expand')\"\n      : 'translateX(-50%) ' +\n        'scaleX(' + (1.0/scale) + ') ' +\n        'translateX(50%)'\n    : '';\n};\n\nVT100.prototype.set80_132Mode = function(state) {\n  var transform                  = this.getTransformName();\n  if (transform) {\n    if ((this.console[this.currentScreen].style[transform] != '') == state) {\n      return;\n    }\n    var style                    = state ?\n                                   this.getTransformStyle(transform, 1.65):'';\n    this.console[this.currentScreen].style[transform] = style;\n    this.cursor.style[transform] = style;\n    this.space.style[transform]  = style;\n    this.scale                   = state ? 
1.65 : 1.0;\n    if (transform == 'filter') {\n      this.console[this.currentScreen].style.width = state ? '165%' : '';\n    }\n    this.resizer();\n  }\n};\n\nVT100.prototype.setMode = function(state) {\n  for (var i = 0; i <= this.npar; i++) {\n    if (this.isQuestionMark) {\n      switch (this.par[i]) {\n      case  1: this.cursorKeyMode      = state;                      break;\n      case  3: this.set80_132Mode(state);                            break;\n      case  5: this.isInverted = state; this.refreshInvertedState(); break;\n      case  6: this.offsetMode         = state;                      break;\n      case  7: this.autoWrapMode       = state;                      break;\n      case 1000:\n      case  9: this.mouseReporting     = state;                      break;\n      case 25: this.cursorNeedsShowing = state;\n               if (state) { this.showCursor(); }\n               else       { this.hideCursor(); }                     break;\n      case 1047:\n      case 1049:\n      case 47: this.enableAlternateScreen(state);                    break;\n      default:                                                       break;\n      }\n    } else {\n      switch (this.par[i]) {\n      case  3: this.dispCtrl           = state;                      break;\n      case  4: this.insertMode         = state;                      break;\n      case  20:this.crLfMode           = state;                      break;\n      default:                                                       break;\n      }\n    }\n  }\n};\n\nVT100.prototype.statusReport = function() {\n  // Ready and operational.\n  this.respondString += '\\u001B[0n';\n};\n\nVT100.prototype.cursorReport = function() {\n  this.respondString += '\\u001B[' +\n                        (this.cursorY + (this.offsetMode ? this.top + 1 : 1)) +\n                        ';' +\n                        (this.cursorX + 1) +\n                        'R';\n};\n\nVT100.prototype.setCursorAttr = function(setAttr, xorAttr) {\n  // Changing of cursor color is not implemented.\n};\n\nVT100.prototype.openPrinterWindow = function() {\n  var rc            = true;\n  try {\n    if (!this.printWin || this.printWin.closed) {\n      this.printWin = window.open('', 'print-output',\n        'width=800,height=600,directories=no,location=no,menubar=yes,' +\n        'status=no,toolbar=no,titlebar=yes,scrollbars=yes,resizable=yes');\n      this.printWin.document.body.innerHTML =\n        '<link rel=\"stylesheet\" href=\"' +\n          document.location.protocol + '//' + document.location.host +\n          document.location.pathname.replace(/[^/]*$/, '') +\n          'print-styles.css\" type=\"text/css\">\\n' +\n        '<div id=\"options\"><input id=\"autoprint\" type=\"checkbox\"' +\n          (this.autoprint ? 
' checked' : '') + '>' +\n          'Automatically print page(s) when job is ready' +\n        '</input></div>\n' +\n        '<div id=\"spacer\"><input type=\"checkbox\">&nbsp;</input></div>' +\n        '<pre id=\"print\"></pre>\n';\n      var autoprint = this.printWin.document.getElementById('autoprint');\n      this.addListener(autoprint, 'click',\n                       (function(vt100, autoprint) {\n                         return function() {\n                           vt100.autoprint = autoprint.checked;\n                           vt100.storeUserSettings();\n                           return false;\n                         };\n                       })(this, autoprint));\n      this.printWin.document.title = 'ShellInABox Printer Output';\n    }\n  } catch (e) {\n    // Maybe a popup blocker prevented us from working. Better to catch the\n    // exception, so that we won't break the entire terminal session. The\n    // user probably needs to disable the blocker first before retrying the\n    // operation.\n    rc              = false;\n  }\n  rc               &= this.printWin && !this.printWin.closed &&\n                      (this.printWin.innerWidth ||\n                       this.printWin.document.documentElement.clientWidth ||\n                       this.printWin.document.body.clientWidth) > 1;\n\n  if (!rc && this.printing == 100) {\n    // Different popup blockers work differently. We try to detect a couple\n    // of common methods, and then we retry a moment later, as\n    // false positives are otherwise possible. If we are sure that there is\n    // a popup blocker in effect, we alert the user to it. This is helpful\n    // as some popup blockers have minimal or no UI, and the user might not\n    // notice that they are missing the popup. In any case, we only show at\n    // most one message per print job.\n    this.printing   = true;\n    setTimeout((function(win) {\n                  return function() {\n                    if (!win || win.closed ||\n                        (win.innerWidth ||\n                         win.document.documentElement.clientWidth ||\n                         win.document.body.clientWidth) <= 1) {\n                      alert('Attempted to print, but a popup blocker ' +\n                            'prevented the printer window from opening');\n                    }\n                  };\n                })(this.printWin), 2000);\n  }\n  return rc;\n};\n\nVT100.prototype.sendToPrinter = function(s) {\n  this.openPrinterWindow();\n  try {\n    var doc   = this.printWin.document;\n    var print = doc.getElementById('print');\n    if (print.lastChild && print.lastChild.nodeName == '#text') {\n      print.lastChild.textContent += this.replaceChar(s, ' ', '\u00A0');\n    } else {\n      print.appendChild(doc.createTextNode(this.replaceChar(s, ' ','\u00A0')));\n    }\n  } catch (e) {\n    // There probably was a more aggressive popup blocker that prevented us\n    // from accessing the printer window.\n  }\n};\n\nVT100.prototype.sendControlToPrinter = function(ch) {\n  // We get called whenever doControl() is active. But for the printer, we\n  // only implement a basic line printer that doesn't understand most of\n  // the escape sequences of the VT100 terminal. 
In fact, the only escape\n  // sequence that we really need to recognize is '^[[4i' for turning the\n  // printer off.\n  try {\n    switch (ch) {\n    case  9:\n      // HT\n      this.openPrinterWindow();\n      var doc                 = this.printWin.document;\n      var print               = doc.getElementById('print');\n      var chars               = print.lastChild &&\n                                print.lastChild.nodeName == '#text' ?\n                                print.lastChild.textContent.length : 0;\n      this.sendToPrinter(this.spaces(8 - (chars % 8)));\n      break;\n    case 10:\n      // LF\n      break;\n    case 12:\n      // FF\n      this.openPrinterWindow();\n      var pageBreak           = this.printWin.document.createElement('div');\n      pageBreak.className     = 'pagebreak';\n      pageBreak.innerHTML     = '<hr />';\n      this.printWin.document.getElementById('print').appendChild(pageBreak);\n      break;\n    case 13:\n      // CR\n      this.openPrinterWindow();\n      var lineBreak           = this.printWin.document.createElement('br');\n      this.printWin.document.getElementById('print').appendChild(lineBreak);\n      break;\n    case 27:\n      // ESC\n      this.isEsc              = 1 /* ESesc */;\n      break;\n    default:\n      switch (this.isEsc) {\n      case 1 /* ESesc */:\n        this.isEsc            = 0 /* ESnormal */;\n        switch (ch) {\n        case 0x5B /*[*/:\n          this.isEsc          = 2 /* ESsquare */;\n          break;\n        default:\n          break;\n        }\n        break;\n      case 2 /* ESsquare */:\n        this.npar             = 0;\n        this.par              = [ 0, 0, 0, 0, 0, 0, 0, 0,\n                                  0, 0, 0, 0, 0, 0, 0, 0 ];\n        this.isEsc            = 3 /* ESgetpars */;\n        this.isQuestionMark   = ch == 0x3F /*?*/;\n        if (this.isQuestionMark) {\n          break;\n        }\n        // Fall through\n      case 3 /* ESgetpars */:\n        if (ch == 0x3B /*;*/) {\n          this.npar++;\n          break;\n        } else if (ch >= 0x30 /*0*/ && ch <= 0x39 /*9*/) {\n          var par             = this.par[this.npar];\n          if (par == undefined) {\n            par               = 0;\n          }\n          this.par[this.npar] = 10*par + (ch & 0xF);\n          break;\n        } else {\n          this.isEsc          = 4 /* ESgotpars */;\n        }\n        // Fall through\n      case 4 /* ESgotpars */:\n        this.isEsc            = 0 /* ESnormal */;\n        if (this.isQuestionMark) {\n          break;\n        }\n        switch (ch) {\n        case 0x69 /*i*/:\n          this.csii(this.par[0]);\n          break;\n        default:\n          break;\n        }\n        break;\n      default:\n        this.isEsc            = 0 /* ESnormal */;\n        break;\n      }\n      break;\n    }\n  } catch (e) {\n    // There probably was a more aggressive popup blocker that prevented us\n    // from accessing the printer window.\n  }\n};\n\nVT100.prototype.csiAt = function(number) {\n  // Insert spaces\n  if (number == 0) {\n    number      = 1;\n  }\n  if (number > this.terminalWidth - this.cursorX) {\n    number      = this.terminalWidth - this.cursorX;\n  }\n  this.scrollRegion(this.cursorX, this.cursorY,\n                    this.terminalWidth - this.cursorX - number, 1,\n                    number, 0, this.color, this.style);\n  this.needWrap = false;\n};\n\nVT100.prototype.csii = function(number) {\n  // Printer control\n  switch (number) {\n  case 0: // Print Screen\n 
   window.print();\n    break;\n  case 4: // Stop printing\n    try {\n      if (this.printing && this.printWin && !this.printWin.closed) {\n        var print = this.printWin.document.getElementById('print');\n        while (print.lastChild &&\n               print.lastChild.tagName == 'DIV' &&\n               print.lastChild.className == 'pagebreak') {\n          // Remove trailing blank pages\n          print.removeChild(print.lastChild);\n        }\n        if (this.autoprint) {\n          this.printWin.print();\n        }\n      }\n    } catch (e) {\n    }\n    this.printing = false;\n    break;\n  case 5: // Start printing\n    if (!this.printing && this.printWin && !this.printWin.closed) {\n      this.printWin.document.getElementById('print').innerHTML = '';\n    }\n    this.printing = 100;\n    break;\n  default:\n    break;\n  }\n};\n\nVT100.prototype.csiJ = function(number) {\n  switch (number) {\n  case 0: // Erase from cursor to end of display\n    this.clearRegion(this.cursorX, this.cursorY,\n                     this.terminalWidth - this.cursorX, 1,\n                     this.color, this.style);\n    if (this.cursorY < this.terminalHeight-2) {\n      this.clearRegion(0, this.cursorY+1,\n                       this.terminalWidth, this.terminalHeight-this.cursorY-1,\n                       this.color, this.style);\n    }\n    break;\n  case 1: // Erase from start to cursor\n    if (this.cursorY > 0) {\n      this.clearRegion(0, 0,\n                       this.terminalWidth, this.cursorY,\n                       this.color, this.style);\n    }\n    this.clearRegion(0, this.cursorY, this.cursorX + 1, 1,\n                     this.color, this.style);\n    break;\n  case 2: // Erase whole display\n    this.clearRegion(0, 0, this.terminalWidth, this.terminalHeight,\n                     this.color, this.style);\n    break;\n  default:\n    return;\n  }\n  this.needWrap = false;\n};\n\nVT100.prototype.csiK = function(number) {\n  switch (number) {\n  case 0: // Erase from cursor to end of line\n    this.clearRegion(this.cursorX, this.cursorY,\n                     this.terminalWidth - this.cursorX, 1,\n                     this.color, this.style);\n    break;\n  case 1: // Erase from start of line to cursor\n    this.clearRegion(0, this.cursorY, this.cursorX + 1, 1,\n                     this.color, this.style);\n    break;\n  case 2: // Erase whole line\n    this.clearRegion(0, this.cursorY, this.terminalWidth, 1,\n                     this.color, this.style);\n    break;\n  default:\n    return;\n  }\n  this.needWrap = false;\n};\n\nVT100.prototype.csiL = function(number) {\n  // Open line by inserting blank line(s)\n  if (this.cursorY >= this.bottom) {\n    return;\n  }\n  if (number == 0) {\n    number = 1;\n  }\n  if (number > this.bottom - this.cursorY) {\n    number = this.bottom - this.cursorY;\n  }\n  this.scrollRegion(0, this.cursorY,\n                    this.terminalWidth, this.bottom - this.cursorY - number,\n                    0, number, this.color, this.style);\n  this.needWrap = false;\n};\n\nVT100.prototype.csiM = function(number) {\n  // Delete line(s), scrolling up the bottom of the screen.\n  if (this.cursorY >= this.bottom) {\n    return;\n  }\n  if (number == 0) {\n    number = 1;\n  }\n  if (number > this.bottom - this.cursorY) {\n    number = this.bottom - this.cursorY;\n  }\n  this.scrollRegion(0, this.cursorY + number,\n                    this.terminalWidth, this.bottom - this.cursorY - number,\n                    0, -number, this.color, this.style);\n  this.needWrap = 
false;\n};\n\nVT100.prototype.csim = function() {\n  for (var i = 0; i <= this.npar; i++) {\n    switch (this.par[i]) {\n    case 0:  this.attr  = 0x00F0 /* ATTR_DEFAULT */;                                break;\n    case 1:  this.attr  = (this.attr & ~0x0400 /* ATTR_DIM */)|0x0800 /* ATTR_BRIGHT */;         break;\n    case 2:  this.attr  = (this.attr & ~0x0800 /* ATTR_BRIGHT */)|0x0400 /* ATTR_DIM */;         break;\n    case 4:  this.attr |= 0x0200 /* ATTR_UNDERLINE */;                              break;\n    case 5:  this.attr |= 0x1000 /* ATTR_BLINK */;                                  break;\n    case 7:  this.attr |= 0x0100 /* ATTR_REVERSE */;                                break;\n    case 10:\n      this.translate    = this.GMap[this.useGMap];\n      this.dispCtrl     = false;\n      this.toggleMeta   = false;\n      break;\n    case 11:\n      this.translate    = this.CodePage437Map;\n      this.dispCtrl     = true;\n      this.toggleMeta   = false;\n      break;\n    case 12:\n      this.translate    = this.CodePage437Map;\n      this.dispCtrl     = true;\n      this.toggleMeta   = true;\n      break;\n    case 21:\n    case 22: this.attr &= ~(0x0800 /* ATTR_BRIGHT */|0x0400 /* ATTR_DIM */);                     break;\n    case 24: this.attr &= ~ 0x0200 /* ATTR_UNDERLINE */;                            break;\n    case 25: this.attr &= ~ 0x1000 /* ATTR_BLINK */;                                break;\n    case 27: this.attr &= ~ 0x0100 /* ATTR_REVERSE */;                              break;\n    case 38: this.attr  = (this.attr & ~(0x0400 /* ATTR_DIM */|0x0800 /* ATTR_BRIGHT */|0x0F))|\n                          0x0200 /* ATTR_UNDERLINE */;                              break;\n    case 39: this.attr &= ~(0x0400 /* ATTR_DIM */|0x0800 /* ATTR_BRIGHT */|0x0200 /* ATTR_UNDERLINE */|0x0F); break;\n    case 49: this.attr |= 0xF0;                                        break;\n    default:\n      if (this.par[i] >= 30 && this.par[i] <= 37) {\n          var fg        = this.par[i] - 30;\n          this.attr     = (this.attr & ~0x0F) | fg;\n      } else if (this.par[i] >= 40 && this.par[i] <= 47) {\n          var bg        = this.par[i] - 40;\n          this.attr     = (this.attr & ~0xF0) | (bg << 4);\n      }\n      break;\n    }\n  }\n  this.updateStyle();\n};\n\nVT100.prototype.csiP = function(number) {\n  // Delete character(s) following cursor\n  if (number == 0) {\n    number = 1;\n  }\n  if (number > this.terminalWidth - this.cursorX) {\n    number = this.terminalWidth - this.cursorX;\n  }\n  this.scrollRegion(this.cursorX + number, this.cursorY,\n                    this.terminalWidth - this.cursorX - number, 1,\n                    -number, 0, this.color, this.style);\n  this.needWrap = false;\n};\n\nVT100.prototype.csiX = function(number) {\n  // Clear characters following cursor\n  if (number == 0) {\n    number++;\n  }\n  if (number > this.terminalWidth - this.cursorX) {\n    number = this.terminalWidth - this.cursorX;\n  }\n  this.clearRegion(this.cursorX, this.cursorY, number, 1,\n                   this.color, this.style);\n  this.needWrap = false;\n};\n\nVT100.prototype.settermCommand = function() {\n  // Setterm commands are not implemented\n};\n\nVT100.prototype.doControl = function(ch) {\n  if (this.printing) {\n    this.sendControlToPrinter(ch);\n    return '';\n  }\n  var lineBuf                = '';\n  switch (ch) {\n  case 0x00: /* ignored */                                              break;\n  case 0x08: this.bs();                                                 break;\n  
case 0x09: this.ht();                                                 break;\n  case 0x0A:\n  case 0x0B:\n  case 0x0C:\n  case 0x84: this.lf(); if (!this.crLfMode)                             break;\n  case 0x0D: this.cr();                                                 break;\n  case 0x85: this.cr(); this.lf();                                      break;\n  case 0x0E: this.useGMap     = 1;\n             this.translate   = this.GMap[1];\n             this.dispCtrl    = true;                                   break;\n  case 0x0F: this.useGMap     = 0;\n             this.translate   = this.GMap[0];\n             this.dispCtrl    = false;                                  break;\n  case 0x18:\n  case 0x1A: this.isEsc       = 0 /* ESnormal */;                               break;\n  case 0x1B: this.isEsc       = 1 /* ESesc */;                                  break;\n  case 0x7F: /* ignored */                                              break;\n  case 0x88: this.userTabStop[this.cursorX] = true;                     break;\n  case 0x8D: this.ri();                                                 break;\n  case 0x8E: this.isEsc       = 18 /* ESss2 */;                                  break;\n  case 0x8F: this.isEsc       = 19 /* ESss3 */;                                  break;\n  case 0x9A: this.respondID();                                          break;\n  case 0x9B: this.isEsc       = 2 /* ESsquare */;                               break;\n  case 0x07: if (this.isEsc != 17 /* EStitle */) {\n               this.beep();                                             break;\n             }\n             /* fall thru */\n  default:   switch (this.isEsc) {\n    case 1 /* ESesc */:\n      this.isEsc              = 0 /* ESnormal */;\n      switch (ch) {\n/*%*/ case 0x25: this.isEsc   = 13 /* ESpercent */;                              break;\n/*(*/ case 0x28: this.isEsc   = 8 /* ESsetG0 */;                                break;\n/*-*/ case 0x2D:\n/*)*/ case 0x29: this.isEsc   = 9 /* ESsetG1 */;                                break;\n/*.*/ case 0x2E:\n/***/ case 0x2A: this.isEsc   = 10 /* ESsetG2 */;                                break;\n/*/*/ case 0x2F:\n/*+*/ case 0x2B: this.isEsc   = 11 /* ESsetG3 */;                                break;\n/*#*/ case 0x23: this.isEsc   = 7 /* EShash */;                                 break;\n/*7*/ case 0x37: this.saveCursor();                                     break;\n/*8*/ case 0x38: this.restoreCursor();                                  break;\n/*>*/ case 0x3E: this.applKeyMode = false;                              break;\n/*=*/ case 0x3D: this.applKeyMode = true;                               break;\n/*D*/ case 0x44: this.lf();                                             break;\n/*E*/ case 0x45: this.cr(); this.lf();                                  break;\n/*M*/ case 0x4D: this.ri();                                             break;\n/*N*/ case 0x4E: this.isEsc   = 18 /* ESss2 */;                                  break;\n/*O*/ case 0x4F: this.isEsc   = 19 /* ESss3 */;                                  break;\n/*H*/ case 0x48: this.userTabStop[this.cursorX] = true;                 break;\n/*Z*/ case 0x5A: this.respondID();                                      break;\n/*[*/ case 0x5B: this.isEsc   = 2 /* ESsquare */;                               break;\n/*]*/ case 0x5D: this.isEsc   = 15 /* ESnonstd */;                               break;\n/*c*/ case 0x63: this.reset();                                          break;\n/*g*/ case 0x67: this.flashScreen();      
                              break;\n      default:                                                          break;\n      }\n      break;\n    case 15 /* ESnonstd */:\n      switch (ch) {\n/*0*/ case 0x30:\n/*1*/ case 0x31:\n/*2*/ case 0x32: this.isEsc   = 17 /* EStitle */; this.titleString = '';         break;\n/*P*/ case 0x50: this.npar    = 0; this.par = [ 0, 0, 0, 0, 0, 0, 0 ];\n                 this.isEsc   = 16 /* ESpalette */;                              break;\n/*R*/ case 0x52: // Palette support is not implemented\n                 this.isEsc   = 0 /* ESnormal */;                               break;\n      default:   this.isEsc   = 0 /* ESnormal */;                               break;\n      }\n      break;\n    case 16 /* ESpalette */:\n      if ((ch >= 0x30 /*0*/ && ch <= 0x39 /*9*/) ||\n          (ch >= 0x41 /*A*/ && ch <= 0x46 /*F*/) ||\n          (ch >= 0x61 /*a*/ && ch <= 0x66 /*f*/)) {\n        this.par[this.npar++] = ch > 0x39  /*9*/ ? (ch & 0xDF) - 55\n                                                : (ch & 0xF);\n        if (this.npar == 7) {\n          // Palette support is not implemented\n          this.isEsc          = 0 /* ESnormal */;\n        }\n      } else {\n        this.isEsc            = 0 /* ESnormal */;\n      }\n      break;\n    case 2 /* ESsquare */:\n      this.npar               = 0;\n      this.par                = [ 0, 0, 0, 0, 0, 0, 0, 0,\n                                  0, 0, 0, 0, 0, 0, 0, 0 ];\n      this.isEsc              = 3 /* ESgetpars */;\n/*[*/ if (ch == 0x5B) { // Function key\n        this.isEsc            = 6 /* ESfunckey */;\n        break;\n      } else {\n/*?*/   this.isQuestionMark   = ch == 0x3F;\n        if (this.isQuestionMark) {\n          break;\n        }\n      }\n      // Fall through\n    case 5 /* ESdeviceattr */:\n    case 3 /* ESgetpars */:\n/*;*/ if (ch == 0x3B) {\n        this.npar++;\n        break;\n      } else if (ch >= 0x30 /*0*/ && ch <= 0x39 /*9*/) {\n        var par               = this.par[this.npar];\n        if (par == undefined) {\n          par                 = 0;\n        }\n        this.par[this.npar]   = 10*par + (ch & 0xF);\n        break;\n      } else if (this.isEsc == 5 /* ESdeviceattr */) {\n        switch (ch) {\n/*c*/   case 0x63: if (this.par[0] == 0) this.respondSecondaryDA();     break;\n/*m*/   case 0x6D: /* (re)set key modifier resource values */           break;\n/*n*/   case 0x6E: /* disable key modifier resource values */           break;\n/*p*/   case 0x70: /* set pointer mode resource value */                break;\n        default:                                                        break;\n        }\n        this.isEsc            = 0 /* ESnormal */;\n        break;\n      } else {\n        this.isEsc            = 4 /* ESgotpars */;\n      }\n      // Fall through\n    case 4 /* ESgotpars */:\n      this.isEsc              = 0 /* ESnormal */;\n      if (this.isQuestionMark) {\n        switch (ch) {\n/*h*/   case 0x68: this.setMode(true);                                  break;\n/*l*/   case 0x6C: this.setMode(false);                                 break;\n/*c*/   case 0x63: this.setCursorAttr(this.par[2], this.par[1]);        break;\n        default:                                                        break;\n        }\n        this.isQuestionMark   = false;\n        break;\n      }\n      switch (ch) {\n/*!*/ case 0x21: this.isEsc   = 12 /* ESbang */;                                 break;\n/*>*/ case 0x3E: if (!this.npar) this.isEsc  = 5 /* ESdeviceattr */;            
break;\n/*G*/ case 0x47:\n/*`*/ case 0x60: this.gotoXY(this.par[0] - 1, this.cursorY);            break;\n/*A*/ case 0x41: this.gotoXY(this.cursorX,\n                             this.cursorY - (this.par[0] ? this.par[0] : 1));\n                                                                        break;\n/*B*/ case 0x42:\n/*e*/ case 0x65: this.gotoXY(this.cursorX,\n                             this.cursorY + (this.par[0] ? this.par[0] : 1));\n                                                                        break;\n/*C*/ case 0x43:\n/*a*/ case 0x61: this.gotoXY(this.cursorX + (this.par[0] ? this.par[0] : 1),\n                             this.cursorY);                             break;\n/*D*/ case 0x44: this.gotoXY(this.cursorX - (this.par[0] ? this.par[0] : 1),\n                             this.cursorY);                             break;\n/*E*/ case 0x45: this.gotoXY(0, this.cursorY + (this.par[0] ? this.par[0] :1));\n                                                                        break;\n/*F*/ case 0x46: this.gotoXY(0, this.cursorY - (this.par[0] ? this.par[0] :1));\n                                                                        break;\n/*d*/ case 0x64: this.gotoXaY(this.cursorX, this.par[0] - 1);           break;\n/*H*/ case 0x48:\n/*f*/ case 0x66: this.gotoXaY(this.par[1] - 1, this.par[0] - 1);        break;\n/*I*/ case 0x49: this.ht(this.par[0] ? this.par[0] : 1);                break;\n/*@*/ case 0x40: this.csiAt(this.par[0]);                               break;\n/*i*/ case 0x69: this.csii(this.par[0]);                                break;\n/*J*/ case 0x4A: this.csiJ(this.par[0]);                                break;\n/*K*/ case 0x4B: this.csiK(this.par[0]);                                break;\n/*L*/ case 0x4C: this.csiL(this.par[0]);                                break;\n/*M*/ case 0x4D: this.csiM(this.par[0]);                                break;\n/*m*/ case 0x6D: this.csim();                                           break;\n/*P*/ case 0x50: this.csiP(this.par[0]);                                break;\n/*X*/ case 0x58: this.csiX(this.par[0]);                                break;\n/*S*/ case 0x53: this.lf(this.par[0] ? this.par[0] : 1);                break;\n/*T*/ case 0x54: this.ri(this.par[0] ? this.par[0] : 1);                break;\n/*c*/ case 0x63: if (!this.par[0]) this.respondID();                    break;\n/*g*/ case 0x67: if (this.par[0] == 0) {\n                   this.userTabStop[this.cursorX] = false;\n                 } else if (this.par[0] == 2 || this.par[0] == 3) {\n                   this.userTabStop               = [ ];\n                   for (var i = 0; i < this.terminalWidth; i++) {\n                     this.userTabStop[i]          = false;\n                   }\n                 }\n                 break;\n/*h*/ case 0x68: this.setMode(true);                                    break;\n/*l*/ case 0x6C: this.setMode(false);                                   break;\n/*n*/ case 0x6E: switch (this.par[0]) {\n                 case 5: this.statusReport();                           break;\n                 case 6: this.cursorReport();                           break;\n                 default:                                               break;\n                 }\n                 break;\n/*q*/ case 0x71: // LED control not implemented\n                                                                        break;\n/*r*/ case 0x72: var t        = this.par[0] ? this.par[0] : 1;\n                 var b        = this.par[1] ? 
this.par[1]\n                                            : this.terminalHeight;\n                 if (t < b && b <= this.terminalHeight) {\n                   this.top   = t - 1;\n                   this.bottom= b;\n                   this.gotoXaY(0, 0);\n                 }\n                 break;\n/*b*/ case 0x62: var c        = this.par[0] ? this.par[0] : 1;\n                 if (c > this.terminalWidth * this.terminalHeight) {\n                   c          = this.terminalWidth * this.terminalHeight;\n                 }\n                 while (c-- > 0) {\n                   lineBuf   += this.lastCharacter;\n                 }\n                 break;\n/*s*/ case 0x73: this.saveCursor();                                     break;\n/*u*/ case 0x75: this.restoreCursor();                                  break;\n/*Z*/ case 0x5A: this.rt(this.par[0] ? this.par[0] : 1);                break;\n/*]*/ case 0x5D: this.settermCommand();                                 break;\n      default:                                                          break;\n      }\n      break;\n    case 12 /* ESbang */:\n      if (ch == 0x70 /*p*/) {\n        this.reset();\n      }\n      this.isEsc              = 0 /* ESnormal */;\n      break;\n    case 13 /* ESpercent */:\n      this.isEsc              = 0 /* ESnormal */;\n      switch (ch) {\n/*@*/ case 0x40: this.utfEnabled = false;                               break;\n/*G*/ case 0x47:\n/*8*/ case 0x38: this.utfEnabled = true;                                break;\n      default:                                                          break;\n      }\n      break;\n    case 6 /* ESfunckey */:\n      this.isEsc              = 0 /* ESnormal */;                               break;\n    case 7 /* EShash */:\n      this.isEsc              = 0 /* ESnormal */;\n/*8*/ if (ch == 0x38) {\n        // Screen alignment test not implemented\n      }\n      break;\n    case 8 /* ESsetG0 */:\n    case 9 /* ESsetG1 */:\n    case 10 /* ESsetG2 */:\n    case 11 /* ESsetG3 */:\n      var g                   = this.isEsc - 8 /* ESsetG0 */;\n      this.isEsc              = 0 /* ESnormal */;\n      switch (ch) {\n/*0*/ case 0x30: this.GMap[g] = this.VT100GraphicsMap;                  break;\n/*A*/ case 0x41:\n/*B*/ case 0x42: this.GMap[g] = this.Latin1Map;                         break;\n/*U*/ case 0x55: this.GMap[g] = this.CodePage437Map;                    break;\n/*K*/ case 0x4B: this.GMap[g] = this.DirectToFontMap;                   break;\n      default:                                                          break;\n      }\n      if (this.useGMap == g) {\n        this.translate        = this.GMap[g];\n      }\n      break;\n    case 17 /* EStitle */:\n      if (ch == 0x07) {\n        if (this.titleString && this.titleString.charAt(0) == ';') {\n          this.titleString    = this.titleString.substr(1);\n          if (this.titleString != '') {\n            this.titleString += ' - ';\n          }\n          this.titleString += 'Shell In A Box';\n        }\n        try {\n          window.document.title = this.titleString;\n        } catch (e) {\n        }\n        this.isEsc            = 0 /* ESnormal */;\n      } else {\n        this.titleString     += String.fromCharCode(ch);\n      }\n      break;\n    case 18 /* ESss2 */:\n    case 19 /* ESss3 */:\n      if (ch < 256) {\n          ch                  = this.GMap[this.isEsc - 18 /* ESss2 */ + 2]\n                                         [this.toggleMeta ? 
(ch | 0x80) : ch];\n        if ((ch & 0xFF00) == 0xF000) {\n          ch                  = ch & 0xFF;\n        } else if (ch == 0xFEFF || (ch >= 0x200A && ch <= 0x200F)) {\n          this.isEsc         = 0 /* ESnormal */;                                break;\n        }\n      }\n      this.lastCharacter      = String.fromCharCode(ch);\n      lineBuf                += this.lastCharacter;\n      this.isEsc              = 0 /* ESnormal */;                               break;\n    default:\n      this.isEsc              = 0 /* ESnormal */;                               break;\n    }\n    break;\n  }\n  return lineBuf;\n};\n\nVT100.prototype.renderString = function(s, showCursor) {\n  if (this.printing) {\n    this.sendToPrinter(s);\n    if (showCursor) {\n      this.showCursor();\n    }\n    return;\n  }\n\n  // We try to minimize the number of DOM operations by coalescing individual\n  // characters into strings. This is a significant performance improvement.\n  var incX = s.length;\n  if (incX > this.terminalWidth - this.cursorX) {\n    incX   = this.terminalWidth - this.cursorX;\n    if (incX <= 0) {\n      return;\n    }\n    s      = s.substr(0, incX - 1) + s.charAt(s.length - 1);\n  }\n  if (showCursor) {\n    // Minimize the number of calls to putString(), by avoiding a direct\n    // call to this.showCursor()\n    this.cursor.style.visibility = '';\n  }\n  this.putString(this.cursorX, this.cursorY, s, this.color, this.style);\n};\n\nVT100.prototype.vt100 = function(s) {\n  this.cursorNeedsShowing = this.hideCursor();\n  this.respondString      = '';\n  var lineBuf             = '';\n  for (var i = 0; i < s.length; i++) {\n    var ch = s.charCodeAt(i);\n    if (this.utfEnabled) {\n      // Decode UTF8 encoded character\n      if (ch > 0x7F) {\n        if (this.utfCount > 0 && (ch & 0xC0) == 0x80) {\n          this.utfChar    = (this.utfChar << 6) | (ch & 0x3F);\n          if (--this.utfCount <= 0) {\n            if (this.utfChar > 0xFFFF || this.utfChar < 0) {\n              ch = 0xFFFD;\n            } else {\n              ch          = this.utfChar;\n            }\n          } else {\n            continue;\n          }\n        } else {\n          if ((ch & 0xE0) == 0xC0) {\n            this.utfCount = 1;\n            this.utfChar  = ch & 0x1F;\n          } else if ((ch & 0xF0) == 0xE0) {\n            this.utfCount = 2;\n            this.utfChar  = ch & 0x0F;\n          } else if ((ch & 0xF8) == 0xF0) {\n            this.utfCount = 3;\n            this.utfChar  = ch & 0x07;\n          } else if ((ch & 0xFC) == 0xF8) {\n            this.utfCount = 4;\n            this.utfChar  = ch & 0x03;\n          } else if ((ch & 0xFE) == 0xFC) {\n            this.utfCount = 5;\n            this.utfChar  = ch & 0x01;\n          } else {\n            this.utfCount = 0;\n          }\n          continue;\n        }\n      } else {\n        this.utfCount     = 0;\n      }\n    }\n    var isNormalCharacter =\n      (ch >= 32 && ch <= 127 || ch >= 160 ||\n       this.utfEnabled && ch >= 128 ||\n       !(this.dispCtrl ? this.ctrlAlways : this.ctrlAction)[ch & 0x1F]) &&\n      (ch != 0x7F || this.dispCtrl);\n\n    if (isNormalCharacter && this.isEsc == 0 /* ESnormal */) {\n      if (ch < 256) {\n        ch                = this.translate[this.toggleMeta ? 
(ch | 0x80) : ch];\n      }\n      if ((ch & 0xFF00) == 0xF000) {\n        ch                = ch & 0xFF;\n      } else if (ch == 0xFEFF || (ch >= 0x200A && ch <= 0x200F)) {\n        continue;\n      }\n      if (!this.printing) {\n        if (this.needWrap || this.insertMode) {\n          if (lineBuf) {\n            this.renderString(lineBuf);\n            lineBuf       = '';\n          }\n        }\n        if (this.needWrap) {\n          this.cr(); this.lf();\n        }\n        if (this.insertMode) {\n          this.scrollRegion(this.cursorX, this.cursorY,\n                            this.terminalWidth - this.cursorX - 1, 1,\n                            1, 0, this.color, this.style);\n        }\n      }\n      this.lastCharacter  = String.fromCharCode(ch);\n      lineBuf            += this.lastCharacter;\n      if (!this.printing &&\n          this.cursorX + lineBuf.length >= this.terminalWidth) {\n        this.needWrap     = this.autoWrapMode;\n      }\n    } else {\n      if (lineBuf) {\n        this.renderString(lineBuf);\n        lineBuf           = '';\n      }\n      var expand          = this.doControl(ch);\n      if (expand.length) {\n        var r             = this.respondString;\n        this.respondString= r + this.vt100(expand);\n      }\n    }\n  }\n  if (lineBuf) {\n    this.renderString(lineBuf, this.cursorNeedsShowing);\n  } else if (this.cursorNeedsShowing) {\n    this.showCursor();\n  }\n  return this.respondString;\n};\n\nVT100.prototype.Latin1Map = [\n0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,\n0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,\n0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,\n0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,\n0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,\n0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,\n0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,\n0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,\n0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,\n0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,\n0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,\n0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,\n0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,\n0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,\n0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,\n0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x007F,\n0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,\n0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,\n0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,\n0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,\n0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7,\n0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,\n0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7,\n0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,\n0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00C6, 0x00C7,\n0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF,\n0x00D0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7,\n0x00D8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00DE, 0x00DF,\n0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7,\n0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF,\n0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7,\n0x00F8, 0x00F9, 0x00FA, 0x00FB, 
0x00FC, 0x00FD, 0x00FE, 0x00FF\n];\n\nVT100.prototype.VT100GraphicsMap = [\n0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,\n0x0008, 0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x000E, 0x000F,\n0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,\n0x0018, 0x0019, 0x001A, 0x001B, 0x001C, 0x001D, 0x001E, 0x001F,\n0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,\n0x0028, 0x0029, 0x002A, 0x2192, 0x2190, 0x2191, 0x2193, 0x002F,\n0x2588, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,\n0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,\n0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,\n0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,\n0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,\n0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x00A0,\n0x25C6, 0x2592, 0x2409, 0x240C, 0x240D, 0x240A, 0x00B0, 0x00B1,\n0x2591, 0x240B, 0x2518, 0x2510, 0x250C, 0x2514, 0x253C, 0xF800,\n0xF801, 0x2500, 0xF803, 0xF804, 0x251C, 0x2524, 0x2534, 0x252C,\n0x2502, 0x2264, 0x2265, 0x03C0, 0x2260, 0x00A3, 0x00B7, 0x007F,\n0x0080, 0x0081, 0x0082, 0x0083, 0x0084, 0x0085, 0x0086, 0x0087,\n0x0088, 0x0089, 0x008A, 0x008B, 0x008C, 0x008D, 0x008E, 0x008F,\n0x0090, 0x0091, 0x0092, 0x0093, 0x0094, 0x0095, 0x0096, 0x0097,\n0x0098, 0x0099, 0x009A, 0x009B, 0x009C, 0x009D, 0x009E, 0x009F,\n0x00A0, 0x00A1, 0x00A2, 0x00A3, 0x00A4, 0x00A5, 0x00A6, 0x00A7,\n0x00A8, 0x00A9, 0x00AA, 0x00AB, 0x00AC, 0x00AD, 0x00AE, 0x00AF,\n0x00B0, 0x00B1, 0x00B2, 0x00B3, 0x00B4, 0x00B5, 0x00B6, 0x00B7,\n0x00B8, 0x00B9, 0x00BA, 0x00BB, 0x00BC, 0x00BD, 0x00BE, 0x00BF,\n0x00C0, 0x00C1, 0x00C2, 0x00C3, 0x00C4, 0x00C5, 0x00C6, 0x00C7,\n0x00C8, 0x00C9, 0x00CA, 0x00CB, 0x00CC, 0x00CD, 0x00CE, 0x00CF,\n0x00D0, 0x00D1, 0x00D2, 0x00D3, 0x00D4, 0x00D5, 0x00D6, 0x00D7,\n0x00D8, 0x00D9, 0x00DA, 0x00DB, 0x00DC, 0x00DD, 0x00DE, 0x00DF,\n0x00E0, 0x00E1, 0x00E2, 0x00E3, 0x00E4, 0x00E5, 0x00E6, 0x00E7,\n0x00E8, 0x00E9, 0x00EA, 0x00EB, 0x00EC, 0x00ED, 0x00EE, 0x00EF,\n0x00F0, 0x00F1, 0x00F2, 0x00F3, 0x00F4, 0x00F5, 0x00F6, 0x00F7,\n0x00F8, 0x00F9, 0x00FA, 0x00FB, 0x00FC, 0x00FD, 0x00FE, 0x00FF\n];\n\nVT100.prototype.CodePage437Map = [\n0x0000, 0x263A, 0x263B, 0x2665, 0x2666, 0x2663, 0x2660, 0x2022,\n0x25D8, 0x25CB, 0x25D9, 0x2642, 0x2640, 0x266A, 0x266B, 0x263C,\n0x25B6, 0x25C0, 0x2195, 0x203C, 0x00B6, 0x00A7, 0x25AC, 0x21A8,\n0x2191, 0x2193, 0x2192, 0x2190, 0x221F, 0x2194, 0x25B2, 0x25BC,\n0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,\n0x0028, 0x0029, 0x002A, 0x002B, 0x002C, 0x002D, 0x002E, 0x002F,\n0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,\n0x0038, 0x0039, 0x003A, 0x003B, 0x003C, 0x003D, 0x003E, 0x003F,\n0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,\n0x0048, 0x0049, 0x004A, 0x004B, 0x004C, 0x004D, 0x004E, 0x004F,\n0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,\n0x0058, 0x0059, 0x005A, 0x005B, 0x005C, 0x005D, 0x005E, 0x005F,\n0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,\n0x0068, 0x0069, 0x006A, 0x006B, 0x006C, 0x006D, 0x006E, 0x006F,\n0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,\n0x0078, 0x0079, 0x007A, 0x007B, 0x007C, 0x007D, 0x007E, 0x2302,\n0x00C7, 0x00FC, 0x00E9, 0x00E2, 0x00E4, 0x00E0, 0x00E5, 0x00E7,\n0x00EA, 0x00EB, 0x00E8, 0x00EF, 0x00EE, 0x00EC, 0x00C4, 0x00C5,\n0x00C9, 0x00E6, 0x00C6, 0x00F4, 0x00F6, 0x00F2, 0x00FB, 0x00F9,\n0x00FF, 0x00D6, 0x00DC, 0x00A2, 0x00A3, 0x00A5, 0x20A7, 0x0192,\n0x00E1, 0x00ED, 0x00F3, 0x00FA, 0x00F1, 0x00D1, 0x00AA, 
0x00BA,\n0x00BF, 0x2310, 0x00AC, 0x00BD, 0x00BC, 0x00A1, 0x00AB, 0x00BB,\n0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556,\n0x2555, 0x2563, 0x2551, 0x2557, 0x255D, 0x255C, 0x255B, 0x2510,\n0x2514, 0x2534, 0x252C, 0x251C, 0x2500, 0x253C, 0x255E, 0x255F,\n0x255A, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256C, 0x2567,\n0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256B,\n0x256A, 0x2518, 0x250C, 0x2588, 0x2584, 0x258C, 0x2590, 0x2580,\n0x03B1, 0x00DF, 0x0393, 0x03C0, 0x03A3, 0x03C3, 0x00B5, 0x03C4,\n0x03A6, 0x0398, 0x03A9, 0x03B4, 0x221E, 0x03C6, 0x03B5, 0x2229,\n0x2261, 0x00B1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00F7, 0x2248,\n0x00B0, 0x2219, 0x00B7, 0x221A, 0x207F, 0x00B2, 0x25A0, 0x00A0\n];\n\nVT100.prototype.DirectToFontMap = [\n0xF000, 0xF001, 0xF002, 0xF003, 0xF004, 0xF005, 0xF006, 0xF007,\n0xF008, 0xF009, 0xF00A, 0xF00B, 0xF00C, 0xF00D, 0xF00E, 0xF00F,\n0xF010, 0xF011, 0xF012, 0xF013, 0xF014, 0xF015, 0xF016, 0xF017,\n0xF018, 0xF019, 0xF01A, 0xF01B, 0xF01C, 0xF01D, 0xF01E, 0xF01F,\n0xF020, 0xF021, 0xF022, 0xF023, 0xF024, 0xF025, 0xF026, 0xF027,\n0xF028, 0xF029, 0xF02A, 0xF02B, 0xF02C, 0xF02D, 0xF02E, 0xF02F,\n0xF030, 0xF031, 0xF032, 0xF033, 0xF034, 0xF035, 0xF036, 0xF037,\n0xF038, 0xF039, 0xF03A, 0xF03B, 0xF03C, 0xF03D, 0xF03E, 0xF03F,\n0xF040, 0xF041, 0xF042, 0xF043, 0xF044, 0xF045, 0xF046, 0xF047,\n0xF048, 0xF049, 0xF04A, 0xF04B, 0xF04C, 0xF04D, 0xF04E, 0xF04F,\n0xF050, 0xF051, 0xF052, 0xF053, 0xF054, 0xF055, 0xF056, 0xF057,\n0xF058, 0xF059, 0xF05A, 0xF05B, 0xF05C, 0xF05D, 0xF05E, 0xF05F,\n0xF060, 0xF061, 0xF062, 0xF063, 0xF064, 0xF065, 0xF066, 0xF067,\n0xF068, 0xF069, 0xF06A, 0xF06B, 0xF06C, 0xF06D, 0xF06E, 0xF06F,\n0xF070, 0xF071, 0xF072, 0xF073, 0xF074, 0xF075, 0xF076, 0xF077,\n0xF078, 0xF079, 0xF07A, 0xF07B, 0xF07C, 0xF07D, 0xF07E, 0xF07F,\n0xF080, 0xF081, 0xF082, 0xF083, 0xF084, 0xF085, 0xF086, 0xF087,\n0xF088, 0xF089, 0xF08A, 0xF08B, 0xF08C, 0xF08D, 0xF08E, 0xF08F,\n0xF090, 0xF091, 0xF092, 0xF093, 0xF094, 0xF095, 0xF096, 0xF097,\n0xF098, 0xF099, 0xF09A, 0xF09B, 0xF09C, 0xF09D, 0xF09E, 0xF09F,\n0xF0A0, 0xF0A1, 0xF0A2, 0xF0A3, 0xF0A4, 0xF0A5, 0xF0A6, 0xF0A7,\n0xF0A8, 0xF0A9, 0xF0AA, 0xF0AB, 0xF0AC, 0xF0AD, 0xF0AE, 0xF0AF,\n0xF0B0, 0xF0B1, 0xF0B2, 0xF0B3, 0xF0B4, 0xF0B5, 0xF0B6, 0xF0B7,\n0xF0B8, 0xF0B9, 0xF0BA, 0xF0BB, 0xF0BC, 0xF0BD, 0xF0BE, 0xF0BF,\n0xF0C0, 0xF0C1, 0xF0C2, 0xF0C3, 0xF0C4, 0xF0C5, 0xF0C6, 0xF0C7,\n0xF0C8, 0xF0C9, 0xF0CA, 0xF0CB, 0xF0CC, 0xF0CD, 0xF0CE, 0xF0CF,\n0xF0D0, 0xF0D1, 0xF0D2, 0xF0D3, 0xF0D4, 0xF0D5, 0xF0D6, 0xF0D7,\n0xF0D8, 0xF0D9, 0xF0DA, 0xF0DB, 0xF0DC, 0xF0DD, 0xF0DE, 0xF0DF,\n0xF0E0, 0xF0E1, 0xF0E2, 0xF0E3, 0xF0E4, 0xF0E5, 0xF0E6, 0xF0E7,\n0xF0E8, 0xF0E9, 0xF0EA, 0xF0EB, 0xF0EC, 0xF0ED, 0xF0EE, 0xF0EF,\n0xF0F0, 0xF0F1, 0xF0F2, 0xF0F3, 0xF0F4, 0xF0F5, 0xF0F6, 0xF0F7,\n0xF0F8, 0xF0F9, 0xF0FA, 0xF0FB, 0xF0FC, 0xF0FD, 0xF0FE, 0xF0FF\n];\n\nVT100.prototype.ctrlAction = [\n  true,  false, false, false, false, false, false, true,\n  true,  true,  true,  true,  true,  true,  true,  true,\n  false, false, false, false, false, false, false, false,\n  true,  false, true,  true,  false, false, false, false\n];\n\nVT100.prototype.ctrlAlways = [\n  true,  false, false, false, false, false, false, false,\n  true,  false, true,  false, true,  true,  true,  true,\n  false, false, false, false, false, false, false, false,\n  false, false, false, true,  false, false, false, false\n];\n"
  },
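  {
    "path": "services/workbench2/public/webshell/examples/sgr-attr-sketch.js",
    "content": "// Hypothetical illustration only: this file is an editorial sketch, not part\n// of the vendored ShellInABox sources above. It isolates the attribute bit\n// layout that VT100.prototype.csim() and VT100.prototype.updateStyle()\n// manipulate: the low nibble holds the foreground color, bits 4-7 the\n// background, and the flag bits match the inlined /* ATTR_* */ constants.\n\nvar ATTR_DEFAULT   = 0x00F0;\nvar ATTR_REVERSE   = 0x0100;\nvar ATTR_UNDERLINE = 0x0200;\nvar ATTR_DIM       = 0x0400;\nvar ATTR_BRIGHT    = 0x0800;\nvar ATTR_BLINK     = 0x1000;\n\n// Apply a single SGR parameter n (from 'ESC [ n m') to an attribute word,\n// covering the subset of parameters that csim() handles the same way.\nfunction applySGR(attr, n) {\n  if (n == 0)             return ATTR_DEFAULT;\n  if (n == 1)             return (attr & ~ATTR_DIM) | ATTR_BRIGHT;\n  if (n == 2)             return (attr & ~ATTR_BRIGHT) | ATTR_DIM;\n  if (n == 4)             return attr | ATTR_UNDERLINE;\n  if (n == 5)             return attr | ATTR_BLINK;\n  if (n == 7)             return attr | ATTR_REVERSE;\n  if (n >= 30 && n <= 37) return (attr & ~0x0F) | (n - 30);        // foreground\n  if (n >= 40 && n <= 47) return (attr & ~0xF0) | ((n - 40) << 4); // background\n  return attr;            // everything else is ignored in this sketch\n}\n\n// 'ESC [ 1;31;40 m' = bright red on black; updateStyle() later promotes the\n// foreground index 1 to 9 because ATTR_BRIGHT is set.\nvar attr = [1, 31, 40].reduce(applySGR, ATTR_DEFAULT);\nconsole.log(attr.toString(16)); // '801' -> fg 1, bg 0, ATTR_BRIGHT set\n"
  },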
  {
    "path": "services/workbench2/public/webshell/styles.css",
    "content": "/* Copyright (C) 2008-2010 Markus Gutschke <markus@shellinabox.com> All rights reserved.\n   SPDX-License-Identifier: GPL-2.0\n*/\n\n#vt100 a {\n  text-decoration:      none;\n  color:                inherit;\n}\n\n#vt100 a:hover {\n  text-decoration:      underline;\n}\n\n#vt100 #reconnect {\n  position:             absolute;\n  z-index:              2;\n}\n\n#vt100 #reconnect input {\n  padding:              1ex;\n  font-weight:          bold;\n  font-size:            x-large;\n}\n\n#vt100 #cursize {\n  background:           #EEEEEE;\n  border:               1px solid black;\n  font-family:          sans-serif;\n  font-size:            large;\n  font-weight:          bold;\n  padding:              1ex;\n  position:             absolute;\n  z-index:              2;\n}\n\n#vt100 pre {\n  margin:               0px;\n}\n\n#vt100 pre pre {\n  overflow:             hidden;\n}\n\n#vt100 #scrollable {\n  overflow-x:           hidden;\n  overflow-y:           scroll;\n  position:             relative;\n  padding:              1px;\n}\n\n#vt100 #console, #vt100 #alt_console, #vt100 #cursor, #vt100 #lineheight, #vt100 .hidden pre {\n  font-family:          \"DejaVu Sans Mono\", \"Everson Mono\", FreeMono, \"Andale Mono\", monospace;\n}\n\n#vt100 #lineheight {\n  position:             absolute;\n  visibility:           hidden;\n}\n\n#vt100 #cursor {\n  position:             absolute;\n  left:                 0px;\n  top:                  0px;\n  overflow:             hidden;\n  z-index:              1;\n}\n\n#vt100 #cursor.bright {\n  background-color:     black;\n  color:                white;\n}\n\n#vt100 #cursor.dim {\n  visibility:           hidden;\n}\n\n#vt100 #cursor.inactive {\n  border:               1px solid;\n  margin:               -1px;\n}\n\n#vt100 #padding {\n  visibility:           hidden;\n  width:                1px;\n  height:               0px;\n  overflow:             hidden;\n}\n\n#vt100 .hidden {\n  position:             absolute;\n  top:                  -10000px;\n  left:                 -10000px;\n  width:                0px;\n  height:               0px;\n}\n\n#vt100 #menu {\n  overflow:             visible;\n  position:             absolute;\n  z-index:              3;\n}\n\n#vt100 #menu .popup {\n  background-color:     #EEEEEE;\n  border:               1px solid black;\n  font-family:          sans-serif;\n  position:             absolute;\n}\n\n#vt100 #menu .popup ul {\n  list-style-type:      none;\n  padding:              0px;\n  margin:               0px;\n  min-width:            10em;\n}\n\n#vt100 #menu .popup li {\n  padding:              3px 0.5ex 3px 0.5ex;\n}\n\n#vt100 #menu .popup li.hover {\n  background-color:     #444444;\n  color:                white;\n}\n\n#vt100 #menu .popup li.disabled {\n  color:                #AAAAAA;\n}\n\n#vt100 #menu .popup hr {\n  margin:               0.5ex 0px 0.5ex 0px;\n}\n\n#vt100 #menu img {\n  margin-right:         0.5ex;\n  width:                1ex;\n  height:               1ex;\n}\n\n#vt100 #scrollable.inverted { color:            #ffffff;\n                              background-color: #000000; }\n\n#vt100 #kbd_button {\n  float:                left;\n  position:             fixed;\n  z-index:              0;\n  visibility:           hidden;\n}\n\n#vt100 #keyboard {\n  z-index:              3;\n  position:             absolute;\n}\n\n#vt100 #keyboard .box {\n  font-family:          sans-serif;\n  background-color:     #cccccc;\n  padding:              .8em;\n  float:                left;\n  position: 
            absolute;\n  border-radius:        10px;\n  -moz-border-radius:   10px;\n  box-shadow:           4px 4px 6px #222222;\n  -webkit-box-shadow:   4px 4px 6px #222222;\n  /* Don't set the -moz-box-shadow. It doesn't properly scale when CSS\n   * transforms are in effect. Once Firefox supports box-shadow, it should\n   * automatically do the right thing. Until then, leave shadows disabled\n   * for Firefox.\n   */\n  opacity:              0.85;\n  -moz-opacity:         0.85;\n  filter:               alpha(opacity=85);\n}\n\n#vt100 #keyboard .box * {\n  vertical-align:       top;\n  display:              inline-block;\n}\n\n#vt100 #keyboard b, #vt100 #keyboard i, #vt100 #keyboard s, #vt100 #keyboard u {\n  font-style:           normal;\n  font-weight:          bold;\n  border-radius:        5px;\n  -moz-border-radius:   5px;\n  background-color:     #555555;\n  color:                #eeeeee;\n  box-shadow:           2px 2px 3px #222222;\n  -webkit-box-shadow:   2px 2px 3px #222222;\n  padding:              4px;\n  margin:               2px;\n  height:               2ex;\n  display:              inline-block;\n  text-align:           center;\n  text-decoration:      none;\n}\n\n#vt100 #keyboard b, #vt100 #keyboard s {\n  width:                2ex;\n}\n\n#vt100 #keyboard u, #vt100 #keyboard s {\n  visibility:           hidden;\n}\n\n#vt100 #keyboard .shifted {\n  display:              none;\n}\n\n#vt100 #keyboard .selected {\n  color:                #888888;\n  background-color:     #eeeeee;\n  box-shadow:           0px 0px 3px #222222;\n  -webkit-box-shadow:   0px 0px 3px #222222;\n  position:             relative;\n  top:                  1px;\n  left:                 1px;\n}\n\n[if DEFINES_COLORS]\n/* IE cannot properly handle \"inherit\" properties. 
So, the monochrome.css/\n * color.css style sheets cannot work, if we define colors in styles.css.\n */\n[else DEFINES_COLORS]\n#vt100 .ansi0               {                            }\n#vt100 .ansi1               { color:            #cd0000; }\n#vt100 .ansi2               { color:            #00cd00; }\n#vt100 .ansi3               { color:            #cdcd00; }\n#vt100 .ansi4               { color:            #0000ee; }\n#vt100 .ansi5               { color:            #cd00cd; }\n#vt100 .ansi6               { color:            #00cdcd; }\n#vt100 .ansi7               { color:            #e5e5e5; }\n#vt100 .ansi8               { color:            #7f7f7f; }\n#vt100 .ansi9               { color:            #ff0000; }\n#vt100 .ansi10              { color:            #00ff00; }\n#vt100 .ansi11              { color:            #e8e800; }\n#vt100 .ansi12              { color:            #5c5cff; }\n#vt100 .ansi13              { color:            #ff00ff; }\n#vt100 .ansi14              { color:            #00ffff; }\n#vt100 .ansi15              { color:            #ffffff; }\n\n#vt100 .bgAnsi0             { background-color: #000000; }\n#vt100 .bgAnsi1             { background-color: #cd0000; }\n#vt100 .bgAnsi2             { background-color: #00cd00; }\n#vt100 .bgAnsi3             { background-color: #cdcd00; }\n#vt100 .bgAnsi4             { background-color: #0000ee; }\n#vt100 .bgAnsi5             { background-color: #cd00cd; }\n#vt100 .bgAnsi6             { background-color: #00cdcd; }\n#vt100 .bgAnsi7             { background-color: #e5e5e5; }\n#vt100 .bgAnsi8             { background-color: #7f7f7f; }\n#vt100 .bgAnsi9             { background-color: #ff0000; }\n#vt100 .bgAnsi10            { background-color: #00ff00; }\n#vt100 .bgAnsi11            { background-color: #e8e800; }\n#vt100 .bgAnsi12            { background-color: #5c5cff; }\n#vt100 .bgAnsi13            { background-color: #ff00ff; }\n#vt100 .bgAnsi14            { background-color: #00ffff; }\n#vt100 .bgAnsi15            {                            }\n[endif DEFINES_COLORS]\n\n@media print {\n  #vt100 .scrollback {\n    display:            none;\n  }\n\n  #vt100 #reconnect, #vt100 #cursor, #vt100 #menu, #vt100 #kbd_button, #vt100 #keyboard {\n    visibility:         hidden;\n  }\n\n  #vt100 #scrollable {\n    overflow:           hidden;\n  }\n\n  #vt100 #console, #vt100 #alt_console {\n    overflow:           hidden;\n    width:              1000000ex;\n  }\n}\n"
  },
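  {
    "path": "services/workbench2/public/webshell/examples/ansi-class-sketch.js",
    "content": "// Hypothetical illustration only: an editorial sketch, not part of the\n// vendored webshell assets. It shows how the class names computed by\n// VT100.prototype.updateStyle() pair up with the .ansi0-.ansi15 and\n// .bgAnsi0-.bgAnsi15 palette rules in styles.css above. ATTR_DIM handling\n// is omitted here for brevity.\n\nfunction classesFor(attr) {\n  var bg = (attr >> 4) & 0xF;\n  var fg =  attr       & 0xF;\n  if (attr & 0x0100 /* ATTR_REVERSE */) { var tmp = bg; bg = fg; fg = tmp; }\n  if (attr & 0x0800 /* ATTR_BRIGHT */)  { fg |= 8; }\n  return 'ansi' + fg + ' bgAnsi' + bg;\n}\n\nconsole.log(classesFor(0x0801)); // 'ansi9 bgAnsi0'  (bright red on black)\nconsole.log(classesFor(0x00F0)); // 'ansi0 bgAnsi15' (default attribute word)\n"
  },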
  {
    "path": "services/workbench2/scripts/build.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n'use strict';\n\n// Do this as the first thing so that any code reading it knows the right env.\nprocess.env.BABEL_ENV = 'production';\nprocess.env.NODE_ENV = 'production';\n\n// Makes the script crash on unhandled rejections instead of silently\n// ignoring them. In the future, promise rejections that are not handled will\n// terminate the Node.js process with a non-zero exit code.\nprocess.on('unhandledRejection', err => {\n  throw err;\n});\n\n// Ensure environment variables are read.\nrequire('../config/env');\n\nconst path = require('path');\nconst chalk = require('react-dev-utils/chalk');\nconst fs = require('fs-extra');\nconst bfj = require('bfj');\nconst webpack = require('webpack');\nconst configFactory = require('../config/webpack.config');\nconst paths = require('../config/paths');\nconst checkRequiredFiles = require('react-dev-utils/checkRequiredFiles');\nconst formatWebpackMessages = require('react-dev-utils/formatWebpackMessages');\nconst printHostingInstructions = require('react-dev-utils/printHostingInstructions');\nconst FileSizeReporter = require('react-dev-utils/FileSizeReporter');\nconst printBuildError = require('react-dev-utils/printBuildError');\n\nconst measureFileSizesBeforeBuild =\n  FileSizeReporter.measureFileSizesBeforeBuild;\nconst printFileSizesAfterBuild = FileSizeReporter.printFileSizesAfterBuild;\nconst useYarn = fs.existsSync(paths.yarnLockFile);\n\n// These sizes are pretty large. We'll warn for bundles exceeding them.\nconst WARN_AFTER_BUNDLE_GZIP_SIZE = 512 * 1024;\nconst WARN_AFTER_CHUNK_GZIP_SIZE = 1024 * 1024;\n\nconst isInteractive = process.stdout.isTTY;\n\n// Warn and crash if required files are missing\nif (!checkRequiredFiles([paths.appHtml, paths.appIndexJs])) {\n  process.exit(1);\n}\n\nconst argv = process.argv.slice(2);\nconst writeStatsJson = argv.indexOf('--stats') !== -1;\n\n// Generate configuration\nconst config = configFactory('production');\n\n// We require that you explicitly set browsers and do not fall back to\n// browserslist defaults.\nconst { checkBrowsers } = require('react-dev-utils/browsersHelper');\ncheckBrowsers(paths.appPath, isInteractive)\n  .then(() => {\n    // First, read the current file sizes in build directory.\n    // This lets us display how much they changed later.\n    return measureFileSizesBeforeBuild(paths.appBuild);\n  })\n  .then(previousFileSizes => {\n    // Remove all content but keep the directory so that\n    // if you're in it, you don't end up in Trash\n    fs.emptyDirSync(paths.appBuild);\n    // Merge with the public folder\n    copyPublicFolder();\n    // Start the webpack build\n    return build(previousFileSizes);\n  })\n  .then(\n    ({ stats, previousFileSizes, warnings }) => {\n      if (warnings.length) {\n        console.log(chalk.yellow('Compiled with warnings.\\n'));\n        console.log(warnings.join('\\n\\n'));\n        console.log(\n          '\\nSearch for the ' +\n            chalk.underline(chalk.yellow('keywords')) +\n            ' to learn more about each warning.'\n        );\n        console.log(\n          'To ignore, add ' +\n            chalk.cyan('// eslint-disable-next-line') +\n            ' to the line before.\\n'\n        );\n      } else {\n        console.log(chalk.green('Compiled successfully.\\n'));\n      }\n\n      console.log('File sizes after gzip:\\n');\n      printFileSizesAfterBuild(\n        stats,\n        previousFileSizes,\n        
paths.appBuild,\n        WARN_AFTER_BUNDLE_GZIP_SIZE,\n        WARN_AFTER_CHUNK_GZIP_SIZE\n      );\n      console.log();\n\n      const appPackage = require(paths.appPackageJson);\n      const publicUrl = paths.publicUrlOrPath;\n      const publicPath = config.output.publicPath;\n      const buildFolder = path.relative(process.cwd(), paths.appBuild);\n      printHostingInstructions(\n        appPackage,\n        publicUrl,\n        publicPath,\n        buildFolder,\n        useYarn\n      );\n    },\n    err => {\n      const tscCompileOnError = process.env.TSC_COMPILE_ON_ERROR === 'true';\n      if (tscCompileOnError) {\n        console.log(\n          chalk.yellow(\n            'Compiled with the following type errors (you may want to check these before deploying your app):\\n'\n          )\n        );\n        printBuildError(err);\n      } else {\n        console.log(chalk.red('Failed to compile.\\n'));\n        printBuildError(err);\n        process.exit(1);\n      }\n    }\n  )\n  .catch(err => {\n    if (err && err.message) {\n      console.log(err.message);\n    }\n    process.exit(1);\n  });\n\n// Create the production build and print the deployment instructions.\nfunction build(previousFileSizes) {\n  console.log('Creating an optimized production build...');\n\n  const compiler = webpack(config);\n  return new Promise((resolve, reject) => {\n    compiler.run((err, stats) => {\n      let messages;\n      if (err) {\n        if (!err.message) {\n          return reject(err);\n        }\n\n        let errMessage = err.message;\n\n        // Add additional information for postcss errors\n        if (Object.prototype.hasOwnProperty.call(err, 'postcssNode')) {\n          errMessage +=\n            '\\nCompileError: Begins at CSS selector ' +\n            err['postcssNode'].selector;\n        }\n\n        messages = formatWebpackMessages({\n          errors: [errMessage],\n          warnings: [],\n        });\n      } else {\n        messages = formatWebpackMessages(\n          stats.toJson({ all: false, warnings: true, errors: true })\n        );\n      }\n      if (messages.errors.length) {\n        // Only keep the first error. Others are often indicative\n        // of the same problem, but confuse the reader with noise.\n        if (messages.errors.length > 1) {\n          messages.errors.length = 1;\n        }\n        return reject(new Error(messages.errors.join('\\n\\n')));\n      }\n      if (\n        process.env.CI &&\n        (typeof process.env.CI !== 'string' ||\n          process.env.CI.toLowerCase() !== 'false') &&\n        messages.warnings.length\n      ) {\n        // Ignore sourcemap warnings in CI builds. 
See #8227 for more info.\n        const filteredWarnings = messages.warnings.filter(\n          w => !/Failed to parse source map/.test(w)\n        );\n        if (filteredWarnings.length) {\n          console.log(\n            chalk.yellow(\n              '\\nTreating warnings as errors because process.env.CI = true.\\n' +\n                'Most CI servers set it automatically.\\n'\n            )\n          );\n          return reject(new Error(filteredWarnings.join('\\n\\n')));\n        }\n      }\n\n      const resolveArgs = {\n        stats,\n        previousFileSizes,\n        warnings: messages.warnings,\n      };\n\n      if (writeStatsJson) {\n        return bfj\n          .write(paths.appBuild + '/bundle-stats.json', stats.toJson())\n          .then(() => resolve(resolveArgs))\n          .catch(error => reject(new Error(error)));\n      }\n\n      return resolve(resolveArgs);\n    });\n  });\n}\n\nfunction copyPublicFolder() {\n  fs.copySync(paths.appPublic, paths.appBuild, {\n    dereference: true,\n    filter: file => file !== paths.appHtml,\n  });\n}\n"
  },
  {
    "path": "services/workbench2/scripts/start.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n'use strict';\n\n// Do this as the first thing so that any code reading it knows the right env.\nprocess.env.BABEL_ENV = 'development';\nprocess.env.NODE_ENV = 'development';\n\n// Makes the script crash on unhandled rejections instead of silently\n// ignoring them. In the future, promise rejections that are not handled will\n// terminate the Node.js process with a non-zero exit code.\nprocess.on('unhandledRejection', err => {\n  throw err;\n});\n\n// Ensure environment variables are read.\nrequire('../config/env');\n\nconst fs = require('fs');\nconst chalk = require('react-dev-utils/chalk');\nconst webpack = require('webpack');\nconst WebpackDevServer = require('webpack-dev-server');\nconst clearConsole = require('react-dev-utils/clearConsole');\nconst checkRequiredFiles = require('react-dev-utils/checkRequiredFiles');\nconst {\n  choosePort,\n  createCompiler,\n  prepareProxy,\n  prepareUrls,\n} = require('react-dev-utils/WebpackDevServerUtils');\nconst openBrowser = require('react-dev-utils/openBrowser');\nconst semver = require('semver');\nconst paths = require('../config/paths');\nconst configFactory = require('../config/webpack.config');\nconst createDevServerConfig = require('../config/webpackDevServer.config');\nconst getClientEnvironment = require('../config/env');\nconst react = require(require.resolve('react', { paths: [paths.appPath] }));\n\nconst env = getClientEnvironment(paths.publicUrlOrPath.slice(0, -1));\nconst useYarn = fs.existsSync(paths.yarnLockFile);\nconst isInteractive = process.stdout.isTTY;\n\n// Warn and crash if required files are missing\nif (!checkRequiredFiles([paths.appHtml, paths.appIndexJs])) {\n  process.exit(1);\n}\n\n// Tools like Cloud9 rely on this.\nconst DEFAULT_PORT = parseInt(process.env.PORT, 10) || 3000;\nconst HOST = process.env.HOST || '0.0.0.0';\n\nif (process.env.HOST) {\n  console.log(\n    chalk.cyan(\n      `Attempting to bind to HOST environment variable: ${chalk.yellow(\n        chalk.bold(process.env.HOST)\n      )}`\n    )\n  );\n  console.log(\n    `If this was unintentional, check that you haven't mistakenly set it in your shell.`\n  );\n  console.log(\n    `Learn more here: ${chalk.yellow('https://cra.link/advanced-config')}`\n  );\n  console.log();\n}\n\n// We require that you explicitly set browsers and do not fall back to\n// browserslist defaults.\nconst { checkBrowsers } = require('react-dev-utils/browsersHelper');\ncheckBrowsers(paths.appPath, isInteractive)\n  .then(() => {\n    // We attempt to use the default port but if it is busy, we offer the user to\n    // run on a different port. `choosePort()` Promise resolves to the next free port.\n    return choosePort(HOST, DEFAULT_PORT);\n  })\n  .then(port => {\n    if (port == null) {\n      // We have not found a port.\n      return;\n    }\n\n    const config = configFactory('development');\n    const protocol = process.env.HTTPS === 'true' ? 
'https' : 'http';\n    const appName = require(paths.appPackageJson).name;\n\n    const useTypeScript = fs.existsSync(paths.appTsConfig);\n    const urls = prepareUrls(\n      protocol,\n      HOST,\n      port,\n      paths.publicUrlOrPath.slice(0, -1)\n    );\n    // Create a webpack compiler that is configured with custom messages.\n    const compiler = createCompiler({\n      appName,\n      config,\n      urls,\n      useYarn,\n      useTypeScript,\n      webpack,\n    });\n    // Load proxy config\n    const proxySetting = require(paths.appPackageJson).proxy;\n    const proxyConfig = prepareProxy(\n      proxySetting,\n      paths.appPublic,\n      paths.publicUrlOrPath\n    );\n    // Serve webpack assets generated by the compiler over a web server.\n    const serverConfig = {\n      ...createDevServerConfig(proxyConfig, urls.lanUrlForConfig),\n      host: HOST,\n      port,\n    };\n    const devServer = new WebpackDevServer(serverConfig, compiler);\n    // Launch WebpackDevServer.\n    devServer.startCallback(() => {\n      if (isInteractive) {\n        clearConsole();\n      }\n\n      if (env.raw.FAST_REFRESH && semver.lt(react.version, '16.10.0')) {\n        console.log(\n          chalk.yellow(\n            `Fast Refresh requires React 16.10 or higher. You are using React ${react.version}.`\n          )\n        );\n      }\n\n      console.log(chalk.cyan('Starting the development server...\\n'));\n      openBrowser(urls.localUrlForBrowser);\n    });\n\n    ['SIGINT', 'SIGTERM'].forEach(function (sig) {\n      process.on(sig, function () {\n        devServer.close();\n        process.exit();\n      });\n    });\n\n    if (process.env.CI !== 'true') {\n      // Gracefully exit when stdin ends\n      process.stdin.on('end', function () {\n        devServer.close();\n        process.exit();\n      });\n    }\n  })\n  .catch(err => {\n    if (err && err.message) {\n      console.log(err.message);\n    }\n    process.exit(1);\n  });\n"
  },
  {
    "path": "services/workbench2/src/common/app-info.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport const getBuildInfo = (): string => {\n    if (process.env.REACT_APP_VERSION) {\n      return \"v\" + process.env.REACT_APP_VERSION;\n    } else {\n      const getBuildNumber = \"BN-\" + (process.env.REACT_APP_BUILD_NUMBER || \"dev\");\n      const getGitCommit = \"GIT-\" + (process.env.REACT_APP_GIT_COMMIT || \"latest\").substring(0, 7);\n      return getBuildNumber + \" / \" + getGitCommit;\n    }\n};\n\nexport const PROPERTY_CONTAINS_VALUE_MIN_API_REVISION = 20200212;\n\nexport const GROUP_CONTENTS_INCLUDE_CONTAINER_UUID_MIN_API_REVISION = 20240627;\n"
  },
  {
    "path": "services/workbench2/src/common/array-utils.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport const sortByProperty = (propName: string) => (obj1: any, obj2: any) => {\n    const prop1 = obj1[propName];\n    const prop2 = obj2[propName];\n    \n    if (prop1 > prop2) {\n        return 1;\n    }\n\n    if (prop1 < prop2) {\n        return -1;\n    }\n\n    return 0;\n};\n"
  },
  {
    "path": "services/workbench2/src/common/codes.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport const KEY_CODE_UP = 38;\nexport const KEY_CODE_DOWN = 40;\nexport const KEY_CODE_ESC = 27;\nexport const KEY_ENTER = 13;\n"
  },
  {
    "path": "services/workbench2/src/common/config.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport Axios from 'axios';\n\nexport const WORKBENCH_CONFIG_URL =\n    process.env.REACT_APP_ARVADOS_CONFIG_URL || '/config.json';\n\ninterface WorkbenchConfig {\n    API_HOST: string;\n    VOCABULARY_URL?: string;\n    FILE_VIEWERS_CONFIG_URL?: string;\n}\n\nexport interface ClusterConfigJSON {\n    API: {\n        UnfreezeProjectRequiresAdmin: boolean\n        MaxItemsPerResponse: number\n    },\n    ClusterID: string;\n    Containers: {\n        ReserveExtraRAM: number;\n    },\n    InstanceTypes?: {\n        [key: string]: {\n            AddedScratch: number;\n            GPU?: {\n                Stack:          string;\n                DriverVersion:  string;\n                HardwareTarget: string;\n                DeviceCount:    number;\n                VRAM:           number;\n            };\n            IncludedScratch: number;\n            Preemptible: boolean;\n            Price: number;\n            ProviderType: string;\n            RAM: number;\n            VCPUs: number;\n        };\n    };\n    RemoteClusters: {\n        [key: string]: {\n            ActivateUsers: boolean\n            Host: string\n            Insecure: boolean\n            Proxy: boolean\n            Scheme: string\n        }\n    };\n    Services: {\n        Controller: {\n            ExternalURL: string;\n        };\n        Workbench1: {\n            ExternalURL: string;\n        };\n        Workbench2: {\n            ExternalURL: string;\n        };\n        Workbench: {\n            DisableSharingURLsUI: boolean;\n            ArvadosDocsite: string;\n            FileViewersConfigURL: string;\n            WelcomePageHTML: string;\n            InactivePageHTML: string;\n            SSHHelpPageHTML: string;\n            SSHHelpHostSuffix: string;\n            SiteName: string;\n            IdleTimeout: string;\n        };\n        Websocket: {\n            ExternalURL: string;\n        };\n        WebDAV: {\n            ExternalURL: string;\n        };\n        WebDAVDownload: {\n            ExternalURL: string;\n        };\n        WebShell: {\n            ExternalURL: string;\n        };\n        ContainerWebServices: {\n            ExternalURL: string;\n        };\n    };\n    Workbench: {\n        DisableSharingURLsUI: boolean;\n        ArvadosDocsite: string;\n        FileViewersConfigURL: string;\n        WelcomePageHTML: string;\n        InactivePageHTML: string;\n        SSHHelpPageHTML: string;\n        SSHHelpHostSuffix: string;\n        SiteName: string;\n        IdleTimeout: string;\n        BannerUUID: string;\n        UserProfileFormFields: {};\n        UserProfileFormMessage: string;\n    };\n    Login: {\n        LoginCluster: string;\n        Google: {\n            Enable: boolean;\n        };\n        LDAP: {\n            Enable: boolean;\n        };\n        OpenIDConnect: {\n            Enable: boolean;\n        };\n        PAM: {\n            Enable: boolean;\n        };\n        SSO: {\n            Enable: boolean;\n        };\n        Test: {\n            Enable: boolean;\n        };\n    };\n    Collections: {\n        ForwardSlashNameSubstitution: string;\n        ManagedProperties?: {\n            [key: string]: {\n                Function: string;\n                Value: string;\n                Protected?: boolean;\n            };\n        };\n        TrustAllContent: boolean;\n    };\n    Volumes: {\n        [key: string]: {\n            StorageClasses: 
{\n                [key: string]: boolean;\n            };\n        };\n    };\n    Users: {\n        AnonymousUserToken: string;\n        SupportEmailAddress: string;\n    };\n}\n\nexport class Config {\n    baseUrl!: string;\n    keepWebServiceUrl!: string;\n    keepWebInlineServiceUrl!: string;\n    remoteHosts!: {\n        [key: string]: string;\n    };\n    rootUrl!: string;\n    uuidPrefix!: string;\n    websocketUrl!: string;\n    workbenchUrl!: string;\n    workbench2Url!: string;\n    vocabularyUrl!: string;\n    fileViewersConfigUrl!: string;\n    loginCluster!: string;\n    clusterConfig!: ClusterConfigJSON;\n    apiRevision!: number;\n}\n\nexport const buildConfig = (clusterConfig: ClusterConfigJSON): Config => {\n    const clusterConfigJSON = removeTrailingSlashes(clusterConfig);\n    const config = new Config();\n    config.rootUrl = clusterConfigJSON.Services.Controller.ExternalURL;\n    config.baseUrl = `${config.rootUrl}/${ARVADOS_API_PATH}`;\n    config.uuidPrefix = clusterConfigJSON.ClusterID;\n    config.websocketUrl = clusterConfigJSON.Services.Websocket.ExternalURL;\n    config.workbench2Url = clusterConfigJSON.Services.Workbench2.ExternalURL;\n    config.workbenchUrl = clusterConfigJSON.Services.Workbench1.ExternalURL;\n    config.keepWebServiceUrl =\n        clusterConfigJSON.Services.WebDAVDownload.ExternalURL;\n    config.keepWebInlineServiceUrl =\n        clusterConfigJSON.Services.WebDAV.ExternalURL;\n    config.loginCluster = clusterConfigJSON.Login.LoginCluster;\n    config.clusterConfig = clusterConfigJSON;\n    config.apiRevision = 0;\n    mapRemoteHosts(clusterConfigJSON, config);\n    return config;\n};\n\nexport const getStorageClasses = (config: Config): string[] => {\n    const classes: Set<string> = new Set(['default']);\n    const volumes = config.clusterConfig.Volumes;\n    Object.keys(volumes).forEach((v) => {\n        Object.keys(volumes[v].StorageClasses || {}).forEach((sc) => {\n            if (volumes[v].StorageClasses[sc]) {\n                classes.add(sc);\n            }\n        });\n    });\n    return Array.from(classes);\n};\n\nconst getApiRevision = async (apiUrl: string) => {\n    try {\n        const dd = (await Axios.get<any>(`${apiUrl}/${DISCOVERY_DOC_PATH}`)).data;\n        return parseInt(dd.revision, 10) || 0;\n    } catch {\n        console.warn(\n            'Unable to get API Revision number, defaulting to zero. 
Some features may not work properly.'\n        );\n        return 0;\n    }\n};\n\nconst removeTrailingSlashes = (\n    config: ClusterConfigJSON\n): ClusterConfigJSON => {\n    const svcs: any = {};\n    Object.keys(config.Services).forEach((s) => {\n        svcs[s] = config.Services[s];\n        if (svcs[s].hasOwnProperty('ExternalURL')) {\n            svcs[s].ExternalURL = svcs[s].ExternalURL.replace(/\\/+$/, '');\n        }\n    });\n    return { ...config, Services: svcs };\n};\n\nexport const fetchConfig = () => {\n    return getEnvConfig()\n        .catch((e) => {\n            // If ENV config not found, warn and default to config file\n            console.warn(e.message);\n            return getWBConfig();\n        })\n        .catch((e) => {\n            // If config file not found, warn and exit\n            console.warn(e.message);\n            throw new Error(`Unable to start Workbench: could not load config from ENV or file.`);\n        })\n        .then((workbenchConfig) => {\n            if (workbenchConfig.API_HOST === undefined) {\n                throw new Error(\n                    `Unable to start Workbench. API_HOST is undefined in ${WORKBENCH_CONFIG_URL} or the environment.`\n                );\n            }\n            return Axios.get<ClusterConfigJSON>(\n                getClusterConfigURL(workbenchConfig.API_HOST)\n            ).then(async (response) => {\n                const apiRevision = await getApiRevision(\n                    response.data.Services.Controller.ExternalURL.replace(/\\/+$/, '')\n                );\n                const config = { ...buildConfig(response.data), apiRevision };\n                const warnLocalConfig = (varName: string) =>\n                    console.warn(\n                        `A value for ${varName} was found in ${WORKBENCH_CONFIG_URL}. To use the Arvados centralized configuration instead, \\\nremove the entire ${varName} entry from ${WORKBENCH_CONFIG_URL}`\n                    );\n\n                // Check if the workbench config has an entry for vocabulary and file viewer URLs\n                // If so, use these values (even if it is an empty string), but print a console warning.\n                // Otherwise, use the cluster config.\n                let fileViewerConfigUrl;\n                if (workbenchConfig.FILE_VIEWERS_CONFIG_URL !== undefined) {\n                    warnLocalConfig('FILE_VIEWERS_CONFIG_URL');\n                    fileViewerConfigUrl = workbenchConfig.FILE_VIEWERS_CONFIG_URL;\n                } else {\n                    fileViewerConfigUrl =\n                        config.clusterConfig.Workbench.FileViewersConfigURL ||\n                        '/file-viewers-example.json';\n                }\n                config.fileViewersConfigUrl = fileViewerConfigUrl;\n\n                if (workbenchConfig.VOCABULARY_URL !== undefined) {\n                    console.warn(\n                        `A value for VOCABULARY_URL was found in ${WORKBENCH_CONFIG_URL}. 
It will be ignored as the cluster already provides its own endpoint, you can safely remove it.`\n                    );\n                }\n                config.vocabularyUrl = getVocabularyURL(workbenchConfig.API_HOST);\n\n                return { config, apiHost: workbenchConfig.API_HOST };\n            }).catch((e) => {\n                throw new Error(`Failed to fetch cluster config from ${workbenchConfig.API_HOST}: ${e.message}`);\n            });\n        });\n};\n\n// Maps remote cluster hosts and removes the default RemoteCluster entry\nexport const mapRemoteHosts = (\n    clusterConfigJSON: ClusterConfigJSON,\n    config: Config\n) => {\n    config.remoteHosts = {};\n    Object.keys(clusterConfigJSON.RemoteClusters).forEach((k) => {\n        config.remoteHosts[k] = clusterConfigJSON.RemoteClusters[k].Host;\n    });\n    delete config.remoteHosts['*'];\n};\n\nexport const mockClusterConfigJSON = (\n    config: Partial<ClusterConfigJSON>\n): ClusterConfigJSON => ({\n    API: {\n        UnfreezeProjectRequiresAdmin: false,\n        MaxItemsPerResponse: 1000,\n    },\n    ClusterID: '',\n    Containers: {\n        ReserveExtraRAM: 576716800,\n    },\n    RemoteClusters: {},\n    Services: {\n        Controller: { ExternalURL: '' },\n        Workbench1: { ExternalURL: '' },\n        Workbench2: { ExternalURL: '' },\n        Websocket: { ExternalURL: '' },\n        WebDAV: { ExternalURL: '' },\n        WebDAVDownload: { ExternalURL: '' },\n        WebShell: { ExternalURL: '' },\n        ContainerWebServices: { ExternalURL: '' },\n        Workbench: {\n            DisableSharingURLsUI: false,\n            ArvadosDocsite: \"\",\n            FileViewersConfigURL: \"\",\n            WelcomePageHTML: \"\",\n            InactivePageHTML: \"\",\n            SSHHelpPageHTML: \"\",\n            SSHHelpHostSuffix: \"\",\n            SiteName: \"\",\n            IdleTimeout: \"0s\"\n        },\n    },\n    Workbench: {\n        DisableSharingURLsUI: false,\n        ArvadosDocsite: '',\n        FileViewersConfigURL: '',\n        WelcomePageHTML: '',\n        InactivePageHTML: '',\n        SSHHelpPageHTML: '',\n        SSHHelpHostSuffix: '',\n        SiteName: '',\n        IdleTimeout: '0s',\n        BannerUUID: \"\",\n        UserProfileFormFields: {},\n        UserProfileFormMessage: '',\n    },\n    Login: {\n        LoginCluster: '',\n        Google: {\n            Enable: false,\n        },\n        LDAP: {\n            Enable: false,\n        },\n        OpenIDConnect: {\n            Enable: false,\n        },\n        PAM: {\n            Enable: false,\n        },\n        SSO: {\n            Enable: false,\n        },\n        Test: {\n            Enable: false,\n        },\n    },\n    Collections: {\n        ForwardSlashNameSubstitution: '',\n        TrustAllContent: false,\n    },\n    Volumes: {},\n    Users: {\n        AnonymousUserToken: \"\",\n        SupportEmailAddress: \"arvados@example.com\",\n    },\n    ...config,\n});\n\nexport const mockConfig = (config: Partial<Config>): Config => ({\n    baseUrl: '',\n    keepWebServiceUrl: '',\n    keepWebInlineServiceUrl: '',\n    remoteHosts: {},\n    rootUrl: '',\n    uuidPrefix: '',\n    websocketUrl: '',\n    workbenchUrl: '',\n    workbench2Url: '',\n    vocabularyUrl: '',\n    fileViewersConfigUrl: '',\n    loginCluster: '',\n    clusterConfig: mockClusterConfigJSON({}),\n    apiRevision: 0,\n    ...config,\n});\n\n/**\n * Loads config from ENV. 
Rejects promise if ENV var not set\n */\nconst getEnvConfig = async (): Promise<WorkbenchConfig> => {\n    if (process.env.REACT_APP_ARVADOS_API_HOST === undefined) {\n        throw new Error(`No API host was found in the environment: REACT_APP_ARVADOS_API_HOST not set`);\n    }\n\n    console.warn(`Using env API host: ${process.env.REACT_APP_ARVADOS_API_HOST}.`);\n    return {\n        API_HOST: process.env.REACT_APP_ARVADOS_API_HOST,\n        VOCABULARY_URL: undefined,\n        FILE_VIEWERS_CONFIG_URL: undefined,\n    };\n};\n\n/**\n * Loads WB config file. Promise rejects if file can't be loaded\n */\nconst getWBConfig = async (): Promise<WorkbenchConfig> => {\n    return Axios.get<WorkbenchConfig>(\n        WORKBENCH_CONFIG_URL + '?nocache=' + new Date().getTime()\n    )\n        .then((response) => response.data)\n        .catch(() => {\n            throw new Error(`There was an exception getting the Workbench config file at ${WORKBENCH_CONFIG_URL}.`);\n        });\n};\n\nexport const ARVADOS_API_PATH = 'arvados/v1';\nexport const CLUSTER_CONFIG_PATH = 'arvados/v1/config';\nexport const VOCABULARY_PATH = 'arvados/v1/vocabulary';\nexport const DISCOVERY_DOC_PATH = 'discovery/v1/apis/arvados/v1/rest';\nexport const getClusterConfigURL = (apiHost: string) =>\n    `https://${apiHost}/${CLUSTER_CONFIG_PATH}?nocache=${new Date().getTime()}`;\nexport const getVocabularyURL = (apiHost: string) =>\n    `https://${apiHost}/${VOCABULARY_PATH}?nocache=${new Date().getTime()}`;\n"
  },
  {
    "path": "services/workbench2/src/common/custom-theme.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { createTheme } from '@mui/material/styles';\nimport { StyleRulesCallback } from '@mui/styles';\nimport { DeprecatedThemeOptions, Theme } from '@mui/material/styles';\nimport { blue, grey, green, yellow, red } from '@mui/material/colors';\n\nexport interface ArvadosThemeOptions extends DeprecatedThemeOptions {\n    customs: any;\n    components?: any;\n}\n\nexport interface ArvadosTheme extends Theme {\n    customs: {\n        colors: Colors\n    };\n}\n\nexport type CustomStyleRulesCallback<ClassKey extends string = string> =\n    StyleRulesCallback<Theme, {}, ClassKey>\n\ninterface Colors {\n    green700: string;\n    green800: string;\n    yellow100: string;\n    yellow700: string;\n    yellow900: string;\n    red100: string;\n    red900: string;\n    blue500: string;\n    blue700: string;\n    grey500: string;\n    grey600: string;\n    grey700: string;\n    grey900: string;\n    purple: string;\n    orange: string;\n    darkOrange: string;\n    greyL: string;\n    greyD: string;\n    darkblue: string;\n}\n\n/**\n* arvadosGreyLight is the hex equivalent of rgba(0,0,0,0.87) on #fafafa background and arvadosGreyDark is the hex equivalent of rgab(0,0,0,0.54) on #fafafa background\n*/\n\nconst arvadosDarkBlue = '#052a3c';\nconst arvadosGreyLight = '#737373';\nconst arvadosGreyVeryLight = '#fafafa';\nconst arvadosGreyDark = '#212121';\nconst grey500 = grey[\"500\"];\nconst grey600 = grey[\"600\"];\nconst grey700 = grey[\"700\"];\nconst grey800 = grey[\"800\"];\nconst grey900 = grey[\"900\"];\n\nexport const themeOptions: ArvadosThemeOptions = {\n    customs: {\n        colors: {\n            green700: green[\"700\"],\n            green800: green[\"800\"],\n            yellow100: yellow[\"100\"],\n            yellow700: yellow[\"700\"],\n            yellow900: yellow[\"900\"],\n            red100: red[\"100\"],\n            red900: red['900'],\n            blue500: blue['500'],\n            blue700: blue['700'],\n            grey500: grey500,\n            grey600: grey600,\n            grey700: grey700,\n            grey800: grey800,\n            grey900: grey900,\n            darkblue: arvadosDarkBlue,\n            orange: '#f0ad4e',\n            darkOrange: '#9A6E31',\n            greyL: arvadosGreyLight,\n            greyD: arvadosGreyDark,\n        }\n    },\n    components: {\n        MuiTableCell: {\n            styleOverrides: {\n                root: { paddingTop: '12px', paddingBottom: '12px' }\n            },\n        },\n        MuiTypography: {\n            styleOverrides: {\n                body1: { fontSize: '0.875rem' }\n            },\n        },\n        MuiAppBar: {\n            styleOverrides: {\n                colorPrimary: { backgroundColor: arvadosDarkBlue }\n            },\n        },\n        MuiTabs: {\n            styleOverrides: {\n                root: {\n                    color: grey600\n                },\n                indicator: {\n                    backgroundColor: arvadosDarkBlue\n                },\n            },\n        },\n        MuiTab: {\n            styleOverrides: {\n                root: {\n                    '&$selected': {\n                        fontWeight: 700,\n                    },\n                },\n            },\n        },\n        MuiList: {\n            styleOverrides: {\n                root: {\n                    color: grey900\n                },\n            },\n        },\n        MuiListItem: 
{\n            styleOverrides: {\n                root: {\n                    color: grey900\n                },\n            }\n        },\n        MuiListItemText: {\n            styleOverrides: {\n                root: {\n                    padding: 0,\n                    paddingBottom: '2px',\n                },\n            },\n        },\n        MuiListItemIcon: {\n            styleOverrides: {\n                root: {\n                    fontSize: '1.25rem',\n                    minWidth: 0,\n                    marginRight: '16px'\n                },\n            },\n        },\n        MuiCardHeader: {\n            styleOverrides: {\n                avatar: {\n                    display: 'flex',\n                    alignItems: 'center'\n                },\n                title: {\n                    color: arvadosGreyDark,\n                    fontSize: '1.25rem'\n                },\n            },\n        },\n        MuiAccordion: {\n            styleOverrides: {\n                root: {\n                    backgroundColor: arvadosGreyVeryLight,\n                },\n            },\n        },\n        MuiAccordionDetails: {\n            styleOverrides: {\n                root: {\n                    marginBottom: 0,\n                    paddingBottom: '4px',\n                },\n            },\n        },\n        MuiAccordionSummary: {\n            styleOverrides: {\n                content: {\n                    '&$expanded': {\n                        margin: 0,\n                    },\n                    color: grey700,\n                    fontSize: '1.25rem',\n                    margin: 0,\n                },\n            },\n        },\n        MuiMenuItem: {\n            styleOverrides: {\n                root: {\n                    padding: '8px 16px'\n                },\n            },\n        },\n        MuiInput: {\n            styleOverrides: {\n                root: {\n                    fontSize: '0.875rem'\n                },\n                underline: {\n                    '&:after': {\n                        borderBottomColor: arvadosDarkBlue\n                    },\n                    '&:hover:not($disabled):not($focused):not($error):before': {\n                        borderBottom: '1px solid inherit'\n                    },\n                },\n            },\n        },\n        MuiFormLabel: {\n            styleOverrides: {\n                root: {\n                    fontSize: '0.875rem',\n                    \"&$focused\": {\n                        \"&$focused:not($error)\": {\n                            color: arvadosDarkBlue\n                        },\n                    },\n                },\n            },\n        },\n        MuiStepIcon: {\n            styleOverrides: {\n                root: {\n                    '&$active': {\n                        color: arvadosDarkBlue\n                    },\n                    '&$completed': {\n                        color: 'inherited'\n                    },\n                },\n            },\n        },\n        MuiStepConnector: {\n            styleOverrides: {\n                vertical: {\n                    flex: \"unset\",\n                },\n            },\n        },\n        MuiLinearProgress: {\n            styleOverrides: {\n                barColorSecondary: {\n                    backgroundColor: red['700']\n                },\n            },\n        },\n    },\n    mixins: {\n        toolbar: {\n            minHeight: '48px'\n        }\n    },\n    palette: {\n 
       primary: {\n            main: '#017ead',\n            dark: '#015272',\n            light: '#82cffd',\n            contrastText: '#fff',\n        },\n        background: {\n            default: '#fafafa',\n        },\n    },\n};\n\nexport const CustomTheme = createTheme(themeOptions);\n"
  },
  {
    "path": "services/workbench2/src/common/file.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport const fileToArrayBuffer = (file: File) =>\n    new Promise<ArrayBuffer>((resolve, reject) => {\n        const reader = new FileReader();\n        reader.onload = () => {\n            resolve(reader.result as ArrayBuffer);\n        };\n        reader.onerror = () => {\n            reject();\n        };\n        reader.readAsArrayBuffer(file);\n    });\n"
  },
  {
    "path": "services/workbench2/src/common/formatters.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { formatFileSize, formatUploadSpeed, formatCost, formatCWLResourceSize } from \"./formatters\";\n\ndescribe('formatFileSize', () => {\n    it('should pick the largest unit', () => {\n        const base = 1024;\n        const testCases = [\n            {input: 0, output: '0 B'},\n            {input: 1, output: '1 B'},\n            {input: 1023, output: '1023 B'},\n            {input: base, output: '1.0 KiB'},\n            {input: 1.1 * base, output: '1.1 KiB'},\n            {input: 1.5 * base, output: '1.5 KiB'},\n            {input: base ** 2, output: '1.0 MiB'},\n            {input: 1.5 * (base ** 2), output: '1.5 MiB'},\n            {input: base ** 3, output: '1.0 GiB'},\n            {input: base ** 4, output: '1.0 TiB'},\n        ];\n\n        for (const { input, output } of testCases) {\n            expect(formatFileSize(input)).to.equal(output);\n        }\n    });\n\n    it('should handle accidental empty string or undefined input', () => {\n        expect(formatFileSize('')).to.equal('-');\n        expect(formatFileSize(undefined)).to.equal('-');\n    });\n\n    it('should handle accidental non-empty string input', () => {\n        expect(formatFileSize('foo')).to.equal('0 B');\n    });\n});\n\ndescribe('formatCWLResourceSize', () => {\n    it('should format bytes as MiB', () => {\n        const base = 1024 ** 2;\n\n        const testCases = [\n            {input: 0, output: '0 MiB'},\n            {input: 1, output: '0 MiB'},\n            {input: base - 1, output: '1 MiB'},\n            {input: 2 * base, output: '2 MiB'},\n            {input: 1024 * base, output: '1024 MiB'},\n            {input: 10000 * base, output: '10000 MiB'},\n        ];\n\n        for (const { input, output } of testCases) {\n            expect(formatCWLResourceSize(input)).to.equal(output);\n        }\n    });\n});\n\ndescribe('formatUploadSpeed', () => {\n    it('should show speed less than 1MB/s', () => {\n        // given\n        const speed = 900;\n\n        // when\n        const result = formatUploadSpeed(0, speed, 0, 1);\n\n        // then\n        expect(result).to.equal('0.90 MB/s');\n    });\n\n    it('should show 5MB/s', () => {\n        // given\n        const speed = 5230;\n\n        // when\n        const result = formatUploadSpeed(0, speed, 0, 1);\n\n        // then\n        expect(result).to.equal('5.23 MB/s');\n    });\n});\n\ndescribe('formatContainerCost', () => {\n    it('should correctly round to tenth of a cent', () => {\n        expect(formatCost(0.0)).to.equal('$0');\n        expect(formatCost(0.125)).to.equal('$0.125');\n        expect(formatCost(0.1254)).to.equal('$0.125');\n        expect(formatCost(0.1255)).to.equal('$0.126');\n    });\n\n    it('should round up any smaller value to 0.001', () => {\n        expect(formatCost(0.0)).to.equal('$0');\n        expect(formatCost(0.001)).to.equal('$0.001');\n        expect(formatCost(0.0001)).to.equal('$0.001');\n        expect(formatCost(0.00001)).to.equal('$0.001');\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/common/formatters.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { PropertyValue } from 'models/search-bar';\nimport {\n    Vocabulary,\n    getTagKeyLabel,\n    getTagValueLabel,\n} from 'models/vocabulary';\nimport moment from 'moment';\n\nexport const formatDateTime = (isoDate?: string | null, utc: boolean = false) => {\n    if (isoDate) {\n        const date = new Date(isoDate);\n        let text: string;\n        if (utc) {\n            text = date.toUTCString();\n        } else {\n            text = date.toLocaleString();\n        }\n        return text === 'Invalid Date' ? '(none)' : text;\n    }\n    return '-';\n};\n\nexport const formatDateOnly = (isoDate?: string | null, withDaysRemaining?: boolean) => {\n    if (isoDate) {\n        const date = new Date(isoDate);\n        if (date) {\n            return withDaysRemaining ? `${date.toLocaleDateString()} (${daysRemaining(isoDate)})` : date.toLocaleDateString();\n        }\n        return '-';\n    }\n    return '-';\n};\n\nexport const daysRemaining = (targetDate: string | Date): string => {\n    const now = moment();\n    const end = moment(targetDate);\n\n    if (end.isBefore(now)) return 'date is in the past';\n\n    const days = end.diff(now, 'days');\n\n    return days > 0 ? `in ${days} day${days > 1 ? 's' : ''}` : 'today';\n};\n\nexport const isElapsed = (isoString: string): boolean => {\n    return moment(isoString).isBefore(moment());\n};\n\nexport const isWithinExpiration = (isoString: string, limit: number): boolean => {\n    const today = moment().startOf('day');\n    const target = moment(isoString).startOf('day');\n\n    const diff = Math.abs(target.diff(today, 'days'));\n    return diff <= limit;\n};\n\nexport const formatFileSize = (size?: number | string) => {\n    if (typeof size === 'number') {\n        if (size === 0) {\n            return '0 B';\n        }\n\n        for (const { base, unit } of FILE_SIZES) {\n            if (size >= base) {\n                return `${(size / base).toFixed(base === 1 ? 0 : 1)} ${unit}`;\n            }\n        }\n    }\n    if ((typeof size === 'string' && size === '') || size === undefined) {\n        return '-';\n    }\n    return '0 B';\n};\n\nexport const formatCWLResourceSize = (size: number) => {\n    return `${(size / CWL_SIZE.base).toFixed(0)} ${CWL_SIZE.unit}`;\n};\n\nexport const formatTime = (time: number, seconds?: boolean) => {\n    const minutes = Math.floor((time / (1000 * 60)) % 60).toFixed(0);\n    const hours = Math.floor(time / (1000 * 60 * 60)).toFixed(0);\n\n    if (seconds) {\n        const seconds = Math.floor((time / 1000) % 60).toFixed(0);\n        return hours + 'h ' + minutes + 'm ' + seconds + 's';\n    }\n\n    return hours + 'h ' + minutes + 'm';\n};\n\nexport const getTimeDiff = (endTime: string, startTime: string) => {\n    return new Date(endTime).getTime() - new Date(startTime).getTime();\n};\n\nexport const formatProgress = (loaded: number, total: number) => {\n    const progress = loaded >= 0 && total > 0 ? (loaded * 100) / total : 0;\n    return `${progress.toFixed(2)}%`;\n};\n\nexport function formatUploadSpeed(\n    prevLoaded: number,\n    loaded: number,\n    prevTime: number,\n    currentTime: number\n) {\n    const speed =\n        loaded > prevLoaded && currentTime > prevTime\n            ? 
(loaded - prevLoaded) / (currentTime - prevTime)\n            : 0;\n\n    return `${(speed / 1000).toFixed(2)} MB/s`;\n}\n\nconst FILE_SIZES = [\n    {\n        base: 1024 ** 4,\n        unit: 'TiB',\n    },\n    {\n        base: 1024 ** 3,\n        unit: 'GiB',\n    },\n    {\n        base: 1024 ** 2,\n        unit: 'MiB',\n    },\n    {\n        base: 1024,\n        unit: 'KiB',\n    },\n    {\n        base: 1,\n        unit: 'B',\n    },\n];\n\nconst CWL_SIZE = {\n    base: 1024 ** 2,\n    unit: 'MiB',\n};\n\nexport const formatPropertyValue = (\n    pv: PropertyValue,\n    vocabulary?: Vocabulary\n) => {\n    if (vocabulary && pv.keyID && pv.valueID) {\n        return `${getTagKeyLabel(pv.keyID, vocabulary)}: ${getTagValueLabel(\n            pv.keyID,\n            pv.valueID!,\n            vocabulary\n        )}`;\n    }\n    if (pv.key) {\n        return pv.value ? `${pv.key}: ${pv.value}` : pv.key;\n    }\n    return '';\n};\n\nexport const formatCost = (cost: number): string => {\n    const decimalPlaces = 3;\n\n    const factor = Math.pow(10, decimalPlaces);\n    const rounded = Math.round(cost * factor) / factor;\n    if (cost > 0 && rounded === 0) {\n        // Display min value of 0.001\n        return `$${1 / factor}`;\n    } else {\n        // Otherwise use rounded value to proper decimal places\n        return `$${rounded}`;\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/common/frozen-resources.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ProjectResource, isProjectResource } from \"models/project\";\nimport { Resource } from \"models/resource\";\nimport { getResource } from \"store/resources/resources\";\nimport { ResourcesState } from \"store/resources/resources\";\nimport { memoize } from \"lodash\";\n\nexport const resourceIsFrozen = memoize((resource: Resource, resources: ResourcesState): boolean => {\n    let isFrozen: boolean = isProjectResource(resource) ? !!resource.frozenByUuid : false;\n    let ownerUuid: string | undefined = resource?.ownerUuid;\n\n    while(!isFrozen && !!ownerUuid && ownerUuid.indexOf('000000000000000') === -1) {\n        const parentResource: ProjectResource | undefined = getResource<ProjectResource>(ownerUuid)(resources);\n        isFrozen = !!parentResource?.frozenByUuid;\n        ownerUuid = parentResource?.ownerUuid;\n    }\n\n    return isFrozen;\n})"
  },
  {
    "path": "services/workbench2/src/common/getuser.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from 'store/store';\n\nexport const getUserUuid = (state: RootState) => {\n    const user = state.auth.user;\n    if (user) {\n        return user.uuid;\n    } else {\n        return undefined;\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/common/html-sanitize.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport DOMPurify from 'dompurify';\n\ntype TDomPurifyConfig = {\n    ALLOWED_TAGS: string[];\n    ALLOWED_ATTR: string[];\n};\n\nconst domPurifyConfig: TDomPurifyConfig = {\n    ALLOWED_TAGS: [\n        'a',\n        'b',\n        'blockquote',\n        'br',\n        'code',\n        'del',\n        'dd',\n        'dl',\n        'dt',\n        'em',\n        'h1',\n        'h2',\n        'h3',\n        'h4',\n        'h5',\n        'h6',\n        'hr',\n        'i',\n        'img',\n        'kbd',\n        'li',\n        'ol',\n        'p',\n        'pre',\n        's',\n        'del',\n        'section',\n        'span',\n        'strong',\n        'sub',\n        'sup',\n        'ul',\n    ],\n    ALLOWED_ATTR: ['src', 'width', 'height', 'href', 'alt', 'title', 'style' ],\n};\n\nexport const sanitizeHTML = (dirtyString: string): string => DOMPurify.sanitize(dirtyString, domPurifyConfig);\n\n"
  },
  {
    "path": "services/workbench2/src/common/labels.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ResourceKind } from \"models/resource\";\nimport { ProcessTypeFilter } from \"store/resource-type-filters/resource-type-filters\";\n\nexport const resourceLabel = (type: string, subtype = '') => {\n    switch (type) {\n        case ResourceKind.COLLECTION:\n            return \"Data collection\";\n        case ResourceKind.PROJECT:\n            if (subtype === \"filter\") {\n                return \"Filter group\";\n            } else if (subtype === \"role\") {\n                return \"Group\";\n            }\n            return \"Project\";\n        case ResourceKind.PROCESS:\n            if (subtype === ProcessTypeFilter.MAIN_PROCESS) {\n                return \"Workflow Run\";\n            }\n            return \"Workflow Step\";\n        case ResourceKind.USER:\n            return \"User\";\n        case ResourceKind.GROUP:\n            return \"Group\";\n        case ResourceKind.VIRTUAL_MACHINE:\n            return \"Virtual Machine\";\n        case ResourceKind.WORKFLOW:\n            return \"Workflow\";\n        default:\n            return \"Unknown\";\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/common/link-update-name.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { LinkResource } from 'models/link';\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { ServiceRepository, getResourceService } from 'services/services';\nimport { Resource, TrashableResource, extractUuidKind } from 'models/resource';\nimport { CommonResourceServiceError, getCommonResourceServiceError } from 'services/common-service/common-resource-service';\nimport { getResource } from 'store/resources/resources';\n\ntype NameableResource = Resource & { name?: string };\n\n/**\n * Validates links are not to trashed resources and updates link resource names\n * to match resource name if necessary\n */\nconst verifyAndUpdateLink = async (link: LinkResource, dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<LinkResource | undefined> => {\n    //head resource should already be in the store\n    let headResource = getResource<Resource>(link.headUuid)(getState().resources);\n    //If resource not in store, fetch it\n    if (!headResource) {\n        try {\n            headResource = await fetchResource(link.headUuid)(dispatch, getState, services);\n        } catch (e) {\n            // If not found, assume deleted permanently and suppress this entry\n            if (getCommonResourceServiceError(e) === CommonResourceServiceError.NOT_FOUND) {\n                return undefined;\n            }\n            // If non-404 exception was raised, fall through to the headResource check\n        }\n        // Any other error we keep the entry but skip updating the name\n        if (!headResource) {\n            console.error('Could not validate link', link, 'because link head', link.headUuid, 'is not available');\n            // if it's missing name, we can't render it, so skip it\n            if (!link.name) {\n                return undefined\n            };\n            return link\n        }\n    }\n    // If resource is trashed, filter it out\n    if ((headResource as TrashableResource).isTrashed) {\n        return undefined;\n    }\n\n    if (validateLinkNameProp(link, headResource) === true) return link;\n\n    const updatedLink = updateLinkNameProp(link, headResource);\n    updateRemoteLinkName(updatedLink)(dispatch, getState, services);\n\n    return updatedLink;\n};\n\n/**\n * Filters links to trashed / 404ed resources and updates link name to match resource\n */\nexport const verifyAndUpdateLinks = async (links: LinkResource[], dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<LinkResource[]> => {\n    // Verify and update links in paralell\n    const updatedLinks = links.map((link) => verifyAndUpdateLink(link, dispatch, getState, services));\n    // Filter out undefined links (trashed, or 404)\n    const validLinks = (await Promise.all(updatedLinks)).filter((link): link is LinkResource => (link !== undefined));\n\n    return Promise.resolve(validLinks);\n};\n\n/**\n * Fetches any resource type for verifying link names / trash status\n * Exposes exceptions to allow the consumer to differentiate errors\n */\nconst fetchResource = (uuid: string, showErrors?: boolean) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<Resource | undefined> => {\n    const kind = extractUuidKind(uuid);\n    const service = getResourceService(kind)(services);\n    if (service) {\n        return await service.get(uuid, showErrors);\n    }\n    
    return undefined;\n};\n\nconst validateLinkNameProp = (link: LinkResource, head: NameableResource) => {\n    if (!link.name || link.name !== head.name) return false;\n    return true;\n};\n\nconst updateLinkNameProp = (link: LinkResource, head: NameableResource): LinkResource => {\n    const updatedLink = { ...link };\n    if (head.name) updatedLink.name = head.name;\n    return updatedLink;\n};\n\nconst updateRemoteLinkName = (link: LinkResource) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    try {\n        const kind = extractUuidKind(link.uuid);\n        const service = getResourceService(kind)(services);\n        if (service) {\n            await service.update(link.uuid, { name: link.name });\n        }\n    } catch (error) {\n        console.error('Could not update link name', link, error);\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/common/menu-action-set-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet } from 'views-components/context-menu/context-menu-action-set';\nimport { ContextMenuResource, ContextMenuKind } from \"store/context-menu/context-menu\";\nimport { sortMenuItems, menuDirection } from 'views-components/context-menu/menu-item-sort';\n\nconst menuActionSets = new Map<string, ContextMenuActionSet>();\n\nexport const addMenuActionSet = (name: ContextMenuKind, itemSet: ContextMenuActionSet) => {\n    const sorted = itemSet.map(items => sortMenuItems(name, items, menuDirection.VERTICAL));\n    menuActionSets.set(name, sorted);\n};\n\nconst emptyActionSet: ContextMenuActionSet = [];\nexport const getMenuActionSet = (resource?: ContextMenuResource): ContextMenuActionSet =>\n    resource ? menuActionSets.get(resource.menuKind) || emptyActionSet : emptyActionSet;\n\nexport const getMenuActionSetByKind = (kind: ContextMenuKind): ContextMenuActionSet =>\n    menuActionSets.get(kind) || emptyActionSet;\n"
  },
  {
    "path": "services/workbench2/src/common/objects.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\nimport { union, keys as keys_1, filter } from \"lodash\";\n\nexport function getModifiedKeys(a: any, b: any) {\n    const keys = union(keys_1(a), keys_1(b));\n    return filter(keys, key => a[key] !== b[key]);\n}\n\nexport function getModifiedKeysValues(a: any, b: any) {\n    const keys = getModifiedKeys(a, b);\n    const obj = {};\n    keys.forEach(k => {\n        obj[k] = a[k];\n    });\n    return obj;\n}\n\nexport function sortByKey<T>(arr: T[], key: string): T[] {\n    return arr.sort((a, b) => {\n        if (a[key] < b[key]) {\n            return -1;\n        }\n        if (a[key] > b[key]) {\n            return 1;\n        }\n        return 0; // If equal\n    });\n}\n  \n  "
  },
  {
    "path": "services/workbench2/src/common/plugintypes.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Dispatch, Middleware } from 'redux';\nimport { RootStore, RootState } from 'store/store';\nimport { ResourcesState } from 'store/resources/resources';\nimport { Location } from 'history';\nimport { ServiceRepository } from \"services/services\";\n\nexport type ElementListReducer = (startingList: React.ReactElement[], itemClass?: string) => React.ReactElement[];\nexport type CategoriesListReducer = (startingList: string[]) => string[];\nexport type NavigateMatcher = (dispatch: Dispatch, getState: () => RootState, uuid: string) => boolean;\nexport type LocationChangeMatcher = (store: RootStore, pathname: string) => boolean;\nexport type EnableNew = (location: Location, currentItemId: string, currentUserUUID: string | undefined, resources: ResourcesState) => boolean;\nexport type MiddlewareListReducer = (startingList: Middleware[], services: ServiceRepository) => Middleware[];\n\n/* Workbench Plugin API\n\n   Code to your plugin should go into a subdirectory of 'plugins/'.\n\n   Your plugin should implement a \"register\" function, which will be\n   called with an object with the PluginConfig interface described\n   below.  The register function may make in-place modifications to\n   the pluginConfig object, but to preserve composability, it is\n   strongly advised this should be limited to push()ing new values\n   onto the various lists of hooks.\n\n   To enable a plugin, edit 'plugins.tsx', import the register\n   function exported by the plugin, and add a call to the register\n   function following the examples in the comments.  Then, build a new\n   Workbench package that includes the plugin.\n\n   Be aware that because plugins heavily leverage workbench, and in\n   fact must be compiled together, they are considered \"derived works\"\n   and so _must_ be license-compatible with AGPL-3.0.\n\n */\n\nexport interface PluginConfig {\n\n    /* During initialization, each\n     * function in the callback list will be called with the list of\n     * react - router \"Route\" components that will be used select what should\n     * be displayed in the central panel based on the navigation bar.\n     *\n     * The callback function may add, edit, or remove items from this list,\n     * and return a new list of components, which will be passed to the next\n     * function in `centerPanelList`.\n     *\n     * The hooks are applied in `views/workbench/workbench.tsx`.\n     *  */\n    centerPanelList: ElementListReducer[];\n\n    /* During initialization, each\n     * function in the callback list will be called with the list of strings\n     * that are the top-level categories in the left hand navigation tree.\n     *\n     * The callback function may add, edit, or remove items from this list,\n     * and return a new list of strings, which will be passed to the next\n     * function in `sidePanelCategories`.\n     *\n     * The hooks are applied in `store/side-panel-tree/side-panel-tree-actions.ts`.\n     *  */\n    sidePanelCategories: CategoriesListReducer[];\n\n    /* This is a list of additional dialog box components.\n     * Dialogs are components that are wrapped using the \"withDialog()\" method.\n     *\n     * These are added to the list in `views/workbench/workbench.tsx`.\n     *  */\n    dialogs: React.ReactElement[];\n\n    /* This is a list of additional navigation matchers.\n     * These are callbacks that are called by the 
    /* This is a list of additional navigation matchers.\n     * These are callbacks that are called by the navigateTo(uuid) method to\n     * set the path in the navigation bar to display the desired resource.\n     * Each handler should return \"true\" if the uuid was handled and \"false\" or \"undefined\" if not.\n     *\n     * These are used in `store/navigation/navigation-action.tsx`.\n     *  */\n    navigateToHandlers: NavigateMatcher[];\n\n    /* This is a list of additional location change matchers.\n     * These are callbacks called when the URL in the navigation bar changes\n     * (this could be in response to \"navigateTo()\" or due to the user\n     * entering/changing the URL directly).\n     *\n     * The Route components in centerPanelList should\n     * automatically change in response to navigation.  The\n     * purpose of these handlers is to trigger additional loading,\n     * such as fetching the object contents that will be\n     * displayed.\n     *\n     * Each handler should return \"true\" if the path was handled and \"false\" or \"undefined\" if not.\n     *\n     * These are used in `routes/route-change-handlers.ts`.\n     */\n    locationChangeHandlers: LocationChangeMatcher[];\n\n    /* Replace the left side of the app bar.  Normally, this displays\n     * the site banner.\n     *\n     * Note: unlike most of the other hooks, this is not composable.\n     * This completely replaces that section of the app bar.  Multiple\n     * plugins setting this value will conflict.\n     *\n     * Used in 'views-components/main-app-bar/main-app-bar.tsx'\n     */\n    appBarLeft?: React.ReactElement;\n\n    /* Replace the middle part of the app bar.  Normally, this displays\n     * the search bar.\n     *\n     * Note: unlike most of the other hooks, this is not composable.\n     * This completely replaces that section of the app bar.  Multiple\n     * plugins setting this value will conflict.\n     *\n     * Used in 'views-components/main-app-bar/main-app-bar.tsx'\n     */\n    appBarMiddle?: React.ReactElement;\n\n    /* Replace the right part of the app bar.  Normally, this displays\n     * the admin menu and help menu.\n     * (Note: the user menu can be customized separately using accountMenuList)\n     *\n     * Note: unlike most of the other hooks, this is not composable.\n     * This completely replaces that section of the app bar.  Multiple\n     * plugins setting this value will conflict.\n     *\n     * Used in 'views-components/main-app-bar/main-app-bar.tsx'\n     */\n    appBarRight?: React.ReactElement;\n\n    /* During initialization, each\n     * function in the callback list will be called with the menu items that\n     * will appear in the \"user account\" menu.\n     *\n     * The callback function may add, edit, or remove items from this list,\n     * and return a new list of menu items, which will be passed to the next\n     * function in `accountMenuList`.\n     *\n     * The hooks are applied in 'views-components/main-app-bar/account-menu.tsx'.\n     *  */\n    accountMenuList: ElementListReducer[];\n\n    /* Each function in this list is called to determine if the \"NEW\" button\n     * should be enabled or disabled.  If any function returns \"true\", the button\n     * (and corresponding drop-down menu) will be enabled.\n     *\n     * The hooks are applied in 'views-components/side-panel-button/side-panel-button.tsx'.\n     *  */\n    enableNewButtonMatchers: EnableNew[];\n\n    /* During initialization, each\n     * function in the callback list will be called with the menu items that\n     * will appear in the \"NEW\" dropdown menu.\n     *\n     * The callback function may add, edit, or remove items from this list,\n     * and return a new list of menu items, which will be passed to the next\n     * function in `newButtonMenuList`.\n     *\n     * The hooks are applied in 'views-components/side-panel-button/side-panel-button.tsx'.\n     *  */\n    newButtonMenuList: ElementListReducer[];\n\n    /* Add Middlewares to the Redux store.\n     *\n     * Middlewares intercept redux actions before they get to the reducer, and\n     * may produce side effects.  For example, the REQUEST_ITEMS action is intercepted by a middleware to\n     * trigger a load of data table contents.\n     *\n     * https://redux.js.org/tutorials/fundamentals/part-4-store#middleware\n     *\n     * Used in 'store/store.ts'\n     *  */\n    middlewares: MiddlewareListReducer[];\n}\n"
  },
  {
    "path": "services/workbench2/src/common/redirect-to.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { getInlineFileUrl } from 'views-components/context-menu/actions/helpers';\nimport { Config } from './config';\n\nexport const REDIRECT_TO_DOWNLOAD_KEY = 'redirectToDownload';\nexport const REDIRECT_TO_PREVIEW_KEY = 'redirectToPreview';\nexport const REDIRECT_TO_KEY = 'redirectTo';\n\nconst getRedirectKeyFromUrl = (href: string): string => {\n    let params = new URL(href).searchParams;\n    switch (true) {\n        case params.has(REDIRECT_TO_DOWNLOAD_KEY):\n            return REDIRECT_TO_DOWNLOAD_KEY;\n        case params.has(REDIRECT_TO_PREVIEW_KEY):\n            return REDIRECT_TO_PREVIEW_KEY;\n        case params.has(REDIRECT_TO_KEY):\n            return REDIRECT_TO_KEY;\n        default:\n            return \"\";\n    }\n}\n\nconst getRedirectKeyFromStorage = (localStorage: Storage): string => {\n    if (localStorage.getItem(REDIRECT_TO_DOWNLOAD_KEY)) {\n        return REDIRECT_TO_DOWNLOAD_KEY;\n    } else if (localStorage.getItem(REDIRECT_TO_PREVIEW_KEY)) {\n        return REDIRECT_TO_PREVIEW_KEY;\n    }\n    return \"\";\n}\n\nexport const storeRedirects = () => {\n    const { location: { href }, localStorage } = window;\n    const redirectKey = getRedirectKeyFromUrl(href);\n\n    // Change old redirectTo -> redirectToPreview when storing redirect\n    const redirectStoreKey = redirectKey === REDIRECT_TO_KEY ? REDIRECT_TO_PREVIEW_KEY : redirectKey;\n\n    if (localStorage && redirectKey && redirectStoreKey) {\n        let params = new URL(href).searchParams;\n        localStorage.setItem(redirectStoreKey, params.get(redirectKey) || \"\");\n    }\n};\n\nexport const handleRedirects = (token: string, config: Config) => {\n    const { localStorage } = window;\n    const { keepWebServiceUrl, keepWebInlineServiceUrl } = config;\n\n    if (localStorage) {\n        const redirectKey = getRedirectKeyFromStorage(localStorage);\n        const redirectPath = redirectKey ? localStorage.getItem(redirectKey) : '';\n        redirectKey && localStorage.removeItem(redirectKey);\n\n        if (redirectKey && redirectPath) {\n            let redirectUrl = new URL(keepWebServiceUrl);\n            // encodeURI will not touch characters such as # ? that may be\n            // delimiter in overall URL syntax\n            // Setting pathname attribute will in effect encode # and ?\n            // while leaving others minimally disturbed (useful for debugging\n            // and avoids excessive percent-encoding)\n            redirectUrl.pathname = encodeURI(redirectPath);\n            redirectUrl.searchParams.set(\"api_token\", token);\n            let u = redirectUrl.href;\n            if (redirectKey === REDIRECT_TO_PREVIEW_KEY) {\n                u = getInlineFileUrl(u, keepWebServiceUrl, keepWebInlineServiceUrl);\n            }\n            if (u) {\n                window.location.href = u;\n            }\n        }\n    }\n};\n"
  },
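The two helpers above are meant to run at different points in the app lifecycle; a simplified bootstrap sketch (the surrounding login flow is an assumption):

    import { storeRedirects, handleRedirects } from 'common/redirect-to';
    import { Config } from 'common/config';

    // 1. Early on page load, before authentication: if the URL carries
    //    ?redirectToDownload=/c=<collection-uuid>/file.txt (or the preview/legacy
    //    variants), the path is stashed in localStorage across the login round-trip.
    storeRedirects();

    // 2. Once a token is available: replay the stored redirect, sending the
    //    browser to keep-web with ?api_token=... appended. Preview redirects are
    //    additionally rewritten to the inline (keepWebInlineServiceUrl) host.
    declare const token: string;   // obtained from login
    declare const config: Config;  // loaded cluster config
    handleRedirects(token, config);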
  {
    "path": "services/workbench2/src/common/regexp.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport const escapeRegExp = (st: string) =>\n    st.replace(/[.*+?^${}()|[\\]\\\\]/g, '\\\\$&'); // $& means the whole matched string\n"
  },
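Usage sketch: escape untrusted input so a dynamically built RegExp matches it literally.

    import { escapeRegExp } from 'common/regexp';

    const userInput = 'report (v1.2).txt';
    const re = new RegExp(escapeRegExp(userInput)); // /report \(v1\.2\)\.txt/
    re.test('final report (v1.2).txt'); // true
    re.test('report v1x2.txt');         // false; without escaping, '(' ')' '.' would make this match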
  {
    "path": "services/workbench2/src/common/resource-to-menu-kind.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { AuthState } from 'store/auth/auth-reducer';\nimport { getResource } from 'store/resources/resources';\nimport { Resource, ResourceKind } from 'models/resource';\nimport { resourceIsFrozen } from 'common/frozen-resources';\nimport { GroupResource, GroupClass, isGroupResource, isUserGroup, isGroupMemberLink, isBuiltinGroup } from 'models/group';\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\nimport { getProcess, isProcessCancelable } from 'store/processes/process';\nimport { isCollectionResource } from 'models/collection';\nimport { ResourcesState } from 'store/resources/resources';\n\ntype ProjectToMenuArgs = {\n    isAdmin: boolean;\n    readonly: boolean;\n    isFrozen: boolean;\n    canManage: boolean;\n    canWrite: boolean;\n    isFilterGroup: boolean;\n    unfreezeRequiresAdmin: boolean;\n    isEditable: boolean;\n};\n\ntype CollectionToMenuArgs = {\n    isAdmin: boolean;\n    isEditable: boolean;\n    isOnlyWriteable: boolean;\n    isOldVersion: boolean;\n    isTrashed: boolean;\n};\n\ntype ProcessToMenuArgs = {\n    isAdmin: boolean;\n    isRunning: boolean;\n    canWriteProcess: boolean;\n};\n\ntype ProjectMenuKind = ContextMenuKind.PROJECT\n    | ContextMenuKind.PROJECT_ADMIN\n    | ContextMenuKind.FROZEN_PROJECT\n    | ContextMenuKind.FROZEN_PROJECT_ADMIN\n    | ContextMenuKind.FROZEN_MANAGEABLE_PROJECT\n    | ContextMenuKind.MANAGEABLE_PROJECT\n    | ContextMenuKind.READONLY_PROJECT\n    | ContextMenuKind.WRITEABLE_PROJECT\n    | ContextMenuKind.FILTER_GROUP\n    | ContextMenuKind.FILTER_GROUP_ADMIN;\n\ntype CollectionMenuKind = ContextMenuKind.COLLECTION\n    | ContextMenuKind.READONLY_COLLECTION\n    | ContextMenuKind.WRITEABLE_COLLECTION\n    | ContextMenuKind.OLD_VERSION_COLLECTION\n    | ContextMenuKind.TRASHED_COLLECTION\n    | ContextMenuKind.COLLECTION_ADMIN;\n\ntype ProcessMenuKind = ContextMenuKind.PROCESS_RESOURCE\n    | ContextMenuKind.PROCESS_ADMIN\n    | ContextMenuKind.RUNNING_PROCESS_RESOURCE\n    | ContextMenuKind.RUNNING_PROCESS_ADMIN\n    | ContextMenuKind.READONLY_PROCESS_RESOURCE;\n\nexport const resourceToMenuKind = (uuid: string, readonly = false) =>\n    (dispatch: Dispatch, getState: () => RootState): ContextMenuKind | undefined => {\n        const { auth, resources } = getState();\n        const resource = getResource<Resource>(uuid)(resources);\n        if (!resource) return;\n        const isAdmin = auth.user?.isAdmin || false;\n        const isFrozen = resourceIsFrozen(resource, resources);\n        const isEditable = getIsEditable(isAdmin, resource, resources, readonly, isFrozen);\n\n        if (isUserGroup(resource)) {\n            if (isBuiltinGroup(resource.uuid)) {\n                return ContextMenuKind.BUILT_IN_GROUP\n            }\n            return ContextMenuKind.GROUPS\n        }\n        if (isGroupResource(resource)) {\n            const { canManage = false, canWrite = false } = resource;\n            const unfreezeRequiresAdmin = getUnfreezeRequiresAdmin(auth);\n            const isFilterGroup = resource.groupClass === GroupClass.FILTER;\n            return getProjectMenuKind({ isAdmin, isFrozen, isEditable, canManage, canWrite, unfreezeRequiresAdmin, isFilterGroup, readonly });\n        }\n        if (isCollectionResource(resource)){\n            const collectionParent = 
getResource<GroupResource>(resource.ownerUuid)(resources);\n            const isOnlyWriteable = collectionParent?.canWrite === true && collectionParent.canManage === false;\n            const isOldVersion = resource.uuid !== resource.currentVersionUuid;\n            const isTrashed = resource.isTrashed || false;\n            return getCollectionMenuKind({ isAdmin, isEditable, isOldVersion, isTrashed, isOnlyWriteable });\n        }\n        switch (resource.kind) {\n            case ResourceKind.PROCESS:\n                const process = getProcess(uuid)(resources);\n                const canWriteProcess = !!(process && getResource<GroupResource>(process.containerRequest.ownerUuid)(resources)?.canWrite);\n                const isRunning = process ? isProcessCancelable(process) : false;\n                return getProcessMenuKind({ isAdmin, isRunning, canWriteProcess });\n            case ResourceKind.USER:\n                return ContextMenuKind.USER_DETAILS;\n            case ResourceKind.LINK:\n                if (isGroupMemberLink(resource)) return ContextMenuKind.GROUP_MEMBER;\n                return ContextMenuKind.LINK;\n            case ResourceKind.WORKFLOW:\n                return isEditable ? ContextMenuKind.WORKFLOW : ContextMenuKind.READONLY_WORKFLOW;\n            case ResourceKind.EXTERNAL_CREDENTIAL:\n                return ContextMenuKind.EXTERNAL_CREDENTIAL;\n            default:\n                return;\n        }\n    };\n\nconst getProjectMenuKind = ({ isAdmin, readonly, isFrozen, canManage, canWrite, unfreezeRequiresAdmin, isEditable, isFilterGroup }: ProjectToMenuArgs): ProjectMenuKind => {\n    if (isFrozen) {\n        if (isAdmin) {\n            return ContextMenuKind.FROZEN_PROJECT_ADMIN;\n        }\n        if (canManage) {\n            if (unfreezeRequiresAdmin) return ContextMenuKind.MANAGEABLE_PROJECT;\n            return ContextMenuKind.FROZEN_MANAGEABLE_PROJECT;\n        }\n        if (isEditable) {\n            return ContextMenuKind.FROZEN_PROJECT;\n        }\n        return ContextMenuKind.READONLY_PROJECT;\n    }\n\n    if (isAdmin && !readonly) {\n        if (isFilterGroup) return ContextMenuKind.FILTER_GROUP_ADMIN;\n        return ContextMenuKind.PROJECT_ADMIN;\n    }\n\n    if (canManage === false && canWrite === true) {\n        return ContextMenuKind.WRITEABLE_PROJECT;\n    }\n\n    if (!isEditable) {\n        return ContextMenuKind.READONLY_PROJECT;\n    }\n\n    if (isFilterGroup) return ContextMenuKind.FILTER_GROUP;\n\n    return ContextMenuKind.PROJECT;\n};\n\nconst getCollectionMenuKind = ({ isAdmin, isEditable, isOnlyWriteable, isOldVersion, isTrashed }: CollectionToMenuArgs): CollectionMenuKind => {\n    if (isOldVersion) {\n        return ContextMenuKind.OLD_VERSION_COLLECTION;\n    }\n\n    if (isTrashed && isEditable) {\n        return ContextMenuKind.TRASHED_COLLECTION;\n    }\n\n    if (isAdmin && isEditable) {\n        return ContextMenuKind.COLLECTION_ADMIN;\n    }\n\n    if (!isEditable) {\n        return ContextMenuKind.READONLY_COLLECTION;\n    }\n\n    return isOnlyWriteable ? ContextMenuKind.WRITEABLE_COLLECTION : ContextMenuKind.COLLECTION;\n};\n\nconst getProcessMenuKind = ({ isAdmin, isRunning, canWriteProcess }: ProcessToMenuArgs): ProcessMenuKind => {\n    if (isAdmin) {\n        return isRunning ? ContextMenuKind.RUNNING_PROCESS_ADMIN : ContextMenuKind.PROCESS_ADMIN;\n    }\n\n    if (isRunning) {\n        return ContextMenuKind.RUNNING_PROCESS_RESOURCE;\n    }\n\n    return canWriteProcess ? 
ContextMenuKind.PROCESS_RESOURCE : ContextMenuKind.READONLY_PROCESS_RESOURCE;\n};\n\n//Utils--------------------------------------------------------------\nconst getUnfreezeRequiresAdmin = (auth: AuthState) => {\n    const { remoteHostsConfig } = auth;\n    if (!remoteHostsConfig) return false;\n    return Object.keys(remoteHostsConfig).some((k) => remoteHostsConfig[k].clusterConfig.API.UnfreezeProjectRequiresAdmin);\n};\n\nconst getIsEditable = (isAdmin: boolean, resource: Resource, resources: ResourcesState, readonly: boolean, isFrozen: boolean) => {\n    const isEditable = (resources[resource.ownerUuid] as GroupResource)?.canWrite || (isGroupResource(resource) && resource.canWrite);\n    return (isAdmin || isEditable) && !readonly && !isFrozen;\n};\n"
  },
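`resourceToMenuKind` is a thunk that returns its result synchronously, so callers dispatch it and branch on the returned kind. A sketch (the commented `openContextMenu` follow-up is a hypothetical stand-in for the real context-menu action):

    import { Dispatch } from 'redux';
    import { ContextMenuKind } from 'store/context-menu/context-menu';
    import { resourceToMenuKind } from 'common/resource-to-menu-kind';

    export const openMenuForResource = (uuid: string, readonly = false) =>
        (dispatch: Dispatch) => {
            const menuKind: ContextMenuKind | undefined =
                dispatch<any>(resourceToMenuKind(uuid, readonly));
            if (!menuKind) return; // resource missing, or no menu for its kind
            // dispatch(openContextMenu(event, { uuid, menuKind })); // hypothetical
        };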
  {
    "path": "services/workbench2/src/common/resource-to-menukind.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\nimport { resourceToMenuKind } from 'common/resource-to-menu-kind';\nimport { ResourceKind } from 'models/resource';\nimport configureStore from 'redux-mock-store';\nimport thunk from 'redux-thunk';\nimport { PROJECT_PANEL_CURRENT_UUID } from \"store/project-panel/project-panel\";\nimport { GroupClass } from 'models/group';\nimport { LinkClass } from 'models/link';\n\ndescribe('context-menu-actions', () => {\n    describe('resourceToMenuKind', () => {\n        const middlewares = [thunk];\n        const mockStore = configureStore(middlewares);\n        const userUuid = 'zzzzz-tpzed-bbbbbbbbbbbbbbb';\n        const otherUserUuid = 'zzzzz-tpzed-bbbbbbbbbbbbbbc';\n        const headCollectionUuid = 'zzzzz-4zz18-aaaaaaaaaaaaaaa';\n        const oldCollectionUuid = 'zzzzz-4zz18-aaaaaaaaaaaaaab';\n        const projectUuid = 'zzzzz-j7d0g-ccccccccccccccc';\n        const filterGroupUuid = 'zzzzz-j7d0g-ccccccccccccccd';\n        const linkUuid = 'zzzzz-o0j2j-0123456789abcde';\n        const groupMemberLinkUuid = 'zzzzz-o0j2j-groupmemberlink';\n        const containerRequestUuid = 'zzzzz-xvhdp-0123456789abcde';\n\n        it('should return the correct menu kind', () => {\n            const cases = [\n                // resourceUuid, isAdminUser, isEditable, isTrashed, forceReadonly, expected\n                [headCollectionUuid, false, true, true, false, ContextMenuKind.TRASHED_COLLECTION],\n                [headCollectionUuid, false, true, false, false, ContextMenuKind.COLLECTION],\n                [headCollectionUuid, false, true, false, true, ContextMenuKind.READONLY_COLLECTION],\n                [headCollectionUuid, false, false, true, false, ContextMenuKind.READONLY_COLLECTION],\n                [headCollectionUuid, false, false, false, false, ContextMenuKind.READONLY_COLLECTION],\n                [headCollectionUuid, true, true, true, false, ContextMenuKind.TRASHED_COLLECTION],\n                [headCollectionUuid, true, true, false, false, ContextMenuKind.COLLECTION_ADMIN],\n                [headCollectionUuid, true, false, true, false, ContextMenuKind.TRASHED_COLLECTION],\n                [headCollectionUuid, true, false, false, false, ContextMenuKind.COLLECTION_ADMIN],\n                [headCollectionUuid, true, false, false, true, ContextMenuKind.READONLY_COLLECTION],\n\n                [oldCollectionUuid, false, true, true, false, ContextMenuKind.OLD_VERSION_COLLECTION],\n                [oldCollectionUuid, false, true, false, false, ContextMenuKind.OLD_VERSION_COLLECTION],\n                [oldCollectionUuid, false, false, true, false, ContextMenuKind.OLD_VERSION_COLLECTION],\n                [oldCollectionUuid, false, false, false, false, ContextMenuKind.OLD_VERSION_COLLECTION],\n                [oldCollectionUuid, true, true, true, false, ContextMenuKind.OLD_VERSION_COLLECTION],\n                [oldCollectionUuid, true, true, false, false, ContextMenuKind.OLD_VERSION_COLLECTION],\n                [oldCollectionUuid, true, false, true, false, ContextMenuKind.OLD_VERSION_COLLECTION],\n                [oldCollectionUuid, true, false, false, false, ContextMenuKind.OLD_VERSION_COLLECTION],\n\n                // FIXME: WB2 doesn't currently have context menu for trashed projects\n                // [projectUuid, false, true, true, false, ContextMenuKind.TRASHED_PROJECT],\n                [projectUuid, 
false, true, false, false, ContextMenuKind.WRITEABLE_PROJECT],\n                [projectUuid, false, true, false, true, ContextMenuKind.WRITEABLE_PROJECT],\n                [projectUuid, false, false, true, false, ContextMenuKind.READONLY_PROJECT],\n                [projectUuid, false, false, false, false, ContextMenuKind.READONLY_PROJECT],\n                // [projectUuid, true, true, true, false, ContextMenuKind.TRASHED_PROJECT],\n                [projectUuid, true, true, false, false, ContextMenuKind.PROJECT_ADMIN],\n                // [projectUuid, true, false, true, false, ContextMenuKind.TRASHED_PROJECT],\n                [projectUuid, true, false, false, false, ContextMenuKind.PROJECT_ADMIN],\n                [projectUuid, true, false, false, true, ContextMenuKind.READONLY_PROJECT],\n\n                [linkUuid, false, true, true, false, ContextMenuKind.LINK],\n                [linkUuid, false, true, false, false, ContextMenuKind.LINK],\n                [linkUuid, false, false, true, false, ContextMenuKind.LINK],\n                [linkUuid, false, false, false, false, ContextMenuKind.LINK],\n                [linkUuid, true, true, true, false, ContextMenuKind.LINK],\n                [linkUuid, true, true, false, false, ContextMenuKind.LINK],\n                [linkUuid, true, false, true, false, ContextMenuKind.LINK],\n                [linkUuid, true, false, false, false, ContextMenuKind.LINK],\n                [groupMemberLinkUuid, false, true, true, false, ContextMenuKind.GROUP_MEMBER],\n\n                [userUuid, false, true, true, false, ContextMenuKind.USER_DETAILS],\n                [userUuid, false, true, false, false, ContextMenuKind.USER_DETAILS],\n                [userUuid, false, false, true, false, ContextMenuKind.USER_DETAILS],\n                [userUuid, false, false, false, false, ContextMenuKind.USER_DETAILS],\n                [userUuid, true, true, true, false, ContextMenuKind.USER_DETAILS],\n                [userUuid, true, true, false, false, ContextMenuKind.USER_DETAILS],\n                [userUuid, true, false, true, false, ContextMenuKind.USER_DETAILS],\n                [userUuid, true, false, false, false, ContextMenuKind.USER_DETAILS],\n\n                [containerRequestUuid, false, true, true, false, ContextMenuKind.PROCESS_RESOURCE],\n                [containerRequestUuid, false, true, false, false, ContextMenuKind.PROCESS_RESOURCE],\n                [containerRequestUuid, false, false, true, false, ContextMenuKind.READONLY_PROCESS_RESOURCE],\n                [containerRequestUuid, false, false, false, false, ContextMenuKind.READONLY_PROCESS_RESOURCE],\n                [containerRequestUuid, false, false, false, true, ContextMenuKind.READONLY_PROCESS_RESOURCE],\n                [containerRequestUuid, true, true, true, false, ContextMenuKind.PROCESS_ADMIN],\n                [containerRequestUuid, true, true, false, false, ContextMenuKind.PROCESS_ADMIN],\n                [containerRequestUuid, true, false, true, false, ContextMenuKind.PROCESS_ADMIN],\n                [containerRequestUuid, true, false, false, false, ContextMenuKind.PROCESS_ADMIN],\n                [containerRequestUuid, true, false, false, true, ContextMenuKind.PROCESS_ADMIN],\n            ]\n\n            cases.forEach(([resourceUuid, isAdminUser, isEditable, isTrashed, forceReadonly, expected]) => {\n                const initialState = {\n                    properties: {\n                        [PROJECT_PANEL_CURRENT_UUID]: projectUuid,\n                    },\n                    
resources: {\n                        [headCollectionUuid]: {\n                            uuid: headCollectionUuid,\n                            ownerUuid: projectUuid,\n                            currentVersionUuid: headCollectionUuid,\n                            isTrashed: isTrashed,\n                            kind: ResourceKind.COLLECTION,\n                        },\n                        [oldCollectionUuid]: {\n                            uuid: oldCollectionUuid,\n                            currentVersionUuid: headCollectionUuid,\n                            isTrashed: isTrashed,\n                            kind: ResourceKind.COLLECTION,\n                        },\n                        [projectUuid]: {\n                            uuid: projectUuid,\n                            ownerUuid: isEditable ? userUuid : otherUserUuid,\n                            canWrite: isEditable,\n                            groupClass: GroupClass.PROJECT,\n                            kind: ResourceKind.PROJECT,\n                        },\n                        [filterGroupUuid]: {\n                            uuid: filterGroupUuid,\n                            ownerUuid: isEditable ? userUuid : otherUserUuid,\n                            canWrite: isEditable,\n                            groupClass: GroupClass.FILTER,\n                            kind: ResourceKind.PROJECT,\n                        },\n                        [linkUuid]: {\n                            uuid: linkUuid,\n                            kind: ResourceKind.LINK,\n                        },\n                        [groupMemberLinkUuid]: {\n                            uuid: groupMemberLinkUuid,\n                            kind: ResourceKind.LINK,\n                            linkClass: LinkClass.PERMISSION,\n                            headKind: ResourceKind.GROUP,\n                        },\n                        [userUuid]: {\n                            uuid: userUuid,\n                            kind: ResourceKind.USER,\n                        },\n                        [containerRequestUuid]: {\n                            uuid: containerRequestUuid,\n                            ownerUuid: projectUuid,\n                            kind: ResourceKind.CONTAINER_REQUEST,\n                        },\n                    },\n                    auth: {\n                        user: {\n                            uuid: userUuid,\n                            isAdmin: isAdminUser,\n                        },\n                    },\n                };\n                const store = mockStore(initialState);\n\n                let menuKind;\n                try {\n                    menuKind = store.dispatch(resourceToMenuKind(resourceUuid, forceReadonly))\n                    expect(menuKind).to.equal(expected);\n                } catch (err) {\n                    console.error('Failed Assertion: ', err.message);\n                    throw new Error(`menuKind for resource ${JSON.stringify(initialState.resources[resourceUuid])} forceReadonly: ${forceReadonly} expected to be ${expected} but got ${menuKind}.`);\n                }\n            });\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/common/service-provider.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nclass ServicesProvider {\n\n    private static instance: ServicesProvider;\n\n    private store;\n    private services;\n\n    private constructor() {}\n\n    public static getInstance(): ServicesProvider {\n        if (!ServicesProvider.instance) {\n            ServicesProvider.instance = new ServicesProvider();\n        }\n\n        return ServicesProvider.instance;\n    }\n\n    public setServices(newServices): void {\n        if (!this.services) {\n            this.services = newServices;\n        }\n    }\n\n    public getServices() {\n        if (!this.services) {\n            throw \"Please check if services have been set in the index.ts before the app is initiated\"; // eslint-disable-line no-throw-literal\n        }\n        return this.services;\n    }\n\n    public setStore(newStore): void {\n        if (!this.store) {\n            this.store = newStore;\n        }\n    }\n\n    public getStore() {\n        if (!this.store) {\n            throw \"Please check if store has been set in the index.ts before the app is initiated\"; // eslint-disable-line no-throw-literal\n        }\n\n        return this.store;\n    }\n}\n\nexport default ServicesProvider.getInstance();\n"
  },
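Usage sketch: the singleton is populated once at bootstrap and read from code that cannot easily receive the services or store via props; both setters ignore later writes, so the first call wins.

    import servicesProvider from 'common/service-provider';

    // At bootstrap (index.ts):
    //   servicesProvider.setServices(services);
    //   servicesProvider.setStore(store);

    // Later, from a plain helper module:
    const services = servicesProvider.getServices(); // throws if never set
    const store = servicesProvider.getStore();
    store.dispatch({ type: 'SOME_ACTION' }); // illustrative action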
  {
    "path": "services/workbench2/src/common/unionize.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize as originalUnionize, SingleValueRec } from 'unionize';\n\nexport * from 'unionize';\n\ntype TagRecord<Record> = { [T in keyof Record]: T };\n\nexport function unionize<Record extends SingleValueRec>(record: Record) {\n    const tags = {} as TagRecord<Record>;\n    for (const tag in record) {\n        tags[tag] = tag;\n    }\n    return {...originalUnionize(record, {\n        tag: 'type',\n        value: 'payload'\n    }), tags};\n}\n"
  },
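A sketch of what the wrapper adds: `ofType` and `UnionOf` come from the re-exported unionize package, actions are tagged on `type`/`payload`, and the extra `tags` map lets reducers reference tag strings without retyping literals.

    import { unionize, ofType, UnionOf } from 'common/unionize';

    const snackbarActions = unionize({
        OPEN_SNACKBAR: ofType<{ message: string }>(),
        CLOSE_SNACKBAR: ofType<{}>(),
    });
    type SnackbarAction = UnionOf<typeof snackbarActions>;

    const action = snackbarActions.OPEN_SNACKBAR({ message: 'Saved' });
    // action => { type: 'OPEN_SNACKBAR', payload: { message: 'Saved' } }
    // snackbarActions.tags.OPEN_SNACKBAR === 'OPEN_SNACKBAR'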
  {
    "path": "services/workbench2/src/common/url.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { customDecodeURI, customEncodeURI, injectTokenParam } from './url';\n\ndescribe('url', () => {\n    describe('customDecodeURI', () => {\n        it('should decode encoded URI', () => {\n            // given\n            const path = 'test%23test%2Ftest';\n            const expectedResult = 'test#test%2Ftest';\n\n            // when\n            const result = customDecodeURI(path);\n\n            // then\n            expect(result).to.equal(expectedResult);\n        });\n\n        it('ignores non parsable URI and return its original form', () => {\n            // given\n            const path = 'test/path/with%wrong/sign';\n\n            // when\n            const result = customDecodeURI(path);\n\n            // then\n            expect(result).to.equal(path);\n        });\n    });\n\n    describe('customEncodeURI', () => {\n        it('should encode URI', () => {\n            // given\n            const path = 'test#test/test';\n            const expectedResult = 'test%23test/test';\n\n            // when\n            const result = customEncodeURI(path);\n\n            // then\n            expect(result).to.equal(expectedResult);\n        });\n\n        it('ignores non encodable URI and return its original form', () => {\n            // given\n            const path = 22;\n\n            // when\n            const result = customEncodeURI(path);\n\n            // then\n            expect(result).to.equal(path);\n        });\n    });\n});\n\ndescribe('injectTokenParam', () => {\n    const apiToken = \"v2/xxxxx-gj3su-000000000000000/00000000000000000000000000000000000000000000000000\";\n\n    it('injects tokens into valid URLs', () => {\n        const testCases = [{\n            // Test normal case\n            url: \"http://example.com/\",\n            token: apiToken,\n            result: `http://example.com/?arvados_api_token=${apiToken}`,\n        },{\n            // Test no trailing slash - URL constructor will add trailing slash\n            url: \"https://example.com\",\n            token: \"foobar\",\n            result: \"https://example.com/?arvados_api_token=foobar\",\n        },{\n            // Test with basic auth\n            url: \"https://user:pass@example.com/\",\n            token: \"baz\",\n            result: \"https://user:pass@example.com/?arvados_api_token=baz\",\n        },{\n            // Test with existing params\n            url: \"https://example.com/?foo=bar\",\n            token: \"foo123\",\n            result: \"https://example.com/?arvados_api_token=foo123&foo=bar\",\n        },{\n            // Test with existing params and no slash - URL constructor will add slash\n            url: \"https://example.com?foo=bar\",\n            token: \"foo123\",\n            result: \"https://example.com/?arvados_api_token=foo123&foo=bar\",\n        },{\n            // Test with no params but with question mark\n            url: \"http://example.com/?\",\n            token: \"foobar\",\n            result: \"http://example.com/?arvados_api_token=foobar\",\n        }];\n\n        return Promise.all(testCases.map(async testCase => {\n            const result = await injectTokenParam(testCase.url, testCase.token);\n            expect(result).to.equal(testCase.result);\n        }));\n    });\n\n    it('raises exceptions for invalid situations', () => {\n        const invalidCases = [{\n            url: \"http://example.com\",\n            token: 
\"\",\n            msg: \"User token required\",\n        },{\n            url: \"\",\n            token: \"foo\",\n            msg: \"URL cannot be empty\",\n        }];\n\n        return Promise.all(invalidCases.map(testCase => {\n            const promise = injectTokenParam(testCase.url, testCase.token);\n\n            return promise.then(() => {\n                    throw new Error('Expected injectTokenParam() to return error but it did not. '\n                        + `Expected error: \"${testCase.msg}\" given url \"${testCase.url}\" and token \"${testCase.token}\"`);\n                }, (err) => {\n                    // Verify the promise rejection reason\n                    expect(err).to.equal(testCase.msg);\n                }\n            );\n        }));\n\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/common/url.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport function getUrlParameter(search: string, name: string) {\n    const safeName = name.replace(/[[]/, '\\\\[').replace(/[\\]]/, '\\\\]');\n    const regex = new RegExp('[\\\\?&]' + safeName + '=([^&#]*)');\n    const results = regex.exec(search);\n    return results === null ? '' : decodeURIComponent(results[1].replace(/\\+/g, ' '));\n}\n\nexport function normalizeURLPath(url: string) {\n    const u = new URL(url);\n    u.pathname = u.pathname.replace(/\\/\\//, '/');\n    if (u.pathname[u.pathname.length - 1] === '/') {\n        u.pathname = u.pathname.substring(0, u.pathname.length - 1);\n    }\n    return u.toString();\n}\n\nexport const customEncodeURI = (path: string) => {\n    try {\n        return path.split('/').map(encodeURIComponent).join('/');\n    } catch(e) {}\n\n    return path;\n};\n\nexport const customDecodeURI = (path: string) => {\n    try {\n        return path.split('%2F').map(decodeURIComponent).join('%2F');\n    } catch(e) {}\n\n    return path;\n};\n\nexport const injectTokenParam = (url: string, token: string): Promise<string> => {\n    if (url.length) {\n        if (token.length) {\n            const originalUrl = new URL(url);\n\n            // Remove leading ? for easier manipulation\n            const search = originalUrl.search.replace(/^\\?/, '');\n\n            // Everything after ?\n            const params = `${search}${originalUrl.hash}`;\n\n            // Since search and hash seems to not normalize anything,\n            // we should expect href to always end exactly with both.\n            // This sanity check should always pass\n            if (originalUrl.href.endsWith(params)) {\n                // It seems easier to lop off search/params and inject token\n                // instead of handling user:pass schemes\n                const baseUrl = originalUrl.href\n                    // Trim the params from the URL\n                    .substring(0, originalUrl.href.length - params.length)\n                    // Remove trailing ?\n                    .replace(/\\?$/, '');\n\n                // Prepend arvados token to search and construct search string\n                const searchWithToken = [`arvados_api_token=${token}`, search]\n                    // Remove empty elements from array to prevent extra &s with empty search\n                    .filter(e => String(e).trim())\n                    .join('&');\n\n                return Promise.resolve(`${baseUrl}?${searchWithToken}${originalUrl.hash}`);\n            } else {\n                // Original url does not end with search+hash, cannot add token\n                console.error(\"Failed to add token to malformed URL: \" + url);\n                return Promise.reject(\"Malformed URL\");\n            }\n        } else {\n            return Promise.reject(\"User token required\");\n        }\n    } else {\n        return Promise.reject(\"URL cannot be empty\");\n    }\n};\n"
  },
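Worked examples of the helpers above (the return values follow from the implementations and the adjacent tests):

    import { getUrlParameter, customEncodeURI, injectTokenParam } from 'common/url';

    getUrlParameter('?foo=a%20b&bar=1', 'foo'); // "a b"

    // '#' is percent-encoded, '/' is preserved as a path separator:
    customEncodeURI('dir#1/file.txt'); // "dir%231/file.txt"

    // The token becomes the first query parameter; an empty url or token rejects:
    injectTokenParam('https://example.com/?foo=bar', 'sometoken')
        .then(url => console.log(url));
    // -> "https://example.com/?arvados_api_token=sometoken&foo=bar"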
  {
    "path": "services/workbench2/src/common/use-async-interval.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useState, useEffect } from 'react';\nimport { useAsyncInterval } from './use-async-interval';\n\nconst TestComponent = ({callback}) => {\n  useAsyncInterval(callback, 1000);\n  return <div>test</div>;\n};\n\ndescribe('useAsyncInterval', () => {\n  it('should fire repeatedly after the interval', () => {\n    cy.clock();\n    const syncCallback = cy.spy().as('syncCallback');\n    cy.mount(<TestComponent callback={syncCallback} />);\n\n    cy.get('@syncCallback').should('not.have.been.called');\n\n    cy.tick(1000);\n    cy.wait(0);\n    \n    cy.get('@syncCallback').should('have.been.calledOnce');\n    \n    cy.tick(1000);\n    cy.wait(0);\n\n    cy.get('@syncCallback').should('have.been.calledTwice');\n\n    cy.tick(1000);\n    cy.wait(0);\n\n    cy.get('@syncCallback').should('have.been.calledThrice');\n    cy.clock().invoke('restore');\n  });\n\n    it('should wait for async callbacks to complete in between polling', async () => {\n        cy.clock();\n\n        const delayedCallback = cy.stub().callsFake(() => {\n            return new Promise((resolve) => {\n              setTimeout(() => {\n                resolve('done');\n              }, 2000);\n            });\n          }).as('delayedCallback');\n\n        cy.mount(<TestComponent\n            callback={delayedCallback}\n        />);\n\n        // cb queued with setInterval but not called\n        cy.get('@delayedCallback').should('not.have.been.called');\n\n        // Wait 2 seconds for first tick\n        cy.tick(2000);\n        cy.wait(0);\n\n        // First cb called after 2 seconds\n        cy.get('@delayedCallback').should('have.been.calledOnce');\n\n        // Wait for cb to resolve for 2 seconds\n        cy.tick(2000);\n        cy.wait(0);\n        cy.get('@delayedCallback').should('have.been.calledOnce');\n\n        // Wait 2 seconds for second tick\n        cy.tick(2000);\n        cy.wait(0);\n        cy.get('@delayedCallback').should('have.been.calledTwice');\n\n        // Wait for cb to resolve for 2 seconds\n        cy.tick(2000);\n        cy.wait(0);\n        cy.get('@delayedCallback').should('have.been.calledTwice');\n\n        // Wait 2 seconds for third tick\n        cy.tick(2000);\n        cy.wait(0);\n        cy.get('@delayedCallback').should('have.been.calledThrice');\n\n        // Wait for cb to resolve for 2 seconds\n        cy.tick(2000);\n        cy.wait(0);\n        cy.get('@delayedCallback').should('have.been.calledThrice');\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/common/use-async-interval.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\n\nexport const useAsyncInterval = function (callback, delay) {\n    const ref = React.useRef<{cb: () => Promise<any>, active: boolean}>({\n        cb: async () => {},\n        active: false}\n    );\n\n    // Remember the latest callback.\n    React.useEffect(() => {\n        ref.current.cb = callback;\n    }, [callback]);\n    // Set up the interval.\n    React.useEffect(() => {\n        function tick() {\n            if (ref.current.active) {\n                // Wrap execution chain with promise so that execution errors or\n                //   non-async callbacks still fall through to .finally, avoids breaking polling\n                new Promise((resolve) => {\n                    return resolve(ref.current.cb());\n                }).then(() => {\n                    // Promise succeeded\n                    // Possibly implement back-off reset\n                }).catch(() => {\n                    // Promise rejected\n                    // Possibly implement back-off in the future\n                }).finally(() => {\n                    setTimeout(tick, delay);\n                });\n            }\n        }\n        if (delay !== null) {\n            ref.current.active = true;\n            setTimeout(tick, 0); // want the first callback to happen immediately.\n        }\n        // Suppress warning about cleanup function - can be ignored when variables are unrelated to dom elements\n        //   https://github.com/facebook/react/issues/15841#issuecomment-500133759\n        // eslint-disable-next-line\n        return () => {ref.current.active = false;};\n    }, [delay]);\n};\n"
  },
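Unlike a bare `setInterval`, the next tick is only scheduled from `.finally()` after the previous callback settles, so a slow async poll never overlaps itself. A minimal consumer (component and prop names are illustrative):

    import React from 'react';
    import { useAsyncInterval } from 'common/use-async-interval';

    export const LogPoller = ({ fetchLogs }: { fetchLogs: () => Promise<void> }) => {
        // Even if fetchLogs takes longer than 2s, polls run strictly one at a time.
        useAsyncInterval(fetchLogs, 2000);
        return null;
    };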
  {
    "path": "services/workbench2/src/common/usePrevious.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { useEffect, useRef } from \"react\";\n\nexport function usePrevious<T>(value: T): T | undefined {\n  const ref = useRef<T>();\n\n  useEffect(() => {\n    ref.current = value;\n  }, [value]);\n\n  return ref.current;\n}\n"
  },
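Usage sketch: because the ref is written in an effect after render, the hook yields `undefined` on the first render and the previous render's value afterwards.

    import React from 'react';
    import { usePrevious } from 'common/usePrevious';

    export const RunCounter = ({ count }: { count: number }) => {
        const prev = usePrevious(count); // undefined on the first render
        return <span>{`now ${count}, was ${String(prev)}`}</span>;
    };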
  {
    "path": "services/workbench2/src/common/useStateWithValidation.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { useEffect, useState } from \"react\";\nimport { getFieldErrors, Validator } from \"validators/validators\";\n\n/**\n * A custom hook that manages state for a value with automatic field validation.\n *\n * @param initialValue - The initial value for the state\n * @param validators - Array of validator functions to apply to the value\n * @param fieldName - Optional field name to prepend to error messages for better debugging\n * @returns A tuple containing:\n *   - The current value\n *   - A setter function to update the value\n *   - An array of validation error messages\n *\n * @example\n * ```tsx\n * const [name, setName, nameErrors] = useStateWithValidation('', REQUIRED_LENGTH255_VALIDATION, 'Name');\n * ```\n */\nexport const useStateWithValidation = <T>(initialValue: T, validators: Validator[], fieldName?: string) => {\n    const [thisValue, setThisValue] = useState<T>(initialValue);\n    const [errors, setErrors] = useState<string[]>(() => getFieldErrors(thisValue, validators, fieldName));\n\n    useEffect(() => {\n        const errs = getFieldErrors(thisValue, validators, fieldName);\n        setErrors(errs);\n    }, [thisValue]);\n\n    return [thisValue, setThisValue, errors] as const;\n}"
  },
  {
    "path": "services/workbench2/src/common/webdav.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { WebDAV } from \"./webdav\";\n\ndescribe('WebDAV', () => {\n    it('makes use of provided config', async () => {\n        const { open, load, setRequestHeader, createRequest } = mockCreateRequest();\n        const webdav = new WebDAV({ baseURL: 'http://foo.com/', headers: { Authorization: 'Basic' } }, createRequest);\n        const promise = webdav.propfind('foo');\n        load();\n        const request = await promise;\n        cy.get('@open').should('have.been.calledWith', 'PROPFIND', 'http://foo.com/foo');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Authorization', 'Basic');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Cache-Control', 'no-cache');\n        expect(request).to.be.instanceOf(XMLHttpRequest);\n    });\n\n    it('allows to modify defaults after instantiation', async () => {\n        const { open, load, setRequestHeader, createRequest } = mockCreateRequest();\n        const webdav = new WebDAV({ baseURL: 'http://foo.com/' }, createRequest);\n        webdav.setAuthorization('Basic');\n        const promise = webdav.propfind('foo');\n        load();\n        const request = await promise;\n        cy.get('@open').should('have.been.calledWith', 'PROPFIND', 'http://foo.com/foo');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Authorization', 'Basic');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Cache-Control', 'no-cache');\n        expect(request).to.be.instanceOf(XMLHttpRequest);\n    });\n\n    it('PROPFIND', async () => {\n        const { open, load, setRequestHeader, createRequest } = mockCreateRequest();\n        const webdav = new WebDAV(undefined, createRequest);\n        const promise = webdav.propfind('foo');\n        load();\n        const request = await promise;\n        cy.get('@open').should('have.been.calledWith', 'PROPFIND', 'foo');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Cache-Control', 'no-cache');\n        expect(request).to.be.instanceOf(XMLHttpRequest);\n    });\n\n    it('PUT', async () => {\n        const { open, send, load, progress, setRequestHeader, createRequest } = mockCreateRequest();\n        const webdav = new WebDAV(undefined, createRequest);\n        const promise = webdav.put('foo', 'Test data');\n        progress();\n        load();\n        const request = await promise;\n        cy.get('@open').should('have.been.calledWith', 'PUT', 'foo');\n        cy.get('@send').should('have.been.calledWith', 'Test data');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Cache-Control', 'no-cache');\n        expect(request).to.be.instanceOf(XMLHttpRequest);\n    });\n\n    it('COPY', async () => {\n        const { open, setRequestHeader, load, createRequest } = mockCreateRequest();\n        const webdav = new WebDAV({ baseURL: 'http://base' }, createRequest);\n        const promise = webdav.copy('foo', 'foo-copy');\n        load();\n        const request = await promise;\n        cy.get('@open').should('have.been.calledWith', 'COPY', 'http://base/foo');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Destination', 'http://base/foo-copy');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Cache-Control', 'no-cache');\n        expect(request).to.be.instanceOf(XMLHttpRequest);\n    });\n\n    it('COPY - adds baseURL with trailing slash to Destination 
header', async () => {\n        const { open, setRequestHeader, load, createRequest } = mockCreateRequest();\n        const webdav = new WebDAV({ baseURL: 'http://base/' }, createRequest);\n        const promise = webdav.copy('foo', 'foo-copy');\n        load();\n        const request = await promise;\n        cy.get('@open').should('have.been.calledWith', 'COPY', 'http://base/foo');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Destination', 'http://base/foo-copy');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Cache-Control', 'no-cache');\n        expect(request).to.be.instanceOf(XMLHttpRequest);\n    });\n\n    it('COPY - adds baseURL without trailing slash to Destination header', async () => {\n        const { open, setRequestHeader, load, createRequest } = mockCreateRequest();\n        const webdav = new WebDAV({ baseURL: 'http://base' }, createRequest);\n        const promise = webdav.copy('foo', 'foo-copy');\n        load();\n        const request = await promise;\n        cy.get('@open').should('have.been.calledWith', 'COPY', 'http://base/foo');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Destination', 'http://base/foo-copy');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Cache-Control', 'no-cache');\n        expect(request).to.be.instanceOf(XMLHttpRequest);\n    });\n\n    it('MOVE', async () => {\n        const { open, setRequestHeader, load, createRequest } = mockCreateRequest();\n        const webdav = new WebDAV({ baseURL: 'http://base' }, createRequest);\n        const promise = webdav.move('foo', 'foo-moved');\n        load();\n        const request = await promise;\n        cy.get('@open').should('have.been.calledWith', 'MOVE', 'http://base/foo');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Destination', 'http://base/foo-moved');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Cache-Control', 'no-cache');\n        expect(request).to.be.instanceOf(XMLHttpRequest);\n    });\n\n    it('MOVE - adds baseURL with trailing slash to Destination header', async () => {\n        const { open, setRequestHeader, load, createRequest } = mockCreateRequest();\n        const webdav = new WebDAV({ baseURL: 'http://base/' }, createRequest);\n        const promise = webdav.move('foo', 'foo-moved');\n        load();\n        const request = await promise;\n        cy.get('@open').should('have.been.calledWith', 'MOVE', 'http://base/foo');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Destination', 'http://base/foo-moved');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Cache-Control', 'no-cache');\n        expect(request).to.be.instanceOf(XMLHttpRequest);\n    });\n\n    it('MOVE - adds baseURL without trailing slash to Destination header', async () => {\n        const { open, setRequestHeader, load, createRequest } = mockCreateRequest();\n        const webdav = new WebDAV({ baseURL: 'http://base' }, createRequest);\n        const promise = webdav.move('foo', 'foo-moved');\n        load();\n        const request = await promise;\n        cy.get('@open').should('have.been.calledWith', 'MOVE', 'http://base/foo');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Destination', 'http://base/foo-moved');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Cache-Control', 'no-cache');\n        expect(request).to.be.instanceOf(XMLHttpRequest);\n    });\n\n    it('DELETE', async () => {\n 
       const { open, load, setRequestHeader, createRequest } = mockCreateRequest();\n        const webdav = new WebDAV(undefined, createRequest);\n        const promise = webdav.delete('foo');\n        load();\n        const request = await promise;\n        cy.get('@open').should('have.been.calledWith', 'DELETE', 'foo');\n        cy.get('@setRequestHeader').should('have.been.calledWith', 'Cache-Control', 'no-cache');\n        expect(request).to.be.instanceOf(XMLHttpRequest);\n    });\n});\n\nconst mockCreateRequest = () => {\n    const send = cy.stub().as('send');\n    const open = cy.stub().as('open');\n    const setRequestHeader = cy.stub().as('setRequestHeader');\n    const request = new XMLHttpRequest();\n    request.send = send;\n    request.open = open;\n    request.setRequestHeader = setRequestHeader;\n    const load = () => request.dispatchEvent(new Event('load'));\n    const progress = () => request.dispatchEvent(new Event('progress'));\n    return {\n        send,\n        open,\n        load,\n        progress,\n        setRequestHeader,\n        createRequest: () => request\n    };\n};\n"
  },
  {
    "path": "services/workbench2/src/common/webdav.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { customEncodeURI } from \"./url\";\n\nexport class WebDAV {\n\n    private defaults: WebDAVDefaults = {\n        baseURL: '',\n        headers: {\n            'Cache-Control': 'no-cache'\n        },\n    };\n\n    constructor(config?: Partial<WebDAVDefaults>, private createRequest = () => new XMLHttpRequest()) {\n        if (config) {\n            this.defaults = {\n                ...this.defaults,\n                ...config,\n                headers: {\n                    ...this.defaults.headers,\n                    ...config.headers\n                },\n            };\n        }\n    }\n\n    getBaseUrl = (): string => this.defaults.baseURL;\n    setAuthorization = (token?) => this.defaults.headers.Authorization = token;\n\n    propfind = (url: string, config: WebDAVRequestConfig = {}) =>\n        this.request({\n            ...config, url,\n            method: 'PROPFIND'\n        })\n\n    put = (url: string, data?: any, config: WebDAVRequestConfig = {}) =>\n        this.request({\n            ...config, url,\n            method: 'PUT',\n            data\n        })\n\n    get = (url: string, config: WebDAVRequestConfig = {}) =>\n        this.request({\n            ...config, url,\n            method: 'GET'\n        })\n\n    upload = (url: string, files: File[], config: WebDAVRequestConfig = {}) => {\n        return Promise.all(\n            files.map(file => this.request({\n                ...config, url,\n                method: 'PUT',\n                data: file\n            }))\n        );\n    }\n\n    copy = (url: string, destination: string, config: WebDAVRequestConfig = {}) =>\n        this.request({\n            ...config, url,\n            method: 'COPY',\n            headers: {\n                ...config.headers,\n                Destination: this.defaults.baseURL\n                    ? this.defaults.baseURL.replace(/\\/+$/, '') + '/' + destination.replace(/^\\/+/, '')\n                    : destination\n            }\n        })\n\n    move = (url: string, destination: string, config: WebDAVRequestConfig = {}) =>\n        this.request({\n            ...config, url,\n            method: 'MOVE',\n            headers: {\n                ...config.headers,\n                Destination: this.defaults.baseURL\n                    ? this.defaults.baseURL.replace(/\\/+$/, '') + '/' + destination.replace(/^\\/+/, '')\n                    : destination\n            }\n        })\n\n    delete = (url: string, config: WebDAVRequestConfig = {}) =>\n        this.request({\n            ...config, url,\n            method: 'DELETE'\n        })\n\n    private request = (config: RequestConfig) => {\n        return new Promise<XMLHttpRequest>((resolve, reject) => {\n            const r = this.createRequest();\n            this.defaults.baseURL = this.defaults.baseURL.replace(/\\/+$/, '');\n            r.open(config.method,\n                `${this.defaults.baseURL\n                    ? 
this.defaults.baseURL + '/'\n                    : ''}${customEncodeURI(config.url)}`);\n\n            const headers = { ...this.defaults.headers, ...config.headers };\n            Object\n                .keys(headers)\n                .forEach(key => r.setRequestHeader(key, headers[key]));\n\n            if (!(window as any).cancelTokens) {\n                Object.assign(window, { cancelTokens: {} });\n            }\n\n            (window as any).cancelTokens[config.url] = () => {\n                resolve(r);\n                r.abort();\n            }\n\n            if (config.onUploadProgress) {\n                r.upload.addEventListener('progress', config.onUploadProgress);\n            }\n\n            // This event gets triggered on *any* server response\n            r.addEventListener('load', () => {\n                if (r.status >= 400) {\n                    return reject(r);\n                } else {\n                    return resolve(r);\n                }\n            });\n\n            // This event gets triggered on network errors\n            r.addEventListener('error', () => {\n                return reject(r);\n            });\n\n            r.upload.addEventListener('error', () => {\n                return reject(r);\n            });\n\n            r.send(config.data);\n        });\n    }\n}\n\nexport interface WebDAVRequestConfig {\n    headers?: {\n        [key: string]: string;\n    };\n    onUploadProgress?: (event: ProgressEvent) => void;\n}\n\ninterface WebDAVDefaults {\n    baseURL: string;\n    headers: { [key: string]: string };\n}\n\ninterface RequestConfig {\n    method: string;\n    url: string;\n    headers?: { [key: string]: string };\n    data?: any;\n    onUploadProgress?: (event: ProgressEvent) => void;\n}\n"
  },
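A usage sketch against a keep-web WebDAV endpoint; the host, paths, and token are placeholders. `copy`/`move` derive the `Destination` header from the slash-normalized baseURL, and any response with status >= 400 rejects the promise with the XHR object.

    import { WebDAV } from 'common/webdav';

    const webdav = new WebDAV({ baseURL: 'https://collections.example.com' });
    webdav.setAuthorization('Bearer <api-token>');

    webdav.propfind('c=<collection-uuid>') // list collection contents
        .then(xhr => console.log(xhr.responseXML))
        .catch(xhr => console.error('request failed', xhr.status));

    // COPY https://collections.example.com/c=<collection-uuid>/a.txt
    // Destination: https://collections.example.com/c=<collection-uuid>/b.txt
    webdav.copy('c=<collection-uuid>/a.txt', 'c=<collection-uuid>/b.txt');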
  {
    "path": "services/workbench2/src/common/xml.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { customDecodeURI } from \"./url\";\n\nexport const getTagValue = (document: Document | Element, tagName: string, defaultValue: string, skipDecoding: boolean = false) => {\n    const [el] = Array.from(document.getElementsByTagName(tagName));\n    const URI = el ? htmlDecode(el.innerHTML) : defaultValue;\n\n    if (!skipDecoding) {\n        try {\n            return customDecodeURI(URI);\n        } catch(e) {}\n    }\n\n    return URI;\n};\n\nconst htmlDecode = (input: string) => {\n    const out = input.split(' ').map((i) => {\n        const doc = new DOMParser().parseFromString(i, \"text/html\");\n        if (doc.documentElement !== null) {\n            return doc.documentElement.textContent || '';\n        }\n        return '';\n    });\n    return out.join(' ');\n};\n"
  },
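A sketch of extracting a tag from a WebDAV PROPFIND response (the XML sample is hand-made). Percent-decoding via `customDecodeURI` is applied unless `skipDecoding` is set.

    import { getTagValue } from 'common/xml';

    const doc = new DOMParser().parseFromString(
        '<D:multistatus xmlns:D="DAV:">' +
        '<D:response><D:href>/c=zzzzz/My%20File.txt</D:href></D:response>' +
        '</D:multistatus>', 'text/xml');

    getTagValue(doc, 'D:href', '');        // "/c=zzzzz/My File.txt" (decoded)
    getTagValue(doc, 'D:href', '', true);  // "/c=zzzzz/My%20File.txt" (raw)
    getTagValue(doc, 'D:missing', 'n/a');  // "n/a" (default when the tag is absent)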
  {
    "path": "services/workbench2/src/components/autocomplete/autocomplete.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport {\n    Input as MuiInput,\n    Chip as MuiChip,\n    Popper as MuiPopper,\n    Paper as MuiPaper,\n    FormControl,\n    InputLabel,\n    ListItemText,\n    ListItem,\n    List,\n    FormHelperText,\n    Tooltip,\n    Typography,\n} from '@mui/material';\nimport withStyles from '@mui/styles/withStyles';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { PopperProps } from '@mui/material/Popper';\nimport { WithStyles } from '@mui/styles';\nimport { noop } from 'lodash';\nimport { isUserGroup } from 'models/group';\nimport { sortByKey } from 'common/objects';\nimport { TabbedList } from 'components/tabbedList/tabbed-list';\nimport maxSize from 'popper-max-size-modifier';\nimport { beforeWrite } from '@popperjs/core/lib';\n\nexport interface AutocompleteProps<Item, Suggestion> {\n    label?: string;\n    value: string;\n    items: Item[];\n    disabled?: boolean;\n    suggestions?: Suggestion[];\n    error?: boolean;\n    helperText?: string;\n    autofocus?: boolean;\n    onChange: (event: React.ChangeEvent<HTMLInputElement>) => void;\n    onBlur?: (event: React.FocusEvent<HTMLInputElement>) => void;\n    onFocus?: (event: React.FocusEvent<HTMLInputElement>) => void;\n    onCreate?: () => void;\n    onDelete?: (item: Item, index: number) => void;\n    onSelect?: (suggestion: Suggestion) => void;\n    renderChipValue?: (item: Item) => string;\n    renderChipTooltip?: (item: Item) => string;\n    renderSuggestion?: (suggestion: Suggestion) => React.ReactNode;\n    category?: AutocompleteCat;\n    isWorking?: boolean;\n    maxLength?: number;\n}\n\ntype AutocompleteClasses = 'listItemStyle';\n\nconst autocompleteStyles: CustomStyleRulesCallback<AutocompleteClasses> = theme => ({\n    listItemStyle: {\n        whiteSpace: 'nowrap',\n        overflow: 'hidden',\n        textOverflow: 'ellipsis',\n    },\n});\n\nexport enum AutocompleteCat {\n    SHARING = 'sharing',\n};\n\nexport interface AutocompleteState {\n    suggestionsOpen: boolean;\n    selectedTab: number;\n    selectedSuggestionIndex: number;\n    tabbedListContents: Record<string, any[]>;\n}\n\nexport const Autocomplete = withStyles(autocompleteStyles)(\n    class Autocomplete<Value, Suggestion> extends React.Component<AutocompleteProps<Value, Suggestion> & WithStyles<AutocompleteClasses>, AutocompleteState> {\n\n    state = {\n        suggestionsOpen: false,\n        selectedTab: 0,\n        selectedSuggestionIndex: 0,\n        tabbedListContents: {},\n    };\n\n    componentDidUpdate(prevProps: AutocompleteProps<Value, Suggestion>, prevState: AutocompleteState) {\n        const { suggestions = [], category } = this.props;\n            if( prevProps.suggestions?.length === 0 && suggestions.length > 0) {\n                this.setState({ selectedSuggestionIndex: 0, selectedTab: 0 });\n            }\n            if (category === AutocompleteCat.SHARING) {\n                if( prevProps.items.length !== this.props.items.length) {\n                    this.setState({ selectedTab: 0, selectedSuggestionIndex: 0 });\n                }\n                if (Object.keys(this.state.tabbedListContents).length === 0) {\n                    this.setState({ tabbedListContents: { groups: [], users: [] } });\n                }\n                if (prevProps.suggestions !== suggestions) {\n                    const users = sortByKey<Suggestion>(suggestions.filter(item => 
!isUserGroup(item)), 'fullName');\n                    const groups = sortByKey<Suggestion>(suggestions.filter(item => isUserGroup(item)), 'name');\n                    this.setState({ tabbedListContents: { groups: groups, users: users } });\n                }\n                if (prevState.selectedTab !== this.state.selectedTab) {\n                    this.setState({ selectedSuggestionIndex: 0 });\n                }\n            }\n    }\n\n    containerRef = React.createRef<HTMLDivElement>();\n    inputRef = React.createRef<HTMLInputElement>();\n\n
    render() {\n        return <div ref={this.containerRef}>\n                    <FormControl variant=\"standard\" fullWidth error={this.props.error}>\n                        {this.renderLabel()}\n                        {this.renderInput()}\n                        {this.renderHelperText()}\n                        {this.props.category === AutocompleteCat.SHARING ? this.renderTabbedSuggestions() : this.renderSuggestions()}\n                    </FormControl>\n               </div>;\n    }\n\n    renderLabel() {\n        const { label } = this.props;\n        return label && <InputLabel>{label}</InputLabel>;\n    }\n\n
    renderInput() {\n        return <Input\n            disabled={this.props.disabled}\n            autoFocus={this.props.autofocus}\n            inputRef={this.inputRef}\n            value={this.props.value}\n            startAdornment={this.renderChips()}\n            onFocus={this.handleFocus}\n            onBlur={this.handleBlur}\n            onChange={this.props.onChange}\n            onKeyPress={this.handleKeyPress}\n            onKeyDown={this.handleNavigationKeyPress}\n        />;\n    }\n\n    renderHelperText() {\n        return <FormHelperText>{this.props.helperText}</FormHelperText>;\n    }\n\n
    renderSuggestions() {\n        const { suggestions = [] } = this.props;\n        return (\n            <Popper\n                disablePortal={false}\n                modifiers={[\n                    maxSize,\n                    applyMaxSize\n                ]}\n                open={this.isSuggestionBoxOpen()}\n                anchorEl={this.inputRef.current}\n                key={suggestions.length}>\n                <Paper onMouseDown={this.preventBlur}>\n                    <List dense style={{ width: this.getSuggestionsWidth() }}>\n                        {suggestions.map(\n                            (suggestion, index) =>\n                                <ListItem\n                                    button\n                                    key={index}\n                                    onClick={this.handleSelect(suggestion)}\n                                    selected={index === this.state.selectedSuggestionIndex}>\n                                    {this.renderSuggestion(suggestion)}\n                                </ListItem>\n                        )}\n                    </List>\n                </Paper>\n            </Popper>\n        );\n    }\n\n
    renderTabbedSuggestions() {\n        const { suggestions = [] } = this.props;\n\n        return (\n            <Popper\n                disablePortal={false}\n                modifiers={[\n                    maxSize,\n                    applyMaxSize\n                ]}\n                open={this.state.suggestionsOpen}\n                anchorEl={this.containerRef.current || this.inputRef.current}\n                key={suggestions.length}\n                style={{ width: this.getSuggestionsWidth()}}\n            >\n                <Paper onMouseDown={this.preventBlur}>\n                    <TabbedList\n                        tabbedListContents={this.state.tabbedListContents}\n                        renderListItem={this.renderSharingSuggestion}\n                        selectedIndex={this.state.selectedSuggestionIndex}\n                        selectedTab={this.state.selectedTab}\n                        handleTabChange={this.handleTabChange}\n                        handleSelect={this.handleSelect}\n                        includeContentsLength={true}\n                        isWorking={this.props.isWorking}\n                        maxLength={this.props.maxLength}\n                        />\n                </Paper>\n            </Popper>\n        );\n    }\n\n
    isSuggestionBoxOpen() {\n        const { suggestions = [] } = this.props;\n        return this.state.suggestionsOpen && suggestions.length > 0;\n    }\n\n    handleFocus = (event: React.FocusEvent<HTMLInputElement>) => {\n        const { onFocus = noop } = this.props;\n        this.setState({ suggestionsOpen: true, selectedTab: 0 });\n        onFocus(event);\n    }\n\n    handleBlur = (event: React.FocusEvent<HTMLInputElement>) => {\n        setTimeout(() => {\n            const { onBlur = noop } = this.props;\n            this.setState({ suggestionsOpen: false });\n            onBlur(event);\n        });\n    }\n\n    handleTabChange = (event: React.SyntheticEvent, newValue: number) => {\n        event.preventDefault();\n        this.setState({ selectedTab: newValue });\n    };\n\n
    handleKeyPress = (event: React.KeyboardEvent<HTMLInputElement>) => {\n        const { onCreate = noop, onSelect = noop, suggestions = [] } = this.props;\n        const { selectedSuggestionIndex, selectedTab } = this.state;\n        if (event.key === 'Enter') {\n            if (this.isSuggestionBoxOpen() && selectedSuggestionIndex < suggestions.length) {\n                // prevent form submissions when selecting a suggestion\n                event.preventDefault();\n                if (this.props.category === AutocompleteCat.SHARING) {\n                    onSelect(this.state.tabbedListContents[Object.keys(this.state.tabbedListContents)[selectedTab]][selectedSuggestionIndex]);\n                } else {\n                    onSelect(suggestions[selectedSuggestionIndex]);\n                }\n            } else if (this.props.value.length > 0) {\n                onCreate();\n            }\n        }\n    }\n\n
    handleNavigationKeyPress = (ev: React.KeyboardEvent<HTMLInputElement>) => {\n        if (ev.key === 'Tab' && this.isSuggestionBoxOpen() && this.props.category === AutocompleteCat.SHARING) {\n            ev.preventDefault();\n            // Cycle through tabs, or loop back to the first tab\n            this.setState({ selectedTab: ((this.state.selectedTab + 1) % Object.keys(this.state.tabbedListContents).length)});\n        }\n        if (ev.key === 'ArrowUp') {\n            ev.preventDefault();\n            this.updateSelectedSuggestionIndex(-1);\n        } else if (ev.key === 'ArrowDown') {\n            ev.preventDefault();\n            this.updateSelectedSuggestionIndex(1);\n        }\n    }\n\n
    updateSelectedSuggestionIndex(value: -1 | 1) {\n        const { suggestions = [], category } = this.props;\n        const { tabbedListContents, selectedTab, selectedSuggestionIndex } = this.state;\n        const tabLabels = Object.keys(tabbedListContents);\n        const currentList = category === AutocompleteCat.SHARING ? tabbedListContents[tabLabels[selectedTab]] : suggestions;\n        if (selectedSuggestionIndex <= 0 && value === -1) {\n            this.setState({ selectedSuggestionIndex: currentList.length - 1 });\n        } else {\n            this.setState(({ selectedSuggestionIndex }) => ({\n                selectedSuggestionIndex: (selectedSuggestionIndex + value) % currentList.length,\n            }));\n        }\n    }\n\n
    renderChips() {\n        const { items, onDelete } = this.props;\n\n        /**\n         * If the input's startAdornment prop is not undefined, the input's label will stay above the input.\n         * If there are no items, we want the label to go back to its placeholder position.\n         * That is why we return without a value instead of returning the result of a _map_, which would be an empty array.\n         */\n        if (items.length === 0) {\n            return;\n        }\n\n
        return items.map(\n            (item, index) => {\n                const tooltip = this.props.renderChipTooltip ? this.props.renderChipTooltip(item) : '';\n                if (tooltip && tooltip.length) {\n                    return <span key={index}>\n                        <Tooltip title={tooltip}>\n                        <Chip\n                            label={this.renderChipValue(item)}\n                            key={index}\n                            onDelete={onDelete && !this.props.disabled ? (() => onDelete(item, index)) : undefined} />\n                    </Tooltip></span>;\n                } else {\n                    return <span key={index}><Chip\n                        label={this.renderChipValue(item)}\n                        onDelete={onDelete && !this.props.disabled ? (() => onDelete(item, index)) : undefined} /></span>;\n                }\n            }\n        );\n    }\n\n
    renderChipValue(value: Value) {\n        const { renderChipValue } = this.props;\n        return renderChipValue ? renderChipValue(value) : JSON.stringify(value);\n    }\n\n    preventBlur = (event: React.MouseEvent<HTMLElement>) => {\n        event.preventDefault();\n    }\n\n    handleClickAway = (event: React.MouseEvent<HTMLElement>) => {\n        if (event.target !== this.inputRef.current) {\n            this.setState({ suggestionsOpen: false });\n        }\n    }\n\n    handleSelect = (suggestion: Suggestion) => {\n        return () => {\n            const { onSelect = noop } = this.props;\n            const { current } = this.inputRef;\n            if (current) {\n                current.focus();\n            }\n            onSelect(suggestion);\n        };\n    }\n\n
    renderSuggestion(suggestion: Suggestion) {\n        const { renderSuggestion } = this.props;\n        return renderSuggestion\n            ? renderSuggestion(suggestion)\n            : <ListItemText>{JSON.stringify(suggestion)}</ListItemText>;\n    }\n\n    renderSharingSuggestion = (suggestion: Suggestion) => {\n        return <ListItemText>\n                    <Typography className={this.props.classes.listItemStyle} data-cy=\"sharing-suggestion\">\n                        { isUserGroup(suggestion) ? `${(suggestion as any).name}` : `${(suggestion as any).fullName} (${(suggestion as any).email})` }\n                    </Typography>\n                </ListItemText>;\n    }\n\n    getSuggestionsWidth() {\n        return this.containerRef.current ? this.containerRef.current.offsetWidth : 'auto';\n    }\n});\n\n
const popperMargin = 16; // Space to keep between autocomplete and window edge\nconst popperPreferredMinHeight = 75; // Roughly 2 autocomplete suggestions\n\nconst applyMaxSize = {\n    name: \"applyMaxSize\",\n    enabled: true,\n    phase: beforeWrite,\n    requires: [\"maxSize\"],\n    fn({ state }) {\n        // Set maximum usable space with margin\n        const { height } = state.modifiersData.maxSize;\n        const maxHeight = height - popperMargin;\n        state.styles.popper.maxHeight = `${maxHeight}px`;\n        state.elements.popper.firstChild.style.maxHeight = `${maxHeight}px`;\n\n        // Get input field bounds and window height\n        const referenceElementBounds = state.elements.reference.getBoundingClientRect();\n        const windowHeight = window.innerHeight;\n\n
        if (referenceElementBounds && windowHeight) {\n            // Get available space above / below input field\n            const spaceAbove = referenceElementBounds.y;\n            const spaceBelow = windowHeight - referenceElementBounds.bottom;\n\n            // Swap popper direction if smaller than ~2 suggestions and the alternate direction has more space\n            // Space check prevents infinite direction swapping as this gets run again when direction is swapped\n            if (maxHeight < popperPreferredMinHeight) {\n                if (state.options.placement === 'bottom' && spaceAbove > spaceBelow) {\n                    state.options.placement = 'top';\n                } else if (state.options.placement === 'top' && spaceBelow > spaceAbove) {\n                    state.options.placement = 'bottom';\n                }\n            }\n        }\n    }\n};\n\n
type ChipClasses = 'root';\n\nconst chipStyles: CustomStyleRulesCallback<ChipClasses> = theme => ({\n    root: {\n        marginRight: theme.spacing(0.25),\n        height: theme.spacing(3),\n    }\n});\n\nconst Chip = withStyles(chipStyles)(MuiChip);\n\ntype PopperClasses = 'root';\n\nconst popperStyles: CustomStyleRulesCallback<PopperClasses> = theme => ({\n    root: {\n        zIndex: theme.zIndex.modal,\n    }\n});\n\nconst Popper = withStyles(popperStyles)(\n    ({ classes, ...props }: PopperProps & WithStyles<PopperClasses>) =>\n        <MuiPopper {...props} className={classes.root} />\n);\n\n
type InputClasses = 'root' | 'input';\n\nconst inputStyles: CustomStyleRulesCallback<InputClasses> = () => ({\n    root: {\n        display: 'flex',\n        flexWrap: 'wrap',\n    },\n    input: {\n        minWidth: '20%',\n        flex: 1,\n    },\n});\n\nconst Input = withStyles(inputStyles)(MuiInput);\n\nconst Paper = withStyles({\n    root: {\n        maxHeight: '80vh',\n        overflowY: 'auto',\n    }\n})(MuiPaper);\n"
  },
  {
    "path": "services/workbench2/src/components/breadcrumbs/breadcrumbs.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Breadcrumbs } from \"./breadcrumbs\";\nimport { ThemeProvider, StyledEngineProvider } from \"@mui/material\";\nimport { CustomTheme } from 'common/custom-theme';\nimport { Provider } from \"react-redux\";\nimport { combineReducers, createStore } from \"redux\";\n\ndescribe(\"<Breadcrumbs />\", () => {\n\n    let onClick;\n    let resources = {};\n    let store;\n    beforeEach(() => {\n        onClick = cy.spy().as('onClick');\n        const initialAuthState = {\n            config: {\n                clusterConfig: {\n                    Collections: {\n                        ForwardSlashNameSubstitution: \"/\"\n                    }\n                }\n            }\n        }\n        store = createStore(combineReducers({\n            auth: (state = initialAuthState, action) => state,\n        }));\n    });\n\n    it(\"renders one item\", () => {\n        const items = [\n            { label: 'breadcrumb 1', uuid: '1' }\n        ];\n        cy.mount(\n            <Provider store={store}>\n                <StyledEngineProvider injectFirst>\n                    <ThemeProvider theme={CustomTheme}>\n                        <Breadcrumbs items={items} resources={resources} onClick={onClick} onContextMenu={cy.stub()} />\n                    </ThemeProvider>\n                </StyledEngineProvider>\n            </Provider>);\n        cy.get('button').should('have.length', 1);\n        cy.get('button').should('have.text', 'breadcrumb 1');\n        cy.get('[data-testid=ChevronRightIcon]').should('have.length', 0);\n    });\n\n    it(\"renders multiple items\", () => {\n        const items = [\n            { label: 'breadcrumb 1', uuid: '1' },\n            { label: 'breadcrumb 2', uuid: '2' }\n        ];\n        cy.mount(\n            <Provider store={store}>\n                <StyledEngineProvider injectFirst>\n                    <ThemeProvider theme={CustomTheme}>\n                        <Breadcrumbs items={items} resources={resources} onClick={onClick} onContextMenu={cy.stub()} />\n                    </ThemeProvider>\n                </StyledEngineProvider>\n            </Provider>);\n        cy.get('button').should('have.length', 2);\n        cy.get('[data-testid=ChevronRightIcon]').should('have.length', 1);\n    });\n\n    it(\"calls onClick with clicked item\", () => {\n        const items = [\n            { label: 'breadcrumb 1', uuid: '1' },\n            { label: 'breadcrumb 2', uuid: '2' }\n        ];\n        cy.mount(\n            <Provider store={store}>\n                <StyledEngineProvider injectFirst>\n                    <ThemeProvider theme={CustomTheme}>\n                        <Breadcrumbs items={items} resources={resources} onClick={onClick} onContextMenu={cy.stub()} />\n                    </ThemeProvider>\n                </StyledEngineProvider>\n            </Provider>);\n        cy.get('button').eq(1).click();\n        cy.get('@onClick').should('have.been.calledWith', Cypress.sinon.match.func, items[1]);\n    });\n\n});\n"
  },
  {
    "path": "services/workbench2/src/components/breadcrumbs/breadcrumbs.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Button, Grid, Typography, Tooltip } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport ChevronRightIcon from '@mui/icons-material/ChevronRight';\nimport withStyles from '@mui/styles/withStyles';\nimport { IllegalNamingWarning } from '../warning/warning';\nimport { IconType, FreezeIcon } from 'components/icon/icon';\nimport { getResource, ResourcesState } from 'store/resources/resources';\nimport classNames from 'classnames';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { GroupClass } from \"models/group\";\nimport { navigateTo, navigateToGroupDetails } from 'store/navigation/navigation-action';\nimport { grey } from '@mui/material/colors';\nimport { isEqual } from 'lodash';\nexport interface Breadcrumb {\n    label: string;\n    icon?: IconType;\n    uuid: string;\n}\n\ntype CssRules = \"item\" | \"chevron\" | \"label\" | \"buttonLabel\" | \"icon\" | \"frozenIcon\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    item: {\n        borderRadius: '16px',\n        height: '32px',\n        minWidth: '36px',\n        color: theme.customs.colors.grey700,\n        '&.parentItem': {\n            color: `${theme.palette.primary.main}`,\n        },\n    },\n    chevron: {\n        color: grey[\"600\"],\n    },\n    label: {\n        textTransform: \"none\",\n        paddingRight: '3px',\n        paddingLeft: '3px',\n        lineHeight: '1.4',\n    },\n    buttonLabel: {\n        overflow: 'hidden',\n        justifyContent: 'flex-start',\n        display: 'inherit',\n        alignItems: 'inherit',\n    },\n    icon: {\n        fontSize: 20,\n        color: grey[\"600\"],\n        marginRight: '5px',\n    },\n    frozenIcon: {\n        fontSize: 20,\n        color: grey[\"600\"],\n        marginLeft: '3px',\n    },\n});\n\nexport interface BreadcrumbsProps {\n    items: Breadcrumb[];\n    resources: ResourcesState;\n    onClick: (navFunc: (uuid: string) => void, breadcrumb: Breadcrumb) => void;\n    onContextMenu: (event: React.MouseEvent<HTMLElement>, breadcrumb: Breadcrumb) => void;\n}\n\nexport const Breadcrumbs = withStyles(styles)(\n    React.memo(({ classes, onClick, onContextMenu, items, resources }: BreadcrumbsProps & WithStyles<CssRules>) => {\n        return<Grid container data-cy='breadcrumbs' alignItems=\"center\" wrap=\"nowrap\">\n        {\n            items.map((item, index) => {\n                const isLastItem = index === items.length - 1;\n                const isFirstItem = index === 0;\n                const Icon = item.icon || (() => (null));\n                const resource = getResource(item.uuid)(resources) as any;\n                const navFunc = resource && 'groupClass' in resource && resource.groupClass === GroupClass.ROLE ? navigateToGroupDetails : navigateTo;\n\n                return (\n                    <React.Fragment key={index}>\n                        {isFirstItem ? null : <IllegalNamingWarning name={item.label} />}\n                        <Tooltip title={item.label} disableFocusListener>\n                            <Button\n                                data-cy={\n                                    isFirstItem\n                                    ? 'breadcrumb-first'\n                                    : isLastItem\n                                        ? 
'breadcrumb-last'\n                                        : false}\n                                className={classNames(\n                                    isLastItem ? null : 'parentItem',\n                                    classes.item\n                                )}\n                                color=\"inherit\"\n                                onClick={() => onClick(navFunc, item)}\n                                onContextMenu={event => onContextMenu(event, item)}>\n                                <span className={classes.buttonLabel}>\n                                    <Icon className={classes.icon} />\n                                    <Typography\n                                        noWrap\n                                        color=\"inherit\"\n                                        className={classes.label}>\n                                        {item.label}\n                                    </Typography>\n                                    {\n                                        resource?.frozenByUuid ? <FreezeIcon className={classes.frozenIcon} /> : null\n                                    }\n                                </span>\n                            </Button>\n                        </Tooltip>\n                        {!isLastItem && <ChevronRightIcon color=\"inherit\" className={classNames('parentItem', classes.chevron)} />}\n                    </React.Fragment>\n                );\n            })\n        }\n        </Grid>\n    }, preventRerender)\n);\n\n// return true to prevent re-render, false to allow re-render\nfunction preventRerender(prevProps: BreadcrumbsProps, nextProps: BreadcrumbsProps) {\n    if (!isEqual(prevProps.items, nextProps.items)) return false;\n    if (!isEqual(prevProps.resources, nextProps.resources)) return false;\n    return true;\n}"
  },
  {
    "path": "services/workbench2/src/components/checkbox-field/checkbox-field.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { WrappedFieldProps } from 'redux-form';\nimport {\n    FormControlLabel,\n    Checkbox,\n    FormControl,\n    FormGroup,\n    FormLabel,\n    FormHelperText\n} from '@mui/material';\nimport { RootState } from 'store/store';\nimport { getStorageClasses } from 'common/config';\nimport { connect } from 'react-redux';\n\nexport const CheckboxField = (props: WrappedFieldProps & { label?: string }) =>\n    <FormControlLabel\n        control={\n            <Checkbox\n                checked={props.input.value}\n                onChange={props.input.onChange}\n                disabled={props.meta.submitting}\n                color=\"primary\" />\n        }\n        label={props.label}\n    />;\n\ntype MultiCheckboxFieldProps = {\n    items: string[];\n    defaultValues?: string[];\n    label?: string;\n    minSelection?: number;\n    maxSelection?: number;\n    helperText?: string;\n    rowLayout?: boolean;\n}\n\nexport const MultiCheckboxField = (props: WrappedFieldProps & MultiCheckboxFieldProps) => {\n    const isValid = (items: string[]) => (items.length >= (props.minSelection || 0)) &&\n        (items.length <= (props.maxSelection || items.length));\n    if (props.input.value.length === 0 && (props.defaultValues || []).length !== 0) {\n        props.input.value = props.defaultValues ? [...props.defaultValues] : [];\n    }\n    return (\n        <FormControl variant=\"standard\" error={!isValid(props.input.value)}>\n            <FormLabel component='label'>{props.label}</FormLabel>\n            <FormGroup row={props.rowLayout}>\n            { props.items.map((item, idx) =>\n                <FormControlLabel\n                    key={`label-${idx}`}\n                    control={\n                        <Checkbox\n                            data-cy={`checkbox-${item}`}\n                            key={`control-${idx}`}\n                            name={`${props.input.name}[${idx}]`}\n                            value={item}\n                            checked={\n                                props.input.value.indexOf(item) !== -1 ||\n                                (props.input.value.length === 0 &&\n                                    (props.defaultValues || []).indexOf(item) !== -1)\n                            }\n                            onChange={e => {\n                                const newValue = [...props.input.value];\n                                if (e.target.checked) {\n                                    newValue.push(item);\n                                } else {\n                                    newValue.splice(newValue.indexOf(item), 1);\n                                }\n                                if (!isValid(newValue)) { return; }\n                                return props.input.onChange(newValue);\n                            }}\n                            disabled={props.meta.submitting}\n                            color=\"primary\" />\n                    }\n                    label={item} />) }\n            </FormGroup>\n            <FormHelperText>{props.helperText}</FormHelperText>\n        </FormControl>\n    ); };\n\ntype DialogMultiCheckboxFieldProps = {\n    name: string,\n    items: string[];\n    defaultValues?: string[];\n    label?: string;\n    minSelection?: number;\n    maxSelection?: number;\n    helperText?: string;\n    onChange: (data: any) => void\n}\n\nconst mapState 
= (state: RootState) => ({\n    items: getStorageClasses(state.auth.config)\n});\n\nexport const DialogMultiCheckboxField = connect(mapState)((props: DialogMultiCheckboxFieldProps) => {\n    const [selectedClasses, setSelectedClasses] = React.useState(props.defaultValues || []);\n\n    const isValid = (items: string[]) => (items.length >= (props.minSelection || 0)) &&\n        (items.length <= (props.maxSelection || items.length));\n\n    if (selectedClasses.length === 0 && (props.defaultValues || []).length !== 0) {\n        setSelectedClasses(props.defaultValues ? [...props.defaultValues] : []);\n    }\n\n    return (\n        <FormControl variant=\"standard\" error={!isValid(selectedClasses)}>\n            <FormLabel component='label'>{props.label}</FormLabel>\n            <FormGroup row>\n            {props.items.map((item, idx) =>\n                <FormControlLabel\n                    key={`label-${idx}`}\n                    control={\n                        <Checkbox\n                            data-cy={`checkbox-${item}`}\n                            key={`control-${idx}`}\n                            name={`${props.name}[${idx}]`}\n                            value={item}\n                            checked={selectedClasses.includes(item)}\n                            onChange={e => {\n                                const newSelection = [...selectedClasses];\n                                if (e.target.checked) {\n                                    newSelection.push(item);\n                                } else {\n                                    newSelection.splice(newSelection.indexOf(item), 1);\n                                }\n                                if (!isValid(newSelection)) { return; }\n                                setSelectedClasses(newSelection);\n                                return props.onChange(newSelection);\n                            }}\n                            disabled={false}\n                            color=\"primary\" />\n                    }\n                    label={item} />) }\n            </FormGroup>\n            <FormHelperText>{props.helperText}</FormHelperText>\n        </FormControl>\n    );\n});\n\n"
  },
  {
    "path": "services/workbench2/src/components/chips/chips.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Chip, Grid } from '@mui/material';\nimport withStyles from '@mui/styles/withStyles';\nimport { getTagKeyID, getTagValueID, getTagKeyLabel, getTagValueLabel } from 'models/vocabulary';\nimport {\n    DragSource,\n    DragSourceSpec,\n    DragSourceCollector,\n    ConnectDragSource,\n    DropTarget,\n    DropTargetSpec,\n    DropTargetCollector,\n    ConnectDropTarget\n} from 'react-dnd';\nimport { compose } from 'lodash/fp';\nimport { WithStyles } from '@mui/styles';\nimport { Vocabulary } from 'models/vocabulary';\n\ninterface ChipsProps<Value> {\n    values: Value[];\n    getLabel?: (value: Value) => string;\n    filler?: React.ReactNode;\n    deletable?: boolean;\n    orderable?: boolean;\n    onChange: (value: Value[]) => void;\n    clickable?: boolean;\n}\n\ntype CssRules = 'root';\n\nconst styles: CustomStyleRulesCallback<CssRules> = ({ spacing }) => ({\n    root: {\n        margin: `0px ${spacing(0.5)}`,\n    },\n});\nexport const Chips = withStyles(styles)(\n    class Chips<Value> extends React.Component<ChipsProps<Value> & WithStyles<CssRules>> {\n        render() {\n            const { values, filler } = this.props;\n            return <Grid container spacing={1} className={this.props.classes.root}>\n                {values && values.map(this.renderChip)}\n                {filler && <Grid item xs>{filler}</Grid>}\n            </Grid>;\n        }\n\n        renderChip = (value: Value, index: number) => {\n            const { deletable, getLabel } = this.props;\n            return <Grid item key={index}>\n                <Chip onDelete={deletable ? this.deleteValue(value) : undefined}\n                    label={getLabel !== undefined ? 
getLabel(value) : value} />\n            </Grid>\n        }\n\n        type = 'chip';\n\n        dragSpec: DragSourceSpec<DraggableChipProps<Value>, { value: Value }> = {\n            beginDrag: ({ value }) => ({ value }),\n            endDrag: ({ value: dragValue }, monitor) => {\n                const result = monitor.getDropResult();\n                if (result) {\n                    const { value: dropValue } = monitor.getDropResult();\n                    const dragIndex = this.props.values.indexOf(dragValue);\n                    const dropIndex = this.props.values.indexOf(dropValue);\n                    const newValues = this.props.values.slice(0);\n                    if (dragIndex < dropIndex) {\n                        newValues.splice(dragIndex, 1);\n                        newValues.splice(dropIndex - 1 || 0, 0, dragValue);\n                    } else if (dragIndex > dropIndex) {\n                        newValues.splice(dragIndex, 1);\n                        newValues.splice(dropIndex, 0, dragValue);\n                    }\n                    this.props.onChange(newValues);\n                }\n            }\n        };\n\n        dragCollector: DragSourceCollector<{}> = connect => ({\n            connectDragSource: connect.dragSource(),\n        })\n\n        dropSpec: DropTargetSpec<DraggableChipProps<Value>> = {\n            drop: ({ value }) => ({ value }),\n        };\n\n        dropCollector: DropTargetCollector<{}> = (connect, monitor) => ({\n            connectDropTarget: connect.dropTarget(),\n            isOver: monitor.isOver(),\n        })\n        chip = compose(\n            DragSource(this.type, this.dragSpec, this.dragCollector),\n            DropTarget(this.type, this.dropSpec, this.dropCollector),\n        )(\n            ({ connectDragSource, connectDropTarget, isOver, value }: DraggableChipProps<Value> & CollectedProps) => {\n                const connect = compose(\n                    connectDragSource,\n                    connectDropTarget,\n                );\n\n                const chip =\n                    <span>\n                        <Chip\n                            color={isOver ? 'primary' : 'default'}\n                            onDelete={this.props.deletable\n                                ? this.deleteValue(value)\n                                : undefined}\n                            clickable={this.props.clickable}\n                            label={this.props.getLabel ?\n                                this.props.getLabel(value)\n                                : typeof value === 'object'\n                                    ? JSON.stringify(value)\n                                    : value} />\n                    </span>;\n\n                return this.props.orderable\n                    ? 
connect(chip)\n                    : chip;\n            }\n        );\n\n        deleteValue = (value: Value) => () => {\n            const { values } = this.props;\n            const index = values.indexOf(value);\n            const newValues = values.slice(0);\n            newValues.splice(index, 1);\n            this.props.onChange(newValues);\n        }\n    });\n\nexport const formatChips = (properties: Record<string, string | string[] | undefined>) => {\n    const result: string[] = [];\n    for (const key in properties) {\n        if (!properties[key]) continue;\n        if (typeof properties[key] === 'string') {\n            properties[key] = [properties[key] as string];\n        }\n        for (const value of properties[key]!) {\n            result.push(`${key}: ${value}`)\n        }\n    }\n    return result;\n};\n\ninterface CollectedProps {\n    connectDragSource: ConnectDragSource;\n    connectDropTarget: ConnectDropTarget;\n\n    isOver: boolean;\n}\n\ninterface DraggableChipProps<Value> {\n    value: Value;\n}\n\nexport type PropertyChips = Record<string, string | string[]>;\n\nexport const getVocabularyFromChips = (chips: PropertyChips, vocabulary: Vocabulary): PropertyChips => {\n    const vocabularyChips: PropertyChips = {};\n    const strictMode = vocabulary?.strict_tags === true;\n\n    for (const [keyLabel, valueLabel] of Object.entries(chips)) {\n        if (!valueLabel) continue;\n\n        const mappedTagKeyID = getTagKeyID(keyLabel, vocabulary);\n        const tagKeyID = mappedTagKeyID || keyLabel;\n        if (strictMode && !mappedTagKeyID) continue;\n\n        if (Array.isArray(valueLabel)) {\n            const vocabularyValues: string[] = [];\n            for (const singleValue of valueLabel) {\n                const tagValueID = getTagValueID(tagKeyID, singleValue, vocabulary);\n                if (tagValueID) {\n                    vocabularyValues.push(tagValueID);\n                    continue;\n                }\n                if (!strictMode) {\n                    vocabularyValues.push(singleValue);\n                }\n            }\n            if (vocabularyValues.length > 0) {\n                vocabularyChips[tagKeyID] = vocabularyValues;\n            }\n        } else {\n            const tagValueID = getTagValueID(tagKeyID, valueLabel, vocabulary);\n            if (tagValueID) {\n                vocabularyChips[tagKeyID] = tagValueID;\n            } else if (!strictMode) {\n                vocabularyChips[tagKeyID] = valueLabel;\n            }\n        }\n    }\n\n    return vocabularyChips;\n};\n\nexport const getChipsFromVocabulary = (properties: Record<string, string | string[] | undefined>, vocabulary: Vocabulary): PropertyChips => {\n    return properties ? Object.entries(properties).reduce((acc, [key, value]) => ({\n        ...acc,\n        [getTagKeyLabel(key, vocabulary)]: Array.isArray(value)? value.map(v => getTagValueLabel(key, v, vocabulary)) : getTagValueLabel(key, value || '', vocabulary)\n    }), {} as PropertyChips) : {} as PropertyChips;\n};"
  },
  {
    "path": "services/workbench2/src/components/chips-input/chips-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Chips } from 'components/chips/chips';\nimport { Input as MuiInput } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { InputProps } from '@mui/material/Input';\n\ninterface ChipsInputProps<Value> {\n    values: Value[];\n    getLabel?: (value: Value) => string;\n    onChange: (value: Value[]) => void;\n    onPartialInput?: (value: boolean) => void;\n    handleFocus?: (e: any) => void;\n    handleBlur?: (e: any) => void;\n    chipsClassName?: string;\n    createNewValue: (value: string) => Value;\n    inputComponent?: React.ComponentType<InputProps>;\n    inputProps?: InputProps;\n    deletable?: boolean;\n    orderable?: boolean;\n    disabled?: boolean;\n    pattern?: RegExp;\n}\n\ntype CssRules = 'chips' | 'input' | 'inputContainer';\n\nconst styles: CustomStyleRulesCallback = ({ spacing }) => ({\n    chips: {\n        minHeight: spacing(5),\n        zIndex: 1,\n        position: 'relative',\n    },\n    input: {\n        zIndex: 1,\n        marginBottom: 8,\n        position: 'relative',\n    },\n    inputContainer: {\n        marginTop: -34\n    },\n});\n\nexport const ChipsInput = withStyles(styles)(\n    class ChipsInput<Value> extends React.Component<ChipsInputProps<Value> & WithStyles<CssRules>> {\n\n        state = {\n            text: '',\n        };\n\n        filler = React.createRef<HTMLDivElement>();\n        timeout = -1;\n\n        setText = (event: React.ChangeEvent<HTMLInputElement>) => {\n            this.setState({ text: event.target.value }, () => {\n                // Update partial input status\n                this.props.onPartialInput && this.props.onPartialInput(this.state.text !== '');\n\n                // If pattern is provided, check for delimiter\n                if (this.props.pattern) {\n                    const matches = this.state.text.match(this.props.pattern);\n                    // Only create values if 1 match and the last character is a delimiter\n                    //   (user pressed an invalid character at the end of a token)\n                    //   or if multiple matches (user pasted text)\n                    if (matches &&\n                            (\n                                matches.length > 1 ||\n                                (matches.length === 1 && !this.state.text.endsWith(matches[0]))\n                            )) {\n                        this.createNewValue(matches.map((i) => i));\n                    }\n                }\n            });\n        }\n\n        handleKeyPress = (e: React.KeyboardEvent<HTMLInputElement>) => {\n            // Handle special keypresses\n            if (e.key === 'Enter') {\n                this.createNewValue();\n                e.preventDefault();\n            } else if (e.key === 'Backspace') {\n                this.deleteLastValue();\n            }\n        }\n\n        createNewValue = (matches?: string[]) => {\n            if (this.state.text) {\n                if (matches && matches.length > 0) {\n                    const newValues = matches.map((v) => this.props.createNewValue(v));\n                    this.setState({ text: '' });\n                    this.props.onChange([...this.props.values, ...newValues]);\n                } else {\n                    const newValue = 
this.props.createNewValue(this.state.text);\n                    this.setState({ text: '' });\n                    this.props.onChange([...this.props.values, newValue]);\n                }\n                this.props.onPartialInput && this.props.onPartialInput(false);\n            }\n        }\n\n        deleteLastValue = () => {\n            if (this.state.text.length === 0 && this.props.values.length > 0) {\n                this.props.onChange(this.props.values.slice(0, -1));\n            }\n        }\n\n        updateCursorPosition = () => {\n            if (this.timeout) {\n                clearTimeout(this.timeout);\n            }\n            this.timeout = window.setTimeout(() => this.setState({ ...this.state }));\n        }\n\n        getInputStyles = (): React.CSSProperties => ({\n            width: this.filler.current\n                ? this.filler.current.offsetWidth\n                : '100%',\n            right: this.filler.current\n                ? `calc(${this.filler.current.offsetWidth}px - 100%)`\n                : 0,\n\n        })\n\n        componentDidMount() {\n            this.updateCursorPosition();\n        }\n\n        render() {\n            return <>\n                {this.renderChips()}\n                {this.renderInput()}\n            </>;\n        }\n\n        renderChips() {\n            const { classes, ...props } = this.props;\n            return <div className={[classes.chips, this.props.chipsClassName].join(' ')}>\n                <Chips\n                    {...props}\n                    clickable={!props.disabled}\n                    filler={<div ref={this.filler} />}\n                />\n            </div>;\n        }\n\n        renderInput() {\n            const { inputProps: InputProps, inputComponent: Input = MuiInput, classes } = this.props;\n            return <Input\n                {...InputProps}\n                value={this.state.text}\n                onChange={this.setText}\n                disabled={this.props.disabled}\n                onKeyDown={this.handleKeyPress}\n                onFocus={this.props.handleFocus}\n                onBlur={this.props.handleBlur}\n                inputProps={{\n                    ...(InputProps && InputProps.inputProps),\n                    className: classes.input,\n                    style: this.getInputStyles(),\n                }}\n                fullWidth\n                className={classes.inputContainer} />;\n        }\n\n        componentDidUpdate(prevProps: ChipsInputProps<Value>) {\n            if (prevProps.values !== this.props.values) {\n                this.updateCursorPosition();\n            }\n        }\n        componentWillUnmount() {\n            clearTimeout(this.timeout);\n        }\n    });\n"
  },
  {
    "path": "services/workbench2/src/components/code-snippet/code-snippet.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Typography, Link } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport classNames from 'classnames';\nimport { connect, DispatchProp } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { FederationConfig, getNavUrl } from 'routes/routes';\nimport { Dispatch } from 'redux';\nimport { navigationNotAvailable } from 'store/navigation/navigation-action';\n\ntype CssRules = 'root' | 'inlineRoot' | 'space' | 'inline';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        boxSizing: 'border-box',\n        overflow: 'auto',\n        padding: theme.spacing(1),\n    },\n    inlineRoot: {\n        padding: \"3px\",\n        display: \"inline\",\n    },\n    space: {\n        marginLeft: '15px',\n    },\n    inline: {\n        display: 'inline',\n    },\n});\n\nexport interface CodeSnippetDataProps {\n    lines: string[];\n    className?: string;\n    apiResponse?: boolean;\n    linked?: boolean;\n    children?: JSX.Element;\n    inline?: boolean;\n}\n\ninterface CodeSnippetAuthProps {\n    auth: FederationConfig;\n}\n\ntype CodeSnippetProps = CodeSnippetDataProps & WithStyles<CssRules>;\n\nconst mapStateToProps = (state: RootState): CodeSnippetAuthProps => ({\n    auth: state.auth,\n});\n\nexport const CodeSnippet = withStyles(styles)(connect(mapStateToProps)(\n    ({ classes, lines, linked, className, apiResponse, dispatch, auth, children, inline }: CodeSnippetProps & CodeSnippetAuthProps & DispatchProp) =>\n        <Typography\n            component=\"div\"\n            className={classNames([classes.root, className, inline ? classes.inlineRoot : undefined])}>\n            <Typography className={apiResponse ? classes.space : classNames([className, inline ? classes.inline : undefined])} component=\"pre\">\n                {children}\n                {linked ?\n                    lines.map((line, index) => <React.Fragment key={index}>{renderLinks(auth, dispatch)(line)}{`\\n`}</React.Fragment>) :\n                    lines.join('\\n')\n                }\n            </Typography>\n        </Typography>\n));\n\nexport const renderLinks = (auth: FederationConfig, dispatch: Dispatch) => (text: string): JSX.Element => {\n    // Matches UUIDs & PDHs\n    const REGEX = /[a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}|[0-9a-f]{32}\\+\\d+/g;\n    const links = text.match(REGEX);\n    if (!links) {\n        return <>{text}</>;\n    }\n    return <>\n        {text.split(REGEX).map((part, index) =>\n            <React.Fragment key={index}>\n                {part}\n                {links[index] &&\n                    <Link onClick={() => {\n                        const url = getNavUrl(links[index], auth)\n                        if (url) {\n                            window.open(`${window.location.origin}${url}`, '_blank', \"noopener\");\n                        } else {\n                            dispatch(navigationNotAvailable(links[index]));\n                        }\n                    }}\n                        style={{ cursor: 'pointer' }}>\n                        {links[index]}\n                    </Link>}\n            </React.Fragment>\n        )}\n    </>;\n};\n"
  },
  {
    "path": "services/workbench2/src/components/code-snippet/virtual-code-snippet.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Typography, Tooltip, IconButton } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport classNames from 'classnames';\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { FederationConfig } from 'routes/routes';\nimport { renderLinks } from './code-snippet';\nimport { FixedSizeList } from 'react-window';\nimport AutoSizer from \"react-virtualized-auto-sizer\";\nimport CopyResultToClipboard from 'components/copy-to-clipboard/copy-result-to-clipboard';\nimport { CopyIcon } from 'components/icon/icon';\nimport { SnackbarKind, snackbarActions } from 'store/snackbar/snackbar-actions';\nimport { Dispatch } from \"redux\";\n\ntype CssRules = 'root' | 'space' | 'content' | 'copyButton' ;\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        position: 'relative',\n        boxSizing: 'border-box',\n        height: '100%',\n        padding: theme.spacing(1),\n    },\n    space: {\n        marginLeft: '15px',\n    },\n    content: {\n        maxHeight: '100%',\n        height: '100vh',\n    },\n    copyButton: {\n        position: 'absolute',\n        top: '8px',\n        right: '16px',\n        zIndex: 100,\n    },\n});\n\nexport interface CodeSnippetDataProps {\n    lines: string[];\n    lineFormatter?: (lines: string[], index: number) => string;\n    className?: string;\n    apiResponse?: boolean;\n    linked?: boolean;\n    copyButton?: boolean;\n}\n\nexport interface CodeSnippetActionProps {\n    renderLinks: (auth: FederationConfig) => (text: string) => JSX.Element;\n    onCopyToClipboard: () => void;\n}\n\ninterface CodeSnippetAuthProps {\n    auth: FederationConfig;\n}\n\ntype CodeSnippetProps = CodeSnippetDataProps & CodeSnippetActionProps & WithStyles<CssRules>;\n\nconst mapStateToProps = (state: RootState): CodeSnippetAuthProps => ({\n    auth: state.auth,\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch): CodeSnippetActionProps => ({\n    renderLinks: (auth: FederationConfig) => renderLinks(auth, dispatch),\n    onCopyToClipboard: () => {\n        dispatch<any>(\n            snackbarActions.OPEN_SNACKBAR({\n                message: \"Contents copied to clipboard\",\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS,\n            })\n        );\n    },\n});\n\nexport const VirtualCodeSnippet = withStyles(styles)(connect(mapStateToProps, mapDispatchToProps)(\n    ({ classes, lines, lineFormatter, linked, copyButton, renderLinks, onCopyToClipboard, className, apiResponse, auth }: CodeSnippetProps & CodeSnippetAuthProps) => {\n        const RenderRow = ({index, style}) => {\n            const lineContents = lineFormatter ? lineFormatter(lines, index) : lines[index];\n            return <span style={style}>{linked ? 
renderLinks(auth)(lineContents) : lineContents}</span>\n        };\n\n        const formatClipboardText = (lines: string[]) => () =>  {\n            return lines.join('\\n');\n        };\n\n\n\n        return <Typography\n            component=\"div\"\n            data-cy=\"virtual-code-snippet\"\n            className={classNames([classes.root, className])}>\n            {copyButton && <span className={classes.copyButton}>\n                <Tooltip title=\"Copy text to clipboard\" disableFocusListener>\n                    <IconButton>\n                        <CopyResultToClipboard\n                            getText={formatClipboardText(lines)}\n                            onCopy={onCopyToClipboard}\n                        >\n                            <CopyIcon />\n                        </CopyResultToClipboard>\n                    </IconButton>\n                </Tooltip>\n            </span>}\n            <Typography className={classNames(classes.content, apiResponse ? classes.space : className)} component=\"pre\">\n                <AutoSizer>\n                    {({ height, width }) =>\n                        <FixedSizeList\n                            height={height}\n                            width={width}\n                            itemSize={21}\n                            itemCount={lines.length}\n                        >\n                            {RenderRow}\n                        </FixedSizeList>\n                    }\n                </AutoSizer>\n            </Typography>\n        </Typography>;\n}));\n"
  },
  {
    "path": "services/workbench2/src/components/collection-panel-files/collection-panel-files.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport classNames from \"classnames\";\nimport { connect } from \"react-redux\";\nimport { FixedSizeList } from \"react-window\";\nimport AutoSizer from \"react-virtualized-auto-sizer\";\nimport servicesProvider from \"common/service-provider\";\nimport { DownloadIcon, MoreHorizontalIcon, MoreVerticalIcon } from \"components/icon/icon\";\nimport { SearchInput } from \"components/search-input/search-input\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport {\n    ListItemIcon,\n    Theme,\n    Tooltip,\n    IconButton,\n    Checkbox,\n    CircularProgress,\n    Button,\n} from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { FileTreeData } from \"../file-tree/file-tree-data\";\nimport { TreeItem, TreeItemStatus } from \"../tree/tree\";\nimport { RootState } from \"store/store\";\nimport { WebDAV, WebDAVRequestConfig } from \"common/webdav\";\nimport { AuthState } from \"store/auth/auth-reducer\";\nimport { extractFilesData } from \"services/collection-service/collection-service-files-response\";\nimport { DefaultIcon, DirectoryIcon, FileIcon, BackIcon, SidePanelRightArrowIcon } from \"components/icon/icon\";\nimport { setCollectionFiles } from \"store/collection-panel/collection-panel-files/collection-panel-files-actions\";\nimport { sortBy } from \"lodash\";\nimport { formatFileSize } from \"common/formatters\";\nimport { getInlineFileUrl, sanitizeToken } from \"views-components/context-menu/actions/helpers\";\nimport { extractUuidKind, ResourceKind } from \"models/resource\";\nimport { CollectionFile, CollectionDirectory } from \"models/collection-file\";\n\ntype CollectionFilesTreeItem = { id: string, data: CollectionFile | CollectionDirectory | undefined };\n\nexport interface CollectionPanelFilesProps {\n    isWritable: boolean;\n    onUploadDataClick: (targetLocation?: string) => void;\n    onSearchChange: (searchValue: string) => void;\n    onItemMenuOpen: (event: React.MouseEvent<HTMLElement>, item: CollectionFilesTreeItem, isWritable: boolean) => void;\n    onOptionsMenuOpen: (event: React.MouseEvent<HTMLElement>, isWritable: boolean) => void;\n    onSelectionToggle: (event: React.MouseEvent<HTMLElement>, item: TreeItem<FileTreeData>) => void;\n    onCollapseToggle: (id: string, status: TreeItemStatus) => void;\n    onFileClick: (id: string) => void;\n    currentItemUuid: any;\n    dispatch: Function;\n    collectionPanelFiles: any;\n    collectionPanel: any;\n}\n\ntype CssRules =\n    | \"filesPanel\"\n    | \"backButton\"\n    | \"backButtonHidden\"\n    | \"pathPanelPathWrapper\"\n    | \"uploadButton\"\n    | \"uploadIcon\"\n    | \"moreOptionsButton\"\n    | \"moreOptions\"\n    | \"loader\"\n    | \"wrapper\"\n    | \"dataWrapper\"\n    | \"row\"\n    | \"rowEmpty\"\n    | \"leftPanel\"\n    | \"rightPanel\"\n    | \"pathPanel\"\n    | \"pathPanelItem\"\n    | \"rowName\"\n    | \"listItemIcon\"\n    | \"rowActive\"\n    | \"filesHeader\"\n    | \"pathPanelMenu\"\n    | \"rowSelection\"\n    | \"leftPanelHidden\"\n    | \"leftPanelVisible\"\n    | \"searchWrapper\"\n    | \"searchWrapperHidden\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: Theme) => ({\n    filesPanel: {\n        // This component expects the parent container to be a flexbox\n        flexGrow: 1,\n        display: 'flex',\n        flexDirection: 
'column',\n    },\n    wrapper: {\n        display: \"flex\",\n        flexGrow: 1,\n        flexShrink: 1,\n        color: \"rgba(0,0,0,0.87)\",\n        fontSize: \"0.875rem\",\n        fontFamily: '\"Roboto\", \"Helvetica\", \"Arial\", sans-serif',\n        fontWeight: 400,\n        lineHeight: \"1.5\",\n        letterSpacing: \"0.01071em\",\n    },\n    backButton: {\n        color: \"#00bfa5\",\n        cursor: \"pointer\",\n        float: \"left\",\n    },\n    backButtonHidden: {\n        display: \"none\",\n    },\n    filesHeader: {\n        flexShrink: 0,\n        flexGrow: 0,\n    },\n    dataWrapper: {\n        flexGrow: 1,\n    },\n    row: {\n        display: \"flex\",\n        marginTop: \"0.5rem\",\n        marginBottom: \"0.5rem\",\n        cursor: \"pointer\",\n        \"&:hover\": {\n            backgroundColor: \"rgba(0, 0, 0, 0.08)\",\n        },\n    },\n    rowEmpty: {\n        top: \"40%\",\n        width: \"100%\",\n        textAlign: \"center\",\n        position: \"absolute\",\n    },\n    loader: {\n        top: \"50%\",\n        left: \"50%\",\n        marginTop: \"-15px\",\n        marginLeft: \"-15px\",\n        position: \"absolute\",\n    },\n    rowName: {\n        display: \"inline-flex\",\n        flexDirection: \"column\",\n        justifyContent: \"center\",\n    },\n    searchWrapper: {\n        display: \"inline-block\",\n        marginBottom: \"1rem\",\n        marginLeft: \"1rem\",\n    },\n    searchWrapperHidden: {\n        width: \"0px\",\n    },\n    rowSelection: {\n        padding: \"0px\",\n    },\n    rowActive: {\n        color: `${theme.palette.primary.main} !important`,\n    },\n    listItemIcon: {\n        display: \"inline-flex\",\n        flexDirection: \"column\",\n        justifyContent: \"center\",\n    },\n    pathPanelMenu: {\n        float: \"right\",\n        marginTop: \"-15px\",\n    },\n    pathPanel: {\n        padding: \"0.5rem\",\n        marginBottom: \"0.5rem\",\n        backgroundColor: \"#fff\",\n    },\n    pathPanelPathWrapper: {\n        display: \"inline-block\",\n    },\n    leftPanel: {\n        display: 'flex',\n        flexDirection: 'column',\n        flex: 0,\n        padding: \"0 1rem 1rem\",\n        marginRight: \"1rem\",\n        whiteSpace: \"nowrap\",\n        position: \"relative\",\n        backgroundColor: \"#fff\",\n    },\n    leftPanelVisible: {\n        opacity: 1,\n        flex: \"50%\",\n        animation: `animateVisible 1000ms ${theme.transitions.easing.easeOut}`,\n    },\n    leftPanelHidden: {\n        opacity: 0,\n        flex: \"initial\",\n        padding: \"0\",\n        marginRight: \"0\",\n    },\n    \"@keyframes animateVisible\": {\n        \"0%\": {\n            opacity: 0,\n            flex: \"initial\",\n        },\n        \"100%\": {\n            opacity: 1,\n            flex: \"50%\",\n        },\n    },\n    rightPanel: {\n        display: 'flex',\n        flexDirection: 'column',\n        flex: \"50%\",\n        padding: \"1rem\",\n        paddingTop: \"0.5rem\",\n        marginTop: \"-0.5rem\",\n        position: \"relative\",\n        backgroundColor: \"#fff\",\n    },\n    pathPanelItem: {\n        cursor: \"pointer\",\n    },\n    uploadIcon: {\n        transform: \"rotate(180deg)\",\n    },\n    uploadButton: {\n        float: \"right\",\n    },\n    moreOptionsButton: {\n        width: theme.spacing(3),\n        height: theme.spacing(3),\n        marginRight: theme.spacing(1),\n        marginTop: \"auto\",\n        marginBottom: \"auto\",\n        justifyContent: 
\"center\",\n    },\n    moreOptions: {\n        position: \"absolute\",\n    },\n});\n\nconst pathPromise = {};\n\nexport const CollectionPanelFiles = withStyles(styles)(\n    connect((state: RootState) => ({\n        auth: state.auth,\n        collectionPanel: state.collectionPanel,\n        collectionPanelFiles: state.collectionPanelFiles,\n    }))((props: CollectionPanelFilesProps & WithStyles<CssRules> & { auth: AuthState }) => {\n        const { classes, onItemMenuOpen, onUploadDataClick, isWritable, dispatch, collectionPanelFiles, collectionPanel } = props;\n        const { apiToken, config } = props.auth;\n\n        const webdavClient = new WebDAV({\n            baseURL: config.keepWebServiceUrl,\n            headers: {\n                Authorization: `Bearer ${apiToken}`,\n            },\n        });\n\n        const webDAVRequestConfig: WebDAVRequestConfig = {\n            headers: {\n                Depth: \"1\",\n            },\n        };\n\n        const parentRef = React.useRef(null);\n        const [path, setPath] = React.useState<string[]>([]);\n        const [pathData, setPathData] = React.useState<Record<string, CollectionFile[]>>({});\n        const [isLoading, setIsLoading] = React.useState(false);\n        const [leftSearch, setLeftSearch] = React.useState(\"\");\n        const [rightSearch, setRightSearch] = React.useState(\"\");\n\n        const leftKey = (path.length > 1 ? path.slice(0, path.length - 1) : path).join(\"/\");\n        const rightKey = path.join(\"/\");\n\n        const leftData = pathData[leftKey] || [];\n        const rightData = pathData[rightKey];\n\n        React.useEffect(() => {\n            if (props.currentItemUuid && extractUuidKind(props.currentItemUuid) === ResourceKind.COLLECTION) {\n                setPathData({});\n                setPath([props.currentItemUuid]);\n            }\n        }, [props.currentItemUuid]);\n\n        const fetchData = (keys, ignoreCache = false) => {\n            const keyArray = Array.isArray(keys) ? 
keys : [keys];\n\n            Promise.all(\n                keyArray\n                    .filter(key => !!key)\n                    .map(key => {\n                        const dataExists = !!pathData[key];\n                        const runningRequest = pathPromise[key];\n\n                        if (ignoreCache || (!dataExists && !runningRequest)) {\n                            if (!isLoading) {\n                                setIsLoading(true);\n                            }\n\n                            pathPromise[key] = true;\n\n                            return webdavClient.propfind(`c=${key}`, webDAVRequestConfig);\n                        }\n\n                        return Promise.resolve(null);\n                    })\n                    .filter(promise => !!promise)\n            )\n                .then(requests => {\n                    const newState = requests\n                        .map((request, index) => {\n                            if (request && request.responseXML != null) {\n                                const key = keyArray[index];\n                                const result: any = extractFilesData(request.responseXML);\n                                const sortedResult = sortBy(result, n => n.name).sort((n1, n2) => {\n                                    if (n1.type === \"directory\" && n2.type !== \"directory\") {\n                                        return -1;\n                                    }\n                                    if (n1.type !== \"directory\" && n2.type === \"directory\") {\n                                        return 1;\n                                    }\n                                    return 0;\n                                });\n\n                                return { [key]: sortedResult };\n                            }\n                            return {};\n                        })\n                        .reduce((prev, next) => {\n                            return { ...next, ...prev };\n                        }, {});\n                    setPathData(state => ({ ...state, ...newState }));\n                }, () => {\n                    // Nothing to do\n                })\n                .finally(() => {\n                    setIsLoading(false);\n                    keyArray.forEach(key => delete pathPromise[key]);\n                });\n        };\n\n        React.useEffect(() => {\n            if (rightKey) {\n                fetchData(rightKey);\n                setLeftSearch(\"\");\n                setRightSearch(\"\");\n            }\n            // eslint-disable-next-line react-hooks/exhaustive-deps\n        }, [rightKey, rightData]);\n\n        const currentPDH = (collectionPanel.item || {}).portableDataHash;\n        React.useEffect(() => {\n            if (currentPDH) {\n                fetchData([leftKey, rightKey], true);\n            }\n            // eslint-disable-next-line react-hooks/exhaustive-deps\n        }, [currentPDH]);\n\n        React.useEffect(() => {\n            if (rightData) {\n                const filtered = rightData.filter(({ name }) => name.indexOf(rightSearch) > -1);\n                dispatch(setCollectionFiles(filtered, false));\n            }\n        }, [rightData, dispatch, rightSearch]);\n\n        const handleRightClick = React.useCallback(\n            event => {\n                event.preventDefault();\n                let elem = event.target;\n\n                while (elem && elem.dataset && !elem.dataset.item) {\n                    elem = 
elem.parentNode;\n                }\n\n                if (!elem || !elem.dataset) {\n                    return;\n                }\n\n                const { id } = elem.dataset;\n\n                const item: CollectionFilesTreeItem = {\n                    id,\n                    data: rightData.find(elem => elem.id === id),\n                };\n\n                if (id && item.data) {\n                    onItemMenuOpen(event, item, isWritable);\n                }\n            },\n            [onItemMenuOpen, isWritable, rightData]\n        );\n\n        React.useEffect(() => {\n            let node = null;\n\n            if (parentRef?.current) {\n                node = parentRef.current;\n                (node as any).addEventListener(\"contextmenu\", handleRightClick);\n            }\n\n            return () => {\n                if (node) {\n                    (node as any).removeEventListener(\"contextmenu\", handleRightClick);\n                }\n            };\n        }, [parentRef, handleRightClick]);\n\n        const handleClick = React.useCallback(\n            (event: any) => {\n                let isCheckbox = false;\n                let isMoreButton = false;\n                let elem = event.target;\n\n                if (elem.type === \"checkbox\") {\n                    isCheckbox = true;\n                }\n                // The \"More options\" button click event could be triggered on its\n                // internal graphic element.\n                else if (\n                    (elem.dataset && elem.dataset.id === \"moreOptions\") ||\n                    (elem.parentNode && elem.parentNode.dataset && elem.parentNode.dataset.id === \"moreOptions\")\n                ) {\n                    isMoreButton = true;\n                }\n\n                while (elem && elem.dataset && !elem.dataset.item) {\n                    elem = elem.parentNode;\n                }\n\n                if (elem && elem.dataset && !isCheckbox && !isMoreButton) {\n                    const { parentPath, subfolderPath, breadcrumbPath, type } = elem.dataset;\n\n                    if (breadcrumbPath) {\n                        const index = path.indexOf(breadcrumbPath);\n                        setPath(state => [...state.slice(0, index + 1)]);\n                    }\n\n                    if (parentPath && type === \"directory\") {\n                        if (path.length > 1) {\n                            path.pop();\n                        }\n\n                        setPath(state => [...state, parentPath]);\n                    }\n\n                    if (subfolderPath && type === \"directory\") {\n                        setPath(state => [...state, subfolderPath]);\n                    }\n\n                    if (elem.dataset.id && type === \"file\") {\n                        const item = rightData.find(({ id }) => id === elem.dataset.id) || leftData.find(({ id }) => id === elem.dataset.id);\n                        const enhancedItem = servicesProvider.getServices().collectionService.extendFileURL(item);\n                        const fileUrl = sanitizeToken(\n                            getInlineFileUrl(enhancedItem.url, config.keepWebServiceUrl, config.keepWebInlineServiceUrl),\n                            true\n                        );\n                        window.open(fileUrl, \"_blank\", \"noopener\");\n                    }\n                }\n\n                if (isCheckbox) {\n                    const { id } = elem.dataset;\n                    const item = 
collectionPanelFiles[id];\n                    props.onSelectionToggle(event, item);\n                }\n                if (isMoreButton) {\n                    const { id } = elem.dataset;\n                    const item: any = {\n                        id,\n                        data: rightData.find(elem => elem.id === id),\n                    };\n                    onItemMenuOpen(event, item, isWritable);\n                }\n            },\n            [path, setPath, collectionPanelFiles, isWritable]\n        );\n\n        const getItemIcon = React.useCallback(\n            (type: string, activeClass: string | null) => {\n                let Icon = DefaultIcon;\n\n                switch (type) {\n                    case \"directory\":\n                        Icon = DirectoryIcon;\n                        break;\n                    case \"file\":\n                        Icon = FileIcon;\n                        break;\n                }\n\n                return (\n                    <ListItemIcon className={classNames(classes.listItemIcon, activeClass)}>\n                        <Icon />\n                    </ListItemIcon>\n                );\n            },\n            [classes]\n        );\n\n        const getActiveClass = React.useCallback(\n            name => {\n                return path[path.length - 1] === name ? classes.rowActive : null;\n            },\n            [path, classes]\n        );\n\n        const onOptionsMenuOpen = React.useCallback(\n            (ev, isWritable) => {\n                props.onOptionsMenuOpen(ev, isWritable);\n            },\n            // eslint-disable-next-line react-hooks/exhaustive-deps\n            [props.onOptionsMenuOpen]\n        );\n\n        return (\n            <div\n                data-cy=\"collection-files-panel\"\n                className={classes.filesPanel}\n                onClick={handleClick}\n                ref={parentRef}\n            >\n                <div className={classes.pathPanel}>\n                    <div className={classes.pathPanelPathWrapper}>\n                        {path.map((p: string, index: number) => (\n                            <span\n                                key={`${index}-${p}`}\n                                data-item=\"true\"\n                                className={classes.pathPanelItem}\n                                data-breadcrumb-path={p}\n                            >\n                                <span className={classes.rowActive}>{index === 0 ? \"Home\" : p}</span> <b>/</b>&nbsp;\n                            </span>\n                        ))}\n                    </div>\n                    <Tooltip\n                        className={classes.pathPanelMenu}\n                        title=\"More options\"\n                        disableFocusListener\n                    >\n                        <IconButton\n                            data-cy=\"collection-files-panel-options-btn\"\n                            onClick={ev => {\n                                onOptionsMenuOpen(ev, isWritable);\n                            }}\n                            size=\"large\">\n                            <MoreVerticalIcon />\n                        </IconButton>\n                    </Tooltip>\n                </div>\n                <div className={classes.wrapper} data-cy=\"collection-files-panel-content\">\n                    <div\n                        className={classNames(classes.leftPanel, path.length > 1 ? 
classes.leftPanelVisible : classes.leftPanelHidden)}\n                        data-cy=\"collection-files-left-panel\"\n                    >\n                        <div className={classes.filesHeader}>\n                            <Tooltip\n                                title=\"Go back\"\n                                className={path.length > 1 ? classes.backButton : classes.backButtonHidden}\n                            >\n                                <IconButton\n                                    onClick={() => setPath(state => [...state.slice(0, state.length - 1)])}\n                                    size=\"large\">\n                                    <BackIcon />\n                                </IconButton>\n                            </Tooltip>\n                            <div className={path.length > 1 ? classes.searchWrapper : classes.searchWrapperHidden}>\n                                <SearchInput\n                                    selfClearProp={leftKey}\n                                    label=\"Search\"\n                                    value={leftSearch}\n                                    onSearch={setLeftSearch}\n                                />\n                            </div>\n                        </div>\n                        <div className={classes.dataWrapper}>\n                            {leftData.length > 0 ? (\n                                <AutoSizer defaultWidth={0}>\n                                    {({ height, width }) => {\n                                        const filtered = leftData.filter(({ name }) => name.indexOf(leftSearch) > -1);\n                                        return !!filtered.length ? (\n                                            <FixedSizeList\n                                                height={height}\n                                                itemCount={filtered.length}\n                                                itemSize={35}\n                                                width={width}\n                                            >\n                                                {({ index, style }) => {\n                                                    const { id, type, name } = filtered[index];\n                                                    return (\n                                                        <div\n                                                            data-id={id}\n                                                            style={style}\n                                                            data-item=\"true\"\n                                                            data-type={type}\n                                                            data-parent-path={name}\n                                                            className={classNames(classes.row, getActiveClass(name))}\n                                                            key={id}\n                                                        >\n                                                            {getItemIcon(type, getActiveClass(name))}\n                                                            <div className={classes.rowName}>{name}</div>\n                                                            {getActiveClass(name) ? 
(\n                                                                <SidePanelRightArrowIcon\n                                                                    style={{ display: \"inline\", marginTop: \"5px\", marginLeft: \"5px\" }}\n                                                                />\n                                                            ) : null}\n                                                        </div>\n                                                    );\n                                                }}\n                                            </FixedSizeList>\n                                        ) : (\n                                            <div className={classes.rowEmpty}>No directories available</div>\n                                        );\n                                    }}\n                                </AutoSizer>\n                            ) : (\n                                <div\n                                    data-cy=\"collection-loader\"\n                                    className={classes.row}\n                                >\n                                    <CircularProgress\n                                        className={classes.loader}\n                                        size={30}\n                                    />\n                                </div>\n                            )}\n                        </div>\n                    </div>\n                    <div\n                        className={classes.rightPanel}\n                        data-cy=\"collection-files-right-panel\"\n                    >\n                        <div className={classes.filesHeader}>\n                            <div className={classes.searchWrapper}>\n                                <SearchInput\n                                    selfClearProp={rightKey}\n                                    label=\"Search\"\n                                    value={rightSearch}\n                                    onSearch={setRightSearch}\n                                />\n                            </div>\n                            {isWritable && (\n                                <Button\n                                    className={classes.uploadButton}\n                                    data-cy=\"upload-button\"\n                                    onClick={() => {\n                                        onUploadDataClick(rightKey === leftKey ? undefined : rightKey);\n                                    }}\n                                    variant=\"contained\"\n                                    color=\"primary\"\n                                    size=\"small\"\n                                >\n                                    <DownloadIcon className={classes.uploadIcon} />\n                                    Upload data\n                                </Button>\n                            )}\n                        </div>\n                        <div className={classes.dataWrapper}>\n                            {rightData && !isLoading ? (\n                                <AutoSizer defaultHeight={500}>\n                                    {({ height, width }) => {\n                                        const filtered = rightData.filter(({ name }) => name.indexOf(rightSearch) > -1);\n                                        return !!filtered.length ? 
(\n                                            <FixedSizeList\n                                                height={height}\n                                                itemCount={filtered.length}\n                                                itemSize={35}\n                                                width={width}\n                                            >\n                                                {({ index, style }) => {\n                                                    const { id, type, name, size } = filtered[index];\n\n                                                    return (\n                                                        <div\n                                                            style={style}\n                                                            data-id={id}\n                                                            data-item=\"true\"\n                                                            data-type={type}\n                                                            data-subfolder-path={name}\n                                                            className={classes.row}\n                                                            key={id}\n                                                        >\n                                                            <Checkbox\n                                                                color=\"primary\"\n                                                                className={classes.rowSelection}\n                                                                checked={collectionPanelFiles[id] ? collectionPanelFiles[id].value.selected : false}\n                                                            />\n                                                            &nbsp;\n                                                            {getItemIcon(type, null)}\n                                                            <div className={classes.rowName}>{name}</div>\n                                                            <span\n                                                                className={classes.rowName}\n                                                                style={{\n                                                                    marginLeft: \"auto\",\n                                                                    marginRight: \"1rem\",\n                                                                }}\n                                                            >\n                                                                {formatFileSize(size)}\n                                                            </span>\n                                                            <Tooltip\n                                                                title=\"More options\"\n                                                                disableFocusListener\n                                                            >\n                                                                <IconButton\n                                                                    data-id=\"moreOptions\"\n                                                                    data-cy=\"file-item-options-btn\"\n                                                                    className={classes.moreOptionsButton}\n                                                                    size=\"large\">\n                                                            
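{/* NOTE (added comment): data-id is repeated on the icon so clicks that land on the inner svg still resolve to the moreOptions button in handleClick */}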
        <MoreHorizontalIcon\n                                                                        data-id=\"moreOptions\"\n                                                                        className={classes.moreOptions}\n                                                                    />\n                                                                </IconButton>\n                                                            </Tooltip>\n                                                        </div>\n                                                    );\n                                                }}\n                                            </FixedSizeList>\n                                        ) : (\n                                            <div className={classes.rowEmpty}>This collection is empty</div>\n                                        );\n                                    }}\n                                </AutoSizer>\n                            ) : (\n                                <div className={classes.row}>\n                                    <CircularProgress\n                                        className={classes.loader}\n                                        size={30}\n                                    />\n                                </div>\n                            )}\n                        </div>\n                    </div>\n                </div>\n            </div>\n        );\n    })\n);\n"
  },
  {
    "path": "services/workbench2/src/components/column-selector/column-selector.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { ColumnSelector } from \"./column-selector\";\n\ndescribe(\"<ColumnSelector />\", () => {\n    it(\"shows only configurable columns\", () => {\n        const columns = [\n            {\n                name: \"Column 1\",\n                render: () => <span />,\n                selected: true,\n                configurable: true\n            },\n            {\n                name: \"Column 2\",\n                render: () => <span />,\n                selected: true,\n                configurable: true,\n            },\n            {\n                name: \"Column 3\",\n                render: () => <span />,\n                selected: true,\n                configurable: false\n            }\n        ];\n        cy.mount(<ColumnSelector columns={columns} onColumnToggle={cy.stub()} />);\n        cy.get('button[aria-label=\"Select columns\"]').click();\n        cy.get('[data-cy=column-selector-li]').should('have.length', 2);\n    });\n\n    it(\"renders checked checkboxes next to selected columns\", () => {\n        const columns = [\n            {\n                name: \"Column 1\",\n                render: () => <span />,\n                selected: true,\n                configurable: true\n            },\n            {\n                name: \"Column 2\",\n                render: () => <span />,\n                selected: false,\n                configurable: true\n            },\n            {\n                name: \"Column 3\",\n                render: () => <span />,\n                selected: true,\n                configurable: true\n            }\n        ];\n        cy.mount(<ColumnSelector columns={columns} onColumnToggle={cy.stub()} />);\n        cy.get('button[aria-label=\"Select columns\"]').click();\n        cy.get('input[type=checkbox]').should('have.length', 3);\n        cy.get('input[type=checkbox]').eq(0).should('be.checked');\n        cy.get('input[type=checkbox]').eq(1).should('not.be.checked');\n        cy.get('input[type=checkbox]').eq(2).should('be.checked');\n    });\n\n    it(\"calls onColumnToggle with clicked column\", () => {\n        const columns = [\n            {\n                name: \"Column 1\",\n                render: () => <span />,\n                selected: true,\n                configurable: true\n            }\n        ];\n        const onColumnToggle = cy.spy().as(\"onColumnToggle\");\n        cy.mount(<ColumnSelector columns={columns} onColumnToggle={onColumnToggle} />);\n        cy.get('button[aria-label=\"Select columns\"]').click();\n        cy.get('[data-cy=column-selector-li]').click();\n        cy.get('@onColumnToggle').should('have.been.calledWith', columns[0]);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/components/column-selector/column-selector.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { IconButton, Paper, List, Checkbox, ListItemText, ListItem, Tooltip } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport MenuIcon from \"@mui/icons-material/Menu\";\nimport { DataColumn, DataColumns } from '../data-table/data-column';\nimport { Popover } from \"../popover/popover\";\nimport { IconButtonProps } from '@mui/material/IconButton';\nimport { ArvadosTheme } from \"common/custom-theme\";\n\ninterface ColumnSelectorDataProps {\n    columns: DataColumns<any, any>;\n    onColumnToggle: (column: DataColumn<any, any>) => void;\n    className?: string;\n}\n\ntype CssRules = \"checkbox\" | \"listItem\" | \"listItemText\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    checkbox: {\n        width: 24,\n        height: 24\n    },\n    listItem: {\n        padding: 0,\n    },\n    listItemText: {\n        paddingLeft: '4px',\n    }\n});\n\nexport type ColumnSelectorProps = ColumnSelectorDataProps & WithStyles<CssRules>;\n\nexport const ColumnSelector = withStyles(styles)(\n    ({ columns, onColumnToggle, classes }: ColumnSelectorProps) =>\n        <Popover triggerComponent={ColumnSelectorTrigger}>\n            <Paper>\n                <List dense>\n                    {columns\n                        .filter(column => column.configurable)\n                        .map((column, index) =>\n                            <ListItem\n                                button\n                                key={index}\n                                className={classes.listItem}\n                                data-cy={'column-selector-li'}\n                                onClick={() => onColumnToggle(column)}>\n                                <Checkbox\n                                    disableRipple\n                                    color=\"primary\"\n                                    checked={column.selected}\n                                    className={classes.checkbox} />\n                                <ListItemText\n                                    className={classes.listItemText}>\n                                    {column.name}\n                                </ListItemText>\n                            </ListItem>\n                        )}\n                </List>\n            </Paper>\n        </Popover>\n);\n\nexport const ColumnSelectorTrigger = (props: IconButtonProps) =>\n    <Tooltip disableFocusListener title=\"Select columns\">\n        <IconButton {...props} size=\"large\">\n            <MenuIcon aria-label=\"Select columns\" data-cy=\"column-selector-button\" />\n        </IconButton>\n    </Tooltip>;\n"
  },
  {
    "path": "services/workbench2/src/components/conditional-tabs/conditional-tabs.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { ConditionalTabs, TabData } from \"./conditional-tabs\";\nimport { Tab } from \"@mui/material\";\n\ndescribe(\"<ConditionalTabs />\", () => {\n    let tabs = [];\n    let WrappedComponent;\n\n    beforeEach(() => {\n        tabs = [{\n            show: true,\n            label: \"Tab1\",\n            content: <div id=\"content1\">Content1</div>,\n        },{\n            show: false,\n            label: \"Tab2\",\n            content: <div id=\"content2\">Content2</div>,\n        },{\n            show: true,\n            label: \"Tab3\",\n            content: <div id=\"content3\">Content3</div>,\n        }];\n\n        //necessary to update the props of a component after mounting\n        WrappedComponent = ({ tabs }) => {\n            const [newTabs, setNewTabs] = React.useState(tabs);\n\n            window.updateProps = (newTabs) => {\n                setNewTabs(newTabs);\n            };\n\n            return <ConditionalTabs tabs={newTabs} />;\n        };\n    });\n\n    it(\"renders only visible tabs\", () => {\n        // given\n        cy.mount(<WrappedComponent tabs={tabs} />);\n\n        // expect 2 visible tabs\n        cy.get('button[role=tab]').should('have.length', 2);\n        cy.get('button[role=tab]').eq(0).should('contain', 'Tab1');\n        cy.get('button[role=tab]').eq(1).should('contain', 'Tab3');\n\n        // expect visible content 1 and tab 3 to be hidden but exist\n        // content 2 stays unrendered since the tab is hidden\n        cy.contains('Content1').should('exist');\n        cy.contains('Content2').should('not.exist');\n        cy.contains('Content3').should('have.attr', 'hidden');\n\n        // Show second tab\n        cy.window().then((win) => {\n            win.updateProps([...tabs, tabs[1].show = true]);\n        });\n\n        // Expect 3 visible tabs\n        cy.get('button[role=tab]').should('have.length', 3);\n        cy.get('button[role=tab]').eq(0).should('contain', 'Tab1');\n        cy.get('button[role=tab]').eq(1).should('contain', 'Tab2');\n        cy.get('button[role=tab]').eq(2).should('contain', 'Tab3');\n\n        // Expect visible content 1 and hidden content 2/3\n        cy.get('div#content1').should('contain', 'Content1');\n        cy.get('div#content1').should('not.have.attr', 'hidden');\n        cy.get('div#content2').should('have.attr', 'hidden');\n        cy.get('div#content3').should('have.attr', 'hidden');\n\n        // Click on Tab2 (position 1)\n        cy.get('button[role=tab]').eq(1).click();\n\n        // Expect 3 visible tabs\n        cy.get('button[role=tab]').should('have.length', 3);\n        cy.get('button[role=tab]').eq(0).should('contain', 'Tab1');\n        cy.get('button[role=tab]').eq(1).should('contain', 'Tab2');\n        cy.get('button[role=tab]').eq(2).should('contain', 'Tab3');\n\n        // Expect visible content2 and hidden content 1/3\n        cy.get('div#content2').should('contain', 'Content2');\n        cy.get('div#content1').should('have.attr', 'hidden');\n        cy.get('div#content2').should('not.have.attr', 'hidden');\n        cy.get('div#content3').should('have.attr', 'hidden');\n    });\n\n    it(\"resets selected tab on tab visibility change\", () => {\n        // given\n        cy.mount(<WrappedComponent tabs={tabs} />);\n\n        // Expect second tab to be Tab3\n        cy.get('button[role=tab]').eq(1).should('contain', 'Tab3');\n        // 
Click on Tab3 (position 2)\n        cy.get('button[role=tab]').eq(1).click();\n        cy.get('div#content3').should('contain', 'Content3');\n        cy.get('div#content1').should('have.attr', 'hidden');\n        cy.get('div#content2').should('not.exist');\n        cy.get('div#content3').should('not.have.attr', 'hidden');\n\n        // Show second tab\n        cy.window().then((win) => {\n            win.updateProps([...tabs, tabs[1].show = true]);\n        });\n\n        // Selected tab resets to 1, tabs 2/3 are hidden\n        cy.get('div#content1').should('contain', 'Content1');\n        cy.get('div#content1').should('not.have.attr', 'hidden');\n        cy.get('div#content2').should('have.attr', 'hidden');\n        cy.get('div#content3').should('have.attr', 'hidden');\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/components/conditional-tabs/conditional-tabs.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { ReactElement, useEffect, useState } from \"react\";\nimport { Tabs, Tab } from \"@mui/material\";\nimport { TabsProps } from \"@mui/material/Tabs\";\n\ninterface ComponentWithHidden {\n    hidden: boolean;\n};\n\nexport type TabData = {\n    show: boolean;\n    label: string;\n    content: ReactElement<ComponentWithHidden>;\n};\n\ntype ConditionalTabsProps = {\n    tabs: TabData[];\n};\n\nexport const ConditionalTabs = ({ tabs: tabData, ...props }: Omit<TabsProps, 'value' | 'onChange'> & ConditionalTabsProps) => {\n    const [tabState, setTabState] = useState(0);\n    const visibleTabs = tabData.filter(tab => tab.show);\n    const visibleTabNames = visibleTabs.map(tab => tab.label).join();\n\n    const handleTabChange = (event: React.MouseEvent<HTMLElement>, value: number) => {\n        setTabState(value);\n    };\n\n    // Reset tab to 0 when tab visibility changes\n    // (or if tab set change causes visible set to change)\n    useEffect(() => {\n        setTabState(0);\n    }, [visibleTabNames]);\n\n    return <>\n        <Tabs\n            {...props}\n            value={tabState}\n            onChange={handleTabChange} >\n            {visibleTabs.map(tab => <Tab key={tab.label} label={tab.label} data-cy='conditional-tab' />)}\n        </Tabs>\n\n        {visibleTabs.map((tab, i) => (\n            React.cloneElement(tab.content, {key: i, hidden: i !== tabState})\n        ))}\n    </>;\n};\n"
  },
  {
    "path": "services/workbench2/src/components/confirmation-dialog/confirmation-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button, DialogContentText } from \"@mui/material\";\nimport { WithDialogProps } from \"store/dialog/with-dialog\";\nimport { WarningIcon } from 'components/icon/icon';\n\nexport interface ConfirmationDialogDataProps {\n    title: string;\n    text: string;\n    info?: string;\n    cancelButtonLabel?: string;\n    confirmButtonLabel?: string;\n}\n\nexport interface ConfirmationDialogProps {\n    onConfirm: () => void;\n}\n\nexport const ConfirmationDialog = (props: ConfirmationDialogProps & WithDialogProps<ConfirmationDialogDataProps>) =>\n    <Dialog open={props.open}>\n        <div data-cy='confirmation-dialog'>\n            <DialogTitle>{props.data.title}</DialogTitle>\n            <DialogContent style={{ display: 'flex', alignItems: 'center' }}>\n                <WarningIcon />\n                <DialogContentText style={{ paddingLeft: '8px' }}>\n                    <span style={{display: 'block'}}>{props.data.text}</span>\n                    <span style={{display: 'block'}}>{props.data.info}</span>\n                </DialogContentText>\n            </DialogContent>\n            <DialogActions style={{ margin: '0px 24px 24px' }}>\n                <Button\n                    data-cy='confirmation-dialog-cancel-btn'\n                    variant='text'\n                    color='primary'\n                    onClick={props.closeDialog}>\n                    {props.data.cancelButtonLabel || 'Cancel'}\n                </Button>\n                <Button\n                    data-cy='confirmation-dialog-ok-btn'\n                    variant='contained'\n                    color='primary'\n                    type='submit'\n                    onClick={props.onConfirm}>\n                    {props.data.confirmButtonLabel || 'Ok'}\n                </Button>\n            </DialogActions>\n        </div>\n    </Dialog>;\n"
  },
  {
    "path": "services/workbench2/src/components/context-menu/context-menu.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\nimport React from \"react\";\nimport { Popover, List, ListItem, ListItemIcon, ListItemText, Divider } from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { DefaultTransformOrigin, createAnchorAt } from \"../popover/helpers\";\nimport { IconType } from \"../icon/icon\";\nimport { RootState } from \"store/store\";\nimport { ContextMenuResource, ContextMenuState } from \"store/context-menu/context-menu\";\nimport { ContextMenuActionSet } from \"views-components/context-menu/context-menu-action-set\";\nimport { ArvadosTheme } from \"common/custom-theme\";\nimport { CustomStyleRulesCallback } from \"common/custom-theme\";\n\nexport interface ContextMenuItem {\n    name: string;\n    icon?: IconType;\n    component?: React.ComponentType<any>;\n    filters?: ((state: RootState, resource: ContextMenuResource) => boolean)[]\n}\n\nexport type ContextMenuItemGroup = ContextMenuItem[];\n\nexport interface ContextMenuProps {\n    items: ContextMenuActionSet;\n    contextMenu: ContextMenuState;\n    onItemClick: (action: ContextMenuItem, resource: ContextMenuResource | undefined) => void;\n    onClose: () => void;\n}\n\ntype CssRules = \"nameRoot\"\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    nameRoot: {\n        display: 'flex',\n        flexDirection: 'column',\n        alignItems: 'center',\n        color: theme.palette.primary.main,\n    },\n});\n\nconst NameComponent = withStyles(styles)(({name, classes}: {name: string} & WithStyles<CssRules>) =>\n        <ListItem className={classes.nameRoot}>\n            {name.length > 30 ? name.slice(0, 28) + '...' : name}\n        </ListItem>)\n\nexport class ContextMenu extends React.PureComponent<ContextMenuProps> {\n    render() {\n        const { items, onClose, onItemClick } = this.props;\n        const { open, position, resource } = this.props.contextMenu;\n        const anchorEl = resource ? createAnchorAt(position) : undefined;\n        const name = resource?.name;\n        return <Popover\n            anchorEl={anchorEl}\n            open={open}\n            onClose={onClose}\n            transformOrigin={DefaultTransformOrigin}\n            anchorOrigin={DefaultTransformOrigin}\n            onContextMenu={this.handleContextMenu}>\n            <List data-cy='context-menu' dense>\n                {name && <NameComponent name={name} />}\n                {items.map((group, groupIndex) =>\n                    <React.Fragment key={groupIndex}>\n                        {group.map((item, actionIndex) =>\n                            item.component\n                                ? 
<item.component\n                                    key={actionIndex}\n                                    data-cy={item.name}\n                                    onClick={() => onItemClick(item, resource)} />\n                                : <ListItem\n                                    button\n                                    key={actionIndex}\n                                    data-cy={item.name}\n                                    onClick={() => onItemClick(item, resource)}>\n                                    {item.icon &&\n                                        <ListItemIcon>\n                                            <item.icon />\n                                        </ListItemIcon>}\n                                    {item.name &&\n                                        <ListItemText>\n                                            {item.name}\n                                        </ListItemText>}\n                                </ListItem>)}\n                        {\n                            items[groupIndex + 1] &&\n                            items[groupIndex + 1].length > 0 &&\n                            <Divider />\n                        }\n                    </React.Fragment>)}\n            </List>\n        </Popover>;\n    }\n\n    handleContextMenu = (event: React.MouseEvent<HTMLElement>) => {\n        event.preventDefault();\n        this.props.onClose();\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/components/copy-to-clipboard/copy-result-to-clipboard.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport copy from 'copy-to-clipboard';\n\ninterface CopyToClipboardProps {\n  getText: (() => string);\n  children: any\n  onCopy?(text: string, result: boolean): void;\n  options?: {\n    debug?: boolean;\n    message?: string;\n    format?: string; // MIME type\n  };\n}\n\nexport default class CopyResultToClipboard extends React.PureComponent<CopyToClipboardProps> {\n  static defaultProps = {\n    onCopy: undefined,\n    options: undefined\n  };\n\n  onClick = event => {\n    const {\n      getText,\n      onCopy,\n      children,\n      options\n    } = this.props;\n\n    const elem = React.Children.only(children);\n\n    const text = getText();\n\n    const result = copy(text, options);\n\n    if (onCopy) {\n      onCopy(text, result);\n    }\n\n    // Bypass onClick if it was present\n    if (elem && elem.props && typeof elem.props.onClick === 'function') {\n      elem.props.onClick(event);\n    }\n  };\n\n\n  render() {\n    const {\n      getText: _getText,\n      onCopy: _onCopy,\n      options: _options,\n      children,\n      ...props\n    } = this.props;\n    const elem = React.Children.only(children);\n\n    return React.cloneElement(elem, {...props, onClick: this.onClick});\n  }\n}\n"
  },
  {
    "path": "services/workbench2/src/components/copy-to-clipboard-snackbar/copy-to-clipboard-snackbar.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect, DispatchProp } from 'react-redux';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Tooltip } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport CopyToClipboard from 'react-copy-to-clipboard';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { CopyIcon } from 'components/icon/icon';\n\ntype CssRules = 'copyIcon';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    copyIcon: {\n        marginLeft: theme.spacing(1),\n        color: theme.palette.grey['500'],\n        cursor: 'pointer',\n        display: 'inline',\n        '& svg': {\n            fontSize: '1rem',\n            verticalAlign: 'middle',\n        },\n    },\n});\n\ninterface CopyToClipboardDataProps {\n    children?: React.ReactNode;\n    value: string;\n}\n\ntype CopyToClipboardProps = CopyToClipboardDataProps & WithStyles<CssRules> & DispatchProp;\n\nexport const CopyToClipboardSnackbar = connect()(\n    withStyles(styles)(\n        class CopyToClipboardSnackbar extends React.Component<CopyToClipboardProps> {\n            onCopy = () => {\n                this.props.dispatch(\n                    snackbarActions.OPEN_SNACKBAR({\n                        message: 'Copied',\n                        hideDuration: 2000,\n                        kind: SnackbarKind.SUCCESS,\n                    })\n                );\n            };\n\n            render() {\n                const { children, value, classes } = this.props;\n                return (\n                    <Tooltip title='Copy link to clipboard' onClick={(ev) => ev.stopPropagation()}>\n                        <span className={classes.copyIcon}>\n                            <CopyToClipboard text={value} onCopy={this.onCopy}>\n                                {children || <CopyIcon />}\n                            </CopyToClipboard>\n                        </span>\n                    </Tooltip>\n                );\n            }\n        }\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/components/dashboard/dashboard-item-row.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { CSSProperties } from 'react';\nimport { Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { openContextMenuOnlyFromUuid } from 'store/context-menu/context-menu-actions';\nimport { navigateTo } from 'store/navigation/navigation-action';\n\nexport const DashboardColumnNames = {\n    STATUS: 'status',\n    NAME: 'Name',\n    MODIFIED_AT: 'last modified',\n    LAST_VISITED: 'last visited',\n    TYPE: 'type',\n    STARTED_AT: 'started at',\n}\n\ntype CssRules = 'root' | 'columns';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        padding: '8px',\n        margin: '4px 0',\n        width: '100%',\n        backgroundColor: theme.palette.background.paper,\n        borderRadius: '8px',\n        boxShadow: '0 1px 3px rgba(0,0,0,0.2)',\n        display: 'flex',\n        alignItems: 'center',\n        justifyContent: 'space-between',\n        '&:hover': {\n            background: theme.palette.grey[200],\n        },\n        cursor: 'pointer',\n    },\n    columns: {\n        display: 'flex',\n    },\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch): Pick<DashboardItemRowProps, 'navTo' | 'openContextMenu'> => ({\n    navTo: (uuid: string) => dispatch<any>(navigateTo(uuid)),\n    openContextMenu: (event: React.MouseEvent<HTMLElement>, uuid: string) => dispatch<any>(openContextMenuOnlyFromUuid(event, uuid)),\n});\n\nexport type DashboardItemRowStyles = Partial<Record<keyof typeof DashboardColumnNames, CSSProperties>>;\n\ntype DashboardItemRowProps = {\n    item: GroupContentsResource;\n    columns: Partial<Record<keyof typeof DashboardColumnNames, React.ReactElement<any>>>;\n    forwardStyles?: DashboardItemRowStyles;\n    navTo: (uuid: string) => void,\n    openContextMenu: (event: React.MouseEvent, uuid: string) => void;\n};\n\nexport const DashboardItemRow = connect(null, mapDispatchToProps)(\n    withStyles(styles)(({ item, columns, classes, forwardStyles, navTo, openContextMenu }: DashboardItemRowProps & WithStyles<CssRules>) => {\n\n        const handleContextMenu = (event: React.MouseEvent) => {\n                event.preventDefault();\n                event.stopPropagation();\n                openContextMenu(event, item.uuid);\n            };\n\n        return (\n            <div className={classes.root} onContextMenu={handleContextMenu} onClick={() => navTo(item.uuid)} data-cy={'dashboard-item-row'}>\n                <span>{columns[DashboardColumnNames.NAME]}</span>\n                <span className={classes.columns}>\n                    {Object.entries(columns).map(([key, element]) => {\n                        if (key === DashboardColumnNames.NAME) return null;\n                        return (<span key={key} style={forwardStyles ? forwardStyles[key] : undefined}>\n                            {element}\n                        </span>\n                    )})}\n                </span>\n            </div>\n        );\n    })\n);\n"
  },
  {
    "path": "services/workbench2/src/components/dashboard/dashboard.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useEffect } from 'react';\nimport { Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithStyles } from '@mui/styles';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { FavePinsSection } from './favorite-pins/favorite-pins-section';\nimport { RecentWorkflowRunsSection } from './recent-workflow-runs';\nimport { RecentlyVisitedSection } from './recently-visited';\nimport { setDashboardBreadcrumbs } from 'store/breadcrumbs/breadcrumbs-actions';\n\ntype CssRules = 'root' | 'section';\n\nconst styles: CustomStyleRulesCallback<CssRules> = () => ({\n    root: {\n        width: '102%',\n        height: '100%',\n        display: 'flex',\n        flexDirection: 'column',\n        marginLeft: '-1rem',\n        marginRight: '-2rem',\n        padding: 0,\n        paddingTop: '0.5rem',\n        overflowY: 'scroll'\n    },\n    section : {\n        paddingBottom: '1rem'\n    }\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch): DashboardProps => ({\n    setDashboardBreadcrumbs: () => dispatch<any>(setDashboardBreadcrumbs()),\n})\n\ntype DashboardProps = {\n    setDashboardBreadcrumbs: () => void;\n};\n\n\nexport const Dashboard = connect(null, mapDispatchToProps)(\n    withStyles(styles)(({setDashboardBreadcrumbs, classes}: DashboardProps & WithStyles<CssRules>) => {\n\n    useEffect(() => {\n        setDashboardBreadcrumbs();\n    }, [setDashboardBreadcrumbs]);\n\n    return (\n        <section className={classes.root} data-cy=\"dashboard-root\">\n            <section className={classes.section} data-cy=\"dashboard-section\">\n                <FavePinsSection />\n            </section>\n            <section className={classes.section} data-cy=\"dashboard-section\">\n                <RecentlyVisitedSection />\n            </section>\n            <section className={classes.section} data-cy=\"dashboard-section\">\n                <RecentWorkflowRunsSection />\n            </section>\n        </section>\n    );\n}));\n"
  },
  {
    "path": "services/workbench2/src/components/dashboard/favorite-pins/favorite-pins-item.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Tooltip } from '@mui/material';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithStyles } from '@mui/styles';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { connect } from 'react-redux';\nimport { Dispatch } from 'redux';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport StarIcon from '@mui/icons-material/Star';\nimport { renderIcon } from 'views-components/data-explorer/renderers';\nimport { openContextMenuOnlyFromUuid } from 'store/context-menu/context-menu-actions';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { navigateTo } from 'store/navigation/navigation-action';\nimport { toggleFavorite } from 'store/favorites/favorites-actions';\n\ntype CssRules = 'item' | 'name' | 'icon' | 'namePlate' | 'star';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    item: {\n        height: '3.5rem',\n        marginTop: '0',\n        padding: theme.spacing(1),\n        backgroundColor: theme.palette.background.paper,\n        borderRadius: '8px',\n        display: 'flex',\n        alignItems: 'center',\n        justifyContent: 'center',\n        position: 'relative',\n        boxShadow: '0 1px 3px rgba(0,0,0,0.2)',\n        textAlign: 'center',\n        overflow: 'hidden',\n        boxSizing: 'border-box',\n        cursor: 'pointer',\n        '&:hover': {\n            background: theme.palette.grey[200],\n        },\n    },\n    name: {\n        width: '100%',\n        fontSize: '0.875rem',\n        textAlign: 'left',\n        lineHeight: '1.2',\n        maxHeight: '2.5rem',\n        overflow: 'hidden',\n        textOverflow: 'ellipsis',\n        display: '-webkit-box',\n        WebkitLineClamp: 2,\n        WebkitBoxOrient: 'vertical',\n    },\n    icon: {\n        color: theme.customs.colors.grey700,\n        marginRight: '0.5rem',\n    },\n    namePlate: {\n        width: '80%',\n        display: 'flex',\n        flexDirection: 'column',\n    },\n    star: {\n        fontSize: '1.25rem',\n        color: theme.customs.colors.grey700,\n        marginLeft: '0.5rem',\n    },\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch): Omit<FavePinItemProps, 'item'> => ({\n    navTo: (uuid: string) => dispatch<any>(navigateTo(uuid)),\n    toggleFavorite: (item: GroupContentsResource) => dispatch<any>(toggleFavorite({ uuid: item.uuid, name: item.name })),\n    openContextMenu: (ev: React.MouseEvent<HTMLElement>, uuid: string) => dispatch<any>(openContextMenuOnlyFromUuid(ev, uuid)),\n});\n\ntype FavePinItemProps = {\n    item: GroupContentsResource,\n    navTo: (uuid: string) => void,\n    toggleFavorite: (item: GroupContentsResource) => void,\n    openContextMenu: (event: React.MouseEvent, uuid: string) => void\n};\n\nexport const FavePinItem = connect(null, mapDispatchToProps)(\n    withStyles(styles)(({ item, openContextMenu, navTo, toggleFavorite, classes }: FavePinItemProps & WithStyles<CssRules>) => {\n\n    const handleContextMenu = (event: React.MouseEvent) => {\n        event.preventDefault();\n        event.stopPropagation();\n        openContextMenu(event, item.uuid);\n    };\n\n    const handleToggleFavorite = (event: React.MouseEvent) => {\n        event.preventDefault();\n        event.stopPropagation();\n        toggleFavorite(item);\n    };\n\n    return (\n        <div data-cy='favorite-pin'\n            
className={classes.item}\n            onContextMenu={handleContextMenu}\n            onClick={() => navTo(item.uuid)}\n            >\n            <div className={classes.icon}>{renderIcon(item)}</div>\n            <div className={classes.namePlate}>\n                <div className={classes.name}>{item.name}</div>\n            </div>\n            <Tooltip title='Remove from Favorites' onClick={handleToggleFavorite}>\n                <StarIcon data-cy={`${item.uuid}-star`} className={classes.star} />\n            </Tooltip>\n        </div>\n    );\n}));\n"
  },
  {
    "path": "services/workbench2/src/components/dashboard/favorite-pins/favorite-pins-section.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useEffect, useState } from 'react';\nimport { isEqual } from 'lodash';\nimport { Collapse, Grid } from '@mui/material';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithStyles } from '@mui/styles';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { ExpandChevronRight } from 'components/expand-chevron-right/expand-chevron-right';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { FavePinItem } from './favorite-pins-item';\nimport { LinkResource } from 'models/link';\nimport { ResourcesState, getPopulatedResources, getResource } from 'store/resources/resources';\n\ntype CssRules = 'root' | 'title' | 'hr' | 'list';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n    },\n    title: {\n        margin: '0 1rem',\n        padding: '4px',\n        cursor: 'pointer',\n    },\n    hr: {\n        marginTop: '0',\n        marginBottom: '0',\n    },\n    list: {\n        marginTop: '0.5rem',\n        paddingLeft: '1rem',\n        width: '98.5%',\n    },\n});\n\nconst mapStateToProps = (state: RootState): Pick<FavePinsSectionProps, 'faves' | 'resources'> => {\n    return {\n        faves: state.dataExplorer.favoritePins?.items || [],\n        resources: state.resources,\n    };\n};\n\ntype FavePinsSectionProps = {\n    faves: string[];\n    resources: ResourcesState;\n};\n\nexport const FavePinsSection = connect(\n    mapStateToProps\n)(\n    withStyles(styles)(\n        React.memo(({ faves, resources, classes }: FavePinsSectionProps & WithStyles<CssRules>) => {\n            const [items, setItems] = useState<GroupContentsResource[]>([]);\n            const [isOpen, setIsOpen] = useState(true);\n\n            useEffect(() => {\n                const faveLinks = faves.reduce((acc: LinkResource[], fave: string): LinkResource[] => {\n                        const faveLink = getResource<LinkResource>(fave)(resources)\n                        if (faveLink) acc.push(faveLink);\n                        return acc;\n                    }, []);\n                const sortedFaves = faveLinks.sort((a, b) => b.createdAt.localeCompare(a.createdAt))\n                setItems(getPopulatedResources(sortedFaves.map(item => item.headUuid), resources));\n            }, [faves, resources]);\n\n            return (\n                <div className={classes.root}>\n                    <div\n                        className={classes.title}\n                        onClick={() => setIsOpen(!isOpen)}\n                    >\n                        <span>Favorites</span>\n                        <ExpandChevronRight expanded={isOpen} />\n                        <hr className={classes.hr} />\n                    </div>\n                    <Collapse in={isOpen}>\n                        <div className={classes.list}>\n                            <Grid\n                                container\n                                spacing={2}\n                                direction='row'\n                                justifyContent='flex-start'\n                                alignItems='flex-start'\n                                >\n                                {items.map((item) => (\n                          
          <Grid item xs={12} sm={6} md={5} lg={4} xl={3} key={item.uuid}>\n                                        <FavePinItem\n                                            item={item}\n                                        />\n                                    </Grid>\n                                ))}\n                            </Grid>\n                        </div>\n                    </Collapse>\n                </div>\n            );\n        }, preventRerender)\n    )\n);\n\n// return true to prevent re-render, false to allow re-render\nfunction preventRerender(prevProps: FavePinsSectionProps, nextProps: FavePinsSectionProps) {\n    if (!isEqual(prevProps.faves, nextProps.faves)) {\n        return false;\n    }\n    if (!isEqual(prevProps.resources, nextProps.resources)) {\n        return false;\n    }\n    return true;\n}\n\n"
  },
  {
    "path": "services/workbench2/src/components/dashboard/recent-workflow-runs.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useState } from 'react';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithStyles } from '@mui/styles';\nimport { Collapse } from '@mui/material';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { ResourceName } from 'views-components/data-explorer/renderers';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { ExpandChevronRight } from 'components/expand-chevron-right/expand-chevron-right';\nimport { DashboardItemRow, DashboardColumnNames, DashboardItemRowStyles } from 'components/dashboard/dashboard-item-row';\nimport { ResourceStatus } from 'views-components/data-explorer/renderers';\nimport { ProcessResource } from 'models/process';\n\ntype CssRules = 'root' | 'subHeader' | 'titleBar' | 'headers' | 'startedAtHead' | 'hr' | 'list' | 'item';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n    },\n    subHeader: {\n        margin: '0 1rem',\n        padding: '4px',\n    },\n    titleBar: {\n        display: 'flex',\n        justifyContent: 'space-between',\n        cursor: 'pointer',\n    },\n    headers: {\n        display: 'flex',\n    },\n    startedAtHead: {\n        fontSize: '0.875rem',\n    },\n    hr: {\n        marginTop: '0',\n        marginBottom: '0',\n    },\n    list: {\n        display: 'flex',\n        flexWrap: 'wrap',\n        justifyContent: 'flex-start',\n        width: '100%',\n        marginLeft: '-1rem',\n    },\n    item: {\n        padding: '8px',\n        margin: '4px 0',\n        width: '100%',\n        background: '#fafafa',\n        borderRadius: '8px',\n        boxShadow: '0 1px 3px rgba(0,0,0,0.2)',\n        display: 'flex',\n        alignItems: 'center',\n        justifyContent: 'space-between',\n        '&:hover': {\n            background: 'lightgray',\n        },\n    },\n});\n\n// pass any styles to child elements\nconst forwardStyles: DashboardItemRowStyles = {\n    [DashboardColumnNames.STATUS]: {\n        marginRight: '1rem',\n        width: '12rem',\n        display: 'flex',\n        justifyContent: 'flex-end',\n    },\n    [DashboardColumnNames.STARTED_AT]: {\n        width: '12rem',\n        display: 'flex',\n        justifyContent: 'flex-end',\n    },\n}\n\nconst mapStateToProps = (state: RootState): Pick<RecentWorkflowRunsProps, 'items'> => {\n    const selection = (state.dataExplorer.recentWorkflowRuns?.items || []);\n    const recents = selection.map(uuid => state.resources[uuid] as ProcessResource).slice(0, 12);;\n    return {\n        items: recents\n    };\n};\n\ntype RecentWorkflowRunsProps = {\n    items: ProcessResource[];\n};\n\nexport const RecentWorkflowRunsSection = connect(mapStateToProps)(\n    withStyles(styles)(({items, classes}: RecentWorkflowRunsProps & WithStyles<CssRules>) => {\n\n        const [isOpen, setIsOpen] = useState(true);\n\n        return (\n            <div className={classes.root}>\n                <div className={classes.subHeader} onClick={() => setIsOpen(!isOpen)}>\n                    <span className={classes.titleBar}>\n                        <span>\n                            <span>Recent Workflow Runs</span>\n                            <ExpandChevronRight expanded={isOpen} />\n                        </span>\n                        {isOpen &&\n                          
  <span className={classes.headers}>\n                                <div className={classes.startedAtHead}>started at</div>\n                            </span>}\n                    </span>\n                    <hr className={classes.hr} />\n                </div>\n                <Collapse in={isOpen}>\n                    <ul className={classes.list}>\n                        {items.sort((a, b) => new Date(b.createdAt).getTime() - new Date(a.createdAt).getTime())\n                            .map(item =>\n                            <DashboardItemRow\n                                item={item}\n                                columns={\n                                    {\n                                        [DashboardColumnNames.NAME]: <ResourceName uuid={item.uuid} />,\n                                        [DashboardColumnNames.STATUS]: <ResourceStatus uuid={item.uuid} />,\n                                        [DashboardColumnNames.STARTED_AT]: <span>{new Date(item.createdAt).toLocaleString()}</span>,\n                                    }\n                                }\n                                forwardStyles={forwardStyles}\n                            />)}\n                    </ul>\n                </Collapse>\n            </div>\n        )\n}));"
  },
  {
    "path": "services/workbench2/src/components/dashboard/recently-visited.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useState, useEffect } from 'react';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithStyles } from '@mui/styles';\nimport { Collapse } from '@mui/material';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { ExpandChevronRight } from 'components/expand-chevron-right/expand-chevron-right';\nimport { ResourcesState, getPopulatedResources } from 'store/resources/resources';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { DashboardItemRow, DashboardColumnNames, DashboardItemRowStyles } from 'components/dashboard/dashboard-item-row';\nimport { RecentUuid } from 'models/user';\nimport { ResourceName } from 'views-components/data-explorer/renderers';\nimport { formatDateTime } from 'common/formatters';\n\ntype CssRules = 'root' | 'subHeader' | 'titleBar' | 'lastVisHead' | 'hr' | 'list';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n    },\n    subHeader: {\n        margin: '0 1rem',\n        padding: '4px',\n    },\n    titleBar: {\n        display: 'flex',\n        justifyContent: 'space-between',\n        cursor: 'pointer',\n    },\n    lastVisHead: {\n        fontSize: '0.875rem',\n    },\n    hr: {\n        marginTop: '0',\n        marginBottom: '0',\n    },\n    list: {\n        display: 'flex',\n        flexWrap: 'wrap',\n        justifyContent: 'flex-start',\n        width: '100%',\n        marginLeft: '-1rem',\n    },\n});\n\n// pass any styles to child elements\nconst forwardStyles: DashboardItemRowStyles = {\n    [DashboardColumnNames.LAST_VISITED]: {\n        marginLeft: '2rem',\n        width: '12rem',\n        display: 'flex',\n        justifyContent: 'flex-end',\n    },\n}\n\nconst mapStateToProps = (state: RootState) => {\n    return {\n        recents: state.auth.user?.prefs?.wb?.recentUuids || [],\n        resources: state.resources,\n    };\n};\n\ntype RecentlyVisitedProps = {\n    recents: RecentUuid[],\n    resources: ResourcesState\n};\n\nexport const RecentlyVisitedSection = connect(mapStateToProps)\n    (withStyles(styles)(\n        ({recents, resources, classes}: RecentlyVisitedProps & WithStyles<CssRules>) => {\n\n            const [items, setItems] = useState<GroupContentsResource[]>([]);\n            const [isOpen, setIsOpen] = useState(true);\n\n            useEffect(() => {\n                const recentUuids = recents.map(recent => recent.uuid);\n                setItems(getPopulatedResources(recentUuids, resources));\n            }, [recents, resources]);\n\n            return (\n                <div className={classes.root}>\n                    <div className={classes.subHeader} onClick={() => setIsOpen(!isOpen)}>\n                        <span className={classes.titleBar}>\n                            <span>\n                                <span>Recently Visited</span>\n                                <ExpandChevronRight expanded={isOpen} />\n                            </span>\n                            {isOpen &&<span className={classes.lastVisHead}>last visited</span>}\n                        </span>\n                        <hr className={classes.hr} />\n                    </div>\n                    <Collapse in={isOpen}>\n             
           <ul className={classes.list}>\n                            {items.map(item =>\n                                <DashboardItemRow\n                                    key={item.uuid}\n                                    item={item}\n                                    columns={\n                                        {\n                                            [DashboardColumnNames.NAME]: <ResourceName uuid={item.uuid} />,\n                                            [DashboardColumnNames.LAST_VISITED]: <span>{getLastVisitedDate(item.uuid, recents)}</span>,\n                                        }\n                                    }\n                                    forwardStyles={forwardStyles}\n                                />\n                            )}\n                        </ul>\n                    </Collapse>\n                </div>\n            )\n        })\n    );\n\nconst getLastVisitedDate = (targetUuid: string, recents: RecentUuid[]) => {\n    const targetRecent = recents.find(recent => recent.uuid === targetUuid);\n    if (targetRecent) {\n        return formatDateTime(targetRecent.lastVisited);\n    }\n    return '';\n}\n"
  },
  {
    "path": "services/workbench2/src/components/data-explorer/data-explorer.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\n\nimport { DataExplorer } from \"./data-explorer\";\nimport { DataTableFetchMode } from \"../data-table/data-table\";\nimport { ProjectIcon } from \"../icon/icon\";\nimport { SortDirection } from \"../data-table/data-column\";\nimport { combineReducers, createStore } from \"redux\";\nimport { Provider } from \"react-redux\";\nimport { ThemeProvider } from \"@mui/material\";\nimport { CustomTheme } from \"common/custom-theme\";\n\ndescribe(\"<DataExplorer />\", () => {\n    let store;\n    beforeEach(() => {\n        const initialMSState = {\n            multiselect: {\n                checkedList: {},\n                isVisible: false,\n            },\n            resources: {},\n        };\n        store = createStore(\n            combineReducers({\n                multiselect: (state = initialMSState.multiselect, action) => state,\n                resources: (state = initialMSState.resources, action) => state,\n            })\n        );\n    });\n\n    it(\"communicates with <SearchInput/>\", () => {\n        const onSearch = cy.stub().as(\"onSearch\");\n        const onSetColumns = cy.stub();\n\n        cy.mount(\n            <Provider store={store}>\n              <ThemeProvider theme={CustomTheme}>\n                <DataExplorer\n                    {...mockDataExplorerProps()}\n                    items={[{ name: \"item 1\" }]}\n                    searchValue=\"search value\"\n                    onSearch={onSearch}\n                    onSetColumns={onSetColumns}\n                />\n              </ThemeProvider>\n            </Provider>\n        );\n        cy.get('input[type=text]').should('have.value', 'search value');\n        cy.get('input[type=text]').clear();\n        cy.get('input[type=text]').type('new value');\n        cy.get('@onSearch').should('have.been.calledWith', 'new value');\n    });\n\n    it(\"communicates with <ColumnSelector/>\", () => {\n        const onColumnToggle = cy.spy().as(\"onColumnToggle\");\n        const onSetColumns = cy.stub();\n        const columns = [{ name: \"Column 1\", render: cy.stub(), selected: true, configurable: true, sortDirection: SortDirection.ASC, filters: {}}];\n        cy.mount(\n            <Provider store={store}>\n              <ThemeProvider theme={CustomTheme}>\n                <DataExplorer\n                    {...mockDataExplorerProps()}\n                    columns={columns}\n                    onColumnToggle={onColumnToggle}\n                    items={[{ name: \"item 1\" }]}\n                    onSetColumns={onSetColumns}\n                />\n              </ThemeProvider>\n            </Provider>\n        );\n        cy.get('[data-cy=column-selector-button]').should('exist').click();\n        cy.get('[data-cy=column-selector-li]').contains('Column 1').should('exist').click();\n        cy.get('@onColumnToggle').should('have.been.calledWith', columns[0]);\n    });\n\n    it(\"communicates with <DataTable/>\", () => {\n        const onFiltersChange = cy.spy().as(\"onFiltersChange\");\n        const onSortToggle = cy.spy().as(\"onSortToggle\");\n        const onRowClick = cy.spy().as(\"onRowClick\");\n        const onSetColumns = cy.stub();\n        const filters = { Filters : {\n            id: 'Filters id',\n            active: false,\n            children: ['Filter 1', 'Filter 2'],\n            expanded: false,\n            initialState: true,\n            parent: 
\"\",\n            selected: false,\n            status: \"LOADED\",\n            value: { name: 'Filters'}\n        } };\n        const columns = [\n            { name: \"Column 1\", render: cy.stub(), selected: true, configurable: true, sortDirection: SortDirection.ASC, filters },\n            { name: \"Column 2\", render: cy.stub(), selected: true, configurable: true, sortDirection: SortDirection.ASC, filters: {}, sort: true }\n        ];\n        const items = [{ name: \"item 1\" }];\n        cy.mount(\n            <Provider store={store}>\n              <ThemeProvider theme={CustomTheme}>\n                <DataExplorer\n                    {...mockDataExplorerProps()}\n                    columns={columns}\n                    items={items}\n                    onFiltersChange={onFiltersChange}\n                    onSortToggle={onSortToggle}\n                    onRowClick={onRowClick}\n                    onSetColumns={onSetColumns}\n                />\n              </ThemeProvider>\n            </Provider>\n        );\n        //check if the table and column are rendered\n        cy.get('[data-cy=data-table]').should('exist');\n        cy.get('[data-cy=data-table]').contains('Column 1').should('exist');\n        //check onRowClick\n        cy.get('[data-cy=data-table-row]').should('exist');\n        cy.get('[data-cy=data-table-row]').click();\n        cy.get('@onRowClick').should('have.been.calledWith', items[0]);\n        //check onFiltersChange\n        cy.contains('Column 1').click();\n        cy.get('[data-cy=tree-li]').contains('Filters').click();\n        cy.get('@onFiltersChange').should('have.been.calledWith', filters, columns[0] );\n        cy.contains('Close').click();\n        //check onSortToggle\n        cy.contains('Column 2').click();\n        cy.get('@onSortToggle').should('have.been.calledWith', columns[1]);\n    });\n\n    it(\"communicates with <TablePagination/>\", () => {\n        const onPageChange = cy.spy().as(\"onPageChange\");\n        const onChangeRowsPerPage = cy.spy().as(\"onChangeRowsPerPage\");\n        const onSetColumns = cy.stub();\n        cy.mount(\n            <Provider store={store}>\n              <ThemeProvider theme={CustomTheme}>\n                <DataExplorer\n                    {...mockDataExplorerProps()}\n                    items={hundredItems}\n                    itemsAvailable={100}\n                    page={0}\n                    rowsPerPage={50}\n                    rowsPerPageOptions={[10, 20, 50, 100]}\n                    onPageChange={onPageChange}\n                    onChangeRowsPerPage={onChangeRowsPerPage}\n                    onSetColumns={onSetColumns}\n                />\n              </ThemeProvider>\n            </Provider>\n        );\n        //check if the pagination is rendered\n        cy.get('[data-cy=table-pagination]').should('exist');\n        cy.get('[data-cy=table-pagination]').contains('1-50 of 100').should('exist');\n        cy.get('p').contains('Rows per page:').should('exist');\n        //check onPageChange\n        cy.get('button[title=\"Go to next page\"]').should('exist').click();\n        cy.get('@onPageChange').should('have.been.calledWith', 1);\n        //check onChangeRowsPerPage\n        cy.get('input[value=50]').should('exist').parent().click();\n        cy.get('li[data-value=10]').should('exist').click();\n        cy.get('@onChangeRowsPerPage').should('have.been.calledWith', 10);\n    });\n});\n\nconst mockDataExplorerProps = () => ({\n    fetchMode: DataTableFetchMode.PAGINATED,\n    
columns: [],\n    items: [],\n    itemsAvailable: 0,\n    contextActions: [],\n    searchValue: \"\",\n    page: 0,\n    rowsPerPage: 0,\n    rowsPerPageOptions: [0],\n    onSearch: cy.stub(),\n    onFiltersChange: cy.stub(),\n    onSortToggle: cy.stub(),\n    onRowClick: cy.stub(),\n    onRowDoubleClick: cy.stub(),\n    onColumnToggle: cy.stub(),\n    onPageChange: cy.stub(),\n    onChangeRowsPerPage: cy.stub(),\n    onContextMenu: cy.stub(),\n    defaultIcon: ProjectIcon,\n    onSetColumns: cy.stub(),\n    onLoadMore: cy.stub(),\n    defaultMessages: [\"testing\"],\n    contextMenuColumn: true,\n    setCheckedListOnStore: cy.stub(),\n    toggleMSToolbar: cy.stub(),\n    isMSToolbarVisible: false,\n    checkedList: {},\n});\n\nconst hundredItems = Array.from({ length: 100 }, (v, i) => ({ name: `item ${i}` }));"
  },
  {
    "path": "services/workbench2/src/components/data-explorer/data-explorer.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport {\n    Grid,\n    Paper,\n    Toolbar,\n    TablePagination,\n    IconButton,\n    Tooltip,\n    Button,\n    Typography,\n} from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ColumnSelector } from \"components/column-selector/column-selector\";\nimport { DataColumns } from \"components/data-table/data-column\";\nimport { DataTable, DataTableFetchMode } from \"components/data-table/data-table\";\nimport { DataColumn } from \"components/data-table/data-column\";\nimport { SearchInput } from \"components/search-input/search-input\";\nimport { ArvadosTheme } from \"common/custom-theme\";\nimport { MultiselectToolbar } from \"components/multiselect-toolbar/MultiselectToolbar\";\nimport { TCheckedList } from \"components/data-table/data-table\";\nimport { createTree } from \"models/tree\";\nimport { DataTableFilters } from \"components/data-table-filters/data-table-filters\";\nimport { IconType, MoreVerticalIcon } from \"components/icon/icon\";\nimport { PaperProps } from \"@mui/material/Paper\";\nimport { MPVPanelProps } from \"components/multi-panel-view/multi-panel-view\";\nimport classNames from \"classnames\";\nimport { InlinePulser } from \"components/loading/inline-pulser\";\nimport { isMoreThanOneSelected } from \"store/multiselect/multiselect-actions\";\nimport { ProjectResource } from \"models/project\";\nimport { Process } from \"store/processes/process\";\nimport { ProcessStatusCounts, isAllProcessesPanel, isSharedWithMePanel } from \"store/subprocess-panel/subprocess-panel-actions\";\nimport { SUBPROCESS_PANEL_ID, isProcess } from \"store/subprocess-panel/subprocess-panel-actions\";\nimport { PROJECT_PANEL_RUN_ID } from \"store/project-panel/project-panel-action-bind\";\nimport { ALL_PROCESSES_PANEL_ID } from \"store/all-processes-panel/all-processes-panel-action\";\nimport { WORKFLOW_PROCESSES_PANEL_ID } from \"store/workflow-panel/workflow-panel-actions\";\nimport { SHARED_WITH_ME_PANEL_ID } from \"store/shared-with-me-panel/shared-with-me-panel-actions\";\nimport { ColumnFilterCounts } from \"components/data-table-filters/data-table-filters-tree\";\nimport { WorkflowResource } from \"models/workflow\";\n\ntype CssRules =\n    | 'titleWrapper'\n    | 'searchResultsTitleWrapper'\n    | 'msToolbarStyles'\n    | 'searchBox'\n    | 'headerMenu'\n    | 'toolbar'\n    | 'footer'\n    | 'loadMoreContainer'\n    | 'numResults'\n    | 'root'\n    | 'moreOptionsButton'\n    | 'title'\n    | 'dataTable'\n    | 'container'\n    | 'paginationLabel'\n    | 'paginationRoot'\n    | \"subToolbarWrapper\"\n    | 'runsToolbarWrapper'\n    | 'searchResultsToolbar'\n    | 'progressWrapper'\n    | 'progressWrapperNoTitle';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    titleWrapper: {\n        display: \"flex\",\n        justifyContent: \"space-between\",\n        marginTop: \"5px\",\n        marginBottom: \"-5px\",\n    },\n    searchResultsTitleWrapper: {\n        display: \"flex\",\n        justifyContent: \"space-between\",\n        marginTop: \"5px\",\n        height: \"30px\",\n    },\n    msToolbarStyles: {\n        padding: 0,\n        marginLeft: \"-5px\",\n    },\n    subToolbarWrapper: {\n        padding: 0,\n        marginTop: \"5px\",\n        marginLeft: 
\"-15px\",\n    },\n    runsToolbarWrapper: {\n        padding: 0,\n        marginTop: \"5px\",\n        marginLeft: \"-15px\",\n    },\n    searchResultsToolbar: {\n        marginTop: \"10px\",\n        marginBottom: \"auto\",\n    },\n    searchBox: {\n        paddingBottom: 0,\n    },\n    toolbar: {\n        paddingTop: 0,\n        paddingRight: theme.spacing(1),\n        paddingLeft: \"10px\",\n    },\n    footer: {\n        overflow: \"auto\",\n    },\n    loadMoreContainer: {\n        minWidth: '8rem',\n    },\n    root: {\n        height: \"100%\",\n        flex: 1,\n        overflowY: \"auto\",\n        boxShadow: 'none',\n    },\n    moreOptionsButton: {\n        padding: 0,\n    },\n    numResults: {\n        marginTop: 0,\n        fontSize: \"10px\",\n        marginLeft: \"10px\",\n        marginBottom: '-0.5rem',\n        minWidth: '8.5rem',\n    },\n    title: {\n        display: \"inline-block\",\n        paddingLeft: theme.spacing(2),\n        paddingTop: theme.spacing(2),\n        fontSize: \"18px\",\n        flexGrow: 1,\n        paddingRight: \"10px\",\n    },\n    progressWrapper: {\n        margin: \"14px 0 0\",\n        paddingLeft: \"20px\",\n        paddingRight: \"20px\",\n    },\n    progressWrapperNoTitle: {\n        marginTop: '12px',\n    },\n    dataTable: {\n        height: \"100%\",\n        overflowY: \"auto\",\n    },\n    container: {\n        height: \"100%\",\n    },\n    headerMenu: {\n        marginLeft: \"auto\",\n        flexBasis: \"initial\",\n        flexGrow: 0,\n    },\n    paginationLabel: {\n        margin: 0,\n        padding: 0,\n        fontSize: '0.75rem',\n    },\n    paginationRoot: {\n        fontSize: '0.75rem',\n        color: theme.palette.grey[\"600\"],\n    },\n});\n\ninterface DataExplorerDataProps<T> {\n    id: string;\n    fetchMode: DataTableFetchMode;\n    items: T[];\n    itemsAvailable: number;\n    loadingItemsAvailable: boolean;\n    columns: DataColumns<T, any>;\n    searchLabel?: string;\n    searchValue: string;\n    rowsPerPage: number;\n    rowsPerPageOptions: number[];\n    page: number;\n    contextMenuColumn: boolean;\n    defaultViewIcon?: IconType;\n    defaultViewMessages?: string[];\n    working?: boolean;\n    hideColumnSelector?: boolean;\n    paperProps?: PaperProps;\n    actions?: React.ReactNode;\n    hideSearchInput?: boolean;\n    title?: React.ReactNode;\n    path?: string;\n    currentRouteUuid: string;\n    selectedResourceUuid: string;\n    elementPath?: string;\n    isMSToolbarVisible: boolean;\n    checkedList: TCheckedList;\n    isNotFound: boolean;\n    searchBarValue: string;\n    paperClassName?: string;\n    forceMultiSelectMode?: boolean;\n    detailsPanelResourceUuid: string;\n    isDetailsPanelOpen: boolean;\n    isSelectedResourceInDataExplorer: boolean;\n    parentResource?: ProjectResource | Process | WorkflowResource;\n    typeFilter: string;\n}\n\ninterface DataExplorerActionProps<T> {\n    onSetColumns: (columns: DataColumns<T, any>) => void;\n    onSearch: (value: string) => void;\n    onRowClick: (item: T) => void;\n    onRowDoubleClick: (item: T) => void;\n    onColumnToggle: (column: DataColumn<T, any>) => void;\n    onContextMenu: (event: React.MouseEvent<HTMLElement>, item: T) => void;\n    onSortToggle: (column: DataColumn<T, any>) => void;\n    onFiltersChange: (filters: DataTableFilters, column: DataColumn<T, any>) => void;\n    onPageChange: (page: number) => void;\n    onChangeRowsPerPage: (rowsPerPage: number) => void;\n    onLoadMore: (page: number) => void;\n    
extractKey?: (item: T) => React.Key;\n    toggleMSToolbar: (isVisible: boolean) => void;\n    setCheckedListOnStore: (checkedList: TCheckedList) => void;\n    setSelectedUuid: (uuid: string) => void;\n    usesDetailsCard: (uuid: string) => boolean;\n    loadDetailsPanel: (uuid: string) => void;\n    setIsSelectedResourceInDataExplorer: (isIn: boolean) => void;\n    fetchProcessStatusCounts: (parentResourceUuid: string, typeFilter?: string) => Promise<ProcessStatusCounts | undefined>;\n}\n\ntype DataExplorerProps<T> = DataExplorerDataProps<T> & DataExplorerActionProps<T> & WithStyles<CssRules> & MPVPanelProps;\n\ntype DataExplorerState = {\n    hideToolbar: boolean;\n    isSearchResults: boolean;\n    columnFilterCounts: ColumnFilterCounts;\n};\n\nexport enum FilteredColumnNames {\n    STATUS = 'Status',\n    TYPE = 'Type',\n}\n\nexport const DataExplorer = withStyles(styles)(\n    class DataExplorerGeneric<T> extends React.Component<DataExplorerProps<T>> {\n        state: DataExplorerState = {\n            hideToolbar: true,\n            isSearchResults: false,\n            columnFilterCounts: {},\n        };\n\n        multiSelectToolbarInTitle = !this.props.title;\n        maxItemsAvailable = 0;\n\n        componentDidMount() {\n            if (this.props.onSetColumns) {\n                this.props.onSetColumns(this.props.columns);\n            }\n            this.loadFilterCounts();\n            this.setState({\n                isSearchResults: this.props.path?.includes(\"search-results\") ? true : false ,\n            })\n        }\n\n        componentDidUpdate( prevProps: Readonly<DataExplorerProps<T>>, prevState: Readonly<DataExplorerState>, snapshot?: any ): void {\n            const { selectedResourceUuid, currentRouteUuid, path, usesDetailsCard, setIsSelectedResourceInDataExplorer } = this.props;\n            if(selectedResourceUuid !== prevProps.selectedResourceUuid || currentRouteUuid !== prevProps.currentRouteUuid) {\n                setIsSelectedResourceInDataExplorer(this.isSelectedResourceInTable(selectedResourceUuid));\n                this.setState({\n                    hideToolbar: usesDetailsCard(path || '') ? selectedResourceUuid === this.props.currentRouteUuid : false,\n                })\n            }\n            if (this.props.itemsAvailable !== prevProps.itemsAvailable) {\n                this.maxItemsAvailable = Math.max(this.maxItemsAvailable, this.props.itemsAvailable);\n            }\n            if (this.props.searchBarValue !== prevProps.searchBarValue) {\n                this.maxItemsAvailable = 0;\n            }\n            if (this.props.path !== prevProps.path) {\n                this.setState({ isSearchResults: this.props.path?.includes(\"search-results\") ? 
true : false })\n            }\n            if ((prevProps.items !== this.props.items || this.props.typeFilter !== prevProps.typeFilter)) {\n                this.loadFilterCounts();\n            }\n        }\n\n        loadFilterCounts = () => {\n            const { id, columns } = this.props;\n            const filterCountColumns = getFilterCountColumns(id, columns);\n            const parentUuid = getParentUuid(this.props.parentResource, id);\n            filterCountColumns.forEach(columnName => {\n                // more columns to fetch for can be added later\n                if(columnName === FilteredColumnNames.STATUS) {\n                    this.props.fetchProcessStatusCounts(parentUuid, this.props.typeFilter).then(result=>{\n                        if(result) {\n                            this.setState({\n                                columnFilterCounts: {...this.state.columnFilterCounts, [columnName]: result}\n                            })\n                        }\n                    })\n                }\n            })\n        }\n\n        isSelectedResourceInTable = (resourceUuid) => {\n            return this.props.items.includes(resourceUuid);\n        }\n\n        render() {\n            const {\n                columns,\n                onContextMenu,\n                onFiltersChange,\n                onSortToggle,\n                extractKey,\n                rowsPerPage,\n                rowsPerPageOptions,\n                onColumnToggle,\n                searchLabel,\n                searchValue,\n                onSearch,\n                items,\n                itemsAvailable,\n                loadingItemsAvailable,\n                onRowClick,\n                onRowDoubleClick,\n                classes,\n                defaultViewIcon,\n                defaultViewMessages,\n                hideColumnSelector,\n                actions,\n                paperProps,\n                hideSearchInput,\n                path,\n                fetchMode,\n                selectedResourceUuid,\n                title,\n                panelName,\n                elementPath,\n                toggleMSToolbar,\n                setCheckedListOnStore,\n                checkedList,\n                working,\n                paperClassName,\n                forceMultiSelectMode,\n                detailsPanelResourceUuid,\n                loadDetailsPanel,\n            } = this.props;\n            return (\n                <Paper\n                    className={classNames(classes.root, paperClassName)}\n                    {...paperProps}\n                    key={path}\n                    data-cy={this.props[\"data-cy\"]}\n                    >\n                    <Grid\n                        container\n                        direction=\"column\"\n                        wrap=\"nowrap\"\n                        className={classes.container}\n                >\n                    {title && this.state.isSearchResults && (\n                        <Grid\n                            item\n                            xs\n                            className={classes.title}\n                        >\n                            {title}\n\n                        </Grid>\n                    )}\n\n                <div data-cy=\"title-wrapper\" className={classNames(this.state.isSearchResults ? 
classes.searchResultsTitleWrapper : classes.titleWrapper)}>\n                    {title && !this.state.isSearchResults && (\n                        <Grid\n                            item\n                            xs\n                            className={classes.title}\n                        >\n                            {title}\n\n                        </Grid>\n                    )}\n                    {!this.state.hideToolbar\n                        && (this.props.isSelectedResourceInDataExplorer || isMoreThanOneSelected(this.props.checkedList))\n                        && (this.multiSelectToolbarInTitle\n                            ? <MultiselectToolbar toolbarClass={classes.msToolbarStyles} />\n                            : <MultiselectToolbar\n                                    forceMultiSelectMode={forceMultiSelectMode}\n                                    toolbarClass={classNames(panelName === 'Subprocesses' ? classes.subToolbarWrapper : panelName === 'Runs' ? classes.runsToolbarWrapper : '')}/>)\n                    }\n                    {(!hideColumnSelector || !hideSearchInput || !!actions) && (\n                        <Grid\n                            className={classes.headerMenu}\n                            item\n                            xs\n                        >\n                            <Toolbar className={classes.toolbar}>\n                                <Grid container justifyContent=\"space-between\" wrap=\"nowrap\" alignItems=\"center\">\n                                    {!hideSearchInput && (\n                                        <div className={classes.searchBox}>\n                                            <SearchInput\n                                                label={searchLabel}\n                                                value={searchValue}\n                                                selfClearProp={\"\"}\n                                                onSearch={onSearch}\n                                            />\n                                        </div>\n                                    )}\n                                    {actions}\n                                    {!hideColumnSelector && (\n                                        <ColumnSelector\n                                            columns={columns}\n                                            onColumnToggle={onColumnToggle}\n                                        />\n                                    )}\n                                </Grid>\n                            </Toolbar>\n                        </Grid>\n                    )}\n\n                </div>\n                <Grid\n                    item\n                    className={classes.dataTable}\n                >\n                    <DataTable\n                        columns={this.props.contextMenuColumn ? 
[...columns, this.contextMenuColumn] : columns}\n                        items={items}\n                        onRowClick={(_, item: T) => onRowClick(item)}\n                        onContextMenu={onContextMenu}\n                        onRowDoubleClick={(_, item: T) => onRowDoubleClick(item)}\n                        onFiltersChange={onFiltersChange}\n                        onSortToggle={onSortToggle}\n                        extractKey={extractKey}\n                        defaultViewIcon={defaultViewIcon}\n                        defaultViewMessages={defaultViewMessages}\n                        currentRoute={path}\n                        toggleMSToolbar={toggleMSToolbar}\n                        setCheckedListOnStore={setCheckedListOnStore}\n                        checkedList={checkedList}\n                        selectedResourceUuid={selectedResourceUuid}\n                        setSelectedUuid={this.props.setSelectedUuid}\n                        currentRouteUuid={this.props.currentRouteUuid}\n                        working={working}\n                        isNotFound={this.props.isNotFound}\n                        detailsPanelResourceUuid={detailsPanelResourceUuid}\n                        loadDetailsPanel={loadDetailsPanel}\n                        columnFilterCounts={this.state.columnFilterCounts}\n                    />\n                </Grid>\n                <Grid\n                item\n                xs\n                >\n                <Toolbar className={classes.footer}>\n                {elementPath && (\n                    <Grid container>\n                        <span data-cy=\"element-path\">{elementPath.length > 2 ? elementPath : ''}</span>\n                    </Grid>\n                )}\n                <Grid\n                container={!elementPath}\n                                    justifyContent=\"flex-end\"\n                                >\n                                    {fetchMode === DataTableFetchMode.PAGINATED ? 
(\n                                        <TablePagination\n                                            data-cy=\"table-pagination\"\n                                            count={itemsAvailable}\n                                            rowsPerPage={rowsPerPage}\n                                            rowsPerPageOptions={rowsPerPageOptions}\n                                            page={this.props.page}\n                                            onPageChange={this.changePage}\n                                            onRowsPerPageChange={this.changeRowsPerPage}\n                                            labelDisplayedRows={renderPaginationLabel(loadingItemsAvailable)}\n                                            nextIconButtonProps={getPaginationButtonProps(itemsAvailable, loadingItemsAvailable)}\n                                            component=\"div\"\n                                            classes={{\n                                                root: classes.paginationRoot,\n                                                selectLabel: classes.paginationLabel,\n                                                displayedRows: classes.paginationLabel,\n                                            }}\n                                        />\n                                    ) : (\n                                        <Grid className={classes.loadMoreContainer}>\n                                            <Typography className={classes.numResults}>\n                                                Showing {items.length} / {this.maxItemsAvailable} results\n                                            </Typography>\n                                            <Button\n                                                size=\"small\"\n                                                onClick={this.loadMore}\n                                                variant=\"contained\"\n                                                color=\"primary\"\n                                                style={{width: '100%', margin: '10px'}}\n                                                disabled={working || items.length >= itemsAvailable}\n                                            >\n                                                Load more\n                                            </Button>\n                                        </Grid>\n                                    )}\n                                </Grid>\n                            </Toolbar>\n                        </Grid>\n                    </Grid>\n                </Paper>\n            );\n        }\n\n        changePage = (event: React.MouseEvent<HTMLButtonElement>, page: number) => {\n            this.props.onPageChange(page);\n        };\n\n        changeRowsPerPage: React.ChangeEventHandler<HTMLTextAreaElement | HTMLInputElement> = event => {\n            this.props.onChangeRowsPerPage(parseInt(event.target.value, 10));\n        };\n\n        loadMore = () => {\n            this.props.onLoadMore(this.props.page + 1);\n        };\n\n        renderContextMenuTrigger = (item: T) => (\n            <Grid\n                container\n                justifyContent=\"center\"\n            >\n                <Tooltip\n                    title=\"More options\"\n                    disableFocusListener\n                >\n                    <IconButton\n                        className={this.props.classes.moreOptionsButton}\n                        onClick={event => {\n                         
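   // stop the row click handler from also firing when opening the context menu\n                         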
   event.stopPropagation()\n                            this.props.onContextMenu(event, item)\n                        }}\n                        size=\"large\">\n                        <MoreVerticalIcon />\n                    </IconButton>\n                </Tooltip>\n            </Grid>\n        );\n\n        contextMenuColumn: DataColumn<any, any> = {\n            name: \"Actions\",\n            selected: true,\n            configurable: false,\n            filters: createTree(),\n            key: \"context-actions\",\n            render: this.renderContextMenuTrigger,\n        };\n    }\n);\n\nconst renderPaginationLabel = (loading: boolean) => ({ from, to, count }) => (\n    loading ?\n        <InlinePulser/>\n        : <>{from}-{to} of {count}</>\n);\n\nconst getPaginationButtonProps = (itemsAvailable: number, loading: boolean) => (\n    loading\n        ? { disabled: false } // Always allow paging while loading total\n        : itemsAvailable > 0\n            ? { }\n            : { disabled: true } // Disable next button on empty lists since that's not default behavior\n);\n\nconst getFilterCountColumns = (dataExplorerId: string, columns: DataColumns<any, any>) => {\n    const goodDataExplorers = [ PROJECT_PANEL_RUN_ID, SUBPROCESS_PANEL_ID, WORKFLOW_PROCESSES_PANEL_ID, ALL_PROCESSES_PANEL_ID, SHARED_WITH_ME_PANEL_ID ];\n    const goodColumnNames = [ FilteredColumnNames.STATUS ];\n    return columns.reduce((acc: string[], curr) => {\n        if(goodDataExplorers.includes(dataExplorerId) && goodColumnNames.includes(curr.name as FilteredColumnNames)) {\n            acc.push(curr.name);\n        }\n        return acc;\n    }, [])\n};\n\nconst getParentUuid = (parentResource: ProjectResource | Process | WorkflowResource | undefined, id: string) => {\n    if (parentResource) {\n        return isProcess(parentResource)\n            ? parentResource.containerRequest.uuid\n            : parentResource.uuid\n    }\n    if (isAllProcessesPanel(id) || isSharedWithMePanel(id)) {\n        return id;\n    }\n    return '';\n};\n"
  },
  {
    "path": "services/workbench2/src/components/data-table/data-column.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { DataTableFilters } from \"../data-table-filters/data-table-filters\";\nimport { createTree } from 'models/tree';\n\n/**\n *\n * @template I Type of dataexplorer item reference\n * @template R Type of resource to use to restrict values of column sort.field\n */\nexport interface DataColumn<I, R> {\n    key?: React.Key;\n    name: string;\n    selected: boolean;\n    configurable: boolean;\n\n    /**\n     * If set to true, filters on this column will be displayed in a\n     * radio group and only one filter can be selected at a time.\n     */\n    mutuallyExclusiveFilters?: boolean;\n    sort?: {direction: SortDirection, field: keyof R};\n    filters: DataTableFilters;\n    render: (item: I) => React.ReactElement<any>;\n    renderHeader?: () => React.ReactElement<any>;\n}\n\nexport enum SortDirection {\n    ASC = \"asc\",\n    DESC = \"desc\",\n    NONE = \"none\"\n}\n\nexport const toggleSortDirection = <I, R>(column: DataColumn<I, R>): DataColumn<I, R> => {\n    return column.sort\n        ? column.sort.direction === SortDirection.ASC\n            ? { ...column, sort: {...column.sort, direction: SortDirection.DESC} }\n            : { ...column, sort: {...column.sort, direction: SortDirection.ASC} }\n        : column;\n};\n\nexport const resetSortDirection = <I, R>(column: DataColumn<I, R>): DataColumn<I, R> => {\n    return column.sort ? { ...column, sort: {...column.sort, direction: SortDirection.NONE} } : column;\n};\n\nexport const createDataColumn = <I, R>(dataColumn: Partial<DataColumn<I, R>>): DataColumn<I, R> => ({\n    key: '',\n    name: '',\n    selected: true,\n    configurable: true,\n    filters: createTree(),\n    render: () => React.createElement('span'),\n    ...dataColumn,\n});\n\nexport type DataColumns<I, R> = Array<DataColumn<I, R>>;\n"
  },
  {
    "path": "services/workbench2/src/components/data-table/data-table.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Typography, Button } from \"@mui/material\";\nimport { DataTable } from \"./data-table\";\nimport { SortDirection, createDataColumn } from \"./data-column\";\nimport { ThemeProvider } from \"@mui/material\";\nimport { CustomTheme } from \"common/custom-theme\";\n\ndescribe(\"<DataTable />\", () => {\n    it(\"shows only selected columns\", () => {\n        const columns = [\n            createDataColumn({\n                name: \"Column 1\",\n                render: () => <span />,\n                selected: true,\n                configurable: true,\n            }),\n            createDataColumn({\n                name: \"Column 2\",\n                render: () => <span />,\n                selected: true,\n                configurable: true,\n            }),\n            createDataColumn({\n                name: \"Column 3\",\n                render: () => <span />,\n                selected: false,\n                configurable: true,\n            }),\n        ];\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <DataTable\n                    columns={columns}\n                    items={[{ key: \"1\", name: \"item 1\" }]}\n                    onFiltersChange={cy.stub()}\n                    onRowClick={cy.stub()}\n                    onRowDoubleClick={cy.stub()}\n                    onContextMenu={cy.stub()}\n                    onSortToggle={cy.stub()}\n                    setCheckedListOnStore={cy.stub()}\n                />\n            </ThemeProvider>\n        );\n        cy.get('th').should('have.length', 3);\n    });\n\n    it(\"renders column name\", () => {\n        const columns = [\n            createDataColumn({\n                name: \"Column 1\",\n                render: () => <span />,\n                selected: true,\n                configurable: true,\n            }),\n        ];\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <DataTable\n                    columns={columns}\n                    items={[\"item 1\"]}\n                    onFiltersChange={cy.stub()}\n                    onRowClick={cy.stub()}\n                    onRowDoubleClick={cy.stub()}\n                    onContextMenu={cy.stub()}\n                    onSortToggle={cy.stub()}\n                    setCheckedListOnStore={cy.stub()}\n                />\n            </ThemeProvider>\n        );\n        cy.get('th').last().contains('Column 1').should('exist');\n    });\n\n    it(\"uses renderHeader instead of name prop\", () => {\n        const columns = [\n            createDataColumn({\n                name: \"Column 1\",\n                renderHeader: () => <span>Column Header</span>,\n                render: () => <span />,\n                selected: true,\n                configurable: true,\n            }),\n        ];\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <DataTable\n                    columns={columns}\n                    items={[]}\n                    onFiltersChange={cy.stub()}\n                    onRowClick={cy.stub()}\n                    onRowDoubleClick={cy.stub()}\n                    onContextMenu={cy.stub()}\n                    onSortToggle={cy.stub()}\n                    setCheckedListOnStore={cy.stub()}\n                />\n            </ThemeProvider>\n        );\n        
cy.get('th').last().contains('Column Header').should('exist');\n    });\n\n    it(\"passes column key prop to corresponding cells\", () => {\n        const columns = [\n            createDataColumn({\n                name: \"Column 1\",\n                key: \"column-1-key\",\n                render: () => <span />,\n                selected: true,\n                configurable: true,\n            }),\n        ];\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <DataTable\n                    columns={columns}\n                    working={false}\n                    items={[\"item 1\"]}\n                    onFiltersChange={cy.stub()}\n                    onRowClick={cy.stub()}\n                    onRowDoubleClick={cy.stub()}\n                    onContextMenu={cy.stub()}\n                    onSortToggle={cy.stub()}\n                    setCheckedListOnStore={cy.stub()}\n                />\n            </ThemeProvider>\n        );\n        setTimeout(() => {\n            // cannot access key prop directly, so data-cy is assigned to column.key value\n            cy.get('td').last().should('have.attr', 'data-cy', 'column-1-key');\n        }, 1000);\n    });\n\n    it(\"renders items\", () => {\n        const columns = [\n            createDataColumn({\n                name: \"Column 1\",\n                render: item => <Typography>{item}</Typography>,\n                selected: true,\n                configurable: true,\n            }),\n            createDataColumn({\n                name: \"Column 2\",\n                render: item => <Button>{item}</Button>,\n                selected: true,\n                configurable: true,\n            }),\n        ];\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <DataTable\n                    columns={columns}\n                    working={false}\n                    items={[\"item 1\"]}\n                    onFiltersChange={cy.stub()}\n                    onRowClick={cy.stub()}\n                    onRowDoubleClick={cy.stub()}\n                    onContextMenu={cy.stub()}\n                    onSortToggle={cy.stub()}\n                    setCheckedListOnStore={cy.stub()}\n                />\n            </ThemeProvider>\n        );\n        setTimeout(() => {\n            cy.get('p').last().contains('item 1').should('exist');\n            cy.get('button').last().contains('item 1').should('exist');\n        }, 1000);\n    });\n\n    it(\"passes sorting props to <TableSortLabel />\", () => {\n        const columns = [\n            createDataColumn({\n                name: \"Column 1\",\n                sort: { direction: SortDirection.ASC, field: \"length\" },\n                selected: true,\n                configurable: true,\n                render: item => <Typography>{item}</Typography>,\n            }),\n        ];\n        const onSortToggle = cy.spy().as(\"onSortToggle\");\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <DataTable\n                    columns={columns}\n                    items={[\"item 1\"]}\n                    onFiltersChange={cy.stub()}\n                    onRowClick={cy.stub()}\n                    onRowDoubleClick={cy.stub()}\n                    onContextMenu={cy.stub()}\n                    onSortToggle={onSortToggle}\n                    setCheckedListOnStore={cy.stub()}\n                />\n            </ThemeProvider>\n        );\n        setTimeout(() => {\n            
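// note: Cypress does not await plain setTimeout callbacks, so assertions inside may never run\n            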
cy.get('th').last().contains('Column 1').should('exist');\n            cy.get('[data-cy=\"sort-button\"]').should('exist').click();\n            cy.get('@onSortToggle').should('have.been.calledWith', columns[0]);\n        }, 1000);\n    });\n\n    it(\"does not display <DataTableFiltersPopover /> if there is no filters provided\", () => {\n        const columns = [\n            {\n                name: \"Column 1\",\n                selected: true,\n                configurable: true,\n                filters: [],\n                render: item => <Typography>{item}</Typography>,\n            },\n        ];\n        const onFiltersChange = cy.stub();\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <DataTable\n                    columns={columns}\n                    items={[]}\n                    onFiltersChange={onFiltersChange}\n                    onRowClick={cy.stub()}\n                    onRowDoubleClick={cy.stub()}\n                    onSortToggle={cy.stub()}\n                    onContextMenu={cy.stub()}\n                    setCheckedListOnStore={cy.stub()}\n                />\n            </ThemeProvider>\n        );\n        cy.get('[data-cy=data-table]').should('exist');\n        cy.get('[data-cy=popover]').should('not.exist');\n    });\n\n    it(\"passes filter props to <DataTableFiltersPopover />\", () => {\n        const filters = { Filters : {\n            id: 'Filters id',\n            active: false,\n            children: ['Filter 1', 'Filter 2'],\n            expanded: false,\n            initialState: true,\n            parent: \"\",\n            selected: false,\n            status: \"LOADED\",\n            value: { name: 'Filter'}\n        } };\n        const columns = [\n            {\n                name: \"Column 1\",\n                selected: true,\n                configurable: true,\n                filters: filters,\n                render: item => <Typography>{item}</Typography>,\n            },\n        ];\n        const onFiltersChange = cy.spy().as(\"onFiltersChange\");\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <DataTable\n                    columns={columns}\n                    items={[]}\n                    onFiltersChange={onFiltersChange}\n                    onRowClick={cy.stub()}\n                    onRowDoubleClick={cy.stub()}\n                    onSortToggle={cy.stub()}\n                    onContextMenu={cy.stub()}\n                    setCheckedListOnStore={cy.stub()}\n                />\n            </ThemeProvider>\n        );\n        setTimeout(() => {\n            cy.get('span[role=\"button\"]').contains('Column 1').should('exist').click();\n            cy.get('[data-cy=\"tree-li\"]').contains('Filter').should('exist').click();\n            cy.get('@onFiltersChange').should('have.been.calledWith', filters, columns[0]);\n        }, 1000);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/components/data-table/data-table.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback, CustomTheme, ArvadosTheme } from 'common/custom-theme';\nimport {\n    Table,\n    TableBody,\n    TableRow,\n    TableCell,\n    TableHead,\n    TableSortLabel,\n    IconButton,\n    Tooltip,\n} from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport classnames from \"classnames\";\nimport { DataColumn, DataColumns, SortDirection } from \"./data-column\";\nimport { DataTableDefaultView } from \"../data-table-default-view/data-table-default-view\";\nimport { DataTableFilters } from \"../data-table-filters/data-table-filters\";\nimport { DataTableMultiselectPopover, DataTableMultiselectOption } from \"components/data-table-multiselect-popover/data-table-multiselect-popover\";\nimport { DataTableFiltersPopover } from \"../data-table-filters/data-table-filters-popover\";\nimport { countNodes, getTreeDirty, createTree } from \"models/tree\";\nimport { IconType } from \"components/icon/icon\";\nimport { SvgIconProps } from \"@mui/material/SvgIcon\";\nimport ArrowDownwardIcon from \"@mui/icons-material/ArrowDownward\";\nimport { isExactlyOneSelected } from \"store/multiselect/multiselect-actions\";\nimport { LoadingIndicator } from \"components/loading-indicator/loading-indicator\";\nimport { ColumnFilterCounts } from \"components/data-table-filters/data-table-filters-tree\";\n\nexport enum DataTableFetchMode {\n    PAGINATED,\n    INFINITE,\n}\n\nconst LOADING_PLACEHOLDER_COUNT = 3;\n\nenum DataTableContentType {\n    ROWS,\n    NOTFOUND,\n    LOADING,\n    EMPTY,\n};\n\nexport interface DataTableDataProps<I> {\n    items: I[];\n    columns: DataColumns<I, any>;\n    onRowClick: (event: React.MouseEvent<HTMLTableRowElement>, item: I) => void;\n    onContextMenu: (event: React.MouseEvent<HTMLElement>, item: I) => void;\n    onRowDoubleClick: (event: React.MouseEvent<HTMLTableRowElement>, item: I) => void;\n    onSortToggle: (column: DataColumn<I, any>) => void;\n    onFiltersChange: (filters: DataTableFilters, column: DataColumn<I, any>) => void;\n    extractKey?: (item: I) => React.Key;\n    working?: boolean;\n    defaultViewIcon?: IconType;\n    defaultViewMessages?: string[];\n    toggleMSToolbar: (isVisible: boolean) => void;\n    setCheckedListOnStore: (checkedList: TCheckedList) => void;\n    currentRoute?: string;\n    currentRouteUuid: string;\n    checkedList: TCheckedList;\n    selectedResourceUuid: string;\n    setSelectedUuid: (uuid: string | null) => void;\n    isNotFound?: boolean;\n    detailsPanelResourceUuid?: string;\n    loadDetailsPanel: (uuid: string) => void;\n    columnFilterCounts: ColumnFilterCounts;\n}\n\ntype CssRules =\n    | \"tableBody\"\n    | \"root\"\n    | \"content\"\n    | \"noItemsInfo\"\n    | \"checkBoxHead\"\n    | \"checkBoxCell\"\n    | \"clickBox\"\n    | \"checkBox\"\n    | \"firstTableCell\"\n    | \"tableCell\"\n    | \"firstTableHead\"\n    | \"tableHead\"\n    | \"selected\"\n    | \"hovered\"\n    | \"arrow\"\n    | \"arrowButton\"\n    | \"tableCellWorkflows\"\n    | \"loadingRow\"\n    | \"hiddenCell\"\n    | \"skeleton\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: \"100%\",\n    },\n    content: {\n        display: \"inline-block\",\n        width: \"100%\",\n    },\n    tableBody: {\n        background: 
theme.palette.background.paper,\n        overflow: \"auto\",\n    },\n    noItemsInfo: {\n        textAlign: \"center\",\n        padding: theme.spacing(1),\n    },\n    checkBoxHead: {\n        padding: \"0\",\n        display: \"flex\",\n        width: '2rem',\n        height: \"1.5rem\",\n        paddingLeft: '0.9rem',\n        marginRight: '0.5rem',\n        backgroundColor: theme.palette.background.paper,\n    },\n    checkBoxCell: {\n        padding: \"0\",\n        backgroundColor: theme.palette.background.paper,\n        cursor: \"pointer\",\n    },\n    clickBox: {\n        display: 'flex',\n        width: '1.6rem',\n        height: \"1.5rem\",\n        paddingLeft: '0.35rem',\n        paddingTop: '0.1rem',\n        marginLeft: '0.5rem',\n    },\n    checkBox: {\n        cursor: \"pointer\",\n    },\n    tableCell: {\n        wordWrap: \"break-word\",\n        paddingRight: \"24px\",\n    },\n    firstTableCell: {\n        paddingLeft: \"5px\",\n    },\n    firstTableHead: {\n        paddingLeft: \"5px\",\n    },\n    tableHead: {\n        wordWrap: \"break-word\",\n        paddingRight: \"24px\",\n        color: \"#737373\",\n        fontSize: \"0.8125rem\",\n        backgroundColor: theme.palette.background.paper,\n    },\n    selected: {\n        backgroundColor: `${CustomTheme.palette.grey['300']} !important`\n    },\n    hovered: {\n        backgroundColor: `${CustomTheme.palette.grey['100']} !important`\n    },\n    tableCellWorkflows: {\n        \"&:nth-last-child(2)\": {\n            padding: \"0px\",\n            maxWidth: \"48px\",\n        },\n        \"&:last-child\": {\n            padding: \"0px\",\n            paddingRight: \"24px\",\n            width: \"48px\",\n        },\n    },\n    arrow: {\n        margin: 0,\n    },\n    arrowButton: {\n        color: theme.palette.text.primary,\n    },\n    loadingRow: {\n        height: \"49px\",\n    },\n    hiddenCell: {\n        position: \"relative\",\n        \"& > *\": {\n            visibility: \"hidden\",\n        },\n    },\n    skeleton: {\n        visibility: \"visible\",\n        position: \"absolute\",\n        top: 0,\n        left: 0,\n        width: \"100%\",\n        height: \"100%\",\n        paddingLeft: \"5px\",\n        paddingRight: \"24px\",\n        display: \"flex\",\n        flexDirection: \"column\",\n        justifyContent: \"center\",\n        gap: \"8px\",\n    },\n});\n\nexport type TCheckedList = Record<string, boolean>;\n\ntype DataTableState = {\n    isSelected: boolean;\n    isLoaded: boolean;\n    hoveredIndex: number | null;\n};\n\ntype DataTableProps<T> = DataTableDataProps<T> & WithStyles<CssRules>;\n\nexport const DataTable = withStyles(styles)(\n    class Component<T> extends React.Component<DataTableProps<T>> {\n        state: DataTableState = {\n            isSelected: false,\n            isLoaded: false,\n            hoveredIndex: null,\n        };\n\n        componentDidMount(): void {\n            this.initializeCheckedList([]);\n            if(((this.props.items.length > 0) && !this.state.isLoaded) || !this.props.working) {\n                this.setState({ isLoaded: true });\n            }\n            if(this.props.detailsPanelResourceUuid !== this.props.selectedResourceUuid) {\n                this.props.loadDetailsPanel(this.props.selectedResourceUuid);\n            }\n        }\n\n        shouldComponentUpdate( nextProps: Readonly<DataTableProps<T>>, nextState: Readonly<DataTableState>, nextContext: any ): boolean {\n            const { items, currentRouteUuid, isNotFound, 
checkedList, columns, working, columnFilterCounts } = this.props;\n            const { isSelected, isLoaded, hoveredIndex } = this.state;\n            return items !== nextProps.items\n                || currentRouteUuid !== nextProps.currentRouteUuid\n                || isNotFound !== nextProps.isNotFound\n                || isLoaded !== nextState.isLoaded\n                || isSelected !== nextState.isSelected\n                || hoveredIndex !== nextState.hoveredIndex\n                || checkedList !== nextProps.checkedList\n                || columns !== nextProps.columns\n                || columnFilterCounts !== nextProps.columnFilterCounts\n                || working !== nextProps.working;\n        }\n\n        componentDidUpdate(prevProps: Readonly<DataTableProps<T>>, prevState: DataTableState) {\n            const { items, currentRouteUuid, checkedList, setCheckedListOnStore } = this.props;\n            const { isSelected } = this.state;\n            const singleSelected = isExactlyOneSelected(this.props.checkedList);\n            if (prevProps.items !== items) {\n                if (isSelected === true) this.setState({ isSelected: false });\n                if (items.length) this.initializeCheckedList(items);\n                else setCheckedListOnStore({});\n            }\n            if (items.length && checkedList && (Object.keys(checkedList)).length === 0) {\n                this.initializeCheckedList(items);\n            }\n            if (prevProps.currentRoute !== this.props.currentRoute) {\n                this.initializeCheckedList([]);\n            }\n            if (this.state.isLoaded){\n                this.setState({ isSelected: this.isAnySelected() });\n                if (singleSelected && singleSelected !== isExactlyOneSelected(prevProps.checkedList)) {\n                    this.props.setSelectedUuid(singleSelected);\n                }\n                if (!singleSelected && !!currentRouteUuid && !this.isAnySelected()) {\n                    this.props.setSelectedUuid(currentRouteUuid);\n                }\n                if (!singleSelected && this.isAnySelected()) {\n                    this.props.setSelectedUuid(null);\n                }\n            }\n            if(prevProps.working === false && this.props.working === true) {\n                this.setState({ isLoaded: false });\n                this.handleSelectNone(this.props.checkedList);\n            }\n            if(prevProps.working === true && this.props.working === false) {\n                this.setState({ isLoaded: true });\n            }\n            if((this.props.items.length > 0) && !this.state.isLoaded) {\n                this.setState({ isLoaded: true });\n            }\n        }\n\n        componentWillUnmount(): void {\n            this.initializeCheckedList([]);\n        }\n\n        checkBoxColumn: DataColumn<any, any> = {\n            name: \"checkBoxColumn\",\n            selected: true,\n            configurable: false,\n            filters: createTree(),\n            render: uuid => {\n                const { classes, checkedList } = this.props;\n                return (\n                    <div\n                        className={classes.clickBox}\n                        onClick={(ev) => {\n                            ev.stopPropagation()\n                            this.handleSelectOne(uuid)\n                        }}\n                        onDoubleClick={(ev) => ev.stopPropagation()}\n                    >\n                        <input\n                            
data-cy={`multiselect-checkbox-${uuid}`}\n                            type='checkbox'\n                            name={uuid}\n                            className={classes.checkBox}\n                            checked={checkedList && checkedList[uuid] ? checkedList[uuid] : false}\n                            onChange={() => this.handleSelectOne(uuid)}\n                            onDoubleClick={(ev) => ev.stopPropagation()}\n                        ></input>\n                    </div>\n                );\n            },\n        };\n\n        multiselectOptions: DataTableMultiselectOption[] = [\n            { name: \"All\", fn: list => this.handleSelectAll(list) },\n            { name: \"None\", fn: list => this.handleSelectNone(list) },\n            { name: \"Invert\", fn: list => this.handleInvertSelect(list) },\n        ];\n\n        initializeCheckedList = (uuids: any[]): void => {\n            const newCheckedList = uuids\n                .reduce((acc, curr) => ({\n                    ...acc,\n                    [curr]: false\n                }), {} as TCheckedList);\n            this.props.setCheckedListOnStore(newCheckedList);\n        };\n\n        isAllSelected = (list: TCheckedList): boolean => {\n            return Object.keys(list)\n                .every((key) => list[key] === true);\n        };\n\n        isAnySelected = (): boolean => {\n            const { checkedList } = this.props;\n            return !!checkedList\n                && !!Object.keys(checkedList).length\n                && Object.keys(checkedList).some((key) => checkedList[key] === true);\n        };\n\n        handleSelectOne = (uuid: string): void => {\n            const { checkedList } = this.props;\n            const newCheckedList = { ...checkedList };\n            newCheckedList[uuid] = !checkedList[uuid];\n            this.setState({ isSelected: this.isAllSelected(newCheckedList) });\n            this.props.setCheckedListOnStore(newCheckedList);\n        };\n\n        handleSelectorSelect = (): void => {\n            const { checkedList } = this.props;\n            const { isSelected } = this.state;\n            isSelected ? 
this.handleSelectNone(checkedList) : this.handleSelectAll(checkedList);\n        };\n\n        handleSelectAll = (list: TCheckedList): void => {\n            if (Object.keys(list).length) {\n                const newCheckedList = Object.keys(list)\n                    .reduce((acc, curr) => ({\n                        ...acc,\n                        [curr]: true\n                    }), {} as TCheckedList);\n                this.setState({ isSelected: true });\n                this.props.setCheckedListOnStore(newCheckedList);\n            }\n        };\n\n        handleSelectNone = (list: TCheckedList): void => {\n            const newCheckedList = { ...list };\n            for (const key in newCheckedList) {\n                newCheckedList[key] = false;\n            }\n            this.setState({ isSelected: false });\n            this.props.setCheckedListOnStore(newCheckedList);\n        };\n\n        handleInvertSelect = (list: TCheckedList): void => {\n            if (Object.keys(list).length) {\n                const newCheckedList = { ...list };\n                for (const key in newCheckedList) {\n                    newCheckedList[key] = !list[key];\n                }\n                this.setState({ isSelected: this.isAllSelected(newCheckedList) });\n                this.props.setCheckedListOnStore(newCheckedList);\n            }\n        };\n\n        /**\n         * Helper to contain display state logic to avoid recalculating in multiple places\n         * @param items Data table items array\n         * @returns An enum value representing what should be displayed\n         */\n        getDataTableContentType = (items: T[]): DataTableContentType => {\n            const { working, isNotFound } = this.props;\n            const { isLoaded } = this.state;\n\n            if (isLoaded && !isNotFound && !!items.length && !working) {\n                return DataTableContentType.ROWS;\n            } else if (isNotFound && isLoaded) {\n                return DataTableContentType.NOTFOUND;\n            } else if (isLoaded === false || working === true) {\n                return DataTableContentType.LOADING;\n            } else {\n                // isLoaded && !working && !isNotFound\n                return DataTableContentType.EMPTY;\n            }\n        };\n\n        render() {\n            const { items, classes, columns } = this.props;\n            const dataTableContentType = this.getDataTableContentType(items);\n            if (columns.length && columns[0].name === this.checkBoxColumn.name) columns.shift();\n            columns.unshift(this.checkBoxColumn);\n            return (\n                <div className={classes.root}>\n                    <div className={classes.content}>\n                        <Table data-cy=\"data-table\" stickyHeader>\n                            <TableHead>\n                                <TableRow>{this.mapVisibleColumns(this.renderHeadCell)}</TableRow>\n                            </TableHead>\n                            <TableBody className={classes.tableBody}>\n                                {this.renderBody(items, dataTableContentType)}\n                            </TableBody>\n                        </Table>\n                        {this.renderNoItemsPlaceholder(dataTableContentType, this.props.columns)}\n                    </div>\n                </div>\n            );\n        }\n\n        renderLoadingPlaceholder = () => {\n            return <>\n                {(new Array(LOADING_PLACEHOLDER_COUNT).fill(0)).map(() => {\n             
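       // renders LOADING_PLACEHOLDER_COUNT static skeleton rows while data loads; note these rows have no React key, so React may log a missing-key warning (an index-based key would be one possible fix)\n             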
       return <TableRow hover className={this.props.classes.loadingRow}>\n                        {this.mapVisibleColumns((column, colIndex) => (\n                            <TableCell\n                                key={column.key || colIndex}\n                                data-cy={column.key || colIndex}\n                                >\n                                <LoadingIndicator />\n                            </TableCell>\n                        ))}\n                    </TableRow>\n                })}\n            </>;\n        };\n\n        renderNoItemsPlaceholder = (dataTableContentType: DataTableContentType, columns: DataColumns<T, any>) => {\n            const dirty = columns.some(column => getTreeDirty(\"\")(column.filters));\n            if (dataTableContentType === DataTableContentType.NOTFOUND) {\n                return (\n                    <DataTableDefaultView\n                        icon={this.props.defaultViewIcon}\n                        messages={[\"No items found\"]}\n                    />\n                );\n            } else if (dataTableContentType === DataTableContentType.EMPTY) {\n                // isLoaded && !working && !isNotFound\n                return (\n                    <DataTableDefaultView\n                        data-cy=\"data-table-default-view\"\n                        icon={this.props.defaultViewIcon}\n                        messages={this.props.defaultViewMessages}\n                        filtersApplied={dirty}\n                    />\n                );\n            } else {\n                return <></>;\n            }\n        };\n\n        renderHeadCell = (column: DataColumn<T, any>, index: number) => {\n            const { name, key, renderHeader, filters, sort } = column;\n            const { onSortToggle, onFiltersChange, classes, checkedList } = this.props;\n            const { isSelected } = this.state;\n            return column.name === \"checkBoxColumn\" ? (\n                <TableCell\n                    key={key || index}\n                    className={classes.checkBoxCell}>\n                    <div className={classes.checkBoxHead}>\n                        <Tooltip title={this.state.isSelected ? \"Deselect all\" : \"Select all\"}>\n                            <input\n                                data-cy=\"data-table-header-checkbox\"\n                                type=\"checkbox\"\n                                className={classes.checkBox}\n                                checked={isSelected}\n                                disabled={!this.props.items.length}\n                                onChange={this.handleSelectorSelect}></input>\n                        </Tooltip>\n                        <DataTableMultiselectPopover\n                            name={`Options`}\n                            disabled={!this.props.items.length}\n                            options={this.multiselectOptions}\n                            checkedList={checkedList}></DataTableMultiselectPopover>\n                    </div>\n                </TableCell>\n            ) : (\n                <TableCell\n                    className={classnames(classes.tableHead, index === 1 ? classes.firstTableHead : '')}\n                    key={key || index}>\n                    {renderHeader ? (\n                        renderHeader()\n                    ) : countNodes(filters) > 0 ? 
(\n                        <DataTableFiltersPopover\n                            name={`${name} filters`}\n                            mutuallyExclusive={column.mutuallyExclusiveFilters}\n                            onChange={filters => onFiltersChange && onFiltersChange(filters, column)}\n                            columnFilterCount={this.props.columnFilterCounts?.[name] || {}}\n                            filters={filters}>\n                            {name}\n                        </DataTableFiltersPopover>\n                    ) : sort ? (\n                        <TableSortLabel\n                            active={sort.direction !== SortDirection.NONE}\n                            direction={sort.direction !== SortDirection.NONE ? sort.direction : undefined}\n                            IconComponent={this.ArrowIcon}\n                            hideSortIcon\n                            onClick={() => onSortToggle && onSortToggle(column)}>\n                            {name}\n                        </TableSortLabel>\n                    ) : (\n                        <span>{name}</span>\n                    )}\n                </TableCell>\n            );\n        };\n\n        ArrowIcon = ({ className, ...props }: SvgIconProps) => (\n            <IconButton\n                data-cy=\"sort-button\"\n                component=\"span\"\n                className={this.props.classes.arrowButton}\n                tabIndex={-1}\n                size=\"large\">\n                <ArrowDownwardIcon\n                    {...props}\n                    className={classnames(className, this.props.classes.arrow)}\n                />\n            </IconButton>\n        );\n\n        renderBody = (items: any[], dataTableContentType: DataTableContentType) => {\n            if (items.length) {\n                // Have items, renderBodyRow renders rows or skeleton over rows\n                return items.map((item, index) => this.renderBodyRow(item, index, dataTableContentType));\n            } else if (dataTableContentType === DataTableContentType.LOADING) {\n                // No rows and loading, use static skeleton\n                return this.renderLoadingPlaceholder();\n            }\n            // No rows and not loading, let empty view outside table body display\n            return <></>;\n        };\n\n        renderBodyRow = (item: any, index: number, dataTableContentType: DataTableContentType) => {\n            const { onRowClick, onRowDoubleClick, extractKey, classes, currentRoute, checkedList } = this.props;\n            const { hoveredIndex } = this.state;\n            const isRowSelected = checkedList && checkedList[item] === true;\n            const getCellClassnames = (colIndex: number) => {\n                let cellClasses: string[] = [];\n                if (dataTableContentType === DataTableContentType.LOADING) cellClasses.push(classes.hiddenCell);\n                if(currentRoute === '/workflows') return classnames(cellClasses, classes.tableCellWorkflows);\n                if(colIndex === 0) return classnames(cellClasses, classes.checkBoxCell, isRowSelected ? classes.selected : index === hoveredIndex ? classes.hovered : \"\");\n                if(colIndex === 1) return classnames(cellClasses, classes.tableCell, classes.firstTableCell, isRowSelected ? classes.selected : \"\");\n                return classnames(cellClasses, classes.tableCell, isRowSelected ? 
classes.selected : \"\");\n            };\n            const handleHover = (index: number | null) => {\n                this.setState({ hoveredIndex: index });\n            }\n\n            const noopWhenLoading = (func) => {\n                if (dataTableContentType === DataTableContentType.LOADING) {\n                    return (e) => e.preventDefault();\n                } else {\n                    return func;\n                }\n            }\n\n            return (\n                <TableRow\n                    data-cy={'data-table-row'}\n                    hover\n                    key={extractKey ? extractKey(item) : index}\n                    onClick={noopWhenLoading(event => onRowClick && onRowClick(event, item))}\n                    onContextMenu={noopWhenLoading(this.handleRowContextMenu(item))}\n                    onDoubleClick={noopWhenLoading(event => onRowDoubleClick && onRowDoubleClick(event, item))}\n                    selected={isRowSelected}\n                    className={isRowSelected ? classes.selected : \"\"}\n                    onMouseEnter={()=>handleHover(index)}\n                    onMouseLeave={()=>handleHover(null)}\n                >\n                    {this.mapVisibleColumns((column, colIndex) => (\n                        <TableCell\n                            key={column.key || colIndex}\n                            data-cy={column.key || colIndex}\n                            className={getCellClassnames(colIndex)}>\n                            {column.render(item)}\n                            {dataTableContentType === DataTableContentType.LOADING && <LoadingIndicator inline={true} containerClassName={classes.skeleton} />}\n                        </TableCell>\n                    ))}\n                </TableRow>\n            );\n        };\n\n        mapVisibleColumns = (fn: (column: DataColumn<T, any>, index: number) => React.ReactElement<any>) => {\n            return this.props.columns.filter(column => column.selected).map(fn);\n        };\n\n        handleRowContextMenu = (item: T) => (event: React.MouseEvent<HTMLElement>) => this.props.onContextMenu(event, item);\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/components/data-table-default-view/data-table-default-view.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { DefaultViewDataProps, DefaultView } from 'components/default-view/default-view';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { DetailsIcon } from 'components/icon/icon';\n\ntype CssRules = 'classRoot';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    classRoot: {\n        marginTop: theme.spacing(4),\n        marginBottom: theme.spacing(4),\n    },\n});\ntype DataTableDefaultViewDataProps = Partial<Pick<DefaultViewDataProps, 'icon' | 'messages' | 'filtersApplied'>>;\ntype DataTableDefaultViewProps = DataTableDefaultViewDataProps & WithStyles<CssRules>;\n\nexport const DataTableDefaultView = withStyles(styles)(\n    ({ classes, ...props }: DataTableDefaultViewProps) => {\n        const icon = props.icon || DetailsIcon;\n        const filterWarning: string[] = props.filtersApplied ? ['Filters are applied to the data.'] : [];\n        const messages = filterWarning.concat(props.messages || ['No items found']);\n        return <DefaultView {...classes} {...{ icon, messages }} />;\n    });\n"
  },
  {
    "path": "services/workbench2/src/components/data-table-filters/data-table-filters-popover.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { DataTableFiltersPopover } from \"./data-table-filters-popover\";\nimport { getInitialProcessStatusFilters } from \"store/resource-type-filters/resource-type-filters\"\nimport { ThemeProvider } from \"@mui/material\";\nimport { CustomTheme } from \"common/custom-theme\";\n\ndescribe(\"<DataTableFiltersPopover />\", () => {\n    it(\"renders filters according to their state\", () => {\n        // 1st filter (All) is selected, the rest aren't.\n        const filters = getInitialProcessStatusFilters()\n        const columnFilterCount = {'All': '0', 'Draft': '1', 'On hold': '2', 'Queued': '3', 'Running': '4', 'Completed': '5', 'Cancelled': '6', 'Failed': '7'}\n\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <DataTableFiltersPopover name=\"\" filters={filters} columnFilterCount={columnFilterCount} />\n            </ThemeProvider>\n        );\n        cy.get('span[role=button]').eq(0).click();\n        cy.get('input[type=checkbox]').should('have.length', 8);\n        // check that each filter has the correct count\n        Object.keys(columnFilterCount).forEach((key, idx) => {\n            if (idx === 0) {\n                cy.get('[data-cy=tree-li]').contains(key).should('contain', 'All')\n            } else {\n                cy.get('[data-cy=tree-li]').contains(key).parent().should('contain', columnFilterCount[key])\n            }\n        })\n        //\"All\" should be the only item selected\n        cy.get('input[type=checkbox]').eq(0).should('be.checked');\n        cy.get('input[type=checkbox]').eq(1).should('not.be.checked');\n        cy.contains('Close').click();\n        cy.get('input[type=checkbox]').should('not.exist');\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/components/data-table-filters/data-table-filters-popover.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport {\n    ButtonBase,\n    Theme,\n    Popover,\n    Button,\n    Card,\n    CardActions,\n    Typography,\n    CardContent,\n    Tooltip,\n    IconButton,\n} from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport classnames from 'classnames';\nimport { DefaultTransformOrigin } from 'components/popover/helpers';\nimport { createTree } from 'models/tree';\nimport { DataTableFilters } from './data-table-filters';\nimport { DataTableFiltersTree } from './data-table-filters-tree';\nimport { getNodeDescendants } from 'models/tree';\nimport debounce from 'lodash/debounce';\nimport { ColumnFilterCount } from './data-table-filters-tree';\n\nexport type CssRules = 'root' | 'icon' | 'iconButton' | 'active' | 'checkbox' | 'closeButtonContainer';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: Theme) => ({\n    root: {\n        cursor: 'pointer',\n        display: 'inline-flex',\n        justifyContent: 'flex-start',\n        flexDirection: 'inherit',\n        alignItems: 'center',\n        '&:hover': {\n            color: theme.palette.text.primary,\n        },\n        '&:focus': {\n            color: theme.palette.text.primary,\n        },\n    },\n    active: {\n        color: theme.palette.text.primary,\n        '& $iconButton': {\n            opacity: 1,\n        },\n    },\n    icon: {\n        fontSize: 12,\n        userSelect: 'none',\n        width: 16,\n        height: 15,\n        paddingBottom: 18,\n    },\n    iconButton: {\n        color: theme.palette.text.primary,\n        opacity: 0.7,\n    },\n    checkbox: {\n        width: 24,\n        height: 24,\n    },\n    closeButtonContainer: {\n        display: 'flex',\n        justifyContent: 'flex-end',\n    },\n});\n\nenum SelectionMode {\n    ALL = 'all',\n    NONE = 'none',\n}\n\nexport interface DataTableFilterProps {\n    name: string;\n    filters: DataTableFilters;\n    onChange?: (filters: DataTableFilters) => void;\n    children: React.ReactNode;\n\n    /**\n     * When set to true, only one filter can be selected at a time.\n     */\n    mutuallyExclusive?: boolean;\n\n    /**\n     * By default `all` filters selection means that label should be grayed out.\n     * Use `none` when label is supposed to be grayed out when no filter is selected.\n     */\n    defaultSelection?: SelectionMode;\n    columnFilterCount: ColumnFilterCount;\n}\n\ninterface DataTableFilterState {\n    anchorEl?: HTMLElement;\n    filters: DataTableFilters;\n    prevFilters: DataTableFilters;\n}\n\nexport const DataTableFiltersPopover = withStyles(styles)(\n    class extends React.Component<DataTableFilterProps & WithStyles<CssRules>, DataTableFilterState> {\n        state: DataTableFilterState = {\n            anchorEl: undefined,\n            filters: createTree(),\n            prevFilters: createTree(),\n        };\n        icon = React.createRef<HTMLElement>();\n\n        componentWillUnmount(): void {\n            this.submit.cancel();\n        }\n\n        render() {\n            const { name, classes, defaultSelection = SelectionMode.ALL, children } = this.props;\n            const isActive = getNodeDescendants('')(this.state.filters).some((f) => (defaultSelection === SelectionMode.ALL ? 
!f.selected : f.selected));\n            return <>\n                <Tooltip title='Filters'>\n                    <ButtonBase className={classnames([classes.root, { [classes.active]: isActive }])} component='span' onClick={this.open} disableRipple>\n                        {children}\n                        <IconButton\n                            component='span'\n                            classes={{ root: classes.iconButton }}\n                            tabIndex={-1}\n                            size=\"large\">\n                            <i className={classnames(['fas fa-filter', classes.icon])} data-fa-transform='shrink-3' ref={this.icon} />\n                        </IconButton>\n                    </ButtonBase>\n                </Tooltip>\n                <Popover\n                    anchorEl={this.state.anchorEl}\n                    open={!!this.state.anchorEl}\n                    anchorOrigin={DefaultTransformOrigin}\n                    transformOrigin={DefaultTransformOrigin}\n                    onClose={this.close}\n                >\n                    <Card>\n                        <CardContent>\n                            <Typography variant='caption'>{name}</Typography>\n                        </CardContent>\n                        <DataTableFiltersTree\n                            filters={this.state.filters}\n                            mutuallyExclusive={this.props.mutuallyExclusive}\n                            columnFilterCount={this.props.columnFilterCount}\n                            onChange={this.onChange} />\n                        <section className={classes.closeButtonContainer}>\n                            <CardActions>\n                                <Button color='primary' variant='outlined' size='small' onClick={this.close}>\n                                    Close\n                                </Button>\n                            </CardActions>\n                        </section>\n                    </Card>\n                </Popover>\n            </>;\n        }\n\n        static getDerivedStateFromProps(props: DataTableFilterProps, state: DataTableFilterState): DataTableFilterState {\n            return props.filters !== state.prevFilters ? { ...state, filters: props.filters, prevFilters: props.filters } : state;\n        }\n\n        open = () => {\n            this.setState({ anchorEl: this.icon.current || undefined });\n        };\n\n        onChange = (filters) => {\n            this.setState({ filters });\n            if (this.props.mutuallyExclusive) {\n                // Mutually exclusive filters apply immediately\n                const { onChange } = this.props;\n                if (onChange) {\n                    onChange(filters);\n                }\n                this.close();\n            } else {\n                // Non-mutually exclusive filters are debounced\n                this.submit();\n            }\n        };\n\n        submit = debounce(() => {\n            const { onChange } = this.props;\n            if (onChange) {\n                onChange(this.state.filters);\n            }\n        }, 1000);\n\n        close = () => {\n            this.setState((prev) => ({\n                ...prev,\n                anchorEl: undefined,\n            }));\n        };\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/components/data-table-filters/data-table-filters-tree.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { toggleNodeSelection, getNode, initTreeNode, getNodeChildrenIds, selectNode, deselectNodes } from 'models/tree';\nimport { TreeComponent, TreeItem, TreeItemStatus} from 'components/tree/tree';\nimport { noop, map } from \"lodash/fp\";\nimport { toggleNodeCollapse } from 'models/tree';\nimport { countNodes, countChildren } from 'models/tree';\nimport { DataTableFilterItem, DataTableFilters } from './data-table-filters';\nimport { ThreeDotsSuspense } from \"components/loading/three-dots\";\n\nexport type ColumnFilterCount = Record<string, string>;\nexport type ColumnFilterCounts = Record<string, ColumnFilterCount>;\n\nexport interface DataTableFilterProps {\n    filters: DataTableFilters;\n    onChange?: (filters: DataTableFilters) => void;\n\n    /**\n     * When set to true, only one filter can be selected at a time.\n     */\n    mutuallyExclusive?: boolean;\n    columnFilterCount: ColumnFilterCount;\n}\n\nexport class DataTableFiltersTree extends React.Component<DataTableFilterProps> {\n\n    render() {\n        const { filters, columnFilterCount } = this.props;\n        const hasSubfilters = countNodes(filters) !== countChildren('')(filters);\n        return <TreeComponent\n            key={JSON.stringify(columnFilterCount)}\n            levelIndentation={hasSubfilters ? 20 : 0}\n            itemRightPadding={20}\n            items={filtersToTree(filters, columnFilterCount)}\n            render={this.props.mutuallyExclusive ? renderRadioItem : renderItem}\n            showSelection\n            useRadioButtons={this.props.mutuallyExclusive}\n            onContextMenu={noop}\n            toggleItemActive={\n                this.props.mutuallyExclusive\n                    ? this.toggleRadioButtonFilter\n                    : this.toggleFilter\n            }\n            toggleItemOpen={this.toggleOpen}\n        />;\n    }\n\n    /**\n     * Handler for when a tree item is toggled via a radio button.\n     * Ensures mutual exclusivity among filter tree items.\n     */\n    toggleRadioButtonFilter = (_: any, item: TreeItem<DataTableFilterItem>) => {\n        const { onChange = noop } = this.props;\n\n        // If the filter is already selected, do nothing.\n        if (item.selected) { return; }\n\n        // Otherwise select this node and deselect the others\n        const filters = selectNode(item.id, true)(this.props.filters);\n        const toDeselect = Object.keys(this.props.filters).filter((id) => (id !== item.id));\n        onChange(deselectNodes(toDeselect, true)(filters));\n    }\n\n    toggleFilter = (_: React.MouseEvent, item: TreeItem<DataTableFilterItem>) => {\n        const { onChange = noop } = this.props;\n        onChange(toggleNodeSelection(item.id, true)(this.props.filters));\n    }\n\n    toggleOpen = (_: React.MouseEvent, item: TreeItem<DataTableFilterItem>) => {\n        const { onChange = noop } = this.props;\n        onChange(toggleNodeCollapse(item.id)(this.props.filters));\n    }\n}\n\nconst renderedItemStyles = {\n    root: {\n        display: 'flex',\n        alignItems: 'center',\n        justifyContent: 'space-between',\n        '&hover': {\n            color: 'grey',\n        },\n    },\n    name: {\n        marginRight: '20px',\n    },\n};\n\nconst renderItem = ({data: {name, count}, initialState, selected}: TreeItem<DataTableFilterItem>) =>\n    count ? 
<div style={renderedItemStyles.root}>\n                <span style={renderedItemStyles.name}>{name}</span>\n                <ThreeDotsSuspense el={<span>{count}</span>} isLoaded={!!count} />\n                {initialState !== selected ? <>\n                    *\n                </> : null}\n            </div>\n            :\n            <span>\n                {name}{initialState !== selected ? <>*</> : null}\n            </span>;\n\nconst renderRadioItem = ({data: {name, count}}: TreeItem<DataTableFilterItem>) =>\n    <div style={renderedItemStyles.root}>\n        <span style={renderedItemStyles.name}>{name}</span>\n        <ThreeDotsSuspense el={<span>{count}</span>} isLoaded={!!count} />\n    </div>;\n\nconst filterToTreeItem = (filters: DataTableFilters, columnFilterCount: ColumnFilterCount) =>\n    (id: string): TreeItem<any> => {\n        const filterValue = filters[id].value;\n        if (filterValue) {\n            filterValue['count'] = columnFilterCount[id]\n        }\n        const node = getNode(id)(filters) || initTreeNode({ id: '', value: 'InvalidNode' });\n        const items = getNodeChildrenIds(node.id)(filters)\n            .map(filterToTreeItem(filters, columnFilterCount));\n        const isIndeterminate = !node.selected && items.some(i => i.selected || i.indeterminate);\n\n        return {\n            active: node.active,\n            data: node.value,\n            id: node.id,\n            items: items.length > 0 ? items : undefined,\n            open: node.expanded,\n            selected: node.selected,\n            initialState: node.initialState,\n            indeterminate: isIndeterminate,\n            status: TreeItemStatus.LOADED,\n        };\n    };\n\nconst filtersToTree = (filters: DataTableFilters, columnFilterCount: ColumnFilterCount): TreeItem<DataTableFilterItem>[] =>\n    map(filterToTreeItem(filters, columnFilterCount), getNodeChildrenIds('')(filters));\n"
  },
  {
    "path": "services/workbench2/src/components/data-table-filters/data-table-filters.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Tree } from 'models/tree';\n\nexport interface DataTableFilterItem {\n    name: string;\n    count?: string;\n}\n\nexport type DataTableFilters = Tree<DataTableFilterItem>;\n"
  },
  {
    "path": "services/workbench2/src/components/data-table-multiselect-popover/data-table-multiselect-popover.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { ButtonBase, Theme, Popover, Card, Tooltip, IconButton } from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport classnames from \"classnames\";\nimport { DefaultTransformOrigin } from \"components/popover/helpers\";\nimport { grey } from \"@mui/material/colors\";\nimport { TCheckedList } from \"components/data-table/data-table\";\n\nexport type CssRules = \"root\" | \"icon\" | \"iconButton\" | \"disabled\" | \"optionsContainer\" | \"option\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: Theme) => ({\n    root: {\n        borderRadius: \"7px\",\n        \"&:hover\": {\n            backgroundColor: grey[200],\n        },\n        \"&:focus\": {\n            color: theme.palette.text.primary,\n        },\n    },\n    icon: {\n        cursor: \"pointer\",\n        fontSize: 20,\n        userSelect: \"none\",\n        \"&:hover\": {\n            color: theme.palette.text.primary,\n        },\n        paddingBottom: \"5px\",\n    },\n    iconButton: {\n        color: theme.palette.text.primary,\n        opacity: 0.6,\n        padding: 1,\n        paddingBottom: 5,\n    },\n    disabled: {\n        color: grey[500],\n    },\n    optionsContainer: {\n        padding: \"1rem 0\",\n        flex: 1,\n    },\n    option: {\n        cursor: \"pointer\",\n        display: \"flex\",\n        padding: \"3px 2rem\",\n        fontSize: \"0.9rem\",\n        alignItems: \"center\",\n        \"&:hover\": {\n            backgroundColor: \"rgba(0, 0, 0, 0.08)\",\n        },\n    },\n});\n\nexport type DataTableMultiselectOption = {\n    name: string;\n    fn: (checkedList) => void;\n};\n\nexport interface DataTableMultiselectProps {\n    name: string;\n    disabled: boolean;\n    options: DataTableMultiselectOption[];\n    checkedList: TCheckedList;\n}\n\ninterface DataTableFMultiselectPopState {\n    anchorEl?: HTMLElement;\n}\n\nexport const DataTableMultiselectPopover = withStyles(styles)(\n    class extends React.Component<DataTableMultiselectProps & WithStyles<CssRules>, DataTableFMultiselectPopState> {\n        state: DataTableFMultiselectPopState = {\n            anchorEl: undefined,\n        };\n        icon = React.createRef<HTMLElement>();\n\n        render() {\n            const { classes, children, options, checkedList, disabled } = this.props;\n            return <>\n                <Tooltip\n                    title=\"Select options\"\n                    data-cy=\"data-table-multiselect-popover\"\n                >\n                    <ButtonBase\n                        className={classnames(classes.root)}\n                        component=\"span\"\n                        onClick={disabled ? () => {} : this.open}\n                        disableRipple\n                    >\n                        {children}\n                        <IconButton\n                            component=\"span\"\n                            classes={{ root: classes.iconButton }}\n                            tabIndex={-1}\n                            size=\"large\">\n                            <i\n                                className={`${classnames([\"fas fa-sort-down\", classes.icon])}${disabled ? 
` ${classes.disabled}` : \"\"}`}\n                                data-fa-transform=\"shrink-3\"\n                                ref={this.icon}\n                            />\n                        </IconButton>\n                    </ButtonBase>\n                </Tooltip>\n                <Popover\n                    anchorEl={this.state.anchorEl}\n                    open={!!this.state.anchorEl}\n                    anchorOrigin={DefaultTransformOrigin}\n                    transformOrigin={DefaultTransformOrigin}\n                    onClose={this.close}\n                >\n                    <Card>\n                        <div className={classes.optionsContainer}>\n                            {options.length &&\n                                options.map((option, i) => (\n                                    <div\n                                        data-cy={`multiselect-popover-${option.name}`}\n                                        key={i}\n                                        className={classes.option}\n                                        onClick={() => {\n                                            option.fn(checkedList);\n                                            this.close();\n                                        }}\n                                    >\n                                        {option.name}\n                                    </div>\n                                ))}\n                        </div>\n                    </Card>\n                </Popover>\n            </>;\n        }\n\n        open = () => {\n            this.setState({ anchorEl: this.icon.current || undefined });\n        };\n\n        close = () => {\n            this.setState(prev => ({\n                ...prev,\n                anchorEl: undefined,\n            }));\n        };\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/components/date-picker/date-picker.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DatePicker } from './date-picker';\nimport moment from 'moment';\n\ndescribe('DatePicker Component', () => {\n    let defaultProps;\n\n    beforeEach(() => {\n        defaultProps = {\n            label: 'Test Date',\n            input: {\n                value: '',\n                onChange: cy.stub().as('onChange'),\n            },\n        };\n\n        cy.mount(<DatePicker {...defaultProps} />);\n    });\n\n    it('renders with label', () => {\n        cy.get('label').should('contain', 'Test Date');\n    });\n\n    it('initializes with current date when no minDate provided', () => {\n        const today = moment().format('MM/DD/YYYY');\n        cy.get('input').should('have.value', today);\n    });\n\n    it('initializes with minDate when provided', () => {\n        const startValue = moment().add(1, 'year')\n\n        cy.mount(\n            <DatePicker\n                {...defaultProps}\n                startValue={startValue}\n            />\n        );\n\n        cy.get('input').should('have.value', startValue.format('MM/DD/YYYY'));\n    });\n\n    it('opens calendar on click', () => {\n        cy.get('button').click();\n        // cypress doesn't find the calendar div, so check if the current month is displayed\n        cy.contains(moment().format('MMMM'));\n    });\n\n    it('disables past dates when disablePast is true', () => {\n        cy.clock(new Date(2025, 9, 15));\n        cy.mount(\n            <DatePicker\n                {...defaultProps}\n                disablePast\n            />\n        );\n\n        cy.get('button').click();\n\n        // MUI uses 'disabled=\"disabled\"' instead of 'disabled={true}'\n        cy.get('div[role=\"dialog\"] [role=row] [role=gridcell]').contains('14').should('have.attr', 'disabled', 'disabled');\n    });\n\n    it('calls onChange when date is selected', () => {\n        // Click any future date (first day of next month)\n        const futureDate = moment().add(1, \"month\").startOf(\"month\");\n        const dayToSelect = futureDate.format('D');\n\n        cy.get('button').click();\n        cy.get('button[title=\"Next month\"]').click();\n        cy.get('div[role=\"dialog\"]').contains(dayToSelect).first().click();\n\n        // Verify onChange was called with the correct date\n        cy.get('@onChange').should('have.been.called');\n    });\n\n    it('updates input value when date is selected', () => {\n        cy.get('input').click();\n\n        const futureDate = moment().add(1, \"month\").startOf(\"month\");\n        const dayToSelect = futureDate.startOf('day').valueOf();\n\n        cy.get('button').click();\n        cy.get('button[title=\"Next month\"]').click();\n        cy.get(`[data-timestamp=\"${dayToSelect}\"]`).click();\n\n        const expectedDate = futureDate.format('MM/DD/YYYY');\n        cy.get('input').should('have.value', expectedDate);\n    });\n\n    it('handles keyboard navigation', () => {\n        cy.get('button').click();\n\n        // Navigate using arrow keys\n        cy.get('div[role=\"dialog\"]').should('exist').type('{rightarrow}').type('{enter}');\n\n        // Verify a date was selected\n        cy.get('@onChange').should('have.been.called');\n    });\n\n    it('initializes with startValue', () => {\n        const expectedDate = moment().add(1, 'week').format('MM/DD/YYYY');\n\n        cy.mount(\n            <DatePicker\n                label='Test 
Date'\n                startValue={expectedDate}\n                input={{\n                    value: '',\n                    onChange: cy.stub(),\n                }}\n            />\n        );\n\n        cy.get('input').should('have.value', expectedDate);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/components/date-picker/date-picker.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useEffect } from 'react';\nimport { WrappedFieldProps } from 'redux-form';\nimport { FormControl } from '@mui/material';\nimport { LocalizationProvider } from '@mui/x-date-pickers/LocalizationProvider';\nimport { DesktopDatePicker } from '@mui/x-date-pickers/DesktopDatePicker';\nimport moment from 'moment';\nimport { AdapterMoment } from '@mui/x-date-pickers/AdapterMoment';\n\ntype DatePickerProps = {\n    label: string;\n    startValue?: string;\n}\n\nexport function DatePicker({label, startValue, input}: DatePickerProps & WrappedFieldProps) {\n    // Set initial value on mount, and when input is cleared\n    useEffect(() => {\n        if (!input.value) {\n            input.onChange(getInitialValue(startValue, input.value));\n        }\n    }, [input.value]);\n\n    return (\n        <FormControl variant=\"standard\" fullWidth>\n            <LocalizationProvider dateAdapter={AdapterMoment}>\n                <DesktopDatePicker\n                    disablePast\n                    label={label}\n                    value={getInitialValue(startValue, input.value)}\n                    onChange={input.onChange}\n                    slotProps={{\n                        textField: {\n                            inputProps: {\n                                'data-cy': 'date-picker-input'\n                            }\n                        }\n                    }}\n                />\n            </LocalizationProvider>\n        </FormControl>\n    );\n}\n\n\nconst getInitialValue = (startValue: string | undefined, inputValue: string | undefined) => {\n    if (inputValue) { // Set by the user\n        return moment(inputValue);\n    }\n    if (startValue) { // Passed in as a prop\n        return moment(startValue);\n    }\n    // If no value is set yet and no startValue is passed in, use today\n    return moment();\n};"
  },
  {
    "path": "services/workbench2/src/components/default-code-snippet/default-code-snippet.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { ThemeProvider, Theme, StyledEngineProvider, createTheme } from '@mui/material/styles';\nimport { CodeSnippet, CodeSnippetDataProps } from 'components/code-snippet/code-snippet';\nimport { themeOptions } from 'common/custom-theme';\nimport { grey } from '@mui/material/colors';\n\n\ndeclare module '@mui/styles/defaultTheme' {\n  // eslint-disable-next-line @typescript-eslint/no-empty-interface\n  interface DefaultTheme extends Theme {}\n}\n\n\nconst theme = createTheme(Object.assign({}, themeOptions, {\n    components: {\n        MuiTypography: {\n            styleOverrides: {\n                body1: {\n                    color: grey[\"900\"]\n                },\n                root: {\n                    backgroundColor: grey[\"200\"]\n                },\n            }\n        }\n    },\n    typography: {\n        fontFamily: 'monospace',\n    }\n}));\n\nexport const DefaultCodeSnippet = (props: CodeSnippetDataProps) =>\n    <StyledEngineProvider injectFirst>\n        <ThemeProvider theme={theme}>\n            <CodeSnippet {...props} />\n        </ThemeProvider>\n    </StyledEngineProvider>;\n"
  },
  {
    "path": "services/workbench2/src/components/default-code-snippet/default-virtual-code-snippet.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { ThemeProvider, Theme, StyledEngineProvider, createTheme } from '@mui/material/styles';\nimport { VirtualCodeSnippet, CodeSnippetDataProps } from 'components/code-snippet/virtual-code-snippet';\nimport { themeOptions } from 'common/custom-theme';\nimport { grey } from '@mui/material/colors';\n\n\ndeclare module '@mui/styles/defaultTheme' {\n  // eslint-disable-next-line @typescript-eslint/no-empty-interface\n  interface DefaultTheme extends Theme {}\n}\n\n\nconst theme = createTheme(Object.assign({}, themeOptions, {\n    components: {\n        MuiTypography: {\n            styleOverrides: {\n                body1: {\n                    color: grey[\"900\"]\n                },\n                root: {\n                    backgroundColor: grey[\"200\"]\n                },\n            }\n        }\n    },\n    typography: {\n        fontFamily: 'monospace',\n    }\n}));\n\nexport const DefaultVirtualCodeSnippet = (props: CodeSnippetDataProps) =>\n    <StyledEngineProvider injectFirst>\n        <ThemeProvider theme={theme}>\n            <VirtualCodeSnippet {...props} />\n        </ThemeProvider>\n    </StyledEngineProvider>;\n"
  },
  {
    "path": "services/workbench2/src/components/default-view/default-view.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from '../../common/custom-theme';\nimport { Typography } from '@mui/material';\nimport { IconType } from '../icon/icon';\nimport classnames from \"classnames\";\n\ntype CssRules = 'root' | 'icon' | 'message';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        textAlign: 'center'\n    },\n    icon: {\n        color: theme.palette.grey[\"500\"],\n        fontSize: '4.5rem'\n    },\n    message: {\n        color: theme.palette.grey[\"500\"]\n    }\n});\n\nexport interface DefaultViewDataProps {\n    classRoot?: string;\n    messages: string[];\n    filtersApplied?: boolean;\n    classMessage?: string;\n    icon?: IconType;\n    classIcon?: string;\n}\n\ntype DefaultViewProps = DefaultViewDataProps & WithStyles<CssRules>;\n\nexport const DefaultView = withStyles(styles)(\n    ({ classes, classRoot, messages, classMessage, icon: Icon, classIcon }: DefaultViewProps) =>\n        <Typography className={classnames([classes.root, classRoot])} component=\"div\">\n            {Icon && <Icon className={classnames([classes.icon, classIcon])} />}\n            {messages.map((msg: string, index: number) => {\n                return <Typography key={index}\n                    data-cy='default-view'\n                    className={classnames([classes.message, classMessage])}>{msg}</Typography>;\n            })}\n        </Typography>\n);\n"
  },
  {
    "path": "services/workbench2/src/components/details-attribute/details-attribute.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect, DispatchProp } from 'react-redux';\nimport Typography from '@mui/material/Typography';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { Tooltip } from '@mui/material';\nimport { CopyIcon } from 'components/icon/icon';\nimport CopyToClipboard from 'react-copy-to-clipboard';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport classnames from \"classnames\";\nimport { Link } from 'react-router-dom';\nimport { RootState } from \"store/store\";\nimport { FederationConfig, getNavUrl } from \"routes/routes\";\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\n\ntype CssRules = 'attribute' | 'label' | 'value' | 'lowercaseValue' | 'link' | 'copyIcon';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    attribute: {\n        marginBottom: \".6 rem\"\n    },\n    label: {\n        boxSizing: 'border-box',\n        color: theme.palette.grey[\"600\"],\n        width: '100%',\n        marginTop: \"0.4em\",\n    },\n    value: {\n        boxSizing: 'border-box',\n        alignItems: 'flex-start'\n    },\n    lowercaseValue: {\n        textTransform: 'lowercase'\n    },\n    link: {\n        color: theme.palette.primary.main,\n        textDecoration: 'none',\n        overflowWrap: 'break-word',\n        cursor: 'pointer'\n    },\n    copyIcon: {\n        marginLeft: theme.spacing(1),\n        color: theme.palette.grey[\"600\"],\n        cursor: 'pointer',\n        display: 'inline',\n        '& svg': {\n            fontSize: '1rem'\n        }\n    }\n});\n\ninterface DetailsAttributeDataProps {\n    label: string;\n    button?: React.ReactNode;\n    classLabel?: string;\n    value?: React.ReactNode;\n    classValue?: string;\n    lowercaseValue?: boolean;\n    link?: string;\n    children?: React.ReactNode;\n    onValueClick?: () => void;\n    linkToUuid?: string;\n    copyValue?: string;\n    uuidEnhancer?: Function;\n}\n\ntype DetailsAttributeProps = DetailsAttributeDataProps & WithStyles<CssRules> & FederationConfig & DispatchProp;\n\nconst mapStateToProps = ({ auth }: RootState): FederationConfig => ({\n    localCluster: auth.localCluster,\n    remoteHostsConfig: auth.remoteHostsConfig,\n    sessions: auth.sessions\n});\n\nexport const DetailsAttribute = connect(mapStateToProps)(withStyles(styles)(\n    class extends React.Component<DetailsAttributeProps> {\n\n        onCopy = (message: string) => {\n            this.props.dispatch(snackbarActions.OPEN_SNACKBAR({\n                message,\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS\n            }));\n        }\n\n        render() {\n            const { uuidEnhancer, link, value, classes, linkToUuid,\n                localCluster, remoteHostsConfig, sessions } = this.props;\n            let valueNode: React.ReactNode;\n\n            if (linkToUuid) {\n                const uuid = uuidEnhancer ? 
uuidEnhancer(linkToUuid) : linkToUuid;\n                const linkUrl = getNavUrl(linkToUuid || \"\", { localCluster, remoteHostsConfig, sessions });\n                if (linkUrl[0] === '/') {\n                    valueNode = <Link to={linkUrl} className={classes.link}>{uuid}</Link>;\n                } else {\n                    valueNode = <a href={linkUrl} className={classes.link} target='_blank' rel=\"noopener noreferrer\">{uuid}</a>;\n                }\n            } else if (link) {\n                valueNode = <a href={link} className={classes.link} target='_blank' rel=\"noopener noreferrer\">{value}</a>;\n            } else {\n                valueNode = value;\n            }\n\n            return <DetailsAttributeComponent {...this.props} value={valueNode} onCopy={this.onCopy} />;\n        }\n    }\n));\n\ninterface DetailsAttributeComponentProps {\n    value: React.ReactNode;\n    onCopy?: (msg: string) => void;\n}\n\nexport const DetailsAttributeComponent = withStyles(styles)(\n    (props: DetailsAttributeDataProps & WithStyles<CssRules> & DetailsAttributeComponentProps) =>\n        <Typography component=\"div\" className={props.classes.attribute} data-cy={`details-panel-${props.label.toLowerCase()}`}>\n            <Typography component=\"div\" className={classnames([props.classes.label, props.classLabel])}>{props.label}{props.button}</Typography>\n            <Typography\n                onClick={props.onValueClick}\n                component=\"div\"\n                data-cy=\"details-attribute-value\"\n                className={classnames([props.classes.value, props.classValue, { [props.classes.lowercaseValue]: props.lowercaseValue }])}>\n                {props.value}\n                {props.children}\n                {(props.linkToUuid || props.copyValue) && props.onCopy && <Tooltip title=\"Copy link to clipboard\">\n                    <span className={props.classes.copyIcon}>\n                        <CopyToClipboard text={props.linkToUuid || props.copyValue || \"\"} onCopy={() => props.onCopy!(\"Copied\")}>\n                            <CopyIcon />\n                        </CopyToClipboard>\n                    </span>\n                </Tooltip>}\n            </Typography>\n        </Typography>);\n"
  },
  {
    "path": "services/workbench2/src/components/dialog-actions/dialog-actions.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { DialogActions as MuiDialogActions } from '@mui/material/';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport withStyles from '@mui/styles/withStyles';\n\nconst styles: CustomStyleRulesCallback<'root'> = theme => {\n    const margin = theme.spacing(3);\n    return {\n        root: {\n            marginRight: margin,\n            marginBottom: margin,\n            marginLeft: margin,\n        },\n    };\n};\nexport const DialogActions = withStyles(styles)(MuiDialogActions);\n"
  },
  {
    "path": "services/workbench2/src/components/dialog-form/dialog-form.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useEffect } from \"react\"\nimport { Button, Dialog, DialogActions } from \"@mui/material\"\nimport withStyles, { WithStyles } from \"@mui/styles/withStyles/withStyles\";\nimport { CustomStyleRulesCallback } from \"common/custom-theme\";\nimport { CircularSuspense } from \"components/loading/circular-suspense\";\n\ntype CssRules = \"paper\" | \"root\" | \"actions\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme) => ({\n    root: {\n        fontSize: '0.875rem',\n    },\n    paper: {\n        width: '800px',\n    },\n    actions: {\n        paddingTop: 0,\n        paddingRight: theme.spacing(2),\n        paddingBottom: theme.spacing(2),\n    }\n})\n\ntype DialogFormProps = WithStyles<CssRules> & {\n    open: boolean;\n    fields: React.ReactNode;\n    submitLabel?: string;\n    formErrors: string[];\n    submitDisabled?: boolean;\n    isSubmitting?: boolean;\n    onSubmit: (data: any) => void;\n    closeDialog: () => void;\n    clearFormValues: () => void;\n}\n\nexport const DialogForm = withStyles(styles)((props: DialogFormProps) => {\n    const { open, fields, submitLabel, classes, formErrors, submitDisabled = false, isSubmitting = false, onSubmit, closeDialog, clearFormValues } = props;\n\n    useEffect(() => {\n        if (!open) {\n            clearFormValues();\n        }\n    }, [open]);\n\n\tconst handleClose = (reason?: string) => {\n\t\tif (reason === 'backdropClick' || reason === 'escapeKeyDown') {\n\t\t\treturn\n\t\t}\n\t\tprops.closeDialog()\n\t}\n\n    return (\n        <Dialog\n            data-cy=\"form-dialog\"\n            open={open}\n            onClose={(_, reason) => handleClose(reason)}\n\t\t\tfullWidth\n\t\t\tmaxWidth={false}\n\t\t\tclassName={classes.root}\n            PaperProps={{\n                component: 'form',\n                className: classes.paper,\n                onSubmit: onSubmit,\n            }}\n        >\n            {fields}\n            <DialogActions className={classes.actions}>\n                <Button data-cy=\"form-cancel-btn\" onClick={closeDialog}>Cancel</Button>\n                <CircularSuspense\n                    showElement={!isSubmitting}\n                    element={<Button data-cy=\"form-submit-btn\" disabled={submitDisabled || formErrors.length > 0} type=\"submit\">\n                                {submitLabel && submitLabel.length > 0 ? submitLabel : \"Submit\"}\n                            </Button>}\n                />\n            </DialogActions>\n        </Dialog>\n    )\n})"
  },
  {
    "path": "services/workbench2/src/components/dialog-form/dialog-text-field.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useState, useEffect } from \"react\";\nimport classNames from \"classnames\";\nimport RichTextEditor from 'react-rte';\nimport { TextField, Typography } from \"@mui/material\";\nimport { getFieldErrors, Validator } from \"validators/validators\";\nimport { CustomStyleRulesCallback } from \"common/custom-theme\";\nimport { ArvadosTheme } from \"common/custom-theme\";\nimport { WithStyles } from \"@mui/styles/withStyles/withStyles\";\nimport withStyles from '@mui/styles/withStyles';\n\ntype RichTextCssRules = 'textField' | 'rte' | 'errorMessage' | 'redBorder';\n\nconst richTextStyles: CustomStyleRulesCallback<RichTextCssRules> = (theme: ArvadosTheme) => ({\n    textField: {\n        marginBottom: theme.spacing(1)\n    },\n    rte: {\n        fontFamily: 'Arial',\n        '& a': {\n            textDecoration: 'none',\n            color: theme.palette.primary.main,\n            '&:hover': {\n                cursor: 'pointer',\n                textDecoration: 'underline'\n            }\n        }\n    },\n    errorMessage: {\n        color: theme.palette.error.main,\n        fontSize: '0.78rem',\n        marginTop: '0.25rem',\n    },\n    redBorder: {\n        border: `1px solid ${theme.palette.error.main}`,\n    },\n});\n\ninterface DialogTextFieldProps {\n    disabled?: boolean;\n    label: string;\n    defaultValue: string;\n    validators: Validator[];\n    submitErr?: string;\n    setValue: React.Dispatch<React.SetStateAction<string>>;\n    setSubmitErr?: (errMsg: string) => void;\n}\n\nexport const DialogTextField = React.memo(({  disabled, label, defaultValue, validators, submitErr, setValue, setSubmitErr }: DialogTextFieldProps) => {\n    const [thisValue, setThisValue] = React.useState(defaultValue);\n    const errs = getFieldErrors(thisValue, validators)\n\n    React.useEffect(() => {\n            setValue(thisValue.trim())\n    }, [thisValue])\n\n    return (\n        <TextField\n            disabled={disabled}\n            value={thisValue}\n            onChange={(e) => {\n                setThisValue(e.target.value)\n                if (submitErr && setSubmitErr) setSubmitErr('')\n            }}\n            autoFocus\n            required\n            error={errs.length > 0 || !!submitErr}\n            helperText={errs.join(', ') || submitErr || ''}\n            margin=\"dense\"\n            id=\"name\"\n            name=\"name\"\n            type=\"text\"\n            fullWidth\n            variant=\"standard\"\n            label={label}\n            onBlur={() => setValue(thisValue)}\n        />\n    )\n})\n\ntype DialogRichTextFieldProps = {\n    label: string;\n    defaultValue: string;\n    validators: Validator[];\n    setValue: React.Dispatch<React.SetStateAction<string>>;\n}\n\nexport const DialogRichTextField = withStyles(richTextStyles)((props: WithStyles<RichTextCssRules> & DialogRichTextFieldProps) => {\n    const [rteValue, setRteValue] = useState(RichTextEditor.createValueFromString(props.defaultValue, 'html'));\n    const plainTextValue: string = rteValue.getEditorState().getCurrentContent().getPlainText().trim();\n    const htmlValue: string = plainTextValue ? 
rteValue.toString('html') : '';\n    const fieldErrors = getFieldErrors(plainTextValue, props.validators);\n\n        useEffect(() => {\n            props.setValue(htmlValue);\n        }, [htmlValue, props]);\n\n        const showError = () => fieldErrors.length > 0\n\n            return <div>\n                <RichTextEditor\n                    className={classNames(props.classes.rte, showError() && props.classes.redBorder)}\n                    value={rteValue}\n                    onChange={(value) => {\n                        setRteValue(value);\n                    }}\n                    placeholder={props.label} />\n                    {showError() &&\n                        <Typography>\n                            <span className={props.classes.errorMessage}>\n                                {fieldErrors.join(', ')}\n                            </span>\n                        </Typography>}\n                </div>;\n        }\n);\n"
  },
  {
    "path": "services/workbench2/src/components/dropdown-menu/dropdown-menu.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { DropdownMenu } from \"./dropdown-menu\";\nimport { MenuItem } from \"@mui/material\";\nimport { PaginationRightArrowIcon } from \"../icon/icon\";\n\ndescribe(\"<DropdownMenu />\", () => {\n    it(\"renders menu icon\", () => {\n        cy.mount(<DropdownMenu id=\"test-menu\" icon={<PaginationRightArrowIcon />} />);\n        cy.get('[data-cy=dropdown-menu-button]').should('have.length', 1);\n    });\n\n    it(\"opens and closes\", () => {\n        cy.mount(<DropdownMenu id=\"test-menu\" icon={<PaginationRightArrowIcon />} />);\n        cy.get('[data-cy=dropdown-menu-button]').click();\n        cy.get('ul[role=menu]').should('exist').click();\n        cy.get('ul[role=menu]').should('not.exist');\n    });\n\n    it(\"render menu items\", () => {\n        cy.mount(\n            <DropdownMenu id=\"test-menu\" icon={<PaginationRightArrowIcon />}>\n                <MenuItem>Item 1</MenuItem>\n                <MenuItem>Item 2</MenuItem>\n            </DropdownMenu>\n        );\n        cy.get('[data-cy=dropdown-menu-button]').click();\n        cy.get('li[role=menuitem]').should('have.length', 2);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/components/dropdown-menu/dropdown-menu.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport Menu from \"@mui/material/Menu\";\nimport IconButton from \"@mui/material/IconButton\";\nimport { PopoverOrigin } from \"@mui/material/Popover\";\nimport { Tooltip } from \"@mui/material\";\n\ninterface DropdownMenuProps {\n    id: string;\n    icon: React.ReactElement<any>;\n    title: string;\n}\n\ninterface DropdownMenuState {\n    anchorEl: any;\n}\n\nexport class DropdownMenu extends React.Component<DropdownMenuProps, DropdownMenuState> {\n    state = {\n        anchorEl: undefined,\n    };\n\n    transformOrigin: PopoverOrigin = {\n        vertical: 0,\n        horizontal: 0,\n    };\n\n    render() {\n        const { icon, id, children, title } = this.props;\n        const { anchorEl } = this.state;\n        return (\n            <div>\n                <Tooltip\n                    title={title}\n                    disableFocusListener>\n                    <IconButton\n                        data-cy=\"dropdown-menu-button\"\n                        aria-owns={anchorEl ? id : undefined}\n                        aria-haspopup=\"true\"\n                        color=\"inherit\"\n                        onClick={this.handleOpen}\n                        size=\"large\">\n                        {icon}\n                    </IconButton>\n                </Tooltip>\n                <Menu\n                    id={id}\n                    anchorEl={anchorEl}\n                    open={Boolean(anchorEl)}\n                    onClose={this.handleClose}\n                    onClick={this.handleClose}\n                    transformOrigin={this.transformOrigin}>\n                    {children}\n                </Menu>\n            </div>\n        );\n    }\n\n    handleClose = () => {\n        this.setState({ anchorEl: undefined });\n    };\n\n    handleOpen = (event: React.MouseEvent<HTMLButtonElement>) => {\n        this.setState({ anchorEl: event.currentTarget });\n    };\n}\n"
  },
  {
    "path": "services/workbench2/src/components/expand-chevron-right/expand-chevron-right.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { ChevronRight } from '@mui/icons-material';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { IconButton } from \"@mui/material\";\n\ntype CssRules = 'root' | 'default' | 'expanded';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '24px',\n        height: '24px',\n        cursor: 'pointer',\n        marginLeft: '.25em',\n        marginTop: \"-.2em\",\n    },\n    default: {\n        transition: 'all 0.1s ease',\n        transform: 'rotate(0deg)',\n    },\n    expanded: {\n        transition: 'all 0.1s ease',\n        transform: 'rotate(90deg)',\n    },\n});\n\nexport interface ExpandChevronRightDataProps {\n    expanded: boolean;\n    onClick?: () => void;\n}\n\ntype ExpandChevronRightProps = ExpandChevronRightDataProps & WithStyles<CssRules>;\n\nexport const ExpandChevronRight = withStyles(styles)(\n    class extends React.Component<ExpandChevronRightProps, {}> {\n        render() {\n            const { classes, expanded } = this.props;\n            return (\n                <span onClick={this.props.onClick}>\n                    <IconButton className={classes.root}>\n                        <ChevronRight className={expanded ? classes.expanded : classes.default} />\n                    </IconButton>\n                </span>\n            );\n        }\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/components/file-tree/file-thumbnail.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { FileThumbnail } from \"./file-thumbnail\";\nimport { CollectionFileType } from '../../models/collection-file';\nimport { Provider } from \"react-redux\";\nimport { combineReducers, createStore } from \"redux\";\nimport { ThemeProvider } from \"@mui/material\";\nimport { CustomTheme } from \"common/custom-theme\";\n\nlet store;\n\ndescribe(\"<FileThumbnail />\", () => {\n    let file;\n\n    beforeEach(() => {\n        const initialAuthState = {\n            config: {\n                keepWebServiceUrl: 'http://example.com/',\n                keepWebInlineServiceUrl: 'http://*.collections.example.com/',\n            }\n        }\n        store = createStore(combineReducers({\n            auth: (state= initialAuthState, action) => state,\n        }));\n\n        file = {\n            name: 'test-image.jpg',\n            type: CollectionFileType.FILE,\n            url: 'http://example.com/c=zzzzz-4zz18-0123456789abcde/t=v2/zzzzz-gj3su-0123456789abcde/xxxxxxtokenxxxxx/test-image.jpg',\n            size: 300\n        };\n    });\n\n    it(\"renders file thumbnail with proper src\", () => {\n        cy.mount(\n            <Provider store={store}>\n              <ThemeProvider theme={CustomTheme}>\n                <FileThumbnail file={file} />\n              </ThemeProvider>\n            </Provider>);\n        cy.get('img').should('have.attr', 'src', 'http://zzzzz-4zz18-0123456789abcde.collections.example.com/test-image.jpg?api_token=v2/zzzzz-gj3su-0123456789abcde/xxxxxxtokenxxxxx');\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/components/file-tree/file-thumbnail.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport isImage from 'is-image';\nimport { WithStyles, withStyles } from '@mui/styles';\nimport { FileTreeData } from 'components/file-tree/file-tree-data';\nimport { CollectionFileType } from 'models/collection-file';\nimport { getInlineFileUrl, sanitizeToken } from \"views-components/context-menu/actions/helpers\";\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { Styles } from '@mui/styles';\n\ninterface FileThumbnailProps {\n    file: FileTreeData;\n}\n\nexport const FileThumbnail =\n    ({ file }: FileThumbnailProps) =>\n        file.type === CollectionFileType.FILE && isImage(file.name)\n            ? <ImageFileThumbnail file={file} />\n            : null;\n\ntype ImageFileThumbnailCssRules = Styles<any, any, 'thumbnail'>;\n\nconst imageFileThumbnailStyle = withStyles<ImageFileThumbnailCssRules>(theme => ({\n    thumbnail: {\n        maxWidth: 250,\n        margin: `${theme.spacing(1)} 0`,\n    }\n}));\n\ninterface ImageFileThumbnailProps {\n    keepWebServiceUrl: string;\n    keepWebInlineServiceUrl: string;\n}\n\nconst mapStateToProps = ({ auth }: RootState): ImageFileThumbnailProps => ({\n    keepWebServiceUrl: auth.config.keepWebServiceUrl,\n    keepWebInlineServiceUrl: auth.config.keepWebInlineServiceUrl,\n});\n\nconst ImageFileThumbnail = connect(mapStateToProps)(imageFileThumbnailStyle(\n    ({ classes, file, keepWebServiceUrl, keepWebInlineServiceUrl }: WithStyles<ImageFileThumbnailCssRules> & FileThumbnailProps & ImageFileThumbnailProps) =>\n        <img\n            className={classes.thumbnail}\n            alt={file.name}\n            src={sanitizeToken(getInlineFileUrl(file.url, keepWebServiceUrl, keepWebInlineServiceUrl))} />\n));\n"
  },
  {
    "path": "services/workbench2/src/components/file-tree/file-tree-data.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport interface FileTreeData {\n    name: string;\n    type: string;\n    url: string;\n    size?: number;\n}\n"
  },
  {
    "path": "services/workbench2/src/components/file-tree/file-tree-item.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { DirectoryIcon, DefaultIcon, FileIcon } from \"../icon/icon\";\n\nexport const getIcon = (type: string) => {\n    switch (type) {\n        case 'directory':\n            return DirectoryIcon;\n        case 'file':\n            return FileIcon;\n        default:\n            return DefaultIcon;\n    }\n};\n\n"
  },
  {
    "path": "services/workbench2/src/components/file-upload/file-upload.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport classnames from 'classnames';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport {\n    Grid,\n    Table,\n    TableBody,\n    TableCell,\n    TableHead,\n    TableRow,\n    Typography,\n    IconButton,\n} from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { CloudUploadIcon, RemoveIcon } from \"../icon/icon\";\nimport { formatFileSize, formatProgress, formatUploadSpeed } from \"common/formatters\";\nimport { UploadFile } from 'store/file-uploader/file-uploader-actions';\nimport { UploadInput, FileUploadType } from 'components/file-upload/upload-input';\n\ntype CssRules = \"dropzoneWrapper\" | \"container\" | \"inputContainer\" | \"uploadIcon\"\n    | \"dropzoneBorder\" | \"dropzoneBorderLeft\" | \"dropzoneBorderRight\" | \"dropzoneBorderTop\" | \"dropzoneBorderBottom\"\n    | \"dropzoneBorderHorzActive\" | \"dropzoneBorderVertActive\" | \"deleteButton\" | \"deleteButtonDisabled\" | \"deleteIcon\" | \"fileTable\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    dropzoneWrapper: {\n        width: \"100%\",\n        height: \"200px\",\n        position: \"relative\",\n        border: \"1px solid rgba(0, 0, 0, 0.42)\",\n        boxSizing: 'border-box',\n        overflowY: \"scroll\",\n    },\n    dropzoneBorder: {\n        content: \"\",\n        position: \"absolute\",\n        transition: \"transform 200ms cubic-bezier(0.0, 0, 0.2, 1) 0ms\",\n        pointerEvents: \"none\",\n        backgroundColor: \"#6a1b9a\"\n    },\n    dropzoneBorderLeft: {\n        left: -1,\n        top: -1,\n        bottom: -1,\n        width: 2,\n        transform: \"scaleY(0)\",\n    },\n    dropzoneBorderRight: {\n        right: -1,\n        top: -1,\n        bottom: -1,\n        width: 2,\n        transform: \"scaleY(0)\",\n    },\n    dropzoneBorderTop: {\n        left: 0,\n        right: 0,\n        top: -1,\n        height: 2,\n        transform: \"scaleX(0)\",\n    },\n    dropzoneBorderBottom: {\n        left: 0,\n        right: 0,\n        bottom: -1,\n        height: 2,\n        transform: \"scaleX(0)\",\n    },\n    dropzoneBorderHorzActive: {\n        transform: \"scaleY(1)\"\n    },\n    dropzoneBorderVertActive: {\n        transform: \"scaleX(1)\"\n    },\n    container: {\n        height: \"100%\",\n        padding: '16px',\n        display: 'flex',\n        flexDirection: 'column',\n        alignItems: 'center',\n    },\n    inputContainer: {\n        width: '80%',\n        marginTop: '1rem',\n        display: 'flex',\n        alignItems: 'center',\n        justifyContent: 'space-around',\n    },\n    uploadIcon: {\n        verticalAlign: \"middle\"\n    },\n    deleteButton: {\n        cursor: \"pointer\"\n    },\n    deleteButtonDisabled: {\n        cursor: \"not-allowed\"\n    },\n    deleteIcon: {\n        marginLeft: \"-6px\"\n    },\n    fileTable: {\n        width: \"100%\",\n    },\n});\n\ninterface FileUploadPropsData {\n    files: UploadFile[];\n    disabled: boolean;\n    onDrop: (files: File[]) => void;\n    onDelete: (file: UploadFile) => void;\n}\n\ninterface FileUploadState {\n    focused: boolean;\n}\n\nexport type FileUploadProps = FileUploadPropsData & WithStyles<CssRules>;\n\nexport const FileUpload = withStyles(styles)(\n    class extends React.Component<FileUploadProps, FileUploadState> {\n        
constructor(props: FileUploadProps) {\n            super(props);\n            this.state = {\n                focused: false\n            };\n        }\n        onDelete = (event: React.MouseEvent<HTMLButtonElement>, file: UploadFile): void => {\n            const { onDelete, disabled } = this.props;\n\n            event.stopPropagation();\n\n            if (!disabled) {\n                onDelete(file);\n            }\n\n            // Poll until the uploader has registered a cancel token matching this\n            // file, then invoke and discard it so the in-flight request is aborted.\n            const interval = setInterval(() => {\n                const key = Object.keys((window as any).cancelTokens).find(key => key.indexOf(file.file.name) > -1);\n\n                if (key) {\n                    clearInterval(interval);\n                    (window as any).cancelTokens[key]();\n                    delete (window as any).cancelTokens[key];\n                }\n            }, 100);\n        }\n\n        fileInputRef = React.createRef<HTMLInputElement>();\n        folderInputRef = React.createRef<HTMLInputElement>();\n\n        handleDrop = async (event) => {\n            event.preventDefault();\n\n            const items = event.dataTransfer.items;\n            if (!items) return;\n\n            // Resolve dropped items to FileSystemEntry objects so directories can\n            // be walked recursively by traverseFileTree below.\n            const entries: any[] = [];\n\n            for (let i = 0; i < items.length; i++) {\n                const entry = items[i].webkitGetAsEntry?.() || items[i].getAsEntry?.();\n                if (entry) entries.push(entry);\n            }\n\n            const filesArrays = await Promise.all(entries.map((entry) => traverseFileTree(entry)));\n            const allFiles = filesArrays.flat();\n\n            this.props.onDrop(allFiles as any); // includes `file.relativePath` if needed\n        }\n\n        handleInputChange = (event) => {\n            const files = Array.from(event.target.files);\n            this.props.onDrop(files as any);\n        };\n\n        getInputProps = () => ({\n            disabled: this.props.disabled,\n            handleInputChange: this.handleInputChange,\n            onFocus: () => this.setState({ focused: true }),\n            onBlur: () => this.setState({ focused: false }),\n        })\n\n        render() {\n            const { classes, disabled, files } = this.props;\n            return (\n                <div\n                    className={\"file-upload-dropzone \" + classes.dropzoneWrapper}\n                    onDrop={this.handleDrop}\n                    onDragOver={(e) => e.preventDefault()}\n                    >\n                    <div className={classnames(classes.dropzoneBorder, classes.dropzoneBorderLeft, { [classes.dropzoneBorderHorzActive]: this.state.focused })} />\n                    <div className={classnames(classes.dropzoneBorder, classes.dropzoneBorderRight, { [classes.dropzoneBorderHorzActive]: this.state.focused })} />\n                    <div className={classnames(classes.dropzoneBorder, classes.dropzoneBorderTop, { [classes.dropzoneBorderVertActive]: this.state.focused })} />\n                    <div className={classnames(classes.dropzoneBorder, classes.dropzoneBorderBottom, { [classes.dropzoneBorderVertActive]: this.state.focused })} />\n                    <div\n                        onClick={() => {\n                            const el = document.getElementsByClassName(\"file-upload-dropzone\")[0];\n                            const inputs = el.getElementsByTagName(\"input\");\n                            if (inputs.length > 0) {\n                                inputs[0].focus();\n                            }\n                        }}\n                        data-cy=\"drag-and-drop\"\n
                    >\n                        {files.length === 0 &&\n                            <Grid container justifyContent=\"center\" alignItems=\"center\" className={classes.container}>\n                                <Grid item component={\"span\"}>\n                                    <Typography variant='subtitle1'>\n                                        <CloudUploadIcon className={classes.uploadIcon} /> Drag and drop data or click to browse\n                                    </Typography>\n                                </Grid>\n                                <Grid item component={\"div\"} className={classes.inputContainer}>\n                                    <UploadInput type={FileUploadType.FOLDER} inputRef={this.folderInputRef} {...this.getInputProps()} />\n                                    <UploadInput type={FileUploadType.FILE} inputRef={this.fileInputRef} {...this.getInputProps()} />\n                                </Grid>\n                            </Grid>}\n                        {files.length > 0 &&\n                            <Table className={classes.fileTable} stickyHeader>\n                                <TableHead>\n                                    <TableRow>\n                                        <TableCell>File name</TableCell>\n                                        <TableCell>File size</TableCell>\n                                        <TableCell>Upload speed</TableCell>\n                                        <TableCell>Upload progress</TableCell>\n                                        <TableCell>Delete</TableCell>\n                                    </TableRow>\n                                </TableHead>\n                                <TableBody>\n                                    {files.map(f =>\n                                        <TableRow key={f.id}>\n                                            <TableCell>{f.file.name}</TableCell>\n                                            <TableCell>{formatFileSize(f.file.size)}</TableCell>\n                                            <TableCell>{formatUploadSpeed(f.prevLoaded, f.loaded, f.prevTime, f.currentTime)}</TableCell>\n                                            <TableCell>{formatProgress(f.loaded, f.total)}</TableCell>\n                                            <TableCell>\n                                                <IconButton\n                                                    aria-label=\"Remove\"\n                                                    onClick={(event: React.MouseEvent<HTMLButtonElement, MouseEvent>) => this.onDelete(event, f)}\n                                                    className={disabled ? 
classnames(classes.deleteButtonDisabled, classes.deleteIcon) : classnames(classes.deleteButton, classes.deleteIcon)}\n                                                    size=\"large\">\n                                                    <RemoveIcon />\n                                                </IconButton>\n                                            </TableCell>\n                                        </TableRow>\n                                    )}\n                                </TableBody>\n                            </Table>\n                        }\n                    </div>\n                </div>\n            );\n        }\n    }\n);\n\n// Recursively walks a FileSystemEntry tree from a drag-and-drop event,\n// collecting File objects annotated with their path relative to the drop root.\nfunction traverseFileTree(item, path = '') {\n    return new Promise((resolve) => {\n        if (item.isFile) {\n            item.file((file) => {\n                file.relativePath = path + file.name;\n                resolve([file]);\n            });\n        } else if (item.isDirectory) {\n            const dirReader = item.createReader();\n            // Note: readEntries may return results in batches (Chrome caps each\n            // call at 100 entries), so very large directories can be truncated\n            // unless it is called repeatedly.\n            dirReader.readEntries(async (entries) => {\n                const files = await Promise.all(entries.map((entry) => traverseFileTree(entry, path + item.name + '/')));\n                resolve(files.flat());\n            });\n        } else {\n            // Neither file nor directory: resolve empty so Promise.all never hangs.\n            resolve([]);\n        }\n    });\n}\n"
  },
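  {
    "path": "services/workbench2/src/components/file-upload/file-upload.example.tsx",
    "content": "// Hypothetical wiring sketch, not a file from the Arvados tree: FileUpload is\n// purely presentational, so a parent supplies the UploadFile records and the\n// onDrop/onDelete callbacks. Only the fields the progress table above reads\n// are filled in; the cast hedges the rest of the UploadFile interface.\n\nimport React, { useState } from \"react\";\nimport { FileUpload } from \"components/file-upload/file-upload\";\nimport { UploadFile } from \"store/file-uploader/file-uploader-actions\";\n\nexport const FileUploadExample = () => {\n    const [files, setFiles] = useState<UploadFile[]>([]);\n\n    // NB: FileUpload's delete handler also polls a global cancelTokens registry\n    // that the real uploader service maintains; it is not set up here.\n    return (\n        <FileUpload\n            files={files}\n            disabled={false}\n            onDrop={(dropped) =>\n                setFiles(dropped.map((file, id) => ({\n                    id, // table rows are keyed by id\n                    file, // the browser File object\n                    prevLoaded: 0,\n                    loaded: 0,\n                    total: file.size,\n                    prevTime: Date.now(),\n                    currentTime: Date.now(),\n                } as unknown as UploadFile)))\n            }\n            onDelete={(upload) => setFiles(prev => prev.filter(u => u.id !== upload.id))}\n        />\n    );\n};\n"
  },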
  {
    "path": "services/workbench2/src/components/file-upload/upload-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { Box, Typography, IconButton } from '@mui/material';\nimport DriveFolderUploadIcon from '@mui/icons-material/DriveFolderUpload';\nimport UploadFileIcon from '@mui/icons-material/UploadFile';\nimport { ArvadosTheme } from 'common/custom-theme';\n\ntype CssRules = 'label' | 'icon';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    label: {\n        cursor: 'pointer',\n    },\n    icon: {\n        color: theme.customs.colors.grey900,\n    },\n});\n\nexport enum FileUploadType {\n    FOLDER = 'folder',\n    FILE = 'file',\n}\n\nexport type UploadInputProps = {\n    type: FileUploadType;\n    disabled: boolean;\n    inputRef: React.RefObject<HTMLInputElement>;\n    handleInputChange: (event: React.ChangeEvent<HTMLInputElement>) => void;\n    onFocus: () => void;\n    onBlur: () => void;\n};\n\nexport const UploadInput = withStyles(styles)(({ type, disabled, inputRef, handleInputChange, onFocus, onBlur, classes }: UploadInputProps & WithStyles<CssRules>) => {\n    return (\n        <label className={classes.label}>\n            <Box\n                display='flex'\n                flexDirection='column'\n                alignItems='center'\n                gap={1}\n            >\n                <IconButton\n                    component='span'\n                    sx={{\n                        width: 80,\n                        height: 80,\n                        borderRadius: 2,\n                        bgcolor: 'grey.100',\n                        '&:hover': { bgcolor: 'grey.200' },\n                        display: 'flex',\n                        alignItems: 'center',\n                        justifyContent: 'center',\n                    }}\n                >\n                    {type === FileUploadType.FOLDER ? <DriveFolderUploadIcon fontSize='large' className={classes.icon} /> : <UploadFileIcon fontSize='large' className={classes.icon} />}\n                </IconButton>\n                <Typography variant='body2'>{type === FileUploadType.FOLDER ? 'Upload Folder' : 'Upload Files'}</Typography>\n                {type === FileUploadType.FOLDER ? (\n                    <input\n                        data-cy=\"folder-upload-input\"\n                        type='file'\n                        ref={inputRef}\n                        disabled={disabled}\n                        onChange={handleInputChange}\n                        onFocus={onFocus}\n                        onBlur={onBlur}\n                        multiple\n                        hidden\n                        {...({ webkitDirectory: 'true', directory: 'true' } as any)}\n                    />\n                ) : (\n                    <input\n                        data-cy=\"file-upload-input\"\n                        type='file'\n                        ref={inputRef}\n                        disabled={disabled}\n                        onChange={handleInputChange}\n                        onFocus={onFocus}\n                        onBlur={onBlur}\n                        multiple\n                        hidden\n                    />\n                )}\n            </Box>\n        </label>\n    );\n});\n"
  },
  {
    "path": "services/workbench2/src/components/float-input/float-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Input } from '@mui/material';\nimport { InputProps } from '@mui/material/Input';\n\nexport class FloatInput extends React.Component<InputProps> {\n    state = {\n        endsWithDecimalSeparator: false,\n    };\n\n    handleChange = (event: React.ChangeEvent<HTMLInputElement>) => {\n        const { onChange = () => { return; } } = this.props;\n        const [, fraction] = event.target.value.split('.');\n        this.setState({ endsWithDecimalSeparator: fraction === '' });\n        const parsedValue = parseFloat(event.target.value).toString();\n        event.target.value = parsedValue;\n        onChange(event);\n    }\n\n    render() {\n        const parsedValue = parseFloat(typeof this.props.value === 'string' ? this.props.value : '');\n        const value = isNaN(parsedValue) ? '' : parsedValue.toString();\n        const props = {\n            ...this.props,\n            value: value + (this.state.endsWithDecimalSeparator ? '.' : ''),\n            onChange: this.handleChange,\n        };\n        return <Input {...props} />;\n    }\n}\n"
  },
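  {
    "path": "services/workbench2/src/components/float-input/float-input.example.tsx",
    "content": "// Hypothetical usage sketch, not a file from the Arvados tree: FloatInput\n// funnels every keystroke through parseFloat, so the parent state always holds\n// a canonical numeric string while the component itself keeps a just-typed\n// trailing decimal separator visible.\n\nimport React, { useState } from \"react\";\nimport { FloatInput } from \"components/float-input/float-input\";\n\nexport const QuotaInputExample = () => {\n    const [quota, setQuota] = useState(\"1.5\");\n\n    // onChange always receives parseFloat's canonical form (e.g. \"1.50\" collapses to \"1.5\")\n    return <FloatInput value={quota} onChange={(e) => setQuota(e.target.value)} />;\n};\n"
  },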
  {
    "path": "services/workbench2/src/components/form-dialog/form-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { InjectedFormProps } from 'redux-form';\nimport { Dialog, DialogActions, DialogContent, DialogTitle } from '@mui/material/';\nimport { CustomTheme as theme } from 'common/custom-theme';\nimport { Button, CircularProgress } from '@mui/material';\nimport { WithDialogProps } from 'store/dialog/with-dialog';\n\ntype CssRules = \"button\" | \"lastButton\" | \"form\" | \"formContainer\" | \"dialogTitle\" | \"progressIndicator\" | \"dialogActions\";\n\nconst styles: Record<CssRules, Record<string, string | number>>  = {\n    button: {\n        marginLeft: theme.spacing(1),\n    },\n    lastButton: {\n        marginLeft: theme.spacing(1),\n        marginRight: \"0\",\n    },\n    form: {\n        display: 'flex',\n        overflowY: 'auto',\n        flexDirection: 'column',\n        flex: '0 1 auto',\n    },\n    formContainer: {\n        display: \"flex\",\n        flexDirection: \"column\",\n        paddingBottom: \"0\",\n    },\n    dialogTitle: {\n        paddingTop: theme.spacing(1),\n        paddingBottom: theme.spacing(1),\n    },\n    progressIndicator: {\n        position: \"absolute\",\n        minWidth: \"20px\",\n    },\n    dialogActions: {\n        marginBottom: theme.spacing(1),\n        marginRight: theme.spacing(3),\n    }\n};\n\ninterface DialogProjectDataProps {\n    cancelLabel?: string;\n    dialogTitle: string;\n    formFields: React.ComponentType<InjectedFormProps<any> & WithDialogProps<any>>;\n    submitLabel?: string;\n    cancelCallback?: Function;\n    enableWhenPristine?: boolean;\n    doNotDisableCancel?: boolean;\n}\n\ntype DialogProjectProps = DialogProjectDataProps & WithDialogProps<{}> & InjectedFormProps<any>;\n\nexport const FormDialog = (props: DialogProjectProps) => {\n    \n    const handleClose = (ev, reason) => {\n        if (reason !== 'backdropClick') {\n            props.closeDialog();\n        }\n    }\n    \n    return <Dialog\n                open={props.open}\n                onClose={handleClose}\n                disableEscapeKeyDown={props.submitting}\n                fullWidth\n                scroll='paper'\n                maxWidth='md'>\n                <form data-cy='form-dialog' style={styles.form}>\n                    <DialogTitle style={styles.dialogTitle}>\n                        {props.dialogTitle}\n                    </DialogTitle>\n                    <DialogContent style={styles.formContainer}>\n                        <props.formFields {...props} />\n                    </DialogContent>\n                    <DialogActions style={styles.dialogActions}>\n                        <Button\n                            data-cy='form-cancel-btn'\n                            onClick={() => {\n                                props.closeDialog();\n\n                                if (props.cancelCallback) {\n                                    props.cancelCallback();\n                                    props.reset();\n                                    props.initialize({});\n                                }\n                            }}\n                            style={styles.button}\n                            color=\"primary\"\n                            disabled={props.doNotDisableCancel ? 
false : props.submitting}>\n                            {props.cancelLabel || 'Cancel'}\n                        </Button>\n                        <Button\n                            data-cy='form-submit-btn'\n                            type=\"submit\"\n                            onClick={props.handleSubmit}\n                            style={styles.lastButton}\n                            color=\"primary\"\n                            disabled={props.invalid || props.submitting || (props.pristine && !props.enableWhenPristine)}\n                            variant=\"contained\">\n                            {props.submitLabel || 'Submit'}\n                            {props.submitting && <CircularProgress size={20} style={styles.progressIndicator} />}\n                        </Button>\n                    </DialogActions>\n                </form>\n            </Dialog>\n};\n"
  },
  {
    "path": "services/workbench2/src/components/form-field/form-field.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { WrappedFieldProps, WrappedFieldInputProps } from 'redux-form';\nimport { FormGroup, FormLabel, FormHelperText } from '@mui/material';\n\ninterface FormFieldCustomProps {\n    children: <P>(props: WrappedFieldInputProps) => React.ReactElement<P>;\n    label?: string;\n    helperText?: string;\n    required?: boolean;\n}\n\nexport type FormFieldProps = FormFieldCustomProps & WrappedFieldProps;\n\nexport const FormField = ({ children, ...props }: FormFieldProps & WrappedFieldProps) => {\n    return (\n        <FormGroup>\n\n            <FormLabel\n                focused={props.meta.active}\n                required={props.required}\n                error={props.meta.touched && !!props.meta.error}>\n                {props.label}\n            </FormLabel>\n\n            { children(props.input) }\n\n            <FormHelperText error={props.meta.touched && !!props.meta.error}>\n                {\n                    props.meta.touched && props.meta.error\n                        ? props.meta.error\n                        : props.helperText\n                }\n            </FormHelperText>\n\n        </FormGroup>\n    );\n};\n"
  },
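  {
    "path": "services/workbench2/src/components/form-field/form-field.example.tsx",
    "content": "// Hypothetical usage sketch, not a file from the Arvados tree: FormField's\n// children prop is a render function receiving the wrapped input props, so any\n// control can be dropped in while label, error, and helper-text handling stay\n// shared. The field name and labels here are illustrative.\n\nimport React from \"react\";\nimport { Field, WrappedFieldInputProps } from \"redux-form\";\nimport { Input } from \"@mui/material\";\nimport { FormField } from \"components/form-field/form-field\";\n\nexport const SizeField = () => (\n    <Field name=\"size\" component={FormField} label=\"Size\" helperText=\"Size in GiB\" required>\n        {(input: WrappedFieldInputProps) => <Input {...input} fullWidth />}\n    </Field>\n);\n"
  },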
  {
    "path": "services/workbench2/src/components/icon/icon.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Badge, SvgIcon, Tooltip } from \"@mui/material\";\nimport Add from \"@mui/icons-material/Add\";\nimport ArrowBack from \"@mui/icons-material/ArrowBack\";\nimport ArrowDropDown from \"@mui/icons-material/ArrowDropDown\";\nimport Build from \"@mui/icons-material/Build\";\nimport Cached from \"@mui/icons-material/Cached\";\nimport DescriptionIcon from \"@mui/icons-material/Description\";\nimport ChevronLeft from \"@mui/icons-material/ChevronLeft\";\nimport CloudUpload from \"@mui/icons-material/CloudUpload\";\nimport Code from \"@mui/icons-material/Code\";\nimport Create from \"@mui/icons-material/Create\";\nimport ImportContacts from \"@mui/icons-material/ImportContacts\";\nimport ChevronRight from \"@mui/icons-material/ChevronRight\";\nimport Close from \"@mui/icons-material/Close\";\nimport ContentCopy from \"@mui/icons-material/ContentCopy\";\nimport FileCopyOutlined from \"@mui/icons-material/FileCopyOutlined\";\nimport CreateNewFolder from \"@mui/icons-material/CreateNewFolder\";\nimport Delete from \"@mui/icons-material/Delete\";\nimport DeviceHub from \"@mui/icons-material/DeviceHub\";\nimport Edit from \"@mui/icons-material/Edit\";\nimport ErrorRoundedIcon from \"@mui/icons-material/ErrorRounded\";\nimport ExpandMoreIcon from \"@mui/icons-material/ExpandMore\";\nimport FlipToFront from \"@mui/icons-material/FlipToFront\";\nimport Folder from \"@mui/icons-material/Folder\";\nimport FolderShared from \"@mui/icons-material/FolderShared\";\nimport Pageview from \"@mui/icons-material/Pageview\";\nimport GetApp from \"@mui/icons-material/GetApp\";\nimport Help from \"@mui/icons-material/Help\";\nimport HelpOutline from \"@mui/icons-material/HelpOutline\";\nimport History from \"@mui/icons-material/History\";\nimport Inbox from \"@mui/icons-material/Inbox\";\nimport Memory from \"@mui/icons-material/Memory\";\nimport MoveToInbox from \"@mui/icons-material/MoveToInbox\";\nimport Info from \"@mui/icons-material/Info\";\nimport Input from \"@mui/icons-material/Input\";\nimport InsertDriveFile from \"@mui/icons-material/InsertDriveFile\";\nimport LastPage from \"@mui/icons-material/LastPage\";\nimport LibraryBooks from \"@mui/icons-material/LibraryBooks\";\nimport ListAlt from \"@mui/icons-material/ListAlt\";\nimport Menu from \"@mui/icons-material/Menu\";\nimport MoreVert from \"@mui/icons-material/MoreVert\";\nimport MoreHoriz from \"@mui/icons-material/MoreHoriz\";\nimport Mail from \"@mui/icons-material/Mail\";\nimport Notifications from \"@mui/icons-material/Notifications\";\nimport OpenInNew from \"@mui/icons-material/OpenInNew\";\nimport People from \"@mui/icons-material/People\";\nimport Person from \"@mui/icons-material/Person\";\nimport PersonAdd from \"@mui/icons-material/PersonAdd\";\nimport PlayArrow from \"@mui/icons-material/PlayArrow\";\nimport Public from \"@mui/icons-material/Public\";\nimport RateReview from \"@mui/icons-material/RateReview\";\nimport RestoreFromTrash from \"@mui/icons-material/History\";\nimport Search from \"@mui/icons-material/Search\";\nimport SettingsApplications from \"@mui/icons-material/SettingsApplications\";\nimport SettingsEthernet from \"@mui/icons-material/SettingsEthernet\";\nimport Settings from \"@mui/icons-material/Settings\";\nimport Star from \"@mui/icons-material/Star\";\nimport StarBorder from \"@mui/icons-material/StarBorder\";\nimport Warning from 
\"@mui/icons-material/Warning\";\nimport VpnKey from \"@mui/icons-material/VpnKey\";\nimport LinkOutlined from \"@mui/icons-material/LinkOutlined\";\nimport RemoveRedEye from \"@mui/icons-material/RemoveRedEye\";\nimport Computer from \"@mui/icons-material/Computer\";\nimport WrapText from \"@mui/icons-material/WrapText\";\nimport TextIncrease from \"@mui/icons-material/ZoomIn\";\nimport TextDecrease from \"@mui/icons-material/ZoomOut\";\nimport FullscreenSharp from \"@mui/icons-material/FullscreenSharp\";\nimport FullscreenExitSharp from \"@mui/icons-material/FullscreenExitSharp\";\nimport ExitToApp from \"@mui/icons-material/ExitToApp\";\nimport CheckCircleOutline from \"@mui/icons-material/CheckCircleOutline\";\nimport RemoveCircleOutline from \"@mui/icons-material/RemoveCircleOutline\";\nimport NotInterested from \"@mui/icons-material/NotInterested\";\nimport Image from \"@mui/icons-material/Image\";\nimport Stop from \"@mui/icons-material/Stop\";\nimport FileCopy from \"@mui/icons-material/FileCopy\";\nimport ShowChart from \"@mui/icons-material/ShowChart\";\n\n// Import FontAwesome icons\nimport { library } from \"@fortawesome/fontawesome-svg-core\";\nimport { faPencilAlt, faSlash, faUsers, faEllipsisH } from \"@fortawesome/free-solid-svg-icons\";\nimport { FormatAlignLeft } from \"@mui/icons-material\";\nlibrary.add(faPencilAlt, faSlash, faUsers, faEllipsisH);\n\nexport const FreezeIcon: IconType = (props: any) => (\n    <SvgIcon {...props}>\n        <path d=\"M20.79,13.95L18.46,14.57L16.46,13.44V10.56L18.46,9.43L20.79,10.05L21.31,8.12L19.54,7.65L20,5.88L18.07,5.36L17.45,7.69L15.45,8.82L13,7.38V5.12L14.71,3.41L13.29,2L12,3.29L10.71,2L9.29,3.41L11,5.12V7.38L8.5,8.82L6.5,7.69L5.92,5.36L4,5.88L4.47,7.65L2.7,8.12L3.22,10.05L5.55,9.43L7.55,10.56V13.45L5.55,14.58L3.22,13.96L2.7,15.89L4.47,16.36L4,18.12L5.93,18.64L6.55,16.31L8.55,15.18L11,16.62V18.88L9.29,20.59L10.71,22L12,20.71L13.29,22L14.7,20.59L13,18.88V16.62L15.5,15.17L17.5,16.3L18.12,18.63L20,18.12L19.53,16.35L21.3,15.88L20.79,13.95M9.5,10.56L12,9.11L14.5,10.56V13.44L12,14.89L9.5,13.44V10.56Z\" />\n    </SvgIcon>\n);\n\nexport const UnfreezeIcon: IconType = (props: any) => (\n    <SvgIcon {...props}>\n        <path d=\"M11 5.12L9.29 3.41L10.71 2L12 3.29L13.29 2L14.71 3.41L13 5.12V7.38L15.45 8.82L17.45 7.69L18.07 5.36L20 5.88L19.54 7.65L21.31 8.12L20.79 10.05L18.46 9.43L16.46 10.56V13.26L14.5 11.3V10.56L12.74 9.54L10.73 7.53L11 7.38V5.12M18.46 14.57L16.87 13.67L19.55 16.35L21.3 15.88L20.79 13.95L18.46 14.57M13 16.62V18.88L14.7 20.59L13.29 22L12 20.71L10.71 22L9.29 20.59L11 18.88V16.62L8.55 15.18L6.55 16.31L5.93 18.64L4 18.12L4.47 16.36L2.7 15.89L3.22 13.96L5.55 14.58L7.55 13.45V10.56L5.55 9.43L3.22 10.05L2.7 8.12L4.47 7.65L4 5.89L1.11 3L2.39 1.73L22.11 21.46L20.84 22.73L14.1 16L13 16.62M12 14.89L12.63 14.5L9.5 11.39V13.44L12 14.89Z\" />\n    </SvgIcon>\n);\n\nexport const PendingIcon = (props: any) => (\n    <span {...props}>\n        <span className=\"fas fa-ellipsis-h\" />\n    </span>\n);\n\nexport const ReadOnlyIcon = (props: any) => (\n    <span {...props}>\n        <div className=\"fa-layers fa-1x fa-fw\">\n            <span\n                className=\"fas fa-slash\"\n                data-fa-mask=\"fas fa-pencil-alt\"\n                data-fa-transform=\"down-1.5\"\n            />\n            <span className=\"fas fa-slash\" />\n        </div>\n    </span>\n);\n\nexport const GroupsIcon = (props: any) => (\n    <span {...props}>\n        <span className=\"fas fa-users\" />\n    </span>\n);\n\nexport const 
CollectionOldVersionIcon = (props: any) => (\n    <Tooltip title=\"Old version\">\n        <Badge badgeContent={<History fontSize=\"small\" />}>\n            <CollectionIcon {...props} />\n        </Badge>\n    </Tooltip>\n);\n\n// https://pictogrammers.com/library/mdi/icon/ship-wheel/\nexport const WheelIcon = (props: any) => (\n    <SvgIcon {...props} style={{fontSize: '1.6rem'}}>\n        <path d=\"M2,11L4.05,11.1C4.3,8.83 5.5,6.85 7.25,5.56L6.13,3.84C5.86,3.36 6,2.75 6.5,2.47C7,2.2 7.59,2.36 7.87,2.84L8.8,4.66C9.78,4.24 10.86,4 12,4C13.14,4 14.22,4.24 15.2,4.66L16.13,2.84C16.41,2.36 17,2.2 17.5,2.47C18,2.75 18.14,3.36 17.87,3.84L16.75,5.56C18.5,6.85 19.7,8.83 19.95,11.1L22,11A1,1 0 0,1 23,12A1,1 0 0,1 22,13L19.95,12.9C19.7,15.17 18.5,17.15 16.75,18.44L17.87,20.16C18.14,20.64 18,21.25 17.5,21.53C17,21.8 16.41,21.64 16.13,21.16L15.2,19.34C14.22,19.76 13.14,20 12,20C10.86,20 9.78,19.76 8.8,19.34L7.87,21.16C7.59,21.64 7,21.8 6.5,21.53C6,21.25 5.86,20.64 6.13,20.16L7.25,18.44C5.5,17.15 4.3,15.17 4.05,12.9L2,13A1,1 0 0,1 1,12A1,1 0 0,1 2,11M9.07,11.35C9.2,10.74 9.53,10.2 10,9.79L8.34,7.25C7.11,8.19 6.27,9.6 6.05,11.2L9.07,11.35M12,9C12.32,9 12.62,9.05 12.9,9.14L14.28,6.45C13.58,6.16 12.81,6 12,6C11.19,6 10.42,6.16 9.72,6.45L11.1,9.14C11.38,9.05 11.68,9 12,9M14.93,11.35L17.95,11.2C17.73,9.6 16.89,8.19 15.66,7.25L14,9.79C14.47,10.2 14.8,10.74 14.93,11.35M14.93,12.65C14.8,13.26 14.47,13.8 14,14.21L15.66,16.75C16.89,15.81 17.73,14.4 17.95,12.8L14.93,12.65M12,15C11.68,15 11.38,14.95 11.09,14.86L9.72,17.55C10.42,17.84 11.19,18 12,18C12.81,18 13.58,17.84 14.28,17.55L12.91,14.86C12.62,14.95 12.32,15 12,15M9.07,12.65L6.05,12.8C6.27,14.4 7.11,15.81 8.34,16.75L10,14.21C9.53,13.8 9.2,13.26 9.07,12.65Z\" />\n    </SvgIcon>\n);\n\n// https://materialdesignicons.com/icon/image-off\nexport const ImageOffIcon = (props: any) => (\n    <SvgIcon {...props}>\n        <path d=\"M21 17.2L6.8 3H19C20.1 3 21 3.9 21 5V17.2M20.7 22L19.7 21H5C3.9 21 3 20.1 3 19V4.3L2 3.3L3.3 2L22 20.7L20.7 22M16.8 18L12.9 14.1L11 16.5L8.5 13.5L5 18H16.8Z\" />\n    </SvgIcon>\n);\n\n// https://materialdesignicons.com/icon/inbox-arrow-up\nexport const OutputIcon: IconType = (props: any) => (\n    <SvgIcon {...props}>\n        <path d=\"M14,14H10V11H8L12,7L16,11H14V14M16,11M5,15V5H19V15H15A3,3 0 0,1 12,18A3,3 0 0,1 9,15H5M19,3H5C3.89,3 3,3.9 3,5V19A2,2 0 0,0 5,21H19A2,2 0 0,0 21,19V5A2,2 0 0,0 19,3\" />\n    </SvgIcon>\n);\n\n// https://pictogrammers.com/library/mdi/icon/file-move/\nexport const FileMoveIcon: IconType = (props: any) => (\n    <SvgIcon {...props}>\n        <path d=\"M14,17H18V14L23,18.5L18,23V20H14V17M13,9H18.5L13,3.5V9M6,2H14L20,8V12.34C19.37,12.12 18.7,12 18,12A6,6 0 0,0 12,18C12,19.54 12.58,20.94 13.53,22H6C4.89,22 4,21.1 4,20V4A2,2 0 0,1 6,2Z\" />\n    </SvgIcon>\n);\n\n// https://pictogrammers.com/library/mdi/icon/checkbox-multiple-outline/\nexport const CheckboxMultipleOutline: IconType = (props: any) => (\n    <SvgIcon {...props}>\n        <path d=\"M20,2H8A2,2 0 0,0 6,4V16A2,2 0 0,0 8,18H20A2,2 0 0,0 22,16V4A2,2 0 0,0 20,2M20,16H8V4H20V16M16,20V22H4A2,2 0 0,1 2,20V7H4V20H16M18.53,8.06L17.47,7L12.59,11.88L10.47,9.76L9.41,10.82L12.59,14L18.53,8.06Z\" />\n    </SvgIcon>\n);\n\n// https://pictogrammers.com/library/mdi/icon/checkbox-multiple-blank-outline/\nexport const CheckboxMultipleBlankOutline: IconType = (props: any) => (\n    <SvgIcon {...props}>\n        <path d=\"M20,16V4H8V16H20M22,16A2,2 0 0,1 20,18H8C6.89,18 6,17.1 6,16V4C6,2.89 6.89,2 8,2H20A2,2 0 0,1 22,4V16M16,20V22H4A2,2 0 0,1 2,20V7H4V20H16Z\" />\n    
</SvgIcon>\n);\n\n// https://pictogrammers.com/library/mdi/icon/console/\nexport const TerminalIcon: IconType = (props: any) => (\n    <SvgIcon {...props}>\n        <path d=\"M20,19V7H4V19H20M20,3A2,2 0 0,1 22,5V19A2,2 0 0,1 20,21H4A2,2 0 0,1 2,19V5C2,3.89 2.9,3 4,3H20M13,17V15H18V17H13M9.58,13L5.57,9H8.4L11.7,12.3C12.09,12.69 12.09,13.33 11.7,13.72L8.42,17H5.59L9.58,13Z\" />\n    </SvgIcon>\n)\n\n// https://pictogrammers.com/library/mdi/icon/chevron-double-right/\nexport const DoubleRightArrows: IconType = (props: any) => (\n    <SvgIcon {...props}>\n        <path d=\"M5.59,7.41L7,6L13,12L7,18L5.59,16.59L10.17,12L5.59,7.41M11.59,7.41L13,6L19,12L13,18L11.59,16.59L16.17,12L11.59,7.41Z\" />\n    </SvgIcon>\n)\n\n// https://pictogrammers.com/library/memory/icon/box-light-vertical/\nexport const VerticalLineDivider: IconType = (props: any) => (\n    <SvgIcon {...props}>\n        <path d=\"M12 0V22H10V0H12Z\" />\n    </SvgIcon>\n)\n\n// https://pictogrammers.com/library/mdi/icon/delete-forever/\nexport const DeleteForever: IconType = (props: any) => (\n    <SvgIcon {...props}>\n        <path d=\"M6,19A2,2 0 0,0 8,21H16A2,2 0 0,0 18,19V7H6V19M8.46,11.88L9.87,10.47L12,12.59L14.12,10.47L15.53,11.88L13.41,14L15.53,16.12L14.12,17.53L12,15.41L9.88,17.53L8.47,16.12L10.59,14L8.46,11.88M15.5,4L14.5,3H9.5L8.5,4H5V6H19V4H15.5Z\" />\n    </SvgIcon>\n)\n\n// https://pictogrammers.com/library/mdi/icon/folder-key-outline/\nexport const FolderKeyIcon: IconType = (props: any) => (\n    <SvgIcon {...props}>\n        <path d=\"M20 18H4V8H20M20 6H12L10 4H4C2.9 4 2 4.9 2 6V18C2 19.1 2.9 20 4 20H20C21.1 20 22 19.1 22 18V8C22 6.9 21.1 6 20 6M12.8 12C12.4 10.8 11.3 10 10 10C8.3 10 7 11.3 7 13S8.3 16 10 16C11.3 16 12.4 15.2 12.8 14H15V16H17V14H19V12H12.8M10 14C9.4 14 9 13.6 9 13C9 12.4 9.4 12 10 12S11 12.4 11 13 10.6 14 10 14Z\" />\n    </SvgIcon>\n)\n\nexport type IconType = React.FC<{ className?: string; style?: object }>;\n\nexport const AddIcon: IconType = props => <Add {...props} />;\nexport const AddFavoriteIcon: IconType = props => <StarBorder {...props} />;\nexport const AdminMenuIcon: IconType = props => <Build {...props} />;\nexport const AdvancedIcon: IconType = props => <SettingsApplications {...props} />;\nexport const AttributesIcon: IconType = props => <ListAlt {...props} />;\nexport const BackIcon: IconType = props => <ArrowBack {...props} />;\nexport const CustomizeTableIcon: IconType = props => <Menu {...props} />;\nexport const CommandIcon: IconType = props => <LastPage {...props} />;\nexport const CopyIcon: IconType = props => <ContentCopy {...props} />;\nexport const FileCopyIcon: IconType = props => <FileCopy {...props} />;\nexport const FileCopyOutlinedIcon: IconType = props => <FileCopyOutlined {...props} />;\nexport const CollectionIcon: IconType = props => <LibraryBooks {...props} />;\nexport const CloseIcon: IconType = props => <Close {...props} />;\nexport const CloudUploadIcon: IconType = props => <CloudUpload {...props} />;\nexport const DefaultIcon: IconType = props => <RateReview {...props} />;\nexport const DetailsIcon: IconType = props => <Info {...props} />;\nexport const DirectoryIcon: IconType = props => <Folder {...props} />;\nexport const DownloadIcon: IconType = props => <GetApp {...props} />;\nexport const EditSavedQueryIcon: IconType = props => <Create {...props} />;\nexport const ExpandIcon: IconType = props => <ExpandMoreIcon {...props} />;\nexport const ErrorIcon: IconType = props => (\n    <ErrorRoundedIcon\n        style={{ color: \"#ff0000\" }}\n        {...props}\n    />\n);\nexport const FavoriteIcon: IconType = props => <Star {...props} />;\nexport const FileIcon: IconType = props => <DescriptionIcon {...props} />;\nexport const HelpIcon: IconType = props => <Help {...props} />;\nexport const HelpOutlineIcon: IconType = props => <HelpOutline {...props} />;\nexport const ImportContactsIcon: IconType = props => <ImportContacts {...props} />;\nexport const InfoIcon: IconType = props => <Info {...props} />;\nexport const FileInputIcon: IconType = props => <InsertDriveFile {...props} />;\nexport const KeyIcon: IconType = props => <VpnKey {...props} />;\nexport const LogIcon: IconType = props => <SettingsEthernet {...props} />;\nexport const MailIcon: IconType = props => <Mail {...props} />;\nexport const MaximizeIcon: IconType = props => <FullscreenSharp {...props} />;\nexport const ResourceIcon: IconType = props => <Memory {...props} />;\nexport const UnMaximizeIcon: IconType = props => <FullscreenExitSharp {...props} />;\nexport const MoreVerticalIcon: IconType = props => <MoreVert {...props} />;\nexport const MoreHorizontalIcon: IconType = props => <MoreHoriz {...props} />;\nexport const MoveToIcon: IconType = props => <Input {...props} />;\nexport const NewProjectIcon: IconType = props => <CreateNewFolder {...props} />;\nexport const NotificationIcon: IconType = props => <Notifications {...props} />;\nexport const OpenIcon: IconType = props => <OpenInNew {...props} />;\nexport const InputIcon: IconType = props => <MoveToInbox {...props} />;\nexport const PaginationDownIcon: IconType = props => <ArrowDropDown {...props} />;\nexport const PaginationLeftArrowIcon: IconType = props => <ChevronLeft {...props} />;\nexport const PaginationRightArrowIcon: IconType = props => <ChevronRight {...props} />;\nexport const ProcessIcon: IconType = props => <Settings {...props} />;\nexport const ProjectIcon: IconType = props => <Folder {...props} />;\nexport const FilterGroupIcon: IconType = props => <Pageview {...props} />;\nexport const RootProjectIcon: IconType = props => <Inbox {...props} />;\nexport const ProvenanceGraphIcon: IconType = props => <DeviceHub {...props} />;\nexport const RemoveIcon: IconType = props => <Delete {...props} />;\nexport const RemoveFavoriteIcon: IconType = props => <Star {...props} />;\nexport const PublicFavoriteIcon: IconType = props => <Public {...props} />;\nexport const RenameIcon: IconType = props => <Edit {...props} />;\nexport const RestoreVersionIcon: IconType = props => <FlipToFront {...props} />;\nexport const RestoreFromTrashIcon: IconType = props => <RestoreFromTrash {...props} />;\nexport const ReRunProcessIcon: IconType = props => <Cached {...props} />;\nexport const SearchIcon: IconType = props => <Search {...props} />;\nexport const ShareIcon: IconType = props => <PersonAdd {...props} />;\nexport const ShareMeIcon: IconType = props => <People {...props} />;\nexport const SidePanelRightArrowIcon: IconType = props => <PlayArrow {...props} />;\nexport const TrashIcon: IconType = props => <Delete {...props} />;\nexport const UserPanelIcon: IconType = props => <Person {...props} />;\nexport const UsedByIcon: IconType = props => <Folder {...props} />;\nexport const WorkflowIcon: IconType = props => <Code {...props} />;\nexport const WarningIcon: IconType = props => (\n    <Warning\n        style={{ color: \"#fbc02d\", height: \"30px\", width: \"30px\" }}\n        {...props}\n    />\n);\nexport const Link: IconType = props => <LinkOutlined {...props} />;\nexport const FolderSharedIcon: IconType = props => <FolderShared {...props} />;\nexport const CanReadIcon: IconType = props => <RemoveRedEye {...props} />;\nexport const CanWriteIcon: IconType = props => <Edit {...props} />;\nexport const CanManageIcon: IconType = props => <Computer {...props} />;\nexport const AddUserIcon: IconType = props => <PersonAdd {...props} />;\nexport const WordWrapOnIcon: IconType = props => <WrapText {...props} />;\nexport const WordWrapOffIcon: IconType = props => <FormatAlignLeft {...props} />;\nexport const TextIncreaseIcon: IconType = props => <TextIncrease {...props} />;\nexport const TextDecreaseIcon: IconType = props => <TextDecrease {...props} />;\nexport const DeactivateUserIcon: IconType = props => <NotInterested {...props} />;\nexport const LoginAsIcon: IconType = props => <ExitToApp {...props} />;\nexport const ActiveIcon: IconType = props => <CheckCircleOutline {...props} />;\nexport const SetupIcon: IconType = props => <RemoveCircleOutline {...props} />;\nexport const InactiveIcon: IconType = props => <NotInterested {...props} />;\nexport const ImageIcon: IconType = props => <Image {...props} />;\nexport const StartIcon: IconType = props => <PlayArrow {...props} />;\nexport const StopIcon: IconType = props => <Stop {...props} />;\nexport const SelectAllIcon: IconType = props => <CheckboxMultipleOutline {...props} />;\nexport const SelectNoneIcon: IconType = props => <CheckboxMultipleBlankOutline {...props} />;\nexport const ShowChartIcon: IconType = props => <ShowChart {...props} />;\n"
  },
  {
    "path": "services/workbench2/src/components/int-input/int-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Input } from '@mui/material';\nimport { InputProps } from '@mui/material/Input';\n\nexport class IntInput extends React.Component<InputProps> {\n    handleChange = (event: React.ChangeEvent<HTMLInputElement>) => {\n        const { onChange = () => { return; } } = this.props;\n        const parsedValue = parseInt(event.target.value, 10);\n        event.target.value = parsedValue.toString();\n        onChange(event);\n    }\n\n    render() {\n        const parsedValue = parseInt(typeof this.props.value === 'string' ? this.props.value : '', 10);\n        const value = isNaN(parsedValue) ? '' : parsedValue.toString();\n        const props = {\n            ...this.props,\n            value,\n            onChange: this.handleChange,\n        };\n        return <Input {...props} />;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/components/list-item-text-icon/list-item-text-icon.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { ListItemIcon, ListItemText, Typography } from '@mui/material';\nimport { IconType } from '../icon/icon';\nimport classnames from \"classnames\";\n\ntype CssRules = 'root' | 'listItemText' | 'hasMargin' | 'active';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        display: 'flex',\n        alignItems: 'center'\n    },\n    listItemText: {\n        fontWeight: 400\n    },\n    active: {\n        color: theme.palette.primary.main,\n    },\n    hasMargin: {\n        marginLeft: theme.spacing(1),\n    }\n});\n\nexport interface ListItemTextIconDataProps {\n    icon: IconType;\n    name: string;\n    isActive?: boolean;\n    hasMargin?: boolean;\n    iconSize?: number;\n    nameDecorator?: JSX.Element;\n}\n\ntype ListItemTextIconProps = ListItemTextIconDataProps & WithStyles<CssRules>;\n\nexport const ListItemTextIcon = withStyles(styles)(\n    class extends React.Component<ListItemTextIconProps, {}> {\n        render() {\n            const { classes, isActive, hasMargin, name, icon: Icon, iconSize, nameDecorator } = this.props;\n            return (\n                <Typography component='span' className={classes.root}>\n                    <ListItemIcon className={classnames({\n                            [classes.hasMargin]: hasMargin,\n                            [classes.active]: isActive\n                        })}>\n\n                        <Icon style={{ fontSize: `${iconSize}rem` }} />\n                    </ListItemIcon>\n                    {nameDecorator || null}\n                    <ListItemText primary={\n                        <Typography className={classnames(classes.listItemText, {\n                                [classes.active]: isActive\n                            })}>\n                            {name}\n                        </Typography>\n                    } />\n                </Typography>\n            );\n        }\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/components/loading/circular-suspense.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useRef, useLayoutEffect, useState } from 'react'\nimport { CircularProgress } from '@mui/material'\nimport { WithStyles, withStyles } from '@mui/styles'\nimport { CustomStyleRulesCallback } from 'common/custom-theme'\n\ntype CssRules = 'container'\n\nconst styles: CustomStyleRulesCallback<CssRules> = () => ({\n\tcontainer: {\n\t\tdisplay: 'flex',\n\t\talignItems: 'center',\n\t\tjustifyContent: 'center',\n\t},\n})\n\ntype CircularSuspenseProps = {\n\telement: React.ReactNode\n\tshowElement: boolean\n}\n\n/**\n * A loading component that replaces an element with a circular progress indicator.\n *\n * The component measures the dimensions of the provided element and displays a\n * CircularProgress spinner in a container that matches those exact dimensions.\n * No layout shift should occur when switching between the element and the spinner.\n *\n * @param element - The React element to display when showElement is true\n * @param showElement - Whether to show the element (true) or the loading spinner (false)\n * @returns The element or a loading spinner in a container matching the element's size\n */\nexport const CircularSuspense = withStyles(styles)(({\n\telement,\n\tshowElement,\n\tclasses,\n}: CircularSuspenseProps & WithStyles<CssRules>) => {\n\tconst elementRef = useRef<HTMLDivElement>(null)\n\tconst [dimensions, setDimensions] = useState<{ width: number; height: number } | null>(null)\n\n\tuseLayoutEffect(() => {\n\t\tif (elementRef.current && !dimensions) {\n\t\t\tconst rect = elementRef.current.getBoundingClientRect()\n\t\t\tsetDimensions({\n\t\t\t\twidth: rect.width,\n\t\t\t\theight: rect.height,\n\t\t\t})\n\t\t}\n\t}, [showElement, element, dimensions])\n\n\tif (showElement) {\n\t\treturn <div ref={elementRef}>{element}</div>\n\t}\n\n\tif (!dimensions) {\n\t\treturn (\n\t\t\t<div ref={elementRef} style={{ visibility: 'hidden', position: 'absolute' }}>\n\t\t\t\t{element}\n\t\t\t</div>\n\t\t)\n\t}\n\n\tconst maxSize = Math.min(dimensions.width, dimensions.height) * 0.8\n\n\treturn (\n\t\t<div\n\t\t\tclassName={classes.container}\n\t\t\tstyle={{\n\t\t\t\twidth: dimensions.width,\n\t\t\t\theight: dimensions.height,\n\t\t\t}}\n\t\t>\n\t\t\t<CircularProgress size={maxSize} />\n\t\t</div>\n\t)\n})\n"
  },
  {
    "path": "services/workbench2/src/components/loading/inline-pulser.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { ThreeDots } from './three-dots';\nimport withTheme from '@mui/styles/withTheme';\nimport { ArvadosTheme } from 'common/custom-theme';\n\ntype ThemeProps = {\n    theme: ArvadosTheme;\n};\n\ntype Props = {\n    color?: string;\n    height?: number;\n    width?: number;\n    radius?: number;\n};\n\nexport const InlinePulser = withTheme((props: Props & ThemeProps) => (\n    <ThreeDots\n        visible={true}\n        height={props.height || \"30\"}\n        width={props.width || \"30\"}\n        color={props.color || props.theme.customs.colors.greyL}\n        radius={props.radius || \"10\"}\n        ariaLabel=\"three-dots-loading\"\n    />\n));\n"
  },
  {
    "path": "services/workbench2/src/components/loading/three-dots.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// The MIT License (MIT)\n\n/*\n * This file includes code from react-loader-spinner, which is licensed under the MIT License.\n * Copyright (c) 2018 Mohan Pd.\n * https://github.com/mhnpd/react-loader-spinner\n * See the LICENSE file for more details.\n */\n\n\nimport React, { FunctionComponent } from 'react';\nimport { styled } from 'styled-components';\n\nconst DEFAULT_COLOR = '#4fa94d';\n\nconst DEFAULT_WAI_ARIA_ATTRIBUTE = {\n    'aria-busy': true,\n    role: 'progressbar',\n};\n\ntype Style = {\n    [key: string]: string;\n};\n\ninterface PrimaryProps {\n    height?: string | number;\n    width?: string | number;\n    ariaLabel?: string;\n    wrapperStyle?: Style;\n    wrapperClass?: string;\n    visible?: boolean;\n}\n\ninterface BaseProps extends PrimaryProps {\n    color?: string;\n}\n\n//  changed from div to span to fix DOM nesting error\nconst SvgWrapper = styled.span<{ $visible: boolean }>`\n    display: ${(props) => (props.$visible ? 'flex' : 'none')};\n`;\n\ninterface ThreeDotsProps extends BaseProps {\n    radius?: string | number;\n}\n\nexport const ThreeDots: FunctionComponent<ThreeDotsProps> = ({\n    height = 80,\n    width = 80,\n    radius = 9,\n    color = DEFAULT_COLOR,\n    ariaLabel = 'three-dots-loading',\n    wrapperStyle,\n    wrapperClass,\n    visible = true,\n}) => (\n    <SvgWrapper\n        style={wrapperStyle}\n        $visible={visible}\n        className={wrapperClass}\n        data-testid='three-dots-loading'\n        aria-label={ariaLabel}\n        {...DEFAULT_WAI_ARIA_ATTRIBUTE}\n    >\n        <svg\n            width={width}\n            height={height}\n            viewBox='0 0 120 30'\n            xmlns={'http://www.w3.org/2000/svg'}\n            fill={color}\n            data-testid='three-dots-svg'\n        >\n            <circle\n                cx='15'\n                cy='15'\n                r={Number(radius) + 6}\n            >\n                <animate\n                    attributeName='r'\n                    from='15'\n                    to='15'\n                    begin='0s'\n                    dur='0.8s'\n                    values='15;9;15'\n                    calcMode='linear'\n                    repeatCount='indefinite'\n                />\n                <animate\n                    attributeName='fill-opacity'\n                    from='1'\n                    to='1'\n                    begin='0s'\n                    dur='0.8s'\n                    values='1;.5;1'\n                    calcMode='linear'\n                    repeatCount='indefinite'\n                />\n            </circle>\n            <circle\n                cx='60'\n                cy='15'\n                r={radius}\n                attributeName='fill-opacity'\n                from='1'\n                to='0.3'\n            >\n                <animate\n                    attributeName='r'\n                    from='9'\n                    to='9'\n                    begin='0s'\n                    dur='0.8s'\n                    values='9;15;9'\n                    calcMode='linear'\n                    repeatCount='indefinite'\n                />\n                <animate\n                    attributeName='fill-opacity'\n                    from='0.5'\n                    to='0.5'\n                    begin='0s'\n                    dur='0.8s'\n                    values='.5;1;.5'\n                    calcMode='linear'\n         
           repeatCount='indefinite'\n                />\n            </circle>\n            <circle\n                cx='105'\n                cy='15'\n                r={Number(radius) + 6}\n            >\n                <animate\n                    attributeName='r'\n                    from='15'\n                    to='15'\n                    begin='0s'\n                    dur='0.8s'\n                    values='15;9;15'\n                    calcMode='linear'\n                    repeatCount='indefinite'\n                />\n                <animate\n                    attributeName='fill-opacity'\n                    from='1'\n                    to='1'\n                    begin='0s'\n                    dur='0.8s'\n                    values='1;.5;1'\n                    calcMode='linear'\n                    repeatCount='indefinite'\n                />\n            </circle>\n        </svg>\n    </SvgWrapper>\n);\n\ninterface ThreeDotsSuspenseProps {\n    el: React.ReactNode;\n    isLoaded: boolean;\n}\n\nexport const ThreeDotsSuspense: React.FC<ThreeDotsSuspenseProps> = ({ el, isLoaded }) => {\n    return isLoaded\n        ? <>{el}</>\n        : <ThreeDots height={10} width={60} color=\"#999\" />;\n};"
  },
  {
    "path": "services/workbench2/src/components/loading-indicator/loading-indicator.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport Skeleton, { SkeletonProps } from 'react-loading-skeleton';\n\nexport const LoadingIndicator = (props: SkeletonProps) => (<Skeleton duration={1} {...props} />);\n"
  },
  {
    "path": "services/workbench2/src/components/multi-panel-view/multi-panel-view.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { MPVContainer } from './multi-panel-view';\nimport { ThemeProvider } from \"@mui/material\";\nimport { CustomTheme } from \"common/custom-theme\";\nimport { Provider } from \"react-redux\";\nimport { combineReducers, createStore } from \"redux\";\n\nconst PanelMock = ({panelName, panelRef, children, ...rest}) =>\n    <div {...rest}>{children}</div>;\n\ndescribe('<MPVContainer />', () => {\n    let props;\n    let store;\n\n    beforeEach(() => {\n        props = {\n            classes: {},\n        };\n        const initialRouterState = { location: null };\n        store = createStore(combineReducers({\n            router: (state = initialRouterState, action) => state,\n        }));\n    });\n\n    it('should show default panel buttons for every child', () => {\n        const childs = [\n            <PanelMock key={1}>This is one panel</PanelMock>,\n            <PanelMock key={2}>This is another panel</PanelMock>,\n        ];\n        cy.mount(\n            <Provider store={store}>\n                <ThemeProvider theme={CustomTheme}>\n                    <MPVContainer {...props}>{[...childs]}</MPVContainer>\n                </ThemeProvider>\n            </Provider>\n        );\n        //check if the buttons are rendered\n        cy.get('button').should('have.length', 2);\n        cy.get('button').eq(0).should('contain', 'Panel 1');\n        cy.get('button').eq(1).should('contain', 'Panel 2');\n        //check if the panels are rendered\n        cy.contains('This is one panel');\n        cy.contains('This is another panel').should('not.exist');\n    });\n\n    it('should show panel when clicking on its button', () => {\n        const childs = [\n            <PanelMock key={1}>This is one panel</PanelMock>,\n            <PanelMock key={2}>This is another panel</PanelMock>,\n        ];\n\n        cy.mount(\n            <Provider store={store}>\n                <ThemeProvider theme={CustomTheme}>\n                    <MPVContainer {...props}>{[...childs]}</MPVContainer>\n                </ThemeProvider>\n            </Provider>\n        );\n\n        // Initial state: panel 2 not visible\n        cy.contains('This is one panel');\n        cy.contains('This is another panel').should('not.exist');\n\n        // Panel visible when clicking on its button\n        cy.get('button').contains('Panel 2').click();\n        cy.contains('This is one panel').should('not.exist');\n        cy.contains('This is another panel');\n    });\n\n    it('should show custom panel buttons when config provided', () => {\n        const childs = [\n            <PanelMock key={1}>This is one panel</PanelMock>,\n            <PanelMock key={2}>This is another panel</PanelMock>,\n        ];\n        props.panelStates = [\n            {name: 'First Panel'},\n        ]\n        cy.mount(\n            <Provider store={store}>\n                <ThemeProvider theme={CustomTheme}>\n                    <MPVContainer {...props}>{[...childs]}</MPVContainer>\n                </ThemeProvider>\n            </Provider>\n        );\n        // First panel received the custom button naming\n        cy.get('button').eq(0).should('contain', 'First Panel');\n        cy.contains('This is one panel');\n\n        // Second panel received the default button naming and hidden status by default\n        cy.get('button').eq(1).should('contain', 'Panel 2');\n        cy.contains('This is another 
panel').should('not.exist');\n        cy.get('button').eq(1).click();\n        cy.contains('This is another panel');\n    });\n\n    it('should set initial panel visibility according to panelStates prop', () => {\n        const childs = [\n            <PanelMock key={1}>This is one panel</PanelMock>,\n            <PanelMock key={2}>This is another panel</PanelMock>,\n        ];\n        props.panelStates = [\n            {name: 'First Panel'},\n            {name: 'Second Panel', visible: true},\n        ]\n        cy.mount(\n            <Provider store={store}>\n                <ThemeProvider theme={CustomTheme}>\n                    <MPVContainer {...props}>{[...childs]}</MPVContainer>\n                </ThemeProvider>\n            </Provider>\n        );\n        // Initial state: only the second panel is visible, per panelStates\n        cy.contains('This is one panel').should('not.exist');\n        cy.contains('This is another panel');\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/components/multi-panel-view/multi-panel-view.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { MutableRefObject, ReactElement, ReactNode, useEffect, useRef, useState } from 'react';\nimport { connect } from 'react-redux';\nimport { RouterState } from \"connected-react-router\";\nimport { RootState } from 'store/store';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid, Paper, Tabs, Tab } from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { GridProps } from '@mui/material/Grid';\nimport { isArray, isEqual } from 'lodash';\nimport classNames from 'classnames';\n\ntype CssRules =\n    | 'exclusiveGridContainerRoot'\n    | 'symmetricTabs'\n    | 'gridItemRoot'\n    | 'paperRoot'\n    | 'exclusiveContentPaper'\n    | 'exclusiveContent'\n    | 'tab'\n    | 'selectedTab';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    exclusiveGridContainerRoot: {\n        marginTop: 0,\n    },\n    symmetricTabs: {\n        \"& button\": {\n            flexBasis: \"0\",\n        },\n    },\n    gridItemRoot: {\n        paddingTop: '0 !important',\n        width: '100%',\n    },\n    paperRoot: {\n        height: '100%',\n        width: '100%',\n        display: 'flex',\n        flexDirection: 'column',\n    },\n    exclusiveContent: {\n        overflow: 'auto',\n        margin: 0,\n    },\n    exclusiveContentPaper: {\n        boxShadow: 'none',\n    },\n    tab: {\n        flexGrow: 1,\n        flexShrink: 1,\n        maxWidth: 'initial',\n        minWidth: 'fit-content',\n        padding: '0 5px',\n        borderBottom: `1px solid ${theme.palette.grey[300]}`,\n    },\n    selectedTab: {\n    },\n});\n\ninterface MPVHideablePanelDataProps {\n    name: string;\n    visible: boolean;\n    children: ReactNode;\n    panelRef?: MutableRefObject<any>;\n    paperClassName?: string;\n}\n\nconst MPVHideablePanel = ({ name, visible, paperClassName, ...props }: MPVHideablePanelDataProps) =>\n    visible\n        ? <>\n            {React.cloneElement((props.children as ReactElement), {\n                panelName: name,\n                panelRef: props.panelRef,\n                paperClassName,\n            })}\n        </>\n        : null;\n\ninterface MPVPanelDataProps {\n    panelName?: string;\n    panelRef?: MutableRefObject<any>;\n    forwardProps?: boolean;\n    maxHeight?: string;\n    minHeight?: string;\n    paperClassName?: string;\n}\n\n// Props received by panel implementors\nexport type MPVPanelProps = MPVPanelDataProps;\n\ntype MPVPanelContentProps = { children: ReactElement } & MPVPanelProps & GridProps;\n\n// Grid item compatible component for layout and MPV props passing\nexport const MPVPanelContent = React.memo(({ panelName,\n    panelRef, forwardProps, maxHeight, minHeight, paperClassName,\n    ...props }: MPVPanelContentProps) => {\n\n    return <Grid item style={{ maxHeight: maxHeight, minHeight, padding: '4px' }} {...props}>\n        <span ref={panelRef} /> {/* Element to scroll to when the panel is selected */}\n        <Paper style={{ height: '100%' }} elevation={0}>\n            {forwardProps\n                ? 
React.cloneElement(props.children, { panelName, paperClassName })\n                : React.cloneElement(props.children)}\n        </Paper>\n    </Grid>;\n}, preventRerender);\n\n// return true to prevent re-render, false to allow re-render\nfunction preventRerender(prevProps: MPVPanelContentProps, nextProps: MPVPanelContentProps) {\n    if (!isEqual(prevProps.children, nextProps.children)) {\n        return false;\n    }\n    return true;\n}\n\nexport interface MPVPanelState {\n    name: string;\n    visible?: boolean;\n}\ninterface MPVContainerDataProps {\n    panelStates?: MPVPanelState[];\n    router: RouterState;\n}\ntype MPVContainerProps = MPVContainerDataProps & GridProps;\n\nconst mapStateToProps = (state: RootState): Pick<MPVContainerDataProps, 'router'> => ({\n    router: state.router,\n});\n\n// Grid container compatible component that also handles panel toggling.\nconst MPVContainerComponent = ({ children, panelStates, classes, router, ...props }: MPVContainerProps & WithStyles<CssRules>) => {\n    if (children === undefined || children === null || Object.keys(children).length === 0) {\n        children = [];\n    } else if (!isArray(children)) {\n        children = [children];\n    } else {\n        children = children.filter(child => child !== null);\n    }\n\n    const [initialVisibility, setInitialVisibility] = useState<boolean[]>(getInitialVisibility(panelStates, children as []));\n\n    useEffect(() => {\n        setInitialVisibility(getInitialVisibility(panelStates, children as []));\n    }, [(children as []).length]);\n\n    const [panelVisibility, setPanelVisibility] = useState<boolean[]>(initialVisibility);\n    const currentSelectedPanel = panelVisibility.findIndex(Boolean);\n    const [selectedPanel, setSelectedPanel] = useState<number>(-1);\n    const panelRef = useRef<any>(null);\n\n    // Reset MPV to initial state when route changes\n    const currentRoute = router.location ? router.location.pathname : \"\";\n    useEffect(() => {\n        setPanelVisibility(initialVisibility);\n        setSelectedPanel(initialVisibility.indexOf(true));\n    }, [currentRoute, initialVisibility]);\n\n    let panels: JSX.Element[] = [];\n    let tabs: JSX.Element[] = [];\n    let tabBar: JSX.Element = <></>;\n\n    if (isArray(children)) {\n        const showFn = (idx: number) => () => {\n            // Hide all other panels\n            setPanelVisibility(Array.from({ length: (children as []).length }, (_, index) => index === idx));\n            setSelectedPanel(idx);\n        };\n\n        for (let idx = 0; idx < children.length; idx++) {\n            const panelName = panelStates === undefined\n                ? `Panel ${idx + 1}`\n                : (panelStates[idx] && panelStates[idx].name) || `Panel ${idx + 1}`;\n\n            tabs = [\n                ...tabs,\n                <>{panelName}</>\n            ];\n\n            const aPanel =\n                <MPVHideablePanel\n                    key={idx}\n                    visible={panelVisibility[idx]}\n                    name={panelName}\n                    paperClassName={classes.exclusiveContentPaper}\n                    panelRef={(idx === selectedPanel) ? 
panelRef : undefined}\n                    >\n                    {children[idx]}\n                </MPVHideablePanel>;\n            panels = [...panels, aPanel];\n        }\n\n        tabBar = (\n            <Tabs className={classes.symmetricTabs} value={currentSelectedPanel} onChange={(e, val) => showFn(val)()} data-cy={\"mpv-tabs\"}>\n                {tabs.map((tgl, idx) => <Tab className={classNames(classes.tab, idx === selectedPanel ? classes.selectedTab : '')} key={idx} label={tgl} />)}\n            </Tabs>);\n    }\n\n    const content = <Grid container direction=\"column\" item {...props} xs className={classes.exclusiveContent}>\n                        {panelVisibility.includes(true) && panels}\n                    </Grid>;\n\n    return (\n        <Grid container {...props} className={classNames(classes.exclusiveGridContainerRoot, props.className)}>\n            <Grid item {...props} className={classes.gridItemRoot}>\n                <Paper className={classes.paperRoot}>\n                    {tabBar}\n                    {content}\n                </Paper>\n            </Grid>\n        </Grid>);\n};\n\nconst getInitialVisibility = (panelStates: MPVPanelState[] | undefined, children: ReactNode[]) => {\n    if (panelStates && panelStates.some(state => state.visible)) {\n        return panelStates.map((panelState) => panelState.visible || false);\n    }\n    // if panelStates wasn't passed, or none of its panels were marked visible, default to showing the first panel\n    return new Array(children.length).fill(false).map((_, idx) => idx === 0);\n};\n\nexport const MPVContainer = connect(mapStateToProps)(withStyles(styles)(MPVContainerComponent));\n"
  },
  {
    "path": "services/workbench2/src/components/multiselect-toolbar/MultiselectToolbar.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useMemo } from \"react\";\nimport { connect } from \"react-redux\";\nimport { CustomStyleRulesCallback, ArvadosTheme } from 'common/custom-theme';\nimport { Toolbar, IconButton } from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { RootState } from \"store/store\";\nimport { Dispatch } from \"redux\";\nimport { TCheckedList } from \"components/data-table/data-table\";\nimport { getResource, ResourcesState } from \"store/resources/resources\";\nimport { ContextMenuAction, ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport { usesDetailsCard, selectedToArray, selectedToKindSet } from \"./MultiselectToolbar.utils\";\nimport { AuthState } from \"store/auth/auth-reducer\";\nimport { IntersectionObserverWrapper } from \"./ms-toolbar-overflow-wrapper\";\nimport classNames from \"classnames\";\nimport { sortMenuItems, menuDirection } from 'views-components/context-menu/menu-item-sort';\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\nimport { resourceToMenuKind } from \"common/resource-to-menu-kind\";\nimport { getMenuActionSetByKind } from \"common/menu-action-set-actions\";\nimport { intersection } from \"lodash\";\nimport { matchTrashRoute } from \"routes/routes\";\nimport { toggleTrashAction } from \"views-components/context-menu/action-sets/project-action-set\";\n\ntype CssRules = \"root\" | \"iconContainer\" | \"icon\" | \"divider\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        display: \"flex\",\n        flexDirection: \"row\",\n        height: '2.5rem',\n        width: 0,\n        padding: \"6px 0 0 0\",\n        margin: 0,\n        overflow: 'hidden',\n    },\n    iconContainer: {\n        height: '100%',\n    },\n    icon: {\n        marginLeft: '-5px',\n    },\n    divider: {\n        marginTop: '5px',\n        width: '2rem',\n        display: \"flex\",\n        alignItems: \"center\",\n        justifyContent: \"center\",\n    },\n});\n\nexport type MultiselectToolbarDataProps = {\n    checkedList: TCheckedList;\n    selectedResourceUuid: string | null;\n    resources: ResourcesState;\n    disabledButtons: Set<string>\n    auth: AuthState;\n    pathName: string;\n};\n\ntype MultiselectToolbarActionProps = {\n    getAllMenukinds: (checkedList: TCheckedList) => ContextMenuKind[];\n    executeComponent: (fn: (dispatch: Dispatch, res: any[]) => void, resources: any[]) => void;\n    executeMulti: (action: ContextMenuAction, checkedList: TCheckedList, resources: ResourcesState) => void;\n    resourceToMenukind: (uuid: string) => ContextMenuKind | undefined;\n};\n\ntype MultiselectToolbarRecievedProps = {\n    forceMultiSelectMode?: boolean;\n    toolbarClass?: string;\n}\n\ntype MultiselectToolbarProps = MultiselectToolbarDataProps & MultiselectToolbarActionProps & MultiselectToolbarRecievedProps & WithStyles<CssRules>;\n\nexport const MultiselectToolbar = connect(\n    mapStateToProps,\n    mapDispatchToProps\n)(\n    withStyles(styles)(React.memo((props: MultiselectToolbarProps) => {\n        const { classes, checkedList, resources, pathName, forceMultiSelectMode, toolbarClass: injectedStyles } = props;\n        const selectedResourceArray = selectedToArray(checkedList);\n        const selectedResourceUuid = usesDetailsCard(pathName) && 
selectedResourceArray.length < 2 ? props.selectedResourceUuid : selectedResourceArray.length === 1 ? selectedResourceArray[0] : null;\n        const singleResourceKind = selectedResourceUuid && !forceMultiSelectMode ? [props.resourceToMenukind(selectedResourceUuid)] : null\n        const currentResourceKinds = singleResourceKind && !!singleResourceKind[0] ? singleResourceKind : props.getAllMenukinds(checkedList);\n        const currentPathIsTrash = !!matchTrashRoute(pathName || \"\");\n\n        const rawActions =\n            currentPathIsTrash && selectedToKindSet(checkedList).size\n                ? [toggleTrashAction]\n                : selectActionsByKind(currentResourceKinds as ContextMenuKind[]).filter((action) =>\n                        selectedResourceUuid === null ? action.isForMulti : true\n                    );\n\n        const actions: ContextMenuAction[] = sortMenuItems(\n            singleResourceKind && singleResourceKind.length ? (singleResourceKind[0] as ContextMenuKind) : ContextMenuKind.MULTI,\n            rawActions,\n            menuDirection.HORIZONTAL\n        );\n\n        // eslint-disable-next-line\n        const memoizedActions = useMemo(() => actions, [currentResourceKinds, currentPathIsTrash, selectedResourceUuid]);\n\n        const targetResources = selectedResourceUuid ? {[selectedResourceUuid]: true} as TCheckedList : checkedList\n\n        const fetchedResources = selectedToArray(targetResources).map(uuid => resources[uuid]);\n\n        return (\n            <React.Fragment>\n                <Toolbar\n                    className={classNames(classes.root, injectedStyles)}\n                    style={{ width: `${(memoizedActions.length * 2.5) + 2}rem`, height: '2.5rem'}}\n                    data-cy='multiselect-toolbar'\n                    >\n                    {memoizedActions.length ? (\n                        <IntersectionObserverWrapper\n                            menuLength={memoizedActions.length}\n                            key={actions.map(a => a.name).join(',')}\n                            >\n                            {memoizedActions.map((action, i) =>{\n                                const { name } = action;\n                            return action.name === ContextMenuActionNames.DIVIDER && action.component ? (\n                                <div\n                                    className={classes.divider}\n                                    data-targetid={`${name}${i}`}\n                                    key={`${name}${i}`}\n                                >\n                                    {<action.component />}\n                                </div>\n                            ) : action.component ? 
(\n                                <span className={classes.iconContainer} key={`${name}${i}`} data-targetid={name}>\n                                    <action.component isInToolbar={true} onClick={()=>props.executeComponent(action.execute, fetchedResources)} />\n                                </span>\n                            ) : (\n                                //data-targetid is used to determine what goes to the overflow menu\n                                //data-title is used to display the tooltip text\n                                <span className={classes.iconContainer} key={`${name}${i}`} data-targetid={name} data-title={name}>\n                                    <IconButton\n                                        data-cy='multiselect-button'\n                                        onClick={() => {\n                                            props.executeMulti(action, targetResources, resources)}}\n                                        className={classes.icon}\n                                        size=\"large\">\n                                        {action.icon ? action.icon({}) : <span></span>}\n                                    </IconButton>\n                                </span>\n                            );\n                            })}\n                        </IntersectionObserverWrapper>\n                    ) : (\n                        <span></span>\n                    )}\n                </Toolbar>\n            </React.Fragment>\n        );\n    }, preventRerender))\n);\n\n// return true to skip re-render, false to force re-render\nfunction preventRerender(prevProps: MultiselectToolbarProps, nextProps: MultiselectToolbarProps) {\n    if (prevProps.selectedResourceUuid !== nextProps.selectedResourceUuid) {\n        return false;\n    }\n    if (prevProps.disabledButtons !== nextProps.disabledButtons) {\n        return false;\n    }\n    return true;\n}\n\nfunction selectActionsByKind(currentResourceKinds: ContextMenuKind[]): ContextMenuAction[] {\n    if (currentResourceKinds.length === 0) return [];\n    const allMenuActionSets = currentResourceKinds.map(kind => getMenuActionSetByKind(kind)).map(actionSetArray => actionSetArray[0]);\n    //if only one selected, return all actions\n    if (currentResourceKinds.length === 1) return allMenuActionSets[0];\n    const actionNames = allMenuActionSets.map(actionSet => actionSet.map(action => action.name));\n    const commonNames = new Set(intersection(...actionNames));\n    const commonActions = allMenuActionSets\n                            .reduce((prev, next) => prev.concat(next), [])\n                            .filter(action => commonNames.has(action.name) && action.isForMulti);\n\n    return Array.from(new Set(commonActions));\n}\n\nfunction findActionByName(name: string, actionSet: ContextMenuAction[][]) {\n    return actionSet[0].find(action => action.name === name);\n}\n\n//--------------------------------------------------//\n\nfunction mapStateToProps({auth, multiselect, resources, selectedResource}: RootState): MultiselectToolbarDataProps {\n    return {\n        checkedList: multiselect.checkedList as TCheckedList,\n        disabledButtons: new Set<string>(multiselect.disabledButtons),\n        auth,\n        selectedResourceUuid: selectedResource.selectedResourceUuid,\n        pathName: window.location.pathname,\n        resources,\n    }\n}\n\nfunction mapDispatchToProps(dispatch: Dispatch): MultiselectToolbarActionProps {\n    return {\n        getAllMenukinds: (checkedList: 
TCheckedList) => selectedToArray(checkedList).map(uuid => dispatch<any>(resourceToMenuKind(uuid))).filter(kind => !!kind),\n        resourceToMenukind: (uuid: string) => {\n            const kind = dispatch<any>(resourceToMenuKind(uuid));\n            return kind;\n        },\n        executeComponent: (fn: (dispatch: Dispatch, res: any[]) => void, resources: any[]) => fn(dispatch, resources),\n        executeMulti: (selectedAction: ContextMenuAction, checkedList: TCheckedList, resources: ResourcesState): void => {\n            const selectedResources = selectedToArray(checkedList).map(uuid => getResource(uuid)(resources)).filter(resource => !!resource);\n            const allMenuKinds: ContextMenuKind[] = selectedToArray(checkedList).map(uuid => dispatch<any>(resourceToMenuKind(uuid))).filter(kind => !!kind);\n            const groupedActionSets = allMenuKinds.reduce((result, menuKind: ContextMenuKind): Record<string, ContextMenuAction[]> => {\n                    if (!result[menuKind]) { result[menuKind] = []; }\n                    const action = findActionByName(selectedAction.name, getMenuActionSetByKind(menuKind));\n                    if (action) result[menuKind].push(action);\n                    return result;\n                }, {});\n            selectedResources.forEach(resource => {\n                if (!resource) return;\n                const correspondingActionSet = groupedActionSets[dispatch<any>(resourceToMenuKind(resource.uuid))!];\n                if (!correspondingActionSet) return;\n                correspondingActionSet.forEach(action => action.execute(dispatch, [resource]));\n            });\n        },\n    };\n}\n"
  },
  {
    "path": "services/workbench2/src/components/multiselect-toolbar/MultiselectToolbar.utils.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { TCheckedList } from \"components/data-table/data-table\";\nimport { extractUuidKind } from \"models/resource\";\nimport { isUserGroup } from \"models/group\";\nimport { getResource, ResourcesState } from \"store/resources/resources\";\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\n\nconst detailsCardPaths = [\n    '/projects',\n    '/workflows',\n    '/collections',\n    '/processes',\n];\n\nexport const usesDetailsCard = (location: string): boolean => {\n    return detailsCardPaths.some(path => location.includes(path));\n};\n\nexport function selectedToArray(checkedList: TCheckedList): Array<string> {\n    const arrayifiedSelectedList: Array<string> = [];\n    for (const [key, value] of Object.entries(checkedList)) {\n        if (value === true) {\n            arrayifiedSelectedList.push(key);\n        }\n    }\n    return arrayifiedSelectedList;\n}\n\nexport function selectedToKindSet(checkedList: TCheckedList, resources: ResourcesState = {}): Set<string> {\n    const setifiedList = new Set<string>();\n    for (const [key, value] of Object.entries(checkedList)) {\n        if (value === true) {\n            isRoleGroupResource(key, resources) ? setifiedList.add(ContextMenuKind.GROUPS) : setifiedList.add(extractUuidKind(key) as string);\n        }\n    }\n    return setifiedList;\n}\n\nexport const isRoleGroupResource = (uuid: string, resources: ResourcesState): boolean => {\n    const resource = getResource(uuid)(resources);\n    return isUserGroup(resource);\n};\n"
  },
  {
    "path": "services/workbench2/src/components/multiselect-toolbar/ms-toolbar-overflow-menu.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useState, useMemo, ReactElement, JSXElementConstructor } from 'react';\nimport { MoreVert } from '@mui/icons-material';\nimport classnames from 'classnames';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { IconButton, Menu, MenuItem, Tooltip } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\n\ntype CssRules = 'inOverflowMenu' | 'openMenuButton' | 'menu' | 'menuItem' | 'menuElement';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    inOverflowMenu: {\n        '&:hover': {\n            backgroundColor: 'transparent',\n        },\n    },\n    openMenuButton: {\n        right: '10px',\n    },\n    menu: {\n        marginLeft: 0,\n    },\n    menuItem: {\n        '&:hover': {\n            backgroundColor: 'white',\n        },\n        marginTop: 0,\n        paddingTop: 0,\n        paddingLeft: '0.5rem',\n        height: '2.5rem',\n        display: 'flex',\n        flexDirection: 'column',\n        alignItems: 'center',\n    },\n    menuElement: {\n        width: '2rem',\n    }\n});\n\nexport type OverflowChild = ReactElement<{ className: string; }, string | JSXElementConstructor<any>>\n\ntype OverflowMenuProps = {\n    children: OverflowChild[]\n    className: string\n    visibilityMap: {}\n}\n\nexport const OverflowMenu = withStyles(styles)((props: OverflowMenuProps & WithStyles<CssRules>) => {\n    const { children, className, visibilityMap, classes } = props;\n    const [anchorEl, setAnchorEl] = useState(null);\n    const open = Boolean(anchorEl);\n    const handleClick = (event) => {\n        setAnchorEl(event.currentTarget);\n    };\n\n    const handleClose = () => {\n        setAnchorEl(null);\n    };\n\n    const shouldShowMenu = useMemo(() => Object.values(visibilityMap).some((v) => v === false), [visibilityMap]);\n    if (!shouldShowMenu) {\n        return null;\n    }\n    return (\n        <div className={className}>\n            <Tooltip title=\"More options\" disableFocusListener>\n                <IconButton\n                    aria-label='more'\n                    aria-controls='long-menu'\n                    aria-haspopup='true'\n                    onClick={handleClick}\n                    className={classes.openMenuButton}\n                    data-cy='overflow-menu-button'\n                    size=\"large\">\n                    <MoreVert />\n                </IconButton>\n            </Tooltip>\n            <Menu\n                id='long-menu'\n                anchorEl={anchorEl}\n                keepMounted\n                open={open}\n                onClose={handleClose}\n                disableAutoFocusItem\n                className={classes.menu}\n                data-cy='overflow-menu'\n            >\n                {React.Children.map(children, (child: any) => {\n                    if (!visibilityMap[child.props['data-targetid']]) {\n                        return (\n                            <Tooltip\n                                title={child.props['data-title']}\n                                key={child.props['data-targetid']}\n                                disableFocusListener\n                                placement='left-start'\n                            >\n                                <MenuItem\n                            
        onClick={handleClose}\n                                    className={classes.menuItem}\n                                >\n                                    {React.cloneElement(child, {\n                                        className: classnames(classes.menuElement),\n                                    })}\n                                </MenuItem>\n                            </Tooltip>)\n                    }\n                    return null;\n                })}\n            </Menu>\n        </div>\n    );\n});\n"
  },
  {
    "path": "services/workbench2/src/components/multiselect-toolbar/ms-toolbar-overflow-wrapper.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useState, useRef, useEffect } from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport classnames from 'classnames';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { OverflowMenu, OverflowChild } from './ms-toolbar-overflow-menu';\nimport { Tooltip } from '@mui/material';\n\ntype CssRules = 'visible' | 'inVisible' | 'tooltip' | 'toolbarWrapper' | 'overflowStyle';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    visible: {\n        order: 0,\n        visibility: 'visible',\n        opacity: 1,\n    },\n    inVisible: {\n        order: 100,\n        visibility: 'hidden',\n        pointerEvents: 'none',\n    },\n    toolbarWrapper: {\n        display: 'flex',\n        overflow: 'hidden',\n        padding: '0 0px 5px 20px',\n        width: '100%',\n    },\n    tooltip: {\n        width: \"2.5rem\",\n        height: \"2.5rem \",\n        paddingLeft: 0,\n        zIndex: 1000,\n        border: \"1px solid transparent\",\n    },\n    overflowStyle: {\n        order: 99,\n        position: 'sticky',\n        right: '-2rem',\n        width: 0,\n        height: '1rem',\n    },\n});\n\ntype WrapperProps = {\n    children: OverflowChild[];\n    menuLength: number;\n};\n\nexport const IntersectionObserverWrapper = withStyles(styles)((props: WrapperProps & WithStyles<CssRules>) => {\n    const { classes, children, menuLength } = props;\n    const lastEntryId = (children[menuLength - 1] as any).props['data-targetid'];\n    const navRef = useRef<any>(null);\n    const [visibilityMap, setVisibilityMap] = useState<Record<string, boolean>>({});\n    const [numHidden, setNumHidden] = useState(() => findNumHidden(visibilityMap));\n    const prevNumHidden = useRef(numHidden);\n\n    const handleIntersection = (entries) => {\n        const updatedEntries: Record<string, boolean> = {};\n        entries.forEach((entry) => {\n            const targetid = entry.target.dataset.targetid as string;\n            if (entry.isIntersecting) {\n                updatedEntries[targetid] = true;\n            } else {\n                updatedEntries[targetid] = false;\n            }\n        });\n\n        setVisibilityMap((prev) => ({\n            ...prev,\n            ...updatedEntries,\n            [lastEntryId]: Object.keys(updatedEntries)[0] === lastEntryId,\n        }));\n    };\n\n    //ensures that the last element is always visible if the second to last is visible\n    useEffect(() => {\n        if ((prevNumHidden.current > 1 || prevNumHidden.current === 0) && numHidden === 1) {\n            setVisibilityMap((prev) => ({\n                ...prev,\n                [lastEntryId]: true,\n            }));\n        }\n        prevNumHidden.current = numHidden;\n    }, [numHidden, lastEntryId]);\n\n    useEffect(() => {\n        setNumHidden(findNumHidden(visibilityMap));\n    }, [visibilityMap]);\n\n    useEffect((): any => {\n        setVisibilityMap({});\n        const observer = new IntersectionObserver(handleIntersection, {\n            root: navRef.current,\n            rootMargin: '0px -30px 0px 0px',\n            threshold: 1,\n        });\n        // We are adding observers to child elements of the container div\n        // with ref as navRef. 
Notice that we are adding observers\n        // only if we have the data attribute targetid on the child element\n        if (navRef.current)\n            Array.from(navRef.current.children).forEach((item: any) => {\n                if (item.dataset.targetid) {\n                    observer.observe(item);\n                }\n            });\n        return () => {\n            observer.disconnect();\n        };\n        // eslint-disable-next-line\n    }, [menuLength, navRef]);\n\n    function findNumHidden(visMap: {}) {\n        return Object.values(visMap).filter((x) => x === false).length;\n    }\n\n    return (\n        <div\n            className={classes.toolbarWrapper}\n            ref={navRef}\n        >\n            {React.Children.map(children, (child) => {\n                const isVisible = !!visibilityMap[child.props['data-targetid']];\n                return (\n                    <Tooltip\n                        className={classes.tooltip}\n                        title={child.props['data-title']}\n                        key={child.props['data-targetid']}\n                        disableFocusListener\n                        >\n                            { React.cloneElement(child, {\n                            className: classnames(child.props.className, {\n                                [classes.visible]: isVisible,\n                                [classes.inVisible]: !isVisible,\n                            }),\n                        })}\n                    </Tooltip>)\n                })}\n            {numHidden >= 2 && (\n                <OverflowMenu\n                    visibilityMap={visibilityMap}\n                    className={classes.overflowStyle}\n                >\n                    {children.filter((child) => !child.props['data-targetid'].includes(\"Divider\"))}\n                </OverflowMenu>\n            )}\n        </div>\n    );\n});\n"
  },
  {
    "path": "services/workbench2/src/components/overview-panel/overview-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { Grid } from '@mui/material';\nimport { RootState } from 'store/store';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithStyles } from '@mui/styles';\nimport { getResource } from 'store/resources/resources';\nimport { getPropertyChip } from 'views-components/resource-properties-form/property-chip';\nimport { CollectionResource } from 'models/collection';\nimport { ProjectResource } from 'models/project';\nimport { WorkflowResource } from 'models/workflow';\nimport { ResourceKind } from 'models/resource';\nimport { Process, getProcess } from 'store/processes/process';\nimport { ContainerRequestResource } from 'models/container-request';\nimport { ContainerResource } from 'models/container';\nimport { ProcessRuntimeStatus } from 'views-components/process-runtime-status/process-runtime-status';\nimport { isUserResource } from 'models/user';\nimport { getRegisteredWorkflowPanelData } from 'views-components/details-panel/workflow-details';\nimport { AuthState } from 'store/auth/auth-reducer';\nimport { DataTableDefaultView } from 'components/data-table-default-view/data-table-default-view';\nimport { getPropertyChips } from 'views-components/property-chips/get-property-chips';\n\ntype CssRules = 'root' | 'tag';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        height: '100%',\n        display: 'flex',\n        flexDirection: 'column',\n        justifyContent: 'space-between',\n        padding: theme.spacing(1),\n    },\n    tag: {\n        marginRight: theme.spacing(0.5),\n        marginBottom: theme.spacing(0.5),\n    },\n});\n\ntype OverviewPanelProps = {\n    auth: AuthState;\n    resource: ProjectResource | CollectionResource | ContainerRequestResource | WorkflowResource | undefined;\n    process?: Process;\n    container?: ContainerResource;\n    detailsElement: React.ReactNode;\n    progressIndicator: string[];\n} & WithStyles<CssRules>;\n\nconst mapStateToProps = (state: RootState): Pick<OverviewPanelProps, 'auth' |'resource' | 'container' | 'progressIndicator'> => {\n    const resource = getResource<any>(state.properties.currentRouteUuid)(state.resources);\n    const process = getProcess(resource?.uuid)(state.resources) || undefined;\n    return {\n        auth: state.auth,\n        resource: resource?.containerRequest ? 
process : resource,\n        container: process?.container,\n        progressIndicator: state.progressIndicator\n    };\n};\n\nexport const OverviewPanel = connect(mapStateToProps)(withStyles(styles)((({ auth, resource, container, detailsElement, progressIndicator, classes }: OverviewPanelProps) => {\n    const working = progressIndicator.length > 0;\n    if (isUserResource(resource)) {\n        return null;\n    }\n    if (!resource) {\n        if (!working) {\n            return <DataTableDefaultView />;\n        }\n        return null;\n    }\n\n    return (\n        <section className={classes.root}>\n            <section>\n                {resource.kind === ResourceKind.CONTAINER_REQUEST && <Grid item xs={12}>\n                    <ProcessRuntimeStatus runtimeStatus={container?.runtimeStatus} containerCount={resource.containerCount} />\n                </Grid>}\n                <Grid item xs={12} md={12}>\n                    <section data-cy='details-element'>\n                        {detailsElement}\n                    </section>\n                </Grid>\n            </section>\n            <PropertiesElement auth={auth} resource={resource} classes={classes} />\n        </section>\n    );\n})));\n\nconst PropertiesElement = ({auth, resource, classes}: { auth: AuthState, resource: ProjectResource | CollectionResource | ContainerRequestResource | WorkflowResource | undefined, classes: any }) => {\n    if (!resource) {\n        return null;\n    }\n    if (resource.kind === ResourceKind.WORKFLOW) {\n        const wfData = getRegisteredWorkflowPanelData(resource, auth);\n        if (Object.keys(wfData.gitprops).length === 0) {\n            return null;\n        }\n        return <section data-cy='resource-properties'>\n            {Object.keys(wfData.gitprops).map(k =>\n                getPropertyChip(k, wfData.gitprops[k], undefined, classes.tag)\n            )}\n        </section>;\n    }\n    if (typeof resource.properties === 'object' && Object.keys(resource.properties).length > 0) {\n        return getPropertyChips(resource, classes);\n    }\n    return null;\n}\n"
  },
  {
    "path": "services/workbench2/src/components/popover/helpers.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { PopoverOrigin } from \"@mui/material/Popover\";\n\nexport const createAnchorAt = (position: {x: number, y: number}) => {\n    const el = document.createElement('div');\n    const clientRect: DOMRect = {\n        x: position.x,\n        y: position.y,\n        toJSON: () => '',\n        left: position.x,\n        right: position.x,\n        top: position.y,\n        bottom: position.y,\n        width: 0,\n        height: 0\n    };\n    el.getBoundingClientRect = () => clientRect;\n    return el;\n};\n\nexport const DefaultTransformOrigin: PopoverOrigin = {\n    vertical: \"top\",\n    horizontal: \"right\",\n};"
  },
  {
    "path": "services/workbench2/src/components/popover/popover.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Popover } from \"./popover\";\nimport Button from \"@mui/material/Button\";\n\ndescribe(\"<Popover />\", () => {\n    it(\"opens on default trigger click\", () => {\n        cy.mount(<Popover />);\n        cy.get('[data-cy=popover]').should('not.exist');\n        cy.get('button').click();\n        cy.get('[data-cy=popover]').should('exist');\n    });\n\n    it(\"renders custom trigger\", () => {\n        cy.mount(<Popover triggerComponent={CustomTrigger} />);\n        cy.get('button').should('have.text', 'Open popover');\n    });\n\n    it(\"opens on custom trigger click\", () => {\n        cy.mount(<Popover triggerComponent={CustomTrigger} />);\n        cy.get('[data-cy=popover]').should('not.exist');\n        cy.get('button').should('have.text', 'Open popover').click();\n        cy.get('[data-cy=popover]').should('exist');\n    });\n\n    it(\"renders children when opened\", () => {\n        cy.mount(\n            <Popover>\n                <CustomTrigger />\n            </Popover>\n        );\n        cy.get('button').click();\n        cy.get('button').contains('Open popover').should('have.length', 1);\n    });\n\n    it(\"does not close if closeOnContentClick is not set\", () => {\n        cy.mount(\n            <Popover>\n                <CustomTrigger />\n            </Popover>\n        );\n        cy.get('button').click();\n        cy.get('button').should('have.text', 'Open popover');\n        cy.contains('Open popover').click();\n        cy.get('[data-cy=popover]').should('exist');\n    });\n\n    it(\"closes on content click if closeOnContentClick is set\", () => {\n        cy.mount(\n            <Popover closeOnContentClick>\n                <CustomTrigger />\n            </Popover>\n        );\n        cy.get('button').click();\n        cy.get('[data-cy=popover]').should('exist');\n        cy.contains('Open popover').click();\n        cy.get('[data-cy=popover]').should('not.exist');\n    });\n});\n\nconst CustomTrigger = (props) => (\n    <Button {...props}>\n        Open popover\n    </Button>\n);\n"
  },
  {
    "path": "services/workbench2/src/components/popover/popover.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Popover as MaterialPopover } from '@mui/material';\n\nimport { PopoverOrigin } from '@mui/material/Popover';\nimport IconButton, { IconButtonProps } from '@mui/material/IconButton';\n\nexport interface PopoverProps {\n    triggerComponent?: React.ComponentType<{ onClick: (event: React.MouseEvent<any>) => void }>;\n    closeOnContentClick?: boolean;\n}\n\nexport class Popover extends React.Component<PopoverProps> {\n    state = {\n        anchorEl: undefined\n    };\n\n    transformOrigin: PopoverOrigin = {\n        vertical: \"top\",\n        horizontal: \"right\",\n    };\n\n    render() {\n        const Trigger = this.props.triggerComponent || DefaultTrigger;\n        return (\n            <>\n                <Trigger onClick={this.handleTriggerClick} />\n                <MaterialPopover\n                    data-cy=\"popover\"\n                    anchorEl={this.state.anchorEl}\n                    open={Boolean(this.state.anchorEl)}\n                    onClose={this.handleClose}\n                    onClick={this.handleSelfClick}\n                    transformOrigin={this.transformOrigin}\n                    anchorOrigin={this.transformOrigin}\n                >\n                    {this.props.children}\n                </MaterialPopover>\n            </>\n        );\n    }\n\n    handleClose = () => {\n        this.setState({ anchorEl: undefined });\n    }\n\n    handleTriggerClick = (event: React.MouseEvent<any>) => {\n        this.setState({ anchorEl: event.currentTarget });\n    }\n\n    handleSelfClick = () => {\n        if (this.props.closeOnContentClick) {\n            this.handleClose();\n        }\n    }\n}\n\nexport const DefaultTrigger: React.SFC<IconButtonProps> = (props) => (\n    <IconButton {...props} size=\"large\">\n        <i className=\"fas\" />\n    </IconButton>\n);\n"
  },
  {
    "path": "services/workbench2/src/components/progress-button/progress-button.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport Button, { ButtonProps } from '@mui/material/Button';\nimport { CircularProgress } from '@mui/material';\nimport withStyles from '@mui/styles/withStyles';\nimport { CircularProgressProps } from '@mui/material/CircularProgress';\n\ninterface ProgressButtonProps extends ButtonProps {\n    loading?: boolean;\n    progressProps?: CircularProgressProps;\n}\n\nexport const ProgressButton = ({ loading, progressProps, children, disabled, ...props }: ProgressButtonProps) =>\n    <Button {...props} disabled={disabled || loading}>\n        {children}\n        {loading && <Progress {...progressProps} size={getProgressSize(props.size)} />}\n    </Button>;\n\nconst Progress = withStyles({\n    root: {\n        position: 'absolute',\n    },\n})(CircularProgress);\n\nconst getProgressSize = (size?: 'small' | 'medium' | 'large') => {\n    switch (size) {\n        case 'small':\n            return 16;\n        case 'large':\n            return 24;\n        default:\n            return 20;\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/components/radio-field/radio-field.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { FormControl, FormControlLabel, Radio, RadioGroup } from '@mui/material';\nimport { WrappedFieldProps } from 'redux-form';\nimport { ArvadosTheme, CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\n\ntype CssRules = 'radioGroupRow';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    radioGroupRow: {\n        flexDirection: 'row',\n    },\n});\n\ninterface RadioFieldDataProps {\n    items: {key: string, value: any}[],\n    flexRowDirection: boolean,\n}\n\nexport const RadioField = withStyles(styles)((props: WrappedFieldProps & RadioFieldDataProps & WithStyles<CssRules>) =>\n    <FormControl>\n        <RadioGroup\n            className={props.flexRowDirection ? props.classes.radioGroupRow : undefined}\n            name={props.input.name}\n            value={props.input.value}\n            onChange={props.input.onChange}\n        >\n            {props.items.map(item => (\n                <FormControlLabel key={item.key} value={item.key} control={<Radio />} label={item.value} />\n            ))}\n        </RadioGroup>\n    </FormControl>);\n"
  },
  {
    "path": "services/workbench2/src/components/refresh-button/refresh-button.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { LAST_REFRESH_TIMESTAMP, RefreshButton } from './refresh-button';\n\ndescribe('<RefreshButton />', () => {\n    let props;\n    let replace;\n    let urlPath;\n\n    beforeEach(() => {\n        props = {\n            history: {\n                replace: () => { },\n            },\n            classes: {},\n        };\n\n        replace = cy.spy(props.history, 'replace').as('replace');\n    });\n\n    it('should render without issues', () => {\n        // when\n        cy.mount(<RefreshButton {...props} />);\n\n        // then\n        cy.get('button').should('exist');\n    });\n\n    it('should pass window location to router', () => {\n        // setup\n        cy.mount(<RefreshButton {...props} />);\n\n        cy.window().then((win) => {\n            urlPath = win.location.pathname;\n            expect(!!win.localStorage.getItem(LAST_REFRESH_TIMESTAMP)).to.equal(false);\n        });\n\n        // when\n        cy.get('button').should('exist').click();\n\n        // then\n        cy.window().then((win) => {\n            cy.get('@replace').should('have.been.calledWith', urlPath);\n            expect(!!win.localStorage.getItem(LAST_REFRESH_TIMESTAMP)).not.to.equal(false);\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/components/refresh-button/refresh-button.tsx",
    "content": "\n// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport classNames from 'classnames';\nimport { withRouter, RouteComponentProps } from 'react-router';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Button } from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ReRunProcessIcon } from 'components/icon/icon';\n\ntype CssRules = 'button' | 'buttonRight';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    button: {\n        boxShadow: 'none',\n        padding: '2px 10px 2px 5px',\n        fontSize: '0.75rem'\n    },\n    buttonRight: {\n        marginLeft: 'auto',\n    },\n});\n\ninterface RefreshButtonProps {\n    onClick?: () => void;\n}\n\nexport const LAST_REFRESH_TIMESTAMP = 'lastRefreshTimestamp';\n\nexport const RefreshButton = ({ history, classes, onClick }: RouteComponentProps & WithStyles<CssRules> & RefreshButtonProps) =>\n    <Button\n        color=\"primary\"\n        size=\"small\"\n        variant=\"contained\"\n        onClick={() => {\n            // Notify interested parties that the refresh button was clicked.\n            const now = (new Date()).getTime();\n            localStorage.setItem(LAST_REFRESH_TIMESTAMP, now.toString());\n            history.replace(window.location.pathname);\n            if (onClick) {\n                onClick();\n            }\n        }}\n        className={classNames(classes.buttonRight, classes.button)}>\n        <ReRunProcessIcon />\n        Refresh\n    </Button>;\n\nexport default withStyles(styles)(withRouter(RefreshButton));"
  },
  {
    "path": "services/workbench2/src/components/rich-text-editor-link/rich-text-editor-link.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Typography } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { openRichTextEditorDialog } from 'store/rich-text-editor-dialog/rich-text-editor-dialog-actions';\n\ntype CssRules = \"root\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        color: theme.palette.primary.main,\n        cursor: 'pointer'\n    }\n});\n\ninterface RichTextEditorLinkData {\n    title: string;\n    label: string;\n    content: string;\n}\n\ninterface RichTextEditorLinkActions {\n    onClick: (title: string, content: string) => void;\n}\n\ntype RichTextEditorLinkProps = RichTextEditorLinkData & RichTextEditorLinkActions & WithStyles<CssRules>;\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    onClick: (title: string, content: string) => dispatch<any>(openRichTextEditorDialog(title, content))\n});\n\nexport const RichTextEditorLink = connect(undefined, mapDispatchToProps)(\n    withStyles(styles)(({ classes, title, content, label, onClick }: RichTextEditorLinkProps) =>\n        <Typography component='span' className={classes.root}\n            onClick={() => onClick(title, content) }>\n            {label}\n        </Typography>\n    ));"
  },
  {
    "path": "services/workbench2/src/components/search-input/search-input.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\n// import { mount, configure } from \"enzyme\";\nimport { SearchInput, DEFAULT_SEARCH_DEBOUNCE } from \"./search-input\";\n// import Adapter from 'enzyme-adapter-react-16';\n\n// configure({ adapter: new Adapter() });\n\ndescribe(\"<SearchInput />\", () => {\n    let WrappedComponent;\n    let onSearch\n\n    beforeEach(() => {\n        cy.clock();\n        onSearch = cy.spy().as('onSearch');\n        // Wrap the component to test it with props update\n        WrappedComponent = ({ selfClearProp = '', textProp }) => {\n            const [text, setText] = React.useState(textProp);\n            const [selfClear, setSelfClear] = React.useState(selfClearProp);\n\n            window.updateProps = (newClear, newText) => {\n                setText(newText);\n                setSelfClear(newClear);\n            };\n\n            return <SearchInput selfClearProp={selfClear} value={text} onSearch={onSearch} />;\n        };\n    });\n\n    describe(\"on submit\", () => {\n        it(\"calls onSearch with initial value passed via props\", () => {\n            cy.mount(<SearchInput selfClearProp=\"\" value=\"initial value\" onSearch={onSearch} />);\n            cy.get('form').submit();\n            cy.get('@onSearch').should('have.been.calledWith', 'initial value');\n        });\n\n        it(\"calls onSearch with current value\", () => {\n            cy.mount(<SearchInput selfClearProp=\"\" value=\"\" onSearch={onSearch} />);\n            cy.get('input').type('current value');\n            cy.get('form').submit();\n            cy.get('@onSearch').should('have.been.calledWith', 'current value');\n        });\n\n        it(\"calls onSearch with new value passed via props\", () => {\n            cy.mount(<WrappedComponent />);\n            cy.get('input').type('current value');\n            //simulate change of props\n            cy.window().then((win) => {\n                win.updateProps('', 'new value');\n              });\n            cy.get('form').submit();\n            cy.get('@onSearch').should('have.been.calledWith', 'new value');\n        });\n\n        it(\"cancels timeout set on input value change\", () => {\n            cy.mount(<SearchInput selfClearProp=\"\" value=\"\" onSearch={onSearch} debounce={1000} />);\n            cy.get('input').type('current value');\n            cy.get('form').submit();\n            cy.get('@onSearch').should('have.been.calledOnce');\n            cy.tick(1000)\n            cy.get('@onSearch').should('have.been.calledOnce');\n            cy.get('@onSearch').should('have.been.calledWith', 'current value');\n        });\n\n    });\n\n    describe(\"on input value change\", () => {\n        it(\"calls onSearch after default timeout\", () => {\n            cy.mount(<SearchInput selfClearProp=\"\" value=\"\" onSearch={onSearch} />);\n            cy.get('input').type('current value');\n            cy.get('@onSearch').should('not.have.been.called');\n            cy.tick(DEFAULT_SEARCH_DEBOUNCE);\n            cy.get('@onSearch').should('have.been.calledWith', 'current value');\n        });\n\n        it(\"calls onSearch after the time specified in props has passed\", () => {\n            cy.mount(<SearchInput selfClearProp=\"\" value=\"\" onSearch={onSearch} debounce={2000}/>);\n            cy.get('input').type('current value');\n            cy.tick(1000);\n            cy.get('@onSearch').should('not.have.been.called');\n  
          cy.tick(1000);\n            cy.get('@onSearch').should('have.been.calledWith', 'current value');\n        });\n\n        it(\"calls onSearch only once after no change happened during the specified time\", () => {\n            cy.mount(<SearchInput selfClearProp=\"\" value=\"\" onSearch={onSearch} debounce={1000}/>);\n            cy.get('input').type('current value');\n            cy.tick(500);\n            cy.get('input').type('current value');\n            cy.tick(1000);\n            cy.get('@onSearch').should('have.been.calledOnce');\n        });\n\n        it(\"calls onSearch again after the specified time has passed since previous call\", () => {\n            cy.mount(<SearchInput selfClearProp=\"\" value=\"\" onSearch={onSearch} debounce={1000}/>);\n            cy.get('input').type('current value');\n            cy.tick(500);\n            cy.get('input').clear();\n            cy.get('input').type('intermediate value');\n            cy.tick(1000);\n            cy.get('@onSearch').should('have.been.calledWith', 'intermediate value');\n            cy.get('input').clear();\n            cy.get('input').type('latest value');\n            cy.tick(1000);\n            cy.get('@onSearch').should('have.been.calledWith', 'latest value');\n            cy.get('@onSearch').should('have.been.calledTwice');\n\n        });\n\n    });\n\n    describe(\"on input target change\", () => {\n        it(\"clears the input value on selfClearProp change\", () => {\n            cy.mount(<WrappedComponent selfClearProp=\"abc\" />);\n\n            // component should clear value upon creation\n            cy.tick(1000);\n            cy.get('@onSearch').should('have.been.calledWith', '');\n            cy.get('@onSearch').should('have.been.calledOnce');\n\n            // component should not clear on same selfClearProp\n            cy.window().then((win) => {\n                win.updateProps('', 'abc');\n              });\n            cy.tick(1000);\n            cy.get('@onSearch').should('have.been.called');\n\n            // component should clear on selfClearProp change\n            cy.window().then((win) => {\n                win.updateProps('', '111');\n              });\n            // cy.get('@onSearch').should('have.been.calledOnce');\n            cy.tick(1000);\n            cy.get('@onSearch').should('have.been.calledWith', '');\n            cy.get('@onSearch').should('have.been.calledTwice');\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/components/search-input/search-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, {useState, useEffect} from 'react';\nimport {\n    IconButton,\n    FormControl,\n    InputLabel,\n    Input,\n    InputAdornment,\n    Tooltip,\n} from '@mui/material';\nimport SearchIcon from '@mui/icons-material/Search';\n\ninterface SearchInputDataProps {\n    value: string;\n    label?: string;\n    selfClearProp: string;\n    width?: string;\n}\n\ninterface SearchInputActionProps {\n    onSearch: (value: string) => any;\n    debounce?: number;\n}\n\ntype SearchInputProps = SearchInputDataProps & SearchInputActionProps;\n\nexport const DEFAULT_SEARCH_DEBOUNCE = 1000;\n\nexport const SearchInput = (props: SearchInputProps) => {\n    const [timeout, setTimeout] = useState(0);\n    const [value, setValue] = useState(\"\");\n    const [label, setLabel] = useState(\"Search\");\n    const [selfClearProp, setSelfClearProp] = useState(\"\");\n\n    useEffect(() => {\n        if (props.value) {\n            setValue(props.value);\n        }\n        if (props.label) {\n            setLabel(props.label);\n        }\n\n        return () => {\n            setValue(\"\");\n            clearTimeout(timeout);\n        };\n    }, [props.value, props.label]);\n\n    useEffect(() => {\n        if (selfClearProp !== props.selfClearProp) {\n            setValue(\"\");\n            setSelfClearProp(props.selfClearProp);\n            handleChange({ target: { value: \"\" } } as any);\n        }\n    }, [props.selfClearProp]);\n\n    const handleSubmit = (event: React.FormEvent<HTMLElement>) => {\n        event.preventDefault();\n        clearTimeout(timeout);\n        props.onSearch(value);\n    };\n\n    const handleChange = (event: React.ChangeEvent<HTMLInputElement>) => {\n        const { target: { value: eventValue } } = event;\n        clearTimeout(timeout);\n        setValue(eventValue);\n\n        setTimeout(window.setTimeout(\n            () => {\n                props.onSearch(eventValue);\n            },\n            props.debounce || DEFAULT_SEARCH_DEBOUNCE\n        ));\n    };\n\n    return (\n        <form onSubmit={handleSubmit}>\n            <FormControl variant=\"standard\" style={{ width: props.width || '14rem', marginTop: '-10px'}}>\n                <InputLabel>{label}</InputLabel>\n                <Input\n                    type=\"text\"\n                    data-cy=\"search-input\"\n                    value={value}\n                    onChange={handleChange}\n                    endAdornment={\n                        <InputAdornment position=\"end\" style={{marginRight: '-0.6rem'}}>\n                            <Tooltip title='Search'>\n                                <IconButton onClick={handleSubmit} size=\"large\">\n                                    <SearchIcon />\n                                </IconButton>\n                            </Tooltip>\n                        </InputAdornment>\n                    } />\n            </FormControl>\n        </form>\n    );\n};\n"
  },
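  {
    "path": "services/workbench2/src/components/search-input/search-input.usage-example.tsx",
    "content": "// Hypothetical usage sketch for SearchInput -- not part of the Arvados source\n// tree. It illustrates the component's contract as read from search-input.tsx:\n// onSearch fires after `debounce` ms of inactivity (DEFAULT_SEARCH_DEBOUNCE when\n// unset), immediately on form submit, and with an empty string whenever\n// selfClearProp changes to a new value. FilterableList is an invented wrapper.\n\nimport React, { useState } from 'react';\nimport { SearchInput } from 'components/search-input/search-input';\n\nexport const FilterableList = ({ items }: { items: string[] }) => {\n    const [query, setQuery] = useState('');\n    return (\n        <div>\n            <SearchInput\n                value={query}\n                // change selfClearProp to a new value to force-clear the input\n                selfClearProp=''\n                // wait 500ms after the last keystroke before firing onSearch\n                debounce={500}\n                onSearch={setQuery} />\n            <ul>\n                {items.filter(it => it.includes(query)).map(it => <li key={it}>{it}</li>)}\n            </ul>\n        </div>\n    );\n};\n"
  },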
  {
    "path": "services/workbench2/src/components/select-field/select-field.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { WrappedFieldProps } from 'redux-form';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { FormControl, InputLabel, Select, FormHelperText } from '@mui/material';\n\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\n\ntype CssRules = 'formControl' | 'selectWrapper' | 'select' | 'option';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    formControl: {\n        width: '100%',\n    },\n    selectWrapper: {\n        backgroundColor: theme.palette.common.white,\n        '&:before': {\n            borderBottomColor: 'rgba(0, 0, 0, 0.42)',\n        },\n        '&:focus': {\n            outline: 'none',\n        },\n    },\n    select: {\n        fontSize: '0.875rem',\n        '&:focus': {\n            backgroundColor: 'rgba(0, 0, 0, 0.0)',\n        },\n    },\n    option: {\n        fontSize: '0.875rem',\n        backgroundColor: theme.palette.common.white,\n        height: '30px',\n    },\n});\n\ninterface NativeSelectFieldProps {\n    disabled?: boolean;\n}\n\nexport const NativeSelectField = withStyles(styles)((props: WrappedFieldProps & NativeSelectFieldProps & WithStyles<CssRules> & { items: any[] }) => (\n    <FormControl variant=\"standard\" className={props.classes.formControl}>\n        <Select\n            variant=\"standard\"\n            className={props.classes.selectWrapper}\n            native\n            value={props.input.value}\n            onChange={props.input.onChange}\n            disabled={props.meta.submitting || props.disabled}\n            name={props.input.name}\n            inputProps={{\n                id: `id-${props.input.name}`,\n                className: props.classes.select,\n            }}>\n            {props.items.map(item => (\n                <option\n                    key={item.key}\n                    value={item.key}\n                    className={props.classes.option}>\n                    {item.value}\n                </option>\n            ))}\n        </Select>\n    </FormControl>\n));\n\ninterface SelectFieldProps {\n    children: React.ReactNode;\n    label: string;\n}\n\ntype SelectFieldCssRules = 'formControl';\n\nconst selectFieldStyles: CustomStyleRulesCallback<SelectFieldCssRules> = (theme: ArvadosTheme) => ({\n    formControl: {\n        marginBottom: theme.spacing(3),\n    },\n});\nexport const SelectField = withStyles(selectFieldStyles)((props: WrappedFieldProps & SelectFieldProps & WithStyles<SelectFieldCssRules>) => (\n    <FormControl\n        variant=\"standard\"\n        error={props.meta.invalid}\n        className={props.classes.formControl}>\n        <InputLabel>{props.label}</InputLabel>\n        <Select variant=\"standard\" {...props.input}>{props.children}</Select>\n        <FormHelperText>{props.meta.error}</FormHelperText>\n    </FormControl>\n));\n"
  },
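  {
    "path": "services/workbench2/src/components/select-field/select-field.usage-example.tsx",
    "content": "// Hypothetical usage sketch for NativeSelectField -- not part of the Arvados\n// source tree. redux-form's Field supplies the input/meta props; each entry in\n// `items` renders as a native <option> keyed and valued by item.key, with\n// item.value as its visible text. The field name and items are invented.\n\nimport React from 'react';\nimport { Field } from 'redux-form';\nimport { NativeSelectField } from 'components/select-field/select-field';\n\nexport const SizeSelector = () =>\n    <Field\n        name='size'\n        component={NativeSelectField}\n        items={[\n            { key: 'small', value: 'Small (1 GiB)' },\n            { key: 'large', value: 'Large (10 GiB)' },\n        ]} />;\n"
  },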
  {
    "path": "services/workbench2/src/components/string-array-input/string-array-mui-input.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { StringArrayMuiInput } from './string-array-mui-input';\n\ndescribe('StringArrayMuiInput Component', () => {\n    beforeEach(() => {\n        // Mount the component with required props\n        cy.mount(\n            <StringArrayMuiInput\n                input={{\n                    name: 'test',\n                    value: [],\n                    onChange: cy.stub().as('onChange'),\n                    onBlur: cy.stub(),\n                    onFocus: cy.stub(),\n                }}\n                meta={{\n                    touched: false,\n                    error: undefined,\n                }}\n                label='Test Input'\n            />\n        );\n    });\n\n    it('renders with initial empty state', () => {\n        cy.get('input').should('exist');\n        cy.get('input').should('have.value', '');\n        cy.get('.MuiChip-root').should('not.exist');\n    });\n\n    it('calls onChange on Enter key press', () => {\n        const testValue = 'test value';\n        cy.get('input').type(`${testValue}{enter}`);\n        cy.get('@onChange').should('have.been.calledWith', [testValue]);\n    });\n\n    it('calls onChange on Add button click', () => {\n        const testValue = 'test value';\n        cy.get('input').type(testValue);\n        cy.get('button').click();\n        cy.get('@onChange').should('have.been.calledWith', [testValue]);\n    });\n\n    it('trims whitespace from input values', () => {\n        const testValue = '  test value  ';\n        const trimmedValue = 'test value';\n\n        cy.get('input').type(`${testValue}{enter}`);\n\n        cy.get('@onChange').should('have.been.calledWith', [trimmedValue]);\n    });\n\n    it('clears input after adding value', () => {\n        cy.get('input').type('test value{enter}');\n        cy.get('input').should('have.value', '');\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/components/string-array-input/string-array-mui-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useState, KeyboardEvent, ChangeEvent, useEffect } from 'react';\nimport { TextField, Chip, Box, IconButton } from '@mui/material';\nimport AddIcon from '@mui/icons-material/Add';\nimport { WrappedFieldProps, WrappedFieldInputProps } from 'redux-form';\n\ninterface StringArrayMuiInputProps extends WrappedFieldProps {\n    label?: string;\n    input: { value: string[] } & WrappedFieldInputProps;\n}\n\nexport const StringArrayMuiInput = ({ input, label, meta }: StringArrayMuiInputProps) => {\n    const [currentValue, setCurrentValue] = useState<string>('');\n    const [error, setError] = useState<string | undefined>(undefined);\n    const [touched, setTouched] = useState(false);\n\n    // Update error state when meta.error changes\n    useEffect(() => {\n        setError(meta.error);\n    }, [meta.error]);\n\n    const handleKeyDown = (e: KeyboardEvent<HTMLInputElement>) => {\n        if (e.key === 'Enter' && currentValue.trim()) {\n            e.preventDefault();\n            addToChips((input.value), currentValue.trim());\n        }\n    };\n\n    const handleDelete = (chipToDelete: string) => {\n        const newValues = ((input.value) || []).filter((chip) => chip !== chipToDelete);\n        input.onChange(newValues);\n    };\n\n    const handleAddClick = () => {\n        if (currentValue.trim()) {\n            addToChips((input.value), currentValue.trim());\n        }\n    };\n\n    const addToChips = (currentValues: string[], newValue: string) => {\n        if (duplicateValueError(currentValues, newValue)) {\n            return;\n        }\n        const newChips = [...currentValues, newValue];\n        input.onChange(newChips);\n        setCurrentValue('');\n        setError(undefined);\n    };\n\n    const duplicateValueError = (currentValues: string[], newValue: string) => {\n        if (currentValues.includes(newValue)) {\n            const errorMsg = `Value \"${newValue}\" already exists`;\n            setError(errorMsg);\n            setTouched(true);\n            return true;\n        }\n        return false;\n    };\n\n    return (\n        <Box sx={{ display: 'flex', flexDirection: 'column', gap: 1 }}>\n            <TextField\n                name='string-array-input'\n                label={label}\n                value={currentValue}\n                onFocus={() => setTouched(true)}\n                onBlur={() => setTouched(false)}\n                onChange={(e: ChangeEvent<HTMLInputElement>) => {\n                    setCurrentValue(e.target.value);\n                    setError(undefined);\n                }}\n                onKeyDown={handleKeyDown}\n                InputProps={{\n                    endAdornment: (\n                        <IconButton\n                            onClick={handleAddClick}\n                            edge='end'\n                        >\n                            <AddIcon />\n                        </IconButton>\n                    ),\n                }}\n                error={Boolean(touched && error)}\n                helperText={touched && error ? 
error : ''}\n            />\n\n            <Box sx={{ display: 'flex', flexWrap: 'wrap', gap: 1 }}>\n                {((input.value) || []).map((val, idx) => (\n                    <Chip\n                        key={idx}\n                        label={val}\n                        onDelete={() => handleDelete(val)}\n                    />\n                ))}\n            </Box>\n        </Box>\n    );\n};\n"
  },
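  {
    "path": "services/workbench2/src/components/string-array-input/string-array-mui-input.usage-example.tsx",
    "content": "// Hypothetical usage sketch for StringArrayMuiInput -- not part of the Arvados\n// source tree. The redux-form field value is a string[]: entries are added with\n// Enter or the add button (trimmed first), rendered as deletable chips, and a\n// duplicate entry raises a local 'already exists' error instead of being\n// appended. The field name 'aliases' is invented for illustration.\n\nimport React from 'react';\nimport { Field } from 'redux-form';\nimport { StringArrayMuiInput } from 'components/string-array-input/string-array-mui-input';\n\nexport const AliasesField = () =>\n    <Field\n        name='aliases'\n        component={StringArrayMuiInput}\n        label='Aliases' />;\n"
  },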
  {
    "path": "services/workbench2/src/components/subprocess-filter/subprocess-filter.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { Typography, Switch } from '@mui/material';\n\ntype CssRules = 'container' | 'label' | 'value';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    container: {\n        display: 'flex',\n        alignItems: 'center',\n        height: '20px'\n    },\n    label: {\n        width: '86px',\n        color: theme.palette.grey[\"500\"],\n        textAlign: 'right',\n    },\n    value: {\n        width: '24px',\n        paddingLeft: theme.spacing(1),\n    }\n});\n\nexport interface SubprocessFilterDataProps {\n    label: string;\n    value: number;\n    checked?: boolean;\n    key?: string;\n    onToggle?: () => void;\n}\n\ntype SubprocessFilterProps = SubprocessFilterDataProps & WithStyles<CssRules>;\n\nexport const SubprocessFilter = withStyles(styles)(\n    ({ classes, label, value, key, checked, onToggle }: SubprocessFilterProps) =>\n        <div className={classes.container} >\n            <Typography component=\"span\" className={classes.label}>{label}:</Typography>\n            <Typography component=\"span\" className={classes.value}>{value}</Typography>\n            {onToggle && <Switch\n                checked={checked}\n                onChange={onToggle}\n                value={key}\n                color=\"primary\" />\n            }\n        </div>\n);"
  },
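  {
    "path": "services/workbench2/src/components/subprocess-filter/subprocess-filter.usage-example.tsx",
    "content": "// Hypothetical usage sketch for SubprocessFilter -- not part of the Arvados\n// source tree. The component renders a 'label: count' row; supplying onToggle\n// additionally renders a Switch so the row doubles as a filter toggle. The\n// FailedSubprocessFilter wrapper and its local state are invented.\n\nimport React, { useState } from 'react';\nimport { SubprocessFilter } from 'components/subprocess-filter/subprocess-filter';\n\nexport const FailedSubprocessFilter = ({ failedCount }: { failedCount: number }) => {\n    const [enabled, setEnabled] = useState(true);\n    return <SubprocessFilter\n        label='Failed'\n        value={failedCount}\n        checked={enabled}\n        onToggle={() => setEnabled(!enabled)} />;\n};\n"
  },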
  {
    "path": "services/workbench2/src/components/switch-field/switch-field.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { FormFieldProps, FormField } from 'components/form-field/form-field';\nimport { Switch } from '@mui/material';\nimport { SwitchProps } from '@mui/material/Switch';\n\nexport const SwitchField = ({ switchProps, ...props }: FormFieldProps & { switchProps: SwitchProps }) =>\n    <FormField {...props}>\n        {input => <Switch {...switchProps} checked={input.value} onChange={input.onChange} />}\n    </FormField>;\n\n"
  },
  {
    "path": "services/workbench2/src/components/tabbedList/tabbed-list.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Tabs, Tab, List, ListItemButton } from '@mui/material';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles, withStyles } from '@mui/styles';\nimport classNames from 'classnames';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { InlinePulser } from 'components/loading/inline-pulser';\n\ntype TabbedListClasses = 'root' | 'tabs' | 'listItem' | 'selected' | 'spinner' | 'notFoundLabel' | 'moreResults';\n\nconst tabbedListStyles: CustomStyleRulesCallback<TabbedListClasses> = (theme: ArvadosTheme) => ({\n    root: {\n        display: 'flex',\n        flexDirection: 'column',\n        height: '100%',\n    },\n    tabs: {\n        backgroundColor: theme.palette.background.paper,\n        position: 'sticky',\n        top: 0,\n        zIndex: 1,\n        borderBottom: '1px solid lightgrey',\n    },\n    listItem: {\n        height: '2rem',\n        cursor: 'pointer',\n        '&:hover': {\n            backgroundColor: theme.palette.grey[200],\n        }\n    },\n    selected: {\n        backgroundColor: `${theme.palette.grey['300']} !important`\n    },\n    spinner: {\n        display: 'flex',\n        justifyContent: 'center',\n        alignItems: 'center',\n        height: '4rem',\n    },\n    notFoundLabel: {\n        cursor: 'default',\n        padding: theme.spacing(1),\n        color: theme.palette.grey[700],\n        textAlign: 'center',\n    },\n    moreResults: {\n        padding: 0,\n        color: theme.palette.grey[700],\n        textAlign: 'center',\n        fontStyle: 'italic',\n        fontSize: '0.8rem',\n    },\n});\n\ntype TabPanelProps = {\n  children: React.ReactNode;\n  value: number;\n  index: number;\n};\n\ntype TabbedListProps<T> = {\n    tabbedListContents: Record<string, T[]>;\n    injectedStyles?: string;\n    selectedIndex?: number;\n    selectedTab?: number;\n    includeContentsLength: boolean;\n    isWorking?: boolean;\n    maxLength?: number;\n    handleSelect?: (selection: T) => React.MouseEventHandler<HTMLElement> | undefined;\n    renderListItem?: (item: T) => React.ReactNode;\n    handleTabChange?: (event: React.SyntheticEvent, newValue: number) => void;\n};\n\nexport const TabbedList = withStyles(tabbedListStyles)(\n    <T,>({\n        tabbedListContents,\n        selectedIndex = 0,\n        selectedTab = 0,\n        isWorking,\n        maxLength,\n        injectedStyles,\n        classes,\n        handleSelect,\n        renderListItem,\n        handleTabChange,\n        includeContentsLength,\n    }: TabbedListProps<T> & WithStyles<TabbedListClasses>) => {\n    const tabLabels = Object.keys(tabbedListContents);\n    const selectedTabLabel = tabLabels[selectedTab];\n    const listContents = tabbedListContents[selectedTabLabel] || [];\n\n    const getTabLabel = (label: string) => {\n        if (includeContentsLength) { \n            if (maxLength && tabbedListContents[label].length > maxLength) {\n                return `${label} (${maxLength}+)`;\n            }\n            return `${label} (${tabbedListContents[label].length})`;\n        } else {\n            return label;\n        }\n    };\n\n    const TabPanel = ({ children, value, index }: TabPanelProps) => {\n        return <div hidden={value !== index}>{value === index && children}</div>;\n    };\n\n    return (\n        <div className={classNames(classes.root, injectedStyles)}>\n            <Tabs\n        
        className={classes.tabs}\n                value={selectedTab}\n                onChange={handleTabChange}\n                variant='fullWidth'\n            >\n                {tabLabels.map((label) => (\n                    <Tab key={label} data-cy={`${label}-tab-label`} label={getTabLabel(label)} />\n                ))}\n            </Tabs>\n            <TabPanel\n                value={selectedTab}\n                index={selectedTab}\n            >\n                {isWorking ? <div data-cy=\"loading-spinner\" className={classes.spinner}><InlinePulser /></div> :\n                    <List dense>\n                    {listContents.length === 0 && <div className={classes.notFoundLabel}>no matching {tabLabels[selectedTab]} found</div>}\n                        {listContents.slice(0, maxLength).map((item, i) => (\n                        <div key={`${selectedTabLabel}-${i}`}>\n                            <ListItemButton\n                                className={classNames(classes.listItem, { [classes.selected]: i === selectedIndex })}\n                                selected={i === selectedIndex}\n                                onClick={handleSelect && handleSelect(item)}\n                                >\n                                {renderListItem ? renderListItem(item) : JSON.stringify(item)}\n                            </ListItemButton>\n                        </div>\n                        ))}\n                        {maxLength && listContents.length > maxLength && <div className={classes.moreResults}>{'keep typing to refine search results'}</div>}\n                    </List>\n                }\n            </TabPanel>\n        </div>\n    );\n});\n"
  },
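  {
    "path": "services/workbench2/src/components/tabbedList/tabbed-list.usage-example.tsx",
    "content": "// Hypothetical usage sketch for TabbedList -- not part of the Arvados source\n// tree. Tab labels come from the keys of tabbedListContents; with\n// includeContentsLength each label gains its item count, shown as 'N+' once the\n// count exceeds maxLength. The SearchResultTabs wrapper and its data are\n// invented, and the generic item type is simplified to string.\n\nimport React, { useState } from 'react';\nimport { TabbedList } from 'components/tabbedList/tabbed-list';\n\nexport const SearchResultTabs = () => {\n    const [tab, setTab] = useState(0);\n    const contents = {\n        Projects: ['project A', 'project B'],\n        Collections: ['collection 1'],\n    };\n    return <TabbedList\n        tabbedListContents={contents}\n        selectedTab={tab}\n        handleTabChange={(_event, newValue) => setTab(newValue)}\n        includeContentsLength={true}\n        maxLength={50}\n        renderListItem={(item: string) => <span>{item}</span>} />;\n};\n"
  },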
  {
    "path": "services/workbench2/src/components/text-field/text-field.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useEffect, useState } from 'react';\nimport { WrappedFieldProps } from 'redux-form';\nimport { Typography, FormControl } from '@mui/material';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { TextField as MaterialTextField, FormControlOwnProps } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport RichTextEditor from 'react-rte';\nimport classNames from 'classnames';\n\ntype CssRules = 'textField' | 'rte' | 'errorMessage' | 'redBorder';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    textField: {\n        marginBottom: theme.spacing(1)\n    },\n    rte: {\n        fontFamily: 'Arial',\n        '& a': {\n            textDecoration: 'none',\n            color: theme.palette.primary.main,\n            '&:hover': {\n                cursor: 'pointer',\n                textDecoration: 'underline'\n            }\n        }\n    },\n    errorMessage: {\n        color: theme.palette.error.main,\n        fontSize: '0.75rem',\n        marginTop: '0.25rem',\n    },\n    redBorder: {\n        border: `1px solid ${theme.palette.error.main}`,\n    },\n});\n\ntype TextFieldProps = WrappedFieldProps & WithStyles<CssRules>;\n\nexport const TextField = withStyles(styles)((props: TextFieldProps & {\n    label?: string, autoFocus?: boolean, required?: boolean, select?: boolean, disabled?: boolean, children: React.ReactNode, margin?: FormControlOwnProps[\"margin\"], placeholder?: string,\n    helperText?: string, type?: string, autoComplete?: string,\n}) =>\n    <MaterialTextField\n        variant=\"standard\"\n        helperText={(props.meta.touched && props.meta.error) || props.helperText}\n        className={props.classes.textField}\n        label={props.label}\n        disabled={props.disabled || props.meta.submitting}\n        error={props.meta.touched && !!props.meta.error}\n        autoComplete={props.autoComplete || 'off'}\n        autoFocus={props.autoFocus}\n        fullWidth={true}\n        required={props.required}\n        select={props.select}\n        children={props.children}\n        margin={props.margin}\n        placeholder={props.placeholder}\n        type={props.type}\n        {...props.input} />);\n\n\ninterface RichEditorTextFieldData {\n    label?: string;\n}\n\ntype RichEditorTextFieldProps = RichEditorTextFieldData & TextFieldProps;\n\nexport const RichEditorTextField = withStyles(styles)(\n    class RichEditorTextField extends React.Component<RichEditorTextFieldProps> {\n        state = {\n            value: RichTextEditor.createValueFromString(this.props.input.value, 'html'),\n            hasBlurred: false,\n            isFocused: false,\n        };\n\n        onChange = (value: any) => {\n            this.setState({ value });\n            this.props.input.onChange(\n                !!value.getEditorState().getCurrentContent().getPlainText().trim()\n                ? 
value.toString('html')\n                : null\n            );\n        }\n\n        onFocus = () => {\n            this.setState({ isFocused: true });\n        }\n\n        onBlur = () => {\n            this.setState({ hasBlurred: true });\n        }\n\n        fieldRequiredError = () => this.props.meta.error === \"This field is required.\";\n        showError = () => this.fieldRequiredError()\n                ? this.state.hasBlurred\n                : this.state.isFocused && this.props.meta.error;\n\n        render() {\n            return <div>\n                <RichTextEditor\n                    className={classNames(this.props.classes.rte, this.showError() && this.props.classes.redBorder)}\n                    value={this.state.value}\n                    onChange={this.onChange}\n                    onBlur={this.onBlur}\n                    onFocus={this.onFocus}\n                    placeholder={this.props.label} />\n                    {this.showError() &&\n                        <Typography className={this.props.classes.errorMessage}>\n                            {this.props.meta.error}\n                        </Typography>}\n                </div>;\n        }\n    }\n);\n\nexport const DateTextField = withStyles(styles)\n    ((props: TextFieldProps) =>\n        <MaterialTextField\n            variant=\"standard\"\n            type=\"date\"\n            disabled={props.meta.submitting}\n            helperText={props.meta.error}\n            error={!!props.meta.error}\n            fullWidth={true}\n            InputLabelProps={{\n                shrink: true\n            }}\n            name={props.input.name}\n            onChange={props.input.onChange}\n            value={props.input.value} />\n    );\n\ninterface TextFieldWithStartValueProps extends WrappedFieldProps {\n    startValue: string;\n    label?: string;\n    children?: React.ReactNode;\n}\n\nexport const TextFieldWithStartValue = (props: TextFieldWithStartValueProps) => {\n    const [hasBeenTouched, setHasBeenTouched] = useState(false);\n\n    useEffect(() => {\n        props.input.onChange(props.startValue);\n    }, []);\n\n    return (\n        <FormControl variant='standard' fullWidth>\n            <TextField\n                data-cy='text-field-input'\n                {...props}\n                input={{\n                    ...props.input,\n                    onFocus: () => setHasBeenTouched(true),\n                    value: hasBeenTouched ? props.input.value : props.startValue,\n                }}\n                label={props.label}\n                children={props.children}\n            />\n        </FormControl>\n    );\n};"
  },
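  {
    "path": "services/workbench2/src/components/text-field/text-field.usage-example.tsx",
    "content": "// Hypothetical usage sketch for the redux-form TextField wrapper -- not part of\n// the Arvados source tree. Validation errors from meta.error are shown as\n// helperText once the field has been touched, and the input is disabled while\n// the form is submitting. The field name and `required` validator are invented.\n\nimport React from 'react';\nimport { Field } from 'redux-form';\nimport { TextField } from 'components/text-field/text-field';\n\nconst required = (value: string) => value ? undefined : 'This field is required.';\n\nexport const NameField = () =>\n    <Field\n        name='name'\n        component={TextField}\n        validate={required}\n        label='Name'\n        autoFocus={true} />;\n"
  },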
  {
    "path": "services/workbench2/src/components/tree/tree.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\nimport React from 'react';\nimport { TreeComponent, TreeItemStatus } from './tree';\nimport { mockProjectResource } from '../../models/test-utils';\nimport { ThemeProvider } from '@mui/material/styles';\nimport { CustomTheme } from '../../common/custom-theme';\n\ndescribe(\"Tree component\", () => {\n\n    it(\"should render ListItem\", () => {\n        const project = {\n            data: mockProjectResource(),\n            id: \"3\",\n            open: true,\n            active: true,\n            status: TreeItemStatus.LOADED\n        };\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <TreeComponent\n                render={project => <div />}\n                toggleItemOpen={cy.stub()}\n                toggleItemActive={cy.stub()}\n                onContextMenu={cy.stub()}\n                items={[project]} />\n            </ThemeProvider>\n        );\n        cy.get('[data-cy=tree-li]').should('have.length', 1);\n    });\n\n    it(\"should render arrow\", () => {\n        const project = {\n            data: mockProjectResource(),\n            id: \"3\",\n            open: true,\n            active: true,\n            status: TreeItemStatus.LOADED,\n        };\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <TreeComponent\n                render={project => <div />}\n                toggleItemOpen={cy.stub()}\n                toggleItemActive={cy.stub()}\n                onContextMenu={cy.stub()}\n                items={[project]} />\n            </ThemeProvider>\n        );\n        cy.get('i').should('have.length', 1);\n    });\n\n    it(\"should render checkbox\", () => {\n        const project = {\n            data: mockProjectResource(),\n            id: \"3\",\n            open: true,\n            active: true,\n            status: TreeItemStatus.LOADED\n        };\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <TreeComponent\n                    showSelection={true}\n                    render={() => <div />}\n                    toggleItemOpen={cy.stub()}\n                    toggleItemActive={cy.stub()}\n                    onContextMenu={cy.stub()}\n                    items={[project]} />\n            </ThemeProvider>\n        );\n        cy.get('input[type=checkbox]').should('have.length', 1);\n    });\n\n    it(\"call onSelectionChanged with associated item\", () => {\n        const project = {\n            data: mockProjectResource(),\n            id: \"3\",\n            open: true,\n            active: true,\n            status: TreeItemStatus.LOADED,\n        };\n        const spy = cy.spy().as('spy');\n        const onSelectionChanged = (event, item) => spy(item);\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <TreeComponent\n                showSelection={true}\n                render={() => <div />}\n                toggleItemOpen={cy.stub()}\n                toggleItemActive={cy.stub()}\n                onContextMenu={cy.stub()}\n                toggleItemSelection={onSelectionChanged}\n                items={[project]} />\n            </ThemeProvider>\n            );\n        cy.get('input[type=checkbox]').click();\n        cy.get('@spy').should('have.been.calledWith', project);\n    });\n\n});\n"
  },
  {
    "path": "services/workbench2/src/components/tree/tree.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useCallback, useState } from 'react';\nimport { List, ListItem, ListItemIcon, Checkbox, Radio, Collapse } from \"@mui/material\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { CollectionIcon, DefaultIcon, DirectoryIcon, FileIcon, ProjectIcon, ProcessIcon, FilterGroupIcon, FreezeIcon } from 'components/icon/icon';\nimport { ReactElement } from \"react\";\nimport CircularProgress from '@mui/material/CircularProgress';\nimport classnames from \"classnames\";\nimport { getNodeChildrenIds, Tree, getNode, initTreeNode, createTree } from 'models/tree';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { SidePanelRightArrowIcon } from '../icon/icon';\nimport { Resource, ResourceKind } from 'models/resource';\nimport { GroupClass } from 'models/group';\nimport { SidePanelTreeCategory } from 'store/side-panel-tree/side-panel-tree-actions';\nimport { kebabCase } from 'lodash';\nimport { ResourcesState, getResource } from 'store/resources/resources';\nimport { TreePicker } from 'store/tree-picker/tree-picker';\nimport { isEqual } from 'lodash';\n\ntype CssRules = 'list'\n              | 'listItem'\n              | 'childLi'\n              | 'childItemName'\n              | 'active'\n              | 'loader'\n              | 'toggableIconContainer'\n              | 'iconClose'\n              | 'renderContainer'\n              | 'iconOpen'\n              | 'toggableIcon'\n              | 'checkbox'\n              | 'childItem'\n              | 'childItemIcon'\n              | 'frozenIcon'\n              | 'indentSpacer'\n              | 'itemWeightLight'\n              | 'itemWeightDark';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    list: {\n        padding: '3px 0px'\n    },\n    listItem: {\n        padding: '3px 0px',\n    },\n    loader: {\n        position: 'absolute',\n        transform: 'translate(0px)',\n        top: '3px'\n    },\n    toggableIconContainer: {\n        color: theme.palette.grey[\"700\"],\n        height: '14px',\n        width: '14px',\n        marginBottom: '0.4rem',\n    },\n    toggableIcon: {\n        fontSize: '14px',\n        minWidth: '14px',\n    },\n    renderContainer: {\n        overflow: 'hidden',\n        flex: 1\n    },\n    iconClose: {\n        transition: 'all 0.1s ease',\n    },\n    iconOpen: {\n        transition: 'all 0.1s ease',\n        transform: 'rotate(90deg)',\n    },\n    checkbox: {\n        width: theme.spacing(3),\n        height: theme.spacing(3),\n        margin: `0 ${theme.spacing(1)}`,\n        padding: 0,\n        color: theme.palette.grey[\"500\"],\n    },\n    childItem: {\n        cursor: 'pointer',\n        display: 'flex',\n        padding: '3px 20px',\n        fontSize: '0.875rem',\n        alignItems: 'center',\n        '&:hover': {\n            backgroundColor: 'rgba(0, 0, 0, 0.08)',\n        }\n    },\n    childLi: {\n        display: 'flex',\n        alignItems: 'center',\n    },\n    childItemName: {\n        fontSize: '0.875rem',\n    },\n    childItemIcon: {\n        marginLeft: '8px',\n        marginRight: '16px',\n        color: 'rgba(0, 0, 0, 0.54)',\n    },\n    active: {\n        color: theme.palette.primary.main,\n    },\n    itemWeightLight: {\n        color: theme.customs.colors.greyL,\n    },\n    itemWeightDark: {\n 
       color: \"black\",\n    },\n    frozenIcon: {\n        fontSize: 20,\n        color: theme.palette.grey[\"600\"],\n        marginLeft: '10px',\n    },\n    indentSpacer: {\n        width: '0.25rem'\n    }\n});\n\nexport enum TreeItemStatus {\n    INITIAL = 'INITIAL',\n    PENDING = 'PENDING',\n    LOADED = 'LOADED'\n}\n\nexport interface TreeItem<T> {\n    data: T;\n    depth?: number;\n    id: string;\n    open: boolean;\n    active: boolean;\n    selected?: boolean;\n    initialState?: boolean;\n    indeterminate?: boolean;\n    flatTree?: boolean;\n    status: TreeItemStatus;\n    items?: Array<TreeItem<T>>;\n    isFrozen?: boolean;\n}\n\nexport interface TreeProps<T> {\n    tree?: Tree<T>;\n    pickerId?: string;\n    treePicker?: TreePicker;\n    resources?: ResourcesState;\n    currentItemUuid?: string;\n    items?: Array<TreeItem<T>>;\n    level?: number;\n    itemsMap?: Map<string, TreeItem<T>>;\n    onContextMenu: (event: React.MouseEvent<HTMLElement>, item: TreeItem<T>) => void;\n    render: (item: TreeItem<T>, level?: number) => ReactElement<{}>;\n    showSelection?: boolean | ((item: TreeItem<T>) => boolean);\n    levelIndentation?: number;\n    itemRightPadding?: number;\n    toggleItemActive: (event: React.MouseEvent<HTMLElement>, item: TreeItem<T>) => void;\n    toggleItemOpen: (event: React.MouseEvent<HTMLElement>, item: TreeItem<T>) => void;\n    toggleItemSelection?: (event: React.MouseEvent<HTMLElement>, item: TreeItem<T>) => void;\n    selectedRef?: (node: HTMLDivElement | null) => void;\n\n    /**\n     * When set to true use radio buttons instead of checkboxes for item selection.\n     * This does not guarantee radio group behavior (i.e item mutual exclusivity).\n     * Any item selection logic must be done in the toggleItemActive callback prop.\n     */\n    useRadioButtons?: boolean;\n}\n\nexport enum TreeItemWeight {\n    NORMAL,\n    LIGHT,\n    DARK,\n};\n\nexport interface TreeItemWithWeight {\n    weight?: TreeItemWeight;\n};\n\nconst getActionAndId = (event: any, initAction: string | undefined = undefined) => {\n    const { nativeEvent: { target } } = event;\n    let currentTarget: HTMLElement = target as HTMLElement;\n    let action: string | undefined = initAction || currentTarget.dataset.action;\n    let id: string | undefined = currentTarget.dataset.id;\n\n    while (action === undefined || id === undefined) {\n        currentTarget = currentTarget.parentElement as HTMLElement;\n\n        if (!currentTarget) {\n            break;\n        }\n\n        action = action || currentTarget.dataset.action;\n        id = id || currentTarget.dataset.id;\n    }\n\n    return [action, id];\n};\n\nconst isInFavoritesTree = (item: TreeItem<any>): boolean => {\n    return item.id === SidePanelTreeCategory.FAVORITES || item.id === SidePanelTreeCategory.PUBLIC_FAVORITES;\n}\n\ninterface FlatTreeProps {\n    it: TreeItem<any>;\n    levelIndentation: number;\n    onContextMenu: Function;\n    handleToggleItemOpen: Function;\n    toggleItemActive: Function;\n    getToggableIconClassNames: Function;\n    getProperArrowAnimation: Function;\n    itemsMap?: Map<string, TreeItem<any>>;\n    classes: any;\n    showSelection: any;\n    useRadioButtons?: boolean;\n    handleCheckboxChange: Function;\n    selectedRef?: (node: HTMLDivElement | null) => void;\n}\n\nconst FLAT_TREE_ACTIONS = {\n    toggleOpen: 'TOGGLE_OPEN',\n    contextMenu: 'CONTEXT_MENU',\n    toggleActive: 'TOGGLE_ACTIVE',\n};\n\nconst ItemIcon = React.memo(({ type, kind, headKind, active, groupClass, classes 
}: any) => {\n    let Icon = ProjectIcon;\n\n    if (groupClass === GroupClass.FILTER) {\n        Icon = FilterGroupIcon;\n    }\n\n    if (type) {\n        switch (type) {\n            case 'directory':\n                Icon = DirectoryIcon;\n                break;\n            case 'file':\n                Icon = FileIcon;\n                break;\n            default:\n                Icon = DefaultIcon;\n        }\n    }\n\n    if (kind) {\n        if(kind === ResourceKind.LINK && headKind) kind = headKind;\n        switch (kind) {\n            case ResourceKind.COLLECTION:\n                Icon = CollectionIcon;\n                break;\n            case ResourceKind.CONTAINER_REQUEST:\n                Icon = ProcessIcon;\n                break;\n            default:\n                break;\n        }\n    }\n\n    return <Icon className={classnames({ [classes.active]: active }, classes.childItemIcon)} />;\n});\n\nconst FlatTree = (props: FlatTreeProps) =>\n    <div\n        onContextMenu={(event) => {\n            const id = getActionAndId(event, FLAT_TREE_ACTIONS.contextMenu)[1];\n            props.onContextMenu(event, { id } as any);\n        }}\n        onClick={(event) => {\n            const [action, id] = getActionAndId(event);\n\n            if (action && id) {\n                const item = props.itemsMap ? props.itemsMap[id] : { id };\n\n                switch (action) {\n                    case FLAT_TREE_ACTIONS.toggleOpen:\n                        props.handleToggleItemOpen(item as any, event);\n                        break;\n                    case FLAT_TREE_ACTIONS.toggleActive:\n                        props.toggleItemActive(event, item as any);\n                        break;\n                    default:\n                        break;\n                }\n            }\n        }}\n    >\n        {\n            (props.it.items || [])\n                .map((item: any, index: number) => <div key={item.id || index} data-id={item.id}\n                    className={classnames(props.classes.childItem, {\n                        [props.classes.active]: item.active,\n                        [props.classes.itemWeightLight]: (item.data.weight === TreeItemWeight.LIGHT && !item.active),\n                        [props.classes.itemWeightDark]: (item.data.weight === TreeItemWeight.DARK && !item.active),\n                    })}\n                    style={{ paddingLeft: `${item.depth * props.levelIndentation}px` }}>\n                    {isInFavoritesTree(props.it) ?\n                     <div className={props.classes.indentSpacer} />\n:\n                     <i data-action={FLAT_TREE_ACTIONS.toggleOpen} className={props.classes.toggableIconContainer}>\n                         <ListItemIcon className={props.getToggableIconClassNames(item.open, item.active)}>\n                             {props.getProperArrowAnimation(item.status, item.items!)}\n                         </ListItemIcon>\n                     </i>}\n                    {props.showSelection(item) && !props.useRadioButtons &&\n                     <Checkbox\n                         checked={item.selected}\n                         className={props.classes.checkbox}\n                         color=\"primary\"\n                         onClick={props.handleCheckboxChange(item)} />}\n                    {props.showSelection(item) && props.useRadioButtons &&\n                     <Radio\n                         checked={item.selected}\n                         className={props.classes.checkbox}\n                        
 color=\"primary\" />}\n                    <div data-action={FLAT_TREE_ACTIONS.toggleActive} className={props.classes.renderContainer} ref={item.active ? props.selectedRef : undefined}>\n                    <span className={props.classes.childLi}>\n                    <ItemIcon type={item.data.type} active={item.active} kind={item.data.kind} headKind={item.data.headKind || null} groupClass={item.data.kind === ResourceKind.GROUP ? item.data.groupClass : ''} classes={props.classes} />\n                    <span className={props.classes.childItemName}>\n                        {item.data.name}\n                    </span>\n                    {\n                        !!item.data.frozenByUuid ? <FreezeIcon className={props.classes.frozenIcon} /> : null\n                    }\n                        </span>\n                    </div>\n                </div>)\n        }\n    </div>;\n\nfunction treePickerToTreeItems<T>(tree: Tree<T>, resources: ResourcesState){\n    return function(id: string): TreeItem<any> {\n        const node = getNode(id)(tree) || initTreeNode({ id: '', value: 'InvalidNode' });\n        const items = getNodeChildrenIds(node.id)(tree)\n            .map(treePickerToTreeItems(tree, resources));\n        const resource = getResource<Resource>(node.id)(resources);\n\n        return {\n            active: node.active,\n            data: resource\n                ? {\n                    ...resource,\n                    name: typeof node.value === \"string\"\n                        ? node.value\n                        : typeof (node.value as any).name === \"string\"\n                        ? (node.value as any).name\n                        : \"\",\n                    weight: (node.value as any).weight\n                }\n                : node.value,\n            id: node.id,\n            items: items.length > 0 ? items : undefined,\n            open: node.expanded,\n            selected: node.selected,\n            status: TreeItemStatus[node.status],\n        };\n    };\n}\ntype ItemsMap<T> = Map<string, TreeItem<T>>;\n\nfunction flatTree<T>(itemsMap: ItemsMap<T>, depth: number, items?: TreeItem<T>[]): TreeItem<T>[]{\n    return items ? items\n        .map((item) => addToItemsMap(item, itemsMap))\n        .reduce((acc, next) => {\n            const { items } = next;\n            acc.push({ ...next, depth });\n            acc.push(...(next.open ? flatTree(itemsMap, depth + 1, items) : []));\n            return acc;\n        }, [] as TreeItem<T>[]) : [];\n};\n\nfunction addToItemsMap<T>(item: TreeItem<T>, itemsMap: Map<string, TreeItem<T>>): TreeItem<T> {\n    itemsMap[item.id] = item;\n    return item;\n};\n\nexport const TreeComponent = withStyles(styles)(\n    React.memo(function<T>(props: TreeProps<T> & WithStyles<CssRules>) {\n        const level = props.level ? props.level : 0;\n        const { classes, render, toggleItemActive, toggleItemOpen, currentItemUuid, useRadioButtons, resources, treePicker, pickerId } = props;\n        const pickedTree = treePicker && pickerId ? 
treePicker[pickerId] : createTree<T>();\n        const tree = props.tree || pickedTree;\n        const { list, listItem, loader, toggableIconContainer, renderContainer } = classes;\n        const itemsMap: ItemsMap<T> = new Map();\n        const fillMap = (tree: Tree<T>, resources: ResourcesState) => getNodeChildrenIds('')(tree)\n            .map(treePickerToTreeItems(tree, resources))\n            .map(item => addToItemsMap<T>(item, itemsMap))\n            .map(parentItem => ({\n                ...parentItem,\n                flatTree: true,\n                items: flatTree(itemsMap, 2, parentItem.items || []),\n            }))\n        const items = tree && resources ? fillMap(tree, resources) : props.items;\n\n        const showSelection = typeof props.showSelection === 'function'\n            ? props.showSelection\n            : () => props.showSelection ? true : false;\n\n        const getProperArrowAnimation = (status: string, items: Array<TreeItem<T>>) => {\n            return isSidePanelIconNotNeeded(status, items) ? <span /> : <SidePanelRightArrowIcon style={{ fontSize: '14px' }} data-cy=\"side-panel-arrow-icon\" />;\n        }\n\n        const isSidePanelIconNotNeeded = (status: string, items: Array<TreeItem<T>>) => {\n            return status === TreeItemStatus.PENDING ||\n                (status === TreeItemStatus.LOADED && !items) ||\n                (status === TreeItemStatus.LOADED && items && items.length === 0);\n        }\n\n        const getToggableIconClassNames = (isOpen?: boolean, isActive?: boolean) => {\n            const { iconOpen, iconClose, active, toggableIcon } = props.classes;\n            return classnames(toggableIcon, {\n                [iconOpen]: isOpen,\n                [iconClose]: !isOpen,\n                [active]: isActive\n            });\n        }\n\n        const handleCheckboxChange = (item: TreeItem<T>) => {\n            const { toggleItemSelection } = props;\n            return toggleItemSelection\n                ? 
(event: React.MouseEvent<HTMLElement>) => {\n                    event.stopPropagation();\n                    toggleItemSelection(event, item);\n                }\n                : undefined;\n        }\n\n        const handleToggleItemOpen = (item: TreeItem<T>, event: React.MouseEvent<HTMLElement>) => {\n            event.stopPropagation();\n            props.toggleItemOpen(event, item);\n        }\n\n        // Scroll to selected item whenever it changes, accepts selectedRef from props for recursive trees\n        const [cachedSelectedRef, setCachedRef] = useState<HTMLDivElement | null>(null)\n        const scrollToNode = useCallback((node: HTMLDivElement | null) => {\n            if (node && node.scrollIntoView && node !== cachedSelectedRef) {\n                node.scrollIntoView({ behavior: \"smooth\", block: \"center\" });\n            }\n            setCachedRef(node);\n        }, [cachedSelectedRef])\n        const selectedRef = props.selectedRef || scrollToNode;\n\n        const { levelIndentation = 20, itemRightPadding = 20 } = props;\n        return <List className={list}>\n            {items && items.map((it: TreeItem<T>, idx: number) => {\n                if (isInFavoritesTree(it) && it.open === true && it.items && it.items.length) {\n                    it = { ...it, items: it.items.filter(item => item.depth && item.depth < 3) }\n                }\n                return <div data-cy=\"tree-top-level-item\" key={`item/${level}/${it.id}`}>\n                    <ListItem button className={listItem}\n                        data-cy=\"tree-li\"\n                        style={{\n                            paddingLeft: (level + 1) * levelIndentation,\n                            paddingRight: itemRightPadding,\n                        }}\n                        disableRipple={true}\n                        onClick={event => toggleItemActive(event, it)}\n                        selected={showSelection(it) && it.id === currentItemUuid}\n                        onContextMenu={(event) => props.onContextMenu(event, it)}>\n                        {it.status === TreeItemStatus.PENDING ?\n                            <CircularProgress size={10} className={loader} /> : null}\n                        <i onClick={(e) => handleToggleItemOpen(it, e)}\n                            className={toggableIconContainer}>\n                            <ListItemIcon className={getToggableIconClassNames(it.open, it.active)}\n                                data-cy={`tree-item-toggle-${kebabCase(it.id.toString())}`}\n                                >\n                                {getProperArrowAnimation(it.status, it.items!)}\n                            </ListItemIcon>\n                        </i>\n                        {showSelection(it) && !useRadioButtons &&\n                            <Checkbox\n                                checked={it.selected}\n                                indeterminate={!it.selected && it.indeterminate}\n                                className={classes.checkbox}\n                                color=\"primary\"\n                                onClick={handleCheckboxChange(it)} />}\n                        {showSelection(it) && useRadioButtons &&\n                            <Radio\n                                checked={it.selected}\n                                className={classes.checkbox}\n                                color=\"primary\" />}\n                        <div className={renderContainer} data-active={it.active} ref={!!it.active ? 
selectedRef : undefined}>\n                            {render(it, level)}\n                        </div>\n                    </ListItem>\n                    {\n                        it.open && it.items && it.items.length > 0 &&\n                            it.flatTree ?\n                            <FlatTree\n                                it={it}\n                                itemsMap={itemsMap}\n                                showSelection={showSelection}\n                                classes={props.classes}\n                                useRadioButtons={useRadioButtons}\n                                levelIndentation={levelIndentation}\n                                handleCheckboxChange={handleCheckboxChange}\n                                onContextMenu={props.onContextMenu}\n                                handleToggleItemOpen={handleToggleItemOpen}\n                                toggleItemActive={props.toggleItemActive}\n                                getToggableIconClassNames={getToggableIconClassNames}\n                                getProperArrowAnimation={getProperArrowAnimation}\n                                selectedRef={selectedRef}\n                            /> :\n                            <Collapse in={it.open} timeout=\"auto\" unmountOnExit>\n                                <TreeComponent\n                                    tree={props.tree}\n                                    resources={props.resources}\n                                    showSelection={props.showSelection}\n                                    items={it.items}\n                                    render={render}\n                                    toggleItemOpen={toggleItemOpen}\n                                    toggleItemActive={toggleItemActive}\n                                    level={level + 1}\n                                    onContextMenu={props.onContextMenu}\n                                    toggleItemSelection={props.toggleItemSelection}\n                                    selectedRef={selectedRef}\n                                />\n                            </Collapse>\n                    }\n                </div>;\n            })}\n        </List>;\n    }, preventRerender)\n);\n\n// return true to prevent re-render, false to allow re-render\nfunction preventRerender<T>(prevProps: TreeProps<T>, nextProps: TreeProps<T>) {\n    if(prevProps.treePicker !== nextProps.treePicker) return false;\n    if (haveResourcesUpdated(nextProps)) return false;\n    if(!!nextProps.items && !isEqual(prevProps.items, nextProps.items)) return false;\n    return true;\n}\n\n// we don't want to update on every resource update, just the resources that are already in the tree\nfunction haveResourcesUpdated<T>(nextProps: TreeProps<T>) {\n    const { treePicker, pickerId, resources = {} } = nextProps;\n    const nextTreeItems = treePicker && pickerId && treePicker[pickerId];\n    if (nextTreeItems) {\n        for (const id in nextTreeItems) {\n            if (resources[id] && !isEqual(resources[id], nextTreeItems[id].value)) {\n                return true;\n            }\n        }\n    }\n    return false;\n}\n"
  },
  {
    "path": "services/workbench2/src/components/tree/virtual-tree.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport classnames from \"classnames\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ReactElement } from \"react\";\nimport { FixedSizeList, ListChildComponentProps } from \"react-window\";\nimport AutoSizer from \"react-virtualized-auto-sizer\";\n\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { TreeItem, TreeProps, TreeItemStatus } from './tree';\nimport { ListItem, Radio, Checkbox, CircularProgress, ListItemIcon } from '@mui/material';\nimport { SidePanelRightArrowIcon } from '../icon/icon';\n\ntype CssRules = 'list'\n    | 'listItem'\n    | 'active'\n    | 'loader'\n    | 'toggableIconContainer'\n    | 'iconClose'\n    | 'renderContainer'\n    | 'iconOpen'\n    | 'toggableIcon'\n    | 'checkbox'\n    | 'virtualFileTree'\n    | 'virtualizedList';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    list: {\n        padding: '3px 0px',\n    },\n    virtualFileTree: {\n        \"&:last-child\": {\n            paddingBottom: 20\n          }\n    },\n    virtualizedList: {\n        height: '200px',\n    },\n    listItem: {\n        padding: '3px 0px',\n    },\n    loader: {\n        position: 'absolute',\n        transform: 'translate(0px)',\n        top: '3px'\n    },\n    toggableIconContainer: {\n        color: theme.palette.grey[\"700\"],\n        height: '14px',\n        width: '14px',\n    },\n    toggableIcon: {\n        fontSize: '14px'\n    },\n    renderContainer: {\n        flex: 1\n    },\n    active: {\n        color: theme.palette.primary.main,\n    },\n    iconClose: {\n        transition: 'all 0.1s ease',\n    },\n    iconOpen: {\n        transition: 'all 0.1s ease',\n        transform: 'rotate(90deg)',\n    },\n    checkbox: {\n        width: theme.spacing(3),\n        height: theme.spacing(3),\n        margin: `0 ${theme.spacing(1)}`,\n        padding: 0,\n        color: theme.palette.grey[\"500\"],\n    }\n});\n\nexport interface VirtualTreeItem<T> extends TreeItem<T> {\n    itemCount?: number;\n    level?: number;\n}\n\n// For some reason, on TSX files it isn't accepted just one generic param, so\n// I'm using <T, _> as a workaround.\n// eslint-disable-next-line\nexport const Row =  <T, _>(itemList: VirtualTreeItem<T>[], render: any, treeProps: TreeProps<T>) => withStyles(styles)(\n    (props: React.PropsWithChildren<ListChildComponentProps> & WithStyles<CssRules>) => {\n        const { index, style, classes } = props;\n        const it = itemList[index];\n        const level = it.level || 0;\n        const { toggleItemActive, currentItemUuid, useRadioButtons } = treeProps;\n        const { listItem, loader, toggableIconContainer, renderContainer, virtualFileTree } = classes;\n        const { levelIndentation = 20, itemRightPadding = 20 } = treeProps;\n\n        const showSelection = typeof treeProps.showSelection === 'function'\n            ? treeProps.showSelection\n            : () => treeProps.showSelection ? 
true : false;\n\n        const handleRowContextMenu = (item: VirtualTreeItem<T>) =>\n            (event: React.MouseEvent<HTMLElement>) => {\n                treeProps.onContextMenu(event, item);\n            };\n\n        const handleToggleItemOpen = (item: VirtualTreeItem<T>) =>\n            (event: React.MouseEvent<HTMLElement>) => {\n                event.stopPropagation();\n                treeProps.toggleItemOpen(event, item);\n            };\n\n        const getToggableIconClassNames = (isOpen?: boolean, isActive?: boolean) => {\n            const { iconOpen, iconClose, active, toggableIcon } = props.classes;\n            return classnames(toggableIcon, {\n                [iconOpen]: isOpen,\n                [iconClose]: !isOpen,\n                [active]: isActive\n            });\n        };\n\n        const isSidePanelIconNotNeeded = (status: string, itemCount: number) => {\n            return status === TreeItemStatus.PENDING ||\n                (status === TreeItemStatus.LOADED && itemCount === 0);\n        };\n\n        const getProperArrowAnimation = (status: string, itemCount: number) => {\n            return isSidePanelIconNotNeeded(status, itemCount) ? <span /> : <SidePanelRightArrowIcon style={{ fontSize: '14px' }} />;\n        };\n\n        const handleCheckboxChange = (item: VirtualTreeItem<T>) => {\n            const { toggleItemSelection } = treeProps;\n            return toggleItemSelection\n                ? (event: React.MouseEvent<HTMLElement>) => {\n                    event.stopPropagation();\n                    toggleItemSelection(event, item);\n                }\n                : undefined;\n        };\n\n        return <div className={virtualFileTree} data-cy='virtual-file-tree' style={style}>\n            <ListItem button className={listItem}\n                style={{\n                    paddingLeft: (level + 1) * levelIndentation,\n                    paddingRight: itemRightPadding,\n                }}\n                disableRipple={true}\n                onClick={event => toggleItemActive(event, it)}\n                selected={showSelection(it) && it.id === currentItemUuid}\n                onContextMenu={handleRowContextMenu(it)}>\n                {it.status === TreeItemStatus.PENDING ?\n                    <CircularProgress size={10} className={loader} /> : null}\n                <i onClick={handleToggleItemOpen(it)}\n                    className={toggableIconContainer}>\n                    <ListItemIcon className={getToggableIconClassNames(it.open, it.active)}>\n                        {getProperArrowAnimation(it.status, it.itemCount!)}\n                    </ListItemIcon>\n                </i>\n                {showSelection(it) && !useRadioButtons &&\n                    <Checkbox\n                        checked={it.selected}\n                        className={classes.checkbox}\n                        color=\"primary\"\n                        onClick={handleCheckboxChange(it)} />}\n                {showSelection(it) && useRadioButtons &&\n                    <Radio\n                        checked={it.selected}\n                        className={classes.checkbox}\n                        color=\"primary\" />}\n                <div className={renderContainer}>\n                    {render(it, level)}\n                </div>\n            </ListItem>\n        </div>;\n    });\n\nconst itemSize = 30;\n\n// eslint-disable-next-line\nexport const VirtualList = <T, _>(height: number, width: number, items: VirtualTreeItem<T>[], render: any, 
treeProps: TreeProps<T>) =>\n    <FixedSizeList\n        height={height}\n        itemCount={items.length}\n        itemSize={itemSize}\n        width={width}\n    >\n        {Row(items, render, treeProps)}\n    </FixedSizeList>;\n\nexport const VirtualTree = withStyles(styles)(\n    class Component<T> extends React.Component<TreeProps<T> & WithStyles<CssRules>, {}> {\n        render(): ReactElement<any> {\n            const { items, render } = this.props;\n            return <AutoSizer>\n                {({ height, width }) => {\n                    return VirtualList(height, width, items || [], render, this.props);\n                }}\n            </AutoSizer>;\n        }\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/components/warning/warning.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { ErrorIcon } from \"components/icon/icon\";\nimport { Tooltip } from \"@mui/material\";\nimport { disallowSlash } from \"validators/valid-name\";\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\n\ninterface WarningComponentProps {\n    text: string;\n    rules: RegExp[];\n    message: string;\n}\n\nexport const WarningComponent = ({ text, rules, message }: WarningComponentProps) =>\n    !text ? <Tooltip title={\"No name\"}><ErrorIcon /></Tooltip>\n        : (rules.find(aRule => text.match(aRule) !== null)\n            ? message\n                ? <Tooltip title={message}><ErrorIcon /></Tooltip>\n                : <ErrorIcon />\n            : null);\n\ninterface IllegalNamingWarningProps {\n    name: string;\n    forwardSlashNameSubstitution: string;\n}\n\n\nexport const IllegalNamingWarning = connect(\n    (state: RootState) => {\n        return { forwardSlashNameSubstitution: state.auth.config.clusterConfig.Collections.ForwardSlashNameSubstitution };\n    })(({ name, forwardSlashNameSubstitution }: IllegalNamingWarningProps) =>\n        <WarningComponent\n            text={name} rules={forwardSlashNameSubstitution === \"\" ? [disallowSlash] : []}\n            message=\"Names embedding '/' will be renamed or invisible to file system access (arv-mount or WebDAV)\" />);\n"
  },
  {
    "path": "services/workbench2/src/components/warning-collection/warning-collection.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { WarningIcon } from \"components/icon/icon\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { DialogContentText } from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\n\ntype CssRules = 'container' | 'text';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    container: {\n        display: 'flex',\n        alignItems: 'center',\n    },\n    text: {\n        paddingLeft: '8px'\n    }\n});\n\ninterface WarningCollectionProps {\n    text: string;\n}\n\nexport const WarningCollection = withStyles(styles)(({ classes, text }: WarningCollectionProps & WithStyles<CssRules>) =>\n    <span className={classes.container}>\n        <WarningIcon />\n        <DialogContentText className={classes.text}>{text}</DialogContentText>\n    </span>);"
  },
  {
    "path": "services/workbench2/src/components/workflow-inputs-form/validators.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { CommandInputParameter } from 'models/workflow';\nimport { fieldRequire } from 'validators/require';\nimport { CWLType } from '../../models/workflow';\n\n\nconst alwaysValid = () => undefined;\n\nexport const required = ({ type }: CommandInputParameter) => {\n    if (type instanceof Array) {\n        for (const t of type) {\n            if (t === CWLType.NULL) {\n                return alwaysValid;\n            }\n        }\n    }\n    return fieldRequire;\n};\n"
  },
  {
    "path": "services/workbench2/src/components/workflow-inputs-form/workflow-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CommandInputParameter } from 'models/workflow';\nimport { TextField } from '@mui/material';\nimport { required } from 'components/workflow-inputs-form/validators';\n\nexport interface WorkflowInputProps {\n    input: CommandInputParameter;\n}\nexport const WorkflowInput = ({ input }: WorkflowInputProps) =>\n    <TextField\n        variant=\"standard\"\n        label={`${input.label || input.id}${required(input)() ? '*' : ''}`}\n        name={input.id}\n        helperText={input.doc}\n        fullWidth />;"
  },
  {
    "path": "services/workbench2/src/cypress/mocks/service-provider.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nconst mockServices = {\n    collectionService: {\n        files: ()=>({ then: (callback) => callback([{ name: 'banner.html' }]) }),\n        getFileContents: ()=>({ then: (callback) => callback('<h1>Test banner message</h1>') }),\n    },\n};\n\nconst serviceProvider = {\n    getServices: () => {\n        return mockServices\n    },\n}\n\nexport default serviceProvider;"
  },
  {
    "path": "services/workbench2/src/cypress/utils/contains-action-subset.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuAction } from \"../../views-components/context-menu/context-menu-action-set\";\n\nexport const containsActionSubSet = (mainSet: ContextMenuAction[], subSet: ContextMenuAction[]) => {\n    const mainNames = mainSet.map(action => action.name)\n    const subNames = subSet.map(action => action.name)\n    return subNames.every(name => mainNames.includes(name));\n}\n"
  },
  {
    "path": "services/workbench2/src/index.css",
    "content": "body {\n    margin: 0;\n    padding: 0;\n    font-family: 'Roboto', \"Helvetica\", \"Arial\", sans-serif;\n    width: 100vw;\n    height: 100vh;\n}\n\n.app-banner {\n    width: calc(100% - 2rem);\n    height: 150px;\n    z-index: 11111;\n    position: fixed;\n    top: 0px;\n    background-color: #00bfa5;\n    border: 1px solid #01685a;\n    color: #ffffff;\n    margin: 1rem;\n    box-sizing: border-box;\n    cursor: pointer;\n}\n\n.app-banner span {\n    font-size: 2rem;\n    text-align: center;\n    display: block;\n    margin: auto;\n    padding: 2rem;\n}"
  },
  {
    "path": "services/workbench2/src/index.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport ReactDOM from \"react-dom\";\nimport { Provider } from \"react-redux\";\nimport { MainPanel } from \"views/main-panel/main-panel\";\nimport \"index.css\";\nimport { Route, Switch } from \"react-router\";\nimport { createBrowserHistory } from \"history\";\nimport { History } from \"history\";\nimport { configureStore, RootStore } from \"store/store\";\nimport { ConnectedRouter } from \"connected-react-router\";\nimport { ApiToken } from \"views-components/api-token/api-token\";\nimport { AddSession } from \"views-components/add-session/add-session\";\nimport { initAuth, logout } from \"store/auth/auth-action\";\nimport { createServices } from \"services/services\";\nimport { ThemeProvider, Theme, StyledEngineProvider } from \"@mui/material/styles\";\nimport { CustomTheme } from \"common/custom-theme\";\nimport { fetchConfig } from \"common/config\";\nimport servicesProvider from \"common/service-provider\";\nimport { addMenuActionSet } from \"common/menu-action-set-actions\";\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\nimport { rootProjectActionSet } from \"views-components/context-menu/action-sets/root-project-action-set\";\nimport {\n    filterGroupActionSet,\n    frozenActionSet,\n    projectActionSet,\n    readOnlyProjectActionSet,\n    writeableProjectActionSet,\n    manageableProjectActionSet,\n    frozenManageableProjectActionSet,\n} from \"views-components/context-menu/action-sets/project-action-set\";\nimport { resourceActionSet } from \"views-components/context-menu/action-sets/resource-action-set\";\nimport { favoriteActionSet } from \"views-components/context-menu/action-sets/favorite-action-set\";\nimport {\n    collectionFilesActionSet,\n    collectionFilesMultipleActionSet,\n    readOnlyCollectionFilesActionSet,\n    readOnlyCollectionFilesMultipleActionSet,\n} from \"views-components/context-menu/action-sets/collection-files-action-set\";\nimport {\n    collectionDirectoryItemActionSet,\n    collectionFileItemActionSet,\n    readOnlyCollectionDirectoryItemActionSet,\n    readOnlyCollectionFileItemActionSet,\n} from \"views-components/context-menu/action-sets/collection-files-item-action-set\";\nimport { collectionFilesNotSelectedActionSet } from \"views-components/context-menu/action-sets/collection-files-not-selected-action-set\";\nimport {\n    collectionActionSet,\n    collectionAdminActionSet,\n    oldCollectionVersionActionSet,\n    readOnlyCollectionActionSet,\n    writeableCollectionSet,\n} from \"views-components/context-menu/action-sets/collection-action-set\";\nimport { loadWorkbench } from \"store/workbench/workbench-actions\";\nimport { Routes } from \"routes/routes\";\nimport { trashActionSet } from \"views-components/context-menu/action-sets/trash-action-set\";\nimport { ServiceRepository } from \"services/services\";\nimport { initWebSocket } from \"websocket/websocket\";\nimport { Config } from \"common/config\";\nimport { addRouteChangeHandlers } from \"./routes/route-change-handlers\";\nimport { setTokenDialogApiHost } from \"store/token-dialog/token-dialog-actions\";\nimport {\n    processResourceActionSet,\n    runningProcessResourceActionSet,\n    processResourceAdminActionSet,\n    runningProcessResourceAdminActionSet,\n    readOnlyProcessResourceActionSet,\n} from \"views-components/context-menu/action-sets/process-resource-action-set\";\nimport { trashedCollectionActionSet 
} from \"views-components/context-menu/action-sets/trashed-collection-action-set\";\nimport { setBuildInfo } from \"store/app-info/app-info-actions\";\nimport { getBuildInfo } from \"common/app-info\";\nimport { DragDropContextProvider } from \"react-dnd\";\nimport HTML5Backend from \"react-dnd-html5-backend\";\nimport { initAdvancedFormProjectsTree } from \"store/search-bar/search-bar-actions\";\nimport { repositoryActionSet } from \"views-components/context-menu/action-sets/repository-action-set\";\nimport { sshKeyActionSet } from \"views-components/context-menu/action-sets/ssh-key-action-set\";\nimport { keepServiceActionSet } from \"views-components/context-menu/action-sets/keep-service-action-set\";\nimport { loadVocabulary } from \"store/vocabulary/vocabulary-actions\";\nimport { virtualMachineActionSet } from \"views-components/context-menu/action-sets/virtual-machine-action-set\";\nimport { userActionSet } from \"views-components/context-menu/action-sets/user-action-set\";\nimport { UserDetailsActionSet } from \"views-components/context-menu/action-sets/user-details-action-set\";\nimport { apiClientAuthorizationActionSet } from \"views-components/context-menu/action-sets/api-client-authorization-action-set\";\nimport { groupActionSet, builtInGroupActionSet } from \"views-components/context-menu/action-sets/group-action-set\";\nimport { groupMemberActionSet } from \"views-components/context-menu/action-sets/group-member-action-set\";\nimport { linkActionSet } from \"views-components/context-menu/action-sets/link-action-set\";\nimport { loadFileViewersConfig } from \"store/file-viewers/file-viewers-actions\";\nimport {\n    filterGroupAdminActionSet,\n    frozenAdminActionSet,\n    projectAdminActionSet,\n} from \"views-components/context-menu/action-sets/project-admin-action-set\";\nimport { permissionEditActionSet } from \"views-components/context-menu/action-sets/permission-edit-action-set\";\nimport { workflowActionSet, readOnlyWorkflowActionSet } from \"views-components/context-menu/action-sets/workflow-action-set\";\nimport { storeRedirects } from \"./common/redirect-to\";\nimport { searchResultsActionSet } from \"views-components/context-menu/action-sets/search-results-action-set\";\nimport { externalCredentialActionSet } from \"views-components/context-menu/action-sets/external-credential-action-set\";\n\nimport 'bootstrap/dist/css/bootstrap.min.css';\nimport '@coreui/coreui/dist/css/coreui.min.css';\nimport 'react-loading-skeleton/dist/skeleton.css';\n\n\ndeclare module '@mui/styles/defaultTheme' {\n  // eslint-disable-next-line @typescript-eslint/no-empty-interface\n  interface DefaultTheme extends Theme {}\n}\n\n\nconsole.log(`Starting arvados [${getBuildInfo()}]`);\n\naddMenuActionSet(ContextMenuKind.ROOT_PROJECT, rootProjectActionSet);\naddMenuActionSet(ContextMenuKind.PROJECT, projectActionSet);\naddMenuActionSet(ContextMenuKind.READONLY_PROJECT, readOnlyProjectActionSet);\naddMenuActionSet(ContextMenuKind.FILTER_GROUP, filterGroupActionSet);\naddMenuActionSet(ContextMenuKind.RESOURCE, resourceActionSet);\naddMenuActionSet(ContextMenuKind.FAVORITE, favoriteActionSet);\naddMenuActionSet(ContextMenuKind.COLLECTION_FILES, collectionFilesActionSet);\naddMenuActionSet(ContextMenuKind.COLLECTION_FILES_MULTIPLE, collectionFilesMultipleActionSet);\naddMenuActionSet(ContextMenuKind.READONLY_COLLECTION_FILES, readOnlyCollectionFilesActionSet);\naddMenuActionSet(ContextMenuKind.READONLY_COLLECTION_FILES_MULTIPLE, 
readOnlyCollectionFilesMultipleActionSet);\naddMenuActionSet(ContextMenuKind.COLLECTION_FILES_NOT_SELECTED, collectionFilesNotSelectedActionSet);\naddMenuActionSet(ContextMenuKind.COLLECTION_DIRECTORY_ITEM, collectionDirectoryItemActionSet);\naddMenuActionSet(ContextMenuKind.READONLY_COLLECTION_DIRECTORY_ITEM, readOnlyCollectionDirectoryItemActionSet);\naddMenuActionSet(ContextMenuKind.COLLECTION_FILE_ITEM, collectionFileItemActionSet);\naddMenuActionSet(ContextMenuKind.READONLY_COLLECTION_FILE_ITEM, readOnlyCollectionFileItemActionSet);\naddMenuActionSet(ContextMenuKind.COLLECTION, collectionActionSet);\naddMenuActionSet(ContextMenuKind.READONLY_COLLECTION, readOnlyCollectionActionSet);\naddMenuActionSet(ContextMenuKind.OLD_VERSION_COLLECTION, oldCollectionVersionActionSet);\naddMenuActionSet(ContextMenuKind.TRASHED_COLLECTION, trashedCollectionActionSet);\naddMenuActionSet(ContextMenuKind.PROCESS_RESOURCE, processResourceActionSet);\naddMenuActionSet(ContextMenuKind.RUNNING_PROCESS_RESOURCE, runningProcessResourceActionSet);\naddMenuActionSet(ContextMenuKind.READONLY_PROCESS_RESOURCE, readOnlyProcessResourceActionSet);\naddMenuActionSet(ContextMenuKind.TRASH, trashActionSet);\naddMenuActionSet(ContextMenuKind.REPOSITORY, repositoryActionSet);\naddMenuActionSet(ContextMenuKind.SSH_KEY, sshKeyActionSet);\naddMenuActionSet(ContextMenuKind.VIRTUAL_MACHINE, virtualMachineActionSet);\naddMenuActionSet(ContextMenuKind.KEEP_SERVICE, keepServiceActionSet);\naddMenuActionSet(ContextMenuKind.USER, userActionSet);\naddMenuActionSet(ContextMenuKind.USER_DETAILS, UserDetailsActionSet);\naddMenuActionSet(ContextMenuKind.LINK, linkActionSet);\naddMenuActionSet(ContextMenuKind.API_CLIENT_AUTHORIZATION, apiClientAuthorizationActionSet);\naddMenuActionSet(ContextMenuKind.GROUPS, groupActionSet);\naddMenuActionSet(ContextMenuKind.BUILT_IN_GROUP, builtInGroupActionSet);\naddMenuActionSet(ContextMenuKind.GROUP_MEMBER, groupMemberActionSet);\naddMenuActionSet(ContextMenuKind.COLLECTION_ADMIN, collectionAdminActionSet);\naddMenuActionSet(ContextMenuKind.PROCESS_ADMIN, processResourceAdminActionSet);\naddMenuActionSet(ContextMenuKind.RUNNING_PROCESS_ADMIN, runningProcessResourceAdminActionSet);\naddMenuActionSet(ContextMenuKind.PROJECT_ADMIN, projectAdminActionSet);\naddMenuActionSet(ContextMenuKind.FROZEN_PROJECT, frozenActionSet);\naddMenuActionSet(ContextMenuKind.FROZEN_PROJECT_ADMIN, frozenAdminActionSet);\naddMenuActionSet(ContextMenuKind.FILTER_GROUP_ADMIN, filterGroupAdminActionSet);\naddMenuActionSet(ContextMenuKind.PERMISSION_EDIT, permissionEditActionSet);\naddMenuActionSet(ContextMenuKind.READONLY_WORKFLOW, readOnlyWorkflowActionSet);\naddMenuActionSet(ContextMenuKind.WORKFLOW, workflowActionSet);\naddMenuActionSet(ContextMenuKind.SEARCH_RESULTS, searchResultsActionSet);\naddMenuActionSet(ContextMenuKind.WRITEABLE_PROJECT, writeableProjectActionSet);\naddMenuActionSet(ContextMenuKind.WRITEABLE_COLLECTION, writeableCollectionSet);\naddMenuActionSet(ContextMenuKind.MANAGEABLE_PROJECT, manageableProjectActionSet);\naddMenuActionSet(ContextMenuKind.FROZEN_MANAGEABLE_PROJECT, frozenManageableProjectActionSet);\naddMenuActionSet(ContextMenuKind.EXTERNAL_CREDENTIAL, externalCredentialActionSet);\n\nstoreRedirects();\n\nfetchConfig().then(({ config, apiHost }) => {\n    const history = createBrowserHistory();\n\n    // Provide browser's history access to Cypress to allow programmatic\n    // navigation.\n    if ((window as any).Cypress) {\n        (window as any).appHistory = history;\n    }\n\n    const 
services = createServices(config, {\n        progressFn: (id, working) => {\n        },\n        errorFn: (id, error, showSnackBar: boolean) => {\n            if (showSnackBar) {\n                console.error(\"Backend error:\", error);\n                if (error.status === 401 && error.errors[0].indexOf(\"Not logged in\") > -1) {\n                    // Catch auth errors when navigating and redirect to login preserving url location\n                    store.dispatch(logout(false, true));\n                }\n            }\n        },\n    });\n\n    // be sure this is initiated before the app starts\n    servicesProvider.setServices(services);\n\n    const store = configureStore(history, services, config);\n\n    servicesProvider.setStore(store);\n\n    store.subscribe(initListener(history, store, services, config));\n    store.dispatch(initAuth(config));\n    store.dispatch(setBuildInfo());\n    store.dispatch(setTokenDialogApiHost(apiHost));\n    store.dispatch(loadVocabulary);\n    store.dispatch(loadFileViewersConfig);\n\n    const TokenComponent = (props: any) => (\n        <ApiToken\n            authService={services.authService}\n            config={config}\n            loadMainApp={true}\n            {...props}\n        />\n    );\n    const AddSessionComponent = (props: any) => <AddSession {...props} />;\n    const FedTokenComponent = (props: any) => (\n        <ApiToken\n            authService={services.authService}\n            config={config}\n            loadMainApp={false}\n            {...props}\n        />\n    );\n    const MainPanelComponent = (props: any) => <MainPanel {...props} />;\n\n    const App = () => (\n        <StyledEngineProvider injectFirst>\n            <ThemeProvider theme={CustomTheme}>\n                <DragDropContextProvider backend={HTML5Backend}>\n                    <Provider store={store}>\n                        <ConnectedRouter history={history}>\n                            <Switch>\n                                <Route\n                                    path={Routes.TOKEN}\n                                    component={TokenComponent}\n                                />\n                                <Route\n                                    path={Routes.FED_LOGIN}\n                                    component={FedTokenComponent}\n                                />\n                                <Route\n                                    path={Routes.ADD_SESSION}\n                                    component={AddSessionComponent}\n                                />\n                                <Route\n                                    path={Routes.ROOT}\n                                    component={MainPanelComponent}\n                                />\n                            </Switch>\n                        </ConnectedRouter>\n                    </Provider>\n                </DragDropContextProvider>\n            </ThemeProvider>\n        </StyledEngineProvider>\n    );\n\n    ReactDOM.render(<App />, document.getElementById(\"root\") as HTMLElement);\n}).catch((e) => {\n    console.error(`Fatal error: ${e.message}`);\n});\n\nconst initListener = (history: History, store: RootStore, services: ServiceRepository, config: Config) => {\n    let initialized = false;\n    return async () => {\n        const { router, auth } = store.getState();\n        if (router.location && auth.user && services.authService.getApiToken() && !initialized) {\n            initialized = true;\n            initWebSocket(config, 
services.authService, store);\n            await store.dispatch(loadWorkbench());\n            addRouteChangeHandlers(history, store);\n            // ToDo: move to searchBar component\n            store.dispatch(initAdvancedFormProjectsTree());\n            //expose store for cypress tests\n            if ((window as any).Cypress) {\n                console.log(\"setting redux store to localstorage\");\n                window.localStorage.setItem(\"arvadosStore\", JSON.stringify(store.getState()));\n                (window as any).store = store;\n                store.subscribe(() => {\n                    window.localStorage.setItem(\"arvadosStore\", JSON.stringify(store.getState()));\n                });\n            }\n        }\n    };\n};\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/assets/styles/_variables.scss",
    "content": "\n// Colors\n$color-primary: #11a7a7 !default;\n$color-neutral: rgb(154, 154, 154) !default;\n$background-color: white !default;\n\n// Workflow\n$background: $background-color !default;\n\n// Fonts\n$font-color: #333 !default;\n$text-stroke: #fff !default;\n$font-family: sans-serif !default;\n\n// Labels\n$label-stroke-color: $background-color !default;\n$label-stroke-width: 4px !default;\n\n// Edges\n$edge-inner-color: #222 !default;\n$edge-outer-color: #fff !default;\n$edge-inner-hover-color: $color-primary !default;\n$edge-inner-stroke-width: 2px !default;\n$edge-inner-stroke-color: $color-neutral !default;\n$edge-outer-stroke-width: 5px !default;\n$edge-outer-stroke-color: $background-color !default;\n\n// Nodes\n$node-outer-fill-color: $background-color !default;\n$node-outer-stroke-color: $color-neutral !default;\n$node-outer-stroke-width: 2px !default;\n\n$node-input-fill-color: #c3c3c3 !default;\n$node-output-fill-color: #c3c3c3 !default;\n$node-step-fill-color: $color-primary !default;\n\n$node-hover-port-transition: all .1s !default;\n\n// Node Icons\n$node-icon-fill-color: $font-color !default;\n$node-icon-stroke-color: $font-color !default;\n$node-icon-stroke-width: 3px !default;\n\n// Ports\n$port-fill-color: $color-neutral !default;\n$port-hover-stroke-color: darken($port-fill-color, 20%) !default;\n$port-hover-stroke-width: 2px !default;\n$port-label-color: $font-color !default;\n$port-label-size: .9em !default;\n\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/assets/styles/style.css",
    "content": "svg.cwl-workflow {\n  background: white;\n  color: #333;\n  font-family: sans-serif;\n  padding: 0;\n  width: 100%;\n  display: block;\n  transform: translateZ(0); }\n  svg.cwl-workflow [tabindex]:active, svg.cwl-workflow [tabindex]:focus {\n    outline: none; }\n  svg.cwl-workflow .hidden {\n    display: none; }\n  svg.cwl-workflow .workflow {\n    user-select: none; }\n  svg.cwl-workflow .pan-handle {\n    fill: transparent; }\n  svg.cwl-workflow .label {\n    fill: #333;\n    stroke: white;\n    stroke-width: 4px;\n    text-anchor: middle;\n    paint-order: stroke;\n    stroke-linecap: butt;\n    stroke-linejoin: miter; }\n  svg.cwl-workflow .node-icon {\n    fill: #333;\n    stroke: #333;\n    stroke-width: 3px;\n    stroke-linecap: round; }\n  svg.cwl-workflow .node .outer {\n    fill: white;\n    stroke: #9a9a9a;\n    stroke-width: 2px; }\n  svg.cwl-workflow .node .inner {\n    stroke: 0; }\n  svg.cwl-workflow .node.input .inner {\n    fill: #c3c3c3; }\n  svg.cwl-workflow .node.output .inner {\n    fill: #c3c3c3; }\n  svg.cwl-workflow .node.step .inner {\n    fill: #11a7a7; }\n  svg.cwl-workflow .node .core .inner,\n  svg.cwl-workflow .node .core .node-icon {\n    pointer-events: none; }\n  svg.cwl-workflow .node:hover .port .label {\n    transition: all 0.1s;\n    opacity: 1; }\n  svg.cwl-workflow .node .port {\n    fill: #9a9a9a; }\n    svg.cwl-workflow .node .port:hover {\n      stroke: #676767;\n      stroke-width: 2px; }\n    svg.cwl-workflow .node .port.output-port .label {\n      text-anchor: start;\n      transform: translate(10px, 0); }\n    svg.cwl-workflow .node .port.input-port .label {\n      text-anchor: end;\n      transform: translate(-10px, 0); }\n    svg.cwl-workflow .node .port .label {\n      fill: #333;\n      opacity: 0;\n      font-size: .9em;\n      user-select: none;\n      transition: all .1s;\n      pointer-events: none;\n      alignment-baseline: middle; }\n  svg.cwl-workflow .edge:hover .inner {\n    stroke: #11a7a7; }\n  svg.cwl-workflow .edge .inner, svg.cwl-workflow .edge .outer {\n    fill: none;\n    stroke-linecap: round; }\n  svg.cwl-workflow .edge .inner {\n    stroke-width: 2px;\n    stroke: #9a9a9a; }\n  svg.cwl-workflow .edge .outer {\n    stroke-width: 5px;\n    stroke: white; }\n  svg.cwl-workflow .unselectable {\n    user-select: none;\n    cursor: pointer; }\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/assets/styles/style.scss",
    "content": "@import \"variables\";\nsvg.cwl-workflow {\n\n  background: $background;\n  color: $font-color;\n  font-family: $font-family;\n\n  padding: 0;\n  width: 100%;\n  display: block;\n  transform: translateZ(0);\n\n  // We will add tabindex to some elements because they should be focusable, but style should not change\n  [tabindex]:active, [tabindex]:focus {\n    outline: none;\n  }\n\n  .hidden {\n    display: none;\n  }\n\n  .workflow {\n    user-select: none;\n  }\n\n  .pan-handle {\n    // Cannot be “none” because it wouldn't have a clickable zone\n    fill: transparent;\n  }\n\n  .label {\n    fill: $font-color;\n    stroke: $label-stroke-color;\n    stroke-width: $label-stroke-width;\n\n    text-anchor: middle;\n    paint-order: stroke;\n    stroke-linecap: butt;\n    stroke-linejoin: miter;\n\n  }\n\n  .node-icon {\n    fill: $node-icon-fill-color;\n    stroke: $node-icon-stroke-color;\n    stroke-width: $node-icon-stroke-width;\n\n    stroke-linecap: round;\n  }\n\n  .node {\n\n    .outer {\n      fill: $node-outer-fill-color;\n      stroke: $node-outer-stroke-color;\n      stroke-width: $node-outer-stroke-width;\n    }\n\n    .inner {\n      stroke: 0;\n    }\n\n    &.input .inner {\n      fill: $node-input-fill-color;\n    }\n\n    &.output .inner {\n      fill: $node-output-fill-color;\n    }\n\n    &.step .inner {\n      fill: $node-step-fill-color;\n    }\n\n    // Prevent mouseenter/leave events when hovering over nested elements\n    // otherwise, cursor would change while moving the mouse through the node\n    .core {\n      .inner,\n      .node-icon {\n        pointer-events: none;\n      }\n    }\n\n    &:hover {\n      .port .label {\n        transition: $node-hover-port-transition;\n        opacity: 1;\n      }\n    }\n\n    .port {\n      fill: $port-fill-color;\n\n      &:hover {\n        stroke: $port-hover-stroke-color;\n        stroke-width: $port-hover-stroke-width;\n      }\n\n      &.output-port .label {\n        text-anchor: start;\n        transform: translate(10px, 0);\n      }\n\n      &.input-port .label {\n        text-anchor: end;\n        transform: translate(-10px, 0);\n      }\n\n      .label {\n        fill: $port-label-color;\n\n        opacity: 0;\n        font-size: .9em;\n        user-select: none;\n        transition: all .1s;\n        pointer-events: none;\n        alignment-baseline: middle;\n      }\n    }\n  }\n\n  .edge {\n\n    &:hover .inner {\n      stroke: $edge-inner-hover-color;\n    }\n\n    .inner, .outer {\n      fill: none;\n      stroke-linecap: round;\n    }\n\n    .inner {\n      stroke-width: $edge-inner-stroke-width;\n      stroke: $edge-inner-stroke-color;\n    }\n    .outer {\n      stroke-width: $edge-outer-stroke-width;\n      stroke: $edge-outer-stroke-color;\n    }\n\n  }\n\n  .unselectable {\n    user-select: none;\n    cursor: pointer;\n  }\n\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/assets/styles/theme.css",
    "content": "svg.cwl-workflow {\n  background: white;\n  color: #333;\n  font-family: sans-serif;\n  padding: 0;\n  width: 100%;\n  display: block;\n  transform: translateZ(0); }\n  svg.cwl-workflow [tabindex]:active, svg.cwl-workflow [tabindex]:focus {\n    outline: none; }\n  svg.cwl-workflow .hidden {\n    display: none; }\n  svg.cwl-workflow .workflow {\n    user-select: none; }\n  svg.cwl-workflow .pan-handle {\n    fill: transparent; }\n  svg.cwl-workflow .label {\n    fill: #333;\n    stroke: white;\n    stroke-width: 4px;\n    text-anchor: middle;\n    paint-order: stroke;\n    stroke-linecap: butt;\n    stroke-linejoin: miter; }\n  svg.cwl-workflow .node-icon {\n    fill: #333;\n    stroke: #333;\n    stroke-width: 3px;\n    stroke-linecap: round; }\n  svg.cwl-workflow .node .outer {\n    fill: white;\n    stroke: #9a9a9a;\n    stroke-width: 2px; }\n  svg.cwl-workflow .node .inner {\n    stroke: 0; }\n  svg.cwl-workflow .node.input .inner {\n    fill: #c3c3c3; }\n  svg.cwl-workflow .node.output .inner {\n    fill: #c3c3c3; }\n  svg.cwl-workflow .node.step .inner {\n    fill: #11a7a7; }\n  svg.cwl-workflow .node .core .inner,\n  svg.cwl-workflow .node .core .node-icon {\n    pointer-events: none; }\n  svg.cwl-workflow .node:hover .port .label {\n    transition: all 0.1s;\n    opacity: 1; }\n  svg.cwl-workflow .node .port {\n    fill: #9a9a9a; }\n    svg.cwl-workflow .node .port:hover {\n      stroke: #676767;\n      stroke-width: 2px; }\n    svg.cwl-workflow .node .port.output-port .label {\n      text-anchor: start;\n      transform: translate(10px, 0); }\n    svg.cwl-workflow .node .port.input-port .label {\n      text-anchor: end;\n      transform: translate(-10px, 0); }\n    svg.cwl-workflow .node .port .label {\n      fill: #333;\n      opacity: 0;\n      font-size: .9em;\n      user-select: none;\n      transition: all .1s;\n      pointer-events: none;\n      alignment-baseline: middle; }\n  svg.cwl-workflow .edge:hover .inner {\n    stroke: #11a7a7; }\n  svg.cwl-workflow .edge .inner, svg.cwl-workflow .edge .outer {\n    fill: none;\n    stroke-linecap: round; }\n  svg.cwl-workflow .edge .inner {\n    stroke-width: 2px;\n    stroke: #9a9a9a; }\n  svg.cwl-workflow .edge .outer {\n    stroke-width: 5px;\n    stroke: white; }\n  svg.cwl-workflow .unselectable {\n    user-select: none;\n    cursor: pointer; }\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/assets/styles/theme.scss",
    "content": "@import \"./variables.scss\";\n@import \"./style.scss\";\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/assets/styles/themes/rabix-dark/_variables.scss",
    "content": "\n\n// Colors\n$background-color: rgb(48, 48, 48) !default;\n\n// Fonts\n$font-color: white !default;\n\n// Edges\n$edge-outer-stroke-width: 7px !default;\n\n// Node Icons\n$node-icon-fill-color: $background-color !default;\n$node-icon-stroke-color: $background-color !default;\n\n// Ports\n$port-hover-stroke-color: white !default;\n$port-fill-color: rgb(195, 195, 195) !default;\n\n@import \"../../variables\";"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/assets/styles/themes/rabix-dark/theme.css",
    "content": "svg.cwl-workflow {\n  background: #303030;\n  color: white;\n  font-family: sans-serif;\n  padding: 0;\n  width: 100%;\n  display: block;\n  transform: translateZ(0); }\n  svg.cwl-workflow [tabindex]:active, svg.cwl-workflow [tabindex]:focus {\n    outline: none; }\n  svg.cwl-workflow .hidden {\n    display: none; }\n  svg.cwl-workflow .workflow {\n    user-select: none; }\n  svg.cwl-workflow .pan-handle {\n    fill: transparent; }\n  svg.cwl-workflow .label {\n    fill: white;\n    stroke: #303030;\n    stroke-width: 4px;\n    text-anchor: middle;\n    paint-order: stroke;\n    stroke-linecap: butt;\n    stroke-linejoin: miter; }\n  svg.cwl-workflow .node-icon {\n    fill: #303030;\n    stroke: #303030;\n    stroke-width: 3px;\n    stroke-linecap: round; }\n  svg.cwl-workflow .node .outer {\n    fill: #303030;\n    stroke: #9a9a9a;\n    stroke-width: 2px; }\n  svg.cwl-workflow .node .inner {\n    stroke: 0; }\n  svg.cwl-workflow .node.input .inner {\n    fill: #c3c3c3; }\n  svg.cwl-workflow .node.output .inner {\n    fill: #c3c3c3; }\n  svg.cwl-workflow .node.step .inner {\n    fill: #11a7a7; }\n  svg.cwl-workflow .node .core .inner,\n  svg.cwl-workflow .node .core .node-icon {\n    pointer-events: none; }\n  svg.cwl-workflow .node:hover .port .label {\n    transition: all 0.1s;\n    opacity: 1; }\n  svg.cwl-workflow .node .port {\n    fill: #c3c3c3; }\n    svg.cwl-workflow .node .port:hover {\n      stroke: white;\n      stroke-width: 2px; }\n    svg.cwl-workflow .node .port.output-port .label {\n      text-anchor: start;\n      transform: translate(10px, 0); }\n    svg.cwl-workflow .node .port.input-port .label {\n      text-anchor: end;\n      transform: translate(-10px, 0); }\n    svg.cwl-workflow .node .port .label {\n      fill: white;\n      opacity: 0;\n      font-size: .9em;\n      user-select: none;\n      transition: all .1s;\n      pointer-events: none;\n      alignment-baseline: middle; }\n  svg.cwl-workflow .edge:hover .inner {\n    stroke: #11a7a7; }\n  svg.cwl-workflow .edge .inner, svg.cwl-workflow .edge .outer {\n    fill: none;\n    stroke-linecap: round; }\n  svg.cwl-workflow .edge .inner {\n    stroke-width: 2px;\n    stroke: #9a9a9a; }\n  svg.cwl-workflow .edge .outer {\n    stroke-width: 7px;\n    stroke: #303030; }\n  svg.cwl-workflow .unselectable {\n    user-select: none;\n    cursor: pointer; }\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/assets/styles/themes/rabix-dark/theme.scss",
    "content": "@import \"variables\";\n@import \"../../style.scss\";\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/behaviors/edge-panning.ts",
    "content": "import {Workflow} from \"..\";\n\nexport class EdgePanner {\n\n\n    /** ID of the requested animation frame for panning */\n    private panAnimationFrame: any;\n\n    private workflow: Workflow;\n\n    private movementSpeed = 10;\n    private scrollMargin  = 100;\n\n    /**\n     * Current state of collision on both axes, each negative if beyond top/left border,\n     * positive if beyond right/bottom, zero if inside the viewport\n     */\n    private collision = {x: 0, y: 0};\n\n    private viewportClientRect: ClientRect;\n    private panningCallback = (sdx: number, sdy: number) => {};\n\n    constructor(workflow: Workflow, config = {\n        scrollMargin: 100,\n        movementSpeed: 10\n    }) {\n        const options = Object.assign({\n            scrollMargin: 100,\n            movementSpeed: 10\n        }, config);\n\n        this.workflow      = workflow;\n        this.scrollMargin  = options.scrollMargin;\n        this.movementSpeed = options.movementSpeed;\n\n        this.viewportClientRect = this.workflow.svgRoot.getBoundingClientRect();\n    }\n\n    /**\n     * Calculates if dragged node is at or beyond the point beyond which workflow panning should be triggered.\n     * If collision state has changed, {@link onBoundaryCollisionChange} will be triggered.\n     */\n    triggerCollisionDetection(x: number, y: number, callback: (sdx: number, sdy: number) => void) {\n        const collision      = {x: 0, y: 0};\n        this.panningCallback = callback;\n\n        let {left, right, top, bottom} = this.viewportClientRect;\n\n        left   = left + this.scrollMargin;\n        right  = right - this.scrollMargin;\n        top    = top + this.scrollMargin;\n        bottom = bottom - this.scrollMargin;\n\n        if (x < left) {\n            collision.x = x - left;\n        } else if (x > right) {\n            collision.x = x - right;\n        }\n\n        if (y < top) {\n            collision.y = y - top;\n        } else if (y > bottom) {\n            collision.y = y - bottom;\n        }\n\n        if (\n            Math.sign(collision.x) !== Math.sign(this.collision.x)\n            || Math.sign(collision.y) !== Math.sign(this.collision.y)\n        ) {\n            const previous = this.collision;\n            this.collision = collision;\n            this.onBoundaryCollisionChange(collision, previous);\n        }\n    }\n\n    /**\n     * Triggered when {@link triggerCollisionDetection} determines that collision properties have changed.\n     */\n    private onBoundaryCollisionChange(current: { x: number, y: number }, previous: { x: number, y: number }): void {\n\n        this.stop();\n\n        if (current.x === 0 && current.y === 0) {\n            return;\n        }\n\n        this.start(this.collision);\n    }\n\n    private start(direction: { x: number, y: number }) {\n\n        let startTimestamp: number | undefined;\n\n        const scale    = this.workflow.scale;\n        const matrix   = this.workflow.workflow.transform.baseVal.getItem(0).matrix;\n        const sixtyFPS = 16.6666;\n\n        const onFrame = (timestamp: number) => {\n\n            const frameDeltaTime = timestamp - (startTimestamp || timestamp);\n            startTimestamp       = timestamp;\n\n            // We need to stop the animation at some point\n            // It should be stopped when there is no animation frame ID anymore,\n            // which means that stopScroll() was called\n            // However, don't do that if we haven't made the first move yet, which is a situation when ∆t 
is 0\n            if (frameDeltaTime !== 0 && !this.panAnimationFrame) {\n                startTimestamp = undefined;\n                return;\n            }\n\n            const moveX = Math.sign(direction.x) * this.movementSpeed * frameDeltaTime / sixtyFPS;\n            const moveY = Math.sign(direction.y) * this.movementSpeed * frameDeltaTime / sixtyFPS;\n\n            matrix.e -= moveX;\n            matrix.f -= moveY;\n\n            const frameDiffX = moveX / scale;\n            const frameDiffY = moveY / scale;\n\n            this.panningCallback(frameDiffX, frameDiffY);\n            this.panAnimationFrame = window.requestAnimationFrame(onFrame);\n        };\n\n        this.panAnimationFrame = window.requestAnimationFrame(onFrame);\n    }\n\n    stop() {\n        window.cancelAnimationFrame(this.panAnimationFrame);\n        this.panAnimationFrame = undefined;\n    }\n\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/graph/connectable.ts",
    "content": "export interface Connectable {\n    connectionId: string;\n    isVisible: boolean;\n}"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/graph/edge.ts",
    "content": "import {Edge as ModelEdge} from \"cwlts/models\";\nimport {Geometry} from \"../utils/geometry\";\nimport {IOPort} from \"./io-port\";\nimport {Workflow} from \"./workflow\";\n\nexport class Edge {\n\n    static makeTemplate(edge: ModelEdge, containerNode: SVGGElement, connectionStates?: string): string | undefined {\n        if (!edge.isVisible || edge.source.type === \"Step\" || edge.destination.type === \"Step\") {\n            return \"\";\n        }\n\n        const [, sourceStepId, sourcePort] = edge.source.id.split(\"/\");\n        const [, destStepId, destPort]       = edge.destination.id.split(\"/\");\n\n        const sourceVertex = containerNode.querySelector(`.node[data-id=\"${sourceStepId}\"] .output-port[data-port-id=\"${sourcePort}\"] .io-port`) as SVGGElement;\n        const destVertex   = containerNode.querySelector(`.node[data-id=\"${destStepId}\"] .input-port[data-port-id=\"${destPort}\"] .io-port`) as SVGGElement;\n\n        if (edge.source.type === edge.destination.type) {\n            console.error(\"Can't update edge between nodes of the same type.\", edge);\n            return;\n        }\n\n        if (!sourceVertex) {\n            console.error(\"Source vertex not found for edge \" + edge.source.id, edge);\n            return;\n        }\n\n        if (!destVertex) {\n            console.error(\"Destination vertex not found for edge \" + edge.destination.id, edge);\n            return;\n        }\n\n        const sourceCTM = sourceVertex.getCTM() as SVGMatrix;\n        const destCTM   = destVertex.getCTM() as SVGMatrix;\n\n        const wfMatrix = containerNode.transform.baseVal.getItem(0).matrix;\n\n        const pathStr = Workflow.makeConnectionPath(\n            (sourceCTM.e - wfMatrix.e) / sourceCTM.a,\n            (sourceCTM.f - wfMatrix.f) / sourceCTM.a,\n            (destCTM.e - wfMatrix.e) / sourceCTM.a,\n            (destCTM.f - wfMatrix.f) / sourceCTM.a\n        );\n\n        return `\n            <g tabindex=\"-1\" class=\"edge ${connectionStates}\"\n               data-source-port=\"${sourcePort}\"\n               data-destination-port=\"${destPort}\"\n               data-source-node=\"${sourceStepId}\"\n               data-source-connection=\"${edge.source.id}\"\n               data-destination-connection=\"${edge.destination.id}\"\n               data-destination-node=\"${destStepId}\">\n                <path class=\"sub-edge outer\" d=\"${pathStr}\"></path>\n                <path class=\"sub-edge inner\" d=\"${pathStr}\"></path>\n            </g>\n        `;\n    }\n\n    static spawn(pathStr = \"\", connectionIDs: {\n        source?: string,\n        destination?: string,\n    }                    = {}) {\n\n        const ns   = \"http://www.w3.org/2000/svg\";\n        const edge = document.createElementNS(ns, \"g\");\n\n        const [, sourceStepId, sourcePort] = (connectionIDs.source || \"//\").split(\"/\");\n        const [, destStepId, destPort]       = (connectionIDs.destination || \"//\").split(\"/\");\n\n        edge.classList.add(\"edge\");\n        if (sourceStepId) {\n            edge.classList.add(sourceStepId);\n        }\n        if (destStepId) {\n            edge.classList.add(destStepId);\n        }\n        edge.setAttribute(\"tabindex\", \"-1\");\n        edge.setAttribute(\"data-destination-node\", destStepId);\n        edge.setAttribute(\"data-destination-port\", destPort);\n        edge.setAttribute(\"data-source-port\", sourcePort);\n        edge.setAttribute(\"data-source-node\", sourceStepId);\n        
edge.setAttribute(\"data-source-connection\", connectionIDs.source!);\n        edge.setAttribute(\"data-destination-connection\", connectionIDs.destination!);\n\n        edge.innerHTML = `\n            <path class=\"sub-edge outer\" d=\"${pathStr}\"></path>\n            <path class=\"sub-edge inner\" d=\"${pathStr}\"></path>\n        `;\n\n        return edge;\n    }\n\n    static spawnBetweenConnectionIDs(root: SVGElement, source: string, destination: string) {\n\n        if (source.startsWith(\"in\")) {\n            const tmp   = source;\n            source      = destination;\n            destination = tmp;\n        }\n\n        const sourceNode      = root.querySelector(`.port[data-connection-id=\"${source}\"]`) as SVGGElement;\n        const destinationNode = root.querySelector(`.port[data-connection-id=\"${destination}\"]`) as SVGAElement;\n\n        const sourceCTM = Geometry.getTransformToElement(sourceNode, root);\n        const destCTM   = Geometry.getTransformToElement(destinationNode, root);\n        const path      = IOPort.makeConnectionPath(sourceCTM.e, sourceCTM.f, destCTM.e, destCTM.f);\n\n        // If there is already a connection between these ports, update that one instead\n        const existingEdge = root.querySelector(`.edge[data-source-connection=\"${source}\"][data-destination-connection=\"${destination}\"]`);\n        if (existingEdge) {\n            existingEdge.querySelectorAll(\".sub-edge\").forEach(sub => sub.setAttribute(\"d\", path!));\n            return existingEdge;\n        }\n\n        const edge = Edge.spawn(path, {\n            source,\n            destination\n        });\n\n        const firstNode = root.querySelector(\".node\");\n        root.insertBefore(edge, firstNode);\n\n        return edge;\n    }\n\n    static findEdge(root: any, sourceConnectionID: string, destinationConnectionID: string) {\n        return root.querySelector(`[data-source-connection=\"${sourceConnectionID}\"][data-destination-connection=\"${destinationConnectionID}\"]`);\n    }\n\n    static parseConnectionID(cid: string) {\n        const [side, stepID, portID] = (cid || \"//\").split(\"/\");\n        return {side, stepID, portID};\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/graph/graph-node.ts",
    "content": "import {ParameterTypeModel, StepModel, WorkflowInputParameterModel, WorkflowOutputParameterModel} from \"cwlts/models\";\nimport {HtmlUtils} from \"../utils/html-utils\";\nimport {SVGUtils} from \"../utils/svg-utils\";\nimport {IOPort} from \"./io-port\";\n\nexport type NodePosition = { x: number, y: number };\nexport type NodeDataModel = WorkflowInputParameterModel | WorkflowOutputParameterModel | StepModel;\n\nexport class GraphNode {\n\n    public position: NodePosition = {x: 0, y: 0};\n\n    static radius = 30;\n\n    constructor(position: Partial<NodePosition>,\n                private dataModel: NodeDataModel) {\n\n        this.dataModel = dataModel;\n\n        Object.assign(this.position, position);\n    }\n\n    /**\n     * @FIXME Making icons increases the rendering time by 50-100%. Try embedding the SVG directly.\n     */\n\n    private static workflowIconSvg: string   = \"<svg class=\\\"node-icon\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\" viewBox=\\\"0 0 400.01 399.88\\\" x=\\\"-9\\\" y=\\\"-10\\\" width=\\\"20\\\" height=\\\"20\\\"><title>workflow</title><path d=\\\"M400,200a80,80,0,0,1-140.33,52.53L158.23,303.24a80,80,0,1,1-17.9-35.77l101.44-50.71a80.23,80.23,0,0,1,0-33.52L140.33,132.53a79.87,79.87,0,1,1,17.9-35.77l101.44,50.71A80,80,0,0,1,400,200Z\\\" transform=\\\"translate(0.01 -0.16)\\\"/></svg>\";\n    private static toolIconSvg: string       = \"<svg class=\\\"node-icon\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\" viewBox=\\\"0 0 398.39 397.78\\\" x=\\\"-10\\\" y=\\\"-8\\\" width=\\\"20\\\" height=\\\"15\\\"><title>tool2</title><polygon points=\\\"38.77 397.57 0 366 136.15 198.78 0 31.57 38.77 0 200.63 198.78 38.77 397.57\\\"/><rect x=\\\"198.39\\\" y=\\\"347.78\\\" width=\\\"200\\\" height=\\\"50\\\"/></svg>\";\n    private static fileInputIconSvg: string  = \"<svg class=\\\"node-icon\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\" viewBox=\\\"0 0 499 462.86\\\" y=\\\"-10\\\" x=\\\"-11\\\" width=\\\"20\\\" height=\\\"20\\\"><title>file_input</title><path d=\\\"M386.06,0H175V58.29l50,50V50H337.81V163.38h25l86.19.24V412.86H225V353.71l-50,50v59.15H499V112.94Zm1.75,113.45v-41l41.1,41.1Z\\\"/><polygon points=\\\"387.81 1.06 387.81 1.75 387.12 1.06 387.81 1.06\\\"/><polygon points=\\\"290.36 231 176.68 344.68 141.32 309.32 194.64 256 0 256 0 206 194.64 206 142.32 153.68 177.68 118.32 290.36 231\\\"/></svg>\";\n    private static fileOutputIconSvg: string = \"<svg class=\\\"node-icon\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\" viewBox=\\\"0 0 499 462.86\\\" x=\\\"-7\\\" y=\\\"-11\\\" width=\\\"20\\\" height=\\\"20\\\"><title>file_output</title><polygon points=\\\"387.81 1.06 387.81 1.75 387.12 1.06 387.81 1.06\\\"/><polygon points=\\\"499 231 385.32 344.68 349.96 309.32 403.28 256 208.64 256 208.64 206 403.28 206 350.96 153.68 386.32 118.32 499 231\\\"/><path d=\\\"M187.81,163.38l77.69.22H324V112.94L211.06,0H0V462.86H324V298.5H274V412.86H50V50H162.81V163.38Zm25-90.92,41.1,41.1-41.1-.11Z\\\"/></svg>\";\n    private static inputIconSvg: string      = \"<svg class=\\\"node-icon\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\" viewBox=\\\"0 0 499 365\\\" x=\\\"-11\\\" y=\\\"-10\\\" width=\\\"20\\\" height=\\\"20\\\"><title>type_input</title><g id=\\\"input\\\"><path d=\\\"M316.5,68a181.72,181.72,0,0,0-114.12,40.09L238,143.72a132.5,132.5,0,1,1,1.16,214.39L203.48,393.8A182.5,182.5,0,1,0,316.5,68Z\\\" transform=\\\"translate(0 -68)\\\"/><g id=\\\"Layer_22\\\" data-name=\\\"Layer 22\\\"><g id=\\\"Layer_9_copy_4\\\" data-name=\\\"Layer 9 copy 4\\\"><polygon 
points=\\\"290.36 182 176.68 295.68 141.32 260.32 194.64 207 0 207 0 157 194.64 157 142.32 104.68 177.68 69.32 290.36 182\\\"/></g></g></g></svg>\";\n    private static outputIconSvg: string     = \"<svg class=\\\"node-icon\\\" xmlns=\\\"http://www.w3.org/2000/svg\\\" viewBox=\\\"0 0 500.36 365\\\" x=\\\"-9\\\" y=\\\"-10\\\" width=\\\"20\\\" height=\\\"20\\\"><title>type_output</title><g id=\\\"output\\\"><path d=\\\"M291.95,325.23a134,134,0,0,1-15.76,19,132.5,132.5,0,1,1,0-187.38,133.9,133.9,0,0,1,16.16,19.55l35.81-35.81A182.5,182.5,0,1,0,327.73,361Z\\\" transform=\\\"translate(0 -68)\\\"/><g id=\\\"circle_source_copy\\\" data-name=\\\"circle source copy\\\"><g id=\\\"Layer_22_copy\\\" data-name=\\\"Layer 22 copy\\\"><g id=\\\"Layer_9_copy_5\\\" data-name=\\\"Layer 9 copy 5\\\"><polygon points=\\\"500.36 182 386.68 295.68 351.32 260.32 404.64 207 210 207 210 157 404.64 157 352.32 104.68 387.68 69.32 500.36 182\\\"/></g></g></g></g></svg>\";\n\n    private static makeIconFragment(model: any) {\n\n        let iconStr = \"\";\n\n        if (model instanceof StepModel && model.run) {\n\n            if (model.run.class === \"Workflow\") {\n                iconStr = this.workflowIconSvg;\n            } else if (model.run.class === \"CommandLineTool\") {\n                iconStr = this.toolIconSvg;\n            }\n\n        } else if (model instanceof WorkflowInputParameterModel && model.type) {\n            if (model.type.type === \"File\" || (model.type.type === \"array\" && model.type.items === \"File\")) {\n                iconStr = this.fileInputIconSvg;\n            } else {\n                iconStr = this.inputIconSvg;\n            }\n        } else if (model instanceof WorkflowOutputParameterModel && model.type) {\n            if (model.type.type === \"File\" || (model.type.type === \"array\" && model.type.items === \"File\")) {\n                iconStr = this.fileOutputIconSvg;\n            } else {\n                iconStr = this.outputIconSvg;\n            }\n        }\n\n        return iconStr;\n    }\n\n    static makeTemplate(dataModel: {\n        id: string,\n        connectionId: string,\n        label?: string,\n        in?: any[],\n        type?: ParameterTypeModel\n        out?: any[],\n        customProps?: {\n            \"sbg:x\"?: number\n            \"sbg:y\"?: number\n        }\n    }, labelScale = 1): string {\n\n        const x = ~~(dataModel.customProps && dataModel.customProps[\"sbg:x\"])!;\n        const y = ~~(dataModel.customProps && dataModel.customProps[\"sbg:y\"])!;\n\n        let nodeTypeClass = \"step\";\n        if (dataModel instanceof WorkflowInputParameterModel) {\n            nodeTypeClass = \"input\";\n        } else if (dataModel instanceof WorkflowOutputParameterModel) {\n            nodeTypeClass = \"output\";\n        }\n\n        const inputs   = (dataModel.in || []).filter(p => p.isVisible);\n        const outputs  = (dataModel.out || []).filter(p => p.isVisible);\n        const maxPorts = Math.max(inputs.length, outputs.length);\n        const radius   = GraphNode.radius + maxPorts * IOPort.radius;\n\n        let typeClass = \"\";\n        let itemsClass = \"\";\n\n        if (dataModel.type) {\n            typeClass = \"type-\" + dataModel.type.type;\n\n            if(dataModel.type.items){\n                itemsClass = \"items-\" + dataModel.type.items;\n            }\n        }\n\n        const inputPortTemplates = inputs\n            .sort((a, b) => -a.id.localeCompare(b.id))\n            .map((p, i, arr) => GraphNode.makePortTemplate(\n      
          p,\n                \"input\",\n                SVGUtils.matrixToTransformAttr(\n                    GraphNode.createPortMatrix(arr.length, i, radius, \"input\")\n                )\n            ))\n            .reduce((acc, tpl) => acc + tpl, \"\");\n\n        const outputPortTemplates = outputs\n            .sort((a, b) => -a.id.localeCompare(b.id))\n            .map((p, i, arr) => GraphNode.makePortTemplate(\n                p,\n                \"output\",\n                SVGUtils.matrixToTransformAttr(\n                    GraphNode.createPortMatrix(arr.length, i, radius, \"output\")\n                )\n            ))\n            .reduce((acc, tpl) => acc + tpl, \"\");\n\n        return `\n            <g tabindex=\"-1\" class=\"node ${nodeTypeClass} ${typeClass} ${itemsClass}\"\n               data-connection-id=\"${dataModel.connectionId}\"\n               transform=\"matrix(1, 0, 0, 1, ${x}, ${y})\"\n               data-id=\"${dataModel.id}\">\n               \n                <g class=\"core\" transform=\"matrix(1, 0, 0, 1, 0, 0)\">\n                    <circle cx=\"0\" cy=\"0\" r=\"${radius}\" class=\"outer\"></circle>\n                    <circle cx=\"0\" cy=\"0\" r=\"${radius * .75}\" class=\"inner\"></circle>\n                    \n                    ${GraphNode.makeIconFragment(dataModel)}\n                </g>\n                \n                <text transform=\"matrix(${labelScale},0,0,${labelScale},0,${radius + 30})\" class=\"title label\">${HtmlUtils.escapeHTML(dataModel.label || dataModel.id)}</text>\n                \n                ${inputPortTemplates}\n                ${outputPortTemplates}\n            </g>\n        `;\n    }\n\n    private static makePortTemplate(port: {\n                                        label?: string,\n                                        id: string,\n                                        connectionId: string\n                                    },\n                                    type: \"input\" | \"output\",\n                                    transform = \"matrix(1, 0, 0, 1, 0, 0)\"): string {\n\n        const portClass = type === \"input\" ? 
\"input-port\" : \"output-port\";\n        const label     = port.label || port.id;\n\n        return `\n            <g class=\"port ${portClass}\" transform=\"${transform || \"matrix(1, 0, 0, 1, 0, 0)\"}\"\n               data-connection-id=\"${port.connectionId}\"\n               data-port-id=\"${port.id}\"\n            >\n                <g class=\"io-port\">\n                    <circle cx=\"0\" cy=\"0\" r=\"7\" class=\"port-handle\"></circle>\n                </g>\n                <text x=\"0\" y=\"0\" transform=\"matrix(1,0,0,1,0,0)\" class=\"label unselectable\">${label}</text>\n            </g>\n            \n        `;\n    }\n\n    public static createPortMatrix(totalPortLength: number,\n                                   portIndex: number,\n                                   radius: number,\n                                   type: \"input\" | \"output\"): SVGMatrix {\n        const availableAngle = 140;\n\n        let rotationAngle =\n                // Starting rotation angle\n                (-availableAngle / 2) +\n                (\n                    // Angular offset by element index\n                    (portIndex + 1)\n                    // Angle between elements\n                    * availableAngle / (totalPortLength + 1)\n                );\n\n        if (type === \"input\") {\n            rotationAngle =\n                // Determines the starting rotation angle\n                180 - (availableAngle / -2)\n                // Determines the angular offset modifier for the current index\n                - (portIndex + 1)\n                // Determines the angular offset\n                * availableAngle / (totalPortLength + 1);\n        }\n\n        const matrix = SVGUtils.createMatrix();\n        return matrix.rotate(rotationAngle).translate(radius, 0).rotate(-rotationAngle);\n    }\n\n    static patchModelPorts<T>(model: T & { connectionId: string, id: string }): T {\n        const patch = [{connectionId: model.connectionId, isVisible: true, id: model.id}];\n        if (model instanceof WorkflowInputParameterModel) {\n            const copy = Object.create(model);\n            return Object.assign(copy, {out: patch});\n\n\n        } else if (model instanceof WorkflowOutputParameterModel) {\n            const copy = Object.create(model);\n            return Object.assign(copy, {in: patch});\n        }\n\n        return model;\n    }\n\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/graph/io-port.ts",
    "content": "export class IOPort {\n\n    static radius = 7;\n\n    /**\n     * @param x1\n     * @param y1\n     * @param x2\n     * @param y2\n     * @param {\"right\" | \"left\" | string} forceDirection\n     * @returns {string}\n     */\n    public static makeConnectionPath(x1: any, y1: any, x2: any, y2: any, forceDirection: \"right\" | \"left\" | string = \"right\"): string | undefined {\n\n        if (!forceDirection) {\n            return `M ${x1} ${y1} C ${(x1 + x2) / 2} ${y1} ${(x1 + x2) / 2} ${y2} ${x2} ${y2}`;\n        } else if (forceDirection === \"right\") {\n            const outDir = x1 + Math.abs(x1 - x2) / 2;\n            const inDir  = x2 - Math.abs(x1 - x2) / 2;\n\n            return `M ${x1} ${y1} C ${outDir} ${y1} ${inDir} ${y2} ${x2} ${y2}`;\n        } else if (forceDirection === \"left\") {\n            const outDir = x1 - Math.abs(x1 - x2) / 2;\n            const inDir  = x2 + Math.abs(x1 - x2) / 2;\n\n            return `M ${x1} ${y1} C ${outDir} ${y1} ${inDir} ${y2} ${x2} ${y2}`;\n        }\n\n        return undefined;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/graph/step-node.ts",
    "content": "import {StepModel} from \"cwlts/models\";\nimport {Edge} from \"./edge\";\nimport {GraphNode} from \"./graph-node\";\nimport {TemplateParser} from \"./template-parser\";\n\nexport class StepNode {\n\n    private svg: SVGSVGElement;\n    private stepEl: SVGElement;\n    private model: StepModel;\n\n    constructor(element: SVGElement, stepModel: StepModel) {\n\n        this.stepEl = element;\n        this.svg    = element.ownerSVGElement!;\n        this.model  = stepModel;\n\n    }\n\n    update() {\n        const tpl = GraphNode.makeTemplate(this.model);\n        const el  = TemplateParser.parse(tpl)!;\n\n        this.stepEl.innerHTML = el.innerHTML;\n\n        // Reposition all edges\n        const incomingEdges = this.svg.querySelectorAll(`.edge[data-destination-node=\"${this.model.connectionId}\"]`);\n        const outgoingEdges = this.svg.querySelectorAll(`.edge[data-source-node=\"${this.model.connectionId}\"`);\n\n        for (const edge of [...Array.from(incomingEdges), ...Array.from(outgoingEdges)]) {\n            Edge.spawnBetweenConnectionIDs(\n                this.svg.querySelector(\".workflow\") as SVGGElement,\n                edge.getAttribute(\"data-source-connection\")!,\n                edge.getAttribute(\"data-destination-connection\")!\n            );\n        }\n\n        console.log(\"Should redraw input port\", incomingEdges);\n\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/graph/template-parser.ts",
    "content": "export class TemplateParser {\n\n    static parse(tpl: any) {\n        const ns = \"http://www.w3.org/2000/svg\";\n        const node = document.createElementNS(ns, \"g\");\n        node.innerHTML = tpl;\n        return node.firstElementChild;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/graph/workflow.ts",
    "content": "import {WorkflowStepInputModel}       from \"cwlts/models/generic\";\nimport {StepModel}                    from \"cwlts/models/generic/StepModel\";\nimport {WorkflowInputParameterModel}  from \"cwlts/models/generic/WorkflowInputParameterModel\";\nimport {WorkflowModel}                from \"cwlts/models/generic/WorkflowModel\";\nimport {WorkflowOutputParameterModel} from \"cwlts/models/generic/WorkflowOutputParameterModel\";\nimport {SVGPlugin}                    from \"../plugins/plugin\";\nimport {DomEvents}                    from \"../utils/dom-events\";\nimport {EventHub}                     from \"../utils/event-hub\";\nimport {Connectable}                  from \"./connectable\";\nimport {Edge as GraphEdge}            from \"./edge\";\nimport {GraphNode}                    from \"./graph-node\";\nimport {StepNode}                     from \"./step-node\";\nimport {TemplateParser}               from \"./template-parser\";\nimport {WorkflowStepOutputModel}      from \"cwlts/models\";\n\n/**\n * @FIXME validation states of old and newly created edges\n */\nexport class Workflow {\n\n    readonly eventHub: EventHub;\n    readonly svgID = this.makeID();\n\n    minScale = 0.2;\n    maxScale = 2;\n\n    domEvents: DomEvents;\n    svgRoot: SVGSVGElement;\n    workflow: SVGGElement;\n    model: WorkflowModel;\n    editingEnabled = true;\n\n    /** Scale of labels, they are different than scale of other elements in the workflow */\n    labelScale = 1;\n\n    private workflowBoundingClientRect: any;\n    private plugins: SVGPlugin[]  = [];\n    private disposers: Function[] = [];\n\n    private pendingFirstDraw = true;\n\n    /** Stored in order to ensure that once destroyed graph cannot be reused again */\n    private isDestroyed = false;\n\n    constructor(parameters: {\n        svgRoot: SVGSVGElement,\n        model: WorkflowModel,\n        plugins?: SVGPlugin[],\n        editingEnabled?: boolean\n    }) {\n        this.svgRoot        = parameters.svgRoot;\n        this.plugins        = parameters.plugins || [];\n        this.domEvents      = new DomEvents(this.svgRoot as any);\n        this.model          = parameters.model;\n        this.editingEnabled = parameters.editingEnabled !== false; // default to true if undefined\n\n        this.svgRoot.classList.add(this.svgID);\n\n        this.svgRoot.innerHTML = `\n            <rect x=\"0\" y=\"0\" width=\"100%\" height=\"100%\" class=\"pan-handle\" transform=\"matrix(1,0,0,1,0,0)\"></rect>\n            <g class=\"workflow\" transform=\"matrix(1,0,0,1,0,0)\"></g>\n        `;\n\n        this.workflow = this.svgRoot.querySelector(\".workflow\") as any;\n\n        this.invokePlugins(\"registerWorkflow\", this);\n\n        this.eventHub = new EventHub([\n            \"connection.create\",\n            \"app.create.step\",\n            \"app.create.input\",\n            \"app.create.output\",\n            \"beforeChange\",\n            \"afterChange\",\n            \"afterRender\",\n            \"selectionChange\"\n        ]);\n\n        this.hookPlugins();\n        this.draw(parameters.model);\n\n\n        this.eventHub.on(\"afterRender\", () => this.invokePlugins(\"afterRender\"));\n    }\n\n    /** Current scale of the document */\n    private docScale = 1;\n\n    get scale() {\n        return this.docScale;\n    }\n\n    // noinspection JSUnusedGlobalSymbols\n    set scale(scale: number) {\n        this.workflowBoundingClientRect = this.svgRoot.getBoundingClientRect();\n\n        const x = (this.workflowBoundingClientRect.right 
+ this.workflowBoundingClientRect.left) / 2;\n        const y = (this.workflowBoundingClientRect.top + this.workflowBoundingClientRect.bottom) / 2;\n\n        this.scaleAtPoint(scale, x, y);\n    }\n\n    static canDrawIn(element: SVGElement): boolean {\n        return element.getBoundingClientRect().width !== 0;\n    }\n\n    static makeConnectionPath(x1: any, y1: any, x2: any, y2: any, forceDirection: \"right\" | \"left\" | string = \"right\"): string | undefined {\n\n        if (!forceDirection) {\n            return `M ${x1} ${y1} C ${(x1 + x2) / 2} ${y1} ${(x1 + x2) / 2} ${y2} ${x2} ${y2}`;\n        } else if (forceDirection === \"right\") {\n            const outDir = x1 + Math.abs(x1 - x2) / 2;\n            const inDir  = x2 - Math.abs(x1 - x2) / 2;\n\n            return `M ${x1} ${y1} C ${outDir} ${y1} ${inDir} ${y2} ${x2} ${y2}`;\n        } else if (forceDirection === \"left\") {\n            const outDir = x1 - Math.abs(x1 - x2) / 2;\n            const inDir  = x2 + Math.abs(x1 - x2) / 2;\n\n            return `M ${x1} ${y1} C ${outDir} ${y1} ${inDir} ${y2} ${x2} ${y2}`;\n        }\n        return undefined;\n    }\n\n    draw(model: WorkflowModel = this.model) {\n\n        this.assertNotDestroyed(\"draw\");\n\n        // We will need to restore the transformations when we redraw the model, so save the current state\n        const oldTransform = this.workflow.getAttribute(\"transform\");\n\n        const modelChanged = this.model !== model;\n\n        if (modelChanged || this.pendingFirstDraw) {\n            this.pendingFirstDraw = false;\n\n            this.model = model;\n\n            const stepChangeDisposer        = this.model.on(\"step.change\", this.onStepChange.bind(this));\n            const stepCreateDisposer        = this.model.on(\"step.create\", this.onStepCreate.bind(this));\n            const stepRemoveDisposer        = this.model.on(\"step.remove\", this.onStepRemove.bind(this));\n            const inputCreateDisposer       = this.model.on(\"input.create\", this.onInputCreate.bind(this));\n            const inputRemoveDisposer       = this.model.on(\"input.remove\", this.onInputRemove.bind(this));\n            const outputCreateDisposer      = this.model.on(\"output.create\", this.onOutputCreate.bind(this));\n            const outputRemoveDisposer      = this.model.on(\"output.remove\", this.onOutputRemove.bind(this));\n            const stepInPortShowDisposer    = this.model.on(\"step.inPort.show\", this.onInputPortShow.bind(this));\n            const stepInPortHideDisposer    = this.model.on(\"step.inPort.hide\", this.onInputPortHide.bind(this));\n            const connectionCreateDisposer  = this.model.on(\"connection.create\", this.onConnectionCreate.bind(this));\n            const connectionRemoveDisposer  = this.model.on(\"connection.remove\", this.onConnectionRemove.bind(this));\n            const stepOutPortCreateDisposer = this.model.on(\"step.outPort.create\", this.onOutputPortCreate.bind(this));\n            const stepOutPortRemoveDisposer = this.model.on(\"step.outPort.remove\", this.onOutputPortRemove.bind(this));\n\n            this.disposers.push(() => {\n                stepChangeDisposer.dispose();\n                stepCreateDisposer.dispose();\n                stepRemoveDisposer.dispose();\n                inputCreateDisposer.dispose();\n                inputRemoveDisposer.dispose();\n                outputCreateDisposer.dispose();\n                outputRemoveDisposer.dispose();\n                stepInPortShowDisposer.dispose();\n                
stepInPortHideDisposer.dispose();\n                connectionCreateDisposer.dispose();\n                connectionRemoveDisposer.dispose();\n                stepOutPortCreateDisposer.dispose();\n                stepOutPortRemoveDisposer.dispose();\n            });\n\n            this.invokePlugins(\"afterModelChange\");\n        }\n\n        this.clearCanvas();\n\n        const nodes = [\n            ...this.model.steps,\n            ...this.model.inputs,\n            ...this.model.outputs\n        ].filter(n => n.isVisible);\n\n        /**\n         * If there is a missing sbg:x or sbg:y property on any node model,\n         * graph should be arranged to avoid random placement.\n         */\n        let nodeTemplate = \"\";\n\n        for (const node of nodes) {\n            const patched  = GraphNode.patchModelPorts(node);\n            nodeTemplate += GraphNode.makeTemplate(patched);\n        }\n\n        this.workflow.innerHTML += nodeTemplate;\n\n        this.redrawEdges();\n\n        Array.from(this.workflow.querySelectorAll(\".node\")).forEach(e => {\n            this.workflow.appendChild(e);\n        });\n\n        this.addEventListeners();\n\n        this.workflow.setAttribute(\"transform\", oldTransform!);\n\n        this.scaleAtPoint(this.scale);\n\n\n        this.invokePlugins(\"afterRender\");\n    }\n\n    findParent(el: Element, parentClass = \"node\"): SVGGElement | undefined {\n        let parentNode: Element | null = el;\n        while (parentNode) {\n            if (parentNode.classList.contains(parentClass)) {\n                return parentNode as SVGGElement;\n            }\n            parentNode = parentNode.parentElement;\n        }\n        return undefined;\n    }\n\n    /**\n     * Retrieves a plugin instance\n     * @param {{new(...args: any[]) => T}} plugin\n     * @returns {T}\n     */\n    getPlugin<T extends SVGPlugin>(plugin: { new(...args: any[]): T }): T {\n        return this.plugins.find(p => p instanceof plugin) as T;\n    }\n\n    on(event: string, handler: any) {\n        this.eventHub.on(event, handler);\n    }\n\n    off(event: string, handler: any) {\n        this.eventHub.off(event, handler);\n    }\n\n    /**\n     * Scales the workflow to fit the available viewport\n     */\n    fitToViewport(ignoreScaleLimits = false): void {\n\n        this.scaleAtPoint(1);\n\n        Object.assign(this.workflow.transform.baseVal.getItem(0).matrix, {\n            e: 0,\n            f: 0\n        });\n\n        const clientBounds = this.svgRoot.getBoundingClientRect();\n        const wfBounds     = this.workflow.getBoundingClientRect();\n        const padding    = 100;\n\n        if (clientBounds.width === 0 || clientBounds.height === 0) {\n            throw new Error(\"Cannot fit workflow to the area that has no visible viewport.\");\n        }\n\n        const verticalScale   = (wfBounds.height) / (clientBounds.height - padding);\n        const horizontalScale = (wfBounds.width) / (clientBounds.width - padding);\n\n        const scaleFactor = Math.max(verticalScale, horizontalScale);\n\n        // Cap the upscaling to 1, we don't want to zoom in workflows that would fit anyway\n        let newScale = Math.min(this.scale / scaleFactor, 1);\n\n        if (!ignoreScaleLimits) {\n            newScale = Math.max(newScale, this.minScale);\n        }\n\n        this.scaleAtPoint(newScale);\n\n        const scaledWFBounds = this.workflow.getBoundingClientRect();\n\n        const moveY = clientBounds.top - scaledWFBounds.top + Math.abs(clientBounds.height - 
scaledWFBounds.height) / 2;\n        const moveX = clientBounds.left - scaledWFBounds.left + Math.abs(clientBounds.width - scaledWFBounds.width) / 2;\n\n        const matrix = this.workflow.transform.baseVal.getItem(0).matrix;\n        matrix.e += moveX;\n        matrix.f += moveY;\n    }\n\n    redrawEdges() {\n\n        const highlightedEdges = new Set();\n\n        Array.from(this.workflow.querySelectorAll(\".edge\")).forEach((el) => {\n            if (el.classList.contains(\"highlighted\")) {\n                const edgeID = el.attributes[\"data-source-connection\"].value + el.attributes[\"data-destination-connection\"].value;\n                highlightedEdges.add(edgeID);\n            }\n            el.remove();\n        });\n\n\n        const edgesTpl = this.model.connections\n            .map(c => {\n                const edgeId     = c.source.id + c.destination.id;\n                const edgeStates = highlightedEdges.has(edgeId) ? \"highlighted\" : \"\";\n                return GraphEdge.makeTemplate(c, this.workflow, edgeStates);\n            })\n            .reduce((acc, tpl) => acc! + tpl, \"\");\n\n        this.workflow.innerHTML = edgesTpl + this.workflow.innerHTML;\n    }\n\n    /**\n     * Scale the workflow by the scaleCoefficient (not compounded) over given coordinates\n     */\n    scaleAtPoint(scale = 1, x = 0, y = 0): void {\n\n        this.docScale     = scale;\n        this.labelScale = 1 + (1 - this.docScale) / (this.docScale * 2);\n\n        const transform         = this.workflow.transform.baseVal;\n        const matrix: SVGMatrix = transform.getItem(0).matrix;\n\n        const coords = this.transformScreenCTMtoCanvas(x, y);\n\n        matrix.e += matrix.a * coords.x;\n        matrix.f += matrix.a * coords.y;\n        matrix.a = matrix.d = scale;\n        matrix.e -= scale * coords.x;\n        matrix.f -= scale * coords.y;\n\n        const nodeLabels: any = this.workflow.querySelectorAll(\".node .label\") as  NodeListOf<SVGPathElement>;\n\n        for (const el of nodeLabels) {\n            const matrix = el.transform.baseVal.getItem(0).matrix;\n\n            Object.assign(matrix, {\n                a: this.labelScale,\n                d: this.labelScale\n            });\n        }\n\n    }\n\n    transformScreenCTMtoCanvas(x: any, y: any) {\n        const svg   = this.svgRoot;\n        const ctm   = this.workflow.getScreenCTM()!;\n        const point = svg.createSVGPoint();\n        point.x     = x;\n        point.y     = y;\n\n        const t = point.matrixTransform(ctm.inverse());\n        return {\n            x: t.x,\n            y: t.y\n        };\n    }\n\n    enableEditing(enabled: boolean): void {\n        this.invokePlugins(\"onEditableStateChange\", enabled);\n        this.editingEnabled = enabled;\n    }\n\n    // noinspection JSUnusedGlobalSymbols\n    destroy() {\n\n        this.svgRoot.classList.remove(this.svgID);\n\n        this.clearCanvas();\n        this.eventHub.empty();\n\n        this.invokePlugins(\"destroy\");\n\n        for (const dispose of this.disposers) {\n            dispose();\n        }\n\n        this.isDestroyed = true;\n    }\n\n    resetTransform() {\n        this.workflow.setAttribute(\"transform\", \"matrix(1,0,0,1,0,0)\");\n        this.scaleAtPoint();\n    }\n\n    private assertNotDestroyed(method: string) {\n        if (this.isDestroyed) {\n            throw new Error(\"Cannot call the \" + method + \" method on a destroyed graph. 
\" +\n                \"Destroying this object removes DOM listeners, \" +\n                \"and reusing it would result in unexpected things not working. \" +\n                \"Instead, you can just call the “draw” method with a different model, \" +\n                \"or create a new Workflow object.\");\n\n        }\n    }\n\n    private addEventListeners(): void {\n\n\n        /**\n         * Attach canvas panning\n         */\n        {\n            let pane: SVGGElement | undefined;\n            let x = 0;\n            let y = 0;\n            let matrix: SVGMatrix | undefined;\n            this.domEvents.drag(\".pan-handle\", (dx, dy) => {\n\n                matrix!.e = x + dx;\n                matrix!.f = y + dy;\n\n            }, (ev, el, root) => {\n                pane   = root!.querySelector(\".workflow\") as SVGGElement;\n                matrix = pane.transform.baseVal.getItem(0).matrix;\n                x      = matrix.e;\n                y      = matrix.f;\n            }, () => {\n                pane   = undefined;\n                matrix = undefined;\n            });\n        }\n\n        /**\n         * On mouse over node, bring it to the front\n         */\n        this.domEvents.on(\"mouseover\", \".node\", (ev, target, root) => {\n            if (this.workflow.querySelector(\".edge.dragged\")) {\n                return;\n            }\n            target!.parentElement!.appendChild(target!);\n        });\n\n    }\n\n    private clearCanvas() {\n        this.domEvents.detachAll();\n        this.workflow.innerHTML = \"\";\n        this.workflow.setAttribute(\"transform\", \"matrix(1,0,0,1,0,0)\");\n        this.workflow.setAttribute(\"class\", \"workflow\");\n    }\n\n    private hookPlugins() {\n\n        this.plugins.forEach(plugin => {\n\n            plugin.registerOnBeforeChange!(event => {\n                this.eventHub.emit(\"beforeChange\", event);\n            });\n\n            plugin.registerOnAfterChange!(event => {\n                this.eventHub.emit(\"afterChange\", event);\n            });\n\n            plugin.registerOnAfterRender!(event => {\n                this.eventHub.emit(\"afterRender\", event);\n            });\n        });\n    }\n\n    private invokePlugins(methodName: keyof SVGPlugin, ...args: any[]) {\n        this.plugins.forEach(plugin => {\n            if (typeof plugin[methodName] === \"function\") {\n                (plugin[methodName] as Function)(...args);\n            }\n        });\n    }\n\n    /**\n     * Listener for “connection.create” event on model that renders new edges on canvas\n     */\n    private onConnectionCreate(source: Connectable, destination: Connectable): void {\n\n        if (!source.isVisible || !destination.isVisible) {\n            return;\n        }\n\n        const sourceID      = source.connectionId;\n        const destinationID = destination.connectionId;\n\n        GraphEdge.spawnBetweenConnectionIDs(this.workflow, sourceID, destinationID);\n    }\n\n    /**\n     * Listener for \"connection.remove\" event on the model that disconnects nodes\n     */\n    private onConnectionRemove(source: Connectable, destination: Connectable): void {\n        if (!source.isVisible || !destination.isVisible) {\n            return;\n        }\n\n        const sourceID      = source.connectionId;\n        const destinationID = destination.connectionId;\n\n        const edge = this.svgRoot.querySelector(`.edge[data-source-connection=\"${sourceID}\"][data-destination-connection=\"${destinationID}\"`);\n        
edge!.remove();\n    }\n\n    /**\n     * Listener for “input.create” event on model that renders workflow inputs\n     */\n    private onInputCreate(input: WorkflowInputParameterModel): void {\n        if (!input.isVisible) {\n            return;\n        }\n\n        const patched       = GraphNode.patchModelPorts(input);\n        const graphTemplate = GraphNode.makeTemplate(patched, this.labelScale);\n\n        const el = TemplateParser.parse(graphTemplate)!;\n        this.workflow.appendChild(el);\n\n    }\n\n    /**\n     * Listener for “output.create” event on model that renders workflow outputs\n     */\n    private onOutputCreate(output: WorkflowOutputParameterModel): void {\n\n        if (!output.isVisible) {\n            return;\n        }\n\n        const patched       = GraphNode.patchModelPorts(output);\n        const graphTemplate = GraphNode.makeTemplate(patched, this.labelScale);\n\n        const el = TemplateParser.parse(graphTemplate)!;\n        this.workflow.appendChild(el);\n    }\n\n    private onStepCreate(step: StepModel) {\n        // if the step doesn't have x & y coordinates, check if they are in the run property\n        if (!step.customProps[\"sbg:x\"] && step.run.customProps && step.run.customProps[\"sbg:x\"]) {\n\n            Object.assign(step.customProps, {\n                \"sbg:x\": step.run.customProps[\"sbg:x\"],\n                \"sbg:y\": step.run.customProps[\"sbg:y\"]\n            });\n\n            // remove them from the run property once finished\n            delete step.run.customProps[\"sbg:x\"];\n            delete step.run.customProps[\"sbg:y\"];\n        }\n\n        const template = GraphNode.makeTemplate(step, this.labelScale);\n        const element  = TemplateParser.parse(template)!;\n        this.workflow.appendChild(element);\n    }\n\n\n    private onStepChange(change: StepModel) {\n        const title = this.workflow.querySelector(`.step[data-id=\"${change.connectionId}\"] .title`) as SVGTextElement;\n        if (title) {\n            title.textContent = change.label;\n        }\n    }\n\n    private onInputPortShow(input: WorkflowStepInputModel) {\n\n        const stepEl = this.svgRoot.querySelector(`.step[data-connection-id=\"${input.parentStep.connectionId}\"]`) as SVGElement;\n        new StepNode(stepEl, input.parentStep).update();\n    }\n\n    private onInputPortHide(input: WorkflowStepInputModel) {\n        const stepEl = this.svgRoot.querySelector(`.step[data-connection-id=\"${input.parentStep.connectionId}\"]`) as SVGElement;\n        new StepNode(stepEl, input.parentStep).update();\n    }\n\n    private onOutputPortCreate(output: WorkflowStepOutputModel) {\n        const stepEl = this.svgRoot.querySelector(`.step[data-connection-id=\"${output.parentStep.connectionId}\"]`) as SVGElement;\n        new StepNode(stepEl, output.parentStep).update();\n    }\n\n    private onOutputPortRemove(output: WorkflowStepOutputModel) {\n        const stepEl = this.svgRoot.querySelector(`.step[data-connection-id=\"${output.parentStep.connectionId}\"]`) as SVGElement;\n        new StepNode(stepEl, output.parentStep).update();\n    }\n\n    /**\n     * Listener for \"step.remove\" event on model which removes steps\n     */\n    private onStepRemove(step: StepModel) {\n        const stepEl = this.svgRoot.querySelector(`.step[data-connection-id=\"${step.connectionId}\"]`) as SVGElement;\n        stepEl.remove();\n    }\n\n    /**\n     * Listener for \"input.remove\" event on model which removes inputs\n     */\n    private onInputRemove(input: 
WorkflowInputParameterModel) {\n        if (!input.isVisible) {\n            return;\n        }\n        const inputEl = this.svgRoot.querySelector(`.node.input[data-connection-id=\"${input.connectionId}\"]`);\n        inputEl!.remove();\n    }\n\n    /**\n     * Listener for \"output.remove\" event on model which removes outputs\n     */\n    private onOutputRemove(output: WorkflowOutputParameterModel) {\n        if (!output.isVisible) {\n            return;\n        }\n        const outputEl = this.svgRoot.querySelector(`.node.output[data-connection-id=\"${output.connectionId}\"]`);\n        outputEl!.remove();\n    }\n\n    private makeID(length = 6) {\n        let output    = \"\";\n        const charset = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\";\n\n        for (let i = 0; i < length; i++) {\n            output += charset.charAt(Math.floor(Math.random() * charset.length));\n        }\n\n        return output;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/index.ts",
    "content": "export * from \"./graph/workflow\";\nexport * from \"./plugins/zoom/zoom\";\nexport * from \"./plugins/arrange/arrange\";\nexport * from \"./plugins/validate/validate\";\nexport * from \"./plugins/node-move/node-move\";\nexport * from \"./plugins/port-drag/port-drag\";\nexport * from \"./plugins/selection/selection\";\nexport * from \"./plugins/edge-hover/edge-hover\";\nexport * from \"./plugins/deletion/deletion\";\nexport * from \"./utils/svg-dumper\";\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/arrange/arrange.ts",
    "content": "import {GraphNode}                                                  from '../../graph/graph-node';\nimport {Workflow}                                                   from '../../graph/workflow';\nimport {SVGUtils}                                                   from '../../utils/svg-utils';\nimport {GraphChange, SVGPlugin}                                     from '../plugin';\nimport {\n    StepModel,\n    WorkflowInputParameterModel,\n    WorkflowOutputParameterModel\n} from \"cwlts/models\";\n\nexport class SVGArrangePlugin implements SVGPlugin {\n    private workflow: Workflow;\n    private svgRoot: SVGSVGElement;\n    private onBeforeChange: () => void;\n    private onAfterChange: (updates: NodePositionUpdates) => void;\n    private triggerAfterRender: () => void;\n\n    registerWorkflow(workflow: Workflow): void {\n        this.workflow = workflow;\n        this.svgRoot  = workflow.svgRoot;\n    }\n\n\n    registerOnBeforeChange(fn: (change: GraphChange) => void): void {\n        this.onBeforeChange = () => fn({type: \"arrange\"});\n    }\n\n    registerOnAfterChange(fn: (change: GraphChange) => void): void {\n        this.onAfterChange = () => fn({type: \"arrange\"});\n    }\n\n    registerOnAfterRender(fn: (change: GraphChange) => void): void {\n        this.triggerAfterRender = () => fn({type: \"arrange\"});\n    }\n\n    afterRender(): void {\n        const model     = this.workflow.model;\n        const arr = [] as Array<WorkflowInputParameterModel | WorkflowOutputParameterModel | StepModel>;\n        const drawables = arr.concat(\n            model.steps || [],\n            model.inputs || [],\n            model.outputs || []\n        );\n\n        for (const node of drawables) {\n            if (node.isVisible) {\n                const missingCoordinate = isNaN(parseInt(node.customProps[\"sbg:x\"], 10));\n                if (missingCoordinate) {\n                    this.arrange();\n                    return;\n                }\n            }\n        }\n    }\n\n    arrange() {\n\n        this.onBeforeChange();\n\n        // We need to reset all transformations on the workflow for now.\n        // @TODO Make arranging work without this\n        this.workflow.resetTransform();\n\n        // We need main graph and dangling nodes separately, they will be distributed differently\n        const {mainGraph, danglingNodes} = this.makeNodeGraphs();\n\n        // Create an array of columns, each containing a list of NodeIOs\n        const columns = this.distributeNodesIntoColumns(mainGraph);\n\n        // Get total area in which we will fit the graph, and per-column dimensions\n        const {distributionArea, columnDimensions} = this.calculateColumnSizes(columns);\n\n        // This will be the vertical middle around which the graph should be centered\n        const verticalBaseline = distributionArea.height / 2;\n\n        let xOffset    = 0;\n        let maxYOffset = 0;\n\n        // Here we will store positions for each node that is to be updated.\n        // This should then be emitted as an afterChange event.\n        const nodePositionUpdates = {} as NodePositionUpdates;\n\n        columns.forEach((column, index) => {\n            const colSize = columnDimensions[index];\n            let yOffset   = verticalBaseline - (colSize.height / 2) - column[0].rect.height / 2;\n\n            column.forEach(node => {\n                yOffset += node.rect.height / 2;\n\n                const matrix = SVGUtils.createMatrix().translate(xOffset, yOffset);\n\n               
 yOffset += node.rect.height / 2;\n\n                if (yOffset > maxYOffset) {\n                    maxYOffset = yOffset;\n                }\n\n                node.el.setAttribute(\"transform\", SVGUtils.matrixToTransformAttr(matrix));\n\n                nodePositionUpdates[node.connectionID] = {\n                    x: matrix.e,\n                    y: matrix.f\n                };\n\n            });\n\n            xOffset += colSize.width;\n        });\n\n        const danglingNodeKeys = Object.keys(danglingNodes).sort((a, b) => {\n\n            const aIsInput  = a.startsWith(\"out/\");\n            const aIsOutput = a.startsWith(\"in/\");\n            const bIsInput  = b.startsWith(\"out/\");\n            const bIsOutput = b.startsWith(\"in/\");\n\n            const lowerA = a.toLowerCase();\n            const lowerB = b.toLowerCase();\n\n            if (aIsOutput) {\n\n                if (bIsOutput) {\n                    return lowerB.localeCompare(lowerA);\n                }\n                else {\n                    return 1;\n                }\n            } else if (aIsInput) {\n                if (bIsOutput) {\n                    return -1;\n                }\n                if (bIsInput) {\n                    return lowerB.localeCompare(lowerA);\n                }\n                else {\n                    return 1;\n                }\n            } else {\n                if (!bIsOutput && !bIsInput) {\n                    return lowerB.localeCompare(lowerA);\n                }\n                else {\n                    return -1;\n                }\n            }\n        });\n\n        const danglingNodeMarginOffset = 30;\n        const danglingNodeSideLength   = GraphNode.radius * 5;\n\n        let maxNodeHeightInRow = 0;\n        let row                = 0;\n        const indexWidthMap      = new Map<number, number>();\n        const rowMaxHeightMap    = new Map<number, number>();\n\n        xOffset = 0;\n\n        const danglingRowAreaWidth = Math.max(distributionArea.width, danglingNodeSideLength * 3);\n        danglingNodeKeys.forEach((connectionID, index) => {\n            const el   = danglingNodes[connectionID] as SVGGElement;\n            const rect = el.firstElementChild!.getBoundingClientRect();\n            indexWidthMap.set(index, rect.width);\n\n            if (xOffset === 0) {\n                xOffset -= rect.width / 2;\n            }\n            if (rect.height > maxNodeHeightInRow) {\n                maxNodeHeightInRow = rect.height;\n            }\n            xOffset += rect.width + danglingNodeMarginOffset + Math.max(150 - rect.width, 0);\n\n            if (xOffset >= danglingRowAreaWidth && index < danglingNodeKeys.length - 1) {\n                rowMaxHeightMap.set(row++, maxNodeHeightInRow);\n                maxNodeHeightInRow = 0;\n                xOffset            = 0;\n            }\n        });\n\n        rowMaxHeightMap.set(row, maxNodeHeightInRow);\n        let colYOffset = maxYOffset;\n        xOffset        = 0;\n        row            = 0;\n\n        danglingNodeKeys.forEach((connectionID, index) => {\n            const el        = danglingNodes[connectionID] as SVGGElement;\n            const width     = indexWidthMap.get(index)!;\n            const rowHeight = rowMaxHeightMap.get(row)!;\n            let left        = xOffset + width / 2;\n            const top       = colYOffset\n                + danglingNodeMarginOffset\n                + Math.ceil(rowHeight / 2)\n                + ((xOffset === 0 ? 
0 : left) / danglingRowAreaWidth) * danglingNodeSideLength;\n\n            if (xOffset === 0) {\n                left -= width / 2;\n                xOffset -= width / 2;\n            }\n            xOffset += width + danglingNodeMarginOffset + Math.max(150 - width, 0);\n\n            const matrix = SVGUtils.createMatrix().translate(left, top);\n            el.setAttribute(\"transform\", SVGUtils.matrixToTransformAttr(matrix));\n\n            nodePositionUpdates[connectionID] = {x: matrix.e, y: matrix.f};\n\n            if (xOffset >= danglingRowAreaWidth) {\n                colYOffset += Math.ceil(rowHeight) + danglingNodeMarginOffset;\n                xOffset            = 0;\n                maxNodeHeightInRow = 0;\n                row++;\n            }\n        });\n\n        this.workflow.redrawEdges();\n        this.workflow.fitToViewport();\n\n        this.onAfterChange(nodePositionUpdates);\n        this.triggerAfterRender();\n\n        for (const id in nodePositionUpdates) {\n            const pos       = nodePositionUpdates[id];\n            const nodeModel = this.workflow.model.findById(id);\n            if (!nodeModel.customProps) {\n                nodeModel.customProps = {};\n            }\n\n            Object.assign(nodeModel.customProps, {\n                \"sbg:x\": pos.x,\n                \"sbg:y\": pos.y\n            });\n        }\n\n        return nodePositionUpdates;\n    }\n\n    /**\n     * Calculates column dimensions and total graph area\n     * @param {NodeIO[][]} columns\n     */\n    private calculateColumnSizes(columns: NodeIO[][]): {\n        columnDimensions: {\n            width: number,\n            height: number\n        }[],\n        distributionArea: {\n            width: number,\n            height: number\n        }\n    } {\n        const distributionArea = {width: 0, height: 0};\n        const columnDimensions: any[] = [];\n\n        for (let i = 1; i < columns.length; i++) {\n\n            let width  = 0;\n            let height = 0;\n\n            for (let j = 0; j < columns[i].length; j++) {\n                const entry = columns[i][j];\n\n                height += entry.rect.height;\n\n                if (width < entry.rect.width) {\n                    width = entry.rect.width;\n                }\n            }\n\n            columnDimensions[i] = {height, width};\n\n            distributionArea.width += width;\n            if (height > distributionArea.height) {\n                distributionArea.height = height;\n            }\n        }\n\n        return {\n            columnDimensions,\n            distributionArea\n        };\n\n    }\n\n    /**\n     * Maps node's connectionID to a 1-indexed column number\n     */\n    private distributeNodesIntoColumns(graph: NodeMap): Array<NodeIO[]> {\n        const idToZoneMap   = {};\n        const sortedNodeIDs = Object.keys(graph).sort((a, b) => b.localeCompare(a));\n        const zones         = [] as any[];\n\n        for (let i = 0; i < sortedNodeIDs.length; i++) {\n            const nodeID = sortedNodeIDs[i];\n            const node   = graph[nodeID];\n\n            // For outputs and steps, we calculate the zone as a longest path you can take to them\n            if (node.type !== \"input\") {\n                idToZoneMap[nodeID] = this.traceLongestNodePathLength(node, graph);\n            } else {\n                //\n                // Longest trace methods would put all inputs in the first column,\n                // but we want it just behind the leftmost step that it is connected to\n     
           // So instead of:\n                //\n                // (input)<----------------->(step)---\n                // (input)<---------->(step)----------\n                //\n                // It should be:\n                //\n                // ---------------(input)<--->(step)---\n                // --------(input)<-->(step)-----------\n                //\n\n                let closestNodeZone = Infinity;\n                for (let i = 0; i < node.outputs.length; i++) {\n                    const successorNodeZone = idToZoneMap[node.outputs[i]];\n\n                    if (successorNodeZone < closestNodeZone) {\n                        closestNodeZone = successorNodeZone;\n                    }\n                }\n                if (closestNodeZone === Infinity) {\n                    idToZoneMap[nodeID] = 1;\n                } else {\n                    idToZoneMap[nodeID] = closestNodeZone - 1;\n                }\n\n            }\n\n            const zone = idToZoneMap[nodeID];\n            zones[zone] || (zones[zone] = []);\n\n            zones[zone].push(graph[nodeID]);\n        }\n\n        return zones;\n\n    }\n\n    /**\n     * Finds all nodes in the graph, and indexes them by their \"data-connection-id\" attribute\n     */\n    private indexNodesByID(): { [dataConnectionID: string]: SVGGElement } {\n        const indexed = {};\n        const nodes   = this.svgRoot.querySelectorAll(\".node\");\n\n        for (let i = 0; i < nodes.length; i++) {\n            indexed[nodes[i].getAttribute(\"data-connection-id\")!] = nodes[i];\n        }\n\n        return indexed;\n    }\n\n    /**\n     * Finds length of the longest possible path from the graph root to a node.\n     * Lengths are 1-indexed. When a node has no predecessors, it will have length of 1.\n     */\n    private traceLongestNodePathLength(node: NodeIO, nodeGraph: any, visited = new Set<NodeIO>()): number {\n\n        visited.add(node);\n\n        if (node.inputs.length === 0) {\n            return 1;\n        }\n\n        const inputPathLengths: any[] = [];\n\n        for (let i = 0; i < node.inputs.length; i++) {\n            const el = nodeGraph[node.inputs[i]];\n\n            if (visited.has(el)) {\n                continue;\n            }\n\n            inputPathLengths.push(this.traceLongestNodePathLength(el, nodeGraph, visited));\n        }\n\n        return Math.max(...inputPathLengths) + 1;\n    }\n\n    private makeNodeGraphs(): {\n        mainGraph: NodeMap,\n        danglingNodes: { [nodeID: string]: SVGGElement }\n    } {\n\n        // We need all nodes in order to find the dangling ones, those will be sorted separately\n        const allNodes = this.indexNodesByID();\n\n        // Make a graph representation where you can trace inputs and outputs from/to connection ids\n        const nodeGraph = {} as NodeMap;\n\n        // Edges are the main source of information from which we will distribute nodes\n        const edges = this.svgRoot.querySelectorAll(\".edge\");\n\n        for (let i = 0; i < edges.length; i++) {\n\n            const edge = edges[i];\n\n            const sourceConnectionID      = edge.getAttribute(\"data-source-connection\")!;\n            const destinationConnectionID = edge.getAttribute(\"data-destination-connection\")!;\n\n            const [sourceSide, sourceNodeID, sourcePortID]                = sourceConnectionID.split(\"/\");\n            const [destinationSide, destinationNodeID, destinationPortID] = destinationConnectionID.split(\"/\");\n\n            // Both source and 
destination are considered to be steps by default\n            let sourceType      = \"step\";\n            let destinationType = \"step\";\n\n            // Ports have the same node and port ids\n            if (sourceNodeID === sourcePortID) {\n                sourceType = sourceSide === \"in\" ? \"output\" : \"input\";\n            }\n\n            if (destinationNodeID === destinationPortID) {\n                destinationType = destinationSide === \"in\" ? \"output\" : \"input\";\n            }\n\n            // Initialize keys on graph if they don't exist\n            const sourceNode      = this.svgRoot.querySelector(`.node[data-id=\"${sourceNodeID}\"]`) as SVGGElement;\n            const destinationNode = this.svgRoot.querySelector(`.node[data-id=\"${destinationNodeID}\"]`) as SVGGElement;\n\n            const sourceNodeConnectionID      = sourceNode.getAttribute(\"data-connection-id\")!;\n            const destinationNodeConnectionID = destinationNode.getAttribute(\"data-connection-id\")!;\n\n            // Source and destination of this edge are obviously not dangling, so we can remove them\n            // from the set of potentially dangling nodes\n            delete allNodes[sourceNodeConnectionID];\n            delete allNodes[destinationNodeConnectionID];\n\n            // Ensure that the source node has its entry in the node graph\n            (nodeGraph[sourceNodeID] || (nodeGraph[sourceNodeID] = {\n                inputs: [],\n                outputs: [],\n                type: sourceType,\n                connectionID: sourceNodeConnectionID,\n                el: sourceNode,\n                rect: sourceNode.getBoundingClientRect()\n            }));\n\n            // Ensure that the destination node has its entry in the node graph\n            (nodeGraph[destinationNodeID] || (nodeGraph[destinationNodeID] = {\n                inputs: [],\n                outputs: [],\n                type: destinationType,\n                connectionID: destinationNodeConnectionID,\n                el: destinationNode,\n                rect: destinationNode.getBoundingClientRect()\n            }));\n\n            nodeGraph[sourceNodeID].outputs.push(destinationNodeID);\n            nodeGraph[destinationNodeID].inputs.push(sourceNodeID);\n        }\n\n        return {\n            mainGraph: nodeGraph,\n            danglingNodes: allNodes\n        };\n    }\n}\n\n\nexport type NodeIO = {\n    inputs: string[],\n    outputs: string[],\n    connectionID: string,\n    el: SVGGElement,\n    rect: ClientRect,\n    type: \"step\" | \"input\" | \"output\" | string\n};\nexport type NodeMap = { [connectionID: string]: NodeIO }\n\nexport type NodePositionUpdates = { [connectionID: string]: { x: number, y: number } };\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/deletion/deletion.ts",
    "content": "import {PluginBase} from \"../plugin-base\";\nimport {SelectionPlugin} from \"../selection/selection\";\nimport {StepModel, WorkflowInputParameterModel, WorkflowOutputParameterModel} from \"cwlts/models\";\n\nexport class DeletionPlugin extends PluginBase {\n\n    private boundDeleteFunction = this.onDelete.bind(this);\n\n    afterRender(): void {\n        this.attachDeleteBehavior();\n    }\n\n    onEditableStateChange(enable: boolean) {\n        if (enable) {\n            this.attachDeleteBehavior();\n        } else {\n            this.detachDeleteBehavior();\n        }\n    }\n\n    private attachDeleteBehavior() {\n\n        this.detachDeleteBehavior();\n        window.addEventListener(\"keyup\", this.boundDeleteFunction, true);\n    }\n\n    private detachDeleteBehavior() {\n        window.removeEventListener(\"keyup\", this.boundDeleteFunction, true);\n    }\n\n    private onDelete(ev: KeyboardEvent) {\n        if ((ev.which !== 8 && ev.which !== 46) || !(ev.target instanceof SVGElement)) {\n            return;\n        }\n\n        this.deleteSelection();\n    }\n\n    public deleteSelection() {\n        const selection = this.workflow.getPlugin(SelectionPlugin);\n\n        if (!selection || !this.workflow.editingEnabled) {\n            return;\n        }\n\n        const selected = selection.getSelection();\n        selected.forEach((type, id) => {\n            if (type === \"node\") {\n                const model = this.workflow.model.findById(id);\n\n                if (model instanceof StepModel) {\n                    this.workflow.model.removeStep(model);\n                    selection.clearSelection();\n\n                } else if (model instanceof WorkflowInputParameterModel) {\n                    this.workflow.model.removeInput(model);\n                    selection.clearSelection();\n\n                } else if (model instanceof WorkflowOutputParameterModel) {\n\n                    this.workflow.model.removeOutput(model);\n                    selection.clearSelection();\n                }\n            } else {\n                const [source, destination] = id.split(SelectionPlugin.edgePortsDelimiter);\n                this.workflow.model.disconnect(source, destination);\n                selection.clearSelection();\n            }\n        });\n    }\n\n    destroy() {\n        this.detachDeleteBehavior();\n    }\n}"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/edge-hover/edge-hover.ts",
    "content": "import {PluginBase} from \"../plugin-base\";\n\nexport class SVGEdgeHoverPlugin extends PluginBase {\n\n    private boundEdgeEnterFunction = this.onEdgeEnter.bind(this);\n\n    private modelListener: { dispose: Function } = {\n        dispose: () => void 0\n    };\n\n    afterRender(): void {\n        this.attachEdgeHoverBehavior();\n    }\n\n    destroy(): void {\n        this.detachEdgeHoverBehavior();\n        this.modelListener.dispose();\n    }\n\n    private attachEdgeHoverBehavior() {\n\n        this.detachEdgeHoverBehavior();\n        this.workflow.workflow.addEventListener(\"mouseenter\", this.boundEdgeEnterFunction, true);\n    }\n\n    private detachEdgeHoverBehavior() {\n        this.workflow.workflow.removeEventListener(\"mouseenter\", this.boundEdgeEnterFunction, true);\n    }\n\n    private onEdgeEnter(ev: MouseEvent) {\n\n\n        // Ignore if we did not enter an edge\n        if (!(ev.target! as Element).classList.contains(\"edge\")) return;\n\n        const target = ev.target as SVGGElement;\n        let tipEl: SVGGElement;\n\n        const onMouseMove = (ev: MouseEvent) => {\n            const coords = this.workflow.transformScreenCTMtoCanvas(ev.clientX, ev.clientY);\n            tipEl.setAttribute(\"x\", String(coords.x));\n            tipEl.setAttribute(\"y\", String(coords.y - 16));\n        };\n\n        const onMouseLeave = (ev: MouseEvent) => {\n            tipEl.remove();\n            target.removeEventListener(\"mousemove\", onMouseMove);\n            target.removeEventListener(\"mouseleave\", onMouseLeave)\n        };\n\n        this.modelListener = this.workflow.model.on(\"connection.remove\", (source, destination) => {\n            if (!tipEl) return;\n            const [tipS, tipD] = tipEl.getAttribute(\"data-source-destination\")!.split(\"$!$\");\n            if (tipS === source.connectionId && tipD === destination.connectionId) {\n                tipEl.remove();\n            }\n        });\n\n        const sourceNode    = target.getAttribute(\"data-source-node\");\n        const destNode      = target.getAttribute(\"data-destination-node\");\n        const sourcePort    = target.getAttribute(\"data-source-port\");\n        const destPort      = target.getAttribute(\"data-destination-port\");\n        const sourceConnect = target.getAttribute(\"data-source-connection\");\n        const destConnect   = target.getAttribute(\"data-destination-connection\");\n\n        const sourceLabel = sourceNode === sourcePort ? sourceNode : `${sourceNode} (${sourcePort})`;\n        const destLabel   = destNode === destPort ? destNode : `${destNode} (${destPort})`;\n\n        const coords = this.workflow.transformScreenCTMtoCanvas(ev.clientX, ev.clientY);\n\n        const ns = \"http://www.w3.org/2000/svg\";\n        tipEl    = document.createElementNS(ns, \"text\");\n        tipEl.classList.add(\"label\");\n        tipEl.classList.add(\"label-edge\");\n        tipEl.setAttribute(\"x\", String(coords.x));\n        tipEl.setAttribute(\"y\", String(coords.y));\n        tipEl.setAttribute(\"data-source-destination\", sourceConnect + \"$!$\" + destConnect);\n        tipEl.innerHTML = sourceLabel + \" → \" + destLabel;\n\n        this.workflow.workflow.appendChild(tipEl);\n\n        target.addEventListener(\"mousemove\", onMouseMove);\n        target.addEventListener(\"mouseleave\", onMouseLeave);\n\n    }\n\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/node-move/node-move.ts",
    "content": "import {Workflow}   from \"../..\";\nimport {PluginBase} from \"../plugin-base\";\nimport {EdgePanner} from \"../../behaviors/edge-panning\";\n\nexport interface ConstructorParams {\n    movementSpeed?: number,\n    scrollMargin?: number\n}\n\n/**\n * This plugin makes node dragging and movement possible.\n *\n * @FIXME: attach events for before and after change\n */\nexport class SVGNodeMovePlugin extends PluginBase {\n\n    /** Difference in movement on the X axis since drag start, adapted for scale and possibly panned distance */\n    private sdx: number;\n\n    /** Difference in movement on the Y axis since drag start, adapted for scale and possibly panned distance */\n    private sdy: number;\n\n    /** Stored onDragStart so we can put node to a fixed position determined by startX + ∆x */\n    private startX?: number;\n\n    /** Stored onDragStart so we can put node to a fixed position determined by startY + ∆y */\n    private startY?: number;\n\n    /** How far from the edge of the viewport does mouse need to be before panning is triggered */\n    private scrollMargin = 50;\n\n    /** How fast does workflow move while panning */\n    private movementSpeed = 10;\n\n    /** Holds an element that is currently being dragged. Stored onDragStart and translated afterwards. */\n    private movingNode?: SVGGElement;\n\n    /** Stored onDragStart to detect collision with viewport edges */\n    private boundingClientRect?: ClientRect;\n\n    /** Cache input edges and their parsed bezier curve parameters so we don't query for them on each mouse move */\n    private inputEdges?: Map<SVGPathElement, number[]>;\n\n    /** Cache output edges and their parsed bezier curve parameters so we don't query for them on each mouse move */\n    private outputEdges?: Map<SVGPathElement, number[]>;\n\n    /** Workflow panning at the time of onDragStart, used to adjust ∆x and ∆y while panning */\n    private startWorkflowTranslation?: { x: number, y: number };\n\n    private wheelPrevent = (ev: any) => ev.stopPropagation();\n\n    private boundMoveHandler      = this.onMove.bind(this);\n    private boundMoveStartHandler = this.onMoveStart.bind(this);\n    private boundMoveEndHandler   = this.onMoveEnd.bind(this);\n\n    private detachDragListenerFn: any = undefined;\n\n    private edgePanner: EdgePanner;\n\n    constructor(parameters: ConstructorParams = {}) {\n        super();\n        Object.assign(this, parameters);\n    }\n\n\n    onEditableStateChange(enabled: boolean): void {\n\n        if (enabled) {\n            this.attachDrag();\n        } else {\n            this.detachDrag();\n        }\n    }\n\n    afterRender() {\n\n        if (this.workflow.editingEnabled) {\n            this.attachDrag();\n        }\n\n    }\n\n    destroy(): void {\n        this.detachDrag();\n    }\n\n    registerWorkflow(workflow: Workflow): void {\n        super.registerWorkflow(workflow);\n\n        this.edgePanner = new EdgePanner(this.workflow, {\n            scrollMargin: this.scrollMargin,\n            movementSpeed: this.movementSpeed\n        });\n    }\n\n    private detachDrag() {\n        if (typeof this.detachDragListenerFn === \"function\") {\n            this.detachDragListenerFn();\n        }\n\n        this.detachDragListenerFn = undefined;\n    }\n\n    private attachDrag() {\n\n        this.detachDrag();\n\n        this.detachDragListenerFn = this.workflow.domEvents.drag(\n            \".node .core\",\n            this.boundMoveHandler,\n            this.boundMoveStartHandler,\n            
this.boundMoveEndHandler\n        );\n    }\n\n    private getWorkflowMatrix(): SVGMatrix {\n        return this.workflow.workflow.transform.baseVal.getItem(0).matrix;\n    }\n\n    private onMove(dx: number, dy: number, ev: MouseEvent): void {\n\n        /** We will use the workflow scale to determine how mouse movement translates to SVG proportions */\n        const scale = this.workflow.scale;\n\n        /** We need to know how far the workflow itself has moved since we started dragging */\n        const matrixMovement = {\n            x: this.getWorkflowMatrix().e - this.startWorkflowTranslation!.x,\n            y: this.getWorkflowMatrix().f - this.startWorkflowTranslation!.y\n        };\n\n        /** We might have hit the boundary and need to start panning */\n        this.edgePanner.triggerCollisionDetection(ev.clientX, ev.clientY, (sdx, sdy) => {\n            this.sdx += sdx;\n            this.sdy += sdy;\n\n            this.translateNodeBy(this.movingNode!, sdx, sdy);\n            this.redrawEdges(this.sdx, this.sdy);\n        });\n\n        /**\n         * We need to store scaled ∆x and ∆y because this is not the only place from which the node is being moved.\n         * If the mouse is outside the viewport and the workflow is panning, startScroll will continue moving\n         * this node, so it needs to know where to start from and keep that updated, so this method can take\n         * over when the mouse returns to the viewport.\n         *\n         * Without this handoff, the node would jump back and forth between the\n         * last positions tracked by each movement initiator.\n         */\n        this.sdx = (dx - matrixMovement.x) / scale;\n        this.sdy = (dy - matrixMovement.y) / scale;\n\n        const moveX = this.sdx + this.startX!;\n        const moveY = this.sdy + this.startY!;\n\n        this.translateNodeTo(this.movingNode!, moveX, moveY);\n        this.redrawEdges(this.sdx, this.sdy);\n    }\n\n    /**\n     * Triggered from {@link attachDrag} when drag starts.\n     * This method initializes properties that are needed for calculations during movement.\n     */\n    private onMoveStart(event: MouseEvent, handle: SVGGElement): void {\n\n        /** We will query the SVG DOM for edges that we need to move, so store the svg element for easy access */\n        const svg = this.workflow.svgRoot;\n\n        document.addEventListener(\"mousewheel\", this.wheelPrevent, true);\n\n        /** Our drag handle is a child of the node rather than the whole node, which would also include ports and labels */\n        const node = handle.parentNode as SVGGElement;\n\n        /** Store initial transform values so we know how much we've moved relative to the starting position */\n        const nodeMatrix = node.transform.baseVal.getItem(0).matrix;\n        this.startX      = nodeMatrix.e;\n        this.startY      = nodeMatrix.f;\n\n        /** We have to query for edges that are attached to this node because we will move them as well */\n        const nodeID = node.getAttribute(\"data-id\");\n\n        /**\n         * When the user drags the node to the edge and waits while the workflow pans to the side,\n         * mouse movement stops, but workflow movement starts.\n         * We then utilize this to get the movement ∆ of the workflow, and use that for translation instead.\n         */\n        this.startWorkflowTranslation = {\n            x: this.getWorkflowMatrix().e,\n            y: this.getWorkflowMatrix().f\n        };\n\n        /** Used to determine whether the dragged node is hitting the edge, so we can pan the 
workflow */\n        this.boundingClientRect = svg.getBoundingClientRect();\n\n        /** Node movement can be initiated from both mouse events and animationFrame, so make it accessible */\n        this.movingNode = handle.parentNode as SVGGElement;\n\n        /**\n         * While node is being moved, incoming and outgoing edges also need to be moved in order to stay attached.\n         * We don't want to query them all the time, so we cache them in maps that point from their DOM elements\n         * to an array of numbers that represent their bezier curves, since we will update those curves.\n         */\n        this.inputEdges = new Map();\n        this.outputEdges = new Map();\n\n        const outputsSelector = `.edge[data-source-node='${nodeID}'] .sub-edge`;\n        const inputsSelector  = `.edge[data-destination-node='${nodeID}'] .sub-edge`;\n\n        const query: any = svg.querySelectorAll([inputsSelector, outputsSelector].join(\", \")) as NodeListOf<SVGPathElement>;\n\n        for (let subEdge of query) {\n            const isInput = subEdge.parentElement.getAttribute(\"data-destination-node\") === nodeID;\n            const path    = subEdge.getAttribute(\"d\").split(\" \").map(Number).filter((e: any) => !isNaN(e));\n            isInput ? this.inputEdges.set(subEdge, path) : this.outputEdges.set(subEdge, path);\n        }\n    }\n\n    private translateNodeBy(node: SVGGElement, x?: number, y?: number): void {\n        const matrix = node.transform.baseVal.getItem(0).matrix;\n        this.translateNodeTo(node, matrix.e + x!, matrix.f + y!);\n    }\n\n    private translateNodeTo(node: SVGGElement, x?: number, y?: number): void {\n        node.transform.baseVal.getItem(0).setTranslate(x!, y!);\n    }\n\n    /**\n     * Redraws the stored input and output edges, translating them by the scaled\n     * movement deltas sdx and sdy.\n     */\n    private redrawEdges(sdx: number, sdy: number): void {\n        this.inputEdges!.forEach((p, el) => {\n            const path = Workflow.makeConnectionPath(p[0], p[1], p[6] + sdx, p[7] + sdy);\n            el.setAttribute(\"d\", path!);\n        });\n\n        this.outputEdges!.forEach((p, el) => {\n            const path = Workflow.makeConnectionPath(p[0] + sdx, p[1] + sdy, p[6], p[7]);\n            el.setAttribute(\"d\", path!);\n        });\n    }\n\n    /**\n     * Triggered from {@link attachDrag} after the move event ends.\n     */\n    private onMoveEnd(): void {\n\n        this.edgePanner.stop();\n\n        const id        = this.movingNode!.getAttribute(\"data-connection-id\")!;\n        const nodeModel = this.workflow.model.findById(id);\n\n        if (!nodeModel.customProps) {\n            nodeModel.customProps = {};\n        }\n\n        const matrix = this.movingNode!.transform.baseVal.getItem(0).matrix;\n\n        Object.assign(nodeModel.customProps, {\n            \"sbg:x\": matrix.e,\n            \"sbg:y\": matrix.f,\n        });\n\n        this.onAfterChange({type: \"node-move\"});\n\n        document.removeEventListener(\"mousewheel\", this.wheelPrevent, true);\n\n        delete this.startX;\n        delete this.startY;\n        delete this.movingNode;\n        delete this.inputEdges;\n        delete this.outputEdges;\n        delete this.boundingClientRect;\n        delete this.startWorkflowTranslation;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/plugin-base.ts",
    "content": "import {GraphChange, SVGPlugin} from \"./plugin\";\nimport {Workflow}               from \"../graph/workflow\";\n\nexport abstract class PluginBase implements SVGPlugin {\n\n    protected workflow: Workflow;\n\n    /** plugin should trigger before a change is about to occur on the model */\n    protected onBeforeChange: (change: GraphChange) => void;\n\n    /** plugin should trigger after a change has occurred on the model */\n    protected onAfterChange: (change: GraphChange) => void;\n\n    /** plugin should trigger when internal svg elements have been deleted and new ones created */\n    protected onAfterRender: (change: GraphChange) => void;\n\n    registerWorkflow(workflow: Workflow): void {\n        this.workflow = workflow;\n    }\n\n    registerOnBeforeChange(fn: (change: GraphChange) => void): void {\n        this.onBeforeChange = fn;\n    }\n\n    registerOnAfterChange(fn: (change: GraphChange) => void): void {\n        this.onAfterChange = fn;\n    }\n\n    registerOnAfterRender(fn: (change: GraphChange) => void): void {\n        this.onAfterRender = fn;\n    }\n}"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/plugin.ts",
    "content": "import {Workflow} from '../graph/workflow';\n\nexport interface GraphChange {\n    type: string;\n\n}\n\nexport interface SVGPlugin {\n\n    registerWorkflow?(workflow: Workflow): void;\n\n    registerOnBeforeChange?(fn: (change: GraphChange) => void): void;\n\n    registerOnAfterChange?(fn: (change: GraphChange) => void): void;\n\n    registerOnAfterRender?(fn: (change: GraphChange) => void): void;\n\n    afterRender?(): void;\n\n    /**\n     * Invoked when the underlying model instance changes.\n     * Implementation should dispose listeners from the old model and attach listeners to the new one.\n     */\n    afterModelChange?(): void;\n\n    onEditableStateChange?(enabled: boolean): void;\n\n    /**\n     * Invoked when a graph should be destroyed.\n     * Implementations should remove attached DOM and model event listeners, as well as other stuff that\n     * might be left in memory.\n     */\n    destroy?(): void;\n}"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/port-drag/_variables.scss",
    "content": "@import \"../../assets/styles/variables\";\n\n$port-suggested-fill: $color-primary !default;\n$port-snap-highlight-stroke: $port-hover-stroke-color !default;\n$port-snap-stroke-width: 2px !default;\n$edge-dragged-stroke: $edge-inner-stroke-color !default;\n$edge-dragged-dasharray: 5 !default;\n$ghost-node-stroke-color: $node-input-fill-color !default;\n$ghost-node-fill-color: $background-color !default;"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/port-drag/port-drag.ts",
    "content": "import {PluginBase} from \"../plugin-base\";\nimport {Workflow}   from \"../..\";\nimport {GraphNode}  from \"../../graph/graph-node\";\nimport {Geometry}   from \"../../utils/geometry\";\nimport {Edge}       from \"../../graph/edge\";\nimport {EdgePanner} from \"../../behaviors/edge-panning\";\n\nexport class SVGPortDragPlugin extends PluginBase {\n\n    /** Stored on drag start to detect collision with viewport edges */\n    private boundingClientRect: ClientRect | undefined;\n\n    private portOrigins: Map<SVGGElement, SVGMatrix> | undefined;\n\n    /** Group of edges (compound element) leading from origin port to ghost node */\n    private edgeGroup: SVGGElement | undefined;\n\n    /** Coordinates of the node from which dragged port originates, stored so we can measure the distance from it */\n    private nodeCoords: { x: number; y: number } | undefined;\n\n    /** Reference to a node that marks a new input/output creation */\n    private ghostNode: SVGGElement | undefined;\n\n    /** How far away from the port you need to drag in order to create a new input/output instead of snapping */\n    private snapRadius = 120;\n\n    /** Tells if the port is on the left or on the right side of a node */\n    private portType: \"input\" | \"output\";\n\n    /** Stores a port to which a connection would snap if user stops the drag */\n    private snapPort: SVGGElement | undefined;\n\n    /** Map of CSS classes attached by this plugin */\n    private css = {\n\n        /** Added to svgRoot as a sign that this plugin is active */\n        plugin: \"__plugin-port-drag\",\n\n        /** Suggests that an element that contains it will be the one to snap to */\n        snap: \"__port-drag-snap\",\n\n        /** Added to svgRoot while dragging is in progress */\n        dragging: \"__port-drag-dragging\",\n\n        /** Will be added to suggested ports and their parent nodes */\n        suggestion: \"__port-drag-suggestion\",\n    };\n\n    /** Port from which we initiated the drag */\n    private originPort: SVGGElement | undefined;\n    private detachDragListenerFn: Function | undefined = undefined;\n\n    private wheelPrevent = (ev: any) => ev.stopPropagation();\n    private panner: EdgePanner;\n\n    private ghostX = 0;\n    private ghostY = 0;\n    private portOnCanvas: { x: number; y: number };\n    private lastMouseMove: { x: number; y: number };\n\n    registerWorkflow(workflow: Workflow): void {\n        super.registerWorkflow(workflow);\n        this.panner = new EdgePanner(this.workflow);\n\n        this.workflow.svgRoot.classList.add(this.css.plugin);\n    }\n\n    afterRender(): void {\n        if(this.workflow.editingEnabled){\n            this.attachPortDrag();\n        }\n\n    }\n\n    onEditableStateChange(enabled: boolean): void {\n\n        if (enabled) {\n            this.attachPortDrag();\n        } else {\n            this.detachPortDrag();\n        }\n    }\n\n\n    destroy(): void {\n        this.detachPortDrag();\n    }\n\n    detachPortDrag() {\n        if (typeof this.detachDragListenerFn === \"function\") {\n            this.detachDragListenerFn();\n        }\n\n        this.detachDragListenerFn = undefined;\n    }\n\n    attachPortDrag() {\n\n        this.detachPortDrag();\n\n        this.detachDragListenerFn = this.workflow.domEvents.drag(\n            \".port\",\n            this.onMove.bind(this),\n            this.onMoveStart.bind(this),\n            this.onMoveEnd.bind(this)\n        );\n\n    }\n\n    onMove(dx: number, dy: number, ev: MouseEvent, 
portElement: SVGGElement): void {\n\n        // Attached on every move; addEventListener ignores duplicate registrations of\n        // an identical listener/capture pair, so the listener is only added once\n        document.addEventListener(\"mousewheel\", this.wheelPrevent, true);\n        const mouseOnSVG = this.workflow.transformScreenCTMtoCanvas(ev.clientX, ev.clientY);\n        const scale      = this.workflow.scale;\n\n        const sdx = (dx - this.lastMouseMove.x) / scale;\n        const sdy = (dy - this.lastMouseMove.y) / scale;\n\n        /** We might have hit the boundary and need to start panning */\n        this.panner.triggerCollisionDetection(ev.clientX, ev.clientY, (sdx, sdy) => {\n            this.ghostX += sdx;\n            this.ghostY += sdy;\n            this.translateGhostNode(this.ghostX, this.ghostY);\n            this.updateEdge(this.portOnCanvas.x, this.portOnCanvas.y, this.ghostX, this.ghostY);\n        });\n\n        const nodeToMouseDistance = Geometry.distance(\n            this.nodeCoords!.x, this.nodeCoords!.y,\n            mouseOnSVG.x, mouseOnSVG.y\n        );\n\n        const closestPort = this.findClosestPort(mouseOnSVG.x, mouseOnSVG.y);\n        this.updateSnapPort(closestPort.portEl!, closestPort.distance);\n\n        this.ghostX += sdx;\n        this.ghostY += sdy;\n\n        this.translateGhostNode(this.ghostX, this.ghostY);\n        this.updateGhostNodeVisibility(nodeToMouseDistance, closestPort.distance);\n        this.updateEdge(this.portOnCanvas.x, this.portOnCanvas.y, this.ghostX, this.ghostY);\n\n        this.lastMouseMove = {x: dx, y: dy};\n    }\n\n    /**\n     * @FIXME: Add panning\n     * @param {MouseEvent} ev\n     * @param {SVGGElement} portEl\n     */\n    onMoveStart(ev: MouseEvent, portEl: SVGGElement): void {\n\n        this.lastMouseMove = {x: 0, y: 0};\n\n        this.originPort   = portEl;\n        const portCTM     = portEl.getScreenCTM()!;\n        this.portOnCanvas = this.workflow.transformScreenCTMtoCanvas(portCTM.e, portCTM.f);\n        this.ghostX       = this.portOnCanvas.x;\n        this.ghostY       = this.portOnCanvas.y;\n\n        // Needed for collision detection\n        this.boundingClientRect = this.workflow.svgRoot.getBoundingClientRect();\n\n        const nodeMatrix = this.workflow.findParent(portEl)!.transform.baseVal.getItem(0).matrix;\n        this.nodeCoords  = {\n            x: nodeMatrix.e,\n            y: nodeMatrix.f\n        };\n\n        const workflowGroup = this.workflow.workflow;\n\n        this.portType = portEl.classList.contains(\"input-port\") ? 
\"input\" : \"output\";\n\n        this.ghostNode = this.createGhostNode(this.portType);\n\n        workflowGroup.appendChild(this.ghostNode);\n\n        /** @FIXME: this should come from workflow */\n        this.edgeGroup = Edge.spawn();\n        this.edgeGroup.classList.add(this.css.dragging);\n\n        workflowGroup.appendChild(this.edgeGroup);\n\n        this.workflow.svgRoot.classList.add(this.css.dragging);\n\n\n        this.portOrigins = this.getPortCandidateTransformations(portEl);\n\n        this.highlightSuggestedPorts(portEl.getAttribute(\"data-connection-id\")!);\n\n\n    }\n\n    onMoveEnd(ev: MouseEvent): void {\n\n        document.removeEventListener(\"mousewheel\", this.wheelPrevent, true);\n\n        this.panner.stop();\n\n        const ghostType      = this.ghostNode!.getAttribute(\"data-type\");\n        const ghostIsVisible = !this.ghostNode!.classList.contains(\"hidden\");\n\n        const shouldSnap         = this.snapPort !== undefined;\n        const shouldCreateInput  = ghostIsVisible && ghostType === \"input\";\n        const shouldCreateOutput = ghostIsVisible && ghostType === \"output\";\n        const portID             = this.originPort!.getAttribute(\"data-connection-id\")!;\n\n        if (shouldSnap) {\n            this.createEdgeBetweenPorts(this.originPort!, this.snapPort!);\n        } else if (shouldCreateInput || shouldCreateOutput) {\n\n            const svgCoordsUnderMouse = this.workflow.transformScreenCTMtoCanvas(ev.clientX, ev.clientY);\n            const customProps         = {\n                \"sbg:x\": svgCoordsUnderMouse.x,\n                \"sbg:y\": svgCoordsUnderMouse.y\n            };\n\n            if (shouldCreateInput) {\n                this.workflow.model.createInputFromPort(portID, {customProps});\n            } else {\n                this.workflow.model.createOutputFromPort(portID, {customProps});\n            }\n        }\n\n        this.cleanMemory();\n        this.cleanStyles();\n    }\n\n    private updateSnapPort(closestPort: SVGGElement, closestPortDistance: number) {\n\n        const closestPortChanged      = closestPort !== this.snapPort;\n        const closestPortIsOutOfRange = closestPortDistance > this.snapRadius;\n\n        // We might need to remove old class for snapping if we are closer to some other port now\n        if (this.snapPort && (closestPortChanged || closestPortIsOutOfRange)) {\n            const node = this.workflow.findParent(this.snapPort)!;\n            this.snapPort.classList.remove(this.css.snap);\n            node.classList.remove(this.css.snap);\n            delete this.snapPort;\n        }\n\n        // If closest port is further away than our snapRadius, no highlighting should be done\n        if (closestPortDistance > this.snapRadius) {\n            return;\n        }\n\n        const originID = this.originPort!.getAttribute(\"data-connection-id\")!;\n        const targetID = closestPort.getAttribute(\"data-connection-id\")!;\n\n        if (this.findEdge(originID, targetID)) {\n            delete this.snapPort;\n            return;\n        }\n\n        this.snapPort = closestPort;\n\n        const node             = this.workflow.findParent(closestPort)!;\n        const oppositePortType = this.portType === \"input\" ? 
\"output\" : \"input\";\n\n        closestPort.classList.add(this.css.snap);\n        node.classList.add(this.css.snap);\n        node.classList.add(`${this.css.snap}-${oppositePortType}`);\n    }\n\n    private updateEdge(fromX: number, fromY: number, toX: number, toY: number): void {\n        const subEdges = this.edgeGroup!.children as HTMLCollectionOf<SVGPathElement>;\n\n        for (let subEdge of subEdges as any) {\n\n            const path = Workflow.makeConnectionPath(\n                fromX,\n                fromY,\n                toX,\n                toY,\n                this.portType === \"input\" ? \"left\" : \"right\"\n            );\n\n            subEdge.setAttribute(\"d\", path);\n        }\n    }\n\n    private updateGhostNodeVisibility(distanceToMouse: number, distanceToClosestPort: any) {\n\n        const isHidden        = this.ghostNode!.classList.contains(\"hidden\");\n        const shouldBeVisible = distanceToMouse > this.snapRadius && distanceToClosestPort > this.snapRadius;\n\n        if (shouldBeVisible && isHidden) {\n            this.ghostNode!.classList.remove(\"hidden\");\n        } else if (!shouldBeVisible && !isHidden) {\n            this.ghostNode!.classList.add(\"hidden\");\n        }\n    }\n\n    private translateGhostNode(x: number, y: number) {\n        this.ghostNode!.transform.baseVal.getItem(0).setTranslate(x, y);\n    }\n\n    private getPortCandidateTransformations(portEl: SVGGElement): Map<SVGGElement, SVGMatrix> {\n        const nodeEl           = this.workflow.findParent(portEl)!;\n        const nodeConnectionID = nodeEl.getAttribute(\"data-connection-id\");\n\n        const otherPortType = this.portType === \"input\" ? \"output\" : \"input\";\n        const portQuery     = `.node:not([data-connection-id=\"${nodeConnectionID}\"]) .port.${otherPortType}-port`;\n\n        const candidates: any = this.workflow.workflow.querySelectorAll(portQuery) as NodeListOf<SVGGElement>;\n        const matrices   = new Map<SVGGElement, SVGMatrix>();\n\n        for (let port of candidates) {\n            matrices.set(port, Geometry.getTransformToElement(port, this.workflow.workflow));\n        }\n\n        return matrices;\n    }\n\n    /**\n     * Highlights ports that are model says are suggested.\n     * Also marks their parent nodes as highlighted.\n     *\n     * @param {string} targetConnectionID ConnectionID of the origin port\n     */\n    private highlightSuggestedPorts(targetConnectionID: string): void {\n\n        // Find all ports that we can validly connect to\n        // Note that we can connect to any port, but some of them are suggested based on hypothetical validity.\n        const portModels = this.workflow.model.gatherValidConnectionPoints(targetConnectionID);\n\n        for (let i = 0; i < portModels.length; i++) {\n\n            const portModel = portModels[i];\n\n            if (!portModel.isVisible) continue;\n\n            // Find port element by this connectionID and it's parent node element\n            const portQuery   = `.port[data-connection-id=\"${portModel.connectionId}\"]`;\n            const portElement = this.workflow.workflow.querySelector(portQuery)!;\n            const parentNode  = this.workflow.findParent(portElement)!;\n\n            // Add highlighting classes to port and it's parent node\n            parentNode.classList.add(this.css.suggestion);\n            portElement.classList.add(this.css.suggestion);\n        }\n    }\n\n    /**\n     * @FIXME: GraphNode.radius should somehow come through Workflow,\n     */\n   
 private createGhostNode(type: \"input\" | \"output\"): SVGGElement {\n        const namespace = \"http://www.w3.org/2000/svg\";\n        const node      = document.createElementNS(namespace, \"g\");\n\n        node.setAttribute(\"transform\", \"matrix(1,0,0,1,0,0)\");\n        node.setAttribute(\"data-type\", type);\n        node.classList.add(\"ghost\");\n        node.classList.add(\"node\");\n        node.innerHTML = `<circle class=\"ghost-circle\" cx=\"0\" cy=\"0\" r=\"${GraphNode.radius / 1.5}\"></circle>`;\n\n        return node;\n    }\n\n    /**\n     * Finds the port closest to the given SVG coordinates.\n     */\n    private findClosestPort(x: number, y: number): { portEl: SVGGElement | undefined, distance: number } {\n        let closestPort: any     = undefined;\n        let closestDistance: any = Infinity;\n\n        this.portOrigins!.forEach((matrix, port) => {\n\n            const distance = Geometry.distance(x, y, matrix.e, matrix.f);\n\n            if (distance < closestDistance) {\n                closestPort     = port;\n                closestDistance = distance;\n            }\n        });\n\n        return {\n            portEl: closestPort,\n            distance: closestDistance\n        };\n    }\n\n    /**\n     * Removes all DOM elements and objects cached in memory during dragging that are no longer needed.\n     */\n    private cleanMemory() {\n        this.edgeGroup!.remove();\n        this.ghostNode!.remove();\n\n        this.snapPort           = undefined;\n        this.edgeGroup          = undefined;\n        this.nodeCoords         = undefined;\n        this.originPort         = undefined;\n        this.portOrigins        = undefined;\n        this.boundingClientRect = undefined;\n    }\n\n    /**\n     * Removes all css classes attached by this plugin\n     */\n    private cleanStyles(): void {\n        this.workflow.svgRoot.classList.remove(this.css.dragging);\n\n        for (let cls in this.css) {\n            const query: any = this.workflow.svgRoot.querySelectorAll(\".\" + this.css[cls]);\n\n            for (let el of query) {\n                el.classList.remove(this.css[cls]);\n            }\n        }\n    }\n\n    /**\n     * Creates an edge (connection) between two elements determined by their connection IDs.\n     * This edge is created on the model, and not rendered directly on the graph, as the main workflow\n     * is supposed to catch the creation event and draw it.\n     */\n    private createEdgeBetweenPorts(source: SVGGElement, destination: SVGGElement): void {\n\n        // Find the connection ids of the origin port and the highlighted port\n        let sourceID      = source.getAttribute(\"data-connection-id\")!;\n        let destinationID = destination.getAttribute(\"data-connection-id\")!;\n\n        // Swap their places in case we dragged out from an input to an output, since they have to be ordered output->input\n        if (sourceID.startsWith(\"in\")) {\n            const tmp     = sourceID;\n            sourceID      = destinationID;\n            destinationID = tmp;\n        }\n\n        this.workflow.model.connect(sourceID, destinationID);\n    }\n\n    private findEdge(sourceID: string, destinationID: string): SVGGElement | undefined {\n        const ltrQuery = `[data-source-connection=\"${sourceID}\"][data-destination-connection=\"${destinationID}\"]`;\n        const rtlQuery = `[data-source-connection=\"${destinationID}\"][data-destination-connection=\"${sourceID}\"]`;\n        return 
(this.workflow.workflow.querySelector(`${ltrQuery},${rtlQuery}`) || undefined) as SVGGElement | undefined;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/port-drag/style.css",
    "content": ".cwl-workflow.__plugin-port-drag .port.__port-drag-suggestion {\n  fill: #11a7a7; }\n  .cwl-workflow.__plugin-port-drag .port.__port-drag-suggestion .label {\n    opacity: 1; }\n\n.cwl-workflow.__plugin-port-drag .port.__port-drag-snap {\n  stroke: #676767;\n  stroke-width: 2px; }\n\n.cwl-workflow.__plugin-port-drag .node.__port-drag-snap.__port-drag-snap-input .input-port .label,\n.cwl-workflow.__plugin-port-drag .node.__port-drag-snap.__port-drag-snap-output .output-port .label {\n  opacity: 1; }\n\n.cwl-workflow.__plugin-port-drag.__port-drag-dragging {\n  pointer-events: none; }\n\n.cwl-workflow.__plugin-port-drag .edge.__port-drag-dragging .inner {\n  stroke: #9a9a9a !important;\n  stroke-dasharray: 5; }\n\n.cwl-workflow.__plugin-port-drag .ghost {\n  stroke: #c3c3c3;\n  stroke-width: 2px;\n  stroke-dasharray: 5 3;\n  fill: white; }\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/port-drag/style.scss",
    "content": "@import \"variables\";\n\n// Expectations:\n//\n.cwl-workflow.__plugin-port-drag {\n\n  // Ports that are marked as suggested should be coloured differently and have visible labels\n  .port.__port-drag-suggestion {\n    fill: $port-suggested-fill;\n\n    .label {\n      opacity: 1;\n    }\n\n  }\n\n  // Port that is marked as a snap choice should have a stroke around it\n  .port.__port-drag-snap {\n    stroke: $port-snap-highlight-stroke;\n    stroke-width: $port-snap-stroke-width;\n  }\n\n  .node.__port-drag-snap {\n\n    // Nodes that are parents of snap choice ports should make all port labels on that side visible\n    &.__port-drag-snap-input .input-port,\n    &.__port-drag-snap-output .output-port {\n\n      .label {\n        opacity: 1;\n      }\n\n    }\n  }\n\n  // While dragging from a port, hover effects over other elements should not be triggered\n  // This also prevents zooming in and out using mouse scroll while dragging\n  &.__port-drag-dragging {\n    pointer-events: none;\n  }\n\n  .edge.__port-drag-dragging .inner {\n    stroke: $edge-dragged-stroke !important;\n    stroke-dasharray: $edge-dragged-dasharray;\n\n  }\n\n  .ghost {\n    stroke: $ghost-node-stroke-color;\n    stroke-width: 2px;\n\n    stroke-dasharray: 5 3;\n    fill: $ghost-node-fill-color;\n  }\n\n}"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/port-drag/theme.css",
    "content": ".cwl-workflow.__plugin-port-drag .port.__port-drag-suggestion {\n  fill: #11a7a7; }\n  .cwl-workflow.__plugin-port-drag .port.__port-drag-suggestion .label {\n    opacity: 1; }\n\n.cwl-workflow.__plugin-port-drag .port.__port-drag-snap {\n  stroke: #676767;\n  stroke-width: 2px; }\n\n.cwl-workflow.__plugin-port-drag .node.__port-drag-snap.__port-drag-snap-input .input-port .label,\n.cwl-workflow.__plugin-port-drag .node.__port-drag-snap.__port-drag-snap-output .output-port .label {\n  opacity: 1; }\n\n.cwl-workflow.__plugin-port-drag.__port-drag-dragging {\n  pointer-events: none; }\n\n.cwl-workflow.__plugin-port-drag .edge.__port-drag-dragging .inner {\n  stroke: #9a9a9a !important;\n  stroke-dasharray: 5; }\n\n.cwl-workflow.__plugin-port-drag .ghost {\n  stroke: #c3c3c3;\n  stroke-width: 2px;\n  stroke-dasharray: 5 3;\n  fill: white; }\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/port-drag/theme.dark.css",
    "content": ".cwl-workflow.__plugin-port-drag .port.__port-drag-suggestion {\n  fill: #00fff0; }\n  .cwl-workflow.__plugin-port-drag .port.__port-drag-suggestion .label {\n    opacity: 1; }\n\n.cwl-workflow.__plugin-port-drag .port.__port-drag-snap {\n  stroke: white;\n  stroke-width: 2px; }\n\n.cwl-workflow.__plugin-port-drag .node.__port-drag-snap.__port-drag-snap-input .input-port .label,\n.cwl-workflow.__plugin-port-drag .node.__port-drag-snap.__port-drag-snap-output .output-port .label {\n  opacity: 1; }\n\n.cwl-workflow.__plugin-port-drag.__port-drag-dragging {\n  pointer-events: none; }\n\n.cwl-workflow.__plugin-port-drag .edge.__port-drag-dragging .inner {\n  stroke: #9a9a9a !important;\n  stroke-dasharray: 5; }\n\n.cwl-workflow.__plugin-port-drag .ghost {\n  stroke: #c3c3c3;\n  stroke-width: 2px;\n  stroke-dasharray: 5 3;\n  fill: #303030; }\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/port-drag/theme.dark.scss",
    "content": "@import \"../../assets/styles/themes/rabix-dark/variables\";\n\n$port-suggested-fill: #00fff0 !default;\n\n@import \"variables\";\n@import \"./style.scss\";\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/port-drag/theme.scss",
    "content": "@import \"variables\";\n@import \"./style.scss\";\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/selection/_variables.scss",
    "content": "@import \"../../assets/styles/variables\";\n\n$color-neutral-faded: #e6e6e6 !default;\n$io-faded-fill: #f7f7f7 !default;\n\n$node-selected-outer-stroke: $color-primary !default;\n$edge-selected-inner-stroke: $color-primary !default;\n\n$node-faded-outer-stroke-color: $color-neutral-faded !default;\n$node-faded-step-fill-color: #c1d4d3 !default;\n$node-faded-input-fill-color: $io-faded-fill !default;\n$node-faded-output-fill-color: $io-faded-fill !default;\n\n$label-faded-color: #7e7d7d !default;\n$port-faded-fill-color: $color-neutral-faded !default;\n$edge-faded-inner-stroke-color: $color-neutral-faded !default;\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/selection/selection.ts",
    "content": "import {Workflow} from \"../..\";\nimport {PluginBase} from \"../plugin-base\";\n\nexport class SelectionPlugin extends PluginBase {\n\n    static edgePortsDelimiter = \"$!$\";\n    private svg: SVGSVGElement;\n    private selection = new Map<string, \"edge\" | \"node\">();\n    private cleanups: Function[] = [];\n    private detachModelEvents: Function | undefined;\n\n    private selectionChangeCallbacks: Function[] = [];\n\n    private css = {\n        selected: \"__selection-plugin-selected\",\n        highlight: \"__selection-plugin-highlight\",\n        fade: \"__selection-plugin-fade\",\n        plugin: \"__plugin-selection\"\n    };\n\n    registerWorkflow(workflow: Workflow): void {\n        super.registerWorkflow(workflow);\n\n        this.svg = this.workflow.svgRoot;\n\n        this.svg.classList.add(this.css.plugin);\n\n        const clickListener = this.onClick.bind(this);\n        this.svg.addEventListener(\"click\", clickListener);\n        this.cleanups.push(() => this.svg.removeEventListener(\"click\", clickListener));\n    }\n\n    afterRender() {\n        this.restoreSelection();\n    }\n\n    afterModelChange(): void {\n        if (typeof this.detachModelEvents === \"function\") {\n            this.detachModelEvents();\n        }\n\n        this.detachModelEvents = this.bindModelEvents();\n    }\n\n    destroy() {\n\n        if (this.detachModelEvents) {\n            this.detachModelEvents();\n        }\n        this.detachModelEvents = undefined;\n\n        this.svg.classList.remove(this.css.plugin);\n\n        for (const fn of this.cleanups) {\n            fn();\n        }\n    }\n\n    clearSelection(): void {\n\n        const selection: any  = this.svg.querySelectorAll(`.${this.css.selected}`);\n        const highlights: any = this.svg.querySelectorAll(`.${this.css.highlight}`);\n\n        for (const el of selection) {\n            el.classList.remove(this.css.selected);\n        }\n\n        for (const el of highlights) {\n            el.classList.remove(this.css.highlight);\n        }\n\n        this.svg.classList.remove(this.css.fade);\n\n        this.selection.clear();\n\n        this.emitChange(null);\n    }\n\n    getSelection() {\n        return this.selection;\n    }\n\n    registerOnSelectionChange(fn: (node: any) => any) {\n        this.selectionChangeCallbacks.push(fn);\n    }\n\n    selectStep(stepID: string) {\n        const query = `[data-connection-id=\"${stepID}\"]`;\n        const el = this.svg.querySelector(query) as SVGElement;\n\n        if (el) {\n            this.materializeClickOnElement(el);\n        }\n\n    }\n\n    private bindModelEvents() {\n\n        const handler = () => this.restoreSelection();\n        const cleanup: any[] = [];\n        const events  = [\"connection.create\", \"connection.remove\"];\n\n        for (const ev of events) {\n            const dispose = this.workflow.model.on(ev as any, handler);\n            cleanup.push(() => dispose.dispose());\n        }\n\n        return () => cleanup.forEach(fn => fn());\n    }\n\n    private restoreSelection() {\n        this.selection.forEach((type, connectionID) => {\n\n            if (type === \"node\") {\n\n                const el = this.svg.querySelector(`[data-connection-id=\"${connectionID}\"]`) as SVGElement;\n\n                if (el) {\n                    this.selectNode(el);\n                }\n\n            } else if (type === \"edge\") {\n\n                const [sID, dID]   = connectionID.split(SelectionPlugin.edgePortsDelimiter);\n                
const edgeSelector = `[data-source-connection=\"${sID}\"][data-destination-connection=\"${dID}\"]`;\n\n                const edge = this.svg.querySelector(edgeSelector) as SVGElement;\n\n                if (edge) {\n                    this.selectEdge(edge);\n                }\n\n            }\n        });\n    }\n\n    private onClick(click: MouseEvent): void {\n        const target = click.target as SVGElement;\n\n        this.clearSelection();\n\n        this.materializeClickOnElement(target);\n    }\n\n    private materializeClickOnElement(target: SVGElement) {\n\n        let element: SVGElement | undefined;\n\n        if ((element = this.workflow.findParent(target, \"node\"))) {\n            this.selectNode(element);\n            this.selection.set(element.getAttribute(\"data-connection-id\")!, \"node\");\n            this.emitChange(element);\n\n        } else if ((element = this.workflow.findParent(target, \"edge\"))) {\n            this.selectEdge(element);\n            const cid = [\n                element.getAttribute(\"data-source-connection\"),\n                SelectionPlugin.edgePortsDelimiter,\n                element.getAttribute(\"data-destination-connection\")\n            ].join(\"\");\n\n            this.selection.set(cid, \"edge\");\n            this.emitChange(cid);\n        }\n    }\n\n    private selectNode(element: SVGElement): void {\n        // Fade everything on canvas so we can highlight only selected stuff\n        this.svg.classList.add(this.css.fade);\n\n        // Mark this node as selected\n        element.classList.add(this.css.selected);\n        // Highlight it in case there are no edges on the graph\n        element.classList.add(this.css.highlight);\n\n        // Take all adjacent edges since we should highlight them and move them above the other edges\n        const nodeID        = element.getAttribute(\"data-id\");\n        const adjacentEdges: any = this.svg.querySelectorAll(\n            `.edge[data-source-node=\"${nodeID}\"],` +\n            `.edge[data-destination-node=\"${nodeID}\"]`\n        );\n\n        // Find the first node to be an anchor, so we can put all those edges just before that one.\n        const firstNode = this.svg.getElementsByClassName(\"node\")[0];\n\n        for (const edge of adjacentEdges) {\n\n            // Highlight each adjacent edge\n            edge.classList.add(this.css.highlight);\n\n            // Move it above other edges\n            this.workflow.workflow.insertBefore(edge, firstNode);\n\n            // Find all adjacent nodes so we can highlight them\n            const sourceNodeID      = edge.getAttribute(\"data-source-node\");\n            const destinationNodeID = edge.getAttribute(\"data-destination-node\");\n            const connectedNodes: any    = this.svg.querySelectorAll(\n                `.node[data-id=\"${sourceNodeID}\"],` +\n                `.node[data-id=\"${destinationNodeID}\"]`\n            );\n\n            // Highlight each adjacent node\n            for (const n of connectedNodes) {\n                n.classList.add(this.css.highlight);\n            }\n        }\n    }\n\n    private selectEdge(element: SVGElement) {\n\n        element.classList.add(this.css.highlight);\n        element.classList.add(this.css.selected);\n\n        const sourceNode = element.getAttribute(\"data-source-node\");\n        const destNode   = element.getAttribute(\"data-destination-node\");\n        const sourcePort = element.getAttribute(\"data-source-port\");\n        const destPort   = 
element.getAttribute(\"data-destination-port\");\n\n        const inputPortSelector  = `.node[data-id=\"${destNode}\"] .input-port[data-port-id=\"${destPort}\"]`;\n        const outputPortSelector = `.node[data-id=\"${sourceNode}\"] .output-port[data-port-id=\"${sourcePort}\"]`;\n\n        const connectedPorts: any = this.svg.querySelectorAll(`${inputPortSelector}, ${outputPortSelector}`);\n\n        for (const port of connectedPorts) {\n            port.classList.add(this.css.highlight);\n        }\n    }\n\n    private emitChange(change: any) {\n        for (const fn of this.selectionChangeCallbacks) {\n            fn(change);\n        }\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/selection/style.css",
    "content": ".cwl-workflow.__plugin-selection .node,\n.cwl-workflow.__plugin-selection .edge {\n  cursor: pointer; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight) .outer {\n  stroke: #e6e6e6; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight) .inner {\n  fill: #c1d4d3; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight).input .inner {\n  fill: #f7f7f7; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight).output .inner {\n  fill: #f7f7f7; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight) .label {\n  fill: #7e7d7d; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight) .port {\n  fill: #e6e6e6; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .edge:not(.__selection-plugin-highlight) .inner {\n  stroke: #e6e6e6; }\n\n.cwl-workflow.__plugin-selection .port.__selection-plugin-highlight .label {\n  opacity: 1; }\n\n.cwl-workflow.__plugin-selection .__selection-plugin-selected.edge .inner {\n  stroke: #11a7a7; }\n\n.cwl-workflow.__plugin-selection .__selection-plugin-selected.node .outer {\n  stroke: #11a7a7; }\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/selection/style.scss",
    "content": "@import \"variables\";\n\n.cwl-workflow.__plugin-selection {\n\n  .node,\n  .edge {\n    cursor: pointer;\n  }\n\n  // When something is selected on canvas, everything should fade.\n  // Then, selected and highlighted elements should override that.\n  &.__selection-plugin-fade {\n\n    // This is how nodes fade out\n    .node:not(.__selection-plugin-highlight) {\n\n      .outer {\n        stroke: $node-faded-outer-stroke-color;\n      }\n\n      .inner {\n        fill: $node-faded-step-fill-color;\n      }\n\n      &.input .inner {\n        fill: $node-faded-input-fill-color;\n      }\n\n      &.output .inner {\n        fill: $node-faded-output-fill-color;\n      }\n\n      // Their labels fade away a bit less\n      .label {\n        fill: $label-faded-color;\n      }\n      // Ports are darker\n      .port {\n        fill: $port-faded-fill-color;;\n      }\n\n    }\n\n    .edge:not(.__selection-plugin-highlight) {\n      .inner {\n        stroke: $edge-faded-inner-stroke-color;\n      }\n    }\n\n  }\n\n  .port.__selection-plugin-highlight .label {\n    opacity: 1;\n  }\n\n  .__selection-plugin-selected {\n\n    &.edge .inner {\n      stroke: $edge-selected-inner-stroke;\n    }\n\n    &.node .outer {\n      stroke: $node-selected-outer-stroke;\n    }\n\n  }\n}"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/selection/theme.css",
    "content": ".cwl-workflow.__plugin-selection .node,\n.cwl-workflow.__plugin-selection .edge {\n  cursor: pointer; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight) .outer {\n  stroke: #e6e6e6; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight) .inner {\n  fill: #c1d4d3; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight).input .inner {\n  fill: #f7f7f7; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight).output .inner {\n  fill: #f7f7f7; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight) .label {\n  fill: #7e7d7d; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight) .port {\n  fill: #e6e6e6; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .edge:not(.__selection-plugin-highlight) .inner {\n  stroke: #e6e6e6; }\n\n.cwl-workflow.__plugin-selection .port.__selection-plugin-highlight .label {\n  opacity: 1; }\n\n.cwl-workflow.__plugin-selection .__selection-plugin-selected.edge .inner {\n  stroke: #11a7a7; }\n\n.cwl-workflow.__plugin-selection .__selection-plugin-selected.node .outer {\n  stroke: #11a7a7; }\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/selection/theme.dark.css",
    "content": ".cwl-workflow.__plugin-selection .node,\n.cwl-workflow.__plugin-selection .edge {\n  cursor: pointer; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight) .outer {\n  stroke: #444343; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight) .inner {\n  fill: #216b6b; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight).input .inner {\n  fill: #838383; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight).output .inner {\n  fill: #838383; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight) .label {\n  fill: #7e7d7d; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .node:not(.__selection-plugin-highlight) .port {\n  fill: #444343; }\n\n.cwl-workflow.__plugin-selection.__selection-plugin-fade .edge:not(.__selection-plugin-highlight) .inner {\n  stroke: #444343; }\n\n.cwl-workflow.__plugin-selection .port.__selection-plugin-highlight .label {\n  opacity: 1; }\n\n.cwl-workflow.__plugin-selection .__selection-plugin-selected.edge .inner {\n  stroke: #11a7a7; }\n\n.cwl-workflow.__plugin-selection .__selection-plugin-selected.node .outer {\n  stroke: #11a7a7; }\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/selection/theme.dark.scss",
    "content": "@import \"../../assets/styles/themes/rabix-dark/variables\";\n\n$color-neutral-faded: #444343 !default;\n$io-faded-fill: #838383 !default;\n\n$node-selected-outer-stroke: $color-primary !default;\n$edge-selected-inner-stroke: $color-primary !default;\n\n$node-faded-outer-stroke-color: $color-neutral-faded !default;\n$node-faded-step-fill-color: #216b6b !default;\n$node-faded-input-fill-color: $io-faded-fill !default;\n$node-faded-output-fill-color: $io-faded-fill !default;\n\n$label-faded-color: #7e7d7d !default;\n$port-faded-fill-color: $color-neutral-faded !default;\n$edge-faded-inner-stroke-color: $color-neutral-faded !default;\n\n@import \"variables\";\n@import \"./style.scss\";\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/selection/theme.scss",
    "content": "@import \"variables\";\n@import \"./style.scss\";\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/validate/validate.css",
    "content": "svg.cwl-workflow.__plugin-validate .workflow .__validate-invalid .inner {\n  stroke: #f89406; }\n\nsvg.cwl-workflow.__plugin-validate .workflow.has-selection .__validate-invalid:not(.highlighted) .inner {\n  stroke: #7c4a03; }\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/validate/validate.scss",
    "content": "$edge-invalid-fill: #f89406;\n\nsvg.cwl-workflow.__plugin-validate .workflow {\n\n    .__validate-invalid .inner {\n        stroke: $edge-invalid-fill;\n    }\n\n    &.has-selection {\n        .__validate-invalid:not(.highlighted) .inner {\n            stroke: darken($edge-invalid-fill, 25%);\n        }\n    }\n}"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/validate/validate.ts",
    "content": "import {Edge}          from \"cwlts/models\";\nimport {PluginBase}    from \"../plugin-base\";\nimport { Workflow } from \"lib/cwl-svg\";\n\nexport class SVGValidatePlugin extends PluginBase {\n\n    private modelDisposers: any[] = [];\n\n    /** Map of CSS classes attached by this plugin */\n    private css = {\n        plugin: \"__plugin-validate\",\n        invalid: \"__validate-invalid\"\n    };\n\n    registerWorkflow(workflow: Workflow): void {\n        super.registerWorkflow(workflow);\n\n        // add plugin specific class to the svgRoot for scoping\n        this.workflow.svgRoot.classList.add(this.css.plugin);\n    }\n\n    afterModelChange(): void {\n\n        this.disposeModelListeners();\n\n        // add listener for all subsequent edge validation\n        const update = this.workflow.model.on(\"connections.updated\", this.renderEdgeValidation.bind(this));\n        const create = this.workflow.model.on(\"connection.create\", this.renderEdgeValidation.bind(this));\n\n        this.modelDisposers.concat([update.dispose, create.dispose]);\n    }\n\n    destroy(): void {\n        this.disposeModelListeners();\n    }\n\n    afterRender(): void {\n        // do initial validation rendering for edges\n        this.renderEdgeValidation();\n    }\n\n    onEditableStateChange(enabled: boolean): void {\n\n        if (enabled) {\n            // only show validation if workflow is editable\n            this.renderEdgeValidation();\n        } else {\n            this.removeClasses(this.workflow.workflow.querySelectorAll(\".edge\"))\n        }\n    }\n\n    private disposeModelListeners(): void {\n        for (let disposeListener of this.modelDisposers) {\n            disposeListener();\n        }\n        this.modelDisposers = [];\n    }\n\n    private removeClasses(edges: NodeListOf<Element>): void {\n        // remove validity class on all edges\n        for (const e of (edges as any)) {\n            e.classList.remove(this.css.invalid);\n        }\n    }\n\n    private renderEdgeValidation(): void {\n        const graphEdges: any = this.workflow.workflow.querySelectorAll(\".edge\") as NodeListOf<Element>;\n\n        this.removeClasses(graphEdges);\n\n        // iterate through all modal connections\n        this.workflow.model.connections.forEach((e: Edge) => {\n            // if the connection isn't valid (should be colored on graph)\n            if (!e.isValid) {\n\n                // iterate through edges on the svg\n                for (const ge of graphEdges) {\n                    const sourceNodeID      = ge.getAttribute(\"data-source-connection\");\n                    const destinationNodeID = ge.getAttribute(\"data-destination-connection\");\n\n                    // compare invalid edge source/destination with svg edge\n                    if (e.source.id === sourceNodeID && e.destination.id === destinationNodeID) {\n                        // if its a match, tag it with the appropriate class and break from the loop\n                        ge.classList.add(this.css.invalid);\n                        break;\n                    }\n                }\n            }\n        });\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/zoom/index.ts",
    "content": "export * from \"./zoom\";"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/plugins/zoom/zoom.ts",
    "content": "import {Workflow}   from \"../..\";\nimport {PluginBase} from \"../plugin-base\";\n\nexport class ZoomPlugin extends PluginBase {\n    private svg: SVGSVGElement;\n    private dispose: Function | undefined;\n\n    registerWorkflow(workflow: Workflow): void {\n        super.registerWorkflow(workflow);\n        this.svg = workflow.svgRoot;\n\n        this.dispose = this.attachWheelListener();\n    }\n\n    attachWheelListener(): () => void {\n        const handler = this.onMouseWheel.bind(this);\n        this.svg.addEventListener(\"mousewheel\", handler, true);\n        return () => this.svg.removeEventListener(\"mousewheel\", handler, true);\n    }\n\n    onMouseWheel(event: MouseWheelEvent) {\n\n        const scale       = this.workflow.scale;\n        const scaleUpdate = scale - event.deltaY / 500;\n\n        const zoominOut = scaleUpdate < scale;\n        const zoomingIn = scaleUpdate > scale;\n\n        if (zoomingIn && this.workflow.maxScale < scaleUpdate) {\n            return;\n        }\n\n        if (zoominOut && this.workflow.minScale > scaleUpdate) {\n            return;\n        }\n\n        this.workflow.scaleAtPoint(scaleUpdate, event.clientX, event.clientY);\n        event.stopPropagation();\n    }\n\n    destroy(): void {\n        if (typeof this.dispose === \"function\") {\n            this.dispose();\n        }\n\n        this.dispose = undefined;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/utils/dom-events.ts",
    "content": "export class DomEvents {\n\n    private handlers = new Map<{ removeEventListener: Function }, { [key: string]: Function[] }>();\n\n    constructor(private root: HTMLElement) {\n\n    }\n\n    public on(event: string, selector: string, handler: (event: UIEvent, target?: Element, root?: Element) => any, root?: Element): Function;\n    public on(event: string, handler: (event: UIEvent, target?: Element, root?: Element) => any, root?: Element): Function;\n    public on(...args: any[]) {\n\n        const event    = args.shift();\n        const selector = typeof args[0] === \"string\" ? args.shift() : undefined;\n        const handler  = typeof args[0] === \"function\" ? args.shift() : () => {\n        };\n        const root     = args.shift();\n\n        const eventHolder = root || this.root;\n\n        if (!this.handlers.has(eventHolder)) {\n            this.handlers.set(eventHolder, {});\n        }\n        if (!this.handlers.get(eventHolder)![event]) {\n            this.handlers.get(eventHolder)![event] = [];\n        }\n\n        const evListener = (ev: UIEvent) => {\n            let target: any;\n            if (selector) {\n                const selected = Array.from(this.root.querySelectorAll(selector));\n                target         = ev.target as HTMLElement;\n                while (target) {\n                    // eslint-disable-next-line\n                    if (selected.find(el => el === target)) {\n                        break;\n                    }\n                    target = target.parentNode;\n                }\n\n                if (!target) {\n                    return;\n                }\n            }\n\n            const handlerOutput = handler(ev, target || ev.target, this.root);\n            if (handlerOutput === false) {\n                return false;\n            }\n\n            return false;\n        };\n\n        eventHolder.addEventListener(event, evListener);\n\n        this.handlers.get(eventHolder)![event].push(evListener);\n\n        return function off() {\n            eventHolder.removeEventListener(event, evListener);\n        }\n    }\n\n    public keyup() {\n\n    }\n\n    public adaptedDrag(selector: string,\n                       move?: (dx: number, dy: number, event: UIEvent, target?: Element, root?: Element) => any,\n                       start?: (event: UIEvent, target?: Element, root?: Element) => any,\n                       end?: (event: UIEvent, target?: Element, root?: Element) => any) {\n\n        let dragging       = false;\n        let lastMove: MouseEvent | undefined;\n        let draggedEl: Element | undefined;\n        let moveEventCount = 0;\n        let mouseDownEv: MouseEvent;\n        let threshold      = 3;\n        let mouseOverListeners: EventListener[];\n\n        const onMouseDown = (ev: MouseEvent, el: Element) => {\n            dragging    = true;\n            lastMove    = ev;\n            draggedEl   = el;\n            mouseDownEv = ev;\n\n            ev.preventDefault();\n\n            mouseOverListeners = this.detachHandlers(\"mouseover\");\n\n            document.addEventListener(\"mousemove\", moveHandler);\n            document.addEventListener(\"mouseup\", upHandler);\n\n            return false;\n        };\n\n        const off = this.on(\"mousedown\", selector, onMouseDown);\n\n        const moveHandler = (ev: MouseEvent) => {\n            if (!dragging) {\n                return;\n            }\n\n            const dx = ev.screenX - lastMove!.screenX;\n            const dy = ev.screenY - 
lastMove!.screenY;\n            moveEventCount++;\n\n            if (moveEventCount === threshold && typeof start === \"function\") {\n                start(mouseDownEv, draggedEl, this.root);\n            }\n\n            if (moveEventCount >= threshold && typeof move === \"function\") {\n                move(dx, dy, ev, draggedEl, this.root);\n            }\n        };\n        const upHandler   = (ev: MouseEvent) => {\n            if (moveEventCount >= threshold) {\n                if (dragging) {\n                    if (typeof end === \"function\") {\n                        end(ev, draggedEl, this.root);\n                    }\n                }\n\n                const parentNode        = draggedEl!.parentNode;\n                const clickCancellation = (ev: MouseEvent) => {\n                    ev.stopPropagation();\n                    parentNode!.removeEventListener(\"click\", clickCancellation, true);\n                };\n                parentNode!.addEventListener(\"click\", clickCancellation, true);\n            }\n\n            dragging       = false;\n            draggedEl      = undefined;\n            lastMove       = undefined;\n            moveEventCount = 0;\n            document.removeEventListener(\"mouseup\", upHandler);\n            document.removeEventListener(\"mousemove\", moveHandler);\n\n            // Re-attach the mouseover listeners that were detached on drag start,\n            // resetting the registry once so that every listener stays tracked\n            this.handlers.get(this.root)![\"mouseover\"] = [];\n            for (const listener of mouseOverListeners) {\n                this.root.addEventListener(\"mouseover\", listener);\n                this.handlers.get(this.root)![\"mouseover\"].push(listener);\n            }\n        };\n\n        return off;\n    }\n\n    public drag(selector: string,\n                move?: (dx: number, dy: number, event: UIEvent, target?: Element, root?: Element) => any,\n                start?: (event: UIEvent, target?: Element, root?: Element) => any,\n                end?: (event: UIEvent, target?: Element, root?: Element) => any) {\n\n        let dragging       = false;\n        let lastMove: MouseEvent | undefined;\n        let draggedEl: Element | undefined;\n        let moveEventCount = 0;\n        let mouseDownEv: MouseEvent;\n        let threshold      = 3;\n        let mouseOverListeners: EventListener[];\n\n        const onMouseDown = (ev: MouseEvent, el: Element, root: Element) => {\n            dragging    = true;\n            lastMove    = ev;\n            draggedEl   = el;\n            mouseDownEv = ev;\n\n            ev.preventDefault();\n\n            mouseOverListeners = this.detachHandlers(\"mouseover\");\n\n            document.addEventListener(\"mousemove\", moveHandler);\n            document.addEventListener(\"mouseup\", upHandler);\n\n            return false;\n        };\n\n        const off = this.on(\"mousedown\", selector, onMouseDown);\n\n        const moveHandler = (ev: MouseEvent) => {\n            if (!dragging) {\n                return;\n            }\n\n            const dx = ev.screenX - lastMove!.screenX;\n            const dy = ev.screenY - lastMove!.screenY;\n            moveEventCount++;\n\n            if (moveEventCount === threshold && typeof start === \"function\") {\n                start(mouseDownEv, draggedEl, this.root);\n            }\n\n            if (moveEventCount >= threshold && typeof move === \"function\") {\n                move(dx, dy, ev, draggedEl, this.root);\n            }\n        };\n\n        const upHandler = (ev: MouseEvent) => {\n\n            if (moveEventCount >= threshold) {\n                
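// the gesture counts as a completed drag only once the move-event threshold was crossed\n                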
if (dragging) {\n                    if (typeof end === \"function\") {\n                        end(ev, draggedEl, this.root)\n                    }\n                }\n\n                // When releasing the mouse button, if it happens over the same element that we initially had\n                // the mouseDown event, it will trigger a click event. We want to stop that, so we intercept\n                // it by capturing click top-down and stopping its propagation.\n                // However, if the mouseUp didn't happen above the starting element, it wouldn't trigger a click,\n                // but it would intercept the next (unrelated) click event unless we prevent interception in the\n                // first place by checking if we released above the starting element.\n                if (draggedEl!.contains(ev.target as Node)) {\n                    const parentNode = draggedEl!.parentNode;\n\n                    const clickCancellation = (ev: MouseEvent) => {\n                        ev.stopPropagation();\n                        parentNode!.removeEventListener(\"click\", clickCancellation, true);\n                    };\n                    parentNode!.addEventListener(\"click\", clickCancellation, true);\n                }\n\n            }\n\n            dragging       = false;\n            draggedEl      = undefined;\n            lastMove       = undefined;\n            moveEventCount = 0;\n            document.removeEventListener(\"mouseup\", upHandler);\n            document.removeEventListener(\"mousemove\", moveHandler);\n\n\n            for (let i in mouseOverListeners) {\n                this.root.addEventListener(\"mouseover\", mouseOverListeners[i]);\n                this.handlers.get(this.root)![\"mouseover\"] = [];\n                this.handlers.get(this.root)![\"mouseover\"].push(mouseOverListeners[i]);\n            }\n        };\n\n        return off;\n    }\n\n    public hover(element: HTMLElement,\n                 hover: (event: UIEvent, target?: HTMLElement, root?: HTMLElement) => any = () => {},\n                 enter: (event: UIEvent, target?: HTMLElement, root?: HTMLElement) => any = () => {},\n                 leave: (event: UIEvent, target?: HTMLElement, root?: HTMLElement) => any = () => {}) {\n\n        let hovering = false;\n\n        element.addEventListener(\"mouseenter\", (ev: MouseEvent) => {\n            hovering = true;\n            enter(ev, element, this.root);\n\n        });\n\n        element.addEventListener(\"mouseleave\", (ev) => {\n            hovering = false;\n            leave(ev, element, this.root);\n        });\n\n        element.addEventListener(\"mousemove\", (ev) => {\n            if (!hovering) {\n                return;\n            }\n            hover(ev, element, this.root);\n        });\n    }\n\n    public detachHandlers(evName: string, root?: HTMLElement): EventListener[] {\n        root                                = root || this.root;\n        let eventListeners: EventListener[] = [];\n        this.handlers.forEach((handlers: { [event: string]: EventListener[] }, listenerRoot: Element) => {\n            if (listenerRoot.id !== root!.id || listenerRoot !== root) {\n                return;\n            }\n            for (let eventName in handlers) {\n                if (eventName !== evName) {\n                    continue;\n                }\n                handlers[eventName].forEach((handler) => {\n                    eventListeners.push(handler);\n                    listenerRoot.removeEventListener(eventName, 
handler);\n                });\n            }\n        });\n\n        // Clear the stored handler list for this event on the root we actually detached from.\n        const rootHandlers = this.handlers.get(root);\n        if (rootHandlers) {\n            delete rootHandlers[evName];\n        }\n\n        return eventListeners;\n    }\n\n    public detachAll() {\n        this.handlers.forEach((handlers: { [event: string]: EventListener[] }, listenerRoot: Element) => {\n            for (let eventName in handlers) {\n                handlers[eventName].forEach(handler => listenerRoot.removeEventListener(eventName, handler));\n            }\n        });\n\n        this.handlers.clear();\n    }\n}\n
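\n// Usage sketch (assumes the class is constructed with its root element, as elsewhere in\n// cwl-svg; the selector and callbacks below are illustrative, not part of the class):\n//\n//   const events = new DomEvents(svgRoot as HTMLElement);\n//   const off = events.drag(\".node\",\n//       (dx, dy) => console.log(\"dragged by\", dx, dy),\n//       () => console.log(\"drag started\"),\n//       () => console.log(\"drag ended\"));\n//   off();              // presumably detaches the underlying \"mousedown\" listener\n//   events.detachAll(); // or drop every handler registered through this instance\n"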
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/utils/dynamic-stylesheet.ts",
    "content": "import {Workflow} from \"..\";\n\nexport class DynamicStylesheet {\n    private styleElement: HTMLStyleElement;\n    private scopedSelector: string;\n    private innerStyle = \"\";\n\n    constructor(workflow: Workflow) {\n\n        this.styleElement      = document.createElement(\"style\");\n        this.styleElement.type = \"text/css\";\n\n        this.scopedSelector = `svg.${workflow.svgID}`;\n\n        document.getElementsByTagName(\"head\")[0].appendChild(this.styleElement);\n    }\n\n    remove() {\n        this.styleElement.remove();\n    }\n\n    set(style: string) {\n        this.innerStyle = style;\n\n        this.styleElement.innerHTML = `\n            ${this.scopedSelector} {\n                ${this.innerStyle}\n            }\n        `\n    }\n\n\n\n\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/utils/event-hub.ts",
    "content": "export class EventHub {\n    public readonly handlers: { [event: string]: Function[] };\n\n    constructor(validEventList: string[]) {\n        this.handlers = validEventList.reduce((acc, ev) => Object.assign(acc, {[ev]: []}), {});\n    }\n\n    on(event: string, handler: Function) {\n        this.guard(event, \"subscribe to\");\n        this.handlers[event].push(handler);\n\n        return () => this.off(event, handler);\n    }\n\n    off(event: string, handler: Function) {\n        this.guard(event, \"unsubscribe from\");\n        return this.handlers[event].splice(this.handlers[event].findIndex(h => handler === h), 1);\n    }\n\n    emit(event: string, ...data: any[]) {\n        this.guard(event, \"emit\");\n        for (let i = 0; i < this.handlers[event].length; i++) {\n            this.handlers[event][i](...data);\n        }\n    }\n\n    empty() {\n        for (let event in this.handlers) {\n            this.handlers[event] = [];\n        }\n    }\n\n    private guard(event: string, verb: string) {\n        if (!this.handlers[event]) {\n            console.warn(`Trying to ${verb} a non-supported event “${event}”. \n            Supported events are: ${Object.keys(this.handlers).join(\", \")}”`);\n        }\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/utils/geometry.ts",
    "content": "export class Geometry {\n\n    static distance(x1: number, y1: number, x2: number, y2: number) {\n        return Math.sqrt(Math.pow(x2 - x1, 2) + Math.pow(y2 - y1, 2));\n    }\n\n    static getTransformToElement(from: SVGElement, to: SVGElement) {\n        const getPosition = (node: SVGGElement, addE = 0, addF = 0): SVGMatrix => {\n\n            if (!node.ownerSVGElement) {\n                // node is the root svg element\n                const matrix = (node as SVGSVGElement).createSVGMatrix();\n                matrix.e = addE;\n                matrix.f = addF;\n                return matrix;\n            } else {\n                // node still has parent elements\n                const {e, f} = node.transform.baseVal.getItem(0).matrix;\n                return getPosition(node.parentNode as SVGGElement, e + addE, f + addF);\n            }\n        };\n\n        const toPosition = getPosition(to as SVGAElement);\n        const fromPosition = getPosition(from as SVGAElement);\n\n        const result = from.ownerSVGElement!.createSVGMatrix();\n        result.e = toPosition.e - fromPosition.e;\n        result.f = toPosition.f - fromPosition.f;\n\n        return result.inverse();\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/utils/html-utils.ts",
    "content": "export class HtmlUtils {\n\n    private static entityMap = {\n        \"&\": \"&amp;\",\n        \"<\": \"&lt;\",\n        \">\": \"&gt;\",\n        \"\\\"\\\"\": \"&quot;\",\n        \"'\": \"&#39;\",\n        \"/\": \"&#x2F;\"\n    };\n\n    public static escapeHTML(source: string): string {\n        return String(source).replace(/[&<>\"'/]/g, s => HtmlUtils.entityMap[s]);\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/utils/perf.ts",
    "content": "export class Perf {\n\n    static DEFAULT_THROTTLE = 1;\n\n    public static throttle(fn: Function, threshold = Perf.DEFAULT_THROTTLE, context?: any): Function {\n        let last: any, deferTimer: any;\n\n        return function () {\n            // @ts-ignore\n            const scope = context || this;\n\n            let now  = +new Date,\n                args = arguments;\n            if (last && now < last + threshold) {\n                clearTimeout(deferTimer);\n                deferTimer = setTimeout(function () {\n                    last = now;\n                    fn.apply(scope, args);\n                }, threshold);\n            } else {\n                last = now;\n                fn.apply(scope, args);\n            }\n        };\n    }\n\n}\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/utils/svg-dumper.ts",
    "content": "export class SvgDumper {\n\n    private containerElements = [\"svg\", \"g\"];\n    private embeddableStyles  = {\n        \"rect\": [\"fill\", \"stroke\", \"stroke-width\"],\n        \"path\": [\"fill\", \"stroke\", \"stroke-width\"],\n        \"circle\": [\"fill\", \"stroke\", \"stroke-width\"],\n        \"line\": [\"stroke\", \"stroke-width\"],\n        \"text\": [\"fill\", \"font-size\", \"text-anchor\", \"font-family\"],\n        \"polygon\": [\"stroke\", \"fill\"]\n    };\n\n    constructor(private svg: SVGSVGElement) {\n        this.svg = svg\n    }\n\n    dump({padding} = {padding: 50}): string {\n        this.adaptViewbox(this.svg, padding);\n        const clone = this.svg.cloneNode(true) as SVGSVGElement;\n\n        const portLabels: any = clone.querySelectorAll(\".port .label\");\n\n\n        for (const label of portLabels) {\n            label.parentNode.removeChild(label);\n        }\n\n        this.treeShakeStyles(clone, this.svg);\n\n        // Remove panning handle so we don't have to align it\n        const panHandle = clone.querySelector(\".pan-handle\");\n        if (panHandle) {\n            clone.removeChild(panHandle);\n        }\n\n        return new XMLSerializer().serializeToString(clone);\n\n    }\n\n    private adaptViewbox(svg: SVGSVGElement, padding = 50) {\n        const workflow = svg.querySelector(\".workflow\");\n        const rect     = workflow!.getBoundingClientRect();\n\n        const origin = this.getPointOnSVG(rect.left, rect.top);\n\n        const viewBox  = this.svg.viewBox.baseVal;\n        viewBox.x      = origin.x - padding / 2;\n        viewBox.y      = origin.y - padding / 2;\n        viewBox.height = rect.height + padding;\n        viewBox.width  = rect.width + padding;\n\n    }\n\n    private getPointOnSVG(x: number, y: number): SVGPoint {\n        const svgCTM = this.svg.getScreenCTM();\n        const point  = this.svg.createSVGPoint();\n        point.x      = x;\n        point.y      = y;\n\n        return point.matrixTransform(svgCTM!.inverse());\n\n    }\n\n    private treeShakeStyles(clone: SVGElement, original: SVGElement) {\n\n        const children             = clone.childNodes;\n        const originalChildrenData = original.childNodes as NodeListOf<SVGElement>;\n\n\n        for (let childIndex = 0; childIndex < children.length; childIndex++) {\n\n            const child   = children[childIndex] as SVGElement;\n            const tagName = child.tagName;\n\n            if (this.containerElements.indexOf(tagName) !== -1) {\n                this.treeShakeStyles(child, originalChildrenData[childIndex]);\n            } else if (tagName in this.embeddableStyles) {\n\n                const styleDefinition = window.getComputedStyle(originalChildrenData[childIndex]);\n\n                let styleString = \"\";\n                for (let st = 0; st < this.embeddableStyles[tagName].length; st++) {\n                    styleString +=\n                        this.embeddableStyles[tagName][st]\n                        + \":\"\n                        + styleDefinition.getPropertyValue(this.embeddableStyles[tagName][st])\n                        + \"; \";\n                }\n\n                child.setAttribute(\"style\", styleString);\n            }\n        }\n    }\n}\n\n"
  },
  {
    "path": "services/workbench2/src/lib/cwl-svg/utils/svg-utils.ts",
    "content": "export class SVGUtils {\n    static matrixToTransformAttr(matrix: SVGMatrix): string {\n        const {a, b, c, d, e, f} = matrix;\n        return `matrix(${a}, ${b}, ${c}, ${d}, ${e}, ${f})`;\n    }\n\n    static createMatrix(): SVGMatrix {\n        return document.createElementNS(\"http://www.w3.org/2000/svg\", \"svg\").createSVGMatrix();\n\n    }\n}"
  },
  {
    "path": "services/workbench2/src/lib/resource-properties.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { addProperty, deleteProperty } from \"./resource-properties\";\nimport { omit, isEqual } from \"lodash\";\n\ndescribe(\"Resource properties lib\", () => {\n\n    let properties;\n\n    beforeEach(() => {\n        properties = {\n            animal: 'dog',\n            color: ['brown', 'black'],\n            name: ['Toby']\n        }\n    })\n\n    it(\"should convert a single string value into a list when adding values\", () => {\n        expect(isEqual(addProperty(properties, 'animal', 'cat'), {...properties, animal: ['dog', 'cat']})).to.equal(true);\n    });\n\n    it(\"should convert a 2 value list into a string when removing values\", () => {\n        expect(isEqual(deleteProperty(properties, 'color', 'brown'), {...properties, color: 'black'})).to.equal(true);\n    });\n\n    it(\"shouldn't add duplicated key:value items\", () => {\n        expect(isEqual(addProperty(properties, 'name', 'Toby'), properties)).to.equal(true);\n    });\n\n    it(\"should remove the key when deleting from a one value list\", () => {\n        expect(isEqual(deleteProperty(properties, 'name', 'Toby'), omit(properties, 'name'))).to.equal(true);\n    });\n\n    it(\"should return the same when deleting non-existant value\", () => {\n        expect(isEqual(deleteProperty(properties, 'animal', 'dolphin'), properties)).to.equal(true);\n    });\n\n    it(\"should return the same when deleting non-existant key\", () => {\n        expect(isEqual(deleteProperty(properties, 'doesntexist', 'something'), properties)).to.equal(true);\n    });\n});"
  },
  {
    "path": "services/workbench2/src/lib/resource-properties.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport const deleteProperty = (properties: any, key: string, value: string) => {\n    if (Array.isArray(properties[key])) {\n        properties[key] = properties[key].filter((v: string) => v !== value);\n        if (properties[key].length === 1) {\n            properties[key] = properties[key][0];\n        } else if (properties[key].length === 0) {\n            delete properties[key];\n        }\n    } else if (properties[key] === value) {\n        delete properties[key];\n    }\n    return properties;\n}\n\nexport const addProperty = (properties: any, key: string, value: string) => {\n    if (properties[key]) {\n        if (Array.isArray(properties[key])) {\n            properties[key] = [...properties[key], value];\n        } else {\n            properties[key] = [properties[key], value];\n        }\n        // Remove potential duplicate and save as single value if needed\n        properties[key] = Array.from(new Set(properties[key]));\n        if (properties[key].length === 1) {\n            properties[key] = properties[key][0];\n        }\n    } else {\n        properties[key] = value;\n    }\n    return properties;\n}"
  },
  {
    "path": "services/workbench2/src/models/api-client-authorization.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource } from 'models/resource';\n\nexport interface ApiClientAuthorization extends Resource {\n    uuid: string;\n    apiToken: string;\n    userId: number;\n    createdByIpAddress: string;\n    lastUsedByIpAddress: string;\n    lastUsedAt: string;\n    expiresAt: string;\n    createdAt: string;\n    updatedAt: string;\n    ownerUuid: string;\n    scopes: string[];\n}\n\nexport const getTokenV2 = (aca: ApiClientAuthorization): string =>\n    `v2/${aca.uuid}/${aca.apiToken}`;\n"
  },
  {
    "path": "services/workbench2/src/models/client-authorization.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport interface ClientAuthorizationResource {\n    uuid: string;\n    apiToken: string;\n    userId: number;\n    createdByIpAddress: string;\n    lastUsedByIpAddress: string;\n    lastUsedAt: string;\n    expiresAt: string;\n    ownerUuid: string;\n    scopes: string[];\n}\n"
  },
  {
    "path": "services/workbench2/src/models/collection-file.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Tree, createTree, setNode, TreeNodeStatus } from './tree';\nimport { head, split, pipe, join } from 'lodash/fp';\n\nexport type CollectionFilesTree = Tree<CollectionDirectory | CollectionFile>;\n\nexport enum CollectionFileType {\n    DIRECTORY = 'directory',\n    FILE = 'file'\n}\n\nexport interface CollectionDirectory {\n    path: string;\n    url: string;\n    id: string;\n    name: string;\n    type: CollectionFileType.DIRECTORY;\n}\n\nexport interface CollectionFile {\n    path: string;\n    url: string;\n    id: string;\n    name: string;\n    size: number;\n    type: CollectionFileType.FILE;\n}\n\nexport interface CollectionUploadFile {\n    name: string;\n}\n\nexport const createCollectionDirectory = (data: Partial<CollectionDirectory>): CollectionDirectory => ({\n    id: '',\n    name: '',\n    path: '',\n    url: '',\n    type: CollectionFileType.DIRECTORY,\n    ...data\n});\n\nexport const createCollectionFile = (data: Partial<CollectionFile>): CollectionFile => ({\n    id: '',\n    name: '',\n    path: '',\n    url: '',\n    size: 0,\n    type: CollectionFileType.FILE,\n    ...data\n});\n\nexport const createCollectionFilesTree = (data: Array<CollectionDirectory | CollectionFile>, joinParents: Boolean = true) => {\n    const directories = data.filter(item => item.type === CollectionFileType.DIRECTORY);\n    directories.sort((a, b) => a.path.localeCompare(b.path));\n    const files = data.filter(item => item.type === CollectionFileType.FILE);\n    return [...directories, ...files]\n        .reduce((tree, item) => setNode({\n            children: [],\n            id: item.id,\n            parent: joinParents ? getParentId(item) : '',\n            value: item,\n            active: false,\n            selected: false,\n            expanded: false,\n            status: TreeNodeStatus.INITIAL\n        })(tree), createTree<CollectionDirectory | CollectionFile>());\n};\n\nconst getParentId = (item: CollectionDirectory | CollectionFile) =>\n    item.path\n        ? join('', [getCollectionResourceCollectionUuid(item.id), item.path])\n        : item.path;\n\nexport const getCollectionResourceCollectionUuid = pipe(\n    split('/'),\n    head,\n);\n"
  },
  {
    "path": "services/workbench2/src/models/collection.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport {\n    Resource,\n    ResourceKind,\n    TrashableResource,\n    ResourceWithProperties\n} from \"./resource\";\n\nexport interface CollectionResource extends TrashableResource, ResourceWithProperties {\n    kind: ResourceKind.COLLECTION;\n    name: string;\n    description: string | null;\n    portableDataHash: string;\n    manifestText: string;\n    replicationDesired: number;\n    replicationConfirmed: number;\n    replicationConfirmedAt: string;\n    storageClassesDesired: string[];\n    storageClassesConfirmed: string[];\n    storageClassesConfirmedAt: string;\n    currentVersionUuid: string;\n    version: number;\n    preserveVersion: boolean;\n    unsignedManifestText?: string;\n    fileCount: number;\n    fileSizeTotal: number;\n}\n\n// We exclude 'manifestText' and 'unsignedManifestText' from the default\nexport const defaultCollectionSelectedFields = [\n    'name',\n    'description',\n    'portableDataHash',\n    'replicationDesired',\n    'replicationConfirmed',\n    'replicationConfirmedAt',\n    'storageClassesDesired',\n    'storageClassesConfirmed',\n    'storageClassesConfirmedAt',\n    'currentVersionUuid',\n    'version',\n    'preserveVersion',\n    'fileCount',\n    'fileSizeTotal',\n    // ResourceWithProperties field\n    'properties',\n    // TrashableResource fields\n    'trashAt',\n    'deleteAt',\n    'isTrashed',\n    // Resource fields\n    'uuid',\n    'ownerUuid',\n    'createdAt',\n    'modifiedByUserUuid',\n    'modifiedAt',\n    'kind',\n    'etag',\n];\n\nexport const getCollectionUrl = (uuid: string) => {\n    return `/collections/${uuid}`;\n};\n\nexport const isCollectionResource = (resource?: Resource): resource is CollectionResource => {\n    return !!resource && resource.kind === ResourceKind.COLLECTION;\n};\n\nexport const isCollectionResourceLatestVersion = (resource?: Resource): boolean => {\n    return isCollectionResource(resource) && resource.uuid === resource.currentVersionUuid;\n};\n\nexport enum CollectionType {\n    GENERAL = 'nil',\n    OUTPUT = 'output',\n    LOG = 'log',\n    INTERMEDIATE = 'intermediate',\n}\n"
  },
  {
    "path": "services/workbench2/src/models/container-request.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource, ResourceKind, ResourceWithProperties } from './resource';\nimport { MountType } from 'models/mount-types';\nimport { RuntimeConstraints } from './runtime-constraints';\nimport { SchedulingParameters } from './scheduling-parameters';\n\nexport enum ContainerRequestState {\n    UNCOMMITTED = 'Uncommitted',\n    COMMITTED = 'Committed',\n    FINAL = 'Final',\n}\n\nexport interface ContainerRequestResource\nextends Resource, ResourceWithProperties {\n    command: string[];\n    containerCountMax: number;\n    containerCount: number;\n    containerImage: string;\n    containerUuid: string | null;\n    cumulativeCost: number;\n    cwd: string;\n    description: string | null;\n    environment: any;\n    expiresAt: string;\n    filters: string;\n    kind: ResourceKind.CONTAINER_REQUEST;\n    logUuid: string | null;\n    mounts: { [path: string]: MountType };\n    name: string;\n    outputName: string;\n    outputPath: string;\n    outputProperties: any;\n    outputStorageClasses: string[];\n    outputTtl: number;\n    outputUuid: string | null;\n    outputGlob: string[];\n    priority: number | null;\n    requestingContainerUuid: string | null;\n    runtimeConstraints: RuntimeConstraints;\n    schedulingParameters: SchedulingParameters;\n    state: ContainerRequestState;\n    useExisting: boolean;\n}\n\n// Until the api supports unselecting fields, we need a list of all other fields to omit mounts\nexport const containerRequestFieldsNoMounts = [\n    \"command\",\n    \"container_count_max\",\n    \"container_count\",\n    \"container_image\",\n    \"container_uuid\",\n    \"created_at\",\n    \"cumulative_cost\",\n    \"cwd\",\n    \"description\",\n    \"environment\",\n    \"etag\",\n    \"expires_at\",\n    \"filters\",\n    \"kind\",\n    \"log_uuid\",\n    \"modified_at\",\n    \"modified_by_user_uuid\",\n    \"name\",\n    \"output_name\",\n    \"output_path\",\n    \"output_properties\",\n    \"output_storage_classes\",\n    \"output_ttl\",\n    \"output_uuid\",\n    \"output_glob\",\n    \"owner_uuid\",\n    \"priority\",\n    \"properties\",\n    \"published_ports\",\n    \"requesting_container_uuid\",\n    \"runtime_constraints\",\n    \"scheduling_parameters\",\n    \"state\",\n    \"use_existing\",\n    \"uuid\",\n];\n\nexport const isContainerRequestResource = (resource?: Resource): resource is ContainerRequestResource => {\n    return !!resource && resource.kind === ResourceKind.CONTAINER_REQUEST;\n};\n\nexport interface ContainerStatus {\n    uuid: string;\n    state: string;\n    schedulingStatus: string;\n};\n"
  },
  {
    "path": "services/workbench2/src/models/container.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource, ResourceKind } from \"./resource\";\nimport { MountType } from 'models/mount-types';\nimport { RuntimeConstraints } from \"models/runtime-constraints\";\nimport { SchedulingParameters } from './scheduling-parameters';\nimport { RuntimeStatus } from \"./runtime-status\";\n\nexport enum ContainerState {\n    QUEUED = 'Queued',\n    LOCKED = 'Locked',\n    RUNNING = 'Running',\n    COMPLETE = 'Complete',\n    CANCELLED = 'Cancelled',\n    UNCOMMITTED = 'Uncommitted',\n}\n\n/**\n * Schema for published service ports\n * camelcase is not used due to canonical mapKeys behavior with certain nested structures\n * base_url, initial_url, and external_port are observed to not always be present\n */\nexport type PublishedPort = {\n    access: 'public' | 'private';\n    label: string;\n    base_url?: string;\n    initial_path: string;\n    initial_url?: string;\n    external_port?: number;\n};\n\nexport interface ContainerResource extends Resource {\n    kind: ResourceKind.CONTAINER;\n    state: string;\n    startedAt: string | null;\n    finishedAt: string | null;\n    log: string | null;\n    environment: {};\n    cwd: string;\n    command: string[];\n    cost: number;\n    outputPath: string;\n    mounts: MountType[];\n    runtimeConstraints: RuntimeConstraints;\n    runtimeStatus: RuntimeStatus;\n    runtimeUserUuid: string;\n    schedulingParameters: SchedulingParameters;\n    output: string | null;\n    containerImage: string;\n    progress: number;\n    priority: number;\n    publishedPorts: Record<string, PublishedPort>;\n    exitCode: number | null;\n    authUuid: string | null;\n    lockedByUuid: string | null;\n}\n"
  },
  {
    "path": "services/workbench2/src/models/details.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ProjectResource } from \"./project\";\nimport { CollectionResource } from \"./collection\";\nimport { ProcessResource } from \"./process\";\nimport { EmptyResource } from \"./empty\";\nimport { CollectionFile, CollectionDirectory } from 'models/collection-file';\nimport { WorkflowResource } from 'models/workflow';\nimport { UserResource } from \"./user\";\n\nexport type DetailsResource = ProjectResource | CollectionResource | ProcessResource | EmptyResource | CollectionFile | CollectionDirectory | WorkflowResource | UserResource & {name?: string};\n"
  },
  {
    "path": "services/workbench2/src/models/empty.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport interface EmptyResource {\n    name: string;\n    kind: undefined;\n}\n"
  },
  {
    "path": "services/workbench2/src/models/external-credential.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource, ResourceKind } from \"./resource\";\n\nexport interface ExternalCredential extends Resource {\n    kind: ResourceKind.EXTERNAL_CREDENTIAL;\n    name: string;\n    description?: string;\n    credentialClass: string;\n    scopes?: string[];\n    externalId: string;\n    secret: string;\n    expiresAt: string;\n};\n\nexport const isExternalCredential = (resource?: Resource): resource is ExternalCredential => {\n    return !!resource && resource.kind === ResourceKind.EXTERNAL_CREDENTIAL;\n};\n"
  },
  {
    "path": "services/workbench2/src/models/file-viewers-config.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport type FileViewerList = FileViewer[];\n\nexport interface FileViewer {\n    /**\n     * Name is used as a label in file's context menu\n     */\n    name: string;\n\n    /**\n     * Limits files for which viewer is enabled\n     * If not given, viewer will be enabled for all files\n     * Viewer is enabled if file name ends with an extension.\n     * \n     * Example: `['.zip', '.tar.gz', 'bam']`\n     */\n    extensions?: string[];\n\n    /**\n     * Determines whether a viewer is enabled for collections.\n     */\n    collections?: boolean;\n\n    /**\n     * URL that redirects to a viewer \n     * Example: `https://bam-viewer.com`\n     */\n    url: string;\n\n    /**\n     * Name of a search param that will be used to send file's path to a viewer\n     * Example: \n     * \n     * `{ filePathParam: 'filePath' }`\n     * \n     * `https://bam-viewer.com?filePath=/path/to/file`\n     */\n    filePathParam: string;\n\n    /**\n     * Icon that will display next to a label\n     */\n    iconUrl?: string;\n}\n"
  },
  {
    "path": "services/workbench2/src/models/group.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport {\n    Resource,\n    ResourceKind,\n    ResourceWithProperties,\n    RESOURCE_UUID_REGEX,\n    ResourceObjectType,\n    TrashableResource\n} from \"./resource\";\nimport { LinkResource, LinkClass } from \"./link\";\n\nexport interface GroupResource extends TrashableResource, ResourceWithProperties {\n    kind: ResourceKind.GROUP;\n    name: string;\n    groupClass: GroupClass | null;\n    description: string | null;\n    ensure_unique_name: boolean;\n    canWrite: boolean;\n    canManage: boolean;\n    // Optional local-only field, undefined for not loaded, null for failed to load\n    memberCount?: number | null;\n}\n\nexport enum GroupClass {\n    PROJECT = 'project',\n    FILTER = 'filter',\n    ROLE = 'role',\n}\n\nexport enum BuiltinGroups {\n    ALL = 'fffffffffffffff',\n    ANON = 'anonymouspublic',\n    SYSTEM = '000000000000000',\n}\n\nexport const getBuiltinGroupUuid = (cluster: string, groupName: BuiltinGroups): string => {\n    return cluster ? `${cluster}-${ResourceObjectType.GROUP}-${groupName}` : \"\";\n};\n\nexport const isBuiltinGroup = (uuid: string) => {\n    const match = RESOURCE_UUID_REGEX.exec(uuid);\n    const parts = match ? match[0].split('-') : [];\n    return parts.length === 3 && parts[1] === ResourceObjectType.GROUP && Object.values<string>(BuiltinGroups).includes(parts[2]);\n};\n\nexport const isUserGroup = (resource: any): resource is GroupResource => {\n    return resource && resource.kind === ResourceKind.GROUP && resource.groupClass === GroupClass.ROLE;\n};\n\nexport const isRoleGroup = (resource?: Resource) => {\n    return isGroupResource(resource) && resource.groupClass === GroupClass.ROLE;\n};\n\nexport const isFilterGroup = (resource?: Resource) => {\n    return isGroupResource(resource) && resource.groupClass === GroupClass.FILTER;\n};\n\nexport const isGroupResource = (resource?: Resource): resource is GroupResource => {\n    return !!resource && resource.kind === ResourceKind.GROUP;\n};\n\ntype GroupMemberLink = LinkResource & { linkClass: LinkClass.PERMISSION, headKind: ResourceKind.GROUP };\n\nexport const isGroupMemberLink = (resource?: Resource | LinkResource): resource is GroupMemberLink => {\n    return !!resource\n        && resource.kind === ResourceKind.LINK\n        && 'linkClass' in resource\n        && 'headKind' in resource\n        && resource.linkClass === LinkClass.PERMISSION\n        && resource.headKind === ResourceKind.GROUP;\n};\n\nexport const selectedFieldsOfGroup = [\n    \"uuid\",\n    \"name\",\n    \"group_class\",\n    \"description\",\n    \"properties\",\n    \"can_write\",\n    \"can_manage\",\n    \"trash_at\",\n    \"delete_at\",\n    \"is_trashed\",\n    \"frozen_by_uuid\"\n];\n"
  },
  {
    "path": "services/workbench2/src/models/keep-manifest.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport type KeepManifest = KeepManifestStream[];\n\nexport interface KeepManifestStream {\n    name: string;\n    locators: string[];\n    files: Array<KeepManifestStreamFile>;\n}\n\nexport interface KeepManifestStreamFile {\n    name: string;\n    position: string;\n    size: number;\n}\n"
  },
  {
    "path": "services/workbench2/src/models/keep-services.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource } from 'models/resource';\n\nexport interface KeepServiceResource extends Resource {\n    serviceHost: string;\n    servicePort: number;\n    serviceSslFlag: boolean;\n    serviceType: string;\n    readOnly: boolean;\n}"
  },
  {
    "path": "services/workbench2/src/models/link-account.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport enum LinkAccountStatus {\n    SUCCESS,\n    CANCELLED,\n    FAILED\n}\n\nexport enum LinkAccountType {\n    ADD_OTHER_LOGIN,\n    ADD_LOCAL_TO_REMOTE,\n    ACCESS_OTHER_ACCOUNT,\n    ACCESS_OTHER_REMOTE_ACCOUNT\n}\n\nexport interface AccountToLink {\n    type: LinkAccountType;\n    userUuid: string;\n    token: string;\n}\n"
  },
  {
    "path": "services/workbench2/src/models/link.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource, ResourceKind, ResourceWithProperties } from 'models/resource';\n\nexport interface LinkResource extends Resource, ResourceWithProperties {\n    headUuid: string;\n    headKind: ResourceKind;\n    tailUuid: string;\n    tailKind: string;\n    linkClass: LinkClass;\n    name: string;\n    kind: ResourceKind.LINK;\n}\n\nexport enum LinkClass {\n    STAR = 'star',\n    TAG = 'tag',\n    PERMISSION = 'permission',\n    PRESET = 'preset',\n}\n\nexport type NewFavoriteLink = Pick<LinkResource, 'ownerUuid' | 'tailUuid' | 'headUuid' | 'linkClass' | 'name'>;\n\nexport const hasCreateLinkProperties = (potentialLink: NewFavoriteLink): boolean => {\n    return !!potentialLink.ownerUuid && !!potentialLink.headUuid && !!potentialLink.tailUuid && !!potentialLink.linkClass && !!potentialLink.name;\n};\n"
  },
  {
    "path": "services/workbench2/src/models/log.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource, ResourceWithProperties } from \"./resource\";\nimport { ResourceKind } from 'models/resource';\n\nexport enum LogEventType {\n    CREATE = 'create',\n    UPDATE = 'update',\n    DISPATCH = 'dispatch',\n    CRUNCH_RUN = 'crunch-run',\n    CRUNCHSTAT = 'crunchstat',\n    HOSTSTAT = 'hoststat',\n    NODE_INFO = 'node-info',\n    ARV_MOUNT = 'arv-mount',\n    STDOUT = 'stdout',\n    STDERR = 'stderr',\n    CONTAINER = 'container',\n    KEEPSTORE = 'keepstore',\n    SCHEDULING = 'scheduling',\n}\n\nexport interface LogResource extends Resource, ResourceWithProperties {\n    kind: ResourceKind.LOG;\n    objectUuid: string;\n    eventAt: string;\n    eventType: LogEventType;\n    summary: string;\n}\n"
  },
  {
    "path": "services/workbench2/src/models/mount-types.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport enum MountKind {\n    COLLECTION = 'collection',\n    GIT_TREE = 'git_tree',\n    TEMPORARY_DIRECTORY = 'tmp',\n    KEEP = 'keep',\n    MOUNTED_FILE = 'file',\n    JSON = 'json',\n    TEXT = 'text'\n}\n\nexport type MountType =\n    CollectionMount |\n    GitTreeMount |\n    TemporaryDirectoryMount |\n    KeepMount |\n    JSONMount |\n    FileMount |\n    TextMount;\n\nexport interface CollectionMount {\n    kind: MountKind.COLLECTION;\n    uuid?: string;\n    portable_data_hash?: string;\n    path?: string;\n    writable?: boolean;\n}\n\nexport interface GitTreeMount {\n    kind: MountKind.GIT_TREE;\n    uuid: string;\n    commit: string;\n    path?: string;\n}\n\nexport enum TemporaryDirectoryDeviceType {\n    RAM = 'ram',\n    SSD = 'ssd',\n    DISK = 'disk',\n    NETWORK = 'network',\n}\n\nexport interface TemporaryDirectoryMount {\n    kind: MountKind.TEMPORARY_DIRECTORY;\n    capacity: number;\n    deviceType: TemporaryDirectoryDeviceType;\n}\n\nexport interface KeepMount {\n    kind: MountKind.KEEP;\n}\n\nexport interface JSONMount {\n    kind: MountKind.JSON;\n    content: any;\n}\n\nexport interface TextMount {\n    kind: MountKind.TEXT;\n    content: string;\n}\n\nexport interface FileMount {\n    kind: MountKind.MOUNTED_FILE;\n    path: string;\n}\n"
  },
  {
    "path": "services/workbench2/src/models/node.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource } from 'models/resource';\n\nexport interface NodeResource extends Resource {\n    slotNumber: number;\n    hostname: string;\n    domain: string;\n    ipAddress: string;\n    jobUuid: string;\n    firstPingAt: string;\n    lastPingAt: string;\n    status: string;\n    info: NodeInfo;\n    properties: NodeProperties;\n}\n\nexport interface NodeInfo {\n    last_action: string;\n    ping_secret: string;\n    ec2_instance_id: string;\n    slurm_state?: string;\n}\n\nexport interface NodeProperties {\n    cloud_node: CloudNode;\n    total_ram_mb: number;\n    total_cpu_cores: number;\n    total_scratch_mb: number;\n}\n\ninterface CloudNode {\n    size: string;\n    price: number;\n}"
  },
  {
    "path": "services/workbench2/src/models/object-types.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nconst USER_UUID_REGEX = /.*tpzed.*/;\nconst GROUP_UUID_REGEX = /.*-j7d0g-.*/;\n\nexport enum ObjectTypes {\n    USER = \"User\",\n    GROUP = \"Group\",\n    UNKNOWN = \"Unknown\"\n}\n\nexport const getUuidObjectType = (uuid: string) => {\n    switch (true) {\n        case USER_UUID_REGEX.test(uuid):\n            return ObjectTypes.USER;\n        case GROUP_UUID_REGEX.test(uuid):\n            return ObjectTypes.GROUP;\n        default:\n            return ObjectTypes.UNKNOWN;\n    }\n};"
  },
  {
    "path": "services/workbench2/src/models/permission.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { LinkResource, LinkClass } from './link';\n\nexport interface PermissionResource extends LinkResource {\n    linkClass: LinkClass.PERMISSION;\n}\n\nexport enum PermissionLevel {\n    NONE = 'none',\n    CAN_READ = 'can_read',\n    CAN_WRITE = 'can_write',\n    CAN_MANAGE = 'can_manage',\n    CAN_LOGIN = 'can_login',\n}\n"
  },
  {
    "path": "services/workbench2/src/models/process.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContainerRequestResource } from \"./container-request\";\nimport { MountType, MountKind } from 'models/mount-types';\nimport { WorkflowResource, parseWorkflowDefinition, getWorkflow, CwlSecrets } from 'models/workflow';\nimport { WorkflowInputsData } from './workflow';\n\nexport type ProcessResource = ContainerRequestResource;\n\nexport const MOUNT_PATH_CWL_WORKFLOW = '/var/lib/cwl/workflow.json';\nexport const MOUNT_PATH_CWL_INPUT = '/var/lib/cwl/cwl.input.json';\n\n\nexport const createWorkflowMounts = (workflow: WorkflowResource, inputs: WorkflowInputsData): { [path: string]: MountType } => {\n\n    const wfdef = parseWorkflowDefinition(workflow);\n    const mounts: {[path: string]: MountType} = {\n        '/var/spool/cwl': {\n            kind: MountKind.COLLECTION,\n            writable: true,\n        },\n        'stdout': {\n            kind: MountKind.MOUNTED_FILE,\n            path: '/var/spool/cwl/cwl.output.json',\n        },\n        '/var/lib/cwl/workflow.json': {\n            kind: MountKind.JSON,\n            content: wfdef,\n        },\n        '/var/lib/cwl/cwl.input.json': {\n            kind: MountKind.JSON,\n            content: inputs,\n        }\n    };\n\n    return mounts;\n};\n\nexport const createWorkflowSecretMounts = (workflow: WorkflowResource, inputs: WorkflowInputsData): { [path: string]: MountType } => {\n\n    const wfdef = parseWorkflowDefinition(workflow);\n    const secret_mounts: {[path: string]: MountType} = {};\n\n    const wf = getWorkflow(wfdef);\n    if (wf?.hints) {\n        const secrets = wf.hints.find(item => item.class === 'http://commonwl.org/cwltool#Secrets') as CwlSecrets | undefined;\n        if (secrets?.secrets) {\n            let secretCount = 0;\n            secrets.secrets.forEach((paramId) => {\n                const param = paramId.split(\"/\").pop();\n                if (!param || !inputs[param]) {\n                    return;\n                }\n                const value: string = inputs[param] as string;\n                const mnt = \"/secrets/s\"+secretCount;\n                secret_mounts[mnt] = {\n                    \"kind\": MountKind.TEXT,\n                    \"content\": value\n                }\n                inputs[param] = {\"$include\": mnt}\n                secretCount++;\n            });\n        }\n    }\n    return secret_mounts;\n};\n"
  },
  {
    "path": "services/workbench2/src/models/project.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { GroupClass, GroupResource, isGroupResource } from \"./group\";\nimport { Resource } from \"./resource\";\n\nexport interface ProjectResource extends GroupResource {\n    frozenByUuid: null | string;\n    groupClass: GroupClass.PROJECT | GroupClass.FILTER | GroupClass.ROLE;\n}\n\nexport const getProjectUrl = (uuid: string) => {\n    return `/projects/${uuid}`;\n};\n\nexport const isProjectResource = (resource?: Resource): resource is ProjectResource => {\n    return isGroupResource(resource) && 'frozenByUuid' in resource;\n};\n"
  },
  {
    "path": "services/workbench2/src/models/repositories.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource } from \"models/resource\";\n\nexport interface RepositoryResource extends Resource {\n    name: string;\n    cloneUrls: string[];\n}\n"
  },
  {
    "path": "services/workbench2/src/models/resource.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport interface Resource {\n    uuid: string;\n    ownerUuid: string;\n    createdAt: string;\n    modifiedByUserUuid: string;\n    modifiedAt: string;\n    kind: ResourceKind;\n    etag: string;\n}\n\nexport interface ResourceWithProperties extends Resource {\n    properties: any;\n}\n\nexport interface EditableResource extends Resource {\n    isEditable: boolean;\n}\n\nexport interface TrashableResource extends Resource {\n    trashAt: string;\n    deleteAt: string;\n    isTrashed: boolean;\n}\n\nexport interface NamedResource extends Resource {\n    name: string;\n}\n\nexport enum ResourceKind {\n    API_CLIENT_AUTHORIZATION = \"arvados#apiClientAuthorization\",\n    COLLECTION = \"arvados#collection\",\n    CONTAINER = \"arvados#container\",\n    CONTAINER_REQUEST = \"arvados#containerRequest\",\n    EXTERNAL_CREDENTIAL = \"arvados#credential\",\n    GROUP = \"arvados#group\",\n    LINK = \"arvados#link\",\n    LOG = \"arvados#log\",\n    PROCESS = \"arvados#containerRequest\",\n    PROJECT = \"arvados#group\",\n    REPOSITORY = \"arvados#repository\",\n    SSH_KEY = \"arvados#authorizedKeys\",\n    KEEP_SERVICE = \"arvados#keepService\",\n    USER = \"arvados#user\",\n    VIRTUAL_MACHINE = \"arvados#virtualMachine\",\n    WORKFLOW = \"arvados#workflow\",\n    NONE = \"arvados#none\"\n}\n\nexport enum ResourceObjectType {\n    API_CLIENT_AUTHORIZATION = 'gj3su',\n    COLLECTION = '4zz18',\n    CONTAINER = 'dz642',\n    CONTAINER_REQUEST = 'xvhdp',\n    EXTERNAL_CREDENTIAL = 'oss07',\n    GROUP = 'j7d0g',\n    LINK = 'o0j2j',\n    LOG = '57u5n',\n    REPOSITORY = 's0uqq',\n    USER = 'tpzed',\n    VIRTUAL_MACHINE = '2x53u',\n    WORKFLOW = '7fd4e',\n    SSH_KEY = 'fngyi',\n    KEEP_SERVICE = 'bi6l4'\n}\n\nexport const RESOURCE_UUID_PATTERN = '[a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}';\nexport const PORTABLE_DATA_HASH_PATTERN = '[a-f0-9]{32}\\\\+\\\\d+';\nexport const RESOURCE_UUID_REGEX = new RegExp(\"^\" + RESOURCE_UUID_PATTERN + \"$\");\nexport const COLLECTION_PDH_REGEX = new RegExp(\"^\" + PORTABLE_DATA_HASH_PATTERN + \"$\");\nexport const KEEP_URL_REGEX = new RegExp(\"^(keep:)?\" + PORTABLE_DATA_HASH_PATTERN);\n\nexport const isResourceUuid = (uuid: string) =>\n    RESOURCE_UUID_REGEX.test(uuid);\n\nexport const extractUuidObjectType = (uuid: string) => {\n    const match = RESOURCE_UUID_REGEX.exec(uuid);\n    return match\n        ? 
match[0].split('-')[1]\n        : undefined;\n};\n\nexport const extractUuidKind = (uuid: string = '') => {\n    const objectType = extractUuidObjectType(uuid);\n    switch (objectType) {\n        case ResourceObjectType.USER:\n            return ResourceKind.USER;\n        case ResourceObjectType.GROUP:\n            return ResourceKind.GROUP;\n        case ResourceObjectType.COLLECTION:\n            return ResourceKind.COLLECTION;\n        case ResourceObjectType.CONTAINER_REQUEST:\n            return ResourceKind.CONTAINER_REQUEST;\n        case ResourceObjectType.CONTAINER:\n            return ResourceKind.CONTAINER;\n        case ResourceObjectType.LOG:\n            return ResourceKind.LOG;\n        case ResourceObjectType.WORKFLOW:\n            return ResourceKind.WORKFLOW;\n        case ResourceObjectType.VIRTUAL_MACHINE:\n            return ResourceKind.VIRTUAL_MACHINE;\n        case ResourceObjectType.REPOSITORY:\n            return ResourceKind.REPOSITORY;\n        case ResourceObjectType.SSH_KEY:\n            return ResourceKind.SSH_KEY;\n        case ResourceObjectType.KEEP_SERVICE:\n            return ResourceKind.KEEP_SERVICE;\n        case ResourceObjectType.API_CLIENT_AUTHORIZATION:\n            return ResourceKind.API_CLIENT_AUTHORIZATION;\n        case ResourceObjectType.LINK:\n            return ResourceKind.LINK;\n        case ResourceObjectType.EXTERNAL_CREDENTIAL:\n            return ResourceKind.EXTERNAL_CREDENTIAL;\n        default:\n            const match = COLLECTION_PDH_REGEX.exec(uuid);\n            return match ? ResourceKind.COLLECTION : undefined;\n    }\n};\n\nexport const isResourceResource = (resource?: any): resource is Resource => {\n    return !!resource && isResourceUuid(resource.uuid) && containsAllResourceProps(resource);\n};\n\nconst containsAllResourceProps = (obj: any) => {\n    // Object.keys({} as Resource) yields nothing at runtime (interfaces are erased),\n    // so the required property names are listed explicitly.\n    const resourceKeys = ['uuid', 'ownerUuid', 'createdAt', 'modifiedByUserUuid', 'modifiedAt', 'kind', 'etag'];\n    return resourceKeys.every(key => key in obj);\n};\n
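\n// Examples (uuids are placeholders):\n//   extractUuidKind(\"zzzzz-4zz18-0123456789abcde\")        // => ResourceKind.COLLECTION\n//   extractUuidKind(\"zzzzz-tpzed-0123456789abcde\")        // => ResourceKind.USER\n//   extractUuidKind(\"d41d8cd98f00b204e9800998ecf8427e+0\") // => ResourceKind.COLLECTION (portable data hash)\n"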
  },
  {
    "path": "services/workbench2/src/models/runtime-constraints.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport interface GPUParameters {\n    stack: string;\n    driver_version: string;\n    hardware_target: string[];\n    device_count: number;\n    vram: number;\n}\n\nexport interface RuntimeConstraints {\n    ram: number;\n    vcpus: number;\n    keep_cache_ram?: number;\n    keep_cache_disk?: number;\n    API: boolean;\n    gpu?: GPUParameters;\n}\n"
  },
  {
    "path": "services/workbench2/src/models/runtime-status.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport interface RuntimeStatus {\n    error?: string;\n    warning?: string;\n    activity?: string;\n    errorDetail?: string;\n    warningDetail?: string;\n}\n"
  },
  {
    "path": "services/workbench2/src/models/scheduling-parameters.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport interface SchedulingParameters {\n    partitions?: string[];\n    preemptible?: boolean;\n    max_run_time?: number;\n}\n"
  },
  {
    "path": "services/workbench2/src/models/search-bar.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ResourceKind } from 'models/resource';\nimport { GroupResource } from './group';\n\nexport type SearchBarAdvancedFormData = {\n    type?: ResourceKind;\n    cluster?: string;\n    projectUuid?: string;\n    projectObject?: GroupResource;\n    inTrash: boolean;\n    pastVersions: boolean;\n    dateFrom: string;\n    dateTo: string;\n    saveQuery: boolean;\n    queryName: string;\n    searchValue: string;\n    properties: PropertyValue[];\n};\n\nexport interface PropertyValue {\n    key: string;\n    keyID?: string;\n    value: string;\n    valueID?: string;\n}\n"
  },
  {
    "path": "services/workbench2/src/models/session.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport enum SessionStatus {\n    INVALIDATED,\n    BEING_VALIDATED,\n    VALIDATED\n}\n\nexport interface Session {\n    clusterId: string;\n    remoteHost: string;\n    baseUrl: string;\n    name: string;\n    email: string;\n    token: string;\n    uuid: string;\n    loggedIn: boolean;\n    status: SessionStatus;\n    active: boolean;\n    userIsActive: boolean;\n    apiRevision: number;\n}\n"
  },
  {
    "path": "services/workbench2/src/models/ssh-key.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource } from 'models/resource';\n\nexport enum KeyType {\n    SSH = 'SSH'\n}\n\nexport interface SshKeyResource extends Resource {\n    name: string;\n    keyType: KeyType;\n    authorizedUserUuid: string;\n    publicKey: string;\n    expiresAt: string;\n}"
  },
  {
    "path": "services/workbench2/src/models/tag.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { LinkResource } from \"./link\";\n\nexport interface TagResource extends LinkResource {\n    tailUuid: TagTailType;\n    properties: TagProperty;\n}\n\nexport interface TagProperty {\n    uuid: string;\n    key: string;\n    keyID?: string;\n    value: string;\n    valueID?: string;\n}\n\nexport enum TagTailType {\n    COLLECTION = 'Collection',\n    JOB = 'Job'\n}"
  },
  {
    "path": "services/workbench2/src/models/test-utils.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { GroupClass, GroupResource } from \"./group\";\nimport { Resource, ResourceKind } from \"./resource\";\nimport { ProjectResource } from \"./project\";\n\nexport const mockGroupResource = (data: Partial<GroupResource> = {}): GroupResource => ({\n    createdAt: \"\",\n    deleteAt: \"\",\n    description: \"\",\n    etag: \"\",\n    groupClass: null,\n    isTrashed: false,\n    kind: ResourceKind.GROUP,\n    modifiedAt: \"\",\n    modifiedByUserUuid: \"\",\n    name: \"\",\n    ownerUuid: \"\",\n    properties: \"\",\n    trashAt: \"\",\n    uuid: \"\",\n    ensure_unique_name: true,\n    canWrite: false,\n    canManage: false,\n    ...data\n});\n\nexport const mockProjectResource = (data: Partial<ProjectResource> = {}): ProjectResource =>\n    mockGroupResource({ ...data, groupClass: GroupClass.PROJECT }) as ProjectResource;\n\nexport const mockCommonResource = (data: Partial<Resource>): Resource => ({\n    createdAt: \"\",\n    etag: \"\",\n    kind: ResourceKind.NONE,\n    modifiedAt: \"\",\n    modifiedByUserUuid: \"\",\n    ownerUuid: \"\",\n    uuid: \"\",\n    ...data\n});\n"
  },
  {
    "path": "services/workbench2/src/models/tree.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport * as Tree from './tree';\nimport { initTreeNode } from './tree';\nimport { pipe, isEqual } from 'lodash/fp';\n\ndescribe('Tree', () => {\n    let tree;\n\n    beforeEach(() => {\n        tree = Tree.createTree();\n    });\n\n    it('sets new node', () => {\n        const newTree = Tree.setNode(initTreeNode({ id: 'Node 1', value: 'Value 1' }))(tree);\n        expect(isEqual(Tree.getNode('Node 1')(newTree), initTreeNode({ id: 'Node 1', value: 'Value 1' }))).to.equal(true);\n    });\n\n    it('appends a subtree', () => {\n        const newTree = Tree.setNode(initTreeNode({ id: 'Node 1', value: 'Value 1' }))(tree);\n        const subtree = Tree.setNode(initTreeNode({ id: 'Node 2', value: 'Value 2' }))(Tree.createTree());\n        const mergedTree = Tree.appendSubtree('Node 1', subtree)(newTree);\n        expect(Tree.getNode('Node 1')(mergedTree)).to.not.equal('undefined');\n        expect(Tree.getNode('Node 2')(mergedTree)).to.not.equal('undefined');\n    });\n\n    it('adds new node reference to parent children', () => {\n        const newTree = pipe(\n            Tree.setNode(initTreeNode({ id: 'Node 1', parent: '', value: 'Value 1' })),\n            Tree.setNode(initTreeNode({ id: 'Node 2', parent: 'Node 1', value: 'Value 2' })),\n        )(tree);\n\n        expect(isEqual(Tree.getNode('Node 1')(newTree), {\n            ...initTreeNode({ id: 'Node 1', parent: '', value: 'Value 1' }),\n            children: ['Node 2']\n        })).to.equal(true);\n    });\n\n    it('gets node ancestors', () => {\n        const newTree = [\n            initTreeNode({ id: 'Node 1', parent: '', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 2', parent: 'Node 1', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 3', parent: 'Node 2', value: 'Value 1' }),\n        ].reduce((tree, node) => Tree.setNode(node)(tree), tree);\n        expect(isEqual(Tree.getNodeAncestorsIds('Node 3')(newTree), ['Node 1', 'Node 2'])).to.equal(true);\n    });\n\n    it('gets node descendants', () => {\n        const newTree = [\n            initTreeNode({ id: 'Node 1', parent: '', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 2', parent: 'Node 1', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 2.1', parent: 'Node 2', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 3', parent: 'Node 1', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 3.1', parent: 'Node 3', value: 'Value 1' }),\n        ].reduce((tree, node) => Tree.setNode(node)(tree), tree);\n        expect(isEqual(Tree.getNodeDescendantsIds('Node 1')(newTree), ['Node 2', 'Node 3', 'Node 2.1', 'Node 3.1'])).to.equal(true);\n    });\n\n    it('gets root descendants', () => {\n        const newTree = [\n            initTreeNode({ id: 'Node 1', parent: '', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 2', parent: 'Node 1', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 2.1', parent: 'Node 2', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 3', parent: 'Node 1', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 3.1', parent: 'Node 3', value: 'Value 1' }),\n        ].reduce((tree, node) => Tree.setNode(node)(tree), tree);\n        expect(isEqual(Tree.getNodeDescendantsIds('')(newTree), ['Node 1', 'Node 2', 'Node 3', 'Node 2.1', 'Node 3.1'])).to.equal(true);\n    });\n\n    it('gets node children', () => {\n        const newTree = [\n            
initTreeNode({ id: 'Node 1', parent: '', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 2', parent: 'Node 1', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 2.1', parent: 'Node 2', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 3', parent: 'Node 1', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 3.1', parent: 'Node 3', value: 'Value 1' }),\n        ].reduce((tree, node) => Tree.setNode(node)(tree), tree);\n        expect(isEqual(Tree.getNodeChildrenIds('Node 1')(newTree), ['Node 2', 'Node 3'])).to.equal(true);\n    });\n\n    it('gets root children', () => {\n        const newTree = [\n            initTreeNode({ id: 'Node 1', parent: '', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 2', parent: 'Node 1', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 2.1', parent: 'Node 2', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 3', parent: '', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 3.1', parent: 'Node 3', value: 'Value 1' }),\n        ].reduce((tree, node) => Tree.setNode(node)(tree), tree);\n        expect(isEqual(Tree.getNodeChildrenIds('')(newTree), ['Node 1', 'Node 3'])).to.equal(true);\n    });\n\n    it('maps tree', () => {\n        const newTree = [\n            initTreeNode({ id: 'Node 1', parent: '', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 2', parent: 'Node 1', value: 'Value 2' }),\n        ].reduce((tree, node) => Tree.setNode(node)(tree), tree);\n        const mappedTree = Tree.mapTreeValues(value => parseInt(value.split(' ')[1], 10))(newTree);\n        expect(isEqual(Tree.getNode('Node 2')(mappedTree), initTreeNode({ id: 'Node 2', parent: 'Node 1', value: 2 }))).to.equal(true);\n    });\n\n    it('expands node ancestor chains', () => {\n        const newTree = [\n            initTreeNode({ id: 'Root Node 1', parent: '', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 1.1', parent: 'Root Node 1', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 1.1.1', parent: 'Node 1.1', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 1.2', parent: 'Root Node 1', value: 'Value 1' }),\n\n            initTreeNode({ id: 'Root Node 2', parent: '', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 2.1', parent: 'Root Node 2', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 2.1.1', parent: 'Node 2.1', value: 'Value 1' }),\n\n            initTreeNode({ id: 'Root Node 3', parent: '', value: 'Value 1' }),\n            initTreeNode({ id: 'Node 3.1', parent: 'Root Node 3', value: 'Value 1' }),\n        ].reduce((tree, node) => Tree.setNode(node)(tree), tree);\n\n        const expandedTree = Tree.expandNodeAncestors(\n            'Node 1.1.1', // Expands 1.1 and 1\n            'Node 2.1', // Expands 2\n        )(newTree);\n\n        expect(Tree.getNode('Root Node 1')(expandedTree)?.expanded).to.equal(true);\n        expect(Tree.getNode('Node 1.1')(expandedTree)?.expanded).to.equal(true);\n        expect(Tree.getNode('Node 1.1.1')(expandedTree)?.expanded).to.equal(false);\n        expect(Tree.getNode('Node 1.2')(expandedTree)?.expanded).to.equal(false);\n        expect(Tree.getNode('Root Node 2')(expandedTree)?.expanded).to.equal(true);\n        expect(Tree.getNode('Node 2.1')(expandedTree)?.expanded).to.equal(false);\n        expect(Tree.getNode('Node 2.1.1')(expandedTree)?.expanded).to.equal(false);\n        expect(Tree.getNode('Root Node 3')(expandedTree)?.expanded).to.equal(false);\n        expect(Tree.getNode('Node 
3.1')(expandedTree)?.expanded).to.equal(false);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/models/tree.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { pipe, map, reduce } from 'lodash/fp';\nexport type Tree<T> = Record<string, TreeNode<T>>;\n\nexport const TREE_ROOT_ID = '';\n\nexport interface TreeNode<T = any> {\n    children: string[];\n    value: T;\n    id: string;\n    parent: string;\n    active: boolean;\n    selected: boolean;\n    initialState?: boolean;\n    expanded: boolean;\n    status: TreeNodeStatus;\n}\n\nexport enum TreeNodeStatus {\n    INITIAL = 'INITIAL',\n    PENDING = 'PENDING',\n    LOADED = 'LOADED',\n}\n\nexport enum TreePickerId {\n    PROJECTS = 'Projects',\n    SHARED_WITH_ME = 'Shared with me',\n    FAVORITES = 'Favorites',\n    PUBLIC_FAVORITES = 'Public Favorites'\n}\n\nexport const createTree = <T>(): Tree<T> => ({});\n\nexport const getNode = (id: string) => <T>(tree: Tree<T>): TreeNode<T> | undefined => tree[id];\n\nexport const appendSubtree = <T>(id: string, subtree: Tree<T>) => (tree: Tree<T>) =>\n    pipe(\n        getNodeDescendants(''),\n        map(node => node.parent === '' ? { ...node, parent: id } : node),\n        reduce((newTree, node) => setNode(node)(newTree), tree)\n    )(subtree) as Tree<T>;\n\nexport const setNode = <T>(node: TreeNode<T>) => (tree: Tree<T>): Tree<T> => {\n    if (tree[node.id] && tree[node.id] === node) { return tree; }\n\n    tree[node.id] = node;\n    if (tree[node.parent]) {\n        tree[node.parent].children = Array.from(new Set([...tree[node.parent].children, node.id]));\n    }\n    return tree;\n};\n\nexport const getNodeValue = (id: string) => <T>(tree: Tree<T>) => {\n    const node = getNode(id)(tree);\n    return node ? node.value : undefined;\n};\n\nexport const setNodeValue = (id: string) => <T>(value: T) => (tree: Tree<T>) => {\n    const node = getNode(id)(tree);\n    return node\n        ? setNode(mapNodeValue(() => value)(node))(tree)\n        : tree;\n};\n\nexport const setNodeValueWith = <T>(mapFn: (value: T) => T) => (id: string) => (tree: Tree<T>) => {\n    const node = getNode(id)(tree);\n    return node\n        ? setNode(mapNodeValue(mapFn)(node))(tree)\n        : tree;\n};\n\nexport const mapTreeValues = <T, R>(mapFn: (value: T) => R) => (tree: Tree<T>): Tree<R> =>\n    getNodeDescendantsIds('')(tree)\n        .map(id => getNode(id)(tree))\n        .filter(node => !!node)\n        .map(mapNodeValue(mapFn))\n        .reduce((newTree, node) => setNode(node)(newTree), createTree<R>());\n\nexport const mapTree = <T, R = T>(mapFn: (node: TreeNode<T>) => TreeNode<R>) => (tree: Tree<T>): Tree<R> => {\n    const mappedTree = getNodeDescendantsIds('')(tree)\n        .map(id => getNode(id)(tree))\n        .map(mapFn)\n        .reduce((newTree, node) => setNode(node)(newTree), createTree<R>())\n    return Object.keys(mappedTree).length === 0 ? createTree<R>() : mappedTree;\n};\n\nexport const getNodeAncestors = (id: string) => <T>(tree: Tree<T>) =>\n    mapIdsToNodes(getNodeAncestorsIds(id)(tree))(tree);\n\n\nexport const getNodeAncestorsIds = (id: string) => <T>(tree: Tree<T>): string[] => {\n    const node = getNode(id)(tree);\n    return node && node.parent\n        ? 
[...getNodeAncestorsIds(node.parent)(tree), node.parent]\n        : [];\n};\n\nexport const getNodeDescendants = (id: string, limit = Infinity) => <T>(tree: Tree<T>) =>\n    mapIdsToNodes(getNodeDescendantsIds(id, limit)(tree))(tree);\n\nexport const countNodes = <T>(tree: Tree<T>) =>\n    getNodeDescendantsIds('')(tree).length;\n\nexport const countChildren = (id: string) => <T>(tree: Tree<T>) =>\n    getNodeChildren(id)(tree).length;\n\nexport const getNodeDescendantsIds = (id: string, limit = Infinity) => <T>(tree: Tree<T>): string[] => {\n    const node = getNode(id)(tree);\n    const children = node ? node.children :\n        id === TREE_ROOT_ID\n            ? getRootNodeChildrenIds(tree)\n            : [];\n\n    return children\n        .concat(limit < 1\n            ? []\n            : children\n                .map(id => getNodeDescendantsIds(id, limit - 1)(tree))\n                .reduce((nodes, nodeChildren) => [...nodes, ...nodeChildren], []));\n};\n\nexport const getNodeChildren = (id: string) => <T>(tree: Tree<T>) =>\n    mapIdsToNodes(getNodeChildrenIds(id)(tree))(tree);\n\nexport const getNodeChildrenIds = (id: string) => <T>(tree: Tree<T>): string[] =>\n    getNodeDescendantsIds(id, 0)(tree);\n\nexport const mapIdsToNodes = (ids: string[]) => <T>(tree: Tree<T>) =>\n    ids.map(id => getNode(id)(tree)).filter((node): node is TreeNode<T> => node !== undefined);\n\nexport const activateNode = (id: string) => <T>(tree: Tree<T>) =>\n    mapTree((node: TreeNode<T>) => node.id === id ? { ...node, active: true } : { ...node, active: false })(tree);\n\nexport const deactivateNode = <T>(tree: Tree<T>) =>\n    mapTree((node: TreeNode<T>) => node.active ? { ...node, active: false } : node)(tree);\n\nexport const expandNode = (...ids: string[]) => <T>(tree: Tree<T>) =>\n    mapTree((node: TreeNode<T>) => ids.some(id => id === node.id) ? { ...node, expanded: true } : node)(tree);\n\nexport const expandNodeAncestors = (...ids: string[]) => <T>(tree: Tree<T>) => {\n    const ancestors = ids.reduce((acc, id): string[] => ([...acc, ...getNodeAncestorsIds(id)(tree)]), [] as string[]);\n    return mapTree((node: TreeNode<T>) => ancestors.some(id => id === node.id) ? { ...node, expanded: true } : node)(tree);\n}\n\nexport const collapseNode = (...ids: string[]) => <T>(tree: Tree<T>) =>\n    mapTree((node: TreeNode<T>) => ids.some(id => id === node.id) ? { ...node, expanded: false } : node)(tree);\n\nexport const toggleNodeCollapse = (...ids: string[]) => <T>(tree: Tree<T>) =>\n    mapTree((node: TreeNode<T>) => ids.some(id => id === node.id) ? { ...node, expanded: !node.expanded } : node)(tree);\n\nexport const setNodeStatus = (id: string) => (status: TreeNodeStatus) => <T>(tree: Tree<T>) => {\n    const node = getNode(id)(tree);\n    return node\n        ? setNode({ ...node, status })(tree)\n        : tree;\n};\n\nexport const toggleNodeSelection = (id: string, cascade: boolean) => <T>(tree: Tree<T>) => {\n    const node = getNode(id)(tree);\n\n    return node\n        ? cascade\n            ? pipe(\n                setNode({ ...node, selected: !node.selected }),\n                toggleAncestorsSelection(id),\n                toggleDescendantsSelection(id))(tree)\n            : setNode({ ...node, selected: !node.selected })(tree)\n        : tree;\n};\n\nexport const selectNode = (id: string, cascade: boolean) => <T>(tree: Tree<T>) => {\n    const node = getNode(id)(tree);\n    return node && node.selected\n        ? 
tree\n        : toggleNodeSelection(id, cascade)(tree);\n};\n\nexport const selectNodes = (id: string | string[], cascade: boolean) => <T>(tree: Tree<T>) => {\n    const ids = typeof id === 'string' ? [id] : id;\n    return ids.reduce((tree, id) => selectNode(id, cascade)(tree), tree);\n};\nexport const deselectNode = (id: string, cascade: boolean) => <T>(tree: Tree<T>) => {\n    const node = getNode(id)(tree);\n    return node && node.selected\n        ? toggleNodeSelection(id, cascade)(tree)\n        : tree;\n};\n\nexport const deselectNodes = (id: string | string[], cascade: boolean) => <T>(tree: Tree<T>) => {\n    const ids = typeof id === 'string' ? [id] : id;\n    return ids.reduce((tree, id) => deselectNode(id, cascade)(tree), tree);\n};\n\nexport const getSelectedNodes = <T>(tree: Tree<T>) =>\n    getNodeDescendants('')(tree)\n        .filter(node => node.selected);\n\nexport const initTreeNode = <T>(data: Pick<TreeNode<T>, 'id' | 'value'> & { parent?: string }): TreeNode<T> => ({\n    children: [],\n    active: false,\n    selected: false,\n    expanded: false,\n    status: TreeNodeStatus.INITIAL,\n    parent: '',\n    ...data,\n});\n\nexport const getTreeDirty = (id: string) => <T>(tree: Tree<T>): boolean => {\n    const node = getNode(id)(tree);\n    const children = getNodeDescendants(id)(tree);\n    return (node\n            && node.initialState !== undefined\n            && node.selected !== node.initialState\n            )\n            || children.some(child =>\n                child.initialState !== undefined\n                && child.selected !== child.initialState\n            );\n}\n\nconst toggleDescendantsSelection = (id: string) => <T>(tree: Tree<T>) => {\n    const node = getNode(id)(tree);\n    if (node) {\n        return getNodeDescendants(id)(tree)\n            .reduce((newTree, subNode) =>\n                setNode({ ...subNode, selected: node.selected })(newTree),\n                tree);\n    }\n    return tree;\n};\n\nconst toggleAncestorsSelection = (id: string) => <T>(tree: Tree<T>) => {\n    const ancestors = getNodeAncestorsIds(id)(tree).reverse();\n    return ancestors.reduce((newTree, parent) => parent ? toggleParentNodeSelection(parent)(newTree) : newTree, tree);\n};\n\nconst toggleParentNodeSelection = (id: string) => <T>(tree: Tree<T>) => {\n    const node = getNode(id)(tree);\n    if (node) {\n        const parentNode = getNode(node.id)(tree);\n        if (parentNode) {\n            const selected = parentNode.children\n                .map(id => getNode(id)(tree))\n                .every(node => node !== undefined && node.selected);\n            return setNode({ ...parentNode, selected })(tree);\n        }\n        return setNode(node)(tree);\n    }\n    return tree;\n};\n\nconst mapNodeValue = <T, R>(mapFn: (value: T) => R) => (node: TreeNode<T>): TreeNode<R> =>{\n    if(!mapFn) mapFn = value => value as unknown as R;\n    return { ...node, value: mapFn(node.value) };\n}\n\nconst getRootNodeChildrenIds = <T>(tree: Tree<T>) =>\n    Object\n        .keys(tree)\n        .filter(id => getNode(id)(tree)!.parent === TREE_ROOT_ID);\n"
  },
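The tree model above is a curried, functional API over a flat `Record<string, TreeNode>`. As a minimal usage sketch (assuming the module resolves as `./tree` from the caller; only exported functions shown above are used), here is how the pieces compose: parents are inserted before children so `setNode` can register each child id on its parent, and `cascade` selection propagates to ancestors and descendants.

```typescript
// Minimal usage sketch for the tree model above (assumes it imports as './tree').
import {
    createTree, initTreeNode, setNode,
    getNodeChildrenIds, getNodeAncestorsIds,
    selectNode, getSelectedNodes,
} from './tree';

// Build a small tree; parents must be inserted before their children so
// setNode can append the child id to the parent's `children` list.
const tree = [
    initTreeNode({ id: 'projects', value: 'Projects' }),
    initTreeNode({ id: 'projects/a', parent: 'projects', value: 'Project A' }),
    initTreeNode({ id: 'projects/b', parent: 'projects', value: 'Project B' }),
].reduce((t, node) => setNode(node)(t), createTree<string>());

console.log(getNodeChildrenIds('projects')(tree));    // ['projects/a', 'projects/b']
console.log(getNodeAncestorsIds('projects/a')(tree)); // ['projects']

// With cascade=true, selecting a node also selects its descendants.
const selected = selectNode('projects', true)(tree);
console.log(getSelectedNodes(selected).map(n => n.id)); // includes 'projects' and both children
```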
  {
    "path": "services/workbench2/src/models/user.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { getUserDisplayName } from './user';\n\ndescribe('User', () => {\n    it('gets the user display name', () => {\n        const testCases = [\n            {\n                caseName: 'Full data available',\n                user: {\n                    email: 'someuser@example.com', username: 'someuser',\n                    firstName: 'Some', lastName: 'User',\n                    uuid: 'zzzzz-tpzed-someusersuuid',\n                    ownerUuid: 'zzzzz-tpzed-someusersowneruuid',\n                    prefs: {}, isAdmin: false, isActive: true,\n                    canWrite: false, canManage: false,\n                },\n                expect: 'Some User'\n            },\n            {\n                caseName: 'Full data available (with email)',\n                withEmail: true,\n                user: {\n                    email: 'someuser@example.com', username: 'someuser',\n                    firstName: 'Some', lastName: 'User',\n                    uuid: 'zzzzz-tpzed-someusersuuid',\n                    ownerUuid: 'zzzzz-tpzed-someusersowneruuid',\n                    prefs: {}, isAdmin: false, isActive: true,\n                    canWrite: false, canManage: false\n                },\n                expect: 'Some User <someuser@example.com>'\n            },\n            {\n                caseName: 'Missing first name',\n                user: {\n                    email: 'someuser@example.com', username: 'someuser',\n                    firstName: '', lastName: 'User',\n                    uuid: 'zzzzz-tpzed-someusersuuid',\n                    ownerUuid: 'zzzzz-tpzed-someusersowneruuid',\n                    prefs: {}, isAdmin: false, isActive: true,\n                    canWrite: false, canManage: false,\n\n                },\n                expect: 'someuser@example.com'\n            },\n            {\n                caseName: 'Missing last name',\n                user: {\n                    email: 'someuser@example.com', username: 'someuser',\n                    firstName: 'Some', lastName: '',\n                    uuid: 'zzzzz-tpzed-someusersuuid',\n                    ownerUuid: 'zzzzz-tpzed-someusersowneruuid',\n                    prefs: {}, isAdmin: false, isActive: true,\n                    canWrite: false, canManage: false,\n                },\n                expect: 'someuser@example.com'\n            },\n            {\n                caseName: 'Missing first & last names',\n                user: {\n                    email: 'someuser@example.com', username: 'someuser',\n                    firstName: '', lastName: '',\n                    uuid: 'zzzzz-tpzed-someusersuuid',\n                    ownerUuid: 'zzzzz-tpzed-someusersowneruuid',\n                    prefs: {}, isAdmin: false, isActive: true,\n                    canWrite: false, canManage: false,\n                },\n                expect: 'someuser@example.com'\n            },\n            {\n                caseName: 'Missing first & last names (with email)',\n                withEmail: true,\n                user: {\n                    email: 'someuser@example.com', username: 'someuser',\n                    firstName: '', lastName: '',\n                    uuid: 'zzzzz-tpzed-someusersuuid',\n                    ownerUuid: 'zzzzz-tpzed-someusersowneruuid',\n                    prefs: {}, isAdmin: false, isActive: true,\n                    canWrite: false, canManage: 
false,\n                },\n                expect: 'someuser@example.com'\n            },\n            {\n                caseName: 'Missing first & last names, and email address',\n                user: {\n                    email: '', username: 'someuser',\n                    firstName: '', lastName: '',\n                    uuid: 'zzzzz-tpzed-someusersuuid',\n                    ownerUuid: 'zzzzz-tpzed-someusersowneruuid',\n                    prefs: {}, isAdmin: false, isActive: true,\n                    canWrite: false, canManage: false,\n                },\n                expect: 'someuser'\n            },\n            {\n                caseName: 'Missing first & last names, and email address (with email)',\n                withEmail: true,\n                user: {\n                    email: '', username: 'someuser',\n                    firstName: '', lastName: '',\n                    uuid: 'zzzzz-tpzed-someusersuuid',\n                    ownerUuid: 'zzzzz-tpzed-someusersowneruuid',\n                    prefs: {}, isAdmin: false, isActive: true,\n                    canWrite: false, canManage: false,\n                },\n                expect: 'someuser'\n            },\n            {\n                caseName: 'Missing all data (should not happen)',\n                user: {\n                    email: '', username: '',\n                    firstName: '', lastName: '',\n                    uuid: 'zzzzz-tpzed-someusersuuid',\n                    ownerUuid: 'zzzzz-tpzed-someusersowneruuid',\n                    prefs: {}, isAdmin: false, isActive: true,\n                    canWrite: false, canManage: false,\n                },\n                expect: 'zzzzz-tpzed-someusersuuid'\n            },\n        ];\n        testCases.forEach(c => {\n            const dispName = getUserDisplayName(c.user, c.withEmail);\n            expect(dispName).to.equal(c.expect);\n        })\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/models/user.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource, ResourceKind, RESOURCE_UUID_REGEX } from 'models/resource';\n\nexport type RecentUuid = {\n    uuid: string;\n    lastVisited: string;\n}\n\nexport type UserPrefs = {\n    profile?: {\n        organization?: string,\n        organization_email?: string,\n        lab?: string,\n        website_url?: string,\n        role?: string\n    },\n    wb?: {\n        default_project_tab?: string,\n        recentUuids?: RecentUuid[],\n    },\n};\n\nexport interface User {\n    email: string;\n    firstName: string;\n    lastName: string;\n    uuid: string;\n    ownerUuid: string;\n    username: string;\n    prefs: UserPrefs;\n    isAdmin: boolean;\n    isActive: boolean;\n    canWrite: boolean;\n    canManage: boolean;\n}\n\nexport const getUserFullname = (user: User) => {\n    return user.firstName && user.lastName\n        ? `${user.firstName} ${user.lastName}`\n        : \"\";\n};\n\nexport const getUserDisplayName = (user: User, withEmail = false, withUuid = false) => {\n    const displayName = getUserFullname(user) || user.email || user.username || user.uuid;\n    let parts: string[] = [displayName];\n    if (withEmail && user.email && displayName !== user.email) {\n        parts.push(`<${user.email}>`);\n    }\n    if (withUuid) {\n        parts.push(`(${user.uuid})`);\n    }\n    return parts.join(' ');\n};\n\nexport const getUserDetailsString = (user: User) => {\n    let parts: string[] = [];\n    const userCluster = getUserClusterID(user);\n    user.username.length && parts.push(user.username);\n    user.email.length && parts.push(`<${user.email}>`);\n    userCluster && userCluster.length && parts.push(`(${userCluster})`);\n    return parts.join(' ');\n};\n\nexport const getUserClusterID = (user: User): string | undefined => {\n    const match = RESOURCE_UUID_REGEX.exec(user.uuid);\n    const parts = match ? match[0].split('-') : [];\n    return parts.length === 3 ? parts[0] : undefined;\n};\n\nexport interface UserResource extends Resource, User {\n    kind: ResourceKind.USER;\n}\n\nexport const isUserResource = (resource?: Resource): resource is UserResource => {\n    return !!resource && resource.kind === ResourceKind.USER;\n};\n"
  },
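As a quick illustration of the fallback chain in `getUserDisplayName` (full name, then email, then username, then uuid, with optional email/uuid suffixes), here is a hypothetical call; the user literal below is invented for the example.

```typescript
import { getUserDisplayName, User } from 'models/user';

// Hypothetical user record; the full name is populated, so it wins.
const user: User = {
    email: 'jdoe@example.com', username: 'jdoe',
    firstName: 'Jane', lastName: 'Doe',
    uuid: 'zzzzz-tpzed-0123456789abcde',
    ownerUuid: 'zzzzz-tpzed-0123456789abcde',
    prefs: {}, isAdmin: false, isActive: true,
    canWrite: false, canManage: false,
};

getUserDisplayName(user);             // 'Jane Doe'
getUserDisplayName(user, true);       // 'Jane Doe <jdoe@example.com>'
getUserDisplayName(user, true, true); // 'Jane Doe <jdoe@example.com> (zzzzz-tpzed-0123456789abcde)'
```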
  {
    "path": "services/workbench2/src/models/virtual-machines.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource } from \"models/resource\";\n\nexport interface VirtualMachinesResource extends Resource {\n    hostname: string;\n}\n\nexport interface VirtualMachinesLoginsItems {\n    hostname: string;\n    username: string;\n    public_key: string;\n    userUuid: string;\n    virtualMachineUuid: string;\n    authorizedKeyUuid: string;\n}\n\nexport interface VirtualMachineLogins {\n    kind: string;\n    items: VirtualMachinesLoginsItems[];\n}"
  },
  {
    "path": "services/workbench2/src/models/vocabulary.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport * as Vocabulary from './vocabulary';\nimport { isEqual } from 'lodash';\n\ndescribe('Vocabulary', () => {\n    let vocabulary;\n\n    beforeEach(() => {\n        vocabulary = {\n            strict_tags: false,\n            tags: {\n                IDKEYCOMMENT: {\n                    labels: []\n                },\n                IDKEYANIMALS: {\n                    strict: false,\n                    labels: [\n                        {label: \"Animal\" },\n                        {label: \"Creature\"},\n                        {label: \"Beast\"},\n                    ],\n                    values: {\n                        IDVALANIMALS1: {\n                            labels: [\n                                {label: \"Human\"},\n                                {label: \"Homo sapiens\"}\n                            ]\n                        },\n                        IDVALANIMALS2: {\n                            labels: [\n                                {label: \"Dog\"},\n                                {label: \"Canis lupus familiaris\"}\n                            ]\n                        },\n                    }\n                },\n                IDKEYSIZES: {\n                    labels: [{label: \"Sizes\"}],\n                    values: {\n                        IDVALSIZES1: {\n                            labels: [{label: \"Small\"}, {label: \"S\"}, {label: \"Little\"}]\n                        },\n                        IDVALSIZES2: {\n                            labels: [{label: \"Medium\"}, {label: \"M\"}]\n                        },\n                        IDVALSIZES3: {\n                            labels: [{label: \"Large\"}, {label: \"L\"}]\n                        },\n                        IDVALSIZES4: {\n                            labels: []\n                        }\n                    }\n                },\n                automation: {\n                    strict: true,\n                    labels: [],\n                    values: {\n                        upload: { labels: [] },\n                        results: { labels: [] },\n                    }\n                }\n            }\n        }\n    });\n\n    it('returns the list of tag keys', () => {\n        const tagKeys = Vocabulary.getTags(vocabulary);\n        // Alphabetically ordered by label\n        expect(isEqual(tagKeys, [\n            {id: \"IDKEYANIMALS\", label: \"Animal\"},\n            {id: \"IDKEYANIMALS\", label: \"Beast\"},\n            {id: \"IDKEYANIMALS\", label: \"Creature\"},\n            {id: \"IDKEYCOMMENT\", label: \"IDKEYCOMMENT\"},\n            {id: \"IDKEYSIZES\", label: \"Sizes\"},\n            {id: \"automation\", label: \"automation\"},\n        ])).to.equal(true);\n    });\n\n    it('returns the list of preferred tag keys', () => {\n        const preferredTagKeys = Vocabulary.getPreferredTags(vocabulary);\n        // Alphabetically ordered by label\n        expect(isEqual(preferredTagKeys, [\n            {id: \"IDKEYANIMALS\", label: \"Animal\", synonyms: []},\n            {id: \"IDKEYCOMMENT\", label: \"IDKEYCOMMENT\", synonyms: []},\n            {id: \"IDKEYSIZES\", label: \"Sizes\", synonyms: []},\n            {id: \"automation\", label: \"automation\", synonyms: []},\n        ])).to.equal(true);\n    });\n\n    it('returns the list of preferred tag keys with matching synonyms', () => {\n        const preferredTagKeys = 
Vocabulary.getPreferredTags(vocabulary, 'creat');\n        // Alphabetically ordered by label\n        expect(isEqual(preferredTagKeys, [\n            {id: \"IDKEYANIMALS\", label: \"Animal\", synonyms: [\"Creature\"]},\n            {id: \"IDKEYCOMMENT\", label: \"IDKEYCOMMENT\", synonyms: []},\n            {id: \"IDKEYSIZES\", label: \"Sizes\", synonyms: []},\n            {id: \"automation\", label: \"automation\", synonyms: []},\n        ])).to.equal(true);\n    });\n\n    it('returns the tag values for a given key', () => {\n        const tagValues = Vocabulary.getTagValues('IDKEYSIZES', vocabulary);\n        // Alphabetically ordered by label\n        expect(isEqual(tagValues, [\n            {id: \"IDVALSIZES4\", label: \"IDVALSIZES4\"},\n            {id: \"IDVALSIZES3\", label: \"L\"},\n            {id: \"IDVALSIZES3\", label: \"Large\"},\n            {id: \"IDVALSIZES1\", label: \"Little\"},\n            {id: \"IDVALSIZES2\", label: \"M\"},\n            {id: \"IDVALSIZES2\", label: \"Medium\"},\n            {id: \"IDVALSIZES1\", label: \"S\"},\n            {id: \"IDVALSIZES1\", label: \"Small\"},\n        ])).to.equal(true);\n        // Let's try a key that doesn't have any labels\n        const tagValues2 = Vocabulary.getTagValues('automation', vocabulary);\n        expect(isEqual(tagValues2, [\n            {id: \"results\", label: \"results\"},\n            {id: \"upload\", label: \"upload\"},\n        ])).to.equal(true);\n    });\n\n    it('returns the preferred tag values for a given key', () => {\n        const preferredTagValues = Vocabulary.getPreferredTagValues('IDKEYSIZES', vocabulary);\n        // Alphabetically ordered by label\n        expect(isEqual(preferredTagValues, [\n            {id: \"IDVALSIZES4\", label: \"IDVALSIZES4\", synonyms: []},\n            {id: \"IDVALSIZES3\", label: \"Large\", synonyms: []},\n            {id: \"IDVALSIZES2\", label: \"Medium\", synonyms: []},\n            {id: \"IDVALSIZES1\", label: \"Small\", synonyms: []},\n        ])).to.equal(true);\n        // Let's try a key that doesn't have any labels\n        const preferredTagValues2 = Vocabulary.getPreferredTagValues('automation', vocabulary);\n        expect(isEqual(preferredTagValues2, [\n            {id: \"results\", label: \"results\", synonyms: []},\n            {id: \"upload\", label: \"upload\", synonyms: []},\n        ])).to.equal(true);\n    });\n\n    it('returns the preferred tag values with matching synonyms for a given key', () => {\n        const preferredTagValues = Vocabulary.getPreferredTagValues('IDKEYSIZES', vocabulary, 'litt');\n        // Alphabetically ordered by label\n        expect(isEqual(preferredTagValues, [\n            {id: \"IDVALSIZES4\", label: \"IDVALSIZES4\", synonyms: []},\n            {id: \"IDVALSIZES3\", label: \"Large\", synonyms: []},\n            {id: \"IDVALSIZES2\", label: \"Medium\", synonyms: []},\n            {id: \"IDVALSIZES1\", label: \"Small\", synonyms: [\"Little\"]},\n        ])).to.equal(true);\n    });\n\n    it('returns an empty list of values for a non-existent key', () => {\n        const tagValues = Vocabulary.getTagValues('IDNONSENSE', vocabulary);\n        expect(isEqual(tagValues, [])).to.equal(true);\n    });\n\n    it('returns a key id for a given key label', () => {\n        const testCases = [\n            // Two labels belonging to the same ID\n            {keyLabel: 'Animal', expected: 'IDKEYANIMALS'},\n            {keyLabel: 'Creature', expected: 'IDKEYANIMALS'},\n            // Non-existent label returns empty string\n            {keyLabel: 'ThisKeyLabelDoesntExist', expected: ''},\n            // Key with no labels still returns the key ID\n            {keyLabel: 'automation', expected: 'automation'},\n        ]\n        testCases.forEach(tc => {\n            const tagValueID = Vocabulary.getTagKeyID(tc.keyLabel, vocabulary);\n            expect(tagValueID).to.equal(tc.expected);\n        });\n    });\n\n    it('returns a key label for a given key id', () => {\n        const testCases = [\n            // ID with many labels returns the first one\n            {keyID: 'IDKEYANIMALS', expected: 'Animal'},\n            // Key IDs without any labels or unknown keys should return the literal\n            // key from the API's response (that is, the key 'id')\n            {keyID: 'IDKEYCOMMENT', expected: 'IDKEYCOMMENT'},\n            {keyID: 'FOO', expected: 'FOO'},\n        ]\n        testCases.forEach(tc => {\n            const tagValueID = Vocabulary.getTagKeyLabel(tc.keyID, vocabulary);\n            expect(tagValueID).to.equal(tc.expected);\n        });\n    });\n\n    it('returns a value id for a given key id and value label', () => {\n        const testCases = [\n            // Key ID and value label known\n            {keyID: 'IDKEYANIMALS', valueLabel: 'Human', expected: 'IDVALANIMALS1'},\n            {keyID: 'IDKEYANIMALS', valueLabel: 'Homo sapiens', expected: 'IDVALANIMALS1'},\n            // Key ID known, value label unknown\n            {keyID: 'IDKEYANIMALS', valueLabel: 'Dinosaur', expected: ''},\n            // Key ID unknown\n            {keyID: 'IDNONSENSE', valueLabel: 'Does not matter', expected: ''},\n            // Value with no labels still returns the value ID\n            {keyID: 'automation', valueLabel: 'results', expected: 'results'},\n        ]\n        testCases.forEach(tc => {\n            const tagValueID = Vocabulary.getTagValueID(tc.keyID, tc.valueLabel, vocabulary);\n            expect(tagValueID).to.equal(tc.expected);\n        });\n    });\n\n    it('returns a value label for a given key & value id pair', () => {\n        const testCases = [\n            // Known key & value ids with multiple value labels: returns the first label\n            {keyId: 'IDKEYANIMALS', valueId: 'IDVALANIMALS1', expected: 'Human'},\n            // Values without label or unknown values should return the literal value from\n            // the API's response (that is, the value 'id')\n            {keyId: 'IDKEYSIZES', valueId: 'IDVALSIZES4', expected: 'IDVALSIZES4'},\n            {keyId: 'IDKEYCOMMENT', valueId: 'FOO', expected: 'FOO'},\n            {keyId: 'IDKEYANIMALS', valueId: 'BAR', expected: 'BAR'},\n            {keyId: 'IDKEYNONSENSE', valueId: 'FOOBAR', expected: 'FOOBAR'},\n        ]\n        testCases.forEach(tc => {\n            const tagValueLabel = Vocabulary.getTagValueLabel(tc.keyId, tc.valueId, vocabulary);\n            expect(tagValueLabel).to.equal(tc.expected);\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/models/vocabulary.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { escapeRegExp } from 'common/regexp';\nimport { isObject, has, every } from 'lodash/fp';\n\nexport interface Vocabulary {\n    strict_tags: boolean;\n    tags: Record<string, Tag>;\n}\n\nexport interface Label {\n    lang?: string;\n    label: string;\n}\n\nexport interface TagValue {\n    labels: Label[];\n}\n\nexport interface Tag {\n    strict?: boolean;\n    labels: Label[];\n    values?: Record<string, TagValue>;\n}\n\nexport interface PropFieldSuggestion {\n    id: string;\n    label: string;\n    synonyms?: string[];\n}\n\nconst VOCABULARY_VALIDATORS = [\n    isObject,\n    has('strict_tags'),\n    has('tags'),\n];\n\nexport const isVocabulary = (value: any) =>\n    every(validator => validator(value), VOCABULARY_VALIDATORS);\n\nexport const isStrictTag = (tagKeyID: string, vocabulary: Vocabulary) => {\n    const tag = vocabulary.tags[tagKeyID];\n    return tag ? tag.strict : false;\n};\n\nexport const getTagValueID = (tagKeyID:string, tagValueLabel:string, vocabulary: Vocabulary) => {\n    // if labels and keys are present in vocabulary, strict_tags is irreleveant\n    if (vocabulary.tags[tagKeyID] && vocabulary.tags[tagKeyID].values) {\n        const values = vocabulary.tags[tagKeyID].values!;\n        return Object.keys(values).find(k =>\n            (k.toLowerCase() === tagValueLabel.toLowerCase())\n            || values[k].labels.find(\n                l => l.label.toLowerCase() === tagValueLabel.toLowerCase()) !== undefined)\n            || '';\n    };\n    if (vocabulary.strict_tags === false && vocabulary.tags[tagKeyID] && vocabulary.tags[tagKeyID].labels) {\n        return tagValueLabel;\n    }\n    return '';\n};\n\nexport const getTagValueLabel = (tagKeyID:string, tagValueID:string, vocabulary: Vocabulary) =>\n    vocabulary.tags[tagKeyID] &&\n    vocabulary.tags[tagKeyID].values &&\n    vocabulary.tags[tagKeyID].values![tagValueID] &&\n    vocabulary.tags[tagKeyID].values![tagValueID].labels.length > 0\n        ? vocabulary.tags[tagKeyID].values![tagValueID].labels[0].label\n        : tagValueID;\n\nconst compare = (a: PropFieldSuggestion, b: PropFieldSuggestion) => {\n    if (a.label < b.label) {return -1;}\n    if (a.label > b.label) {return 1;}\n    return 0;\n};\n\nexport const getTagValues = (tagKeyID: string, vocabulary: Vocabulary): PropFieldSuggestion[] => {\n    const tag = vocabulary.tags[tagKeyID];\n    return tag && tag.values\n        ? Object.keys(tag.values).map(\n            tagValueID => tag.values![tagValueID].labels && tag.values![tagValueID].labels.length > 0\n                ? tag.values![tagValueID].labels.map(\n                    lbl => Object.assign({}, {\"id\": tagValueID, \"label\": lbl.label}))\n                : [{\"id\": tagValueID, \"label\": tagValueID}])\n            .reduce((prev, curr) => [...prev, ...curr], [])\n            .sort(compare)\n        : [];\n};\n\nexport const getPreferredTagValues = (tagKeyID: string, vocabulary: Vocabulary, withMatch?: string): PropFieldSuggestion[] => {\n    const tag = vocabulary.tags[tagKeyID];\n    const regex = !!withMatch ? new RegExp(escapeRegExp(withMatch), 'i') : undefined;\n    return tag && tag.values\n        ? Object.keys(tag.values).map(\n            tagValueID => tag.values![tagValueID].labels && tag.values![tagValueID].labels.length > 0\n                ? 
{\n                    \"id\": tagValueID,\n                    \"label\": tag.values![tagValueID].labels[0].label,\n                    \"synonyms\": !!withMatch && tag.values![tagValueID].labels.length > 1\n                        ? tag.values![tagValueID].labels.slice(1)\n                            .filter(l => !!regex ? regex.test(l.label) : true)\n                            .map(l => l.label)\n                        : []\n                }\n                : {\"id\": tagValueID, \"label\": tagValueID, \"synonyms\": []})\n            .sort(compare)\n        : [];\n};\n\nexport const getTags = ({ tags }: Vocabulary): PropFieldSuggestion[] => {\n    return tags && Object.keys(tags)\n        ? Object.keys(tags).map(\n            tagID => tags[tagID].labels && tags[tagID].labels.length > 0\n                ? tags[tagID].labels.map(\n                    lbl => Object.assign({}, {\"id\": tagID, \"label\": lbl.label}))\n                : [{\"id\": tagID, \"label\": tagID}])\n            .reduce((prev, curr) => [...prev, ...curr], [])\n            .sort(compare)\n        : [];\n};\n\nexport const getPreferredTags = ({ tags }: Vocabulary, withMatch?: string): PropFieldSuggestion[] => {\n    const regex = !!withMatch ? new RegExp(escapeRegExp(withMatch), 'i') : undefined;\n    return tags && Object.keys(tags)\n        ? Object.keys(tags).map(\n            tagID => tags[tagID].labels && tags[tagID].labels.length > 0\n                ? {\n                    \"id\": tagID,\n                    \"label\": tags[tagID].labels[0].label,\n                    \"synonyms\": !!withMatch && tags[tagID].labels.length > 1\n                        ? tags[tagID].labels.slice(1)\n                                .filter(l => !!regex ? regex.test(l.label) : true)\n                                .map(lbl => lbl.label)\n                        : []\n                }\n                : {\"id\": tagID, \"label\": tagID, \"synonyms\": []})\n            .sort(compare)\n        : [];\n};\n\nexport const getTagKeyID = (tagKeyLabel: string, vocabulary: Vocabulary) =>\n    Object.keys(vocabulary.tags).find(k => (k.toLowerCase() === tagKeyLabel.toLowerCase())\n        || vocabulary.tags[k].labels.find(\n            l => l.label.toLowerCase() === tagKeyLabel.toLowerCase()) !== undefined)\n        || '';\n\nexport const getTagKeyLabel = (tagKeyID:string, vocabulary: Vocabulary) =>\n    vocabulary.tags[tagKeyID] && vocabulary.tags[tagKeyID].labels.length > 0\n    ? vocabulary.tags[tagKeyID].labels[0].label\n    : tagKeyID;\n"
  },
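The lookup helpers above translate in both directions between vocabulary IDs and human-readable labels, matching case-insensitively and falling back to the literal ID when no label exists. A small sketch of the round trip, using a made-up vocabulary:

```typescript
import { getTagKeyID, getTagKeyLabel, getTagValueID, getTagValueLabel, Vocabulary } from 'models/vocabulary';

// Made-up vocabulary for illustration.
const vocabulary: Vocabulary = {
    strict_tags: false,
    tags: {
        IDCOLOR: {
            labels: [{ label: 'Color' }, { label: 'Colour' }],
            values: { IDRED: { labels: [{ label: 'Red' }] } },
        },
    },
};

getTagKeyID('colour', vocabulary);                  // 'IDCOLOR' (label match is case-insensitive)
getTagKeyLabel('IDCOLOR', vocabulary);              // 'Color' (first label wins)
getTagValueID('IDCOLOR', 'red', vocabulary);        // 'IDRED'
getTagValueLabel('IDCOLOR', 'IDRED', vocabulary);   // 'Red'
getTagValueLabel('IDCOLOR', 'IDBLUE', vocabulary);  // 'IDBLUE' (unknown ids fall back to the literal id)
```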
  {
    "path": "services/workbench2/src/models/workflow.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource, ResourceKind } from \"./resource\";\nimport yaml from 'js-yaml';\nimport { CommandOutputParameter } from \"cwlts/mappings/v1.0/CommandOutputParameter\";\n\nexport interface WorkflowResource extends Resource {\n    kind: ResourceKind.WORKFLOW;\n    name: string;\n    description: string | null;\n    definition: string;\n}\nexport interface WorkflowResourceDefinition {\n    cwlVersion: string;\n    $graph?: Array<Workflow | CommandLineTool>;\n}\nexport interface Workflow {\n    class: 'Workflow';\n    doc?: string;\n    id?: string;\n    inputs: CommandInputParameter[];\n    outputs: any[];\n    steps: any[];\n    hints?: ProcessRequirement[];\n}\n\nexport interface CommandLineTool {\n    class: 'CommandLineTool';\n    id: string;\n    inputs: CommandInputParameter[];\n    outputs: any[];\n    hints?: ProcessRequirement[];\n}\n\nexport type ProcessRequirement = GenericProcessRequirement | WorkflowRunnerResources;\n\nexport interface GenericProcessRequirement {\n    class: string;\n}\n\nexport interface WorkflowRunnerResources {\n    class: 'http://arvados.org/cwl#WorkflowRunnerResources';\n    ramMin?: number;\n    coresMin?: number;\n    keep_cache?: number;\n    acrContainerImage?: string;\n}\n\nexport type CommandInputParameter =\n    BooleanCommandInputParameter |\n    IntCommandInputParameter |\n    LongCommandInputParameter |\n    FloatCommandInputParameter |\n    DoubleCommandInputParameter |\n    StringCommandInputParameter |\n    FileCommandInputParameter |\n    DirectoryCommandInputParameter |\n    StringArrayCommandInputParameter |\n    IntArrayCommandInputParameter |\n    FloatArrayCommandInputParameter |\n    FileArrayCommandInputParameter |\n    DirectoryArrayCommandInputParameter |\n    EnumCommandInputParameter;\n\nexport enum CWLType {\n    NULL = 'null',\n    BOOLEAN = 'boolean',\n    INT = 'int',\n    LONG = 'long',\n    FLOAT = 'float',\n    DOUBLE = 'double',\n    STRING = 'string',\n    FILE = 'File',\n    DIRECTORY = 'Directory',\n}\n\nexport interface CommandInputEnumSchema {\n    symbols: string[];\n    type: 'enum';\n    label?: string;\n    name?: string;\n}\n\nexport interface CommandInputArraySchema<ItemType> {\n    items: ItemType;\n    type: 'array';\n    label?: string;\n}\n\nexport interface File {\n    class: CWLType.FILE;\n    location?: string;\n    path?: string;\n    basename?: string;\n}\n\nexport interface Directory {\n    class: CWLType.DIRECTORY;\n    location?: string;\n    path?: string;\n    basename?: string;\n}\n\nexport interface SecretInclude {\n    $include: string;\n}\n\nexport interface GenericCommandInputParameter<Type, Value> {\n    id: string;\n    label?: string;\n    doc?: string | string[];\n    default?: Value;\n    type?: Type | Array<Type | CWLType.NULL>;\n    value?: Value;\n    disabled?: boolean;\n    secret?: boolean;\n}\nexport type GenericArrayCommandInputParameter<Type, Value> = GenericCommandInputParameter<CommandInputArraySchema<Type>, Value[]>;\n\nexport type BooleanCommandInputParameter = GenericCommandInputParameter<CWLType.BOOLEAN, boolean>;\nexport type IntCommandInputParameter = GenericCommandInputParameter<CWLType.INT, number>;\nexport type LongCommandInputParameter = GenericCommandInputParameter<CWLType.LONG, number>;\nexport type FloatCommandInputParameter = GenericCommandInputParameter<CWLType.FLOAT, number>;\nexport type DoubleCommandInputParameter = 
GenericCommandInputParameter<CWLType.DOUBLE, number>;\nexport type StringCommandInputParameter = GenericCommandInputParameter<CWLType.STRING, string>;\nexport type FileCommandInputParameter = GenericCommandInputParameter<CWLType.FILE, File>;\nexport type DirectoryCommandInputParameter = GenericCommandInputParameter<CWLType.DIRECTORY, Directory>;\nexport type EnumCommandInputParameter = GenericCommandInputParameter<CommandInputEnumSchema, string>;\n\nexport type StringArrayCommandInputParameter = GenericArrayCommandInputParameter<CWLType.STRING, string>;\nexport type IntArrayCommandInputParameter = GenericArrayCommandInputParameter<CWLType.INT, string>;\nexport type FloatArrayCommandInputParameter = GenericArrayCommandInputParameter<CWLType.FLOAT, string>;\nexport type FileArrayCommandInputParameter = GenericArrayCommandInputParameter<CWLType.FILE, File>;\nexport type DirectoryArrayCommandInputParameter = GenericArrayCommandInputParameter<CWLType.DIRECTORY, Directory>;\nexport type SecretCommandInputParameter = GenericArrayCommandInputParameter<CWLType.STRING, SecretInclude>;\n\nexport type WorkflowInputsData = {\n    [key: string]: boolean | number | string | File | Directory | SecretInclude;\n};\nexport const parseWorkflowDefinition = (workflow: WorkflowResource): WorkflowResourceDefinition => {\n    const definition = yaml.load(workflow.definition);\n    return definition;\n};\n\nexport const getWorkflow = (workflowDefinition: WorkflowResourceDefinition) => {\n    if (!workflowDefinition.$graph) { return undefined; }\n    const mainWorkflow = workflowDefinition.$graph.find(item => item.id === '#main');\n    return mainWorkflow\n        ? mainWorkflow\n        : undefined;\n};\n\nexport interface CwlSecrets {\n    class: 'http://commonwl.org/cwltool#Secrets';\n    secrets: string[];\n}\n\nexport const getWorkflowInputs = (workflowDefinition: WorkflowResourceDefinition) => {\n    if (!workflowDefinition) { return undefined; }\n    const wf = getWorkflow(workflowDefinition);\n    if (!wf) { return undefined; }\n    const inputs = wf.inputs;\n    if (wf.hints) {\n        const secrets = wf.hints.find(item => item.class === 'http://commonwl.org/cwltool#Secrets') as CwlSecrets | undefined;\n        if (secrets?.secrets) {\n            inputs.forEach((param) => {\n                param.secret = secrets.secrets.includes(param.id);\n            });\n        }\n    }\n\n    return inputs;\n};\n\n\nexport const getWorkflowOutputs = (workflowDefinition: WorkflowResourceDefinition) => {\n    if (!workflowDefinition) { return undefined; }\n    return getWorkflow(workflowDefinition)\n        ? getWorkflow(workflowDefinition)!.outputs\n        : undefined;\n};\n\nexport const getInputLabel = (input: CommandInputParameter) => {\n    return `${input.label || input.id.split('/').pop()}`;\n};\n\nexport const getIOParamId = (input: CommandInputParameter | CommandOutputParameter) => {\n    return `${input.id.split('/').pop()}`;\n};\n\nexport const isRequiredInput = ({ type }: CommandInputParameter) => {\n    if (type instanceof Array) {\n        for (const t of type) {\n            if (t === CWLType.NULL) {\n                return false;\n            }\n        }\n    }\n    return true;\n};\n\nexport const isPrimitiveOfType = (input: GenericCommandInputParameter<any, any>, type: CWLType) =>\n    input.type instanceof Array\n        ? 
input.type.indexOf(type) > -1\n        : input.type === type;\n\nexport const isArrayOfType = (input: GenericCommandInputParameter<any, any>, type: CWLType) =>\n    input.type instanceof Array\n        ? (input.type.filter(t => typeof t === 'object' &&\n            t.type === 'array' &&\n            t.items === type).length > 0)\n        : (typeof input.type === 'object' &&\n            input.type.type === 'array' &&\n            input.type.items === type);\n\nexport const getEnumType = (input: GenericCommandInputParameter<any, any>) => {\n    if (input.type instanceof Array) {\n        const f = input.type.filter(t => typeof t === 'object' &&\n            !(t instanceof Array) &&\n            t.type === 'enum');\n        if (f.length > 0) {\n            return f[0];\n        }\n    } else {\n        if ((typeof input.type === 'object' &&\n            !(input.type instanceof Array) &&\n            input.type.type === 'enum')) {\n            return input.type;\n        }\n    }\n    return null;\n};\n\nexport const isSecret = (input: GenericCommandInputParameter<any, any>) =>\n    (typeof input.value === 'object') && input.value.$include?.startsWith(\"/secrets/\");\n\nexport const stringifyInputType = ({ type }: CommandInputParameter) => {\n\tif (typeof type === 'string') {\n        return type;\n    } else if (type instanceof Array) {\n        return type.join(' | ');\n    } else if (typeof type === 'object') {\n        if (type.type === 'enum') {\n            return 'enum';\n        } else if (type.type === 'array') {\n            return `${type.items}[]`;\n        } else {\n            return 'unknown';\n        }\n    } else {\n        return 'unknown';\n    }\n};\n\nexport const isWorkflowResource = (resource?: Resource): resource is WorkflowResource => {\n    return !!resource && resource.kind === ResourceKind.WORKFLOW;\n};\n"
  },
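To make the definition-parsing flow concrete, here is a sketch that feeds a tiny CWL document through `parseWorkflowDefinition` and `getWorkflowInputs`. The workflow resource literal is invented and trimmed to the one field these helpers actually read, hence the cast.

```typescript
import { parseWorkflowDefinition, getWorkflowInputs, WorkflowResource } from 'models/workflow';

// Invented, minimal workflow resource; only `definition` matters here.
const workflow = {
    definition: `
cwlVersion: v1.2
$graph:
- class: Workflow
  id: '#main'
  inputs:
  - id: '#main/sample_id'
    type: string
  outputs: []
  steps: []
`,
} as WorkflowResource;

// parseWorkflowDefinition loads the YAML; getWorkflowInputs finds the
// '#main' workflow in $graph and returns its inputs (marking secrets
// when a cwltool#Secrets hint is present, which this example omits).
const definition = parseWorkflowDefinition(workflow);
console.log(getWorkflowInputs(definition)); // [{ id: '#main/sample_id', type: 'string' }]
```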
  {
    "path": "services/workbench2/src/plugins/README.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Plugin support\n\nWorkbench supports plugins to add new functionality to the user\ninterface.  It is also possible to remove the majority of standard UI\nelements and replace them with your own, enabling you to use workbench\nas a basis for developing essentially new applications for Arvados.\n\n## Installing plugins\n\n1. Check out the source of your plugin into a directory under `arvados-workbench2/src/plugins`\n\n2. Register the plugin by editing `arvados-workbench2/src/plugins/plugins.tsx`.\nIt will look something like this:\n\n```\nimport { register as examplePluginRegister } from 'plugins/example/index';\nexamplePluginRegister(pluginConfig);\n```\n\n3. Rebuild Workbench 2\n\nFor testing/development: `yarn start`\n\nFor production: `APP_NAME=arvados-workbench2-with-custom-plugins make packages`\n\nSet `APP_NAME=` to whatever you like, but it is important to name it\ndifferently from the standard `arvados-workbench2` to avoid confusion.\n\n## Existing plugins\n\n### example\n\nThis is an example plugin showing how to add a new navigation tree\nitem, displaying a new center panel, as well as adding account menu\nand \"New\" menu items, and showing how to use SET_PROPERTY and\ngetProperty() for state.\n\n### blank\n\nThis deletes all of the existing user interface.  If you want the\napplication to only display your plugin's UI elements and none of the\nstandard elements, you would load and register this first.\n\n### root-redirect\n\nThis helper takes a path when registered.  It tweaks the navigation\nbehavior so that the default starting location when the application\nloads will be the path you provide, instead of \"Projects\".\n\n### sample-tracker\n\nThis is a a new set of user interface screens that assist with\nclinical sample tracking and analysis.  It is intended as a demo of\nhow a real-world application can built using the Workbench 2\nplug-in interface.  It can be found at\nhttps://github.com/arvados/sample-tracker .\n\n## Developing plugins\n\nFor information about the plugin API, see\n[../common/plugintypes.ts](src/common/plugintypes.ts).\n"
  },
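For orientation, a plugin's `register` function typically follows the same shape as the bundled ones: it receives the mutable `PluginConfig` and pushes hooks onto its lists. A minimal hypothetical plugin (the `HelloPanel` component, `/hello` route, and 'Hello' category are all invented for this sketch) might look like:

```typescript
// Hypothetical plugins/hello/index.tsx -- a minimal register() in the
// same shape as the bundled example plugin.
import { PluginConfig } from 'common/plugintypes';
import React from 'react';
import { Route } from 'react-router';

const HelloPanel = () => <div>Hello from a plugin</div>;

export const register = (pluginConfig: PluginConfig) => {
    // Render HelloPanel when the app navigates to /hello.
    pluginConfig.centerPanelList.push(elms => {
        elms.push(<Route path="/hello" component={HelloPanel} />);
        return elms;
    });
    // Add a 'Hello' entry to the left-hand tree view.
    pluginConfig.sidePanelCategories.push(cats => { cats.push('Hello'); return cats; });
};
```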
  {
    "path": "services/workbench2/src/plugins/blank/index.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Example plugin.\n\nimport { PluginConfig } from 'common/plugintypes';\nimport React from 'react';\n\nexport const register = (pluginConfig: PluginConfig) => {\n\n    pluginConfig.centerPanelList.push((elms) => []);\n\n    pluginConfig.sidePanelCategories.push((cats: string[]): string[] => []);\n\n    pluginConfig.accountMenuList.push((elms) => []);\n    pluginConfig.newButtonMenuList.push((elms) => []);\n\n    pluginConfig.appBarLeft = <span />;\n    pluginConfig.appBarMiddle = <span />;\n    pluginConfig.appBarRight = <span />;\n};\n"
  },
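The blank plugin works because each entry in these `PluginConfig` lists is a transformation over the elements accumulated so far; a function that ignores its input and returns `[]` discards whatever the stock UI (and earlier plugins) contributed. A sketch of that composition, under the assumption that the host application reduces over each list roughly like this (`applyMutators` is an invented name, not workbench API):

```typescript
import React from 'react';

// Assumed shape of how workbench applies a PluginConfig mutator list:
// each registered function receives the list built so far and returns
// the new list, so registration order matters.
type Mutator<T> = (items: T[]) => T[];

const applyMutators = <T,>(mutators: Mutator<T>[], initial: T[]): T[] =>
    mutators.reduce((items, fn) => fn(items), initial);

// Registering the blank plugin first wipes the defaults; a later
// plugin can then add its own elements on top of the empty list.
const centerPanelList: Mutator<React.ReactElement>[] = [
    () => [],                               // blank plugin
    elms => [...elms, <span key="mine" />], // a later plugin's panel
];
console.log(applyMutators(centerPanelList, [/* stock panels */]).length); // 1
```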
  {
    "path": "services/workbench2/src/plugins/example/exampleComponents.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { WithDialogProps } from 'store/dialog/with-dialog';\nimport { ServiceRepository } from \"services/services\";\nimport { Dispatch } from \"redux\";\nimport { RootState } from 'store/store';\nimport { initialize } from 'redux-form';\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { reduxForm, InjectedFormProps, Field, reset, startSubmit } from 'redux-form';\nimport { TextField } from \"components/text-field/text-field\";\nimport { FormDialog } from 'components/form-dialog/form-dialog';\nimport { withDialog } from \"store/dialog/with-dialog\";\nimport { compose } from \"redux\";\nimport { propertiesActions } from \"store/properties/properties-actions\";\nimport { DispatchProp, connect } from 'react-redux';\nimport { MenuItem } from \"@mui/material\";\nimport { Card, CardContent, Typography } from \"@mui/material\";\n\n// This is the name of the dialog box.  It in store actions that\n// open/close the dialog box.\nexport const EXAMPLE_DIALOG_FORM_NAME = \"exampleFormName\";\n\n// This is the name of the property that will be used to store the\n// \"pressed\" count\nexport const propertyKey = \"Example_menu_item_pressed_count\";\n\n// The model backing the form.\nexport interface ExampleFormDialogData {\n    pressedCount: number | string;  // Supposed to start as a number but TextField seems to turn this into a string, unfortunately.\n}\n\n// The actual component with the editing fields.  Enables editing\n// the 'pressedCount' field.\nconst ExampleEditFields = () => <span>\n    <Field\n        name='pressedCount'\n        component={TextField as any}\n        type=\"number\"\n    />\n</span>;\n\n// Callback for when the form is submitted.\nconst submitEditedPressedCount = (data: ExampleFormDialogData) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(startSubmit(EXAMPLE_DIALOG_FORM_NAME));\n        dispatch(propertiesActions.SET_PROPERTY({\n            key: propertyKey, value: parseInt(data.pressedCount as string, 10)\n        }));\n        dispatch(dialogActions.CLOSE_DIALOG({ id: EXAMPLE_DIALOG_FORM_NAME }));\n        dispatch(reset(EXAMPLE_DIALOG_FORM_NAME));\n    };\n\n// Props for the dialog component\ntype DialogExampleProps = WithDialogProps<{ updating: boolean }> & InjectedFormProps<ExampleFormDialogData>;\n\n// This is the component that renders the dialog.\nconst DialogExample = (props: DialogExampleProps) =>\n    <FormDialog\n        dialogTitle=\"Edit pressed count\"\n        formFields={ExampleEditFields}\n        submitLabel=\"Update pressed count\"\n        {...props}\n    />;\n\n// This ties it all together, withDialog() determines if the dialog is\n// visible based on state, and reduxForm manages the values of the\n// dialog's fields.\nexport const ExampleDialog = compose(\n    withDialog(EXAMPLE_DIALOG_FORM_NAME),\n    reduxForm<ExampleFormDialogData>({\n        form: EXAMPLE_DIALOG_FORM_NAME,\n        onSubmit: (data, dispatch) => {\n            dispatch(submitEditedPressedCount(data));\n        }\n    })\n)(DialogExample);\n\n\n// Callback, dispatches an action to set the value of property\n// \"Example_menu_item_pressed_count\"\nconst incrementPressedCount = (dispatch: Dispatch, pressedCount: number) => {\n    dispatch(propertiesActions.SET_PROPERTY({ key: propertyKey, value: pressedCount + 1 }));\n};\n\n// Callback, dispatches actions 
required to initialize and open the\n// dialog box.\nexport const openExampleDialog = (pressedCount: number) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(initialize(EXAMPLE_DIALOG_FORM_NAME, { pressedCount }));\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: EXAMPLE_DIALOG_FORM_NAME, data: {}\n        }));\n    };\n\n// Props definition used for menu items.\ninterface ExampleProps {\n    pressedCount: number;\n    className?: string;\n}\n\n// Called to get the props from the redux state for several of the\n// following components.\n// Gets the value of the property \"Example_menu_item_pressed_count\"\n// from the state and puts it in 'pressedCount'\nconst exampleMapStateToProps = (state: RootState) => ({ pressedCount: state.properties[propertyKey] || 0 });\n\n// Define component for the menu item that increments the count each time it is pressed.\nexport const ExampleMenuComponent = connect(exampleMapStateToProps)(\n    ({ pressedCount, dispatch, className }: ExampleProps & DispatchProp<any>) =>\n        <MenuItem className={className} onClick={() => incrementPressedCount(dispatch, pressedCount)}>Example menu item</MenuItem>\n);\n\n// Define component for the menu item that opens the dialog box that lets you edit the count directly.\nexport const ExampleDialogMenuComponent = connect(exampleMapStateToProps)(\n    ({ pressedCount, dispatch, className }: ExampleProps & DispatchProp<any>) =>\n        <MenuItem className={className} onClick={() => dispatch(openExampleDialog(pressedCount))}>Open example dialog</MenuItem>\n);\n\n// The central panel.  Displays the \"pressed\" count.\nexport const ExamplePluginMainPanel = connect(exampleMapStateToProps)(\n    ({ pressedCount }: ExampleProps) =>\n        <Card>\n            <CardContent>\n                <Typography>\n                    This is an example main panel plugin.  The example menu item has been pressed {pressedCount} times.\n                </Typography>\n            </CardContent>\n        </Card>);\n"
  },
  {
    "path": "services/workbench2/src/plugins/example/index.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Example workbench plugin.  The entry point is the \"register\" method.\n\nimport { PluginConfig } from 'common/plugintypes';\nimport React from 'react';\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { push } from \"connected-react-router\";\nimport { Route, matchPath } from \"react-router\";\nimport { RootStore } from 'store/store';\nimport { activateSidePanelTreeItem } from 'store/side-panel-tree/side-panel-tree-actions';\nimport { setSidePanelBreadcrumbs } from 'store/breadcrumbs/breadcrumbs-actions';\nimport { Location } from 'history';\nimport { handleFirstTimeLoad } from 'store/workbench/workbench-actions';\nimport {\n    ExampleDialog,\n    ExamplePluginMainPanel,\n    ExampleMenuComponent,\n    ExampleDialogMenuComponent\n} from './exampleComponents';\n\nconst categoryName = \"Plugin Example\";\nexport const routePath = \"/examplePlugin\";\n\nexport const register = (pluginConfig: PluginConfig) => {\n\n    // Add this component to the main panel.  When the app navigates\n    // to '/examplePlugin' it will render ExamplePluginMainPanel.\n    pluginConfig.centerPanelList.push((elms) => {\n        elms.push(<Route path={routePath} component={ExamplePluginMainPanel} />);\n        return elms;\n    });\n\n    // Add ExampleDialogMenuComponent to the upper-right user account menu\n    pluginConfig.accountMenuList.push((elms, menuItemClass) => {\n        elms.push(<ExampleDialogMenuComponent className={menuItemClass} />);\n        return elms;\n    });\n\n    // Add ExampleMenuComponent to the \"New\" button dropdown.\n    pluginConfig.newButtonMenuList.push((elms, menuItemClass) => {\n        elms.push(<ExampleMenuComponent className={menuItemClass} />);\n        return elms;\n    });\n\n    // Add a hook so that when the 'Plugin Example' entry in the left\n    // hand tree view is clicked, which calls navigateTo('Plugin Example'),\n    // it will be implemented by navigating to '/examplePlugin'\n    pluginConfig.navigateToHandlers.push((dispatch: Dispatch, getState: () => RootState, uuid: string) => {\n        if (uuid === categoryName) {\n            dispatch(push(routePath));\n            return true;\n        }\n        return false;\n    });\n\n    // Adds 'Plugin Example' to the left hand tree view.\n    pluginConfig.sidePanelCategories.push((cats: string[]): string[] => { cats.push(categoryName); return cats; });\n\n    // When the location changes to '/examplePlugin', make sure\n    // 'Plugin Example' in the left hand tree view is selected, and\n    // make sure the breadcrumbs are updated.\n    pluginConfig.locationChangeHandlers.push((store: RootStore, pathname: string): boolean => {\n        if (matchPath(pathname, { path: routePath, exact: true })) {\n            store.dispatch(handleFirstTimeLoad(\n                (dispatch: Dispatch) => {\n                    dispatch<any>(activateSidePanelTreeItem(categoryName));\n                    dispatch<any>(setSidePanelBreadcrumbs(categoryName));\n                }));\n            return true;\n        }\n        return false;\n    });\n\n    // The \"New\" button can enabled or disabled based on the current\n    // context or selection.  
This adds a new callback to that will\n    // enable the \"New\" button when the location is '/examplePlugin'\n    pluginConfig.enableNewButtonMatchers.push((location: Location) => (!!matchPath(location.pathname, { path: routePath, exact: true })));\n\n    // Add the example dialog box to the list of dialog box controls.\n    pluginConfig.dialogs.push(<ExampleDialog />);\n};\n"
  },
  {
    "path": "services/workbench2/src/plugins/root-redirect/index.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { PluginConfig } from 'common/plugintypes';\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { SidePanelTreeCategory } from 'store/side-panel-tree/side-panel-tree-actions';\nimport { push } from \"connected-react-router\";\n\nexport const register = (pluginConfig: PluginConfig, redirect: string) => {\n\n    pluginConfig.navigateToHandlers.push((dispatch: Dispatch, getState: () => RootState, uuid: string) => {\n        if (uuid === SidePanelTreeCategory.PROJECTS) {\n            dispatch(push(redirect));\n            return true;\n        }\n        return false;\n    });\n};\n"
  },
  {
    "path": "services/workbench2/src/plugins.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { PluginConfig } from 'common/plugintypes';\n\nexport const pluginConfig: PluginConfig = {\n    centerPanelList: [],\n    sidePanelCategories: [],\n    dialogs: [],\n    navigateToHandlers: [],\n    locationChangeHandlers: [],\n    appBarLeft: undefined,\n    appBarMiddle: undefined,\n    appBarRight: undefined,\n    accountMenuList: [],\n    enableNewButtonMatchers: [],\n    newButtonMenuList: [],\n    middlewares: []\n};\n\n// Starting here, import and register your Workbench 2 plugins. //\n\n// import { register as blankUIPluginRegister } from 'plugins/blank/index';\n// import { register as examplePluginRegister } from 'plugins/example/index';\n// import { register as rootRedirectRegister } from 'plugins/root-redirect/index';\n\n// blankUIPluginRegister(pluginConfig);\n// examplePluginRegister(pluginConfig);\n// rootRedirectRegister(pluginConfig, exampleRoutePath);\n"
  },
  {
    "path": "services/workbench2/src/react-app-env.d.ts",
    "content": "/// <reference types=\"node\" />\n/// <reference types=\"react\" />\n/// <reference types=\"react-dom\" />\n\ndeclare namespace NodeJS {\n  interface ProcessEnv {\n    readonly NODE_ENV: 'development' | 'production' | 'test';\n    readonly PUBLIC_URL: string;\n  }\n}\n\ndeclare module '*.avif' {\n  const src: string;\n  export default src;\n}\n\ndeclare module '*.bmp' {\n  const src: string;\n  export default src;\n}\n\ndeclare module '*.gif' {\n  const src: string;\n  export default src;\n}\n\ndeclare module '*.jpg' {\n  const src: string;\n  export default src;\n}\n\ndeclare module '*.jpeg' {\n  const src: string;\n  export default src;\n}\n\ndeclare module '*.png' {\n  const src: string;\n  export default src;\n}\n\ndeclare module '*.webp' {\n    const src: string;\n    export default src;\n}\n\ndeclare module '*.svg' {\n  import * as React from 'react';\n\n  export const ReactComponent: React.FunctionComponent<React.SVGProps<\n    SVGSVGElement\n  > & { title?: string }>;\n\n  const src: string;\n  export default src;\n}\n\ndeclare module '*.module.css' {\n  const classes: { readonly [key: string]: string };\n  export default classes;\n}\n\ndeclare module '*.module.scss' {\n  const classes: { readonly [key: string]: string };\n  export default classes;\n}\n\ndeclare module '*.module.sass' {\n  const classes: { readonly [key: string]: string };\n  export default classes;\n}\n\ndeclare type MouseWheelEvent = WheelEvent;"
  },
  {
    "path": "services/workbench2/src/routes/route-change-handlers.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { History, Location } from 'history';\nimport { RootStore } from 'store/store';\nimport * as Routes from 'routes/routes';\nimport * as WorkbenchActions from 'store/workbench/workbench-actions';\nimport { navigateToDashboard } from 'store/navigation/navigation-action';\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { contextMenuActions } from 'store/context-menu/context-menu-actions';\nimport { searchBarActions } from 'store/search-bar/search-bar-actions';\nimport { pluginConfig } from 'plugins';\nimport { openProjectPanel } from 'store/project-panel/project-panel-action';\n\nexport const addRouteChangeHandlers = (history: History, store: RootStore) => {\n    const handler = handleLocationChange(store);\n    handler(history.location);\n    history.listen(handler);\n};\n\nconst handleLocationChange = (store: RootStore) => ({ pathname }: Location) => {\n\n    const dashboardMatch = Routes.matchDashboardRoute(pathname);\n    const rootMatch = Routes.matchRootRoute(pathname);\n    const projectMatch = Routes.matchProjectRoute(pathname);\n    const collectionMatch = Routes.matchCollectionRoute(pathname);\n    const favoriteMatch = Routes.matchFavoritesRoute(pathname);\n    const publicFavoritesMatch = Routes.matchPublicFavoritesRoute(pathname);\n    const trashMatch = Routes.matchTrashRoute(pathname);\n    const processMatch = Routes.matchProcessRoute(pathname);\n    const repositoryMatch = Routes.matchRepositoriesRoute(pathname);\n    const searchResultsMatch = Routes.matchSearchResultsRoute(pathname);\n    const sharedWithMeMatch = Routes.matchSharedWithMeRoute(pathname);\n    const runProcessMatch = Routes.matchRunProcessRoute(pathname);\n    const virtualMachineUserMatch = Routes.matchUserVirtualMachineRoute(pathname);\n    const virtualMachineAdminMatch = Routes.matchAdminVirtualMachineRoute(pathname);\n    const sshKeysUserMatch = Routes.matchSshKeysUserRoute(pathname);\n    const sshKeysAdminMatch = Routes.matchSshKeysAdminRoute(pathname);\n    const instanceTypesMatch = Routes.matchInstanceTypesRoute(pathname);\n    const siteManagerMatch = Routes.matchSiteManagerRoute(pathname);\n    const keepServicesMatch = Routes.matchKeepServicesRoute(pathname);\n    const apiClientAuthorizationsMatch = Routes.matchApiClientAuthorizationsRoute(pathname);\n    const myAccountMatch = Routes.matchMyAccountRoute(pathname);\n    const preferencesMatch = Routes.matchPreferencesRoute(pathname);\n    const linkAccountMatch = Routes.matchLinkAccountRoute(pathname);\n    const usersMatch = Routes.matchUsersRoute(pathname);\n    const userProfileMatch = Routes.matchUserProfileRoute(pathname);\n    const groupsMatch = Routes.matchGroupsRoute(pathname);\n    const groupDetailsMatch = Routes.matchGroupDetailsRoute(pathname);\n    const linksMatch = Routes.matchLinksRoute(pathname);\n    const collectionsContentAddressMatch = Routes.matchCollectionsContentAddressRoute(pathname);\n    const allProcessesMatch = Routes.matchAllProcessesRoute(pathname);\n    const registeredWorkflowMatch = Routes.matchRegisteredWorkflowRoute(pathname);\n    const externalCredentialsMatch = Routes.matchExternalCredentialsRoute(pathname);\n\n    store.dispatch(dialogActions.CLOSE_ALL_DIALOGS());\n    store.dispatch(contextMenuActions.CLOSE_CONTEXT_MENU());\n    store.dispatch(searchBarActions.CLOSE_SEARCH_VIEW());\n\n    for (const locChangeFn of pluginConfig.locationChangeHandlers) {\n    
    if (locChangeFn(store, pathname)) {\n            return;\n        }\n    }\n\n    document.title = `Arvados (${store.getState().auth.config.uuidPrefix}) - ${pathname.slice(1)}`;\n\n    if (projectMatch) {\n        store.dispatch(openProjectPanel(projectMatch.params.id));\n    } else if (collectionMatch) {\n        store.dispatch(WorkbenchActions.loadCollection(collectionMatch.params.id));\n    } else if (favoriteMatch) {\n        store.dispatch(WorkbenchActions.loadFavorites());\n    } else if (publicFavoritesMatch) {\n        store.dispatch(WorkbenchActions.loadPublicFavorites());\n    } else if (trashMatch) {\n        store.dispatch(WorkbenchActions.loadTrash());\n    } else if (processMatch) {\n        store.dispatch(WorkbenchActions.loadProcess(processMatch.params.id));\n    } else if (rootMatch) {\n        store.dispatch(WorkbenchActions.loadDashboard());\n        store.dispatch(navigateToDashboard);\n    } else if (sharedWithMeMatch) {\n        store.dispatch(WorkbenchActions.loadSharedWithMe);\n    } else if (runProcessMatch) {\n        store.dispatch(WorkbenchActions.loadRunProcess);\n    } else if (searchResultsMatch) {\n        store.dispatch(WorkbenchActions.loadSearchResults);\n    } else if (virtualMachineUserMatch) {\n        store.dispatch(WorkbenchActions.loadVirtualMachines);\n    } else if (virtualMachineAdminMatch) {\n        store.dispatch(WorkbenchActions.loadVirtualMachinesAdmin);\n    } else if (repositoryMatch) {\n        store.dispatch(WorkbenchActions.loadRepositories);\n    } else if (sshKeysUserMatch) {\n        store.dispatch(WorkbenchActions.loadSshKeys);\n    } else if (sshKeysAdminMatch) {\n        store.dispatch(WorkbenchActions.loadSshKeys);\n    } else if (instanceTypesMatch) {\n        store.dispatch(WorkbenchActions.loadInstanceTypes);\n    } else if (siteManagerMatch) {\n        store.dispatch(WorkbenchActions.loadSiteManager);\n    } else if (keepServicesMatch) {\n        store.dispatch(WorkbenchActions.loadKeepServices);\n    } else if (apiClientAuthorizationsMatch) {\n        store.dispatch(WorkbenchActions.loadApiClientAuthorizations);\n    } else if (myAccountMatch) {\n        store.dispatch(WorkbenchActions.loadUserProfile());\n    } else if (preferencesMatch) {\n        store.dispatch(WorkbenchActions.loadUserPreferences());\n    } else if (linkAccountMatch) {\n        store.dispatch(WorkbenchActions.loadLinkAccount);\n    } else if (usersMatch) {\n        store.dispatch(WorkbenchActions.loadUsers);\n    } else if (userProfileMatch) {\n        store.dispatch(WorkbenchActions.loadUserProfile(userProfileMatch.params.id));\n    } else if (groupsMatch) {\n        store.dispatch(WorkbenchActions.loadGroupsPanel);\n    } else if (groupDetailsMatch) {\n        store.dispatch(WorkbenchActions.loadGroupDetailsPanel(groupDetailsMatch.params.id));\n    } else if (linksMatch) {\n        store.dispatch(WorkbenchActions.loadLinks);\n    } else if (collectionsContentAddressMatch) {\n        store.dispatch(WorkbenchActions.loadCollectionContentAddress);\n    } else if (allProcessesMatch) {\n        store.dispatch(WorkbenchActions.loadAllProcesses());\n    } else if (registeredWorkflowMatch) {\n        store.dispatch(WorkbenchActions.loadRegisteredWorkflow(registeredWorkflowMatch.params.id));\n    } else if (externalCredentialsMatch) {\n        store.dispatch(WorkbenchActions.loadExternalCredentialsPanel());\n    } else if (dashboardMatch) {\n        store.dispatch(WorkbenchActions.loadDashboard());\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/routes/routes.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { matchPath } from 'react-router';\nimport { ResourceKind, RESOURCE_UUID_PATTERN, extractUuidKind, COLLECTION_PDH_REGEX, PORTABLE_DATA_HASH_PATTERN } from 'models/resource';\nimport { getProjectUrl } from 'models/project';\nimport { getCollectionUrl } from 'models/collection';\nimport { Config } from 'common/config';\nimport { Session } from \"models/session\";\n\nexport interface FederationConfig {\n    localCluster: string;\n    remoteHostsConfig: { [key: string]: Config };\n    sessions: Session[];\n}\n\nexport const Routes = {\n    ROOT: '/',\n    TOKEN: '/token',\n    FED_LOGIN: '/fedtoken',\n    ADD_SESSION: '/add-session',\n    DASHBOARD: '/dashboard',\n    PROJECTS: `/projects/:id(${RESOURCE_UUID_PATTERN})`,\n    COLLECTIONS: `/collections/:id(${RESOURCE_UUID_PATTERN})`,\n    PROCESSES: `/processes/:id(${RESOURCE_UUID_PATTERN})`,\n    FAVORITES: '/favorites',\n    TRASH: '/trash',\n    REPOSITORIES: '/repositories',\n    SHARED_WITH_ME: '/shared-with-me',\n    RUN_PROCESS: '/run-process',\n    VIRTUAL_MACHINES_ADMIN: '/virtual-machines-admin',\n    VIRTUAL_MACHINES_USER: '/virtual-machines-user',\n    WORKFLOWS: '/workflows',\n    REGISTEREDWORKFLOW: `/workflows/:id(${RESOURCE_UUID_PATTERN})`,\n    SEARCH_RESULTS: '/search-results',\n    SSH_KEYS_ADMIN: `/ssh-keys-admin`,\n    SSH_KEYS_USER: `/ssh-keys-user`,\n    INSTANCE_TYPES: `/instance-types`,\n    SITE_MANAGER: `/site-manager`,\n    MY_ACCOUNT: '/my-account',\n    MY_PREFERENCES: '/preferences',\n    LINK_ACCOUNT: '/link_account',\n    KEEP_SERVICES: `/keep-services`,\n    USERS: '/users',\n    USER_PROFILE: `/user/:id(${RESOURCE_UUID_PATTERN})`,\n    API_CLIENT_AUTHORIZATIONS: `/api_client_authorizations`,\n    GROUPS: '/groups',\n    GROUP_DETAILS: `/group/:id(${RESOURCE_UUID_PATTERN})`,\n    LINKS: '/links',\n    PUBLIC_FAVORITES: '/public-favorites',\n    COLLECTIONS_CONTENT_ADDRESS: `/collections/:id(${PORTABLE_DATA_HASH_PATTERN})`,\n    ALL_PROCESSES: '/all_processes',\n    EXTERNAL_CREDENTIALS: '/external_credentials',\n    NO_MATCH: '*',\n};\n\nexport const getResourceUrl = (uuid: string) => {\n    const kind = extractUuidKind(uuid);\n    switch (kind) {\n        case ResourceKind.PROJECT:\n            return getProjectUrl(uuid);\n        case ResourceKind.USER:\n            return getProjectUrl(uuid);\n        case ResourceKind.COLLECTION:\n            return getCollectionUrl(uuid);\n        case ResourceKind.PROCESS:\n            return getProcessUrl(uuid);\n        case ResourceKind.WORKFLOW:\n            return getWorkflowUrl(uuid);\n        default:\n            return undefined;\n    }\n};\n\n/**\n * @returns A relative or federated url for the given uuid, with a token for federated WB1 urls\n */\nexport const getNavUrl = (uuid: string, config: FederationConfig, includeToken: boolean = true): string => {\n    const path = getResourceUrl(uuid) || \"\";\n    const cls = uuid.substring(0, 5);\n    if (cls === config.localCluster || extractUuidKind(uuid) === ResourceKind.USER || COLLECTION_PDH_REGEX.exec(uuid)) {\n        return path;\n    } else if (config.remoteHostsConfig[cls]) {\n        let u: URL;\n        if (config.remoteHostsConfig[cls].workbench2Url) {\n            /* NOTE: wb2 presently doesn't support passing api_token\n               to arbitrary page to set credentials, only through\n               api-token route.  
So for navigation to work, user needs\n               to already be logged in.  In the future we want to just\n               request the records and display in the current\n               workbench instance making this redirect unnecessary. */\n            u = new URL(config.remoteHostsConfig[cls].workbench2Url);\n        } else {\n            u = new URL(config.remoteHostsConfig[cls].workbenchUrl);\n            if (includeToken) {\n                u.search = \"api_token=\" + config.sessions.filter((s) => s.clusterId === cls)[0].token;\n            }\n        }\n        u.pathname = path;\n        return u.toString();\n    } else {\n        return \"\";\n    }\n};\n\n\nexport const getProcessUrl = (uuid: string) => `/processes/${uuid}`;\n\nexport const getWorkflowUrl = (uuid: string) => `/workflows/${uuid}`;\n\nexport const getGroupUrl = (uuid: string) => `/group/${uuid}`;\n\nexport const getUserProfileUrl = (uuid: string) => `/user/${uuid}`;\n\nexport interface ResourceRouteParams {\n    id: string;\n}\n\nexport const matchDashboardRoute = (route: string) =>\n    matchPath(route, { path: Routes.DASHBOARD });\n\nexport const matchRootRoute = (route: string) =>\n    matchPath(route, { path: Routes.ROOT, exact: true });\n\nexport const matchFavoritesRoute = (route: string) =>\n    matchPath(route, { path: Routes.FAVORITES });\n\nexport const matchTrashRoute = (route: string) =>\n    matchPath(route, { path: Routes.TRASH });\n\nexport const matchAllProcessesRoute = (route: string) =>\n    matchPath(route, { path: Routes.ALL_PROCESSES });\n\nexport const matchRegisteredWorkflowRoute = (route: string) =>\n    matchPath<ResourceRouteParams>(route, { path: Routes.REGISTEREDWORKFLOW });\n\nexport const matchProjectRoute = (route: string) =>\n    matchPath<ResourceRouteParams>(route, { path: Routes.PROJECTS });\n\nexport const matchCollectionRoute = (route: string) =>\n    matchPath<ResourceRouteParams>(route, { path: Routes.COLLECTIONS });\n\nexport const matchProcessRoute = (route: string) =>\n    matchPath<ResourceRouteParams>(route, { path: Routes.PROCESSES });\n\nexport const matchSharedWithMeRoute = (route: string) =>\n    matchPath(route, { path: Routes.SHARED_WITH_ME });\n\nexport const matchRunProcessRoute = (route: string) =>\n    matchPath(route, { path: Routes.RUN_PROCESS });\n\nexport const matchWorkflowRoute = (route: string) =>\n    matchPath<ResourceRouteParams>(route, { path: Routes.WORKFLOWS });\n\nexport const matchSearchResultsRoute = (route: string) =>\n    matchPath<ResourceRouteParams>(route, { path: Routes.SEARCH_RESULTS });\n\nexport const matchUserVirtualMachineRoute = (route: string) =>\n    matchPath<ResourceRouteParams>(route, { path: Routes.VIRTUAL_MACHINES_USER });\n\nexport const matchAdminVirtualMachineRoute = (route: string) =>\n    matchPath<ResourceRouteParams>(route, { path: Routes.VIRTUAL_MACHINES_ADMIN });\n\nexport const matchRepositoriesRoute = (route: string) =>\n    matchPath<ResourceRouteParams>(route, { path: Routes.REPOSITORIES });\n\nexport const matchSshKeysUserRoute = (route: string) =>\n    matchPath(route, { path: Routes.SSH_KEYS_USER });\n\n    export const matchSshKeysAdminRoute = (route: string) =>\n    matchPath(route, { path: Routes.SSH_KEYS_ADMIN });\n\nexport const matchInstanceTypesRoute = (route: string) =>\n    matchPath(route, { path: Routes.INSTANCE_TYPES });\n\nexport const matchSiteManagerRoute = (route: string) =>\n    matchPath(route, { path: Routes.SITE_MANAGER });\n\nexport const matchMyAccountRoute = (route: string) =>\n    
matchPath(route, { path: Routes.MY_ACCOUNT });\n\nexport const matchPreferencesRoute = (route: string) =>\n    matchPath(route, { path: Routes.MY_PREFERENCES });\n\nexport const matchLinkAccountRoute = (route: string) =>\n    matchPath(route, { path: Routes.LINK_ACCOUNT });\n\nexport const matchKeepServicesRoute = (route: string) =>\n    matchPath(route, { path: Routes.KEEP_SERVICES });\n\nexport const matchTokenRoute = (route: string) =>\n    matchPath(route, { path: Routes.TOKEN });\n\nexport const matchFedTokenRoute = (route: string) =>\n    matchPath(route, { path: Routes.FED_LOGIN });\n\nexport const matchUsersRoute = (route: string) =>\n    matchPath(route, { path: Routes.USERS });\n\nexport const matchUserProfileRoute = (route: string) =>\n    matchPath<ResourceRouteParams>(route, { path: Routes.USER_PROFILE });\n\nexport const matchApiClientAuthorizationsRoute = (route: string) =>\n    matchPath(route, { path: Routes.API_CLIENT_AUTHORIZATIONS });\n\nexport const matchGroupsRoute = (route: string) =>\n    matchPath(route, { path: Routes.GROUPS });\n\nexport const matchGroupDetailsRoute = (route: string) =>\n    matchPath<ResourceRouteParams>(route, { path: Routes.GROUP_DETAILS });\n\nexport const matchLinksRoute = (route: string) =>\n    matchPath(route, { path: Routes.LINKS });\n\nexport const matchPublicFavoritesRoute = (route: string) =>\n    matchPath(route, { path: Routes.PUBLIC_FAVORITES });\n\nexport const matchCollectionsContentAddressRoute = (route: string) =>\n    matchPath(route, { path: Routes.COLLECTIONS_CONTENT_ADDRESS });\n\nexport const matchExternalCredentialsRoute = (route: string) =>\n    matchPath(route, { path: Routes.EXTERNAL_CREDENTIALS });\n"
  },
  {
    "path": "services/workbench2/src/services/ancestors-service/ancestors-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { GroupsService } from \"services/groups-service/groups-service\";\nimport { UserService } from '../user-service/user-service';\nimport { GroupResource } from 'models/group';\nimport { UserResource } from 'models/user';\nimport { extractUuidObjectType, ResourceObjectType } from \"models/resource\";\nimport { CollectionService } from \"services/collection-service/collection-service\";\nimport { CollectionResource } from \"models/collection\";\n\nexport class AncestorService {\n    constructor(\n        private groupsService: GroupsService,\n        private userService: UserService,\n        private collectionService: CollectionService,\n    ) { }\n\n    async ancestors(startUuid: string, endUuid: string): Promise<Array<UserResource | GroupResource | CollectionResource>> {\n        return this._ancestors(startUuid, endUuid);\n    }\n\n    private async _ancestors(startUuid: string, endUuid: string, previousUuid = ''): Promise<Array<UserResource | GroupResource | CollectionResource>> {\n\n        if (startUuid === previousUuid) {\n            return [];\n        }\n\n        const service = this.getService(extractUuidObjectType(startUuid));\n        if (service) {\n            try {\n                const resource = await service.get(startUuid, false);\n                if (startUuid === endUuid) {\n                    return [resource];\n                } else {\n                    return [\n                        ...(await this._ancestors(resource.ownerUuid, endUuid, startUuid)),\n                        resource\n                    ];\n                }\n            } catch (e) {\n                return [];\n            }\n        }\n        return [];\n    }\n\n    private getService = (objectType?: string) => {\n        switch (objectType) {\n            case ResourceObjectType.GROUP:\n                return this.groupsService;\n            case ResourceObjectType.USER:\n                return this.userService;\n            case ResourceObjectType.COLLECTION:\n                return this.collectionService;\n            default:\n                return undefined;\n        }\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/api/api-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport type ProgressFn = (id: string, working: boolean) => void;\nexport type ErrorFn = (id: string, error: any, showSnackBar?: boolean) => void;\n\nexport interface ApiActions {\n    progressFn: ProgressFn;\n    errorFn: ErrorFn;\n}\n"
  },
  {
    "path": "services/workbench2/src/services/api/filter-builder.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { FilterBuilder } from \"./filter-builder\";\n\ndescribe(\"FilterBuilder\", () => {\n\n    let filters;\n\n    beforeEach(() => {\n        filters = new FilterBuilder();\n    });\n\n    it(\"should add 'equal' rule (string)\", () => {\n        expect(\n            filters.addEqual(\"etag\", \"etagValue\").getFilters()\n        ).to.equal(`[\"etag\",\"=\",\"etagValue\"]`);\n    });\n\n    it(\"should add 'equal' rule (boolean)\", () => {\n        expect(\n            filters.addEqual(\"is_trashed\", true).getFilters()\n        ).to.equal(`[\"is_trashed\",\"=\",true]`);\n    });\n\n    it(\"should add 'like' rule\", () => {\n        expect(\n            filters.addLike(\"etag\", \"etagValue\").getFilters()\n        ).to.equal(`[\"etag\",\"like\",\"%etagValue%\"]`);\n    });\n\n    it(\"should add 'ilike' rule\", () => {\n        expect(\n            filters.addILike(\"etag\", \"etagValue\").getFilters()\n        ).to.equal(`[\"etag\",\"ilike\",\"%etagValue%\"]`);\n    });\n\n    it(\"should add 'contains' rule\", () => {\n        expect(\n            filters.addContains(\"properties.someProp\", \"someValue\").getFilters()\n        ).to.equal(`[\"properties.someProp\",\"contains\",\"someValue\"]`);\n    });\n\n    it(\"should add 'is_a' rule\", () => {\n        expect(\n            filters.addIsA(\"etag\", \"etagValue\").getFilters()\n        ).to.equal(`[\"etag\",\"is_a\",\"etagValue\"]`);\n    });\n\n    it(\"should add 'is_a' rule for set\", () => {\n        expect(\n            filters.addIsA(\"etag\", [\"etagValue1\", \"etagValue2\"]).getFilters()\n        ).to.equal(`[\"etag\",\"is_a\",[\"etagValue1\",\"etagValue2\"]]`);\n    });\n\n    it(\"should add 'in' rule\", () => {\n        expect(\n            filters.addIn(\"etag\", \"etagValue\").getFilters()\n        ).to.equal(`[\"etag\",\"in\",\"etagValue\"]`);\n    });\n\n    it(\"should add 'in' rule for set\", () => {\n        expect(\n            filters.addIn(\"etag\", [\"etagValue1\", \"etagValue2\"]).getFilters()\n        ).to.equal(`[\"etag\",\"in\",[\"etagValue1\",\"etagValue2\"]]`);\n    });\n\n    it(\"should add 'not in' rule for set\", () => {\n        expect(\n            filters.addNotIn(\"etag\", [\"etagValue1\", \"etagValue2\"]).getFilters()\n        ).to.equal(`[\"etag\",\"not in\",[\"etagValue1\",\"etagValue2\"]]`);\n    });\n\n    it(\"should add multiple rules\", () => {\n        expect(\n            filters\n                .addIn(\"etag\", [\"etagValue1\", \"etagValue2\"])\n                .addEqual(\"name\", \"nameValue\")\n                .getFilters()\n        ).to.equal(`[\"etag\",\"in\",[\"etagValue1\",\"etagValue2\"]],[\"name\",\"=\",\"nameValue\"]`);\n    });\n\n    it(\"should add attribute prefix\", () => {\n        expect(new FilterBuilder()\n            .addIn(\"etag\", [\"etagValue1\", \"etagValue2\"], \"myPrefix\")\n            .getFilters())\n            .to.equal(`[\"myPrefix.etag\",\"in\",[\"etagValue1\",\"etagValue2\"]]`);\n    });\n\n    it('should add full text search', () => {\n        expect(\n            new FilterBuilder()\n                .addFullTextSearch('my custom search')\n                .getFilters()\n        ).to.equal(`[\"any\",\"ilike\",\"%my%\"],[\"any\",\"ilike\",\"%custom%\"],[\"any\",\"ilike\",\"%search%\"]`);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/services/api/filter-builder.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport function joinFilters(...filters: string[]) {\n    return filters.filter(s => s).join(\",\");\n}\n\nexport class FilterBuilder {\n    constructor(private filters = \"\") { }\n\n    public addEqual(field: string, value?: string | string[] | boolean | null, resourcePrefix?: string) {\n        return this.addCondition(field, \"=\", value, \"\", \"\", resourcePrefix);\n    }\n\n    public addDistinct(field: string, value?: string | boolean | null, resourcePrefix?: string) {\n        return this.addCondition(field, \"!=\", value, \"\", \"\", resourcePrefix);\n    }\n\n    public addLike(field: string, value?: string, resourcePrefix?: string) {\n        return this.addCondition(field, \"like\", value, \"%\", \"%\", resourcePrefix);\n    }\n\n    public addILike(field: string, value?: string, resourcePrefix?: string) {\n        return this.addCondition(field, \"ilike\", value, \"%\", \"%\", resourcePrefix);\n    }\n\n    public addContains(field: string, value?: string, resourcePrefix?: string) {\n        return this.addCondition(field, \"contains\", value, \"\", \"\", resourcePrefix);\n    }\n\n    public addIsA(field: string, value?: string | string[], resourcePrefix?: string) {\n        return this.addCondition(field, \"is_a\", value, \"\", \"\", resourcePrefix);\n    }\n\n    public addIn(field: string, value?: string | string[], resourcePrefix?: string) {\n        return this.addCondition(field, \"in\", value, \"\", \"\", resourcePrefix);\n    }\n\n    public addNotIn(field: string, value?: string | string[], resourcePrefix?: string) {\n        return this.addCondition(field, \"not in\", value, \"\", \"\", resourcePrefix);\n    }\n\n    public addGt(field: string, value?: string, resourcePrefix?: string) {\n        return this.addCondition(field, \">\", value, \"\", \"\", resourcePrefix);\n    }\n\n    public addGte(field: string, value?: string, resourcePrefix?: string) {\n        return this.addCondition(field, \">=\", value, \"\", \"\", resourcePrefix);\n    }\n\n    public addLt(field: string, value?: string, resourcePrefix?: string) {\n        return this.addCondition(field, \"<\", value, \"\", \"\", resourcePrefix);\n    }\n\n    public addLte(field: string, value?: string, resourcePrefix?: string) {\n        return this.addCondition(field, \"<=\", value, \"\", \"\", resourcePrefix);\n    }\n\n    public addExists(value?: string, resourcePrefix?: string) {\n        return this.addCondition(\"properties\", \"exists\", value, \"\", \"\", resourcePrefix);\n    }\n    public addDoesNotExist(field: string, resourcePrefix?: string) {\n        return this.addCondition(\"properties.\" + field, \"exists\", false, \"\", \"\", resourcePrefix);\n    }\n\n    public addFullTextSearch(value: string, table?: string) {\n        const regex = /\"[^\"]*\"/;\n        const matches: any[] = [];\n\n        let match = value.match(regex);\n\n        while (match) {\n            value = value.replace(match[0], \"\");\n            matches.push(match[0].replace(/\"/g, ''));\n            match = value.match(regex);\n        }\n\n        let searchIn = 'any';\n        if (table) {\n            searchIn = table + \".any\";\n        }\n\n        const terms = value.trim().split(/(\\s+)/).concat(matches);\n        terms.forEach(term => {\n            if (term !== \" \") {\n                this.addCondition(searchIn, \"ilike\", term, \"%\", \"%\");\n            }\n        });\n        return 
this;\n    }\n\n    public getFilters() {\n        return this.filters;\n    }\n\n    private addCondition(field: string, cond: string, value?: string | string[] | boolean | null, prefix: string = \"\", postfix: string = \"\", resourcePrefix?: string) {\n        if (value !== undefined) {\n            if (typeof value === \"string\") {\n                value = `\"${prefix}${value}${postfix}\"`;\n            } else if (Array.isArray(value)) {\n                value = `[\"${value.join(`\",\"`)}\"]`;\n            } else if (value !== null) {\n                value = value ? \"true\" : \"false\";\n            }\n\n            const resPrefix = resourcePrefix\n                ? resourcePrefix + \".\"\n                : \"\";\n\n            this.filters += `${this.filters ? \",\" : \"\"}[\"${resPrefix}${field}\",\"${cond}\",${value}]`;\n        }\n        return this;\n    }\n}\n"
  },
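  {
    "path": "services/workbench2/src/services/api/filter-builder.example.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Illustrative sketch only: this file and its path are hypothetical, not\n// part of the Workbench 2 source tree.  It shows how FilterBuilder and\n// joinFilters compose the Arvados filter syntax that the tests in\n// filter-builder.cy.js above pin down.\n\nimport { FilterBuilder, joinFilters } from 'services/api/filter-builder';\n\n// Each builder accumulates bracketed [attribute, operator, operand] triples.\nconst nameFilter = new FilterBuilder()\n    .addILike('name', 'sample')              // [\"name\",\"ilike\",\"%sample%\"]\n    .getFilters();\n\nconst stateFilter = new FilterBuilder()\n    .addEqual('is_trashed', false)           // [\"is_trashed\",\"=\",false]\n    .addIn('uuid', ['zzzzz-4zz18-0123456789abcde'])\n    .getFilters();\n\n// joinFilters concatenates non-empty fragments with commas, yielding a\n// string ready to be sent as the \"filters\" request parameter:\n// [\"name\",\"ilike\",\"%sample%\"],[\"is_trashed\",\"=\",false],[\"uuid\",\"in\",[\"zzzzz-4zz18-0123456789abcde\"]]\nexport const exampleFilters = joinFilters(nameFilter, stateFilter);\n"
  },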
  {
    "path": "services/workbench2/src/services/api/order-builder.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { OrderBuilder } from \"./order-builder\";\n\ndescribe(\"OrderBuilder\", () => {\n    it(\"should build correct order query\", () => {\n        const order = new OrderBuilder()\n            .addAsc(\"kind\")\n            .addDesc(\"createdAt\")\n            .getOrder();\n        expect(order).to.equal(\"kind asc,created_at desc\");\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/services/api/order-builder.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { snakeCase } from \"lodash\";\nimport { Resource } from \"models/resource\";\n\nexport enum OrderDirection { ASC, DESC }\n\nexport class OrderBuilder<T extends Resource = Resource> {\n\n    constructor(private order: string[] = []) {}\n\n    addOrder(direction: OrderDirection, attribute: keyof T, prefix?: string) {\n        this.order.push(`${prefix ? prefix + \".\" : \"\"}${snakeCase(attribute.toString())} ${direction === OrderDirection.ASC ? \"asc\" : \"desc\"}`);\n        return this;\n    }\n\n    addAsc(attribute: keyof T, prefix?: string) {\n        return this.addOrder(OrderDirection.ASC, attribute, prefix);\n    }\n\n    addDesc(attribute: keyof T, prefix?: string) {\n        return this.addOrder(OrderDirection.DESC, attribute, prefix);\n    }\n\n    getOrder() {\n        return this.order.join(\",\");\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/api/url-builder.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { joinUrls } from \"services/api/url-builder\";\n\ndescribe(\"UrlBuilder\", () => {\n    it(\"should join urls properly 1\", () => {\n        expect(joinUrls('http://localhost:3000', '/main')).to.equal('http://localhost:3000/main');\n    });\n    it(\"should join urls properly 2\", () => {\n        expect(joinUrls('http://localhost:3000/', '/main')).to.equal('http://localhost:3000/main');\n    });\n    it(\"should join urls properly 3\", () => {\n        expect(joinUrls('http://localhost:3000//', '/main')).to.equal('http://localhost:3000/main');\n    });\n    it(\"should join urls properly 4\", () => {\n        expect(joinUrls('http://localhost:3000', '//main')).to.equal('http://localhost:3000/main');\n    });\n    it(\"should join urls properly 5\", () => {\n        expect(joinUrls('http://localhost:3000///', 'main')).to.equal('http://localhost:3000/main');\n    });\n    it(\"should join urls properly 6\", () => {\n        expect(joinUrls('http://localhost:3000///', '//main')).to.equal('http://localhost:3000/main');\n    });\n    it(\"should join urls properly 7\", () => {\n        expect(joinUrls(undefined, '//main')).to.equal('/main');\n    });\n    it(\"should join urls properly 8\", () => {\n        expect(joinUrls(undefined, 'main')).to.equal('/main');\n    });\n    it(\"should join urls properly 9\", () => {\n        expect(joinUrls('http://localhost:3000///', undefined)).to.equal('http://localhost:3000');\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/services/api/url-builder.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport class UrlBuilder {\n    private readonly url: string = \"\";\n    private query: string = \"\";\n\n    constructor(host: string) {\n        this.url = host;\n    }\n\n    public addParam(param: string, value: string) {\n        if (this.query.length === 0) {\n            this.query += \"?\";\n        } else {\n            this.query += \"&\";\n        }\n        this.query += `${param}=${value}`;\n        return this;\n    }\n\n    public get() {\n        return this.url + this.query;\n    }\n}\n\nexport function joinUrls(url0?: string, url1?: string) {\n    let u0 = \"\";\n    if (url0) {\n        let idx0 = url0.length - 1;\n        while (url0[idx0] === '/') { --idx0; }\n        u0 = url0.substring(0, idx0 + 1);\n    }\n    let u1 = \"\";\n    if (url1) {\n        let idx1 = 0;\n        while (url1[idx1] === '/') { ++idx1; }\n        u1 = url1.substring(idx1);\n    }\n    let url = u0;\n    if (u1.length > 0) {\n        url += '/';\n    }\n    url += u1;\n    return url;\n}\n"
  },
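  {
    "path": "services/workbench2/src/services/api/url-builder.example.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Illustrative sketch only: this file and its path are hypothetical, not\n// part of the Workbench 2 source tree.  It shows the two url-builder\n// helpers together: joinUrls collapses redundant slashes at the seam, and\n// UrlBuilder appends query parameters, emitting '?' for the first one and\n// '&' for the rest.\n\nimport { UrlBuilder, joinUrls } from 'services/api/url-builder';\n\n// The host name below is made up for the example.\nexport const exampleUrl = new UrlBuilder(joinUrls('https://example.arvadosapi.com//', '/arvados/v1'))\n    .addParam('limit', '50')\n    .addParam('offset', '100')\n    .get();\n// => 'https://example.arvadosapi.com/arvados/v1?limit=50&offset=100'\n"
  },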
  {
    "path": "services/workbench2/src/services/api-client-authorization-service/api-client-authorization-service.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport axios from \"axios\";\nimport { ApiClientAuthorizationService } from \"./api-client-authorization-service\";\n\n\ndescribe('ApiClientAuthorizationService', () => {\n    let apiClientAuthorizationService;\n    let serverApi;\n    let actions;\n\n    beforeEach(() => {\n        serverApi = axios.create();\n        actions = {\n            progressFn: cy.stub(),\n        };\n        apiClientAuthorizationService = new ApiClientAuthorizationService(serverApi, actions);\n    });\n\n    describe('createCollectionSharingToken', () => {\n        it('should return error on invalid collection uuid', () => {\n            expect(() => apiClientAuthorizationService.createCollectionSharingToken(\"foo\", undefined)).to.throw(\"UUID foo is not a collection\");\n        });\n\n        it('should make a create request with proper scopes and no expiration date', async () => {\n            serverApi.post = cy.stub().returns(Promise.resolve(\n                { data: { uuid: 'zzzzz-4zz18-0123456789abcde' } }\n            ));\n            const uuid = 'zzzzz-4zz18-0123456789abcde'\n            await apiClientAuthorizationService.createCollectionSharingToken(uuid, undefined);\n            expect(serverApi.post).to.be.calledWith(\n                '/api_client_authorizations', {\n                    scopes: [\n                        `GET /arvados/v1/collections/${uuid}`,\n                        `GET /arvados/v1/collections/${uuid}/`,\n                        `GET /arvados/v1/keep_services/accessible`,\n                    ]\n                }\n            );\n        });\n\n        it('should make a create request with proper scopes and expiration date', async () => {\n            serverApi.post = cy.stub().returns(Promise.resolve(\n                { data: { uuid: 'zzzzz-4zz18-0123456789abcde' } }\n            ));\n            const uuid = 'zzzzz-4zz18-0123456789abcde'\n            const expDate = new Date(2022, 8, 28, 12, 0, 0);\n            await apiClientAuthorizationService.createCollectionSharingToken(uuid, expDate);\n            expect(serverApi.post).to.be.calledWith(\n                '/api_client_authorizations', {\n                    scopes: [\n                        `GET /arvados/v1/collections/${uuid}`,\n                        `GET /arvados/v1/collections/${uuid}/`,\n                        `GET /arvados/v1/keep_services/accessible`,\n                    ],\n                    expires_at: expDate.toUTCString()\n                }\n            );\n        });\n    });\n\n    describe('listCollectionSharingToken', () => {\n        it('should return error on invalid collection uuid', () => {\n            expect(() => apiClientAuthorizationService.listCollectionSharingTokens(\"foo\")).to.throw(\"UUID foo is not a collection\");\n        });\n\n        it('should make a list request with proper scopes', async () => {\n            serverApi.get = cy.stub().returns(Promise.resolve(\n                { data: { items: [{}] } }\n            ));\n            const uuid = 'zzzzz-4zz18-0123456789abcde'\n            await apiClientAuthorizationService.listCollectionSharingTokens(uuid);\n            expect(serverApi.get).to.be.calledWith(\n                `/api_client_authorizations`, {params: {\n                    filters: JSON.stringify([[\"scopes\",\"=\",[\n                        `GET /arvados/v1/collections/${uuid}`,\n                        `GET /arvados/v1/collections/${uuid}/`,\n   
                     'GET /arvados/v1/keep_services/accessible',\n                    ]]]),\n                    select: undefined,\n                }}\n            );\n        });\n    });\n});"
  },
  {
    "path": "services/workbench2/src/services/api-client-authorization-service/api-client-authorization-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { AxiosInstance } from \"axios\";\nimport { ApiActions } from 'services/api/api-actions';\nimport { ApiClientAuthorization } from 'models/api-client-authorization';\nimport { CommonService, ListResults } from 'services/common-service/common-service';\nimport { extractUuidObjectType, ResourceObjectType } from \"models/resource\";\nimport { FilterBuilder } from \"services/api/filter-builder\";\n\nexport class ApiClientAuthorizationService extends CommonService<ApiClientAuthorization> {\n    constructor(serverApi: AxiosInstance, actions: ApiActions) {\n        super(serverApi, \"api_client_authorizations\", actions);\n    }\n\n    createCollectionSharingToken(uuid: string, expDate: Date | undefined): Promise<ApiClientAuthorization> {\n        if (extractUuidObjectType(uuid) !== ResourceObjectType.COLLECTION) {\n            throw new Error(`UUID ${uuid} is not a collection`);\n        }\n        const data = {\n            scopes: [\n                `GET /arvados/v1/collections/${uuid}`,\n                `GET /arvados/v1/collections/${uuid}/`,\n                `GET /arvados/v1/keep_services/accessible`,\n            ]\n        }\n        return expDate !== undefined\n            ? this.create({...data, expiresAt: expDate.toUTCString()})\n            : this.create(data);\n    }\n\n    listCollectionSharingTokens(uuid: string): Promise<ListResults<ApiClientAuthorization>> {\n        if (extractUuidObjectType(uuid) !== ResourceObjectType.COLLECTION) {\n            throw new Error(`UUID ${uuid} is not a collection`);\n        }\n        return this.list({\n            filters: new FilterBuilder()\n                .addEqual(\"scopes\", [\n                    `GET /arvados/v1/collections/${uuid}`,\n                    `GET /arvados/v1/collections/${uuid}/`,\n                    \"GET /arvados/v1/keep_services/accessible\"\n                ]).getFilters()\n        });\n    }\n}"
  },
  {
    "path": "services/workbench2/src/services/auth-service/auth-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { User, UserPrefs, getUserDisplayName } from 'models/user';\nimport { AxiosInstance } from \"axios\";\nimport { ApiActions } from \"services/api/api-actions\";\nimport uuid from \"uuid/v4\";\nimport { Session, SessionStatus } from \"models/session\";\nimport { Config } from \"common/config\";\nimport { uniqBy } from \"lodash\";\n\nexport const TARGET_URL = 'targetURL';\nexport const API_TOKEN_KEY = 'apiToken';\nexport const USER_EMAIL_KEY = 'userEmail';\nexport const USER_FIRST_NAME_KEY = 'userFirstName';\nexport const USER_LAST_NAME_KEY = 'userLastName';\nexport const USER_UUID_KEY = 'userUuid';\nexport const USER_OWNER_UUID_KEY = 'userOwnerUuid';\nexport const USER_IS_ADMIN = 'isAdmin';\nexport const USER_IS_ACTIVE = 'isActive';\nexport const USER_USERNAME = 'username';\nexport const USER_PREFS = 'prefs';\nexport const HOME_CLUSTER = 'homeCluster';\nexport const LOCAL_STORAGE = 'localStorage';\nexport const SESSION_STORAGE = 'sessionStorage';\n\nexport interface UserDetailsResponse {\n    email: string;\n    first_name: string;\n    last_name: string;\n    uuid: string;\n    owner_uuid: string;\n    is_admin: boolean;\n    is_active: boolean;\n    username: string;\n    prefs: UserPrefs;\n    can_write: boolean;\n    can_manage: boolean;\n}\n\nexport class AuthService {\n\n    constructor(\n        protected apiClient: AxiosInstance,\n        protected baseUrl: string,\n        protected actions: ApiActions,\n        protected useSessionStorage: boolean = false) { }\n\n    private getStorage() {\n        if (this.useSessionStorage) {\n            return sessionStorage;\n        }\n        return localStorage;\n    }\n\n    public getStorageType() {\n        if (this.useSessionStorage) {\n            return SESSION_STORAGE;\n        }\n        return LOCAL_STORAGE;\n    }\n\n    public saveApiToken(token: string) {\n        this.removeApiToken();\n        this.getStorage().setItem(API_TOKEN_KEY, token);\n        const sp = token.split('/');\n        if (sp.length === 3) {\n            this.getStorage().setItem(HOME_CLUSTER, sp[1].substring(0, 5));\n        }\n    }\n\n    public setTargetUrl(url: string) {\n        localStorage.setItem(TARGET_URL, url);\n    }\n\n    public removeTargetURL() {\n        localStorage.removeItem(TARGET_URL);\n    }\n\n    public getTargetURL() {\n        return localStorage.getItem(TARGET_URL);\n    }\n\n    public removeApiToken() {\n        localStorage.removeItem(API_TOKEN_KEY);\n        sessionStorage.removeItem(API_TOKEN_KEY);\n    }\n\n    public getApiToken() {\n        return this.getStorage().getItem(API_TOKEN_KEY) || undefined;\n    }\n\n    public getHomeCluster() {\n        return this.getStorage().getItem(HOME_CLUSTER) || undefined;\n    }\n\n    public getApiClient() {\n        return this.apiClient;\n    }\n\n    public removeUser() {\n        [localStorage, sessionStorage].forEach((storage) => {\n            storage.removeItem(USER_EMAIL_KEY);\n            storage.removeItem(USER_FIRST_NAME_KEY);\n            storage.removeItem(USER_LAST_NAME_KEY);\n            storage.removeItem(USER_UUID_KEY);\n            storage.removeItem(USER_OWNER_UUID_KEY);\n            storage.removeItem(USER_IS_ADMIN);\n            storage.removeItem(USER_IS_ACTIVE);\n            storage.removeItem(USER_USERNAME);\n            storage.removeItem(USER_PREFS);\n            storage.removeItem(TARGET_URL);\n        });\n    }\n\n    public 
login(uuidPrefix: string, homeCluster: string, loginCluster: string, remoteHosts: { [key: string]: string }) {\n        const currentUrl = `${window.location.protocol}//${window.location.host}/token`;\n        const homeClusterHost = remoteHosts[homeCluster];\n        const rd = new URL(window.location.href);\n        this.setTargetUrl(rd.pathname + rd.search);\n        window.location.assign(`https://${homeClusterHost}/login?${(uuidPrefix !== homeCluster && homeCluster !== loginCluster) ? \"remote=\" + uuidPrefix + \"&\" : \"\"}return_to=${currentUrl}`);\n    }\n\n    public logout(expireToken: string, preservePath: boolean) {\n        const fullUrl = new URL(window.location.href);\n        const wbBase = `${fullUrl.protocol}//${fullUrl.host}`;\n        const wbPath = fullUrl.pathname + fullUrl.search;\n        const returnTo = `${wbBase}${preservePath ? wbPath : ''}`\n\n        window.location.assign(`${this.baseUrl || \"\"}/logout?api_token=${expireToken}&return_to=${returnTo}`);\n    }\n\n    public getUserDetails = (showErrors?: boolean): Promise<User> => {\n        const reqId = uuid();\n        this.actions.progressFn(reqId, true);\n        return this.apiClient\n            .get<UserDetailsResponse>('/users/current')\n            .then(resp => {\n                this.actions.progressFn(reqId, false);\n                const prefs = resp.data.prefs.profile ? resp.data.prefs : { profile: {} };\n                return {\n                    email: resp.data.email,\n                    firstName: resp.data.first_name,\n                    lastName: resp.data.last_name,\n                    uuid: resp.data.uuid,\n                    ownerUuid: resp.data.owner_uuid,\n                    isAdmin: resp.data.is_admin,\n                    isActive: resp.data.is_active,\n                    username: resp.data.username,\n                    canWrite: resp.data.can_write,\n                    canManage: resp.data.can_manage,\n                    prefs\n                };\n            })\n            .catch(e => {\n                this.actions.progressFn(reqId, false);\n                this.actions.errorFn(reqId, e, showErrors);\n                throw e;\n            });\n    }\n\n    public getSessions(): Session[] {\n        try {\n            const sessions = JSON.parse(this.getStorage().getItem(\"sessions\") || '');\n            return sessions;\n        } catch {\n            return [];\n        }\n    }\n\n    public saveSessions(sessions: Session[]) {\n        this.removeSessions();\n        this.getStorage().setItem(\"sessions\", JSON.stringify(sessions));\n    }\n\n    public removeSessions() {\n        localStorage.removeItem(\"sessions\");\n        sessionStorage.removeItem(\"sessions\");\n    }\n\n    public buildSessions(cfg: Config, user?: User) {\n        const currentSession = {\n            clusterId: cfg.uuidPrefix,\n            remoteHost: cfg.rootUrl,\n            baseUrl: cfg.baseUrl,\n            name: user ? getUserDisplayName(user) : '',\n            email: user ? user.email : '',\n            userIsActive: user ? user.isActive : false,\n            token: this.getApiToken(),\n            loggedIn: true,\n            active: true,\n            uuid: user ? 
user.uuid : '',\n            status: SessionStatus.VALIDATED,\n            apiRevision: cfg.apiRevision,\n        } as Session;\n        const localSessions = this.getSessions().map(s => ({\n            ...s,\n            active: false,\n            status: SessionStatus.INVALIDATED\n        }));\n\n        const cfgSessions = Object.keys(cfg.remoteHosts).map(clusterId => {\n            const remoteHost = cfg.remoteHosts[clusterId];\n            return {\n                clusterId,\n                remoteHost,\n                baseUrl: '',\n                name: '',\n                email: '',\n                token: '',\n                loggedIn: false,\n                active: false,\n                uuid: '',\n                status: SessionStatus.INVALIDATED,\n                apiRevision: 0,\n            } as Session;\n        });\n        const sessions = [currentSession]\n            .concat(cfgSessions)\n            .concat(localSessions)\n            .filter((r: Session) => r.clusterId !== \"*\");\n\n        const uniqSessions = uniqBy(sessions, 'clusterId');\n\n        return uniqSessions;\n    }\n}\n"
  },
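  {
    "path": "services/workbench2/src/services/auth-service/token-format.example.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Illustrative sketch only: this file and its helper are hypothetical, not\n// part of the Workbench 2 source tree.  It unpacks the parsing that\n// AuthService.saveApiToken does inline: a v2-format Arvados token looks\n// like \"v2/<api_client_authorization uuid>/<secret>\", and the first five\n// characters of that uuid are the id of the cluster that issued it, which\n// saveApiToken stores under HOME_CLUSTER.\n\nexport const homeClusterFromToken = (token: string): string | undefined => {\n    const sp = token.split('/');\n    // Only v2-format tokens (three '/'-separated parts) carry a cluster id;\n    // a bare secret gives no hint about the issuing cluster.\n    return sp.length === 3 ? sp[1].substring(0, 5) : undefined;\n};\n\n// homeClusterFromToken('v2/zzzzz-gj3su-0123456789abcde/secret') === 'zzzzz'\n"
  },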
  {
    "path": "services/workbench2/src/services/authorized-keys-service/authorized-keys-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { AxiosInstance } from \"axios\";\nimport { SshKeyResource } from 'models/ssh-key';\nimport { CommonResourceService, CommonResourceServiceError } from 'services/common-service/common-resource-service';\nimport { ApiActions } from \"services/api/api-actions\";\n\nexport enum AuthorizedKeysServiceError {\n    UNIQUE_PUBLIC_KEY = 'UniquePublicKey',\n    INVALID_PUBLIC_KEY = 'InvalidPublicKey',\n}\n\nexport class AuthorizedKeysService extends CommonResourceService<SshKeyResource> {\n    constructor(serverApi: AxiosInstance, actions: ApiActions) {\n        super(serverApi, \"authorized_keys\", actions);\n    }\n}\n\nexport const getAuthorizedKeysServiceError = (errorResponse: any) => {\n    if ('errors' in errorResponse && 'errorToken' in errorResponse) {\n        const error = errorResponse.errors.join('');\n        switch (true) {\n            case /Public key does not appear to be a valid ssh-rsa or dsa public key/.test(error):\n                return AuthorizedKeysServiceError.INVALID_PUBLIC_KEY;\n            case /Public key already exists in the database, use a different key./.test(error):\n                return AuthorizedKeysServiceError.UNIQUE_PUBLIC_KEY;\n            default:\n                return CommonResourceServiceError.UNKNOWN;\n        }\n    }\n    return CommonResourceServiceError.NONE;\n};"
  },
  {
    "path": "services/workbench2/src/services/collection-service/collection-service-files-response.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { getFileFullPath, extractFilesData } from './collection-service-files-response';\n\ndescribe('collection-service-files-response', () => {\n\n    describe('extractFilesData', () => {\n        it('should correctly decode URLs & file names', () => {\n            const testCases = [\n                // input URL, input display name, expected URL, expected name\n                ['table%201%202%203', 'table 1 2 3', 'table%201%202%203', 'table 1 2 3'],\n                ['table%25&amp;%3F%2A2', 'table%&amp;?*2', 'table%25&%3F%2A2', 'table%&?*2'],\n                [\"G%C3%BCnter%27s%20file.pdf\", \"Günter&#39;s file.pdf\", \"G%C3%BCnter%27s%20file.pdf\", \"Günter's file.pdf\"],\n                ['G%25C3%25BCnter%27s%2520file.pdf', 'G%C3%BCnter&#39;s%20file.pdf', \"G%25C3%25BCnter%27s%2520file.pdf\", \"G%C3%BCnter's%20file.pdf\"]\n            ];\n\n            testCases.forEach(([inputURL, inputDisplayName, expectedURL, expectedName]) => {\n                // given\n                const collUUID = 'xxxxx-zzzzz-vvvvvvvvvvvvvvv';\n                const xmlData = `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n                <D:multistatus xmlns:D=\"DAV:\">\n                    <D:response>\n                        <D:href>/c=xxxxx-zzzzz-vvvvvvvvvvvvvvv/</D:href>\n                        <D:propstat>\n                            <D:prop>\n                                <D:resourcetype>\n                                    <D:collection xmlns:D=\"DAV:\"/>\n                                </D:resourcetype>\n                                <D:supportedlock>\n                                    <D:lockentry xmlns:D=\"DAV:\">\n                                        <D:lockscope>\n                                            <D:exclusive/>\n                                        </D:lockscope>\n                                        <D:locktype>\n                                            <D:write/>\n                                        </D:locktype>\n                                    </D:lockentry>\n                                </D:supportedlock>\n                                <D:displayname></D:displayname>\n                                <D:getlastmodified>Fri, 26 Mar 2021 14:44:08 GMT</D:getlastmodified>\n                            </D:prop>\n                            <D:status>HTTP/1.1 200 OK</D:status>\n                        </D:propstat>\n                    </D:response>\n                    <D:response>\n                        <D:href>/c=${collUUID}/${inputURL}</D:href>\n                        <D:propstat>\n                            <D:prop>\n                                <D:resourcetype></D:resourcetype>\n                                <D:getcontenttype>application/pdf</D:getcontenttype>\n                                <D:supportedlock>\n                                    <D:lockentry xmlns:D=\"DAV:\">\n                                        <D:lockscope>\n                                            <D:exclusive/>\n                                        </D:lockscope>\n                                        <D:locktype>\n                                            <D:write/>\n                                        </D:locktype>\n                                    </D:lockentry>\n                                </D:supportedlock>\n                                <D:displayname>${inputDisplayName}</D:displayname>\n                             
   <D:getcontentlength>3</D:getcontentlength>\n                                <D:getlastmodified>Fri, 26 Mar 2021 14:44:08 GMT</D:getlastmodified>\n                                <D:getetag>\"166feb9c9110c008325a59\"</D:getetag>\n                            </D:prop>\n                            <D:status>HTTP/1.1 200 OK</D:status>\n                        </D:propstat>\n                    </D:response>\n                </D:multistatus>\n                `;\n                const parser = new DOMParser();\n                const xmlDoc = parser.parseFromString(xmlData, \"text/xml\");\n\n                // when\n                const result = extractFilesData(xmlDoc);\n\n                // then\n                expect(result).to.deep.equal([\n                    { \n                        id: `${collUUID}/${expectedName}`, \n                        name: expectedName, \n                        path: \"\", \n                        size: 3, \n                        type: \"file\", \n                        url: `/c=${collUUID}/${expectedURL}`\n                    }\n                ]);\n            });\n        });\n    });\n\n    describe('getFileFullPath', () => {\n        it('should encode weird names', async () => {\n            // given\n            const file = {\n                name: '#test',\n                path: 'http://localhost',\n            };\n\n            // when\n            const result = getFileFullPath(file);\n\n            // then\n            expect(result).to.equal('http://localhost/#test');\n        });\n\n    });\n});"
  },
  {
    "path": "services/workbench2/src/services/collection-service/collection-service-files-response.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { CollectionDirectory, CollectionFile, CollectionFileType, createCollectionDirectory, createCollectionFile } from \"../../models/collection-file\";\nimport { getTagValue } from \"common/xml\";\nimport { getNodeChildren, Tree, mapTree } from 'models/tree';\n\nexport const sortFilesTree = (tree: Tree<CollectionDirectory | CollectionFile>) => {\n    return mapTree<CollectionDirectory | CollectionFile>(node => {\n        const children = getNodeChildren(node.id)(tree);\n\n        children.sort((a, b) =>\n            a.value.type !== b.value.type\n                ? a.value.type === CollectionFileType.DIRECTORY ? -1 : 1\n                : a.value.name.localeCompare(b.value.name)\n        );\n        return { ...node, children: children.map(child => child.id) };\n    })(tree);\n};\n\nexport const extractFilesData = (document: Document) => {\n    const collectionUrlPrefix = /\\/c=([^/]*)/;\n    return Array\n        .from(document.getElementsByTagName('D:response'))\n        .slice(1) // omit first element which is collection itself\n        .map(element => {\n            const name = getTagValue(element, 'D:displayname', '', true); // skip decoding as value should be already decoded\n            const size = parseInt(getTagValue(element, 'D:getcontentlength', '0', true), 10);\n            const url = getTagValue(element, 'D:href', '', true);\n            const collectionUuidMatch = collectionUrlPrefix.exec(url);\n            const collectionUuid = collectionUuidMatch ? collectionUuidMatch.pop() : '';\n            const pathArray = url.split(`/`);\n            if (!pathArray.pop()) {\n                pathArray.pop();\n            }\n            const directory = pathArray.join('/')\n                .replace(collectionUrlPrefix, '')\n                .replace(/\\/\\//g, '/');\n\n            const parentPath = directory.replace(/\\/$/, '');\n            const data = {\n                url,\n                id: [\n                    collectionUuid ? collectionUuid : '',\n                    directory ? unescape(parentPath) : '',\n                    '/' + name\n                ].join(''),\n                name,\n                path: unescape(parentPath),\n            };\n\n            const result = getTagValue(element, 'D:resourcetype', '')\n                ? createCollectionDirectory(data)\n                : createCollectionFile({ ...data, size });\n\n            return result;\n        });\n};\n\nexport const getFileFullPath = ({ name, path }: CollectionFile | CollectionDirectory) => {\n    return `${path}/${name}`;\n};\n"
  },
  {
    "path": "services/workbench2/src/services/collection-service/collection-service.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport axios from 'axios';\nimport { snakeCase } from 'lodash';\nimport { defaultCollectionSelectedFields } from 'models/collection';\nimport { CollectionService, emptyCollectionPdh, getMinNecessaryPaths } from './collection-service';\n\ndescribe('collection-service', () => {\n    let collectionService = {};\n    let serverApi;\n    let keepWebdavClient;\n    let authService;\n    let actions;\n\n    const fakeXhr = {\n        status: 200,\n        responseText: '<note><to>User</to><from>Client</from><body>Hello!</body></note>',\n        getResponseHeader: (header) => {\n            if (header.toLowerCase() === 'content-type') {\n                return 'application/xml';\n            }\n            return null;\n        },\n        readyState: 4,\n        responseXML: new DOMParser().parseFromString('<note><to>User</to><from>Client</from><body>Hello!</body></note>', 'application/xml'),\n    };\n\n    beforeEach(() => {\n        serverApi = axios.create();\n        keepWebdavClient = {\n            delete: cy.stub(),\n            upload: cy.stub().as('upload'),\n            mkdir: cy.stub(),\n            propfind: cy.stub().resolves(fakeXhr),\n        };\n        authService = {};\n        actions = {\n            progressFn: cy.stub(),\n            errorFn: cy.stub(),\n        };\n        collectionService = new CollectionService(serverApi, keepWebdavClient, authService, actions);\n        collectionService.update = cy.stub();\n    });\n\n    describe('get', () => {\n        it('should make a request with default selected fields', async () => {\n            serverApi.get = cy.stub().returns(Promise.resolve(\n                { data: { items: [{}] } }\n            )).as('get');\n            const uuid = 'zzzzz-4zz18-0123456789abcde'\n            await collectionService.get(uuid);\n            cy.get('@get').should('be.calledWith', `/collections/${uuid}`, {\n                params: {\n                    select: JSON.stringify(defaultCollectionSelectedFields.map(snakeCase)),\n                }\n            }); \n        });\n\n        it('should be able to request specific fields', async () => {\n            serverApi.get = cy.stub().returns(Promise.resolve(\n                { data: { items: [{}] } }\n            )).as('get');\n            const uuid = 'zzzzz-4zz18-0123456789abcde'\n            await collectionService.get(uuid, undefined, ['manifestText']);\n            cy.get('@get').should('be.calledWith', `/collections/${uuid}`, {\n                params: {\n                    select: JSON.stringify(['manifest_text']),\n                }\n            });\n        });\n    });\n\n    describe('update', () => {\n        it('should call put selecting updated fields + others', async () => {\n            serverApi.put = cy.stub().returns(Promise.resolve({ data: {} })).as('put');\n            const data = {\n                name: 'foo',\n            };\n            const expected = {\n                collection: {\n                    ...data,\n                    preserve_version: true,\n                },\n                select: ['uuid', 'name', 'version', 'modified_at'],\n            }\n            collectionService = new CollectionService(serverApi, keepWebdavClient, authService, actions);\n            await collectionService.update('uuid', data);\n            cy.get('@put').should('be.calledWith', '/collections/uuid', expected);\n        });\n    });\n\n    
describe('uploadFiles', () => {\n        it('should skip upload if no files to upload', async () => {\n            // given\n            const files = [];\n            const collectionUUID = '';\n\n            // when\n            await collectionService.uploadFiles(collectionUUID, files);\n\n            // then\n            cy.get('@upload').should('not.have.been.called');\n        });\n\n        it('should upload files', async () => {\n            // given\n            const files = [{name: 'test-file1'}];\n            const collectionUUID = 'zzzzz-4zz18-0123456789abcde';\n\n            // when\n            await collectionService.uploadFiles(collectionUUID, files);\n\n            // then\n            cy.get('@upload').should('have.been.calledOnce');\n            cy.get('@upload').should('have.been.calledWith', \"c=zzzzz-4zz18-0123456789abcde/test-file1\");\n        });\n\n        it('should upload files with custom upload target', async () => {\n            // given\n            const files = [{name: 'test-file1'}];\n            const collectionUUID = 'zzzzz-4zz18-0123456789abcde';\n            const customTarget = 'zzzzz-4zz18-0123456789adddd/test-path/';\n\n            // when\n            await collectionService.uploadFiles(collectionUUID, files, undefined, customTarget);\n\n            // then\n            cy.get('@upload').should('have.been.calledOnce');\n            cy.get('@upload').should('have.been.calledWith', \"c=zzzzz-4zz18-0123456789adddd/test-path/test-file1\");\n        });\n    });\n\n    describe('deleteFiles', () => {\n        it('should remove no files', async () => {\n            // given\n            serverApi.put = cy.stub().returns(Promise.resolve({ data: {} })).as('put');\n            const filePaths = [];\n            const collectionUUID = 'zzzzz-tpzed-5o5tg0l9a57gxxx';\n\n            // when\n            await collectionService.deleteFiles(collectionUUID, filePaths);\n\n            // then\n            cy.get('@put').should('have.been.calledOnce');\n            cy.get('@put').should('have.been.calledWith', `/collections/${collectionUUID}`, {\n                collection: {\n                    preserve_version: true\n                },\n                replace_files: {},\n            });\n        });\n\n        it('should remove only root files', async () => {\n            // given\n            serverApi.put = cy.stub().returns(Promise.resolve({ data: {} })).as('put');\n            const filePaths = ['/root/1', '/root/1/100', '/root/1/100/test.txt', '/root/2', '/root/2/200', '/root/3/300/test.txt'];\n            const collectionUUID = 'zzzzz-tpzed-5o5tg0l9a57gxxx';\n\n            // when\n            await collectionService.deleteFiles(collectionUUID, filePaths);\n\n            // then\n            cy.get('@put').should('have.been.calledOnce');\n            cy.get('@put').should('have.been.calledWith', `/collections/${collectionUUID}`, {\n                collection: {\n                    preserve_version: true\n                },\n                replace_files: {\n                    '/root/1': '',\n                    '/root/2': '',\n                    '/root/3/300/test.txt': '',\n                },\n            });\n        });\n\n        it('should batch remove files', async () => {\n            serverApi.put = cy.stub().returns(Promise.resolve({ data: {} })).as('put');\n            // given\n            const filePaths = ['/root/1', '/secondFile', 'barefile.txt'];\n            const collectionUUID = 'zzzzz-4zz18-5o5tg0l9a57gxxx';\n\n            // when\n    
        await collectionService.deleteFiles(collectionUUID, filePaths);\n\n            // then\n            cy.get('@put').should('have.been.calledOnce');\n            cy.get('@put').should('have.been.calledWith', `/collections/${collectionUUID}`, {\n                collection: {\n                    preserve_version: true\n                },\n                replace_files: {\n                    '/root/1': '',\n                    '/secondFile': '',\n                    '/barefile.txt': '',\n                },\n            });\n        });\n    });\n\n    describe('renameFile', () => {\n        it('should rename file', async () => {\n            serverApi.put = cy.stub().returns(Promise.resolve({ data: {} })).as('put');\n            const collectionUuid = 'zzzzz-4zz18-ywq0rvhwwhkjnfq';\n            const collectionPdh = '8cd9ce1dfa21c635b620b1bfee7aaa08+180';\n            const oldPath = '/old/path';\n            const newPath = '/new/filename';\n\n            await collectionService.renameFile(collectionUuid, collectionPdh, oldPath, newPath);\n\n            cy.get('@put').should('have.been.calledOnce');\n            cy.get('@put').should('have.been.calledWith', `/collections/${collectionUuid}`, {\n                collection: {\n                    preserve_version: true\n                },\n                replace_files: {\n                    [newPath]: `${collectionPdh}${oldPath}`,\n                    [oldPath]: '',\n                },\n            });\n        });\n    });\n\n    describe('copyFiles', () => {\n        it('should batch copy files', async () => {\n            serverApi.put = cy.stub().returns(Promise.resolve({ data: {} })).as('put');\n            const filePaths = ['/root/1', '/secondFile', 'barefile.txt'];\n            const sourcePdh = '8cd9ce1dfa21c635b620b1bfee7aaa08+180';\n\n            const destinationUuid = 'zzzzz-4zz18-ywq0rvhwwhkjnfq';\n            const destinationPath = '/destinationPath';\n\n            // when\n            await collectionService.copyFiles(sourcePdh, filePaths, {uuid: destinationUuid}, destinationPath);\n\n            // then\n            cy.get('@put').should('have.been.calledOnce');\n            cy.get('@put').should('have.been.calledWith', `/collections/${destinationUuid}`, {\n                collection: {\n                    preserve_version: true\n                },\n                replace_files: {\n                    [`${destinationPath}/1`]: `${sourcePdh}/root/1`,\n                    [`${destinationPath}/secondFile`]: `${sourcePdh}/secondFile`,\n                    [`${destinationPath}/barefile.txt`]: `${sourcePdh}/barefile.txt`,\n                },\n            });\n        });\n\n        it('should copy files from root', async () => {\n            // Test copying from root paths\n            serverApi.put = cy.stub().returns(Promise.resolve({ data: {} })).as('put');\n            const filePaths = ['/'];\n            const sourcePdh = '8cd9ce1dfa21c635b620b1bfee7aaa08+180';\n\n            const destinationUuid = 'zzzzz-4zz18-ywq0rvhwwhkjnfq';\n            const destinationPath = '/destinationPath';\n\n            await collectionService.copyFiles(sourcePdh, filePaths, {uuid: destinationUuid}, destinationPath);\n\n            cy.get('@put').should('have.been.calledOnce');\n            cy.get('@put').should('have.been.calledWith', `/collections/${destinationUuid}`, {\n                collection: {\n                    preserve_version: true\n                },\n                replace_files: {\n                    
[`${destinationPath}`]: `${sourcePdh}/`,\n                },\n            });\n        });\n\n        it('should copy files to root path', async () => {\n            // Test copying to root paths\n            serverApi.put = cy.stub().returns(Promise.resolve({ data: {} })).as('put');\n            const filePaths = ['/'];\n            const sourcePdh = '8cd9ce1dfa21c635b620b1bfee7aaa08+180';\n\n            const destinationUuid = 'zzzzz-4zz18-ywq0rvhwwhkjnfq';\n            const destinationPath = '/';\n\n            await collectionService.copyFiles(sourcePdh, filePaths, {uuid: destinationUuid}, destinationPath);\n\n            cy.get('@put').should('have.been.calledOnce');\n            cy.get('@put').should('have.been.calledWith', `/collections/${destinationUuid}`, {\n                collection: {\n                    preserve_version: true\n                },\n                replace_files: {\n                    \"/\": `${sourcePdh}/`,\n                },\n            });\n        });\n    });\n\n    describe('moveFiles', () => {\n        it('should batch move files', async () => {\n            serverApi.put = cy.stub().returns(Promise.resolve({ data: {} })).as('put');\n            // given\n            const filePaths = ['/rootFile', '/secondFile', '/subpath/subfile', 'barefile.txt'];\n            const srcCollectionUUID = 'zzzzz-4zz18-5o5tg0l9a57gxxx';\n            const srcCollectionPdh = '8cd9ce1dfa21c635b620b1bfee7aaa08+180';\n\n            const destinationUuid = 'zzzzz-4zz18-ywq0rvhwwhkjnfq';\n            const destinationPath = '/destinationPath';\n\n            // when\n            await collectionService.moveFiles(srcCollectionUUID, srcCollectionPdh, filePaths, {uuid: destinationUuid}, destinationPath);\n\n            // then\n            cy.get('@put').should('have.been.calledTwice');\n            // Verify copy\n            cy.get('@put').should('have.been.calledWith', `/collections/${destinationUuid}`, {\n                collection: {\n                    preserve_version: true\n                },\n                replace_files: {\n                    [`${destinationPath}/rootFile`]: `${srcCollectionPdh}/rootFile`,\n                    [`${destinationPath}/secondFile`]: `${srcCollectionPdh}/secondFile`,\n                    [`${destinationPath}/subfile`]: `${srcCollectionPdh}/subpath/subfile`,\n                    [`${destinationPath}/barefile.txt`]: `${srcCollectionPdh}/barefile.txt`,\n                },\n            });\n            // Verify delete\n            cy.get('@put').should('have.been.calledWith', `/collections/${srcCollectionUUID}`, {\n                collection: {\n                    preserve_version: true\n                },\n                replace_files: {\n                    '/rootFile': '',\n                    '/secondFile': '',\n                    '/subpath/subfile': '',\n                    '/barefile.txt': '',\n                },\n            });\n        });\n\n        it('should batch move files within collection', async () => {\n            serverApi.put = cy.stub().returns(Promise.resolve({ data: {} })).as('put');\n            // given\n            const filePaths = ['/one', '/two', '/subpath/subfile', 'barefile.txt'];\n            const srcCollectionUUID = 'zzzzz-4zz18-5o5tg0l9a57gxxx';\n            const srcCollectionPdh = '8cd9ce1dfa21c635b620b1bfee7aaa08+180';\n\n            const destinationPath = '/destinationPath';\n\n            // when\n            await collectionService.moveFiles(srcCollectionUUID, srcCollectionPdh, filePaths, {uuid: 
srcCollectionUUID}, destinationPath);\n\n            // then\n            cy.get('@put').should('have.been.calledOnce');\n            // Verify copy\n            cy.get('@put').should('have.been.calledWith', `/collections/${srcCollectionUUID}`, {\n                collection: {\n                    preserve_version: true\n                },\n                replace_files: {\n                    [`${destinationPath}/one`]: `${srcCollectionPdh}/one`,\n                    ['/one']: '',\n                    [`${destinationPath}/two`]: `${srcCollectionPdh}/two`,\n                    ['/two']: '',\n                    [`${destinationPath}/subfile`]: `${srcCollectionPdh}/subpath/subfile`,\n                    ['/subpath/subfile']: '',\n                    [`${destinationPath}/barefile.txt`]: `${srcCollectionPdh}/barefile.txt`,\n                    ['/barefile.txt']: '',\n                },\n            });\n        });\n\n        it('should abort batch move when copy fails', async () => {\n            // Simulate failure to copy\n            // rejection error will show up in console, but it's expected\n            serverApi.put = cy.stub().returns(Promise.reject({\n                data: {},\n                response: {\n                    \"errors\": [\"error getting snapshot of \\\"rootFile\\\" from \\\"8cd9ce1dfa21c635b620b1bfee7aaa08+180\\\": file does not exist\"]\n                }\n            })).as('put');\n            // given\n            const filePaths = ['/rootFile', '/secondFile', '/subpath/subfile', 'barefile.txt'];\n            const srcCollectionUUID = 'zzzzz-4zz18-5o5tg0l9a57gxxx';\n            const srcCollectionPdh = '8cd9ce1dfa21c635b620b1bfee7aaa08+180';\n\n            const destinationUuid = 'zzzzz-4zz18-ywq0rvhwwhkjnfq';\n            const destinationPath = '/destinationPath';\n\n            // when\n            try {\n                await collectionService.moveFiles(srcCollectionUUID, srcCollectionPdh, filePaths, {uuid: destinationUuid}, destinationPath);\n            } catch {}\n\n            // then\n            cy.get('@put').should('have.been.calledOnce');\n            // Verify copy\n            cy.get('@put').should('have.been.calledWith', `/collections/${destinationUuid}`, {\n                collection: {\n                    preserve_version: true\n                },\n                replace_files: {\n                    [`${destinationPath}/rootFile`]: `${srcCollectionPdh}/rootFile`,\n                    [`${destinationPath}/secondFile`]: `${srcCollectionPdh}/secondFile`,\n                    [`${destinationPath}/subfile`]: `${srcCollectionPdh}/subpath/subfile`,\n                    [`${destinationPath}/barefile.txt`]: `${srcCollectionPdh}/barefile.txt`,\n                },\n            });\n        });\n    });\n\n    describe('createDirectory', () => {\n        it('creates empty directory', async () => {\n            // given\n            const directoryNames = [\n                {in: 'newDir', out: 'newDir'},\n                {in: '/fooDir', out: 'fooDir'},\n                {in: '/anotherPath/', out: 'anotherPath'},\n                {in: 'trailingSlash/', out: 'trailingSlash'},\n            ];\n            const collectionUuid = 'zzzzz-tpzed-5o5tg0l9a57gxxx';\n\n            for (var i = 0; i < directoryNames.length; i++) {\n                serverApi.put = cy.stub().returns(Promise.resolve({ data: {} })).as('put');\n                // when\n                // pass one directory per iteration so each assertion matches a single-entry replace_files map\n                await collectionService.createDirectory(collectionUuid, [directoryNames[i].in]);\n                
// then\n                cy.get('@put').should('have.been.calledOnce');\n                cy.get('@put').should('have.been.calledWith', `/collections/${collectionUuid}`, {\n                    collection: {\n                        preserve_version: true\n                    },\n                    replace_files: {\n                        [\"/\" + directoryNames[i].out]: emptyCollectionPdh,\n                    },\n                });\n            }\n        });\n    });\n\n    describe('getMinNecessaryPaths', () => {\n        const testArgs = [\n            {\n                in: [\n                    '/foo/bar/baz/qux',\n                    '/foo/bar/baz/qux/quux',\n                    '/foo/bar/baz/qux/quux/quuux',\n                    '/foo/bar/baz/qux/quux/quuux/quuuux',\n                    '/foo/bar/baz/qux/quux/quuux/quuuux/quuuuxx',\n                    '/foo/bar/baz/qux/quux/quuux/quuuux/quuuuxx/quuuuxxx',\n                ],\n                out: ['/foo/bar/baz/qux/quux/quuux/quuuux/quuuuxx/quuuuxxx'],\n            },\n            {\n                in: [\n                    '/foo/bar/baz/qux',\n                    '/foo/bar/baz/quux',\n                    '/foo/bar/baz/quux/quuux',\n                    '/foo/bar/baz/qux/quux/quuux/quuuux/quuuuxx/quuuuxxx',\n                ],\n                out: ['/foo/bar/baz/quux/quuux', '/foo/bar/baz/qux/quux/quuux/quuuux/quuuuxx/quuuuxxx'],\n            },\n            {\n                in: [\n                    null,\n                    undefined,\n                    17,\n                    (foo) => {},\n                    '',\n                    '/foo/bar/baz/qux',\n                    '/foo/bar/baz/quux',\n                ],\n                out: ['/foo/bar/baz/qux', '/foo/bar/baz/quux'],\n            },\n        ];\n        it('should return the minimum unique paths', () => {\n            testArgs.forEach((testArg) => {\n                const paths = getMinNecessaryPaths(testArg.in);\n                expect(paths).to.deep.equal(testArg.out);\n            });\n        });\n    });\n\n});\n"
  },
  {
    "path": "services/workbench2/src/services/collection-service/collection-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { CollectionResource, defaultCollectionSelectedFields } from \"models/collection\";\nimport { AxiosInstance, AxiosResponse } from \"axios\";\nimport { CollectionFile, CollectionDirectory } from \"models/collection-file\";\nimport { WebDAV } from \"common/webdav\";\nimport { AuthService } from \"../auth-service/auth-service\";\nimport { extractFilesData } from \"./collection-service-files-response\";\nimport { TrashableResourceService } from \"services/common-service/trashable-resource-service\";\nimport { ApiActions } from \"services/api/api-actions\";\nimport { Session } from \"models/session\";\nimport { CommonService } from \"services/common-service/common-service\";\nimport { snakeCase } from \"lodash\";\nimport { CommonResourceServiceError } from \"services/common-service/common-resource-service\";\n\nexport type UploadProgress = (fileId: number, loaded: number, total: number, currentTime: number) => void;\ntype CollectionPartialUpdateOrCreate =\n    | (Partial<CollectionResource> & Pick<CollectionResource, \"uuid\">)\n    | (Partial<CollectionResource> & Pick<CollectionResource, \"ownerUuid\">);\n\ntype ReplaceFilesPayload = {\n    collection: Partial<CollectionResource>;\n    replace_files: {[key: string]: string};\n}\n\ntype FileWithRelativePath = File & { relativePath?: string, webkitRelativePath?: string };\n\nexport const emptyCollectionPdh = \"d41d8cd98f00b204e9800998ecf8427e+0\";\nexport const SOURCE_DESTINATION_EQUAL_ERROR_MESSAGE = \"Source and destination cannot be the same\";\n\nexport class CollectionService extends TrashableResourceService<CollectionResource> {\n    constructor(serverApi: AxiosInstance, private keepWebdavClient: WebDAV, private authService: AuthService, actions: ApiActions) {\n        super(serverApi, \"collections\", actions, [\n            \"fileCount\",\n            \"fileSizeTotal\",\n            \"replicationConfirmed\",\n            \"replicationConfirmedAt\",\n            \"storageClassesConfirmed\",\n            \"storageClassesConfirmedAt\",\n            \"unsignedManifestText\",\n            \"version\",\n        ]);\n    }\n\n    async get(uuid: string, showErrors?: boolean, select?: string[], session?: Session) {\n        super.validateUuid(uuid);\n        const selectParam = select || defaultCollectionSelectedFields;\n        return super.get(uuid, showErrors, selectParam, session);\n    }\n\n    create(data?: Partial<CollectionResource>, showErrors?: boolean) {\n        return super.create({ ...data, preserveVersion: true }, showErrors);\n    }\n\n    update(uuid: string, data: Partial<CollectionResource>, showErrors?: boolean) {\n        const select = [...Object.keys(data), \"version\", \"modifiedAt\"];\n        return super.update(uuid, { ...data, preserveVersion: true }, showErrors, select);\n    }\n\n    async files(uuid: string) {\n        try {\n            const request = await this.keepWebdavClient.propfind(`c=${uuid}`);\n            if (request.responseXML != null) {\n                return extractFilesData(request.responseXML);\n            }\n        } catch (e) {\n            return Promise.reject(e);\n        }\n        return Promise.reject();\n    }\n\n    private combineFilePath(parts: string[]) {\n        return parts.reduce((path, part) => {\n            // Trim leading and trailing slashes\n            const trimmedPart = part.split(\"/\").filter(Boolean).join(\"/\");\n            if 
(trimmedPart.length) {\n                const separator = path.endsWith(\"/\") ? \"\" : \"/\";\n                return `${path}${separator}${trimmedPart}`;\n            } else {\n                return path;\n            }\n        }, \"/\");\n    }\n\n    private replaceFiles(data: CollectionPartialUpdateOrCreate, fileMap: {}, showErrors?: boolean, preserveVersion: boolean = true) {\n        const payload: ReplaceFilesPayload = {\n            collection: {\n                preserve_version: preserveVersion,\n                ...CommonService.mapKeys(snakeCase)(data),\n                // Don't send uuid in payload when creating\n                uuid: undefined,\n            },\n            replace_files: fileMap,\n        };\n        if (data.uuid) {\n            return CommonService.defaultResponse(\n                this.serverApi.put<ReplaceFilesPayload, AxiosResponse<CollectionResource>>(`/${this.resourceType}/${data.uuid}`, payload),\n                this.actions,\n                true, // mapKeys\n                showErrors\n            );\n        } else {\n            return CommonService.defaultResponse(\n                this.serverApi.post<ReplaceFilesPayload, AxiosResponse<CollectionResource>>(`/${this.resourceType}`, payload),\n                this.actions,\n                true, // mapKeys\n                showErrors\n            );\n        }\n    }\n\n    async uploadFiles(collectionUuid: string, files: File[], onProgress?: UploadProgress, targetLocation: string = \"\") {\n        if (collectionUuid === \"\" || files.length === 0) {\n            return;\n        }\n\n        const isAnyNested = files.some(file => {\n            const path = file[getPathKey(file)];\n            return path && path.indexOf('/') > -1;\n        });\n\n        if (isAnyNested) {\n            const existingDirPaths = new Set((await this.files(collectionUuid))\n                .filter(f => f.type === 'directory')\n                .map(f => f.id.split('/').slice(1).join('/')));\n\n            const allTargetPaths = files.reduce((acc, file: FileWithRelativePath) => {\n                const pathKey = getPathKey(file);\n                if (file[pathKey] && file[pathKey]!.length > 0) {\n                    acc.push(file[pathKey]!.split('/').slice(0, -1).join('/'));\n                }\n                return acc;\n            }, [] as string[]).filter(path => path.length > 0);\n\n            await this.createMinNecessaryDirs(collectionUuid, existingDirPaths, allTargetPaths, true);\n        }\n\n        // files have to be uploaded sequentially\n        for (let idx = 0; idx < files.length; idx++) {\n            const file = files[idx] as FileWithRelativePath;\n            let nestedPath: string | undefined = undefined;\n\n            if (isAnyNested) {\n                const pathKey = getPathKey(file);\n\n                if (file[pathKey] && file[pathKey]!.length > 0) {\n                    nestedPath = file[pathKey]!.split('/').slice(0, -1).join('/');\n                }\n            }\n\n            try {\n                if (nestedPath) {\n                    await this.uploadFile(collectionUuid, file, idx, onProgress, `${collectionUuid}/${nestedPath}/`.replace(\"//\", \"/\"));\n                } else {\n                    await this.uploadFile(collectionUuid, file, idx, onProgress, targetLocation);\n                }\n            } catch (error) {\n                console.error(\"Error uploading file\", `${collectionUuid}${nestedPath ? 
`/${nestedPath}`: ''}/${file.name}`, error);\n            }\n        }\n        await this.update(collectionUuid, { preserveVersion: true });\n    }\n\n    async renameFile(collectionUuid: string, collectionPdh: string, oldPath: string, newPath: string) {\n        return this.replaceFiles(\n            { uuid: collectionUuid },\n            {\n                [this.combineFilePath([newPath])]: `${collectionPdh}${this.combineFilePath([oldPath])}`,\n                [this.combineFilePath([oldPath])]: \"\",\n            }\n        );\n    }\n\n    extendFileURL = (file: CollectionDirectory | CollectionFile) => {\n        const baseUrl = this.keepWebdavClient.getBaseUrl().endsWith(\"/\")\n            ? this.keepWebdavClient.getBaseUrl().slice(0, -1)\n            : this.keepWebdavClient.getBaseUrl();\n        const apiToken = this.authService.getApiToken();\n        const encodedApiToken = apiToken ? encodeURI(apiToken) : \"\";\n        const userApiToken = `/t=${encodedApiToken}/`;\n        const splittedPrevFileUrl = file.url.split(\"/\");\n        const url = `${baseUrl}/${splittedPrevFileUrl[1]}${userApiToken}${splittedPrevFileUrl.slice(2).join(\"/\")}`;\n        return {\n            ...file,\n            url,\n        };\n    };\n\n    async getFileContents(file: CollectionFile) {\n        return (await this.keepWebdavClient.get(`c=${file.id}`)).response;\n    }\n\n    private async uploadFile(\n        collectionUuid: string,\n        file: File,\n        fileId: number,\n        onProgress: UploadProgress = () => {\n            return;\n        },\n        targetLocation: string = \"\"\n    ) {\n        const fileURL = `c=${targetLocation !== \"\" ? targetLocation : collectionUuid}/${file.name}`.replace(\"//\", \"/\");\n        const requestConfig = {\n            headers: {\n                \"Content-Type\": \"text/octet-stream\",\n            },\n            onUploadProgress: (e: ProgressEvent) => {\n                onProgress(fileId, e.loaded, e.total, Date.now());\n            },\n        };\n        return this.keepWebdavClient.upload(fileURL, [file], requestConfig);\n    }\n\n    deleteFiles(collectionUuid: string, files: string[], showErrors?: boolean) {\n        const optimizedFiles = files\n            .sort((a, b) => a.length - b.length)\n            .reduce((acc, currentPath) => {\n                const parentPathFound = acc.find(parentPath => currentPath.indexOf(`${parentPath}/`) > -1);\n\n                if (!parentPathFound) {\n                    return [...acc, currentPath];\n                }\n\n                return acc;\n            }, []);\n\n        const fileMap = optimizedFiles.reduce((obj, filePath) => {\n            return {\n                ...obj,\n                [this.combineFilePath([filePath])]: \"\",\n            };\n        }, {});\n\n        return this.replaceFiles({ uuid: collectionUuid }, fileMap, showErrors);\n    }\n\n    copyFiles(\n        sourcePdh: string,\n        files: string[],\n        destinationCollection: CollectionPartialUpdateOrCreate,\n        destinationPath: string,\n        showErrors?: boolean\n    ) {\n        const fileMap = files.reduce((obj, sourceFile) => {\n            const fileBasename = sourceFile.split(\"/\").filter(Boolean).slice(-1).join(\"\");\n            return {\n                ...obj,\n                [this.combineFilePath([destinationPath, fileBasename])]: `${sourcePdh}${this.combineFilePath([sourceFile])}`,\n            };\n        }, {});\n\n        return this.replaceFiles(destinationCollection, fileMap, 
showErrors);\n    }\n\n    moveFiles(\n        sourceUuid: string,\n        sourcePdh: string,\n        files: string[],\n        destinationCollection: CollectionPartialUpdateOrCreate,\n        destinationPath: string,\n        showErrors?: boolean\n    ) {\n        if (sourceUuid === destinationCollection.uuid) {\n            let errors: CommonResourceServiceError[] = [];\n            const fileMap = files.reduce((obj, sourceFile) => {\n                const fileBasename = sourceFile.split(\"/\").filter(Boolean).slice(-1).join(\"\");\n                const fileDestinationPath = this.combineFilePath([destinationPath, fileBasename]);\n                const fileSourcePath = this.combineFilePath([sourceFile]);\n                const fileSourceUri = `${sourcePdh}${fileSourcePath}`;\n\n                if (fileDestinationPath !== fileSourcePath) {\n                    return {\n                        ...obj,\n                        [fileDestinationPath]: fileSourceUri,\n                        [fileSourcePath]: \"\",\n                    };\n                } else {\n                    errors.push(CommonResourceServiceError.SOURCE_DESTINATION_CANNOT_BE_SAME);\n                    return obj;\n                }\n            }, {});\n\n            if (errors.length === 0) {\n                return this.replaceFiles({ uuid: sourceUuid }, fileMap, showErrors);\n            } else {\n                return Promise.reject({ errors });\n            }\n        } else {\n            return this.copyFiles(sourcePdh, files, destinationCollection, destinationPath, showErrors).then(() => {\n                return this.deleteFiles(sourceUuid, files, showErrors);\n            });\n        }\n    }\n\n    createDirectory(collectionUuid: string, paths: string[], showErrors?: boolean) {\n        const fileMap = paths.reduce((fMap, path)=> {\n            fMap[this.combineFilePath([path])] = emptyCollectionPdh;\n            return fMap;\n        }, {})\n\n        return this.replaceFiles({ uuid: collectionUuid }, fileMap, showErrors, false);\n    }\n\n    /* since creating a nested dir will create all parent dirs that don't exist,\n    *  we only create the longest unique paths\n    */\n    async createMinNecessaryDirs(collectionUuid: string, existingDirPaths: Set<string>, targetPaths: string[], showErrors?) {\n        const pathsToCreate = getMinNecessaryPaths(targetPaths).filter(path => !existingDirPaths.has(path));\n        if (pathsToCreate.length > 0) {\n            try {\n                await this.createDirectory(collectionUuid, pathsToCreate, showErrors);\n            } catch (error) {\n                console.error(`Error creating directory in ${collectionUuid}`, error);\n            }\n        }\n    }\n\n    downloadZip(collectionUuid: string, paths: string[], fileName: string) {\n        // Get webdav base url & token\n        const baseUrl = this.keepWebdavClient.getBaseUrl().endsWith(\"/\")\n            ? 
this.keepWebdavClient.getBaseUrl().slice(0, -1)\n            : this.keepWebdavClient.getBaseUrl();\n        const apiToken = this.authService.getApiToken();\n\n        // Throw error to be exposed in toast if token missing\n        if (!apiToken) {\n            throw new Error(\"Token missing\");\n        }\n\n        // Create form\n        const form = document.createElement(\"form\");\n        form.setAttribute(\"method\", \"get\");\n        form.setAttribute(\"action\", `${baseUrl}/c=${collectionUuid}`);\n\n        // Attach token\n        const tokenInput = document.createElement(\"input\");\n        tokenInput.name = \"api_token\";\n        tokenInput.value = apiToken;\n        form.appendChild(tokenInput);\n\n        // Add accept and disposition\n        const acceptInput = document.createElement(\"input\");\n        acceptInput.name = \"accept\";\n        acceptInput.value = \"application/zip\";\n        form.appendChild(acceptInput);\n\n        const dispositionInput = document.createElement(\"input\");\n        dispositionInput.name = \"disposition\";\n        dispositionInput.value = \"attachment\";\n        form.appendChild(dispositionInput);\n\n        // Add filename parameter\n        const fileNameInput = document.createElement(\"input\");\n        fileNameInput.name = \"download_filename\";\n        fileNameInput.value = fileName;\n        form.appendChild(fileNameInput);\n\n        // Add file parameters for each path\n        paths.forEach((path) => {\n            const fileInput = document.createElement(\"input\");\n            fileInput.name = \"files\";\n            fileInput.value = path.replace(/^\\//, '');\n            form.appendChild(fileInput);\n        });\n\n        // Append form to body, submit, and cleanup form\n        document.body.appendChild(form);\n        form.submit();\n        form.remove();\n    }\n}\n\n\nexport function getMinNecessaryPaths(paths: string[]): string[] {\n    //remove duplicates\n    const uniquePaths = Array.from(new Set(paths)).filter(path => !!path && typeof path === 'string' && path.length > 0);\n\n    return uniquePaths.filter((path) =>\n        uniquePaths.every((existing) =>\n            path === existing || !existing.startsWith(path + '/')\n        )\n    );\n\n}\n\nconst getPathKey = (file: FileWithRelativePath) => {\n    return file.relativePath ? 'relativePath' : 'webkitRelativePath';\n}"
  },
  {
    "path": "services/workbench2/src/services/common-service/common-resource-service.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { CommonResourceService } from \"./common-resource-service\";\nimport axios from \"axios\";\nimport MockAdapter from \"axios-mock-adapter\";\n\nconst actions = {\n    progressFn: (id, working) => {},\n    errorFn: (id, message) => {}\n};\n\nexport const mockResourceService = (\n    Service => {\n        const axiosInstance = axios.create();\n        const service = new Service(axiosInstance, actions);\n        Object.keys(service).map(key => service[key] = cy.stub());\n        return service;\n    });\n\ndescribe(\"CommonResourceService\", () => {\n    let axiosInstance;\n    let axiosMock;\n\n    beforeEach(() => {\n        axiosInstance = axios.create();\n        axiosMock = new MockAdapter(axiosInstance);\n    });\n\n    it(\"#create\", async () => {\n        axiosMock\n            .onPost(\"/resources\")\n            .reply(200, { owner_uuid: \"ownerUuidValue\" });\n\n        const commonResourceService = new CommonResourceService(axiosInstance, \"resources\", actions);\n        const resource = await commonResourceService.create({ ownerUuid: \"ownerUuidValue\" });\n        expect(resource).to.deep.equal({ ownerUuid: \"ownerUuidValue\" });\n    });\n\n    it(\"#create maps request params to snake case\", async () => {\n        cy.stub(axiosInstance, \"post\").returns(Promise.resolve({data: {}}));\n        const commonResourceService = new CommonResourceService(axiosInstance, \"resources\", actions);\n        await commonResourceService.create({ ownerUuid: \"ownerUuidValue\" });\n    });\n\n    it(\"#create ignores fields listed as readonly\", async () => {\n        cy.stub(axiosInstance, \"post\").returns(Promise.resolve({data: {}}));\n        const commonResourceService = new CommonResourceService(axiosInstance, \"resources\", actions);\n        // UUID fields are read-only on all resources.\n        await commonResourceService.create({ uuid: \"this should be ignored\", ownerUuid: \"ownerUuidValue\" });\n        expect(axiosInstance.post).to.be.calledWith(\"/resources\", {resource: {owner_uuid: \"ownerUuidValue\"}});\n    });\n\n    it(\"#update ignores fields listed as readonly\", async () => {\n        cy.stub(axiosInstance, \"put\").returns(Promise.resolve({data: {}}));\n        const commonResourceService = new CommonResourceService(axiosInstance, \"resources\", actions);\n        // UUID fields are read-only on all resources.\n        await commonResourceService.update('resource-uuid', { uuid: \"this should be ignored\", ownerUuid: \"ownerUuidValue\" });\n        expect(axiosInstance.put).to.be.calledWith(\"/resources/resource-uuid\", {resource:  {owner_uuid: \"ownerUuidValue\"}});\n    });\n\n    it(\"#delete\", async () => {\n        axiosMock\n            .onDelete(\"/resources/uuid\")\n            .reply(200, { deleted_at: \"now\" });\n\n        const commonResourceService = new CommonResourceService(axiosInstance, \"resources\", actions);\n        const resource = await commonResourceService.delete(\"uuid\");\n        expect(resource).to.deep.equal({ deletedAt: \"now\" });\n    });\n\n    it(\"#get\", async () => {\n        axiosMock\n            .onGet(\"/resources/uuid\")\n            .reply(200, {\n                modified_at: \"now\",\n                properties: {\n                    responsible_owner_uuid: \"another_owner\"\n                }\n            });\n\n        const commonResourceService = new CommonResourceService(axiosInstance, 
\"resources\", actions);\n        const resource = await commonResourceService.get(\"uuid\");\n        // Only first level keys are mapped to camel case\n        expect(resource).to.deep.equal({\n            modifiedAt: \"now\",\n            properties: {\n                responsible_owner_uuid: \"another_owner\"\n            }\n        });\n    });\n\n    it(\"#list\", async () => {\n        axiosMock\n            .onGet(\"/resources\")\n            .reply(200, {\n                kind: \"kind\",\n                offset: 2,\n                limit: 10,\n                items: [{\n                    modified_at: \"now\",\n                    properties: {\n                        is_active: true\n                    }\n                }],\n                items_available: 20\n            });\n\n        const commonResourceService = new CommonResourceService(axiosInstance, \"resources\", actions);\n        const resource = await commonResourceService.list({ limit: 10, offset: 1 });\n        // First level keys are mapped to camel case inside \"items\" arrays\n        expect(resource).to.deep.equal({\n            kind: \"kind\",\n            offset: 2,\n            limit: 10,\n            items: [{\n                modifiedAt: \"now\",\n                properties: {\n                    is_active: true\n                }\n            }],\n            itemsAvailable: 20\n        });\n    });\n\n    it(\"#list using POST when query string is too big\", async () => {\n        axiosMock\n            .onAny(\"/resources\")\n            .reply(200);\n        const tooBig = 'x'.repeat(1500);\n        const commonResourceService = new CommonResourceService(axiosInstance, \"resources\", actions);\n        await commonResourceService.list({ filters: tooBig });\n        expect(axiosMock.history.get.length).to.equal(0);\n        expect(axiosMock.history.post.length).to.equal(1);\n        const postParams = new URLSearchParams(axiosMock.history.post[0].data);\n        expect(postParams.get('filters')).to.equal(`[${tooBig}]`);\n        expect(postParams.get('_method')).to.equal('GET');\n    });\n\n    it(\"#list using GET when query string is not too big\", async () => {\n        axiosMock\n            .onAny(\"/resources\")\n            .reply(200);\n        const notTooBig = 'x'.repeat(1480);\n        const commonResourceService = new CommonResourceService(axiosInstance, \"resources\", actions);\n        await commonResourceService.list({ filters: notTooBig });\n        expect(axiosMock.history.post.length).to.equal(0);\n        expect(axiosMock.history.get.length).to.equal(1);\n        expect(axiosMock.history.get[0].params.filters).to.equal(`[${notTooBig}]`);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/services/common-service/common-resource-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { AxiosInstance } from \"axios\";\nimport { snakeCase } from \"lodash\";\nimport { Resource } from \"models/resource\";\nimport { ApiActions } from \"services/api/api-actions\";\nimport { CommonService } from \"services/common-service/common-service\";\n\nexport enum CommonResourceServiceError {\n    UNIQUE_NAME_VIOLATION = 'UniqueNameViolation',\n    OWNERSHIP_CYCLE = 'OwnershipCycle',\n    MODIFYING_CONTAINER_REQUEST_FINAL_STATE = 'ModifyingContainerRequestFinalState',\n    NAME_HAS_ALREADY_BEEN_TAKEN = 'NameHasAlreadyBeenTaken',\n    NOT_FOUND = 'NotFound',\n    PERMISSION_ERROR_FORBIDDEN = 'PermissionErrorForbidden',\n    SOURCE_DESTINATION_CANNOT_BE_SAME = 'SourceDestinationCannotBeSame',\n    UNKNOWN = 'Unknown',\n    NONE = 'None'\n}\n\nexport class CommonResourceService<T extends Resource> extends CommonService<T> {\n    constructor(serverApi: AxiosInstance, resourceType: string, actions: ApiActions, readOnlyFields: string[] = []) {\n        super(serverApi, resourceType, actions, readOnlyFields.concat([\n            'uuid',\n            'etag',\n            'kind',\n            'canWrite',\n            'canManage',\n            'createdAt',\n            'modifiedAt',\n            'modifiedByUserUuid',\n            'writableBy',\n        ]));\n    }\n\n    create(data?: Partial<T>, showErrors?: boolean) {\n        let payload: any;\n        if (data !== undefined) {\n            this.readOnlyFields.forEach(field => delete data[field]);\n            payload = {\n                [this.resourceType.slice(0, -1)]: CommonService.mapKeys(snakeCase)(data),\n            };\n        }\n        return super.create(payload, showErrors);\n    }\n\n    update(uuid: string, data: Partial<T>, showErrors?: boolean, select?: string[]) {\n        let payload: any;\n        if (data !== undefined) {\n            this.readOnlyFields.forEach(field => delete data[field]);\n            payload = {\n                [this.resourceType.slice(0, -1)]: CommonService.mapKeys(snakeCase)(data),\n            };\n            if (select !== undefined && select.length > 0) {\n                payload.select = ['uuid', ...select.map(field => snakeCase(field))];\n            };\n        }\n        return super.update(uuid, payload, showErrors);\n    }\n}\n\nexport const getCommonResourceServiceError = (errorResponse: any) => {\n    if (errorResponse && 'errors' in errorResponse) {\n        const error = errorResponse.errors.join('');\n        const status = errorResponse.status;\n        switch (true) {\n            case /UniqueViolation/.test(error):\n                return CommonResourceServiceError.UNIQUE_NAME_VIOLATION;\n            case /ownership cycle/.test(error):\n                return CommonResourceServiceError.OWNERSHIP_CYCLE;\n            case /Mounts cannot be modified in state 'Final'/.test(error):\n                return CommonResourceServiceError.MODIFYING_CONTAINER_REQUEST_FINAL_STATE;\n            case /Name has already been taken/.test(error):\n                return CommonResourceServiceError.NAME_HAS_ALREADY_BEEN_TAKEN;\n            case status === 404:\n                return CommonResourceServiceError.NOT_FOUND;\n            case /403 Forbidden/.test(error):\n                return CommonResourceServiceError.PERMISSION_ERROR_FORBIDDEN;\n            case new RegExp(CommonResourceServiceError.SOURCE_DESTINATION_CANNOT_BE_SAME).test(error):\n                return 
CommonResourceServiceError.SOURCE_DESTINATION_CANNOT_BE_SAME;\n            default:\n                return CommonResourceServiceError.UNKNOWN;\n        }\n    }\n    return CommonResourceServiceError.NONE;\n};\n"
  },
  {
    "path": "services/workbench2/src/services/common-service/common-service.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { CommonService } from \"./common-service\";\n\nconst actions = {\n    progressFn: (id, working) => {},\n    errorFn: (id, message) => {}\n};\n\ndescribe(\"CommonService\", () => {\n    let commonService;\n\n    beforeEach(() => {\n        commonService = new CommonService({}, \"resource\", actions);\n    });\n\n    it(\"throws an exception when passing uuid as empty string to get()\", () => {\n        expect(() => commonService.get(\"\")).to.throw(\"UUID cannot be empty string\");\n    });\n\n    it(\"throws an exception when passing uuid as empty string to update()\", () => {\n        expect(() => commonService.update(\"\", {})).to.throw(\"UUID cannot be empty string\");\n    });\n\n    it(\"throws an exception when passing uuid as empty string to delete()\", () => {\n        expect(() => commonService.delete(\"\")).to.throw(\"UUID cannot be empty string\");\n    });\n});"
  },
  {
    "path": "services/workbench2/src/services/common-service/common-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { camelCase, isPlainObject, isArray, snakeCase } from \"lodash\";\nimport { AxiosInstance, AxiosPromise, AxiosRequestConfig } from \"axios\";\nimport uuid from \"uuid/v4\";\nimport { ApiActions } from \"services/api/api-actions\";\nimport QueryString from \"query-string\";\nimport { Session } from \"models/session\";\n\ninterface Errors {\n    status: number;\n    errors: string[];\n    errorToken: string;\n}\n\nexport interface ListArguments {\n    limit?: number;\n    offset?: number;\n    filters?: string;\n    order?: string;\n    select?: string[];\n    distinct?: boolean;\n    count?: 'exact' | 'none';\n    includeOldVersions?: boolean;\n}\n\nexport interface ListResults<T> {\n    clusterId?: string;\n    kind: string;\n    offset: number;\n    limit: number;\n    items: T[];\n    itemsAvailable?: number;\n}\n\nexport class CommonService<T> {\n    protected serverApi: AxiosInstance;\n    protected resourceType: string;\n    protected actions: ApiActions;\n    protected readOnlyFields: string[];\n\n    constructor(serverApi: AxiosInstance, resourceType: string, actions: ApiActions, readOnlyFields: string[] = []) {\n        this.serverApi = serverApi;\n        this.resourceType = resourceType;\n        this.actions = actions;\n        this.readOnlyFields = readOnlyFields;\n    }\n\n    static mapResponseKeys = (response: { data: any }) =>\n        CommonService.mapKeys(camelCase)(response.data)\n\n    static mapKeys = (mapFn: (key: string) => string) =>\n        (value: any): any => {\n            switch (true) {\n                case isPlainObject(value):\n                    return Object\n                        .keys(value)\n                        .map(key => [key, mapFn(key)])\n                        .reduce((newValue, [key, newKey]) => ({\n                            ...newValue,\n                            [newKey]: (key === 'items' || key === 'included') ? CommonService.mapKeys(mapFn)(value[key]) : value[key]\n                        }), {});\n                case isArray(value):\n                    return value.map(CommonService.mapKeys(mapFn));\n                default:\n                    return value;\n            }\n        }\n\n    protected validateUuid(uuid: string) {\n        if (uuid === \"\") {\n            throw new Error('UUID cannot be empty string');\n        }\n    }\n\n    static defaultResponse<R>(promise: AxiosPromise<R>, actions: ApiActions, mapKeys = true, showErrors = true): Promise<R> {\n        const reqId = uuid();\n        actions.progressFn(reqId, true);\n        return promise\n            .then(data => {\n                actions.progressFn(reqId, false);\n                return data;\n            })\n            .then((response: { data: any }) => {\n                return mapKeys ? 
CommonService.mapResponseKeys(response) : response.data;\n            })\n            .catch(({ response }) => {\n                if (response) {\n                    actions.progressFn(reqId, false);\n                    const errors = CommonService.mapResponseKeys(response) as Errors;\n                    errors.status = response.status;\n                    actions.errorFn(reqId, errors, showErrors);\n                    throw errors;\n                }\n            });\n    }\n\n    create(data?: Partial<T>, showErrors?: boolean) {\n        return CommonService.defaultResponse(\n            this.serverApi\n                .post<T>(`/${this.resourceType}`, data && CommonService.mapKeys(snakeCase)(data)),\n            this.actions,\n            true, // mapKeys\n            showErrors\n        );\n    }\n\n    delete(uuid: string, showErrors?: boolean): Promise<T> {\n        this.validateUuid(uuid);\n        return CommonService.defaultResponse(\n            this.serverApi\n                .delete(`/${this.resourceType}/${uuid}`),\n            this.actions,\n            true, // mapKeys\n            showErrors\n        );\n    }\n\n    get(uuid: string, showErrors?: boolean, select?: string[], session?: Session) {\n        this.validateUuid(uuid);\n\n        const cfg: AxiosRequestConfig = {\n            params: {\n                select: select\n                    ? `[${select.map(snakeCase).map(s => `\"${s}\"`).join(',')}]`\n                    : undefined\n            }\n        };\n        if (session) {\n            cfg.baseURL = session.baseUrl;\n            cfg.headers = { 'Authorization': 'Bearer ' + session.token };\n        }\n\n        return CommonService.defaultResponse(\n            this.serverApi\n                .get<T>(`/${this.resourceType}/${uuid}`, cfg),\n            this.actions,\n            true, // mapKeys\n            showErrors\n        );\n    }\n\n    list(args: ListArguments = {}, showErrors?: boolean): Promise<ListResults<T>> {\n        const { filters, select, ...other } = args;\n        const params = {\n            ...CommonService.mapKeys(snakeCase)(other),\n            filters: filters ? `[${filters}]` : undefined,\n            select: select\n                ? 
`[${select.map(snakeCase).map(s => `\"${s}\"`).join(', ')}]`\n                : undefined\n        };\n\n        if (QueryString.stringify(params).length <= 1500) {\n            return CommonService.defaultResponse(\n                this.serverApi.get(`/${this.resourceType}`, { params }),\n                this.actions,\n                true,\n                showErrors\n            );\n        } else {\n            // Using the POST special case to avoid URI length 414 errors.\n            // We must use urlencoded post body since api doesn't support form data\n            // const formData = new FormData();\n            const formData = new URLSearchParams();\n            formData.append(\"_method\", \"GET\");\n            Object.keys(params).forEach(key => {\n                if (params[key] !== undefined) {\n                    formData.append(key, params[key]);\n                }\n            });\n            return CommonService.defaultResponse(\n                this.serverApi.post(`/${this.resourceType}`, formData, {}),\n                this.actions,\n                true,\n                showErrors\n            );\n        }\n    }\n\n    update(uuid: string, data: Partial<T>, showErrors?: boolean) {\n        this.validateUuid(uuid);\n        return CommonService.defaultResponse(\n            this.serverApi\n                .put<T>(`/${this.resourceType}/${uuid}`, data && CommonService.mapKeys(snakeCase)(data)),\n            this.actions,\n            undefined, // mapKeys\n            showErrors\n        );\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/common-service/trashable-resource-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { snakeCase } from \"lodash\";\nimport { AxiosInstance } from \"axios\";\nimport { TrashableResource } from \"models/resource\";\nimport { CommonResourceService } from \"services/common-service/common-resource-service\";\nimport { ApiActions } from \"services/api/api-actions\";\n\nexport class TrashableResourceService<T extends TrashableResource> extends CommonResourceService<T> {\n    constructor(serverApi: AxiosInstance, resourceType: string, actions: ApiActions, readOnlyFields: string[] = []) {\n        super(serverApi, resourceType, actions, readOnlyFields);\n    }\n\n    trash(uuid: string): Promise<T> {\n        return CommonResourceService.defaultResponse(this.serverApi.post(this.resourceType + `/${uuid}/trash`), this.actions);\n    }\n\n    untrash(uuid: string): Promise<T> {\n        const params = {\n            ensure_unique_name: true,\n        };\n        return CommonResourceService.defaultResponse(\n            this.serverApi.post(this.resourceType + `/${uuid}/untrash`, {\n                params: CommonResourceService.mapKeys(snakeCase)(params),\n            }),\n            this.actions,\n            undefined,\n            false\n        );\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/container-request-service/container-request-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { CommonResourceService } from \"services/common-service/common-resource-service\";\nimport { AxiosInstance } from \"axios\";\nimport { ContainerRequestResource, ContainerStatus } from 'models/container-request';\nimport { ApiActions } from \"services/api/api-actions\";\nimport { CommonService } from \"services/common-service/common-service\";\n\nexport class ContainerRequestService extends CommonResourceService<ContainerRequestResource> {\n    constructor(serverApi: AxiosInstance, actions: ApiActions) {\n        super(serverApi, \"container_requests\", actions);\n    }\n\n    containerStatus(uuid: string, showErrors?: boolean) {\n        return CommonService.defaultResponse(\n            this.serverApi\n                .get<ContainerStatus>(`/${this.resourceType}/${uuid}/container_status`),\n            this.actions,\n            true, // mapKeys\n            showErrors\n        );\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/container-service/container-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { CommonResourceService } from \"services/common-service/common-resource-service\";\nimport { AxiosInstance } from \"axios\";\nimport { ContainerResource } from 'models/container';\nimport { ApiActions } from \"services/api/api-actions\";\n\nexport class ContainerService extends CommonResourceService<ContainerResource> {\n    constructor(serverApi: AxiosInstance, actions: ApiActions) {\n        super(serverApi, \"containers\", actions);\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/external-credentials/external-credentials-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ListArguments, CommonService } from \"services/common-service/common-service\";\nimport { AxiosInstance } from \"axios\";\nimport { ApiActions } from \"services/api/api-actions\";\nimport { ListResults } from \"services/common-service/common-service\";\nimport { ExternalCredential } from \"models/external-credential\";\nimport { CreateExternalCredentialFormDialogData, UpdateExternalCredentialFormDialogData } from \"store/external-credentials/external-credential-dialog-data\";\n\nexport class ExternalCredentialsService extends CommonService<ExternalCredential> {\n    constructor(serverApi: AxiosInstance, actions: ApiActions) {\n            super(serverApi, \"credentials\", actions);\n        }\n\n        list( args?: ListArguments, showErrors?: boolean ): Promise<ListResults<ExternalCredential>> {\n            return super.list(args, showErrors);\n        }\n\n        create(data: CreateExternalCredentialFormDialogData, showErrors?: boolean): Promise<ExternalCredential> {\n            return super.create(data, showErrors);\n        }\n\n        delete( uuid: string, showErrors?: boolean ): Promise<ExternalCredential> {\n            return super.delete(uuid, showErrors);\n        }\n\n        update(uuid: string, data: UpdateExternalCredentialFormDialogData, showErrors?: boolean): Promise<ExternalCredential> {\n            return super.update(uuid, data, showErrors);\n        }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/favorite-service/favorite-service.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { LinkService } from \"../link-service/link-service\";\nimport { GroupsService } from \"../groups-service/groups-service\";\nimport { FavoriteService } from \"./favorite-service\";\nimport { LinkClass } from \"models/link\";\nimport { FilterBuilder } from \"services/api/filter-builder\";\nimport axios from \"axios\";\nimport { isEqual } from \"lodash\";\n\ndescribe(\"FavoriteService\", () => {\n\n    let linkService;\n    let groupService;\n\n    const mockListArgs = {\n        filters: [],\n        limit: undefined,\n        offset: undefined,\n        order: undefined,\n    };\n\n    const mockContentsArgs = {\n        limit: undefined,\n        offset: undefined,\n        order: undefined,\n        filters: [],\n        recursive: true\n    };\n\n    beforeEach(() => {\n        linkService = new LinkService(axios, []);\n        groupService = new GroupsService(axios, []);\n    });\n\n    it(\"marks resource as favorite\", async () => {\n        linkService.create = cy.stub().returns(Promise.resolve({ uuid: \"newUuid\" })).as(\"create\");\n        const favoriteService = new FavoriteService(linkService, groupService);\n        const newFavorite = await favoriteService.create({ userUuid: \"userUuid\", resource: { uuid: \"resourceUuid\", name: \"resource\" } });\n\n        cy.get(\"@create\").should(\"be.calledWith\", {\n            ownerUuid: \"userUuid\",\n            tailUuid: \"userUuid\",\n            headUuid: \"resourceUuid\",\n            linkClass: LinkClass.STAR,\n            name: \"resource\"\n        });\n        expect(newFavorite.uuid).to.equal(\"newUuid\");\n\n    });\n\n    it(\"unmarks resource as favorite\", async () => {\n        const list = cy.stub().returns(Promise.resolve({ items: [{ uuid: \"linkUuid\" }] })).as(\"list\");\n        const filters = new FilterBuilder()\n            .addEqual('owner_uuid', \"userUuid\")\n            .addEqual('head_uuid', \"resourceUuid\")\n            .addEqual('link_class', LinkClass.STAR);\n        linkService.list = list;\n        linkService.delete = cy.stub().returns(Promise.resolve({ uuid: \"linkUuid\" })).as(\"delete\");\n        const favoriteService = new FavoriteService(linkService, groupService);\n\n        const newFavorite = await favoriteService.delete({ userUuid: \"userUuid\", resourceUuid: \"resourceUuid\" });\n\n        cy.get(\"@list\").should(\"be.calledWith\", { filters: filters.getFilters() });\n        cy.get(\"@delete\").should(\"be.calledWith\", \"linkUuid\");\n        expect(newFavorite[0].uuid).to.equal(\"linkUuid\");\n    });\n\n    it(\"lists favorite resources\", async () => {\n        const list = cy.stub().returns(Promise.resolve({ items: [{ headUuid: \"headUuid\" }] })).as(\"list\");\n        const listFilters = new FilterBuilder()\n            .addEqual('owner_uuid', \"userUuid\")\n            .addEqual('link_class', LinkClass.STAR);\n        const contents = cy.stub().returns(Promise.resolve({ items: [{ uuid: \"resourceUuid\" }] })).as(\"contents\");\n        const contentFilters = new FilterBuilder().addIn('uuid', [\"headUuid\"]);\n        linkService.list = list;\n        groupService.contents = contents;\n        const favoriteService = new FavoriteService(linkService, groupService);\n\n        const favorites = await favoriteService.list(\"userUuid\");\n\n        cy.get(\"@list\").should(\"be.calledWith\", { ...mockListArgs, filters: listFilters.getFilters() });\n        
cy.get(\"@contents\").should(\"be.calledWith\", \"userUuid\", { ...mockContentsArgs,  filters: contentFilters.getFilters() });\n        expect(isEqual(favorites, { items: [{ uuid: \"resourceUuid\" }] })).to.equal(true);\n    });\n\n    it(\"checks if resources are present in favorites\", async () => {\n        const list = cy.stub().returns(Promise.resolve({ items: [{ headUuid: \"foo\", linkClass: LinkClass.STAR }] })).as(\"list\");\n        const listFilters = new FilterBuilder()\n            .addIn(\"head_uuid\", [\"foo\", \"oof\"])\n            .addEqual(\"owner_uuid\", \"userUuid\")\n            .addEqual(\"link_class\", LinkClass.STAR);\n        linkService.list = list;\n        const favoriteService = new FavoriteService(linkService, groupService);\n\n        const favorites = await favoriteService.checkPresenceInFavorites(\"userUuid\", [\"foo\", \"oof\"]);\n\n        cy.get(\"@list\").should(\"be.calledWith\", { filters: listFilters.getFilters() });\n        expect(isEqual(favorites, { foo: true, oof: false })).to.be.true;\n    });\n\n});\n"
  },
  {
    "path": "services/workbench2/src/services/favorite-service/favorite-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { LinkService } from \"../link-service/link-service\";\nimport { GroupsService, GroupContentsResource } from \"../groups-service/groups-service\";\nimport { LinkClass, hasCreateLinkProperties, NewFavoriteLink } from \"models/link\";\nimport { FilterBuilder, joinFilters } from \"services/api/filter-builder\";\nimport { ListResults } from 'services/common-service/common-service';\n\nexport interface FavoriteListArguments {\n    limit?: number;\n    offset?: number;\n    filters?: string;\n    linkOrder?: string;\n    contentOrder?: string;\n}\n\nexport class FavoriteService {\n    constructor(\n        private linkService: LinkService,\n        private groupsService: GroupsService,\n    ) { }\n\n    create(data: { userUuid: string; resource: { uuid: string; name: string } }) {\n        const newLink: NewFavoriteLink = {\n            ownerUuid: data.userUuid,\n            tailUuid: data.userUuid,\n            headUuid: data.resource.uuid,\n            linkClass: LinkClass.STAR,\n            name: data.resource.name\n        }\n        if (!hasCreateLinkProperties(newLink)) {\n            return Promise.reject(\"Unable to create favorite: missing link properties\");\n        }\n        return this.linkService.create(newLink);\n    }\n\n    delete(data: { userUuid: string; resourceUuid: string; }) {\n        return this.linkService\n            .list({\n                filters: new FilterBuilder()\n                    .addEqual('owner_uuid', data.userUuid)\n                    .addEqual('head_uuid', data.resourceUuid)\n                    .addEqual('link_class', LinkClass.STAR)\n                    .getFilters()\n            })\n            .then(results => Promise.all(\n                results.items.map(item => this.linkService.delete(item.uuid))));\n    }\n\n    list(userUuid: string, { filters, limit, offset, linkOrder, contentOrder }: FavoriteListArguments = {}, showOnlyOwned: boolean = true): Promise<ListResults<GroupContentsResource>> {\n        const listFilters = new FilterBuilder()\n            .addEqual('owner_uuid', userUuid)\n            .addEqual('link_class', LinkClass.STAR)\n            .getFilters();\n\n        return this.linkService\n            .list({\n                filters: joinFilters(filters || '', listFilters),\n                limit,\n                offset,\n                order: linkOrder\n            })\n            .then(results => {\n                const uuids = results.items.map(item => item.headUuid);\n                return this.groupsService.contents(showOnlyOwned ? 
userUuid : '', {\n                    limit,\n                    offset,\n                    order: contentOrder,\n                    filters: new FilterBuilder().addIn('uuid', uuids).getFilters(),\n                    recursive: true\n                });\n            });\n    }\n\n    async checkPresenceInFavorites(userUuid: string, resourceUuids: string[]): Promise<Record<string, boolean>> {\n        try {\n            const response = await this.linkService.list({\n                filters: new FilterBuilder()\n                    .addIn(\"head_uuid\", resourceUuids)\n                    .addEqual(\"owner_uuid\", userUuid)\n                    .addEqual(\"link_class\", LinkClass.STAR)\n                    .getFilters()\n            });\n            // Filter down to star links once, then check each uuid for membership.\n            const starLinks = response.items.filter(item => !!item.headUuid && item.linkClass === LinkClass.STAR);\n            return resourceUuids.reduce((results, uuid) => ({\n                ...results,\n                [uuid]: starLinks.some(item => item.headUuid === uuid),\n            }), {});\n        } catch (error) {\n            console.error(\"Error while checking presence in favorites\", error);\n            return {};\n        }\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/file-viewers-config-service/file-viewers-config-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport Axios from 'axios';\nimport { FileViewerList } from 'models/file-viewers-config';\n\nexport class FileViewersConfigService {\n    constructor(\n        private url: string\n    ) { }\n\n    get() {\n        return Axios\n            .get<FileViewerList>(this.url)\n            .then(response => response.data);\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/groups-service/groups-service.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport axios from \"axios\";\nimport MockAdapter from \"axios-mock-adapter\";\nimport { GroupsService } from \"./groups-service\";\n\ndescribe(\"GroupsService\", () => {\n\n    const axiosMock = new MockAdapter(axios);\n\n    const actions = {\n        progressFn: (id, working) => {},\n        errorFn: (id, message) => {}\n    };\n\n    beforeEach(() => {\n        axiosMock.reset();\n    });\n\n    it(\"#contents\", async () => {\n        axiosMock\n            .onGet(\"/groups/1/contents\")\n            .reply(200, {\n                kind: \"kind\",\n                offset: 2,\n                limit: 10,\n                items: [{\n                    modified_at: \"now\"\n                }],\n                items_available: 20\n            });\n\n        const groupsService = new GroupsService(axios, actions);\n        const resource = await groupsService.contents(\"1\", { limit: 10, offset: 1 });\n        expect(resource).to.deep.equal({\n            kind: \"kind\",\n            offset: 2,\n            limit: 10,\n            items: [{\n                modifiedAt: \"now\"\n            }],\n            itemsAvailable: 20,\n            clusterId: undefined\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/services/groups-service/groups-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { CancelToken } from 'axios';\nimport { snakeCase, camelCase } from \"lodash\";\nimport { CommonResourceService } from 'services/common-service/common-resource-service';\nimport {\n    ListResults,\n    ListArguments,\n} from 'services/common-service/common-service';\nimport { AxiosInstance, AxiosRequestConfig } from 'axios';\nimport { CollectionResource } from 'models/collection';\nimport { ProjectResource } from 'models/project';\nimport { ProcessResource } from 'models/process';\nimport { WorkflowResource } from 'models/workflow';\nimport { ContainerResource } from 'models/container';\nimport { UserResource } from 'models/user';\nimport { TrashableResourceService } from 'services/common-service/trashable-resource-service';\nimport { ApiActions } from 'services/api/api-actions';\nimport { GroupResource } from 'models/group';\nimport { Session } from 'models/session';\n\nexport interface ContentsArguments {\n    limit?: number;\n    offset?: number;\n    order?: string;\n    filters?: string;\n    recursive?: boolean;\n    includeTrash?: boolean;\n    excludeHomeProject?: boolean;\n    select?: string[];\n    count?: 'exact' | 'none';\n    include?: string | string[];\n}\n\nexport interface SharedArguments extends ListArguments {\n    include?: string;\n}\n\nexport type GroupContentsResource =\n    | CollectionResource\n    | ProjectResource\n    | ProcessResource\n    | WorkflowResource;\n\nexport type GroupContentsIncludedResource =\n    | UserResource\n    | ProjectResource\n    | ContainerResource;\n\nexport interface GroupContentsListResults extends ListResults<GroupContentsResource> {\n    included?: GroupContentsIncludedResource[];\n}\n\nexport class GroupsService<\nT extends GroupResource = GroupResource\n> extends TrashableResourceService<T> {\n    constructor(serverApi: AxiosInstance, actions: ApiActions) {\n        super(serverApi, 'groups', actions);\n    }\n\n    async contents(uuid: string, args: ContentsArguments = {}, session?: Session, cancelToken?: CancelToken):\n    Promise<GroupContentsListResults>\n    {\n        const { filters, order, select, include, ...other } = args;\n        const params = {\n            ...other,\n            filters: filters ? `[${filters}]` : undefined,\n            order: order ? order : undefined,\n            select: select\n                  ? JSON.stringify(select.map(sel => {\n                      const sp = sel.split(\".\");\n                      return sp.length === 2 ? (sp[0] + \".\" + snakeCase(sp[1])) : snakeCase(sel);\n                  }))\n                  : undefined,\n            include: include ? JSON.stringify(include) : undefined\n        };\n        const pathUrl = (uuid !== '') ? 
`/${uuid}/contents` : '/contents';\n        const cfg: AxiosRequestConfig = {\n            params: CommonResourceService.mapKeys(snakeCase)(params),\n        };\n\n        if (session) {\n            cfg.baseURL = session.baseUrl;\n            cfg.headers = { Authorization: 'Bearer ' + session.token };\n        }\n\n        if (cancelToken) {\n            cfg.cancelToken = cancelToken;\n        }\n\n        const response = await CommonResourceService.defaultResponse(\n            this.serverApi.get(this.resourceType + pathUrl, cfg),\n            this.actions,\n            false\n        );\n\n        return {\n            ...TrashableResourceService.mapKeys(camelCase)(response),\n            clusterId: session && session.clusterId,\n        };\n    }\n\n    shared(\n        params: SharedArguments = {}\n    ): Promise<ListResults<GroupContentsResource>> {\n        return CommonResourceService.defaultResponse(\n            this.serverApi.get(this.resourceType + '/shared', { params }),\n            this.actions\n        );\n    }\n}\n\nexport enum GroupContentsResourcePrefix {\n    COLLECTION = 'collections',\n    PROJECT = 'groups',\n    PROCESS = 'container_requests',\n    WORKFLOW = 'workflows',\n}\n"
  },
  {
    "path": "services/workbench2/src/services/keep-service/keep-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { CommonResourceService } from \"services/common-service/common-resource-service\";\nimport { AxiosInstance } from \"axios\";\nimport { KeepServiceResource } from \"models/keep-services\";\nimport { ApiActions } from \"services/api/api-actions\";\n\nexport class KeepService extends CommonResourceService<KeepServiceResource> {\n    constructor(serverApi: AxiosInstance, actions: ApiActions) {\n        super(serverApi, \"keep_services\", actions);\n    }\n}"
  },
  {
    "path": "services/workbench2/src/services/link-account-service/link-account-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { AxiosInstance } from \"axios\";\nimport { ApiActions } from \"services/api/api-actions\";\nimport { AccountToLink, LinkAccountStatus } from \"models/link-account\";\nimport { CommonService } from \"services/common-service/common-service\";\n\nexport const USER_LINK_ACCOUNT_KEY = 'accountToLink';\nexport const ACCOUNT_LINK_STATUS_KEY = 'accountLinkStatus';\n\nexport class LinkAccountService {\n\n    constructor(\n        protected serverApi: AxiosInstance,\n        protected actions: ApiActions) { }\n\n    public saveAccountToLink(account: AccountToLink) {\n        sessionStorage.setItem(USER_LINK_ACCOUNT_KEY, JSON.stringify(account));\n    }\n\n    public removeAccountToLink() {\n        sessionStorage.removeItem(USER_LINK_ACCOUNT_KEY);\n    }\n\n    public getAccountToLink() {\n        const data = sessionStorage.getItem(USER_LINK_ACCOUNT_KEY);\n        return data ? JSON.parse(data) as AccountToLink : undefined;\n    }\n\n    public saveLinkOpStatus(status: LinkAccountStatus) {\n        sessionStorage.setItem(ACCOUNT_LINK_STATUS_KEY, JSON.stringify(status));\n    }\n\n    public removeLinkOpStatus() {\n        sessionStorage.removeItem(ACCOUNT_LINK_STATUS_KEY);\n    }\n\n    public getLinkOpStatus() {\n        const data = sessionStorage.getItem(ACCOUNT_LINK_STATUS_KEY);\n        return data ? JSON.parse(data) as LinkAccountStatus : undefined;\n    }\n\n    public linkAccounts(newUserToken: string, newGroupUuid: string) {\n        const params = {\n            new_user_token: newUserToken,\n            new_owner_uuid: newGroupUuid,\n            redirect_to_new_user: true\n        };\n        return CommonService.defaultResponse(\n            this.serverApi.post('/users/merge', params),\n            this.actions,\n            false\n        );\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/link-service/link-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { CommonResourceService } from \"services/common-service/common-resource-service\";\nimport { LinkResource } from \"models/link\";\nimport { AxiosInstance } from \"axios\";\nimport { ApiActions } from \"services/api/api-actions\";\n\nexport class LinkService<Resource extends LinkResource = LinkResource> extends CommonResourceService<Resource> {\n    constructor(serverApi: AxiosInstance, actions: ApiActions) {\n        super(serverApi, \"links\", actions);\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/log-service/log-service.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { LogService } from \"./log-service\";\nimport axios from \"axios\";\nimport { LogEventType } from \"models/log\";\n\ndescribe(\"LogService\", () => {\n\n    let apiWebdavClient;\n    const axiosInstance = axios.create();\n    const actions = {\n        progressFn: (id, working) => {},\n        errorFn: (id, message) => {}\n    };\n\n    beforeEach(() => {\n        apiWebdavClient = {\n            delete: cy.stub(),\n            upload: cy.stub(),\n            mkdir: cy.stub(),\n            get: () => {},\n            propfind: () => {},\n        };\n    });\n\n    it(\"lists log files using propfind on live logs api endpoint\", async () => {\n        const logService = new LogService(axiosInstance, apiWebdavClient, actions);\n\n        // given\n        const containerRequest = {uuid: 'zzzzz-xvhdp-000000000000000', containerUuid: 'zzzzz-dz642-000000000000000'};\n        const xmlData = `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n            <D:multistatus xmlns:D=\"DAV:\">\n                    <D:response>\n                            <D:href>/arvados/v1/container_requests/${containerRequest.uuid}/log/${containerRequest.containerUuid}/</D:href>\n                            <D:propstat>\n                                    <D:prop>\n                                            <D:resourcetype>\n                                                    <D:collection xmlns:D=\"DAV:\" />\n                                            </D:resourcetype>\n                                            <D:getlastmodified>Tue, 15 Aug 2023 12:54:37 GMT</D:getlastmodified>\n                                            <D:displayname></D:displayname>\n                                            <D:supportedlock>\n                                                    <D:lockentry xmlns:D=\"DAV:\">\n                                                            <D:lockscope>\n                                                                    <D:exclusive />\n                                                            </D:lockscope>\n                                                            <D:locktype>\n                                                                    <D:write />\n                                                            </D:locktype>\n                                                    </D:lockentry>\n                                            </D:supportedlock>\n                                    </D:prop>\n                                    <D:status>HTTP/1.1 200 OK</D:status>\n                            </D:propstat>\n                    </D:response>\n                    <D:response>\n                            <D:href>/arvados/v1/container_requests/${containerRequest.uuid}/log/${containerRequest.containerUuid}/stdout.txt</D:href>\n                            <D:propstat>\n                                    <D:prop>\n                                            <D:displayname>stdout.txt</D:displayname>\n                                            <D:getcontentlength>15</D:getcontentlength>\n                                            <D:getcontenttype>text/plain; charset=utf-8</D:getcontenttype>\n                                            <D:getetag>\"177b8fb161ff9f58f\"</D:getetag>\n                                            <D:supportedlock>\n                                                    <D:lockentry xmlns:D=\"DAV:\">\n                                 
                           <D:lockscope>\n                                                                    <D:exclusive />\n                                                            </D:lockscope>\n                                                            <D:locktype>\n                                                                    <D:write />\n                                                            </D:locktype>\n                                                    </D:lockentry>\n                                            </D:supportedlock>\n                                            <D:resourcetype></D:resourcetype>\n                                            <D:getlastmodified>Tue, 15 Aug 2023 12:54:37 GMT</D:getlastmodified>\n                                    </D:prop>\n                                    <D:status>HTTP/1.1 200 OK</D:status>\n                            </D:propstat>\n                    </D:response>\n                    <D:response>\n                            <D:href>/arvados/v1/container_requests/${containerRequest.uuid}/wrongpath.txt</D:href>\n                            <D:propstat>\n                                    <D:prop>\n                                            <D:displayname>wrongpath.txt</D:displayname>\n                                            <D:getcontentlength>15</D:getcontentlength>\n                                            <D:getcontenttype>text/plain; charset=utf-8</D:getcontenttype>\n                                            <D:getetag>\"177b8fb161ff9f58f\"</D:getetag>\n                                            <D:supportedlock>\n                                                    <D:lockentry xmlns:D=\"DAV:\">\n                                                            <D:lockscope>\n                                                                    <D:exclusive />\n                                                            </D:lockscope>\n                                                            <D:locktype>\n                                                                    <D:write />\n                                                            </D:locktype>\n                                                    </D:lockentry>\n                                            </D:supportedlock>\n                                            <D:resourcetype></D:resourcetype>\n                                            <D:getlastmodified>Tue, 15 Aug 2023 12:54:37 GMT</D:getlastmodified>\n                                    </D:prop>\n                                    <D:status>HTTP/1.1 200 OK</D:status>\n                            </D:propstat>\n                    </D:response>\n            </D:multistatus>`;\n        const xmlDoc = (new DOMParser()).parseFromString(xmlData, \"text/xml\");\n        apiWebdavClient.propfind = cy.stub().returns(Promise.resolve({responseXML: xmlDoc}));\n\n        // when\n        const logs = await logService.listLogFiles(containerRequest);\n\n        // then\n        expect(apiWebdavClient.propfind).to.be.calledWith(`container_requests/${containerRequest.uuid}/log/${containerRequest.containerUuid}`);\n        expect(logs.length).to.equal(1);\n        expect(logs[0].name).to.equal('stdout.txt');\n        expect(logs[0].type).to.equal('file');\n    });\n\n    it(\"requests log file contents with correct range request\", async () => {\n        const logService = new LogService(axiosInstance, apiWebdavClient, actions);\n\n        // given\n        const containerRequest = {uuid: 
'zzzzz-xvhdp-000000000000000', containerUuid: 'zzzzz-dz642-000000000000000'};\n        const fileRecord = {name: `stdout.txt`};\n        const fileContents = `Line 1\\nLine 2\\nLine 3`;\n        cy.stub(apiWebdavClient, 'get', (path, options) => {\n                const matches = /bytes=([0-9]+)-([0-9]+)/.exec(options.headers?.Range || '');\n                if (matches?.length === 3) {\n                    return Promise.resolve({responseText: fileContents.substring(Number(matches[1]), Number(matches[2]) + 1)})\n                }\n                return Promise.reject();\n            })\n\n        // when\n        let result = await logService.getLogFileContents(containerRequest, fileRecord, 0, 3);\n        // then\n        expect(apiWebdavClient.get).to.be.calledWith(\n            `container_requests/${containerRequest.uuid}/log/${containerRequest.containerUuid}/${fileRecord.name}`,\n            {headers: {Range: `bytes=0-3`}}\n        );\n        expect(result.logType).to.equal(LogEventType.STDOUT);\n        expect(result.contents).to.deep.equal(['Line']);\n\n        // when\n        result = await logService.getLogFileContents(containerRequest, fileRecord, 0, 10);\n        // then\n        expect(apiWebdavClient.get).to.be.calledWith(\n            `container_requests/${containerRequest.uuid}/log/${containerRequest.containerUuid}/${fileRecord.name}`,\n            {headers: {Range: `bytes=0-10`}}\n        );\n        expect(result.logType).to.equal(LogEventType.STDOUT);\n        expect(result.contents).to.deep.equal(['Line 1', 'Line']);\n\n        // when\n        result = await logService.getLogFileContents(containerRequest, fileRecord, 6, 14);\n        // then\n        expect(apiWebdavClient.get).to.be.calledWith(\n            `container_requests/${containerRequest.uuid}/log/${containerRequest.containerUuid}/${fileRecord.name}`,\n            {headers: {Range: `bytes=6-14`}}\n        );\n        expect(result.logType).to.equal(LogEventType.STDOUT);\n        expect(result.contents).to.deep.equal(['', 'Line 2', 'L']);\n    });\n\n});\n"
  },
  {
    "path": "services/workbench2/src/services/log-service/log-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { AxiosInstance } from \"axios\";\nimport { LogEventType, LogResource } from 'models/log';\nimport { CommonResourceService } from \"services/common-service/common-resource-service\";\nimport { ApiActions } from \"services/api/api-actions\";\nimport { WebDAV } from \"common/webdav\";\nimport { extractFilesData } from \"services/collection-service/collection-service-files-response\";\nimport { CollectionFile } from \"models/collection-file\";\nimport { ContainerRequestResource } from \"models/container-request\";\n\nexport type LogFragment = {\n    logType: LogEventType;\n    contents: string[];\n}\n\nexport class LogService extends CommonResourceService<LogResource> {\n    constructor(serverApi: AxiosInstance, private apiWebdavClient: WebDAV, actions: ApiActions) {\n        super(serverApi, \"logs\", actions);\n    }\n\n    async listLogFiles(containerRequest: Pick<ContainerRequestResource, 'uuid' | 'containerUuid'>) {\n        const request = await this.apiWebdavClient.propfind(`container_requests/${containerRequest.uuid}/log/${containerRequest.containerUuid}`);\n        if (request?.responseXML != null) {\n            return extractFilesData(request.responseXML)\n                .filter((file) => (\n                    file.path === `/arvados/v1/container_requests/${containerRequest.uuid}/log/${containerRequest.containerUuid}`\n                ));\n        }\n        return Promise.reject();\n    }\n\n    /**\n     * Fetches the specified log file contents from the given container request's container live logs endpoint\n     * @param containerRequest Container request to fetch logs for\n     * @param fileRecord Log file to fetch\n     * @param startByte First byte index of the log file to fetch\n     * @param endByte Last byte index to include in the response\n     * @returns A promise that resolves to the LogEventType and a string array of the log file contents\n     */\n    async getLogFileContents(containerRequest: Pick<ContainerRequestResource, 'uuid' | 'containerUuid'>, fileRecord: Pick<CollectionFile, 'name'>, startByte: number, endByte: number): Promise<LogFragment> {\n        const request = await this.apiWebdavClient.get(\n            `container_requests/${containerRequest.uuid}/log/${containerRequest.containerUuid}/${fileRecord.name}`,\n            {headers: {Range: `bytes=${startByte}-${endByte}`}}\n        );\n        const logFileType = logFileToLogType(fileRecord);\n\n        if (request?.responseText && logFileType) {\n            return {\n                logType: logFileType,\n                contents: request.responseText.split(/\\r?\\n/),\n            };\n        } else {\n            return Promise.reject();\n        }\n    }\n}\n\nexport const logFileToLogType = (file: Pick<CollectionFile, 'name'>) => (file.name.replace(/\\.(txt|json)$/, '') as LogEventType);\n"
  },
  {
    "path": "services/workbench2/src/services/permission-service/permission-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { LinkService } from \"services/link-service/link-service\";\nimport { PermissionResource } from \"models/permission\";\nimport { CommonResourceService } from 'services/common-service/common-resource-service';\nimport { LinkClass } from '../../models/link';\nimport { ListArguments, ListResults } from 'services/common-service/common-service';\n\nexport class PermissionService extends LinkService<PermissionResource> {\n\n    permissionListService = new CommonResourceService(this.serverApi, 'permissions', this.actions);\n    create(data?: Partial<PermissionResource>) {\n        return super.create({ ...data, linkClass: LinkClass.PERMISSION });\n    }\n\n    listResourcePermissions(uuid: string, args: ListArguments = {}): Promise<ListResults<PermissionResource>> {\n        const service = new CommonResourceService<PermissionResource>(this.serverApi, `permissions/${uuid}`, this.actions);\n        return service.list(args);\n    }\n\n}\n"
  },
  {
    "path": "services/workbench2/src/services/project-service/project-service.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport axios from \"axios\";\nimport { ProjectService } from \"./project-service\";\nimport { FilterBuilder } from \"services/api/filter-builder\";\n\ndescribe(\"CommonResourceService\", () => {\n    const axiosInstance = axios.create();\n    const actions = {\n        progressFn: (id, working) => {},\n        errorFn: (id, message) => {}\n    };\n\n    it(`#create has groupClass set to \"project\"`, async () => {\n        axiosInstance.post = cy.stub().returns(Promise.resolve({ data: {} })).as(\"post\");\n        const projectService = new ProjectService(axiosInstance, actions);\n\n        await projectService.create({ name: \"nameValue\" });\n\n        cy.get(\"@post\").should(\"be.calledWith\", \"/groups\", {\n            group: {\n                name: \"nameValue\",\n                group_class: \"project\"\n            }\n        });\n    });\n\n    it(\"#list has groupClass filter set by default\", async () => {\n        axiosInstance.get = cy.stub().returns(Promise.resolve({ data: {} })).as(\"get\");\n        const projectService = new ProjectService(axiosInstance, actions);\n\n        await projectService.list();\n\n        cy.get(\"@get\").should(\"be.calledWith\", \"/groups\", {\n            params: {\n                filters: \"[\" + new FilterBuilder()\n                    .addEqual(\"group_class\", \"project\")\n                    .getFilters() + \"]\",\n                order: undefined\n            }\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/services/project-service/project-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { GroupsService } from \"../groups-service/groups-service\";\nimport { ProjectResource } from \"models/project\";\nimport { GroupClass } from \"models/group\";\nimport { ListArguments } from \"services/common-service/common-service\";\nimport { FilterBuilder, joinFilters } from \"services/api/filter-builder\";\nexport class ProjectService extends GroupsService<ProjectResource> {\n\n    create(data: Partial<ProjectResource>, showErrors?: boolean) {\n        const projectData = { ...data, groupClass: GroupClass.PROJECT };\n        return super.create(projectData, showErrors);\n    }\n\n    list(args: ListArguments = {}) {\n        return super.list({\n            ...args,\n            filters: joinFilters(\n                args.filters || '',\n                new FilterBuilder()\n                    .addIn('group_class', [GroupClass.PROJECT, GroupClass.FILTER])\n                    .getFilters()\n            )\n        });\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/repositories-service/repositories-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { AxiosInstance } from \"axios\";\nimport { CommonResourceService } from \"services/common-service/common-resource-service\";\nimport { RepositoryResource } from 'models/repositories';\nimport { ApiActions } from 'services/api/api-actions';\n\n export class RepositoriesService extends CommonResourceService<RepositoryResource> {\n    constructor(serverApi: AxiosInstance, actions: ApiActions) {\n        super(serverApi, \"repositories\", actions);\n    }\n\n     getAllPermissions() {\n        return CommonResourceService.defaultResponse(\n            this.serverApi\n                .get('repositories/get_all_permissions'),\n            this.actions\n        );\n    }\n} "
  },
  {
    "path": "services/workbench2/src/services/search-service/search-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { SearchBarAdvancedFormData } from 'models/search-bar';\n\nexport class SearchService {\n    private recentQueries = this.getRecentQueries();\n    private savedQueries: SearchBarAdvancedFormData[] = this.getSavedQueries();\n\n    saveRecentQuery(query: string) {\n        if (this.recentQueries.length >= MAX_NUMBER_OF_RECENT_QUERIES) {\n            this.recentQueries.shift();\n        }\n        this.recentQueries.push(query);\n        localStorage.setItem('recentQueries', JSON.stringify(this.recentQueries));\n    }\n\n    getRecentQueries(): string[] {\n        return JSON.parse(localStorage.getItem('recentQueries') || '[]');\n    }\n\n    saveQuery(data: SearchBarAdvancedFormData) {\n        this.savedQueries.push({...data});\n        localStorage.setItem('savedQueries', JSON.stringify(this.savedQueries));\n    }\n\n    editSavedQueries(data: SearchBarAdvancedFormData) {\n        const itemIndex = this.savedQueries.findIndex(item => item.queryName === data.queryName);\n        this.savedQueries[itemIndex] = {...data};\n        localStorage.setItem('savedQueries', JSON.stringify(this.savedQueries));\n    }\n\n    getSavedQueries() {\n        return JSON.parse(localStorage.getItem('savedQueries') || '[]') as SearchBarAdvancedFormData[];\n    }\n\n    deleteSavedQuery(id: number) {\n        this.savedQueries.splice(id, 1);\n        localStorage.setItem('savedQueries', JSON.stringify(this.savedQueries));\n    }\n}\n\nconst MAX_NUMBER_OF_RECENT_QUERIES = 5;\n"
  },
  {
    "path": "services/workbench2/src/services/services.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport Axios from \"axios\";\nimport { AxiosInstance } from \"axios\";\nimport { ApiClientAuthorizationService } from 'services/api-client-authorization-service/api-client-authorization-service';\nimport { AuthService } from \"./auth-service/auth-service\";\nimport { GroupsService } from \"./groups-service/groups-service\";\nimport { ProjectService } from \"./project-service/project-service\";\nimport { LinkService } from \"./link-service/link-service\";\nimport { FavoriteService } from \"./favorite-service/favorite-service\";\nimport { CollectionService } from \"./collection-service/collection-service\";\nimport { TagService } from \"./tag-service/tag-service\";\nimport { KeepService } from \"./keep-service/keep-service\";\nimport { WebDAV } from \"common/webdav\";\nimport { Config } from \"common/config\";\nimport { UserService } from './user-service/user-service';\nimport { AncestorService } from \"services/ancestors-service/ancestors-service\";\nimport { ResourceKind } from \"models/resource\";\nimport { ContainerRequestService } from './container-request-service/container-request-service';\nimport { ContainerService } from './container-service/container-service';\nimport { LogService } from './log-service/log-service';\nimport { ApiActions } from \"services/api/api-actions\";\nimport { WorkflowService } from \"services/workflow-service/workflow-service\";\nimport { SearchService } from 'services/search-service/search-service';\nimport { PermissionService } from \"services/permission-service/permission-service\";\nimport { VirtualMachinesService } from \"services/virtual-machines-service/virtual-machines-service\";\nimport { RepositoriesService } from 'services/repositories-service/repositories-service';\nimport { AuthorizedKeysService } from 'services/authorized-keys-service/authorized-keys-service';\nimport { VocabularyService } from 'services/vocabulary-service/vocabulary-service';\nimport { FileViewersConfigService } from 'services/file-viewers-config-service/file-viewers-config-service';\nimport { LinkAccountService } from \"./link-account-service/link-account-service\";\nimport { ExternalCredentialsService } from \"./external-credentials/external-credentials-service\";\nimport parse from \"parse-duration\";\n\nexport type ServiceRepository = ReturnType<typeof createServices>;\n\nexport function setAuthorizationHeader(services: ServiceRepository, token: string) {\n    services.apiClient.defaults.headers.common = {\n        Authorization: `Bearer ${token}`\n    };\n    services.keepWebdavClient.setAuthorization(`Bearer ${token}`);\n    services.apiWebdavClient.setAuthorization(`Bearer ${token}`);\n}\n\nexport function removeAuthorizationHeader(services: ServiceRepository) {\n    services.apiClient.defaults.headers.common = {};\n\n    services.keepWebdavClient.setAuthorization(undefined);\n    services.apiWebdavClient.setAuthorization(undefined);\n}\n\nexport const createServices = (config: Config, actions: ApiActions, useApiClient?: AxiosInstance) => {\n    // Need to give empty 'headers' object or it will create an\n    // instance with a reference to the global default headers object,\n    // which is very bad because that means setAuthorizationHeader\n    // would update the global default instead of the instance default.\n    const apiClient = useApiClient || Axios.create({ headers: {} });\n    apiClient.defaults.baseURL = config.baseUrl;\n\n    const 
keepWebdavClient = new WebDAV({\n        baseURL: config.keepWebServiceUrl\n    });\n\n    const apiWebdavClient = new WebDAV({\n        baseURL: config.baseUrl\n    });\n\n    const apiClientAuthorizationService = new ApiClientAuthorizationService(apiClient, actions);\n    const authorizedKeysService = new AuthorizedKeysService(apiClient, actions);\n    const containerRequestService = new ContainerRequestService(apiClient, actions);\n    const containerService = new ContainerService(apiClient, actions);\n    const groupsService = new GroupsService(apiClient, actions);\n    const keepService = new KeepService(apiClient, actions);\n    const linkService = new LinkService(apiClient, actions);\n    const logService = new LogService(apiClient, apiWebdavClient, actions);\n    const permissionService = new PermissionService(apiClient, actions);\n    const projectService = new ProjectService(apiClient, actions);\n    const repositoriesService = new RepositoriesService(apiClient, actions);\n    const userService = new UserService(apiClient, actions);\n    const virtualMachineService = new VirtualMachinesService(apiClient, actions);\n    const workflowService = new WorkflowService(apiClient, actions);\n    const linkAccountService = new LinkAccountService(apiClient, actions);\n\n    const idleTimeout = (config && config.clusterConfig && config.clusterConfig.Workbench.IdleTimeout) || '0s';\n    const authService = new AuthService(apiClient, config.rootUrl, actions,\n        (parse(idleTimeout, 's') || 0) > 0);\n\n    const collectionService = new CollectionService(apiClient, keepWebdavClient, authService, actions);\n    const ancestorsService = new AncestorService(groupsService, userService, collectionService);\n    const favoriteService = new FavoriteService(linkService, groupsService);\n    const tagService = new TagService(linkService);\n    const searchService = new SearchService();\n    const vocabularyService = new VocabularyService(config.vocabularyUrl);\n    const fileViewersConfig = new FileViewersConfigService(config.fileViewersConfigUrl);\n    const externalCredentialsService = new ExternalCredentialsService(apiClient, actions);\n\n    return {\n        ancestorsService,\n        apiClient,\n        apiClientAuthorizationService,\n        authService,\n        authorizedKeysService,\n        collectionService,\n        containerRequestService,\n        containerService,\n        favoriteService,\n        fileViewersConfig,\n        groupsService,\n        keepService,\n        linkService,\n        logService,\n        permissionService,\n        projectService,\n        repositoriesService,\n        searchService,\n        tagService,\n        userService,\n        virtualMachineService,\n        keepWebdavClient,\n        apiWebdavClient,\n        workflowService,\n        vocabularyService,\n        linkAccountService,\n        externalCredentialsService,\n    };\n};\n\nexport const getResourceService = (kind?: ResourceKind) => (serviceRepository: ServiceRepository) => {\n    switch (kind) {\n        case ResourceKind.USER:\n            return serviceRepository.userService;\n        case ResourceKind.GROUP:\n            return serviceRepository.groupsService;\n        case ResourceKind.COLLECTION:\n            return serviceRepository.collectionService;\n        case ResourceKind.LINK:\n            return serviceRepository.linkService;\n        default:\n            return undefined;\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/services/tag-service/tag-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { LinkService } from \"../link-service/link-service\";\nimport { LinkClass } from \"models/link\";\nimport { FilterBuilder } from \"services/api/filter-builder\";\nimport { TagTailType, TagResource } from \"models/tag\";\nimport { OrderBuilder } from \"services/api/order-builder\";\n\nexport class TagService {\n\n    constructor(private linkService: LinkService) { }\n\n    create(uuid: string, data: { key: string; value: string } ) {\n        return this.linkService\n            .create({\n                headUuid: uuid,\n                tailUuid: TagTailType.COLLECTION,\n                linkClass: LinkClass.TAG,\n                name: '',\n                properties: data\n            })\n            .then(tag => tag as TagResource );\n    }\n\n    list(uuid: string) {\n        const filters = new FilterBuilder()\n            .addEqual(\"head_uuid\", uuid)\n            .addEqual(\"tail_uuid\", TagTailType.COLLECTION)\n            .addEqual(\"link_class\", LinkClass.TAG)\n            .getFilters();\n\n        const order = new OrderBuilder<TagResource>()\n            .addAsc('createdAt')\n            .getOrder();\n\n        return this.linkService\n            .list({ filters, order })\n            .then(results => {\n                return results.items.map((tag => tag as TagResource ));\n            });\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/user-service/user-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { AxiosInstance } from \"axios\";\nimport { CommonResourceService } from \"services/common-service/common-resource-service\";\nimport { UserResource } from \"models/user\";\nimport { ApiActions } from \"services/api/api-actions\";\nimport { ListResults } from \"services/common-service/common-service\";\n\nexport class UserService extends CommonResourceService<UserResource> {\n    constructor(serverApi: AxiosInstance, actions: ApiActions, readOnlyFields: string[] = []) {\n        super(serverApi, \"users\", actions, readOnlyFields.concat([\n            'fullName',\n            'isInvited'\n        ]));\n    }\n\n    activate(uuid: string) {\n        return CommonResourceService.defaultResponse<UserResource>(\n            this.serverApi\n                .post(this.resourceType + `/${uuid}/activate`),\n            this.actions\n        );\n    }\n\n    setup(uuid: string) {\n        return CommonResourceService.defaultResponse<ListResults<any>>(\n            this.serverApi\n                .post(this.resourceType + `/setup`, {}, { params: { uuid } }),\n            this.actions\n        );\n    }\n\n    unsetup(uuid: string) {\n        return CommonResourceService.defaultResponse<UserResource>(\n            this.serverApi\n                .post(this.resourceType + `/${uuid}/unsetup`),\n            this.actions\n        );\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/virtual-machines-service/virtual-machines-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { AxiosInstance } from \"axios\";\nimport { CommonResourceService } from \"services/common-service/common-resource-service\";\nimport { VirtualMachineLogins, VirtualMachinesResource } from 'models/virtual-machines';\nimport { ApiActions } from 'services/api/api-actions';\n\nexport class VirtualMachinesService extends CommonResourceService<VirtualMachinesResource> {\n    constructor(serverApi: AxiosInstance, actions: ApiActions) {\n        super(serverApi, \"virtual_machines\", actions);\n    }\n\n    getRequestedDate(): string {\n        return localStorage.getItem('requestedDate') || '';\n    }\n\n    saveRequestedDate(date: string) {\n        localStorage.setItem('requestedDate', date);\n    }\n\n    logins(uuid: string): Promise<VirtualMachineLogins> {\n        return CommonResourceService.defaultResponse(\n            this.serverApi\n                .get(`virtual_machines/${uuid}/logins`),\n            this.actions\n        );\n    }\n\n    getAllLogins(): Promise<VirtualMachineLogins> {\n        return CommonResourceService.defaultResponse(\n            this.serverApi\n                .get('virtual_machines/get_all_logins'),\n            this.actions\n        );\n    }\n}"
  },
  {
    "path": "services/workbench2/src/services/vocabulary-service/vocabulary-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport Axios from 'axios';\nimport { Vocabulary } from 'models/vocabulary';\n\nexport class VocabularyService {\n    constructor(\n        private url: string\n    ) { }\n\n    async getVocabulary() {\n        const response = await Axios\n            .get<Vocabulary>(this.url);\n        return response.data;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/services/workflow-service/workflow-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { AxiosInstance } from \"axios\";\nimport { CommonResourceService } from \"services/common-service/common-resource-service\";\nimport { WorkflowResource } from 'models/workflow';\nimport { ApiActions } from 'services/api/api-actions';\nimport { LinkService } from 'services/link-service/link-service';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { LinkClass } from 'models/link';\nimport { OrderBuilder } from 'services/api/order-builder';\n\nexport class WorkflowService extends CommonResourceService<WorkflowResource> {\n\n    private linksService = new LinkService(this.serverApi, this.actions);\n\n    constructor(serverApi: AxiosInstance, actions: ApiActions) {\n        super(serverApi, \"workflows\", actions);\n    }\n\n    async presets(workflowUuid: string) {\n\n        const { items: presetLinks } = await this.linksService.list({\n            filters: new FilterBuilder()\n                .addEqual('tail_uuid', workflowUuid)\n                .addEqual('link_class', LinkClass.PRESET)\n                .getFilters()\n        });\n\n        const presetUuids = presetLinks.map(link => link.headUuid);\n\n        return this.list({\n            filters: new FilterBuilder()\n                .addIn('uuid', presetUuids)\n                .getFilters(),\n            order: new OrderBuilder<WorkflowResource>()\n                .addAsc('name')\n                .getOrder(),\n        });\n\n    }\n\n}\n"
  },
  {
    "path": "services/workbench2/src/store/advanced-tab/advanced-tab.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { RootState } from 'store/store';\nimport { ResourceKind, extractUuidKind } from 'models/resource';\nimport { getResource } from 'store/resources/resources';\nimport { GroupContentsResourcePrefix } from 'services/groups-service/groups-service';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { ContainerRequestResource } from 'models/container-request';\nimport { CollectionResource } from 'models/collection';\nimport { ProjectResource } from 'models/project';\nimport { ServiceRepository } from 'services/services';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { ListResults } from 'services/common-service/common-service';\nimport { RepositoryResource } from 'models/repositories';\nimport { SshKeyResource } from 'models/ssh-key';\nimport { VirtualMachinesResource } from 'models/virtual-machines';\nimport { UserResource } from 'models/user';\nimport { LinkResource } from 'models/link';\nimport { WorkflowResource } from 'models/workflow';\nimport { KeepServiceResource } from 'models/keep-services';\nimport { ApiClientAuthorization } from 'models/api-client-authorization';\nimport React from 'react';\nimport { ExternalCredential } from 'models/external-credential';\n\nexport const ADVANCED_TAB_DIALOG = 'advancedTabDialog';\n\nexport interface AdvancedTabDialogData {\n    uuid: string;\n    apiResponse: JSX.Element;\n    metadata: ListResults<LinkResource> | string;\n    user: UserResource | string;\n    pythonHeader: string;\n    pythonExample: string;\n    cliGetHeader: string;\n    cliGetExample: string;\n    cliUpdateHeader: string;\n    cliUpdateExample: string;\n    curlHeader: string;\n    curlExample: string;\n}\n\nenum CollectionData {\n    COLLECTION = 'collection',\n    STORAGE_CLASSES_CONFIRMED = 'storage_classes_confirmed'\n}\n\nenum ProcessData {\n    CONTAINER_REQUEST = 'container_request',\n    OUTPUT_NAME = 'output_name'\n}\n\nenum ProjectData {\n    GROUP = 'group',\n    DELETE_AT = 'delete_at'\n}\n\nenum RepositoryData {\n    REPOSITORY = 'repository',\n    CREATED_AT = 'created_at'\n}\n\nenum SshKeyData {\n    SSH_KEY = 'authorized_key',\n    CREATED_AT = 'created_at'\n}\n\nenum VirtualMachineData {\n    VIRTUAL_MACHINE = 'virtual_machine',\n    CREATED_AT = 'created_at'\n}\n\nenum ResourcePrefix {\n    REPOSITORIES = 'repositories',\n    AUTORIZED_KEYS = 'authorized_keys',\n    VIRTUAL_MACHINES = 'virtual_machines',\n    KEEP_SERVICES = 'keep_services',\n    USERS = 'users',\n    API_CLIENT_AUTHORIZATIONS = 'api_client_authorizations',\n    LINKS = 'links',\n    EXTERNAL_CREDENTIALS = 'external_credentials',\n}\n\nenum KeepServiceData {\n    KEEP_SERVICE = 'keep_services',\n    CREATED_AT = 'created_at'\n}\n\nenum UserData {\n    USER = 'user',\n    USERNAME = 'username'\n}\n\nenum ApiClientAuthorizationsData {\n    API_CLIENT_AUTHORIZATION = 'api_client_authorization',\n    EXPIRES_AT = 'expires_at'\n}\n\nenum LinkData {\n    LINK = 'link',\n    PROPERTIES = 'properties'\n}\n\nenum WorkflowData {\n    WORKFLOW = 'workflow',\n    CREATED_AT = 'created_at'\n}\n\nenum ExternalCredentialData {\n    EXTERNAL_CREDENTIAL = 'external_credential',\n    CREATED_AT = 'created_at'\n}\n\ntype AdvanceResourceKind = CollectionData | ProcessData | ProjectData | RepositoryData | SshKeyData | 
VirtualMachineData | KeepServiceData | ApiClientAuthorizationsData | UserData | LinkData | WorkflowData | ExternalCredentialData;\ntype AdvanceResourcePrefix = GroupContentsResourcePrefix | ResourcePrefix;\ntype AdvanceResponseData = ContainerRequestResource | ProjectResource | CollectionResource | RepositoryResource | SshKeyResource | VirtualMachinesResource | KeepServiceResource | ApiClientAuthorization | UserResource | LinkResource | WorkflowResource | ExternalCredential | undefined;\n\nexport const openAdvancedTabDialog = (uuid: string) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const kind = extractUuidKind(uuid);\n        switch (kind) {\n            case ResourceKind.COLLECTION:\n                const { data: dataCollection, metadata: metaCollection, user: userCollection } = await dispatch<any>(getDataForAdvancedTab(uuid));\n                const advanceDataCollection = advancedTabData({\n                    uuid,\n                    metadata: metaCollection,\n                    user: userCollection,\n                    apiResponseKind: collectionApiResponse,\n                    data: dataCollection,\n                    resourceKind: CollectionData.COLLECTION,\n                    resourcePrefix: GroupContentsResourcePrefix.COLLECTION,\n                    resourceKindProperty: CollectionData.STORAGE_CLASSES_CONFIRMED,\n                    property: dataCollection.storageClassesConfirmed\n                });\n                dispatch<any>(initAdvancedTabDialog(advanceDataCollection));\n                break;\n            case ResourceKind.PROCESS:\n                const { data: dataProcess, metadata: metaProcess, user: userProcess } = await dispatch<any>(getDataForAdvancedTab(uuid));\n                const advancedDataProcess = advancedTabData({\n                    uuid,\n                    metadata: metaProcess,\n                    user: userProcess,\n                    apiResponseKind: containerRequestApiResponse,\n                    data: dataProcess,\n                    resourceKind: ProcessData.CONTAINER_REQUEST,\n                    resourcePrefix: GroupContentsResourcePrefix.PROCESS,\n                    resourceKindProperty: ProcessData.OUTPUT_NAME,\n                    property: dataProcess.outputName\n                });\n                dispatch<any>(initAdvancedTabDialog(advancedDataProcess));\n                break;\n            case ResourceKind.PROJECT:\n                const { data: dataProject, metadata: metaProject, user: userProject } = await dispatch<any>(getDataForAdvancedTab(uuid));\n                const advanceDataProject = advancedTabData({\n                    uuid,\n                    metadata: metaProject,\n                    user: userProject,\n                    apiResponseKind: groupRequestApiResponse,\n                    data: dataProject,\n                    resourceKind: ProjectData.GROUP,\n                    resourcePrefix: GroupContentsResourcePrefix.PROJECT,\n                    resourceKindProperty: ProjectData.DELETE_AT,\n                    property: dataProject.deleteAt\n                });\n                dispatch<any>(initAdvancedTabDialog(advanceDataProject));\n                break;\n            case ResourceKind.REPOSITORY:\n                const dataRepository = getState().repositories.items.find(it => it.uuid === uuid);\n                const advanceDataRepository = advancedTabData({\n                    uuid,\n                    metadata: '',\n        
            user: '',\n                    apiResponseKind: repositoryApiResponse,\n                    data: dataRepository,\n                    resourceKind: RepositoryData.REPOSITORY,\n                    resourcePrefix: ResourcePrefix.REPOSITORIES,\n                    resourceKindProperty: RepositoryData.CREATED_AT,\n                    property: dataRepository!.createdAt\n                });\n                dispatch<any>(initAdvancedTabDialog(advanceDataRepository));\n                break;\n            case ResourceKind.SSH_KEY:\n                const dataSshKey = getState().auth.sshKeys.find(it => it.uuid === uuid);\n                const advanceDataSshKey = advancedTabData({\n                    uuid,\n                    metadata: '',\n                    user: '',\n                    apiResponseKind: sshKeyApiResponse,\n                    data: dataSshKey,\n                    resourceKind: SshKeyData.SSH_KEY,\n                    resourcePrefix: ResourcePrefix.AUTORIZED_KEYS,\n                    resourceKindProperty: SshKeyData.CREATED_AT,\n                    property: dataSshKey!.createdAt\n                });\n                dispatch<any>(initAdvancedTabDialog(advanceDataSshKey));\n                break;\n            case ResourceKind.VIRTUAL_MACHINE:\n                const dataVirtualMachine = getState().virtualMachines.virtualMachines.items.find(it => it.uuid === uuid);\n                const advanceDataVirtualMachine = advancedTabData({\n                    uuid,\n                    metadata: '',\n                    user: '',\n                    apiResponseKind: virtualMachineApiResponse,\n                    data: dataVirtualMachine,\n                    resourceKind: VirtualMachineData.VIRTUAL_MACHINE,\n                    resourcePrefix: ResourcePrefix.VIRTUAL_MACHINES,\n                    resourceKindProperty: VirtualMachineData.CREATED_AT,\n                    property: dataVirtualMachine!.createdAt\n                });\n                dispatch<any>(initAdvancedTabDialog(advanceDataVirtualMachine));\n                break;\n            case ResourceKind.KEEP_SERVICE:\n                const dataKeepService = getState().keepServices.find(it => it.uuid === uuid);\n                const advanceDataKeepService = advancedTabData({\n                    uuid,\n                    metadata: '',\n                    user: '',\n                    apiResponseKind: keepServiceApiResponse,\n                    data: dataKeepService,\n                    resourceKind: KeepServiceData.KEEP_SERVICE,\n                    resourcePrefix: ResourcePrefix.KEEP_SERVICES,\n                    resourceKindProperty: KeepServiceData.CREATED_AT,\n                    property: dataKeepService!.createdAt\n                });\n                dispatch<any>(initAdvancedTabDialog(advanceDataKeepService));\n                break;\n            case ResourceKind.USER:\n                const { resources } = getState();\n                const data = getResource<UserResource>(uuid)(resources);\n                const metadata = await services.linkService.list({\n                    filters: new FilterBuilder()\n                        .addEqual('head_uuid', uuid)\n                        .getFilters()\n                });\n                const advanceDataUser = advancedTabData({\n                    uuid,\n                    metadata,\n                    user: '',\n                    apiResponseKind: userApiResponse,\n                    data,\n                    resourceKind: 
UserData.USER,\n                    resourcePrefix: ResourcePrefix.USERS,\n                    resourceKindProperty: UserData.USERNAME,\n                    property: data!.username\n                });\n                dispatch<any>(initAdvancedTabDialog(advanceDataUser));\n                break;\n            case ResourceKind.API_CLIENT_AUTHORIZATION:\n                const apiClientAuthorizationResources = getState().resources;\n                const dataApiClientAuthorization = getResource<ApiClientAuthorization>(uuid)(apiClientAuthorizationResources);\n                const advanceDataApiClientAuthorization = advancedTabData({\n                    uuid,\n                    metadata: '',\n                    user: '',\n                    apiResponseKind: apiClientAuthorizationApiResponse,\n                    data: dataApiClientAuthorization,\n                    resourceKind: ApiClientAuthorizationsData.API_CLIENT_AUTHORIZATION,\n                    resourcePrefix: ResourcePrefix.API_CLIENT_AUTHORIZATIONS,\n                    resourceKindProperty: ApiClientAuthorizationsData.EXPIRES_AT,\n                    property: dataApiClientAuthorization!.expiresAt\n                });\n                dispatch<any>(initAdvancedTabDialog(advanceDataApiClientAuthorization));\n                break;\n            case ResourceKind.LINK:\n                const linkResources = getState().resources;\n                const dataLink = getResource<LinkResource>(uuid)(linkResources);\n                const advanceDataLink = advancedTabData({\n                    uuid,\n                    metadata: '',\n                    user: '',\n                    apiResponseKind: linkApiResponse,\n                    data: dataLink,\n                    resourceKind: LinkData.LINK,\n                    resourcePrefix: ResourcePrefix.LINKS,\n                    resourceKindProperty: LinkData.PROPERTIES,\n                    property: dataLink!.properties\n                });\n                dispatch<any>(initAdvancedTabDialog(advanceDataLink));\n                break;\n            case ResourceKind.WORKFLOW:\n                const wfResources = getState().resources;\n                const dataWf = getResource<WorkflowResource>(uuid)(wfResources);\n                const advanceDataWf = advancedTabData({\n                    uuid,\n                    metadata: '',\n                    user: '',\n                    apiResponseKind: wfApiResponse,\n                    data: dataWf,\n                    resourceKind: WorkflowData.WORKFLOW,\n                    resourcePrefix: GroupContentsResourcePrefix.WORKFLOW,\n                    resourceKindProperty: WorkflowData.CREATED_AT,\n                    property: dataWf!.createdAt\n                });\n                dispatch<any>(initAdvancedTabDialog(advanceDataWf));\n                break;\n            case ResourceKind.EXTERNAL_CREDENTIAL:\n                const { resources: ecResources } = getState();\n                const dataExtCred = getResource<ExternalCredential>(uuid)(ecResources);\n                const advanceDataExtCred = advancedTabData({\n                    uuid,\n                    metadata: '',\n                    user: '',\n                    apiResponseKind: extCredApiResponse,\n                    data: dataExtCred,\n                    resourceKind: ExternalCredentialData.EXTERNAL_CREDENTIAL,\n                    resourcePrefix: ResourcePrefix.EXTERNAL_CREDENTIALS,\n                    resourceKindProperty: 
ExternalCredentialData.CREATED_AT,\n                    property: dataExtCred!.createdAt\n                });\n                dispatch<any>(initAdvancedTabDialog(advanceDataExtCred));\n                break;\n\n            default:\n                dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Could not open advanced tab for this resource.\", hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        }\n    };\n\nconst getDataForAdvancedTab = (uuid: string) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const { resources } = getState();\n        const data = getResource<any>(uuid)(resources);\n        const metadata = await services.linkService.list({\n            filters: new FilterBuilder()\n                .addEqual('head_uuid', uuid)\n                .getFilters()\n        });\n\n        return { data, metadata };\n    };\n\nconst initAdvancedTabDialog = (data: AdvancedTabDialogData) => dialogActions.OPEN_DIALOG({ id: ADVANCED_TAB_DIALOG, data });\n\ninterface AdvancedTabData {\n    uuid: string;\n    metadata: ListResults<LinkResource> | string;\n    user: UserResource | string;\n    apiResponseKind: (apiResponse) => JSX.Element;\n    data: AdvanceResponseData;\n    resourceKind: AdvanceResourceKind;\n    resourcePrefix: AdvanceResourcePrefix;\n    resourceKindProperty: AdvanceResourceKind;\n    property: any;\n}\n\nconst advancedTabData = ({ uuid, user, metadata, apiResponseKind, data, resourceKind, resourcePrefix, resourceKindProperty, property }: AdvancedTabData) => {\n    return {\n        uuid,\n        user,\n        metadata,\n        apiResponse: apiResponseKind(data),\n        pythonHeader: pythonHeader(resourceKind),\n        pythonExample: pythonExample(uuid, resourcePrefix),\n        cliGetHeader: cliGetHeader(resourceKind),\n        cliGetExample: cliGetExample(uuid, resourceKind),\n        cliUpdateHeader: cliUpdateHeader(resourceKind, resourceKindProperty),\n        cliUpdateExample: cliUpdateExample(uuid, resourceKind, property, resourceKindProperty),\n        curlHeader: curlHeader(resourceKind, resourceKindProperty),\n        curlExample: curlExample(uuid, resourcePrefix, property, resourceKind, resourceKindProperty),\n    };\n};\n\nconst pythonHeader = (resourceKind: string) =>\n    `An example python command to get a ${resourceKind} using its uuid:`;\n\nconst pythonExample = (uuid: string, resourcePrefix: string) => {\n    const pythonExample = `import arvados\n\nx = arvados.api().${resourcePrefix}().get(uuid='${uuid}').execute()`;\n\n    return pythonExample;\n};\n\nconst cliGetHeader = (resourceKind: string) =>\n    `An example arv command to get a ${resourceKind} using its uuid:`;\n\nconst cliGetExample = (uuid: string, resourceKind: string) => {\n    const cliGetExample = `arv ${resourceKind} get \\\\\n  --uuid ${uuid}`;\n\n    return cliGetExample;\n};\n\nconst cliUpdateHeader = (resourceKind: string, resourceName: string) =>\n    `An example arv command to update the \"${resourceName}\" attribute for the current ${resourceKind}:`;\n\nconst cliUpdateExample = (uuid: string, resourceKind: string, resource: string | string[], resourceName: string) => {\n    const CLIUpdateCollectionExample = `arv ${resourceKind} update \\\\\n  --uuid ${uuid} \\\\\n  --${resourceKind} '{\"${resourceName}\":${JSON.stringify(resource)}}'`;\n\n    return CLIUpdateCollectionExample;\n};\n\nconst curlHeader = (resourceKind: string, resource: string) =>\n    `An example curl command to update the \"${resource}\" 
attribute for the current ${resourceKind}:`;\n\nconst curlExample = (uuid: string, resourcePrefix: string, resource: string | string[], resourceKind: string, resourceName: string) => {\n    const curlExample = `curl -X PUT \\\\\n  -H \"Authorization: Bearer $ARVADOS_API_TOKEN\" \\\\\n  --data-urlencode ${resourceKind}@/dev/stdin \\\\\n  https://$ARVADOS_API_HOST/arvados/v1/${resourcePrefix}/${uuid} \\\\\n  <<EOF\n{\n  \"${resourceName}\": ${JSON.stringify(resource, null, 4)}\n}\nEOF`;\n\n    return curlExample;\n};\n\nconst stringify = (item: string | null | number | boolean) =>\n    JSON.stringify(item) || 'null';\n\nconst stringifyObject = (item: any) =>\n    JSON.stringify(item, null, 2) || 'null';\n\nconst containerRequestApiResponse = (apiResponse: ContainerRequestResource): JSX.Element => {\n    const { uuid, ownerUuid, createdAt, modifiedAt, modifiedByUserUuid, name, description, properties, state, requestingContainerUuid, containerUuid,\n        containerCountMax, mounts, runtimeConstraints, containerImage, environment, cwd, command, outputPath, priority, expiresAt, filters, containerCount,\n        useExisting, schedulingParameters, outputUuid, logUuid, outputName, outputTtl, outputGlob } = apiResponse;\n    const response = `\n\"uuid\": \"${uuid}\",\n\"owner_uuid\": \"${ownerUuid}\",\n\"created_at\": \"${createdAt}\",\n\"modified_at\": ${stringify(modifiedAt)},\n\"modified_by_user_uuid\": ${stringify(modifiedByUserUuid)},\n\"name\": ${stringify(name)},\n\"description\": ${stringify(description)},\n\"properties\": ${stringifyObject(properties)},\n\"state\": ${stringify(state)},\n\"requesting_container_uuid\": ${stringify(requestingContainerUuid)},\n\"container_uuid\": ${stringify(containerUuid)},\n\"container_count_max\": ${stringify(containerCountMax)},\n\"mounts\": ${stringifyObject(mounts)},\n\"runtime_constraints\": ${stringifyObject(runtimeConstraints)},\n\"container_image\": ${stringify(containerImage)},\n\"environment\": ${stringifyObject(environment)},\n\"cwd\": ${stringify(cwd)},\n\"command\": ${stringifyObject(command)},\n\"output_path\": ${stringify(outputPath)},\n\"priority\": ${stringify(priority)},\n\"expires_at\": ${stringify(expiresAt)},\n\"filters\": ${stringify(filters)},\n\"container_count\": ${stringify(containerCount)},\n\"use_existing\": ${stringify(useExisting)},\n\"scheduling_parameters\": ${stringifyObject(schedulingParameters)},\n\"output_uuid\": ${stringify(outputUuid)},\n\"log_uuid\": ${stringify(logUuid)},\n\"output_name\": ${stringify(outputName)},\n\"output_ttl\": ${stringify(outputTtl)},\n\"output_glob\": ${stringifyObject(outputGlob)}`;\n\n    return <span style={{ marginLeft: '-15px' }}>{'{'} {response} {'\\n'} <span style={{ marginLeft: '-15px' }}>{'}'}</span></span>;\n};\n\nconst collectionApiResponse = (apiResponse: CollectionResource): JSX.Element => {\n    const { uuid, ownerUuid, createdAt, modifiedAt, modifiedByUserUuid, name, description, properties, portableDataHash, replicationDesired,\n        replicationConfirmedAt, replicationConfirmed, deleteAt, trashAt, isTrashed, storageClassesDesired,\n        storageClassesConfirmed, storageClassesConfirmedAt, currentVersionUuid, version, preserveVersion, fileCount, fileSizeTotal } = apiResponse;\n    const response = `\n\"uuid\": \"${uuid}\",\n\"owner_uuid\": \"${ownerUuid}\",\n\"created_at\": \"${createdAt}\",\n\"modified_by_user_uuid\": ${stringify(modifiedByUserUuid)},\n\"modified_at\": ${stringify(modifiedAt)},\n\"portable_data_hash\": 
${stringify(portableDataHash)},\n\"replication_desired\": ${stringify(replicationDesired)},\n\"replication_confirmed_at\": ${stringify(replicationConfirmedAt)},\n\"replication_confirmed\": ${stringify(replicationConfirmed)},\n\"name\": ${stringify(name)},\n\"description\": ${stringify(description)},\n\"properties\": ${stringifyObject(properties)},\n\"delete_at\": ${stringify(deleteAt)},\n\"trash_at\": ${stringify(trashAt)},\n\"is_trashed\": ${stringify(isTrashed)},\n\"storage_classes_desired\": ${JSON.stringify(storageClassesDesired, null, 2)},\n\"storage_classes_confirmed\": ${JSON.stringify(storageClassesConfirmed, null, 2)},\n\"storage_classes_confirmed_at\": ${stringify(storageClassesConfirmedAt)},\n\"current_version_uuid\": ${stringify(currentVersionUuid)},\n\"version\": ${version},\n\"preserve_version\": ${preserveVersion},\n\"file_count\": ${fileCount},\n\"file_size_total\": ${fileSizeTotal}`;\n\n    return <span style={{ marginLeft: '-15px' }}>{'{'} {response} {'\\n'} <span style={{ marginLeft: '-15px' }}>{'}'}</span></span>;\n};\n\nconst groupRequestApiResponse = (apiResponse: ProjectResource): JSX.Element => {\n    const { uuid, ownerUuid, createdAt, modifiedAt, modifiedByUserUuid, name,\n        description, groupClass, trashAt, isTrashed, deleteAt, properties,\n        canWrite, canManage } = apiResponse;\n    const response = `\n\"uuid\": \"${uuid}\",\n\"owner_uuid\": \"${ownerUuid}\",\n\"created_at\": \"${createdAt}\",\n\"modified_by_user_uuid\": ${stringify(modifiedByUserUuid)},\n\"modified_at\": ${stringify(modifiedAt)},\n\"name\": ${stringify(name)},\n\"description\": ${stringify(description)},\n\"group_class\": ${stringify(groupClass)},\n\"trash_at\": ${stringify(trashAt)},\n\"is_trashed\": ${stringify(isTrashed)},\n\"delete_at\": ${stringify(deleteAt)},\n\"properties\": ${stringifyObject(properties)},\n\"can_write\": ${stringify(canWrite)},\n\"can_manage\": ${stringify(canManage)}`;\n\n    return <span style={{ marginLeft: '-15px' }}>{'{'} {response} {'\\n'} <span style={{ marginLeft: '-15px' }}>{'}'}</span></span>;\n};\n\nconst repositoryApiResponse = (apiResponse: RepositoryResource): JSX.Element => {\n    const { uuid, ownerUuid, createdAt, modifiedAt, modifiedByUserUuid, name, cloneUrls } = apiResponse;\n    const response = `\n\"uuid\": \"${uuid}\",\n\"owner_uuid\": \"${ownerUuid}\",\n\"modified_by_user_uuid\": ${stringify(modifiedByUserUuid)},\n\"modified_at\": ${stringify(modifiedAt)},\n\"name\": ${stringify(name)},\n\"created_at\": \"${createdAt}\",\n\"clone_urls\": ${stringifyObject(cloneUrls)}`;\n\n    return <span style={{ marginLeft: '-15px' }}>{'{'} {response} {'\\n'} <span style={{ marginLeft: '-15px' }}>{'}'}</span></span>;\n};\n\nconst sshKeyApiResponse = (apiResponse: SshKeyResource): JSX.Element => {\n    const { uuid, ownerUuid, createdAt, modifiedAt, modifiedByUserUuid, name, authorizedUserUuid, expiresAt } = apiResponse;\n    const response = `\n\"uuid\": \"${uuid}\",\n\"owner_uuid\": \"${ownerUuid}\",\n\"authorized_user_uuid\": \"${authorizedUserUuid}\",\n\"modified_by_user_uuid\": ${stringify(modifiedByUserUuid)},\n\"modified_at\": ${stringify(modifiedAt)},\n\"name\": ${stringify(name)},\n\"created_at\": \"${createdAt}\",\n\"expires_at\": ${stringify(expiresAt)}`;\n    return <span style={{ marginLeft: '-15px' }}>{'{'} {response} {'\\n'} <span style={{ marginLeft: '-15px' }}>{'}'}</span></span>;\n};\n\nconst virtualMachineApiResponse = (apiResponse: VirtualMachinesResource): JSX.Element => {\n    const { uuid, ownerUuid, createdAt, modifiedAt, 
modifiedByUserUuid, hostname } = apiResponse;\n    const response = `\n\"hostname\": ${stringify(hostname)},\n\"uuid\": \"${uuid}\",\n\"owner_uuid\": \"${ownerUuid}\",\n\"modified_by_user_uuid\": ${stringify(modifiedByUserUuid)},\n\"modified_at\": ${stringify(modifiedAt)},\n\"created_at\": \"${createdAt}\"`;\n\n    return <span style={{ marginLeft: '-15px' }}>{'{'} {response} {'\\n'} <span style={{ marginLeft: '-15px' }}>{'}'}</span></span>;\n};\n\nconst keepServiceApiResponse = (apiResponse: KeepServiceResource): JSX.Element => {\n    const {\n        uuid, readOnly, serviceHost, servicePort, serviceSslFlag, serviceType,\n        ownerUuid, createdAt, modifiedAt, modifiedByUserUuid\n    } = apiResponse;\n    const response = `\n\"uuid\": \"${uuid}\",\n\"owner_uuid\": \"${ownerUuid}\",\n\"modified_by_user_uuid\": ${stringify(modifiedByUserUuid)},\n\"modified_at\": ${stringify(modifiedAt)},\n\"service_host\": \"${serviceHost}\",\n\"service_port\": \"${servicePort}\",\n\"service_ssl_flag\": ${stringify(serviceSslFlag)},\n\"service_type\": \"${serviceType}\",\n\"created_at\": \"${createdAt}\",\n\"read_only\": ${stringify(readOnly)}`;\n\n    return <span style={{ marginLeft: '-15px' }}>{'{'} {response} {'\\n'} <span style={{ marginLeft: '-15px' }}>{'}'}</span></span>;\n};\n\nconst userApiResponse = (apiResponse: UserResource): JSX.Element => {\n    const {\n        uuid, ownerUuid, createdAt, modifiedAt, modifiedByUserUuid,\n        email, firstName, lastName, username, isActive, isAdmin, prefs,\n    } = apiResponse;\n    const response = `\n\"uuid\": \"${uuid}\",\n\"owner_uuid\": \"${ownerUuid}\",\n\"created_at\": \"${createdAt}\",\n\"modified_by_user_uuid\": ${stringify(modifiedByUserUuid)},\n\"modified_at\": ${stringify(modifiedAt)},\n\"email\": \"${email}\",\n\"first_name\": \"${firstName}\",\n\"last_name\": ${stringify(lastName)},\n\"username\": \"${username}\",\n\"is_active\": ${stringify(isActive)},\n\"is_admin\": ${stringify(isAdmin)},\n\"prefs\": ${stringifyObject(prefs)}`;\n\n    return <span style={{ marginLeft: '-15px' }}>{'{'} {response} {'\\n'} <span style={{ marginLeft: '-15px' }}>{'}'}</span></span>;\n};\n\nconst apiClientAuthorizationApiResponse = (apiResponse: ApiClientAuthorization): JSX.Element => {\n    const {\n        uuid, ownerUuid, apiToken, createdByIpAddress, lastUsedByIpAddress,\n        lastUsedAt, expiresAt, scopes, updatedAt, createdAt\n    } = apiResponse;\n    const response = `\n\"uuid\": \"${uuid}\",\n\"owner_uuid\": \"${ownerUuid}\",\n\"api_token\": ${stringify(apiToken)},\n\"created_by_ip_address\": ${stringify(createdByIpAddress)},\n\"last_used_by_ip_address\": ${stringify(lastUsedByIpAddress)},\n\"last_used_at\": ${stringify(lastUsedAt)},\n\"expires_at\": ${stringify(expiresAt)},\n\"created_at\": ${stringify(createdAt)},\n\"updated_at\": ${stringify(updatedAt)},\n\"scopes\": ${JSON.stringify(scopes, null, 2)}`;\n\n    return <span style={{ marginLeft: '-15px' }}>{'{'} {response} {'\\n'} <span style={{ marginLeft: '-15px' }}>{'}'}</span></span>;\n};\n\nconst linkApiResponse = (apiResponse: LinkResource): JSX.Element => {\n    const {\n        uuid, name, headUuid, properties, headKind, tailUuid, tailKind, linkClass,\n        ownerUuid, createdAt, modifiedAt, modifiedByUserUuid\n    } = apiResponse;\n    const response = `\n\"uuid\": \"${uuid}\",\n\"name\": \"${name}\",\n\"head_uuid\": \"${headUuid}\",\n\"head_kind\": \"${headKind}\",\n\"tail_uuid\": 
\"${tailUuid}\",\n\"tail_kind\": \"${tailKind}\",\n\"link_class\": \"${linkClass}\",\n\"owner_uuid\": \"${ownerUuid}\",\n\"created_at\": \"${stringify(createdAt)}\",\n\"modified_at\": ${stringify(modifiedAt)},\n\"modified_by_user_uuid\": ${stringify(modifiedByUserUuid)},\n\"properties\": \"${JSON.stringify(properties, null, 2)}\"`;\n\n    return <span style={{ marginLeft: '-15px' }}>{'{'} {response} {'\\n'} <span style={{ marginLeft: '-15px' }}>{'}'}</span></span>;\n};\n\n\nconst wfApiResponse = (apiResponse: WorkflowResource): JSX.Element => {\n    const {\n        uuid, name,\n        ownerUuid, createdAt, modifiedAt, modifiedByUserUuid, description\n    } = apiResponse;\n    const response = `\n\"uuid\": \"${uuid}\",\n\"name\": \"${name}\",\n\"owner_uuid\": \"${ownerUuid}\",\n\"created_at\": \"${stringify(createdAt)}\",\n\"modified_at\": ${stringify(modifiedAt)},\n\"modified_by_user_uuid\": ${stringify(modifiedByUserUuid)}\n\"description\": ${stringify(description)}`;\n\n    return <span style={{ marginLeft: '-15px' }}>{'{'} {response} {'\\n'} <span style={{ marginLeft: '-15px' }}>{'}'}</span></span>;\n};\n\nconst extCredApiResponse = (apiResponse: ExternalCredential): JSX.Element => {\n    const {\n        uuid, ownerUuid, createdAt, modifiedAt, modifiedByUserUuid, name, description = null, scopes, expiresAt\n    } = apiResponse;\n    const response = `\n\"uuid\": \"${uuid}\",\n\"owner_uuid\": \"${ownerUuid}\",\n\"created_at\": \"${stringify(createdAt)}\",\n\"modified_by_user_uuid\": ${stringify(modifiedByUserUuid)},\n\"modified_at\": ${stringify(modifiedAt)},\n\"name\": ${stringify(name)},\n\"description\": ${stringify(description)},\n\"scopes\": ${JSON.stringify(scopes, null, 2)},\n\"expires_at\": \"${stringify(expiresAt)}\"`;\n\n    return <span style={{ marginLeft: '-15px' }}>{'{'} {response} {'\\n'} <span style={{ marginLeft: '-15px' }}>{'}'}</span></span>;\n};\n"
  },
  {
    "path": "services/workbench2/src/store/all-processes-panel/all-processes-panel-action.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { bindDataExplorerActions } from \"../data-explorer/data-explorer-action\";\n\nexport const ALL_PROCESSES_PANEL_ID = \"allProcessesPanel\";\nexport const allProcessesPanelActions = bindDataExplorerActions(ALL_PROCESSES_PANEL_ID);\n\nexport const loadAllProcessesPanel = () => (dispatch: Dispatch) => {\n    dispatch(allProcessesPanelActions.RESET_EXPLORER_SEARCH_VALUE());\n    dispatch(allProcessesPanelActions.REQUEST_ITEMS());\n}\n"
  },
  {
    "path": "services/workbench2/src/store/all-processes-panel/all-processes-panel-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { getDataExplorerColumnFilters } from \"store/data-explorer/data-explorer-middleware-service\";\nimport { RootState } from \"../store\";\nimport { ServiceRepository } from \"services/services\";\nimport { joinFilters } from \"services/api/filter-builder\";\nimport { allProcessesPanelActions } from \"./all-processes-panel-action\";\nimport { Dispatch, MiddlewareAPI } from \"redux\";\nimport { DataExplorer } from \"store/data-explorer/data-explorer-reducer\";\nimport { DataColumns } from \"components/data-table/data-column\";\nimport {\n    serializeOnlyProcessTypeFilters\n} from \"../resource-type-filters/resource-type-filters\";\nimport { AllProcessesPanelColumnNames } from \"views/all-processes-panel/all-processes-panel-columns\";\nimport { ProcessesMiddlewareService } from \"store/processes/processes-middleware-service\";\nimport { ContainerRequestResource } from 'models/container-request';\n\nexport class AllProcessesPanelMiddlewareService extends ProcessesMiddlewareService {\n    constructor(services: ServiceRepository, id: string) {\n        super(services, allProcessesPanelActions, id);\n    }\n\n    getFilters(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): string | null {\n        const sup = super.getFilters(api, dataExplorer);\n        if (sup === null) { return null; }\n        const columns = dataExplorer.columns as DataColumns<string, ContainerRequestResource>;\n\n        const typeFilters = serializeOnlyProcessTypeFilters(true)(getDataExplorerColumnFilters(columns, AllProcessesPanelColumnNames.TYPE));\n        return joinFilters(sup, typeFilters);\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/store/api-client-authorizations/api-client-authorizations-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from 'store/store';\nimport { setBreadcrumbs } from 'store/breadcrumbs/breadcrumbs-actions';\nimport { ServiceRepository } from \"services/services\";\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { snackbarActions } from 'store/snackbar/snackbar-actions';\nimport { navigateToRootProject } from 'store/navigation/navigation-action';\nimport { ApiClientAuthorization } from 'models/api-client-authorization';\nimport { bindDataExplorerActions } from 'store/data-explorer/data-explorer-action';\nimport { getResource } from 'store/resources/resources';\n\n\nexport const API_CLIENT_AUTHORIZATION_PANEL_ID = 'apiClientAuthorizationPanelId';\nexport const apiClientAuthorizationsActions = bindDataExplorerActions(API_CLIENT_AUTHORIZATION_PANEL_ID);\n\nexport const API_CLIENT_AUTHORIZATION_REMOVE_DIALOG = 'apiClientAuthorizationRemoveDialog';\nexport const API_CLIENT_AUTHORIZATION_ATTRIBUTES_DIALOG = 'apiClientAuthorizationAttributesDialog';\nexport const API_CLIENT_AUTHORIZATION_HELP_DIALOG = 'apiClientAuthorizationHelpDialog';\n\n\nexport const loadApiClientAuthorizationsPanel = () =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const user = getState().auth.user;\n        if (user && user.isAdmin) {\n            try {\n                dispatch(setBreadcrumbs([{ label: 'Api client authorizations' }]));\n                dispatch(apiClientAuthorizationsActions.REQUEST_ITEMS());\n            } catch (e) {\n                return;\n            }\n        } else {\n            dispatch(navigateToRootProject);\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"You don't have permissions to view this page\", hideDuration: 2000 }));\n        }\n    };\n\nexport const openApiClientAuthorizationAttributesDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const { resources } = getState();\n        const apiClientAuthorization = getResource<ApiClientAuthorization>(uuid)(resources);\n        dispatch(dialogActions.OPEN_DIALOG({ id: API_CLIENT_AUTHORIZATION_ATTRIBUTES_DIALOG, data: { apiClientAuthorization } }));\n    };\n\nexport const openApiClientAuthorizationRemoveDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: API_CLIENT_AUTHORIZATION_REMOVE_DIALOG,\n            data: {\n                title: 'Remove api client authorization',\n                text: 'Are you sure you want to remove this api client authorization?',\n                confirmButtonLabel: 'Remove',\n                uuid\n            }\n        }));\n    };\n\nexport const removeApiClientAuthorization = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removing ...' 
}));\n        try {\n            await services.apiClientAuthorizationService.delete(uuid);\n            dispatch(apiClientAuthorizationsActions.REQUEST_ITEMS());\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Api client authorization has been successfully removed.', hideDuration: 2000 }));\n        } catch (e) {\n            return;\n        }\n    };\n\nexport const openApiClientAuthorizationsHelpDialog = () =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const apiHost = getState().properties.apiHost;\n        const user = getState().auth.user;\n        const email = user ? user.email : '';\n        const apiToken = getState().auth.apiToken;\n        dispatch(dialogActions.OPEN_DIALOG({ id: API_CLIENT_AUTHORIZATION_HELP_DIALOG, data: { apiHost, apiToken, email } }));\n    };"
  },
  {
    "path": "services/workbench2/src/store/api-client-authorizations/api-client-authorizations-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ServiceRepository } from 'services/services';\nimport { MiddlewareAPI, Dispatch } from 'redux';\nimport { DataExplorerMiddlewareService, dataExplorerToListParams, getOrder, listResultsToDataExplorerItemsMeta } from 'store/data-explorer/data-explorer-middleware-service';\nimport { RootState } from 'store/store';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { DataExplorer, getDataExplorer } from 'store/data-explorer/data-explorer-reducer';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { apiClientAuthorizationsActions } from 'store/api-client-authorizations/api-client-authorizations-actions';\nimport { ListArguments, ListResults } from 'services/common-service/common-service';\nimport { ApiClientAuthorization } from 'models/api-client-authorization';\nimport { couldNotFetchItemsAvailable } from 'store/data-explorer/data-explorer-action';\n\nexport class ApiClientAuthorizationMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        try {\n            const response = await this.services.apiClientAuthorizationService.list(getParams(dataExplorer));\n            api.dispatch(updateResources(response.items));\n            api.dispatch(setItems(response));\n        } catch {\n            api.dispatch(couldNotFetchLinks());\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        if (criteriaChanged) {\n            // Get itemsAvailable\n            return this.services.apiClientAuthorizationService.list(getCountParams())\n                .then((results: ListResults<ApiClientAuthorization>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(apiClientAuthorizationsActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        couldNotFetchItemsAvailable();\n                    }\n                });\n        }\n    }\n}\n\nconst getParams = (dataExplorer: DataExplorer): ListArguments => ({\n    ...dataExplorerToListParams(dataExplorer),\n    order: getOrder<ApiClientAuthorization>(dataExplorer),\n    count: 'none',\n});\n\nconst getCountParams = (): ListArguments => ({\n    limit: 0,\n    count: 'exact',\n});\n\nexport const setItems = (listResults: ListResults<ApiClientAuthorization>) =>\n    apiClientAuthorizationsActions.SET_ITEMS({\n        ...listResultsToDataExplorerItemsMeta(listResults),\n        items: listResults.items.map(resource => resource.uuid),\n    });\n\nconst couldNotFetchLinks = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch api client authorizations.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/app-info/app-info-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from 'common/unionize';\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from 'services/services';\nimport { getBuildInfo } from 'common/app-info';\n\nexport const appInfoActions = unionize({\n    SET_BUILD_INFO: ofType<string>()\n});\n\nexport type AppInfoAction = UnionOf<typeof appInfoActions>;\n\nexport const setBuildInfo = () => \n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) =>\n        dispatch(appInfoActions.SET_BUILD_INFO(getBuildInfo()));\n\n\n\n"
  },
  {
    "path": "services/workbench2/src/store/app-info/app-info-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { appInfoActions, AppInfoAction } from \"store/app-info/app-info-actions\";\n\nexport interface AppInfoState {\n    buildInfo: string;\n}\n\nconst initialState = {\n    buildInfo: ''\n};\n\nexport const appInfoReducer = (state: AppInfoState = initialState, action: AppInfoAction) =>\n    appInfoActions.match(action, {\n        SET_BUILD_INFO: buildInfo => ({ ...state, buildInfo }),\n        default: () => state\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/auth/auth-action-session.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { setBreadcrumbs } from \"store/breadcrumbs/breadcrumbs-actions\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository, createServices, setAuthorizationHeader } from \"services/services\";\nimport Axios, { AxiosInstance } from \"axios\";\nimport { User, getUserDisplayName } from \"models/user\";\nimport { authActions } from \"store/auth/auth-action\";\nimport {\n    Config, ClusterConfigJSON, CLUSTER_CONFIG_PATH, DISCOVERY_DOC_PATH,\n    buildConfig, mockClusterConfigJSON\n} from \"common/config\";\nimport { normalizeURLPath } from \"common/url\";\nimport { Session, SessionStatus } from \"models/session\";\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { AuthService } from \"services/auth-service/auth-service\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport jsSHA from \"jssha\";\nimport { initClusterBadges } from \"store/auth/cluster-badges\";\n\nconst getClusterConfig = async (origin: string, apiClient: AxiosInstance): Promise<Config | null> => {\n    let configFromDD: Config | undefined;\n    try {\n        const dd = (await apiClient.get<any>(`${origin}/${DISCOVERY_DOC_PATH}`)).data;\n        configFromDD = {\n            baseUrl: normalizeURLPath(dd.baseUrl),\n            keepWebServiceUrl: dd.keepWebServiceUrl,\n            keepWebInlineServiceUrl: dd.keepWebInlineServiceUrl,\n            remoteHosts: dd.remoteHosts,\n            rootUrl: dd.rootUrl,\n            uuidPrefix: dd.uuidPrefix,\n            websocketUrl: dd.websocketUrl,\n            workbenchUrl: dd.workbenchUrl,\n            workbench2Url: dd.workbench2Url,\n            loginCluster: \"\",\n            vocabularyUrl: \"\",\n            fileViewersConfigUrl: \"\",\n            clusterConfig: mockClusterConfigJSON({}),\n            apiRevision: parseInt(dd.revision, 10),\n        };\n    } catch { }\n\n    // Try public config endpoint\n    try {\n        const config = (await apiClient.get<ClusterConfigJSON>(`${origin}/${CLUSTER_CONFIG_PATH}`)).data;\n        return { ...buildConfig(config), apiRevision: configFromDD ? 
configFromDD.apiRevision : 0 };\n    } catch { }\n\n    // Fall back to discovery document\n    if (configFromDD !== undefined) {\n        return configFromDD;\n    }\n\n    return null;\n};\n\nexport const getRemoteHostConfig = async (remoteHost: string, useApiClient?: AxiosInstance): Promise<Config | null> => {\n    const apiClient = useApiClient || Axios.create({ headers: {} });\n\n    let url = remoteHost;\n    if (url.indexOf('://') < 0) {\n        url = 'https://' + url;\n    }\n    const origin = new URL(url).origin;\n\n    // Maybe it is an API server URL, try fetching config and discovery doc\n    let r = await getClusterConfig(origin, apiClient);\n    if (r !== null) {\n        return r;\n    }\n\n    // Maybe it is a Workbench2 URL, try getting config.json\n    try {\n        r = await getClusterConfig((await apiClient.get<any>(`${origin}/config.json`)).data.API_HOST, apiClient);\n        if (r !== null) {\n            return r;\n        }\n    } catch { }\n\n    // Maybe it is a Workbench1 URL, try getting status.json\n    try {\n        r = await getClusterConfig((await apiClient.get<any>(`${origin}/status.json`)).data.apiBaseURL, apiClient);\n        if (r !== null) {\n            return r;\n        }\n    } catch { }\n\n    return null;\n};\n\nconst invalidV2Token = \"Must be a v2 token\";\n\nexport const getSaltedToken = (clusterId: string, token: string) => {\n    const shaObj = new jsSHA(\"SHA-1\", \"TEXT\");\n    const [ver, uuid, secret] = token.split(\"/\");\n    if (ver !== \"v2\") {\n        throw new Error(invalidV2Token);\n    }\n    let salted = secret;\n    if (uuid.substring(0, 5) !== clusterId) {\n        shaObj.setHMACKey(secret, \"TEXT\");\n        shaObj.update(clusterId);\n        salted = shaObj.getHMAC(\"HEX\");\n    }\n    return `v2/${uuid}/${salted}`;\n};\n\nexport const getActiveSession = (sessions: Session[]): Session | undefined => sessions.find(s => s.active);\n\nexport const validateCluster = async (config: Config, useToken: string):\n    Promise<{ user: User; token: string }> => {\n\n    const saltedToken = getSaltedToken(config.uuidPrefix, useToken);\n\n    const svc = createServices(config, { progressFn: () => { }, errorFn: () => { } });\n    setAuthorizationHeader(svc, saltedToken);\n\n    const user = await svc.authService.getUserDetails(false);\n    return {\n        user,\n        token: saltedToken,\n    };\n};\n\nexport const validateSession = (session: Session, activeSession: Session, useApiClient?: AxiosInstance) =>\n    async (dispatch: Dispatch): Promise<Session> => {\n        dispatch(authActions.UPDATE_SESSION({ ...session, status: SessionStatus.BEING_VALIDATED }));\n        session.loggedIn = false;\n\n        const setupSession = (baseUrl: string, user: User, token: string, apiRevision: number) => {\n            session.baseUrl = baseUrl;\n            session.token = token;\n            session.email = user.email;\n            session.userIsActive = user.isActive;\n            session.uuid = user.uuid;\n            session.name = getUserDisplayName(user);\n            session.loggedIn = true;\n            session.apiRevision = apiRevision;\n        };\n\n        let fail: Error | null = null;\n        const config = await getRemoteHostConfig(session.remoteHost, useApiClient);\n        if (config !== null) {\n            dispatch(authActions.REMOTE_CLUSTER_CONFIG({ config }));\n            try {\n                const { user, token } = await validateCluster(config, session.token);\n                setupSession(config.baseUrl, user, 
token, config.apiRevision);\n            } catch (e) {\n                fail = new Error(`Getting current user for ${session.remoteHost}: ${e.message}`);\n                try {\n                    const { user, token } = await validateCluster(config, activeSession.token);\n                    setupSession(config.baseUrl, user, token, config.apiRevision);\n                    fail = null;\n                } catch (e2) {\n                    if (e.message === invalidV2Token) {\n                        fail = new Error(`Getting current user for ${session.remoteHost}: ${e2.message}`);\n                    }\n                }\n            }\n        } else {\n            fail = new Error(`Could not get config for ${session.remoteHost}`);\n        }\n        session.status = SessionStatus.VALIDATED;\n        dispatch(authActions.UPDATE_SESSION(session));\n\n        if (fail) {\n            throw fail;\n        }\n\n        return session;\n    };\n\nexport const validateSessions = (useApiClient?: AxiosInstance) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const sessions = getState().auth.sessions;\n        const activeSession = getActiveSession(sessions);\n        if (activeSession) {\n            dispatch(progressIndicatorActions.START_WORKING(\"sessionsValidation\"));\n            for (const session of sessions) {\n                if (session.status === SessionStatus.INVALIDATED) {\n                    try {\n\t\t\t/* Here we are dispatching a function, not an\n\t\t\t   action.  This is legal (it calls the\n\t\t\t   function with a 'Dispatch' object as the\n\t\t\t   first parameter) but the typescript\n\t\t\t   annotations don't understand this case, so\n\t\t\t   we get an error from typescript unless\n\t\t\t   we override it using Dispatch<any>.  This\n\t\t\t   pattern is used in a bunch of different\n\t\t\t   places in Workbench2. */\n                        await dispatch(validateSession(session, activeSession, useApiClient));\n                    } catch (e) {\n                        // Don't do anything here.  User may get\n                        // spammed with multiple messages that are not\n                        // helpful.  
They can see the individual\n                        // errors by going to site manager and trying\n                        // to toggle the session.\n                    }\n                }\n            }\n            services.authService.saveSessions(getState().auth.sessions);\n            dispatch(progressIndicatorActions.STOP_WORKING(\"sessionsValidation\"));\n        }\n    };\n\nexport const addRemoteConfig = (remoteHost: string) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const config = await getRemoteHostConfig(remoteHost);\n        if (!config) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: `Could not get config for ${remoteHost}`,\n                kind: SnackbarKind.ERROR\n            }));\n            return;\n        }\n        dispatch(authActions.REMOTE_CLUSTER_CONFIG({ config }));\n    };\n\nexport const addSession = (remoteHost: string, token?: string, sendToLogin?: boolean) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const sessions = getState().auth.sessions;\n        const activeSession = getActiveSession(sessions);\n        let useToken: string | null = null;\n        if (token) {\n            useToken = token;\n        } else if (activeSession) {\n            useToken = activeSession.token;\n        }\n\n        if (useToken) {\n            const config = await getRemoteHostConfig(remoteHost);\n            if (!config) {\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: `Could not get config for ${remoteHost}`,\n                    kind: SnackbarKind.ERROR\n                }));\n                return;\n            }\n\n            try {\n                dispatch(authActions.REMOTE_CLUSTER_CONFIG({ config }));\n                const { user, token } = await validateCluster(config, useToken);\n                const session = {\n                    loggedIn: true,\n                    status: SessionStatus.VALIDATED,\n                    active: false,\n                    email: user.email,\n                    userIsActive: user.isActive,\n                    name: getUserDisplayName(user),\n                    uuid: user.uuid,\n                    baseUrl: config.baseUrl,\n                    clusterId: config.uuidPrefix,\n                    remoteHost,\n                    token,\n                    apiRevision: config.apiRevision,\n                };\n\n                if (sessions.find(s => s.clusterId === config.uuidPrefix)) {\n                    await dispatch(authActions.UPDATE_SESSION(session));\n                } else {\n                    await dispatch(authActions.ADD_SESSION(session));\n                }\n                services.authService.saveSessions(getState().auth.sessions);\n\n                return session;\n            } catch {\n                if (sendToLogin) {\n                    const rootUrl = new URL(config.baseUrl);\n                    rootUrl.pathname = \"\";\n                    window.location.href = `${rootUrl.toString()}/login?return_to=` + encodeURI(`${window.location.protocol}//${window.location.host}/add-session?baseURL=` + encodeURI(rootUrl.toString()));\n                    return;\n                }\n            }\n        }\n        return Promise.reject(new Error(\"Could not validate cluster\"));\n    };\n\n\nexport const removeSession = (clusterId: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, 
services: ServiceRepository) => {\n        await dispatch(authActions.REMOVE_SESSION(clusterId));\n        services.authService.saveSessions(getState().auth.sessions);\n    };\n\nexport const toggleSession = (session: Session) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const s: Session = { ...session };\n\n        if (session.loggedIn) {\n            s.loggedIn = false;\n            dispatch(authActions.UPDATE_SESSION(s));\n        } else {\n            const sessions = getState().auth.sessions;\n            const activeSession = getActiveSession(sessions);\n            if (activeSession) {\n                try {\n                    await dispatch(validateSession(s, activeSession));\n                } catch (e) {\n                    dispatch(snackbarActions.OPEN_SNACKBAR({\n                        message: e.message,\n                        kind: SnackbarKind.ERROR\n                    }));\n                    s.loggedIn = false;\n                    dispatch(authActions.UPDATE_SESSION(s));\n                }\n            }\n        }\n\n        services.authService.saveSessions(getState().auth.sessions);\n    };\n\nexport const initSessions = (authService: AuthService, config: Config, user: User) =>\n    (dispatch: Dispatch<any>) => {\n        const sessions = authService.buildSessions(config, user);\n        dispatch(authActions.SET_SESSIONS(sessions));\n        dispatch(validateSessions(authService.getApiClient()));\n        dispatch(initClusterBadges(sessions));\n    };\n\nexport const loadSiteManagerPanel = () =>\n    async (dispatch: Dispatch<any>) => {\n        try {\n            dispatch(setBreadcrumbs([{ label: 'Site Manager' }]));\n            dispatch(validateSessions());\n        } catch (e) {\n            return;\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/auth/auth-action-ssh.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { getUserUuid } from \"common/getuser\";\nimport { ServiceRepository } from \"services/services\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { FormErrors, reset, startSubmit, stopSubmit } from \"redux-form\";\nimport { KeyType } from \"models/ssh-key\";\nimport {\n    AuthorizedKeysServiceError,\n    getAuthorizedKeysServiceError\n} from \"services/authorized-keys-service/authorized-keys-service\";\nimport { setBreadcrumbs } from \"store/breadcrumbs/breadcrumbs-actions\";\nimport { authActions } from \"store/auth/auth-action\";\n\nexport const SSH_KEY_CREATE_FORM_NAME = 'sshKeyCreateFormName';\nexport const SSH_KEY_PUBLIC_KEY_DIALOG = 'sshKeyPublicKeyDialog';\nexport const SSH_KEY_REMOVE_DIALOG = 'sshKeyRemoveDialog';\nexport const SSH_KEY_ATTRIBUTES_DIALOG = 'sshKeyAttributesDialog';\n\nexport interface SshKeyCreateFormDialogData {\n    publicKey: string;\n    name: string;\n}\n\nexport const openSshKeyCreateDialog = () => dialogActions.OPEN_DIALOG({ id: SSH_KEY_CREATE_FORM_NAME, data: {} });\n\nexport const openPublicKeyDialog = (name: string, publicKey: string) =>\n    dialogActions.OPEN_DIALOG({ id: SSH_KEY_PUBLIC_KEY_DIALOG, data: { name, publicKey } });\n\nexport const openSshKeyAttributesDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const sshKey = getState().auth.sshKeys.find(it => it.uuid === uuid);\n        dispatch(dialogActions.OPEN_DIALOG({ id: SSH_KEY_ATTRIBUTES_DIALOG, data: { sshKey } }));\n    };\n\nexport const openSshKeyRemoveDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: SSH_KEY_REMOVE_DIALOG,\n            data: {\n                title: 'Remove public key',\n                text: 'Are you sure you want to remove this public key?',\n                confirmButtonLabel: 'Remove',\n                uuid\n            }\n        }));\n    };\n\nexport const removeSshKey = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removing ...', kind: SnackbarKind.INFO }));\n        await services.authorizedKeysService.delete(uuid);\n        dispatch(authActions.REMOVE_SSH_KEY(uuid));\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Public Key has been successfully removed.', hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n    };\n\nexport const createSshKey = (data: SshKeyCreateFormDialogData) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const userUuid = getUserUuid(getState());\n        if (!userUuid) { return; }\n        const { name, publicKey } = data;\n        dispatch(startSubmit(SSH_KEY_CREATE_FORM_NAME));\n        try {\n            const newSshKey = await services.authorizedKeysService.create({\n                name,\n                publicKey,\n                keyType: KeyType.SSH,\n                authorizedUserUuid: userUuid\n            });\n            dispatch(authActions.ADD_SSH_KEY(newSshKey));\n            dispatch(dialogActions.CLOSE_DIALOG({ id: SSH_KEY_CREATE_FORM_NAME }));\n            
dispatch(reset(SSH_KEY_CREATE_FORM_NAME));\n            dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: \"Public key has been successfully created.\",\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS\n            }));\n        } catch (e) {\n            const error = getAuthorizedKeysServiceError(e);\n            if (error === AuthorizedKeysServiceError.UNIQUE_PUBLIC_KEY) {\n                dispatch(stopSubmit(SSH_KEY_CREATE_FORM_NAME, { publicKey: 'Public key already exists.' } as FormErrors));\n            } else if (error === AuthorizedKeysServiceError.INVALID_PUBLIC_KEY) {\n                dispatch(stopSubmit(SSH_KEY_CREATE_FORM_NAME, { publicKey: 'Public key is invalid' } as FormErrors));\n            }\n        }\n    };\n\nexport const loadSshKeysPanel = () =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            dispatch(setBreadcrumbs([{ label: 'SSH Keys' }]));\n            const response = await services.authorizedKeysService.list();\n            dispatch(authActions.SET_SSH_KEYS(response.items));\n        } catch (e) {\n            return;\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/auth/auth-action.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ofType, unionize, UnionOf } from 'common/unionize';\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"../store\";\nimport { ServiceRepository } from \"services/services\";\nimport { SshKeyResource } from 'models/ssh-key';\nimport { User } from \"models/user\";\nimport { Session } from \"models/session\";\nimport { Config } from 'common/config';\nimport { matchTokenRoute, matchFedTokenRoute } from 'routes/routes';\nimport { createServices, setAuthorizationHeader } from \"services/services\";\nimport { cancelLinking } from 'store/link-account-panel/link-account-panel-actions';\nimport { progressIndicatorActions, WORKBENCH_LOADING_SCREEN } from \"store/progress-indicator/progress-indicator-actions\";\nimport { addRemoteConfig, getRemoteHostConfig } from './auth-action-session';\nimport { getTokenV2 } from 'models/api-client-authorization';\n\nexport const authActions = unionize({\n    LOGIN: {},\n    LOGOUT: ofType<{ deleteLinkData: boolean, preservePath: boolean }>(),\n    SET_CONFIG: ofType<{ config: Config }>(),\n    SET_EXTRA_TOKEN: ofType<{ extraApiToken: string, extraApiTokenExpiration?: Date }>(),\n    RESET_EXTRA_TOKEN: {},\n    INIT_USER: ofType<{ user: User, token: string, tokenExpiration?: Date, tokenLocation?: string }>(),\n    USER_DETAILS_REQUEST: {},\n    USER_DETAILS_SUCCESS: ofType<User>(),\n    SET_SSH_KEYS: ofType<SshKeyResource[]>(),\n    ADD_SSH_KEY: ofType<SshKeyResource>(),\n    REMOVE_SSH_KEY: ofType<string>(),\n    SET_HOME_CLUSTER: ofType<string>(),\n    SET_SESSIONS: ofType<Session[]>(),\n    ADD_SESSION: ofType<Session>(),\n    REMOVE_SESSION: ofType<string>(),\n    UPDATE_SESSION: ofType<Session>(),\n    REMOTE_CLUSTER_CONFIG: ofType<{ config: Config }>(),\n});\n\nexport const initAuth = (config: Config) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<any> => {\n    // Cancel any link account ops in progress unless the user has\n    // just logged in or there has been a successful link operation\n    const data = services.linkAccountService.getLinkOpStatus();\n    if (!matchTokenRoute(window.location.pathname) &&\n        (!matchFedTokenRoute(window.location.pathname)) && data === undefined) {\n        await dispatch<any>(cancelLinking());\n    }\n    return dispatch<any>(init(config));\n};\n\nconst init = (config: Config) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    const remoteHosts = () => getState().auth.remoteHosts;\n    const token = services.authService.getApiToken();\n    let homeCluster = services.authService.getHomeCluster();\n    if (homeCluster && !config.remoteHosts[homeCluster]) {\n        homeCluster = undefined;\n    }\n    dispatch(authActions.SET_CONFIG({ config }));\n    Object.keys(remoteHosts()).forEach((remoteUuid: string) => {\n        const remoteHost = remoteHosts()[remoteUuid];\n        if (remoteUuid !== config.uuidPrefix) {\n            dispatch<any>(addRemoteConfig(remoteHost));\n        }\n    });\n    dispatch(authActions.SET_HOME_CLUSTER(config.loginCluster || homeCluster || config.uuidPrefix));\n\n    if (token && token !== \"undefined\") {\n        dispatch(progressIndicatorActions.START_WORKING(WORKBENCH_LOADING_SCREEN));\n        try {\n            await dispatch<any>(saveApiToken(token));\n        } finally {\n            
dispatch(progressIndicatorActions.STOP_WORKING(WORKBENCH_LOADING_SCREEN));\n        }\n    }\n};\n\nexport const getConfig = (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Config => {\n    const state = getState().auth;\n    return state.remoteHostsConfig[state.localCluster];\n};\n\nexport const getLocalCluster = (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): string => {\n    return getState().auth.localCluster;\n};\n\nexport const saveApiToken = (token: string) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<any> => {\n    let config: any;\n    const tokenParts = token.split('/');\n    const auth = getState().auth;\n    config = dispatch<any>(getConfig);\n\n    // If the token is from a LoginCluster federation, get user & token data\n    // from the token issuing cluster.\n    if (!config) {\n        return;\n    }\n    const lc = (config as Config).loginCluster\n    const tokenCluster = tokenParts.length === 3\n        ? tokenParts[1].substring(0, 5)\n        : undefined;\n    if (tokenCluster && tokenCluster !== auth.localCluster &&\n        lc && lc === tokenCluster) {\n        config = await getRemoteHostConfig(auth.remoteHosts[tokenCluster]);\n    }\n\n    const svc = createServices(config, { progressFn: () => { }, errorFn: () => { } });\n    setAuthorizationHeader(svc, token);\n    try {\n        const user = await svc.authService.getUserDetails();\n        const client = await svc.apiClientAuthorizationService.get('current');\n        const tokenExpiration = client.expiresAt ? new Date(client.expiresAt) : undefined;\n        const tokenLocation = await svc.authService.getStorageType();\n        dispatch(authActions.INIT_USER({ user, token, tokenExpiration, tokenLocation }));\n    } catch (e) {\n        dispatch(authActions.LOGOUT({ deleteLinkData: false, preservePath: false }));\n    }\n};\n\nexport const getNewExtraToken = (reuseStored: boolean = false) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const extraToken = getState().auth.extraApiToken;\n        if (reuseStored && extraToken !== undefined) {\n            const config = dispatch<any>(getConfig);\n            const svc = createServices(config, { progressFn: () => { }, errorFn: () => { } });\n            setAuthorizationHeader(svc, extraToken);\n            try {\n                // Check the extra token's validity before using it. Refresh its\n                // expiration date just in case it changed.\n                const client = await svc.apiClientAuthorizationService.get('current');\n                dispatch(authActions.SET_EXTRA_TOKEN({\n                    extraApiToken: extraToken,\n                    extraApiTokenExpiration: client.expiresAt ? 
new Date(client.expiresAt) : undefined,\n                }));\n                return extraToken;\n            } catch (e) {\n                dispatch(authActions.RESET_EXTRA_TOKEN());\n            }\n        }\n        const user = getState().auth.user;\n        const loginCluster = getState().auth.config.clusterConfig.Login.LoginCluster;\n        if (user === undefined) { return; }\n        if (loginCluster !== \"\" && getState().auth.homeCluster !== loginCluster) { return; }\n        try {\n            // Do not show errors on the create call, cluster security configuration may not\n            // allow token creation and there's no way to know that from workbench2 side in advance.\n            const client = await services.apiClientAuthorizationService.create(undefined, false);\n            const newExtraToken = getTokenV2(client);\n            dispatch(authActions.SET_EXTRA_TOKEN({\n                extraApiToken: newExtraToken,\n                extraApiTokenExpiration: client.expiresAt ? new Date(client.expiresAt) : undefined,\n            }));\n            return newExtraToken;\n        } catch {\n            console.warn(\"Cannot create new tokens with the current token, probably because of cluster's security settings.\");\n            return;\n        }\n    };\n\nexport const login = (uuidPrefix: string, homeCluster: string, loginCluster: string,\n    remoteHosts: { [key: string]: string }) => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        services.authService.login(uuidPrefix, homeCluster, loginCluster, remoteHosts);\n        dispatch(authActions.LOGIN());\n    };\n\nexport const logout = (deleteLinkData: boolean = false, preservePath: boolean = false) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) =>\n        dispatch(authActions.LOGOUT({ deleteLinkData, preservePath }))\n\nexport type AuthAction = UnionOf<typeof authActions>;\n"
  },
  {
    "path": "services/workbench2/src/store/auth/auth-middleware.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Middleware } from \"redux\";\nimport { authActions, } from \"./auth-action\";\nimport { ServiceRepository, setAuthorizationHeader, removeAuthorizationHeader } from \"services/services\";\nimport { initSessions } from \"store/auth/auth-action-session\";\nimport { User } from \"models/user\";\nimport { RootState } from 'store/store';\nimport { progressIndicatorActions, WORKBENCH_LOADING_SCREEN } from \"store/progress-indicator/progress-indicator-actions\";\nimport { navigateToMyAccount } from 'store/navigation/navigation-action';\n\nexport const authMiddleware = (services: ServiceRepository): Middleware => store => next => action => {\n    // Middleware to update external state (local storage, window\n    // title) to ensure that they stay in sync with redux state.\n\n    authActions.match(action, {\n        INIT_USER: ({ user, token }) => {\n            // The \"next\" method passes the action to the next\n            // middleware in the chain, or the reducer.  That means\n            // after next() returns, the action has (presumably) been\n            // applied by the reducer to update the state.\n            next(action);\n\n            const state: RootState = store.getState();\n\n            if (state.auth.apiToken) {\n                services.authService.saveApiToken(state.auth.apiToken);\n                setAuthorizationHeader(services, state.auth.apiToken);\n            } else {\n                services.authService.removeApiToken();\n                services.authService.removeSessions();\n                removeAuthorizationHeader(services);\n            }\n\n            store.dispatch<any>(initSessions(services.authService, state.auth.remoteHostsConfig[state.auth.localCluster], user));\n            if (Object.keys(state.auth.config.clusterConfig.Workbench.UserProfileFormFields).length > 0 &&\n                user.isActive &&\n                (Object.keys(user.prefs).length === 0 ||\n                    user.prefs.profile === undefined ||\n                    Object.keys(user.prefs.profile!).length === 0)) {\n                // If the user doesn't have a profile set, send them\n                // to the user profile page to encourage them to fill it out.\n                store.dispatch(navigateToMyAccount);\n            }\n            if (!user.isActive) {\n                // As a special case, if the user is inactive, they\n                // may be able to self-activate using the \"activate\"\n                // method.  Note, for this to work there can't be any\n                // unsigned user agreements, we assume the API server is just going to\n                // rubber-stamp our activation request.  
At some point in the future we'll\n                // want to either add support for displaying/signing user\n                // agreements or get rid of self-activation.\n                // For more details, see:\n                // https://doc.arvados.org/main/admin/user-management.html\n\n                store.dispatch(progressIndicatorActions.START_WORKING(WORKBENCH_LOADING_SCREEN));\n                services.userService.activate(user.uuid).then((user: User) => {\n                    store.dispatch(authActions.INIT_USER({ user, token }));\n                    store.dispatch(progressIndicatorActions.STOP_WORKING(WORKBENCH_LOADING_SCREEN));\n                }).catch(() => {\n                    store.dispatch(progressIndicatorActions.STOP_WORKING(WORKBENCH_LOADING_SCREEN));\n                });\n            }\n        },\n        SET_CONFIG: ({ config }) => {\n            document.title = `Arvados (${config.uuidPrefix})`;\n            next(action);\n        },\n        LOGOUT: ({ deleteLinkData, preservePath }) => {\n            next(action);\n            if (deleteLinkData) {\n                services.linkAccountService.removeAccountToLink();\n            }\n            const token = services.authService.getApiToken();\n            services.authService.removeApiToken();\n            services.authService.removeSessions();\n            services.authService.removeUser();\n            removeAuthorizationHeader(services);\n            services.authService.logout(token || '', preservePath);\n        },\n        default: () => next(action)\n    });\n};\n"
  },
  {
    "path": "services/workbench2/src/store/auth/auth-reducer.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { authReducer } from \"./auth-reducer\";\nimport { authActions } from \"./auth-action\";\nimport { createServices } from \"services/services\";\nimport { mockConfig } from 'common/config';\n\ndescribe('auth-reducer', () => {\n    let reducer;\n    const actions = {\n        progressFn: (id, working) => { },\n        errorFn: (id, message) => { }\n    };\n\n    before(() => {\n        localStorage.clear();\n        reducer = authReducer(createServices(mockConfig({}), actions));\n    });\n\n    it('should correctly initialise state', () => {\n        const initialState = undefined;\n        const user = {\n            email: \"test@test.com\",\n            firstName: \"John\",\n            lastName: \"Doe\",\n            uuid: \"zzzzz-tpzed-xurymjxw79nv3jz\",\n            ownerUuid: \"ownerUuid\",\n            username: \"username\",\n            prefs: {},\n            isAdmin: false,\n            isActive: true,\n            canWrite: false,\n            canManage: false,\n        };\n        const state = reducer(initialState, authActions.INIT_USER({ user, token: \"token\" }));\n        expect(state).to.deep.equal({\n            apiToken: \"token\",\n            apiTokenExpiration: undefined,\n            apiTokenLocation: undefined,\n            config: mockConfig({}),\n            user,\n            sshKeys: [],\n            sessions: [],\n            extraApiToken: undefined,\n            extraApiTokenExpiration: undefined,\n            homeCluster: \"zzzzz\",\n            localCluster: \"\",\n            loginCluster: \"\",\n            remoteHosts: {},\n            remoteHostsConfig: {}\n        });\n    });\n\n    it('should set user details on success fetch', () => {\n        const initialState = undefined;\n\n        const user = {\n            email: \"test@test.com\",\n            firstName: \"John\",\n            lastName: \"Doe\",\n            uuid: \"uuid\",\n            ownerUuid: \"ownerUuid\",\n            username: \"username\",\n            prefs: {},\n            isAdmin: false,\n            isActive: true,\n            canWrite: false,\n            canManage: false,\n        };\n\n        const state = reducer(initialState, authActions.USER_DETAILS_SUCCESS(user));\n        expect(state).to.deep.equal({\n            apiToken: undefined,\n            apiTokenExpiration: undefined,\n            apiTokenLocation: undefined,\n            config: mockConfig({}),\n            sshKeys: [],\n            sessions: [],\n            extraApiToken: undefined,\n            extraApiTokenExpiration: undefined,\n            homeCluster: \"uuid\",\n            localCluster: \"\",\n            loginCluster: \"\",\n            remoteHosts: {},\n            remoteHostsConfig: {},\n            user: {\n                email: \"test@test.com\",\n                firstName: \"John\",\n                lastName: \"Doe\",\n                uuid: \"uuid\",\n                ownerUuid: \"ownerUuid\",\n                username: \"username\",\n                prefs: {},\n                isAdmin: false,\n                isActive: true,\n                canManage: false,\n                canWrite: false,\n            }\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/store/auth/auth-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { authActions, AuthAction } from \"./auth-action\";\nimport { User } from \"models/user\";\nimport { ServiceRepository } from \"services/services\";\nimport { SshKeyResource } from 'models/ssh-key';\nimport { Session } from \"models/session\";\nimport { Config, mockConfig } from 'common/config';\n\nexport interface AuthState {\n    user?: User;\n    apiToken?: string;\n    apiTokenExpiration?: Date;\n    apiTokenLocation?: string;\n    extraApiToken?: string;\n    extraApiTokenExpiration?: Date;\n    sshKeys: SshKeyResource[];\n    sessions: Session[];\n    localCluster: string;\n    homeCluster: string;\n    loginCluster: string;\n    remoteHosts: { [key: string]: string };\n    remoteHostsConfig: { [key: string]: Config };\n    config: Config;\n}\n\nconst initialState: AuthState = {\n    user: undefined,\n    apiToken: undefined,\n    apiTokenExpiration: undefined,\n    apiTokenLocation: undefined,\n    extraApiToken: undefined,\n    extraApiTokenExpiration: undefined,\n    sshKeys: [],\n    sessions: [],\n    localCluster: \"\",\n    homeCluster: \"\",\n    loginCluster: \"\",\n    remoteHosts: {},\n    remoteHostsConfig: {},\n    config: mockConfig({})\n};\n\nexport const authReducer = (services: ServiceRepository) => (state = initialState, action: AuthAction) => {\n    return authActions.match(action, {\n        SET_CONFIG: ({ config }) =>\n            ({\n                ...state,\n                config,\n                localCluster: config.uuidPrefix,\n                remoteHosts: {\n                    ...config.remoteHosts,\n                    [config.uuidPrefix]: new URL(config.rootUrl).host\n                },\n                homeCluster: config.loginCluster || config.uuidPrefix,\n                loginCluster: config.loginCluster,\n                remoteHostsConfig: {\n                    ...state.remoteHostsConfig,\n                    [config.uuidPrefix]: config\n                }\n            }),\n        REMOTE_CLUSTER_CONFIG: ({ config }) =>\n            ({\n                ...state,\n                remoteHostsConfig: {\n                    ...state.remoteHostsConfig,\n                    [config.uuidPrefix]: config\n                },\n            }),\n        SET_EXTRA_TOKEN: ({ extraApiToken, extraApiTokenExpiration }) =>\n            ({ ...state, extraApiToken, extraApiTokenExpiration }),\n        RESET_EXTRA_TOKEN: () =>\n            ({ ...state, extraApiToken: undefined, extraApiTokenExpiration: undefined }),\n        INIT_USER: ({ user, token, tokenExpiration, tokenLocation = state.apiTokenLocation }) =>\n            ({ ...state,\n                user,\n                apiToken: token,\n                apiTokenExpiration: tokenExpiration,\n                apiTokenLocation: tokenLocation,\n                homeCluster: user.uuid.substring(0, 5)\n            }),\n        LOGIN: () => state,\n        LOGOUT: () => ({ ...state, apiToken: undefined }),\n        USER_DETAILS_SUCCESS: (user: User) =>\n            ({ ...state, user, homeCluster: user.uuid.substring(0, 5) }),\n        SET_SSH_KEYS: (sshKeys: SshKeyResource[]) => ({ ...state, sshKeys }),\n        ADD_SSH_KEY: (sshKey: SshKeyResource) =>\n            ({ ...state, sshKeys: state.sshKeys.concat(sshKey) }),\n        REMOVE_SSH_KEY: (uuid: string) =>\n            ({ ...state, sshKeys: state.sshKeys.filter((sshKey) => sshKey.uuid !== uuid) }),\n        SET_HOME_CLUSTER: (homeCluster: 
string) => ({ ...state, homeCluster }),\n        SET_SESSIONS: (sessions: Session[]) => ({ ...state, sessions }),\n        ADD_SESSION: (session: Session) =>\n            ({ ...state, sessions: state.sessions.concat(session) }),\n        REMOVE_SESSION: (clusterId: string) =>\n            ({\n                ...state,\n                sessions: state.sessions.filter(\n                    session => session.clusterId !== clusterId\n                )\n            }),\n        UPDATE_SESSION: (session: Session) =>\n            ({\n                ...state,\n                sessions: state.sessions.map(\n                    s => s.clusterId === session.clusterId ? session : s\n                )\n            }),\n        default: () => state\n    });\n};\n"
  },
  {
    "path": "services/workbench2/src/store/auth/cluster-badges.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { Session } from 'models/session';\nimport { propertiesActions } from 'store/properties/properties-actions';\nimport { sortByProperty } from 'common/array-utils';\n\nexport type ClusterBadge = {\n    text: string,\n    color: string,\n    backgroundColor: string\n}\n\nexport const initClusterBadges = (sessions: Session[]) => (dispatch: Dispatch) => {\n\n    const sortedSessions = [...sessions].sort(sortByProperty('name'));\n\n    const bgColors = [\n        '#000fd0', // royal blue\n        '#fb6b1c', // orange\n        '#2e8b57', // sea green\n        '#580082', // purple\n        '#733e07', // brown\n        '#961e0a', // dark red\n        '#ff49b4', // pink\n        '#00c6c9', // turquoise\n        '#c1802f', // tan\n        '#1e90ff', // light blue\n        '#972be2', // violet\n        '#baa844', // mustard yellow\n    ];\n    \n    const badges: ClusterBadge[] = sortedSessions.map((session, i) => {\n        const color = bgColors[i % bgColors.length];\n        return {\n            text: session.clusterId,\n            color: '#fff',\n            backgroundColor: color,\n        };\n    });\n\n    dispatch(propertiesActions.SET_PROPERTY({ key: 'clusterBadges', value: badges }));\n};\n"
  },
  {
    "path": "services/workbench2/src/store/banner/banner-action.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { unionize, UnionOf } from 'common/unionize';\n\nexport const bannerReducerActions = unionize({\n    OPEN_BANNER: {},\n    CLOSE_BANNER: {},\n});\n\nexport type BannerAction = UnionOf<typeof bannerReducerActions>;\n\nexport const openBanner = () =>\n    async (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch(bannerReducerActions.OPEN_BANNER());\n    };\n\nexport const closeBanner = () =>\n    async (dispatch: Dispatch<any>, getState: () => RootState) => {\n        dispatch(bannerReducerActions.CLOSE_BANNER());\n    };\n\nexport const bannerActions = {\n    openBanner,\n    closeBanner\n};\n"
  },
  {
    "path": "services/workbench2/src/store/banner/banner-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { BannerAction, bannerReducerActions } from \"./banner-action\";\n\nexport interface BannerState {\n    isOpen: boolean;\n}\n\nconst initialState = {\n    isOpen: false,\n};\n\nexport const bannerReducer = (state: BannerState = initialState, action: BannerAction) =>\n    bannerReducerActions.match(action, {\n        default: () => state,\n        OPEN_BANNER: () => ({\n             ...state,\n             isOpen: true,\n        }),\n        CLOSE_BANNER: () => ({\n            ...state,\n            isOpen: false,\n       }),\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/breadcrumbs/breadcrumbs-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { getUserUuid } from \"common/getuser\";\nimport { getResource } from 'store/resources/resources';\nimport { propertiesActions } from '../properties/properties-actions';\nimport { getProcess } from 'store/processes/process';\nimport { ServiceRepository } from 'services/services';\nimport { SidePanelTreeCategory, activateSidePanelTreeItem } from 'store/side-panel-tree/side-panel-tree-actions';\nimport { updateResources } from '../resources/resources-actions';\nimport { ResourceKind } from 'models/resource';\nimport { GroupResource } from 'models/group';\nimport { extractUuidKind } from 'models/resource';\nimport { UserResource } from 'models/user';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { ProcessResource } from 'models/process';\nimport { OrderBuilder } from 'services/api/order-builder';\nimport { Breadcrumb } from 'components/breadcrumbs/breadcrumbs';\nimport { ContainerRequestResource, containerRequestFieldsNoMounts } from 'models/container-request';\nimport { AdminMenuIcon, CollectionIcon, IconType, ProcessIcon, ProjectIcon, ResourceIcon, TerminalIcon, WorkflowIcon, FolderKeyIcon, WheelIcon } from 'components/icon/icon';\nimport { CollectionResource } from 'models/collection';\nimport { getSidePanelIcon } from 'store/side-panel-tree/side-panel-tree-actions';\nimport { WorkflowResource } from 'models/workflow';\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\n\nexport const BREADCRUMBS = 'breadcrumbs';\n\nexport const setBreadcrumbs = (breadcrumbs: any, currentItem?: CollectionResource | ContainerRequestResource | GroupResource | WorkflowResource) => {\n    if (currentItem) {\n        const currentCrumb = resourceToBreadcrumb(currentItem)\n        if (currentCrumb.label.length) breadcrumbs.push(currentCrumb);\n    }\n    return propertiesActions.SET_PROPERTY({ key: BREADCRUMBS, value: breadcrumbs });\n};\n\nconst resourceToBreadcrumbIcon = (resource: CollectionResource | ContainerRequestResource | GroupResource | WorkflowResource): IconType | undefined => {\n    switch (resource.kind) {\n        case ResourceKind.PROJECT:\n            return ProjectIcon;\n        case ResourceKind.PROCESS:\n            return ProcessIcon;\n        case ResourceKind.COLLECTION:\n            return CollectionIcon;\n        case ResourceKind.WORKFLOW:\n            return WorkflowIcon;\n        default:\n            return undefined;\n    }\n}\n\nconst resourceToBreadcrumb = (resource: (CollectionResource | ContainerRequestResource | GroupResource | WorkflowResource) & {fullName?: string}  ): Breadcrumb => ({\n    label: resource.name || resource.fullName || '',\n    uuid: resource.uuid,\n    icon: resourceToBreadcrumbIcon(resource),\n})\n\nexport const setSidePanelBreadcrumbs = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            dispatch(progressIndicatorActions.START_WORKING(uuid + \"-breadcrumbs\"));\n            const ancestors = await services.ancestorsService.ancestors(uuid, '');\n            dispatch(updateResources(ancestors));\n\n            let breadcrumbs: Breadcrumb[] = [];\n            const { collectionPanel: { item } } = getState();\n\n            const path = getState().router.location!.pathname;\n            const currentUuid 
= path.split('/')[2];\n            const uuidKind = extractUuidKind(currentUuid);\n            const rootUuid = getUserUuid(getState());\n\n            if (ancestors.find(ancestor => ancestor.uuid === rootUuid)) {\n                // Handle home project uuid root\n                breadcrumbs.push({\n                    label: SidePanelTreeCategory.PROJECTS,\n                    uuid: SidePanelTreeCategory.PROJECTS,\n                    icon: getSidePanelIcon(SidePanelTreeCategory.PROJECTS)\n                });\n            } else if (uuidKind === ResourceKind.USER) {\n                // Handle another user root project\n                const user = getResource<UserResource>(uuid)(getState().resources);\n                breadcrumbs.push({\n                    label: (user as any)?.fullName || user?.username || uuid,\n                    uuid: user?.uuid || uuid,\n                    icon: getSidePanelIcon(SidePanelTreeCategory.PROJECTS)\n                });\n            } else if (Object.values(SidePanelTreeCategory).includes(uuid as SidePanelTreeCategory)) {\n                // Handle SidePanelTreeCategory root\n                breadcrumbs.push({\n                    label: uuid,\n                    uuid: uuid,\n                    icon: getSidePanelIcon(uuid)\n                });\n            }\n\n            breadcrumbs = ancestors.reduce((breadcrumbs, ancestor) =>\n                ancestor.kind === ResourceKind.GROUP\n                    ? [...breadcrumbs, resourceToBreadcrumb(ancestor)]\n                    : breadcrumbs,\n                breadcrumbs);\n\n            if (uuidKind === ResourceKind.COLLECTION) {\n                const collectionItem = item ? item : await services.collectionService.get(currentUuid);\n                const parentProcessItem = await getCollectionParent(collectionItem)(services);\n                if (parentProcessItem) {\n                    const mainProcessItem = await getProcessParent(parentProcessItem)(services);\n                    mainProcessItem && breadcrumbs.push(resourceToBreadcrumb(mainProcessItem));\n                    breadcrumbs.push(resourceToBreadcrumb(parentProcessItem));\n                }\n                dispatch(setBreadcrumbs(breadcrumbs, collectionItem));\n            } else if (uuidKind === ResourceKind.PROCESS) {\n                const processItem = await services.containerRequestService.get(currentUuid);\n                const parentProcessItem = await getProcessParent(processItem)(services);\n                if (parentProcessItem) {\n                    breadcrumbs.push(resourceToBreadcrumb(parentProcessItem));\n                }\n                dispatch(setBreadcrumbs(breadcrumbs, processItem));\n            } else if (uuidKind === ResourceKind.WORKFLOW) {\n                const workflowItem = await services.workflowService.get(currentUuid);\n                dispatch(setBreadcrumbs(breadcrumbs, workflowItem));\n            }\n            dispatch(setBreadcrumbs(breadcrumbs));\n        } catch (e) {\n            console.log(\"Error setting breadcrumbs \"+e);\n        } finally {\n            dispatch(progressIndicatorActions.STOP_WORKING(uuid + \"-breadcrumbs\"));\n        }\n    };\n\nexport const setSharedWithMeBreadcrumbs = (uuid: string) =>\n    setCategoryBreadcrumbs(uuid, SidePanelTreeCategory.SHARED_WITH_ME);\n\nexport const setTrashBreadcrumbs = (uuid: string) =>\n    setCategoryBreadcrumbs(uuid, SidePanelTreeCategory.TRASH);\n\nexport const setCategoryBreadcrumbs = (uuid: string, category: string) =>\n    async 
(dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            dispatch(progressIndicatorActions.START_WORKING(uuid + \"-breadcrumbs\"));\n            const ancestors = await services.ancestorsService.ancestors(uuid, '');\n            dispatch(updateResources(ancestors));\n            const initialBreadcrumbs: Breadcrumb[] = [\n                {\n                    label: category,\n                    uuid: category,\n                    icon: getSidePanelIcon(category)\n                }\n            ];\n            const { collectionPanel: { item } } = getState();\n            const path = getState().router.location!.pathname;\n            const currentUuid = path.split('/')[2];\n            const uuidKind = extractUuidKind(currentUuid);\n            let breadcrumbs = ancestors.reduce((breadcrumbs, ancestor) =>\n                ancestor.kind === ResourceKind.GROUP\n                    ? [...breadcrumbs, resourceToBreadcrumb(ancestor)]\n                    : breadcrumbs,\n                initialBreadcrumbs);\n            if (uuidKind === ResourceKind.COLLECTION) {\n                const collectionItem = item ? item : await services.collectionService.get(currentUuid);\n                const parentProcessItem = await getCollectionParent(collectionItem)(services);\n                if (parentProcessItem) {\n                    const mainProcessItem = await getProcessParent(parentProcessItem)(services);\n                    mainProcessItem && breadcrumbs.push(resourceToBreadcrumb(mainProcessItem));\n                    breadcrumbs.push(resourceToBreadcrumb(parentProcessItem));\n                }\n                dispatch(setBreadcrumbs(breadcrumbs, collectionItem));\n            } else if (uuidKind === ResourceKind.PROCESS) {\n                const processItem = await services.containerRequestService.get(currentUuid);\n                const parentProcessItem = await getProcessParent(processItem)(services);\n                if (parentProcessItem) {\n                    breadcrumbs.push(resourceToBreadcrumb(parentProcessItem));\n                }\n                dispatch(setBreadcrumbs(breadcrumbs, processItem));\n            } else if (uuidKind === ResourceKind.WORKFLOW) {\n                const workflowItem = await services.workflowService.get(currentUuid);\n                dispatch(setBreadcrumbs(breadcrumbs, workflowItem));\n            }\n            dispatch(setBreadcrumbs(breadcrumbs));\n        } finally {\n            dispatch(progressIndicatorActions.STOP_WORKING(uuid + \"-breadcrumbs\"));\n        }\n    };\n\nconst getProcessParent = (childProcess: ContainerRequestResource) =>\n    async (services: ServiceRepository): Promise<ContainerRequestResource | undefined> => {\n        if (childProcess.requestingContainerUuid) {\n            const parentProcesses = await services.containerRequestService.list({\n                order: new OrderBuilder<ProcessResource>().addAsc('createdAt').getOrder(),\n                filters: new FilterBuilder().addEqual('container_uuid', childProcess.requestingContainerUuid).getFilters(),\n                select: containerRequestFieldsNoMounts,\n            });\n            if (parentProcesses.items.length > 0) {\n                return parentProcesses.items[0];\n            } else {\n                return undefined;\n            }\n        } else {\n            return undefined;\n        }\n    }\n\nconst getCollectionParent = (collection: CollectionResource) =>\n    async (services: ServiceRepository): 
Promise<ContainerRequestResource | undefined> => {\n        const parentOutputPromise = services.containerRequestService.list({\n            order: new OrderBuilder<ProcessResource>().addAsc('createdAt').getOrder(),\n            filters: new FilterBuilder().addEqual('output_uuid', collection.uuid).getFilters(),\n            select: containerRequestFieldsNoMounts,\n        });\n        const parentLogPromise = services.containerRequestService.list({\n            order: new OrderBuilder<ProcessResource>().addAsc('createdAt').getOrder(),\n            filters: new FilterBuilder().addEqual('log_uuid', collection.uuid).getFilters(),\n            select: containerRequestFieldsNoMounts,\n        });\n        const [parentOutput, parentLog] = await Promise.all([parentOutputPromise, parentLogPromise]);\n        return parentOutput.items.length > 0 ?\n            parentOutput.items[0] :\n            parentLog.items.length > 0 ?\n                parentLog.items[0] :\n                undefined;\n    }\n\n\nexport const setProjectBreadcrumbs = (uuid: string) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const ancestors = await services.ancestorsService.ancestors(uuid, '');\n        const rootUuid = getUserUuid(getState());\n        if (uuid === rootUuid || ancestors.find(ancestor => ancestor.uuid === rootUuid)) {\n            dispatch(setSidePanelBreadcrumbs(uuid));\n        } else {\n            dispatch(setSharedWithMeBreadcrumbs(uuid));\n            dispatch(activateSidePanelTreeItem(SidePanelTreeCategory.SHARED_WITH_ME));\n        }\n    };\n\nexport const setProcessBreadcrumbs = (processUuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const { resources } = getState();\n        const process = getProcess(processUuid)(resources);\n        if (process) {\n            dispatch<any>(setProjectBreadcrumbs(process.containerRequest.ownerUuid));\n        }\n    };\n\nexport const setGroupsBreadcrumbs = () =>\n    setBreadcrumbs([{\n        label: SidePanelTreeCategory.GROUPS,\n        uuid: SidePanelTreeCategory.GROUPS,\n        icon: getSidePanelIcon(SidePanelTreeCategory.GROUPS)\n    }]);\n\nexport const setGroupDetailsBreadcrumbs = (groupUuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n\n        const group = getResource<GroupResource>(groupUuid)(getState().resources);\n\n        const breadcrumbs: Breadcrumb[] = [\n            {\n                label: SidePanelTreeCategory.GROUPS,\n                uuid: SidePanelTreeCategory.GROUPS,\n                icon: getSidePanelIcon(SidePanelTreeCategory.GROUPS)\n            },\n            { label: group ? 
group.name : (await services.groupsService.get(groupUuid)).name, uuid: groupUuid },\n        ];\n\n        dispatch(setBreadcrumbs(breadcrumbs));\n\n    };\n\nexport const USERS_PANEL_LABEL = 'Users';\n\nexport const setUsersBreadcrumbs = () =>\n    setBreadcrumbs([{ label: USERS_PANEL_LABEL, uuid: USERS_PANEL_LABEL }]);\n\nexport const setUserProfileBreadcrumbs = (userUuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            const user = getResource<UserResource>(userUuid)(getState().resources)\n                || (await services.userService.get(userUuid, false));\n            const userProfileBreadcrumbs: Breadcrumb[] = [\n                { label: USERS_PANEL_LABEL, uuid: USERS_PANEL_LABEL },\n                { label: user ? `${user.firstName} ${user.lastName}` : userUuid, uuid: userUuid },\n            ];\n            dispatch(setBreadcrumbs(userProfileBreadcrumbs));\n        } catch (e) {\n            const breadcrumbs: Breadcrumb[] = [\n                { label: USERS_PANEL_LABEL, uuid: USERS_PANEL_LABEL },\n                { label: userUuid, uuid: userUuid },\n            ];\n            dispatch(setBreadcrumbs(breadcrumbs));\n        }\n    };\n\nexport const MY_ACCOUNT_PANEL_LABEL = 'My Account';\n\nexport const setMyAccountBreadcrumbs = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(setBreadcrumbs([\n            { label: MY_ACCOUNT_PANEL_LABEL, uuid: MY_ACCOUNT_PANEL_LABEL },\n        ]));\n    };\n\nexport const USER_PREFERENCES_LABEL = 'Preferences';\n\nexport const setUserPreferencesBreadcrumbs = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(setBreadcrumbs([\n            { label: USER_PREFERENCES_LABEL, uuid: USER_PREFERENCES_LABEL },\n        ]));\n    };\n\nexport const INSTANCE_TYPES_PANEL_LABEL = 'Instance Types';\n\nexport const setInstanceTypesBreadcrumbs = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(setBreadcrumbs([\n            { label: INSTANCE_TYPES_PANEL_LABEL, uuid: INSTANCE_TYPES_PANEL_LABEL, icon: ResourceIcon },\n        ]));\n    };\n\nexport const setVirtualMachinesBreadcrumbs = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(setBreadcrumbs([\n            { label: SidePanelTreeCategory.SHELL_ACCESS, uuid: SidePanelTreeCategory.SHELL_ACCESS, icon: TerminalIcon },\n        ]));\n    };\n\nexport const VIRTUAL_MACHINES_ADMIN_PANEL_LABEL = 'Shell Access Admin';\n\nexport const setVirtualMachinesAdminBreadcrumbs = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(setBreadcrumbs([\n            { label: VIRTUAL_MACHINES_ADMIN_PANEL_LABEL, uuid: VIRTUAL_MACHINES_ADMIN_PANEL_LABEL, icon: AdminMenuIcon },\n        ]));\n    };\n\nexport const REPOSITORIES_PANEL_LABEL = 'Repositories';\n\nexport const setRepositoriesBreadcrumbs = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(setBreadcrumbs([\n            { label: REPOSITORIES_PANEL_LABEL, uuid: REPOSITORIES_PANEL_LABEL, icon: AdminMenuIcon },\n        ]));\n    };\n\nexport const setExternalCredentialsBreadcrumbs = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        
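// Fixed single-crumb trail; no ancestor lookup is needed for this panel.\n        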
dispatch(setBreadcrumbs([\n            { label: SidePanelTreeCategory.EXTERNAL_CREDENTIALS, uuid: SidePanelTreeCategory.EXTERNAL_CREDENTIALS, icon: FolderKeyIcon },\n        ]));\n    };\n\nexport const setDashboardBreadcrumbs = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(setBreadcrumbs([\n            { label: SidePanelTreeCategory.DASHBOARD, uuid: SidePanelTreeCategory.DASHBOARD, icon: WheelIcon },\n        ]));\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/collection-panel/collection-panel-action.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { CollectionResource } from 'models/collection';\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { snackbarActions } from \"../snackbar/snackbar-actions\";\nimport { resourcesActions } from \"store/resources/resources-actions\";\nimport { unionize, ofType, UnionOf } from 'common/unionize';\nimport { SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { navigateTo } from 'store/navigation/navigation-action';\nimport { loadDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\n\nexport const collectionPanelActions = unionize({\n    SET_COLLECTION: ofType<CollectionResource>(),\n    RESET_COLLECTION_PANEL: ofType<{}>(),\n});\n\nexport type CollectionPanelAction = UnionOf<typeof collectionPanelActions>;\n\nexport const loadCollectionPanel = (uuid: string, forceReload = false) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const { collectionPanel: { item } } = getState();\n        let collection: CollectionResource | null = null;\n        if (!item || item.uuid !== uuid || forceReload) {\n            try {\n                dispatch(progressIndicatorActions.START_WORKING(uuid + \"-panel\"));\n                collection = await services.collectionService.get(uuid);\n                dispatch(collectionPanelActions.SET_COLLECTION(collection));\n                dispatch(resourcesActions.SET_RESOURCES([collection]));\n            } finally {\n                dispatch(progressIndicatorActions.STOP_WORKING(uuid + \"-panel\"));\n            }\n        } else {\n            collection = item;\n        }\n        dispatch<any>(loadDetailsPanel(collection.uuid));\n        return collection;\n    };\n\nexport const navigateToProcess = (uuid: string) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            await services.containerRequestService.get(uuid);\n            dispatch<any>(navigateTo(uuid));\n        } catch {\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'This process does not exist!', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/collection-panel/collection-panel-files/collection-panel-files-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\nimport { Dispatch } from \"redux\";\nimport { CollectionFilesTree, CollectionFileType, createCollectionFilesTree, CollectionFile } from \"models/collection-file\";\nimport { ServiceRepository } from \"services/services\";\nimport { RootState } from \"../../store\";\nimport { showErrorSnackbar, snackbarActions, SnackbarKind } from \"../../snackbar/snackbar-actions\";\nimport { dialogActions } from '../../dialog/dialog-actions';\nimport { getNodeValue, mapTreeValues } from \"models/tree\";\nimport { CollectionPanelDirectory, CollectionPanelFile, filterCollectionFilesBySelection } from './collection-panel-files-state';\nimport { startSubmit, stopSubmit, initialize, FormErrors } from 'redux-form';\nimport { getDialog } from \"store/dialog/dialog-reducer\";\nimport { getFileFullPath, sortFilesTree } from \"services/collection-service/collection-service-files-response\";\nimport { CollectionResource } from \"models/collection\";\nimport { loadCollection } from \"store/workbench/workbench-actions\";\n\nexport const collectionPanelFilesAction = unionize({\n    SET_COLLECTION_FILES: ofType<CollectionFilesTree>(),\n    TOGGLE_COLLECTION_FILE_COLLAPSE: ofType<{ id: string }>(),\n    TOGGLE_COLLECTION_FILE_SELECTION: ofType<{ id: string }>(),\n    SELECT_ALL_COLLECTION_FILES: ofType<{}>(),\n    UNSELECT_ALL_COLLECTION_FILES: ofType<{}>(),\n    ON_SEARCH_CHANGE: ofType<string>(),\n});\n\nexport type CollectionPanelFilesAction = UnionOf<typeof collectionPanelFilesAction>;\n\nexport const COLLECTION_PANEL_LOAD_FILES = 'collectionPanelLoadFiles';\n\nexport const setCollectionFiles = (files: CollectionFile[], joinParents = true) => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    const tree = createCollectionFilesTree(files, joinParents);\n    const sorted = sortFilesTree(tree);\n    const mapped = mapTreeValues(services.collectionService.extendFileURL)(sorted);\n    dispatch(collectionPanelFilesAction.SET_COLLECTION_FILES(mapped));\n};\n\nexport const removeCollectionFiles = (filePaths: string[]) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const currentCollection = getState().collectionPanel.item;\n        if (currentCollection) {\n            services.collectionService.deleteFiles(currentCollection.uuid, filePaths).then(() => {\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Removed.',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS\n                }));\n            }).catch(e =>\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Could not remove file.',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.ERROR\n                }))\n            );\n        }\n    };\n\nexport const removeCollectionsSelectedFiles = () =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const paths = filterCollectionFilesBySelection(getState().collectionPanelFiles, true)\n            .map(getFileFullPath);\n        dispatch<any>(removeCollectionFiles(paths));\n    };\n\nexport const FILE_REMOVE_DIALOG = 'fileRemoveDialog';\n\nexport const openFileRemoveDialog = (fileUuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const file = 
getNodeValue(fileUuid)(getState().collectionPanelFiles);\n        if (file) {\n            const filePath = getFileFullPath(file);\n            const isDirectory = file.type === CollectionFileType.DIRECTORY;\n            const title = isDirectory\n                ? 'Removing directory'\n                : 'Removing file';\n            const text = isDirectory\n                ? 'Are you sure you want to remove this directory?'\n                : 'Are you sure you want to remove this file?';\n            const info = isDirectory\n                ? 'Removing files will change content address.'\n                : 'Removing a file will change content address.';\n\n            dispatch(dialogActions.OPEN_DIALOG({\n                id: FILE_REMOVE_DIALOG,\n                data: {\n                    title,\n                    text,\n                    info,\n                    confirmButtonLabel: 'Remove',\n                    filePath\n                }\n            }));\n        }\n    };\n\nexport const MULTIPLE_FILES_REMOVE_DIALOG = 'multipleFilesRemoveDialog';\n\nexport const openMultipleFilesRemoveDialog = () =>\n    dialogActions.OPEN_DIALOG({\n        id: MULTIPLE_FILES_REMOVE_DIALOG,\n        data: {\n            title: 'Removing files',\n            text: 'Are you sure you want to remove selected files?',\n            info: 'Removing files will change content address.',\n            confirmButtonLabel: 'Remove'\n        }\n    });\n\nexport const RENAME_FILE_DIALOG = 'renameFileDialog';\nexport interface RenameFileDialogData {\n    name: string;\n    id: string;\n    path: string;\n}\n\nexport const openRenameFileDialog = (data: RenameFileDialogData) =>\n    (dispatch: Dispatch) => {\n        dispatch(initialize(RENAME_FILE_DIALOG, data));\n        dispatch(dialogActions.OPEN_DIALOG({ id: RENAME_FILE_DIALOG, data }));\n    };\n\nexport const renameFile = (newFullPath: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const dialog = getDialog<RenameFileDialogData>(getState().dialog, RENAME_FILE_DIALOG);\n        const currentCollection = getState().collectionPanel.item;\n        if (dialog && currentCollection) {\n            const file = getNodeValue(dialog.data.id)(getState().collectionPanelFiles);\n            if (file) {\n                dispatch(startSubmit(RENAME_FILE_DIALOG));\n                const oldPath = getFileFullPath(file);\n                const newPath = newFullPath;\n                services.collectionService.renameFile(currentCollection.uuid, currentCollection.portableDataHash, oldPath, newPath).then(() => {\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'File name changed.', hideDuration: 2000 }));\n                    dispatch<any>(loadCollection(currentCollection.uuid));\n                }).catch(e => {\n                    const errors: FormErrors<RenameFileDialogData, string> = {\n                        path: `Could not rename the file: ${e.responseText}`\n                    };\n                    dispatch(stopSubmit(RENAME_FILE_DIALOG, errors));\n                });\n            }\n        }\n        dispatch(dialogActions.CLOSE_DIALOG({ id: RENAME_FILE_DIALOG }));\n    };\n\nexport const DOWNLOAD_ZIP_DIALOG = 'downloadZipDialog';\n\nexport const openDownloadZipDialog = () =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const sourceCollection = getState().collectionPanel.item;\n        const files = 
filterCollectionFilesBySelection(getState().collectionPanelFiles, true);\n        const paths = files.map(getFileFullPath);\n\n        if (sourceCollection) {\n            const fileName = getCollectionZipFilename(sourceCollection, files);\n            dispatch(initialize(DOWNLOAD_ZIP_DIALOG, { collectionUuid: sourceCollection.uuid, paths, fileName }));\n            dispatch(dialogActions.OPEN_DIALOG({ id: DOWNLOAD_ZIP_DIALOG, data: {} }));\n        }\n    };\n\nconst getCollectionZipFilename = (collection: CollectionResource, files: (CollectionPanelFile | CollectionPanelDirectory)[]) => {\n    let additionalName = \"\";\n    if (files.length === 1) {\n        additionalName = ` - ${files[0].name}`;\n    } else if (files.length > 1) {\n        additionalName = ` - ${files.length} files`;\n    }\n\n    return `${collection.name}${additionalName}.zip`;\n};\n\nexport const downloadZip = (uuid: string, paths: string[], fileName: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            await services.collectionService.downloadZip(uuid, paths, fileName);\n        } catch (e) {\n            dispatch(showErrorSnackbar(`Error creating ZIP${e.message ? `: ${e.message}` : \"\"}`));\n        } finally {\n            dispatch(dialogActions.CLOSE_DIALOG({ id: DOWNLOAD_ZIP_DIALOG }));\n        }\n    }\n"
  },
  {
    "path": "services/workbench2/src/store/collection-panel/collection-panel-files/collection-panel-files-reducer.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { collectionPanelFilesReducer } from \"./collection-panel-files-reducer\";\nimport { collectionPanelFilesAction } from \"./collection-panel-files-actions\";\nimport { createCollectionFile, createCollectionDirectory } from \"models/collection-file\";\nimport { createTree, setNode, getNodeValue, mapTreeValues, TreeNodeStatus } from \"models/tree\";\n\ndescribe('CollectionPanelFilesReducer', () => {\n\n    const files = [\n        createCollectionDirectory({ id: 'Directory 1', name: 'Directory 1', path: '' }),\n        createCollectionDirectory({ id: 'Directory 2', name: 'Directory 2', path: 'Directory 1' }),\n        createCollectionDirectory({ id: 'Directory 3', name: 'Directory 3', path: '' }),\n        createCollectionDirectory({ id: 'Directory 4', name: 'Directory 4', path: 'Directory 3' }),\n        createCollectionFile({ id: 'file1.txt', name: 'file1.txt', path: 'Directory 2' }),\n        createCollectionFile({ id: 'file2.txt', name: 'file2.txt', path: 'Directory 2' }),\n        createCollectionFile({ id: 'file3.txt', name: 'file3.txt', path: 'Directory 3' }),\n        createCollectionFile({ id: 'file4.txt', name: 'file4.txt', path: 'Directory 3' }),\n        createCollectionFile({ id: 'file5.txt', name: 'file5.txt', path: 'Directory 4' }),\n    ];\n\n    const collectionFilesTree = files.reduce((tree, file) => setNode({\n        children: [],\n        id: file.id,\n        parent: file.path,\n        value: file,\n        active: false,\n        selected: false,\n        expanded: false,\n        status: TreeNodeStatus.INITIAL,\n    })(tree), createTree());\n\n    const collectionPanelFilesTree = collectionPanelFilesReducer(\n        createTree(),\n        collectionPanelFilesAction.SET_COLLECTION_FILES(collectionFilesTree));\n\n    it('SET_COLLECTION_FILES', () => {\n        expect(getNodeValue('Directory 1')(collectionPanelFilesTree)).to.deep.equal({\n            ...createCollectionDirectory({ id: 'Directory 1', name: 'Directory 1', path: '' }),\n            collapsed: true,\n            selected: false\n        });\n    });\n\n    it('TOGGLE_COLLECTION_FILE_COLLAPSE', () => {\n        const newTree = collectionPanelFilesReducer(\n            collectionPanelFilesTree,\n            collectionPanelFilesAction.TOGGLE_COLLECTION_FILE_COLLAPSE({ id: 'Directory 3' }));\n\n        const value = getNodeValue('Directory 3')(newTree);\n        expect(value.collapsed).to.equal(false);\n    });\n\n    it('TOGGLE_COLLECTION_FILE_SELECTION', () => {\n        const newTree = collectionPanelFilesReducer(\n            collectionPanelFilesTree,\n            collectionPanelFilesAction.TOGGLE_COLLECTION_FILE_SELECTION({ id: 'Directory 3' }));\n\n        const value = getNodeValue('Directory 3')(newTree);\n        expect(value.selected).to.equal(true);\n    });\n\n    it('TOGGLE_COLLECTION_FILE_SELECTION ancestors', () => {\n        const newTree = collectionPanelFilesReducer(\n            collectionPanelFilesTree,\n            collectionPanelFilesAction.TOGGLE_COLLECTION_FILE_SELECTION({ id: 'Directory 2' }));\n\n        const value = getNodeValue('Directory 1')(newTree);\n        expect(value.selected).to.equal(true);\n    });\n\n    it('TOGGLE_COLLECTION_FILE_SELECTION descendants', () => {\n        const newTree = collectionPanelFilesReducer(\n            collectionPanelFilesTree,\n            collectionPanelFilesAction.TOGGLE_COLLECTION_FILE_SELECTION({ id: 'Directory 2' 
}));\n        expect(getNodeValue('file1.txt')(newTree).selected).to.equal(true);\n        expect(getNodeValue('file2.txt')(newTree).selected).to.equal(true);\n    });\n\n    it('TOGGLE_COLLECTION_FILE_SELECTION unselect ancestors', () => {\n        const [newTree] = [collectionPanelFilesTree]\n            .map(tree => collectionPanelFilesReducer(\n                tree,\n                collectionPanelFilesAction.TOGGLE_COLLECTION_FILE_SELECTION({ id: 'Directory 2' })))\n            .map(tree => collectionPanelFilesReducer(\n                tree,\n                collectionPanelFilesAction.TOGGLE_COLLECTION_FILE_SELECTION({ id: 'file1.txt' })));\n\n        expect(getNodeValue('Directory 2')(newTree).selected).to.equal(false);\n    });\n\n    it('SELECT_ALL_COLLECTION_FILES', () => {\n        const newTree = collectionPanelFilesReducer(\n            collectionPanelFilesTree,\n            collectionPanelFilesAction.SELECT_ALL_COLLECTION_FILES());\n\n        mapTreeValues((v) => {\n            expect(v.selected).to.equal(true);\n            return v;\n        })(newTree);\n    });\n\n    it('UNSELECT_ALL_COLLECTION_FILES', () => {\n        const [newTree] = [collectionPanelFilesTree]\n            .map(tree => collectionPanelFilesReducer(\n                tree,\n                collectionPanelFilesAction.SELECT_ALL_COLLECTION_FILES()))\n            .map(tree => collectionPanelFilesReducer(\n                tree,\n                collectionPanelFilesAction.UNSELECT_ALL_COLLECTION_FILES()));\n\n        mapTreeValues((v) => {\n            expect(v.selected).to.equal(false);\n            return v;\n        })(newTree);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/store/collection-panel/collection-panel-files/collection-panel-files-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { CollectionPanelFilesState, CollectionPanelFile, CollectionPanelDirectory, mapCollectionFileToCollectionPanelFile, mergeCollectionPanelFilesStates } from './collection-panel-files-state';\nimport { CollectionPanelFilesAction, collectionPanelFilesAction } from \"./collection-panel-files-actions\";\nimport { createTree, mapTreeValues, getNode, setNode, getNodeAncestorsIds, getNodeDescendantsIds, setNodeValueWith, mapTree } from \"models/tree\";\nimport { CollectionFileType } from \"models/collection-file\";\n\nlet fetchedFiles: any = {};\n\nexport const collectionPanelFilesReducer = (state: CollectionPanelFilesState = createTree(), action: CollectionPanelFilesAction) => {\n    // Low-level tree handling setNode() func does in-place data modifications\n    // for performance reasons, so we pass a copy of 'state' to avoid side effects.\n    return collectionPanelFilesAction.match(action, {\n        SET_COLLECTION_FILES: files => {\n            fetchedFiles = files;\n            return mergeCollectionPanelFilesStates({ ...state }, mapTree(mapCollectionFileToCollectionPanelFile)(files));\n        },\n\n        TOGGLE_COLLECTION_FILE_COLLAPSE: data =>\n            toggleCollapse(data.id)({ ...state }),\n\n        TOGGLE_COLLECTION_FILE_SELECTION: data => [{ ...state }]\n            .map(toggleSelected(data.id))\n            .map(toggleAncestors(data.id))\n            .map(toggleDescendants(data.id))[0],\n\n        ON_SEARCH_CHANGE: (searchValue) => {\n            const fileIds: string[] = [];\n            const directoryIds: string[] = [];\n            const filteredFiles = Object.keys(fetchedFiles)\n                .filter((key: string) => {\n                    const node = fetchedFiles[key];\n\n                    if (node.value === undefined) {\n                        return false;\n                    }\n\n                    const { id, value: { type, name } } = node;\n\n                    if (type === CollectionFileType.DIRECTORY) {\n                        directoryIds.push(id);\n                        return true;\n                    }\n\n                    const includeFile = name.toLowerCase().indexOf(searchValue.toLowerCase()) > -1;\n\n                    if (includeFile) {\n                        fileIds.push(id);\n                    }\n\n                    return includeFile;\n                })\n                .reduce((prev, next) => {\n                    const node = JSON.parse(JSON.stringify(fetchedFiles[next]));\n                    const { value: { type }, children } = node;\n\n                    node.children = node.children.filter((key: string) => {\n                        const isFile = directoryIds.indexOf(key) === -1;\n                        return isFile ?\n                          fileIds.indexOf(key) > -1 :\n                          !!fileIds.find(id => id.indexOf(key) > -1);\n                    });\n\n                    if (type === CollectionFileType.FILE || children.length > 0) {\n                        prev[next] = node;\n                    }\n\n                    return prev;\n                }, {});\n\n            return mapTreeValues((v: CollectionPanelDirectory | CollectionPanelFile) => {\n                if (v.type === CollectionFileType.DIRECTORY) {\n                    return ({\n                        ...v,\n                        collapsed: searchValue.length === 0,\n                    });\n                
}\n\n                return ({ ...v });\n            })({ ...filteredFiles });\n        },\n\n        SELECT_ALL_COLLECTION_FILES: () =>\n            mapTreeValues((v: any) => ({ ...v, selected: true }))({ ...state }),\n\n        UNSELECT_ALL_COLLECTION_FILES: () =>\n            mapTreeValues((v: any) => ({ ...v, selected: false }))({ ...state }),\n\n        default: () => state\n    }) as CollectionPanelFilesState;\n};\n\nconst toggleCollapse = (id: string) => (tree: CollectionPanelFilesState) =>\n    setNodeValueWith((v: CollectionPanelDirectory | CollectionPanelFile) =>\n        v.type === CollectionFileType.DIRECTORY\n            ? { ...v, collapsed: !v.collapsed }\n            : v)(id)(tree);\n\nconst toggleSelected = (id: string) => (tree: CollectionPanelFilesState) =>\n    setNodeValueWith((v: CollectionPanelDirectory | CollectionPanelFile) => ({ ...v, selected: !v.selected }))(id)(tree);\n\nconst toggleDescendants = (id: string) => (tree: CollectionPanelFilesState) => {\n    const node = getNode(id)(tree);\n    if (node && node.value.type === CollectionFileType.DIRECTORY) {\n        return getNodeDescendantsIds(id)(tree)\n            .reduce((newTree, id) =>\n                setNodeValueWith((v: any) => ({ ...v, selected: node.value.selected }))(id)(newTree), tree);\n    }\n    return tree;\n};\n\n// Recompute the 'selected' flag on each ancestor directory of the toggled node.\nconst toggleAncestors = (id: string) => (tree: CollectionPanelFilesState) => {\n    const ancestors = getNodeAncestorsIds(id)(tree).reverse();\n    return ancestors.reduce((newTree, parent) => parent ? toggleParentNode(parent)(newTree) : newTree, tree);\n};\n\n// 'id' names the ancestor directory itself; it is marked selected only when\n// every one of its direct children is selected.\nconst toggleParentNode = (id: string) => (tree: CollectionPanelFilesState) => {\n    const node = getNode(id)(tree);\n    if (node) {\n        const parentNode = getNode(node.id)(tree);\n        if (parentNode) {\n            const selected = parentNode.children\n                .map(id => getNode(id)(tree))\n                .every(node => node !== undefined && node.value.selected);\n            return setNodeValueWith((v: any) => ({ ...v, selected }))(parentNode.id)(tree);\n        }\n        return setNode(node)(tree);\n    }\n    return tree;\n};\n"
  },
  {
    "path": "services/workbench2/src/store/collection-panel/collection-panel-files/collection-panel-files-state.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Tree, TreeNode, mapTreeValues, getNodeValue, getNodeDescendants } from 'models/tree';\nimport { CollectionFile, CollectionDirectory, CollectionFileType } from 'models/collection-file';\nimport { ContextMenuResource } from \"store/context-menu/context-menu\";\nimport { CollectionResource } from 'models/collection';\n\nexport type CollectionPanelFilesState = Tree<CollectionPanelDirectory | CollectionPanelFile>;\n\nexport interface CollectionPanelDirectory extends CollectionDirectory {\n    collapsed: boolean;\n    selected: boolean;\n}\n\nexport interface CollectionPanelFile extends CollectionFile {\n    selected: boolean;\n}\n\nexport interface CollectionFileSelection {\n    collection: CollectionResource;\n    selectedPaths: string[];\n}\n\nexport const mapCollectionFileToCollectionPanelFile = (node: TreeNode<CollectionDirectory | CollectionFile>): TreeNode<CollectionPanelDirectory | CollectionPanelFile> => {\n    return {\n        ...node,\n        value: node.value.type === CollectionFileType.DIRECTORY\n            ? { ...node.value, selected: false, collapsed: true }\n            : { ...node.value, selected: false }\n    };\n};\n\nexport const mergeCollectionPanelFilesStates = (oldState: CollectionPanelFilesState, newState: CollectionPanelFilesState) => {\n    return mapTreeValues((value: CollectionPanelDirectory | CollectionPanelFile) => {\n        const oldValue = getNodeValue(value.id)(oldState);\n        return oldValue\n            ? oldValue.type === CollectionFileType.DIRECTORY\n                ? { ...value, collapsed: oldValue.collapsed, selected: oldValue.selected }\n                : { ...value, selected: oldValue.selected }\n            : value;\n    })(newState);\n};\n\nexport const filterCollectionFilesBySelection = (tree: CollectionPanelFilesState, selected: boolean): (CollectionPanelFile | CollectionPanelDirectory)[] => {\n    const allFiles = getNodeDescendants('')(tree).map(node => node.value);\n    const selectedDirectories = allFiles.filter(file => file.selected === selected && file.type === CollectionFileType.DIRECTORY);\n    const selectedFiles = allFiles.filter(file => file.selected === selected && !selectedDirectories.some(dir => dir.id === file.path));\n    return [...selectedDirectories, ...selectedFiles]\n        .filter((value, index, array) => (\n            array.indexOf(value) === index\n        ));\n};\n\nexport const getCollectionSelection = (sourceCollection: CollectionResource, selectedItems: (CollectionPanelDirectory | CollectionPanelFile | ContextMenuResource)[]) => ({\n    collection: sourceCollection,\n    selectedPaths: selectedItems.map(itemsToPaths).map(trimPathUuids(sourceCollection.uuid)),\n})\n\nconst itemsToPaths = (item: (CollectionPanelDirectory | CollectionPanelFile | ContextMenuResource)): string => ('uuid' in item) ? item.uuid : item.id;\n\nconst trimPathUuids = (parentCollectionUuid: string) => (path: string) => path.replace(new RegExp(`(^${parentCollectionUuid})`), '');\n"
  },
  {
    "path": "services/workbench2/src/store/collection-panel/collection-panel-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { collectionPanelActions, CollectionPanelAction } from \"./collection-panel-action\";\nimport { CollectionResource } from \"models/collection\";\n\nexport interface CollectionPanelState {\n    item: CollectionResource | null;\n}\n\nconst initialState = {\n    item: null,\n};\n\nexport const collectionPanelReducer = (state: CollectionPanelState = initialState, action: CollectionPanelAction) =>\n    collectionPanelActions.match(action, {\n        default: () => state,\n        SET_COLLECTION: (item) => ({\n             ...state,\n             item,\n        }),\n        RESET_COLLECTION_PANEL: () => ({\n            ...state,\n            item: null,\n        }),\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/collections/collection-copy-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { resetPickerProjectTree } from \"store/project-tree-picker/project-tree-picker-actions\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { CopyFormDialogData } from \"store/copy-dialog/copy-dialog\";\nimport { initProjectsTreePicker } from \"store/tree-picker/tree-picker-actions\";\nimport { getResource } from \"store/resources/resources\";\nimport { CollectionResource } from \"models/collection\";\nimport { getResourcesFromCheckedList } from \"store/multiselect/multiselect-actions\";\n\nexport const COLLECTION_COPY_FORM_NAME = \"collectionCopyFormName\";\n\nexport const openCollectionCopy = (resource: { name: string; uuid: string; }) => (dispatch: Dispatch, getState: () => RootState) => {\n    const resourcesToCopy = getResourcesFromCheckedList(getState()).map(res => ({ name: res.name, uuid: res.uuid }));\n    if (!resourcesToCopy.length) resourcesToCopy.push(resource);\n    const isSingleResource = resourcesToCopy.length === 1;\n    dispatch<any>(resetPickerProjectTree());\n    dispatch<any>(initProjectsTreePicker(COLLECTION_COPY_FORM_NAME));\n    const initialData: CopyFormDialogData = { name: `Copy of: ${resource.name}`, ownerUuid: \"\", uuid: resource.uuid, isSingleResource };\n    dispatch(dialogActions.OPEN_DIALOG({ id: COLLECTION_COPY_FORM_NAME, data: initialData }) );\n}\n\nexport const copyCollection =\n    (resource: CopyFormDialogData) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        let collection = getResource<CollectionResource>(resource.uuid)(getState().resources);\n            if (!collection) {\n                collection = await services.collectionService.get(resource.uuid);\n            }\n            const collManifestText = await services.collectionService.get(resource.uuid, undefined, [\"manifestText\"]);\n            collection.manifestText = collManifestText.manifestText;\n            const newCollection = await services.collectionService.create(\n                {\n                    ...collection,\n                    ownerUuid: resource.ownerUuid,\n                    name: resource.name,\n                },\n                false\n            );\n            return newCollection;\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/collections/collection-create-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from 'store/store';\nimport { getUserUuid } from \"common/getuser\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { ServiceRepository } from 'services/services';\nimport { getCommonResourceServiceError, CommonResourceServiceError } from \"services/common-service/common-resource-service\";\nimport { uploadCollectionFiles } from './collection-upload-actions';\nimport { fileUploaderActions } from 'store/file-uploader/file-uploader-actions';\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { isProjectOrRunProcessRoute } from 'store/projects/project-create-actions';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { CollectionResource } from \"models/collection\";\n\nexport interface CollectionCreateFormDialogData {\n    ownerUuid: string;\n    name: string;\n    description: string;\n    storageClassesDesired: string[];\n    properties: CollectionProperties;\n}\n\nexport interface CollectionProperties {\n    [key: string]: string | string[];\n}\n\nexport const COLLECTION_CREATE_FORM_NAME = \"collectionCreateFormName\";\nexport const COLLECTION_CREATE_PROPERTIES_FORM_NAME = \"collectionCreatePropertiesFormName\";\n\nexport const openCollectionCreateDialog = (ownerUuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const { router } = getState();\n        let ownerUuidToUse = ownerUuid;\n        if (!isProjectOrRunProcessRoute(router)) {\n            const userUuid = getUserUuid(getState());\n            if (!userUuid) { return; }\n            ownerUuidToUse = userUuid;\n        }\n        dispatch(fileUploaderActions.CLEAR_UPLOAD());\n        dispatch(dialogActions.OPEN_DIALOG({ id: COLLECTION_CREATE_FORM_NAME, data: { ownerUuid: ownerUuidToUse } }));\n    };\n\nexport const createCollection = (data: CollectionCreateFormDialogData, setSubmitErr: (errMsg: string) => void) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        let newCollection: CollectionResource | undefined;\n        try {\n            dispatch(progressIndicatorActions.START_WORKING(COLLECTION_CREATE_FORM_NAME));\n            newCollection = await services.collectionService.create(data, false);\n            await dispatch<any>(uploadCollectionFiles(newCollection.uuid));\n            dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_CREATE_FORM_NAME }));\n            return newCollection;\n        } catch (e) {\n            const error = getCommonResourceServiceError(e);\n            if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                setSubmitErr('Collection with the same name already exists.');\n            } else {\n                dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_CREATE_FORM_NAME }));\n                const errMsg = e.errors\n                    ? 
e.errors.join('')\n                    : 'There was an error while creating the collection';\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: errMsg,\n                    hideDuration: 2000,\n                    kind: SnackbarKind.ERROR\n                }));\n                if (newCollection) { await services.collectionService.delete(newCollection.uuid); }\n            }\n            return;\n        } finally {\n            dispatch(progressIndicatorActions.STOP_WORKING(COLLECTION_CREATE_FORM_NAME));\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/collections/collection-info-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ofType, unionize } from 'common/unionize';\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { CollectionResource } from \"models/collection\";\nimport { SshKeyResource } from 'models/ssh-key';\nimport { User } from \"models/user\";\nimport { Session } from \"models/session\";\nimport { Config } from 'common/config';\nimport { createServices, setAuthorizationHeader } from \"services/services\";\nimport { getTokenV2 } from 'models/api-client-authorization';\n\nexport const COLLECTION_WEBDAV_S3_DIALOG_NAME = 'collectionWebdavS3Dialog';\n\nexport interface WebDavS3InfoDialogData {\n    uuid: string;\n    token: string;\n    downloadUrl: string;\n    collectionsUrl: string;\n    localCluster: string;\n    username: string;\n    activeTab: number;\n    collectionName: string;\n    setActiveTab: (event: any, tabNr: number) => void;\n}\n\nexport const openWebDavS3InfoDialog = (uuid: string, activeTab?: number) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        await dispatch<any>(getNewExtraToken(true));\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: COLLECTION_WEBDAV_S3_DIALOG_NAME,\n            data: {\n                title: 'Open with 3rd party client',\n                token: getState().auth.extraApiToken || getState().auth.apiToken,\n                downloadUrl: getState().auth.config.keepWebServiceUrl,\n                collectionsUrl: getState().auth.config.keepWebInlineServiceUrl,\n                localCluster: getState().auth.localCluster,\n                username: getState().auth.user!.username,\n                activeTab: activeTab || 0,\n                collectionName: (getState().resources[uuid] as CollectionResource).name,\n                setActiveTab: (event: any, tabNr: number) => dispatch<any>(openWebDavS3InfoDialog(uuid, tabNr)),\n                uuid\n            }\n        }));\n    };\n\nconst authActions = unionize({\n    LOGIN: {},\n    LOGOUT: ofType<{ deleteLinkData: boolean, preservePath: boolean }>(),\n    SET_CONFIG: ofType<{ config: Config }>(),\n    SET_EXTRA_TOKEN: ofType<{ extraApiToken: string, extraApiTokenExpiration?: Date }>(),\n    RESET_EXTRA_TOKEN: {},\n    INIT_USER: ofType<{ user: User, token: string, tokenExpiration?: Date, tokenLocation?: string }>(),\n    USER_DETAILS_REQUEST: {},\n    USER_DETAILS_SUCCESS: ofType<User>(),\n    SET_SSH_KEYS: ofType<SshKeyResource[]>(),\n    ADD_SSH_KEY: ofType<SshKeyResource>(),\n    REMOVE_SSH_KEY: ofType<string>(),\n    SET_HOME_CLUSTER: ofType<string>(),\n    SET_SESSIONS: ofType<Session[]>(),\n    ADD_SESSION: ofType<Session>(),\n    REMOVE_SESSION: ofType<string>(),\n    UPDATE_SESSION: ofType<Session>(),\n    REMOTE_CLUSTER_CONFIG: ofType<{ config: Config }>(),\n});\n\nconst getConfig = (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Config => {\n    const state = getState().auth;\n    return state.remoteHostsConfig[state.localCluster];\n};\n\nconst getNewExtraToken =\n    (reuseStored: boolean = false) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const extraToken = getState().auth.extraApiToken;\n        if (reuseStored && extraToken !== undefined) {\n            const 
config = dispatch<any>(getConfig);\n            const svc = createServices(config, { progressFn: () => {}, errorFn: () => {} });\n            setAuthorizationHeader(svc, extraToken);\n            try {\n                // Check the extra token's validity before using it. Refresh its\n                // expiration date just in case it changed.\n                const client = await svc.apiClientAuthorizationService.get('current');\n                dispatch(\n                    authActions.SET_EXTRA_TOKEN({\n                        extraApiToken: extraToken,\n                        extraApiTokenExpiration: client.expiresAt ? new Date(client.expiresAt) : undefined,\n                    })\n                );\n                return extraToken;\n            } catch (e) {\n                dispatch(authActions.RESET_EXTRA_TOKEN());\n            }\n        }\n        const user = getState().auth.user;\n        const loginCluster = getState().auth.config.clusterConfig.Login.LoginCluster;\n        if (user === undefined) {\n            return;\n        }\n        if (loginCluster !== '' && getState().auth.homeCluster !== loginCluster) {\n            return;\n        }\n        try {\n            // Do not show errors on the create call: the cluster's security configuration may not\n            // allow token creation, and there's no way to know that from the workbench2 side in advance.\n            const client = await services.apiClientAuthorizationService.create(undefined, false);\n            const newExtraToken = getTokenV2(client);\n            dispatch(\n                authActions.SET_EXTRA_TOKEN({\n                    extraApiToken: newExtraToken,\n                    extraApiTokenExpiration: client.expiresAt ? new Date(client.expiresAt) : undefined,\n                })\n            );\n            return newExtraToken;\n        } catch {\n            console.warn(\"Cannot create new tokens with the current token, probably because of the cluster's security settings.\");\n            return;\n        }\n    };"
  },
  {
    "path": "services/workbench2/src/store/collections/collection-move-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { ServiceRepository } from \"services/services\";\nimport { RootState } from \"store/store\";\nimport { getCommonResourceServiceError, CommonResourceServiceError } from \"services/common-service/common-resource-service\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { projectPanelDataActions } from \"store/project-panel/project-panel-action-bind\";\nimport { MoveToFormDialogData } from \"store/move-to-dialog/move-to-dialog\";\nimport { resetPickerProjectTree } from \"store/project-tree-picker/project-tree-picker-actions\";\nimport { initProjectsTreePicker } from \"store/tree-picker/tree-picker-actions\";\nimport { getResource } from \"store/resources/resources\";\nimport { CollectionResource } from \"models/collection\";\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { matchProjectRoute } from \"routes/routes\";\n\nexport const COLLECTION_MOVE_FORM_NAME = \"collectionMoveFormName\";\n\nexport const openMoveCollectionDialog = (resource: { name: string; uuid: string }) => (dispatch: Dispatch) => {\n    dispatch<any>(resetPickerProjectTree());\n    dispatch<any>(initProjectsTreePicker(COLLECTION_MOVE_FORM_NAME));\n    dispatch(dialogActions.OPEN_DIALOG({ id: COLLECTION_MOVE_FORM_NAME, data: resource }));\n};\n\nexport const moveCollection =\n    (resource: MoveToFormDialogData) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(progressIndicatorActions.START_WORKING(COLLECTION_MOVE_FORM_NAME));\n        let cachedCollection = getResource<CollectionResource>(resource.uuid)(getState().resources);\n        try {\n            if (!cachedCollection) {\n                cachedCollection = await services.collectionService.get(resource.uuid);\n            }\n            const collection = await services.collectionService.update(resource.uuid, { ownerUuid: resource.ownerUuid });\n            if (matchProjectRoute(getState().router.location.pathname)) {\n                dispatch(projectPanelDataActions.REQUEST_ITEMS());\n            }\n            dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_MOVE_FORM_NAME }));\n            return { ...cachedCollection, ...collection };\n        } catch (e) {\n            const error = getCommonResourceServiceError(e);\n            if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"A collection with the same name already exists in the target project.\", hideDuration: 2000, kind: SnackbarKind.ERROR }));\n            } else {\n                dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_MOVE_FORM_NAME }));\n                dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Could not move the collection.\", hideDuration: 2000, kind: SnackbarKind.ERROR }));\n            }\n            return;\n        } finally {\n            dispatch(progressIndicatorActions.STOP_WORKING(COLLECTION_MOVE_FORM_NAME));\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/collections/collection-partial-copy-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { resetPickerProjectTree } from 'store/project-tree-picker/project-tree-picker-actions';\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { ServiceRepository } from 'services/services';\nimport { CollectionFileSelection, CollectionPanelDirectory, CollectionPanelFile, filterCollectionFilesBySelection, getCollectionSelection } from '../collection-panel/collection-panel-files/collection-panel-files-state';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { getCommonResourceServiceError, CommonResourceServiceError } from 'services/common-service/common-resource-service';\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { FileOperationLocation } from \"store/tree-picker/tree-picker-actions\";\nimport { updateResources } from 'store/resources/resources-actions';\nimport { navigateTo } from 'store/navigation/navigation-action';\nimport { ContextMenuResource } from \"store/context-menu/context-menu\";\nimport { CollectionResource } from 'models/collection';\n\nexport const COLLECTION_PARTIAL_COPY_FORM_NAME = 'COLLECTION_PARTIAL_COPY_DIALOG';\nexport const COLLECTION_PARTIAL_COPY_TO_SELECTED_COLLECTION = 'COLLECTION_PARTIAL_COPY_TO_SELECTED_DIALOG';\nexport const COLLECTION_PARTIAL_COPY_TO_SEPARATE_COLLECTIONS = 'COLLECTION_PARTIAL_COPY_TO_SEPARATE_DIALOG';\n\nexport interface CollectionPartialCopyToNewCollectionFormData {\n    name: string;\n    description: string;\n    projectUuid: string;\n}\n\nexport interface CollectionPartialCopyToExistingCollectionFormData {\n    destination: FileOperationLocation;\n}\n\nexport interface CollectionPartialCopyToSeparateCollectionsFormData {\n    name: string;\n    projectUuid: string;\n}\n\nexport const openCollectionPartialCopyToNewCollectionDialog = (resource: ContextMenuResource) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const sourceCollection = getState().collectionPanel.item;\n\n        if (sourceCollection) {\n            openCopyPartialToNewDialog(dispatch, sourceCollection, [resource]);\n        }\n    };\n\nexport const openCollectionPartialCopyMultipleToNewCollectionDialog = () =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const sourceCollection = getState().collectionPanel.item;\n        const selectedItems = filterCollectionFilesBySelection(getState().collectionPanelFiles, true);\n\n        if (sourceCollection && selectedItems.length) {\n            openCopyPartialToNewDialog(dispatch, sourceCollection, selectedItems);\n        }\n    };\n\nconst openCopyPartialToNewDialog = (dispatch: Dispatch, sourceCollection: CollectionResource, selectedItems: (CollectionPanelDirectory | CollectionPanelFile | ContextMenuResource)[]) => {\n    // Get selected files\n    const collectionFileSelection = getCollectionSelection(sourceCollection, selectedItems);\n    // Populate form initial state\n    const initialFormData = {\n        name: `Files extracted from: ${sourceCollection.name}`,\n        description: sourceCollection.description,\n        projectUuid: undefined\n    };\n    dispatch<any>(resetPickerProjectTree());\n    dispatch(dialogActions.OPEN_DIALOG({ id: COLLECTION_PARTIAL_COPY_FORM_NAME, data: { collectionFileSelection, initialFormData } }));\n};\n\nexport const 
copyCollectionPartialToNewCollection = (fileSelection: CollectionFileSelection, formData: CollectionPartialCopyToNewCollectionFormData) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        if (fileSelection.collection) {\n            try {\n                dispatch(progressIndicatorActions.START_WORKING(COLLECTION_PARTIAL_COPY_FORM_NAME));\n\n                // Copy files\n                const updatedCollection = await services.collectionService.copyFiles(\n                    fileSelection.collection.portableDataHash,\n                    fileSelection.selectedPaths,\n                    {\n                        name: formData.name,\n                        description: formData.description,\n                        ownerUuid: formData.projectUuid,\n                        uuid: undefined,\n                    },\n                    '/',\n                    false\n                );\n                dispatch(updateResources([updatedCollection]));\n                dispatch<any>(navigateTo(updatedCollection.uuid));\n\n                dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_COPY_FORM_NAME }));\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'New collection created.',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS\n                }));\n            } catch (e) {\n                const error = getCommonResourceServiceError(e);\n                if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Collection with this name already exists', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n                } else if (error === CommonResourceServiceError.UNKNOWN) {\n                    dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_COPY_FORM_NAME }));\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Could not create a copy of collection', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n                } else {\n                    dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_COPY_FORM_NAME }));\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Collection has been copied but may contain incorrect files.', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n                }\n            } finally {\n                dispatch(progressIndicatorActions.STOP_WORKING(COLLECTION_PARTIAL_COPY_FORM_NAME));\n            }\n        }\n    };\n\nexport const openCollectionPartialCopyToExistingCollectionDialog = (resource: ContextMenuResource) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const sourceCollection = getState().collectionPanel.item;\n\n        if (sourceCollection) {\n            openCopyToExistingDialog(dispatch, sourceCollection, [resource]);\n        }\n    };\n\nexport const openCollectionPartialCopyMultipleToExistingCollectionDialog = () =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const sourceCollection = getState().collectionPanel.item;\n        const selectedItems = filterCollectionFilesBySelection(getState().collectionPanelFiles, true);\n\n        if (sourceCollection && selectedItems.length) {\n            openCopyToExistingDialog(dispatch, sourceCollection, selectedItems);\n        }\n    };\n\nconst openCopyToExistingDialog = (dispatch: Dispatch, sourceCollection: CollectionResource, selectedItems: 
(CollectionPanelDirectory | CollectionPanelFile | ContextMenuResource)[]) => {\n    // Get selected files\n    const collectionFileSelection = getCollectionSelection(sourceCollection, selectedItems);\n    // Populate form initial state\n    const initialFormData = {\n        destination: {uuid: sourceCollection.uuid, destinationPath: ''}\n    };\n    dispatch<any>(resetPickerProjectTree());\n    dispatch(dialogActions.OPEN_DIALOG({ id: COLLECTION_PARTIAL_COPY_TO_SELECTED_COLLECTION, data: { initialFormData, collectionFileSelection } }));\n};\n\nexport const copyCollectionPartialToExistingCollection = (fileSelection: CollectionFileSelection, formData: CollectionPartialCopyToExistingCollectionFormData) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        if (fileSelection.collection && formData.destination && formData.destination.uuid) {\n            try {\n                dispatch(progressIndicatorActions.START_WORKING(COLLECTION_PARTIAL_COPY_TO_SELECTED_COLLECTION));\n\n                // Copy files\n                const updatedCollection = await services.collectionService.copyFiles(\n                    fileSelection.collection.portableDataHash,\n                    fileSelection.selectedPaths,\n                    {uuid: formData.destination.uuid},\n                    formData.destination.subpath || '/',\n                    false\n                );\n                dispatch(updateResources([updatedCollection]));\n\n                dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_COPY_TO_SELECTED_COLLECTION }));\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Files have been copied to the selected collection.',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS\n                }));\n            } catch (e) {\n                const error = getCommonResourceServiceError(e);\n                if (error === CommonResourceServiceError.UNKNOWN) {\n                    dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_COPY_TO_SELECTED_COLLECTION }));\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Could not copy these files to the selected collection', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n                }\n            } finally {\n                dispatch(progressIndicatorActions.STOP_WORKING(COLLECTION_PARTIAL_COPY_TO_SELECTED_COLLECTION));\n            }\n        }\n    };\n\nexport const openCollectionPartialCopyToSeparateCollectionsDialog = () =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const sourceCollection = getState().collectionPanel.item;\n        const selectedItems = filterCollectionFilesBySelection(getState().collectionPanelFiles, true);\n\n        if (sourceCollection && selectedItems.length) {\n            // Get selected files\n            const collectionFileSelection = getCollectionSelection(sourceCollection, selectedItems);\n            dispatch<any>(resetPickerProjectTree());\n            dispatch(dialogActions.OPEN_DIALOG({ id: COLLECTION_PARTIAL_COPY_TO_SEPARATE_COLLECTIONS, data: {collectionFileSelection, sourceCollectionName: sourceCollection.name} }));\n        }\n    };\n\nexport const copyCollectionPartialToSeparateCollections = (fileSelection: CollectionFileSelection, formData: CollectionPartialCopyToSeparateCollectionsFormData) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        if (fileSelection.collection) 
{\n            try {\n                dispatch(progressIndicatorActions.START_WORKING(COLLECTION_PARTIAL_COPY_TO_SEPARATE_COLLECTIONS));\n\n                // Copy files\n                const collections = await Promise.all(fileSelection.selectedPaths.map((path) =>\n                    services.collectionService.copyFiles(\n                        fileSelection.collection.portableDataHash,\n                        [path],\n                        {\n                            name: \`File copied from collection ${formData.name}${path}\`,\n                            ownerUuid: formData.projectUuid,\n                            uuid: undefined,\n                        },\n                        '/',\n                        false\n                    )\n                ));\n                dispatch(updateResources(collections));\n                dispatch<any>(navigateTo(formData.projectUuid));\n\n                dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_COPY_TO_SEPARATE_COLLECTIONS }));\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'New collections created.',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS\n                }));\n            } catch (e) {\n                const error = getCommonResourceServiceError(e);\n                if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'A collection with the same name already exists for one or more of the files', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n                } else if (error === CommonResourceServiceError.UNKNOWN) {\n                    dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_COPY_TO_SEPARATE_COLLECTIONS }));\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Could not create a copy of the collection', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n                } else {\n                    dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_COPY_TO_SEPARATE_COLLECTIONS }));\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Files have been copied, but the new collections may contain incorrect files.', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n                }\n            } finally {\n                dispatch(progressIndicatorActions.STOP_WORKING(COLLECTION_PARTIAL_COPY_TO_SEPARATE_COLLECTIONS));\n            }\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/collections/collection-partial-move-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { CommonResourceServiceError, getCommonResourceServiceError } from \"services/common-service/common-resource-service\";\nimport { ServiceRepository } from \"services/services\";\nimport { CollectionFileSelection, CollectionPanelDirectory, CollectionPanelFile, filterCollectionFilesBySelection, getCollectionSelection } from \"store/collection-panel/collection-panel-files/collection-panel-files-state\";\nimport { ContextMenuResource } from \"store/context-menu/context-menu\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { navigateTo } from \"store/navigation/navigation-action\";\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { resetPickerProjectTree } from \"store/project-tree-picker/project-tree-picker-actions\";\nimport { updateResources } from \"store/resources/resources-actions\";\nimport { SnackbarKind, snackbarActions } from \"store/snackbar/snackbar-actions\";\nimport { RootState } from \"store/store\";\nimport { FileOperationLocation } from \"store/tree-picker/tree-picker-actions\";\nimport { CollectionResource } from \"models/collection\";\nimport { SOURCE_DESTINATION_EQUAL_ERROR_MESSAGE } from \"services/collection-service/collection-service\";\n\nexport const COLLECTION_PARTIAL_MOVE_TO_NEW_COLLECTION = 'COLLECTION_PARTIAL_MOVE_TO_NEW_DIALOG';\nexport const COLLECTION_PARTIAL_MOVE_TO_SELECTED_COLLECTION = 'COLLECTION_PARTIAL_MOVE_TO_SELECTED_DIALOG';\nexport const COLLECTION_PARTIAL_MOVE_TO_SEPARATE_COLLECTIONS = 'COLLECTION_PARTIAL_MOVE_TO_SEPARATE_DIALOG';\n\nexport interface CollectionPartialMoveToNewCollectionFormData {\n    name: string;\n    description: string;\n    projectUuid: string;\n}\n\nexport interface CollectionPartialMoveToExistingCollectionFormData {\n    destination: FileOperationLocation;\n}\n\nexport interface CollectionPartialMoveToSeparateCollectionsFormData {\n    name: string;\n    projectUuid: string;\n}\n\nexport const openCollectionPartialMoveToNewCollectionDialog = (resource: ContextMenuResource) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const sourceCollection = getState().collectionPanel.item;\n\n        if (sourceCollection) {\n            openMoveToNewDialog(dispatch, sourceCollection, [resource]);\n        }\n    };\n\nexport const openCollectionPartialMoveMultipleToNewCollectionDialog = () =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const sourceCollection = getState().collectionPanel.item;\n        const selectedItems = filterCollectionFilesBySelection(getState().collectionPanelFiles, true);\n\n        if (sourceCollection && selectedItems.length) {\n            openMoveToNewDialog(dispatch, sourceCollection, selectedItems);\n        }\n    };\n\nconst openMoveToNewDialog = (dispatch: Dispatch, sourceCollection: CollectionResource, selectedItems: (CollectionPanelDirectory | CollectionPanelFile | ContextMenuResource)[]) => {\n    // Get selected files\n    const collectionFileSelection = getCollectionSelection(sourceCollection, selectedItems);\n    // Populate form initial state\n    const initialFormData = {\n        name: `Files moved from: ${sourceCollection.name}`,\n        description: sourceCollection.description,\n        projectUuid: undefined\n    };\n    dispatch<any>(resetPickerProjectTree());\n    dispatch(dialogActions.OPEN_DIALOG({ id: 
COLLECTION_PARTIAL_MOVE_TO_NEW_COLLECTION, data: { collectionFileSelection, initialFormData} }));\n}\n\nexport const moveCollectionPartialToNewCollection = (fileSelection: CollectionFileSelection, formData: CollectionPartialMoveToNewCollectionFormData) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        if (fileSelection.collection) {\n            try {\n                dispatch(progressIndicatorActions.START_WORKING(COLLECTION_PARTIAL_MOVE_TO_NEW_COLLECTION));\n\n                // Move files\n                const updatedCollection = await services.collectionService.moveFiles(\n                    fileSelection.collection.uuid,\n                    fileSelection.collection.portableDataHash,\n                    fileSelection.selectedPaths,\n                    {\n                        name: formData.name,\n                        description: formData.description,\n                        ownerUuid: formData.projectUuid,\n                        uuid: undefined,\n                    },\n                    '/',\n                    false\n                );\n                dispatch(updateResources([updatedCollection]));\n                dispatch<any>(navigateTo(updatedCollection.uuid));\n\n                dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_MOVE_TO_NEW_COLLECTION }));\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Files have been moved to selected collection.',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS\n                }));\n            } catch (e) {\n                const error = getCommonResourceServiceError(e);\n                if (error === CommonResourceServiceError.UNKNOWN) {\n                    dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_MOVE_TO_NEW_COLLECTION }));\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Could not move files to selected collection', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n                }\n            } finally {\n                dispatch(progressIndicatorActions.STOP_WORKING(COLLECTION_PARTIAL_MOVE_TO_NEW_COLLECTION));\n            }\n        }\n    };\n\nexport const openCollectionPartialMoveToExistingCollectionDialog = (resource: ContextMenuResource) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const sourceCollection = getState().collectionPanel.item;\n\n        if (sourceCollection) {\n            openMoveToExistingDialog(dispatch, sourceCollection, [resource]);\n        }\n    };\n\nexport const openCollectionPartialMoveMultipleToExistingCollectionDialog = () =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const sourceCollection = getState().collectionPanel.item;\n        const selectedItems = filterCollectionFilesBySelection(getState().collectionPanelFiles, true);\n\n        if (sourceCollection && selectedItems.length) {\n            openMoveToExistingDialog(dispatch, sourceCollection, selectedItems);\n        }\n    };\n\nconst openMoveToExistingDialog = (dispatch: Dispatch, sourceCollection: CollectionResource, selectedItems: (CollectionPanelDirectory | CollectionPanelFile | ContextMenuResource)[]) => {\n    // Get selected files\n    const collectionFileSelection = getCollectionSelection(sourceCollection, selectedItems);\n    // Populate form initial state\n    const initialFormData = {\n        destination: {uuid: sourceCollection.uuid, path: ''}\n    };\n    
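// Reset the project tree picker before opening the dialog so stale selections are not carried over.\n    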
dispatch<any>(resetPickerProjectTree());\n    dispatch(dialogActions.OPEN_DIALOG({ id: COLLECTION_PARTIAL_MOVE_TO_SELECTED_COLLECTION, data: { collectionFileSelection, initialFormData } }));\n};\n\nexport const moveCollectionPartialToExistingCollection = (fileSelection: CollectionFileSelection, formData: CollectionPartialMoveToExistingCollectionFormData) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        if (fileSelection.collection && formData.destination && formData.destination.uuid) {\n            try {\n                dispatch(progressIndicatorActions.START_WORKING(COLLECTION_PARTIAL_MOVE_TO_SELECTED_COLLECTION));\n\n                // Move files\n                const updatedCollection = await services.collectionService.moveFiles(\n                    fileSelection.collection.uuid,\n                    fileSelection.collection.portableDataHash,\n                    fileSelection.selectedPaths,\n                    {uuid: formData.destination.uuid},\n                    formData.destination.subpath || '/', false\n                );\n                dispatch(updateResources([updatedCollection]));\n\n                dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_MOVE_TO_SELECTED_COLLECTION }));\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Files have been moved to the selected collection.',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS\n                }));\n            } catch (e) {\n                const error = getCommonResourceServiceError(e);\n                if (error === CommonResourceServiceError.SOURCE_DESTINATION_CANNOT_BE_SAME) {\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: SOURCE_DESTINATION_EQUAL_ERROR_MESSAGE, hideDuration: 2000, kind: SnackbarKind.ERROR }));\n                } else if (error === CommonResourceServiceError.UNKNOWN) {\n                    dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_MOVE_TO_SELECTED_COLLECTION }));\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Could not move these files to the selected collection', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n                }\n            } finally {\n                dispatch(progressIndicatorActions.STOP_WORKING(COLLECTION_PARTIAL_MOVE_TO_SELECTED_COLLECTION));\n            }\n        }\n    };\n\nexport const openCollectionPartialMoveToSeparateCollectionsDialog = () =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const sourceCollection = getState().collectionPanel.item;\n        const selectedItems = filterCollectionFilesBySelection(getState().collectionPanelFiles, true);\n\n        if (sourceCollection && selectedItems.length) {\n            // Get selected files\n            const collectionFileSelection = getCollectionSelection(sourceCollection, selectedItems);\n            // Populate form initial state\n            const initialData = {\n                name: sourceCollection.name,\n                projectUuid: undefined\n            };\n            dispatch<any>(resetPickerProjectTree());\n            dispatch(dialogActions.OPEN_DIALOG({ id: COLLECTION_PARTIAL_MOVE_TO_SEPARATE_COLLECTIONS, data: { collectionFileSelection, initialData } }));\n        }\n    };\n\nexport const moveCollectionPartialToSeparateCollections = (fileSelection: CollectionFileSelection, formData: CollectionPartialMoveToSeparateCollectionsFormData) =>\n    async (dispatch: Dispatch, 
getState: () => RootState, services: ServiceRepository) => {\n        if (fileSelection.collection) {\n            try {\n                dispatch(progressIndicatorActions.START_WORKING(COLLECTION_PARTIAL_MOVE_TO_SEPARATE_COLLECTIONS));\n\n                // Move files\n                const collections = await Promise.all(fileSelection.selectedPaths.map((path) =>\n                    services.collectionService.moveFiles(\n                        fileSelection.collection.uuid,\n                        fileSelection.collection.portableDataHash,\n                        [path],\n                        {\n                            name: \`File moved from collection ${formData.name}${path}\`,\n                            ownerUuid: formData.projectUuid,\n                            uuid: undefined,\n                        },\n                        '/',\n                        false\n                    )\n                ));\n                dispatch(updateResources(collections));\n                dispatch<any>(navigateTo(formData.projectUuid));\n\n                dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_MOVE_TO_SEPARATE_COLLECTIONS }));\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'New collections created.',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS\n                }));\n            } catch (e) {\n                const error = getCommonResourceServiceError(e);\n                if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'A collection with the same name already exists for one or more of the files', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n                } else if (error === CommonResourceServiceError.UNKNOWN) {\n                    dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_MOVE_TO_SEPARATE_COLLECTIONS }));\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Could not move the files into separate collections', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n                } else {\n                    dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_PARTIAL_MOVE_TO_SEPARATE_COLLECTIONS }));\n                    dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Files have been moved, but the new collections may contain incorrect files.', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n                }\n            } finally {\n                dispatch(progressIndicatorActions.STOP_WORKING(COLLECTION_PARTIAL_MOVE_TO_SEPARATE_COLLECTIONS));\n            }\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/collections/collection-update-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { collectionPanelActions } from \"store/collection-panel/collection-panel-action\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { getCommonResourceServiceError, CommonResourceServiceError } from \"services/common-service/common-resource-service\";\nimport { ServiceRepository } from \"services/services\";\nimport { CollectionResource } from 'models/collection';\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { snackbarActions, SnackbarKind } from \"../snackbar/snackbar-actions\";\nimport { updateResources } from \"../resources/resources-actions\";\nimport { loadDetailsPanel } from \"../details-panel/details-panel-action\";\nimport { getResource } from \"store/resources/resources\";\nimport { CollectionProperties } from \"./collection-create-actions\";\nimport { loadSidePanelTreeProjects, SidePanelTreeCategory } from \"store/side-panel-tree/side-panel-tree-actions\";\n\nexport interface CollectionUpdateFormDialogData {\n    uuid: string;\n    name: string;\n    description?: string | null;\n    storageClassesDesired?: string[];\n    properties?: CollectionProperties;\n}\n\nexport const COLLECTION_UPDATE_FORM_NAME = 'collectionUpdateFormName';\nexport const COLLECTION_UPDATE_PROPERTIES_FORM_NAME = \"collectionUpdatePropertiesFormName\";\n\nexport const openCollectionUpdateDialog = (resource: CollectionUpdateFormDialogData) =>\n    (dispatch: Dispatch) => {\n        dispatch(dialogActions.OPEN_DIALOG({ id: COLLECTION_UPDATE_FORM_NAME, data: resource }));\n    };\n\nexport const updateCollection = (collection: CollectionUpdateFormDialogData, setSubmitErr: (errMsg: string) => void) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const uuid = collection.uuid || '';\n        dispatch(progressIndicatorActions.START_WORKING(COLLECTION_UPDATE_FORM_NAME));\n\n        const cachedCollection = getResource<CollectionResource>(collection.uuid)(getState().resources);\n        services.collectionService.update(uuid, {\n            name: collection.name,\n            storageClassesDesired: collection.storageClassesDesired,\n            description: collection.description,\n            properties: {...collection.properties, ...(cachedCollection || {}).properties} }, false\n        ).then(updatedCollection => {\n            updatedCollection = {...cachedCollection, ...updatedCollection};\n            dispatch(collectionPanelActions.SET_COLLECTION(updatedCollection));\n            dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_UPDATE_FORM_NAME }));\n            dispatch(progressIndicatorActions.STOP_WORKING(COLLECTION_UPDATE_FORM_NAME));\n            dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: \"Collection has been successfully updated.\",\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS\n            }));\n            dispatch<any>(updateResources([updatedCollection]));\n            dispatch<any>(loadDetailsPanel(updatedCollection.uuid));\n            dispatch<any>(loadSidePanelTreeProjects(SidePanelTreeCategory.FAVORITES));\n        }).catch (e => {\n            dispatch(progressIndicatorActions.STOP_WORKING(COLLECTION_UPDATE_FORM_NAME));\n            const error = getCommonResourceServiceError(e);\n      
      if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                setSubmitErr('Collection with the same name already exists.');\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Collection with the same name already exists.',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.ERROR\n                }));\n            } else {\n                dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_UPDATE_FORM_NAME }));\n                const errMsg = e.errors\n                    ? e.errors.join('')\n                    : 'There was an error while updating the collection';\n                setSubmitErr(errMsg);\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: errMsg,\n                    hideDuration: 2000,\n                    kind: SnackbarKind.ERROR\n                }));\n            }\n        });\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/collections/collection-upload-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from 'services/services';\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { fileUploaderActions } from 'store/file-uploader/file-uploader-actions';\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport * as WorkbenchActions from 'store/workbench/workbench-actions';\n\nexport const uploadCollectionFiles = (collectionUuid: string, targetLocation?: string) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(fileUploaderActions.START_UPLOAD());\n        const files = getState().fileUploader.map(file => file.file);\n        await services.collectionService.uploadFiles(collectionUuid, files, handleUploadProgress(dispatch), targetLocation);\n        dispatch(WorkbenchActions.loadCollection(collectionUuid));\n        dispatch(fileUploaderActions.CLEAR_UPLOAD());\n    };\n\nexport const COLLECTION_UPLOAD_FILES_DIALOG = 'uploadCollectionFilesDialog';\n\nexport const openUploadCollectionFilesDialog = (targetLocation?: string) => (dispatch: Dispatch) => {\n    dispatch(fileUploaderActions.CLEAR_UPLOAD());\n    dispatch<any>(dialogActions.OPEN_DIALOG({ id: COLLECTION_UPLOAD_FILES_DIALOG, data: { targetLocation } }));\n};\n\nexport const submitCollectionFiles = (targetLocation?: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const currentCollection = getState().collectionPanel.item;\n        if (currentCollection) {\n            try {\n                dispatch(progressIndicatorActions.START_WORKING(COLLECTION_UPLOAD_FILES_DIALOG));\n                await dispatch<any>(uploadCollectionFiles(currentCollection.uuid, targetLocation));\n                dispatch(closeUploadCollectionFilesDialog());\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Data has been uploaded.',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS\n                }));\n            } catch (e) {\n                dispatch(closeUploadCollectionFilesDialog());\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Error uploading file(s). See console for details.',\n                    hideDuration: 4000,\n                    kind: SnackbarKind.ERROR\n                }));\n            } finally {\n                dispatch(progressIndicatorActions.STOP_WORKING(COLLECTION_UPLOAD_FILES_DIALOG));\n            }\n        }\n    };\n\nexport const closeUploadCollectionFilesDialog = () => dialogActions.CLOSE_DIALOG({ id: COLLECTION_UPLOAD_FILES_DIALOG });\n\nconst handleUploadProgress = (dispatch: Dispatch) => (fileId: number, loaded: number, total: number, currentTime: number) => {\n    dispatch(fileUploaderActions.SET_UPLOAD_PROGRESS({ fileId, loaded, total, currentTime }));\n};\n"
  },
  {
    "path": "services/workbench2/src/store/collections/collection-version-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from 'services/services';\nimport { snackbarActions, SnackbarKind } from \"../snackbar/snackbar-actions\";\nimport { resourcesActions } from \"../resources/resources-actions\";\nimport { navigateTo } from \"../navigation/navigation-action\";\nimport { dialogActions } from \"../dialog/dialog-actions\";\nimport { getResource } from \"store/resources/resources\";\nimport { CollectionResource } from \"models/collection\";\n\nexport const COLLECTION_RESTORE_VERSION_DIALOG = 'collectionRestoreVersionDialog';\n\nexport const openRestoreCollectionVersionDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: COLLECTION_RESTORE_VERSION_DIALOG,\n            data: {\n                title: 'Restore version',\n                text: \"This will copy the content of the selected version to the head. To make a new collection with the content of the selected version, use 'Make a copy' instead.\",\n                confirmButtonLabel: 'Restore',\n                uuid\n            }\n        }));\n    };\n\nexport const restoreVersion = (resourceUuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            // Request the manifest text because stored old versions usually\n            // don't include them.\n            let oldVersion = getResource<CollectionResource>(resourceUuid)(getState().resources);\n            if (!oldVersion) {\n                oldVersion = await services.collectionService.get(resourceUuid);\n            }\n            const oldVersionManifest = await services.collectionService.get(resourceUuid, undefined, ['manifestText']);\n            oldVersion.manifestText = oldVersionManifest.manifestText;\n\n            const { uuid, version, ...rest} = oldVersion;\n            const headVersion = await services.collectionService.update(\n                oldVersion.currentVersionUuid,\n                { ...rest }\n            );\n            dispatch(resourcesActions.SET_RESOURCES([headVersion]));\n            dispatch<any>(navigateTo(headVersion.uuid));\n        } catch (e) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: `Couldn't restore version: ${e.errors[0]}`,\n                hideDuration: 2000,\n                kind: SnackbarKind.ERROR\n            }));\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/collections-content-address-panel/collections-content-address-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ServiceRepository } from 'services/services';\nimport { MiddlewareAPI, Dispatch } from 'redux';\nimport { DataExplorerMiddlewareService, dataExplorerToListParams, getOrder } from 'store/data-explorer/data-explorer-middleware-service';\nimport { RootState } from 'store/store';\nimport { getUserUuid } from \"common/getuser\";\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { DataExplorer, getDataExplorer } from 'store/data-explorer/data-explorer-reducer';\nimport { resourcesActions } from 'store/resources/resources-actions';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { progressIndicatorActions } from 'store/progress-indicator/progress-indicator-actions';\nimport { collectionsContentAddressActions } from './collections-content-address-panel-actions';\nimport { updateFavorites } from 'store/favorites/favorites-actions';\nimport { updatePublicFavorites } from 'store/public-favorites/public-favorites-actions';\nimport { setBreadcrumbs } from '../breadcrumbs/breadcrumbs-actions';\nimport { ResourceKind, extractUuidKind } from 'models/resource';\nimport { ownerNameActions } from 'store/owner-name/owner-name-actions';\nimport { getUserDisplayName } from 'models/user';\nimport { CollectionResource } from 'models/collection';\nimport { replace } from \"connected-react-router\";\nimport { getNavUrl } from 'routes/routes';\nimport { ListArguments, ListResults } from 'services/common-service/common-service';\nimport { couldNotFetchItemsAvailable } from 'store/data-explorer/data-explorer-action';\n\nexport class CollectionsWithSameContentAddressMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const dataExplorer = getDataExplorer(api.getState().dataExplorer, this.getId());\n        if (!dataExplorer) {\n            api.dispatch(collectionPanelDataExplorerIsNotSet());\n        } else {\n            try {\n                if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n\n                const state = api.getState();\n                const userUuid = getUserUuid(state);\n                const pathname = state.router.location!.pathname;\n                const contentAddress = pathname.split('/')[2];\n\n                // Get items\n                const response = await this.services.collectionService.list(getParams(dataExplorer, contentAddress));\n                const userUuids = response.items.map(it => {\n                    if (extractUuidKind(it.ownerUuid) === ResourceKind.USER) {\n                        return it.ownerUuid;\n                    } else {\n                        return '';\n                    }\n                }\n                );\n                const groupUuids = response.items.map(it => {\n                    if (extractUuidKind(it.ownerUuid) === ResourceKind.GROUP) {\n                        return it.ownerUuid;\n                    } else {\n                        return '';\n                    }\n                });\n                const responseUsers = await this.services.userService.list({\n                    filters: new FilterBuilder()\n                        .addIn('uuid', userUuids)\n                    
    .getFilters(),\n                    count: \"none\"\n                });\n                const responseGroups = await this.services.groupsService.list({\n                    filters: new FilterBuilder()\n                        .addIn('uuid', groupUuids)\n                        .getFilters(),\n                    count: \"none\"\n                });\n                responseUsers.items.forEach(it => {\n                    api.dispatch<any>(ownerNameActions.SET_OWNER_NAME({\n                        name: it.uuid === userUuid\n                            ? 'User: Me'\n                            : `User: ${getUserDisplayName(it)}`,\n                        uuid: it.uuid\n                    }));\n                });\n                responseGroups.items.forEach(it => {\n                    api.dispatch<any>(ownerNameActions.SET_OWNER_NAME({ name: `Project: ${it.name}`, uuid: it.uuid }));\n                });\n                api.dispatch<any>(setBreadcrumbs([{ label: 'Projects', uuid: userUuid }]));\n                api.dispatch<any>(updateFavorites(response.items.map(item => item.uuid)));\n                api.dispatch<any>(updatePublicFavorites(response.items.map(item => item.uuid)));\n                if (response.itemsAvailable === 1) {\n                    api.dispatch<any>(replace(getNavUrl(response.items[0].uuid, api.getState().auth)));\n                } else {\n                    api.dispatch(resourcesActions.SET_RESOURCES(response.items));\n                    api.dispatch(collectionsContentAddressActions.SET_ITEMS({\n                        items: response.items.map((resource: any) => resource.uuid),\n                        itemsAvailable: response.itemsAvailable,\n                        page: Math.floor(response.offset / response.limit),\n                        rowsPerPage: response.limit\n                    }));\n                }\n            } catch (e) {\n                api.dispatch(collectionsContentAddressActions.SET_ITEMS({\n                    items: [],\n                    itemsAvailable: 0,\n                    page: 0,\n                    rowsPerPage: dataExplorer.rowsPerPage\n                }));\n                api.dispatch(couldNotFetchCollections());\n            } finally {\n                api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n            }\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        const pathname = state.router.location!.pathname;\n        const contentAddress = pathname.split('/')[2];\n\n        if (criteriaChanged) {\n            // Get itemsAvailable\n            return this.services.collectionService.list(getCountParams(dataExplorer, contentAddress))\n                .then((results: ListResults<CollectionResource>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(collectionsContentAddressActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        // The error snackbar action must be dispatched; calling the\n                        // action creator on its own is a no-op.\n                        api.dispatch(couldNotFetchItemsAvailable());\n                    }\n                });\n        }\n    }\n}\n\nconst getFilters = (dataExplorer: DataExplorer, contentAddress: string) => (\n    new FilterBuilder()\n        .addEqual('portable_data_hash', contentAddress)\n        .addILike(\"name\", dataExplorer.searchValue)\n        
.getFilters()\n);\n\nconst getParams = (dataExplorer: DataExplorer, contentAddress: string): ListArguments => ({\n    ...dataExplorerToListParams(dataExplorer),\n    filters: getFilters(dataExplorer, contentAddress),\n    order: getOrder<CollectionResource>(dataExplorer),\n    includeOldVersions: true,\n    count: 'none',\n});\n\nconst getCountParams = (dataExplorer: DataExplorer, contentAddress: string): ListArguments => ({\n    limit: 0,\n    count: 'exact',\n    filters: getFilters(dataExplorer, contentAddress),\n    includeOldVersions: true,\n});\n\nconst collectionPanelDataExplorerIsNotSet = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Collection panel is not ready.',\n        kind: SnackbarKind.ERROR\n    });\n\nconst couldNotFetchCollections = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch collection with this content address.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/collections-content-address-panel/collections-content-address-panel-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { bindDataExplorerActions } from 'store/data-explorer/data-explorer-action';\n\nexport const COLLECTIONS_CONTENT_ADDRESS_PANEL_ID = 'collectionsContentAddressPanel';\n\nexport const collectionsContentAddressActions = bindDataExplorerActions(COLLECTIONS_CONTENT_ADDRESS_PANEL_ID);\n\nexport const loadCollectionsContentAddressPanel = () =>\n    (dispatch: Dispatch) => {\n        dispatch(collectionsContentAddressActions.REQUEST_ITEMS());\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/context-menu/context-menu-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\nimport { ContextMenuPosition, ContextMenuKind, ContextMenuResource } from \"store/context-menu/context-menu\";\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { getResource } from \"../resources/resources\";\nimport { UserResource } from \"models/user\";\nimport { isSidePanelTreeCategory } from \"store/side-panel-tree/side-panel-tree-actions\";\nimport { extractUuidKind, ResourceKind, Resource } from \"models/resource\";\nimport { RepositoryResource } from \"models/repositories\";\nimport { SshKeyResource } from \"models/ssh-key\";\nimport { VirtualMachinesResource } from \"models/virtual-machines\";\nimport { KeepServiceResource } from \"models/keep-services\";\nimport { GroupContentsResource } from \"services/groups-service/groups-service\";\nimport { LinkResource } from \"models/link\";\nimport { ProjectResource } from \"models/project\";\nimport { Process } from \"store/processes/process\";\nimport { filterCollectionFilesBySelection } from \"store/collection-panel/collection-panel-files/collection-panel-files-state\";\nimport { selectOne, deselectAllOthers } from \"store/multiselect/multiselect-actions\";\nimport { ContainerRequestResource } from \"models/container-request\";\nimport { resourceToMenuKind } from \"common/resource-to-menu-kind\";\n\nexport const contextMenuActions = unionize({\n    OPEN_CONTEXT_MENU: ofType<{ position: ContextMenuPosition; resource: ContextMenuResource }>(),\n    CLOSE_CONTEXT_MENU: ofType<{}>(),\n});\n\nexport type ContextMenuAction = UnionOf<typeof contextMenuActions>;\n\nexport const isKeyboardClick = (event: React.MouseEvent<HTMLElement>) => event.nativeEvent.detail === 0;\n\nexport const openContextMenuAndSelect = (event: React.MouseEvent<HTMLElement>, resource: ContextMenuResource) => (dispatch: Dispatch) => {\n    event.preventDefault();\n    dispatch<any>(selectOne(resource.uuid));\n    dispatch<any>(deselectAllOthers(resource.uuid));\n    const { left, top } = event.currentTarget.getBoundingClientRect();\n    dispatch(\n        contextMenuActions.OPEN_CONTEXT_MENU({\n            position: {\n                x: event.clientX || left,\n                y: event.clientY || top,\n            },\n            resource,\n        })\n    );\n};\n\nconst openContextMenuOnly = (event: React.MouseEvent<HTMLElement>, resource: ContextMenuResource) => (dispatch: Dispatch) => {\n    event.preventDefault();\n    const { left, top } = event.currentTarget.getBoundingClientRect();\n    dispatch(\n        contextMenuActions.OPEN_CONTEXT_MENU({\n            position: {\n                x: event.clientX || left,\n                y: event.clientY || top,\n            },\n            resource,\n        })\n    );\n};\n\nexport const openContextMenuOnlyFromUuid = (event: React.MouseEvent<HTMLElement>, uuid: string) => (dispatch: Dispatch, getState: () => RootState) => {\n    const resource = getResource<GroupContentsResource>(uuid)(getState().resources);\n    const menuKind = dispatch<any>(resourceToMenuKind(uuid));\n    if (resource) {\n        dispatch<any>(\n            openContextMenuOnly(event, {\n                name: resource.name,\n                uuid: resource.uuid,\n                ownerUuid: resource.ownerUuid,\n                kind: resource.kind,\n                menuKind: menuKind,\n            })\n        );\n    }\n};\n\nexport 
const openCollectionFilesContextMenu =\n    (event: React.MouseEvent<HTMLElement>, isWritable: boolean) => (dispatch: Dispatch, getState: () => RootState) => {\n        const selectedCount = filterCollectionFilesBySelection(getState().collectionPanelFiles, true).length;\n        const multiple = selectedCount > 1;\n        dispatch<any>(\n            openContextMenuAndSelect(event, {\n                name: \"\",\n                uuid: \"\",\n                ownerUuid: \"\",\n                description: \"\",\n                kind: ResourceKind.COLLECTION,\n                menuKind:\n                    selectedCount > 0\n                        ? isWritable\n                            ? multiple\n                                ? ContextMenuKind.COLLECTION_FILES_MULTIPLE\n                                : ContextMenuKind.COLLECTION_FILES\n                            : multiple\n                            ? ContextMenuKind.READONLY_COLLECTION_FILES_MULTIPLE\n                            : ContextMenuKind.READONLY_COLLECTION_FILES\n                        : ContextMenuKind.COLLECTION_FILES_NOT_SELECTED,\n            })\n        );\n    };\n\nexport const openRepositoryContextMenu =\n    (event: React.MouseEvent<HTMLElement>, repository: RepositoryResource) => (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch<any>(\n            openContextMenuAndSelect(event, {\n                name: \"\",\n                uuid: repository.uuid,\n                ownerUuid: repository.ownerUuid,\n                kind: ResourceKind.REPOSITORY,\n                menuKind: ContextMenuKind.REPOSITORY,\n            })\n        );\n    };\n\nexport const openVirtualMachinesContextMenu =\n    (event: React.MouseEvent<HTMLElement>, repository: VirtualMachinesResource) => (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch<any>(\n            openContextMenuAndSelect(event, {\n                name: \"\",\n                uuid: repository.uuid,\n                ownerUuid: repository.ownerUuid,\n                kind: ResourceKind.VIRTUAL_MACHINE,\n                menuKind: ContextMenuKind.VIRTUAL_MACHINE,\n            })\n        );\n    };\n\nexport const openSshKeyContextMenu = (event: React.MouseEvent<HTMLElement>, sshKey: SshKeyResource) => (dispatch: Dispatch) => {\n    dispatch<any>(\n        openContextMenuAndSelect(event, {\n            name: \"\",\n            uuid: sshKey.uuid,\n            ownerUuid: sshKey.ownerUuid,\n            kind: ResourceKind.SSH_KEY,\n            menuKind: ContextMenuKind.SSH_KEY,\n        })\n    );\n};\n\nexport const openKeepServiceContextMenu = (event: React.MouseEvent<HTMLElement>, keepService: KeepServiceResource) => (dispatch: Dispatch) => {\n    dispatch<any>(\n        openContextMenuAndSelect(event, {\n            name: \"\",\n            uuid: keepService.uuid,\n            ownerUuid: keepService.ownerUuid,\n            kind: ResourceKind.KEEP_SERVICE,\n            menuKind: ContextMenuKind.KEEP_SERVICE,\n        })\n    );\n};\n\nexport const openApiClientAuthorizationContextMenu = (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => (dispatch: Dispatch) => {\n    dispatch<any>(\n        openContextMenuAndSelect(event, {\n            name: \"\",\n            uuid: resourceUuid,\n            ownerUuid: \"\",\n            kind: ResourceKind.API_CLIENT_AUTHORIZATION,\n            menuKind: ContextMenuKind.API_CLIENT_AUTHORIZATION,\n        })\n    );\n};\n\nexport const openRootProjectContextMenu =\n    (event: 
React.MouseEvent<HTMLElement>, projectUuid: string) => (dispatch: Dispatch, getState: () => RootState) => {\n        const res = getResource<UserResource>(projectUuid)(getState().resources);\n        if (res) {\n            dispatch<any>(\n                openContextMenuOnly(event, {\n                    name: \"\",\n                    uuid: res.uuid,\n                    ownerUuid: res.uuid,\n                    kind: res.kind,\n                    menuKind: ContextMenuKind.ROOT_PROJECT,\n                    isTrashed: false,\n                })\n            );\n        }\n    };\n\nexport const openProjectContextMenu =\n    (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => (dispatch: Dispatch, getState: () => RootState) => {\n        const res = getResource<GroupContentsResource>(resourceUuid)(getState().resources);\n        const menuKind = dispatch<any>(resourceToMenuKind(resourceUuid));\n        if (res && menuKind) {\n            dispatch<any>(\n                openContextMenuOnly(event, {\n                    name: res.name,\n                    uuid: res.uuid,\n                    kind: res.kind,\n                    menuKind,\n                    description: res.description,\n                    ownerUuid: res.ownerUuid,\n                    isTrashed: \"isTrashed\" in res ? res.isTrashed : false,\n                    isFrozen: !!(res as ProjectResource).frozenByUuid,\n                })\n            );\n        }\n    };\n\nexport const openSidePanelContextMenu = (event: React.MouseEvent<HTMLElement>, id: string) => (dispatch: Dispatch, getState: () => RootState) => {\n    if (!isSidePanelTreeCategory(id)) {\n        const kind = extractUuidKind(id);\n        if (kind === ResourceKind.USER) {\n            dispatch<any>(openRootProjectContextMenu(event, id));\n        } else if (kind === ResourceKind.PROJECT) {\n            dispatch<any>(openProjectContextMenu(event, id));\n        }\n    }\n};\n\nexport const openProcessContextMenu = (event: React.MouseEvent<HTMLElement>, process: Process) => (dispatch: Dispatch, getState: () => RootState) => {\n    const res = getResource<ContainerRequestResource>(process.containerRequest.uuid)(getState().resources);\n    const menuKind = dispatch<any>(resourceToMenuKind(process.containerRequest.uuid));\n    if (res && menuKind) {\n        dispatch<any>(\n            openContextMenuAndSelect(event, {\n                uuid: process.containerRequest.uuid,\n                ownerUuid: process.containerRequest.ownerUuid,\n                // kind carries the underlying resource kind; menuKind selects the menu variant.\n                kind: res.kind,\n                name: process.containerRequest.name,\n                description: process.containerRequest.description,\n                outputUuid: process.containerRequest.outputUuid || \"\",\n                workflowUuid: process.containerRequest.properties.template_uuid || \"\",\n                menuKind\n            })\n        );\n    }\n};\n\nexport const openPermissionEditContextMenu =\n    (event: React.MouseEvent<HTMLElement>, link: LinkResource) => (dispatch: Dispatch, getState: () => RootState) => {\n        if (link) {\n            dispatch<any>(\n                openContextMenuAndSelect(event, {\n                    name: link.name,\n                    uuid: link.uuid,\n                    kind: link.kind,\n                    menuKind: ContextMenuKind.PERMISSION_EDIT,\n                    ownerUuid: link.ownerUuid,\n                })\n            );\n        }\n    };\n\nexport const openUserContextMenu = (event: React.MouseEvent<HTMLElement>, user: UserResource) => 
(dispatch: Dispatch, getState: () => RootState) => {\n    dispatch<any>(\n        openContextMenuAndSelect(event, {\n            name: \"\",\n            uuid: user.uuid,\n            ownerUuid: user.ownerUuid,\n            kind: user.kind,\n            menuKind: ContextMenuKind.USER,\n        })\n    );\n};\n\nexport const openSearchResultsContextMenu =\n    (event: React.MouseEvent<HTMLElement>, uuid: string) => (dispatch: Dispatch, getState: () => RootState) => {\n        const res = getResource<Resource>(uuid)(getState().resources);\n        if (res) {\n            dispatch<any>(\n                openContextMenuAndSelect(event, {\n                    name: \"\",\n                    uuid: res.uuid,\n                    ownerUuid: \"\",\n                    kind: res.kind,\n                    menuKind: ContextMenuKind.SEARCH_RESULTS,\n                })\n            );\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/context-menu/context-menu-filters.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from \"store/store\";\nimport { ContextMenuResource } from \"store/context-menu/context-menu\";\nimport { getUserAccountStatus, UserAccountStatus } from \"store/users/users-actions\";\nimport { matchMyAccountRoute, matchUserProfileRoute } from \"routes/routes\";\n\nexport const isAdmin = (state: RootState, resource: ContextMenuResource) => {\n  return state.auth.user!.isAdmin;\n}\n\nexport const canActivateUser = (state: RootState, resource: ContextMenuResource) => {\n  const status = getUserAccountStatus(state, resource.uuid);\n  return status === UserAccountStatus.INACTIVE ||\n    status === UserAccountStatus.SETUP;\n};\n\nexport const canDeactivateUser = (state: RootState, resource: ContextMenuResource) => {\n  const status = getUserAccountStatus(state, resource.uuid);\n  return status === UserAccountStatus.SETUP ||\n    status === UserAccountStatus.ACTIVE;\n};\n\nexport const canSetupUser = (state: RootState, resource: ContextMenuResource) => {\n  const status = getUserAccountStatus(state, resource.uuid);\n  return status === UserAccountStatus.INACTIVE;\n};\n\nexport const needsUserProfileLink = (state: RootState, resource: ContextMenuResource) => (\n  state.router.location ?\n    !(matchUserProfileRoute(state.router.location.pathname)\n      || matchMyAccountRoute(state.router.location.pathname)\n    ) : true\n);\n\nexport const isOtherUser = (state: RootState, resource: ContextMenuResource) => {\n  return state.auth.user!.uuid !== resource.uuid;\n};\n"
  },
  {
    "path": "services/workbench2/src/store/context-menu/context-menu-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuState } from \"store/context-menu/context-menu\";\nimport { contextMenuActions, ContextMenuAction } from \"store/context-menu/context-menu-actions\";\n\nconst initialState = {\n    open: false,\n    position: { x: 0, y: 0 }\n};\n\nexport const contextMenuReducer = (state: ContextMenuState = initialState, action: ContextMenuAction) =>\n    contextMenuActions.match(action, {\n        default: () => state,\n        OPEN_CONTEXT_MENU: ({ resource, position }) => ({ open: true, resource, position }),\n        CLOSE_CONTEXT_MENU: () => ({ ...state, open: false })\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/context-menu/context-menu.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ResourceKind } from \"models/resource\";\n\nexport interface ContextMenuState {\n    open: boolean;\n    position: ContextMenuPosition;\n    resource?: ContextMenuResource;\n}\n\nexport interface ContextMenuPosition {\n    x: number;\n    y: number;\n}\n\nexport type ContextMenuResource = {\n    name: string;\n    uuid: string;\n    ownerUuid: string;\n    description?: string | null;\n    kind: ResourceKind;\n    menuKind: ContextMenuKind | string;\n    isTrashed?: boolean;\n    isEditable?: boolean;\n    outputUuid?: string;\n    workflowUuid?: string;\n    isAdmin?: boolean;\n    isFrozen?: boolean;\n    storageClassesDesired?: string[];\n    properties?: { [key: string]: string | string[]; };\n    isMulti?: boolean;\n};\n\nexport enum ContextMenuKind {\n    API_CLIENT_AUTHORIZATION = \"ApiClientAuthorization\",\n    COLLECTION = \"Collection\",\n    COLLECTION_ADMIN = \"CollectionAdmin\",\n    COLLECTION_DIRECTORY_ITEM = \"CollectionDirectoryItem\",\n    COLLECTION_FILE_ITEM = \"CollectionFileItem\",\n    COLLECTION_FILES = \"CollectionFiles\",\n    COLLECTION_FILES_MULTIPLE = \"CollectionFilesMultiple\",\n    COLLECTION_FILES_NOT_SELECTED = \"CollectionFilesNotSelected\",\n    EXTERNAL_CREDENTIAL = \"ExternalCredential\",\n    FAVORITE = \"Favorite\",\n    FILTER_GROUP = \"FilterGroup\",\n    FILTER_GROUP_ADMIN = \"FilterGroupAdmin\",\n    FROZEN_MANAGEABLE_PROJECT = \"FrozenManageableProject\",\n    FROZEN_PROJECT = \"FrozenProject\",\n    FROZEN_PROJECT_ADMIN = \"FrozenProjectAdmin\",\n    GROUPS = \"Group\",\n    BUILT_IN_GROUP = \"BuiltInGroup\",\n    GROUP_MEMBER = \"GroupMember\",\n    KEEP_SERVICE = \"KeepService\",\n    LINK = \"Link\",\n    MANAGEABLE_PROJECT = \"ManageableProject\",\n    MULTI = \"Multi\",\n    OLD_VERSION_COLLECTION = \"OldVersionCollection\",\n    PERMISSION_EDIT = \"PermissionEdit\",\n    PROCESS = \"Process\",\n    PROCESS_ADMIN = \"ProcessAdmin\",\n    PROCESS_LOGS = \"ProcessLogs\",\n    PROCESS_RESOURCE = \"ProcessResource\",\n    PROJECT = \"Project\",\n    PROJECT_ADMIN = \"ProjectAdmin\",\n    READONLY_COLLECTION = \"ReadOnlyCollection\",\n    READONLY_COLLECTION_DIRECTORY_ITEM = \"ReadOnlyCollectionDirectoryItem\",\n    READONLY_COLLECTION_FILE_ITEM = \"ReadOnlyCollectionFileItem\",\n    READONLY_COLLECTION_FILES = \"ReadOnlyCollectionFiles\",\n    READONLY_COLLECTION_FILES_MULTIPLE = \"ReadOnlyCollectionFilesMultiple\",\n    READONLY_PROCESS_RESOURCE = \"ReadOnlyProcessResource\",\n    READONLY_PROJECT = \"ReadOnlyProject\",\n    READONLY_WORKFLOW = \"ReadOnlyWorkflow\",\n    REPOSITORY = \"Repository\",\n    RESOURCE = \"Resource\",\n    ROOT_PROJECT = \"RootProject\",\n    ROOT_PROJECT_ADMIN = \"RootProjectAdmin\",\n    RUNNING_PROCESS_ADMIN = \"RunningProcessAdmin\",\n    RUNNING_PROCESS_RESOURCE = \"RunningProcessResource\",\n    SEARCH_RESULTS = \"SearchResults\",\n    SSH_KEY = \"SshKey\",\n    TRASH = \"Trash\",\n    TRASHED_COLLECTION = \"TrashedCollection\",\n    USER = \"User\",\n    USER_DETAILS = \"UserDetails\",\n    VIRTUAL_MACHINE = \"VirtualMachine\",\n    WORKFLOW = \"Workflow\",\n    WRITEABLE_COLLECTION = \"WriteableCollection\",\n    WRITEABLE_PROJECT = \"WriteableProject\"\n}\n"
  },
  {
    "path": "services/workbench2/src/store/copy-dialog/copy-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport interface CopyFormDialogData {\n    name: string;\n    uuid: string;\n    ownerUuid: string;\n    isSingleResource?: boolean;\n}\n"
  },
  {
    "path": "services/workbench2/src/store/data-explorer/data-explorer-action.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\nimport { DataColumns } from \"components/data-table/data-column\";\nimport { DataTableFetchMode } from \"components/data-table/data-table\";\nimport { DataTableFilters } from \"components/data-table-filters/data-table-filters\";\nimport { SnackbarKind, snackbarActions } from \"store/snackbar/snackbar-actions\";\n\nexport enum DataTableRequestState {\n    IDLE,\n    PENDING,\n    NEED_REFRESH,\n}\n\nexport const dataExplorerActions = unionize({\n    CLEAR: ofType<{ id: string }>(),\n    RESET_PAGINATION: ofType<{ id: string }>(),\n    SET_LOADING_ITEMS_AVAILABLE: ofType<{ id: string, loadingItemsAvailable: boolean }>(),\n    SET_ITEMS_AVAILABLE: ofType<{ id: string, itemsAvailable: number }>(),\n    RESET_ITEMS_AVAILABLE: ofType<{ id: string }>(),\n    REQUEST_ITEMS: ofType<{ id: string; criteriaChanged?: boolean, background?: boolean }>(),\n    REQUEST_COUNT: ofType<{ id: string; criteriaChanged?: boolean, background?: boolean }>(),\n    REQUEST_STATE: ofType<{ id: string; criteriaChanged?: boolean }>(),\n    SET_FETCH_MODE: ofType<{ id: string; fetchMode: DataTableFetchMode }>(),\n    SET_COLUMNS: ofType<{ id: string; columns: DataColumns<any, any> }>(),\n    SET_FILTERS: ofType<{ id: string; columnName: string; filters: DataTableFilters }>(),\n    SET_WORKING: ofType<{ id: string, working: boolean }>(),\n    SET_ITEMS: ofType<{ id: string; items: any[]; page: number; rowsPerPage: number; itemsAvailable?: number }>(),\n    APPEND_ITEMS: ofType<{ id: string; items: any[]; page: number; rowsPerPage: number; itemsAvailable?: number }>(),\n    SET_PAGE: ofType<{ id: string; page: number }>(),\n    SET_ROWS_PER_PAGE: ofType<{ id: string; rowsPerPage: number }>(),\n    TOGGLE_COLUMN: ofType<{ id: string; columnName: string }>(),\n    TOGGLE_SORT: ofType<{ id: string; columnName: string }>(),\n    SET_EXPLORER_SEARCH_VALUE: ofType<{ id: string; searchValue: string }>(),\n    RESET_EXPLORER_SEARCH_VALUE: ofType<{ id: string }>(),\n    SET_REQUEST_STATE: ofType<{ id: string; requestState: DataTableRequestState }>(),\n    SET_COUNT_REQUEST_STATE: ofType<{ id: string; countRequestState: DataTableRequestState }>(),\n    SET_IS_NOT_FOUND: ofType<{ id: string; isNotFound: boolean }>(),\n});\n\nexport type DataExplorerAction = UnionOf<typeof dataExplorerActions>;\n\nexport const bindDataExplorerActions = (id: string) => ({\n    CLEAR: () => dataExplorerActions.CLEAR({ id }),\n    RESET_PAGINATION: () => dataExplorerActions.RESET_PAGINATION({ id }),\n    SET_LOADING_ITEMS_AVAILABLE: (loadingItemsAvailable: boolean) => dataExplorerActions.SET_LOADING_ITEMS_AVAILABLE({ id, loadingItemsAvailable }),\n    SET_ITEMS_AVAILABLE: (itemsAvailable: number) => dataExplorerActions.SET_ITEMS_AVAILABLE({ id, itemsAvailable }),\n    RESET_ITEMS_AVAILABLE: () => dataExplorerActions.RESET_ITEMS_AVAILABLE({ id }),\n    REQUEST_ITEMS: (criteriaChanged?: boolean, background?: boolean) => dataExplorerActions.REQUEST_ITEMS({ id, criteriaChanged, background }),\n    REQUEST_COUNT: (criteriaChanged?: boolean, background?: boolean) => dataExplorerActions.REQUEST_COUNT({ id, criteriaChanged, background }),\n    SET_FETCH_MODE: (payload: { fetchMode: DataTableFetchMode }) => dataExplorerActions.SET_FETCH_MODE({ ...payload, id }),\n    SET_COLUMNS: (payload: { columns: DataColumns<any, any> }) => dataExplorerActions.SET_COLUMNS({ ...payload, id }),\n  
  SET_FILTERS: (payload: { columnName: string; filters: DataTableFilters }) => dataExplorerActions.SET_FILTERS({ ...payload, id }),\n    SET_WORKING: (working: boolean) => dataExplorerActions.SET_WORKING({ id, working }),\n    SET_ITEMS: (payload: { items: any[]; page: number; rowsPerPage: number; itemsAvailable?: number }) =>\n        dataExplorerActions.SET_ITEMS({ ...payload, id }),\n    APPEND_ITEMS: (payload: { items: any[]; page: number; rowsPerPage: number; itemsAvailable?: number }) =>\n        dataExplorerActions.APPEND_ITEMS({ ...payload, id }),\n    SET_PAGE: (payload: { page: number }) => dataExplorerActions.SET_PAGE({ ...payload, id }),\n    SET_ROWS_PER_PAGE: (payload: { rowsPerPage: number }) => dataExplorerActions.SET_ROWS_PER_PAGE({ ...payload, id }),\n    TOGGLE_COLUMN: (payload: { columnName: string }) => dataExplorerActions.TOGGLE_COLUMN({ ...payload, id }),\n    TOGGLE_SORT: (payload: { columnName: string }) => dataExplorerActions.TOGGLE_SORT({ ...payload, id }),\n    SET_EXPLORER_SEARCH_VALUE: (payload: { searchValue: string }) => dataExplorerActions.SET_EXPLORER_SEARCH_VALUE({ ...payload, id }),\n    RESET_EXPLORER_SEARCH_VALUE: () => dataExplorerActions.RESET_EXPLORER_SEARCH_VALUE({ id }),\n    SET_REQUEST_STATE: (payload: { requestState: DataTableRequestState }) => dataExplorerActions.SET_REQUEST_STATE({ ...payload, id }),\n    SET_COUNT_REQUEST_STATE: (payload: { countRequestState: DataTableRequestState }) => dataExplorerActions.SET_COUNT_REQUEST_STATE({ ...payload, id }),\n    SET_IS_NOT_FOUND: (payload: { isNotFound: boolean }) => dataExplorerActions.SET_IS_NOT_FOUND({ ...payload, id }),\n});\n\nexport type BoundDataExplorerActions = ReturnType<typeof bindDataExplorerActions>;\n\nexport const couldNotFetchItemsAvailable = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: \"Could not fetch total items.\",\n        kind: SnackbarKind.ERROR,\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/data-explorer/data-explorer-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, MiddlewareAPI } from 'redux';\nimport { RootState } from '../store';\nimport { DataExplorer, getSortColumn } from './data-explorer-reducer';\nimport { ListResults } from 'services/common-service/common-service';\nimport { createTree } from 'models/tree';\nimport { DataTableFilters } from 'components/data-table-filters/data-table-filters';\nimport { OrderBuilder, OrderDirection } from 'services/api/order-builder';\nimport { DataColumns, SortDirection } from 'components/data-table/data-column';\nimport { Resource } from 'models/resource';\n\nexport abstract class DataExplorerMiddlewareService {\n    protected readonly id: string;\n\n    protected constructor(id: string) {\n        this.id = id;\n    }\n\n    public getId() {\n        return this.id;\n    }\n\n    public getColumnFilters<T>(\n        columns: DataColumns<T, any>,\n        columnName: string\n    ): DataTableFilters {\n        return getDataExplorerColumnFilters(columns, columnName);\n    }\n\n    /**\n     * Consumers can use this method to request\n     * total count separately and in parallel\n     * @param api\n     * @param criteriaChanged\n     * @param background\n     */\n    abstract requestCount(\n        api: MiddlewareAPI<Dispatch, RootState>,\n        criteriaChanged?: boolean,\n        background?: boolean\n    ): Promise<void>;\n\n    abstract requestItems(\n        api: MiddlewareAPI<Dispatch, RootState>,\n        criteriaChanged?: boolean,\n        background?: boolean\n    ): Promise<void>;\n}\n\nexport const getDataExplorerColumnFilters = <T>(\n    columns: DataColumns<T, any>,\n    columnName: string\n): DataTableFilters => {\n    const column = columns.find((c) => c.name === columnName);\n    return column ? column.filters : createTree();\n};\n\nexport const dataExplorerToListParams = (dataExplorer: DataExplorer) => ({\n    limit: dataExplorer.rowsPerPage,\n    offset: dataExplorer.page * dataExplorer.rowsPerPage,\n});\n\nexport const getOrder = <T extends Resource = Resource>(dataExplorer: DataExplorer) => {\n    const sortColumn = getSortColumn<T>(dataExplorer);\n    const order = new OrderBuilder<T>();\n    if (sortColumn && sortColumn.sort) {\n        const sortDirection = sortColumn.sort.direction === SortDirection.ASC\n            ? OrderDirection.ASC\n            : OrderDirection.DESC;\n\n        // Use createdAt as a secondary sort column so we break ties consistently.\n        return order\n            .addOrder(sortDirection, sortColumn.sort.field)\n            .addOrder(OrderDirection.DESC, \"createdAt\")\n            .getOrder();\n    } else {\n        return order.getOrder();\n    }\n};\n\nexport type DataExplorerMeta = {\n    itemsAvailable?: number;\n    page: number;\n    rowsPerPage: number;\n}\n\nexport const listResultsToDataExplorerItemsMeta = <R>({\n    itemsAvailable,\n    offset,\n    limit,\n}: ListResults<R>): DataExplorerMeta => ({\n    itemsAvailable,\n    page: Math.floor(offset / limit),\n    rowsPerPage: limit,\n});\n"
  },
  {
    "path": "services/workbench2/src/store/data-explorer/data-explorer-middleware.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { DataExplorerMiddlewareService } from \"./data-explorer-middleware-service\";\nimport { dataExplorerMiddleware } from \"./data-explorer-middleware\";\nimport { dataExplorerActions } from \"./data-explorer-action\";\nimport { SortDirection } from \"components/data-table/data-column\";\nimport { createTree } from 'models/tree';\n\ndescribe(\"DataExplorerMiddleware\", () => {\n\n    it(\"handles only actions that are identified by service id\", () => {\n        const config = {\n            id: \"ServiceId\",\n            columns: [{\n                name: \"Column\",\n                selected: true,\n                configurable: false,\n                sortDirection: SortDirection.NONE,\n                filters: createTree(),\n                render: cy.stub()\n            }],\n            requestItems: cy.stub(),\n            setApi: cy.stub()\n        };\n        const service = new ServiceMock(config);\n        const api = {\n            getState: cy.stub(),\n            dispatch: cy.stub()\n        };\n        const next = cy.stub();\n        const middleware = dataExplorerMiddleware(service)(api)(next);\n        middleware(dataExplorerActions.SET_PAGE({ id: \"OtherId\", page: 0 }));\n        middleware(dataExplorerActions.SET_PAGE({ id: \"ServiceId\", page: 0 }));\n        middleware(dataExplorerActions.SET_PAGE({ id: \"OtherId\", page: 0 }));\n        expect(api.dispatch).to.be.calledWithMatch(dataExplorerActions.REQUEST_ITEMS({ id: \"ServiceId\", criteriaChanged: false }));\n        expect(api.dispatch).to.be.calledOnce;\n    });\n\n    it(\"handles REQUEST_ITEMS action\", () => {\n        const config = {\n            id: \"ServiceId\",\n            columns: [{\n                name: \"Column\",\n                selected: true,\n                configurable: false,\n                sortDirection: SortDirection.NONE,\n                filters: createTree(),\n                render: cy.stub()\n            }],\n            requestItems: cy.stub(),\n            setApi: cy.stub()\n        };\n        const service = new ServiceMock(config);\n        const api = {\n            getState: cy.stub(),\n            dispatch: cy.stub()\n        };\n        const next = cy.stub();\n        const middleware = dataExplorerMiddleware(service)(api)(next);\n        middleware(dataExplorerActions.REQUEST_ITEMS({ id: \"ServiceId\" }));\n        expect(api.dispatch).to.be.calledOnce;\n    });\n\n    it(\"handles SET_PAGE action\", () => {\n        const config = {\n            id: \"ServiceId\",\n            columns: [],\n            requestItems: cy.stub(),\n            setApi: cy.stub()\n        };\n        const service = new ServiceMock(config);\n        const api = {\n            getState: cy.stub(),\n            dispatch: cy.stub()\n        };\n        const next = cy.stub();\n        const middleware = dataExplorerMiddleware(service)(api)(next);\n        middleware(dataExplorerActions.SET_PAGE({ id: service.getId(), page: 0 }));\n        expect(api.dispatch).to.be.calledOnce;\n    });\n\n    it(\"handles SET_ROWS_PER_PAGE action\", () => {\n        const config = {\n            id: \"ServiceId\",\n            columns: [],\n            requestItems: cy.stub(),\n            setApi: cy.stub()\n        };\n        const service = new ServiceMock(config);\n        const api = {\n            getState: cy.stub(),\n            dispatch: cy.stub()\n        };\n        const next = 
cy.stub();\n        const middleware = dataExplorerMiddleware(service)(api)(next);\n        middleware(dataExplorerActions.SET_ROWS_PER_PAGE({ id: service.getId(), rowsPerPage: 0 }));\n        expect(api.dispatch).to.be.calledOnce;\n    });\n\n    it(\"handles SET_FILTERS action\", () => {\n        const config = {\n            id: \"ServiceId\",\n            columns: [],\n            requestItems: cy.stub(),\n            setApi: cy.stub()\n        };\n        const service = new ServiceMock(config);\n        const api = {\n            getState: cy.stub(),\n            dispatch: cy.stub()\n        };\n        const next = cy.stub();\n        const middleware = dataExplorerMiddleware(service)(api)(next);\n        middleware(dataExplorerActions.SET_FILTERS({ id: service.getId(), columnName: \"\", filters: createTree() }));\n        expect(api.dispatch).to.be.calledThrice;\n    });\n\n    it(\"handles TOGGLE_SORT action\", () => {\n        const config = {\n            id: \"ServiceId\",\n            columns: [],\n            requestItems: cy.stub(),\n            setApi: cy.stub()\n        };\n        const service = new ServiceMock(config);\n        const api = {\n            getState: cy.stub(),\n            dispatch: cy.stub()\n        };\n        const next = cy.stub();\n        const middleware = dataExplorerMiddleware(service)(api)(next);\n        middleware(dataExplorerActions.TOGGLE_SORT({ id: service.getId(), columnName: \"\" }));\n        expect(api.dispatch).to.be.calledOnce;\n    });\n\n    it(\"handles SET_SEARCH_VALUE action\", () => {\n        const config = {\n            id: \"ServiceId\",\n            columns: [],\n            requestItems: cy.stub(),\n            setApi: cy.stub()\n        };\n        const service = new ServiceMock(config);\n        const api = {\n            getState: cy.stub(),\n            dispatch: cy.stub()\n        };\n        const next = cy.stub();\n        const middleware = dataExplorerMiddleware(service)(api)(next);\n        middleware(dataExplorerActions.SET_EXPLORER_SEARCH_VALUE({ id: service.getId(), searchValue: \"\" }));\n        expect(api.dispatch).to.be.calledThrice;\n    });\n\n    it(\"forwards other actions\", () => {\n        const config = {\n            id: \"ServiceId\",\n            columns: [],\n            requestItems: cy.stub(),\n            setApi: cy.stub()\n        };\n        const service = new ServiceMock(config);\n        const api = {\n            getState: cy.stub(),\n            dispatch: cy.stub()\n        };\n        const next = cy.stub();\n        const middleware = dataExplorerMiddleware(service)(api)(next);\n        middleware(dataExplorerActions.SET_COLUMNS({ id: service.getId(), columns: [] }));\n        middleware(dataExplorerActions.SET_ITEMS({ id: service.getId(), items: [], rowsPerPage: 0, itemsAvailable: 0, page: 0 }));\n        middleware(dataExplorerActions.TOGGLE_COLUMN({ id: 
service.getId(), columnName: \"\" }));\n        expect(api.dispatch).to.not.be.called;\n        expect(next).to.be.calledThrice;\n    });\n\n});\n\nclass ServiceMock extends DataExplorerMiddlewareService {\n    constructor(config) {\n        super(config.id);\n        // Keep the config so getColumns/requestItems below can reach it.\n        this.config = config;\n    }\n\n    getColumns() {\n        return this.config.columns;\n    }\n\n    requestItems(api) {\n        this.config.requestItems(api);\n        return Promise.resolve();\n    }\n\n    async requestCount() {}\n}\n"
  },
  {
    "path": "services/workbench2/src/store/data-explorer/data-explorer-middleware.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from 'services/services';\nimport { Middleware } from 'redux';\nimport {\n    dataExplorerActions,\n    bindDataExplorerActions,\n    DataTableRequestState,\n    couldNotFetchItemsAvailable,\n} from './data-explorer-action';\nimport { getDataExplorer } from './data-explorer-reducer';\nimport { DataExplorerMiddlewareService } from './data-explorer-middleware-service';\n\nexport const dataExplorerMiddleware =\n    (service: DataExplorerMiddlewareService): Middleware =>\n        (api) =>\n            (next) => {\n                const actions = bindDataExplorerActions(service.getId());\n\n                return (action) => {\n                    const handleAction =\n                        <T extends { id: string }>(handler: (data: T) => void) =>\n                            (data: T) => {\n                                next(action);\n                                if (data.id === service.getId()) {\n                                    handler(data);\n                                }\n                            };\n                    dataExplorerActions.match(action, {\n                        SET_PAGE: handleAction(() => {\n                            api.dispatch(actions.REQUEST_ITEMS(false));\n                        }),\n                        SET_ROWS_PER_PAGE: handleAction(() => {\n                            api.dispatch(actions.REQUEST_ITEMS(true));\n                        }),\n                        SET_FILTERS: handleAction(() => {\n                            api.dispatch(actions.RESET_PAGINATION());\n                            api.dispatch(actions.SET_LOADING_ITEMS_AVAILABLE(true));\n                            api.dispatch(actions.REQUEST_ITEMS(true));\n                        }),\n                        TOGGLE_SORT: handleAction(() => {\n                            api.dispatch(actions.REQUEST_ITEMS(true));\n                        }),\n                        SET_EXPLORER_SEARCH_VALUE: handleAction(() => {\n                            api.dispatch(actions.RESET_PAGINATION());\n                            api.dispatch(actions.SET_LOADING_ITEMS_AVAILABLE(true));\n                            api.dispatch(actions.REQUEST_ITEMS(true));\n                        }),\n                        REQUEST_ITEMS: handleAction(({ criteriaChanged = true, background }) => {\n                            api.dispatch<any>(async (\n                                dispatch: Dispatch,\n                                getState: () => RootState,\n                                services: ServiceRepository\n                            ) => {\n                                if (!background) { api.dispatch(actions.SET_WORKING(true)); }\n                                while (true) {\n                                    let de = getDataExplorer(\n                                        getState().dataExplorer,\n                                        service.getId()\n                                    );\n                                    switch (de.requestState) {\n                                        case DataTableRequestState.IDLE:\n                                            // Start a new request.\n                                            try {\n                                                dispatch(\n                                                    
actions.SET_REQUEST_STATE({\n                                                        requestState: DataTableRequestState.PENDING,\n                                                    })\n                                                );\n\n                                                // Fetch results\n                                                const result = service.requestItems(api, criteriaChanged, background);\n\n                                                // If criteria changed, fire off a count request\n                                                if (criteriaChanged) {\n                                                    dispatch(actions.REQUEST_COUNT(criteriaChanged, background));\n                                                }\n\n                                                // Await results\n                                                await result;\n\n                                            } catch {\n                                                dispatch(\n                                                    actions.SET_REQUEST_STATE({\n                                                        requestState: DataTableRequestState.NEED_REFRESH,\n                                                    })\n                                                );\n                                            }\n                                            // Now check if the state is still PENDING, if it moved to NEED_REFRESH\n                                            // then we need to reissue requestItems\n                                            de = getDataExplorer(\n                                                getState().dataExplorer,\n                                                service.getId()\n                                            );\n                                            const complete =\n                                                de.requestState === DataTableRequestState.PENDING;\n                                            dispatch(\n                                                actions.SET_REQUEST_STATE({\n                                                    requestState: DataTableRequestState.IDLE,\n                                                })\n                                            );\n                                            if (complete) {\n                                                api.dispatch(actions.SET_WORKING(false));\n                                                return;\n                                            }\n                                            break;\n                                        case DataTableRequestState.PENDING:\n                                            // State is PENDING, move it to NEED_REFRESH so that when the current request finishes it starts a new one.\n                                            if (!background) {\n                                                // Background refreshes are exempt from this behavior\n                                                // because the data will already be up to date when the current request finishes\n                                                // and to prevent refreshes from prolonging loading indicators of a non-background refresh\n                                                dispatch(\n                                                    actions.SET_REQUEST_STATE({\n                                                        requestState: DataTableRequestState.NEED_REFRESH,\n                                 
                   })\n                                                );\n                                            }\n                                            return;\n                                        case DataTableRequestState.NEED_REFRESH:\n                                            // Nothing to do right now.\n                                            return;\n                                    }\n                                }\n                            });\n                        }),\n                        REQUEST_COUNT: handleAction(({ criteriaChanged = true, background }) => {\n                            api.dispatch<any>(async (\n                                dispatch: Dispatch,\n                                getState: () => RootState,\n                                services: ServiceRepository\n                            ) => {\n                                while (true) {\n                                    let de = getDataExplorer(\n                                        getState().dataExplorer,\n                                        service.getId()\n                                    );\n                                    switch (de.countRequestState) {\n                                        case DataTableRequestState.IDLE:\n                                            // Start new count request\n                                            dispatch(\n                                                actions.SET_COUNT_REQUEST_STATE({\n                                                    countRequestState: DataTableRequestState.PENDING,\n                                                })\n                                            );\n\n                                            // Enable loading indicator on non-background fetches\n                                            if (!background) {\n                                                api.dispatch<any>(\n                                                    dataExplorerActions.SET_LOADING_ITEMS_AVAILABLE({\n                                                        id: service.getId(),\n                                                        loadingItemsAvailable: true\n                                                    })\n                                                );\n                                            }\n\n                                            // Fetch count\n                                            await service.requestCount(api, criteriaChanged, background)\n                                                .catch(() => {\n                                                    // Show error toast if count fetch failed; the\n                                                    // action must be dispatched to take effect.\n                                                    api.dispatch(couldNotFetchItemsAvailable());\n                                                })\n                                                .finally(() => {\n                                                    // Turn off itemsAvailable loading indicator when done\n                                                    api.dispatch<any>(\n                                                        dataExplorerActions.SET_LOADING_ITEMS_AVAILABLE({\n                                                            id: service.getId(),\n                                                            loadingItemsAvailable: false\n                                                        })\n                                                    );\n                                                });\n\n                             
               // Now check if the state is still PENDING, if it moved to NEED_REFRESH\n                                            // then we need to reissue requestCount\n                                            de = getDataExplorer(\n                                                getState().dataExplorer,\n                                                service.getId()\n                                            );\n                                            const complete =\n                                                de.countRequestState === DataTableRequestState.PENDING;\n                                            dispatch(\n                                                actions.SET_COUNT_REQUEST_STATE({\n                                                    countRequestState: DataTableRequestState.IDLE,\n                                                })\n                                            );\n                                            if (complete) {\n                                                return;\n                                            }\n                                            break;\n                                        case DataTableRequestState.PENDING:\n                                            // State is PENDING, move it to NEED_REFRESH so that when the current request finishes it starts a new one.\n                                            dispatch(\n                                                actions.SET_COUNT_REQUEST_STATE({\n                                                    countRequestState: DataTableRequestState.NEED_REFRESH,\n                                                })\n                                            );\n                                            return;\n                                        case DataTableRequestState.NEED_REFRESH:\n                                            // Nothing to do right now.\n                                            return;\n                                    }\n                                }\n                            });\n                        }),\n                        default: () => next(action),\n                    });\n                };\n            };\n"
  },
  {
    "path": "services/workbench2/src/store/data-explorer/data-explorer-reducer.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { dataExplorerReducer, initialDataExplorer } from \"./data-explorer-reducer\";\nimport { dataExplorerActions, DataTableRequestState } from \"./data-explorer-action\";\nimport { DataTableFilterItem } from \"../../components/data-table-filters/data-table-filters\";\nimport { DataColumns } from \"../../components/data-table/data-table\";\nimport { SortDirection } from \"../../components/data-table/data-column\";\n\ndescribe('data-explorer-reducer', () => {\n    it('should set columns', () => {\n        const columns = [{\n            name: \"Column 1\",\n            filters: [],\n            render: cy.stub(),\n            selected: true,\n            configurable: true,\n            sort: {direction: SortDirection.NONE, field: \"name\"}\n        }];\n        const state = dataExplorerReducer(undefined,\n            dataExplorerActions.SET_COLUMNS({ id: \"Data explorer\", columns }));\n        expect(state[\"Data explorer\"].columns).to.equal(columns);\n    });\n\n    it('should toggle sorting', () => {\n        const columns = [{\n            name: \"Column 1\",\n            filters: [],\n            render: cy.stub(),\n            selected: true,\n            sort: {direction: SortDirection.ASC, field: \"name\"},\n            configurable: true\n        }, {\n            name: \"Column 2\",\n            filters: [],\n            render: cy.stub(),\n            selected: true,\n            configurable: true,\n            sort: {direction: SortDirection.NONE, field: \"name\"},\n        }];\n        const state = dataExplorerReducer({ \"Data explorer\": { ...initialDataExplorer, columns } },\n            dataExplorerActions.TOGGLE_SORT({ id: \"Data explorer\", columnName: \"Column 2\" }));\n        expect(state[\"Data explorer\"].columns[0].sort.direction).to.equal(\"none\");\n        expect(state[\"Data explorer\"].columns[1].sort.direction).to.equal(\"asc\");\n    });\n\n    it('should set filters', () => {\n        const columns = [{\n            name: \"Column 1\",\n            filters: [],\n            render: cy.stub(),\n            selected: true,\n            configurable: true,\n            sort: {direction: SortDirection.NONE, field: \"name\"}\n        }];\n\n        const filters = [{\n            name: \"Filter 1\",\n            selected: true\n        }];\n        const state = dataExplorerReducer({ \"Data explorer\": { ...initialDataExplorer, columns } },\n            dataExplorerActions.SET_FILTERS({ id: \"Data explorer\", columnName: \"Column 1\", filters }));\n        expect(state[\"Data explorer\"].columns[0].filters).to.equal(filters);\n    });\n\n    it('should set items', () => {\n        const items = [\"Item 1\", \"Item 2\"];\n        let state = dataExplorerReducer({},\n            dataExplorerActions.SET_REQUEST_STATE({\n                id: \"Data explorer\",\n                requestState: DataTableRequestState.PENDING\n            }));\n        state = dataExplorerReducer(state,\n            dataExplorerActions.SET_ITEMS({\n                id: \"Data explorer\",\n                items: items,\n                page: 0,\n                rowsPerPage: 10,\n                itemsAvailable: 100\n            }));\n        expect(state[\"Data explorer\"].items).to.equal(items);\n    });\n\n    it('should set page', () => {\n        const state = dataExplorerReducer({},\n            dataExplorerActions.SET_PAGE({ id: \"Data explorer\", page: 2 }));\n   
     expect(state[\"Data explorer\"].page).to.equal(2);\n    });\n\n    it('should set rows per page', () => {\n        const state = dataExplorerReducer({},\n            dataExplorerActions.SET_ROWS_PER_PAGE({ id: \"Data explorer\", rowsPerPage: 5 }));\n        expect(state[\"Data explorer\"].rowsPerPage).to.equal(5);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/store/data-explorer/data-explorer-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport {\n    DataColumn,\n    resetSortDirection,\n    SortDirection,\n    toggleSortDirection,\n    DataColumns,\n} from 'components/data-table/data-column';\nimport {\n    DataExplorerAction,\n    dataExplorerActions,\n    DataTableRequestState,\n} from './data-explorer-action';\nimport {\n    DataTableFetchMode,\n} from 'components/data-table/data-table';\nimport { DataTableFilters } from 'components/data-table-filters/data-table-filters';\n\nexport interface DataExplorer {\n    fetchMode: DataTableFetchMode;\n    columns: DataColumns<any, any>;\n    items: any[];\n    itemsAvailable: number;\n    loadingItemsAvailable: boolean;\n    page: number;\n    rowsPerPage: number;\n    rowsPerPageOptions: number[];\n    searchValue: string;\n    working?: boolean;\n    requestState: DataTableRequestState;\n    countRequestState: DataTableRequestState;\n    isNotFound: boolean;\n}\n\nexport const initialDataExplorer: DataExplorer = {\n    fetchMode: DataTableFetchMode.PAGINATED,\n    columns: [],\n    items: [],\n    itemsAvailable: 0,\n    loadingItemsAvailable: false,\n    page: 0,\n    rowsPerPage: 50,\n    rowsPerPageOptions: [10, 20, 50, 100, 200, 500],\n    searchValue: '',\n    requestState: DataTableRequestState.IDLE,\n    countRequestState: DataTableRequestState.IDLE,\n    isNotFound: false,\n};\n\nexport type DataExplorerState = Record<string, DataExplorer>;\n\nexport const dataExplorerReducer = (\n    state: DataExplorerState = {},\n    action: DataExplorerAction\n) => {\n    return dataExplorerActions.match(action, {\n        CLEAR: ({ id }) =>\n            update(state, id, (explorer) => ({\n                ...explorer,\n                page: 0,\n                itemsAvailable: 0,\n                items: [],\n            })),\n\n        RESET_PAGINATION: ({ id }) =>\n            update(state, id, (explorer) => ({ ...explorer, page: 0 })),\n\n        SET_FETCH_MODE: ({ id, fetchMode }) =>\n            update(state, id, (explorer) => ({ ...explorer, fetchMode })),\n\n        SET_COLUMNS: ({ id, columns }) => update(state, id, setColumns(columns)),\n\n        SET_FILTERS: ({ id, columnName, filters }) =>\n            update(state, id, mapColumns(setFilters(columnName, filters))),\n\n\n        SET_WORKING: ({ id, working }) =>\n            update(state, id, (explorer) => ({\n                ...explorer,\n                working,\n            })),\n\n        SET_ITEMS: ({ id, items, itemsAvailable, page, rowsPerPage }) => (\n            update(state, id, (explorer) => {\n                // Reject updates to pages other than current,\n                //  DataExplorer middleware should retry\n                // Also reject update if DE is pending, reduces flicker and appearance of race\n                const updatedPage = page || 0;\n                if (explorer.page === updatedPage && explorer.requestState === DataTableRequestState.PENDING) {\n                    return {\n                        ...explorer,\n                        items,\n                        itemsAvailable: itemsAvailable || explorer.itemsAvailable,\n                        page: updatedPage,\n                        rowsPerPage,\n                    }\n                } else {\n                    return explorer;\n                }\n            })\n        ),\n\n        SET_LOADING_ITEMS_AVAILABLE: ({ id, loadingItemsAvailable }) =>\n            update(state, id, (explorer) => ({\n              
  ...explorer,\n                loadingItemsAvailable,\n            })),\n\n        SET_ITEMS_AVAILABLE: ({ id, itemsAvailable }) =>\n            update(state, id, (explorer) => {\n                // Ignore itemsAvailable updates if another countRequest is requested\n                if (explorer.countRequestState === DataTableRequestState.PENDING) {\n                    return {\n                        ...explorer,\n                        itemsAvailable,\n                        loadingItemsAvailable: false,\n                    };\n                } else {\n                    return explorer;\n                }\n            }),\n\n        RESET_ITEMS_AVAILABLE: ({ id }) =>\n            update(state, id, (explorer) => ({ ...explorer, itemsAvailable: 0 })),\n\n        APPEND_ITEMS: ({ id, items, itemsAvailable, page, rowsPerPage }) =>\n            update(state, id, (explorer) => ({\n                ...explorer,\n                items: explorer.items.concat(items),\n                itemsAvailable: explorer.itemsAvailable + (itemsAvailable || 0),\n                page,\n                rowsPerPage,\n            })),\n\n        SET_PAGE: ({ id, page }) =>\n            update(state, id, (explorer) => ({ ...explorer, page })),\n\n        SET_ROWS_PER_PAGE: ({ id, rowsPerPage }) =>\n            update(state, id, (explorer) => ({ ...explorer, rowsPerPage })),\n\n        SET_EXPLORER_SEARCH_VALUE: ({ id, searchValue }) =>\n            update(state, id, (explorer) => ({ ...explorer, searchValue })),\n\n        RESET_EXPLORER_SEARCH_VALUE: ({ id }) =>\n            update(state, id, (explorer) => ({ ...explorer, searchValue: '' })),\n\n        SET_REQUEST_STATE: ({ id, requestState }) =>\n            update(state, id, (explorer) => ({ ...explorer, requestState })),\n\n        SET_COUNT_REQUEST_STATE: ({ id, countRequestState }) =>\n            update(state, id, (explorer) => ({ ...explorer, countRequestState })),\n\n        TOGGLE_SORT: ({ id, columnName }) =>\n            update(state, id, mapColumns(toggleSort(columnName))),\n\n        TOGGLE_COLUMN: ({ id, columnName }) =>\n            update(state, id, mapColumns(toggleColumn(columnName))),\n\n        SET_IS_NOT_FOUND: ({ id, isNotFound }) =>\n            update(state, id, (explorer) => ({ ...explorer, isNotFound })),\n\n        default: () => state,\n    });\n};\nexport const getDataExplorer = (state: DataExplorerState, id: string) => {\n    const returnValue = state[id] || initialDataExplorer;\n    return returnValue;\n};\n\nexport const getSortColumn = <R>(dataExplorer: DataExplorer): DataColumn<any, R> | undefined =>\n    dataExplorer.columns.find(\n        (c: DataColumn<any, R>) => !!c.sort && c.sort.direction !== SortDirection.NONE\n    );\n\nconst update = (\n    state: DataExplorerState,\n    id: string,\n    updateFn: (dataExplorer: DataExplorer) => DataExplorer\n) => ({ ...state, [id]: updateFn(getDataExplorer(state, id)) });\n\nconst canUpdateColumns = (\n    prevColumns: DataColumns<any, any>,\n    nextColumns: DataColumns<any, any>\n) => {\n    if (prevColumns.length !== nextColumns.length) {\n        return true;\n    }\n    for (let i = 0; i < nextColumns.length; i++) {\n        const pc = prevColumns[i];\n        const nc = nextColumns[i];\n        if (pc.key !== nc.key || pc.name !== nc.name) {\n            return true;\n        }\n    }\n    return false;\n};\n\nconst setColumns =\n    (columns: DataColumns<any, any>) => (dataExplorer: DataExplorer) => ({\n        ...dataExplorer,\n        columns: 
canUpdateColumns(dataExplorer.columns, columns)\n            ? columns\n            : dataExplorer.columns,\n    });\n\nconst mapColumns =\n    (mapFn: (column: DataColumn<any, any>) => DataColumn<any, any>) =>\n        (dataExplorer: DataExplorer) => ({\n            ...dataExplorer,\n            columns: dataExplorer.columns.map(mapFn),\n        });\n\nconst toggleSort = (columnName: string) => (column: DataColumn<any, any>) =>\n    column.name === columnName\n        ? toggleSortDirection(column)\n        : resetSortDirection(column);\n\nconst toggleColumn = (columnName: string) => (column: DataColumn<any, any>) =>\n    column.name === columnName\n        ? { ...column, selected: !column.selected }\n        : column;\n\nconst setFilters =\n    (columnName: string, filters: DataTableFilters) =>\n        (column: DataColumn<any, any>) =>\n            column.name === columnName ? { ...column, filters } : column;\n"
  },
  {
    "path": "services/workbench2/src/store/description-dialog/description-dialog-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { RootState } from \"store/store\";\n\nexport const DESCRIPTION_DIALOG = 'DESCRIPTION_DIALOG';\n\nexport type DescriptionDialogData = { uuid: string };\n\nexport const openDialog = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch(dialogActions.OPEN_DIALOG({id: DESCRIPTION_DIALOG, data: { uuid }}));\n    };\n\nexport const closeDialog = () =>\n    async (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch(dialogActions.CLOSE_DIALOG({id: DESCRIPTION_DIALOG}));\n    };\n\nexport const descriptionDialogActions = {\n    openDialog,\n    closeDialog\n};\n"
  },
  {
    "path": "services/workbench2/src/store/details-panel/details-panel-action.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from 'common/unionize';\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport { getResource } from 'store/resources/resources';\nimport { ServiceRepository } from 'services/services';\nimport { resourcesActions } from 'store/resources/resources-actions';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { OrderBuilder } from 'services/api/order-builder';\nimport { CollectionResource } from 'models/collection';\nimport { extractUuidKind, ResourceKind } from 'models/resource';\nimport { setSelectedResourceUuid } from 'store/selected-resource/selected-resource-actions';\nimport { deselectAllOthers, selectOne } from 'store/multiselect/multiselect-actions';\n\nexport const SLIDE_TIMEOUT = 500;\nexport const CLOSE_DRAWER = 'CLOSE_DRAWER'\n\nexport const detailsPanelActions = unionize({\n    TOGGLE_DETAILS_PANEL: ofType<{}>(),\n    OPEN_DETAILS_PANEL: ofType<number>(),\n    LOAD_DETAILS_PANEL: ofType<string>(),\n});\n\nexport type DetailsPanelAction = UnionOf<typeof detailsPanelActions>;\n\nexport const loadDetailsPanel = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        if (getState().detailsPanel.isOpened) {\n            switch(extractUuidKind(uuid)) {\n                case ResourceKind.COLLECTION:\n                    const c = getResource<CollectionResource>(uuid)(getState().resources);\n                    dispatch<any>(refreshCollectionVersionsList(c!.currentVersionUuid));\n                    break;\n                default:\n                    break;\n            }\n        }\n        dispatch(detailsPanelActions.LOAD_DETAILS_PANEL(uuid));\n    };\n\nexport const openDetailsPanel = (uuid?: string, tabNr: number = 0) =>\n    (dispatch: Dispatch) => {\n        dispatch(detailsPanelActions.OPEN_DETAILS_PANEL(tabNr));\n        if (uuid !== undefined) {\n            dispatch<any>(selectOne(uuid));\n            dispatch<any>(deselectAllOthers(uuid));\n            dispatch<any>(setSelectedResourceUuid(uuid));\n            dispatch<any>(loadDetailsPanel(uuid));\n        }\n    };\n\nexport const refreshCollectionVersionsList = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        services.collectionService.list({\n            filters: new FilterBuilder()\n                .addEqual('current_version_uuid', uuid)\n                .getFilters(),\n            includeOldVersions: true,\n            order: new OrderBuilder<CollectionResource>().addDesc(\"version\").getOrder()\n        }).then(versions => dispatch(resourcesActions.SET_RESOURCES(versions.items))\n        ).catch(e => snackbarActions.OPEN_SNACKBAR({\n            message: `Couldn't retrieve versions: ${e.errors[0]}`,\n            hideDuration: 2000,\n            kind: SnackbarKind.ERROR })\n        );\n    };\n\nexport const toggleDetailsPanel = (uuid: string) => (dispatch: Dispatch, getState: () => RootState) => {\n    const { detailsPanel }= getState()\n    const isTargetUuidNew = uuid !== detailsPanel.resourceUuid\n    if(isTargetUuidNew && uuid !== CLOSE_DRAWER && detailsPanel.isOpened){\n        dispatch<any>(loadDetailsPanel(uuid));\n    } else {\n        // because of material-ui issue resizing details panel breaks tabs.\n        // triggering window 
resize event fixes that.\n        setTimeout(() => {\n            window.dispatchEvent(new Event('resize'));\n        }, SLIDE_TIMEOUT);\n        dispatch(detailsPanelActions.TOGGLE_DETAILS_PANEL());\n        if (getState().detailsPanel.isOpened) {\n            dispatch<any>(loadDetailsPanel(isTargetUuidNew ? uuid : detailsPanel.resourceUuid));\n        }\n    }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/details-panel/details-panel-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { detailsPanelActions, DetailsPanelAction } from \"./details-panel-action\";\n\nexport interface DetailsPanelState {\n    resourceUuid: string;\n    isOpened: boolean;\n    tabNr: number;\n}\n\nconst initialState = {\n    resourceUuid: '',\n    isOpened: false,\n    tabNr: 0,\n};\n\nexport const detailsPanelReducer = (state: DetailsPanelState = initialState, action: DetailsPanelAction) =>\n    detailsPanelActions.match(action, {\n        default: () => state,\n        LOAD_DETAILS_PANEL: resourceUuid => ({ ...state, resourceUuid }),\n        OPEN_DETAILS_PANEL: tabNr => ({ ...state, isOpened: true, tabNr }),\n        TOGGLE_DETAILS_PANEL: () => ({ ...state, isOpened: !state.isOpened }),\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/dialog/dialog-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\n\nexport const dialogActions = unionize({\n    OPEN_DIALOG: ofType<{ id: string, data: any }>(),\n    CLOSE_DIALOG: ofType<{ id: string }>(),\n    CLOSE_ALL_DIALOGS: ofType<{}>()\n});\n\nexport type DialogAction = UnionOf<typeof dialogActions>;\n"
  },
  {
    "path": "services/workbench2/src/store/dialog/dialog-reducer.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { dialogReducer } from \"./dialog-reducer\";\nimport { dialogActions } from \"./dialog-actions\";\n\ndescribe('DialogReducer', () => {\n    it('OPEN_DIALOG', () => {\n        const id = 'test id';\n        const data = 'test data';\n        const state = dialogReducer({}, dialogActions.OPEN_DIALOG({ id, data }));\n        expect(state[id]).to.deep.equal({ open: true, data });\n    });\n\n    it('CLOSE_DIALOG', () => {\n        const id = 'test id';\n        const state = dialogReducer({}, dialogActions.CLOSE_DIALOG({ id }));\n        expect(state[id]).to.deep.equal({ open: false, data: {} });\n    });\n    \n    it('CLOSE_DIALOG persist data', () => {\n        const id = 'test id';\n        const [newState] = [{}]\n            .map(state => dialogReducer(state, dialogActions.OPEN_DIALOG({ id, data: 'test data' })))\n            .map(state => dialogReducer(state, dialogActions.CLOSE_DIALOG({ id })));\n        \n        expect(newState[id]).to.deep.equal({ open: false, data: 'test data' });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/store/dialog/dialog-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { DialogAction, dialogActions } from './dialog-actions';\n\nexport type DialogState = Record<string, Dialog<any>>;\n\nexport interface Dialog<T> {\n    open: boolean;\n    data: T;\n}\n\nexport const dialogReducer = (state: DialogState = {}, action: DialogAction) =>\n    dialogActions.match(action, {\n        OPEN_DIALOG: ({ id, data }) => ({ ...state, [id]: { open: true, data } }),\n        CLOSE_DIALOG: ({ id }) => ({\n            ...state,\n            [id]: state[id] ? { ...state[id], open: false } : { open: false, data: {} },\n        }),\n        CLOSE_ALL_DIALOGS: () => ({}),\n        default: () => state,\n    });\n\nexport const getDialog = <T>(state: DialogState, id: string) => (state[id] ? (state[id] as Dialog<T>) : undefined);\n"
  },
  {
    "path": "services/workbench2/src/store/dialog/with-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { DialogState } from './dialog-reducer';\nimport { Dispatch } from 'redux';\nimport { dialogActions } from './dialog-actions';\n\nexport type WithDialogStateProps<T> = {\n    open: boolean;\n    data: T;\n};\n\nexport type WithDialogDispatchProps = {\n    closeDialog: () => void;\n};\n\nexport type WithDialogProps<T> = WithDialogStateProps<T> & WithDialogDispatchProps;\nexport const withDialog =\n    (id: string) =>\n    // TODO: How to make compiler happy with & P instead of & any?\n    // eslint-disable-next-line\n    <T, P>(component: React.ComponentType<WithDialogProps<T> & any>) =>\n        connect(mapStateToProps(id), mapDispatchToProps(id))(component);\n\nconst emptyData = {};\n\nexport const mapStateToProps =\n    (id: string) =>\n    <T>(state: { dialog: DialogState }): WithDialogStateProps<T> => {\n        const dialog = state.dialog[id];\n        return dialog ? dialog : { open: false, data: emptyData as T };\n    };\n\nexport const mapDispatchToProps =\n    (id: string) =>\n    (dispatch: Dispatch): WithDialogDispatchProps => ({\n        closeDialog: () => {\n            dispatch(dialogActions.CLOSE_DIALOG({ id }));\n        },\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/external-credentials/external-credential-dialog-data.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport interface CreateExternalCredentialFormDialogData {\n    name: string;\n    description?: string;\n    credentialClass: string;\n    externalId: string;\n    expiresAt: string;\n    secret: string;\n    scopes?: string[];\n}\n\nexport interface UpdateExternalCredentialFormDialogData {\n    name: string;\n    description?: string;\n    credentialClass: string;\n    externalId: string;\n    expiresAt: string;\n    secret?: string;\n    scopes?: string[];\n}"
  },
  {
    "path": "services/workbench2/src/store/external-credentials/external-credentials-actions.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { createExternalCredential, updateExternalCredential, removeExternalCredentialPermanently } from './external-credentials-actions';\n\ndescribe('External Credentials Actions', () => {\n    let dispatch;\n\n    const getState = () => ({\n        auth: { user: { uuid: 'test-user' } },\n        resources: {},\n        multiselect: {\n            checkedList: ['uuid1', 'uuid2'],\n        },\n    });\n\n    const generateExternalCredential = (\n        name,\n        expiresAt,\n        description = `Test Description ${Math.floor(Math.random() * 999999)}`,\n        credentialClass = `Test Credential Class ${Math.floor(Math.random() * 999999)}`,\n        externalId = `Test External ID ${Math.floor(Math.random() * 999999)}`,\n        scopes = [`scope1 ${Math.floor(Math.random() * 999999)}`, `scope2 ${Math.floor(Math.random() * 999999)}`],\n        secret = 'test-secret'\n    ) => {\n        return {\n            name: `${name} ${Math.floor(Math.random() * 999999)}`,\n            description,\n            credential_class: credentialClass,\n            external_id: externalId,\n            expires_at: expiresAt,\n            scopes,\n            secret,\n        };\n    };\n\n    beforeEach(() => {\n        if (dispatch) {\n            dispatch.reset();\n        }\n        dispatch = cy.stub();\n    });\n\n    it('creates external credential with correct values', () => {\n        const services = {\n            externalCredentialsService: {\n                create: cy.stub().resolves({}),\n            },\n        };\n\n        const testCredential = generateExternalCredential('Test Credential', '2024-01-01');\n\n        createExternalCredential(testCredential)(dispatch, getState, services);\n\n        cy.wrap(services.externalCredentialsService.create).should('have.been.calledOnce').and('have.been.calledWith', testCredential);\n    });\n\n    it('updates external credential without secret when empty', () => {\n        const services = {\n            externalCredentialsService: {\n                update: cy.stub().resolves({}),\n            },\n        };\n\n        const testCredential = generateExternalCredential('Test Credential', '2024-01-01');\n        testCredential.secret = '';\n        testCredential.uuid = 'test-uuid';\n\n        updateExternalCredential(testCredential)(dispatch, getState, services);\n\n        cy.wrap(services.externalCredentialsService.update)\n            .should('have.been.calledOnce')\n            .and(\n                'have.been.calledWith',\n                'test-uuid',\n                Cypress.sinon.match((obj) => {\n                    return !obj.hasOwnProperty('secret');\n                }),\n                false\n            );\n    });\n\n    it('updates external credential with secret when non-empty', () => {\n        const services = {\n            externalCredentialsService: {\n                update: cy.stub().resolves({}),\n            },\n        };\n\n        const testCredential = generateExternalCredential('Test Credential', '2024-01-01');\n        testCredential.uuid = 'test-uuid';\n\n        updateExternalCredential(testCredential)(dispatch, getState, services);\n\n        cy.wrap(services.externalCredentialsService.update)\n            .should('have.been.calledOnce')\n            .and(\n                'have.been.calledWith',\n                'test-uuid',\n                Cypress.sinon.match((obj) => {\n                   
 return obj.secret === testCredential.secret;\n                }),\n                false\n            );\n    });\n\n    it('removes multiple external credentials', (done) => {\n        const deleteStub = cy.stub().resolves({});\n        const services = {\n            externalCredentialsService: {\n                delete: deleteStub\n            }\n        };\n\n        removeExternalCredentialPermanently('any-uuid')(dispatch, getState, services);\n\n        // Give the Promise.allSettled time to process\n        setTimeout(() => {\n            cy.wrap(deleteStub)\n                .should('have.been.calledTwice');\n            cy.wrap(deleteStub.firstCall)\n                .should('have.been.calledWith', 'uuid1');\n            cy.wrap(deleteStub.secondCall)\n                .should('have.been.calledWith', 'uuid2');\n            done();\n        }, 0);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/store/external-credentials/external-credentials-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { bindDataExplorerActions } from \"store/data-explorer/data-explorer-action\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { CreateExternalCredentialFormDialogData, UpdateExternalCredentialFormDialogData } from \"store/external-credentials/external-credential-dialog-data\";\nimport { ContextMenuResource } from \"store/context-menu/context-menu\";\nimport { getCheckedListUuids } from \"store/multiselect/multiselect-actions\";\nimport { FormErrors, initialize, reset, startSubmit, stopSubmit } from \"redux-form\";\nimport { getCommonResourceServiceError, CommonResourceServiceError } from \"services/common-service/common-resource-service\";\nimport { getResource } from \"store/resources/resources\";\nimport { ExternalCredential } from \"models/external-credential\";\nimport { showGroupedCommonResourceResultSnackbars } from \"store/resources/resources-actions\";\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\n\nexport const EXTERNAL_CREDENTIALS_PANEL = 'externalCredentialsPanel';\nexport const CREATE_EXTERNAL_CREDENTIAL_FORM_NAME = 'newExternalCredentialFormName';\nexport const REMOVE_EXTERNAL_CREDENTIAL_DIALOG = \"removeExternalCredentialDialog\";\nexport const UPDATE_EXTERNAL_CREDENTIAL_FORM_NAME = \"updateExternalCredentialFormName\";\n\nexport const externalCredentialsActions = bindDataExplorerActions(EXTERNAL_CREDENTIALS_PANEL);\n\nexport const loadExternalCredentials = () =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(progressIndicatorActions.START_WORKING(EXTERNAL_CREDENTIALS_PANEL));\n            try {\n                dispatch(externalCredentialsActions.REQUEST_ITEMS());\n            } catch (e) {\n                dispatch(\n                    snackbarActions.OPEN_SNACKBAR({\n                        message: e.message,\n                        hideDuration: 2000,\n                        kind: SnackbarKind.ERROR,\n                    })\n                );\n            } finally {\n                dispatch(progressIndicatorActions.STOP_WORKING(EXTERNAL_CREDENTIALS_PANEL));\n            }\n        };\n\nexport const openNewExternalCredentialDialog = () =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch(initialize(CREATE_EXTERNAL_CREDENTIAL_FORM_NAME, {}));\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: CREATE_EXTERNAL_CREDENTIAL_FORM_NAME,\n            data: {},\n        }));\n    };\n\nexport const createExternalCredential = (data: CreateExternalCredentialFormDialogData) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(startSubmit(CREATE_EXTERNAL_CREDENTIAL_FORM_NAME));\n        try {\n            dispatch(progressIndicatorActions.START_WORKING(CREATE_EXTERNAL_CREDENTIAL_FORM_NAME));\n            const newExternalCredential = await services.externalCredentialsService.create(data);\n            dispatch(externalCredentialsActions.REQUEST_ITEMS());\n            dispatch(dialogActions.CLOSE_DIALOG({ id: CREATE_EXTERNAL_CREDENTIAL_FORM_NAME }));\n            
dispatch(reset(CREATE_EXTERNAL_CREDENTIAL_FORM_NAME));\n            return newExternalCredential;\n        } catch (e) {\n            const error = getCommonResourceServiceError(e);\n            if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                dispatch(stopSubmit(CREATE_EXTERNAL_CREDENTIAL_FORM_NAME, { name: \"Credential with the same name already exists.\" } as FormErrors));\n            } else {\n                dispatch(dialogActions.CLOSE_DIALOG({ id: CREATE_EXTERNAL_CREDENTIAL_FORM_NAME }));\n                const errMsg = e.errors ? e.errors.join(\"\") : \"Could not create the credential\";\n                dispatch(\n                    snackbarActions.OPEN_SNACKBAR({\n                        message: errMsg,\n                        hideDuration: 2000,\n                        kind: SnackbarKind.ERROR,\n                    })\n                );\n            }\n            return;\n        } finally {\n            dispatch(progressIndicatorActions.STOP_WORKING(CREATE_EXTERNAL_CREDENTIAL_FORM_NAME));\n        }\n    };\n\nexport const openRemoveExternalCredentialDialog = (resource: ContextMenuResource) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const numOfCredentials = getCheckedListUuids(getState()).length;\n        dispatch(\n            dialogActions.OPEN_DIALOG({\n                id: REMOVE_EXTERNAL_CREDENTIAL_DIALOG,\n                data: {\n                    title: \"Remove External Credentials\",\n                    text: numOfCredentials === 1 ? \"Are you sure you want to remove this credential?\" : `Are you sure you want to remove these ${numOfCredentials} credentials?`,\n                    confirmButtonLabel: \"Remove\",\n                    uuid: resource.uuid,\n                    resource,\n                },\n            })\n        );\n    };\n\nexport const removeExternalCredentialPermanently = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(progressIndicatorActions.START_WORKING(EXTERNAL_CREDENTIALS_PANEL));\n        const credentialsToRemove = getCheckedListUuids(getState());\n\n        const messageFuncMap = {\n            [CommonResourceServiceError.NONE]: (count: number) => count > 1 ? `Removed ${count} items` : `Item removed`,\n            [CommonResourceServiceError.PERMISSION_ERROR_FORBIDDEN]: (count: number) => count > 1 ? `Remove ${count} items failed: Access Denied` : `Remove failed: Access Denied`,\n            [CommonResourceServiceError.UNKNOWN]: (count: number) => count > 1 ? 
`Remove ${count} items failed` : `Remove failed`,\n        };\n\n        await Promise.allSettled(credentialsToRemove.map(credential => services.externalCredentialsService.delete(credential))).then((promises) => {\n            const { success } = showGroupedCommonResourceResultSnackbars(dispatch, promises, messageFuncMap);\n            if (success.length) {\n                dispatch<any>(loadExternalCredentials());\n            }\n        });\n\n        dispatch(progressIndicatorActions.STOP_WORKING(EXTERNAL_CREDENTIALS_PANEL));\n    };\n\nexport const openExternalCredentialUpdateDialog = (resource: ContextMenuResource) => (dispatch: Dispatch, getState: () => RootState) => {\n    const credential = getResource<ExternalCredential>(resource.uuid)(getState().resources);\n    dispatch(initialize(UPDATE_EXTERNAL_CREDENTIAL_FORM_NAME, credential));\n    dispatch(\n        dialogActions.OPEN_DIALOG({id: UPDATE_EXTERNAL_CREDENTIAL_FORM_NAME, data: {}})\n    );\n};\n\nexport const updateExternalCredential =\n    (credential: UpdateExternalCredentialFormDialogData & { uuid?: string }) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const uuid = credential.uuid || \"\";\n        dispatch(startSubmit(UPDATE_EXTERNAL_CREDENTIAL_FORM_NAME));\n        try {\n            dispatch(progressIndicatorActions.START_WORKING(UPDATE_EXTERNAL_CREDENTIAL_FORM_NAME));\n            const updatedCredential = await services.externalCredentialsService.update(\n                uuid,\n                {\n                    name: credential.name,\n                    description: credential.description,\n                    credentialClass: credential.credentialClass,\n                    externalId: credential.externalId,\n                    expiresAt: credential.expiresAt,\n                    // if no secret, don't include the field at all\n                    ...(credential.secret && credential.secret.length > 0 ? { secret: credential.secret } : {}),\n                    scopes: credential.scopes || [],\n                },\n                false\n            );\n            dispatch(externalCredentialsActions.REQUEST_ITEMS());\n            dispatch(reset(UPDATE_EXTERNAL_CREDENTIAL_FORM_NAME));\n            dispatch(dialogActions.CLOSE_DIALOG({ id: UPDATE_EXTERNAL_CREDENTIAL_FORM_NAME }));\n            return updatedCredential;\n        } catch (e) {\n            const error = getCommonResourceServiceError(e);\n            if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                dispatch(stopSubmit(UPDATE_EXTERNAL_CREDENTIAL_FORM_NAME, { name: \"Credential with the same name already exists.\" } as FormErrors));\n            } else {\n                dispatch(dialogActions.CLOSE_DIALOG({ id: UPDATE_EXTERNAL_CREDENTIAL_FORM_NAME }));\n                const errMsg = e.errors ? e.errors.join(\"\") : \"There was an error while updating the credential\";\n                dispatch(\n                    snackbarActions.OPEN_SNACKBAR({\n                        message: errMsg,\n                        hideDuration: 2000,\n                        kind: SnackbarKind.ERROR,\n                    })\n                );\n            }\n            return;\n        } finally {\n            dispatch(progressIndicatorActions.STOP_WORKING(UPDATE_EXTERNAL_CREDENTIAL_FORM_NAME));\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/external-credentials/external-credentials-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ServiceRepository } from 'services/services';\nimport { MiddlewareAPI, Dispatch } from 'redux';\nimport { DataExplorerMiddlewareService, dataExplorerToListParams, getOrder, listResultsToDataExplorerItemsMeta } from 'store/data-explorer/data-explorer-middleware-service';\nimport { RootState } from 'store/store';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { DataExplorer, getDataExplorer } from 'store/data-explorer/data-explorer-reducer';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { ListArguments, ListResults } from 'services/common-service/common-service';\nimport { ExternalCredential } from 'models/external-credential';\nimport { externalCredentialsActions } from 'store/external-credentials/external-credentials-actions';\nimport { couldNotFetchItemsAvailable } from 'store/data-explorer/data-explorer-action';\n\nexport class ExternalCredentialsMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        try {\n            const response = await this.services.externalCredentialsService.list(getParams(dataExplorer));\n            api.dispatch(updateResources(response.items));\n            api.dispatch(setItems(response));\n        } catch {\n            api.dispatch(couldNotFetchExternalCredentials());\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        if (criteriaChanged) {\n            // Get itemsAvailable\n            return this.services.externalCredentialsService.list(getCountParams())\n                .then((results: ListResults<ExternalCredential>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(externalCredentialsActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        couldNotFetchItemsAvailable();\n                    }\n                });\n        }\n    }\n}\n\nconst getParams = (dataExplorer: DataExplorer): ListArguments => ({\n    ...dataExplorerToListParams(dataExplorer),\n    order: getOrder<ExternalCredential>(dataExplorer),\n    count: 'none',\n});\n\nconst getCountParams = (): ListArguments => ({\n    limit: 0,\n    count: 'exact',\n});\n\nexport const setItems = (listResults: ListResults<ExternalCredential>) =>\n    externalCredentialsActions.SET_ITEMS({\n        ...listResultsToDataExplorerItemsMeta(listResults),\n        items: listResults.items.map(resource => resource.uuid),\n    });\n\nconst couldNotFetchExternalCredentials = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch external credentials.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/favorite-panel/favorite-panel-action.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { bindDataExplorerActions } from \"../data-explorer/data-explorer-action\";\n\nexport const FAVORITE_PANEL_ID = \"favoritePanel\";\nexport const favoritePanelActions = bindDataExplorerActions(FAVORITE_PANEL_ID);\n\nexport const loadFavoritePanel = () => (dispatch: Dispatch) => {\n    dispatch(favoritePanelActions.RESET_EXPLORER_SEARCH_VALUE());\n    dispatch(favoritePanelActions.REQUEST_ITEMS());\n};"
  },
  {
    "path": "services/workbench2/src/store/favorite-panel/favorite-panel-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { DataExplorerMiddlewareService, dataExplorerToListParams, listResultsToDataExplorerItemsMeta } from \"store/data-explorer/data-explorer-middleware-service\";\nimport { FavoritePanelColumnNames } from 'views/favorite-panel/favorite-panel-columns';\nimport { RootState } from \"../store\";\nimport { getUserUuid } from \"common/getuser\";\nimport { DataColumns } from \"components/data-table/data-column\";\nimport { ServiceRepository } from \"services/services\";\nimport { FilterBuilder } from \"services/api/filter-builder\";\nimport { updateFavorites } from \"../favorites/favorites-actions\";\nimport { favoritePanelActions } from \"./favorite-panel-action\";\nimport { Dispatch, MiddlewareAPI } from \"redux\";\nimport { resourcesActions } from \"store/resources/resources-actions\";\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { progressIndicatorActions } from 'store/progress-indicator/progress-indicator-actions';\nimport { DataExplorer, getDataExplorer } from \"store/data-explorer/data-explorer-reducer\";\nimport { getDataExplorerColumnFilters } from 'store/data-explorer/data-explorer-middleware-service';\nimport { serializeSimpleObjectTypeFilters } from '../resource-type-filters/resource-type-filters';\nimport { ResourceKind } from \"models/resource\";\nimport { LinkClass, LinkResource } from \"models/link\";\nimport { GroupContentsResource } from \"services/groups-service/groups-service\";\nimport { ListArguments, ListResults } from \"services/common-service/common-service\";\nimport { couldNotFetchItemsAvailable } from \"store/data-explorer/data-explorer-action\";\nimport { favoritesLinksActions } from \"store/favorites/favorites-links-reducer\";\n\nexport class FavoritePanelMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    getTypeFilters(dataExplorer: DataExplorer) {\n        const columns = dataExplorer.columns as DataColumns<string, GroupContentsResource>;\n        return serializeSimpleObjectTypeFilters(getDataExplorerColumnFilters(columns, FavoritePanelColumnNames.TYPE));\n    }\n\n    getLinkFilters(dataExplorer: DataExplorer, uuid: string): string {\n        return new FilterBuilder()\n            .addEqual(\"link_class\", LinkClass.STAR)\n            .addEqual('tail_uuid', uuid)\n            .addEqual('tail_kind', ResourceKind.USER)\n            .addIsA(\"head_uuid\", this.getTypeFilters(dataExplorer))\n            .getFilters();\n    }\n\n    getResourceFilters(dataExplorer: DataExplorer, uuids: string[]): string {\n        return new FilterBuilder()\n            .addIn(\"uuid\", uuids)\n            .addILike(\"name\", dataExplorer.searchValue)\n            .addIsA(\"uuid\", this.getTypeFilters(dataExplorer))\n            .getFilters();\n    }\n\n    getLinkParams(dataExplorer: DataExplorer, uuid: string): ListArguments {\n        return {\n            ...dataExplorerToListParams(dataExplorer),\n            filters: this.getLinkFilters(dataExplorer, uuid),\n            count: \"none\",\n        };\n    }\n\n    getCountParams(dataExplorer: DataExplorer, uuid: string): ListArguments {\n        return {\n            filters: this.getLinkFilters(dataExplorer, uuid),\n            limit: 0,\n            count: \"exact\",\n        };\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, 
criteriaChanged?: boolean, background?: boolean) {\n        const dataExplorer = getDataExplorer(api.getState().dataExplorer, this.getId());\n        const uuid = getUserUuid(api.getState());\n        if (!dataExplorer) {\n            api.dispatch(favoritesPanelDataExplorerIsNotSet());\n        } else if (!uuid || !uuid.length) {\n            api.dispatch(userNotAvailable());\n        } else {\n            try {\n                if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n\n                // Get items\n                const responseLinks = await this.services.linkService.list(this.getLinkParams(dataExplorer, uuid));\n                const uuids = responseLinks.items.map(it => it.headUuid);\n\n                const orderedItems = await this.services.groupsService.contents(\"\", {\n                    filters: this.getResourceFilters(dataExplorer, uuids),\n                    include: [\"owner_uuid\", \"container_uuid\"],\n                });\n\n                api.dispatch(favoritesLinksActions.setFavoritesLinks(responseLinks.items));\n                api.dispatch(resourcesActions.SET_RESOURCES(orderedItems.items));\n                api.dispatch(resourcesActions.SET_RESOURCES(orderedItems.included));\n                api.dispatch(favoritePanelActions.SET_ITEMS({\n                    ...listResultsToDataExplorerItemsMeta(responseLinks),\n                    items: orderedItems.items.map((resource: any) => resource.uuid),\n                }));\n                api.dispatch<any>(updateFavorites(uuids));\n            } catch (e) {\n                api.dispatch(favoritePanelActions.SET_ITEMS({\n                    items: [],\n                    itemsAvailable: 0,\n                    page: 0,\n                    rowsPerPage: dataExplorer.rowsPerPage\n                }));\n                api.dispatch(couldNotFetchFavoritesContents());\n            } finally {\n                api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n            }\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        const uuid = getUserUuid(api.getState());\n\n        if (criteriaChanged && uuid && uuid.length) {\n            // Get itemsAvailable\n            return this.services.linkService.list(this.getCountParams(dataExplorer, uuid))\n                .then((results: ListResults<LinkResource>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(favoritePanelActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        api.dispatch(couldNotFetchItemsAvailable());\n                    }\n                });\n        }\n    }\n}\n\nconst favoritesPanelDataExplorerIsNotSet = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Favorites panel is not ready.',\n        kind: SnackbarKind.ERROR\n    });\n\nconst couldNotFetchFavoritesContents = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch favorites contents.',\n        kind: SnackbarKind.ERROR\n    });\n\nconst userNotAvailable = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'User favorites not available.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/favorite-pins/favorite-pins-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { DataExplorerMiddlewareService, dataExplorerToListParams, listResultsToDataExplorerItemsMeta } from \"store/data-explorer/data-explorer-middleware-service\";\nimport { RootState } from \"../store\";\nimport { getUserUuid } from \"common/getuser\";\nimport { ServiceRepository } from \"services/services\";\nimport { FilterBuilder } from \"services/api/filter-builder\";\nimport { updateFavorites } from \"../favorites/favorites-actions\";\nimport { Dispatch, MiddlewareAPI } from \"redux\";\nimport { resourcesActions } from \"store/resources/resources-actions\";\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { progressIndicatorActions } from 'store/progress-indicator/progress-indicator-actions';\nimport { DataExplorer, getDataExplorer } from \"store/data-explorer/data-explorer-reducer\";\nimport { ResourceKind } from \"models/resource\";\nimport { LinkClass } from \"models/link\";\nimport { ListArguments } from \"services/common-service/common-service\";\nimport { bindDataExplorerActions } from \"../data-explorer/data-explorer-action\";\n\nexport const FAVORITE_PINS_ID = \"favoritePins\";\nexport const favoritePinsActions = bindDataExplorerActions(FAVORITE_PINS_ID);\n\nexport const loadFavoritePins = () => (dispatch: Dispatch) => {\n    dispatch(favoritePinsActions.RESET_EXPLORER_SEARCH_VALUE());\n    dispatch(favoritePinsActions.REQUEST_ITEMS());\n};\n\nexport class FavoritePinsMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    // Since FavoritePins does not use a data table, we can't get these types from the data table columns like we do normally\n    favoriteTypes = [ResourceKind.GROUP, ResourceKind.COLLECTION, ResourceKind.CONTAINER_REQUEST, ResourceKind.WORKFLOW];\n\n    getLinkFilters(dataExplorer: DataExplorer, uuid: string): string {\n        return new FilterBuilder()\n            .addEqual(\"link_class\", LinkClass.STAR)\n            .addEqual('tail_uuid', uuid)\n            .addEqual('tail_kind', ResourceKind.USER)\n            .addIsA(\"head_uuid\", this.favoriteTypes)\n            .getFilters();\n    }\n\n    getResourceFilters(dataExplorer: DataExplorer, uuids: string[]): string {\n        return new FilterBuilder()\n            .addIn(\"uuid\", uuids)\n            .addILike(\"name\", dataExplorer.searchValue)\n            .addIsA(\"uuid\", this.favoriteTypes)\n            .getFilters();\n    }\n\n    getLinkParams(dataExplorer: DataExplorer, uuid: string): ListArguments {\n        return {\n            ...dataExplorerToListParams(dataExplorer),\n            filters: this.getLinkFilters(dataExplorer, uuid),\n            limit: 12,\n            count: \"none\",\n        };\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const dataExplorer = getDataExplorer(api.getState().dataExplorer, this.getId());\n        const userUuid = getUserUuid(api.getState());\n        if (!userUuid || !userUuid.length) {\n            userNotAvailable();\n        } else {\n            try {\n                if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n\n                // Get favorite links\n                const responseLinks = await this.services.linkService.list(this.getLinkParams(dataExplorer, 
userUuid));\n                const uuids = responseLinks.items.map(it => it.headUuid);\n\n                // Get resources from links\n                const orderedItems = await this.services.groupsService.contents(\"\", {\n                    filters: this.getResourceFilters(dataExplorer, uuids),\n                    include: [\"owner_uuid\", \"container_uuid\"],\n                });\n\n                api.dispatch(resourcesActions.SET_RESOURCES(responseLinks.items));\n                api.dispatch(resourcesActions.SET_RESOURCES(orderedItems.items));\n                api.dispatch(resourcesActions.SET_RESOURCES(orderedItems.included));\n                api.dispatch(favoritePinsActions.SET_ITEMS({\n                    ...listResultsToDataExplorerItemsMeta(responseLinks),\n                    items: responseLinks.items.map((resource: any) => resource.uuid),\n                }));\n                api.dispatch<any>(updateFavorites(uuids));\n            } catch (e) {\n                api.dispatch(favoritePinsActions.SET_ITEMS({\n                    items: [],\n                    itemsAvailable: 0,\n                    page: 0,\n                    rowsPerPage: dataExplorer.rowsPerPage\n                }));\n                api.dispatch(couldNotFetchFavoritesContents());\n            } finally {\n                api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n            }\n        }\n    }\n\n    // Not used\n    async requestCount() {}\n}\n\nconst couldNotFetchFavoritesContents = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch favorites contents.',\n        kind: SnackbarKind.ERROR\n    });\n\nconst userNotAvailable = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'User favorites not available.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/favorites/favorites-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"../store\";\nimport { getUserUuid } from \"common/getuser\";\nimport { checkFavorite } from \"./favorites-reducer\";\nimport { snackbarActions, SnackbarKind } from \"../snackbar/snackbar-actions\";\nimport { ServiceRepository } from \"services/services\";\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport { addDisabledButton, removeDisabledButton } from \"store/multiselect/multiselect-actions\";\nimport { SidePanelTreeCategory, loadSidePanelTreeProjects} from \"store/side-panel-tree/side-panel-tree-actions\";\nimport { favoritePinsActions } from \"store/favorite-pins/favorite-pins-middleware-service\";\n\nexport const favoritesActions = unionize({\n    TOGGLE_FAVORITE: ofType<{ resourceUuid: string }>(),\n    CHECK_PRESENCE_IN_FAVORITES: ofType<string[]>(),\n    UPDATE_FAVORITES: ofType<Record<string, boolean>>()\n});\n\nexport type FavoritesAction = UnionOf<typeof favoritesActions>;\n\nexport const toggleFavorite = (resource: { uuid: string; name: string }) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<any> => {\n        const userUuid = getUserUuid(getState());\n        if (!userUuid) {\n            return Promise.reject(\"No user\");\n        }\n        dispatch(progressIndicatorActions.START_WORKING(\"toggleFavorite\"));\n        dispatch<any>(addDisabledButton(ContextMenuActionNames.ADD_TO_FAVORITES))\n        dispatch(favoritesActions.TOGGLE_FAVORITE({ resourceUuid: resource.uuid }));\n        const isFavorite = checkFavorite(resource.uuid, getState().favorites);\n        dispatch(snackbarActions.OPEN_SNACKBAR({\n            message: isFavorite\n                ? \"Removing from favorites...\"\n                : \"Adding to favorites...\",\n            kind: SnackbarKind.INFO\n        }));\n\n        const promise: any = isFavorite\n            ? services.favoriteService.delete({ userUuid, resourceUuid: resource.uuid })\n            : services.favoriteService.create({ userUuid, resource });\n\n        return promise\n            .then(() => {\n                dispatch(favoritesActions.UPDATE_FAVORITES({ [resource.uuid]: !isFavorite }));\n                dispatch(snackbarActions.CLOSE_SNACKBAR());\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: isFavorite\n                        ? 
\"Removed from favorites\"\n                        : \"Added to favorites\",\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS\n                }));\n                dispatch<any>(favoritePinsActions.REQUEST_ITEMS());\n                dispatch<any>(removeDisabledButton(ContextMenuActionNames.ADD_TO_FAVORITES))\n                dispatch(progressIndicatorActions.STOP_WORKING(\"toggleFavorite\"));\n                dispatch<any>(loadSidePanelTreeProjects(SidePanelTreeCategory.FAVORITES));\n            })\n            .catch((e: any) => {\n                dispatch(progressIndicatorActions.STOP_WORKING(\"toggleFavorite\"));\n                throw e;\n            });\n    };\n\nexport const updateFavorites = (resourceUuids: string[]) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const userUuid = getUserUuid(getState());\n        if (!userUuid) { return; }\n        dispatch(favoritesActions.CHECK_PRESENCE_IN_FAVORITES(resourceUuids));\n        services.favoriteService\n            .checkPresenceInFavorites(userUuid, resourceUuids)\n            .then((results: any) => {\n                dispatch(favoritesActions.UPDATE_FAVORITES(results));\n            });\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/favorites/favorites-links-reducer.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { LinkResource } from \"models/link\";\n\ntype FavoritesLinksState = LinkResource[];\n\nconst SET_FAVORITES_LINKS = 'SET_FAVORITES_LINKS';\n\nexport const favoritesLinksActions = {\n    setFavoritesLinks: (links: LinkResource[]) => ({ type: SET_FAVORITES_LINKS, payload: links })\n}\n\ntype FavoritesLinksAction = {\n    type: string;\n    payload: LinkResource[];\n}\n\nexport const favoritesLinksReducer = (state: FavoritesLinksState = [], action: FavoritesLinksAction) => {\n    switch (action.type) {\n        case SET_FAVORITES_LINKS:\n            return action.payload;\n        default:\n            return state;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/store/favorites/favorites-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { FavoritesAction, favoritesActions } from \"./favorites-actions\";\n\nexport type FavoritesState = Record<string, boolean>;\n\nexport const favoritesReducer = (state: FavoritesState = {}, action: FavoritesAction) => \n    favoritesActions.match(action, {\n        UPDATE_FAVORITES: favorites => ({...state, ...favorites}),\n        default: () => state\n    });\n\nexport const checkFavorite = (uuid: string, state: FavoritesState) => state[uuid] === true;"
  },
  {
    "path": "services/workbench2/src/store/file-selection/file-selection-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { resetPickerProjectTree } from 'store/project-tree-picker/project-tree-picker-actions';\n\nexport const FILE_SELECTION = 'fileSelection';\n\nexport const openFileSelectionDialog = () =>\n    (dispatch: Dispatch) => {\n        dispatch<any>(resetPickerProjectTree());\n        dispatch(dialogActions.OPEN_DIALOG({ id: FILE_SELECTION, data: {} }));\n    };"
  },
  {
    "path": "services/workbench2/src/store/file-uploader/file-uploader-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\nimport { Dispatch } from \"redux\";\nimport { RootState } from 'store/store';\n\nexport interface UploadFile {\n    id: number;\n    file: File;\n    prevLoaded: number;\n    loaded: number;\n    total: number;\n    startTime: number;\n    prevTime: number;\n    currentTime: number;\n}\n\nexport const fileUploaderActions = unionize({\n    CLEAR_UPLOAD: ofType(),\n    SET_UPLOAD_FILES: ofType<File[]>(),\n    UPDATE_UPLOAD_FILES: ofType<File[]>(),\n    SET_UPLOAD_PROGRESS: ofType<{ fileId: number, loaded: number, total: number, currentTime: number }>(),\n    START_UPLOAD: ofType(),\n    DELETE_UPLOAD_FILE: ofType<UploadFile>(),\n    CANCEL_FILES_UPLOAD: ofType(),\n});\n\nexport type FileUploaderAction = UnionOf<typeof fileUploaderActions>;\n\nexport const getFileUploaderState = () => (dispatch: Dispatch, getState: () => RootState) => {\n    return getState().fileUploader;\n};"
  },
  {
    "path": "services/workbench2/src/store/file-uploader/file-uploader-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { UploadFile, fileUploaderActions, FileUploaderAction } from \"./file-uploader-actions\";\nimport { uniqBy } from 'lodash';\n\nexport type UploaderState = UploadFile[];\n\nconst initialState: UploaderState = [];\n\nexport const fileUploaderReducer = (state: UploaderState = initialState, action: FileUploaderAction) => {\n    return fileUploaderActions.match(action, {\n        SET_UPLOAD_FILES: files => files.map((f, idx) => ({\n            id: idx,\n            file: f,\n            prevLoaded: 0,\n            loaded: 0,\n            total: 0,\n            startTime: 0,\n            prevTime: 0,\n            currentTime: 0\n        })),\n        UPDATE_UPLOAD_FILES: files => {\n            const updateFiles = files.map((f, idx) => ({\n                id: state.length + idx,\n                file: f,\n                prevLoaded: 0,\n                loaded: 0,\n                total: 0,\n                startTime: 0,\n                prevTime: 0,\n                currentTime: 0\n            }));\n            const updatedState = state.concat(updateFiles);\n            const uniqUpdatedState = uniqBy(updatedState, 'file.name');\n\n            return uniqUpdatedState;\n        },\n        DELETE_UPLOAD_FILE: file => {\n            const idToDelete: number = file.id;\n            const updatedState = state.filter(file => file.id !== idToDelete);\n\n            return updatedState;\n        },\n        CANCEL_FILES_UPLOAD: () => {\n            state.forEach((file) => {\n                let interval = setInterval(() => {\n                    const key = Object.keys((window as any).cancelTokens).find(key => key.indexOf(file.file.name) > -1);\n    \n                    if (key) {\n                        clearInterval(interval);\n                        (window as any).cancelTokens[key]();\n                        delete (window as any).cancelTokens[key];\n                    }\n                }, 100);\n            });\n\n            return [];\n        },\n        START_UPLOAD: () => {\n            const startTime = Date.now();\n            return state.map(f => ({ ...f, startTime, prevTime: startTime }));\n        },\n        SET_UPLOAD_PROGRESS: ({ fileId, loaded, total, currentTime }) =>\n            state.map(f => f.id === fileId ? {\n                ...f,\n                prevLoaded: f.loaded,\n                loaded,\n                total,\n                prevTime: f.currentTime,\n                currentTime\n            } : f),\n        CLEAR_UPLOAD: () => [],\n        default: () => state\n    });\n};\n"
  },
  {
    "path": "services/workbench2/src/store/file-viewers/file-viewers-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { ServiceRepository } from 'services/services';\nimport { propertiesActions } from 'store/properties/properties-actions';\nimport { FILE_VIEWERS_PROPERTY_NAME, DEFAULT_FILE_VIEWERS } from 'store/file-viewers/file-viewers-selectors';\nimport { FileViewerList } from 'models/file-viewers-config';\n\nexport const loadFileViewersConfig = async (dispatch: Dispatch, _: {}, { fileViewersConfig }: ServiceRepository) => {\n    \n    let config: FileViewerList;\n    try{\n        config = await fileViewersConfig.get();\n    } catch (e){\n        config = DEFAULT_FILE_VIEWERS;\n    }\n\n    dispatch(propertiesActions.SET_PROPERTY({\n        key: FILE_VIEWERS_PROPERTY_NAME,\n        value: config,\n    }));\n\n};\n"
  },
  {
    "path": "services/workbench2/src/store/file-viewers/file-viewers-selectors.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { PropertiesState, getProperty } from 'store/properties/properties';\nimport { FileViewerList } from 'models/file-viewers-config';\n\nexport const FILE_VIEWERS_PROPERTY_NAME = 'fileViewers';\n\nexport const DEFAULT_FILE_VIEWERS: FileViewerList = [];\nexport const getFileViewers = (state: PropertiesState) =>\n    getProperty<FileViewerList>(FILE_VIEWERS_PROPERTY_NAME)(state) || DEFAULT_FILE_VIEWERS;\n"
  },
  {
    "path": "services/workbench2/src/store/group-details-panel/group-details-panel-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { bindDataExplorerActions } from 'store/data-explorer/data-explorer-action';\nimport { Dispatch } from 'redux';\nimport { propertiesActions } from 'store/properties/properties-actions';\nimport { getProperty } from 'store/properties/properties';\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { deleteGroupMember } from 'store/groups-panel/groups-panel-actions';\nimport { getResource } from 'store/resources/resources';\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from 'services/services';\nimport { PermissionResource, PermissionLevel } from 'models/permission';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { LinkResource } from 'models/link';\nimport { deleteResources, updateResources } from 'store/resources/resources-actions';\nimport { openSharingDialog } from 'store/sharing-dialog/sharing-dialog-actions';\nimport { UserProfileGroupsActions } from 'store/user-profile/user-profile-actions';\nimport { getCheckedListUuids } from 'store/multiselect/multiselect-actions';\n\nexport const GROUP_DETAILS_MEMBERS_PANEL_ID = 'groupDetailsMembersPanel';\nexport const GROUP_DETAILS_PERMISSIONS_PANEL_ID = 'groupDetailsPermissionsPanel';\nexport const MEMBER_ATTRIBUTES_DIALOG = 'memberAttributesDialog';\nexport const MEMBER_REMOVE_DIALOG = 'memberRemoveDialog';\nexport const MULTIPLE_MEMBER_REMOVE_DIALOG = 'multipleMemberRemoveDialog';\n\nexport const GroupMembersPanelActions = bindDataExplorerActions(GROUP_DETAILS_MEMBERS_PANEL_ID);\nexport const GroupPermissionsPanelActions = bindDataExplorerActions(GROUP_DETAILS_PERMISSIONS_PANEL_ID);\n\nexport const loadGroupDetailsPanel = (groupUuid: string) =>\n    (dispatch: Dispatch) => {\n        dispatch(propertiesActions.SET_PROPERTY({ key: GROUP_DETAILS_MEMBERS_PANEL_ID, value: groupUuid }));\n        dispatch(GroupMembersPanelActions.REQUEST_ITEMS());\n        dispatch(propertiesActions.SET_PROPERTY({ key: GROUP_DETAILS_PERMISSIONS_PANEL_ID, value: groupUuid }));\n        dispatch(GroupPermissionsPanelActions.REQUEST_ITEMS());\n    };\n\nexport const getCurrentGroupDetailsPanelUuid = getProperty<string>(GROUP_DETAILS_MEMBERS_PANEL_ID);\n\nexport const openAddGroupMembersDialog = () =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const groupUuid = getCurrentGroupDetailsPanelUuid(getState().properties);\n        if (groupUuid) {\n            dispatch<any>(openSharingDialog(groupUuid, () => {\n                dispatch(GroupMembersPanelActions.REQUEST_ITEMS());\n            }));\n        }\n    };\n\nexport const editPermissionLevel = (uuid: string, level: PermissionLevel) =>\n    async (dispatch: Dispatch, getState: () => RootState, { permissionService }: ServiceRepository) => {\n        try {\n            const permission = await permissionService.update(uuid, {name: level});\n            dispatch(updateResources([permission]));\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Permission level changed.', hideDuration: 2000 }));\n        } catch (e) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: 'Failed to update permission',\n                kind: SnackbarKind.ERROR,\n            }));\n        }\n    };\n\nexport const openGroupMemberAttributes = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const { 
resources } = getState();\n        const data = getResource<PermissionResource>(uuid)(resources);\n        dispatch(dialogActions.OPEN_DIALOG({ id: MEMBER_ATTRIBUTES_DIALOG, data }));\n    };\n\nexport const openRemoveGroupMemberDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: MEMBER_REMOVE_DIALOG,\n            data: {\n                title: 'Remove member',\n                text: 'Are you sure you want to remove this member from this group?',\n                confirmButtonLabel: 'Remove',\n                uuid\n            }\n        }));\n    };\n\nexport const removeGroupMember = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, { permissionService }: ServiceRepository) => {\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removing ...', kind: SnackbarKind.INFO }));\n        await deleteGroupMember({\n            link: {\n                uuid,\n            },\n            permissionService,\n            dispatch,\n        });\n        dispatch<any>(deleteResources([uuid]));\n        dispatch(GroupMembersPanelActions.REQUEST_ITEMS());\n        dispatch(UserProfileGroupsActions.REQUEST_ITEMS());\n\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removed.', hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n    };\n\nexport const openRemoveCheckedGroupMembersDialog = () =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const uuidsToRemove = getCheckedListUuids(getState());\n\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: MULTIPLE_MEMBER_REMOVE_DIALOG,\n            data: {\n                title: uuidsToRemove.length > 1 ? 'Remove members' : 'Remove member',\n                text: uuidsToRemove.length > 1\n                    ? 
'Are you sure you want to remove these members from this group?'\n                    : 'Are you sure you want to remove this member from this group?',\n                confirmButtonLabel: 'Remove',\n                uuidsToRemove\n            }\n        }));\n    };\n\nexport const removeMultipleGroupMembers = () =>\n    async (dispatch: Dispatch, getState: () => RootState, { permissionService }: ServiceRepository) => {\n        const uuidsToRemove = getCheckedListUuids(getState());\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removing ...', kind: SnackbarKind.INFO }));\n        for (const uuid of uuidsToRemove) {\n            await deleteGroupMember({\n                link: {\n                    uuid,\n                },\n                permissionService,\n                dispatch,\n            });\n        }\n        dispatch<any>(deleteResources(uuidsToRemove));\n        dispatch(GroupMembersPanelActions.REQUEST_ITEMS());\n        dispatch(UserProfileGroupsActions.REQUEST_ITEMS());\n\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removed.', hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n    };\n\nexport const setMemberIsHidden = (memberLinkUuid: string, permissionLinkUuid: string, visible: boolean) =>\n    async (dispatch: Dispatch, getState: () => RootState, { permissionService }: ServiceRepository) => {\n        const memberLink = getResource<LinkResource>(memberLinkUuid)(getState().resources);\n\n        if (!visible && permissionLinkUuid) {\n            // Remove read permission\n            try {\n                await permissionService.delete(permissionLinkUuid);\n                dispatch<any>(deleteResources([permissionLinkUuid]));\n                dispatch(GroupPermissionsPanelActions.REQUEST_ITEMS());\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Removed read permission.',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS,\n                }));\n            } catch (e) {\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Failed to remove permission',\n                    kind: SnackbarKind.ERROR,\n                }));\n            }\n        } else if (visible && memberLink) {\n            // Create read permission\n            try {\n                const permission = await permissionService.create({\n                    headUuid: memberLink.tailUuid,\n                    tailUuid: memberLink.headUuid,\n                    name: PermissionLevel.CAN_READ,\n                });\n                dispatch(updateResources([permission]));\n                dispatch(GroupPermissionsPanelActions.REQUEST_ITEMS());\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Created read permission.',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS,\n                }));\n            } catch(e) {\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Failed to create permission',\n                    kind: SnackbarKind.ERROR,\n                }));\n            }\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/group-details-panel/group-details-panel-members-middleware-service.test.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { initialDataExplorer } from '../data-explorer/data-explorer-reducer'\nimport { getParams } from './group-details-panel-members-middleware-service'\n\ndescribe('group-details-panel-members-middleware', () => {\n    describe('getParams', () => {\n        it('should paginate', () => {\n            // given\n            const dataExplorer = initialDataExplorer;\n            let params = getParams(dataExplorer, 'uuid');\n\n            // expect\n            expect(params.offset).toBe(0);\n            expect(params.limit).toBe(50);\n\n            // when\n            dataExplorer.page = 1;\n            params = getParams(dataExplorer, 'uuid');\n\n            // expect\n            expect(params.offset).toBe(50);\n            expect(params.limit).toBe(50);\n        });\n    })\n})\n"
  },
  {
    "path": "services/workbench2/src/store/group-details-panel/group-details-panel-members-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, MiddlewareAPI } from \"redux\";\nimport { DataExplorerMiddlewareService, dataExplorerToListParams, listResultsToDataExplorerItemsMeta } from \"store/data-explorer/data-explorer-middleware-service\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { DataExplorer, getDataExplorer } from \"store/data-explorer/data-explorer-reducer\";\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { getCurrentGroupDetailsPanelUuid, GroupMembersPanelActions } from 'store/group-details-panel/group-details-panel-actions';\nimport { LinkClass } from 'models/link';\nimport { ResourceKind } from 'models/resource';\nimport { progressIndicatorActions } from 'store/progress-indicator/progress-indicator-actions';\nimport { couldNotFetchItemsAvailable } from \"store/data-explorer/data-explorer-action\";\nimport { ListArguments, ListResults } from \"services/common-service/common-service\";\nimport { PermissionResource } from \"models/permission\";\nimport { UserResource } from \"models/user\";\nimport { ProjectResource } from \"models/project\";\n\nexport class GroupDetailsPanelMembersMiddlewareService extends DataExplorerMiddlewareService {\n\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const dataExplorer = getDataExplorer(api.getState().dataExplorer, this.getId());\n        const groupUuid = getCurrentGroupDetailsPanelUuid(api.getState().properties);\n        if (!dataExplorer || !groupUuid) {\n            // Noop if data explorer refresh is triggered from another panel\n            return;\n        } else {\n            try {\n                if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n                const groupResource = await this.services.groupsService.get(groupUuid);\n                api.dispatch(updateResources([groupResource]));\n\n                // Get items\n                const permissionsIn = await this.services.permissionService.list(getParams(dataExplorer, groupUuid));\n                api.dispatch(updateResources(permissionsIn.items));\n\n                api.dispatch(GroupMembersPanelActions.SET_ITEMS({\n                    ...listResultsToDataExplorerItemsMeta(permissionsIn),\n                    items: permissionsIn.items.map(item => item.uuid),\n                }));\n\n                const userUuids = permissionsIn.items\n                    .filter((item) => item.tailKind === ResourceKind.USER)\n                    .map(item => item.tailUuid);\n                if (userUuids.length) {\n                    this.services.userService\n                        .list(getTypeParams(dataExplorer, userUuids))\n                        .then((usersIn: ListResults<UserResource>) => (\n                            api.dispatch(updateResources(usersIn.items))\n                        ));\n                }\n\n                const projectUuids = permissionsIn.items\n                    .filter((item) => item.tailKind === ResourceKind.PROJECT)\n                    .map(item => item.tailUuid);\n                if 
(projectUuids.length) {\n                    this.services.projectService\n                        .list(getTypeParams(dataExplorer, projectUuids))\n                        .then((projectsIn: ListResults<ProjectResource>) => (\n                            api.dispatch(updateResources(projectsIn.items))\n                        ));\n                }\n            } catch (e) {\n                api.dispatch(couldNotFetchGroupDetailsContents());\n            } finally {\n                api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n            }\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const groupUuid = getCurrentGroupDetailsPanelUuid(state.properties);\n\n        if (criteriaChanged && groupUuid) {\n            // Get itemsAvailable\n            return this.services.permissionService.list(getCountParams(groupUuid))\n                .then((results: ListResults<PermissionResource>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(GroupMembersPanelActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        // Dispatch the error action; calling the creator alone is a no-op\n                        api.dispatch(couldNotFetchItemsAvailable());\n                    }\n                });\n        }\n    }\n}\n\nexport const getParams = (dataExplorer: DataExplorer, groupUuid: string): ListArguments => ({\n    ...dataExplorerToListParams(dataExplorer),\n    filters: getFilters(groupUuid),\n    count: 'none',\n});\n\nexport const getTypeParams = (dataExplorer: DataExplorer, uuids: string[]): ListArguments => ({\n    limit: dataExplorer.rowsPerPage,\n    filters: new FilterBuilder()\n        .addIn('uuid', uuids)\n        .getFilters(),\n    count: 'none',\n});\n\nexport const getCountParams = (groupUuid: string): ListArguments => ({\n    filters: getFilters(groupUuid),\n    limit: 0,\n    count: 'exact',\n});\n\nexport const getFilters = (groupUuid: string) => {\n    return new FilterBuilder()\n        .addEqual('head_uuid', groupUuid)\n        .addEqual('link_class', LinkClass.PERMISSION)\n        .getFilters();\n};\n\nconst couldNotFetchGroupDetailsContents = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch group members.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/group-details-panel/group-details-panel-permissions-middleware-service.test.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { initialDataExplorer } from '../data-explorer/data-explorer-reducer'\nimport { getParams } from './group-details-panel-permissions-middleware-service'\n\ndescribe('group-details-panel-permissions-middleware', () => {\n    describe('getParams', () => {\n        it('should paginate', () => {\n            // given\n            const dataExplorer = initialDataExplorer;\n            let params = getParams(dataExplorer, 'uuid');\n\n            // expect\n            expect(params.offset).toBe(0);\n            expect(params.limit).toBe(50);\n\n            // when\n            dataExplorer.page = 1;\n            params = getParams(dataExplorer, 'uuid');\n\n            // expect\n            expect(params.offset).toBe(50);\n            expect(params.limit).toBe(50);\n        });\n    })\n})\n"
  },
  {
    "path": "services/workbench2/src/store/group-details-panel/group-details-panel-permissions-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, MiddlewareAPI } from \"redux\";\nimport { DataExplorerMiddlewareService, dataExplorerToListParams, listResultsToDataExplorerItemsMeta } from \"store/data-explorer/data-explorer-middleware-service\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { DataExplorer, getDataExplorer } from \"store/data-explorer/data-explorer-reducer\";\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { getCurrentGroupDetailsPanelUuid, GroupPermissionsPanelActions } from 'store/group-details-panel/group-details-panel-actions';\nimport { LinkClass } from 'models/link';\nimport { ResourceKind } from 'models/resource';\nimport { ListArguments, ListResults } from \"services/common-service/common-service\";\nimport { PermissionResource } from \"models/permission\";\nimport { couldNotFetchItemsAvailable } from \"store/data-explorer/data-explorer-action\";\nimport { ProjectResource } from \"models/project\";\nimport { CollectionResource } from \"models/collection\";\nimport { UserResource } from \"models/user\";\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\n\nexport class GroupDetailsPanelPermissionsMiddlewareService extends DataExplorerMiddlewareService {\n\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const dataExplorer = getDataExplorer(api.getState().dataExplorer, this.getId());\n        const groupUuid = getCurrentGroupDetailsPanelUuid(api.getState().properties);\n        if (!dataExplorer || !groupUuid) {\n            // No-op if data explorer is not set since refresh may be triggered from elsewhere\n        } else {\n            try {\n                if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n\n                // Get items\n                const permissionsOut = await this.services.permissionService.list(getParams(dataExplorer, groupUuid));\n                api.dispatch(updateResources(permissionsOut.items));\n\n                api.dispatch(GroupPermissionsPanelActions.SET_ITEMS({\n                    ...listResultsToDataExplorerItemsMeta(permissionsOut),\n                    items: permissionsOut.items.map(item => item.uuid),\n                }));\n\n                const userUuids = permissionsOut.items\n                    .filter((item) => item.headKind === ResourceKind.USER)\n                    .map(item => item.headUuid);\n                if (userUuids.length) {\n                    this.services.userService\n                        .list(getMetadataParams(dataExplorer, userUuids))\n                        .then((usersOut: ListResults<UserResource>) => (\n                            api.dispatch(updateResources(usersOut.items))\n                        ));\n                }\n\n                const collectionUuids = permissionsOut.items\n                    .filter((item) => item.headKind === ResourceKind.COLLECTION)\n                    .map(item => item.headUuid);\n                if (collectionUuids.length) {\n                    
this.services.collectionService\n                        .list(getMetadataParams(dataExplorer, collectionUuids))\n                        .then((collectionsOut: ListResults<CollectionResource>) => (\n                            api.dispatch(updateResources(collectionsOut.items))\n                        ));\n                }\n\n                const projectUuids = permissionsOut.items\n                    .filter((item) => item.headKind === ResourceKind.PROJECT)\n                    .map(item => item.headUuid);\n                if (projectUuids.length) {\n                    this.services.projectService\n                        .list(getMetadataParams(dataExplorer, projectUuids))\n                        .then((projectsOut: ListResults<ProjectResource>) => (\n                            api.dispatch(updateResources(projectsOut.items))\n                        ));\n                }\n            } catch (e) {\n                api.dispatch(couldNotFetchGroupDetailsContents());\n            } finally {\n                api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n            }\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const groupUuid = getCurrentGroupDetailsPanelUuid(state.properties);\n\n        if (criteriaChanged && groupUuid) {\n            // Get itemsAvailable\n            return this.services.permissionService.list(getCountParams(groupUuid))\n                .then((results: ListResults<PermissionResource>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(GroupPermissionsPanelActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        // Dispatch the error action; calling the creator alone is a no-op\n                        api.dispatch(couldNotFetchItemsAvailable());\n                    }\n                });\n        }\n    }\n}\n\nexport const getParams = (dataExplorer: DataExplorer, groupUuid: string): ListArguments => ({\n    ...dataExplorerToListParams(dataExplorer),\n    filters: getFilters(groupUuid),\n    count: 'none',\n});\n\nexport const getMetadataParams = (dataExplorer: DataExplorer, uuids: string[]): ListArguments => ({\n    limit: dataExplorer.rowsPerPage,\n    filters: new FilterBuilder()\n        .addIn('uuid', uuids)\n        .getFilters(),\n    count: 'none',\n});\n\nexport const getCountParams = (groupUuid: string): ListArguments => ({\n    filters: getFilters(groupUuid),\n    limit: 0,\n    count: 'exact',\n});\n\nexport const getFilters = (groupUuid: string) => {\n    return new FilterBuilder()\n        .addEqual('tail_uuid', groupUuid)\n        .addEqual('link_class', LinkClass.PERMISSION)\n        .getFilters();\n};\n\nconst couldNotFetchGroupDetailsContents = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch group permissions.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/groups-panel/groups-panel-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { bindDataExplorerActions } from \"store/data-explorer/data-explorer-action\";\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from 'services/services';\nimport { getResource } from 'store/resources/resources';\nimport { GroupResource, GroupClass } from 'models/group';\nimport { getCommonResourceServiceError, CommonResourceServiceError } from 'services/common-service/common-resource-service';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { PermissionLevel } from 'models/permission';\nimport { PermissionService } from 'services/permission-service/permission-service';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { ProjectUpdateFormDialogData, PROJECT_UPDATE_FORM_NAME } from 'store/projects/project-update-actions';\nimport { PROJECT_CREATE_FORM_NAME } from 'store/projects/project-create-actions';\nimport { selectedToArray, isRoleGroupResource } from 'components/multiselect-toolbar/MultiselectToolbar.utils';\n\nexport const GROUPS_PANEL_ID = \"groupsPanel\";\n\nexport const GROUP_ATTRIBUTES_DIALOG = 'groupAttributesDialog';\nexport const GROUP_REMOVE_DIALOG = 'groupRemoveDialog';\n\nexport const GroupsPanelActions = bindDataExplorerActions(GROUPS_PANEL_ID);\n\nexport const loadGroupsPanel = () => (dispatch: Dispatch) => {\n    dispatch(GroupsPanelActions.RESET_EXPLORER_SEARCH_VALUE());\n    dispatch(GroupsPanelActions.REQUEST_ITEMS());\n};\n\nexport const openCreateGroupDialog = () =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: PROJECT_CREATE_FORM_NAME,\n            data: {\n                sourcePanel: GroupClass.ROLE,\n            }\n        }));\n    };\n\nexport const openGroupAttributes = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const { resources } = getState();\n        const data = getResource<GroupResource>(uuid)(resources);\n        dispatch(dialogActions.OPEN_DIALOG({ id: GROUP_ATTRIBUTES_DIALOG, data }));\n    };\n\nexport const removeGroup = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const { multiselect, resources } = getState();\n        const groupsToRemove = selectedToArray(multiselect.checkedList).filter(uuid => isRoleGroupResource(uuid, resources));\n        if (!groupsToRemove.length) groupsToRemove.push(uuid);\n        for (const group of groupsToRemove) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removing ...', kind: SnackbarKind.INFO }));\n            await services.groupsService.delete(group);\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removed.', hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n        }\n        dispatch<any>(loadGroupsPanel());\n    };\n\nexport const openRemoveGroupDialog = (uuid: string, numOfGroups = 1) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const titleText = numOfGroups > 1 ? 'Remove groups' : 'Remove group';\n        const confirmationText = numOfGroups > 1 ? 
`Are you sure you want to remove these ${numOfGroups} groups?` : 'Are you sure you want to remove this group?';\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: GROUP_REMOVE_DIALOG,\n            data: {\n                title: titleText,\n                text: confirmationText,\n                confirmButtonLabel: 'Remove',\n                uuid\n            }\n        }));\n    };\n\n// Group edit dialog uses project update dialog with sourcePanel set to reload the appropriate parts\nexport const openGroupUpdateDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const group = getResource<GroupResource>(uuid)(getState().resources);\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: PROJECT_UPDATE_FORM_NAME,\n            data: {\n                sourcePanel: GroupClass.ROLE,\n                ...group,\n            }\n        }));\n    };\n\nexport const updateGroup = (project: ProjectUpdateFormDialogData, setSubmitErr: (errMsg: string) => void) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const uuid = project.uuid || '';\n        try {\n            const updatedGroup = await services.groupsService.update(\n                uuid,\n                {\n                    name: project.name,\n                    properties: project.properties,\n                    description: project.description,\n                },\n                false\n            );\n            dispatch(GroupsPanelActions.REQUEST_ITEMS());\n            dispatch(dialogActions.CLOSE_DIALOG({ id: PROJECT_UPDATE_FORM_NAME }));\n            return updatedGroup;\n        } catch (e) {\n            const error = getCommonResourceServiceError(e);\n            if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                setSubmitErr('Group with the same name already exists.');\n            }\n            return ;\n        }\n    };\n\nexport type GroupCreateFormDialogData = {\n    name: string;\n    description: string;\n    properties: {\n        [key: string]: string | string[];\n    };\n    users: { uuid: string, name: string }[];\n};\n\nexport const createGroupRunner = ({ name, users = [], description, properties }: GroupCreateFormDialogData, setSubmitErr: (err: string) => void) =>\n    async (dispatch: Dispatch, _: {}, { groupsService, permissionService }: ServiceRepository) => {\n        try {\n            const newGroup = await groupsService.create({\n                name,\n                properties,\n                description,\n                groupClass: GroupClass.ROLE\n            });\n            for (const user of users) {\n                await addGroupMember({\n                    user,\n                    group: newGroup,\n                    dispatch,\n                    permissionService,\n                });\n            }\n            dispatch(dialogActions.CLOSE_DIALOG({ id: PROJECT_CREATE_FORM_NAME }));\n            dispatch<any>(loadGroupsPanel());\n            dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: `${newGroup.name} group has been created`,\n                kind: SnackbarKind.SUCCESS\n            }));\n            return newGroup;\n        } catch (e) {\n            const error = getCommonResourceServiceError(e);\n            if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                setSubmitErr('Group with the same name already exists.');\n            }\n            return;\n        }\n    
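    // Note: failures other than a duplicate group name are swallowed here, so the create dialog simply stays open with no error message.\n    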
};\n\ninterface AddGroupMemberArgs {\n    user: { uuid: string, name: string };\n    group: { uuid: string, name: string };\n    dispatch: Dispatch;\n    permissionService: PermissionService;\n}\n\n/**\n * Group membership is determined by whether the group has can_read permission on an object.\n * If a group G can_read an object A, then we say A is a member of G.\n *\n * [Permission model docs](https://doc.arvados.org/api/permission-model.html)\n */\nexport const addGroupMember = async ({ user, group, ...args }: AddGroupMemberArgs) => {\n    await createPermission({\n        head: { ...group },\n        tail: { ...user },\n        permissionLevel: PermissionLevel.CAN_READ,\n        ...args,\n    });\n};\n\ninterface CreatePermissionLinkArgs {\n    head: { uuid: string, name: string };\n    tail: { uuid: string, name: string };\n    permissionLevel: PermissionLevel;\n    dispatch: Dispatch;\n    permissionService: PermissionService;\n}\n\nconst createPermission = async ({ head, tail, permissionLevel, dispatch, permissionService }: CreatePermissionLinkArgs) => {\n    try {\n        await permissionService.create({\n            tailUuid: tail.uuid,\n            headUuid: head.uuid,\n            name: permissionLevel,\n        });\n    } catch (e) {\n        dispatch(snackbarActions.OPEN_SNACKBAR({\n            message: `Could not add ${tail.name} -> ${head.name} relation`,\n            kind: SnackbarKind.ERROR,\n        }));\n    }\n};\n\ninterface DeleteGroupMemberArgs {\n    link: { uuid: string };\n    dispatch: Dispatch;\n    permissionService: PermissionService;\n}\n\nexport const deleteGroupMember = async ({ link, ...args }: DeleteGroupMemberArgs) => {\n    await deletePermission({\n        uuid: link.uuid,\n        ...args,\n    });\n};\n\ninterface DeletePermissionLinkArgs {\n    uuid: string;\n    dispatch: Dispatch;\n    permissionService: PermissionService;\n}\n\nexport const deletePermission = async ({ uuid, dispatch, permissionService }: DeletePermissionLinkArgs) => {\n    try {\n        const permissionsResponse = await permissionService.list({\n            filters: new FilterBuilder()\n                .addEqual('uuid', uuid)\n                .getFilters()\n        });\n        const [permission] = permissionsResponse.items;\n        if (permission) {\n            await permissionService.delete(permission.uuid);\n        } else {\n            throw new Error('Permission not found');\n        }\n    } catch (e) {\n        dispatch(snackbarActions.OPEN_SNACKBAR({\n            message: `Could not delete ${uuid} permission`,\n            kind: SnackbarKind.ERROR,\n        }));\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/store/groups-panel/groups-panel-middleware-service.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport Axios from \"axios\";\nimport { mockConfig } from \"common/config\";\nimport { createBrowserHistory } from \"history\";\nimport { dataExplorerActions } from \"store/data-explorer/data-explorer-action\";\nimport { GROUPS_PANEL_ID } from \"./groups-panel-actions\";\nimport { configureStore } from \"store/store\";\nimport { createServices } from \"services/services\";\nimport { getResource } from \"store/resources/resources\";\n\ndescribe(\"GroupsPanelMiddlewareService\", () => {\n    let axiosInst;\n    let store;\n    let services;\n    const config = {};\n    const actions = {\n        progressFn: (id, working) => { },\n        errorFn: (id, message) => { }\n    };\n\n    beforeEach(() => {\n        axiosInst = Axios.create({ headers: {} });\n        services = createServices(mockConfig({}), actions, axiosInst);\n        store = configureStore(createBrowserHistory(), services, config);\n    });\n\n    it(\"requests group member counts and updates resource store\", async () => {\n        // Given\n        const fakeUuid = \"zzzzz-j7d0g-000000000000000\";\n        axiosInst.get = cy.spy((url) => {\n            if (url === '/groups') {\n                return Promise.resolve(\n                    { data: {\n                        kind: \"\",\n                        offset: 0,\n                        limit: 100,\n                        items: [{\n                            can_manage: true,\n                            can_write: true,\n                            created_at: \"2023-11-15T20:57:01.723043000Z\",\n                            delete_at: null,\n                            description: null,\n                            etag: \"0000000000000000000000000\",\n                            frozen_by_uuid: null,\n                            group_class: \"role\",\n                            is_trashed: false,\n                            kind: \"arvados#group\",\n                            modified_at: \"2023-11-15T20:57:01.719986000Z\",\n                            modified_by_user_uuid: \"zzzzz-tpzed-000000000000000\",\n                            name: \"Test Group\",\n                            owner_uuid: \"zzzzz-tpzed-000000000000000\",\n                            properties: {},\n                            trash_at: null,\n                            uuid: fakeUuid,\n                            writable_by: [\n                                \"zzzzz-tpzed-000000000000000\",\n                            ]\n                        }],\n                        items_available: 1,\n                    }});\n            } else if (url === '/links') {\n                return Promise.resolve(\n                    { data: {\n                        items: [],\n                        items_available: 234,\n                        kind: \"arvados#linkList\",\n                        limit: 0,\n                        offset: 0\n                    }});\n            } else {\n                return Promise.resolve(\n                    { data: {}});\n            }\n        });\n\n        // When\n        await store.dispatch(dataExplorerActions.REQUEST_ITEMS({id: GROUPS_PANEL_ID}));\n        // Wait for async fetching of group count promises to resolve\n        await new Promise(resolve => setTimeout(resolve, 0));\n\n        // Expect\n        expect(axiosInst.get).to.be.calledThrice;\n        expect(axiosInst.get.getCall(0).args[0]).to.equal('/groups');\n 
       expect(axiosInst.get.getCall(0).args[1].params).to.deep.include({count: 'none'});\n        expect(axiosInst.get.getCall(1).args[0]).to.equal('/groups');\n        expect(axiosInst.get.getCall(1).args[1].params).to.deep.include({count: 'exact', limit: 0});\n        expect(axiosInst.get.getCall(2).args[0]).to.equal('/links');\n        const group = getResource(fakeUuid)(store.getState().resources);\n        expect(group?.memberCount).to.equal(234);\n    });\n\n    it('requests group member count and stores null on failure', async () => {\n        // Given\n        const fakeUuid = \"zzzzz-j7d0g-000000000000000\";\n        axiosInst.get = cy.spy((url) => {\n            if (url === '/groups') {\n                return Promise.resolve(\n                    { data: {\n                        kind: \"\",\n                        offset: 0,\n                        limit: 100,\n                        items: [{\n                            can_manage: true,\n                            can_write: true,\n                            created_at: \"2023-11-15T20:57:01.723043000Z\",\n                            delete_at: null,\n                            description: null,\n                            etag: \"0000000000000000000000000\",\n                            frozen_by_uuid: null,\n                            group_class: \"role\",\n                            is_trashed: false,\n                            kind: \"arvados#group\",\n                            modified_at: \"2023-11-15T20:57:01.719986000Z\",\n                            modified_by_user_uuid: \"zzzzz-tpzed-000000000000000\",\n                            name: \"Test Group\",\n                            owner_uuid: \"zzzzz-tpzed-000000000000000\",\n                            properties: {},\n                            trash_at: null,\n                            uuid: fakeUuid,\n                            writable_by: [\n                                \"zzzzz-tpzed-000000000000000\",\n                            ]\n                        }],\n                        items_available: 1,\n                    }});\n            } else if (url === '/links') {\n                return Promise.reject();\n            } else {\n                return Promise.resolve({ data: {}});\n            }\n        });\n\n        // When\n        await store.dispatch(dataExplorerActions.REQUEST_ITEMS({id: GROUPS_PANEL_ID}));\n        // Wait for async fetching of group count promises to resolve\n        await new Promise(resolve => setTimeout(resolve, 0));\n\n        // Expect\n        expect(axiosInst.get).to.be.calledThrice;\n        expect(axiosInst.get.getCall(0).args[0]).to.equal('/groups');\n        expect(axiosInst.get.getCall(0).args[1].params).to.deep.include({count: 'none'});\n        expect(axiosInst.get.getCall(1).args[0]).to.equal('/groups');\n        expect(axiosInst.get.getCall(1).args[1].params).to.deep.include({count: 'exact', limit: 0});\n        expect(axiosInst.get.getCall(2).args[0]).to.equal('/links');\n        const group = getResource(fakeUuid)(store.getState().resources);\n        expect(group?.memberCount).to.equal(null);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/store/groups-panel/groups-panel-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, MiddlewareAPI } from \"redux\";\nimport { DataExplorerMiddlewareService, listResultsToDataExplorerItemsMeta, dataExplorerToListParams } from \"store/data-explorer/data-explorer-middleware-service\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { DataExplorer, getDataExplorer, getSortColumn } from \"store/data-explorer/data-explorer-reducer\";\nimport { GroupsPanelActions } from 'store/groups-panel/groups-panel-actions';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { OrderBuilder, OrderDirection } from 'services/api/order-builder';\nimport { GroupResource, GroupClass } from 'models/group';\nimport { SortDirection } from 'components/data-table/data-column';\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { ListArguments, ListResults } from \"services/common-service/common-service\";\nimport { couldNotFetchItemsAvailable } from \"store/data-explorer/data-explorer-action\";\n\nexport class GroupsPanelMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    getOrder = (dataExplorer: DataExplorer) => {\n        const sortColumn = getSortColumn<GroupResource>(dataExplorer);\n        const order = new OrderBuilder<GroupResource>();\n        if (sortColumn && sortColumn.sort) {\n            const sortDirection = sortColumn.sort.direction === SortDirection.ASC ? 
OrderDirection.ASC : OrderDirection.DESC;\n\n            // Use createdAt as a secondary sort column so we break ties consistently.\n            return order\n                .addOrder(sortDirection, sortColumn.sort.field)\n                .addOrder(OrderDirection.DESC, \"createdAt\")\n                .getOrder();\n        } else {\n            return order.getOrder();\n        }\n    };\n\n    getFilters(dataExplorer: DataExplorer): string {\n        return new FilterBuilder()\n            .addEqual('group_class', GroupClass.ROLE)\n            .addILike('name', dataExplorer.searchValue)\n            .getFilters();\n    }\n\n    getParams(dataExplorer: DataExplorer): ListArguments {\n        return {\n            ...dataExplorerToListParams(dataExplorer),\n            filters: this.getFilters(dataExplorer),\n            order: this.getOrder(dataExplorer),\n            count: 'none',\n        };\n    }\n\n    getCountParams(dataExplorer: DataExplorer): ListArguments {\n        return {\n            filters: this.getFilters(dataExplorer),\n            limit: 0,\n            count: 'exact',\n        };\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const dataExplorer = getDataExplorer(api.getState().dataExplorer, this.getId());\n        if (!dataExplorer) {\n            api.dispatch(groupsPanelDataExplorerIsNotSet());\n        } else {\n            try {\n                if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n\n                // Get items\n                const groups = await this.services.groupsService.list(this.getParams(dataExplorer));\n                api.dispatch(updateResources(groups.items));\n                api.dispatch(GroupsPanelActions.SET_ITEMS({\n                    ...listResultsToDataExplorerItemsMeta(groups),\n                    items: groups.items.map(item => item.uuid),\n                }));\n\n                // Get group member counts\n                groups.items.map(group => (\n                    this.services.permissionService.list({\n                        limit: 0,\n                        filters: new FilterBuilder()\n                            .addEqual('head_uuid', group.uuid)\n                            .getFilters()\n                    }).then(members => {\n                        api.dispatch(updateResources([{\n                            ...group,\n                            memberCount: members.itemsAvailable,\n                        } as GroupResource]));\n                    }).catch(e => {\n                        // In case of error, store null to stop spinners and show failure icon\n                        api.dispatch(updateResources([{\n                            ...group,\n                            memberCount: null,\n                        } as GroupResource]));\n                    })\n                ));\n            } catch (e) {\n                api.dispatch(couldNotFetchGroupList());\n            } finally {\n                api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n            }\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n\n        if (criteriaChanged) {\n            // Get itemsAvailable\n            return 
this.services.groupsService.list(this.getCountParams(dataExplorer))\n                .then((results: ListResults<GroupResource>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(GroupsPanelActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        // Dispatch the error action; calling the creator alone is a no-op\n                        api.dispatch(couldNotFetchItemsAvailable());\n                    }\n                });\n        }\n    }\n}\n\nconst groupsPanelDataExplorerIsNotSet = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Groups panel is not ready.',\n        kind: SnackbarKind.ERROR\n    });\n\nconst couldNotFetchGroupList = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch groups.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/keep-services/keep-services-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\nimport { RootState } from 'store/store';\nimport { setBreadcrumbs } from 'store/breadcrumbs/breadcrumbs-actions';\nimport { ServiceRepository } from \"services/services\";\nimport { KeepServiceResource } from 'models/keep-services';\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport {snackbarActions, SnackbarKind} from 'store/snackbar/snackbar-actions';\nimport { navigateToRootProject } from 'store/navigation/navigation-action';\n\nexport const keepServicesActions = unionize({\n    SET_KEEP_SERVICES: ofType<KeepServiceResource[]>(),\n    REMOVE_KEEP_SERVICE: ofType<string>()\n});\n\nexport type KeepServicesActions = UnionOf<typeof keepServicesActions>;\n\nexport const KEEP_SERVICE_REMOVE_DIALOG = 'keepServiceRemoveDialog';\nexport const KEEP_SERVICE_ATTRIBUTES_DIALOG = 'keepServiceAttributesDialog';\n\nexport const loadKeepServicesPanel = () =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const user = getState().auth.user;\n        if(user && user.isAdmin) {\n            try {\n                dispatch(setBreadcrumbs([{ label: 'Keep Services' }]));\n                const response = await services.keepService.list();\n                dispatch(keepServicesActions.SET_KEEP_SERVICES(response.items));\n            } catch (e) {\n                return;\n            }\n        } else {\n            dispatch(navigateToRootProject);\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"You don't have permissions to view this page\", hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        }\n    };\n\nexport const openKeepServiceAttributesDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const keepService = getState().keepServices.find(it => it.uuid === uuid);\n        dispatch(dialogActions.OPEN_DIALOG({ id: KEEP_SERVICE_ATTRIBUTES_DIALOG, data: { keepService } }));\n    };\n\nexport const openKeepServiceRemoveDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: KEEP_SERVICE_REMOVE_DIALOG,\n            data: {\n                title: 'Remove keep service',\n                text: 'Are you sure you want to remove this keep service?',\n                confirmButtonLabel: 'Remove',\n                uuid\n            }\n        }));\n    };\n\nexport const removeKeepService = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removing ...', kind: SnackbarKind.INFO }));\n        try {\n            await services.keepService.delete(uuid);\n            dispatch(keepServicesActions.REMOVE_KEEP_SERVICE(uuid));\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Keep service has been successfully removed.', hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n        } catch (e) {\n            return;\n        }\n    };"
  },
  {
    "path": "services/workbench2/src/store/keep-services/keep-services-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { keepServicesActions, KeepServicesActions } from 'store/keep-services/keep-services-actions';\nimport { KeepServiceResource } from 'models/keep-services';\n\nexport type KeepSericesState = KeepServiceResource[];\n\nconst initialState: KeepSericesState = [];\n\nexport const keepServicesReducer = (state: KeepSericesState = initialState, action: KeepServicesActions): KeepSericesState =>\n    keepServicesActions.match(action, {\n        SET_KEEP_SERVICES: items => items,\n        REMOVE_KEEP_SERVICE: (uuid: string) => state.filter((keepService) => keepService.uuid !== uuid),\n        default: () => state\n    });"
  },
  {
    "path": "services/workbench2/src/store/link-account-panel/link-account-panel-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { getUserUuid } from \"common/getuser\";\nimport { ServiceRepository, createServices, setAuthorizationHeader } from \"services/services\";\nimport { setBreadcrumbs } from \"store/breadcrumbs/breadcrumbs-actions\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { LinkAccountType, AccountToLink, LinkAccountStatus } from \"models/link-account\";\nimport { authActions, getConfig } from \"store/auth/auth-action\";\nimport { unionize, ofType, UnionOf } from 'common/unionize';\nimport { UserResource } from \"models/user\";\nimport { GroupResource } from \"models/group\";\nimport { LinkAccountPanelError, OriginatingUser } from \"./link-account-panel-reducer\";\nimport { login, logout } from \"store/auth/auth-action\";\nimport { progressIndicatorActions, WORKBENCH_LOADING_SCREEN } from \"store/progress-indicator/progress-indicator-actions\";\n\nexport const linkAccountPanelActions = unionize({\n    LINK_INIT: ofType<{\n        targetUser: UserResource | undefined\n    }>(),\n    LINK_LOAD: ofType<{\n        originatingUser: OriginatingUser | undefined,\n        targetUser: UserResource | undefined,\n        targetUserToken: string | undefined,\n        userToLink: UserResource | undefined,\n        userToLinkToken: string | undefined\n    }>(),\n    LINK_INVALID: ofType<{\n        originatingUser: OriginatingUser | undefined,\n        targetUser: UserResource | undefined,\n        userToLink: UserResource | undefined,\n        error: LinkAccountPanelError\n    }>(),\n    SET_SELECTED_CLUSTER: ofType<{\n        selectedCluster: string\n    }>(),\n    SET_IS_PROCESSING: ofType<{\n        isProcessing: boolean\n    }>(),\n    HAS_SESSION_DATA: {}\n});\n\nexport type LinkAccountPanelAction = UnionOf<typeof linkAccountPanelActions>;\n\nfunction validateLink(userToLink: UserResource, targetUser: UserResource) {\n    if (userToLink.uuid === targetUser.uuid) {\n        return LinkAccountPanelError.SAME_USER;\n    }\n    else if (userToLink.isAdmin && !targetUser.isAdmin) {\n        return LinkAccountPanelError.NON_ADMIN;\n    }\n    else if (!targetUser.isActive) {\n        return LinkAccountPanelError.INACTIVE;\n    }\n    return LinkAccountPanelError.NONE;\n}\n\nconst newServices = (dispatch: Dispatch<any>, token: string) => {\n    const config = dispatch<any>(getConfig);\n    const svc = createServices(config, { progressFn: () => { }, errorFn: () => { } });\n    setAuthorizationHeader(svc, token);\n    return svc;\n};\n\nexport const checkForLinkStatus = () =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const status = services.linkAccountService.getLinkOpStatus();\n        if (status !== undefined) {\n            let msg: string;\n            let msgKind: SnackbarKind;\n            if (status.valueOf() === LinkAccountStatus.CANCELLED) {\n                msg = \"Account link cancelled!\";\n                msgKind = SnackbarKind.INFO;\n            }\n            else if (status.valueOf() === LinkAccountStatus.FAILED) {\n                msg = \"Account link failed!\";\n                msgKind = SnackbarKind.ERROR;\n            }\n            else if (status.valueOf() === LinkAccountStatus.SUCCESS) {\n                msg = \"Account link success!\";\n                msgKind = SnackbarKind.SUCCESS;\n     
       }\n            else {\n                msg = \"Unknown Error!\";\n                msgKind = SnackbarKind.ERROR;\n            }\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: msg, kind: msgKind, hideDuration: 3000 }));\n            services.linkAccountService.removeLinkOpStatus();\n        }\n    };\n\nexport const switchUser = (user: UserResource, token: string) =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(authActions.INIT_USER({ user, token }));\n    };\n\nexport const linkFailed = () =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        // If the link fails, switch to the user account that originated the link operation\n        const linkState = getState().linkAccountPanel;\n        if (linkState.userToLink && linkState.userToLinkToken && linkState.targetUser && linkState.targetUserToken) {\n            if (linkState.originatingUser === OriginatingUser.TARGET_USER) {\n                dispatch(switchUser(linkState.targetUser, linkState.targetUserToken));\n            }\n            else if ((linkState.originatingUser === OriginatingUser.USER_TO_LINK)) {\n                dispatch(switchUser(linkState.userToLink, linkState.userToLinkToken));\n            }\n        }\n        services.linkAccountService.removeAccountToLink();\n        services.linkAccountService.saveLinkOpStatus(LinkAccountStatus.FAILED);\n        window.location.reload();\n    };\n\nexport const loadLinkAccountPanel = () =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            // If there are remote hosts, set the initial selected cluster by getting the first cluster that isn't the local cluster\n            if (getState().linkAccountPanel.selectedCluster === undefined) {\n                const localCluster = getState().auth.localCluster;\n                let selectedCluster = localCluster;\n                for (const key in getState().auth.remoteHosts) {\n                    if (key !== localCluster) {\n                        selectedCluster = key;\n                        break;\n                    }\n                }\n                dispatch(linkAccountPanelActions.SET_SELECTED_CLUSTER({ selectedCluster }));\n            }\n\n            // First check if an account link operation has completed\n            dispatch(checkForLinkStatus());\n\n            // Continue loading the link account panel\n            dispatch(setBreadcrumbs([{ label: 'Link account' }]));\n            const curUser = getState().auth.user;\n            const curToken = getState().auth.apiToken;\n            if (curUser && curToken) {\n\n                // If there is link account session data, then the user has logged in a second time\n                const linkAccountData = services.linkAccountService.getAccountToLink();\n                if (linkAccountData) {\n\n                    dispatch(linkAccountPanelActions.SET_IS_PROCESSING({ isProcessing: true }));\n                    const curUserResource = await services.userService.get(curUser.uuid);\n\n                    // Use the token of the user we are getting data for. 
This avoids any admin/non-admin permissions\n                    // issues since a user will always be able to query the api server for their own user data.\n                    const svc = newServices(dispatch, linkAccountData.token);\n                    const savedUserResource = await svc.userService.get(linkAccountData.userUuid);\n\n                    let params: any;\n                    if (linkAccountData.type === LinkAccountType.ACCESS_OTHER_ACCOUNT || linkAccountData.type === LinkAccountType.ACCESS_OTHER_REMOTE_ACCOUNT) {\n                        params = {\n                            originatingUser: OriginatingUser.USER_TO_LINK,\n                            targetUser: curUserResource,\n                            targetUserToken: curToken,\n                            userToLink: savedUserResource,\n                            userToLinkToken: linkAccountData.token\n                        };\n                    }\n                    else if (linkAccountData.type === LinkAccountType.ADD_OTHER_LOGIN || linkAccountData.type === LinkAccountType.ADD_LOCAL_TO_REMOTE) {\n                        params = {\n                            originatingUser: OriginatingUser.TARGET_USER,\n                            targetUser: savedUserResource,\n                            targetUserToken: linkAccountData.token,\n                            userToLink: curUserResource,\n                            userToLinkToken: curToken\n                        };\n                    }\n                    else {\n                        throw new Error(\"Unknown link account type\");\n                    }\n\n                    dispatch(switchUser(params.targetUser, params.targetUserToken));\n                    const error = validateLink(params.userToLink, params.targetUser);\n                    if (error === LinkAccountPanelError.NONE) {\n                        dispatch(linkAccountPanelActions.LINK_LOAD(params));\n                    }\n                    else {\n                        dispatch(linkAccountPanelActions.LINK_INVALID({\n                            originatingUser: params.originatingUser,\n                            targetUser: params.targetUser,\n                            userToLink: params.userToLink,\n                            error\n                        }));\n                        return;\n                    }\n                }\n                else {\n                    // If there is no link account session data, set the state to invoke the initial UI\n                    const curUserResource = await services.userService.get(curUser.uuid);\n                    dispatch(linkAccountPanelActions.LINK_INIT({ targetUser: curUserResource }));\n                    return;\n                }\n            }\n        }\n        catch (e) {\n            dispatch(linkFailed());\n        }\n        finally {\n            dispatch(linkAccountPanelActions.SET_IS_PROCESSING({ isProcessing: false }));\n        }\n    };\n\nexport const startLinking = (t: LinkAccountType) =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const userUuid = getUserUuid(getState());\n        if (!userUuid) { return; }\n        const accountToLink = { type: t, userUuid, token: services.authService.getApiToken() } as AccountToLink;\n        services.linkAccountService.saveAccountToLink(accountToLink);\n\n        const auth = getState().auth;\n        const isLocalUser = auth.user!.uuid.substring(0, 5) === auth.localCluster;\n        let 
homeCluster = auth.localCluster;\n        if (isLocalUser && t === LinkAccountType.ACCESS_OTHER_REMOTE_ACCOUNT) {\n            homeCluster = getState().linkAccountPanel.selectedCluster!;\n        }\n\n        dispatch(logout());\n        dispatch(login(auth.localCluster, homeCluster, auth.loginCluster, auth.remoteHosts));\n    };\n\nexport const getAccountLinkData = () =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        return services.linkAccountService.getAccountToLink();\n    };\n\nexport const cancelLinking = (reload: boolean = false) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        let user: UserResource | undefined;\n        try {\n            // When linking is cancelled switch to the originating user (i.e. the user saved in session data)\n            dispatch(progressIndicatorActions.START_WORKING(WORKBENCH_LOADING_SCREEN));\n            const linkAccountData = services.linkAccountService.getAccountToLink();\n            if (linkAccountData) {\n                services.linkAccountService.removeAccountToLink();\n                const svc = newServices(dispatch, linkAccountData.token);\n                user = await svc.userService.get(linkAccountData.userUuid);\n                dispatch(switchUser(user, linkAccountData.token));\n                services.linkAccountService.saveLinkOpStatus(LinkAccountStatus.CANCELLED);\n            }\n        }\n        finally {\n            if (reload) {\n                window.location.reload();\n            }\n            else {\n                dispatch(progressIndicatorActions.STOP_WORKING(WORKBENCH_LOADING_SCREEN));\n            }\n        }\n    };\n\nexport const linkAccount = () =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const linkState = getState().linkAccountPanel;\n        if (linkState.userToLink && linkState.userToLinkToken && linkState.targetUser && linkState.targetUserToken) {\n\n            // First create a project owned by the target user\n            const projectName = `Migrated from ${linkState.userToLink.email} (${linkState.userToLink.uuid})`;\n            let newGroup: GroupResource;\n            try {\n                newGroup = await services.projectService.create({\n                    name: projectName,\n                    ensure_unique_name: true\n                });\n            }\n            catch (e) {\n                dispatch(linkFailed());\n                throw e;\n            }\n\n            try {\n                // The merge api links the user sending the request into the user\n                // specified in the request, so change the authorization header accordingly\n                const svc = newServices(dispatch, linkState.userToLinkToken);\n                await svc.linkAccountService.linkAccounts(linkState.targetUserToken, newGroup.uuid);\n                dispatch(switchUser(linkState.targetUser, linkState.targetUserToken));\n                services.linkAccountService.removeAccountToLink();\n                services.linkAccountService.saveLinkOpStatus(LinkAccountStatus.SUCCESS);\n                window.location.reload();\n            }\n            catch (e) {\n                // If the link operation fails, delete the previously made project\n                try {\n                    const svc = newServices(dispatch, linkState.targetUserToken);\n                    await svc.projectService.delete(newGroup.uuid);\n          
      }\n                finally {\n                    dispatch(linkFailed());\n                }\n                throw e;\n            }\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/link-account-panel/link-account-panel-reducer.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { linkAccountPanelReducer, LinkAccountPanelError, LinkAccountPanelStatus, OriginatingUser } from \"store/link-account-panel/link-account-panel-reducer\";\nimport { linkAccountPanelActions } from \"store/link-account-panel/link-account-panel-actions\";\n\ndescribe('link-account-panel-reducer', () => {\n    const initialState = undefined;\n\n    it('handles initial link account state', () => {\n        const targetUser = {};\n        targetUser.username = \"targetUser\";\n\n        const state = linkAccountPanelReducer(initialState, linkAccountPanelActions.LINK_INIT({targetUser}));\n        expect(state).to.deep.equal({\n            targetUser,\n            isProcessing: false,\n            selectedCluster: undefined,\n            targetUserToken: undefined,\n            userToLink: undefined,\n            userToLinkToken: undefined,\n            originatingUser: OriginatingUser.NONE,\n            error: LinkAccountPanelError.NONE,\n            status: LinkAccountPanelStatus.INITIAL\n        });\n    });\n\n    it('handles loaded link account state', () => {\n        const targetUser = {};\n        targetUser.username = \"targetUser\";\n        const targetUserToken = \"targettoken\";\n\n        const userToLink = {};\n        userToLink.username = \"userToLink\";\n        const userToLinkToken = \"usertoken\";\n\n        const originatingUser = OriginatingUser.TARGET_USER;\n\n        const state = linkAccountPanelReducer(initialState, linkAccountPanelActions.LINK_LOAD({\n            originatingUser, targetUser, targetUserToken, userToLink, userToLinkToken}));\n        expect(state).to.deep.equal({\n            targetUser,\n            targetUserToken,\n            isProcessing: false,\n            selectedCluster: undefined,\n            userToLink,\n            userToLinkToken,\n            originatingUser: OriginatingUser.TARGET_USER,\n            error: LinkAccountPanelError.NONE,\n            status: LinkAccountPanelStatus.LINKING\n        });\n    });\n\n    it('handles loaded invalid account state', () => {\n        const targetUser = {};\n        targetUser.username = \"targetUser\";\n\n        const userToLink = {};\n        userToLink.username = \"userToLink\";\n\n        const originatingUser = OriginatingUser.TARGET_USER;\n        const error = LinkAccountPanelError.NON_ADMIN;\n\n        const state = linkAccountPanelReducer(initialState, linkAccountPanelActions.LINK_INVALID({targetUser, userToLink, originatingUser, error}));\n        expect(state).to.deep.equal({\n            targetUser,\n            targetUserToken: undefined,\n            isProcessing: false,\n            selectedCluster: undefined,\n            userToLink,\n            userToLinkToken: undefined,\n            originatingUser: OriginatingUser.TARGET_USER,\n            error: LinkAccountPanelError.NON_ADMIN,\n            status: LinkAccountPanelStatus.ERROR\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/store/link-account-panel/link-account-panel-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { linkAccountPanelActions, LinkAccountPanelAction } from \"store/link-account-panel/link-account-panel-actions\";\nimport { UserResource } from \"models/user\";\n\nexport enum LinkAccountPanelStatus {\n    NONE,\n    INITIAL,\n    HAS_SESSION_DATA,\n    LINKING,\n    ERROR\n}\n\nexport enum LinkAccountPanelError {\n    NONE,\n    INACTIVE,\n    NON_ADMIN,\n    SAME_USER\n}\n\nexport enum OriginatingUser {\n    NONE,\n    TARGET_USER,\n    USER_TO_LINK\n}\n\nexport interface LinkAccountPanelState {\n    selectedCluster: string | undefined;\n    originatingUser: OriginatingUser | undefined;\n    targetUser: UserResource | undefined;\n    targetUserToken: string | undefined;\n    userToLink: UserResource | undefined;\n    userToLinkToken: string | undefined;\n    status: LinkAccountPanelStatus;\n    error: LinkAccountPanelError;\n    isProcessing: boolean;\n}\n\nconst initialState = {\n    selectedCluster: undefined,\n    originatingUser: OriginatingUser.NONE,\n    targetUser: undefined,\n    targetUserToken: undefined,\n    userToLink: undefined,\n    userToLinkToken: undefined,\n    isProcessing: false,\n    status: LinkAccountPanelStatus.NONE,\n    error: LinkAccountPanelError.NONE\n};\n\nexport const linkAccountPanelReducer = (state: LinkAccountPanelState = initialState, action: LinkAccountPanelAction) =>\n    linkAccountPanelActions.match(action, {\n        default: () => state,\n        LINK_INIT: ({ targetUser }) => ({\n            ...state,\n            targetUser, targetUserToken: undefined,\n            userToLink: undefined, userToLinkToken: undefined,\n            status: LinkAccountPanelStatus.INITIAL, error: LinkAccountPanelError.NONE, originatingUser: OriginatingUser.NONE\n        }),\n        LINK_LOAD: ({ originatingUser, userToLink, targetUser, targetUserToken, userToLinkToken}) => ({\n            ...state,\n            originatingUser,\n            targetUser, targetUserToken,\n            userToLink, userToLinkToken,\n            status: LinkAccountPanelStatus.LINKING, error: LinkAccountPanelError.NONE\n        }),\n        LINK_INVALID: ({ originatingUser, targetUser, userToLink, error }) => ({\n            ...state,\n            originatingUser,\n            targetUser, targetUserToken: undefined,\n            userToLink, userToLinkToken: undefined,\n            error, status: LinkAccountPanelStatus.ERROR\n        }),\n        SET_SELECTED_CLUSTER: ({ selectedCluster }) => ({\n            ...state, selectedCluster\n        }),\n        SET_IS_PROCESSING: ({ isProcessing }) =>({\n            ...state,\n            isProcessing\n        }),\n        HAS_SESSION_DATA: () => ({\n            ...state, status: LinkAccountPanelStatus.HAS_SESSION_DATA\n        })\n    });"
  },
  {
    "path": "services/workbench2/src/store/link-panel/link-panel-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from 'services/services';\nimport { bindDataExplorerActions } from 'store/data-explorer/data-explorer-action';\nimport { setBreadcrumbs } from 'store/breadcrumbs/breadcrumbs-actions';\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { LinkResource } from 'models/link';\nimport { getResource } from 'store/resources/resources';\nimport {snackbarActions, SnackbarKind} from 'store/snackbar/snackbar-actions';\n\nexport const LINK_PANEL_ID = \"linkPanelId\";\nexport const linkPanelActions = bindDataExplorerActions(LINK_PANEL_ID);\n\nexport const LINK_REMOVE_DIALOG = 'linkRemoveDialog';\nexport const LINK_ATTRIBUTES_DIALOG = 'linkAttributesDialog';\n\nexport const loadLinkPanel = () =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(setBreadcrumbs([{ label: 'Links' }]));\n        dispatch(linkPanelActions.REQUEST_ITEMS());\n    };\n\nexport const openLinkAttributesDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const { resources } = getState();\n        const link = getResource<LinkResource>(uuid)(resources);\n        dispatch(dialogActions.OPEN_DIALOG({ id: LINK_ATTRIBUTES_DIALOG, data: { link } }));\n    };\n\nexport const openLinkRemoveDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: LINK_REMOVE_DIALOG,\n            data: {\n                title: 'Remove link',\n                text: 'Are you sure you want to remove this link?',\n                confirmButtonLabel: 'Remove',\n                uuid\n            }\n        }));\n    };\n\nexport const removeLink = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removing ...', kind: SnackbarKind.INFO }));\n        try {\n            await services.linkService.delete(uuid);\n            dispatch(linkPanelActions.REQUEST_ITEMS());\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Link has been successfully removed.', hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n        } catch (e) {\n            return;\n        }\n    };"
  },
  {
    "path": "services/workbench2/src/store/link-panel/link-panel-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ServiceRepository } from 'services/services';\nimport { MiddlewareAPI, Dispatch } from 'redux';\nimport { DataExplorerMiddlewareService, dataExplorerToListParams, getOrder, listResultsToDataExplorerItemsMeta } from 'store/data-explorer/data-explorer-middleware-service';\nimport { RootState } from 'store/store';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { DataExplorer, getDataExplorer } from 'store/data-explorer/data-explorer-reducer';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { ListArguments, ListResults } from 'services/common-service/common-service';\nimport { LinkResource } from 'models/link';\nimport { linkPanelActions } from 'store/link-panel/link-panel-actions';\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { couldNotFetchItemsAvailable } from 'store/data-explorer/data-explorer-action';\n\nexport class LinkMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        try {\n            if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n            const response = await this.services.linkService.list(getParams(dataExplorer));\n            api.dispatch(updateResources(response.items));\n            api.dispatch(setItems(response));\n        } catch {\n            api.dispatch(couldNotFetchLinks());\n        } finally {\n            api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        if (criteriaChanged) {\n            // Get itemsAvailable\n            return this.services.linkService.list(getCountParams())\n                .then((results: ListResults<LinkResource>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(linkPanelActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        couldNotFetchItemsAvailable();\n                    }\n                });\n        }\n    }\n}\n\nexport const getParams = (dataExplorer: DataExplorer): ListArguments => ({\n    ...dataExplorerToListParams(dataExplorer),\n    order: getOrder<LinkResource>(dataExplorer),\n    count: 'none',\n});\n\nconst getCountParams = (): ListArguments => ({\n    limit: 0,\n    count: 'exact',\n});\n\nexport const setItems = (listResults: ListResults<LinkResource>) =>\n    linkPanelActions.SET_ITEMS({\n        ...listResultsToDataExplorerItemsMeta(listResults),\n        items: listResults.items.map(resource => resource.uuid),\n    });\n\nconst couldNotFetchLinks = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch links.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/move-to-dialog/move-to-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport interface MoveToFormDialogData {\n    name: string;\n    uuid: string;\n    ownerUuid: string;\n}\n"
  },
  {
    "path": "services/workbench2/src/store/multiselect/multiselect-actions.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { TCheckedList } from \"components/data-table/data-table\";\nimport { ContainerRequestResource } from \"models/container-request\";\nimport { Dispatch } from \"redux\";\nimport { navigateTo } from \"store/navigation/navigation-action\";\nimport { snackbarActions } from \"store/snackbar/snackbar-actions\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { ContextMenuResource } from \"store/context-menu/context-menu\";\nimport { getResource } from 'store/resources/resources';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\n\nexport const multiselectActionConstants = {\n    TOGGLE_VISIBLITY: \"TOGGLE_VISIBLITY\",\n    SET_CHECKEDLIST: \"SET_CHECKEDLIST\",\n    SELECT_ONE: 'SELECT_ONE',\n    DESELECT_ONE: \"DESELECT_ONE\",\n    DESELECT_ALL_OTHERS: 'DESELECT_ALL_OTHERS',\n    TOGGLE_ONE: 'TOGGLE_ONE',\n    SET_SELECTED_UUID: 'SET_SELECTED_UUID',\n    ADD_DISABLED: 'ADD_DISABLED',\n    REMOVE_DISABLED: 'REMOVE_DISABLED',\n};\n\nexport const msNavigateToOutput = (resource: ContextMenuResource | ContainerRequestResource) => async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n    try {\n        await services.collectionService.get(resource.outputUuid || '');\n        dispatch<any>(navigateTo(resource.outputUuid || ''));\n    } catch {\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Output collection was trashed or deleted.\", hideDuration: 4000, kind: SnackbarKind.WARNING }));\n    }\n};\n\nexport const isExactlyOneSelected = (checkedList: TCheckedList) => {\n    let tally = 0;\n    let current = '';\n    for (const uuid in checkedList) {\n        if (checkedList[uuid] === true) {\n            tally++;\n            current = uuid;\n        }\n    }\n    return tally === 1 ? current : null\n};\n\nexport const isMoreThanOneSelected = (checkedList: TCheckedList) => {\n    let tally = 0;\n    for (const uuid in checkedList) {\n        if (checkedList[uuid] === true) {\n            tally++;\n        }\n    }\n    return tally > 1 ? 
true : false\n};\n\nexport const toggleMSToolbar = (isVisible: boolean) => {\n    return dispatch => {\n        dispatch({ type: multiselectActionConstants.TOGGLE_VISIBLITY, payload: isVisible });\n    };\n};\n\nexport const setCheckedListOnStore = (checkedList: TCheckedList) => {\n    return dispatch => {\n        dispatch(setSelectedUuid(isExactlyOneSelected(checkedList)))\n        dispatch({ type: multiselectActionConstants.SET_CHECKEDLIST, payload: checkedList });\n    };\n};\n\nexport const selectOne = (uuid: string) => {\n    return dispatch => {\n        dispatch({ type: multiselectActionConstants.SELECT_ONE, payload: uuid });\n    };\n};\n\nexport const deselectOne = (uuid: string) => {\n    return dispatch => {\n        dispatch({ type: multiselectActionConstants.DESELECT_ONE, payload: uuid });\n    };\n};\n\nexport const deselectAllOthers = (uuid: string) => {\n    return dispatch => {\n        dispatch({ type: multiselectActionConstants.DESELECT_ALL_OTHERS, payload: uuid });\n    };\n};\n\nexport const toggleOne = (uuid: string) => {\n    return dispatch => {\n        dispatch({ type: multiselectActionConstants.TOGGLE_ONE, payload: uuid });\n    };\n};\n\nexport const setSelectedUuid = (uuid: string | null) => {\n    return dispatch => {\n        dispatch({ type: multiselectActionConstants.SET_SELECTED_UUID, payload: uuid });\n    };\n};\n\nexport const addDisabledButton = (buttonName: string) => {\n    return dispatch => {\n        dispatch({ type: multiselectActionConstants.ADD_DISABLED, payload: buttonName });\n    };\n};\n\nexport const removeDisabledButton = (buttonName: string) => {\n    return dispatch => {\n        dispatch({ type: multiselectActionConstants.REMOVE_DISABLED, payload: buttonName });\n    };\n};\n\nexport const getResourcesFromCheckedList = (state: RootState): GroupContentsResource[] => {\n    const checkedList = getCheckedListUuids(state);\n    const resources = checkedList\n        .map(uuid => getResource<GroupContentsResource>(uuid)(state.resources))\n        .filter((resource): resource is GroupContentsResource => !!resource);\n    return resources;\n};\n\nexport const getCheckedListUuids = (state: RootState): string[] => {\n    const checkedList = state.multiselect.checkedList;\n    return Object.keys(checkedList).filter(uuid => checkedList[uuid] === true);\n};\n"
  },
  {
    "path": "services/workbench2/src/store/multiselect/multiselect-reducer.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { multiselectActionConstants } from \"./multiselect-actions\";\nimport { TCheckedList } from \"components/data-table/data-table\";\n\nexport type MultiselectToolbarState = {\n    isVisible: boolean;\n    checkedList: TCheckedList;\n    disabledButtons: string[];\n};\n\nconst multiselectToolbarInitialState = {\n    isVisible: false,\n    checkedList: {},\n    disabledButtons: []\n};\n\nconst uncheckAllOthers = (inputList: TCheckedList, uuid: string) => {\n    const checkedlist = {...inputList}\n    for (const key in checkedlist) {\n        if (key !== uuid) checkedlist[key] = false;\n    }\n    return checkedlist;\n};\n\nconst toggleOneCheck = (inputList: TCheckedList, uuid: string)=>{\n    const checkedlist = { ...inputList };\n    return { ...inputList, [uuid]: !checkedlist[uuid]};\n}\n\nconst { TOGGLE_VISIBLITY, SET_CHECKEDLIST, SELECT_ONE, DESELECT_ONE, DESELECT_ALL_OTHERS, TOGGLE_ONE, ADD_DISABLED, REMOVE_DISABLED } = multiselectActionConstants;\n\nexport const multiselectReducer = (state: MultiselectToolbarState = multiselectToolbarInitialState, action) => {\n    switch (action.type) {\n        case TOGGLE_VISIBLITY:\n            return { ...state, isVisible: action.payload };\n        case SET_CHECKEDLIST:\n            return { ...state, checkedList: action.payload };\n        case SELECT_ONE:\n            return { ...state, checkedList: { ...state.checkedList, [action.payload]: true } };\n        case DESELECT_ONE:\n            return { ...state, checkedList: { ...state.checkedList, [action.payload]: false } };\n        case DESELECT_ALL_OTHERS:\n            return { ...state, checkedList: uncheckAllOthers(state.checkedList, action.payload) };\n        case TOGGLE_ONE:\n            return { ...state, checkedList: toggleOneCheck(state.checkedList, action.payload) };\n        case ADD_DISABLED:\n            return { ...state, disabledButtons: [...state.disabledButtons, action.payload]}\n        case REMOVE_DISABLED:\n            return { ...state, disabledButtons: state.disabledButtons.filter((button) => button !== action.payload) };\n        default:\n            return state;\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/store/navigation/navigation-action.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose, AnyAction } from \"redux\";\nimport { push } from \"connected-react-router\";\nimport { ResourceKind, extractUuidKind } from \"models/resource\";\nimport { SidePanelTreeCategory } from \"../side-panel-tree/side-panel-tree-actions\";\nimport { Routes, getGroupUrl, getNavUrl, getUserProfileUrl } from \"routes/routes\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { pluginConfig } from \"plugins\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { USERS_PANEL_LABEL, MY_ACCOUNT_PANEL_LABEL, INSTANCE_TYPES_PANEL_LABEL, VIRTUAL_MACHINES_ADMIN_PANEL_LABEL, REPOSITORIES_PANEL_LABEL, USER_PREFERENCES_LABEL } from \"store/breadcrumbs/breadcrumbs-actions\";\n\nexport const navigationNotAvailable = (id: string) =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: `${id} not available`,\n        hideDuration: 3000,\n        kind: SnackbarKind.ERROR,\n    });\n\nexport const navigateTo = (uuid: string) => async (dispatch: Dispatch, getState: () => RootState) => {\n    for (const navToFn of pluginConfig.navigateToHandlers) {\n        if (navToFn(dispatch, getState, uuid)) {\n            return;\n        }\n    }\n\n    const kind = extractUuidKind(uuid);\n    switch (kind) {\n        case ResourceKind.PROJECT:\n        case ResourceKind.USER:\n        case ResourceKind.COLLECTION:\n        case ResourceKind.CONTAINER_REQUEST:\n            dispatch<any>(pushOrGoto(getNavUrl(uuid, getState().auth)));\n            return;\n        case ResourceKind.VIRTUAL_MACHINE:\n            dispatch<any>(navigateToAdminVirtualMachines);\n            return;\n        case ResourceKind.WORKFLOW:\n            dispatch<any>(pushOrGoto(getNavUrl(uuid, getState().auth)));\n            return;\n    }\n\n    switch (uuid) {\n        case SidePanelTreeCategory.DASHBOARD:\n            dispatch<any>(navigateToDashboard);\n            return;\n        case SidePanelTreeCategory.PROJECTS:\n            const usr = getState().auth.user;\n            if (usr) {\n                dispatch<any>(pushOrGoto(getNavUrl(usr.uuid, getState().auth)));\n            }\n            return;\n        case SidePanelTreeCategory.FAVORITES:\n            dispatch<any>(navigateToFavorites);\n            return;\n        case SidePanelTreeCategory.PUBLIC_FAVORITES:\n            dispatch(navigateToPublicFavorites);\n            return;\n        case SidePanelTreeCategory.SHARED_WITH_ME:\n            dispatch(navigateToSharedWithMe);\n            return;\n        case SidePanelTreeCategory.TRASH:\n            dispatch(navigateToTrash);\n            return;\n        case SidePanelTreeCategory.GROUPS:\n            dispatch(navigateToGroups);\n            return;\n        case SidePanelTreeCategory.ALL_PROCESSES:\n            dispatch(navigateToAllProcesses);\n            return;\n        case SidePanelTreeCategory.SHELL_ACCESS:\n            dispatch(navigateToUserVirtualMachines)\n            return;\n        case USERS_PANEL_LABEL:\n            dispatch(navigateToUsers);\n            return;\n        case MY_ACCOUNT_PANEL_LABEL:\n            dispatch(navigateToMyAccount);\n            return;\n        case USER_PREFERENCES_LABEL:\n            dispatch(navigateToMyPreferences);\n            return;\n        case INSTANCE_TYPES_PANEL_LABEL:\n            dispatch(navigateToInstanceTypes);\n            
return;\n        case VIRTUAL_MACHINES_ADMIN_PANEL_LABEL:\n            dispatch(navigateToAdminVirtualMachines);\n            return;\n        case REPOSITORIES_PANEL_LABEL:\n            dispatch(navigateToRepositories);\n            return;\n        case SidePanelTreeCategory.EXTERNAL_CREDENTIALS:\n            dispatch(navigateToExternalCredentials);\n            return;\n    }\n\n    dispatch(navigationNotAvailable(uuid));\n};\n\nexport const navigateToNotFound = push(Routes.NO_MATCH);\n\nexport const navigateToRoot = push(Routes.ROOT);\n\nexport const navigateToFavorites = push(Routes.FAVORITES);\n\nexport const navigateToTrash = push(Routes.TRASH);\n\nexport const navigateToPublicFavorites = push(Routes.PUBLIC_FAVORITES);\n\nexport const navigateToWorkflows = push(Routes.WORKFLOWS);\n\nexport const pushOrGoto = (url: string): AnyAction => {\n    if (url === \"\") {\n        return { type: \"noop\" };\n    } else if (url[0] === \"/\") {\n        return push(url);\n    } else {\n        window.location.href = url;\n        return { type: \"noop\" };\n    }\n};\n\nexport const navigateToRootProject = (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    navigateTo(SidePanelTreeCategory.PROJECTS)(dispatch, getState);\n};\n\nexport const navigateToSharedWithMe = push(Routes.SHARED_WITH_ME);\n\nexport const navigateToRunProcess = push(Routes.RUN_PROCESS);\n\nexport const navigateToSearchResults = (searchValue: string) => {\n    if (searchValue !== \"\") {\n        return push({ pathname: Routes.SEARCH_RESULTS, search: \"?q=\" + encodeURIComponent(searchValue) });\n    } else {\n        return push({ pathname: Routes.SEARCH_RESULTS });\n    }\n};\n\nexport const navigateToUserVirtualMachines = push(Routes.VIRTUAL_MACHINES_USER);\n\nexport const navigateToAdminVirtualMachines = push(Routes.VIRTUAL_MACHINES_ADMIN);\n\nexport const navigateToRepositories = push(Routes.REPOSITORIES);\n\nexport const navigateToSshKeysAdmin = push(Routes.SSH_KEYS_ADMIN);\n\nexport const navigateToSshKeysUser = push(Routes.SSH_KEYS_USER);\n\nexport const navigateToInstanceTypes = push(Routes.INSTANCE_TYPES);\n\nexport const navigateToSiteManager = push(Routes.SITE_MANAGER);\n\nexport const navigateToMyAccount = push(Routes.MY_ACCOUNT);\n\nexport const navigateToMyPreferences = push(Routes.MY_PREFERENCES);\n\nexport const navigateToLinkAccount = push(Routes.LINK_ACCOUNT);\n\nexport const navigateToKeepServices = push(Routes.KEEP_SERVICES);\n\nexport const navigateToUsers = push(Routes.USERS);\n\nexport const navigateToUserProfile = compose(push, getUserProfileUrl);\n\nexport const navigateToApiClientAuthorizations = push(Routes.API_CLIENT_AUTHORIZATIONS);\n\nexport const navigateToGroups = push(Routes.GROUPS);\n\nexport const navigateToGroupDetails = compose(push, getGroupUrl);\n\nexport const navigateToLinks = push(Routes.LINKS);\n\nexport const navigateToCollectionsContentAddress = push(Routes.COLLECTIONS_CONTENT_ADDRESS);\n\nexport const navigateToAllProcesses = push(Routes.ALL_PROCESSES);\n\nexport const navigateToExternalCredentials = push(Routes.EXTERNAL_CREDENTIALS);\n\nexport const navigateToDashboard = push(Routes.DASHBOARD);\n"
  },
  {
    "path": "services/workbench2/src/store/not-found-panel/not-found-panel-action.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { dialogActions } from 'store/dialog/dialog-actions';\n\nexport const NOT_FOUND_DIALOG_NAME = 'notFoundDialog';\n\nexport const openNotFoundDialog = () =>\n    (dispatch: Dispatch) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: NOT_FOUND_DIALOG_NAME,\n            data: {},\n        }));\n    };"
  },
  {
    "path": "services/workbench2/src/store/open-in-new-tab/open-in-new-tab.actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport copy from \"copy-to-clipboard\";\nimport { Dispatch } from \"redux\";\nimport { getNavUrl } from \"routes/routes\";\nimport { RootState } from \"store/store\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\n\nexport const openInNewTabAction = (resource: any) => (dispatch: Dispatch, getState: () => RootState) => {\n    const url = getNavUrl(resource.uuid, getState().auth);\n\n    if (url[0] === \"/\") {\n        window.open(`${window.location.origin}${url}`, \"_blank\", \"noopener\");\n    } else if (url.length) {\n        window.open(url, \"_blank\", \"noopener\");\n    }\n};\n\nexport const copyToClipboardAction = (resources: Array<any>) => (dispatch: Dispatch, getState: () => RootState) => {\n    // Copy link to clipboard omits token to avoid accidental sharing\n\n    let url = getNavUrl(resources[0].uuid, getState().auth, false);\n    let wasCopied;\n\n    if (url[0] === \"/\") wasCopied = copy(`${window.location.origin}${url}`);\n    else if (url.length) {\n        wasCopied = copy(url);\n    }\n\n    if (wasCopied)\n        dispatch(\n            snackbarActions.OPEN_SNACKBAR({\n                message: \"Copied\",\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS,\n            })\n        );\n};\n\nexport const copyStringToClipboardAction = (string: string) => (dispatch: Dispatch, getState: () => RootState) => {\n    let wasCopied;\n\n    if (string.length) {\n        wasCopied = copy(string);\n    }\n\n    if (wasCopied){\n        dispatch(\n            snackbarActions.OPEN_SNACKBAR({\n                message: \"Copied\",\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS,\n            })\n        );\n    } else {\n        dispatch(\n            snackbarActions.OPEN_SNACKBAR({\n                message: \"Failed to copy\",\n                hideDuration: 2000,\n                kind: SnackbarKind.ERROR,\n            })\n        );\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/store/owner-name/owner-name-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from 'common/unionize';\n\nexport const ownerNameActions = unionize({\n    SET_OWNER_NAME: ofType<OwnerNameState>()\n});\n\ninterface OwnerNameState {\n    name: string;\n    uuid: string;\n}\n\nexport type OwnerNameAction = UnionOf<typeof ownerNameActions>;\n"
  },
  {
    "path": "services/workbench2/src/store/owner-name/owner-name-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ownerNameActions, OwnerNameAction } from './owner-name-actions';\n\nexport const ownerNameReducer = (state = [], action: OwnerNameAction) =>\n    ownerNameActions.match(action, {\n        SET_OWNER_NAME: data => [...state, { uuid: data.uuid, name: data.name }],\n        default: () => state,\n    });"
  },
  {
    "path": "services/workbench2/src/store/process-logs-panel/process-logs-panel-actions.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport {\n    pollProcessLogs,\n    processLogsPanelActions,\n} from \"./process-logs-panel-actions\";\nimport { ContainerRequestState } from \"models/container-request\";\nimport { ContainerState } from \"models/container\";\n\ndescribe(\"pollProcessLogs\", () => {\n    const processUuid = \"xxxxx-xvhdp-000000000000000\";\n    const containerUuid = \"xxxxx-dz642-000000000000000\";\n    const dummyProcess = {\n        containerRequest: {\n            uuid: processUuid,\n            logUuid: \"xxxxx-4zz18-000000000000000\",\n            state: ContainerRequestState.COMMITTED,\n            containerUuid: containerUuid,\n        },\n        container: {\n            uuid: containerUuid,\n            state: ContainerState.QUEUED,\n        },\n    };\n\n    let dispatch;\n    let getState;\n    let logServiceStub;\n\n    // Fake Container Request Service that returns dummy scheduling statuses\n    const createCrServiceStub = (schedulingStatus) => ({\n        containerStatus: cy\n            .stub()\n            .resolves({ schedulingStatus }),\n    });\n\n\n    beforeEach(() => {\n        dispatch = cy.stub();\n\n        getState = () => ({\n            resources: {\n                [processUuid]: dummyProcess.containerRequest,\n                [containerUuid]: dummyProcess.container,\n            },\n            processLogsPanel: { logs: {}, filters: [] },\n        });\n\n        // Log service stubs\n        logServiceStub = {\n            listLogFiles: cy.stub().resolves([]),\n            getLogFileContents: cy.stub().resolves([]),\n        };\n\n    });\n\n    it(\"Dispatches ADD_PROCESS_LOGS_PANEL_ITEM for valid SCHEDULING logs\", async () => {\n        const schedulingStatus = \"some status\";\n\n        const thunk = pollProcessLogs(processUuid);\n\n        // Execute the thunk\n        const res = await thunk(dispatch, getState, {\n            logService: logServiceStub,\n            containerRequestService: createCrServiceStub(schedulingStatus),\n        });\n\n        // We expect add process logs panel to be dispatched with the sceduling log in relevant sections\n        expect(dispatch).to.have.been.calledWithMatch(processLogsPanelActions.ADD_PROCESS_LOGS_PANEL_ITEM({\n            \"Main logs\": {lastByte: undefined, contents: [ Cypress.sinon.match.string ]},\n            \"All logs\": {lastByte: undefined, contents: [ Cypress.sinon.match.string ]},\n            \"scheduling\": {lastByte: undefined, contents: [ Cypress.sinon.match.string ]},\n        }));\n\n        // Expect no calls\n        expect(dispatch).to.have.callCount(1);\n    });\n\n    it(\"Does not dispatch ADD_PROCESS_LOGS_PANEL_ITEM for whitespace-only SCHEDULING logs\", async () => {\n        // Tests tab, CR, LF, nbsp\n        const schedulingStatus = \"\\t \\r \\n \\xa0\";\n\n        const thunk = pollProcessLogs(processUuid);\n\n        // Execute the thunk\n        await thunk(dispatch, getState, {\n            logService: logServiceStub,\n            containerRequestService: createCrServiceStub(schedulingStatus),\n        });\n\n        // Since the scheduling status was only whitespace, no logs should be added\n        // pollProcessLogs only dispatches ADD_PROCESS_LOGS_PANEL_ITEM when logFragments.length > 0\n        expect(dispatch).to.not.have.been.called;\n\n        // Expect no calls\n        expect(dispatch).to.have.callCount(0);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/store/process-logs-panel/process-logs-panel-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\nimport { ProcessLogs } from './process-logs-panel';\nimport { LogEventType } from 'models/log';\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from 'services/services';\nimport { Dispatch } from 'redux';\nimport { LogFragment, LogService, logFileToLogType } from 'services/log-service/log-service';\nimport { Process, getProcess } from 'store/processes/process';\nimport { navigateTo } from 'store/navigation/navigation-action';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { CollectionFile, CollectionFileType } from \"models/collection-file\";\nimport { ContainerRequestResource, ContainerRequestState, ContainerStatus } from \"models/container-request\";\nimport { ContainerState } from \"models/container\";\n\nconst SNIPLINE = `================ ✀ ================ ✀ ========= Some log(s) were skipped ========= ✀ ================ ✀ ================`;\nconst LOG_TIMESTAMP_PATTERN = /^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\\.[0-9]{0,}Z/;\n\nexport const processLogsPanelActions = unionize({\n    RESET_PROCESS_LOGS_PANEL: ofType<{}>(),\n    INIT_PROCESS_LOGS_PANEL: ofType<{ filters: string[], logs: ProcessLogs }>(),\n    SET_PROCESS_LOGS_PANEL_FILTER: ofType<string>(),\n    ADD_PROCESS_LOGS_PANEL_ITEM: ofType<ProcessLogs>(),\n});\n\n// Max size of logs to fetch in bytes\nconst maxLogFetchSize: number = 128 * 1000;\n\ntype FileWithProgress = {\n    file: CollectionFile;\n    lastByte: number;\n}\n\ntype SortableLine = {\n    logType: LogEventType,\n    timestamp: string;\n    contents: string;\n}\n\nexport type ProcessLogsPanelAction = UnionOf<typeof processLogsPanelActions>;\n\nexport const setProcessLogsPanelFilter = (filter: string) =>\n    processLogsPanelActions.SET_PROCESS_LOGS_PANEL_FILTER(filter);\n\nexport const initProcessLogsPanel = (processUuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, { logService }: ServiceRepository) => {\n        let process: Process | undefined;\n        try {\n            dispatch(processLogsPanelActions.RESET_PROCESS_LOGS_PANEL());\n            process = getProcess(processUuid)(getState().resources);\n            if (process?.containerRequest?.uuid) {\n                // Get log file size info\n                const logFiles = await loadContainerLogFileList(process.containerRequest, logService);\n\n                // Populate lastbyte 0 for each file\n                const filesWithProgress = logFiles.map((file) => ({ file, lastByte: 0 }));\n\n                // Fetch array of LogFragments\n                const logLines = await loadContainerLogFileContents(filesWithProgress, logService, process);\n\n                // Populate initial state with filters\n                const initialState = createInitialLogPanelState(logFiles, logLines);\n                dispatch(processLogsPanelActions.INIT_PROCESS_LOGS_PANEL(initialState));\n            }\n        } catch (e) {\n            // On error, populate empty state to allow polling to start\n            const initialState = createInitialLogPanelState([], []);\n            dispatch(processLogsPanelActions.INIT_PROCESS_LOGS_PANEL(initialState));\n            // Only show toast on errors other than 404 since 404 is expected when logs do not exist yet\n            if (e.status !== 404) {\n                
dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Error loading process logs', hideDuration: 4000, kind: SnackbarKind.ERROR }));\n            }\n            if (e.status === 404 && process?.containerRequest.state === ContainerRequestState.FINAL) {\n                dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Log collection was trashed or deleted.', hideDuration: 4000, kind: SnackbarKind.WARNING }));\n            }\n        }\n    };\n\nexport const pollProcessLogs = (processUuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, { logService, containerRequestService }: ServiceRepository) => {\n        try {\n            // Get log panel state and process from store\n            const currentState = getState().processLogsPanel;\n            const process = getProcess(processUuid)(getState().resources);\n\n            // Check if container request is present and initial logs state loaded\n            if (process?.containerRequest?.uuid) {\n\n                // Perform range request(s) for each file\n                let logFiles: CollectionFile[] = [];\n                let logFragments: LogFragment[] = [];\n\n                if (process.containerRequest.logUuid && Object.keys(currentState.logs).length > 0) {\n                    logFiles = await loadContainerLogFileList(process.containerRequest, logService);\n\n                    // Determine byte to fetch from while filtering unchanged files\n                    const filesToUpdateWithProgress = logFiles.reduce((acc, updatedFile) => {\n                        // Fetch last byte or 0 for new log files\n                        const currentStateLogLastByte = currentState.logs[logFileToLogType(updatedFile)]?.lastByte || 0;\n\n                        const isNew = !Object.keys(currentState.logs).find((currentStateLogName) => (updatedFile.name.startsWith(currentStateLogName)));\n                        const isChanged = !isNew && currentStateLogLastByte < updatedFile.size;\n\n                        if (isNew || isChanged) {\n                            return acc.concat({ file: updatedFile, lastByte: currentStateLogLastByte });\n                        } else {\n                            return acc;\n                        }\n                    }, [] as FileWithProgress[]);\n\n                    logFragments = await loadContainerLogFileContents(filesToUpdateWithProgress, logService, process);\n                }\n\n                if (process?.container?.state === ContainerState.QUEUED || process?.container?.state === ContainerState.LOCKED) {\n                    const containerStatus: ContainerStatus = await containerRequestService.containerStatus(process?.containerRequest?.uuid, false);\n\n                    if (containerStatus.schedulingStatus.trim().length) {\n                        // If schedulingStatus is not just whitespace, add it to the log view\n                        logFragments.push({\n                            logType: LogEventType.SCHEDULING,\n                            contents: [`${new Date().toISOString()} ${containerStatus.schedulingStatus}`],\n                        });\n                    }\n                }\n\n                if (logFragments.length) {\n                    // Convert LogFragments to ProcessLogs with All/Main sorting & line-merging\n                    const groupedLogs = groupLogs(logFiles, logFragments);\n                    await dispatch(processLogsPanelActions.ADD_PROCESS_LOGS_PANEL_ITEM(groupedLogs));\n                }\n            }\n            return 
Promise.resolve();\n        } catch (e) {\n            // Remove log when polling error is handled in some way instead of being ignored\n            console.error(\"Error occurred in pollProcessLogs:\", e);\n            return Promise.reject();\n        }\n    };\n\nconst loadContainerLogFileList = async (containerRequest: ContainerRequestResource, logService: LogService) => {\n    const logCollectionContents = await logService.listLogFiles(containerRequest);\n\n    // Filter only root directory files matching log event types which have bytes\n    return logCollectionContents.filter((file): file is CollectionFile => (\n        file.type === CollectionFileType.FILE &&\n        PROCESS_PANEL_LOG_EVENT_TYPES.indexOf(logFileToLogType(file)) > -1 &&\n        file.size > 0\n    ));\n};\n\n/**\n * Loads the contents of each file from each file's lastByte simultaneously\n *   while respecting the maxLogFetchSize by requesting the start and end\n *   of the desired block and inserting a snipline.\n * @param logFilesWithProgress CollectionFiles with the last byte previously loaded\n * @param logService\n * @param process\n * @returns LogFragment[] containing a single LogFragment corresponding to each input file\n */\nconst loadContainerLogFileContents = async (logFilesWithProgress: FileWithProgress[], logService: LogService, process: Process) => (\n    (await Promise.allSettled(logFilesWithProgress.filter(({ file }) => file.size > 0).map(({ file, lastByte }) => {\n        const requestSize = file.size - lastByte;\n        if (requestSize > maxLogFetchSize) {\n            const chunkSize = Math.floor(maxLogFetchSize / 2);\n            const firstChunkEnd = lastByte + chunkSize - 1;\n            return Promise.all([\n                logService.getLogFileContents(process.containerRequest, file, lastByte, firstChunkEnd),\n                logService.getLogFileContents(process.containerRequest, file, file.size - chunkSize, file.size - 1)\n            ] as Promise<(LogFragment)>[]);\n        } else {\n            return Promise.all([logService.getLogFileContents(process.containerRequest, file, lastByte, file.size - 1)]);\n        }\n    })).then((res) => {\n        if (res.length && res.every(promiseResult => (promiseResult.status === 'rejected'))) {\n            // Since allSettled does not pass promise rejection we throw an\n            //   error if every request failed\n            const error = res.find(\n                (promiseResult): promiseResult is PromiseRejectedResult => promiseResult.status === 'rejected'\n            )?.reason;\n            return Promise.reject(error);\n        }\n        return res.filter((promiseResult): promiseResult is PromiseFulfilledResult<LogFragment[]> => (\n            // Filter out log files with rejected promises\n            //   (Promise.all rejects on any failure)\n            promiseResult.status === 'fulfilled' &&\n            // Filter out files where any fragment is empty\n            //   (prevent incorrect snipline generation or an un-resumable situation)\n            !!promiseResult.value.every(logFragment => logFragment.contents.length)\n        )).map(one => one.value)\n    })).map((logResponseSet) => {\n        // For any multi fragment response set, modify the last line of non-final chunks to include a line break and snip line\n        //   Don't add snip line as a separate line so that sorting won't reorder it\n        for (let i = 1; i < logResponseSet.length; i++) {\n            const fragment = logResponseSet[i - 1];\n            const 
lastLineIndex = fragment.contents.length - 1;\n            const lastLineContents = fragment.contents[lastLineIndex];\n            const newLastLine = `${lastLineContents}\\n${SNIPLINE}`;\n\n            logResponseSet[i - 1].contents[lastLineIndex] = newLastLine;\n        }\n\n        // Merge LogFragment Array (representing multiple log line arrays) into single LogLine[] / LogFragment\n        return logResponseSet.reduce((acc, curr: LogFragment) => ({\n            logType: curr.logType,\n            contents: [...(acc.contents || []), ...curr.contents]\n        }), {} as LogFragment);\n    })\n);\n\nconst createInitialLogPanelState = (logFiles: CollectionFile[], logFragments: LogFragment[]): { filters: string[], logs: ProcessLogs } => {\n    const logs = groupLogs(logFiles, logFragments);\n    const filters = Object.keys(logs);\n    return { filters, logs };\n}\n\n/**\n * Converts LogFragments into ProcessLogs, grouping and sorting All/Main logs\n * @param logFiles\n * @param logFragments\n * @returns ProcessLogs for the store\n */\nconst groupLogs = (logFiles: CollectionFile[], logFragments: LogFragment[]): ProcessLogs => {\n    const sortableLogFragments = mergeMultilineLoglines(logFragments);\n\n    const allLogs = mergeSortLogFragments(sortableLogFragments);\n    const mainLogs = mergeSortLogFragments(sortableLogFragments.filter((fragment) => (MAIN_EVENT_TYPES.includes(fragment.logType))));\n\n    const groupedLogs = logFragments.reduce((grouped, fragment) => ({\n        ...grouped,\n        [fragment.logType as string]: { lastByte: fetchLastByteNumber(logFiles, fragment.logType), contents: fragment.contents }\n    }), {});\n\n    return {\n        [MAIN_FILTER_TYPE]: { lastByte: undefined, contents: mainLogs },\n        [ALL_FILTER_TYPE]: { lastByte: undefined, contents: allLogs },\n        ...groupedLogs,\n    }\n};\n\n/**\n * Checks for non-timestamped log lines and merges them with the previous line, assumes they are multi-line logs\n *   If there is no previous line (first line has no timestamp), the line is deleted.\n *   Only used for combined logs that need sorting by timestamp after merging\n * @param logFragments\n * @returns Modified LogFragment[]\n */\nconst mergeMultilineLoglines = (logFragments: LogFragment[]) => (\n    logFragments.map((fragment) => {\n        // Avoid altering the original fragment copy\n        let fragmentCopy: LogFragment = {\n            logType: fragment.logType,\n            contents: [...fragment.contents],\n        }\n        // Merge any non-timestamped lines in sortable log types with previous line\n        if (fragmentCopy.contents.length && !NON_SORTED_LOG_TYPES.includes(fragmentCopy.logType)) {\n            for (let i = 0; i < fragmentCopy.contents.length; i++) {\n                const lineContents = fragmentCopy.contents[i];\n                if (!lineContents.match(LOG_TIMESTAMP_PATTERN)) {\n                    // Partial line without timestamp detected\n                    if (i > 0) {\n                        // If not first line, copy line to previous line\n                        const previousLineContents = fragmentCopy.contents[i - 1];\n                        const newPreviousLineContents = `${previousLineContents}\\n${lineContents}`;\n                        fragmentCopy.contents[i - 1] = newPreviousLineContents;\n                    }\n                    // Delete the current line and prevent iterating\n                    fragmentCopy.contents.splice(i, 1);\n                    i--;\n                }\n            }\n        
}\n        return fragmentCopy;\n    })\n);\n\n/**\n * Merges log lines of different types and sorts types that contain timestamps (are sortable)\n * @param logFragments\n * @returns string[] of merged and sorted log lines\n */\nconst mergeSortLogFragments = (logFragments: LogFragment[]): string[] => {\n    const sortableFragments = logFragments\n        .filter((fragment) => (!NON_SORTED_LOG_TYPES.includes(fragment.logType)));\n\n    const nonSortableLines = fragmentsToLines(logFragments\n        .filter((fragment) => (NON_SORTED_LOG_TYPES.includes(fragment.logType)))\n        .sort((a, b) => (a.logType.localeCompare(b.logType))));\n\n    return [...nonSortableLines, ...sortLogFragments(sortableFragments)];\n};\n\n/**\n * Performs merge and sort of input log fragment lines\n * @param logFragments set of sortable log fragments to be merged and sorted\n * @returns A string array containing all lines, sorted by timestamp and\n *          preserving line ordering and type grouping when timestamps match\n */\nconst sortLogFragments = (logFragments: LogFragment[]): string[] => {\n    const linesWithType: SortableLine[] = logFragments\n        // Map each logFragment into an array of SortableLine\n        .map((fragment: LogFragment): SortableLine[] => (\n            fragment.contents.map((singleLine: string) => {\n                const timestampMatch = singleLine.match(LOG_TIMESTAMP_PATTERN);\n                const timestamp = timestampMatch && timestampMatch[0] ? timestampMatch[0] : \"\";\n                return {\n                    logType: fragment.logType,\n                    timestamp: timestamp,\n                    contents: singleLine,\n                };\n            })\n        // Merge each array of SortableLine into single array\n        )).reduce((acc: SortableLine[], lines: SortableLine[]) => (\n            [...acc, ...lines]\n        ), [] as SortableLine[]);\n\n    return linesWithType\n        .sort(sortableLineSortFunc)\n        .map(lineWithType => lineWithType.contents);\n};\n\n/**\n * Sort func to sort lines\n *   Preserves original ordering of lines from the same source\n *   Stably orders lines of differing type but same timestamp\n *     (produces a block of same-timestamped lines of one type before a block\n *     of same timestamped lines of another type for readability)\n *   Sorts all other lines by contents (ie by timestamp)\n */\nconst sortableLineSortFunc = (a: SortableLine, b: SortableLine) => {\n    if (a.logType === b.logType) {\n        return 0;\n    } else if (a.timestamp === b.timestamp) {\n        return a.logType.localeCompare(b.logType);\n    } else {\n        return a.contents.localeCompare(b.contents);\n    }\n};\n\nconst fragmentsToLines = (fragments: LogFragment[]): string[] => (\n    fragments.reduce((acc, fragment: LogFragment) => (\n        acc.concat(...fragment.contents)\n    ), [] as string[])\n);\n\nconst fetchLastByteNumber = (logFiles: CollectionFile[], key: string) => {\n    return logFiles.find((file) => (file.name.startsWith(key)))?.size\n};\n\nexport const navigateToLogCollection = (uuid: string) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            await services.collectionService.get(uuid);\n            dispatch<any>(navigateTo(uuid));\n        } catch {\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Log collection was trashed or deleted.', hideDuration: 4000, kind: SnackbarKind.WARNING }));\n        }\n    };\n\nconst ALL_FILTER_TYPE = 'All 
logs';\n\nconst MAIN_FILTER_TYPE = 'Main logs';\nconst MAIN_EVENT_TYPES = [\n    LogEventType.CRUNCH_RUN,\n    LogEventType.STDERR,\n    LogEventType.STDOUT,\n    LogEventType.SCHEDULING,\n];\n\nconst PROCESS_PANEL_LOG_EVENT_TYPES = [\n    LogEventType.ARV_MOUNT,\n    LogEventType.CRUNCH_RUN,\n    LogEventType.CRUNCHSTAT,\n    LogEventType.DISPATCH,\n    LogEventType.HOSTSTAT,\n    LogEventType.NODE_INFO,\n    LogEventType.STDERR,\n    LogEventType.STDOUT,\n    LogEventType.CONTAINER,\n    LogEventType.KEEPSTORE,\n    LogEventType.SCHEDULING\n];\n\nconst NON_SORTED_LOG_TYPES = [\n    LogEventType.NODE_INFO,\n    LogEventType.CONTAINER,\n];\n"
  },
  {
    "path": "services/workbench2/src/store/process-logs-panel/process-logs-panel-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ProcessLogs, ProcessLogsPanel } from './process-logs-panel';\nimport { ProcessLogsPanelAction, processLogsPanelActions } from './process-logs-panel-actions';\n\nconst initialState: ProcessLogsPanel = {\n    filters: [],\n    selectedFilter: '',\n    logs: {},\n};\n\nexport const processLogsPanelReducer = (state = initialState, action: ProcessLogsPanelAction): ProcessLogsPanel =>\n    processLogsPanelActions.match(action, {\n        RESET_PROCESS_LOGS_PANEL: () => initialState,\n        INIT_PROCESS_LOGS_PANEL: ({ filters, logs }) => ({\n            filters,\n            logs,\n            selectedFilter: filters[0] || '',\n        }),\n        SET_PROCESS_LOGS_PANEL_FILTER: selectedFilter => ({\n            ...state,\n            selectedFilter\n        }),\n        ADD_PROCESS_LOGS_PANEL_ITEM: (groupedLogs: ProcessLogs) => {\n            // Update filters\n            const newFilters = Object.keys(groupedLogs).filter((logType) => (!state.filters.includes(logType)));\n            const filters = [...state.filters, ...newFilters];\n\n            // Append new log lines\n            const logs = Object.keys(groupedLogs).reduce((acc, logType) => {\n                if (Object.keys(acc).includes(logType)) {\n                    // If log type exists, append lines and update lastByte\n                    return {...acc, [logType]: {\n                        lastByte: groupedLogs[logType].lastByte,\n                        contents: [...acc[logType].contents, ...groupedLogs[logType].contents]\n                    }};\n                } else {\n                    return {...acc, [logType]: groupedLogs[logType]};\n                }\n            }, state.logs);\n\n            return { ...state, logs, filters };\n        },\n        default: () => state,\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/process-logs-panel/process-logs-panel.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { matchProcessRoute } from 'routes/routes';\nimport { RouterState } from 'connected-react-router';\n\nexport interface ProcessLogsPanel {\n    filters: string[];\n    selectedFilter: string;\n    logs: ProcessLogs;\n}\n\nexport interface ProcessLogs {\n    [logType: string]: {lastByte: number | undefined, contents: string[]};\n}\n\nexport const getProcessPanelLogs = ({ selectedFilter, logs }: ProcessLogsPanel): string[] => {\n    return logs[selectedFilter]?.contents || [];\n};\n\nexport const getProcessLogsPanelCurrentUuid = (router: RouterState) => {\n    const pathname = router.location ? router.location.pathname : '';\n    const match = matchProcessRoute(pathname);\n    return match ? match.params.id : undefined;\n};\n"
  },
  {
    "path": "services/workbench2/src/store/process-panel/process-panel-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\nimport { getInputs,\n         getOutputParameters,\n         getRawInputs,\n         getRawOutputs\n} from \"store/processes/processes-actions\";\nimport { Dispatch } from \"redux\";\nimport { Process, ProcessStatus } from \"store/processes/process\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { navigateTo } from \"store/navigation/navigation-action\";\nimport { snackbarActions } from \"store/snackbar/snackbar-actions\";\nimport { SnackbarKind } from \"../snackbar/snackbar-actions\";\nimport { loadSubprocessPanel, subprocessPanelActions } from \"../subprocess-panel/subprocess-panel-actions\";\nimport { initProcessLogsPanel, processLogsPanelActions } from \"store/process-logs-panel/process-logs-panel-actions\";\nimport { CollectionFile } from \"models/collection-file\";\nimport { ContainerRequestResource } from \"models/container-request\";\nimport { CommandOutputParameter } from \"cwlts/mappings/v1.0/CommandOutputParameter\";\nimport { CommandInputParameter, getIOParamId, WorkflowInputsData } from \"models/workflow\";\nimport { getIOParamDisplayValue, ProcessIOParameter } from \"views/process-panel/process-io-card\";\nimport { OutputDetails, NodeInstanceType, NodeInfo, UsageReport } from \"./process-panel\";\nimport { AuthState } from \"store/auth/auth-reducer\";\nimport { ContextMenuResource } from \"store/context-menu/context-menu\";\nimport { OutputDataUpdate } from \"./process-panel-reducer\";\nimport { updateResources } from \"store/resources/resources-actions\";\nimport { ContainerResource } from \"models/container\";\nimport { FilterBuilder } from \"services/api/filter-builder\";\n\nexport const processPanelActions = unionize({\n    RESET_PROCESS_PANEL: ofType<{}>(),\n    SET_PROCESS_PANEL_CONTAINER_REQUEST_UUID: ofType<string>(),\n    SET_PROCESS_PANEL_FILTERS: ofType<string[]>(),\n    TOGGLE_PROCESS_PANEL_FILTER: ofType<string>(),\n    SET_INPUT_RAW: ofType<WorkflowInputsData | null>(),\n    SET_INPUT_PARAMS: ofType<ProcessIOParameter[] | null>(),\n    SET_OUTPUT_DATA: ofType<OutputDataUpdate | null>(),\n    SET_OUTPUT_DEFINITIONS: ofType<CommandOutputParameter[]>(),\n    SET_OUTPUT_PARAMS: ofType<ProcessIOParameter[] | null>(),\n    SET_NODE_INFO: ofType<NodeInfo>(),\n    SET_USAGE_REPORT: ofType<UsageReport>(),\n});\n\nexport type ProcessPanelAction = UnionOf<typeof processPanelActions>;\n\nexport const toggleProcessPanelFilter = processPanelActions.TOGGLE_PROCESS_PANEL_FILTER;\n\nexport const loadProcess =\n    (containerRequestUuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<Process | undefined> => {\n        let containerRequest: ContainerRequestResource | undefined = undefined;\n        let container: ContainerResource | undefined = undefined;\n\n        try {\n            const containerRequestResult = await services.groupsService.contents(\n                '', {\n                    filters: new FilterBuilder().addIsA('uuid', 'arvados#containerRequest')\n                                                 .addEqual('uuid', containerRequestUuid)\n                                                 .getFilters(),\n                    include: [\"container_uuid\"]\n            });\n            if (containerRequestResult.items.length === 1) {\n                
containerRequest = containerRequestResult.items[0] as ContainerRequestResource;\n                dispatch<any>(updateResources(containerRequestResult.items));\n\n                if (containerRequestResult.included?.length === 1) {\n                    container = containerRequestResult.included[0] as ContainerResource;\n                    dispatch<any>(updateResources(containerRequestResult.included));\n                }\n            }\n        } catch (e) {\n            if (!containerRequest) {\n                dispatch(\n                    snackbarActions.OPEN_SNACKBAR({\n                        message: e.message,\n                        hideDuration: 2000,\n                        kind: SnackbarKind.ERROR,\n                    })\n                );\n            }\n        }\n\n        if (!containerRequest) {\n            return undefined;\n        }\n\n        if (!container && containerRequest.containerUuid) {\n            // Get the container the old fashioned way\n            try {\n                container = await services.containerService.get(containerRequest.containerUuid, false);\n                dispatch<any>(updateResources([container]));\n            } catch {}\n        }\n\n        if (container && container.runtimeUserUuid) {\n            try {\n                const runtimeUser = await services.userService.get(container.runtimeUserUuid, false);\n                dispatch<any>(updateResources([runtimeUser]));\n            } catch {}\n        }\n\n        if (containerRequest.outputUuid) {\n            try {\n                const collection = await services.collectionService.get(containerRequest.outputUuid, false);\n                dispatch<any>(updateResources([collection]));\n            } catch {}\n        }\n\n        return { containerRequest, container };\n    };\n\nexport const loadProcessPanel = (uuid: string) => async (dispatch: Dispatch, getState: () => RootState): Promise<Process | undefined> => {\n    // Reset subprocess data explorer if navigating to new process\n    //  Avoids resetting pagination when refreshing same process\n    if (getState().processPanel.containerRequestUuid !== uuid) {\n        dispatch(subprocessPanelActions.CLEAR());\n    }\n    dispatch(processPanelActions.RESET_PROCESS_PANEL());\n    dispatch(processLogsPanelActions.RESET_PROCESS_LOGS_PANEL());\n    dispatch<ProcessPanelAction>(processPanelActions.SET_PROCESS_PANEL_CONTAINER_REQUEST_UUID(uuid));\n    const process = await dispatch<any>(loadProcess(uuid));\n    dispatch(initProcessPanelFilters);\n    dispatch<any>(initProcessLogsPanel(uuid));\n    dispatch<any>(loadSubprocessPanel());\n    return process;\n};\n\nexport const navigateToOutput = (resource: ContextMenuResource | ContainerRequestResource) => async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n    try {\n        await services.collectionService.get(resource.outputUuid || '');\n        dispatch<any>(navigateTo(resource.outputUuid || ''));\n    } catch {\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Output collection was trashed or deleted.\", hideDuration: 4000, kind: SnackbarKind.WARNING }));\n    }\n};\n\nexport const loadInputs =\n    (containerRequest: ContainerRequestResource) => async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        dispatch<ProcessPanelAction>(processPanelActions.SET_INPUT_RAW(getRawInputs(containerRequest)));\n        
dispatch<ProcessPanelAction>(processPanelActions.SET_INPUT_PARAMS(formatInputData(getInputs(containerRequest), getState().auth)));\n    };\n\nexport const loadOutputs =\n    (containerRequest: ContainerRequestResource) => async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const noOutputs: OutputDetails = { raw: {} };\n\n        if (!containerRequest.outputUuid) {\n            dispatch<ProcessPanelAction>(processPanelActions.SET_OUTPUT_DATA({\n                uuid: containerRequest.uuid,\n                payload: noOutputs\n            }));\n            return;\n        }\n        let propsOutputs: any = undefined;\n        try {\n            propsOutputs = getRawOutputs(containerRequest);\n            const filesPromise = services.collectionService.files(containerRequest.outputUuid);\n            const collectionPromise = services.collectionService.get(containerRequest.outputUuid);\n            const [files, collection] = await Promise.all([filesPromise, collectionPromise]);\n\n            // If has propsOutput, skip fetching cwl.output.json\n            if (propsOutputs !== undefined) {\n                dispatch<ProcessPanelAction>(\n                    processPanelActions.SET_OUTPUT_DATA({\n                        uuid: containerRequest.uuid,\n                        payload: {\n                            raw: propsOutputs,\n                            pdh: collection.portableDataHash,\n                        },\n                    })\n                );\n            } else {\n                // Fetch outputs from keep\n                const outputFile = files.find(file => file.name === \"cwl.output.json\") as CollectionFile | undefined;\n                let outputData = outputFile ? await services.collectionService.getFileContents(outputFile) : undefined;\n                if (outputData && (outputData = JSON.parse(outputData)) && collection.portableDataHash) {\n                    dispatch<ProcessPanelAction>(\n                        processPanelActions.SET_OUTPUT_DATA({\n                            uuid: containerRequest.uuid,\n                            payload: {\n                                raw: outputData,\n                                pdh: collection.portableDataHash,\n                            },\n                        })\n                    );\n                } else {\n                    dispatch<ProcessPanelAction>(processPanelActions.SET_OUTPUT_DATA({ uuid: containerRequest.uuid, payload: noOutputs }));\n                }\n            }\n        } catch {\n            dispatch<ProcessPanelAction>(processPanelActions.SET_OUTPUT_DATA({ uuid: containerRequest.uuid, payload: { raw: propsOutputs, failedToLoadOutputCollection: true } }));\n        }\n    };\n\nexport const loadNodeJson =\n    (containerRequest: ContainerRequestResource) => async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const noLog = { nodeInfo: null };\n        if (!containerRequest.logUuid) {\n            dispatch<ProcessPanelAction>(processPanelActions.SET_NODE_INFO(noLog));\n            return;\n        }\n        try {\n            const filesPromise = services.collectionService.files(containerRequest.logUuid);\n            const collectionPromise = services.collectionService.get(containerRequest.logUuid);\n            const [files] = await Promise.all([filesPromise, collectionPromise]);\n\n            // Fetch node.json from keep\n            const nodeFile = files.find(file => file.name 
=== \"node.json\") as CollectionFile | undefined;\n            let nodeData = nodeFile ? await services.collectionService.getFileContents(nodeFile) : undefined;\n            if (nodeData && (nodeData = JSON.parse(nodeData))) {\n                dispatch<ProcessPanelAction>(\n                    processPanelActions.SET_NODE_INFO({\n                        nodeInfo: nodeData as NodeInstanceType,\n                    })\n                );\n            } else {\n                dispatch<ProcessPanelAction>(processPanelActions.SET_NODE_INFO(noLog));\n            }\n\n            const usageReportFile = files.find(file => file.name === \"usage_report.html\") as CollectionFile | null;\n            dispatch<ProcessPanelAction>(processPanelActions.SET_USAGE_REPORT({ usageReport: usageReportFile }));\n        } catch {\n            dispatch<ProcessPanelAction>(processPanelActions.SET_NODE_INFO(noLog));\n            dispatch<ProcessPanelAction>(processPanelActions.SET_USAGE_REPORT({ usageReport: null }));\n        }\n    };\n\nexport const loadOutputDefinitions =\n    (containerRequest: ContainerRequestResource) => async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        if (containerRequest && containerRequest.mounts) {\n            dispatch<ProcessPanelAction>(processPanelActions.SET_OUTPUT_DEFINITIONS(getOutputParameters(containerRequest)));\n        }\n    };\n\nexport const updateOutputParams = () => async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n    const outputDefinitions = getState().processPanel.outputDefinitions;\n    const outputData = getState().processPanel.outputData;\n\n    if (outputData && outputData.raw) {\n        dispatch<ProcessPanelAction>(\n            processPanelActions.SET_OUTPUT_PARAMS(formatOutputData(outputDefinitions, outputData.raw, outputData.pdh, getState().auth))\n        );\n    }\n};\n\nexport const openWorkflow = (uuid: string) => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    dispatch<any>(navigateTo(uuid));\n};\n\nexport const initProcessPanelFilters = processPanelActions.SET_PROCESS_PANEL_FILTERS([\n    ProcessStatus.QUEUED,\n    ProcessStatus.COMPLETED,\n    ProcessStatus.FAILED,\n    ProcessStatus.RUNNING,\n    ProcessStatus.ONHOLD,\n    ProcessStatus.FAILING,\n    ProcessStatus.WARNING,\n    ProcessStatus.CANCELLED,\n]);\n\nexport const formatInputData = (inputs: CommandInputParameter[], auth: AuthState): ProcessIOParameter[] => {\n    return inputs.flatMap((input): ProcessIOParameter[] => {\n        const processValues = getIOParamDisplayValue(auth, input);\n        return processValues.map((thisValue, i) => ({\n            id: i === 0 ? getIOParamId(input) : \"\",\n            label: i === 0 ? input.label || \"\" : \"\",\n            value: thisValue,\n        }));\n    });\n};\n\nexport const formatOutputData = (\n    definitions: CommandOutputParameter[],\n    values: any,\n    pdh: string | undefined,\n    auth: AuthState\n): ProcessIOParameter[] => {\n    return definitions.flatMap((output): ProcessIOParameter[] => {\n        const processValues = getIOParamDisplayValue(auth, Object.assign(output, { value: values[getIOParamId(output)] || [] }), pdh);\n        return processValues.map((thisValue, i) => ({\n            id: i === 0 ? getIOParamId(output) : \"\",\n            label: i === 0 ? output.label || \"\" : \"\",\n            value: thisValue,\n        }));\n    });\n};\n"
  },
  {
    "path": "services/workbench2/src/store/process-panel/process-panel-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { OutputDetails, ProcessPanel } from \"store/process-panel/process-panel\";\nimport { ProcessPanelAction, processPanelActions } from \"store/process-panel/process-panel-actions\";\n\nconst initialState: ProcessPanel = {\n    containerRequestUuid: \"\",\n    filters: {},\n    inputRaw: null,\n    inputParams: null,\n    outputData: null,\n    nodeInfo: null,\n    outputDefinitions: [],\n    outputParams: null,\n    usageReport: null,\n};\n\nexport type OutputDataUpdate = {\n    uuid: string;\n    payload: OutputDetails;\n};\n\nexport const processPanelReducer = (state = initialState, action: ProcessPanelAction): ProcessPanel =>\n    processPanelActions.match(action, {\n        RESET_PROCESS_PANEL: () => initialState,\n        SET_PROCESS_PANEL_CONTAINER_REQUEST_UUID: containerRequestUuid => ({\n            ...state,\n            containerRequestUuid,\n        }),\n        SET_PROCESS_PANEL_FILTERS: statuses => {\n            const filters = statuses.reduce((filters, status) => ({ ...filters, [status]: true }), {});\n            return { ...state, filters };\n        },\n        TOGGLE_PROCESS_PANEL_FILTER: status => {\n            const filters = { ...state.filters, [status]: !state.filters[status] };\n            return { ...state, filters };\n        },\n        SET_INPUT_RAW: inputRaw => {\n            // Since mounts can disappear and reappear, only set inputs\n            //   if current state is null or new inputs has content\n            if (state.inputRaw === null || (inputRaw && Object.keys(inputRaw).length)) {\n                return { ...state, inputRaw };\n            } else {\n                return state;\n            }\n        },\n        SET_INPUT_PARAMS: inputParams => {\n            // Since mounts can disappear and reappear, only set inputs\n            //   if current state is null or new inputs has content\n            if (state.inputParams === null || (inputParams && inputParams.length)) {\n                return { ...state, inputParams };\n            } else {\n                return state;\n            }\n        },\n        SET_OUTPUT_DATA: (update: OutputDataUpdate) => {\n            //never set output to {} unless initializing\n            if (state.outputData?.raw && Object.keys(state.outputData?.raw).length && state.containerRequestUuid === update.uuid) {\n                return state;\n            }\n            return { ...state, outputData: update.payload };\n        },\n        SET_NODE_INFO: ({ nodeInfo }) => {\n            return { ...state, nodeInfo };\n        },\n        SET_OUTPUT_DEFINITIONS: outputDefinitions => {\n            // Set output definitions is only additive to avoid clearing when mounts go temporarily missing\n            if (outputDefinitions.length) {\n                return { ...state, outputDefinitions };\n            } else {\n                return state;\n            }\n        },\n        SET_OUTPUT_PARAMS: outputParams => {\n            return { ...state, outputParams };\n        },\n        SET_USAGE_REPORT: ({ usageReport }) => {\n            return { ...state, usageReport };\n        },\n        default: () => state,\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/process-panel/process-panel.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { WorkflowInputsData } from 'models/workflow';\nimport { RouterState } from \"connected-react-router\";\nimport { matchProcessRoute } from \"routes/routes\";\nimport { ProcessIOParameter } from \"views/process-panel/process-io-card\";\nimport { CommandOutputParameter } from 'cwlts/mappings/v1.0/CommandOutputParameter';\nimport { CollectionFile } from 'models/collection-file';\n\nexport type OutputDetails = {\n    raw?: any;\n    pdh?: string;\n    failedToLoadOutputCollection?: boolean;\n}\n\nexport interface GPUFeatures {\n    // as of this writing, stack is \"cuda\" or \"rocm\"\n    Stack:          string;\n    DriverVersion:  string;\n    HardwareTarget: string;\n    DeviceCount:    number;\n    VRAM:           number;\n}\n\nexport interface NodeInstanceType {\n    Name: string;\n    ProviderType: string;\n    VCPUs: number;\n    RAM: number;\n    Scratch: number;\n    IncludedScratch: number;\n    AddedScratch: number;\n    Price: number;\n    Preemptible: boolean;\n    GPU: GPUFeatures;\n};\n\nexport interface NodeInfo {\n    nodeInfo: NodeInstanceType | null;\n};\n\nexport interface UsageReport {\n    usageReport: CollectionFile | null;\n};\n\nexport interface ProcessPanel {\n    containerRequestUuid: string;\n    filters: { [status: string]: boolean };\n    inputRaw: WorkflowInputsData | null;\n    inputParams: ProcessIOParameter[] | null;\n    outputData: OutputDetails | null;\n    outputDefinitions: CommandOutputParameter[];\n    outputParams: ProcessIOParameter[] | null;\n    nodeInfo: NodeInstanceType | null;\n    usageReport: CollectionFile | null;\n}\n\nexport const getProcessPanelCurrentUuid = (router: RouterState) => {\n    const pathname = router.location ? router.location.pathname : '';\n    const match = matchProcessRoute(pathname);\n    return match ? match.params.id : undefined;\n};\n"
  },
  {
    "path": "services/workbench2/src/store/processes/process-copy-actions.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { copyProcess } from './process-copy-actions';\nimport { CommonService } from 'services/common-service/common-service';\n\ndescribe('ProcessCopyAction', () => {\n    let dispatch, getState, services;\n\n    let sampleFailedProcess = {\n        command: [\n        \"arvados-cwl-runner\",\n        \"--api=containers\",\n        \"--local\",\n        \"--project-uuid=zzzzz-j7d0g-yr18k784zplfeza\",\n        \"/var/lib/cwl/workflow.json#main\",\n        \"/var/lib/cwl/cwl.input.json\",\n        ],\n        container_count: 1,\n        container_count_max: 10,\n        container_image: \"arvados/jobs\",\n        container_uuid: \"zzzzz-dz642-b9j9dtk1yikp9h0\",\n        created_at: \"2023-01-23T22:50:50.788284000Z\",\n        cumulative_cost: 0.00120553009559028,\n        cwd: \"/var/spool/cwl\",\n        description: \"test decsription\",\n        environment: {},\n        etag: \"2es6px6q7uo0yqi2i291x8gd6\",\n        expires_at: null,\n        filters: null,\n        kind: \"arvados#containerRequest\",\n        log_uuid: \"zzzzz-4zz18-a1gxqy9o6zyrdy8\",\n        modified_at: \"2023-01-24T21:13:54.772612000Z\",\n        modified_by_user_uuid: \"jutro-tpzed-vllbpebicy84rd5\",\n        mounts: {\n        \"/var/lib/cwl/cwl.input.json\": {\n            capacity: 0,\n            content: {\n            input: {\n                basename: \"logo.ai.no.whitespace.png\",\n                class: \"File\",\n                location:\n                \"keep:5d3238c4db721a92c98b0305a47b0485+75/logo.ai.no.whitespace.png\",\n            },\n            reverse_sort: true,\n            },\n            device_type: \"\",\n            exclude_from_output: false,\n            kind: \"json\",\n            path: \"\",\n            portable_data_hash: \"\",\n            uuid: \"\",\n            writable: false,\n        },\n        \"/var/lib/cwl/workflow.json\": {\n            capacity: 0,\n            content: {\n            $graph: [\n                {\n                class: \"Workflow\",\n                doc: \"Reverse the lines in a document, then sort those lines.\",\n                id: \"#main\",\n                inputs: [\n                    {\n                    default: null,\n                    doc: \"The input file to be processed.\",\n                    id: \"#main/input\",\n                    type: \"File\",\n                    },\n                    {\n                    default: true,\n                    doc: \"If true, reverse (decending) sort\",\n                    id: \"#main/reverse_sort\",\n                    type: \"boolean\",\n                    },\n                ],\n                outputs: [\n                    {\n                    doc: \"The output with the lines reversed and sorted.\",\n                    id: \"#main/output\",\n                    outputSource: \"#main/sorted/output\",\n                    type: \"File\",\n                    },\n                ],\n                steps: [\n                    {\n                    id: \"#main/rev\",\n                    in: [{ id: \"#main/rev/input\", source: \"#main/input\" }],\n                    out: [\"#main/rev/output\"],\n                    run: \"#revtool.cwl\",\n                    },\n                    {\n                    id: \"#main/sorted\",\n                    in: [\n                        { id: \"#main/sorted/input\", source: 
\"#main/rev/output\" },\n                        {\n                        id: \"#main/sorted/reverse\",\n                        source: \"#main/reverse_sort\",\n                        },\n                    ],\n                    out: [\"#main/sorted/output\"],\n                    run: \"#sorttool.cwl\",\n                    },\n                ],\n                },\n                {\n                baseCommand: \"rev\",\n                class: \"CommandLineTool\",\n                doc: \"Reverse each line using the `rev` command\",\n                hints: [{ class: \"ResourceRequirement\", ramMin: 8 }],\n                id: \"#revtool.cwl\",\n                inputs: [\n                    { id: \"#revtool.cwl/input\", inputBinding: {}, type: \"File\" },\n                ],\n                outputs: [\n                    {\n                    id: \"#revtool.cwl/output\",\n                    outputBinding: { glob: \"output.txt\" },\n                    type: \"File\",\n                    },\n                ],\n                stdout: \"output.txt\",\n                },\n                {\n                baseCommand: \"sort\",\n                class: \"CommandLineTool\",\n                doc: \"Sort lines using the `sort` command\",\n                hints: [{ class: \"ResourceRequirement\", ramMin: 8 }],\n                id: \"#sorttool.cwl\",\n                inputs: [\n                    {\n                    id: \"#sorttool.cwl/reverse\",\n                    inputBinding: { position: 1, prefix: \"-r\" },\n                    type: \"boolean\",\n                    },\n                    {\n                    id: \"#sorttool.cwl/input\",\n                    inputBinding: { position: 2 },\n                    type: \"File\",\n                    },\n                ],\n                outputs: [\n                    {\n                    id: \"#sorttool.cwl/output\",\n                    outputBinding: { glob: \"output.txt\" },\n                    type: \"File\",\n                    },\n                ],\n                stdout: \"output.txt\",\n                },\n            ],\n            cwlVersion: \"v1.0\",\n            },\n            device_type: \"\",\n            exclude_from_output: false,\n            kind: \"json\",\n            path: \"\",\n            portable_data_hash: \"\",\n            uuid: \"\",\n            writable: false,\n        },\n        \"/var/spool/cwl\": {\n            capacity: 0,\n            content: null,\n            device_type: \"\",\n            exclude_from_output: false,\n            kind: \"collection\",\n            path: \"\",\n            portable_data_hash: \"\",\n            uuid: \"\",\n            writable: true,\n        },\n        stdout: {\n            capacity: 0,\n            content: null,\n            device_type: \"\",\n            exclude_from_output: false,\n            kind: \"file\",\n            path: \"/var/spool/cwl/cwl.output.json\",\n            portable_data_hash: \"\",\n            uuid: \"\",\n            writable: false,\n        },\n        },\n        name: \"Copy of: Copy of: Copy of: revsort.cwl\",\n        output_name: \"Output from revsort.cwl\",\n        output_path: \"/var/spool/cwl\",\n        output_properties: { key: \"val\" },\n        output_storage_classes: [\"default\"],\n        output_ttl: 999999,\n        output_uuid: \"zzzzz-4zz18-wolwlyfxmlhmgd4\",\n        owner_uuid: \"zzzzz-j7d0g-yr18k784zplfeza\",\n        priority: 500,\n        properties: {\n        template_uuid: 
\"zzzzz-7fd4e-7xsza0vgfe785cy\",\n        workflowName: \"revsort.cwl\",\n        },\n        requesting_container_uuid: null,\n        runtime_constraints: {\n        API: true,\n        cuda: { device_count: 0, driver_version: \"\", hardware_capability: \"\" },\n        keep_cache_disk: 0,\n        keep_cache_ram: 0,\n        ram: 1342177280,\n        vcpus: 1,\n        },\n        runtime_token: \"\",\n        scheduling_parameters: {\n        max_run_time: 0,\n        partitions: [],\n        preemptible: false,\n        },\n        state: \"Final\",\n        use_existing: false,\n        uuid: \"zzzzz-xvhdp-111111111111111\",\n    };\n\n    let expectedContainerRequest = {\n        command: [\n        \"arvados-cwl-runner\",\n        \"--api=containers\",\n        \"--local\",\n        \"--project-uuid=zzzzz-j7d0g-yr18k784zplfeza\",\n        \"/var/lib/cwl/workflow.json#main\",\n        \"/var/lib/cwl/cwl.input.json\",\n        ],\n        containerCountMax: 10,\n        containerImage: \"arvados/jobs\",\n        cwd: \"/var/spool/cwl\",\n        description: \"test decsription\",\n        environment: {},\n        kind: \"arvados#containerRequest\",\n        mounts: {\n        \"/var/lib/cwl/cwl.input.json\": {\n            capacity: 0,\n            content: {\n            input: {\n                basename: \"logo.ai.no.whitespace.png\",\n                class: \"File\",\n                location:\n                \"keep:5d3238c4db721a92c98b0305a47b0485+75/logo.ai.no.whitespace.png\",\n            },\n            reverse_sort: true,\n            },\n            device_type: \"\",\n            exclude_from_output: false,\n            kind: \"json\",\n            path: \"\",\n            portable_data_hash: \"\",\n            uuid: \"\",\n            writable: false,\n        },\n        \"/var/lib/cwl/workflow.json\": {\n            capacity: 0,\n            content: {\n            $graph: [\n                {\n                class: \"Workflow\",\n                doc: \"Reverse the lines in a document, then sort those lines.\",\n                id: \"#main\",\n                inputs: [\n                    {\n                    default: null,\n                    doc: \"The input file to be processed.\",\n                    id: \"#main/input\",\n                    type: \"File\",\n                    },\n                    {\n                    default: true,\n                    doc: \"If true, reverse (decending) sort\",\n                    id: \"#main/reverse_sort\",\n                    type: \"boolean\",\n                    },\n                ],\n                outputs: [\n                    {\n                    doc: \"The output with the lines reversed and sorted.\",\n                    id: \"#main/output\",\n                    outputSource: \"#main/sorted/output\",\n                    type: \"File\",\n                    },\n                ],\n                steps: [\n                    {\n                    id: \"#main/rev\",\n                    in: [{ id: \"#main/rev/input\", source: \"#main/input\" }],\n                    out: [\"#main/rev/output\"],\n                    run: \"#revtool.cwl\",\n                    },\n                    {\n                    id: \"#main/sorted\",\n                    in: [\n                        {\n                        id: \"#main/sorted/input\",\n                        source: \"#main/rev/output\",\n                        },\n                        {\n                        id: \"#main/sorted/reverse\",\n   
                     source: \"#main/reverse_sort\",\n                        },\n                    ],\n                    out: [\"#main/sorted/output\"],\n                    run: \"#sorttool.cwl\",\n                    },\n                ],\n                },\n                {\n                baseCommand: \"rev\",\n                class: \"CommandLineTool\",\n                doc: \"Reverse each line using the `rev` command\",\n                hints: [{ class: \"ResourceRequirement\", ramMin: 8 }],\n                id: \"#revtool.cwl\",\n                inputs: [\n                    {\n                    id: \"#revtool.cwl/input\",\n                    inputBinding: {},\n                    type: \"File\",\n                    },\n                ],\n                outputs: [\n                    {\n                    id: \"#revtool.cwl/output\",\n                    outputBinding: { glob: \"output.txt\" },\n                    type: \"File\",\n                    },\n                ],\n                stdout: \"output.txt\",\n                },\n                {\n                baseCommand: \"sort\",\n                class: \"CommandLineTool\",\n                doc: \"Sort lines using the `sort` command\",\n                hints: [{ class: \"ResourceRequirement\", ramMin: 8 }],\n                id: \"#sorttool.cwl\",\n                inputs: [\n                    {\n                    id: \"#sorttool.cwl/reverse\",\n                    inputBinding: { position: 1, prefix: \"-r\" },\n                    type: \"boolean\",\n                    },\n                    {\n                    id: \"#sorttool.cwl/input\",\n                    inputBinding: { position: 2 },\n                    type: \"File\",\n                    },\n                ],\n                outputs: [\n                    {\n                    id: \"#sorttool.cwl/output\",\n                    outputBinding: { glob: \"output.txt\" },\n                    type: \"File\",\n                    },\n                ],\n                stdout: \"output.txt\",\n                },\n            ],\n            cwlVersion: \"v1.0\",\n            },\n            device_type: \"\",\n            exclude_from_output: false,\n            kind: \"json\",\n            path: \"\",\n            portable_data_hash: \"\",\n            uuid: \"\",\n            writable: false,\n        },\n        \"/var/spool/cwl\": {\n            capacity: 0,\n            content: null,\n            device_type: \"\",\n            exclude_from_output: false,\n            kind: \"collection\",\n            path: \"\",\n            portable_data_hash: \"\",\n            uuid: \"\",\n            writable: true,\n        },\n        stdout: {\n            capacity: 0,\n            content: null,\n            device_type: \"\",\n            exclude_from_output: false,\n            kind: \"file\",\n            path: \"/var/spool/cwl/cwl.output.json\",\n            portable_data_hash: \"\",\n            uuid: \"\",\n            writable: false,\n        },\n        },\n        name: \"newname.cwl\",\n        outputName: \"Output from revsort.cwl\",\n        outputPath: \"/var/spool/cwl\",\n        outputProperties: { key: \"val\" },\n        outputStorageClasses: [\"default\"],\n        outputTtl: 999999,\n        ownerUuid: \"zzzzz-j7d0g-000000000000000\",\n        priority: 500,\n        properties: {\n        template_uuid: \"zzzzz-7fd4e-7xsza0vgfe785cy\",\n        workflowName: \"revsort.cwl\",\n        },\n        runtimeConstraints: 
{\n        API: true,\n        cuda: {\n            device_count: 0,\n            driver_version: \"\",\n            hardware_capability: \"\",\n        },\n        keep_cache_disk: 0,\n        keep_cache_ram: 0,\n        ram: 1342177280,\n        vcpus: 1,\n        },\n        schedulingParameters: {\n        max_run_time: 0,\n        partitions: [],\n        preemptible: false,\n        },\n        state: \"Uncommitted\",\n        useExisting: false,\n    };\n\n    beforeEach(() => {\n        dispatch = cy.stub();\n        services = {\n            containerRequestService: {\n                get: cy.stub().returns(CommonService.mapResponseKeys({data: sampleFailedProcess})).as(\"get\"),\n                create: cy.spy(),\n            },\n        };\n        getState = () => ({\n            auth: {},\n        });\n    });\n\n    it(\"should request the failed process and return a copy with the proper fields\", async () => {\n        // when\n        const newprocess = await copyProcess({\n            name: \"newname.cwl\",\n            uuid: \"zzzzz-xvhdp-111111111111111\",\n            ownerUuid: \"zzzzz-j7d0g-000000000000000\",\n        })(dispatch, getState, services);\n\n        // then\n        cy.get(\"@get\").should(\"be.calledWith\", \"zzzzz-xvhdp-111111111111111\");\n        expect(services.containerRequestService.create).to.have.been.calledWithMatch(expectedContainerRequest);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/store/processes/process-copy-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { resetPickerProjectTree } from 'store/project-tree-picker/project-tree-picker-actions';\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from 'services/services';\nimport { CopyFormDialogData } from 'store/copy-dialog/copy-dialog';\nimport { getProcess } from 'store/processes/process';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { initProjectsTreePicker } from 'store/tree-picker/tree-picker-actions';\nimport { ContainerRequestState } from 'models/container-request';\n\nexport const PROCESS_COPY_FORM_NAME = 'processCopyFormName';\nexport const MULTI_PROCESS_COPY_FORM_NAME = 'multiProcessCopyFormName';\n\nexport const openCopyProcessDialog =\n    (resource: { name: string; uuid: string }) => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const process = getProcess(resource.uuid)(getState().resources);\n        if (process) {\n            dispatch<any>(resetPickerProjectTree());\n            dispatch<any>(initProjectsTreePicker(PROCESS_COPY_FORM_NAME));\n            const initialData: CopyFormDialogData = { name: `Copy of: ${resource.name}`, uuid: resource.uuid, ownerUuid: '' };\n            dispatch(dialogActions.OPEN_DIALOG({ id: PROCESS_COPY_FORM_NAME, data: initialData }));\n        } else {\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Process not found', hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        }\n    };\n\nexport const copyProcess = (resource: CopyFormDialogData) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    try {\n        const process = await services.containerRequestService.get(resource.uuid);\n        const {\n            command,\n            containerCountMax,\n            containerImage,\n            cwd,\n            description,\n            environment,\n            kind,\n            mounts,\n            outputName,\n            outputPath,\n            outputProperties,\n            outputStorageClasses,\n            outputTtl,\n            properties,\n            runtimeConstraints,\n            schedulingParameters,\n            useExisting,\n        } = process;\n        const newProcess = await services.containerRequestService.create({\n            command,\n            containerCountMax,\n            containerImage,\n            cwd,\n            description,\n            environment,\n            kind,\n            mounts,\n            name: resource.name,\n            outputName,\n            outputPath,\n            outputProperties,\n            outputStorageClasses,\n            outputTtl,\n            ownerUuid: resource.ownerUuid,\n            priority: 500,\n            properties,\n            runtimeConstraints,\n            schedulingParameters,\n            state: ContainerRequestState.UNCOMMITTED,\n            useExisting,\n        });\n        dispatch(dialogActions.CLOSE_DIALOG({ id: PROCESS_COPY_FORM_NAME }));\n        return newProcess;\n    } catch (e) {\n        dispatch(dialogActions.CLOSE_DIALOG({ id: PROCESS_COPY_FORM_NAME }));\n        throw new Error('Could not copy the process.');\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/store/processes/process-input-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport { getProcess, Process } from 'store/processes/process';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { getWorkflowInputs } from 'models/workflow';\nimport { JSONMount } from 'models/mount-types';\nimport { MOUNT_PATH_CWL_WORKFLOW } from 'models/process';\n\nexport const PROCESS_INPUT_DIALOG_NAME = 'processInputDialog';\n\nexport const openProcessInputDialog = (processUuid: string) =>\n    (dispatch: Dispatch<any>, getState: () => RootState) => {\n        const process = getProcess(processUuid)(getState().resources);\n        if (process) {\n            const data: any = process;\n            const inputs = getInputsFromWFMount(process);\n            if (inputs && inputs.length > 0) {\n                dispatch(dialogActions.OPEN_DIALOG({ id: PROCESS_INPUT_DIALOG_NAME, data }));\n            } else {\n                dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'There are no inputs in this process!', kind: SnackbarKind.ERROR }));\n            }\n        }\n    };\n\nconst getInputsFromWFMount = (process: Process) => {\n    if (!process || !process.containerRequest.mounts[MOUNT_PATH_CWL_WORKFLOW] ) { return undefined; }\n    const mnt = process.containerRequest.mounts[MOUNT_PATH_CWL_WORKFLOW] as JSONMount;\n    return getWorkflowInputs(mnt.content);\n};"
  },
  {
    "path": "services/workbench2/src/store/processes/process-update-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { FormErrors, initialize, startSubmit, stopSubmit } from \"redux-form\";\nimport { RootState } from \"store/store\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { getCommonResourceServiceError, CommonResourceServiceError } from \"services/common-service/common-resource-service\";\nimport { ServiceRepository } from \"services/services\";\nimport { getProcess } from \"store/processes/process\";\nimport { projectPanelDataActions } from \"store/project-panel/project-panel-action-bind\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\n\nexport interface ProcessUpdateFormDialogData {\n    uuid: string;\n    name: string;\n    description?: string | null;\n}\n\nexport const PROCESS_UPDATE_FORM_NAME = \"processUpdateFormName\";\n\nexport const openProcessUpdateDialog =\n    (resource: ProcessUpdateFormDialogData) => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const process = getProcess(resource.uuid)(getState().resources);\n        if (process) {\n            dispatch(initialize(PROCESS_UPDATE_FORM_NAME, { ...resource, name: process.containerRequest.name }));\n            dispatch(dialogActions.OPEN_DIALOG({ id: PROCESS_UPDATE_FORM_NAME, data: {} }));\n        } else {\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Process not found\", hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        }\n    };\n\nexport const updateProcess =\n    (resource: ProcessUpdateFormDialogData) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(startSubmit(PROCESS_UPDATE_FORM_NAME));\n        try {\n            const updatedProcess = await services.containerRequestService.update(resource.uuid, {\n                name: resource.name,\n                description: resource.description,\n            });\n            dispatch(projectPanelDataActions.REQUEST_ITEMS());\n            dispatch(dialogActions.CLOSE_DIALOG({ id: PROCESS_UPDATE_FORM_NAME }));\n            return updatedProcess;\n        } catch (e) {\n            const error = getCommonResourceServiceError(e);\n            if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                dispatch(stopSubmit(PROCESS_UPDATE_FORM_NAME, { name: \"Process with the same name already exists.\" } as FormErrors));\n            } else {\n                dispatch(dialogActions.CLOSE_DIALOG({ id: PROCESS_UPDATE_FORM_NAME }));\n                dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Could not update the process.\", hideDuration: 2000, kind: SnackbarKind.ERROR }));\n            }\n            return;\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/processes/process.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContainerRequestResource, ContainerRequestState } from '../../models/container-request';\nimport { ContainerResource, ContainerState } from '../../models/container';\nimport { ResourcesState, getResource } from 'store/resources/resources';\nimport { filterResources } from '../resources/resources';\nimport { ResourceKind, Resource, extractUuidKind } from 'models/resource';\nimport { getTimeDiff } from 'common/formatters';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { memoize } from 'lodash';\n\nexport interface Process {\n    containerRequest: ContainerRequestResource;\n    container?: ContainerResource;\n}\n\nexport enum ProcessStatus {\n    CANCELLED = 'Cancelled',\n    COMPLETED = 'Completed',\n    DRAFT = 'Draft',\n    FAILING = 'Failing',\n    FAILED = 'Failed',\n    ONHOLD = 'On hold',\n    QUEUED = 'Queued',\n    RUNNING = 'Running',\n    WARNING = 'Warning',\n    UNKNOWN = 'Unknown',\n    REUSED = 'Reused',\n    CANCELLING = 'Cancelling',\n    RESUBMITTED = 'Resubmitted',\n}\n\nexport enum ProcessProperties {\n    FAILED_CONTAINER_RESUBMITTED = \"arv:failed_container_resubmitted\",\n}\n\n/**\n * Gets a process from the store using container request uuid\n * @param uuid container request associated with process\n * @returns a Process object with containerRequest and optional container or undefined\n */\n\n// both memoizes are needed to avoid x18 calls\nexport const getProcess = memoize((uuid: string) => memoize((resources: ResourcesState): Process | undefined => {\n    if (extractUuidKind(uuid) === ResourceKind.CONTAINER_REQUEST) {\n        const containerRequest = getResource<ContainerRequestResource>(uuid)(resources);\n        if (containerRequest) {\n            if (containerRequest.containerUuid) {\n                const container = getResource<ContainerResource>(containerRequest.containerUuid)(resources);\n                if (container) {\n                    return { containerRequest, container };\n                }\n            }\n            return { containerRequest };\n        }\n    }\n    return;\n}));\n\nexport const getSubprocesses = (uuid: string) => (resources: ResourcesState) => {\n    const process = getProcess(uuid)(resources);\n    if (process && process.container) {\n        const containerRequests = filterResources(isSubprocess(process.container.uuid))(resources) as ContainerRequestResource[];\n        return containerRequests.reduce((subprocesses, { uuid }) => {\n            const process = getProcess(uuid)(resources);\n            return process\n                ? 
[...subprocesses, process]\n                : subprocesses;\n        }, []);\n    }\n    return [];\n};\n\nexport const getProcessRuntime = ({ container }: Process) => {\n    if (container) {\n        if (container.startedAt === null) {\n            return 0;\n        }\n        if (container.finishedAt === null) {\n            // Count it from now\n            return new Date().getTime() - new Date(container.startedAt).getTime();\n        }\n        return getTimeDiff(container.finishedAt, container.startedAt);\n    } else {\n        return 0;\n    }\n};\n\n\nexport const getProcessStatusStyles = (status: string, theme: ArvadosTheme): React.CSSProperties => {\n    let color = theme.customs.colors.grey500;\n    let running = false;\n    switch (status) {\n        case ProcessStatus.RUNNING:\n            color = theme.customs.colors.green800;\n            running = true;\n            break;\n        case ProcessStatus.COMPLETED:\n        case ProcessStatus.REUSED:\n            color = theme.customs.colors.green800;\n            break;\n        case ProcessStatus.WARNING:\n            color = theme.customs.colors.green800;\n            running = true;\n            break;\n        case ProcessStatus.RESUBMITTED:\n            color = theme.customs.colors.darkOrange;\n            break;\n        case ProcessStatus.FAILING:\n            color = theme.customs.colors.red900;\n            running = true;\n            break;\n        case ProcessStatus.CANCELLING:\n            color = theme.customs.colors.red900;\n            running = true;\n            break;\n        case ProcessStatus.CANCELLED:\n        case ProcessStatus.FAILED:\n            color = theme.customs.colors.red900;\n            break;\n        case ProcessStatus.QUEUED:\n            color = theme.customs.colors.grey600;\n            running = true;\n            break;\n        default:\n            color = theme.customs.colors.grey600;\n            break;\n    }\n\n    // Using color and running we build the text, border, and background style properties\n    return {\n        // Set background color when not running, otherwise use white\n        backgroundColor: running ? theme.palette.common.white : color,\n        // Set text color to status color when running, else use white text for solid button\n        color: running ? color : theme.palette.common.white,\n        // Set border color when running, else omit the style entirely\n        ...(running ? 
{ border: `2px solid ${color}` } : {}),\n    };\n};\n\nexport const getProcessStatus = ({ containerRequest, container }: Process): ProcessStatus => {\n    switch (true) {\n        case containerRequest.containerUuid && !container:\n            return ProcessStatus.UNKNOWN;\n\n        case containerRequest.state === ContainerRequestState.UNCOMMITTED:\n            return ProcessStatus.DRAFT;\n\n        case containerRequest.state === ContainerRequestState.FINAL &&\n            containerRequest.properties &&\n            Boolean(containerRequest.properties[ProcessProperties.FAILED_CONTAINER_RESUBMITTED]):\n            // Failed, but a new container request for the same work was resubmitted.\n            return ProcessStatus.RESUBMITTED;\n\n        case containerRequest.state === ContainerRequestState.FINAL &&\n            container?.state === ContainerState.RUNNING:\n            // The request is about to be completed but we haven't gotten the updated\n            // container record yet; if we don't catch this and show it as \"Running\",\n            // it will briefly flicker \"Cancelled\".\n            return ProcessStatus.RUNNING;\n\n        case containerRequest.state === ContainerRequestState.FINAL &&\n            container?.state !== ContainerState.COMPLETE:\n            // Request was finalized before its container started (or the\n            // container was cancelled).\n            return ProcessStatus.CANCELLED;\n\n        case container && container.state === ContainerState.COMPLETE:\n            if (container?.exitCode === 0) {\n                if (containerRequest && container.finishedAt) {\n                    // Don't compare on createdAt because the container can have a\n                    // slightly earlier creation time when it is created in the same\n                    // transaction as the container request. Use finishedAt because most\n                    // people will assume \"reused\" means no additional work needed to be\n                    // done; it's possible to share a running container, but calling it\n                    // \"reused\" in that case is more likely to just be confusing.\n                    const finishedAt = new Date(container.finishedAt).getTime();\n                    const createdAt = new Date(containerRequest.createdAt).getTime();\n                    if (finishedAt < createdAt) {\n                        return ProcessStatus.REUSED;\n                    }\n                }\n                return ProcessStatus.COMPLETED;\n            }\n            return ProcessStatus.FAILED;\n\n        case container?.state === ContainerState.CANCELLED:\n            return ProcessStatus.CANCELLED;\n\n        case container?.state === ContainerState.QUEUED ||\n            container?.state === ContainerState.LOCKED:\n            if (containerRequest.priority === 0) {\n                return ProcessStatus.ONHOLD;\n            }\n            return ProcessStatus.QUEUED;\n\n        case container?.state === ContainerState.RUNNING:\n            if (container?.priority === 0) {\n                return ProcessStatus.CANCELLING;\n            }\n            if (!!container?.runtimeStatus.error) {\n                return ProcessStatus.FAILING;\n            }\n            if (!!container?.runtimeStatus.warning) {\n                return ProcessStatus.WARNING;\n            }\n            return ProcessStatus.RUNNING;\n\n        default:\n            return ProcessStatus.UNKNOWN;\n    }\n};\n\nexport const isProcessRunning = ({ container }: Process): boolean => (\n    container?.state === ContainerState.RUNNING\n);\n\nexport const isProcessQueued = ({ container }: Process): boolean => (\n    container?.state === ContainerState.QUEUED || container?.state === ContainerState.LOCKED\n);\n\nexport const isProcessRunnable = ({ containerRequest }: Process): boolean => (\n    containerRequest.state === ContainerRequestState.UNCOMMITTED\n);\n\nexport const isProcessResumable = ({ containerRequest, container }: Process): boolean => (\n    containerRequest.state === ContainerRequestState.COMMITTED &&\n    containerRequest.priority === 0 &&\n    // Don't show the run button when the container is present and running,\n    // cancelled, or completed\n    !(container && (container.state === ContainerState.RUNNING ||\n                    container.state === ContainerState.CANCELLED ||\n                    container.state === ContainerState.COMPLETE))\n);\n\nexport const isProcessCancelable = memoize(({ containerRequest, container }: Process): boolean => (\n    containerRequest.priority !== null &&\n    containerRequest.priority > 0 &&\n    container !== undefined &&\n    (container.state === ContainerState.QUEUED ||\n        container.state === ContainerState.LOCKED ||\n        container.state === ContainerState.RUNNING)\n));\n\nconst isSubprocess = (containerUuid: string) => (resource: Resource) =>\n    resource.kind === ResourceKind.CONTAINER_REQUEST\n    && (resource as ContainerRequestResource).requestingContainerUuid === containerUuid;\n"
  },
  {
    "path": "services/workbench2/src/store/processes/processes-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { showGroupedCommonResourceResultSnackbars, updateResources } from \"store/resources/resources-actions\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { projectPanelRunActions } from \"store/project-panel/project-panel-action-bind\";\nimport { navigateTo, navigateToRunProcess } from \"store/navigation/navigation-action\";\nimport { goToStep, runProcessPanelActions } from \"store/run-process-panel/run-process-panel-actions\";\nimport { getResource } from \"store/resources/resources\";\nimport { initialize } from \"redux-form\";\nimport { RUN_PROCESS_BASIC_FORM, RunProcessBasicFormData } from \"store/run-process-panel/run-process-panel-actions\";\nimport { RunProcessAdvancedFormData, RUN_PROCESS_ADVANCED_FORM } from \"store/run-process-panel/run-process-panel-actions\";\nimport { MOUNT_PATH_CWL_WORKFLOW, MOUNT_PATH_CWL_INPUT } from \"models/process\";\nimport { CommandInputParameter, getWorkflow, getWorkflowInputs, getWorkflowOutputs, WorkflowInputsData } from \"models/workflow\";\nimport { ProjectResource } from \"models/project\";\nimport { UserResource } from \"models/user\";\nimport { CommandOutputParameter } from \"cwlts/mappings/v1.0/CommandOutputParameter\";\nimport { ContainerRequestState } from \"models/container-request\";\nimport { FilterBuilder } from \"services/api/filter-builder\";\nimport { selectedToArray } from \"components/multiselect-toolbar/MultiselectToolbar.utils\";\nimport { Resource, ResourceKind } from \"models/resource\";\nimport { ContextMenuResource } from \"store/context-menu/context-menu\";\nimport { CommonResourceServiceError } from \"services/common-service/common-resource-service\";\nimport { getProcessPanelCurrentUuid } from \"store/process-panel/process-panel\";\nimport { getProjectPanelCurrentUuid } from \"store/project-panel/project-panel\";\nimport { loadSidePanelTreeProjects, SidePanelTreeCategory } from \"store/side-panel-tree/side-panel-tree-actions\";\nimport { matchProcessRoute, matchProjectRoute } from \"routes/routes\";\n\nexport const loadContainers =\n    (containerUuids: string[], loadMounts: boolean = true) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        let args: any = {\n            filters: new FilterBuilder().addIn(\"uuid\", containerUuids).getFilters(),\n            limit: containerUuids.length,\n        };\n        if (!loadMounts) {\n            args.select = containerFieldsNoMounts;\n        }\n        const { items } = await services.containerService.list(args);\n        dispatch<any>(updateResources(items));\n        return items;\n    };\n\n// Until the api supports unselecting fields, we need a list of all other fields to omit mounts\nexport const containerFieldsNoMounts = [\n    \"auth_uuid\",\n    \"command\",\n    \"container_image\",\n    \"cost\",\n    \"created_at\",\n    \"cwd\",\n    \"environment\",\n    \"etag\",\n    \"exit_code\",\n    \"finished_at\",\n    \"gateway_address\",\n    \"interactive_session_started\",\n    \"kind\",\n    \"lock_count\",\n    \"locked_by_uuid\",\n    \"log\",\n    \"modified_at\",\n    \"modified_by_user_uuid\",\n    \"output_path\",\n    \"output_properties\",\n   
 \"output_storage_classes\",\n    \"output\",\n    \"owner_uuid\",\n    \"priority\",\n    \"progress\",\n    \"runtime_auth_scopes\",\n    \"runtime_constraints\",\n    \"runtime_status\",\n    \"runtime_user_uuid\",\n    \"scheduling_parameters\",\n    \"started_at\",\n    \"state\",\n    \"subrequests_cost\",\n    \"uuid\",\n];\n\nexport const cancelRunningWorkflow = (uuid: string) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    try {\n        const process = await services.containerRequestService.update(uuid, { priority: 0 });\n        dispatch<any>(updateResources([process]));\n        if (process.containerUuid) {\n            const container = await services.containerService.get(process.containerUuid, false);\n            dispatch<any>(updateResources([container]));\n        }\n        return process;\n    } catch (e) {\n        throw new Error(\"Could not cancel the process.\");\n    }\n};\n\nexport const resumeOnHoldWorkflow = (uuid: string) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    try {\n        const process = await services.containerRequestService.update(uuid, { priority: 500 });\n        dispatch<any>(updateResources([process]));\n        if (process.containerUuid) {\n            const container = await services.containerService.get(process.containerUuid, false);\n            dispatch<any>(updateResources([container]));\n        }\n        return process;\n    } catch (e) {\n        throw new Error(\"Could not resume the process.\");\n    }\n};\n\nexport const startWorkflow = (uuid: string) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    try {\n        const process = await services.containerRequestService.update(uuid, { state: ContainerRequestState.COMMITTED });\n        if (process) {\n            dispatch<any>(updateResources([process]));\n            if (process.containerUuid) {\n                services.containerService\n                    .get(process.containerUuid, false)\n                    .then((container) => dispatch<any>(updateResources([container])))\n                    .catch((e) => {\n                        console.error(\"Failed to optimistically load container: \" + process.containerUuid, e);\n                    });\n            }\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Process started\", hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n        } else {\n            dispatch<any>(snackbarActions.OPEN_SNACKBAR({ message: `Failed to start process`, kind: SnackbarKind.ERROR }));\n        }\n    } catch (e) {\n        dispatch<any>(snackbarActions.OPEN_SNACKBAR({ message: `Failed to start process`, kind: SnackbarKind.ERROR }));\n    }\n};\n\nexport const reRunProcess =\n    (processUuid: string, workflowUuid: string) => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const process = getResource<any>(processUuid)(getState().resources);\n        const workflows = getState().runProcessPanel.searchWorkflows;\n        const workflow = workflows.find(workflow => workflow.uuid === workflowUuid);\n        if (workflow && process) {\n            const mainWf = getWorkflow(process.mounts[MOUNT_PATH_CWL_WORKFLOW]);\n            if (mainWf) {\n                mainWf.inputs = getInputs(process);\n            }\n            const stringifiedDefinition = JSON.stringify(process.mounts[MOUNT_PATH_CWL_WORKFLOW].content);\n            const newWorkflow = { 
...workflow, definition: stringifiedDefinition };\n\n            const owner = getResource<ProjectResource | UserResource>(workflow.ownerUuid)(getState().resources);\n            const basicInitialData: RunProcessBasicFormData = { name: `Copy of: ${process.name}`, owner };\n            dispatch<any>(initialize(RUN_PROCESS_BASIC_FORM, basicInitialData));\n\n            const advancedInitialData: RunProcessAdvancedFormData = {\n                description: process.description,\n                output: process.outputName,\n                runtime: process.schedulingParameters.max_run_time,\n                ram: process.runtimeConstraints.ram,\n                vcpus: process.runtimeConstraints.vcpus,\n                keep_cache_ram: process.runtimeConstraints.keep_cache_ram,\n                acr_container_image: process.containerImage,\n            };\n            dispatch<any>(initialize(RUN_PROCESS_ADVANCED_FORM, advancedInitialData));\n\n            dispatch<any>(navigateToRunProcess);\n            dispatch<any>(goToStep(1));\n            dispatch(runProcessPanelActions.SET_STEP_CHANGED(true));\n            dispatch(runProcessPanelActions.SET_SELECTED_WORKFLOW(newWorkflow));\n        } else {\n            dispatch<any>(snackbarActions.OPEN_SNACKBAR({ message: `You can't re-run this process`, kind: SnackbarKind.ERROR }));\n        }\n    };\n\n/*\n * Fetches raw inputs from containerRequest mounts with fallback to properties\n * Returns undefined if containerRequest not loaded\n * Returns {} if inputs not found in mounts or props\n */\nexport const getRawInputs = (data: any): WorkflowInputsData | undefined => {\n    if (!data) {\n        return undefined;\n    }\n    const mountInput = data.mounts?.[MOUNT_PATH_CWL_INPUT]?.content;\n    const propsInput = data.properties?.cwl_input;\n    if (!mountInput && !propsInput) {\n        return {};\n    }\n    return mountInput || propsInput;\n};\n\nexport const getInputs = (data: any): CommandInputParameter[] => {\n    // Definitions from mounts are needed, so we return early if they are missing\n    if (!data || !data.mounts || !data.mounts[MOUNT_PATH_CWL_WORKFLOW]) {\n        return [];\n    }\n    const content = getRawInputs(data) as any;\n    // Only bail out if content is falsy, so that input definitions are still\n    // displayed when no inputs are present (don't check raw content length)\n    if (!content) {\n        return [];\n    }\n\n    const inputs = getWorkflowInputs(data.mounts[MOUNT_PATH_CWL_WORKFLOW].content);\n    return inputs\n        ? inputs.map((it: any) => ({\n              type: it.type,\n              id: it.id,\n              label: it.label,\n              default: content[it.id],\n              // Input values may be keyed by the bare parameter name, i.e. the\n              // portion of the id after the last \"/\"\n              value: content[it.id.split(\"/\").pop()] || [],\n              doc: it.doc,\n          }))\n        : [];\n};\n\n/*\n * Fetches raw outputs from containerRequest properties\n * Assumes containerRequest is loaded\n */\nexport const getRawOutputs = (data: any): any | undefined => {\n    if (!data || !data.properties || !data.properties.cwl_output) {\n        return undefined;\n    }\n    return data.properties.cwl_output;\n};\n\nexport type InputCollectionMount = {\n    path: string;\n    pdh: string;\n};\n\nexport const getInputCollectionMounts = (data: any): InputCollectionMount[] => {\n    if (!data || !data.mounts) {\n        return [];\n    }\n    return Object.keys(data.mounts)\n        .map(key => ({\n            ...data.mounts[key],\n            path: key,\n        }))\n        .filter(mount => mount.kind === \"collection\" && mount.portable_data_hash && mount.path)\n        .map(mount => ({\n            path: mount.path,\n            pdh: mount.portable_data_hash,\n        }));\n};\n\nexport const getOutputParameters = (data: any): CommandOutputParameter[] => {\n    if (!data || !data.mounts || !data.mounts[MOUNT_PATH_CWL_WORKFLOW]) {\n        return [];\n    }\n    const outputs = getWorkflowOutputs(data.mounts[MOUNT_PATH_CWL_WORKFLOW].content);\n    return outputs\n        ? outputs.map((it: any) => ({\n              type: it.type,\n              id: it.id,\n              label: it.label,\n              doc: it.doc,\n          }))\n        : [];\n};\n\nexport const REMOVE_PROCESS_DIALOG = \"removeProcessDialog\";\n\nexport const openRemoveProcessDialog =\n    (resource: ContextMenuResource, numOfProcesses: number) => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const confirmationText =\n            numOfProcesses === 1\n                ? \"Are you sure you want to remove this process?\"\n                : `Are you sure you want to remove these ${numOfProcesses} processes?`;\n        const titleText = numOfProcesses === 1 ? 
\"Remove process permanently\" : \"Remove processes permanently\";\n\n        dispatch(\n            dialogActions.OPEN_DIALOG({\n                id: REMOVE_PROCESS_DIALOG,\n                data: {\n                    title: titleText,\n                    text: confirmationText,\n                    confirmButtonLabel: \"Remove\",\n                    uuid: resource.uuid,\n                    resource,\n                },\n            })\n        );\n    };\n\nexport const CANCEL_PROCESS_DIALOG = \"cancelProcessDialog\";\n\nexport const openCancelProcesswDialog = (uuid: string) => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    dispatch(\n        dialogActions.OPEN_DIALOG({\n            id: CANCEL_PROCESS_DIALOG,\n            data: {\n                title: \"Cancel process\",\n                text: \"Are you sure you want to cancel this process?\",\n                cancelButtonLabel: \"Back\",\n                confirmButtonLabel: \"Confirm\",\n                uuid,\n            },\n        })\n    );\n};\n\nexport const removeProcessPermanently = (uuid: string) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    const currentProcessPanelUuid = getProcessPanelCurrentUuid(getState().router);\n    const currentProjectUuid = getProjectPanelCurrentUuid(getState());\n\n    //if checkedlist has items, use them\n    const checkedList = getState().multiselect.checkedList;\n    const uuidsToRemove: string[] = selectedToArray(checkedList);\n\n    //if no items in checkedlist, default to normal context menu behavior\n    if (!uuidsToRemove.length) uuidsToRemove.push(uuid);\n\n    const processesToRemove = uuidsToRemove\n        .map(uuid => getResource(uuid)(getState().resources) as Resource)\n        .filter(resource => resource.kind === ResourceKind.PROCESS);\n\n    const messageFuncMap = {\n        [CommonResourceServiceError.NONE]: (count: number) => count > 1 ? `Removed ${count} items` : `Item removed`,\n        [CommonResourceServiceError.PERMISSION_ERROR_FORBIDDEN]: (count: number) => count > 1 ? `Remove ${count} items failed: Access Denied` : `Remove failed: Access Denied`,\n        // Since processes are permanently removed only, skip duplicate name error since it's only for untrash\n        [CommonResourceServiceError.UNKNOWN]: (count: number) => count > 1 ? `Remove ${count} items failed` : `Remove failed`,\n    };\n\n    await Promise.allSettled(processesToRemove.map(process => services.containerRequestService.delete(process.uuid, false)))\n        .then(settledPromises => {\n            const { success } = showGroupedCommonResourceResultSnackbars(dispatch, settledPromises, messageFuncMap);\n\n            if (success.length) {\n                const { location } = getState().router;\n                // Processes are deleted immediately, refresh favorites to remove any deleted favorites\n                dispatch<any>(loadSidePanelTreeProjects(SidePanelTreeCategory.FAVORITES));\n\n                if (\n                    // If currently viewing any of the deleted runs, navigate to parent project\n                    matchProcessRoute(location ? 
location.pathname : \"\") &&\n                    currentProcessPanelUuid\n                ) {\n                    const currentProcessDeleted = success.find((promiseResult) => promiseResult.value.uuid === currentProcessPanelUuid);\n                    if (currentProcessDeleted) {\n                        dispatch<any>(navigateTo(currentProcessDeleted.value.ownerUuid));\n                    }\n                } else if (\n                    // If currently viewing the parent project of any of the deleted runs, refresh project runs tab\n                    matchProjectRoute(location ? location.pathname : \"\") &&\n                    currentProjectUuid &&\n                    success.find((promiseResult) => promiseResult.value.ownerUuid === currentProjectUuid)\n                ) {\n                    dispatch(projectPanelRunActions.REQUEST_ITEMS());\n                }\n            }\n        });\n};\n"
  },
  {
    "path": "services/workbench2/src/store/processes/processes-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ServiceRepository } from 'services/services';\nimport { MiddlewareAPI, Dispatch } from 'redux';\nimport {\n    DataExplorerMiddlewareService, dataExplorerToListParams, listResultsToDataExplorerItemsMeta, getDataExplorerColumnFilters, getOrder\n} from 'store/data-explorer/data-explorer-middleware-service';\nimport { RootState } from 'store/store';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { DataExplorer, getDataExplorer } from 'store/data-explorer/data-explorer-reducer';\nimport { BoundDataExplorerActions, couldNotFetchItemsAvailable } from 'store/data-explorer/data-explorer-action';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { ListArguments, ListResults } from 'services/common-service/common-service';\nimport { ContentsArguments } from 'services/groups-service/groups-service';\nimport { ProcessResource } from 'models/process';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { DataColumns } from 'components/data-table/data-column';\nimport { ProcessStatusFilter, buildProcessStatusFilters } from '../resource-type-filters/resource-type-filters';\nimport { ContainerRequestResource, containerRequestFieldsNoMounts } from 'models/container-request';\nimport { progressIndicatorActions } from '../progress-indicator/progress-indicator-actions';\nimport { containerFieldsNoMounts } from 'store/processes/processes-actions';\n\n    export class ProcessesMiddlewareService extends DataExplorerMiddlewareService {\n        constructor(private services: ServiceRepository, private actions: BoundDataExplorerActions, id: string) {\n        super(id);\n    }\n\n    getFilters(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): string | null {\n        const columns = dataExplorer.columns as DataColumns<string, ContainerRequestResource>;\n        const statusColumnFilters = getDataExplorerColumnFilters(columns, 'Status');\n        const activeStatusFilter = Object.keys(statusColumnFilters).find(\n            filterName => statusColumnFilters[filterName].selected\n        ) || ProcessStatusFilter.ALL;\n\n\n        let filters = new FilterBuilder().addIsA('uuid', 'arvados#containerRequest');\n        if (dataExplorer.searchValue && dataExplorer.searchValue !== \"\") {\n            filters = filters.addILike(\"name\", dataExplorer.searchValue);\n        }\n\n        return buildProcessStatusFilters(filters, activeStatusFilter).getFilters();\n    }\n\n\n    getParams(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): ContentsArguments | null {\n        const filters = this.getFilters(api, dataExplorer)\n        if (filters === null) {\n            return null;\n        }\n        return {\n            ...dataExplorerToListParams(dataExplorer),\n            filters,\n            order: getOrder<ProcessResource>(dataExplorer),\n            select: containerRequestFieldsNoMounts,\n            count: 'none',\n            include: [\"owner_uuid\", \"container_uuid\"]\n        };\n    }\n\n    getCountParams(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): ListArguments | null {\n        const filters = this.getFilters(api, dataExplorer);\n        if (filters === null) {\n            return null;\n        }\n        return {\n            filters,\n            limit: 0,\n            count: 'exact',\n        };\n    }\n\n    async 
requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n\n        try {\n            if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n            const params = this.getParams(api, dataExplorer);\n\n            // Get items\n            if (params !== null) {\n                const containerRequests = await this.services.groupsService.contents('',\n                    {\n                        ...this.getParams(api, dataExplorer),\n                        select: [...containerRequestFieldsNoMounts, \"can_write\", \"can_manage\"].concat(containerFieldsNoMounts)\n                });\n                api.dispatch(updateResources(containerRequests.items));\n                if (containerRequests.included) {\n                    api.dispatch(updateResources(containerRequests.included));\n                }\n\n                api.dispatch(this.actions.SET_ITEMS({\n                    ...listResultsToDataExplorerItemsMeta(containerRequests),\n                    items: containerRequests.items.map(resource => resource.uuid),\n                }));\n            } else {\n                api.dispatch(this.actions.SET_ITEMS({\n                    itemsAvailable: 0,\n                    page: 0,\n                    rowsPerPage: dataExplorer.rowsPerPage,\n                    items: [],\n                }));\n            }\n        } catch {\n            api.dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: 'Could not fetch process list.',\n                kind: SnackbarKind.ERROR\n            }));\n        } finally {\n            if (!background) { api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId())); }\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        const countParams = this.getCountParams(api, dataExplorer);\n\n        if (criteriaChanged && countParams !== null) {\n            // Get itemsAvailable\n            return this.services.groupsService.contents('', countParams)\n                       .then((results: ListResults<ContainerRequestResource>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(this.actions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        couldNotFetchItemsAvailable();\n                    }\n                });\n        }\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/store/progress-indicator/progress-indicator-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\n\nexport const progressIndicatorActions = unionize({\n    START_WORKING: ofType<string>(),\n    STOP_WORKING: ofType<string>(),\n});\n\nexport type ProgressIndicatorAction = UnionOf<typeof progressIndicatorActions>;\n\nexport const WORKBENCH_LOADING_SCREEN = \"workbenchLoadingScreen\";\n\n"
  },
  {
    "path": "services/workbench2/src/store/progress-indicator/progress-indicator-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ProgressIndicatorAction, progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\n\ntype ProgressIndicatorState = string[];\n\nconst initialState: ProgressIndicatorState = [];\n\nexport const progressIndicatorReducer = (state: ProgressIndicatorState = initialState, action: ProgressIndicatorAction) => {\n    return progressIndicatorActions.match(action, {\n        START_WORKING: id => [...state, id],\n        STOP_WORKING: id => state.filter(p => p !== id),\n        default: () => state,\n    });\n};\n"
  },
  {
    "path": "services/workbench2/src/store/progress-indicator/with-progress.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\n\nexport type WithProgressStateProps = {\n    working: boolean;\n};\n\nexport const withProgress = (id: string) =>\n    (component: React.ComponentType<WithProgressStateProps>) =>\n        connect(mapStateToProps(id))(component);\n\nexport const mapStateToProps = (id: string) => (state: RootState): WithProgressStateProps => {\n    return { working: state.progressIndicator.includes(id) };\n};\n"
  },
  {
    "path": "services/workbench2/src/store/project-panel/project-panel-action-bind.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { bindDataExplorerActions } from \"store/data-explorer/data-explorer-action\";\n\n// These are split into a separate file to avoid circular imports causing\n// invariant violations with unit tests\n\nexport const PROJECT_PANEL_DATA_ID = \"projectPanelData\";\nexport const projectPanelDataActions = bindDataExplorerActions(PROJECT_PANEL_DATA_ID);\n\nexport const PROJECT_PANEL_RUN_ID = \"projectPanelRun\";\nexport const projectPanelRunActions = bindDataExplorerActions(PROJECT_PANEL_RUN_ID);\n"
  },
  {
    "path": "services/workbench2/src/store/project-panel/project-panel-action.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { propertiesActions } from \"store/properties/properties-actions\";\nimport { loadProject } from \"store/workbench/workbench-actions\";\nimport { projectPanelRunActions, projectPanelDataActions } from \"store/project-panel/project-panel-action-bind\";\nimport { PROJECT_PANEL_CURRENT_UUID, IS_PROJECT_PANEL_TRASHED } from \"./project-panel\";\n\n/**\n * Project panel tab labels\n * This is used to associate the labels used to display tabs / determine default\n * project tab with the values stored in user preferences which also referece these values\n */\nexport const ProjectPanelTabLabels = {\n    OVERVIEW: \"Overview\",\n    DATA: \"Data\",\n    RUNS: \"Workflow Runs\",\n};\n\nexport const RootProjectPanelTabLabels = {\n    DATA: \"Data\",\n    RUNS: \"Workflow Runs\",\n};\n\n/**\n * openProjectPanel runs on any route change that matches the project panel\n * This includes navigating within a project as well as the side panel\n * including refreshing the same location\n */\nexport const openProjectPanel = (projectUuid: string) => async (dispatch: Dispatch) => {\n    // Pre-emptively set working as early as possible to avoid delay from loadProject codepath\n    dispatch(projectPanelDataActions.SET_WORKING(true));\n    dispatch(projectPanelRunActions.SET_WORKING(true));\n\n    await dispatch<any>(loadProject(projectUuid));\n    // Setting the current UUID must be done before requesting items, it also triggers\n    // the project panel to reset pagination if the uuid changed (we can't tell here)\n    dispatch(propertiesActions.SET_PROPERTY({ key: PROJECT_PANEL_CURRENT_UUID, value: projectUuid }));\n\n    dispatch(projectPanelDataActions.RESET_EXPLORER_SEARCH_VALUE());\n    dispatch(projectPanelDataActions.REQUEST_ITEMS());\n\n    dispatch(projectPanelRunActions.RESET_EXPLORER_SEARCH_VALUE());\n    dispatch(projectPanelRunActions.REQUEST_ITEMS());\n};\n\nexport const setIsProjectPanelTrashed = (isTrashed: boolean) => propertiesActions.SET_PROPERTY({ key: IS_PROJECT_PANEL_TRASHED, value: isTrashed });\n"
  },
  {
    "path": "services/workbench2/src/store/project-panel/project-panel-data-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport {\n    DataExplorerMiddlewareService,\n    dataExplorerToListParams,\n    getDataExplorerColumnFilters,\n    listResultsToDataExplorerItemsMeta,\n} from \"store/data-explorer/data-explorer-middleware-service\";\nimport { ProjectPanelDataColumnNames } from \"views/project-panel/project-panel-columns\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { DataColumns, SortDirection } from \"components/data-table/data-column\";\nimport { OrderBuilder, OrderDirection } from \"services/api/order-builder\";\nimport { FilterBuilder, joinFilters } from \"services/api/filter-builder\";\nimport { ContentsArguments, GroupContentsResource, GroupContentsResourcePrefix } from \"services/groups-service/groups-service\";\nimport { updateFavorites } from \"store/favorites/favorites-actions\";\nimport { IS_PROJECT_PANEL_TRASHED, getProjectPanelCurrentUuid } from \"store/project-panel/project-panel\";\nimport { projectPanelDataActions } from \"store/project-panel/project-panel-action-bind\";\nimport { Dispatch, MiddlewareAPI } from \"redux\";\nimport { ProjectResource } from \"models/project\";\nimport { updateResources } from \"store/resources/resources-actions\";\nimport { getProperty } from \"store/properties/properties\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { DataExplorer, getDataExplorer } from \"store/data-explorer/data-explorer-reducer\";\nimport { ListResults } from \"services/common-service/common-service\";\nimport { getSortColumn } from \"store/data-explorer/data-explorer-reducer\";\nimport { buildProcessStatusFilters, serializeDataResourceTypeFilters } from \"store/resource-type-filters/resource-type-filters\";\nimport { updatePublicFavorites } from \"store/public-favorites/public-favorites-actions\";\nimport { selectedFieldsOfGroup } from \"models/group\";\nimport { defaultCollectionSelectedFields } from \"models/collection\";\nimport { ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport { removeDisabledButton } from \"store/multiselect/multiselect-actions\";\nimport { couldNotFetchItemsAvailable } from \"store/data-explorer/data-explorer-action\";\n\nexport class ProjectPanelDataMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        const projectUuid = getProjectPanelCurrentUuid(state);\n        const isProjectTrashed = getProperty<string>(IS_PROJECT_PANEL_TRASHED)(state.properties);\n        if (!projectUuid) {\n            api.dispatch(projectPanelCurrentUuidIsNotSet());\n        } else if (!dataExplorer) {\n            api.dispatch(projectPanelDataExplorerIsNotSet());\n        } else {\n            try {\n                api.dispatch<any>(projectPanelDataActions.SET_IS_NOT_FOUND({ isNotFound: false }));\n                if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n\n                // Get items\n                const response = 
await this.services.groupsService.contents(projectUuid, getParams(dataExplorer, !!isProjectTrashed));\n                const resourceUuids = [...response.items.map(item => item.uuid), projectUuid];\n                api.dispatch<any>(updateFavorites(resourceUuids));\n                api.dispatch<any>(updatePublicFavorites(resourceUuids));\n                api.dispatch(updateResources(response.items));\n                api.dispatch(setItems(response));\n            } catch (e) {\n                api.dispatch(\n                    projectPanelDataActions.SET_ITEMS({\n                        items: [],\n                        itemsAvailable: 0,\n                        page: 0,\n                        rowsPerPage: dataExplorer.rowsPerPage,\n                    })\n                );\n                if (e.status === 404) {\n                    api.dispatch<any>(projectPanelDataActions.SET_IS_NOT_FOUND({ isNotFound: true}));\n                }\n                else {\n                    api.dispatch(couldNotFetchProjectContents());\n                }\n            } finally {\n                if (!background) {\n                    api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n                    api.dispatch<any>(removeDisabledButton(ContextMenuActionNames.MOVE_TO_TRASH))\n                }\n            }\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        const projectUuid = getProjectPanelCurrentUuid(state);\n        const isProjectTrashed = getProperty<string>(IS_PROJECT_PANEL_TRASHED)(state.properties);\n\n        if (criteriaChanged && projectUuid) {\n            // Get itemsAvailable\n            return this.services.groupsService.contents(projectUuid, getCountParams(dataExplorer, !!isProjectTrashed))\n                .then((results: ListResults<GroupContentsResource>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(projectPanelDataActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        couldNotFetchItemsAvailable();\n                    }\n                });\n        }\n    }\n}\n\nexport const setItems = (listResults: ListResults<GroupContentsResource>) =>\n    projectPanelDataActions.SET_ITEMS({\n        ...listResultsToDataExplorerItemsMeta(listResults),\n        items: listResults.items.map(resource => resource.uuid),\n    });\n\nexport const getParams = (dataExplorer: DataExplorer, isProjectTrashed: boolean): ContentsArguments => ({\n    ...dataExplorerToListParams(dataExplorer),\n    order: getOrder(dataExplorer),\n    filters: getFilters(dataExplorer),\n    includeTrash: isProjectTrashed,\n    select: selectedFieldsOfGroup.concat(defaultCollectionSelectedFields),\n    count: 'none',\n});\n\nconst getCountParams = (dataExplorer: DataExplorer, isProjectTrashed: boolean): ContentsArguments => ({\n    filters: getFilters(dataExplorer),\n    includeTrash: isProjectTrashed,\n    limit: 0,\n    count: 'exact',\n});\n\nexport const getFilters = (dataExplorer: DataExplorer) => {\n    const columns = dataExplorer.columns as DataColumns<string, ProjectResource>;\n    const typeFilters = serializeDataResourceTypeFilters(getDataExplorerColumnFilters(columns, ProjectPanelDataColumnNames.TYPE));\n    const statusColumnFilters = 
getDataExplorerColumnFilters(columns, \"Status\");\n    const activeStatusFilter = Object.keys(statusColumnFilters).find(filterName => statusColumnFilters[filterName].selected);\n\n    // TODO: Extract group contents name filter\n    const nameFilters = new FilterBuilder()\n        .addILike(\"name\", dataExplorer.searchValue, GroupContentsResourcePrefix.COLLECTION)\n        .addILike(\"name\", dataExplorer.searchValue, GroupContentsResourcePrefix.PROJECT)\n        .getFilters();\n\n    // Filter by container status\n    const statusFilters = buildProcessStatusFilters(new FilterBuilder(), activeStatusFilter || \"\", GroupContentsResourcePrefix.PROCESS).getFilters();\n\n    return joinFilters(statusFilters, typeFilters, nameFilters);\n};\n\nconst getOrder = (dataExplorer: DataExplorer) => {\n    const sortColumn = getSortColumn<ProjectResource>(dataExplorer);\n    const order = new OrderBuilder<ProjectResource>();\n    if (sortColumn && sortColumn.sort) {\n        const sortDirection = sortColumn.sort.direction === SortDirection.ASC ? OrderDirection.ASC : OrderDirection.DESC;\n\n        // Use createdAt as a secondary sort column so we break ties consistently.\n        return order\n            .addOrder(sortDirection, sortColumn.sort.field, GroupContentsResourcePrefix.COLLECTION)\n            .addOrder(sortDirection, sortColumn.sort.field, GroupContentsResourcePrefix.PROJECT)\n            .addOrder(sortDirection, sortColumn.sort.field, GroupContentsResourcePrefix.WORKFLOW)\n            .addOrder(OrderDirection.DESC, \"createdAt\")\n            .getOrder();\n    } else {\n        return order.getOrder();\n    }\n};\n\nconst projectPanelCurrentUuidIsNotSet = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: \"Project panel is not opened.\",\n        kind: SnackbarKind.ERROR,\n    });\n\nconst couldNotFetchProjectContents = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: \"Could not fetch project contents.\",\n        kind: SnackbarKind.ERROR,\n    });\n\nconst projectPanelDataExplorerIsNotSet = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: \"Project panel is not ready.\",\n        kind: SnackbarKind.ERROR,\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/project-panel/project-panel-run-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport {\n    DataExplorerMiddlewareService,\n    dataExplorerToListParams,\n    getDataExplorerColumnFilters,\n    listResultsToDataExplorerItemsMeta,\n} from \"store/data-explorer/data-explorer-middleware-service\";\nimport { ProjectPanelRunColumnNames } from \"views/project-panel/project-panel-columns\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { DataColumns, SortDirection } from \"components/data-table/data-column\";\nimport { OrderBuilder, OrderDirection } from \"services/api/order-builder\";\nimport { FilterBuilder, joinFilters } from \"services/api/filter-builder\";\nimport { ContentsArguments, GroupContentsResource, GroupContentsResourcePrefix } from \"services/groups-service/groups-service\";\nimport { updateFavorites } from \"store/favorites/favorites-actions\";\nimport { IS_PROJECT_PANEL_TRASHED, getProjectPanelCurrentUuid } from \"store/project-panel/project-panel\";\nimport { projectPanelRunActions } from \"store/project-panel/project-panel-action-bind\";\nimport { Dispatch, MiddlewareAPI } from \"redux\";\nimport { ProjectResource } from \"models/project\";\nimport { updateResources } from \"store/resources/resources-actions\";\nimport { getProperty } from \"store/properties/properties\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { DataExplorer, getDataExplorer } from \"store/data-explorer/data-explorer-reducer\";\nimport { ListResults } from \"services/common-service/common-service\";\nimport { loadContainers } from \"store/processes/processes-actions\";\nimport { ResourceKind } from \"models/resource\";\nimport { getSortColumn } from \"store/data-explorer/data-explorer-reducer\";\nimport { buildProcessStatusFilters, serializeProcessTypeGroupContentsFilters } from \"store/resource-type-filters/resource-type-filters\";\nimport { updatePublicFavorites } from \"store/public-favorites/public-favorites-actions\";\nimport { containerRequestFieldsNoMounts } from \"models/container-request\";\nimport { ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport { removeDisabledButton } from \"store/multiselect/multiselect-actions\";\nimport { couldNotFetchItemsAvailable } from \"store/data-explorer/data-explorer-action\";\n\nexport class ProjectPanelRunMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        const projectUuid = getProjectPanelCurrentUuid(state);\n        const isProjectTrashed = getProperty<string>(IS_PROJECT_PANEL_TRASHED)(state.properties);\n        if (!projectUuid) {\n            api.dispatch(projectPanelCurrentUuidIsNotSet());\n        } else if (!dataExplorer) {\n            api.dispatch(projectPanelDataExplorerIsNotSet());\n        } else {\n            try {\n                api.dispatch<any>(projectPanelRunActions.SET_IS_NOT_FOUND({ isNotFound: false }));\n                if (!background) { 
api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n\n                // Get items\n                const containerRequests = await this.services.groupsService.contents(projectUuid, getParams(dataExplorer, projectUuid, !!isProjectTrashed));\n                const resourceUuids = containerRequests.items.map(item => item.uuid);\n                api.dispatch<any>(updateFavorites(resourceUuids));\n                api.dispatch<any>(updatePublicFavorites(resourceUuids));\n                api.dispatch(updateResources(containerRequests.items));\n                await api.dispatch<any>(loadMissingProcessesInformation(containerRequests.items));\n                api.dispatch(setItems(containerRequests));\n            } catch (e) {\n                api.dispatch(\n                    projectPanelRunActions.SET_ITEMS({\n                        items: [],\n                        itemsAvailable: 0,\n                        page: 0,\n                        rowsPerPage: dataExplorer.rowsPerPage,\n                    })\n                );\n                if (e.status === 404) {\n                    api.dispatch<any>(projectPanelRunActions.SET_IS_NOT_FOUND({ isNotFound: true}));\n                }\n                else {\n                    api.dispatch(couldNotFetchProjectContents());\n                }\n            } finally {\n                if (!background) {\n                    api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n                    api.dispatch<any>(removeDisabledButton(ContextMenuActionNames.MOVE_TO_TRASH))\n                }\n            }\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        const projectUuid = getProjectPanelCurrentUuid(state);\n        const isProjectTrashed = getProperty<string>(IS_PROJECT_PANEL_TRASHED)(state.properties);\n\n        if (criteriaChanged && projectUuid) {\n            // Get itemsAvailable\n            return this.services.groupsService.contents(projectUuid, getCountParams(dataExplorer, projectUuid, !!isProjectTrashed))\n                .then((results: ListResults<GroupContentsResource>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(projectPanelRunActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        couldNotFetchItemsAvailable();\n                    }\n                });\n        }\n    }\n}\n\nexport const loadMissingProcessesInformation = (resources: GroupContentsResource[]) => async (dispatch: Dispatch) => {\n    const containerUuids = resources.reduce((uuids, resource) => {\n        return resource.kind === ResourceKind.CONTAINER_REQUEST && resource.containerUuid && !uuids.includes(resource.containerUuid)\n            ? 
[...uuids, resource.containerUuid]\n            : uuids;\n    }, [] as string[]);\n    if (containerUuids.length > 0) {\n        await dispatch<any>(loadContainers(containerUuids, false));\n    }\n};\n\nexport const setItems = (listResults: ListResults<GroupContentsResource>) =>\n    projectPanelRunActions.SET_ITEMS({\n        ...listResultsToDataExplorerItemsMeta(listResults),\n        items: listResults.items.map(resource => resource.uuid),\n    });\n\nexport const getParams = (dataExplorer: DataExplorer, projectUuid: string, isProjectTrashed: boolean): ContentsArguments => ({\n    ...dataExplorerToListParams(dataExplorer),\n    order: getOrder(dataExplorer),\n    filters: getFilters(dataExplorer, projectUuid),\n    includeTrash: isProjectTrashed,\n    select: containerRequestFieldsNoMounts,\n    count: 'none',\n});\n\nconst getCountParams = (dataExplorer: DataExplorer, projectUuid: string, isProjectTrashed: boolean): ContentsArguments => ({\n    filters: getFilters(dataExplorer, projectUuid),\n    includeTrash: isProjectTrashed,\n    limit: 0,\n    count: 'exact',\n});\n\nexport const getFilters = (dataExplorer: DataExplorer, projectUuid: string) => {\n    const columns = dataExplorer.columns as DataColumns<string, ProjectResource>;\n    const typeFilters = serializeProcessTypeGroupContentsFilters(getDataExplorerColumnFilters(columns, ProjectPanelRunColumnNames.TYPE));\n    const statusColumnFilters = getDataExplorerColumnFilters(columns, ProjectPanelRunColumnNames.STATUS);\n    const activeStatusFilter = Object.keys(statusColumnFilters).find(filterName => statusColumnFilters[filterName].selected);\n\n    // TODO: Extract group contents name filter\n    const nameFilters = new FilterBuilder()\n        .addEqual('owner_uuid', projectUuid)\n        .addILike(\"name\", dataExplorer.searchValue)\n        .getFilters();\n\n    // Filter by container status\n    const statusFilters = buildProcessStatusFilters(new FilterBuilder(), activeStatusFilter || \"\", GroupContentsResourcePrefix.PROCESS).getFilters();\n\n    return joinFilters(statusFilters, typeFilters, nameFilters);\n};\n\nconst getOrder = (dataExplorer: DataExplorer) => {\n    const sortColumn = getSortColumn<ProjectResource>(dataExplorer);\n    const order = new OrderBuilder<ProjectResource>();\n    if (sortColumn && sortColumn.sort) {\n        const sortDirection = sortColumn.sort.direction === SortDirection.ASC ? OrderDirection.ASC : OrderDirection.DESC;\n\n        // Use createdAt as a secondary sort column so we break ties consistently.\n        return order\n            .addOrder(sortDirection, sortColumn.sort.field, GroupContentsResourcePrefix.PROCESS)\n            .addOrder(OrderDirection.DESC, \"createdAt\")\n            .getOrder();\n    } else {\n        return order.getOrder();\n    }\n};\n\nconst projectPanelCurrentUuidIsNotSet = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: \"Project panel is not opened.\",\n        kind: SnackbarKind.ERROR,\n    });\n\nconst couldNotFetchProjectContents = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: \"Could not fetch project contents.\",\n        kind: SnackbarKind.ERROR,\n    });\n\nconst projectPanelDataExplorerIsNotSet = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: \"Project panel is not ready.\",\n        kind: SnackbarKind.ERROR,\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/project-panel/project-panel.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { getProperty } from \"store/properties/properties\";\nimport { RootState } from \"store/store\";\n\nexport const PROJECT_PANEL_CURRENT_UUID = \"projectPanelCurrentUuid\";\nexport const IS_PROJECT_PANEL_TRASHED = \"isProjectPanelTrashed\";\n\nexport const getProjectPanelCurrentUuid = (state: RootState) => getProperty<string>(PROJECT_PANEL_CURRENT_UUID)(state.properties);\n"
  },
  {
    "path": "services/workbench2/src/store/project-tree-picker/project-tree-picker-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { getUserUuid } from \"common/getuser\";\nimport { ServiceRepository } from \"services/services\";\nimport { mockProjectResource } from \"models/test-utils\";\nimport { treePickerActions, receiveTreePickerProjectsData } from \"store/tree-picker/tree-picker-actions\";\nimport { TreePickerId } from 'models/tree';\n\nexport const resetPickerProjectTree = () => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    dispatch<any>(treePickerActions.RESET_TREE_PICKER({ pickerId: TreePickerId.PROJECTS }));\n    dispatch<any>(treePickerActions.RESET_TREE_PICKER({ pickerId: TreePickerId.SHARED_WITH_ME }));\n    dispatch<any>(treePickerActions.RESET_TREE_PICKER({ pickerId: TreePickerId.FAVORITES }));\n\n    dispatch<any>(initPickerProjectTree());\n};\n\nexport const initPickerProjectTree = () => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    const uuid = getUserUuid(getState());\n    if (!uuid) { return; }\n    dispatch<any>(getPickerTreeProjects(uuid));\n    dispatch<any>(getSharedWithMeProjectsPickerTree(uuid));\n    dispatch<any>(getFavoritesProjectsPickerTree(uuid));\n};\n\nconst getPickerTreeProjects = (uuid: string = '') => {\n    return getProjectsPickerTree(uuid, TreePickerId.PROJECTS);\n};\n\nconst getSharedWithMeProjectsPickerTree = (uuid: string = '') => {\n    return getProjectsPickerTree(uuid, TreePickerId.SHARED_WITH_ME);\n};\n\nconst getFavoritesProjectsPickerTree = (uuid: string = '') => {\n    return getProjectsPickerTree(uuid, TreePickerId.FAVORITES);\n};\n\nconst getProjectsPickerTree = (uuid: string, kind: string) => {\n    return receiveTreePickerProjectsData(\n        '',\n        [mockProjectResource({ uuid, name: kind })],\n        kind\n    );\n};\n"
  },
  {
    "path": "services/workbench2/src/store/projects/project-create-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from 'store/store';\nimport { getUserUuid } from \"common/getuser\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { getCommonResourceServiceError, CommonResourceServiceError } from 'services/common-service/common-resource-service';\nimport { ProjectResource } from 'models/project';\nimport { ServiceRepository } from 'services/services';\nimport { matchProjectRoute, matchRunProcessRoute } from 'routes/routes';\nimport { RouterState } from \"connected-react-router\";\nimport { GroupClass } from \"models/group\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\n\nexport interface ProjectCreateFormDialogData {\n    ownerUuid: string;\n    name: string;\n    description: string;\n    properties: ProjectProperties;\n}\n\nexport interface ProjectProperties {\n    [key: string]: string | string[];\n}\n\nexport const PROJECT_CREATE_FORM_NAME = 'projectCreateFormName';\nexport const PROJECT_CREATE_PROPERTIES_FORM_NAME = 'projectCreatePropertiesFormName';\n\nexport const isProjectOrRunProcessRoute = (router: RouterState) => {\n    const pathname = router.location ? router.location.pathname : '';\n    const matchProject = matchProjectRoute(pathname);\n    const matchRunProcess = matchRunProcessRoute(pathname);\n    return Boolean(matchProject || matchRunProcess);\n};\n\nexport const openProjectCreateDialog = (ownerUuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const { router } = getState();\n        if (!isProjectOrRunProcessRoute(router)) {\n            const userUuid = getUserUuid(getState());\n            if (!userUuid) { return; }\n        }\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: PROJECT_CREATE_FORM_NAME,\n            data: {\n                sourcePanel: GroupClass.PROJECT,\n                ownerUuid: ownerUuid,\n            }\n        }));\n    };\n\nexport const createProject = (project: Partial<ProjectResource>, setSubmitErr: (err: string) => void) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            dispatch(progressIndicatorActions.START_WORKING(PROJECT_CREATE_FORM_NAME));\n            const newProject = await services.projectService.create(project, false);\n            dispatch(dialogActions.CLOSE_DIALOG({ id: PROJECT_CREATE_FORM_NAME }));\n            return newProject;\n        } catch (e) {\n            const error = getCommonResourceServiceError(e);\n            if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                setSubmitErr('Project with the same name already exists.');\n            } else {\n                dispatch(dialogActions.CLOSE_DIALOG({ id: PROJECT_CREATE_FORM_NAME }));\n                const errMsg = e.errors\n                    ? 
e.errors.join('')\n                    : 'There was an error while creating the project.';\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: errMsg,\n                    hideDuration: 2000,\n                    kind: SnackbarKind.ERROR\n                }));\n            }\n            return undefined;\n        } finally {\n            dispatch(progressIndicatorActions.STOP_WORKING(PROJECT_CREATE_FORM_NAME));\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/projects/project-lock-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { ServiceRepository } from \"services/services\";\nimport { projectPanelDataActions } from \"store/project-panel/project-panel-action-bind\";\nimport { loadResource } from \"store/resources/resources-actions\";\nimport { RootState } from \"store/store\";\nimport { ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport { addDisabledButton, removeDisabledButton } from \"store/multiselect/multiselect-actions\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\n\nexport const freezeProject = (uuid: string) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    dispatch<any>(addDisabledButton(ContextMenuActionNames.FREEZE_PROJECT))\n    const userUUID = getState().auth.user!.uuid;\n    let updatedProject;\n\n    try {\n        updatedProject = await services.projectService.update(uuid, {\n            frozenByUuid: userUUID,\n        });\n    } catch (e) {\n        console.error(e);\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Could not freeze project', hideDuration: 4000, kind: SnackbarKind.ERROR }));\n    }\n\n    dispatch(projectPanelDataActions.REQUEST_ITEMS());\n    dispatch<any>(loadResource(uuid, false));\n    dispatch<any>(removeDisabledButton(ContextMenuActionNames.FREEZE_PROJECT))\n    return updatedProject;\n};\n\n\nexport const unfreezeProject = (uuid: string) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    dispatch<any>(addDisabledButton(ContextMenuActionNames.FREEZE_PROJECT))\n    const updatedProject = await services.projectService.update(uuid, {\n        frozenByUuid: null,\n    });\n\n    dispatch(projectPanelDataActions.REQUEST_ITEMS());\n    dispatch<any>(loadResource(uuid, false));\n    dispatch<any>(removeDisabledButton(ContextMenuActionNames.FREEZE_PROJECT))\n    return updatedProject;\n};\n"
  },
  {
    "path": "services/workbench2/src/store/projects/project-move-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { ServiceRepository } from \"services/services\";\nimport { RootState } from \"store/store\";\nimport { getUserUuid } from \"common/getuser\";\nimport { getCommonResourceServiceError, CommonResourceServiceError } from \"services/common-service/common-resource-service\";\nimport { MoveToFormDialogData } from \"store/move-to-dialog/move-to-dialog\";\nimport { resetPickerProjectTree } from \"store/project-tree-picker/project-tree-picker-actions\";\nimport { initProjectsTreePicker } from \"store/tree-picker/tree-picker-actions\";\nimport { projectPanelDataActions } from \"store/project-panel/project-panel-action-bind\";\nimport { loadSidePanelTreeProjects } from \"../side-panel-tree/side-panel-tree-actions\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\n\nexport const PROJECT_MOVE_FORM_NAME = \"projectMoveFormName\";\n\nexport const openMoveProjectDialog = (resource: any) => {\n    return (dispatch: Dispatch) => {\n        dispatch<any>(resetPickerProjectTree());\n        dispatch<any>(initProjectsTreePicker(PROJECT_MOVE_FORM_NAME));\n        dispatch(dialogActions.OPEN_DIALOG({ id: PROJECT_MOVE_FORM_NAME, data: resource }));\n    };\n};\n\nexport const moveProject = (resource: MoveToFormDialogData) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    dispatch(progressIndicatorActions.START_WORKING(PROJECT_MOVE_FORM_NAME));\n    try {\n        const userUuid = getUserUuid(getState());\n        if (!userUuid) {\n            throw new Error(\"User UUID not found in state.\");\n        }\n        const newProject = await services.projectService.update(resource.uuid, { ownerUuid: resource.ownerUuid });\n        dispatch(projectPanelDataActions.REQUEST_ITEMS());\n\n        dispatch(dialogActions.CLOSE_DIALOG({ id: PROJECT_MOVE_FORM_NAME }));\n        await dispatch<any>(loadSidePanelTreeProjects(userUuid));\n        return newProject;\n    } catch (e) {\n        const error = getCommonResourceServiceError(e);\n        if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"A project with the same name already exists in the target project.\", hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        } else if (error === CommonResourceServiceError.OWNERSHIP_CYCLE) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Cannot move a project into itself or one of its sub-projects.\", hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        } else {\n            dispatch(dialogActions.CLOSE_DIALOG({ id: PROJECT_MOVE_FORM_NAME }));\n            throw new Error(`Could not move the project: ${e instanceof Error ? e.message : \"Unknown error.\"}`);\n        }\n        return;\n    } finally {\n        dispatch(progressIndicatorActions.STOP_WORKING(PROJECT_MOVE_FORM_NAME));\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/store/projects/project-update-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { getCommonResourceServiceError, CommonResourceServiceError } from \"services/common-service/common-resource-service\";\nimport { ServiceRepository } from \"services/services\";\nimport { projectPanelDataActions } from \"store/project-panel/project-panel-action-bind\";\nimport { GroupClass } from \"models/group\";\nimport { Participant } from \"views-components/sharing-dialog/participant-select\";\nimport { ProjectProperties } from \"./project-create-actions\";\nimport { getResource } from \"store/resources/resources\";\nimport { ProjectResource } from \"models/project\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { reloadProjectMatchingUuid } from \"store/workbench/workbench-actions\";\n\nexport interface ProjectUpdateFormDialogData {\n    uuid: string;\n    name: string;\n    users?: Participant[];\n    description?: string | null;\n    properties?: ProjectProperties;\n}\n\nexport const PROJECT_UPDATE_FORM_NAME = \"projectUpdateFormName\";\nexport const PROJECT_UPDATE_PROPERTIES_FORM_NAME = \"projectUpdatePropertiesFormName\";\n\nexport const openProjectUpdateDialog = (resource: ProjectUpdateFormDialogData) => (dispatch: Dispatch, getState: () => RootState) => {\n    // Get complete project resource from store to handle consumers passing in partial resources\n    const project = getResource<ProjectResource>(resource.uuid)(getState().resources);\n    dispatch(\n        dialogActions.OPEN_DIALOG({\n            id: PROJECT_UPDATE_FORM_NAME,\n            data: {\n                sourcePanel: GroupClass.PROJECT,\n                ...project,\n            },\n        })\n    );\n};\n\nexport const updateProject =\n    (project: ProjectUpdateFormDialogData, setSubmitErr: (errMsg: string) => void) =>\n        async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n            const uuid = project.uuid || \"\";\n            try {\n                const updatedProject = await services.projectService.update(\n                    uuid,\n                    {\n                        name: project.name,\n                        description: project.description,\n                        properties: project.properties,\n                    },\n                    false\n                );\n            dispatch(projectPanelDataActions.REQUEST_ITEMS());\n            dispatch<any>(reloadProjectMatchingUuid([uuid]));\n            dispatch(dialogActions.CLOSE_DIALOG({ id: PROJECT_UPDATE_FORM_NAME }));\n            return updatedProject;\n        } catch (e) {\n            const error = getCommonResourceServiceError(e);\n            if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                setSubmitErr(\"Project with the same name already exists.\");\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: \"Project with the same name already exists.\",\n                    hideDuration: 2000,\n                    kind: SnackbarKind.ERROR,\n                }));\n            } else {\n                dispatch(dialogActions.CLOSE_DIALOG({ id: PROJECT_UPDATE_FORM_NAME }));\n                const errMsg = e.errors ? 
e.errors.join(\"\") : \"There was an error while updating the project\";\n                dispatch(\n                    snackbarActions.OPEN_SNACKBAR({\n                        message: errMsg,\n                        hideDuration: 2000,\n                        kind: SnackbarKind.ERROR,\n                    })\n                );\n            }\n            return;\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/properties/properties-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from 'common/unionize';\n\nexport const propertiesActions = unionize({\n    SET_PROPERTY: ofType<{ key: string, value: any }>(),\n    DELETE_PROPERTY: ofType<string>(),\n});\n\nexport type PropertiesAction = UnionOf<typeof propertiesActions>;\n"
  },
  {
    "path": "services/workbench2/src/store/properties/properties-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { PropertiesState, setProperty, deleteProperty } from './properties';\nimport { PropertiesAction, propertiesActions } from './properties-actions';\n\n\nexport const propertiesReducer = (state: PropertiesState = {}, action: PropertiesAction) =>\n    propertiesActions.match(action, {\n        SET_PROPERTY: ({ key, value }) => setProperty(key, value)(state),\n        DELETE_PROPERTY: key => deleteProperty(key)(state),\n        default: () => state,\n    });"
  },
  {
    "path": "services/workbench2/src/store/properties/properties.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport type PropertiesState = { [key: string]: any };\n\nexport const getProperty = <T>(id: string) =>\n    (state: PropertiesState): T | undefined =>\n        state[id];\n\nexport const setProperty = <T>(id: string, data: T) =>\n    (state: PropertiesState) => ({\n        ...state,\n        [id]: data\n    });\n\nexport const deleteProperty = (id: string) =>\n    (state: PropertiesState) => {\n        const newState = { ...state };\n        delete newState[id];\n        return newState;\n    };\n\n"
  },
  {
    "path": "services/workbench2/src/store/public-favorites/public-favorites-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"../store\";\nimport { checkPublicFavorite } from \"./public-favorites\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { ServiceRepository } from \"services/services\";\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { addDisabledButton, removeDisabledButton } from \"store/multiselect/multiselect-actions\";\nimport { ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport { SidePanelTreeCategory, loadSidePanelTreeProjects } from \"store/side-panel-tree/side-panel-tree-actions\";\n\nexport const publicFavoritesActions = unionize({\n    TOGGLE_PUBLIC_FAVORITE: ofType<{ resourceUuid: string }>(),\n    CHECK_PRESENCE_IN_PUBLIC_FAVORITES: ofType<string[]>(),\n    UPDATE_PUBLIC_FAVORITES: ofType<Record<string, boolean>>()\n});\n\nexport type PublicFavoritesAction = UnionOf<typeof publicFavoritesActions>;\n\nexport const togglePublicFavorite = (resource: { uuid: string; name: string }) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<any> => {\n        dispatch(progressIndicatorActions.START_WORKING(\"togglePublicFavorite\"));\n        dispatch<any>(addDisabledButton(ContextMenuActionNames.ADD_TO_PUBLIC_FAVORITES))\n        const uuidPrefix = getState().auth.config.uuidPrefix;\n        const uuid = `${uuidPrefix}-j7d0g-publicfavorites`;\n        dispatch(publicFavoritesActions.TOGGLE_PUBLIC_FAVORITE({ resourceUuid: resource.uuid }));\n        const isPublicFavorite = checkPublicFavorite(resource.uuid, getState().publicFavorites);\n        dispatch(snackbarActions.OPEN_SNACKBAR({\n            message: isPublicFavorite\n                ? \"Removing from public favorites...\"\n                : \"Adding to public favorites...\",\n            kind: SnackbarKind.INFO\n        }));\n\n        const promise: any = isPublicFavorite\n            ? services.favoriteService.delete({ userUuid: uuid, resourceUuid: resource.uuid })\n            : services.favoriteService.create({ userUuid: uuid, resource });\n\n        return promise\n            .then(() => {\n                dispatch(publicFavoritesActions.UPDATE_PUBLIC_FAVORITES({ [resource.uuid]: !isPublicFavorite }));\n                dispatch(snackbarActions.CLOSE_SNACKBAR());\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: isPublicFavorite\n                        ? 
\"Removed from public favorites\"\n                        : \"Added to public favorites\",\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS\n                }));\n                dispatch<any>(removeDisabledButton(ContextMenuActionNames.ADD_TO_PUBLIC_FAVORITES))\n                dispatch(progressIndicatorActions.STOP_WORKING(\"togglePublicFavorite\"));\n                dispatch<any>(loadSidePanelTreeProjects(SidePanelTreeCategory.PUBLIC_FAVORITES));\n            })\n            .catch((e: any) => {\n                dispatch(progressIndicatorActions.STOP_WORKING(\"togglePublicFavorite\"));\n                throw e;\n            });\n    };\n\nexport const updatePublicFavorites = (resourceUuids: string[]) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const uuidPrefix = getState().auth.config.uuidPrefix;\n        const uuid = `${uuidPrefix}-j7d0g-publicfavorites`;\n        dispatch(publicFavoritesActions.CHECK_PRESENCE_IN_PUBLIC_FAVORITES(resourceUuids));\n        services.favoriteService\n            .checkPresenceInFavorites(uuid, resourceUuids)\n            .then((results: any) => {\n                dispatch(publicFavoritesActions.UPDATE_PUBLIC_FAVORITES(results));\n            });\n    };\n\nexport const getIsAdmin = () =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const resource = getState().auth.user!.isAdmin;\n        return resource;\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/public-favorites/public-favorites-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { PublicFavoritesState } from \"./public-favorites\";\nimport { PublicFavoritesAction, publicFavoritesActions } from \"./public-favorites-actions\";\n\nexport const publicFavoritesReducer = (state: PublicFavoritesState = {}, action: PublicFavoritesAction) =>\n    publicFavoritesActions.match(action, {\n        UPDATE_PUBLIC_FAVORITES: publicFavorites => ({ ...state, ...publicFavorites }),\n        default: () => state\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/public-favorites/public-favorites.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport type PublicFavoritesState = Record<string, boolean>;\n\nexport const checkPublicFavorite = (uuid: string, state: PublicFavoritesState) => state[uuid] === true;\n"
  },
  {
    "path": "services/workbench2/src/store/public-favorites-panel/public-favorites-action.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { bindDataExplorerActions } from \"store/data-explorer/data-explorer-action\";\n\nexport const PUBLIC_FAVORITE_PANEL_ID = \"publicFavoritePanel\";\nexport const publicFavoritePanelActions = bindDataExplorerActions(PUBLIC_FAVORITE_PANEL_ID);\n\nexport const loadPublicFavoritePanel = () => (dispatch: Dispatch) => {\n    dispatch(publicFavoritePanelActions.RESET_EXPLORER_SEARCH_VALUE());\n    dispatch(publicFavoritePanelActions.REQUEST_ITEMS());\n};"
  },
  {
    "path": "services/workbench2/src/store/public-favorites-panel/public-favorites-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ServiceRepository } from 'services/services';\nimport { MiddlewareAPI, Dispatch } from 'redux';\nimport { DataExplorerMiddlewareService, dataExplorerToListParams, getDataExplorerColumnFilters, listResultsToDataExplorerItemsMeta } from 'store/data-explorer/data-explorer-middleware-service';\nimport { RootState } from 'store/store';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { DataExplorer, getDataExplorer } from 'store/data-explorer/data-explorer-reducer';\nimport { resourcesActions } from 'store/resources/resources-actions';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { FavoritePanelColumnNames } from 'views/favorite-panel/favorite-panel-columns';\nimport { publicFavoritePanelActions } from 'store/public-favorites-panel/public-favorites-action';\nimport { DataColumns } from 'components/data-table/data-column';\nimport { serializeSimpleObjectTypeFilters } from '../resource-type-filters/resource-type-filters';\nimport { LinkClass, LinkResource } from 'models/link';\nimport { progressIndicatorActions } from 'store/progress-indicator/progress-indicator-actions';\nimport { updatePublicFavorites } from 'store/public-favorites/public-favorites-actions';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { ListArguments, ListResults } from 'services/common-service/common-service';\nimport { couldNotFetchItemsAvailable } from 'store/data-explorer/data-explorer-action';\n\nexport class PublicFavoritesMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    getTypeFilters(dataExplorer: DataExplorer) {\n        const columns = dataExplorer.columns as DataColumns<string, GroupContentsResource>;\n        return serializeSimpleObjectTypeFilters(getDataExplorerColumnFilters(columns, FavoritePanelColumnNames.TYPE));\n    }\n\n    getLinkFilters(dataExplorer: DataExplorer, publicProjectUuid: string): string {\n        return new FilterBuilder()\n            .addEqual('link_class', LinkClass.STAR)\n            .addEqual('owner_uuid', publicProjectUuid)\n            .addIsA(\"head_uuid\", this.getTypeFilters(dataExplorer))\n            .getFilters();\n    }\n\n    getResourceFilters(dataExplorer: DataExplorer, uuids: string[]): string {\n        return new FilterBuilder()\n            .addIn(\"uuid\", uuids)\n            .addILike(\"name\", dataExplorer.searchValue)\n            .addIsA(\"uuid\", this.getTypeFilters(dataExplorer))\n            .getFilters();\n    }\n\n    getLinkParams(dataExplorer: DataExplorer, publicProjectUuid: string): ListArguments {\n        return {\n            ...dataExplorerToListParams(dataExplorer),\n            filters: this.getLinkFilters(dataExplorer, publicProjectUuid),\n            count: \"none\",\n        };\n    }\n\n    getCountParams(dataExplorer: DataExplorer, publicProjectUuid: string): ListArguments {\n        return {\n            filters: this.getLinkFilters(dataExplorer, publicProjectUuid),\n            limit: 0,\n            count: \"exact\",\n        };\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const dataExplorer = getDataExplorer(api.getState().dataExplorer, this.getId());\n        if (!dataExplorer) {\n            
api.dispatch(favoritesPanelDataExplorerIsNotSet());\n        } else {\n            try {\n                if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n\n                const uuidPrefix = api.getState().auth.config.uuidPrefix;\n                const publicProjectUuid = `${uuidPrefix}-j7d0g-publicfavorites`;\n\n                // Get items\n                const responseLinks = await this.services.linkService.list(this.getLinkParams(dataExplorer, publicProjectUuid));\n                const uuids = responseLinks.items.map(it => it.headUuid);\n\n                const orderedItems = await this.services.groupsService.contents(\"\", {\n                    filters: this.getResourceFilters(dataExplorer, uuids),\n                    include: [\"owner_uuid\", \"container_uuid\"],\n                });\n\n                api.dispatch(resourcesActions.SET_RESOURCES(orderedItems.items));\n                api.dispatch(resourcesActions.SET_RESOURCES(orderedItems.included));\n                api.dispatch(publicFavoritePanelActions.SET_ITEMS({\n                    ...listResultsToDataExplorerItemsMeta(responseLinks),\n                    items: orderedItems.items.map((resource: any) => resource.uuid),\n                }));\n                api.dispatch<any>(updatePublicFavorites(uuids));\n            } catch (e) {\n                api.dispatch(publicFavoritePanelActions.SET_ITEMS({\n                    items: [],\n                    itemsAvailable: 0,\n                    page: 0,\n                    rowsPerPage: dataExplorer.rowsPerPage\n                }));\n                api.dispatch(couldNotFetchPublicFavorites());\n            } finally {\n                api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n            }\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        const uuidPrefix = api.getState().auth.config.uuidPrefix;\n        const publicProjectUuid = `${uuidPrefix}-j7d0g-publicfavorites`;\n\n        if (criteriaChanged) {\n            // Get itemsAvailable\n            return this.services.linkService.list(this.getCountParams(dataExplorer, publicProjectUuid))\n                .then((results: ListResults<LinkResource>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(publicFavoritePanelActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        // Dispatch the action; calling the creator alone has no effect\n                        api.dispatch(couldNotFetchItemsAvailable());\n                    }\n                });\n        }\n    }\n}\n\nconst favoritesPanelDataExplorerIsNotSet = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Favorites panel is not ready.',\n        kind: SnackbarKind.ERROR\n    });\n\nconst couldNotFetchPublicFavorites = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch public favorites contents.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/recent-wf-runs/recent-wf-runs-action.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { bindDataExplorerActions } from \"../data-explorer/data-explorer-action\";\n\nexport const RECENT_WF_RUNS_ID = \"recentWorkflowRuns\";\nexport const recentWorkflowRunsActions = bindDataExplorerActions(RECENT_WF_RUNS_ID);\n\nexport const loadRecentWorkflows = () => (dispatch: Dispatch) => {\n    dispatch(recentWorkflowRunsActions.REQUEST_ITEMS());\n}\n"
  },
  {
    "path": "services/workbench2/src/store/recent-wf-runs/recent-wf-runs-middleware-sevice.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ServiceRepository } from 'services/services';\nimport { MiddlewareAPI, Dispatch } from 'redux';\nimport { DataExplorerMiddlewareService, dataExplorerToListParams, listResultsToDataExplorerItemsMeta } from 'store/data-explorer/data-explorer-middleware-service';\nimport { RootState } from 'store/store';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { DataExplorer, getDataExplorer } from 'store/data-explorer/data-explorer-reducer';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { ContentsArguments } from 'services/groups-service/groups-service';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { containerRequestFieldsNoMounts } from 'models/container-request';\nimport { progressIndicatorActions } from '../progress-indicator/progress-indicator-actions';\nimport { containerFieldsNoMounts } from 'store/processes/processes-actions';\nimport { recentWorkflowRunsActions } from './recent-wf-runs-action';\n\nexport class RecentWorkflowsMiddlewareService extends DataExplorerMiddlewareService {\n        constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    getParams(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): ContentsArguments | null {\n        return {\n            ...dataExplorerToListParams(dataExplorer),\n            filters: new FilterBuilder()\n                .addIsA('uuid', 'arvados#containerRequest')\n                .addEqual('container_requests.requesting_container_uuid', null)\n                .getFilters(),\n            select: containerRequestFieldsNoMounts,\n            count: 'none',\n            include: [\"owner_uuid\", \"container_uuid\"]\n        };\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n\n        try {\n            if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n            const params = this.getParams(api, dataExplorer);\n\n            // Get items\n            if (params !== null) {\n                const containerRequests = await this.services.groupsService.contents('',\n                    {\n                        ...this.getParams(api, dataExplorer),\n                        select: [...containerRequestFieldsNoMounts, \"can_write\", \"can_manage\"].concat(containerFieldsNoMounts)\n                });\n                api.dispatch(updateResources(containerRequests.items));\n                if (containerRequests.included) {\n                    api.dispatch(updateResources(containerRequests.included));\n                }\n\n                api.dispatch(recentWorkflowRunsActions.SET_ITEMS({\n                    ...listResultsToDataExplorerItemsMeta(containerRequests),\n                    items: containerRequests.items.map(resource => resource.uuid),\n                }));\n            } else {\n                api.dispatch(recentWorkflowRunsActions.SET_ITEMS({\n                    itemsAvailable: 0,\n                    page: 0,\n                    rowsPerPage: dataExplorer.rowsPerPage,\n                    items: [],\n                }));\n            }\n        } catch {\n            api.dispatch(snackbarActions.OPEN_SNACKBAR({\n             
   message: 'Could not fetch recent workflow runs.',\n                kind: SnackbarKind.ERROR\n            }));\n        } finally {\n            if (!background) { api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId())); }\n        }\n    }\n\n    async requestCount() {}\n}\n"
  },
  {
    "path": "services/workbench2/src/store/recently-visited/recently-visited-actions.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from \"store/store\";\nimport { Dispatch } from 'redux';\nimport { ServiceRepository } from \"services/services\";\nimport { showErrorSnackbar } from \"store/snackbar/snackbar-actions\";\nimport { updateResources } from \"store/resources/resources-actions\";\nimport { authActions } from \"store/auth/auth-action\";\nimport { bindDataExplorerActions } from 'store/data-explorer/data-explorer-action';\nimport { RecentUuid } from \"models/user\";\n\nexport const RECENTLY_VISITED_PANEL_ID = \"recentlyVisited\";\nconst RECENTS_LOAD_ERROR = \"Could not load recently visited\";\nconst SAVE_RECENT_UUIDS_ERROR = \"Could not save recent uuids\";\n\nconst recentlyVisitedActions = bindDataExplorerActions(RECENTLY_VISITED_PANEL_ID);\n\nexport const loadRecentlyVisited = () => (dispatch: Dispatch) => {\n    dispatch(recentlyVisitedActions.REQUEST_ITEMS());\n};\n\nexport const saveRecentlyVisited = (uuid: string) => async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n    const user = getState().auth.user;\n    if (user) {\n        if (user.uuid !== uuid) {\n            const previousRecents = user.prefs?.wb?.recentUuids || [];\n            const updatedRecents = updateRecentUuids(previousRecents, uuid);\n            const userWithUpdatedRecents = {\n                ...user,\n                prefs: { ...user.prefs, wb: { ...(user.prefs?.wb || {}), recentUuids: updatedRecents } },\n            };\n            try {\n                const updatedUser = await services.userService.update(user.uuid, userWithUpdatedRecents);\n                dispatch(updateResources([updatedUser]));\n                // If edited user is current user, update auth store\n                const currentUserUuid = getState().auth.user?.uuid;\n                if (currentUserUuid && currentUserUuid === updatedUser.uuid) {\n                    dispatch(authActions.USER_DETAILS_SUCCESS(updatedUser));\n                }\n            } catch (e) {\n                dispatch(showErrorSnackbar(SAVE_RECENT_UUIDS_ERROR));\n            }\n        }\n    } else {\n        dispatch(showErrorSnackbar(RECENTS_LOAD_ERROR));\n    }\n};\n\nfunction updateRecentUuids(prevRecents: RecentUuid[], newUuid: string, maxLength = 12): RecentUuid[] {\n    const newRecentUuid: RecentUuid = { uuid: newUuid, lastVisited: new Date().toISOString() };\n\n    if (!prevRecents) {\n        return [newRecentUuid];\n    }\n\n    const index = prevRecents.findIndex(recent => recent.uuid === newUuid);\n    // Remove existing occurrence, if any\n    if (index !== -1) {\n        prevRecents.splice(index, 1);\n    }\n\n    // Add to front\n    prevRecents.unshift(newRecentUuid);\n\n    // Enforce max length\n    if (prevRecents.length > maxLength) {\n        prevRecents.pop();\n    }\n\n    return prevRecents;\n}\n"
  },
  {
    "path": "services/workbench2/src/store/recently-visited/recently-visited-middleware-services.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport {\n    DataExplorerMiddlewareService,\n    dataExplorerToListParams,\n} from \"../data-explorer/data-explorer-middleware-service\";\nimport { ServiceRepository } from \"services/services\";\nimport { MiddlewareAPI, Dispatch } from \"redux\";\nimport { RootState } from 'store/store';\nimport { getDataExplorer, DataExplorer } from 'store/data-explorer/data-explorer-reducer';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { ContentsArguments } from 'services/groups-service/groups-service';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { progressIndicatorActions } from 'store/progress-indicator/progress-indicator-actions';\nimport { RecentUuid } from 'models/user';\n\nexport class RecentlyVisitedMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        try {\n            if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n            const response = await this.services.groupsService\n                .contents('', getParams(dataExplorer, state.auth.user?.prefs?.wb?.recentUuids || []));\n            api.dispatch(updateResources(response.items));\n            if (response.included) { api.dispatch(updateResources(response.included)); }\n        } catch (e) {\n            api.dispatch(couldNotFetchRecentlyVisited());\n        } finally {\n            api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n        }\n    }\n\n    // required by DataExplorerMiddlewareService, but not used\n    async requestCount() {}\n}\n\nconst getParams = (dataExplorer: DataExplorer, recents: RecentUuid[]): ContentsArguments => ({\n    ...dataExplorerToListParams(dataExplorer),\n    filters: new FilterBuilder().addIn('uuid', recents.map(recent => recent.uuid)).getFilters(),\n    include: [\"owner_uuid\", \"container_uuid\"]\n});\n\nconst couldNotFetchRecentlyVisited = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch recently visited items.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/redux-saga.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { call, all, spawn } from \"redux-saga/effects\";\nimport {\n    setTreePickerProjectSearchWatcher,\n    loadProjectWatcher,\n    loadSearchWatcher,\n    refreshTreePickerWatcher,\n    setTreePickerCollectionFilterWatcher,\n    loadFavoritesProjectWatcher,\n    loadPublicFavoritesProjectWatcher,\n} from \"./tree-picker/tree-picker-actions\";\n\n/**\n* Auto restart sagas with error logging\n*/\nexport const rootSaga = function* () {\n   const sagas = [\n       setTreePickerProjectSearchWatcher,\n       setTreePickerCollectionFilterWatcher,\n       refreshTreePickerWatcher,\n       loadProjectWatcher,\n       loadSearchWatcher,\n       loadFavoritesProjectWatcher,\n       loadPublicFavoritesProjectWatcher,\n   ];\n\n   yield all(sagas.map(saga =>\n       spawn(function* () {\n           while (true) {\n               try {\n                   yield call(saga);\n                   break;\n               } catch (e) {\n                   console.error(e);\n               }\n           }\n       }))\n   );\n}\n"
  },
  {
    "path": "services/workbench2/src/store/repositories/repositories-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { bindDataExplorerActions } from 'store/data-explorer/data-explorer-action';\nimport { RootState } from 'store/store';\nimport { getUserUuid } from \"common/getuser\";\nimport { ServiceRepository } from \"services/services\";\nimport { navigateToRepositories } from \"store/navigation/navigation-action\";\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { RepositoryResource } from \"models/repositories\";\nimport { startSubmit, reset, stopSubmit, FormErrors } from \"redux-form\";\nimport { getCommonResourceServiceError, CommonResourceServiceError } from \"services/common-service/common-resource-service\";\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\n\nexport const repositoriesActions = unionize({\n    SET_REPOSITORIES: ofType<any>(),\n});\n\nexport type RepositoriesActions = UnionOf<typeof repositoriesActions>;\n\nexport const REPOSITORIES_PANEL = 'repositoriesPanel';\nexport const REPOSITORIES_SAMPLE_GIT_DIALOG = 'repositoriesSampleGitDialog';\nexport const REPOSITORY_ATTRIBUTES_DIALOG = 'repositoryAttributesDialog';\nexport const REPOSITORY_CREATE_FORM_NAME = 'repositoryCreateFormName';\nexport const REPOSITORY_REMOVE_DIALOG = 'repositoryRemoveDialog';\n\nexport const openRepositoriesSampleGitDialog = () =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const uuidPrefix = getState().properties.uuidPrefix;\n        dispatch(dialogActions.OPEN_DIALOG({ id: REPOSITORIES_SAMPLE_GIT_DIALOG, data: { uuidPrefix } }));\n    };\n\nexport const openRepositoryAttributes = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const repositoryData = getState().repositories.items.find(it => it.uuid === uuid);\n        dispatch(dialogActions.OPEN_DIALOG({ id: REPOSITORY_ATTRIBUTES_DIALOG, data: { repositoryData } }));\n    };\n\nexport const openRepositoryCreateDialog = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const userUuid = getUserUuid(getState());\n        if (!userUuid) { return; }\n        const user = await services.userService.get(userUuid!);\n        dispatch(reset(REPOSITORY_CREATE_FORM_NAME));\n        dispatch(dialogActions.OPEN_DIALOG({ id: REPOSITORY_CREATE_FORM_NAME, data: { user } }));\n    };\n\nexport const createRepository = (repository: RepositoryResource) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const userUuid = getUserUuid(getState());\n        if (!userUuid) { return; }\n        const user = await services.userService.get(userUuid!);\n        dispatch(startSubmit(REPOSITORY_CREATE_FORM_NAME));\n        try {\n            const newRepository = await services.repositoriesService.create({ name: `${user.username}/${repository.name}` });\n            dispatch(dialogActions.CLOSE_DIALOG({ id: REPOSITORY_CREATE_FORM_NAME }));\n            dispatch(reset(REPOSITORY_CREATE_FORM_NAME));\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Repository has been successfully created.\", hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n            dispatch<any>(loadRepositoriesData());\n            return newRepository;\n        } catch (e) {\n            const 
error = getCommonResourceServiceError(e);\n            if (error === CommonResourceServiceError.NAME_HAS_ALREADY_BEEN_TAKEN) {\n                dispatch(stopSubmit(REPOSITORY_CREATE_FORM_NAME, { name: 'Repository with the same name already exists.' } as FormErrors));\n            }\n            return undefined;\n        }\n    };\n\nexport const openRemoveRepositoryDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: REPOSITORY_REMOVE_DIALOG,\n            data: {\n                title: 'Remove repository',\n                text: 'Are you sure you want to remove this repository?',\n                confirmButtonLabel: 'Remove',\n                uuid\n            }\n        }));\n    };\n\nexport const removeRepository = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removing ...', kind: SnackbarKind.INFO }));\n        await services.repositoriesService.delete(uuid);\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removed.', hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n        dispatch<any>(loadRepositoriesData());\n    };\n\nconst repositoriesBindedActions = bindDataExplorerActions(REPOSITORIES_PANEL);\n\nexport const openRepositoriesPanel = () =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch<any>(navigateToRepositories);\n    };\n\nexport const loadRepositoriesData = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const repositories = await services.repositoriesService.list();\n        dispatch(repositoriesActions.SET_REPOSITORIES(repositories.items));\n    };\n\nexport const loadRepositoriesPanel = () =>\n    (dispatch: Dispatch) => {\n        dispatch(repositoriesBindedActions.REQUEST_ITEMS());\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/repositories/repositories-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { repositoriesActions, RepositoriesActions } from 'store/repositories/repositories-actions';\nimport { RepositoryResource } from 'models/repositories';\n\ninterface Repositories {\n    items: RepositoryResource[];\n}\n\nconst initialState: Repositories = {\n    items: []\n};\n\nexport const repositoriesReducer = (state = initialState, action: RepositoriesActions): Repositories =>\n    repositoriesActions.match(action, {\n        SET_REPOSITORIES: items => ({ ...state, items }),\n        default: () => state\n    });"
  },
  {
    "path": "services/workbench2/src/store/resource-type-filters/resource-type-filters.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { getInitialResourceTypeFilters, serializeResourceTypeFilters, ObjectTypeFilter, CollectionTypeFilter, ProcessTypeFilter, GroupTypeFilter, buildProcessStatusFilters, ProcessStatusFilter } from './resource-type-filters';\nimport { ResourceKind } from 'models/resource';\nimport { selectNode, deselectNode } from 'models/tree';\nimport { pipe } from 'lodash/fp';\nimport { FilterBuilder } from 'services/api/filter-builder';\n\ndescribe(\"buildProcessStatusFilters\", () => {\n    [\n        [ProcessStatusFilter.ALL, \"\"],\n        [ProcessStatusFilter.ONHOLD, `[\"state\",\"!=\",\"Final\"],[\"priority\",\"=\",\"0\"],[\"container.state\",\"in\",[\"Queued\",\"Locked\"]]`],\n        [ProcessStatusFilter.COMPLETED, `[\"container.state\",\"=\",\"Complete\"],[\"container.exit_code\",\"=\",\"0\"]`],\n        [ProcessStatusFilter.FAILED, `[\"container.state\",\"=\",\"Complete\"],[\"container.exit_code\",\"!=\",\"0\"]`],\n        [ProcessStatusFilter.QUEUED, `[\"container.state\",\"in\",[\"Queued\",\"Locked\"]],[\"priority\",\"!=\",\"0\"]`],\n        [ProcessStatusFilter.CANCELLED, `[\"container.state\",\"=\",\"Cancelled\"]`],\n        [ProcessStatusFilter.RUNNING, `[\"container.state\",\"=\",\"Running\"]`],\n    ].forEach(([status, expected]) => {\n        it(`can filter \"${status}\" processes`, () => {\n            const filters = buildProcessStatusFilters(new FilterBuilder(), status);\n            expect(filters.getFilters())\n                .to.equal(expected);\n        })\n    });\n});\n\ndescribe(\"serializeResourceTypeFilters\", () => {\n    it(\"should serialize all filters\", () => {\n        const filters = getInitialResourceTypeFilters();\n        const serializedFilters = serializeResourceTypeFilters(filters);\n        expect(serializedFilters)\n            .to.equal(`[\"uuid\",\"is_a\",[\"${ResourceKind.PROJECT}\",\"${ResourceKind.COLLECTION}\",\"${ResourceKind.WORKFLOW}\",\"${ResourceKind.PROCESS}\"]],[\"collections.properties.type\",\"not in\",[\"log\",\"intermediate\"]],[\"container_requests.requesting_container_uuid\",\"=\",null]`);\n    });\n\n    it(\"should serialize all but collection filters\", () => {\n        const filters = deselectNode(ObjectTypeFilter.COLLECTION, true)(getInitialResourceTypeFilters());\n        const serializedFilters = serializeResourceTypeFilters(filters);\n        expect(serializedFilters)\n            .to.equal(`[\"uuid\",\"is_a\",[\"${ResourceKind.PROJECT}\",\"${ResourceKind.WORKFLOW}\",\"${ResourceKind.PROCESS}\"]],[\"container_requests.requesting_container_uuid\",\"=\",null]`);\n    });\n\n    it(\"should serialize output collections and projects\", () => {\n        const filters = pipe(\n            () => getInitialResourceTypeFilters(),\n            deselectNode(ObjectTypeFilter.DEFINITION, true),\n            deselectNode(ProcessTypeFilter.MAIN_PROCESS, true),\n            deselectNode(CollectionTypeFilter.GENERAL_COLLECTION, true),\n            deselectNode(CollectionTypeFilter.LOG_COLLECTION, true),\n            deselectNode(CollectionTypeFilter.INTERMEDIATE_COLLECTION, true),\n        )();\n\n        const serializedFilters = serializeResourceTypeFilters(filters);\n        expect(serializedFilters)\n            .to.equal(`[\"uuid\",\"is_a\",[\"${ResourceKind.PROJECT}\",\"${ResourceKind.COLLECTION}\"]],[\"collections.properties.type\",\"in\",[\"output\"]]`);\n    });\n\n    it(\"should serialize output collections and 
projects\", () => {\n        const filters = pipe(\n            () => getInitialResourceTypeFilters(),\n            deselectNode(ObjectTypeFilter.DEFINITION, true),\n            deselectNode(ProcessTypeFilter.MAIN_PROCESS, true),\n            deselectNode(CollectionTypeFilter.GENERAL_COLLECTION, true),\n            deselectNode(CollectionTypeFilter.LOG_COLLECTION, true),\n            deselectNode(CollectionTypeFilter.INTERMEDIATE_COLLECTION, true),\n        )();\n\n        const serializedFilters = serializeResourceTypeFilters(filters);\n        expect(serializedFilters)\n            .to.equal(`[\"uuid\",\"is_a\",[\"${ResourceKind.PROJECT}\",\"${ResourceKind.COLLECTION}\"]],[\"collections.properties.type\",\"in\",[\"output\"]]`);\n    });\n\n    it(\"should serialize general collections\", () => {\n        const filters = pipe(\n            () => getInitialResourceTypeFilters(),\n            deselectNode(ObjectTypeFilter.PROJECT, true),\n            deselectNode(ObjectTypeFilter.DEFINITION, true),\n            deselectNode(ProcessTypeFilter.MAIN_PROCESS, true),\n            deselectNode(CollectionTypeFilter.OUTPUT_COLLECTION, true)\n        )();\n\n        const serializedFilters = serializeResourceTypeFilters(filters);\n        expect(serializedFilters)\n            .to.equal(`[\"uuid\",\"is_a\",[\"${ResourceKind.COLLECTION}\"]],[\"collections.properties.type\",\"not in\",[\"output\",\"log\",\"intermediate\"]]`);\n    });\n\n    it(\"should serialize only main processes\", () => {\n        const filters = pipe(\n            () => getInitialResourceTypeFilters(),\n            deselectNode(ObjectTypeFilter.PROJECT, true),\n            deselectNode(ProcessTypeFilter.CHILD_PROCESS, true),\n            deselectNode(ObjectTypeFilter.COLLECTION, true),\n            deselectNode(ObjectTypeFilter.DEFINITION, true),\n        )();\n\n        const serializedFilters = serializeResourceTypeFilters(filters);\n        expect(serializedFilters)\n            .to.equal(`[\"uuid\",\"is_a\",[\"${ResourceKind.PROCESS}\"]],[\"container_requests.requesting_container_uuid\",\"=\",null]`);\n    });\n\n    it(\"should serialize only child processes\", () => {\n        const filters = pipe(\n            () => getInitialResourceTypeFilters(),\n            deselectNode(ObjectTypeFilter.PROJECT, true),\n            deselectNode(ProcessTypeFilter.MAIN_PROCESS, true),\n            deselectNode(ObjectTypeFilter.DEFINITION, true),\n            deselectNode(ObjectTypeFilter.COLLECTION, true),\n\n            selectNode(ProcessTypeFilter.CHILD_PROCESS, true),\n        )();\n\n        const serializedFilters = serializeResourceTypeFilters(filters);\n        expect(serializedFilters)\n            .to.equal(`[\"uuid\",\"is_a\",[\"${ResourceKind.PROCESS}\"]],[\"container_requests.requesting_container_uuid\",\"!=\",null]`);\n    });\n\n    it(\"should serialize all project types\", () => {\n        const filters = pipe(\n            () => getInitialResourceTypeFilters(),\n            deselectNode(ObjectTypeFilter.COLLECTION, true),\n            deselectNode(ObjectTypeFilter.DEFINITION, true),\n            deselectNode(ProcessTypeFilter.MAIN_PROCESS, true),\n        )();\n\n        const serializedFilters = serializeResourceTypeFilters(filters);\n        expect(serializedFilters)\n            .to.equal(`[\"uuid\",\"is_a\",[\"${ResourceKind.GROUP}\"]]`);\n    });\n\n    it(\"should serialize filter groups\", () => {\n        const filters = pipe(\n            () => getInitialResourceTypeFilters(),\n            
deselectNode(GroupTypeFilter.PROJECT, true),\n            deselectNode(ObjectTypeFilter.DEFINITION, true),\n            deselectNode(ProcessTypeFilter.MAIN_PROCESS, true),\n            deselectNode(ObjectTypeFilter.COLLECTION, true),\n        )();\n\n        const serializedFilters = serializeResourceTypeFilters(filters);\n        expect(serializedFilters)\n            .to.equal(`[\"uuid\",\"is_a\",[\"${ResourceKind.GROUP}\"]],[\"groups.group_class\",\"=\",\"filter\"]`);\n    });\n\n    it(\"should serialize projects (normal)\", () => {\n        const filters = pipe(\n            () => getInitialResourceTypeFilters(),\n            deselectNode(GroupTypeFilter.FILTER_GROUP, true),\n            deselectNode(ObjectTypeFilter.DEFINITION, true),\n            deselectNode(ProcessTypeFilter.MAIN_PROCESS, true),\n            deselectNode(ObjectTypeFilter.COLLECTION, true),\n        )();\n\n        const serializedFilters = serializeResourceTypeFilters(filters);\n        expect(serializedFilters)\n            .to.equal(`[\"uuid\",\"is_a\",[\"${ResourceKind.GROUP}\"]],[\"groups.group_class\",\"=\",\"project\"]`);\n    });\n\n});\n"
  },
  {
    "path": "services/workbench2/src/store/resource-type-filters/resource-type-filters.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { difference, pipe, values, includes, __ } from 'lodash/fp';\nimport { createTree, setNode, TreeNodeStatus, TreeNode, Tree } from 'models/tree';\nimport { DataTableFilterItem, DataTableFilters } from 'components/data-table-filters/data-table-filters';\nimport { ResourceKind } from 'models/resource';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { getSelectedNodes } from 'models/tree';\nimport { CollectionType } from 'models/collection';\nimport { GroupContentsResourcePrefix } from 'services/groups-service/groups-service';\nimport { ContainerState } from 'models/container';\nimport { ContainerRequestState } from 'models/container-request';\n\nexport enum ProcessStatusFilter {\n    ALL = 'All',\n    RUNNING = 'Running',\n    FAILED = 'Failed',\n    COMPLETED = 'Completed',\n    CANCELLED = 'Cancelled',\n    ONHOLD = 'On hold',\n    QUEUED = 'Queued',\n    DRAFT = 'Draft',\n}\n\nexport enum ObjectTypeFilter {\n    PROJECT = 'Project',\n    WORKFLOW = 'Workflow',\n    COLLECTION = 'Data collection',\n    DEFINITION = 'Definition',\n}\n\nexport enum GroupTypeFilter {\n    PROJECT = 'Project (normal)',\n    FILTER_GROUP = 'Filter group',\n}\n\nexport enum CollectionTypeFilter {\n    GENERAL_COLLECTION = 'General',\n    OUTPUT_COLLECTION = 'Output',\n    LOG_COLLECTION = 'Log',\n    INTERMEDIATE_COLLECTION = 'Intermediate',\n}\n\nexport enum ProcessTypeFilter {\n    MAIN_PROCESS = 'Workflow Runs',\n    CHILD_PROCESS = 'Workflow Steps',\n}\n\nconst initFilter = (name: string, parent = '', isSelected?: boolean, isExpanded?: boolean) =>\n    setNode<DataTableFilterItem>({\n        id: name,\n        value: { name },\n        parent,\n        children: [],\n        active: false,\n        selected: isSelected !== undefined ? isSelected : true,\n        initialState: isSelected !== undefined ? isSelected : true,\n        expanded: isExpanded !== undefined ? 
isExpanded : false,\n        status: TreeNodeStatus.LOADED,\n    });\n\nexport const getSimpleObjectTypeFilters = pipe(\n    (): DataTableFilters => createTree<DataTableFilterItem>(),\n    initFilter(ObjectTypeFilter.PROJECT),\n    initFilter(ObjectTypeFilter.WORKFLOW),\n    initFilter(ObjectTypeFilter.COLLECTION),\n    initFilter(ObjectTypeFilter.DEFINITION),\n);\n\n// Using pipe() with more than 7 arguments makes the return type be 'any',\n// causing compile issues.\nexport const getInitialResourceTypeFilters = pipe(\n    (): DataTableFilters => createTree<DataTableFilterItem>(),\n    pipe(\n        initFilter(ObjectTypeFilter.PROJECT, '', true, true),\n        initFilter(GroupTypeFilter.PROJECT, ObjectTypeFilter.PROJECT),\n        initFilter(GroupTypeFilter.FILTER_GROUP, ObjectTypeFilter.PROJECT),\n    ),\n    pipe(\n        initFilter(ObjectTypeFilter.WORKFLOW, '', false, true),\n        initFilter(ProcessTypeFilter.MAIN_PROCESS, ObjectTypeFilter.WORKFLOW),\n        initFilter(ProcessTypeFilter.CHILD_PROCESS, ObjectTypeFilter.WORKFLOW, false),\n        initFilter(ObjectTypeFilter.DEFINITION, ObjectTypeFilter.WORKFLOW),\n    ),\n    pipe(\n        initFilter(ObjectTypeFilter.COLLECTION, '', true, true),\n        initFilter(CollectionTypeFilter.GENERAL_COLLECTION, ObjectTypeFilter.COLLECTION),\n        initFilter(CollectionTypeFilter.OUTPUT_COLLECTION, ObjectTypeFilter.COLLECTION),\n        initFilter(CollectionTypeFilter.INTERMEDIATE_COLLECTION, ObjectTypeFilter.COLLECTION, false),\n        initFilter(CollectionTypeFilter.LOG_COLLECTION, ObjectTypeFilter.COLLECTION, false),\n    ),\n\n);\n\n/**\n * Resource type filters for Data tab (excludes main/sub process runs)\n */\nexport const getInitialDataResourceTypeFilters = pipe(\n    (): DataTableFilters => createTree<DataTableFilterItem>(),\n    pipe(\n        initFilter(ObjectTypeFilter.PROJECT, '', true, true),\n        initFilter(GroupTypeFilter.PROJECT, ObjectTypeFilter.PROJECT),\n        initFilter(GroupTypeFilter.FILTER_GROUP, ObjectTypeFilter.PROJECT),\n    ),\n    pipe(\n        initFilter(ObjectTypeFilter.WORKFLOW, '', true, true),\n        initFilter(ObjectTypeFilter.DEFINITION, ObjectTypeFilter.WORKFLOW),\n    ),\n    pipe(\n        initFilter(ObjectTypeFilter.COLLECTION, '', true, true),\n        initFilter(CollectionTypeFilter.GENERAL_COLLECTION, ObjectTypeFilter.COLLECTION),\n        initFilter(CollectionTypeFilter.OUTPUT_COLLECTION, ObjectTypeFilter.COLLECTION),\n        initFilter(CollectionTypeFilter.INTERMEDIATE_COLLECTION, ObjectTypeFilter.COLLECTION, false),\n        initFilter(CollectionTypeFilter.LOG_COLLECTION, ObjectTypeFilter.COLLECTION, false),\n    ),\n\n);\n\n// Using pipe() with more than 7 arguments makes the return type be 'any',\n// causing compile issues.\nexport const getInitialSearchTypeFilters = pipe(\n    (): DataTableFilters => createTree<DataTableFilterItem>(),\n    pipe(\n        initFilter(ObjectTypeFilter.PROJECT, '', true, true),\n        initFilter(GroupTypeFilter.PROJECT, ObjectTypeFilter.PROJECT),\n        initFilter(GroupTypeFilter.FILTER_GROUP, ObjectTypeFilter.PROJECT),\n    ),\n    pipe(\n        initFilter(ObjectTypeFilter.WORKFLOW, '', false, true),\n        initFilter(ProcessTypeFilter.MAIN_PROCESS, ObjectTypeFilter.WORKFLOW, false),\n        initFilter(ProcessTypeFilter.CHILD_PROCESS, ObjectTypeFilter.WORKFLOW, false),\n        initFilter(ObjectTypeFilter.DEFINITION, ObjectTypeFilter.WORKFLOW, false),\n    ),\n    pipe(\n        initFilter(ObjectTypeFilter.COLLECTION, '', true, true),\n 
       initFilter(CollectionTypeFilter.GENERAL_COLLECTION, ObjectTypeFilter.COLLECTION),\n        initFilter(CollectionTypeFilter.OUTPUT_COLLECTION, ObjectTypeFilter.COLLECTION),\n        initFilter(CollectionTypeFilter.INTERMEDIATE_COLLECTION, ObjectTypeFilter.COLLECTION, false),\n        initFilter(CollectionTypeFilter.LOG_COLLECTION, ObjectTypeFilter.COLLECTION, false),\n    ),\n);\n\nexport const getInitialProcessTypeFilters = pipe(\n    (): DataTableFilters => createTree<DataTableFilterItem>(),\n    initFilter(ProcessTypeFilter.MAIN_PROCESS),\n    initFilter(ProcessTypeFilter.CHILD_PROCESS, '', false)\n);\n\nexport const getInitialProcessStatusFilters = pipe(\n    (): DataTableFilters => createTree<DataTableFilterItem>(),\n    pipe(\n        initFilter(ProcessStatusFilter.ALL, '', true),\n        initFilter(ProcessStatusFilter.DRAFT, '', false),\n        initFilter(ProcessStatusFilter.ONHOLD, '', false),\n        initFilter(ProcessStatusFilter.QUEUED, '', false),\n        initFilter(ProcessStatusFilter.RUNNING, '', false),\n        initFilter(ProcessStatusFilter.COMPLETED, '', false),\n        initFilter(ProcessStatusFilter.CANCELLED, '', false),\n        initFilter(ProcessStatusFilter.FAILED, '', false),\n    ),\n);\n\nexport const getTrashPanelTypeFilters = pipe(\n    (): DataTableFilters => createTree<DataTableFilterItem>(),\n    initFilter(ObjectTypeFilter.PROJECT),\n    initFilter(ObjectTypeFilter.COLLECTION),\n    initFilter(CollectionTypeFilter.GENERAL_COLLECTION, ObjectTypeFilter.COLLECTION),\n    initFilter(CollectionTypeFilter.OUTPUT_COLLECTION, ObjectTypeFilter.COLLECTION),\n    initFilter(CollectionTypeFilter.INTERMEDIATE_COLLECTION, ObjectTypeFilter.COLLECTION),\n    initFilter(CollectionTypeFilter.LOG_COLLECTION, ObjectTypeFilter.COLLECTION),\n);\n\nconst createFiltersBuilder = (filters: DataTableFilters) =>\n    ({ fb: new FilterBuilder(), selectedFilters: getSelectedNodes(filters) });\n\nconst getMatchingFilters = (values: string[], filters: TreeNode<DataTableFilterItem>[]) =>\n    filters\n        .map(f => f.id)\n        .filter(includes(__, values));\n\nconst objectTypeToResourceKind = (type: ObjectTypeFilter) => {\n    switch (type) {\n        case ObjectTypeFilter.PROJECT:\n            return ResourceKind.PROJECT;\n        case ObjectTypeFilter.WORKFLOW:\n            return ResourceKind.PROCESS;\n        case ObjectTypeFilter.COLLECTION:\n            return ResourceKind.COLLECTION;\n        case ObjectTypeFilter.DEFINITION:\n            return ResourceKind.WORKFLOW;\n    }\n};\n\n/**\n * object to resource which clasifies workflow category as only registered workflows, not processes\n * Used for data tab that excludes process runs\n */\nconst dataObjectTypeToResourceKind = (type: ObjectTypeFilter) => {\n    switch (type) {\n        case ObjectTypeFilter.PROJECT:\n            return ResourceKind.PROJECT;\n        case ObjectTypeFilter.COLLECTION:\n            return ResourceKind.COLLECTION;\n        case ObjectTypeFilter.WORKFLOW:\n        case ObjectTypeFilter.DEFINITION:\n            return ResourceKind.WORKFLOW;\n    }\n};\n\nconst serializeObjectTypeFilters = ({ fb, selectedFilters }: ReturnType<typeof createFiltersBuilder>) => {\n    const groupFilters = getMatchingFilters(values(GroupTypeFilter), selectedFilters);\n    const collectionFilters = getMatchingFilters(values(CollectionTypeFilter), selectedFilters);\n    const processFilters = getMatchingFilters(values(ProcessTypeFilter), selectedFilters);\n    const typeFilters = pipe(\n        () => new 
Set(getMatchingFilters(values(ObjectTypeFilter), selectedFilters)),\n        set => groupFilters.length > 0\n            ? set.add(ObjectTypeFilter.PROJECT)\n            : set,\n        set => collectionFilters.length > 0\n            ? set.add(ObjectTypeFilter.COLLECTION)\n            : set,\n        set => processFilters.length > 0\n            ? set.add(ObjectTypeFilter.WORKFLOW)\n            : set,\n        set => Array.from(set)\n    )();\n\n    return {\n        fb: typeFilters.length > 0\n            ? fb.addIsA('uuid', typeFilters.map(objectTypeToResourceKind))\n            : fb.addIsA('uuid', ResourceKind.NONE),\n        selectedFilters,\n    };\n};\n\n/**\n * Serializes only data object types, excluding processes\n */\nconst serializeDataObjectTypeFilters = ({ fb, selectedFilters }: ReturnType<typeof createFiltersBuilder>) => {\n    const groupFilters = getMatchingFilters(values(GroupTypeFilter), selectedFilters);\n    const collectionFilters = getMatchingFilters(values(CollectionTypeFilter), selectedFilters);\n    const typeFilters = pipe(\n        () => new Set(getMatchingFilters(values(ObjectTypeFilter), selectedFilters)),\n        set => groupFilters.length > 0\n            ? set.add(ObjectTypeFilter.PROJECT)\n            : set,\n        set => collectionFilters.length > 0\n            ? set.add(ObjectTypeFilter.COLLECTION)\n            : set,\n        set => Array.from(set)\n    )();\n\n    return {\n        fb: typeFilters.length > 0\n            ? fb.addIsA('uuid', typeFilters.map(dataObjectTypeToResourceKind))\n            : fb.addIsA('uuid', ResourceKind.NONE),\n        selectedFilters,\n    };\n};\n\nconst collectionTypeToPropertyValue = (type: CollectionTypeFilter) => {\n    switch (type) {\n        case CollectionTypeFilter.GENERAL_COLLECTION:\n            return CollectionType.GENERAL;\n        case CollectionTypeFilter.OUTPUT_COLLECTION:\n            return CollectionType.OUTPUT;\n        case CollectionTypeFilter.LOG_COLLECTION:\n            return CollectionType.LOG;\n        case CollectionTypeFilter.INTERMEDIATE_COLLECTION:\n            return CollectionType.INTERMEDIATE;\n        default:\n            return CollectionType.GENERAL;\n    }\n};\n\nconst serializeCollectionTypeFilters = ({ fb, selectedFilters }: ReturnType<typeof createFiltersBuilder>) => pipe(\n    () => getMatchingFilters(values(CollectionTypeFilter), selectedFilters),\n    filters => filters.map(collectionTypeToPropertyValue),\n    mappedFilters => ({\n        fb: buildCollectionTypeFilters({ fb, filters: mappedFilters }),\n        selectedFilters\n    })\n)();\n\nconst COLLECTION_TYPES = values(CollectionType);\n\nconst NON_GENERAL_COLLECTION_TYPES = difference(COLLECTION_TYPES, [CollectionType.GENERAL]);\n\nconst COLLECTION_PROPERTIES_PREFIX = `${GroupContentsResourcePrefix.COLLECTION}.properties`;\n\nconst buildCollectionTypeFilters = ({ fb, filters }: { fb: FilterBuilder, filters: CollectionType[] }) => {\n    switch (true) {\n        case filters.length === 0 || filters.length === COLLECTION_TYPES.length:\n            return fb;\n        case includes(CollectionType.GENERAL, filters):\n            return fb.addNotIn('type', difference(NON_GENERAL_COLLECTION_TYPES, filters), COLLECTION_PROPERTIES_PREFIX);\n        default:\n            return fb.addIn('type', filters, COLLECTION_PROPERTIES_PREFIX);\n    }\n};\n\nconst serializeGroupTypeFilters = ({ fb, selectedFilters }: ReturnType<typeof createFiltersBuilder>) => pipe(\n    () => getMatchingFilters(values(GroupTypeFilter), 
selectedFilters),\n    filters => ({\n        fb: buildGroupTypeFilters({ fb, filters, use_prefix: true }),\n        selectedFilters\n    })\n)();\n\nconst GROUP_TYPES = values(GroupTypeFilter);\n\n// Note: use_prefix is accepted for symmetry with buildProcessTypeFilters, but group_class\n// conditions are always expressed via the 'groups.' prefix below, so it is currently unused.\nconst buildGroupTypeFilters = ({ fb, filters, use_prefix }: { fb: FilterBuilder, filters: string[], use_prefix: boolean }) => {\n    switch (true) {\n        case filters.length === 0 || filters.length === GROUP_TYPES.length:\n            return fb;\n        case includes(GroupTypeFilter.PROJECT, filters):\n            return fb.addEqual('groups.group_class', 'project');\n        case includes(GroupTypeFilter.FILTER_GROUP, filters):\n            return fb.addEqual('groups.group_class', 'filter');\n        default:\n            return fb;\n    }\n};\n\nconst serializeProcessTypeFilters = ({ fb, selectedFilters }: ReturnType<typeof createFiltersBuilder>) => pipe(\n    () => getMatchingFilters(values(ProcessTypeFilter), selectedFilters),\n    filters => ({\n        fb: buildProcessTypeFilters({ fb, filters, use_prefix: true }),\n        selectedFilters\n    })\n)();\n\nconst PROCESS_TYPES = values(ProcessTypeFilter);\nconst PROCESS_PREFIX = GroupContentsResourcePrefix.PROCESS;\n\nconst buildProcessTypeFilters = ({ fb, filters, use_prefix }: { fb: FilterBuilder, filters: string[], use_prefix: boolean }) => {\n    switch (true) {\n        case filters.length === 0 || filters.length === PROCESS_TYPES.length:\n            return fb;\n        case includes(ProcessTypeFilter.MAIN_PROCESS, filters):\n            return fb.addEqual('requesting_container_uuid', null, use_prefix ? PROCESS_PREFIX : '');\n        case includes(ProcessTypeFilter.CHILD_PROCESS, filters):\n            return fb.addDistinct('requesting_container_uuid', null, use_prefix ? 
PROCESS_PREFIX : '');\n        default:\n            return fb;\n    }\n};\n\n/**\n * Serializes general resource type filters with prefix for group contents API\n */\nexport const serializeResourceTypeFilters = pipe(\n    createFiltersBuilder,\n    serializeObjectTypeFilters,\n    serializeGroupTypeFilters,\n    serializeCollectionTypeFilters,\n    serializeProcessTypeFilters,\n    ({ fb }) => fb.getFilters(),\n);\n\n/**\n * Serializes data tab resource type filters with prefix for group contents API\n */\nexport const serializeDataResourceTypeFilters = pipe(\n    createFiltersBuilder,\n    serializeDataObjectTypeFilters,\n    serializeGroupTypeFilters,\n    serializeCollectionTypeFilters,\n    ({ fb }) => fb.getFilters(),\n);\n\nexport const serializeOnlyProcessTypeFilters = (use_prefix: boolean) => pipe(\n    createFiltersBuilder,\n    ({ fb, selectedFilters }: ReturnType<typeof createFiltersBuilder>) => pipe(\n        () => getMatchingFilters(values(ProcessTypeFilter), selectedFilters),\n        filters => ({\n            fb: buildProcessTypeFilters({ fb, filters, use_prefix }),\n            selectedFilters\n        })\n    )(),\n    ({ fb }) => fb.getFilters(),\n);\n\n/**\n * Serializes process type filters with prefix for group contents request\n * Uses buildProcessTypeFilters to disable filters when no process type is selected\n */\nexport const serializeProcessTypeGroupContentsFilters = pipe(\n    createFiltersBuilder,\n    ({ fb, selectedFilters }): ReturnType<typeof createFiltersBuilder> => ({\n        fb: fb.addIsA('uuid', [ResourceKind.PROCESS]),\n        selectedFilters,\n    }),\n    ({ fb, selectedFilters }: ReturnType<typeof createFiltersBuilder>) => pipe(\n        () => getMatchingFilters(values(ProcessTypeFilter), selectedFilters),\n        filters => ({\n            fb: buildProcessTypeFilters({ fb, filters, use_prefix: true }),\n            selectedFilters\n        })\n    )(),\n    ({ fb }) => fb.getFilters(),\n);\n\nexport const serializeSimpleObjectTypeFilters = (filters: Tree<DataTableFilterItem>) => {\n    return getSelectedNodes(filters)\n        .map(f => f.id)\n        .map(objectTypeToResourceKind);\n};\n\nexport const buildProcessStatusFilters = (fb: FilterBuilder, activeStatusFilter: string, resourcePrefix?: string): FilterBuilder => {\n    switch (activeStatusFilter) {\n        case ProcessStatusFilter.ONHOLD: {\n            fb.addDistinct('state', ContainerRequestState.FINAL, resourcePrefix);\n            fb.addEqual('priority', '0', resourcePrefix);\n            fb.addIn('container.state', [ContainerState.QUEUED, ContainerState.LOCKED], resourcePrefix);\n            break;\n        }\n        case ProcessStatusFilter.COMPLETED: {\n            fb.addEqual('container.state', ContainerState.COMPLETE, resourcePrefix);\n            fb.addEqual('container.exit_code', '0', resourcePrefix);\n            break;\n        }\n        case ProcessStatusFilter.FAILED: {\n            fb.addEqual('container.state', ContainerState.COMPLETE, resourcePrefix);\n            fb.addDistinct('container.exit_code', '0', resourcePrefix);\n            break;\n        }\n        case ProcessStatusFilter.QUEUED: {\n            fb.addIn('container.state', [ContainerState.QUEUED, ContainerState.LOCKED], resourcePrefix);\n            fb.addDistinct('priority', '0', resourcePrefix);\n            break;\n        }\n        case ProcessStatusFilter.CANCELLED:\n        case 
ProcessStatusFilter.RUNNING: {\n            fb.addEqual('container.state', activeStatusFilter, resourcePrefix);\n            break;\n        }\n        case ProcessStatusFilter.DRAFT: {\n            fb.addEqual('state', ContainerRequestState.UNCOMMITTED, resourcePrefix);\n            break;\n        }\n    }\n    return fb;\n};\n"
  },
  {
    "path": "services/workbench2/src/store/resources/resources-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from 'common/unionize';\nimport { extractUuidKind, Resource, ResourceWithProperties } from 'models/resource';\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from 'services/services';\nimport { getResourceService } from 'services/services';\nimport { addProperty, deleteProperty } from 'lib/resource-properties';\nimport { showErrorSnackbar, showSuccessSnackbar, snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { getResource } from './resources';\nimport { TagProperty } from 'models/tag';\nimport { change, formValueSelector } from 'redux-form';\nimport { ResourcePropertiesFormData } from 'views-components/resource-properties-form/resource-properties-form';\nimport { CommonResourceServiceError, getCommonResourceServiceError } from 'services/common-service/common-resource-service';\n\nexport type ResourceWithDescription = Resource & { description?: string }\n\nexport const resourcesActions = unionize({\n    SET_RESOURCES: ofType<ResourceWithDescription[]>(),\n    DELETE_RESOURCES: ofType<string[]>()\n});\n\nexport type ResourcesAction = UnionOf<typeof resourcesActions>;\n\nexport const updateResources = (resources: Resource[]) => resourcesActions.SET_RESOURCES(resources);\n\nexport const deleteResources = (resources: string[]) => resourcesActions.DELETE_RESOURCES(resources);\n\nexport const loadResource = (uuid: string, showErrors?: boolean) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            const kind = extractUuidKind(uuid);\n            const service = getResourceService(kind)(services);\n            if (service) {\n                const resource = await service.get(uuid, showErrors);\n                dispatch<any>(updateResources([resource]));\n                return resource;\n            }\n        } catch {\n            // Ignore fetch errors; callers treat an undefined result as \"resource unavailable\"\n        }\n        return undefined;\n    };\n\nexport const deleteResourceProperty = (uuid: string, key: string, value: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const { resources } = getState();\n\n        const rsc = getResource(uuid)(resources) as ResourceWithProperties;\n        if (!rsc) { return; }\n\n        const kind = extractUuidKind(uuid);\n        const service = getResourceService(kind)(services);\n        if (!service) { return; }\n\n        const properties = Object.assign({}, rsc.properties);\n\n        try {\n            let updatedRsc = await service.update(\n                uuid, {\n                    properties: deleteProperty(properties, key, value),\n                });\n            updatedRsc = {...rsc, ...updatedRsc};\n            dispatch<any>(updateResources([updatedRsc]));\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Property has been successfully deleted.\", hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n        } catch (e) {\n            const errorMsg = e.errors && e.errors.length > 0 ? e.errors[0] : \"Error while deleting property\";\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: errorMsg, hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        }\n    };\n\nexport const createResourceProperty = (data: TagProperty) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const { uuid } = data;\n        const { resources } = getState();\n\n        const rsc = getResource(uuid)(resources) as 
ResourceWithProperties;\n        if (!rsc) { return; }\n\n        const kind = extractUuidKind(uuid);\n        const service = getResourceService(kind)(services);\n        if (!service) { return; }\n\n        try {\n            const key = data.keyID || data.key;\n            const value = data.valueID || data.value;\n            const properties = Object.assign({}, rsc.properties);\n            let updatedRsc = await service.update(\n                rsc.uuid, {\n                    properties: addProperty(properties, key, value),\n                }\n            );\n            updatedRsc = {...rsc, ...updatedRsc};\n            dispatch<any>(updateResources([updatedRsc]));\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Property has been successfully added.\", hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n        } catch (e) {\n            const errorMsg = e.errors && e.errors.length > 0 ? e.errors[0] : \"Error while adding property\";\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: errorMsg, hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        }\n    };\n\nexport const addPropertyToResourceForm = (data: ResourcePropertiesFormData, formName: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const properties = { ...formValueSelector(formName)(getState(), 'properties') };\n        const vocabulary = getState().properties.vocabulary?.tags;\n        const dataTags = getTagsIfExist(data.key, data.value, vocabulary);\n        const key = data.keyID || dataTags.key || data.key;\n        const value =  data.valueID || dataTags.value || data.value;\n        dispatch(change(\n            formName,\n            'properties',\n            addProperty(properties, key, value)));\n    };\n\nexport const removePropertyFromResourceForm = (key: string, value: string, formName: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const properties = { ...formValueSelector(formName)(getState(), 'properties') };\n        dispatch(change(\n            formName,\n            'properties',\n            deleteProperty(properties, key, value)));\n    };\n\n\nconst getTagsIfExist = (dataKey: string, dataValue: string, vocabulary: any) => {\n    let k, v;\n    for (const key in vocabulary) {\n        if (vocabulary[key].labels.find(l=>l.label === dataKey)) {\n            k = key;\n            const { values } = vocabulary[key];\n            for (const val in values) {\n                if (values[val].labels.find(l=>l.label === dataValue)) {\n                    v = val;\n                    break;\n                }\n            }\n        }\n    }\n    return { key: k, value: v };\n};\n\n/**\n * Holds a map of CommonResourceServiceError types to error messages\n * Allows consumers to easily specify error messages to be grouped and displayed\n * for a batch resource operation\n */\nexport type CommonResourceErrorMessageFuncMap = {\n    [key in CommonResourceServiceError]?: (count: number) => string;\n}\n\n/**\n * Utility type to hold a group of rejected promises associated with their error type\n */\ntype CommonResourceErrorResultMap = {\n    [key in CommonResourceServiceError]?: PromiseRejectedResult[];\n};\n\n/**\n * Just a small type to tie the return generic of the success result with the passed in promise array\n */\nexport type SettledPromiseSet<T> = {\n    success: PromiseFulfilledResult<T>[];\n    error: PromiseRejectedResult[];\n};\n\n/**\n * Accepts a 
batched set of settled CommonResource Promise results and displays grouped error / success messages\n * @param dispatch Redux dispatch used to display the grouped success / error snackbars\n * @param promiseResults Array of allSettled promise results to be processed\n * @param messageFuncMap Map of CommonResourceServiceErrors to error message generator funcs\n * @returns The separated success / error Promise results for further use\n */\nexport const showGroupedCommonResourceResultSnackbars = <T>(\n    dispatch: Dispatch,\n    promiseResults: PromiseSettledResult<T>[],\n    messageFuncMap: CommonResourceErrorMessageFuncMap,\n): SettledPromiseSet<T> => {\n    // Split success and error promise results\n    // Gets returned for the consumer to use (update stores, refresh data explorers, etc)\n    const success = promiseResults.filter((promiseResult): promiseResult is PromiseFulfilledResult<T> => promiseResult.status === 'fulfilled');\n    const error = promiseResults.filter((promiseResult): promiseResult is PromiseRejectedResult => promiseResult.status === 'rejected');\n\n    // Get the list of error types that we have error messages for\n    const errorTypesWithMessages = (Object.keys(messageFuncMap) as Array<keyof typeof messageFuncMap>);\n\n    // Group error promises by each CommonResourceError for which we have an associated message\n    const mappedErrors = errorTypesWithMessages.map((key: CommonResourceServiceError): CommonResourceErrorResultMap => {\n        // Filter the promises that match this error type\n        const matchingPromiseResults = error.filter((promiseResult) => {\n            const errorType = getCommonResourceServiceError(promiseResult.reason);\n            return (\n                errorType === key &&\n                // NONE is used for success, filter out any rejected promises that lack errors\n                key !== CommonResourceServiceError.NONE &&\n                // UNKNOWN is excluded and bundled with types that lack a message\n                key !== CommonResourceServiceError.UNKNOWN\n            );\n        });\n        return {[key]: matchingPromiseResults};\n    }).reduce((acc, curr) => {\n        // Merge each error type -> promise result array into a single CommonResourceErrorResultMap object\n        return Object.assign(acc, curr);\n    }, {} as CommonResourceErrorResultMap);\n\n    // Any errors not handled by the errorMessageMap are bundled into a generic error along with UNKNOWN\n    const genericErrors = error.filter((promiseResult) => {\n        return !Object.keys(messageFuncMap).includes(getCommonResourceServiceError(promiseResult.reason));\n    });\n\n    // Display success messages\n    if (success.length) {\n        const messageFunc = messageFuncMap[CommonResourceServiceError.NONE];\n        if (messageFunc) {\n            // Use NONE message func passed in for success message\n            dispatch(showSuccessSnackbar(messageFunc(success.length)));\n        } else {\n            const itemText = success.length > 1 ? 
\"items\" : \"item\";\n            dispatch(showSuccessSnackbar(`Operation successful (${success.length} ${itemText})`));\n        }\n    }\n\n    const errorTypesFromErrors = Object.keys(mappedErrors) as Array<keyof typeof mappedErrors>;\n\n    for (const errorType of errorTypesFromErrors) {\n        const messageFunc = messageFuncMap[errorType];\n        const errors = mappedErrors[errorType];\n\n        // Errors here were included in the map so they should always have a messageFunc\n        if (messageFunc && errors?.length) {\n            dispatch(showErrorSnackbar(messageFunc(errors.length)));\n        }\n    }\n\n    if (genericErrors.length) {\n        const messageFunc = messageFuncMap[CommonResourceServiceError.UNKNOWN];\n        if (messageFunc) {\n            // Use UNKNOWN messageFunc for generic+unknown errors if provided\n            dispatch(showErrorSnackbar(messageFunc(genericErrors.length)));\n        } else {\n            const itemText = genericErrors.length > 1 ? \"items\" : \"item\";\n            dispatch(showErrorSnackbar(`Operation failed (${genericErrors.length} ${itemText})`));\n        }\n    }\n\n    return { success, error };\n};\n"
  },
  {
    "path": "services/workbench2/src/store/resources/resources-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { sanitizeHTML } from 'common/html-sanitize';\nimport { ResourcesState, setResource, deleteResource } from './resources';\nimport { ResourcesAction, resourcesActions } from './resources-actions';\n\nexport const resourcesReducer = (state: ResourcesState = {}, action: ResourcesAction) => {\n    if (Array.isArray(action.payload)) {\n        for (const item of action.payload) {\n            // Payload items may be uuid strings (DELETE_RESOURCES) or null, so guard before sanitizing\n            if (item && typeof item === 'object' && item.description) {\n                item.description = sanitizeHTML(item.description);\n            }\n        }\n    }\n\n    return resourcesActions.match(action, {\n        SET_RESOURCES: resources => resources.reduce((state, resource) => setResource(resource.uuid, resource)(state), state),\n        DELETE_RESOURCES: ids => ids.reduce((state, id) => deleteResource(id)(state), state),\n        default: () => state,\n    });\n};\n"
  },
  {
    "path": "services/workbench2/src/store/resources/resources.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Resource } from \"models/resource\";\nimport { ResourceKind } from 'models/resource';\nimport { memoize } from \"lodash\";\n\nexport type ResourcesState = { [key: string]: Resource };\n\nexport const getResource = memoize(<T extends Resource = Resource>(id: string | undefined) =>\n    (state: ResourcesState): T | undefined =>\n        id ? state[id] as T : undefined);\n\nexport const getPopulatedResources = <T extends Resource>(uuids: string[], resources: ResourcesState) => {\n    return uuids.reduce((acc: T[], uuid: string) => {\n        const res = getResource<T>(uuid)(resources);\n        if (res) acc.push(res);\n        return acc;\n    }, []);\n};\n\nexport const setResource = <T extends Resource>(id: string, data: T) =>\n    (state: ResourcesState) => ({\n        ...state,\n        [id]: data\n    });\n\nexport const deleteResource = (id: string) =>\n    (state: ResourcesState) => {\n        const newState = { ...state };\n        delete newState[id];\n        return newState;\n    };\n\nexport const filterResources = (filter: (resource: Resource) => boolean) =>\n    (state: ResourcesState) => {\n        const items: Resource[] = [];\n        for (const id in state) {\n            const resource = state[id];\n            if (resource && filter(resource)) {\n                items.push(resource);\n            }\n        }\n        return items;\n    };\n\nexport const filterResourcesByKind = (kind: ResourceKind) =>\n    (state: ResourcesState) =>\n        filterResources(resource => resource.kind === kind)(state);\n"
  },
  {
    "path": "services/workbench2/src/store/rich-text-editor-dialog/rich-text-editor-dialog-actions.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { dialogActions } from \"store/dialog/dialog-actions\";\n\nexport const RICH_TEXT_EDITOR_DIALOG_NAME = 'richTextEditorDialogName';\nexport const openRichTextEditorDialog = (title: string, text: string) =>\n    dialogActions.OPEN_DIALOG({ id: RICH_TEXT_EDITOR_DIALOG_NAME, data: { title, text } });"
  },
  {
    "path": "services/workbench2/src/store/run-process-panel/run-process-panel-actions.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { runProcess } from \"./run-process-panel-actions\";\nimport { RUN_PROCESS_BASIC_FORM, RUN_PROCESS_INPUTS_FORM } from \"./run-process-panel-actions\";\nimport { navigateTo } from \"store/navigation/navigation-action\";\n\ndescribe(\"run-process-panel-actions\", () => {\n    describe(\"runProcess\", () => {\n        const newProcessUUID = 'newProcessUUID';\n        let dispatch, getState, services;\n\n        beforeEach(() => {\n            dispatch = cy.spy();\n            services = {\n                containerRequestService: {\n                    create: cy.stub().callsFake(async () => ({\n                        uuid: newProcessUUID,\n                    })),\n                },\n            };\n            cy.spy(navigateTo).as('navigateTo');\n        });\n\n        it(\"should return when userUuid is null\", async () => {\n            // given\n            getState = () => ({\n                auth: {},\n            });\n\n            // when\n            await runProcess(dispatch, getState, services);\n\n            // then\n            expect(dispatch).not.to.be.called;\n        });\n\n        it(\"should run workflow with project-uuid\", async () => {\n            // given\n            getState = () => ({\n                auth: {\n                    user: {\n                        email: \"test@gmail.com\",\n                        firstName: \"TestFirstName\",\n                        lastName: \"TestLastName\",\n                        uuid: \"zzzzz-tpzed-yid70bw31f51234\",\n                        ownerUuid: \"zzzzz-tpzed-000000000000000\",\n                        isAdmin: false,\n                        isActive: true,\n                        username: \"testfirstname\",\n                        prefs: {\n                            profile: {},\n                        },\n                    },\n                },\n                runProcessPanel: {\n                    processPathname: \"/projects/zzzzz-tpzed-yid70bw31f51234\",\n                    processOwnerUuid: \"zzzzz-tpzed-yid70bw31f51234\",\n                    selectedWorkflow: {\n                        kind: \"arvados#workflow\",\n                        etag: \"8gh5xlhlgo61yqscyl1spw8tc\",\n                        uuid: \"zzzzz-7fd4e-2tlnerdkxnl4fjt\",\n                        ownerUuid: \"zzzzz-tpzed-o4njwilpp4ov321\",\n                        createdAt: \"2020-07-15T19:40:50.296041000Z\",\n                        modifiedByClientUuid: \"zzzzz-ozdt8-libnr89sc5nq111\",\n                        modifiedByUserUuid: \"zzzzz-tpzed-o4njwilpp4ov321\",\n                        modifiedAt: \"2020-07-15T19:40:50.296376000Z\",\n                        name: \"revsort.cwl\",\n                        description:\n                            \"Reverse the lines in a document, then sort those lines.\",\n                        definition:\n                            '{\\n    \"$graph\": [\\n        {\\n            \"class\": \"Workflow\",\\n            \"doc\": \"Reverse the lines in a document, then sort those lines.\",\\n            \"id\": \"#main\",\\n            \"hints\":[{\"class\":\"http://arvados.org/cwl#WorkflowRunnerResources\",\"acrContainerImage\":\"arvados/jobs:2.0.4\", \"ramMin\": 16000}], \"inputs\": [\\n                {\\n                    \"default\": null,\\n                    \"doc\": \"The input file to be processed.\",\\n                    \"id\": \"#main/input\",\\n    
                \"type\": \"File\"\\n                },\\n                {\\n                    \"default\": true,\\n                    \"doc\": \"If true, reverse (descending) sort\",\\n                    \"id\": \"#main/reverse_sort\",\\n                    \"type\": \"boolean\"\\n                }\\n            ],\\n            \"outputs\": [\\n                {\\n                    \"doc\": \"The output with the lines reversed and sorted.\",\\n                    \"id\": \"#main/output\",\\n                    \"outputSource\": \"#main/sorted/output\",\\n                    \"type\": \"File\"\\n                }\\n            ],\\n            \"steps\": [\\n                {\\n                    \"id\": \"#main/rev\",\\n                    \"in\": [\\n                        {\\n                            \"id\": \"#main/rev/input\",\\n                            \"source\": \"#main/input\"\\n                        }\\n                    ],\\n                    \"out\": [\\n                        \"#main/rev/output\"\\n                    ],\\n                    \"run\": \"#revtool.cwl\"\\n                },\\n                {\\n                    \"id\": \"#main/sorted\",\\n                    \"in\": [\\n                        {\\n                            \"id\": \"#main/sorted/input\",\\n                            \"source\": \"#main/rev/output\"\\n                        },\\n                        {\\n                            \"id\": \"#main/sorted/reverse\",\\n                            \"source\": \"#main/reverse_sort\"\\n                        }\\n                    ],\\n                    \"out\": [\\n                        \"#main/sorted/output\"\\n                    ],\\n                    \"run\": \"#sorttool.cwl\"\\n                }\\n            ]\\n        },\\n        {\\n            \"baseCommand\": \"rev\",\\n            \"class\": \"CommandLineTool\",\\n            \"doc\": \"Reverse each line using the `rev` command\",\\n            \"hints\": [\\n                {\\n                    \"class\": \"ResourceRequirement\",\\n                    \"ramMin\": 8\\n                }\\n            ],\\n            \"id\": \"#revtool.cwl\",\\n            \"inputs\": [\\n                {\\n                    \"id\": \"#revtool.cwl/input\",\\n                    \"inputBinding\": {},\\n                    \"type\": \"File\"\\n                }\\n            ],\\n            \"outputs\": [\\n                {\\n                    \"id\": \"#revtool.cwl/output\",\\n                    \"outputBinding\": {\\n                        \"glob\": \"output.txt\"\\n                    },\\n                    \"type\": \"File\"\\n                }\\n            ],\\n            \"stdout\": \"output.txt\"\\n        },\\n        {\\n            \"baseCommand\": \"sort\",\\n            \"class\": \"CommandLineTool\",\\n            \"doc\": \"Sort lines using the `sort` command\",\\n            \"hints\": [\\n                {\\n                    \"class\": \"ResourceRequirement\",\\n                    \"ramMin\": 8\\n                }\\n            ],\\n            \"id\": \"#sorttool.cwl\",\\n            \"inputs\": [\\n                {\\n                    \"id\": \"#sorttool.cwl/reverse\",\\n                    \"inputBinding\": {\\n                        \"position\": 1,\\n                        \"prefix\": \"-r\"\\n                    },\\n                    \"type\": \"boolean\"\\n                },\\n                {\\n                    
\"id\": \"#sorttool.cwl/input\",\\n                    \"inputBinding\": {\\n                        \"position\": 2\\n                    },\\n                    \"type\": \"File\"\\n                }\\n            ],\\n            \"outputs\": [\\n                {\\n                    \"id\": \"#sorttool.cwl/output\",\\n                    \"outputBinding\": {\\n                        \"glob\": \"output.txt\"\\n                    },\\n                    \"type\": \"File\"\\n                }\\n            ],\\n            \"stdout\": \"output.txt\"\\n        }\\n    ],\\n    \"cwlVersion\": \"v1.0\"\\n}',\n                    },\n                },\n                form: {\n                    [RUN_PROCESS_BASIC_FORM]: {\n                        values: {\n                            name: \"basicFormTestName\",\n                            description: \"basicFormTestDescription\",\n                        },\n                    },\n                    [RUN_PROCESS_INPUTS_FORM]: {\n                        values: {\n                            inputs: {},\n                        }\n                    }\n                },\n            });\n\n            // when\n            await runProcess(dispatch, getState, services);\n\n            // then\n            expect(services.containerRequestService.create).to.be.calledWithMatch(testCreateArgs);\n            expect(dispatch).to.be.calledWithMatch(navigateTo(newProcessUUID));\n        });\n    });\n});\n\nconst testMounts = {\n    '/var/spool/cwl': {\n        kind: 'collection',\n        writable: true,\n    },\n    stdout: {\n        kind: 'file',\n        path: '/var/spool/cwl/cwl.output.json',\n    },\n    '/var/lib/cwl/workflow.json': {\n        kind: 'json',\n        content: {\n            $graph: [\n                {\n                    class: 'Workflow',\n                    doc: 'Reverse the lines in a document, then sort those lines.',\n                    id: '#main',\n                    hints: [\n                        {\n                            class: 'http://arvados.org/cwl#WorkflowRunnerResources',\n                            acrContainerImage: 'arvados/jobs:2.0.4',\n                            ramMin: 16000,\n                        },\n                    ],\n                    inputs: [\n                        {\n                            default: null,\n                            doc: 'The input file to be processed.',\n                            id: '#main/input',\n                            type: 'File',\n                        },\n                        {\n                            default: true,\n                            doc: 'If true, reverse (descending) sort',\n                            id: '#main/reverse_sort',\n                            type: 'boolean',\n                        },\n                    ],\n                    outputs: [\n                        {\n                            doc: 'The output with the lines reversed and sorted.',\n                            id: '#main/output',\n                            outputSource: '#main/sorted/output',\n                            type: 'File',\n                        },\n                    ],\n                    steps: [\n                        {\n                            id: '#main/rev',\n                            in: [\n                                {\n                                    id: '#main/rev/input',\n                                    source: '#main/input',\n                                },\n       
                     ],\n                            out: ['#main/rev/output'],\n                            run: '#revtool.cwl',\n                        },\n                        {\n                            id: '#main/sorted',\n                            in: [\n                                {\n                                    id: '#main/sorted/input',\n                                    source: '#main/rev/output',\n                                },\n                                {\n                                    id: '#main/sorted/reverse',\n                                    source: '#main/reverse_sort',\n                                },\n                            ],\n                            out: ['#main/sorted/output'],\n                            run: '#sorttool.cwl',\n                        },\n                    ],\n                },\n                {\n                    baseCommand: 'rev',\n                    class: 'CommandLineTool',\n                    doc: 'Reverse each line using the `rev` command',\n                    hints: [\n                        {\n                            class: 'ResourceRequirement',\n                            ramMin: 8,\n                        },\n                    ],\n                    id: '#revtool.cwl',\n                    inputs: [\n                        {\n                            id: '#revtool.cwl/input',\n                            inputBinding: {},\n                            type: 'File',\n                        },\n                    ],\n                    outputs: [\n                        {\n                            id: '#revtool.cwl/output',\n                            outputBinding: {\n                                glob: 'output.txt',\n                            },\n                            type: 'File',\n                        },\n                    ],\n                    stdout: 'output.txt',\n                },\n                {\n                    baseCommand: 'sort',\n                    class: 'CommandLineTool',\n                    doc: 'Sort lines using the `sort` command',\n                    hints: [\n                        {\n                            class: 'ResourceRequirement',\n                            ramMin: 8,\n                        },\n                    ],\n                    id: '#sorttool.cwl',\n                    inputs: [\n                        {\n                            id: '#sorttool.cwl/reverse',\n                            inputBinding: {\n                                position: 1,\n                                prefix: '-r',\n                            },\n                            type: 'boolean',\n                        },\n                        {\n                            id: '#sorttool.cwl/input',\n                            inputBinding: {\n                                position: 2,\n                            },\n                            type: 'File',\n                        },\n                    ],\n                    outputs: [\n                        {\n                            id: '#sorttool.cwl/output',\n                            outputBinding: {\n                                glob: 'output.txt',\n                            },\n                            type: 'File',\n                        },\n                    ],\n                    stdout: 'output.txt',\n                },\n            ],\n            cwlVersion: 'v1.0',\n        },\n    },\n    
'/var/lib/cwl/cwl.input.json': {\n        kind: 'json',\n        content: {\n            '': {},\n        },\n    },\n};\n\nconst testCreateArgs = {\n    command: [\n        'arvados-cwl-runner',\n        '--local',\n        '--api=containers',\n        '--no-log-timestamps',\n        '--disable-color',\n        '--project-uuid=zzzzz-tpzed-yid70bw31f51234',\n        '/var/lib/cwl/workflow.json#main',\n        '/var/lib/cwl/cwl.input.json',\n    ],\n    containerImage: 'arvados/jobs:2.0.4',\n    cwd: '/var/spool/cwl',\n    description: undefined,\n    mounts: testMounts,\n    secretMounts: undefined,\n    name: 'basicFormTestName',\n    outputName: 'Output from basicFormTestName',\n    outputPath: '/var/spool/cwl',\n    ownerUuid: 'zzzzz-tpzed-yid70bw31f51234',\n    priority: 500,\n    properties: {\n        workflowName: 'revsort.cwl',\n        template_uuid: 'zzzzz-7fd4e-2tlnerdkxnl4fjt',\n    },\n    runtimeConstraints: {\n        API: true,\n        ram: 16256 * (1024 * 1024),\n        vcpus: 1,\n    },\n    schedulingParameters: { max_run_time: undefined },\n    state: 'Committed',\n    useExisting: false,\n};\n"
  },
  {
    "path": "services/workbench2/src/store/run-process-panel/run-process-panel-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\nimport { ServiceRepository } from \"services/services\";\nimport { RootState } from 'store/store';\nimport { getUserUuid } from \"common/getuser\";\nimport { WorkflowResource, WorkflowRunnerResources, getWorkflow, getWorkflowInputs, parseWorkflowDefinition } from 'models/workflow';\nimport { getFormValues, initialize } from 'redux-form';\nimport { WorkflowInputsData } from 'models/workflow';\nimport { createWorkflowMounts, createWorkflowSecretMounts } from 'models/process';\nimport { ContainerRequestState } from 'models/container-request';\nimport { navigateTo } from '../navigation/navigation-action';\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { setBreadcrumbs } from 'store/breadcrumbs/breadcrumbs-actions';\nimport { getResource } from 'store/resources/resources';\nimport { ProjectResource } from \"models/project\";\nimport { UserResource } from \"models/user\";\nimport { CWLType } from 'models/workflow';\n\nexport const RUN_PROCESS_BASIC_FORM = 'runProcessBasicForm';\nexport const RUN_PROCESS_INPUTS_FORM = 'runProcessInputsForm';\n\nexport interface RunProcessBasicFormData {\n    name: string;\n    owner?: ProjectResource | UserResource;\n}\n\nexport const RUN_PROCESS_ADVANCED_FORM = 'runProcessAdvancedForm';\n\nexport const DESCRIPTION_FIELD = 'description';\nexport const OUTPUT_FIELD = 'output';\nexport const RUNTIME_FIELD = 'runtime';\nexport const RAM_FIELD = 'ram';\nexport const VCPUS_FIELD = 'vcpus';\nexport const KEEP_CACHE_RAM_FIELD = 'keep_cache_ram';\nexport const RUNNER_IMAGE_FIELD = 'acr_container_image';\n\nexport interface RunProcessAdvancedFormData {\n    [DESCRIPTION_FIELD]?: string;\n    [OUTPUT_FIELD]?: string;\n    [RUNTIME_FIELD]?: number;\n    [RAM_FIELD]: number;\n    [VCPUS_FIELD]: number;\n    [KEEP_CACHE_RAM_FIELD]: number;\n    [RUNNER_IMAGE_FIELD]: string;\n}\n\nexport const runProcessPanelActions = unionize({\n    SET_PROCESS_PATHNAME: ofType<string>(),\n    SET_PROCESS_OWNER_UUID: ofType<string>(),\n    SET_CURRENT_STEP: ofType<number>(),\n    SET_STEP_CHANGED: ofType<boolean>(),\n    SET_WORKFLOWS: ofType<WorkflowResource[]>(),\n    SET_SELECTED_WORKFLOW: ofType<WorkflowResource>(),\n    SET_WORKFLOW_PRESETS: ofType<WorkflowResource[]>(),\n    SELECT_WORKFLOW_PRESET: ofType<WorkflowResource>(),\n    SEARCH_WORKFLOWS: ofType<string>(),\n    RESET_RUN_PROCESS_PANEL: ofType<{}>(),\n});\n\nexport interface RunProcessSecondStepDataFormProps {\n    name: string;\n    description: string;\n}\n\nexport const SET_WORKFLOW_DIALOG = 'setWorkflowDialog';\nexport const RUN_PROCESS_SECOND_STEP_FORM_NAME = 'runProcessSecondStepFormName';\n\nexport type RunProcessPanelAction = UnionOf<typeof runProcessPanelActions>;\n\nexport const loadRunProcessPanel = () =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            dispatch(setBreadcrumbs([{ label: 'Run Process' }]));\n            const response = await services.workflowService.list({limit: 200});\n            dispatch(runProcessPanelActions.SET_WORKFLOWS(response.items));\n        } catch (e) {\n            return;\n        }\n    };\n\nexport const openSetWorkflowDialog = (workflow: WorkflowResource) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        const selectedWorkflow = 
getState().runProcessPanel.selectedWorkflow;\n        const isStepChanged = getState().runProcessPanel.isStepChanged;\n        if (isStepChanged && selectedWorkflow && selectedWorkflow.uuid !== workflow.uuid) {\n            dispatch(dialogActions.OPEN_DIALOG({\n                id: SET_WORKFLOW_DIALOG,\n                data: {\n                    title: 'Form will be cleared',\n                    text: 'Changing the workflow will clear all input fields in the next step.',\n                    confirmButtonLabel: 'Change Workflow',\n                    workflow\n                }\n            }));\n        } else {\n            dispatch<any>(setWorkflow(workflow, false));\n        }\n    };\n\nexport const getWorkflowRunnerSettings = (workflow: WorkflowResource) => {\n    const advancedFormValues = {};\n    Object.assign(advancedFormValues, DEFAULT_ADVANCED_FORM_VALUES);\n\n    const wf = getWorkflow(parseWorkflowDefinition(workflow));\n    const hints = wf ? wf.hints : undefined;\n    if (hints) {\n        const resc = hints.find(item => item.class === 'http://arvados.org/cwl#WorkflowRunnerResources') as WorkflowRunnerResources | undefined;\n        if (resc) {\n            if (resc.ramMin) { advancedFormValues[RAM_FIELD] = resc.ramMin * (1024 * 1024); }\n            if (resc.coresMin) { advancedFormValues[VCPUS_FIELD] = resc.coresMin; }\n            if (resc.keep_cache) { advancedFormValues[KEEP_CACHE_RAM_FIELD] = resc.keep_cache * (1024 * 1024); }\n            if (resc.acrContainerImage) { advancedFormValues[RUNNER_IMAGE_FIELD] = resc.acrContainerImage; }\n        }\n    }\n    return advancedFormValues;\n};\n\nexport const setWorkflow = (workflow: WorkflowResource, isWorkflowChanged = true) =>\n    (dispatch: Dispatch<any>, getState: () => RootState) => {\n        const isStepChanged = getState().runProcessPanel.isStepChanged;\n\n        const advancedFormValues = getWorkflowRunnerSettings(workflow);\n\n        let owner = getResource<ProjectResource | UserResource>(getState().runProcessPanel.processOwnerUuid)(getState().resources);\n        if (!owner || !owner.canWrite) {\n            owner = undefined;\n        }\n\n        if (isStepChanged && isWorkflowChanged) {\n            dispatch(runProcessPanelActions.SET_STEP_CHANGED(false));\n        }\n        // Select the workflow and (re)initialize the run process forms\n        if (!isWorkflowChanged || isStepChanged) {\n            dispatch(runProcessPanelActions.SET_SELECTED_WORKFLOW(workflow));\n            dispatch<any>(loadPresets(workflow.uuid));\n            dispatch(initialize(RUN_PROCESS_BASIC_FORM, { name: workflow.name, owner }));\n            dispatch(initialize(RUN_PROCESS_ADVANCED_FORM, advancedFormValues));\n        }\n    };\n\nexport const loadPresets = (workflowUuid: string) =>\n    async (dispatch: Dispatch<any>, _: () => RootState, { workflowService }: ServiceRepository) => {\n        const { items } = await workflowService.presets(workflowUuid);\n        dispatch(runProcessPanelActions.SET_WORKFLOW_PRESETS(items));\n    };\n\nexport const selectPreset = (preset: WorkflowResource) =>\n    (dispatch: Dispatch<any>) => {\n        dispatch(runProcessPanelActions.SELECT_WORKFLOW_PRESET(preset));\n        const inputs = getWorkflowInputs(parseWorkflowDefinition(preset)) || [];\n        const values = 
inputs.reduce((values, input) => ({\n            ...values,\n            [input.id]: input.default,\n        }), {});\n        dispatch(initialize(RUN_PROCESS_INPUTS_FORM, values));\n    };\n\nexport const goToStep = (step: number) =>\n    (dispatch: Dispatch) => {\n        if (step === 1) {\n            dispatch(runProcessPanelActions.SET_STEP_CHANGED(true));\n        }\n        dispatch(runProcessPanelActions.SET_CURRENT_STEP(step));\n    };\n\nconst getInputTypes = (state: RootState): Record<string, CWLType[]> | undefined => {\n    if (!state.runProcessPanel?.inputs) return;\n    return state.runProcessPanel.inputs.reduce((acc, val) => {\n        acc[val.id] = val.type;\n        return acc;\n    }, {});\n};\n\nconst nullifyEmptyStrings = (inputsForm: WorkflowInputsData, inputTypes: Record<string, CWLType[]> | undefined): WorkflowInputsData => {\n    if (!inputTypes) return inputsForm;\n    return Object.keys(inputsForm).reduce((acc, key) => {\n        const value = inputsForm[key];\n        // Only nullify empty strings for inputs whose declared CWL type allows null\n        if (value === '' && inputTypes[key]?.includes(CWLType.NULL)) {\n            acc[key] = null;\n        } else {\n            acc[key] = value;\n        }\n        return acc;\n    }, {});\n};\n\nexport const runProcess = async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n    const state = getState();\n    const basicForm = getFormValues(RUN_PROCESS_BASIC_FORM)(state) as RunProcessBasicFormData;\n    const inputsForm = getFormValues(RUN_PROCESS_INPUTS_FORM)(state) as WorkflowInputsData;\n    const inputTypes = getInputTypes(state);\n    const nullifiedInputsForm = nullifyEmptyStrings(inputsForm, inputTypes);\n    const userUuid = getUserUuid(getState());\n    if (!userUuid) { return; }\n    const { processOwnerUuid, selectedWorkflow } = state.runProcessPanel;\n    const ownerUuid = basicForm.owner ? basicForm.owner.uuid : (processOwnerUuid ? processOwnerUuid : userUuid);\n    if (selectedWorkflow) {\n        const advancedForm = getFormValues(RUN_PROCESS_ADVANCED_FORM)(state) as RunProcessAdvancedFormData || getWorkflowRunnerSettings(selectedWorkflow);\n        const inputObject = normalizeInputKeys(nullifiedInputsForm);\n        const secret_mounts = createWorkflowSecretMounts(selectedWorkflow, inputObject);\n        const newProcessData = {\n            ownerUuid,\n            name: basicForm.name,\n            description: advancedForm.description,\n            state: ContainerRequestState.COMMITTED,\n            mounts: createWorkflowMounts(selectedWorkflow, inputObject),\n            secret_mounts: secret_mounts,\n            runtimeConstraints: {\n                API: true,\n                vcpus: advancedForm[VCPUS_FIELD],\n                ram: (advancedForm[KEEP_CACHE_RAM_FIELD] + advancedForm[RAM_FIELD]),\n            },\n            schedulingParameters: {\n                max_run_time: advancedForm[RUNTIME_FIELD]\n            },\n            containerImage: advancedForm[RUNNER_IMAGE_FIELD],\n            cwd: '/var/spool/cwl',\n            command: [\n                'arvados-cwl-runner',\n                '--local',\n                '--api=containers',\n                '--no-log-timestamps',\n                '--disable-color',\n                `--project-uuid=${ownerUuid}`,\n                '/var/lib/cwl/workflow.json#main',\n                '/var/lib/cwl/cwl.input.json'\n            ],\n            outputPath: '/var/spool/cwl',\n            priority: 500,\n            outputName: advancedForm[OUTPUT_FIELD] ? 
advancedForm[OUTPUT_FIELD] : `Output from ${basicForm.name}`,\n            properties: {\n                template_uuid: selectedWorkflow.uuid,\n                workflowName: selectedWorkflow.name\n            },\n            useExisting: false\n        };\n        const newProcess = await services.containerRequestService.create(newProcessData);\n        dispatch(navigateTo(newProcess.uuid));\n    }\n};\n\nconst DEFAULT_ADVANCED_FORM_VALUES: Partial<RunProcessAdvancedFormData> = {\n    [VCPUS_FIELD]: 1,\n    [RAM_FIELD]: 1073741824,\n    [KEEP_CACHE_RAM_FIELD]: 268435456,\n    [RUNNER_IMAGE_FIELD]: \"arvados/jobs\"\n};\n\nconst normalizeInputKeys = (inputs: WorkflowInputsData): WorkflowInputsData =>\n    Object.keys(inputs).reduce((normalizedInputs, key) => ({\n        ...normalizedInputs,\n        [key.split('/').slice(1).join('/')]: inputs[key],\n    }), {});\nexport const searchWorkflows = (term: string) => runProcessPanelActions.SEARCH_WORKFLOWS(term);\n"
  },
  {
    "path": "services/workbench2/src/store/run-process-panel/run-process-panel-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RunProcessPanelAction, runProcessPanelActions } from 'store/run-process-panel/run-process-panel-actions';\nimport { WorkflowResource, CommandInputParameter, getWorkflowInputs, parseWorkflowDefinition } from 'models/workflow';\n\ninterface RunProcessPanel {\n    processPathname: string;\n    processOwnerUuid: string;\n    currentStep: number;\n    isStepChanged: boolean;\n    workflows: WorkflowResource[];\n    searchWorkflows: WorkflowResource[];\n    selectedWorkflow: WorkflowResource | undefined;\n    presets?: WorkflowResource[];\n    selectedPreset?: WorkflowResource;\n    inputs: CommandInputParameter[];\n}\n\nconst initialState: RunProcessPanel = {\n    processPathname: '',\n    processOwnerUuid: '',\n    currentStep: 0,\n    isStepChanged: false,\n    workflows: [],\n    selectedWorkflow: undefined,\n    inputs: [],\n    searchWorkflows: [],\n};\n\nexport const runProcessPanelReducer = (state = initialState, action: RunProcessPanelAction): RunProcessPanel =>\n    runProcessPanelActions.match(action, {\n        SET_PROCESS_PATHNAME: processPathname => ({ ...state, processPathname }),\n        SET_PROCESS_OWNER_UUID: processOwnerUuid => ({ ...state, processOwnerUuid }),\n        SET_CURRENT_STEP: currentStep => ({ ...state, currentStep }),\n        SET_STEP_CHANGED: isStepChanged => ({ ...state, isStepChanged }),\n        SET_SELECTED_WORKFLOW: selectedWorkflow => ({\n            ...state,\n            selectedWorkflow,\n            presets: undefined,\n            selectedPreset: selectedWorkflow,\n            inputs: getWorkflowInputs(parseWorkflowDefinition(selectedWorkflow)) || [],\n        }),\n        SET_WORKFLOW_PRESETS: presets => ({\n            ...state,\n            presets,\n        }),\n        SELECT_WORKFLOW_PRESET: selectedPreset => ({\n            ...state,\n            selectedPreset,\n        }),\n        SET_WORKFLOWS: workflows => ({ ...state, workflows, searchWorkflows: workflows }),\n        SEARCH_WORKFLOWS: term => {\n            // Escape regex metacharacters so user input is always treated as a literal substring\n            const termRegex = new RegExp(term.replace(/[.*+?^${}()|[\\]\\\\]/g, '\\\\$&'), 'i');\n            return {\n                ...state,\n                searchWorkflows: state.workflows.filter(workflow => (termRegex.test(workflow.name)||termRegex.test(workflow.description || \"\"))),\n            };\n        },\n        RESET_RUN_PROCESS_PANEL: () => ({ ...initialState, processOwnerUuid: state.processOwnerUuid }),\n        default: () => state\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/search-bar/search-bar-actions.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { getAdvancedDataFromQuery, getQueryFromAdvancedData } from \"store/search-bar/search-bar-actions\";\nimport { ResourceKind } from \"models/resource\";\n\ndescribe('search-bar-actions', () => {\n    describe('getAdvancedDataFromQuery', () => {\n        it('should correctly build advanced data record from query #1', () => {\n            const r = getAdvancedDataFromQuery('val0 has:\"file size\":\"100mb\" val2 has:\"user\":\"daniel\" is:starred val2 val0');\n            expect(r).to.deep.equal({\n                searchValue: 'val0 val2',\n                type: undefined,\n                cluster: undefined,\n                projectUuid: undefined,\n                inTrash: false,\n                pastVersions: false,\n                dateFrom: '',\n                dateTo: '',\n                properties: [{\n                    key: 'file size',\n                    value: '100mb'\n                }, {\n                    key: 'user',\n                    value: 'daniel'\n                }],\n                saveQuery: false,\n                queryName: ''\n            });\n        });\n\n        it('should correctly build advanced data record from query #2', () => {\n            const r = getAdvancedDataFromQuery('document from:2017-08-01 pdf has:\"filesize\":\"101mb\" is:trashed type:arvados#collection cluster:c97qx is:pastVersion');\n            expect(r).to.deep.equal({\n                searchValue: 'document pdf',\n                type: ResourceKind.COLLECTION,\n                cluster: 'c97qx',\n                projectUuid: undefined,\n                inTrash: true,\n                pastVersions: true,\n                dateFrom: '2017-08-01',\n                dateTo: '',\n                properties: [{\n                    key: 'filesize',\n                    value: '101mb'\n                }],\n                saveQuery: false,\n                queryName: ''\n            });\n        });\n    });\n\n    describe('getQueryFromAdvancedData', () => {\n        it('should build query from advanced data', () => {\n            const q = getQueryFromAdvancedData({\n                searchValue: 'document pdf',\n                type: ResourceKind.COLLECTION,\n                cluster: 'c97qx',\n                projectUuid: undefined,\n                inTrash: true,\n                pastVersions: false,\n                dateFrom: '2017-08-01',\n                dateTo: '',\n                properties: [\n                    { key: 'file size', value: '101mb' },\n                    { key: 'Species', value: 'Human' },\n                    { key: 'Species', value: 'Canine' },\n                ],\n                saveQuery: false,\n                queryName: ''\n            });\n            expect(q).to.equal('document pdf type:arvados#collection cluster:c97qx is:trashed from:2017-08-01 has:\"file size\":\"101mb\" has:\"Species\":\"Human\" has:\"Species\":\"Canine\"');\n        });\n\n        it('should build query from advanced data #2', () => {\n            const q = getQueryFromAdvancedData({\n                searchValue: 'document pdf',\n                type: ResourceKind.COLLECTION,\n                cluster: 'c97qx',\n                projectUuid: undefined,\n                inTrash: false,\n                pastVersions: true,\n                dateFrom: '2017-08-01',\n                dateTo: '',\n                properties: [\n                    { key: 'file size', 
value: '101mb' },\n                    { key: 'Species', value: 'Human' },\n                    { key: 'Species', value: 'Canine' },\n                ],\n                saveQuery: false,\n                queryName: ''\n            });\n            expect(q).to.equal('document pdf type:arvados#collection cluster:c97qx is:pastVersion from:2017-08-01 has:\"file size\":\"101mb\" has:\"Species\":\"Human\" has:\"Species\":\"Canine\"');\n        });\n\n        it('should add has:\"key\":\"value\" expression to query from same property key', () => {\n            const searchValue = 'document pdf has:\"file size\":\"101mb\" has:\"Species\":\"Canine\"';\n            const prevData = {\n                searchValue,\n                type: undefined,\n                cluster: undefined,\n                projectUuid: undefined,\n                inTrash: false,\n                pastVersions: false,\n                dateFrom: '',\n                dateTo: '',\n                properties: [\n                    { key: 'file size', value: '101mb' },\n                    { key: 'Species', value: 'Canine' },\n                ],\n                saveQuery: false,\n                queryName: ''\n            };\n            const currData = {\n                ...prevData,\n                properties: [\n                    { key: 'file size', value: '101mb' },\n                    { key: 'Species', value: 'Canine' },\n                    { key: 'Species', value: 'Human' },\n                ],\n            };\n            const q = getQueryFromAdvancedData(currData, prevData);\n            expect(q).to.equal('document pdf has:\"file size\":\"101mb\" has:\"Species\":\"Canine\" has:\"Species\":\"Human\"');\n        });\n\n        it('should add has:\"keyID\":\"valueID\" expression to query when necessary', () => {\n            const searchValue = 'document pdf has:\"file size\":\"101mb\"';\n            const prevData = {\n                searchValue,\n                type: undefined,\n                cluster: undefined,\n                projectUuid: undefined,\n                inTrash: false,\n                pastVersions: false,\n                dateFrom: '',\n                dateTo: '',\n                properties: [\n                    { key: 'file size', value: '101mb' },\n                ],\n                saveQuery: false,\n                queryName: ''\n            };\n            const currData = {\n                ...prevData,\n                properties: [\n                    { key: 'file size', value: '101mb' },\n                    { key: 'Species', keyID: 'IDTAGSPECIES', value: 'Human', valueID: 'IDVALHUMAN'},\n                ],\n            };\n            const q = getQueryFromAdvancedData(currData, prevData);\n            expect(q).to.equal('document pdf has:\"file size\":\"101mb\" has:\"IDTAGSPECIES\":\"IDVALHUMAN\"');\n        });\n\n        it('should remove has:\"key\":\"value\" expression from query', () => {\n            const searchValue = 'document pdf has:\"file size\":\"101mb\" has:\"Species\":\"Human\" has:\"Species\":\"Canine\"';\n            const prevData = {\n                searchValue,\n                type: undefined,\n                cluster: undefined,\n                projectUuid: undefined,\n                inTrash: false,\n                pastVersions: false,\n                dateFrom: '',\n                dateTo: '',\n                properties: [\n                    { key: 'file size', value: '101mb' },\n                    { key: 'Species', value: 'Canine' },\n         
           { key: 'Species', value: 'Human' },\n                ],\n                saveQuery: false,\n                queryName: ''\n            };\n            const currData = {\n                ...prevData,\n                properties: [\n                    { key: 'file size', value: '101mb' },\n                    { key: 'Species', value: 'Canine' },\n                ],\n            };\n            const q = getQueryFromAdvancedData(currData, prevData);\n            expect(q).to.equal('document pdf has:\"file size\":\"101mb\" has:\"Species\":\"Canine\"');\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/store/search-bar/search-bar-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport axios from \"axios\";\nimport { ofType, unionize, UnionOf } from \"common/unionize\";\nimport { GroupContentsResource, GroupContentsResourcePrefix } from 'services/groups-service/groups-service';\nimport { Dispatch } from 'redux';\nimport { change, initialize, untouch } from 'redux-form';\nimport { RootState } from 'store/store';\nimport { initUserProject, treePickerActions } from 'store/tree-picker/tree-picker-actions';\nimport { ServiceRepository } from 'services/services';\nimport { FilterBuilder } from \"services/api/filter-builder\";\nimport { ResourceKind, RESOURCE_UUID_REGEX, COLLECTION_PDH_REGEX } from 'models/resource';\nimport { SearchView } from 'store/search-bar/search-bar-reducer';\nimport { navigateTo, navigateToSearchResults } from 'store/navigation/navigation-action';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { PropertyValue, SearchBarAdvancedFormData } from 'models/search-bar';\nimport { union } from \"lodash\";\nimport { getModifiedKeysValues } from \"common/objects\";\nimport { activateSearchBarProject } from \"store/search-bar/search-bar-tree-actions\";\nimport { Session } from \"models/session\";\nimport { searchResultsPanelActions } from \"store/search-results-panel/search-results-panel-actions\";\nimport { ListResults } from \"services/common-service/common-service\";\nimport * as parser from './search-query/arv-parser';\nimport { Keywords } from './search-query/arv-parser';\nimport { Vocabulary, getTagKeyLabel, getTagValueLabel } from \"models/vocabulary\";\nimport { PROPERTY_CONTAINS_VALUE_MIN_API_REVISION } from 'common/app-info';\n\nexport const searchBarActions = unionize({\n    SET_CURRENT_VIEW: ofType<string>(),\n    OPEN_SEARCH_VIEW: ofType<{}>(),\n    CLOSE_SEARCH_VIEW: ofType<{}>(),\n    SET_SEARCH_RESULTS: ofType<GroupContentsResource[]>(),\n    SET_SEARCH_VALUE: ofType<string>(),\n    SET_SAVED_QUERIES: ofType<SearchBarAdvancedFormData[]>(),\n    SET_RECENT_QUERIES: ofType<string[]>(),\n    UPDATE_SAVED_QUERY: ofType<SearchBarAdvancedFormData[]>(),\n    SET_SELECTED_ITEM: ofType<string>(),\n    MOVE_UP: ofType<{}>(),\n    MOVE_DOWN: ofType<{}>(),\n    SELECT_FIRST_ITEM: ofType<{}>(),\n    SET_SEARCH_OFFSETS: ofType<Record<string, number>>(),\n});\n\nexport type SearchBarActions = UnionOf<typeof searchBarActions>;\n\nexport const SEARCH_BAR_ADVANCED_FORM_NAME = 'searchBarAdvancedFormName';\n\nexport const SEARCH_BAR_ADVANCED_FORM_PICKER_ID = 'searchBarAdvancedFormPickerId';\n\nexport const DEFAULT_SEARCH_DEBOUNCE = 1000;\n\nexport const goToView = (currentView: string) => searchBarActions.SET_CURRENT_VIEW(currentView);\n\nexport const saveRecentQuery = (query: string) =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) =>\n        services.searchService.saveRecentQuery(query);\n\n\nexport const loadRecentQueries = () =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const recentQueries = services.searchService.getRecentQueries();\n        dispatch(searchBarActions.SET_RECENT_QUERIES(recentQueries));\n        return [...recentQueries].reverse();\n    };\n\nexport const searchData = (searchValue: string, useCancel = false) =>\n    async (dispatch: Dispatch, getState: () => RootState) => {\n        const currentView = getState().searchBar.currentView;\n        
dispatch(searchResultsPanelActions.CLEAR());\n        dispatch(searchBarActions.SET_SEARCH_VALUE(searchValue));\n        if (searchValue.length > 0) {\n            dispatch<any>(searchGroups(searchValue, 5, useCancel));\n            if (currentView === SearchView.BASIC) {\n                dispatch(searchBarActions.CLOSE_SEARCH_VIEW());\n                dispatch(navigateToSearchResults(searchValue));\n            }\n        }\n    };\n\nexport const searchAdvancedData = (data: SearchBarAdvancedFormData) =>\n    async (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch<any>(saveQuery(data));\n        const searchValue = getState().searchBar.searchValue;\n        dispatch(searchResultsPanelActions.CLEAR());\n        dispatch(searchBarActions.SET_CURRENT_VIEW(SearchView.BASIC));\n        dispatch(searchBarActions.CLOSE_SEARCH_VIEW());\n        dispatch(navigateToSearchResults(searchValue));\n    };\n\nexport const setSearchValueFromAdvancedData = (data: SearchBarAdvancedFormData, prevData?: SearchBarAdvancedFormData) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        if (data.projectObject) {\n            data.projectUuid = data.projectObject.uuid;\n        }\n        const searchValue = getState().searchBar.searchValue;\n        const value = getQueryFromAdvancedData({\n            ...data,\n            searchValue\n        }, prevData);\n        dispatch(searchBarActions.SET_SEARCH_VALUE(value));\n    };\n\nexport const setAdvancedDataFromSearchValue = (search: string, vocabulary: Vocabulary) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const data = getAdvancedDataFromQuery(search, vocabulary);\n        if (data.projectUuid) {\n            data.projectObject = await services.projectService.get(data.projectUuid);\n        }\n        dispatch<any>(initialize(SEARCH_BAR_ADVANCED_FORM_NAME, data));\n        if (data.projectUuid) {\n            await dispatch<any>(activateSearchBarProject(data.projectUuid));\n            dispatch(treePickerActions.ACTIVATE_TREE_PICKER_NODE({ pickerId: SEARCH_BAR_ADVANCED_FORM_PICKER_ID, id: data.projectUuid }));\n        }\n    };\n\nconst saveQuery = (data: SearchBarAdvancedFormData) =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const savedQueries = services.searchService.getSavedQueries();\n        if (data.saveQuery && data.queryName) {\n            const filteredQuery = savedQueries.find(query => query.queryName === data.queryName);\n            data.searchValue = getState().searchBar.searchValue;\n            if (filteredQuery) {\n                services.searchService.editSavedQueries(data);\n                dispatch(searchBarActions.UPDATE_SAVED_QUERY(savedQueries));\n                dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Query has been successfully updated', hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n            } else {\n                services.searchService.saveQuery(data);\n                dispatch(searchBarActions.SET_SAVED_QUERIES(savedQueries));\n                dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Query has been successfully saved', hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n            }\n        }\n    };\n\nexport const deleteSavedQuery = (id: number) =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        services.searchService.deleteSavedQuery(id);\n        const savedSearchQueries = 
services.searchService.getSavedQueries();\n        dispatch(searchBarActions.SET_SAVED_QUERIES(savedSearchQueries));\n        return savedSearchQueries || [];\n    };\n\nexport const editSavedQuery = (data: SearchBarAdvancedFormData) =>\n    (dispatch: Dispatch<any>) => {\n        dispatch(searchBarActions.SET_CURRENT_VIEW(SearchView.ADVANCED));\n        dispatch(searchBarActions.SET_SEARCH_VALUE(getQueryFromAdvancedData(data)));\n        dispatch<any>(initialize(SEARCH_BAR_ADVANCED_FORM_NAME, data));\n    };\n\nexport const openSearchView = () =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const savedSearchQueries = services.searchService.getSavedQueries();\n        dispatch(searchBarActions.SET_SAVED_QUERIES(savedSearchQueries));\n        dispatch(loadRecentQueries());\n        dispatch(searchBarActions.OPEN_SEARCH_VIEW());\n        dispatch(searchBarActions.SELECT_FIRST_ITEM());\n    };\n\nexport const closeSearchView = () =>\n    (dispatch: Dispatch<any>) => {\n        dispatch(searchBarActions.SET_SELECTED_ITEM(''));\n        dispatch(searchBarActions.CLOSE_SEARCH_VIEW());\n    };\n\nexport const closeAdvanceView = () =>\n    (dispatch: Dispatch<any>) => {\n        dispatch(searchBarActions.SET_SEARCH_VALUE(''));\n        dispatch(treePickerActions.DEACTIVATE_TREE_PICKER_NODE({ pickerId: SEARCH_BAR_ADVANCED_FORM_PICKER_ID }));\n        dispatch(searchBarActions.SET_CURRENT_VIEW(SearchView.BASIC));\n    };\n\nexport const navigateToItem = (uuid: string) =>\n    (dispatch: Dispatch<any>) => {\n        dispatch(searchBarActions.SET_SELECTED_ITEM(''));\n        dispatch(searchBarActions.CLOSE_SEARCH_VIEW());\n        dispatch(navigateTo(uuid));\n    };\n\nexport const changeData = (searchValue: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch(searchBarActions.SET_SEARCH_VALUE(searchValue));\n        const currentView = getState().searchBar.currentView;\n        const searchValuePresent = searchValue.length > 0;\n\n        if (currentView === SearchView.ADVANCED) {\n            dispatch(searchBarActions.SET_CURRENT_VIEW(SearchView.AUTOCOMPLETE));\n        } else if (searchValuePresent) {\n            dispatch(searchBarActions.SET_CURRENT_VIEW(SearchView.AUTOCOMPLETE));\n            dispatch(searchBarActions.SET_SELECTED_ITEM(searchValue));\n        } else {\n            dispatch(searchBarActions.SET_CURRENT_VIEW(SearchView.BASIC));\n            dispatch(searchBarActions.SET_SEARCH_RESULTS([]));\n            dispatch(searchBarActions.SELECT_FIRST_ITEM());\n        }\n    };\n\nexport const submitData = (event: React.FormEvent<HTMLFormElement>) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        event.preventDefault();\n        const searchValue = getState().searchBar.searchValue;\n        dispatch<any>(saveRecentQuery(searchValue));\n        dispatch<any>(loadRecentQueries());\n        dispatch(searchBarActions.CLOSE_SEARCH_VIEW());\n        if (RESOURCE_UUID_REGEX.exec(searchValue) || COLLECTION_PDH_REGEX.exec(searchValue)) {\n            dispatch<any>(navigateTo(searchValue));\n        } else {\n            dispatch(searchBarActions.SET_SEARCH_VALUE(searchValue));\n            dispatch(searchBarActions.SET_SEARCH_RESULTS([]));\n            dispatch(searchResultsPanelActions.CLEAR());\n            dispatch(navigateToSearchResults(searchValue));\n        }\n    };\n\nlet cancelTokens: any[] = [];\nconst searchGroups = (searchValue: string, limit: number, useCancel = false) =>\n   
 async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const currentView = getState().searchBar.currentView;\n\n        if (cancelTokens.length > 0 && useCancel) {\n            cancelTokens.forEach(cancelToken => (cancelToken as any).cancel('New search request triggered.'));\n            cancelTokens = [];\n        }\n\n        setTimeout(async () => {\n            if (searchValue || currentView === SearchView.ADVANCED) {\n                const { cluster: clusterId } = getAdvancedDataFromQuery(searchValue);\n                const sessions = getSearchSessions(clusterId, getState().auth.sessions);\n                const lists: ListResults<GroupContentsResource>[] = await Promise.all(sessions.map((session, index) => {\n                    cancelTokens.push(axios.CancelToken.source());\n                    const filters = queryToFilters(searchValue, session.apiRevision);\n                    return services.groupsService.contents('', {\n                        filters,\n                        limit,\n                        recursive: true\n                    }, session, cancelTokens[index].token);\n                }));\n\n                cancelTokens = [];\n\n                const items = lists.reduce((items, list) => items.concat(list.items), [] as GroupContentsResource[]);\n\n                if (lists.filter(list => !!(list as any).items).length !== lists.length) {\n                    dispatch(searchBarActions.SET_SEARCH_RESULTS([]));\n                } else {\n                    dispatch(searchBarActions.SET_SEARCH_RESULTS(items));\n                }\n            }\n        }, 10);\n    };\n\nconst buildQueryFromKeyMap = (data: any, keyMap: string[][]) => {\n    let value = data.searchValue;\n\n    const addRem = (field: string, key: string) => {\n        const v = data[key];\n        // Remove previous search expression.\n        if (data.hasOwnProperty(key)) {\n            let pattern: string;\n            if (v === false) {\n                pattern = `${field.replace(':', '\\\\:\\\\s*')}\\\\s*`;\n            } else if (key.startsWith('prop-')) {\n                // On properties, only remove key:value duplicates, allowing\n                // multiple properties with the same key.\n                const oldValue = key.slice(5).split(':')[1];\n                pattern = `${field.replace(':', '\\\\:\\\\s*')}\\\\:\\\\s*${oldValue}\\\\s*`;\n            } else {\n                pattern = `${field.replace(':', '\\\\:\\\\s*')}\\\\:\\\\s*[\\\\w|\\\\#|\\\\-|\\\\/]*\\\\s*`;\n            }\n            value = value.replace(new RegExp(pattern), '');\n        }\n        // Re-add it with the current search value.\n        if (v) {\n            const nv = v === true\n                ? 
`${field}`\n                : `${field}:${v}`;\n            // Always append to the end to keep user-entered text at the start.\n            value = value + ' ' + nv;\n        }\n    };\n    keyMap.forEach(km => addRem(km[0], km[1]));\n    return value;\n};\n\nexport const getQueryFromAdvancedData = (data: SearchBarAdvancedFormData, prevData?: SearchBarAdvancedFormData) => {\n    let value = '';\n\n    const flatData = (data: SearchBarAdvancedFormData) => {\n        const fo = {\n            searchValue: data.searchValue,\n            type: data.type,\n            cluster: data.cluster,\n            projectUuid: data.projectUuid,\n            inTrash: data.inTrash,\n            pastVersions: data.pastVersions,\n            dateFrom: data.dateFrom,\n            dateTo: data.dateTo,\n        };\n        (data.properties || []).forEach(p =>\n            fo[`prop-\"${p.keyID || p.key}\":\"${p.valueID || p.value}\"`] = `\"${p.valueID || p.value}\"`\n        );\n        return fo;\n    };\n\n    const keyMap = [\n        ['type', 'type'],\n        ['cluster', 'cluster'],\n        ['project', 'projectUuid'],\n        [`is:${parser.States.TRASHED}`, 'inTrash'],\n        [`is:${parser.States.PAST_VERSION}`, 'pastVersions'],\n        ['from', 'dateFrom'],\n        ['to', 'dateTo']\n    ];\n    union(data.properties, prevData ? prevData.properties : [])\n        .forEach(p => keyMap.push(\n            [`has:\"${p.keyID || p.key}\"`, `prop-\"${p.keyID || p.key}\":\"${p.valueID || p.value}\"`]\n        ));\n\n    const modified = getModifiedKeysValues(flatData(data), prevData ? flatData(prevData) : {});\n    value = buildQueryFromKeyMap(\n        { searchValue: data.searchValue, ...modified } as SearchBarAdvancedFormData, keyMap);\n\n    value = value.trim();\n    return value;\n};\n\nexport const getAdvancedDataFromQuery = (query: string, vocabulary?: Vocabulary): SearchBarAdvancedFormData => {\n    const { tokens, searchString } = parser.parseSearchQuery(query);\n    const getValue = parser.getValue(tokens);\n    return {\n        searchValue: searchString,\n        type: getValue(Keywords.TYPE) as ResourceKind,\n        cluster: getValue(Keywords.CLUSTER),\n        projectUuid: getValue(Keywords.PROJECT),\n        inTrash: parser.isTrashed(tokens),\n        pastVersions: parser.isPastVersion(tokens),\n        dateFrom: getValue(Keywords.FROM) || '',\n        dateTo: getValue(Keywords.TO) || '',\n        properties: vocabulary\n            ? 
parser.getProperties(tokens).map(\n                p => {\n                    return {\n                        keyID: p.key,\n                        key: getTagKeyLabel(p.key, vocabulary),\n                        valueID: p.value,\n                        value: getTagValueLabel(p.key, p.value, vocabulary),\n                    };\n                })\n            : parser.getProperties(tokens),\n        saveQuery: false,\n        queryName: ''\n    };\n};\n\nexport const getSearchSessions = (clusterId: string | undefined, sessions: Session[]): Session[] => {\n    return sessions.filter(s => s.loggedIn && (!clusterId || s.clusterId === clusterId));\n};\n\nexport const queryToFilters = (query: string, apiRevision: number) => {\n    const data = getAdvancedDataFromQuery(query);\n    const filter = new FilterBuilder();\n    const resourceKind = data.type;\n\n    if (data.searchValue) {\n        filter.addFullTextSearch(data.searchValue);\n    }\n\n    if (data.projectUuid) {\n        filter.addEqual('owner_uuid', data.projectUuid);\n    }\n\n    if (data.dateFrom) {\n        filter.addGte('modified_at', buildDateFilter(data.dateFrom));\n    }\n\n    if (data.dateTo) {\n        filter.addLte('modified_at', buildDateFilter(data.dateTo));\n    }\n\n    data.properties.forEach(p => {\n        if (p.value) {\n            if (apiRevision < PROPERTY_CONTAINS_VALUE_MIN_API_REVISION) {\n                filter\n                    .addEqual(`properties.${p.key}`, p.value, GroupContentsResourcePrefix.PROJECT)\n                    .addEqual(`properties.${p.key}`, p.value, GroupContentsResourcePrefix.COLLECTION)\n                    .addEqual(`properties.${p.key}`, p.value, GroupContentsResourcePrefix.PROCESS);\n            } else {\n                filter\n                    .addContains(`properties.${p.key}`, p.value, GroupContentsResourcePrefix.PROJECT)\n                    .addContains(`properties.${p.key}`, p.value, GroupContentsResourcePrefix.COLLECTION)\n                    .addContains(`properties.${p.key}`, p.value, GroupContentsResourcePrefix.PROCESS);\n            }\n        }\n        filter.addExists(p.key);\n    });\n\n    return filter\n        .addIsA(\"uuid\", buildUuidFilter(resourceKind))\n        .getFilters();\n};\n\nconst buildUuidFilter = (type?: ResourceKind): ResourceKind[] => {\n    return type ? [type] : [ResourceKind.PROJECT, ResourceKind.COLLECTION, ResourceKind.PROCESS];\n};\n\nconst buildDateFilter = (date?: string): string => {\n    return date ? 
date : '';\n};\n\nexport const initAdvancedFormProjectsTree = () =>\n    (dispatch: Dispatch) => {\n        dispatch<any>(initUserProject(SEARCH_BAR_ADVANCED_FORM_PICKER_ID));\n    };\n\nexport const changeAdvancedFormProperty = (propertyField: string, value: PropertyValue[] | string = '') =>\n    (dispatch: Dispatch) => {\n        dispatch(change(SEARCH_BAR_ADVANCED_FORM_NAME, propertyField, value));\n    };\n\nexport const resetAdvancedFormProperty = (propertyField: string) =>\n    (dispatch: Dispatch) => {\n        dispatch(change(SEARCH_BAR_ADVANCED_FORM_NAME, propertyField, null));\n        dispatch(untouch(SEARCH_BAR_ADVANCED_FORM_NAME, propertyField));\n    };\n\nexport const moveUp = () =>\n    (dispatch: Dispatch) => {\n        dispatch(searchBarActions.MOVE_UP());\n    };\n\nexport const moveDown = () =>\n    (dispatch: Dispatch) => {\n        dispatch(searchBarActions.MOVE_DOWN());\n    };\n\nexport const setSearchOffsets = (sessionId: string, offset: number) => {\n    return (dispatch: Dispatch) => {\n        // Record the last-requested offset for this cluster so duplicate page fetches can be skipped.\n        dispatch(searchBarActions.SET_SEARCH_OFFSETS({ id: sessionId, offset }));\n    };\n};\n"
  },
  {
    "path": "services/workbench2/src/store/search-bar/search-bar-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport {\n    getQueryFromAdvancedData,\n    searchBarActions,\n    SearchBarActions\n} from 'store/search-bar/search-bar-actions';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { SearchBarAdvancedFormData } from 'models/search-bar';\n\ntype SearchResult = GroupContentsResource;\nexport type SearchBarSelectedItem = {\n    id: string,\n    query: string\n};\n\ninterface SearchBar {\n    currentView: string;\n    open: boolean;\n    searchResults: SearchResult[];\n    searchValue: string;\n    savedQueries: SearchBarAdvancedFormData[];\n    recentQueries: string[];\n    selectedItem: SearchBarSelectedItem;\n    searchOffsets: Record<string, number>;\n}\n\nexport enum SearchView {\n    BASIC = 'basic',\n    ADVANCED = 'advanced',\n    AUTOCOMPLETE = 'autocomplete'\n}\n\nconst initialState: SearchBar = {\n    currentView: SearchView.BASIC,\n    open: false,\n    searchResults: [],\n    searchValue: '',\n    savedQueries: [],\n    recentQueries: [],\n    selectedItem: {\n        id: '',\n        query: ''\n    },\n    searchOffsets: {},\n};\n\nconst makeSelectedItem = (id: string, query?: string): SearchBarSelectedItem => ({ id, query: query ? query : id });\n\nconst makeQueryList = (recentQueries: string[], savedQueries: SearchBarAdvancedFormData[]) => {\n    const recentIds = recentQueries.map((q, idx) => makeSelectedItem(`RQ-${idx}-${q}`, q));\n    const savedIds = savedQueries.map((q, idx) => makeSelectedItem(`SQ-${idx}-${q.queryName}`, getQueryFromAdvancedData(q)));\n    return recentIds.concat(savedIds);\n};\n\nexport const searchBarReducer = (state = initialState, action: SearchBarActions): SearchBar =>\n    searchBarActions.match(action, {\n        SET_CURRENT_VIEW: currentView => ({\n            ...state,\n            currentView,\n            open: true\n        }),\n        OPEN_SEARCH_VIEW: () => ({ ...state, open: true }),\n        CLOSE_SEARCH_VIEW: () => ({ ...state, open: false }),\n        SET_SEARCH_RESULTS: searchResults => ({\n            ...state,\n            searchResults,\n            selectedItem: makeSelectedItem(searchResults.length > 0\n                ? searchResults.findIndex(r => r.uuid === state.selectedItem.id) >= 0\n                    ? 
state.selectedItem.id\n                    : state.searchValue\n                : state.searchValue\n            )\n        }),\n        SET_SEARCH_VALUE: searchValue => ({\n            ...state,\n            searchValue\n        }),\n        SET_SAVED_QUERIES: savedQueries => ({ ...state, savedQueries }),\n        SET_RECENT_QUERIES: recentQueries => ({ ...state, recentQueries }),\n        UPDATE_SAVED_QUERY: searchQuery => ({ ...state, savedQueries: searchQuery }),\n        SET_SELECTED_ITEM: item => ({ ...state, selectedItem: makeSelectedItem(item) }),\n        MOVE_UP: () => {\n            let selectedItem = state.selectedItem;\n            if (state.currentView === SearchView.AUTOCOMPLETE) {\n                const idx = state.searchResults.findIndex(r => r.uuid === selectedItem.id);\n                if (idx > 0) {\n                    selectedItem = makeSelectedItem(state.searchResults[idx - 1].uuid);\n                } else {\n                    selectedItem = makeSelectedItem(state.searchValue);\n                }\n            } else if (state.currentView === SearchView.BASIC) {\n                const items = makeQueryList(state.recentQueries, state.savedQueries);\n\n                const idx = items.findIndex(i => i.id === selectedItem.id);\n                if (idx > 0) {\n                    selectedItem = items[idx - 1];\n                }\n            }\n            return {\n                ...state,\n                selectedItem\n            };\n        },\n        MOVE_DOWN: () => {\n            let selectedItem = state.selectedItem;\n            if (state.currentView === SearchView.AUTOCOMPLETE) {\n                const idx = state.searchResults.findIndex(r => r.uuid === selectedItem.id);\n                if (idx >= 0 && idx < state.searchResults.length - 1) {\n                    selectedItem = makeSelectedItem(state.searchResults[idx + 1].uuid);\n                } else if (idx < 0 && state.searchResults.length > 0) {\n                    selectedItem = makeSelectedItem(state.searchResults[0].uuid);\n                }\n            } else if (state.currentView === SearchView.BASIC) {\n                const items = makeQueryList(state.recentQueries, state.savedQueries);\n\n                const idx = items.findIndex(i => i.id === selectedItem.id);\n                if (idx >= 0 && idx < items.length - 1) {\n                    selectedItem = items[idx + 1];\n                }\n\n                if (idx < 0 && items.length > 0) {\n                    selectedItem = items[0];\n                }\n            }\n            return {\n                ...state,\n                selectedItem\n            };\n        },\n        SELECT_FIRST_ITEM: () => {\n            let selectedItem = state.selectedItem;\n            if (state.currentView === SearchView.AUTOCOMPLETE) {\n                selectedItem = makeSelectedItem(state.searchValue);\n            } else if (state.currentView === SearchView.BASIC) {\n                const items = makeQueryList(state.recentQueries, state.savedQueries);\n                if (items.length > 0) {\n                    selectedItem = items[0];\n                }\n            }\n            return {\n                ...state,\n                selectedItem\n            };\n        },\n        SET_SEARCH_OFFSETS: ({id, offset}) => {\n            return {...state, searchOffsets: {...state.searchOffsets, [id]: offset}};\n        },\n        default: () => state\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/search-bar/search-bar-tree-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { getTreePicker, TreePicker } from \"store/tree-picker/tree-picker\";\nimport { getNode, getNodeAncestorsIds, initTreeNode } from \"models/tree\";\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { getUserUuid } from \"common/getuser\";\nimport { ServiceRepository } from \"services/services\";\nimport { treePickerActions } from \"store/tree-picker/tree-picker-actions\";\nimport { FilterBuilder } from \"services/api/filter-builder\";\nimport { OrderBuilder } from \"services/api/order-builder\";\nimport { ProjectResource } from \"models/project\";\nimport { resourcesActions } from \"store/resources/resources-actions\";\nimport { SEARCH_BAR_ADVANCED_FORM_PICKER_ID } from \"store/search-bar/search-bar-actions\";\n\nconst getSearchBarTreeNode = (id: string) => (treePicker: TreePicker) => {\n    const searchTree = getTreePicker(SEARCH_BAR_ADVANCED_FORM_PICKER_ID)(treePicker);\n    return searchTree\n        ? getNode(id)(searchTree)\n        : undefined;\n};\n\nexport const loadSearchBarTreeProjects = (projectUuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState) => {\n        const treePicker = getTreePicker(SEARCH_BAR_ADVANCED_FORM_PICKER_ID)(getState().treePicker);\n        const node = treePicker ? getNode(projectUuid)(treePicker) : undefined;\n        if (node || projectUuid === '') {\n            await dispatch<any>(loadSearchBarProject(projectUuid));\n        }\n    };\n\nexport const getSearchBarTreeNodeAncestorsIds = (id: string) => (treePicker: TreePicker) => {\n    const searchTree = getTreePicker(SEARCH_BAR_ADVANCED_FORM_PICKER_ID)(treePicker);\n    return searchTree\n        ? 
getNodeAncestorsIds(id)(searchTree)\n        : [];\n};\n\nexport const activateSearchBarTreeBranch = (id: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const userUuid = getUserUuid(getState());\n        if (!userUuid) { return; }\n        const ancestors = await services.ancestorsService.ancestors(id, userUuid);\n\n        for (const ancestor of ancestors) {\n            await dispatch<any>(loadSearchBarTreeProjects(ancestor.uuid));\n        }\n        dispatch(treePickerActions.EXPAND_TREE_PICKER_NODES({\n            ids: ancestors.map(ancestor => ancestor.uuid),\n            pickerId: SEARCH_BAR_ADVANCED_FORM_PICKER_ID\n        }));\n        dispatch(treePickerActions.ACTIVATE_TREE_PICKER_NODE({ id, pickerId: SEARCH_BAR_ADVANCED_FORM_PICKER_ID }));\n    };\n\nexport const expandSearchBarTreeItem = (id: string) =>\n    async (dispatch: Dispatch, getState: () => RootState) => {\n        const node = getSearchBarTreeNode(id)(getState().treePicker);\n        if (node && !node.expanded) {\n            dispatch(treePickerActions.TOGGLE_TREE_PICKER_NODE_COLLAPSE({ id, pickerId: SEARCH_BAR_ADVANCED_FORM_PICKER_ID }));\n        }\n    };\n\nexport const activateSearchBarProject = (id: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        // Currently a no-op: the previous activation logic is kept below for reference.\n        /*const { treePicker } = getState();\n        const node = getSearchBarTreeNode(id)(treePicker);\n        if (node && node.status !== TreeNodeStatus.LOADED) {\n            await dispatch<any>(loadSearchBarTreeProjects(id));\n        } else if (node === undefined) {\n            await dispatch<any>(activateSearchBarTreeBranch(id));\n        }\n        dispatch(treePickerActions.EXPAND_TREE_PICKER_NODES({\n            ids: getSearchBarTreeNodeAncestorsIds(id)(treePicker),\n            pickerId: SEARCH_BAR_ADVANCED_FORM_PICKER_ID\n        }));\n        dispatch<any>(expandSearchBarTreeItem(id));*/\n    };\n\nconst loadSearchBarProject = (projectUuid: string) =>\n    async (dispatch: Dispatch, _: () => RootState, services: ServiceRepository) => {\n        dispatch(treePickerActions.LOAD_TREE_PICKER_NODE({ id: projectUuid, pickerId: SEARCH_BAR_ADVANCED_FORM_PICKER_ID }));\n        const params = {\n            filters: new FilterBuilder()\n                .addEqual('owner_uuid', projectUuid)\n                .getFilters(),\n            order: new OrderBuilder<ProjectResource>()\n                .addAsc('name')\n                .getOrder()\n        };\n        const { items } = await services.projectService.list(params);\n        dispatch(treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({\n            id: projectUuid,\n            pickerId: SEARCH_BAR_ADVANCED_FORM_PICKER_ID,\n            nodes: items.map(item => initTreeNode({ id: item.uuid, value: item })),\n        }));\n        dispatch(resourcesActions.SET_RESOURCES(items));\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/search-bar/search-query/arv-parser.ts",
    "content": "\n// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport * as parser from 'store/search-bar/search-query/parser';\n\ninterface Property {\n    key: string;\n    value: string;\n}\n\nexport enum Keywords {\n    TYPE = 'type',\n    CLUSTER = 'cluster',\n    PROJECT = 'project',\n    IS = 'is',\n    FROM = 'from',\n    TO = 'to',\n}\n\nexport enum States {\n    TRASHED = 'trashed',\n    PAST_VERSION = 'pastVersion'\n}\n\nconst keyValuePattern = (key: string) => new RegExp(`${key}:([^ ]*)`);\nconst propertyPattern = /has:\"(.*?)\":\"(.*?)\"/;\n\nconst patterns = [\n    keyValuePattern(Keywords.TYPE),\n    keyValuePattern(Keywords.CLUSTER),\n    keyValuePattern(Keywords.PROJECT),\n    keyValuePattern(Keywords.IS),\n    keyValuePattern(Keywords.FROM),\n    keyValuePattern(Keywords.TO),\n    propertyPattern\n];\n\nexport const parseSearchQuery = parser.parseSearchQuery(patterns);\n\nexport const getValue = (tokens: string[]) => (key: string) => {\n    const pattern = keyValuePattern(key);\n    const token = tokens.find(t => pattern.test(t));\n    if (token) {\n        const [, value] = token.split(':');\n        return value;\n    }\n    return undefined;\n};\n\nexport const getProperties = (tokens: string[]) =>\n    tokens.reduce((properties, token) => {\n        const match = token.match(propertyPattern);\n        if (match) {\n            const [, key, value] = match;\n            const newProperty = { key, value };\n            return [...properties, newProperty];\n        }\n        return properties;\n    }, [] as Property[]);\n\n\nexport const isTrashed = (tokens: string[]) => isSomeState(States.TRASHED, tokens);\n\nexport const isPastVersion = (tokens: string[]) => isSomeState(States.PAST_VERSION, tokens);\n\nconst isSomeState = (state: string, tokens: string[]) => {\n    for (const token of tokens) {\n        const match = token.match(keyValuePattern(Keywords.IS)) || ['', ''];\n        if (match) {\n            const [, value] = match;\n            if(value === state) {\n                return true;\n            }\n        }\n    }\n    return false;\n};\n"
  },
  {
    "path": "services/workbench2/src/store/search-bar/search-query/parser.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { uniq } from 'lodash/fp';\n\nexport interface ParsedSearchQuery {\n    tokens: string[];\n    searchString: string;\n}\n\nexport const findToken = (query: string, patterns: RegExp[]) => {\n    for (const pattern of patterns) {\n        const match = query.match(pattern);\n        if (match) {\n            return match[0];\n        }\n    }\n    return null;\n};\n\nexport const findAllTokens = (query: string, patterns: RegExp[]): string[] => {\n    const token = findToken(query, patterns);\n    return token\n        ? [token].concat(findAllTokens(query.replace(token, ''), patterns))\n        : [];\n};\n\nexport const findSearchString = (query: string, tokens: string[]) => {\n    const uniqueWords = uniq(tokens\n        .reduce((q, token) => q.replace(token, ''), query)\n        .split(' ')\n        .filter(word => word !== '')\n    );\n    return uniqueWords.join(' ');\n};\n\nexport const parseSearchQuery = (patterns: RegExp[]) => (query: string): ParsedSearchQuery => {\n    const tokens = findAllTokens(query, patterns);\n    const searchString = findSearchString(query, tokens);\n    return { tokens, searchString };\n};\n"
  },
  {
    "path": "services/workbench2/src/store/search-results-panel/search-results-middleware-service.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { initialDataExplorer } from '../data-explorer/data-explorer-reducer'\nimport { getParams } from './search-results-middleware-service'\n\ndescribe('search-results-middleware', () => {\n    describe('getParams', () => {\n        it('should use include_old_versions=true when asked', () => {\n            const dataExplorer = initialDataExplorer;\n            const query = 'Search term is:pastVersion';\n            const apiRev = 20201013;\n            const params = getParams(dataExplorer, query, apiRev);\n            expect(params.includeOldVersions).to.equal(true);\n        });\n\n        it('should not use include_old_versions=true when not asked', () => {\n            const dataExplorer = initialDataExplorer;\n            const query = 'Search term';\n            const apiRev = 20201013;\n            const params = getParams(dataExplorer, query, apiRev);\n            expect(params.includeOldVersions).to.equal(false);\n        });\n    })\n})"
  },
  {
    "path": "services/workbench2/src/store/search-results-panel/search-results-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ServiceRepository } from 'services/services';\nimport { MiddlewareAPI, Dispatch } from 'redux';\nimport { DataExplorerMiddlewareService, dataExplorerToListParams, listResultsToDataExplorerItemsMeta, getDataExplorerColumnFilters } from 'store/data-explorer/data-explorer-middleware-service';\nimport { RootState } from 'store/store';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { DataExplorer, getDataExplorer } from 'store/data-explorer/data-explorer-reducer';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { DataColumns, SortDirection } from 'components/data-table/data-column';\nimport { OrderDirection, OrderBuilder } from 'services/api/order-builder';\nimport { GroupContentsResource, GroupContentsResourcePrefix, ContentsArguments } from \"services/groups-service/groups-service\";\nimport { ListResults } from 'services/common-service/common-service';\nimport { searchResultsPanelActions } from 'store/search-results-panel/search-results-panel-actions';\nimport {\n    getSearchSessions,\n    queryToFilters,\n    getAdvancedDataFromQuery,\n    setSearchOffsets,\n} from 'store/search-bar/search-bar-actions';\nimport { getSortColumn } from \"store/data-explorer/data-explorer-reducer\";\nimport { FilterBuilder, joinFilters } from 'services/api/filter-builder';\nimport { serializeResourceTypeFilters } from 'store//resource-type-filters/resource-type-filters';\nimport { SearchResultsPanelColumnNames } from 'views/search-results-panel/search-results-panel-columns';\nimport { ResourceKind } from 'models/resource';\nimport { ContainerRequestResource } from 'models/container-request';\nimport { progressIndicatorActions } from 'store/progress-indicator/progress-indicator-actions';\nimport { dataExplorerActions } from 'store/data-explorer/data-explorer-action';\nimport { Session } from 'models/session';\nimport { SEARCH_RESULTS_PANEL_ID } from 'store/search-results-panel/search-results-panel-actions';\nimport { GROUP_CONTENTS_INCLUDE_CONTAINER_UUID_MIN_API_REVISION } from 'common/app-info';\n\nexport class SearchResultsMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        const searchValue = state.searchBar.searchValue;\n        const { cluster: clusterId } = getAdvancedDataFromQuery(searchValue);\n        const sessions = getSearchSessions(clusterId, state.auth.sessions);\n\n        if (searchValue.trim() === '') {\n            return;\n        }\n\n        const initial = {\n            itemsAvailable: 0,\n            items: [] as GroupContentsResource[],\n            kind: '',\n            offset: 0,\n            limit: 50\n        };\n\n        if (criteriaChanged) {\n            api.dispatch(setItems(initial));\n        }\n\n        const numberOfSessions = sessions.length;\n        let numberOfResolvedResponses = 0;\n        let totalNumItemsAvailable = 0;\n        api.dispatch(progressIndicatorActions.START_WORKING(this.id))\n        api.dispatch(dataExplorerActions.SET_IS_NOT_FOUND({ id: this.id, isNotFound: false }));\n\n        //In SearchResultsPanel, if we don't reset the 
items available, the items available will\n        // be added to the previous value every time the 'load more' button is clicked.\n        api.dispatch(resetItemsAvailable());\n\n        sessions.forEach(session => {\n            const params: ContentsArguments = getParams(dataExplorer, searchValue, session.apiRevision);\n            // This prevents double-fetching of the same search results when a new session is logged in.\n            api.dispatch<any>(setSearchOffsets(session.clusterId, params.offset || 0));\n\n            if (session.apiRevision >= GROUP_CONTENTS_INCLUDE_CONTAINER_UUID_MIN_API_REVISION) {\n                params.include = [\"owner_uuid\", \"container_uuid\"];\n            } else {\n                params.include = \"owner_uuid\";\n            }\n\n            this.services.groupsService.contents('', params, session)\n                .then((response) => {\n                    api.dispatch(updateResources(response.items));\n                    if (response.included) {\n                        api.dispatch(updateResources(response.included));\n                    }\n                    api.dispatch(appendItems(response));\n                    numberOfResolvedResponses++;\n                    // Used to determine if all results are empty, so items.length works as well as itemsAvailable\n                    totalNumItemsAvailable += response.items.length;\n                    if (numberOfResolvedResponses === numberOfSessions) {\n                        api.dispatch(progressIndicatorActions.STOP_WORKING(this.id));\n                        if (totalNumItemsAvailable === 0) api.dispatch(dataExplorerActions.SET_IS_NOT_FOUND({ id: this.id, isNotFound: true }));\n                    }\n                    // Request all containers for process status to be available\n                    // Required when contacting legacy API servers (pre-Arvados 3.0)\n                    if (session.apiRevision < GROUP_CONTENTS_INCLUDE_CONTAINER_UUID_MIN_API_REVISION) {\n                        const containerRequests = response.items.filter((item) => item.kind === ResourceKind.CONTAINER_REQUEST) as ContainerRequestResource[];\n                        const containerUuids = containerRequests.map(container => container.containerUuid).filter(uuid => uuid !== null) as string[];\n                        containerUuids.length && this.services.containerService\n                                                     .list({\n                                                         filters: new FilterBuilder()\n                                                             .addIn('uuid', containerUuids)\n                                                             .getFilters()\n                                                     }, false)\n                                                     .then((containers) => {\n                                                         api.dispatch(updateResources(containers.items));\n                                                     });\n                    }\n                }).catch(() => {\n                    api.dispatch(couldNotFetchSearchResults(session.clusterId));\n                    api.dispatch(progressIndicatorActions.STOP_WORKING(this.id));\n                });\n        });\n    }\n\n    // Empty requestCount method since search relies on the included itemsAvailable counts.\n    async requestCount() {}\n}\n\nexport const searchSingleCluster = (session: Session, searchValue: string) =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: 
ServiceRepository) => {\n        const state = getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, SEARCH_RESULTS_PANEL_ID);\n\n        if (searchValue.trim() === '') {\n            return;\n        }\n\n        const params: ContentsArguments = getParams(dataExplorer, searchValue, session.apiRevision);\n\n        // If results for this cluster at this offset have already been fetched, skip fetching them again.\n        if (state.searchBar.searchOffsets[session.clusterId] === params.offset) {\n            return;\n        }\n\n        if (session.apiRevision >= GROUP_CONTENTS_INCLUDE_CONTAINER_UUID_MIN_API_REVISION) {\n            params.include = [\"owner_uuid\", \"container_uuid\"];\n        } else {\n            params.include = \"owner_uuid\";\n        }\n\n        dispatch(progressIndicatorActions.START_WORKING(SEARCH_RESULTS_PANEL_ID));\n\n        services.groupsService.contents('', params, session)\n                .then((response) => {\n                    dispatch<any>(setSearchOffsets(session.clusterId, params.offset || 0));\n                    dispatch(updateResources(response.items));\n                    if (response.included) {\n                        dispatch(updateResources(response.included));\n                    }\n                    dispatch(appendItems(response));\n                    // Request all containers for process status to be available\n                    if (session.apiRevision < GROUP_CONTENTS_INCLUDE_CONTAINER_UUID_MIN_API_REVISION) {\n                        const containerRequests = response.items.filter((item) => item.kind === ResourceKind.CONTAINER_REQUEST) as ContainerRequestResource[];\n                        const containerUuids = containerRequests.map(container => container.containerUuid).filter(uuid => uuid !== null) as string[];\n                        containerUuids.length && services.containerService\n                                                         .list({\n                                                             filters: new FilterBuilder()\n                                                                 .addIn('uuid', containerUuids)\n                                                                 .getFilters()\n                                                         }, false)\n                                                         .then((containers) => {\n                                                             dispatch(updateResources(containers.items));\n                                                         });\n                    }\n                    // Stop the progress indicator only once the request has settled, not synchronously after dispatching it.\n                    dispatch(progressIndicatorActions.STOP_WORKING(SEARCH_RESULTS_PANEL_ID));\n                }).catch(() => {\n                    dispatch(couldNotFetchSearchResults(session.clusterId));\n                    dispatch(progressIndicatorActions.STOP_WORKING(SEARCH_RESULTS_PANEL_ID));\n                });\n    };\n\nconst typeFilters = (columns: DataColumns<string, GroupContentsResource>) => serializeResourceTypeFilters(getDataExplorerColumnFilters(columns, SearchResultsPanelColumnNames.TYPE));\n\nexport const getParams = (dataExplorer: DataExplorer, query: string, apiRevision: number) => ({\n    ...dataExplorerToListParams(dataExplorer),\n    filters: joinFilters(\n        queryToFilters(query, apiRevision),\n        typeFilters(dataExplorer.columns)\n    ),\n    order: getOrder(dataExplorer),\n    includeTrash: getAdvancedDataFromQuery(query).inTrash,\n    includeOldVersions: getAdvancedDataFromQuery(query).pastVersions,\n});\n\nconst getOrder = 
(dataExplorer: DataExplorer) => {\n    const sortColumn = getSortColumn<GroupContentsResource>(dataExplorer);\n    const order = new OrderBuilder<GroupContentsResource>();\n    if (sortColumn && sortColumn.sort) {\n        const sortDirection = sortColumn.sort.direction === SortDirection.ASC\n            ? OrderDirection.ASC\n            : OrderDirection.DESC;\n\n        // Use createdAt as a secondary sort column so we break ties consistently.\n        return order\n            .addOrder(sortDirection, sortColumn.sort.field, GroupContentsResourcePrefix.COLLECTION)\n            .addOrder(sortDirection, sortColumn.sort.field, GroupContentsResourcePrefix.PROCESS)\n            .addOrder(sortDirection, sortColumn.sort.field, GroupContentsResourcePrefix.PROJECT)\n            .addOrder(OrderDirection.DESC, \"createdAt\", GroupContentsResourcePrefix.PROCESS)\n            .getOrder();\n    } else {\n        return order.getOrder();\n    }\n};\n\nexport const setItems = (listResults: ListResults<GroupContentsResource>) =>\n    searchResultsPanelActions.SET_ITEMS({\n        ...listResultsToDataExplorerItemsMeta(listResults),\n        items: listResults.items.map(resource => resource.uuid),\n    });\n\nconst resetItemsAvailable = () =>\n    searchResultsPanelActions.RESET_ITEMS_AVAILABLE();\n\nexport const appendItems = (listResults: ListResults<GroupContentsResource>) =>\n    searchResultsPanelActions.APPEND_ITEMS({\n        ...listResultsToDataExplorerItemsMeta(listResults),\n        items: listResults.items.map(resource => resource.uuid),\n    });\n\nconst couldNotFetchSearchResults = (cluster: string) =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: `Could not fetch search results from ${cluster}.`,\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/search-results-panel/search-results-panel-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from 'services/services';\nimport { bindDataExplorerActions } from 'store/data-explorer/data-explorer-action';\nimport { setBreadcrumbs } from 'store/breadcrumbs/breadcrumbs-actions';\nimport { searchBarActions } from 'store/search-bar/search-bar-actions';\n\nexport const SEARCH_RESULTS_PANEL_ID = \"searchResultsPanel\";\nexport const searchResultsPanelActions = bindDataExplorerActions(SEARCH_RESULTS_PANEL_ID);\n\nexport const loadSearchResultsPanel = () =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(setBreadcrumbs([{ label: 'Search results' }]));\n        const loc = getState().router.location;\n        if (loc !== null) {\n            const search = new URLSearchParams(loc.search);\n            const q = search.get('q');\n            if (q !== null) {\n                dispatch(searchBarActions.SET_SEARCH_VALUE(q));\n            }\n        }\n        dispatch(searchBarActions.SET_SEARCH_RESULTS([]));\n        dispatch(searchResultsPanelActions.CLEAR());\n        dispatch(searchResultsPanelActions.REQUEST_ITEMS(true));\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/selected-resource/selected-resource-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport const selectedResourceActions = {\n    SET_SELECTED_RESOURCE: 'SET_SELECTED_RESOURCE',\n    SET_IS_IN_DATA_EXPLORER: 'IS_SELECTED_RESOURCE_IN_DATA_EXPLORER',\n}\n\nexport const setSelectedResourceUuid = (resourceUuid: string | null) => ({\n    type: selectedResourceActions.SET_SELECTED_RESOURCE,\n    payload: resourceUuid\n});\n\nexport const setIsSelectedResourceInDataExplorer = (isIn: boolean) => ({\n    type: selectedResourceActions.SET_IS_IN_DATA_EXPLORER,\n    payload: isIn\n});\n"
  },
  {
    "path": "services/workbench2/src/store/selected-resource/selected-resource-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { selectedResourceActions } from \"./selected-resource-actions\";\n\ntype SelectedResourceState = {\n    selectedResourceUuid: string | null,\n    isSelectedResourceInDataExplorer: boolean\n};\n\nconst initialState: SelectedResourceState = {\n    selectedResourceUuid: null,\n    isSelectedResourceInDataExplorer: false\n}\n\nexport const selectedResourceReducer = (state: SelectedResourceState = initialState, action: any) => {\n    if (action.type === selectedResourceActions.SET_SELECTED_RESOURCE) {\n        return { ...state, selectedResourceUuid: action.payload };\n    }\n    if (action.type === selectedResourceActions.SET_IS_IN_DATA_EXPLORER) {\n        return { ...state, isSelectedResourceInDataExplorer: action.payload };\n    }\n    return state;\n};"
  },
  {
    "path": "services/workbench2/src/store/shared-with-me-panel/shared-with-me-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport {\n    DataExplorerMiddlewareService,\n    listResultsToDataExplorerItemsMeta,\n    dataExplorerToListParams,\n    getDataExplorerColumnFilters,\n} from \"../data-explorer/data-explorer-middleware-service\";\nimport { ServiceRepository } from \"services/services\";\nimport { MiddlewareAPI, Dispatch } from \"redux\";\nimport { RootState } from 'store/store';\nimport { getDataExplorer, DataExplorer } from 'store/data-explorer/data-explorer-reducer';\nimport { updateFavorites } from 'store/favorites/favorites-actions';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { sharedWithMePanelActions } from './shared-with-me-panel-actions';\nimport { ListResults } from 'services/common-service/common-service';\nimport { ContentsArguments, GroupContentsResource, GroupContentsResourcePrefix } from 'services/groups-service/groups-service';\nimport { DataColumns, SortDirection } from 'components/data-table/data-column';\nimport { OrderBuilder, OrderDirection } from 'services/api/order-builder';\nimport { ProjectResource } from 'models/project';\nimport { getSortColumn } from \"store/data-explorer/data-explorer-reducer\";\nimport { updatePublicFavorites } from 'store/public-favorites/public-favorites-actions';\nimport { FilterBuilder, joinFilters } from 'services/api/filter-builder';\nimport { progressIndicatorActions } from 'store/progress-indicator/progress-indicator-actions';\nimport { AuthState } from 'store/auth/auth-reducer';\nimport { SharedWithMePanelColumnNames } from 'views/shared-with-me-panel/shared-with-me-columns';\nimport { buildProcessStatusFilters, serializeResourceTypeFilters } from 'store/resource-type-filters/resource-type-filters';\nimport { couldNotFetchItemsAvailable } from 'store/data-explorer/data-explorer-action';\n\nexport class SharedWithMeMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        try {\n            if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n            const response = await this.services.groupsService\n                .contents('', getParams(dataExplorer, state.auth));\n            api.dispatch<any>(updateFavorites(response.items.map(item => item.uuid)));\n            api.dispatch<any>(updatePublicFavorites(response.items.map(item => item.uuid)));\n            api.dispatch(updateResources(response.items));\n            if (response.included) { api.dispatch(updateResources(response.included)); }\n            api.dispatch(setItems(response));\n        } catch (e) {\n            api.dispatch(couldNotFetchSharedItems());\n        } finally {\n            api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n\n        if (criteriaChanged) {\n            // Get itemsAvailable\n            return 
this.services.groupsService.contents('', getCountParams(dataExplorer, state.auth))\n                .then((results: ListResults<GroupContentsResource>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(sharedWithMePanelActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        api.dispatch(couldNotFetchItemsAvailable());\n                    }\n                });\n        }\n    }\n}\n\nexport const getParams = (dataExplorer: DataExplorer, authState: AuthState): ContentsArguments => ({\n    ...dataExplorerToListParams(dataExplorer),\n    order: getOrder(dataExplorer),\n    filters: getFilters(dataExplorer, authState),\n    excludeHomeProject: true,\n    count: \"none\",\n    include: [\"owner_uuid\", \"container_uuid\"]\n});\n\nconst getCountParams = (dataExplorer: DataExplorer, authState: AuthState): ContentsArguments => ({\n    limit: 0,\n    count: 'exact',\n    filters: getFilters(dataExplorer, authState),\n    excludeHomeProject: true,\n});\n\nconst getOrder = (dataExplorer: DataExplorer) => {\n    const sortColumn = getSortColumn<ProjectResource>(dataExplorer);\n    const order = new OrderBuilder<ProjectResource>();\n    if (sortColumn && sortColumn.sort) {\n        const sortDirection = sortColumn.sort.direction === SortDirection.ASC\n            ? OrderDirection.ASC\n            : OrderDirection.DESC;\n\n        // Use createdAt as a secondary sort column so we break ties consistently.\n        return order\n            .addOrder(sortDirection, sortColumn.sort.field, GroupContentsResourcePrefix.COLLECTION)\n            .addOrder(sortDirection, sortColumn.sort.field, GroupContentsResourcePrefix.PROCESS)\n            .addOrder(sortDirection, sortColumn.sort.field, GroupContentsResourcePrefix.PROJECT)\n            .addOrder(OrderDirection.DESC, \"createdAt\", GroupContentsResourcePrefix.PROCESS)\n            .getOrder();\n    } else {\n        return order.getOrder();\n    }\n};\n\nconst getFilters = (dataExplorer: DataExplorer, authState: AuthState) => {\n    const columns = dataExplorer.columns as DataColumns<string, ProjectResource>;\n    const typeFilters = serializeResourceTypeFilters(getDataExplorerColumnFilters(columns, SharedWithMePanelColumnNames.TYPE));\n    const statusColumnFilters = getDataExplorerColumnFilters(columns, \"Status\");\n    const activeStatusFilter = Object.keys(statusColumnFilters).find(filterName => statusColumnFilters[filterName].selected);\n\n    // TODO: Extract group contents name filter\n    const nameFilters = new FilterBuilder()\n        .addILike(\"name\", dataExplorer.searchValue, GroupContentsResourcePrefix.COLLECTION)\n        .addILike(\"name\", dataExplorer.searchValue, GroupContentsResourcePrefix.PROCESS)\n        .addILike(\"name\", dataExplorer.searchValue, GroupContentsResourcePrefix.PROJECT)\n        .getFilters();\n\n    // Filter by container status\n    const statusFilters = buildProcessStatusFilters(new FilterBuilder(), activeStatusFilter || \"\", GroupContentsResourcePrefix.PROCESS).getFilters();\n\n    // Exclude the public favorites project itself from the results\n    const favoritesFilter = new FilterBuilder().addDistinct('uuid', `${authState.config.uuidPrefix}-j7d0g-publicfavorites`).getFilters();\n\n    return joinFilters(statusFilters, typeFilters, nameFilters, favoritesFilter);\n};\n\nexport const setItems = (listResults: ListResults<GroupContentsResource>) =>\n    sharedWithMePanelActions.SET_ITEMS({\n        ...listResultsToDataExplorerItemsMeta(listResults),\n  
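      // Rows are stored as uuids only; requestItems caches the corresponding resources via updateResources.\n  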
      items: listResults.items.map(resource => resource.uuid),\n    });\n\nconst couldNotFetchSharedItems = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch shared items.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/shared-with-me-panel/shared-with-me-panel-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { bindDataExplorerActions } from \"../data-explorer/data-explorer-action\";\n\nexport const SHARED_WITH_ME_PANEL_ID = \"sharedWithMePanel\";\nexport const sharedWithMePanelActions = bindDataExplorerActions(SHARED_WITH_ME_PANEL_ID);\nexport const loadSharedWithMePanel = () => (dispatch: Dispatch) => {\n    dispatch(sharedWithMePanelActions.RESET_EXPLORER_SEARCH_VALUE());\n    dispatch(sharedWithMePanelActions.REQUEST_ITEMS());\n};"
  },
  {
    "path": "services/workbench2/src/store/sharing-dialog/sharing-dialog-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { withDialog } from \"store/dialog/with-dialog\";\nimport {\n    SHARING_DIALOG_NAME,\n    SHARING_INVITATION_FORM_NAME,\n    SharingManagementFormData,\n    SharingInvitationFormData,\n    getSharingMangementFormData,\n    SharingPublicAccessFormData,\n    VisibilityLevel,\n    SHARING_PUBLIC_ACCESS_FORM_NAME,\n} from './sharing-dialog-types';\nimport { Dispatch } from 'redux';\nimport { ServiceRepository } from \"services/services\";\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { initialize, getFormValues, reset } from 'redux-form';\nimport { SHARING_MANAGEMENT_FORM_NAME } from 'store/sharing-dialog/sharing-dialog-types';\nimport { RootState } from 'store/store';\nimport { getDialog } from 'store/dialog/dialog-reducer';\nimport { PermissionLevel, PermissionResource } from 'models/permission';\nimport { differenceWith } from \"lodash\";\nimport { withProgress } from \"store/progress-indicator/with-progress\";\nimport { progressIndicatorActions } from 'store/progress-indicator/progress-indicator-actions';\nimport { snackbarActions, SnackbarKind } from \"../snackbar/snackbar-actions\";\nimport {\n    extractUuidObjectType,\n    ResourceObjectType\n} from \"models/resource\";\nimport { resourcesActions } from \"store/resources/resources-actions\";\nimport { getPublicGroupUuid, getAllUsersGroupUuid } from \"store/workflow-panel/workflow-panel-actions\";\nimport { getSharingPublicAccessFormData } from './sharing-dialog-types';\nimport { UserResource } from \"models/user\";\nimport { GroupResource } from \"models/group\";\nimport { ListResults } from 'services/common-service/common-service';\n\nexport const openSharingDialog = (resourceUuid: string, refresh?: () => void) =>\n    (dispatch: Dispatch) => {\n        dispatch(dialogActions.OPEN_DIALOG({ id: SHARING_DIALOG_NAME, data: { resourceUuid, refresh } }));\n        dispatch<any>(loadSharingDialog);\n    };\n\nexport const closeSharingDialog = () =>\n    dialogActions.CLOSE_DIALOG({ id: SHARING_DIALOG_NAME });\n\nexport const connectSharingDialog = withDialog(SHARING_DIALOG_NAME);\nexport const connectSharingDialogProgress = withProgress(SHARING_DIALOG_NAME);\n\n\nexport const saveSharingDialogChanges = async (dispatch: Dispatch, getState: () => RootState) => {\n    dispatch(progressIndicatorActions.START_WORKING(SHARING_DIALOG_NAME));\n    await dispatch<any>(savePublicPermissionChanges);\n    await dispatch<any>(saveManagementChanges);\n    await dispatch<any>(sendInvitations);\n    dispatch(reset(SHARING_INVITATION_FORM_NAME));\n    await dispatch<any>(loadSharingDialog);\n    dispatch(progressIndicatorActions.STOP_WORKING(SHARING_DIALOG_NAME));\n\n    const dialog = getDialog<SharingDialogData>(getState().dialog, SHARING_DIALOG_NAME);\n    if (dialog && dialog.data.refresh) {\n        dialog.data.refresh();\n    }\n};\n\nexport interface SharingDialogData {\n    resourceUuid: string;\n    refresh: () => void;\n}\n\nexport const createSharingToken = (expDate: Date | undefined) => async (dispatch: Dispatch, getState: () => RootState, { apiClientAuthorizationService }: ServiceRepository) => {\n    const dialog = getDialog<SharingDialogData>(getState().dialog, SHARING_DIALOG_NAME);\n    if (dialog) {\n        const resourceUuid = dialog.data.resourceUuid;\n        if (extractUuidObjectType(resourceUuid) === 
ResourceObjectType.COLLECTION) {\n            dispatch(progressIndicatorActions.START_WORKING(SHARING_DIALOG_NAME));\n            try {\n                const sharingToken = await apiClientAuthorizationService.createCollectionSharingToken(resourceUuid, expDate);\n                dispatch(resourcesActions.SET_RESOURCES([sharingToken]));\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Sharing URL created',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS,\n                }));\n            } catch (e) {\n                dispatch(snackbarActions.OPEN_SNACKBAR({\n                    message: 'Failed to create sharing URL',\n                    hideDuration: 2000,\n                    kind: SnackbarKind.ERROR,\n                }));\n            } finally {\n                dispatch(progressIndicatorActions.STOP_WORKING(SHARING_DIALOG_NAME));\n            }\n        }\n    }\n};\n\nexport const deleteSharingToken = (uuid: string) => async (dispatch: Dispatch, getState: () => RootState, { apiClientAuthorizationService }: ServiceRepository) => {\n    dispatch(progressIndicatorActions.START_WORKING(SHARING_DIALOG_NAME));\n    try {\n        await apiClientAuthorizationService.delete(uuid);\n        dispatch(resourcesActions.DELETE_RESOURCES([uuid]));\n        dispatch(snackbarActions.OPEN_SNACKBAR({\n            message: 'Sharing URL removed',\n            hideDuration: 2000,\n            kind: SnackbarKind.SUCCESS,\n        }));\n    } catch (e) {\n        dispatch(snackbarActions.OPEN_SNACKBAR({\n            message: 'Failed to remove sharing URL',\n            hideDuration: 2000,\n            kind: SnackbarKind.ERROR,\n        }));\n    } finally {\n        dispatch(progressIndicatorActions.STOP_WORKING(SHARING_DIALOG_NAME));\n    }\n};\n\nconst loadSharingDialog = async (dispatch: Dispatch, getState: () => RootState, { apiClientAuthorizationService }: ServiceRepository) => {\n\n    const dialog = getDialog<SharingDialogData>(getState().dialog, SHARING_DIALOG_NAME);\n    const sharingURLsDisabled = getState().auth.config.clusterConfig.Workbench.DisableSharingURLsUI;\n    if (dialog) {\n        dispatch(progressIndicatorActions.START_WORKING(SHARING_DIALOG_NAME));\n        try {\n            const resourceUuid = dialog.data.resourceUuid;\n            await dispatch<any>(initializeManagementForm);\n            // For collections, we need to load the public sharing tokens\n            if (!sharingURLsDisabled && extractUuidObjectType(resourceUuid) === ResourceObjectType.COLLECTION) {\n                const sharingTokens = await apiClientAuthorizationService.listCollectionSharingTokens(resourceUuid);\n                dispatch(resourcesActions.SET_RESOURCES([...sharingTokens.items]));\n            }\n        } catch (e) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: 'You do not have access to share this item',\n                hideDuration: 2000,\n                kind: SnackbarKind.ERROR\n            }));\n            dispatch(dialogActions.CLOSE_DIALOG({ id: SHARING_DIALOG_NAME }));\n        } finally {\n            dispatch(progressIndicatorActions.STOP_WORKING(SHARING_DIALOG_NAME));\n        }\n    }\n};\n\nexport const initializeManagementForm = async (dispatch: Dispatch, getState: () => RootState, { userService, groupsService, permissionService }: ServiceRepository) => {\n\n    const dialog = getDialog<SharingDialogData>(getState().dialog, SHARING_DIALOG_NAME);\n    if (!dialog) 
{\n        return;\n    }\n    dispatch(progressIndicatorActions.START_WORKING(SHARING_DIALOG_NAME));\n    try {\n        const resourceUuid = dialog?.data.resourceUuid;\n        const { items: permissionLinks } = await permissionService.listResourcePermissions(resourceUuid);\n        dispatch<any>(initializePublicAccessForm(permissionLinks));\n\n        const queryusers = permissionLinks.map(({ tailUuid }) => tailUuid)\n                                          .filter(uuid => extractUuidObjectType(uuid) === ResourceObjectType.USER);\n        const querygroups = permissionLinks.map(({ tailUuid }) => tailUuid)\n                                           .filter(uuid => extractUuidObjectType(uuid) === ResourceObjectType.GROUP);\n\n        const userfilters = new FilterBuilder()\n            .addIn('uuid', Array.from(new Set(queryusers)))\n            .getFilters();\n        const groupfilters = new FilterBuilder()\n            .addIn('uuid', Array.from(new Set(querygroups)))\n            .getFilters();\n\n        const userpromise = queryusers.length > 0 ? userService.list({ filters: userfilters, count: \"none\", limit: 1000 }) : Promise.resolve({items: ([] as Array<UserResource>)});\n        const grouppromise = querygroups.length > 0 ? groupsService.list({ filters: groupfilters, count: \"none\", limit: 1000 }) : Promise.resolve({items: ([] as Array<GroupResource>)});\n\n        const results = await Promise.all([userpromise, grouppromise]);\n\n        const users = (results[0] as ListResults<UserResource>).items;\n        const groups = (results[1] as ListResults<GroupResource>).items;\n\n        const getEmail = (tailUuid: string) => {\n            const user = users.find(({ uuid }) => uuid === tailUuid);\n            return user\n                 ? (user as UserResource).email\n                 : null;\n        };\n\n        const getFullname = (tailUuid: string) => {\n            const user = users.find(({ uuid }) => uuid === tailUuid);\n            const group = groups.find(({ uuid }) => uuid === tailUuid);\n            return user\n                 ? (user as UserResource & {fullName: string}).fullName\n                 : group\n                 ? 
(group as GroupResource).name\n                 : tailUuid;\n        };\n\n        const managementPermissions = permissionLinks\n            .map(({ tailUuid, name, uuid }) => ({\n                email: getEmail(tailUuid),\n                fullName: getFullname(tailUuid),\n                permissions: name as PermissionLevel,\n                permissionUuid: uuid,\n            }));\n\n        const managementFormData: SharingManagementFormData = {\n            permissions: managementPermissions,\n            initialPermissions: managementPermissions,\n        };\n\n        dispatch(initialize(SHARING_MANAGEMENT_FORM_NAME, managementFormData));\n    } finally {\n        dispatch(progressIndicatorActions.STOP_WORKING(SHARING_DIALOG_NAME));\n    }\n};\n\nconst initializePublicAccessForm = (permissionLinks: PermissionResource[]) =>\n    (dispatch: Dispatch, getState: () => RootState,) => {\n\n        const state = getState();\n\n        const [publicPermission] = permissionLinks\n            .filter(item => item.tailUuid === getPublicGroupUuid(state));\n\n        const [allUsersPermission] = permissionLinks\n            .filter(item => item.tailUuid === getAllUsersGroupUuid(state));\n\n        let publicAccessFormData: SharingPublicAccessFormData;\n\n        if (publicPermission) {\n            publicAccessFormData = {\n                visibility: VisibilityLevel.PUBLIC,\n                initialVisibility: VisibilityLevel.PUBLIC,\n                permissionUuid: publicPermission.uuid\n            };\n        } else if (allUsersPermission) {\n            publicAccessFormData = {\n                visibility: VisibilityLevel.ALL_USERS,\n                initialVisibility: VisibilityLevel.ALL_USERS,\n                permissionUuid: allUsersPermission.uuid\n            };\n        } else if (permissionLinks.length > 0) {\n            publicAccessFormData = {\n                visibility: VisibilityLevel.SHARED,\n                initialVisibility: VisibilityLevel.SHARED,\n                permissionUuid: ''\n            };\n        } else {\n            publicAccessFormData = {\n                visibility: VisibilityLevel.PRIVATE,\n                initialVisibility: VisibilityLevel.PRIVATE,\n                permissionUuid: ''\n            };\n        }\n\n        dispatch(initialize(SHARING_PUBLIC_ACCESS_FORM_NAME, publicAccessFormData));\n    };\n\nconst savePublicPermissionChanges = async (_: Dispatch, getState: () => RootState, { permissionService }: ServiceRepository) => {\n    const state = getState();\n    const { user } = state.auth;\n    const dialog = getDialog<SharingDialogData>(state.dialog, SHARING_DIALOG_NAME);\n    if (dialog && user) {\n        const { permissionUuid, visibility, initialVisibility } = getSharingPublicAccessFormData(state);\n        // If visibility level changed, delete the previous link to public/all users.\n        // On PRIVATE this link will be deleted by saveManagementChanges\n        // so don't double delete (which would show an error dialog).\n        if (permissionUuid !== \"\" && visibility !== initialVisibility) {\n            await permissionService.delete(permissionUuid);\n        }\n        if (visibility === VisibilityLevel.ALL_USERS) {\n            await permissionService.create({\n                ownerUuid: user.uuid,\n                headUuid: dialog.data.resourceUuid,\n                tailUuid: getAllUsersGroupUuid(state),\n                name: PermissionLevel.CAN_READ,\n            });\n        } else if (visibility === VisibilityLevel.PUBLIC) {\n   
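         // PUBLIC grants can_read to the cluster-wide public group, so the item becomes readable without logging in.\n   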
         await permissionService.create({\n                ownerUuid: user.uuid,\n                headUuid: dialog.data.resourceUuid,\n                tailUuid: getPublicGroupUuid(state),\n                name: PermissionLevel.CAN_READ,\n            });\n        }\n    }\n};\n\nconst saveManagementChanges = async (_: Dispatch, getState: () => RootState, { permissionService }: ServiceRepository) => {\n    const state = getState();\n    const { user } = state.auth;\n    const dialog = getDialog<SharingDialogData>(state.dialog, SHARING_DIALOG_NAME);\n    if (dialog && user) {\n        const { initialPermissions, permissions } = getSharingMangementFormData(state);\n        const { visibility } = getSharingPublicAccessFormData(state);\n        const cancelledPermissions = visibility === VisibilityLevel.PRIVATE\n            ? initialPermissions\n            : differenceWith(\n                initialPermissions,\n                permissions,\n                (a, b) => a.permissionUuid === b.permissionUuid\n            );\n\n        const deletions = cancelledPermissions.map(async ({ permissionUuid }) => {\n            try {\n                await permissionService.delete(permissionUuid, false);\n            } catch (e) { }\n        });\n        const updates = permissions.map(async update => {\n            try {\n                await permissionService.update(update.permissionUuid, { name: update.permissions }, false);\n            } catch (e) { }\n        });\n        await Promise.all([...deletions, ...updates]);\n    }\n};\n\nconst sendInvitations = async (_: Dispatch, getState: () => RootState, { permissionService }: ServiceRepository) => {\n    const state = getState();\n    const { user } = state.auth;\n    const dialog = getDialog<SharingDialogData>(state.dialog, SHARING_DIALOG_NAME);\n    if (dialog && user) {\n        const invitations = getFormValues(SHARING_INVITATION_FORM_NAME)(state) as SharingInvitationFormData;\n        const data = invitations.invitedPeople.map(invitee => ({\n            ownerUuid: user.uuid,\n            headUuid: dialog.data.resourceUuid,\n            tailUuid: invitee.uuid,\n            name: invitations.permissions\n        }));\n        const changes = data.map(invitation => permissionService.create(invitation));\n        await Promise.all(changes);\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/store/sharing-dialog/sharing-dialog-types.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { PermissionLevel } from 'models/permission';\nimport { getFormValues, isDirty } from 'redux-form';\nimport { RootState } from 'store/store';\n\nexport const SHARING_DIALOG_NAME = 'SHARING_DIALOG_NAME';\nexport const SHARING_PUBLIC_ACCESS_FORM_NAME = 'SHARING_PUBLIC_ACCESS_FORM_NAME';\nexport const SHARING_MANAGEMENT_FORM_NAME = 'SHARING_MANAGEMENT_FORM_NAME';\nexport const SHARING_INVITATION_FORM_NAME = 'SHARING_INVITATION_FORM_NAME';\n\nexport enum VisibilityLevel {\n    PRIVATE = 'Private',\n    SHARED = 'Shared',\n    ALL_USERS = 'All users',\n    PUBLIC = 'Public',\n}\n\nexport interface SharingPublicAccessFormData {\n    visibility: VisibilityLevel;\n    initialVisibility: VisibilityLevel;\n    permissionUuid: string;\n}\n\nexport interface SharingManagementFormData {\n    permissions: SharingManagementFormDataRow[];\n    initialPermissions: SharingManagementFormDataRow[];\n}\n\nexport interface SharingManagementFormDataRow {\n    email: string | null;\n    permissions: PermissionLevel;\n    permissionUuid: string;\n}\n\nexport interface SharingInvitationFormData {\n    permissions: PermissionLevel;\n    invitedPeople: SharingInvitationFormPersonData[];\n}\n\nexport interface SharingInvitationFormPersonData {\n    email: string;\n    name: string;\n    uuid: string;\n}\n\nexport const getSharingMangementFormData = (state: any) =>\n    getFormValues(SHARING_MANAGEMENT_FORM_NAME)(state) as SharingManagementFormData;\n\nexport const getSharingPublicAccessFormData = (state: any) =>\n    getFormValues(SHARING_PUBLIC_ACCESS_FORM_NAME)(state) as SharingPublicAccessFormData;\n\nexport const hasChanges = (state: RootState) =>\n    isDirty(SHARING_PUBLIC_ACCESS_FORM_NAME)(state) ||\n    isDirty(SHARING_MANAGEMENT_FORM_NAME)(state) ||\n    (isDirty(SHARING_INVITATION_FORM_NAME)(state) && !!state.form[SHARING_INVITATION_FORM_NAME].values?.invitedPeople.length);\n"
  },
  {
    "path": "services/workbench2/src/store/side-panel/side-panel-action.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { navigateTo } from 'store/navigation/navigation-action';\n\nexport const sidePanelActions = {\n    TOGGLE_COLLAPSE: 'TOGGLE_COLLAPSE',\n}\n\nexport const navigateFromSidePanel = (id: string) =>\n    (dispatch: Dispatch) => {\n        dispatch<any>(navigateTo(id));\n    };\n\nexport const toggleSidePanel = (collapsedState: boolean) => {\n    return (dispatch: Dispatch) => {\n        dispatch({type: sidePanelActions.TOGGLE_COLLAPSE, payload: !collapsedState})\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/store/side-panel/side-panel-reducer.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { sidePanelActions } from \"./side-panel-action\"\n\ninterface SidePanelState {\n  collapsedState: boolean,\n}\n\nconst sidePanelInitialState = {\n  collapsedState: false,\n}\n\nexport const sidePanelReducer = (state: SidePanelState = sidePanelInitialState, action)=>{\n  if(action.type === sidePanelActions.TOGGLE_COLLAPSE) return {...state, collapsedState: action.payload}\n  return state\n}\n"
  },
  {
    "path": "services/workbench2/src/store/side-panel-tree/side-panel-tree-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { treePickerActions } from \"store/tree-picker/tree-picker-actions\";\nimport { RootState } from 'store/store';\nimport { getUserUuid } from \"common/getuser\";\nimport { ServiceRepository } from 'services/services';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { resourcesActions } from 'store/resources/resources-actions';\nimport { getTreePicker, TreePicker } from 'store/tree-picker/tree-picker';\nimport { getNodeAncestors, getNodeAncestorsIds, getNode, TreeNode, initTreeNode, TreeNodeStatus } from 'models/tree';\nimport { ProjectResource } from 'models/project';\nimport { OrderBuilder } from 'services/api/order-builder';\nimport { ResourceKind, extractUuidObjectType, ResourceObjectType, Resource } from 'models/resource';\nimport { CategoriesListReducer } from 'common/plugintypes';\nimport { pluginConfig } from 'plugins';\nimport { LinkClass, LinkResource } from 'models/link';\nimport { verifyAndUpdateLinks } from 'common/link-update-name';\nimport { ProcessIcon,\n    ProjectIcon,\n    FavoriteIcon,\n    RootProjectIcon,\n    ShareMeIcon,\n    TrashIcon,\n    PublicFavoriteIcon,\n    GroupsIcon,\n    TerminalIcon,\n    ResourceIcon,\n    FolderKeyIcon,\n    WheelIcon,\n} from 'components/icon/icon';\n\nexport enum SidePanelTreeCategory {\n    DASHBOARD = 'Dashboard',\n    PROJECTS = 'Home Projects',\n    FAVORITES = 'My Favorites',\n    PUBLIC_FAVORITES = 'Public Favorites',\n    SHARED_WITH_ME = 'Shared with me',\n    ALL_PROCESSES = 'All Processes',\n    INSTANCE_TYPES = 'Instance Types',\n    SHELL_ACCESS = 'Shell Access',\n    EXTERNAL_CREDENTIALS = 'External Credentials',\n    GROUPS = 'Groups',\n    TRASH = 'Trash',\n}\n\nexport const SIDE_PANEL_TREE = 'sidePanelTree';\nconst SIDEPANEL_TREE_NODE_LIMIT = 50\n\nexport const getSidePanelTree = (treePicker: TreePicker) =>\n    getTreePicker<ProjectResource | string>(SIDE_PANEL_TREE)(treePicker);\n\nexport const getSidePanelTreeBranch = (uuid: string) => (treePicker: TreePicker): Array<TreeNode<ProjectResource | string>> => {\n    const tree = getSidePanelTree(treePicker);\n    if (tree) {\n        const ancestors = getNodeAncestors(uuid)(tree);\n        const node = getNode(uuid)(tree);\n        if (node) {\n            return [...ancestors, node];\n        }\n    }\n    return [];\n};\n\nlet SIDE_PANEL_CATEGORIES: string[] = [\n    SidePanelTreeCategory.DASHBOARD,\n    SidePanelTreeCategory.PROJECTS,\n    SidePanelTreeCategory.FAVORITES,\n    SidePanelTreeCategory.PUBLIC_FAVORITES,\n    SidePanelTreeCategory.SHARED_WITH_ME,\n    SidePanelTreeCategory.ALL_PROCESSES,\n    SidePanelTreeCategory.INSTANCE_TYPES,\n    SidePanelTreeCategory.SHELL_ACCESS,\n    SidePanelTreeCategory.EXTERNAL_CREDENTIALS,\n    SidePanelTreeCategory.GROUPS,\n    SidePanelTreeCategory.TRASH\n];\n\nconst reduceCatsFn: (a: string[],\n    b: CategoriesListReducer) => string[] = (a, b) => b(a);\n\nSIDE_PANEL_CATEGORIES = pluginConfig.sidePanelCategories.reduce(reduceCatsFn, SIDE_PANEL_CATEGORIES);\n\nexport const isSidePanelTreeCategory = (id: string) => SIDE_PANEL_CATEGORIES.some(category => category === id);\n\n\nexport const initSidePanelTree = () =>\n    (dispatch: Dispatch, getState: () => RootState, { authService }: ServiceRepository) => {\n        const rootProjectUuid = getUserUuid(getState());\n        if (!rootProjectUuid) { return; }\n        const nodes = 
SIDE_PANEL_CATEGORIES.map(id => {\n            if (id === SidePanelTreeCategory.PROJECTS) {\n                return initTreeNode({ id: rootProjectUuid, value: SidePanelTreeCategory.PROJECTS });\n            } else {\n                return initTreeNode({ id, value: id });\n            }\n        });\n        dispatch(treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({\n            id: '',\n            pickerId: SIDE_PANEL_TREE,\n            nodes\n        }));\n        SIDE_PANEL_CATEGORIES.forEach(category => {\n            if (category !== SidePanelTreeCategory.PROJECTS && category !== SidePanelTreeCategory.FAVORITES && category !== SidePanelTreeCategory.PUBLIC_FAVORITES) {\n                dispatch(treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({\n                    id: category,\n                    pickerId: SIDE_PANEL_TREE,\n                    nodes: []\n                }));\n            }\n        });\n    };\n\nexport const loadSidePanelTreeProjects = (projectUuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const treePicker = getTreePicker(SIDE_PANEL_TREE)(getState().treePicker);\n        const node = treePicker ? getNode(projectUuid)(treePicker) : undefined;\n        if (projectUuid === SidePanelTreeCategory.PUBLIC_FAVORITES) {\n            const unverifiedPubFaves = await dispatch<any>(fetchPublicFavoritesLinks());\n            await verifyAndUpdateLinkNames(projectUuid, unverifiedPubFaves, dispatch, getState, services);\n        } else if (projectUuid === SidePanelTreeCategory.FAVORITES) {\n            const unverifiedFaves = await dispatch<any>(fetchFavoritesLinks());\n            await setFaves(unverifiedFaves, dispatch, getState, services);\n            await verifyAndUpdateLinkNames(projectUuid, unverifiedFaves, dispatch, getState, services);\n        } else if (node || projectUuid !== '') {\n            await dispatch<any>(loadProject(projectUuid));\n        }\n    };\n\nconst loadProject = (projectUuid: string) =>\n    async (dispatch: Dispatch, _: () => RootState, services: ServiceRepository) => {\n\n        const objectType = extractUuidObjectType(projectUuid);\n        if (objectType !== ResourceObjectType.USER && objectType !== ResourceObjectType.GROUP) {\n            return;\n        }\n\n        dispatch(treePickerActions.LOAD_TREE_PICKER_NODE({ id: projectUuid, pickerId: SIDE_PANEL_TREE }));\n        const params = {\n            filters: new FilterBuilder()\n                .addEqual('owner_uuid', projectUuid)\n                .getFilters(),\n            order: new OrderBuilder<ProjectResource>()\n                .addDesc('createdAt')\n                .getOrder(),\n            limit: SIDEPANEL_TREE_NODE_LIMIT,\n        };\n\n        const { items } = await services.projectService.list(params);\n\n        dispatch(treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({\n            id: projectUuid,\n            pickerId: SIDE_PANEL_TREE,\n            nodes: items.map(item => initTreeNode({ id: item.uuid, value: item })),\n        }));\n        dispatch(resourcesActions.SET_RESOURCES(items));\n    };\n\nconst fetchFavoritesLinks = () => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n
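    // Favorites are STAR links with the current user as tail; list the newest SIDEPANEL_TREE_NODE_LIMIT of them.\n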
    const params = {\n        filters: new FilterBuilder()\n            .addEqual('link_class', LinkClass.STAR)\n            .addEqual('tail_uuid', getUserUuid(getState()))\n            .addEqual('tail_kind', ResourceKind.USER)\n            .getFilters(),\n        order: new OrderBuilder<ProjectResource>().addDesc('createdAt').getOrder(),\n        limit: SIDEPANEL_TREE_NODE_LIMIT,\n    };\n\n    const { items } = await services.linkService.list(params);\n\n    return items;\n};\n\nconst setFaves = async (links: LinkResource[], dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    const uuids = links.map(it => it.headUuid);\n    const groupItems = services.groupsService.list({\n        filters: new FilterBuilder()\n            .addIn(\"uuid\", uuids)\n            .getFilters()\n    });\n    const collectionItems = services.collectionService.list({\n        filters: new FilterBuilder()\n            .addIn(\"uuid\", uuids)\n            .getFilters()\n    });\n    const processItems = services.containerRequestService.list({\n        filters: new FilterBuilder()\n            .addIn(\"uuid\", uuids)\n            .getFilters()\n    });\n\n    const resolvedItems = await Promise.all([groupItems, collectionItems, processItems]);\n\n    const responseItems = resolvedItems.reduce((acc, response) => acc.concat(response.items), [] as Resource[]);\n\n    // Setting resources here so they won't be re-fetched in the validation step\n    await dispatch(resourcesActions.SET_RESOURCES(responseItems));\n};\n\nconst verifyAndUpdateLinkNames = async (category: SidePanelTreeCategory, links: LinkResource[], dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    dispatch(treePickerActions.LOAD_TREE_PICKER_NODE({ id: category, pickerId: SIDE_PANEL_TREE }));\n\n    const verifiedLinks = await verifyAndUpdateLinks(links, dispatch, getState, services);\n\n    dispatch(\n        treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({\n            id: category,\n            pickerId: SIDE_PANEL_TREE,\n            nodes: verifiedLinks.map(item => initTreeNode({ id: item.headUuid, value: item })),\n        })\n    );\n};\n\nconst fetchPublicFavoritesLinks = () => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    const uuidPrefix = getState().auth.config.uuidPrefix;\n    const publicProjectUuid = `${uuidPrefix}-j7d0g-publicfavorites`;\n    const typeFilters = [ResourceKind.COLLECTION, ResourceKind.CONTAINER_REQUEST, ResourceKind.GROUP, ResourceKind.WORKFLOW];\n\n    const params = {\n        filters: new FilterBuilder()\n            .addEqual('link_class', LinkClass.STAR)\n            .addEqual('owner_uuid', publicProjectUuid)\n            .addIsA('head_uuid', typeFilters)\n            .getFilters(),\n        order: new OrderBuilder<ProjectResource>().addDesc('createdAt').getOrder(),\n        limit: SIDEPANEL_TREE_NODE_LIMIT,\n    };\n\n    const { items } = await services.linkService.list(params);\n\n    const uuids = items.map(it => it.headUuid);\n    const groupItems = services.groupsService.list({\n        filters: new FilterBuilder()\n            .addIn(\"uuid\", uuids)\n            .addIsA(\"uuid\", typeFilters)\n            .getFilters()\n    });\n    const collectionItems = services.collectionService.list({\n        filters: new FilterBuilder()\n            .addIn(\"uuid\", uuids)\n            .addIsA(\"uuid\", typeFilters)\n            .getFilters()\n    });\n    const processItems = services.containerRequestService.list({\n        filters: new FilterBuilder()\n            .addIn(\"uuid\", uuids)\n            .addIsA(\"uuid\", typeFilters)\n            .getFilters()\n    });\n\n    const resolvedItems = await Promise.all([groupItems, collectionItems, processItems]);\n\n
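    // Flatten the three lookups, then keep only links whose head resources actually resolved; links to deleted or unreadable items are dropped.\n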
    const responseItems = resolvedItems.reduce((acc, response) => acc.concat(response.items), [] as Resource[]);\n\n    const filteredItems = items.filter(item => responseItems.some(responseItem => responseItem.uuid === item.headUuid));\n\n    // Setting resources here so they won't be re-fetched in the validation step\n    await dispatch(resourcesActions.SET_RESOURCES(responseItems));\n\n    return filteredItems;\n};\n\nexport const activateSidePanelTreeItem = (id: string) =>\n    async (dispatch: Dispatch, getState: () => RootState) => {\n        const node = getSidePanelTreeNode(id)(getState().treePicker);\n        if (node && !node.active) {\n            dispatch(treePickerActions.ACTIVATE_TREE_PICKER_NODE({ id, pickerId: SIDE_PANEL_TREE }));\n        }\n        if (!isSidePanelTreeCategory(id)) {\n            await dispatch<any>(activateSidePanelTreeProject(id));\n        }\n    };\n\nexport const activateSidePanelTreeProject = (id: string) =>\n    async (dispatch: Dispatch, getState: () => RootState) => {\n        const { treePicker } = getState();\n        const node = getSidePanelTreeNode(id)(treePicker);\n        if (node && node.status !== TreeNodeStatus.LOADED) {\n            await dispatch<any>(loadSidePanelTreeProjects(id));\n        } else if (node === undefined) {\n            await dispatch<any>(activateSidePanelTreeBranch(id));\n        }\n        dispatch(treePickerActions.EXPAND_TREE_PICKER_NODES({\n            ids: getSidePanelTreeNodeAncestorsIds(id)(treePicker),\n            pickerId: SIDE_PANEL_TREE\n        }));\n        dispatch<any>(expandSidePanelTreeItem(id));\n    };\n\nexport const activateSidePanelTreeBranch = (id: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const userUuid = getUserUuid(getState());\n        if (!userUuid) { return; }\n        const ancestors = await services.ancestorsService.ancestors(id, userUuid);\n        for (const ancestor of ancestors) {\n            await dispatch<any>(loadSidePanelTreeProjects(ancestor.uuid));\n        }\n        dispatch(treePickerActions.EXPAND_TREE_PICKER_NODES({\n            ids: ancestors.map(ancestor => ancestor.uuid),\n            pickerId: SIDE_PANEL_TREE\n        }));\n        dispatch(treePickerActions.ACTIVATE_TREE_PICKER_NODE({ id, pickerId: SIDE_PANEL_TREE }));\n    };\n\nexport const toggleSidePanelTreeItemCollapse = (id: string) =>\n    async (dispatch: Dispatch, getState: () => RootState) => {\n        const node = getSidePanelTreeNode(id)(getState().treePicker);\n        if (node && node.status === TreeNodeStatus.INITIAL) {\n            await dispatch<any>(loadSidePanelTreeProjects(node.id));\n        }\n        dispatch(treePickerActions.TOGGLE_TREE_PICKER_NODE_COLLAPSE({ id, pickerId: SIDE_PANEL_TREE }));\n    };\n\nexport const expandSidePanelTreeItem = (id: string) =>\n    async (dispatch: Dispatch, getState: () => RootState) => {\n        const node = getSidePanelTreeNode(id)(getState().treePicker);\n        if (node && !node.expanded) {\n            dispatch(treePickerActions.TOGGLE_TREE_PICKER_NODE_COLLAPSE({ id, pickerId: SIDE_PANEL_TREE }));\n        }\n    };\n\nexport const getSidePanelTreeNode = (id: string) => (treePicker: TreePicker) => {\n    const sidePanelTree = getTreePicker(SIDE_PANEL_TREE)(treePicker);\n    return sidePanelTree\n        ? 
getNode(id)(sidePanelTree)\n        : undefined;\n};\n\nexport const getSidePanelTreeNodeAncestorsIds = (id: string) => (treePicker: TreePicker) => {\n    const sidePanelTree = getTreePicker(SIDE_PANEL_TREE)(treePicker);\n    return sidePanelTree\n        ? getNodeAncestorsIds(id)(sidePanelTree)\n        : [];\n};\n\nexport const getSidePanelIcon = (category: string) => {\n    switch (category) {\n        case SidePanelTreeCategory.FAVORITES:\n            return FavoriteIcon;\n        case SidePanelTreeCategory.PROJECTS:\n            return RootProjectIcon;\n        case SidePanelTreeCategory.SHARED_WITH_ME:\n            return ShareMeIcon;\n        case SidePanelTreeCategory.TRASH:\n            return TrashIcon;\n        case SidePanelTreeCategory.PUBLIC_FAVORITES:\n            return PublicFavoriteIcon;\n        case SidePanelTreeCategory.ALL_PROCESSES:\n            return ProcessIcon;\n        case SidePanelTreeCategory.INSTANCE_TYPES:\n            return ResourceIcon;\n        case SidePanelTreeCategory.GROUPS:\n            return GroupsIcon;\n        case SidePanelTreeCategory.SHELL_ACCESS:\n            return TerminalIcon;\n        case SidePanelTreeCategory.EXTERNAL_CREDENTIALS:\n            return FolderKeyIcon;\n        case SidePanelTreeCategory.DASHBOARD:\n            return WheelIcon;\n        default:\n            return ProjectIcon;\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/store/snackbar/snackbar-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\n\nexport interface SnackbarMessage {\n    message: string;\n    hideDuration: number;\n    kind: SnackbarKind;\n    link?: string;\n}\n\nexport enum SnackbarKind {\n    SUCCESS = 1,\n    ERROR = 2,\n    INFO = 3,\n    WARNING = 4\n}\n\nexport const snackbarActions = unionize({\n    OPEN_SNACKBAR: ofType<{message: string; hideDuration?: number, kind?: SnackbarKind, link?: string}>(),\n    CLOSE_SNACKBAR: ofType<{}|null>(),\n    SHIFT_MESSAGES: ofType<{}>()\n});\n\nexport const showSuccessSnackbar = (message: string) =>\n    snackbarActions.OPEN_SNACKBAR({ message, hideDuration: 2000, kind: SnackbarKind.SUCCESS });\n\nexport const showErrorSnackbar = (message: string) =>\n    snackbarActions.OPEN_SNACKBAR({ message, hideDuration: 4000, kind: SnackbarKind.ERROR });\n\nexport type SnackbarAction = UnionOf<typeof snackbarActions>;\n"
  },
  {
    "path": "services/workbench2/src/store/snackbar/snackbar-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { SnackbarAction, snackbarActions, SnackbarKind, SnackbarMessage } from \"./snackbar-actions\";\n\nexport interface SnackbarState {\n    messages: SnackbarMessage[];\n    open: boolean;\n}\n\nconst DEFAULT_HIDE_DURATION = 3000;\n\nconst initialState: SnackbarState = {\n    messages: [],\n    open: false\n};\n\nexport const snackbarReducer = (state = initialState, action: SnackbarAction) => {\n    return snackbarActions.match(action, {\n        OPEN_SNACKBAR: data => {\n            return {\n                open: true,\n                messages: state.messages.concat({\n                    message: data.message,\n                    hideDuration: data.hideDuration ? data.hideDuration : DEFAULT_HIDE_DURATION,\n                    kind: data.kind ? data.kind : SnackbarKind.INFO, \n                    link: data.link\n                })\n            };\n        },\n        CLOSE_SNACKBAR: (payload) => {\n            let newMessages: any = [...state.messages];// state.messages.filter(({ message }) => message !== payload);\n\n            if (payload === undefined || JSON.stringify(payload) === '{}') {\n                newMessages.pop();\n            } else {\n                newMessages = state.messages.filter((message, index) => index !== payload);\n            }\n\n            return {\n                ...state,\n                messages: newMessages,\n                open: newMessages.length > 0\n            }\n        },\n        SHIFT_MESSAGES: () => {\n            const messages = state.messages.filter((m, idx) => idx > 0);\n            return {\n                open: messages.length > 0,\n                messages\n            };\n        },\n        default: () => state,\n    });\n};\n"
  },
  {
    "path": "services/workbench2/src/store/store.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { createStore, applyMiddleware, compose, Middleware, combineReducers, Store, Action, Dispatch } from \"redux\";\nimport { connectRouter, routerMiddleware } from 'connected-react-router';\nimport thunkMiddleware from \"redux-thunk\";\nimport { History } from \"history\";\nimport { handleRedirects } from \"../common/redirect-to\";\n\nimport { authReducer } from \"./auth/auth-reducer\";\nimport { authMiddleware } from \"./auth/auth-middleware\";\nimport { dataExplorerReducer } from \"./data-explorer/data-explorer-reducer\";\nimport { detailsPanelReducer } from \"./details-panel/details-panel-reducer\";\nimport { contextMenuReducer } from \"./context-menu/context-menu-reducer\";\nimport { reducer as formReducer } from \"redux-form\";\nimport { favoritesReducer } from \"./favorites/favorites-reducer\";\nimport { favoritesLinksReducer } from \"./favorites/favorites-links-reducer\";\nimport { snackbarReducer } from \"./snackbar/snackbar-reducer\";\nimport { collectionPanelFilesReducer } from \"./collection-panel/collection-panel-files/collection-panel-files-reducer\";\nimport { dataExplorerMiddleware } from \"./data-explorer/data-explorer-middleware\";\nimport { FAVORITE_PANEL_ID } from \"./favorite-panel/favorite-panel-action\";\nimport { WORKFLOW_PROCESSES_PANEL_ID } from \"./workflow-panel/workflow-panel-actions\";\nimport { PROJECT_PANEL_DATA_ID, PROJECT_PANEL_RUN_ID } from \"./project-panel/project-panel-action-bind\";\nimport { ProjectPanelDataMiddlewareService } from \"./project-panel/project-panel-data-middleware-service\";\nimport { ProjectPanelRunMiddlewareService } from \"./project-panel/project-panel-run-middleware-service\";\nimport { FavoritePanelMiddlewareService } from \"./favorite-panel/favorite-panel-middleware-service\";\nimport { AllProcessesPanelMiddlewareService } from \"./all-processes-panel/all-processes-panel-middleware-service\";\nimport { WorkflowProcessesMiddlewareService } from \"./workflow-panel/workflow-middleware-service\";\nimport { collectionPanelReducer } from \"./collection-panel/collection-panel-reducer\";\nimport { dialogReducer } from \"./dialog/dialog-reducer\";\nimport { ServiceRepository } from \"services/services\";\nimport { treePickerReducer, treePickerSearchReducer } from \"./tree-picker/tree-picker-reducer\";\nimport { resourcesReducer } from \"store/resources/resources-reducer\";\nimport { propertiesReducer } from \"./properties/properties-reducer\";\nimport { fileUploaderReducer } from \"./file-uploader/file-uploader-reducer\";\nimport { TrashPanelMiddlewareService } from \"store/trash-panel/trash-panel-middleware-service\";\nimport { TRASH_PANEL_ID } from \"store/trash-panel/trash-panel-action\";\nimport { processLogsPanelReducer } from \"./process-logs-panel/process-logs-panel-reducer\";\nimport { processPanelReducer } from \"store/process-panel/process-panel-reducer\";\nimport { SHARED_WITH_ME_PANEL_ID } from \"store/shared-with-me-panel/shared-with-me-panel-actions\";\nimport { SharedWithMeMiddlewareService } from \"./shared-with-me-panel/shared-with-me-middleware-service\";\nimport { progressIndicatorReducer } from \"./progress-indicator/progress-indicator-reducer\";\nimport { runProcessPanelReducer } from \"store/run-process-panel/run-process-panel-reducer\";\nimport { WorkflowMiddlewareService } from \"./workflow-panel/workflow-middleware-service\";\nimport { WORKFLOW_PANEL_ID } from 
\"./workflow-panel/workflow-panel-actions\";\nimport { appInfoReducer } from \"store/app-info/app-info-reducer\";\nimport { searchBarReducer } from \"./search-bar/search-bar-reducer\";\nimport { SEARCH_RESULTS_PANEL_ID } from \"store/search-results-panel/search-results-panel-actions\";\nimport { SearchResultsMiddlewareService } from \"./search-results-panel/search-results-middleware-service\";\nimport { virtualMachinesReducer } from \"store/virtual-machines/virtual-machines-reducer\";\nimport { repositoriesReducer } from \"store/repositories/repositories-reducer\";\nimport { keepServicesReducer } from \"store/keep-services/keep-services-reducer\";\nimport { UserMiddlewareService } from \"store/users/user-panel-middleware-service\";\nimport { USERS_PANEL_ID } from \"store/users/users-actions\";\nimport { UserProfileGroupsMiddlewareService } from \"store/user-profile/user-profile-groups-middleware-service\";\nimport { USER_PROFILE_PANEL_ID } from \"store/user-profile/user-profile-actions\";\nimport { GroupsPanelMiddlewareService } from \"store/groups-panel/groups-panel-middleware-service\";\nimport { GROUPS_PANEL_ID } from \"store/groups-panel/groups-panel-actions\";\nimport { GroupDetailsPanelMembersMiddlewareService } from \"store/group-details-panel/group-details-panel-members-middleware-service\";\nimport { GroupDetailsPanelPermissionsMiddlewareService } from \"store/group-details-panel/group-details-panel-permissions-middleware-service\";\nimport { GROUP_DETAILS_MEMBERS_PANEL_ID, GROUP_DETAILS_PERMISSIONS_PANEL_ID } from \"store/group-details-panel/group-details-panel-actions\";\nimport { LINK_PANEL_ID } from \"store/link-panel/link-panel-actions\";\nimport { LinkMiddlewareService } from \"store/link-panel/link-panel-middleware-service\";\nimport { API_CLIENT_AUTHORIZATION_PANEL_ID } from \"store/api-client-authorizations/api-client-authorizations-actions\";\nimport { ApiClientAuthorizationMiddlewareService } from \"store/api-client-authorizations/api-client-authorizations-middleware-service\";\nimport { PublicFavoritesMiddlewareService } from \"store/public-favorites-panel/public-favorites-middleware-service\";\nimport { PUBLIC_FAVORITE_PANEL_ID } from \"store/public-favorites-panel/public-favorites-action\";\nimport { publicFavoritesReducer } from \"store/public-favorites/public-favorites-reducer\";\nimport { linkAccountPanelReducer } from \"./link-account-panel/link-account-panel-reducer\";\nimport { CollectionsWithSameContentAddressMiddlewareService } from \"store/collections-content-address-panel/collections-content-address-middleware-service\";\nimport { COLLECTIONS_CONTENT_ADDRESS_PANEL_ID } from \"store/collections-content-address-panel/collections-content-address-panel-actions\";\nimport { ownerNameReducer } from \"store/owner-name/owner-name-reducer\";\nimport { SubprocessMiddlewareService } from \"store/subprocess-panel/subprocess-panel-middleware-service\";\nimport { SUBPROCESS_PANEL_ID } from \"store/subprocess-panel/subprocess-panel-actions\";\nimport { ALL_PROCESSES_PANEL_ID } from \"./all-processes-panel/all-processes-panel-action\";\nimport { Config } from \"common/config\";\nimport { pluginConfig } from \"plugins\";\nimport { MiddlewareListReducer } from \"common/plugintypes\";\nimport { tooltipsMiddleware } from \"./tooltips/tooltips-middleware\";\nimport { sidePanelReducer } from \"./side-panel/side-panel-reducer\";\nimport { bannerReducer } from \"./banner/banner-reducer\";\nimport { multiselectReducer } from \"./multiselect/multiselect-reducer\";\nimport { 
composeWithDevTools } from \"redux-devtools-extension\";\nimport { selectedResourceReducer } from \"./selected-resource/selected-resource-reducer\";\nimport { ExternalCredentialsMiddlewareService } from \"./external-credentials/external-credentials-middleware-service\";\nimport { EXTERNAL_CREDENTIALS_PANEL } from \"./external-credentials/external-credentials-actions\";\nimport createSagaMiddleware from 'redux-saga';\nimport { rootSaga } from \"./redux-saga\";\nimport { RecentlyVisitedMiddlewareService } from \"./recently-visited/recently-visited-middleware-services\";\nimport { RECENTLY_VISITED_PANEL_ID } from \"./recently-visited/recently-visited-actions\";\nimport { RecentWorkflowsMiddlewareService } from \"./recent-wf-runs/recent-wf-runs-middleware-sevice\";\nimport { RECENT_WF_RUNS_ID } from \"./recent-wf-runs/recent-wf-runs-action\";\nimport { FavoritePinsMiddlewareService, FAVORITE_PINS_ID } from \"./favorite-pins/favorite-pins-middleware-service\";\n\ndeclare global {\n    interface Window {\n        __REDUX_DEVTOOLS_EXTENSION_COMPOSE__?: typeof compose;\n    }\n}\n\nexport type RootState = ReturnType<ReturnType<typeof createRootReducer>>;\n\nexport type RootStore = Store<RootState, Action> & { dispatch: Dispatch<any> };\n\nexport function configureStore(history: History, services: ServiceRepository, config: Config): RootStore {\n    const rootReducer = createRootReducer(services, history);\n\n    const projectPanelDataMiddleware = dataExplorerMiddleware(new ProjectPanelDataMiddlewareService(services, PROJECT_PANEL_DATA_ID));\n    const projectPanelRunMiddleware = dataExplorerMiddleware(new ProjectPanelRunMiddlewareService(services, PROJECT_PANEL_RUN_ID));\n    const favoritePanelMiddleware = dataExplorerMiddleware(new FavoritePanelMiddlewareService(services, FAVORITE_PANEL_ID));\n    const allProcessesPanelMiddleware = dataExplorerMiddleware(new AllProcessesPanelMiddlewareService(services, ALL_PROCESSES_PANEL_ID));\n    const workflowProcessesPanelMiddleware = dataExplorerMiddleware(new WorkflowProcessesMiddlewareService(services, WORKFLOW_PROCESSES_PANEL_ID));\n    const trashPanelMiddleware = dataExplorerMiddleware(new TrashPanelMiddlewareService(services, TRASH_PANEL_ID));\n    const searchResultsPanelMiddleware = dataExplorerMiddleware(new SearchResultsMiddlewareService(services, SEARCH_RESULTS_PANEL_ID));\n    const sharedWithMePanelMiddleware = dataExplorerMiddleware(new SharedWithMeMiddlewareService(services, SHARED_WITH_ME_PANEL_ID));\n    const workflowPanelMiddleware = dataExplorerMiddleware(new WorkflowMiddlewareService(services, WORKFLOW_PANEL_ID));\n    const userPanelMiddleware = dataExplorerMiddleware(new UserMiddlewareService(services, USERS_PANEL_ID));\n    const userProfileGroupsMiddleware = dataExplorerMiddleware(new UserProfileGroupsMiddlewareService(services, USER_PROFILE_PANEL_ID));\n    const groupsPanelMiddleware = dataExplorerMiddleware(new GroupsPanelMiddlewareService(services, GROUPS_PANEL_ID));\n    const groupDetailsPanelMembersMiddleware = dataExplorerMiddleware(\n        new GroupDetailsPanelMembersMiddlewareService(services, GROUP_DETAILS_MEMBERS_PANEL_ID)\n    );\n    const groupDetailsPanelPermissionsMiddleware = dataExplorerMiddleware(\n        new GroupDetailsPanelPermissionsMiddlewareService(services, GROUP_DETAILS_PERMISSIONS_PANEL_ID)\n    );\n    const linkPanelMiddleware = dataExplorerMiddleware(new LinkMiddlewareService(services, LINK_PANEL_ID));\n    const apiClientAuthorizationMiddlewareService = dataExplorerMiddleware(\n        new ApiClientAuthorizationMiddlewareService(services, API_CLIENT_AUTHORIZATION_PANEL_ID)\n    );\n    const publicFavoritesMiddleware = dataExplorerMiddleware(new PublicFavoritesMiddlewareService(services, PUBLIC_FAVORITE_PANEL_ID));\n    const collectionsContentAddress = dataExplorerMiddleware(\n        new CollectionsWithSameContentAddressMiddlewareService(services, COLLECTIONS_CONTENT_ADDRESS_PANEL_ID)\n    );\n    const subprocessMiddleware = dataExplorerMiddleware(new SubprocessMiddlewareService(services, SUBPROCESS_PANEL_ID));\n    const externalCredentialsMiddleware = dataExplorerMiddleware(\n        new ExternalCredentialsMiddlewareService(services, EXTERNAL_CREDENTIALS_PANEL)\n    );\n    const recentlyVisitedMiddleware = dataExplorerMiddleware(new RecentlyVisitedMiddlewareService(services, RECENTLY_VISITED_PANEL_ID));\n    const recentWorkflowsMiddleware = dataExplorerMiddleware(new RecentWorkflowsMiddlewareService(services, RECENT_WF_RUNS_ID));\n    const favoritePinsMiddleware = dataExplorerMiddleware(new FavoritePinsMiddlewareService(services, FAVORITE_PINS_ID));\n\n
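    // On every dispatched action, apply any pending redirect once an api token is available.\n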
    const redirectToMiddleware = (store: any) => (next: any) => (action: any) => {\n        const state = store.getState();\n\n        if (state.auth && state.auth.apiToken) {\n            handleRedirects(state.auth.apiToken, config);\n        }\n\n        return next(action);\n    };\n\n    const sagaMiddleware = createSagaMiddleware({\n        context: { services }\n    });\n\n    let middlewares: Middleware[] = [\n        routerMiddleware(history),\n        thunkMiddleware.withExtraArgument(services),\n        sagaMiddleware,\n        authMiddleware(services),\n        tooltipsMiddleware(services),\n        projectPanelDataMiddleware,\n        projectPanelRunMiddleware,\n        favoritePanelMiddleware,\n        allProcessesPanelMiddleware,\n        trashPanelMiddleware,\n        searchResultsPanelMiddleware,\n        sharedWithMePanelMiddleware,\n        workflowPanelMiddleware,\n        userPanelMiddleware,\n        userProfileGroupsMiddleware,\n        groupsPanelMiddleware,\n        groupDetailsPanelMembersMiddleware,\n        groupDetailsPanelPermissionsMiddleware,\n        linkPanelMiddleware,\n        apiClientAuthorizationMiddlewareService,\n        publicFavoritesMiddleware,\n        collectionsContentAddress,\n        subprocessMiddleware,\n        workflowProcessesPanelMiddleware,\n        externalCredentialsMiddleware,\n        recentlyVisitedMiddleware,\n        recentWorkflowsMiddleware,\n        favoritePinsMiddleware,\n    ];\n\n    const reduceMiddlewaresFn: (a: Middleware[], b: MiddlewareListReducer) => Middleware[] = (a, b) => b(a, services);\n\n    middlewares = pluginConfig.middlewares.reduce(reduceMiddlewaresFn, middlewares);\n\n    const enhancer = composeWithDevTools({\n        /* uncomment to turn on stack trace in redux-devtools\n        re-comment afterwards to prevent performance impact */\n        // trace: true,\n    })(applyMiddleware(redirectToMiddleware, ...middlewares));\n    const store = createStore(rootReducer, enhancer);\n\n    sagaMiddleware.run(rootSaga);\n\n    return store;\n}\n\nconst createRootReducer = (services: ServiceRepository, history: History) =>\n    combineReducers({\n        auth: authReducer(services),\n        banner: bannerReducer,\n        collectionPanel: collectionPanelReducer,\n        collectionPanelFiles: collectionPanelFilesReducer,\n        contextMenu: contextMenuReducer,\n        dataExplorer: dataExplorerReducer,\n        detailsPanel: 
detailsPanelReducer,\n        dialog: dialogReducer,\n        favorites: favoritesReducer,\n        favoritesLinks: favoritesLinksReducer,\n        ownerName: ownerNameReducer,\n        publicFavorites: publicFavoritesReducer,\n        form: formReducer,\n        processLogsPanel: processLogsPanelReducer,\n        properties: propertiesReducer,\n        resources: resourcesReducer,\n        router: connectRouter(history),\n        selectedResource: selectedResourceReducer,\n        snackbar: snackbarReducer,\n        treePicker: treePickerReducer,\n        treePickerSearch: treePickerSearchReducer,\n        fileUploader: fileUploaderReducer,\n        processPanel: processPanelReducer,\n        progressIndicator: progressIndicatorReducer,\n        runProcessPanel: runProcessPanelReducer,\n        appInfo: appInfoReducer,\n        searchBar: searchBarReducer,\n        virtualMachines: virtualMachinesReducer,\n        repositories: repositoriesReducer,\n        keepServices: keepServicesReducer,\n        linkAccountPanel: linkAccountPanelReducer,\n        sidePanel: sidePanelReducer,\n        multiselect: multiselectReducer,\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/subprocess-panel/subprocess-panel-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from 'services/services';\nimport { bindDataExplorerActions } from 'store/data-explorer/data-explorer-action';\nimport { FilterBuilder, joinFilters } from 'services/api/filter-builder';\nimport { ProcessStatusFilter, buildProcessStatusFilters } from 'store/resource-type-filters/resource-type-filters';\nimport { Process } from 'store/processes/process';\nimport { ProjectResource } from 'models/project';\nimport { getResource } from 'store/resources/resources';\nimport { ContainerRequestResource } from 'models/container-request';\nimport { WorkflowResource } from 'models/workflow';\nimport { Resource, ResourceKind } from 'models/resource';\nimport { ALL_PROCESSES_PANEL_ID } from 'store/all-processes-panel/all-processes-panel-action';\nimport { SHARED_WITH_ME_PANEL_ID } from 'store/shared-with-me-panel/shared-with-me-panel-actions';\n\nexport const SUBPROCESS_PANEL_ID = \"subprocessPanel\";\nexport const SUBPROCESS_ATTRIBUTES_DIALOG = 'subprocessAttributesDialog';\nexport const subprocessPanelActions = bindDataExplorerActions(SUBPROCESS_PANEL_ID);\n\nexport const loadSubprocessPanel = () =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(subprocessPanelActions.REQUEST_ITEMS());\n    };\n\n/**\n * Holds a Process status type and process count result\n */\ntype ProcessStatusCount = {\n    status: keyof ProcessStatusCounts;\n    count: string | null;\n};\n\nexport type ProcessStatusCounts = {\n    [ProcessStatusFilter.ALL]: string | null;\n    [ProcessStatusFilter.COMPLETED]: string | null;\n    [ProcessStatusFilter.RUNNING]: string | null;\n    [ProcessStatusFilter.FAILED]: string | null;\n    [ProcessStatusFilter.QUEUED]: string | null;\n    [ProcessStatusFilter.ONHOLD]: string | null;\n    [ProcessStatusFilter.CANCELLED]: string | null;\n    [ProcessStatusFilter.DRAFT]: string | null;\n};\n\n/**\n * Associates each of the limited progress bar segment types with an array of\n * ProcessStatusFilterTypes to be combined when displayed\n */\ntype ProcessStatusMap = Record<keyof ProcessStatusCounts, ProcessStatusFilter[]>;\n\nconst statusMap: ProcessStatusMap = {\n        [ProcessStatusFilter.ALL]: [ProcessStatusFilter.ALL],\n        [ProcessStatusFilter.COMPLETED]: [ProcessStatusFilter.COMPLETED],\n        [ProcessStatusFilter.RUNNING]: [ProcessStatusFilter.RUNNING],\n        [ProcessStatusFilter.FAILED]: [ProcessStatusFilter.FAILED],\n        [ProcessStatusFilter.CANCELLED]: [ProcessStatusFilter.CANCELLED],\n        [ProcessStatusFilter.QUEUED]: [ProcessStatusFilter.QUEUED],\n        [ProcessStatusFilter.ONHOLD]: [ProcessStatusFilter.ONHOLD],\n        [ProcessStatusFilter.DRAFT]: [ProcessStatusFilter.DRAFT],\n};\n\n/**\n * Utility type to hold a pair of associated progress bar status and process status\n */\ntype ProcessStatusPair = {\n    barStatus: keyof ProcessStatusMap;\n    processStatus: ProcessStatusFilter;\n};\n\n/**\n * Type guard to distinguish Processes from other Resources\n * @param resource The item to check\n * @returns if the resource is a Process\n */\nexport const isProcess = <T extends Resource>(resource: T | Process | undefined): resource is Process => {\n    return !!resource && 'containerRequest' in resource;\n};\n\n/**\n * Type guard to distinguish ContainerRequestResources from Resources\n * 
@param resource The item to check\n * @returns if the resource is a ContainerRequestResource\n */\nconst isContainerRequest = <T extends Resource>(resource: T | ContainerRequestResource | undefined): resource is ContainerRequestResource => {\n    return !!resource && 'containerUuid' in resource;\n};\n\nexport const fetchProcessStatusCounts = (parentResourceUuid: string, typeFilter?: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<ProcessStatusCounts | undefined> => {\n        const resources = getState().resources;\n        const parentResource = getResource<ProjectResource | ContainerRequestResource | WorkflowResource>(parentResourceUuid)(resources);\n\n        const requestContainerStatusCount = async (fb: FilterBuilder) => {\n            return await services.containerRequestService.list({\n                limit: 0,\n                offset: 0,\n                filters: fb.getFilters(),\n            });\n        }\n\n        const requestGroupsServiceCount = async (fb: FilterBuilder) => {\n            return await services.groupsService.contents('', {\n                limit: 0,\n                count: 'exact',\n                filters: fb.getFilters(),\n                excludeHomeProject: true,\n            });\n        }\n\n        let baseFilter: string = \"\";\n        if (isContainerRequest(parentResource) && parentResource.containerUuid) {\n            // Prevent CR without containerUuid from generating baseFilter\n            baseFilter = new FilterBuilder().addEqual('requesting_container_uuid', parentResource.containerUuid).getFilters();\n            // isCR type narrowing needed since CR without container may fall through\n        } else if (parentResource?.kind === ResourceKind.WORKFLOW && !isContainerRequest(parentResource)) {\n            baseFilter = new FilterBuilder().addEqual('properties.template_uuid', parentResource.uuid).getFilters();\n        } else if (parentResource && !isContainerRequest(parentResource)) {\n            baseFilter = new FilterBuilder().addEqual('owner_uuid', parentResource.uuid).getFilters();\n        } else if (!isContainerRequest(parentResource) && isSharedWithMePanel(parentResourceUuid)) {\n            const { auth } = getState();\n            baseFilter = new FilterBuilder()\n                .addIsA('uuid', 'arvados#containerRequest')\n                .addEqual('requesting_container_uuid', null)\n                .addDistinct('uuid', `${auth.config.uuidPrefix}-j7d0g-publicfavorites`)\n                .addDistinct('owner_uuid', `${auth.user?.uuid}`)\n                .getFilters();\n        }\n\n        if ((parentResource && baseFilter) || (isSharedWithMePanel(parentResourceUuid) && baseFilter) || isAllProcessesPanel(parentResourceUuid)) {\n            // Add type filters from consumers that want to sync progress stats with filters\n            if (typeFilter) {\n                baseFilter = joinFilters(baseFilter, typeFilter);\n            }\n\n            try {\n                // Create return object\n                let result: ProcessStatusCounts = {\n                    [ProcessStatusFilter.ALL]: null,\n                    [ProcessStatusFilter.COMPLETED]: null,\n                    [ProcessStatusFilter.RUNNING]: null,\n                    [ProcessStatusFilter.FAILED]: null,\n                    [ProcessStatusFilter.QUEUED]: null,\n                    [ProcessStatusFilter.ONHOLD]: null,\n                    [ProcessStatusFilter.CANCELLED]: null,\n                    
[ProcessStatusFilter.DRAFT]: null,\n                };\n\n                // Create an array of promises, each resolving to a status key paired with its item count\n                // The requests start in parallel while preserving the association with the status key as a typed key\n                const promises = (Object.keys(statusMap) as Array<keyof ProcessStatusMap>)\n                    // Split statusMap into pairs of progress bar status and process status\n                    .reduce((acc, curr) => [...acc, ...statusMap[curr].map(processStatus => ({barStatus: curr, processStatus}))], [] as ProcessStatusPair[])\n                    .map(async (statusPair: ProcessStatusPair): Promise<ProcessStatusCount> => {\n                        // For each status pair, request count and return bar status and count\n                        const { barStatus, processStatus } = statusPair;\n                        const filter = buildProcessStatusFilters(new FilterBuilder(baseFilter), processStatus);\n                        const requestFunc = isSharedWithMePanel(parentResourceUuid) ? requestGroupsServiceCount : requestContainerStatusCount;\n                        const count = (await requestFunc(filter))?.itemsAvailable?.toLocaleString();\n                        if (count === undefined) return Promise.reject();\n                        return {status: barStatus, count};\n                    });\n\n                // Resolve each status count request and apply the results to the return object\n                const results = await resolvePromisesSequentially(promises);\n                results.forEach((singleResult) => {\n                    result[singleResult.status] = singleResult.count;\n                });\n\n                return result;\n            } catch (e) {\n                return undefined;\n            }\n        }\n        return undefined;\n    };\n\n/**\n * Awaits an array of already-started promises one at a time, yielding to the\n * event loop between awaits. This staggers result handling only; it does not\n * serialize the underlying requests, which are already in flight.\n */\nasync function resolvePromisesSequentially<T>(promises: Promise<T>[]) {\n    const results: T[] = [];\n\n    for (const promise of promises) {\n        try {\n            // Yield control to the event loop before awaiting the promise\n            await new Promise(resolve => setTimeout(resolve, 0));\n            results.push(await promise);\n        } catch (error) {\n            console.error(\"Error while resolving promises sequentially\", error);\n        }\n    }\n\n    return results;\n}\n\nexport const isAllProcessesPanel = (parentResourceUuid: string) => parentResourceUuid === ALL_PROCESSES_PANEL_ID;\nexport const isSharedWithMePanel = (parentResourceUuid: string) => parentResourceUuid === SHARED_WITH_ME_PANEL_ID;
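\n\n// Usage sketch (illustrative only; updateProgressBar is a hypothetical callback):\n//   dispatch<any>(fetchProcessStatusCounts(parentUuid))\n//       .then(counts => counts && updateProgressBar(counts));\n"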
  },
  {
    "path": "services/workbench2/src/store/subprocess-panel/subprocess-panel-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from \"../store\";\nimport { ServiceRepository } from \"services/services\";\nimport { FilterBuilder, joinFilters } from \"services/api/filter-builder\";\nimport { Dispatch, MiddlewareAPI } from \"redux\";\nimport { DataExplorer } from \"store/data-explorer/data-explorer-reducer\";\nimport { ProcessesMiddlewareService } from \"store/processes/processes-middleware-service\";\nimport { subprocessPanelActions } from './subprocess-panel-actions';\nimport { getProcess } from \"store/processes/process\";\n\nexport class SubprocessMiddlewareService extends ProcessesMiddlewareService {\n    constructor(services: ServiceRepository, id: string) {\n        super(services, subprocessPanelActions, id);\n    }\n\n    getFilters(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): string | null {\n        const state = api.getState();\n        const parentContainerRequestUuid = state.processPanel.containerRequestUuid;\n        if (!parentContainerRequestUuid) { return null; }\n\n        const process = getProcess(parentContainerRequestUuid)(state.resources);\n        if (!process?.container) { return null; }\n\n        const requesting_container = new FilterBuilder().addEqual('requesting_container_uuid', process.container.uuid).getFilters();\n        const sup = super.getFilters(api, dataExplorer);\n        if (sup === null) { return null; }\n\n        return joinFilters(sup, requesting_container);\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/store/token-dialog/token-dialog-actions.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { getProperty } from 'store/properties/properties';\nimport { propertiesActions } from 'store/properties/properties-actions';\nimport { RootState } from 'store/store';\n\nexport const TOKEN_DIALOG_NAME = 'tokenDialog';\nconst API_HOST_PROPERTY_NAME = 'apiHost';\n\nexport interface TokenDialogData {\n    token: string;\n    tokenExpiration?: Date;\n    apiHost: string;\n    canCreateNewTokens: boolean;\n}\n\nexport const setTokenDialogApiHost = (apiHost: string) =>\n    propertiesActions.SET_PROPERTY({ key: API_HOST_PROPERTY_NAME, value: apiHost });\n\nexport const getTokenDialogData = (state: RootState): TokenDialogData => {\n    const loginCluster = state.auth.config.clusterConfig.Login.LoginCluster;\n    const canCreateNewTokens = !(loginCluster !== \"\" && state.auth.homeCluster !== loginCluster);\n\n    return {\n        apiHost: getProperty<string>(API_HOST_PROPERTY_NAME)(state.properties) || '',\n        token: state.auth.extraApiToken || state.auth.apiToken || '',\n        tokenExpiration: state.auth.extraApiToken\n            ? state.auth.extraApiTokenExpiration\n            : state.auth.apiTokenExpiration,\n        canCreateNewTokens,\n    };\n};\n\nexport const openTokenDialog = dialogActions.OPEN_DIALOG({ id: TOKEN_DIALOG_NAME, data: {} });\n"
  },
  {
    "path": "services/workbench2/src/store/tooltips/tooltips-middleware.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { CollectionDirectory, CollectionFile } from \"models/collection-file\";\nimport { Middleware, Store } from \"redux\";\nimport { ServiceRepository } from \"services/services\";\nimport { RootState } from \"store/store\";\nimport tippy, { createSingleton } from 'tippy.js';\nimport 'tippy.js/dist/tippy.css';\n\nlet running = false;\nlet tooltipsContents = null;\nlet tooltipsFetchFailed = false;\nexport const TOOLTIP_LOCAL_STORAGE_KEY = \"TOOLTIP_LOCAL_STORAGE_KEY\";\n\nconst tippySingleton = createSingleton([], {delay: 10});\n\nexport const tooltipsMiddleware = (services: ServiceRepository): Middleware => (store: Store) => next => action => {\n    const state: RootState = store.getState();\n\n    if (state && state.auth && state.auth.config && state.auth.config.clusterConfig && state.auth.config.clusterConfig.Workbench) {\n        const hideTooltip = localStorage.getItem(TOOLTIP_LOCAL_STORAGE_KEY);\n        const { BannerUUID: bannerUUID } = state.auth.config.clusterConfig.Workbench;\n    \n        if (bannerUUID && !tooltipsContents && !hideTooltip && !tooltipsFetchFailed && !running) {\n            running = true;\n            fetchTooltips(services, bannerUUID);\n        } else if (tooltipsContents && !hideTooltip && !tooltipsFetchFailed) {\n            applyTooltips();\n        }\n    }\n\n    return next(action);\n};\n\nconst fetchTooltips = (services, bannerUUID) => {\n    services.collectionService.files(bannerUUID)\n        .then(results => {\n            const tooltipsFile: CollectionDirectory | CollectionFile | undefined = results.find(({ name }) => name === 'tooltips.json');\n\n            if (tooltipsFile) {\n                running = true;\n                services.collectionService.getFileContents(tooltipsFile as CollectionFile)\n                    .then(data => {\n                        tooltipsContents = JSON.parse(data);\n                        applyTooltips();\n                    })\n                    .catch(() => {})\n                    .finally(() => {\n                        running = false;\n                    });\n            }  else {\n                tooltipsFetchFailed = true;\n            }\n        })\n        .catch(() => {})\n        .finally(() => {\n            running = false;\n        });\n};\n\nconst applyTooltips = () => {\n    const tippyInstances: any[] = Object.keys(tooltipsContents as any)\n        .map((key) => {\n            const content = (tooltipsContents as any)[key]\n            const element = document.querySelector(key);\n\n            if (element) {\n                const hasTippyAttatched = !!(element as any)._tippy;\n\n                if (!hasTippyAttatched && tooltipsContents) {\n                    return tippy(element as any, { content });\n                }\n            }\n\n            return null;\n        })\n        .filter(data => !!data);\n\n    if (tippyInstances.length > 0) {\n        tippySingleton.setInstances(tippyInstances);\n    }\n};"
  },
  {
    "path": "services/workbench2/src/store/trash/trash-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { ServiceRepository } from \"services/services\";\nimport { trashPanelActions } from \"store/trash-panel/trash-panel-action\";\nimport { activateSidePanelTreeItem, loadSidePanelTreeProjects, SidePanelTreeCategory } from \"store/side-panel-tree/side-panel-tree-actions\";\nimport { projectPanelDataActions } from \"store/project-panel/project-panel-action-bind\";\nimport { sharedWithMePanelActions } from \"store/shared-with-me-panel/shared-with-me-panel-actions\";\nimport { extractUuidKind, ResourceKind } from \"models/resource\";\nimport { navigateTo } from \"store/navigation/navigation-action\";\nimport { matchFavoritesRoute, matchProjectRoute, matchSharedWithMeRoute, matchTrashRoute } from \"routes/routes\";\nimport { ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport { addDisabledButton } from \"store/multiselect/multiselect-actions\";\nimport { showGroupedCommonResourceResultSnackbars, updateResources } from \"store/resources/resources-actions\";\nimport { favoritePanelActions } from \"store/favorite-panel/favorite-panel-action\";\nimport { CommonResourceServiceError } from \"services/common-service/common-resource-service\";\nimport _ from \"lodash\";\n\n/**\n * Toggles the trash status of an array of UUIDS based on the current isTrashed status\n * @param uuids list of uuids to trash/untrash\n * @param isTrashed Current trashed status to be toggled\n * @returns Dispatchable action that yields a void promise\n *\n * This only handles trashable resources aka Collection / Group\n */\nexport const toggleResourceTrashed =\n    (uuids: string[], isTrashed: boolean) =>\n        async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<any> => {\n            dispatch<any>(addDisabledButton(ContextMenuActionNames.MOVE_TO_TRASH));\n\n            const verb = isTrashed ? \"untrash\" : \"trash\";\n            const messageFuncMap = {\n                [CommonResourceServiceError.NONE]: (count: number) => count > 1 ? `${_.startCase(verb)}ed ${count} items` : `Item ${verb}ed`,\n                [CommonResourceServiceError.PERMISSION_ERROR_FORBIDDEN]: (count: number) => count > 1 ? `${_.startCase(verb)} ${count} items failed: Access Denied` : `${_.startCase(verb)} failed: Access Denied`,\n                [CommonResourceServiceError.UNIQUE_NAME_VIOLATION]: (count: number) => count > 1 ? `${_.startCase(verb)} ${count} items failed: Duplicate Name` : `${_.startCase(verb)} failed: Duplicate Name`,\n                [CommonResourceServiceError.UNKNOWN]: (count: number) => count > 1 ? `${_.startCase(verb)} ${count} items failed` : `${_.startCase(verb)} failed`,\n            };\n\n            const trashFunc = async (uuid: string) => {\n                const kind = extractUuidKind(uuid);\n                if (kind === ResourceKind.COLLECTION) {\n                    return isTrashed ? services.collectionService.untrash(uuid) : services.collectionService.trash(uuid);\n                } else if (kind === ResourceKind.GROUP) {\n                    return isTrashed ? 
services.groupsService.untrash(uuid) : services.groupsService.trash(uuid);\n                }\n                console.error(\"Trash operation failed: resource type not trashable \" + uuid);\n                return Promise.reject();\n            };\n\n            await Promise.allSettled(uuids.map((uuid) => trashFunc(uuid)))\n                .then(async settledPromises => {\n                    const { success } = showGroupedCommonResourceResultSnackbars(dispatch, settledPromises, messageFuncMap);\n\n                    if (success.length) {\n                        const { location } = getState().router;\n                        // Update store\n                        await dispatch<any>(updateResources(success.map(success => success.value)));\n                        if (isTrashed) {\n                            // Refresh trash panel after untrash\n                            if (matchTrashRoute(location ? location.pathname : \"\")) {\n                                dispatch(trashPanelActions.REQUEST_ITEMS());\n                            }\n                            // Navigate to untrashed project when only item\n                            if (uuids.length === 1 && success.length === 1) {\n                                const uuid = success[0].value.uuid;\n                                if (extractUuidKind(uuid) === ResourceKind.GROUP) {\n                                    dispatch<any>(navigateTo(uuid));\n                                    dispatch<any>(activateSidePanelTreeItem(uuid));\n                                }\n                            }\n                            // Reload favorites\n                            dispatch<any>(loadSidePanelTreeProjects(SidePanelTreeCategory.FAVORITES));\n                        } else {\n                            // Refresh favorites / project view after trashed\n                            if (matchFavoritesRoute(location ? location.pathname : \"\")) {\n                                dispatch(favoritePanelActions.REQUEST_ITEMS());\n                            } else if (matchProjectRoute(location ? location.pathname : \"\")) {\n                                dispatch(projectPanelDataActions.REQUEST_ITEMS());\n                            } else if (matchSharedWithMeRoute(location ? 
location.pathname : \"\")) {\n                                dispatch(sharedWithMePanelActions.REQUEST_ITEMS());\n                            }\n\n                            // If 1 item trashed, navigate to parent\n                            if (uuids.length === 1 && success.length === 1) {\n                                dispatch<any>(navigateTo(success[0].value.ownerUuid));\n                            }\n\n                            // Reload favorites\n                            dispatch<any>(loadSidePanelTreeProjects(SidePanelTreeCategory.FAVORITES))\n                                // Using then to ensure loadSidePanelTreeProjects finished\n                                .then(() => {\n                                    // Refresh each project's parent in the side panel tree\n                                    // Get every successfully untrashed reasource\n                                    success.map(result => result.value)\n                                        // Filter to only GROUP (project)\n                                        .filter(resource => resource.kind === ResourceKind.GROUP)\n                                        // Load side panel for each\n                                        .map(resource =>\n                                            dispatch<any>(loadSidePanelTreeProjects(resource.ownerUuid))\n                                        );\n                                });\n                        }\n                    }\n                });\n        };\n"
  },
  {
    "path": "services/workbench2/src/store/trash-panel/trash-panel-action.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { bindDataExplorerActions } from \"store/data-explorer/data-explorer-action\";\n\nexport const TRASH_PANEL_ID = \"trashPanel\";\nexport const trashPanelActions = bindDataExplorerActions(TRASH_PANEL_ID);\n\nexport const loadTrashPanel = () => (dispatch: Dispatch) => {\n    dispatch(trashPanelActions.RESET_EXPLORER_SEARCH_VALUE());\n    dispatch(trashPanelActions.REQUEST_ITEMS());\n};"
  },
  {
    "path": "services/workbench2/src/store/trash-panel/trash-panel-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport {\n    DataExplorerMiddlewareService, dataExplorerToListParams,\n    listResultsToDataExplorerItemsMeta\n} from \"../data-explorer/data-explorer-middleware-service\";\nimport { RootState } from \"../store\";\nimport { getUserUuid } from \"common/getuser\";\nimport { ServiceRepository } from \"services/services\";\nimport { DataColumns, SortDirection } from \"components/data-table/data-column\";\nimport { FilterBuilder } from \"services/api/filter-builder\";\nimport { trashPanelActions } from \"./trash-panel-action\";\nimport { Dispatch, MiddlewareAPI } from \"redux\";\nimport { OrderBuilder, OrderDirection } from \"services/api/order-builder\";\nimport { ContentsArguments, GroupContentsResource, GroupContentsResourcePrefix } from \"services/groups-service/groups-service\";\nimport { TrashPanelColumnNames } from 'views/trash-panel/trash-panel-columns';\nimport { updateFavorites } from \"store/favorites/favorites-actions\";\nimport { updatePublicFavorites } from 'store/public-favorites/public-favorites-actions';\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { updateResources } from \"store/resources/resources-actions\";\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { DataExplorer, getDataExplorer, getSortColumn } from \"store/data-explorer/data-explorer-reducer\";\nimport { serializeResourceTypeFilters } from 'store//resource-type-filters/resource-type-filters';\nimport { getDataExplorerColumnFilters } from 'store/data-explorer/data-explorer-middleware-service';\nimport { joinFilters } from 'services/api/filter-builder';\nimport { CollectionResource } from \"models/collection\";\nimport { ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport { removeDisabledButton } from \"store/multiselect/multiselect-actions\";\nimport { couldNotFetchItemsAvailable } from \"store/data-explorer/data-explorer-action\";\nimport { ListResults } from \"services/common-service/common-service\";\nexport class TrashPanelMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n\n        const userUuid = getUserUuid(api.getState());\n        if (!userUuid) { return; }\n        try {\n            if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n\n            // Get items\n            const listResults = await this.services.groupsService\n                .contents('', getParams(dataExplorer));\n\n            const items = listResults.items.map(it => it.uuid);\n            api.dispatch(trashPanelActions.SET_ITEMS({\n                ...listResultsToDataExplorerItemsMeta(listResults),\n                items\n            }));\n            api.dispatch<any>(updateFavorites(items));\n            api.dispatch<any>(updatePublicFavorites(items));\n            api.dispatch(updateResources(listResults.items));\n        } catch (e) {\n            api.dispatch(trashPanelActions.SET_ITEMS({\n                items: [],\n                itemsAvailable: 0,\n                page: 0,\n            
    rowsPerPage: dataExplorer.rowsPerPage\n            }));\n            api.dispatch(couldNotFetchTrashContents());\n        } finally {\n            api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n        }\n        api.dispatch<any>(removeDisabledButton(ContextMenuActionNames.MOVE_TO_TRASH));\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n\n        if (criteriaChanged) {\n            // Get itemsAvailable\n            return this.services.groupsService.contents('', getCountParams(dataExplorer))\n                .then((results: ListResults<GroupContentsResource>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        api.dispatch<any>(trashPanelActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        api.dispatch<any>(couldNotFetchItemsAvailable());\n                    }\n                });\n        }\n    }\n}\n\nconst getOrder = (dataExplorer: DataExplorer) => {\n    const sortColumn = getSortColumn<GroupContentsResource>(dataExplorer);\n    const order = new OrderBuilder<GroupContentsResource>();\n    if (sortColumn && sortColumn.sort) {\n        const sortDirection = sortColumn.sort.direction === SortDirection.ASC\n            ? OrderDirection.ASC\n            : OrderDirection.DESC;\n\n        // Use createdAt as a secondary sort column so we break ties consistently.\n        return order\n            .addOrder(sortDirection, sortColumn.sort.field, GroupContentsResourcePrefix.COLLECTION)\n            .addOrder(sortDirection, sortColumn.sort.field, GroupContentsResourcePrefix.PROJECT)\n            .addOrder(OrderDirection.DESC, \"createdAt\", GroupContentsResourcePrefix.PROCESS)\n            .getOrder();\n    } else {\n        return order.getOrder();\n    }\n};\n\nconst getFilters = (dataExplorer: DataExplorer) => {\n    const columns = dataExplorer.columns as DataColumns<string, CollectionResource>;\n    const typeFilters = serializeResourceTypeFilters(getDataExplorerColumnFilters(columns, TrashPanelColumnNames.TYPE));\n\n    const otherFilters = new FilterBuilder()\n        .addILike(\"name\", dataExplorer.searchValue, GroupContentsResourcePrefix.COLLECTION)\n        .addILike(\"name\", dataExplorer.searchValue, GroupContentsResourcePrefix.PROJECT)\n        .addEqual(\"is_trashed\", true)\n        .getFilters();\n\n    return joinFilters(\n        typeFilters,\n        otherFilters,\n    );\n};\n\nconst getParams = (dataExplorer: DataExplorer): ContentsArguments => ({\n    ...dataExplorerToListParams(dataExplorer),\n    order: getOrder(dataExplorer),\n    filters: getFilters(dataExplorer),\n    recursive: true,\n    includeTrash: true,\n    count: 'none',\n});\n\nconst getCountParams = (dataExplorer: DataExplorer): ContentsArguments => ({\n    filters: getFilters(dataExplorer),\n    recursive: true,\n    includeTrash: true,\n    limit: 0,\n    count: 'exact',\n});\n\nconst couldNotFetchTrashContents = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch trash contents.',\n        kind: SnackbarKind.ERROR\n    });\n
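\n// Note: requestItems fetches rows with count: 'none' while requestCount issues a\n// separate limit: 0 / count: 'exact' query, so the row fetch is not delayed by\n// computing the exact total.\n"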
  },
  {
    "path": "services/workbench2/src/store/tree-picker/picker-id.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\n\nexport interface PickerIdProp {\n    pickerId: string;\n}\n\nexport const pickerId =\n    (id: string) =>\n    <P extends PickerIdProp>(Component: React.ComponentType<P>) =>\n    (props: P) => {\n        return (\n            <Component\n                {...props}\n                pickerId={id}\n            />\n        );\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/tree-picker/tree-picker-actions.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { createServices } from \"services/services\";\nimport { configureStore } from \"../store\";\nimport { createBrowserHistory } from \"history\";\nimport { mockConfig } from 'common/config';\nimport Axios from \"axios\";\nimport MockAdapter from \"axios-mock-adapter\";\nimport { ResourceKind } from 'models/resource';\nimport { SHARED_PROJECT_ID, initProjectsTreePicker } from \"./tree-picker-actions\";\nimport { CollectionFileType } from \"models/collection-file\";\n\ndescribe('tree-picker-actions', () => {\n    const axiosInst = Axios.create({ headers: {} });\n    const axiosMock = new MockAdapter(axiosInst);\n\n    let store;\n    let services;\n    const config = {};\n    const actions = {\n        progressFn: (id, working) => { },\n        errorFn: (id, message) => { }\n    };\n    let importMocks;\n\n    beforeEach(() => {\n        axiosMock.reset();\n        services = createServices(mockConfig({}), actions, axiosInst);\n        store = configureStore(createBrowserHistory(), services, config);\n        localStorage.clear();\n        importMocks = [];\n    });\n\n    afterEach(() => {\n        importMocks.map(m => m.restore());\n    });\n\n    it('initializes preselected tree picker nodes', async () => {\n        const dispatchMock = cy.stub();\n        const dispatchWrapper = (action) => {\n            dispatchMock(action);\n            return store.dispatch(action);\n        };\n\n        const emptyCollectionUuid = \"zzzzz-4zz18-000000000000000\";\n        const collectionUuid = \"zzzzz-4zz18-111111111111111\";\n        const parentProjectUuid = \"zzzzz-j7d0g-000000000000000\";\n        const childCollectionUuid = \"zzzzz-4zz18-222222222222222\";\n\n        const fakeResources = {\n            [emptyCollectionUuid]: {\n                kind: ResourceKind.COLLECTION,\n                ownerUuid: '',\n                files: [],\n            },\n            [collectionUuid]: {\n                kind: ResourceKind.COLLECTION,\n                ownerUuid: '',\n                files: [{\n                    id: `${collectionUuid}/directory`,\n                    name: \"directory\",\n                    path: \"\",\n                    type: CollectionFileType.DIRECTORY,\n                    url: `/c=${collectionUuid}/directory/`,\n                }]\n            },\n            [parentProjectUuid]: {\n                kind: ResourceKind.GROUP,\n                ownerUuid: '',\n            },\n            [childCollectionUuid]: {\n                kind: ResourceKind.COLLECTION,\n                ownerUuid: parentProjectUuid,\n                files: [\n                    {\n                        id: `${childCollectionUuid}/mainDir`,\n                        name: \"mainDir\",\n                        path: \"\",\n                        type: CollectionFileType.DIRECTORY,\n                        url: `/c=${childCollectionUuid}/mainDir/`,\n                    },\n                    {\n                        id: `${childCollectionUuid}/mainDir/subDir`,\n                        name: \"subDir\",\n                        path: \"/mainDir\",\n                        type: CollectionFileType.DIRECTORY,\n                        url: `/c=${childCollectionUuid}/mainDir/subDir`,\n                    }\n                ],\n            },\n        };\n\n        services.ancestorsService.ancestors = cy.stub().callsFake((startUuid, endUuid) => {\n       
     let ancestors = [];\n            let uuid = startUuid;\n            while (uuid?.length && fakeResources[uuid]) {\n                const resource = fakeResources[uuid];\n                if (resource.kind === ResourceKind.COLLECTION || resource.kind === ResourceKind.GROUP) {\n                    ancestors.unshift({\n                        uuid, kind: resource.kind,\n                        ownerUuid: resource.ownerUuid,\n                    });\n                }\n                uuid = resource.ownerUuid;\n            }\n            return ancestors;\n        });\n\n        services.collectionService.files = cy.stub().callsFake(async (uuid) => {\n            return fakeResources[uuid]?.files || [];\n        });\n\n        services.groupsService.contents = cy.stub().callsFake(async (uuid, args) => {\n            const items = Object.keys(fakeResources).map(key => ({...fakeResources[key], uuid: key})).filter(item => item.ownerUuid === uuid);\n            return {items: items, itemsAvailable: items.length};\n        });\n\n        const pickerId = \"pickerId\";\n\n        // When collection preselected\n        await initProjectsTreePicker(pickerId, {\n            selectedItemUuids: [emptyCollectionUuid],\n            includeDirectories: true,\n            includeFiles: false,\n            multi: true,\n        })(dispatchWrapper, store.getState, services);\n\n        // Expect ancestor service to be called\n        expect(services.ancestorsService.ancestors).to.be.calledWith(emptyCollectionUuid, '');\n        // Expect top level to be expanded and node to be selected\n        expect(store.getState().treePicker[\"pickerId_shared\"][SHARED_PROJECT_ID].expanded).to.equal(true);\n        expect(store.getState().treePicker[\"pickerId_shared\"][emptyCollectionUuid].selected).to.equal(true);\n\n\n        // When collection subdirectory is preselected\n        await initProjectsTreePicker(pickerId, {\n            selectedItemUuids: [`${collectionUuid}/directory`],\n            includeDirectories: true,\n            includeFiles: false,\n            multi: true,\n        })(dispatchWrapper, store.getState, services);\n\n        // Expect ancestor service to be called\n        expect(services.ancestorsService.ancestors).to.be.calledWith(collectionUuid, '');\n        // Expect top level to be expanded and node to be selected\n        expect(store.getState().treePicker[\"pickerId_shared\"][SHARED_PROJECT_ID].expanded).to.equal(true);\n        expect(store.getState().treePicker[\"pickerId_shared\"][collectionUuid].expanded).to.equal(true);\n        expect(store.getState().treePicker[\"pickerId_shared\"][collectionUuid].selected).to.equal(false);\n        expect(store.getState().treePicker[\"pickerId_shared\"][`${collectionUuid}/directory`].selected).to.equal(true);\n\n\n        // When subdirectory of collection inside project is preselected\n        await initProjectsTreePicker(pickerId, {\n            selectedItemUuids: [`${childCollectionUuid}/mainDir/subDir`],\n            includeDirectories: true,\n            includeFiles: false,\n            multi: true,\n        })(dispatchWrapper, store.getState, services);\n\n        // Expect ancestor service to be called\n        expect(services.ancestorsService.ancestors).to.be.calledWith(childCollectionUuid, '');\n        // Expect parent project and collection to be expanded\n    
    expect(store.getState().treePicker[\"pickerId_shared\"][SHARED_PROJECT_ID].expanded).to.equal(true);\n        expect(store.getState().treePicker[\"pickerId_shared\"][parentProjectUuid].expanded).to.equal(true);\n        expect(store.getState().treePicker[\"pickerId_shared\"][parentProjectUuid].selected).to.equal(false);\n        expect(store.getState().treePicker[\"pickerId_shared\"][childCollectionUuid].expanded).to.equal(true);\n        expect(store.getState().treePicker[\"pickerId_shared\"][childCollectionUuid].selected).to.equal(false);\n        // Expect main directory to be expanded\n        expect(store.getState().treePicker[\"pickerId_shared\"][`${childCollectionUuid}/mainDir`].expanded).to.equal(true);\n        expect(store.getState().treePicker[\"pickerId_shared\"][`${childCollectionUuid}/mainDir`].selected).to.equal(false);\n        // Expect sub directory to be selected\n        expect(store.getState().treePicker[\"pickerId_shared\"][`${childCollectionUuid}/mainDir/subDir`].expanded).to.equal(false);\n        expect(store.getState().treePicker[\"pickerId_shared\"][`${childCollectionUuid}/mainDir/subDir`].selected).to.equal(true);\n\n\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/store/tree-picker/tree-picker-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\nimport { TreeNode, initTreeNode, getNodeDescendants, TreeNodeStatus, getNode, TreePickerId, Tree, setNode, createTree, getNodeDescendantsIds } from 'models/tree';\nimport { CollectionFileType, createCollectionFilesTree, getCollectionResourceCollectionUuid } from \"models/collection-file\";\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { getUserUuid } from \"common/getuser\";\nimport { ServiceRepository } from 'services/services';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { pipe, values } from 'lodash/fp';\nimport { ResourceKind, ResourceObjectType, extractUuidObjectType, COLLECTION_PDH_REGEX } from 'models/resource';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { getTreePicker, TreePicker} from './tree-picker';\nimport { ProjectsTreePickerItem } from './tree-picker-middleware';\nimport { OrderBuilder } from 'services/api/order-builder';\nimport { ProjectResource } from 'models/project';\nimport { mapTree } from '../../models/tree';\nimport { LinkResource, LinkClass } from \"models/link\";\nimport { mapTreeValues } from \"models/tree\";\nimport { sortFilesTree } from \"services/collection-service/collection-service-files-response\";\nimport { GroupClass, GroupResource } from \"models/group\";\nimport { CollectionResource } from \"models/collection\";\nimport { getResource } from \"store/resources/resources\";\nimport { updateResources } from \"store/resources/resources-actions\";\nimport { SnackbarKind, snackbarActions } from \"store/snackbar/snackbar-actions\";\nimport { call, put, takeEvery, takeLatest, getContext, select, all } from \"redux-saga/effects\";\nimport { TreeItemWeight, TreeItemWithWeight } from \"components/tree/tree\";\n\nexport const treePickerActions = unionize({\n    LOAD_TREE_PICKER_NODE: ofType<{ id: string, pickerId: string }>(),\n    LOAD_TREE_PICKER_NODE_SUCCESS: ofType<{ id: string, nodes: Array<TreeNode<any>>, pickerId: string }>(),\n    APPEND_TREE_PICKER_NODE_SUBTREE: ofType<{ id: string, subtree: Tree<any>, pickerId: string }>(),\n    TOGGLE_TREE_PICKER_NODE_COLLAPSE: ofType<{ id: string, pickerId: string }>(),\n    EXPAND_TREE_PICKER_NODE: ofType<{ id: string, pickerId: string }>(),\n    EXPAND_TREE_PICKER_NODE_ANCESTORS: ofType<{ id: string, pickerId: string }>(),\n    ACTIVATE_TREE_PICKER_NODE: ofType<{ id: string, pickerId: string, relatedTreePickers?: string[] }>(),\n    DEACTIVATE_TREE_PICKER_NODE: ofType<{ pickerId: string }>(),\n    TOGGLE_TREE_PICKER_NODE_SELECTION: ofType<{ id: string, pickerId: string, cascade: boolean }>(),\n    SELECT_TREE_PICKER_NODE: ofType<{ id: string | string[], pickerId: string, cascade: boolean }>(),\n    DESELECT_TREE_PICKER_NODE: ofType<{ id: string | string[], pickerId: string, cascade: boolean }>(),\n    EXPAND_TREE_PICKER_NODES: ofType<{ ids: string[], pickerId: string }>(),\n    RESET_TREE_PICKER: ofType<{ pickerId: string }>()\n});\n\nexport type TreePickerAction = UnionOf<typeof treePickerActions>;\n\nexport interface LoadProjectParams {\n    includeCollections?: boolean;\n    includeDirectories?: boolean;\n    includeFiles?: boolean;\n    includeFilterGroups?: boolean;\n    options?: { showOnlyOwned: boolean; showOnlyWritable: boolean; };\n}\n\nexport const treePickerSearchActions = unionize({\n    
SET_TREE_PICKER_PROJECT_SEARCH: ofType<{ pickerId: string, projectSearchValue: string }>(),\n    SET_TREE_PICKER_COLLECTION_FILTER: ofType<{ pickerId: string, collectionFilterValue: string }>(),\n    SET_TREE_PICKER_LOAD_PARAMS: ofType<{ pickerId: string, params: LoadProjectParams }>(),\n});\n\nexport type TreePickerSearchAction = UnionOf<typeof treePickerSearchActions>;\n\nexport const treePickerSearchSagas = unionize({\n    SET_PROJECT_SEARCH: ofType<{ pickerId: string, projectSearchValue: string }>(),\n    SET_COLLECTION_FILTER: ofType<{ pickerMainId: string, collectionFilterValue: string }>(),\n    APPLY_COLLECTION_FILTER: ofType<{ pickerId: string }>(),\n    LOAD_PROJECT: ofType<LoadProjectParamsWithId>(),\n    LOAD_SEARCH: ofType<LoadProjectParamsWithId>(),\n    LOAD_FAVORITES_PROJECT: ofType<LoadFavoritesProjectParams>(),\n    LOAD_PUBLIC_FAVORITES_PROJECT: ofType<LoadFavoritesProjectParams>(),\n    REFRESH_TREE_PICKER: ofType<{ pickerId: string }>(),\n});\n\nexport function* setTreePickerProjectSearchWatcher() {\n    // Race conditions are handled in loadSearchWatcher so takeEvery is used here to avoid confusion\n    yield takeEvery(treePickerSearchSagas.tags.SET_PROJECT_SEARCH, setTreePickerProjectSearchSaga);\n}\n\nfunction* setTreePickerProjectSearchSaga({type, payload}: {\n    type: typeof treePickerSearchSagas.tags.SET_PROJECT_SEARCH,\n    payload: typeof treePickerSearchSagas._Record.SET_PROJECT_SEARCH,\n}) {\n    try {\n        const { pickerId , projectSearchValue } = payload;\n        const state: RootState = yield select();\n        const searchChanged = state.treePickerSearch.projectSearchValues[pickerId] !== projectSearchValue;\n\n        if (searchChanged) {\n            yield put(treePickerSearchActions.SET_TREE_PICKER_PROJECT_SEARCH(payload));\n            const picker = getTreePicker<ProjectsTreePickerItem>(pickerId)(state.treePicker);\n            if (picker) {\n                const loadParams = state.treePickerSearch.loadProjectParams[pickerId];\n                // Put is non-blocking so race-condition prevention is handled by the loadSearchWatcher\n                yield put(treePickerSearchSagas.LOAD_SEARCH({\n                    ...loadParams,\n                    id: SEARCH_PROJECT_ID,\n                    pickerId,\n                }));\n            }\n        }\n    } catch (e) {\n        yield put(snackbarActions.OPEN_SNACKBAR({ message: `Failed to search`, kind: SnackbarKind.ERROR }));\n    }\n}\n\n/**\n * Race-free collection filter saga as long as it's invoked through SET_COLLECTION_FILTER\n */\nexport function* setTreePickerCollectionFilterWatcher() {\n    yield takeLatest(treePickerSearchSagas.tags.SET_COLLECTION_FILTER, setTreePickerCollectionFilterSaga);\n}\n\nfunction* setTreePickerCollectionFilterSaga({type, payload}: {\n    type: typeof treePickerSearchSagas.tags.SET_COLLECTION_FILTER,\n    payload: typeof treePickerSearchSagas._Record.SET_COLLECTION_FILTER,\n}) {\n    try {\n        const state: RootState = yield select();\n        const { pickerMainId , collectionFilterValue } = payload;\n        const pickerRootItemIds = Object.values(getProjectsTreePickerIds(pickerMainId));\n\n        const changedRootItemIds = pickerRootItemIds.filter((pickerRootId) =>\n            state.treePickerSearch.collectionFilterValues[pickerRootId] !== collectionFilterValue\n        );\n\n        yield all(pickerRootItemIds.map(pickerId =>\n            put(treePickerSearchActions.SET_TREE_PICKER_COLLECTION_FILTER({\n                pickerId,\n                
collectionFilterValue,\n            }))\n        ));\n\n        yield all(changedRootItemIds.map(pickerId =>\n            call(applyCollectionFilterSaga, {\n                type: treePickerSearchSagas.tags.APPLY_COLLECTION_FILTER,\n                payload: { pickerId }\n            })\n        ));\n    } catch (e) {\n        yield put(snackbarActions.OPEN_SNACKBAR({ message: `Failed to search`, kind: SnackbarKind.ERROR }));\n    } finally {\n        // Optionally handle cleanup when task cancelled\n        // if (yield cancelled()) {}\n    }\n}\n\n/**\n * Only meant to be called synchronously via call from other sagas that implement takeLatest\n */\nfunction* applyCollectionFilterSaga({type, payload}: {\n    type: typeof treePickerSearchSagas.tags.APPLY_COLLECTION_FILTER,\n    payload: typeof treePickerSearchSagas._Record.APPLY_COLLECTION_FILTER,\n}) {\n    try {\n        const state: RootState = yield select();\n        const { pickerId } = payload;\n        if (state.treePickerSearch.projectSearchValues[pickerId] !== \"\") {\n            yield call(refreshTreePickerSaga, {\n                type: treePickerSearchSagas.tags.REFRESH_TREE_PICKER,\n                payload: { pickerId }\n            });\n        } else {\n            const picker = getTreePicker<ProjectsTreePickerItem>(pickerId)(state.treePicker);\n            if (picker) {\n                const loadParams = state.treePickerSearch.loadProjectParams[pickerId];\n                yield call(loadProjectSaga, {\n                    type: treePickerSearchSagas.tags.LOAD_PROJECT,\n                    payload: {\n                        ...loadParams,\n                        id: SEARCH_PROJECT_ID,\n                        pickerId,\n                }});\n            }\n        }\n    } catch (e) {\n        yield put(snackbarActions.OPEN_SNACKBAR({ message: `Failed to search`, kind: SnackbarKind.ERROR }));\n    }\n}\n\nexport const getProjectsTreePickerIds = (pickerId: string) => ({\n    home: `${pickerId}_home`,\n    shared: `${pickerId}_shared`,\n    favorites: `${pickerId}_favorites`,\n    publicFavorites: `${pickerId}_publicFavorites`,\n    search: `${pickerId}_search`,\n});\n\nexport const SEARCH_PROJECT_ID_PREFIX = \"search-\";\n\nexport const getAllNodes = <Value>(pickerId: string, filter = (node: TreeNode<Value>) => true) => (state: TreePicker) =>\n    pipe(\n        () => values(getProjectsTreePickerIds(pickerId)),\n\n        ids => ids\n            .map(id => getTreePicker<Value>(id)(state)),\n\n        trees => trees\n            .map(getNodeDescendants(''))\n            .reduce((allNodes, nodes) => allNodes.concat(nodes), []),\n\n        allNodes => allNodes\n            .reduce((map, node) =>\n                filter(node)\n                    ? 
map.set(node.id, node)\n                    : map, new Map<string, TreeNode<Value>>())\n            .values(),\n\n        uniqueNodes => Array.from(uniqueNodes),\n    )();\nexport const getSelectedNodes = <Value>(pickerId: string) => (state: TreePicker) =>\n    getAllNodes<Value>(pickerId, node => node.selected)(state);\n\ninterface TreePickerPreloadParams {\n    selectedItemUuids: string[];\n    includeDirectories: boolean;\n    includeFiles: boolean;\n    multi: boolean;\n}\n\nexport const initProjectsTreePicker = (pickerId: string, preloadParams?: TreePickerPreloadParams) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const { home, shared, favorites, publicFavorites, search } = getProjectsTreePickerIds(pickerId);\n        dispatch<any>(initUserProject(home));\n        dispatch<any>(initSharedProject(shared));\n        dispatch<any>(initFavoritesProject(favorites));\n        dispatch<any>(initPublicFavoritesProject(publicFavorites));\n        dispatch<any>(initSearchProject(search));\n\n        if (preloadParams && preloadParams.selectedItemUuids.length) {\n            await dispatch<any>(loadInitialValue(\n                preloadParams.selectedItemUuids,\n                pickerId,\n                preloadParams.includeDirectories,\n                preloadParams.includeFiles,\n                preloadParams.multi\n            ));\n        }\n    };\n\ninterface ReceiveTreePickerDataParams<T> {\n    data: T[];\n    extractNodeData: (value: T) => { id: string, value: T, status?: TreeNodeStatus };\n    id: string;\n    pickerId: string;\n}\n\nexport const receiveTreePickerData = <T>(params: ReceiveTreePickerDataParams<T>) =>\n    (dispatch: Dispatch) => {\n        const { data, extractNodeData, id, pickerId, } = params;\n        dispatch(treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({\n            id,\n            nodes: data.map(item => initTreeNode(extractNodeData(item))),\n            pickerId,\n        }));\n        dispatch(treePickerActions.EXPAND_TREE_PICKER_NODE({ id, pickerId }));\n    };\n\nexport const extractGroupContentsNodeData = (expandableCollections: boolean) => (item: GroupContentsResource & TreeItemWithWeight) => {\n    if (item.uuid === \"more-items-available\") {\n        return {\n            id: item.uuid,\n            value: item,\n            status: TreeNodeStatus.LOADED\n        }\n    } else if (item.weight === TreeItemWeight.LIGHT) {\n        return {\n            id: SEARCH_PROJECT_ID_PREFIX+item.uuid,\n            value: item,\n            status: item.kind === ResourceKind.PROJECT\n                  ? TreeNodeStatus.INITIAL\n                  : item.kind === ResourceKind.COLLECTION && expandableCollections\n                  ? TreeNodeStatus.INITIAL\n                  : TreeNodeStatus.LOADED\n        };\n    } else {\n        return { id: item.uuid,\n                 value: item,\n                 status: item.kind === ResourceKind.PROJECT\n                       ? TreeNodeStatus.INITIAL\n                       : item.kind === ResourceKind.COLLECTION && expandableCollections\n                       ? 
TreeNodeStatus.INITIAL\n                       : TreeNodeStatus.LOADED\n        };\n    }\n};\n\ninterface LoadProjectParamsWithId extends LoadProjectParams {\n    id: string;\n    pickerId: string;\n    loadShared?: boolean;\n}\n\n/**\n * Kicks off a picker search load that allows parallel runs\n * Used for expanding nodes\n */\nexport const loadProject = (params: LoadProjectParamsWithId) => (treePickerSearchSagas.LOAD_PROJECT(params));\nexport function* loadProjectWatcher() {\n    yield takeEvery(treePickerSearchSagas.tags.LOAD_PROJECT, loadProjectSaga);\n}\n\n/**\n * Asynchronously kicks off a race-free picker search load - does not block when used this way\n */\nexport const loadSearch = (params: LoadProjectParamsWithId) => (treePickerSearchSagas.LOAD_SEARCH(params));\nexport function* loadSearchWatcher() {\n    yield takeLatest(treePickerSearchSagas.tags.LOAD_SEARCH, loadProjectSaga);\n}\n\n/**\n * loadProjectSaga is used to load or refresh a project node in a tree picker\n * Errors are caught and a toast is shown if the project fails to load\n * Blocks when called directly with call(), can be composed into race-free groups\n */\nfunction* loadProjectSaga({type, payload}: {\n    type: typeof treePickerSearchSagas.tags.LOAD_PROJECT,\n    payload: typeof treePickerSearchSagas._Record.LOAD_PROJECT,\n}) {\n    try {\n        const services: ServiceRepository = yield getContext(\"services\");\n        const state: RootState = yield select();\n\n        const {\n            id,\n            pickerId,\n            includeCollections = false,\n            includeDirectories = false,\n            includeFiles = false,\n            includeFilterGroups = false,\n            loadShared = false,\n            options,\n        } = payload;\n\n        const searching = (id === SEARCH_PROJECT_ID);\n        const collectionFilter = state.treePickerSearch.collectionFilterValues[pickerId];\n        const projectFilter = state.treePickerSearch.projectSearchValues[pickerId];\n\n        let filterB = new FilterBuilder();\n\n        let includeOwners: string|undefined = undefined;\n\n        if (id.startsWith(SEARCH_PROJECT_ID_PREFIX)) {\n            return;\n        }\n\n        if (searching) {\n            // opening top level search\n            if (projectFilter) {\n                includeOwners = \"owner_uuid\";\n                filterB = filterB.addIsA('uuid', [ResourceKind.PROJECT]);\n\n                const objtype = extractUuidObjectType(projectFilter);\n                if (objtype === ResourceObjectType.GROUP || objtype === ResourceObjectType.USER) {\n                    filterB = filterB.addEqual('uuid', projectFilter);\n                } else {\n                    filterB = filterB.addFullTextSearch(projectFilter, 'groups');\n                }\n\n            } else if (collectionFilter) {\n                includeOwners = \"owner_uuid\";\n                filterB = filterB.addIsA('uuid', [ResourceKind.COLLECTION]);\n\n                const objtype = extractUuidObjectType(collectionFilter);\n                if (objtype === ResourceObjectType.COLLECTION) {\n                    filterB = filterB.addEqual('uuid', collectionFilter);\n                } else if (COLLECTION_PDH_REGEX.exec(collectionFilter)) {\n                    filterB = filterB.addEqual('portable_data_hash', collectionFilter);\n                } else {\n                    filterB = filterB.addFullTextSearch(collectionFilter, 'collections');\n                }\n            } else {\n                return;\n      
      }\n        } else {\n            // opening a folder below the top level\n            if (includeCollections) {\n                filterB = filterB.addIsA('uuid', [ResourceKind.PROJECT, ResourceKind.COLLECTION]);\n            } else {\n                filterB = filterB.addIsA('uuid', [ResourceKind.PROJECT]);\n            }\n\n            if (projectFilter) {\n                filterB = filterB.addFullTextSearch(projectFilter, 'groups');\n            }\n            if (collectionFilter) {\n                filterB = filterB.addFullTextSearch(collectionFilter, 'collections');\n            }\n        }\n\n        filterB = filterB.addNotIn(\"collections.properties.type\", [\"intermediate\", \"log\"]);\n\n        const globalSearch = loadShared || id === SEARCH_PROJECT_ID;\n\n        const filters = filterB.getFilters();\n\n        // Page size for picker contents; one extra item is requested below to detect overflow.\n        // Must stay under the API's 1000-item page limit.\n        const itemLimit = 200;\n\n        yield put(treePickerActions.LOAD_TREE_PICKER_NODE({ id, pickerId }));\n\n        let { items, included } = yield call(\n            {context: services.groupsService, fn: services.groupsService.contents},\n            globalSearch ? '' : id,\n            {\n                filters,\n                excludeHomeProject: loadShared || undefined,\n                limit: itemLimit + 1,\n                count: \"none\",\n                include: includeOwners,\n            }\n        );\n\n        if (!included) {\n            includeOwners = undefined;\n        }\n\n        // rootItems holds the GroupContentsResource items or, when includeOwners is set,\n        // the GroupContentsIncludedResource owner records\n        let rootItems: any[] = items;\n\n        // Track uuids already added so owners and items are only listed once\n        const seen = {};\n\n        if (includeOwners && included) {\n            included = included.filter(item => {\n                if (seen.hasOwnProperty(item.uuid)) {\n                    return false;\n                } else {\n                    seen[item.uuid] = item;\n                    return true;\n                }\n            });\n            yield put(updateResources(included));\n\n            rootItems = included;\n        }\n\n        items = items.filter(item => {\n            if (seen.hasOwnProperty(item.uuid)) {\n                return false;\n            } else {\n                seen[item.uuid] = item;\n                if (!seen[item.ownerUuid] && includeOwners) {\n                    rootItems.push(item);\n                }\n                return true;\n            }\n        });\n        yield put(updateResources(items));\n\n        if (items.length > itemLimit) {\n            rootItems.push({\n                uuid: \"more-items-available-\"+id,\n                kind: ResourceKind.WORKFLOW,\n                name: `*** Not all items listed, reduce item count with search or filter ***`,\n                description: \"\",\n                definition: \"\",\n                ownerUuid: \"\",\n                createdAt: \"\",\n                modifiedByUserUuid: \"\",\n                modifiedAt: \"\",\n                etag: \"\"\n            });\n        }\n\n        yield put(receiveTreePickerData<GroupContentsResource>({\n            id,\n            pickerId,\n            data: rootItems.filter(item => {\n                if (!includeFilterGroups && (item as GroupResource).groupClass && (item as GroupResource).groupClass === GroupClass.FILTER) {\n                    return false;\n                }\n\n                if (options && options.showOnlyWritable && item.hasOwnProperty('frozenByUuid') && (item as ProjectResource).frozenByUuid) {\n                    return false;\n              
  }\n                return true;\n            }).map(item => {\n                if (extractUuidObjectType(item.uuid) === ResourceObjectType.USER) {\n                    return {...item,\n                            uuid: item.uuid,\n                            name: item['fullName'] + \" Home Project\",\n                            weight: includeOwners ? TreeItemWeight.LIGHT : TreeItemWeight.NORMAL,\n                            kind: ResourceKind.USER,\n                    }\n                }\n                return {...item,\n                        uuid: item.uuid,\n                        weight: includeOwners ? TreeItemWeight.LIGHT : TreeItemWeight.NORMAL,};\n\n            }),\n            extractNodeData: extractGroupContentsNodeData(includeDirectories || includeFiles),\n        }));\n\n        if (includeOwners) {\n            // When searching, we already have the contents to put in the\n            // owner projects, so load them up.\n            const projects = {};\n            items.forEach(item => {\n                if (!projects.hasOwnProperty(item.ownerUuid)) {\n                    projects[item.ownerUuid] = [];\n                }\n                projects[item.ownerUuid].push({...item, weight: TreeItemWeight.DARK});\n            });\n            for (const prj in projects) {\n                yield put(receiveTreePickerData<GroupContentsResource>({\n                    id: SEARCH_PROJECT_ID_PREFIX+prj,\n                    pickerId,\n                    data: projects[prj],\n                    extractNodeData: extractGroupContentsNodeData(includeDirectories || includeFiles),\n                }));\n            }\n        }\n    } catch(e) {\n        console.error(\"Failed to load project into tree picker:\", e);\n        yield put(snackbarActions.OPEN_SNACKBAR({ message: `Failed to load project`, kind: SnackbarKind.ERROR }));\n    } finally {\n        // Optionally handle cleanup when task cancelled\n        // if (yield cancelled()) {}\n    }\n};\n\nexport const refreshTreePicker = (params: typeof treePickerSearchSagas._Record.REFRESH_TREE_PICKER) => (treePickerSearchSagas.REFRESH_TREE_PICKER(params));\n\nexport function* refreshTreePickerWatcher() {\n    yield takeEvery(treePickerSearchSagas.tags.REFRESH_TREE_PICKER, refreshTreePickerSaga);\n}\n\n/**\n * Refreshes a single tree picker subtree\n */\nfunction* refreshTreePickerSaga({type, payload}: {\n    type: typeof treePickerSearchSagas.tags.REFRESH_TREE_PICKER,\n    payload: typeof treePickerSearchSagas._Record.REFRESH_TREE_PICKER,\n}) {\n    try {\n        const state: RootState = yield select();\n        const { pickerId } = payload;\n\n        const picker = getTreePicker<ProjectsTreePickerItem>(pickerId)(state.treePicker);\n        if (picker) {\n            const loadParams = state.treePickerSearch.loadProjectParams[pickerId];\n            yield all((getNodeDescendantsIds('')(picker)\n                .reduce((acc, id) => {\n                    const node = getNode(id)(picker);\n                    if (node && node.status !== TreeNodeStatus.INITIAL) {\n                        if (node.id.substring(6, 11) === 'tpzed' || node.id.substring(6, 11) === 'j7d0g') {\n                            return acc.concat(call(loadProjectSaga, {\n                                type: treePickerSearchSagas.tags.LOAD_PROJECT,\n                                payload: {\n                                    ...loadParams,\n                                    id: node.id,\n                                    pickerId,\n                          
}}));\n                        }\n                        if (node.id === SHARED_PROJECT_ID) {\n                            return acc.concat(call(loadProjectSaga, {\n                                type: treePickerSearchSagas.tags.LOAD_PROJECT,\n                                payload: {\n                                    ...loadParams,\n                                    id: node.id,\n                                    pickerId,\n                                    loadShared: true\n                            }}));\n                        }\n                        if (node.id === SEARCH_PROJECT_ID) {\n                            return acc.concat(call(loadProjectSaga, {\n                                type: treePickerSearchSagas.tags.LOAD_PROJECT,\n                                payload: {\n                                    ...loadParams,\n                                    id: node.id,\n                                    pickerId,\n                            }}));\n                        }\n                        if (node.id === FAVORITES_PROJECT_ID) {\n                            return acc.concat(call(loadFavoritesProjectSaga, {\n                                type: treePickerSearchSagas.tags.LOAD_FAVORITES_PROJECT,\n                                payload: {\n                                    ...loadParams,\n                                    pickerId,\n                            }}));\n                        }\n                        if (node.id === PUBLIC_FAVORITES_PROJECT_ID) {\n                            return acc.concat(call(loadPublicFavoritesProjectSaga, {\n                                type: treePickerSearchSagas.tags.LOAD_PUBLIC_FAVORITES_PROJECT,\n                                payload: {\n                                    ...loadParams,\n                                    pickerId,\n                            }}));\n                        }\n                    }\n                    return acc;\n                }, [] as Object[])));\n        }\n    } catch (e) {\n        yield put(snackbarActions.OPEN_SNACKBAR({ message: `Failed to refresh tree picker`, kind: SnackbarKind.ERROR }));\n    }\n}\n\nexport const loadCollection = (id: string, pickerId: string, includeDirectories?: boolean, includeFiles?: boolean) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(treePickerActions.LOAD_TREE_PICKER_NODE({ id, pickerId }));\n\n        const picker = getTreePicker<ProjectsTreePickerItem>(pickerId)(getState().treePicker);\n        if (picker) {\n\n            const node = getNode(id)(picker);\n            if (node && 'kind' in node.value && node.value.kind === ResourceKind.COLLECTION) {\n                const files = (await services.collectionService.files(node.value.uuid))\n                    .filter((file) => (\n                        (includeFiles) ||\n                        (includeDirectories && file.type === CollectionFileType.DIRECTORY)\n                    ));\n                const tree = createCollectionFilesTree(files);\n                const sorted = sortFilesTree(tree);\n                const filesTree = mapTreeValues(services.collectionService.extendFileURL)(sorted);\n\n                // await tree modifications so that consumers can guarantee node presence\n                await dispatch(\n                    treePickerActions.APPEND_TREE_PICKER_NODE_SUBTREE({\n                        id,\n                        pickerId,\n                        subtree: mapTree(node => ({ 
...node, status: TreeNodeStatus.LOADED }))(filesTree)\n                    }));\n\n                // Expand collection root node\n                dispatch(treePickerActions.EXPAND_TREE_PICKER_NODE({ id, pickerId }));\n            }\n        }\n    };\n\nexport const HOME_PROJECT_ID = 'Home Projects';\nexport const initUserProject = (pickerId: string) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const uuid = getUserUuid(getState());\n        if (uuid) {\n            dispatch(receiveTreePickerData({\n                id: '',\n                pickerId,\n                data: [{ uuid, name: HOME_PROJECT_ID }],\n                extractNodeData: value => ({\n                    id: value.uuid,\n                    status: TreeNodeStatus.INITIAL,\n                    value,\n                }),\n            }));\n        }\n    };\nexport const loadUserProject = (pickerId: string, includeCollections = false, includeDirectories = false, includeFiles = false, options?: { showOnlyOwned: boolean, showOnlyWritable: boolean }) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const uuid = getUserUuid(getState());\n        if (uuid) {\n            dispatch(loadProject({ id: uuid, pickerId, includeCollections, includeDirectories, includeFiles, options }));\n        }\n    };\n\nexport const SHARED_PROJECT_ID = 'Shared with me';\nexport const initSharedProject = (pickerId: string) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(receiveTreePickerData({\n            id: '',\n            pickerId,\n            data: [{ uuid: SHARED_PROJECT_ID, name: SHARED_PROJECT_ID }],\n            extractNodeData: value => ({\n                id: value.uuid,\n                status: TreeNodeStatus.INITIAL,\n                value,\n            }),\n        }));\n    };\n\ntype PickerItemPreloadData = {\n    itemId: string;\n    mainItemUuid: string;\n    ancestors: (GroupResource | CollectionResource)[];\n    isHomeProjectItem: boolean;\n}\n\ntype PickerTreePreloadData = {\n    tree: Tree<GroupResource | CollectionResource>;\n    pickerTreeId: string;\n    pickerTreeRootUuid: string;\n};\n\nexport const loadInitialValue = (pickerItemIds: string[], pickerId: string, includeDirectories: boolean, includeFiles: boolean, multi: boolean,) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const homeUuid = getUserUuid(getState());\n\n        // Request ancestor trees in parallel and save home project status\n        const pickerItemsData: PickerItemPreloadData[] = await Promise.allSettled(pickerItemIds.map(async itemId => {\n            const mainItemUuid = itemId.includes('/') ? 
itemId.split('/')[0] : itemId;\n\n            const ancestors = (await services.ancestorsService.ancestors(mainItemUuid, ''))\n            .filter(item =>\n                item.kind === ResourceKind.GROUP ||\n                item.kind === ResourceKind.COLLECTION\n            ) as (GroupResource | CollectionResource)[];\n\n            const isOnlyHomeProject = pickerItemIds.length === 1 && pickerItemIds[0] === homeUuid;\n\n            if (ancestors.length === 0 && !isOnlyHomeProject) {\n                return Promise.reject({item: itemId});\n            }\n\n            const isHomeProjectItem = !!((homeUuid && ancestors.some(item => item.ownerUuid === homeUuid)) || isOnlyHomeProject);\n\n            return {\n                itemId,\n                mainItemUuid,\n                ancestors,\n                isHomeProjectItem,\n            };\n        })).then((res) => {\n            // Show toast if any selections failed to restore\n            const rejectedPromises = res.filter((promiseResult): promiseResult is PromiseRejectedResult => (promiseResult.status === 'rejected'));\n            if (rejectedPromises.length) {\n                rejectedPromises.forEach(item => {\n                    console.error(\"The following item failed to load into the tree picker\", item.reason);\n                });\n                dispatch<any>(snackbarActions.OPEN_SNACKBAR({ message: `Some selections failed to load and were removed. See console for details.`, kind: SnackbarKind.ERROR }));\n            }\n            // Filter out any failed promises and map to resulting preload data with ancestors\n            return res.filter((promiseResult): promiseResult is PromiseFulfilledResult<PickerItemPreloadData> => (\n                promiseResult.status === 'fulfilled'\n            )).map(res => res.value)\n        });\n\n        // Group items to preload / ancestor data by home/shared picker and create initial Trees to preload\n        const initialTreePreloadData: PickerTreePreloadData[] = [\n            pickerItemsData.filter((item) => item.isHomeProjectItem),\n            pickerItemsData.filter((item) => !item.isHomeProjectItem),\n        ]\n            .filter((items) => items.length > 0)\n            .map((itemGroup) =>\n                itemGroup.reduce(\n                    (preloadTree, itemData) => ({\n                        tree: createInitialPickerTree(\n                            itemData.ancestors,\n                            itemData.mainItemUuid,\n                            preloadTree.tree\n                        ),\n                        pickerTreeId: getPickerItemTreeId(itemData, homeUuid, pickerId),\n                        pickerTreeRootUuid: getPickerItemRootUuid(itemData, homeUuid),\n                    }),\n                    {\n                        tree: createTree<GroupResource | CollectionResource>(),\n                        pickerTreeId: '',\n                        pickerTreeRootUuid: '',\n                    } as PickerTreePreloadData\n                )\n            );\n\n        // Load initial trees into corresponding picker store\n        await Promise.all(initialTreePreloadData.map(preloadTree => (\n            dispatch(\n                treePickerActions.APPEND_TREE_PICKER_NODE_SUBTREE({\n                    id: preloadTree.pickerTreeRootUuid,\n                    pickerId: preloadTree.pickerTreeId,\n                    subtree: preloadTree.tree,\n                })\n            )\n        )));\n\n        // Await loading collection before attempting to select 
items\n        await Promise.all(pickerItemsData.map(async itemData => {\n            const pickerTreeId = getPickerItemTreeId(itemData, homeUuid, pickerId);\n\n            // Selected item resides in collection subpath\n            if (itemData.itemId.includes('/')) {\n                // Load collection into tree\n                // loadCollection does async work beyond dispatching actions, so it must be awaited\n                await dispatch(loadCollection(itemData.mainItemUuid, pickerTreeId, includeDirectories, includeFiles));\n            }\n            // Expand nodes down to destination\n            dispatch(treePickerActions.EXPAND_TREE_PICKER_NODE_ANCESTORS({ id: itemData.itemId, pickerId: pickerTreeId }));\n        }));\n\n        // Select or activate nodes\n        pickerItemsData.forEach(itemData => {\n            const pickerTreeId = getPickerItemTreeId(itemData, homeUuid, pickerId);\n\n            if (multi) {\n                dispatch(treePickerActions.SELECT_TREE_PICKER_NODE({ id: itemData.itemId, pickerId: pickerTreeId, cascade: false}));\n            } else {\n                dispatch(treePickerActions.ACTIVATE_TREE_PICKER_NODE({ id: itemData.itemId, pickerId: pickerTreeId }));\n            }\n        });\n\n        // Refresh triggers loading of all adjacent items that were not included in the ancestor tree\n        await Promise.all(initialTreePreloadData.map(preloadTree => dispatch(treePickerSearchSagas.REFRESH_TREE_PICKER({ pickerId: preloadTree.pickerTreeId }))));\n    }\n\nconst getPickerItemTreeId = (itemData: PickerItemPreloadData, homeUuid: string | undefined, pickerId: string) => {\n    const { home, shared } = getProjectsTreePickerIds(pickerId);\n    return ((itemData.isHomeProjectItem && homeUuid) ? home : shared);\n};\n\nconst getPickerItemRootUuid = (itemData: PickerItemPreloadData, homeUuid: string | undefined) => {\n    return (itemData.isHomeProjectItem && homeUuid) ? 
homeUuid : SHARED_PROJECT_ID;\n};\n\nexport const FAVORITES_PROJECT_ID = 'Favorites';\nexport const initFavoritesProject = (pickerId: string) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(receiveTreePickerData({\n            id: '',\n            pickerId,\n            data: [{ uuid: FAVORITES_PROJECT_ID, name: FAVORITES_PROJECT_ID }],\n            extractNodeData: value => ({\n                id: value.uuid,\n                status: TreeNodeStatus.INITIAL,\n                value,\n            }),\n        }));\n    };\n\nexport const PUBLIC_FAVORITES_PROJECT_ID = 'Public Favorites';\nexport const initPublicFavoritesProject = (pickerId: string) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(receiveTreePickerData({\n            id: '',\n            pickerId,\n            data: [{ uuid: PUBLIC_FAVORITES_PROJECT_ID, name: PUBLIC_FAVORITES_PROJECT_ID }],\n            extractNodeData: value => ({\n                id: value.uuid,\n                status: TreeNodeStatus.INITIAL,\n                value,\n            }),\n        }));\n    };\n\nexport const SEARCH_PROJECT_ID = 'Search all Projects';\nexport const initSearchProject = (pickerId: string) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(receiveTreePickerData({\n            id: '',\n            pickerId,\n            data: [{ uuid: SEARCH_PROJECT_ID, name: SEARCH_PROJECT_ID }],\n            extractNodeData: value => ({\n                id: value.uuid,\n                status: TreeNodeStatus.INITIAL,\n                value,\n            }),\n        }));\n    };\n\n\ninterface LoadFavoritesProjectParams {\n    pickerId: string;\n    includeCollections?: boolean;\n    includeDirectories?: boolean;\n    includeFiles?: boolean;\n    options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n}\n\nexport const loadFavoritesProject = (params: typeof treePickerSearchSagas._Record.LOAD_FAVORITES_PROJECT) => (treePickerSearchSagas.LOAD_FAVORITES_PROJECT(params));\n\nexport function* loadFavoritesProjectWatcher() {\n    yield takeEvery(treePickerSearchSagas.tags.LOAD_FAVORITES_PROJECT, loadFavoritesProjectSaga);\n}\n\nfunction* loadFavoritesProjectSaga({type, payload}: {\n    type: typeof treePickerSearchSagas.tags.LOAD_FAVORITES_PROJECT,\n    payload: typeof treePickerSearchSagas._Record.LOAD_FAVORITES_PROJECT,\n}) {\n    try {\n        const services: ServiceRepository = yield getContext(\"services\");\n        const state: RootState = yield select();\n\n        const {\n            pickerId,\n            includeCollections = false,\n            includeDirectories = false,\n            includeFiles = false,\n            options = { showOnlyOwned: true, showOnlyWritable: false },\n        } = payload;\n        const uuid = getUserUuid(state);\n        if (uuid) {\n            const filters = pipe(\n                (fb: FilterBuilder) => includeCollections\n                    ? 
fb.addIsA('head_uuid', [ResourceKind.PROJECT, ResourceKind.COLLECTION])\n                    : fb.addIsA('head_uuid', [ResourceKind.PROJECT]),\n                fb => fb.getFilters(),\n            )(new FilterBuilder());\n\n            const { items } = yield call(\n                {context: services.favoriteService, fn: services.favoriteService.list},\n                uuid,\n                { filters },\n                options.showOnlyOwned,\n            );\n\n            yield put(receiveTreePickerData<GroupContentsResource>({\n                id: 'Favorites',\n                pickerId,\n                data: items.filter((item) => {\n                    if (options.showOnlyWritable && !(item as GroupResource).canWrite) {\n                        return false;\n                    }\n\n                    if (options.showOnlyWritable && item.hasOwnProperty('frozenByUuid') && (item as ProjectResource).frozenByUuid) {\n                        return false;\n                    }\n\n                    return true;\n                }),\n                extractNodeData: extractGroupContentsNodeData(includeDirectories || includeFiles),\n            }));\n        }\n    } catch(e) {\n        yield put(snackbarActions.OPEN_SNACKBAR({ message: `Failed to load favorites`, kind: SnackbarKind.ERROR }));\n    }\n}\n\nexport const loadPublicFavoritesProject = (params: typeof treePickerSearchSagas._Record.LOAD_PUBLIC_FAVORITES_PROJECT) => (treePickerSearchSagas.LOAD_PUBLIC_FAVORITES_PROJECT(params));\n\nexport function* loadPublicFavoritesProjectWatcher() {\n    yield takeEvery(treePickerSearchSagas.tags.LOAD_PUBLIC_FAVORITES_PROJECT, loadPublicFavoritesProjectSaga);\n}\n\nfunction* loadPublicFavoritesProjectSaga({type, payload}: {\n    type: typeof treePickerSearchSagas.tags.LOAD_PUBLIC_FAVORITES_PROJECT,\n    payload: typeof treePickerSearchSagas._Record.LOAD_PUBLIC_FAVORITES_PROJECT,\n}) {\n    try {\n        const services: ServiceRepository = yield getContext(\"services\");\n        const state: RootState = yield select();\n\n        const { pickerId, includeCollections = false, includeDirectories = false, includeFiles = false, options } = payload;\n        const uuidPrefix = state.auth.config.uuidPrefix;\n        const publicProjectUuid = `${uuidPrefix}-j7d0g-publicfavorites`;\n\n        // TODO:\n        // favorites and public favorites ought to use a single method\n        // after getting back a list of links, need to look and stash the resources\n\n        const filters = pipe(\n            (fb: FilterBuilder) => includeCollections\n                ? 
fb.addIsA('head_uuid', [ResourceKind.PROJECT, ResourceKind.COLLECTION])\n                : fb.addIsA('head_uuid', [ResourceKind.PROJECT]),\n            fb => fb\n                .addEqual('link_class', LinkClass.STAR)\n                .addEqual('owner_uuid', publicProjectUuid)\n                .getFilters(),\n        )(new FilterBuilder());\n\n        const { items } = yield call(\n            {context: services.linkService, fn: services.linkService.list},\n            { filters },\n        );\n\n        yield put(receiveTreePickerData<LinkResource>({\n            id: 'Public Favorites',\n            pickerId,\n            data: items.filter(item => {\n                if (options && options.showOnlyWritable && item.hasOwnProperty('frozenByUuid') && (item as any).frozenByUuid) {\n                    return false;\n                }\n\n                return true;\n            }),\n            extractNodeData: item => ({\n                id: item.headUuid,\n                value: item,\n                status: item.headKind === ResourceKind.PROJECT\n                    ? TreeNodeStatus.INITIAL\n                    : includeDirectories || includeFiles\n                        ? TreeNodeStatus.INITIAL\n                        : TreeNodeStatus.LOADED\n            }),\n        }));\n    } catch (e) {\n        yield put(snackbarActions.OPEN_SNACKBAR({ message: `Failed to load public favorites`, kind: SnackbarKind.ERROR }));\n    }\n}\n\nexport const receiveTreePickerProjectsData = (id: string, projects: ProjectResource[], pickerId: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({\n            id,\n            nodes: projects.map(project => initTreeNode({ id: project.uuid, value: project })),\n            pickerId,\n        }));\n\n        dispatch(treePickerActions.EXPAND_TREE_PICKER_NODE({ id, pickerId }));\n    };\n\nexport const loadProjectTreePickerProjects = (id: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(treePickerActions.LOAD_TREE_PICKER_NODE({ id, pickerId: TreePickerId.PROJECTS }));\n\n\n        const ownerUuid = id.length === 0 ? 
getUserUuid(getState()) || '' : id;\n        const { items } = await services.projectService.list(buildParams(ownerUuid));\n\n        dispatch<any>(receiveTreePickerProjectsData(id, items, TreePickerId.PROJECTS));\n    };\n\nexport const loadFavoriteTreePickerProjects = (id: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const parentId = getUserUuid(getState()) || '';\n\n        if (id === '') {\n            dispatch(treePickerActions.LOAD_TREE_PICKER_NODE({ id: parentId, pickerId: TreePickerId.FAVORITES }));\n            const { items } = await services.favoriteService.list(parentId);\n            dispatch<any>(receiveTreePickerProjectsData(parentId, items as ProjectResource[], TreePickerId.FAVORITES));\n        } else {\n            dispatch(treePickerActions.LOAD_TREE_PICKER_NODE({ id, pickerId: TreePickerId.FAVORITES }));\n            const { items } = await services.projectService.list(buildParams(id));\n            dispatch<any>(receiveTreePickerProjectsData(id, items, TreePickerId.FAVORITES));\n        }\n\n    };\n\nexport const loadPublicFavoriteTreePickerProjects = (id: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const parentId = getUserUuid(getState()) || '';\n\n        if (id === '') {\n            dispatch(treePickerActions.LOAD_TREE_PICKER_NODE({ id: parentId, pickerId: TreePickerId.PUBLIC_FAVORITES }));\n            const { items } = await services.favoriteService.list(parentId);\n            dispatch<any>(receiveTreePickerProjectsData(parentId, items as ProjectResource[], TreePickerId.PUBLIC_FAVORITES));\n        } else {\n            dispatch(treePickerActions.LOAD_TREE_PICKER_NODE({ id, pickerId: TreePickerId.PUBLIC_FAVORITES }));\n            const { items } = await services.projectService.list(buildParams(id));\n            dispatch<any>(receiveTreePickerProjectsData(id, items, TreePickerId.PUBLIC_FAVORITES));\n        }\n\n    };\n\nconst buildParams = (ownerUuid: string) => {\n    return {\n        filters: new FilterBuilder()\n            .addEqual('owner_uuid', ownerUuid)\n            .getFilters(),\n        order: new OrderBuilder<ProjectResource>()\n            .addAsc('name')\n            .getOrder()\n    };\n};\n\n/**\n * Given a tree picker item, return collection uuid and path\n *   if the item represents a valid target/destination location\n */\nexport type FileOperationLocation = {\n    name: string;\n    uuid: string;\n    pdh?: string;\n    subpath: string;\n}\n\nexport const isFileOperationLocation = (obj: any): obj is FileOperationLocation => {\n    return obj && typeof obj === 'object' &&\n        typeof obj.name === 'string' &&\n        typeof obj.uuid === 'string' && obj.uuid.length > 0 &&\n        typeof obj.subpath === 'string' && obj.subpath.length > 0;\n}\n\nexport const getFileOperationLocation = (item: ProjectsTreePickerItem) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository): Promise<FileOperationLocation | undefined> => {\n        if ('kind' in item && item.kind === ResourceKind.COLLECTION) {\n            return {\n                name: item.name,\n                uuid: item.uuid,\n                pdh: item.portableDataHash,\n                subpath: '/',\n            };\n        } else if ('type' in item && item.type === CollectionFileType.DIRECTORY) {\n            const uuid = getCollectionResourceCollectionUuid(item.id);\n            if (uuid) {\n                const 
collection = getResource<CollectionResource>(uuid)(getState().resources);\n                if (collection) {\n                    const itemPath = [item.path, item.name].join('/');\n\n                    return {\n                        name: item.name,\n                        uuid,\n                        pdh: collection.portableDataHash,\n                        subpath: itemPath,\n                    };\n                }\n            }\n        }\n        return undefined;\n    };\n\n/**\n * Create an expanded tree picker subtree from an array of nested projects/collections\n *   The first item is assumed to be the root and gets an empty parent id\n *   Nodes must be sorted from the top down to prevent orphaned nodes\n */\nexport const createInitialPickerTree = (sortedAncestors: Array<GroupResource | CollectionResource>, tailUuid: string, initialTree: Tree<GroupResource | CollectionResource>) => {\n    return sortedAncestors\n        .reduce((tree, item, index) => {\n            if (getNode(item.uuid)(tree)) {\n                return tree;\n            } else {\n                return setNode({\n                    children: [],\n                    id: item.uuid,\n                    parent: index === 0 ? '' : item.ownerUuid,\n                    value: item,\n                    active: false,\n                    selected: false,\n                    expanded: false,\n                    status: item.uuid !== tailUuid ? TreeNodeStatus.LOADED : TreeNodeStatus.INITIAL,\n                })(tree);\n            }\n        }, initialTree);\n};\n\nexport const fileOperationLocationToPickerId = (location: FileOperationLocation): string => {\n    let id = location.uuid;\n    if (location.subpath.length && location.subpath !== '/') {\n        id = id + location.subpath;\n    }\n    return id;\n}\n
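\n// ---------------------------------------------------------------------------\n// Illustrative usage sketch (an assumption, not part of this module's API):\n// how a consumer could restore a previously picked location in a fresh picker.\n// MY_PICKER_ID and `item` are hypothetical; getFileOperationLocation,\n// fileOperationLocationToPickerId and loadInitialValue are defined above.\n//\n//   const MY_PICKER_ID = 'examplePicker';\n//   const location = await dispatch<any>(getFileOperationLocation(item));\n//   if (location) {\n//       // e.g. \"zzzzz-4zz18-xxxxxxxxxxxxxxx/subdir\" for a directory item\n//       const initialId = fileOperationLocationToPickerId(location);\n//       dispatch<any>(loadInitialValue([initialId], MY_PICKER_ID, true, true, false));\n//   }\n// ---------------------------------------------------------------------------\n"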
  },
  {
    "path": "services/workbench2/src/store/tree-picker/tree-picker-middleware.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { LinkResource } from \"models/link\";\nimport { UserResource } from \"models/user\";\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { CollectionDirectory, CollectionFile } from 'models/collection-file';\n\nexport interface ProjectsTreePickerRootItem {\n    id: string;\n    name: string;\n}\n\nexport type ProjectsTreePickerItem = ProjectsTreePickerRootItem | GroupContentsResource | CollectionDirectory | CollectionFile | LinkResource | UserResource;\n"
  },
  {
    "path": "services/workbench2/src/store/tree-picker/tree-picker-reducer.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { createTree, getNodeChildrenIds, getNode, TreeNodeStatus } from 'models/tree';\nimport { pipe } from 'lodash/fp';\nimport { treePickerReducer } from \"./tree-picker-reducer\";\nimport { treePickerActions } from \"./tree-picker-actions\";\nimport { initTreeNode } from 'models/tree';\n\ndescribe('TreePickerReducer', () => {\n    it('LOAD_TREE_PICKER_NODE - initial state', () => {\n        const tree = createTree();\n        const newState = treePickerReducer({}, treePickerActions.LOAD_TREE_PICKER_NODE({ id: '1', pickerId: \"projects\" }));\n        expect(newState).to.deep.equal({ 'projects': tree });\n    });\n\n    it('LOAD_TREE_PICKER_NODE', () => {\n        const node = initTreeNode({ id: '1', value: '1' });\n        const newState = pipe(\n            (state) => treePickerReducer(state, treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({ id: '', nodes: [node], pickerId: \"projects\" })),\n            state => treePickerReducer(state, treePickerActions.LOAD_TREE_PICKER_NODE({ id: '1', pickerId: \"projects\" }))\n        )({ projects: createTree() });\n\n        expect(getNode('1')(newState.projects)).to.deep.equal({\n            ...initTreeNode({ id: '1', value: '1' }),\n            status: TreeNodeStatus.PENDING\n        });\n    });\n\n    it('LOAD_TREE_PICKER_NODE_SUCCESS - initial state', () => {\n        const subNode = initTreeNode({ id: '1.1', value: '1.1' });\n        const newState = treePickerReducer({}, treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({ id: '', nodes: [subNode], pickerId: \"projects\" }));\n        expect(getNodeChildrenIds('')(newState.projects)).to.deep.equal(['1.1']);\n    });\n\n    it('LOAD_TREE_PICKER_NODE_SUCCESS', () => {\n        const node = initTreeNode({ id: '1', value: '1' });\n        const subNode = initTreeNode({ id: '1.1', value: '1.1' });\n        const newState = pipe(\n            (state) => treePickerReducer(state, treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({ id: '', nodes: [node], pickerId: \"projects\" })),\n            state => treePickerReducer(state, treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({ id: '1', nodes: [subNode], pickerId: \"projects\" }))\n        )({ projects: createTree() });\n        expect(getNodeChildrenIds('1')(newState.projects)).to.deep.equal(['1.1']);\n        expect(getNode('1')(newState.projects)).to.deep.equal({\n            ...initTreeNode({ id: '1', value: '1' }),\n            children: ['1.1'],\n            status: TreeNodeStatus.LOADED\n        });\n    });\n\n    it('TOGGLE_TREE_PICKER_NODE_COLLAPSE - expanded', () => {\n        const node = initTreeNode({ id: '1', value: '1' });\n        const newState = pipe(\n            (state) => treePickerReducer(state, treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({ id: '', nodes: [node], pickerId: \"projects\" })),\n            state => treePickerReducer(state, treePickerActions.TOGGLE_TREE_PICKER_NODE_COLLAPSE({ id: '1', pickerId: \"projects\" }))\n        )({ projects: createTree() });\n        expect(getNode('1')(newState.projects)).to.deep.equal({\n            ...initTreeNode({ id: '1', value: '1' }),\n            expanded: true\n        });\n    });\n\n    it('TOGGLE_TREE_PICKER_NODE_COLLAPSE - expanded', () => {\n        const node = initTreeNode({ id: '1', value: '1' });\n        const newState = pipe(\n            (state) => treePickerReducer(state, treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({ id: '', nodes: [node], 
pickerId: \"projects\" })),\n            state => treePickerReducer(state, treePickerActions.TOGGLE_TREE_PICKER_NODE_COLLAPSE({ id: '1', pickerId: \"projects\" })),\n            state => treePickerReducer(state, treePickerActions.TOGGLE_TREE_PICKER_NODE_COLLAPSE({ id: '1', pickerId: \"projects\" })),\n        )({ projects: createTree() });\n        expect(getNode('1')(newState.projects)).to.deep.equal({\n            ...initTreeNode({ id: '1', value: '1' }),\n            expanded: false\n        });\n    });\n\n    it('ACTIVATE_TREE_PICKER_NODE', () => {\n        const node = initTreeNode({ id: '1', value: '1' });\n        const newState = pipe(\n            (state) => treePickerReducer(state, treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({ id: '', nodes: [node], pickerId: \"projects\" })),\n            state => treePickerReducer(state, treePickerActions.ACTIVATE_TREE_PICKER_NODE({ id: '1', pickerId: \"projects\" })),\n        )({ projects: createTree() });\n        expect(getNode('1')(newState.projects)).to.deep.equal({\n            ...initTreeNode({ id: '1', value: '1' }),\n            active: true\n        });\n    });\n\n    it('TOGGLE_TREE_PICKER_NODE_SELECTION', () => {\n        const node = initTreeNode({ id: '1', value: '1' });\n        const subNode = initTreeNode({ id: '1.1', value: '1.1' });\n        const newState = pipe(\n            (state) => treePickerReducer(state, treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({ id: '', nodes: [node], pickerId: \"projects\" })),\n            state => treePickerReducer(state, treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({ id: '1', nodes: [subNode], pickerId: \"projects\" })),\n            state => treePickerReducer(state, treePickerActions.TOGGLE_TREE_PICKER_NODE_SELECTION({ id: '1.1', pickerId: \"projects\", cascade: true })),\n        )({ projects: createTree() });\n        expect(getNode('1')(newState.projects)).to.deep.equal({\n            ...initTreeNode({ id: '1', value: '1' }),\n            selected: true,\n            children: ['1.1'],\n            status: TreeNodeStatus.LOADED,\n        });\n    });\n\n    it('does not set malformed node', () => {\n        const node = initTreeNode({ id: '1', value: '1' });\n        const malformedNode = initTreeNode({ id: '', value: NaN });\n        const newState = pipe(\n            (state) => treePickerReducer(state, treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({ id: node.id, nodes: [node], pickerId: \"projects\" })),\n            state => treePickerReducer(state, treePickerActions.LOAD_TREE_PICKER_NODE_SUCCESS({ id: malformedNode.id, nodes: [malformedNode], pickerId: \"projects\" })),\n        )({ projects: createTree() });\n        expect(getNode(node.id)(newState.projects)).to.deep.equal({\n            active: false,\n            children: [ \"1\" ],\n            expanded: false,\n            id: \"1\",\n            parent: \"1\",\n            selected: false,\n            status: \"INITIAL\",\n            value: \"1\"\n        });\n        expect(getNode(malformedNode.id)(newState.projects)).to.equal(undefined);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/store/tree-picker/tree-picker-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport {\n    createTree, TreeNode, setNode, Tree, TreeNodeStatus, setNodeStatus,\n    expandNode, deactivateNode, selectNodes, deselectNodes,\n    activateNode, getNode, toggleNodeCollapse, toggleNodeSelection, appendSubtree, expandNodeAncestors\n} from 'models/tree';\nimport { TreePicker } from \"./tree-picker\";\nimport { treePickerActions, treePickerSearchActions, TreePickerAction, TreePickerSearchAction, LoadProjectParams } from \"./tree-picker-actions\";\nimport { compose } from \"redux\";\nimport { pipe } from 'lodash/fp';\n\nexport const treePickerReducer = (state: TreePicker = {}, action: TreePickerAction) =>\n    treePickerActions.match(action, {\n        LOAD_TREE_PICKER_NODE: ({ id, pickerId }) =>\n            updateOrCreatePicker(state, pickerId, setNodeStatus(id)(TreeNodeStatus.PENDING)),\n\n        LOAD_TREE_PICKER_NODE_SUCCESS: ({ id, nodes, pickerId }) =>\n            updateOrCreatePicker(state, pickerId, compose(receiveNodes(nodes)(id), setNodeStatus(id)(TreeNodeStatus.LOADED))),\n\n        APPEND_TREE_PICKER_NODE_SUBTREE: ({ id, subtree, pickerId }) =>\n            updateOrCreatePicker(state, pickerId, compose(appendSubtree(id, subtree), setNodeStatus(id)(TreeNodeStatus.LOADED))),\n\n        TOGGLE_TREE_PICKER_NODE_COLLAPSE: ({ id, pickerId }) =>\n            updateOrCreatePicker(state, pickerId, toggleNodeCollapse(id)),\n\n        EXPAND_TREE_PICKER_NODE: ({ id, pickerId }) =>\n            updateOrCreatePicker(state, pickerId, expandNode(id)),\n\n        EXPAND_TREE_PICKER_NODE_ANCESTORS: ({ id, pickerId }) =>\n            updateOrCreatePicker(state, pickerId, expandNodeAncestors(id)),\n\n        ACTIVATE_TREE_PICKER_NODE: ({ id, pickerId, relatedTreePickers = [] }) =>\n            pipe(\n                () => relatedTreePickers.reduce(\n                    (state, relatedPickerId) => updateOrCreatePicker(state, relatedPickerId, deactivateNode),\n                    state\n                ),\n                state => updateOrCreatePicker(state, pickerId, activateNode(id))\n            )(),\n\n        DEACTIVATE_TREE_PICKER_NODE: ({ pickerId }) =>\n            updateOrCreatePicker(state, pickerId, deactivateNode),\n\n        TOGGLE_TREE_PICKER_NODE_SELECTION: ({ id, pickerId, cascade }) =>\n            updateOrCreatePicker(state, pickerId, toggleNodeSelection(id, cascade)),\n\n        SELECT_TREE_PICKER_NODE: ({ id, pickerId, cascade }) =>\n            updateOrCreatePicker(state, pickerId, selectNodes(id, cascade)),\n\n        DESELECT_TREE_PICKER_NODE: ({ id, pickerId, cascade }) =>\n            updateOrCreatePicker(state, pickerId, deselectNodes(id, cascade)),\n\n        RESET_TREE_PICKER: ({ pickerId }) =>\n            updateOrCreatePicker(state, pickerId, createTree),\n\n        EXPAND_TREE_PICKER_NODES: ({ pickerId, ids }) =>\n            updateOrCreatePicker(state, pickerId, expandNode(...ids)),\n\n        default: () => state\n    });\n\nconst updateOrCreatePicker = <V>(state: TreePicker, pickerId: string, func: (value: Tree<V>) => Tree<V>) => {\n    const picker = state[pickerId] || createTree();\n    const updatedPicker = func(picker);\n    return { ...state, [pickerId]: updatedPicker };\n};\n\nconst receiveNodes = <V>(nodes: Array<TreeNode<V>>) => (parent: string) => (state: Tree<V>) => {\n    const parentNode = getNode(parent)(state);\n    let newState = state;\n    if (parentNode) {\n        newState = setNode({ ...parentNode, children: [] 
})(state);\n    }\n    return nodes.reduce((tree, node) => {\n        if (!node.id) return tree;\n        const preexistingNode = getNode(node.id)(state);\n        if (preexistingNode) {\n            node = { ...preexistingNode, value: node.value };\n        }\n        return setNode({ ...node, parent })(tree);\n    }, newState);\n};\n\ninterface TreePickerSearch {\n    projectSearchValues: { [pickerId: string]: string };\n    collectionFilterValues: { [pickerId: string]: string };\n    loadProjectParams: { [pickerId: string]: LoadProjectParams };\n}\n\nexport const treePickerSearchReducer = (state: TreePickerSearch = { projectSearchValues: {}, collectionFilterValues: {}, loadProjectParams: {} }, action: TreePickerSearchAction) =>\n    treePickerSearchActions.match(action, {\n        SET_TREE_PICKER_PROJECT_SEARCH: ({ pickerId, projectSearchValue }) => ({\n            ...state, projectSearchValues: { ...state.projectSearchValues, [pickerId]: projectSearchValue }\n        }),\n\n        SET_TREE_PICKER_COLLECTION_FILTER: ({ pickerId, collectionFilterValue }) => ({\n            ...state, collectionFilterValues: { ...state.collectionFilterValues, [pickerId]: collectionFilterValue }\n        }),\n\n        SET_TREE_PICKER_LOAD_PARAMS: ({ pickerId, params }) => ({\n            ...state, loadProjectParams: { ...state.loadProjectParams, [pickerId]: params }\n        }),\n\n        default: () => state\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/tree-picker/tree-picker.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Tree } from \"models/tree\";\nimport { TreeNodeStatus } from 'models/tree';\n\nexport type TreePicker = { [key: string]: Tree<any> };\n\nexport const getTreePicker = <Value = {}>(id: string) => (state: TreePicker): Tree<Value> | undefined => state[id];\n\nexport const createTreePickerNode = (data: { nodeId: string, value: any }) => ({\n    ...data,\n    selected: false,\n    collapsed: true,\n    status: TreeNodeStatus.INITIAL\n});\n\n"
  },
  {
    "path": "services/workbench2/src/store/user-preferences/user-preferences-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\nimport { RootState } from \"store/store\";\nimport { Dispatch } from 'redux';\nimport { initialize, reset } from \"redux-form\";\nimport { ServiceRepository } from \"services/services\";\nimport { showErrorSnackbar, showSuccessSnackbar } from \"store/snackbar/snackbar-actions\";\nimport { updateResources } from \"store/resources/resources-actions\";\nimport { UserResource } from \"models/user\";\nimport { authActions } from \"store/auth/auth-action\";\n\nexport const USER_PREFERENCES_FORM = 'userPreferencesForm';\n\nconst GENERIC_LOAD_ERROR = \"Could not load user profile\";\nconst SAVE_ERROR = \"Could not save preferences\";\n\nexport const loadUserPreferencesPanel = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const uuid = getState().auth.user?.uuid;\n        if (uuid) {\n            try {\n                const user = await services.userService.get(uuid, false);\n                dispatch(initialize(USER_PREFERENCES_FORM, user));\n                dispatch(updateResources([user]));\n            } catch (e) {\n                dispatch(reset(USER_PREFERENCES_FORM));\n                dispatch(showErrorSnackbar(GENERIC_LOAD_ERROR));\n            }\n        } else {\n            dispatch(showErrorSnackbar(GENERIC_LOAD_ERROR));\n        }\n    }\n\nexport const saveUserPreferences = (user: UserResource) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        if (user.uuid) {\n            try {\n                const updatedUser = await services.userService.update(user.uuid, user);\n                dispatch(updateResources([updatedUser]));\n                // If edited user is current user, update auth store\n                const currentUserUuid = getState().auth.user?.uuid;\n                if (currentUserUuid && currentUserUuid === updatedUser.uuid) {\n                    dispatch(authActions.USER_DETAILS_SUCCESS(updatedUser));\n                }\n                dispatch(initialize(USER_PREFERENCES_FORM, updatedUser));\n                dispatch(showSuccessSnackbar(\"Preferences saved\"));\n            } catch (e) {\n                dispatch(showErrorSnackbar(SAVE_ERROR));\n            }\n        } else {\n            dispatch(showErrorSnackbar(GENERIC_LOAD_ERROR));\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/user-profile/user-profile-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\nimport { RootState } from \"store/store\";\nimport { Dispatch } from 'redux';\nimport { initialize, reset } from \"redux-form\";\nimport { ServiceRepository } from \"services/services\";\nimport { bindDataExplorerActions } from \"store/data-explorer/data-explorer-action\";\nimport { propertiesActions } from 'store/properties/properties-actions';\nimport { getProperty } from 'store/properties/properties';\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { deleteResources, updateResources } from \"store/resources/resources-actions\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { filterResources } from \"store/resources/resources\";\nimport { ResourceKind } from \"models/resource\";\nimport { LinkClass, LinkResource } from \"models/link\";\nimport { BuiltinGroups, getBuiltinGroupUuid } from \"models/group\";\nimport { authActions } from \"store/auth/auth-action\";\n\nexport const USER_PROFILE_PANEL_ID = 'userProfilePanel';\nexport const USER_PROFILE_FORM = 'userProfileForm';\nexport const DEACTIVATE_DIALOG = 'deactivateDialog';\nexport const SETUP_DIALOG = 'setupDialog';\nexport const ACTIVATE_DIALOG = 'activateDialog';\nexport const IS_PROFILE_INACCESSIBLE = 'isProfileInaccessible';\n\nexport const UserProfileGroupsActions = bindDataExplorerActions(USER_PROFILE_PANEL_ID);\n\nexport const getCurrentUserProfilePanelUuid = getProperty<string>(USER_PROFILE_PANEL_ID);\nexport const getUserProfileIsInaccessible = getProperty<boolean>(IS_PROFILE_INACCESSIBLE);\n\nexport const loadUserProfilePanel = (userUuid?: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        // Reset isInacessible to ensure error screen is hidden\n        dispatch(propertiesActions.SET_PROPERTY({ key: IS_PROFILE_INACCESSIBLE, value: false }));\n        // Get user uuid from route or use current user uuid\n        const uuid = userUuid || getState().auth.user?.uuid;\n        if (uuid) {\n            await dispatch(propertiesActions.SET_PROPERTY({ key: USER_PROFILE_PANEL_ID, value: uuid }));\n            try {\n                const user = await services.userService.get(uuid, false, [\"uuid\", \"first_name\", \"last_name\", \"email\", \"username\", \"prefs\", \"is_admin\", \"is_active\"]);\n                dispatch(initialize(USER_PROFILE_FORM, user));\n                dispatch(updateResources([user]));\n                dispatch(UserProfileGroupsActions.REQUEST_ITEMS());\n            } catch (e) {\n                if (e.status === 404) {\n                    await dispatch(propertiesActions.SET_PROPERTY({ key: IS_PROFILE_INACCESSIBLE, value: true }));\n                    dispatch(reset(USER_PROFILE_FORM));\n                } else {\n                    dispatch(snackbarActions.OPEN_SNACKBAR({\n                        message: 'Could not load user profile',\n                        kind: SnackbarKind.ERROR\n                    }));\n                }\n            }\n        }\n    }\n\nexport const saveEditedUser = (resource: any) =>\n    async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            const updatedUser = await services.userService.update(resource.uuid, resource);\n            dispatch(updateResources([updatedUser]));\n            // If edited user is current user, update auth store\n            const 
currentUserUuid = getState().auth.user?.uuid;\n            if (currentUserUuid && currentUserUuid === updatedUser.uuid) {\n                dispatch(authActions.USER_DETAILS_SUCCESS(updatedUser));\n            }\n            dispatch(initialize(USER_PROFILE_FORM, updatedUser));\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Profile has been updated.\", hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n        } catch (e) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: \"Could not update profile\",\n                kind: SnackbarKind.ERROR,\n            }));\n        }\n    };\n\nexport const openSetupDialog = (uuid: string) =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: SETUP_DIALOG,\n            data: {\n                title: 'Setup user',\n                text: 'Are you sure you want to set up this user?',\n                confirmButtonLabel: 'Confirm',\n                uuid\n            }\n        }));\n    };\n\nexport const openActivateDialog = (uuid: string) =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: ACTIVATE_DIALOG,\n            data: {\n                title: 'Activate user',\n                text: 'Are you sure you want to activate this user?',\n                confirmButtonLabel: 'Confirm',\n                uuid\n            }\n        }));\n    };\n\nexport const openDeactivateDialog = (uuid: string) =>\n    (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: DEACTIVATE_DIALOG,\n            data: {\n                title: 'Deactivate user',\n                text: 'Are you sure you want to deactivate this user?',\n                confirmButtonLabel: 'Confirm',\n                uuid\n            }\n        }));\n    };\n\nexport const setup = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            const resources = await services.userService.setup(uuid);\n            dispatch(updateResources(resources.items));\n\n            // Refresh data explorer\n            dispatch(UserProfileGroupsActions.REQUEST_ITEMS());\n\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"User has been set up\", hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n        } catch (e) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: e.message, hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        } finally {\n            dispatch(dialogActions.CLOSE_DIALOG({ id: SETUP_DIALOG }));\n        }\n    };\n\nexport const activate = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            const user = await services.userService.activate(uuid);\n            dispatch(updateResources([user]));\n\n            // Refresh data explorer\n            dispatch(UserProfileGroupsActions.REQUEST_ITEMS());\n\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"User has been activated\", hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n        } catch (e) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: e.message, hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        }\n    };\n\nexport const deactivate = (uuid: string) =>\n 
   async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            const { resources, auth } = getState();\n            // Call unsetup\n            const user = await services.userService.unsetup(uuid);\n            dispatch(updateResources([user]));\n\n            // Find the user's memberships in the built-in \"All Users\" group\n            const allUsersGroupUuid = getBuiltinGroupUuid(auth.localCluster, BuiltinGroups.ALL);\n            const memberships = filterResources((resource: LinkResource) =>\n                resource.kind === ResourceKind.LINK &&\n                resource.linkClass === LinkClass.PERMISSION &&\n                resource.headUuid === allUsersGroupUuid &&\n                resource.tailUuid === uuid\n            )(resources);\n            // Remove those memberships from the local resource store\n            dispatch<any>(deleteResources(memberships.map(link => link.uuid)));\n\n            // Refresh data explorer\n            dispatch(UserProfileGroupsActions.REQUEST_ITEMS());\n\n            dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: \"User has been deactivated.\",\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS\n            }));\n        } catch (e) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: \"Could not deactivate user\",\n                kind: SnackbarKind.ERROR,\n            }));\n        }\n    };\n
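\n// Illustrative usage sketch (an assumption, not part of this module): a\n// confirmation dialog's confirm handler would close DEACTIVATE_DIALOG and then\n// dispatch deactivate with the uuid carried in the dialog data.\n//\n//   const onDeactivateConfirm = (uuid: string) => (dispatch: Dispatch<any>) => {\n//       dispatch(dialogActions.CLOSE_DIALOG({ id: DEACTIVATE_DIALOG }));\n//       dispatch<any>(deactivate(uuid));\n//   };\n"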
  },
  {
    "path": "services/workbench2/src/store/user-profile/user-profile-groups-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ServiceRepository } from 'services/services';\nimport { MiddlewareAPI, Dispatch } from 'redux';\nimport { DataExplorerMiddlewareService, listResultsToDataExplorerItemsMeta } from 'store/data-explorer/data-explorer-middleware-service';\nimport { RootState } from 'store/store';\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { getCurrentUserProfilePanelUuid, UserProfileGroupsActions } from 'store/user-profile/user-profile-actions';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { LinkClass } from 'models/link';\nimport { ResourceKind } from 'models/resource';\nimport { GroupClass } from 'models/group';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\n\nexport class UserProfileGroupsMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>) {\n        const state = api.getState();\n        const userUuid = getCurrentUserProfilePanelUuid(state.properties);\n        if (userUuid) {\n            try {\n                api.dispatch(progressIndicatorActions.START_WORKING(this.getId()));\n\n                // Get user\n                const user = await this.services.userService.get(userUuid);\n                api.dispatch(updateResources([user]));\n\n                // Get user's group memberships\n                const groupMembershipLinks = await this.services.permissionService.list({\n                    filters: new FilterBuilder()\n                        .addEqual('tail_uuid', userUuid)\n                        .addEqual('link_class', LinkClass.PERMISSION)\n                        .addEqual('head_kind', ResourceKind.GROUP)\n                        .getFilters()\n                });\n                // Update resources, includes \"project\" groups\n                api.dispatch(updateResources(groupMembershipLinks.items));\n\n                // Get user's groups details and filter to role groups\n                const groups = await this.services.groupsService.list({\n                    filters: new FilterBuilder()\n                        .addIn('uuid', groupMembershipLinks.items\n                            .map(item => item.headUuid))\n                        .addEqual('group_class', GroupClass.ROLE)\n                        .getFilters(),\n                    count: \"none\"\n                });\n                api.dispatch(updateResources(groups.items));\n\n                // Get permission links for only role groups\n                const roleGroupMembershipLinks = await this.services.permissionService.list({\n                    filters: new FilterBuilder()\n                        .addIn('head_uuid', groups.items.map(item => item.uuid))\n                        .addEqual('tail_uuid', userUuid)\n                        .addEqual('link_class', LinkClass.PERMISSION)\n                        .addEqual('head_kind', ResourceKind.GROUP)\n                        .getFilters()\n                });\n\n                api.dispatch(UserProfileGroupsActions.SET_ITEMS({\n                    ...listResultsToDataExplorerItemsMeta(roleGroupMembershipLinks),\n                    items: roleGroupMembershipLinks.items.map(item => 
item.uuid),\n                }));\n            } catch {\n                api.dispatch(couldNotFetchGroups());\n            } finally {\n                api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n            }\n        }\n    }\n\n    // Groups are filtered from a list request\n    // and cannot currently support separate count requests\n    async requestCount() {}\n}\n\nconst couldNotFetchGroups = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch groups.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/users/user-panel-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ServiceRepository } from 'services/services';\nimport { MiddlewareAPI, Dispatch } from 'redux';\nimport { DataExplorerMiddlewareService, dataExplorerToListParams, listResultsToDataExplorerItemsMeta } from 'store/data-explorer/data-explorer-middleware-service';\nimport { RootState } from 'store/store';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { DataExplorer, getDataExplorer } from 'store/data-explorer/data-explorer-reducer';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { SortDirection } from 'components/data-table/data-column';\nimport { OrderDirection, OrderBuilder } from 'services/api/order-builder';\nimport { ListArguments, ListResults } from 'services/common-service/common-service';\nimport { userBindedActions } from 'store/users/users-actions';\nimport { getSortColumn } from \"store/data-explorer/data-explorer-reducer\";\nimport { UserResource } from 'models/user';\nimport { UserPanelColumnNames } from 'views/user-panel/user-panel-columns';\nimport { BuiltinGroups, getBuiltinGroupUuid } from 'models/group';\nimport { LinkClass } from 'models/link';\nimport { progressIndicatorActions } from \"store/progress-indicator/progress-indicator-actions\";\nimport { couldNotFetchItemsAvailable } from 'store/data-explorer/data-explorer-action';\n\nexport class UserMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        try {\n            if (!background) { api.dispatch(progressIndicatorActions.START_WORKING(this.getId())); }\n            const users = await this.services.userService.list(getParams(dataExplorer));\n            api.dispatch(updateResources(users.items));\n            api.dispatch(setItems(users));\n\n            // Get \"all users\" group memberships for account status\n            const allUsersGroupUuid = getBuiltinGroupUuid(state.auth.localCluster, BuiltinGroups.ALL);\n            const allUserMemberships = await this.services.permissionService.list({\n                filters: new FilterBuilder()\n                    .addEqual('head_uuid', allUsersGroupUuid)\n                    .addEqual('link_class', LinkClass.PERMISSION)\n                    .getFilters()\n            });\n            api.dispatch(updateResources(allUserMemberships.items));\n        } catch {\n            api.dispatch(couldNotFetchUsers());\n        } finally {\n            api.dispatch(progressIndicatorActions.STOP_WORKING(this.getId()));\n        }\n    }\n\n    async requestCount(api: MiddlewareAPI<Dispatch, RootState>, criteriaChanged?: boolean, background?: boolean) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n\n        if (criteriaChanged) {\n            // Get itemsAvailable\n            return this.services.userService.list(getCountParams(dataExplorer))\n                .then((results: ListResults<UserResource>) => {\n                    if (results.itemsAvailable !== undefined) {\n                        
api.dispatch<any>(userBindedActions.SET_ITEMS_AVAILABLE(results.itemsAvailable));\n                    } else {\n                        api.dispatch(couldNotFetchItemsAvailable());\n                    }\n                });\n        }\n    }\n}\n\nconst getFilters = (dataExplorer: DataExplorer) => (\n    new FilterBuilder()\n        .addFullTextSearch(dataExplorer.searchValue)\n        .getFilters()\n);\n\nconst getParams = (dataExplorer: DataExplorer): ListArguments => ({\n    ...dataExplorerToListParams(dataExplorer),\n    order: getOrder(dataExplorer),\n    filters: getFilters(dataExplorer),\n    count: 'none',\n});\n\nconst getCountParams = (dataExplorer: DataExplorer): ListArguments => ({\n    limit: 0,\n    count: 'exact',\n    filters: getFilters(dataExplorer),\n});\n\nconst getOrder = (dataExplorer: DataExplorer) => {\n    const sortColumn = getSortColumn<UserResource>(dataExplorer);\n    const order = new OrderBuilder<UserResource>();\n    if (sortColumn && sortColumn.sort) {\n        const sortDirection = sortColumn.sort.direction === SortDirection.ASC\n            ? OrderDirection.ASC\n            : OrderDirection.DESC;\n\n        if (sortColumn.name === UserPanelColumnNames.NAME) {\n            order.addOrder(sortDirection, \"firstName\")\n                .addOrder(sortDirection, \"lastName\");\n        } else {\n            order.addOrder(sortDirection, sortColumn.sort.field);\n        }\n\n        // Use createdAt as a secondary sort column so we break ties consistently.\n        order.addOrder(OrderDirection.DESC, \"createdAt\");\n    }\n    return order.getOrder();\n};\n\nexport const setItems = (listResults: ListResults<UserResource>) =>\n    userBindedActions.SET_ITEMS({\n        ...listResultsToDataExplorerItemsMeta(listResults),\n        items: listResults.items.map(resource => resource.uuid),\n    });\n\nconst couldNotFetchUsers = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch users.',\n        kind: SnackbarKind.ERROR\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/users/users-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { bindDataExplorerActions } from 'store/data-explorer/data-explorer-action';\nimport { RootState } from 'store/store';\nimport { getUserUuid } from \"common/getuser\";\nimport { ServiceRepository } from \"services/services\";\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { startSubmit, reset, stopSubmit } from \"redux-form\";\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { UserResource } from \"models/user\";\nimport { filterResources, getResource } from 'store/resources/resources';\nimport { navigateTo, navigateToUsers, navigateToRootProject } from \"store/navigation/navigation-action\";\nimport { authActions } from 'store/auth/auth-action';\nimport { getTokenV2 } from \"models/api-client-authorization\";\nimport { VIRTUAL_MACHINE_ADD_LOGIN_GROUPS_FIELD, VIRTUAL_MACHINE_ADD_LOGIN_VM_FIELD } from \"store/virtual-machines/virtual-machines-actions\";\nimport { PermissionLevel } from \"models/permission\";\nimport { updateResources } from \"store/resources/resources-actions\";\nimport { BuiltinGroups, getBuiltinGroupUuid } from \"models/group\";\nimport { LinkClass, LinkResource } from \"models/link\";\nimport { ResourceKind } from \"models/resource\";\n\nexport const USERS_PANEL_ID = 'usersPanel';\nexport const USER_ATTRIBUTES_DIALOG = 'userAttributesDialog';\nexport const USER_CREATE_FORM_NAME = 'userCreateFormName';\n\nexport interface UserCreateFormDialogData {\n    email: string;\n    [VIRTUAL_MACHINE_ADD_LOGIN_VM_FIELD]: string;\n    [VIRTUAL_MACHINE_ADD_LOGIN_GROUPS_FIELD]: string[];\n}\n\nexport const userBindedActions = bindDataExplorerActions(USERS_PANEL_ID);\n\nexport const openUserAttributes = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const { resources } = getState();\n        const data = getResource<UserResource>(uuid)(resources);\n        dispatch(dialogActions.OPEN_DIALOG({ id: USER_ATTRIBUTES_DIALOG, data }));\n    };\n\nexport const loginAs = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const userUuid = getUserUuid(getState());\n        if (userUuid === uuid) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: 'You are already logged in as this user',\n                kind: SnackbarKind.WARNING\n            }));\n        } else {\n            try {\n                const { resources } = getState();\n                const data = getResource<UserResource>(uuid)(resources);\n                const client = await services.apiClientAuthorizationService.create({ ownerUuid: uuid }, false);\n                if (data) {\n                    dispatch<any>(authActions.INIT_USER({ user: data, token: getTokenV2(client) }));\n                    window.location.reload();\n                    dispatch<any>(navigateToRootProject);\n                }\n            } catch (e) {\n                if (e.status === 403) {\n                    dispatch(snackbarActions.OPEN_SNACKBAR({\n                        message: 'You do not have permission to login as this user',\n                        kind: SnackbarKind.WARNING\n                    }));\n                } else {\n                    dispatch(snackbarActions.OPEN_SNACKBAR({\n                        message: 'Failed to login as this user',\n 
                       kind: SnackbarKind.ERROR\n                    }));\n                }\n            }\n        }\n    };\n\nexport const openUserCreateDialog = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const userUuid = getUserUuid(getState());\n        if (!userUuid) { return; }\n        const user = await services.userService.get(userUuid);\n        const virtualMachines = await services.virtualMachineService.list();\n        dispatch(reset(USER_CREATE_FORM_NAME));\n        dispatch(dialogActions.OPEN_DIALOG({ id: USER_CREATE_FORM_NAME, data: { user, ...virtualMachines } }));\n    };\n\nexport const openUserProjects = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch<any>(navigateTo(uuid));\n    };\n\nexport const createUser = (data: UserCreateFormDialogData) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(startSubmit(USER_CREATE_FORM_NAME));\n        try {\n            const newUser = await services.userService.create({\n                email: data.email,\n            });\n            dispatch(updateResources([newUser]));\n\n            if (data[VIRTUAL_MACHINE_ADD_LOGIN_VM_FIELD]) {\n                const permission = await services.permissionService.create({\n                    headUuid: data[VIRTUAL_MACHINE_ADD_LOGIN_VM_FIELD],\n                    tailUuid: newUser.uuid,\n                    name: PermissionLevel.CAN_LOGIN,\n                    properties: {\n                        username: newUser.username,\n                        groups: data.groups,\n                    }\n                });\n                dispatch(updateResources([permission]));\n            }\n\n            dispatch(dialogActions.CLOSE_DIALOG({ id: USER_CREATE_FORM_NAME }));\n            dispatch(reset(USER_CREATE_FORM_NAME));\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"User has been successfully created.\", hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n            dispatch<any>(loadUsersPanel());\n            return newUser;\n        } catch (e) {\n            return;\n        } finally {\n            dispatch(stopSubmit(USER_CREATE_FORM_NAME));\n        }\n    };\n\nexport const openUserPanel = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const user = getState().auth.user;\n        if (user && user.isAdmin) {\n            dispatch<any>(navigateToUsers);\n        } else {\n            dispatch<any>(navigateToRootProject);\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"You don't have permissions to view this page\", hideDuration: 2000 }));\n        }\n    };\n\nexport const loadUsersPanel = () =>\n    (dispatch: Dispatch) => {\n        dispatch(userBindedActions.RESET_EXPLORER_SEARCH_VALUE());\n        dispatch(userBindedActions.REQUEST_ITEMS());\n    };\n\nexport enum UserAccountStatus {\n    ACTIVE = 'Active',\n    INACTIVE = 'Inactive',\n    SETUP = 'Setup',\n    OTHER = 'Other',\n}\n\nexport const getUserAccountStatus = (state: RootState, uuid: string) => {\n    const user = getResource<UserResource>(uuid)(state.resources);\n    // Get membership links for the \"all users\" group\n    const allUsersGroupUuid = getBuiltinGroupUuid(state.auth.localCluster, BuiltinGroups.ALL);\n    const permissions = 
filterResources((resource: LinkResource) =>\n        resource.kind === ResourceKind.LINK &&\n        resource.linkClass === LinkClass.PERMISSION &&\n        resource.headUuid === allUsersGroupUuid &&\n        resource.tailUuid === uuid\n    )(state.resources);\n\n    return user && user.isActive\n        ? UserAccountStatus.ACTIVE\n        : permissions.length > 0\n            ? UserAccountStatus.SETUP\n            : UserAccountStatus.INACTIVE;\n}\n"
  },
  {
    "path": "services/workbench2/src/store/virtual-machines/virtual-machines-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from \"services/services\";\nimport { navigateToUserVirtualMachines, navigateToAdminVirtualMachines, navigateToRootProject } from \"store/navigation/navigation-action\";\nimport { bindDataExplorerActions } from 'store/data-explorer/data-explorer-action';\nimport { formatDateTime } from \"common/formatters\";\nimport { unionize, ofType, UnionOf } from \"common/unionize\";\nimport { VirtualMachineLogins } from 'models/virtual-machines';\nimport { FilterBuilder } from \"services/api/filter-builder\";\nimport { ListResults } from \"services/common-service/common-service\";\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { PermissionLevel } from \"models/permission\";\nimport { deleteResources, updateResources } from 'store/resources/resources-actions';\nimport { Participant } from \"views-components/sharing-dialog/participant-select\";\nimport { initialize, reset } from \"redux-form\";\nimport { getUserDisplayName, UserResource } from \"models/user\";\nimport { progressIndicatorActions } from 'store/progress-indicator/progress-indicator-actions';\n\nexport const virtualMachinesActions = unionize({\n    SET_REQUESTED_DATE: ofType<string>(),\n    SET_VIRTUAL_MACHINES: ofType<ListResults<any>>(),\n    SET_LOGINS: ofType<VirtualMachineLogins>(),\n    SET_LINKS: ofType<ListResults<any>>()\n});\n\nexport type VirtualMachineActions = UnionOf<typeof virtualMachinesActions>;\n\nexport const VIRTUAL_MACHINES_PANEL = 'virtualMachinesPanel';\nexport const VIRTUAL_MACHINE_ATTRIBUTES_DIALOG = 'virtualMachineAttributesDialog';\nexport const VIRTUAL_MACHINE_REMOVE_DIALOG = 'virtualMachineRemoveDialog';\nexport const VIRTUAL_MACHINE_ADD_LOGIN_DIALOG = 'virtualMachineAddLoginDialog';\nexport const VIRTUAL_MACHINE_ADD_LOGIN_FORM = 'virtualMachineAddLoginForm';\nexport const VIRTUAL_MACHINE_REMOVE_LOGIN_DIALOG = 'virtualMachineRemoveLoginDialog';\n\nexport const VIRTUAL_MACHINE_UPDATE_LOGIN_UUID_FIELD = 'uuid';\nexport const VIRTUAL_MACHINE_ADD_LOGIN_VM_FIELD = 'vmUuid';\nexport const VIRTUAL_MACHINE_ADD_LOGIN_USER_FIELD = 'user';\nexport const VIRTUAL_MACHINE_ADD_LOGIN_GROUPS_FIELD = 'groups';\nexport const VIRTUAL_MACHINE_ADD_LOGIN_EXCLUDE = 'excludedPerticipants';\n\nexport const openUserVirtualMachines = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch<any>(navigateToUserVirtualMachines);\n    };\n\nexport const openAdminVirtualMachines = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const user = getState().auth.user;\n        if (user && user.isAdmin) {\n            dispatch<any>(navigateToAdminVirtualMachines);\n        } else {\n            dispatch<any>(navigateToRootProject);\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"You don't have permissions to view this page\", hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        }\n    };\n\nexport const openVirtualMachineAttributes = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const virtualMachineData = getState().virtualMachines.virtualMachines.items.find(it => it.uuid === uuid);\n        
dispatch(dialogActions.OPEN_DIALOG({ id: VIRTUAL_MACHINE_ATTRIBUTES_DIALOG, data: { virtualMachineData } }));\n    };\n\nconst loadRequestedDate = () =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const date = services.virtualMachineService.getRequestedDate();\n        dispatch(virtualMachinesActions.SET_REQUESTED_DATE(date));\n    };\n\nexport const loadVirtualMachinesAdminData = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            dispatch(progressIndicatorActions.START_WORKING(\"virtual-machines-admin\"));\n            dispatch<any>(loadRequestedDate());\n\n            const virtualMachines = await services.virtualMachineService.list();\n            dispatch(updateResources(virtualMachines.items));\n            dispatch(virtualMachinesActions.SET_VIRTUAL_MACHINES(virtualMachines));\n\n            const logins = await services.permissionService.list({\n                filters: new FilterBuilder()\n                    .addIn('head_uuid', virtualMachines.items.map(item => item.uuid))\n                    .addEqual('name', PermissionLevel.CAN_LOGIN)\n                    .getFilters(),\n                limit: 1000\n            });\n            dispatch(updateResources(logins.items));\n            dispatch(virtualMachinesActions.SET_LINKS(logins));\n\n            const users = await services.userService.list({\n                filters: new FilterBuilder()\n                    .addIn('uuid', logins.items.map(item => item.tailUuid))\n                    .getFilters(),\n                count: \"none\", // Necessary for federated queries\n                limit: 1000\n            });\n            dispatch(updateResources(users.items));\n\n            const allLogins = await services.virtualMachineService.getAllLogins();\n            dispatch(virtualMachinesActions.SET_LOGINS(allLogins));\n        } finally {\n            dispatch(progressIndicatorActions.STOP_WORKING(\"virtual-machines-admin\"));\n        }\n    };\n\nexport const loadVirtualMachinesUserData = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            dispatch(progressIndicatorActions.START_WORKING(\"virtual-machines-user\"));\n\n            dispatch<any>(loadRequestedDate());\n            const user = getState().auth.user;\n            const virtualMachines = await services.virtualMachineService.list();\n            const virtualMachinesUuids = virtualMachines.items.map(it => it.uuid);\n            const links = await services.linkService.list({\n                filters: new FilterBuilder()\n                    .addIn(\"head_uuid\", virtualMachinesUuids)\n                    .addEqual(\"tail_uuid\", user?.uuid)\n                    .getFilters()\n            });\n            dispatch(virtualMachinesActions.SET_VIRTUAL_MACHINES(virtualMachines));\n            dispatch(virtualMachinesActions.SET_LINKS(links));\n        } finally {\n            dispatch(progressIndicatorActions.STOP_WORKING(\"virtual-machines-user\"));\n        }\n    };\n\nexport const openAddVirtualMachineLoginDialog = (vmUuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        // Get login permissions of vm\n        const virtualMachines = await services.virtualMachineService.list();\n        dispatch(updateResources(virtualMachines.items));\n        const logins = await 
services.permissionService.list({\n            filters: new FilterBuilder()\n                .addIn('head_uuid', virtualMachines.items.map(item => item.uuid))\n                .addEqual('name', PermissionLevel.CAN_LOGIN)\n                .getFilters()\n        });\n        dispatch(updateResources(logins.items));\n\n        dispatch(initialize(VIRTUAL_MACHINE_ADD_LOGIN_FORM, {\n            [VIRTUAL_MACHINE_ADD_LOGIN_VM_FIELD]: vmUuid,\n            [VIRTUAL_MACHINE_ADD_LOGIN_GROUPS_FIELD]: [],\n        }));\n        dispatch(dialogActions.OPEN_DIALOG({ id: VIRTUAL_MACHINE_ADD_LOGIN_DIALOG, data: { excludedParticipants: logins.items.map(it => it.tailUuid) } }));\n    };\n\nexport const openEditVirtualMachineLoginDialog = (permissionUuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const login = await services.permissionService.get(permissionUuid);\n        const user = await services.userService.get(login.tailUuid);\n        dispatch(initialize(VIRTUAL_MACHINE_ADD_LOGIN_FORM, {\n            [VIRTUAL_MACHINE_UPDATE_LOGIN_UUID_FIELD]: permissionUuid,\n            [VIRTUAL_MACHINE_ADD_LOGIN_USER_FIELD]: { name: getUserDisplayName(user, true, true), uuid: login.tailUuid },\n            [VIRTUAL_MACHINE_ADD_LOGIN_GROUPS_FIELD]: login.properties.groups,\n        }));\n        dispatch(dialogActions.OPEN_DIALOG({ id: VIRTUAL_MACHINE_ADD_LOGIN_DIALOG, data: { updating: true } }));\n    };\n\nexport interface AddLoginFormData {\n    [VIRTUAL_MACHINE_UPDATE_LOGIN_UUID_FIELD]: string;\n    [VIRTUAL_MACHINE_ADD_LOGIN_VM_FIELD]: string;\n    [VIRTUAL_MACHINE_ADD_LOGIN_USER_FIELD]: Participant;\n    [VIRTUAL_MACHINE_ADD_LOGIN_GROUPS_FIELD]: string[];\n}\n\nexport const addUpdateVirtualMachineLogin = ({ uuid, vmUuid, user, groups }: AddLoginFormData) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        let userResource: UserResource | undefined;\n        try {\n            // Get user\n            userResource = await services.userService.get(user.uuid, false);\n        } catch (e) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Failed to get user details.\", hideDuration: 2000, kind: SnackbarKind.ERROR }));\n            return;\n        }\n        try {\n            if (uuid) {\n                const permission = await services.permissionService.update(uuid, {\n                    tailUuid: userResource.uuid,\n                    name: PermissionLevel.CAN_LOGIN,\n                    properties: {\n                        username: userResource.username,\n                        groups,\n                    }\n                });\n                dispatch(updateResources([permission]));\n            } else {\n                const permission = await services.permissionService.create({\n                    headUuid: vmUuid,\n                    tailUuid: userResource.uuid,\n                    name: PermissionLevel.CAN_LOGIN,\n                    properties: {\n                        username: userResource.username,\n                        groups,\n                    }\n                });\n                dispatch(updateResources([permission]));\n            }\n\n            dispatch(reset(VIRTUAL_MACHINE_ADD_LOGIN_FORM));\n            dispatch(dialogActions.CLOSE_DIALOG({ id: VIRTUAL_MACHINE_ADD_LOGIN_DIALOG }));\n            dispatch<any>(loadVirtualMachinesAdminData());\n\n            dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: 
`Permission updated`,\n                kind: SnackbarKind.SUCCESS\n            }));\n        } catch (e) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: e.message, hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        }\n    };\n\nexport const openRemoveVirtualMachineLoginDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: VIRTUAL_MACHINE_REMOVE_LOGIN_DIALOG,\n            data: {\n                title: 'Remove login permission',\n                text: 'Are you sure you want to remove this permission?',\n                confirmButtonLabel: 'Remove',\n                uuid\n            }\n        }));\n    };\n\nexport const removeVirtualMachineLogin = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        try {\n            await services.permissionService.delete(uuid);\n            dispatch<any>(deleteResources([uuid]));\n\n            dispatch<any>(loadVirtualMachinesAdminData());\n\n            dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: `Login permission removed`,\n                kind: SnackbarKind.SUCCESS\n            }));\n        } catch (e) {\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: e.message, hideDuration: 2000, kind: SnackbarKind.ERROR }));\n        }\n    };\n\nexport const saveRequestedDate = () =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const date = formatDateTime((new Date()).toISOString());\n        services.virtualMachineService.saveRequestedDate(date);\n        dispatch<any>(loadRequestedDate());\n    };\n\nexport const openRemoveVirtualMachineDialog = (uuid: string) =>\n    (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(dialogActions.OPEN_DIALOG({\n            id: VIRTUAL_MACHINE_REMOVE_DIALOG,\n            data: {\n                title: 'Remove virtual machine',\n                text: 'Are you sure you want to remove this virtual machine?',\n                confirmButtonLabel: 'Remove',\n                uuid\n            }\n        }));\n    };\n\nexport const removeVirtualMachine = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removing ...', kind: SnackbarKind.INFO }));\n        await services.virtualMachineService.delete(uuid);\n        dispatch(snackbarActions.OPEN_SNACKBAR({ message: 'Removed.', hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n        dispatch<any>(loadVirtualMachinesAdminData());\n    };\n\nconst virtualMachinesBindedActions = bindDataExplorerActions(VIRTUAL_MACHINES_PANEL);\n\nexport const loadVirtualMachinesPanel = () =>\n    (dispatch: Dispatch) => {\n        dispatch(virtualMachinesBindedActions.REQUEST_ITEMS());\n    };\n"
  },
  {
    "path": "services/workbench2/src/store/virtual-machines/virtual-machines-reducer.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { virtualMachinesActions, VirtualMachineActions } from 'store/virtual-machines/virtual-machines-actions';\nimport { ListResults } from 'services/common-service/common-service';\nimport { VirtualMachineLogins } from 'models/virtual-machines';\n\ninterface VirtualMachines {\n    date: string;\n    virtualMachines: ListResults<any>;\n    logins: VirtualMachineLogins;\n    links: ListResults<any>;\n}\n\nconst initialState: VirtualMachines = {\n    date: '',\n    virtualMachines: {\n        kind: '',\n        offset: 0,\n        limit: 0,\n        itemsAvailable: 0,\n        items: []\n    },\n    logins: {\n        kind: '',\n        items: []\n    },\n    links: {\n        kind: '',\n        offset: 0,\n        limit: 0,\n        itemsAvailable: 0,\n        items: []\n    }\n};\n\nexport const virtualMachinesReducer = (state = initialState, action: VirtualMachineActions): VirtualMachines =>\n    virtualMachinesActions.match(action, {\n        SET_REQUESTED_DATE: date => ({ ...state, date }),\n        SET_VIRTUAL_MACHINES: virtualMachines => ({ ...state, virtualMachines }),\n        SET_LOGINS: logins => ({ ...state, logins }),\n        SET_LINKS: links => ({ ...state, links }),\n        default: () => state\n    });\n"
  },
  {
    "path": "services/workbench2/src/store/vocabulary/vocabulary-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { ServiceRepository } from 'services/services';\nimport { propertiesActions } from 'store/properties/properties-actions';\nimport { VOCABULARY_PROPERTY_NAME, DEFAULT_VOCABULARY } from './vocabulary-selectors';\nimport { isVocabulary } from 'models/vocabulary';\n\nexport const loadVocabulary = async (dispatch: Dispatch, _: {}, { vocabularyService }: ServiceRepository) => {\n    const vocabulary = await vocabularyService.getVocabulary();\n    dispatch(propertiesActions.SET_PROPERTY({\n        key: VOCABULARY_PROPERTY_NAME,\n        value: isVocabulary(vocabulary)\n            ? vocabulary\n            : DEFAULT_VOCABULARY,\n    }));\n};\n"
  },
  {
    "path": "services/workbench2/src/store/vocabulary/vocabulary-selectors.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { PropertiesState, getProperty } from 'store/properties/properties';\nimport { Vocabulary } from 'models/vocabulary';\n\nexport const VOCABULARY_PROPERTY_NAME = 'vocabulary';\n\nexport const DEFAULT_VOCABULARY: Vocabulary = {\n    strict_tags: false,\n    tags: {},\n};\n\nexport const getVocabulary = (state: PropertiesState) =>\n    getProperty<Vocabulary>(VOCABULARY_PROPERTY_NAME)(state) || DEFAULT_VOCABULARY;\n"
  },
  {
    "path": "services/workbench2/src/store/workbench/workbench-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { getUserUuid } from \"common/getuser\";\nimport { loadDetailsPanel } from \"store/details-panel/details-panel-action\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\nimport { favoritePanelActions, loadFavoritePanel } from \"store/favorite-panel/favorite-panel-action\";\nimport { setIsProjectPanelTrashed } from \"store/project-panel/project-panel-action\";\nimport { getProjectPanelCurrentUuid } from \"store/project-panel/project-panel\";\nimport { projectPanelDataActions, projectPanelRunActions } from \"store/project-panel/project-panel-action-bind\";\nimport {\n    activateSidePanelTreeItem,\n    initSidePanelTree,\n    loadSidePanelTreeProjects,\n    SidePanelTreeCategory,\n    SIDE_PANEL_TREE,\n} from \"store/side-panel-tree/side-panel-tree-actions\";\nimport { updateResources } from \"store/resources/resources-actions\";\nimport { matchRootRoute } from \"routes/routes\";\nimport {\n    setGroupDetailsBreadcrumbs,\n    setGroupsBreadcrumbs,\n    setProcessBreadcrumbs,\n    setSharedWithMeBreadcrumbs,\n    setSidePanelBreadcrumbs,\n    setTrashBreadcrumbs,\n    setUsersBreadcrumbs,\n    setMyAccountBreadcrumbs,\n    setUserProfileBreadcrumbs,\n    setInstanceTypesBreadcrumbs,\n    setVirtualMachinesBreadcrumbs,\n    setVirtualMachinesAdminBreadcrumbs,\n    setRepositoriesBreadcrumbs,\n    setUserPreferencesBreadcrumbs,\n    setExternalCredentialsBreadcrumbs,\n} from \"store/breadcrumbs/breadcrumbs-actions\";\nimport { navigateTo, navigateToDashboard } from \"store/navigation/navigation-action\";\nimport { MoveToFormDialogData } from \"store/move-to-dialog/move-to-dialog\";\nimport { ServiceRepository } from \"services/services\";\nimport { getResource } from \"store/resources/resources\";\nimport * as projectCreateActions from \"store/projects/project-create-actions\";\nimport * as projectMoveActions from \"store/projects/project-move-actions\";\nimport * as projectUpdateActions from \"store/projects/project-update-actions\";\nimport * as collectionCreateActions from \"store/collections/collection-create-actions\";\nimport * as collectionCopyActions from \"store/collections/collection-copy-actions\";\nimport { COLLECTION_COPY_FORM_NAME } from \"store/collections/collection-copy-actions\";\nimport * as collectionMoveActions from \"store/collections/collection-move-actions\";\nimport * as processUpdateActions from \"store/processes/process-update-actions\";\nimport * as processCopyActions from \"store/processes/process-copy-actions\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\nimport { loadTrashPanel, trashPanelActions } from \"store/trash-panel/trash-panel-action\";\nimport { loadProcessPanel } from \"store/process-panel/process-panel-actions\";\nimport { loadSharedWithMePanel, sharedWithMePanelActions } from \"store/shared-with-me-panel/shared-with-me-panel-actions\";\nimport { CopyFormDialogData } from \"store/copy-dialog/copy-dialog\";\nimport { workflowPanelActions } from \"store/workflow-panel/workflow-panel-actions\";\nimport { loadSshKeysPanel } from \"store/auth/auth-action-ssh\";\nimport { loadLinkAccountPanel, linkAccountPanelActions } from \"store/link-account-panel/link-account-panel-actions\";\nimport { loadSiteManagerPanel } from \"store/auth/auth-action-session\";\nimport { progressIndicatorActions, 
WORKBENCH_LOADING_SCREEN } from \"store/progress-indicator/progress-indicator-actions\";\nimport { extractUuidKind, Resource, ResourceKind } from \"models/resource\";\nimport { FilterBuilder } from \"services/api/filter-builder\";\nimport { GroupContentsResource } from \"services/groups-service/groups-service\";\nimport { MatchCases, ofType, unionize, UnionOf } from \"common/unionize\";\nimport { loadRunProcessPanel } from \"store/run-process-panel/run-process-panel-actions\";\nimport { collectionPanelActions, loadCollectionPanel } from \"store/collection-panel/collection-panel-action\";\nimport { CollectionResource } from \"models/collection\";\nimport { WorkflowResource } from \"models/workflow\";\nimport { loadSearchResultsPanel, searchResultsPanelActions } from \"store/search-results-panel/search-results-panel-actions\";\nimport { loadVirtualMachinesPanel } from \"store/virtual-machines/virtual-machines-actions\";\nimport { loadRepositoriesPanel } from \"store/repositories/repositories-actions\";\nimport { loadKeepServicesPanel } from \"store/keep-services/keep-services-actions\";\nimport { loadUsersPanel, userBindedActions } from \"store/users/users-actions\";\nimport * as userProfilePanelActions from \"store/user-profile/user-profile-actions\";\nimport { linkPanelActions, loadLinkPanel } from \"store/link-panel/link-panel-actions\";\nimport { loadApiClientAuthorizationsPanel, apiClientAuthorizationsActions } from \"store/api-client-authorizations/api-client-authorizations-actions\";\nimport * as groupPanelActions from \"store/groups-panel/groups-panel-actions\";\nimport * as groupDetailsPanelActions from \"store/group-details-panel/group-details-panel-actions\";\nimport { loadPublicFavoritePanel, publicFavoritePanelActions } from \"store/public-favorites-panel/public-favorites-action\";\nimport {\n    loadCollectionsContentAddressPanel,\n    collectionsContentAddressActions,\n} from \"store/collections-content-address-panel/collections-content-address-panel-actions\";\nimport { subprocessPanelActions } from \"store/subprocess-panel/subprocess-panel-actions\";\nimport { deselectOne } from \"store/multiselect/multiselect-actions\";\nimport { treePickerActions } from \"store/tree-picker/tree-picker-actions\";\nimport { workflowProcessesPanelActions } from \"store/workflow-panel/workflow-panel-actions\";\nimport { loadAllProcessesPanel, allProcessesPanelActions } from \"../all-processes-panel/all-processes-panel-action\";\nimport { PROJECT_MOVE_FORM_NAME } from \"store/projects/project-move-actions\";\nimport { DataTableFetchMode } from \"components/data-table/data-table\";\nimport { selectedToArray, selectedToKindSet } from \"components/multiselect-toolbar/MultiselectToolbar.utils\";\nimport { matchProjectRoute } from \"routes/routes\";\n// When importing columns, make sure not to import anything that imports DataExplorer to avoid cyclic imports\nimport { sharedWithMePanelColumns } from \"views/shared-with-me-panel/shared-with-me-columns\";\nimport { workflowPanelColumns } from 'views/workflow-panel/workflow-panel-columns';\nimport { searchResultsPanelColumns } from 'views/search-results-panel/search-results-panel-columns';\nimport { linkPanelColumns } from 'views/link-panel/link-panel-columns';\nimport { userPanelColumns } from 'views/user-panel/user-panel-columns';\nimport { apiClientAuthorizationPanelColumns } from 'views/api-client-authorization-panel/api-client-authorization-panel-columns';\nimport { groupsPanelColumns } from 'views/groups-panel/groups-panel-columns';\nimport { 
groupDetailsMembersPanelColumns, groupDetailsPermissionsPanelColumns } from \"views/group-details-panel/group-details-panel-columns\";\nimport { publicFavoritePanelColumns } from 'views/public-favorites-panel/public-favorites-panel-columns';\nimport { collectionContentAddressPanelColumns } from 'views/collection-content-address-panel/collection-content-address-panel-columns';\nimport { subprocessPanelColumns } from 'views/subprocess-panel/subprocess-panel-columns';\nimport { allProcessesPanelColumns } from \"views/all-processes-panel/all-processes-panel-columns\";\nimport { userProfileGroupsColumns } from 'views/user-profile-panel/user-profile-panel-columns';\nimport { workflowProcessesPanelColumns } from 'views/workflow-panel/workflow-processes-panel-columns';\nimport { trashPanelColumns } from 'views/trash-panel/trash-panel-columns';\nimport { projectPanelDataColumns, projectPanelRunColumns } from \"views/project-panel/project-panel-columns\";\nimport { favoritePanelColumns } from 'views/favorite-panel/favorite-panel-columns';\nimport { loadUserPreferencesPanel } from \"store/user-preferences/user-preferences-actions\";\nimport { loadExternalCredentials } from \"store/external-credentials/external-credentials-actions\";\nimport { externalCredentialsActions } from \"store/external-credentials/external-credentials-actions\";\nimport { externalCredentialsPanelColumns } from \"views/external-credentials-panel/external-credentials-panel-columns\";\nimport { loadRecentWorkflows } from \"store/recent-wf-runs/recent-wf-runs-action\";\nimport { loadRecentlyVisited } from \"store/recently-visited/recently-visited-actions\";\nimport { loadFavoritePins } from \"store/favorite-pins/favorite-pins-middleware-service\";\nimport { COLLECTION_MOVE_FORM_NAME } from \"store/collections/collection-move-actions\";\nimport { getCommonResourceServiceError, CommonResourceServiceError } from \"services/common-service/common-resource-service\";\n\nexport const handleFirstTimeLoad = (action: any) => async (dispatch: Dispatch<any>, getState: () => RootState) => {\n    try {\n        await dispatch(action);\n    } catch (e) {\n        dispatch(snackbarActions.OPEN_SNACKBAR({\n            message: \"Error \" + e,\n            hideDuration: 8000,\n            kind: SnackbarKind.WARNING,\n        }));\n    } finally {\n        if (getState().progressIndicator.includes(WORKBENCH_LOADING_SCREEN)) {\n            dispatch(progressIndicatorActions.STOP_WORKING(WORKBENCH_LOADING_SCREEN));\n        }\n    }\n};\n\nexport const loadWorkbench = () => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    dispatch(progressIndicatorActions.START_WORKING(WORKBENCH_LOADING_SCREEN));\n    const { auth, router } = getState();\n    const { user } = auth;\n    if (user) {\n        dispatch(projectPanelDataActions.SET_COLUMNS({ columns: projectPanelDataColumns }));\n        dispatch(projectPanelRunActions.SET_COLUMNS({ columns: projectPanelRunColumns }));\n        dispatch(favoritePanelActions.SET_COLUMNS({ columns: favoritePanelColumns }));\n        dispatch(\n            allProcessesPanelActions.SET_COLUMNS({\n                columns: allProcessesPanelColumns,\n            })\n        );\n        dispatch(\n            publicFavoritePanelActions.SET_COLUMNS({\n                columns: publicFavoritePanelColumns,\n            })\n        );\n        dispatch(trashPanelActions.SET_COLUMNS({ columns: trashPanelColumns }));\n        dispatch(sharedWithMePanelActions.SET_COLUMNS({ columns: sharedWithMePanelColumns 
}));\n        dispatch(workflowPanelActions.SET_COLUMNS({ columns: workflowPanelColumns }));\n        dispatch(\n            searchResultsPanelActions.SET_FETCH_MODE({\n                fetchMode: DataTableFetchMode.INFINITE,\n            })\n        );\n        dispatch(\n            searchResultsPanelActions.SET_COLUMNS({\n                columns: searchResultsPanelColumns,\n            })\n        );\n        dispatch(userBindedActions.SET_COLUMNS({ columns: userPanelColumns }));\n        dispatch(\n            groupPanelActions.GroupsPanelActions.SET_COLUMNS({\n                columns: groupsPanelColumns,\n            })\n        );\n        dispatch(\n            groupDetailsPanelActions.GroupMembersPanelActions.SET_COLUMNS({\n                columns: groupDetailsMembersPanelColumns,\n            })\n        );\n        dispatch(\n            groupDetailsPanelActions.GroupPermissionsPanelActions.SET_COLUMNS({\n                columns: groupDetailsPermissionsPanelColumns,\n            })\n        );\n        dispatch(\n            userProfilePanelActions.UserProfileGroupsActions.SET_COLUMNS({\n                columns: userProfileGroupsColumns,\n            })\n        );\n        dispatch(linkPanelActions.SET_COLUMNS({ columns: linkPanelColumns }));\n        dispatch(\n            apiClientAuthorizationsActions.SET_COLUMNS({\n                columns: apiClientAuthorizationPanelColumns,\n            })\n        );\n        dispatch(\n            collectionsContentAddressActions.SET_COLUMNS({\n                columns: collectionContentAddressPanelColumns,\n            })\n        );\n        dispatch(subprocessPanelActions.SET_COLUMNS({ columns: subprocessPanelColumns }));\n        dispatch(workflowProcessesPanelActions.SET_COLUMNS({ columns: workflowProcessesPanelColumns }));\n\n        if (services.linkAccountService.getAccountToLink()) {\n            dispatch(linkAccountPanelActions.HAS_SESSION_DATA());\n        }\n        dispatch<any>(externalCredentialsActions.SET_COLUMNS({ columns: externalCredentialsPanelColumns }));\n\n        dispatch<any>(initSidePanelTree());\n        if (router.location) {\n            const match = matchRootRoute(router.location.pathname);\n            if (match) {\n                dispatch<any>(navigateToDashboard);\n                if (getState().progressIndicator.includes(WORKBENCH_LOADING_SCREEN)) {\n                    dispatch(progressIndicatorActions.STOP_WORKING(WORKBENCH_LOADING_SCREEN));\n                }\n\n            }\n        }\n    } else {\n        dispatch(userIsNotAuthenticated);\n    }\n};\n\nexport const loadDashboard = () => handleFirstTimeLoad((dispatch: Dispatch) => {\n    dispatch<any>(loadRecentWorkflows());\n    dispatch<any>(loadRecentlyVisited());\n    dispatch<any>(loadFavoritePins());\n    dispatch<any>(activateSidePanelTreeItem(SidePanelTreeCategory.DASHBOARD));\n    dispatch<any>(setSidePanelBreadcrumbs(SidePanelTreeCategory.DASHBOARD));\n});\n\nexport const loadFavorites = () =>\n    handleFirstTimeLoad((dispatch: Dispatch) => {\n        dispatch<any>(activateSidePanelTreeItem(SidePanelTreeCategory.FAVORITES));\n        dispatch<any>(loadFavoritePanel());\n        dispatch<any>(setSidePanelBreadcrumbs(SidePanelTreeCategory.FAVORITES));\n    });\n\nexport const loadCollectionContentAddress = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {\n    await dispatch(loadCollectionsContentAddressPanel());\n});\n\nexport const loadTrash = () =>\n    handleFirstTimeLoad((dispatch: Dispatch) => {\n        
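// Activate the Trash entry in the side panel tree, load its contents, and set the breadcrumbs.\n        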
dispatch<any>(activateSidePanelTreeItem(SidePanelTreeCategory.TRASH));\n        dispatch<any>(loadTrashPanel());\n        dispatch<any>(setSidePanelBreadcrumbs(SidePanelTreeCategory.TRASH));\n    });\n\nexport const loadAllProcesses = () =>\n    handleFirstTimeLoad((dispatch: Dispatch) => {\n        dispatch<any>(activateSidePanelTreeItem(SidePanelTreeCategory.ALL_PROCESSES));\n        dispatch<any>(loadAllProcessesPanel());\n        dispatch<any>(setSidePanelBreadcrumbs(SidePanelTreeCategory.ALL_PROCESSES));\n    });\n\nexport const loadProject = (uuid: string) =>\n    handleFirstTimeLoad(async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const userUuid = getUserUuid(getState());\n        dispatch(setIsProjectPanelTrashed(false));\n        if (!userUuid) {\n            return;\n        }\n        try {\n            dispatch(progressIndicatorActions.START_WORKING(uuid));\n            if (extractUuidKind(uuid) === ResourceKind.USER && userUuid !== uuid) {\n                // Load another user's home projects\n                dispatch(finishLoadingProject(uuid));\n                dispatch<any>(setSidePanelBreadcrumbs(uuid));\n            } else if (userUuid !== uuid) {\n                await dispatch(finishLoadingProject(uuid));\n                const match = await loadGroupContentsResource({\n                    uuid,\n                    userUuid,\n                    services,\n                });\n                match({\n                    OWNED: async () => {\n                        await dispatch(activateSidePanelTreeItem(uuid));\n                        dispatch<any>(setSidePanelBreadcrumbs(uuid));\n                    },\n                    SHARED: async () => {\n                        await dispatch(activateSidePanelTreeItem(uuid));\n                        dispatch<any>(setSharedWithMeBreadcrumbs(uuid));\n                    },\n                    TRASHED: async () => {\n                        await dispatch(activateSidePanelTreeItem(SidePanelTreeCategory.TRASH));\n                        dispatch<any>(setTrashBreadcrumbs(uuid));\n                        dispatch(setIsProjectPanelTrashed(true));\n                    },\n                });\n            } else {\n                await dispatch(finishLoadingProject(userUuid));\n                await dispatch(activateSidePanelTreeItem(userUuid));\n                dispatch<any>(setSidePanelBreadcrumbs(userUuid));\n            }\n        } finally {\n            dispatch(progressIndicatorActions.STOP_WORKING(uuid));\n        }\n    });\n\nexport const createProjectRunner = (data: projectCreateActions.ProjectCreateFormDialogData, setSubmitErr: (err: string) => void) => async (dispatch: Dispatch) => {\n    const newProject = await dispatch<any>(projectCreateActions.createProject(data, setSubmitErr));\n    if (newProject) {\n        dispatch(\n            snackbarActions.OPEN_SNACKBAR({\n                message: \"Project has been successfully created.\",\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS,\n            })\n        );\n        await dispatch<any>(loadSidePanelTreeProjects(newProject.ownerUuid));\n        dispatch<any>(navigateTo(newProject.uuid));\n    }\n};\n\nexport const moveProjectRunner =\n    (data: MoveToFormDialogData, isSecondaryMove = false) =>\n        async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n            dispatch(progressIndicatorActions.START_WORKING(PROJECT_MOVE_FORM_NAME));\n
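            // Move everything checked in the multiselect; other resource kinds are handed off to secondaryMove below.\n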
            const checkedList = getState().multiselect.checkedList;\n            const uuidsToMove: string[] = selectedToArray(checkedList);\n\n            // if no items in checked list, default to normal context menu behavior\n            if (!isSecondaryMove && !uuidsToMove.length) uuidsToMove.push(data.uuid);\n\n            const sourceUuid = getResource(data.uuid)(getState().resources)?.ownerUuid;\n            const destinationUuid = data.ownerUuid;\n\n            const projectsToMove: MoveableResource[] = uuidsToMove\n                .map(uuid => getResource(uuid)(getState().resources) as MoveableResource)\n                .filter(resource => resource.kind === ResourceKind.PROJECT);\n\n            for (const project of projectsToMove) {\n                await moveSingleProject(project);\n            }\n\n            // only propagate if this call is the original\n            if (!isSecondaryMove) {\n                const kindsToMove: Set<string> = selectedToKindSet(checkedList);\n                kindsToMove.delete(ResourceKind.PROJECT);\n\n                kindsToMove.forEach(kind => {\n                    secondaryMove[kind](data, true)(dispatch, getState, services);\n                });\n            }\n\n            async function moveSingleProject(project: MoveableResource) {\n                try {\n                    const oldProject: MoveToFormDialogData = { name: project.name, uuid: project.uuid, ownerUuid: data.ownerUuid };\n                    const oldOwnerUuid = oldProject.ownerUuid;\n                    const movedProject = await dispatch<any>(projectMoveActions.moveProject(oldProject));\n                    if (movedProject) {\n                        dispatch(\n                            snackbarActions.OPEN_SNACKBAR({\n                                message: \"Project has been moved\",\n                                hideDuration: 2000,\n                                kind: SnackbarKind.SUCCESS,\n                            })\n                        );\n                        await dispatch<any>(reloadProjectMatchingUuid([oldOwnerUuid, movedProject.ownerUuid, movedProject.uuid]));\n                    }\n                } catch (e) {\n                    dispatch(\n                        snackbarActions.OPEN_SNACKBAR({\n                            message: !!(project as any).frozenByUuid ? 'Could not move frozen project.' 
: e.message,\n                            hideDuration: 2000,\n                            kind: SnackbarKind.ERROR,\n                        })\n                    );\n                }\n            }\n            if (sourceUuid) await dispatch<any>(loadSidePanelTreeProjects(sourceUuid));\n            await dispatch<any>(loadSidePanelTreeProjects(destinationUuid));\n            dispatch(dialogActions.CLOSE_DIALOG({ id: PROJECT_MOVE_FORM_NAME }));\n            dispatch(progressIndicatorActions.STOP_WORKING(PROJECT_MOVE_FORM_NAME));\n        };\n\nexport const updateProjectRunner = (data: projectUpdateActions.ProjectUpdateFormDialogData, setSubmitErr: (errMsg: string) => void) => async (dispatch: Dispatch) => {\n    const updatedProject = await dispatch<any>(projectUpdateActions.updateProject(data, setSubmitErr));\n    if (updatedProject) {\n        dispatch(\n            snackbarActions.OPEN_SNACKBAR({\n                message: \"Project has been successfully updated.\",\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS,\n            })\n        );\n        await dispatch<any>(loadSidePanelTreeProjects(updatedProject.ownerUuid));\n        dispatch<any>(reloadProjectMatchingUuid([updatedProject.ownerUuid, updatedProject.uuid]));\n    }\n};\n\nexport const updateGroupRunner = (data: projectUpdateActions.ProjectUpdateFormDialogData, setSubmitErr: (errMsg: string) => void) => async (dispatch: Dispatch) => {\n    const updatedGroup = await dispatch<any>(groupPanelActions.updateGroup(data, setSubmitErr));\n    if (updatedGroup) {\n        dispatch(\n            snackbarActions.OPEN_SNACKBAR({\n                message: \"Group has been successfully updated.\",\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS,\n            })\n        );\n        await dispatch<any>(loadSidePanelTreeProjects(updatedGroup.ownerUuid));\n        dispatch<any>(reloadProjectMatchingUuid([updatedGroup.ownerUuid, updatedGroup.uuid]));\n    }\n};\n\nexport const loadCollection = (uuid: string) =>\n    handleFirstTimeLoad(async (dispatch: Dispatch<any>, getState: () => RootState, services: ServiceRepository) => {\n        const userUuid = getUserUuid(getState());\n        try {\n            dispatch(progressIndicatorActions.START_WORKING(uuid));\n            if (userUuid) {\n                const match = await loadGroupContentsResource({\n                    uuid,\n                    userUuid,\n                    services,\n                });\n                let collection: CollectionResource | undefined;\n                let breadcrumbfunc:\n                    | ((uuid: string) => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => Promise<void>)\n                    | undefined;\n                let sidepanel: string | undefined;\n                match({\n                    OWNED: thecollection => {\n                        collection = thecollection as CollectionResource;\n                        sidepanel = collection.ownerUuid;\n                        breadcrumbfunc = setSidePanelBreadcrumbs;\n                    },\n                    SHARED: thecollection => {\n                        collection = thecollection as CollectionResource;\n                        sidepanel = collection.ownerUuid;\n                        breadcrumbfunc = setSharedWithMeBreadcrumbs;\n                    },\n                    TRASHED: thecollection => {\n                        collection = thecollection as CollectionResource;\n            
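            // Trashed collections are shown under the Trash category with root-level trash breadcrumbs.\n            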
            sidepanel = SidePanelTreeCategory.TRASH;\n                        breadcrumbfunc = () => setTrashBreadcrumbs(\"\");\n                    },\n                });\n                if (collection && breadcrumbfunc && sidepanel) {\n                    dispatch(updateResources([collection]));\n                    await dispatch<any>(finishLoadingProject(collection.ownerUuid));\n                    dispatch(collectionPanelActions.SET_COLLECTION(collection));\n                    await dispatch(activateSidePanelTreeItem(sidepanel));\n                    dispatch(breadcrumbfunc(collection.ownerUuid));\n                    dispatch(loadCollectionPanel(collection.uuid));\n                }\n            }\n        } finally {\n            dispatch(progressIndicatorActions.STOP_WORKING(uuid));\n        }\n    });\n\nexport const createCollectionRunner = (data: collectionCreateActions.CollectionCreateFormDialogData, setSubmitErr: (errMsg: string) => void) => async (dispatch: Dispatch) => {\n    const collection = await dispatch<any>(collectionCreateActions.createCollection(data, setSubmitErr));\n    if (collection) {\n        dispatch(\n            snackbarActions.OPEN_SNACKBAR({\n                message: \"Collection has been successfully created.\",\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS,\n            })\n        );\n        dispatch<any>(updateResources([collection]));\n        dispatch<any>(navigateTo(collection.uuid));\n    }\n};\n\nexport const copyCollectionRunner = (data: CopyFormDialogData) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    const checkedList = getState().multiselect.checkedList;\n    const uuidsToCopy: string[] = selectedToArray(checkedList);\n    dispatch(progressIndicatorActions.START_WORKING(COLLECTION_COPY_FORM_NAME));\n\n    // if no items in checked list && no items passed in, default to normal context menu behavior\n    if (!uuidsToCopy.length) uuidsToCopy.push(data.uuid);\n\n    const collectionsToCopy: CollectionCopyResource[] = uuidsToCopy\n        .map(uuid => getResource(uuid)(getState().resources) as CollectionCopyResource)\n        .filter(resource => resource.kind === ResourceKind.COLLECTION);\n\n    for (const collection of collectionsToCopy) {\n        await copySingleCollection({ ...collection, ownerUuid: data.ownerUuid } as CollectionCopyResource);\n    }\n\n    async function copySingleCollection(sourceCollection: CollectionCopyResource) {\n        const newName = collectionsToCopy.length === 1 ? 
data.name : `Copy of: ${sourceCollection.name}`;\n        try {\n            const newCollection = await dispatch<any>(\n                collectionCopyActions.copyCollection({\n                    ...sourceCollection,\n                    name: newName,\n                })\n            );\n            if (sourceCollection && newCollection) {\n                await dispatch<any>(reloadProjectMatchingUuid([sourceCollection.uuid]));\n                dispatch(\n                    snackbarActions.OPEN_SNACKBAR({\n                        message: \"Collection has been copied.\",\n                        hideDuration: 3000,\n                        kind: SnackbarKind.SUCCESS,\n                        link: newCollection.ownerUuid,\n                    })\n                );\n                dispatch<any>(deselectOne(sourceCollection.uuid));\n            }\n        } catch (e) {\n            const error = getCommonResourceServiceError(e);\n            if (error === CommonResourceServiceError.UNIQUE_NAME_VIOLATION) {\n                dispatch(\n                    snackbarActions.OPEN_SNACKBAR({\n                        message: \"A collection with the same name already exists in the target project.\",\n                        hideDuration: 3000,\n                        kind: SnackbarKind.ERROR,\n                    })\n                );\n            } else {\n                dispatch(\n                    snackbarActions.OPEN_SNACKBAR({\n                        message: e.message,\n                        hideDuration: 2000,\n                        kind: SnackbarKind.ERROR,\n                    })\n                );\n            }\n        }\n        dispatch(dialogActions.CLOSE_DIALOG({ id: COLLECTION_COPY_FORM_NAME }));\n    }\n    dispatch(projectPanelDataActions.REQUEST_ITEMS());\n    dispatch(progressIndicatorActions.STOP_WORKING(COLLECTION_COPY_FORM_NAME));\n};\n\nexport const moveCollectionRunner =\n    (data: MoveToFormDialogData, isSecondaryMove = false) =>\n        async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n            const state = getState();\n            dispatch(progressIndicatorActions.START_WORKING(COLLECTION_MOVE_FORM_NAME));\n            const checkedList = state.multiselect.checkedList;\n            const uuidsToMove: string[] = selectedToArray(checkedList);\n\n            // if no items in checked list && no items passed in, default to normal context menu behavior\n            if (!isSecondaryMove && !uuidsToMove.length) uuidsToMove.push(data.uuid);\n\n            const collectionsToMove: MoveableResource[] = uuidsToMove\n                .map(uuid => getResource(uuid)(state.resources) as MoveableResource)\n                .filter(resource => resource.kind === ResourceKind.COLLECTION);\n\n            for (const collection of collectionsToMove) {\n                await moveSingleCollection(collection);\n            }\n\n            // only propagate if this call is the original\n            if (!isSecondaryMove) {\n                const kindsToMove: Set<string> = selectedToKindSet(checkedList);\n                kindsToMove.delete(ResourceKind.COLLECTION);\n\n                kindsToMove.forEach(kind => {\n                    secondaryMove[kind](data, true)(dispatch, getState, services);\n                });\n            }\n\n            async function moveSingleCollection(collection: MoveableResource) {\n                try {\n                    const oldCollection: MoveToFormDialogData = { name: collection.name, uuid: 
collection.uuid, ownerUuid: data.ownerUuid };\n                    const movedCollection = await dispatch<any>(collectionMoveActions.moveCollection(oldCollection));\n                    dispatch<any>(updateResources([movedCollection]));\n                    if (matchProjectRoute(state.router.location.pathname)) {\n                        dispatch<any>(reloadProjectMatchingUuid([movedCollection.ownerUuid]));\n                    }\n                    dispatch(\n                        snackbarActions.OPEN_SNACKBAR({\n                            message: \"Collection has been moved.\",\n                            hideDuration: 2000,\n                            kind: SnackbarKind.SUCCESS,\n                        })\n                    );\n                } catch (e) {\n                    dispatch(\n                        snackbarActions.OPEN_SNACKBAR({\n                            message: e.message,\n                            hideDuration: 2000,\n                            kind: SnackbarKind.ERROR,\n                        })\n                    );\n                }\n            }\n\n            dispatch<any>(loadSidePanelTreeProjects(data.ownerUuid));\n            dispatch(progressIndicatorActions.STOP_WORKING(COLLECTION_MOVE_FORM_NAME));\n        };\n\nexport const loadProcess = (uuid: string) =>\n    handleFirstTimeLoad(async (dispatch: Dispatch, getState: () => RootState) => {\n        try {\n            dispatch(progressIndicatorActions.START_WORKING(uuid));\n            const process = await dispatch<any>(loadProcessPanel(uuid));\n            if (process) {\n                await dispatch<any>(finishLoadingProject(process.containerRequest.ownerUuid));\n                await dispatch<any>(activateSidePanelTreeItem(process.containerRequest.ownerUuid));\n                dispatch<any>(setProcessBreadcrumbs(uuid));\n                dispatch<any>(loadDetailsPanel(uuid));\n            }\n        } finally {\n            dispatch(progressIndicatorActions.STOP_WORKING(uuid));\n        }\n    });\n\nexport const loadRegisteredWorkflow = (uuid: string) =>\n    handleFirstTimeLoad(async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const userUuid = getUserUuid(getState());\n        if (userUuid) {\n            const match = await loadGroupContentsResource({\n                uuid,\n                userUuid,\n                services,\n            });\n            let workflow: WorkflowResource | undefined;\n            let breadcrumbfunc:\n                | ((uuid: string) => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => Promise<void>)\n                | undefined;\n            match({\n                OWNED: async theworkflow => {\n                    workflow = theworkflow as WorkflowResource;\n                    breadcrumbfunc = setSidePanelBreadcrumbs;\n                },\n                SHARED: async theworkflow => {\n                    workflow = theworkflow as WorkflowResource;\n                    breadcrumbfunc = setSharedWithMeBreadcrumbs;\n                },\n                TRASHED: () => { },\n            });\n            if (workflow && breadcrumbfunc) {\n                dispatch(updateResources([workflow]));\n                await dispatch<any>(finishLoadingProject(workflow.ownerUuid));\n                await dispatch<any>(activateSidePanelTreeItem(workflow.ownerUuid));\n                dispatch<any>(breadcrumbfunc(workflow.ownerUuid));\n                
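// load this workflow's container requests; REQUEST_ITEMS is served by WorkflowProcessesMiddlewareService\n                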
dispatch(workflowProcessesPanelActions.REQUEST_ITEMS());\n            }\n        }\n    });\n\nexport const updateProcessRunner = (data: processUpdateActions.ProcessUpdateFormDialogData) => async (dispatch: Dispatch) => {\n    try {\n        const process = await dispatch<any>(processUpdateActions.updateProcess(data));\n        if (process) {\n            dispatch(\n                snackbarActions.OPEN_SNACKBAR({\n                    message: \"Process has been successfully updated.\",\n                    hideDuration: 2000,\n                    kind: SnackbarKind.SUCCESS,\n                })\n            );\n            dispatch<any>(updateResources([process]));\n            dispatch<any>(reloadProjectMatchingUuid([process.ownerUuid]));\n        }\n    } catch (e) {\n        dispatch(\n            snackbarActions.OPEN_SNACKBAR({\n                message: e.message,\n                hideDuration: 2000,\n                kind: SnackbarKind.ERROR,\n            })\n        );\n    }\n};\n\nexport const copyProcessRunner = (data: CopyFormDialogData) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    try {\n        const process = await dispatch<any>(processCopyActions.copyProcess(data));\n        dispatch<any>(updateResources([process]));\n        dispatch<any>(reloadProjectMatchingUuid([process.ownerUuid]));\n        dispatch(\n            snackbarActions.OPEN_SNACKBAR({\n                message: \"Process has been copied.\",\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS,\n            })\n        );\n        dispatch<any>(navigateTo(process.uuid));\n    } catch (e) {\n        dispatch(\n            snackbarActions.OPEN_SNACKBAR({\n                message: e.message,\n                hideDuration: 2000,\n                kind: SnackbarKind.ERROR,\n            })\n        );\n    }\n};\n\nexport const resourceIsNotLoaded = (uuid: string) =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: `Resource identified by ${uuid} is not loaded.`,\n        kind: SnackbarKind.ERROR,\n    });\n\nexport const userIsNotAuthenticated = snackbarActions.OPEN_SNACKBAR({\n    message: \"User is not authenticated\",\n    kind: SnackbarKind.ERROR,\n});\n\nexport const couldNotLoadUser = snackbarActions.OPEN_SNACKBAR({\n    message: \"Could not load user\",\n    kind: SnackbarKind.ERROR,\n});\n\nexport const reloadProjectMatchingUuid =\n    (matchingUuids: string[]) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const currentProjectPanelUuid = getProjectPanelCurrentUuid(getState());\n        if (currentProjectPanelUuid && matchingUuids.some(uuid => uuid === currentProjectPanelUuid)) {\n            dispatch<any>(loadProject(currentProjectPanelUuid));\n        }\n    };\n\nexport const loadSharedWithMe = handleFirstTimeLoad(async (dispatch: Dispatch) => {\n    dispatch<any>(loadSharedWithMePanel());\n    await dispatch<any>(activateSidePanelTreeItem(SidePanelTreeCategory.SHARED_WITH_ME));\n    await dispatch<any>(setSidePanelBreadcrumbs(SidePanelTreeCategory.SHARED_WITH_ME));\n});\n\nexport const loadRunProcess = handleFirstTimeLoad(async (dispatch: Dispatch) => {\n    await dispatch<any>(loadRunProcessPanel());\n});\n\nexport const loadPublicFavorites = () =>\n    handleFirstTimeLoad((dispatch: Dispatch) => {\n        dispatch<any>(activateSidePanelTreeItem(SidePanelTreeCategory.PUBLIC_FAVORITES));\n        dispatch<any>(loadPublicFavoritePanel());\n        
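// point the breadcrumbs at the Public Favorites category\n        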
dispatch<any>(setSidePanelBreadcrumbs(SidePanelTreeCategory.PUBLIC_FAVORITES));\n    });\n\nexport const loadSearchResults = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {\n    await dispatch(loadSearchResultsPanel());\n});\n\nexport const loadLinks = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {\n    await dispatch(loadLinkPanel());\n});\n\nexport const loadVirtualMachines = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {\n    await dispatch(loadVirtualMachinesPanel());\n    dispatch(setVirtualMachinesBreadcrumbs());\n    dispatch<any>(activateSidePanelTreeItem(SidePanelTreeCategory.SHELL_ACCESS));\n});\n\nexport const loadVirtualMachinesAdmin = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {\n    await dispatch(loadVirtualMachinesPanel());\n    dispatch(setVirtualMachinesAdminBreadcrumbs());\n    dispatch(treePickerActions.DEACTIVATE_TREE_PICKER_NODE({ pickerId: SIDE_PANEL_TREE }))\n});\n\nexport const loadRepositories = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {\n    await dispatch(loadRepositoriesPanel());\n    dispatch(setRepositoriesBreadcrumbs());\n});\n\nexport const loadSshKeys = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {\n    await dispatch(loadSshKeysPanel());\n});\n\nexport const loadInstanceTypes = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {\n    dispatch<any>(activateSidePanelTreeItem(SidePanelTreeCategory.INSTANCE_TYPES));\n    dispatch(setInstanceTypesBreadcrumbs());\n});\n\nexport const loadSiteManager = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {\n    await dispatch(loadSiteManagerPanel());\n});\n\nexport const loadUserProfile = (userUuid?: string) =>\n    handleFirstTimeLoad((dispatch: Dispatch<any>) => {\n        if (userUuid) {\n            dispatch(setUserProfileBreadcrumbs(userUuid));\n            dispatch(userProfilePanelActions.loadUserProfilePanel(userUuid));\n        } else {\n            dispatch(setMyAccountBreadcrumbs());\n            dispatch(userProfilePanelActions.loadUserProfilePanel());\n        }\n    });\n\nexport const loadUserPreferences = () =>\n    handleFirstTimeLoad((dispatch: Dispatch<any>) => {\n        dispatch(setUserPreferencesBreadcrumbs());\n        dispatch(loadUserPreferencesPanel());\n    });\n\nexport const loadLinkAccount = handleFirstTimeLoad((dispatch: Dispatch<any>) => {\n    dispatch(loadLinkAccountPanel());\n});\n\nexport const loadKeepServices = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {\n    await dispatch(loadKeepServicesPanel());\n});\n\nexport const loadUsers = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {\n    await dispatch(loadUsersPanel());\n    dispatch(setUsersBreadcrumbs());\n});\n\nexport const loadApiClientAuthorizations = handleFirstTimeLoad(async (dispatch: Dispatch<any>) => {\n    await dispatch(loadApiClientAuthorizationsPanel());\n});\n\nexport const loadGroupsPanel = handleFirstTimeLoad((dispatch: Dispatch<any>) => {\n    dispatch<any>(activateSidePanelTreeItem(SidePanelTreeCategory.GROUPS));\n    dispatch(setGroupsBreadcrumbs());\n    dispatch(groupPanelActions.loadGroupsPanel());\n});\n\nexport const loadGroupDetailsPanel = (groupUuid: string) =>\n    handleFirstTimeLoad((dispatch: Dispatch<any>) => {\n        dispatch(setGroupDetailsBreadcrumbs(groupUuid));\n        dispatch(groupDetailsPanelActions.loadGroupDetailsPanel(groupUuid));\n    });\n\nexport const loadExternalCredentialsPanel = () =>\n    handleFirstTimeLoad((dispatch: Dispatch<any>) => {\n        
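// highlight the External Credentials entry in the side panel tree before loading the panel\n        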
dispatch<any>(activateSidePanelTreeItem(SidePanelTreeCategory.EXTERNAL_CREDENTIALS));\n        dispatch(setExternalCredentialsBreadcrumbs());\n        dispatch<any>(loadExternalCredentials());\n    });\n\nconst finishLoadingProject = (project: GroupContentsResource | string) => async (dispatch: Dispatch<any>) => {\n    const uuid = typeof project === \"string\" ? project : project.uuid;\n    dispatch(loadDetailsPanel(uuid));\n    if (typeof project !== \"string\") {\n        dispatch(updateResources([project]));\n    }\n};\n\nconst loadGroupContentsResource = async (params: { uuid: string; userUuid: string; services: ServiceRepository }) => {\n    const filters = new FilterBuilder().addEqual(\"uuid\", params.uuid).getFilters();\n    const { items } = await params.services.groupsService.contents(params.userUuid, {\n        filters,\n        recursive: true,\n        includeTrash: true,\n    });\n    const resource = items.shift();\n    let handler: GroupContentsHandler;\n    if (resource) {\n        handler =\n            (resource.kind === ResourceKind.COLLECTION || resource.kind === ResourceKind.PROJECT) && resource.isTrashed\n                ? groupContentsHandlers.TRASHED(resource)\n                : groupContentsHandlers.OWNED(resource);\n    } else {\n        const kind = extractUuidKind(params.uuid);\n        let resource: GroupContentsResource;\n        if (kind === ResourceKind.COLLECTION) {\n            resource = await params.services.collectionService.get(params.uuid);\n        } else if (kind === ResourceKind.PROJECT) {\n            resource = await params.services.projectService.get(params.uuid);\n        } else if (kind === ResourceKind.WORKFLOW) {\n            resource = await params.services.workflowService.get(params.uuid);\n        } else if (kind === ResourceKind.CONTAINER_REQUEST) {\n            resource = await params.services.containerRequestService.get(params.uuid);\n        } else {\n            throw new Error(\"loadGroupContentsResource unsupported kind \" + kind);\n        }\n        handler = groupContentsHandlers.SHARED(resource);\n    }\n    return (cases: MatchCases<typeof groupContentsHandlersRecord, GroupContentsHandler, void>) => groupContentsHandlers.match(handler, cases);\n};\n\nconst groupContentsHandlersRecord = {\n    TRASHED: ofType<GroupContentsResource>(),\n    SHARED: ofType<GroupContentsResource>(),\n    OWNED: ofType<GroupContentsResource>(),\n};\n\nconst groupContentsHandlers = unionize(groupContentsHandlersRecord);\n\ntype GroupContentsHandler = UnionOf<typeof groupContentsHandlers>;\n\ntype CollectionCopyResource = Resource & { name: string; };\n\ntype MoveableResource = Resource & { name: string };\n\ntype MoveFunc = (\n    data: MoveToFormDialogData,\n    isSecondaryMove?: boolean\n) => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => Promise<void>;\n\nconst secondaryMove: Record<string, MoveFunc> = {\n    [ResourceKind.PROJECT]: moveProjectRunner,\n    [ResourceKind.COLLECTION]: moveCollectionRunner,\n};\n"
  },
  {
    "path": "services/workbench2/src/store/workflow-panel/workflow-middleware-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ServiceRepository } from 'services/services';\nimport { MiddlewareAPI, Dispatch } from 'redux';\nimport { DataExplorerMiddlewareService, dataExplorerToListParams, getOrder, listResultsToDataExplorerItemsMeta } from 'store/data-explorer/data-explorer-middleware-service';\nimport { RootState } from 'store/store';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { DataExplorer, getDataExplorer } from 'store/data-explorer/data-explorer-reducer';\nimport { updateResources } from 'store/resources/resources-actions';\nimport { FilterBuilder } from 'services/api/filter-builder';\nimport { WorkflowResource } from 'models/workflow';\nimport { ListResults } from 'services/common-service/common-service';\nimport { workflowPanelActions } from 'store/workflow-panel/workflow-panel-actions';\nimport { matchRegisteredWorkflowRoute } from 'routes/routes';\nimport { ProcessesMiddlewareService } from \"store/processes/processes-middleware-service\";\nimport { workflowProcessesPanelActions } from \"./workflow-panel-actions\";\nimport { joinFilters } from \"services/api/filter-builder\";\n\nexport class WorkflowMiddlewareService extends DataExplorerMiddlewareService {\n    constructor(private services: ServiceRepository, id: string) {\n        super(id);\n    }\n\n    async requestItems(api: MiddlewareAPI<Dispatch, RootState>) {\n        const state = api.getState();\n        const dataExplorer = getDataExplorer(state.dataExplorer, this.getId());\n        try {\n            const response = await this.services.workflowService.list(getParams(dataExplorer));\n            api.dispatch(updateResources(response.items));\n            api.dispatch(setItems(response));\n        } catch {\n            api.dispatch(couldNotFetchWorkflows());\n        }\n    }\n\n    // Don't use separate request count on unused WF panel\n    async requestCount() {}\n}\n\nexport const getParams = (dataExplorer: DataExplorer) => ({\n    ...dataExplorerToListParams(dataExplorer),\n    order: getOrder<WorkflowResource>(dataExplorer),\n    filters: getFilters(dataExplorer)\n});\n\nexport const getFilters = (dataExplorer: DataExplorer) => {\n    const filters = new FilterBuilder()\n        .addILike(\"name\", dataExplorer.searchValue)\n        .getFilters();\n    return filters;\n};\n\nexport const setItems = (listResults: ListResults<WorkflowResource>) =>\n    workflowPanelActions.SET_ITEMS({\n        ...listResultsToDataExplorerItemsMeta(listResults),\n        items: listResults.items.map(resource => resource.uuid),\n    });\n\nconst couldNotFetchWorkflows = () =>\n    snackbarActions.OPEN_SNACKBAR({\n        message: 'Could not fetch workflows.',\n        kind: SnackbarKind.ERROR\n    });\n\n\nexport class WorkflowProcessesMiddlewareService extends ProcessesMiddlewareService {\n    constructor(services: ServiceRepository, id: string) {\n        super(services, workflowProcessesPanelActions, id);\n    }\n\n    getFilters(api: MiddlewareAPI<Dispatch, RootState>, dataExplorer: DataExplorer): string | null {\n        const state = api.getState();\n\n        if (!state.router.location) { return null; }\n\n        const registeredWorkflowMatch = matchRegisteredWorkflowRoute(state.router.location.pathname);\n        if (!registeredWorkflowMatch) { return null; }\n\n        const workflow_uuid = registeredWorkflowMatch.params.id;\n\n        const template_uuid = new 
FilterBuilder().addEqual('properties.template_uuid', workflow_uuid, 'container_requests').getFilters();\n        const sup = super.getFilters(api, dataExplorer);\n        if (sup === null) { return null; }\n\n        return joinFilters(sup, template_uuid);\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/store/workflow-panel/workflow-panel-actions.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { isEqual } from \"lodash\";\nimport { createServices } from \"services/services\";\nimport { configureStore } from \"../store\";\nimport { createBrowserHistory } from \"history\";\nimport { mockConfig } from 'common/config';\nimport Axios from \"axios\";\nimport MockAdapter from \"axios-mock-adapter\";\nimport { openRunProcess } from './workflow-panel-actions';\nimport { runProcessPanelActions } from 'store/run-process-panel/run-process-panel-actions';\nimport { initialize } from 'redux-form';\nimport { RUN_PROCESS_INPUTS_FORM, RUN_PROCESS_BASIC_FORM } from 'store/run-process-panel/run-process-panel-actions';\nimport { ResourceKind } from 'models/resource';\n\ndescribe('workflow-panel-actions', () => {\n    const axiosInst = Axios.create({ headers: {} });\n    const axiosMock = new MockAdapter(axiosInst);\n\n    let store;\n    let services;\n    const config = {};\n    const actions = {\n        progressFn: (id, working) => { },\n        errorFn: (id, message) => { }\n    };\n    let importMocks;\n\n    beforeEach(() => {\n        axiosMock.reset();\n        services = createServices(mockConfig({}), actions, axiosInst);\n        store = configureStore(createBrowserHistory(), services, config);\n        localStorage.clear();\n        importMocks = [];\n    });\n\n    afterEach(() => {\n        importMocks.map(m => m.restore());\n    });\n\n    it('opens the run process panel', async () => {\n        const wflist = [{\n            uuid: \"zzzzz-7fd4e-0123456789abcde\",\n            name: \"foo\",\n            description: \"\",\n            definition: \"$graph: []\",\n            kind: ResourceKind.WORKFLOW,\n            ownerUuid: \"\",\n            createdAt: \"\",\n            modifiedByUserUuid: \"\",\n            modifiedAt: \"\",\n            etag: \"\"\n        }];\n        axiosMock\n            .onGet(\"/workflows\")\n            .reply(200, {\n                items: wflist\n            }).onGet(\"/links\")\n            .reply(200, {\n                items: []\n            });\n\n        const dispatchMock = cy.spy().as('dispatchMock');\n        const dispatchWrapper = (action ) => {\n            dispatchMock(action);\n            return store.dispatch(action);\n        };\n\n        const expectedBasicArgs = {\n            type: \"@@redux-form/INITIALIZE\",\n            meta: {\n              form: RUN_PROCESS_BASIC_FORM,\n              keepDirty: undefined,\n            },\n            payload: {\n              name: \"testing\",\n              owner: undefined,\n            }\n          }\n\n        await openRunProcess(\"zzzzz-7fd4e-0123456789abcde\", \"zzzzz-tpzed-0123456789abcde\", \"testing\", { inputparm: \"value\" })(dispatchWrapper, store.getState, services);\n        cy.get('@dispatchMock').then((dispatchMock) => {\n            expect(dispatchMock).to.be.calledWith(runProcessPanelActions.SET_WORKFLOWS(wflist));\n            expect(dispatchMock).to.be.calledWith(runProcessPanelActions.SET_SELECTED_WORKFLOW(wflist[0]));\n            expect(arrayDeeplyIncludesObject(dispatchMock.args, expectedBasicArgs)).to.be.true;\n            expect(dispatchMock).to.be.calledWith(initialize(RUN_PROCESS_INPUTS_FORM, { inputparm: \"value\" }));\n        });\n    });\n});\n\nconst arrayDeeplyIncludesObject = (array, object) => {\n    return array.some((item) => {\n        if (isEqual(item, object)) {\n            return true;\n        }\n        if (typeof item 
=== 'object') {\n            return arrayDeeplyIncludesObject(Object.values(item), object);\n        }\n        return false;\n    });\n};\n"
  },
  {
    "path": "services/workbench2/src/store/workflow-panel/workflow-panel-actions.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { ServiceRepository } from 'services/services';\nimport { bindDataExplorerActions } from 'store/data-explorer/data-explorer-action';\nimport { propertiesActions } from 'store/properties/properties-actions';\nimport { navigateToRunProcess, navigateTo } from 'store/navigation/navigation-action';\nimport {\n    goToStep,\n    runProcessPanelActions,\n    loadPresets,\n    getWorkflowRunnerSettings\n} from 'store/run-process-panel/run-process-panel-actions';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { initialize } from 'redux-form';\nimport { RUN_PROCESS_BASIC_FORM, RUN_PROCESS_INPUTS_FORM, RUN_PROCESS_ADVANCED_FORM } from 'store/run-process-panel/run-process-panel-actions';\nimport { getResource } from 'store/resources/resources';\nimport { ProjectResource } from 'models/project';\nimport { UserResource } from 'models/user';\nimport { getWorkflowInputs, parseWorkflowDefinition } from 'models/workflow';\nimport { ContextMenuResource } from \"store/context-menu/context-menu\";\nimport { dialogActions } from 'store/dialog/dialog-actions';\nimport { ResourceKind, Resource } from 'models/resource';\nimport { selectedToArray } from \"components/multiselect-toolbar/MultiselectToolbar.utils\";\nimport { CommonResourceServiceError, getCommonResourceServiceError } from \"services/common-service/common-resource-service\";\nimport { projectPanelDataActions } from \"store/project-panel/project-panel-action-bind\";\n\nexport const WORKFLOW_PANEL_ID = \"workflowPanel\";\nconst UUID_PREFIX_PROPERTY_NAME = 'uuidPrefix';\nexport const WORKFLOW_PANEL_DETAILS_UUID = 'workflowPanelDetailsUuid';\nexport const workflowPanelActions = bindDataExplorerActions(WORKFLOW_PANEL_ID);\n\nexport const WORKFLOW_PROCESSES_PANEL_ID = \"workflowProcessesPanel\";\nexport const workflowProcessesPanelActions = bindDataExplorerActions(WORKFLOW_PROCESSES_PANEL_ID);\n\nexport const loadWorkflowPanel = () =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        dispatch(workflowPanelActions.REQUEST_ITEMS());\n        const response = await services.workflowService.list();\n        dispatch(runProcessPanelActions.SET_WORKFLOWS(response.items));\n    };\n\nexport const setUuidPrefix = (uuidPrefix: string) =>\n    propertiesActions.SET_PROPERTY({ key: UUID_PREFIX_PROPERTY_NAME, value: uuidPrefix });\n\nexport const getUuidPrefix = (state: RootState) => {\n    return state.properties.uuidPrefix;\n};\n\nexport const openRunProcess = (workflowUuid: string, ownerUuid?: string, name?: string, inputObj?: { [key: string]: any }) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const response = await services.workflowService.list();\n        dispatch(runProcessPanelActions.SET_WORKFLOWS(response.items));\n\n        const workflows = getState().runProcessPanel.searchWorkflows;\n        const listedWorkflow = workflows.find(workflow => workflow.uuid === workflowUuid);\n        const workflow = listedWorkflow || (await services.workflowService.get(workflowUuid));\n        if (workflow) {\n            dispatch<any>(navigateToRunProcess);\n            dispatch<any>(goToStep(1));\n            dispatch(runProcessPanelActions.SET_STEP_CHANGED(true));\n            
dispatch(runProcessPanelActions.SET_SELECTED_WORKFLOW(workflow));\n            dispatch<any>(loadPresets(workflow.uuid));\n\n            dispatch(initialize(RUN_PROCESS_ADVANCED_FORM, getWorkflowRunnerSettings(workflow)));\n            let owner;\n            if (ownerUuid) {\n                // Must be writable.\n                owner = getResource<ProjectResource | UserResource>(ownerUuid)(getState().resources);\n                if (!owner || !owner.canWrite) {\n                    owner = undefined;\n                }\n            }\n            if (owner) {\n                dispatch(runProcessPanelActions.SET_PROCESS_OWNER_UUID(owner.uuid));\n            }\n\n            dispatch(initialize(RUN_PROCESS_BASIC_FORM, { name, owner }));\n\n            const definition = parseWorkflowDefinition(workflow);\n            if (definition) {\n                const inputs = getWorkflowInputs(definition);\n                if (inputs) {\n                    const values = inputs.reduce((values, input) => ({\n                        ...values,\n                        [input.id]: input.default,\n                    }), {});\n                    dispatch(initialize(RUN_PROCESS_INPUTS_FORM, values));\n                }\n            }\n\n            if (inputObj) {\n                dispatch(initialize(RUN_PROCESS_INPUTS_FORM, inputObj));\n            }\n        } else {\n            dispatch<any>(snackbarActions.OPEN_SNACKBAR({ message: `You can't run this process` }));\n        }\n    };\n\nexport const getPublicUserUuid = (state: RootState) => {\n    const prefix = state.auth.localCluster;\n    return `${prefix}-tpzed-anonymouspublic`;\n};\nexport const getPublicGroupUuid = (state: RootState) => {\n    const prefix = state.auth.localCluster;\n    return `${prefix}-j7d0g-anonymouspublic`;\n};\nexport const getAllUsersGroupUuid = (state: RootState) => {\n    const prefix = state.auth.localCluster;\n    return `${prefix}-j7d0g-fffffffffffffff`;\n};\n\nexport const showWorkflowDetails = (uuid: string) =>\n    propertiesActions.SET_PROPERTY({ key: WORKFLOW_PANEL_DETAILS_UUID, value: uuid });\n\nexport const openRemoveWorkflowDialog =\n    (resource: ContextMenuResource, numOfWorkflows: number) => (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    const confirmationText =\n        numOfWorkflows === 1\n            ? \"Are you sure you want to remove this workflow?\"\n            : `Are you sure you want to remove these ${numOfWorkflows} workflows?`;\n    const titleText = numOfWorkflows === 1 ? 
\"Remove workflow permanently\" : \"Remove workflows permanently\";\n\n    dispatch(\n        dialogActions.OPEN_DIALOG({\n            id: REMOVE_WORKFLOW_DIALOG,\n            data: {\n                title: titleText,\n                text: confirmationText,\n                confirmButtonLabel: \"Remove\",\n                uuid: resource.uuid,\n                resource,\n            },\n        })\n    );\n};\n\nexport const REMOVE_WORKFLOW_DIALOG = \"removeWorkflowDialog\";\n\nexport const removeWorkflowPermanently = (uuid: string, ownerUuid?: string) => async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n    const checkedList = getState().multiselect.checkedList;\n    const uuidsToRemove: string[] = selectedToArray(checkedList);\n\n    //if no items in checkedlist, default to normal context menu behavior\n    if (!uuidsToRemove.length) uuidsToRemove.push(uuid);\n    if(ownerUuid) dispatch<any>(navigateTo(ownerUuid));\n\n    const workflowsToRemove = uuidsToRemove\n        .map(uuid => getResource(uuid)(getState().resources) as Resource)\n        .filter(resource => resource.kind === ResourceKind.WORKFLOW);\n\n    for (const workflow of workflowsToRemove) {\n        try {\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Removing ...\", kind: SnackbarKind.INFO }));\n            await services.workflowService.delete(workflow.uuid);\n            dispatch(projectPanelDataActions.REQUEST_ITEMS());\n            dispatch(snackbarActions.OPEN_SNACKBAR({ message: \"Removed.\", hideDuration: 2000, kind: SnackbarKind.SUCCESS }));\n        } catch (e) {\n            const error = getCommonResourceServiceError(e);\n            if (error === CommonResourceServiceError.PERMISSION_ERROR_FORBIDDEN) {\n                dispatch(snackbarActions.OPEN_SNACKBAR({ message: `Access denied`, hideDuration: 2000, kind: SnackbarKind.ERROR }));\n            } else {\n                dispatch(snackbarActions.OPEN_SNACKBAR({ message: `Deletion failed`, hideDuration: 2000, kind: SnackbarKind.ERROR }));\n            }\n        }\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/validators/is-float.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { isNumber } from 'lodash';\n\nconst ERROR_MESSAGE = 'This field must be a float';\n\nexport const isFloat = (value: any) => {\n    return isNumber(value) ? undefined : ERROR_MESSAGE;\n};\n"
  },
  {
    "path": "services/workbench2/src/validators/is-integer.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { isInteger as isInt } from 'lodash';\n\nconst ERROR_MESSAGE = 'This field can only contain integer values';\n\nexport const isInteger = (value: any) => {\n    return isInt(value) ? undefined : ERROR_MESSAGE;\n};\n"
  },
  {
    "path": "services/workbench2/src/validators/is-number.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { isNumber as isNum } from 'lodash';\nconst ERROR_MESSAGE = 'This field can only contain numeric values';\n\nexport const isNumber = (value: any) => {\n    return !isNaN(value) && isNum(value) ? undefined : ERROR_MESSAGE;\n};\n"
  },
  {
    "path": "services/workbench2/src/validators/is-remote-host.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n\nconst ERROR_MESSAGE = 'Remote host is invalid';\n\nexport const isRemoteHost = (value: string) => {\n    return value.match(/\\w+\\.\\w+\\.\\w+/i) ? undefined : ERROR_MESSAGE;\n};\n"
  },
  {
    "path": "services/workbench2/src/validators/is-rsa-key.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { isRsaKey } from './is-rsa-key';\n\ndescribe('rsa-key-validator', () => {\n    const rsaKey = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDPpavAS1wUq2+j7PgwkDS+9lm43AkdGxZo+T8qm6ZcB009EUEXya3lQolA52gg/i5aGZg4LT3t1OKxbsaClMd7sNZXYrMW9vd/utvGgAlNEbE/yXsEl2kpxt8lz7RI1XLnoWcV+aKyrsiKdrMKnZyG8CBxKdtzxHzWRl4N1BGrFJf/RnUWJv2VvM/h4/O+KXIjFokPkJ1F8yQChp5OKGkBKGXQ1vV4LjXqEXGVlgiQFM4U2NvCA8hXQR8mYm1vOsTYJzoSsnb+ewbXlVH5d7XsR5S2ULOr88vuYN/P4DF/Q3pEBi7BOyee61P3eHvhCNtb+jQMt59Vj/96y5C/reTMRo2R3B4bmX+Zxr3+DCC5tO1y+U5V39fu7cweimKXc78QDGGAVN0kz4P6P137b5WkCYIozeiBvWRsbGIlHjlGu9+0WuotdluD+OrTguuZ2zr8f32ijddO6y0J+aIdmTxQPxtmcQuRtpRfquoJGLhWAJH6mNZKbWkqqVfd5BA0TYs=';\n    const badKey = 'ssh-rsa bad'\n\n    const ERROR_MESSAGE = 'Public key is invalid';\n\n    describe('rsaKeyValidation', () => {\n        it('should accept keys with comment', () => {\n            // then\n            expect(isRsaKey(rsaKey + \" firstlast@example.com\")).to.be.undefined;\n        });\n\n        it('should accept keys without comment', () => {\n            // then\n            expect(isRsaKey(rsaKey)).to.be.undefined;\n        });\n\n        it('should reject keys with trailing whitespace', () => {\n            // then\n            expect(isRsaKey(rsaKey + \" \")).to.equal(ERROR_MESSAGE);\n            expect(isRsaKey(rsaKey + \"\\n\")).to.equal(ERROR_MESSAGE);\n            expect(isRsaKey(rsaKey + \"\\r\\n\")).to.equal(ERROR_MESSAGE);\n            expect(isRsaKey(rsaKey + \"\\t\")).to.equal(ERROR_MESSAGE);\n        });\n\n        it('should reject invalid keys', () => {\n            // then\n            expect(isRsaKey(badKey)).to.equal(ERROR_MESSAGE);\n        });\n\n    });\n\n});\n"
  },
  {
    "path": "services/workbench2/src/validators/is-rsa-key.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n\nconst ERROR_MESSAGE = 'Public key is invalid';\n\nexport const isRsaKey = (value: any) => {\n    return value.match(/ssh-rsa AAAA[0-9A-Za-z+/]+[=]{0,3}(( [^@]+@[^@]+)|$)/i) ? undefined : ERROR_MESSAGE;\n};\n"
  },
  {
    "path": "services/workbench2/src/validators/is-valid-file-ops-location.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { isFileOperationLocation } from \"store/tree-picker/tree-picker-actions\";\n\nexport const isValidFileOpsLocation = (value: any) => {\n    if (isFileOperationLocation(value)) {\n        return undefined;\n    }\n    return 'Invalid file operation location.';\n}"
  },
  {
    "path": "services/workbench2/src/validators/is-valid-future-date.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport moment from \"moment\";\nimport { Validator } from \"redux-form\"; // optional type hint\n\nexport const isValidFutureDate: Validator = (value) => {\n  const momentDate = moment(value);\n  return momentDate.isValid() && momentDate.isAfter(moment()) ? undefined : \"Invalid date\";\n};\n"
  },
  {
    "path": "services/workbench2/src/validators/is-zip-filename.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n\nconst ERROR_MESSAGE = 'Filename must end in .zip';\n\nexport const isZipFilename = (value: any) => {\n    return value.match(/\\.zip$/i) ? undefined : ERROR_MESSAGE;\n};\n"
  },
  {
    "path": "services/workbench2/src/validators/max-length.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport const ERROR_MESSAGE = 'Maximum string length of this field is: ';\nexport const DEFAULT_MAX_VALUE = 60;\n\nexport const maxLength: any = (maxLengthValue = DEFAULT_MAX_VALUE, errorMessage = ERROR_MESSAGE) => {\n    return (value: string) => {\n        if (value) {\n            return  value && value.length <= maxLengthValue ? undefined : `${errorMessage || ERROR_MESSAGE} ${maxLengthValue}`;\n        }\n\n        return undefined;\n    };\n};\n"
  },
  {
    "path": "services/workbench2/src/validators/min-length.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport const ERROR_MESSAGE = (minLength: number) => `Min length is ${minLength}`;\n\nexport const minLength =\n    (minLength: number, errorMessage = ERROR_MESSAGE) =>\n        (value: { length: number }) =>\n            value && value.length >= minLength ? undefined : errorMessage(minLength);\n"
  },
  {
    "path": "services/workbench2/src/validators/min.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { isNumber } from 'lodash';\n\nexport const ERROR_MESSAGE = (minValue: number) => `Minimum value is ${minValue}`;\n\nexport const min =\n    (minValue: number, errorMessage = ERROR_MESSAGE) =>\n        (value: any) =>\n            isNumber(value) && value >= minValue ? undefined : errorMessage(minValue);\n"
  },
  {
    "path": "services/workbench2/src/validators/optional.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport const optional = (validator: (value: any) => string | undefined) =>\n    (value: any) =>\n        value === undefined || value === null || value === ''  ? undefined : validator(value);"
  },
  {
    "path": "services/workbench2/src/validators/require.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport const ERROR_MESSAGE = 'This field is required.';\n\nexport const fieldRequire: any = (value: string) => {\n    return value && value.length > 0 ? undefined : ERROR_MESSAGE;\n};\n"
  },
  {
    "path": "services/workbench2/src/validators/valid-name.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport const disallowDotName = /^\\.{1,2}$/;\nexport const disallowSlash = /\\//;\nexport const disallowLeadingWhitespaces = /^\\s+/;\nexport const disallowTrailingWhitespaces = /\\s+$/;\n\nexport const validName = (value: string) => {\n    return [disallowDotName, disallowSlash].find(aRule => value.match(aRule) !== null)\n        ? \"Name cannot be '.' or '..' or contain '/' characters\"\n        : undefined;\n};\n\nexport const validNameAllowSlash = (value: string) => {\n    return [disallowDotName].find(aRule => value.match(aRule) !== null)\n        ? \"Name cannot be '.' or '..'\"\n        : undefined;\n};\n\nexport const validFileName = (value: string) => {\n    return [\n        disallowLeadingWhitespaces,\n        disallowTrailingWhitespaces\n    ].find(aRule => value.match(aRule) !== null)\n        ? `Leading/trailing whitespaces not allowed on '${value}'`\n        : undefined;\n};\n\nexport const validFilePath = (filePath: string) => {\n    const errors = filePath.split('/').map(pathPart => {\n        if (pathPart === \"\") { return \"Empty dir name not allowed\"; }\n        return validNameAllowSlash(pathPart) || validFileName(pathPart);\n    });\n    return errors.filter(e => e !== undefined)[0];\n};"
  },
  {
    "path": "services/workbench2/src/validators/validators.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { fieldRequire } from './require';\nimport { maxLength } from './max-length';\nimport { isRsaKey } from './is-rsa-key';\nimport { isRemoteHost } from \"./is-remote-host\";\nimport { validFileName, validFilePath, validName, validNameAllowSlash } from \"./valid-name\";\nimport { isZipFilename } from './is-zip-filename';\nimport { isValidFutureDate } from './is-valid-future-date';\nimport { isValidFileOpsLocation } from './is-valid-file-ops-location';\n\nexport type Validator = <T>(value: T) => string | undefined;\n\n/**\n * Validates a field value against an array of validation functions and returns error messages.\n *\n * @param value - The value to validate\n * @param validationArray - Array of validator functions to apply to the value\n * @param fieldName - Optional field name to prepend to error messages for better debugging\n * @returns Array of validation error messages. Empty array if no errors.\n *\n * @example\n * ```tsx\n * const errors = getFieldErrors('', [fieldRequire, maxLength(255)], 'Name');\n * // Returns: ['Name: This field is required'] if value is empty\n * ```\n */\nexport const getFieldErrors = (value: unknown, validationArray: Validator[], fieldName?: string): string[] => {\n  const errMessages: string[] = [];\n  for (const validation of validationArray) {\n    const result = validation(value);\n    const errorMsg = result ? (fieldName ? `${fieldName}: ${result}` : result) : null;\n    if (errorMsg) {\n      errMessages.push(errorMsg);\n    }\n  }\n  return errMessages;\n}\n\nexport const TAG_KEY_VALIDATION: Validator[] = [maxLength(255)];\nexport const TAG_VALUE_VALIDATION: Validator[] = [maxLength(255)];\n\nexport const PROJECT_NAME_VALIDATION: Validator[] = [fieldRequire, validName, maxLength(255)];\nexport const PROJECT_NAME_VALIDATION_ALLOW_SLASH: Validator[] = [fieldRequire, validNameAllowSlash, maxLength(255)];\nexport const PROJECT_DESCRIPTION_VALIDATION: Validator[] = [maxLength(524_288)];\n\nexport const COLLECTION_NAME_VALIDATION: Validator[] = [fieldRequire, validName, maxLength(255)];\nexport const COLLECTION_NAME_VALIDATION_ALLOW_SLASH: Validator[] = [fieldRequire, validNameAllowSlash, maxLength(255)];\nexport const COLLECTION_DESCRIPTION_VALIDATION: Validator[] = [maxLength(524_288)];\nexport const COLLECTION_PROJECT_VALIDATION: Validator[] = [fieldRequire];\n\nexport const COPY_NAME_VALIDATION: Validator[] = [fieldRequire, maxLength(255)];\nexport const COPY_FILE_VALIDATION: Validator[] = [fieldRequire];\nexport const RENAME_FILE_VALIDATION: Validator[] = [fieldRequire, validFilePath];\nexport const DOWNLOAD_ZIP_VALIDATION: Validator[] = [fieldRequire, isZipFilename, validFileName];\n\nexport const MOVE_TO_VALIDATION: Validator[] = [fieldRequire];\n\nexport const PROCESS_NAME_VALIDATION: Validator[] = [fieldRequire, maxLength(255)];\nexport const PROCESS_DESCRIPTION_VALIDATION: Validator[] = [maxLength(255)];\n\nexport const REPOSITORY_NAME_VALIDATION: Validator[] = [fieldRequire, maxLength(255)];\n\nexport const USER_EMAIL_VALIDATION: Validator[] = [fieldRequire, maxLength(255)];\nexport const PROFILE_EMAIL_VALIDATION: Validator[] = [maxLength(255)];\nexport const PROFILE_URL_VALIDATION: Validator[] = [maxLength(255)];\nexport const USER_LENGTH_VALIDATION: Validator[] = [maxLength(255)];\n\nexport const SSH_KEY_PUBLIC_VALIDATION: Validator[] = [fieldRequire, isRsaKey, maxLength(1024)];\nexport const SSH_KEY_NAME_VALIDATION: 
Validator[] = [fieldRequire, maxLength(255)];\n\nexport const SITE_MANAGER_REMOTE_HOST_VALIDATION: Validator[] = [fieldRequire, isRemoteHost, maxLength(255)];\n\nexport const MY_ACCOUNT_VALIDATION: Validator[] = [fieldRequire];\nexport const CHOOSE_VM_VALIDATION: Validator[] = [fieldRequire];\n\nexport const REQUIRED_VALIDATION: Validator[] = [fieldRequire];\n\nexport const LENGTH255_VALIDATION: Validator[] = [maxLength(255)];\nexport const REQUIRED_LENGTH255_VALIDATION: Validator[] = [fieldRequire, maxLength(255)];\nexport const REQUIRED_VALIDNAME_LENGTH255_VALIDATION: Validator[] = [fieldRequire, validName, maxLength(255)];\nexport const MAXLENGTH_524288_VALIDATION: Validator[] = [maxLength(524_288)];\n\nexport const DATE_VALIDATION: Validator[] = [isValidFutureDate];\n\nexport const FILE_OPS_LOCATION_VALIDATION: Validator[] = [isValidFileOpsLocation];"
  },
  {
    "path": "services/workbench2/src/views/all-processes-panel/all-processes-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { DataColumns, SortDirection } from \"components/data-table/data-column\";\nimport {\n    ProcessStatus,\n    ResourceName,\n    ResourceOwnerWithName,\n    ResourceType,\n    ContainerRunTime,\n    ResourceCreatedAtDate\n} from \"views-components/data-explorer/renderers\";\nimport { ContainerRequestResource } from \"models/container-request\";\nimport { createTree } from \"models/tree\";\nimport { getInitialProcessStatusFilters, getInitialProcessTypeFilters } from \"store/resource-type-filters/resource-type-filters\";\n\n\nexport enum AllProcessesPanelColumnNames {\n    NAME = \"Name\",\n    STATUS = \"Status\",\n    TYPE = \"Type\",\n    OWNER = \"Owner\",\n    CREATED_AT = \"Created at\",\n    RUNTIME = \"Run Time\"\n}\n\nexport const allProcessesPanelColumns: DataColumns<string, ContainerRequestResource> = [\n    {\n        name: AllProcessesPanelColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"name\" },\n        filters: createTree(),\n        render: uuid => <ResourceName uuid={uuid} />,\n    },\n    {\n        name: AllProcessesPanelColumnNames.STATUS,\n        selected: true,\n        configurable: true,\n        mutuallyExclusiveFilters: true,\n        filters: getInitialProcessStatusFilters(),\n        render: uuid => <ProcessStatus uuid={uuid} />,\n    },\n    {\n        name: AllProcessesPanelColumnNames.TYPE,\n        selected: true,\n        configurable: true,\n        filters: getInitialProcessTypeFilters(),\n        render: uuid => <ResourceType uuid={uuid} />,\n    },\n    {\n        name: AllProcessesPanelColumnNames.OWNER,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceOwnerWithName uuid={uuid} />,\n    },\n    {\n        name: AllProcessesPanelColumnNames.CREATED_AT,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.DESC, field: \"createdAt\" },\n        filters: createTree(),\n        render: uuid => <ResourceCreatedAtDate uuid={uuid} />,\n    },\n    {\n        name: AllProcessesPanelColumnNames.RUNTIME,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ContainerRunTime uuid={uuid} />,\n    },\n];\n"
  },
  {
    "path": "services/workbench2/src/views/all-processes-panel/all-processes-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { connect, DispatchProp } from \"react-redux\";\nimport { RouteComponentProps } from \"react-router\";\nimport { DataTableFilterItem } from \"components/data-table-filters/data-table-filters\";\nimport { ResourceKind } from \"models/resource\";\nimport { ArvadosTheme } from \"common/custom-theme\";\nimport { ALL_PROCESSES_PANEL_ID } from \"store/all-processes-panel/all-processes-panel-action\";\nimport { ProcessIcon } from \"components/icon/icon\";\nimport { openProcessContextMenu } from \"store/context-menu/context-menu-actions\";\nimport { loadDetailsPanel } from \"store/details-panel/details-panel-action\";\nimport { navigateTo } from \"store/navigation/navigation-action\";\nimport { ContainerRequestState } from \"models/container-request\";\nimport { RootState } from \"store/store\";\nimport { getProcess } from \"store/processes/process\";\nimport { ResourcesState } from \"store/resources/resources\";\nimport { toggleOne } from \"store/multiselect/multiselect-actions\";\n\ntype CssRules = \"toolbar\" | \"button\" | \"root\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    toolbar: {\n        paddingBottom: theme.spacing(3),\n        textAlign: \"right\",\n    },\n    button: {\n        marginLeft: theme.spacing(1),\n    },\n    root: {\n        width: \"100%\",\n        boxShadow: \"0px 1px 3px 0px rgb(0 0 0 / 20%), 0px 1px 1px 0px rgb(0 0 0 / 14%), 0px 2px 1px -1px rgb(0 0 0 / 12%)\",\n    },\n});\n\nexport interface AllProcessesPanelFilter extends DataTableFilterItem {\n    type: ResourceKind | ContainerRequestState;\n}\n\ninterface AllProcessesPanelDataProps {\n    resources: ResourcesState;\n}\n\ninterface AllProcessesPanelActionProps {\n    onItemClick: (item: string) => void;\n    onDialogOpen: (ownerUuid: string) => void;\n    onItemDoubleClick: (item: string) => void;\n}\nconst mapStateToProps = (state: RootState): AllProcessesPanelDataProps => ({\n    resources: state.resources,\n});\n\ntype AllProcessesPanelProps = AllProcessesPanelDataProps &\n    AllProcessesPanelActionProps &\n    DispatchProp &\n    WithStyles<CssRules> &\n    RouteComponentProps<{ id: string }>;\n\nexport const AllProcessesPanel = withStyles(styles)(\n    connect(mapStateToProps)(\n        class extends React.Component<AllProcessesPanelProps> {\n            handleContextMenu = (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => {\n                const process = getProcess(resourceUuid)(this.props.resources);\n                if (process) {\n                    this.props.dispatch<any>(openProcessContextMenu(event, process));\n                }\n                this.props.dispatch<any>(loadDetailsPanel(resourceUuid));\n            };\n\n            handleRowDoubleClick = (uuid: string) => {\n                this.props.dispatch<any>(navigateTo(uuid));\n            };\n\n            handleRowClick = (uuid: string) => {\n                this.props.dispatch<any>(toggleOne(uuid))\n            };\n\n            render() {\n                return (\n                    <div className={this.props.classes.root}>\n                        <DataExplorer\n                         
   id={ALL_PROCESSES_PANEL_ID}\n                            onRowClick={this.handleRowClick}\n                            onRowDoubleClick={this.handleRowDoubleClick}\n                            onContextMenu={this.handleContextMenu}\n                            contextMenuColumn={false}\n                            defaultViewIcon={ProcessIcon}\n                            defaultViewMessages={[\"Processes list empty.\"]}\n                        />\n                    </div>\n                );\n            }\n        }\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views/api-client-authorization-panel/api-client-authorization-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { createTree } from 'models/tree';\nimport { DataColumns, SortDirection } from 'components/data-table/data-column';\nimport {\n    CommonUuid, TokenApiToken, TokenCreatedByIpAddress, TokenExpiresAt,\n    TokenLastUsedAt, TokenLastUsedByIpAddress, TokenScopes, TokenUserId\n} from 'views-components/data-explorer/renderers';\nimport { ApiClientAuthorization } from 'models/api-client-authorization';\n\nexport enum ApiClientAuthorizationPanelColumnNames {\n    UUID = 'UUID',\n    API_TOKEN = 'API Token',\n    CREATED_BY_IP_ADDRESS = 'Created by IP address',\n    EXPIRES_AT = 'Expires at',\n    LAST_USED_AT = 'Last used at',\n    LAST_USED_BY_IP_ADDRESS = 'Last used by IP address',\n    SCOPES = 'Scopes',\n    USER_ID = 'User ID'\n}\n\nexport const apiClientAuthorizationPanelColumns: DataColumns<string, ApiClientAuthorization> = [\n    {\n        name: ApiClientAuthorizationPanelColumnNames.UUID,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"uuid\" },\n        filters: createTree(),\n        render: uuid => <CommonUuid uuid={uuid} />\n    },\n    {\n        name: ApiClientAuthorizationPanelColumnNames.API_TOKEN,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <TokenApiToken uuid={uuid} />\n    },\n    {\n        name: ApiClientAuthorizationPanelColumnNames.CREATED_BY_IP_ADDRESS,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <TokenCreatedByIpAddress uuid={uuid} />\n    },\n    {\n        name: ApiClientAuthorizationPanelColumnNames.EXPIRES_AT,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <TokenExpiresAt uuid={uuid} />\n    },\n    {\n        name: ApiClientAuthorizationPanelColumnNames.LAST_USED_AT,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <TokenLastUsedAt uuid={uuid} />\n    },\n    {\n        name: ApiClientAuthorizationPanelColumnNames.LAST_USED_BY_IP_ADDRESS,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <TokenLastUsedByIpAddress uuid={uuid} />\n    },\n    {\n        name: ApiClientAuthorizationPanelColumnNames.SCOPES,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <TokenScopes uuid={uuid} />\n    },\n    {\n        name: ApiClientAuthorizationPanelColumnNames.USER_ID,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <TokenUserId uuid={uuid} />\n    }\n];\n"
  },
  {
    "path": "services/workbench2/src/views/api-client-authorization-panel/api-client-authorization-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { ShareMeIcon } from 'components/icon/icon';\nimport { API_CLIENT_AUTHORIZATION_PANEL_ID } from '../../store/api-client-authorizations/api-client-authorizations-actions';\nimport { DataExplorer } from 'views-components/data-explorer/data-explorer';\nimport { ResourcesState } from 'store/resources/resources';\n\ntype CssRules = 'root';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n    }\n});\n\nconst DEFAULT_MESSAGE = 'Your api client authorization list is empty.';\n\nexport interface ApiClientAuthorizationPanelRootActionProps {\n    onItemClick: (item: string) => void;\n    onContextMenu: (event: React.MouseEvent<HTMLElement>, item: string) => void;\n    onItemDoubleClick: (item: string) => void;\n}\n\nexport interface ApiClientAuthorizationPanelRootDataProps {\n    resources: ResourcesState;\n}\n\ntype ApiClientAuthorizationPanelRootProps = ApiClientAuthorizationPanelRootActionProps\n    & ApiClientAuthorizationPanelRootDataProps & WithStyles<CssRules>;\n\nexport const ApiClientAuthorizationPanelRoot = withStyles(styles)(\n    ({ classes, onItemDoubleClick, onItemClick, onContextMenu }: ApiClientAuthorizationPanelRootProps) =>\n        <div className={classes.root}><DataExplorer\n            id={API_CLIENT_AUTHORIZATION_PANEL_ID}\n            onRowClick={onItemClick}\n            onRowDoubleClick={onItemDoubleClick}\n            onContextMenu={onContextMenu}\n            contextMenuColumn={true}\n            hideColumnSelector\n            hideSearchInput\n            defaultViewIcon={ShareMeIcon}\n            defaultViewMessages={[DEFAULT_MESSAGE]} />\n        </div>\n);\n"
  },
  {
    "path": "services/workbench2/src/views/api-client-authorization-panel/api-client-authorization-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport {\n    ApiClientAuthorizationPanelRoot,\n    ApiClientAuthorizationPanelRootDataProps,\n    ApiClientAuthorizationPanelRootActionProps\n} from 'views/api-client-authorization-panel/api-client-authorization-panel-root';\nimport { openApiClientAuthorizationContextMenu } from 'store/context-menu/context-menu-actions';\n\nconst mapStateToProps = (state: RootState): ApiClientAuthorizationPanelRootDataProps => {\n    return {\n        resources: state.resources\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): ApiClientAuthorizationPanelRootActionProps => ({\n    onContextMenu: (event, apiClientAuthorization) => {\n        dispatch<any>(openApiClientAuthorizationContextMenu(event, apiClientAuthorization));\n    },\n    onItemClick: (resourceUuid: string) => { return; },\n    onItemDoubleClick: uuid => { return; },\n});\n\nexport const ApiClientAuthorizationPanel = connect(mapStateToProps, mapDispatchToProps)(ApiClientAuthorizationPanelRoot);"
  },
  {
    "path": "services/workbench2/src/views/collection-content-address-panel/collection-content-address-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DataColumns, SortDirection } from 'components/data-table/data-column';\nimport { createTree } from 'models/tree';\nimport {\n    ResourceName,\n    ResourceOwnerName,\n    ResourceLastModifiedDate,\n    ResourceStatus\n} from 'views-components/data-explorer/renderers';\nimport { CollectionResource } from 'models/collection';\n\nenum CollectionContentAddressPanelColumnNames {\n    COLLECTION_WITH_THIS_ADDRESS = \"Collection with this address\",\n    STATUS = \"Status\",\n    LOCATION = \"Location\",\n    LAST_MODIFIED = \"Last modified\"\n}\n\nexport const collectionContentAddressPanelColumns: DataColumns<string, CollectionResource> = [\n    {\n        name: CollectionContentAddressPanelColumnNames.COLLECTION_WITH_THIS_ADDRESS,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"uuid\" },\n        filters: createTree(),\n        render: uuid => <ResourceName uuid={uuid} />\n    },\n    {\n        name: CollectionContentAddressPanelColumnNames.STATUS,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceStatus uuid={uuid} />\n    },\n    {\n        name: CollectionContentAddressPanelColumnNames.LOCATION,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceOwnerName uuid={uuid} />\n    },\n    {\n        name: CollectionContentAddressPanelColumnNames.LAST_MODIFIED,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.DESC, field: \"modifiedAt\" },\n        filters: createTree(),\n        render: uuid => <ResourceLastModifiedDate uuid={uuid} />\n    }\n];\n"
  },
  {
    "path": "services/workbench2/src/views/collection-content-address-panel/collection-content-address-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Button } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { CollectionIcon } from 'components/icon/icon';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { toggleOne } from 'store/multiselect/multiselect-actions';\nimport { BackIcon } from 'components/icon/icon';\nimport { COLLECTIONS_CONTENT_ADDRESS_PANEL_ID } from 'store/collections-content-address-panel/collections-content-address-panel-actions';\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { Dispatch } from 'redux';\nimport {\n    openContextMenuAndSelect\n} from 'store/context-menu/context-menu-actions';\nimport { ResourceKind } from 'models/resource';\nimport { loadDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { connect } from 'react-redux';\nimport { navigateTo } from 'store/navigation/navigation-action';\nimport { getResource, ResourcesState } from 'store/resources/resources';\nimport { RootState } from 'store/store';\nimport { CollectionResource } from 'models/collection';\nimport { resourceToMenuKind } from 'common/resource-to-menu-kind';\n\ntype CssRules = 'backLink' | 'backIcon' | 'root' | 'content';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    backLink: {\n        fontSize: '12px',\n        fontWeight: 600,\n        display: 'flex',\n        alignItems: 'center',\n        padding: theme.spacing(1),\n        marginBottom: theme.spacing(1),\n        color: theme.palette.grey[\"500\"],\n    },\n    backIcon: {\n        marginRight: theme.spacing(1),\n    },\n    root: {\n        width: '100%',\n    },\n    content: {\n        // reserve space for the content address bar\n        height: `calc(100% - ${theme.spacing(7)})`,\n    },\n});\n\ninterface CollectionContentAddressPanelActionProps {\n    onContextMenu: (resources: ResourcesState) => (event: React.MouseEvent<any>, uuid: string) => void;\n    onItemClick: (item: string) => void;\n    onItemDoubleClick: (item: string) => void;\n}\n\ninterface CollectionContentAddressPanelDataProps {\n    resources: ResourcesState;\n}\n\nconst mapStateToProps = ({ resources }: RootState): CollectionContentAddressPanelDataProps => ({\n    resources\n})\n\nconst mapDispatchToProps = (dispatch: Dispatch): CollectionContentAddressPanelActionProps => ({\n    onContextMenu: (resources: ResourcesState) => (event, resourceUuid) => {\n        const resource = getResource<CollectionResource>(resourceUuid)(resources);\n        const menuKind = dispatch<any>(resourceToMenuKind(resourceUuid));\n        if (menuKind) {\n            dispatch<any>(openContextMenuAndSelect(event, {\n                name: resource ? resource.name : '',\n                description: resource ? resource.description : '',\n                storageClassesDesired: resource ? 
resource.storageClassesDesired : [],\n                uuid: resourceUuid,\n                ownerUuid: '',\n                kind: ResourceKind.NONE,\n                menuKind\n            }));\n        }\n        dispatch<any>(loadDetailsPanel(resourceUuid));\n    },\n    onItemClick: (uuid: string) => {\n        dispatch<any>(toggleOne(uuid))\n    },\n    onItemDoubleClick: uuid => {\n        dispatch<any>(navigateTo(uuid));\n    }\n});\n\ninterface CollectionContentAddressDataProps {\n    match: {\n        params: { id: string }\n    };\n}\n\nexport const CollectionsContentAddressPanel = withStyles(styles)(\n    connect(mapStateToProps, mapDispatchToProps)(\n        class extends React.Component<CollectionContentAddressPanelActionProps & CollectionContentAddressPanelDataProps & CollectionContentAddressDataProps & WithStyles<CssRules>> {\n            render() {\n                return <div className={this.props.classes.root}>\n                    <Button\n                        onClick={() => window.history.back()}\n                        className={this.props.classes.backLink}>\n                        <BackIcon className={this.props.classes.backIcon} />\n                        Back\n                    </Button>\n                    <div className={this.props.classes.content}>\n                        <DataExplorer\n                            id={COLLECTIONS_CONTENT_ADDRESS_PANEL_ID}\n                            hideSearchInput\n                            onRowClick={this.props.onItemClick}\n                            onRowDoubleClick={this.props.onItemDoubleClick}\n                            onContextMenu={this.props.onContextMenu(this.props.resources)}\n                            contextMenuColumn={false}\n                            title={`Content address: ${this.props.match.params.id}`}\n                            defaultViewIcon={CollectionIcon}\n                            defaultViewMessages={['Collections with this content address not found.']}\n                        />\n                    </div>\n                </div>;\n            }\n        }\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views/collection-panel/collection-attributes.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Dispatch } from 'redux';\nimport { Link } from 'react-router-dom';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid, Typography, Tooltip, Link as ButtonLink } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { connect } from \"react-redux\";\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { RootState } from 'store/store';\nimport { DetailsAttribute } from 'components/details-attribute/details-attribute';\nimport { getResource } from 'store/resources/resources';\nimport { formatDateTime, formatFileSize } from \"common/formatters\";\nimport { ResourceWithName, RenderResponsiblePerson } from 'views-components/data-explorer/renderers';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { getUserFullname, UserResource } from 'models/user';\nimport { Resource, ResourceKind } from 'models/resource';\nimport { navigateToProcess } from 'store/collection-panel/collection-panel-action';\nimport { CollectionResource, getCollectionUrl } from 'models/collection';\nimport { openDetailsPanel } from 'store/details-panel/details-panel-action';\n\ntype CssRules = 'label' | 'value' | 'link' | 'button' | 'warningLabel'\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    label: {\n        fontSize: '0.875rem',\n    },\n    value: {\n        textTransform: 'none',\n        fontSize: '0.875rem'\n    },\n    link: {\n        fontSize: '0.875rem',\n        color: theme.palette.primary.main,\n        '&:hover': {\n            cursor: 'pointer'\n        }\n    },\n    button: {\n        cursor: 'pointer'\n    },\n    warningLabel: {\n        fontStyle: 'italic'\n    },\n});\n\nconst mapStateToProps = (state: RootState): Omit<CollectionAttributesProps, 'navigateToProcess' | 'showVersionBrowser'> => {\n    const item = getResource<CollectionResource>(state.properties.currentRouteUuid)(state.resources);\n    const { responsiblePersonUUID, responsiblePersonName } = getResponsibleData(state, item?.uuid);\n    return {\n        item, responsiblePersonUUID, responsiblePersonName\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): Pick<CollectionAttributesProps, 'navigateToProcess' | 'showVersionBrowser'> => ({\n    navigateToProcess: (uuid: string) => dispatch<any>(navigateToProcess(uuid)),\n    showVersionBrowser: (item: CollectionResource) => dispatch<any>(openDetailsPanel(item.uuid, 1))\n});\n\n\ninterface CollectionAttributesProps {\n    item?: CollectionResource;\n    responsiblePersonUUID: string;\n    responsiblePersonName: string;\n    navigateToProcess: (uuid: string) => void;\n    showVersionBrowser: (item: CollectionResource) => void;\n}\n\nexport const CollectionAttributes = connect(mapStateToProps, mapDispatchToProps)(withStyles(styles)((props: CollectionAttributesProps & WithStyles<CssRules>) => {\n    if (!props.item) {\n        return null;\n    }\n    const { item, classes, responsiblePersonUUID, responsiblePersonName } = props;\n    const isOldVersion = item && item.currentVersionUuid !== item.uuid;\n    const mdSize = 4;\n    return <Grid container>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label={isOldVersion ? 
\"This version's UUID\" : \"Collection UUID\"}\n                linkToUuid={item.uuid} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label={isOldVersion ? \"This version's PDH\" : \"Portable data hash\"}\n                linkToUuid={item.portableDataHash} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label='Owner' linkToUuid={item.ownerUuid}\n                uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />} />\n        </Grid>\n        {responsiblePersonUUID && <Grid item xs={12} md={mdSize} data-cy=\"responsible-person-wrapper\">\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label='Responsible person' linkToUuid={item.ownerUuid}\n                uuidEnhancer={(uuid: string) => <RenderResponsiblePerson responsiblePersonUUID={responsiblePersonUUID} responsiblePersonName={responsiblePersonName} />} />\n        </Grid>}\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label='Head version'\n                value={isOldVersion ? undefined : 'this one'}\n                linkToUuid={isOldVersion ? item.currentVersionUuid : undefined} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute\n                classLabel={classes.label} classValue={classes.value}\n                label='Version number'\n                value={\n                <Tooltip title=\"Open version browser\">\n                    <ButtonLink underline='none' className={classes.button} onClick={() => props.showVersionBrowser(item)}>\n                        <span data-cy='collection-version-number'>{item.version}</span>\n                    </ButtonLink>\n                </Tooltip>\n                }\n            />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute label='Created at' value={formatDateTime(item.createdAt)} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute label='Last modified' value={formatDateTime(item.modifiedAt)} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label='Number of files' value={<span data-cy='collection-file-count'>{item.fileCount}</span>} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label='Content size' value={formatFileSize(item.fileSizeTotal)} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label='Storage classes' value={item.storageClassesDesired ? 
item.storageClassesDesired.join(', ') : [\"default\"]} />\n        </Grid>\n        {(item.properties.container_request || item.properties.containerRequest) &&\n            <Grid item xs={12} md={mdSize}\n                onClick={() => props.navigateToProcess(item.properties.container_request || item.properties.containerRequest)}>\n                <DetailsAttribute classLabel={classes.link} label='Link to process' />\n            </Grid>\n        }\n        {isOldVersion &&\n            <Grid item xs={12} md={12}>\n                <Typography className={classes.warningLabel} variant=\"caption\">\n                    This is an old version. Make a copy to make changes. Go to the <Link to={getCollectionUrl(item.currentVersionUuid)}>head version</Link> for sharing options.\n                </Typography>\n            </Grid>\n        }\n    </Grid>;\n}));\n\nconst getResponsibleData = (state: RootState, uuid: string | undefined) => {\n        let responsiblePersonName: string = \"\";\n        let responsiblePersonUUID: string = \"\";\n        let responsiblePersonProperty: string = \"\";\n\n        if (state.auth.config.clusterConfig.Collections.ManagedProperties) {\n            let index = 0;\n            const keys = Object.keys(state.auth.config.clusterConfig.Collections.ManagedProperties);\n\n            while (!responsiblePersonProperty && keys[index]) {\n                const key = keys[index];\n                if (state.auth.config.clusterConfig.Collections.ManagedProperties[key].Function === \"original_owner\") {\n                    responsiblePersonProperty = key;\n                }\n                index++;\n            }\n        }\n\n        let resource: Resource | undefined = getResource<GroupContentsResource & UserResource>(uuid)(state.resources);\n\n        while (resource && resource.kind !== ResourceKind.USER && responsiblePersonProperty) {\n            responsiblePersonUUID = (resource as CollectionResource).properties[responsiblePersonProperty];\n            resource = getResource<GroupContentsResource & UserResource>(responsiblePersonUUID)(state.resources);\n        }\n\n        if (resource && resource.kind === ResourceKind.USER) {\n            responsiblePersonName = getUserFullname(resource as UserResource) || (resource as GroupContentsResource).name;\n        }\n\n        return { responsiblePersonUUID, responsiblePersonName, };\n    }\n"
  },
  {
    "path": "services/workbench2/src/views/collection-panel/collection-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { connect, DispatchProp } from \"react-redux\";\nimport { RouteComponentProps } from 'react-router';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { RootState } from 'store/store';\nimport { CollectionIcon } from 'components/icon/icon';\nimport { CollectionResource } from 'models/collection';\nimport { CollectionPanelFiles } from 'views-components/collection-panel-files/collection-panel-files';\nimport { ResourcesState, getResource } from 'store/resources/resources';\nimport { GroupResource } from 'models/group';\nimport { UserResource } from 'models/user';\nimport { MPVContainer, MPVPanelContent, MPVPanelState } from 'components/multi-panel-view/multi-panel-view';\nimport { resourceIsFrozen } from 'common/frozen-resources';\nimport { NotFoundView } from 'views/not-found-panel/not-found-panel';\nimport { setSelectedResourceUuid } from 'store/selected-resource/selected-resource-actions';\nimport { collectionPanelActions } from 'store/collection-panel/collection-panel-action';\nimport { DetailsCardRoot } from 'views-components/details-card/details-card-root';\nimport { OverviewPanel } from 'components/overview-panel/overview-panel';\nimport { CollectionAttributes } from './collection-attributes';\n\ntype CssRules =\n    'root'\n    | 'mpvRoot'\n    | 'filesCard'\n    | 'value'\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        display: 'flex',\n        flexDirection: 'column',\n    },\n    mpvRoot: {\n        width: '100%',\n        height: '100%',\n    },\n    filesCard: {\n        padding: 0,\n        height: '100%',\n        display: 'flex',\n        flexDirection: 'column',\n    },\n    value: {\n        textTransform: 'none',\n        fontSize: '0.875rem'\n    },\n});\n\ninterface CollectionPanelDataProps {\n    currentUserUUID: string;\n    resources: ResourcesState;\n}\n\ntype CollectionPanelProps = CollectionPanelDataProps & DispatchProp & WithStyles<CssRules>\n\ntype CollectionPanelState = {\n    item: CollectionResource | null;\n    itemOwner: GroupResource | UserResource | null;\n    isWritable: boolean;\n    isOldVersion: boolean;\n}\n\nexport const CollectionPanel = withStyles(styles)(connect(\n    (state: RootState) => {\n        return {\n            currentUserUUID: state.auth.user?.uuid,\n            resources: state.resources\n        };\n    })(\n        class extends React.Component<CollectionPanelProps & RouteComponentProps<{ id: string }>> {\n            state: CollectionPanelState = {\n                item: null,\n                itemOwner: null,\n                isWritable: false,\n                isOldVersion: false,\n            }\n\n            shouldComponentUpdate( nextProps: Readonly<CollectionPanelProps & RouteComponentProps<{ id: string }>>, nextState: Readonly<CollectionPanelState>, nextContext: any ): boolean {\n                    return this.props.match.params.id !== nextProps.match.params.id\n                        || this.props.resources !== nextProps.resources\n                        || this.state.isWritable !== nextState.isWritable\n            }\n\n            componentDidUpdate( prevProps: Readonly<CollectionPanelProps>, prevState: 
Readonly<CollectionPanelState>, snapshot?: any ): void {\n                const { currentUserUUID, resources } = this.props;\n                const collection = getResource<CollectionResource>(this.props.match.params.id)(this.props.resources);\n                if (!this.state.item && collection) this.setState({ item: collection });\n                if (collection) {\n                    this.setState({\n                        hasDescription: collection.description && collection.description.length > 0,\n                    });\n                    const itemOwner = collection ? getResource<GroupResource | UserResource>(collection.ownerUuid)(this.props.resources) : undefined;\n                    if (prevState.item !== collection) {\n                        this.props.dispatch<any>(setSelectedResourceUuid(collection.uuid))\n                        this.setState({\n                            item: collection,\n                            itemOwner: itemOwner,\n                            isOldVersion: collection.currentVersionUuid !== collection.uuid,\n                        });\n                    }\n                    if (prevProps.resources !== resources && itemOwner) {\n                        const isWritable = this.checkIsWritable(collection, itemOwner, currentUserUUID, resourceIsFrozen(collection, resources));\n                        this.setState({ isWritable: isWritable });\n                    }\n                }\n            }\n\n            componentWillUnmount(): void {\n                this.props.dispatch<any>(collectionPanelActions.RESET_COLLECTION_PANEL());\n            }\n\n            checkIsWritable = (item: CollectionResource, itemOwner: GroupResource | UserResource | null, currentUserUUID: string, isFrozen: boolean): boolean => {\n                let isWritable = false;\n\n                if (item && !this.state.isOldVersion) {\n                    if (item.ownerUuid === currentUserUUID) {\n                        isWritable = true;\n                    } else {\n                        if (itemOwner) {\n                            isWritable = itemOwner.canWrite;\n                        }\n                    }\n                }\n                if (item && isWritable) {\n                    isWritable = !isFrozen;\n                }\n                return isWritable;\n            }\n\n            render() {\n                const { classes } = this.props;\n                const { isWritable, item } = this.state;\n                // Set up panels and default tab\n                const panelsData: MPVPanelState[] = [\n                    { name: \"Overview\" },\n                    { name: \"Files\", visible: true },\n                ];\n                return item\n                    ? 
<section className={classes.root}>\n                        <DetailsCardRoot />\n                        <MPVContainer container className={classes.mpvRoot} justifyContent=\"flex-start\" panelStates={panelsData}>\n                            <MPVPanelContent item xs>\n                                <OverviewPanel detailsElement={<CollectionAttributes />} />\n                            </MPVPanelContent>\n                            <MPVPanelContent item xs>\n                                <section className={classes.filesCard}>\n                                    <CollectionPanelFiles isWritable={isWritable} />\n                                </section>\n                            </MPVPanelContent>\n                        </MPVContainer >\n                    </section>\n                    : <NotFoundView\n                        icon={CollectionIcon}\n                        messages={[\"Collection not found\"]}\n                    />;\n            }\n        }\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views/external-credentials-panel/external-credentials-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { DataColumns, SortDirection } from \"components/data-table/data-column\";\nimport { camelCase } from \"lodash\";\nimport { ExternalCredential } from \"models/external-credential\";\nimport { createTree } from \"models/tree\";\nimport {\n    ResourceNameNoLink,\n    ResourceExpiresAtDate,\n    RenderResourceStringField,\n    RenderScopes,\n    RenderDescriptionInTD,\n} from \"views-components/data-explorer/renderers\";\n\nexport enum ExternalCredentialsPanelColumnNames {\n    NAME = \"Name\",\n    DESCRIPTION = \"Description\",\n    EXTERNAL_ID = \"External ID\",\n    CREDENTIAL_CLASS = \"Credential class\",\n    EXPIRES_AT = \"Expires at\",\n    SCOPES = \"Scopes\"\n}\n\nexport const externalCredentialsPanelColumns: DataColumns<string, ExternalCredential> = [\n    {\n        name: ExternalCredentialsPanelColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"name\" },\n        filters: createTree(),\n        render: uuid => <ResourceNameNoLink uuid={uuid} />,\n    },\n    {\n        name: ExternalCredentialsPanelColumnNames.DESCRIPTION,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <RenderDescriptionInTD uuid={uuid} />,\n    },\n    {\n        name: ExternalCredentialsPanelColumnNames.CREDENTIAL_CLASS,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <RenderResourceStringField<ExternalCredential>\n            uuid={uuid}\n            field={camelCase(ExternalCredentialsPanelColumnNames.CREDENTIAL_CLASS) as keyof ExternalCredential} />,\n    },\n    {\n        name: ExternalCredentialsPanelColumnNames.EXTERNAL_ID,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <RenderResourceStringField<ExternalCredential>\n            uuid={uuid}\n            field={camelCase(ExternalCredentialsPanelColumnNames.EXTERNAL_ID) as keyof ExternalCredential} />,\n    },\n    {\n        name: ExternalCredentialsPanelColumnNames.EXPIRES_AT,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceExpiresAtDate uuid={uuid} />,\n    },\n    {\n        name: ExternalCredentialsPanelColumnNames.SCOPES,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <RenderScopes uuid={uuid} />,\n    },\n];\n"
  },
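  {
    "path": "services/workbench2/src/views/external-credentials-panel/external-credentials-field-mapping.example.ts",
    "content": "// Editor-added illustrative sketch -- a hypothetical file, not part of the\n// Arvados source tree.\n//\n// The columns in external-credentials-panel-columns.tsx derive model field\n// names from their display labels with lodash's camelCase, e.g.\n//   camelCase(\"External ID\")      === \"externalId\"\n//   camelCase(\"Credential class\") === \"credentialClass\"\n// so a column header and the model field it renders cannot drift apart.\n// A minimal demonstration of that mapping, assuming the ExternalCredential\n// model imported by the real columns file:\nimport { camelCase } from \"lodash\";\nimport { ExternalCredential } from \"models/external-credential\";\n\n// The cast asserts at compile time that the derived string is a real model key.\nconst externalIdField = camelCase(\"External ID\") as keyof ExternalCredential;\n\nexport const getExternalId = (credential: ExternalCredential) =>\n    credential[externalIdField];\n"
  },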
  {
    "path": "services/workbench2/src/views/external-credentials-panel/external-credentials-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { Grid, Button } from \"@mui/material\";\nimport { noop } from \"lodash\";\nimport { connect } from \"react-redux\";\nimport { Dispatch } from \"redux\";\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { ArvadosTheme } from \"common/custom-theme\";\nimport { EXTERNAL_CREDENTIALS_PANEL, openNewExternalCredentialDialog } from \"store/external-credentials/external-credentials-actions\";\nimport { FolderKeyIcon, AddIcon } from \"components/icon/icon\";\nimport { loadDetailsPanel } from \"store/details-panel/details-panel-action\";\nimport { RootState } from \"store/store\";\nimport { ResourcesState, getResource } from \"store/resources/resources\";\nimport { toggleOne } from \"store/multiselect/multiselect-actions\";\nimport { ExternalCredential } from \"models/external-credential\";\nimport { ContextMenuResource, ContextMenuKind } from \"store/context-menu/context-menu\";\nimport { openContextMenuAndSelect } from \"store/context-menu/context-menu-actions\";\n\ntype CssRules = \"toolbar\" | \"button\" | \"root\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    toolbar: {\n        paddingBottom: theme.spacing(3),\n        textAlign: \"right\",\n    },\n    button: {\n        marginLeft: theme.spacing(2),\n    },\n    root: {\n        width: \"100%\",\n        boxShadow: \"0px 1px 3px 0px rgb(0 0 0 / 20%), 0px 1px 1px 0px rgb(0 0 0 / 14%), 0px 2px 1px -1px rgb(0 0 0 / 12%)\",\n    },\n});\n\ninterface ExternalCredentialsPanelDataProps {\n    resources: ResourcesState;\n}\n\ninterface ExternalCredentialsPanelActionProps {\n    onNewCredential: () => void;\n    openContextMenuAndSelect: (event: React.MouseEvent<HTMLElement>, resource: ContextMenuResource) => void;\n    loadDetailsPanel: (resourceUuid: string) => void;\n    toggleOne: (uuid: string) => void;\n}\nconst mapStateToProps = (state: RootState): ExternalCredentialsPanelDataProps => ({\n    resources: state.resources,\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch): ExternalCredentialsPanelActionProps => ({\n    onNewCredential: () => dispatch<any>(openNewExternalCredentialDialog()),\n    openContextMenuAndSelect: (event: React.MouseEvent<HTMLElement>, resource: ContextMenuResource) => dispatch<any>(openContextMenuAndSelect(event, resource)),\n    loadDetailsPanel: (resourceUuid: string) => dispatch<any>(loadDetailsPanel(resourceUuid)),\n    toggleOne: (uuid: string) => dispatch<any>(toggleOne(uuid)),\n});\n\ntype ExternalCredentialsPanelProps = ExternalCredentialsPanelDataProps &\n    ExternalCredentialsPanelActionProps &\n    WithStyles<CssRules>\n\nexport const ExternalCredentialsPanel = withStyles(styles)(\n    connect(mapStateToProps, mapDispatchToProps)(\n        class extends React.Component<ExternalCredentialsPanelProps> {\n            handleContextMenu = (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => {\n                const externalCredential = getResource<ExternalCredential>(resourceUuid)(this.props.resources);\n                if (externalCredential) {\n                    this.props.openContextMenuAndSelect(event, {\n                        name: externalCredential.name,\n                        uuid: 
externalCredential.uuid,\n                        ownerUuid: externalCredential.ownerUuid,\n                        kind: externalCredential.kind,\n                        menuKind: ContextMenuKind.EXTERNAL_CREDENTIAL\n                    });\n                }\n                this.props.loadDetailsPanel(resourceUuid);\n            };\n\n            handleRowClick = (uuid: string) => {\n                this.props.toggleOne(uuid);\n            };\n\n            render() {\n                return (\n                    <div className={this.props.classes.root}>\n                        <DataExplorer\n                            id={EXTERNAL_CREDENTIALS_PANEL}\n                            onRowClick={this.handleRowClick}\n                            onRowDoubleClick={noop}\n                            onContextMenu={this.handleContextMenu}\n                            contextMenuColumn={false}\n                            defaultViewIcon={FolderKeyIcon}\n                            defaultViewMessages={[\"External credentials list empty.\"]}\n                            hideColumnSelector\n                            actions={\n                                <Grid container justifyContent='flex-end'>\n                                    <Button\n                                        className={this.props.classes.button}\n                                        data-cy=\"new-credential-button\"\n                                        variant=\"contained\"\n                                        color=\"primary\"\n                                        onClick={this.props.onNewCredential}>\n                                        <AddIcon /> New External Credential\n                                    </Button>\n                                </Grid>\n                            }\n                        />\n                    </div>\n                );\n            }\n        }\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views/favorite-panel/favorite-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { DataColumns } from \"components/data-table/data-column\";\nimport { createTree } from \"models/tree\";\nimport { GroupContentsResource } from \"services/groups-service/groups-service\";\nimport { getSimpleObjectTypeFilters } from \"store/resource-type-filters/resource-type-filters\";\nimport {\n    ProcessStatus,\n    ResourceFileSize,\n    ResourceLastModifiedDate,\n    ResourceName,\n    ResourceOwnerWithName,\n    ResourceType,\n} from \"views-components/data-explorer/renderers\";\n\nexport enum FavoritePanelColumnNames {\n    NAME = \"Name\",\n    STATUS = \"Status\",\n    TYPE = \"Type\",\n    OWNER = \"Owner\",\n    FILE_SIZE = \"File size\",\n    LAST_MODIFIED = \"Last modified\"\n}\n\nexport const favoritePanelColumns: DataColumns<string, GroupContentsResource> = [\n    {\n        name: FavoritePanelColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceName uuid={uuid} />\n    },\n    {\n        name: \"Status\",\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ProcessStatus uuid={uuid} />\n    },\n    {\n        name: FavoritePanelColumnNames.TYPE,\n        selected: true,\n        configurable: true,\n        filters: getSimpleObjectTypeFilters(),\n        render: uuid => <ResourceType uuid={uuid} />\n    },\n    {\n        name: FavoritePanelColumnNames.OWNER,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceOwnerWithName uuid={uuid} />\n    },\n    {\n        name: FavoritePanelColumnNames.FILE_SIZE,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceFileSize uuid={uuid} />\n    },\n    {\n        name: FavoritePanelColumnNames.LAST_MODIFIED,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLastModifiedDate uuid={uuid} />\n    }\n];\n"
  },
  {
    "path": "services/workbench2/src/views/favorite-panel/favorite-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { connect, DispatchProp } from 'react-redux';\nimport { RouteComponentProps } from 'react-router';\nimport { DataTableFilterItem } from 'components/data-table-filters/data-table-filters';\nimport { ResourceKind } from 'models/resource';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { FAVORITE_PANEL_ID } from \"store/favorite-panel/favorite-panel-action\";\nimport { FavoriteIcon } from 'components/icon/icon';\nimport {\n    openContextMenuAndSelect,\n} from 'store/context-menu/context-menu-actions';\nimport { loadDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { navigateTo } from 'store/navigation/navigation-action';\nimport { ContainerRequestState } from \"models/container-request\";\nimport { FavoritesState } from 'store/favorites/favorites-reducer';\nimport { RootState } from 'store/store';\nimport { getResource, ResourcesState } from 'store/resources/resources';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { GroupClass, GroupResource } from 'models/group';\nimport { PROJECT_PANEL_CURRENT_UUID } from \"store/project-panel/project-panel\";\nimport { CollectionResource } from 'models/collection';\nimport { toggleOne } from 'store/multiselect/multiselect-actions';\nimport { getProperty } from 'store/properties/properties';\nimport { resourceToMenuKind } from 'common/resource-to-menu-kind';\n\ntype CssRules = \"toolbar\" | \"button\" | \"root\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    toolbar: {\n        paddingBottom: theme.spacing(3),\n        textAlign: \"right\"\n    },\n    button: {\n        marginLeft: theme.spacing(1)\n    },\n    root: {\n        width: '100%',\n        boxShadow: \"0px 1px 3px 0px rgb(0 0 0 / 20%), 0px 1px 1px 0px rgb(0 0 0 / 14%), 0px 2px 1px -1px rgb(0 0 0 / 12%)\",\n    },\n});\n\nexport interface FavoritePanelFilter extends DataTableFilterItem {\n    type: ResourceKind | ContainerRequestState;\n}\n\ninterface FavoritePanelDataProps {\n    currentItemId: string | undefined;\n    favorites: FavoritesState;\n    resources: ResourcesState;\n    userUuid: string;\n}\n\ninterface FavoritePanelActionProps {\n    onItemClick: (item: string) => void;\n    onDialogOpen: (ownerUuid: string) => void;\n    onItemDoubleClick: (item: string) => void;\n}\nconst mapStateToProps = (state : RootState): FavoritePanelDataProps => ({\n    favorites: state.favorites,\n    resources: state.resources,\n    userUuid: state.auth.user!.uuid,\n    currentItemId: getProperty<string>(PROJECT_PANEL_CURRENT_UUID)(state.properties),\n});\n\ntype FavoritePanelProps = FavoritePanelDataProps & FavoritePanelActionProps & DispatchProp\n    & WithStyles<CssRules> & RouteComponentProps<{ id: string }>;\n\nexport const FavoritePanel = withStyles(styles)(\n    connect(mapStateToProps)(\n        class extends React.Component<FavoritePanelProps> {\n\n            handleContextMenu = (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => {\n                const { resources, currentItemId } = this.props;\n                const resource = 
getResource<GroupContentsResource>(resourceUuid)(resources);\n\n                let readonly = false;\n                const project = currentItemId ? getResource<GroupResource>(currentItemId)(resources) : undefined;\n\n                if (project && project.groupClass === GroupClass.FILTER) {\n                    readonly = true;\n                }\n\n                const menuKind = this.props.dispatch<any>(resourceToMenuKind(resourceUuid, readonly));\n\n                if (menuKind && resource) {\n                    this.props.dispatch<any>(openContextMenuAndSelect(event, {\n                        name: resource.name,\n                        uuid: resource.uuid,\n                        ownerUuid: resource.ownerUuid,\n                        isTrashed: ('isTrashed' in resource) ? resource.isTrashed: false,\n                        kind: resource.kind,\n                        menuKind,\n                        description: resource.description,\n                        storageClassesDesired: (resource as CollectionResource).storageClassesDesired,\n                    }));\n                }\n                this.props.dispatch<any>(loadDetailsPanel(resourceUuid));\n            }\n\n            handleRowDoubleClick = (uuid: string) => {\n                this.props.dispatch<any>(navigateTo(uuid));\n            }\n\n            handleRowClick = (uuid: string) => {\n                this.props.dispatch<any>(toggleOne(uuid))\n            }\n\n            render() {\n                return <div className={this.props.classes.root}><DataExplorer\n                    id={FAVORITE_PANEL_ID}\n                    onRowClick={this.handleRowClick}\n                    onRowDoubleClick={this.handleRowDoubleClick}\n                    onContextMenu={this.handleContextMenu}\n                    contextMenuColumn={false}\n                    defaultViewIcon={FavoriteIcon}\n                    defaultViewMessages={['Your favorites list is empty.']} />\n                </div>;\n            }\n        }\n    )\n);\n"
  },
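  {
    "path": "services/workbench2/src/views/favorite-panel/context-menu-wiring.example.ts",
    "content": "// Editor-added illustrative sketch -- a hypothetical file, not part of the\n// Arvados source tree.\n//\n// Several panels in this directory repeat the same context-menu recipe: look\n// the resource up in the store, let resourceToMenuKind pick the menu variant,\n// open the menu with the resource's identifying fields, then refresh the\n// details panel. A minimal distillation of that recipe, using only the imports\n// and payload fields already used by the real panels above:\nimport React from 'react';\nimport { Dispatch } from 'redux';\nimport { getResource, ResourcesState } from 'store/resources/resources';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { openContextMenuAndSelect } from 'store/context-menu/context-menu-actions';\nimport { loadDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { resourceToMenuKind } from 'common/resource-to-menu-kind';\n\nexport const handleResourceContextMenu = (dispatch: Dispatch, resources: ResourcesState) =>\n    (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => {\n        const resource = getResource<GroupContentsResource>(resourceUuid)(resources);\n        // resourceToMenuKind maps the resource (and the caller's permissions)\n        // to the context-menu variant to show, or undefined for no menu.\n        const menuKind = dispatch<any>(resourceToMenuKind(resourceUuid));\n        if (resource && menuKind) {\n            dispatch<any>(openContextMenuAndSelect(event, {\n                name: resource.name,\n                uuid: resource.uuid,\n                ownerUuid: resource.ownerUuid,\n                kind: resource.kind,\n                menuKind,\n            }));\n        }\n        // Keep the details panel in sync with whatever was right-clicked.\n        dispatch<any>(loadDetailsPanel(resourceUuid));\n    };\n"
  },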
  {
    "path": "services/workbench2/src/views/group-details-panel/group-details-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DataColumns } from 'components/data-table/data-column';\nimport { PermissionResource } from 'models/permission';\nimport { createTree } from 'models/tree';\nimport {\n    ResourceLinkHeadUuid,\n    ResourceLinkTailUsername,\n    ResourceLinkHeadPermissionLevel,\n    ResourceLinkTailPermissionLevel,\n    ResourceLinkHead,\n    ResourceLinkTail,\n    ResourceLinkDelete,\n    ResourcePermissionsDelete,\n    ResourceLinkTailAccountStatus,\n    ResourceLinkTailIsVisible,\n} from 'views-components/data-explorer/renderers';\n\nexport enum GroupDetailsPanelMembersColumnNames {\n    FULL_NAME = \"Name\",\n    USERNAME = \"Username\",\n    STATUS = \"Account Status\",\n    VISIBLE = \"Visible to other members\",\n    PERMISSION = \"Permission\",\n    REMOVE = \"Remove\",\n}\n\nexport enum GroupDetailsPanelPermissionsColumnNames {\n    NAME = \"Name\",\n    PERMISSION = \"Permission\",\n    UUID = \"UUID\",\n    REMOVE = \"Remove\",\n}\n\nexport const groupDetailsMembersPanelColumns: DataColumns<string, PermissionResource> = [\n    {\n        name: GroupDetailsPanelMembersColumnNames.FULL_NAME,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkTail uuid={uuid} />\n    },\n    {\n        name: GroupDetailsPanelMembersColumnNames.USERNAME,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkTailUsername uuid={uuid} />\n    },\n    {\n        name: GroupDetailsPanelMembersColumnNames.STATUS,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkTailAccountStatus uuid={uuid} />\n    },\n    {\n        name: GroupDetailsPanelMembersColumnNames.VISIBLE,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkTailIsVisible uuid={uuid} />\n    },\n    {\n        name: GroupDetailsPanelMembersColumnNames.PERMISSION,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkTailPermissionLevel uuid={uuid} />\n    },\n    {\n        name: GroupDetailsPanelMembersColumnNames.REMOVE,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkDelete uuid={uuid} />\n    },\n];\n\nexport const groupDetailsPermissionsPanelColumns: DataColumns<string, PermissionResource> = [\n    {\n        name: GroupDetailsPanelPermissionsColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkHead uuid={uuid} />\n    },\n    {\n        name: GroupDetailsPanelPermissionsColumnNames.PERMISSION,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkHeadPermissionLevel uuid={uuid} />\n    },\n    {\n        name: GroupDetailsPanelPermissionsColumnNames.UUID,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkHeadUuid uuid={uuid} />\n    },\n    {\n        name: GroupDetailsPanelPermissionsColumnNames.REMOVE,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourcePermissionsDelete 
uuid={uuid} />\n    },\n];\n"
  },
  {
    "path": "services/workbench2/src/views/group-details-panel/group-details-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { getResource } from 'store/resources/resources';\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { noop } from 'lodash/fp';\nimport { RootState } from 'store/store';\nimport { GROUP_DETAILS_MEMBERS_PANEL_ID, GROUP_DETAILS_PERMISSIONS_PANEL_ID, openAddGroupMembersDialog, getCurrentGroupDetailsPanelUuid } from 'store/group-details-panel/group-details-panel-actions';\nimport { openContextMenuAndSelect } from 'store/context-menu/context-menu-actions';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid, Button, Tabs, Tab, Paper } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { AddIcon, UserPanelIcon, KeyIcon } from 'components/icon/icon';\nimport { GroupResource, isBuiltinGroup } from 'models/group';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { getUserUuid } from 'common/getuser';\nimport {\n    GroupDetailsPanelMembersColumnNames,\n    GroupDetailsPanelPermissionsColumnNames,\n    groupDetailsMembersPanelColumns,\n    groupDetailsPermissionsPanelColumns,\n} from './group-details-panel-columns';\n\nexport { GroupDetailsPanelMembersColumnNames, GroupDetailsPanelPermissionsColumnNames, groupDetailsMembersPanelColumns, groupDetailsPermissionsPanelColumns };\n\ntype CssRules = \"root\" | \"content\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n    },\n    content: {\n        // reserve space for the tab bar\n        height: `calc(100% - ${theme.spacing(7)})`,\n    }\n});\n\nconst MEMBERS_DEFAULT_MESSAGE = 'Members list is empty.';\nconst PERMISSIONS_DEFAULT_MESSAGE = 'Permissions list is empty.';\n\nconst mapStateToProps = (state: RootState) => {\n    const groupUuid = getCurrentGroupDetailsPanelUuid(state.properties);\n    const group = groupUuid ? 
getResource<GroupResource>(groupUuid)(state.resources) : undefined;\n    const userUuid = getUserUuid(state);\n\n    return {\n        userUuid,\n        group,\n    };\n};\n\nconst mapDispatchToProps = {\n    onContextMenu: openContextMenuAndSelect,\n    onAddUser: openAddGroupMembersDialog,\n};\n\nexport interface GroupDetailsPanelProps {\n    onContextMenu: (event: React.MouseEvent<HTMLElement>, item: any) => void;\n    onAddUser: () => void;\n    userUuid: string;\n    group: GroupResource | undefined;\n}\n\ntype GroupDetailsPanelState = {\n    value: number;\n    groupCanManage: boolean;\n}\n\nexport const GroupDetailsPanel = withStyles(styles)(connect(\n    mapStateToProps, mapDispatchToProps\n)(\n    class GroupDetailsPanel extends React.Component<GroupDetailsPanelProps & WithStyles<CssRules>> {\n        state: GroupDetailsPanelState = {\n            value: 0,\n            groupCanManage: false,\n        };\n\n        componentDidMount() {\n            this.setState({ value: 0 });\n        }\n\n        shouldComponentUpdate(nextProps: Readonly<GroupDetailsPanelProps>, nextState: Readonly<GroupDetailsPanelState>, nextContext: any): boolean {\n            return this.props.group !== nextProps.group\n                || this.state.value !== nextState.value\n                || this.state.groupCanManage !== nextState.groupCanManage;\n        }\n\n        componentDidUpdate(prevProps: Readonly<GroupDetailsPanelProps>, prevState: Readonly<{}>, snapshot?: any): void {\n            if (this.props.group && (prevProps.userUuid!== this.props.userUuid || prevProps.group !== this.props.group)) {\n                this.setState({ groupCanManage: this.groupCanManage(this.props.userUuid, this.props.group) });\n            }\n        }\n\n        groupCanManage = (userUuid: string, group: GroupResource) => {\n            return userUuid && !isBuiltinGroup(group?.uuid || '') ? 
group.canManage : false\n        }\n\n        render() {\n            const { value } = this.state;\n            return (\n                <Paper className={this.props.classes.root}>\n                    <Tabs value={value} onChange={this.handleChange} variant=\"fullWidth\">\n                        <Tab data-cy=\"group-details-members-tab\" label=\"MEMBERS\" />\n                        <Tab data-cy=\"group-details-permissions-tab\" label=\"PERMISSIONS\" />\n                    </Tabs>\n                    <div className={this.props.classes.content}>\n                        {value === 0 &&\n                            <DataExplorer\n                                id={GROUP_DETAILS_MEMBERS_PANEL_ID}\n                                data-cy=\"group-members-data-explorer\"\n                                onRowClick={noop}\n                                onRowDoubleClick={noop}\n                                onContextMenu={noop}\n                                contextMenuColumn={false}\n                                defaultViewIcon={UserPanelIcon}\n                                defaultViewMessages={[MEMBERS_DEFAULT_MESSAGE]}\n                                hideColumnSelector\n                                hideSearchInput\n                                actions={\n                                    this.state.groupCanManage &&\n                                    <Grid container justifyContent='flex-end'>\n                                        <Button\n                                            data-cy=\"group-member-add\"\n                                            variant=\"contained\"\n                                            color=\"primary\"\n                                            onClick={this.props.onAddUser}>\n                                            <AddIcon /> Add user\n                                        </Button>\n                                    </Grid>\n                                }\n                                paperProps={{\n                                    elevation: 0,\n                                }} />\n                        }\n                        {value === 1 &&\n                            <DataExplorer\n                                id={GROUP_DETAILS_PERMISSIONS_PANEL_ID}\n                                data-cy=\"group-permissions-data-explorer\"\n                                onRowClick={noop}\n                                onRowDoubleClick={noop}\n                                onContextMenu={noop}\n                                contextMenuColumn={false}\n                                defaultViewIcon={KeyIcon}\n                                defaultViewMessages={[PERMISSIONS_DEFAULT_MESSAGE]}\n                                hideColumnSelector\n                                hideSearchInput\n                                paperProps={{\n                                    elevation: 0,\n                                }} />\n                        }\n                    </div>\n                </Paper>\n            );\n        }\n\n        handleChange = (event: React.MouseEvent<HTMLElement>, value: number) => {\n            this.setState({ value });\n        }\n    }));\n"
  },
  {
    "path": "services/workbench2/src/views/groups-panel/groups-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DataColumns, SortDirection } from 'components/data-table/data-column';\nimport { GroupMembersCount, ResourceUuid } from 'views-components/data-explorer/renderers';\nimport { ResourceName } from 'views-components/data-explorer/renderers';\nimport { createTree } from 'models/tree';\nimport { GroupResource } from 'models/group';\n\n\nexport enum GroupsPanelColumnNames {\n    GROUP = \"Name\",\n    UUID = \"UUID\",\n    MEMBERS = \"Members\"\n}\n\nexport const groupsPanelColumns: DataColumns<string, GroupResource> = [\n    {\n        name: GroupsPanelColumnNames.GROUP,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.ASC, field: \"name\" },\n        filters: createTree(),\n        render: uuid => <ResourceName uuid={uuid} />\n    },\n    {\n        name: GroupsPanelColumnNames.UUID,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceUuid uuid={uuid} />,\n    },\n    {\n        name: GroupsPanelColumnNames.MEMBERS,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <GroupMembersCount uuid={uuid} />,\n    },\n];\n"
  },
  {
    "path": "services/workbench2/src/views/groups-panel/groups-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid, Button } from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { AddIcon } from 'components/icon/icon';\nimport { GROUPS_PANEL_ID, openCreateGroupDialog } from 'store/groups-panel/groups-panel-actions';\nimport { noop } from 'lodash/fp';\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\nimport { getResource, ResourcesState } from 'store/resources/resources';\nimport { GroupResource, isBuiltinGroup } from 'models/group';\nimport { RootState } from 'store/store';\nimport { openContextMenuAndSelect } from 'store/context-menu/context-menu-actions';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { toggleOne } from 'store/multiselect/multiselect-actions';\n\ntype CssRules = \"root\" | \"button\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        boxShadow: \"0px 1px 3px 0px rgb(0 0 0 / 20%), 0px 1px 1px 0px rgb(0 0 0 / 14%), 0px 2px 1px -1px rgb(0 0 0 / 12%)\",\n    },\n    button: {\n        marginLeft: theme.spacing(2),\n    }\n});\n\nconst mapStateToProps = (state: RootState) => {\n    return {\n        resources: state.resources\n    };\n};\n\nconst mapDispatchToProps = (dispatch: any) => {\n    return {\n        onContextMenu: (ev, resource) => dispatch(openContextMenuAndSelect(ev, resource)),\n        onNewGroup: () => dispatch(openCreateGroupDialog()),\n        handleRowClick: (uuid: string) => {\n            dispatch(toggleOne(uuid))\n        }\n    };\n};\n\nexport interface GroupsPanelProps {\n    onNewGroup: () => void;\n    handleRowClick: (uuid: string) => void;\n    onContextMenu: (event: React.MouseEvent<HTMLElement>, item: any) => void;\n    resources: ResourcesState;\n}\n\nexport const GroupsPanel = withStyles(styles)(connect(\n    mapStateToProps, mapDispatchToProps\n)(\n    class GroupsPanel extends React.Component<GroupsPanelProps & WithStyles<CssRules>> {\n\n        render() {\n            return (\n                <div className={this.props.classes.root}>\n                    <DataExplorer\n                        id={GROUPS_PANEL_ID}\n                        data-cy=\"groups-panel-data-explorer\"\n                        onRowClick={this.props.handleRowClick}\n                        onRowDoubleClick={noop}\n                        onContextMenu={this.handleContextMenu}\n                        contextMenuColumn={false}\n                        hideColumnSelector\n                        actions={\n                            <Grid container justifyContent='flex-end'>\n                                <Button\n                                    className={this.props.classes.button}\n                                    data-cy=\"groups-panel-new-group\"\n                                    variant=\"contained\"\n                                    color=\"primary\"\n                                    onClick={this.props.onNewGroup}>\n                                    <AddIcon /> New group\n                                </Button>\n                            </Grid>\n                        } />\n                    </div>\n            );\n        
}\n\n        handleContextMenu = (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => {\n            const resource = getResource<GroupResource>(resourceUuid)(this.props.resources);\n            if (resource) {\n                this.props.onContextMenu(event, {\n                    name: resource.name,\n                    uuid: resource.uuid,\n                    description: resource.description,\n                    ownerUuid: resource.ownerUuid,\n                    kind: resource.kind,\n                    menuKind: isBuiltinGroup(resource.uuid) ? ContextMenuKind.BUILT_IN_GROUP : ContextMenuKind.GROUPS,\n                });\n            }\n        }\n    }));\n"
  },
  {
    "path": "services/workbench2/src/views/inactive-panel/inactive-panel.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomTheme } from 'common/custom-theme';\nimport { InactivePanelRoot } from './inactive-panel';\nimport { ThemeProvider, StyledEngineProvider } from '@mui/material';\n\ndescribe('InactivePanel', () => {\n    let props;\n\n    beforeEach(() => {\n        props = {\n            classes: {\n                root: 'root',\n                title: 'title',\n                ontop: 'ontop',\n            },\n            loginCluster: 'foo',\n            inactivePageText: 'Inactive page content',\n        };\n    });\n\n    it('should render content and link account option', () => {\n        // given\n        const expectedMessage = \"Inactive page content\";\n        const expectedLinkAccountText = 'If you would like to use this login to access another account click \"Link Account\"';\n\n        // when\n        cy.mount(\n            <StyledEngineProvider injectFirst>\n                <ThemeProvider theme={CustomTheme}>\n                    <InactivePanelRoot {...props} />\n                </ThemeProvider>\n            </StyledEngineProvider>\n            );\n\n        // then\n        cy.get('p').eq(0).contains(expectedMessage);\n        cy.get('p').eq(1).contains(expectedLinkAccountText);\n    })\n\n    it('should render content and link account warning on LoginCluster federations', () => {\n        // given\n        props.loginCluster = \"\";\n        const expectedMessage = \"Inactive page content\";\n        const expectedLinkAccountText = 'If you would like to use this login to access another account, please contact your administrator';\n\n        // when\n        cy.mount(\n            <StyledEngineProvider injectFirst>\n                <ThemeProvider theme={CustomTheme}>\n                    <InactivePanelRoot {...props} />\n                </ThemeProvider>\n            </StyledEngineProvider>\n            );\n\n        // then\n        cy.get('p').eq(0).contains(expectedMessage);\n        cy.get('p').eq(1).contains(expectedLinkAccountText);\n    })\n});"
  },
  {
    "path": "services/workbench2/src/views/inactive-panel/inactive-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport { Grid, Typography, Button } from '@mui/material';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { navigateToLinkAccount } from 'store/navigation/navigation-action';\nimport { RootState } from 'store/store';\nimport { sanitizeHTML } from 'common/html-sanitize';\n\nexport type CssRules = 'root' | 'ontop' | 'title';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        position: 'relative',\n        backgroundColor: theme.palette.grey[\"200\"],\n        background: 'url(\"arvados-logo-big.png\") no-repeat center center',\n        backgroundBlendMode: 'soft-light',\n    },\n    ontop: {\n        zIndex: 10\n    },\n    title: {\n        marginBottom: theme.spacing(6),\n        color: theme.palette.grey[\"800\"]\n    }\n});\n\nexport interface InactivePanelActionProps {\n    startLinking: () => void;\n}\n\nconst mapDispatchToProps = (dispatch: Dispatch): InactivePanelActionProps => ({\n    startLinking: () => {\n        dispatch<any>(navigateToLinkAccount);\n    }\n});\n\nconst mapStateToProps = (state: RootState): InactivePanelStateProps => ({\n    inactivePageText: state.auth.config.clusterConfig.Workbench.InactivePageHTML,\n    loginCluster: state.auth.config.clusterConfig.Login.LoginCluster,\n});\n\nexport interface InactivePanelStateProps {\n    inactivePageText: string;\n    loginCluster: string;\n}\n\ntype InactivePanelProps = WithStyles<CssRules> & InactivePanelActionProps & InactivePanelStateProps;\n\nexport const InactivePanelRoot = ({ classes, startLinking, inactivePageText, loginCluster }: InactivePanelProps) =>{\n    const isLoginClusterFederation = loginCluster === \"\";\n    return <Grid container justifyContent=\"center\" alignItems=\"center\" direction=\"column\" spacing={3}\n        className={classes.root}\n        style={{ marginTop: 56, height: \"100%\" }}>\n        <Grid item>\n            <Typography>\n                <span dangerouslySetInnerHTML={{ __html: sanitizeHTML(inactivePageText) }} style={{ margin: \"1em\" }} />\n            </Typography>\n        </Grid>\n        { !isLoginClusterFederation\n        ? <><Grid item>\n            <Typography align=\"center\">\n            If you would like to use this login to access another account click \"Link Account\".\n            </Typography>\n        </Grid>\n        <Grid item>\n            <Button className={classes.ontop} color=\"primary\" variant=\"contained\" onClick={() => startLinking()}>\n                Link Account\n            </Button>\n        </Grid></>\n        : <><Grid item>\n            <Typography align=\"center\">\n                If you would like to use this login to access another account, please contact your administrator.\n            </Typography>\n        </Grid></> }\n    </Grid >\n};\n\nexport const InactivePanel = connect(mapStateToProps, mapDispatchToProps)(\n    withStyles(styles)(InactivePanelRoot));\n"
  },
  {
    "path": "services/workbench2/src/views/instance-types-panel/instance-types-panel.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { InstanceTypesPanel, calculateKeepBufferOverhead, discountRamByPercent } from './instance-types-panel';\nimport {\n    ThemeProvider,\n    StyledEngineProvider,\n} from \"@mui/material\";\nimport { CustomTheme } from 'common/custom-theme';\nimport { combineReducers, createStore } from \"redux\";\nimport { Provider } from \"react-redux\";\nimport { formatFileSize, formatCWLResourceSize } from 'common/formatters';\n\ndescribe('<InstanceTypesPanel />', () => {\n    let store;\n\n    const initialAuthState = {\n        config: {\n            clusterConfig: {\n                InstanceTypes: {\n                    \"normalType\" : {\n                        ProviderType: \"provider\",\n                        Price: 0.123,\n                        VCPUs: 6,\n                        Preemptible: false,\n                        IncludedScratch: 1000,\n                        RAM: 5000,\n                    },\n                    \"gpuType\" : {\n                        ProviderType: \"gpuProvider\",\n                        Price: 0.456,\n                        VCPUs: 8,\n                        Preemptible: true,\n                        IncludedScratch: 500,\n                        RAM: 6000,\n                        GPU: {\n                            DeviceCount: 1,\n                            HardwareTarget: '8.6',\n                            DriverVersion: '11.4',\n                        },\n                    },\n                },\n                Containers: {\n                    ReserveExtraRAM: 1000,\n                }\n            }\n        }\n    }\n\n    beforeEach(() => {\n        store = createStore(combineReducers({\n            auth: (state = initialAuthState, action) => state,\n        }));\n    });\n\n    it('renders instance types', () => {\n        // when\n        cy.mount(\n            <Provider store={store}>\n                <StyledEngineProvider injectFirst>\n                    <ThemeProvider theme={CustomTheme}>\n                        <InstanceTypesPanel />\n                    </ThemeProvider>\n                </StyledEngineProvider>\n            </Provider>);\n\n        // then\n        Object.keys(initialAuthState.config.clusterConfig.InstanceTypes).forEach((instanceKey) => {\n            const instanceType = initialAuthState.config.clusterConfig.InstanceTypes[instanceKey];\n            cy.get(`[data-cy=\"${instanceKey}\"]`).as('item');\n\n            cy.get('@item').find('h6').contains(instanceKey);\n            cy.get('@item').contains(`Provider type${instanceType.ProviderType}`);\n            cy.get('@item').contains(`Price$${instanceType.Price}`);\n            cy.get('@item').contains(`Cores${instanceType.VCPUs}`);\n            cy.get('@item').contains(`Preemptible${instanceType.Preemptible.toString()}`);\n            cy.get('@item').contains(`Max disk request${formatCWLResourceSize(instanceType.IncludedScratch)} (${formatFileSize(instanceType.IncludedScratch)})`);\n            if (instanceType.GPU && instanceType.GPU.DeviceCount > 0) {\n                cy.get('@item').contains(`GPUs${instanceType.GPU.DeviceCount}`);\n                cy.get('@item').contains(`Hardware target${instanceType.GPU.HardwareTarget}`);\n                cy.get('@item').contains(`Driver version${instanceType.GPU.DriverVersion}`);\n            }\n        });\n    });\n});\n\ndescribe('calculateKeepBufferOverhead', () => {\n   
 it('should calculate correct buffer size', () => {\n        const testCases = [\n            {input: 0, output: (220<<20)},\n            {input: 1, output: (220<<20) + ((1<<26) * (11/10))},\n            {input: 2, output: (220<<20) + 2*((1<<26) * (11/10))},\n        ];\n\n        for (const {input, output} of testCases) {\n            expect(calculateKeepBufferOverhead(input)).to.equal(output);\n        }\n    });\n});\n\ndescribe('discountRamByPercent', () => {\n    it('should inflate ram requirement by 5% of final amount', () => {\n        const testCases = [\n            {input: 0, output: 0},\n            {input: 114, output: 120},\n        ];\n\n        for (const {input, output} of testCases) {\n            expect(discountRamByPercent(input)).to.equal(output);\n        }\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/views/instance-types-panel/instance-types-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Card, CardContent, Typography, Grid } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { ResourceIcon } from 'components/icon/icon';\nimport { RootState } from 'store/store';\nimport { connect } from 'react-redux';\nimport { ClusterConfigJSON } from 'common/config';\nimport { NotFoundView } from 'views/not-found-panel/not-found-panel';\nimport { formatCWLResourceSize, formatCost, formatFileSize } from 'common/formatters';\nimport { DetailsAttribute } from 'components/details-attribute/details-attribute';\nimport { DefaultCodeSnippet } from 'components/default-code-snippet/default-code-snippet';\n\ntype CssRules = 'root' | 'infoBox' | 'instanceType';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n       width: \"calc(100% + 20px)\",\n       margin: \"0 -10px\",\n       overflow: 'auto'\n    },\n    infoBox: {\n        padding: \"0 10px 10px\",\n    },\n    instanceType: {\n        padding: \"10px\",\n    },\n});\n\ntype InstanceTypesPanelConnectedProps = {config: ClusterConfigJSON};\n\ntype InstanceTypesPanelRootProps = InstanceTypesPanelConnectedProps & WithStyles<CssRules>;\n\nconst mapStateToProps = ({auth}: RootState): InstanceTypesPanelConnectedProps => ({\n    config: auth.config.clusterConfig,\n});\n\nexport const InstanceTypesPanel = withStyles(styles)(connect(mapStateToProps)(\n    ({ config, classes }: InstanceTypesPanelRootProps) => {\n\n        const instances = config.InstanceTypes || {};\n\n        return <Grid className={classes.root} container direction=\"row\">\n            <Grid className={classes.infoBox} item xs={12}>\n                <Card>\n                    <CardContent>\n                        <Typography variant=\"body2\">\n                            These are the cloud compute instance types\n                            configured for this cluster. 
The core count and\n                            maximum RAM request correspond to the greatest\n                            values you can put in the CWL Workflow\n                            ResourceRequirement{\" \"}\n                            <DefaultCodeSnippet\n                                inline\n                                lines={[\"coresMin\"]}\n                            />{\" \"}\n                            and{\" \"}\n                            <DefaultCodeSnippet inline lines={[\"ramMin\"]} />{\" \"}\n                            and still be scheduled on that instance type.\n                        </Typography>\n                    </CardContent>\n                </Card>\n            </Grid>\n            {Object.keys(instances).length > 0 ?\n                                             Object.keys(instances)\n                                                   .sort((a, b) => {\n                                                       const typeA = instances[a];\n                                                       const typeB = instances[b];\n\n                                                       if (typeA.Price !== typeB.Price) {\n                                                           return typeA.Price - typeB.Price;\n                                                       } else {\n                                                           return typeA.ProviderType.localeCompare(typeB.ProviderType);\n                                                       }\n                                                   }).map((instanceKey) => {\n                                                       const instanceType = instances[instanceKey];\n                                                       const maxDiskRequest = instanceType.IncludedScratch;\n                                                       const keepBufferOverhead = calculateKeepBufferOverhead(instanceType.VCPUs);\n                                                       const maxRamRequest = discountRamByPercent(instanceType.RAM - config.Containers.ReserveExtraRAM - keepBufferOverhead);\n\n                                                       return <Grid data-cy={instanceKey} className={classes.instanceType} item sm={6} xs={12} key={instanceKey}>\n                                                           <Card>\n                                                               <CardContent>\n                                                                   <Typography variant=\"h6\">\n                                                                       {instanceKey}\n                                                                   </Typography>\n                                                                   <Grid item xs={12}>\n                                                                       <DetailsAttribute label=\"Provider type\" value={instanceType.ProviderType} />\n                                                                   </Grid>\n                                                                   <Grid item xs={12}>\n                                                                       <DetailsAttribute label=\"Price\" value={formatCost(instanceType.Price)} />\n                                                                   </Grid>\n                                                                   <Grid item xs={12}>\n                                                                       <DetailsAttribute label=\"Cores\" value={instanceType.VCPUs} />\n                        
                                           </Grid>\n                                                                   <Grid item xs={12}>\n                                                                       <DetailsAttribute label=\"Max RAM request\" value={`${formatCWLResourceSize(maxRamRequest)} (${formatFileSize(maxRamRequest)})`} />\n                                                                   </Grid>\n                                                                   <Grid item xs={12}>\n                                                                       <DetailsAttribute label=\"Max disk request\" value={`${formatCWLResourceSize(maxDiskRequest)} (${formatFileSize(maxDiskRequest)})`} />\n                                                                   </Grid>\n                                                                   <Grid item xs={12}>\n                                                                       <DetailsAttribute label=\"Preemptible\" value={instanceType.Preemptible.toString()} />\n                                                                   </Grid>\n                                                                   {instanceType.GPU && instanceType.GPU.DeviceCount > 0 ? <>\n                                                                       <Grid item xs={12}>\n                                                                           <DetailsAttribute label=\"Stack\" value={instanceType.GPU.Stack} />\n                                                                       </Grid>\n                                                                   <Grid item xs={12}>\n                                                                       <DetailsAttribute label=\"VRAM\" value={instanceType.GPU.VRAM} />\n                                                                   </Grid>\n                                                                   <Grid item xs={12}>\n                                                                       <DetailsAttribute label=\"GPUs\" value={instanceType.GPU.DeviceCount} />\n                                                                   </Grid>\n                                                                   <Grid item xs={12}>\n                                                                       <DetailsAttribute label=\"Hardware target\" value={instanceType.GPU.HardwareTarget} />\n                                                                   </Grid>\n                                                                   <Grid item xs={12}>\n                                                                       <DetailsAttribute label=\"Driver version\" value={instanceType.GPU.DriverVersion} />\n                                                                   </Grid>\n                                                                   </> : <></>\n                                                                   }\n                                                               </CardContent>\n                                                           </Card>\n                                                       </Grid>;\n                                                   }) :\n                                             <NotFoundView\n                                                 icon={ResourceIcon}\n                                                 messages={[\"No instances found\"]}\n                                             />\n            }\n        </Grid>;\n    }\n));\n\nexport 
const calculateKeepBufferOverhead = (coreCount: number): number => {\n    // TODO replace with exported server config\n    const buffersPerVCPU = 1;\n\n    // Returns a 220 MiB base overhead plus (64 MiB + 10%) per buffer\n    return (220 << 20) + (buffersPerVCPU * coreCount * (1 << 26) * (11/10));\n};\n\nexport const discountRamByPercent = (requestedRamBytes: number): number => {\n    // TODO replace this with exported server config or remove when no longer\n    // used by server in ram calculation\n    const discountPercent = 5;\n\n    // Scales the value up so that discounting the result by 5% yields the input\n    return requestedRamBytes * 100 / (100-discountPercent);\n};\n"
  },
  {
    "path": "services/workbench2/src/views/keep-service-panel/keep-service-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport {\n    Card,\n    CardContent,\n    Grid,\n    Table,\n    TableHead,\n    TableRow,\n    TableCell,\n    TableBody,\n    Tooltip,\n    IconButton,\n    Checkbox,\n} from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { MoreVerticalIcon } from 'components/icon/icon';\nimport { KeepServiceResource } from 'models/keep-services';\n\ntype CssRules = 'root' | 'tableRow';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        overflow: 'auto'\n    },\n    tableRow: {\n        '& td, th': {\n            whiteSpace: 'nowrap'\n        }\n    }\n});\n\nexport interface KeepServicePanelRootActionProps {\n    openRowOptions: (event: React.MouseEvent<HTMLElement>, keepService: KeepServiceResource) => void;\n}\n\nexport interface KeepServicePanelRootDataProps {\n    keepServices: KeepServiceResource[];\n}\n\ntype KeepServicePanelRootProps = KeepServicePanelRootActionProps & KeepServicePanelRootDataProps & WithStyles<CssRules>;\n\nexport const KeepServicePanelRoot = withStyles(styles)(\n    ({ classes, keepServices, openRowOptions }: KeepServicePanelRootProps) => {\n        const hasKeepSerices = keepServices.length > 0;\n        return <Card className={classes.root}>\n            <CardContent>\n                {hasKeepSerices && <Grid container direction=\"row\">\n                    <Grid item xs={12}>\n                        <Table>\n                            <TableHead>\n                                <TableRow className={classes.tableRow}>\n                                    <TableCell>UUID</TableCell>\n                                    <TableCell>Read only</TableCell>\n                                    <TableCell>Service host</TableCell>\n                                    <TableCell>Service port</TableCell>\n                                    <TableCell>Service SSL flag</TableCell>\n                                    <TableCell>Service type</TableCell>\n                                    <TableCell />\n                                </TableRow>\n                            </TableHead>\n                            <TableBody>\n                                {keepServices.map((keepService, index) =>\n                                    <TableRow key={index} className={classes.tableRow}>\n                                        <TableCell>{keepService.uuid}</TableCell>\n                                        <TableCell>\n                                            <Checkbox\n                                                disableRipple\n                                                color=\"primary\"\n                                                checked={keepService.readOnly} />\n                                        </TableCell>\n                                        <TableCell>{keepService.serviceHost}</TableCell>\n                                        <TableCell>{keepService.servicePort}</TableCell>\n                                        <TableCell>\n                                            <Checkbox\n                                                disableRipple\n                                                color=\"primary\"\n                             
                   checked={keepService.serviceSslFlag} />\n                                        </TableCell>\n                                        <TableCell>{keepService.serviceType}</TableCell>\n                                        <TableCell>\n                                            <Tooltip title=\"More options\" disableFocusListener>\n                                                <IconButton onClick={event => openRowOptions(event, keepService)} size=\"large\">\n                                                    <MoreVerticalIcon />\n                                                </IconButton>\n                                            </Tooltip>\n                                        </TableCell>\n                                    </TableRow>)}\n                            </TableBody>\n                        </Table>\n                    </Grid>\n                </Grid>}\n            </CardContent>\n        </Card>\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/views/keep-service-panel/keep-service-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport {\n    KeepServicePanelRoot,\n    KeepServicePanelRootDataProps,\n    KeepServicePanelRootActionProps\n} from 'views/keep-service-panel/keep-service-panel-root';\nimport { openKeepServiceContextMenu } from 'store/context-menu/context-menu-actions';\n\nconst mapStateToProps = (state: RootState): KeepServicePanelRootDataProps => {\n    return {\n        keepServices: state.keepServices,\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): KeepServicePanelRootActionProps => ({\n    openRowOptions: (event, keepService) => {\n        dispatch<any>(openKeepServiceContextMenu(event, keepService));\n    }\n});\n\nexport const KeepServicePanel = connect(mapStateToProps, mapDispatchToProps)(KeepServicePanelRoot);"
  },
  {
    "path": "services/workbench2/src/views/link-account-panel/link-account-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Card, CardContent, Button, Grid, Select, CircularProgress } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { UserResource } from \"models/user\";\nimport { LinkAccountType } from \"models/link-account\";\nimport { formatDateTime } from \"common/formatters\";\nimport { LinkAccountPanelStatus, LinkAccountPanelError } from \"store/link-account-panel/link-account-panel-reducer\";\nimport { Config } from 'common/config';\n\ntype CssRules = 'root';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        overflow: 'auto',\n        display: 'flex'\n    }\n});\n\nexport interface LinkAccountPanelRootDataProps {\n    targetUser?: UserResource;\n    userToLink?: UserResource;\n    remoteHostsConfig: { [key: string]: Config };\n    hasRemoteHosts: boolean;\n    localCluster: string;\n    loginCluster: string;\n    status: LinkAccountPanelStatus;\n    error: LinkAccountPanelError;\n    selectedCluster?: string;\n    isProcessing: boolean;\n}\n\nexport interface LinkAccountPanelRootActionProps {\n    startLinking: (type: LinkAccountType) => void;\n    cancelLinking: () => void;\n    linkAccount: () => void;\n    setSelectedCluster: (cluster: string) => void;\n}\n\nfunction displayUser(user: UserResource, showCreatedAt: boolean = false, showCluster: boolean = false) {\n    const disp: JSX.Element[] = [];\n    disp.push(<span><b>{user.email}</b> ({user.username}, {user.uuid})</span>);\n    if (showCluster) {\n        const homeCluster = user.uuid.substring(0, 5);\n        disp.push(<span> hosted on cluster <b>{homeCluster}</b> and </span>);\n    }\n    if (showCreatedAt) {\n        disp.push(<span> created on <b>{formatDateTime(user.createdAt)}</b></span>);\n    }\n    return disp;\n}\n\nfunction isLocalUser(uuid: string, localCluster: string) {\n    return uuid.substring(0, 5) === localCluster;\n}\n\ntype LinkAccountPanelRootProps = LinkAccountPanelRootDataProps & LinkAccountPanelRootActionProps & WithStyles<CssRules>;\n\nexport const LinkAccountPanelRoot = withStyles(styles)(\n    ({ classes, targetUser, userToLink, status, isProcessing, error, startLinking, cancelLinking, linkAccount,\n        remoteHostsConfig, hasRemoteHosts, selectedCluster, setSelectedCluster, localCluster, loginCluster }: LinkAccountPanelRootProps) => {\n\n        // If a LoginFederation is configured, the self-serve account linking is not\n        // currently available.\n        if (loginCluster !== \"\") {\n            return <Card className={classes.root}><CardContent>\n                <Grid container spacing={2}>\n                    <Grid item xs={12}>\n                        If you would like to link this account to another one, please contact your administrator.\n                    </Grid>\n                </Grid>\n            </CardContent></Card>;\n        }\n        return (\n            <Card className={classes.root}><CardContent>\n                { isProcessing && <Grid container item direction=\"column\" alignContent=\"center\" spacing={3}>\n                    <Grid item>\n                        Loading user info. 
Please wait.\n                    </Grid>\n                    <Grid item style={{ alignSelf: 'center' }}>\n                        <CircularProgress />\n                    </Grid>\n                </Grid> }\n\n                { !isProcessing && status === LinkAccountPanelStatus.INITIAL && targetUser && <div>\n                    { isLocalUser(targetUser.uuid, localCluster)\n                    ? <Grid container spacing={3}>\n                        <Grid container item direction=\"column\" spacing={3}>\n                            <Grid item>\n                                You are currently logged in as {displayUser(targetUser, true)}\n                            </Grid>\n                            <Grid item>\n                                You can link Arvados accounts. After linking, either login will take you to the same account.\n                            </Grid >\n                        </Grid>\n                        <Grid container item direction=\"row\" spacing={3}>\n                            <Grid item>\n                                <Button disabled={!targetUser.isActive} color=\"primary\" variant=\"contained\" onClick={() => startLinking(LinkAccountType.ADD_OTHER_LOGIN)}>\n                                    Add another login to this account\n                                </Button>\n                            </Grid>\n                            <Grid item>\n                                <Button color=\"primary\" variant=\"contained\" onClick={() => startLinking(LinkAccountType.ACCESS_OTHER_ACCOUNT)}>\n                                    Use this login to access another account\n                                </Button>\n                            </Grid>\n                        </Grid>\n                        {hasRemoteHosts && selectedCluster && <Grid container item direction=\"column\" spacing={3}>\n                            <Grid item>\n                                You can also link {displayUser(targetUser, false)} with an account from a remote cluster.\n                            </Grid>\n                            <Grid item>\n                                Please select the cluster that hosts the account you want to link with:\n                                <Select\n                                    variant=\"standard\"\n                                    id=\"remoteHostsDropdown\"\n                                    native\n                                    defaultValue={selectedCluster}\n                                    style={{ marginLeft: \"1em\" }}\n                                    onChange={(event) => setSelectedCluster(event.target.value)}>\n                                    {Object.keys(remoteHostsConfig).map((k) => k !== localCluster ? <option key={k} value={k}>{k}</option> : null)}\n                                </Select>\n                            </Grid>\n                            <Grid item>\n                                <Button color=\"primary\" variant=\"contained\" onClick={() => startLinking(LinkAccountType.ACCESS_OTHER_REMOTE_ACCOUNT)}>\n                                    Link with an account on&nbsp;{hasRemoteHosts ? 
<label>{selectedCluster} </label> : null}\n                                </Button>\n                            </Grid>\n                        </Grid>}\n                    </Grid>\n                    : <Grid container spacing={3}>\n                        <Grid container item direction=\"column\" spacing={3}>\n                            <Grid item>\n                                You are currently logged in as {displayUser(targetUser, true, true)}\n                            </Grid>\n                            { targetUser.isActive\n                            ? (loginCluster === \"\"\n                                ? <> <Grid item>\n                                    This is a remote account. You can link a local Arvados account to this one.\n                                    After linking, you can access the local account's data by logging into the\n                                    <b>{localCluster}</b> cluster as user <b>{targetUser.email}</b>\n                                    from <b>{targetUser.uuid.substring(0, 5)}</b>.\n                                </Grid>\n                                <Grid item>\n                                    <Button color=\"primary\" variant=\"contained\" onClick={() => startLinking(LinkAccountType.ADD_LOCAL_TO_REMOTE)}>\n                                        Link an account from {localCluster} to this account\n                                    </Button>\n                                </Grid></>\n                                : <Grid item>Please visit cluster\n                                    <a href={remoteHostsConfig[loginCluster].workbench2Url + \"/link_account\"}>{loginCluster}</a> to perform account linking.\n                                </Grid> )\n                            : <Grid item>\n                                This is an inactive remote account. An administrator must activate your\n                                account before you can proceed.  After your account is activated,\n                                you can link a local Arvados account hosted by the <b>{localCluster}</b> cluster to this one.\n                            </Grid> }\n                        </Grid>\n                    </Grid> }\n                </div> }\n\n                {!isProcessing && (status === LinkAccountPanelStatus.LINKING || status === LinkAccountPanelStatus.ERROR) && userToLink && targetUser &&\n                    <Grid container spacing={3}>\n                        {status === LinkAccountPanelStatus.LINKING && <Grid container item direction=\"column\" spacing={3}>\n                            <Grid item>\n                                Clicking 'Link accounts' will link {displayUser(userToLink, true, !isLocalUser(targetUser.uuid, localCluster))} to {displayUser(targetUser, true, !isLocalUser(targetUser.uuid, localCluster))}.\n                            </Grid>\n                            {(isLocalUser(targetUser.uuid, localCluster)) && <Grid item>\n                                After linking, logging in as {displayUser(userToLink)} will log you into the same account as {displayUser(targetUser)}.\n                            </Grid>}\n                            <Grid item>\n                                Any object owned by {displayUser(userToLink)} will be transferred to {displayUser(targetUser)}.\n                            </Grid>\n                            {!isLocalUser(targetUser.uuid, localCluster) && <Grid item>\n                                You can access <b>{userToLink.email}</b> data by logging into <b>{localCluster}</b> with the <b>{targetUser.email}</b> account.\n                            </Grid>}\n                        </Grid>}\n                        {error === LinkAccountPanelError.NON_ADMIN && <Grid item>\n                            Cannot link admin account {displayUser(userToLink)} to non-admin account {displayUser(targetUser)}.\n                        </Grid>}\n                        {error === LinkAccountPanelError.SAME_USER && <Grid item>\n                            Cannot link {displayUser(targetUser)} to the same account.\n                        </Grid>}\n                        {error === LinkAccountPanelError.INACTIVE && <Grid item>\n                            Cannot link account {displayUser(userToLink)} to inactive account {displayUser(targetUser)}.\n                        </Grid>}\n                        <Grid container item direction=\"row\" spacing={3}>\n                            <Grid item>\n                                <Button variant=\"contained\" onClick={() => cancelLinking()}>\n                                    Cancel\n                                </Button>\n                            </Grid>\n                            <Grid item>\n                                <Button disabled={status === LinkAccountPanelStatus.ERROR} color=\"primary\" variant=\"contained\" onClick={() => linkAccount()}>\n                                    Link accounts\n                                </Button>\n                            </Grid>\n                        </Grid>\n                    </Grid>}\n            </CardContent></Card>\n        );\n    });\n"
  },
  {
    "path": "services/workbench2/src/views/link-account-panel/link-account-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport { startLinking, linkAccount, linkAccountPanelActions, cancelLinking } from 'store/link-account-panel/link-account-panel-actions';\nimport { LinkAccountType } from 'models/link-account';\nimport {\n    LinkAccountPanelRoot,\n    LinkAccountPanelRootDataProps,\n    LinkAccountPanelRootActionProps\n} from 'views/link-account-panel/link-account-panel-root';\n\nconst mapStateToProps = (state: RootState): LinkAccountPanelRootDataProps => {\n    return {\n        remoteHostsConfig: state.auth.remoteHostsConfig,\n        hasRemoteHosts: Object.keys(state.auth.remoteHosts).length > 1 && state.auth.loginCluster === \"\",\n        selectedCluster: state.linkAccountPanel.selectedCluster,\n        localCluster: state.auth.localCluster,\n        loginCluster: state.auth.loginCluster,\n        targetUser: state.linkAccountPanel.targetUser,\n        userToLink: state.linkAccountPanel.userToLink,\n        status: state.linkAccountPanel.status,\n        error: state.linkAccountPanel.error,\n        isProcessing: state.linkAccountPanel.isProcessing\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): LinkAccountPanelRootActionProps => ({\n    startLinking: (type: LinkAccountType) => dispatch<any>(startLinking(type)),\n    cancelLinking: () => dispatch<any>(cancelLinking(true)),\n    linkAccount: () => dispatch<any>(linkAccount()),\n    setSelectedCluster: (selectedCluster: string) => dispatch<any>(linkAccountPanelActions.SET_SELECTED_CLUSTER({ selectedCluster }))\n});\n\nexport const LinkAccountPanel = connect(mapStateToProps, mapDispatchToProps)(LinkAccountPanelRoot);\n"
  },
  {
    "path": "services/workbench2/src/views/link-panel/link-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DataColumns, SortDirection } from 'components/data-table/data-column';\nimport { createTree } from 'models/tree';\nimport {\n    ResourceLinkUuid, ResourceLinkHead, ResourceLinkTail,\n    ResourceLinkClass, ResourceLinkName\n} from 'views-components/data-explorer/renderers';\nimport { LinkResource } from 'models/link';\n\n\nexport enum LinkPanelColumnNames {\n    NAME = \"Name\",\n    LINK_CLASS = \"Link Class\",\n    TAIL = \"Tail\",\n    HEAD = 'Head',\n    UUID = \"UUID\"\n}\n\nexport const linkPanelColumns: DataColumns<string, LinkResource> = [\n    {\n        name: LinkPanelColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"name\" },\n        filters: createTree(),\n        render: uuid => <ResourceLinkName uuid={uuid} />\n    },\n    {\n        name: LinkPanelColumnNames.LINK_CLASS,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkClass uuid={uuid} />\n    },\n    {\n        name: LinkPanelColumnNames.TAIL,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkTail uuid={uuid} />\n    },\n    {\n        name: LinkPanelColumnNames.HEAD,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkHead uuid={uuid} />\n    },\n    {\n        name: LinkPanelColumnNames.UUID,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkUuid uuid={uuid} />\n    }\n];\n"
  },
  {
    "path": "services/workbench2/src/views/link-panel/link-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { LINK_PANEL_ID } from 'store/link-panel/link-panel-actions';\nimport { DataExplorer } from 'views-components/data-explorer/data-explorer';\nimport { ResourcesState } from 'store/resources/resources';\nimport { ShareMeIcon } from 'components/icon/icon';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\n\ntype CssRules = \"root\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n    }\n});\n\nexport interface LinkPanelRootDataProps {\n    resources: ResourcesState;\n}\n\nexport interface LinkPanelRootActionProps {\n    onItemClick: (item: string) => void;\n    onContextMenu: (event: React.MouseEvent<HTMLElement>, item: string) => void;\n    onItemDoubleClick: (item: string) => void;\n}\n\nexport type LinkPanelRootProps = LinkPanelRootDataProps & LinkPanelRootActionProps & WithStyles<CssRules>;\n\nexport const LinkPanelRoot = withStyles(styles)((props: LinkPanelRootProps) => {\n    return <div className={props.classes.root}><DataExplorer\n        id={LINK_PANEL_ID}\n        onRowClick={props.onItemClick}\n        onRowDoubleClick={props.onItemDoubleClick}\n        onContextMenu={props.onContextMenu}\n        contextMenuColumn={true}\n        hideColumnSelector\n        hideSearchInput\n        defaultViewIcon={ShareMeIcon}\n        defaultViewMessages={['Your link list is empty.']} />\n    </div>;\n});\n"
  },
  {
    "path": "services/workbench2/src/views/link-panel/link-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { RootState } from 'store/store';\nimport {\n    openContextMenuAndSelect,\n} from 'store/context-menu/context-menu-actions';\nimport {\n    LinkPanelRoot,\n    LinkPanelRootActionProps,\n    LinkPanelRootDataProps\n} from 'views/link-panel/link-panel-root';\nimport { ResourceKind } from 'models/resource';\nimport { resourceToMenuKind } from 'common/resource-to-menu-kind';\n\nconst mapStateToProps = (state: RootState): LinkPanelRootDataProps => {\n    return {\n        resources: state.resources\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): LinkPanelRootActionProps => ({\n    onContextMenu: (event, resourceUuid) => {\n        const kind = dispatch<any>(resourceToMenuKind(resourceUuid));\n        if (kind) {\n            dispatch<any>(openContextMenuAndSelect(event, {\n                name: '',\n                uuid: resourceUuid,\n                ownerUuid: '',\n                kind: ResourceKind.LINK,\n                menuKind: kind\n            }));\n        }\n    },\n    onItemClick: (resourceUuid: string) => { return; },\n    onItemDoubleClick: uuid => { return; }\n});\n\nexport const LinkPanel = connect(mapStateToProps, mapDispatchToProps)(LinkPanelRoot);"
  },
  {
    "path": "services/workbench2/src/views/login-panel/login-panel.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { requirePasswordLogin } from './login-panel';\n\ndescribe('<LoginPanel />', () => {\n    describe('requirePasswordLogin', () => {\n        it('should return false if no config specified', () => {\n            // given\n            const config = null;\n  \n            // when\n            const result = requirePasswordLogin(config);\n  \n            // then\n            expect(!!result).to.equal(false);\n        });\n  \n        it('should return false if no config.clusterConfig specified', () => {\n            // given\n            const config = {};\n  \n            // when\n            const result = requirePasswordLogin(config);\n  \n            // then\n            expect(!!result).to.equal(false);\n  \n        });\n  \n        it('should return false if no config.clusterConfig.Login specified', () => {\n            // given\n            const config = {\n                clusterConfig: {},\n            };\n  \n            // when\n            const result = requirePasswordLogin(config);\n  \n            // then\n            expect(!!result).to.equal(false);\n        });\n  \n        it('should return false if no config.clusterConfig.Login.LDAP and config.clusterConfig.Login.PAM specified', () => {\n            // given\n            const config = {\n                clusterConfig: {\n                    Login: {}\n                },\n            };\n  \n            // when\n            const result = requirePasswordLogin(config);\n  \n            // then\n            expect(!!result).to.equal(false);\n        });\n  \n        it('should return false if config.clusterConfig.Login.LDAP.Enable and config.clusterConfig.Login.PAM.Enable not specified', () => {\n            // given\n            const config = {\n                clusterConfig: {\n                    Login: {\n                        PAM: {},\n                        LDAP: {},\n                    },\n                },\n            };\n  \n            // when\n            const result = requirePasswordLogin(config);\n  \n            // then\n            expect(!!result).to.equal(false);\n        });\n  \n        it('should return value from config.clusterConfig.Login.LDAP.Enable', () => {\n            // given\n            const config = {\n                clusterConfig: {\n                    Login: {\n                        PAM: {},\n                        LDAP: {\n                            Enable: true\n                        },\n                    },\n                },\n            };\n  \n            // when\n            const result = requirePasswordLogin(config);\n  \n            // then\n            expect(!!result).to.equal(true);\n        });\n  \n        it('should return value from config.clusterConfig.Login.PAM.Enable', () => {\n            // given\n            const config = {\n                clusterConfig: {\n                    Login: {\n                        LDAP: {},\n                        PAM: {\n                            Enable: true\n                        },\n                    },\n                },\n            };\n  \n            // when\n            const result = requirePasswordLogin(config);\n  \n            // then\n            expect(!!result).to.equal(true);\n  \n        });\n  \n        it('should return false for not specified config option config.clusterConfig.Login.NOT_EXISTING.Enable', () => {\n            // given\n            const config = {\n  
              clusterConfig: {\n                    Login: {\n                        NOT_EXISTING: {\n                            Enable: true\n                        },\n                    },\n                },\n            };\n  \n            // when\n            const result = requirePasswordLogin(config);\n  \n            // then\n            expect(!!result).to.equal(false);\n  \n        });\n    });\n  });"
  },
  {
    "path": "services/workbench2/src/views/login-panel/login-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useMemo} from 'react';\nimport { connect, DispatchProp } from 'react-redux';\nimport { Grid, Typography, Button, Select } from '@mui/material';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { login, authActions } from 'store/auth/auth-action';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { RootState } from 'store/store';\nimport { LoginForm } from 'views-components/login-form/login-form';\nimport Axios, { AxiosResponse } from 'axios';\nimport { Config } from 'common/config';\nimport { sanitizeHTML } from 'common/html-sanitize';\n\ntype CssRules = 'root' | 'container' | 'title' | 'content' | 'content__bolder' | 'button';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        position: 'relative',\n        backgroundColor: theme.palette.grey[\"200\"],\n        '&::after': {\n            content: `''`,\n            position: 'absolute',\n            top: 0,\n            left: 0,\n            bottom: 0,\n            right: 0,\n            opacity: 0.2,\n        }\n    },\n    container: {\n        width: '560px',\n        zIndex: 10\n    },\n    title: {\n        marginBottom: theme.spacing(6),\n        color: theme.palette.grey[\"800\"]\n    },\n    content: {\n        marginBottom: theme.spacing(3),\n        lineHeight: '1.2rem',\n        color: theme.palette.grey[\"800\"]\n    },\n    'content__bolder': {\n        fontWeight: 'bolder'\n    },\n    button: {\n        boxShadow: 'none'\n    }\n});\n\nexport type PasswordLoginResponse = {\n    uuid?: string;\n    api_token?: string;\n    message?: string;\n};\n\nconst doPasswordLogin = (url: string) => (username: string, password: string) => {\n    const formData: string[] = [];\n    formData.push('username='+encodeURIComponent(username));\n    formData.push('password='+encodeURIComponent(password));\n    return Axios.post<string, AxiosResponse<PasswordLoginResponse>>(`${url}/arvados/v1/users/authenticate`, formData.join('&'), {\n        headers: {\n            'Content-Type': 'application/x-www-form-urlencoded'\n        },\n    });\n};\n\ntype LoginPanelProps = DispatchProp<any> & WithStyles<CssRules> & {\n    remoteHosts: { [key: string]: string },\n    homeCluster: string,\n    localCluster: string,\n    loginCluster: string,\n    welcomePage: string,\n    config: Config,\n};\n\nconst loginOptions = ['LDAP', 'PAM', 'Test'];\n\nexport const requirePasswordLogin = (config: Config): boolean => {\n    if (config && config.clusterConfig && config.clusterConfig.Login) {\n        return loginOptions\n            .filter(loginOption => !!config.clusterConfig.Login[loginOption])\n            .map(loginOption => config.clusterConfig.Login[loginOption].Enable)\n            .find(enabled => enabled === true) || false;\n    }\n    return false;\n};\n\nexport const LoginPanel = withStyles(styles)(\n    connect((state: RootState) => ({\n        remoteHosts: state.auth.remoteHosts,\n        homeCluster: state.auth.homeCluster,\n        localCluster: state.auth.localCluster,\n        loginCluster: state.auth.loginCluster,\n        welcomePage: state.auth.config.clusterConfig.Workbench.WelcomePageHTML,\n        config: state.auth.remoteHostsConfig[state.auth.loginCluster || state.auth.homeCluster],\n        }))(({ classes, dispatch, 
remoteHosts, homeCluster, localCluster, loginCluster, welcomePage, config }: LoginPanelProps) => {\n        const passwordLogin = useMemo(() => requirePasswordLogin(config), [config]);\n        const loginBtnLabel = `Log in${(localCluster !== homeCluster && loginCluster !== homeCluster) ? \" to \"+localCluster+\" with user from \"+homeCluster : ''}`;\n\n        return (\n            <Grid container justifyContent=\"center\" alignItems=\"center\"\n                className={classes.root}\n                style={{ marginTop: 56, overflowY: \"auto\", height: \"100%\" }}>\n                <Grid item className={classes.container}>\n                    <Typography component=\"div\">\n                        <div dangerouslySetInnerHTML={{ __html: sanitizeHTML(welcomePage) }} style={{ margin: \"1em\" }} />\n                    </Typography>\n                    {Object.keys(remoteHosts).length > 1 && loginCluster === \"\" &&\n\n                        <Typography component=\"div\" align=\"right\">\n                            <label>Please select the cluster that hosts your user account:</label>\n                            <Select\n                                variant=\"standard\"\n                                native\n                                value={homeCluster}\n                                style={{ margin: \"1em\" }}\n                                onChange={(event) => dispatch(authActions.SET_HOME_CLUSTER(event.target.value))}>\n                                {Object.keys(remoteHosts).map((k) => <option key={k} value={k}>{k}</option>)}\n                            </Select>\n                        </Typography>}\n\n                    {passwordLogin\n                    ? <Typography component=\"div\">\n                        <LoginForm dispatch={dispatch}\n                            loginLabel={loginBtnLabel}\n                            handleSubmit={doPasswordLogin(`https://${remoteHosts[loginCluster || homeCluster]}`)}/>\n                    </Typography>\n                    : <Typography component=\"div\" align=\"right\">\n                        <Button variant=\"contained\" color=\"primary\" style={{ margin: \"1em\" }}\n                            className={classes.button}\n                            onClick={() => dispatch(login(localCluster, homeCluster, loginCluster, remoteHosts))}>\n                            {loginBtnLabel}\n                        </Button>\n                    </Typography>}\n                </Grid>\n            </Grid >\n        );}\n    ));\n"
  },
  {
    "path": "services/workbench2/src/views/main-panel/main-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useEffect } from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid, LinearProgress } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { User } from \"models/user\";\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { WorkbenchPanel } from 'views/workbench/workbench';\nimport { LoginPanel } from 'views/login-panel/login-panel';\nimport { InactivePanel } from 'views/inactive-panel/inactive-panel';\nimport { WorkbenchLoadingScreen } from 'views/workbench/workbench-loading-screen';\nimport { MainAppBar } from 'views-components/main-app-bar/main-app-bar';\nimport { Routes, matchLinkAccountRoute } from 'routes/routes';\nimport { RouterState } from \"connected-react-router\";\nimport parse from 'parse-duration';\nimport { Config } from 'common/config';\nimport { LinkAccountPanelState, LinkAccountPanelStatus } from 'store/link-account-panel/link-account-panel-reducer';\nimport { WORKBENCH_LOADING_SCREEN } from 'store/progress-indicator/progress-indicator-actions';\n\ntype CssRules = 'root';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        overflow: 'hidden',\n        width: '100vw',\n        height: '100vh'\n    }\n});\n\nexport interface MainPanelRootDataProps {\n    user?: User;\n    progressIndicator: string[];\n    buildInfo: string;\n    uuidPrefix: string;\n    linkAccountPanel: LinkAccountPanelState;\n    config: Config;\n    sidePanelIsCollapsed: boolean;\n    isDetailsPanelOpen: boolean;\n    router: RouterState;\n}\n\ninterface MainPanelRootDispatchProps {\n    toggleSidePanel: () => void,\n    setCurrentRouteUuid: (uuid: string | null) => void;\n    saveRecentlyVisited: (uuid: string) => void;\n}\n\ntype MainPanelRootProps = MainPanelRootDataProps & MainPanelRootDispatchProps & WithStyles<CssRules>;\n\nexport const MainPanelRoot = withStyles(styles)(\n    ({ classes, progressIndicator, user, buildInfo, uuidPrefix, config, linkAccountPanel,\n        sidePanelIsCollapsed, isDetailsPanelOpen, setCurrentRouteUuid, saveRecentlyVisited, router}: MainPanelRootProps) =>{\n\n            const working = progressIndicator.length > 0;\n            const loading = progressIndicator.includes(WORKBENCH_LOADING_SCREEN);\n            const isLinkingPath = router.location ? matchLinkAccountRoute(router.location.pathname) !== null : false;\n            const currentRoute = router.location ? router.location.pathname : '';\n            const isNotLinking = linkAccountPanel.status === LinkAccountPanelStatus.NONE || linkAccountPanel.status === LinkAccountPanelStatus.INITIAL;\n            const siteBanner = config.clusterConfig.Workbench.SiteName;\n            const sessionIdleTimeout = parse(config.clusterConfig.Workbench.IdleTimeout, 's') || 0;\n\n            useEffect(() => {\n                const splitRoute = currentRoute.split('/');\n                const uuid = splitRoute[splitRoute.length - 1];\n                if(Object.values(Routes).includes(`/${uuid}`) === false) {\n                    setCurrentRouteUuid(uuid);\n                    if(user) saveRecentlyVisited(uuid);\n                } else {\n                    setCurrentRouteUuid(null);\n                }\n            }, [currentRoute]);\n\n        return loading\n            ? 
<WorkbenchLoadingScreen />\n            : <>\n            {isNotLinking && <MainAppBar\n                user={user}\n                buildInfo={buildInfo}\n                uuidPrefix={uuidPrefix}\n                siteBanner={siteBanner}\n                >\n                {working\n                    ? <LinearProgress color=\"secondary\" data-cy=\"linear-progress\" />\n                    : null}\n            </MainAppBar>}\n            <Grid container direction=\"column\" className={classes.root}>\n                {user\n                    ? (user.isActive || (!user.isActive && isLinkingPath)\n                    ? <WorkbenchPanel\n                        isNotLinking={isNotLinking}\n                        isUserActive={user.isActive}\n                        sessionIdleTimeout={sessionIdleTimeout}\n                        sidePanelIsCollapsed={sidePanelIsCollapsed}\n                        isDetailsPanelOpen={isDetailsPanelOpen} />\n                    : <InactivePanel />)\n                    : <LoginPanel />}\n            </Grid>\n        </>\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/views/main-panel/main-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from 'store/store';\nimport { connect } from 'react-redux';\nimport { MainPanelRoot, MainPanelRootDataProps } from 'views/main-panel/main-panel-root';\nimport { toggleSidePanel } from \"store/side-panel/side-panel-action\";\nimport { propertiesActions } from 'store/properties/properties-actions';\nimport { saveRecentlyVisited } from \"store/recently-visited/recently-visited-actions\";\n\nconst mapStateToProps = (state: RootState): MainPanelRootDataProps => {\n    return {\n        user: state.auth.user,\n        progressIndicator: state.progressIndicator,\n        buildInfo: state.appInfo.buildInfo,\n        uuidPrefix: state.auth.localCluster,\n        linkAccountPanel: state.linkAccountPanel,\n        config: state.auth.config,\n        sidePanelIsCollapsed: state.sidePanel.collapsedState,\n        isDetailsPanelOpen: state.detailsPanel.isOpened,\n        router: state.router,\n    };\n};\n\nconst mapDispatchToProps = (dispatch) => {\n    return {\n        toggleSidePanel: (collapsedState)=>{\n            return dispatch(toggleSidePanel(collapsedState))\n        },\n        setCurrentRouteUuid: (uuid: string) => {\n            return dispatch(propertiesActions.SET_PROPERTY({key: 'currentRouteUuid', value: uuid}))\n        },\n        saveRecentlyVisited: (uuid: string) => {\n            return dispatch(saveRecentlyVisited(uuid))\n        },\n    }\n};\n\nexport const MainPanel = connect(mapStateToProps, mapDispatchToProps)(MainPanelRoot);\n"
  },
  {
    "path": "services/workbench2/src/views/not-found-panel/not-found-panel-root.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { ThemeProvider, StyledEngineProvider } from '@mui/material';\nimport { CustomTheme } from 'common/custom-theme';\nimport { NotFoundPanelRoot } from './not-found-panel-root';\n\ndescribe('NotFoundPanelRoot', () => {\n    let props;\n\n    beforeEach(() => {\n        props = {\n            classes: {\n                root: 'root',\n                title: 'title',\n                active: 'active',\n            },\n            clusterConfig: {\n                Users: {\n                    SupportEmailAddress: 'support@example.com'\n                }\n            },\n            location: null,\n        };\n    });\n\n    it('should render component', () => {\n        // given\n        const expectedMessage = \"The page you requested was not found\";\n\n        // when\n        cy.mount(\n            <StyledEngineProvider injectFirst>\n                <ThemeProvider theme={CustomTheme}>\n                    <NotFoundPanelRoot {...props} />\n                </ThemeProvider>\n            </StyledEngineProvider>\n            );\n\n        // then\n        cy.get('p').contains(expectedMessage);\n    });\n\n    it('should render component without email url when no email', () => {\n        // setup\n        props.clusterConfig.Users.SupportEmailAddress = '';\n\n        // when\n        cy.mount(\n            <StyledEngineProvider injectFirst>\n                <ThemeProvider theme={CustomTheme}>\n                    <NotFoundPanelRoot {...props} />\n                </ThemeProvider>\n            </StyledEngineProvider>\n            );\n\n        // then\n        cy.get('a').should('not.exist');\n    });\n\n    it('should render component with additional message and email url', () => {\n        // given\n        const hash = '123hash123';\n        const pathname = `/collections/${hash}`;\n\n        // setup\n        props.location = {\n            pathname,\n        };\n\n        // when\n        cy.mount(\n            <StyledEngineProvider injectFirst>\n                <ThemeProvider theme={CustomTheme}>\n                    <NotFoundPanelRoot {...props} />\n                </ThemeProvider>\n            </StyledEngineProvider>\n            );\n\n        // then\n        cy.get('p').eq(0).contains(hash);\n\n        // and\n        cy.get('a').should('have.length', 1);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/views/not-found-panel/not-found-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Location } from 'history';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Paper, Grid } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { ClusterConfigJSON } from 'common/config';\n\nexport type CssRules = 'root' | 'title' | 'active';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        overflow: 'hidden',\n        width: '100vw',\n        height: '100vh'\n    },\n    title: {\n        paddingLeft: theme.spacing(3),\n        paddingTop: theme.spacing(3),\n        paddingBottom: theme.spacing(3),\n        fontSize: '18px'\n    },\n    active: {\n        color: theme.customs.colors.grey700,\n        textDecoration: 'none',\n    }\n});\n\nexport interface NotFoundPanelOwnProps {\n    notWrapped?: boolean;\n}\n\nexport interface NotFoundPanelRootDataProps {\n    location: Location<any> | null;\n    clusterConfig: ClusterConfigJSON;\n}\n\ntype NotFoundPanelRootProps = NotFoundPanelRootDataProps & NotFoundPanelOwnProps & WithStyles<CssRules>;\n\nconst getAdditionalMessage = (location: Location | null) => {\n    if (!location) {\n        return null;\n    }\n\n    const { pathname } = location;\n\n    if (pathname.indexOf('collections') > -1) {\n        const uuidHash = pathname.replace('/collections/', '');\n\n        return (\n            <p>\n                Please make sure that provided UUID/ObjectHash '{uuidHash}' is valid.\n            </p>\n        );\n    }\n\n    return null;\n};\n\nconst getEmailLink = (email: string, classes: Record<CssRules, string>) => {\n    const { location: { href: windowHref } } = window;\n    const href = `mailto:${email}?body=${encodeURIComponent('Problem while viewing page ')}${encodeURIComponent(windowHref)}&subject=${encodeURIComponent('Workbench problem report')}`;\n\n    return (<a\n        className={classes.active}\n        href={href}>\n        email us\n    </a>);\n};\n\n\nexport const NotFoundPanelRoot = withStyles(styles)(\n    ({ classes, clusterConfig, location, notWrapped }: NotFoundPanelRootProps) => {\n\n        const content = <Grid container justifyContent=\"space-between\" wrap=\"nowrap\" alignItems=\"center\">\n            <div data-cy=\"not-found-content\" className={classes.title}>\n                <h2>Not Found</h2>\n                {getAdditionalMessage(location)}\n                <p>\n                    The page you requested was not found,&nbsp;\n                    {\n                        !!clusterConfig.Users && clusterConfig.Users.SupportEmailAddress ?\n                            getEmailLink(clusterConfig.Users.SupportEmailAddress, classes) :\n                            'email us'\n                    }\n                    &nbsp;if you suspect this is a bug.\n                </p>\n            </div>\n        </Grid>;\n\n        return !notWrapped ? <Paper data-cy=\"not-found-page\"> {content}</Paper> : content;\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/views/not-found-panel/not-found-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from 'store/store';\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { NotFoundPanelRoot, NotFoundPanelRootDataProps } from 'views/not-found-panel/not-found-panel-root';\nimport { Grid } from '@mui/material';\nimport { DefaultView } from \"components/default-view/default-view\";\nimport { IconType } from 'components/icon/icon';\n\nconst mapStateToProps = (state: RootState): NotFoundPanelRootDataProps => {\n    return {\n        location: state.router.location,\n        clusterConfig: state.auth.config.clusterConfig,\n    };\n};\n\nconst mapDispatchToProps = null;\n\nexport const NotFoundPanel = connect(mapStateToProps, mapDispatchToProps)\n    (NotFoundPanelRoot) as any;\n\nexport interface NotFoundViewDataProps {\n    messages: string[];\n    icon?: IconType;\n}\n\n// TODO: optionally pass in the UUID and check if the\n// reason the item is not found is because\n// it or a parent project is actually in the trash.\n// If so, offer to untrash the item or the parent project.\nexport const NotFoundView =\n    ({ messages, icon: Icon }: NotFoundViewDataProps) =>\n        <Grid\n            container\n            alignItems=\"center\"\n            justifyContent=\"center\"\n            style={{ minHeight: \"100%\" }}\n            data-cy=\"not-found-view\">\n            <DefaultView\n                icon={Icon}\n                messages={messages}\n            />\n        </Grid>;\n"
  },
  {
    "path": "services/workbench2/src/views/process-panel/process-attributes.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid } from \"@mui/material\";\nimport withStyles from '@mui/styles/withStyles';\nimport { Dispatch } from 'redux';\nimport { formatCost, formatDateTime } from \"common/formatters\";\nimport { resourceLabel } from \"common/labels\";\nimport { DetailsAttribute } from \"components/details-attribute/details-attribute\";\nimport { ResourceKind } from \"models/resource\";\nimport { CollectionName, ContainerRunTime, ResourceWithName } from \"views-components/data-explorer/renderers\";\nimport { getProcess, getProcessStatus } from \"store/processes/process\";\nimport { RootState } from \"store/store\";\nimport { connect } from \"react-redux\";\nimport { ProcessResource, MOUNT_PATH_CWL_WORKFLOW } from \"models/process\";\nimport { ContainerResource } from \"models/container\";\nimport { navigateToOutput, openWorkflow } from \"store/process-panel/process-panel-actions\";\nimport { ArvadosTheme } from \"common/custom-theme\";\nimport { ContainerRequestResource } from \"models/container-request\";\nimport { filterResources } from \"store/resources/resources\";\nimport { JSONMount, MountType } from 'models/mount-types';\nimport { getCollectionUrl } from 'models/collection';\nimport { ResourcesState } from \"store/resources/resources\";\n\ntype CssRules = 'link' | 'propertyTag';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    link: {\n        fontSize: '0.875rem',\n        color: theme.palette.primary.main,\n        '&:hover': {\n            cursor: 'pointer'\n        }\n    },\n    propertyTag: {\n        marginRight: theme.spacing(0.5),\n        marginBottom: theme.spacing(0.5)\n    },\n});\n\nconst mapStateToProps = (state: RootState, props: { request: ProcessResource, container?: ContainerResource }) => {\n    return {\n        requestUuid: props.request.uuid,\n        resources: state.resources,\n    };\n};\n\ninterface ProcessDetailsAttributesActionProps {\n    navigateToOutput: (resource: ContainerRequestResource) => void;\n    openWorkflow: (uuid: string) => void;\n}\n\nconst mapDispatchToProps = (dispatch: Dispatch): ProcessDetailsAttributesActionProps => ({\n    navigateToOutput: (resource) => dispatch<any>(navigateToOutput(resource)),\n    openWorkflow: (uuid) => dispatch<any>(openWorkflow(uuid)),\n});\n\ntype ProcessDetailsDataProps = {\n    request: ProcessResource,\n    container?: ContainerResource,\n    twoCol?: boolean,\n    hideProcessPanelRedundantFields?: boolean,\n    classes: Record<CssRules, string>\n    requestUuid: string;\n    resources: ResourcesState;\n}\n\nexport const ProcessAttributes = withStyles(styles, { withTheme: true })(\n    connect(mapStateToProps, mapDispatchToProps)(\n        (props: ProcessDetailsDataProps & ProcessDetailsAttributesActionProps) => {\n            const process = getProcess(props.request.uuid)(props.resources);\n            const subprocesses = filterResources((resource: ContainerRequestResource) =>\n                (resource.kind === ResourceKind.CONTAINER_REQUEST &&\n                    resource.requestingContainerUuid === process?.containerRequest.containerUuid)\n            )(props.resources)\n            const mounts = process?.containerRequest?.mounts;\n            const containerRequest = process?.containerRequest;\n            const container = props.container;\n            const 
classes = props.classes;\n            const mdSize = 6;\n            const { workflowCollection, workflowPath } = parseMounts(mounts);\n            const hasTotalCost = containerRequest && containerRequest.cumulativeCost > 0;\n            const totalCostNotReady = container && container.cost > 0 && container.state === \"Running\" && containerRequest && containerRequest.cumulativeCost === 0 && subprocesses.length > 0;\n\n            function parseMounts(mounts: { [path: string]: MountType } | undefined) {\n                if (!mounts || !mounts[MOUNT_PATH_CWL_WORKFLOW]) {\n                    return { workflowCollection: \"\", workflowPath: \"\" };\n                }\n                const wf = mounts[MOUNT_PATH_CWL_WORKFLOW] as JSONMount;\n                let workflowCollection = \"\";\n                let workflowPath = \"\";\n                if (wf.content) {\n                    if (wf.content[\"$graph\"] &&\n                        wf.content[\"$graph\"].length > 0 &&\n                        wf.content[\"$graph\"][0] &&\n                        wf.content[\"$graph\"][0][\"steps\"] &&\n                        wf.content[\"$graph\"][0][\"steps\"][0]) {\n\n                        const REGEX = /keep:([0-9a-f]{32}\\+\\d+)\\/(.*)/;\n                        const pdh = wf.content[\"$graph\"][0][\"steps\"][0].run.match(REGEX);\n                        if (pdh) {\n                            workflowCollection = pdh[1];\n                            workflowPath = pdh[2];\n                        }\n                    }\n                }\n                return { workflowCollection, workflowPath };\n            }\n\n            if (!containerRequest) return <></>;\n\n            return <Grid container>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Type' value={resourceLabel(ResourceKind.PROCESS)} />\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Container request UUID' linkToUuid={containerRequest.uuid} value={containerRequest.uuid} />\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Docker image locator'\n                                  linkToUuid={containerRequest.containerImage} value={containerRequest.containerImage} />\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute\n                    label='Owner' linkToUuid={containerRequest.ownerUuid}\n                    uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />} />\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Container UUID' value={containerRequest.containerUuid} />\n            </Grid>\n            {!props.hideProcessPanelRedundantFields && <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Status' value={getProcessStatus({ containerRequest, container })} />\n            </Grid>}\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Created at' value={formatDateTime(containerRequest.createdAt)} />\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Started at' value={container ? formatDateTime(container.startedAt) : \"(none)\"} />\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Finished at' value={container ? 
formatDateTime(container.finishedAt) : \"(none)\"} />\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Container run time'>\n                    <ContainerRunTime uuid={containerRequest.uuid} />\n                </DetailsAttribute>\n            </Grid>\n            {(containerRequest && containerRequest.modifiedByUserUuid) && <Grid item xs={12} md={mdSize} data-cy=\"process-details-attributes-modifiedby-user\">\n                <DetailsAttribute\n                    label='Submitted by' linkToUuid={containerRequest.modifiedByUserUuid}\n                    uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />} />\n            </Grid>}\n            {(container && container.runtimeUserUuid && container.runtimeUserUuid !== containerRequest.modifiedByUserUuid) && <Grid item xs={12} md={mdSize} data-cy=\"process-details-attributes-runtime-user\">\n                <DetailsAttribute\n                    label='Run as' linkToUuid={container.runtimeUserUuid}\n                    uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />} />\n            </Grid>}\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Requesting container UUID' value={containerRequest.requestingContainerUuid || \"(none)\"} />\n            </Grid>\n            <Grid item xs={6}>\n                <DetailsAttribute label='Output collection' />\n                {containerRequest.outputUuid && <span onClick={() => props.navigateToOutput(containerRequest!)}>\n                    <CollectionName className={classes.link} uuid={containerRequest.outputUuid} />\n                </span>}\n            </Grid>\n            {container && <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Cost' value={\n                `${hasTotalCost ? formatCost(containerRequest.cumulativeCost) + ' total, ' : (totalCostNotReady ? 'total pending completion, ' : '')}${container.cost > 0 ? formatCost(container.cost) : 'not available'} for this container`\n                } />\n            </Grid>}\n            {container && workflowCollection && <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Workflow code' link={getCollectionUrl(workflowCollection)} value={workflowPath} />\n            </Grid>}\n            {containerRequest.properties.template_uuid &&\n             <Grid item xs={12} md={mdSize}>\n                 <span onClick={() => props.openWorkflow(containerRequest.properties.template_uuid)}>\n                     <DetailsAttribute classValue={classes.link}\n                                       label='Workflow' value={containerRequest.properties.workflowName} />\n                 </span>\n             </Grid>}\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Priority' value={containerRequest.priority} />\n            </Grid>\n            </Grid>;\n        }\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views/process-panel/process-cmd-card.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { CardHeader, IconButton, CardContent, Tooltip, Typography, Grid } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { CommandIcon, CopyIcon } from 'components/icon/icon';\nimport { MPVPanelProps } from 'components/multi-panel-view/multi-panel-view';\nimport { DefaultVirtualCodeSnippet } from 'components/default-code-snippet/default-virtual-code-snippet';\nimport { Process } from 'store/processes/process';\nimport shellescape from 'shell-escape';\nimport CopyResultToClipboard from 'components/copy-to-clipboard/copy-result-to-clipboard';\n\ntype CssRules = 'card' | 'content' | 'title' | 'header' | 'avatar' | 'iconHeader';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    card: {\n        height: '100%'\n    },\n    header: {\n        paddingTop: theme.spacing(1),\n        paddingBottom: 0,\n    },\n    iconHeader: {\n        fontSize: '1.875rem',\n        color: theme.customs.colors.greyL,\n    },\n    avatar: {\n        alignSelf: 'flex-start',\n        paddingTop: theme.spacing(0.5)\n    },\n    content: {\n        height: `calc(100% - ${theme.spacing(6)})`,\n        padding: theme.spacing(1),\n        paddingTop: 0,\n        '&:last-child': {\n            paddingBottom: theme.spacing(1),\n        }\n    },\n    title: {\n        overflow: 'hidden',\n        paddingTop: theme.spacing(0.5),\n        color: theme.customs.colors.greyD,\n        fontSize: '1.875rem'\n    },\n});\n\ninterface ProcessCmdCardDataProps {\n  process: Process;\n  onCopy: (text: string) => void;\n}\n\ntype ProcessCmdCardProps = ProcessCmdCardDataProps & WithStyles<CssRules> & MPVPanelProps;\n\nexport const ProcessCmdCard = withStyles(styles)(\n  ({\n    process,\n    onCopy,\n    classes,\n  }: ProcessCmdCardProps) => {\n\n    const formatLine = (lines: string[], index: number): string => {\n      // Escape each arg separately\n      let line = shellescape([lines[index]])\n      // Indent lines after the first\n      const indent = index > 0 ? '  ' : '';\n      // Add backslash \"escaped linebreak\"\n      const lineBreak = lines.length > 1 && index < lines.length - 1 ? 
' \\\\' : '';\n\n      return `${indent}${line}${lineBreak}`;\n    };\n\n    const formatClipboardText = (command: string[]) => (): string => (\n      command.map((v) =>\n        shellescape([v]) // Escape each arg separately\n      ).join(' ')\n    );\n\n    return (\n      <section className={classes.card}>\n        <CardHeader\n          className={classes.header}\n          classes={{\n            content: classes.title,\n            avatar: classes.avatar,\n          }}\n          avatar={<CommandIcon className={classes.iconHeader} />}\n          title={\n            <Typography noWrap variant=\"h6\" color=\"inherit\">\n              Command\n            </Typography>\n          }\n          action={\n            <Grid container direction=\"row\" alignItems=\"center\">\n              <Grid item>\n                <Tooltip title=\"Copy command to clipboard\" disableFocusListener>\n                  <IconButton size=\"large\">\n                    <CopyResultToClipboard\n                      getText={formatClipboardText(process.containerRequest.command)}\n                      onCopy={() => onCopy(\"Command copied to clipboard\")}\n                    >\n                      <CopyIcon />\n                    </CopyResultToClipboard>\n                  </IconButton>\n                </Tooltip>\n              </Grid>\n            </Grid>\n          }\n        />\n        <CardContent className={classes.content}>\n          <DefaultVirtualCodeSnippet\n            lines={process.containerRequest.command}\n            lineFormatter={formatLine}\n            linked\n          />\n        </CardContent>\n      </section>\n    );\n  }\n);\n"
  },
  {
    "path": "services/workbench2/src/views/process-panel/process-details-attributes.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid, Typography } from \"@mui/material\";\nimport withStyles from '@mui/styles/withStyles';\nimport { Dispatch } from 'redux';\nimport { formatCost, formatDateTime } from \"common/formatters\";\nimport { resourceLabel } from \"common/labels\";\nimport { DetailsAttribute } from \"components/details-attribute/details-attribute\";\nimport { ResourceKind } from \"models/resource\";\nimport { CollectionName, ContainerRunTime, ResourceWithName } from \"views-components/data-explorer/renderers\";\nimport { getProcess, getProcessStatus, ProcessProperties } from \"store/processes/process\";\nimport { RootState } from \"store/store\";\nimport { connect } from \"react-redux\";\nimport { ProcessResource, MOUNT_PATH_CWL_WORKFLOW } from \"models/process\";\nimport { ContainerResource } from \"models/container\";\nimport { navigateToOutput, openWorkflow } from \"store/process-panel/process-panel-actions\";\nimport { ArvadosTheme } from \"common/custom-theme\";\nimport { ProcessRuntimeStatus } from \"views-components/process-runtime-status/process-runtime-status\";\nimport { ContainerRequestResource } from \"models/container-request\";\nimport { filterResources } from \"store/resources/resources\";\nimport { JSONMount, MountType } from 'models/mount-types';\nimport { getCollectionUrl } from 'models/collection';\nimport { Link } from \"react-router-dom\";\nimport { getResourceUrl } from \"routes/routes\";\nimport WarningIcon from '@mui/icons-material/Warning';\nimport { ResourcesState } from \"store/resources/resources\";\nimport { getPropertyChips } from \"views-components/property-chips/get-property-chips\";\n\ntype CssRules = 'link' | 'propertyTag';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    link: {\n        fontSize: '0.875rem',\n        color: theme.palette.primary.main,\n        '&:hover': {\n            cursor: 'pointer'\n        }\n    },\n    propertyTag: {\n        marginRight: theme.spacing(0.5),\n        marginBottom: theme.spacing(0.5)\n    },\n});\n\nconst mapStateToProps = (state: RootState, props: { request: ProcessResource, container?: ContainerResource }) => {\n    return {\n        requestUuid: props.request.uuid,\n        resources: state.resources,\n    };\n};\n\ninterface ProcessDetailsAttributesActionProps {\n    navigateToOutput: (resource: ContainerRequestResource) => void;\n    openWorkflow: (uuid: string) => void;\n}\n\nconst mapDispatchToProps = (dispatch: Dispatch): ProcessDetailsAttributesActionProps => ({\n    navigateToOutput: (resource) => dispatch<any>(navigateToOutput(resource)),\n    openWorkflow: (uuid) => dispatch<any>(openWorkflow(uuid)),\n});\n\ntype ProcessDetailsDataProps = {\n    request: ProcessResource,\n    container?: ContainerResource,\n    twoCol?: boolean,\n    hideProcessPanelRedundantFields?: boolean,\n    classes: Record<CssRules, string>\n    requestUuid: string;\n    resources: ResourcesState;\n}\n\nexport const ProcessDetailsAttributes = withStyles(styles, { withTheme: true })(\n    connect(mapStateToProps, mapDispatchToProps)(\n        (props: ProcessDetailsDataProps & ProcessDetailsAttributesActionProps) => {\n            const process = getProcess(props.request.uuid)(props.resources);\n            const subprocesses = filterResources((resource: ContainerRequestResource) =>\n           
     (resource.kind === ResourceKind.CONTAINER_REQUEST &&\n                    resource.requestingContainerUuid === process?.containerRequest.containerUuid)\n            )(props.resources)\n            const mounts = process?.containerRequest?.mounts;\n            const containerRequest = process?.containerRequest;\n            const container = props.container;\n            const classes = props.classes;\n            const mdSize = props.twoCol ? 6 : 12;\n            const { workflowCollection, workflowPath } = parseMounts(mounts);\n            const hasTotalCost = containerRequest && containerRequest.cumulativeCost > 0;\n            const totalCostNotReady = container && container.cost > 0 && container.state === \"Running\" && containerRequest && containerRequest.cumulativeCost === 0 && subprocesses.length > 0;\n            const resubmittedUrl = containerRequest && getResourceUrl(containerRequest.properties[ProcessProperties.FAILED_CONTAINER_RESUBMITTED]);\n            const hasDescription = containerRequest?.description && containerRequest.description.length > 0;\n\n            function parseMounts(mounts: { [path: string]: MountType } | undefined) {\n                if (!mounts || !mounts[MOUNT_PATH_CWL_WORKFLOW]) {\n                    return { workflowCollection: \"\", workflowPath: \"\" };\n                }\n                const wf = mounts[MOUNT_PATH_CWL_WORKFLOW] as JSONMount;\n                let workflowCollection = \"\";\n                let workflowPath = \"\";\n                if (wf.content) {\n                    if (wf.content[\"$graph\"] &&\n                        wf.content[\"$graph\"].length > 0 &&\n                        wf.content[\"$graph\"][0] &&\n                        wf.content[\"$graph\"][0][\"steps\"] &&\n                        wf.content[\"$graph\"][0][\"steps\"][0]) {\n\n                        const REGEX = /keep:([0-9a-f]{32}\\+\\d+)\\/(.*)/;\n                        const pdh = wf.content[\"$graph\"][0][\"steps\"][0].run.match(REGEX);\n                        if (pdh) {\n                            workflowCollection = pdh[1];\n                            workflowPath = pdh[2];\n                        }\n                    }\n                }\n                return { workflowCollection, workflowPath };\n            }\n\n            if (!containerRequest) return <></>;\n\n            return <Grid container>\n            <Grid item xs={12}>\n                <ProcessRuntimeStatus runtimeStatus={container?.runtimeStatus} containerCount={containerRequest.containerCount} />\n            </Grid>\n            {!props.hideProcessPanelRedundantFields && <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Type' value={resourceLabel(ResourceKind.PROCESS)} />\n            </Grid>}\n            {resubmittedUrl && <Grid item xs={12}>\n                <Typography>\n                    <WarningIcon />\n                    This process failed but was automatically resubmitted.  <Link to={resubmittedUrl}> Click here to go to the resubmitted process.</Link>\n                </Typography>\n            </Grid>}\n            <Grid item xs={12} md={12}>\n                <DetailsAttribute label={'Description'}>\n                    {hasDescription\n                        ? 
<Typography>{containerRequest.description}</Typography>\n                        : <Typography>No description available</Typography>}\n                </DetailsAttribute>\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Container request UUID' linkToUuid={containerRequest.uuid} value={containerRequest.uuid} />\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Docker image locator'\n                                  linkToUuid={containerRequest.containerImage} value={containerRequest.containerImage} />\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute\n                    label='Owner' linkToUuid={containerRequest.ownerUuid}\n                    uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />} />\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Container UUID' value={containerRequest.containerUuid} />\n            </Grid>\n            {!props.hideProcessPanelRedundantFields && <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Status' value={getProcessStatus({ containerRequest, container })} />\n            </Grid>}\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Created at' value={formatDateTime(containerRequest.createdAt)} />\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Started at' value={container ? formatDateTime(container.startedAt) : \"(none)\"} />\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Finished at' value={container ? 
formatDateTime(container.finishedAt) : \"(none)\"} />\n            </Grid>\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Container run time'>\n                    <ContainerRunTime uuid={containerRequest.uuid} />\n                </DetailsAttribute>\n            </Grid>\n            {(containerRequest && containerRequest.modifiedByUserUuid) && <Grid item xs={12} md={mdSize} data-cy=\"process-details-attributes-modifiedby-user\">\n                <DetailsAttribute\n                    label='Submitted by' linkToUuid={containerRequest.modifiedByUserUuid}\n                    uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />} />\n            </Grid>}\n            {(container && container.runtimeUserUuid && container.runtimeUserUuid !== containerRequest.modifiedByUserUuid) && <Grid item xs={12} md={mdSize} data-cy=\"process-details-attributes-runtime-user\">\n                <DetailsAttribute\n                    label='Run as' linkToUuid={container.runtimeUserUuid}\n                    uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />} />\n            </Grid>}\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Requesting container UUID' value={containerRequest.requestingContainerUuid || \"(none)\"} />\n            </Grid>\n            <Grid item xs={6}>\n                <DetailsAttribute label='Output collection' />\n                {containerRequest.outputUuid && <span onClick={() => props.navigateToOutput(containerRequest!)}>\n                    <CollectionName className={classes.link} uuid={containerRequest.outputUuid} />\n                </span>}\n            </Grid>\n            {container && <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Cost' value={\n                `${hasTotalCost ? formatCost(containerRequest.cumulativeCost) + ' total, ' : (totalCostNotReady ? 'total pending completion, ' : '')}${container.cost > 0 ? formatCost(container.cost) : 'not available'} for this container`\n                } />\n            </Grid>}\n            {container && workflowCollection && <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Workflow code' link={getCollectionUrl(workflowCollection)} value={workflowPath} />\n            </Grid>}\n            {containerRequest.properties.template_uuid &&\n             <Grid item xs={12} md={mdSize}>\n                 <span onClick={() => props.openWorkflow(containerRequest.properties.template_uuid)}>\n                     <DetailsAttribute classValue={classes.link}\n                                       label='Workflow' value={containerRequest.properties.workflowName} />\n                 </span>\n             </Grid>}\n            <Grid item xs={12} md={mdSize}>\n                <DetailsAttribute label='Priority' value={containerRequest.priority} />\n            </Grid>\n            {/*\n                NOTE: The property list should be kept at the bottom, because it spans\n                the entire available width, regardless of the twoCol prop.\n              */}\n            <Grid item xs={12} md={12}>\n                <DetailsAttribute label='Properties' />\n                {getPropertyChips(containerRequest, classes)}\n            </Grid>\n            </Grid>;\n        }\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views/process-panel/process-io-card.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { combineReducers, createStore } from \"redux\";\nimport {\n    ThemeProvider,\n    StyledEngineProvider,\n} from \"@mui/material\";\nimport { CustomTheme } from 'common/custom-theme';\nimport { Provider } from 'react-redux';\nimport { ProcessIOCard, ProcessIOCardType } from './process-io-card';\nimport { MemoryRouter } from 'react-router-dom';\n\ndescribe('renderers', () => {\n    let store;\n\n    describe('ProcessStatus', () => {\n\n        beforeEach(() => {\n            store = createStore(combineReducers({\n                auth: (state = {}, action) => { return {...state, config: {} } },\n                collectionPanel: (state = {}, action) => state,\n                collectionPanelFiles: (state = {}, action) => { return {...state, item: { portableDataHash: '12345'} } },\n            }));\n        });\n\n        it('shows main process input loading when raw or params null', () => {\n            // when\n            cy.mount(\n                <Provider store={store}>\n                    <StyledEngineProvider injectFirst>\n                        <ThemeProvider theme={CustomTheme}>\n                            <ProcessIOCard\n                                label={ProcessIOCardType.INPUT}\n                                process={false} // Treat as a main process, no requestingContainerUuid\n                                params={null}\n                                raw={{}}\n                            />\n                        </ThemeProvider>\n                    </StyledEngineProvider>\n                </Provider>\n                );\n\n            // then\n            cy.get('[data-cy=process-io-card]').within(() => {\n                cy.get('[data-cy=conditional-tab]').should('not.exist');\n                cy.get('[data-cy=process-io-circular-progress]').should('exist');\n            });\n\n            // when\n            cy.mount(\n                <Provider store={store}>\n                    <StyledEngineProvider injectFirst>\n                        <ThemeProvider theme={CustomTheme}>\n                            <ProcessIOCard\n                                label={ProcessIOCardType.INPUT}\n                                process={false} // Treat as a main process, no requestingContainerUuid\n                                params={[]}\n                                raw={null}\n                            />\n                        </ThemeProvider>\n                    </StyledEngineProvider>\n                </Provider>\n                );\n\n            // then\n            cy.get('[data-cy=process-io-card]').within(() => {\n                cy.get('[data-cy=conditional-tab]').should('not.exist');\n                cy.get('[data-cy=process-io-circular-progress]').should('exist');\n            });\n        });\n\n        it('shows main process empty params and raw', () => {\n            // when\n            cy.mount(\n                <Provider store={store}>\n                    <StyledEngineProvider injectFirst>\n                        <ThemeProvider theme={CustomTheme}>\n                            <ProcessIOCard\n                                label={ProcessIOCardType.INPUT}\n                                process={false} // Treat as a main process, no requestingContainerUuid\n                                params={[]}\n                                raw={{}}\n                            />\n              
          </ThemeProvider>\n                    </StyledEngineProvider>\n                </Provider>\n                );\n\n            // then\n            cy.get('[data-cy=process-io-card]').within(() => {\n                cy.get('[data-cy=conditional-tab]').should('not.exist');\n                cy.get('[data-cy=process-io-circular-progress]').should('not.exist');\n                cy.get('[data-cy=default-view]').should('exist').within(() => {\n                    cy.contains('No parameters found');\n                });\n            });\n        });\n\n        it('shows main process with raw', () => {\n            // when\n            const raw = {some: 'data'};\n            cy.mount(\n                <Provider store={store}>\n                    <StyledEngineProvider injectFirst>\n                        <ThemeProvider theme={CustomTheme}>\n                            <ProcessIOCard\n                                label={ProcessIOCardType.INPUT}\n                                process={false} // Treat as a main process, no requestingContainerUuid\n                                params={[]}\n                                raw={raw}\n                            />\n                        </ThemeProvider>\n                    </StyledEngineProvider>\n                </Provider>\n                );\n\n            // then\n            cy.get('[data-cy=process-io-card]').within(() => {\n                cy.get('[data-cy=conditional-tab]').should('exist');\n                cy.get('[data-cy=process-io-circular-progress]').should('not.exist');\n                cy.get('[data-cy=virtual-code-snippet]').should('exist').within(() => {\n                    cy.contains(JSON.stringify(raw, null, 2).replace(/\\n/g, ''));\n                });\n            });\n        });\n\n        it('shows main process with params', () => {\n            // when\n            const parameters = [{id: 'someId', label: 'someLabel', value: {display: 'someValue'}}];\n            cy.mount(\n                <Provider store={store}>\n                    <StyledEngineProvider injectFirst>\n                        <ThemeProvider theme={CustomTheme}>\n                            <ProcessIOCard\n                                label={ProcessIOCardType.INPUT}\n                                process={false} // Treat as a main process, no requestingContainerUuid\n                                params={parameters}\n                                raw={{}}\n                            />\n                        </ThemeProvider>\n                    </StyledEngineProvider>\n                </Provider>\n                );\n\n            // then\n            cy.get('[data-cy=process-io-card]').within(() => {\n                cy.get('[data-cy=process-io-circular-progress]').should('not.exist');\n                cy.get('[data-cy=conditional-tab]').should('have.length', 2); // Empty raw is shown if parameters are present\n                cy.get('tbody').should('exist').within(() => {\n                    cy.contains('someId');\n                    cy.contains('someLabel');\n                    cy.contains('someValue');\n                });\n            });\n        });\n\n        it('shows main process with output collection', () => {\n            // when\n            const outputCollection = '987654321';\n            const parameters = [{id: 'someId', label: 'someLabel', value: {display: 'someValue'}}];\n\n            cy.mount(\n                <Provider store={store}>\n                    <StyledEngineProvider injectFirst>\n         
               <ThemeProvider theme={CustomTheme}>\n                            <ProcessIOCard\n                                label={ProcessIOCardType.OUTPUT}\n                                process={false} // Treat as a main process, no requestingContainerUuid\n                                outputUuid={outputCollection}\n                                params={parameters}\n                                raw={{}}\n                            />\n                        </ThemeProvider>\n                    </StyledEngineProvider>\n                </Provider>\n                );\n\n            // then\n            cy.get('[data-cy=process-io-card]').within(() => {\n                cy.get('[data-cy=process-io-circular-progress]').should('not.exist');\n                cy.get('[data-cy=conditional-tab]').should('have.length', 3); // Empty raw is shown if parameters are present\n                cy.get('tbody').should('exist').within(() => {\n                    cy.contains('someId');\n                    cy.contains('someLabel');\n                    cy.contains('someValue');\n                });\n            });\n\n            // Visit output tab\n            cy.get('[data-cy=conditional-tab]').contains('Collection').should('exist').click();\n            cy.get('[data-cy=collection-files-panel]').should('exist');\n            cy.get('[data-cy=output-uuid-display]').should('contain', outputCollection);\n        });\n\n        // Subprocess\n\n        it('shows subprocess loading', () => {\n            // when\n            const subprocess = {containerRequest: {requestingContainerUuid: 'xyz'}};\n            cy.mount(\n                <Provider store={store}>\n                    <StyledEngineProvider injectFirst>\n                        <ThemeProvider theme={CustomTheme}>\n                            <ProcessIOCard\n                                label={ProcessIOCardType.INPUT}\n                                process={subprocess} // Treat as a subprocess without outputUuid\n                                params={null}\n                                raw={null}\n                            />\n                        </ThemeProvider>\n                    </StyledEngineProvider>\n                </Provider>\n                );\n\n            // then\n            cy.get('[data-cy=process-io-card]').within(() => {\n                cy.get('[data-cy=conditional-tab]').should('not.exist');\n                cy.get('[data-cy=subprocess-circular-progress]').should('exist');\n            });\n        });\n\n        it('shows subprocess mounts', () => {\n            // when\n            const subprocess = {containerRequest: {requestingContainerUuid: 'xyz'}};\n            const sampleMount = {path: '/', pdh: 'abcdef12abcdef12abcdef12abcdef12+0'};\n            cy.mount(\n                <Provider store={store}>\n                    <MemoryRouter>\n                        <StyledEngineProvider injectFirst>\n                            <ThemeProvider theme={CustomTheme}>\n                                <ProcessIOCard\n                                    label={ProcessIOCardType.INPUT}\n                                    process={subprocess} // Treat as a subprocess without outputUuid\n                                    params={null}\n                                    raw={null}\n                                    mounts={[sampleMount]}\n                                />\n                            </ThemeProvider>\n                        </StyledEngineProvider>\n                    
</MemoryRouter>\n                </Provider>\n                );\n\n            // then\n            cy.get('[data-cy=process-io-card]').within(() => {\n                cy.get('[data-cy=subprocess-circular-progress]').should('not.exist');\n                cy.get('[data-cy=conditional-tab]').should('have.length', 1); // Only the input mounts tab is shown when raw is not loaded\n                cy.get('tbody').should('exist').within(() => {\n                    cy.contains(sampleMount.pdh);\n                });\n            });\n        });\n\n        it('shows subprocess output collection', () => {\n            // when\n            const subprocess = {containerRequest: {requestingContainerUuid: 'xyz'}};\n            const outputCollection = '123456789';\n            cy.mount(\n                <Provider store={store}>\n                    <StyledEngineProvider injectFirst>\n                        <ThemeProvider theme={CustomTheme}>\n                            <ProcessIOCard\n                                label={ProcessIOCardType.OUTPUT}\n                                process={subprocess} // Treat as a subprocess with outputUuid\n                                outputUuid={outputCollection}\n                                params={null}\n                                raw={null}\n                            />\n                        </ThemeProvider>\n                    </StyledEngineProvider>\n                </Provider>\n                );\n\n            // then\n            cy.get('[data-cy=process-io-card]').within(() => {\n                cy.get('[data-cy=process-io-circular-progress]').should('not.exist');\n                cy.get('[data-cy=conditional-tab]').should('have.length', 1); // Only the collection tab is shown when raw is not loaded\n                cy.get('[data-cy=output-uuid-display]').should('contain', outputCollection);\n            });\n        });\n\n        it('shows empty subprocess raw', () => {\n            // when\n            const subprocess = {containerRequest: {requestingContainerUuid: 'xyz'}};\n            const outputCollection = '123456789';\n            cy.mount(\n                <Provider store={store}>\n                    <StyledEngineProvider injectFirst>\n                        <ThemeProvider theme={CustomTheme}>\n                            <ProcessIOCard\n                                label={ProcessIOCardType.OUTPUT}\n                                process={subprocess} // Treat as a subprocess with outputUuid\n                                outputUuid={outputCollection}\n                                params={null}\n                                raw={{}}\n                            />\n                        </ThemeProvider>\n                    </StyledEngineProvider>\n                </Provider>\n                );\n\n            // then\n            cy.get('[data-cy=process-io-card]').within(() => {\n                cy.get('[data-cy=process-io-circular-progress]').should('not.exist');\n                cy.get('[data-cy=conditional-tab]').should('have.length', 2); // Collection tab plus JSON tab, since raw={} still counts as loaded\n                cy.get('[data-cy=conditional-tab]').eq(1).should('exist')\n                cy.get('[data-cy=output-uuid-display]').should('contain', outputCollection);\n            });\n        });\n\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/views/process-panel/process-io-card.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { ReactElement, memo } from \"react\";\nimport { Dispatch } from \"redux\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport {\n    CardHeader,\n    CardContent,\n    Tooltip,\n    Typography,\n    Table,\n    TableHead,\n    TableBody,\n    TableRow,\n    TableCell,\n    Paper,\n    Grid,\n    Chip,\n    CircularProgress,\n} from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from \"common/custom-theme\";\nimport { InputIcon, OutputIcon, InfoIcon, WarningIcon } from \"components/icon/icon\";\nimport { MPVPanelProps } from \"components/multi-panel-view/multi-panel-view\";\nimport {\n    BooleanCommandInputParameter,\n    CommandInputParameter,\n    CWLType,\n    Directory,\n    DirectoryArrayCommandInputParameter,\n    DirectoryCommandInputParameter,\n    EnumCommandInputParameter,\n    FileArrayCommandInputParameter,\n    FileCommandInputParameter,\n    FloatArrayCommandInputParameter,\n    FloatCommandInputParameter,\n    IntArrayCommandInputParameter,\n    IntCommandInputParameter,\n    isArrayOfType,\n    isPrimitiveOfType,\n    isSecret,\n    StringArrayCommandInputParameter,\n    StringCommandInputParameter,\n    getEnumType,\n} from \"models/workflow\";\nimport { CommandOutputParameter } from \"cwlts/mappings/v1.0/CommandOutputParameter\";\nimport { File } from \"models/workflow\";\nimport { getInlineFileUrl } from \"views-components/context-menu/actions/helpers\";\nimport { AuthState } from \"store/auth/auth-reducer\";\nimport mime from \"mime\";\nimport { DefaultView } from \"components/default-view/default-view\";\nimport { getNavUrl } from \"routes/routes\";\nimport { Link as RouterLink } from \"react-router-dom\";\nimport { Link as MuiLink } from \"@mui/material\";\nimport { InputCollectionMount } from \"store/processes/processes-actions\";\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { ProcessOutputCollectionFiles } from \"./process-output-collection-files\";\nimport { Process } from \"store/processes/process\";\nimport { navigateTo } from \"store/navigation/navigation-action\";\nimport classNames from \"classnames\";\nimport { DefaultVirtualCodeSnippet } from \"components/default-code-snippet/default-virtual-code-snippet\";\nimport { KEEP_URL_REGEX } from \"models/resource\";\nimport { FixedSizeList } from 'react-window';\nimport AutoSizer from \"react-virtualized-auto-sizer\";\nimport { LinkProps } from \"@mui/material/Link\";\nimport { ConditionalTabs } from \"components/conditional-tabs/conditional-tabs\";\n\ntype CssRules =\n    | \"card\"\n    | \"content\"\n    | \"title\"\n    | \"header\"\n    | \"avatar\"\n    | \"iconHeader\"\n    | \"tableWrapper\"\n    | \"virtualListTableRoot\"\n    | \"parameterTableRoot\"\n    | \"inputMountTableRoot\"\n    | \"virtualListCellText\"\n    | \"jsonWrapper\"\n    | \"keepLink\"\n    | \"collectionLink\"\n    | \"secondaryVal\"\n    | \"emptyValue\"\n    | \"noBorderRow\"\n    | \"symmetricTabs\"\n    | \"wrapTooltip\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    card: {\n        height: \"100%\",\n    },\n    header: {\n        paddingTop: theme.spacing(1),\n        paddingBottom: 0,\n    },\n    iconHeader: {\n        fontSize: \"1.875rem\",\n        color: theme.customs.colors.greyL,\n    },\n   
 avatar: {\n        alignSelf: \"flex-start\",\n        paddingTop: theme.spacing(0.5),\n    },\n    // Card content\n    content: {\n        height: `calc(100% - ${theme.spacing(6)})`,\n        padding: theme.spacing(1),\n        paddingTop: 0,\n        \"&:last-child\": {\n            paddingBottom: 0,\n        },\n    },\n    // Card title\n    title: {\n        overflow: \"hidden\",\n        paddingTop: theme.spacing(0.5),\n        color: theme.customs.colors.greyD,\n        fontSize: \"1.875rem\",\n    },\n    // Applies to parameters / input collection virtual lists and output collection\n    tableWrapper: {\n        height: `calc(100% - ${theme.spacing(6)})`,\n        overflow: \"auto\",\n        // Use flexbox to keep scrolling at the virtual list level\n        display: \"flex\",\n        flexDirection: \"column\",\n        alignItems: \"stretch\", // Stretches output collection to full width\n    },\n    // Parameters / input collection virtual list table styles\n    virtualListTableRoot: {\n        display: \"flex\",\n        flexDirection: \"column\",\n        overflow: \"hidden\",\n        // Flex header\n        \"& thead tr\": {\n            alignItems: \"end\",\n            \"& th\": {\n                padding: \"4px 25px 10px\",\n            },\n        },\n        \"& tbody\": {\n            height: \"100vh\", // Must be constrained by panel maxHeight\n        },\n        // Flex header/body rows\n        \"& thead tr, & > tbody tr\": {\n            display: \"flex\",\n            // Flex header/body cells\n            \"& th, & td\": {\n                flexGrow: 1,\n                flexShrink: 1,\n                flexBasis: 0,\n                overflow: \"hidden\",\n            },\n        },\n        // Flex body rows\n        \"& tbody tr\": {\n            height: \"40px\",\n            // Flex body cells\n            \"& td\": {\n                padding: \"2px 25px 2px\",\n                overflow: \"hidden\",\n                display: \"flex\",\n                flexDirection: \"row\",\n                alignItems: \"center\",\n                whiteSpace: \"nowrap\",\n            },\n        },\n    },\n    // Parameter tab table column widths\n    parameterTableRoot: {\n        \"& thead tr, & > tbody tr\": {\n            // ID\n            \"& th:nth-of-type(1), & td:nth-of-type(1)\": {\n                flexGrow: 0.7,\n            },\n            // Collection\n            \"& th:nth-last-of-type(1), & td:nth-last-of-type(1)\": {\n                flexGrow: 2,\n            },\n        },\n    },\n    // Input collection tab table column widths\n    inputMountTableRoot: {\n        \"& thead tr, & > tbody tr\": {\n            // Path\n            \"& th:nth-of-type(1), & td:nth-of-type(1)\": {\n                flexGrow: 1,\n            },\n            // PDH\n            \"& th:nth-last-of-type(1), & td:nth-last-of-type(1)\": {\n                flexGrow: 0.8,\n            },\n        },\n    },\n    // Param value cell typography styles\n    virtualListCellText: {\n        overflow: \"hidden\",\n        display: \"flex\",\n        // Every cell contents requires a wrapper for the ellipsis\n        // since adding ellipses to an anchor element parent results in misaligned tooltip\n        \"& a, & span\": {\n            overflow: \"hidden\",\n            textOverflow: \"ellipsis\",\n        },\n        '& pre': {\n            margin: 0,\n            overflow: \"hidden\",\n            textOverflow: \"ellipsis\",\n        },\n    },\n    // JSON tab wrapper\n    
jsonWrapper: {\n        height: `calc(100% - ${theme.spacing(6)})`,\n    },\n    keepLink: {\n        color: theme.palette.primary.main,\n        textDecoration: \"none\",\n        // Overflow wrap for mounts table\n        overflowWrap: \"break-word\",\n        cursor: \"pointer\",\n    },\n    // Output collection tab link\n    collectionLink: {\n        margin: \"10px\",\n        \"& a\": {\n            color: theme.palette.primary.main,\n            textDecoration: \"none\",\n            overflowWrap: \"break-word\",\n            cursor: \"pointer\",\n        },\n    },\n    secondaryVal: {\n        paddingLeft: \"20px\",\n    },\n    emptyValue: {\n        color: theme.customs.colors.grey700,\n    },\n    noBorderRow: {\n        \"& td\": {\n            borderBottom: \"none\",\n            paddingTop: \"2px\",\n            paddingBottom: \"2px\",\n        },\n        height: \"24px\",\n    },\n    symmetricTabs: {\n        \"& button\": {\n            flexBasis: \"0\",\n        },\n    },\n    wrapTooltip: {\n        maxWidth: \"600px\",\n        wordWrap: \"break-word\",\n    },\n});\n\nexport enum ProcessIOCardType {\n    INPUT = \"Input Parameters\",\n    OUTPUT = \"Output Parameters\",\n}\nexport interface ProcessIOCardDataProps {\n    process?: Process;\n    label: ProcessIOCardType;\n    params: ProcessIOParameter[] | null;\n    raw: any;\n    mounts?: InputCollectionMount[];\n    outputUuid?: string;\n    forceShowParams?: boolean;\n    failedToLoadOutputCollection?: boolean;\n}\n\ntype ProcessIOCardProps = ProcessIOCardDataProps & WithStyles<CssRules> & MPVPanelProps;\n\nexport const ProcessIOCard = withStyles(styles)(\n    ({\n        classes,\n        label,\n        params,\n        raw,\n        mounts,\n        outputUuid,\n        panelName,\n        process,\n        forceShowParams,\n        failedToLoadOutputCollection,\n    }: ProcessIOCardProps) => {\n        const PanelIcon = label === ProcessIOCardType.INPUT ? 
InputIcon : OutputIcon;\n        // The top-level process has no requestingContainerUuid\n        const mainProcess = !(process && process.containerRequest.requestingContainerUuid);\n        const showParamTable = mainProcess || forceShowParams;\n\n        const loading = (raw === null || raw === undefined || params === null) && !failedToLoadOutputCollection;\n\n        const hasRaw = !!(raw && Object.keys(raw).length > 0);\n        const hasParams = !!(params && params.length > 0);\n        // isRawLoaded allows subprocess panel to display raw even if it's {}\n        const isRawLoaded = !!raw;\n\n        // Subprocess\n        const hasInputMounts = !!(label === ProcessIOCardType.INPUT && mounts && mounts.length);\n        const hasOutputCollection = !!(label === ProcessIOCardType.OUTPUT && outputUuid) && !failedToLoadOutputCollection;\n        // Subprocess should not show loading if hasOutputCollection or hasInputMounts\n        const subProcessLoading = loading && !hasOutputCollection && !hasInputMounts;\n\n        return (\n            <section\n            className={classes.card}\n            data-cy=\"process-io-card\"\n            >\n            <CardHeader\n                className={classes.header}\n                classes={{\n                    content: classes.title,\n                    avatar: classes.avatar,\n                }}\n                avatar={<PanelIcon className={classes.iconHeader} />}\n                title={\n                    <Typography\n                        noWrap\n                        variant=\"h6\"\n                        color=\"inherit\"\n                    >\n                        {label}\n                    </Typography>\n                }\n            />\n            <CardContent className={classes.content}>\n            {!loading && failedToLoadOutputCollection && !hasRaw && !hasParams ?\n             (<Grid\n                  container\n                  item\n                  alignItems=\"center\"\n                  justifyContent=\"center\"\n              >\n                 <DefaultView messages={[\"Output collection was trashed or deleted\"]} icon={WarningIcon} />\n             </Grid>) :\n                 (showParamTable ? 
(\n                     <>\n                     {/* raw is undefined until params are loaded */}\n                     {loading && (\n                         <Grid\n                             container\n                             item\n                             alignItems=\"center\"\n                             justifyContent=\"center\"\n                         >\n                             <CircularProgress data-cy=\"process-io-circular-progress\" />\n                         </Grid>\n                     )}\n                    {/* Once loaded, either raw or params may still be empty\n                      *   Raw when all params are empty\n                      *   Params when raw is provided by containerRequest properties but the workflow mount is absent for preview\n                      */}\n                    {!loading && (hasRaw || hasParams) && (\n                        <ConditionalTabs\n                            variant=\"fullWidth\"\n                            className={classes.symmetricTabs}\n                            tabs={[\n                                {\n                                    // params will be empty on processes without workflow definitions in mounts, so we only show raw\n                                    show: hasParams,\n                                    label: \"Parameters\",\n                                    content: <ProcessIOPreview\n                                                 data={params || []}\n                                                 valueLabel={forceShowParams ? \"Default value\" : \"Value\"}\n                                             />,\n                                },\n                                {\n                                    show: !forceShowParams,\n                                    label: \"JSON\",\n                                    content: <ProcessIORaw data={raw} />,\n                                },\n                                {\n                                    show: hasOutputCollection,\n                                    label: \"Collection\",\n                                    content: <ProcessOutputCollection outputUuid={outputUuid} />,\n                                },\n                            ]}\n                        />\n                    )}\n                    {!loading && !hasRaw && !hasParams && (\n                        <Grid\n                            container\n                            item\n                            alignItems=\"center\"\n                            justifyContent=\"center\"\n                        >\n                            <DefaultView messages={[\"No parameters found\"]} />\n                        </Grid>\n                    )}\n                        </>\n                        ) : (\n                        // Subprocess\n                        <>\n                            {subProcessLoading ? (\n                                <Grid\n                                    container\n                                    item\n                                    alignItems=\"center\"\n                                    justifyContent=\"center\"\n                                >\n                                    <CircularProgress data-cy=\"subprocess-circular-progress\" />\n                                </Grid>\n                            ) : (hasInputMounts || hasOutputCollection || isRawLoaded) ? 
(\n                                <ConditionalTabs\n                                    variant=\"fullWidth\"\n                                    className={classes.symmetricTabs}\n                                    tabs={[\n                                        {\n                                            show: hasInputMounts,\n                                            label: \"Collections\",\n                                            content: <ProcessInputMounts mounts={mounts || []} />,\n                                        },\n                                        {\n                                            show: hasOutputCollection,\n                                            label: \"Collection\",\n                                            content: <ProcessOutputCollection outputUuid={outputUuid} />,\n                                        },\n                                        {\n                                            show: isRawLoaded,\n                                            label: \"JSON\",\n                                            content: <ProcessIORaw data={raw} />,\n                                        },\n                                    ]}\n                                />\n                            ) : (\n                                <Grid\n                                    container\n                                    item\n                                    alignItems=\"center\"\n                                    justifyContent=\"center\"\n                                >\n                                    <DefaultView messages={[\"No data to display\"]} />\n                                </Grid>\n                            )}\n                            </>\n                ))}\n                </CardContent>\n                </section>\n        );\n            }\n);\n\nexport type ProcessIOValue = {\n    display: ReactElement<any, any>;\n    imageUrl?: string;\n    collection?: ReactElement<any, any>;\n    secondary?: boolean;\n};\n\nexport type ProcessIOParameter = {\n    id: string;\n    label: string;\n    value: ProcessIOValue;\n};\n\ninterface ProcessIOPreviewDataProps {\n    data: ProcessIOParameter[];\n    valueLabel: string;\n    hidden?: boolean;\n}\n\ntype ProcessIOPreviewProps = ProcessIOPreviewDataProps & WithStyles<CssRules>;\n\nconst ProcessIOPreview = memo(\n    withStyles(styles)(({ data, valueLabel, hidden, classes }: ProcessIOPreviewProps) => {\n        const showLabel = data.some((param: ProcessIOParameter) => param.label);\n\n        const hasMoreValues = (index: number) => (\n            data[index+1] && !isMainRow(data[index+1])\n        );\n\n        const isMainRow = (param: ProcessIOParameter) => (\n            param &&\n            ((param.id || param.label) &&\n            !param.value.secondary)\n        );\n\n        const RenderRow = ({index, style}) => {\n            const param = data[index];\n\n            const rowClasses = {\n                [classes.noBorderRow]: hasMoreValues(index),\n            };\n\n            return <TableRow\n                style={style}\n                className={classNames(rowClasses)}\n                data-cy={isMainRow(param) ? 
\"process-io-param\" : \"\"}>\n                <TableCell>\n                    <Tooltip title={param.id}>\n                        <Typography className={classes.virtualListCellText}>\n                            <span>\n                                {param.id}\n                            </span>\n                        </Typography>\n                    </Tooltip>\n                </TableCell>\n                {showLabel && <TableCell>\n                    <Tooltip title={param.label}>\n                        <Typography className={classes.virtualListCellText}>\n                            <span>\n                                {param.label}\n                            </span>\n                        </Typography>\n                    </Tooltip>\n                </TableCell>}\n                <TableCell>\n                    <ProcessValuePreview\n                        value={param.value}\n                    />\n                </TableCell>\n                <TableCell>\n                    <Typography className={classes.virtualListCellText}>\n                        {/** Collection is an anchor so doesn't require wrapper element */}\n                        {param.value.collection}\n                    </Typography>\n                </TableCell>\n            </TableRow>;\n        };\n\n        return <div className={classes.tableWrapper} hidden={hidden}>\n            <Table\n                className={classNames(classes.virtualListTableRoot, classes.parameterTableRoot)}\n                aria-label=\"Process IO Preview\"\n            >\n                <TableHead>\n                    <TableRow>\n                        <TableCell>Name</TableCell>\n                        {showLabel && <TableCell>Label</TableCell>}\n                        <TableCell>{valueLabel}</TableCell>\n                        <TableCell>Collection</TableCell>\n                    </TableRow>\n                </TableHead>\n                <TableBody>\n                    <AutoSizer>\n                        {({ height, width }) =>\n                            <FixedSizeList\n                                height={height}\n                                itemCount={data.length}\n                                itemSize={40}\n                                width={width}\n                            >\n                                {RenderRow}\n                            </FixedSizeList>\n                        }\n                    </AutoSizer>\n                </TableBody>\n            </Table>\n        </div>;\n    })\n);\n\ninterface ProcessValuePreviewProps {\n    value: ProcessIOValue;\n}\n\nconst ProcessValuePreview = withStyles(styles)(({ value, classes }: ProcessValuePreviewProps & WithStyles<CssRules>) => (\n    <Typography className={classNames(classes.virtualListCellText, value.secondary && classes.secondaryVal)}>\n        {value.display}\n    </Typography>\n));\n\ninterface ProcessIORawDataProps {\n    data: ProcessIOParameter[];\n    hidden?: boolean;\n}\n\nconst ProcessIORaw = withStyles(styles)(({ data, hidden, classes }: ProcessIORawDataProps & WithStyles<CssRules>) => (\n    <div className={classes.jsonWrapper} hidden={hidden}>\n        <Paper elevation={0} style={{minWidth: \"100%\", height: \"100%\"}}>\n            <DefaultVirtualCodeSnippet\n                lines={JSON.stringify(data, null, 2).split('\\n')}\n                linked\n                copyButton\n            />\n        </Paper>\n    </div>\n));\n\ninterface ProcessInputMountsDataProps {\n    mounts: 
InputCollectionMount[];\n    hidden?: boolean;\n}\n\ntype ProcessInputMountsProps = ProcessInputMountsDataProps & WithStyles<CssRules>;\n\nconst ProcessInputMounts = withStyles(styles)(\n    connect((state: RootState) => ({\n        auth: state.auth,\n    }))(({ mounts, hidden, classes, auth }: ProcessInputMountsProps & { auth: AuthState }) => {\n\n        const RenderRow = ({index, style}) => {\n            const mount = mounts[index];\n\n            return <TableRow\n                key={mount.path}\n                style={style}>\n                <TableCell>\n                    <Tooltip title={mount.path}>\n                        <Typography className={classes.virtualListCellText}>\n                            <pre>{mount.path}</pre>\n                        </Typography>\n                    </Tooltip>\n                </TableCell>\n                <TableCell>\n                    <Tooltip title={mount.pdh}>\n                        <Typography className={classes.virtualListCellText}>\n                            <RouterLink\n                                to={getNavUrl(mount.pdh, auth)}\n                                className={classes.keepLink}\n                            >\n                                {mount.pdh}\n                            </RouterLink>\n                        </Typography>\n                    </Tooltip>\n                </TableCell>\n            </TableRow>;\n        };\n\n        return <div className={classes.tableWrapper} hidden={hidden}>\n            <Table\n                className={classNames(classes.virtualListTableRoot, classes.inputMountTableRoot)}\n                aria-label=\"Process Input Mounts\"\n                hidden={hidden}\n            >\n                <TableHead>\n                    <TableRow>\n                        <TableCell>Path</TableCell>\n                        <TableCell>Portable Data Hash</TableCell>\n                    </TableRow>\n                </TableHead>\n                <TableBody>\n                    <AutoSizer>\n                        {({ height, width }) =>\n                            <FixedSizeList\n                                height={height}\n                                itemCount={mounts.length}\n                                itemSize={40}\n                                width={width}\n                            >\n                                {RenderRow}\n                            </FixedSizeList>\n                        }\n                    </AutoSizer>\n                </TableBody>\n            </Table>\n        </div>;\n    })\n);\n\nexport interface ProcessOutputCollectionActionProps {\n    navigateTo: (uuid: string) => void;\n}\n\nconst mapNavigateToProps = (dispatch: Dispatch): ProcessOutputCollectionActionProps => ({\n    navigateTo: uuid => dispatch<any>(navigateTo(uuid)),\n});\n\ntype ProcessOutputCollectionProps = {outputUuid: string | undefined, hidden?: boolean} & ProcessOutputCollectionActionProps &  WithStyles<CssRules>;\n\nconst ProcessOutputCollection = withStyles(styles)(connect(null, mapNavigateToProps)(({ outputUuid, hidden, navigateTo, classes }: ProcessOutputCollectionProps) => (\n    <div className={classes.tableWrapper} hidden={hidden}>\n        <>\n            {outputUuid && (\n                <Typography className={classes.collectionLink} data-cy=\"output-uuid-display\">\n                    Output Collection:{\" \"}\n                    <MuiLink\n                        className={classes.keepLink}\n                        onClick={() => {\n              
              navigateTo(outputUuid || \"\");\n                        }}\n                    >\n                        {outputUuid}\n                    </MuiLink>\n                </Typography>\n            )}\n            <ProcessOutputCollectionFiles\n                isWritable={false}\n                currentItemUuid={outputUuid}\n            />\n        </>\n    </div>\n)));\n\ntype FileWithSecondaryFiles = {\n    secondaryFiles: File[];\n};\n\nexport const getIOParamDisplayValue = (auth: AuthState, input: CommandInputParameter | CommandOutputParameter, pdh?: string): ProcessIOValue[] => {\n    switch (true) {\n        case isSecret(input):\n            return [{ display: <SecretValue /> }];\n\n        case isPrimitiveOfType(input, CWLType.BOOLEAN):\n            const boolValue = (input as BooleanCommandInputParameter).value;\n            return boolValue !== undefined && !(Array.isArray(boolValue) && boolValue.length === 0)\n                ? [{ display: <PrimitiveTooltip data={boolValue}>{renderPrimitiveValue(boolValue, false)}</PrimitiveTooltip> }]\n                : [{ display: <EmptyValue /> }];\n\n        case isPrimitiveOfType(input, CWLType.INT):\n        case isPrimitiveOfType(input, CWLType.LONG):\n            const intValue = (input as IntCommandInputParameter).value;\n            return intValue !== undefined &&\n                // Missing values are empty array\n                !(Array.isArray(intValue) && intValue.length === 0)\n                ? [{ display: <PrimitiveTooltip data={intValue}>{renderPrimitiveValue(intValue, false)}</PrimitiveTooltip> }]\n                : [{ display: <EmptyValue /> }];\n\n        case isPrimitiveOfType(input, CWLType.FLOAT):\n        case isPrimitiveOfType(input, CWLType.DOUBLE):\n            const floatValue = (input as FloatCommandInputParameter).value;\n            return floatValue !== undefined && !(Array.isArray(floatValue) && floatValue.length === 0)\n                ? [{ display: <PrimitiveTooltip data={floatValue}>{renderPrimitiveValue(floatValue, false)}</PrimitiveTooltip> }]\n                : [{ display: <EmptyValue /> }];\n\n        case isPrimitiveOfType(input, CWLType.STRING):\n            const stringValue = (input as StringCommandInputParameter).value || undefined;\n            return stringValue !== undefined && !(Array.isArray(stringValue) && stringValue.length === 0)\n                ? [{ display: <PrimitiveTooltip data={stringValue}>{renderPrimitiveValue(stringValue, false)}</PrimitiveTooltip> }]\n                : [{ display: <EmptyValue /> }];\n\n        case isPrimitiveOfType(input, CWLType.FILE):\n            const mainFile = (input as FileCommandInputParameter).value;\n            // secondaryFiles: File[] is not part of CommandOutputParameter so we cast to access secondaryFiles\n            const secondaryFiles = (mainFile as unknown as FileWithSecondaryFiles)?.secondaryFiles || [];\n            const files = [...(mainFile && !(Array.isArray(mainFile) && mainFile.length === 0) ? [mainFile] : []), ...secondaryFiles];\n            const mainFilePdhUrl = mainFile ? getResourcePdhUrl(mainFile, pdh) : \"\";\n            return files.length\n                ? files.map((file, i) => fileToProcessIOValue(file, i > 0, auth, pdh, i > 0 ? 
mainFilePdhUrl : \"\"))\n                : [{ display: <EmptyValue /> }];\n\n        case isPrimitiveOfType(input, CWLType.DIRECTORY):\n            const directory = (input as DirectoryCommandInputParameter).value;\n            return directory !== undefined && !(Array.isArray(directory) && directory.length === 0)\n                ? [directoryToProcessIOValue(directory, auth, pdh)]\n                : [{ display: <EmptyValue /> }];\n\n        case getEnumType(input) !== null:\n            const enumValue = (input as EnumCommandInputParameter).value;\n            return enumValue !== undefined && enumValue ? [{ display: <PrimitiveTooltip data={enumValue}>{enumValue}</PrimitiveTooltip> }] : [{ display: <EmptyValue /> }];\n\n        case isArrayOfType(input, CWLType.STRING):\n            const strArray = (input as StringArrayCommandInputParameter).value || [];\n            return strArray.length ? [{ display: <PrimitiveArrayTooltip data={strArray}>{strArray.map(val => renderPrimitiveValue(val, true))}</PrimitiveArrayTooltip> }] : [{ display: <EmptyValue /> }];\n\n        case isArrayOfType(input, CWLType.INT):\n        case isArrayOfType(input, CWLType.LONG):\n            const intArray = (input as IntArrayCommandInputParameter).value || [];\n            return intArray.length ? [{ display: <PrimitiveArrayTooltip data={intArray}>{intArray.map(val => renderPrimitiveValue(val, true))}</PrimitiveArrayTooltip> }] : [{ display: <EmptyValue /> }];\n\n        case isArrayOfType(input, CWLType.FLOAT):\n        case isArrayOfType(input, CWLType.DOUBLE):\n            const floatArray = (input as FloatArrayCommandInputParameter).value || [];\n            return floatArray.length ? [{ display: <PrimitiveArrayTooltip data={floatArray}>{floatArray.map(val => renderPrimitiveValue(val, true))}</PrimitiveArrayTooltip> }] : [{ display: <EmptyValue /> }];\n\n        case isArrayOfType(input, CWLType.FILE):\n            const fileArrayMainFiles = (input as FileArrayCommandInputParameter).value || [];\n            const firstMainFilePdh = fileArrayMainFiles.length > 0 && fileArrayMainFiles[0] ? getResourcePdhUrl(fileArrayMainFiles[0], pdh) : \"\";\n\n            // Convert each main and secondaryFiles into array of ProcessIOValue preserving ordering\n            let fileArrayValues: ProcessIOValue[] = [];\n            for (let i = 0; i < fileArrayMainFiles.length; i++) {\n                const secondaryFiles = (fileArrayMainFiles[i] as unknown as FileWithSecondaryFiles)?.secondaryFiles || [];\n                fileArrayValues.push(\n                    // Pass firstMainFilePdh to secondary files and every main file besides the first to hide pdh if equal\n                    ...(fileArrayMainFiles[i] ? [fileToProcessIOValue(fileArrayMainFiles[i], false, auth, pdh, i > 0 ? firstMainFilePdh : \"\")] : []),\n                    ...secondaryFiles.map(file => fileToProcessIOValue(file, true, auth, pdh, firstMainFilePdh))\n                );\n            }\n\n            return fileArrayValues.length ? fileArrayValues : [{ display: <EmptyValue /> }];\n\n        case isArrayOfType(input, CWLType.DIRECTORY):\n            const directories = (input as DirectoryArrayCommandInputParameter).value || [];\n            return directories.length ? 
directories.map(directory => directoryToProcessIOValue(directory, auth, pdh)) : [{ display: <EmptyValue /> }];\n\n        default:\n            return [{ display: <UnsupportedValue /> }];\n    }\n};\n\ninterface PrimitiveTooltipProps {\n    data: boolean | number | string;\n}\n\nconst PrimitiveTooltip = (props: React.PropsWithChildren<PrimitiveTooltipProps>) => (\n    <Tooltip title={typeof props.data !== 'object' ? String(props.data) : \"\"}>\n        <Typography component='span' fontFamily='monospace'>{props.children}</Typography>\n    </Tooltip>\n);\n\ninterface PrimitiveArrayTooltipProps {\n    data: string[];\n}\n\nconst PrimitiveArrayTooltip = (props: React.PropsWithChildren<PrimitiveArrayTooltipProps>) => (\n    <Tooltip title={props.data.join(', ')}>\n        <span>{props.children}</span>\n    </Tooltip>\n);\n\n\nconst renderPrimitiveValue = (value: any, asChip: boolean) => {\n    const isObject = typeof value === \"object\";\n    if (!isObject) {\n        return asChip ? (\n            <Chip\n                key={value}\n                label={String(value)}\n                style={{marginRight: \"10px\"}}\n            />\n        ) : (\n            <>{String(value)}</>\n        );\n    } else {\n        return asChip ? <UnsupportedValueChip /> : <UnsupportedValue />;\n    }\n};\n\n/**\n * @returns keep url without the keep: prefix\n */\nconst getKeepUrl = (file: File | Directory, pdh?: string): string => {\n    const isKeepUrl = file.location?.startsWith(\"keep:\") || false;\n    const keepUrl = isKeepUrl ? file.location?.replace(\"keep:\", \"\") : pdh ? `${pdh}/${file.location}` : file.location;\n    return keepUrl || \"\";\n};\n\ninterface KeepUrlProps {\n    auth: AuthState;\n    res: File | Directory;\n    pdh?: string;\n}\n\nconst getResourcePdhUrl = (res: File | Directory, pdh?: string): string => {\n    const keepUrl = getKeepUrl(res, pdh);\n    return keepUrl ? keepUrl.split(\"/\")[0] : \"\";\n};\n\nconst KeepUrlBase = withStyles(styles)(({ auth, res, pdh, classes }: KeepUrlProps & WithStyles<CssRules>) => {\n    const pdhUrl = getResourcePdhUrl(res, pdh);\n    // Passing a pdh always returns a relative wb2 collection url\n    const pdhWbPath = getNavUrl(pdhUrl, auth);\n    return pdhUrl && pdhWbPath ? (\n        <Tooltip title={<>View collection in Workbench<br />{pdhUrl}</>}>\n            <span>\n                <RouterLink\n                    to={pdhWbPath}\n                    className={classes.keepLink}\n                >\n                    {pdhUrl}\n                </RouterLink>\n            </span>\n        </Tooltip>\n    ) : (\n        <></>\n    );\n});\n\nconst KeepUrlPath = withStyles(styles)(({ auth, res, pdh, classes }: KeepUrlProps & WithStyles<CssRules>) => {\n    const keepUrl = getKeepUrl(res, pdh);\n    const keepUrlParts = keepUrl ? keepUrl.split(\"/\") : [];\n    const keepUrlPath = keepUrlParts.length > 1 ? keepUrlParts.slice(1).join(\"/\") : \"\";\n\n    const keepUrlPathNav = getKeepNavUrl(auth, res, pdh);\n    return keepUrlPathNav ? 
(\n        <Tooltip classes={{tooltip: classes.wrapTooltip}} title={<>View in keep-web<br />{keepUrlPath || \"/\"}</>}>\n            <a\n                className={classes.keepLink}\n                href={keepUrlPathNav}\n                target=\"_blank\"\n                rel=\"noopener noreferrer\"\n            >\n                {keepUrlPath || \"/\"}\n            </a>\n        </Tooltip>\n    ) : (\n        <EmptyValue />\n    );\n});\n\nconst getKeepNavUrl = (auth: AuthState, file: File | Directory, pdh?: string): string => {\n    let keepUrl = getKeepUrl(file, pdh);\n    return getInlineFileUrl(\n        `${auth.config.keepWebServiceUrl}/c=${keepUrl}?api_token=${auth.apiToken}`,\n        auth.config.keepWebServiceUrl,\n        auth.config.keepWebInlineServiceUrl\n    );\n};\n\nconst getImageUrl = (auth: AuthState, file: File, pdh?: string): string => {\n    const keepUrl = getKeepUrl(file, pdh);\n    return getInlineFileUrl(\n        `${auth.config.keepWebServiceUrl}/c=${keepUrl}?api_token=${auth.apiToken}`,\n        auth.config.keepWebServiceUrl,\n        auth.config.keepWebInlineServiceUrl\n    );\n};\n\nconst isFileImage = (basename?: string): boolean => {\n    return basename ? (mime.getType(basename) || \"\").startsWith(\"image/\") : false;\n};\n\nconst isFileUrl = (location?: string): boolean =>\n    !!location && !KEEP_URL_REGEX.exec(location) && (location.startsWith(\"http://\") || location.startsWith(\"https://\"));\n\nconst normalizeDirectoryLocation = (directory: Directory): Directory => {\n    if (!directory.location) {\n        return directory;\n    }\n    return {\n        ...directory,\n        location: (directory.location || \"\").endsWith(\"/\") ? directory.location : directory.location + \"/\",\n    };\n};\n\nconst directoryToProcessIOValue = (directory: Directory, auth: AuthState, pdh?: string): ProcessIOValue => {\n    if (isExternalValue(directory)) {\n        return { display: <UnsupportedValue /> };\n    }\n\n    const normalizedDirectory = normalizeDirectoryLocation(directory);\n    return {\n        display: (\n            <KeepUrlPath\n                auth={auth}\n                res={normalizedDirectory}\n                pdh={pdh}\n            />\n        ),\n        collection: (\n            <KeepUrlBase\n                auth={auth}\n                res={normalizedDirectory}\n                pdh={pdh}\n            />\n        ),\n    };\n};\n\ntype MuiLinkWithTooltipProps = WithStyles<CssRules> & React.PropsWithChildren<LinkProps>;\n\nconst MuiLinkWithTooltip = withStyles(styles)((props: MuiLinkWithTooltipProps) => (\n    <Tooltip title={props.title} classes={{tooltip: props.classes.wrapTooltip}}>\n        <MuiLink {...props}>\n            {props.children}\n        </MuiLink>\n    </Tooltip>\n));\n\nconst fileToProcessIOValue = (file: File, secondary: boolean, auth: AuthState, pdh: string | undefined, mainFilePdh: string): ProcessIOValue => {\n    if (isExternalValue(file)) {\n        return { display: <UnsupportedValue /> };\n    }\n\n    if (isFileUrl(file.location)) {\n        return {\n            display: (\n                <MuiLinkWithTooltip\n                    href={file.location}\n                    target=\"_blank\"\n                    rel=\"noopener\"\n                    title={file.location}\n                >\n                    {file.location}\n                </MuiLinkWithTooltip>\n            ),\n            secondary,\n        };\n    }\n\n    const resourcePdh = getResourcePdhUrl(file, pdh);\n    return {\n        display: (\n      
      <KeepUrlPath\n                auth={auth}\n                res={file}\n                pdh={pdh}\n            />\n        ),\n        secondary,\n        imageUrl: isFileImage(file.basename) ? getImageUrl(auth, file, pdh) : undefined,\n        collection:\n            resourcePdh !== mainFilePdh ? (\n                <KeepUrlBase\n                    auth={auth}\n                    res={file}\n                    pdh={pdh}\n                />\n            ) : (\n                <></>\n            ),\n    };\n};\n\nconst isExternalValue = (val: any) => Object.keys(val).includes(\"$import\") || Object.keys(val).includes(\"$include\");\n\nexport const EmptyValue = withStyles(styles)(({ classes }: WithStyles<CssRules>) => <span className={classes.emptyValue}>No value</span>);\n\nconst UnsupportedValue = withStyles(styles)(({ classes }: WithStyles<CssRules>) => <span className={classes.emptyValue}>Cannot display value</span>);\n\nconst SecretValue = withStyles(styles)(({ classes }: WithStyles<CssRules>) => <span className={classes.emptyValue}>Cannot display secret</span>);\n\nconst UnsupportedValueChip = withStyles(styles)(({ classes }: WithStyles<CssRules>) => (\n    <Chip\n        icon={<InfoIcon />}\n        label={\"Cannot display value\"}\n    />\n));\n"
  },
  {
    "path": "services/workbench2/src/views/process-panel/process-log-card.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useState } from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { IconButton, CardContent, Tooltip, Grid, Typography } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { useAsyncInterval } from 'common/use-async-interval';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport {\n    CollectionIcon,\n    CopyIcon,\n    LogIcon,\n    TextDecreaseIcon,\n    TextIncreaseIcon,\n    WordWrapOffIcon,\n    WordWrapOnIcon,\n} from 'components/icon/icon';\nimport { Process, isProcessRunning, isProcessQueued } from 'store/processes/process';\nimport { MPVPanelProps } from 'components/multi-panel-view/multi-panel-view';\nimport {\n    FilterOption,\n    ProcessLogForm\n} from 'views/process-panel/process-log-form';\nimport { ProcessLogCodeSnippet } from 'views/process-panel/process-log-code-snippet';\nimport { DefaultView } from 'components/default-view/default-view';\nimport { CodeSnippetDataProps } from 'components/code-snippet/code-snippet';\nimport CopyToClipboard from 'react-copy-to-clipboard';\n\ntype CssRules = 'card' | 'content' | 'title' | 'iconHeader' | 'header' | 'namePlate' | 'toolbarWrapper' | 'toolbar' | 'root' | 'logViewer' | 'logViewerContainer';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        height: '100%',\n        overflow: 'hidden',\n        display: 'flex',\n        flexDirection: 'column',\n    },\n    card: {\n        height: '100%',\n    },\n    header: {\n        paddingTop: theme.spacing(1),\n        paddingBottom: theme.spacing(1),\n        display: 'flex',\n        flexDirection: 'row',\n        alignItems: 'center',\n        justifyContent: 'space-between',\n    },\n    namePlate: {\n        display: 'flex',\n        paddingTop: theme.spacing(1),\n        paddingLeft: theme.spacing(1),\n    },\n    toolbarWrapper: {\n        flexShrink: 1,\n        marginRight: theme.spacing(0.5),\n        marginTop: '-5px',\n        marginBottom: '-5px',\n        zIndex: 1000,\n    },\n    toolbar: {\n        justifyContent: 'flex-end',\n    },\n    content: {\n        padding: theme.spacing(0),\n        height: '100%',\n    },\n    logViewer: {\n        height: '100%',\n        overflowY: 'scroll', // Required for MacOS's Safari -- See #19687\n    },\n    logViewerContainer: {\n        height: '100%',\n    },\n    title: {\n        overflow: 'hidden',\n        paddingLeft: theme.spacing(1),\n        color: theme.customs.colors.greyD\n    },\n    iconHeader: {\n        fontSize: '1.875rem',\n        color: theme.customs.colors.greyL\n    },\n});\n\nexport interface ProcessLogsCardDataProps {\n    process: Process;\n    selectedFilter: FilterOption;\n    filters: FilterOption[];\n}\n\nexport interface ProcessLogsCardActionProps {\n    onLogFilterChange: (filter: FilterOption) => void;\n    navigateToLog: (uuid: string) => void;\n    onCopy: (text: string) => void;\n    pollProcessLogs: (processUuid: string) => Promise<void>;\n}\n\ntype ProcessLogsCardProps = ProcessLogsCardDataProps\n    & ProcessLogsCardActionProps\n    & CodeSnippetDataProps\n    & WithStyles<CssRules>\n    & MPVPanelProps;\n\nexport const ProcessLogsCard = withStyles(styles)(\n    ({ classes, process, filters, selectedFilter, lines, onLogFilterChange, navigateToLog, onCopy, pollProcessLogs, panelName }: 
ProcessLogsCardProps) => {\n        const [wordWrap, setWordWrap] = useState<boolean>(true);\n        const [fontSize, setFontSize] = useState<number>(3);\n        const fontBaseSize = 10;\n        const fontStepSize = 1;\n\n        useAsyncInterval(() => (\n            pollProcessLogs(process.containerRequest.uuid)\n        ), isProcessQueued(process) ? 20000 : (isProcessRunning(process) ? 2000 : null));\n\n        return (\n            <Grid item className={classes.root} xs={12}>\n                <section className={classes.card}>\n                    <div className={classes.header}>\n                        <div className={classes.namePlate}>\n                            <LogIcon className={classes.iconHeader} />\n                            <Typography noWrap variant='h6' className={classes.title}>\n                                Logs\n                            </Typography>\n                        </div>\n                        <div className={classes.toolbarWrapper}>\n                            <Grid container direction='row' alignItems='center' className={classes.toolbar}>\n                                <Grid item>\n                                    <ProcessLogForm selectedFilter={selectedFilter} filters={filters} onChange={onLogFilterChange} />\n                                </Grid>\n                                <Grid item>\n                                    <Tooltip title=\"Decrease font size\" disableFocusListener>\n                                        <IconButton onClick={() => fontSize > 1 && setFontSize(fontSize-1)} size=\"large\">\n                                            <TextDecreaseIcon />\n                                        </IconButton>\n                                    </Tooltip>\n                                </Grid>\n                                <Grid item>\n                                    <Tooltip title=\"Increase font size\" disableFocusListener>\n                                        <IconButton onClick={() => fontSize < 5 && setFontSize(fontSize+1)} size=\"large\">\n                                            <TextIncreaseIcon />\n                                        </IconButton>\n                                    </Tooltip>\n                                </Grid>\n                                <Grid item>\n                                    <Tooltip title=\"Copy log to clipboard\" disableFocusListener>\n                                        <IconButton size=\"large\">\n                                            <CopyToClipboard text={lines.join(\"\\n\")} onCopy={() => onCopy(\"Log copied to clipboard\")}>\n                                                <CopyIcon />\n                                            </CopyToClipboard>\n                                        </IconButton>\n                                    </Tooltip>\n                                </Grid>\n                                <Grid item>\n                                    <Tooltip title={`${wordWrap ? 'Disable' : 'Enable'} word wrapping`} disableFocusListener>\n                                        <IconButton onClick={() => setWordWrap(!wordWrap)} size=\"large\">\n                                            {wordWrap ? 
<WordWrapOffIcon /> : <WordWrapOnIcon />}\n                                        </IconButton>\n                                    </Tooltip>\n                                </Grid>\n                                <Grid item>\n                                    <Tooltip title=\"Go to Log collection\" disableFocusListener>\n                                        <IconButton\n                                            onClick={() => navigateToLog(process.containerRequest.logUuid!)}\n                                            size=\"large\">\n                                            <CollectionIcon />\n                                        </IconButton>\n                                    </Tooltip>\n                                </Grid>\n                            </Grid>\n                        </div>\n                    </div>\n                    <CardContent className={classes.content}>\n                        {lines.length > 0 ?\n                            <Grid className={classes.logViewerContainer} container spacing={3} direction='column'>\n                                <Grid className={classes.logViewer} item xs>\n                                    <ProcessLogCodeSnippet fontSize={fontBaseSize+(fontStepSize*fontSize)} wordWrap={wordWrap} lines={lines} />\n                                </Grid>\n                            </Grid>\n                            :\n                            <DefaultView icon={LogIcon} messages={['No logs yet']} />\n                        }\n                    </CardContent>\n                </section>\n            </Grid >\n        );\n});\n"
  },
  {
    "path": "services/workbench2/src/views/process-panel/process-log-code-snippet.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useEffect, useRef, useState } from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { ThemeProvider, Theme, StyledEngineProvider, createTheme } from '@mui/material/styles';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { Link, Typography } from '@mui/material';\nimport { navigationNotAvailable } from 'store/navigation/navigation-action';\nimport { Dispatch } from 'redux';\nimport { connect, DispatchProp } from 'react-redux';\nimport classNames from 'classnames';\nimport { FederationConfig, getNavUrl } from 'routes/routes';\nimport { RootState } from 'store/store';\nimport { grey } from '@mui/material/colors';\n\n\ndeclare module '@mui/styles/defaultTheme' {\n  // eslint-disable-next-line @typescript-eslint/no-empty-interface\n  interface DefaultTheme extends Theme {}\n}\n\n\ntype CssRules = 'root' | 'wordWrapOn' | 'wordWrapOff' | 'logText';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        boxSizing: 'border-box',\n        overflow: 'auto',\n        backgroundColor: '#000',\n        height: `calc(100% - ${theme.spacing(4)})`, // so that horizontal scollbar is visible\n        \"& a\": {\n            color: theme.palette.primary.main,\n        },\n    },\n    logText: {\n        color: '#fff',\n        padding: theme.spacing(0, 0.5),\n        display: 'block',\n    },\n    wordWrapOn: {\n        overflowWrap: 'anywhere',\n    },\n    wordWrapOff: {\n        whiteSpace: 'nowrap',\n    },\n});\n\nconst theme = createTheme({\n    components: {\n        MuiTypography: {\n            styleOverrides: {\n                body2: {\n                    color: grey[\"200\"]\n                },\n            },\n        },\n    },\n    typography: {\n        fontFamily: 'monospace',\n    }\n});\n\ninterface ProcessLogCodeSnippetProps {\n    lines: string[];\n    fontSize: number;\n    wordWrap?: boolean;\n}\n\ninterface ProcessLogCodeSnippetAuthProps {\n    auth: FederationConfig;\n}\n\nconst renderLinks = (fontSize: number, auth: FederationConfig, dispatch: Dispatch) => (text: string) => {\n    // Matches UUIDs & PDHs\n    const REGEX = /[a-z0-9]{5}-[a-z0-9]{5}-[a-z0-9]{15}|[0-9a-f]{32}\\+\\d+/g;\n    const links = text.match(REGEX);\n    if (!links) {\n        return <Typography style={{ fontSize: fontSize }}>{text}</Typography>;\n    }\n    return <Typography style={{ fontSize: fontSize }}>\n        {text.split(REGEX).map((part, index) =>\n            <React.Fragment key={index}>\n                {part}\n                {links[index] &&\n                    <Link onClick={() => {\n                        const url = getNavUrl(links[index], auth)\n                        if (url) {\n                            window.open(`${window.location.origin}${url}`, '_blank', \"noopener\");\n                        } else {\n                            dispatch(navigationNotAvailable(links[index]));\n                        }\n                    }}\n                        style={{ cursor: 'pointer' }}>\n                        {links[index]}\n                    </Link>}\n            </React.Fragment>\n        )}\n    </Typography>;\n};\n\nconst mapStateToProps = (state: RootState): ProcessLogCodeSnippetAuthProps => ({\n    auth: state.auth,\n});\n\nexport const ProcessLogCodeSnippet 
= withStyles(styles)(connect(mapStateToProps)(\n    ({ classes, lines, fontSize, auth, dispatch, wordWrap }: ProcessLogCodeSnippetProps & WithStyles<CssRules> & ProcessLogCodeSnippetAuthProps & DispatchProp) => {\n        const [followMode, setFollowMode] = useState<boolean>(true);\n        const scrollRef = useRef<HTMLDivElement>(null);\n\n        useEffect(() => {\n            if (followMode && scrollRef.current && lines.length > 0) {\n                // Scroll to bottom\n                scrollRef.current.scrollTop = scrollRef.current.scrollHeight;\n            }\n        }, [followMode, lines, scrollRef]);\n\n        return (\n            <StyledEngineProvider injectFirst>\n                <ThemeProvider theme={theme}>\n                    <div ref={scrollRef} className={classes.root}\n                        onScroll={(e) => {\n                            const elem = e.target as HTMLDivElement;\n                            if (elem.scrollTop + (elem.clientHeight * 1.1) >= elem.scrollHeight) {\n                                setFollowMode(true);\n                            } else {\n                                setFollowMode(false);\n                            }\n                        }}>\n                        {lines.map((line: string, index: number) =>\n                            <Typography key={index} component=\"span\"\n                                className={classNames(classes.logText, wordWrap ? classes.wordWrapOn : classes.wordWrapOff)}>\n                                {renderLinks(fontSize, auth, dispatch)(line)}\n                            </Typography>\n                        )}\n                    </div>\n                </ThemeProvider>\n            </StyledEngineProvider>\n        );\n    }));\n"
  },
  {
    "path": "services/workbench2/src/views/process-panel/process-log-form.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { FormControl, Select, MenuItem, Input } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\n\ntype CssRules = 'formControl';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    formControl: {\n        minWidth: theme.spacing(15),\n    }\n});\n\nexport interface FilterOption {\n    label: string;\n    value: string;\n}\n\nexport interface ProcessLogFormDataProps {\n    selectedFilter: FilterOption;\n    filters: FilterOption[];\n}\n\nexport interface ProcessLogFormActionProps {\n    onChange: (filter: FilterOption) => void;\n}\n\ntype ProcessLogFormProps = ProcessLogFormDataProps & ProcessLogFormActionProps & WithStyles<CssRules>;\n\nexport const ProcessLogForm = withStyles(styles)(\n    ({ classes, selectedFilter, onChange, filters }: ProcessLogFormProps) =>\n        <form autoComplete=\"off\" data-cy=\"process-logs-filter\">\n            <FormControl variant=\"standard\" className={classes.formControl}>\n                <Select\n                    variant=\"standard\"\n                    value={selectedFilter.value}\n                    onChange={(ev: any) => onChange({ label: ev.target.innerText, value: ev.target.value})}\n                    input={<Input name=\"eventType\" id=\"log-label-placeholder\" />}\n                    name=\"eventType\">\n                    {\n                        filters.map(option =>\n                            <MenuItem key={option.value} value={option.value}>{option.label}</MenuItem>\n                        )\n                    }\n                </Select>\n            </FormControl>\n        </form>\n);"
  },
  {
    "path": "services/workbench2/src/views/process-panel/process-output-collection-files.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from \"react-redux\";\nimport {\n    CollectionPanelFiles as Component,\n    CollectionPanelFilesProps\n} from \"components/collection-panel-files/collection-panel-files\";\nimport { Dispatch } from \"redux\";\nimport { collectionPanelFilesAction } from \"store/collection-panel/collection-panel-files/collection-panel-files-actions\";\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\nimport { openContextMenuAndSelect, openCollectionFilesContextMenu } from 'store/context-menu/context-menu-actions';\nimport { openUploadCollectionFilesDialog } from 'store/collections/collection-upload-actions';\nimport { ResourceKind } from \"models/resource\";\nimport { openDetailsPanel } from 'store/details-panel/details-panel-action';\n\nconst mapDispatchToProps = (dispatch: Dispatch): Pick<CollectionPanelFilesProps, 'onSearchChange' | 'onFileClick' | 'onUploadDataClick' | 'onCollapseToggle' | 'onSelectionToggle' | 'onItemMenuOpen' | 'onOptionsMenuOpen'> => ({\n    onUploadDataClick: (targetLocation?: string) => {\n        dispatch<any>(openUploadCollectionFilesDialog(targetLocation));\n    },\n    onCollapseToggle: (id) => {\n        dispatch(collectionPanelFilesAction.TOGGLE_COLLECTION_FILE_COLLAPSE({ id }));\n    },\n    onSelectionToggle: (event, item) => {\n        dispatch(collectionPanelFilesAction.TOGGLE_COLLECTION_FILE_SELECTION({ id: item.id }));\n    },\n    onItemMenuOpen: (event, item, isWritable) => {\n        const isDirectory = item.data?.type === 'directory';\n        dispatch<any>(openContextMenuAndSelect(\n            event,\n            {\n                menuKind: isWritable\n                    ? isDirectory\n                        ? ContextMenuKind.COLLECTION_DIRECTORY_ITEM\n                        : ContextMenuKind.COLLECTION_FILE_ITEM\n                    : isDirectory\n                        ? ContextMenuKind.READONLY_COLLECTION_DIRECTORY_ITEM\n                        : ContextMenuKind.READONLY_COLLECTION_FILE_ITEM,\n                kind: ResourceKind.COLLECTION,\n                name: item.data?.name || '',\n                uuid: item.id,\n                ownerUuid: ''\n            }\n        ));\n    },\n    onSearchChange: (searchValue: string) => {\n        dispatch(collectionPanelFilesAction.ON_SEARCH_CHANGE(searchValue));\n    },\n    onOptionsMenuOpen: (event, isWritable) => {\n        dispatch<any>(openCollectionFilesContextMenu(event, isWritable));\n    },\n    onFileClick: (id) => {\n        dispatch<any>(openDetailsPanel(id));\n    },\n});\n\nexport const ProcessOutputCollectionFiles = connect(null, mapDispatchToProps)(Component);\n"
  },
  {
    "path": "services/workbench2/src/views/process-panel/process-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport {  Grid, Typography } from '@mui/material';\nimport { ProcessIcon } from \"components/icon/icon\";\nimport { getProcess, ProcessStatus, getProcessStatus, isProcessQueued, isProcessRunning } from \"store/processes/process\";\nimport { SubprocessPanel } from \"views/subprocess-panel/subprocess-panel\";\nimport { MPVContainer, MPVPanelContent, MPVPanelState } from \"components/multi-panel-view/multi-panel-view\";\nimport { ProcessIOCard, ProcessIOCardType } from \"./process-io-card\";\nimport { ProcessResourceCard } from \"./process-resource-card\";\nimport { getProcessPanelLogs, ProcessLogsPanel } from \"store/process-logs-panel/process-logs-panel\";\nimport { ProcessLogsCard } from \"./process-log-card\";\nimport { FilterOption } from \"views/process-panel/process-log-form\";\nimport { getInputCollectionMounts } from \"store/processes/processes-actions\";\nimport { AuthState } from \"store/auth/auth-reducer\";\nimport { ProcessCmdCard } from \"./process-cmd-card\";\nimport { ContainerRequestResource } from \"models/container-request\";\nimport { ProcessPanel as ProcessPanelState } from \"store/process-panel/process-panel\";\nimport { NotFoundView } from 'views/not-found-panel/not-found-panel';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { useAsyncInterval } from \"common/use-async-interval\";\nimport { WebSocketService } from \"websocket/websocket-service\";\nimport { RouteComponentProps } from 'react-router';\nimport { ResourcesState } from 'store/resources/resources';\nimport { getInlineFileUrl } from \"views-components/context-menu/actions/helpers\";\nimport { CollectionFile } from \"models/collection-file\";\nimport { DetailsCardRoot } from \"views-components/details-card/details-card-root\";\nimport { OverviewPanel } from 'components/overview-panel/overview-panel';\nimport WarningIcon from '@mui/icons-material/Warning';\nimport { Link } from \"react-router-dom\";\nimport { ProcessProperties } from \"store/processes/process\";\nimport { getResourceUrl } from \"routes/routes\";\nimport { ProcessAttributes } from './process-attributes';\n\ntype CssRules = \"root\" | 'mpvRoot' | 'overview';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: \"100%\",\n        display: \"flex\",\n        flexDirection: \"column\",\n    },\n    mpvRoot: {\n        flexGrow: 1,\n        display: 'flex',\n        flexDirection: 'column',\n        flexWrap: 'nowrap',\n        minHeight: \"500px\",\n        '& > div': {\n            height: '100%',\n        },\n    },\n    overview: {\n        height: '100%',\n    },\n});\n\nexport interface ProcessPanelRootDataProps {\n    resources: ResourcesState;\n    processPanel: ProcessPanelState;\n    processLogsPanel: ProcessLogsPanel;\n    auth: AuthState;\n    usageReport: CollectionFile | null;\n}\n\nexport interface ProcessPanelRootActionProps {\n    onToggle: (status: string) => void;\n    onLogFilterChange: (filter: FilterOption) => void;\n    navigateToLog: (uuid: string) => void;\n    onCopyToClipboard: (uuid: string) => void;\n    loadInputs: (containerRequest: ContainerRequestResource) => void;\n    loadOutputs: (containerRequest: ContainerRequestResource) => void;\n    
loadNodeJson: (containerRequest: ContainerRequestResource) => void;\n    loadOutputDefinitions: (containerRequest: ContainerRequestResource) => void;\n    updateOutputParams: () => void;\n    pollProcessLogs: (processUuid: string) => Promise<void>;\n    refreshProcess: (processUuid: string) => Promise<void>;\n}\n\nexport type ProcessPanelRootProps = ProcessPanelRootDataProps & ProcessPanelRootActionProps & WithStyles<CssRules>;\n\nconst panelsData: MPVPanelState[] = [\n    { name: \"Overview\" },\n    { name: \"Logs\" },\n    { name: \"Subprocesses\" },\n    { name: \"Outputs\" },\n    { name: \"Inputs\" },\n    { name: \"Command\" },\n    { name: \"Resources\" },\n];\n\nexport const ProcessPanelRoot = withStyles(styles)(({\n    auth,\n    resources,\n    processPanel,\n    processLogsPanel,\n    loadInputs,\n    loadOutputs,\n    loadNodeJson,\n    loadOutputDefinitions,\n    updateOutputParams,\n    pollProcessLogs,\n    refreshProcess,\n    ...props\n}: ProcessPanelRootProps & RouteComponentProps<{ id: string }>) => {\n    const process = getProcess(props.match.params.id)(resources);\n    const outputUuid = process?.containerRequest.outputUuid;\n    const containerRequest = process?.containerRequest;\n    const inputMounts = getInputCollectionMounts(process?.containerRequest);\n    const webSocketConnected = WebSocketService.getInstance().isActive();\n    const resubmittedUrl = containerRequest && getResourceUrl(containerRequest.properties[ProcessProperties.FAILED_CONTAINER_RESUBMITTED]);\n    const { inputRaw, inputParams, outputData, outputDefinitions, outputParams, nodeInfo, usageReport } = processPanel;\n\n    const usageReportWithUrl = (process || null) && usageReport && getInlineFileUrl(\n                `${auth.config.keepWebServiceUrl}${usageReport.url}?api_token=${auth.apiToken}`,\n                auth.config.keepWebServiceUrl,\n                auth.config.keepWebInlineServiceUrl\n            )\n\n    React.useEffect(() => {\n        if (containerRequest) {\n            // Load inputs from mounts or props\n            loadInputs(containerRequest);\n            // Fetch raw output (loads from props or keep)\n            loadOutputs(containerRequest);\n            // Loads output definitions from mounts into store\n            loadOutputDefinitions(containerRequest);\n            // load the assigned instance type from node.json in\n            // the log collection\n            loadNodeJson(containerRequest);\n        }\n    }, [containerRequest, loadInputs, loadOutputs, loadOutputDefinitions, loadNodeJson]);\n\n    const maxHeight = \"100%\";\n\n    // Trigger processing output params when raw or definitions change\n    React.useEffect(() => {\n        updateOutputParams();\n    }, [outputData, outputDefinitions, updateOutputParams]);\n\n    // If WebSocket not connected, poll queued/running process for status updates\n    const shouldPoll =\n        !webSocketConnected &&\n        process && (\n            isProcessQueued(process)\n            || isProcessRunning(process)\n            // Status is unknown if has containerUuid but container resource not loaded\n            || getProcessStatus(process) === ProcessStatus.UNKNOWN\n        );\n    useAsyncInterval(async () => {\n        process && await refreshProcess(process.containerRequest.uuid);\n    }, shouldPoll ? 15000 : null);\n\n        return process ? 
(\n            <section className={props.classes.root}>\n                <DetailsCardRoot />\n                <MPVContainer\n                    className={props.classes.mpvRoot}\n                    panelStates={panelsData}\n                    justifyContent=\"flex-start\">\n                    <MPVPanelContent\n                        forwardProps\n                        item\n                        xs=\"auto\"\n                        className={props.classes.overview}\n                        data-cy=\"process-details\">\n                        <>\n                            {resubmittedUrl && <Grid item xs={12}>\n                                <Typography>\n                                    <WarningIcon />\n                                    This process failed but was automatically resubmitted.  <Link to={resubmittedUrl}> Click here to go to the resubmitted process.</Link>\n                                </Typography>\n                            </Grid>}\n                            <OverviewPanel detailsElement={<ProcessAttributes request={process.containerRequest} container={process.container} hideProcessPanelRedundantFields />} />\n                        </>\n                    </MPVPanelContent>\n                    <MPVPanelContent\n                        forwardProps\n                        item\n                        xs\n                        minHeight={maxHeight}\n                        maxHeight={maxHeight}\n                        data-cy=\"process-logs\">\n                        <ProcessLogsCard\n                            onCopy={props.onCopyToClipboard}\n                            process={process}\n                            lines={getProcessPanelLogs(processLogsPanel)}\n                            selectedFilter={{\n                                label: processLogsPanel.selectedFilter,\n                                value: processLogsPanel.selectedFilter,\n                            }}\n                            filters={processLogsPanel.filters.map(filter => ({ label: filter, value: filter }))}\n                            onLogFilterChange={props.onLogFilterChange}\n                            navigateToLog={props.navigateToLog}\n                            pollProcessLogs={pollProcessLogs}\n                        />\n                    </MPVPanelContent>\n                    <MPVPanelContent\n                        forwardProps\n                        xs\n                        item\n                        maxHeight={maxHeight}\n                        data-cy=\"process-children\">\n                        <SubprocessPanel process={process} />\n                    </MPVPanelContent>\n                    <MPVPanelContent\n                        forwardProps\n                        xs\n                        item\n                        maxHeight={maxHeight}\n                        data-cy=\"process-outputs\">\n                        <ProcessIOCard\n                            label={ProcessIOCardType.OUTPUT}\n                            process={process}\n                            params={outputParams}\n                            raw={outputData?.raw}\n                            failedToLoadOutputCollection={outputData?.failedToLoadOutputCollection}\n                            outputUuid={outputUuid || \"\"}\n                        />\n                    </MPVPanelContent>\n                    <MPVPanelContent\n                        forwardProps\n                        xs\n                        item\n              
          maxHeight={maxHeight}\n                        data-cy=\"process-inputs\">\n                        <ProcessIOCard\n                            label={ProcessIOCardType.INPUT}\n                            process={process}\n                            params={inputParams}\n                            raw={inputRaw}\n                            mounts={inputMounts}\n                        />\n                    </MPVPanelContent>\n                    <MPVPanelContent\n                        forwardProps\n                        xs=\"auto\"\n                        item\n                        maxHeight={maxHeight}\n                        data-cy=\"process-cmd\">\n                        <ProcessCmdCard\n                            onCopy={props.onCopyToClipboard}\n                            process={process}\n                        />\n                    </MPVPanelContent>\n                    <MPVPanelContent\n                        forwardProps\n                        xs\n                        item\n                        data-cy=\"process-resources\">\n                        <ProcessResourceCard\n                            process={process}\n                            nodeInfo={nodeInfo}\n                            usageReport={usageReportWithUrl}\n                        />\n                    </MPVPanelContent>\n                </MPVContainer>\n            </section>\n        ) : (\n            <NotFoundView\n                icon={ProcessIcon}\n                messages={[\"Process not found\"]}\n            />\n        );\n}\n);\n"
  },
  {
    "path": "services/workbench2/src/views/process-panel/process-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from \"store/store\";\nimport { connect } from \"react-redux\";\nimport { Dispatch } from \"redux\";\nimport { ProcessPanelRootDataProps, ProcessPanelRootActionProps, ProcessPanelRoot } from \"./process-panel-root\";\nimport {\n    loadInputs,\n    loadOutputDefinitions,\n    loadOutputs,\n    toggleProcessPanelFilter,\n    updateOutputParams,\n    loadNodeJson,\n    loadProcess,\n} from \"store/process-panel/process-panel-actions\";\nimport { navigateToLogCollection, pollProcessLogs, setProcessLogsPanelFilter } from \"store/process-logs-panel/process-logs-panel-actions\";\nimport { snackbarActions, SnackbarKind } from \"store/snackbar/snackbar-actions\";\n\nconst mapStateToProps = ({ auth, resources, processPanel, processLogsPanel }: RootState): ProcessPanelRootDataProps => {\n    return {\n        resources,\n        processLogsPanel: processLogsPanel,\n        auth: auth,\n        processPanel: processPanel,\n        usageReport: processPanel.usageReport,\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): ProcessPanelRootActionProps => ({\n    onCopyToClipboard: (message: string) => {\n        dispatch<any>(\n            snackbarActions.OPEN_SNACKBAR({\n                message,\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS,\n            })\n        );\n    },\n    onToggle: status => {\n        dispatch<any>(toggleProcessPanelFilter(status));\n    },\n    onLogFilterChange: filter => dispatch(setProcessLogsPanelFilter(filter.value)),\n    navigateToLog: uuid => dispatch<any>(navigateToLogCollection(uuid)),\n    loadInputs: containerRequest => dispatch<any>(loadInputs(containerRequest)),\n    loadOutputs: containerRequest => dispatch<any>(loadOutputs(containerRequest)),\n    loadOutputDefinitions: containerRequest => dispatch<any>(loadOutputDefinitions(containerRequest)),\n    updateOutputParams: () => dispatch<any>(updateOutputParams()),\n    loadNodeJson: containerRequest => dispatch<any>(loadNodeJson(containerRequest)),\n    pollProcessLogs: processUuid => dispatch<any>(pollProcessLogs(processUuid)),\n    refreshProcess: processUuid => dispatch<any>(loadProcess(processUuid)),\n});\n\nexport const ProcessPanel = connect(mapStateToProps, mapDispatchToProps)(ProcessPanelRoot);\n"
  },
  {
    "path": "services/workbench2/src/views/process-panel/process-resource-card.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { CardHeader, CardContent, Typography, Grid, Link } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport {\n    ResourceIcon,\n    ShowChartIcon,\n} from 'components/icon/icon';\nimport { MPVPanelProps } from 'components/multi-panel-view/multi-panel-view';\nimport { connect } from 'react-redux';\nimport { Process } from 'store/processes/process';\nimport { NodeInstanceType } from 'store/process-panel/process-panel';\nimport { DetailsAttribute } from \"components/details-attribute/details-attribute\";\nimport { formatFileSize } from \"common/formatters\";\nimport { MountKind } from 'models/mount-types';\n\ninterface ProcessResourceCardDataProps {\n    process: Process;\n    nodeInfo: NodeInstanceType | null;\n    usageReport: string | null;\n}\n\ntype CssRules = \"card\" | \"header\" | \"title\" | \"avatar\" | \"iconHeader\" | \"content\" | \"sectionH3\" | \"reportButton\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    card: {\n        height: '100%'\n    },\n    header: {\n        paddingBottom: \"0px\"\n    },\n    title: {\n        paddingTop: theme.spacing(0.5),\n    },\n    avatar: {\n        paddingTop: theme.spacing(0.5),\n    },\n    iconHeader: {\n        fontSize: '1.875rem',\n        color: theme.customs.colors.greyL,\n    },\n    content: {\n        paddingTop: \"0px\",\n        maxHeight: `calc(100% - ${theme.spacing(7.5)})`,\n        overflow: \"auto\"\n    },\n    sectionH3: {\n        margin: \"0.5em\",\n        color: theme.customs.colors.greyD,\n        fontSize: \"0.8125rem\",\n        textTransform: \"uppercase\",\n    },\n    reportButton: {\n    }\n});\n\ntype ProcessResourceCardProps = ProcessResourceCardDataProps & WithStyles<CssRules> & MPVPanelProps;\n\nexport const ProcessResourceCard = withStyles(styles)(connect()(\n    ({ classes, nodeInfo, usageReport, panelName, process, }: ProcessResourceCardProps) => {\n        let diskRequest = 0;\n        if (process.container?.mounts) {\n            for (const mnt in process.container.mounts) {\n                const mp = process.container.mounts[mnt];\n                if (mp.kind === MountKind.TEMPORARY_DIRECTORY) {\n                    diskRequest += mp.capacity;\n                }\n            }\n        }\n\n        return (\n            <section className={classes.card} data-cy=\"process-resources-card\">\n                <CardHeader\n                    className={classes.header}\n                    classes={{\n                        content: classes.title,\n                        avatar: classes.avatar,\n                    }}\n                    avatar={<ResourceIcon className={classes.iconHeader} />}\n                    title={\n                        <Typography noWrap variant='h6' color='inherit'>\n                                                    Resources\n                        </Typography>\n                    }\n                    action={\n                        <div>\n                              {usageReport &&\n                               <Link href={usageReport} className={classes.reportButton} target=\"_blank\"><ShowChartIcon /> Resource usage report</Link>}\n                        </div>\n                  
  } />\n                <CardContent className={classes.content}>\n                    <Grid container>\n                        <Grid item xs={4}>\n                            <h3 className={classes.sectionH3}>Requested Resources</h3>\n                            <Grid container>\n                                <Grid item xs={12}>\n                                    <DetailsAttribute label=\"Cores\" value={process.container?.runtimeConstraints.vcpus} />\n                                </Grid>\n                                <Grid item xs={12}>\n                                    <DetailsAttribute label=\"RAM*\" value={formatFileSize(process.container?.runtimeConstraints.ram)} />\n                                </Grid>\n                                <Grid item xs={12}>\n                                    <DetailsAttribute label=\"Disk\" value={formatFileSize(diskRequest)} />\n                                </Grid>\n\n                                {process.container?.runtimeConstraints.gpu &&\n                                 process.container?.runtimeConstraints.gpu.device_count > 0 ? <>\n                                     <Grid item xs={12}>\n                                         <DetailsAttribute label=\"GPU stack\" value={process.container?.runtimeConstraints.gpu.stack} />\n                                     </Grid>\n                                <Grid item xs={12}>\n                                    <DetailsAttribute label=\"GPU devices\" value={process.container?.runtimeConstraints.gpu.device_count} />\n                                </Grid>\n                                <Grid item xs={12}>\n                                    <DetailsAttribute label=\"GPU VRAM\" value={process.container?.runtimeConstraints.gpu.vram} />\n                                </Grid>\n                                <Grid item xs={12}>\n                                    <DetailsAttribute label=\"GPU hardware target\" value={process.container?.runtimeConstraints.gpu.hardware_target} />\n                                </Grid>\n                                <Grid item xs={12}>\n                                    <DetailsAttribute label=\"GPU driver version\" value={process.container?.runtimeConstraints.gpu.driver_version} />\n                                </Grid>\n                                 </> : null}\n\n                                {process.container?.runtimeConstraints.keep_cache_ram &&\n                                 process.container?.runtimeConstraints.keep_cache_ram > 0 ?\n                                                                                        <Grid item xs={12}>\n                                                                                            <DetailsAttribute label=\"Keep cache (RAM)\" value={formatFileSize(process.container?.runtimeConstraints.keep_cache_ram)} />\n                                                                                        </Grid> : null}\n\n                                {process.container?.runtimeConstraints.keep_cache_disk &&\n                                 process.container?.runtimeConstraints.keep_cache_disk > 0 ?\n                                                                                         <Grid item xs={12}>\n                                                                                             <DetailsAttribute label=\"Keep cache (disk)\" value={formatFileSize(process.container?.runtimeConstraints.keep_cache_disk)} />\n                                                        
                                 </Grid> : null}\n\n                                {process.container?.runtimeConstraints.API ? <Grid item xs={12}>\n                                    <DetailsAttribute label=\"API access\" value={process.container?.runtimeConstraints.API.toString()} />\n                                </Grid> : null}\n\n                            </Grid>\n                        </Grid>\n\n\n                        <Grid item xs={8}>\n                            <h3 className={classes.sectionH3}>Assigned Instance Type</h3>\n                            {nodeInfo === null ? <Grid item xs={8}>\n                                No instance type recorded\n                            </Grid>\n                            :\n                             <Grid container>\n                                 <Grid item xs={6}>\n                                     <DetailsAttribute label=\"Cores\" value={nodeInfo.VCPUs} />\n                                 </Grid>\n\n                                 <Grid item xs={6}>\n                                     <DetailsAttribute label=\"Provider type\" value={nodeInfo.ProviderType} />\n                                 </Grid>\n\n                                 <Grid item xs={6}>\n                                     <DetailsAttribute label=\"RAM\" value={formatFileSize(nodeInfo.RAM)} />\n                                 </Grid>\n\n                                 <Grid item xs={6}>\n                                     <DetailsAttribute label=\"Price\" value={\"$\" + nodeInfo.Price.toString()} />\n                                 </Grid>\n\n                                 <Grid item xs={6}>\n                                     <DetailsAttribute label=\"Disk\" value={formatFileSize(nodeInfo.IncludedScratch + nodeInfo.AddedScratch)} />\n                                 </Grid>\n\n                                 <Grid item xs={6}>\n                                     <DetailsAttribute label=\"Preemptible\" value={nodeInfo.Preemptible.toString()} />\n                                 </Grid>\n\n                                 {nodeInfo.GPU && nodeInfo.GPU.DeviceCount > 0 &&\n                                  <>\n                                      <Grid item xs={6}>\n                                          <DetailsAttribute label=\"GPU stack\" value={nodeInfo.GPU.Stack} />\n                                      </Grid>\n\n                                 <Grid item xs={6}>\n                                     <DetailsAttribute label=\"GPU devices\" value={nodeInfo.GPU.DeviceCount} />\n                                 </Grid>\n\n                                 <Grid item xs={6}>\n                                     <DetailsAttribute label=\"GPU VRAM\" value={nodeInfo.GPU.VRAM} />\n                                 </Grid>\n\n                                 <Grid item xs={6}>\n                                     <DetailsAttribute label=\"GPU hardware target\" value={nodeInfo.GPU.HardwareTarget} />\n                                 </Grid>\n\n                                 <Grid item xs={6}>\n                                     <DetailsAttribute label=\"GPU driver version\" value={nodeInfo.GPU.DriverVersion} />\n                                 </Grid>\n\n                                  </>\n                                 }\n                             </Grid>}\n                        </Grid>\n                    </Grid>\n                    <Typography>* RAM available to the program is limited to Requested RAM, not Instance 
RAM</Typography>\n                </CardContent>\n            </section >\n        );\n    }\n));\n"
  },
  {
    "path": "services/workbench2/src/views/project-panel/project-attributes.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { Grid } from '@mui/material';\nimport { RootState } from 'store/store';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithStyles } from '@mui/styles';\nimport { getResource } from 'store/resources/resources';\nimport { ResourceKind } from 'models/resource';\nimport { resourceLabel } from 'common/labels';\nimport { DetailsAttribute } from 'components/details-attribute/details-attribute';\nimport { ResourceWithName } from 'views-components/data-explorer/renderers';\nimport { GroupClass } from 'models/group';\nimport { formatDateTime } from 'common/formatters';\n\ntype CssRules = 'root' | 'tag';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n    },\n    tag: {\n        marginRight: theme.spacing(0.5),\n        marginBottom: theme.spacing(0.5),\n    },\n});\n\ntype ProjectOverviewProps = {\n    project: any;\n} & WithStyles<CssRules>;\n\nconst mapStateToProps = (state: RootState): Pick<ProjectOverviewProps, 'project'> => {\n    return {\n        project: getResource(state.properties.projectPanelCurrentUuid)(state.resources),\n    };\n};\n\nexport const ProjectAttributes = connect(mapStateToProps)(withStyles(styles)((({ project, classes }: ProjectOverviewProps) => {\n    if (!project || project.kind !== ResourceKind.PROJECT) {\n        return null;\n    }\n    return (\n        <Grid container spacing={1} className={classes.root}>\n            <Grid item xs={12} md={6}>\n                <DetailsAttribute\n                    label='Type'\n                    value={project.groupClass === GroupClass.FILTER ? 'Filter group' : resourceLabel(ResourceKind.PROJECT)}\n                />\n            </Grid>\n            <Grid item xs={12} md={6}>\n                <DetailsAttribute\n                    label='UUID'\n                    linkToUuid={project.uuid}\n                    value={project.uuid}\n                    />\n            </Grid>\n            <Grid item xs={12} md={6}>\n                <DetailsAttribute\n                    label='Owner'\n                    linkToUuid={project.ownerUuid}\n                    uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />}\n                    />\n            </Grid>\n            <Grid item xs={12} md={6}>\n                <DetailsAttribute\n                    label='Created at'\n                    value={formatDateTime(project.createdAt)}\n                />\n            </Grid>\n            <Grid item xs={12} md={6}>\n                <DetailsAttribute\n                    label='Last modified'\n                    value={formatDateTime(project.modifiedAt)}\n                />\n            </Grid>\n            <Grid item xs={12} md={6}>\n                <DetailsAttribute\n                    label='Last modified by'\n                    linkToUuid={project.modifiedByUserUuid}\n                    uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />}\n                    />\n            </Grid>\n        </Grid>\n    );\n})));\n"
  },
  {
    "path": "services/workbench2/src/views/project-panel/project-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { DataColumns, SortDirection } from \"components/data-table/data-column\";\nimport { ProjectResource } from \"models/project\";\nimport { createTree } from \"models/tree\";\nimport {\n    getInitialDataResourceTypeFilters,\n    getInitialProcessStatusFilters,\n    getInitialProcessTypeFilters,\n} from \"store/resource-type-filters/resource-type-filters\";\nimport {\n    ContainerRunTime,\n    ResourceContainerUuid,\n    ResourceCreatedAtDate,\n    ResourceDeleteDate,\n    ResourceFileCount,\n    ResourceFileSize,\n    ResourceLastModifiedDate,\n    ResourceLogUuid,\n    ResourceModifiedByUserUuid,\n    ResourceName,\n    ResourceOutputUuid,\n    ResourceOwnerWithName,\n    ResourceParentProcess,\n    ResourcePortableDataHash,\n    ResourceStatus,\n    ResourceTrashDate,\n    ResourceType,\n    ResourceUUID,\n    ResourceVersion,\n} from \"views-components/data-explorer/renderers\";\n\nexport enum ProjectPanelDataColumnNames {\n    NAME = 'Name',\n    STATUS = 'Status',\n    TYPE = 'Type',\n    OWNER = 'Owner',\n    PORTABLE_DATA_HASH = 'Portable Data Hash',\n    FILE_SIZE = 'File Size',\n    FILE_COUNT = 'File Count',\n    UUID = 'UUID',\n    CONTAINER_UUID = 'Container UUID',\n    RUNTIME = 'Runtime',\n    OUTPUT_UUID = 'Output UUID',\n    LOG_UUID = 'Log UUID',\n    PARENT_PROCESS = 'Parent Process UUID',\n    MODIFIED_BY_USER_UUID = 'Modified by User UUID',\n    VERSION = 'Version',\n    CREATED_AT = 'Date Created',\n    LAST_MODIFIED = 'Last Modified',\n    TRASH_AT = 'Trash at',\n    DELETE_AT = 'Delete at'\n}\n\nexport const projectPanelDataColumns: DataColumns<string, ProjectResource> = [\n    {\n        name: ProjectPanelDataColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: 'name' },\n        filters: createTree(),\n        render: (uuid) => <ResourceName uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelDataColumnNames.TYPE,\n        selected: true,\n        configurable: true,\n        filters: getInitialDataResourceTypeFilters(),\n        render: (uuid) => <ResourceType uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelDataColumnNames.OWNER,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceOwnerWithName uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelDataColumnNames.PORTABLE_DATA_HASH,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourcePortableDataHash uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelDataColumnNames.FILE_SIZE,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceFileSize uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelDataColumnNames.FILE_COUNT,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceFileCount uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelDataColumnNames.UUID,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceUUID uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelDataColumnNames.MODIFIED_BY_USER_UUID,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: 
(uuid) => <ResourceModifiedByUserUuid uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelDataColumnNames.VERSION,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceVersion uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelDataColumnNames.CREATED_AT,\n        selected: false,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: 'createdAt' },\n        filters: createTree(),\n        render: (uuid) => <ResourceCreatedAtDate uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelDataColumnNames.LAST_MODIFIED,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.DESC, field: 'modifiedAt' },\n        filters: createTree(),\n        render: (uuid) => <ResourceLastModifiedDate uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelDataColumnNames.TRASH_AT,\n        selected: false,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: 'trashAt' },\n        filters: createTree(),\n        render: (uuid) => <ResourceTrashDate uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelDataColumnNames.DELETE_AT,\n        selected: false,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: 'deleteAt' },\n        filters: createTree(),\n        render: (uuid) => <ResourceDeleteDate uuid={uuid} />,\n    },\n];\n\nexport enum ProjectPanelRunColumnNames {\n    NAME = 'Name',\n    STATUS = 'Status',\n    TYPE = 'Type',\n    OWNER = 'Owner',\n    PORTABLE_DATA_HASH = 'Portable Data Hash',\n    FILE_SIZE = 'File Size',\n    FILE_COUNT = 'File Count',\n    UUID = 'UUID',\n    CONTAINER_UUID = 'Container UUID',\n    RUNTIME = 'Runtime',\n    OUTPUT_UUID = 'Output UUID',\n    LOG_UUID = 'Log UUID',\n    PARENT_PROCESS = 'Parent Process UUID',\n    MODIFIED_BY_USER_UUID = 'Modified by User UUID',\n    VERSION = 'Version',\n    CREATED_AT = 'Date Created',\n    LAST_MODIFIED = 'Last Modified',\n    TRASH_AT = 'Trash at',\n    DELETE_AT = 'Delete at'\n}\n\nexport const projectPanelRunColumns: DataColumns<string, ProjectResource> = [\n    {\n        name: ProjectPanelRunColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: 'name' },\n        filters: createTree(),\n        render: (uuid) => <ResourceName uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.STATUS,\n        selected: true,\n        configurable: true,\n        mutuallyExclusiveFilters: true,\n        filters: getInitialProcessStatusFilters(),\n        render: (uuid) => <ResourceStatus uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.TYPE,\n        selected: true,\n        configurable: true,\n        filters: getInitialProcessTypeFilters(),\n        render: (uuid) => <ResourceType uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.OWNER,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceOwnerWithName uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.UUID,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceUUID uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.CONTAINER_UUID,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => 
<ResourceContainerUuid uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.RUNTIME,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ContainerRunTime uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.OUTPUT_UUID,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceOutputUuid uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.LOG_UUID,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceLogUuid uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.PARENT_PROCESS,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceParentProcess uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.MODIFIED_BY_USER_UUID,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceModifiedByUserUuid uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.CREATED_AT,\n        selected: false,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: 'createdAt' },\n        filters: createTree(),\n        render: (uuid) => <ResourceCreatedAtDate uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.LAST_MODIFIED,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.DESC, field: 'modifiedAt' },\n        filters: createTree(),\n        render: (uuid) => <ResourceLastModifiedDate uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.TRASH_AT,\n        selected: false,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: 'trashAt' },\n        filters: createTree(),\n        render: (uuid) => <ResourceTrashDate uuid={uuid} />,\n    },\n    {\n        name: ProjectPanelRunColumnNames.DELETE_AT,\n        selected: false,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: 'deleteAt' },\n        filters: createTree(),\n        render: (uuid) => <ResourceDeleteDate uuid={uuid} />,\n    },\n];\n"
  },
  {
    "path": "services/workbench2/src/views/project-panel/project-panel-data.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { ProjectIcon } from \"components/icon/icon\";\nimport { PROJECT_PANEL_DATA_ID } from \"store/project-panel/project-panel-action-bind\";\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\n\nconst DEFAULT_VIEW_MESSAGES = ['No data found'];\n\ninterface ProjectPanelDataProps {\n    paperClassName?: string;\n    onRowClick: (uuid: string) => void;\n    onRowDoubleClick: (uuid: string) => void;\n    onContextMenu: (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => void;\n};\n\nexport const ProjectPanelData = (props: ProjectPanelDataProps) => (\n    <DataExplorer\n        id={PROJECT_PANEL_DATA_ID}\n        onRowClick={props.onRowClick}\n        onRowDoubleClick={props.onRowDoubleClick}\n        onContextMenu={props.onContextMenu}\n        contextMenuColumn={false}\n        defaultViewIcon={ProjectIcon}\n        defaultViewMessages={DEFAULT_VIEW_MESSAGES}\n        paperClassName={props.paperClassName}\n    />\n);\n"
  },
  {
    "path": "services/workbench2/src/views/project-panel/project-panel-run.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { ProjectIcon } from \"components/icon/icon\";\nimport { PROJECT_PANEL_RUN_ID } from \"store/project-panel/project-panel-action-bind\";\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { ProjectResource } from 'models/project';\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { getProjectPanelCurrentUuid } from \"store/project-panel/project-panel\";\nimport { getResource } from \"store/resources/resources\";\n\nconst DEFAULT_VIEW_MESSAGES = ['No workflow runs found'];\n\ninterface ProjectPanelRunProps {\n    project?: ProjectResource;\n    paperClassName?: string;\n    onRowClick: (uuid: string) => void;\n    onRowDoubleClick: (uuid: string) => void;\n    onContextMenu: (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => void;\n}\n\nconst mapStateToProps = (state: RootState): Pick<ProjectPanelRunProps, 'project'> => {\n    const projectUuid = getProjectPanelCurrentUuid(state);\n    const project = getResource<ProjectResource>(projectUuid)(state.resources);\n    return {\n        project,\n    };\n};\n\nexport const ProjectPanelRun = connect(mapStateToProps)((props: ProjectPanelRunProps) => (\n    <DataExplorer\n        id={PROJECT_PANEL_RUN_ID}\n        onRowClick={props.onRowClick}\n        onRowDoubleClick={props.onRowDoubleClick}\n        onContextMenu={props.onContextMenu}\n        contextMenuColumn={false}\n        defaultViewIcon={ProjectIcon}\n        defaultViewMessages={DEFAULT_VIEW_MESSAGES}\n        parentResource={props.project}\n        paperClassName={props.paperClassName}\n    />\n));\n"
  },
  {
    "path": "services/workbench2/src/views/project-panel/project-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useEffect } from 'react';\nimport withStyles from '@mui/styles/withStyles';\nimport { Dispatch } from 'redux';\nimport { DispatchProp, connect } from 'react-redux';\nimport { RouteComponentProps } from 'react-router';\nimport { WithStyles } from '@mui/styles';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { RootState } from 'store/store';\nimport { ResourcesState, getResource } from 'store/resources/resources';\nimport { loadDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { openContextMenuAndSelect } from 'store/context-menu/context-menu-actions';\nimport { navigateTo } from 'store/navigation/navigation-action';\nimport { getProjectPanelCurrentUuid } from \"store/project-panel/project-panel\";\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { GroupClass, GroupResource } from 'models/group';\nimport { CollectionResource } from 'models/collection';\nimport { resourceIsFrozen } from 'common/frozen-resources';\nimport { toggleOne } from 'store/multiselect/multiselect-actions';\nimport { DetailsCardRoot } from 'views-components/details-card/details-card-root';\nimport { MPVContainer, MPVPanelContent, MPVPanelState } from 'components/multi-panel-view/multi-panel-view';\nimport { ProjectPanelData } from './project-panel-data';\nimport { ProjectPanelRun } from './project-panel-run';\nimport { isEqual } from 'lodash';\nimport { resourceToMenuKind } from 'common/resource-to-menu-kind';\nimport { ProjectPanelTabLabels, RootProjectPanelTabLabels } from 'store/project-panel/project-panel-action';\nimport { OverviewPanel } from 'components/overview-panel/overview-panel';\nimport { ProjectAttributes } from './project-attributes';\nimport { isUserResource } from 'models/user';\nimport { ProjectResource } from 'models/project';\nimport { projectPanelDataActions, projectPanelRunActions } from 'store/project-panel/project-panel-action-bind';\n\ntype CssRules = 'root' | 'button' | 'mpvRoot' | 'dataExplorer';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        display: 'flex',\n        flexDirection: 'column',\n    },\n    button: {\n        marginLeft: theme.spacing(1),\n    },\n    mpvRoot: {\n        flexGrow: 1,\n        display: 'flex',\n        flexDirection: 'column',\n        flexWrap: 'nowrap',\n        minHeight: \"500px\",\n        '& > div': {\n            height: '100%',\n        },\n    },\n    dataExplorer: {\n        height: \"100%\",\n    },\n});\n\ninterface ProjectPanelDataProps {\n    currentItemId: string | undefined;\n    resources: ResourcesState;\n    isAdmin: boolean;\n    defaultTab?: string;\n    isRootProject: boolean;\n}\n\ninterface ProjectPanelActionProps {\n    resetPagination: () => void;\n}\n\ntype ProjectPanelProps = ProjectPanelDataProps & ProjectPanelActionProps & DispatchProp & WithStyles<CssRules> & RouteComponentProps<{ id: string }>;\n\nconst mapStateToProps = (state: RootState): ProjectPanelDataProps => {\n    const currentItemId = getProjectPanelCurrentUuid(state);\n    const resource = getResource<ProjectResource>(currentItemId)(state.resources);\n    return {\n        currentItemId,\n        resources: state.resources,\n        isAdmin: state.auth.user!.isAdmin,\n        defaultTab: 
state.auth.user?.prefs.wb?.default_project_tab,\n        isRootProject: isUserResource(resource) || currentItemId === state.auth.user?.uuid ,\n    };\n}\n\nconst mapDispatchToProps = (dispatch: Dispatch): ProjectPanelActionProps & DispatchProp => ({\n    resetPagination: () => {\n        dispatch(projectPanelDataActions.RESET_PAGINATION());\n        dispatch(projectPanelRunActions.RESET_PAGINATION());\n    },\n    dispatch,\n});\n\nexport const ProjectPanel = withStyles(styles)(connect(mapStateToProps, mapDispatchToProps)(\n    React.memo((props: ProjectPanelProps) => {\n        const { classes, isRootProject, currentItemId, resetPagination } = props;\n\n        // Reset all data explorer tab pagination on uuid change\n        useEffect(() => {\n            resetPagination();\n        }, [currentItemId, resetPagination]);\n\n        // Root project doesn't have Overview Panel\n        const tabSet = isRootProject ? RootProjectPanelTabLabels : ProjectPanelTabLabels;\n        // Default to Data tab if no user preference\n        const defaultTab = props.defaultTab || tabSet.DATA;\n        // Apply user preference or default to initial state\n        const initialPanelState: MPVPanelState[] = Object.keys(tabSet).map(key => ({\n                name: tabSet[key],\n                visible: tabSet[key] === defaultTab,\n        }));\n\n        const handleContextMenu = (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => {\n            const { resources, isAdmin, currentItemId } = props;\n            const resource = getResource<GroupContentsResource>(resourceUuid)(resources);\n            // When viewing the contents of a filter group, all contents should be treated as read only.\n            let readonly = false;\n            const project = currentItemId ? getResource<GroupResource>(currentItemId)(resources) : undefined;\n            if (project && project.groupClass === GroupClass.FILTER) {\n                readonly = true;\n            }\n\n            const menuKind = props.dispatch<any>(resourceToMenuKind(resourceUuid, readonly));\n            if (menuKind && resource) {\n                props.dispatch<any>(\n                    openContextMenuAndSelect(event, {\n                        name: resource.name,\n                        uuid: resource.uuid,\n                        ownerUuid: resource.ownerUuid,\n                        isTrashed: 'isTrashed' in resource ? resource.isTrashed : false,\n                        kind: resource.kind,\n                        menuKind,\n                        isAdmin,\n                        isFrozen: resourceIsFrozen(resource, resources),\n                        description: resource.description,\n                        storageClassesDesired: (resource as CollectionResource).storageClassesDesired,\n                        properties: 'properties' in resource ? 
resource.properties : {},\n                    })\n                );\n            }\n            props.dispatch<any>(loadDetailsPanel(resourceUuid));\n        };\n\n        const handleRowDoubleClick = (uuid: string) => {\n            props.dispatch<any>(navigateTo(uuid));\n        };\n\n        const handleRowClick = (uuid: string) => {\n            props.dispatch<any>(toggleOne(uuid));\n        };\n\n        return <div data-cy='project-panel' className={classes.root}>\n            <DetailsCardRoot />\n            <MPVContainer\n                className={classes.mpvRoot}\n                panelStates={initialPanelState}\n                justifyContent=\"flex-start\"\n                style={{flexWrap: 'nowrap'}}>\n                {isRootProject ? null : <MPVPanelContent\n                    forwardProps\n                    xs=\"auto\"\n                    item\n                    data-cy=\"project-details\"\n                    className={classes.dataExplorer}>\n                    <OverviewPanel detailsElement={<ProjectAttributes />} />\n                </MPVPanelContent>}\n                <MPVPanelContent\n                    forwardProps\n                    xs=\"auto\"\n                    item\n                    data-cy=\"project-data\"\n                    className={classes.dataExplorer}>\n                    <ProjectPanelData\n                        onRowClick={handleRowClick}\n                        onRowDoubleClick={handleRowDoubleClick}\n                        onContextMenu={handleContextMenu}\n                    />\n                </MPVPanelContent>\n                <MPVPanelContent\n                    forwardProps\n                    xs=\"auto\"\n                    item\n                    data-cy=\"project-run\"\n                    className={classes.dataExplorer}>\n                    <ProjectPanelRun\n                        onRowClick={handleRowClick}\n                        onRowDoubleClick={handleRowDoubleClick}\n                        onContextMenu={handleContextMenu}\n                    />\n                </MPVPanelContent>\n            </MPVContainer>\n        </div>;\n    }, preventRerender)\n));\n\nfunction preventRerender(prevProps: ProjectPanelProps, nextProps: ProjectPanelProps) {\n    if (!isEqual(prevProps.resources, nextProps.resources)) {\n        return false;\n    }\n    if (prevProps.currentItemId !== nextProps.currentItemId) {\n        return false;\n    }\n    return true;\n}\n"
  },
  {
    "path": "services/workbench2/src/views/public-favorites-panel/public-favorites-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DataColumns } from 'components/data-table/data-column';\nimport {\n    ProcessStatus,\n    ResourceFileSize,\n    ResourceLastModifiedDate,\n    ResourceType,\n    ResourceName,\n    ResourceOwnerWithName\n} from 'views-components/data-explorer/renderers';\nimport { createTree } from 'models/tree';\nimport { getSimpleObjectTypeFilters } from 'store/resource-type-filters/resource-type-filters';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\n\n\nexport enum PublicFavoritePanelColumnNames {\n    NAME = \"Name\",\n    STATUS = \"Status\",\n    TYPE = \"Type\",\n    OWNER = \"Owner\",\n    FILE_SIZE = \"File size\",\n    LAST_MODIFIED = \"Last modified\"\n}\n\n\nexport const publicFavoritePanelColumns: DataColumns<string, GroupContentsResource> = [\n    {\n        name: PublicFavoritePanelColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceName uuid={uuid} />\n    },\n    {\n        name: \"Status\",\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ProcessStatus uuid={uuid} />\n    },\n    {\n        name: PublicFavoritePanelColumnNames.TYPE,\n        selected: true,\n        configurable: true,\n        filters: getSimpleObjectTypeFilters(),\n        render: uuid => <ResourceType uuid={uuid} />\n    },\n    {\n        name: PublicFavoritePanelColumnNames.OWNER,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceOwnerWithName uuid={uuid} />\n    },\n    {\n        name: PublicFavoritePanelColumnNames.FILE_SIZE,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceFileSize uuid={uuid} />\n    },\n    {\n        name: PublicFavoritePanelColumnNames.LAST_MODIFIED,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLastModifiedDate uuid={uuid} />\n    }\n];\n"
  },
  {
    "path": "services/workbench2/src/views/public-favorites-panel/public-favorites-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { connect, DispatchProp } from 'react-redux';\nimport { RouteComponentProps } from 'react-router';\nimport { DataTableFilterItem } from 'components/data-table-filters/data-table-filters';\nimport { ResourceKind } from 'models/resource';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { PublicFavoriteIcon } from 'components/icon/icon';\nimport { Dispatch } from 'redux';\nimport {\n    openContextMenuAndSelect,\n} from 'store/context-menu/context-menu-actions';\nimport { loadDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { navigateTo } from 'store/navigation/navigation-action';\nimport { ContainerRequestState } from \"models/container-request\";\nimport { RootState } from 'store/store';\nimport { PUBLIC_FAVORITE_PANEL_ID } from 'store/public-favorites-panel/public-favorites-action';\nimport { PublicFavoritesState } from \"store/public-favorites/public-favorites\";\nimport { getResource, ResourcesState } from 'store/resources/resources';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { CollectionResource } from 'models/collection';\nimport { toggleOne } from 'store/multiselect/multiselect-actions';\nimport { resourceToMenuKind } from 'common/resource-to-menu-kind';\n\ntype CssRules = \"toolbar\" | \"button\" | \"root\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    toolbar: {\n        paddingBottom: theme.spacing(3),\n        textAlign: \"right\"\n    },\n    button: {\n        marginLeft: theme.spacing(1)\n    },\n    root: {\n        width: '100%',\n        boxShadow: \"0px 1px 3px 0px rgb(0 0 0 / 20%), 0px 1px 1px 0px rgb(0 0 0 / 14%), 0px 2px 1px -1px rgb(0 0 0 / 12%)\",\n    },\n});\n\nexport interface FavoritePanelFilter extends DataTableFilterItem {\n    type: ResourceKind | ContainerRequestState;\n}\n\ninterface PublicFavoritePanelDataProps {\n    publicFavorites: PublicFavoritesState;\n    resources: ResourcesState;\n}\n\ninterface PublicFavoritePanelActionProps {\n    onItemClick: (item: string) => void;\n    onContextMenu: (resources: ResourcesState) => (event: React.MouseEvent<HTMLElement>, item: string) => void;\n    onDialogOpen: (ownerUuid: string) => void;\n    onItemDoubleClick: (item: string) => void;\n}\nconst mapStateToProps = ({ publicFavorites, resources }: RootState): PublicFavoritePanelDataProps => ({\n    publicFavorites,\n    resources,\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch): PublicFavoritePanelActionProps => ({\n    onContextMenu: (resources: ResourcesState) => (event, resourceUuid) => {\n        const resource = getResource<GroupContentsResource>(resourceUuid)(resources);\n        const kind = dispatch<any>(resourceToMenuKind(resourceUuid));\n        if (kind && resource) {\n            dispatch<any>(openContextMenuAndSelect(event, {\n                name: resource.name,\n                description: resource.description,\n                storageClassesDesired: (resource as CollectionResource).storageClassesDesired,\n                uuid: resourceUuid,\n                ownerUuid: '',\n                kind: ResourceKind.NONE,\n                
menuKind: kind\n            }));\n        }\n        dispatch<any>(loadDetailsPanel(resourceUuid));\n    },\n    onDialogOpen: (ownerUuid: string) => { return; },\n    onItemClick: (uuid: string) => {\n        dispatch<any>(toggleOne(uuid));\n    },\n    onItemDoubleClick: uuid => {\n        dispatch<any>(navigateTo(uuid));\n    }\n});\n\ntype FavoritePanelProps = PublicFavoritePanelDataProps & PublicFavoritePanelActionProps & DispatchProp\n    & WithStyles<CssRules> & RouteComponentProps<{ id: string }>;\n\nexport const PublicFavoritePanel = withStyles(styles)(\n    connect(mapStateToProps, mapDispatchToProps)(\n        class extends React.Component<FavoritePanelProps> {\n            render() {\n                return <div className={this.props.classes.root}><DataExplorer\n                    id={PUBLIC_FAVORITE_PANEL_ID}\n                    onRowClick={this.props.onItemClick}\n                    onRowDoubleClick={this.props.onItemDoubleClick}\n                    onContextMenu={this.props.onContextMenu(this.props.resources)}\n                    contextMenuColumn={false}\n                    defaultViewIcon={PublicFavoriteIcon}\n                    defaultViewMessages={['Public favorites list is empty.']} />\n                </div>;\n            }\n        }\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views/repositories-panel/repositories-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { Grid, Typography, Button, Card, CardContent, TableBody, TableCell, TableHead, TableRow, Table, Tooltip, IconButton } from '@mui/material';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { Link } from 'react-router-dom';\nimport { Dispatch, compose } from 'redux';\nimport { RootState } from 'store/store';\nimport { HelpIcon, AddIcon, MoreVerticalIcon } from 'components/icon/icon';\nimport { loadRepositoriesData, openRepositoriesSampleGitDialog, openRepositoryCreateDialog } from 'store/repositories/repositories-actions';\nimport { RepositoryResource } from 'models/repositories';\nimport { openRepositoryContextMenu } from 'store/context-menu/context-menu-actions';\nimport { Routes } from 'routes/routes';\n\n\ntype CssRules = 'link' | 'button' | 'icon' | 'iconRow' | 'moreOptionsButton' | 'moreOptions' | 'cloneUrls';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    link: {\n        textDecoration: 'none',\n        color: theme.palette.primary.main,\n        \"&:hover\": {\n            color: theme.palette.primary.dark,\n            transition: 'all 0.5s ease'\n        }\n    },\n    button: {\n        textAlign: 'right',\n        alignSelf: 'center'\n    },\n    icon: {\n        cursor: 'pointer',\n        color: theme.palette.grey[\"500\"],\n        \"&:hover\": {\n            color: theme.palette.common.black,\n            transition: 'all 0.5s ease'\n        }\n    },\n    iconRow: {\n        paddingTop: theme.spacing(2),\n        textAlign: 'right'\n    },\n    moreOptionsButton: {\n        padding: 0\n    },\n    moreOptions: {\n        textAlign: 'right',\n        '&:last-child': {\n            paddingRight: 0\n        }\n    },\n    cloneUrls: {\n        whiteSpace: 'pre-wrap'\n    }\n});\n\nconst mapStateToProps = (state: RootState) => {\n    return {\n        repositories: state.repositories.items\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): Pick<RepositoriesActionProps, 'onOptionsMenuOpen' | 'loadRepositories' | 'openRepositoriesSampleGitDialog' | 'openRepositoryCreateDialog'> => ({\n    loadRepositories: () => dispatch<any>(loadRepositoriesData()),\n    onOptionsMenuOpen: (event, repository) => {\n        dispatch<any>(openRepositoryContextMenu(event, repository));\n    },\n    openRepositoriesSampleGitDialog: () => dispatch<any>(openRepositoriesSampleGitDialog()),\n    openRepositoryCreateDialog: () => dispatch<any>(openRepositoryCreateDialog())\n});\n\ninterface RepositoriesActionProps {\n    loadRepositories: () => void;\n    onOptionsMenuOpen: (event: React.MouseEvent<HTMLElement>, repository: RepositoryResource) => void;\n    openRepositoriesSampleGitDialog: () => void;\n    openRepositoryCreateDialog: () => void;\n}\n\ninterface RepositoriesDataProps {\n    repositories: RepositoryResource[];\n}\n\n\ntype RepositoriesProps = RepositoriesDataProps & RepositoriesActionProps & WithStyles<CssRules>;\n\nexport const RepositoriesPanel = compose(\n    withStyles(styles),\n    connect(mapStateToProps, mapDispatchToProps))(\n        class extends React.Component<RepositoriesProps> {\n            componentDidMount() {\n                this.props.loadRepositories();\n    
        }\n            render() {\n                const { classes, repositories, onOptionsMenuOpen, openRepositoriesSampleGitDialog, openRepositoryCreateDialog } = this.props;\n                return (\n                    <Card>\n                        <CardContent>\n                            <Grid container direction=\"row\">\n                                <Grid item xs={8}>\n                                    <Typography variant='body1'>\n                                        When you are using an Arvados virtual machine, you should clone the https:// URLs. This will authenticate automatically using your API token. <br />\n                                        In order to clone git repositories using SSH, <Link to={Routes.SSH_KEYS_USER} className={classes.link}>add an SSH key to your account</Link> and clone the git@ URLs.\n                                    </Typography>\n                                </Grid>\n                                <Grid item xs={4} className={classes.button}>\n                                    <Button variant=\"contained\" color=\"primary\" onClick={openRepositoryCreateDialog}>\n                                        <AddIcon /> NEW REPOSITORY\n                                    </Button>\n                                </Grid>\n                            </Grid>\n                            <Grid item xs={12}>\n                                <div className={classes.iconRow}>\n                                    <Tooltip title=\"Sample git quick start\">\n                                        <IconButton\n                                            className={classes.moreOptionsButton}\n                                            onClick={openRepositoriesSampleGitDialog}\n                                            size=\"large\">\n                                            <HelpIcon className={classes.icon} />\n                                        </IconButton>\n                                    </Tooltip>\n                                </div>\n                            </Grid>\n                            <Grid item xs={12}>\n                                {repositories && <Table>\n                                    <TableHead>\n                                        <TableRow>\n                                            <TableCell>Name</TableCell>\n                                            <TableCell>URL</TableCell>\n                                            <TableCell />\n                                        </TableRow>\n                                    </TableHead>\n                                    <TableBody>\n                                        {repositories.map(repository =>\n                                            <TableRow key={repository.uuid}>\n                                                <TableCell>{repository.name}</TableCell>\n                                                <TableCell className={classes.cloneUrls}>{repository.cloneUrls.join(\"\\n\")}</TableCell>\n                                                <TableCell className={classes.moreOptions}>\n                                                    <Tooltip title=\"More options\" disableFocusListener>\n                                                        <IconButton\n                                                            onClick={event => onOptionsMenuOpen(event, repository)}\n                                                            className={classes.moreOptionsButton}\n                                                            size=\"large\">\n                                                            <MoreVerticalIcon />\n                                                        </IconButton>\n                                                    </Tooltip>\n                                                </TableCell>\n                                            </TableRow>)}\n                                    </TableBody>\n                                </Table>}\n                            </Grid>\n                        </CardContent>\n                    </Card>\n                );\n            }\n        }\n    );\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/boolean-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { memoize } from 'lodash/fp';\nimport { BooleanCommandInputParameter } from 'models/workflow';\nimport { Field } from 'redux-form';\nimport { Switch, Theme } from '@mui/material';\nimport { GenericInputProps, GenericInput } from './generic-input';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithStyles } from '@mui/styles';\n\nexport interface BooleanInputProps {\n    input: BooleanCommandInputParameter;\n}\nexport const BooleanInput = ({ input }: BooleanInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={BooleanInputComponent}\n        normalize={normalize}\n    />;\n\nconst normalize = (_: any, prevValue: boolean) => !prevValue;\n\nconst BooleanInputComponent = (props: GenericInputProps) =>\n    <GenericInput\n        component={Input}\n        {...props} />;\n\ntype CssRules = \"switch\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: Theme) => ({\n    switch: {\n        marginTop: \"12px\",\n        marginBottom: \"-6px\", // To line up hint text with GenericInput\n    },\n});\n\nconst Input = withStyles(styles)(({ input, commandInput, classes }: GenericInputProps & WithStyles<CssRules>) =>\n    <Switch\n        color='primary'\n        className={classes.switch}\n        checked={input.value}\n        onChange={handleChange(input.onChange, input.value)}\n        disabled={commandInput.disabled}\n    />\n);\n\nconst handleChange = memoize(\n    (onChange: (value: string) => void, value: string) => () => onChange(value)\n);\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/directory-array-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport {\n    isRequiredInput,\n    DirectoryArrayCommandInputParameter,\n    Directory,\n    CWLType\n} from 'models/workflow';\nimport { Field } from 'redux-form';\nimport { ERROR_MESSAGE } from 'validators/require';\nimport {\n    Input,\n    Dialog,\n    DialogTitle,\n    DialogContent,\n    DialogActions,\n    Button,\n    Divider,\n    Typography,\n} from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport { GenericInputProps, GenericInput } from './generic-input';\nimport { ProjectsTreePicker } from 'views-components/projects-tree-picker/projects-tree-picker';\nimport { connect, DispatchProp } from 'react-redux';\nimport { initProjectsTreePicker, getSelectedNodes, treePickerActions, getProjectsTreePickerIds, FileOperationLocation, getFileOperationLocation, fileOperationLocationToPickerId } from 'store/tree-picker/tree-picker-actions';\nimport { ProjectsTreePickerItem } from 'store/tree-picker/tree-picker-middleware';\nimport { createSelector, createStructuredSelector } from 'reselect';\nimport { ChipsInput } from 'components/chips-input/chips-input';\nimport { identity, values, noop } from 'lodash';\nimport { InputProps } from '@mui/material/Input';\nimport { TreePicker } from 'store/tree-picker/tree-picker';\nimport { RootState } from 'store/store';\nimport { Chips } from 'components/chips/chips';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport withStyles from '@mui/styles/withStyles';\nimport { CollectionResource } from 'models/collection';\nimport { PORTABLE_DATA_HASH_PATTERN, ResourceKind } from 'models/resource';\nimport { Dispatch } from 'redux';\nimport { CollectionDirectory, CollectionFileType } from 'models/collection-file';\n\nconst LOCATION_REGEX = new RegExp(\"^(?:keep:)?(\" + PORTABLE_DATA_HASH_PATTERN + \")(/.*)?$\");\nexport interface DirectoryArrayInputProps {\n    input: DirectoryArrayCommandInputParameter;\n    options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n}\n\nexport const DirectoryArrayInput = ({ input }: DirectoryArrayInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={DirectoryArrayInputComponent as any}\n        parse={parseDirectories}\n        format={formatDirectories}\n        validate={validationSelector(input)} />;\n\ninterface FormattedDirectory {\n    name: string;\n    portableDataHash: string;\n    subpath: string;\n}\n\nconst parseDirectories = (directories: FileOperationLocation[] | string) =>\n    typeof directories === 'string'\n        ? undefined\n        : directories.map(parse);\n\nconst parse = (directory: FileOperationLocation): Directory => ({\n    class: CWLType.DIRECTORY,\n    basename: directory.name,\n    location: `keep:${directory.pdh}${directory.subpath}`,\n});\n\nconst formatDirectories = (directories: Directory[] = []): FormattedDirectory[] =>\n    directories ? 
directories.map(format).filter((dir): dir is FormattedDirectory => Boolean(dir)) : [];\n\nconst format = ({ location = '', basename = '' }: Directory): FormattedDirectory | undefined => {\n    const match = LOCATION_REGEX.exec(location);\n\n    if (match) {\n        return {\n            portableDataHash: match[1],\n            subpath: match[2],\n            name: basename,\n        };\n    }\n    return undefined;\n};\n\nconst validationSelector = createSelector(\n    isRequiredInput,\n    isRequired => isRequired\n        ? [required]\n        : undefined\n);\n\nconst required = (value?: Directory[]) =>\n    value && value.length > 0\n        ? undefined\n        : ERROR_MESSAGE;\ninterface DirectoryArrayInputComponentState {\n    open: boolean;\n    directories: FileOperationLocation[];\n}\n\ninterface DirectoryArrayInputDataProps {\n    treePickerState: TreePicker;\n}\n\nconst treePickerSelector = (state: RootState) => state.treePicker;\n\nconst mapStateToProps = createStructuredSelector({\n    treePickerState: treePickerSelector,\n});\n\ninterface DirectoryArrayInputActionProps {\n    initProjectsTreePicker: (pickerId: string) => void;\n    selectTreePickerNode: (pickerId: string, id: string | string[]) => void;\n    deselectTreePickerNode: (pickerId: string, id: string | string[]) => void;\n    getFileOperationLocation: (item: ProjectsTreePickerItem) => Promise<FileOperationLocation | undefined>;\n}\n\nconst mapDispatchToProps = (dispatch: Dispatch): DirectoryArrayInputActionProps => ({\n    initProjectsTreePicker: (pickerId: string) => dispatch<any>(initProjectsTreePicker(pickerId)),\n    selectTreePickerNode: (pickerId: string, id: string | string[]) =>\n        dispatch<any>(treePickerActions.SELECT_TREE_PICKER_NODE({\n            pickerId, id, cascade: false\n        })),\n    deselectTreePickerNode: (pickerId: string, id: string | string[]) =>\n        dispatch<any>(treePickerActions.DESELECT_TREE_PICKER_NODE({\n            pickerId, id, cascade: false\n        })),\n    getFileOperationLocation: (item: ProjectsTreePickerItem) => dispatch<any>(getFileOperationLocation(item)),\n});\n\nconst DirectoryArrayInputComponent = connect(mapStateToProps, mapDispatchToProps)(\n    class DirectoryArrayInputComponent extends React.Component<GenericInputProps & DirectoryArrayInputDataProps & DirectoryArrayInputActionProps & DispatchProp & {\n        options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n    }, DirectoryArrayInputComponentState> {\n        state: DirectoryArrayInputComponentState = {\n            open: false,\n            directories: [],\n        };\n\n        directoryRefreshTimeout = -1;\n\n        componentDidMount() {\n            this.props.initProjectsTreePicker(this.props.commandInput.id);\n        }\n\n        render() {\n            return <>\n                <this.input />\n                <this.dialog />\n            </>;\n        }\n\n        openDialog = () => {\n            this.setState({ open: true });\n        }\n\n        closeDialog = () => {\n            this.setState({ open: false });\n        }\n\n        submit = () => {\n            this.closeDialog();\n            this.props.input.onChange(this.state.directories);\n        }\n\n        setDirectoriesFromResources = async (directories: (CollectionResource | CollectionDirectory)[]) => {\n            const locations = (await Promise.all(\n                directories.map(directory => (this.props.getFileOperationLocation(directory)))\n            )).filter((location): location is 
FileOperationLocation => (\n                location !== undefined\n            ));\n\n            this.setDirectories(locations);\n        }\n\n        refreshDirectories = () => {\n            clearTimeout(this.directoryRefreshTimeout);\n            this.directoryRefreshTimeout = window.setTimeout(this.setDirectoriesFromTree);\n        }\n\n        setDirectoriesFromTree = () => {\n            const nodes = getSelectedNodes<ProjectsTreePickerItem>(this.props.commandInput.id)(this.props.treePickerState);\n            const initialDirectories: (CollectionResource | CollectionDirectory)[] = [];\n            const directories = nodes\n                .reduce((directories, { value }) =>\n                    (('kind' in value && value.kind === ResourceKind.COLLECTION) ||\n                    ('type' in value && value.type === CollectionFileType.DIRECTORY))\n                        ? directories.concat(value)\n                        : directories, initialDirectories);\n            this.setDirectoriesFromResources(directories);\n        }\n\n        setDirectories = (locations: FileOperationLocation[]) => {\n            const deletedDirectories = this.state.directories\n                .reduce((deletedDirectories, directory) =>\n                    locations.some(({ uuid, subpath }) => uuid === directory.uuid && subpath === directory.subpath)\n                        ? deletedDirectories\n                        : [...deletedDirectories, directory]\n                    , [] as FileOperationLocation[]);\n\n            this.setState({ directories: locations });\n\n            const ids = values(getProjectsTreePickerIds(this.props.commandInput.id));\n            ids.forEach(pickerId => {\n                this.props.deselectTreePickerNode(\n                    pickerId,\n                    deletedDirectories.map(fileOperationLocationToPickerId)\n                );\n            });\n        };\n\n        input = () =>\n            <GenericInput\n                component={this.chipsInput}\n                {...this.props} />\n\n        chipsInput = () =>\n            <ChipsInput\n                values={this.props.input.value}\n                onChange={noop}\n                disabled={this.props.commandInput.disabled}\n                createNewValue={identity}\n                getLabel={(data: FormattedDirectory) => data.name}\n                inputComponent={this.textInput} />\n\n        textInput = (props: InputProps) =>\n            <Input\n                {...props}\n                error={this.props.meta.touched && !!this.props.meta.error}\n                readOnly\n                onClick={!this.props.commandInput.disabled ? this.openDialog : undefined}\n                onKeyPress={!this.props.commandInput.disabled ? 
this.openDialog : undefined}\n                onBlur={this.props.input.onBlur}\n                disabled={this.props.commandInput.disabled} />\n\n        dialogContentStyles: CustomStyleRulesCallback<DialogContentCssRules> = ({ spacing }) => ({\n            root: {\n                display: 'flex',\n                flexDirection: 'column',\n                height: \"80vh\",\n            },\n            pickerWrapper: {\n                display: 'flex',\n                flexDirection: 'column',\n                height: \"100%\",\n            },\n            tree: {\n                flex: 3,\n                overflow: 'auto',\n            },\n            divider: {\n                margin: `${spacing(1)} 0`,\n            },\n            chips: {\n                flex: 1,\n                overflow: 'auto',\n                padding: `${spacing(1)} 0`,\n                overflowX: 'hidden',\n            },\n        });\n\n        dialog = withStyles(this.dialogContentStyles)(\n            ({ classes }: WithStyles<DialogContentCssRules>) =>\n                <Dialog\n                    open={this.state.open}\n                    onClose={this.closeDialog}\n                    fullWidth\n                    maxWidth='md' >\n                    <DialogTitle>Choose directories</DialogTitle>\n                    <DialogContent className={classes.root}>\n                        <div className={classes.pickerWrapper}>\n                            <div className={classes.tree}>\n                                <ProjectsTreePicker\n                                    pickerId={this.props.commandInput.id}\n                                    currentUuids={this.state.directories.map(dir => fileOperationLocationToPickerId(dir))}\n                                    includeCollections\n                                    includeDirectories\n                                    showSelection\n                                    cascadeSelection={false}\n                                    options={this.props.options}\n                                    toggleItemSelection={this.refreshDirectories} />\n                            </div>\n                            <Divider />\n                            <div className={classes.chips}>\n                                <Typography variant='subtitle1'>Selected collections ({this.state.directories.length}):</Typography>\n                                <Chips\n                                    orderable\n                                    deletable\n                                    values={this.state.directories}\n                                    onChange={this.setDirectories}\n                                    getLabel={(directory: CollectionResource) => directory.name} />\n                            </div>\n                        </div>\n\n                    </DialogContent>\n                    <DialogActions>\n                        <Button onClick={this.closeDialog}>Cancel</Button>\n                        <Button\n                            data-cy='ok-button'\n                            variant='contained'\n                            color='primary'\n                            onClick={this.submit}>Ok</Button>\n                    </DialogActions>\n                </Dialog>\n        );\n\n    });\n\ntype DialogContentCssRules = 'root' | 'pickerWrapper' | 'tree' | 'divider' | 'chips';\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/directory-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect, DispatchProp } from 'react-redux';\nimport { memoize } from 'lodash/fp';\nimport { Field } from 'redux-form';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Input, Dialog, DialogTitle, DialogContent, DialogActions, Button } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport {\n    isRequiredInput,\n    DirectoryCommandInputParameter,\n    CWLType,\n    Directory\n} from 'models/workflow';\nimport { GenericInputProps, GenericInput } from './generic-input';\nimport { ProjectsTreePicker } from 'views-components/projects-tree-picker/projects-tree-picker';\nimport { FileOperationLocation, getFileOperationLocation, initProjectsTreePicker } from 'store/tree-picker/tree-picker-actions';\nimport { TreeItem } from 'components/tree/tree';\nimport { ProjectsTreePickerItem } from 'store/tree-picker/tree-picker-middleware';\nimport { ERROR_MESSAGE } from 'validators/require';\nimport { Dispatch } from 'redux';\n\nexport interface DirectoryInputProps {\n    input: DirectoryCommandInputParameter;\n    options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n}\n\ntype DialogContentCssRules = 'root' | 'pickerWrapper';\n\nexport const DirectoryInput = ({ input, options }: DirectoryInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={DirectoryInputComponent as any}\n        format={format}\n        parse={parse}\n        {...{\n            options\n        }}\n        validate={getValidation(input)} />;\n\nconst format = (value?: Directory) => value ? value.basename : '';\n\nconst parse = (directory: FileOperationLocation): Directory => ({\n    class: CWLType.DIRECTORY,\n    location: `keep:${directory.pdh}${directory.subpath}`,\n    basename: directory.name,\n});\n\nconst getValidation = memoize(\n    (input: DirectoryCommandInputParameter) => ([\n        isRequiredInput(input)\n            ? (directory?: Directory) => directory ? 
undefined : ERROR_MESSAGE\n            : () => undefined,\n    ])\n);\n\ninterface DirectoryInputComponentState {\n    open: boolean;\n    directory?: FileOperationLocation;\n}\n\ninterface DirectoryInputActionProps {\n    initProjectsTreePicker: (pickerId: string) => void;\n    getFileOperationLocation: (item: ProjectsTreePickerItem) => Promise<FileOperationLocation | undefined>;\n}\n\nconst mapDispatchToProps = (dispatch: Dispatch): DirectoryInputActionProps => ({\n    initProjectsTreePicker: (pickerId: string) => dispatch<any>(initProjectsTreePicker(pickerId)),\n    getFileOperationLocation: (item: ProjectsTreePickerItem) => dispatch<any>(getFileOperationLocation(item)),\n});\n\nconst DirectoryInputComponent = connect(null, mapDispatchToProps)(\n    class FileInputComponent extends React.Component<GenericInputProps & DirectoryInputActionProps & DispatchProp & {\n        options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n    }, DirectoryInputComponentState> {\n        state: DirectoryInputComponentState = {\n            open: false,\n        };\n\n        componentDidMount() {\n            this.props.initProjectsTreePicker(this.props.commandInput.id);\n        }\n\n        render() {\n            return <>\n                {this.renderInput()}\n                <this.dialog />\n            </>;\n        }\n\n        openDialog = () => {\n            this.setState({ open: true });\n        }\n\n        closeDialog = () => {\n            this.setState({ open: false });\n        }\n\n        submit = () => {\n            this.closeDialog();\n            this.props.input.onChange(this.state.directory);\n        }\n\n        setDirectory = async (_: {}, { data: item }: TreeItem<ProjectsTreePickerItem>) => {\n            const location = await this.props.getFileOperationLocation(item);\n            this.setState({ directory: location });\n        }\n\n        renderInput() {\n            return <GenericInput\n                component={props =>\n                    <Input\n                        readOnly\n                        fullWidth\n                        value={props.input.value}\n                        error={props.meta.touched && !!props.meta.error}\n                        disabled={props.commandInput.disabled}\n                        onClick={!this.props.commandInput.disabled ? this.openDialog : undefined}\n                        onKeyPress={!this.props.commandInput.disabled ? 
this.openDialog : undefined}\n                        onMouseDown={(e) => e.preventDefault()} />}\n                {...this.props} />;\n        }\n\n        dialogContentStyles: CustomStyleRulesCallback<DialogContentCssRules> = ({ spacing }) => ({\n            root: {\n                display: 'flex',\n                flexDirection: 'column',\n                height: \"80vh\",\n            },\n            pickerWrapper: {\n                display: 'flex',\n                flexDirection: 'column',\n                height: \"100%\",\n            },\n        });\n\n        dialog = withStyles(this.dialogContentStyles)(\n            ({ classes }: WithStyles<DialogContentCssRules>) =>\n                <Dialog\n                    open={this.state.open}\n                    onClose={this.closeDialog}\n                    fullWidth\n                    data-cy=\"choose-a-directory-dialog\"\n                    maxWidth='md'>\n                    <DialogTitle>Choose a directory</DialogTitle>\n                    <DialogContent className={classes.root}>\n                        <div className={classes.pickerWrapper}>\n                            <ProjectsTreePicker\n                                pickerId={this.props.commandInput.id}\n                                includeCollections\n                                includeDirectories\n                                cascadeSelection={false}\n                                options={this.props.options}\n                                toggleItemActive={this.setDirectory} />\n                        </div>\n                    </DialogContent>\n                    <DialogActions>\n                        <Button onClick={this.closeDialog}>Cancel</Button>\n                        <Button\n                            disabled={!this.state.directory}\n                            variant='contained'\n                            color='primary'\n                            onClick={this.submit}>Ok</Button>\n                    </DialogActions>\n                </Dialog>\n        );\n\n    });\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/enum-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Field } from 'redux-form';\nimport { memoize } from 'lodash/fp';\nimport { fieldRequire } from 'validators/require';\nimport { Select, MenuItem } from '@mui/material';\nimport { EnumCommandInputParameter, CommandInputEnumSchema, isRequiredInput, getEnumType } from 'models/workflow';\nimport { GenericInputProps, GenericInput } from './generic-input';\n\nexport interface EnumInputProps {\n    input: EnumCommandInputParameter;\n}\n\nconst getValidation = memoize(\n    (input: EnumCommandInputParameter) => ([\n        isRequiredInput(input)\n            ? fieldRequire\n            : () => undefined,\n    ]));\n\nconst emptyToNull = value => {\n    if (value === '') {\n        return null;\n    } else {\n        return value;\n    }\n};\n\nexport const EnumInput = ({ input }: EnumInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={EnumInputComponent}\n        validate={getValidation(input)}\n        normalize={emptyToNull}\n    />;\n\nconst EnumInputComponent = (props: GenericInputProps) =>\n    <GenericInput\n        component={Input}\n        {...props} />;\n\nconst Input = (props: GenericInputProps) => {\n    const type = getEnumType(props.commandInput) as CommandInputEnumSchema;\n    return (\n        <Select\n            variant=\"standard\"\n            value={props.input.value}\n            onChange={props.input.onChange}\n            disabled={props.commandInput.disabled}>\n            {(isRequiredInput(props.commandInput) ? [] : [<MenuItem key={'_empty'} value={''} />]).concat(type.symbols.map(symbol =>\n                <MenuItem key={symbol} value={extractValue(symbol)}>\n                    {extractValue(symbol)}\n                </MenuItem>))}\n        </Select>\n    );\n};\n\n/**\n * Values in workflow definition have an absolute form, for example:\n *\n * ```#input_collector.cwl/enum_type/Pathway table```\n *\n * We want a value that is in form accepted by backend.\n * According to the example above, the correct value is:\n *\n * ```Pathway table```\n */\nconst extractValue = (symbol: string) => symbol.split('/').pop();\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/file-array-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport {\n    isRequiredInput,\n    FileArrayCommandInputParameter,\n    File,\n    CWLType\n} from 'models/workflow';\nimport { Field } from 'redux-form';\nimport { ERROR_MESSAGE } from 'validators/require';\nimport {\n    Input,\n    Dialog,\n    DialogTitle,\n    DialogContent,\n    DialogActions,\n    Button,\n    Divider,\n    Typography,\n} from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport { GenericInputProps, GenericInput } from './generic-input';\nimport { ProjectsTreePicker } from 'views-components/projects-tree-picker/projects-tree-picker';\nimport { connect, DispatchProp } from 'react-redux';\nimport { initProjectsTreePicker, getSelectedNodes, treePickerActions, getProjectsTreePickerIds } from 'store/tree-picker/tree-picker-actions';\nimport { ProjectsTreePickerItem } from 'store/tree-picker/tree-picker-middleware';\nimport { CollectionFile, CollectionFileType } from 'models/collection-file';\nimport { createSelector, createStructuredSelector } from 'reselect';\nimport { ChipsInput } from 'components/chips-input/chips-input';\nimport { identity, values, noop } from 'lodash';\nimport { InputProps } from '@mui/material/Input';\nimport { TreePicker } from 'store/tree-picker/tree-picker';\nimport { RootState } from 'store/store';\nimport { Chips } from 'components/chips/chips';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport withStyles from '@mui/styles/withStyles';\n\nexport interface FileArrayInputProps {\n    input: FileArrayCommandInputParameter;\n    options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n}\nexport const FileArrayInput = ({ input }: FileArrayInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={FileArrayInputComponent as any}\n        parse={parseFiles}\n        format={formatFiles}\n        validate={validationSelector(input)} />;\n\nconst parseFiles = (files: CollectionFile[] | string) =>\n    typeof files === 'string'\n        ? undefined\n        : files.map(parse);\n\nconst parse = (file: CollectionFile): File => ({\n    class: CWLType.FILE,\n    basename: file.name,\n    location: `keep:${file.id}`,\n    path: file.path,\n});\n\nconst formatFiles = (files: File[] = []) =>\n    files ? files.map(format) : [];\n\nconst format = (file: File): CollectionFile => ({\n    id: file.location\n        ? file.location.replace('keep:', '')\n        : '',\n    name: file.basename || '',\n    path: file.path || '',\n    size: 0,\n    type: CollectionFileType.FILE,\n    url: '',\n});\n\nconst validationSelector = createSelector(\n    isRequiredInput,\n    isRequired => isRequired\n        ? [required]\n        : undefined\n);\n\nconst required = (value?: File[]) =>\n    value && value.length > 0\n        ? 
undefined\n        : ERROR_MESSAGE;\ninterface FileArrayInputComponentState {\n    open: boolean;\n    files: CollectionFile[];\n}\n\ninterface FileArrayInputComponentProps {\n    treePickerState: TreePicker;\n}\n\nconst treePickerSelector = (state: RootState) => state.treePicker;\n\nconst mapStateToProps = createStructuredSelector({\n    treePickerState: treePickerSelector,\n});\n\nconst FileArrayInputComponent = connect(mapStateToProps)(\n    class FileArrayInputComponent extends React.Component<FileArrayInputComponentProps & GenericInputProps & DispatchProp & {\n        options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n    }, FileArrayInputComponentState> {\n        state: FileArrayInputComponentState = {\n            open: false,\n            files: [],\n        };\n\n        fileRefreshTimeout = -1;\n\n        componentDidMount() {\n            this.props.dispatch<any>(\n                initProjectsTreePicker(this.props.commandInput.id));\n        }\n\n        render() {\n            return <>\n                <this.input />\n                <this.dialog />\n            </>;\n        }\n\n        openDialog = () => {\n            this.setFilesFromProps(this.props.input.value);\n            this.setState({ open: true });\n        }\n\n        closeDialog = () => {\n            this.setState({ open: false });\n        }\n\n        submit = () => {\n            this.closeDialog();\n            this.props.input.onChange(this.state.files);\n        }\n\n        setFiles = (files: CollectionFile[]) => {\n\n            const deletedFiles = this.state.files\n                .reduce((deletedFiles, file) =>\n                    files.some(({ id }) => id === file.id)\n                        ? deletedFiles\n                        : [...deletedFiles, file]\n                    , []);\n\n            this.setState({ files });\n\n            const ids = values(getProjectsTreePickerIds(this.props.commandInput.id));\n            ids.forEach(pickerId => {\n                this.props.dispatch(\n                    treePickerActions.DESELECT_TREE_PICKER_NODE({\n                        pickerId,\n                        id: deletedFiles.map(({ id }) => id),\n                        cascade: true,\n                    })\n                );\n            });\n\n        }\n\n        setFilesFromProps = (files: CollectionFile[]) => {\n\n            const addedFiles = files\n                .reduce((addedFiles, file) =>\n                    this.state.files.some(({ id }) => id === file.id)\n                        ? 
addedFiles\n                        : [...addedFiles, file]\n                    , []);\n\n            const ids = values(getProjectsTreePickerIds(this.props.commandInput.id));\n            ids.forEach(pickerId => {\n                this.props.dispatch(\n                    treePickerActions.SELECT_TREE_PICKER_NODE({\n                        pickerId,\n                        id: addedFiles.map(({ id }) => id),\n                        cascade: true,\n                    })\n                );\n            });\n\n            this.setFiles(files);\n        }\n\n        refreshFiles = () => {\n            clearTimeout(this.fileRefreshTimeout);\n            this.fileRefreshTimeout = window.setTimeout(this.setSelectedFiles);\n        }\n\n        setSelectedFiles = () => {\n            const nodes = getSelectedNodes<ProjectsTreePickerItem>(this.props.commandInput.id)(this.props.treePickerState);\n            const initialFiles: CollectionFile[] = [];\n            const files = nodes\n                .reduce((files, { value }) =>\n                    'type' in value && value.type === CollectionFileType.FILE\n                        ? files.concat(value)\n                        : files, initialFiles);\n\n            this.setFiles(files);\n        }\n        input = () =>\n            <GenericInput\n                component={this.chipsInput}\n                {...this.props} />\n\n        chipsInput = () =>\n            <ChipsInput\n                values={this.props.input.value}\n                disabled={this.props.commandInput.disabled}\n                onChange={noop}\n                createNewValue={identity}\n                getLabel={(file: CollectionFile) => file.name}\n                inputComponent={this.textInput} />\n\n        textInput = (props: InputProps) =>\n            <Input\n                {...props}\n                error={this.props.meta.touched && !!this.props.meta.error}\n                readOnly\n                disabled={this.props.commandInput.disabled}\n                onClick={!this.props.commandInput.disabled ? this.openDialog : undefined}\n                onKeyPress={!this.props.commandInput.disabled ? 
this.openDialog : undefined}\n                onBlur={this.props.input.onBlur} />\n\n        dialogContentStyles: CustomStyleRulesCallback<DialogContentCssRules> = ({ spacing }) => ({\n            root: {\n                display: 'flex',\n                flexDirection: 'column',\n                height: \"80vh\",\n            },\n            pickerWrapper: {\n                display: 'flex',\n                flexDirection: 'column',\n                height: \"100%\",\n            },\n            tree: {\n                flex: 3,\n                overflow: 'auto',\n            },\n            divider: {\n                margin: `${spacing(1)} 0`,\n            },\n            chips: {\n                flex: 1,\n                overflow: 'auto',\n                padding: `${spacing(1)} 0`,\n                overflowX: 'hidden',\n            },\n        })\n\n\n        dialog = withStyles(this.dialogContentStyles)(\n            ({ classes }: WithStyles<DialogContentCssRules>) =>\n                <Dialog\n                    open={this.state.open}\n                    onClose={this.closeDialog}\n                    fullWidth\n                    maxWidth='md' >\n                    <DialogTitle>Choose files</DialogTitle>\n                    <DialogContent className={classes.root}>\n                        <div className={classes.pickerWrapper}>\n                            <div className={classes.tree}>\n                                <ProjectsTreePicker\n                                    pickerId={this.props.commandInput.id}\n                                    includeCollections\n                                    includeDirectories\n                                    includeFiles\n                                    showSelection\n                                    cascadeSelection={true}\n                                    options={this.props.options}\n                                    toggleItemSelection={this.refreshFiles} />\n                            </div>\n                            <Divider />\n                            <div className={classes.chips}>\n                                <Typography variant='subtitle1'>Selected files ({this.state.files.length}):</Typography>\n                                <Chips\n                                    orderable\n                                    deletable\n                                    values={this.state.files}\n                                    onChange={this.setFiles}\n                                    getLabel={(file: CollectionFile) => file.name} />\n                            </div>\n                        </div>\n\n                    </DialogContent>\n                    <DialogActions>\n                        <Button onClick={this.closeDialog}>Cancel</Button>\n                        <Button\n                            data-cy='ok-button'\n                            variant='contained'\n                            color='primary'\n                            onClick={this.submit}>Ok</Button>\n                    </DialogActions>\n                </Dialog>\n        );\n\n    });\n\ntype DialogContentCssRules = 'root' | 'pickerWrapper' | 'tree' | 'divider' | 'chips';\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/file-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { memoize } from 'lodash/fp';\nimport {\n    isRequiredInput,\n    FileCommandInputParameter,\n    File,\n    CWLType\n} from 'models/workflow';\nimport { Field } from 'redux-form';\nimport { ERROR_MESSAGE } from 'validators/require';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Input, Dialog, DialogTitle, DialogContent, DialogActions, Button } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { GenericInputProps, GenericInput } from './generic-input';\nimport { ProjectsTreePicker } from 'views-components/projects-tree-picker/projects-tree-picker';\nimport { connect, DispatchProp } from 'react-redux';\nimport { initProjectsTreePicker } from 'store/tree-picker/tree-picker-actions';\nimport { TreeItem } from 'components/tree/tree';\nimport { ProjectsTreePickerItem } from 'store/tree-picker/tree-picker-middleware';\nimport { CollectionFile, CollectionFileType } from 'models/collection-file';\n\nexport interface FileInputProps {\n    input: FileCommandInputParameter;\n    options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n}\n\ntype DialogContentCssRules = 'root' | 'pickerWrapper';\n\nexport const FileInput = ({ input, options }: FileInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={FileInputComponent as any}\n        format={format}\n        parse={parse}\n        {...{\n            options\n        }}\n        validate={getValidation(input)} />;\n\nconst format = (value?: File) => value ? value.basename : '';\n\nconst parse = (file: CollectionFile): File => ({\n    class: CWLType.FILE,\n    location: `keep:${file.id}`,\n    basename: file.name,\n});\n\nconst getValidation = memoize(\n    (input: FileCommandInputParameter) => ([\n        isRequiredInput(input)\n            ? (file?: File) => file ? 
undefined : ERROR_MESSAGE\n            : () => undefined,\n    ]));\n\ninterface FileInputComponentState {\n    open: boolean;\n    file?: CollectionFile;\n}\n\nconst FileInputComponent = connect()(\n    class FileInputComponent extends React.Component<GenericInputProps & DispatchProp & {\n        options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n    }, FileInputComponentState> {\n        state: FileInputComponentState = {\n            open: false,\n        };\n\n        componentDidMount() {\n            this.props.dispatch<any>(\n                initProjectsTreePicker(this.props.commandInput.id));\n        }\n\n        render() {\n            return <>\n                {this.renderInput()}\n                <this.dialog />\n            </>;\n        }\n\n        openDialog = () => {\n            this.componentDidMount();\n            this.setState({ open: true });\n        }\n\n        closeDialog = () => {\n            this.setState({ open: false });\n        }\n\n        submit = () => {\n            this.closeDialog();\n            this.props.input.onChange(this.state.file);\n        }\n\n        setFile = (_: {}, { data }: TreeItem<ProjectsTreePickerItem>) => {\n            if ('type' in data && data.type === CollectionFileType.FILE) {\n                this.setState({ file: data });\n            } else {\n                this.setState({ file: undefined });\n            }\n        }\n\n        renderInput() {\n            return <GenericInput\n                component={props =>\n                    <Input\n                        readOnly\n                        fullWidth\n                        disabled={props.commandInput.disabled}\n                        value={props.input.value}\n                        error={props.meta.touched && !!props.meta.error}\n                        onClick={!props.commandInput.disabled ? this.openDialog : undefined}\n                        onKeyPress={!props.commandInput.disabled ? 
this.openDialog : undefined}\n                        onMouseDown={(e) => e.preventDefault()} />}\n                {...this.props} />;\n        }\n\n        dialogContentStyles: CustomStyleRulesCallback<DialogContentCssRules> = ({ spacing }) => ({\n            root: {\n                display: 'flex',\n                flexDirection: 'column',\n                height: \"80vh\",\n            },\n            pickerWrapper: {\n                minHeight: 0,\n                display: 'flex',\n                flexDirection: 'column',\n                height: \"100%\",\n            },\n        });\n\n        dialog = withStyles(this.dialogContentStyles)(\n            ({ classes }: WithStyles<DialogContentCssRules>) =>\n                <Dialog\n                    open={this.state.open}\n                    onClose={this.closeDialog}\n                    fullWidth\n                    data-cy=\"choose-a-file-dialog\"\n                    maxWidth='md'>\n                    <DialogTitle>Choose a file</DialogTitle>\n                    <DialogContent className={classes.root}>\n                        <div className={classes.pickerWrapper}>\n                            <ProjectsTreePicker\n                                pickerId={this.props.commandInput.id}\n                                includeCollections\n                                includeDirectories\n                                includeFiles\n                                cascadeSelection={false}\n                                options={this.props.options}\n                                toggleItemActive={this.setFile} />\n                        </div>\n                    </DialogContent>\n                    <DialogActions>\n                        <Button onClick={this.closeDialog} data-cy='file-input-cancel-button'>Cancel</Button>\n                        <Button\n                            disabled={!this.state.file}\n                            variant='contained'\n                            color='primary'\n                            onClick={this.submit}>Ok</Button>\n                    </DialogActions>\n                </Dialog >\n        );\n    });\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/float-array-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { isRequiredInput, FloatArrayCommandInputParameter } from 'models/workflow';\nimport { Field } from 'redux-form';\nimport { ERROR_MESSAGE } from 'validators/require';\nimport { GenericInputProps, GenericInput } from 'views/run-process-panel/inputs/generic-input';\nimport { ChipsInput } from 'components/chips-input/chips-input';\nimport { createSelector } from 'reselect';\nimport { FloatInput } from 'components/float-input/float-input';\n\nexport interface FloatArrayInputProps {\n    input: FloatArrayCommandInputParameter;\n}\nexport const FloatArrayInput = ({ input }: FloatArrayInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={FloatArrayInputComponent}\n        validate={validationSelector(input)} />;\n\n\nconst validationSelector = createSelector(\n    isRequiredInput,\n    isRequired => isRequired\n        ? [required]\n        : undefined\n);\n\nconst required = (value: string[]) =>\n    value && value.length > 0\n        ? undefined\n        : ERROR_MESSAGE;\n\nconst FloatArrayInputComponent = (props: GenericInputProps) =>\n    <GenericInput\n        component={InputComponent}\n        {...props} />;\n\nclass InputComponent extends React.PureComponent<GenericInputProps>{\n    render() {\n        const { commandInput, input, meta } = this.props;\n        return <ChipsInput\n            deletable={!commandInput.disabled}\n            orderable={!commandInput.disabled}\n            disabled={commandInput.disabled}\n            values={input.value}\n            onChange={this.handleChange}\n            createNewValue={parseFloat}\n            inputComponent={FloatInput}\n            inputProps={{\n                error: meta.error,\n            }} />;\n    }\n\n    handleChange = (values: {}[]) => {\n        const { input, meta } = this.props;\n        if (!meta.touched) {\n            input.onBlur(values);\n        }\n        input.onChange(values);\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/float-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { memoize } from 'lodash/fp';\nimport { FloatCommandInputParameter, isRequiredInput } from 'models/workflow';\nimport { Field } from 'redux-form';\nimport { isNumber } from 'validators/is-number';\nimport { GenericInputProps, GenericInput } from './generic-input';\nimport { FloatInput as FloatInputComponent } from 'components/float-input/float-input';\nexport interface FloatInputProps {\n    input: FloatCommandInputParameter;\n}\nexport const FloatInput = ({ input }: FloatInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={Input}\n        parse={parseFloat}\n        format={format}\n        validate={getValidation(input)} />;\n\nconst format = (value: any) => isNaN(value) ? '' : JSON.stringify(value);\n\nconst getValidation = memoize(\n    (input: FloatCommandInputParameter) => ([\n        isRequiredInput(input)\n            ? isNumber\n            : () => undefined,])\n);\n\nconst Input = (props: GenericInputProps) =>\n    <GenericInput\n        component={InputComponent}\n        {...props} />;\n\nconst InputComponent = ({ input, meta, commandInput }: GenericInputProps) =>\n    <FloatInputComponent\n        fullWidth\n        error={meta.touched && !!meta.error}\n        disabled={commandInput.disabled}\n        {...input} />;\n\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/generic-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { WrappedFieldProps } from 'redux-form';\nimport { FormHelperText, InputLabel, FormControl } from '@mui/material';\nimport { GenericCommandInputParameter, getInputLabel, isRequiredInput } from 'models/workflow';\n\nexport type GenericInputProps = WrappedFieldProps & {\n    commandInput: GenericCommandInputParameter<any, any>;\n};\n\ntype GenericInputContainerProps = GenericInputProps & {\n    component: React.ComponentType<GenericInputProps>;\n    required?: boolean;\n};\nexport const GenericInput = ({ component: Component, ...props }: GenericInputContainerProps) => {\n    return <FormControl fullWidth>\n        <InputLabel\n            shrink\n            variant={\"standard\"} // Filled and outlined cause a left gap\n            focused={props.meta.active}\n            required={props.required !== undefined ? props.required : isRequiredInput(props.commandInput)}\n            error={props.meta.touched && !!props.meta.error}>\n            {getInputLabel(props.commandInput)}\n        </InputLabel>\n        <Component {...props} />\n        <FormHelperText error={props.meta.touched && !!props.meta.error}>\n            {\n                props.meta.touched && props.meta.error\n                    ? props.meta.error\n                    : props.commandInput.doc\n            }\n        </FormHelperText>\n    </FormControl>;\n};\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/int-array-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { isRequiredInput, IntArrayCommandInputParameter } from 'models/workflow';\nimport { Field } from 'redux-form';\nimport { ERROR_MESSAGE } from 'validators/require';\nimport { GenericInputProps, GenericInput } from 'views/run-process-panel/inputs/generic-input';\nimport { ChipsInput } from 'components/chips-input/chips-input';\nimport { createSelector } from 'reselect';\nimport { IntInput } from 'components/int-input/int-input';\n\nexport interface IntArrayInputProps {\n    input: IntArrayCommandInputParameter;\n}\nexport const IntArrayInput = ({ input }: IntArrayInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={IntArrayInputComponent}\n        validate={validationSelector(input)} />;\n\n\nconst validationSelector = createSelector(\n    isRequiredInput,\n    isRequired => isRequired\n        ? [required]\n        : undefined\n);\n\nconst required = (value: string[]) =>\n    value && value.length > 0\n        ? undefined\n        : ERROR_MESSAGE;\n\nconst IntArrayInputComponent = (props: GenericInputProps) =>\n    <GenericInput\n        component={InputComponent}\n        {...props} />;\n\nclass InputComponent extends React.PureComponent<GenericInputProps>{\n    render() {\n        const { commandInput, input, meta } = this.props;\n        return <ChipsInput\n            deletable={!commandInput.disabled}\n            orderable={!commandInput.disabled}\n            disabled={commandInput.disabled}\n            values={input.value}\n            onChange={this.handleChange}\n            createNewValue={value => parseInt(value, 10)}\n            inputComponent={IntInput}\n            inputProps={{\n                error: meta.error,\n            }} />;\n    }\n\n    handleChange = (values: {}[]) => {\n        const { input, meta } = this.props;\n        if (!meta.touched) {\n            input.onBlur(values);\n        }\n        input.onChange(values);\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/int-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { memoize } from 'lodash/fp';\nimport { IntCommandInputParameter, isRequiredInput } from 'models/workflow';\nimport { Field } from 'redux-form';\nimport { isInteger } from 'validators/is-integer';\nimport { GenericInputProps, GenericInput } from 'views/run-process-panel/inputs/generic-input';\nimport { IntInput as IntInputComponent } from 'components/int-input/int-input';\n\nexport interface IntInputProps {\n    input: IntCommandInputParameter;\n}\nexport const IntInput = ({ input }: IntInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={InputComponent}\n        parse={parse}\n        format={format}\n        validate={getValidation(input)} />;\n\nexport const parse = (value: any) => value === '' ? '' : parseInt(value, 10);\n\nexport const format = (value: any) => isNaN(value) ? '' : JSON.stringify(value);\n\nconst getValidation = memoize(\n    (input: IntCommandInputParameter) => ([\n        isRequiredInput(input)\n            ? isInteger\n            : () => undefined,\n    ]));\n\nconst InputComponent = (props: GenericInputProps) =>\n    <GenericInput\n        component={Input}\n        {...props} />;\n\n\nconst Input = (props: GenericInputProps) =>\n    <IntInputComponent\n        fullWidth\n        type='number'\n        error={props.meta.touched && !!props.meta.error}\n        disabled={props.commandInput.disabled}\n        {...props.input} />;\n\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/run-wf-project-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect, DispatchProp } from 'react-redux';\nimport { Field } from 'redux-form';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Input, Dialog, DialogTitle, DialogContent, DialogActions, Button } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport {\n    GenericCommandInputParameter\n} from 'models/workflow';\nimport { GenericInput, GenericInputProps } from './generic-input';\nimport { ProjectsTreePicker } from 'views-components/projects-tree-picker/projects-tree-picker';\nimport { initProjectsTreePicker } from 'store/tree-picker/tree-picker-actions';\nimport { TreeItem } from 'components/tree/tree';\nimport { ProjectsTreePickerItem } from 'store/tree-picker/tree-picker-middleware';\nimport { ProjectResource } from 'models/project';\nimport { ResourceKind } from 'models/resource';\nimport { RootState } from 'store/store';\nimport { getUserUuid } from 'common/getuser';\nimport { getResource } from 'store/resources/resources';\nimport { loadProject } from 'store/workbench/workbench-actions';\nimport { runProcessPanelActions } from 'store/run-process-panel/run-process-panel-actions';\nimport { isUserResource } from 'models/user';\n\nexport type RunWfProjectCommandInputParameter = GenericCommandInputParameter<ProjectResource, ProjectResource>;\n\nconst isUndefined: any = (value?: ProjectResource) => (value === undefined);\n\ninterface ProjectInputProps {\n    required: boolean;\n    input: RunWfProjectCommandInputParameter;\n    options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n}\n\ntype DialogContentCssRules = 'root' | 'pickerWrapper';\n\nexport const RunWfProjectInput = ({ required, input, options }: ProjectInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={ProjectInputComponent as any}\n        format={format}\n        validate={required ? isUndefined : undefined}\n        {...{\n            options,\n            required\n        }} />;\n\nconst format = (value?: ProjectResource) => value ? 
value.name : '';\n\ninterface ProjectInputComponentState {\n    open: boolean;\n    hasBeenOpened: boolean;\n    defaultProject?: ProjectResource;\n    selectedProject?: ProjectResource;\n    targetProject?: ProjectResource;\n}\n\ntype ProjectInputComponentProps = {\n    userUuid: string | undefined;\n    userRootProject: ProjectResource | undefined;\n    defaultTargetProject: ProjectResource | undefined;\n    options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n    required?: boolean;\n}\n\ninterface HasUserUuid {\n    userUuid: string;\n}\n\nconst mapStateToProps = (state: RootState): Pick<ProjectInputComponentProps, 'userUuid' | 'userRootProject' | 'defaultTargetProject' > => {\n    const userUuid = getUserUuid(state)\n    const userRootProject = getResource<ProjectResource>(userUuid)(state.resources);\n    const defaultTargetProject = getResource<ProjectResource>(state.runProcessPanel.processOwnerUuid)(state.resources)\n    return {\n        userUuid,\n        userRootProject,\n        defaultTargetProject,\n    }\n};\n\nconst ProjectInputComponent = connect(mapStateToProps)(\n    class ProjectInputComponent extends React.Component<GenericInputProps & DispatchProp & HasUserUuid & ProjectInputComponentProps, ProjectInputComponentState> {\n\n        state: ProjectInputComponentState = {\n            open: false,\n            hasBeenOpened: false,\n            defaultProject: undefined, // defined in redux as the current project where the workflow will run\n            selectedProject: undefined, // current project selected in the dialog\n            targetProject: undefined, // set on submit when dialog closes\n        };\n\n        componentDidMount() {\n            this.props.dispatch<any>(\n                initProjectsTreePicker(this.props.commandInput.id));\n            const project = this.getDefaultProject();\n            // set initial selected project\n            if (!this.state.selectedProject && project) {\n                this.setState({\n                    defaultProject: project,\n                    selectedProject: project,\n                });\n            }\n            // load user root project if not already loaded\n            if (this.props.userUuid && (!isUserResource(this.props.userRootProject) || !('firstName' in this.props.userRootProject))) {\n                this.props.dispatch<any>(loadProject(this.props.userUuid));\n            }\n            // open dialog automatically when input mounts\n            if (this.state.hasBeenOpened === false) {\n                this.setState({ open: true, hasBeenOpened: true });\n            }\n        }\n\n        componentDidUpdate(prevProps: ProjectInputComponentProps, prevState: ProjectInputComponentState) {\n            // set target project if not already set\n            if (!this.state.targetProject) {\n                const project = this.getDefaultProject();\n                if (project) {\n                    this.setState({ targetProject: project });\n                }\n            }\n            // set default project if user root project changes (e.g. 
when user root project loads)\n            if (this.props.userRootProject && prevProps.userRootProject !== this.props.userRootProject) {\n                const project = this.getDefaultProject();\n                this.setState({\n                    defaultProject: project,\n                    selectedProject: project,\n                    targetProject: project,\n                });\n            }\n            // ensures that the target & selected project are set if page reloads\n            if (prevProps.defaultTargetProject !== this.props.defaultTargetProject) {\n                const project = this.getDefaultProject();\n                this.setState({ selectedProject: project, targetProject: project });\n            }\n        }\n\n        componentWillUnmount(): void {\n            this.props.dispatch<any>(runProcessPanelActions.SET_PROCESS_OWNER_UUID(''));\n            this.setState({ targetProject: undefined });\n        }\n\n        getDefaultProject = () => {\n            const { userUuid, userRootProject, defaultTargetProject } = this.props;\n            if (defaultTargetProject?.canWrite) {\n                return defaultTargetProject;\n            }\n            const isTargetUser = (defaultTargetProject as any)?.kind === ResourceKind.USER;\n            const isTargetUserThisUser = isTargetUser && defaultTargetProject?.uuid === userUuid;\n            if (isTargetUserThisUser) {\n                if (defaultTargetProject) {\n                    return defaultTargetProject;\n                }\n                return userRootProject;\n            }\n            return defaultTargetProject || userRootProject\n        }\n\n        render() {\n            return <>\n                {this.renderInput()}\n                <DialogComponent\n                    targetProject={this.state.targetProject}\n                    open={this.state.open}\n                    closeDialog={this.closeDialog}\n                    setProject={this.setProject}\n                    submit={this.submit}\n                    invalid={this.invalid}\n                    commandInput={this.props.commandInput}\n                    options={this.props.options}\n                />\n            </>;\n        }\n\n        openDialog = () => {\n            this.componentDidMount();\n            this.setState({ open: true });\n        }\n\n        closeDialog = () => {\n            this.setState({ open: false });\n        }\n\n        submit = () => {\n            this.closeDialog();\n            if (this.state.selectedProject) {\n                if (this.state.selectedProject.kind === ResourceKind.PROJECT || this.state.selectedProject.kind === ResourceKind.USER) {\n                    this.props.dispatch<any>(runProcessPanelActions.SET_PROCESS_OWNER_UUID(this.state.selectedProject.uuid));\n                    this.setState({ targetProject: this.state.selectedProject });\n                    // Update redux form value to be submitted\n                    this.props.input.onChange(this.state.selectedProject);\n                }\n            }\n        }\n\n        setProject = (_: {}, { data }: TreeItem<ProjectsTreePickerItem>) => {\n            if ('kind' in data){\n                if (data.kind === ResourceKind.PROJECT) {\n                    this.setState({ selectedProject: data });\n                } else if (data.kind === ResourceKind.USER) {\n                    this.setState({ selectedProject: this.props.userRootProject });\n                }\n            } else {\n                this.setState({ 
selectedProject: undefined });\n            }\n        }\n\n        getDisplayName(item: ProjectsTreePickerItem | undefined): string {\n            if (item === undefined) {\n                return '';\n            }\n            if ('kind' in item && item.kind === ResourceKind.USER) {\n                return `${item.firstName} ${item.lastName} (root project)`;\n            }\n            if ('name' in item) {\n                return item.name;\n            } else {\n                return '';\n            }\n        }\n\n        invalid = () => (!this.state.selectedProject || !this.state.selectedProject.canWrite);\n\n        renderInput() {\n            return <GenericInput\n                component={props =>\n                    <Input\n                        readOnly\n                        fullWidth\n                        data-cy='run-wf-project-input'\n                        value={this.getDisplayName(this.state.targetProject)}\n                        error={props.meta.touched && !!props.meta.error}\n                        disabled={props.commandInput.disabled}\n                        onClick={!this.props.commandInput.disabled ? this.openDialog : undefined}\n                        onKeyPress={!this.props.commandInput.disabled ? this.openDialog : undefined}\n                        onMouseDown={(e) => e.preventDefault()} />}\n                {...this.props} />;\n        }\n    });\n\nconst dialogContentStyles: CustomStyleRulesCallback<DialogContentCssRules> = ({ spacing }) => ({\n    root: {\n        display: 'flex',\n        flexDirection: 'column',\n        height: \"80vh\",\n    },\n    pickerWrapper: {\n        display: 'flex',\n        flexDirection: 'column',\n        height: \"100%\",\n    },\n});\n\nconst DialogComponent = withStyles(dialogContentStyles)(\n    (props: WithStyles<DialogContentCssRules> & {\n        targetProject: ProjectResource | undefined,\n        open: boolean,\n        closeDialog: () => void,\n        setProject: (_: {}, { data }: TreeItem<ProjectsTreePickerItem>) => void,\n        submit: () => void,\n        invalid: () => boolean,\n        commandInput: GenericCommandInputParameter<ProjectResource, ProjectResource>,\n        options?: { showOnlyOwned: boolean, showOnlyWritable: boolean },\n    }) =>\n        props.open ?\n            <Dialog\n                open={props.open}\n                onClose={props.closeDialog}\n                fullWidth\n                data-cy=\"choose-a-project-dialog\"\n                maxWidth='md'>\n                    <DialogTitle>Choose the project where the workflow will run</DialogTitle>\n                    <DialogContent className={props.classes.root}>\n                        <div className={props.classes.pickerWrapper}>\n                            {props.targetProject && <ProjectsTreePicker\n                                pickerId={props.commandInput.id}\n                                cascadeSelection={false}\n                                options={props.options}\n                                project={props.targetProject}\n                                currentUuids={[props.targetProject.uuid]}\n                                toggleItemActive={props.setProject} />}\n                        </div>\n                    </DialogContent>\n                    <DialogActions>\n                        <Button onClick={props.closeDialog} data-cy='run-wf-project-picker-cancel-button'>\n                            Cancel\n                        </Button>\n                        <Button\n      
                      data-cy='run-wf-project-picker-ok-button'\n                            disabled={props.invalid()}\n                            variant='contained'\n                            color='primary'\n                            onClick={props.submit}>Ok</Button>\n                    </DialogActions>\n            </Dialog> : null\n        );\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/search-project-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect, DispatchProp } from 'react-redux';\nimport { Field } from 'redux-form';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Input, Dialog, DialogTitle, DialogContent, DialogActions, Button } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport {\n    GenericCommandInputParameter\n} from 'models/workflow';\nimport { GenericInput, GenericInputProps } from './generic-input';\nimport { ProjectsTreePicker } from 'views-components/projects-tree-picker/projects-tree-picker';\nimport { initProjectsTreePicker } from 'store/tree-picker/tree-picker-actions';\nimport { TreeItem } from 'components/tree/tree';\nimport { ProjectsTreePickerItem } from 'store/tree-picker/tree-picker-middleware';\nimport { ProjectResource } from 'models/project';\nimport { ResourceKind } from 'models/resource';\nimport { RootState } from 'store/store';\nimport { getUserUuid } from 'common/getuser';\n\nexport type SearchProjectCommandInputParameter = GenericCommandInputParameter<ProjectResource, ProjectResource>;\n\nconst isUndefined: any = (value?: ProjectResource) => (value === undefined);\n\ninterface ProjectInputProps {\n    required: boolean;\n    input: SearchProjectCommandInputParameter;\n    options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n}\n\ntype DialogContentCssRules = 'root' | 'pickerWrapper';\n\nexport const SearchProjectInput = ({ required, input, options }: ProjectInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={ProjectInputComponent as any}\n        format={format}\n        validate={required ? isUndefined : undefined}\n        {...{\n            options,\n            required\n        }} />;\n\nconst format = (value?: ProjectResource) => value ? 
value.name : '';\n\ninterface ProjectInputComponentState {\n    open: boolean;\n    project?: ProjectResource;\n}\n\ninterface HasUserUuid {\n    userUuid: string;\n}\n\nconst mapStateToProps = (state: RootState) => ({ userUuid: getUserUuid(state) });\n\nconst ProjectInputComponent = connect(mapStateToProps)(\n    class ProjectInputComponent extends React.Component<GenericInputProps & DispatchProp & HasUserUuid & {\n        options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n        required?: boolean;\n    }, ProjectInputComponentState> {\n        state: ProjectInputComponentState = {\n            open: false,\n        };\n\n        componentDidMount() {\n            this.props.dispatch<any>(\n                initProjectsTreePicker(this.props.commandInput.id));\n        }\n\n        render() {\n            return <>\n                {this.renderInput()}\n                <this.dialog />\n            </>;\n        }\n\n        openDialog = () => {\n            // re-initialize the tree picker on open instead of re-invoking the lifecycle method\n            this.props.dispatch<any>(\n                initProjectsTreePicker(this.props.commandInput.id));\n            this.setState({ open: true });\n        }\n\n        closeDialog = () => {\n            this.setState({ open: false });\n        }\n\n        submit = () => {\n            this.closeDialog();\n            this.props.input.onChange(this.state.project);\n        }\n\n        setProject = (_: {}, { data }: TreeItem<ProjectsTreePickerItem>) => {\n            if ('kind' in data && data.kind === ResourceKind.PROJECT) {\n                this.setState({ project: data });\n            } else {\n                this.setState({ project: undefined });\n            }\n        }\n\n        invalid = () => (!this.state.project || !this.state.project.canWrite);\n\n        renderInput() {\n            return <GenericInput\n                component={props =>\n                    <Input\n                        readOnly\n                        fullWidth\n                        value={props.input.value}\n                        error={props.meta.touched && !!props.meta.error}\n                        disabled={props.commandInput.disabled}\n                        onClick={!this.props.commandInput.disabled ? this.openDialog : undefined}\n                        onKeyPress={!this.props.commandInput.disabled ? this.openDialog : undefined} />}\n                {...this.props} />;\n        }\n\n        dialogContentStyles: CustomStyleRulesCallback<DialogContentCssRules> = ({ spacing }) => ({\n            root: {\n                display: 'flex',\n                flexDirection: 'column',\n                height: \"80vh\",\n            },\n            pickerWrapper: {\n                display: 'flex',\n                flexDirection: 'column',\n                height: \"100%\",\n            },\n        });\n\n        dialog = withStyles(this.dialogContentStyles)(\n            ({ classes }: WithStyles<DialogContentCssRules>) =>\n                this.state.open ? 
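/* mount the dialog only while open */ 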
<Dialog\n                                      open={this.state.open}\n                                      onClose={this.closeDialog}\n                                      fullWidth\n                                      data-cy=\"choose-a-project-dialog\"\n                                      maxWidth='md'>\n                    <DialogTitle>Choose a project</DialogTitle>\n                    <DialogContent className={classes.root}>\n                        <div className={classes.pickerWrapper}>\n                            <ProjectsTreePicker\n                                pickerId={this.props.commandInput.id}\n                                cascadeSelection={false}\n                                options={this.props.options}\n                                toggleItemActive={this.setProject} />\n                        </div>\n                    </DialogContent>\n                    <DialogActions>\n                        <Button onClick={this.closeDialog}>Cancel</Button>\n                        <Button\n                            disabled={this.invalid()}\n                            variant='contained'\n                            color='primary'\n                            onClick={this.submit}>Ok</Button>\n                    </DialogActions>\n                </Dialog> : null\n        );\n\n    });\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/string-array-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { isRequiredInput, StringArrayCommandInputParameter } from 'models/workflow';\nimport { Field } from 'redux-form';\nimport { ERROR_MESSAGE } from 'validators/require';\nimport { GenericInputProps, GenericInput } from 'views/run-process-panel/inputs/generic-input';\nimport { ChipsInput } from 'components/chips-input/chips-input';\nimport { identity } from 'lodash';\nimport { createSelector } from 'reselect';\nimport { Input } from '@mui/material';\n\nexport interface StringArrayInputProps {\n    input: StringArrayCommandInputParameter;\n}\nexport const StringArrayInput = ({ input }: StringArrayInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={StringArrayInputComponent}\n        validate={validationSelector(input)} />;\n\n\nconst validationSelector = createSelector(\n    isRequiredInput,\n    isRequired => isRequired\n        ? [required]\n        : undefined\n);\n\nconst required = (value: string[] = []) =>\n    value && value.length > 0\n        ? undefined\n        : ERROR_MESSAGE;\n\nconst StringArrayInputComponent = (props: GenericInputProps) =>\n    <GenericInput\n        component={InputComponent}\n        {...props} />;\n\nclass InputComponent extends React.PureComponent<GenericInputProps>{\n    render() {\n        const { commandInput, input, meta } = this.props;\n        return <ChipsInput\n            deletable={!commandInput.disabled}\n            orderable={!commandInput.disabled}\n            disabled={commandInput.disabled}\n            values={input.value}\n            onChange={this.handleChange}\n            createNewValue={identity}\n            inputComponent={Input}\n            inputProps={{\n                error: meta.error\n            }} />;\n    }\n\n    handleChange = (values: {}[]) => {\n        const { input, meta } = this.props;\n        if (!meta.touched) {\n            input.onBlur(values);\n        }\n        input.onChange(values);\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/inputs/string-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { memoize } from 'lodash/fp';\nimport { isRequiredInput, StringCommandInputParameter } from 'models/workflow';\nimport { Field } from 'redux-form';\nimport { fieldRequire } from 'validators/require';\nimport { GenericInputProps, GenericInput } from 'views/run-process-panel/inputs/generic-input';\nimport { Input as MaterialInput } from '@mui/material';\n\nexport interface StringInputProps {\n    input: StringCommandInputParameter;\n}\nexport const StringInput = ({ input }: StringInputProps) =>\n    <Field\n        name={input.id}\n        commandInput={input}\n        component={StringInputComponent}\n        validate={getValidation(input)} />;\n\nconst getValidation = memoize(\n    (input: StringCommandInputParameter) => ([\n        isRequiredInput(input)\n            ? fieldRequire\n            : () => undefined,\n    ]));\n\nconst StringInputComponent = (props: GenericInputProps) =>\n    <GenericInput\n        component={Input}\n        {...props} />;\n\nconst Input = (props: GenericInputProps) =>\n    <MaterialInput\n        fullWidth\n        error={props.meta.touched && !!props.meta.error}\n        disabled={props.commandInput.disabled}\n\ttype={props.commandInput.secret ? 'password' : 'text'}\n        {...props.input} />;\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/run-process-advanced-form.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Accordion, AccordionDetails, AccordionSummary } from '@mui/material';\nimport { reduxForm, Field } from 'redux-form';\nimport { Grid } from '@mui/material';\nimport { TextField } from 'components/text-field/text-field';\nimport { ExpandIcon } from 'components/icon/icon';\nimport * as IntInput from './inputs/int-input';\nimport { min } from 'validators/min';\nimport { optional } from 'validators/optional';\nimport { RUN_PROCESS_ADVANCED_FORM,\n         OUTPUT_FIELD,\n         RUNTIME_FIELD,\n         RAM_FIELD,\n         VCPUS_FIELD,\n         KEEP_CACHE_RAM_FIELD,\n         RUNNER_IMAGE_FIELD,\n         RunProcessAdvancedFormData\n} from 'store/run-process-panel/run-process-panel-actions';\n\nexport const RunProcessAdvancedForm =\n    reduxForm<RunProcessAdvancedFormData>({\n        form: RUN_PROCESS_ADVANCED_FORM,\n    })(() =>\n        <form>\n            <Accordion elevation={0}>\n                <AccordionSummary style={{ padding: 0 }} expandIcon={<ExpandIcon />}>\n                    Advanced\n                </AccordionSummary>\n                <AccordionDetails style={{ padding: 0 }}>\n                    <Grid container spacing={4}>\n                        <Grid item xs={12} md={6}>\n                            <Field\n                                name={OUTPUT_FIELD}\n                                component={TextField as any}\n                                label=\"Output name\" />\n                        </Grid>\n                        <Grid item xs={12} md={6}>\n                            <Field\n                                name={RUNTIME_FIELD}\n                                component={TextField as any}\n                                helperText=\"Maximum running time (in seconds) that this container will be allowed to run before being cancelled.\"\n                                label=\"Runtime limit\"\n                                parse={IntInput.parse}\n                                format={IntInput.format}\n                                type='number'\n                                validate={runtimeValidation} />\n                        </Grid>\n                        <Grid item xs={12} md={6}>\n                            <Field\n                                name={RAM_FIELD}\n                                component={TextField as any}\n                                label=\"RAM\"\n                                helperText=\"Number of ram bytes to be used to run this process.\"\n                                parse={IntInput.parse}\n                                format={IntInput.format}\n                                type='number'\n                                required\n                                validate={ramValidation} />\n                        </Grid>\n                        <Grid item xs={12} md={6}>\n                            <Field\n                                name={VCPUS_FIELD}\n                                component={TextField as any}\n                                label=\"VCPUs\"\n                                helperText=\"Number of cores to be used to run this process.\"\n                                parse={IntInput.parse}\n                                format={IntInput.format}\n                                type='number'\n                                required\n                                validate={vcpusValidation} />\n                  
      </Grid>\n                        <Grid item xs={12} md={6}>\n                            <Field\n                                name={KEEP_CACHE_RAM_FIELD}\n                                component={TextField as any}\n                                label=\"Keep cache RAM\"\n                                helperText=\"Number of Keep cache bytes to be used to run this process.\"\n                                parse={IntInput.parse}\n                                format={IntInput.format}\n                                type='number'\n                                validate={keepCacheRamValidation} />\n                        </Grid>\n                        <Grid item xs={12} md={6}>\n                            <Field\n                                name={RUNNER_IMAGE_FIELD}\n                                component={TextField as any}\n                                label='Runner'\n                                required\n                                helperText='The container image with arvados-cwl-runner that will execute this workflow.' />\n                        </Grid>\n                    </Grid>\n                </AccordionDetails>\n            </Accordion>\n        </form>);\n\nconst ramValidation = [min(0)];\nconst vcpusValidation = [min(1)];\nconst keepCacheRamValidation = [optional(min(0))];\nconst runtimeValidation = [optional(min(1))];\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/run-process-basic-form.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { reduxForm, Field, InjectedFormProps } from 'redux-form';\nimport { Grid, Typography } from '@mui/material';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithStyles } from '@mui/styles';\nimport { TextField } from 'components/text-field/text-field';\nimport { RunWfProjectInput, RunWfProjectCommandInputParameter } from 'views/run-process-panel/inputs/run-wf-project-input';\nimport { PROCESS_NAME_VALIDATION } from 'validators/validators';\nimport { WorkflowResource } from 'models/workflow';\nimport { ArvadosTheme, CustomStyleRulesCallback } from 'common/custom-theme';\nimport { RUN_PROCESS_BASIC_FORM, RunProcessBasicFormData } from 'store/run-process-panel/run-process-panel-actions';\n\ninterface RunProcessBasicFormProps {\n    workflow?: WorkflowResource;\n}\n\ntype CssRules = 'root' | 'name' | 'description' | 'inputItem';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        fontSize: '1.125rem',\n    },\n    name: {\n        overflow: 'hidden',\n        color: theme.customs.colors.greyD,\n        fontSize: '1.875rem',\n    },\n    description: {},\n    inputItem: {\n        marginBottom: theme.spacing(2),\n    }\n});\n\nexport const RunProcessBasicForm = reduxForm<RunProcessBasicFormData, RunProcessBasicFormProps>({\n    form: RUN_PROCESS_BASIC_FORM,\n})(\n    withStyles(styles)((props: InjectedFormProps<RunProcessBasicFormData, RunProcessBasicFormProps> & RunProcessBasicFormProps & WithStyles<CssRules>) => (\n        <form className={props.classes.root}>\n            <Grid\n                container\n                spacing={4}\n            >\n                <Grid\n                    item\n                    xs={12}\n                >\n                    {props.workflow && (\n                        <Typography\n                            className={props.classes.name}\n                            data-cy='workflow-name'\n                        >\n                            {props.workflow.name}\n                        </Typography>\n                    )}\n                </Grid>\n                <Grid\n                    item\n                    xs={12}\n                >\n                    {props.workflow && (\n                        <Typography\n                            className={props.classes.description}\n                            data-cy='workflow-description'\n                            //dangerouslySetInnerHTML is ok here only if description is sanitized,\n                            //which it is before it is loaded into the redux store\n                            dangerouslySetInnerHTML={{ __html: props.workflow.description || \"\" }}\n                        />\n                    )}\n                </Grid>\n                <Grid\n                    item\n                    xs={12}\n                    md={6}\n                    className={props.classes.inputItem}\n                    >\n                    <Field\n                        name='name'\n                        component={TextField as any}\n                        label='Name for this workflow run'\n                        required\n                        validate={PROCESS_NAME_VALIDATION}\n                    />\n                </Grid>\n                <Grid\n                    item\n                    xs={12}\n                    md={6}\n                    
className={props.classes.inputItem}\n                >\n                    <RunWfProjectInput\n                        required\n                        input={\n                            {\n                                id: 'owner',\n                                label: 'Project where the workflow will run',\n                            } as RunWfProjectCommandInputParameter\n                        }\n                        options={{ showOnlyOwned: false, showOnlyWritable: true }}\n                    />\n                </Grid>\n            </Grid>\n        </form>\n    ))\n);\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/run-process-first-step.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid, Button, List, ListItem, ListItemText, ListItemIcon } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { WorkflowResource } from 'models/workflow';\nimport { WorkflowIcon } from 'components/icon/icon';\nimport { WorkflowDetailsCard } from 'views/workflow-panel/workflow-description-card';\nimport { SearchInput } from 'components/search-input/search-input';\n\ntype CssRules = 'root' | 'searchGrid' | 'workflowDetailsGrid' | 'list' | 'listItem' | 'itemSelected' | 'listItemText' | 'listItemIcon';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        alignSelf: 'flex-start'\n    },\n    searchGrid: {\n        marginBottom: theme.spacing(2)\n    },\n    workflowDetailsGrid: {\n        borderLeft: `1px solid ${theme.palette.grey[\"300\"]}`\n    },\n    list: {\n        height: \"50vh\",\n        position: 'relative',\n        overflow: 'auto'\n    },\n    listItem: {\n        padding: theme.spacing(1),\n    },\n    itemSelected: {\n        backgroundColor: 'rgba(3, 190, 171, 0.3) !important'\n    },\n    listItemText: {\n        fontSize: '0.875rem'\n    },\n    listItemIcon: {\n        color: theme.customs.colors.red900\n    }\n});\n\nexport interface RunProcessFirstStepDataProps {\n    workflows: WorkflowResource[];\n    selectedWorkflow: WorkflowResource | undefined;\n}\n\nexport interface RunProcessFirstStepActionProps {\n    onSearch: (term: string) => void;\n    onSetStep: (step: number) => void;\n    onSetWorkflow: (workflow: WorkflowResource) => void;\n}\n\ntype RunProcessFirstStepProps = RunProcessFirstStepDataProps & RunProcessFirstStepActionProps & WithStyles<CssRules>;\n\nexport const RunProcessFirstStep = withStyles(styles)(\n    ({ onSearch, onSetStep, onSetWorkflow, workflows, selectedWorkflow, classes }: RunProcessFirstStepProps) =>\n        <Grid container spacing={2}>\n            <Grid container item xs={6} className={classes.root}>\n                <Grid item xs={12} className={classes.searchGrid}>\n                    <SearchInput selfClearProp=\"\" value='' onSearch={onSearch} />\n                </Grid>\n                <Grid item xs={12}>\n                    <List className={classes.list}>\n                        {workflows.map(workflow => (\n                            <ListItem key={workflow.uuid} button\n                                      classes={{ root: classes.listItem, selected: classes.itemSelected}}\n                                      selected={selectedWorkflow && (selectedWorkflow.uuid === workflow.uuid)}\n                                      onClick={() => onSetWorkflow(workflow)}>\n                                <ListItemIcon>\n                                    <WorkflowIcon className={classes.listItemIcon}/>\n                                </ListItemIcon>\n                                <ListItemText className={classes.listItemText} primary={workflow.name} disableTypography={true} />\n                            </ListItem>\n                        ))}\n                    </List>\n                </Grid>\n            </Grid>\n            <Grid item xs={6} className={classes.workflowDetailsGrid}>\n                <WorkflowDetailsCard 
workflow={selectedWorkflow}/>\n            </Grid>\n            <Grid item xs={12}>\n                <Button variant=\"contained\" color=\"primary\"\n                        data-cy=\"run-process-next-button\"\n                        disabled={!selectedWorkflow}\n                        onClick={() => onSetStep(1)}>\n                    Next\n                </Button>\n            </Grid>\n        </Grid>\n);\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/run-process-inputs-form.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { reduxForm, InjectedFormProps } from 'redux-form';\nimport { CommandInputParameter, CWLType, IntCommandInputParameter, BooleanCommandInputParameter, FileCommandInputParameter, DirectoryCommandInputParameter, DirectoryArrayCommandInputParameter, FloatArrayCommandInputParameter, IntArrayCommandInputParameter } from 'models/workflow';\nimport { IntInput } from 'views/run-process-panel/inputs/int-input';\nimport { StringInput } from 'views/run-process-panel/inputs/string-input';\nimport { StringCommandInputParameter, FloatCommandInputParameter, isPrimitiveOfType, WorkflowInputsData, EnumCommandInputParameter, isArrayOfType, StringArrayCommandInputParameter, FileArrayCommandInputParameter, getEnumType } from '../../models/workflow';\nimport { FloatInput } from 'views/run-process-panel/inputs/float-input';\nimport { BooleanInput } from './inputs/boolean-input';\nimport { FileInput } from './inputs/file-input';\nimport { connect } from 'react-redux';\nimport { compose } from 'redux';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { EnumInput } from './inputs/enum-input';\nimport { DirectoryInput } from './inputs/directory-input';\nimport { StringArrayInput } from './inputs/string-array-input';\nimport { createStructuredSelector, createSelector } from 'reselect';\nimport { FileArrayInput } from './inputs/file-array-input';\nimport { DirectoryArrayInput } from './inputs/directory-array-input';\nimport { FloatArrayInput } from './inputs/float-array-input';\nimport { IntArrayInput } from './inputs/int-array-input';\nimport { RUN_PROCESS_INPUTS_FORM } from 'store/run-process-panel/run-process-panel-actions';\n\nexport interface RunProcessInputFormProps {\n    inputs: CommandInputParameter[];\n}\n\nconst inputsSelector = (props: RunProcessInputFormProps) =>\n    props.inputs;\n\nconst initialValuesSelector = createSelector(\n    inputsSelector,\n    inputs => inputs.reduce(\n        (values, input) => ({ ...values, [input.id]: input.value || input.default }),\n        {}));\n\nconst propsSelector = createStructuredSelector({\n    initialValues: initialValuesSelector,\n});\n\nconst mapStateToProps = (_: any, props: RunProcessInputFormProps) =>\n    propsSelector(props);\n\ntype RunProcessCssRules = 'formGridContainer';\n\nconst runProcessStyles: CustomStyleRulesCallback<RunProcessCssRules> = theme => ({\n    formGridContainer: {\n        marginTop: 0,\n    }\n});\n\nexport const RunProcessInputsForm = compose(\n    connect(mapStateToProps),\n    withStyles(runProcessStyles),\n    reduxForm<WorkflowInputsData, RunProcessInputFormProps>({\n        form: RUN_PROCESS_INPUTS_FORM\n    }))(\n        (props: InjectedFormProps & RunProcessInputFormProps & WithStyles<RunProcessCssRules>) =>\n            <form>\n                <Grid container spacing={4} className={props.classes.formGridContainer}>\n                    {props.inputs.map(input =>\n                        <InputItem input={input} key={input.id} />)}\n                </Grid>\n            </form>);\n\ntype InputCssRules = 'inputItem';\n\nconst inputStyles: CustomStyleRulesCallback<InputCssRules> = theme => ({\n    inputItem: {\n        marginBottom: theme.spacing(2),\n    }\n});\n\nconst InputItem = withStyles(inputStyles)(\n    (props: 
WithStyles<InputCssRules> & { input: CommandInputParameter }) =>\n        <Grid item xs={12} md={6} className={props.classes.inputItem}>\n            {getInputComponent(props.input)}\n        </Grid>);\n\nconst getInputComponent = (input: CommandInputParameter) => {\n    switch (true) {\n        case isPrimitiveOfType(input, CWLType.BOOLEAN):\n            return <BooleanInput input={input as BooleanCommandInputParameter} />;\n\n        case isPrimitiveOfType(input, CWLType.INT):\n        case isPrimitiveOfType(input, CWLType.LONG):\n            return <IntInput input={input as IntCommandInputParameter} />;\n\n        case isPrimitiveOfType(input, CWLType.FLOAT):\n        case isPrimitiveOfType(input, CWLType.DOUBLE):\n            return <FloatInput input={input as FloatCommandInputParameter} />;\n\n        case isPrimitiveOfType(input, CWLType.STRING):\n            return <StringInput input={input as StringCommandInputParameter} />;\n\n        case isPrimitiveOfType(input, CWLType.FILE):\n            return <FileInput options={{ showOnlyOwned: false, showOnlyWritable: false }} input={input as FileCommandInputParameter} />;\n\n        case isPrimitiveOfType(input, CWLType.DIRECTORY):\n            return <DirectoryInput options={{ showOnlyOwned: false, showOnlyWritable: false }} input={input as DirectoryCommandInputParameter} />;\n\n        case getEnumType(input) !== null:\n            return <EnumInput input={input as EnumCommandInputParameter} />;\n\n        case isArrayOfType(input, CWLType.STRING):\n            return <StringArrayInput input={input as StringArrayCommandInputParameter} />;\n\n        case isArrayOfType(input, CWLType.INT):\n        case isArrayOfType(input, CWLType.LONG):\n            return <IntArrayInput input={input as IntArrayCommandInputParameter} />;\n\n        case isArrayOfType(input, CWLType.FLOAT):\n        case isArrayOfType(input, CWLType.DOUBLE):\n            return <FloatArrayInput input={input as FloatArrayCommandInputParameter} />;\n\n        case isArrayOfType(input, CWLType.FILE):\n            return <FileArrayInput options={{ showOnlyOwned: false, showOnlyWritable: false }} input={input as FileArrayCommandInputParameter} />;\n\n        case isArrayOfType(input, CWLType.DIRECTORY):\n            return <DirectoryArrayInput options={{ showOnlyOwned: false, showOnlyWritable: false }} input={input as DirectoryArrayCommandInputParameter} />;\n\n        default:\n            return null;\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/run-process-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Stepper, Step, StepLabel, StepContent } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { RunProcessFirstStepDataProps, RunProcessFirstStepActionProps, RunProcessFirstStep } from 'views/run-process-panel/run-process-first-step';\nimport { RunProcessSecondStepForm } from './run-process-second-step';\n\nexport type RunProcessPanelRootDataProps = {\n    currentStep: number;\n} & RunProcessFirstStepDataProps;\n\nexport type RunProcessPanelRootActionProps = RunProcessFirstStepActionProps & {\n    runProcess: () => void;\n};\n\ntype RunProcessPanelRootProps = RunProcessPanelRootDataProps & RunProcessPanelRootActionProps;\n\ntype CssRules = 'stepper';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    stepper: {\n        overflow: \"scroll\",\n        width: \"100%\",\n    }\n});\n\nexport const RunProcessPanelRoot = withStyles(styles)(\n    ({ runProcess, currentStep, onSearch, onSetStep, onSetWorkflow, workflows, selectedWorkflow, classes }: WithStyles<CssRules> & RunProcessPanelRootProps) =>\n        <Stepper activeStep={currentStep} orientation=\"vertical\" elevation={2} className={classes.stepper}>\n            <Step>\n                <StepLabel>Choose a workflow</StepLabel>\n                <StepContent>\n                    <RunProcessFirstStep\n                        workflows={workflows}\n                        selectedWorkflow={selectedWorkflow}\n                        onSearch={onSearch}\n                        onSetStep={onSetStep}\n                        onSetWorkflow={onSetWorkflow} />\n                </StepContent>\n            </Step>\n            <Step>\n                <StepLabel>Select inputs</StepLabel>\n                <StepContent>\n                    <RunProcessSecondStepForm\n                        goBack={() => onSetStep(0)}\n                        runProcess={runProcess} />\n                </StepContent>\n            </Step>\n        </Stepper>);\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/run-process-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { RunProcessPanelRootDataProps, RunProcessPanelRootActionProps, RunProcessPanelRoot } from 'views/run-process-panel/run-process-panel-root';\nimport { goToStep, runProcess, searchWorkflows, openSetWorkflowDialog } from 'store/run-process-panel/run-process-panel-actions';\nimport { WorkflowResource } from 'models/workflow';\n\nconst mapStateToProps = ({ runProcessPanel }: RootState): RunProcessPanelRootDataProps => {\n    return {\n        workflows: runProcessPanel.searchWorkflows,\n        currentStep: runProcessPanel.currentStep,\n        selectedWorkflow: runProcessPanel.selectedWorkflow\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): RunProcessPanelRootActionProps => ({\n    onSetStep: (step: number) => {\n        dispatch<any>(goToStep(step));\n    },\n    onSetWorkflow: (workflow: WorkflowResource) => {\n        dispatch<any>(openSetWorkflowDialog(workflow));\n    },\n    runProcess: () => {\n        dispatch<any>(runProcess);\n    },\n    onSearch: (term: string) => {\n        dispatch<any>(searchWorkflows(term));\n    }\n});\n\nexport const RunProcessPanel = connect(mapStateToProps, mapDispatchToProps)(RunProcessPanelRoot);\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/run-process-second-step.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Dispatch } from 'redux';\nimport { Grid, Button } from '@mui/material';\nimport {\n         RUN_PROCESS_BASIC_FORM,\n         RUN_PROCESS_INPUTS_FORM,\n    RUN_PROCESS_ADVANCED_FORM\n} from 'store/run-process-panel/run-process-panel-actions';\nimport { RunProcessBasicForm } from './run-process-basic-form';\nimport { RunProcessAdvancedForm } from './run-process-advanced-form';\nimport { RunProcessInputsForm } from 'views/run-process-panel/run-process-inputs-form';\nimport { CommandInputParameter, WorkflowResource } from 'models/workflow';\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { isValid, getFormSyncErrors } from 'redux-form';\nimport { createStructuredSelector } from 'reselect';\nimport { selectPreset } from 'store/run-process-panel/run-process-panel-actions';\nimport { getResource } from 'store/resources/resources';\nimport { ProjectResource } from 'models/project';\nimport { runProcessPanelActions } from 'store/run-process-panel/run-process-panel-actions';\nimport { getUserUuid } from 'common/getuser';\n\nexport interface RunProcessSecondStepFormDataProps {\n    userUuid: string;\n    inputs: CommandInputParameter[];\n    workflow?: WorkflowResource;\n    workflowOwner?: ProjectResource;\n    defaultTargetProject?: ProjectResource;\n    presets?: WorkflowResource[];\n    selectedPreset?: WorkflowResource;\n    valid: boolean;\n}\n\nexport interface RunProcessSecondStepFormActionProps {\n    goBack: () => void;\n    runProcess: () => void;\n    onPresetChange: (preset: WorkflowResource) => void;\n    setProcessOwner: (ownerUuid: string) => void;\n}\n\nconst selectedWorkflowSelector = (state: RootState) =>\n    state.runProcessPanel.selectedWorkflow;\n\nconst presetsSelector = (state: RootState) =>\n    state.runProcessPanel.presets;\n\nconst selectedPresetSelector = (state: RootState) =>\n    state.runProcessPanel.selectedPreset;\n\nconst inputsSelector = (state: RootState) =>\n    state.runProcessPanel.inputs;\n\nconst validSelector = (state: RootState) => {\n    let isBasicFormValid = isValid(RUN_PROCESS_BASIC_FORM)(state);\n    if (isBasicFormValid === false) {\n        const syncErrors = getFormSyncErrors(RUN_PROCESS_BASIC_FORM)(state) as any;\n        if (syncErrors && 'owner' in syncErrors && syncErrors.owner === true) {\n            const defaultOwner = getResource<any>(state.runProcessPanel.processOwnerUuid)(state.resources);\n            if (defaultOwner && defaultOwner.canWrite) {\n                isBasicFormValid = true;\n            }\n        }\n    }\n    return isBasicFormValid && isValid(RUN_PROCESS_INPUTS_FORM)(state) && isValid(RUN_PROCESS_ADVANCED_FORM)(state);\n}\n\nconst workflowOwnerSelector = (state: RootState) =>\n    getResource<ProjectResource>(state.runProcessPanel.selectedWorkflow?.ownerUuid)(state.resources);\n\nconst defaultTargetProjectSelector = (state: RootState) =>\n    getResource<ProjectResource>(state.runProcessPanel.processOwnerUuid)(state.resources);\n\nconst userUuidSelector = (state: RootState) =>\n    getUserUuid(state);\n\nconst mapStateToProps = createStructuredSelector({\n    userUuid: userUuidSelector,\n    inputs: inputsSelector,\n    valid: validSelector,\n    workflow: selectedWorkflowSelector,\n    workflowOwner: workflowOwnerSelector,\n    defaultTargetProject: defaultTargetProjectSelector,\n    presets: presetsSelector,\n    
selectedPreset: selectedPresetSelector,\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    setProcessOwner: (ownerUuid: string) => dispatch<any>(runProcessPanelActions.SET_PROCESS_OWNER_UUID(ownerUuid)),\n    onPresetChange: selectPreset,\n});\n\nexport type RunProcessSecondStepFormProps = RunProcessSecondStepFormDataProps & RunProcessSecondStepFormActionProps;\nexport const RunProcessSecondStepForm = connect(mapStateToProps, mapDispatchToProps)(\n    ({ userUuid, inputs, workflow, workflowOwner, defaultTargetProject, selectedPreset, presets, onPresetChange, valid, goBack, runProcess, setProcessOwner }: RunProcessSecondStepFormProps) => {\n        return <Grid container spacing={2} data-cy=\"new-process-panel\">\n                <Grid item xs={12}>\n                    <RunProcessBasicForm workflow={workflow} />\n                    <RunProcessInputsForm inputs={inputs} />\n                    <RunProcessAdvancedForm />\n                </Grid>\n                <Grid item xs={12}>\n                    <Button color=\"primary\" onClick={goBack}>\n                        Back\n                    </Button>\n                    <Button disabled={!valid} variant=\"contained\" color=\"primary\" onClick={runProcess}>\n                        Run workflow\n                    </Button>\n                </Grid>\n            </Grid>\n        }\n    );\n"
  },
  {
    "path": "services/workbench2/src/views/run-process-panel/workflow-preset-select.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Select, FormControl, InputLabel, MenuItem, Tooltip } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WorkflowResource } from 'models/workflow';\nimport { DetailsIcon } from 'components/icon/icon';\n\nexport interface WorkflowPresetSelectProps {\n    workflow: WorkflowResource;\n    selectedPreset: WorkflowResource;\n    presets: WorkflowResource[];\n    onChange: (preset: WorkflowResource) => void;\n}\n\ntype CssRules = 'root' | 'icon';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    root: {\n        display: 'flex',\n    },\n    icon: {\n        color: 'rgba(0, 0, 0, 0.38)',\n        marginTop: 18,\n        marginLeft: 8,\n    },\n});\n\nexport const WorkflowPresetSelect = withStyles(styles)(\n    class extends React.Component<WorkflowPresetSelectProps & WithStyles<CssRules>> {\n\n        render() {\n\n            const { selectedPreset, workflow, presets, classes } = this.props;\n\n            return (\n                <div className={classes.root}>\n                    <FormControl variant=\"standard\" fullWidth>\n                        <InputLabel>Preset</InputLabel>\n                        <Select\n                            variant=\"standard\"\n                            value={selectedPreset.uuid}\n                            onChange={(event: any)=>this.handleChange(event)}>\n                            <MenuItem value={workflow.uuid}>\n                                <em>Default</em>\n                            </MenuItem>\n                            {presets.map(\n                                ({ uuid, name }) => <MenuItem key={uuid} value={uuid}>{name}</MenuItem>\n                            )}\n                        </Select>\n                    </FormControl>\n                    <Tooltip title='List of already defined set of inputs to run a workflow'>\n                        <DetailsIcon className={classes.icon} />\n                    </Tooltip>\n                </div >\n            );\n        }\n\n        handleChange = ({ target }: React.ChangeEvent<HTMLSelectElement>) => {\n\n            const { workflow, presets, onChange } = this.props;\n\n            const selectedPreset = [workflow, ...presets]\n                .find(({ uuid }) => uuid === target.value);\n\n            if (selectedPreset) {\n                onChange(selectedPreset);\n            }\n        }\n    });\n"
  },
  {
    "path": "services/workbench2/src/views/search-results-panel/search-results-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DataColumns, SortDirection } from 'components/data-table/data-column';\nimport {\n    ResourceCluster,\n    ResourceFileSize,\n    ResourceLastModifiedDate,\n    ResourceName,\n    ResourceOwnerWithName,\n    ResourceStatus,\n    ResourceType\n} from 'views-components/data-explorer/renderers';\nimport { createTree } from 'models/tree';\nimport { getInitialSearchTypeFilters } from 'store/resource-type-filters/resource-type-filters';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\n\nexport enum SearchResultsPanelColumnNames {\n    CLUSTER = \"Cluster\",\n    NAME = \"Name\",\n    STATUS = \"Status\",\n    TYPE = 'Type',\n    OWNER = \"Owner\",\n    FILE_SIZE = \"File size\",\n    LAST_MODIFIED = \"Last modified\"\n}\n\nexport const searchResultsPanelColumns: DataColumns<string, GroupContentsResource> = [\n    {\n        name: SearchResultsPanelColumnNames.CLUSTER,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid: string) => <ResourceCluster uuid={uuid} />\n    },\n    {\n        name: SearchResultsPanelColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"name\" },\n        filters: createTree(),\n        render: (uuid: string) => <ResourceName uuid={uuid} />\n    },\n    {\n        name: SearchResultsPanelColumnNames.STATUS,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceStatus uuid={uuid} />\n    },\n    {\n        name: SearchResultsPanelColumnNames.TYPE,\n        selected: true,\n        configurable: true,\n        filters: getInitialSearchTypeFilters(),\n        render: (uuid: string) => <ResourceType uuid={uuid} />,\n    },\n    {\n        name: SearchResultsPanelColumnNames.OWNER,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceOwnerWithName uuid={uuid} />\n    },\n    {\n        name: SearchResultsPanelColumnNames.FILE_SIZE,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceFileSize uuid={uuid} />\n    },\n    {\n        name: SearchResultsPanelColumnNames.LAST_MODIFIED,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.DESC, field: \"modifiedAt\" },\n        filters: createTree(),\n        render: uuid => <ResourceLastModifiedDate uuid={uuid} />\n    }\n];\n"
  },
  {
    "path": "services/workbench2/src/views/search-results-panel/search-results-panel-view.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useEffect, useCallback, useState } from 'react';\nimport { DataTableFilterItem } from 'components/data-table-filters/data-table-filters';\nimport { extractUuidKind, ResourceKind } from 'models/resource';\nimport { ContainerRequestState } from 'models/container-request';\nimport { SEARCH_RESULTS_PANEL_ID } from 'store/search-results-panel/search-results-panel-actions';\nimport { DataExplorer } from 'views-components/data-explorer/data-explorer';\nimport { ResourceCluster } from 'views-components/data-explorer/renderers';\nimport servicesProvider from 'common/service-provider';\nimport { SearchResultsPanelProps } from \"./search-results-panel\";\nimport { Routes } from 'routes/routes';\nimport { Link } from 'react-router-dom';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { getSearchSessions } from 'store/search-bar/search-bar-actions';\nimport { camelCase } from 'lodash';\n\nexport type CssRules = 'siteManagerLink' | 'searchResults';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    searchResults: {\n        width: '100%'\n    },\n    siteManagerLink: {\n        marginRight: theme.spacing(2),\n        float: 'right'\n    }\n});\n\nexport interface WorkflowPanelFilter extends DataTableFilterItem {\n    type: ResourceKind | ContainerRequestState;\n}\n\nexport const SearchResultsPanelView = withStyles(styles, { withTheme: true })(\n    (props: SearchResultsPanelProps & WithStyles<CssRules, true>) => {\n        const homeCluster = props.user.uuid.substring(0, 5);\n        const loggedIn = props.sessions.filter((ss) => ss.loggedIn && ss.userIsActive);\n        const [selectedItem, setSelectedItem] = useState('');\n        const [itemPath, setItemPath] = useState<string[]>([]);\n\n        useEffect(() => {\n            let tmpPath: string[] = [];\n\n            (async () => {\n                if (selectedItem !== '') {\n                    let searchUuid = selectedItem;\n                    let itemKind = extractUuidKind(searchUuid);\n\n                    while (itemKind !== ResourceKind.USER) {\n                        const clusterId = searchUuid.split('-')[0];\n                        const serviceType = camelCase(itemKind?.replace('arvados#', ''));\n                        const service = Object.values(servicesProvider.getServices())\n                            .filter(({ resourceType }) => !!resourceType)\n                            .find(({ resourceType }) => camelCase(resourceType).indexOf(serviceType) > -1);\n                        const sessions = getSearchSessions(clusterId, props.sessions);\n\n                        if (sessions.length > 0) {\n                            const session = sessions[0];\n                            const { name, ownerUuid } = await (service as any).get(searchUuid, false, undefined, session);\n                            tmpPath.push(name);\n                            searchUuid = ownerUuid;\n                            itemKind = extractUuidKind(searchUuid);\n                        } else {\n                            break;\n                        }\n                    }\n\n                    tmpPath.push(props.user.uuid === searchUuid ? 
'Projects' : 'Shared with me');\n                    setItemPath(tmpPath);\n                }\n            })();\n        }, [selectedItem]);\n\n        const onItemClick = useCallback((uuid) => {\n            setSelectedItem(uuid);\n            props.onItemClick(uuid);\n        }, [props.onItemClick]);\n\n        return <span data-cy='search-results' className={props.classes.searchResults}>\n            <DataExplorer\n                id={SEARCH_RESULTS_PANEL_ID}\n                onRowClick={onItemClick}\n                onRowDoubleClick={props.onItemDoubleClick}\n                onContextMenu={props.onContextMenu}\n                contextMenuColumn={false}\n                elementPath={`/ ${[...itemPath].reverse().join(' / ')}`}\n                hideSearchInput\n                title={\n                    <div>\n                        {loggedIn.length === 1 ?\n                            <span>Searching local cluster <ResourceCluster uuid={props.localCluster} /></span>\n                            : <span>Searching clusters: {loggedIn.map((ss) => <span key={ss.clusterId}>\n                                <a href={props.remoteHostsConfig[ss.clusterId] && props.remoteHostsConfig[ss.clusterId].workbench2Url} style={{ textDecoration: 'none' }}> <ResourceCluster uuid={ss.clusterId} /></a>\n                            </span>)}</span>}\n                        {loggedIn.length === 1 && props.localCluster !== homeCluster ?\n                            <span>To search multiple clusters, <a href={props.remoteHostsConfig[homeCluster] && props.remoteHostsConfig[homeCluster].workbench2Url}>start from your home Workbench.</a></span>\n                            : <span style={{ marginLeft: \"2em\" }}>Use <Link to={Routes.SITE_MANAGER}>Site Manager</Link> to manage which clusters will be searched.</span>}\n                    </div>\n                }\n            /></span>;\n    });\n"
  },
  {
    "path": "services/workbench2/src/views/search-results-panel/search-results-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { navigateTo } from 'store/navigation/navigation-action';\nimport { openSearchResultsContextMenu } from 'store/context-menu/context-menu-actions';\nimport { SearchResultsPanelView } from 'views/search-results-panel/search-results-panel-view';\nimport { RootState } from 'store/store';\nimport { SearchBarAdvancedFormData } from 'models/search-bar';\nimport { User } from \"models/user\";\nimport { Config } from 'common/config';\nimport { Session } from \"models/session\";\nimport { toggleOne } from \"store/multiselect/multiselect-actions\";\n\nexport interface SearchResultsPanelDataProps {\n    data: SearchBarAdvancedFormData;\n    user: User;\n    sessions: Session[];\n    remoteHostsConfig: { [key: string]: Config };\n    localCluster: string;\n}\n\nexport interface SearchResultsPanelActionProps {\n    onItemClick: (item: string) => void;\n    onContextMenu: (event: React.MouseEvent<HTMLElement>, item: string) => void;\n    onDialogOpen: (ownerUuid: string) => void;\n    onItemDoubleClick: (item: string) => void;\n}\n\nexport type SearchResultsPanelProps = SearchResultsPanelDataProps & SearchResultsPanelActionProps;\n\nconst mapStateToProps = (rootState: RootState) => {\n    return {\n        user: rootState.auth.user,\n        sessions: rootState.auth.sessions,\n        remoteHostsConfig: rootState.auth.remoteHostsConfig,\n        localCluster: rootState.auth.localCluster,\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): SearchResultsPanelActionProps => ({\n    onContextMenu: (event, resourceUuid) => {\n        dispatch<any>(openSearchResultsContextMenu(event, resourceUuid));\n    },\n    onDialogOpen: (ownerUuid: string) => { return; },\n    onItemClick: (resourceUuid: string) => {\n        dispatch<any>(toggleOne(resourceUuid))\n    },\n    onItemDoubleClick: uuid => {\n        dispatch<any>(navigateTo(uuid));\n    }\n});\n\nexport const SearchResultsPanel = connect(mapStateToProps, mapDispatchToProps)(SearchResultsPanelView);\n"
  },
  {
    "path": "services/workbench2/src/views/shared-with-me-panel/shared-with-me-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { DataColumns } from 'components/data-table/data-column';\nimport {\n    ResourceName,\n    ProcessStatus as ResourceStatus,\n    ResourceType,\n    ResourceOwnerWithNameLink,\n    ResourcePortableDataHash,\n    ResourceFileSize,\n    ResourceFileCount,\n    ResourceUUID,\n    ResourceContainerUuid,\n    ContainerRunTime,\n    ResourceOutputUuid,\n    ResourceLogUuid,\n    ResourceParentProcess,\n    ResourceModifiedByUserUuid,\n    ResourceVersion,\n    ResourceCreatedAtDate,\n    ResourceLastModifiedDate,\n    ResourceTrashDate,\n    ResourceDeleteDate,\n} from 'views-components/data-explorer/renderers';\nimport { ProjectResource } from 'models/project';\nimport { createTree } from 'models/tree';\nimport { SortDirection } from 'components/data-table/data-column';\nimport { getInitialResourceTypeFilters, getInitialProcessStatusFilters } from 'store/resource-type-filters/resource-type-filters';\n\nexport enum SharedWithMePanelColumnNames {\n    NAME = 'Name',\n    STATUS = 'Status',\n    TYPE = 'Type',\n    OWNER = 'Owner',\n    PORTABLE_DATA_HASH = 'Portable Data Hash',\n    FILE_SIZE = 'File Size',\n    FILE_COUNT = 'File Count',\n    UUID = 'UUID',\n    CONTAINER_UUID = 'Container UUID',\n    RUNTIME = 'Runtime',\n    OUTPUT_UUID = 'Output UUID',\n    LOG_UUID = 'Log UUID',\n    PARENT_PROCESS = 'Parent Process UUID',\n    MODIFIED_BY_USER_UUID = 'Modified by User UUID',\n    VERSION = 'Version',\n    CREATED_AT = 'Date Created',\n    LAST_MODIFIED = 'Last Modified',\n    TRASH_AT = 'Trash at',\n    DELETE_AT = 'Delete at',\n}\n\nexport const sharedWithMePanelColumns: DataColumns<string, ProjectResource> = [\n    {\n        name: SharedWithMePanelColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: 'name' },\n        filters: createTree(),\n        render: (uuid) => <ResourceName uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.STATUS,\n        selected: true,\n        configurable: true,\n        mutuallyExclusiveFilters: true,\n        filters: getInitialProcessStatusFilters(),\n        render: (uuid) => <ResourceStatus uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.TYPE,\n        selected: true,\n        configurable: true,\n        filters: getInitialResourceTypeFilters(),\n        render: (uuid) => <ResourceType uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.OWNER,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceOwnerWithNameLink uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.PORTABLE_DATA_HASH,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourcePortableDataHash uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.FILE_SIZE,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceFileSize uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.FILE_COUNT,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceFileCount uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.UUID,\n        selected: false,\n        configurable: true,\n        
filters: createTree(),\n        render: (uuid) => <ResourceUUID uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.CONTAINER_UUID,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceContainerUuid uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.RUNTIME,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ContainerRunTime uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.OUTPUT_UUID,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceOutputUuid uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.LOG_UUID,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceLogUuid uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.PARENT_PROCESS,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceParentProcess uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.MODIFIED_BY_USER_UUID,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceModifiedByUserUuid uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.VERSION,\n        selected: false,\n        configurable: true,\n        filters: createTree(),\n        render: (uuid) => <ResourceVersion uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.CREATED_AT,\n        selected: false,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: 'createdAt' },\n        filters: createTree(),\n        render: (uuid) => <ResourceCreatedAtDate uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.LAST_MODIFIED,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.DESC, field: 'modifiedAt' },\n        filters: createTree(),\n        render: (uuid) => <ResourceLastModifiedDate uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.TRASH_AT,\n        selected: false,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: 'trashAt' },\n        filters: createTree(),\n        render: (uuid) => <ResourceTrashDate uuid={uuid} />,\n    },\n    {\n        name: SharedWithMePanelColumnNames.DELETE_AT,\n        selected: false,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: 'deleteAt' },\n        filters: createTree(),\n        render: (uuid) => <ResourceDeleteDate uuid={uuid} />,\n    },\n];\n"
  },
  {
    "path": "services/workbench2/src/views/shared-with-me-panel/shared-with-me-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { connect, DispatchProp } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { ShareMeIcon } from 'components/icon/icon';\nimport { ResourcesState, getResource } from 'store/resources/resources';\nimport { ResourceKind } from 'models/resource';\nimport { navigateTo } from \"store/navigation/navigation-action\";\nimport { loadDetailsPanel } from \"store/details-panel/details-panel-action\";\nimport { SHARED_WITH_ME_PANEL_ID } from 'store/shared-with-me-panel/shared-with-me-panel-actions';\nimport { openContextMenuAndSelect } from 'store/context-menu/context-menu-actions';\nimport { DataTableFilterItem } from 'components/data-table-filters/data-table-filters';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { toggleOne } from 'store/multiselect/multiselect-actions';\nimport { ContainerRequestState } from 'models/container-request';\nimport { resourceToMenuKind } from 'common/resource-to-menu-kind';\n\n\ntype CssRules = \"toolbar\" | \"button\" | \"root\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    toolbar: {\n        paddingBottom: theme.spacing(3),\n        textAlign: \"right\"\n    },\n    button: {\n        marginLeft: theme.spacing(1)\n    },\n    root: {\n        width: '100%',\n        boxShadow: \"0px 1px 3px 0px rgb(0 0 0 / 20%), 0px 1px 1px 0px rgb(0 0 0 / 14%), 0px 2px 1px -1px rgb(0 0 0 / 12%)\",\n    },\n});\n\n\nexport interface ProjectPanelFilter extends DataTableFilterItem {\n    type: ResourceKind | ContainerRequestState;\n}\n\n\n\ninterface SharedWithMePanelDataProps {\n    resources: ResourcesState;\n    userUuid: string;\n}\n\ntype SharedWithMePanelProps = SharedWithMePanelDataProps & DispatchProp & WithStyles<CssRules>;\n\nexport const SharedWithMePanel = withStyles(styles)(\n    connect((state: RootState) => ({\n        resources: state.resources,\n        userUuid: state.auth.user!.uuid,\n    }))(\n        class extends React.Component<SharedWithMePanelProps> {\n            render() {\n                return <div className={this.props.classes.root}><DataExplorer\n                    id={SHARED_WITH_ME_PANEL_ID}\n                    onRowClick={this.handleRowClick}\n                    onRowDoubleClick={this.handleRowDoubleClick}\n                    onContextMenu={this.handleContextMenu}\n                    contextMenuColumn={false}\n                    defaultViewIcon={ShareMeIcon}\n                    defaultViewMessages={['No shared items']} />\n                </div>;\n            }\n\n            handleContextMenu = (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => {\n                const { resources } = this.props;\n                const resource = getResource<GroupContentsResource>(resourceUuid)(resources);\n                const menuKind = this.props.dispatch<any>(resourceToMenuKind(resourceUuid));\n                if (menuKind && resource) {\n                    this.props.dispatch<any>(openContextMenuAndSelect(event, {\n                        name: resource.name,\n                        uuid: 
resource.uuid,\n                        description: resource.description,\n                        ownerUuid: resource.ownerUuid,\n                        isTrashed: ('isTrashed' in resource) ? resource.isTrashed : false,\n                        kind: resource.kind,\n                        menuKind\n                    }));\n                }\n                this.props.dispatch<any>(loadDetailsPanel(resourceUuid));\n            }\n\n            handleRowDoubleClick = (uuid: string) => {\n                this.props.dispatch<any>(navigateTo(uuid));\n            }\n\n            handleRowClick = (uuid: string) => {\n                this.props.dispatch<any>(toggleOne(uuid));\n            }\n        }\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views/site-manager-panel/site-manager-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport {\n    Card,\n    CardContent,\n    CircularProgress,\n    Grid,\n    IconButton,\n    Table,\n    TableBody,\n    TableCell,\n    TableHead,\n    TableRow,\n    Typography,\n} from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { Session, SessionStatus } from \"models/session\";\nimport Button from \"@mui/material/Button\";\nimport { compose, Dispatch } from \"redux\";\nimport { Field, FormErrors, InjectedFormProps, reduxForm, reset, stopSubmit } from \"redux-form\";\nimport { TextField } from \"components/text-field/text-field\";\nimport { addSession } from \"store/auth/auth-action-session\";\nimport { SITE_MANAGER_REMOTE_HOST_VALIDATION } from \"validators/validators\";\nimport { Config } from 'common/config';\nimport { ResourceCluster } from 'views-components/data-explorer/renderers';\nimport { TrashIcon } from \"components/icon/icon\";\n\ntype CssRules = 'root' | 'link' | 'buttonContainer' | 'table' | 'tableRow' |\n    'remoteSiteInfo' | 'buttonAdd' | 'buttonLoggedIn' | 'buttonLoggedOut' |\n    'statusCell';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        overflow: 'auto'\n    },\n    link: {\n        color: theme.palette.primary.main,\n        textDecoration: 'none',\n        margin: '0px 4px'\n    },\n    buttonContainer: {\n        textAlign: 'right'\n    },\n    table: {\n        marginTop: theme.spacing(1)\n    },\n    tableRow: {\n        '& td, th': {\n            whiteSpace: 'nowrap'\n        }\n    },\n    statusCell: {\n        minWidth: 160\n    },\n    remoteSiteInfo: {\n        marginTop: 20\n    },\n    buttonAdd: {\n        marginLeft: 10,\n        marginTop: theme.spacing(3),\n    },\n    buttonLoggedIn: {\n        minHeight: theme.spacing(1),\n        padding: 5,\n        color: '#fff',\n        backgroundColor: '#009966',\n        '&:hover': {\n            backgroundColor: '#008450',\n        }\n    },\n    buttonLoggedOut: {\n        minHeight: theme.spacing(1),\n        padding: 5,\n        color: '#000',\n        backgroundColor: '#FFC414',\n        '&:hover': {\n            backgroundColor: '#eaaf14',\n        }\n    }\n});\n\nexport interface SiteManagerPanelRootActionProps {\n    toggleSession: (session: Session) => void;\n    removeSession: (session: Session) => void;\n}\n\nexport interface SiteManagerPanelRootDataProps {\n    sessions: Session[];\n    remoteHostsConfig: { [key: string]: Config };\n    localClusterConfig: Config;\n}\n\ntype SiteManagerPanelRootProps = SiteManagerPanelRootDataProps & SiteManagerPanelRootActionProps & WithStyles<CssRules> & InjectedFormProps;\nconst SITE_MANAGER_FORM_NAME = 'siteManagerForm';\n\nconst submitSession = (remoteHost: string) =>\n    (dispatch: Dispatch) => {\n        dispatch<any>(addSession(remoteHost, undefined, true)).then(() => {\n            dispatch(reset(SITE_MANAGER_FORM_NAME));\n        }).catch((e: any) => {\n            const errors = {\n                remoteHost: e\n            } as FormErrors;\n            dispatch(stopSubmit(SITE_MANAGER_FORM_NAME, errors));\n        });\n    };\n\nexport const SiteManagerPanelRoot = compose(\n    reduxForm<{ remoteHost: string }>({\n        form: 
SITE_MANAGER_FORM_NAME,\n        touchOnBlur: false,\n        onSubmit: (data, dispatch) => {\n            dispatch(submitSession(data.remoteHost));\n        }\n    }),\n    withStyles(styles))\n    (({ classes, sessions, handleSubmit, toggleSession, removeSession, localClusterConfig, remoteHostsConfig }: SiteManagerPanelRootProps) =>\n        <Card className={classes.root}>\n            <CardContent>\n                <Grid container direction=\"row\">\n                    <Grid item xs={12}>\n                        <Typography paragraph={true}>\n                            You can log in to multiple Arvados sites here, then use the multi-site search page to search collections and projects on all sites at once.\n                        </Typography>\n                    </Grid>\n                </Grid>\n                <Grid item xs={12}>\n                    {sessions.length > 0 && <Table className={classes.table}>\n                        <TableHead>\n                            <TableRow className={classes.tableRow}>\n                                <TableCell>Cluster ID</TableCell>\n                                <TableCell>Host</TableCell>\n                                <TableCell>Email</TableCell>\n                                <TableCell>UUID</TableCell>\n                                <TableCell>Status</TableCell>\n                                <TableCell>Actions</TableCell>\n                            </TableRow>\n                        </TableHead>\n                        <TableBody>\n                            {sessions.map((session, index) => {\n                                const validating = session.status === SessionStatus.BEING_VALIDATED;\n                                return (\n                                    <TableRow key={index} className={classes.tableRow}>\n                                        <TableCell>{remoteHostsConfig[session.clusterId] ?\n                                            <a href={remoteHostsConfig[session.clusterId].workbench2Url} style={{ textDecoration: 'none' }}> <ResourceCluster uuid={session.clusterId} /></a>\n                                            : session.clusterId}</TableCell>\n                                        <TableCell>{session.remoteHost}</TableCell>\n                                        <TableCell>{validating ? <CircularProgress size={20} /> : session.email}</TableCell>\n                                        <TableCell>{validating ? <CircularProgress size={20} /> : session.uuid}</TableCell>\n                                        <TableCell className={classes.statusCell}>\n                                            <Button fullWidth\n                                                disabled={validating || session.status === SessionStatus.INVALIDATED || session.active}\n                                                className={session.loggedIn ? classes.buttonLoggedIn : classes.buttonLoggedOut}\n                                                onClick={() => toggleSession(session)}>\n                                                {validating ? \"Validating\"\n                                                    : (session.loggedIn ?\n                                                        (session.userIsActive ? 
\"Logged in\" : \"Inactive\")\n                                                        : \"Logged out\")}\n                                            </Button>\n                                        </TableCell>\n                                        <TableCell>\n                                            {session.clusterId !== localClusterConfig.uuidPrefix &&\n                                                !localClusterConfig.clusterConfig.RemoteClusters[session.clusterId] &&\n                                                <IconButton onClick={() => removeSession(session)} size=\"large\">\n                                                    <TrashIcon />\n                                                </IconButton>}\n                                        </TableCell>\n                                    </TableRow>\n                                );\n                            })}\n                        </TableBody>\n                    </Table>}\n                </Grid>\n                <form onSubmit={handleSubmit}>\n                    <Grid container direction=\"row\">\n                        <Grid item xs={12}>\n                            <Typography paragraph={true} className={classes.remoteSiteInfo}>\n                                To add a remote Arvados site, paste the remote site's host here (see \"ARVADOS_API_HOST\" on the \"current token\" page).\n                        </Typography>\n                        </Grid>\n                        <Grid item xs={8}>\n                            <Field\n                                name='remoteHost'\n                                validate={SITE_MANAGER_REMOTE_HOST_VALIDATION}\n                                component={TextField as any}\n                                placeholder=\"zzzz.arvadosapi.com\"\n                                margin=\"normal\"\n                                label=\"New cluster\"\n                                autoFocus />\n                        </Grid>\n                        <Grid item xs={3}>\n                            <Button type=\"submit\" variant=\"contained\" color=\"primary\"\n                                className={classes.buttonAdd}>\n                                {\"ADD\"}</Button>\n                        </Grid>\n                    </Grid>\n                </form>\n            </CardContent>\n        </Card>\n    );\n"
  },
  {
    "path": "services/workbench2/src/views/site-manager-panel/site-manager-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport {\n    SiteManagerPanelRoot, SiteManagerPanelRootActionProps,\n    SiteManagerPanelRootDataProps\n} from \"views/site-manager-panel/site-manager-panel-root\";\nimport { Session } from \"models/session\";\nimport { toggleSession, removeSession } from \"store/auth/auth-action-session\";\n\nconst mapStateToProps = (state: RootState): SiteManagerPanelRootDataProps => {\n    return {\n        sessions: state.auth.sessions,\n        remoteHostsConfig: state.auth.remoteHostsConfig,\n        localClusterConfig: state.auth.remoteHostsConfig[state.auth.localCluster]\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): SiteManagerPanelRootActionProps => ({\n    toggleSession: (session: Session) => {\n        dispatch<any>(toggleSession(session));\n    },\n    removeSession: (session: Session) => {\n        dispatch<any>(removeSession(session.clusterId));\n    },\n});\n\nexport const SiteManagerPanel = connect(mapStateToProps, mapDispatchToProps)(SiteManagerPanelRoot);\n"
  },
  {
    "path": "services/workbench2/src/views/ssh-key-panel/ssh-key-admin-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport { openSshKeyCreateDialog, openPublicKeyDialog } from 'store/auth/auth-action-ssh';\nimport { openSshKeyContextMenu } from 'store/context-menu/context-menu-actions';\nimport { SshKeyPanelRoot, SshKeyPanelRootDataProps, SshKeyPanelRootActionProps } from 'views/ssh-key-panel/ssh-key-panel-root';\n\nconst mapStateToProps = (state: RootState): SshKeyPanelRootDataProps => {\n    return {\n        sshKeys: state.auth.sshKeys,\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): SshKeyPanelRootActionProps => ({\n    openSshKeyCreateDialog: () => {\n        dispatch<any>(openSshKeyCreateDialog());\n    },\n    openRowOptions: (event, sshKey) => {\n        dispatch<any>(openSshKeyContextMenu(event, sshKey));\n    },\n    openPublicKeyDialog: (name: string, publicKey: string) => {\n        dispatch<any>(openPublicKeyDialog(name, publicKey));\n    }\n});\n\nexport const SshKeyAdminPanel = connect(mapStateToProps, mapDispatchToProps)(SshKeyPanelRoot);\n"
  },
  {
    "path": "services/workbench2/src/views/ssh-key-panel/ssh-key-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport {\n    Card,\n    CardContent,\n    Button,\n    Typography,\n    Grid,\n    Table,\n    TableHead,\n    TableRow,\n    TableCell,\n    TableBody,\n    Tooltip,\n    IconButton,\n} from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { SshKeyResource } from 'models/ssh-key';\nimport { AddIcon, MoreVerticalIcon, KeyIcon } from 'components/icon/icon';\n\ntype CssRules = 'root' | 'link' | 'buttonContainer' | 'table' | 'tableRow' | 'keyIcon';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        overflow: 'auto'\n    },\n    link: {\n        color: theme.palette.primary.main,\n        textDecoration: 'none',\n        margin: '0px 4px'\n    },\n    buttonContainer: {\n        textAlign: 'right'\n    },\n    table: {\n        marginTop: theme.spacing(1)\n    },\n    tableRow: {\n        '& td, th': {\n            whiteSpace: 'nowrap'\n        }\n    },\n    keyIcon: {\n        color: theme.palette.primary.main\n    }\n});\n\nexport interface SshKeyPanelRootActionProps {\n    openSshKeyCreateDialog: () => void;\n    openRowOptions: (event: React.MouseEvent<HTMLElement>, sshKey: SshKeyResource) => void;\n    openPublicKeyDialog: (name: string, publicKey: string) => void;\n}\n\nexport interface SshKeyPanelRootDataProps {\n    sshKeys: SshKeyResource[];\n}\n\ntype SshKeyPanelRootProps = SshKeyPanelRootDataProps & SshKeyPanelRootActionProps & WithStyles<CssRules>;\n\nexport const SshKeyPanelRoot = withStyles(styles)(\n    ({ classes, sshKeys, openSshKeyCreateDialog, openPublicKeyDialog, openRowOptions }: SshKeyPanelRootProps) => {\n        const hasKeys = React.useMemo(() => sshKeys.length > 0, [sshKeys.length]);\n        return <Card className={classes.root}>\n            <CardContent>\n                <Grid container direction=\"row\">\n                    <Grid item xs={8}>\n                        {!hasKeys && <Typography paragraph={true} >\n                            You have not yet set up an SSH public key for use with Arvados.\n                            <a href='https://doc.arvados.org/user/getting_started/ssh-access-unix.html'\n                                target='blank' rel=\"noopener\" className={classes.link}>\n                                Learn more.\n                            </a>\n                        </Typography>}\n                        {!hasKeys && <Typography paragraph={true}>\n                            When you have an SSH key you would like to use, add it using button below.\n                        </Typography>}\n                    </Grid>\n                    <Grid item xs={4} className={classes.buttonContainer}>\n                        <Button onClick={openSshKeyCreateDialog} color=\"primary\" variant=\"contained\">\n                            <AddIcon /> Add New Ssh Key\n                        </Button>\n                    </Grid>\n                </Grid>\n                <Grid item xs={12}>\n                    {hasKeys && <Table className={classes.table}>\n                        <TableHead>\n                            <TableRow className={classes.tableRow}>\n                                <TableCell>Name</TableCell>\n                      
          <TableCell>UUID</TableCell>\n                                <TableCell>Authorized user</TableCell>\n                                <TableCell>Expires at</TableCell>\n                                <TableCell>Key type</TableCell>\n                                <TableCell>Public Key</TableCell>\n                                <TableCell />\n                            </TableRow>\n                        </TableHead>\n                        <TableBody>\n                            {sshKeys.map((sshKey, index) =>\n                                <TableRow key={index} className={classes.tableRow}>\n                                    <TableCell>{sshKey.name}</TableCell>\n                                    <TableCell>{sshKey.uuid}</TableCell>\n                                    <TableCell>{sshKey.authorizedUserUuid}</TableCell>\n                                    <TableCell>{sshKey.expiresAt || '(none)'}</TableCell>\n                                    <TableCell>{sshKey.keyType}</TableCell>\n                                    <TableCell>\n                                        <Tooltip title=\"Public Key\" disableFocusListener>\n                                            <IconButton\n                                                onClick={() => openPublicKeyDialog(sshKey.name, sshKey.publicKey)}\n                                                size=\"large\">\n                                                <KeyIcon className={classes.keyIcon} />\n                                            </IconButton>\n                                        </Tooltip>\n                                    </TableCell>\n                                    <TableCell>\n                                        <Tooltip title=\"More options\" disableFocusListener>\n                                            <IconButton onClick={event => openRowOptions(event, sshKey)} size=\"large\">\n                                                <MoreVerticalIcon />\n                                            </IconButton>\n                                        </Tooltip>\n                                    </TableCell>\n                                </TableRow>)}\n                        </TableBody>\n                    </Table>}\n                </Grid>\n            </CardContent>\n        </Card>\n});\n"
  },
  {
    "path": "services/workbench2/src/views/ssh-key-panel/ssh-key-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport { openSshKeyCreateDialog, openPublicKeyDialog } from 'store/auth/auth-action-ssh';\nimport { openSshKeyContextMenu } from 'store/context-menu/context-menu-actions';\nimport { SshKeyPanelRoot, SshKeyPanelRootDataProps, SshKeyPanelRootActionProps } from 'views/ssh-key-panel/ssh-key-panel-root';\n\nconst mapStateToProps = (state: RootState): SshKeyPanelRootDataProps => {\n    const sshKeys = state.auth.sshKeys.filter((key) => {\n      return key.authorizedUserUuid === (state.auth.user ? state.auth.user.uuid : null);\n    });\n\n    return {\n        sshKeys: sshKeys,\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): SshKeyPanelRootActionProps => ({\n    openSshKeyCreateDialog: () => {\n        dispatch<any>(openSshKeyCreateDialog());\n    },\n    openRowOptions: (event, sshKey) => {\n        dispatch<any>(openSshKeyContextMenu(event, sshKey));\n    },\n    openPublicKeyDialog: (name: string, publicKey: string) => {\n        dispatch<any>(openPublicKeyDialog(name, publicKey));\n    }\n});\n\nexport const SshKeyPanel = connect(mapStateToProps, mapDispatchToProps)(SshKeyPanelRoot);\n"
  },
  {
    "path": "services/workbench2/src/views/subprocess-panel/subprocess-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DataColumns, SortDirection } from 'components/data-table/data-column';\nimport { ResourceCreatedAtDate, ProcessStatus, ContainerRunTime } from 'views-components/data-explorer/renderers';\nimport { ResourceName } from 'views-components/data-explorer/renderers';\nimport { createTree } from 'models/tree';\nimport { getInitialProcessStatusFilters } from 'store/resource-type-filters/resource-type-filters';\nimport { ProcessResource } from 'models/process';\n\n\nexport enum SubprocessPanelColumnNames {\n    NAME = \"Name\",\n    STATUS = \"Status\",\n    CREATED_AT = \"Created At\",\n    RUNTIME = \"Run Time\"\n}\n\nexport const subprocessPanelColumns: DataColumns<string, ProcessResource> = [\n    {\n        name: SubprocessPanelColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"name\" },\n        filters: createTree(),\n        render: uuid => <ResourceName uuid={uuid} />\n    },\n    {\n        name: SubprocessPanelColumnNames.STATUS,\n        selected: true,\n        configurable: true,\n        mutuallyExclusiveFilters: true,\n        filters: getInitialProcessStatusFilters(),\n        render: uuid => <ProcessStatus uuid={uuid} />,\n    },\n    {\n        name: SubprocessPanelColumnNames.CREATED_AT,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.DESC, field: \"createdAt\" },\n        filters: createTree(),\n        render: uuid => <ResourceCreatedAtDate uuid={uuid} />\n    },\n    {\n        name: SubprocessPanelColumnNames.RUNTIME,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ContainerRunTime uuid={uuid} />\n    }\n];\n"
  },
  {
    "path": "services/workbench2/src/views/subprocess-panel/subprocess-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { DataTableFilterItem } from 'components/data-table-filters/data-table-filters';\nimport { ContainerRequestState } from 'models/container-request';\nimport { ResourceKind } from 'models/resource';\nimport { ProcessIcon } from 'components/icon/icon';\nimport { SUBPROCESS_PANEL_ID } from 'store/subprocess-panel/subprocess-panel-actions';\nimport { ResourcesState } from 'store/resources/resources';\nimport { MPVPanelProps } from 'components/multi-panel-view/multi-panel-view';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Typography } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { Process } from 'store/processes/process';\n\ntype CssRules = 'iconHeader' | 'cardHeader';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    iconHeader: {\n        fontSize: '1.875rem',\n        color: theme.customs.colors.greyL,\n        marginRight: theme.spacing(2),\n    },\n    cardHeader: {\n        display: 'flex',\n        marginTop: '-5px',\n        marginBottom: '15px',\n    },\n});\n\nexport interface SubprocessPanelFilter extends DataTableFilterItem {\n    type: ResourceKind | ContainerRequestState;\n}\n\nexport interface SubprocessPanelDataProps {\n    process: Process;\n    resources: ResourcesState;\n}\n\nexport interface SubprocessPanelActionProps {\n    onRowClick: (item: string) => void;\n    onContextMenu: (event: React.MouseEvent<HTMLElement>, item: string, resources: ResourcesState) => void;\n    onItemDoubleClick: (item: string) => void;\n}\n\ntype SubprocessPanelProps = SubprocessPanelActionProps & SubprocessPanelDataProps;\n\nconst DEFAULT_VIEW_MESSAGES = [\n    'No subprocesses available for listing.',\n    'The current process may not have any or none matches current filtering.'\n];\n\ntype SubProcessesTitleProps = WithStyles<CssRules>;\n\nconst SubProcessesTitle = withStyles(styles)(\n    ({classes}: SubProcessesTitleProps) =>\n        <div className={classes.cardHeader}>\n            <ProcessIcon className={classes.iconHeader} /><span></span>\n            <Typography noWrap variant='h6' color='inherit'>\n                Subprocesses\n            </Typography>\n        </div>\n);\n\nexport const SubprocessPanelRoot = (props: SubprocessPanelProps & MPVPanelProps) => {\n    return <DataExplorer\n        id={SUBPROCESS_PANEL_ID}\n        onRowClick={props.onRowClick}\n        onRowDoubleClick={props.onItemDoubleClick}\n        onContextMenu={(event, item) => props.onContextMenu(event, item, props.resources)}\n        contextMenuColumn={false}\n        defaultViewIcon={ProcessIcon}\n        defaultViewMessages={DEFAULT_VIEW_MESSAGES}\n        panelName={props.panelName}\n        title={<SubProcessesTitle/>}\n        parentResource={props.process}\n    />;\n};\n"
  },
  {
    "path": "services/workbench2/src/views/subprocess-panel/subprocess-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { openProcessContextMenu } from \"store/context-menu/context-menu-actions\";\nimport { SubprocessPanelRoot, SubprocessPanelActionProps, SubprocessPanelDataProps } from \"views/subprocess-panel/subprocess-panel-root\";\nimport { RootState } from \"store/store\";\nimport { navigateTo } from \"store/navigation/navigation-action\";\nimport { getProcess } from \"store/processes/process\";\nimport { toggleOne } from 'store/multiselect/multiselect-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch): SubprocessPanelActionProps => ({\n    onContextMenu: (event, resourceUuid, resources) => {\n        const process = getProcess(resourceUuid)(resources);\n        if (process) {\n            dispatch<any>(openProcessContextMenu(event, process));\n        }\n    },\n    onRowClick: (uuid: string) => {\n        dispatch<any>(toggleOne(uuid))\n    },\n    onItemDoubleClick: uuid => {\n        dispatch<any>(navigateTo(uuid));\n    },\n});\n\nconst mapStateToProps = (state: RootState): Omit<SubprocessPanelDataProps,'process'> => ({\n    resources: state.resources,\n});\n\nexport const SubprocessPanel = connect(mapStateToProps, mapDispatchToProps)(SubprocessPanelRoot);\n"
  },
  {
    "path": "services/workbench2/src/views/trash-panel/trash-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from \"react-redux\";\nimport { Dispatch } from \"redux\";\nimport { CollectionResource } from 'models/collection';\nimport { RootState } from \"store/store\";\nimport { toggleResourceTrashed } from \"store/trash/trash-actions\";\nimport { getTrashPanelTypeFilters } from 'store/resource-type-filters/resource-type-filters';\nimport {\n    getResource,\n    ResourcesState\n} from \"store/resources/resources\";\nimport { IconButton, Tooltip } from \"@mui/material\";\nimport { DataColumns, SortDirection } from 'components/data-table/data-column';\nimport {\n    ResourceDeleteDate,\n    ResourceFileSize,\n    ResourceName,\n    ResourceTrashDate,\n    ResourceType\n} from \"views-components/data-explorer/renderers\";\nimport { createTree } from 'models/tree';\nimport { RestoreFromTrashIcon } from \"components/icon/icon\";\nimport { TrashableResource } from \"models/resource\";\n\nexport enum TrashPanelColumnNames {\n    NAME = \"Name\",\n    TYPE = \"Type\",\n    FILE_SIZE = \"File size\",\n    TRASHED_DATE = \"Trashed date\",\n    TO_BE_DELETED = \"To be deleted\"\n}\n\nexport const trashPanelColumns: DataColumns<string, CollectionResource> = [\n    {\n        name: TrashPanelColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"name\" },\n        filters: createTree(),\n        render: uuid => <ResourceName uuid={uuid} />\n    },\n    {\n        name: TrashPanelColumnNames.TYPE,\n        selected: true,\n        configurable: true,\n        filters: getTrashPanelTypeFilters(),\n        render: uuid => <ResourceType uuid={uuid} />,\n    },\n    {\n        name: TrashPanelColumnNames.FILE_SIZE,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"fileSizeTotal\" },\n        filters: createTree(),\n        render: uuid => <ResourceFileSize uuid={uuid} />\n    },\n    {\n        name: TrashPanelColumnNames.TRASHED_DATE,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.DESC, field: \"trashAt\" },\n        filters: createTree(),\n        render: uuid => <ResourceTrashDate uuid={uuid} />\n    },\n    {\n        name: TrashPanelColumnNames.TO_BE_DELETED,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"deleteAt\" },\n        filters: createTree(),\n        render: uuid => <ResourceDeleteDate uuid={uuid} />\n    },\n    {\n        name: '',\n        selected: true,\n        configurable: false,\n        filters: createTree(),\n        render: uuid => <ResourceRestore uuid={uuid} />\n    }\n];\n\nexport const ResourceRestore = connect((state: RootState, props: { uuid: string; dispatch?: Dispatch<any>; }) => {\n    return { uuid: props.uuid, resources: state.resources, dispatch: props.dispatch };\n})((props: { uuid: string; resources: ResourcesState; dispatch?: Dispatch<any>; }) => {\n    const resource = getResource<TrashableResource>(props.uuid)(props.resources);\n    return <Tooltip title=\"Restore\">\n        <IconButton\n            style={{ padding: '0' }}\n            onClick={() => {\n                if (resource && props.dispatch) {\n                    props.dispatch(toggleResourceTrashed(\n                        [resource.uuid],\n                        resource.isTrashed\n                  
  ));\n                }\n            } }\n            size=\"large\">\n            <RestoreFromTrashIcon />\n        </IconButton>\n    </Tooltip>;\n});\n"
  },
  {
    "path": "services/workbench2/src/views/trash-panel/trash-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { connect, DispatchProp } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { DataTableFilterItem } from 'components/data-table-filters/data-table-filters';\nimport { ResourceKind, TrashableResource } from 'models/resource';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { TrashIcon } from 'components/icon/icon';\nimport { TRASH_PANEL_ID } from \"store/trash-panel/trash-panel-action\";\nimport { openContextMenuAndSelect } from \"store/context-menu/context-menu-actions\";\nimport { getResource, ResourcesState } from \"store/resources/resources\";\nimport { navigateTo } from \"store/navigation/navigation-action\";\nimport { loadDetailsPanel } from \"store/details-panel/details-panel-action\";\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\nimport { toggleOne } from 'store/multiselect/multiselect-actions';\n\ntype CssRules = \"toolbar\" | \"button\" | \"root\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    toolbar: {\n        paddingBottom: theme.spacing(3),\n        textAlign: \"right\"\n    },\n    button: {\n        marginLeft: theme.spacing(1)\n    },\n    root: {\n        width: '100%',\n        boxShadow: \"0px 1px 3px 0px rgb(0 0 0 / 20%), 0px 1px 1px 0px rgb(0 0 0 / 14%), 0px 2px 1px -1px rgb(0 0 0 / 12%)\",\n    },\n});\n\nexport interface TrashPanelFilter extends DataTableFilterItem {\n    type: ResourceKind;\n}\n\ninterface TrashPanelDataProps {\n    resources: ResourcesState;\n}\n\ntype TrashPanelProps = TrashPanelDataProps & DispatchProp & WithStyles<CssRules>;\n\nexport const TrashPanel = withStyles(styles)(\n    connect((state: RootState) => ({\n        resources: state.resources\n    }))(\n        class extends React.Component<TrashPanelProps> {\n            render() {\n                return <div className={this.props.classes.root}><DataExplorer\n                    id={TRASH_PANEL_ID}\n                    onRowClick={this.handleRowClick}\n                    onRowDoubleClick={this.handleRowDoubleClick}\n                    onContextMenu={this.handleContextMenu}\n                    contextMenuColumn={false}\n                    defaultViewIcon={TrashIcon}\n                    defaultViewMessages={['Your trash list is empty.']} />\n                </div>;\n            }\n\n            handleContextMenu = (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => {\n                const resource = getResource<TrashableResource>(resourceUuid)(this.props.resources);\n                if (resource) {\n                    this.props.dispatch<any>(openContextMenuAndSelect(event, {\n                        name: '',\n                        uuid: resource.uuid,\n                        ownerUuid: resource.ownerUuid,\n                        isTrashed: resource.isTrashed,\n                        kind: resource.kind,\n                        menuKind: ContextMenuKind.TRASH\n                    }));\n                }\n                this.props.dispatch<any>(loadDetailsPanel(resourceUuid));\n            }\n\n            handleRowDoubleClick = (uuid: string) => {\n                
this.props.dispatch<any>(navigateTo(uuid));\n            }\n\n            handleRowClick = (uuid: string) => {\n                this.props.dispatch<any>(toggleOne(uuid))\n            }\n        }\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views/user-panel/user-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DataColumns, SortDirection } from 'components/data-table/data-column';\nimport {\n    UserResourceFullName,\n    ResourceUuid,\n    ResourceEmail,\n    ResourceIsAdmin,\n    ResourceUsername,\n    UserResourceAccountStatus\n} from \"views-components/data-explorer/renderers\";\nimport { createTree } from 'models/tree';\nimport { UserResource } from 'models/user';\n\n\nexport enum UserPanelColumnNames {\n    NAME = \"Name\",\n    UUID = \"Uuid\",\n    EMAIL = \"Email\",\n    STATUS = \"Account Status\",\n    ADMIN = \"Admin\",\n    REDIRECT_TO_USER = \"Redirect to user\",\n    USERNAME = \"Username\"\n}\n\nexport const userPanelColumns: DataColumns<string, UserResource> = [\n    {\n        name: UserPanelColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"firstName\" },\n        filters: createTree(),\n        render: uuid => <UserResourceFullName uuid={uuid} link={true} />\n    },\n    {\n        name: UserPanelColumnNames.UUID,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"uuid\" },\n        filters: createTree(),\n        render: uuid => <ResourceUuid uuid={uuid} />\n    },\n    {\n        name: UserPanelColumnNames.EMAIL,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"email\" },\n        filters: createTree(),\n        render: uuid => <ResourceEmail uuid={uuid} />\n    },\n    {\n        name: UserPanelColumnNames.STATUS,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <UserResourceAccountStatus uuid={uuid} />\n    },\n    {\n        name: UserPanelColumnNames.ADMIN,\n        selected: true,\n        configurable: false,\n        filters: createTree(),\n        render: uuid => <ResourceIsAdmin uuid={uuid} />\n    },\n    {\n        name: UserPanelColumnNames.USERNAME,\n        selected: true,\n        configurable: false,\n        sort: { direction: SortDirection.NONE, field: \"username\" },\n        filters: createTree(),\n        render: uuid => <ResourceUsername uuid={uuid} />\n    }\n];\n"
  },
  {
    "path": "services/workbench2/src/views/user-panel/user-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Paper, Typography } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { connect, DispatchProp } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { openUserContextMenu } from \"store/context-menu/context-menu-actions\";\nimport { getResource, ResourcesState } from \"store/resources/resources\";\nimport { navigateToUserProfile } from \"store/navigation/navigation-action\";\nimport { compose, Dispatch } from 'redux';\nimport { UserResource } from 'models/user';\nimport { ShareMeIcon } from 'components/icon/icon';\nimport { USERS_PANEL_ID, openUserCreateDialog } from 'store/users/users-actions';\nimport { noop } from 'lodash';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\n\ntype UserPanelRules = \"button\" | 'root';\n\nconst styles: CustomStyleRulesCallback<UserPanelRules> = (theme) => ({\n    button: {\n        marginTop: theme.spacing(1),\n        marginRight: theme.spacing(2),\n        textAlign: 'right',\n        alignSelf: 'center'\n    },\n    root: {\n        width: '100%',\n    },\n});\n\ninterface UserPanelDataProps {\n    resources: ResourcesState;\n}\n\ninterface UserPanelActionProps {\n    openUserCreateDialog: () => void;\n    handleRowClick: (uuid: string) => void;\n    handleContextMenu: (event, resource: UserResource) => void;\n}\n\nconst mapStateToProps = (state: RootState) => {\n    return {\n        resources: state.resources\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    openUserCreateDialog: () => dispatch<any>(openUserCreateDialog()),\n    handleRowClick: (uuid: string) => dispatch<any>(navigateToUserProfile(uuid)),\n    handleContextMenu: (event, resource: UserResource) => dispatch<any>(openUserContextMenu(event, resource)),\n});\n\ntype UserPanelProps = UserPanelDataProps & UserPanelActionProps & DispatchProp & WithStyles<UserPanelRules>;\n\nexport const UserPanel = compose(\n    withStyles(styles),\n    connect(mapStateToProps, mapDispatchToProps))(\n        class extends React.Component<UserPanelProps> {\n            render() {\n                return <Paper className={this.props.classes.root}>\n                    <DataExplorer\n                        id={USERS_PANEL_ID}\n                        title={\n                            <Typography>\n                           User records are created automatically on first log in.\n                           To add a new user, add them to your configured log in provider.\n                            </Typography>}\n                        onRowClick={noop}\n                        onRowDoubleClick={noop}\n                        onContextMenu={this.handleContextMenu}\n                        contextMenuColumn={true}\n                        hideColumnSelector\n                        paperProps={{\n                            elevation: 0,\n                        }}\n                        defaultViewIcon={ShareMeIcon}\n                        defaultViewMessages={['Your user list is empty.']}\n                        forceMultiSelectMode />\n                </Paper>;\n            }\n\n            handleContextMenu = (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => {\n                event.stopPropagation();\n                const 
resource = getResource<UserResource>(resourceUuid)(this.props.resources);\n                if (resource) {\n                    this.props.handleContextMenu(event, resource);\n                }\n            }\n        }\n    );\n"
  },
  {
    "path": "services/workbench2/src/views/user-preferences-panel/user-preferences-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Field, InjectedFormProps } from \"redux-form\";\nimport { DispatchProp } from 'react-redux';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport {\n    CardContent,\n    Typography,\n    Grid,\n    Paper,\n    InputLabel,\n    Button,\n} from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { ResourcesState } from 'store/resources/resources';\nimport { ProjectPanelTabLabels } from 'store/project-panel/project-panel-action';\nimport { RadioField } from 'components/radio-field/radio-field';\n\ntype CssRules = 'root' | 'fullHeight' | 'mainPane' | 'actionPane' | 'inputRow' | 'label' | 'title';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        overflow: 'auto'\n    },\n    fullHeight: {\n        height: '100%',\n    },\n    mainPane: {\n        flexBasis: 0,\n        flexGrow: 1,\n        flexShrink: 1,\n    },\n    actionPane: {\n        flexBasis: \"initial\",\n    },\n    inputRow: {\n        // Add some space below each input\n        marginBottom: 20,\n    },\n    label: {\n        fontSize: '0.8rem',\n        color: theme.palette.grey['600']\n    },\n    title: {\n        fontSize: '1.1rem',\n    },\n});\n\nexport interface UserPreferencesPanelRootDataProps {\n    isPristine: boolean;\n    isValid: boolean;\n    userUuid: string;\n    resources: ResourcesState;\n}\n\ntype UserPreferencesPanelRootProps = InjectedFormProps<{}> & UserPreferencesPanelRootDataProps & DispatchProp & WithStyles<CssRules>;\n\nconst ProjectPanelDefaultTabOptions = Object.keys(ProjectPanelTabLabels).map((key) => ({\n    key: ProjectPanelTabLabels[key],\n    value: ProjectPanelTabLabels[key],\n}));\n\nexport const UserPreferencesPanelRoot = withStyles(styles)(\n    class extends React.Component<UserPreferencesPanelRootProps> {\n        render() {\n            return (\n                <Paper className={this.props.classes.root}>\n                    <form className={this.props.classes.fullHeight} onSubmit={this.props.handleSubmit} data-cy=\"preferences-form\">\n                        <CardContent className={this.props.classes.fullHeight}>\n                            <Grid container direction=\"column\" flexWrap=\"nowrap\" className={this.props.classes.fullHeight}>\n                                <Grid item sm={12} overflow=\"hidden scroll\" className={this.props.classes.mainPane}>\n                                    <Grid container spacing={3} flexWrap=\"nowrap\" direction=\"column\">\n                                        <Grid item>\n                                            <Typography className={this.props.classes.title}>\n                                                Project Settings\n                                            </Typography>\n                                        </Grid>\n                                        <Grid item className={this.props.classes.inputRow} data-cy=\"prefs.wb.default_project_tab\">\n                                            <InputLabel className={this.props.classes.label} htmlFor=\"prefs.wb.default_project_tab\">Default Project Tab</InputLabel>\n                                            <Field\n                                                id=\"prefs.wb.default_project_tab\"\n           
                                     name=\"prefs.wb.default_project_tab\"\n                                                component={RadioField as any}\n                                                items={ProjectPanelDefaultTabOptions}\n                                                flexRowDirection\n                                            />\n                                        </Grid>\n                                    </Grid>\n                                </Grid>\n                                <Grid item sm={12} className={this.props.classes.actionPane}>\n                                    <Grid container direction=\"row\" justifyContent=\"flex-end\">\n                                        <Button color=\"primary\" onClick={this.props.reset} disabled={this.props.isPristine}>Discard changes</Button>\n                                        <Button\n                                            color=\"primary\"\n                                            variant=\"contained\"\n                                            type=\"submit\"\n                                            disabled={this.props.isPristine || this.props.invalid || this.props.submitting}>\n                                            Save changes\n                                        </Button>\n                                    </Grid>\n                                </Grid>\n                            </Grid>\n                        </CardContent>\n                    </form >\n                </Paper >\n            );\n        }\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/views/user-preferences-panel/user-preferences-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from 'store/store';\nimport { compose } from 'redux';\nimport { reduxForm, isPristine, isValid } from 'redux-form';\nimport { connect } from 'react-redux';\nimport { UserPreferencesPanelRoot, UserPreferencesPanelRootDataProps } from 'views/user-preferences-panel/user-preferences-panel-root';\nimport { USER_PREFERENCES_FORM, saveUserPreferences } from 'store/user-preferences/user-preferences-actions';\n\nconst mapStateToProps = (state: RootState): UserPreferencesPanelRootDataProps => {\n    const uuid = state.auth.user?.uuid || '';\n\n    return {\n        isPristine: isPristine(USER_PREFERENCES_FORM)(state),\n        isValid: isValid(USER_PREFERENCES_FORM)(state),\n        userUuid: uuid,\n        resources: state.resources,\n    }\n};\n\nexport const UserPreferencesPanel = compose(\n    connect(mapStateToProps),\n    reduxForm({\n        form: USER_PREFERENCES_FORM,\n        onSubmit: (data, dispatch) => {\n            dispatch(saveUserPreferences(data));\n        }\n    }))(UserPreferencesPanelRoot);\n"
  },
  {
    "path": "services/workbench2/src/views/user-profile-panel/user-profile-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DataColumns } from 'components/data-table/data-column';\nimport { ResourceLinkHeadUuid, ResourceLinkHeadPermissionLevel, ResourceLinkHead, ResourceLinkDelete, ResourceLinkTailIsVisible } from 'views-components/data-explorer/renderers';\nimport { createTree } from 'models/tree';\nimport { PermissionResource } from 'models/permission';\n\n\nexport enum UserProfileGroupsColumnNames {\n    NAME = \"Name\",\n    PERMISSION = \"Permission\",\n    VISIBLE = \"Visible to other members\",\n    UUID = \"UUID\",\n    REMOVE = \"Remove\"\n}\n\nexport const userProfileGroupsColumns: DataColumns<string, PermissionResource> = [\n    {\n        name: UserProfileGroupsColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkHead uuid={uuid} />\n    },\n    {\n        name: UserProfileGroupsColumnNames.PERMISSION,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkHeadPermissionLevel uuid={uuid} />\n    },\n    {\n        name: UserProfileGroupsColumnNames.VISIBLE,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkTailIsVisible uuid={uuid} />\n    },\n    {\n        name: UserProfileGroupsColumnNames.UUID,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkHeadUuid uuid={uuid} />\n    },\n    {\n        name: UserProfileGroupsColumnNames.REMOVE,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ResourceLinkDelete uuid={uuid} />\n    },\n];\n"
  },
  {
    "path": "services/workbench2/src/views/user-profile-panel/user-profile-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Field, InjectedFormProps } from \"redux-form\";\nimport { DispatchProp } from 'react-redux';\nimport { UserResource } from 'models/user';\nimport { TextField } from \"components/text-field/text-field\";\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { NativeSelectField } from \"components/select-field/select-field\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport {\n    CardContent,\n    Button,\n    Typography,\n    Grid,\n    InputLabel,\n    Tabs,\n    Tab,\n    Paper,\n    Tooltip,\n    IconButton,\n} from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { PROFILE_EMAIL_VALIDATION, PROFILE_URL_VALIDATION } from \"validators/validators\";\nimport { USER_PROFILE_PANEL_ID } from 'store/user-profile/user-profile-actions';\nimport { noop } from 'lodash';\nimport { DetailsIcon, GroupsIcon, MoreVerticalIcon } from 'components/icon/icon';\nimport { UserResourceAccountStatus } from 'views-components/data-explorer/renderers';\nimport { getResource, ResourcesState } from 'store/resources/resources';\nimport { DefaultView } from 'components/default-view/default-view';\nimport { CopyToClipboardSnackbar } from 'components/copy-to-clipboard-snackbar/copy-to-clipboard-snackbar';\n\ntype CssRules = 'root' | 'emptyRoot' | 'gridItem' | 'label' | 'readOnlyValue' | 'title' | 'description' | 'actions' | 'content' | 'copyIcon' | 'userProfileFormMessage';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        overflow: 'auto'\n    },\n    emptyRoot: {\n        width: '100%',\n        overflow: 'auto',\n        padding: theme.spacing(4),\n    },\n    gridItem: {\n        height: 45,\n        marginBottom: 20\n    },\n    label: {\n        fontSize: '0.675rem',\n        color: theme.palette.grey['600']\n    },\n    readOnlyValue: {\n        fontSize: '0.875rem',\n    },\n    title: {\n        fontSize: '1.1rem',\n    },\n    description: {\n        color: theme.palette.grey[\"600\"]\n    },\n    actions: {\n        display: 'flex',\n        justifyContent: 'flex-end'\n    },\n    content: {\n        // reserve space for the tab bar\n        height: `calc(100% - ${theme.spacing(7)})`,\n    },\n    copyIcon: {\n        marginLeft: theme.spacing(1),\n        color: theme.palette.grey[\"500\"],\n        cursor: 'pointer',\n        display: 'inline',\n        '& svg': {\n            fontSize: '1rem'\n        }\n    },\n    userProfileFormMessage: {\n        fontSize: '1.1rem',\n    }\n});\n\nexport interface UserProfilePanelRootActionProps {\n    handleContextMenu: (event, resource: UserResource) => void;\n}\n\nexport interface UserProfilePanelRootDataProps {\n    isAdmin: boolean;\n    isSelf: boolean;\n    isPristine: boolean;\n    isValid: boolean;\n    isInaccessible: boolean;\n    userUuid: string;\n    resources: ResourcesState;\n    localCluster: string;\n    userProfileFormMessage: string;\n}\n\nconst RoleTypes = [\n    { key: '', value: '' },\n    { key: 'Bio-informatician', value: 'Bio-informatician' },\n    { key: 'Data Scientist', value: 'Data Scientist' },\n    { key: 'Analyst', value: 'Analyst' },\n    { key: 'Researcher', value: 'Researcher' },\n    { key: 'Software Developer', value: 
'Software Developer' },\n    { key: 'System Administrator', value: 'System Administrator' },\n    { key: 'Other', value: 'Other' }\n];\n\ntype UserProfilePanelRootProps = InjectedFormProps<{}> & UserProfilePanelRootActionProps & UserProfilePanelRootDataProps & DispatchProp & WithStyles<CssRules>;\n\nenum TABS {\n    PROFILE = \"PROFILE\",\n    GROUPS = \"GROUPS\",\n\n}\n\nconst ReadOnlyField = withStyles(styles)(\n    (props: ({ label: string, input: { value: string } }) & WithStyles<CssRules>) => (\n        <Grid item xs={12} data-cy=\"field\">\n            <Typography className={props.classes.label}>\n                {props.label}\n            </Typography>\n            <Typography className={props.classes.readOnlyValue} data-cy=\"value\">\n                {props.input.value}\n            </Typography>\n        </Grid>\n    )\n);\n\nexport const UserProfilePanelRoot = withStyles(styles)(\n    class extends React.Component<UserProfilePanelRootProps> {\n        state = {\n            value: TABS.PROFILE,\n        };\n\n        componentDidMount() {\n            this.setState({ value: TABS.PROFILE });\n        }\n\n        render() {\n            if (this.props.isInaccessible) {\n                return (\n                    <Paper className={this.props.classes.emptyRoot}>\n                        <CardContent>\n                            <DefaultView icon={DetailsIcon} messages={['This user does not exist or your account does not have permission to view it']} />\n                        </CardContent>\n                    </Paper>\n                );\n            } else {\n                return (\n                    <Paper className={this.props.classes.root}>\n                        <Tabs value={this.state.value} onChange={this.handleChange} variant={\"fullWidth\"}>\n                            <Tab label={TABS.PROFILE} value={TABS.PROFILE} />\n                            <Tab label={TABS.GROUPS} value={TABS.GROUPS} />\n                        </Tabs>\n                        {this.state.value === TABS.PROFILE &&\n                            <CardContent>\n                                <Grid container justifyContent=\"space-between\">\n                                    <Grid item>\n                                        <Typography className={this.props.classes.title}>\n                                            {this.props.userUuid}\n                                            <CopyToClipboardSnackbar value={this.props.userUuid} />\n                                        </Typography>\n                                    </Grid>\n                                    <Grid item>\n                                        <Grid container alignItems=\"center\">\n                                            <Grid item style={{ marginRight: '10px' }}><UserResourceAccountStatus uuid={this.props.userUuid} /></Grid>\n                                            <Grid item>\n                                                <Tooltip title=\"Actions\" disableFocusListener>\n                                                    <IconButton\n                                                        data-cy='user-profile-panel-options-btn'\n                                                        aria-label=\"Actions\"\n                                                        onClick={(event) => this.handleContextMenu(event, this.props.userUuid)}\n                                                        size=\"large\">\n                                                        <MoreVerticalIcon />\n        
                                            </IconButton>\n                                                </Tooltip>\n                                            </Grid>\n                                        </Grid>\n                                    </Grid>\n                                </Grid>\n                                <form onSubmit={this.props.handleSubmit} data-cy=\"profile-form\">\n                                    <Grid container spacing={3}>\n                                        <Grid item className={this.props.classes.gridItem} sm={6} xs={12} data-cy=\"firstName\">\n                                            <Field\n                                                label=\"First name\"\n                                                name=\"firstName\"\n                                                component={TextField as any}\n                                                disabled={!this.props.isAdmin && !this.props.isSelf}\n                                            />\n                                        </Grid>\n                                        <Grid item className={this.props.classes.gridItem} sm={6} xs={12} data-cy=\"lastName\">\n                                            <Field\n                                                label=\"Last name\"\n                                                name=\"lastName\"\n                                                component={TextField as any}\n                                                disabled={!this.props.isAdmin && !this.props.isSelf}\n                                            />\n                                        </Grid>\n                                        <Grid item className={this.props.classes.gridItem} sm={6} xs={12} data-cy=\"email\">\n                                            <Field\n                                                label=\"E-mail\"\n                                                name=\"email\"\n                                                component={ReadOnlyField as any}\n                                                disabled\n                                            />\n                                        </Grid>\n                                        <Grid item className={this.props.classes.gridItem} sm={6} xs={12} data-cy=\"username\">\n                                            <Field\n                                                label=\"Username\"\n                                                name=\"username\"\n                                                component={ReadOnlyField as any}\n                                                disabled\n                                            />\n                                        </Grid>\n                                        <Grid item className={this.props.classes.gridItem} xs={12}>\n                                            <span className={this.props.classes.userProfileFormMessage}>{this.props.userProfileFormMessage}</span>\n                                        </Grid>\n                                        <Grid item className={this.props.classes.gridItem} sm={6} xs={12}>\n                                            <Field\n                                                label=\"Organization\"\n                                                name=\"prefs.profile.organization\"\n                                                component={TextField as any}\n                                                disabled={!this.props.isAdmin && 
!this.props.isSelf}\n                                            />\n                                        </Grid>\n                                        <Grid item className={this.props.classes.gridItem} sm={6} xs={12}>\n                                            <Field\n                                                label=\"E-mail at Organization\"\n                                                name=\"prefs.profile.organization_email\"\n                                                component={TextField as any}\n                                                disabled={!this.props.isAdmin && !this.props.isSelf}\n                                                validate={PROFILE_EMAIL_VALIDATION}\n                                            />\n                                        </Grid>\n                                        <Grid item className={this.props.classes.gridItem} sm={6} xs={12}>\n                                            <InputLabel className={this.props.classes.label} htmlFor=\"prefs.profile.role\">Role</InputLabel>\n                                            <Field\n                                                id=\"prefs.profile.role\"\n                                                name=\"prefs.profile.role\"\n                                                component={NativeSelectField as any}\n                                                items={RoleTypes}\n                                                disabled={!this.props.isAdmin && !this.props.isSelf}\n                                            />\n                                        </Grid>\n                                        <Grid item className={this.props.classes.gridItem} sm={6} xs={12}>\n                                            <Field\n                                                label=\"Website\"\n                                                name=\"prefs.profile.website_url\"\n                                                component={TextField as any}\n                                                disabled={!this.props.isAdmin && !this.props.isSelf}\n                                                validate={PROFILE_URL_VALIDATION}\n                                            />\n                                        </Grid>\n                                        <Grid item sm={12}>\n                                            <Grid container direction=\"row\" justifyContent=\"flex-end\">\n                                                <Button color=\"primary\" onClick={this.props.reset} disabled={this.props.isPristine}>Discard changes</Button>\n                                                <Button\n                                                    color=\"primary\"\n                                                    variant=\"contained\"\n                                                    type=\"submit\"\n                                                    disabled={this.props.isPristine || this.props.invalid || this.props.submitting}>\n                                                    Save changes\n                                                </Button>\n                                            </Grid>\n                                        </Grid>\n                                    </Grid>\n                                </form >\n                            </CardContent>\n                        }\n                        {this.state.value === TABS.GROUPS &&\n                            <div className={this.props.classes.content}>\n        
                        <DataExplorer\n                                    id={USER_PROFILE_PANEL_ID}\n                                    data-cy=\"user-profile-groups-data-explorer\"\n                                    onRowClick={noop}\n                                    onRowDoubleClick={noop}\n                                    onContextMenu={noop}\n                                    contextMenuColumn={false}\n                                    hideColumnSelector\n                                    hideSearchInput\n                                    paperProps={{\n                                        elevation: 0,\n                                    }}\n                                    defaultViewIcon={GroupsIcon}\n                                    defaultViewMessages={['Group list is empty.']} />\n                            </div>}\n                    </Paper>\n                );\n            }\n        }\n\n        handleChange = (event: React.SyntheticEvent, value: TABS) => {\n            this.setState({ value });\n        }\n\n        handleContextMenu = (event: React.MouseEvent<HTMLElement>, resourceUuid: string) => {\n            event.stopPropagation();\n            const resource = getResource<UserResource>(resourceUuid)(this.props.resources);\n            if (resource) {\n                this.props.handleContextMenu(event, resource);\n            }\n        }\n\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/views/user-profile-panel/user-profile-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from 'store/store';\nimport { compose, Dispatch } from 'redux';\nimport { reduxForm, isPristine, isValid } from 'redux-form';\nimport { connect } from 'react-redux';\nimport { UserResource } from 'models/user';\nimport { getUserProfileIsInaccessible, saveEditedUser } from 'store/user-profile/user-profile-actions';\nimport { UserProfilePanelRoot, UserProfilePanelRootDataProps } from 'views/user-profile-panel/user-profile-panel-root';\nimport { USER_PROFILE_FORM } from \"store/user-profile/user-profile-actions\";\nimport { matchUserProfileRoute } from 'routes/routes';\nimport { openUserContextMenu } from 'store/context-menu/context-menu-actions';\n\nconst mapStateToProps = (state: RootState): UserProfilePanelRootDataProps => {\n    const pathname = state.router.location ? state.router.location.pathname : '';\n    const match = matchUserProfileRoute(pathname);\n    const uuid = match ? match.params.id : state.auth.user?.uuid || '';\n\n    return {\n        isAdmin: state.auth.user!.isAdmin,\n        isSelf: state.auth.user!.uuid === uuid,\n        isPristine: isPristine(USER_PROFILE_FORM)(state),\n        isValid: isValid(USER_PROFILE_FORM)(state),\n        isInaccessible: getUserProfileIsInaccessible(state.properties) || false,\n        localCluster: state.auth.localCluster,\n        userUuid: uuid,\n        resources: state.resources,\n        userProfileFormMessage: state.auth.config.clusterConfig.Workbench.UserProfileFormMessage,\n    }\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    handleContextMenu: (event, resource: UserResource) => dispatch<any>(openUserContextMenu(event, resource)),\n});\n\nexport const UserProfilePanel = compose(\n    connect(mapStateToProps, mapDispatchToProps),\n    reduxForm({\n        form: USER_PROFILE_FORM,\n        onSubmit: (data, dispatch) => {\n            dispatch(saveEditedUser(data));\n        }\n    }))(UserProfilePanelRoot);\n"
  },
  {
    "path": "services/workbench2/src/views/virtual-machine-panel/virtual-machine-admin-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { Grid, Card, Chip, CardContent, TableBody, TableCell, TableHead, TableRow, Table, Tooltip, IconButton } from '@mui/material';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { compose, Dispatch } from 'redux';\nimport { loadVirtualMachinesAdminData, openAddVirtualMachineLoginDialog, openRemoveVirtualMachineLoginDialog, openEditVirtualMachineLoginDialog } from 'store/virtual-machines/virtual-machines-actions';\nimport { RootState } from 'store/store';\nimport { ListResults } from 'services/common-service/common-service';\nimport { MoreVerticalIcon, AddUserIcon } from 'components/icon/icon';\nimport { VirtualMachineLogins, VirtualMachinesResource } from 'models/virtual-machines';\nimport { openVirtualMachinesContextMenu } from 'store/context-menu/context-menu-actions';\nimport { ResourceUuid, VirtualMachineHostname, VirtualMachineLogin } from 'views-components/data-explorer/renderers';\n\ntype CssRules = 'moreOptionsButton' | 'moreOptions' | 'chipsRoot' | 'vmTableWrapper';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    moreOptionsButton: {\n        padding: 0\n    },\n    moreOptions: {\n        textAlign: 'right',\n        '&:last-child': {\n            paddingRight: 0\n        }\n    },\n    chipsRoot: {\n        margin: `0px -${theme.spacing(0.5)}`,\n    },\n    vmTableWrapper: {\n        overflowX: 'auto',\n    },\n});\n\nconst mapStateToProps = (state: RootState) => {\n    return {\n        userUuid: state.auth.user!.uuid,\n        ...state.virtualMachines\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): Pick<VirtualMachinesPanelActionProps, 'loadVirtualMachinesData' | 'onOptionsMenuOpen' | 'onAddLogin' | 'onDeleteLogin' | 'onLoginEdit'> => ({\n    loadVirtualMachinesData: () => dispatch<any>(loadVirtualMachinesAdminData()),\n    onOptionsMenuOpen: (event, virtualMachine) => {\n        dispatch<any>(openVirtualMachinesContextMenu(event, virtualMachine));\n    },\n    onAddLogin: (uuid: string) => {\n        dispatch<any>(openAddVirtualMachineLoginDialog(uuid));\n    },\n    onDeleteLogin: (uuid: string) => {\n        dispatch<any>(openRemoveVirtualMachineLoginDialog(uuid));\n    },\n    onLoginEdit: (uuid: string) => {\n        dispatch<any>(openEditVirtualMachineLoginDialog(uuid));\n    },\n});\n\ninterface VirtualMachinesPanelDataProps {\n    virtualMachines: ListResults<any>;\n    logins: VirtualMachineLogins;\n    links: ListResults<any>;\n    userUuid: string;\n}\n\ninterface VirtualMachinesPanelActionProps {\n    loadVirtualMachinesData: () => string;\n    onOptionsMenuOpen: (event: React.MouseEvent<HTMLElement>, virtualMachine: VirtualMachinesResource) => void;\n    onAddLogin: (uuid: string) => void;\n    onDeleteLogin: (uuid: string) => void;\n    onLoginEdit: (uuid: string) => void;\n}\n\ntype VirtualMachineProps = VirtualMachinesPanelActionProps & VirtualMachinesPanelDataProps & WithStyles<CssRules>;\n\nexport const VirtualMachineAdminPanel = compose(\n    withStyles(styles),\n    connect(mapStateToProps, mapDispatchToProps))(\n        class extends React.Component<VirtualMachineProps> {\n            componentDidMount() {\n                
this.props.loadVirtualMachinesData();\n            }\n\n            render() {\n                const { virtualMachines } = this.props;\n                return (\n                    <Grid container spacing={2}>\n                        {virtualMachines.items.length > 0 && <CardContentWithVirtualMachines {...this.props} />}\n                    </Grid>\n                );\n            }\n        }\n    );\n\nconst CardContentWithVirtualMachines = (props: VirtualMachineProps) =>\n    <Grid item xs={12}>\n        <Card>\n            <CardContent className={props.classes.vmTableWrapper}>\n                {virtualMachinesTable(props)}\n            </CardContent>\n        </Card>\n    </Grid>;\n\nconst virtualMachinesTable = (props: VirtualMachineProps) =>\n    <Table data-cy=\"vm-admin-table\">\n        <TableHead>\n            <TableRow>\n                <TableCell>Uuid</TableCell>\n                <TableCell>Host name</TableCell>\n                <TableCell>Logins</TableCell>\n                <TableCell />\n                <TableCell />\n            </TableRow>\n        </TableHead>\n        <TableBody>\n            {props.virtualMachines.items.map((machine, index) =>\n                <TableRow key={index}>\n                    <TableCell><ResourceUuid uuid={machine.uuid} /></TableCell>\n                    <TableCell><VirtualMachineHostname uuid={machine.uuid} /></TableCell>\n                    <TableCell>\n                        <Grid container spacing={1} className={props.classes.chipsRoot}>\n                            {props.links.items.filter((link) => (link.headUuid === machine.uuid)).map((permission, i) => (\n                                <Grid item key={i}>\n                                    <Chip label={<VirtualMachineLogin linkUuid={permission.uuid} />} onDelete={event => props.onDeleteLogin(permission.uuid)} onClick={event => props.onLoginEdit(permission.uuid)} />\n                                </Grid>\n                            ))}\n                        </Grid>\n                    </TableCell>\n                    <TableCell>\n                        <Tooltip title=\"Add Login Permission\" disableFocusListener>\n                            <IconButton\n                                onClick={event => props.onAddLogin(machine.uuid)}\n                                className={props.classes.moreOptionsButton}\n                                size=\"large\">\n                                <AddUserIcon />\n                            </IconButton>\n                        </Tooltip>\n                    </TableCell>\n                    <TableCell className={props.classes.moreOptions}>\n                        <Tooltip title=\"More options\" disableFocusListener>\n                            <IconButton\n                                onClick={event => props.onOptionsMenuOpen(event, machine)}\n                                className={props.classes.moreOptionsButton}\n                                size=\"large\">\n                                <MoreVerticalIcon />\n                            </IconButton>\n                        </Tooltip>\n                    </TableCell>\n                </TableRow>\n            )}\n        </TableBody>\n    </Table>;\n"
  },
  {
    "path": "services/workbench2/src/views/virtual-machine-panel/virtual-machine-user-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { Grid, Typography, Button, Card, CardContent, TableBody, TableCell, TableHead, TableRow, Table, Tooltip, Chip } from '@mui/material';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { compose, Dispatch } from 'redux';\nimport { saveRequestedDate, loadVirtualMachinesUserData } from 'store/virtual-machines/virtual-machines-actions';\nimport { RootState } from 'store/store';\nimport { ListResults } from 'services/common-service/common-service';\nimport { HelpIcon } from 'components/icon/icon';\nimport { SESSION_STORAGE } from \"services/auth-service/auth-service\";\n// import * as CopyToClipboard from 'react-copy-to-clipboard';\nimport parse from \"parse-duration\";\nimport { CopyIcon } from 'components/icon/icon';\nimport CopyToClipboard from 'react-copy-to-clipboard';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { sanitizeHTML } from 'common/html-sanitize';\n\ntype CssRules = 'button' | 'codeSnippet' | 'link' | 'linkIcon' | 'rightAlign' | 'cardWithoutMachines' | 'icon' | 'chipsRoot' | 'copyIcon' | 'tableWrapper' | 'webshellButton';\n\nconst EXTRA_TOKEN = \"exraToken\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    button: {\n        marginTop: theme.spacing(1),\n        marginBottom: theme.spacing(1)\n    },\n    codeSnippet: {\n        borderRadius: theme.spacing(0.5),\n        border: '1px solid',\n        borderColor: theme.palette.grey[\"400\"],\n    },\n    link: {\n        textDecoration: 'none',\n        color: theme.palette.primary.main,\n        \"&:hover\": {\n            color: theme.palette.primary.dark,\n            transition: 'all 0.5s ease'\n        }\n    },\n    linkIcon: {\n        textDecoration: 'none',\n        color: theme.palette.grey[\"500\"],\n        textAlign: 'right',\n        \"&:hover\": {\n            color: theme.palette.common.black,\n            transition: 'all 0.5s ease'\n        }\n    },\n    rightAlign: {\n        textAlign: \"right\"\n    },\n    cardWithoutMachines: {\n        display: 'flex'\n    },\n    icon: {\n        textAlign: \"right\",\n        marginTop: theme.spacing(1)\n    },\n    chipsRoot: {\n        margin: `0px -${theme.spacing(0.5)}`,\n    },\n    copyIcon: {\n        marginLeft: theme.spacing(1),\n        color: theme.palette.grey[\"500\"],\n        cursor: 'pointer',\n        display: 'inline',\n        '& svg': {\n            fontSize: '1rem'\n        }\n    },\n    tableWrapper: {\n        overflowX: 'auto',\n    },\n    webshellButton: {\n        textTransform: \"initial\",\n    },\n});\n\nconst mapStateToProps = (state: RootState) => {\n    return {\n        requestedDate: state.virtualMachines.date,\n        userUuid: state.auth.user!.uuid,\n        helpText: state.auth.config.clusterConfig.Workbench.SSHHelpPageHTML,\n        hostSuffix: state.auth.config.clusterConfig.Workbench.SSHHelpHostSuffix || \"\",\n        token: state.auth.extraApiToken || state.auth.apiToken || '',\n        tokenLocation: state.auth.extraApiToken ? 
EXTRA_TOKEN : (state.auth.apiTokenLocation || ''),\n        webshellUrl: state.auth.config.clusterConfig.Services.WebShell.ExternalURL,\n        idleTimeout: parse(state.auth.config.clusterConfig.Workbench.IdleTimeout, 's') || 0,\n        ...state.virtualMachines\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): Pick<VirtualMachinesPanelActionProps, 'loadVirtualMachinesData' | 'saveRequestedDate' | 'onCopy'> => ({\n    saveRequestedDate: () => dispatch<any>(saveRequestedDate()),\n    loadVirtualMachinesData: () => dispatch<any>(loadVirtualMachinesUserData()),\n    onCopy: (message: string) => {\n        dispatch(snackbarActions.OPEN_SNACKBAR({\n            message,\n            hideDuration: 2000,\n            kind: SnackbarKind.SUCCESS\n        }));\n    },\n});\n\ninterface VirtualMachinesPanelDataProps {\n    requestedDate: string;\n    virtualMachines: ListResults<any>;\n    userUuid: string;\n    links: ListResults<any>;\n    helpText: string;\n    hostSuffix: string;\n    token: string;\n    tokenLocation: string;\n    webshellUrl: string;\n    idleTimeout: number;\n}\n\ninterface VirtualMachinesPanelActionProps {\n    saveRequestedDate: () => void;\n    loadVirtualMachinesData: () => string;\n    onCopy: (message: string) => void;\n}\n\ntype VirtualMachineProps = VirtualMachinesPanelActionProps & VirtualMachinesPanelDataProps & WithStyles<CssRules>;\n\nexport const VirtualMachineUserPanel = compose(\n    withStyles(styles),\n    connect(mapStateToProps, mapDispatchToProps))(\n        class extends React.Component<VirtualMachineProps> {\n            componentDidMount() {\n                this.props.loadVirtualMachinesData();\n            }\n\n            render() {\n                const { virtualMachines, links } = this.props;\n                return (\n                    <Grid container spacing={2} data-cy=\"vm-user-panel\">\n                        {virtualMachines.items.length === 0 && <CardContentWithoutVirtualMachines {...this.props} />}\n                        {virtualMachines.items.length > 0 && links.items.length > 0 && <CardContentWithVirtualMachines {...this.props} />}\n                        {<CardSSHSection {...this.props} />}\n                    </Grid>\n                );\n            }\n        }\n    );\n\nconst CardContentWithoutVirtualMachines = (props: VirtualMachineProps) =>\n    <Grid item xs={12}>\n        <Card>\n            <CardContent className={props.classes.cardWithoutMachines}>\n                <Grid item xs={6}>\n                    <Typography variant='body1'>\n                        You do not have access to any virtual machines. Some Arvados features require using the command line. 
You may request access to a hosted virtual machine with the command line shell.\n                    </Typography>\n                </Grid>\n                <Grid item xs={6} className={props.classes.rightAlign}>\n                    {virtualMachineSendRequest(props)}\n                </Grid>\n            </CardContent>\n        </Card>\n    </Grid>;\n\nconst CardContentWithVirtualMachines = (props: VirtualMachineProps) =>\n    <Grid item xs={12}>\n        <Card>\n            <CardContent>\n                <span>\n                    <div className={props.classes.rightAlign}>\n                        {virtualMachineSendRequest(props)}\n                    </div>\n                    <div className={props.classes.icon}>\n                        <a href=\"https://doc.arvados.org/user/getting_started/vm-login-with-webshell.html\" target=\"_blank\" rel=\"noopener noreferrer\" className={props.classes.linkIcon}>\n                            <Tooltip title=\"Access VM using webshell\">\n                                <span><HelpIcon /></span>\n                            </Tooltip>\n                        </a>\n                    </div>\n                    <div className={props.classes.tableWrapper}>\n                        {virtualMachinesTable(props)}\n                    </div>\n                </span>\n\n            </CardContent>\n        </Card>\n    </Grid>;\n\nconst virtualMachineSendRequest = (props: VirtualMachineProps) =>\n    <span>\n        <Button variant=\"contained\" color=\"primary\" className={props.classes.button} onClick={props.saveRequestedDate}>\n            SEND REQUEST FOR SHELL ACCESS\n        </Button>\n        {props.requestedDate &&\n            <Typography >\n                A request for shell access was sent on {props.requestedDate}\n            </Typography>}\n    </span>;\n\nconst virtualMachinesTable = (props: VirtualMachineProps) =>\n    <Table data-cy=\"vm-user-table\">\n        <TableHead>\n            <TableRow>\n                <TableCell>Host name</TableCell>\n                <TableCell>Login name</TableCell>\n                <TableCell>Groups</TableCell>\n                <TableCell>Command line</TableCell>\n                <TableCell>Web shell</TableCell>\n            </TableRow>\n        </TableHead>\n        <TableBody>\n            {props.virtualMachines.items.map(it =>\n                props.links.items.map(lk => {\n                    if (lk.tailUuid === props.userUuid && lk.headUuid === it.uuid) {\n                        const username = lk.properties.username;\n                        const command = `ssh ${username}@${it.hostname}${props.hostSuffix}`;\n                        let tokenParam = \"\";\n                        if (props.tokenLocation === SESSION_STORAGE || props.tokenLocation === EXTRA_TOKEN) {\n                            tokenParam = `&token=${encodeURIComponent(props.token)}`;\n                        }\n                        const loginHref = `/webshell/?host=${encodeURIComponent(props.webshellUrl + '/' + it.hostname)}&timeout=${props.idleTimeout}&login=${encodeURIComponent(username)}${tokenParam}`;\n                        return <TableRow key={lk.uuid}>\n                            <TableCell>{it.hostname}</TableCell>\n                            <TableCell>{username}</TableCell>\n                            <TableCell>\n                                <Grid container spacing={1} className={props.classes.chipsRoot}>\n                                    {\n                                        (lk.properties.groups || 
[]).map((group, i) => (\n                                            <Grid item key={i}>\n                                                <Chip label={group} />\n                                            </Grid>\n                                        ))\n                                    }\n                                </Grid>\n                            </TableCell>\n                            <TableCell>\n                                {command}\n                                <Tooltip title=\"Copy link to clipboard\">\n                                    <span className={props.classes.copyIcon}>\n                                        <CopyToClipboard text={command || \"\"} onCopy={() => props.onCopy!(\"Copied\")}>\n                                            <CopyIcon />\n                                        </CopyToClipboard>\n                                    </span>\n                                </Tooltip>\n                            </TableCell>\n                            <TableCell>\n                                <Button\n                                    className={props.classes.webshellButton}\n                                    variant=\"contained\"\n                                    size=\"small\"\n                                    href={loginHref}\n                                    target=\"_blank\"\n                                    rel=\"noopener\">\n                                    Log in as {username}\n                                </Button>\n                            </TableCell>\n                        </TableRow>;\n                    }\n                    return null;\n                }\n                ))}\n        </TableBody>\n    </Table>;\n\nconst CardSSHSection = (props: VirtualMachineProps) =>\n    <Grid item xs={12}>\n        <Card>\n            <CardContent>\n                <Typography>\n                    <span dangerouslySetInnerHTML={{ __html: sanitizeHTML(props.helpText) }} style={{ margin: \"1em\" }} />\n                </Typography>\n            </CardContent>\n        </Card>\n    </Grid>;\n"
  },
  {
    "path": "services/workbench2/src/views/workbench/fed-login.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { User } from \"models/user\";\nimport { getSaltedToken } from 'store/auth/auth-action-session';\nimport { Config } from 'common/config';\n\nexport interface FedLoginProps {\n    user?: User;\n    apiToken?: string;\n    localCluster: string;\n    remoteHostsConfig: { [key: string]: Config };\n}\n\nconst mapStateToProps = ({ auth }: RootState) => ({\n    user: auth.user,\n    apiToken: auth.apiToken,\n    remoteHostsConfig: auth.remoteHostsConfig,\n    localCluster: auth.localCluster,\n});\n\nexport const FedLogin = connect(mapStateToProps)(\n    class extends React.Component<FedLoginProps> {\n        render() {\n            const { apiToken, user, localCluster, remoteHostsConfig } = this.props;\n            if (!apiToken || !user || !user.uuid.startsWith(localCluster)) {\n                return <></>;\n            }\n            return <div id={\"fedtoken-iframe-div\"}>\n                {Object.keys(remoteHostsConfig)\n                    .map((k) => {\n                        if (k === localCluster) {\n                            return null;\n                        }\n                        if (!remoteHostsConfig[k].workbench2Url) {\n                            console.log(`Cluster ${k} does not define workbench2Url.  Federated login / cross-site linking to ${k} is unavailable.  Tell the admin of ${k} to set Services->Workbench2->ExternalURL in config.yml.`);\n                            return null;\n                        }\n                        const fedtoken = (remoteHostsConfig[k].loginCluster === localCluster)\n                            ? apiToken : getSaltedToken(k, apiToken);\n                        return <iframe key={k} title={k} src={`${remoteHostsConfig[k].workbench2Url}/fedtoken?api_token=${fedtoken}`} style={{\n                            height: 0,\n                            width: 0,\n                            visibility: \"hidden\"\n                        }}\n                        />;\n                    })}\n            </div>;\n        }\n    });\n"
  },
  {
    "path": "services/workbench2/src/views/workbench/workbench-loading-screen.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { Grid, CircularProgress } from '@mui/material';\n\ntype CssRules = 'root' | 'img';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    img: {\n        marginBottom: theme.spacing(4)\n    },\n    root: {\n        background: theme.palette.background.default,\n        bottom: 0,\n        left: 0,\n        position: 'fixed',\n        right: 0,\n        top: 0,\n        zIndex: theme.zIndex.appBar + 1,\n    }\n});\n\nexport const WorkbenchLoadingScreen = withStyles(styles)(({ classes }: WithStyles<CssRules>) =>\n    <Grid container direction=\"column\" alignItems='center' justifyContent='center' className={classes.root}>\n        <img src='/arvados_logo.png' alt='Arvados logo' className={classes.img} />\n        <CircularProgress data-cy='loading-spinner' />\n    </Grid>\n);\n"
  },
  {
    "path": "services/workbench2/src/views/workbench/workbench.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport ReactDOM from 'react-dom';\nimport { WorkbenchPanel } from './workbench';\nimport { Provider } from \"react-redux\";\nimport { configureStore } from \"store/store\";\nimport { createBrowserHistory } from \"history\";\nimport { ConnectedRouter } from \"connected-react-router\";\nimport { ThemeProvider, StyledEngineProvider } from '@mui/material/styles';\nimport { CustomTheme } from 'common/custom-theme';\nimport { createServices } from \"services/services\";\n\nconst history = createBrowserHistory();\n\nit('renders without crashing', () => {\n    const div = document.createElement('div');\n    const services = createServices(\"/arvados/v1\");\n\tservices.authService.getUuid = cy.stub().returns('test');\n    const store = configureStore(createBrowserHistory(), services);\n    ReactDOM.render(\n        <StyledEngineProvider injectFirst>\n            <ThemeProvider theme={CustomTheme}>\n                <Provider store={store}>\n                    <ConnectedRouter history={history}>\n                        <WorkbenchPanel />\n                    </ConnectedRouter>\n                </Provider>\n            </ThemeProvider>\n        </StyledEngineProvider>,\n    div);\n    ReactDOM.unmountComponentAtNode(div);\n});\n"
  },
  {
    "path": "services/workbench2/src/views/workbench/workbench.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { Route, Switch } from \"react-router\";\nimport { ProjectPanel } from \"views/project-panel/project-panel\";\nimport { DetailsPanel } from \"views-components/details-panel/details-panel\";\nimport { ArvadosTheme } from \"common/custom-theme\";\nimport { ContextMenu } from \"views-components/context-menu/context-menu\";\nimport { FavoritePanel } from \"../favorite-panel/favorite-panel\";\nimport { TokenDialog } from \"views-components/token-dialog/token-dialog\";\nimport { RichTextEditorDialog } from \"views-components/rich-text-editor-dialog/rich-text-editor-dialog\";\nimport { Snackbar } from \"views-components/snackbar/snackbar\";\nimport { CollectionPanel } from \"../collection-panel/collection-panel\";\nimport { RenameFileDialog } from \"views-components/rename-file-dialog/rename-file-dialog\";\nimport { FileRemoveDialog } from \"views-components/file-remove-dialog/file-remove-dialog\";\nimport { MultipleFilesRemoveDialog } from \"views-components/file-remove-dialog/multiple-files-remove-dialog\";\nimport { Routes } from \"routes/routes\";\nimport { SidePanel } from \"views-components/side-panel/side-panel\";\nimport { ProcessPanel } from \"views/process-panel/process-panel\";\nimport { ChangeWorkflowDialog } from \"views-components/run-process-dialog/change-workflow-dialog\";\nimport { DialogCollectionCreate } from \"views-components/dialog-create/dialog-collection-create\";\nimport { CopyCollectionDialog } from \"views-components/dialog-copy/dialog-copy\";\nimport { DialogProcessRerun } from \"views-components/dialog-copy/dialog-process-rerun\";\nimport { DialogCollectionUpdate } from 'views-components/dialog-update/dialog-collection-update';\nimport { UpdateProcessDialog } from \"views-components/dialog-forms/update-process-dialog\";\nimport { DialogProjectUpdate } from 'views-components/dialog-update/dialog-project-update';\nimport { DialogMoveProject } from \"views-components/dialog-move/dialog-move-project\";\nimport { DialogCollectionFilesUpload } from 'views-components/dialog-upload/dialog-collection-files-upload';\nimport { DialogCollectionPartialCopyToNewCollection } from \"views-components/dialog-copy/dialog-collection-partial-copy-to-new-collection\";\nimport { DialogCollectionPartialCopyToExistingCollection } from \"views-components/dialog-copy/dialog-collection-partial-copy-to-existing-collection\";\nimport { DialogCollectionPartialCopyToSeparateCollection } from \"views-components/dialog-copy/dialog-collection-partial-copy-to-separate-collections\";\nimport { DialogCollectionPartialMoveToExistingCollection } from \"views-components/dialog-move/dialog-collection-partial-move-to-existing-collection\";\nimport { RemoveProcessDialog } from \"views-components/process-remove-dialog/process-remove-dialog\";\nimport { RemoveWorkflowDialog } from \"views-components/workflow-remove-dialog/workflow-remove-dialog\";\nimport { RemoveExternalCredentialDialog } from \"views-components/dialog-remove/external-credential-remove-dialog\";\nimport { MainContentBar } from \"views-components/main-content-bar/main-content-bar\";\nimport { Grid } from \"@mui/material\";\nimport { TrashPanel } from \"views/trash-panel/trash-panel\";\nimport { SharedWithMePanel } from 
\"views/shared-with-me-panel/shared-with-me-panel\";\nimport { RunProcessPanel } from \"views/run-process-panel/run-process-panel\";\nimport SplitterLayout from \"react-splitter-layout\";\nimport { WorkflowPanel } from \"views/workflow-panel/workflow-panel\";\nimport { RegisteredWorkflowPanel } from \"views/workflow-panel/registered-workflow-panel\";\nimport { SearchResultsPanel } from \"views/search-results-panel/search-results-panel\";\nimport { SshKeyPanel } from \"views/ssh-key-panel/ssh-key-panel\";\nimport { SshKeyAdminPanel } from \"views/ssh-key-panel/ssh-key-admin-panel\";\nimport { SiteManagerPanel } from \"views/site-manager-panel/site-manager-panel\";\nimport { UserProfilePanel } from \"views/user-profile-panel/user-profile-panel\";\nimport { SharingDialog } from \"views-components/sharing-dialog/sharing-dialog\";\nimport { NotFoundDialog } from \"views-components/not-found-dialog/not-found-dialog\";\nimport { AdvancedTabDialog } from \"views-components/advanced-tab-dialog/advanced-tab-dialog\";\nimport { ProcessInputDialog } from \"views-components/process-input-dialog/process-input-dialog\";\nimport { VirtualMachineUserPanel } from \"views/virtual-machine-panel/virtual-machine-user-panel\";\nimport { VirtualMachineAdminPanel } from \"views/virtual-machine-panel/virtual-machine-admin-panel\";\nimport { RepositoriesPanel } from \"views/repositories-panel/repositories-panel\";\nimport { KeepServicePanel } from \"views/keep-service-panel/keep-service-panel\";\nimport { ApiClientAuthorizationPanel } from \"views/api-client-authorization-panel/api-client-authorization-panel\";\nimport { LinkPanel } from \"views/link-panel/link-panel\";\nimport { RepositoriesSampleGitDialog } from \"views-components/repositories-sample-git-dialog/repositories-sample-git-dialog\";\nimport { RepositoryAttributesDialog } from \"views-components/repository-attributes-dialog/repository-attributes-dialog\";\nimport { CreateRepositoryDialog } from \"views-components/dialog-forms/create-repository-dialog\";\nimport { RemoveRepositoryDialog } from \"views-components/repository-remove-dialog/repository-remove-dialog\";\nimport { CreateSshKeyDialog } from \"views-components/dialog-forms/create-ssh-key-dialog\";\nimport { PublicKeyDialog } from \"views-components/ssh-keys-dialog/public-key-dialog\";\nimport { RemoveApiClientAuthorizationDialog } from \"views-components/api-client-authorizations-dialog/remove-dialog\";\nimport { RemoveKeepServiceDialog } from \"views-components/keep-services-dialog/remove-dialog\";\nimport { RemoveLinkDialog } from \"views-components/links-dialog/remove-dialog\";\nimport { RemoveSshKeyDialog } from \"views-components/ssh-keys-dialog/remove-dialog\";\nimport { VirtualMachineAttributesDialog } from \"views-components/virtual-machines-dialog/attributes-dialog\";\nimport { RemoveVirtualMachineDialog } from \"views-components/virtual-machines-dialog/remove-dialog\";\nimport { RemoveVirtualMachineLoginDialog } from \"views-components/virtual-machines-dialog/remove-login-dialog\";\nimport { VirtualMachineAddLoginDialog } from \"views-components/virtual-machines-dialog/add-login-dialog\";\nimport { AttributesApiClientAuthorizationDialog } from \"views-components/api-client-authorizations-dialog/attributes-dialog\";\nimport { AttributesKeepServiceDialog } from \"views-components/keep-services-dialog/attributes-dialog\";\nimport { AttributesLinkDialog } from \"views-components/links-dialog/attributes-dialog\";\nimport { AttributesSshKeyDialog } from 
\"views-components/ssh-keys-dialog/attributes-dialog\";\nimport { UserPanel } from \"views/user-panel/user-panel\";\nimport { UserAttributesDialog } from \"views-components/user-dialog/attributes-dialog\";\nimport { CreateUserDialog } from \"views-components/dialog-forms/create-user-dialog\";\nimport { HelpApiClientAuthorizationDialog } from \"views-components/api-client-authorizations-dialog/help-dialog\";\nimport { DeactivateDialog } from \"views-components/user-dialog/deactivate-dialog\";\nimport { ActivateDialog } from \"views-components/user-dialog/activate-dialog\";\nimport { SetupDialog } from \"views-components/user-dialog/setup-dialog\";\nimport { GroupsPanel } from \"views/groups-panel/groups-panel\";\nimport { RemoveGroupDialog } from \"views-components/groups-dialog/remove-dialog\";\nimport { GroupAttributesDialog } from \"views-components/groups-dialog/attributes-dialog\";\nimport { CreateExternalCredentialDialog } from \"views-components/dialog-create/create-external-credential-dialog\";\nimport { UpdateExternalCredentialDialog } from \"views-components/dialog-forms/update-external-credential-dialog\";\nimport { GroupDetailsPanel } from \"views/group-details-panel/group-details-panel\";\nimport { RemoveGroupMemberDialog, RemoveMultipleGroupMembersDialog } from \"views-components/groups-dialog/member-remove-dialog\";\nimport { GroupMemberAttributesDialog } from \"views-components/groups-dialog/member-attributes-dialog\";\nimport { PublicFavoritePanel } from \"views/public-favorites-panel/public-favorites-panel\";\nimport { LinkAccountPanel } from \"views/link-account-panel/link-account-panel\";\nimport { CollectionsContentAddressPanel } from \"views/collection-content-address-panel/collection-content-address-panel\";\nimport { AllProcessesPanel } from \"../all-processes-panel/all-processes-panel\";\nimport { ExternalCredentialsPanel } from \"views/external-credentials-panel/external-credentials-panel\";\nimport { NotFoundPanel } from \"../not-found-panel/not-found-panel\";\nimport { AutoLogout } from \"views-components/auto-logout/auto-logout\";\nimport { RestoreCollectionVersionDialog } from \"views-components/collections-dialog/restore-version-dialog\";\nimport { WebDavS3InfoDialog } from \"views-components/webdav-s3-dialog/webdav-s3-dialog\";\nimport { pluginConfig } from \"plugins\";\nimport { ElementListReducer } from \"common/plugintypes\";\nimport { Banner } from \"views-components/baner/banner\";\nimport { InstanceTypesPanel } from \"views/instance-types-panel/instance-types-panel\";\nimport classNames from \"classnames\";\nimport { UserPreferencesPanel } from \"views/user-preferences-panel/user-preferences-panel\";\nimport { Dashboard } from \"components/dashboard/dashboard\";\nimport { DownloadFilesAsZipDialog } from \"views-components/download-files-as-zip/download-files-as-zip\";\nimport { CancelProcessDialog } from \"views-components/process-cancel-dialog/process-cancel-dialog\";\nimport { DialogProjectCreate } from \"views-components/dialog-create/dialog-project-create\";\nimport { DialogCollectionPartialMoveToNewCollection } from \"views-components/dialog-move/dialog-collection-partial-move-to-new-collection\";\nimport { DialogCollectionPartialMoveToSeparateCollections } from \"views-components/dialog-move/dialog-collection-partial-move-to-separate-collections\";\nimport { DialogMoveCollection } from 'views-components/dialog-move/dialog-move-collection';\nimport { DescriptionDialog } from \"views-components/description-dialog/description-dialog\";\n\ntype 
CssRules = \"root\" | \"container\" | \"splitter\" | \"splitterSidePanel\" | \"splitterDetails\" | \"asidePanel\" | \"contentWrapper\" | \"content\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        paddingTop: theme.spacing(7),\n        background: theme.palette.background.default,\n    },\n    container: {\n        position: \"relative\",\n    },\n    splitter: {\n        \"& > .layout-splitter\": {\n            width: \"3px\",\n        },\n        \"& > .layout-splitter[disabled]\": {\n            pointerEvents: \"none\",\n            cursor: \"pointer\",\n        },\n        \"& > .layout-pane\": {\n            overflow: \"hidden auto\",\n        },\n    },\n    splitterSidePanel: {\n        \"& > .layout-splitter::after\": {\n            content: `\"\"`,\n            marginLeft: \"3px\", // Matches splitter line width\n            width: \"8px\",\n            display: \"block\",\n            position: \"relative\",\n            height: \"100%\",\n            zIndex: 100, // Needed for drag handle to overlap middle panel\n        }\n    },\n    splitterDetails: {\n        \"& > .layout-splitter::after\": {\n            content: `\"\"`,\n            marginLeft: \"-8px\",\n            width: \"8px\",\n            display: \"block\",\n            position: \"relative\",\n            height: \"100%\",\n        }\n    },\n    asidePanel: {\n        paddingTop: theme.spacing(1),\n        height: \"100%\",\n    },\n    contentWrapper: {\n        paddingTop: theme.spacing(1),\n        minWidth: 0,\n    },\n    content: {\n        minWidth: 0,\n        maxWidth: \"100%\",\n        paddingLeft: theme.spacing(3),\n        paddingRight: theme.spacing(3),\n        // Reserve vertical space for app bar + MainContentBar\n        minHeight: `calc(100vh - ${theme.spacing(16)})`,\n        display: \"flex\",\n        flexBasis: \"0px\", // Arbitrary flexbasis to allow content to shrink\n    },\n});\n\nlet routes = (\n    <>\n        <Route\n            path={Routes.DASHBOARD}\n            component={Dashboard}\n        />\n        <Route\n            path={Routes.PROJECTS}\n            component={ProjectPanel}\n        />\n        <Route\n            path={Routes.COLLECTIONS}\n            component={CollectionPanel}\n        />\n        <Route\n            path={Routes.FAVORITES}\n            component={FavoritePanel}\n        />\n        <Route\n            path={Routes.ALL_PROCESSES}\n            component={AllProcessesPanel}\n        />\n        <Route\n            path={Routes.PROCESSES}\n            component={ProcessPanel}\n        />\n        <Route\n            path={Routes.TRASH}\n            component={TrashPanel}\n        />\n        <Route\n            path={Routes.SHARED_WITH_ME}\n            component={SharedWithMePanel}\n        />\n        <Route\n            path={Routes.RUN_PROCESS}\n            component={RunProcessPanel}\n        />\n        <Route\n            path={Routes.REGISTEREDWORKFLOW}\n            component={RegisteredWorkflowPanel}\n        />\n        <Route\n            path={Routes.WORKFLOWS}\n            component={WorkflowPanel}\n        />\n        <Route\n            path={Routes.SEARCH_RESULTS}\n            component={SearchResultsPanel}\n        />\n        <Route\n            path={Routes.VIRTUAL_MACHINES_USER}\n            component={VirtualMachineUserPanel}\n        />\n        <Route\n            path={Routes.VIRTUAL_MACHINES_ADMIN}\n            component={VirtualMachineAdminPanel}\n        />\n        
<Route\n            path={Routes.REPOSITORIES}\n            component={RepositoriesPanel}\n        />\n        <Route\n            path={Routes.SSH_KEYS_USER}\n            component={SshKeyPanel}\n        />\n        <Route\n            path={Routes.SSH_KEYS_ADMIN}\n            component={SshKeyAdminPanel}\n        />\n        <Route\n            path={Routes.INSTANCE_TYPES}\n            component={InstanceTypesPanel}\n        />\n        <Route\n            path={Routes.SITE_MANAGER}\n            component={SiteManagerPanel}\n        />\n        <Route\n            path={Routes.KEEP_SERVICES}\n            component={KeepServicePanel}\n        />\n        <Route\n            path={Routes.USERS}\n            component={UserPanel}\n        />\n        <Route\n            path={Routes.API_CLIENT_AUTHORIZATIONS}\n            component={ApiClientAuthorizationPanel}\n        />\n        <Route\n            path={Routes.MY_ACCOUNT}\n            component={UserProfilePanel}\n        />\n        <Route\n            path={Routes.USER_PROFILE}\n            component={UserProfilePanel}\n        />\n        <Route\n            path={Routes.MY_PREFERENCES}\n            component={UserPreferencesPanel}\n        />\n        <Route\n            path={Routes.GROUPS}\n            component={GroupsPanel}\n        />\n        <Route\n            path={Routes.GROUP_DETAILS}\n            component={GroupDetailsPanel}\n        />\n        <Route\n            path={Routes.LINKS}\n            component={LinkPanel}\n        />\n        <Route\n            path={Routes.PUBLIC_FAVORITES}\n            component={PublicFavoritePanel}\n        />\n        <Route\n            path={Routes.LINK_ACCOUNT}\n            component={LinkAccountPanel}\n        />\n        <Route\n            path={Routes.COLLECTIONS_CONTENT_ADDRESS}\n            component={CollectionsContentAddressPanel}\n        />\n        <Route\n            path={Routes.EXTERNAL_CREDENTIALS}\n            component={ExternalCredentialsPanel}\n        />\n    </>\n);\n\nconst reduceRoutesFn: (a: React.ReactElement[], b: ElementListReducer) => React.ReactElement[] = (a, b) => b(a);\n\nroutes = React.createElement(\n    React.Fragment,\n    null,\n    pluginConfig.centerPanelList.reduce(reduceRoutesFn, React.Children.toArray(routes.props.children))\n);\n\ntype SplitterPanelSettings = {\n    storageKey: string;\n    minSize: number;\n    defaultSize: number;\n}\n\ninterface WorkbenchDataProps {\n    isUserActive: boolean;\n    isNotLinking: boolean;\n    sessionIdleTimeout: number;\n    sidePanelIsCollapsed: boolean;\n    isDetailsPanelOpen: boolean;\n}\n\ntype WorkbenchPanelProps = WithStyles<CssRules> & WorkbenchDataProps;\n\nexport const WorkbenchPanel = withStyles(styles)((props: WorkbenchPanelProps) => {\n    const { classes, sidePanelIsCollapsed, isNotLinking, isDetailsPanelOpen, isUserActive, sessionIdleTimeout } = props;\n\n    const SIDE_PANEL_COLLAPSED_WIDTH = 50;\n    const MAIN_PANEL_MIN_SIZE = 300;\n\n    const splitterSettings: Record<string, SplitterPanelSettings> = {\n        LEFT: {\n            storageKey: \"splitterSize\",\n            minSize: 210,\n            defaultSize: 240,\n        },\n        RIGHT: {\n            storageKey: \"detailsPanelSplitterSize\",\n            minSize: 250,\n            defaultSize: 320,\n        },\n    };\n\n    const saveSplitterSize = (panel: SplitterPanelSettings) => (size: number) => {\n        localStorage.setItem(panel.storageKey, size.toString());\n        if (panel.storageKey === 
splitterSettings.LEFT.storageKey) {\n            // Trigger resize on subSplitters when LEFT panel resized\n            nestedSplitter.current && nestedSplitter.current.handleResize();\n        }\n    };\n\n    const getSplitterInitialSize = (panel: SplitterPanelSettings) => {\n        const storedSize = localStorage.getItem(panel.storageKey);\n        return storedSize ? Math.max(Number(storedSize), panel.minSize) : panel.defaultSize;\n    };\n\n    // Updates left panel collapsed state\n    const applyCollapsedState = () => {\n        const sidePanel: Element = document.getElementsByClassName(\"layout-pane\")[0];\n\n        if (sidePanel) {\n            if (sidePanelIsCollapsed) {\n                // Using max-width overrides any resize calculations when left panel is collapsed\n                sidePanel.setAttribute(\"style\", `max-width: ${SIDE_PANEL_COLLAPSED_WIDTH}px`);\n            } else {\n                sidePanel.setAttribute(\"style\", `width: ${getSplitterInitialSize(splitterSettings.LEFT)}px`);\n            }\n        }\n\n        const splitter = document.getElementsByClassName(\"layout-splitter\")[0];\n        sidePanelIsCollapsed ? splitter?.setAttribute(\"disabled\", \"\") : splitter?.removeAttribute(\"disabled\");\n\n        // Trigger resize on subSplitters\n        nestedSplitter.current && nestedSplitter.current.handleResize();\n    };\n\n    const nestedSplitter = React.useRef<{ handleResize: () => void }>();\n\n    applyCollapsedState();\n\n    return (\n        <Grid\n            container\n            item\n            xs\n            className={classes.root}\n        >\n            {sessionIdleTimeout > 0 && <AutoLogout />}\n            <Grid\n                container\n                item\n                xs\n                className={classes.container}\n            >\n                <SplitterLayout\n                    customClassName={classNames(classes.splitter, classes.splitterSidePanel)}\n                    percentage={false}\n                    primaryIndex={1}\n                    secondaryInitialSize={getSplitterInitialSize(splitterSettings.LEFT)}\n                    secondaryMinSize={splitterSettings.LEFT.minSize}\n                    primaryMinSize={MAIN_PANEL_MIN_SIZE}\n                    // Resize event only exists for secondary\n                    onSecondaryPaneSizeChange={saveSplitterSize(splitterSettings.LEFT)}\n                >\n                    {isUserActive && isNotLinking && (\n                        <Grid\n                            container\n                            item\n                            xs\n                            component=\"aside\"\n                            direction=\"column\"\n                            className={classes.asidePanel}\n                        >\n                            <SidePanel />\n                        </Grid>\n                    )}\n                    <Grid\n                        container\n                        item\n                        xs\n                    >\n                        <SplitterLayout\n                            customClassName={classNames(classes.splitter, classes.splitterDetails)}\n                            percentage={false}\n                            primaryIndex={0}\n                            primaryMinSize={MAIN_PANEL_MIN_SIZE}\n                            secondaryInitialSize={getSplitterInitialSize(splitterSettings.RIGHT)}\n                            secondaryMinSize={splitterSettings.RIGHT.minSize}\n                            
onSecondaryPaneSizeChange={saveSplitterSize(splitterSettings.RIGHT)}\n                            ref={nestedSplitter}\n                        >\n                            <Grid\n                                container\n                                item\n                                xs\n                                component=\"main\"\n                                direction=\"column\"\n                                className={classes.contentWrapper}\n                            >\n                                <Grid xs>\n                                    {isNotLinking && <MainContentBar />}\n                                </Grid>\n                                <Grid\n                                    className={classes.content}\n                                >\n                                    <Switch>\n                                        {routes.props.children}\n                                        <Route\n                                            path={Routes.NO_MATCH}\n                                            component={NotFoundPanel}\n                                        />\n                                    </Switch>\n                                </Grid>\n                            </Grid>\n                            {isDetailsPanelOpen && <Grid item style={{height: \"100%\"}}>\n                                <DetailsPanel />\n                            </Grid>}\n                        </SplitterLayout>\n                    </Grid>\n                </SplitterLayout>\n            </Grid>\n            <AdvancedTabDialog />\n            <AttributesApiClientAuthorizationDialog />\n            <AttributesKeepServiceDialog />\n            <AttributesLinkDialog />\n            <AttributesSshKeyDialog />\n            <ChangeWorkflowDialog />\n            <ContextMenu />\n            <CopyCollectionDialog />\n            <DialogCollectionCreate />\n            <DialogProjectCreate />\n            <CreateRepositoryDialog />\n            <CreateSshKeyDialog />\n            <CreateUserDialog />\n            <CreateExternalCredentialDialog />\n            <DialogCollectionPartialCopyToSeparateCollection />\n            <DialogProcessRerun />\n            <DescriptionDialog />\n            <TokenDialog />\n            <FileRemoveDialog />\n            <DialogCollectionFilesUpload />\n            <GroupAttributesDialog />\n            <GroupMemberAttributesDialog />\n            <HelpApiClientAuthorizationDialog />\n            <DialogMoveCollection />\n            <DialogMoveProject />\n            <MultipleFilesRemoveDialog />\n            <PublicKeyDialog />\n            <DialogCollectionPartialCopyToNewCollection />\n            <DialogCollectionPartialCopyToExistingCollection />\n            <DialogCollectionPartialMoveToNewCollection />\n            <DialogCollectionPartialMoveToExistingCollection />\n            <DialogCollectionPartialMoveToSeparateCollections />\n            <ProcessInputDialog />\n            <RestoreCollectionVersionDialog />\n            <RemoveApiClientAuthorizationDialog />\n            <RemoveGroupDialog />\n            <RemoveGroupMemberDialog />\n            <RemoveMultipleGroupMembersDialog />\n            <RemoveKeepServiceDialog />\n            <RemoveLinkDialog />\n            <RemoveProcessDialog />\n            <RemoveWorkflowDialog />\n            <RemoveRepositoryDialog />\n            <RemoveSshKeyDialog />\n            <RemoveVirtualMachineDialog />\n            <RemoveVirtualMachineLoginDialog />\n  
          <RemoveExternalCredentialDialog />\n            <CancelProcessDialog />\n            <VirtualMachineAddLoginDialog />\n            <RenameFileDialog />\n            <DownloadFilesAsZipDialog />\n            <RepositoryAttributesDialog />\n            <RepositoriesSampleGitDialog />\n            <RichTextEditorDialog />\n            <SharingDialog />\n            <NotFoundDialog />\n            <Snackbar />\n            <DialogCollectionUpdate />\n            <UpdateProcessDialog />\n            <DialogProjectUpdate />\n            <UpdateExternalCredentialDialog />\n            <UserAttributesDialog />\n            <DeactivateDialog />\n            <ActivateDialog />\n            <SetupDialog />\n            <VirtualMachineAttributesDialog />\n            <WebDavS3InfoDialog />\n            <Banner />\n            {React.createElement(React.Fragment, null, pluginConfig.dialogs)}\n        </Grid>\n    );\n});\n"
  },
  {
    "path": "services/workbench2/src/views/workflow-panel/registered-workflow-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { CardHeader } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { connect, DispatchProp } from \"react-redux\";\nimport { RouteComponentProps } from 'react-router';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { RootState } from 'store/store';\nimport { WorkflowIcon } from 'components/icon/icon';\nimport { WorkflowResource } from 'models/workflow';\nimport { ProcessOutputCollectionFiles } from 'views/process-panel/process-output-collection-files';\nimport { WorkflowDetailsAttributes, RegisteredWorkflowPanelDataProps, getRegisteredWorkflowPanelData } from 'views-components/details-panel/workflow-details';\nimport { getResource } from 'store/resources/resources';\nimport { openContextMenuAndSelect } from 'store/context-menu/context-menu-actions';\nimport { MPVContainer, MPVPanelContent, MPVPanelState } from 'components/multi-panel-view/multi-panel-view';\nimport { ProcessIOCard, ProcessIOCardType } from 'views/process-panel/process-io-card';\nimport { NotFoundView } from 'views/not-found-panel/not-found-panel';\nimport { WorkflowProcessesPanel } from './workflow-processes-panel';\nimport { resourceToMenuKind } from 'common/resource-to-menu-kind';\nimport { DetailsCardRoot } from 'views-components/details-card/details-card-root';\nimport { OverviewPanel } from 'components/overview-panel/overview-panel';\n\ntype CssRules =\n    'root'\n    | 'mpvRoot'\n    | 'overviewCard'\n    | 'filesCard'\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        display: 'flex',\n        flexDirection: 'column',\n    },\n    mpvRoot: {\n        flexGrow: 1,\n        display: 'flex',\n        flexDirection: 'column',\n        flexWrap: 'nowrap',\n        minHeight: \"500px\",\n        '& > div': {\n            height: '100%',\n        },\n    },\n    overviewCard: {\n        height: \"100%\",\n    },\n    filesCard: {\n        padding: 0,\n        height: '100%',\n        display: 'flex',\n        flexDirection: 'column',\n    },\n});\n\ntype RegisteredWorkflowPanelProps = RegisteredWorkflowPanelDataProps & DispatchProp & WithStyles<CssRules>\n\nexport const RegisteredWorkflowPanel = withStyles(styles)(connect(\n    (state: RootState, props: RouteComponentProps<{ id: string }>) => {\n        const item = getResource<WorkflowResource>(props.match.params.id)(state.resources);\n        if (item) {\n            return getRegisteredWorkflowPanelData(item, state.auth);\n        }\n        return { item, inputParams: [], outputParams: [], workflowCollection: \"\", gitprops: {} };\n    })(\n        class extends React.Component<RegisteredWorkflowPanelProps> {\n            render() {\n                const { classes, item, inputParams, outputParams, workflowCollection } = this.props;\n                // Set up panels and default tab\n                const panelsData: MPVPanelState[] = [\n                    { name: 'Overview' },\n                    { name: 'Runs', visible: true },\n                    { name: 'Outputs' },\n                    { name: 'Inputs' },\n                    { name: 'Definition' }\n                ];\n                return item ? 
(\n                    <section className={classes.root}>\n                        <DetailsCardRoot />\n                        <MPVContainer\n                            className={classes.mpvRoot}\n                            justifyContent='flex-start'\n                            panelStates={panelsData}>\n                            <MPVPanelContent\n                                xs='auto'\n                                className={classes.overviewCard}\n                                data-cy='registered-workflow-info-panel'>\n                                        <OverviewPanel detailsElement={<WorkflowDetailsAttributes workflow={item} />} />\n                            </MPVPanelContent>\n                            <MPVPanelContent\n                                forwardProps\n                                xs\n                                maxHeight='100%'>\n                                <WorkflowProcessesPanel />\n                            </MPVPanelContent>\n                            <MPVPanelContent\n                                forwardProps\n                                xs\n                                data-cy='process-outputs'\n                                maxHeight='100%'>\n                                <ProcessIOCard\n                                    label={ProcessIOCardType.OUTPUT}\n                                    params={outputParams}\n                                    raw={{}}\n                                    forceShowParams={true}/>\n                            </MPVPanelContent>\n                            <MPVPanelContent\n                                forwardProps\n                                xs\n                                data-cy='process-inputs'\n                                maxHeight='100%'>\n                                <ProcessIOCard\n                                    label={ProcessIOCardType.INPUT}\n                                    params={inputParams}\n                                    raw={{}}\n                                    forceShowParams={true}/>\n                            </MPVPanelContent>\n                            <MPVPanelContent\n                                xs\n                                maxHeight='100%'>\n                                <section className={classes.filesCard}>\n                                    <CardHeader title='Workflow Definition' />\n                                    <ProcessOutputCollectionFiles\n                                        isWritable={false}\n                                        currentItemUuid={workflowCollection}\n                                    />\n                                </section>\n                            </MPVPanelContent>\n                        </MPVContainer>\n                    </section>\n                ) : (\n                    <NotFoundView\n                        icon={WorkflowIcon}\n                        messages={['Workflow not found']}\n                    />\n                );\n            }\n\n            handleContextMenu = (event: React.MouseEvent<any>) => {\n                const { uuid, ownerUuid, name, description,\n                    kind } = this.props.item;\n                const menuKind = this.props.dispatch<any>(resourceToMenuKind(uuid));\n                const resource = {\n                    uuid,\n                    ownerUuid,\n                    name,\n                    description,\n                    kind,\n                    menuKind,\n                };\n 
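               // Open the context menu for this resource using the menu kind resolved above.\n 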
               // Avoid expanding/collapsing the panel\n                event.stopPropagation();\n                this.props.dispatch<any>(openContextMenuAndSelect(event, resource));\n            }\n        }\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views/workflow-panel/workflow-description-card.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { CardContent, Tab, Tabs, Table, TableHead, TableCell, TableBody, TableRow, Typography } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { WorkflowIcon } from 'components/icon/icon';\nimport { DataTableDefaultView } from 'components/data-table-default-view/data-table-default-view';\nimport { parseWorkflowDefinition, getWorkflowInputs, getInputLabel, stringifyInputType } from 'models/workflow';\nimport { WorkflowDetailsCardDataProps, WorkflowDetailsAttributes } from 'views-components/details-panel/workflow-details';\n\nexport type CssRules = 'root' | 'tab' | 'inputTab' | 'graphTab' | 'graphTabWithChosenWorkflow' | 'descriptionTab' | 'inputsTable' | 'workflowName';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        height: '100%'\n    },\n    tab: {\n        minWidth: '33%'\n    },\n    inputTab: {\n        overflow: 'auto',\n        height: \"50vh\",\n        marginTop: theme.spacing(1)\n    },\n    graphTab: {\n        marginTop: theme.spacing(1),\n    },\n    graphTabWithChosenWorkflow: {\n        overflow: 'auto',\n        height: '450px',\n        marginTop: theme.spacing(1),\n    },\n    descriptionTab: {\n        overflow: 'auto',\n        height: \"50vh\",\n        marginTop: theme.spacing(1),\n    },\n    inputsTable: {\n        tableLayout: 'fixed',\n    },\n    workflowName: {\n        minHeight: \"4rem\",\n    }\n});\n\ntype WorkflowDetailsCardProps = WorkflowDetailsCardDataProps & WithStyles<CssRules>;\n\nexport const WorkflowDetailsCard = withStyles(styles)(\n    class extends React.Component<WorkflowDetailsCardProps> {\n        state = {\n            value: 0,\n        };\n\n        handleChange = (event: React.MouseEvent<HTMLElement>, value: number) => {\n            this.setState({ value });\n        }\n\n        render() {\n            const { classes, workflow } = this.props;\n            const { value } = this.state;\n            return <div className={classes.root}>\n                <Typography className={classes.workflowName} variant='h6'>\n                    {workflow && workflow.name}\n                </Typography>\n                <Tabs value={value} onChange={this.handleChange} centered={true}>\n                    <Tab className={classes.tab} label=\"Description\" />\n                    <Tab className={classes.tab} label=\"Inputs\" />\n                    <Tab className={classes.tab} label=\"Details\" />\n                </Tabs>\n                {value === 0 && <CardContent className={classes.descriptionTab}>\n                    {workflow ? <div dangerouslySetInnerHTML={{ __html: workflow.description || '(no-description)' }}></div> : (\n                        <DataTableDefaultView\n                            icon={WorkflowIcon}\n                            messages={['Please select a workflow to see its description.']} />\n                    )}\n                </CardContent>}\n                {value === 1 && <CardContent className={classes.inputTab}>\n                    {workflow\n                    ? 
this.renderInputsTable()\n                    : <DataTableDefaultView\n                          icon={WorkflowIcon}\n                          messages={['Please select a workflow to see its inputs.']} />\n                    }\n                </CardContent>}\n                {value === 2 && <CardContent className={classes.descriptionTab}>\n                    {workflow\n                    ? <WorkflowDetailsAttributes workflow={workflow} />\n                    : <DataTableDefaultView\n                          icon={WorkflowIcon}\n                          messages={['Please select a workflow to see its details.']} />\n                    }\n                </CardContent>}\n            </div>;\n        }\n\n        get inputs() {\n            if (this.props.workflow) {\n                const definition = parseWorkflowDefinition(this.props.workflow);\n                if (definition) {\n                    return getWorkflowInputs(definition);\n                }\n            }\n            return undefined;\n        }\n\n        renderInputsTable() {\n            return <Table className={this.props.classes.inputsTable}>\n                <TableHead>\n                    <TableRow>\n                        <TableCell>Label</TableCell>\n                        <TableCell>Type</TableCell>\n                        <TableCell>Description</TableCell>\n                    </TableRow>\n                </TableHead>\n                <TableBody>\n                    {this.inputs && this.inputs.map(input =>\n                        <TableRow key={input.id}>\n                            <TableCell>{getInputLabel(input)}</TableCell>\n                            <TableCell>{stringifyInputType(input)}</TableCell>\n                            <TableCell>{input.doc}</TableCell>\n                        </TableRow>)}\n                </TableBody>\n            </Table>;\n        }\n    });\n"
  },
  {
    "path": "services/workbench2/src/views/workflow-panel/workflow-graph.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { WorkflowResource } from \"models/workflow\";\nimport { WorkflowFactory } from \"cwlts/models\";\nimport yaml from 'js-yaml';\nimport \"lib/cwl-svg/assets/styles/themes/rabix-dark/theme.css\";\nimport \"lib/cwl-svg/plugins/port-drag/theme.dark.css\";\nimport \"lib/cwl-svg/plugins/selection/theme.dark.css\";\nimport {\n    SelectionPlugin,\n    SVGArrangePlugin,\n    SVGEdgeHoverPlugin,\n    SVGNodeMovePlugin,\n    SVGPortDragPlugin, Workflow,\n    ZoomPlugin\n} from \"lib/cwl-svg\";\n\ninterface WorkflowGraphProps {\n    workflow: WorkflowResource;\n}\nexport class WorkflowGraph extends React.Component<WorkflowGraphProps, {}> {\n    private svgRoot: React.RefObject<SVGSVGElement> = React.createRef();\n\n    setGraph() {\n        const graphs = yaml.load(this.props.workflow.definition, { json: true });\n\n        let workflowGraph = graphs;\n        if (graphs.$graph) {\n          workflowGraph = graphs.$graph.find((g: any) => g.class === 'Workflow');\n        }\n\n        const model = WorkflowFactory.from(workflowGraph);\n\n        const workflow = new Workflow({\n            model,\n            svgRoot: this.svgRoot.current!,\n            plugins: [\n                new SVGArrangePlugin(),\n                new SVGEdgeHoverPlugin(),\n                new SVGNodeMovePlugin({\n                    movementSpeed: 2\n                }),\n                new SVGPortDragPlugin(),\n                new SelectionPlugin(),\n                new ZoomPlugin(),\n            ]\n        });\n        workflow.draw();\n    }\n\n    componentDidMount() {\n        this.setGraph();\n    }\n\n    componentDidUpdate() {\n        this.setGraph();\n    }\n\n    render() {\n        return <svg\n            ref={this.svgRoot}\n            className=\"cwl-workflow\"\n            style={{\n                width: '100%',\n                height: '100%'\n            }}\n        />;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/views/workflow-panel/workflow-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport {\n    ResourceLastModifiedDate,\n    ResourceWorkflowName,\n    ResourceWorkflowStatus,\n    ResourceShare,\n    ResourceRunProcess\n} from \"views-components/data-explorer/renderers\";\nimport { DataColumns, SortDirection } from 'components/data-table/data-column';\nimport { WorkflowResource } from 'models/workflow';\nimport { createTree } from 'models/tree';\n\n// TODO: restore filters\n// const resourceStatus = (type: string) => {\n//     switch (type) {\n//         case ResourceStatus.PUBLIC:\n//             return \"Public\";\n//         case ResourceStatus.PRIVATE:\n//             return \"Private\";\n//         case ResourceStatus.SHARED:\n//             return \"Shared\";\n//         default:\n//             return \"Unknown\";\n//     }\n// };\n\nexport enum WorkflowPanelColumnNames {\n    NAME = \"Name\",\n    AUTHORISATION = \"Authorisation\",\n    LAST_MODIFIED = \"Last modified\",\n    SHARE = 'Share'\n}\n\nexport const workflowPanelColumns: DataColumns<string, WorkflowResource> = [\n    {\n        name: WorkflowPanelColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.ASC, field: \"name\" },\n        filters: createTree(),\n        render: (uuid: string) => <ResourceWorkflowName uuid={uuid} />\n    },\n    {\n        name: WorkflowPanelColumnNames.AUTHORISATION,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        // TODO: restore filters\n        // filters: [\n        //     {\n        //         name: resourceStatus(ResourceStatus.PUBLIC),\n        //         selected: true,\n        //         type: ResourceStatus.PUBLIC\n        //     },\n        //     {\n        //         name: resourceStatus(ResourceStatus.PRIVATE),\n        //         selected: true,\n        //         type: ResourceStatus.PRIVATE\n        //     },\n        //     {\n        //         name: resourceStatus(ResourceStatus.SHARED),\n        //         selected: true,\n        //         type: ResourceStatus.SHARED\n        //     }\n        // ],\n        render: (uuid: string) => <ResourceWorkflowStatus uuid={uuid} />,\n    },\n    {\n        name: WorkflowPanelColumnNames.LAST_MODIFIED,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"modifiedAt\" },\n        filters: createTree(),\n        render: (uuid: string) => <ResourceLastModifiedDate uuid={uuid} />\n    },\n    {\n        name: '',\n        selected: true,\n        configurable: false,\n        filters: createTree(),\n        render: (uuid: string) => <ResourceShare uuid={uuid} />\n    },\n    {\n        name: '',\n        selected: true,\n        configurable: false,\n        filters: createTree(),\n        render: (uuid: string) => <ResourceRunProcess uuid={uuid} />\n    }\n];\n"
  },
  {
    "path": "services/workbench2/src/views/workflow-panel/workflow-panel-view.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { WorkflowIcon } from 'components/icon/icon';\nimport { WORKFLOW_PANEL_ID } from 'store/workflow-panel/workflow-panel-actions';\nimport { DataTableFilterItem } from 'components/data-table-filters/data-table-filters';\nimport { Grid, Paper } from '@mui/material';\nimport { WorkflowDetailsCard } from './workflow-description-card';\nimport { WorkflowResource } from 'models/workflow';\n\nexport interface WorkflowPanelFilter extends DataTableFilterItem {\n    type: ResourceStatus;\n}\n\nexport interface WorkflowPanelDataProps {\n    uuid?: string;\n    workflows?: WorkflowResource[];\n}\n\nexport interface WorfklowPanelActionProps {\n    handleRowDoubleClick: (workflowUuid: string) => void;\n    handleRowClick: (workflowUuid: string) => void;\n}\n\nexport type WorkflowPanelProps = WorkflowPanelDataProps & WorfklowPanelActionProps;\n\nexport enum ResourceStatus {\n    PUBLIC = \"Public\",\n    PRIVATE = \"Private\",\n    SHARED = \"Shared\"\n}\n\nexport const WorkflowPanelView = (props: WorkflowPanelProps) => {\n    const workflow = props.uuid ? props.workflows?.find(workflow => workflow.uuid === props.uuid) : undefined;\n    return <Grid container spacing={2} style={{ minHeight: '500px' }}>\n        <Grid item xs={6}>\n            <DataExplorer\n                id={WORKFLOW_PANEL_ID}\n                onRowClick={props.handleRowClick}\n                onRowDoubleClick={props.handleRowDoubleClick}\n                contextMenuColumn={false}\n                onContextMenu={e => e}\n                defaultViewIcon={WorkflowIcon}\n                defaultViewMessages={['Workflow list is empty.']} />\n        </Grid>\n        <Grid item xs={6}>\n            <Paper style={{ height: '100%' }}>\n                <WorkflowDetailsCard workflow={workflow} />\n            </Paper>\n        </Grid>\n    </Grid>;\n};\n"
  },
  {
    "path": "services/workbench2/src/views/workflow-panel/workflow-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { navigateTo } from 'store/navigation/navigation-action';\nimport { WorkflowPanelView } from 'views/workflow-panel/workflow-panel-view';\nimport { WorfklowPanelActionProps, WorkflowPanelDataProps } from './workflow-panel-view';\nimport { showWorkflowDetails } from 'store/workflow-panel/workflow-panel-actions';\nimport { RootState } from 'store/store';\nimport { WORKFLOW_PANEL_DETAILS_UUID } from 'store/workflow-panel/workflow-panel-actions';\nimport { getProperty } from 'store/properties/properties';\n\nconst mapStateToProps = (state: RootState): WorkflowPanelDataProps => {\n    const uuid = getProperty<string>(WORKFLOW_PANEL_DETAILS_UUID)(state.properties);\n    const workflows = state.runProcessPanel.workflows;\n    return {\n        uuid,\n        workflows,\n    }\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): WorfklowPanelActionProps => ({\n    handleRowDoubleClick: (uuid: string) => {\n        dispatch<any>(navigateTo(uuid));\n    },\n\n    handleRowClick: (uuid: string) => {\n        dispatch(showWorkflowDetails(uuid));\n    }\n});\n\nexport const WorkflowPanel = connect(mapStateToProps, mapDispatchToProps)(WorkflowPanelView);\n"
  },
  {
    "path": "services/workbench2/src/views/workflow-panel/workflow-processes-panel-columns.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DataColumns, SortDirection } from 'components/data-table/data-column';\nimport { ResourceCreatedAtDate, ProcessStatus, ContainerRunTime } from 'views-components/data-explorer/renderers';\nimport { ResourceName } from 'views-components/data-explorer/renderers';\nimport { createTree } from 'models/tree';\nimport { getInitialProcessStatusFilters } from 'store/resource-type-filters/resource-type-filters';\nimport { ProcessResource } from 'models/process';\n\n\nexport enum WorkflowProcessesPanelColumnNames {\n    NAME = \"Name\",\n    STATUS = \"Status\",\n    CREATED_AT = \"Created At\",\n    RUNTIME = \"Run Time\"\n}\n\nexport const workflowProcessesPanelColumns: DataColumns<string, ProcessResource> = [\n    {\n        name: WorkflowProcessesPanelColumnNames.NAME,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.NONE, field: \"name\" },\n        filters: createTree(),\n        render: uuid => <ResourceName uuid={uuid} />\n    },\n    {\n        name: WorkflowProcessesPanelColumnNames.STATUS,\n        selected: true,\n        configurable: true,\n        mutuallyExclusiveFilters: true,\n        filters: getInitialProcessStatusFilters(),\n        render: uuid => <ProcessStatus uuid={uuid} />,\n    },\n    {\n        name: WorkflowProcessesPanelColumnNames.CREATED_AT,\n        selected: true,\n        configurable: true,\n        sort: { direction: SortDirection.DESC, field: \"createdAt\" },\n        filters: createTree(),\n        render: uuid => <ResourceCreatedAtDate uuid={uuid} />\n    },\n    {\n        name: WorkflowProcessesPanelColumnNames.RUNTIME,\n        selected: true,\n        configurable: true,\n        filters: createTree(),\n        render: uuid => <ContainerRunTime uuid={uuid} />\n    }\n];\n"
  },
  {
    "path": "services/workbench2/src/views/workflow-panel/workflow-processes-panel-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { DataExplorer } from \"views-components/data-explorer/data-explorer\";\nimport { DataTableFilterItem } from 'components/data-table-filters/data-table-filters';\nimport { ContainerRequestState } from 'models/container-request';\nimport { ResourceKind } from 'models/resource';\nimport { ProcessIcon } from 'components/icon/icon';\nimport { WORKFLOW_PROCESSES_PANEL_ID } from 'store/workflow-panel/workflow-panel-actions';\nimport { ResourcesState } from 'store/resources/resources';\nimport { MPVPanelProps } from 'components/multi-panel-view/multi-panel-view';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Typography } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { getResource } from 'store/resources/resources';\nimport { WorkflowResource } from 'models/workflow';\nimport { RootState } from 'store/store';\n\ntype CssRules = 'iconHeader' | 'cardHeader';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    iconHeader: {\n        fontSize: '1.875rem',\n        color: theme.customs.colors.greyL,\n        marginRight: theme.spacing(2),\n    },\n    cardHeader: {\n        display: 'flex',\n        marginTop: '-5px',\n        marginBottom: '15px',\n    },\n});\n\nexport interface WorkflowProcessesPanelFilter extends DataTableFilterItem {\n    type: ResourceKind | ContainerRequestState;\n}\n\nexport interface WorkflowProcessesPanelDataProps {\n    resources: ResourcesState;\n    workflow?: WorkflowResource;\n}\n\nexport interface WorkflowProcessesPanelActionProps {\n    onItemClick: (item: string) => void;\n    onContextMenu: (event: React.MouseEvent<HTMLElement>, item: string, resources: ResourcesState) => void;\n    onItemDoubleClick: (item: string) => void;\n}\n\ntype WorkflowProcessesPanelProps = WorkflowProcessesPanelActionProps & WorkflowProcessesPanelDataProps;\n\nconst DEFAULT_VIEW_MESSAGES = [\n    'No processes available for listing.',\n    'The current process may not have any or none matches current filtering.'\n];\n\ntype WorkflowProcessesTitleProps = WithStyles<CssRules>;\n\nconst WorkflowProcessesTitle = withStyles(styles)(\n    ({ classes }: WorkflowProcessesTitleProps) =>\n        <div className={classes.cardHeader}>\n            <ProcessIcon className={classes.iconHeader} /><span></span>\n            <Typography noWrap variant='h6' color='inherit'>\n                Run History\n            </Typography>\n        </div>\n);\n\nconst mapStateToProps = (state: RootState): Pick<WorkflowProcessesPanelDataProps, 'workflow'> => {\n    const currentRouteUuid = state.properties.currentRouteUuid;\n    const workflow = getResource<WorkflowResource>(currentRouteUuid)(state.resources);\n    return {\n        workflow,\n    };\n};\n\nexport const WorkflowProcessesPanelRoot = connect(mapStateToProps)((props: WorkflowProcessesPanelProps & MPVPanelProps) => {\n    return <DataExplorer\n        id={WORKFLOW_PROCESSES_PANEL_ID}\n        onRowClick={props.onItemClick}\n        onRowDoubleClick={props.onItemDoubleClick}\n        onContextMenu={(event, item) => props.onContextMenu(event, item, props.resources)}\n        contextMenuColumn={false}\n        defaultViewIcon={ProcessIcon}\n        
defaultViewMessages={DEFAULT_VIEW_MESSAGES}\n        panelName={props.panelName}\n        parentResource={props.workflow}\n        title={<WorkflowProcessesTitle />}\n        />;\n});\n"
  },
  {
    "path": "services/workbench2/src/views/workflow-panel/workflow-processes-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { openProcessContextMenu } from \"store/context-menu/context-menu-actions\";\nimport { WorkflowProcessesPanelRoot, WorkflowProcessesPanelActionProps, WorkflowProcessesPanelDataProps } from \"views/workflow-panel/workflow-processes-panel-root\";\nimport { RootState } from \"store/store\";\nimport { navigateTo } from \"store/navigation/navigation-action\";\nimport { getProcess } from \"store/processes/process\";\nimport { toggleOne } from 'store/multiselect/multiselect-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch): WorkflowProcessesPanelActionProps => ({\n    onContextMenu: (event, resourceUuid, resources) => {\n        const process = getProcess(resourceUuid)(resources);\n        if (process) {\n            dispatch<any>(openProcessContextMenu(event, process));\n        }\n    },\n    onItemClick: (uuid: string) => {\n        dispatch<any>(toggleOne(uuid))\n    },\n    onItemDoubleClick: uuid => {\n        dispatch<any>(navigateTo(uuid));\n    },\n});\n\nconst mapStateToProps = (state: RootState): WorkflowProcessesPanelDataProps => ({\n    resources: state.resources,\n});\n\nexport const WorkflowProcessesPanel = connect(mapStateToProps, mapDispatchToProps)(WorkflowProcessesPanelRoot);\n"
  },
  {
    "path": "services/workbench2/src/views-components/add-session/add-session.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RouteProps } from \"react-router\";\nimport React from \"react\";\nimport { connect, DispatchProp } from \"react-redux\";\nimport { getUrlParameter } from \"common/url\";\nimport { navigateToSiteManager } from \"store/navigation/navigation-action\";\nimport { addSession } from \"store/auth/auth-action-session\";\n\nexport const AddSession = connect()(\n    class extends React.Component<RouteProps & DispatchProp<any>, {}> {\n        componentDidMount() {\n            const search = this.props.location ? this.props.location.search : \"\";\n            const apiToken = getUrlParameter(search, 'api_token');\n            const baseURL = getUrlParameter(search, 'baseURL');\n\n            this.props.dispatch(addSession(baseURL, apiToken));\n            this.props.dispatch(navigateToSiteManager);\n        }\n        render() {\n            return <div />;\n        }\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/advanced-tab-dialog/advanced-tab-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport {\n    Dialog,\n    DialogActions,\n    Button,\n    DialogTitle,\n    DialogContent,\n    Tabs,\n    Tab,\n    DialogContentText,\n} from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithDialogProps } from 'store/dialog/with-dialog';\nimport { withDialog } from \"store/dialog/with-dialog\";\nimport { compose } from 'redux';\nimport { AdvancedTabDialogData, ADVANCED_TAB_DIALOG } from \"store/advanced-tab/advanced-tab\";\nimport { DefaultCodeSnippet } from \"components/default-code-snippet/default-code-snippet\";\nimport { MetadataTab } from 'views-components/advanced-tab-dialog/metadataTab';\nimport { LinkResource } from \"models/link\";\nimport { ListResults } from \"services/common-service/common-service\";\n\ntype CssRules = 'content' | 'codeSnippet' | 'spacing';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    content: {\n        paddingTop: theme.spacing(3),\n        minHeight: '400px',\n        minWidth: '1232px'\n    },\n    codeSnippet: {\n        borderRadius: theme.spacing(0.5),\n        border: '1px solid',\n        borderColor: theme.palette.grey[\"400\"],\n        maxHeight: '400px'\n    },\n    spacing: {\n        paddingBottom: theme.spacing(2)\n    },\n});\n\nexport const AdvancedTabDialog = compose(\n    withDialog(ADVANCED_TAB_DIALOG),\n    withStyles(styles),\n)(\n    class extends React.Component<WithDialogProps<AdvancedTabDialogData> & WithStyles<CssRules>>{\n        state = {\n            value: 0,\n        };\n\n        componentDidMount() {\n            this.setState({ value: 0 });\n        }\n\n        handleChange = (event: React.MouseEvent<HTMLElement>, value: number) => {\n            this.setState({ value });\n        }\n        render() {\n            const { classes, open, closeDialog } = this.props;\n            const { value } = this.state;\n            const {\n                apiResponse,\n                metadata,\n                pythonHeader,\n                pythonExample,\n                cliGetHeader,\n                cliGetExample,\n                cliUpdateHeader,\n                cliUpdateExample,\n                curlHeader,\n                curlExample,\n                uuid,\n            } = this.props.data;\n            return (\n                <Dialog\n                    open={open}\n                    maxWidth=\"lg\"\n                    onClose={closeDialog}\n                    TransitionProps={{\n                        onExit: () => this.setState({ value: 0 })\n                    }}>\n                    <DialogTitle>API Details</DialogTitle>\n                    <Tabs value={value} onChange={this.handleChange} variant=\"fullWidth\">\n                        <Tab label=\"API RESPONSE\" />\n                        <Tab label=\"METADATA\" />\n                        <Tab label=\"PYTHON EXAMPLE\" />\n                        <Tab label=\"CLI EXAMPLE\" />\n                        <Tab label=\"CURL EXAMPLE\" />\n                    </Tabs>\n                    <DialogContent className={classes.content}>\n                        {value === 0 && <div>{dialogContentExample(apiResponse, classes)}</div>}\n                        {value === 1 && <div>\n                            {metadata !== '' && (metadata as 
ListResults<LinkResource>).items.length > 0 ?\n                                <MetadataTab items={(metadata as ListResults<LinkResource>).items} uuid={uuid} />\n                                : dialogContentHeader('(No metadata links found)')}\n                        </div>}\n                        {value === 2 && dialogContent(pythonHeader, pythonExample, classes)}\n                        {value === 3 && <div>\n                            {dialogContent(cliGetHeader, cliGetExample, classes)}\n                            {dialogContent(cliUpdateHeader, cliUpdateExample, classes)}\n                        </div>}\n                        {value === 4 && dialogContent(curlHeader, curlExample, classes)}\n                    </DialogContent>\n                    <DialogActions>\n                        <Button data-cy=\"close-advanced-dialog\" variant='text' color='primary' onClick={closeDialog}>\n                            Close\n                        </Button>\n                    </DialogActions>\n                </Dialog>\n            );\n        }\n    }\n);\n\nconst dialogContent = (header: string, example: string, classes: any) =>\n    <div className={classes.spacing}>\n        {dialogContentHeader(header)}\n        {dialogContentExample(example, classes)}\n    </div>;\n\nconst dialogContentHeader = (header: string) =>\n    <DialogContentText>\n        {header}\n    </DialogContentText>;\n\nconst dialogContentExample = (example: JSX.Element | string, classes: any) => {\n    // Pass string to lines param or JSX to child props\n    const stringData = example && (example as string).length ? (example as string) : undefined;\n    return <DefaultCodeSnippet\n        apiResponse\n        className={classes.codeSnippet}\n        lines={stringData ? [stringData] : []}\n    >\n        {React.isValidElement(example) ? (example as JSX.Element) : undefined}\n    </DefaultCodeSnippet>;\n}\n"
  },
  {
    "path": "services/workbench2/src/views-components/advanced-tab-dialog/metadataTab.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Table, TableHead, TableCell, TableRow, TableBody } from '@mui/material';\n\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\n\ntype CssRules = 'cell';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    cell: {\n        paddingRight: theme.spacing(2)\n    }\n});\n\ninterface MetadataTable {\n    uuid: string;\n    linkClass: string;\n    name: string;\n    tailUuid: string;\n    headUuid: string;\n    properties: any;\n}\n\ninterface MetadataProps {\n    items: MetadataTable[];\n    uuid: string;\n}\n\nexport const MetadataTab = withStyles(styles)((props: MetadataProps & WithStyles<CssRules>) =>\n    <Table>\n        <TableHead>\n            <TableRow>\n                <TableCell>uuid</TableCell>\n                <TableCell>link_class</TableCell>\n                <TableCell>name</TableCell>\n                <TableCell>tail</TableCell>\n                <TableCell>head</TableCell>\n                <TableCell>properties</TableCell>\n            </TableRow>\n        </TableHead>\n        <TableBody>\n            {props.items.map((it, index) =>\n                <TableRow key={index}>\n                    <TableCell className={props.classes.cell}>{it.uuid}</TableCell>\n                    <TableCell className={props.classes.cell}>{it.linkClass}</TableCell>\n                    <TableCell className={props.classes.cell}>{it.name}</TableCell>\n                    <TableCell className={props.classes.cell}>{it.tailUuid}</TableCell>\n                    <TableCell className={props.classes.cell}>{it.headUuid === props.uuid ? 'this' : it.headUuid}</TableCell>\n                    <TableCell className={props.classes.cell}>{JSON.stringify(it.properties)}</TableCell>\n                </TableRow>\n            )}\n        </TableBody>\n    </Table>\n);"
  },
  {
    "path": "services/workbench2/src/views-components/api-client-authorizations-dialog/attributes-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { compose } from 'redux';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button, Grid } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithDialogProps, withDialog } from \"store/dialog/with-dialog\";\nimport { API_CLIENT_AUTHORIZATION_ATTRIBUTES_DIALOG } from 'store/api-client-authorizations/api-client-authorizations-actions';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { ApiClientAuthorization } from 'models/api-client-authorization';\nimport { formatDateTime } from 'common/formatters';\n\ntype CssRules = 'root';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        fontSize: '0.875rem',\n        '& div:nth-child(odd)': {\n            textAlign: 'right',\n            color: theme.palette.grey[\"500\"]\n        }\n    }\n});\n\ninterface AttributesKeepServiceDialogDataProps {\n    apiClientAuthorization: ApiClientAuthorization;\n}\n\nexport const AttributesApiClientAuthorizationDialog = compose(\n    withDialog(API_CLIENT_AUTHORIZATION_ATTRIBUTES_DIALOG),\n    withStyles(styles))(\n        ({ open, closeDialog, data, classes }: WithDialogProps<AttributesKeepServiceDialogDataProps> & WithStyles<CssRules>) =>\n            <Dialog open={open} onClose={closeDialog} fullWidth maxWidth='sm'>\n                <DialogTitle>Attributes</DialogTitle>\n                <DialogContent>\n                    {data.apiClientAuthorization && <Grid container direction=\"row\" spacing={2} className={classes.root}>\n                        <Grid item xs={5}>UUID</Grid>\n                        <Grid item xs={7}>{data.apiClientAuthorization.uuid}</Grid>\n                        <Grid item xs={5}>Owner uuid</Grid>\n                        <Grid item xs={7}>{data.apiClientAuthorization.ownerUuid}</Grid>\n                        <Grid item xs={5}>API Token</Grid>\n                        <Grid item xs={7}>{data.apiClientAuthorization.apiToken}</Grid>\n                        <Grid item xs={5}>Created by IP address</Grid>\n                        <Grid item xs={7}>{data.apiClientAuthorization.createdByIpAddress || '(none)'}</Grid>\n                        <Grid item xs={5}>Expires at</Grid>\n                        <Grid item xs={7}>{formatDateTime(data.apiClientAuthorization.expiresAt) || '(none)'}</Grid>\n                        <Grid item xs={5}>Last used at</Grid>\n                        <Grid item xs={7}>{formatDateTime(data.apiClientAuthorization.lastUsedAt) || '(none)'}</Grid>\n                        <Grid item xs={5}>Last used by IP address</Grid>\n                        <Grid item xs={7}>{data.apiClientAuthorization.lastUsedByIpAddress || '(none)'}</Grid>\n                        <Grid item xs={5}>Scopes</Grid>\n                        <Grid item xs={7}>{JSON.stringify(data.apiClientAuthorization.scopes || '(none)')}</Grid>\n                        <Grid item xs={5}>User ID</Grid>\n                        <Grid item xs={7}>{data.apiClientAuthorization.userId || '(none)'}</Grid>\n                        <Grid item xs={5}>Created at</Grid>\n                        <Grid item xs={7}>{formatDateTime(data.apiClientAuthorization.createdAt) || '(none)'}</Grid>\n                        <Grid item xs={5}>Updated at</Grid>\n                
        <Grid item xs={7}>{formatDateTime(data.apiClientAuthorization.updatedAt) || '(none)'}</Grid>\n                    </Grid>}\n                </DialogContent>\n                <DialogActions>\n                    <Button\n                        variant='text'\n                        color='primary'\n                        onClick={closeDialog}>\n                        Close\n                    </Button>\n                </DialogActions>\n            </Dialog>\n    );\n"
  },
  {
    "path": "services/workbench2/src/views-components/api-client-authorizations-dialog/help-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button } from \"@mui/material\";\nimport { WithDialogProps } from \"store/dialog/with-dialog\";\nimport { withDialog } from 'store/dialog/with-dialog';\nimport { DefaultCodeSnippet } from 'components/default-code-snippet/default-code-snippet';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { compose } from \"redux\";\nimport { API_CLIENT_AUTHORIZATION_HELP_DIALOG } from 'store/api-client-authorizations/api-client-authorizations-actions';\n\ntype CssRules = 'codeSnippet';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    codeSnippet: {\n        borderRadius: theme.spacing(0.5),\n        border: `1px solid ${theme.palette.grey[\"400\"]}`,\n        '& pre': {\n            fontSize: '0.815rem'\n        }\n    }\n});\n\ninterface HelpApiClientAuthorizationDataProps {\n    apiHost: string;\n    apiToken: string;\n    email: string;\n}\n\nexport const HelpApiClientAuthorizationDialog = compose(\n    withDialog(API_CLIENT_AUTHORIZATION_HELP_DIALOG),\n    withStyles(styles))(\n        (props: WithDialogProps<HelpApiClientAuthorizationDataProps> & WithStyles<CssRules>) =>\n            <Dialog open={props.open}\n                onClose={props.closeDialog}\n                fullWidth\n                maxWidth='md'>\n                <DialogTitle>HELP:</DialogTitle>\n                <DialogContent>\n                    <DefaultCodeSnippet\n                        className={props.classes.codeSnippet}\n                        lines={[snippetText(props.data)]} />\n                        {/* // lines={snippetText2(props.data)} /> */}\n                </DialogContent>\n                <DialogActions>\n                    <Button\n                        variant='text'\n                        color='primary'\n                        onClick={props.closeDialog}>\n                        Close\n                </Button>\n                </DialogActions>\n            </Dialog>\n    );\n\nconst snippetText = (data: HelpApiClientAuthorizationDataProps) => `### Pasting the following lines at a shell prompt will allow Arvados SDKs\n### to authenticate to your account, ${data.email}\n\nread ARVADOS_API_TOKEN <<EOF\n${data.apiToken}\nEOF\nexport ARVADOS_API_TOKEN ARVADOS_API_HOST=${data.apiHost}\nunset ARVADOS_API_HOST_INSECURE`;\n"
  },
  {
    "path": "services/workbench2/src/views-components/api-client-authorizations-dialog/remove-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { API_CLIENT_AUTHORIZATION_REMOVE_DIALOG, removeApiClientAuthorization } from 'store/api-client-authorizations/api-client-authorizations-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeApiClientAuthorization(props.data.uuid));\n    }\n});\n\nexport const RemoveApiClientAuthorizationDialog = compose(\n    withDialog(API_CLIENT_AUTHORIZATION_REMOVE_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);"
  },
  {
    "path": "services/workbench2/src/views-components/api-token/api-token.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RouteProps } from \"react-router\";\nimport React from \"react\";\nimport { Dispatch } from \"redux\";\nimport { RootState } from \"store/store\";\nimport { connect, DispatchProp } from \"react-redux\";\nimport { saveApiToken } from \"store/auth/auth-action\";\nimport { getUrlParameter } from \"common/url\";\nimport { AuthService } from \"services/auth-service/auth-service\";\nimport { navigateToLinkAccount, navigateToDashboard } from \"store/navigation/navigation-action\";\nimport { Config } from \"common/config\";\nimport { getAccountLinkData } from \"store/link-account-panel/link-account-panel-actions\";\nimport { replace } from \"connected-react-router\";\nimport { User } from \"models/user\";\n\ninterface ApiTokenProps {\n    authService: AuthService;\n    config: Config;\n    loadMainApp: boolean;\n    user?: User;\n}\n\nexport const ApiToken = connect((state: RootState) => ({\n    user: state.auth.user,\n}), null)(\n    class extends React.Component<ApiTokenProps & RouteProps & DispatchProp<any>, {}> {\n        componentDidMount() {\n            const search = this.props.location ? this.props.location.search : \"\";\n            const apiToken = getUrlParameter(search, 'api_token');\n            this.props.dispatch<any>(saveApiToken(apiToken));\n        }\n\n        componentDidUpdate() {\n            const redirectURL = this.props.authService.getTargetURL();\n\n            if (this.props.loadMainApp && this.props.user) {\n                if (redirectURL) {\n                    asyncReplaceURL(redirectURL, this.props.authService, this.props.dispatch);\n                }\n                else if (this.props.dispatch(getAccountLinkData())) {\n                    this.props.dispatch(navigateToLinkAccount);\n                }\n                else {\n                    this.props.dispatch(navigateToDashboard);\n                }\n            }\n        }\n\n        render() {\n            return <div />;\n        }\n    }\n);\n\nasync function asyncReplaceURL(url: string, authService: AuthService, dispatch: Dispatch) {\n    await authService.removeTargetURL();\n    dispatch(replace(url));\n}\n"
  },
  {
    "path": "services/workbench2/src/views-components/auto-logout/auto-logout.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { AutoLogoutComponent, LAST_ACTIVE_TIMESTAMP } from './auto-logout';\n\ndescribe('<AutoLogoutComponent />', () => {\n    let props;\n    const sessionIdleTimeout = 300;\n    const lastWarningDuration = 60;\n    const eventListeners = {};\n\n    beforeEach(() => {\n        cy.clock();\n        window.addEventListener = cy.stub((event, cb) => {\n            eventListeners[event] = cb;\n        });\n        props = {\n            sessionIdleTimeout: sessionIdleTimeout,\n            lastWarningDuration: lastWarningDuration,\n            doLogout: cy.spy().as('doLogout'),\n            doWarn: cy.stub().as('doWarn'),\n            doCloseWarn: cy.stub(),\n        };\n        cy.mount(<div><AutoLogoutComponent {...props} /></div>);\n    });\n\n    afterEach(() => {\n        cy.clock().invoke('restore');\n    });\n\n    it('should logout after idle timeout', () => {\n        cy.tick((sessionIdleTimeout-1)*1000);\n        cy.get('@doLogout').should('not.have.been.called');\n        cy.tick(1000);\n        cy.get('@doLogout').should('have.been.called');\n    });\n\n    it('should warn the user previous to close the session', () => {\n        cy.tick((sessionIdleTimeout-lastWarningDuration-1)*1000);\n        cy.get('@doWarn').should('not.have.been.called');\n        cy.tick(1000);\n        cy.get('@doWarn').should('have.been.called');\n    });\n\n    it('should reset the idle timer when activity event is received', () => {\n        cy.tick((sessionIdleTimeout-lastWarningDuration-1)*1000);\n        cy.get('@doWarn').should('not.have.been.called');\n        // Simulate activity from other window/tab\n        eventListeners.storage({\n            key: LAST_ACTIVE_TIMESTAMP,\n            newValue: '42' // value currently doesn't matter\n        })\n        cy.tick(1000);\n        // Warning should not appear because idle timer was reset\n        cy.get('@doWarn').should('not.have.been.called');\n    });\n});"
  },
  {
    "path": "services/workbench2/src/views-components/auto-logout/auto-logout.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from \"react-redux\";\nimport { useIdleTimer } from \"react-idle-timer\";\nimport { Dispatch } from \"redux\";\n\nimport { RootState } from \"store/store\";\nimport { SnackbarKind, snackbarActions } from \"store/snackbar/snackbar-actions\";\nimport { logout } from \"store/auth/auth-action\";\nimport parse from \"parse-duration\";\nimport React from \"react\";\nimport { min } from \"lodash\";\n\ninterface AutoLogoutDataProps {\n    sessionIdleTimeout: number;\n    lastWarningDuration: number;\n}\n\ninterface AutoLogoutActionProps {\n    doLogout: () => void;\n    doWarn: (message: string, duration: number) => void;\n    doCloseWarn: () => void;\n}\n\nconst mapStateToProps = (state: RootState, ownProps: any): AutoLogoutDataProps => ({\n    sessionIdleTimeout: parse(state.auth.config.clusterConfig.Workbench.IdleTimeout, 's') || 0,\n    lastWarningDuration: ownProps.lastWarningDuration || 60,\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch): AutoLogoutActionProps => ({\n    doLogout: () => dispatch<any>(logout(true, true)),\n    doWarn: (message: string, duration: number) =>\n        dispatch(snackbarActions.OPEN_SNACKBAR({\n            message, hideDuration: duration, kind: SnackbarKind.WARNING })),\n    doCloseWarn: () => dispatch(snackbarActions.CLOSE_SNACKBAR()),\n});\n\nexport type AutoLogoutProps = AutoLogoutDataProps & AutoLogoutActionProps;\n\nconst debounce = (delay: number | undefined, fn: Function) => {\n    let timerId: NodeJS.Timer | null;\n    return (...args: any[]) => {\n        if (timerId) { clearTimeout(timerId); }\n        timerId = setTimeout(() => {\n            fn(...args);\n            timerId = null;\n        }, delay);\n    };\n};\n\nexport const LAST_ACTIVE_TIMESTAMP = 'lastActiveTimestamp';\n\nexport const AutoLogoutComponent = (props: AutoLogoutProps) => {\n    let logoutTimer: NodeJS.Timer;\n    const lastWarningDuration = min([props.lastWarningDuration, props.sessionIdleTimeout])! * 1000;\n\n    // Runs once after render\n    React.useEffect(() => {\n        window.addEventListener('storage', handleStorageEvents);\n        // Component cleanup\n        return () => {\n            window.removeEventListener('storage', handleStorageEvents);\n        };\n    });\n\n    const handleStorageEvents = (e: StorageEvent) => {\n        if (e.key === LAST_ACTIVE_TIMESTAMP) {\n            // Other tab activity detected by a localStorage change event.\n            debounce(500, () => {\n                handleOnActive();\n                reset();\n            })();\n        }\n    };\n\n    const handleOnIdle = () => {\n        logoutTimer = setTimeout(\n            () => props.doLogout(), lastWarningDuration);\n        props.doWarn(\n            \"Your session is about to be closed due to inactivity\",\n            lastWarningDuration);\n    };\n\n    const handleOnActive = () => {\n        if (logoutTimer) { clearTimeout(logoutTimer); }\n        props.doCloseWarn();\n    };\n\n    const handleOnAction = () => {\n        // Notify the other tabs there was some activity.\n        const now = (new Date()).getTime();\n        localStorage.setItem(LAST_ACTIVE_TIMESTAMP, now.toString());\n    };\n\n    const { reset } = useIdleTimer({\n        timeout: (props.lastWarningDuration < props.sessionIdleTimeout)\n            ? 
1000 * (props.sessionIdleTimeout - props.lastWarningDuration)\n            : 1,\n        onIdle: handleOnIdle,\n        onActive: handleOnActive,\n        onAction: handleOnAction,\n        debounce: 500\n    });\n\n    return <span />;\n};\n\nexport const AutoLogout = connect(mapStateToProps, mapDispatchToProps)(AutoLogoutComponent);\n"
  },
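AutoLogoutComponent keeps browser tabs in sync by writing a timestamp to localStorage on every user action: setting a localStorage key fires a 'storage' event in every other tab (never in the writing tab itself), and each listening tab resets its own idle timer. A minimal standalone sketch of that broadcast pattern follows; the function names are illustrative and not part of the component above.

// Key shared by all tabs; mirrors the role of LAST_ACTIVE_TIMESTAMP above.
const ACTIVITY_KEY = 'lastActiveTimestamp';

// Call on local user activity: the localStorage write is what notifies
// the other tabs, via their 'storage' event listeners.
function broadcastActivity(): void {
    localStorage.setItem(ACTIVITY_KEY, Date.now().toString());
}

// Each tab resets its own idle timer when a sibling tab reports activity.
// Returns a cleanup function, like a useEffect teardown.
function listenForSiblingActivity(resetIdleTimer: () => void): () => void {
    const handler = (e: StorageEvent) => {
        // The stored value is irrelevant; any change to the key means activity.
        if (e.key === ACTIVITY_KEY) {
            resetIdleTimer();
        }
    };
    window.addEventListener('storage', handler);
    return () => window.removeEventListener('storage', handler);
}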
  {
    "path": "services/workbench2/src/views-components/baner/banner.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { BannerComponent } from './banner';\nimport servicesProvider from 'common/service-provider';\nimport { Provider } from \"react-redux\";\nimport { ThemeProvider } from \"@mui/material\";\nimport { CustomTheme } from 'common/custom-theme';\nimport { configureStore } from \"store/store\";\nimport { createBrowserHistory } from \"history\";\nimport { createServices } from \"services/services\";\n\ndescribe('<BannerComponent />', () => {\n\n    let props;\n\n    beforeEach(() => {\n        props = {\n            isOpen: true,\n            bannerUUID: undefined,\n            keepWebInlineServiceUrl: '',\n            openBanner: cy.stub(),\n            closeBanner: cy.stub(),\n            classes: {},\n        }\n    });\n\n    const services = createServices(\"/arvados/v1\");\n    const store = configureStore(createBrowserHistory(), services);\n\n    it('renders without crashing', () => {\n        // when\n        cy.mount(\n            <Provider store={store}>\n              <ThemeProvider theme={CustomTheme}>\n                <BannerComponent {...props} />\n              </ThemeProvider>\n            </Provider>);\n\n        // then\n        cy.get('button').should('exist');\n    });\n\n    it('calls collectionService', () => {\n        // given\n        props.isOpen = true;\n        props.bannerUUID = '123';\n\n        cy.spy(servicesProvider, 'getServices').as('getServices');\n        cy.spy(servicesProvider.getServices().collectionService, 'files').as('files');\n        cy.spy(servicesProvider.getServices().collectionService, 'getFileContents').as('getFileContents');\n\n        // when\n        cy.mount(\n            <Provider store={store}>\n              <ThemeProvider theme={CustomTheme}>\n                <BannerComponent {...props} />\n              </ThemeProvider>\n            </Provider>);\n\n        // then\n        cy.get('@getServices').should('be.called');\n        cy.get('@files').should('be.called');\n        cy.get('@getFileContents').should('be.called');\n        cy.get('html').should('contain', 'Test banner message');\n    });\n});\n\n"
  },
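The Cypress component tests in this section hand-assemble the same Redux store and MUI theme wrappers around each component under test. A small helper along these lines could factor out that boilerplate; mountWithProviders is a hypothetical name, not an existing utility in this codebase.

import React from 'react';
import { Provider } from 'react-redux';
import { ThemeProvider } from '@mui/material';
import { CustomTheme } from 'common/custom-theme';
import { configureStore } from 'store/store';
import { createBrowserHistory } from 'history';
import { createServices } from 'services/services';

// Hypothetical test helper: wraps a component in the same store/theme
// providers that banner.cy.js builds inline.
export const mountWithProviders = (node: React.ReactElement) => {
    const services = createServices('/arvados/v1');
    const store = configureStore(createBrowserHistory(), services);
    return cy.mount(
        <Provider store={store}>
            <ThemeProvider theme={CustomTheme}>{node}</ThemeProvider>
        </Provider>
    );
};

// Usage inside a test: mountWithProviders(<BannerComponent {...props} />);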
  {
    "path": "services/workbench2/src/views-components/baner/banner.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useState, useCallback, useEffect } from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Dialog, DialogContent, DialogActions, Button } from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { bannerActions } from \"store/banner/banner-action\";\nimport { ArvadosTheme } from \"common/custom-theme\";\nimport servicesProvider from \"common/service-provider\";\nimport { Dispatch } from \"redux\";\nimport { sanitizeHTML } from \"common/html-sanitize\";\n\ntype CssRules = \"dialogContent\" | \"dialogContentIframe\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    dialogContent: {\n        minWidth: \"550px\",\n        minHeight: \"500px\",\n        display: \"block\",\n    },\n    dialogContentIframe: {\n        minWidth: \"550px\",\n        minHeight: \"500px\",\n    },\n});\n\ninterface BannerProps {\n    isOpen: boolean;\n    bannerUUID?: string;\n    keepWebInlineServiceUrl: string;\n}\n\ntype BannerComponentProps = BannerProps &\n    WithStyles<CssRules> & {\n        openBanner: Function;\n        closeBanner: Function;\n    };\n\nconst mapStateToProps = (state: RootState): BannerProps => ({\n    isOpen: state.banner.isOpen,\n    bannerUUID: state.auth.config.clusterConfig.Workbench.BannerUUID,\n    keepWebInlineServiceUrl: state.auth.config.keepWebInlineServiceUrl,\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    openBanner: () => dispatch<any>(bannerActions.openBanner()),\n    closeBanner: () => dispatch<any>(bannerActions.closeBanner()),\n});\n\nexport const BANNER_LOCAL_STORAGE_KEY = \"bannerFileData\";\n\nexport const BannerComponent = (props: BannerComponentProps) => {\n    const { isOpen, openBanner, closeBanner, bannerUUID, keepWebInlineServiceUrl } = props;\n    const [bannerContents, setBannerContents] = useState(`<h1>Loading ...</h1>`);\n\n    const onConfirm = useCallback(() => {\n        closeBanner();\n    }, [closeBanner]);\n\n    useEffect(() => {\n        if (!!bannerUUID && bannerUUID !== \"\") {\n            try {\n            servicesProvider\n                .getServices()\n                .collectionService.files(bannerUUID)\n                .then(results => {\n                    const bannerFileData = results.find(({ name }) => name === \"banner.html\");\n                    const result = localStorage.getItem(BANNER_LOCAL_STORAGE_KEY);\n\n                    if (result && result === JSON.stringify(bannerFileData) && !isOpen) {\n                        return;\n                    }\n\n                    if (bannerFileData) {\n                        servicesProvider\n                            .getServices()\n                            .collectionService.getFileContents(bannerFileData)\n                            .then(data => {\n                                setBannerContents(data);\n                                openBanner();\n                                localStorage.setItem(BANNER_LOCAL_STORAGE_KEY, JSON.stringify(bannerFileData));\n                            });\n                    }\n                })\n            } catch (error) {\n                console.error(\"Failed to load banner\", error);\n            }\n        }\n    }, [bannerUUID, keepWebInlineServiceUrl, 
openBanner, isOpen]);\n\n    return (\n        <Dialog\n            open={isOpen}\n            maxWidth=\"md\"\n        >\n            <div data-cy=\"confirmation-dialog\">\n                <DialogContent className={props.classes.dialogContent}>\n                    <div dangerouslySetInnerHTML={{ __html: sanitizeHTML(bannerContents) }}></div>\n                </DialogContent>\n                <DialogActions style={{ margin: \"0px 24px 24px\" }}>\n                    <Button\n                        data-cy=\"confirmation-dialog-ok-btn\"\n                        variant=\"contained\"\n                        color=\"primary\"\n                        type=\"submit\"\n                        onClick={onConfirm}\n                    >\n                        Close\n                    </Button>\n                </DialogActions>\n            </div>\n        </Dialog>\n    );\n};\n\nexport const Banner = withStyles(styles)(connect(mapStateToProps, mapDispatchToProps)(BannerComponent));\n"
  },
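The useEffect above implements a show-once guard: the banner reopens only when the banner.html file metadata returned by the collection service differs from the JSON snapshot saved the last time the banner was shown. A reduced sketch of that guard, where fetchBannerFile is a hypothetical stand-in for the collectionService.files() lookup:

// Hypothetical stand-in for collectionService.files(uuid) plus the
// find of the "banner.html" entry.
declare function fetchBannerFile(uuid: string): Promise<object | undefined>;

const STORAGE_KEY = 'bannerFileData'; // same role as BANNER_LOCAL_STORAGE_KEY

async function shouldShowBanner(uuid: string): Promise<boolean> {
    const file = await fetchBannerFile(uuid);
    if (!file) {
        return false; // collection has no banner.html
    }
    // Identical metadata means this banner version was already acknowledged.
    if (localStorage.getItem(STORAGE_KEY) === JSON.stringify(file)) {
        return false;
    }
    localStorage.setItem(STORAGE_KEY, JSON.stringify(file));
    return true;
}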
  {
    "path": "services/workbench2/src/views-components/breadcrumbs/breadcrumbs.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from \"react-redux\";\nimport { Breadcrumb, Breadcrumbs as BreadcrumbsComponent, BreadcrumbsProps } from 'components/breadcrumbs/breadcrumbs';\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport { BREADCRUMBS } from '../../store/breadcrumbs/breadcrumbs-actions';\nimport { openSidePanelContextMenu } from 'store/context-menu/context-menu-actions';\nimport { getProperty } from \"store/properties/properties\";\n\ntype BreadcrumbsDataProps = Pick<BreadcrumbsProps, 'items' | 'resources'>;\ntype BreadcrumbsActionProps = Pick<BreadcrumbsProps, 'onClick' | 'onContextMenu'>;\n\nconst mapStateToProps = () => ({ properties, resources }: RootState): BreadcrumbsDataProps => ({\n    items: (getProperty<Breadcrumb[]>(BREADCRUMBS)(properties) || []),\n    resources,\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch): BreadcrumbsActionProps => ({\n    onClick: (navFunc, { uuid }: Breadcrumb) => {\n        dispatch<any>(navFunc(uuid));\n    },\n    onContextMenu: (event, breadcrumb: Breadcrumb) => {\n        dispatch<any>(openSidePanelContextMenu(event, breadcrumb.uuid));\n    }\n});\n\nexport const Breadcrumbs = connect(mapStateToProps(), mapDispatchToProps)(BreadcrumbsComponent);\n"
  },
  {
    "path": "services/workbench2/src/views-components/collection-panel-files/collection-panel-files.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from \"react-redux\";\nimport {\n    CollectionPanelFiles as Component,\n    CollectionPanelFilesProps\n} from \"components/collection-panel-files/collection-panel-files\";\nimport { RootState } from \"store/store\";\nimport { Dispatch } from \"redux\";\nimport { collectionPanelFilesAction } from \"store/collection-panel/collection-panel-files/collection-panel-files-actions\";\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\nimport { openContextMenuAndSelect, openCollectionFilesContextMenu } from 'store/context-menu/context-menu-actions';\nimport { openUploadCollectionFilesDialog } from 'store/collections/collection-upload-actions';\nimport { ResourceKind } from \"models/resource\";\nimport { openDetailsPanel } from 'store/details-panel/details-panel-action';\n\nconst mapStateToProps = (state: RootState): Pick<CollectionPanelFilesProps, \"currentItemUuid\"> => ({\n    currentItemUuid: state.detailsPanel.resourceUuid\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch): Pick<CollectionPanelFilesProps, 'onSearchChange' | 'onFileClick' | 'onUploadDataClick' | 'onCollapseToggle' | 'onSelectionToggle' | 'onItemMenuOpen' | 'onOptionsMenuOpen'> => ({\n    onUploadDataClick: (targetLocation?: string) => {\n        dispatch<any>(openUploadCollectionFilesDialog(targetLocation));\n    },\n    onCollapseToggle: (id) => {\n        dispatch(collectionPanelFilesAction.TOGGLE_COLLECTION_FILE_COLLAPSE({ id }));\n    },\n    onSelectionToggle: (event, item) => {\n        dispatch(collectionPanelFilesAction.TOGGLE_COLLECTION_FILE_SELECTION({ id: item.id }));\n    },\n    onItemMenuOpen: (event, item, isWritable) => {\n        const isDirectory = item.data?.type === 'directory';\n        dispatch<any>(openContextMenuAndSelect(\n            event,\n            {\n                menuKind: isWritable\n                    ? isDirectory\n                        ? ContextMenuKind.COLLECTION_DIRECTORY_ITEM\n                        : ContextMenuKind.COLLECTION_FILE_ITEM\n                    : isDirectory\n                        ? ContextMenuKind.READONLY_COLLECTION_DIRECTORY_ITEM\n                        : ContextMenuKind.READONLY_COLLECTION_FILE_ITEM,\n                kind: ResourceKind.COLLECTION,\n                name: item.data?.name || '',\n                uuid: item.id,\n                ownerUuid: ''\n            }\n        ));\n    },\n    onSearchChange: (searchValue: string) => {\n        dispatch(collectionPanelFilesAction.ON_SEARCH_CHANGE(searchValue));\n    },\n    onOptionsMenuOpen: (event, isWritable) => {\n        dispatch<any>(openCollectionFilesContextMenu(event, isWritable));\n    },\n    onFileClick: (id) => {\n        dispatch<any>(openDetailsPanel(id));\n    },\n});\n\nexport const CollectionPanelFiles = connect(mapStateToProps, mapDispatchToProps)(Component);\n"
  },
  {
    "path": "services/workbench2/src/views-components/collections-dialog/restore-version-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { COLLECTION_RESTORE_VERSION_DIALOG, restoreVersion } from 'store/collections/collection-version-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(restoreVersion(props.data.uuid));\n    }\n});\n\nexport const RestoreCollectionVersionDialog = compose(\n    withDialog(COLLECTION_RESTORE_VERSION_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/api-client-authorization-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport {\n    openApiClientAuthorizationAttributesDialog,\n    openApiClientAuthorizationRemoveDialog,\n} from \"store/api-client-authorizations/api-client-authorizations-actions\";\nimport { openAdvancedTabDialog } from \"store/advanced-tab/advanced-tab\";\nimport { ContextMenuActionSet, ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport { AdvancedIcon, RemoveIcon, AttributesIcon, CopyIcon } from \"components/icon/icon\";\nimport { copyStringToClipboardAction } from \"store/open-in-new-tab/open-in-new-tab.actions\";\n\nexport const apiClientAuthorizationActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.ATTRIBUTES,\n            icon: AttributesIcon,\n            execute: (dispatch, resources) => {\n                    dispatch<any>(openApiClientAuthorizationAttributesDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.API_DETAILS,\n            icon: AdvancedIcon,\n            execute: (dispatch, resources) => {\n                    dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.REMOVE,\n            icon: RemoveIcon,\n            execute: (dispatch, resources) => {\n                    dispatch<any>(openApiClientAuthorizationRemoveDialog(resources[0].uuid));\n            },\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/collection-action-set.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { collectionActionSet, readOnlyCollectionActionSet } from \"./collection-action-set\";\nimport { containsActionSubSet } from \"../../../cypress/utils/contains-action-subset\";\n\ndescribe('collection-action-set', () => {\n    const flattCollectionActionSet = collectionActionSet.reduce((prev, next) => prev.concat(next), []);\n    const flattReadOnlyCollectionActionSet = readOnlyCollectionActionSet.reduce((prev, next) => prev.concat(next), []);\n    describe('collectionActionSet', () => {\n        it('should not be empty', () => {\n            // then\n            expect(flattCollectionActionSet).to.have.length.greaterThan(0);\n        });\n\n        it('should contain readOnlyCollectionActionSet items', () => {\n            // then\n            expect(containsActionSubSet(flattCollectionActionSet, flattReadOnlyCollectionActionSet)).to.be.true;\n        })\n    });\n\n    describe('readOnlyCollectionActionSet', () => {\n        it('should not be empty', () => {\n            // then\n            expect(flattReadOnlyCollectionActionSet).to.have.length.greaterThan(0);\n        });\n\n        it('should not contain collectionActionSet items', () => {\n            // then\n            expect(containsActionSubSet(flattReadOnlyCollectionActionSet, flattCollectionActionSet)).to.be.false;\n        })\n    });\n});"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/collection-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuAction, ContextMenuActionSet, ContextMenuActionNames } from \"../context-menu-action-set\";\nimport { ToggleFavoriteAction } from \"../actions/favorite-action\";\nimport { toggleFavorite } from \"store/favorites/favorites-actions\";\nimport {\n    RenameIcon,\n    ShareIcon,\n    MoveToIcon,\n    CopyIcon,\n    DetailsIcon,\n    AdvancedIcon,\n    OpenIcon,\n    Link,\n    RestoreVersionIcon,\n    FolderSharedIcon,\n    FileCopyOutlinedIcon,\n} from \"components/icon/icon\";\nimport { openCollectionUpdateDialog } from \"store/collections/collection-update-actions\";\nimport { favoritePanelActions } from \"store/favorite-panel/favorite-panel-action\";\nimport { openMoveCollectionDialog } from \"store/collections/collection-move-actions\";\nimport { openCollectionCopy } from \"store/collections/collection-copy-actions\";\nimport { openWebDavS3InfoDialog } from \"store/collections/collection-info-actions\";\nimport { ToggleTrashAction } from \"views-components/context-menu/actions/trash-action\";\nimport { toggleResourceTrashed } from \"store/trash/trash-actions\";\nimport { openSharingDialog } from \"store/sharing-dialog/sharing-dialog-actions\";\nimport { openAdvancedTabDialog } from \"store/advanced-tab/advanced-tab\";\nimport { openDetailsPanel } from \"store/details-panel/details-panel-action\";\nimport { copyToClipboardAction, copyStringToClipboardAction, openInNewTabAction } from \"store/open-in-new-tab/open-in-new-tab.actions\";\nimport { openRestoreCollectionVersionDialog } from \"store/collections/collection-version-actions\";\nimport { TogglePublicFavoriteAction } from \"../actions/public-favorite-action\";\nimport { togglePublicFavorite } from \"store/public-favorites/public-favorites-actions\";\nimport { publicFavoritePanelActions } from \"store/public-favorites-panel/public-favorites-action\";\nimport { ContextMenuResource } from \"store/context-menu/context-menu\";\n\nconst toggleFavoriteAction: ContextMenuAction = {\n    component: ToggleFavoriteAction,\n    name: ContextMenuActionNames.ADD_TO_FAVORITES,\n    execute: (dispatch, resources) => {\n        for (const resource of [...resources]) {\n            dispatch<any>(toggleFavorite(resource)).then(() => {\n                dispatch<any>(favoritePanelActions.REQUEST_ITEMS());\n            });\n        }\n    },\n};\nconst commonActionSet: ContextMenuActionSet = [\n    [\n        {\n            icon: OpenIcon,\n            name: ContextMenuActionNames.OPEN_IN_NEW_TAB,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openInNewTabAction(resources[0]));\n            },\n        },\n        {\n            icon: Link,\n            name: ContextMenuActionNames.COPY_LINK_TO_CLIPBOARD,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyToClipboardAction(resources));\n            },\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n        {\n            icon: FileCopyOutlinedIcon,\n            name: ContextMenuActionNames.MAKE_A_COPY,\n            isForMulti: true,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openCollectionCopy(resources[0]));\n            },\n        },\n        {\n         
   icon: DetailsIcon,\n            name: ContextMenuActionNames.VIEW_DETAILS,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openDetailsPanel(resources[0].uuid));\n            },\n        },\n        {\n            icon: AdvancedIcon,\n            name: ContextMenuActionNames.API_DETAILS,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n    ],\n];\n\nexport const readOnlyCollectionActionSet: ContextMenuActionSet = [\n    [\n        ...commonActionSet.reduce((prev, next) => prev.concat(next), []),\n        toggleFavoriteAction,\n        {\n            icon: FolderSharedIcon,\n            name: ContextMenuActionNames.OPEN_WITH_3RD_PARTY_CLIENT,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openWebDavS3InfoDialog(resources[0].uuid));\n            },\n        },\n    ],\n];\n\nexport const collectionActionSet: ContextMenuActionSet = [\n    [\n        ...readOnlyCollectionActionSet.reduce((prev, next) => prev.concat(next), []),\n        {\n            icon: RenameIcon,\n            name: ContextMenuActionNames.EDIT_COLLECTION,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openCollectionUpdateDialog(resources[0]));\n            },\n        },\n        {\n            icon: ShareIcon,\n            name: ContextMenuActionNames.SHARE,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openSharingDialog(resources[0].uuid));\n            },\n        },\n        {\n            icon: MoveToIcon,\n            name: ContextMenuActionNames.MOVE_TO,\n            isForMulti: true,\n            execute: (dispatch, resources) => dispatch<any>(openMoveCollectionDialog(resources[0])),\n        },\n        {\n            component: ToggleTrashAction,\n            name: ContextMenuActionNames.MOVE_TO_TRASH,\n            isForMulti: true,\n            execute: (dispatch, resources: ContextMenuResource[]) => {\n                dispatch<any>(toggleResourceTrashed(resources.map(res => res.uuid), resources.some(res => res.isTrashed)));\n            },\n        },\n    ],\n];\n\nexport const collectionAdminActionSet: ContextMenuActionSet = [\n    [\n        ...collectionActionSet.reduce((prev, next) => prev.concat(next), []),\n        {\n            component: TogglePublicFavoriteAction,\n            name: ContextMenuActionNames.ADD_TO_PUBLIC_FAVORITES,\n            execute: (dispatch, resources) => {\n                for (const resource of [...resources]) {\n                    dispatch<any>(togglePublicFavorite(resource)).then(() => {\n                        dispatch<any>(publicFavoritePanelActions.REQUEST_ITEMS());\n                    });\n                }\n            },\n        },\n    ],\n];\n\nexport const oldCollectionVersionActionSet: ContextMenuActionSet = [\n    [\n        ...commonActionSet.reduce((prev, next) => prev.concat(next), []),\n        {\n            icon: RestoreVersionIcon,\n            name: ContextMenuActionNames.RESTORE_VERSION,\n            execute: (dispatch, resources) => {\n                for (const resource of [...resources]) {\n                    dispatch<any>(openRestoreCollectionVersionDialog(resource.uuid));\n                }\n            },\n        },\n    ],\n];\n\nexport const writeableCollectionSet: ContextMenuActionSet = [\n    [\n        ...collectionActionSet.reduce((prev, next) => {\n            return prev.concat(next.filter(action => 
action.name !== ContextMenuActionNames.SHARE));\n        }, []),\n    ]\n];\n"
  },
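The action-set modules in this section all compose menus the same way: a ContextMenuActionSet is an array of action groups, so deriving one set from another means flattening the base set's groups and spreading them into a new group. The recurring reduce/concat idiom, isolated here with illustrative types:

// Illustrative shape; the real ContextMenuAction carries more fields.
interface Action { name: string; }
type ActionSet = Action[][];

const base: ActionSet = [[{ name: 'View details' }], [{ name: 'API details' }]];

// The idiom used throughout these files...
const flattened: Action[] = base.reduce((prev, next) => prev.concat(next), [] as Action[]);

// ...is equivalent to the ES2019 built-in:
const flattenedToo: Action[] = base.flat();

// A derived set spreads the flattened base plus its own additions:
const derived: ActionSet = [[...flattened, { name: 'Edit' }]];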
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/collection-files-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuAction, ContextMenuActionSet, ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport { collectionPanelFilesAction, openDownloadZipDialog, openMultipleFilesRemoveDialog } from \"store/collection-panel/collection-panel-files/collection-panel-files-actions\";\nimport {\n    openCollectionPartialCopyMultipleToNewCollectionDialog,\n    openCollectionPartialCopyMultipleToExistingCollectionDialog,\n    openCollectionPartialCopyToSeparateCollectionsDialog\n} from 'store/collections/collection-partial-copy-actions';\nimport { openCollectionPartialMoveMultipleToExistingCollectionDialog, openCollectionPartialMoveMultipleToNewCollectionDialog, openCollectionPartialMoveToSeparateCollectionsDialog } from \"store/collections/collection-partial-move-actions\";\nimport { DownloadIcon, FileCopyIcon, FileMoveIcon, RemoveIcon, SelectAllIcon, SelectNoneIcon } from \"components/icon/icon\";\n\nconst copyActions: ContextMenuAction[] = [\n    {\n        name: ContextMenuActionNames.COPY_SELECTED_INTO_NEW_COLLECTION,\n        icon: FileCopyIcon,\n        execute: dispatch => {\n            dispatch<any>(openCollectionPartialCopyMultipleToNewCollectionDialog());\n        }\n    },\n    {\n        name: ContextMenuActionNames.COPY_SELECTED_INTO_EXISTING_COLLECTION,\n        icon: FileCopyIcon,\n        execute: dispatch => {\n            dispatch<any>(openCollectionPartialCopyMultipleToExistingCollectionDialog());\n        }\n    },\n];\n\nconst copyActionsMultiple: ContextMenuAction[] = [\n    ...copyActions,\n    {\n        name: ContextMenuActionNames.COPY_SELECTED_INTO_SEPARATE_COLLECTIONS,\n        icon: FileCopyIcon,\n        execute: dispatch => {\n            dispatch<any>(openCollectionPartialCopyToSeparateCollectionsDialog());\n        }\n    }\n];\n\nconst moveActions: ContextMenuAction[] = [\n    {\n        name: ContextMenuActionNames.MOVE_SELECTED_INTO_NEW_COLLECTION,\n        icon: FileMoveIcon,\n        execute: dispatch => {\n            dispatch<any>(openCollectionPartialMoveMultipleToNewCollectionDialog());\n        }\n    },\n    {\n        name: ContextMenuActionNames.MOVE_SELECTED_INTO_EXISTING_COLLECTION,\n        icon: FileMoveIcon,\n        execute: dispatch => {\n            dispatch<any>(openCollectionPartialMoveMultipleToExistingCollectionDialog());\n        }\n    },\n];\n\nconst moveActionsMultiple: ContextMenuAction[] = [\n    ...moveActions,\n    {\n        name: ContextMenuActionNames.MOVE_SELECTED_INTO_SEPARATE_COLLECTIONS,\n        icon: FileMoveIcon,\n        execute: dispatch => {\n            dispatch<any>(openCollectionPartialMoveToSeparateCollectionsDialog());\n        }\n    }\n];\n\nconst selectActions: ContextMenuAction[] = [\n    {\n        name: ContextMenuActionNames.SELECT_ALL,\n        icon: SelectAllIcon,\n        execute: dispatch => {\n            dispatch(collectionPanelFilesAction.SELECT_ALL_COLLECTION_FILES());\n        }\n    },\n    {\n        name: ContextMenuActionNames.UNSELECT_ALL,\n        icon: SelectNoneIcon,\n        execute: dispatch => {\n            dispatch(collectionPanelFilesAction.UNSELECT_ALL_COLLECTION_FILES());\n        }\n    },\n];\n\nconst removeAction: ContextMenuAction = {\n    name: ContextMenuActionNames.REMOVE_SELECTED,\n    icon: RemoveIcon,\n    execute: dispatch => {\n        dispatch(openMultipleFilesRemoveDialog());\n    }\n};\n\nconst 
downloadZipAction: ContextMenuAction = {\n    name: ContextMenuActionNames.DOWNLOAD_SELECTED,\n    icon: DownloadIcon,\n    execute: dispatch => {\n        dispatch<any>(openDownloadZipDialog());\n    }\n};\n\n// These action sets are used on the multi-select actions button.\nexport const readOnlyCollectionFilesActionSet: ContextMenuActionSet = [\n    selectActions,\n    [downloadZipAction],\n    copyActions,\n];\n\nexport const readOnlyCollectionFilesMultipleActionSet: ContextMenuActionSet = [\n    selectActions,\n    [downloadZipAction],\n    copyActionsMultiple,\n];\n\nexport const collectionFilesActionSet: ContextMenuActionSet = readOnlyCollectionFilesActionSet.concat([[\n    removeAction,\n    ...moveActions,\n]]);\n\nexport const collectionFilesMultipleActionSet: ContextMenuActionSet = readOnlyCollectionFilesMultipleActionSet.concat([[\n    removeAction,\n    ...moveActionsMultiple,\n]]);\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/collection-files-item-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from \"../context-menu-action-set\";\nimport { FileCopyIcon, FileMoveIcon, RemoveIcon, RenameIcon } from \"components/icon/icon\";\nimport { DownloadCollectionFileAction } from \"../actions/download-collection-file-action\";\nimport { openFileRemoveDialog, openRenameFileDialog } from \"store/collection-panel/collection-panel-files/collection-panel-files-actions\";\nimport { CollectionFileViewerAction } from \"views-components/context-menu/actions/collection-file-viewer-action\";\nimport { CollectionUUIDCopyToClipboardAction,\n\t CollectionPDHCopyToClipboardAction,\n\t CollectionCWLCopyToClipboardAction\n} from \"../actions/collection-copy-to-clipboard-action\";\nimport {\n    openCollectionPartialMoveToExistingCollectionDialog,\n    openCollectionPartialMoveToNewCollectionDialog,\n} from \"store/collections/collection-partial-move-actions\";\nimport {\n    openCollectionPartialCopyToExistingCollectionDialog,\n    openCollectionPartialCopyToNewCollectionDialog,\n} from \"store/collections/collection-partial-copy-actions\";\n\nexport const readOnlyCollectionDirectoryItemActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.COPY_ITEM_INTO_NEW_COLLECTION,\n            icon: FileCopyIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openCollectionPartialCopyToNewCollectionDialog(resources[0]));\n            },\n        },\n        {\n            name: ContextMenuActionNames.COPY_ITEM_INTO_EXISTING_COLLECTION,\n            icon: FileCopyIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openCollectionPartialCopyToExistingCollectionDialog(resources[0]));\n            },\n        },\n        {\n            component: CollectionFileViewerAction,\n            name: ContextMenuActionNames.OPEN_IN_NEW_TAB,\n            execute: () => {\n                return;\n            },\n        },\n        {\n            component: CollectionUUIDCopyToClipboardAction,\n            name: ContextMenuActionNames.COPY_UUID_LINK_TO_CLIPBOARD,\n            execute: () => {\n                return;\n            },\n        },\n        {\n            component: CollectionPDHCopyToClipboardAction,\n            name: ContextMenuActionNames.COPY_PDH_LINK_TO_CLIPBOARD,\n            execute: () => {\n                return;\n            },\n        },\n        {\n            component: CollectionCWLCopyToClipboardAction,\n            name: ContextMenuActionNames.COPY_CWL_LINK_TO_CLIPBOARD,\n            execute: () => {\n                return;\n            },\n        },\n    ],\n];\n\nexport const readOnlyCollectionFileItemActionSet: ContextMenuActionSet = [\n    [\n        {\n            component: DownloadCollectionFileAction,\n            name: ContextMenuActionNames.DOWNLOAD,\n            execute: () => {\n                return;\n            },\n        },\n        ...readOnlyCollectionDirectoryItemActionSet.reduce((prev, next) => prev.concat(next), []),\n    ],\n];\n\nconst writableActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.MOVE_ITEM_INTO_NEW_COLLECTION,\n            icon: FileMoveIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openCollectionPartialMoveToNewCollectionDialog(resources[0]));\n            },\n        },\n        {\n            
name: ContextMenuActionNames.MOVE_ITEM_INTO_EXISTING_COLLECTION,\n            icon: FileMoveIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openCollectionPartialMoveToExistingCollectionDialog(resources[0]));\n            },\n        },\n        {\n            name: ContextMenuActionNames.RENAME,\n            icon: RenameIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(\n                    openRenameFileDialog({\n                        name: resources[0].name,\n                        id: resources[0].uuid,\n                        path: resources[0].uuid.split(\"/\").slice(1).join(\"/\"),\n                    })\n                );\n            },\n        },\n        {\n            name: ContextMenuActionNames.REMOVE,\n            icon: RemoveIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openFileRemoveDialog(resources[0].uuid));\n            },\n        },\n    ],\n];\n\nexport const collectionDirectoryItemActionSet: ContextMenuActionSet = readOnlyCollectionDirectoryItemActionSet.concat(writableActionSet);\n\nexport const collectionFileItemActionSet: ContextMenuActionSet = readOnlyCollectionFileItemActionSet.concat(writableActionSet);\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/collection-files-not-selected-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport { collectionPanelFilesAction, openDownloadZipDialog } from \"store/collection-panel/collection-panel-files/collection-panel-files-actions\";\nimport { DownloadIcon, SelectAllIcon } from \"components/icon/icon\";\n\nexport const collectionFilesNotSelectedActionSet: ContextMenuActionSet = [\n    [{\n        name: ContextMenuActionNames.SELECT_ALL,\n        icon: SelectAllIcon,\n        execute: dispatch => {\n            dispatch(collectionPanelFilesAction.SELECT_ALL_COLLECTION_FILES());\n        }\n    }],\n    [{\n        name: ContextMenuActionNames.DOWNLOAD_ALL,\n        icon: DownloadIcon,\n        execute: dispatch => {\n            dispatch<any>(openDownloadZipDialog());\n        }\n    }],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/external-credential-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { ContextMenuResource } from \"store/context-menu/context-menu\";\nimport { ContextMenuActionSet, ContextMenuActionNames } from \"../context-menu-action-set\";\nimport { RenameIcon, AdvancedIcon, DeleteForever, CopyIcon } from \"components/icon/icon\";\nimport { openRemoveExternalCredentialDialog, openExternalCredentialUpdateDialog } from \"store/external-credentials/external-credentials-actions\";\nimport { ShareIcon } from \"components/icon/icon\";\nimport { openSharingDialog } from \"store/sharing-dialog/sharing-dialog-actions\";\nimport { openAdvancedTabDialog } from \"store/advanced-tab/advanced-tab\";\nimport { copyStringToClipboardAction } from \"store/open-in-new-tab/open-in-new-tab.actions\";\n\nexport const advancedAction = {\n    icon: AdvancedIcon,\n    name: ContextMenuActionNames.API_DETAILS,\n    execute: (dispatch: Dispatch, resources: ContextMenuResource[]) => {\n        dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n    },\n};\n\nexport const editExternalCredentialAction = {\n    icon: RenameIcon,\n    name: ContextMenuActionNames.EDIT_CREDENTIAL,\n    execute: (dispatch: Dispatch, resources: ContextMenuResource[]) => {\n        dispatch<any>(openExternalCredentialUpdateDialog(resources[0]));\n    },\n};\n\nexport const shareAction = {\n    icon: ShareIcon,\n    name: ContextMenuActionNames.SHARE,\n    execute: (dispatch: Dispatch, resources: ContextMenuResource[]) => {\n        dispatch<any>(openSharingDialog(resources[0].uuid));\n    },\n};\n\nexport const deleteAction = {\n    name: ContextMenuActionNames.REMOVE,\n    icon: DeleteForever,\n    isForMulti: true,\n    execute: (dispatch: Dispatch, resources: ContextMenuResource[]) => {\n        dispatch<any>(openRemoveExternalCredentialDialog(resources[0]));\n    },\n};\n\nexport const copyUuidAction = {\n    icon: CopyIcon,\n    name: ContextMenuActionNames.COPY_UUID,\n    execute: (dispatch: Dispatch, resources: ContextMenuResource[]) => {\n        dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n    },\n};\n\nexport const externalCredentialActionSet: ContextMenuActionSet = [\n    [\n        advancedAction,\n        editExternalCredentialAction,\n        shareAction,\n        deleteAction,\n        copyUuidAction,\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/favorite-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from '../context-menu-action-set';\nimport { ToggleFavoriteAction } from '../actions/favorite-action';\nimport { toggleFavorite } from 'store/favorites/favorites-actions';\nimport { favoritePanelActions } from 'store/favorite-panel/favorite-panel-action';\n\nexport const favoriteActionSet: ContextMenuActionSet = [\n    [\n        {\n            component: ToggleFavoriteAction,\n            name: ContextMenuActionNames.ADD_TO_FAVORITES,\n            execute: (dispatch, resources) => {\n                resources.forEach((resource) =>\n                    dispatch<any>(toggleFavorite(resource)).then(() => {\n                        dispatch<any>(favoritePanelActions.REQUEST_ITEMS());\n                    })\n                );\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/group-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from 'views-components/context-menu/context-menu-action-set';\nimport { RenameIcon, AdvancedIcon, RemoveIcon, DetailsIcon, CopyIcon } from 'components/icon/icon';\nimport { openAdvancedTabDialog } from 'store/advanced-tab/advanced-tab';\nimport { openRemoveGroupDialog, openGroupUpdateDialog } from 'store/groups-panel/groups-panel-actions';\nimport { toggleDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { copyStringToClipboardAction } from 'store/open-in-new-tab/open-in-new-tab.actions';\n\nexport const groupActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.EDIT_GROUP,\n            icon: RenameIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openGroupUpdateDialog(resources[0].uuid))\n            },\n        },\n        {\n            name: ContextMenuActionNames.API_DETAILS,\n            icon: AdvancedIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.REMOVE,\n            icon: RemoveIcon,\n            isForMulti: true,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openRemoveGroupDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.VIEW_DETAILS,\n            icon: DetailsIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(toggleDetailsPanel(resources[0].uuid));\n            },\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n    ],\n];\n\nexport const builtInGroupActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.API_DETAILS,\n            icon: AdvancedIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.VIEW_DETAILS,\n            icon: DetailsIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(toggleDetailsPanel(resources[0].uuid));\n            },\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n    ]\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/group-member-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from 'views-components/context-menu/context-menu-action-set';\nimport { AdvancedIcon, RemoveIcon, AttributesIcon, CopyIcon } from 'components/icon/icon';\nimport { openAdvancedTabDialog } from 'store/advanced-tab/advanced-tab';\nimport { openGroupMemberAttributes, openRemoveCheckedGroupMembersDialog } from 'store/group-details-panel/group-details-panel-actions';\nimport { copyStringToClipboardAction } from 'store/open-in-new-tab/open-in-new-tab.actions';\n\nexport const groupMemberActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.ATTRIBUTES,\n            icon: AttributesIcon,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openGroupMemberAttributes(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.API_DETAILS,\n            icon: AdvancedIcon,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.REMOVE,\n            icon: RemoveIcon,\n            isForMulti: true,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openRemoveCheckedGroupMembersDialog());\n            },\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/keep-service-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { openKeepServiceAttributesDialog, openKeepServiceRemoveDialog } from 'store/keep-services/keep-services-actions';\nimport { openAdvancedTabDialog } from 'store/advanced-tab/advanced-tab';\nimport { ContextMenuActionSet, ContextMenuActionNames } from 'views-components/context-menu/context-menu-action-set';\nimport { AdvancedIcon, RemoveIcon, AttributesIcon, CopyIcon } from 'components/icon/icon';\nimport { copyStringToClipboardAction } from 'store/open-in-new-tab/open-in-new-tab.actions';\n\nexport const keepServiceActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.ATTRIBUTES,\n            icon: AttributesIcon,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openKeepServiceAttributesDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.API_DETAILS,\n            icon: AdvancedIcon,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.REMOVE,\n            icon: RemoveIcon,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openKeepServiceRemoveDialog(resources[0].uuid));\n            },\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/link-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { openLinkAttributesDialog, openLinkRemoveDialog } from 'store/link-panel/link-panel-actions';\nimport { openAdvancedTabDialog } from 'store/advanced-tab/advanced-tab';\nimport { ContextMenuActionSet, ContextMenuActionNames } from 'views-components/context-menu/context-menu-action-set';\nimport { AdvancedIcon, RemoveIcon, AttributesIcon, CopyIcon } from 'components/icon/icon';\nimport { copyStringToClipboardAction } from 'store/open-in-new-tab/open-in-new-tab.actions';\n\nexport const linkActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.ATTRIBUTES,\n            icon: AttributesIcon,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openLinkAttributesDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.API_DETAILS,\n            icon: AdvancedIcon,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.REMOVE,\n            icon: RemoveIcon,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openLinkRemoveDialog(resources[0].uuid));\n            },\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/permission-edit-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from 'views-components/context-menu/context-menu-action-set';\nimport { CanReadIcon, CanManageIcon, CanWriteIcon } from 'components/icon/icon';\nimport { editPermissionLevel } from 'store/group-details-panel/group-details-panel-actions';\nimport { PermissionLevel } from 'models/permission';\n\nexport const permissionEditActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.READ,\n            icon: CanReadIcon,\n            execute: (dispatch, resources) => {\n                resources.forEach((resource) => dispatch<any>(editPermissionLevel(resource.uuid, PermissionLevel.CAN_READ)));\n            },\n        },\n        {\n            name: ContextMenuActionNames.WRITE,\n            icon: CanWriteIcon,\n            execute: (dispatch, resources) => {\n                resources.forEach((resource) => dispatch<any>(editPermissionLevel(resource.uuid, PermissionLevel.CAN_WRITE)));\n            },\n        },\n        {\n            name: ContextMenuActionNames.MANAGE,\n            icon: CanManageIcon,\n            execute: (dispatch, resources) => {\n                resources.forEach((resource) => dispatch<any>(editPermissionLevel(resource.uuid, PermissionLevel.CAN_MANAGE)));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/process-resource-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from \"../context-menu-action-set\";\nimport { ToggleFavoriteAction } from \"../actions/favorite-action\";\nimport { toggleFavorite } from \"store/favorites/favorites-actions\";\nimport {\n    RenameIcon,\n    DetailsIcon,\n    ReRunProcessIcon,\n    OutputIcon,\n    AdvancedIcon,\n    OpenIcon,\n    StopIcon,\n    CopyIcon,\n    DeleteForever,\n    Link,\n} from \"components/icon/icon\";\nimport { favoritePanelActions } from \"store/favorite-panel/favorite-panel-action\";\nimport { openProcessUpdateDialog } from \"store/processes/process-update-actions\";\nimport { openCopyProcessDialog } from \"store/processes/process-copy-actions\";\nimport { openRemoveProcessDialog } from \"store/processes/processes-actions\";\nimport { openDetailsPanel } from \"store/details-panel/details-panel-action\";\nimport { navigateToOutput } from \"store/process-panel/process-panel-actions\";\nimport { openAdvancedTabDialog } from \"store/advanced-tab/advanced-tab\";\nimport { TogglePublicFavoriteAction } from \"../actions/public-favorite-action\";\nimport { togglePublicFavorite } from \"store/public-favorites/public-favorites-actions\";\nimport { publicFavoritePanelActions } from \"store/public-favorites-panel/public-favorites-action\";\nimport { openInNewTabAction } from \"store/open-in-new-tab/open-in-new-tab.actions\";\nimport { openCancelProcesswDialog } from \"store/processes/processes-actions\";\nimport { copyStringToClipboardAction } from \"store/open-in-new-tab/open-in-new-tab.actions\";\n\nexport const readOnlyProcessResourceActionSet: ContextMenuActionSet = [\n    [\n        {\n            component: ToggleFavoriteAction,\n            name: ContextMenuActionNames.ADD_TO_FAVORITES,\n            execute: (dispatch, resources) => {\n                dispatch<any>(toggleFavorite(resources[0])).then(() => {\n                    dispatch<any>(favoritePanelActions.REQUEST_ITEMS());\n                });\n            },\n        },\n        {\n            icon: OpenIcon,\n            name: ContextMenuActionNames.OPEN_IN_NEW_TAB,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openInNewTabAction(resources[0]));\n            },\n        },\n        {\n            icon: ReRunProcessIcon,\n            name: ContextMenuActionNames.COPY_AND_RERUN_PROCESS,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openCopyProcessDialog(resources[0]));\n            },\n        },\n        {\n            icon: OutputIcon,\n            name: ContextMenuActionNames.OUTPUTS,\n            execute: (dispatch, resources) => {\n                if (resources[0]) {\n                    dispatch<any>(navigateToOutput(resources[0]));\n                }\n            },\n        },\n        {\n            icon: DetailsIcon,\n            name: ContextMenuActionNames.VIEW_DETAILS,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openDetailsPanel(resources[0].uuid));\n            },\n        },\n        {\n            icon: AdvancedIcon,\n            name: ContextMenuActionNames.API_DETAILS,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => 
{\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n        {\n            icon: Link,\n            name: ContextMenuActionNames.COPY_LINK_TO_CLIPBOARD,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        }\n    ],\n];\n\nexport const processResourceActionSet: ContextMenuActionSet = [\n    [\n        ...readOnlyProcessResourceActionSet.reduce((prev, next) => prev.concat(next), []),\n        {\n            icon: RenameIcon,\n            name: ContextMenuActionNames.EDIT_PROCESS,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openProcessUpdateDialog(resources[0]));\n            },\n        },\n        {\n            name: ContextMenuActionNames.REMOVE,\n            icon: DeleteForever,\n            isForMulti: true,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openRemoveProcessDialog(resources[0], resources.length));\n            },\n        },\n    ],\n];\n\nconst runningProcessOnlyActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.CANCEL,\n            icon: StopIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openCancelProcesswDialog(resources[0].uuid));\n            },\n        },\n    ]\n];\n\nexport const processResourceAdminActionSet: ContextMenuActionSet = [\n    [\n        ...processResourceActionSet.reduce((prev, next) => prev.concat(next), []),\n        {\n            component: TogglePublicFavoriteAction,\n            name: ContextMenuActionNames.ADD_TO_PUBLIC_FAVORITES,\n            execute: (dispatch, resources) => {\n                dispatch<any>(togglePublicFavorite(resources[0])).then(() => {\n                    dispatch<any>(publicFavoritePanelActions.REQUEST_ITEMS());\n                });\n            },\n        },\n    ],\n];\n\nexport const runningProcessResourceActionSet = [\n    [\n        ...processResourceActionSet.reduce((prev, next) => prev.concat(next), []),\n        ...runningProcessOnlyActionSet.reduce((prev, next) => prev.concat(next), []),\n    ],\n];\n\nexport const runningProcessResourceAdminActionSet: ContextMenuActionSet = [\n    [\n        ...processResourceAdminActionSet.reduce((prev, next) => prev.concat(next), []),\n        ...runningProcessOnlyActionSet.reduce((prev, next) => prev.concat(next), []),\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/project-action-set.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { filterGroupActionSet, projectActionSet, readOnlyProjectActionSet } from \"./project-action-set\";\nimport { containsActionSubSet } from \"../../../cypress/utils/contains-action-subset\";\n\ndescribe('project-action-set', () => {\n    const flattProjectActionSet = projectActionSet.reduce((prev, next) => prev.concat(next), []);\n    const flattReadOnlyProjectActionSet = readOnlyProjectActionSet.reduce((prev, next) => prev.concat(next), []);\n    const flattFilterGroupActionSet = filterGroupActionSet.reduce((prev, next) => prev.concat(next), []);\n\n    describe('projectActionSet', () => {\n        it('should not be empty', () => {\n            // then\n            expect(flattProjectActionSet).to.have.length.greaterThan(0);\n        });\n\n        it('should contain readOnlyProjectActionSet items', () => {\n            // then\n            expect(containsActionSubSet(flattProjectActionSet, flattReadOnlyProjectActionSet)).to.be.true;\n        })\n    });\n\n    describe('readOnlyProjectActionSet', () => {\n        it('should not be empty', () => {\n            // then\n            expect(flattReadOnlyProjectActionSet).to.have.length.greaterThan(0);\n        });\n\n        it('should not contain projectActionSet items', () => {\n            // then\n            expect(containsActionSubSet(flattReadOnlyProjectActionSet, flattProjectActionSet)).to.be.false;\n        })\n    });\n\n    describe('filterGroupActionSet', () => {\n        it('should not be empty', () => {\n            // then\n            expect(flattFilterGroupActionSet).to.have.length.greaterThan(0);\n        });\n\n        it('should not contain projectActionSet items', () => {\n            // then\n            expect(containsActionSubSet(flattFilterGroupActionSet, flattProjectActionSet)).to.be.false;\n        })\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/project-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from \"../context-menu-action-set\";\nimport { NewProjectIcon, RenameIcon, MoveToIcon, DetailsIcon, AdvancedIcon, OpenIcon, Link, FolderSharedIcon, CopyIcon } from \"components/icon/icon\";\nimport { ToggleFavoriteAction } from \"../actions/favorite-action\";\nimport { toggleFavorite } from \"store/favorites/favorites-actions\";\nimport { favoritePanelActions } from \"store/favorite-panel/favorite-panel-action\";\nimport { openMoveProjectDialog } from \"store/projects/project-move-actions\";\nimport { openProjectCreateDialog } from \"store/projects/project-create-actions\";\nimport { openProjectUpdateDialog } from \"store/projects/project-update-actions\";\nimport { ToggleTrashAction } from \"views-components/context-menu/actions/trash-action\";\nimport { toggleResourceTrashed } from \"store/trash/trash-actions\";\nimport { ShareIcon } from \"components/icon/icon\";\nimport { openSharingDialog } from \"store/sharing-dialog/sharing-dialog-actions\";\nimport { openAdvancedTabDialog } from \"store/advanced-tab/advanced-tab\";\nimport { openDetailsPanel } from \"store/details-panel/details-panel-action\";\nimport { copyToClipboardAction, copyStringToClipboardAction, openInNewTabAction } from \"store/open-in-new-tab/open-in-new-tab.actions\";\nimport { openWebDavS3InfoDialog } from \"store/collections/collection-info-actions\";\nimport { ToggleLockAction } from \"../actions/lock-action\";\nimport { freezeProject, unfreezeProject } from \"store/projects/project-lock-actions\";\n\nexport const toggleFavoriteAction = {\n    component: ToggleFavoriteAction,\n    name: ContextMenuActionNames.ADD_TO_FAVORITES,\n    execute: (dispatch, resources) => {\n        dispatch(toggleFavorite(resources[0])).then(() => {\n            dispatch(favoritePanelActions.REQUEST_ITEMS());\n        });\n    },\n};\n\nexport const openInNewTabMenuAction = {\n    icon: OpenIcon,\n    name: ContextMenuActionNames.OPEN_IN_NEW_TAB,\n    execute: (dispatch, resources) => {\n        dispatch(openInNewTabAction(resources[0]));\n    },\n};\n\nexport const copyToClipboardMenuAction = {\n    icon: Link,\n    name: ContextMenuActionNames.COPY_LINK_TO_CLIPBOARD,\n    execute: (dispatch, resources) => {\n        dispatch(copyToClipboardAction(resources));\n    },\n};\n\nexport const copyUuidAction = {\n    icon: CopyIcon,\n    name: ContextMenuActionNames.COPY_UUID,\n    execute: (dispatch, resources) => {\n        dispatch(copyStringToClipboardAction(resources[0].uuid));\n    },\n};\n\nexport const viewDetailsAction = {\n    icon: DetailsIcon,\n    name: ContextMenuActionNames.VIEW_DETAILS,\n    execute: (dispatch, resources) => {\n        dispatch(openDetailsPanel(resources[0].uuid));\n    },\n};\n\nexport const advancedAction = {\n    icon: AdvancedIcon,\n    name: ContextMenuActionNames.API_DETAILS,\n    execute: (dispatch, resources) => {\n        dispatch(openAdvancedTabDialog(resources[0].uuid));\n    },\n};\n\nexport const openWith3rdPartyClientAction = {\n    icon: FolderSharedIcon,\n    name: ContextMenuActionNames.OPEN_WITH_3RD_PARTY_CLIENT,\n    execute: (dispatch, resources) => {\n        dispatch(openWebDavS3InfoDialog(resources[0].uuid));\n    },\n};\n\nexport const editProjectAction = {\n    icon: RenameIcon,\n    name: ContextMenuActionNames.EDIT_PROJECT,\n    execute: (dispatch, resources) => {\n        
dispatch(openProjectUpdateDialog(resources[0]));\n    },\n};\n\nexport const shareAction = {\n    icon: ShareIcon,\n    name: ContextMenuActionNames.SHARE,\n    execute: (dispatch, resources) => {\n        dispatch(openSharingDialog(resources[0].uuid));\n    },\n};\n\nexport const moveToAction = {\n    icon: MoveToIcon,\n    name: ContextMenuActionNames.MOVE_TO,\n    isForMulti: true,\n    execute: (dispatch, resources) => {\n        dispatch(openMoveProjectDialog(resources[0]));\n    },\n};\n\nexport const toggleTrashAction = {\n    component: ToggleTrashAction,\n    name: ContextMenuActionNames.MOVE_TO_TRASH,\n    isForMulti: true,\n    execute: (dispatch, resources) => {\n        dispatch(toggleResourceTrashed(resources.map(res => res.uuid), resources.some(res => res.isTrashed)));\n    },\n};\n\nexport const freezeProjectAction = {\n    component: ToggleLockAction,\n    name: ContextMenuActionNames.FREEZE_PROJECT,\n    execute: (dispatch, resources) => {\n        if (resources[0].frozenByUuid || resources[0].isFrozen) {\n            dispatch(unfreezeProject(resources[0].uuid));\n        } else {\n            dispatch(freezeProject(resources[0].uuid));\n        }\n    },\n};\n\nexport const newProjectAction: any = {\n    icon: NewProjectIcon,\n    name: ContextMenuActionNames.NEW_PROJECT,\n    execute: (dispatch, resources): void => {\n        dispatch(openProjectCreateDialog(resources[0].uuid));\n    },\n};\n\nexport const readOnlyProjectActionSet: ContextMenuActionSet = [\n    [toggleFavoriteAction, openInNewTabMenuAction, copyToClipboardMenuAction, copyUuidAction, viewDetailsAction, advancedAction, openWith3rdPartyClientAction],\n];\n\nexport const filterGroupActionSet: ContextMenuActionSet = [\n    [\n        toggleFavoriteAction,\n        openInNewTabMenuAction,\n        copyToClipboardMenuAction,\n        viewDetailsAction,\n        advancedAction,\n        openWith3rdPartyClientAction,\n        editProjectAction,\n        shareAction,\n        moveToAction,\n        toggleTrashAction,\n        copyUuidAction,\n    ],\n];\n\nexport const frozenActionSet: ContextMenuActionSet = [\n    [\n        shareAction,\n        toggleFavoriteAction,\n        openInNewTabMenuAction,\n        copyToClipboardMenuAction,\n        viewDetailsAction,\n        advancedAction,\n        openWith3rdPartyClientAction,\n        freezeProjectAction,\n        copyUuidAction,\n    ],\n];\n\nexport const projectActionSet: ContextMenuActionSet = [\n    [\n        toggleFavoriteAction,\n        openInNewTabMenuAction,\n        copyToClipboardMenuAction,\n        viewDetailsAction,\n        advancedAction,\n        openWith3rdPartyClientAction,\n        editProjectAction,\n        shareAction,\n        moveToAction,\n        toggleTrashAction,\n        newProjectAction,\n        freezeProjectAction,\n        copyUuidAction,\n    ],\n];\n\nexport const writeableProjectActionSet: ContextMenuActionSet = [\n    [\n        toggleFavoriteAction,\n        openInNewTabMenuAction,\n        copyToClipboardMenuAction,\n        viewDetailsAction,\n        advancedAction,\n        openWith3rdPartyClientAction,\n        editProjectAction,\n        moveToAction,\n        toggleTrashAction,\n        newProjectAction,\n        copyUuidAction,\n    ],\n];\n\nexport const manageableProjectActionSet: ContextMenuActionSet = [\n    [\n        viewDetailsAction,\n        openInNewTabMenuAction,\n        copyUuidAction,\n        shareAction,\n        freezeProjectAction,\n        toggleFavoriteAction,\n        copyToClipboardMenuAction,\n        openWith3rdPartyClientAction,\n        advancedAction,\n    ],\n];\n\nexport const frozenManageableProjectActionSet: ContextMenuActionSet = [\n    [\n        viewDetailsAction,\n        openInNewTabMenuAction,\n        copyUuidAction,\n        shareAction,\n        toggleFavoriteAction,\n        copyToClipboardMenuAction,\n        openWith3rdPartyClientAction,\n        advancedAction,\n        freezeProjectAction,\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/project-admin-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from \"../context-menu-action-set\";\nimport { TogglePublicFavoriteAction } from \"views-components/context-menu/actions/public-favorite-action\";\nimport { togglePublicFavorite } from \"store/public-favorites/public-favorites-actions\";\nimport { publicFavoritePanelActions } from \"store/public-favorites-panel/public-favorites-action\";\n\nimport {\n    shareAction,\n    toggleFavoriteAction,\n    openInNewTabMenuAction,\n    copyToClipboardMenuAction,\n    viewDetailsAction,\n    advancedAction,\n    openWith3rdPartyClientAction,\n    freezeProjectAction,\n    editProjectAction,\n    moveToAction,\n    toggleTrashAction,\n    newProjectAction,\n    copyUuidAction,\n} from \"views-components/context-menu/action-sets/project-action-set\";\n\nexport const togglePublicFavoriteAction = {\n    component: TogglePublicFavoriteAction,\n    name: ContextMenuActionNames.ADD_TO_PUBLIC_FAVORITES,\n    execute: (dispatch, resources) => {\n        dispatch(togglePublicFavorite(resources[0])).then(() => {\n            dispatch(publicFavoritePanelActions.REQUEST_ITEMS());\n        });\n    },\n};\n\nexport const projectAdminActionSet: ContextMenuActionSet = [\n    [\n        toggleFavoriteAction,\n        openInNewTabMenuAction,\n        copyToClipboardMenuAction,\n        viewDetailsAction,\n        advancedAction,\n        openWith3rdPartyClientAction,\n        editProjectAction,\n        shareAction,\n        moveToAction,\n        toggleTrashAction,\n        newProjectAction,\n        freezeProjectAction,\n        togglePublicFavoriteAction,\n        copyUuidAction,\n    ],\n];\n\nexport const filterGroupAdminActionSet: ContextMenuActionSet = [\n    [\n        toggleFavoriteAction,\n        openInNewTabMenuAction,\n        copyToClipboardMenuAction,\n        viewDetailsAction,\n        advancedAction,\n        openWith3rdPartyClientAction,\n        editProjectAction,\n        shareAction,\n        moveToAction,\n        toggleTrashAction,\n        togglePublicFavoriteAction,\n        copyUuidAction,\n    ],\n];\n\nexport const frozenAdminActionSet: ContextMenuActionSet = [\n    [\n        shareAction,\n        togglePublicFavoriteAction,\n        toggleFavoriteAction,\n        openInNewTabMenuAction,\n        copyToClipboardMenuAction,\n        viewDetailsAction,\n        advancedAction,\n        openWith3rdPartyClientAction,\n        freezeProjectAction,\n        copyUuidAction,\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/repository-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from 'views-components/context-menu/context-menu-action-set';\nimport { AdvancedIcon, RemoveIcon, ShareIcon, AttributesIcon } from 'components/icon/icon';\nimport { openAdvancedTabDialog } from 'store/advanced-tab/advanced-tab';\nimport { openRepositoryAttributes, openRemoveRepositoryDialog } from 'store/repositories/repositories-actions';\nimport { openSharingDialog } from 'store/sharing-dialog/sharing-dialog-actions';\n\nexport const repositoryActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.ATTRIBUTES,\n            icon: AttributesIcon,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openRepositoryAttributes(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.SHARE,\n            icon: ShareIcon,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openSharingDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.API_DETAILS,\n            icon: AdvancedIcon,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.REMOVE,\n            icon: RemoveIcon,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openRemoveRepositoryDialog(resources[0].uuid));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/resource-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from '../context-menu-action-set';\nimport { ToggleFavoriteAction } from '../actions/favorite-action';\nimport { toggleFavorite } from 'store/favorites/favorites-actions';\n\nexport const resourceActionSet: ContextMenuActionSet = [\n    [\n        {\n            component: ToggleFavoriteAction,\n            name: ContextMenuActionNames.ADD_TO_FAVORITES,\n            execute: (dispatch, resources) => {\n                resources.forEach((resource) => dispatch<any>(toggleFavorite(resource)));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/root-project-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from '../context-menu-action-set';\nimport { openCollectionCreateDialog } from 'store/collections/collection-create-actions';\nimport { NewProjectIcon, CollectionIcon } from 'components/icon/icon';\nimport { openProjectCreateDialog } from 'store/projects/project-create-actions';\n\nexport const rootProjectActionSet: ContextMenuActionSet = [\n    [\n        {\n            icon: NewProjectIcon,\n            name: ContextMenuActionNames.NEW_PROJECT,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openProjectCreateDialog(resources[0].uuid));\n            },\n        },\n        {\n            icon: CollectionIcon,\n            name: ContextMenuActionNames.NEW_COLLECTION,\n            execute: (dispatch, resources) => {\n                 dispatch<any>(openCollectionCreateDialog(resources[0].uuid));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/search-results-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from '../context-menu-action-set';\nimport { DetailsIcon, AdvancedIcon, OpenIcon, Link, CopyIcon } from 'components/icon/icon';\nimport { openAdvancedTabDialog } from 'store/advanced-tab/advanced-tab';\nimport { openDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { copyStringToClipboardAction, copyToClipboardAction, openInNewTabAction } from 'store/open-in-new-tab/open-in-new-tab.actions';\n\nexport const searchResultsActionSet: ContextMenuActionSet = [\n    [\n        {\n            icon: OpenIcon,\n            name: ContextMenuActionNames.OPEN_IN_NEW_TAB,\n            execute: (dispatch, resources) => {\n                resources.forEach((resource) => dispatch<any>(openInNewTabAction(resource)));\n            },\n        },\n        {\n            icon: Link,\n            name: ContextMenuActionNames.COPY_LINK_TO_CLIPBOARD,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyToClipboardAction(resources));\n            },\n        },\n        {\n            icon: DetailsIcon,\n            name: ContextMenuActionNames.VIEW_DETAILS,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openDetailsPanel(resources[0].uuid));\n            },\n        },\n        {\n            icon: AdvancedIcon,\n            name: ContextMenuActionNames.API_DETAILS,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/ssh-key-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from 'views-components/context-menu/context-menu-action-set';\nimport { AdvancedIcon, RemoveIcon, AttributesIcon, CopyIcon } from 'components/icon/icon';\nimport { openSshKeyRemoveDialog, openSshKeyAttributesDialog } from 'store/auth/auth-action-ssh';\nimport { openAdvancedTabDialog } from 'store/advanced-tab/advanced-tab';\nimport { copyStringToClipboardAction } from 'store/open-in-new-tab/open-in-new-tab.actions';\n\nexport const sshKeyActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.ATTRIBUTES,\n            icon: AttributesIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openSshKeyAttributesDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.API_DETAILS,\n            icon: AdvancedIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.REMOVE,\n            icon: RemoveIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openSshKeyRemoveDialog(resources[0].uuid));\n            },\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/trash-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from '../context-menu-action-set';\nimport { ToggleTrashAction } from 'views-components/context-menu/actions/trash-action';\nimport { toggleResourceTrashed } from 'store/trash/trash-actions';\n\nexport const trashActionSet: ContextMenuActionSet = [\n    [\n        {\n            component: ToggleTrashAction,\n            name: ContextMenuActionNames.MOVE_TO_TRASH,\n            execute: (dispatch, resources) => {\n                dispatch<any>(toggleResourceTrashed(resources.map(res => res.uuid), resources.some(res => res.isTrashed)));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/trashed-collection-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from '../context-menu-action-set';\nimport { DetailsIcon, ProvenanceGraphIcon, AdvancedIcon, RestoreFromTrashIcon } from 'components/icon/icon';\nimport { toggleResourceTrashed } from 'store/trash/trash-actions';\nimport { openAdvancedTabDialog } from 'store/advanced-tab/advanced-tab';\nimport { openDetailsPanel } from 'store/details-panel/details-panel-action';\n\nexport const trashedCollectionActionSet: ContextMenuActionSet = [\n    [\n        {\n            icon: DetailsIcon,\n            name: ContextMenuActionNames.VIEW_DETAILS,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openDetailsPanel(resources[0].uuid));\n            },\n        },\n        {\n            icon: ProvenanceGraphIcon,\n            name: ContextMenuActionNames.PROVENANCE_GRAPH,\n            execute: (dispatch, resource) => {\n                // add code\n            },\n        },\n        {\n            icon: AdvancedIcon,\n            name: ContextMenuActionNames.API_DETAILS,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            icon: RestoreFromTrashIcon,\n            name: ContextMenuActionNames.RESTORE,\n            execute: (dispatch, resources) => {\n                dispatch<any>(toggleResourceTrashed(resources.map(res => res.uuid), resources.some(res => res.isTrashed)));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/user-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from 'views-components/context-menu/context-menu-action-set';\nimport {\n    AdvancedIcon,\n    ProjectIcon,\n    AttributesIcon,\n    DeactivateUserIcon,\n    UserPanelIcon,\n    LoginAsIcon,\n    AdminMenuIcon,\n    ActiveIcon,\n    CopyIcon,\n} from 'components/icon/icon';\nimport { openAdvancedTabDialog } from 'store/advanced-tab/advanced-tab';\nimport { loginAs, openUserAttributes, openUserProjects } from 'store/users/users-actions';\nimport { openSetupDialog, openDeactivateDialog, openActivateDialog } from 'store/user-profile/user-profile-actions';\nimport { navigateToUserProfile } from 'store/navigation/navigation-action';\nimport {\n    canActivateUser,\n    canDeactivateUser,\n    canSetupUser,\n    isAdmin,\n    needsUserProfileLink,\n    isOtherUser,\n} from 'store/context-menu/context-menu-filters';\nimport { copyStringToClipboardAction } from 'store/open-in-new-tab/open-in-new-tab.actions';\n\nexport const userActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.ATTRIBUTES,\n            icon: AttributesIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openUserAttributes(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.HOME_PROJECT,\n            icon: ProjectIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openUserProjects(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.API_DETAILS,\n            icon: AdvancedIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.ACCOUNT_SETTINGS,\n            icon: UserPanelIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(navigateToUserProfile(resources[0].uuid));\n            },\n            filters: [needsUserProfileLink],\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n    ],\n    [\n        {\n            name: ContextMenuActionNames.ACTIVATE_USER,\n            icon: ActiveIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openActivateDialog(resources[0].uuid));\n            },\n            filters: [isAdmin, canActivateUser],\n        },\n        {\n            name: ContextMenuActionNames.SETUP_USER,\n            icon: AdminMenuIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openSetupDialog(resources[0].uuid));\n            },\n            filters: [isAdmin, canSetupUser],\n        },\n        {\n            name: ContextMenuActionNames.LOGIN_AS_USER,\n            icon: LoginAsIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(loginAs(resources[0].uuid));\n            },\n            filters: [isAdmin, isOtherUser],\n        },\n        {\n            name: ContextMenuActionNames.DEACTIVATE_USER,\n            icon: DeactivateUserIcon,\n            execute: (dispatch, resources) => {\n                
dispatch<any>(openDeactivateDialog(resources[0].uuid));\n            },\n            filters: [isAdmin, canDeactivateUser],\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/user-details-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet } from 'views-components/context-menu/context-menu-action-set';\nimport { AdvancedIcon, UserPanelIcon, DetailsIcon, CopyIcon } from 'components/icon/icon';\nimport { openAdvancedTabDialog } from 'store/advanced-tab/advanced-tab';\nimport { openDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { navigateToUserProfile } from 'store/navigation/navigation-action';\nimport { needsUserProfileLink } from 'store/context-menu/context-menu-filters';\nimport { ContextMenuActionNames } from 'views-components/context-menu/context-menu-action-set';\nimport { copyStringToClipboardAction } from 'store/open-in-new-tab/open-in-new-tab.actions';\n\nexport const UserDetailsActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.VIEW_DETAILS,\n            icon: DetailsIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openDetailsPanel(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.API_DETAILS,\n            icon: AdvancedIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.USER_ACCOUNT,\n            icon: UserPanelIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(navigateToUserProfile(resources[0].uuid));\n            },\n            filters: [needsUserProfileLink],\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/virtual-machine-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from 'views-components/context-menu/context-menu-action-set';\nimport { AdvancedIcon, RemoveIcon, AttributesIcon, CopyIcon } from 'components/icon/icon';\nimport { openAdvancedTabDialog } from 'store/advanced-tab/advanced-tab';\nimport { openVirtualMachineAttributes, openRemoveVirtualMachineDialog } from 'store/virtual-machines/virtual-machines-actions';\nimport { copyStringToClipboardAction } from 'store/open-in-new-tab/open-in-new-tab.actions';\n\nexport const virtualMachineActionSet: ContextMenuActionSet = [\n    [\n        {\n            name: ContextMenuActionNames.ATTRIBUTES,\n            icon: AttributesIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openVirtualMachineAttributes(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.API_DETAILS,\n            icon: AdvancedIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            name: ContextMenuActionNames.REMOVE,\n            icon: RemoveIcon,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openRemoveVirtualMachineDialog(resources[0].uuid));\n            },\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/action-sets/workflow-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuActionSet, ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport { openRunProcess, openRemoveWorkflowDialog } from \"store/workflow-panel/workflow-panel-actions\";\nimport { DetailsIcon, AdvancedIcon, OpenIcon, Link, StartIcon, DeleteForever, CopyIcon } from \"components/icon/icon\";\nimport { copyToClipboardAction, openInNewTabAction } from \"store/open-in-new-tab/open-in-new-tab.actions\";\nimport { openDetailsPanel } from \"store/details-panel/details-panel-action\";\nimport { openAdvancedTabDialog } from \"store/advanced-tab/advanced-tab\";\nimport { copyStringToClipboardAction } from \"store/open-in-new-tab/open-in-new-tab.actions\";\n\nexport const readOnlyWorkflowActionSet: ContextMenuActionSet = [\n    [\n        {\n            icon: OpenIcon,\n            name: ContextMenuActionNames.OPEN_IN_NEW_TAB,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openInNewTabAction(resources[0]));\n            },\n        },\n        {\n            icon: Link,\n            name: ContextMenuActionNames.COPY_LINK_TO_CLIPBOARD,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyToClipboardAction(resources));\n            },\n        },\n        {\n            icon: DetailsIcon,\n            name: ContextMenuActionNames.VIEW_DETAILS,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openDetailsPanel(resources[0].uuid));\n            },\n        },\n        {\n            icon: AdvancedIcon,\n            name: ContextMenuActionNames.API_DETAILS,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openAdvancedTabDialog(resources[0].uuid));\n            },\n        },\n        {\n            icon: StartIcon,\n            name: ContextMenuActionNames.RUN_WORKFLOW,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openRunProcess(resources[0].uuid, resources[0].ownerUuid, resources[0].name));\n            },\n        },\n        {\n            icon: CopyIcon,\n            name: ContextMenuActionNames.COPY_UUID,\n            execute: (dispatch, resources) => {\n                dispatch<any>(copyStringToClipboardAction(resources[0].uuid));\n            },\n        },\n    ],\n];\n\nexport const workflowActionSet: ContextMenuActionSet = [\n    [\n        ...readOnlyWorkflowActionSet[0],\n        {\n            icon: DeleteForever,\n            name: \"Delete Workflow\",\n            isForMulti: true,\n            execute: (dispatch, resources) => {\n                dispatch<any>(openRemoveWorkflowDialog(resources[0], resources.length));\n            },\n        },\n    ],\n];\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/collection-copy-to-clipboard-action.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from \"react-redux\";\nimport { RootState } from \"../../../store/store\";\nimport { getNodeValue } from \"models/tree\";\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\nimport { CopyToClipboardAction } from \"./copy-to-clipboard-action\";\nimport { replaceCollectionId, getCollectionItemClipboardUrl, sanitizeToken } from \"./helpers\";\n\nconst mapStateToPropsUUID = (state: RootState) => {\n    const { resource } = state.contextMenu;\n    const currentCollectionUuid = state.collectionPanel.item ? state.collectionPanel.item.uuid : '';\n    if (resource && [\n        ContextMenuKind.COLLECTION_FILE_ITEM,\n        ContextMenuKind.READONLY_COLLECTION_FILE_ITEM,\n        ContextMenuKind.COLLECTION_DIRECTORY_ITEM,\n        ContextMenuKind.READONLY_COLLECTION_DIRECTORY_ITEM ].indexOf(resource.menuKind as ContextMenuKind) > -1) {\n        const file = getNodeValue(resource.uuid)(state.collectionPanelFiles);\n        if (file) {\n\t    return {\n                href: getCollectionItemClipboardUrl(replaceCollectionId(file.url, currentCollectionUuid),\n\t\t\t\t\t\t    state.auth.config.keepWebServiceUrl,\n\t\t\t\t\t\t    state.auth.config.keepWebInlineServiceUrl),\n                kind: 'file',\n\t\tcustomText: \"Copy link to latest version\",\n\t    };\n        }\n    }\n    return {};\n};\n\nconst mapStateToPropsPDH = (state: RootState) => {\n    const { resource } = state.contextMenu;\n    const currentCollectionPDH = state.collectionPanel.item ? state.collectionPanel.item.portableDataHash : '';\n    if (resource && [\n        ContextMenuKind.COLLECTION_FILE_ITEM,\n        ContextMenuKind.READONLY_COLLECTION_FILE_ITEM,\n        ContextMenuKind.COLLECTION_DIRECTORY_ITEM,\n        ContextMenuKind.READONLY_COLLECTION_DIRECTORY_ITEM ].indexOf(resource.menuKind as ContextMenuKind) > -1) {\n        const file = getNodeValue(resource.uuid)(state.collectionPanelFiles);\n        if (file) {\n\t    return {\n                href: getCollectionItemClipboardUrl(replaceCollectionId(file.url, currentCollectionPDH),\n\t\t\t\t\t\t    state.auth.config.keepWebServiceUrl,\n\t\t\t\t\t\t    state.auth.config.keepWebInlineServiceUrl),\n\t\tkind: 'file',\n\t\tcustomText: \"Copy link to immutable version\",\n\t    };\n        }\n    }\n    return {};\n};\n\nconst mapStateToPropsCWL = (state: RootState) => {\n    const { resource } = state.contextMenu;\n    const currentCollectionPDH = state.collectionPanel.item ? 
state.collectionPanel.item.portableDataHash : '';\n    if (resource && [\n        ContextMenuKind.COLLECTION_FILE_ITEM,\n        ContextMenuKind.READONLY_COLLECTION_FILE_ITEM,\n        ContextMenuKind.COLLECTION_DIRECTORY_ITEM,\n        ContextMenuKind.READONLY_COLLECTION_DIRECTORY_ITEM ].indexOf(resource.menuKind as ContextMenuKind) > -1) {\n        const file = getNodeValue(resource.uuid)(state.collectionPanelFiles);\n        if (file) {\n\t    let url = file.url;\n\t    url = replaceCollectionId(url, '');\n\t    url = sanitizeToken(url, false);\n\t    const path = new URL(url).pathname;\n\t    return {\n                href: `keep:${currentCollectionPDH}${path}`,\n\t\tkind: 'file',\n\t\tcustomText: \"Copy CWL file reference\",\n\t    };\n        }\n    }\n    return {};\n};\n\nexport const CollectionUUIDCopyToClipboardAction = connect(mapStateToPropsUUID)(CopyToClipboardAction);\n\nexport const CollectionPDHCopyToClipboardAction = connect(mapStateToPropsPDH)(CopyToClipboardAction);\n\nexport const CollectionCWLCopyToClipboardAction = connect(mapStateToPropsCWL)(CopyToClipboardAction);\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/collection-file-viewer-action.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport configureMockStore from 'redux-mock-store'\nimport { Provider } from 'react-redux';\nimport { CollectionFileViewerAction } from './collection-file-viewer-action';\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\nimport { createTree, initTreeNode, setNode } from \"models/tree\";\nimport { getInlineFileUrl, sanitizeToken } from \"./helpers\";\n\nconst middlewares = [];\nconst mockStore = configureMockStore(middlewares);\n\ndescribe('CollectionFileViewerAction', () => {\n    let defaultStore;\n    const fileUrl = \"https://download.host:12345/c=abcde-4zz18-abcdefghijklmno/t=v2/token2/token3/cat.jpg\";\n    const insecureKeepInlineUrl = \"https://download.host:12345/\";\n    const secureKeepInlineUrl = \"https://*.collections.host:12345/\";\n\n    beforeEach(() => {\n        let filesTree = createTree();\n        let data = {id: \"000\", value: {\"url\": fileUrl}};\n        filesTree = setNode(initTreeNode(data))(filesTree);\n\n        defaultStore = {\n            auth: {\n                config: {\n                    keepWebServiceUrl: \"https://download.host:12345/\",\n                    keepWebInlineServiceUrl: insecureKeepInlineUrl,\n                    clusterConfig: {\n                        Collections: {\n                          TrustAllContent: false\n                        }\n                    }\n                }\n            },\n            contextMenu: {\n                resource: {\n                    uuid: \"000\",\n                    menuKind: ContextMenuKind.COLLECTION_FILE_ITEM,\n                }\n            },\n            collectionPanel: {\n                item: {\n                    uuid: \"\"\n                }\n            },\n            collectionPanelFiles: filesTree\n        };\n    });\n\n    it('should hide open in new tab when unsafe', () => {\n        // given\n        const store = mockStore(defaultStore);\n\n        // when\n        cy.mount(<Provider store={store}>\n            <CollectionFileViewerAction />\n        </Provider>);\n\n        // ensure cy.mount has been successful\n        cy.get('[data-cy-root').should('exist');\n\n        // and\n        cy.get('[data-cy=open-in-new-tab]').should('have.length', 0);\n    });\n\n    it('should show open in new tab when TrustAllContent=true', () => {\n        // given\n        let initialState = defaultStore;\n        initialState.auth.config.clusterConfig.Collections.TrustAllContent = true;\n        const store = mockStore(initialState);\n\n        // when\n        cy.mount(<Provider store={store}>\n            <CollectionFileViewerAction />\n        </Provider>);\n\n        // then\n        cy.get('[data-cy=open-in-new-tab]').should('exist');\n\n        // and\n        cy.get('[data-cy=open-in-new-tab]').should(\n            'have.attr',\n            'href',\n            sanitizeToken(getInlineFileUrl(fileUrl, initialState.auth.config.keepWebServiceUrl, initialState.auth.config.keepWebInlineServiceUrl))\n        );\n    });\n\n    it('should show open in new tab when inline url is secure', () => {\n        // given\n        let initialState = defaultStore;\n        initialState.auth.config.keepWebInlineServiceUrl = secureKeepInlineUrl;\n        const store = mockStore(initialState);\n\n        // when\n        cy.mount(<Provider store={store}>\n            <CollectionFileViewerAction />\n        </Provider>);\n\n        // 
then\n        cy.get('[data-cy=open-in-new-tab]').should('exist');\n\n        // and\n        sanitizeToken(getInlineFileUrl(fileUrl, initialState.auth.config.keepWebServiceUrl, initialState.auth.config.keepWebInlineServiceUrl))\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/collection-file-viewer-action.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from \"react-redux\";\nimport { RootState } from \"../../../store/store\";\nimport { FileViewerAction } from 'views-components/context-menu/actions/file-viewer-action';\nimport { getNodeValue } from \"models/tree\";\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\nimport { getInlineFileUrl, sanitizeToken, isInlineFileUrlSafe } from \"./helpers\";\n\nconst mapStateToProps = (state: RootState) => {\n    const { resource } = state.contextMenu;\n    const currentCollectionUuid = state.collectionPanel.item ? state.collectionPanel.item.uuid : '';\n    if (resource && [\n        ContextMenuKind.COLLECTION_FILE_ITEM,\n        ContextMenuKind.READONLY_COLLECTION_FILE_ITEM,\n        ContextMenuKind.COLLECTION_DIRECTORY_ITEM,\n        ContextMenuKind.READONLY_COLLECTION_DIRECTORY_ITEM ].indexOf(resource.menuKind as ContextMenuKind) > -1) {\n        const file = getNodeValue(resource.uuid)(state.collectionPanelFiles);\n        const shouldShowInlineUrl = isInlineFileUrlSafe(\n                                file ? file.url : \"\",\n                                state.auth.config.keepWebServiceUrl,\n                                state.auth.config.keepWebInlineServiceUrl\n                              ) || state.auth.config.clusterConfig.Collections.TrustAllContent;\n        if (file && shouldShowInlineUrl) {\n            const fileUrl = sanitizeToken(getInlineFileUrl(\n                file.url,\n                state.auth.config.keepWebServiceUrl,\n                state.auth.config.keepWebInlineServiceUrl), true);\n            return {\n                href: fileUrl,\n                kind: 'file',\n                currentCollectionUuid\n            };\n        }\n    }\n    return {};\n};\n\nexport const CollectionFileViewerAction = connect(mapStateToProps)(FileViewerAction);\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/context-menu-divider.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { ContextMenuAction } from '../context-menu-action-set';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Divider as DividerComponent } from '@mui/material';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithStyles } from '@mui/styles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { VerticalLineDivider } from 'components/icon/icon';\n\ntype CssRules = 'horizontal' | 'vertical';\n\nconst styles:CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n  horizontal: {\n      backgroundColor: 'black',\n  },\n  vertical: {\n    color: theme.palette.grey[\"400\"],\n    margin: 'auto 0',\n    transform: 'scaleY(1.25)',\n  },\n});\n\nexport const VerticalLine = withStyles(styles)((props: WithStyles<CssRules>) => {\n  return  <VerticalLineDivider className={props.classes.vertical} />;\n});\n\nexport const HorizontalLine = withStyles(styles)((props: WithStyles<CssRules>) => {\n  return  <DividerComponent variant='middle' className={props.classes.horizontal} />;\n});\n\nexport const horizontalMenuDivider: ContextMenuAction = {\n  name: 'Divider',\n  icon: () => null,\n  component: VerticalLine,\n  execute: () => null,\n};\n\nexport const verticalMenuDivider: ContextMenuAction = {\n  name: 'Divider',\n  icon: () => null,\n  component: HorizontalLine,\n  execute: () => null,\n};"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/copy-to-clipboard-action.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CopyToClipboardAction } from './copy-to-clipboard-action';\n\ndescribe('CopyToClipboardAction', () => {\n    let props;\n\n    beforeEach(() => {\n        props = {\n            onClick: cy.stub().as('onClick'),\n            href: 'https://collections.example.com/c=zzzzz-4zz18-k0hamvtwyit6q56/t=xxxxxxxx/LIMS/1.html',\n        };\n    });\n\n    it('should render properly and handle click', () => {\n        // when\n        cy.mount(<CopyToClipboardAction {...props} />);\n\n        // check\n        cy.contains('Copy link to clipboard').click();\n\n        // then\n        cy.get('@onClick').should('have.been.called');\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/copy-to-clipboard-action.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport copy from 'copy-to-clipboard';\nimport { ListItemIcon, ListItemText, ListItem } from \"@mui/material\";\nimport { Link } from \"components/icon/icon\";\n\ninterface CopyToClipboardActionProps {\n    href?: any;\n    kind?: string;\n    customText?: string;\n    onClick?: () => void;\n};\n\nexport const CopyToClipboardAction = (props: CopyToClipboardActionProps) => {\n    const copyToClipboard = () => {\n        if (props.href) {\n\t    copy(props.href);\n        }\n\n        if (props.onClick) {\n            props.onClick();\n        }\n    };\n\n    return props.href\n         ? <ListItem button onClick={copyToClipboard}>\n             <ListItemIcon>\n                 <Link />\n             </ListItemIcon>\n             <ListItemText>\n                 {props.customText ? props.customText : \"Copy link to clipboard\"}\n             </ListItemText>\n         </ListItem>\n         : null;\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/download-action.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport axios from 'axios';\nimport { DownloadAction } from './download-action';\nimport { ThemeProvider } from '@mui/material';\nimport { CustomTheme } from 'common/custom-theme';\n\ndescribe('<DownloadAction />', () => {\n    let props;\n    let zip;\n\n    beforeEach(() => {\n        props = {};\n    });\n\n    it('should return null if missing href or kind of file in props', () => {\n        // when\n        cy.mount(\n                <ThemeProvider theme={CustomTheme}>\n                    <DownloadAction {...props} />\n                </ThemeProvider>);\n\n        // then\n        cy.get('[data-cy-root]').children().should('have.length', 0);\n    });\n\n    it('should return a element', () => {\n        // setup\n        props.href = '#';\n\n        // when\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <DownloadAction {...props} />\n            </ThemeProvider>);\n\n        // then\n        cy.get('[data-cy-root]').children().should('have.length.greaterThan', 0);\n    });\n\n    it('should handle download', () => {\n        // setup\n        props = {\n            href: ['file1'],\n            kind: 'files',\n            download: [],\n            currentCollectionUuid: '123412-123123'\n        };\n\n        Cypress.on('uncaught:exception', (err, runnable) => {\n            // Returning false here prevents Cypress from failing the test when axios returns 404\n            if (err.message.includes('Request failed with status code 404')) {\n                return false;\n              }\n              // Otherwise, let the error fail the test\n              return true;\n          });\n        \n        cy.intercept('GET', '*', (req) => {\n            req.reply({\n              statusCode: 200,\n              body: { message: 'Mocked response' },\n            });\n          }).as('getData');\n        \n        cy.spy(axios, 'get').as('get');\n        \n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <DownloadAction {...props} />\n            </ThemeProvider>);\n\n        // when\n        cy.get('span').contains('Download selected').click();\n\n        // then\n        cy.get('@get').should('be.calledWith', props.href[0]);\n    });\n});"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/download-action.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { ListItemIcon, ListItemText, ListItem } from '@mui/material';\nimport { DownloadIcon } from '../../../components/icon/icon';\nimport JSZip from 'jszip';\nimport FileSaver from 'file-saver';\nimport axios from 'axios';\n\nexport const DownloadAction = (props: { href?: any, download?: any, onClick?: () => void, kind?: string, currentCollectionUuid?: string; }) => {\n    const downloadProps = props.download ? { download: props.download } : {};\n\n    const createZip = (fileUrls: string[], download: string[]) => {\n        let id = 1;\n        const zip = new JSZip();\n        const filteredFileUrls = fileUrls\n            .filter((href: string) => {\n                const letter = href.split('').pop();\n                return letter !== '/';\n            });\n\n        filteredFileUrls.forEach((href: string) => {\n            axios.get(href).then(response => response).then(({ data }: any) => {\n                const splittedByDot = href.split('.');\n                if (splittedByDot[splittedByDot.length - 1] !== 'json') {\n                    if (filteredFileUrls.length === id) {\n                        zip.file(download[id - 1], data);\n                        zip.generateAsync({ type: 'blob' }).then((content) => {\n                            FileSaver.saveAs(content, `download-${props.currentCollectionUuid}.zip`);\n                        });\n                    } else {\n                        zip.file(download[id - 1], data);\n                        zip.generateAsync({ type: 'blob' });\n                    }\n                } else {\n                    zip.file(download[id - 1], JSON.stringify(data));\n                    zip.generateAsync({ type: 'blob' });\n                }\n                id++;\n            });\n        });\n    };\n\n    return props.href || props.kind === 'files'\n        ? <a\n            style={{ textDecoration: 'none' }}\n            href={props.kind === 'files' ? undefined : `${props.href}&disposition=attachment`}\n            onClick={props.onClick}\n            {...downloadProps}>\n            <ListItem button onClick={() => props.kind === 'files' ? createZip(props.href, props.download) : undefined}>\n                {props.kind !== 'files' ?\n                    <ListItemIcon>\n                        <DownloadIcon />\n                    </ListItemIcon> : <span />}\n                <ListItemText>\n                    {props.kind === 'files' ? 'Download selected' : 'Download'}\n                </ListItemText>\n            </ListItem>\n        </a>\n        : null;\n};"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/download-collection-file-action.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from \"react-redux\";\nimport { RootState } from \"../../../store/store\";\nimport { DownloadAction } from \"./download-action\";\nimport { getNodeValue } from \"../../../models/tree\";\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\nimport { filterCollectionFilesBySelection } from \"store/collection-panel/collection-panel-files/collection-panel-files-state\";\nimport { sanitizeToken } from \"./helpers\";\n\nconst mapStateToProps = (state: RootState) => {\n    const { resource } = state.contextMenu;\n    const currentCollectionUuid = state.collectionPanel.item ? state.collectionPanel.item.uuid : '';\n    if (resource && [\n        ContextMenuKind.COLLECTION_FILE_ITEM,\n        ContextMenuKind.READONLY_COLLECTION_FILE_ITEM,\n        ContextMenuKind.COLLECTION_DIRECTORY_ITEM,\n        ContextMenuKind.READONLY_COLLECTION_DIRECTORY_ITEM ].indexOf(resource.menuKind as ContextMenuKind) > -1) {\n        const file = getNodeValue(resource.uuid)(state.collectionPanelFiles);\n        if (file) {\n            return {\n                href: sanitizeToken(file.url, true),\n                kind: 'file',\n                currentCollectionUuid\n            };\n        }\n    } else {\n        const files = filterCollectionFilesBySelection(state.collectionPanelFiles, true);\n        return {\n            href: files.map(file => file.url),\n            kind: 'files',\n            currentCollectionUuid\n        };\n    }\n    return {};\n};\n\nexport const DownloadCollectionFileAction = connect(mapStateToProps)(DownloadAction);\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/favorite-action.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { ListItemIcon, ListItemText, ListItem, Tooltip, IconButton, Typography } from \"@mui/material\";\nimport { AddFavoriteIcon, RemoveFavoriteIcon } from \"components/icon/icon\";\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { FavoritesState } from \"store/favorites/favorites-reducer\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { componentItemStyles, ComponentCssRules } from \"../component-item-styles\";\nimport { ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport classNames from \"classnames\";\n\ntype ToggleFavoriteActionProps = {\n    isInToolbar: boolean,\n    contextMenuResourceUuid: string,\n    selectedResourceUuid?: string,\n    favorites: FavoritesState,\n    disabledButtons: Set<string>,\n    onClick: () => void\n}\n\nconst mapStateToProps = (state: RootState): Pick<ToggleFavoriteActionProps, 'selectedResourceUuid' | 'contextMenuResourceUuid' | 'favorites' | 'disabledButtons'> => ({\n    contextMenuResourceUuid: state.contextMenu.resource?.uuid || '',\n    selectedResourceUuid: state.selectedResource.selectedResourceUuid,\n    favorites: state.favorites,\n    disabledButtons: new Set<string>(state.multiselect.disabledButtons),\n});\n\nexport const ToggleFavoriteAction = connect(mapStateToProps)(withStyles(componentItemStyles)((props: ToggleFavoriteActionProps & WithStyles<ComponentCssRules>) => {\n    const { classes, onClick, isInToolbar, contextMenuResourceUuid, selectedResourceUuid, favorites, disabledButtons } = props;\n\n    const faveResourceUuid = isInToolbar ? selectedResourceUuid : contextMenuResourceUuid;\n    const isFavorite = faveResourceUuid !== undefined && favorites[faveResourceUuid] === true;\n    const isDisabled = disabledButtons.has(ContextMenuActionNames.ADD_TO_FAVORITES);\n\n    return props.isInToolbar ? (\n        <Tooltip title={isFavorite ? \"Remove from favorites\" : \"Add to favorites\"}>\n            <IconButton\n                data-cy='multiselect-button'\n                className={classes.toolbarButton}\n                disabled={isDisabled}\n                onClick={onClick}>\n                <ListItemIcon className={classNames(classes.toolbarIcon, isDisabled && classes.disabled)}>\n                    {isFavorite\n                        ? <RemoveFavoriteIcon />\n                        : <AddFavoriteIcon />}\n                </ListItemIcon>\n            </IconButton>\n        </Tooltip>\n        ) : (\n            <ListItem\n                button\n                onClick={onClick}>\n                <ListItemIcon>\n                    {isFavorite\n                        ? <RemoveFavoriteIcon />\n                        : <AddFavoriteIcon />}\n                </ListItemIcon>\n                <ListItemText style={{ textDecoration: 'none' }}>\n                {isFavorite\n                        ? <Typography>Remove from favorites</Typography>\n                        : <Typography>Add to favorites</Typography>}\n                </ListItemText>\n        </ListItem>\n        )\n}));\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/file-viewer-action.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { FileViewerAction } from './file-viewer-action';\nimport { ThemeProvider } from '@mui/material';\nimport { CustomTheme } from 'common/custom-theme';\n\ndescribe('FileViewerAction', () => {\n    let props;\n\n    beforeEach(() => {\n        props = {\n            onClick: cy.stub().as('onClick'),\n            href: 'https://example.com',\n        };\n    });\n\n    it('should render properly and handle click', () => {\n        // when\n        cy.mount(\n            <ThemeProvider theme={CustomTheme}>\n                <FileViewerAction {...props} />\n            </ThemeProvider>);\n        \n        // then\n        cy.get('[data-cy=open-in-new-tab]').should('exist');\n        cy.get('[data-cy=open-in-new-tab]').click();\n\n        // and\n        cy.get('@onClick').should('have.been.called');\n    });\n});"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/file-viewer-action.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { ListItemIcon, ListItemText, ListItem } from \"@mui/material\";\nimport { OpenIcon } from \"components/icon/icon\";\n\nexport const FileViewerAction = (props: any) => {\n    return props.href\n        ? <a\n            style={{ textDecoration: 'none' }}\n            href={props.href}\n            target=\"_blank\"\n            rel=\"noopener noreferrer\"\n            data-cy=\"open-in-new-tab\"\n            onClick={props.onClick}>\n            <ListItem button>\n                <ListItemIcon>\n                    <OpenIcon />\n                </ListItemIcon>\n                <ListItemText>\n                    Open in new tab\n                </ListItemText>\n            </ListItem>\n        </a>\n        : null;\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/file-viewer-actions.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { ListItemText, ListItem, ListItemIcon, Icon } from \"@mui/material\";\nimport { RootState } from 'store/store';\nimport { getNodeValue } from 'models/tree';\nimport { CollectionDirectory, CollectionFile, CollectionFileType } from 'models/collection-file';\nimport { FileViewerList, FileViewer } from 'models/file-viewers-config';\nimport { getFileViewers } from 'store/file-viewers/file-viewers-selectors';\nimport { connect } from 'react-redux';\nimport { OpenIcon } from 'components/icon/icon';\n\ninterface FileViewerActionProps {\n    fileUrl: string;\n    viewers: FileViewerList;\n}\n\nconst mapStateToProps = (state: RootState): FileViewerActionProps => {\n    const { resource } = state.contextMenu;\n    if (resource) {\n        const file = getNodeValue(resource.uuid)(state.collectionPanelFiles);\n        if (file) {\n            const fileViewers = getFileViewers(state.properties);\n            return {\n                fileUrl: file.url,\n                viewers: fileViewers.filter(enabledViewers(file)),\n            };\n        }\n    }\n    return {\n        fileUrl: '',\n        viewers: [],\n    };\n};\n\nconst enabledViewers = (file: CollectionFile | CollectionDirectory) =>\n    ({ extensions, collections }: FileViewer) => {\n        if (collections && file.type === CollectionFileType.DIRECTORY) {\n            return true;\n        } else if (extensions) {\n            return extensions.some(extension => file.name.endsWith(extension));\n        } else {\n            return true;\n        }\n    };\n\nconst fillViewerUrl = (fileUrl: string, { url, filePathParam }: FileViewer) => {\n    const viewerUrl = new URL(url);\n    viewerUrl.searchParams.append(filePathParam, fileUrl);\n    return viewerUrl.href;\n};\n\nexport const FileViewerActions = connect(mapStateToProps)(\n    ({ fileUrl, viewers, onClick }: FileViewerActionProps & { onClick: () => void }) =>\n        <>\n            {viewers.map(viewer =>\n                <ListItem\n                    button\n                    component='a'\n                    key={viewer.name}\n                    style={{ textDecoration: 'none' }}\n                    href={fillViewerUrl(fileUrl, viewer)}\n                    onClick={onClick}\n                    rel=\"noopener\"\n                    target='_blank'>\n                    <ListItemIcon>\n                        {\n                            viewer.iconUrl\n                                ? <Icon>\n                                    <img src={viewer.iconUrl} />\n                                </Icon>\n                                : <OpenIcon />\n                        }\n                    </ListItemIcon>\n                    <ListItemText>\n                        {viewer.name}\n                    </ListItemText>\n                </ListItem>\n            )}\n        </>);\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/helpers.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { sanitizeToken, getCollectionItemClipboardUrl, getInlineFileUrl } from \"./helpers\";\n\ndescribe('helpers', () => {\n    // given\n    const url = 'https://example.com/c=zzzzz-4zz18-0123456789abcde/t=v2/a/b/LIMS/1.html';\n    const urlWithPdh = 'https://example.com/c=012345678901234567890123456789aa+0/t=v2/a/b/LIMS/1.html';\n\n    describe('sanitizeToken', () => {\n        it('should sanitize token from the url', () => {\n            // when\n            const result = sanitizeToken(url);\n\n            // then\n            expect(result).to.equal('https://example.com/c=zzzzz-4zz18-0123456789abcde/LIMS/1.html?api_token=v2/a/b');\n        });\n    });\n\n    describe('getInlineFileUrl', () => {\n        it('should add the collection\\'s uuid to the hostname', () => {\n            // when\n            const webDavUrlA = 'https://*.collections.example.com/';\n            const webDavUrlB = 'https://*--collections.example.com/';\n            const webDavDownloadUrl = 'https://example.com/';\n\n            // then\n            expect(getInlineFileUrl(url, webDavDownloadUrl, webDavUrlA))\n                .to.equal('https://zzzzz-4zz18-0123456789abcde.collections.example.com/t=v2/a/b/LIMS/1.html');\n            expect(getInlineFileUrl(url, webDavDownloadUrl, webDavUrlB))\n                .to.equal('https://zzzzz-4zz18-0123456789abcde--collections.example.com/t=v2/a/b/LIMS/1.html');\n            expect(getInlineFileUrl(urlWithPdh, webDavDownloadUrl, webDavUrlA))\n                .to.equal('https://012345678901234567890123456789aa-0.collections.example.com/t=v2/a/b/LIMS/1.html');\n            expect(getInlineFileUrl(urlWithPdh, webDavDownloadUrl, webDavUrlB))\n                .to.equal('https://012345678901234567890123456789aa-0--collections.example.com/t=v2/a/b/LIMS/1.html');\n        });\n\n        it('should keep the url the same when no inline url available', () => {\n            // when\n            const webDavUrl = '';\n            const webDavDownloadUrl = 'https://example.com/';\n            const result = getInlineFileUrl(url, webDavDownloadUrl, webDavUrl);\n\n            // then\n            expect(result).to.equal('https://example.com/c=zzzzz-4zz18-0123456789abcde/t=v2/a/b/LIMS/1.html');\n        });\n\n        it('should replace the url when available', () => {\n            // when\n            const webDavUrl = 'https://download.example.com/';\n            const webDavDownloadUrl = 'https://example.com/';\n            const result = getInlineFileUrl(url, webDavDownloadUrl, webDavUrl);\n\n            // then\n            expect(result).to.equal('https://download.example.com/c=zzzzz-4zz18-0123456789abcde/t=v2/a/b/LIMS/1.html');\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/helpers.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { extractUuidKind, ResourceKind } from \"models/resource\";\n\nexport const sanitizeToken = (href: string, tokenAsQueryParam = true): string => {\n    const [prefix, suffix] = href.split('/t=');\n    const [token1, token2, token3, ...rest] = suffix.split('/');\n    const token = `${token1}/${token2}/${token3}`;\n    const sep = href.indexOf(\"?\") > -1 ? \"&\" : \"?\";\n\n    return `${[prefix, ...rest].join('/')}${tokenAsQueryParam ? `${sep}api_token=${token}` : ''}`;\n};\n\nexport const replaceCollectionId = (href: string, rep: string): string => {\n    const [prefix, suffix] = href.split('/c=');\n    const colPath = suffix.split('/').slice(1);\n\n    if (rep) {\n        return `${prefix}/c=${rep}/${colPath.join('/')}`;\n    } else {\n        return `${prefix}/${colPath.join('/')}`;\n    }\n};\n\n/**\n * @returns A shareable token-free WB2 url that redirects to keep-web after login\n */\nexport const getCollectionItemClipboardUrl = (href: string, keepWebServiceUrl: string, keepWebInlineServiceUrl: string): string => {\n    const url = sanitizeToken(href, false);\n    return getInlineFileUrl(url, keepWebServiceUrl, keepWebInlineServiceUrl);\n};\n\nexport const getInlineFileUrl = (url: string, keepWebSvcUrl: string, keepWebInlineSvcUrl: string): string => {\n    const collMatch = url.match(/\\/c=([a-z0-9-+]+)\\//);\n    if (collMatch === null) { return ''; }\n    if (extractUuidKind(collMatch[1]) !== ResourceKind.COLLECTION) { return ''; }\n    const collId = collMatch[1].replace('+', '-');\n    let inlineUrl = keepWebInlineSvcUrl !== \"\"\n        ? url.replace(keepWebSvcUrl, keepWebInlineSvcUrl)\n        : url;\n    let uuidOnHostname = false;\n    // Inline URLs as 'https://*.collections.example.com' or\n    // 'https://*--collections.example.com' should get the uuid on their hostnames\n    // See: https://doc.arvados.org/v2.1/api/keep-web-urls.html\n    if (inlineUrl.indexOf('*.') > -1) {\n        inlineUrl = inlineUrl.replace('*.', `${collId}.`);\n        uuidOnHostname = true;\n    } else if (inlineUrl.indexOf('*--') > -1) {\n        inlineUrl = inlineUrl.replace('*--', `${collId}--`);\n        uuidOnHostname = true;\n    }\n    if (uuidOnHostname) {\n        inlineUrl = inlineUrl.replace(`/c=${collMatch[1]}`, '');\n    }\n    return inlineUrl;\n};\n\nexport const isInlineFileUrlSafe = (url: string, keepWebSvcUrl: string, keepWebInlineSvcUrl: string): boolean => {\n  let inlineUrl = keepWebInlineSvcUrl !== \"\"\n      ? url.replace(keepWebSvcUrl, keepWebInlineSvcUrl)\n      : url;\n  return inlineUrl.indexOf('*.') > -1;\n}\n"
  },
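Composing the two helpers the way `getCollectionItemClipboardUrl` does gives the following trace; a sketch using the same example hostnames as the tests above:

```typescript
import { sanitizeToken, getInlineFileUrl } from "./helpers";

const href = "https://example.com/c=zzzzz-4zz18-0123456789abcde/t=v2/a/b/LIMS/1.html";

// Step 1: strip the embedded v2/a/b token; with tokenAsQueryParam=false it is
// dropped entirely instead of being re-added as ?api_token=...
const clean = sanitizeToken(href, false);
// -> "https://example.com/c=zzzzz-4zz18-0123456789abcde/LIMS/1.html"

// Step 2: with a wildcard inline service configured, the collection UUID moves
// into the hostname and the now-redundant /c=... path segment is removed.
const inline = getInlineFileUrl(clean, "https://example.com/", "https://*.collections.example.com/");
// -> "https://zzzzz-4zz18-0123456789abcde.collections.example.com/LIMS/1.html"
```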
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/lock-action.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { ListItemIcon, ListItemText, ListItem, Tooltip, IconButton, Typography } from \"@mui/material\";\nimport { FreezeIcon, UnfreezeIcon } from \"components/icon/icon\";\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { resourceIsFrozen } from \"common/frozen-resources\";\nimport { getResource } from \"store/resources/resources\";\nimport { GroupResource } from \"models/group\";\nimport { memoize } from \"lodash\";\nimport { ResourcesState } from \"store/resources/resources\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { componentItemStyles, ComponentCssRules } from \"../component-item-styles\";\nimport { ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport classNames from \"classnames\";\n\ntype ToggleLockActionProps = {\n    isInToolbar: boolean;\n    selectedResourceUuid: string;\n    contextMenuResourceUuid: string,\n    resources: ResourcesState,\n    disabledButtons: Set<string>,\n    onClick: () => void;\n};\n\nconst mapStateToProps = (state: RootState): Pick<ToggleLockActionProps, 'selectedResourceUuid' | 'contextMenuResourceUuid' | 'resources' | 'disabledButtons'> => ({\n    contextMenuResourceUuid: state.contextMenu.resource?.uuid || '',\n    selectedResourceUuid: state.selectedResource.selectedResourceUuid,\n    resources: state.resources,\n    disabledButtons: new Set<string>(state.multiselect.disabledButtons),\n});\n\nexport const ToggleLockAction = connect(mapStateToProps)(withStyles(componentItemStyles)(memoize((props: ToggleLockActionProps & WithStyles<ComponentCssRules>) => {\n    const { classes, onClick, isInToolbar, contextMenuResourceUuid, selectedResourceUuid, resources, disabledButtons } = props;\n\n    const lockResourceUuid = isInToolbar ? selectedResourceUuid : contextMenuResourceUuid;\n    const resource = getResource<GroupResource>(lockResourceUuid)(resources);\n    const isLocked = resource ? resourceIsFrozen(resource, resources) : false;\n    const isDisabled = disabledButtons.has(ContextMenuActionNames.FREEZE_PROJECT);\n\n    return isInToolbar ? (\n            <Tooltip title={isLocked ? \"Unfreeze project\" : \"Freeze project\"}>\n                <IconButton\n                data-cy='multiselect-button'\n                className={classes.toolbarButton}\n                disabled={isDisabled}\n                onClick={onClick}>\n                <ListItemIcon className={classNames(classes.toolbarIcon, isDisabled && classes.disabled)}>\n                        {isLocked\n                            ? <UnfreezeIcon />\n                            : <FreezeIcon />}\n                    </ListItemIcon>\n                </IconButton>\n            </Tooltip>\n            ) : (\n            <ListItem button onClick={onClick} data-cy=\"toggle-lock-action\">\n                <ListItemIcon>\n                    {isLocked\n                        ? <UnfreezeIcon />\n                        : <FreezeIcon />}\n                </ListItemIcon>\n                    <ListItemText style={{ textDecoration: 'none' }}>\n                        {isLocked\n                            ? <Typography>Unfreeze project</Typography>\n                            : <Typography>Freeze project</Typography>}\n                    </ListItemText>\n            </ListItem>\n    );\n})));"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/public-favorite-action.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { ListItemIcon, ListItemText, ListItem, Tooltip, IconButton, Typography } from \"@mui/material\";\nimport { PublicFavoriteIcon } from \"components/icon/icon\";\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { PublicFavoritesState } from \"store/public-favorites/public-favorites\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { componentItemStyles, ComponentCssRules } from \"../component-item-styles\";\nimport { ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport classNames from \"classnames\";\n\nconst mapStateToProps = (state: RootState): Pick<TogglePublicFavoriteActionProps, 'selectedResourceUuid' | 'contextMenuResourceUuid' | 'publicFavorites' | 'disabledButtons'> => ({\n    contextMenuResourceUuid: state.contextMenu.resource?.uuid || '',\n    selectedResourceUuid: state.selectedResource.selectedResourceUuid,\n    publicFavorites: state.publicFavorites,\n    disabledButtons: new Set<string>(state.multiselect.disabledButtons),\n});\n\ntype TogglePublicFavoriteActionProps = {\n    isInToolbar: boolean;\n    contextMenuResourceUuid: string;\n    selectedResourceUuid?: string;\n    publicFavorites: PublicFavoritesState;\n    disabledButtons: Set<string>,\n    onClick: () => void;\n};\n\nexport const TogglePublicFavoriteAction = connect(mapStateToProps)(withStyles(componentItemStyles)((props: TogglePublicFavoriteActionProps & WithStyles<ComponentCssRules>) => {\n    const { classes, onClick, isInToolbar, contextMenuResourceUuid, selectedResourceUuid, publicFavorites, disabledButtons } = props;\n\n    const publicFaveUuid = isInToolbar ? selectedResourceUuid : contextMenuResourceUuid;\n    const isPublicFavorite = publicFaveUuid !== undefined && publicFavorites[publicFaveUuid] === true;\n    const isDisabled = disabledButtons.has(ContextMenuActionNames.ADD_TO_PUBLIC_FAVORITES);\n\n    return isInToolbar ? (\n        <Tooltip title={isPublicFavorite ? \"Remove from public favorites\" : \"Add to public favorites\"}>\n            <IconButton\n                data-cy='multiselect-button'\n                className={classes.toolbarButton}\n                disabled={isDisabled}\n                onClick={onClick}>\n                <ListItemIcon className={classNames(classes.toolbarIcon, isDisabled && classes.disabled)}>\n                    {isPublicFavorite\n                        ? <PublicFavoriteIcon />\n                        : <PublicFavoriteIcon />}\n                </ListItemIcon>\n            </IconButton>\n        </Tooltip>\n        ) : (\n            <ListItem\n                button\n                onClick={onClick}>\n                <ListItemIcon>\n                    {isPublicFavorite\n                        ? <PublicFavoriteIcon />\n                        : <PublicFavoriteIcon />}\n                </ListItemIcon>\n                <ListItemText style={{ textDecoration: 'none' }}>\n                    {isPublicFavorite\n                        ? <Typography>Remove from public favorites</Typography>\n                        : <Typography>Add to public favorites</Typography>}\n                </ListItemText>\n            </ListItem>\n        )\n}));\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/actions/trash-action.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { ListItemIcon, ListItemText, ListItem, Tooltip , Typography, IconButton } from \"@mui/material\";\nimport { RestoreFromTrashIcon, TrashIcon } from \"components/icon/icon\";\nimport { connect } from \"react-redux\";\nimport { GroupResource } from \"models/group\";\nimport { RootState } from \"store/store\";\nimport { ResourcesState, getResource } from \"store/resources/resources\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { componentItemStyles, ComponentCssRules } from \"../component-item-styles\";\nimport { ContextMenuActionNames } from \"views-components/context-menu/context-menu-action-set\";\nimport classNames from \"classnames\";\nimport { matchTrashRoute } from \"routes/routes\";\n\nconst mapStateToProps = (state: RootState): Pick<ToggleTrashActionProps, 'selectedResourceUuid' | 'contextMenuResourceUuid' | 'resources' | 'disabledButtons' | 'pathname'> => ({\n    contextMenuResourceUuid: state.contextMenu.resource?.uuid || '',\n    selectedResourceUuid: state.selectedResource.selectedResourceUuid,\n    resources: state.resources,\n    disabledButtons: new Set<string>(state.multiselect.disabledButtons),\n    pathname: state.router.location?.pathname,\n});\n\ntype ToggleTrashActionProps = {\n    isInToolbar: boolean;\n    contextMenuResourceUuid: string;\n    selectedResourceUuid: string;\n    resources: ResourcesState\n    disabledButtons: Set<string>,\n    pathname: string | undefined;\n    onClick: () => void;\n};\n\nexport const ToggleTrashAction = connect(mapStateToProps)(withStyles(componentItemStyles)((props: ToggleTrashActionProps & WithStyles<ComponentCssRules>) => {\n    const { classes, onClick, isInToolbar, contextMenuResourceUuid, selectedResourceUuid, resources, disabledButtons } = props;\n\n    const currentPathIsTrash = matchTrashRoute(props.pathname || \"\");\n    const trashResourceUuid = isInToolbar ? selectedResourceUuid : contextMenuResourceUuid;\n    const isTrashed = getResource<GroupResource>(trashResourceUuid)(resources)?.isTrashed || currentPathIsTrash;\n    const isDisabled = disabledButtons.has(ContextMenuActionNames.MOVE_TO_TRASH);\n\n    return isInToolbar ? (\n            <Tooltip title={isTrashed ? \"Restore\" : \"Move to trash\"}>\n                <IconButton\n                    data-cy='multiselect-button'\n                    className={classes.toolbarButton}\n                    disabled={isDisabled}\n                    onClick={onClick}>\n                    <ListItemIcon className={classNames(classes.toolbarIcon, isDisabled && classes.disabled)}>\n                        {isTrashed\n                            ? <RestoreFromTrashIcon />\n                            : <TrashIcon />}\n                    </ListItemIcon>\n                </IconButton>\n            </Tooltip>\n            ) : (\n            <ListItem button\n                onClick={onClick}>\n                <ListItemIcon data-cy='context-move-to-trash'>\n                    {isTrashed\n                        ? <RestoreFromTrashIcon/>\n                        : <TrashIcon/>}\n                </ListItemIcon>\n                    <ListItemText style={{ textDecoration: 'none' }}>\n                        <Typography>\n                            {isTrashed ? 
\"Restore\" : \"Move to trash\"}\n                        </Typography>\n                    </ListItemText>\n            </ListItem >\n        )\n}));\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/component-item-styles.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\n\nexport type ComponentCssRules = \"toolbarIcon\" | \"toolbarButton\" | \"disabled\";\n\nexport const componentItemStyles: CustomStyleRulesCallback<ComponentCssRules> = theme => ({\n    toolbarIcon: {\n        marginLeft: '1rem',\n    },\n    toolbarButton: {\n        width: '3rem',\n        height: '3rem',\n    },\n    disabled: {\n        color: theme.palette.grey[400],\n        cursor: 'none',\n    },\n});"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/context-menu-action-set.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { ContextMenuItem } from \"components/context-menu/context-menu\";\nimport { ContextMenuResource } from \"store/context-menu/context-menu\";\n\nexport enum ContextMenuActionNames {\n    ACCOUNT_SETTINGS = 'Account settings',\n    ACTIVATE_USER = 'Activate user',\n    ADD_TO_FAVORITES = 'Add to favorites',\n    ADD_TO_PUBLIC_FAVORITES = 'Add to public favorites',\n    ATTRIBUTES = 'Attributes',\n    API_DETAILS = 'API Details',\n    CANCEL = 'Cancel',\n    COPY_AND_RERUN_PROCESS = 'Copy and re-run process',\n    COPY_ITEM_INTO_EXISTING_COLLECTION = 'Copy item into existing collection',\n    COPY_ITEM_INTO_NEW_COLLECTION = 'Copy item into new collection',\n    COPY_SELECTED_INTO_EXISTING_COLLECTION = 'Copy selected into existing collection',\n    COPY_SELECTED_INTO_SEPARATE_COLLECTIONS = 'Copy selected into separate collections',\n    COPY_SELECTED_INTO_NEW_COLLECTION = 'Copy selected into new collection',\n    COPY_LINK_TO_CLIPBOARD = 'Copy link to clipboard',\n    COPY_UUID_LINK_TO_CLIPBOARD = 'Copy link to clipboard (uuid)',\n    COPY_PDH_LINK_TO_CLIPBOARD = 'Copy link to clipboard (pdh)',\n    COPY_CWL_LINK_TO_CLIPBOARD = 'Copy link to clipboard (cwl)',\n    COPY_UUID = 'Copy UUID',\n    DEACTIVATE_USER = 'Deactivate user',\n    DELETE_WORKFLOW = 'Delete Workflow',\n    DIVIDER = 'Divider',\n    DOWNLOAD = 'Download',\n    EDIT_COLLECTION = 'Edit collection',\n    EDIT_CREDENTIAL = 'Edit credential',\n    EDIT_GROUP = 'Edit group',\n    EDIT_PROCESS = 'Edit process',\n    EDIT_PROJECT = 'Edit project',\n    FREEZE_PROJECT = 'Freeze project',\n    HOME_PROJECT = 'Home project',\n    LOGIN_AS_USER = 'Login as user',\n    MAKE_A_COPY = 'Make a copy',\n    MANAGE = 'Manage',\n    MOVE_ITEM_INTO_EXISTING_COLLECTION = 'Move item into existing collection',\n    MOVE_ITEM_INTO_NEW_COLLECTION = 'Move item into new collection',\n    MOVE_SELECTED_INTO_EXISTING_COLLECTION = 'Move selected into existing collection',\n    MOVE_SELECTED_INTO_NEW_COLLECTION = 'Move selected into new collection',\n    MOVE_SELECTED_INTO_SEPARATE_COLLECTIONS = 'Move selected into separate collections',\n    MOVE_TO = 'Move to',\n    MOVE_TO_TRASH = 'Move to trash',\n    NEW_COLLECTION = 'New collection',\n    NEW_PROJECT = 'New project',\n    OPEN_IN_NEW_TAB = 'Open in new tab',\n    OPEN_WITH_3RD_PARTY_CLIENT = 'Open with 3rd party client',\n    OUTPUTS = 'Outputs',\n    PROVENANCE_GRAPH = 'Provenance graph',\n    READ = 'Read',\n    REMOVE = 'Remove',\n    REMOVE_SELECTED = 'Remove selected',\n    DOWNLOAD_SELECTED = 'Download selected files as zip',\n    DOWNLOAD_ALL = 'Download entire collection as zip',\n    RENAME = 'Rename',\n    RESTORE = 'Restore',\n    RESTORE_VERSION = 'Restore version',\n    RUN_WORKFLOW = 'Run Workflow',\n    SELECT_ALL = 'Select all',\n    SETUP_USER = 'Setup user',\n    SHARE = 'Share',\n    UNSELECT_ALL = 'Unselect all',\n    USER_ACCOUNT = 'User account',\n    VIEW_DETAILS = 'View details',\n    WRITE = 'Write',\n}\n\nexport interface ContextMenuAction extends ContextMenuItem {\n    isForMulti?: boolean;\n    execute(dispatch: Dispatch, resources: ContextMenuResource[], state?: any): void;\n}\n\nexport type ContextMenuActionSet = Array<Array<ContextMenuAction>>;\n"
  },
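For orientation, a hedged sketch of how these types are meant to be instantiated; it assumes `name` and `execute` are the only fields a minimal `ContextMenuAction` needs, and the log call stands in for a real dispatch:

```typescript
import { Dispatch } from "redux";
import { ContextMenuResource } from "store/context-menu/context-menu";
import { ContextMenuAction, ContextMenuActionNames, ContextMenuActionSet } from "./context-menu-action-set";

// One action: report the UUID of every resource the menu was invoked on.
const copyUuidAction: ContextMenuAction = {
    name: ContextMenuActionNames.COPY_UUID,
    execute: (dispatch: Dispatch, resources: ContextMenuResource[]) => {
        // A real action set would dispatch clipboard/snackbar actions here.
        resources.forEach(resource => console.log(resource.uuid));
    },
};

// Each inner array is one group; the renderer separates groups with dividers.
export const exampleActionSet: ContextMenuActionSet = [[copyUuidAction]];
```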
  {
    "path": "services/workbench2/src/views-components/context-menu/context-menu.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { contextMenuActions } from \"store/context-menu/context-menu-actions\";\nimport { ContextMenuResource } from \"store/context-menu/context-menu\";\nimport { ContextMenu as ContextMenuComponent, ContextMenuProps, ContextMenuItem } from \"components/context-menu/context-menu\";\nimport { ContextMenuAction } from \"./context-menu-action-set\";\nimport { Dispatch } from \"redux\";\nimport { memoize } from \"lodash\";\nimport { getMenuActionSet } from \"common/menu-action-set-actions\";\n\ntype DataProps = Pick<ContextMenuProps, \"contextMenu\" | \"items\"> & { resource?: ContextMenuResource };\n\nconst filteredItems = memoize((resource: ContextMenuResource | undefined, state: RootState) => {\n    const actionSet = getMenuActionSet(resource);\n    return actionSet.map(group => group.filter(action => {\n        if(resource && action.filters) {\n            return action.filters.every(filter => filter(state, resource))\n        } else {\n            return true;\n        }\n    }));\n});\n\nconst mapStateToProps = (state: RootState): DataProps => {\n    return {\n        items: filteredItems(state.contextMenu.resource, state),\n        contextMenu: state.contextMenu,\n    };\n};\n\ntype ActionProps = Pick<ContextMenuProps, \"onClose\"> & { onItemClick: (item: ContextMenuItem, resource?: ContextMenuResource) => void };\nconst mapDispatchToProps = (dispatch: Dispatch): ActionProps => ({\n    onClose: () => {\n        dispatch(contextMenuActions.CLOSE_CONTEXT_MENU());\n    },\n    onItemClick: (action: ContextMenuAction, resource?: ContextMenuResource) => {\n        dispatch(contextMenuActions.CLOSE_CONTEXT_MENU());\n        if (resource) {\n            action.execute(dispatch, [resource]);\n        }\n    },\n});\n\nexport const ContextMenu = connect(mapStateToProps, mapDispatchToProps)(ContextMenuComponent);\n"
  },
  {
    "path": "services/workbench2/src/views-components/context-menu/menu-item-sort.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { ContextMenuAction } from './context-menu-action-set';\nimport { ContextMenuActionNames } from 'views-components/context-menu/context-menu-action-set';\nimport { sortByProperty } from 'common/array-utils';\nimport { horizontalMenuDivider, verticalMenuDivider } from './actions/context-menu-divider';\nimport { ContextMenuKind } from 'store/context-menu/context-menu';\n\nconst processOrder = [\n    ContextMenuActionNames.VIEW_DETAILS,\n    ContextMenuActionNames.OPEN_IN_NEW_TAB,\n    ContextMenuActionNames.COPY_UUID,\n    ContextMenuActionNames.COPY_AND_RERUN_PROCESS,\n    ContextMenuActionNames.CANCEL,\n    ContextMenuActionNames.EDIT_PROCESS,\n    ContextMenuActionNames.REMOVE,\n    ContextMenuActionNames.DIVIDER,\n    ContextMenuActionNames.OUTPUTS,\n    ContextMenuActionNames.ADD_TO_FAVORITES,\n    ContextMenuActionNames.ADD_TO_PUBLIC_FAVORITES,\n    ContextMenuActionNames.DIVIDER,\n    ContextMenuActionNames.COPY_LINK_TO_CLIPBOARD,\n    ContextMenuActionNames.API_DETAILS,\n];\n\nexport const projectOrder = [\n    ContextMenuActionNames.VIEW_DETAILS,\n    ContextMenuActionNames.OPEN_IN_NEW_TAB,\n    ContextMenuActionNames.COPY_UUID,\n    ContextMenuActionNames.SHARE,\n    ContextMenuActionNames.EDIT_PROJECT,\n    ContextMenuActionNames.MOVE_TO_TRASH,\n    ContextMenuActionNames.DIVIDER,\n    ContextMenuActionNames.NEW_PROJECT,\n    ContextMenuActionNames.MOVE_TO,\n    ContextMenuActionNames.FREEZE_PROJECT,\n    ContextMenuActionNames.ADD_TO_FAVORITES,\n    ContextMenuActionNames.ADD_TO_PUBLIC_FAVORITES,\n    ContextMenuActionNames.DIVIDER,\n    ContextMenuActionNames.COPY_LINK_TO_CLIPBOARD,\n    ContextMenuActionNames.OPEN_WITH_3RD_PARTY_CLIENT,\n    ContextMenuActionNames.API_DETAILS,\n];\n\nconst groupOrder = [\n    ContextMenuActionNames.VIEW_DETAILS,\n    ContextMenuActionNames.COPY_UUID,\n    ContextMenuActionNames.DIVIDER,\n    ContextMenuActionNames.API_DETAILS,\n    ContextMenuActionNames.EDIT_PROJECT,\n    ContextMenuActionNames.MOVE_TO_TRASH,\n];\n\nconst builtInGroupOrder = [\n    ContextMenuActionNames.VIEW_DETAILS,\n    ContextMenuActionNames.COPY_UUID,\n    ContextMenuActionNames.DIVIDER,\n    ContextMenuActionNames.API_DETAILS,\n];\n\nexport const collectionOrder = [\n    ContextMenuActionNames.VIEW_DETAILS,\n    ContextMenuActionNames.OPEN_IN_NEW_TAB,\n    ContextMenuActionNames.COPY_UUID,\n    ContextMenuActionNames.SHARE,\n    ContextMenuActionNames.EDIT_COLLECTION,\n    ContextMenuActionNames.MOVE_TO_TRASH,\n    ContextMenuActionNames.DIVIDER,\n    ContextMenuActionNames.MAKE_A_COPY,\n    ContextMenuActionNames.MOVE_TO,\n    ContextMenuActionNames.ADD_TO_FAVORITES,\n    ContextMenuActionNames.ADD_TO_PUBLIC_FAVORITES,\n    ContextMenuActionNames.DIVIDER,\n    ContextMenuActionNames.COPY_LINK_TO_CLIPBOARD,\n    ContextMenuActionNames.OPEN_WITH_3RD_PARTY_CLIENT,\n    ContextMenuActionNames.API_DETAILS,\n];\n\nexport const workflowOrder = [\n    ContextMenuActionNames.VIEW_DETAILS,\n    ContextMenuActionNames.OPEN_IN_NEW_TAB,\n    ContextMenuActionNames.COPY_UUID,\n    ContextMenuActionNames.RUN_WORKFLOW,\n    ContextMenuActionNames.DELETE_WORKFLOW,\n    ContextMenuActionNames.DIVIDER,\n    ContextMenuActionNames.COPY_LINK_TO_CLIPBOARD,\n    ContextMenuActionNames.API_DETAILS,\n]\n\nconst rootProjectOrder = [\n    ContextMenuActionNames.VIEW_DETAILS,\n    ContextMenuActionNames.USER_ACCOUNT,\n    ContextMenuActionNames.API_DETAILS,\n];\n\nconst 
userDetailsOrder = [\n    ContextMenuActionNames.VIEW_DETAILS,\n    ContextMenuActionNames.USER_ACCOUNT,\n    ContextMenuActionNames.API_DETAILS,\n];\n\nconst credentialOrder = [\n    ContextMenuActionNames.COPY_UUID,\n    ContextMenuActionNames.SHARE,\n    ContextMenuActionNames.EDIT_CREDENTIAL,\n    ContextMenuActionNames.REMOVE,\n    ContextMenuActionNames.DIVIDER,\n    ContextMenuActionNames.API_DETAILS,\n];\n\nconst defaultMultiOrder = [\n    ContextMenuActionNames.MOVE_TO,\n    ContextMenuActionNames.MAKE_A_COPY,\n    ContextMenuActionNames.MOVE_TO_TRASH,\n];\n\nconst apiKeyOrder = [\n    ContextMenuActionNames.COPY_UUID,\n    ContextMenuActionNames.REMOVE,\n    ContextMenuActionNames.DIVIDER,\n    ContextMenuActionNames.API_DETAILS,\n];\n\nconst kindToOrder: Record<string, ContextMenuActionNames[]> = {\n    [ContextMenuKind.MULTI]: defaultMultiOrder,\n\n    [ContextMenuKind.PROCESS]: processOrder,\n    [ContextMenuKind.PROCESS_ADMIN]: processOrder,\n    [ContextMenuKind.PROCESS_RESOURCE]: processOrder,\n    [ContextMenuKind.RUNNING_PROCESS_ADMIN]: processOrder,\n    [ContextMenuKind.RUNNING_PROCESS_RESOURCE]: processOrder,\n    [ContextMenuKind.READONLY_PROCESS_RESOURCE]: processOrder,\n\n    [ContextMenuKind.PROJECT]: projectOrder,\n    [ContextMenuKind.PROJECT_ADMIN]: projectOrder,\n    [ContextMenuKind.READONLY_PROJECT]: projectOrder,\n    [ContextMenuKind.FROZEN_PROJECT]: projectOrder,\n    [ContextMenuKind.FROZEN_PROJECT_ADMIN]: projectOrder,\n    [ContextMenuKind.WRITEABLE_PROJECT]: projectOrder,\n    [ContextMenuKind.MANAGEABLE_PROJECT]: projectOrder,\n    [ContextMenuKind.FROZEN_MANAGEABLE_PROJECT]: projectOrder,\n\n    [ContextMenuKind.COLLECTION]: collectionOrder,\n    [ContextMenuKind.COLLECTION_ADMIN]: collectionOrder,\n    [ContextMenuKind.READONLY_COLLECTION]: collectionOrder,\n    [ContextMenuKind.WRITEABLE_COLLECTION]: collectionOrder,\n    [ContextMenuKind.OLD_VERSION_COLLECTION]: collectionOrder,\n\n    [ContextMenuKind.WORKFLOW]: workflowOrder,\n    [ContextMenuKind.READONLY_WORKFLOW]: workflowOrder,\n\n    [ContextMenuKind.GROUPS]: groupOrder,\n    [ContextMenuKind.GROUP_MEMBER]: groupOrder,\n    [ContextMenuKind.BUILT_IN_GROUP]: builtInGroupOrder,\n\n    [ContextMenuKind.FILTER_GROUP]: projectOrder,\n    [ContextMenuKind.FILTER_GROUP_ADMIN]: projectOrder,\n\n    [ContextMenuKind.ROOT_PROJECT]: rootProjectOrder,\n    [ContextMenuKind.ROOT_PROJECT_ADMIN]: rootProjectOrder,\n\n    [ContextMenuKind.USER_DETAILS]: userDetailsOrder,\n\n    [ContextMenuKind.EXTERNAL_CREDENTIAL]: credentialOrder,\n\n    [ContextMenuKind.API_CLIENT_AUTHORIZATION]: apiKeyOrder,\n};\n\nexport const menuDirection = {\n    VERTICAL: 'vertical',\n    HORIZONTAL: 'horizontal'\n};\n\nexport const sortMenuItems = (menuKind: ContextMenuKind, menuItems: ContextMenuAction[], orthogonality: string): ContextMenuAction[] => {\n    const preferredOrder = kindToOrder[menuKind];\n    // if no specified order, sort by name\n    if (!preferredOrder) return menuItems.sort(sortByProperty(\"name\"));\n\n    const bucketMap = new Map();\n    const leftovers: ContextMenuAction[] = [];\n\n    // if we have multiple dividers, we need each of them to have a different \"name\" property\n    let count = 0;\n\n    preferredOrder.forEach((name) => {\n        if (name === ContextMenuActionNames.DIVIDER) {\n            count++;\n            bucketMap.set(`${name}-${count}`, orthogonality === menuDirection.VERTICAL ? 
verticalMenuDivider : horizontalMenuDivider)\n        } else {\n            bucketMap.set(name, null)\n        }\n    });\n    [...menuItems].forEach((item) => {\n        if (bucketMap.has(item.name)) bucketMap.set(item.name, item);\n        else if (item.name !== ContextMenuActionNames.DIVIDER) leftovers.push(item);\n    });\n\n    // drop unused slots, then collapse any runs of consecutive dividers\n    const result = Array.from(bucketMap.values()).concat(leftovers).filter((item) => item !== null).reduce((acc, val) => {\n        return acc.at(-1)?.name === ContextMenuActionNames.DIVIDER && val.name === ContextMenuActionNames.DIVIDER ? acc : acc.concat(val)\n    }, []);\n\n    // a trailing divider would render a dangling separator; trim it\n    return result.at(-1)?.name === ContextMenuActionNames.DIVIDER ? result.slice(0, -1) : result;\n};\n"
  },
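To make the bucket sort concrete, here is roughly what `sortMenuItems` does to an unordered workflow menu; a sketch in which the stub items assume `name` and `execute` suffice for a `ContextMenuAction`:

```typescript
import { sortMenuItems, menuDirection } from "./menu-item-sort";
import { ContextMenuAction, ContextMenuActionNames } from "./context-menu-action-set";
import { ContextMenuKind } from "store/context-menu/context-menu";

const unsorted = [
    { name: ContextMenuActionNames.API_DETAILS, execute: () => {} },
    { name: ContextMenuActionNames.VIEW_DETAILS, execute: () => {} },
    { name: ContextMenuActionNames.RUN_WORKFLOW, execute: () => {} },
] as ContextMenuAction[];

// Known names land in their workflowOrder slots, empty slots are filtered out,
// and the divider survives because a populated item (API_DETAILS) follows it.
const sorted = sortMenuItems(ContextMenuKind.WORKFLOW, unsorted, menuDirection.VERTICAL);
console.log(sorted.map(item => item.name));
// -> ["View details", "Run Workflow", "Divider", "API Details"]
```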
  {
    "path": "services/workbench2/src/views-components/data-explorer/data-explorer.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { DataExplorer as DataExplorerComponent, FilteredColumnNames } from \"components/data-explorer/data-explorer\";\nimport { getDataExplorer } from \"store/data-explorer/data-explorer-reducer\";\nimport { Dispatch } from \"redux\";\nimport { dataExplorerActions } from \"store/data-explorer/data-explorer-action\";\nimport { DataColumn, DataColumns, } from \"components/data-table/data-column\";\nimport { TCheckedList } from \"components/data-table/data-table\";\nimport { DataTableFilters } from \"components/data-table-filters/data-table-filters\";\nimport { toggleMSToolbar, setCheckedListOnStore } from \"store/multiselect/multiselect-actions\";\nimport { setSelectedResourceUuid, setIsSelectedResourceInDataExplorer } from \"store/selected-resource/selected-resource-actions\";\nimport { usesDetailsCard } from \"components/multiselect-toolbar/MultiselectToolbar.utils\";\nimport { loadDetailsPanel } from \"store/details-panel/details-panel-action\";\nimport { fetchProcessStatusCounts } from \"store/subprocess-panel/subprocess-panel-actions\";\nimport { getDataExplorerColumnFilters } from \"store/data-explorer/data-explorer-middleware-service\";\nimport { serializeOnlyProcessTypeFilters } from \"store/resource-type-filters/resource-type-filters\";\n\ninterface Props {\n    id: string;\n    onRowClick: (item: any) => void;\n    onContextMenu?: (event: React.MouseEvent<HTMLElement>, item: any, isAdmin?: boolean) => void;\n    onRowDoubleClick: (item: any) => void;\n    extractKey?: (item: any) => React.Key;\n    working?: boolean;\n}\n\nconst mapStateToProps = ({ dataExplorer, router, multiselect, selectedResource, properties, searchBar, detailsPanel}: RootState, { id }: Props) => {\n    const dataExplorerState = getDataExplorer(dataExplorer, id);\n    const working = dataExplorerState.working;\n    const currentRoute = router.location ? 
router.location.pathname : \"\";\n    const isMSToolbarVisible = multiselect.isVisible;\n    return {\n        ...dataExplorerState,\n        id,\n        path: currentRoute,\n        currentRouteUuid: properties.currentRouteUuid,\n        isMSToolbarVisible,\n        selectedResourceUuid: selectedResource.selectedResourceUuid,\n        isSelectedResourceInDataExplorer: selectedResource.isSelectedResourceInDataExplorer,\n        checkedList: multiselect.checkedList,\n        working,\n        searchBarValue: searchBar.searchValue,\n        detailsPanelResourceUuid: detailsPanel.resourceUuid,\n        isDetailsPanelOpen: detailsPanel.isOpened,\n        typeFilter: serializeOnlyProcessTypeFilters(false)(getDataExplorerColumnFilters(dataExplorerState.columns, FilteredColumnNames.TYPE ))\n    };\n};\n\nconst mapDispatchToProps = () => {\n    return (dispatch: Dispatch, { id, onRowClick, onRowDoubleClick, onContextMenu }: Props) => ({\n        onSetColumns: (columns: DataColumns<any, any>) => {\n            dispatch(dataExplorerActions.SET_COLUMNS({ id, columns }));\n        },\n\n        onSearch: (searchValue: string) => {\n            dispatch(dataExplorerActions.SET_EXPLORER_SEARCH_VALUE({ id, searchValue }));\n        },\n\n        onColumnToggle: (column: DataColumn<any, any>) => {\n            dispatch(dataExplorerActions.TOGGLE_COLUMN({ id, columnName: column.name }));\n        },\n\n        onSortToggle: (column: DataColumn<any, any>) => {\n            dispatch(dataExplorerActions.TOGGLE_SORT({ id, columnName: column.name }));\n        },\n\n        onFiltersChange: (filters: DataTableFilters, column: DataColumn<any, any>) => {\n            dispatch(dataExplorerActions.SET_FILTERS({ id, columnName: column.name, filters }));\n        },\n\n        onPageChange: (page: number) => {\n            dispatch(dataExplorerActions.SET_PAGE({ id, page }));\n        },\n\n        onChangeRowsPerPage: (rowsPerPage: number) => {\n            dispatch(dataExplorerActions.SET_ROWS_PER_PAGE({ id, rowsPerPage }));\n        },\n\n        onLoadMore: (page: number) => {\n            dispatch(dataExplorerActions.SET_PAGE({ id, page }));\n        },\n\n        toggleMSToolbar: (isVisible: boolean) => {\n            dispatch<any>(toggleMSToolbar(isVisible));\n        },\n\n        setCheckedListOnStore: (checkedList: TCheckedList) => {\n            dispatch<any>(setCheckedListOnStore(checkedList));\n        },\n\n        setSelectedUuid: (uuid: string | null) => {\n            dispatch<any>(setSelectedResourceUuid(uuid));\n        },\n\n        loadDetailsPanel: (uuid: string) => {\n            dispatch<any>(loadDetailsPanel(uuid || ''));\n        },\n\n        setIsSelectedResourceInDataExplorer: (isIn: boolean) => {\n            dispatch<any>(setIsSelectedResourceInDataExplorer(isIn));\n        },\n\n        fetchProcessStatusCounts: (parentResourceUuid: string, typeFilter?: string) => {\n            return dispatch<any>(fetchProcessStatusCounts(parentResourceUuid, typeFilter));\n        },\n\n        onRowClick,\n\n        onRowDoubleClick,\n\n        onContextMenu,\n\n        usesDetailsCard,\n    });\n};\n\nexport const DataExplorer = connect(mapStateToProps, mapDispatchToProps)(DataExplorerComponent);\n"
  },
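One wiring detail worth noting above: `mapDispatchToProps` is written as a factory (a zero-argument function returning the real mapper). react-redux treats that form as a per-instance initializer, so every mounted DataExplorer closes over its own `id` and row callbacks from ownProps. A stripped-down sketch of the pattern, with a hypothetical action shape:

```typescript
import { connect } from "react-redux";
import { Dispatch } from "redux";

interface OwnProps {
    id: string;
}

// Factory form: react-redux calls the outer function once per component
// instance, then invokes the returned mapper with (dispatch, ownProps).
const mapDispatchToProps = () => (dispatch: Dispatch, { id }: OwnProps) => ({
    // hypothetical action type/payload, for illustration only
    onPageChange: (page: number) => dispatch({ type: "SET_PAGE", payload: { id, page } }),
});
```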
  {
    "path": "services/workbench2/src/views-components/data-explorer/renderers.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { GroupMembersCount, ProcessStatus, ResourceFileSize } from './renderers';\nimport { Provider } from 'react-redux';\nimport configureMockStore from 'redux-mock-store'\nimport { ResourceKind } from '../../models/resource';\nimport { ContainerRequestState as CR } from '../../models/container-request';\nimport { ContainerState as C } from '../../models/container';\nimport { ProcessStatus as PS } from '../../store/processes/process';\nimport { ThemeProvider, Theme, StyledEngineProvider } from '@mui/material';\nimport { CustomTheme } from 'common/custom-theme';\n\nconst middlewares = [];\nconst mockStore = configureMockStore(middlewares);\n\ndescribe('renderers', () => {\n    let props = null;\n\n    describe('ProcessStatus', () => {\n        props = {\n            uuid: 'zzzzz-xvhdp-zzzzzzzzzzzzzzz',\n            theme: {\n                customs: {\n                    colors: {\n                        // Color values are arbitrary, but they should be\n                        // representative of the colors used in the UI.\n                        green800: 'rgb(0, 255, 0)',\n                        red900: 'rgb(255, 0, 0)',\n                        orange: 'rgb(240, 173, 78)',\n                        grey600: 'rgb(128, 128, 128)',\n                    }\n                },\n                spacing: (value) => value * 8,\n                palette: {\n                    common: {\n                        white: 'rgb(255, 255, 255)',\n                    },\n                },\n            },\n        };\n\n        [\n            // CR Status ; Priority ; C Status ; Exit Code ; C RuntimeStatus ; Expected label ; Expected bg color ; Expected fg color\n            [CR.COMMITTED, 1, C.RUNNING, null, {}, PS.RUNNING, props.theme.palette.common.white, props.theme.customs.colors.green800],\n            [CR.COMMITTED, 1, C.RUNNING, null, { error: 'whoops' }, PS.FAILING, props.theme.palette.common.white, props.theme.customs.colors.red900],\n            [CR.COMMITTED, 1, C.RUNNING, null, { warning: 'watch out!' 
}, PS.WARNING, props.theme.palette.common.white, props.theme.customs.colors.green800],\n            [CR.FINAL, 1, C.CANCELLED, null, {}, PS.CANCELLED, props.theme.customs.colors.red900, props.theme.palette.common.white],\n            [CR.FINAL, 1, C.COMPLETE, 137, {}, PS.FAILED, props.theme.customs.colors.red900, props.theme.palette.common.white],\n            [CR.FINAL, 1, C.COMPLETE, 0, {}, PS.COMPLETED, props.theme.customs.colors.green800, props.theme.palette.common.white],\n            [CR.COMMITTED, 0, C.LOCKED, null, {}, PS.ONHOLD, props.theme.customs.colors.grey600, props.theme.palette.common.white],\n            [CR.COMMITTED, 0, C.QUEUED, null, {}, PS.ONHOLD, props.theme.customs.colors.grey600, props.theme.palette.common.white],\n            [CR.COMMITTED, 1, C.LOCKED, null, {}, PS.QUEUED, props.theme.palette.common.white, props.theme.customs.colors.grey600],\n            [CR.COMMITTED, 1, C.QUEUED, null, {}, PS.QUEUED, props.theme.palette.common.white, props.theme.customs.colors.grey600],\n        ].forEach(([crState, crPrio, cState, exitCode, rs, eLabel, eColor, tColor]) => {\n            it(`should render the state label '${eLabel}' and color '${eColor}' for CR state=${crState}, priority=${crPrio}, C state=${cState}, exitCode=${exitCode} and RuntimeStatus=${JSON.stringify(rs)}`, () => {\n                const containerUuid = 'zzzzz-dz642-zzzzzzzzzzzzzzz';\n                const store = mockStore({\n                    resources: {\n                        [props.uuid]: {\n                            kind: ResourceKind.CONTAINER_REQUEST,\n                            state: crState,\n                            containerUuid: containerUuid,\n                            priority: crPrio,\n                        },\n                        [containerUuid]: {\n                            kind: ResourceKind.CONTAINER,\n                            state: cState,\n                            runtimeStatus: rs,\n                            exitCode: exitCode,\n                        },\n                    }\n                });\n\n                cy.mount(\n                    <Provider store={store}>\n                        <ThemeProvider theme={CustomTheme}>\n                            <ProcessStatus {...props} />\n                        </ThemeProvider>\n                    </Provider>);\n\n                cy.get('span').should('have.text', eLabel);\n                cy.get('span').should('have.css', 'color', tColor);\n                cy.get('[data-cy=process-status-chip]').should('have.css', 'background-color', eColor);\n            });\n        })\n    });\n\n    describe('ResourceFileSize', () => {\n        beforeEach(() => {\n            props = {\n                uuid: 'UUID',\n            };\n        });\n\n        it('should render collection fileSizeTotal', () => {\n            // given\n            const store = mockStore({\n                resources: {\n                    [props.uuid]: {\n                        kind: ResourceKind.COLLECTION,\n                        fileSizeTotal: 100,\n                    }\n                }\n            });\n\n            // when\n            cy.mount(<Provider store={store}>\n                <ResourceFileSize {...props}></ResourceFileSize>\n            </Provider>);\n\n            // then\n            cy.get('p').should('have.text', '100 B');\n        });\n\n        it('should render 0 B as file size', () => {\n            // given\n            const store = mockStore({ resources: {} });\n\n            // when\n            
cy.mount(<Provider store={store}>\n                <ResourceFileSize {...props}></ResourceFileSize>\n            </Provider>);\n\n            // then\n            cy.get('p').should('have.text', '0 B');\n        });\n\n        it('should render empty string for non collection resource', () => {\n            // given\n            const store1 = mockStore({\n                resources: {\n                    [props.uuid]: {\n                        kind: ResourceKind.PROJECT,\n                        fileSizeTotal: 100,\n                    }\n                }\n            });\n            const store2 = mockStore({\n                resources: {\n                    [props.uuid]: {\n                        kind: ResourceKind.PROCESS,\n                        fileSizeTotal: 200,\n                    }\n                }\n            });\n\n            // when\n            cy.mount(<Provider store={store1}>\n                <ResourceFileSize {...props}></ResourceFileSize>\n            </Provider>);\n\n            // then\n            cy.get('p').should('have.text', '-');\n            \n            // when\n            cy.mount(<Provider store={store2}>\n                <ResourceFileSize {...props}></ResourceFileSize>\n            </Provider>);\n\n            // then\n            cy.get('p').should('have.text', '-');\n        });\n    });\n\n    describe('GroupMembersCount', () => {\n        let fakeGroup;\n        beforeEach(() => {\n            props = {\n                uuid: 'zzzzz-j7d0g-000000000000000',\n            };\n            fakeGroup = {\n                \"canManage\": true,\n                \"canWrite\": true,\n                \"createdAt\": \"2020-09-24T22:52:57.546521000Z\",\n                \"deleteAt\": null,\n                \"description\": \"Test Group\",\n                \"etag\": \"0000000000000000000000000\",\n                \"frozenByUuid\": null,\n                \"groupClass\": \"role\",\n                \"isTrashed\": false,\n                \"kind\": ResourceKind.GROUP,\n                \"modifiedAt\": \"2020-09-24T22:52:57.545669000Z\",\n                \"modifiedByUserUuid\": \"zzzzz-tpzed-000000000000000\",\n                \"name\": \"System group\",\n                \"ownerUuid\": \"zzzzz-tpzed-000000000000000\",\n                \"properties\": {},\n                \"trashAt\": null,\n                \"uuid\": props.uuid,\n                \"writableBy\": [\n                    \"zzzzz-tpzed-000000000000000\",\n                ]\n            };\n        });\n\n        it('shows loading group count when no memberCount', () => {\n            // Given\n            const store = mockStore({resources: {\n                [props.uuid]: fakeGroup,\n            }});\n\n            const wrapper = cy.mount(<Provider store={store}>\n                <StyledEngineProvider injectFirst>\n                    <ThemeProvider theme={CustomTheme}>\n                        <GroupMembersCount {...props} />\n                    </ThemeProvider>\n                </StyledEngineProvider>\n            </Provider>);\n\n            cy.get('[data-testid=three-dots-svg]').should('exist');\n        });\n\n        it('shows group count when memberCount present', () => {\n            // Given\n            const store = mockStore({resources: {\n                [props.uuid]: {\n                    ...fakeGroup,\n                    \"memberCount\": 765,\n                }\n            }});\n\n            cy.mount(<Provider store={store}>\n                <StyledEngineProvider injectFirst>\n     
               <ThemeProvider theme={CustomTheme}>\n                        <GroupMembersCount {...props} />\n                    </ThemeProvider>\n                </StyledEngineProvider>\n            </Provider>);\n\n            cy.get('p').should('have.text', '765');\n        });\n\n        it('shows group count error icon when memberCount is null', () => {\n            // Given\n            const store = mockStore({resources: {\n                [props.uuid]: {\n                    ...fakeGroup,\n                    \"memberCount\": null,\n                }\n            }});\n\n            cy.mount(<Provider store={store}>\n                <StyledEngineProvider injectFirst>\n                    <ThemeProvider theme={CustomTheme}>\n                        <GroupMembersCount {...props} />\n                    </ThemeProvider>\n                </StyledEngineProvider>\n            </Provider>);\n\n            cy.get('[data-testid=ErrorRoundedIcon]').should('exist');\n        });\n\n    });\n\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/data-explorer/renderers.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Grid, Typography, Tooltip, IconButton, Checkbox, Chip } from \"@mui/material\";\nimport withStyles from '@mui/styles/withStyles';\nimport withTheme from '@mui/styles/withTheme';\nimport { FavoriteStar, PublicFavoriteStar } from \"../favorite-star/favorite-star\";\nimport { NamedResource, Resource, ResourceKind, TrashableResource } from \"models/resource\";\nimport {\n    FreezeIcon,\n    ProjectIcon,\n    FilterGroupIcon,\n    CollectionIcon,\n    ProcessIcon,\n    DefaultIcon,\n    ShareIcon,\n    CollectionOldVersionIcon,\n    WorkflowIcon,\n    RemoveIcon,\n    RenameIcon,\n    ActiveIcon,\n    SetupIcon,\n    InactiveIcon,\n    ErrorIcon,\n    FolderKeyIcon,\n    RootProjectIcon,\n} from \"components/icon/icon\";\nimport { formatDateTime, formatFileSize, formatTime, formatDateOnly, isElapsed, isWithinExpiration, daysRemaining } from \"common/formatters\";\nimport { resourceLabel } from \"common/labels\";\nimport { connect, DispatchProp } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { getResource, filterResources } from \"store/resources/resources\";\nimport { GroupContentsResource } from \"services/groups-service/groups-service\";\nimport { getProcess, Process, getProcessStatus, getProcessStatusStyles, getProcessRuntime } from \"store/processes/process\";\nimport { ArvadosTheme } from \"common/custom-theme\";\nimport { compose, Dispatch } from \"redux\";\nimport { WorkflowResource, isWorkflowResource } from \"models/workflow\";\nimport { getUuidPrefix, openRunProcess } from \"store/workflow-panel/workflow-panel-actions\";\nimport { openSharingDialog } from \"store/sharing-dialog/sharing-dialog-actions\";\nimport { getUserFullname, getUserDisplayName, User, UserResource, isUserResource } from \"models/user\";\nimport { LinkClass, LinkResource } from \"models/link\";\nimport { navigateTo, navigateToGroupDetails, navigateToUserProfile } from \"store/navigation/navigation-action\";\nimport { withResourceData } from \"views-components/data-explorer/with-resources\";\nimport { CollectionResource, isCollectionResource, isCollectionResourceLatestVersion } from \"models/collection\";\nimport { IllegalNamingWarning } from \"components/warning/warning\";\nimport { loadResource } from \"store/resources/resources-actions\";\nimport { BuiltinGroups, getBuiltinGroupUuid, GroupResource, isBuiltinGroup, isFilterGroup, isUserGroup } from \"models/group\";\nimport { openRemoveGroupMemberDialog } from \"store/group-details-panel/group-details-panel-actions\";\nimport { setMemberIsHidden } from \"store/group-details-panel/group-details-panel-actions\";\nimport { formatPermissionLevel } from \"views-components/sharing-dialog/permission-select\";\nimport { PermissionLevel } from \"models/permission\";\nimport { openPermissionEditContextMenu } from \"store/context-menu/context-menu-actions\";\nimport { VirtualMachinesResource } from \"models/virtual-machines\";\nimport { CopyToClipboardSnackbar } from \"components/copy-to-clipboard-snackbar/copy-to-clipboard-snackbar\";\nimport { ProjectResource, isProjectResource } from \"models/project\";\nimport { ProcessResource } from \"models/process\";\nimport { ExternalCredential, isExternalCredential } from \"models/external-credential\";\nimport { ServiceRepository } from \"services/services\";\nimport { loadUsersPanel } from \"store/users/users-actions\";\nimport { InlinePulser } 
from \"components/loading/inline-pulser\";\nimport { ProcessTypeFilter } from \"store/resource-type-filters/resource-type-filters\";\nimport { CustomTheme } from \"common/custom-theme\";\nimport { getProperty } from \"store/properties/properties\";\nimport { ClusterBadge } from \"store/auth/cluster-badges\";\nimport { isContainerRequestResource } from \"models/container-request\";\n\nenum WorkflowStatus {\n    PUBLIC = \"Public\",\n    PRIVATE = \"Private\",\n    SHARED = \"Shared\"\n}\n\nexport const toggleIsAdmin = (uuid: string) =>\n    async (dispatch: Dispatch, getState: () => RootState, services: ServiceRepository) => {\n        const { resources } = getState();\n        const data = getResource<UserResource>(uuid)(resources);\n        const isAdmin = data!.isAdmin;\n        const newActivity = await services.userService.update(uuid, { isAdmin: !isAdmin });\n        dispatch<any>(loadUsersPanel());\n        return newActivity;\n    };\n\nconst renderName = (dispatch: Dispatch, item: NamedResource | UserResource, isLink: boolean = true) => {\n    const navFunc = isUserGroup(item) ? navigateToGroupDetails : navigateTo;\n    const displayName = isUserResource(item) ? `${item.firstName} ${item.lastName}` : item.name;\n    return (\n        <Grid\n            container\n            alignItems=\"center\"\n            wrap=\"nowrap\"\n            spacing={2}\n        >\n            <Grid item style={{color: CustomTheme.palette.grey['600'] }}>{renderIcon(item)}</Grid>\n            <Grid item>\n                <Typography\n                    color={isLink ? \"primary\" : \"textPrimary\"}\n                    style={{ width: \"auto\", cursor: isLink ? \"pointer\" : \"default\" }}\n                    onClick={(ev) => {\n                        ev.stopPropagation()\n                        if (isLink) dispatch<any>(navFunc(item.uuid))\n                    }}\n                >\n                    {item.kind === ResourceKind.PROJECT || item.kind === ResourceKind.COLLECTION ? 
<IllegalNamingWarning name={item.name} /> : null}\n                    {displayName}\n                </Typography>\n            </Grid>\n            <Grid item>\n                <Typography variant=\"caption\">\n                    <FavoriteStar resourceUuid={item.uuid} />\n                    <PublicFavoriteStar resourceUuid={item.uuid} />\n                    {isProjectResource(item) && <FrozenProject item={item} />}\n                </Typography>\n            </Grid>\n        </Grid>\n    );\n};\n\nexport const FrozenProject = (props: { item: ProjectResource }) => {\n    const [fullUsername, setFullusername] = React.useState<any>(null);\n    const getFullName = React.useCallback(() => {\n        if (props.item.frozenByUuid) {\n            setFullusername(<UserNameFromID uuid={props.item.frozenByUuid} />);\n        }\n    }, [props.item, setFullusername]);\n\n    if (props.item.frozenByUuid) {\n        return (\n            <Tooltip\n                onOpen={getFullName}\n                enterDelay={500}\n                title={<span>Project was frozen by {fullUsername}</span>}\n            >\n                <span><FreezeIcon style={{ fontSize: \"inherit\" }} /></span>\n            </Tooltip>\n        );\n    } else {\n        return null;\n    }\n};\n\nexport const ResourceName = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<NamedResource | UserResource>(props.uuid)(state.resources);\n    return { resource };\n})((props: {resource?: NamedResource | UserResource} & DispatchProp<any>) => props.resource ? renderName(props.dispatch, props.resource, true) : null);\n\nexport const ResourceNameNoLink = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<NamedResource | UserResource>(props.uuid)(state.resources);\n    return { resource };\n})((props: {resource?: NamedResource | UserResource} & DispatchProp<any>) => props.resource ? 
renderName(props.dispatch, props.resource, false) : null);\n\nexport const renderIcon = (item: Resource): JSX.Element => {\n    if (isProjectResource(item)) {\n        if (isFilterGroup(item)) {\n            return <FilterGroupIcon />;\n        }\n        return <ProjectIcon />;\n    }\n    if (isCollectionResource(item)) {\n        if (isCollectionResourceLatestVersion(item)) {\n            return <CollectionIcon />;\n        }\n        return <CollectionOldVersionIcon />;\n    }\n    if (isContainerRequestResource(item)) {\n        return <ProcessIcon />;\n    }\n    if (isWorkflowResource(item)) {\n        return <WorkflowIcon />;\n    }\n    if (isExternalCredential(item)) {\n        return <FolderKeyIcon />;\n    }\n    if (isUserResource(item)) {\n        return <RootProjectIcon />;\n    }\n    return <DefaultIcon />;\n};\n\nconst renderDateTime = (date?: string) => {\n    return (\n        <Typography\n            noWrap\n            style={{ minWidth: \"100px\" }}\n        >\n            {formatDateTime(date)}\n        </Typography>\n    );\n};\n\nconst renderDateOnly = (date?: string, withTimeRemaining: boolean = false) => {\n    return (\n        <Typography\n            style={{ minWidth: \"100px\" }}\n        >\n            {formatDateOnly(date, withTimeRemaining)}\n        </Typography>\n    );\n};\n\nconst renderExpiring = (date?: string) =>\n    <Typography noWrap style={{minHeight: '1.55rem'}}>\n        <Grid container alignItems=\"center\" wrap=\"nowrap\">\n            <Grid item style={{ width: '80px' }}>\n                {formatDateOnly(date)}\n            </Grid>\n            <Grid item>\n                {renderExpiringBadge(date)}\n            </Grid>\n        </Grid>\n    </Typography>\n\nconst renderExpiringBadge = (date?: string) =>\n    <span\n        data-cy=\"expiring-badge\"\n        style={{\n            border: `1px solid ${(CustomTheme as any).customs.colors.red900}`,\n            color: (CustomTheme as any).customs.colors.red900,\n            fontSize: \"0.75rem\",\n            padding: \"0px 7px\",\n            borderRadius: 3,\n            boxSizing: 'border-box',\n        }}>\n            {date ? 
daysRemaining(date) : 'Expiring soon'}\n    </span>\n\nconst renderExpired = (date?: string) =>\n    <Typography noWrap>\n        <Grid container alignItems=\"center\" wrap=\"nowrap\">\n            <Grid item style={{ width: '80px' }}>\n                {formatDateOnly(date)}\n            </Grid>\n            <Grid item>\n                {renderExpiredBadge()}\n            </Grid>\n        </Grid>\n    </Typography>\n\nconst renderExpiredBadge = () =>\n    <span\n        data-cy=\"expired-badge\"\n        style={{\n            backgroundColor: (CustomTheme as any).customs.colors.red900,\n            color: 'white',\n            fontSize: \"0.75rem\",\n            padding: \"0px 7px\",\n            borderRadius: 3,\n        }}>\n            Expired\n    </span>\n\nconst renderWorkflowName = (item: WorkflowResource) => (\n    <Grid\n        container\n        alignItems=\"center\"\n        wrap=\"nowrap\"\n        spacing={2}\n    >\n        <Grid item>{renderIcon(item)}</Grid>\n        <Grid item>\n            <Typography\n                color=\"primary\"\n                style={{ width: \"100px\" }}\n            >\n                {item.name}\n            </Typography>\n        </Grid>\n    </Grid>\n);\n\nexport const ResourceWorkflowName = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<WorkflowResource>(props.uuid)(state.resources);\n    return resource;\n})(renderWorkflowName);\n\nconst getPublicUuid = (uuidPrefix: string) => {\n    return `${uuidPrefix}-tpzed-anonymouspublic`;\n};\n\nconst resourceShare = (dispatch: Dispatch, uuidPrefix: string, ownerUuid?: string, uuid?: string) => {\n    const isPublic = ownerUuid === getPublicUuid(uuidPrefix);\n    return (\n        <div>\n            {!isPublic && uuid && (\n                <Tooltip title=\"Share\">\n                    <IconButton onClick={() => dispatch<any>(openSharingDialog(uuid))} size=\"large\">\n                        <ShareIcon />\n                    </IconButton>\n                </Tooltip>\n            )}\n        </div>\n    );\n};\n\nexport const ResourceShare = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<WorkflowResource>(props.uuid)(state.resources);\n    const uuidPrefix = getUuidPrefix(state);\n    return {\n        uuid: resource ? resource.uuid : \"\",\n        ownerUuid: resource ? 
resource.ownerUuid : \"\",\n        uuidPrefix,\n    };\n})((props: { ownerUuid?: string; uuidPrefix: string; uuid?: string } & DispatchProp<any>) =>\n    resourceShare(props.dispatch, props.uuidPrefix, props.ownerUuid, props.uuid)\n);\n\n// User Resources\nconst renderFirstName = (item: { firstName: string }) => {\n    return <Typography noWrap>{item.firstName}</Typography>;\n};\n\nexport const ResourceFirstName = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<UserResource>(props.uuid)(state.resources);\n    return resource || { firstName: \"\" };\n})(renderFirstName);\n\nconst renderLastName = (item: { lastName: string }) => <Typography noWrap>{item.lastName}</Typography>;\n\nexport const ResourceLastName = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<UserResource>(props.uuid)(state.resources);\n    return resource || { lastName: \"\" };\n})(renderLastName);\n\nconst renderFullName = (dispatch: Dispatch, item: { uuid: string; firstName: string; lastName: string }, link?: boolean) => {\n    const displayName = (item.firstName + \" \" + item.lastName).trim() || item.uuid;\n    return link ? (\n        <Typography\n            noWrap\n            color=\"primary\"\n            style={{ cursor: \"pointer\" }}\n            onClick={() => dispatch<any>(navigateToUserProfile(item.uuid))}\n        >\n            {displayName}\n        </Typography>\n    ) : (\n        <Typography noWrap>{displayName}</Typography>\n    );\n};\n\nexport const UserResourceFullName = connect((state: RootState, props: { uuid: string; link?: boolean }) => {\n    const resource = getResource<UserResource>(props.uuid)(state.resources);\n    return { item: resource || { uuid: \"\", firstName: \"\", lastName: \"\" }, link: props.link };\n})((props: { item: { uuid: string; firstName: string; lastName: string }; link?: boolean } & DispatchProp<any>) =>\n    renderFullName(props.dispatch, props.item, props.link)\n);\n\nconst renderUuid = (item: { uuid: string }) => (\n    <Typography\n        data-cy=\"uuid\"\n        noWrap\n    >\n        {item.uuid}\n        {(item.uuid && <CopyToClipboardSnackbar value={item.uuid} />) || \"-\"}\n    </Typography>\n);\n\nconst renderUuidCopyIcon = (item: { uuid: string }) => (\n    <Typography\n        data-cy=\"uuid\"\n        noWrap\n    >\n        {(item.uuid && <CopyToClipboardSnackbar value={item.uuid} />) || \"-\"}\n    </Typography>\n);\n\nexport const ResourceUuid = connect(\n    (state: RootState, props: { uuid: string }) => getResource<UserResource>(props.uuid)(state.resources) || { uuid: \"\" }\n)(renderUuid);\n\nconst renderEmail = (item: { email: string }) => <Typography noWrap>{item.email}</Typography>;\n\nexport const ResourceEmail = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<UserResource>(props.uuid)(state.resources);\n    return resource || { email: \"\" };\n})(renderEmail);\n\nenum UserAccountStatus {\n    ACTIVE = \"Active\",\n    INACTIVE = \"Inactive\",\n    SETUP = \"Setup\",\n    UNKNOWN = \"\",\n}\n\nconst renderAccountStatus = (props: { status: UserAccountStatus }) => (\n    <Grid\n        container\n        alignItems=\"center\"\n        wrap=\"nowrap\"\n        spacing={1}\n        data-cy=\"account-status\"\n    >\n        <Grid item>\n            {(() => {\n                switch (props.status) {\n                    case UserAccountStatus.ACTIVE:\n                        return <ActiveIcon style={{ color: \"#4caf50\", 
verticalAlign: \"middle\" }} />;\n                    case UserAccountStatus.SETUP:\n                        return <SetupIcon style={{ color: \"#2196f3\", verticalAlign: \"middle\" }} />;\n                    case UserAccountStatus.INACTIVE:\n                        return <InactiveIcon style={{ color: \"#9e9e9e\", verticalAlign: \"middle\" }} />;\n                    default:\n                        return <></>;\n                }\n            })()}\n        </Grid>\n        <Grid item>\n            <Typography noWrap>{props.status}</Typography>\n        </Grid>\n    </Grid>\n);\n\nconst getUserAccountStatus = (state: RootState, props: { uuid: string }) => {\n    const user = getResource<UserResource>(props.uuid)(state.resources);\n    // Get membership links for all users group\n    const allUsersGroupUuid = getBuiltinGroupUuid(state.auth.localCluster, BuiltinGroups.ALL);\n    const permissions = filterResources(\n        (resource: LinkResource) =>\n            resource.kind === ResourceKind.LINK &&\n            resource.linkClass === LinkClass.PERMISSION &&\n            resource.headUuid === allUsersGroupUuid &&\n            resource.tailUuid === props.uuid\n    )(state.resources);\n\n    if (user) {\n        return user.isActive\n            ? { status: UserAccountStatus.ACTIVE }\n            : permissions.length > 0\n            ? { status: UserAccountStatus.SETUP }\n            : { status: UserAccountStatus.INACTIVE };\n    } else {\n        return { status: UserAccountStatus.UNKNOWN };\n    }\n};\n\nexport const ResourceLinkTailAccountStatus = connect((state: RootState, props: { uuid: string }) => {\n    const link = getResource<LinkResource>(props.uuid)(state.resources);\n    return link && link.tailKind === ResourceKind.USER ? getUserAccountStatus(state, { uuid: link.tailUuid }) : { status: UserAccountStatus.UNKNOWN };\n})(renderAccountStatus);\n\nexport const UserResourceAccountStatus = connect(getUserAccountStatus)(renderAccountStatus);\n\nconst renderIsHidden = (props: {\n    memberLinkUuid: string;\n    permissionLinkUuid: string;\n    visible: boolean;\n    canManage: boolean;\n    setMemberIsHidden: (memberLinkUuid: string, permissionLinkUuid: string, hide: boolean) => void;\n}) => {\n    if (props.memberLinkUuid) {\n        return (\n            <Checkbox\n                data-cy=\"user-visible-checkbox\"\n                color=\"primary\"\n                checked={props.visible}\n                disabled={!props.canManage}\n                onClick={e => {\n                    e.stopPropagation();\n                    props.setMemberIsHidden(props.memberLinkUuid, props.permissionLinkUuid, !props.visible);\n                }}\n            />\n        );\n    } else {\n        return <Typography />;\n    }\n};\n\nexport const ResourceLinkTailIsVisible = connect(\n    (state: RootState, props: { uuid: string }) => {\n        const link = getResource<LinkResource>(props.uuid)(state.resources);\n        const member = getResource<Resource>(link?.tailUuid || \"\")(state.resources);\n        const group = getResource<GroupResource>(link?.headUuid || \"\")(state.resources);\n        const permissions = filterResources((resource: LinkResource) => {\n            return (\n                resource.linkClass === LinkClass.PERMISSION &&\n                resource.headUuid === link?.tailUuid &&\n                resource.tailUuid === group?.uuid &&\n                resource.name === PermissionLevel.CAN_READ\n            );\n        })(state.resources);\n\n        const 
permissionLinkUuid = permissions.length > 0 ? permissions[0].uuid : \"\";\n        const isVisible = link && group && permissions.length > 0;\n        // TODO: also consider whether the current user canManage this resource, once that information is available\n        const isBuiltin = isBuiltinGroup(link?.headUuid || \"\");\n\n        return member?.kind === ResourceKind.USER\n            ? { memberLinkUuid: link?.uuid, permissionLinkUuid, visible: isVisible, canManage: !isBuiltin }\n            : { memberLinkUuid: \"\", permissionLinkUuid: \"\", visible: false, canManage: false };\n    },\n    { setMemberIsHidden }\n)(renderIsHidden);\n\nconst renderIsAdmin = (props: { uuid: string; isAdmin: boolean; toggleIsAdmin: (uuid: string) => void }) => (\n    <Checkbox\n        color=\"primary\"\n        checked={props.isAdmin}\n        onClick={e => {\n            e.stopPropagation();\n            props.toggleIsAdmin(props.uuid);\n        }}\n    />\n);\n\nexport const ResourceIsAdmin = connect(\n    (state: RootState, props: { uuid: string }) => {\n        const resource = getResource<UserResource>(props.uuid)(state.resources);\n        return resource || { isAdmin: false };\n    },\n    { toggleIsAdmin }\n)(renderIsAdmin);\n\nconst renderUsername = (item: { username: string; uuid: string }) => <Typography noWrap>{item.username || item.uuid}</Typography>;\n\nexport const ResourceUsername = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<UserResource>(props.uuid)(state.resources);\n    return resource || { username: \"\", uuid: props.uuid };\n})(renderUsername);\n\n// Virtual machine resource\n\nconst renderHostname = (item: { hostname: string }) => <Typography noWrap>{item.hostname}</Typography>;\n\nexport const VirtualMachineHostname = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<VirtualMachinesResource>(props.uuid)(state.resources);\n    return resource || { hostname: \"\" };\n})(renderHostname);\n\nconst renderVirtualMachineLogin = (login: { user: string }) => <Typography noWrap>{login.user}</Typography>;\n\nexport const VirtualMachineLogin = connect((state: RootState, props: { linkUuid: string }) => {\n    const permission = getResource<LinkResource>(props.linkUuid)(state.resources);\n    const user = getResource<UserResource>(permission?.tailUuid || \"\")(state.resources);\n\n    return { user: user?.username || permission?.tailUuid || \"\" };\n})(renderVirtualMachineLogin);\n\n// Common methods\nconst renderCommonData = (data: string) => <Typography noWrap>{data}</Typography>;\n\nconst renderCommonDate = (date: string) => <Typography noWrap>{formatDateTime(date)}</Typography>;\n\nexport const CommonUuid = withResourceData(\"uuid\", renderCommonData);\n\n// Api Client Authorizations\nexport const TokenApiToken = withResourceData(\"apiToken\", renderCommonData);\n\nexport const TokenCreatedByIpAddress = withResourceData(\"createdByIpAddress\", renderCommonData);\n\nexport const TokenExpiresAt = withResourceData(\"expiresAt\", renderCommonDate);\n\nexport const TokenLastUsedAt = withResourceData(\"lastUsedAt\", renderCommonDate);\n\nexport const TokenLastUsedByIpAddress = withResourceData(\"lastUsedByIpAddress\", renderCommonData);\n\nexport const TokenScopes = withResourceData(\"scopes\", renderCommonData);\n\nexport const TokenUserId = withResourceData(\"userId\", renderCommonData);\n\nexport const ResourceCluster = connect((state: RootState, props: { uuid: string }) => {\n    const clusterId = props.uuid.slice(0, 5);\n  
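  // The first 5 characters of a UUID are the cluster ID; look for a badge configured for that cluster.\n  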
  const clusterBadge = getProperty<ClusterBadge[]>('clusterBadges')(state.properties)?.find(badge => badge.text === clusterId);\n    // dark grey is default BG color\n    return clusterBadge || { text: clusterId, color: '#fff', backgroundColor: '#696969' };\n})(renderClusterBadge);\n\nfunction renderClusterBadge(badge: ClusterBadge) {\n    const style = {\n        backgroundColor: badge.backgroundColor,\n        color: badge.color,\n        padding: \"2px 7px\",\n        borderRadius: 3,\n    };\n\n    return <span style={style}>{badge.text}</span>;\n}\n\n// Links Resources\nconst renderLinkName = (item: { name: string }) => <Typography noWrap>{item.name || \"-\"}</Typography>;\n\nexport const ResourceLinkName = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<LinkResource>(props.uuid)(state.resources);\n    return resource || { name: \"\" };\n})(renderLinkName);\n\nconst renderLinkClass = (item: { linkClass: string }) => <Typography noWrap>{item.linkClass}</Typography>;\n\nexport const ResourceLinkClass = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<LinkResource>(props.uuid)(state.resources);\n    return resource || { linkClass: \"\" };\n})(renderLinkClass);\n\nconst getResourceDisplayName = (resource: Resource): string => {\n    if ((resource as UserResource).kind === ResourceKind.USER && typeof (resource as UserResource).firstName !== \"undefined\") {\n        // We can be sure the resource is UserResource\n        return getUserDisplayName(resource as UserResource);\n    } else {\n        return (resource as GroupContentsResource).name;\n    }\n};\n\nconst renderResourceLink = (dispatch: Dispatch, item: Resource) => {\n    const displayName = getResourceDisplayName(item);\n\n    return (\n        <Typography\n            noWrap\n            color=\"primary\"\n            style={{ cursor: \"pointer\" }}\n            onClick={() => {\n                item.kind === ResourceKind.GROUP && (item as GroupResource).groupClass === \"role\"\n                    ? dispatch<any>(navigateToGroupDetails(item.uuid))\n                    : item.kind === ResourceKind.USER\n                    ? dispatch<any>(navigateToUserProfile(item.uuid))\n                    : dispatch<any>(navigateTo(item.uuid));\n            }}\n        >\n            {resourceLabel(item.kind, item && item.kind === ResourceKind.GROUP ? 
(item as GroupResource).groupClass || \"\" : \"\")}:{\" \"}\n            {displayName || item.uuid}\n        </Typography>\n    );\n};\n\nexport const ResourceLinkTail = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<LinkResource>(props.uuid)(state.resources);\n    const tailResource = getResource<Resource>(resource?.tailUuid || \"\")(state.resources);\n\n    return {\n        item: tailResource || { uuid: resource?.tailUuid || \"\", kind: resource?.tailKind || ResourceKind.NONE },\n    };\n})((props: { item: Resource } & DispatchProp<any>) => renderResourceLink(props.dispatch, props.item));\n\nexport const ResourceLinkHead = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<LinkResource>(props.uuid)(state.resources);\n    const headResource = getResource<Resource>(resource?.headUuid || \"\")(state.resources);\n\n    return {\n        item: headResource || { uuid: resource?.headUuid || \"\", kind: resource?.headKind || ResourceKind.NONE },\n    };\n})((props: { item: Resource } & DispatchProp<any>) => renderResourceLink(props.dispatch, props.item));\n\nexport const ResourceLinkUuid = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<LinkResource>(props.uuid)(state.resources);\n    return resource || { uuid: \"\" };\n})(renderUuid);\n\nexport const ResourceLinkHeadUuid = connect((state: RootState, props: { uuid: string }) => {\n    const link = getResource<LinkResource>(props.uuid)(state.resources);\n    const headResource = getResource<Resource>(link?.headUuid || \"\")(state.resources);\n\n    return headResource || { uuid: \"\" };\n})(renderUuid);\n\nexport const ResourceLinkTailUuid = connect((state: RootState, props: { uuid: string }) => {\n    const link = getResource<LinkResource>(props.uuid)(state.resources);\n    const tailResource = getResource<Resource>(link?.tailUuid || \"\")(state.resources);\n\n    return tailResource || { uuid: \"\" };\n})(renderUuid);\n\nconst renderLinkDelete = (dispatch: Dispatch, item: LinkResource, canManage: boolean) => {\n    if (item.uuid) {\n        return canManage ? 
(\n            <Typography noWrap>\n                <IconButton\n                    data-cy=\"resource-delete-button\"\n                    onClick={() => dispatch<any>(openRemoveGroupMemberDialog(item.uuid))}\n                    size=\"large\">\n                    <RemoveIcon />\n                </IconButton>\n            </Typography>\n        ) : (\n            <Typography noWrap>\n                <IconButton disabled data-cy=\"resource-delete-button\" size=\"large\">\n                    <RemoveIcon />\n                </IconButton>\n            </Typography>\n        );\n    } else {\n        return <Typography noWrap></Typography>;\n    }\n};\n\nexport const ResourceLinkDelete = connect((state: RootState, props: { uuid: string }) => {\n    const link = getResource<LinkResource>(props.uuid)(state.resources);\n    const isBuiltin = isBuiltinGroup(link?.headUuid || \"\");\n    const canManage = link && getResourceLinkCanManage(state, link) && !isBuiltin;\n\n    return {\n        item: link || { uuid: \"\", kind: ResourceKind.NONE },\n        canManage\n    };\n})((props: { item: LinkResource; canManage: boolean } & DispatchProp<any>) => renderLinkDelete(props.dispatch, props.item, props.canManage));\n\nexport const ResourcePermissionsDelete = connect((state: RootState, props: { uuid: string }) => {\n    const link = getResource<LinkResource>(props.uuid)(state.resources);\n    const isBuiltin = isBuiltinGroup(link?.tailUuid || \"\");\n    const canManage = link && getResourceLinkCanManage(state, link) && !isBuiltin;\n\n    return {\n        item: link || { uuid: \"\", kind: ResourceKind.NONE },\n        canManage\n    };\n})((props: { item: LinkResource; canManage: boolean } & DispatchProp<any>) => renderLinkDelete(props.dispatch, props.item, props.canManage));\n\nexport const ResourceLinkTailEmail = connect((state: RootState, props: { uuid: string }) => {\n    const link = getResource<LinkResource>(props.uuid)(state.resources);\n    const resource = getResource<UserResource>(link?.tailUuid || \"\")(state.resources);\n\n    return resource || { email: \"\" };\n})(renderEmail);\n\nexport const ResourceLinkTailUsername = connect((state: RootState, props: { uuid: string }) => {\n    const link = getResource<LinkResource>(props.uuid)(state.resources);\n    const resource = getResource<UserResource>(link?.tailUuid || \"\")(state.resources);\n\n    return resource || { username: \"\" };\n})(renderUsername);\n\nconst renderPermissionLevel = (dispatch: Dispatch, link: LinkResource, canManage: boolean) => {\n    return (\n        <Typography noWrap>\n            {formatPermissionLevel(link.name as PermissionLevel)}\n            {canManage ? 
(\n                <IconButton\n                    data-cy=\"edit-permission-button\"\n                    onClick={event => dispatch<any>(openPermissionEditContextMenu(event, link))}\n                    size=\"large\">\n                    <RenameIcon />\n                </IconButton>\n            ) : (\n                \"\"\n            )}\n        </Typography>\n    );\n};\n\nexport const ResourceLinkHeadPermissionLevel = connect((state: RootState, props: { uuid: string }) => {\n    const link = getResource<LinkResource>(props.uuid)(state.resources);\n    const isBuiltin = isBuiltinGroup(link?.headUuid || \"\") || isBuiltinGroup(link?.tailUuid || \"\");\n\n    return {\n        link: link || { uuid: \"\", name: \"\", kind: ResourceKind.NONE },\n        canManage: link && getResourceLinkCanManage(state, link) && !isBuiltin,\n    };\n})((props: { link: LinkResource; canManage: boolean } & DispatchProp<any>) => renderPermissionLevel(props.dispatch, props.link, props.canManage));\n\nexport const ResourceLinkTailPermissionLevel = connect((state: RootState, props: { uuid: string }) => {\n    const link = getResource<LinkResource>(props.uuid)(state.resources);\n    const isBuiltin = isBuiltinGroup(link?.headUuid || \"\") || isBuiltinGroup(link?.tailUuid || \"\");\n\n    return {\n        link: link || { uuid: \"\", name: \"\", kind: ResourceKind.NONE },\n        canManage: link && getResourceLinkCanManage(state, link) && !isBuiltin,\n    };\n})((props: { link: LinkResource; canManage: boolean } & DispatchProp<any>) => renderPermissionLevel(props.dispatch, props.link, props.canManage));\n\nconst getResourceLinkCanManage = (state: RootState, link: LinkResource) => {\n    const headResource = getResource<Resource>(link.headUuid)(state.resources);\n    if (headResource && headResource.kind === ResourceKind.GROUP) {\n        return (headResource as GroupResource).canManage;\n    } else {\n        // Head resources other than groups don't expose canManage yet, so assume it is allowed\n        return true;\n    }\n};\n\n// Process Resources\nconst resourceRunProcess = (dispatch: Dispatch, uuid: string) => {\n    return (\n        <div>\n            {uuid && (\n                <Tooltip title=\"Run process\">\n                    <IconButton onClick={() => dispatch<any>(openRunProcess(uuid))} size=\"large\">\n                        <ProcessIcon />\n                    </IconButton>\n                </Tooltip>\n            )}\n        </div>\n    );\n};\n\nexport const ResourceRunProcess = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<WorkflowResource>(props.uuid)(state.resources);\n    return {\n        uuid: resource ? resource.uuid : \"\",\n    };\n})((props: { uuid: string } & DispatchProp<any>) => resourceRunProcess(props.dispatch, props.uuid));\n\nconst renderWorkflowStatus = (uuidPrefix: string, ownerUuid?: string) => {\n    if (ownerUuid === getPublicUuid(uuidPrefix)) {\n        return renderStatus(WorkflowStatus.PUBLIC);\n    } else {\n        return renderStatus(WorkflowStatus.PRIVATE);\n    }\n};\n\nconst renderStatus = (status: string) => (\n    <Typography\n        noWrap\n        style={{ width: \"60px\" }}\n    >\n        {status}\n    </Typography>\n);\n\nexport const ResourceWorkflowStatus = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<WorkflowResource>(props.uuid)(state.resources);\n    const uuidPrefix = getUuidPrefix(state);\n    return {\n        ownerUuid: resource ? 
resource.ownerUuid : \"\",\n        uuidPrefix,\n    };\n})((props: { ownerUuid?: string; uuidPrefix: string }) => renderWorkflowStatus(props.uuidPrefix, props.ownerUuid));\n\nexport const ResourceContainerUuid = connect((state: RootState, props: { uuid: string }) => {\n    const process = getProcess(props.uuid)(state.resources);\n    return { uuid: process?.container?.uuid ? process?.container?.uuid : \"\" };\n})((props: { uuid: string }) => renderUuid({ uuid: props.uuid }));\n\nenum ColumnSelection {\n    OUTPUT_UUID = \"outputUuid\",\n    LOG_UUID = \"logUuid\",\n}\n\nconst renderUuidLinkWithCopyIcon = (dispatch: Dispatch, item: ProcessResource, column: string) => {\n    const selectedColumnUuid = item[column];\n    return (\n        <Grid\n            container\n            alignItems=\"center\"\n            wrap=\"nowrap\"\n        >\n            <Grid item>\n                {selectedColumnUuid ? (\n                    <Typography\n                        color=\"primary\"\n                        style={{ width: \"auto\", cursor: \"pointer\" }}\n                        noWrap\n                        onClick={() => dispatch<any>(navigateTo(selectedColumnUuid))}\n                    >\n                        {selectedColumnUuid}\n                    </Typography>\n                ) : (\n                    \"-\"\n                )}\n            </Grid>\n            <Grid item>{selectedColumnUuid && renderUuidCopyIcon({ uuid: selectedColumnUuid })}</Grid>\n        </Grid>\n    );\n};\n\nexport const ResourceOutputUuid = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<ProcessResource>(props.uuid)(state.resources);\n    return resource;\n})((process: ProcessResource & DispatchProp<any>) => renderUuidLinkWithCopyIcon(process.dispatch, process, ColumnSelection.OUTPUT_UUID));\n\nexport const ResourceLogUuid = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<ProcessResource>(props.uuid)(state.resources);\n    return resource;\n})((process: ProcessResource & DispatchProp<any>) => renderUuidLinkWithCopyIcon(process.dispatch, process, ColumnSelection.LOG_UUID));\n\nexport const ResourceParentProcess = connect((state: RootState, props: { uuid: string }) => {\n    const process = getProcess(props.uuid)(state.resources);\n    return { parentProcess: process?.containerRequest?.requestingContainerUuid || \"\" };\n})((props: { parentProcess: string }) => renderUuid({ uuid: props.parentProcess }));\n\nexport const ResourceModifiedByUserUuid = connect((state: RootState, props: { uuid: string }) => {\n    const process = getProcess(props.uuid)(state.resources);\n    return { userUuid: process?.containerRequest?.modifiedByUserUuid || \"\" };\n})((props: { userUuid: string }) => renderUuid({ uuid: props.userUuid }));\n\nexport const ResourceCreatedAtDate = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<GroupContentsResource>(props.uuid)(state.resources);\n    return { date: resource ? resource.createdAt : \"\" };\n})((props: { date: string }) => renderDateTime(props.date));\n\nexport const ResourceLastModifiedDate = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<GroupContentsResource>(props.uuid)(state.resources);\n    return { date: resource ? 
resource.modifiedAt : \"\" };\n})((props: { date: string }) => renderDateTime(props.date));\n\nexport const ResourceTrashDate = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<TrashableResource>(props.uuid)(state.resources);\n    return { date: resource ? resource.trashAt : \"\" };\n})((props: { date: string }) => renderDateTime(props.date));\n\nexport const ResourceDeleteDate = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<TrashableResource>(props.uuid)(state.resources);\n    return { date: resource ? resource.deleteAt : \"\" };\n})((props: { date: string }) => renderDateTime(props.date));\n\nexport const ResourceExpiresAtDate = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<ExternalCredential>(props.uuid)(state.resources);\n    return { date: resource ? resource.expiresAt : \"\" };\n})((props: { date: string }): JSX.Element => renderExpiresAtDate(props.date));\n\nconst renderExpiresAtDate = (date?: string) => {\n    if (date) {\n        if (isElapsed(date)) {\n            return renderExpired(date);\n        } else if (isWithinExpiration(date, 100)) {\n            return renderExpiring(date);\n        } else {\n            return renderDateOnly(date);\n        }\n    }\n    return <>-</>;\n}\n\nexport const RenderResourceStringField = <T extends Resource>(props: { uuid: string, field: keyof T }) => {\n    const ConnectedComponent = connect((state: RootState) => {\n        const resource = getResource<T>(props.uuid)(state.resources);\n        return { [props.field]: resource ? resource[props.field] : \"\" };\n    })((renderProps: { [key: string]: string }) =>\n        renderString(renderProps[props.field as keyof typeof renderProps]));\n    return <ConnectedComponent />;\n};\n\nconst renderString = (data?: string) => <Typography noWrap>{data || '-'}</Typography>;\n\nexport const RenderDescriptionInTD = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<GroupContentsResource>(props.uuid)(state.resources);\n    return { description: resource ? resource.description : \"\" };\n})((props: { description?: string }) =>\n    props.description ? <Typography\n        component='div'\n        // Remove <p> tags from description so they don't affect table display\n        dangerouslySetInnerHTML={{ __html: props.description.replace(/<\\/?p>/g, \"\") }} />\n    : <>-</>);\n\nexport const RenderScopes = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<ExternalCredential>(props.uuid)(state.resources);\n    // account for https://dev.arvados.org/issues/23152\n    let scopes: string[] = [];\n    if (resource) {\n        if (Array.isArray(resource.scopes)) {\n            scopes = resource.scopes;\n        } else if (typeof resource.scopes === 'string') {\n            scopes = (resource.scopes as string).split(\",\").map(s => s.trim()).filter(Boolean);;\n        }\n    }\n    return { scopes };\n})((props: { scopes: string[] }) => renderStringArray(props.scopes, false));\n\nconst renderStringArray = (data: string[], noWrap: boolean = true) => <Typography noWrap={!!noWrap}>{data.length ? 
data.join(', ') : '-'}</Typography>;\n\nexport const renderFileSize = (fileSize?: number) => (\n    <Typography\n        noWrap\n        style={{ minWidth: \"45px\" }}\n    >\n        {formatFileSize(fileSize)}\n    </Typography>\n);\n\nexport const ResourceFileSize = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<CollectionResource>(props.uuid)(state.resources);\n\n    if (resource && resource.kind !== ResourceKind.COLLECTION) {\n        return { fileSize: \"\" };\n    }\n\n    return { fileSize: resource ? resource.fileSizeTotal : 0 };\n})((props: { fileSize?: number }) => renderFileSize(props.fileSize));\n\nconst renderOwner = (owner: string) => <Typography noWrap>{owner || \"-\"}</Typography>;\n\nexport const ResourceOwner = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<GroupContentsResource>(props.uuid)(state.resources);\n    return { owner: resource ? resource.ownerUuid : \"\" };\n})((props: { owner: string }) => renderOwner(props.owner));\n\nexport const ResourceOwnerName = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<GroupContentsResource>(props.uuid)(state.resources);\n    const ownerNameState = state.ownerName;\n    const ownerName = ownerNameState.find(it => it.uuid === resource!.ownerUuid);\n    return { owner: ownerName ? ownerName!.name : resource!.ownerUuid };\n})((props: { owner: string }) => renderOwner(props.owner));\n\nexport const ResourceUUID = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<CollectionResource>(props.uuid)(state.resources);\n    return { uuid: resource ? resource.uuid : \"\" };\n})((props: { uuid: string }) => renderUuid({ uuid: props.uuid }));\n\nconst renderVersion = (version: number) => {\n    return <Typography>{version ?? \"-\"}</Typography>;\n};\n\nexport const ResourceVersion = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<CollectionResource>(props.uuid)(state.resources);\n    return { version: resource ? resource.version : \"\" };\n})((props: { version: number }) => renderVersion(props.version));\n\nconst renderPortableDataHash = (portableDataHash: string | null) => (\n    <Typography noWrap>\n        {portableDataHash ? (\n            <>\n                {portableDataHash}\n                <CopyToClipboardSnackbar value={portableDataHash} />\n            </>\n        ) : (\n            \"-\"\n        )}\n    </Typography>\n);\n\nexport const ResourcePortableDataHash = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<CollectionResource>(props.uuid)(state.resources);\n    return { portableDataHash: resource ? resource.portableDataHash : \"\" };\n})((props: { portableDataHash: string }) => renderPortableDataHash(props.portableDataHash));\n\nconst renderFileCount = (fileCount: number) => {\n    return <Typography>{fileCount ?? \"-\"}</Typography>;\n};\n\nexport const ResourceFileCount = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<CollectionResource>(props.uuid)(state.resources);\n    return { fileCount: resource ? 
resource.fileCount : \"\" };\n})((props: { fileCount: number }) => renderFileCount(props.fileCount));\n\nconst userFromID = connect((state: RootState, props: { uuid: string }) => {\n    let userFullname = \"\";\n    const resource = getResource<GroupContentsResource & UserResource>(props.uuid)(state.resources);\n\n    if (resource) {\n        userFullname = getUserFullname(resource as User) || (resource as GroupContentsResource).name;\n    }\n\n    return { uuid: props.uuid, userFullname };\n});\n\nconst ownerFromResourceId = compose(\n    connect((state: RootState, props: { uuid: string }) => {\n        const childResource = getResource<GroupContentsResource & UserResource>(props.uuid)(state.resources);\n        return { uuid: childResource ? (childResource as Resource).ownerUuid : \"\" };\n    }),\n    userFromID\n);\n\nconst _resourceWithName = withStyles(\n    {},\n    { withTheme: true }\n)((props: { uuid: string; userFullname: string; dispatch: Dispatch; theme: ArvadosTheme }) => {\n    const { uuid, userFullname, dispatch, theme } = props;\n    if (userFullname === \"\") {\n        dispatch<any>(loadResource(uuid, false));\n        return (\n            <Typography\n                style={{ color: theme.palette.primary.main }}\n                display=\"inline\"\n            >\n                {uuid}\n            </Typography>\n        );\n    }\n\n    return (\n        <Typography\n            style={{ color: theme.palette.primary.main }}\n            display=\"inline\"\n        >\n            {userFullname} ({uuid})\n        </Typography>\n    );\n});\n\nconst _resourceWithNameLink = withStyles(\n    {},\n    { withTheme: true }\n)((props: { uuid: string; userFullname: string; dispatch: Dispatch; theme: ArvadosTheme }) => {\n    const { uuid, userFullname, dispatch, theme } = props;\n    if (!userFullname) {\n        dispatch<any>(loadResource(uuid, false));\n    }\n\n    return (\n        <Typography\n            style={{ color: theme.palette.primary.main, cursor: 'pointer' }}\n            display=\"inline\"\n            noWrap\n            onClick={() => dispatch<any>(navigateTo(uuid))}\n        >\n            {userFullname ? userFullname : uuid}\n        </Typography>\n    )\n});\n\n\nexport const ResourceOwnerWithNameLink = ownerFromResourceId(_resourceWithNameLink);\n\nexport const ResourceOwnerWithName = ownerFromResourceId(_resourceWithName);\n\nexport const ResourceWithName = userFromID(_resourceWithName);\n\nexport const UserNameFromID = compose(userFromID)((props: { uuid: string; displayAsText?: string; userFullname: string; dispatch: Dispatch }) => {\n    const { uuid, userFullname, dispatch } = props;\n\n    if (userFullname === \"\") {\n        dispatch<any>(loadResource(uuid, false));\n    }\n    return <span>{userFullname ? 
userFullname : uuid}</span>;\n});\n\nexport const ResponsiblePerson = compose(\n    connect((state: RootState, props: { uuid: string; parentRef: HTMLElement | null }) => {\n        let responsiblePersonName: string = \"\";\n        let responsiblePersonUUID: string = \"\";\n        let responsiblePersonProperty: string = \"\";\n\n        if (state.auth.config.clusterConfig.Collections.ManagedProperties) {\n            let index = 0;\n            const keys = Object.keys(state.auth.config.clusterConfig.Collections.ManagedProperties);\n\n            while (!responsiblePersonProperty && keys[index]) {\n                const key = keys[index];\n                if (state.auth.config.clusterConfig.Collections.ManagedProperties[key].Function === \"original_owner\") {\n                    responsiblePersonProperty = key;\n                }\n                index++;\n            }\n        }\n\n        let resource: Resource | undefined = getResource<GroupContentsResource & UserResource>(props.uuid)(state.resources);\n\n        while (resource && resource.kind !== ResourceKind.USER && responsiblePersonProperty) {\n            responsiblePersonUUID = (resource as CollectionResource).properties[responsiblePersonProperty];\n            resource = getResource<GroupContentsResource & UserResource>(responsiblePersonUUID)(state.resources);\n        }\n\n        if (resource && resource.kind === ResourceKind.USER) {\n            responsiblePersonName = getUserFullname(resource as UserResource) || (resource as GroupContentsResource).name;\n        }\n\n        return { uuid: responsiblePersonUUID, responsiblePersonName, parentRef: props.parentRef };\n    }),\n    withStyles({}, { withTheme: true })\n)((props: { uuid: string | null; responsiblePersonName: string; parentRef: HTMLElement | null; theme: ArvadosTheme }) => {\n    const { uuid, responsiblePersonName, parentRef, theme } = props;\n\n    if (!uuid && parentRef) {\n        parentRef.style.display = \"none\";\n        return null;\n    } else if (parentRef) {\n        parentRef.style.display = \"block\";\n    }\n\n    if (!responsiblePersonName) {\n        return (\n            <Typography\n                style={{ color: theme.palette.primary.main }}\n                display=\"inline\"\n                noWrap\n            >\n                {uuid}\n            </Typography>\n        );\n    }\n\n    return (\n        <Typography\n            style={{ color: theme.palette.primary.main }}\n            display=\"inline\"\n            noWrap\n        >\n            {responsiblePersonName} ({uuid})\n        </Typography>\n    );\n});\n\nexport const RenderResponsiblePerson = (props: { responsiblePersonUUID: string | null; responsiblePersonName: string}) => {\n    const { responsiblePersonUUID, responsiblePersonName} = props;\n\n    if (!responsiblePersonName) {\n        return (\n            <Typography\n                display=\"inline\"\n                noWrap\n            >\n                {responsiblePersonUUID}\n            </Typography>\n        );\n    }\n\n    return (\n        <Typography\n            display=\"inline\"\n            noWrap\n        >\n            {responsiblePersonName} ({responsiblePersonUUID})\n        </Typography>\n    );\n}\n\nconst renderType = (type: string, subtype: string) => <Typography noWrap>{resourceLabel(type, subtype)}</Typography>;\n\nexport const ResourceType = connect((state: RootState, props: { uuid: string }) => {\n    const resource = getResource<GroupContentsResource>(props.uuid)(state.resources);\n    
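// Derive a subtype for the type label: the group class for groups, main vs. child for processes.\n    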
return {\n        type: resource ? resource.kind : \"\",\n        subtype: resource\n            ? resource.kind === ResourceKind.GROUP\n                ? resource.groupClass\n                : resource.kind === ResourceKind.PROCESS\n                    ? resource.requestingContainerUuid\n                        ? ProcessTypeFilter.CHILD_PROCESS\n                        : ProcessTypeFilter.MAIN_PROCESS\n                    : \"\"\n            : \"\"\n    };\n})((props: { type: string; subtype: string }) => renderType(props.type, props.subtype));\n\nexport const ResourceStatus = connect((state: RootState, props: { uuid: string }) => {\n    return { resource: getResource<GroupContentsResource>(props.uuid)(state.resources) };\n})((props: { resource: GroupContentsResource }) =>\n    props.resource && props.resource.kind === ResourceKind.COLLECTION ? (\n        <CollectionStatus uuid={props.resource.uuid} />\n    ) : (\n        <ProcessStatus uuid={props.resource.uuid} />\n    )\n);\n\nexport const CollectionStatus = connect((state: RootState, props: { uuid: string }) => {\n    return { collection: getResource<CollectionResource>(props.uuid)(state.resources) };\n})((props: { collection: CollectionResource }) =>\n    props.collection.uuid !== props.collection.currentVersionUuid ? (\n        <Typography>version {props.collection.version}</Typography>\n    ) : (\n        <Typography>head version</Typography>\n    )\n);\n\nexport const CollectionName = connect((state: RootState, props: { uuid: string; className?: string }) => {\n    return {\n        collection: getResource<CollectionResource>(props.uuid)(state.resources),\n        uuid: props.uuid,\n        className: props.className,\n    };\n})((props: { collection: CollectionResource; uuid: string; className?: string }) => (\n    <Typography className={props.className}>{props.collection?.name || props.uuid}</Typography>\n));\n\nexport const ProcessStatus = compose(\n    connect((state: RootState, props: { uuid: string }) => {\n        return { process: getProcess(props.uuid)(state.resources) };\n    }),\n    withStyles({}, { withTheme: true })\n)((props: { process?: Process; theme: ArvadosTheme }) =>\n    props.process ? (\n        <Chip\n            data-cy=\"process-status-chip\"\n            label={getProcessStatus(props.process)}\n            style={{\n                height: props.theme.spacing(3),\n                width: props.theme.spacing(12),\n                ...getProcessStatusStyles(getProcessStatus(props.process), props.theme),\n                fontSize: \"0.875rem\",\n                borderRadius: props.theme.spacing(0.625),\n            }}\n        />\n    ) : (\n        <Typography>-</Typography>\n    )\n);\n\nexport const ProcessStartDate = connect((state: RootState, props: { uuid: string }) => {\n    const process = getProcess(props.uuid)(state.resources);\n    return { date: process && process.container ? 
process.container.startedAt : \"\" };\n})((props: { date: string }) => renderDateTime(props.date));\n\nexport const renderRunTime = (time: number) => (\n    <Typography\n        noWrap\n        style={{ minWidth: \"45px\" }}\n    >\n        {formatTime(time, true)}\n    </Typography>\n);\n\ninterface ContainerRunTimeProps {\n    process: Process;\n}\n\ninterface ContainerRunTimeState {\n    runtime: number;\n}\n\nexport const ContainerRunTime = connect((state: RootState, props: { uuid: string }) => {\n    return { process: getProcess(props.uuid)(state.resources) };\n})(\n    class extends React.Component<ContainerRunTimeProps, ContainerRunTimeState> {\n        private timer: any;\n\n        constructor(props: ContainerRunTimeProps) {\n            super(props);\n            this.state = { runtime: this.getRuntime() };\n        }\n\n        getRuntime() {\n            return this.props.process ? getProcessRuntime(this.props.process) : 0;\n        }\n\n        updateRuntime() {\n            this.setState({ runtime: this.getRuntime() });\n        }\n\n        componentDidMount() {\n            this.timer = setInterval(this.updateRuntime.bind(this), 5000);\n        }\n\n        componentWillUnmount() {\n            clearInterval(this.timer);\n        }\n\n        render() {\n            return this.props.process ? renderRunTime(this.state.runtime) : <Typography>-</Typography>;\n        }\n    }\n);\n\nexport const GroupMembersCount = connect(\n    (state: RootState, props: { uuid: string }) => {\n        const group = getResource<GroupResource>(props.uuid)(state.resources);\n\n        return {\n            value: group?.memberCount,\n        };\n\n    }\n)(withTheme((props: {value: number | null | undefined, theme:ArvadosTheme}) => {\n    if (props.value === undefined) {\n        // Loading\n        return <Typography component={\"div\"}>\n            <InlinePulser />\n        </Typography>;\n    } else if (props.value === null) {\n        // Error\n        return <Typography>\n            <Tooltip title=\"Failed to load member count\">\n                <ErrorIcon style={{color: props.theme.customs.colors.greyL}}/>\n            </Tooltip>\n        </Typography>;\n    } else {\n        return <Typography children={props.value} />;\n    }\n}));\n"
  },
  {
    "path": "services/workbench2/src/views-components/data-explorer/with-resources.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { getResource } from 'store/resources/resources';\nimport { Resource } from 'models/resource';\n\ninterface WithResourceProps {\n    resource?: Resource;\n}\n\nexport const withResource = (component: React.ComponentType<WithResourceProps & { uuid: string }>) =>\n    connect<WithResourceProps>(\n        (state: RootState, props: { uuid: string }): WithResourceProps => ({\n            resource: getResource(props.uuid)(state.resources)\n        })\n    )(component);\n\nexport const getDataFromResource = (property: string, resource?: Resource) => {\n    return resource && resource[property] ? resource[property] : '(none)';\n};\n\nexport const withResourceData = (property: string, render: (data: any) => React.ReactElement<any>) =>\n    withResource(({ resource }) => render(getDataFromResource(property, resource)));\n"
  },
  {
    "path": "services/workbench2/src/views-components/description-dialog/description-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dialog, DialogContent, DialogActions, Button, Typography } from \"@mui/material\";\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { Dispatch, compose } from \"redux\";\nimport { descriptionDialogActions, DESCRIPTION_DIALOG, DescriptionDialogData } from \"store/description-dialog/description-dialog-actions\";\nimport { WithDialogProps, withDialog } from \"store/dialog/with-dialog\";\nimport { getResource } from \"store/resources/resources\";\nimport { getDialog } from \"store/dialog/dialog-reducer\";\nimport { ProjectResource } from \"models/project\";\nimport { CollectionResource } from \"models/collection\";\nimport { WorkflowResource } from \"models/workflow\";\nimport { ContainerRequestResource } from \"models/container-request\";\n\ntype DescribedResource = ProjectResource | CollectionResource | WorkflowResource | ContainerRequestResource\n\ninterface DescriptionDialogDataProps {\n    description: string,\n}\n\nconst mapStateToProps = (state: RootState): DescriptionDialogDataProps => {\n    const dialog = getDialog<DescriptionDialogData>(state.dialog, DESCRIPTION_DIALOG);\n    const resource = getResource<DescribedResource>(dialog?.data.uuid)(state.resources);\n    return {\n        description: resource?.description || \"\",\n    }\n};\n\ninterface DescriptionDialogActionProps {\n    closeDialog: Function;\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    closeDialog: () => dispatch<any>(descriptionDialogActions.closeDialog()),\n});\n\ntype DescriptionDialogComponentProps = DescriptionDialogDataProps & DescriptionDialogActionProps & WithDialogProps<DescriptionDialogData>;\n\nexport const DescriptionDialogComponent = (props: DescriptionDialogComponentProps) => {\n    const { open, description } = props;\n\n    return (\n        <Dialog\n            open={open}\n            onClose={props.closeDialog}\n            maxWidth=\"md\"\n            fullWidth={true}\n        >\n            <DialogContent>\n                <Typography\n                    component=\"div\"\n                    style={{ paddingTop: '20px' }}\n                    //dangerouslySetInnerHTML is ok here only if description is sanitized,\n                    //which it is before it is loaded into the redux store\n                    dangerouslySetInnerHTML={{\n                        __html: description,\n                    }}\n                />\n            </DialogContent>\n            <DialogActions style={{ margin: \"0px 12px 12px\" }}>\n                <Button\n                    data-cy=\"confirmation-dialog-ok-btn\"\n                    variant=\"contained\"\n                    color=\"primary\"\n                    type=\"submit\"\n                    onClick={props.closeDialog}\n                >\n                    Close\n                </Button>\n            </DialogActions>\n        </Dialog>\n    );\n};\n\nexport const DescriptionDialog = compose(\n    withDialog(DESCRIPTION_DIALOG),\n    connect(mapStateToProps, mapDispatchToProps)\n)(DescriptionDialogComponent);\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-card/collection-details-card.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useEffect, useState } from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Card, CardHeader, Typography, Grid, Tooltip } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { connect } from 'react-redux';\nimport { getResource } from 'store/resources/resources';\nimport { MultiselectToolbar } from 'components/multiselect-toolbar/MultiselectToolbar';\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport { loadDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { setSelectedResourceUuid } from 'store/selected-resource/selected-resource-actions';\nimport { deselectAllOthers } from 'store/multiselect/multiselect-actions';\nimport { CollectionResource } from 'models/collection';\nimport { IllegalNamingWarning } from 'components/warning/warning';\nimport { GroupResource } from 'models/group';\nimport { UserResource } from 'models/user';\nimport { resourceIsFrozen } from 'common/frozen-resources';\nimport { ReadOnlyIcon } from 'components/icon/icon';\nimport { DescriptionPreview } from './description-preview';\n\ntype CssRules = 'root' | 'cardHeaderContainer' | 'cardHeader' | 'readOnlyIcon' | 'nameContainer';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        marginBottom: '1rem',\n        flex: '0 0 auto',\n        padding: 0,\n        minHeight: '3rem',\n    },\n    nameContainer: {\n        display: 'flex',\n        alignItems: 'center',\n        minHeight: '2.7rem',\n    },\n    cardHeaderContainer: {\n        width: '100%',\n        display: 'flex',\n        flexDirection: 'row',\n        alignItems: 'flex-start',\n        justifyContent: 'space-between',\n    },\n    cardHeader: {\n        minWidth: '30rem',\n        padding: '0.2rem 0.4rem 0.2rem 1rem',\n    },\n    readOnlyIcon: {\n        marginLeft: theme.spacing(1),\n        fontSize: 'small',\n    },\n});\n\nconst mapStateToProps = ({ auth, selectedResource, resources, properties }: RootState): Pick<CollectionCardProps, 'currentUserUUID' | 'currentResource' | 'isSelected' | 'itemOwner' | 'isFrozen'> => {\n    const currentResource = getResource<CollectionResource>(properties.currentRouteUuid)(resources);\n    const isSelected = selectedResource.selectedResourceUuid === properties.currentRouteUuid;\n    const itemOwner = currentResource ? 
getResource<GroupResource | UserResource>(currentResource.ownerUuid)(resources) : undefined;\n    const isFrozen = (currentResource && resourceIsFrozen(currentResource, resources)) || false;\n\n    return {\n        currentUserUUID: auth.user?.uuid || '',\n        currentResource,\n        isSelected,\n        itemOwner,\n        isFrozen,\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    handleCardClick: (uuid: string) => {\n        dispatch<any>(loadDetailsPanel(uuid));\n        dispatch<any>(setSelectedResourceUuid(uuid));\n        dispatch<any>(deselectAllOthers(uuid));\n    },\n});\n\ntype CollectionCardProps = WithStyles<CssRules> & {\n    currentResource: CollectionResource | undefined;\n    isSelected: boolean;\n    currentUserUUID: string;\n    itemOwner: GroupResource | UserResource | undefined;\n    isFrozen: boolean;\n    handleCardClick: (resource: any) => void;\n};\n\nexport const CollectionCard = connect(\n    mapStateToProps,\n    mapDispatchToProps\n)(\n    withStyles(styles)((props: CollectionCardProps) => {\n        const { classes, currentResource, handleCardClick, isSelected, currentUserUUID, itemOwner, isFrozen } = props;\n\n        const [isWritable, setIsWritable] = useState(false);\n\n        useEffect(() => {\n            if (currentResource) {\n                setIsWritable(checkIsWritable(currentResource, itemOwner, currentUserUUID, isFrozen));\n            }\n        }, [currentResource, itemOwner, currentUserUUID, isFrozen]);\n\n        // Hooks must be registered unconditionally, so only return early after them\n        if (!currentResource) return null;\n        const { name, uuid } = currentResource;\n\n        return (\n            <Card\n                className={classes.root}\n                onClick={() => handleCardClick(uuid)}\n                data-cy='collection-details-card'\n            >\n                <Grid\n                    container\n                    wrap='nowrap'\n                    className={classes.cardHeaderContainer}\n                >\n                    <CardHeader\n                        className={classes.cardHeader}\n                        title={\n                            <section className={classes.nameContainer}>\n                                <IllegalNamingWarning name={name} />\n                                <Typography\n                                    variant='h6'\n                                >\n                                    {name}\n                                </Typography>\n                                {!isWritable &&\n                                    <Tooltip title=\"Read-only\">\n                                        <span><ReadOnlyIcon data-cy=\"read-only-icon\" className={classes.readOnlyIcon} /></span>\n                                    </Tooltip>\n                                }\n                            </section>\n                        }\n                    />\n                    {isSelected && <MultiselectToolbar />}\n                </Grid>\n                <DescriptionPreview resource={currentResource} />\n            </Card>\n        );\n    })\n);\n\nconst checkIsWritable = (item: CollectionResource, itemOwner: GroupResource | UserResource | undefined, currentUserUUID: string, isFrozen: boolean): boolean => {\n    const isCurrentVersion = item.currentVersionUuid === item.uuid;\n\n    let isWritable = false;\n\n    if (isCurrentVersion) {\n        if (item.ownerUuid === currentUserUUID) {\n            isWritable = true;\n        } else {\n            if (itemOwner) {\n                isWritable = itemOwner.canWrite;\n            }\n  
  }\n    if (isWritable) {\n        isWritable = !isFrozen;\n    }\n    return isWritable;\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-card/description-preview.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Dispatch } from 'redux';\nimport { ArvadosTheme, CustomStyleRulesCallback } from 'common/custom-theme';\nimport withStyles, { WithStyles } from '@mui/styles/withStyles';\nimport { Typography, Grid } from '@mui/material';\nimport { descriptionDialogActions } from 'store/description-dialog/description-dialog-actions';\nimport { connect } from 'react-redux';\nimport { ProjectResource } from 'models/project';\nimport { CollectionResource } from 'models/collection';\nimport { WorkflowResource } from 'models/workflow';\nimport { ContainerRequestResource } from 'models/container-request';\n\ntype CssRules =\n    | 'wrapper'\n    | 'preview'\n    | 'overflowButton';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    wrapper: {\n        // Max height 5 lines at line height 1.5\n        // Updates to maxHeight must be mirrored in overflowButton bottom\n        maxHeight: 'calc(0.875rem * 1.5 * 5)',\n        overflow: 'hidden',\n        position: 'relative',\n        // Added bottom margin to match space above title\n        margin: '0 0 8px',\n    },\n    preview: {\n        margin: '0 1rem',\n        // All text small and inline\n        '& :is(h1, h2, h3, h4, h5, h6, p)': {\n            display: 'inline',\n            fontSize: '0.875rem',\n        },\n        // Return line breaks after paragraphs\n        '& p': {\n            '&::after': {\n                content: `\"\"`,\n                display: 'block',\n            },\n        },\n        // Add line breaks before images to avoid pushing text away\n        // Conveniently, the editor wraps images in paragraphs\n        '& p:has(> img)::before': {\n            content: `\"\"`,\n            display: 'block',\n        },\n        // Headers bold\n        '& :is(h1, h2, h3, h4, h5, h6)': {\n            fontWeight: 'bold',\n        },\n        // Header separator - this style doesn't work when nested for some reason\n        '& :is(h1, h2, h3, h4, h5, h6)::after': {\n            fontWeight: 'bold',\n            content: `\" —\"`,\n        },\n    },\n    overflowButton: {\n        cursor: 'pointer',\n        // Avoid taking up more space than necessary\n        lineHeight: 1,\n        // Use contentbox so that vertical text alignment is nice\n        // and to easily add padding to the height\n        boxSizing: 'content-box',\n        // Must use calc to account for margin due to content-box\n        width: 'calc(100% - (0.7rem * 2))',\n        // Start height at font size\n        height: 'calc(0.875rem)',\n        position: 'absolute',\n        // Bottom calc must match wrapper maxHeight\n        bottom: 'calc((100% - calc(0.875rem * 1.5 * 5)) * 10000)',\n        color: theme.palette.primary.main,\n        margin: '0 1rem',\n        // Added padding for overlapping linear gradient\n        // Bottom padding instead of margin prevents covered content from peeking\n        padding: 'calc(0.875rem * 1.5) 0 0.25rem',\n        // Gradient end should match line height\n        background: 'linear-gradient(transparent 0rem, #fff 0.875rem)',\n    },\n});\n\ninterface DescriptionPreviewDispatchProps {\n    openDescriptionDialog: (uuid: string) => void;\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    openDescriptionDialog: (uuid: string) => {\n        dispatch<any>(descriptionDialogActions.openDialog(uuid));\n    },\n});\n\ninterface 
DescriptionPreviewDataProps {\n    resource: ProjectResource | CollectionResource | WorkflowResource | ContainerRequestResource;\n};\n\ntype DescriptionPreviewProps = WithStyles<CssRules> & DescriptionPreviewDispatchProps & DescriptionPreviewDataProps;\n\nexport const DescriptionPreview = connect(\n    null,\n    mapDispatchToProps\n)(\n    withStyles(styles)((props: DescriptionPreviewProps) => {\n        const { classes, resource } = props;\n\n        return resource.description?.length ? (\n            <Grid className={classes.wrapper}>\n                <Typography\n                    className={classes.preview}\n                    component=\"div\"\n                    //dangerouslySetInnerHTML is ok here only if description is sanitized,\n                    //which it is before it is loaded into the redux store\n                    dangerouslySetInnerHTML={{\n                        __html: resource.description,\n                    }}\n                />\n                <Typography\n                    className={classes.overflowButton}\n                    onClick={() => {\n                        props.openDescriptionDialog(resource.uuid);\n                    }}\n                >\n                    Read full description...\n                </Typography>\n            </Grid>\n        ) : <></>;\n    })\n);\n"
  },
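  {
    "path": "services/workbench2/src/views-components/details-card/description-preview.sanitize-sketch.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Editor's illustrative sketch -- not part of the Arvados source tree.\n// DescriptionPreview above renders resource.description with\n// dangerouslySetInnerHTML and relies on the description having been\n// sanitized before it was loaded into the redux store. This file only\n// sketches what such a sanitize step could look like. It assumes the\n// DOMPurify library and a hypothetical helper name; the sanitizer\n// Workbench 2 actually uses may differ.\n\nimport DOMPurify from 'dompurify';\n\n// Strip scripts and event handlers while keeping the basic markup\n// (headings, paragraphs, images) that the preview styles target\nexport const sanitizeDescriptionSketch = (html: string): string =>\n    DOMPurify.sanitize(html, {\n        ALLOWED_TAGS: ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'a', 'img', 'em', 'strong', 'ul', 'ol', 'li', 'br'],\n        ALLOWED_ATTR: ['href', 'src', 'alt', 'title'],\n    });\n"
  },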
  {
    "path": "services/workbench2/src/views-components/details-card/details-card-root.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useEffect } from 'react';\nimport { Dispatch } from 'redux';\nimport { RootState } from 'store/store';\nimport { connect } from 'react-redux';\nimport { ProjectResource } from 'models/project';\nimport { ResourceKind } from 'models/resource';\nimport { UserResource } from 'models/user';\nimport { CollectionResource } from 'models/collection';\nimport { WorkflowResource } from 'models/workflow';\nimport { ProcessResource } from 'models/process';\nimport { UserCard } from './user-details-card';\nimport { ProjectCard } from './project-details-card';\nimport { getResource } from 'store/resources/resources';\nimport { CollectionCard } from './collection-details-card';\nimport { WorkflowCard } from './workflow-details-card';\nimport { ProcessCard } from './process-details-card';\nimport { setSelectedResourceUuid } from 'store/selected-resource/selected-resource-actions';\n\nconst mapStateToProps = ({ resources, properties, selectedResource }: RootState) => {\n    const currentResource = getResource(properties.currentRouteUuid)(resources);\n    return {\n        currentResource,\n        selectedResourceUuid: selectedResource.selectedResourceUuid,\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    setSelectedResourceUuid: (uuid: string) => dispatch<any>(setSelectedResourceUuid(uuid)),\n});\n\ntype DetailsCardProps = {\n    currentResource: ProjectResource | UserResource | CollectionResource | WorkflowResource | ProcessResource | undefined;\n    selectedResourceUuid: string;\n    setSelectedResourceUuid: (uuid: string) => void;\n};\n\nexport const DetailsCardRoot = connect(mapStateToProps, mapDispatchToProps)(({ currentResource, selectedResourceUuid, setSelectedResourceUuid }: DetailsCardProps) => {\n    if (!currentResource) {\n        return null;\n    }\n\n    useEffect(() => {\n        if (selectedResourceUuid !== currentResource.uuid) {\n            setSelectedResourceUuid(currentResource.uuid);\n        }\n    }, [currentResource.uuid]);\n\n    switch (currentResource.kind as string) {\n        case ResourceKind.USER:\n            return <UserCard />;\n        case ResourceKind.PROJECT:\n            return <ProjectCard />;\n        case ResourceKind.COLLECTION:\n            return <CollectionCard />;\n        case ResourceKind.WORKFLOW:\n            return <WorkflowCard />;\n        case ResourceKind.PROCESS:\n            return <ProcessCard />;\n        default:\n            return null;\n    }\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-card/process-details-card.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport classNames from 'classnames';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Card, CardHeader, Typography, Grid, Button } from '@mui/material';\nimport { StartIcon, StopIcon } from 'components/icon/icon';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { connect } from 'react-redux';\nimport { MultiselectToolbar } from 'components/multiselect-toolbar/MultiselectToolbar';\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport { loadDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { setSelectedResourceUuid } from 'store/selected-resource/selected-resource-actions';\nimport { deselectAllOthers } from 'store/multiselect/multiselect-actions';\nimport { isProcessCancelable, isProcessRunnable, isProcessResumable, isProcessRunning } from 'store/processes/process';\nimport { ProcessStatus } from 'views-components/data-explorer/renderers';\nimport { openCancelProcesswDialog, resumeOnHoldWorkflow, startWorkflow } from 'store/processes/processes-actions';\nimport { Process } from 'store/processes/process';\nimport { getProcess } from 'store/processes/process';\nimport { PublishedPort } from 'models/container';\nimport { ServiceMenu } from './service-menu';\nimport { DescriptionPreview } from './description-preview';\n\ntype CssRules = 'root' | 'cardHeaderContainer' | 'cardHeader' | 'nameContainer' | 'buttonContainer' | 'runStatusContainer' | 'runStatusContainerWithServiceButton' | 'actionButton' | 'runButton' | 'cancelButton' | 'serviceButton' | 'runningToolbarStyles';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        marginBottom: '1rem',\n        flex: '0 0 auto',\n        padding: 0,\n        minHeight: '3rem',\n    },\n    nameContainer: {\n        display: 'flex',\n        alignItems: 'center',\n        minHeight: '2.7rem',\n        gap: '2rem',\n    },\n    cardHeaderContainer: {\n        width: '100%',\n        display: 'flex',\n        flexDirection: 'row',\n        alignItems: 'flex-start',\n        justifyContent: 'space-between',\n    },\n    cardHeader: {\n        minWidth: '30rem',\n        padding: '0.2rem 0.4rem 0.2rem 1rem',\n        '& > div': {\n            overflow: \"hidden\",\n        },\n    },\n    buttonContainer: {\n        overflow: 'hidden',\n        display: 'flex',\n        flexDirection: 'column',\n        alignItems: 'flex-start',\n        rowGap: '5px',\n        flexWrap: 'wrap',\n        flexGrow: 0,\n        flexBasis: '200px',\n        minWidth: '200px',\n    },\n    runStatusContainer: {\n        width: '100%',\n        display: 'flex',\n        columnGap: '5px',\n\n    },\n    // Only active when service button is shown\n    runStatusContainerWithServiceButton: {\n        '& > *': {\n            // Allow run/cancel status to share space\n            flexGrow: 1,\n            flexShrink: 1,\n        },\n    },\n    actionButton: {\n        padding: \"0px 5px 0 0\",\n        fontSize: '0.78rem',\n        // Set icon size for all buttons\n        '& svg': {\n            fontSize: '22px',\n        },\n        whiteSpace: 'nowrap',\n    },\n    runButton: {\n        flexShrink: 0,\n    },\n    cancelButton: {\n        flexShrink: 0,\n        color: 
theme.palette.common.white,\n        backgroundColor: theme.customs.colors.red900,\n        '&:hover': {\n            backgroundColor: theme.customs.colors.red900,\n        },\n    },\n    serviceButton: {\n        width: '100%',\n        // Add padding to account for no icon\n        paddingLeft: '5px',\n        justifyContent: 'center',\n        '& span': {\n            // Ellipsize button text\n            overflow: 'hidden',\n            textOverflow: 'ellipsis',\n        },\n        '& .MuiButton-endIcon': {\n            flexShrink: 0,\n        },\n    },\n    runningToolbarStyles: {\n        marginTop: '3px',\n    },\n});\n\nconst mapStateToProps = ({ auth, selectedResource, resources, properties }: RootState) => {\n    const currentResource = getProcess(properties.currentRouteUuid)(resources);\n    const isSelected = selectedResource.selectedResourceUuid === properties.currentRouteUuid;\n\n    return {\n        isAdmin: auth.user?.isAdmin,\n        currentResource,\n        isSelected,\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    handleCardClick: (uuid: string) => {\n        dispatch<any>(loadDetailsPanel(uuid));\n        dispatch<any>(setSelectedResourceUuid(uuid));\n        dispatch<any>(deselectAllOthers(uuid));\n    },\n    cancelProcess: (uuid: string) => dispatch<any>(openCancelProcesswDialog(uuid)),\n    startProcess: (uuid: string) => dispatch<any>(startWorkflow(uuid)),\n    resumeOnHoldWorkflow: (uuid: string) => dispatch<any>(resumeOnHoldWorkflow(uuid)),\n});\n\ntype ProcessCardProps = WithStyles<CssRules> & {\n    currentResource: Process;\n    isSelected: boolean;\n    handleCardClick: (resource: any) => void;\n    cancelProcess: (uuid: string) => void;\n    startProcess: (uuid: string) => void;\n    resumeOnHoldWorkflow: (uuid: string) => void;\n};\n\nexport const ProcessCard = connect(\n    mapStateToProps,\n    mapDispatchToProps\n)(\n    withStyles(styles)((props: ProcessCardProps) => {\n        const { classes, currentResource, handleCardClick, isSelected, cancelProcess, startProcess, resumeOnHoldWorkflow } = props;\n        const { name, uuid } = currentResource.containerRequest;\n        let publishedPorts: PublishedPort[] = [];\n\n        if (currentResource.container && currentResource.container.publishedPorts) {\n            const ports = currentResource.container.publishedPorts;\n            publishedPorts = Object.keys(ports).map((port: string) => (ports[port]));\n        }\n\n        const showServiceMenu: boolean = isProcessRunning(currentResource) && !!publishedPorts.length;\n\n        let runAction;\n        if (isProcessRunnable(currentResource)) {\n            runAction = startProcess;\n        } else if (isProcessResumable(currentResource)) {\n            runAction = resumeOnHoldWorkflow;\n        }\n\n        return (\n            <Card\n                className={classes.root}\n                onClick={() => handleCardClick(uuid)}\n                data-cy='process-details-card'\n            >\n                <Grid\n                    container\n                    wrap='nowrap'\n                    className={classes.cardHeaderContainer}\n                >\n                    <CardHeader\n                        className={classes.cardHeader}\n                        title={\n                            <section className={classes.nameContainer}>\n                                <Typography\n                                    variant='h6'\n                                >\n                                    {name}\n  
                              </Typography>\n                                <section className={classes.buttonContainer}>\n                                    <div className={classNames(classes.runStatusContainer, showServiceMenu ? classes.runStatusContainerWithServiceButton : undefined)}>\n                                        {runAction !== undefined &&\n                                            <Button\n                                                data-cy=\"process-run-button\"\n                                                variant=\"contained\"\n                                                size=\"small\"\n                                                color=\"primary\"\n                                                className={classNames(classes.actionButton, classes.runButton)}\n                                                onClick={() => runAction && runAction(currentResource.containerRequest.uuid)}>\n                                                <StartIcon />\n                                                Run\n                                            </Button>}\n                                        {isProcessCancelable(currentResource) &&\n                                            <Button\n                                                data-cy=\"process-cancel-button\"\n                                                variant=\"contained\"\n                                                size=\"small\"\n                                                color=\"primary\"\n                                                className={classNames(classes.actionButton, classes.cancelButton)}\n                                                onClick={() => cancelProcess(currentResource.containerRequest.uuid)}>\n                                                <StopIcon />\n                                                Cancel\n                                            </Button>}\n                                        <ProcessStatus uuid={currentResource.containerRequest.uuid} />\n                                    </div>\n                                    {showServiceMenu && <ServiceMenu buttonClass={classNames(classes.actionButton, classes.serviceButton)} services={publishedPorts} />}\n                                </section>\n                            </section>\n                        }\n                    />\n                    {isSelected && <MultiselectToolbar toolbarClass={showServiceMenu ? classes.runningToolbarStyles : undefined} />}\n                </Grid>\n                <DescriptionPreview resource={currentResource.containerRequest} />\n            </Card>\n        );\n    })\n);\n"
  },
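  {
    "path": "services/workbench2/src/views-components/details-card/published-ports-sketch.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Editor's illustrative sketch -- not part of the Arvados source tree.\n// ProcessCard above flattens container.publishedPorts, a record keyed by\n// port number, into the PublishedPort[] that ServiceMenu consumes. The\n// helper below isolates that step. The field list is assumed from how\n// ServiceMenu and its test use the model; the real PublishedPort type\n// lives in models/container.\n\ntype PublishedPortSketch = {\n    access: string;       // ServiceMenu branches on 'public' | 'private'\n    label?: string;       // button/menu text, falling back to \"service\"\n    initial_url?: string; // target URL; a token is injected when private\n};\n\n// Object.values() is equivalent to the Object.keys(ports).map(port =>\n// ports[port]) pattern used in ProcessCard\nexport const flattenPublishedPorts = (\n    ports: Record<string, PublishedPortSketch> | undefined\n): PublishedPortSketch[] => (ports ? Object.values(ports) : []);\n"
  },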
  {
    "path": "services/workbench2/src/views-components/details-card/project-details-card.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Card, CardHeader, Typography, Tooltip, Grid } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { RootState } from 'store/store';\nimport { connect } from 'react-redux';\nimport { getResource } from 'store/resources/resources';\nimport { ProjectResource } from 'models/project';\nimport { FavoriteStar, PublicFavoriteStar } from 'views-components/favorite-star/favorite-star';\nimport { FreezeIcon } from 'components/icon/icon';\nimport { Resource } from 'models/resource';\nimport { Dispatch } from 'redux';\nimport { loadDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { MultiselectToolbar } from 'components/multiselect-toolbar/MultiselectToolbar';\nimport { setSelectedResourceUuid } from 'store/selected-resource/selected-resource-actions';\nimport { deselectAllOthers } from 'store/multiselect/multiselect-actions';\nimport { DescriptionPreview } from './description-preview';\n\ntype CssRules =\n    | 'root'\n    | 'cardHeaderContainer'\n    | 'cardHeader'\n    | 'nameSection'\n    | 'namePlate'\n    | 'faveIcon'\n    | 'frozenIcon'\n    | 'toolbarStyles';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        marginBottom: '1rem',\n        flex: '0 0 auto',\n        padding: 0,\n        minHeight: '3rem',\n    },\n    cardHeaderContainer: {\n        width: '100%',\n        display: 'flex',\n        flexDirection: 'row',\n        alignItems: 'flex-start',\n        justifyContent: 'space-between',\n    },\n    cardHeader: {\n        minWidth: '30rem',\n        padding: '0.2rem 0.4rem 0.2rem 1rem',\n    },\n    nameSection: {\n        display: 'flex',\n        flexDirection: 'row',\n        alignItems: 'center',\n    },\n    namePlate: {\n        display: 'flex',\n        flexDirection: 'row',\n        alignItems: 'center',\n        margin: 0,\n        minHeight: '2.7rem',\n    },\n    faveIcon: {\n        fontSize: '0.8rem',\n        margin: 'auto 0 0.2rem 0.3rem',\n        color: theme.palette.text.primary,\n    },\n    frozenIcon: {\n        fontSize: '0.5rem',\n        marginLeft: '0.3rem',\n        height: '1rem',\n        color: theme.palette.text.primary,\n    },\n    toolbarStyles: {\n        marginRight: '-0.5rem',\n    },\n});\n\nconst mapStateToProps = ({ auth, selectedResource, resources, properties }: RootState) => {\n    const currentResource = getResource(properties.currentRouteUuid)(resources);\n    const frozenByUser = currentResource && getResource((currentResource as ProjectResource).frozenByUuid as string)(resources);\n    const frozenByFullName = frozenByUser && (frozenByUser as Resource & { fullName: string }).fullName;\n    const isSelected = selectedResource.selectedResourceUuid === properties.currentRouteUuid;\n\n    return {\n        isAdmin: auth.user?.isAdmin,\n        currentResource,\n        frozenByFullName,\n        isSelected,\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    handleCardClick: (uuid: string) => {\n        dispatch<any>(loadDetailsPanel(uuid));\n        dispatch<any>(setSelectedResourceUuid(uuid));\n        dispatch<any>(deselectAllOthers(uuid));\n    },\n});\n\ntype ProjectCardProps = 
WithStyles<CssRules> & {\n    currentResource: ProjectResource;\n    frozenByFullName: string | undefined;\n    isAdmin: boolean;\n    isSelected: boolean;\n    handleCardClick: (resource: any) => void;\n};\n\nexport const ProjectCard = connect(\n    mapStateToProps,\n    mapDispatchToProps\n)(\n    withStyles(styles)((props: ProjectCardProps) => {\n        const { classes, currentResource, frozenByFullName, handleCardClick, isSelected } = props;\n        const { name, uuid } = currentResource as ProjectResource;\n\n        return (\n            <Card\n                className={classes.root}\n                onClick={() => handleCardClick(uuid)}\n                data-cy='project-details-card'\n            >\n                <Grid\n                    container\n                    wrap='nowrap'\n                    className={classes.cardHeaderContainer}\n                >\n                    <CardHeader\n                        className={classes.cardHeader}\n                        title={\n                            <section className={classes.nameSection}>\n                                <section className={classes.namePlate}>\n                                    <Typography\n                                        variant='h6'\n                                        style={{ marginRight: '1rem' }}\n                                    >\n                                        {name}\n                                    <FavoriteStar\n                                        className={classes.faveIcon}\n                                        resourceUuid={currentResource.uuid}\n                                        />\n                                    <PublicFavoriteStar\n                                        className={classes.faveIcon}\n                                        resourceUuid={currentResource.uuid}\n                                        />\n                                    {!!frozenByFullName && (\n                                        <Tooltip\n                                        className={classes.frozenIcon}\n                                        disableFocusListener\n                                        title={<span>Project was frozen by {frozenByFullName}</span>}\n                                        >\n                                            <span><FreezeIcon style={{ fontSize: '1.25rem' }} /></span>\n                                        </Tooltip>\n                                    )}\n                                    </Typography>\n                                </section>\n                            </section>\n                        }\n                    />\n                    {isSelected && <MultiselectToolbar toolbarClass={classes.toolbarStyles} />}\n                </Grid>\n                <DescriptionPreview resource={currentResource} />\n            </Card>\n        );\n    })\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-card/service-menu.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Provider } from \"react-redux\";\nimport { combineReducers, createStore } from \"redux\";\nimport { ServiceMenu } from './service-menu';\n\nconst apiToken = \"v2/xxxxx-gj3su-000000000000000/00000000000000000000000000000000000000000000000000\";\n\ndescribe('ServiceMenu', () => {\n    let store;\n\n    beforeEach(() => {\n        const initialAuthState = { apiToken: apiToken };\n\n        store = createStore(combineReducers({\n            auth: (state = initialAuthState, action) => state,\n        }));\n\n        // Stub the global window.open\n        cy.window().then((win) => {\n            cy.stub(win, 'open').as('open');\n        });\n    });\n\n    it(\"displays single service\", () => {\n        const service = {\n            access: \"public\",\n            label: \"My Service\",\n            initial_url: \"http://example.com/\",\n        };\n\n        cy.mount(\n            <Provider store={store}>\n                <ServiceMenu\n                    buttonClass=\"serviceButton\"\n                    services={[service]}\n                />\n            </Provider>\n        );\n\n        // Verify button has correct text\n        cy.get('.serviceButton').should('have.text', `Connect to ${service.label}`);\n\n        // Click button\n        cy.get('.serviceButton').click();\n        // Verify correct URL opened\n        cy.get('@open').should(\"have.been.calledWith\", service.initial_url);\n    });\n\n    it(\"displays multiple services\", () => {\n        const services = [{\n            access: \"public\",\n            label: \"Foo Service\",\n            initial_url: \"http://example.com/foo\",\n            expected_url: \"http://example.com/foo\",\n        }, {\n            access: \"private\",\n            label: \"Bar Service\",\n            initial_url: \"http://example.com/bar\",\n            expected_url: `http://example.com/bar?arvados_api_token=${apiToken}`,\n        }, {\n            access: \"private\",\n            label: \"A Secret Third Service\",\n            initial_url: \"http://example.com/bar?existing=something\",\n            expected_url: `http://example.com/bar?arvados_api_token=${apiToken}&existing=something`,\n        }];\n\n        cy.mount(\n            <Provider store={store}>\n                <ServiceMenu\n                    buttonClass=\"serviceButton\"\n                    services={services}\n                />\n            </Provider>\n        );\n\n        // Verify button has correct text\n        cy.get('.serviceButton').should('have.text', \"Connect to service\");\n\n        // Open menu and verify service list contains the correct items\n        cy.get('.serviceButton').click();\n        cy.get('#service-menu ul li')\n            .should('have.length', services.length)\n            .each((el, i) => {\n                // Click on each service and verify opened url\n                cy.wrap(el)\n                    .should('have.text', services[i].label)\n                    .click();\n                cy.get('@open').should(\"have.been.calledWith\", services[i].expected_url);\n            });\n    });\n});\n"
  },
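  {
    "path": "services/workbench2/src/views-components/details-card/service-menu.token-sketch.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Editor's illustrative sketch -- not part of the Arvados source tree.\n// The test above pins down how private service URLs are rewritten: the\n// user token is inserted as the first query parameter, ahead of any\n// existing ones. A minimal pure function with that behavior is sketched\n// below under a hypothetical name; the real helper is the async\n// injectTokenParam in common/url, whose implementation may differ.\n\nexport const injectTokenParamSketch = (initialUrl: string, token: string): string => {\n    const [base, query] = initialUrl.split('?');\n    const tokenParam = `arvados_api_token=${token}`;\n    // 'http://example.com/bar' -> 'http://example.com/bar?arvados_api_token=TOKEN'\n    // 'http://example.com/bar?existing=something' ->\n    //     'http://example.com/bar?arvados_api_token=TOKEN&existing=something'\n    return query ? `${base}?${tokenParam}&${query}` : `${base}?${tokenParam}`;\n};\n"
  },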
  {
    "path": "services/workbench2/src/views-components/details-card/service-menu.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { RootState } from 'store/store';\nimport { connect } from 'react-redux';\nimport { Dispatch } from 'redux';\nimport { Button, Menu, MenuItem, Tooltip } from '@mui/material';\nimport { ExpandIcon } from 'components/icon/icon';\nimport { PublishedPort } from 'models/container';\nimport { showErrorSnackbar } from 'store/snackbar/snackbar-actions';\nimport { injectTokenParam } from 'common/url';\n\nconst mapStateToProps = ({ auth }: RootState) => ({\n    userToken: auth.apiToken,\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    showErrorSnackbar: (message: string) => dispatch<any>(showErrorSnackbar(message)),\n});\n\ntype ServiceMenuProps = {\n    services: PublishedPort[];\n    buttonClass?: string;\n    userToken: string | undefined;\n    showErrorSnackbar: (message: string) => void;\n};\n\nexport const ServiceMenu = connect(\n    mapStateToProps,\n    mapDispatchToProps\n)(({ services, buttonClass, userToken, showErrorSnackbar }: ServiceMenuProps) => {\n    const [anchorEl, setAnchorEl] = React.useState<null | HTMLElement>(null);\n    const open = Boolean(anchorEl);\n    const handleOpen = (event: React.MouseEvent<HTMLButtonElement>) => {\n        setAnchorEl(event.currentTarget);\n    };\n    const handleClose = () => {\n        setAnchorEl(null);\n    };\n    const handleClick = (service: PublishedPort) => async () => {\n        handleClose();\n\n        if (!service.initial_url) {\n            showErrorSnackbar(\"Service URL not set\");\n            return;\n        }\n\n        if (service.access === 'public') {\n            // Open public links as-is\n            window.open(service.initial_url, \"_blank\", \"noopener\");\n        } else if (service.access === 'private') {\n            // Open private links with user token\n            if (userToken) {\n                try {\n                    const url = await injectTokenParam(service.initial_url, userToken);\n                    window.open(url, \"_blank\", \"noopener\");\n                } catch(e) {\n                    showErrorSnackbar(\"Failed to open service: \" + e.message);\n                }\n            } else {\n                showErrorSnackbar(\"User token not available\");\n            }\n        } else {\n            showErrorSnackbar(\"Published port access value not valid\");\n        }\n    };\n\n    if (services.length) {\n        if (services.length === 1) {\n            const service = services[0];\n\n            return (\n                <Tooltip arrow disableInteractive title={`Connect to ${service.label || \"service\"}`}>\n                    <Button\n                        className={buttonClass}\n                        variant=\"contained\"\n                        size=\"small\"\n                        color=\"primary\"\n                        data-cy=\"service-button\"\n                        id=\"service-button\"\n                        onClick={handleClick(service)}\n                    >\n                        <span>Connect to {service.label || \"service\"}</span>\n                    </Button>\n                </Tooltip>\n            );\n        } else if (services.length > 1) {\n            return <>\n                <Button\n                    className={buttonClass}\n                    variant=\"contained\"\n                    size=\"small\"\n                    color=\"primary\"\n                    
data-cy=\"service-button\"\n                    id=\"service-button\"\n                    aria-controls={open ? 'service-menu' : undefined}\n                    aria-haspopup=\"true\"\n                    aria-expanded={open ? 'true' : undefined}\n                    onClick={handleOpen}\n                    endIcon={<ExpandIcon />}\n                >\n                    <span>Connect to service</span>\n                </Button>\n                <Menu\n                    id=\"service-menu\"\n                    anchorEl={anchorEl}\n                    open={open}\n                    onClose={handleClose}\n                    MenuListProps={{\n                        'aria-labelledby': 'service-button',\n                    }}\n                >\n                    {services.map((service: PublishedPort) => (\n                        <MenuItem onClick={handleClick(service)}>\n                            <span>{service.label}</span>\n                        </MenuItem>\n                    ))}\n                </Menu>\n            </>;\n        }\n    }\n\n    // Return empty fragment when no services\n    return <></>;\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-card/user-details-card.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Card, CardHeader, Typography, Grid } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { UserResource } from 'models/user';\nimport { connect } from 'react-redux';\nimport { getResource } from 'store/resources/resources';\nimport { UserResourceAccountStatus } from 'views-components/data-explorer/renderers';\nimport { MultiselectToolbar } from 'components/multiselect-toolbar/MultiselectToolbar';\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport { loadDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { setSelectedResourceUuid } from 'store/selected-resource/selected-resource-actions';\nimport { deselectAllOthers } from 'store/multiselect/multiselect-actions';\nimport { Resource } from 'models/resource';\nimport { ProjectResource } from 'models/project';\n\ntype CssRules = 'root' | 'cardHeaderContainer' | 'cardHeader' | 'userNameContainer' | 'accountStatusSection';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        marginBottom: '1rem',\n        flex: '0 0 auto',\n        padding: 0,\n        minHeight: '3rem',\n    },\n    userNameContainer: {\n        display: 'flex',\n        alignItems: 'center',\n        minHeight: '2.7rem',\n    },\n    cardHeaderContainer: {\n        width: '100%',\n        display: 'flex',\n        flexDirection: 'row',\n        alignItems: 'flex-start',\n        justifyContent: 'space-between',\n    },\n    cardHeader: {\n        minWidth: '30rem',\n        padding: '0.2rem 0.4rem 0.2rem 1rem',\n    },\n    accountStatusSection: {\n        display: 'flex',\n        flexDirection: 'row',\n        alignItems: 'center',\n        paddingLeft: '1rem',\n    },\n});\n\nconst mapStateToProps = ({ auth, selectedResource, resources, properties }: RootState) => {\n    const currentResource = getResource(properties.currentRouteUuid)(resources);\n    const frozenByUser = currentResource && getResource((currentResource as ProjectResource).frozenByUuid as string)(resources);\n    const frozenByFullName = frozenByUser && (frozenByUser as Resource & { fullName: string }).fullName;\n    const isSelected = selectedResource.selectedResourceUuid === properties.currentRouteUuid;\n\n    return {\n        isAdmin: auth.user?.isAdmin,\n        currentResource,\n        frozenByFullName,\n        isSelected,\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    handleCardClick: (uuid: string) => {\n        dispatch<any>(loadDetailsPanel(uuid));\n        dispatch<any>(setSelectedResourceUuid(uuid));\n        dispatch<any>(deselectAllOthers(uuid));\n    },\n});\n\ntype UserCardProps = WithStyles<CssRules> & {\n    currentResource: UserResource;\n    isAdmin: boolean;\n    isSelected: boolean;\n    handleCardClick: (resource: any) => void;\n};\n\nexport const UserCard = connect(\n    mapStateToProps,\n    mapDispatchToProps\n)(\n    withStyles(styles)((props: UserCardProps) => {\n        const { classes, currentResource, handleCardClick, isSelected } = props;\n        const { fullName, uuid } = currentResource as UserResource & { fullName: string };\n\n        return (\n            <Card\n               
 className={classes.root}\n                onClick={() => handleCardClick(uuid)}\n                data-cy='user-details-card'\n            >\n                <Grid\n                    container\n                    wrap='nowrap'\n                    className={classes.cardHeaderContainer}\n                >\n                    <CardHeader\n                        className={classes.cardHeader}\n                        title={\n                            <section className={classes.userNameContainer}>\n                                <Typography\n                                    variant='h6'\n                                >\n                                    {fullName}\n                                </Typography>\n                                <section className={classes.accountStatusSection}>\n                                    {!currentResource.isActive && (\n                                        <Typography>\n                                            <UserResourceAccountStatus uuid={uuid} />\n                                        </Typography>\n                                    )}\n                                </section>\n                            </section>\n                        }\n                    />\n                    {isSelected && <MultiselectToolbar />}\n                </Grid>\n            </Card>\n        );\n    })\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-card/workflow-details-card.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Card, CardHeader, Typography, Grid } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { connect } from 'react-redux';\nimport { getResource } from 'store/resources/resources';\nimport { MultiselectToolbar } from 'components/multiselect-toolbar/MultiselectToolbar';\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport { loadDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { setSelectedResourceUuid } from 'store/selected-resource/selected-resource-actions';\nimport { deselectAllOthers } from 'store/multiselect/multiselect-actions';\nimport { WorkflowResource } from 'models/workflow';\nimport { DescriptionPreview } from './description-preview';\n\ntype CssRules = 'root' | 'cardHeaderContainer' | 'cardHeader' | 'nameContainer';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        width: '100%',\n        marginBottom: '1rem',\n        flex: '0 0 auto',\n        padding: 0,\n        minHeight: '3rem',\n    },\n    nameContainer: {\n        display: 'flex',\n        alignItems: 'center',\n        minHeight: '2.7rem',\n    },\n    cardHeaderContainer: {\n        width: '100%',\n        display: 'flex',\n        flexDirection: 'row',\n        alignItems: 'flex-start',\n        justifyContent: 'space-between',\n    },\n    cardHeader: {\n        minWidth: '30rem',\n        padding: '0.2rem 0.4rem 0.2rem 1rem',\n    },\n});\n\nconst mapStateToProps = ({ auth, selectedResource, resources, properties }: RootState) => {\n    const currentResource = getResource(properties.currentRouteUuid)(resources);\n    const isSelected = selectedResource.selectedResourceUuid === properties.currentRouteUuid;\n\n    return {\n        isAdmin: auth.user?.isAdmin,\n        currentResource,\n        isSelected,\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    handleCardClick: (uuid: string) => {\n        dispatch<any>(loadDetailsPanel(uuid));\n        dispatch<any>(setSelectedResourceUuid(uuid));\n        dispatch<any>(deselectAllOthers(uuid));\n    },\n});\n\ntype WorkflowCardProps = WithStyles<CssRules> & {\n    currentResource: WorkflowResource;\n    isSelected: boolean;\n    handleCardClick: (resource: any) => void;\n};\n\nexport const WorkflowCard = connect(\n    mapStateToProps,\n    mapDispatchToProps\n)(\n    withStyles(styles)((props: WorkflowCardProps) => {\n        const { classes, currentResource, handleCardClick, isSelected } = props;\n        const { name, uuid } = currentResource;\n\n        return (\n            <Card\n                className={classes.root}\n                onClick={() => handleCardClick(uuid)}\n                data-cy='workflow-details-card'\n            >\n                <Grid\n                    container\n                    wrap='nowrap'\n                    className={classes.cardHeaderContainer}\n                >\n                    <CardHeader\n                        className={classes.cardHeader}\n                        title={\n                            <section className={classes.nameContainer}>\n                                <Typography\n                                    variant='h6'\n          
                      >\n                                    {name}\n                                </Typography>\n                            </section>\n                        }\n                    />\n                    {isSelected && <MultiselectToolbar />}\n                </Grid>\n                <DescriptionPreview resource={currentResource} />\n            </Card>\n        );\n    })\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-panel/collection-details.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CollectionIcon, RenameIcon } from 'components/icon/icon';\nimport { CollectionResource } from 'models/collection';\nimport { DetailsData } from \"./details-data\";\nimport { RootState } from 'store/store';\nimport { filterResources, getResource, ResourcesState } from 'store/resources/resources';\nimport { connect } from 'react-redux';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Button, Grid, ListItem, Typography, Tooltip, Link as ButtonLink } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { formatDateTime, formatFileSize } from 'common/formatters';\nimport { UserNameFromID } from '../data-explorer/renderers';\nimport { Dispatch } from 'redux';\nimport { navigateTo } from 'store/navigation/navigation-action';\nimport { openContextMenuAndSelect } from 'store/context-menu/context-menu-actions';\nimport { openCollectionUpdateDialog } from 'store/collections/collection-update-actions';\nimport { resourceIsFrozen } from 'common/frozen-resources';\nimport { resourceToMenuKind } from 'common/resource-to-menu-kind';\nimport { getPropertyChips } from 'views-components/property-chips/get-property-chips';\nimport { ResourceWithName, ResponsiblePerson } from 'views-components/data-explorer/renderers';\nimport { DetailsAttribute } from 'components/details-attribute/details-attribute';\n\nexport type CssRules = 'versionBrowserHeader'\n    | 'versionBrowserItem'\n    | 'versionBrowserField'\n    | 'editButton'\n    | 'editIcon'\n    | 'tag';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    versionBrowserHeader: {\n        textAlign: 'center',\n        fontWeight: 'bold',\n    },\n    versionBrowserItem: {\n        flexWrap: 'wrap',\n    },\n    versionBrowserField: {\n        textAlign: 'center',\n    },\n    editIcon: {\n        paddingRight: theme.spacing(0.5),\n        fontSize: '1.125rem',\n    },\n    editButton: {\n        boxShadow: 'none',\n        padding: '2px 10px 2px 5px',\n        fontSize: '0.75rem'\n    },\n    tag: {\n        marginRight: theme.spacing(0.5),\n        marginBottom: theme.spacing(0.5)\n    },\n});\n\nexport class CollectionDetails extends DetailsData<CollectionResource> {\n\n    getIcon(className?: string) {\n        return <CollectionIcon className={className} />;\n    }\n\n    getTabLabels() {\n        return ['Details', 'Versions'];\n    }\n\n    getDetails({tabNr}) {\n        switch (tabNr) {\n            case 0:\n                return this.getCollectionInfo();\n            case 1:\n                return this.getVersionBrowser();\n            default:\n                return <div />;\n        }\n    }\n\n    private getCollectionInfo() {\n        return <CollectionInfo />;\n    }\n\n    private getVersionBrowser() {\n        return <CollectionVersionBrowser />;\n    }\n}\n\ninterface CollectionInfoDataProps {\n    resources: ResourcesState;\n    currentCollection: CollectionResource | undefined;\n}\n\ninterface CollectionInfoDispatchProps {\n    editCollection: (collection: CollectionResource | undefined) => void;\n}\n\nconst ciMapStateToProps = (state: RootState): CollectionInfoDataProps => {\n    return {\n        resources: state.resources,\n        currentCollection: getResource<CollectionResource>(state.detailsPanel.resourceUuid)(state.resources),\n    };\n};\n\nconst 
ciMapDispatchToProps = (dispatch: Dispatch): CollectionInfoDispatchProps => ({\n    editCollection: (collection: CollectionResource) =>\n        dispatch<any>(openCollectionUpdateDialog({\n            uuid: collection.uuid,\n            name: collection.name,\n            description: collection.description,\n            properties: collection.properties,\n            storageClassesDesired: collection.storageClassesDesired,\n        })),\n});\n\ntype CollectionInfoProps = CollectionInfoDataProps & CollectionInfoDispatchProps & WithStyles<CssRules>;\n\nconst CollectionInfo = withStyles(styles)(\n    connect(ciMapStateToProps, ciMapDispatchToProps)(\n        ({ currentCollection, resources, editCollection, classes }: CollectionInfoProps) =>\n            currentCollection !== undefined\n                ? <div>\n                    <Button\n                        disabled={resourceIsFrozen(currentCollection, resources)}\n                        className={classes.editButton} variant='contained'\n                        data-cy='details-panel-edit-btn' color='primary' size='small'\n                        onClick={() => editCollection(currentCollection)}>\n                        <RenameIcon className={classes.editIcon} /> Edit\n                    </Button>\n                    <CollectionDetailsAttributes classes={classes} twoCol={false} item={currentCollection} />\n                </div>\n                : <div />\n    )\n);\n\ninterface CollectionVersionBrowserProps {\n    currentCollection: CollectionResource | undefined;\n    versions: CollectionResource[];\n}\n\ninterface CollectionVersionBrowserDispatchProps {\n    showVersion: (c: CollectionResource) => void;\n    handleContextMenu: (event: React.MouseEvent<HTMLElement>, collection: CollectionResource) => void;\n}\n\nconst vbMapStateToProps = (state: RootState): CollectionVersionBrowserProps => {\n    const currentCollection = getResource<CollectionResource>(state.detailsPanel.resourceUuid)(state.resources);\n    const versions = (currentCollection\n        && filterResources(rsc =>\n            (rsc as CollectionResource).currentVersionUuid === currentCollection.currentVersionUuid)(state.resources)\n                .sort((a: CollectionResource, b: CollectionResource) => b.version - a.version) as CollectionResource[])\n        || [];\n    return { currentCollection, versions };\n};\n\nconst vbMapDispatchToProps = () =>\n    (dispatch: Dispatch): CollectionVersionBrowserDispatchProps => ({\n        showVersion: (collection) => dispatch<any>(navigateTo(collection.uuid)),\n        handleContextMenu: (event: React.MouseEvent<HTMLElement>, collection: CollectionResource) => {\n            const menuKind = dispatch<any>(resourceToMenuKind(collection.uuid));\n            if (collection && menuKind) {\n                dispatch<any>(openContextMenuAndSelect(event, {\n                    name: collection.name,\n                    uuid: collection.uuid,\n                    description: collection.description,\n                    storageClassesDesired: collection.storageClassesDesired,\n                    ownerUuid: collection.ownerUuid,\n                    isTrashed: collection.isTrashed,\n                    kind: collection.kind,\n                    menuKind\n                }));\n            }\n        },\n    });\n\nconst CollectionVersionBrowser = withStyles(styles)(\n    connect(vbMapStateToProps, vbMapDispatchToProps)(\n        ({ currentCollection, versions, showVersion, handleContextMenu, classes }: CollectionVersionBrowserProps & 
CollectionVersionBrowserDispatchProps & WithStyles<CssRules>) => {\n            return <div data-cy=\"collection-version-browser\">\n                <Grid container>\n                    <Grid item xs={2}>\n                        <Typography variant=\"caption\" className={classes.versionBrowserHeader}>\n                            Nr\n                        </Typography>\n                    </Grid>\n                    <Grid item xs={4}>\n                        <Typography variant=\"caption\" className={classes.versionBrowserHeader}>\n                            Size\n                        </Typography>\n                    </Grid>\n                    <Grid item xs={6}>\n                        <Typography variant=\"caption\" className={classes.versionBrowserHeader}>\n                            Date\n                        </Typography>\n                    </Grid>\n                { versions.map(item => {\n                    const isSelectedVersion = !!(currentCollection && currentCollection.uuid === item.uuid);\n                    return (\n                        <ListItem button style={{padding: '4px'}}\n                            data-cy={`collection-version-browser-select-${item.version}`}\n                            key={item.version}\n                            onClick={e => showVersion(item)}\n                            onContextMenu={event => handleContextMenu(event, item)}\n                            selected={isSelectedVersion}\n                            className={classes.versionBrowserItem}>\n                            <Grid item xs={2}>\n                                <Typography variant=\"caption\" className={classes.versionBrowserField}>\n                                    {item.version}\n                                </Typography>\n                            </Grid>\n                            <Grid item xs={4}>\n                                <Typography variant=\"caption\" className={classes.versionBrowserField}>\n                                    {formatFileSize(item.fileSizeTotal)}\n                                </Typography>\n                            </Grid>\n                            <Grid item xs={6}>\n                                <Typography variant=\"caption\" className={classes.versionBrowserField}>\n                                    {formatDateTime(item.modifiedAt)}\n                                </Typography>\n                            </Grid>\n                            <Grid item xs={12}>\n                                <Typography variant=\"caption\" className={classes.versionBrowserField}>\n                                    Modified by: <UserNameFromID uuid={item.modifiedByUserUuid} />\n                                </Typography>\n                            </Grid>\n                        </ListItem>\n                    );\n                })}\n                </Grid>\n            </div>;\n        }));\n\ninterface CollectionDetailsProps {\n    item: CollectionResource;\n    classes?: any;\n    twoCol?: boolean;\n    showVersionBrowser?: () => void;\n}\n\nexport const CollectionDetailsAttributes = (props: CollectionDetailsProps) => {\n    const item = props.item;\n    const classes = props.classes || { label: '', value: '', button: '', tag: '' };\n    const isOldVersion = item && item.currentVersionUuid !== item.uuid;\n    const mdSize = props.twoCol ? 
6 : 12;\n    const showVersionBrowser = props.showVersionBrowser;\n    const responsiblePersonRef = React.useRef(null);\n    return <Grid container>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label={isOldVersion ? \"This version's UUID\" : \"Collection UUID\"}\n                linkToUuid={item.uuid} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label={isOldVersion ? \"This version's PDH\" : \"Portable data hash\"}\n                linkToUuid={item.portableDataHash} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label='Owner' linkToUuid={item.ownerUuid}\n                uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />} />\n        </Grid>\n        <div data-cy=\"responsible-person-wrapper\" ref={responsiblePersonRef}>\n            <Grid item xs={12} md={12}>\n                <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                    label='Responsible person' linkToUuid={item.ownerUuid}\n                    uuidEnhancer={(uuid: string) => <ResponsiblePerson uuid={item.uuid} parentRef={responsiblePersonRef.current} />} />\n            </Grid>\n        </div>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label='Head version'\n                value={isOldVersion ? undefined : 'this one'}\n                linkToUuid={isOldVersion ? item.currentVersionUuid : undefined} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute\n                classLabel={classes.label} classValue={classes.value}\n                label='Version number'\n                value={showVersionBrowser !== undefined\n                    ? <Tooltip title=\"Open version browser\"><ButtonLink underline='none' className={classes.button} onClick={() => showVersionBrowser()}>\n                        {<span data-cy='collection-version-number'>{item.version}</span>}\n                    </ButtonLink></Tooltip>\n                    : item.version\n                }\n            />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute label='Created at' value={formatDateTime(item.createdAt)} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute label='Last modified' value={formatDateTime(item.modifiedAt)} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label='Number of files' value={<span data-cy='collection-file-count'>{item.fileCount}</span>} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label='Content size' value={formatFileSize(item.fileSizeTotal)} />\n        </Grid>\n        <Grid item xs={12} md={mdSize}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label='Storage classes' value={item.storageClassesDesired ? 
item.storageClassesDesired.join(', ') : \"default\"} />\n        </Grid>\n\n        {/*\n            NOTE: The property list should be kept at the bottom, because it spans\n            the entire available width, regardless of the twoCol prop.\n          */}\n        <Grid item xs={12} md={12}>\n            <DetailsAttribute classLabel={classes.label} classValue={classes.value}\n                label='Properties' />\n                {getPropertyChips(item, classes)}\n        </Grid>\n    </Grid>;\n};\n\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-panel/details-data.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DetailsResource } from \"models/details\";\n\ninterface GetDetailsParams {\n  tabNr?: number\n  showPreview?: boolean\n}\n\nexport abstract class DetailsData<T extends DetailsResource = DetailsResource> {\n    constructor(protected item: T) { }\n\n    getTitle(): string {\n        return this.item.name || 'Projects';\n    }\n\n    getTabLabels(): string[] {\n        return ['Details'];\n    }\n\n    abstract getIcon(className?: string): React.ReactElement<any>;\n    abstract getDetails({tabNr, showPreview}: GetDetailsParams): React.ReactElement<any>;\n}\n"
  },
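  {
    "path": "services/workbench2/src/views-components/details-panel/details-data.example-sketch.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Editor's illustrative sketch -- not part of the Arvados source tree.\n// DetailsData above is the plug-in point for the details panel: getItem()\n// in details-panel.tsx maps each resource kind to a subclass. The\n// hypothetical subclass below shows the minimal contract a new resource\n// type would implement, following the shape of CollectionDetails.\n\nimport React from 'react';\nimport { DefaultIcon } from 'components/icon/icon';\nimport { DetailsResource } from 'models/details';\nimport { DetailsData } from './details-data';\n\nexport class ExampleDetails extends DetailsData<DetailsResource> {\n    getIcon(className?: string) {\n        return <DefaultIcon className={className} />;\n    }\n\n    // One label per tab; the panel renders a <Tab> for each\n    getTabLabels() {\n        return ['Details', 'Extras'];\n    }\n\n    // tabNr indexes into getTabLabels(); default to the first tab\n    getDetails({ tabNr }: { tabNr?: number }) {\n        switch (tabNr) {\n            case 1:\n                return <div>Second tab content</div>;\n            default:\n                return <div>{this.item.name}</div>;\n        }\n    }\n}\n"
  },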
  {
    "path": "services/workbench2/src/views-components/details-panel/details-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { IconButton, Tabs, Tab, Typography, Grid, Tooltip } from '@mui/material';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { Transition } from 'react-transition-group';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { CloseIcon } from 'components/icon/icon';\nimport { EmptyResource } from 'models/empty';\nimport { Dispatch } from \"redux\";\nimport { ResourceKind, isResourceResource } from \"models/resource\";\nimport { ProjectDetails } from \"./project-details\";\nimport { RootProjectDetails } from './root-project-details';\nimport { CollectionDetails } from \"./collection-details\";\nimport { ProcessDetails } from \"./process-details\";\nimport { EmptyDetails } from \"./empty-details\";\nimport { WorkflowDetails } from \"./workflow-details\";\nimport { DetailsData } from \"./details-data\";\nimport { DetailsResource } from \"models/details\";\nimport { Config } from 'common/config';\nimport { isInlineFileUrlSafe } from \"../context-menu/actions/helpers\";\nimport { getResource } from 'store/resources/resources';\nimport { toggleDetailsPanel, SLIDE_TIMEOUT, openDetailsPanel } from 'store/details-panel/details-panel-action';\nimport { FileDetails } from 'views-components/details-panel/file-details';\nimport { getNode } from 'models/tree';\nimport { resourceIsFrozen } from 'common/frozen-resources';\nimport { CLOSE_DRAWER } from 'store/details-panel/details-panel-action';\n\ntype CssRules =\n    | \"root\"\n    | \"container\"\n    | \"headerContainer\"\n    | \"headerTitleWrapper\"\n    | \"headerIconWrapper\"\n    | \"headerIcon\"\n    | \"tabContainerWrapper\"\n    | \"tabContainer\"\n    | \"tab\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        background: theme.palette.background.paper,\n        height: '100%',\n        overflow: 'hidden',\n    },\n    container: {\n        maxWidth: 'none',\n    },\n    headerContainer: {\n        color: theme.palette.grey[\"600\"],\n        margin: `${theme.spacing(1)} 0`,\n        textAlign: 'center',\n    },\n    headerIconWrapper: {\n        margin: '0 10px',\n    },\n    headerTitleWrapper: {\n        overflow: 'hidden',\n    },\n    headerIcon: {\n        fontSize: '2.125rem',\n    },\n    tabContainerWrapper: {\n        // Grid container wrapper prevents horizontal overflow\n        // Flex styles prevent unscrollable vertical overflow\n        overflowY: 'scroll',\n        flexGrow: 1,\n        flexShrink: 1,\n        flexBasis: 'inherit',\n    },\n    tabContainer: {\n        overflow: 'auto',\n        padding: theme.spacing(1),\n    },\n    tab: {\n        borderBottom: `1px solid ${theme.palette.grey[300]}`,\n    },\n});\n\nconst EMPTY_RESOURCE: EmptyResource = { kind: undefined, name: 'Projects' };\n\nconst getItem = (res: DetailsResource, pathName: string): DetailsData => {\n    if ('kind' in res) {\n        switch (res.kind) {\n            case ResourceKind.PROJECT:\n                return new ProjectDetails(res);\n            case ResourceKind.COLLECTION:\n                return new CollectionDetails(res);\n            case ResourceKind.PROCESS:\n                return new ProcessDetails(res);\n            case 
ResourceKind.WORKFLOW:\n                return new WorkflowDetails(res);\n            case ResourceKind.USER:\n                if(pathName.includes('projects')) {\n                    return new RootProjectDetails(res);\n                }\n                return new EmptyDetails(EMPTY_RESOURCE);\n            default:\n                return new EmptyDetails(res as EmptyResource);\n        }\n    } else {\n        return new FileDetails(res);\n    }\n};\n\nconst mapStateToProps = ({ auth, detailsPanel, resources, collectionPanelFiles, selectedResource, properties, router }: RootState) => {\n    const resource = getResource(selectedResource.selectedResourceUuid ?? properties.currentRouteUuid)(resources) as DetailsResource | undefined;\n    const file = resource\n        ? undefined\n        : getNode(detailsPanel.resourceUuid)(collectionPanelFiles);\n\n    let isFrozen = false;\n    if (isResourceResource(resource)) {\n        isFrozen = resourceIsFrozen(resource, resources);\n    }\n\n    return {\n        isFrozen,\n        authConfig: auth.config,\n        isOpened: detailsPanel.isOpened,\n        tabNr: detailsPanel.tabNr,\n        res: resource || (file && file.value) || EMPTY_RESOURCE,\n        pathname: router.location ? router.location.pathname : \"\",\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    onCloseDrawer: (currentItemId) => {\n        dispatch<any>(toggleDetailsPanel(currentItemId));\n    },\n    setActiveTab: (tabNr: number) => {\n        dispatch<any>(openDetailsPanel(undefined, tabNr));\n    },\n});\n\nexport interface DetailsPanelDataProps {\n    onCloseDrawer: (currentItemId) => void;\n    setActiveTab: (tabNr: number) => void;\n    authConfig: Config;\n    isOpened: boolean;\n    tabNr: number;\n    res: DetailsResource;\n    isFrozen: boolean;\n    pathname: string;\n}\n\ntype DetailsPanelProps = DetailsPanelDataProps & WithStyles<CssRules>;\n\nexport const DetailsPanel = withStyles(styles)(\n    connect(mapStateToProps, mapDispatchToProps)(\n        class extends React.Component<DetailsPanelProps> {\n            shouldComponentUpdate(nextProps: DetailsPanelProps) {\n                if ('etag' in nextProps.res && 'etag' in this.props.res &&\n                    nextProps.res.etag === this.props.res.etag &&\n                    nextProps.isOpened === this.props.isOpened &&\n                    nextProps.tabNr === this.props.tabNr) {\n                    return false;\n                }\n                return true;\n            }\n\n            handleChange = (event: any, value: number) => {\n                this.props.setActiveTab(value);\n            }\n\n            render() {\n                const { classes, isOpened } = this.props;\n                return (\n                    <Grid\n                        container\n                        direction=\"column\"\n                        className={classes.root}>\n                        <Transition\n                            in={isOpened}\n                            timeout={SLIDE_TIMEOUT}\n                            unmountOnExit>\n                            {isOpened ? 
this.renderContent() : <div />}\n                        </Transition>\n                    </Grid>\n                );\n            }\n\n            renderContent() {\n                const { classes, onCloseDrawer, res, tabNr, authConfig, pathname } = this.props;\n                let shouldShowInlinePreview = false;\n                if (!('kind' in res)) {\n                    shouldShowInlinePreview = isInlineFileUrlSafe(\n                        res ? res.url : \"\",\n                        authConfig.keepWebServiceUrl,\n                        authConfig.keepWebInlineServiceUrl\n                    ) || authConfig.clusterConfig.Collections.TrustAllContent;\n                }\n\n                const item = getItem(res, pathname);\n                return (\n                    <Grid\n                        data-cy='details-panel'\n                        container\n                        direction=\"column\"\n                        item\n                        xs\n                        className={classes.container} >\n                        <Grid\n                            item\n                            className={classes.headerContainer}\n                            container\n                            alignItems='center'\n                            justifyContent='space-between'\n                            wrap=\"nowrap\">\n                            <Grid item className={classes.headerIconWrapper}>\n                                {item.getIcon(classes.headerIcon)}\n                            </Grid>\n                            <Grid item className={classes.headerTitleWrapper}>\n                                <Tooltip title={item.getTitle()}>\n                                    <Typography variant='h6' noWrap>\n                                        {item.getTitle()}\n                                    </Typography>\n                                </Tooltip>\n                            </Grid>\n                            <Grid item>\n                                <IconButton data-cy=\"close-details-btn\" color=\"inherit\" onClick={()=>onCloseDrawer(CLOSE_DRAWER)} size=\"large\">\n                                    <CloseIcon />\n                                </IconButton>\n                            </Grid>\n                        </Grid>\n                        <Grid item>\n                            <Tabs onChange={this.handleChange}\n                                variant='fullWidth'\n                                value={(item.getTabLabels().length >= tabNr + 1) ? tabNr : 0}>\n                                {item.getTabLabels().map((tabLabel, idx) =>\n                                    <Tab className={classes.tab} key={`tab-label-${idx}`} data-cy={`details-panel-tab-${tabLabel}`} disableRipple label={tabLabel} />)\n                                }\n                            </Tabs>\n                        </Grid>\n                        <Grid item container className={this.props.classes.tabContainerWrapper}>\n                            <Grid item xs className={this.props.classes.tabContainer}>\n                                {item.getDetails({ tabNr, showPreview: shouldShowInlinePreview })}\n                            </Grid>\n                        </Grid>\n                    </Grid >\n                );\n            }\n        }\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-panel/empty-details.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DefaultIcon, RootProjectIcon } from 'components/icon/icon';\nimport { EmptyResource } from 'models/empty';\nimport { DetailsData } from \"./details-data\";\nimport { DefaultView } from 'components/default-view/default-view';\n\nexport class EmptyDetails extends DetailsData<EmptyResource> {\n    getIcon(className?: string) {\n        return <RootProjectIcon className={className}/>;\n    }\n\n    getDetails() {\n        return <DefaultView icon={DefaultIcon} messages={['Select a file or folder to view its details.']} />;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-panel/file-details.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { DetailsData } from \"./details-data\";\nimport { CollectionFile, CollectionDirectory, CollectionFileType } from 'models/collection-file';\nimport { getIcon } from 'components/file-tree/file-tree-item';\nimport { DetailsAttribute } from 'components/details-attribute/details-attribute';\nimport { formatFileSize } from 'common/formatters';\nimport { FileThumbnail } from 'components/file-tree/file-thumbnail';\nimport isImage from 'is-image';\n\nexport class FileDetails extends DetailsData<CollectionFile | CollectionDirectory> {\n\n    getIcon(className?: string) {\n        const Icon = getIcon(this.item.type);\n        return <Icon className={className} />;\n    }\n\n    getDetails({showPreview}) {\n        const { item } = this;\n        return item.type === CollectionFileType.FILE\n            ? <>\n                <DetailsAttribute label='Size' value={formatFileSize(item.size)} />\n                {\n                    isImage(item.url) && showPreview && <>\n                        <DetailsAttribute label='Preview' />\n                        <FileThumbnail file={item} />\n                    </>\n                }\n            </>\n            : <div />;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-panel/process-details.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { ProcessIcon } from 'components/icon/icon';\nimport { ProcessResource } from 'models/process';\nimport { DetailsData } from \"./details-data\";\nimport { ProcessDetailsAttributes } from 'views/process-panel/process-details-attributes';\n\nexport class ProcessDetails extends DetailsData<ProcessResource> {\n\n    getIcon(className?: string) {\n        return <ProcessIcon className={className} />;\n    }\n\n    getDetails() {\n        return <ProcessDetailsAttributes request={this.item} />;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-panel/project-details.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { ProjectIcon, RenameIcon, FilterGroupIcon } from 'components/icon/icon';\nimport { ProjectResource } from 'models/project';\nimport { formatDateTime } from 'common/formatters';\nimport { ResourceKind } from 'models/resource';\nimport { resourceLabel } from 'common/labels';\nimport { DetailsData } from \"./details-data\";\nimport { DetailsAttribute } from \"components/details-attribute/details-attribute\";\nimport { RichTextEditorLink } from 'components/rich-text-editor-link/rich-text-editor-link';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Button } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { Dispatch } from 'redux';\nimport { getPropertyChips } from 'views-components/property-chips/get-property-chips';\nimport { ResourceWithName } from '../data-explorer/renderers';\nimport { GroupClass } from \"models/group\";\nimport { openProjectUpdateDialog, ProjectUpdateFormDialogData } from 'store/projects/project-update-actions';\nimport { RootState } from 'store/store';\nimport { ResourcesState } from 'store/resources/resources';\nimport { resourceIsFrozen } from 'common/frozen-resources';\n\nexport class ProjectDetails extends DetailsData<ProjectResource> {\n    getIcon(className?: string) {\n        if (this.item.groupClass === GroupClass.FILTER) {\n            return <FilterGroupIcon className={className} />;\n        }\n        return <ProjectIcon className={className} />;\n    }\n\n    getDetails() {\n        return <ProjectDetailsComponent project={this.item} />;\n    }\n}\n\ntype CssRules = 'tag' | 'editIcon' | 'editButton';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    tag: {\n        marginRight: theme.spacing(0.5),\n        marginBottom: theme.spacing(0.5),\n    },\n    editIcon: {\n        paddingRight: theme.spacing(0.5),\n        fontSize: '1.125rem',\n    },\n    editButton: {\n        boxShadow: 'none',\n        padding: '2px 10px 2px 5px',\n        fontSize: '0.75rem'\n    },\n});\n\ninterface ProjectDetailsComponentDataProps {\n    project: ProjectResource;\n    hideEdit?: boolean;\n}\n\ninterface ProjectDetailsComponentActionProps {\n    onClick: (prj: ProjectUpdateFormDialogData) => () => void;\n}\n\nconst mapStateToProps = (state: RootState): { resources: ResourcesState } => {\n    return {\n        resources: state.resources\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    onClick: (prj: ProjectUpdateFormDialogData) =>\n        () => dispatch<any>(openProjectUpdateDialog(prj)),\n});\n\ntype ProjectDetailsComponentProps = ProjectDetailsComponentDataProps & ProjectDetailsComponentActionProps & WithStyles<CssRules>;\n\nexport const ProjectDetailsComponent = connect(mapStateToProps, mapDispatchToProps)(\n    withStyles(styles)(\n        ({ classes, project, resources, onClick, hideEdit }: ProjectDetailsComponentProps & { resources: ResourcesState }) => <div>\n            {project.groupClass !== GroupClass.FILTER && !hideEdit ?\n             <Button onClick={onClick({\n                 uuid: project.uuid,\n                 name: project.name,\n                 description: project.description,\n                 properties: project.properties,\n             })}\n   
                  disabled={resourceIsFrozen(project, resources)}\n                     className={classes.editButton} variant='contained'\n                     data-cy='details-panel-edit-btn' color='primary' size='small'>\n                 <RenameIcon className={classes.editIcon} /> Edit\n             </Button>\n            : ''\n            }\n            <DetailsAttribute label='Type' value={project.groupClass === GroupClass.FILTER ? 'Filter group' : resourceLabel(ResourceKind.PROJECT)} />\n            <DetailsAttribute label='UUID' linkToUuid={project.uuid} value={project.uuid} />\n            <DetailsAttribute label='Owner' linkToUuid={project.ownerUuid}\n                              uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />} />\n            <DetailsAttribute label='Created at' value={formatDateTime(project.createdAt)} />\n            <DetailsAttribute label='Last modified' value={formatDateTime(project.modifiedAt)} />\n            <DetailsAttribute label='Last modified by' linkToUuid={project.modifiedByUserUuid}\n                              uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />} />\n            <DetailsAttribute label='Description'>\n                {project.description ?\n                 <RichTextEditorLink\n                     title={`Description of ${project.name}`}\n                     content={project.description}\n                     label='Show full description' />\n                : '---'\n                }\n            </DetailsAttribute>\n            <DetailsAttribute label='Properties' />\n            {getPropertyChips(project, classes)}\n        </div>\n));\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-panel/root-project-details.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect } from 'react-redux';\nimport { RootProjectIcon } from 'components/icon/icon';\nimport { formatDateTime } from 'common/formatters';\nimport { DetailsData } from \"./details-data\";\nimport { DetailsAttribute } from \"components/details-attribute/details-attribute\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { Dispatch } from 'redux';\nimport { openProjectUpdateDialog, ProjectUpdateFormDialogData } from 'store/projects/project-update-actions';\nimport { RootState } from 'store/store';\nimport { ResourcesState } from 'store/resources/resources';\nimport { UserResource } from 'models/user';\nimport { UserResourceFullName } from 'views-components/data-explorer/renderers';\n\nexport class RootProjectDetails extends DetailsData<UserResource> {\n    getIcon(className?: string) {\n        return <RootProjectIcon className={className} />;\n    }\n\n    getDetails() {\n        return <RootProjectDetailsComponent rootProject={this.item} />;\n    }\n}\n\ntype CssRules = 'tag' | 'editIcon' | 'editButton';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    tag: {\n        marginRight: theme.spacing(0.5),\n        marginBottom: theme.spacing(0.5),\n    },\n    editIcon: {\n        paddingRight: theme.spacing(0.5),\n        fontSize: '1.125rem',\n    },\n    editButton: {\n        boxShadow: 'none',\n        padding: '2px 10px 2px 5px',\n        fontSize: '0.75rem'\n    },\n});\n\ninterface RootProjectDetailsComponentDataProps {\n    rootProject: any;\n}\n\nconst mapStateToProps = (state: RootState): { resources: ResourcesState } => {\n    return {\n        resources: state.resources\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    onClick: (prj: ProjectUpdateFormDialogData) =>\n        () => dispatch<any>(openProjectUpdateDialog(prj)),\n});\n\ntype RootProjectDetailsComponentProps = RootProjectDetailsComponentDataProps & WithStyles<CssRules>;\n\nexport const RootProjectDetailsComponent = connect(mapStateToProps, mapDispatchToProps)(\n    withStyles(styles)(\n        ({ rootProject}: RootProjectDetailsComponentProps & { resources: ResourcesState }) => <div>\n            <DetailsAttribute label='Type' value=\"Root Project\" />\n            <DetailsAttribute label='User' />\n            <UserResourceFullName uuid={rootProject.uuid} link={true} />\n            <DetailsAttribute label='Created at' value={formatDateTime(rootProject.createdAt)} />\n            <DetailsAttribute label='UUID' linkToUuid={rootProject.uuid} value={rootProject.uuid} />\n        </div>\n    ));\n"
  },
  {
    "path": "services/workbench2/src/views-components/details-panel/workflow-details.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { WorkflowIcon } from 'components/icon/icon';\nimport {\n    WorkflowResource, parseWorkflowDefinition, getWorkflowInputs,\n    getWorkflowOutputs, getWorkflow\n} from 'models/workflow';\nimport { DetailsData } from \"./details-data\";\nimport { DetailsAttribute } from 'components/details-attribute/details-attribute';\nimport { ResourceWithName } from 'views-components/data-explorer/renderers';\nimport { formatDateTime } from \"common/formatters\";\nimport { Grid } from '@mui/material';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithStyles } from '@mui/styles';\nimport { openRunProcess } from \"store/workflow-panel/workflow-panel-actions\";\nimport { Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport { ProcessIOParameter } from 'views/process-panel/process-io-card';\nimport { formatInputData, formatOutputData } from 'store/process-panel/process-panel-actions';\nimport { AuthState } from 'store/auth/auth-reducer';\nimport { RootState } from 'store/store';\nimport { getPropertyChip } from 'views-components/resource-properties-form/property-chip';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { ArvadosTheme } from 'common/custom-theme';\n\nexport interface WorkflowDetailsCardDataProps {\n    workflow?: WorkflowResource;\n    includeGitprops?: boolean;\n}\n\nexport interface WorkflowDetailsCardActionProps {\n    onClick: (wf: WorkflowResource) => () => void;\n}\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    onClick: (wf: WorkflowResource) =>\n        () => wf && dispatch<any>(openRunProcess(wf.uuid, wf.ownerUuid, wf.name)),\n});\n\ntype CssRules = 'propertyTag';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    propertyTag: {\n        marginRight: theme.spacing(0.5),\n        marginBottom: theme.spacing(0.5)\n    },\n});\n\ninterface AuthStateDataProps {\n    auth: AuthState;\n}\n\nexport interface RegisteredWorkflowPanelDataProps {\n    item: WorkflowResource;\n    workflowCollection: string;\n    inputParams: ProcessIOParameter[];\n    outputParams: ProcessIOParameter[];\n    gitprops: { [key: string]: string; };\n}\n\nexport const getRegisteredWorkflowPanelData = (item: WorkflowResource, auth: AuthState): RegisteredWorkflowPanelDataProps => {\n    let inputParams: ProcessIOParameter[] = [];\n    let outputParams: ProcessIOParameter[] = [];\n    let workflowCollection = \"\";\n    const gitprops: { [key: string]: string; } = {};\n\n    // parse definition\n    const wfdef = parseWorkflowDefinition(item);\n\n    if (wfdef) {\n        const inputs = getWorkflowInputs(wfdef);\n        if (inputs) {\n            inputs.forEach(elm => {\n                if (elm.default !== undefined && elm.default !== null) {\n                    elm.value = elm.default;\n                }\n            });\n            inputParams = formatInputData(inputs, auth);\n        }\n\n        const outputs = getWorkflowOutputs(wfdef);\n        if (outputs) {\n            outputParams = formatOutputData(outputs, {}, undefined, auth);\n        }\n\n        const wf = getWorkflow(wfdef);\n        if (wf) {\n            const REGEX = /keep:([0-9a-f]{32}\\+\\d+)\\/.*/;\n            if (wf[\"steps\"]) {\n                const pdh = wf[\"steps\"][0].run.match(REGEX);\n                if (pdh) {\n                    workflowCollection = pdh[1];\n                
}\n            }\n        }\n\n        for (const elm in wfdef) {\n            if (elm.startsWith(\"http://arvados.org/cwl#git\")) {\n                gitprops[elm.substr(23)] = wfdef[elm]\n            }\n        }\n    }\n\n    return { item, workflowCollection, inputParams, outputParams, gitprops };\n};\n\nconst mapStateToProps = (state: RootState): AuthStateDataProps => {\n    return { auth: state.auth };\n};\n\nexport const WorkflowDetailsAttributes = connect(mapStateToProps, mapDispatchToProps)(\n    withStyles(styles)(\n        ({ workflow, auth, includeGitprops, classes }: WorkflowDetailsCardDataProps & AuthStateDataProps & WorkflowDetailsCardActionProps & WithStyles<CssRules>) => {\n            if (!workflow) {\n                return <Grid />\n            }\n            const data = getRegisteredWorkflowPanelData(workflow, auth);\n\n            return <Grid container>\n                <Grid item xs={12} >\n                    <DetailsAttribute\n                        label={\"Workflow UUID\"}\n                        linkToUuid={workflow?.uuid} />\n                </Grid>\n                <Grid item xs={12} >\n                    <DetailsAttribute\n                        label='Owner' linkToUuid={workflow?.ownerUuid}\n                        uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />} />\n                </Grid>\n                <Grid item xs={12}>\n                    <DetailsAttribute label='Created at' value={formatDateTime(workflow?.createdAt)} />\n                </Grid>\n                <Grid item xs={12}>\n                    <DetailsAttribute label='Last modified' value={formatDateTime(workflow?.modifiedAt)} />\n                </Grid>\n                <Grid item xs={12} data-cy=\"workflow-details-attributes-modifiedby-user\">\n                    <DetailsAttribute\n                        label='Last modified by user' linkToUuid={workflow?.modifiedByUserUuid}\n                        uuidEnhancer={(uuid: string) => <ResourceWithName uuid={uuid} />} />\n                </Grid>\n                {includeGitprops && <Grid item xs={12} md={12}>\n                    <DetailsAttribute label='Properties' />\n                    {Object.keys(data.gitprops).map(k =>\n                        getPropertyChip(k, data.gitprops[k], undefined, classes.propertyTag))}\n                </Grid>}\n            </Grid >;\n        }));\n\nexport class WorkflowDetails extends DetailsData<WorkflowResource> {\n    getIcon(className?: string) {\n        return <WorkflowIcon className={className} />;\n    }\n\n    getDetails() {\n        return <WorkflowDetailsAttributes workflow={this.item} includeGitprops={true} />;\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-copy/dialog-collection-partial-copy-to-existing-collection.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { compose, Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { withDialog } from 'store/dialog/with-dialog';\nimport { DialogForm } from \"components/dialog-form/dialog-form\";\nimport { WithDialogProps } from 'store/dialog/with-dialog';\nimport { DialogTitle } from \"@mui/material\";\nimport { CollectionFileSelection } from 'store/collection-panel/collection-panel-files/collection-panel-files-state';\nimport { CollectionPartialCopyToExistingCollectionFormData, copyCollectionPartialToExistingCollection, COLLECTION_PARTIAL_COPY_TO_SELECTED_COLLECTION } from 'store/collections/collection-partial-copy-actions';\nimport { PickerIdProp } from \"store/tree-picker/picker-id\";\nimport { DirectoryTreePickerDialogField } from \"views-components/projects-tree-picker/tree-picker-field\";\nimport { useStateWithValidation } from \"common/useStateWithValidation\";\nimport { FILE_OPS_LOCATION_VALIDATION } from \"validators/validators\";\nimport { FileOperationLocation } from \"store/tree-picker/tree-picker-actions\";\n\ntype DialogCollectionPartialCopyProps = WithDialogProps<{ initialFormData: CollectionPartialCopyToExistingCollectionFormData, collectionFileSelection: CollectionFileSelection }> & {\n} & PickerIdProp & {\n    copyCollectionPartialToExistingCollection: (\n        fileSelection: CollectionFileSelection,\n        formData: CollectionPartialCopyToExistingCollectionFormData\n    ) => void\n}\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    copyCollectionPartialToExistingCollection: (\n        fileSelection: CollectionFileSelection,\n        formData: CollectionPartialCopyToExistingCollectionFormData\n    ) => {\n        dispatch<any>(copyCollectionPartialToExistingCollection(fileSelection, formData));\n    },\n});\n\nexport const DialogCollectionPartialCopyToExistingCollection = compose(\n    withDialog(COLLECTION_PARTIAL_COPY_TO_SELECTED_COLLECTION),\n    connect(null, mapDispatchToProps)\n)((props: DialogCollectionPartialCopyProps & PickerIdProp) => {\n    const { open, data, copyCollectionPartialToExistingCollection } = props;\n    const { collectionFileSelection, initialFormData } = data;\n    const [selectedDestination, setSelectedDestination, errs] = useStateWithValidation<FileOperationLocation | null>(null, FILE_OPS_LOCATION_VALIDATION, 'Collection');\n\n    const handleDirectoryChange = (res: FileOperationLocation) => {\n        setSelectedDestination(res);\n    }\n\n    const fields = () => (\n        <>\n            <DialogTitle>Copy Selected Files to Existing Collection</DialogTitle>\n            <DirectoryTreePickerDialogField\n                currentUuids={initialFormData?.destination.uuid ? 
[initialFormData.destination.uuid] : []}\n                pickerId={props.pickerId}\n                handleDirectoryChange={handleDirectoryChange}\n            />\n        </>\n    );\n\n\n    return <DialogForm\n                open={open}\n                fields={fields()}\n                submitLabel=\"Copy Files\"\n                onSubmit={(ev)=>{\n                    ev.preventDefault();\n                    if (!!selectedDestination) {\n                        copyCollectionPartialToExistingCollection(collectionFileSelection, { destination: selectedDestination });\n                    }\n                }}\n                formErrors={errs}\n                closeDialog={props.closeDialog}\n                clearFormValues={()=> {\n                    setSelectedDestination(null);\n                }}\n            />;\n});"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-copy/dialog-collection-partial-copy-to-new-collection.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react'\nimport { compose, Dispatch } from 'redux'\nimport { DialogForm } from 'components/dialog-form/dialog-form'\nimport { connect } from 'react-redux'\nimport { withDialog } from 'store/dialog/with-dialog'\nimport { DialogCollectionNameField } from 'views-components/form-fields/collection-form-fields'\nimport {\n\tCollectionPartialCopyToNewCollectionFormData,\n\tcopyCollectionPartialToNewCollection,\n\tCOLLECTION_PARTIAL_COPY_FORM_NAME,\n} from 'store/collections/collection-partial-copy-actions'\nimport { ProjectTreePickerDialogField } from 'views-components/projects-tree-picker/tree-picker-field'\nimport { CollectionFileSelection } from 'store/collection-panel/collection-panel-files/collection-panel-files-state'\nimport { WithDialogProps } from 'store/dialog/with-dialog'\nimport { PickerIdProp } from 'store/tree-picker/picker-id'\nimport { DialogTitle, DialogContent } from '@mui/material'\nimport { DialogRichTextField } from 'components/dialog-form/dialog-text-field'\nimport { REQUIRED_VALIDATION, REQUIRED_LENGTH255_VALIDATION, MAXLENGTH_524288_VALIDATION } from 'validators/validators'\nimport { useStateWithValidation } from 'common/useStateWithValidation'\n\ntype DialogCollectionPartialCopyProps = WithDialogProps<{\n\tinitialFormData: CollectionPartialCopyToNewCollectionFormData\n\tcollectionFileSelection: CollectionFileSelection\n}> &\n\tPickerIdProp & {\n\t\tcopyCollectionPartialToNewCollection: (\n\t\t\tfileSelection: CollectionFileSelection,\n\t\t\tformData: CollectionPartialCopyToNewCollectionFormData\n\t\t) => void\n\t}\n\nconst mapDispatch = (dispatch: Dispatch) => ({\n\tcopyCollectionPartialToNewCollection: (\n\t\tfileSelection: CollectionFileSelection,\n\t\tformData: CollectionPartialCopyToNewCollectionFormData\n\t) => {\n\t\tdispatch<any>(copyCollectionPartialToNewCollection(fileSelection, formData))\n\t},\n})\n\nexport const DialogCollectionPartialCopyToNewCollection = compose(\n\twithDialog(COLLECTION_PARTIAL_COPY_FORM_NAME),\n\tconnect(null, mapDispatch)\n)((props: DialogCollectionPartialCopyProps) => {\n\tconst { open, data, pickerId } = props\n\tconst { initialFormData, collectionFileSelection } = data\n\tconst { name, description } = initialFormData || {}\n\n\tconst [thisName, setThisName, nameErrs] = useStateWithValidation(name, REQUIRED_LENGTH255_VALIDATION, 'Name')\n    const [thisDescription, setThisDescription, descriptionErrs] = useStateWithValidation(description, MAXLENGTH_524288_VALIDATION, 'Description')\n    const [thisOwnerUuid, setThisOwnerUuid, ownerUuidErrs] = useStateWithValidation('', REQUIRED_VALIDATION, 'Project')\n\n    const [formErrors, setFormErrors] = React.useState<string[]>([])\n\n    React.useEffect(() => {\n        setFormErrors([...nameErrs, ...descriptionErrs, ...ownerUuidErrs])\n    }, [nameErrs, descriptionErrs, ownerUuidErrs])\n\n\tconst fields = () => (\n\t\t<>\n\t\t\t<DialogTitle>Copy to new collection</DialogTitle>\n\t\t\t<DialogContent>\n\t\t\t\t<DialogCollectionNameField defaultValue={name} setValue={setThisName} />\n\t\t\t\t<DialogRichTextField\n\t\t\t\t\tlabel=\"Description\"\n\t\t\t\t\tdefaultValue={description}\n\t\t\t\t\tsetValue={setThisDescription}\n\t\t\t\t\tvalidators={MAXLENGTH_524288_VALIDATION}\n\t\t\t\t/>\n\t\t\t\t<ProjectTreePickerDialogField\n\t\t\t\t\tpickerId={pickerId}\n\t\t\t\t\tsetSelectedProject={setThisOwnerUuid}\n\t\t\t\t/>\n\t\t\t</DialogContent>\n\t\t</>\n\t)\n\n\treturn 
(\n\t\t<DialogForm\n\t\t\topen={open}\n\t\t\tfields={fields()}\n            submitLabel='Create Collection'\n\t\t\tformErrors={formErrors}\n\t\t\tonSubmit={(event: React.FormEvent<HTMLFormElement>) => {\n\t\t\t\tevent.preventDefault()\n\t\t\t\tprops.copyCollectionPartialToNewCollection(collectionFileSelection, {\n\t\t\t\t\tname: thisName,\n\t\t\t\t\tdescription: thisDescription,\n\t\t\t\t\tprojectUuid: thisOwnerUuid,\n\t\t\t\t})\n\t\t\t}}\n\t\t\tcloseDialog={props.closeDialog}\n\t\t\tclearFormValues={() => {\n\t\t\t\tsetThisName('')\n\t\t\t\tsetThisDescription('')\n\t\t\t\tsetThisOwnerUuid('')\n\t\t\t}}\n\t\t/>\n\t)\n})\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-copy/dialog-collection-partial-copy-to-separate-collections.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react'\nimport { connect } from 'react-redux'\nimport { compose, Dispatch } from 'redux'\nimport { DialogTitle } from '@mui/material'\nimport { withDialog, WithDialogProps } from 'store/dialog/with-dialog'\nimport { ProjectTreePickerDialogField } from 'views-components/projects-tree-picker/tree-picker-field'\nimport { DialogForm } from 'components/dialog-form/dialog-form'\nimport {\n\tCollectionPartialCopyToSeparateCollectionsFormData,\n\tcopyCollectionPartialToSeparateCollections,\n} from 'store/collections/collection-partial-copy-actions'\nimport { PickerIdProp } from 'store/tree-picker/picker-id'\nimport { COLLECTION_PARTIAL_COPY_TO_SEPARATE_COLLECTIONS } from 'store/collections/collection-partial-copy-actions'\nimport { CollectionFileSelection } from 'store/collection-panel/collection-panel-files/collection-panel-files-state'\nimport { getFieldErrors, REQUIRED_VALIDATION } from 'validators/validators'\n\ntype DialogCollectionPartialCopyProps = WithDialogProps<{ collectionFileSelection: CollectionFileSelection, sourceCollectionName: string }> &\n\tPickerIdProp & {\n\t\tcopyCollectionPartialToSeparateCollections: (\n\t\t\tfileSelection: CollectionFileSelection,\n\t\t\tformData: CollectionPartialCopyToSeparateCollectionsFormData\n\t\t) => void\n\t}\n\nconst mapDispatch = (dispatch: Dispatch) => ({\n\tcopyCollectionPartialToSeparateCollections: (\n\t\tfileSelection: CollectionFileSelection,\n\t\tformData: CollectionPartialCopyToSeparateCollectionsFormData\n\t) => {\n\t\tdispatch<any>(copyCollectionPartialToSeparateCollections(fileSelection, formData))\n\t},\n})\n\nexport const DialogCollectionPartialCopyToSeparateCollection = compose(\n\twithDialog(COLLECTION_PARTIAL_COPY_TO_SEPARATE_COLLECTIONS),\n\tconnect(null, mapDispatch),\n)((props: DialogCollectionPartialCopyProps) => {\n\tconst { open, data } = props\n\tconst { collectionFileSelection, sourceCollectionName } = data\n\tconst [selectedProjectUuid, setSelectedProjectUuid] = React.useState<string>('')\n\tconst [formErrors, setFormErrors] = React.useState<string[]>([])\n\n\tconst fieldErrors = getFieldErrors(selectedProjectUuid, REQUIRED_VALIDATION, 'Project')\n\n\tReact.useEffect(() => {\n\t\tsetFormErrors([...fieldErrors])\n\t}, [selectedProjectUuid])\n\n\tconst fields = () => (\n\t\t<>\n\t\t\t<DialogTitle>Copy Selected Files to Separate Collections</DialogTitle>\n\t\t\t<ProjectTreePickerDialogField\n\t\t\t\tpickerId={props.pickerId}\n\t\t\t\tsetSelectedProject={setSelectedProjectUuid}\n\t\t\t/>\n\t\t</>\n\t)\n\n\treturn (\n\t\t<DialogForm\n\t\t\topen={open}\n\t\t\tfields={fields()}\n\t\t\tonSubmit={(event: React.FormEvent<HTMLFormElement>) => {\n\t\t\t\tevent.preventDefault()\n\t\t\t\tprops.copyCollectionPartialToSeparateCollections(collectionFileSelection, {\n\t\t\t\t\tname: sourceCollectionName,\n\t\t\t\t\tprojectUuid: selectedProjectUuid,\n\t\t\t\t})\n\t\t\t}}\n\t\t\tformErrors={formErrors}\n\t\t\tsubmitLabel='Create Collections'\n\t\t\tcloseDialog={props.closeDialog}\n\t\t\tclearFormValues={() => {\n\t\t\t\tsetSelectedProjectUuid('')\n\t\t\t}}\n\t\t/>\n\t)\n})\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-copy/dialog-copy.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react'\nimport { compose, Dispatch } from 'redux'\nimport { connect } from 'react-redux'\nimport { DialogTitle, DialogContent } from '@mui/material'\nimport { WithDialogProps, withDialog } from 'store/dialog/with-dialog'\nimport { ProjectTreePickerDialogField } from 'views-components/projects-tree-picker/tree-picker-field'\nimport { COPY_NAME_VALIDATION, REQUIRED_VALIDATION } from 'validators/validators'\nimport { CopyFormDialogData } from 'store/copy-dialog/copy-dialog'\nimport { PickerIdProp } from 'store/tree-picker/picker-id'\nimport { copyCollectionRunner } from 'store/workbench/workbench-actions'\nimport { COLLECTION_COPY_FORM_NAME } from 'store/collections/collection-copy-actions'\nimport { DialogForm } from 'components/dialog-form/dialog-form'\nimport { DialogTextField } from 'components/dialog-form/dialog-text-field'\nimport { useStateWithValidation } from 'common/useStateWithValidation'\n\ntype CopyDialogProps = WithDialogProps<CopyFormDialogData> &\n\tPickerIdProp & {\n\t\tselectedCollectionUuid: string | undefined\n\t\tcopyCollection: (data: CopyFormDialogData) => void\n\t}\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n\tcopyCollection: (data: CopyFormDialogData) => dispatch<any>(copyCollectionRunner(data)),\n})\n\nexport const CopyCollectionDialog = compose(\n\twithDialog(COLLECTION_COPY_FORM_NAME),\n\tconnect(null, mapDispatchToProps)\n)((props: CopyDialogProps) => {\n\tconst { open, data, pickerId } = props\n\tconst [nameVal, setNameVal, nameErrs] = useStateWithValidation(data.name || '', COPY_NAME_VALIDATION, 'Name')\n\tconst [selectedProjectUuid, setSelectedProjectUuid, selectedProjectErrs] = useStateWithValidation('', REQUIRED_VALIDATION, 'Project')\n\tconst [formErrors, setFormErrors] = React.useState<string[]>([])\n\n\tReact.useEffect(() => {\n\t\tsetFormErrors([...selectedProjectErrs, ...nameErrs])\n\t}, [nameErrs, selectedProjectErrs])\n\n\tconst fields = () => (\n\t\t<>\n\t\t\t{data.isSingleResource ? (\n\t\t\t\t<>\n\t\t\t\t\t<DialogTitle>Make a copy</DialogTitle>\n\t\t\t\t\t<DialogContent>\n\t\t\t\t\t\t<DialogTextField\n                            label=\"Enter a new name for the copy\"\n\t\t\t\t\t\t\tdefaultValue={data.name}\n\t\t\t\t\t\t\tsetValue={setNameVal}\n\t\t\t\t\t\t\tvalidators={COPY_NAME_VALIDATION}\n\t\t\t\t\t\t/>\n\t\t\t\t\t</DialogContent>\n\t\t\t\t</>\n\t\t\t) : (\n\t\t\t\t<DialogTitle>Make copies</DialogTitle>\n\t\t\t)}\n\t\t\t<ProjectTreePickerDialogField\n\t\t\t\tpickerId={pickerId}\n\t\t\t\tsetSelectedProject={setSelectedProjectUuid}\n\t\t\t/>\n\t\t</>\n\t)\n\n\treturn (\n\t\t<DialogForm\n\t\t\topen={open}\n\t\t\tfields={fields()}\n\t\t\tsubmitLabel='Copy Collection'\n\t\t\tonSubmit={(event: React.FormEvent<HTMLFormElement>) => {\n\t\t\t\tevent.preventDefault()\n\t\t\t\tprops.copyCollection({\n\t\t\t\t\tname: nameVal,\n\t\t\t\t\tuuid: data.uuid,\n\t\t\t\t\townerUuid: selectedProjectUuid,\n\t\t\t\t})\n\t\t\t}}\n\t\t\tformErrors={formErrors}\n\t\t\tcloseDialog={props.closeDialog}\n\t\t\tclearFormValues={() => {\n\t\t\t\tsetSelectedProjectUuid('')\n\t\t\t\tsetNameVal('')\n\t\t\t}}\n\t\t/>\n\t)\n})\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-copy/dialog-process-rerun.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Dispatch, compose } from 'redux';\nimport { connect } from 'react-redux';\nimport { WithDialogProps, withDialog } from 'store/dialog/with-dialog';\nimport { DialogForm } from 'components/dialog-form/dialog-form';\nimport { ProjectTreePickerDialogField } from 'views-components/projects-tree-picker/tree-picker-field';\nimport { COPY_NAME_VALIDATION, COPY_FILE_VALIDATION } from 'validators/validators';\nimport { CopyFormDialogData } from 'store/copy-dialog/copy-dialog';\nimport { PickerIdProp } from 'store/tree-picker/picker-id';\nimport { copyProcessRunner } from 'store/workbench/workbench-actions';\nimport { DialogTitle, DialogContent } from '@mui/material';\nimport { DialogTextField } from 'components/dialog-form/dialog-text-field';\nimport { useStateWithValidation } from 'common/useStateWithValidation';\nimport { PROCESS_COPY_FORM_NAME } from 'store/processes/process-copy-actions';\n\ntype ProcessRerunFormDialogProps = WithDialogProps<CopyFormDialogData> & PickerIdProp & {\n    copyProcess: (data: CopyFormDialogData) => void\n};\n\nconst mapDispatch = (dispatch: Dispatch) => ({\n    copyProcess: (data: CopyFormDialogData) => {\n        dispatch<any>(copyProcessRunner(data));\n    }\n});\n\nexport const DialogProcessRerun = compose(\n    withDialog(PROCESS_COPY_FORM_NAME),\n    connect(null, mapDispatch))\n    ((props: ProcessRerunFormDialogProps) => {\n        const { open, data, pickerId, copyProcess } = props;\n        const [name, setName, nameErrs] = useStateWithValidation(data.name || '', COPY_NAME_VALIDATION, 'Name');\n        const [ownerUuid, setOwnerUuid, ownerUuidErrs] = useStateWithValidation(data.ownerUuid || '', COPY_FILE_VALIDATION, 'Project');\n        const [formErrors, setFormErrors] = React.useState<string[]>([]);\n\n        React.useEffect(() => {\n            setFormErrors([...nameErrs, ...ownerUuidErrs]);\n        }, [nameErrs, ownerUuidErrs]);\n\n        const fields = () => (\n            <>\n                <DialogTitle>Choose location for re-run</DialogTitle>\n                <DialogContent>\n                    <DialogTextField\n                        label=\"Enter a new name for the copy\"\n                        defaultValue={data.name || ''}\n                        setValue={setName}\n                        validators={COPY_NAME_VALIDATION}\n                    />\n                    <ProjectTreePickerDialogField\n                        pickerId={pickerId}\n                        setSelectedProject={setOwnerUuid}\n                    />\n                </DialogContent>\n            </>\n        );\n\n        return (\n            <DialogForm\n                open={open}\n                fields={fields()}\n                submitLabel=\"Copy\"\n                formErrors={formErrors}\n                onSubmit={(event: React.FormEvent<HTMLFormElement>) => {\n                    event.preventDefault();\n                    copyProcess({\n                        name,\n                        ownerUuid,\n                        uuid: data?.uuid || ''});\n                }}\n                closeDialog={props.closeDialog}\n                clearFormValues={() => {\n                    setName('');\n                    setOwnerUuid('');\n                }}\n            />\n        );\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-create/create-external-credential-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { compose, Dispatch } from \"redux\";\nimport { reduxForm } from 'redux-form';\nimport { withDialog } from \"store/dialog/with-dialog\";\nimport { DialogExternalCredentialCreate } from \"./dialog-external-credential-create\";\nimport { CREATE_EXTERNAL_CREDENTIAL_FORM_NAME } from \"store/external-credentials/external-credentials-actions\";\nimport { CreateExternalCredentialFormDialogData } from \"store/external-credentials/external-credential-dialog-data\";\nimport { createExternalCredential } from \"store/external-credentials/external-credentials-actions\";\n\nexport const CreateExternalCredentialDialog = compose(\n    withDialog(CREATE_EXTERNAL_CREDENTIAL_FORM_NAME),\n    reduxForm<CreateExternalCredentialFormDialogData>({\n        form: CREATE_EXTERNAL_CREDENTIAL_FORM_NAME,\n        onSubmit: (data: CreateExternalCredentialFormDialogData, dispatch: Dispatch) => {\n            for (const key in data) {\n                if (typeof data[key] === 'string') {\n                    data[key] = data[key].trim();\n                }\n            }\n            dispatch<any>(createExternalCredential(data));\n            return;\n        }\n    })\n)(DialogExternalCredentialCreate);"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-create/dialog-collection-create.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Dispatch, compose } from 'redux';\nimport { connect } from 'react-redux'\nimport { WithDialogProps, withDialog } from 'store/dialog/with-dialog';\nimport { DialogContent, DialogTitle } from '@mui/material/';\nimport { CollectionCreateFormDialogData } from 'store/collections/collection-create-actions';\nimport {\n    DialogCollectionNameField,\n} from 'views-components/form-fields/collection-form-fields';\nimport { ResourceParentField } from '../form-fields/resource-form-fields';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { FormLabel } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { DialogForm } from 'components/dialog-form/dialog-form';\nimport { useStateWithValidation } from 'common/useStateWithValidation';\nimport { COLLECTION_NAME_VALIDATION, MAXLENGTH_524288_VALIDATION, REQUIRED_VALIDATION } from 'validators/validators';\nimport { DialogRichTextField } from 'components/dialog-form/dialog-text-field';\nimport { DialogResourcePropertiesForm } from 'views-components/resource-properties-form/resource-properties-form'\nimport { createCollectionRunner } from 'store/workbench/workbench-actions';\nimport { PropertyChips, getVocabularyFromChips } from 'components/chips/chips';\nimport { RootState } from 'store/store';\nimport { DialogMultiCheckboxField } from 'components/checkbox-field/checkbox-field'\nimport { DialogFileUploaderField } from '../file-uploader/file-uploader';\nimport { Vocabulary } from 'models/vocabulary';\nimport { COLLECTION_CREATE_FORM_NAME } from 'store/collections/collection-create-actions';\n\ntype CssRules = 'propertiesForm';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    propertiesForm: {\n        marginTop: theme.spacing(2),\n        marginBottom: theme.spacing(2),\n    },\n});\n\nconst mapState = (state: RootState) => ({\n    vocabulary: state.properties.vocabulary\n});\n\nconst mapDispatch = (dispatch: Dispatch) => ({\n    createCollection: (data: CollectionCreateFormDialogData, setSubmitErr: (errMsg: string) => void) => dispatch<any>(createCollectionRunner(data, setSubmitErr))\n});\n\ntype DialogCollectionProps = WithDialogProps<CollectionCreateFormDialogData> & {\n    createCollection: (data: CollectionCreateFormDialogData, setSubmitErr: (errMsg: string) => void) => Promise<void>;\n    vocabulary: Vocabulary;\n};\n\nexport const DialogCollectionCreate = compose(\n    connect(mapState, mapDispatch),\n    withStyles(styles),\n    withDialog(COLLECTION_CREATE_FORM_NAME)\n)(({ createCollection, data, closeDialog, open, vocabulary, classes }: DialogCollectionProps & WithStyles<CssRules>) =>{\n    const [collectionName, setCollectionName, collectionNameErrs] = useStateWithValidation('', [...REQUIRED_VALIDATION, ...COLLECTION_NAME_VALIDATION], 'Collection Name');\n    const [description, setDescription, descriptionErrs] = useStateWithValidation('', MAXLENGTH_524288_VALIDATION, 'Description');\n    const [chips, setChips] = React.useState<PropertyChips>({} as PropertyChips);\n    const [storageClassesDesired, setStorageClassesDesired] = React.useState<string[]>([]);\n    const [formErrors, setFormErrors] = React.useState<string[]>([]);\n    const [submitErr, setSubmitErr] = React.useState<string>('');\n    const [isSubmitting, setIsSubmitting] = React.useState<boolean>(false);\n\n    
React.useEffect(() => {\n        setFormErrors([...collectionNameErrs, ...descriptionErrs]);\n        if (submitErr) {\n            setFormErrors(prevErrors => [...prevErrors, submitErr]);\n        }\n    }, [collectionNameErrs, descriptionErrs, submitErr]);\n\n    const handleSubmit = (ev) => {\n        ev.preventDefault();\n        setIsSubmitting(true);\n        createCollection({\n                ownerUuid: data.ownerUuid,\n                name: collectionName,\n                description: description,\n                storageClassesDesired: storageClassesDesired,\n                properties: getVocabularyFromChips(chips, vocabulary),\n            },\n            setSubmitErr\n        ).finally(() => {\n            setIsSubmitting(false);\n        });\n    };\n\n    const fields = () => (\n        <>\n            <DialogTitle>New collection</DialogTitle>\n            <DialogContent>\n                <ResourceParentField ownerUuid={data ? data.ownerUuid : ''} />\n                <DialogCollectionNameField setValue={setCollectionName} submitErr={submitErr} setSubmitErr={setSubmitErr} />\n                <DialogRichTextField\n                    label=\"Description\"\n                    defaultValue={description}\n                    setValue={setDescription}\n                    validators={MAXLENGTH_524288_VALIDATION}\n                />\n            <FormLabel>Properties</FormLabel>\n                <DialogResourcePropertiesForm\n                    setChips={setChips}\n                    onSubmit={(ev)=> ev.preventDefault()}\n                    />\n                <DialogMultiCheckboxField\n                    name=\"storageClassesDesired\"\n                    defaultValues={['default']}\n                    label=\"Storage classes\"\n                    onChange={setStorageClassesDesired}\n                />\n                <DialogFileUploaderField />\n            </DialogContent>\n        </>\n    )\n\n    return <DialogForm\n        fields={fields()}\n        submitLabel='Create a Collection'\n        formErrors={formErrors}\n        isSubmitting={isSubmitting}\n        onSubmit={handleSubmit}\n        closeDialog={closeDialog}\n        clearFormValues={() => {\n            setCollectionName('');\n            setDescription('');\n            setChips({} as PropertyChips);\n            setStorageClassesDesired([]);\n        }}\n        open={open}\n    />;\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-create/dialog-external-credential-create.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { InjectedFormProps } from 'redux-form';\nimport { Grid } from \"@mui/material\";\nimport { WithDialogProps } from 'store/dialog/with-dialog';\nimport { FormDialog } from 'components/form-dialog/form-dialog';\nimport { ExternalCredentialNameField,\n    ExternalCredentialDescriptionField,\n    ExternalCredentialClassCreateField,\n    ExternalCredentialExternalIdField,\n    ExternalCredentialExpiresAtField,\n    ExternalCredentialSecretCreateField,\n    ExternalCredentialScopesField } from 'views-components/form-fields/external-credential-form-fields';\nimport { CreateExternalCredentialFormDialogData } from 'store/external-credentials/external-credential-dialog-data';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { GroupClass } from 'models/group';\n\ntype CssRules = 'description';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    description: {\n        marginTop: theme.spacing(2),\n        marginBottom: theme.spacing(2),\n    },\n});\n\ntype DialogProjectProps = WithDialogProps<{sourcePanel: GroupClass}> & InjectedFormProps<CreateExternalCredentialFormDialogData>;\n\nexport const DialogExternalCredentialCreate = (props: DialogProjectProps) => {\n    const title = 'New External Credential';\n\n    return <FormDialog\n        dialogTitle={title}\n        formFields={NewExternalCredentialFields as any}\n        submitLabel='Create'\n        {...props}\n    />;\n};\n\nconst NewExternalCredentialFields = withStyles(styles)(\n    ({ classes }: WithStyles<CssRules>) => <span>\n        <ExternalCredentialNameField />\n        <div className={classes.description}>\n            <ExternalCredentialDescriptionField />\n        </div>\n        <Grid container direction={'row'} xs={12} spacing={2}>\n            <Grid item xs={6}><ExternalCredentialClassCreateField /></Grid>\n            <Grid item xs={6}><ExternalCredentialExternalIdField /></Grid>\n            <Grid item xs={6}><ExternalCredentialSecretCreateField /></Grid>\n            <Grid item xs={6}><ExternalCredentialExpiresAtField /></Grid>\n            <Grid item xs={12}><ExternalCredentialScopesField /></Grid>\n        </Grid>\n    </span>);\n\n\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-create/dialog-project-create.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useState, useEffect} from 'react';\nimport { DialogTitle, DialogContent, FormGroup, FormLabel } from '@mui/material';\nimport { Dispatch, compose } from 'redux';\nimport { connect } from 'react-redux';\nimport { WithDialogProps, withDialog } from 'store/dialog/with-dialog';\nimport { ProjectCreateFormDialogData, PROJECT_CREATE_FORM_NAME } from 'store/projects/project-create-actions';\nimport { ResourceParentField } from '../form-fields/resource-form-fields';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { GroupClass } from 'models/group';\nimport { DialogForm } from 'components/dialog-form/dialog-form';\nimport { useStateWithValidation } from 'common/useStateWithValidation';\nimport { PROJECT_NAME_VALIDATION, PROJECT_NAME_VALIDATION_ALLOW_SLASH, PROJECT_DESCRIPTION_VALIDATION, REQUIRED_VALIDATION, MAXLENGTH_524288_VALIDATION } from 'validators/validators';\nimport { DialogTextField, DialogRichTextField } from 'components/dialog-form/dialog-text-field';\nimport { DialogResourcePropertiesForm } from 'views-components/resource-properties-form/resource-properties-form';\nimport { createProjectRunner } from 'store/workbench/workbench-actions';\nimport { createGroupRunner, GroupCreateFormDialogData } from 'store/groups-panel/groups-panel-actions';\nimport { PropertyChips, getVocabularyFromChips } from 'components/chips/chips';\nimport { RootState } from 'store/store';\nimport { Vocabulary } from 'models/vocabulary';\nimport { Participant, ParticipantSelect } from 'views-components/sharing-dialog/participant-select';\n\ntype CssRules = 'propertiesForm' | 'description';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    propertiesForm: {\n        marginTop: theme.spacing(2),\n        marginBottom: theme.spacing(2),\n    },\n    description: {\n        marginTop: theme.spacing(2),\n        marginBottom: theme.spacing(2),\n    },\n});\n\nconst mapState = (state: RootState) => ({\n    vocabulary: state.properties.vocabulary,\n    allowSlash: state.auth.config.clusterConfig.Collections.ForwardSlashNameSubstitution !== \"\"\n});\n\nconst mapDispatch = (dispatch: Dispatch) => ({\n    createProject: (data: ProjectCreateFormDialogData, setSubmitErr: (err: string) => void) => dispatch<any>(createProjectRunner(data, setSubmitErr)),\n    createGroup: (data: GroupCreateFormDialogData, setSubmitErr: (err: string) => void) => dispatch<any>(createGroupRunner(data, setSubmitErr))\n});\n\ntype DialogProjectProps = WithDialogProps<{sourcePanel: GroupClass, ownerUuid: string}> & {\n    createProject: (data: ProjectCreateFormDialogData, setSubmitErr: (err: string) => void) => Promise<void>;\n    createGroup: (data: GroupCreateFormDialogData, setSubmitErr: (err: string) => void) => Promise<void>;\n    vocabulary: Vocabulary;\n    allowSlash: boolean;\n};\n\nexport const DialogProjectCreate = compose(\n    connect(mapState, mapDispatch),\n    withStyles(styles),\n    withDialog(PROJECT_CREATE_FORM_NAME)\n)(({ createProject, createGroup, data, closeDialog, open, vocabulary, allowSlash, classes }: DialogProjectProps & WithStyles<CssRules>) => {\n    const [projectName, setProjectName, projectNameErrs] = useStateWithValidation('',\n        [...REQUIRED_VALIDATION, ...(allowSlash ? 
PROJECT_NAME_VALIDATION_ALLOW_SLASH : PROJECT_NAME_VALIDATION)],\n        'Project Name');\n    const [description, setDescription, descriptionErrs] = useStateWithValidation('', MAXLENGTH_524288_VALIDATION, 'Description');\n    const [chips, setChips] = useState<PropertyChips>({} as PropertyChips);\n    const [users, setUsers] = useState<Participant[]>([]);\n    const [formErrors, setFormErrors] = useState<string[]>([]);\n    const [submitErr, setSubmitErr] = useState<string>('');\n    const [isSubmitting, setIsSubmitting] = useState<boolean>(false);\n\n    useEffect(() => {\n        setFormErrors([...projectNameErrs, ...descriptionErrs]);\n        if (submitErr) {\n            setFormErrors(prevErrors => [...prevErrors, submitErr]);\n        }\n    }, [projectNameErrs, descriptionErrs, submitErr]);\n\n    const sourcePanel = data?.sourcePanel || GroupClass.PROJECT;\n    const isGroup = sourcePanel === GroupClass.ROLE;\n    const title = isGroup ? 'New Group' : 'New Project';\n\n    const fields = () => (\n        <>\n            <DialogTitle>{title}</DialogTitle>\n            <DialogContent>\n                {(isGroup === false) && <ResourceParentField ownerUuid={data ? data.ownerUuid : ''} />}\n                <div data-cy=\"name-field\">\n                    <DialogTextField\n                        label={isGroup ? \"Group Name\" : \"Project Name\"}\n                        defaultValue={projectName}\n                        setValue={setProjectName}\n                        validators={allowSlash ? PROJECT_NAME_VALIDATION_ALLOW_SLASH : PROJECT_NAME_VALIDATION}\n                        submitErr={submitErr}\n                        setSubmitErr={setSubmitErr}\n                    />\n                </div>\n                {isGroup && (\n                    <div data-cy=\"users-field\">\n                        <ParticipantSelect\n                            onlyPeople\n                            label='Search for users to add to the group'\n                            items={users}\n                            onSelect={(user: Participant) => setUsers([...users, user])}\n                            onDelete={(index: number) => setUsers(users.filter((_, i) => i !== index))}\n                        />\n                    </div>\n                )}\n                <div className={classes.description}>\n                    <DialogRichTextField\n                        label=\"Description\"\n                        defaultValue={description}\n                        setValue={setDescription}\n                        validators={PROJECT_DESCRIPTION_VALIDATION}\n                    />\n                </div>\n                <div className={classes.propertiesForm}>\n                    <FormLabel>Properties</FormLabel>\n                    <FormGroup>\n                        <DialogResourcePropertiesForm\n                            setChips={setChips}\n                            onSubmit={(ev) => ev.preventDefault()}\n                        />\n                    </FormGroup>\n                </div>\n            </DialogContent>\n        </>\n    );\n\n    const submitFunc = () => {\n        if (isGroup) {\n            const groupData: GroupCreateFormDialogData = {\n                name: projectName,\n                description: description,\n                properties: getVocabularyFromChips(chips, vocabulary),\n                users,\n            };\n            createGroup(groupData, setSubmitErr)\n                .finally(() => {\n                    setIsSubmitting(false);\n   
             });\n        } else {\n            const projectData: ProjectCreateFormDialogData = {\n                ownerUuid: data.ownerUuid,\n                name: projectName,\n                description: description,\n                properties: getVocabularyFromChips(chips, vocabulary),\n            };\n            createProject(projectData, setSubmitErr)\n                .finally(() => {\n                    setIsSubmitting(false);\n                });\n        }\n    }\n\n    return <DialogForm\n        fields={fields()}\n        submitLabel='Create'\n        formErrors={formErrors}\n        isSubmitting={isSubmitting}\n        onSubmit={(ev) => {\n            ev.preventDefault();\n            setIsSubmitting(true);\n            submitFunc();\n        }}\n        closeDialog={closeDialog}\n        clearFormValues={() => {\n            setProjectName('');\n            setDescription('');\n            setChips({} as PropertyChips);\n            setUsers([]);\n        }}\n        open={open}\n    />;\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-create/dialog-repository-create.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { InjectedFormProps } from 'redux-form';\nimport { WithDialogProps } from 'store/dialog/with-dialog';\nimport { FormDialog } from 'components/form-dialog/form-dialog';\nimport { RepositoryNameField } from 'views-components/form-fields/repository-form-fields';\n\ntype DialogRepositoryProps = WithDialogProps<{}> & InjectedFormProps<any>;\n\nexport const DialogRepositoryCreate = (props: DialogRepositoryProps) =>\n    <FormDialog\n        dialogTitle='Add new repository'\n        formFields={RepositoryNameField}\n        submitLabel='CREATE REPOSITORY'\n        {...props}\n    />;\n\n\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-create/dialog-ssh-key-create.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { InjectedFormProps } from 'redux-form';\nimport { WithDialogProps } from 'store/dialog/with-dialog';\nimport { FormDialog } from 'components/form-dialog/form-dialog';\nimport { SshKeyPublicField, SshKeyNameField } from 'views-components/form-fields/ssh-key-form-fields';\nimport { SshKeyCreateFormDialogData } from 'store/auth/auth-action-ssh';\n\ntype DialogSshKeyProps = WithDialogProps<{}> & InjectedFormProps<SshKeyCreateFormDialogData>;\n\nexport const DialogSshKeyCreate = (props: DialogSshKeyProps) =>\n    <FormDialog\n        dialogTitle='Add new SSH key'\n        formFields={SshKeyAddFields}\n        submitLabel='Add new ssh key'\n        {...props}\n    />;\n\nconst SshKeyAddFields = () => <span>\n    <SshKeyPublicField />\n    <SshKeyNameField />\n</span>;\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-create/dialog-user-create.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { InjectedFormProps } from 'redux-form';\nimport { WithDialogProps } from 'store/dialog/with-dialog';\nimport { FormDialog } from 'components/form-dialog/form-dialog';\nimport { UserEmailField, UserVirtualMachineField, UserGroupsVirtualMachineField } from 'views-components/form-fields/user-form-fields';\nimport { UserCreateFormDialogData } from 'store/users/users-actions';\nimport { UserResource } from 'models/user';\nimport { VirtualMachinesResource } from 'models/virtual-machines';\n\nexport type DialogUserProps = WithDialogProps<{}> & InjectedFormProps<UserCreateFormDialogData>;\n\ninterface DataProps {\n    user: UserResource;\n    items: VirtualMachinesResource[];\n}\n\nexport const UserRepositoryCreate = (props: DialogUserProps) =>\n    <FormDialog\n        dialogTitle='New user'\n        formFields={UserAddFields}\n        submitLabel='ADD NEW USER'\n        {...props}\n    />;\n\nconst UserAddFields = (props: DialogUserProps) => <span>\n    <UserEmailField />\n    <UserVirtualMachineField data={props.data as DataProps}/>\n    <UserGroupsVirtualMachineField />\n</span>;\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-forms/create-repository-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { compose } from \"redux\";\nimport { reduxForm } from 'redux-form';\nimport { withDialog } from \"store/dialog/with-dialog\";\nimport { createRepository, REPOSITORY_CREATE_FORM_NAME } from \"store/repositories/repositories-actions\";\nimport { DialogRepositoryCreate } from \"views-components/dialog-create/dialog-repository-create\";\n\nexport const CreateRepositoryDialog = compose(\n    withDialog(REPOSITORY_CREATE_FORM_NAME),\n    reduxForm<any>({\n        form: REPOSITORY_CREATE_FORM_NAME,\n        onSubmit: (repositoryName, dispatch) => {\n            dispatch(createRepository(repositoryName));\n        }\n    })\n)(DialogRepositoryCreate);"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-forms/create-ssh-key-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { compose } from \"redux\";\nimport { reduxForm } from 'redux-form';\nimport { withDialog } from \"store/dialog/with-dialog\";\nimport {\n    SSH_KEY_CREATE_FORM_NAME,\n    createSshKey,\n    SshKeyCreateFormDialogData\n} from 'store/auth/auth-action-ssh';\nimport { DialogSshKeyCreate } from 'views-components/dialog-create/dialog-ssh-key-create';\n\nexport const CreateSshKeyDialog = compose(\n    withDialog(SSH_KEY_CREATE_FORM_NAME),\n    reduxForm<SshKeyCreateFormDialogData>({\n        form: SSH_KEY_CREATE_FORM_NAME,\n        onSubmit: (data, dispatch) => {\n            dispatch(createSshKey(data));\n        }\n    })\n)(DialogSshKeyCreate);\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-forms/create-user-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { compose } from \"redux\";\nimport { reduxForm } from 'redux-form';\nimport { withDialog } from \"store/dialog/with-dialog\";\nimport { USER_CREATE_FORM_NAME, createUser, UserCreateFormDialogData } from \"store/users/users-actions\";\nimport { UserRepositoryCreate } from \"views-components/dialog-create/dialog-user-create\";\n\nexport const CreateUserDialog = compose(\n    withDialog(USER_CREATE_FORM_NAME),\n    reduxForm<UserCreateFormDialogData>({\n        form: USER_CREATE_FORM_NAME,\n        onSubmit: (data, dispatch) => {\n            dispatch(createUser(data));\n        }\n    })\n)(UserRepositoryCreate);"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-forms/update-external-credential-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { compose, Dispatch } from \"redux\";\nimport { reduxForm } from 'redux-form';\nimport { withDialog } from \"store/dialog/with-dialog\";\nimport { DialogExternalCredentialUpdate } from \"views-components/dialog-update/dialog-external-credential-update\";\nimport { UpdateExternalCredentialFormDialogData } from \"store/external-credentials/external-credential-dialog-data\";\nimport { UPDATE_EXTERNAL_CREDENTIAL_FORM_NAME } from \"store/external-credentials/external-credentials-actions\";\nimport { updateExternalCredential } from \"store/external-credentials/external-credentials-actions\";\n\nexport const UpdateExternalCredentialDialog = compose(\n    withDialog(UPDATE_EXTERNAL_CREDENTIAL_FORM_NAME),\n    reduxForm<UpdateExternalCredentialFormDialogData>({\n        form: UPDATE_EXTERNAL_CREDENTIAL_FORM_NAME,\n        onSubmit: (data: UpdateExternalCredentialFormDialogData, dispatch: Dispatch) => {\n            for (const key in data) {\n                if (typeof data[key] === 'string') {\n                    data[key] = data[key].trim();\n                }\n            }\n            dispatch<any>(updateExternalCredential(data));\n        }\n    })\n)(DialogExternalCredentialUpdate);\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-forms/update-process-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { compose } from \"redux\";\nimport { reduxForm } from 'redux-form';\nimport { withDialog } from \"store/dialog/with-dialog\";\nimport { DialogProcessUpdate } from 'views-components/dialog-update/dialog-process-update';\nimport { PROCESS_UPDATE_FORM_NAME, ProcessUpdateFormDialogData } from 'store/processes/process-update-actions';\nimport { updateProcessRunner } from \"store/workbench/workbench-actions\";\n\nexport const UpdateProcessDialog = compose(\n    withDialog(PROCESS_UPDATE_FORM_NAME),\n    reduxForm<ProcessUpdateFormDialogData>({\n        form: PROCESS_UPDATE_FORM_NAME,\n        onSubmit: (data, dispatch) => {\n            dispatch(updateProcessRunner(data));\n        }\n    })\n)(DialogProcessUpdate);"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-move/dialog-collection-partial-move-to-existing-collection.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react'\nimport { compose, Dispatch } from 'redux'\nimport { DialogForm } from 'components/dialog-form/dialog-form'\nimport { connect } from 'react-redux'\nimport { withDialog } from 'store/dialog/with-dialog'\nimport {\n\tCollectionPartialMoveToExistingCollectionFormData,\n\tmoveCollectionPartialToExistingCollection,\n\tCOLLECTION_PARTIAL_MOVE_TO_SELECTED_COLLECTION,\n} from 'store/collections/collection-partial-move-actions'\nimport { DirectoryTreePickerDialogField } from 'views-components/projects-tree-picker/tree-picker-field'\nimport { CollectionFileSelection } from 'store/collection-panel/collection-panel-files/collection-panel-files-state'\nimport { WithDialogProps } from 'store/dialog/with-dialog'\nimport { PickerIdProp } from 'store/tree-picker/picker-id'\nimport { DialogTitle, DialogContent } from '@mui/material'\nimport { FILE_OPS_LOCATION_VALIDATION } from 'validators/validators'\nimport { useStateWithValidation } from 'common/useStateWithValidation'\n\ntype DialogCollectionPartialMoveProps = WithDialogProps<{\n\tinitialFormData: CollectionPartialMoveToExistingCollectionFormData\n\tcollectionFileSelection: CollectionFileSelection\n}> &\n\tPickerIdProp & {\n\t\tmoveCollectionPartialToExistingCollection: (\n\t\t\tfileSelection: CollectionFileSelection,\n\t\t\tformData: CollectionPartialMoveToExistingCollectionFormData\n\t\t) => void\n\t}\n\nconst mapDispatch = (dispatch: Dispatch) => ({\n\tmoveCollectionPartialToExistingCollection: (\n\t\tfileSelection: CollectionFileSelection,\n\t\tformData: CollectionPartialMoveToExistingCollectionFormData\n\t) => {\n\t\tdispatch<any>(moveCollectionPartialToExistingCollection(fileSelection, formData))\n\t},\n})\n\nexport const DialogCollectionPartialMoveToExistingCollection = compose(\n\twithDialog(COLLECTION_PARTIAL_MOVE_TO_SELECTED_COLLECTION),\n\tconnect(null, mapDispatch)\n)((props: DialogCollectionPartialMoveProps) => {\n\tconst { open, data, pickerId } = props\n\tconst { initialFormData, collectionFileSelection } = data\n\tconst [destination, setDestination, destinationErrs] = useStateWithValidation(initialFormData?.destination || {}, FILE_OPS_LOCATION_VALIDATION, 'Destination')\n\n\tconst fields = () => (\n\t\t<>\n\t\t\t<DialogTitle>Move to existing collection</DialogTitle>\n\t\t\t<DialogContent>\n\t\t\t\t<DirectoryTreePickerDialogField\n\t\t\t\t\tpickerId={pickerId}\n\t\t\t\t\tcurrentUuids={initialFormData?.destination.uuid ? [initialFormData.destination.uuid] : []}\n\t\t\t\t\thandleDirectoryChange={setDestination}\n\t\t\t\t/>\n\t\t\t</DialogContent>\n\t\t</>\n\t)\n\n\treturn (\n\t\t<DialogForm\n\t\t\topen={open}\n\t\t\tfields={fields()}\n            submitLabel='Move files'\n\t\t\tformErrors={destinationErrs}\n\t\t\tonSubmit={(event: React.FormEvent<HTMLFormElement>) => {\n\t\t\t\tevent.preventDefault()\n\t\t\t\tprops.moveCollectionPartialToExistingCollection(collectionFileSelection, {\n\t\t\t\t\tdestination: destination,\n\t\t\t\t})\n\t\t\t}}\n\t\t\tcloseDialog={props.closeDialog}\n\t\t\tclearFormValues={() => {\n\t\t\t\tsetDestination({} as any)\n\t\t\t}}\n\t\t/>\n\t)\n})\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-move/dialog-collection-partial-move-to-new-collection.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react'\nimport { compose, Dispatch } from 'redux'\nimport { DialogForm } from 'components/dialog-form/dialog-form'\nimport { connect } from 'react-redux'\nimport { withDialog } from 'store/dialog/with-dialog'\nimport { DialogCollectionNameField } from 'views-components/form-fields/collection-form-fields'\nimport {\n\tCollectionPartialMoveToNewCollectionFormData,\n\tmoveCollectionPartialToNewCollection,\n\tCOLLECTION_PARTIAL_MOVE_TO_NEW_COLLECTION,\n} from 'store/collections/collection-partial-move-actions'\nimport { ProjectTreePickerDialogField } from 'views-components/projects-tree-picker/tree-picker-field'\nimport { CollectionFileSelection } from 'store/collection-panel/collection-panel-files/collection-panel-files-state'\nimport { WithDialogProps } from 'store/dialog/with-dialog'\nimport { PickerIdProp } from 'store/tree-picker/picker-id'\nimport { DialogTitle, DialogContent } from '@mui/material'\nimport { DialogRichTextField } from 'components/dialog-form/dialog-text-field'\nimport { REQUIRED_VALIDATION, REQUIRED_LENGTH255_VALIDATION, MAXLENGTH_524288_VALIDATION } from 'validators/validators'\nimport { useStateWithValidation } from 'common/useStateWithValidation'\n\ntype DialogCollectionPartialMoveProps = WithDialogProps<{\n\tinitialFormData: CollectionPartialMoveToNewCollectionFormData\n\tcollectionFileSelection: CollectionFileSelection\n}> &\n\tPickerIdProp & {\n\t\tmoveCollectionPartialToNewCollection: (\n\t\t\tfileSelection: CollectionFileSelection,\n\t\t\tformData: CollectionPartialMoveToNewCollectionFormData\n\t\t) => void\n\t}\n\nconst mapDispatch = (dispatch: Dispatch) => ({\n\tmoveCollectionPartialToNewCollection: (\n\t\tfileSelection: CollectionFileSelection,\n\t\tformData: CollectionPartialMoveToNewCollectionFormData\n\t) => {\n\t\tdispatch<any>(moveCollectionPartialToNewCollection(fileSelection, formData))\n\t},\n})\n\nexport const DialogCollectionPartialMoveToNewCollection = compose(\n\twithDialog(COLLECTION_PARTIAL_MOVE_TO_NEW_COLLECTION),\n\tconnect(null, mapDispatch)\n)((props: DialogCollectionPartialMoveProps) => {\n\tconst { open, data, pickerId } = props\n\tconst { initialFormData, collectionFileSelection } = data\n\tconst { name, description } = initialFormData || {}\n\n\tconst [thisName, setThisName, nameErrs] = useStateWithValidation(name, REQUIRED_LENGTH255_VALIDATION, 'Name')\n    const [thisDescription, setThisDescription, descriptionErrs] = useStateWithValidation(description, MAXLENGTH_524288_VALIDATION, 'Description')\n    const [thisOwnerUuid, setThisOwnerUuid, ownerUuidErrs] = useStateWithValidation('', REQUIRED_VALIDATION, 'Project')\n\n    const [formErrors, setFormErrors] = React.useState<string[]>([])\n\n    React.useEffect(() => {\n        setFormErrors([...nameErrs, ...descriptionErrs, ...ownerUuidErrs])\n    }, [nameErrs, descriptionErrs, ownerUuidErrs])\n\n\tconst fields = () => (\n\t\t<>\n\t\t\t<DialogTitle>Move to new collection</DialogTitle>\n\t\t\t<DialogContent>\n\t\t\t\t<DialogCollectionNameField defaultValue={name} setValue={setThisName} 
/>\n\t\t\t\t<DialogRichTextField\n\t\t\t\t\tlabel=\"Description\"\n\t\t\t\t\tdefaultValue={description}\n\t\t\t\t\tsetValue={setThisDescription}\n\t\t\t\t\tvalidators={MAXLENGTH_524288_VALIDATION}\n\t\t\t\t/>\n\t\t\t\t<ProjectTreePickerDialogField\n\t\t\t\t\tpickerId={pickerId}\n\t\t\t\t\tsetSelectedProject={setThisOwnerUuid}\n\t\t\t\t/>\n\t\t\t</DialogContent>\n\t\t</>\n\t)\n\n\treturn (\n\t\t<DialogForm\n\t\t\topen={open}\n\t\t\tfields={fields()}\n            submitLabel='Create Collection'\n\t\t\tformErrors={formErrors}\n\t\t\tonSubmit={(event: React.FormEvent<HTMLFormElement>) => {\n\t\t\t\tevent.preventDefault()\n\t\t\t\tprops.moveCollectionPartialToNewCollection(collectionFileSelection, {\n\t\t\t\t\tname: thisName,\n\t\t\t\t\tdescription: thisDescription,\n\t\t\t\t\tprojectUuid: thisOwnerUuid,\n\t\t\t\t})\n\t\t\t}}\n\t\t\tcloseDialog={props.closeDialog}\n\t\t\tclearFormValues={() => {\n\t\t\t\tsetThisName('')\n\t\t\t\tsetThisDescription('')\n\t\t\t\tsetThisOwnerUuid('')\n\t\t\t}}\n\t\t/>\n\t)\n})\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-move/dialog-collection-partial-move-to-separate-collections.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react'\nimport { compose, Dispatch } from 'redux'\nimport { DialogForm } from 'components/dialog-form/dialog-form'\nimport { connect } from 'react-redux'\nimport { withDialog } from 'store/dialog/with-dialog'\nimport {\n\tCollectionPartialMoveToSeparateCollectionsFormData,\n\tmoveCollectionPartialToSeparateCollections,\n\tCOLLECTION_PARTIAL_MOVE_TO_SEPARATE_COLLECTIONS,\n} from 'store/collections/collection-partial-move-actions'\nimport { ProjectTreePickerDialogField } from 'views-components/projects-tree-picker/tree-picker-field'\nimport { CollectionFileSelection } from 'store/collection-panel/collection-panel-files/collection-panel-files-state'\nimport { WithDialogProps } from 'store/dialog/with-dialog'\nimport { PickerIdProp } from 'store/tree-picker/picker-id'\nimport { DialogTitle, DialogContent } from '@mui/material'\nimport { COLLECTION_PROJECT_VALIDATION } from 'validators/validators'\nimport { useStateWithValidation } from 'common/useStateWithValidation'\n\ntype DialogCollectionPartialMoveProps = WithDialogProps<{\n\tinitialData: CollectionPartialMoveToSeparateCollectionsFormData\n\tcollectionFileSelection: CollectionFileSelection\n}> &\n\tPickerIdProp & {\n\t\tmoveCollectionPartialToSeparateCollections: (\n\t\t\tfileSelection: CollectionFileSelection,\n\t\t\tformData: CollectionPartialMoveToSeparateCollectionsFormData\n\t\t) => void\n\t}\n\nconst mapDispatch = (dispatch: Dispatch) => ({\n\tmoveCollectionPartialToSeparateCollections: (\n\t\tfileSelection: CollectionFileSelection,\n\t\tformData: CollectionPartialMoveToSeparateCollectionsFormData\n\t) => {\n\t\tdispatch<any>(moveCollectionPartialToSeparateCollections(fileSelection, formData))\n\t},\n})\n\nexport const DialogCollectionPartialMoveToSeparateCollections = compose(\n\twithDialog(COLLECTION_PARTIAL_MOVE_TO_SEPARATE_COLLECTIONS),\n\tconnect(null, mapDispatch)\n)((props: DialogCollectionPartialMoveProps) => {\n\tconst { open, data, pickerId } = props\n\tconst { initialData, collectionFileSelection } = data\n\n\tconst [projectUuid, setProjectUuid, projectUuidErrs] = useStateWithValidation(initialData?.projectUuid || '', COLLECTION_PROJECT_VALIDATION, 'Project')\n\n\tconst fields = () => (\n\t\t<>\n\t\t\t<DialogTitle>Move to separate collections</DialogTitle>\n\t\t\t<DialogContent>\n\t\t\t\t<ProjectTreePickerDialogField\n\t\t\t\t\tpickerId={pickerId}\n\t\t\t\t\tsetSelectedProject={setProjectUuid}\n\t\t\t\t/>\n\t\t\t</DialogContent>\n\t\t</>\n\t)\n\n\treturn (\n\t\t<DialogForm\n\t\t\topen={open}\n\t\t\tfields={fields()}\n\t\t\tsubmitLabel='Create collections'\n\t\t\tformErrors={projectUuidErrs}\n\t\t\tonSubmit={(event: React.FormEvent<HTMLFormElement>) => {\n\t\t\t\tevent.preventDefault()\n\t\t\t\tprops.moveCollectionPartialToSeparateCollections(collectionFileSelection, {\n\t\t\t\t\tname: initialData?.name || '',\n\t\t\t\t\tprojectUuid: projectUuid,\n\t\t\t\t})\n\t\t\t}}\n\t\t\tcloseDialog={props.closeDialog}\n\t\t\tclearFormValues={() => {\n\t\t\t\tsetProjectUuid('')\n\t\t\t}}\n\t\t/>\n\t)\n})\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-move/dialog-move-collection.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react'\nimport { compose, Dispatch } from 'redux'\nimport { connect } from 'react-redux'\nimport { withDialog } from 'store/dialog/with-dialog'\nimport { WithDialogProps } from 'store/dialog/with-dialog'\nimport { DialogForm } from 'components/dialog-form/dialog-form'\nimport { ProjectTreePickerDialogField } from 'views-components/projects-tree-picker/tree-picker-field'\nimport { MOVE_TO_VALIDATION } from 'validators/validators'\nimport { MoveToFormDialogData } from 'store/move-to-dialog/move-to-dialog'\nimport { PickerIdProp } from 'store/tree-picker/picker-id'\nimport { DialogTitle, DialogContent } from '@mui/material'\nimport { useStateWithValidation } from 'common/useStateWithValidation'\nimport { COLLECTION_MOVE_FORM_NAME } from 'store/collections/collection-move-actions'\nimport { moveCollectionRunner } from 'store/workbench/workbench-actions'\n\ntype DialogMoveCollectionProps = WithDialogProps<MoveToFormDialogData> & PickerIdProp & {\n\tmoveCollections: (data: MoveToFormDialogData) => void\n}\n\nconst mapDispatch = (dispatch: Dispatch) => ({\n\tmoveCollections: (data: MoveToFormDialogData) => {\n\t\tdispatch<any>(moveCollectionRunner(data))\n\t},\n})\n\nexport const DialogMoveCollection = compose(\n\twithDialog(COLLECTION_MOVE_FORM_NAME),\n\tconnect(null, mapDispatch)\n)((props: DialogMoveCollectionProps) => {\n\tconst { open, data, pickerId } = props\n\tconst initialData = data || { ownerUuid: '' }\n\n\tconst [ownerUuid, setOwnerUuid, ownerUuidErrs] = useStateWithValidation(initialData.ownerUuid || '', MOVE_TO_VALIDATION, 'Owner')\n\n\tconst fields = () => (\n\t\t<>\n\t\t\t<DialogTitle>Move to</DialogTitle>\n\t\t\t<DialogContent>\n\t\t\t\t<ProjectTreePickerDialogField\n\t\t\t\t\tpickerId={pickerId}\n\t\t\t\t\tcurrentUuids={data?.uuid ? [data.uuid] : []}\n\t\t\t\t\tsetSelectedProject={setOwnerUuid}\n\t\t\t\t/>\n\t\t\t</DialogContent>\n\t\t</>\n\t)\n\n\treturn (\n\t\t<DialogForm\n\t\t\topen={open}\n\t\t\tfields={fields()}\n\t\t\tsubmitLabel='Move'\n\t\t\tformErrors={ownerUuidErrs}\n\t\t\tonSubmit={(event: React.FormEvent<HTMLFormElement>) => {\n\t\t\t\tevent.preventDefault()\n\t\t\t\tprops.moveCollections({\n\t\t\t\t\townerUuid: ownerUuid,\n\t\t\t\t\tuuid: initialData.uuid || '',\n\t\t\t\t\tname: initialData.name || '',\n\t\t\t\t})\n\t\t\t}}\n\t\t\tcloseDialog={props.closeDialog}\n\t\t\tclearFormValues={() => {\n\t\t\t\tsetOwnerUuid('')\n\t\t\t}}\n\t\t/>\n\t)\n})\n\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-move/dialog-move-project.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react'\nimport { compose, Dispatch } from 'redux'\nimport { connect } from 'react-redux'\nimport { withDialog } from 'store/dialog/with-dialog'\nimport { WithDialogProps } from 'store/dialog/with-dialog'\nimport { DialogForm } from 'components/dialog-form/dialog-form'\nimport { ProjectTreePickerDialogField } from 'views-components/projects-tree-picker/tree-picker-field'\nimport { MOVE_TO_VALIDATION } from 'validators/validators'\nimport { MoveToFormDialogData } from 'store/move-to-dialog/move-to-dialog'\nimport { PickerIdProp } from 'store/tree-picker/picker-id'\nimport { DialogTitle, DialogContent } from '@mui/material'\nimport { useStateWithValidation } from 'common/useStateWithValidation'\nimport { PROJECT_MOVE_FORM_NAME } from 'store/projects/project-move-actions'\nimport { moveProjectRunner } from 'store/workbench/workbench-actions'\n\ntype DialogMoveProjectProps = WithDialogProps<MoveToFormDialogData> & PickerIdProp & {\n    moveProjects: (data: MoveToFormDialogData,) => void\n}\n\nconst mapDispatch = (dispatch: Dispatch) => ({\n    moveProjects: (data: MoveToFormDialogData) => {\n        dispatch<any>(moveProjectRunner(data))\n    },\n})\n\nexport const DialogMoveProject = compose(\n    withDialog(PROJECT_MOVE_FORM_NAME),\n    connect(null, mapDispatch)\n)((props: DialogMoveProjectProps) => {\n    const { open, data, pickerId } = props\n    const initialData = data || { ownerUuid: '' }\n    const [ownerUuid, setOwnerUuid, ownerUuidErrs] = useStateWithValidation('', MOVE_TO_VALIDATION, 'Owner')\n    const [isSubmitting, setIsSubmitting] = React.useState<boolean>(false);\n\n    React.useEffect(() => {\n            if (!open) {\n                setIsSubmitting(false);\n            }\n        }, [open]);\n\n    const fields = () => (\n        <>\n            <DialogTitle>Move to</DialogTitle>\n            <DialogContent>\n                <ProjectTreePickerDialogField\n                    pickerId={pickerId}\n                    setSelectedProject={setOwnerUuid}\n                />\n            </DialogContent>\n        </>\n    )\n\n    return (\n        <DialogForm\n            open={open}\n            fields={fields()}\n            isSubmitting={isSubmitting}\n            submitLabel='Move'\n            formErrors={ownerUuidErrs}\n            onSubmit={(event: React.FormEvent<HTMLFormElement>) => {\n                event.preventDefault()\n                setIsSubmitting(true);\n                props.moveProjects({\n                    ownerUuid: ownerUuid,\n                    uuid: initialData.uuid || '',\n                    name: initialData.name || '',\n                })\n            }}\n            closeDialog={props.closeDialog}\n            clearFormValues={() => {\n                setOwnerUuid('')\n            }}\n        />\n    )\n})\n\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-remove/external-credential-remove-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { removeExternalCredentialPermanently, REMOVE_EXTERNAL_CREDENTIAL_DIALOG } from 'store/external-credentials/external-credentials-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeExternalCredentialPermanently(props.data.uuid));\n    }\n});\n\nexport const RemoveExternalCredentialDialog = compose(\n    withDialog(REMOVE_EXTERNAL_CREDENTIAL_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-update/dialog-collection-update.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useEffect, useState } from 'react';\nimport { compose, Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport { CollectionUpdateFormDialogData, updateCollection } from 'store/collections/collection-update-actions';\nimport { DialogForm } from 'components/dialog-form/dialog-form';\nimport { DialogCollectionNameField } from 'views-components/form-fields/collection-form-fields';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { FormGroup, FormLabel, DialogTitle, DialogContent } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { useStateWithValidation } from 'common/useStateWithValidation';\nimport { COLLECTION_NAME_VALIDATION, COLLECTION_DESCRIPTION_VALIDATION } from 'validators/validators';\nimport { DialogRichTextField } from 'components/dialog-form/dialog-text-field';\nimport { DialogResourcePropertiesForm } from 'views-components/resource-properties-form/resource-properties-form';\nimport { PropertyChips, getVocabularyFromChips, getChipsFromVocabulary } from 'components/chips/chips';\nimport { RootState } from 'store/store';\nimport { DialogMultiCheckboxField } from 'components/checkbox-field/checkbox-field';\nimport { Vocabulary } from 'models/vocabulary';\nimport { getStorageClasses } from 'common/config';\nimport { COLLECTION_UPDATE_FORM_NAME } from 'store/collections/collection-update-actions';\nimport { withDialog, WithDialogProps } from 'store/dialog/with-dialog';\nimport { isEqual } from 'lodash';\n\ntype CssRules = 'propertiesForm';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    propertiesForm: {\n        marginTop: theme.spacing(2),\n        marginBottom: theme.spacing(2),\n    },\n});\n\nconst mapState = (state: RootState) => ({\n    vocabulary: state.properties.vocabulary,\n    storageClasses: getStorageClasses(state.auth.config)\n});\n\nconst mapDispatch = (dispatch: Dispatch) => ({\n    updateCollection: (data: CollectionUpdateFormDialogData, setSubmitErr: (errMsg: string) => void) =>\n        dispatch<any>(updateCollection(data, setSubmitErr))\n});\n\ntype DialogCollectionProps = WithDialogProps<CollectionUpdateFormDialogData> & {\n    updateCollection: (data: CollectionUpdateFormDialogData, setSubmitErr: (errMsg: string) => void) => Promise<void>;\n    vocabulary: Vocabulary;\n    storageClasses: string[];\n};\n\nexport const DialogCollectionUpdate = compose(\n    connect(mapState, mapDispatch),\n    withStyles(styles),\n    withDialog(COLLECTION_UPDATE_FORM_NAME)\n)(({ data, closeDialog, open, vocabulary, storageClasses, classes, updateCollection }: DialogCollectionProps & WithStyles<CssRules>) => {\n        const initialData = data || { uuid: '', name: '', description: '', properties: {}, storageClassesDesired: [] };\n        const initialProperties = initialData.properties || {};\n        const initialStorageClassesDesired = initialData.storageClassesDesired || storageClasses || ['default'];\n        const [collectionName, setCollectionName, collectionNameErrs] = useStateWithValidation(initialData.name || '', COLLECTION_NAME_VALIDATION, 'Collection Name');\n        const [description, setDescription, descriptionErrs] = useStateWithValidation(initialData.description || '', COLLECTION_DESCRIPTION_VALIDATION, 'Description');\n        const [chips, setChips] = 
useState<PropertyChips>(getChipsFromVocabulary(initialProperties, vocabulary));\n        const [storageClassesDesired, setStorageClassesDesired] = useState<string[]>(initialStorageClassesDesired);\n        const [formErrors, setFormErrors] = useState<string[]>([]);\n        const [submitErr, setSubmitErr] = useState<string>('');\n        const [isSubmitting, setIsSubmitting] = useState<boolean>(false);\n        const currentProperties = getVocabularyFromChips(chips, vocabulary);\n        const submitDisabled = !collectionNameErrs.length && !descriptionErrs.length &&\n            collectionName === (initialData.name || '') &&\n            description === (initialData.description || '') &&\n            isEqual(currentProperties, initialProperties) &&\n            isEqual(storageClassesDesired, initialStorageClassesDesired);\n\n        useEffect(() => {\n            if (data) {\n                setCollectionName(data.name || '');\n                setDescription(data.description || '');\n                setChips(getChipsFromVocabulary(data.properties || {}, vocabulary));\n                setStorageClassesDesired(data.storageClassesDesired || storageClasses || ['default']);\n            }\n        }, [data]);\n\n        useEffect(() => {\n            setFormErrors([...collectionNameErrs, ...descriptionErrs]);\n            if (submitErr) {\n                setFormErrors(prevErrors => [...prevErrors, submitErr]);\n            }\n        }, [collectionNameErrs, descriptionErrs, submitErr]);\n\n        const handleSubmit = (ev) => {\n            ev.preventDefault();\n            setIsSubmitting(true);\n            updateCollection({\n                    uuid: initialData.uuid,\n                    name: collectionName,\n                    description: description,\n                    storageClassesDesired: storageClassesDesired,\n                    properties: currentProperties,\n                }, setSubmitErr\n            ).finally(() => {\n                setIsSubmitting(false);\n            });\n        };\n\n        const fields = () => (\n            <>\n                <DialogTitle>Edit Collection</DialogTitle>\n                <DialogContent>\n                    <DialogCollectionNameField\n                        setValue={setCollectionName}\n                        defaultValue={initialData.name}\n                        submitErr={submitErr}\n                        setSubmitErr={setSubmitErr}\n                    />\n                    <DialogRichTextField\n                        label=\"Description\"\n                        defaultValue={description}\n                        setValue={setDescription}\n                        validators={COLLECTION_DESCRIPTION_VALIDATION}\n                    />\n                    <div className={classes.propertiesForm}>\n                        <FormLabel>Properties</FormLabel>\n                        <FormGroup>\n                            <DialogResourcePropertiesForm\n                                initialProperties={getChipsFromVocabulary(initialData.properties || {}, vocabulary)}\n                                setChips={setChips}\n                                onSubmit={(ev) => ev.preventDefault()}\n                            />\n                        </FormGroup>\n                    </div>\n                    <DialogMultiCheckboxField\n                        name=\"storageClassesDesired\"\n                        defaultValues={storageClassesDesired}\n                        label=\"Storage classes\"\n                        
onChange={setStorageClassesDesired}\n                        minSelection={1}\n                        helperText='At least one class should be selected'\n                    />\n                </DialogContent>\n            </>\n        );\n\n        return (\n            <DialogForm\n                fields={fields()}\n                submitLabel='Save'\n                formErrors={formErrors}\n                submitDisabled={submitDisabled}\n                isSubmitting={isSubmitting}\n                onSubmit={handleSubmit}\n                closeDialog={closeDialog}\n                clearFormValues={() => {\n                    setCollectionName('');\n                    setDescription('');\n                    setChips({} as PropertyChips);\n                    setStorageClassesDesired([]);\n                }}\n                open={open}\n            />\n        );\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-update/dialog-external-credential-update.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { InjectedFormProps } from 'redux-form';\nimport { Grid } from \"@mui/material\";\nimport { WithDialogProps } from 'store/dialog/with-dialog';\nimport { FormDialog } from 'components/form-dialog/form-dialog';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { UpdateExternalCredentialFormDialogData } from 'store/external-credentials/external-credential-dialog-data';\nimport { ExternalCredentialNameField,\n    ExternalCredentialDescriptionField,\n    ExternalCredentialClassUpdateField,\n    ExternalCredentialExternalIdField,\n    ExternalCredentialSecretUpdateField,\n    ExternalCredentialExpiresAtField,\n    ExternalCredentialScopesField } from 'views-components/form-fields/external-credential-form-fields';\n\ntype CssRules = 'description';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    description: {\n        marginTop: theme.spacing(2),\n        marginBottom: theme.spacing(2),\n    },\n});\n\ntype DialogExternalCredentialProps = WithDialogProps<{}> & InjectedFormProps<UpdateExternalCredentialFormDialogData>;\n\nexport const DialogExternalCredentialUpdate = (props: DialogExternalCredentialProps) => {\n    let title = 'Edit External Credential';\n\n    return <FormDialog\n        dialogTitle={title}\n        formFields={ExternalCredentialEditFields as any}\n        submitLabel='Save'\n        {...props}\n    />;\n};\n\nconst ExternalCredentialEditFields = withStyles(styles)(\n    ({ classes }: WithStyles<CssRules>) => <span>\n        <ExternalCredentialNameField />\n        <div className={classes.description}>\n            <ExternalCredentialDescriptionField />\n        </div>\n        <Grid container direction={'row'} xs={12} spacing={2}>\n            <Grid item xs={6}><ExternalCredentialClassUpdateField /></Grid>\n            <Grid item xs={6}><ExternalCredentialExternalIdField /></Grid>\n            <Grid item xs={6}><ExternalCredentialSecretUpdateField /></Grid>\n            <Grid item xs={6}><ExternalCredentialExpiresAtField /></Grid>\n            <Grid item xs={12}><ExternalCredentialScopesField /></Grid>\n        </Grid>\n    </span>);\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-update/dialog-process-update.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { InjectedFormProps } from 'redux-form';\nimport { WithDialogProps } from 'store/dialog/with-dialog';\nimport { ProcessUpdateFormDialogData } from 'store/processes/process-update-actions';\nimport { FormDialog } from 'components/form-dialog/form-dialog';\nimport { ProcessNameField, ProcessDescriptionField } from 'views-components/form-fields/process-form-fields';\n\ntype DialogProcessProps = WithDialogProps<{}> & InjectedFormProps<ProcessUpdateFormDialogData>;\n\nexport const DialogProcessUpdate = (props: DialogProcessProps) =>\n    <FormDialog\n        dialogTitle='Edit Process'\n        formFields={ProcessEditFields}\n        submitLabel='Save'\n        {...props}\n    />;\n\nconst ProcessEditFields = () => <span>\n    <ProcessNameField />\n    <ProcessDescriptionField />\n</span>;\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-update/dialog-project-update.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { useEffect, useState } from 'react';\nimport { compose, Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport { ProjectUpdateFormDialogData, PROJECT_UPDATE_FORM_NAME } from 'store/projects/project-update-actions';\nimport { updateProjectRunner } from 'store/workbench/workbench-actions'\nimport { updateGroup } from 'store/groups-panel/groups-panel-actions';\nimport { DialogForm } from 'components/dialog-form/dialog-form';\nimport { DialogTextField, DialogRichTextField } from 'components/dialog-form/dialog-text-field';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { FormGroup, FormLabel, DialogTitle, DialogContent } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { useStateWithValidation } from 'common/useStateWithValidation';\nimport { PROJECT_NAME_VALIDATION, PROJECT_NAME_VALIDATION_ALLOW_SLASH, PROJECT_DESCRIPTION_VALIDATION } from 'validators/validators';\nimport { DialogResourcePropertiesForm } from 'views-components/resource-properties-form/resource-properties-form';\nimport { PropertyChips, getVocabularyFromChips, getChipsFromVocabulary } from 'components/chips/chips';\nimport { RootState } from 'store/store';\nimport { Vocabulary } from 'models/vocabulary';\nimport { withDialog, WithDialogProps } from 'store/dialog/with-dialog';\nimport { GroupClass } from 'models/group';\nimport { isEqual } from 'lodash';\n\ntype CssRules = 'propertiesForm';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    propertiesForm: {\n        marginTop: theme.spacing(2),\n        marginBottom: theme.spacing(2),\n    },\n});\n\nconst mapState = (state: RootState) => ({\n    vocabulary: state.properties.vocabulary\n});\n\nconst mapDispatch = (dispatch: Dispatch) => ({\n    updateProject: (data: ProjectUpdateFormDialogData, setSubmitErr: (errMsg: string) => void) =>\n        dispatch<any>(updateProjectRunner(data, setSubmitErr)),\n    updateGroup: (data: ProjectUpdateFormDialogData, setSubmitErr: (errMsg: string) => void) =>\n        dispatch<any>(updateGroup(data, setSubmitErr))\n});\n\ntype DialogProjectProps = WithDialogProps<{sourcePanel: GroupClass} & ProjectUpdateFormDialogData> & {\n    updateProject: (data: ProjectUpdateFormDialogData, setSubmitErr: (errMsg: string) => void) => Promise<void>;\n    updateGroup: (data: ProjectUpdateFormDialogData, setSubmitErr: (errMsg: string) => void) => Promise<void>;\n    vocabulary: Vocabulary;\n    allowSlash: boolean;\n};\n\nexport const DialogProjectUpdate = compose(\n    connect(mapState, mapDispatch),\n    withStyles(styles),\n    withDialog(PROJECT_UPDATE_FORM_NAME)\n)(({ data, closeDialog, open, vocabulary, allowSlash, classes, updateProject, updateGroup }: DialogProjectProps & WithStyles<CssRules>) => {\n        const initialData = data || { uuid: '', name: '', description: '', properties: {} };\n    const initialProperties = initialData.properties || {};\n        const [projectName, setProjectName, projectNameErrs] = useStateWithValidation(initialData.name || '', PROJECT_NAME_VALIDATION, 'Project Name');\n        const [description, setDescription, descriptionErrs] = useStateWithValidation(initialData.description || '', PROJECT_DESCRIPTION_VALIDATION, 'Description');\n    const [chips, setChips] = useState<PropertyChips>(getChipsFromVocabulary(initialProperties, vocabulary));\n        
const [formErrors, setFormErrors] = useState<string[]>([]);\n        const [submitErr, setSubmitErr] = useState<string>('');\n        const [isSubmitting, setIsSubmitting] = useState<boolean>(false);\n\n        const sourcePanel = data?.sourcePanel || GroupClass.PROJECT;\n            const isGroup = sourcePanel === GroupClass.ROLE;\n            const title = isGroup ? 'Edit Group' : 'Edit Project';\n        const currentProperties = getVocabularyFromChips(chips, vocabulary);\n        const submitDisabled = !projectNameErrs.length && !descriptionErrs.length &&\n            projectName === (initialData.name || '') &&\n            description === (initialData.description || '') &&\n            isEqual(currentProperties, initialProperties);\n\n        useEffect(() => {\n            if (data) {\n                setProjectName(data.name || '');\n                setDescription(data.description || '');\n                setChips(getChipsFromVocabulary(data.properties || {}, vocabulary));\n            }\n        }, [data, vocabulary]);\n\n        useEffect(() => {\n            setFormErrors([...projectNameErrs, ...descriptionErrs]);\n            if (submitErr) {\n                setFormErrors(prevErrors => [...prevErrors, submitErr]);\n            }\n        }, [projectNameErrs, descriptionErrs, submitErr]);\n\n        const handleSubmit = (ev) => {\n            ev.preventDefault();\n            setIsSubmitting(true);\n            const updateFn = sourcePanel === GroupClass.ROLE ? updateGroup : updateProject;\n            updateFn({\n                    uuid: initialData.uuid,\n                    name: projectName,\n                    description: description,\n                    properties: currentProperties,\n                }, setSubmitErr\n            ).finally(() => {\n                setIsSubmitting(false);\n            });\n        };\n\n        const fields = () => (\n            <>\n                <DialogTitle>{title}</DialogTitle>\n                <DialogContent>\n                    <DialogTextField\n                        label={isGroup ? \"Group Name\" : \"Project Name\"}\n                        defaultValue={projectName}\n                        setValue={setProjectName}\n                        validators={allowSlash ? 
PROJECT_NAME_VALIDATION_ALLOW_SLASH : PROJECT_NAME_VALIDATION}\n                        submitErr={submitErr}\n                        setSubmitErr={setSubmitErr}\n                    />\n                    <DialogRichTextField\n                        label=\"Description\"\n                        defaultValue={description}\n                        setValue={setDescription}\n                        validators={PROJECT_DESCRIPTION_VALIDATION}\n                    />\n                    <div className={classes.propertiesForm}>\n                        <FormLabel>Properties</FormLabel>\n                        <FormGroup>\n                            <DialogResourcePropertiesForm\n                                initialProperties={getChipsFromVocabulary(initialData.properties || {}, vocabulary)}\n                                setChips={setChips}\n                                onSubmit={(ev) => ev.preventDefault()}\n                            />\n                        </FormGroup>\n                    </div>\n                </DialogContent>\n            </>\n        );\n\n        return (\n            <DialogForm\n                fields={fields()}\n                submitLabel='Save'\n                formErrors={formErrors}\n                submitDisabled={submitDisabled}\n                isSubmitting={isSubmitting}\n                onSubmit={handleSubmit}\n                closeDialog={closeDialog}\n                clearFormValues={() => {\n                    setProjectName('');\n                    setDescription('');\n                    setChips({} as PropertyChips);\n                }}\n                open={open}\n            />\n        );\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/dialog-upload/dialog-collection-files-upload.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { compose, Dispatch } from \"redux\";\nimport { connect } from 'react-redux';\nimport { WithDialogProps, withDialog } from 'store/dialog/with-dialog';\nimport { DialogForm } from 'components/dialog-form/dialog-form';\nimport { DialogFileUploaderField } from 'views-components/file-uploader/file-uploader';\nimport { WarningCollection } from 'components/warning-collection/warning-collection';\nimport { DialogTitle, DialogContent } from '@mui/material';\nimport { COLLECTION_UPLOAD_FILES_DIALOG, submitCollectionFiles } from 'store/collections/collection-upload-actions';\n\ntype DialogCollectionFilesUploadProps = WithDialogProps<{targetLocation?: string}> & {\n    submitCollectionFiles: (targetLocation?: string) => void;\n};\n\nconst mapDispatch = (dispatch: Dispatch) => ({\n    submitCollectionFiles: (targetLocation?: string) => dispatch<any>(submitCollectionFiles(targetLocation))\n});\n\nexport const DialogCollectionFilesUpload = compose(\n    connect(null, mapDispatch),\n    withDialog(COLLECTION_UPLOAD_FILES_DIALOG)\n)((props: DialogCollectionFilesUploadProps) => {\n    const { open, data, closeDialog } = props;\n    const [isPopulated, setIsPopulated] = React.useState(false);\n    const [isSubmitting, setIsSubmitting] = React.useState(false);\n\n    const fields = () => (\n        <>\n            <DialogTitle>Upload data</DialogTitle>\n            <DialogContent>\n                <DialogFileUploaderField onDrop={(files: File[]) => setIsPopulated(files.length > 0)} />\n                <WarningCollection text=\"Uploading new files will change content address. Empty folders will be ignored.\" />\n            </DialogContent>\n        </>\n    );\n\n    return (\n        <DialogForm\n            open={open}\n            fields={fields()}\n            submitLabel=\"Upload data\"\n            formErrors={isPopulated ? [] : ['Please add files to upload']} // content of err string doesn't matter here\n            isSubmitting={isSubmitting}\n            onSubmit={(event: React.FormEvent<HTMLFormElement>) => {\n                event.preventDefault();\n                setIsSubmitting(true);\n                props.submitCollectionFiles(data?.targetLocation);\n            }}\n            closeDialog={closeDialog}\n            clearFormValues={() => {\n                setIsPopulated(false);\n                setIsSubmitting(false);\n            }}\n        />\n    );\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/download-files-as-zip/download-files-as-zip.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { compose, Dispatch } from 'redux';\nimport { reduxForm, InjectedFormProps, Field } from 'redux-form';\nimport { withDialog, WithDialogProps } from 'store/dialog/with-dialog';\nimport { FormDialog } from 'components/form-dialog/form-dialog';\nimport { DialogContentText } from '@mui/material';\nimport { TextField } from 'components/text-field/text-field';\nimport { DOWNLOAD_ZIP_DIALOG, downloadZip } from 'store/collection-panel/collection-panel-files/collection-panel-files-actions';\nimport { DOWNLOAD_ZIP_VALIDATION } from 'validators/validators';\n\ninterface DownloadZipFormData {\n    collectionUuid: string;\n    fileName: string;\n    paths: string[];\n}\n\nexport const DownloadFilesAsZipDialog = compose(\n    withDialog(DOWNLOAD_ZIP_DIALOG),\n    reduxForm({\n        form: DOWNLOAD_ZIP_DIALOG,\n        touchOnChange: true,\n        onSubmit: (data: DownloadZipFormData, dispatch: Dispatch) => {\n            dispatch<any>(downloadZip(data.collectionUuid, data.paths, data.fileName));\n        }\n    })\n)((props: WithDialogProps<{}> & InjectedFormProps<DownloadZipFormData>) =>\n    <FormDialog\n        dialogTitle='Download'\n        formFields={DownloadFilesAsZipFormFields}\n        submitLabel='Ok'\n        enableWhenPristine={true}\n        {...props}\n    />);\n\nconst DownloadFilesAsZipFormFields = () => <>\n    <DialogContentText>\n        {\"Please enter a name for the downloaded zip\"}\n    </DialogContentText>\n    <Field\n        name='fileName'\n        component={TextField as any}\n        autoFocus={true}\n        validate={DOWNLOAD_ZIP_VALIDATION}\n    />\n</>;\n"
  },
  {
    "path": "services/workbench2/src/views-components/favorite-star/favorite-star.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { FavoriteIcon, PublicFavoriteIcon } from \"components/icon/icon\";\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Tooltip } from \"@mui/material\";\n\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\n\ntype CssRules = \"icon\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    icon: {\n        fontSize: \"inherit\"\n    }\n});\n\nconst mapStateToProps = (state: RootState, props: { resourceUuid: string; className?: string; }) => ({\n    ...props,\n    isFavoriteVisible: state.favorites[props.resourceUuid],\n    isPublicFavoriteVisible: state.publicFavorites[props.resourceUuid]\n});\n\nexport const FavoriteStar = connect(mapStateToProps)(\n    withStyles(styles)((props: { isFavoriteVisible: boolean; className?: string; } & WithStyles<CssRules>) => {\n        if (props.isFavoriteVisible) {\n            return  <Tooltip enterDelay={500} title=\"Favorite\">\n                        <span><FavoriteIcon data-cy=\"favorite-star\" className={props.className || props.classes.icon} /></span>\n                </Tooltip>;\n        } else {\n            return null;\n        }\n    }));\n\nexport const PublicFavoriteStar = connect(mapStateToProps)(\n    withStyles(styles)((props: { isPublicFavoriteVisible: boolean; className?: string; } & WithStyles<CssRules>) => {\n        if (props.isPublicFavoriteVisible) {\n            return <Tooltip enterDelay={500} title=\"Public Favorite\">\n                    <span><PublicFavoriteIcon data-cy=\"public-favorite-star\" className={props.className || props.classes.icon} /></span>\n                </Tooltip>;\n        } else {\n            return null;\n        }\n    }));\n"
  },
  {
    "path": "services/workbench2/src/views-components/file-remove-dialog/file-remove-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from 'store/dialog/with-dialog';\nimport { RootState } from 'store/store';\nimport { removeCollectionFiles, FILE_REMOVE_DIALOG } from 'store/collection-panel/collection-panel-files/collection-panel-files-actions';\n\nconst mapStateToProps = (state: RootState, props: WithDialogProps<{ filePath: string }>) => ({\n    filePath: props.data.filePath\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<{ filePath: string }>) => ({\n    onConfirm: (filePath: string) => {\n        props.closeDialog();\n        dispatch<any>(removeCollectionFiles([filePath]));\n    }\n});\n\nconst mergeProps = (\n    stateProps: { filePath: string },\n    dispatchProps: { onConfirm: (filePath: string) => void },\n    props: WithDialogProps<{ filePath: string }>) => ({\n        onConfirm: () => dispatchProps.onConfirm(stateProps.filePath),\n        ...props\n    });\n\n// TODO: Remove as any\nexport const [FileRemoveDialog] = [ConfirmationDialog]\n    .map(connect(mapStateToProps, mapDispatchToProps, mergeProps) as any)\n    .map(withDialog(FILE_REMOVE_DIALOG));\n"
  },
  {
    "path": "services/workbench2/src/views-components/file-remove-dialog/multiple-files-remove-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { MULTIPLE_FILES_REMOVE_DIALOG, removeCollectionsSelectedFiles } from \"../../store/collection-panel/collection-panel-files/collection-panel-files-actions\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeCollectionsSelectedFiles());\n    }\n});\n\nexport const [MultipleFilesRemoveDialog] = [ConfirmationDialog]\n    .map(connect(undefined, mapDispatchToProps))\n    .map(withDialog(MULTIPLE_FILES_REMOVE_DIALOG));\n"
  },
  {
    "path": "services/workbench2/src/views-components/file-uploader/file-uploader.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { FileUpload } from 'components/file-upload/file-upload';\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { FileUploadProps } from '../../components/file-upload/file-upload';\nimport { Dispatch } from 'redux';\nimport { fileUploaderActions, getFileUploaderState } from 'store/file-uploader/file-uploader-actions';\nimport { WrappedFieldProps } from 'redux-form';\nimport { Typography } from '@mui/material';\n\nexport type FileUploaderProps = Pick<FileUploadProps, 'disabled'> & { onDrop?: FileUploadProps['onDrop'] };\n\nconst mapStateToProps = (state: RootState, { disabled }: FileUploaderProps): Pick<FileUploadProps, 'files' | 'disabled'> => ({\n    disabled,\n    files: state.fileUploader,\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch, { onDrop }: FileUploaderProps): Pick<FileUploadProps, 'onDrop' | 'onDelete'> => ({\n    onDrop: files => {\n        const state = dispatch<any>(getFileUploaderState());\n        if (files.length > 0 && state.length === 0) {\n            dispatch(fileUploaderActions.SET_UPLOAD_FILES(files));\n            onDrop?.(files);\n        } else if (files.length > 0 && state.length > 0) {\n            dispatch(fileUploaderActions.UPDATE_UPLOAD_FILES(files));\n            onDrop?.(files);\n        }\n    },\n    onDelete: file => dispatch(fileUploaderActions.DELETE_UPLOAD_FILE(file))\n});\n\nconst FileUploader = connect(mapStateToProps, mapDispatchToProps)(FileUpload);\n\nexport const FileUploaderField = (props: WrappedFieldProps & { label?: string }) => {\n    return <>\n        <Typography variant='caption'>{props.label}</Typography>\n        <FileUploader disabled={false} onDrop={props.input.onChange} />\n    </>\n};\n\ntype DialogFileUploaderFieldProps = {\n    label?: string,\n    onDrop?: (files: File[]) => void;\n}\n\nexport const DialogFileUploaderField = (props: DialogFileUploaderFieldProps) => {\n    return <>\n        <Typography variant='caption'>{props.label}</Typography>\n        <FileUploader disabled={false} onDrop={props.onDrop} />\n    </>\n};"
  },
  {
    "path": "services/workbench2/src/views-components/form-fields/collection-form-fields.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Validator } from 'validators/validators';\nimport { DialogTextField } from \"components/dialog-form/dialog-text-field\";\nimport {\n    COLLECTION_NAME_VALIDATION, COLLECTION_NAME_VALIDATION_ALLOW_SLASH,\n} from \"validators/validators\";\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\n\ntype DialogCollectionNameFieldProps = {\n    defaultValue?: string;\n    validators: Validator[];\n    submitErr?: string;\n    setSubmitErr?: (errMsg: string) => void;\n    setValue: (value: string) => void;\n}\n\nexport const DialogCollectionNameField = connect(\n    (state: RootState) => {\n        return {\n            validators: (state.auth.config.clusterConfig.Collections.ForwardSlashNameSubstitution === \"\" ?\n                COLLECTION_NAME_VALIDATION : COLLECTION_NAME_VALIDATION_ALLOW_SLASH)\n        };\n    })(({ defaultValue, setValue, validators, submitErr, setSubmitErr }: DialogCollectionNameFieldProps) => {\n        return <span data-cy='name-field'>\n            <DialogTextField\n                label='Collection Name'\n                defaultValue={defaultValue || ''}\n                setValue={setValue}\n                validators={validators}\n                submitErr={submitErr}\n                setSubmitErr={setSubmitErr}\n            />\n        </span>\n    })\n"
  },
  {
    "path": "services/workbench2/src/views-components/form-fields/external-credential-form-fields.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Field } from \"redux-form\";\nimport moment from \"moment\";\nimport { TextField, RichEditorTextField, TextFieldWithStartValue } from \"components/text-field/text-field\";\nimport { REQUIRED_VALIDATION, LENGTH255_VALIDATION, REQUIRED_LENGTH255_VALIDATION, REQUIRED_VALIDNAME_LENGTH255_VALIDATION, DATE_VALIDATION } from \"validators/validators\";\nimport { DatePicker } from \"components/date-picker/date-picker\";\nimport { StringArrayMuiInput } from \"components/string-array-input/string-array-mui-input\";\n\nexport const ExternalCredentialNameField = () =>\n    <Field\n        name='name'\n        component={TextField as any}\n        validate={REQUIRED_VALIDNAME_LENGTH255_VALIDATION}\n        label={\"Credential Name *\"}\n        autoFocus={true} />;\n\nexport const ExternalCredentialDescriptionField = () =>\n    <Field\n        name='description'\n        component={RichEditorTextField as any}\n        validate={LENGTH255_VALIDATION}\n        label=\"Description\" />;\n\nexport const ExternalCredentialClassCreateField = () =>\n    <Field\n        name='credentialClass'\n        component={TextFieldWithStartValue as any}\n        startValue={'arv:aws_access_key'}\n        validate={REQUIRED_LENGTH255_VALIDATION}\n        label=\"Credential Class *\" />;\n\nexport const ExternalCredentialClassUpdateField = () =>\n    <Field\n        name='credentialClass'\n        component={TextField as any}\n        validate={REQUIRED_LENGTH255_VALIDATION}\n        label=\"Credential Class *\" />;\n\nexport const ExternalCredentialExternalIdField = () =>\n    <Field\n        name='externalId'\n        component={TextField as any}\n        validate={REQUIRED_LENGTH255_VALIDATION}\n        label=\"External ID *\" />;\n\nexport const ExternalCredentialExpiresAtField = () =>\n    <Field\n        name='expiresAt'\n        component={DatePicker as any}\n        startValue={moment().add(1, 'year')}\n        validate={DATE_VALIDATION}\n        label=\"Expires at\" />;\n\nexport const ExternalCredentialSecretCreateField = () =>\n    <Field\n        name='secret'\n        component={TextField as any}\n        type='password'\n        autoComplete=\"new-password\"\n        validate={REQUIRED_VALIDATION}\n        label=\"Secret *\" />;\n\nexport const ExternalCredentialSecretUpdateField = () =>\n    <Field\n        name='secret'\n        component={TextField as any}\n        type='password'\n        autoComplete=\"new-password\"\n        helperText=\"Leave blank to keep the same secret\"\n        label=\"Secret\" />;\n\nexport const ExternalCredentialScopesField = () =>\n        <Field\n            name=\"scopes\"\n            component={StringArrayMuiInput as any}\n            label=\"Applicable scopes\"\n        />\n"
  },
  {
    "path": "services/workbench2/src/views-components/form-fields/process-form-fields.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Field } from \"redux-form\";\nimport { TextField } from \"components/text-field/text-field\";\nimport { PROCESS_NAME_VALIDATION, PROCESS_DESCRIPTION_VALIDATION } from \"validators/validators\";\n\nexport const ProcessNameField = () =>\n    <Field\n        name='name'\n        component={TextField as any}\n        validate={PROCESS_NAME_VALIDATION}\n        label=\"Process Name\" />;\n\nexport const ProcessDescriptionField = () =>\n    <Field\n        name='description'\n        component={TextField as any}\n        validate={PROCESS_DESCRIPTION_VALIDATION}\n        label=\"Process Description\" />;\n"
  },
  {
    "path": "services/workbench2/src/views-components/form-fields/project-form-fields.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Field, FieldArray, Validator, WrappedFieldArrayProps } from \"redux-form\";\nimport { TextField, RichEditorTextField } from \"components/text-field/text-field\";\nimport { PROJECT_NAME_VALIDATION, PROJECT_NAME_VALIDATION_ALLOW_SLASH, PROJECT_DESCRIPTION_VALIDATION } from \"validators/validators\";\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { Participant, ParticipantSelect } from \"views-components/sharing-dialog/participant-select\";\n\ninterface ProjectNameFieldProps {\n    validate: Validator[];\n    label?: string;\n}\n\n// Validation behavior depends on the value of ForwardSlashNameSubstitution.\n//\n// Redux form doesn't let you pass anonymous functions to 'validate'\n// -- it fails with a very confusing recursive-update-exceeded error.\n// So we can't construct the validation function on the fly.\n//\n// As a workaround, use ForwardSlashNameSubstitution to choose between one of two const-defined validators.\n\nexport const ProjectNameField = connect(\n    (state: RootState) => {\n        return {\n            validate: (state.auth.config.clusterConfig.Collections.ForwardSlashNameSubstitution === \"\" ?\n                PROJECT_NAME_VALIDATION : PROJECT_NAME_VALIDATION_ALLOW_SLASH)\n        };\n    })((props: ProjectNameFieldProps) =>\n        <span data-cy='name-field'><Field\n            name='name'\n            component={TextField as any}\n            validate={props.validate}\n            label={props.label || \"Project Name\"}\n            autoFocus={true} /></span>\n    );\n\nexport const ProjectDescriptionField = () =>\n    <Field\n        name='description'\n        component={RichEditorTextField as any}\n        validate={PROJECT_DESCRIPTION_VALIDATION}\n        label=\"Description\" />;\n\nexport const UsersField = () =>\n        <span data-cy='users-field'><FieldArray\n            name=\"users\"\n            component={UsersSelect as any} /></span>;\n\nexport const UsersSelect = ({ fields }: WrappedFieldArrayProps<Participant>) =>\n        <ParticipantSelect\n            onlyPeople\n            label='Search for users to add to the group'\n            items={fields.getAll() || []}\n            onSelect={fields.push}\n            onDelete={fields.remove} />;\n"
  },
  {
    "path": "services/workbench2/src/views-components/form-fields/repository-form-fields.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Field } from \"redux-form\";\nimport { TextField } from \"components/text-field/text-field\";\nimport { REPOSITORY_NAME_VALIDATION } from \"validators/validators\";\nimport { Grid } from \"@mui/material\";\n\nexport const RepositoryNameField = (props: any) =>\n    <Grid container style={{ marginTop: '0', paddingTop: '24px' }}>\n        <Grid item xs={3}>\n            {props.data.user.username}/\n        </Grid>\n        <Grid item xs={7} style={{ bottom: '24px', position: 'relative' }}>\n            <Field\n                name='name'\n                component={TextField as any}\n                validate={REPOSITORY_NAME_VALIDATION}\n                label=\"Name\"\n                autoFocus={true} />\n        </Grid>\n        <Grid item xs={2}>\n            .git\n        </Grid>\n        <Grid item xs={12}>\n            It may take a minute or two before you can clone your new repository.\n        </Grid>\n    </Grid>;"
  },
  {
    "path": "services/workbench2/src/views-components/form-fields/resource-form-fields.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { ResourcesState, getResource } from \"store/resources/resources\";\nimport { GroupResource } from \"models/group\";\nimport { getUserUuid } from \"common/getuser\";\nimport { DialogTextField } from \"components/dialog-form/dialog-text-field\";\n\ninterface ResourceParentFieldProps {\n    resources: ResourcesState;\n    userUuid: string|undefined;\n    ownerUuid: string;\n}\n\nexport const ResourceParentField = connect(\n    (state: RootState) => {\n        return {\n            resources: state.resources,\n            userUuid: getUserUuid(state),\n        };\n    })\n    ((props: ResourceParentFieldProps) => {\n        const format = (value: string) => {\n            if (value === props.userUuid) {\n                return 'Home project';\n            }\n            const rsc = getResource<GroupResource>(value)(props.resources);\n            if (rsc !== undefined) {\n                return `${rsc.name} (${rsc.uuid})`;\n            }\n            return value;\n        }\n\n        return <span data-cy='parent-field'>\n            <DialogTextField\n                label='Parent project'\n                validators={[]}\n                defaultValue={format(props.ownerUuid || '')}\n                setValue={() => { /* no-op */ }}\n                disabled={true}\n            />\n        </span>\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/form-fields/search-bar-form-fields.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Field, FieldArray } from 'redux-form';\nimport { TextField, DateTextField } from \"components/text-field/text-field\";\nimport { CheckboxField } from 'components/checkbox-field/checkbox-field';\nimport { NativeSelectField } from 'components/select-field/select-field';\nimport { ResourceKind } from 'models/resource';\nimport { SearchBarAdvancedPropertiesView } from 'views-components/search-bar/search-bar-advanced-properties-view';\nimport { PropertyKeyField, } from 'views-components/resource-properties-form/property-key-field';\nimport { PropertyValueField } from 'views-components/resource-properties-form/property-value-field';\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { SearchProjectInput, SearchProjectCommandInputParameter } from 'views/run-process-panel/inputs/search-project-input';\n\nexport const SearchBarTypeField = () =>\n    <Field\n        name='type'\n        component={NativeSelectField as any}\n        items={[\n            { key: '', value: 'Any' },\n            { key: ResourceKind.COLLECTION, value: 'Collection' },\n            { key: ResourceKind.PROJECT, value: 'Project' },\n            { key: ResourceKind.PROCESS, value: 'Process' }\n        ]} />;\n\n\ninterface SearchBarClusterFieldProps {\n    clusters: { key: string, value: string }[];\n}\n\nexport const SearchBarClusterField = connect(\n    (state: RootState) => ({\n        clusters: [{ key: '', value: 'Any' }].concat(\n            state.auth.sessions\n                .filter(s => s.loggedIn)\n                .map(s => ({\n                    key: s.clusterId,\n                    value: s.clusterId\n                })))\n    }))((props: SearchBarClusterFieldProps) => <Field\n        name='cluster'\n        component={NativeSelectField as any}\n        items={props.clusters} />\n    );\n\nexport const SearchBarProjectField = () =>\n    <SearchProjectInput required={false} input={{\n        id: \"projectObject\",\n        label: \"Limit search to Project\"\n    } as SearchProjectCommandInputParameter}\n        options={{ showOnlyOwned: false, showOnlyWritable: false }} />\n\nexport const SearchBarTrashField = () =>\n    <Field\n        name='inTrash'\n        component={CheckboxField}\n        label=\"In trash\" />;\n\nexport const SearchBarPastVersionsField = () =>\n    <Field\n        name='pastVersions'\n        component={CheckboxField}\n        label=\"Past versions\" />;\n\nexport const SearchBarDateFromField = () =>\n    <Field\n        name='dateFrom'\n        component={DateTextField as any} />;\n\nexport const SearchBarDateToField = () =>\n    <Field\n        name='dateTo'\n        component={DateTextField as any} />;\n\nexport const SearchBarPropertiesField = () =>\n    <FieldArray\n        name=\"properties\"\n        component={SearchBarAdvancedPropertiesView as any} />;\n\nexport const SearchBarKeyField = () =>\n    <PropertyKeyField skipValidation={true} />;\n\nexport const SearchBarValueField = () =>\n    <PropertyValueField skipValidation={true} />;\n\nexport const SearchBarSaveSearchField = () =>\n    <Field\n        name='saveQuery'\n        component={CheckboxField}\n        label=\"Save query\" />;\n\nexport const SearchBarQuerySearchField = () =>\n    <Field\n        name='queryName'\n        component={TextField as any}\n        label=\"Query name\" />;\n"
  },
  {
    "path": "services/workbench2/src/views-components/form-fields/ssh-key-form-fields.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Field } from \"redux-form\";\nimport { TextField } from \"components/text-field/text-field\";\nimport { SSH_KEY_PUBLIC_VALIDATION, SSH_KEY_NAME_VALIDATION } from \"validators/validators\";\n\nexport const SshKeyPublicField = () =>\n    <Field\n        name='publicKey'\n        component={TextField as any}\n        validate={SSH_KEY_PUBLIC_VALIDATION}\n        autoFocus={true}\n        label=\"Public Key\" />;\n\nexport const SshKeyNameField = () =>\n    <Field\n        name='name'\n        component={TextField as any}\n        validate={SSH_KEY_NAME_VALIDATION}\n        label=\"Name\" />;\n\n\n"
  },
  {
    "path": "services/workbench2/src/views-components/form-fields/user-form-fields.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Field } from \"redux-form\";\nimport { TextField } from \"components/text-field/text-field\";\nimport { USER_EMAIL_VALIDATION, CHOOSE_VM_VALIDATION } from \"validators/validators\";\nimport { NativeSelectField } from \"components/select-field/select-field\";\nimport { InputLabel } from \"@mui/material\";\nimport { VirtualMachinesResource } from \"models/virtual-machines\";\nimport { VIRTUAL_MACHINE_ADD_LOGIN_GROUPS_FIELD, VIRTUAL_MACHINE_ADD_LOGIN_VM_FIELD } from \"store/virtual-machines/virtual-machines-actions\";\nimport { GroupArrayInput } from \"views-components/virtual-machines-dialog/group-array-input\";\n\ninterface VirtualMachinesProps {\n    data: {\n        items: VirtualMachinesResource[];\n    };\n}\n\nexport const UserEmailField = () =>\n    <Field\n        name='email'\n        component={TextField as any}\n        validate={USER_EMAIL_VALIDATION}\n        autoFocus={true}\n        label=\"Email\" />;\n\nexport const RequiredUserVirtualMachineField = ({ data }: VirtualMachinesProps) =>\n    <div style={{ marginBottom: '21px' }}>\n        <InputLabel>Virtual Machine</InputLabel>\n        <Field\n            name={VIRTUAL_MACHINE_ADD_LOGIN_VM_FIELD}\n            component={NativeSelectField as any}\n            validate={CHOOSE_VM_VALIDATION}\n            items={getVirtualMachinesList(data.items)} />\n    </div>;\n\nexport const UserVirtualMachineField = ({ data }: VirtualMachinesProps) =>\n    <div style={{ marginBottom: '21px' }}>\n        <InputLabel>Virtual Machine</InputLabel>\n        <Field\n            name={VIRTUAL_MACHINE_ADD_LOGIN_VM_FIELD}\n            component={NativeSelectField as any}\n            items={getVirtualMachinesList(data.items)} />\n    </div>;\n\nexport const UserGroupsVirtualMachineField = () =>\n    <GroupArrayInput\n        name={VIRTUAL_MACHINE_ADD_LOGIN_GROUPS_FIELD}\n        input={{id:\"Add groups to VM login (eg: docker, sudo)\", disabled:false}}\n        required={false}\n    />\n\nconst getVirtualMachinesList = (virtualMachines: VirtualMachinesResource[]) =>\n    [{ key: \"\", value: \"\" }].concat(virtualMachines.map(it => ({ key: it.uuid, value: it.hostname })));\n"
  },
  {
    "path": "services/workbench2/src/views-components/groups-dialog/attributes-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button, Typography, Grid } from \"@mui/material\";\nimport { WithDialogProps } from \"store/dialog/with-dialog\";\nimport { withDialog } from 'store/dialog/with-dialog';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { compose } from \"redux\";\nimport { GroupResource } from \"models/group\";\nimport { GROUP_ATTRIBUTES_DIALOG } from \"store/groups-panel/groups-panel-actions\";\n\ntype CssRules = 'rightContainer' | 'leftContainer' | 'spacing';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    rightContainer: {\n        textAlign: 'right',\n        paddingRight: theme.spacing(2),\n        color: theme.palette.grey[\"500\"]\n    },\n    leftContainer: {\n        textAlign: 'left',\n        paddingLeft: theme.spacing(2)\n    },\n    spacing: {\n        paddingTop: theme.spacing(2)\n    },\n});\n\ninterface GroupAttributesDataProps {\n    data: GroupResource;\n}\n\ntype GroupAttributesProps = GroupAttributesDataProps & WithStyles<CssRules>;\n\nexport const GroupAttributesDialog = compose(\n    withDialog(GROUP_ATTRIBUTES_DIALOG),\n    withStyles(styles))(\n        (props: WithDialogProps<GroupAttributesProps> & GroupAttributesProps) =>\n            <Dialog open={props.open}\n                onClose={props.closeDialog}\n                fullWidth\n                maxWidth=\"sm\">\n                <DialogTitle>Attributes</DialogTitle>\n                <DialogContent>\n                    <Typography variant='body1' className={props.classes.spacing}>\n                        {props.data && attributes(props.data, props.classes)}\n                    </Typography>\n                </DialogContent>\n                <DialogActions>\n                    <Button\n                        variant='text'\n                        color='primary'\n                        onClick={props.closeDialog}>\n                        Close\n                </Button>\n                </DialogActions>\n            </Dialog>\n    );\n\nconst attributes = (group: GroupResource, classes: any) => {\n    const { uuid, ownerUuid, createdAt, modifiedAt, modifiedByUserUuid, name, deleteAt, description, etag, isTrashed, trashAt} = group;\n    return (\n        <span>\n            <Grid container direction=\"row\">\n                <Grid item xs={5} className={classes.rightContainer}>\n                    {name && <Grid item>Name</Grid>}\n                    {ownerUuid && <Grid item>Owner uuid</Grid>}\n                    {createdAt && <Grid item>Created at</Grid>}\n                    {modifiedAt && <Grid item>Modified at</Grid>}\n                    {modifiedByUserUuid && <Grid item>Modified by user uuid</Grid>}\n                    {uuid && <Grid item>uuid</Grid>}\n                    {deleteAt && <Grid item>Delete at</Grid>}\n                    {description && <Grid item>Description</Grid>}\n                    {etag && <Grid item>Etag</Grid>}\n                    {isTrashed && <Grid item>Is trashed</Grid>}\n                    {trashAt && <Grid item>Trashed at</Grid>}\n                </Grid>\n                <Grid item xs={7} className={classes.leftContainer}>\n                    <Grid item>{name}</Grid>\n       
             <Grid item>{ownerUuid}</Grid>\n                    <Grid item>{createdAt}</Grid>\n                    <Grid item>{modifiedAt}</Grid>\n                    <Grid item>{modifiedByUserUuid}</Grid>\n                    <Grid item>{uuid}</Grid>\n                    <Grid item>{deleteAt}</Grid>\n                    <Grid item>{description}</Grid>\n                    <Grid item>{etag}</Grid>\n                    <Grid item>{isTrashed}</Grid>\n                    <Grid item>{trashAt}</Grid>\n                </Grid>\n            </Grid>\n        </span>\n    );\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/groups-dialog/member-attributes-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button, Typography, Grid } from \"@mui/material\";\nimport { WithDialogProps } from \"store/dialog/with-dialog\";\nimport { withDialog } from 'store/dialog/with-dialog';\nimport { WithStyles } from '@mui/styles';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { compose } from \"redux\";\nimport { PermissionResource } from \"models/permission\";\nimport { MEMBER_ATTRIBUTES_DIALOG } from 'store/group-details-panel/group-details-panel-actions';\n\ntype CssRules = 'rightContainer' | 'leftContainer' | 'spacing';\n\nconst styles: CustomStyleRulesCallback<CssRules> =(theme: ArvadosTheme) => ({\n    rightContainer: {\n        textAlign: 'right',\n        paddingRight: theme.spacing(2),\n        color: theme.palette.grey[\"500\"]\n    },\n    leftContainer: {\n        textAlign: 'left',\n        paddingLeft: theme.spacing(2)\n    },\n    spacing: {\n        paddingTop: theme.spacing(2)\n    },\n});\n\ninterface GroupAttributesDataProps {\n    data: PermissionResource;\n}\n\ntype GroupAttributesProps = GroupAttributesDataProps & WithStyles<CssRules>;\n\nexport const GroupMemberAttributesDialog = compose(\n    withDialog(MEMBER_ATTRIBUTES_DIALOG),\n    withStyles(styles))(\n        (props: WithDialogProps<GroupAttributesProps> & GroupAttributesProps) =>\n            <Dialog open={props.open}\n                onClose={props.closeDialog}\n                fullWidth\n                maxWidth=\"sm\">\n                <DialogTitle>Attributes</DialogTitle>\n                <DialogContent>\n                    <Typography variant='body1' className={props.classes.spacing}>\n                        {props.data && attributes(props.data, props.classes)}\n                    </Typography>\n                </DialogContent>\n                <DialogActions>\n                    <Button\n                        variant='text'\n                        color='primary'\n                        onClick={props.closeDialog}>\n                        Close\n                </Button>\n                </DialogActions>\n            </Dialog>\n    );\n\nconst attributes = (memberGroup: PermissionResource, classes: any) => {\n    const { uuid, ownerUuid, createdAt, modifiedAt, modifiedByUserUuid, name, etag, linkClass } = memberGroup;\n    return (\n        <span>\n            <Grid container direction=\"row\">\n                <Grid item xs={5} className={classes.rightContainer}>\n                    {name && <Grid item>Name</Grid>}\n                    {ownerUuid && <Grid item>Owner uuid</Grid>}\n                    {createdAt && <Grid item>Created at</Grid>}\n                    {modifiedAt && <Grid item>Modified at</Grid>}\n                    {modifiedByUserUuid && <Grid item>Modified by user uuid</Grid>}\n                    {uuid && <Grid item>uuid</Grid>}\n                    {linkClass && <Grid item>Link Class</Grid>}\n                    {etag && <Grid item>Etag</Grid>}\n                </Grid>\n                <Grid item xs={7} className={classes.leftContainer}>\n                    <Grid item>{name}</Grid>\n                    <Grid item>{ownerUuid}</Grid>\n                    <Grid item>{createdAt}</Grid>\n                    <Grid item>{modifiedAt}</Grid>\n                    <Grid 
item>{modifiedByUserUuid}</Grid>\n                    <Grid item>{uuid}</Grid>\n                    <Grid item>{linkClass}</Grid>\n                    <Grid item>{etag}</Grid>\n                </Grid>\n            </Grid>\n        </span>\n    );\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/groups-dialog/member-remove-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { removeGroupMember, removeMultipleGroupMembers, MEMBER_REMOVE_DIALOG, MULTIPLE_MEMBER_REMOVE_DIALOG } from 'store/group-details-panel/group-details-panel-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeGroupMember(props.data.uuid));\n    }\n});\n\nexport const RemoveGroupMemberDialog = compose(\n    withDialog(MEMBER_REMOVE_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);\n\nconst multipleMapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeMultipleGroupMembers());\n    }\n});\n\nexport const RemoveMultipleGroupMembersDialog = compose(\n    withDialog(MULTIPLE_MEMBER_REMOVE_DIALOG),\n    connect(null, multipleMapDispatchToProps)\n)(ConfirmationDialog);"
  },
  {
    "path": "services/workbench2/src/views-components/groups-dialog/remove-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { removeGroup, GROUP_REMOVE_DIALOG } from 'store/groups-panel/groups-panel-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeGroup(props.data.uuid));\n    }\n});\n\nexport const RemoveGroupDialog = compose(\n    withDialog(GROUP_REMOVE_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);"
  },
  {
    "path": "services/workbench2/src/views-components/keep-services-dialog/attributes-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { compose } from 'redux';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button, Grid } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithDialogProps, withDialog } from \"store/dialog/with-dialog\";\nimport { KEEP_SERVICE_ATTRIBUTES_DIALOG } from 'store/keep-services/keep-services-actions';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { KeepServiceResource } from 'models/keep-services';\n\ntype CssRules = 'root';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        fontSize: '0.875rem',\n        '& div:nth-child(odd)': {\n            textAlign: 'right',\n            color: theme.palette.grey[\"500\"]\n        }\n    }\n});\n\ninterface AttributesKeepServiceDialogDataProps {\n    keepService: KeepServiceResource;\n}\n\nexport const AttributesKeepServiceDialog = compose(\n    withDialog(KEEP_SERVICE_ATTRIBUTES_DIALOG),\n    withStyles(styles))(\n        ({ open, closeDialog, data, classes }: WithDialogProps<AttributesKeepServiceDialogDataProps> & WithStyles<CssRules>) =>\n            <Dialog open={open} onClose={closeDialog} fullWidth maxWidth='sm'>\n                <DialogTitle>Attributes</DialogTitle>\n                <DialogContent>\n                    {data.keepService && <Grid container direction=\"row\" spacing={2} className={classes.root}>\n                        <Grid item xs={5}>UUID</Grid>\n                        <Grid item xs={7}>{data.keepService.uuid}</Grid>\n                        <Grid item xs={5}>Read only</Grid>\n                        <Grid item xs={7}>{JSON.stringify(data.keepService.readOnly)}</Grid>\n                        <Grid item xs={5}>Service host</Grid>\n                        <Grid item xs={7}>{data.keepService.serviceHost}</Grid>\n                        <Grid item xs={5}>Service port</Grid>\n                        <Grid item xs={7}>{data.keepService.servicePort}</Grid>\n                        <Grid item xs={5}>Service SSL flag</Grid>\n                        <Grid item xs={7}>{JSON.stringify(data.keepService.serviceSslFlag)}</Grid>\n                        <Grid item xs={5}>Service type</Grid>\n                        <Grid item xs={7}>{data.keepService.serviceType}</Grid>\n                        <Grid item xs={5}>Owner uuid</Grid>\n                        <Grid item xs={7}>{data.keepService.ownerUuid}</Grid>\n                        <Grid item xs={5}>Created at</Grid>\n                        <Grid item xs={7}>{data.keepService.createdAt}</Grid>\n                        <Grid item xs={5}>Modified at</Grid>\n                        <Grid item xs={7}>{data.keepService.modifiedAt}</Grid>\n                        <Grid item xs={5}>Modified by user uuid</Grid>\n                        <Grid item xs={7}>{data.keepService.modifiedByUserUuid}</Grid>\n                    </Grid>}\n                </DialogContent>\n                <DialogActions>\n                    <Button\n                        variant='text'\n                        color='primary'\n                        onClick={closeDialog}>\n                        Close\n                    </Button>\n                </DialogActions>\n            </Dialog>\n    );\n"
  },
  {
    "path": "services/workbench2/src/views-components/keep-services-dialog/remove-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { KEEP_SERVICE_REMOVE_DIALOG, removeKeepService } from 'store/keep-services/keep-services-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeKeepService(props.data.uuid));\n    }\n});\n\nexport const RemoveKeepServiceDialog = compose(\n    withDialog(KEEP_SERVICE_REMOVE_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);"
  },
  {
    "path": "services/workbench2/src/views-components/links-dialog/attributes-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { compose } from 'redux';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button, Grid } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithDialogProps, withDialog } from \"store/dialog/with-dialog\";\nimport { LINK_ATTRIBUTES_DIALOG } from 'store/link-panel/link-panel-actions';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { LinkResource } from 'models/link';\n\ntype CssRules = 'root';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        fontSize: '0.875rem',\n        '& div:nth-child(odd)': {\n            textAlign: 'right',\n            color: theme.palette.grey[\"500\"]\n        }\n    }\n});\n\ninterface AttributesLinkDialogDataProps {\n    link: LinkResource;\n}\n\nexport const AttributesLinkDialog = compose(\n    withDialog(LINK_ATTRIBUTES_DIALOG),\n    withStyles(styles))(\n    ({ open, closeDialog, data, classes }: WithDialogProps<AttributesLinkDialogDataProps> & WithStyles<CssRules>) =>\n            <Dialog open={open}\n                onClose={closeDialog}\n                fullWidth\n                maxWidth='sm'>\n                <DialogTitle>Attributes</DialogTitle>\n                <DialogContent>\n                    {data.link && <Grid container direction=\"row\" spacing={2} className={classes.root}>\n                        <Grid item xs={5}>Uuid</Grid>\n                        <Grid item xs={7}>{data.link.uuid}</Grid>\n                        <Grid item xs={5}>Name</Grid>\n                        <Grid item xs={7}>{data.link.name}</Grid>\n                        <Grid item xs={5}>Head uuid</Grid>\n                        <Grid item xs={7}>{data.link.headUuid}</Grid>\n                        <Grid item xs={5}>Head kind</Grid>\n                        <Grid item xs={7}>{data.link.headKind}</Grid>\n                        <Grid item xs={5}>Tail uuid</Grid>\n                        <Grid item xs={7}>{data.link.tailUuid}</Grid>\n                        <Grid item xs={5}>Link class</Grid>\n                        <Grid item xs={7}>{data.link.linkClass}</Grid>\n                        <Grid item xs={5}>Owner uuid</Grid>\n                        <Grid item xs={7}>{data.link.ownerUuid}</Grid>\n                        <Grid item xs={5}>Created at</Grid>\n                        <Grid item xs={7}>{data.link.createdAt}</Grid>\n                        <Grid item xs={5}>Modified at</Grid>\n                        <Grid item xs={7}>{data.link.modifiedAt}</Grid>\n                        <Grid item xs={5}>Modified by user uuid</Grid>\n                        <Grid item xs={7}>{data.link.modifiedByUserUuid}</Grid>\n                    </Grid>}\n                </DialogContent>\n                <DialogActions>\n                    <Button\n                        variant='text'\n                        color='primary'\n                        onClick={closeDialog}>\n                        Close\n                    </Button>\n                </DialogActions>\n            </Dialog>\n    );\n"
  },
  {
    "path": "services/workbench2/src/views-components/links-dialog/remove-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { LINK_REMOVE_DIALOG, removeLink } from 'store/link-panel/link-panel-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeLink(props.data.uuid));\n    }\n});\n\nexport const RemoveLinkDialog = compose(\n    withDialog(LINK_REMOVE_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);"
  },
  {
    "path": "services/workbench2/src/views-components/login-form/login-form.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { useState, useEffect, useRef } from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport CircularProgress from '@mui/material/CircularProgress';\nimport { Button, Card, CardContent, TextField, CardActions } from '@mui/material';\nimport { green } from '@mui/material/colors';\nimport { AxiosPromise } from 'axios';\nimport { DispatchProp } from 'react-redux';\nimport { saveApiToken } from 'store/auth/auth-action';\nimport { navigateToDashboard } from 'store/navigation/navigation-action';\nimport { replace } from 'connected-react-router';\nimport { PasswordLoginResponse } from 'views/login-panel/login-panel';\n\ntype CssRules = 'root' | 'loginBtn' | 'card' | 'wrapper' | 'progress';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    root: {\n        display: 'flex',\n        flexWrap: 'wrap',\n        width: '100%',\n        margin: `${theme.spacing(1)} auto`\n    },\n    loginBtn: {\n        marginTop: theme.spacing(1),\n        flexGrow: 1\n    },\n    card: {\n        marginTop: theme.spacing(1),\n        width: '100%'\n    },\n    wrapper: {\n        margin: theme.spacing(1),\n        position: 'relative',\n    },\n    progress: {\n        color: green[500],\n        position: 'absolute',\n        top: '50%',\n        left: '50%',\n        marginTop: -12,\n        marginLeft: -12,\n    },\n});\n\ntype LoginFormProps = DispatchProp<any> & WithStyles<CssRules> & {\n    handleSubmit: (username: string, password: string) => AxiosPromise<PasswordLoginResponse>;\n    loginLabel?: string,\n};\n\nexport const LoginForm = withStyles(styles)(\n    ({ handleSubmit, loginLabel, dispatch, classes }: LoginFormProps) => {\n        const userInput = useRef<HTMLInputElement>(null);\n        const [username, setUsername] = useState('');\n        const [password, setPassword] = useState('');\n        const [isButtonDisabled, setIsButtonDisabled] = useState(true);\n        const [isSubmitting, setSubmitting] = useState(false);\n        const [helperText, setHelperText] = useState('');\n        const [error, setError] = useState(false);\n\n        useEffect(() => {\n            setError(false);\n            setHelperText('');\n            if (username.trim() && password.trim()) {\n                setIsButtonDisabled(false);\n            } else {\n                setIsButtonDisabled(true);\n            }\n        }, [username, password]);\n\n        // This only runs once after render.\n        useEffect(() => {\n            setFocus();\n        }, []);\n\n        const setFocus = () => {\n            userInput.current!.focus();\n        };\n\n        const handleLogin = () => {\n            setError(false);\n            setHelperText('');\n            setSubmitting(true);\n            handleSubmit(username, password)\n                .then((response) => {\n                    setSubmitting(false);\n                    if (response.data.uuid && response.data.api_token) {\n                        const apiToken = `v2/${response.data.uuid}/${response.data.api_token}`;\n                        const rd = new URL(window.location.href);\n                        const rdUrl = rd.pathname + rd.search;\n                        dispatch<any>(saveApiToken(apiToken)).finally(\n                            () => {\n                 
               if ((new URL(window.location.href).pathname) !== '/my-account') {\n                                    rdUrl === '/' ? dispatch(navigateToDashboard) : dispatch(replace(rdUrl))\n                                }\n                            }\n                        );\n                    } else {\n                        setError(true);\n                        setHelperText(response.data.message || 'Please try again');\n                        setFocus();\n                    }\n                })\n                .catch((err) => {\n                    setError(true);\n                    setSubmitting(false);\n                    setHelperText(`${(err.response && err.response.data && err.response.data.errors[0]) || 'Error logging in: ' + err}`);\n                    setFocus();\n                });\n        };\n\n        const handleKeyPress = (e: any) => {\n            if (e.keyCode === 13 || e.which === 13) {\n                if (!isButtonDisabled) {\n                    handleLogin();\n                }\n            }\n        };\n\n        return (\n            <React.Fragment>\n                <form className={classes.root} noValidate autoComplete=\"off\">\n                    <Card className={classes.card}>\n                        <div className={classes.wrapper}>\n                            <CardContent>\n                                <TextField\n                                    variant=\"standard\"\n                                    inputRef={userInput}\n                                    disabled={isSubmitting}\n                                    error={error}\n                                    fullWidth\n                                    id=\"username\"\n                                    type=\"email\"\n                                    label=\"Username\"\n                                    margin=\"normal\"\n                                    onChange={(e) => setUsername(e.target.value)}\n                                    onKeyPress={(e) => handleKeyPress(e)} />\n                                <TextField\n                                    variant=\"standard\"\n                                    disabled={isSubmitting}\n                                    error={error}\n                                    fullWidth\n                                    id=\"password\"\n                                    type=\"password\"\n                                    label=\"Password\"\n                                    margin=\"normal\"\n                                    helperText={helperText}\n                                    onChange={(e) => setPassword(e.target.value)}\n                                    onKeyPress={(e) => handleKeyPress(e)} />\n                            </CardContent>\n                            <CardActions>\n                                <Button variant=\"contained\" size=\"large\" color=\"primary\"\n                                    className={classes.loginBtn} onClick={() => handleLogin()}\n                                    disabled={isSubmitting || isButtonDisabled}>\n                                    {loginLabel || 'Log in'}\n                                </Button>\n                            </CardActions>\n                            {isSubmitting && <CircularProgress color='secondary' className={classes.progress} />}\n                        </div>\n                    </Card>\n                </form>\n            </React.Fragment>\n        );\n    });\n"
  },
  {
    "path": "services/workbench2/src/views-components/main-app-bar/account-menu.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { AccountMenuComponent } from './account-menu';\n\ndescribe('<AccountMenu />', () => {\n    let props;\n\n    beforeEach(() => {\n      props = {\n        classes: {},\n        user: {\n            email: 'email@example.com',\n            firstName: 'User',\n            lastName: 'Test',\n            uuid: 'zzzzz-tpzed-testuseruuid',\n            ownerUuid: '',\n            username: 'testuser',\n            prefs: {},\n            isAdmin: false,\n            isActive: true\n        },\n        currentRoute: '',\n        workbenchURL: '',\n        localCluser: 'zzzzz',\n        dispatch: cy.stub().as('dispatch'),\n      };\n    });\n\n    describe('Logout Menu Item', () => {\n        beforeEach(() => {\n            cy.mount(<AccountMenuComponent {...props} />);\n        });\n\n        it('should dispatch a logout action when clicked', () => {\n            cy.get('button').should('exist').click();\n            cy.get('[data-cy=\"logout-menuitem\"]').click();\n            cy.get('@dispatch').should('have.been.calledWith', {\n                payload: {deleteLinkData: true, preservePath: false},\n                type: 'LOGOUT',\n            });\n        });\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/main-app-bar/account-menu.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { MenuItem } from \"@mui/material\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { User, getUserDisplayName } from \"models/user\";\nimport { DropdownMenu } from \"components/dropdown-menu/dropdown-menu\";\nimport { UserPanelIcon } from \"components/icon/icon\";\nimport { DispatchProp, connect } from 'react-redux';\nimport { authActions, getNewExtraToken } from 'store/auth/auth-action';\nimport { RootState } from \"store/store\";\nimport { openTokenDialog } from 'store/token-dialog/token-dialog-actions';\nimport {\n    navigateToSiteManager,\n    navigateToSshKeysUser,\n    navigateToMyAccount,\n    navigateToLinkAccount,\n    navigateToMyPreferences,\n} from 'store/navigation/navigation-action';\nimport { pluginConfig } from 'plugins';\nimport { ElementListReducer } from 'common/plugintypes';\n\ninterface AccountMenuProps {\n    user?: User;\n    currentRoute: string;\n    workbenchURL: string;\n    apiToken?: string;\n    localCluster: string;\n}\n\nconst mapStateToProps = (state: RootState): AccountMenuProps => ({\n    user: state.auth.user,\n    currentRoute: state.router.location ? state.router.location.pathname : '',\n    workbenchURL: state.auth.config.workbenchUrl,\n    apiToken: state.auth.apiToken,\n    localCluster: state.auth.localCluster\n});\n\ntype CssRules = 'link';\n\nconst styles: CustomStyleRulesCallback<CssRules> = () => ({\n    link: {\n        textDecoration: 'none',\n        color: 'inherit'\n    }\n});\n\nexport const AccountMenuComponent =\n    ({ user, dispatch, currentRoute, workbenchURL, apiToken, localCluster, classes }: AccountMenuProps & DispatchProp<any> & WithStyles<CssRules>) => {\n        let accountMenuItems = [\n            <MenuItem key={'get-api-token'} onClick={() => {\n                dispatch<any>(getNewExtraToken(true));\n                dispatch(openTokenDialog);\n            }}>Get API token</MenuItem>,\n            <MenuItem key={'ssh-keys'} onClick={() => dispatch(navigateToSshKeysUser)}>SSH Keys</MenuItem>,\n            <MenuItem key={'site-manager'} onClick={() => dispatch(navigateToSiteManager)}>Site Manager</MenuItem>,\n            <MenuItem key={'my-account'} onClick={() => dispatch(navigateToMyAccount)}>My account</MenuItem>,\n            <MenuItem key={'preferences'} onClick={() => dispatch(navigateToMyPreferences)}>Preferences</MenuItem>,\n            <MenuItem key={'link-account'} onClick={() => dispatch(navigateToLinkAccount)}>Link account</MenuItem>,\n        ];\n\n        const reduceItemsFn: (a: React.ReactElement[],\n            b: ElementListReducer) => React.ReactElement[] = (a, b) => b(a);\n\n        accountMenuItems = pluginConfig.accountMenuList.reduce(reduceItemsFn, accountMenuItems);\n\n        return user\n            ? 
<DropdownMenu\n                icon={<UserPanelIcon />}\n                id=\"account-menu\"\n                title=\"Account Management\"\n                key={currentRoute}>\n                <MenuItem key={'account'} disabled>\n                    {getUserDisplayName(user)} {user.uuid.substring(0, 5) !== localCluster && `(${user.uuid.substring(0, 5)})`}\n                </MenuItem>\n                {user.isActive && accountMenuItems}\n                <MenuItem key={'logout'} data-cy=\"logout-menuitem\"\n                    onClick={() => dispatch(authActions.LOGOUT({ deleteLinkData: true, preservePath: false }))}>\n                    Logout\n                </MenuItem>\n            </DropdownMenu>\n            : null;\n    };\n\nexport const AccountMenu = withStyles(styles)(connect(mapStateToProps)(AccountMenuComponent));\n"
  },
  {
    "path": "services/workbench2/src/views-components/main-app-bar/admin-menu.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { MenuItem } from \"@mui/material\";\nimport { User } from \"models/user\";\nimport { DropdownMenu } from \"components/dropdown-menu/dropdown-menu\";\nimport { AdminMenuIcon } from \"components/icon/icon\";\nimport { DispatchProp, connect } from 'react-redux';\nimport { RootState } from \"store/store\";\nimport * as NavigationAction from 'store/navigation/navigation-action';\nimport { openAdminVirtualMachines } from \"store/virtual-machines/virtual-machines-actions\";\nimport { openUserPanel } from \"store/users/users-actions\";\ninterface AdminMenuProps {\n    user?: User;\n    currentRoute: string;\n}\n\nconst mapStateToProps = (state: RootState): AdminMenuProps => ({\n    user: state.auth.user,\n    currentRoute: state.router.location ? state.router.location.pathname : ''\n});\n\nexport const AdminMenu = connect(mapStateToProps)(\n    ({ user, dispatch, currentRoute }: AdminMenuProps & DispatchProp<any>) =>\n        user\n            ? <DropdownMenu\n                icon={<AdminMenuIcon />}\n                id=\"admin-menu\"\n                title=\"Admin Panel\"\n                key={currentRoute}>\n                <MenuItem onClick={() => dispatch(openAdminVirtualMachines())}>Shell Access</MenuItem>\n                <MenuItem onClick={() => dispatch(NavigationAction.navigateToSshKeysAdmin)}>SSH Keys</MenuItem>\n                <MenuItem onClick={() => dispatch(NavigationAction.navigateToApiClientAuthorizations)}>API Tokens</MenuItem>\n                <MenuItem onClick={() => dispatch(openUserPanel())}>Users</MenuItem>\n                <MenuItem onClick={() => dispatch(NavigationAction.navigateToGroups)}>Groups</MenuItem>\n                <MenuItem onClick={() => dispatch(NavigationAction.navigateToKeepServices)}>Keep Services</MenuItem>\n                <MenuItem onClick={() => dispatch(NavigationAction.navigateToLinks)}>Links</MenuItem>\n            </DropdownMenu>\n            : null);\n"
  },
  {
    "path": "services/workbench2/src/views-components/main-app-bar/anonymous-menu.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Button } from '@mui/material';\nimport { DispatchProp, connect } from 'react-redux';\nimport { login } from 'store/auth/auth-action';\n\nexport const AnonymousMenu = connect()(\n    ({ dispatch }: DispatchProp<any>) =>\n        <Button\n            color=\"inherit\"\n            onClick={() => dispatch(login(\"\", \"\", \"\", {}))}>\n            Sign in\n        </Button>);\n"
  },
  {
    "path": "services/workbench2/src/views-components/main-app-bar/help-menu.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { MenuItem, Typography } from \"@mui/material\";\nimport { DropdownMenu } from \"components/dropdown-menu/dropdown-menu\";\nimport { ImportContactsIcon, HelpIcon } from \"components/icon/icon\";\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { RootState } from \"store/store\";\nimport { compose } from \"redux\";\nimport { connect } from \"react-redux\";\n\ntype CssRules = 'link' | 'icon' | 'title' | 'linkTitle';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    link: {\n        textDecoration: 'none',\n        color: 'inherit',\n        width: '100%',\n        display: 'flex'\n    },\n    icon: {\n        width: '16px',\n        height: '16px'\n    },\n    title: {\n        paddingBottom: theme.spacing(0.5),\n        paddingLeft: theme.spacing(2),\n        paddingTop: theme.spacing(0.5),\n        outline: 'none',\n    },\n    linkTitle: {\n        marginLeft: theme.spacing(1)\n    },\n});\n\nconst links = [\n    {\n        title: \"Tutorials and User guide\",\n        link: \"http://doc.arvados.org/user/\",\n    },\n    {\n        title: \"API Reference\",\n        link: \"http://doc.arvados.org/api/\",\n    },\n    {\n        title: \"SDK Reference\",\n        link: \"http://doc.arvados.org/sdk/\"\n    },\n];\n\ninterface HelpMenuProps {\n    currentRoute: string;\n}\n\nconst mapStateToProps = ({ router }: RootState) => ({\n    currentRoute: router.location ? router.location.pathname : '',\n});\n\nexport const HelpMenu = compose(\n    connect(mapStateToProps),\n    withStyles(styles))(\n        ({ classes, currentRoute }: HelpMenuProps & WithStyles<CssRules>) =>\n            <DropdownMenu\n                icon={<HelpIcon />}\n                id=\"help-menu\"\n                title=\"Help\"\n                key={currentRoute}>\n                <MenuItem disabled>Help</MenuItem>\n                {\n                    links.map(link =>\n                        <MenuItem key={link.title}>\n                            <a href={link.link} target=\"_blank\" rel=\"noopener noreferrer\" className={classes.link}>\n                                <ImportContactsIcon className={classes.icon} />\n                                <Typography className={classes.linkTitle}>{link.title}</Typography>\n                            </a>\n                        </MenuItem>\n                    )\n                }\n            </DropdownMenu>\n    );\n"
  },
  {
    "path": "services/workbench2/src/views-components/main-app-bar/main-app-bar.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { AppBar, Toolbar, Typography, Grid } from \"@mui/material\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { Link } from \"react-router-dom\";\nimport { User } from \"models/user\";\nimport { SearchBar } from \"views-components/search-bar/search-bar\";\nimport { Routes } from 'routes/routes';\nimport { NotificationsMenu } from \"views-components/main-app-bar/notifications-menu\";\nimport { AccountMenu } from \"views-components/main-app-bar/account-menu\";\nimport { HelpMenu } from 'views-components/main-app-bar/help-menu';\nimport { ReactNode } from \"react\";\nimport { AdminMenu } from \"views-components/main-app-bar/admin-menu\";\nimport { pluginConfig } from 'plugins';\nimport { sanitizeHTML } from \"common/html-sanitize\";\n\ntype CssRules = 'toolbar' | 'link';\n\nconst styles: CustomStyleRulesCallback<CssRules> = () => ({\n    link: {\n        textDecoration: 'none',\n        color: 'inherit'\n    },\n    toolbar: {\n        height: '56px',\n    }\n});\n\ninterface MainAppBarDataProps {\n    user?: User;\n    buildInfo?: string;\n    children?: ReactNode;\n    uuidPrefix: string;\n    siteBanner: string;\n}\n\nexport type MainAppBarProps = MainAppBarDataProps & WithStyles<CssRules>;\n\nexport const MainAppBar = withStyles(styles)(\n    (props: MainAppBarProps) => {\n        return (\n            <AppBar position=\"absolute\">\n                <Toolbar className={props.classes.toolbar}>\n                    <Grid container justifyContent=\"space-between\">\n                        {pluginConfig.appBarLeft || <Grid container item xs={3} direction=\"column\" justifyContent=\"center\">\n                            <Typography variant='h6' color=\"inherit\" noWrap>\n                                <Link to={Routes.ROOT} className={props.classes.link}>\n                                    <span dangerouslySetInnerHTML={{ __html: sanitizeHTML(props.siteBanner) }} /> ({props.uuidPrefix})\n                    </Link>\n                            </Typography>\n                            <Typography variant=\"caption\" color=\"inherit\">\n                                {props.buildInfo}</Typography>\n                        </Grid>}\n                        <Grid\n                            item\n                            xs={6}\n                            container\n                            alignItems=\"center\">\n                            {pluginConfig.appBarMiddle || (props.user && props.user.isActive && <SearchBar />)}\n                        </Grid>\n                        <Grid\n                            item\n                            xs={3}\n                            container\n                            alignItems=\"center\"\n                            justifyContent=\"flex-end\"\n                            wrap=\"nowrap\">\n                            {props.user ? 
<>\n                                <NotificationsMenu />\n                                <AccountMenu />\n                                {pluginConfig.appBarRight ||\n                                    <>\n                                        {props.user.isAdmin && <AdminMenu />}\n                                        <HelpMenu />\n                                    </>}\n                            </> :\n                                pluginConfig.appBarRight || <HelpMenu />\n                            }\n                        </Grid>\n                    </Grid>\n                </Toolbar>\n                {props.children}\n            </AppBar>\n        );\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/main-app-bar/notifications-menu.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { Badge, MenuItem } from \"@mui/material\";\nimport { DropdownMenu } from \"components/dropdown-menu/dropdown-menu\";\nimport { NotificationIcon } from \"components/icon/icon\";\nimport { bannerActions } from \"store/banner/banner-action\";\nimport { BANNER_LOCAL_STORAGE_KEY } from \"views-components/baner/banner\";\nimport { RootState } from \"store/store\";\nimport { TOOLTIP_LOCAL_STORAGE_KEY } from \"store/tooltips/tooltips-middleware\";\nimport { useCallback } from \"react\";\n\nconst mapStateToProps = (state: RootState): NotificationsMenuProps => ({\n    isOpen: state.banner.isOpen,\n    bannerUUID: state.auth.config.clusterConfig.Workbench.BannerUUID,\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    openBanner: () => dispatch<any>(bannerActions.openBanner()),\n});\n\ntype NotificationsMenuProps = {\n    isOpen: boolean;\n    bannerUUID?: string;\n};\n\ntype NotificationsMenuComponentProps = NotificationsMenuProps & {\n    openBanner: any;\n};\n\nexport const NotificationsMenuComponent = (props: NotificationsMenuComponentProps) => {\n    const { isOpen, openBanner } = props;\n    const bannerResult = localStorage.getItem(BANNER_LOCAL_STORAGE_KEY);\n    const tooltipResult = localStorage.getItem(TOOLTIP_LOCAL_STORAGE_KEY);\n    const menuItems: any[] = [];\n\n    if (!isOpen && bannerResult) {\n        menuItems.push(\n            <MenuItem onClick={openBanner} data-cy=\"restore-banner-li\">\n                <span>Restore Banner</span>\n            </MenuItem>\n        );\n    }\n\n    const toggleTooltips = useCallback(() => {\n        if (tooltipResult) {\n            localStorage.removeItem(TOOLTIP_LOCAL_STORAGE_KEY);\n        } else {\n            localStorage.setItem(TOOLTIP_LOCAL_STORAGE_KEY, \"true\");\n        }\n        window.location.reload();\n    }, [tooltipResult]);\n\n    if (tooltipResult) {\n        menuItems.push(\n            <MenuItem onClick={toggleTooltips} data-cy=\"enable-tooltip-toggle\">\n                <span>Enable tooltips</span>\n            </MenuItem>\n        );\n    } else {\n        menuItems.push(\n            <MenuItem onClick={toggleTooltips} data-cy=\"disable-tooltip-toggle\">\n                <span>Disable tooltips</span>\n            </MenuItem>\n        );\n    }\n\n    if (menuItems.length === 0) {\n        menuItems.push(<MenuItem>You are up to date</MenuItem>);\n    }\n\n    return (\n        <DropdownMenu\n            icon={\n                <Badge\n                    badgeContent={0}\n                    color=\"primary\"\n                    data-cy=\"notifications-menu\"\n                >\n                    <NotificationIcon />\n                </Badge>\n            }\n            id=\"account-menu\"\n            title=\"Notifications\"\n        >\n            {menuItems.map((item, i) => (\n                <div key={i}>{item}</div>\n            ))}\n        </DropdownMenu>\n    );\n};\n\nexport const NotificationsMenu = connect(mapStateToProps, mapDispatchToProps)(NotificationsMenuComponent);\n"
  },
  {
    "path": "services/workbench2/src/views-components/main-content-bar/main-content-bar.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Toolbar, Grid } from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { Breadcrumbs } from \"views-components/breadcrumbs/breadcrumbs\";\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport RefreshButton from \"components/refresh-button/refresh-button\";\nimport { loadSidePanelTreeProjects } from \"store/side-panel-tree/side-panel-tree-actions\";\nimport { Dispatch } from \"redux\";\n\ntype CssRules = 'mainBar' | 'breadcrumbContainer';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    mainBar: {\n        flexWrap: 'nowrap',\n    },\n    breadcrumbContainer: {\n        overflow: 'hidden',\n    },\n});\n\ninterface MainContentBarProps {\n    onRefreshButtonClick: (id: string) => void;\n    projectUuid: string;\n}\n\nconst mapStateToProps = (state: RootState) => {\n    const currentRoute = state.router.location?.pathname.split('/') || [];\n    const projectUuid = currentRoute[currentRoute.length - 1];\n    return {\n        projectUuid,\n    }\n};\n\nconst mapDispatchToProps = () => (dispatch: Dispatch) => ({\n    onRefreshButtonClick: (id: string) => {\n        dispatch<any>(loadSidePanelTreeProjects(id));\n    }\n});\n\nexport const MainContentBar = connect(mapStateToProps, mapDispatchToProps)(withStyles(styles)(\n    (props: MainContentBarProps & WithStyles<CssRules>) =>\n        <Toolbar><Grid container className={props.classes.mainBar}>\n            <Grid container item xs alignItems=\"center\" className={props.classes.breadcrumbContainer}>\n                <Breadcrumbs />\n            </Grid>\n            <Grid item>\n                <RefreshButton onClick={() => {\n                    props.onRefreshButtonClick(props.projectUuid);\n                }} />\n            </Grid>\n        </Grid></Toolbar>\n));\n"
  },
  {
    "path": "services/workbench2/src/views-components/not-found-dialog/not-found-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { RootState } from 'store/store';\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { NOT_FOUND_DIALOG_NAME } from 'store/not-found-panel/not-found-panel-action';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Dialog, DialogContent, DialogActions, Button } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { NotFoundPanel } from \"views/not-found-panel/not-found-panel\";\n\ntype CssRules = 'tag';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    tag: {\n        marginRight: theme.spacing(1),\n        marginBottom: theme.spacing(1)\n    }\n});\n\ninterface NotFoundDialogDataProps {\n\n}\n\ninterface NotFoundDialogActionProps {\n\n}\n\nconst mapStateToProps = (state: RootState): NotFoundDialogDataProps => ({\n\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch): NotFoundDialogActionProps => ({\n\n});\n\ntype NotFoundDialogProps =  NotFoundDialogDataProps & NotFoundDialogActionProps & WithDialogProps<{}> & WithStyles<CssRules>;\n\nexport const NotFoundDialog = connect(mapStateToProps, mapDispatchToProps)(\n    withStyles(styles)(\n    withDialog(NOT_FOUND_DIALOG_NAME)(\n        ({ open, closeDialog }: NotFoundDialogProps) =>\n            <Dialog\n                open={open}\n                onClose={closeDialog}\n                fullWidth\n                maxWidth='md'\n                disableEscapeKeyDown>\n                <DialogContent>\n                    <NotFoundPanel notWrapped />\n                </DialogContent>\n                <DialogActions>\n                    <Button\n                        variant='text'\n                        color='primary'\n                        onClick={closeDialog}>\n                        Close\n                    </Button>\n                </DialogActions>\n            </Dialog>\n    )\n));"
  },
  {
    "path": "services/workbench2/src/views-components/process-cancel-dialog/process-cancel-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { CANCEL_PROCESS_DIALOG, cancelRunningWorkflow } from 'store/processes/processes-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(cancelRunningWorkflow(props.data.uuid));\n    }\n});\n\nexport const CancelProcessDialog = compose(\n    withDialog(CANCEL_PROCESS_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);\n"
  },
  {
    "path": "services/workbench2/src/views-components/process-input-dialog/process-input-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dialog, DialogActions, Button, CardHeader, DialogContent } from '@mui/material';\nimport { WithDialogProps } from 'store/dialog/with-dialog';\nimport { withDialog } from \"store/dialog/with-dialog\";\nimport { PROCESS_INPUT_DIALOG_NAME } from 'store/processes/process-input-actions';\nimport { RunProcessInputsForm } from \"views/run-process-panel/run-process-inputs-form\";\nimport { MOUNT_PATH_CWL_WORKFLOW, MOUNT_PATH_CWL_INPUT } from \"models/process\";\nimport { getWorkflowInputs } from \"models/workflow\";\n\nexport const ProcessInputDialog = withDialog(PROCESS_INPUT_DIALOG_NAME)(\n    (props: WithDialogProps<any>) =>\n        <Dialog\n            open={props.open}\n            maxWidth={false}\n            onClose={props.closeDialog}>\n            <CardHeader\n                title=\"Inputs - Pipeline template that generates a config file from a template\" />\n            <DialogContent>\n                <RunProcessInputsForm inputs={getInputs(props.data.containerRequest)} />\n            </DialogContent>\n            <DialogActions>\n                <Button\n                    variant='text'\n                    color='primary'\n                    onClick={props.closeDialog}>\n                    Close\n                </Button>\n            </DialogActions>\n        </Dialog>\n);\n\nconst getInputs = (data: any) => {\n    if (!data || !data.mounts || !data.mounts[MOUNT_PATH_CWL_WORKFLOW]) { return []; }\n    const inputs = getWorkflowInputs(data.mounts[MOUNT_PATH_CWL_WORKFLOW].content);\n    return inputs\n        ? inputs.map( (it: any) => (\n            {\n                type: it.type,\n                id: it.id,\n                label: it.label,\n                value: data.mounts[MOUNT_PATH_CWL_INPUT].content[it.id.split('/').pop()] || [],\n                disabled: true\n            }))\n        : [];\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/process-remove-dialog/process-remove-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { removeProcessPermanently, REMOVE_PROCESS_DIALOG } from 'store/processes/processes-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeProcessPermanently(props.data.uuid));\n    }\n});\n\nexport const RemoveProcessDialog = compose(\n    withDialog(REMOVE_PROCESS_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);\n"
  },
  {
    "path": "services/workbench2/src/views-components/process-runtime-status/process-runtime-status.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Accordion, AccordionDetails, AccordionSummary, Paper, Typography } from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport ExpandMoreIcon from '@mui/icons-material/ExpandMore';\nimport { RuntimeStatus } from \"models/runtime-status\";\nimport { ArvadosTheme } from 'common/custom-theme';\nimport classNames from 'classnames';\n\ntype CssRules = 'root'\n              | 'heading'\n              | 'summary'\n              | 'summaryText'\n              | 'details'\n              | 'detailsText'\n              | 'error'\n              | 'errorColor'\n              | 'warning'\n              | 'warningColor'\n              | 'paperRoot';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        marginBottom: theme.spacing(1),\n    },\n    heading: {\n        fontSize: '1rem',\n    },\n    summary: {\n        paddingLeft: theme.spacing(1),\n        paddingRight: theme.spacing(1),\n    },\n    summaryText: {\n        whiteSpace: 'pre-line',\n    },\n    details: {\n        paddingLeft: theme.spacing(1),\n        paddingRight: theme.spacing(1),\n    },\n    detailsText: {\n        fontSize: '0.8rem',\n        marginTop: '0px',\n        marginBottom: '0px',\n        whiteSpace: 'pre-line',\n    },\n    errorColor: {\n        color: theme.customs.colors.grey700,\n    },\n    error: {\n        backgroundColor: theme.customs.colors.red100,\n\n    },\n    warning: {\n        backgroundColor: theme.customs.colors.yellow100,\n    },\n    warningColor: {\n        color: theme.customs.colors.grey700,\n    },\n    paperRoot: {\n        minHeight: theme.spacing(6),\n        display: 'flex',\n        alignItems: 'center',\n    },\n});\nexport interface ProcessRuntimeStatusDataProps {\n    runtimeStatus: RuntimeStatus | undefined;\n    containerCount: number;\n}\n\ntype ProcessRuntimeStatusProps = ProcessRuntimeStatusDataProps & WithStyles<CssRules>;\n\nexport const ProcessRuntimeStatus = withStyles(styles)(\n    ({ runtimeStatus, containerCount, classes }: ProcessRuntimeStatusProps) => {\n        return <div className={classes.root}>\n        { runtimeStatus?.error &&\n          <div data-cy='process-runtime-status-error'><Accordion className={classes.error} elevation={0}>\n              <AccordionSummary className={classNames(classes.summary, classes.detailsText)} expandIcon={<ExpandMoreIcon />}>\n                  <Typography className={classNames(classes.heading, classes.errorColor)}>\n                      {`Error: ${runtimeStatus.error }`}\n                  </Typography>\n              </AccordionSummary>\n              <AccordionDetails className={classes.details}>\n                  <Typography className={classNames(classes.errorColor, classes.detailsText)}>\n                      {runtimeStatus?.errorDetail || 'No additional error details available'}\n                  </Typography>\n              </AccordionDetails>\n          </Accordion></div>\n        }\n            { runtimeStatus?.warning &&\n              <div data-cy='process-runtime-status-warning' ><Accordion className={classes.warning} elevation={0}>\n                  <AccordionSummary className={classNames(classes.summary, classes.detailsText)} expandIcon={<ExpandMoreIcon />}>\n                      
<Typography className={classNames(classes.heading, classes.warningColor)}>\n                          {`Warning: ${runtimeStatus.warning }`}\n                      </Typography>\n                  </AccordionSummary>\n                  <AccordionDetails className={classes.details}>\n                      <Typography className={classNames(classes.warningColor, classes.detailsText)}>\n                          {runtimeStatus?.warningDetail || 'No additional warning details available'}\n                      </Typography>\n                  </AccordionDetails>\n              </Accordion></div>\n            }\n            { containerCount > 1 &&\n              <div data-cy='process-runtime-status-retry-warning' >\n                  <Paper className={classNames(classes.warning, classes.paperRoot)} elevation={0}>\n                      <Typography className={classNames(classes.heading, classes.summary, classes.warningColor)}>\n                          {`Warning: Process retried ${containerCount - 1} time${containerCount > 2 ? 's' : ''} due to failure.`}\n                      </Typography>\n                  </Paper>\n              </div>\n            }\n        </div>\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/projects-tree-picker/favorites-tree-picker.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from 'react-redux';\nimport { ProjectsTreePicker, ProjectsTreePickerProps } from 'views-components/projects-tree-picker/generic-projects-tree-picker';\nimport { Dispatch } from 'redux';\nimport { FavoriteIcon } from 'components/icon/icon';\nimport { loadFavoritesProject } from 'store/tree-picker/tree-picker-actions';\n\nexport const FavoritesTreePicker = connect(() => ({\n    rootItemIcon: FavoriteIcon,\n}), (dispatch: Dispatch): Pick<ProjectsTreePickerProps, 'loadRootItem'> => ({\n    loadRootItem: (_, pickerId, includeCollections, includeDirectories, includeFiles, options) => {\n        dispatch<any>(loadFavoritesProject({ pickerId, includeCollections, includeDirectories, includeFiles, options }));\n    },\n}))(ProjectsTreePicker);\n"
  },
  {
    "path": "services/workbench2/src/views-components/projects-tree-picker/generic-projects-tree-picker.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { isEqual } from 'lodash/fp';\nimport { TreeItem, TreeItemStatus } from 'components/tree/tree';\nimport { ProjectResource } from \"models/project\";\nimport { treePickerActions } from \"store/tree-picker/tree-picker-actions\";\nimport { ListItemTextIcon } from \"components/list-item-text-icon/list-item-text-icon\";\nimport { ProjectIcon, FileInputIcon, IconType, CollectionIcon } from 'components/icon/icon';\nimport { loadProject, loadCollection } from 'store/tree-picker/tree-picker-actions';\nimport { ProjectsTreePickerItem, ProjectsTreePickerRootItem } from 'store/tree-picker/tree-picker-middleware';\nimport { ResourceKind } from 'models/resource';\nimport { TreePickerProps, TreePicker } from \"views-components/tree-picker/tree-picker\";\nimport { CollectionFileType } from 'models/collection-file';\n\n\ntype PickedTreePickerProps = Pick<TreePickerProps<ProjectsTreePickerItem>, 'onContextMenu' | 'toggleItemActive' | 'toggleItemOpen' | 'toggleItemSelection'>;\n\nexport interface ProjectsTreePickerDataProps {\n    cascadeSelection: boolean;\n    includeCollections?: boolean;\n    includeDirectories?: boolean;\n    includeFiles?: boolean;\n    rootItemIcon: IconType;\n    showSelection?: boolean;\n    relatedTreePickers?: string[];\n    disableActivation?: string[];\n    options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n    loadRootItem: (item: TreeItem<ProjectsTreePickerRootItem>, pickerId: string,\n        includeCollections?: boolean, includeDirectories?: boolean, includeFiles?: boolean, options?: { showOnlyOwned: boolean, showOnlyWritable: boolean }) => void;\n}\n\nexport type ProjectsTreePickerProps = ProjectsTreePickerDataProps & Partial<PickedTreePickerProps>;\n\nconst mapStateToProps = (_: any, { rootItemIcon, showSelection, cascadeSelection }: ProjectsTreePickerProps) => ({\n    render: renderTreeItem(rootItemIcon),\n    showSelection: isSelectionVisible(showSelection, cascadeSelection),\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch, { loadRootItem, includeCollections, includeDirectories, includeFiles, relatedTreePickers, options, ...props }: ProjectsTreePickerProps): PickedTreePickerProps => ({\n    onContextMenu: () => { return; },\n    toggleItemActive: (event, item, pickerId) => {\n\n        const { disableActivation = [] } = props;\n        if (disableActivation.some(isEqual(item.id))) {\n            return;\n        }\n\n        dispatch(treePickerActions.ACTIVATE_TREE_PICKER_NODE({ id: item.id, pickerId, relatedTreePickers }));\n        if (props.toggleItemActive) {\n            props.toggleItemActive(event, item, pickerId);\n        }\n    },\n    toggleItemOpen: (_, item, pickerId) => {\n        const { id, data, status } = item;\n        if (status === TreeItemStatus.INITIAL) {\n            if ('kind' in data) {\n                dispatch<any>(\n                    data.kind === ResourceKind.COLLECTION\n                        ? 
loadCollection(id, pickerId, includeDirectories, includeFiles)\n                        : loadProject({ id, pickerId, includeCollections, includeDirectories, includeFiles, options })\n                );\n            } else if (!('type' in data) && loadRootItem) {\n                loadRootItem(item as TreeItem<ProjectsTreePickerRootItem>, pickerId, includeCollections, includeDirectories, includeFiles, options);\n            }\n        } else if (status === TreeItemStatus.LOADED) {\n            dispatch(treePickerActions.TOGGLE_TREE_PICKER_NODE_COLLAPSE({ id, pickerId }));\n        }\n    },\n    toggleItemSelection: (event, item, pickerId) => {\n        dispatch<any>(treePickerActions.TOGGLE_TREE_PICKER_NODE_SELECTION({ id: item.id, pickerId, cascade: props.cascadeSelection }));\n        if (props.toggleItemSelection) {\n            props.toggleItemSelection(event, item, pickerId);\n        }\n    },\n});\n\nexport const ProjectsTreePicker = connect(mapStateToProps, mapDispatchToProps)(TreePicker);\n\nconst getProjectPickerIcon = ({ data }: TreeItem<ProjectsTreePickerItem>, rootIcon: IconType): IconType => {\n    if ('headKind' in data) {\n        switch (data.headKind) {\n            case ResourceKind.COLLECTION:\n                return CollectionIcon;\n            default:\n                return ProjectIcon;\n        }\n    }\n    if ('kind' in data) {\n        switch (data.kind) {\n            case ResourceKind.COLLECTION:\n                return CollectionIcon;\n            default:\n                return ProjectIcon;\n        }\n    } else if ('type' in data) {\n        switch (data.type) {\n            case CollectionFileType.FILE:\n                return FileInputIcon;\n            default:\n                return ProjectIcon;\n        }\n    } else {\n        return rootIcon;\n    }\n};\n\nconst isSelectionVisible = (shouldBeVisible: boolean | undefined, cascadeSelection: boolean) =>\n    ({ status, items, data }: TreeItem<ProjectsTreePickerItem>): boolean => {\n        if (shouldBeVisible) {\n            if (!cascadeSelection && 'kind' in data && data.kind === ResourceKind.COLLECTION) {\n                // In non-cascade mode collections are selectable without being loaded\n                return true;\n            } else if (items && items.length > 0) {\n                return items.every(isSelectionVisible(shouldBeVisible, cascadeSelection));\n            }\n            return status === TreeItemStatus.LOADED;\n        }\n        return false;\n    };\n\nconst renderTreeItem = (rootItemIcon: IconType) => (item: TreeItem<ProjectResource>) =>\n    <ListItemTextIcon\n        icon={getProjectPickerIcon(item, rootItemIcon)}\n        name={item.data.name}\n        isActive={item.active}\n        hasMargin={true} />;\n"
  },
  {
    "path": "services/workbench2/src/views-components/projects-tree-picker/home-tree-picker.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from 'react-redux';\nimport { ProjectsTreePicker, ProjectsTreePickerProps } from 'views-components/projects-tree-picker/generic-projects-tree-picker';\nimport { Dispatch } from 'redux';\nimport { loadUserProject } from 'store/tree-picker/tree-picker-actions';\nimport { RootProjectIcon } from 'components/icon/icon';\n\nexport const HomeTreePicker = connect(() => ({\n    rootItemIcon: RootProjectIcon,\n}), (dispatch: Dispatch): Pick<ProjectsTreePickerProps, 'loadRootItem'> => ({\n    loadRootItem: (_, pickerId, includeCollections, includeDirectories, includeFiles, options) => {\n        dispatch<any>(loadUserProject(pickerId, includeCollections, includeDirectories, includeFiles, options));\n    },\n}))(ProjectsTreePicker);\n"
  },
  {
    "path": "services/workbench2/src/views-components/projects-tree-picker/projects-tree-picker.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Dispatch } from 'redux';\nimport { connect, DispatchProp } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { values, pipe } from 'lodash/fp';\nimport { HomeTreePicker } from 'views-components/projects-tree-picker/home-tree-picker';\nimport { SharedTreePicker } from 'views-components/projects-tree-picker/shared-tree-picker';\nimport { FavoritesTreePicker } from 'views-components/projects-tree-picker/favorites-tree-picker';\nimport { SearchProjectsPicker } from 'views-components/projects-tree-picker/search-projects-picker';\nimport {\n    getProjectsTreePickerIds, treePickerActions, treePickerSearchActions, initProjectsTreePicker,\n    SHARED_PROJECT_ID, FAVORITES_PROJECT_ID, treePickerSearchSagas\n} from 'store/tree-picker/tree-picker-actions';\nimport { TreeItem } from 'components/tree/tree';\nimport { ProjectsTreePickerItem } from 'store/tree-picker/tree-picker-middleware';\nimport { PublicFavoritesTreePicker } from './public-favorites-tree-picker';\nimport { SearchInput } from 'components/search-input/search-input';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { ResourceKind } from 'models/resource';\nimport { CollectionFileType } from 'models/collection-file';\nimport { DefaultView } from 'components/default-view/default-view';\nimport { ProjectDetailsComponent } from 'views-components/details-panel/project-details';\nimport { CollectionDetailsAttributes } from 'views-components/details-panel/collection-details';\nimport { RootProjectDetailsComponent } from 'views-components/details-panel/root-project-details';\nimport { DetailsAttribute } from 'components/details-attribute/details-attribute';\nimport { formatFileSize } from 'common/formatters';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport { Typography } from '@mui/material';\nimport { UserResource } from 'models/user';\nimport { ProjectResource } from 'models/project';\nimport { isEqual } from 'lodash';\nexport interface ToplevelPickerProps {\n    currentUuids?: string[];\n    pickerId: string;\n    cascadeSelection: boolean;\n    includeCollections?: boolean;\n    includeDirectories?: boolean;\n    includeFiles?: boolean;\n    showSelection?: boolean;\n    project?: ProjectResource;\n    options?: { showOnlyOwned: boolean, showOnlyWritable: boolean };\n    toggleItemActive?: (event: React.MouseEvent<HTMLElement>, item: TreeItem<ProjectsTreePickerItem>, pickerId: string) => void;\n    toggleItemSelection?: (event: React.MouseEvent<HTMLElement>, item: TreeItem<ProjectsTreePickerItem>, pickerId: string) => void;\n}\n\ninterface ProjectsTreePickerSearchProps {\n    projectSearch: string;\n    collectionFilter: string;\n}\n\ninterface ProjectsTreePickerActionProps {\n    onProjectSearch: (value: string) => void;\n    onCollectionFilter: (value: string) => void;\n}\n\nconst mapStateToProps = (state: RootState, props: ToplevelPickerProps): ProjectsTreePickerSearchProps => {\n    const { search } = getProjectsTreePickerIds(props.pickerId);\n    return {\n        projectSearch: state.treePickerSearch.projectSearchValues[search] || state.treePickerSearch.collectionFilterValues[search],\n        collectionFilter: 
state.treePickerSearch.collectionFilterValues[search],\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: ToplevelPickerProps): (ProjectsTreePickerActionProps & DispatchProp) => {\n    const { home, shared, favorites, publicFavorites, search } = getProjectsTreePickerIds(props.pickerId);\n    const params = {\n        includeCollections: props.includeCollections,\n        includeDirectories: props.includeDirectories,\n        includeFiles: props.includeFiles,\n        options: props.options\n    };\n    dispatch(treePickerSearchActions.SET_TREE_PICKER_LOAD_PARAMS({ pickerId: home, params }));\n    dispatch(treePickerSearchActions.SET_TREE_PICKER_LOAD_PARAMS({ pickerId: shared, params }));\n    dispatch(treePickerSearchActions.SET_TREE_PICKER_LOAD_PARAMS({ pickerId: favorites, params }));\n    dispatch(treePickerSearchActions.SET_TREE_PICKER_LOAD_PARAMS({ pickerId: publicFavorites, params }));\n    dispatch(treePickerSearchActions.SET_TREE_PICKER_LOAD_PARAMS({ pickerId: search, params }));\n\n    return {\n        onProjectSearch: (projectSearchValue: string) => dispatch(treePickerSearchSagas.SET_PROJECT_SEARCH({ pickerId: search, projectSearchValue })),\n        onCollectionFilter: (collectionFilterValue: string) => {\n            dispatch(treePickerSearchSagas.SET_COLLECTION_FILTER({ pickerMainId: props.pickerId, collectionFilterValue }));\n        },\n        dispatch\n    }\n};\n\ntype CssRules = 'pickerHeight' | 'searchFlex' | 'scrolledBox' | 'detailsBox' | 'twoCol';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    pickerHeight: {\n        height: \"100%\",\n    },\n    searchFlex: {\n        display: \"flex\",\n        justifyContent: \"space-around\",\n        height: \"64px\",\n        marginTop: \"8px\",\n    },\n    scrolledBox: {\n        overflow: \"scroll\",\n        width: \"calc(100% - 320px)\",\n        marginRight: \"8px\",\n        height: \"100%\",\n    },\n    twoCol: {\n        display: \"flex\",\n        flexDirection: \"row\",\n        height: \"calc(100% - 64px)\",\n    },\n    detailsBox: {\n        width: \"320px\",\n        height: \"100%\",\n        overflow: \"scroll\",\n        borderLeft: \"1px solid rgba(0, 0, 0, 0.12)\",\n        paddingLeft: \"4px\",\n    }\n});\n\ntype ProjectsTreePickerCombinedProps = ToplevelPickerProps & ProjectsTreePickerSearchProps & ProjectsTreePickerActionProps & DispatchProp & WithStyles<CssRules>;\n\ninterface SelectionComponentState {\n    activeItem?: ProjectsTreePickerItem;\n}\n\nconst displayUserName = (user: UserResource) => {\n    if (!user.firstName || !user.lastName) {\n        return '';\n    }\n    return `${user.firstName} ${user.lastName} (root project)`;\n};\n\nconst DetailsWithName = (resource: GroupContentsResource | UserResource, detailsComponent: JSX.Element) => {\n    const displayName = resource.kind === ResourceKind.GROUP || resource.kind === ResourceKind.COLLECTION ? resource.name\n                            : resource.kind === ResourceKind.USER ? 
displayUserName(resource)\n                                : '';\n\n    return <div data-cy=\"project-picker-details\">\n        <Typography variant=\"h6\" gutterBottom>{displayName}</Typography>\n        {detailsComponent}\n    </div>;\n};\n\nconst Details = (props: { res?: ProjectsTreePickerItem }) => {\n    if (props.res) {\n        if ('kind' in props.res) {\n            switch (props.res.kind) {\n                case ResourceKind.PROJECT:\n                    return DetailsWithName(props.res, <ProjectDetailsComponent project={props.res} hideEdit={true} />);\n                case ResourceKind.COLLECTION:\n                    return DetailsWithName(props.res, <CollectionDetailsAttributes item={props.res} />);\n                case ResourceKind.USER:\n                    return DetailsWithName(props.res, <RootProjectDetailsComponent rootProject={props.res} />);\n                    // case ResourceKind.PROCESS:\n                    //                         return new ProcessDetails(res);\n                    // case ResourceKind.WORKFLOW:\n                    //     return new WorkflowDetails(res);\n            }\n        } else if ('type' in props.res) {\n            if (props.res.type === CollectionFileType.FILE) {\n                return <>\n                <DetailsAttribute label='Type' value=\"File\" />\n                <DetailsAttribute label='Size' value={formatFileSize(props.res.size)} />\n                </>;\n            } else {\n                return <DetailsAttribute label='Type' value=\"Directory\" />\n            }\n        }\n    }\n    return <DefaultView messages={['Select a file or folder to view its details.']} />;\n};\n\n\nexport const ProjectsTreePicker = connect(mapStateToProps, mapDispatchToProps)(\n    withStyles(styles)(\n        class FileInputComponent extends React.Component<ProjectsTreePickerCombinedProps> {\n            state: SelectionComponentState = {\n                activeItem: undefined,\n            };\n\n            componentDidMount() {\n                const { search } = getProjectsTreePickerIds(this.props.pickerId);\n\n                this.setInitialActiveItem();\n\n                this.props.dispatch(treePickerSearchSagas.SET_PROJECT_SEARCH({ pickerId: search, projectSearchValue: \"\" }));\n                this.props.dispatch(treePickerSearchSagas.SET_COLLECTION_FILTER({ pickerMainId: this.props.pickerId, collectionFilterValue: \"\" }));\n                if (this.props.project) {\n                    this.setState({ activeItem: this.props.project });\n                }\n            }\n\n            componentWillUnmount() {\n                // Release all the state, we don't need it to hang around forever.\n                this.resetAllPickers();\n            }\n\n            componentDidUpdate( prevProps: Readonly<ProjectsTreePickerCombinedProps>, prevState: Readonly<{}>, snapshot?: any ): void {\n                // update active item if project updates while being displayed\n                if (prevProps.project !== this.props.project && prevProps.project?.uuid === this.props.project?.uuid) {\n                    this.setState({ activeItem: this.props.project });\n                }\n                if (!this.state.activeItem && this.props.project) {\n                    this.setState({ activeItem: this.props.project });\n                }\n                if (prevProps.project !== this.props.project) {\n                    this.setState({ activeItem: this.props.project });\n                }\n                if 
(!isEqual(prevProps.currentUuids, this.props.currentUuids)) {\n                    this.setInitialActiveItem();\n                }\n            }\n\n            async setInitialActiveItem() {\n                // must be awaited because React batches state updates\n                await this.resetAllPickers();\n                const preloadParams = this.props.currentUuids ? {\n                    selectedItemUuids: this.props.currentUuids,\n                    includeDirectories: !!this.props.includeDirectories,\n                    includeFiles: !!this.props.includeFiles,\n                    multi: !!this.props.showSelection,\n                } : undefined;\n                this.props.dispatch<any>(initProjectsTreePicker(this.props.pickerId, preloadParams));\n            }\n\n            setSelection(event: React.MouseEvent<HTMLElement>, item: TreeItem<ProjectsTreePickerItem>, pickerId: string) {\n                this.setState({activeItem: item.data});\n            }\n\n            resetAllPickers() {\n                const { home, shared, favorites, publicFavorites, search } = getProjectsTreePickerIds(this.props.pickerId);\n                this.props.dispatch(treePickerActions.RESET_TREE_PICKER({ pickerId: search }));\n                this.props.dispatch(treePickerActions.RESET_TREE_PICKER({ pickerId: home }));\n                this.props.dispatch(treePickerActions.RESET_TREE_PICKER({ pickerId: shared }));\n                this.props.dispatch(treePickerActions.RESET_TREE_PICKER({ pickerId: favorites }));\n                this.props.dispatch(treePickerActions.RESET_TREE_PICKER({ pickerId: publicFavorites }));\n            }\n\n            render() {\n                const pickerId = this.props.pickerId;\n                const onProjectSearch = this.props.onProjectSearch;\n                const onCollectionFilter = this.props.onCollectionFilter;\n\n                const { home, shared, favorites, publicFavorites, search } = getProjectsTreePickerIds(pickerId);\n                const relatedTreePickers = getRelatedTreePickers(pickerId);\n                const _this = this;\n                const pickerProps = {\n                    cascadeSelection: this.props.cascadeSelection,\n                    includeCollections: this.props.includeCollections,\n                    includeDirectories: this.props.includeDirectories,\n                    includeFiles: this.props.includeFiles,\n                    showSelection: this.props.showSelection,\n                    options: this.props.options,\n                    toggleItemActive: (event: React.MouseEvent<HTMLElement>, item: TreeItem<ProjectsTreePickerItem>, pickerId: string): void => {\n                                        _this.setSelection(event, item, pickerId);\n                                        if (_this.props.toggleItemActive) {\n                                            _this.props.toggleItemActive(event, item, pickerId);\n                                        }\n                                    },\n                    toggleItemSelection: this.props.toggleItemSelection,\n                    relatedTreePickers,\n                    disableActivation,\n                };\n\n\n                return <>\n                    <div className={this.props.classes.searchFlex}>\n                        <span data-cy=\"picker-dialog-project-search\"><SearchInput value=\"\" label=\"Project search\" selfClearProp='' onSearch={onProjectSearch} debounce={500} width=\"18rem\"  /></span>\n                {this.props.includeCollections &&\n 
                   <span data-cy=\"picker-dialog-collection-search\" ><SearchInput value=\"\" label=\"Collection search\" selfClearProp='' onSearch={onCollectionFilter} debounce={500} width=\"18rem\" /></span>}\n                </div>\n\n                <div className={this.props.classes.twoCol}>\n                    <div className={this.props.classes.scrolledBox}>\n                        {this.props.projectSearch ?\n                        <div data-cy=\"projects-tree-search-picker\">\n                            <SearchProjectsPicker {...pickerProps} pickerId={search} />\n                        </div>\n                        :\n                        <>\n                            <div data-cy=\"projects-tree-home-tree-picker\">\n                                <HomeTreePicker {...pickerProps} pickerId={home} />\n                            </div>\n                            <div data-cy=\"projects-tree-shared-tree-picker\">\n                                <SharedTreePicker {...pickerProps} pickerId={shared} />\n                            </div>\n                            <div data-cy=\"projects-tree-public-favourites-tree-picker\">\n                                <PublicFavoritesTreePicker {...pickerProps} pickerId={publicFavorites} />\n                            </div>\n                            <div data-cy=\"projects-tree-favourites-tree-picker\">\n                                <FavoritesTreePicker {...pickerProps} pickerId={favorites} />\n                            </div>\n                        </>}\n                    </div>\n\n                    <div className={this.props.classes.detailsBox} data-cy=\"picker-dialog-details\">\n                        <Details res={this.state.activeItem} />\n                    </div>\n                </div>\n                </>;\n        }\n}));\n\nconst getRelatedTreePickers = pipe(getProjectsTreePickerIds, values);\nconst disableActivation = [SHARED_PROJECT_ID, FAVORITES_PROJECT_ID];\n"
  },
  {
    "path": "services/workbench2/src/views-components/projects-tree-picker/public-favorites-tree-picker.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from 'react-redux';\nimport { ProjectsTreePicker, ProjectsTreePickerProps } from 'views-components/projects-tree-picker/generic-projects-tree-picker';\nimport { Dispatch } from 'redux';\nimport { PublicFavoriteIcon } from 'components/icon/icon';\nimport { loadPublicFavoritesProject } from 'store/tree-picker/tree-picker-actions';\n\nexport const PublicFavoritesTreePicker = connect(() => ({\n    rootItemIcon: PublicFavoriteIcon,\n}), (dispatch: Dispatch): Pick<ProjectsTreePickerProps, 'loadRootItem'> => ({\n    loadRootItem: (_, pickerId, includeCollections, includeDirectories, includeFiles, options) => {\n        dispatch<any>(loadPublicFavoritesProject({ pickerId, includeCollections, includeDirectories, includeFiles, options }));\n    },\n}))(ProjectsTreePicker);\n"
  },
  {
    "path": "services/workbench2/src/views-components/projects-tree-picker/search-projects-picker.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from 'react-redux';\nimport { ProjectsTreePicker, ProjectsTreePickerProps } from 'views-components/projects-tree-picker/generic-projects-tree-picker';\nimport { Dispatch } from 'redux';\nimport { SearchIcon } from 'components/icon/icon';\nimport { loadProject } from 'store/tree-picker/tree-picker-actions';\nimport { SEARCH_PROJECT_ID } from 'store/tree-picker/tree-picker-actions';\n\nexport const SearchProjectsPicker = connect(() => ({\n    rootItemIcon: SearchIcon,\n}), (dispatch: Dispatch): Pick<ProjectsTreePickerProps, 'loadRootItem'> => ({\n    loadRootItem: (_, pickerId, includeCollections, includeDirectories, includeFiles, options) => {\n        dispatch<any>(loadProject({ id: SEARCH_PROJECT_ID, pickerId, includeCollections, includeDirectories, includeFiles, options }));\n    },\n}))(ProjectsTreePicker);\n"
  },
  {
    "path": "services/workbench2/src/views-components/projects-tree-picker/shared-tree-picker.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from 'react-redux';\nimport { ProjectsTreePicker, ProjectsTreePickerProps } from 'views-components/projects-tree-picker/generic-projects-tree-picker';\nimport { Dispatch } from 'redux';\nimport { ShareMeIcon } from 'components/icon/icon';\nimport { loadProject } from 'store/tree-picker/tree-picker-actions';\nimport { SHARED_PROJECT_ID } from 'store/tree-picker/tree-picker-actions';\n\nexport const SharedTreePicker = connect(() => ({\n    rootItemIcon: ShareMeIcon,\n}), (dispatch: Dispatch): Pick<ProjectsTreePickerProps, 'loadRootItem'> => ({\n    loadRootItem: (_, pickerId, includeCollections, includeDirectories, includeFiles, options) => {\n        dispatch<any>(loadProject({ id: SHARED_PROJECT_ID, pickerId, includeCollections, includeDirectories, includeFiles, loadShared: true, options }));\n    },\n}))(ProjectsTreePicker);\n"
  },
  {
    "path": "services/workbench2/src/views-components/projects-tree-picker/tree-picker-field.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Typography } from \"@mui/material\";\nimport { TreeItem } from \"components/tree/tree\";\nimport { WrappedFieldProps } from 'redux-form';\nimport { ProjectsTreePicker } from 'views-components/projects-tree-picker/projects-tree-picker';\nimport { ProjectsTreePickerItem } from 'store/tree-picker/tree-picker-middleware';\nimport { PickerIdProp } from 'store/tree-picker/picker-id';\nimport { FileOperationLocation, getFileOperationLocation, SEARCH_PROJECT_ID_PREFIX } from \"store/tree-picker/tree-picker-actions\";\nimport { connect } from \"react-redux\";\nimport { Dispatch } from \"redux\";\n\ntype TreePickerDialogProps = {\n    pickerId: string;\n    currentUuids?: string[];\n    setSelectedProject: (uuid: string) => void;\n}\n\nexport const ProjectTreePickerField = (props: WrappedFieldProps & PickerIdProp) =>\n    <div style={{ display: 'flex', minHeight: 0, flexDirection: 'column' }}>\n        <div style={{ flexBasis: '960px', flexShrink: 1, minHeight: 0, display: 'flex', flexDirection: 'column' }}>\n            <ProjectsTreePicker\n                pickerId={props.pickerId}\n                toggleItemActive={handleChange(props)}\n                cascadeSelection={false}\n                options={{ showOnlyOwned: false, showOnlyWritable: true }} />\n            {props.meta.dirty && props.meta.error &&\n                <Typography variant='caption' color='error'>\n                    {props.meta.error}\n                </Typography>}\n        </div>\n    </div>;\n\nexport const ProjectTreePickerDialogField = (props: TreePickerDialogProps) =>\n    <div style={{ display: 'flex', minHeight: 0, flexDirection: 'column' }}>\n        <div style={{ flexBasis: '960px', flexShrink: 1, minHeight: 0, display: 'flex', flexDirection: 'column' }}>\n            <ProjectsTreePicker\n                pickerId={props.pickerId}\n                currentUuids={props.currentUuids}\n                toggleItemActive={(_: any, { id }) => props.setSelectedProject(id)}\n                cascadeSelection={false}\n                options={{ showOnlyOwned: false, showOnlyWritable: true }} />\n        </div>\n    </div>;\n\nconst handleChange = (props: WrappedFieldProps) =>\n    (_: any, { id }: TreeItem<ProjectsTreePickerItem>) => {\n        if (id.startsWith(SEARCH_PROJECT_ID_PREFIX)) {\n            props.input.onChange(id.slice(SEARCH_PROJECT_ID_PREFIX.length));\n        } else {\n            props.input.onChange(id);\n        }\n    }\n\ntype ProjectsTreePickerActionProps = {\n    getFileOperationLocation: (item: ProjectsTreePickerItem) => Promise<FileOperationLocation | undefined>;\n}\n\nconst projectsTreePickerMapDispatchToProps = (dispatch: Dispatch): ProjectsTreePickerActionProps => ({\n    getFileOperationLocation: (item: ProjectsTreePickerItem) => dispatch<any>(getFileOperationLocation(item)),\n});\n\ntype ProjectsTreePickerCombinedProps = ProjectsTreePickerActionProps & WrappedFieldProps & PickerIdProp;\n\nexport const DirectoryTreePickerField = connect(null, projectsTreePickerMapDispatchToProps)(\n    class DirectoryTreePickerFieldComponent extends React.Component<ProjectsTreePickerCombinedProps> {\n\n        handleDirectoryChange = (props: WrappedFieldProps) =>\n            async (_: any, { data }: TreeItem<ProjectsTreePickerItem>) => {\n                const location = await this.props.getFileOperationLocation(data);\n                
props.input.onChange(location || '');\n            }\n\n        render() {\n            return <div style={{ display: 'flex', minHeight: 0, flexDirection: 'column' }}>\n                <div style={{ flexBasis: '275px', flexShrink: 1, minHeight: 0, display: 'flex', flexDirection: 'column' }}>\n                    <ProjectsTreePicker\n                        currentUuids={[this.props.input.value.uuid]}\n                        pickerId={this.props.pickerId}\n                        toggleItemActive={this.handleDirectoryChange(this.props)}\n                        cascadeSelection={false}\n                        options={{ showOnlyOwned: false, showOnlyWritable: true }}\n                        includeCollections\n                        includeDirectories />\n                    {this.props.meta.dirty && this.props.meta.error &&\n                        <Typography variant='caption' color='error'>\n                            {this.props.meta.error}\n                        </Typography>}\n                </div>\n            </div>;\n        }\n    });\n\ntype DirectoryTreePickerDialogFieldProps = PickerIdProp & {\n    currentUuids?: string[];\n    getFileOperationLocation: (item: ProjectsTreePickerItem) => Promise<FileOperationLocation | undefined>;\n    handleDirectoryChange: (destination: FileOperationLocation) => void;\n};\n\nexport const DirectoryTreePickerDialogField = connect(null, projectsTreePickerMapDispatchToProps)(\n    (props: DirectoryTreePickerDialogFieldProps)=> {\n\n    const handleDirectoryChange = (props: DirectoryTreePickerDialogFieldProps) =>\n            async (_: any, { data }: TreeItem<ProjectsTreePickerItem>) => {\n                const location: FileOperationLocation | undefined = await props.getFileOperationLocation(data);\n                if (location) {\n                    props.handleDirectoryChange(location);\n                } else {\n                    props.handleDirectoryChange({} as FileOperationLocation);\n                }\n            }\n\n    return (\n        <div style={{ display: 'flex', minHeight: 0, flexDirection: 'column' }}>\n            <div style={{ flexBasis: '960px', flexShrink: 1, minHeight: 0, display: 'flex', flexDirection: 'column' }}>\n                <ProjectsTreePicker\n                    currentUuids={props.currentUuids}\n                    pickerId={props.pickerId}\n                    toggleItemActive={handleDirectoryChange(props)}\n                    cascadeSelection={false}\n                    options={{ showOnlyOwned: false, showOnlyWritable: true }}\n                    includeCollections\n                    includeDirectories />\n            </div>\n        </div>\n        );\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/property-chips/get-property-chips.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Provider } from 'react-redux';\nimport { createStore, combineReducers } from 'redux';\nimport { getPropertyChips } from './get-property-chips';\n\ndescribe(\"getPropertyChips\", () => {\n    let store;\n\n    beforeEach(() => {\n        store = createStore(combineReducers({\n            properties: (state = {}, action) => state,\n        }));\n    });\n\n    it(\"renders property chips\", () => {\n        const resource = {\n            properties: {\n                foo: 'bar',\n                baz: ['qux', 'quux', [ 'quuz' ]]\n            }\n        };\n        cy.mount(\n                    <Provider store={store}>\n                        {getPropertyChips(resource, {})}\n                    </Provider>);\n        cy.get('html').should('contain', 'foo: bar');\n        cy.get('html').should('contain', 'baz: qux');\n        cy.get('html').should('contain', 'baz: quux');\n        cy.get('html').should('contain', 'baz: quuz');\n    });\n\n    it(\"filters out objects\", () => {\n        const resource = {\n            properties: {\n                foo: 'bar',\n                baz: { qux: 'quux' }\n            }\n        };\n        cy.mount(\n                    <Provider store={store}>\n                        {getPropertyChips(resource, {})}\n                    </Provider>);\n        cy.get('html').should('contain', 'foo: bar');\n        // should not contain baz at all, because its value is an object\n        cy.get('html').should('not.contain', 'baz');\n    });\n});"
  },
  {
    "path": "services/workbench2/src/views-components/property-chips/get-property-chips.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\n\nimport { getPropertyChip } from 'views-components/resource-properties-form/property-chip';\nimport { ContainerRequestResource } from 'models/container-request';\nimport { CollectionResource } from 'models/collection';\nimport { ProjectResource } from 'models/project';\n\nexport const getPropertyChips = (resource: ProjectResource | CollectionResource | ContainerRequestResource | undefined, classes: any): JSX.Element | null => {\n    if (!resource || !resource.properties || typeof resource.properties !== 'object') return null;\n\n    const properties = { ...resource.properties } as Record<string, any>;\n\n    return (\n        <section data-cy='resource-properties'>\n            {Object.keys(properties).map((k) =>\n                Array.isArray(properties[k])\n                    ? properties[k].map((v: string) => getPropertyChip(k, v, undefined, classes.tag))\n                    : typeof properties[k] === 'object'\n                        ? null\n                        : getPropertyChip(k, properties[k], undefined, classes.tag)\n            )}\n        </section>\n    );\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/remove-dialog/remove-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button } from \"@mui/material\";\nimport { withDialog } from \"store/dialog/with-dialog\";\nimport { dialogActions } from \"store/dialog/dialog-actions\";\n\nexport const REMOVE_DIALOG = 'removeCollectionFilesDialog';\n\nexport const RemoveDialog = withDialog(REMOVE_DIALOG)(\n    (props) =>\n        <Dialog open={props.open}>\n            <DialogTitle>{`Removing ${props.data}`}</DialogTitle>\n            <DialogContent>\n                {`Are you sure you want to remove ${props.data}?`}\n            </DialogContent>\n            <DialogActions>\n                <Button\n                    variant='text'\n                    color='primary'\n                    onClick={props.closeDialog}>\n                    Cancel\n                </Button>\n                <Button variant='contained' color='primary'>\n                    Remove\n                </Button>\n            </DialogActions>\n        </Dialog>\n);\n\nexport const openRemoveDialog = (removedDataName: string) =>\n    dialogActions.OPEN_DIALOG({ id: REMOVE_DIALOG, data: removedDataName });\n"
  },
  {
    "path": "services/workbench2/src/views-components/rename-file-dialog/rename-file-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { compose, Dispatch } from 'redux';\nimport { reduxForm, InjectedFormProps, Field } from 'redux-form';\nimport { withDialog, WithDialogProps } from 'store/dialog/with-dialog';\nimport { FormDialog } from 'components/form-dialog/form-dialog';\nimport { DialogContentText } from '@mui/material';\nimport { TextField } from 'components/text-field/text-field';\nimport { RENAME_FILE_DIALOG, RenameFileDialogData, renameFile } from 'store/collection-panel/collection-panel-files/collection-panel-files-actions';\nimport { WarningCollection } from 'components/warning-collection/warning-collection';\nimport { RENAME_FILE_VALIDATION } from 'validators/validators';\n\nexport const RenameFileDialog = compose(\n    withDialog(RENAME_FILE_DIALOG),\n    reduxForm({\n        form: RENAME_FILE_DIALOG,\n        touchOnChange: true,\n        onSubmit: (data: { path: string }, dispatch: Dispatch) => {\n            dispatch<any>(renameFile(data.path));\n        }\n    })\n)((props: WithDialogProps<RenameFileDialogData> & InjectedFormProps<{ name: string, path: string }>) =>\n    <FormDialog\n        dialogTitle='Rename'\n        formFields={RenameDialogFormFields}\n        submitLabel='Ok'\n        {...props}\n    />);\n\nconst RenameDialogFormFields = (props: WithDialogProps<RenameFileDialogData>) => <>\n    <DialogContentText>\n        {`Please, enter a new name for ${props.data.name}`}\n    </DialogContentText>\n    <Field\n        name='path'\n        component={TextField as any}\n        autoFocus={true}\n        validate={RENAME_FILE_VALIDATION}\n    />\n    <WarningCollection text=\"Renaming a file will change the collection's content address.\" />\n</>;\n"
  },
  {
    "path": "services/workbench2/src/views-components/repositories-sample-git-dialog/repositories-sample-git-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button, Typography } from \"@mui/material\";\nimport { WithDialogProps } from \"store/dialog/with-dialog\";\nimport { withDialog } from 'store/dialog/with-dialog';\nimport { REPOSITORIES_SAMPLE_GIT_DIALOG } from \"store/repositories/repositories-actions\";\nimport { DefaultCodeSnippet } from 'components/default-code-snippet/default-code-snippet';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { compose } from \"redux\";\n\ntype CssRules = 'codeSnippet' | 'link' | 'spacing';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    codeSnippet: {\n        borderRadius: theme.spacing(0.5),\n        border: '1px solid',\n        borderColor: theme.palette.grey[\"400\"],\n    },\n    link: {\n        textDecoration: 'none',\n        color: theme.palette.primary.main,\n        \"&:hover\": {\n            color: theme.palette.primary.dark,\n            transition: 'all 0.5s ease'\n        }\n    },\n    spacing: {\n        paddingTop: theme.spacing(2)\n    }\n});\n\ninterface RepositoriesSampleGitDataProps {\n    uuidPrefix: string;\n}\n\ntype RepositoriesSampleGitProps = RepositoriesSampleGitDataProps & WithStyles<CssRules>;\n\nexport const RepositoriesSampleGitDialog = compose(\n    withDialog(REPOSITORIES_SAMPLE_GIT_DIALOG),\n    withStyles(styles))(\n        (props: WithDialogProps<RepositoriesSampleGitProps> & RepositoriesSampleGitProps) =>\n            <Dialog open={props.open}\n                onClose={props.closeDialog}\n                fullWidth\n                maxWidth='sm'>\n                <DialogTitle>Sample git quick start:</DialogTitle>\n                <DialogContent>\n                    <DefaultCodeSnippet\n                        className={props.classes.codeSnippet}\n                        lines={[snippetText(props.data.uuidPrefix)]} />\n                    <Typography variant='body1' className={props.classes.spacing}>\n                        See also:\n                        <div><a href=\"https://doc.arvados.org/user/getting_started/ssh-access-unix.html\" className={props.classes.link} target=\"_blank\" rel=\"noopener noreferrer\">SSH access</a></div>\n                        <div><a href=\"https://doc.arvados.org/user/tutorials/tutorial-firstscript.html\" className={props.classes.link} target=\"_blank\" rel=\"noopener noreferrer\">Writing a Crunch Script</a></div>\n                    </Typography>\n                </DialogContent>\n                <DialogActions>\n                    <Button\n                        variant='text'\n                        color='primary'\n                        onClick={props.closeDialog}>\n                        Close\n                    </Button>\n                </DialogActions>\n            </Dialog>\n    );\n\nconst snippetText = (uuidPrefix: string) => `git clone git@git.${uuidPrefix}.arvadosapi.com:arvados.git\ncd arvados\n# edit files\ngit add the/files/you/changed\ngit commit\ngit push\n`;\n"
  },
  {
    "path": "services/workbench2/src/views-components/repository-attributes-dialog/repository-attributes-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button, Typography, Grid } from \"@mui/material\";\nimport { WithDialogProps } from \"store/dialog/with-dialog\";\nimport { withDialog } from 'store/dialog/with-dialog';\nimport { REPOSITORY_ATTRIBUTES_DIALOG } from \"store/repositories/repositories-actions\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { compose } from \"redux\";\nimport { RepositoryResource } from \"models/repositories\";\n\ntype CssRules = 'rightContainer' | 'leftContainer' | 'spacing';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    rightContainer: {\n        textAlign: 'right',\n        paddingRight: theme.spacing(2),\n        color: theme.palette.grey[\"500\"]\n    },\n    leftContainer: {\n        textAlign: 'left',\n        paddingLeft: theme.spacing(2)\n    },\n    spacing: {\n        paddingTop: theme.spacing(2)\n    },\n});\n\ninterface RepositoryAttributesDataProps {\n    repositoryData: RepositoryResource;\n}\n\ntype RepositoryAttributesProps = RepositoryAttributesDataProps & WithStyles<CssRules>;\n\nexport const RepositoryAttributesDialog = compose(\n    withDialog(REPOSITORY_ATTRIBUTES_DIALOG),\n    withStyles(styles))(\n        (props: WithDialogProps<RepositoryAttributesProps> & RepositoryAttributesProps) =>\n            <Dialog open={props.open}\n                onClose={props.closeDialog}\n                fullWidth\n                maxWidth=\"sm\">\n                <DialogTitle>Attributes</DialogTitle>\n                <DialogContent>\n                    <Typography variant='body1' className={props.classes.spacing}>\n                        {props.data.repositoryData && attributes(props.data.repositoryData, props.classes)}\n                    </Typography>\n                </DialogContent>\n                <DialogActions>\n                    <Button\n                        variant='text'\n                        color='primary'\n                        onClick={props.closeDialog}>\n                        Close\n                </Button>\n                </DialogActions>\n            </Dialog>\n    );\n\nconst attributes = (repositoryData: RepositoryResource, classes: any) => {\n    const { uuid, ownerUuid, createdAt, modifiedAt, modifiedByUserUuid, name } = repositoryData;\n    return (\n        <span>\n            <Grid container direction=\"row\">\n                <Grid item xs={5} className={classes.rightContainer}>\n                    <Grid item>Name</Grid>\n                    <Grid item>Owner uuid</Grid>\n                    <Grid item>Created at</Grid>\n                    <Grid item>Modified at</Grid>\n                    <Grid item>Modified by user uuid</Grid>\n                    <Grid item>uuid</Grid>\n                </Grid>\n                <Grid item xs={7} className={classes.leftContainer}>\n                    <Grid item>{name}</Grid>\n                    <Grid item>{ownerUuid}</Grid>\n                    <Grid item>{createdAt}</Grid>\n                    <Grid item>{modifiedAt}</Grid>\n                    <Grid item>{modifiedByUserUuid}</Grid>\n                    <Grid item>{uuid}</Grid>\n                </Grid>\n            </Grid>\n        </span>\n    );\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/repository-remove-dialog/repository-remove-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { removeRepository, REPOSITORY_REMOVE_DIALOG } from 'store/repositories/repositories-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeRepository(props.data.uuid));\n    }\n});\n\nexport const RemoveRepositoryDialog = compose(\n    withDialog(REPOSITORY_REMOVE_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);"
  },
  {
    "path": "services/workbench2/src/views-components/resource-properties-form/property-chip.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Chip } from '@mui/material';\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport CopyToClipboard from 'react-copy-to-clipboard';\nimport { getVocabulary } from 'store/vocabulary/vocabulary-selectors';\nimport { Dispatch } from 'redux';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { getTagValueLabel, getTagKeyLabel, Vocabulary } from 'models/vocabulary';\n\ninterface PropertyChipComponentDataProps {\n    propKey: string;\n    propValue: string;\n    className: string;\n    vocabulary: Vocabulary;\n}\n\ninterface PropertyChipComponentActionProps {\n    onDelete?: () => void;\n    onCopy: (message: string) => void;\n}\n\ntype PropertyChipComponentProps = PropertyChipComponentActionProps & PropertyChipComponentDataProps;\n\nconst mapStateToProps = ({ properties }: RootState) => ({\n    vocabulary: getVocabulary(properties),\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    onCopy: (message: string) => dispatch(snackbarActions.OPEN_SNACKBAR({\n        message,\n        hideDuration: 2000,\n        kind: SnackbarKind.SUCCESS\n    }))\n});\n\n// Renders a Chip with copyable-on-click tag:value data based on the vocabulary\nexport const PropertyChipComponent = connect(mapStateToProps, mapDispatchToProps)(\n    ({ propKey, propValue, vocabulary, className, onCopy, onDelete }: PropertyChipComponentProps) => {\n        const label = `${getTagKeyLabel(propKey, vocabulary)}: ${getTagValueLabel(propKey, propValue, vocabulary)}`;\n        return (\n            <span onClick={(ev)=>ev.stopPropagation()}>\n                <CopyToClipboard key={propKey} text={label} onCopy={() => onCopy(\"Copied to clipboard\")}>\n                    <Chip onDelete={onDelete} key={propKey}\n                        className={className} label={label} />\n                </CopyToClipboard>\n            </span>\n        );\n    }\n);\n\nexport const getPropertyChip = (k: string, v: string, handleDelete: any, className: string) =>\n    <PropertyChipComponent\n        key={`${k}-${v}`} className={className}\n        onDelete={handleDelete}\n        propKey={k} propValue={v} />;\n"
  },
  {
    "path": "services/workbench2/src/views-components/resource-properties-form/property-field-common.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from 'react-redux';\nimport { change, WrappedFieldMetaProps, WrappedFieldInputProps, WrappedFieldProps } from 'redux-form';\nimport { Vocabulary, PropFieldSuggestion } from 'models/vocabulary';\nimport { RootState } from 'store/store';\nimport { getVocabulary } from 'store/vocabulary/vocabulary-selectors';\n\nexport interface VocabularyProp {\n    vocabulary: Vocabulary;\n}\n\nexport interface ValidationProp {\n    skipValidation?: boolean;\n    clearPropertyKeyOnSelect?: boolean;\n}\n\nexport const mapStateToProps = (state: RootState, ownProps: ValidationProp): VocabularyProp & ValidationProp => ({\n    skipValidation: ownProps.skipValidation,\n    vocabulary: getVocabulary(state.properties),\n});\n\nexport const connectVocabulary = connect(mapStateToProps);\n\nexport const ITEMS_PLACEHOLDER: string[] = [];\n\nexport const hasError = ({ touched, invalid }: WrappedFieldMetaProps) =>\n    touched && invalid;\n\nexport const getErrorMsg = (meta: WrappedFieldMetaProps) =>\n    hasError(meta)\n        ? meta.error\n        : '';\n\nexport const buildProps = ({ input, meta }: WrappedFieldProps) => {\n    return {\n        value: input.value,\n        items: ITEMS_PLACEHOLDER,\n        renderSuggestion: (item: PropFieldSuggestion) => item.label,\n        error: hasError(meta),\n        helperText: getErrorMsg(meta),\n    };\n};\n\n// Attempts to match a manually typed value label with a value ID, when the user\n// doesn't select the value from the suggestions list.\nexport const handleBlur = (\n    fieldName: string,\n    formName: string,\n    { dispatch }: WrappedFieldMetaProps,\n    { onBlur, value }: WrappedFieldInputProps,\n    fieldValue: string) =>\n    () => {\n        dispatch(change(formName, fieldName, fieldValue));\n        onBlur(value);\n    };\n\n// When selecting a property value, save its ID for later usage.\nexport const handleSelect = (\n    fieldName: string,\n    formName: string,\n    { onChange }: WrappedFieldInputProps,\n    { dispatch }: WrappedFieldMetaProps) =>\n    (item: PropFieldSuggestion) => {\n        if (item) {\n            onChange(item.label);\n            dispatch(change(formName, fieldName, item.id));\n        }\n    };\n"
  },
  {
    "path": "services/workbench2/src/views-components/resource-properties-form/property-key-field.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { WrappedFieldProps, Field, FormName, reset, change, WrappedFieldInputProps, WrappedFieldMetaProps } from 'redux-form';\nimport { Autocomplete } from 'components/autocomplete/autocomplete';\nimport {\n    Vocabulary,\n    getTags,\n    getTagKeyID,\n    getTagKeyLabel,\n    getPreferredTags,\n    PropFieldSuggestion\n} from 'models/vocabulary';\nimport {\n    handleSelect,\n    handleBlur,\n    connectVocabulary,\n    VocabularyProp,\n    ValidationProp,\n    buildProps\n} from 'views-components/resource-properties-form/property-field-common';\nimport { TAG_KEY_VALIDATION, REQUIRED_LENGTH255_VALIDATION, Validator } from 'validators/validators';\nimport { escapeRegExp } from 'common/regexp';\nimport { ChangeEvent } from 'react';\nimport { useStateWithValidation } from 'common/useStateWithValidation';\n\nexport const PROPERTY_KEY_FIELD_NAME = 'key';\nexport const PROPERTY_KEY_FIELD_ID = 'keyID';\n\nexport const PropertyKeyField = connectVocabulary(\n    ({ vocabulary, skipValidation, clearPropertyKeyOnSelect }: VocabularyProp & ValidationProp) =>\n        <span data-cy='property-field-key'>\n        <Field\n            clearPropertyKeyOnSelect\n            name={PROPERTY_KEY_FIELD_NAME}\n            component={PropertyKeyInput}\n            vocabulary={vocabulary}\n            validate={skipValidation ? undefined : getValidation(vocabulary)} />\n        </span>\n);\n\nconst PropertyKeyInput = ({ vocabulary, ...props }: WrappedFieldProps & VocabularyProp & { clearPropertyKeyOnSelect?: boolean }) =>\n    <FormName children={data => (\n        <Autocomplete\n            {...buildProps(props)}\n            label='Key'\n            suggestions={getSuggestions(props.input.value, vocabulary)}\n            renderSuggestion={\n                (s: PropFieldSuggestion) => s.synonyms && s.synonyms.length > 0\n                    ? 
`${s.label} (${s.synonyms.join('; ')})`\n                    : s.label\n            }\n            onFocus={() => {\n                if (props.clearPropertyKeyOnSelect && props.input.value) {\n                    props.meta.dispatch(reset(props.meta.form));\n                }\n            }}\n            onSelect={handleSelect(PROPERTY_KEY_FIELD_ID, data.form, props.input, props.meta)}\n            onBlur={() => {\n                // Case-insensitive search for the key in the vocabulary\n                const foundKeyID = getTagKeyID(props.input.value, vocabulary);\n                if (foundKeyID !== '') {\n                    props.input.value = getTagKeyLabel(foundKeyID, vocabulary);\n                }\n                handleBlur(PROPERTY_KEY_FIELD_ID, data.form, props.meta, props.input, foundKeyID)();\n            }}\n            onChange={(e: ChangeEvent<HTMLInputElement>) => {\n                const newValue = e.currentTarget.value;\n                handleChange(data.form, props.input, props.meta, newValue);\n            }}\n        />\n    )} />;\n\nconst handleChange = (\n    formName: string,\n    { onChange }: WrappedFieldInputProps,\n    { dispatch }: WrappedFieldMetaProps,\n    value: string) => {\n        // Properties' values are dependant on the keys, if any value is\n        // pre-existant, a change on the property key should mean that the\n        // previous value is invalid, so we better reset the whole form before\n        // setting the new tag key.\n        dispatch(reset(formName));\n\n        onChange(value);\n        dispatch(change(formName, PROPERTY_KEY_FIELD_NAME, value));\n    };\n\ntype DialogPropertyKeyInputProps = VocabularyProp & {\n    showErrors?: boolean\n    skipValidation?: boolean,\n    clearPropertyKeyOnSelect?: boolean,\n    setCurrentValue?: (value: string | undefined) => void,\n    onSelect: (value: string) => void,\n    setKeyErrors: (errors: string[]) => void,\n};\n\nexport const DialogPropertyKeyInput = ({ vocabulary, showErrors, skipValidation, clearPropertyKeyOnSelect, onSelect, setKeyErrors, setCurrentValue }: DialogPropertyKeyInputProps) => {\n    const validationArray = skipValidation ? [] : getKeyValidation(vocabulary);\n    const [key, setKey, keyErrs] = useStateWithValidation('', validationArray, 'Key');\n\n    // report errors to parent component\n    React.useEffect(() => {\n        setKeyErrors(keyErrs);\n    }, [keyErrs]);\n\n    const handleSetKey = (newKey: string) => {\n        if (setCurrentValue) {\n            setCurrentValue(undefined);\n        }\n        setKey(newKey);\n        onSelect(newKey);\n    }\n\n    return <Autocomplete\n        label='Key'\n        items={[]}\n        value={key}\n        error={showErrors && keyErrs.length > 0}\n        helperText={showErrors ? keyErrs.join(', ') : undefined}\n        suggestions={getSuggestions(key, vocabulary)}\n        renderSuggestion={\n            (s: PropFieldSuggestion) => s.synonyms && s.synonyms.length > 0\n                ? 
`${s.label} (${s.synonyms.join('; ')})`\n                : s.label\n        }\n        onFocus={() => {\n            setKey('');\n            onSelect('');\n            if (clearPropertyKeyOnSelect && key && setCurrentValue) {\n                setCurrentValue(undefined);\n            }\n        }}\n        onSelect={(selectedSuggestion: PropFieldSuggestion) => {\n            handleSetKey(selectedSuggestion.label);\n        }}\n        onBlur={() => {\n            // Case-insensitive search for the key in the vocabulary\n            const foundKeyID = getTagKeyID(key, vocabulary);\n            if (foundKeyID !== '') {\n                const foundKeyLabel = getTagKeyLabel(foundKeyID, vocabulary);\n                handleSetKey(foundKeyLabel);\n            }\n        }}\n        onChange={(e: ChangeEvent<HTMLInputElement>) => {\n            const newValue = e.currentTarget.value;\n            if (vocabulary.strict_tags === false) {\n                handleSetKey(newValue);\n            } else {\n                setKey(newValue);\n            }\n        }}\n    />\n};\n\nconst getValidation =\n    (vocabulary: Vocabulary) =>\n        vocabulary.strict_tags\n            ? [...TAG_KEY_VALIDATION, matchTags(vocabulary)]\n            : TAG_KEY_VALIDATION\n\nconst createStrictTagValidator = (vocabulary: Vocabulary): Validator => {\n    const validTags = getTags(vocabulary).map(tag => tag.label);\n    const validTagSet = new Set(validTags);\n\n    return ((value: string) =>\n        validTagSet.has(value) ? undefined : 'Incorrect key'\n    ) as Validator;\n};\n\nconst getKeyValidation = (vocabulary: Vocabulary) => {\n    if (vocabulary.strict_tags) {\n        return [...REQUIRED_LENGTH255_VALIDATION, createStrictTagValidator(vocabulary)];\n    }\n    return REQUIRED_LENGTH255_VALIDATION;\n}\n\nconst matchTags = (vocabulary: Vocabulary) =>\n    (value: string) =>\n        getTags(vocabulary).find(tag => tag.label === value)\n            ? undefined\n            : 'Incorrect key';\n\nconst getSuggestions = (value: string, vocabulary: Vocabulary): PropFieldSuggestion[] => {\n    const re = new RegExp(escapeRegExp(value), \"i\");\n    return getPreferredTags(vocabulary, value).filter(\n        tag => (tag.label !== value && re.test(tag.label)) ||\n            (tag.synonyms && tag.synonyms.some(s => re.test(s))));\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/resource-properties-form/property-value-field.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { WrappedFieldProps, Field, formValues, FormName, WrappedFieldInputProps, WrappedFieldMetaProps, change } from 'redux-form';\nimport { compose } from 'redux';\nimport { Autocomplete } from 'components/autocomplete/autocomplete';\nimport { Vocabulary, isStrictTag, getTagValues, getTagValueID, getTagValueLabel, PropFieldSuggestion, getPreferredTagValues } from 'models/vocabulary';\nimport { PROPERTY_KEY_FIELD_ID, PROPERTY_KEY_FIELD_NAME } from 'views-components/resource-properties-form/property-key-field';\nimport {\n    handleSelect,\n    handleBlur,\n    VocabularyProp,\n    ValidationProp,\n    connectVocabulary,\n    buildProps\n} from 'views-components/resource-properties-form/property-field-common';\nimport { TAG_VALUE_VALIDATION, REQUIRED_LENGTH255_VALIDATION } from 'validators/validators';\nimport { escapeRegExp } from 'common/regexp';\nimport { ChangeEvent } from 'react';\nimport { memoize } from 'lodash';\nimport { useStateWithValidation } from 'common/useStateWithValidation';\nimport { Validator } from 'validators/validators';\n\ninterface PropertyKeyProp {\n    propertyKeyId: string;\n    propertyKeyName: string;\n}\n\ninterface PropertyValueInputProp {\n    disabled: boolean;\n}\n\ntype PropertyValueFieldProps = VocabularyProp & PropertyKeyProp & ValidationProp & PropertyValueInputProp;\n\nexport const PROPERTY_VALUE_FIELD_NAME = 'value';\nexport const PROPERTY_VALUE_FIELD_ID = 'valueID';\n\nconst connectVocabularyAndPropertyKey = compose(\n    connectVocabulary,\n    formValues({\n        propertyKeyId: PROPERTY_KEY_FIELD_ID,\n        propertyKeyName: PROPERTY_KEY_FIELD_NAME,\n    }),\n);\n\nexport const PropertyValueField = connectVocabularyAndPropertyKey(\n    ({ skipValidation, ...props }: PropertyValueFieldProps) =>\n        <span data-cy='property-field-value'>\n        <Field\n            name={PROPERTY_VALUE_FIELD_NAME}\n            component={PropertyValueInput}\n            validate={skipValidation ? undefined : getValidation(props.propertyKeyId, props.vocabulary)}\n            {...{...props, disabled: !props.propertyKeyName}} />\n        </span>\n);\n\nconst PropertyValueInput = ({ vocabulary, propertyKeyId, propertyKeyName, ...props }: WrappedFieldProps & PropertyValueFieldProps) =>\n    <FormName children={data => (\n        <Autocomplete\n            {...buildProps(props)}\n            label='Value'\n            disabled={props.disabled}\n            suggestions={getSuggestions(props.input.value, propertyKeyId, vocabulary)}\n            renderSuggestion={\n                (s: PropFieldSuggestion) => s.synonyms && s.synonyms.length > 0\n                    ? 
`${s.label} (${s.synonyms.join('; ')})`\n                    : s.label\n            }\n            onSelect={handleSelect(PROPERTY_VALUE_FIELD_ID, data.form, props.input, props.meta)}\n            onBlur={() => {\n                // Case-insensitive search for the value in the vocabulary\n                const foundValueID =  getTagValueID(propertyKeyId, props.input.value, vocabulary);\n                if (foundValueID !== '') {\n                    props.input.value = getTagValueLabel(propertyKeyId, foundValueID, vocabulary);\n                }\n                handleBlur(PROPERTY_VALUE_FIELD_ID, data.form, props.meta, props.input, foundValueID)();\n            }}\n            onChange={(e: ChangeEvent<HTMLInputElement>) => {\n                const newValue = e.currentTarget.value;\n                const tagValueID = getTagValueID(propertyKeyId, newValue, vocabulary);\n                handleChange(data.form, tagValueID, props.input, props.meta, newValue);\n            }}\n        />\n    )} />;\n\ntype DialogPropertyValueInputProps = VocabularyProp & {\n    showErrors?: boolean,\n    skipValidation?: boolean,\n    propertyKeyId: string,\n    propertyKeyName?: string,\n    currentValue?: string,\n    onSelect: (value: string) => void,\n    setValueErrors: (errors: string[]) => void,\n};\n\nexport const DialogPropertyValueInput = ({ vocabulary, propertyKeyId, propertyKeyName, currentValue, showErrors, skipValidation, onSelect, setValueErrors }: DialogPropertyValueInputProps) => {\n    const validationArray = skipValidation ? [] : getValueValidation(propertyKeyId, vocabulary);\n    const [value, setValue, valueErrs] = useStateWithValidation(currentValue || '', validationArray, 'Value');\n    const prevValue = React.useRef(currentValue);\n\n    React.useEffect(() => {\n        if (prevValue.current && prevValue.current?.length > 0 && (currentValue?.length === 0 || currentValue === undefined)) {\n            setValue('');\n        }\n        prevValue.current = currentValue;\n    }, [propertyKeyName, currentValue]);\n\n    React.useEffect(() => {\n        setValueErrors(valueErrs);\n    }, [valueErrs]);\n\n    const hasVocabularyKey = propertyKeyId.length > 0;\n    const allowArbitraryValue = vocabulary.strict_tags === false && !!propertyKeyName;\n    const isDisabled = !(hasVocabularyKey || allowArbitraryValue);\n\n    return <Autocomplete\n        label='Value'\n        items={[]}\n        value={value}\n        error={showErrors && valueErrs.length > 0}\n        helperText={showErrors ? valueErrs.join(', ') : undefined}\n        disabled={isDisabled}\n        suggestions={getSuggestions(value, propertyKeyId, vocabulary)}\n        renderSuggestion={\n            (s: PropFieldSuggestion) => s.synonyms && s.synonyms.length > 0\n                ? 
`${s.label} (${s.synonyms.join('; ')})`\n                : s.label\n        }\n        onSelect={(selectedSuggestion: PropFieldSuggestion) => {\n            onSelect(selectedSuggestion.label);\n            setValue(selectedSuggestion.label);\n        }}\n        onBlur={() => {\n            // Case-insensitive search for the value in the vocabulary\n            const foundValueID = getTagValueID(propertyKeyId, value, vocabulary);\n            if (foundValueID !== '') {\n                setValue(getTagValueLabel(propertyKeyId, foundValueID, vocabulary));\n            }\n        }}\n        onChange={(e: ChangeEvent<HTMLInputElement>) => {\n            const newValue = e.currentTarget.value;\n            setValue(newValue);\n            if (vocabulary.strict_tags === false || (vocabulary.tags[propertyKeyId] && vocabulary.tags[propertyKeyId].strict === false)) {\n                onSelect(newValue);\n            }\n        }}\n    />\n};\n\n/**\n * getValidation must be memoized to prevent infinite re-renders due to Field\n * checking it for changes\n */\nconst getValidation = memoize((propertyKeyId: string, vocabulary: Vocabulary) =>\n    isStrictTag(propertyKeyId, vocabulary)\n        ? [...TAG_VALUE_VALIDATION, matchTagValues(propertyKeyId, vocabulary)]\n        : TAG_VALUE_VALIDATION);\n\nconst matchTagValues = (propertyKeyId: string, vocabulary: Vocabulary) =>\n    (value: string) =>\n        getTagValues(propertyKeyId, vocabulary).find(v => !value || v.label === value)\n            ? undefined\n            : 'Incorrect value';\n\nconst createStrictValueValidator = (propertyKeyId: string, vocabulary: Vocabulary): Validator => {\n    const validValues = getTagValues(propertyKeyId, vocabulary).map(value => value.label);\n    const validValueSet = new Set(validValues);\n\n    return ((value: string) =>\n        validValueSet.has(value) ? undefined : 'Incorrect value'\n    ) as Validator;\n};\n\nconst getValueValidation = (propertyKeyId: string, vocabulary: Vocabulary) => {\n    if (isStrictTag(propertyKeyId, vocabulary)) {\n        return [...REQUIRED_LENGTH255_VALIDATION, createStrictValueValidator(propertyKeyId, vocabulary)];\n    }\n    return REQUIRED_LENGTH255_VALIDATION;\n};\n\nconst getSuggestions = (value: string, tagName: string, vocabulary: Vocabulary) => {\n    const re = new RegExp(escapeRegExp(value), \"i\");\n    return getPreferredTagValues(tagName, vocabulary, value).filter(\n        val => (val.label !== value && re.test(val.label)) ||\n            (val.synonyms && val.synonyms.some(s => re.test(s))));\n};\n\nconst handleChange = (\n    formName: string,\n    tagValueID: string,\n    { onChange }: WrappedFieldInputProps,\n    { dispatch }: WrappedFieldMetaProps,\n    value: string) => {\n        onChange(value);\n        dispatch(change(formName, PROPERTY_VALUE_FIELD_NAME, value));\n        dispatch(change(formName, PROPERTY_VALUE_FIELD_ID, tagValueID));\n    };\n"
  },
  {
    "path": "services/workbench2/src/views-components/resource-properties-form/resource-properties-form.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { RootState } from 'store/store';\nimport { connect } from 'react-redux';\nimport { Grid } from '@mui/material';\nimport withStyles from '@mui/styles/withStyles';\nimport { DialogPropertyKeyInput, PROPERTY_KEY_FIELD_NAME, PROPERTY_KEY_FIELD_ID } from './property-key-field';\nimport { DialogPropertyValueInput, PROPERTY_VALUE_FIELD_NAME, PROPERTY_VALUE_FIELD_ID } from './property-value-field';\nimport { getTagKeyID, Vocabulary } from 'models/vocabulary';\nimport { ProgressButton } from 'components/progress-button/progress-button';\nimport { Chips, PropertyChips, formatChips } from 'components/chips/chips'\n\nconst AddButton = withStyles(theme => ({\n    root: { marginTop: theme.spacing(1) }\n}))(ProgressButton);\nexport interface ResourcePropertiesFormData {\n    uuid: string;\n    [PROPERTY_KEY_FIELD_NAME]: string;\n    [PROPERTY_KEY_FIELD_ID]: string;\n    [PROPERTY_VALUE_FIELD_NAME]: string;\n    [PROPERTY_VALUE_FIELD_ID]: string;\n    clearPropertyKeyOnSelect?: boolean;\n}\n\nconst mapState = (state: RootState) => {\n    return {\n        vocabulary: state.properties.vocabulary\n    }\n}\n\ntype DialogResourcePropertiesFormProps = {\n    initialProperties?: PropertyChips;\n    vocabulary: Vocabulary,\n    setChips: React.Dispatch<React.SetStateAction<PropertyChips>>,\n    onSubmit: (event: React.FormEvent<HTMLFormElement>) => void,\n};\n\nexport const DialogResourcePropertiesForm = connect(mapState)(({ vocabulary, setChips, initialProperties }: DialogResourcePropertiesFormProps) => {\n    const [properties, setProperties] = React.useState<PropertyChips>(initialProperties || {});\n    const [propertyKeyId, setPropertyKeyId] = React.useState<string | undefined>(undefined);\n    const [currentKey, setCurrentKey] = React.useState<string | undefined>(undefined);\n    const [currentValue, setCurrentValue] = React.useState<string | undefined>(undefined);\n    const [keyErrors, setKeyErrors] = React.useState<string[]>([]);\n    const [valueErrors, setValueErrors] = React.useState<string[]>([]);\n\n    React.useEffect(() => {\n        if (currentKey) {\n            setPropertyKeyId(getTagKeyID(currentKey, vocabulary));\n        } else {\n            setPropertyKeyId(undefined);\n        }\n    }, [currentKey]);\n\n    React.useEffect(() => {\n        setChips(properties);\n    }, [properties]);\n\n    const addPropertyValue = (prev: PropertyChips, key: string, value: string): PropertyChips => {\n        const existing = prev[key];\n        if (Array.isArray(existing)) {\n            return existing.includes(value) ? prev : { ...prev, [key]: [...existing, value] };\n        }\n        if (typeof existing === 'string') {\n            return existing === value ? 
prev : { ...prev, [key]: [existing, value] };\n        }\n        return { ...prev, [key]: value };\n    };\n\n    const handleAddProperty = (ev: React.FormEvent) => {\n        ev.preventDefault();\n        if (!currentKey || !currentValue) return;\n        setProperties(prev => addPropertyValue(prev, currentKey, currentValue));\n        setCurrentValue(undefined);\n    };\n\n    const onChipsChange = (newValues: string[]) => {\n        const newProperties: PropertyChips = {};\n        for (const chip of newValues) {\n            const [key, value] = chip.split(': ').map(s => s.trim());\n            if (newProperties[key]) {\n                if (Array.isArray(newProperties[key])) {\n                    (newProperties[key] as string[]).push(value);\n                } else {\n                    newProperties[key] = [newProperties[key] as string, value];\n                }\n            } else {\n                newProperties[key] = value;\n            }\n        }\n        setProperties(newProperties);\n    };\n\n    return <form data-cy='resource-properties-form'>\n        <Grid container spacing={2}>\n            <Grid item xs\n            data-cy='property-field-key'>\n                <DialogPropertyKeyInput\n                    clearPropertyKeyOnSelect={true}\n                    vocabulary={vocabulary}\n                    onSelect={setCurrentKey}\n                    setKeyErrors={setKeyErrors}\n                    // used to clear the value field when a new key is selected\n                    setCurrentValue={setCurrentValue}\n                />\n            </Grid>\n            <Grid item xs\n            data-cy='property-field-value'>\n                <DialogPropertyValueInput\n                    propertyKeyId={propertyKeyId || ''}\n                    propertyKeyName={currentKey || ''}\n                    vocabulary={vocabulary}\n                    currentValue={currentValue}\n                    onSelect={setCurrentValue}\n                    setValueErrors={setValueErrors}\n                />\n            </Grid>\n            <Grid item>\n                <AddButton\n                    data-cy='property-add-btn'\n                    disabled={keyErrors.length > 0 || valueErrors.length > 0 || !currentKey || !currentValue}\n                    color='primary'\n                    variant='contained'\n                    onClick={handleAddProperty}\n                    >\n                    Add\n                </AddButton>\n            </Grid>\n        </Grid>\n        <Grid data-cy='resource-properties-list'>\n            <Chips\n                values={formatChips(properties)}\n                clickable={true}\n                deletable={true}\n                onChange={onChipsChange}\n            />\n        </Grid>\n    </form>\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/rich-text-editor-dialog/rich-text-editor-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button } from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { WithDialogProps } from \"store/dialog/with-dialog\";\nimport { withDialog } from 'store/dialog/with-dialog';\nimport { RICH_TEXT_EDITOR_DIALOG_NAME } from \"store/rich-text-editor-dialog/rich-text-editor-dialog-actions\";\nimport RichTextEditor from 'react-rte';\n\ntype CssRules = 'rte';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    rte: {\n        fontFamily: 'Arial',\n        '& a': {\n            textDecoration: 'none',\n            color: theme.palette.primary.main,\n            '&:hover': {\n                cursor: 'pointer',\n                textDecoration: 'underline'\n            }\n        }\n    },\n\n});\n\nexport interface RichTextEditorDialogDataProps {\n    title: string;\n    text: string;\n}\n\nexport const RichTextEditorDialog = withStyles(styles)(withDialog(RICH_TEXT_EDITOR_DIALOG_NAME)(\n    (props: WithDialogProps<RichTextEditorDialogDataProps> & WithStyles<CssRules>) =>\n        <Dialog open={props.open}\n            onClose={props.closeDialog}\n            fullWidth\n            maxWidth='md'>\n            <DialogTitle>{props.data.title}</DialogTitle>\n            <DialogContent>\n                <RichTextEditor\n                    className={props.classes.rte}\n                    value={props.data.text ?\n                        RichTextEditor.createValueFromString(props.data.text.replace(/&lt;/g, '<').replace(/&gt;/g, '>').replace(/&amp;/g, '&'), 'html') : ''}\n                    readOnly={true} />\n            </DialogContent>\n            <DialogActions>\n                <Button\n                    variant='text'\n                    color='primary'\n                    onClick={props.closeDialog}>\n                    Close\n                </Button>\n            </DialogActions>\n        </Dialog>)\n);"
  },
  {
    "path": "services/workbench2/src/views-components/run-process-dialog/change-workflow-dialog.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { RootState } from 'store/store';\nimport { setWorkflow, SET_WORKFLOW_DIALOG } from 'store/run-process-panel/run-process-panel-actions';\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { WorkflowResource } from 'models/workflow';\n\nconst mapStateToProps = (state: RootState, props: WithDialogProps<{ workflow: WorkflowResource }>) => ({\n    workflow: props.data.workflow\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: (workflow: WorkflowResource) => {\n        props.closeDialog();\n        dispatch<any>(setWorkflow(workflow));\n    }\n});\n\nconst mergeProps = (\n    stateProps: { workflow: WorkflowResource },\n    dispatchProps: { onConfirm: (workflow: WorkflowResource) => void },\n    props: WithDialogProps<{ workflow: WorkflowResource }>) => ({\n        onConfirm: () => dispatchProps.onConfirm(stateProps.workflow),\n        ...props\n    });\n\nexport const [ChangeWorkflowDialog] = [ConfirmationDialog]\n    .map(connect(mapStateToProps, mapDispatchToProps, mergeProps) as any)\n    .map(withDialog(SET_WORKFLOW_DIALOG));"
  },
  {
    "path": "services/workbench2/src/views-components/search-bar/search-bar-advanced-properties-view.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Dispatch, compose } from 'redux';\nimport { connect } from 'react-redux';\nimport { InjectedFormProps, formValueSelector } from 'redux-form';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid, Button } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { RootState } from 'store/store';\nimport {\n    SEARCH_BAR_ADVANCED_FORM_NAME,\n    changeAdvancedFormProperty,\n    resetAdvancedFormProperty\n} from 'store/search-bar/search-bar-actions';\nimport { PropertyValue } from 'models/search-bar';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { SearchBarKeyField, SearchBarValueField } from 'views-components/form-fields/search-bar-form-fields';\nimport { Chips } from 'components/chips/chips';\nimport { formatPropertyValue } from \"common/formatters\";\nimport { Vocabulary } from 'models/vocabulary';\nimport { connectVocabulary } from '../resource-properties-form/property-field-common';\nimport { isEqual } from 'lodash';\n\ntype CssRules = 'label' | 'button';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    label: {\n        color: theme.palette.grey[\"500\"],\n        fontSize: '0.8125rem',\n        alignSelf: 'center'\n    },\n    button: {\n        boxShadow: 'none'\n    }\n});\n\ninterface SearchBarAdvancedPropertiesViewDataProps {\n    submitting: boolean;\n    invalid: boolean;\n    pristine: boolean;\n    propertyValues: PropertyValue;\n    fields: PropertyValue[];\n    vocabulary: Vocabulary;\n}\n\ninterface SearchBarAdvancedPropertiesViewActionProps {\n    setProps: () => void;\n    addProp: (propertyValues: PropertyValue, properties: PropertyValue[]) => void;\n    getAllFields: (propertyValues: PropertyValue[]) => PropertyValue[] | [];\n}\n\ntype SearchBarAdvancedPropertiesViewProps = SearchBarAdvancedPropertiesViewDataProps\n    & SearchBarAdvancedPropertiesViewActionProps\n    & InjectedFormProps & WithStyles<CssRules>;\n\nconst selector = formValueSelector(SEARCH_BAR_ADVANCED_FORM_NAME);\nconst mapStateToProps = (state: RootState) => {\n    return {\n        propertyValues: selector(state, 'key', 'value', 'keyID', 'valueID')\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch) => ({\n    setProps: (propertyValues: PropertyValue[]) => {\n        dispatch<any>(changeAdvancedFormProperty('properties', propertyValues));\n    },\n    addProp: (propertyValue: PropertyValue, properties: PropertyValue[]) => {\n        // Remove potential duplicates\n        properties = properties.filter(x => ! 
isEqual(\n            {\n                key: x.keyID || x.key,\n                value: x.valueID || x.value\n            }, {\n                key: propertyValue.keyID || propertyValue.key,\n                value: propertyValue.valueID || propertyValue.value\n            }));\n        dispatch<any>(changeAdvancedFormProperty(\n            'properties',\n            [...properties, propertyValue]\n        ));\n        dispatch<any>(resetAdvancedFormProperty('key'));\n        dispatch<any>(resetAdvancedFormProperty('value'));\n        dispatch<any>(resetAdvancedFormProperty('keyID'));\n        dispatch<any>(resetAdvancedFormProperty('valueID'));\n    },\n    getAllFields: (fields: any) => {\n        return fields.getAll() || [];\n    }\n});\n\nexport const SearchBarAdvancedPropertiesView = compose(\n    connectVocabulary,\n    connect(mapStateToProps, mapDispatchToProps))(\n    withStyles(styles)(\n        ({ classes, fields, propertyValues, setProps, addProp, getAllFields, vocabulary }: SearchBarAdvancedPropertiesViewProps) =>\n            <Grid container item xs={12} spacing={2}>\n                <Grid item xs={2} className={classes.label}>Properties</Grid>\n                <Grid item xs={4}>\n                    <SearchBarKeyField />\n                </Grid>\n                <Grid item xs={4}>\n                    <SearchBarValueField />\n                </Grid>\n                <Grid container item xs={2} justifyContent='flex-end' alignItems=\"center\">\n                    <Button className={classes.button} onClick={() => addProp(propertyValues, getAllFields(fields))}\n                        color=\"primary\"\n                        size='small'\n                        variant=\"contained\"\n                        disabled={!Boolean(propertyValues.key && propertyValues.value)}>\n                        Add\n                    </Button>\n                </Grid>\n                <Grid item xs={2} />\n                <Grid container item xs={10} spacing={1}>\n                    <Chips values={getAllFields(fields)}\n                        deletable\n                        onChange={setProps}\n                        getLabel={(field: PropertyValue) => formatPropertyValue(field, vocabulary)} />\n                </Grid>\n            </Grid>\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/search-bar/search-bar-advanced-view.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { reduxForm, InjectedFormProps, reset } from 'redux-form';\nimport { compose, Dispatch } from 'redux';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Paper, Button, Grid, IconButton, CircularProgress } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport {\n    SEARCH_BAR_ADVANCED_FORM_NAME, SEARCH_BAR_ADVANCED_FORM_PICKER_ID,\n    searchAdvancedData,\n    setSearchValueFromAdvancedData\n} from 'store/search-bar/search-bar-actions';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { CloseIcon } from 'components/icon/icon';\nimport { SearchBarAdvancedFormData } from 'models/search-bar';\nimport {\n    SearchBarTypeField, SearchBarClusterField, SearchBarProjectField, SearchBarTrashField,\n    SearchBarDateFromField, SearchBarDateToField, SearchBarPropertiesField,\n    SearchBarSaveSearchField, SearchBarQuerySearchField, SearchBarPastVersionsField\n} from 'views-components/form-fields/search-bar-form-fields';\nimport { treePickerActions } from \"store/tree-picker/tree-picker-actions\";\n\ntype CssRules = 'container' | 'closeIcon' | 'label' | 'buttonWrapper'\n    | 'button' | 'circularProgress' | 'searchView' | 'selectGrid';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    container: {\n        padding: theme.spacing(2),\n        borderBottom: `1px solid ${theme.palette.grey[\"200\"]}`,\n        position: 'relative',\n    },\n    closeIcon: {\n        position: 'absolute',\n        top: '12px',\n        right: '12px'\n    },\n    label: {\n        color: theme.palette.grey[\"500\"],\n        fontSize: '0.8125rem',\n        alignSelf: 'center'\n    },\n    buttonWrapper: {\n        marginRight: '14px',\n        marginTop: '14px',\n        position: 'relative',\n    },\n    button: {\n        boxShadow: 'none'\n    },\n    circularProgress: {\n        position: 'absolute',\n        top: 0,\n        bottom: 0,\n        left: 0,\n        right: 0,\n        margin: 'auto'\n    },\n    searchView: {\n        color: theme.palette.common.black,\n        borderRadius: `0 0 ${theme.spacing(0.5)} ${theme.spacing(0.5)}`\n    },\n    selectGrid: {\n        marginBottom: theme.spacing(2)\n    }\n});\n\n// ToDo: maybe we should remove invalid and prostine\ninterface SearchBarAdvancedViewFormDataProps {\n    submitting: boolean;\n    invalid: boolean;\n    pristine: boolean;\n}\n\n// ToDo: maybe we should remove tags\nexport interface SearchBarAdvancedViewDataProps {\n    tags: any;\n    saveQuery: boolean;\n}\n\nexport interface SearchBarAdvancedViewActionProps {\n    closeAdvanceView: () => void;\n}\n\ntype SearchBarAdvancedViewProps = SearchBarAdvancedViewActionProps & SearchBarAdvancedViewDataProps;\n\ntype SearchBarAdvancedViewFormProps = SearchBarAdvancedViewProps & SearchBarAdvancedViewFormDataProps\n    & InjectedFormProps & WithStyles<CssRules>;\n\nconst validate = (values: any) => {\n    const errors: any = {};\n\n    if (values.dateFrom && values.dateTo) {\n        if (new Date(values.dateFrom).getTime() > new Date(values.dateTo).getTime()) {\n            errors.dateFrom = 'Invalid date';\n        }\n    }\n\n    return errors;\n};\n\nexport const SearchBarAdvancedView = compose(\n    reduxForm<SearchBarAdvancedFormData, SearchBarAdvancedViewProps>({\n        form: SEARCH_BAR_ADVANCED_FORM_NAME,\n     
   validate,\n        onSubmit: (data: SearchBarAdvancedFormData, dispatch: Dispatch) => {\n            dispatch<any>(searchAdvancedData(data));\n            dispatch(reset(SEARCH_BAR_ADVANCED_FORM_NAME));\n            dispatch(treePickerActions.DEACTIVATE_TREE_PICKER_NODE({ pickerId: SEARCH_BAR_ADVANCED_FORM_PICKER_ID }));\n        },\n        onChange: (data: SearchBarAdvancedFormData, dispatch: Dispatch, props: any, prevData: SearchBarAdvancedFormData) => {\n            dispatch<any>(setSearchValueFromAdvancedData(data, prevData));\n        },\n    }),\n    withStyles(styles))(\n        ({ classes, closeAdvanceView, handleSubmit, submitting, invalid, pristine, tags, saveQuery }: SearchBarAdvancedViewFormProps) =>\n            <Paper className={classes.searchView}>\n                <form onSubmit={handleSubmit}>\n                    <Grid container direction=\"column\" justifyContent=\"flex-start\" alignItems=\"flex-start\">\n                        <Grid item xs={12} container className={classes.container}>\n                            <Grid item container xs={12} className={classes.selectGrid}>\n                                <Grid item xs={2} className={classes.label}>Type</Grid>\n                                <Grid item xs={5}>\n                                    <SearchBarTypeField />\n                                </Grid>\n                            </Grid>\n                            <Grid item container xs={12} className={classes.selectGrid}>\n                                <Grid item xs={2} className={classes.label}>Cluster</Grid>\n                                <Grid item xs={5}>\n                                    <SearchBarClusterField />\n                                </Grid>\n                            </Grid>\n                            <Grid item container xs={12}>\n                                <Grid item xs={2} className={classes.label}>Project</Grid>\n                                <Grid item xs={10}>\n                                    <SearchBarProjectField />\n                                </Grid>\n                            </Grid>\n                            <Grid item container xs={12}>\n                                <Grid item xs={2} className={classes.label} />\n                                <Grid item xs={5}>\n                                    <SearchBarTrashField />\n                                </Grid>\n                                <Grid item xs={5}>\n                                    <SearchBarPastVersionsField />\n                                </Grid>\n                            </Grid>\n                            <IconButton onClick={closeAdvanceView} className={classes.closeIcon} size=\"large\">\n                                <CloseIcon />\n                            </IconButton>\n                        </Grid>\n                        <Grid container item xs={12} className={classes.container} spacing={2}>\n                            <Grid item xs={2} className={classes.label}>Date modified</Grid>\n                            <Grid item xs={4}>\n                                <SearchBarDateFromField />\n                            </Grid>\n                            <Grid item xs={4}>\n                                <SearchBarDateToField />\n                            </Grid>\n                        </Grid>\n                        <Grid container item xs={12} className={classes.container}>\n                            <SearchBarPropertiesField />\n                            <Grid container item 
xs={12} justifyContent=\"flex-start\" alignItems=\"center\" spacing={2}>\n                                <Grid item xs={2} className={classes.label} />\n                                <Grid item xs={4}>\n                                    <SearchBarSaveSearchField />\n                                </Grid>\n                                <Grid item xs={4}>\n                                    {saveQuery && <SearchBarQuerySearchField />}\n                                </Grid>\n                            </Grid>\n                            <Grid container item xs={12} justifyContent='flex-end'>\n                                <div className={classes.buttonWrapper}>\n                                    <Button type=\"submit\" className={classes.button}\n                                        // ToDo: create easier condition\n                                        // Question: do we need this condition?\n                                        // disabled={invalid || submitting || pristine || !!(tags && tags.values && ((tags.values.key) || (tags.values.value)) && !Object.keys(tags.values).find(el => el !== 'value' && el !== 'key'))}\n                                        color=\"primary\"\n                                        size='small'\n                                        variant=\"contained\">\n                                        Search\n                                    </Button>\n                                    {submitting && <CircularProgress size={20} className={classes.circularProgress} />}\n                                </div>\n                            </Grid>\n                        </Grid>\n                    </Grid>\n                </form>\n            </Paper>\n    );\n"
  },
  {
    "path": "services/workbench2/src/views-components/search-bar/search-bar-autocomplete-view.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Paper, List, ListItem, ListItemText } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { GroupContentsResource } from 'services/groups-service/groups-service';\nimport Highlighter from \"react-highlight-words\";\nimport { SearchBarSelectedItem } from \"store/search-bar/search-bar-reducer\";\n\ntype CssRules = 'searchView' | 'list' | 'listItem';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => {\n    return {\n        searchView: {\n            borderRadius: `0 0 ${theme.spacing(0.5)} ${theme.spacing(0.5)}`\n        },\n        list: {\n            padding: 0\n        },\n        listItem: {\n            paddingLeft: theme.spacing(1),\n            paddingRight: theme.spacing(2),\n        }\n    };\n};\n\nexport interface SearchBarAutocompleteViewDataProps {\n    searchResults: GroupContentsResource[];\n    searchValue?: string;\n    selectedItem: SearchBarSelectedItem;\n}\n\nexport interface SearchBarAutocompleteViewActionProps {\n    navigateTo: (uuid: string) => void;\n}\n\ntype SearchBarAutocompleteViewProps = SearchBarAutocompleteViewDataProps & SearchBarAutocompleteViewActionProps & WithStyles<CssRules>;\n\nexport const SearchBarAutocompleteView = withStyles(styles)(\n    ({ classes, searchResults, searchValue, navigateTo, selectedItem }: SearchBarAutocompleteViewProps) => {\n        return <Paper className={classes.searchView}>\n            <List component=\"nav\" className={classes.list}>\n                <ListItem button className={classes.listItem} selected={!selectedItem || searchValue === selectedItem.id}>\n                    <ListItemText secondary={searchValue}/>\n                </ListItem>\n                {searchResults.map((item: GroupContentsResource) =>\n                    <ListItem button key={item.uuid} className={classes.listItem} selected={item.uuid === selectedItem.id}>\n                        <ListItemText secondary={getFormattedText(item.name, searchValue)}\n                                      onClick={() => navigateTo(item.uuid)}/>\n                    </ListItem>\n                )}\n            </List>\n        </Paper>;\n    });\n\nconst getFormattedText = (textToHighlight: string, searchString = '') => {\n    return <Highlighter searchWords={[searchString]} autoEscape={true} textToHighlight={textToHighlight} />;\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/search-bar/search-bar-basic-view.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Paper } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport {\n    SearchBarRecentQueries,\n    SearchBarRecentQueriesActionProps\n} from 'views-components/search-bar/search-bar-recent-queries';\nimport {\n    SearchBarSavedQueries,\n    SearchBarSavedQueriesDataProps,\n    SearchBarSavedQueriesActionProps\n} from 'views-components/search-bar/search-bar-save-queries';\n\ntype CssRules = 'advanced' | 'label' | 'root';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => {\n    return {\n        root: {\n            color: theme.palette.common.black,\n            borderRadius: `0 0 ${theme.spacing(0.5)} ${theme.spacing(0.5)}`\n        },\n        advanced: {\n            display: 'flex',\n            justifyContent: 'flex-end',\n            padding: theme.spacing(1),\n            fontSize: '0.875rem',\n            cursor: 'pointer',\n            color: theme.palette.primary.main\n        },\n        label: {\n            fontSize: '0.775rem',\n            padding: `${theme.spacing(1)} ${theme.spacing(1)} `,\n            color: theme.palette.grey[\"900\"],\n            background: 'white',\n            textAlign: 'right',\n            fontWeight: 'bold'\n        }\n    };\n};\n\nexport type SearchBarBasicViewDataProps = SearchBarSavedQueriesDataProps;\n\nexport type SearchBarBasicViewActionProps = {\n    onSetView: (currentView: string) => void;\n    onSearch: (searchValue: string) => void;\n} & SearchBarRecentQueriesActionProps & SearchBarSavedQueriesActionProps;\n\ntype SearchBarBasicViewProps = SearchBarBasicViewDataProps & SearchBarBasicViewActionProps & WithStyles<CssRules>;\n\nexport const SearchBarBasicView = withStyles(styles)(\n    ({ classes, onSetView, deleteSavedQuery, savedQueries, onSearch, editSavedQuery, selectedItem, recentQueries }: SearchBarBasicViewProps& { recentQueries: string[] }) =>\n        <Paper className={classes.root}>\n            <div className={classes.label}>{\"Recent queries\"}</div>\n            <SearchBarRecentQueries\n                onSearch={onSearch}\n                recentQueries={recentQueries}\n                selectedItem={selectedItem} />\n            <div className={classes.label}>{\"Saved queries\"}</div>\n            <SearchBarSavedQueries\n                onSearch={onSearch}\n                savedQueries={savedQueries}\n                editSavedQuery={editSavedQuery}\n                deleteSavedQuery={deleteSavedQuery}\n                selectedItem={selectedItem} />\n        </Paper>\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/search-bar/search-bar-recent-queries.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { List, ListItem, ListItemText } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { SearchBarSelectedItem } from \"store/search-bar/search-bar-reducer\";\n\ntype CssRules = 'root' | 'listItem' | 'listItemText';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        padding: '0px'\n    },\n    listItem: {\n        paddingLeft: theme.spacing(1),\n        paddingRight: theme.spacing(2),\n    },\n    listItemText: {\n        fontSize: '0.8125rem',\n        color: theme.palette.grey[\"900\"]\n    }\n});\n\nexport interface SearchBarRecentQueriesDataProps {\n    selectedItem: SearchBarSelectedItem;\n    recentQueries: string[];\n}\n\nexport interface SearchBarRecentQueriesActionProps {\n    onSearch: (searchValue: string) => void;\n}\n\ntype SearchBarRecentQueriesProps = SearchBarRecentQueriesDataProps & SearchBarRecentQueriesActionProps & WithStyles<CssRules>;\n\nexport const SearchBarRecentQueries = withStyles(styles)(\n    ({ classes, onSearch, selectedItem, recentQueries }: SearchBarRecentQueriesProps) =>\n        <List component=\"nav\" className={classes.root}>\n            {recentQueries.map((query, index) =>\n                <ListItem button key={index} className={classes.listItem} selected={`RQ-${index}-${query}` === selectedItem.id}>\n                    <ListItemText disableTypography\n                        secondary={query}\n                        onClick={() => onSearch(query)}\n                        className={classes.listItemText} />\n                </ListItem>\n            )}\n        </List>);\n"
  },
  {
    "path": "services/workbench2/src/views-components/search-bar/search-bar-save-queries.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { List, ListItem, ListItemText, ListItemSecondaryAction, Tooltip, IconButton } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { RemoveIcon, EditSavedQueryIcon } from 'components/icon/icon';\nimport { SearchBarAdvancedFormData } from 'models/search-bar';\nimport { SearchBarSelectedItem } from \"store/search-bar/search-bar-reducer\";\nimport { getQueryFromAdvancedData } from \"store/search-bar/search-bar-actions\";\n\ntype CssRules = 'root' | 'listItem' | 'listItemText' | 'button';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        padding: '0px'\n    },\n    listItem: {\n        paddingLeft: theme.spacing(1),\n        paddingRight: theme.spacing(2)\n    },\n    listItemText: {\n        fontSize: '0.8125rem',\n        color: theme.palette.grey[\"900\"]\n    },\n    button: {\n        padding: '6px',\n        marginRight: theme.spacing(1)\n    }\n});\n\nexport interface SearchBarSavedQueriesDataProps {\n    savedQueries: SearchBarAdvancedFormData[];\n    selectedItem: SearchBarSelectedItem;\n}\n\nexport interface SearchBarSavedQueriesActionProps {\n    onSearch: (searchValue: string) => void;\n    deleteSavedQuery: (id: number) => void;\n    editSavedQuery: (data: SearchBarAdvancedFormData, id: number) => void;\n}\n\ntype SearchBarSavedQueriesProps = SearchBarSavedQueriesDataProps\n    & SearchBarSavedQueriesActionProps\n    & WithStyles<CssRules>;\n\nexport const SearchBarSavedQueries = withStyles(styles)(\n    ({ classes, savedQueries, onSearch, editSavedQuery, deleteSavedQuery, selectedItem }: SearchBarSavedQueriesProps) =>\n        <List component=\"nav\" className={classes.root}>\n            {savedQueries.map((query, index) =>\n                <ListItem button key={index} className={classes.listItem} selected={`SQ-${index}-${query.queryName}` === selectedItem.id}>\n                    <ListItemText disableTypography\n                        secondary={query.queryName}\n                        onClick={() => onSearch(getQueryFromAdvancedData(query))}\n                        className={classes.listItemText} />\n                    <ListItemSecondaryAction>\n                        <Tooltip title=\"Edit\">\n                            <IconButton\n                                aria-label=\"Edit\"\n                                onClick={() => editSavedQuery(query, index)}\n                                className={classes.button}\n                                size=\"large\">\n                                <EditSavedQueryIcon />\n                            </IconButton>\n                        </Tooltip>\n                        <Tooltip title=\"Remove\">\n                            <IconButton\n                                aria-label=\"Remove\"\n                                onClick={() => deleteSavedQuery(index)}\n                                className={classes.button}\n                                size=\"large\">\n                                <RemoveIcon />\n                            </IconButton>\n                        </Tooltip>\n                    </ListItemSecondaryAction>\n                </ListItem>\n            )}\n    </List>);\n"
  },
  {
    "path": "services/workbench2/src/views-components/search-bar/search-bar-view.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\n\ndescribe(\"<SearchBarView />\", () => {\n    let onSearch;\n\n    beforeEach(() => {\n        onSearch = cy.stub();\n    });\n\n    describe(\"on input value change\", () => {\n        // TODO fix tests and delete beneath one\n        it(\"fix tests\", () => {\n            const test = 1;\n            expect(test).to.equal(1);\n        });\n        // it(\"calls onSearch after default timeout\", () => {\n        //     const searchBar = mount(<SearchBarView onSearch={onSearch} value=\"current value\" {...mockSearchProps()} />);\n        //     searchBar.find(\"input\").simulate(\"change\", { target: { value: \"current value\" } });\n        //     expect(onSearch).not.toBeCalled();\n        //     jest.advanceTimersByTime(DEFAULT_SEARCH_DEBOUNCE);\n        //     expect(onSearch).toBeCalledWith(\"current value\");\n        // });\n\n        // it(\"calls onSearch after the time specified in props has passed\", () => {\n        //     const searchBar = mount(<SearchBarView onSearch={onSearch} value=\"current value\" debounce={2000} {...mockSearchProps()} />);\n        //     searchBar.find(\"input\").simulate(\"change\", { target: { value: \"current value\" } });\n        //     jest.advanceTimersByTime(1000);\n        //     expect(onSearch).not.toBeCalled();\n        //     jest.advanceTimersByTime(1000);\n        //     expect(onSearch).toBeCalledWith(\"current value\");\n        // });\n\n        // it(\"calls onSearch only once after no change happened during the specified time\", () => {\n        //     const searchBar = mount(<SearchBarView onSearch={onSearch} value=\"current value\" debounce={1000} {...mockSearchProps()} />);\n        //     searchBar.find(\"input\").simulate(\"change\", { target: { value: \"current value\" } });\n        //     jest.advanceTimersByTime(500);\n        //     searchBar.find(\"input\").simulate(\"change\", { target: { value: \"changed value\" } });\n        //     jest.advanceTimersByTime(1000);\n        //     expect(onSearch).toHaveBeenCalledTimes(1);\n        // });\n\n        // it(\"calls onSearch again after the specified time has passed since previous call\", () => {\n        //     const searchBar = mount(<SearchBarView onSearch={onSearch} value=\"latest value\" debounce={1000} {...mockSearchProps()} />);\n        //     searchBar.find(\"input\").simulate(\"change\", { target: { value: \"current value\" } });\n        //     jest.advanceTimersByTime(500);\n        //     searchBar.find(\"input\").simulate(\"change\", { target: { value: \"intermediate value\" } });\n        //     jest.advanceTimersByTime(1000);\n        //     expect(onSearch).toBeCalledWith(\"intermediate value\");\n        //     searchBar.find(\"input\").simulate(\"change\", { target: { value: \"latest value\" } });\n        //     jest.advanceTimersByTime(1000);\n        //     expect(onSearch).toBeCalledWith(\"latest value\");\n        //     expect(onSearch).toHaveBeenCalledTimes(2);\n\n        // });\n    });\n});\n\nconst mockSearchProps = () => ({\n    currentView: '',\n    open: true,\n    onSetView: jest.fn(),\n    openView: jest.fn(),\n    loseView: jest.fn(),\n    closeView: jest.fn(),\n    saveRecentQuery: jest.fn(),\n    loadRecentQueries: () => ['test'],\n    saveQuery: jest.fn(),\n    deleteSavedQuery: jest.fn(),\n    openSearchView: jest.fn(),\n    editSavedQuery: jest.fn(),\n    navigateTo: jest.fn(),\n    
searchDataOnEnter: jest.fn()\n});"
  },
  {
    "path": "services/workbench2/src/views-components/search-bar/search-bar-view.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { compose } from \"redux\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { IconButton, Paper, Tooltip, InputAdornment, Input } from \"@mui/material\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport SearchIcon from \"@mui/icons-material/Search\";\nimport ArrowDropDownIcon from \"@mui/icons-material/ArrowDropDown\";\nimport { ArvadosTheme } from \"common/custom-theme\";\nimport { SearchView } from \"store/search-bar/search-bar-reducer\";\nimport { SearchBarBasicView, SearchBarBasicViewDataProps, SearchBarBasicViewActionProps } from \"views-components/search-bar/search-bar-basic-view\";\nimport {\n    SearchBarAutocompleteView,\n    SearchBarAutocompleteViewDataProps,\n    SearchBarAutocompleteViewActionProps,\n} from \"views-components/search-bar/search-bar-autocomplete-view\";\nimport {\n    SearchBarAdvancedView,\n    SearchBarAdvancedViewDataProps,\n    SearchBarAdvancedViewActionProps,\n} from \"views-components/search-bar/search-bar-advanced-view\";\nimport { KEY_CODE_DOWN, KEY_CODE_ESC, KEY_CODE_UP, KEY_ENTER } from \"common/codes\";\nimport { debounce } from \"debounce\";\nimport { Vocabulary } from \"models/vocabulary\";\nimport { connectVocabulary } from \"../resource-properties-form/property-field-common\";\nimport { Session } from \"models/session\";\nimport { isEqual } from \"lodash\";\n\ntype CssRules = \"container\" | \"containerSearchViewOpened\" | \"input\" | \"view\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => {\n    return {\n        container: {\n            position: \"relative\",\n            width: \"100%\",\n            borderRadius: theme.spacing(0.5),\n            zIndex: theme.zIndex.modal,\n        },\n        containerSearchViewOpened: {\n            position: \"relative\",\n            width: \"100%\",\n            borderRadius: `${theme.spacing(0.5)} ${theme.spacing(0.5)} 0 0`,\n            zIndex: theme.zIndex.modal,\n        },\n        input: {\n            border: \"none\",\n            padding: `0`,\n        },\n        view: {\n            position: \"absolute\",\n            width: \"100%\",\n            zIndex: 1,\n        },\n    };\n};\n\nexport type SearchBarDataProps = SearchBarViewDataProps &\n    SearchBarAutocompleteViewDataProps &\n    SearchBarAdvancedViewDataProps &\n    SearchBarBasicViewDataProps;\n\ninterface SearchBarViewDataProps {\n    searchValue: string;\n    currentView: string;\n    isPopoverOpen: boolean;\n    debounce?: number;\n    vocabulary?: Vocabulary;\n    sessions: Session[];\n}\n\nexport type SearchBarActionProps = SearchBarViewActionProps &\n    SearchBarAutocompleteViewActionProps &\n    SearchBarAdvancedViewActionProps &\n    SearchBarBasicViewActionProps;\n\ninterface SearchBarViewActionProps {\n    onChange: (event: React.ChangeEvent<HTMLInputElement>) => void;\n    onSubmit: (event: React.FormEvent<HTMLFormElement>) => void;\n    onSetView: (currentView: string) => void;\n    closeView: () => void;\n    openSearchView: () => void;\n    loadRecentQueries: () => string[];\n    moveUp: () => void;\n    moveDown: () => void;\n    setAdvancedDataFromSearchValue: (search: string, vocabulary?: Vocabulary) => void;\n    searchSingleCluster: (session: Session, searchValue: string) => any;\n}\n\ntype SearchBarViewProps = SearchBarDataProps & 
SearchBarActionProps & WithStyles<CssRules>;\n\nconst handleKeyDown = (e: React.KeyboardEvent, props: SearchBarViewProps) => {\n    if (e.keyCode === KEY_CODE_DOWN) {\n        e.preventDefault();\n        if (!props.isPopoverOpen) {\n            props.onSetView(SearchView.AUTOCOMPLETE);\n            props.openSearchView();\n        } else {\n            props.moveDown();\n        }\n    } else if (e.keyCode === KEY_CODE_UP) {\n        e.preventDefault();\n        props.moveUp();\n    } else if (e.keyCode === KEY_CODE_ESC) {\n        e.preventDefault();\n        props.closeView();\n    } else if (e.keyCode === KEY_ENTER) {\n        if (props.currentView === SearchView.BASIC) {\n            e.preventDefault();\n            props.onSearch(props.selectedItem.query);\n        } else if (props.currentView === SearchView.AUTOCOMPLETE) {\n            if (props.selectedItem.id !== props.searchValue) {\n                e.preventDefault();\n                props.navigateTo(props.selectedItem.id);\n            }\n        }\n    }\n};\n\nconst handleInputClick = (e: React.MouseEvent, props: SearchBarViewProps) => {\n    if (props.searchValue) {\n        props.onSetView(SearchView.AUTOCOMPLETE);\n    } else {\n        props.onSetView(SearchView.BASIC);\n    }\n    props.openSearchView();\n};\n\nconst handleDropdownClick = (e: React.MouseEvent, props: SearchBarViewProps) => {\n    e.stopPropagation();\n    if (props.isPopoverOpen && props.currentView === SearchView.ADVANCED) {\n        props.closeView();\n    } else {\n        props.setAdvancedDataFromSearchValue(props.searchValue, props.vocabulary);\n        props.onSetView(SearchView.ADVANCED);\n    }\n};\n\nexport const SearchBarView = compose(\n    connectVocabulary,\n    withStyles(styles)\n)(\n    class extends React.Component<SearchBarViewProps> {\n        state = {\n            loggedInSessions: [] as Session[],\n            recentQueries: [] as string[],\n        };\n\n        debouncedSearch = debounce(() => {\n            this.props.onSearch(this.props.searchValue);\n        }, 1000);\n\n        handleFocus = (event: React.FocusEvent<HTMLInputElement | HTMLTextAreaElement>) => {\n            this.setState({ recentQueries: this.props.loadRecentQueries()});\n        };\n\n        handleChange = (event: React.ChangeEvent<HTMLInputElement>) => {\n            this.debouncedSearch();\n            this.props.onChange(event);\n        };\n\n        handleSubmit = (event: React.FormEvent<HTMLFormElement>) => {\n            this.debouncedSearch.clear();\n            this.props.onSubmit(event);\n            this.setState({ recentQueries: this.props.loadRecentQueries()});\n        };\n\n        componentDidMount(): void {\n            this.setState({ loggedInSessions: this.props.sessions.filter((ss) => ss.loggedIn && ss.userIsActive)});\n            this.setState({ recentQueries: this.props.loadRecentQueries()});\n        }\n\n        shouldComponentUpdate(nextProps: Readonly<SearchBarViewProps>, nextState: Readonly<{}>, nextContext: any): boolean {\n            // Compare state as well as props; otherwise setState calls (e.g. refreshing recentQueries) would never trigger a re-render.\n            return !isEqual(nextProps, this.props) || !isEqual(nextState, this.state);\n        }\n\n        componentDidUpdate(prevProps: Readonly<SearchBarViewProps>, prevState: Readonly<{ loggedInSessions: Session[] }>, snapshot?: any): void {\n            if (prevProps.sessions !== this.props.sessions) {\n                this.setState({ loggedInSessions: this.props.sessions.filter((ss) => ss.loggedIn)});\n            }\n            // If a new session logs in after a search has started, search the new cluster and append those results\n            
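// Note: only the first newly added session is searched here; handling several simultaneous logins would require iterating over the whole diff.\n            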
if (this.state.loggedInSessions.length > prevState.loggedInSessions.length) {\n                const newLogins = this.state.loggedInSessions.filter((ss) => !prevState.loggedInSessions.includes(ss));\n                // Guard against an empty diff so searchSingleCluster is never called with an undefined session.\n                if (newLogins.length > 0) {\n                    this.props.searchSingleCluster(newLogins[0], this.props.searchValue);\n                }\n            }\n        }\n\n        componentWillUnmount() {\n            this.debouncedSearch.clear();\n        }\n\n        render() {\n            const { children, ...props } = this.props;\n            const { classes, isPopoverOpen } = this.props;\n            return <>\n                {isPopoverOpen && <Backdrop onClick={props.closeView} />}\n\n                <Paper className={isPopoverOpen ? classes.containerSearchViewOpened : classes.container}>\n                    <form\n                        data-cy=\"searchbar-parent-form\"\n                        onSubmit={this.handleSubmit}>\n                        <Input\n                            data-cy=\"searchbar-input-field\"\n                            className={classes.input}\n                            onChange={this.handleChange}\n                            placeholder=\"Search\"\n                            value={props.searchValue}\n                            fullWidth={true}\n                            disableUnderline={true}\n                            onClick={e => handleInputClick(e, props)}\n                            onKeyDown={e => handleKeyDown(e, props)}\n                            onFocus={e => this.handleFocus(e)}\n                            startAdornment={\n                                <InputAdornment position=\"start\">\n                                    <Tooltip title=\"Search\">\n                                        <IconButton type=\"submit\" size=\"large\">\n                                            <SearchIcon />\n                                        </IconButton>\n                                    </Tooltip>\n                                </InputAdornment>\n                            }\n                            endAdornment={\n                                <InputAdornment position=\"end\">\n                                    <Tooltip title=\"Advanced search\">\n                                        <IconButton onClick={e => handleDropdownClick(e, props)} size=\"large\">\n                                            <ArrowDropDownIcon />\n                                        </IconButton>\n                                    </Tooltip>\n                                </InputAdornment>\n                            }\n                        />\n                    </form>\n                    <div className={classes.view}>{isPopoverOpen && getView({ ...props }, this.state.recentQueries)}</div>\n                </Paper>\n            </>;\n        }\n    }\n);\n\nconst getView = (props: SearchBarViewProps, recentQueries: string[]) => {\n    switch (props.currentView) {\n        case SearchView.AUTOCOMPLETE:\n            return (\n                <SearchBarAutocompleteView\n                    navigateTo={props.navigateTo}\n                    searchResults={props.searchResults}\n                    searchValue={props.searchValue}\n                    selectedItem={props.selectedItem}\n                />\n            );\n        case SearchView.ADVANCED:\n            return (\n                <SearchBarAdvancedView\n                    closeAdvanceView={props.closeAdvanceView}\n                    tags={props.tags}\n                    saveQuery={props.saveQuery}\n                />\n            );\n        default:\n            return (\n                <SearchBarBasicView\n                    onSetView={props.onSetView}\n                    onSearch={props.onSearch}\n                    recentQueries={recentQueries}\n                    savedQueries={props.savedQueries}\n                    deleteSavedQuery={props.deleteSavedQuery}\n                    editSavedQuery={props.editSavedQuery}\n                    selectedItem={props.selectedItem}\n                />\n            );\n    }\n};\n\nconst backdropStyles: CustomStyleRulesCallback<\"backdrop\"> = theme => ({\n    backdrop: {\n        position: \"fixed\",\n        top: 0,\n        right: 0,\n        bottom: 0,\n        left: 0,\n        zIndex: theme.zIndex.modal,\n    },\n});\n\nconst Backdrop = compose(withStyles(backdropStyles))(({ classes, ...props }: WithStyles<\"backdrop\"> & React.HTMLProps<HTMLDivElement>) => (\n    <div\n        className={classes.backdrop}\n        {...props}\n    />\n));\n"
  },
  {
    "path": "services/workbench2/src/views-components/search-bar/search-bar.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { Dispatch } from 'redux';\nimport {\n    goToView,\n    searchData,\n    deleteSavedQuery,\n    loadRecentQueries,\n    openSearchView,\n    closeSearchView,\n    closeAdvanceView,\n    navigateToItem,\n    editSavedQuery,\n    changeData,\n    submitData, moveUp, moveDown, setAdvancedDataFromSearchValue, SEARCH_BAR_ADVANCED_FORM_NAME\n} from 'store/search-bar/search-bar-actions';\nimport { SearchBarView, SearchBarActionProps, SearchBarDataProps } from 'views-components/search-bar/search-bar-view';\nimport { SearchBarAdvancedFormData } from 'models/search-bar';\nimport { Vocabulary } from 'models/vocabulary';\nimport { searchSingleCluster } from 'store/search-results-panel/search-results-middleware-service';\nimport { Session } from 'models/session';\n\nconst mapStateToProps = ({ searchBar, form , auth }: RootState): SearchBarDataProps => {\n    return {\n        searchValue: searchBar.searchValue,\n        currentView: searchBar.currentView,\n        isPopoverOpen: searchBar.open,\n        searchResults: searchBar.searchResults,\n        selectedItem: searchBar.selectedItem,\n        savedQueries: searchBar.savedQueries,\n        tags: form[SEARCH_BAR_ADVANCED_FORM_NAME],\n        saveQuery: form[SEARCH_BAR_ADVANCED_FORM_NAME] &&\n            form[SEARCH_BAR_ADVANCED_FORM_NAME].values &&\n            form[SEARCH_BAR_ADVANCED_FORM_NAME].values!.saveQuery,\n        sessions: auth.sessions,\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): SearchBarActionProps => ({\n    onSearch: (valueSearch: string) => dispatch<any>(searchData(valueSearch, true)),\n    onChange: (event: React.ChangeEvent<HTMLInputElement>) => dispatch<any>(changeData(event.target.value)),\n    onSetView: (currentView: string) => dispatch(goToView(currentView)),\n    onSubmit: (event: React.FormEvent<HTMLFormElement>) => dispatch<any>(submitData(event)),\n    closeView: () => dispatch<any>(closeSearchView()),\n    closeAdvanceView: () => dispatch<any>(closeAdvanceView()),\n    loadRecentQueries: () => dispatch<any>(loadRecentQueries()),\n    deleteSavedQuery: (id: number) => dispatch<any>(deleteSavedQuery(id)),\n    openSearchView: () => dispatch<any>(openSearchView()),\n    navigateTo: (uuid: string) => dispatch<any>(navigateToItem(uuid)),\n    editSavedQuery: (data: SearchBarAdvancedFormData) => dispatch<any>(editSavedQuery(data)),\n    moveUp: () => dispatch<any>(moveUp()),\n    moveDown: () => dispatch<any>(moveDown()),\n    setAdvancedDataFromSearchValue: (search: string, vocabulary: Vocabulary) => dispatch<any>(setAdvancedDataFromSearchValue(search, vocabulary)),\n    searchSingleCluster: (session: Session, searchValue: string) => {dispatch<any>(searchSingleCluster(session, searchValue))},\n});\n\nexport const SearchBar = connect(mapStateToProps, mapDispatchToProps)(SearchBarView);\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/participant-select.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Autocomplete, AutocompleteCat } from 'components/autocomplete/autocomplete';\nimport { connect, DispatchProp } from 'react-redux';\nimport { ServiceRepository } from 'services/services';\nimport { FilterBuilder } from '../../services/api/filter-builder';\nimport { debounce } from 'debounce';\nimport { ListItemText, Typography } from '@mui/material';\nimport { noop } from 'lodash/fp';\nimport { GroupClass, GroupResource } from 'models/group';\nimport { getUserDetailsString, getUserDisplayName, UserResource } from 'models/user';\nimport { Resource, ResourceKind } from 'models/resource';\nimport { ListResults } from 'services/common-service/common-service';\n\nexport interface Participant {\n    name: string;\n    tooltip: string;\n    uuid: string;\n}\n\ntype ParticipantResource = GroupResource | UserResource;\n\ninterface ParticipantSelectProps {\n    items: Participant[];\n    excludedParticipants?: string[];\n    label?: string;\n    autofocus?: boolean;\n    onlyPeople?: boolean;\n    onlyActive?: boolean;\n    disabled?: boolean;\n    category?: AutocompleteCat;\n\n    onBlur?: (event: React.FocusEvent<HTMLInputElement>) => void;\n    onFocus?: (event: React.FocusEvent<HTMLInputElement>) => void;\n    onCreate?: (person: Participant) => void;\n    onDelete?: (index: number) => void;\n    onSelect?: (person: Participant) => void;\n}\n\ninterface ParticipantSelectState {\n    isWorking: boolean;\n    value: string;\n    suggestions: ParticipantResource[];\n    cachedSuggestions: ParticipantResource[];\n}\n\nconst getDisplayName = (item: GroupResource | UserResource, detailed: boolean) => {\n    switch (item.kind) {\n        case ResourceKind.USER:\n            return getUserDisplayName(item, detailed, detailed);\n        case ResourceKind.GROUP:\n            return item.name + `(${`(${(item as Resource).uuid})`})`;\n        default:\n            return (item as Resource).uuid;\n    }\n};\n\nconst getSharingDisplayName = (item: GroupResource | UserResource, detailed: boolean = false) => {\n    switch (item.kind) {\n        case ResourceKind.USER:\n            return `${getUserDisplayName(item, detailed, detailed)} (${item.email})`;\n        case ResourceKind.GROUP:\n            return item.name;\n        default:\n            return (item as Resource).uuid;\n    }\n};\n\nconst getDisplayTooltip = (item: GroupResource | UserResource) => {\n    switch (item.kind) {\n        case ResourceKind.USER:\n            return getUserDetailsString(item);\n        case ResourceKind.GROUP:\n            return item.name + `(${`(${(item as Resource).uuid})`})`;\n        default:\n            return (item as Resource).uuid;\n    }\n};\n\nexport const ParticipantSelect = connect()(\n    class ParticipantSelect extends React.Component<ParticipantSelectProps & DispatchProp, ParticipantSelectState> {\n        state: ParticipantSelectState = {\n            isWorking: false,\n            value: '',\n            suggestions: [],\n            cachedSuggestions: [],\n        };\n\n        componentDidUpdate(prevProps: ParticipantSelectProps & DispatchProp, prevState: ParticipantSelectState) {\n            if (prevState.suggestions.length === 0 && this.state.suggestions.length > 0 && this.state.value.length === 0) {\n                this.setState({ cachedSuggestions: this.state.suggestions });\n            }\n        }\n\n        render() {\n            const { 
label = 'Add people and groups' } = this.props;\n\n            return (\n                <Autocomplete\n                    label={label}\n                    value={this.state.value}\n                    items={this.props.items}\n                    suggestions={this.state.suggestions}\n                    autofocus={this.props.autofocus}\n                    onChange={this.handleChange}\n                    onCreate={this.handleCreate}\n                    onSelect={this.handleSelect}\n                    onDelete={this.props.onDelete && !this.props.disabled ? this.handleDelete : undefined}\n                    onFocus={this.props.onFocus || this.onFocus}\n                    onBlur={this.onBlur}\n                    renderChipValue={this.renderChipValue}\n                    renderChipTooltip={this.renderChipTooltip}\n                    renderSuggestion={this.renderSuggestion}\n                    category={this.props.category}\n                    isWorking={this.state.isWorking}\n                    maxLength={this.props.category === AutocompleteCat.SHARING ? 10 : undefined}\n                    disabled={this.props.disabled} />\n            );\n        }\n\n        onFocus = (e) => {\n            this.setState({ isWorking: true });\n            this.getSuggestions();\n        }\n\n        onBlur = (e) => {\n            if (this.props.onBlur) {\n                this.props.onBlur(e);\n            }\n            setTimeout(() => this.setState({ value: '', suggestions: [] }), 200);\n        }\n\n        renderChipValue(chipValue: Participant) {\n            const { name, uuid } = chipValue;\n            return name || uuid;\n        }\n\n        renderChipTooltip(item: Participant) {\n            return item.tooltip;\n        }\n\n        renderSuggestion(item: ParticipantResource) {\n            return (\n                <ListItemText>\n                    <Typography noWrap>{getDisplayName(item, true)}</Typography>\n                </ListItemText>\n            );\n        }\n\n        handleDelete = (_: Participant, index: number) => {\n            const { onDelete = noop } = this.props;\n            onDelete(index);\n        }\n\n        handleCreate = () => {\n            const { onCreate } = this.props;\n            if (onCreate) {\n                this.setState({ value: '', suggestions: [] });\n                onCreate({\n                    name: '',\n                    tooltip: '',\n                    uuid: this.state.value,\n                });\n            }\n        }\n\n        handleSelect = (selection: ParticipantResource) => {\n            if (!selection) return;\n            const { uuid } = selection;\n            const { onSelect = noop } = this.props;\n            this.setState({ value: '', suggestions: this.state.cachedSuggestions });\n            onSelect({\n                name: this.props.category === AutocompleteCat.SHARING ? 
getSharingDisplayName(selection) : getDisplayName(selection, false),\n                tooltip: getDisplayTooltip(selection),\n                uuid,\n            });\n        }\n\n        handleChange = (event: React.ChangeEvent<HTMLInputElement>) => {\n            this.setState({ value: event.target.value }, this.getSuggestions);\n        }\n\n        getSuggestions = debounce(() => this.props.dispatch<any>(this.requestSuggestions), 500);\n\n        requestSuggestions = async (_: void, __: void, { userService, groupsService }: ServiceRepository) => {\n            this.setState({ isWorking: true });\n            const { value } = this.state;\n            // +1 to see if there are more than 10 results\n            const limit = 11;\n\n            const filterUsers = new FilterBuilder()\n                .addILike('any', value)\n                .addEqual('is_active', this.props.onlyActive || undefined)\n                .addNotIn('uuid', this.props.excludedParticipants)\n                .getFilters();\n            const userItems: ListResults<any> = await userService.list({ filters: filterUsers, limit, count: \"none\" });\n\n            const filterGroups = new FilterBuilder()\n                .addNotIn('group_class', [GroupClass.PROJECT, GroupClass.FILTER])\n                .addNotIn('uuid', this.props.excludedParticipants)\n                .addILike('name', value)\n                .getFilters();\n\n            const groupItems: ListResults<any> = await groupsService.list({ filters: filterGroups, limit, count: \"none\" });\n            this.setState({\n                suggestions: this.props.onlyPeople\n                    ? userItems.items\n                    : userItems.items.concat(groupItems.items),\n                isWorking: false,\n            });\n        }\n    });\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/permission-select.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { MenuItem, Select } from '@mui/material';\nimport RemoveRedEye from '@mui/icons-material/RemoveRedEye';\nimport Edit from '@mui/icons-material/Edit';\nimport Computer from '@mui/icons-material/Computer';\nimport { SelectProps } from '@mui/material/Select';\nimport { SelectItem } from './select-item';\nimport { PermissionLevel } from '../../models/permission';\n\nexport enum PermissionSelectValue {\n    READ = 'Read',\n    WRITE = 'Write',\n    MANAGE = 'Manage',\n}\n\nexport const parsePermissionLevel = (value: PermissionSelectValue) => {\n    switch (value) {\n        case PermissionSelectValue.READ:\n            return PermissionLevel.CAN_READ;\n        case PermissionSelectValue.WRITE:\n            return PermissionLevel.CAN_WRITE;\n        case PermissionSelectValue.MANAGE:\n            return PermissionLevel.CAN_MANAGE;\n        default:\n            return PermissionLevel.NONE;\n    }\n};\n\nexport const formatPermissionLevel = (value: PermissionLevel) => {\n    switch (value) {\n        case PermissionLevel.CAN_READ:\n            return PermissionSelectValue.READ;\n        case PermissionLevel.CAN_WRITE:\n            return PermissionSelectValue.WRITE;\n        case PermissionLevel.CAN_MANAGE:\n            return PermissionSelectValue.MANAGE;\n        default:\n            return PermissionSelectValue.READ;\n    }\n};\n\n\nexport const PermissionSelect = (props: SelectProps) =>\n    <Select\n        variant=\"standard\"\n        {...props}\n        disableUnderline\n        data-cy=\"permission-select\"\n        renderValue={renderPermissionItem}>\n        <MenuItem value={PermissionSelectValue.READ}>\n            {renderPermissionItem(PermissionSelectValue.READ)}\n        </MenuItem>\n        <MenuItem value={PermissionSelectValue.WRITE}>\n            {renderPermissionItem(PermissionSelectValue.WRITE)}\n        </MenuItem>\n        <MenuItem value={PermissionSelectValue.MANAGE}>\n            {renderPermissionItem(PermissionSelectValue.MANAGE)}\n        </MenuItem>\n    </Select>;\n\nconst renderPermissionItem = (value: string) =>\n    <SelectItem {...{ value, icon: getIcon(value) }} />;\n\nconst getIcon = (value: string) => {\n    switch (value) {\n        case PermissionSelectValue.READ:\n            return RemoveRedEye;\n        case PermissionSelectValue.WRITE:\n            return Edit;\n        case PermissionSelectValue.MANAGE:\n            return Computer;\n        default:\n            return Computer;\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/select-item.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid } from '@mui/material';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithStyles } from '@mui/styles';\nimport { SvgIconProps } from '@mui/material/SvgIcon';\n\ntype SelectItemClasses = 'value' | 'icon';\n\nconst permissionItemStyles: CustomStyleRulesCallback<SelectItemClasses> = theme => ({\n    value: {\n        marginLeft: theme.spacing(1),\n    },\n    icon: {\n        margin: `${theme.spacing(0.5)} 0`\n    }\n});\n\n/**\n * This component should be used as a child of MenuItem component.\n */\nexport const SelectItem = withStyles(permissionItemStyles)(\n    ({ value, icon: Icon, classes }: { value: string, icon: React.ComponentType<SvgIconProps> } & WithStyles<SelectItemClasses>) => {\n        return (\n            <Grid container alignItems='center'>\n                <Icon className={classes.icon} />\n                <span className={classes.value}>\n                    {value}\n                </span>\n            </Grid>);\n    });\n\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/sharing-dialog-component.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Provider } from 'react-redux';\nimport { combineReducers, createStore } from 'redux';\nimport { SharingDialogComponent } from './sharing-dialog-component';\nimport {\n    extractUuidObjectType,\n    ResourceObjectType\n} from 'models/resource';\nimport { ThemeProvider } from \"@mui/material\";\nimport { CustomTheme } from 'common/custom-theme';\n\ndescribe(\"<SharingDialogComponent />\", () => {\n    let props;\n    let store;\n\n    beforeEach(() => {\n        const initialAuthState = {\n            config: {\n                keepWebServiceUrl: 'http://example.com/',\n                keepWebInlineServiceUrl: 'http://*.collections.example.com/',\n                clusterConfig: {\n                    Users: {\n                        AnonymousUserToken: \"\"\n                    }\n                }\n            }\n        }\n        store = createStore(combineReducers({\n            auth: (state = initialAuthState, action) => state,\n        }));\n\n        props = {\n            open: true,\n            loading: false,\n            saveEnabled: false,\n            sharedResourceUuid: 'zzzzz-4zz18-zzzzzzzzzzzzzzz',\n            privateAccess: true,\n            sharingURLsNr: 2,\n            sharingURLsDisabled: false,\n            onClose: cy.stub(),\n            onSave: cy.stub(),\n            onCreateSharingToken: cy.stub(),\n            refreshPermissions: cy.stub(),\n        };\n    });\n\n    it(\"show sharing urls tab on collections when not disabled\", () => {\n        expect(props.sharingURLsDisabled).to.equal(false);\n        expect(props.sharingURLsNr).to.equal(2);\n        expect(extractUuidObjectType(props.sharedResourceUuid)).to.equal(ResourceObjectType.COLLECTION)\n        cy.mount(\n            <Provider store={store}>\n                <ThemeProvider theme={CustomTheme}>\n                    <SharingDialogComponent {...props} />\n                </ThemeProvider>\n            </Provider>);\n        cy.get('html').should('contain', 'Sharing URLs (2)');\n\n        // disable Sharing URLs UI\n        props.sharingURLsDisabled = true;\n        cy.mount(\n            <Provider store={store}>\n                <ThemeProvider theme={CustomTheme}>\n                    <SharingDialogComponent {...props} />\n                </ThemeProvider>\n            </Provider>);\n        cy.get('html').should('not.contain', 'Sharing URLs');\n    });\n\n    it(\"does not show sharing urls on non-collection resources\", () => {\n        props.sharedResourceUuid = 'zzzzz-j7d0g-0123456789abcde';\n        expect(extractUuidObjectType(props.sharedResourceUuid)).to.not.equal(ResourceObjectType.COLLECTION);\n        expect(props.sharingURLsDisabled).to.equal(false);\n        cy.mount(\n            <Provider store={store}>\n                <ThemeProvider theme={CustomTheme}>\n                    <SharingDialogComponent {...props} />\n                </ThemeProvider>\n            </Provider>);\n        cy.get('html').should('not.contain', 'Sharing URLs');\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/sharing-dialog-component.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport {\n    Dialog,\n    DialogTitle,\n    Button,\n    Grid,\n    DialogContent,\n    CircularProgress,\n    Paper,\n    Tabs,\n    Tab,\n    Checkbox,\n    FormControlLabel,\n    Typography,\n} from '@mui/material';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { DialogActions } from 'components/dialog-actions/dialog-actions';\nimport { SharingURLsContent } from './sharing-urls';\nimport {\n    extractUuidObjectType,\n    ResourceObjectType\n} from 'models/resource';\nimport { SharingInvitationForm } from './sharing-invitation-form';\nimport { SharingManagementForm } from './sharing-management-form';\nimport moment, { Moment } from 'moment';\nimport { SharingPublicAccessForm } from './sharing-public-access-form';\nimport { LocalizationProvider } from '@mui/x-date-pickers/LocalizationProvider';\nimport { AdapterMoment } from '@mui/x-date-pickers/AdapterMoment';\nimport { StaticDateTimePicker } from '@mui/x-date-pickers/StaticDateTimePicker';\n\nexport interface SharingDialogDataProps {\n    open: boolean;\n    loading: boolean;\n    saveEnabled: boolean;\n    sharedResourceUuid: string;\n    sharingURLsNr: number;\n    privateAccess: boolean;\n    sharingURLsDisabled: boolean;\n    permissions: any[];\n}\nexport interface SharingDialogActionProps {\n    onClose: () => void;\n    onSave: () => void;\n    onCreateSharingToken: (d: Date | undefined) => () => void;\n    refreshPermissions: () => void;\n}\nenum SharingDialogTab {\n    PERMISSIONS = 0,\n    URLS = 1,\n}\nexport type SharingDialogComponentProps = SharingDialogDataProps & SharingDialogActionProps;\n\nexport const SharingDialogComponent = (props: SharingDialogComponentProps) => {\n    const { open, loading, saveEnabled, sharedResourceUuid,\n        sharingURLsNr, privateAccess, sharingURLsDisabled,\n        onClose, onSave, onCreateSharingToken, refreshPermissions } = props;\n    const showTabs = !sharingURLsDisabled && extractUuidObjectType(sharedResourceUuid) === ResourceObjectType.COLLECTION;\n    const [tabNr, setTabNr] = React.useState<number>(SharingDialogTab.PERMISSIONS);\n    const [expDate, setExpDate] = React.useState<Moment>();\n    const [withExpiration, setWithExpiration] = React.useState<boolean>(false);\n\n    const handleChange = (newValue: moment.Moment) => setExpDate(newValue);\n    const handleClose = (ev, reason) => {\n        if (reason !== 'backdropClick') {\n            onClose();\n        }\n    }\n\n    // Sets up the dialog depending on the resource type\n    if (!showTabs && tabNr !== SharingDialogTab.PERMISSIONS) {\n        setTabNr(SharingDialogTab.PERMISSIONS);\n    }\n\n    React.useEffect(() => {\n        if (!withExpiration) {\n            setExpDate(undefined);\n        } else {\n            setExpDate(moment().add(2, 'hour'));\n        }\n    }, [withExpiration]);\n\n    return (\n        <Dialog {...{ open, onClose }} className=\"sharing-dialog\" onClose={handleClose} fullWidth maxWidth='md' data-cy=\"sharing-dialog\">\n            <DialogTitle>\n                Sharing settings\n            </DialogTitle>\n            {showTabs &&\n                <Tabs value={tabNr}\n                    onChange={(_, tb) => {\n                        if (tb === SharingDialogTab.PERMISSIONS) {\n                            refreshPermissions();\n  
                      }\n                        setTabNr(tb)\n                    }\n                    }>\n                    <Tab label=\"With users/groups\" />\n                    <Tab label={`Sharing URLs ${sharingURLsNr > 0 ? '(' + sharingURLsNr + ')' : ''}`} disabled={saveEnabled} />\n                </Tabs>\n            }\n            <DialogContent>\n                {tabNr === SharingDialogTab.PERMISSIONS &&\n                    <Grid container direction='column' spacing={3}>\n                        <Grid item>\n                            <SharingInvitationForm onSave={onSave} />\n                        </Grid>\n                        <Grid item>\n                            <SharingManagementForm onSave={onSave} />\n                        </Grid>\n                        <Grid item>\n                            <SharingPublicAccessForm onSave={onSave} />\n                        </Grid>\n                    </Grid>\n                }\n                {tabNr === SharingDialogTab.URLS &&\n                    <SharingURLsContent uuid={sharedResourceUuid} />\n                }\n            </DialogContent>\n            <DialogActions>\n                <Grid container spacing={1} style={{ display: 'flex', width: '100%', flexDirection: 'column', alignItems: 'center'}}>\n                    {tabNr === SharingDialogTab.URLS && withExpiration && \n                        <>\n                            <section style={{minHeight: '42dvh', display: 'flex', flexDirection: 'column' }}>\n                                <LocalizationProvider dateAdapter={AdapterMoment}>\n                                    <StaticDateTimePicker \n                                        orientation=\"landscape\" \n                                        onChange={handleChange} \n                                        value={expDate || moment().add(2, 'hour')} \n                                        disablePast\n                                        minutesStep={5}\n                                        ampm={false}\n                                        slots={{\n                                            //removes redundant action bar\n                                            actionBar: () => null,\n                                        }}\n                                    />\n                                </LocalizationProvider>\n                            </section>\n                            <Typography variant='caption' align='center' marginBottom='1rem'>\n                                Maximum expiration date may be limited by the cluster configuration.\n                            </Typography>\n                        </>\n                        }\n                    {tabNr === SharingDialogTab.PERMISSIONS && !sharingURLsDisabled &&\n                        privateAccess && sharingURLsNr > 0 &&\n                        <Grid item md={12}>\n                            <Typography variant='caption' align='center' color='error'>\n                                Although there aren't specific permissions set, this is publicly accessible via Sharing URL(s).\n                            </Typography>\n                        </Grid>\n                    }\n                    <Grid style={{display: 'flex', justifyContent: 'end', flexDirection: 'row', width: '100%', marginBottom: '-0.5rem'}}>\n                        {tabNr === SharingDialogTab.URLS && \n                            <Grid container style={{ display: 'flex', justifyContent: 'space-between'}}>\n                   
             <Grid display='flex'>\n                                    <Grid item>\n                                        <FormControlLabel\n                                            control={<Checkbox color=\"primary\" checked={withExpiration}\n                                                onChange={(e) => setWithExpiration(e.target.checked)} />}\n                                            label=\"With expiration\" />\n                                    </Grid>\n                                    <Grid item>\n                                        <Button variant=\"contained\" color=\"primary\"\n                                            disabled={expDate !== undefined && expDate.toDate() <= new Date()}\n                                            onClick={onCreateSharingToken(expDate?.toDate())}>\n                                            Create sharing URL\n                                        </Button>\n                                    </Grid>\n                                </Grid>\n                            </Grid>\n                        }\n                        <Grid>\n                            <Grid style={{display: 'flex'}}>\n                                <Button onClick={() => {\n                                    onClose();\n                                    setWithExpiration(false);\n                                    }}\n                                    disabled={saveEnabled}\n                                    color='primary'\n                                    size='small'\n                                    style={{ marginLeft: '10px' }}\n                                    >\n                                        Close\n                                </Button>\n                                {tabNr !== SharingDialogTab.URLS && \n                                    <Button onClick={() => {\n                                            onSave();\n                                        }}\n                                        data-cy=\"add-invited-people\"\n                                        disabled={!saveEnabled}\n                                        color='primary'\n                                        variant='contained'\n                                        size='small'\n                                        style={{ marginLeft: '10px' }}\n                                        >\n                                            Save\n                                    </Button>\n                                }\n                            </Grid>\n                        </Grid>\n                    </Grid>\n                </Grid>\n            </DialogActions>\n            {\n                loading && <LoadingIndicator />\n            }\n        </Dialog>\n    );\n};\n\nconst loadingIndicatorStyles: CustomStyleRulesCallback<'root'> = theme => ({\n    root: {\n        position: 'absolute',\n        top: 0,\n        right: 0,\n        bottom: 0,\n        left: 0,\n        display: 'flex',\n        alignItems: 'center',\n        justifyContent: 'center',\n        backgroundColor: 'rgba(255, 255, 255, 0.8)',\n    },\n});\n\nconst LoadingIndicator = withStyles(loadingIndicatorStyles)(\n    (props: WithStyles<'root'>) =>\n        <Paper classes={props.classes}>\n            <CircularProgress />\n        </Paper>\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/sharing-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { compose, Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { formValueSelector } from 'redux-form'\nimport {\n    connectSharingDialog,\n    saveSharingDialogChanges,\n    connectSharingDialogProgress,\n    SharingDialogData,\n    createSharingToken,\n    initializeManagementForm\n} from 'store/sharing-dialog/sharing-dialog-actions';\nimport { WithDialogProps } from 'store/dialog/with-dialog';\nimport {\n    SharingDialogComponent,\n    SharingDialogDataProps,\n    SharingDialogActionProps\n} from './sharing-dialog-component';\nimport {\n    getSharingPublicAccessFormData,\n    hasChanges,\n    SHARING_DIALOG_NAME,\n    SHARING_MANAGEMENT_FORM_NAME,\n    VisibilityLevel\n} from 'store/sharing-dialog/sharing-dialog-types';\nimport { WithProgressStateProps } from 'store/progress-indicator/with-progress';\nimport { getDialog } from 'store/dialog/dialog-reducer';\nimport { filterResources } from 'store/resources/resources';\nimport { ApiClientAuthorization } from 'models/api-client-authorization';\nimport { ResourceKind } from 'models/resource';\n\ntype Props = WithDialogProps<string> & WithProgressStateProps;\n\nconst sharingManagementFormSelector = formValueSelector(SHARING_MANAGEMENT_FORM_NAME);\n\nconst mapStateToProps = (state: RootState, { working, ...props }: Props): SharingDialogDataProps => {\n    const dialog = getDialog<SharingDialogData>(state.dialog, SHARING_DIALOG_NAME);\n    const sharedResourceUuid = dialog?.data.resourceUuid || '';\n    const sharingURLsDisabled = state.auth.config.clusterConfig.Workbench.DisableSharingURLsUI;\n    return ({\n        ...props,\n        permissions: sharingManagementFormSelector(state, 'permissions'),\n        saveEnabled: hasChanges(state),\n        loading: working,\n        sharedResourceUuid,\n        sharingURLsDisabled,\n        sharingURLsNr: !sharingURLsDisabled\n            ? (filterResources((resource: ApiClientAuthorization) =>\n                resource.kind === ResourceKind.API_CLIENT_AUTHORIZATION &&\n                resource.scopes.includes(`GET /arvados/v1/collections/${sharedResourceUuid}`) &&\n                resource.scopes.includes(`GET /arvados/v1/collections/${sharedResourceUuid}/`) &&\n                resource.scopes.includes('GET /arvados/v1/keep_services/accessible')\n            )(state.resources) as ApiClientAuthorization[]).length\n            : 0,\n        privateAccess: getSharingPublicAccessFormData(state)?.visibility === VisibilityLevel.PRIVATE,\n    })\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch, { ...props }: Props): SharingDialogActionProps => ({\n    ...props,\n    onClose: props.closeDialog,\n    onSave: () => {\n        setTimeout(() => dispatch<any>(saveSharingDialogChanges), 0);\n    },\n    onCreateSharingToken: (d: Date) => () => {\n        dispatch<any>(createSharingToken(d));\n    },\n    refreshPermissions: () => {\n        dispatch<any>(initializeManagementForm);\n    }\n});\n\nexport const SharingDialog = compose(\n    connectSharingDialog,\n    connectSharingDialogProgress,\n    connect(mapStateToProps, mapDispatchToProps)\n)(SharingDialogComponent);\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/sharing-invitation-form-component.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Field, WrappedFieldProps, FieldArray, WrappedFieldArrayProps } from 'redux-form';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid, FormControl, InputLabel } from '@mui/material';\nimport { PermissionSelect, parsePermissionLevel, formatPermissionLevel } from './permission-select';\nimport { ParticipantSelect, Participant } from './participant-select';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { AutocompleteCat } from 'components/autocomplete/autocomplete';\n\ntype SharingStyles = 'root';\n\nconst styles: CustomStyleRulesCallback<SharingStyles> = (theme: ArvadosTheme) => ({\n    root: {\n        padding: `${theme.spacing(1)} 0`,\n    },\n});\n\nconst SharingInvitationFormComponent = (props: { onSave: () => void }) => <StyledSharingInvitationFormComponent onSave={props.onSave} />\n\nexport default SharingInvitationFormComponent;\n\nconst StyledSharingInvitationFormComponent = withStyles(styles)(\n    ({ classes }: { onSave: () => void } & WithStyles<SharingStyles>) =>\n        <Grid container spacing={1} wrap='nowrap' className={classes.root} >\n            <Grid data-cy=\"invite-people-field\" item xs={8}>\n                <InvitedPeopleField />\n            </Grid>\n            <Grid data-cy=\"permission-select-field\" item xs={4} container wrap='nowrap'>\n                <PermissionSelectField />\n            </Grid>\n        </Grid >);\n\nconst InvitedPeopleField = () =>\n    <FieldArray\n        name='invitedPeople'\n        component={InvitedPeopleFieldComponent as any} />;\n\n\nconst InvitedPeopleFieldComponent = ({ fields }: WrappedFieldArrayProps<Participant>) =>\n    <ParticipantSelect\n        items={fields.getAll() || []}\n        onSelect={fields.push}\n        onDelete={fields.remove}\n        category={AutocompleteCat.SHARING} />;\n\nconst PermissionSelectField = () =>\n    <Field\n        name='permissions'\n        component={PermissionSelectComponent}\n        format={formatPermissionLevel}\n        parse={parsePermissionLevel} />;\n\nconst PermissionSelectComponent = ({ input }: WrappedFieldProps) =>\n    <FormControl variant=\"standard\" fullWidth>\n        <InputLabel>Authorization</InputLabel>\n        <PermissionSelect {...input} />\n    </FormControl>;\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/sharing-invitation-form.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { reduxForm } from 'redux-form';\nimport SharingInvitationFormComponent from './sharing-invitation-form-component';\nimport { SHARING_INVITATION_FORM_NAME } from 'store/sharing-dialog/sharing-dialog-types';\nimport { PermissionLevel } from 'models/permission';\n\ninterface InvitationFormData {\n    permissions: PermissionLevel;\n    invitedPeople: string[];\n}\n\ninterface SaveProps {\n    onSave: () => void;\n}\n\nexport const SharingInvitationForm =\n    reduxForm<InvitationFormData, SaveProps>({\n        form: SHARING_INVITATION_FORM_NAME,\n        initialValues: {\n            permissions: PermissionLevel.CAN_READ,\n            invitedPeople: [],\n        }\n    })(SharingInvitationFormComponent);\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/sharing-management-form-component.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid, Divider, IconButton, Typography, Tooltip } from '@mui/material';\nimport {\n    Field,\n    WrappedFieldProps,\n    WrappedFieldArrayProps,\n    FieldArray,\n    FieldArrayFieldsProps,\n    InjectedFormProps\n} from 'redux-form';\nimport { PermissionSelect, formatPermissionLevel, parsePermissionLevel } from './permission-select';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { CloseIcon } from 'components/icon/icon';\nimport { ArvadosTheme } from 'common/custom-theme';\n\nexport interface SaveProps {\n    onSave: () => void;\n}\n\nconst headerStyles: CustomStyleRulesCallback<'heading'> = (theme: ArvadosTheme) => ({\n    heading: {\n        fontSize: '1.25rem',\n    }\n});\n\nexport const SharingManagementFormComponent = withStyles(headerStyles)(\n    ({ classes, onSave }: WithStyles<'heading'> & SaveProps & InjectedFormProps<{}, SaveProps>) =>\n        <>\n            <Typography className={classes.heading}>People with access</Typography>\n            <FieldArray<{ onSave: () => void }> name='permissions' component={SharingManagementFieldArray as any} props={{ onSave }} />\n        </>);\n\nexport default SharingManagementFormComponent;\n\nconst SharingManagementFieldArray = ({ fields, onSave }: { onSave: () => void } & WrappedFieldArrayProps<{ email: string, fullName: string }>) =>\n    <div>{fields.map((field, index, fields) =>\n        <PermissionManagementRow key={field} {...{ field, index, fields }} onSave={onSave} />)}\n    </div>;\n\nconst permissionManagementRowStyles: CustomStyleRulesCallback<'root'> = theme => ({\n    root: {\n        padding: `${theme.spacing(0.5)} 0`,\n    }\n});\n\nconst PermissionManagementRow = withStyles(permissionManagementRowStyles)(\n    ({ field, index, fields, classes, onSave }: { field: string, index: number, fields: FieldArrayFieldsProps<{ email: string, fullName: string }>, onSave: () => void; } & WithStyles<'root'>) => {\n        const { email, fullName } = fields.get(index);\n        return <>\n            <Grid container alignItems='center' spacing={1} wrap='nowrap' className={classes.root}>\n                <Grid item xs={7}>\n                    <Typography noWrap variant='subtitle1'>{email ? `${fullName} (${email})` : fullName}</Typography>\n                </Grid>\n                <Grid item xs={1} container wrap='nowrap'>\n                    <Tooltip title='Remove access'>\n                        <IconButton onClick={() => { fields.remove(index); onSave(); }} size=\"large\">\n                            <CloseIcon />\n                        </IconButton>\n                    </Tooltip>\n                </Grid>\n                <Grid item xs={4} container wrap='nowrap'>\n                    <Field\n                        name={`${field}.permissions` as string}\n                        component={PermissionSelectComponent}\n                        format={formatPermissionLevel}\n                        parse={parsePermissionLevel}\n                        onChange={onSave}\n                    />\n                    \n                </Grid>\n            </Grid>\n            <Divider />\n        </>\n    }\n);\n\nconst PermissionSelectComponent = ({ input }: WrappedFieldProps) =>\n    <PermissionSelect fullWidth disableUnderline {...input} />;\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/sharing-management-form.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { reduxForm } from 'redux-form';\nimport { SharingManagementFormComponent, SaveProps } from './sharing-management-form-component';\nimport { SHARING_MANAGEMENT_FORM_NAME } from 'store/sharing-dialog/sharing-dialog-types';\n\nexport const SharingManagementForm = reduxForm<{}, SaveProps>(\n    { form: SHARING_MANAGEMENT_FORM_NAME }\n)(SharingManagementFormComponent);\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/sharing-public-access-form-component.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid, Typography } from '@mui/material';\nimport { Field, WrappedFieldProps } from 'redux-form';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { VisibilityLevelSelect } from './visibility-level-select';\nimport { VisibilityLevel } from 'store/sharing-dialog/sharing-dialog-types';\n\nconst sharingPublicAccessStyles: CustomStyleRulesCallback<'root'> = theme => ({\n    root: {\n        padding: `${theme.spacing(2)} 0`,\n    },\n    heading: {\n        fontSize: '1.25rem',\n    }\n});\n\ninterface AccessProps {\n    visibility: VisibilityLevel;\n    includePublic: boolean;\n    onSave: () => void;\n}\n\nconst SharingPublicAccessForm = withStyles(sharingPublicAccessStyles)(\n    ({ classes, visibility, includePublic, onSave }: WithStyles<'root' | 'heading'> & AccessProps) =>\n        <>\n            <Typography className={classes.heading}>General access</Typography>\n            <Grid container alignItems='center' className={classes.root}>\n                <Grid item xs={8}>\n                    <Typography variant='subtitle1'>\n                        {renderVisibilityInfo(visibility)}\n                    </Typography>\n                </Grid>\n                <Grid item xs={4} wrap='nowrap'>\n                    <Field<{ includePublic: boolean }> name='visibility' component={VisibilityLevelSelectComponent} includePublic={includePublic} onChange={onSave} />\n                </Grid>\n            </Grid>\n        </>\n);\n\nconst renderVisibilityInfo = (visibility: VisibilityLevel) => {\n    switch (visibility) {\n        case VisibilityLevel.PUBLIC:\n            return 'Shared with anyone on the Internet';\n        case VisibilityLevel.ALL_USERS:\n            return 'Shared with all users on this cluster';\n        case VisibilityLevel.SHARED:\n            return 'Shared with specific people';\n        case VisibilityLevel.PRIVATE:\n            return 'Not shared';\n        default:\n            return '';\n    }\n};\n\nconst SharingPublicAccessFormComponent = ({ visibility, includePublic, onSave }: AccessProps) =>\n    <SharingPublicAccessForm {...{ visibility, includePublic, onSave }} />;\n\nexport default SharingPublicAccessFormComponent;\n\nconst VisibilityLevelSelectComponent = ({ input, includePublic }: { includePublic: boolean } & WrappedFieldProps) =>\n    <VisibilityLevelSelect fullWidth disableUnderline includePublic={includePublic} {...input} />;\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/sharing-public-access-form.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { reduxForm } from 'redux-form';\nimport { compose } from 'redux';\nimport { connect } from 'react-redux';\nimport SharingPublicAccessFormComponent from './sharing-public-access-form-component';\nimport { SHARING_PUBLIC_ACCESS_FORM_NAME, VisibilityLevel } from 'store/sharing-dialog/sharing-dialog-types';\nimport { RootState } from 'store/store';\nimport { getSharingPublicAccessFormData } from '../../store/sharing-dialog/sharing-dialog-types';\n\ninterface SaveProps {\n    onSave: () => void;\n}\n\nexport const SharingPublicAccessForm = compose(\n    reduxForm<{}, SaveProps>(\n        { form: SHARING_PUBLIC_ACCESS_FORM_NAME }\n    ),\n    connect(\n        (state: RootState) => {\n            const { visibility } = getSharingPublicAccessFormData(state) || { visibility: VisibilityLevel.PRIVATE };\n            const includePublic = state.auth.config.clusterConfig.Users.AnonymousUserToken.length > 0;\n            return { visibility, includePublic };\n        }\n    )\n)(SharingPublicAccessFormComponent);\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/sharing-urls-component.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { SharingURLsComponent } from './sharing-urls-component';\nimport { ThemeProvider } from \"@mui/material\";\nimport { CustomTheme } from 'common/custom-theme';\nimport { Provider } from \"react-redux\";\nimport { configureStore } from \"store/store\";\nimport { createBrowserHistory } from \"history\";\n\ndescribe(\"<SharingURLsComponent />\", () => {\n    let props;\n    const store = configureStore(createBrowserHistory());\n\n    beforeEach(() => {\n        props = {\n            collectionUuid: 'collection-uuid',\n            sharingURLsPrefix: 'sharing-urls-prefix',\n            sharingTokens: [\n                {\n                    uuid: 'token-uuid1',\n                    apiToken: 'aaaaaaaaaa',\n                    expiresAt: '2009-01-03T18:15:00Z',\n                },\n                {\n                    uuid: 'token-uuid2',\n                    apiToken: 'bbbbbbbbbb',\n                    expiresAt: '2009-01-03T18:15:01Z',\n                },\n            ],\n            onCopy: cy.stub(),\n            onDeleteSharingToken: cy.stub().as('onDeleteSharingToken'),\n        };\n        cy.mount(\n            <Provider store={store}>\n                <ThemeProvider theme={CustomTheme}>\n                    <SharingURLsComponent {...props} />\n                </ThemeProvider>\n            </Provider>);\n    });\n\n    it(\"renders a list of sharing URLs\", () => {\n        // Check number of URLs\n        cy.get('a').should('have.length', 2);\n        // Check 1st URL\n        cy.get('a').eq(0).should('contain', `Token aaaaaaaa... expiring at: ${new Date(props.sharingTokens[0].expiresAt).toLocaleString()}`);\n        cy.get('a').eq(0).should('have.attr', 'href', `${props.sharingURLsPrefix}/c=${props.collectionUuid}/t=${props.sharingTokens[0].apiToken}/_/`);\n        // Check 2nd URL\n        cy.get('a').eq(1).should('contain', `Token bbbbbbbb... expiring at: ${new Date(props.sharingTokens[1].expiresAt).toLocaleString()}`);\n        cy.get('a').eq(1).should('have.attr', 'href', `${props.sharingURLsPrefix}/c=${props.collectionUuid}/t=${props.sharingTokens[1].apiToken}/_/`);\n    });\n\n    it(\"renders a list URLs with collection UUIDs as subdomains\", () => {\n        props.sharingURLsPrefix = '*.sharing-urls-prefix';\n        const sharingPrefix = '.sharing-urls-prefix';\n        cy.mount(\n            <Provider store={store}>\n                <ThemeProvider theme={CustomTheme}>\n                    <SharingURLsComponent {...props} />\n                </ThemeProvider>\n            </Provider>);\n\n        cy.get('a').eq(0).should('have.attr', 'href', `${props.collectionUuid}${sharingPrefix}/t=${props.sharingTokens[0].apiToken}/_/`);\n        cy.get('a').eq(1).should('have.attr', 'href', `${props.collectionUuid}${sharingPrefix}/t=${props.sharingTokens[1].apiToken}/_/`);\n    });\n\n    it(\"renders a list of URLs with no expiration\", () => {\n        props.sharingTokens[0].expiresAt = null;\n        props.sharingTokens[1].expiresAt = null;\n        cy.mount(\n            <Provider store={store}>\n                <ThemeProvider theme={CustomTheme}>\n                    <SharingURLsComponent {...props} />\n                </ThemeProvider>\n            </Provider>);\n        cy.get('a').eq(0).should('contain', `Token aaaaaaaa... with no expiration date`);\n        cy.get('a').eq(1).should('contain', `Token bbbbbbbb... 
with no expiration date`);\n    });\n\n    it(\"calls delete token handler when delete button is clicked\", () => {\n        cy.get('button').eq(0).click();\n        cy.get('@onDeleteSharingToken').should('be.calledWith', props.sharingTokens[0].uuid);\n    });\n});"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/sharing-urls-component.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Grid, IconButton, Link, Tooltip, Typography } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ApiClientAuthorization } from 'models/api-client-authorization';\nimport { CopyIcon, CloseIcon } from 'components/icon/icon';\nimport CopyToClipboard from 'react-copy-to-clipboard';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport moment from 'moment';\n\ntype CssRules = 'sharingUrlText'\n    | 'sharingUrlButton'\n    | 'sharingUrlList'\n    | 'sharingUrlRow';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    sharingUrlText: {\n        fontSize: '1rem',\n    },\n    sharingUrlButton: {\n        color: theme.palette.grey[\"500\"],\n        cursor: 'pointer',\n        '& svg': {\n            fontSize: '1rem'\n        },\n        verticalAlign: 'middle',\n    },\n    sharingUrlList: {\n        marginTop: '-0.5rem',\n    },\n    sharingUrlRow: {\n        marginLeft: theme.spacing(1),\n        borderBottom: `1px solid ${theme.palette.grey[\"300\"]}`,\n    },\n});\n\nexport interface SharingURLsComponentDataProps {\n    collectionUuid: string;\n    sharingTokens: ApiClientAuthorization[];\n    sharingURLsPrefix: string;\n}\n\nexport interface SharingURLsComponentActionProps {\n    onDeleteSharingToken: (uuid: string) => void;\n    onCopy: (message: string) => void;\n}\n\nexport type SharingURLsComponentProps = SharingURLsComponentDataProps & SharingURLsComponentActionProps;\n\nexport const SharingURLsComponent = withStyles(styles)((props: SharingURLsComponentProps & WithStyles<CssRules>) => <Grid container direction='column' spacing={3} className={props.classes.sharingUrlList}>\n    {props.sharingTokens.length > 0\n        ? props.sharingTokens\n            .sort((a, b) => (new Date(a.expiresAt).getTime() - new Date(b.expiresAt).getTime()))\n            .map(token => {\n                const url = props.sharingURLsPrefix.includes('*')\n                    ? `${props.sharingURLsPrefix.replace('*', props.collectionUuid)}/t=${token.apiToken}/_/`\n                    : `${props.sharingURLsPrefix}/c=${props.collectionUuid}/t=${token.apiToken}/_/`;\n                const expDate = new Date(token.expiresAt);\n                const urlLabel = !!token.expiresAt\n                    ? `Token ${token.apiToken.slice(0, 8)}... expiring at: ${expDate.toLocaleString()} (${moment(expDate).fromNow()})`\n                    : `Token ${token.apiToken.slice(0, 8)}... 
with no expiration date`;\n\n                return (\n                    <Grid container alignItems='center' key={token.uuid} className={props.classes.sharingUrlRow}>\n                        <Grid item>\n                            <Link className={props.classes.sharingUrlText} href={url} target='_blank' rel=\"noopener\">\n                                {urlLabel}\n                            </Link>\n                        </Grid>\n                        <Grid item xs />\n                        <Grid item>\n                            <Tooltip title='Copy link to clipboard'>\n                                <span className={props.classes.sharingUrlButton}>\n                                    <CopyToClipboard text={url} onCopy={() => props.onCopy('Sharing URL copied')}>\n                                        <CopyIcon />\n                                    </CopyToClipboard>\n                                </span>\n                            </Tooltip>\n                            <span data-cy='remove-url-btn' className={props.classes.sharingUrlButton}>\n                                <Tooltip title='Remove'>\n                                    <IconButton onClick={() => props.onDeleteSharingToken(token.uuid)} size=\"large\">\n                                        <CloseIcon />\n                                    </IconButton>\n                                </Tooltip>\n                            </span>\n                        </Grid>\n                    </Grid>\n                );\n            })\n        : <Grid item><Typography>No sharing URLs</Typography></Grid>}\n</Grid>);\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/sharing-urls.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootState } from 'store/store';\nimport { connect } from 'react-redux';\nimport { Dispatch } from 'redux';\nimport { ApiClientAuthorization } from 'models/api-client-authorization';\nimport { filterResources } from 'store/resources/resources';\nimport { ResourceKind } from 'models/resource';\nimport {\n    SharingURLsComponent,\n    SharingURLsComponentActionProps,\n    SharingURLsComponentDataProps\n} from './sharing-urls-component';\nimport {\n    snackbarActions,\n    SnackbarKind\n} from 'store/snackbar/snackbar-actions';\nimport { deleteSharingToken } from 'store/sharing-dialog/sharing-dialog-actions';\n\nconst mapStateToProps =\n    (state: RootState, ownProps: { uuid: string }): SharingURLsComponentDataProps => {\n        const sharingTokens = filterResources(\n            (resource: ApiClientAuthorization) =>\n                resource.kind === ResourceKind.API_CLIENT_AUTHORIZATION  &&\n                resource.scopes.includes(`GET /arvados/v1/collections/${ownProps.uuid}`) &&\n                resource.scopes.includes(`GET /arvados/v1/collections/${ownProps.uuid}/`) &&\n                resource.scopes.includes('GET /arvados/v1/keep_services/accessible')\n            )(state.resources) as ApiClientAuthorization[];\n        const sharingURLsPrefix = state.auth.config.keepWebInlineServiceUrl;\n        return {\n            collectionUuid: ownProps.uuid,\n            sharingTokens,\n            sharingURLsPrefix,\n        }\n    }\n\nconst mapDispatchToProps = (dispatch: Dispatch): SharingURLsComponentActionProps => ({\n    onDeleteSharingToken(uuid: string) {\n        dispatch<any>(deleteSharingToken(uuid));\n    },\n    onCopy(message: string) {\n        dispatch(snackbarActions.OPEN_SNACKBAR({\n            message,\n            hideDuration: 2000,\n            kind: SnackbarKind.SUCCESS\n        }));\n    },\n})\n\nexport const SharingURLsContent = connect(mapStateToProps, mapDispatchToProps)(SharingURLsComponent)\n\n"
  },
  {
    "path": "services/workbench2/src/views-components/sharing-dialog/visibility-level-select.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { MenuItem, Select } from '@mui/material';\nimport withStyles from '@mui/styles/withStyles';\nimport Lock from '@mui/icons-material/Lock';\nimport People from '@mui/icons-material/People';\nimport Public from '@mui/icons-material/Public';\nimport { WithStyles } from '@mui/styles';\nimport { SelectProps } from '@mui/material/Select';\nimport { SelectItem } from './select-item';\nimport { VisibilityLevel } from 'store/sharing-dialog/sharing-dialog-types';\n\n\ntype VisibilityLevelSelectClasses = 'root';\n\nconst VisibilityLevelSelectStyles: CustomStyleRulesCallback<VisibilityLevelSelectClasses> = theme => ({\n    root: {\n    }\n});\nexport const VisibilityLevelSelect = withStyles(VisibilityLevelSelectStyles)(\n    ({ classes, includePublic, ...props }: { includePublic: boolean } & SelectProps & WithStyles<VisibilityLevelSelectClasses>) =>\n        <Select\n            variant=\"standard\"\n            {...props}\n            renderValue={renderPermissionItem}\n            inputProps={{ classes }}>\n            {includePublic && <MenuItem value={VisibilityLevel.PUBLIC}>\n                {renderPermissionItem(VisibilityLevel.PUBLIC)}\n            </MenuItem>}\n            <MenuItem value={VisibilityLevel.ALL_USERS}>\n                {renderPermissionItem(VisibilityLevel.ALL_USERS)}\n            </MenuItem>\n            <MenuItem value={VisibilityLevel.SHARED}>\n                {renderPermissionItem(VisibilityLevel.SHARED)}\n            </MenuItem>\n            <MenuItem value={VisibilityLevel.PRIVATE}>\n                {renderPermissionItem(VisibilityLevel.PRIVATE)}\n            </MenuItem>\n        </Select>);\n\nconst renderPermissionItem = (value: string) =>\n    <SelectItem {...{ value, icon: getIcon(value) }} />;\n\nconst getIcon = (value: string) => {\n    switch (value) {\n        case VisibilityLevel.PUBLIC:\n            return Public;\n        case VisibilityLevel.ALL_USERS:\n            return Public;\n        case VisibilityLevel.SHARED:\n            return People;\n        case VisibilityLevel.PRIVATE:\n            return Lock;\n        default:\n            return Lock;\n    }\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/side-panel/side-panel-collapsed.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React, { ReactElement } from 'react'\nimport { connect } from 'react-redux'\nimport { RootProjectIcon,\n    ProcessIcon,\n    FavoriteIcon,\n    ShareMeIcon,\n    TrashIcon,\n    PublicFavoriteIcon,\n    GroupsIcon,\n    ResourceIcon,\n    FolderKeyIcon,\n    WheelIcon,\n } from 'components/icon/icon'\nimport { TerminalIcon } from 'components/icon/icon'\nimport { IconButton, List, ListItem, Tooltip } from '@mui/material'\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme'\nimport { navigateTo, navigateToInstanceTypes } from 'store/navigation/navigation-action'\nimport { RootState } from 'store/store'\nimport { Dispatch } from 'redux'\nimport {\n    navigateToSharedWithMe,\n    navigateToPublicFavorites,\n    navigateToFavorites,\n    navigateToGroups,\n    navigateToAllProcesses,\n    navigateToTrash,\n    navigateToExternalCredentials,\n    navigateToDashboard,\n} from 'store/navigation/navigation-action'\nimport { navigateToUserVirtualMachines } from 'store/navigation/navigation-action'\nimport { RouterAction } from 'connected-react-router'\nimport { User } from 'models/user'\n\ntype CssRules = 'button' | 'unselected' | 'selected'\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    button: {\n        width: '40px',\n        height: '40px',\n        paddingLeft: '-2rem',\n        marginLeft: '-0.6rem',\n        marginBottom: '-1rem'\n    },\n    unselected: {\n        color: theme.customs.colors.grey600,\n    },\n    selected: {\n        color: theme.palette.primary.main,\n    },\n})\n\nenum SidePanelCollapsedCategory {\n    DASHBOARD = 'Dashboard',\n    PROJECTS = 'Home Projects',\n    FAVORITES = 'My Favorites',\n    PUBLIC_FAVORITES = 'Public Favorites',\n    SHARED_WITH_ME = 'Shared with me',\n    ALL_PROCESSES = 'All Processes',\n    INSTANCE_TYPES = 'Instance Types',\n    SHELL_ACCESS = 'Shell Access',\n    EXTERNAL_CREDENTIALS = 'External Credentials',\n    GROUPS = 'Groups',\n    TRASH = 'Trash',\n}\n\ntype TCollapsedCategory = {\n    name: SidePanelCollapsedCategory\n    icon: ReactElement\n    navTarget: RouterAction | ''\n}\n\nconst sidePanelCollapsedCategories: TCollapsedCategory[] = [\n    { name: SidePanelCollapsedCategory.DASHBOARD,\n        icon: <WheelIcon />,\n        navTarget: navigateToDashboard,\n    },\n    {\n        name: SidePanelCollapsedCategory.PROJECTS,\n        icon: <RootProjectIcon />,\n        navTarget: '',\n    },\n    {\n        name: SidePanelCollapsedCategory.FAVORITES,\n        icon: <FavoriteIcon />,\n        navTarget: navigateToFavorites,\n    },\n    {\n        name: SidePanelCollapsedCategory.PUBLIC_FAVORITES,\n        icon: <PublicFavoriteIcon />,\n        navTarget: navigateToPublicFavorites,\n    },\n    {\n        name: SidePanelCollapsedCategory.SHARED_WITH_ME,\n        icon: <ShareMeIcon />,\n        navTarget: navigateToSharedWithMe,\n    },\n    {\n        name: SidePanelCollapsedCategory.ALL_PROCESSES,\n        icon: <ProcessIcon />,\n        navTarget: navigateToAllProcesses,\n    },\n    {\n        name: SidePanelCollapsedCategory.INSTANCE_TYPES,\n        icon: <ResourceIcon />,\n        navTarget: navigateToInstanceTypes,\n    },\n    {\n        name: SidePanelCollapsedCategory.SHELL_ACCESS,\n        icon: 
<TerminalIcon />,\n        navTarget: navigateToUserVirtualMachines,\n    },\n    {\n        name: SidePanelCollapsedCategory.EXTERNAL_CREDENTIALS,\n        icon: <FolderKeyIcon />,\n        navTarget: navigateToExternalCredentials,\n    },\n    {\n        name: SidePanelCollapsedCategory.GROUPS,\n        icon: <GroupsIcon style={{marginLeft: '2px', scale: '85%'}}/>,\n        navTarget: navigateToGroups,\n    },\n    {\n        name: SidePanelCollapsedCategory.TRASH,\n        icon: <TrashIcon />,\n        navTarget: navigateToTrash,\n    },\n]\n\ntype SidePanelCollapsedProps = {\n    user: User;\n    selectedPath: string;\n    navToHome: (uuid: string) => void;\n    navTo: (navTarget: RouterAction | '') => void;\n};\n\nconst mapStateToProps = ({auth, properties }: RootState) => {\n        return {\n            user: auth.user,\n            selectedPath: properties.breadcrumbs\n                ? properties.breadcrumbs[0].label\n                : SidePanelCollapsedCategory.PROJECTS,\n        }\n}\n\nconst mapDispatchToProps = (dispatch: Dispatch) => {\n    return {\n        navToHome: (navTarget) => dispatch<any>(navigateTo(navTarget)),\n        navTo: (navTarget) => dispatch<any>(navTarget),\n    }\n}\n\nexport const SidePanelCollapsed = withStyles(styles)(\n    connect(mapStateToProps, mapDispatchToProps)(({ classes, user, selectedPath, navToHome, navTo }: WithStyles<CssRules> & SidePanelCollapsedProps) => {\n\n        const handleClick = (cat: TCollapsedCategory) => {\n            if (cat.name === SidePanelCollapsedCategory.PROJECTS) navToHome(user.uuid)\n            else navTo(cat.navTarget)\n        }\n\n        const { button, unselected, selected } = classes\n        return (\n            <List data-cy='side-panel-collapsed'>\n                {sidePanelCollapsedCategories.map((cat) => (\n                    <ListItem\n                        key={cat.name}\n                        data-cy={`collapsed-${cat.name.toLowerCase().replace(/\\s+/g, '-')}`}\n                        onClick={() => handleClick(cat)}\n                        >\n                        <Tooltip\n                            className={selectedPath === cat.name ? selected : unselected}\n                            title={cat.name}\n                            disableFocusListener\n                            >\n                            <IconButton className={button} size=\"large\">{cat.icon}</IconButton>\n                        </Tooltip>\n                    </ListItem>\n                ))}\n            </List>\n        );\n    })\n)\n"
  },
  {
    "path": "services/workbench2/src/views-components/side-panel/side-panel.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { SidePanelTree, SidePanelTreeProps } from 'views-components/side-panel-tree/side-panel-tree';\nimport { Dispatch } from 'redux';\nimport { connect } from 'react-redux';\nimport { navigateFromSidePanel } from 'store/side-panel/side-panel-action';\nimport { Grid } from '@mui/material';\nimport { SidePanelButton } from 'views-components/side-panel-button/side-panel-button';\nimport { RootState } from 'store/store';\nimport SidePanelToggle from 'views-components/side-panel-toggle/side-panel-toggle';\nimport { SidePanelCollapsed } from './side-panel-collapsed';\n\ntype CssRules = 'sidePanelGridItem' | 'topButtonContainer';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    sidePanelGridItem: {\n        maxWidth: 'inherit',\n        wordBreak: 'break-word',\n    },\n    topButtonContainer: {\n        display: 'flex',\n        justifyContent: 'space-between'\n    }\n});\n\nconst mapDispatchToProps = (dispatch: Dispatch): SidePanelTreeProps => ({\n    onItemActivation: id => {\n        dispatch<any>(navigateFromSidePanel(id));\n    },\n});\n\nconst mapStateToProps = ({ router, sidePanel }: RootState): Partial<SidePanelTreeProps> => ({\n    currentRoute: router.location ? router.location.pathname : '',\n    isCollapsed: sidePanel.collapsedState,\n});\n\nexport const SidePanel = withStyles(styles)(\n    connect(mapStateToProps, mapDispatchToProps)(\n        ({ classes, ...props }: WithStyles<CssRules> & SidePanelTreeProps ) => (\n            <Grid item xs className={classes.sidePanelGridItem}>\n                {props.isCollapsed ?\n                    <div>\n                        <SidePanelToggle />\n                        <SidePanelCollapsed />\n                    </div>\n                        :\n                    <div>\n                        <div className={classes.topButtonContainer}>\n                            <SidePanelButton key={props.currentRoute} />\n                            <SidePanelToggle/>\n                        </div>\n                        <SidePanelTree {...props} />\n                    </div>\n                }\n            </Grid>\n        )\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/side-panel-button/side-panel-button.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { isProjectTrashed } from './side-panel-button';\n\ndescribe('<SidePanelButton />', () => {\n    describe('isProjectTrashed', () => {\n        it('should return false if project is undefined', () => {\n            // given\n            const proj = undefined;\n            const resources = {};\n\n            // when\n            const result = isProjectTrashed(proj, resources);\n\n            // then\n            expect(!!result).to.equal(false);\n        });\n\n        it('should return false if parent project is undefined', () => {\n            // given\n            const proj = {};\n            const resources = {};\n\n            // when\n            const result = isProjectTrashed(proj, resources);\n\n            // then\n            expect(!!result).to.equal(false);\n        });\n\n        it('should return false for owner', () => {\n            // given\n            const proj = {\n                ownerUuid: 'ce8i5-tpzed-000000000000000',\n            };\n            const resources = {};\n\n            // when\n            const result = isProjectTrashed(proj, resources);\n\n            // then\n            expect(!!result).to.equal(false);\n        });\n\n        it('should return true for trashed', () => {\n            // given\n            const proj = {\n                isTrashed: true,\n            };\n            const resources = {};\n\n            // when\n            const result = isProjectTrashed(proj, resources);\n\n            // then\n            expect(!!result).to.equal(true);\n        });\n\n        it('should return false for undefined parent projects', () => {\n            // given\n            const proj = {\n                ownerUuid: 'ce8i5-j7d0g-000000000000000',\n            };\n            const resources = {};\n\n            // when\n            const result = isProjectTrashed(proj, resources);\n\n            // then\n            expect(!!result).to.equal(false);\n        });\n    });\n});"
  },
  {
    "path": "services/workbench2/src/views-components/side-panel-button/side-panel-button.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { connect, DispatchProp } from 'react-redux';\nimport { RootState } from 'store/store';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { PopoverOrigin } from '@mui/material/Popover';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Toolbar, Grid, Button, MenuItem, Menu } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { AddIcon, CollectionIcon, ProcessIcon, ProjectIcon } from 'components/icon/icon';\nimport { openProjectCreateDialog } from 'store/projects/project-create-actions';\nimport { openCollectionCreateDialog } from 'store/collections/collection-create-actions';\nimport { navigateToRunProcess } from 'store/navigation/navigation-action';\nimport { runProcessPanelActions } from 'store/run-process-panel/run-process-panel-actions';\nimport { getUserUuid } from 'common/getuser';\nimport { matchProjectRoute } from 'routes/routes';\nimport { GroupClass, GroupResource } from 'models/group';\nimport { ResourcesState, getResource } from 'store/resources/resources';\nimport { extractUuidKind, ResourceKind } from 'models/resource';\nimport { pluginConfig } from 'plugins';\nimport { ElementListReducer } from 'common/plugintypes';\nimport { Location } from 'history';\nimport { ProjectResource } from 'models/project';\n\ntype CssRules = 'button' | 'menuItem' | 'icon';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    button: {\n        boxShadow: 'none',\n        padding: '2px 10px 2px 5px',\n        fontSize: '0.75rem'\n    },\n    menuItem: {\n        fontSize: '0.875rem',\n        color: theme.palette.grey[\"700\"]\n    },\n    icon: {\n        marginRight: theme.spacing(1)\n    }\n});\n\ninterface SidePanelDataProps {\n    location: Location;\n    currentItemId: string;\n    resources: ResourcesState;\n    currentUserUUID: string | undefined;\n}\n\ninterface SidePanelState {\n    anchorEl: any;\n}\n\ntype SidePanelProps = SidePanelDataProps & DispatchProp & WithStyles<CssRules>;\n\nconst transformOrigin: PopoverOrigin = {\n    vertical: -50,\n    horizontal: 0\n};\n\nexport const isProjectTrashed = (proj: GroupResource | undefined, resources: ResourcesState): boolean => {\n    if (proj === undefined) { return false; }\n    if (proj.isTrashed) { return true; }\n    if (extractUuidKind(proj.ownerUuid) === ResourceKind.USER) { return false; }\n    const parentProj = getResource<GroupResource>(proj.ownerUuid)(resources);\n    return isProjectTrashed(parentProj, resources);\n};\n\nexport const SidePanelButton = withStyles(styles)(\n    connect((state: RootState) => ({\n        currentItemId: state.router.location\n            ? 
state.router.location.pathname.split('/').slice(-1)[0]\n            : null,\n        location: state.router.location,\n        resources: state.resources,\n        currentUserUUID: getUserUuid(state),\n    }))(\n        class extends React.Component<SidePanelProps> {\n\n            state: SidePanelState = {\n                anchorEl: undefined\n            };\n\n            render() {\n                const { classes, location, resources, currentUserUUID, currentItemId } = this.props;\n                const { anchorEl } = this.state;\n                let enabled = false;\n                if (currentItemId === currentUserUUID) {\n                    enabled = true;\n                } else if (matchProjectRoute(location ? location.pathname : '')) {\n                    const currentProject = getResource<ProjectResource>(currentItemId)(resources);\n                    if (currentProject && currentProject.canWrite &&\n                        !currentProject.frozenByUuid &&\n                        !isProjectTrashed(currentProject, resources) &&\n                        currentProject.groupClass !== GroupClass.FILTER) {\n                        enabled = true;\n                    }\n                }\n\n                for (const enableFn of pluginConfig.enableNewButtonMatchers) {\n                    if (enableFn(location, currentItemId, currentUserUUID, resources)) {\n                        enabled = true;\n                    }\n                }\n\n                let menuItems = [\n                    <MenuItem key={'new-collection'} data-cy='side-panel-new-collection' className={classes.menuItem} onClick={this.handleNewCollectionClick}>\n                        <CollectionIcon className={classes.icon} /> New collection\n                    </MenuItem>,\n                    <MenuItem key={'run-process'} data-cy='side-panel-run-process' className={classes.menuItem} onClick={this.handleRunProcessClick}>\n                        <ProcessIcon className={classes.icon} /> Run a workflow\n                    </MenuItem>,\n                    <MenuItem key={'new-project'} data-cy='side-panel-new-project' className={classes.menuItem} onClick={this.handleNewProjectClick}>\n                        <ProjectIcon className={classes.icon} /> New project\n                    </MenuItem>,\n                ];\n\n                const reduceItemsFn: (a: React.ReactElement[], b: ElementListReducer) => React.ReactElement[] =\n                    (a, b) => b(a, classes.menuItem);\n\n                menuItems = pluginConfig.newButtonMenuList.reduce(reduceItemsFn, menuItems);\n\n                return (\n                    <Toolbar style={{paddingRight: 0}}>\n                        <Grid container>\n                            <Grid container item xs alignItems=\"center\" justifyContent=\"flex-start\">\n                                <Button data-cy=\"side-panel-button\" variant=\"contained\" disabled={!enabled}\n                                    color=\"primary\" size=\"small\" className={classes.button}\n                                    aria-owns={anchorEl ? 
'aside-menu-list' : undefined}\n                                    aria-haspopup=\"true\"\n                                    onClick={this.handleOpen}>\n                                    <AddIcon />\n                                    New\n                                </Button>\n                                <Menu\n                                    id='aside-menu-list'\n                                    anchorEl={anchorEl}\n                                    open={Boolean(anchorEl)}\n                                    onClose={this.handleClose}\n                                    onClick={this.handleClose}\n                                    transformOrigin={transformOrigin}>\n                                    {menuItems}\n                                </Menu>\n                            </Grid>\n                        </Grid>\n                    </Toolbar>\n                );\n            }\n\n            handleNewProjectClick = () => {\n                this.props.dispatch<any>(openProjectCreateDialog(this.props.currentItemId));\n            }\n\n            handleRunProcessClick = () => {\n                const location = this.props.location;\n                this.props.dispatch(runProcessPanelActions.RESET_RUN_PROCESS_PANEL());\n                this.props.dispatch(runProcessPanelActions.SET_PROCESS_PATHNAME(location.pathname));\n                this.props.dispatch(runProcessPanelActions.SET_PROCESS_OWNER_UUID(this.props.currentItemId));\n\n                this.props.dispatch<any>(navigateToRunProcess);\n            }\n\n            handleNewCollectionClick = () => {\n                this.props.dispatch<any>(openCollectionCreateDialog(this.props.currentItemId));\n            }\n\n            handleClose = () => {\n                this.setState({ anchorEl: undefined });\n            }\n\n            handleOpen = (event: React.MouseEvent<HTMLButtonElement>) => {\n                this.setState({ anchorEl: event.currentTarget });\n            }\n        }\n    )\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/side-panel-toggle/side-panel-toggle.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { Tooltip, IconButton } from '@mui/material';\nimport { connect } from 'react-redux';\nimport { toggleSidePanel } from \"store/side-panel/side-panel-action\";\nimport { RootState } from 'store/store';\n\ntype collapseButtonProps = {\n    isCollapsed: boolean;\n    toggleSidePanel: (collapsedState: boolean) => void\n}\n\nexport const COLLAPSE_ICON_SIZE = 35\n\nconst SidePanelToggle = (props: collapseButtonProps) => {\n    const collapseButtonIconStyles = {\n        root: {\n            width: `${COLLAPSE_ICON_SIZE}px`,\n            height: `${COLLAPSE_ICON_SIZE}px`,\n            marginTop: '0.4rem',\n            marginLeft: '0.7rem',\n            paddingTop: '1rem',\n            paddingRight: '1rem'\n        },\n        icon: {\n            opacity: '0.5',\n            marginBottom: '0.54rem'\n        },\n    }\n\n    return (\n        <Tooltip disableFocusListener title=\"Toggle Side Panel\">\n            <IconButton\n                data-cy=\"side-panel-toggle\"\n                style={collapseButtonIconStyles.root}\n                onClick={() => { props.toggleSidePanel(props.isCollapsed) }}\n                size=\"large\">\n                <div>\n                    {props.isCollapsed ?\n                        <img style={{...collapseButtonIconStyles.icon, marginLeft:'0.25rem'}} src='/mui-start-icon.svg' alt='an arrow pointing right'/>\n                        :\n                        <img style={{ ...collapseButtonIconStyles.icon, transform: \"rotate(180deg)\"}} src='/mui-start-icon.svg' alt='an arrow pointing right'/>}\n                </div>\n            </IconButton>\n        </Tooltip>\n    );\n};\n\nconst mapStateToProps = (state: RootState) => {\n    return {\n        isCollapsed: state.sidePanel.collapsedState\n    }\n}\n\nconst mapDispatchToProps = (dispatch) => {\n    return {\n        toggleSidePanel: (collapsedState) => {\n            return dispatch(toggleSidePanel(collapsedState))\n        }\n    }\n};\n\nexport default connect(mapStateToProps, mapDispatchToProps)(SidePanelToggle)\n"
  },
  {
    "path": "services/workbench2/src/views-components/side-panel-tree/side-panel-tree.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { TreePicker, TreePickerProps } from \"../tree-picker/tree-picker\";\nimport { TreeItem } from \"components/tree/tree\";\nimport { ProjectResource } from \"models/project\";\nimport { ListItemTextIcon } from \"components/list-item-text-icon/list-item-text-icon\";\nimport { activateSidePanelTreeItem,\n         toggleSidePanelTreeItemCollapse,\n         SIDE_PANEL_TREE,\n         SidePanelTreeCategory,\n         getSidePanelIcon\n} from 'store/side-panel-tree/side-panel-tree-actions';\nimport { openSidePanelContextMenu } from 'store/context-menu/context-menu-actions';\nimport { noop } from 'lodash';\nimport { ResourceKind } from \"models/resource\";\nimport { IllegalNamingWarning } from \"components/warning/warning\";\nimport { GroupClass } from \"models/group\";\nimport { setSelectedResourceUuid } from \"store/selected-resource/selected-resource-actions\";\nimport { FilterGroupIcon, RootProjectIcon } from 'components/icon/icon';\n\nexport interface SidePanelTreeProps {\n    onItemActivation: (id: string) => void;\n    isCollapsed?: boolean;\n    currentRoute?: string;\n}\n\ntype SidePanelTreeActionProps = Pick<TreePickerProps<ProjectResource | string>, 'onContextMenu' | 'toggleItemActive' | 'toggleItemOpen' | 'toggleItemSelection'>;\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: SidePanelTreeProps): SidePanelTreeActionProps => ({\n    onContextMenu: (event, { id }) => {\n        dispatch<any>(openSidePanelContextMenu(event, id));\n    },\n    toggleItemActive: (_, { id }) => {\n        dispatch<any>(activateSidePanelTreeItem(id));\n        const isSidePanelCat = Object.values(SidePanelTreeCategory).includes(id as SidePanelTreeCategory);\n        dispatch<any>(setSelectedResourceUuid(isSidePanelCat ? null : id));\n        props.onItemActivation(id);\n    },\n    toggleItemOpen: (_, { id }) => {\n        dispatch<any>(toggleSidePanelTreeItemCollapse(id));\n    },\n    toggleItemSelection: noop,\n});\n\nexport const SidePanelTree = connect(undefined, mapDispatchToProps)(\n    (props: SidePanelTreeActionProps) =>\n        <div data-cy=\"side-panel-tree\">\n            <TreePicker {...props} render={renderSidePanelItem} pickerId={SIDE_PANEL_TREE} />\n        </div>);\n\nconst renderSidePanelItem = (item: TreeItem<ProjectResource>) => {\n    const name = typeof item.data === 'string' ? item.data : item.data.name;\n    const warn = typeof item.data !== 'string' && item.data.kind === ResourceKind.PROJECT\n        ? <IllegalNamingWarning name={name} />\n        : undefined;\n    return <ListItemTextIcon\n        icon={getProjectPickerIcon(item)}\n        name={name}\n        nameDecorator={warn}\n        isActive={item.active}\n        hasMargin={true}\n    />;\n};\n\nconst getProjectPickerIcon = (item: TreeItem<ProjectResource | string>) =>\n    typeof item.data === 'string'\n        ? getSidePanelIcon(item.data)\n        : (item.data && item.data.groupClass === GroupClass.FILTER)\n            ? FilterGroupIcon\n            : RootProjectIcon;\n"
  },
  {
    "path": "services/workbench2/src/views-components/snackbar/snackbar.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dispatch } from \"redux\";\nimport { connect } from \"react-redux\";\nimport { RootState } from \"store/store\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Button, IconButton, SnackbarContent } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport MaterialSnackbar, { SnackbarOrigin } from \"@mui/material/Snackbar\";\nimport { snackbarActions, SnackbarKind, SnackbarMessage } from \"store/snackbar/snackbar-actions\";\nimport { navigateTo } from 'store/navigation/navigation-action';\nimport WarningIcon from '@mui/icons-material/Warning';\nimport CheckCircleIcon from '@mui/icons-material/CheckCircle';\nimport ErrorIcon from '@mui/icons-material/Error';\nimport InfoIcon from '@mui/icons-material/Info';\nimport CloseIcon from '@mui/icons-material/Close';\nimport { ArvadosTheme } from \"common/custom-theme\";\nimport { amber, green } from \"@mui/material/colors\";\nimport classNames from 'classnames';\n\ninterface SnackbarDataProps {\n    anchorOrigin?: SnackbarOrigin;\n    autoHideDuration?: number;\n    open: boolean;\n    messages: SnackbarMessage[];\n}\n\ninterface SnackbarEventProps {\n    onClose?: (event: React.SyntheticEvent<any>, reason: string, message?: string) => void;\n    onExited: () => void;\n    onClick: (uuid: string) => void;\n}\n\nconst mapStateToProps = (state: RootState): SnackbarDataProps => {\n    const messages = state.snackbar.messages;\n    return {\n        anchorOrigin: { vertical: \"bottom\", horizontal: \"right\" },\n        open: state.snackbar.open,\n        messages,\n        autoHideDuration: messages.length > 0 ? 
messages[0].hideDuration : 0\n    };\n};\n\nconst mapDispatchToProps = (dispatch: Dispatch): SnackbarEventProps => ({\n    onClose: (event: any, reason: string, id: undefined) => {\n        if (reason !== \"clickaway\") {\n            dispatch(snackbarActions.CLOSE_SNACKBAR(id));\n        }\n    },\n    onExited: () => {\n        dispatch(snackbarActions.SHIFT_MESSAGES());\n    },\n    onClick: (uuid: string) => {\n        dispatch<any>(navigateTo(uuid));\n    }\n});\n\ntype CssRules = \"success\" | \"error\" | \"info\" | \"warning\" | \"icon\" | \"iconVariant\" | \"message\" | \"linkButton\" | \"snackbarContent\";\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    success: {\n        backgroundColor: green[600]\n    },\n    error: {\n        backgroundColor: theme.palette.error.dark\n    },\n    info: {\n        backgroundColor: theme.palette.primary.main\n    },\n    warning: {\n        backgroundColor: amber[700]\n    },\n    icon: {\n        fontSize: 20\n    },\n    iconVariant: {\n        opacity: 0.9,\n        marginRight: theme.spacing(1)\n    },\n    message: {\n        display: 'flex',\n        alignItems: 'center'\n    },\n    linkButton: {\n        fontWeight: 'bolder'\n    },\n    snackbarContent: {\n        marginBottom: '1rem'\n    }\n});\n\ntype SnackbarProps = SnackbarDataProps & SnackbarEventProps & WithStyles<CssRules>;\n\nexport const Snackbar = withStyles(styles)(connect(mapStateToProps, mapDispatchToProps)(\n    (props: SnackbarProps) => {\n        const { classes } = props;\n\n        const variants = {\n            [SnackbarKind.INFO]: [InfoIcon, classes.info],\n            [SnackbarKind.WARNING]: [WarningIcon, classes.warning],\n            [SnackbarKind.SUCCESS]: [CheckCircleIcon, classes.success],\n            [SnackbarKind.ERROR]: [ErrorIcon, classes.error]\n        };\n\n        return (\n            <MaterialSnackbar\n                TransitionProps={\n                    {\n                        onExited: props.onExited\n                    }\n                }\n                open={props.open}\n                onClose={props.onClose}\n                anchorOrigin={props.anchorOrigin}\n                autoHideDuration={props.autoHideDuration}>\n                <div data-cy=\"snackbar\">\n                    {\n                         props.messages.map((message, index) => {\n                            const [Icon, cssClass] = variants[message.kind];\n\n                            return <SnackbarContent\n                                key={`${index}-${message.message}`}\n                                className={classNames(cssClass, classes.snackbarContent)}\n                                aria-describedby=\"client-snackbar\"\n                                message={\n                                    <span id=\"client-snackbar\" className={classes.message}>\n                                        <Icon className={classNames(classes.icon, classes.iconVariant)} />\n                                        {message.message}\n                                    </span>\n                                }\n                                action={actions(message, props.onClick, props.onClose, classes, index, props.autoHideDuration)}\n                            />\n                         })\n                    }\n                </div>\n            </MaterialSnackbar>\n        );\n    }\n));\n\nconst actions = (props: SnackbarMessage, onClick, onClose, classes, index, autoHideDuration) => {\n    if (onClose && 
autoHideDuration) {\n        setTimeout(onClose, autoHideDuration);\n    }\n\n    const actions = [\n        <IconButton\n            key=\"close\"\n            aria-label=\"Close\"\n            color=\"inherit\"\n            onClick={e => onClose && onClose(e, '', index)}\n            size=\"large\">\n            <CloseIcon className={classes.icon} />\n        </IconButton>\n    ];\n    if (props.link) {\n        actions.splice(0, 0,\n            <Button key=\"goTo\"\n                aria-label=\"goTo\"\n                size=\"small\"\n                color=\"inherit\"\n                className={classes.linkButton}\n                onClick={() => onClick(props.link)}>\n                <span data-cy='snackbar-goto-action'>Go To</span>\n            </Button>\n        );\n    }\n    return actions;\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/ssh-keys-dialog/attributes-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { compose } from 'redux';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button, Grid } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithDialogProps, withDialog } from \"store/dialog/with-dialog\";\nimport { SSH_KEY_ATTRIBUTES_DIALOG } from 'store/auth/auth-action-ssh';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { SshKeyResource } from \"models/ssh-key\";\n\ntype CssRules = 'root';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    root: {\n        fontSize: '0.875rem',\n        '& div:nth-child(odd)': {\n            textAlign: 'right',\n            color: theme.palette.grey[\"500\"]\n        }\n    }\n});\n\ninterface AttributesSshKeyDialogDataProps {\n    sshKey: SshKeyResource;\n}\n\nexport const AttributesSshKeyDialog = compose(\n    withDialog(SSH_KEY_ATTRIBUTES_DIALOG),\n    withStyles(styles))(\n        ({ open, closeDialog, data, classes }: WithDialogProps<AttributesSshKeyDialogDataProps> & WithStyles<CssRules>) =>\n            <Dialog open={open}\n                onClose={closeDialog}\n                fullWidth\n                maxWidth='sm'>\n                <DialogTitle>Attributes</DialogTitle>\n                <DialogContent>\n                    {data.sshKey && <Grid container direction=\"row\" spacing={2} className={classes.root}>\n                        <Grid item xs={5}>Name</Grid>\n                        <Grid item xs={7}>{data.sshKey.name}</Grid>\n                        <Grid item xs={5}>uuid</Grid>\n                        <Grid item xs={7}>{data.sshKey.uuid}</Grid>\n                        <Grid item xs={5}>Owner uuid</Grid>\n                        <Grid item xs={7}>{data.sshKey.ownerUuid}</Grid>\n                        <Grid item xs={5}>Authorized user uuid</Grid>\n                        <Grid item xs={7}>{data.sshKey.authorizedUserUuid}</Grid>\n                        <Grid item xs={5}>Created at</Grid>\n                        <Grid item xs={7}>{data.sshKey.createdAt}</Grid>\n                        <Grid item xs={5}>Modified at</Grid>\n                        <Grid item xs={7}>{data.sshKey.modifiedAt}</Grid>\n                        <Grid item xs={5}>Expires at</Grid>\n                        <Grid item xs={7}>{data.sshKey.expiresAt}</Grid>\n                        <Grid item xs={5}>Modified by user uuid</Grid>\n                        <Grid item xs={7}>{data.sshKey.modifiedByUserUuid}</Grid>\n                    </Grid>}\n                </DialogContent>\n                <DialogActions>\n                    <Button\n                        variant='text'\n                        color='primary'\n                        onClick={closeDialog}>\n                        Close\n                    </Button>\n                </DialogActions>\n            </Dialog>\n    );\n"
  },
  {
    "path": "services/workbench2/src/views-components/ssh-keys-dialog/public-key-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { compose } from 'redux';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { WithDialogProps, withDialog } from \"store/dialog/with-dialog\";\nimport { SSH_KEY_PUBLIC_KEY_DIALOG } from 'store/auth/auth-action-ssh';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { DefaultCodeSnippet } from 'components/default-code-snippet/default-code-snippet';\n\ntype CssRules = 'codeSnippet';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    codeSnippet: {\n        borderRadius: theme.spacing(0.5),\n        border: '1px solid',\n        borderColor: theme.palette.grey[\"400\"],\n        '& pre': {\n            wordWrap: 'break-word',\n            whiteSpace: 'pre-wrap'\n        }\n    },\n});\n\ninterface PublicKeyDialogDataProps {\n    name: string;\n    publicKey: string;\n}\n\nexport const PublicKeyDialog = compose(\n    withDialog(SSH_KEY_PUBLIC_KEY_DIALOG),\n    withStyles(styles))(\n        ({ open, closeDialog, data, classes }: WithDialogProps<PublicKeyDialogDataProps> & WithStyles<CssRules>) =>\n            <Dialog open={open}\n                onClose={closeDialog}\n                fullWidth\n                maxWidth='sm'>\n                <DialogTitle>{data.name} - SSH Key</DialogTitle>\n                <DialogContent>\n                    {data && data.publicKey && <DefaultCodeSnippet\n                        className={classes.codeSnippet}\n                        lines={data.publicKey.split(' ')} />}\n                </DialogContent>\n                <DialogActions>\n                    <Button\n                        variant='text'\n                        color='primary'\n                        onClick={closeDialog}>\n                        Close\n                    </Button>\n                </DialogActions>\n            </Dialog>\n    );\n"
  },
  {
    "path": "services/workbench2/src/views-components/ssh-keys-dialog/remove-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { SSH_KEY_REMOVE_DIALOG, removeSshKey } from 'store/auth/auth-action-ssh';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeSshKey(props.data.uuid));\n    }\n});\n\nexport const RemoveSshKeyDialog = compose(\n    withDialog(SSH_KEY_REMOVE_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);\n"
  },
  {
    "path": "services/workbench2/src/views-components/token-dialog/token-dialog.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { TokenDialogComponent } from './token-dialog';\nimport { ThemeProvider } from \"@mui/material\";\nimport { CustomTheme } from 'common/custom-theme';\nimport { combineReducers, createStore } from \"redux\";\nimport { Provider } from \"react-redux\";\n\ndescribe('<CurrentTokenDialog />', () => {\n  let props;\n  let store;\n\n  beforeEach(() => {\n    props = {\n      classes: {},\n      token: 'xxxtokenxxx',\n      apiHost: 'example.com',\n      open: true,\n      dispatch: cy.spy().as('dispatch'),\n    };\n\n    const initialAuthState = {\n      localCluster: \"zzzzz\",\n      remoteHostsConfig: {},\n      sessions: {},\n    };\n\n    store = createStore(combineReducers({\n      auth: (state = initialAuthState, action) => state,\n    }));\n  });\n\n  describe('Get API Token dialog', () => {\n    beforeEach(() => {\n      cy.mount(\n        <Provider store={store}>\n          <ThemeProvider theme={CustomTheme}>\n            <TokenDialogComponent {...props} />\n          </ThemeProvider>\n        </Provider>);\n    });\n\n    it('should include API host and token', () => {\n      cy.get('pre').contains('export ARVADOS_API_HOST=example.com');\n      cy.get('pre').contains('export ARVADOS_API_TOKEN=xxxtokenxxx');\n    });\n\n    it('should show the token expiration if present', () => {\n      expect(props.tokenExpiration).to.be.undefined;\n      cy.get('[data-cy=details-attribute-value]').contains('This token does not have an expiration date');\n\n      const someDate = '2140-01-01T00:00:00.000Z'\n      props.tokenExpiration = new Date(someDate);\n      cy.mount(\n        <Provider store={store}>\n          <ThemeProvider theme={CustomTheme}>\n            <TokenDialogComponent {...props} />\n          </ThemeProvider>\n        </Provider>);\n      cy.get('[data-cy=details-attribute-value]').contains(props.tokenExpiration.toLocaleString());\n    });\n\n    it('should show a create new token button when allowed', () => {\n      expect(!!props.canCreateNewTokens).to.equal(false);\n      cy.contains('GET NEW TOKEN').should('not.exist');\n\n      props.canCreateNewTokens = true;\n      cy.mount(\n        <Provider store={store}>\n          <ThemeProvider theme={CustomTheme}>\n            <TokenDialogComponent {...props} />\n          </ThemeProvider>\n        </Provider>);\n      cy.contains('GET NEW TOKEN').should('exist');\n    });\n  });\n\n  describe('Copy link to clipboard button', () => {\n    beforeEach(() => {\n      cy.mount(\n        <Provider store={store}>\n          <ThemeProvider theme={CustomTheme}>\n            <TokenDialogComponent {...props} />\n          </ThemeProvider>\n        </Provider>);\n    });\n\n    it('should copy API TOKEN to the clipboard', () => {\n      cy.get('button').contains('Copy').click();\n      cy.get('@dispatch').should('be.calledWith', {\n        payload: {\n          hideDuration: 2000,\n          kind: 1,\n          message: 'Shell code block copied',\n        },\n        type: 'OPEN_SNACKBAR',\n      });\n    });\n  });\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/token-dialog/token-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Dialog, DialogActions, DialogTitle, DialogContent, Button, Typography } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport CopyToClipboard from 'react-copy-to-clipboard';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { withDialog } from 'store/dialog/with-dialog';\nimport { WithDialogProps } from 'store/dialog/with-dialog';\nimport { connect, DispatchProp } from 'react-redux';\nimport {\n    TokenDialogData,\n    getTokenDialogData,\n    TOKEN_DIALOG_NAME,\n} from 'store/token-dialog/token-dialog-actions';\nimport { DefaultCodeSnippet } from 'components/default-code-snippet/default-code-snippet';\nimport { snackbarActions, SnackbarKind } from 'store/snackbar/snackbar-actions';\nimport { getNewExtraToken } from 'store/auth/auth-action';\nimport { DetailsAttributeComponent } from 'components/details-attribute/details-attribute';\nimport moment from 'moment';\n\ntype CssRules = 'link' | 'paper' | 'button' | 'actionButton' | 'codeBlock';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    link: {\n        color: theme.palette.primary.main,\n        textDecoration: 'none',\n        margin: '0px 4px'\n    },\n    paper: {\n        padding: theme.spacing(1),\n        marginBottom: theme.spacing(2),\n        backgroundColor: theme.palette.grey[\"200\"],\n        border: `1px solid ${theme.palette.grey[\"300\"]}`\n    },\n    button: {\n        fontSize: '0.8125rem',\n        fontWeight: 600\n    },\n    actionButton: {\n        boxShadow: 'none',\n        marginTop: theme.spacing(2),\n        marginBottom: theme.spacing(2),\n        marginRight: theme.spacing(2),\n    },\n    codeBlock: {\n        fontSize: '0.8125rem',\n    },\n});\n\ntype TokenDialogProps = TokenDialogData & WithDialogProps<{}> & WithStyles<CssRules> & DispatchProp;\n\nexport class TokenDialogComponent extends React.Component<TokenDialogProps> {\n    onCopy = (message: string) => {\n        this.props.dispatch(snackbarActions.OPEN_SNACKBAR({\n            message,\n            hideDuration: 2000,\n            kind: SnackbarKind.SUCCESS\n        }));\n    }\n\n    onGetNewToken = async () => {\n        const newToken = await this.props.dispatch<any>(getNewExtraToken());\n        if (newToken) {\n            this.props.dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: 'New token retrieved',\n                hideDuration: 2000,\n                kind: SnackbarKind.SUCCESS\n            }));\n        } else {\n            this.props.dispatch(snackbarActions.OPEN_SNACKBAR({\n                message: 'Creating new tokens is not allowed',\n                hideDuration: 2000,\n                kind: SnackbarKind.WARNING\n            }));\n        }\n    }\n\n    getSnippet = ({ apiHost, token }: TokenDialogData) =>\n        `HISTIGNORE=$HISTIGNORE:'export ARVADOS_API_TOKEN=*'\nexport ARVADOS_API_TOKEN=${token}\nexport ARVADOS_API_HOST=${apiHost}\nunset ARVADOS_API_HOST_INSECURE`\n\n    render() {\n        const { classes, open, closeDialog, ...data } = this.props;\n        const tokenExpiration = data.tokenExpiration\n            ? 
`${data.tokenExpiration.toLocaleString()} (${moment(data.tokenExpiration).fromNow()})`\n            : `This token does not have an expiration date`;\n\n        return <Dialog\n            open={open}\n            onClose={closeDialog}\n            fullWidth={true}\n            maxWidth='md'>\n            <DialogTitle>Get API Token</DialogTitle>\n            <DialogContent>\n                <Typography paragraph={true}>\n                    The Arvados API token is a secret key that enables the Arvados SDKs to access Arvados with the proper permissions.\n                    <Typography component='span'>\n                        For more information see\n                        <a href='http://doc.arvados.org/user/reference/api-tokens.html' target='_blank' rel=\"noopener\" className={classes.link}>\n                            Getting an API token.\n                        </a>\n                    </Typography>\n                </Typography>\n\n                <DetailsAttributeComponent label='API Host' value={data.apiHost} copyValue={data.apiHost} onCopy={this.onCopy} />\n                <DetailsAttributeComponent label='API Token' value={data.token} copyValue={data.token} onCopy={this.onCopy} />\n                <DetailsAttributeComponent label='Token expiration' value={tokenExpiration} />\n                {this.props.canCreateNewTokens && <Button\n                    onClick={() => this.onGetNewToken()}\n                    color=\"primary\"\n                    size=\"small\"\n                    variant=\"contained\"\n                    className={classes.actionButton}\n                >\n                    GET NEW TOKEN\n                </Button>}\n\n                <Typography paragraph={true}>\n                    Paste the following lines at a shell prompt to set up the necessary environment for Arvados SDKs to authenticate to your account.\n                </Typography>\n                <DefaultCodeSnippet className={classes.codeBlock} lines={[this.getSnippet(data)]} />\n                <CopyToClipboard text={this.getSnippet(data)} onCopy={() => this.onCopy('Shell code block copied')}>\n                    <Button\n                        color=\"primary\"\n                        size=\"small\"\n                        variant=\"contained\"\n                        className={classes.actionButton}\n                    >\n                        Copy to clipboard\n                    </Button>\n                </CopyToClipboard>\n                <Typography>\n                    Arvados\n                    <a href='http://doc.arvados.org/user/reference/api-tokens.html' target='_blank' rel=\"noopener\" className={classes.link}>virtual machines</a>\n                    do this for you automatically. This setup is needed only when you use the API remotely (e.g., from your own workstation).\n                </Typography>\n            </DialogContent>\n            <DialogActions>\n                <Button onClick={closeDialog} className={classes.button} color=\"primary\">CLOSE</Button>\n            </DialogActions>\n        </Dialog>;\n    }\n}\n\nexport const TokenDialog =\n    withStyles(styles)(\n        connect(getTokenDialogData)(\n            withDialog(TOKEN_DIALOG_NAME)(TokenDialogComponent)));\n"
  },
  {
    "path": "services/workbench2/src/views-components/tree-picker/tree-picker.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport Axios from \"axios\";\nimport { mockConfig } from \"common/config\";\nimport { createServices } from \"services/services\";\nimport { createBrowserHistory } from \"history\";\nimport { Provider } from \"react-redux\";\nimport { configureStore } from \"store/store\";\nimport { TreePicker } from \"./tree-picker\";\nimport { initUserProject, receiveTreePickerData, extractGroupContentsNodeData } from \"store/tree-picker/tree-picker-actions\";\nimport { authActions } from \"store/auth/auth-action\";\nimport { ResourceKind } from \"models/resource\";\nimport { updateResources } from \"store/resources/resources-actions\";\nimport { CustomTheme } from \"common/custom-theme\";\nimport { ThemeProvider } from \"@mui/material\";\n\ndescribe('<TreePicker />', () => {\n    let store;\n    let services;\n    const axiosInst = Axios.create({ headers: {} });\n    const config = {};\n    const actions = {\n        progressFn: (id, working) => { },\n        errorFn: (id, message) => { }\n    };\n    const TEST_PICKER_ID = 'testPickerId';\n    const fakeUser = {\n        email: \"test@test.com\",\n        firstName: \"John\",\n        lastName: \"Doe\",\n        uuid: \"zzzzz-tpzed-xurymjxw79nv3jz\",\n        ownerUuid: \"ownerUuid\",\n        username: \"username\",\n        prefs: {},\n        isAdmin: false,\n        isActive: true,\n        canWrite: false,\n        canManage: false,\n    };\n    const renderItem = (item) => (\n        <li data-id={item.id}>{item.data.name}</li>\n    );\n\n    beforeEach(() => {\n        services = createServices(mockConfig({}), actions, axiosInst);\n        store = configureStore(createBrowserHistory(), services, config);\n        store.dispatch(authActions.USER_DETAILS_SUCCESS(fakeUser));\n        store.dispatch(initUserProject(TEST_PICKER_ID));\n    });\n\n    it(\"renders tree picker with initial home project state\", () => {\n        cy.mount(\n            <Provider store={store}>\n              <ThemeProvider theme={CustomTheme}>\n                <TreePicker\n                    pickerId={TEST_PICKER_ID}\n                    render={renderItem}\n                    onContextMenu={() => {}}\n                    toggleItemOpen={() => {}}\n                    toggleItemActive={() => {}}\n                    toggleItemSelection={() => {}}\n                />\n                </ThemeProvider>\n            </Provider>);\n\n        cy.get(`li[data-id=\"${fakeUser.uuid}\"]`).should('have.text', 'Home Projects');\n    });\n\n    it(\"displays item loaded into treePicker store\", () => {\n        const fakeProject = {\n            uuid: \"zzzzz-j7d0g-111111111111111\",\n            name: \"FakeProject\",\n            kind: ResourceKind.PROJECT,\n        };\n\n        store.dispatch(receiveTreePickerData({\n            id: fakeUser.uuid,\n            pickerId: TEST_PICKER_ID,\n            data: [fakeProject],\n            extractNodeData: extractGroupContentsNodeData(false)\n        }));\n\n        cy.mount(\n            <Provider store={store}>\n              <ThemeProvider theme={CustomTheme}>\n                <TreePicker\n                    pickerId={TEST_PICKER_ID}\n                    render={renderItem}\n                    onContextMenu={() => {}}\n                    toggleItemOpen={() => {}}\n                    toggleItemActive={() => {}}\n                    toggleItemSelection={() => {}}\n                />\n  
              </ThemeProvider>\n            </Provider>);\n\n        cy.get(`li[data-id=\"${fakeUser.uuid}\"]`).should('have.text', 'Home Projects');\n        cy.get(`[data-id=\"${fakeProject.uuid}\"]`).should('have.text', 'FakeProject');\n    });\n\n    it(\"preserves treenode name when exists in resources\", () => {\n        const treeProjectResource = {\n            uuid: \"zzzzz-j7d0g-111111111111111\",\n            name: \"FakeProject\",\n            kind: ResourceKind.PROJECT,\n        };\n        const treeProjectResource2 = {\n            uuid: \"zzzzz-j7d0g-222222222222222\",\n            name: \"\",\n            kind: ResourceKind.PROJECT,\n        };\n\n        const storeProjectResource = {\n            ...treeProjectResource,\n            name: \"StoreProjectName\",\n            description: \"Test description\",\n        };\n        const storeProjectResource2 = {\n            ...treeProjectResource2,\n            name: \"StoreProjectName2\",\n            description: \"Test description\",\n        };\n\n        store.dispatch(updateResources([storeProjectResource, storeProjectResource2]));\n        store.dispatch(receiveTreePickerData({\n            id: fakeUser.uuid,\n            pickerId: TEST_PICKER_ID,\n            data: [treeProjectResource, treeProjectResource2],\n            extractNodeData: extractGroupContentsNodeData(false)\n        }));\n\n        cy.mount(\n            <Provider store={store}>\n              <ThemeProvider theme={CustomTheme}>\n                <TreePicker\n                    pickerId={TEST_PICKER_ID}\n                    render={renderItem}\n                    onContextMenu={() => {}}\n                    toggleItemOpen={() => {}}\n                    toggleItemActive={() => {}}\n                    toggleItemSelection={() => {}}\n                />\n                </ThemeProvider>\n            </Provider>);\n\n        cy.get(`[data-id=\"${fakeUser.uuid}\"]`).should('have.text', 'Home Projects');\n        cy.get(`[data-id=\"${treeProjectResource.uuid}\"]`).should('have.text', 'FakeProject');\n        cy.get(`[data-id=\"${treeProjectResource2.uuid}\"]`).should('have.text', '');\n    });\n\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/tree-picker/tree-picker.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { connect } from \"react-redux\";\nimport { TreeComponent, TreeProps, TreeItem} from \"components/tree/tree\";\nimport { RootState } from \"store/store\";\nimport { Dispatch } from \"redux\";\n\ntype Callback<T> = (event: React.MouseEvent<HTMLElement>, item: TreeItem<T>, pickerId: string) => void;\nexport interface TreePickerProps<T> {\n    pickerId: string;\n    onContextMenu: Callback<T>;\n    toggleItemOpen: Callback<T>;\n    toggleItemActive: Callback<T>;\n    toggleItemSelection: Callback<T>;\n}\n\nconst mapStateToProps =\n    <T>(state: RootState, props: TreePickerProps<T>): Pick<TreeProps<T>, 'resources' | 'treePicker' | 'pickerId'> => {\n        return {\n            treePicker: state.treePicker,\n            pickerId: props.pickerId,\n            resources: state.resources,\n        };\n    };\n\nconst mapDispatchToProps = <T>(_: Dispatch, props: TreePickerProps<T>): Pick<TreeProps<T>, 'onContextMenu' | 'toggleItemOpen' | 'toggleItemActive' | 'toggleItemSelection'> => ({\n    onContextMenu: (event, item) => props.onContextMenu(event, item, props.pickerId),\n    toggleItemActive: (event, item) => props.toggleItemActive(event, item, props.pickerId),\n    toggleItemOpen: (event, item) => props.toggleItemOpen(event, item, props.pickerId),\n    toggleItemSelection: (event, item) => props.toggleItemSelection(event, item, props.pickerId),\n});\n\nexport const TreePicker = connect(mapStateToProps, mapDispatchToProps)(TreeComponent);\n\n"
  },
  {
    "path": "services/workbench2/src/views-components/user-dialog/activate-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { activate, ACTIVATE_DIALOG } from 'store/user-profile/user-profile-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(activate(props.data.uuid));\n    }\n});\n\nexport const ActivateDialog = compose(\n    withDialog(ACTIVATE_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);\n"
  },
  {
    "path": "services/workbench2/src/views-components/user-dialog/attributes-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button, Typography, Grid } from \"@mui/material\";\nimport { WithDialogProps } from \"store/dialog/with-dialog\";\nimport { withDialog } from 'store/dialog/with-dialog';\nimport { WithStyles } from '@mui/styles';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport withStyles from '@mui/styles/withStyles';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { compose } from \"redux\";\nimport { USER_ATTRIBUTES_DIALOG } from \"store/users/users-actions\";\nimport { UserResource } from \"models/user\";\n\ntype CssRules = 'rightContainer' | 'leftContainer' | 'spacing';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    rightContainer: {\n        textAlign: 'right',\n        paddingRight: theme.spacing(2),\n        color: theme.palette.grey[\"500\"]\n    },\n    leftContainer: {\n        textAlign: 'left',\n        paddingLeft: theme.spacing(2)\n    },\n    spacing: {\n        paddingTop: theme.spacing(2)\n    },\n});\n\ninterface UserAttributesDataProps {\n    data: UserResource;\n}\n\ntype UserAttributesProps = UserAttributesDataProps & WithStyles<CssRules>;\n\nexport const UserAttributesDialog = compose(\n    withDialog(USER_ATTRIBUTES_DIALOG),\n    withStyles(styles))(\n        (props: WithDialogProps<UserAttributesProps> & UserAttributesProps) =>\n            <Dialog open={props.open}\n                onClose={props.closeDialog}\n                fullWidth\n                maxWidth=\"sm\">\n                <DialogTitle>Attributes</DialogTitle>\n                <DialogContent>\n                    <Typography variant='body1' className={props.classes.spacing}>\n                        {props.data && attributes(props.data, props.classes)}\n                    </Typography>\n                </DialogContent>\n                <DialogActions>\n                    <Button\n                        variant='text'\n                        color='primary'\n                        onClick={props.closeDialog}>\n                        Close\n                </Button>\n                </DialogActions>\n            </Dialog>\n    );\n\nconst attributes = (user: UserResource, classes: any) => {\n    const { uuid, ownerUuid, createdAt, modifiedAt, modifiedByUserUuid,\n        firstName, lastName, username, email, isActive, isAdmin } = user;\n    return (\n        <span>\n            <Grid container direction=\"row\">\n                <Grid item xs={5} className={classes.rightContainer}>\n                    {uuid && <Grid item>Uuid</Grid>}\n                    {firstName && <Grid item>First name</Grid>}\n                    {lastName && <Grid item>Last name</Grid>}\n                    {email && <Grid item>Email</Grid>}\n                    {username && <Grid item>Username</Grid>}\n                    {isActive && <Grid item>Is active</Grid>}\n                    {isAdmin && <Grid item>Is admin</Grid>}\n                    {createdAt && <Grid item>Created at</Grid>}\n                    {modifiedAt && <Grid item>Modified at</Grid>}\n                    {ownerUuid && <Grid item>Owner uuid</Grid>}\n                    {modifiedByUserUuid && <Grid item>Modified by user uuid</Grid>}\n                </Grid>\n                <Grid item xs={7} className={classes.leftContainer}>\n                    <Grid item>{uuid}</Grid>\n                    
<Grid item>{firstName}</Grid>\n                    <Grid item>{lastName}</Grid>\n                    <Grid item>{email}</Grid>\n                    <Grid item>{username}</Grid>\n                    <Grid item>{isActive}</Grid>\n                    <Grid item>{isAdmin}</Grid>\n                    <Grid item>{createdAt}</Grid>\n                    <Grid item>{modifiedAt}</Grid>\n                    <Grid item>{ownerUuid}</Grid>\n                    <Grid item>{modifiedByUserUuid}</Grid>\n                </Grid>\n            </Grid>\n        </span>\n    );\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/user-dialog/deactivate-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { deactivate, DEACTIVATE_DIALOG } from 'store/user-profile/user-profile-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(deactivate(props.data.uuid));\n    }\n});\n\nexport const DeactivateDialog = compose(\n    withDialog(DEACTIVATE_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);\n"
  },
  {
    "path": "services/workbench2/src/views-components/user-dialog/setup-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { setup, SETUP_DIALOG } from 'store/user-profile/user-profile-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(setup(props.data.uuid));\n    }\n});\n\nexport const SetupDialog = compose(\n    withDialog(SETUP_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);\n"
  },
  {
    "path": "services/workbench2/src/views-components/virtual-machines-dialog/add-login-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { compose } from \"redux\";\nimport { reduxForm, InjectedFormProps, Field, GenericField } from 'redux-form';\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { FormDialog } from 'components/form-dialog/form-dialog';\nimport { VIRTUAL_MACHINE_ADD_LOGIN_DIALOG, VIRTUAL_MACHINE_ADD_LOGIN_FORM, addUpdateVirtualMachineLogin, AddLoginFormData, VIRTUAL_MACHINE_ADD_LOGIN_USER_FIELD, VIRTUAL_MACHINE_ADD_LOGIN_GROUPS_FIELD } from 'store/virtual-machines/virtual-machines-actions';\nimport { ParticipantSelect } from 'views-components/sharing-dialog/participant-select';\nimport { GroupArrayInput, GroupArrayDataProps } from 'views-components/virtual-machines-dialog/group-array-input';\n\nexport const VirtualMachineAddLoginDialog = compose(\n    withDialog(VIRTUAL_MACHINE_ADD_LOGIN_DIALOG),\n    reduxForm<AddLoginFormData>({\n        form: VIRTUAL_MACHINE_ADD_LOGIN_FORM,\n        onSubmit: (data, dispatch) => {\n            dispatch(addUpdateVirtualMachineLogin(data));\n        }\n    })\n)(\n    (props: CreateGroupDialogComponentProps) => {\n        const [hasPartialGroupInput, setPartialGroupInput] = React.useState<boolean>(false);\n\n        return <FormDialog\n            dialogTitle={props.data.updating ? \"Update login permission\" : \"Add login permission\"}\n            formFields={AddLoginFormFields}\n            submitLabel={props.data.updating ? \"Update\" : \"Add\"}\n            {...props}\n            data={{\n                ...props.data,\n                setPartialGroupInput,\n                hasPartialGroupInput,\n            }}\n            invalid={props.invalid || hasPartialGroupInput}\n        />;\n    }\n);\n\ntype CreateGroupDialogComponentProps = WithDialogProps<{updating: boolean}> & GroupArrayDataProps & InjectedFormProps<AddLoginFormData>;\n\nconst AddLoginFormFields = (props) => {\n    return <>\n        <ParticipantField\n            name={VIRTUAL_MACHINE_ADD_LOGIN_USER_FIELD}\n            component={props.data.updating ? ReadOnlyUserSelect : UserSelect}\n            excludedParticipants={props.data.excludedParticipants}\n        />\n        <GroupArrayInput\n            name={VIRTUAL_MACHINE_ADD_LOGIN_GROUPS_FIELD}\n            input={{id:\"Add groups to VM login (eg: docker, sudo)\", disabled:false}}\n            required={false}\n            setPartialGroupInput={props.data.setPartialGroupInput}\n            hasPartialGroupInput={props.data.hasPartialGroupInput}\n        />\n    </>;\n}\n\ninterface UserFieldProps {\n    excludedParticipants: string[];\n}\n\nconst ParticipantField = Field as new () => GenericField<UserFieldProps>;\n\nconst UserSelect = (props) =>\n    <ParticipantSelect\n        onlyPeople\n        onlyActive\n        label='Search for user to grant login permission'\n        items={props.input.value ? [props.input.value] : []}\n        excludedParticipants={props.excludedParticipants}\n        onSelect={props.input.onChange}\n        onDelete={() => (props.input.onChange(''))} />;\n\nconst ReadOnlyUserSelect = (props) =>\n        <ParticipantSelect\n            onlyPeople\n            onlyActive\n            label='User'\n            items={props.input.value ? [props.input.value] : []}\n            disabled={true} />;\n"
  },
  {
    "path": "services/workbench2/src/views-components/virtual-machines-dialog/attributes-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { Dialog, DialogTitle, DialogContent, DialogActions, Button, Typography, Grid } from \"@mui/material\";\nimport { WithDialogProps } from \"store/dialog/with-dialog\";\nimport { withDialog } from 'store/dialog/with-dialog';\nimport { VIRTUAL_MACHINE_ATTRIBUTES_DIALOG } from \"store/virtual-machines/virtual-machines-actions\";\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { ArvadosTheme } from 'common/custom-theme';\nimport { compose } from \"redux\";\nimport { VirtualMachinesResource } from \"models/virtual-machines\";\n\ntype CssRules = 'rightContainer' | 'leftContainer' | 'spacing';\n\nconst styles: CustomStyleRulesCallback<CssRules> = (theme: ArvadosTheme) => ({\n    rightContainer: {\n        textAlign: 'right',\n        paddingRight: theme.spacing(2),\n        color: theme.palette.grey[\"500\"]\n    },\n    leftContainer: {\n        textAlign: 'left',\n        paddingLeft: theme.spacing(2)\n    },\n    spacing: {\n        paddingTop: theme.spacing(2)\n    },\n});\n\ninterface VirtualMachineAttributesDataProps {\n    virtualMachineData: VirtualMachinesResource;\n}\n\ntype VirtualMachineAttributesProps = VirtualMachineAttributesDataProps & WithStyles<CssRules>;\n\nexport const VirtualMachineAttributesDialog = compose(\n    withDialog(VIRTUAL_MACHINE_ATTRIBUTES_DIALOG),\n    withStyles(styles))(\n        (props: WithDialogProps<VirtualMachineAttributesProps> & VirtualMachineAttributesProps) =>\n            <Dialog open={props.open}\n                onClose={props.closeDialog}\n                fullWidth\n                maxWidth=\"sm\">\n                <DialogTitle>Attributes</DialogTitle>\n                <DialogContent>\n                    <Typography variant='body1' className={props.classes.spacing}>\n                        {props.data.virtualMachineData && attributes(props.data.virtualMachineData, props.classes)}\n                    </Typography>\n                </DialogContent>\n                <DialogActions>\n                    <Button\n                        variant='text'\n                        color='primary'\n                        onClick={props.closeDialog}>\n                        Close\n                </Button>\n                </DialogActions>\n            </Dialog>\n    );\n\nconst attributes = (virtualMachine: VirtualMachinesResource, classes: any) => {\n    const { uuid, ownerUuid, createdAt, modifiedAt, modifiedByUserUuid, hostname } = virtualMachine;\n    return (\n        <span>\n            <Grid container direction=\"row\">\n                <Grid item xs={5} className={classes.rightContainer}>\n                    <Grid item>Hostname</Grid>\n                    <Grid item>Owner uuid</Grid>\n                    <Grid item>Created at</Grid>\n                    <Grid item>Modified at</Grid>\n                    <Grid item>Modified by user uuid</Grid>\n                    <Grid item>uuid</Grid>\n                </Grid>\n                <Grid item xs={7} className={classes.leftContainer}>\n                    <Grid item>{hostname}</Grid>\n                    <Grid item>{ownerUuid}</Grid>\n                    <Grid item>{createdAt}</Grid>\n                    <Grid item>{modifiedAt}</Grid>\n                    <Grid item>{modifiedByUserUuid}</Grid>\n                    <Grid 
item>{uuid}</Grid>\n                </Grid>\n            </Grid>\n        </span>\n    );\n};\n"
  },
  {
    "path": "services/workbench2/src/views-components/virtual-machines-dialog/group-array-input.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { StringArrayCommandInputParameter } from 'models/workflow';\nimport { Field, GenericField } from 'redux-form';\nimport { GenericInputProps } from 'views/run-process-panel/inputs/generic-input';\nimport { ChipsInput } from 'components/chips-input/chips-input';\nimport { identity } from 'lodash';\nimport { FormGroup, Input, InputLabel, FormControl, FormHelperText } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport classnames from \"classnames\";\nimport { ArvadosTheme } from 'common/custom-theme';\n\nexport interface GroupArrayDataProps {\n  hasPartialGroupInput?: boolean;\n  setPartialGroupInput?: (value: boolean) => void;\n}\n\ninterface GroupArrayFieldProps {\n  commandInput: StringArrayCommandInputParameter;\n}\n\nconst GroupArrayField = Field as new () => GenericField<GroupArrayDataProps & GroupArrayFieldProps>;\n\nexport interface GroupArrayInputProps {\n  name: string;\n  input: StringArrayCommandInputParameter;\n  required: boolean;\n}\n\ntype CssRules = 'chips' | 'partialInputHelper' | 'partialInputHelperVisible';\n\nconst styles = (theme: ArvadosTheme) => ({\n    chips: {\n        marginTop: \"16px\",\n    },\n    partialInputHelper: {\n        textAlign: 'right' as 'right',\n        visibility: 'hidden' as 'hidden',\n        color: theme.palette.error.dark,\n    },\n    partialInputHelperVisible: {\n        visibility: 'visible' as 'visible',\n    }\n});\n\nexport const GroupArrayInput = ({name, input, setPartialGroupInput, hasPartialGroupInput}: GroupArrayInputProps & GroupArrayDataProps) => {\n  return <GroupArrayField\n      name={name}\n      commandInput={input}\n      component={GroupArrayInputComponent as any}\n      setPartialGroupInput={setPartialGroupInput}\n      hasPartialGroupInput={hasPartialGroupInput}\n      />;\n}\n\nconst GroupArrayInputComponent = (props: GenericInputProps & GroupArrayDataProps) => {\n  return (\n      <FormGroup>\n            <FormControl variant=\"standard\" fullWidth error={props.meta.error}>\n              <InputLabel shrink={props.meta.active || props.input.value.length > 0}>{props.commandInput.id}</InputLabel>\n              <StyledInputComponent {...props} />\n            </FormControl>\n        </FormGroup>\n  );\n    };\n\nconst StyledInputComponent = withStyles(styles)(\n  class InputComponent extends React.PureComponent<GenericInputProps & WithStyles<CssRules> & GroupArrayDataProps>{\n      render() {\n          const { classes } = this.props;\n          const { commandInput, input, meta, hasPartialGroupInput } = this.props;\n          return <>\n            <ChipsInput\n                deletable={!commandInput.disabled}\n                orderable={!commandInput.disabled}\n                disabled={commandInput.disabled}\n                values={input.value}\n                onChange={this.handleChange}\n                handleFocus={input.onFocus}\n                createNewValue={identity}\n                inputComponent={Input}\n                chipsClassName={classes.chips}\n                pattern={/[_a-z][-0-9_a-z]*/ig}\n                onPartialInput={this.props.setPartialGroupInput}\n                inputProps={{\n                    error: meta.error || hasPartialGroupInput,\n                }} />\n                <FormHelperText className={classnames([classes.partialInputHelper, 
...(hasPartialGroupInput ? [classes.partialInputHelperVisible] : [])])}>\n                  Press enter to complete group name\n                </FormHelperText>\n          </>;\n      }\n\n      handleChange = (values: {}[]) => {\n        const { input, meta } = this.props;\n          if (!meta.touched) {\n              input.onBlur(values);\n          }\n          input.onChange(values);\n      }\n\n  }\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/virtual-machines-dialog/remove-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { VIRTUAL_MACHINE_REMOVE_DIALOG, removeVirtualMachine } from 'store/virtual-machines/virtual-machines-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeVirtualMachine(props.data.uuid));\n    }\n});\n\nexport const RemoveVirtualMachineDialog = compose(\n    withDialog(VIRTUAL_MACHINE_REMOVE_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);"
  },
  {
    "path": "services/workbench2/src/views-components/virtual-machines-dialog/remove-login-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { VIRTUAL_MACHINE_REMOVE_LOGIN_DIALOG, removeVirtualMachineLogin, loadVirtualMachinesAdminData } from 'store/virtual-machines/virtual-machines-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeVirtualMachineLogin(props.data.uuid));\n        dispatch<any>(loadVirtualMachinesAdminData());\n    }\n});\n\nexport const RemoveVirtualMachineLoginDialog = compose(\n    withDialog(VIRTUAL_MACHINE_REMOVE_LOGIN_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);\n"
  },
  {
    "path": "services/workbench2/src/views-components/webdav-s3-dialog/webdav-s3-dialog.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from 'react';\nimport { ThemeProvider, StyledEngineProvider } from '@mui/material';\nimport { CustomTheme } from 'common/custom-theme';\nimport { WebDavS3InfoDialog } from './webdav-s3-dialog';\nimport { COLLECTION_WEBDAV_S3_DIALOG_NAME } from 'store/collections/collection-info-actions';\nimport { Provider } from \"react-redux\";\nimport { createStore, combineReducers } from 'redux';\n\ndescribe('WebDavS3InfoDialog', () => {\n    let props;\n    let store;\n\n    beforeEach(() => {\n        const initialDialogState = {\n            [COLLECTION_WEBDAV_S3_DIALOG_NAME]: {\n                open: true,\n                data: {\n                    uuid: \"zzzzz-4zz18-b1f8tbldjrm8885\",\n                    token: \"v2/zzzzb-jjjjj-123123/xxxtokenxxx\",\n                    downloadUrl: \"https://download.example.com\",\n                    collectionsUrl: \"https://collections.example.com\",\n                    localCluster: \"zzzzz\",\n                    username: \"bobby\",\n                    activeTab: 0,\n                    setActiveTab: (event, tabNr) => { }\n                }\n            }\n        };\n        const initialAuthState = {\n            localCluster: \"zzzzz\",\n            remoteHostsConfig: {},\n            sessions: {},\n        };\n        store = createStore(combineReducers({\n            dialog: (state = initialDialogState, action) => state,\n            auth: (state = initialAuthState, action) => state,\n        }));\n\n        props = {\n            classes: {\n                details: 'details',\n            }\n        };\n    });\n\n    it('render cyberduck tab', () => {\n        store.getState().dialog[COLLECTION_WEBDAV_S3_DIALOG_NAME].data.activeTab = 0;\n        // when\n        cy.mount(\n            <StyledEngineProvider injectFirst>\n                <ThemeProvider theme={CustomTheme}>\n                    <Provider store={store}>\n                        <WebDavS3InfoDialog {...props} />\n                    </Provider>\n                </ThemeProvider>\n            </StyledEngineProvider>\n        );\n\n        // then\n        cy.contains(\"davs://bobby@download.example.com/c=zzzzz-4zz18-b1f8tbldjrm8885\").should('exist');\n    });\n\n    it('render win/mac tab', () => {\n        store.getState().dialog[COLLECTION_WEBDAV_S3_DIALOG_NAME].data.activeTab = 1;\n        // when\n        cy.mount(\n            <StyledEngineProvider injectFirst>\n                <ThemeProvider theme={CustomTheme}>\n                    <Provider store={store}>\n                        <WebDavS3InfoDialog {...props} />\n                    </Provider>\n                </ThemeProvider>\n            </StyledEngineProvider>\n        );\n\n        // then\n        cy.contains(\"https://download.example.com/c=zzzzz-4zz18-b1f8tbldjrm8885\").should('exist');\n    });\n\n    it('render s3 tab with federated token', () => {\n        store.getState().dialog[COLLECTION_WEBDAV_S3_DIALOG_NAME].data.activeTab = 2;\n        // when\n        cy.mount(\n            <StyledEngineProvider injectFirst>\n                <ThemeProvider theme={CustomTheme}>\n                    <Provider store={store}>\n                        <WebDavS3InfoDialog {...props} />\n                    </Provider>\n                </ThemeProvider>\n            </StyledEngineProvider>\n        );\n\n        // then\n        cy.contains(\"Secret 
Keyv2_zzzzb-jjjjj-123123_xxxtokenxxx\").should('exist');\n    });\n\n    it('render s3 tab with local token', () => {\n        store.getState().dialog[COLLECTION_WEBDAV_S3_DIALOG_NAME].data.activeTab = 2;\n        store.getState().dialog[COLLECTION_WEBDAV_S3_DIALOG_NAME].data.token = \"v2/zzzzz-jjjjj-123123/xxxtokenxxx\";\n        // when\n        cy.mount(\n            <StyledEngineProvider injectFirst>\n                <ThemeProvider theme={CustomTheme}>\n                    <Provider store={store}>\n                        <WebDavS3InfoDialog {...props} />\n                    </Provider>\n                </ThemeProvider>\n            </StyledEngineProvider>\n        );\n\n        // then\n        cy.contains(\"Access Keyzzzzz-jjjjj-123123Secret Keyxxxtokenxxx\").should('exist');\n    });\n\n    it('render cyberduck tab with wildcard DNS', () => {\n        store.getState().dialog[COLLECTION_WEBDAV_S3_DIALOG_NAME].data.activeTab = 0;\n        store.getState().dialog[COLLECTION_WEBDAV_S3_DIALOG_NAME].data.collectionsUrl = \"https://*.collections.example.com\";\n        // when\n        cy.mount(\n            <StyledEngineProvider injectFirst>\n                <ThemeProvider theme={CustomTheme}>\n                    <Provider store={store}>\n                        <WebDavS3InfoDialog {...props} />\n                    </Provider>\n                </ThemeProvider>\n            </StyledEngineProvider>\n        );\n\n        // then\n        cy.contains(\"davs://bobby@zzzzz-4zz18-b1f8tbldjrm8885.collections.example.com\").should('exist');\n    });\n\n});\n"
  },
  {
    "path": "services/workbench2/src/views-components/webdav-s3-dialog/webdav-s3-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport React from \"react\";\nimport { CustomStyleRulesCallback } from 'common/custom-theme';\nimport { Dialog, DialogActions, Button, CardHeader, Tab, Tabs } from '@mui/material';\nimport { WithStyles } from '@mui/styles';\nimport withStyles from '@mui/styles/withStyles';\nimport { withDialog } from \"store/dialog/with-dialog\";\nimport { COLLECTION_WEBDAV_S3_DIALOG_NAME, WebDavS3InfoDialogData } from 'store/collections/collection-info-actions';\nimport { WithDialogProps } from 'store/dialog/with-dialog';\nimport { compose } from 'redux';\nimport { DetailsAttribute } from \"components/details-attribute/details-attribute\";\nimport { DownloadIcon } from \"components/icon/icon\";\nimport { DefaultCodeSnippet } from \"components/default-code-snippet/default-code-snippet\";\n\nexport type CssRules = 'details' | 'downloadButton' | 'detailsAttrValWithCode';\n\nconst styles: CustomStyleRulesCallback<CssRules> = theme => ({\n    details: {\n        marginLeft: theme.spacing(3),\n        marginRight: theme.spacing(3),\n    },\n    downloadButton: {\n        marginTop: theme.spacing(2),\n    },\n    detailsAttrValWithCode: {\n        display: \"flex\",\n        alignItems: \"center\",\n    }\n});\n\ninterface TabPanelData {\n    children: React.ReactElement<any>[];\n    value: number;\n    index: number;\n}\n\nfunction TabPanel(props: TabPanelData) {\n    const { children, value, index } = props;\n\n    return (\n        <div\n            role=\"tabpanel\"\n            hidden={value !== index}\n            id={`simple-tabpanel-${index}`}\n            aria-labelledby={`simple-tab-${index}`}\n        >\n            {value === index && children}\n        </div>\n    );\n}\n\nconst isValidIpAddress = (ipAddress: string): Boolean => {\n    if (/^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/.test(ipAddress)) {\n        return true;\n    }\n\n    return false;\n};\n\nconst mountainduckTemplate = ({\n    uuid,\n    username,\n    cyberDavStr,\n    collectionsUrl\n}: any) => {\n\n    return `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n        <!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n        <plist version=\"1.0\">\n        <dict>\n            <key>Protocol</key>\n            <string>davs</string>\n            <key>Provider</key>\n            <string>iterate GmbH</string>\n            <key>UUID</key>\n            <string>${uuid}</string>\n            <key>Hostname</key>\n            <string>${collectionsUrl.replace('https://', ``).replace('*', uuid).split(':')[0]}</string>\n            <key>Port</key>\n            <string>${(cyberDavStr.split(':')[2] || '443').split('/')[0]}</string>\n            <key>Username</key>\n            <string>${username}</string>${isValidIpAddress(collectionsUrl.replace('https://', ``).split(':')[0]) ?\n            `\n            <key>Path</key>\n            <string>/c=${uuid}</string>` : ''}\n            <key>Labels</key>\n            <array>\n            </array>\n        </dict>\n        </plist>`.split(/\\r?\\n/).join('\\n');\n};\n\nconst downloadMountainduckFileHandler = (filename: string, text: string) => {\n    const element = document.createElement('a');\n    element.setAttribute('href', 'data:text/plain;charset=utf-8,' + encodeURIComponent(text));\n    
element.setAttribute('download', filename);\n\n    element.style.display = 'none';\n    document.body.appendChild(element);\n\n    element.click();\n\n    document.body.removeChild(element);\n};\n\nexport const WebDavS3InfoDialog = compose(\n    withDialog(COLLECTION_WEBDAV_S3_DIALOG_NAME),\n    withStyles(styles),\n)(\n    (props: WithDialogProps<WebDavS3InfoDialogData> & WithStyles<CssRules>) => {\n        if (!props.data.downloadUrl) { return null; }\n\n        let winDav;\n        let cyberDav;\n\n        if (props.data.collectionsUrl.indexOf(\"*\") > -1) {\n            const withuuid = props.data.collectionsUrl.replace(\"*\", props.data.uuid);\n            winDav = new URL(withuuid);\n            cyberDav = new URL(withuuid);\n        } else {\n            winDav = new URL(props.data.downloadUrl);\n            cyberDav = new URL(props.data.downloadUrl);\n            winDav.pathname = `/c=${props.data.uuid}`;\n            cyberDav.pathname = `/c=${props.data.uuid}`;\n        }\n\n        cyberDav.username = props.data.username;\n        const cyberDavStr = \"dav\" + cyberDav.toString().slice(4);\n\n        const s3endpoint = new URL(props.data.collectionsUrl.replace(/\\/\\*(--[^.]+)?\\./, \"/\"));\n\n        const sp = props.data.token.split(\"/\");\n        let tokenUuid;\n        let tokenSecret;\n        if (sp.length === 3 && sp[0] === \"v2\" && sp[1].slice(0, 5) === props.data.localCluster) {\n            tokenUuid = sp[1];\n            tokenSecret = sp[2];\n        } else {\n            tokenUuid = props.data.token.replace(/\\//g, \"_\");\n            tokenSecret = tokenUuid;\n        }\n\n        const isCollection = (props.data.uuid.indexOf(\"-4zz18-\") === 5);\n\n        let activeTab = props.data.activeTab;\n        if (!isCollection) {\n            activeTab = 2;\n        }\n\n        const wgetCommand = `wget --http-user=${props.data.username} --http-passwd=${props.data.token} --mirror --no-parent --no-host --cut-dirs=0 ${winDav.toString()}`;\n        const curlCommand = `curl -O -u ${props.data.username}:${props.data.token} ${winDav.toString()}`;\n\n        return <Dialog\n            open={props.open}\n            maxWidth=\"md\"\n            onClose={props.closeDialog}\n            style={{ alignSelf: 'stretch' }}>\n            <CardHeader\n                title={`Open with 3rd party client`} />\n            <div className={props.classes.details} >\n                <Tabs value={activeTab} onChange={props.data.setActiveTab}>\n                    {isCollection && <Tab value={0} key=\"cyberduck\" label=\"WebDAV\" />}\n                    {isCollection && <Tab value={1} key=\"windows\" label=\"Windows or MacOS\" />}\n                    <Tab value={2} key=\"s3\" label=\"S3 bucket\" />\n                    {isCollection && <Tab value={3} key=\"cli\" label=\"wget / curl\" />}\n                </Tabs>\n\n                <TabPanel index={1} value={activeTab}>\n                    <h2>Settings</h2>\n\n                    <DetailsAttribute\n                        label='Internet address'\n                        value={<a href={winDav.toString()} target=\"_blank\" rel=\"noopener noreferrer\">{winDav.toString()}</a>}\n                        copyValue={winDav.toString()} />\n\n                    <DetailsAttribute\n                        label='Username'\n                        value={props.data.username}\n                        copyValue={props.data.username} />\n\n                    <DetailsAttribute\n                        label='Password'\n                        
value={props.data.token}\n                        copyValue={props.data.token} />\n\n                    <h3>Windows</h3>\n                    <ol>\n                        <li>Open File Explorer</li>\n                        <li>Click on \"This PC\", then go to Computer &rarr; Add a Network Location</li>\n                        <li>Click Next, then choose \"Add a custom network location\", then click Next</li>\n                        <li>Use the \"internet address\" and credentials listed under Settings, above</li>\n                    </ol>\n\n                    <h3>MacOS</h3>\n                    <ol>\n                        <li>Open Finder</li>\n                        <li>Click Go &rarr; Connect to server</li>\n                        <li>Use the \"internet address\" and credentials listed under Settings, above</li>\n                    </ol>\n                </TabPanel>\n\n                <TabPanel index={0} value={activeTab}>\n                    <DetailsAttribute\n                        label='Server'\n                        value={<a href={cyberDavStr} target=\"_blank\" rel=\"noopener noreferrer\">{cyberDavStr}</a>}\n                        copyValue={cyberDavStr} />\n\n                    <DetailsAttribute\n                        label='Username'\n                        value={props.data.username}\n                        copyValue={props.data.username} />\n\n                    <DetailsAttribute\n                        label='Password'\n                        value={props.data.token}\n                        copyValue={props.data.token} />\n\n                    <h3>Cyberduck/Mountain Duck</h3>\n\n                    <Button\n                        data-cy='download-button'\n                        className={props.classes.downloadButton}\n                        onClick={() => downloadMountainduckFileHandler(`${props.data.collectionName || props.data.uuid}.duck`, mountainduckTemplate({ ...props.data, cyberDavStr }))}\n                        variant='contained'\n                        color='primary'\n                        size='small'>\n                        <DownloadIcon />\n                        Download Cyber/Mountain Duck bookmark\n                    </Button>\n\n                    <h3>GNOME</h3>\n                    <ol>\n                        <li>Open Files</li>\n                        <li>Select +Other Locations</li>\n                        <li>Connect to Server &rarr; Enter server address</li>\n                    </ol>\n\n                </TabPanel>\n\n                <TabPanel index={2} value={activeTab}>\n                    <DetailsAttribute\n                        label='Endpoint'\n                        value={s3endpoint.host}\n                        copyValue={s3endpoint.host} />\n\n                    <DetailsAttribute\n                        label='Bucket'\n                        value={props.data.uuid}\n                        copyValue={props.data.uuid} />\n\n                    <DetailsAttribute\n                        label='Access Key'\n                        value={tokenUuid}\n                        copyValue={tokenUuid} />\n\n                    <DetailsAttribute\n                        label='Secret Key'\n                        value={tokenSecret}\n                        copyValue={tokenSecret} />\n\n                </TabPanel>\n\n                <TabPanel index={3} value={activeTab}>\n\n                    <DetailsAttribute\n                        label='Wget command'\n                        
copyValue={wgetCommand}\n                        classValue={props.classes.detailsAttrValWithCode}>\n                        <DefaultCodeSnippet\n                            lines={[wgetCommand]} />\n                    </DetailsAttribute>\n\n                    <DetailsAttribute\n                        label='Curl command'\n                        copyValue={curlCommand}\n                        classValue={props.classes.detailsAttrValWithCode}>\n                        <DefaultCodeSnippet\n                            lines={[curlCommand]} />\n                    </DetailsAttribute>\n\n                    <p>\n                        Note: This curl command downloads single files.\n                        Append the desired filename to the end of the URL.\n                    </p>\n\n                </TabPanel>\n\n            </div>\n            <DialogActions>\n                <Button\n                    variant='text'\n                    color='primary'\n                    onClick={props.closeDialog}>\n                    Close\n                </Button>\n            </DialogActions>\n\n        </Dialog >;\n    }\n);\n"
  },
  {
    "path": "services/workbench2/src/views-components/workflow-remove-dialog/workflow-remove-dialog.tsx",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { Dispatch, compose } from 'redux';\nimport { connect } from \"react-redux\";\nimport { ConfirmationDialog } from \"components/confirmation-dialog/confirmation-dialog\";\nimport { withDialog, WithDialogProps } from \"store/dialog/with-dialog\";\nimport { removeWorkflowPermanently, REMOVE_WORKFLOW_DIALOG } from 'store/workflow-panel/workflow-panel-actions';\n\nconst mapDispatchToProps = (dispatch: Dispatch, props: WithDialogProps<any>) => ({\n    onConfirm: () => {\n        props.closeDialog();\n        dispatch<any>(removeWorkflowPermanently(props.data.uuid));\n    }\n});\n\nexport const RemoveWorkflowDialog = compose(\n    withDialog(REMOVE_WORKFLOW_DIALOG),\n    connect(null, mapDispatchToProps)\n)(ConfirmationDialog);\n"
  },
  {
    "path": "services/workbench2/src/websocket/resource-event-message.ts",
    "content": "import { LogEventType } from '../models/log';\n// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nexport interface ResourceEventMessage<Properties = {}> {\n    eventAt: string;\n    eventType: LogEventType;\n    id: string;\n    msgID: string;\n    objectKind: string;\n    objectOwnerUuid: string;\n    objectUuid: string;\n    properties: Properties;\n    uuid: string;\n}\n"
  },
  {
    "path": "services/workbench2/src/websocket/websocket-service.cy.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport Axios from \"axios\";\nimport { mockConfig } from \"common/config\";\nimport { createBrowserHistory } from \"history\";\nimport { configureStore } from \"store/store\";\nimport { createServices } from \"services/services\";\nimport { initWebSocket } from \"./websocket\"\nimport { ResourceKind } from \"models/resource\";\nimport { WebSocketService } from \"./websocket-service\";\n\ndescribe('WebSocketService', () => {\n\n    let mockAuthService;\n    let webSocketStub;\n\n    beforeEach(() => {\n        webSocketStub = (url) => {\n            // Not testing the open delay so we start it as OPEN instead of CONNECTING\n            let readyState = WebSocket.OPEN;\n            const eventListeners = {};\n\n            const fakeWebSocket = {\n                url,\n                readyState,\n                send: cy.stub().as('send'),\n                // Receive method for testing, triggers message event listeners\n                receive: (data) => {\n                    eventListeners['message'].forEach(callback => callback({\n                        data: JSON.stringify(data)\n                    }));\n                },\n                close: cy.stub().callsFake(() => {\n                    readyState = WebSocket.CLOSED;\n                }),\n                addEventListener: (event, callback) => {\n                    if (!eventListeners[event]) {\n                        eventListeners[event] = [];\n                    }\n                    eventListeners[event].push(callback);\n                },\n            };\n\n            // Use settimeout to allow open callback to be set after WS is\n            // constructed so that the callback is set when fired\n            // Setting OPEN would be correct here but we aren't testing the\n            // connection delay\n            setTimeout(() => {\n                if (eventListeners['open']) {\n                    eventListeners['open'].forEach(callback => callback());\n                }\n            }, 0);\n\n            return fakeWebSocket;\n        }\n\n        // Stub the global WebSocket\n        cy.stub(window, 'WebSocket', url => webSocketStub(url));\n\n        // Mock auth service\n        mockAuthService = {\n            getApiToken: cy.stub().returns('mock-token'),\n        };\n    });\n\n    afterEach(() => {\n        // Clear out singleton instance in between tests\n        WebSocketService['instance'] = undefined;\n    });\n\n    it('should operate as a singleton and allow externally checking connection status', () => {\n        const webSocketService = WebSocketService.getInstance();\n        // Verify isActive is false\n        expect(webSocketService.isActive()).to.be.false;\n\n        // Connect the WebSocket\n        webSocketService.connect('wss://mockurl', mockAuthService);\n\n        // Check that connection is established\n        expect(webSocketService.isActive()).to.be.true;\n\n        // Verify singleton behavior\n        const anotherInstance = WebSocketService.getInstance();\n        expect(anotherInstance).to.equal(webSocketService); // Should be the same instance\n        expect(anotherInstance.isActive()).to.be.true; // Should also reflect the active connection\n    });\n\n    it('should fire open callback after connecting', () => {\n        const webSocketService = WebSocketService.getInstance();\n        // Verify isActive is false\n        
expect(webSocketService.isActive()).to.be.false;\n\n        // Connect the WebSocket\n        webSocketService.connect('wss://mockurl', mockAuthService);\n\n        // Check that connection is established\n        expect(webSocketService.isActive()).to.be.true;\n\n        // Check that the service sent a subscribe request after open\n        cy.get('@send').should('have.been.calledWith', '{\"method\":\"subscribe\"}');\n    });\n\n    it('throttles calls to DE', () => {\n        // For real store and services\n        const storeConfig = {};\n        const actions = {\n            progressFn: (id, working) => { },\n            errorFn: (id, message) => { }\n        };\n\n        // Create real store\n        let axiosInst = Axios.create({ headers: {} });\n        let services = createServices(mockConfig({}), actions, axiosInst);\n        let store = configureStore(createBrowserHistory(), services, storeConfig);\n        cy.stub(store, 'getState').callsFake(() => {\n            return {\n                // Rest of store is not really needed\n                // also calling store.getState here infinite loops\n                router: {\n                    location: {\n                        // Fake all processes page so that container WS updates trigger DE refresh\n                        pathname: '/all_processes',\n                    },\n                },\n            };\n        });\n\n        const wsConfig = { websocketUrl: \"wss://mockurl\" };\n        const fakeDispatch = cy.stub(store, 'dispatch');\n\n        initWebSocket(wsConfig, mockAuthService, store);\n        const webSocketService = WebSocketService.getInstance();\n\n        // Verify isActive is true\n        expect(webSocketService.isActive()).to.be.true;\n        // Expect no calls so far\n        expect(fakeDispatch.callCount).to.equal(0);\n\n        // Send 5 WS messages\n        for (let i = 0; i < 5; i++) {\n            webSocketService.internal_getWsInstance().receive({\n                event_type: \"update\",\n                objectKind: ResourceKind.CONTAINER,\n            });\n        }\n\n        // Expect only 1 dispatch call to refresh the DE\n        expect(fakeDispatch.callCount).to.equal(1);\n    });\n});\n"
  },
  {
    "path": "services/workbench2/src/websocket/websocket-service.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { AuthService } from 'services/auth-service/auth-service';\nimport { ResourceEventMessage } from './resource-event-message';\nimport { camelCase } from 'lodash';\nimport { CommonResourceService } from \"services/common-service/common-resource-service\";\n\ntype MessageListener = (message: ResourceEventMessage) => void;\n\nexport class WebSocketService {\n    private static instance: WebSocketService;\n\n    private ws: WebSocket;\n    private messageListener: MessageListener;\n    private url: string;\n    private authService: AuthService\n\n    /**\n     * Empty constructor so that consumers checking for WS initialization need\n     * not pass in configuration\n     */\n    private constructor() {}\n\n    /**\n     * Gets the singleton WebSocketService instance\n     * @returns The singleton WebSocketService\n     */\n    public static getInstance() {\n        if (this.instance) {\n            return this.instance;\n        }\n        this.instance = new WebSocketService();\n        return this.instance;\n    }\n\n    /**\n     * Testing method to access the WS instance\n     * @returns The internal WebSocket instance. Used for testing purposes.\n     */\n    public internal_getWsInstance() {\n        return this.ws;\n    }\n\n    /**\n     * Sets connection params, starts WS connection, and attaches handlers\n     * @param url WS url\n     * @param authService Auth service containing API token\n     */\n    public connect(url: string, authService: AuthService) {\n        if (this.ws) {\n            this.ws.close();\n        }\n        this.url = url;\n        this.authService = authService;\n        this.ws = new WebSocket(this.getUrl());\n        this.ws.addEventListener('message', this.handleMessage);\n        this.ws.addEventListener('open', this.handleOpen);\n    }\n\n    public setMessageListener = (listener: MessageListener) => {\n        this.messageListener = listener;\n    }\n\n    /**\n     * Returns true if the WS is in any active state, including \"CLOSING\"\n     * Useful to prevent re-initialization before WS is closed\n     * Only returns false if the WS is not initialized or fully closed\n     * @returns whether the WebSocket is initialized or in transition state\n     */\n    isInitialized = (): boolean => {\n        return !!this.ws && this.ws.readyState !== WebSocket.CLOSED;\n    }\n\n    /**\n     * Returns true only if the WebSocket connection is active\n     * Returns false in any other state, including connecting and closing\n     * @returns whether the WebSocket is active\n     */\n    isActive = (): boolean => {\n        return !!this.ws && this.ws.readyState === WebSocket.OPEN;\n    }\n\n    private getUrl() {\n        return `${this.url}?api_token=${this.authService.getApiToken()}`;\n    }\n\n    private handleMessage = (event: MessageEvent) => {\n        if (this.messageListener) {\n            const data = JSON.parse(event.data);\n            const message = CommonResourceService.mapKeys(camelCase)(data);\n            this.messageListener(message);\n        }\n    }\n\n    private handleOpen = () => {\n        this.ws.send('{\"method\":\"subscribe\"}');\n    }\n}\n"
  },
  {
    "path": "services/workbench2/src/websocket/websocket.ts",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nimport { RootStore } from \"store/store\";\nimport { AuthService } from \"services/auth-service/auth-service\";\nimport { Config } from \"common/config\";\nimport { WebSocketService } from \"./websocket-service\";\nimport { ResourceEventMessage } from \"./resource-event-message\";\nimport { ResourceKind } from \"models/resource\";\nimport { loadProcess } from \"store/process-panel/process-panel-actions\";\nimport { getProcess, getSubprocesses } from \"store/processes/process\";\nimport { LogEventType } from \"models/log\";\nimport { subprocessPanelActions } from \"store/subprocess-panel/subprocess-panel-actions\";\nimport { projectPanelDataActions } from \"store/project-panel/project-panel-action-bind\";\nimport { getProjectPanelCurrentUuid } from \"store/project-panel/project-panel\";\nimport { allProcessesPanelActions } from \"store/all-processes-panel/all-processes-panel-action\";\nimport { loadCollection } from \"store/workbench/workbench-actions\";\nimport { matchAllProcessesRoute, matchProjectRoute, matchProcessRoute } from \"routes/routes\";\nimport { throttle } from \"lodash\";\n\ntype ThrottleSet = {\n    subProcess: () => void;\n    allProcesses: () => void;\n    project: () => void;\n    process: (uuid: string) => void;\n    collection: (uuid: string) => void;\n};\n\nconst THROTTLE_INTERVAL = 15000;\n\nexport const initWebSocket = (config: Config, authService: AuthService, store: RootStore) => {\n    if (config.websocketUrl) {\n        const webSocketService = WebSocketService.getInstance();\n\n        // Throttles must be constructed once\n        const throttleSet: ThrottleSet = {\n            subProcess: throttle(() => {\n                store.dispatch(subprocessPanelActions.REQUEST_ITEMS(false, true));\n            }, THROTTLE_INTERVAL),\n            allProcesses: throttle(() => {\n                store.dispatch(allProcessesPanelActions.REQUEST_ITEMS(false, true));\n            }, THROTTLE_INTERVAL),\n            project: throttle(() => {\n                store.dispatch(projectPanelDataActions.REQUEST_ITEMS(false, true));\n            }, THROTTLE_INTERVAL),\n            process: throttle((uuid: string) => {\n                store.dispatch(loadProcess(uuid));\n            }, THROTTLE_INTERVAL),\n            collection: throttle((uuid: string) => {\n                store.dispatch(loadCollection(uuid));\n            }, THROTTLE_INTERVAL),\n        };\n\n        webSocketService.setMessageListener(messageListener(store, throttleSet));\n        webSocketService.connect(config.websocketUrl, authService);\n    } else {\n        console.warn(\"WARNING: Websocket ExternalURL is not set on the cluster config.\");\n    }\n};\n\nconst messageListener = (store: RootStore, throttles: ThrottleSet) => (message: ResourceEventMessage) => {\n    if (message.eventType === LogEventType.CREATE || message.eventType === LogEventType.UPDATE) {\n        const state = store.getState();\n        const location = state.router.location ? 
state.router.location.pathname : \"\";\n\n        switch (message.objectKind) {\n            case ResourceKind.COLLECTION:\n                const currentCollection = state.collectionPanel.item;\n                if (currentCollection && currentCollection.uuid === message.objectUuid) {\n                    throttles.collection(message.objectUuid);\n                }\n                return;\n            case ResourceKind.CONTAINER_REQUEST:\n                if (matchProcessRoute(location)) {\n                    // Currently viewing updated process\n                    if (state.processPanel.containerRequestUuid === message.objectUuid) {\n                        throttles.process(message.objectUuid);\n                    }\n                    // New child process\n                    const proc = getProcess(state.processPanel.containerRequestUuid)(state.resources);\n                    if (proc && proc.container && proc.container.uuid === message.properties[\"new_attributes\"][\"requesting_container_uuid\"]) {\n                        throttles.subProcess();\n                        return;\n                    }\n                }\n            // fall through, this will happen for container requests as well.\n            case ResourceKind.CONTAINER:\n                if (matchProcessRoute(location)) {\n                    // refresh only if this is a subprocess of the currently displayed process.\n                    const subproc = getSubprocesses(state.processPanel.containerRequestUuid)(state.resources);\n                    for (const sb of subproc) {\n                        if (sb.containerRequest.uuid === message.objectUuid || (sb.container && sb.container.uuid === message.objectUuid)) {\n                            throttles.subProcess();\n                            break;\n                        }\n                    }\n                }\n                if (matchAllProcessesRoute(location)) {\n                    throttles.allProcesses();\n                }\n                if (matchProjectRoute(location) && message.objectOwnerUuid === getProjectPanelCurrentUuid(state)) {\n                    throttles.project();\n                }\n                return;\n            default:\n                return;\n        }\n    }\n};\n"
  },
  {
    "path": "services/workbench2/tools/arvados_config.yml",
    "content": "Clusters:\n  zzzzz:\n    ManagementToken: e687950a23c3a9bceec28c6223a06c79\n    SystemRootToken: systemusertesttoken1234567890aoeuidhtnsqjkxbmwvzpy\n    API:\n      RequestTimeout: 30s\n      VocabularyPath: \"\"\n      MaxTokenLifetime: 24h\n    TLS:\n      Insecure: true\n    Collections:\n      CollectionVersioning: true\n      PreserveVersionIfIdle: -1s\n      BlobSigningKey: zfhgfenhffzltr9dixws36j1yhksjoll2grmku38mi7yxd66h5j4q9w4jzanezacp8s6q0ro3hxakfye02152hncy6zml2ed0uc\n      TrustAllContent: true\n      ForwardSlashNameSubstitution: /\n      ManagedProperties:\n        original_owner_uuid: {Function: original_owner, Protected: true}\n    Login:\n      TrustPrivateNetworks: true\n      Test:\n        Enable: true\n        Users:\n          randomuser1234:\n            Email: randomuser1234@example.invalid\n            Password: topsecret\n    StorageClasses:\n      default:\n        Default: true\n      foo: {}\n      bar: {}\n    Volumes:\n      zzzzz-nyw5e-000000000000000:\n        StorageClasses:\n          default: true\n          foo: true\n      zzzzz-nyw5e-000000000000001:\n        StorageClasses:\n          default: true\n          bar: true\n"
  },
  {
    "path": "services/workbench2/tools/example-vocabulary.json",
    "content": "{\n    \"strict_tags\": false,\n    \"tags\": {\n        \"IDTAGFRUITS\": {\n            \"strict\": false,\n            \"labels\": [\n                {\"label\": \"Fruit\"}\n            ],\n            \"values\": {\n                \"IDVALFRUITS1\": {\n                    \"labels\": [\n                        {\"label\": \"Pineapple\"}\n                    ]\n                },\n                \"IDVALFRUITS2\": {\n                    \"labels\": [\n                        {\"label\": \"Tomato\"}\n                    ]\n                },\n                \"IDVALFRUITS3\": {\n                    \"labels\": [\n                        {\"label\": \"Orange\"}\n                    ]\n                },\n                \"IDVALFRUITS4\": {\n                    \"labels\": [\n                        {\"label\": \"Banana\"}\n                    ]\n                },\n                \"IDVALFRUITS5\": {\n                    \"labels\": [\n                        {\"label\": \"Advocado\"}\n                    ]\n                },\n                \"IDVALFRUITS6\": {\n                    \"labels\": [\n                        {\"label\": \"Lemon\"}\n                    ]\n                },\n                \"IDVALFRUITS7\": {\n                    \"labels\": [\n                        {\"label\": \"Apple\"}\n                    ]\n                },\n                \"IDVALFRUITS8\": {\n                    \"labels\": [\n                        {\"label\": \"Peach\"}\n                    ]\n                },\n                \"IDVALFRUITS9\": {\n                    \"labels\": [\n                        {\"label\": \"Strawberry\"}\n                    ]\n                }\n            }\n        },\n        \"IDTAGANIMALS\": {\n            \"strict\": false,\n            \"labels\": [\n                {\"label\": \"Animal\" },\n                {\"label\": \"Creature\"}\n            ],\n            \"values\": {\n                \"IDVALANIMALS1\": {\n                    \"labels\": [\n                        {\"label\": \"Human\"},\n                        {\"label\": \"Homo sapiens\"}\n                    ]\n                },\n                \"IDVALANIMALS2\": {\n                    \"labels\": [\n                        {\"label\": \"Dog\"},\n                        {\"label\": \"Canis lupus familiaris\"}\n                    ]\n                },\n                \"IDVALANIMALS3\": {\n                    \"labels\": [\n                        {\"label\": \"Elephant\"},\n                        {\"label\": \"Loxodonta\"}\n                    ]\n                },\n                \"IDVALANIMALS4\": {\n                    \"labels\": [\n                        {\"label\": \"Eagle\"},\n                        {\"label\": \"Haliaeetus leucocephalus\"}\n                    ]\n                }\n            }\n        },\n        \"IDTAGCOLORS\": {\n            \"strict\": false,\n            \"labels\": [\n                {\"label\": \"Color\"}\n            ],\n            \"values\": {\n                \"IDVALCOLORS1\": {\n                    \"labels\": [\n                        {\"label\": \"Yellow\"}\n                    ]\n                },\n                \"IDVALCOLORS2\": {\n                    \"labels\": [\n                        {\"label\": \"Red\"}\n                    ]\n                },\n                \"IDVALCOLORS3\": {\n                    \"labels\": [\n                        {\"label\": \"Magenta\"}\n                    ]\n                },\n         
       \"IDVALCOLORS4\": {\n                    \"labels\": [\n                        {\"label\": \"Green\"}\n                    ]\n                }\n            }\n        },\n        \"IDTAGCOMMENT\": {\n            \"labels\": [\n                {\"label\": \"Comment\"},\n                {\"label\": \"Text\"}\n            ]\n        },\n        \"IDTAGCATEGORIES\": {\n            \"strict\": true,\n            \"labels\": [\n                {\"label\": \"Category\"}\n            ],\n            \"values\": {\n                \"IDTAGCAT1\": {\n                    \"labels\": [\n                        {\"label\": \"Experimental\"}\n                    ]\n                },\n                \"IDTAGCAT2\": {\n                    \"labels\": [\n                        {\"label\": \"Development\"}\n                    ]\n                },\n                \"IDTAGCAT3\": {\n                    \"labels\": [\n                        {\"label\": \"Production\"}\n                    ]\n                }\n            }\n        },\n        \"IDTAGIMPORTANCES\": {\n            \"strict\": true,\n            \"labels\": [\n                {\"label\": \"Importance\"},\n                {\"label\": \"Priority\"}\n            ],\n            \"values\": {\n                \"IDVALIMPORTANCES1\": {\n                    \"labels\": [\n                        {\"label\": \"Critical\"},\n                        {\"label\": \"Urgent\"},\n                        {\"label\": \"High\"}\n                    ]\n                },\n                \"IDVALIMPORTANCES2\": {\n                    \"labels\": [\n                        {\"label\": \"Normal\"},\n                        {\"label\": \"Moderate\"}\n                    ]\n                },\n                \"IDVALIMPORTANCES3\": {\n                    \"labels\": [\n                        {\"label\": \"Low\"}\n                    ]\n                }\n            }\n        },\n        \"IDTAGSIZES\": {\n            \"strict\": true,\n            \"labels\": [\n                {\"label\": \"Size\"}\n            ],\n            \"values\": {\n                \"IDVALSIZES1\": {\n                    \"labels\": [\n                        {\"label\": \"XS\"},\n                        {\"label\": \"x-small\"}\n                    ]\n                },\n                \"IDVALSIZES2\": {\n                    \"labels\": [\n                        {\"label\": \"S\"},\n                        {\"label\": \"small\"}\n                    ]\n                },\n                \"IDVALSIZES3\": {\n                    \"labels\": [\n                        {\"label\": \"M\"},\n                        {\"label\": \"medium\"}\n                    ]\n                },\n                \"IDVALSIZES4\": {\n                    \"labels\": [\n                        {\"label\": \"L\"},\n                        {\"label\": \"large\"}\n                    ]\n                },\n                \"IDVALSIZES5\": {\n                    \"labels\": [\n                        {\"label\": \"XL\"},\n                        {\"label\": \"x-large\"}\n                    ]\n                }\n            }\n        }\n    }\n}"
  },
  {
    "path": "services/workbench2/tools/run-integration-tests.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -e -o pipefail\n\ncleanup() {\n    set -x\n    set +e +o pipefail\n    kill ${arvboot_PID} ${consume_stdout_PID} ${wb2_PID} ${consume_wb2_stdout_PID}\n    wait ${arvboot_PID} ${consume_stdout_PID} ${wb2_PID} ${consume_wb2_stdout_PID} || true\n    echo >&2 \"done\"\n}\n\nrandom_free_port() {\n    while port=$(shuf -n1 -i $(cat /proc/sys/net/ipv4/ip_local_port_range | tr '\\011' '-'))\n    netstat -atun | grep -q \":$port\\s\" ; do\n        continue\n    done\n    echo $port\n}\n\nusage() {\n    cat <<EOF\nUsage: $0 [options]\nOptions:\n  -i            Run Cypress in interactive mode.\nEnvironment:\n  ARVADOS_DIRECTORY  Path to an Arvados Git checkout\n  WORKSPACE          Path to the Workbench source in that checkout\nEOF\n    exit 0\n}\n\nCYPRESS_MODE=\"run\"\nwhile getopts \"i\" o; do\n    case \"${o}\" in\n        i)\n            # Interactive mode\n            CYPRESS_MODE=\"open --e2e\"\n            ;;\n        *)\n            echo \"Invalid Option: -$OPTARG\" 1>&2\n            usage\n            ;;\n    esac\ndone\nshift $((OPTIND-1))\n\necho \"ARVADOS_DIRECTORY is ${ARVADOS_DIRECTORY}\"\ncd \"${WORKSPACE:=$ARVADOS_DIRECTORY/services/workbench2}\"\n\nif [ ! -f src/index.tsx ]; then\n    echo \"ERROR: '${WORKSPACE}' isn't workbench2's directory\" >&2\n    usage\nfi\n\necho \"Launching arvados in test mode...\"\nTESTTMP=\"$ARVADOS_DIRECTORY/tmp/workbench2-integration\"\nmkdir -p \"$TESTTMP\"\nARVADOS_LOG=\"${TESTTMP}/arvados-workbench2-tests.log\"\nTEST_CONFIG=\"$TESTTMP/arvados_config.yml\"\nyq -y \".Clusters.zzzzz.API.VocabularyPath = \\\"$WORKSPACE/tools/example-vocabulary.json\\\"\" \\\n   <\"$WORKSPACE/tools/arvados_config.yml\" >\"$TEST_CONFIG\"\ncoproc arvboot (\"$(go env GOPATH)/bin/arvados-server\" boot \\\n    -type test \\\n    -source \"$ARVADOS_DIRECTORY\" \\\n    -config \"$TEST_CONFIG\" \\\n    -no-workbench1 \\\n    -no-workbench2 \\\n    -own-temporary-database \\\n    -timeout 20m 2>\"$ARVADOS_LOG\")\ntrap cleanup ERR EXIT\n\nread controllerURL _ <&\"${arvboot[0]}\"\necho \"Arvados up and running at ${controllerURL}\"\n\n# Copy coproc's stdout to stderr, to ensure `arvados-server boot`\n# doesn't get blocked trying to write stdout.\nexec 7<&\"${arvboot[0]}\"; coproc consume_stdout (cat <&7 >&2)\n\necho \"Launching workbench2...\"\nexport NODE_TLS_REJECT_UNAUTHORIZED=0  # Allow self-signed certs on 'wait-on'\nWB2_PORT=`random_free_port`\ncoproc wb2 (PORT=${WB2_PORT} \\\n    REACT_APP_ARVADOS_API_HOST=\"$(echo \"$controllerURL\" | cut -d/ -f3)\" \\\n    yarn start)\nexec 8<&\"${wb2[0]}\"; coproc consume_wb2_stdout (cat <&8 >&2)\n\n# Wait for workbench2 to be up.\n# Using https-get to avoid false positive 'ready' detection.\nyarn run wait-on --timeout 300000 https-get://127.0.0.1:${WB2_PORT}\n\necho \"Running tests...\"\nCYPRESS_system_token=\"$(yq -r .Clusters.zzzzz.SystemRootToken \"$TEST_CONFIG\")\" \\\n    CYPRESS_controller_url=${controllerURL} \\\n    CYPRESS_BASE_URL=https://127.0.0.1:${WB2_PORT} \\\n    yarn run cypress ${CYPRESS_MODE} \"$@\"\n"
  },
  {
    "path": "services/workbench2/tools/setup-docker-volume.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n#\n# This script runs inside an arvados/workbench Docker container to\n# initialize the home volume for test runs.\n\nset -euo pipefail\nDEV_USER=\"$(id --user --name 1000)\"\nDEV_HOME=\"/mnt/$DEV_USER\"\n\nrun_setup() {\n    local rundir=\"$1\"; shift\n    sudo -u \"$DEV_USER\" env -C \"$rundir\" HOME=\"$DEV_HOME\" \"$@\"\n}\n\ncd \"/home/$DEV_USER\"\n# The virtualenv needs to use the /home path, and will write everything there\n# regardless of other settings, so we must set it up fully before copying the\n# home directory.\nrun_setup . python3 -m venv VENV3DIR\nwhile read key val; do\n    run_setup VENV3DIR bin/pip config --site set \"$key\" \"$val\"\ndone <<EOF\nglobal.disable-pip-version-check true\nglobal.no-cache-dir true\nglobal.no-input true\nglobal.no-python-version-warning true\ninstall.progress-bar off\nEOF\ngrep --no-filename -E '^yq[^-_[:alnum:]]' \"$ARVADOS_DIRECTORY\"/build/requirements.*.txt |\n    run_setup VENV3DIR xargs -d\\\\n bin/pip install\ncp --archive . \"$DEV_HOME\"\n\n# Now install everything else directly to the volume.\ncd \"$ARVADOS_DIRECTORY\"\nrun_setup cmd/arvados-server go install\nrun_setup services/workbench2 yarn run cypress install\nrun_setup services/workbench2 yarn run cypress verify\n"
  },
  {
    "path": "services/workbench2/tsconfig.json",
    "content": "{\n  \"compilerOptions\": {\n    \"outDir\": \"build/dist\",\n    \"module\": \"esnext\",\n    \"target\": \"es5\",\n    \"lib\": [\n      \"es6\",\n      \"es2020\",\n      \"dom\"\n    ],\n    \"sourceMap\": true,\n    \"allowJs\": true,\n    \"jsx\": \"react-jsx\",\n    \"moduleResolution\": \"node\",\n    \"rootDir\": \"src\",\n    \"baseUrl\": \"src\",\n    \"forceConsistentCasingInFileNames\": true,\n    \"noImplicitReturns\": true,\n    \"noImplicitThis\": true,\n    \"noImplicitAny\": false,\n    \"strictNullChecks\": true,\n    \"suppressImplicitAnyIndexErrors\": true,\n    \"noUnusedLocals\": false,\n    \"experimentalDecorators\": true,\n    \"emitDecoratorMetadata\": true,\n    \"skipLibCheck\": true,\n    \"esModuleInterop\": true,\n    \"allowSyntheticDefaultImports\": true,\n    \"strict\": false,\n    \"resolveJsonModule\": true,\n    \"isolatedModules\": true,\n    \"incremental\": true,\n    \"noEmit\": true,\n    \"alwaysStrict\": false,\n    \"strictFunctionTypes\": false,\n    \"strictPropertyInitialization\": false,\n    \"noFallthroughCasesInSwitch\": false\n  },\n  \"exclude\": [\n    \"node_modules\",\n    \"build\",\n    \"scripts\",\n    \"acceptance-tests\",\n    \"webpack\",\n    \"jest\",\n    \"cypress\",\n    \"src/setupTests.ts\",\n    \"**/*.test.tsx\"\n  ],\n  \"include\": [\n    \"src\"\n  ]\n}\n"
  },
  {
    "path": "services/workbench2/tsconfig.prod.json",
    "content": "{\n  \"extends\": \"./tsconfig.json\"\n}"
  },
  {
    "path": "services/workbench2/tsconfig.test.json",
    "content": "{\n  \"extends\": \"./tsconfig.json\",\n  \"compilerOptions\": {\n    \"module\": \"commonjs\"\n  }\n}\n"
  },
  {
    "path": "services/workbench2/tslint.json",
    "content": "{\n  \"extends\": [\"tslint:recommended\", \"tslint-react\", \"tslint-config-prettier\", \"tslint-etc\"],\n  \"rules\": {\n    \"ordered-imports\": false,\n    \"member-ordering\": false,\n    \"object-literal-sort-keys\": false,\n    \"interface-name\": false,\n    \"no-empty-interface\": false,\n    \"member-access\": false,\n    \"jsx-boolean-value\": false,\n    \"jsx-no-lambda\": false,\n    \"no-debugger\": false,\n    \"no-console\": false,\n    \"no-shadowed-variable\": false,\n    \"semicolon\": true,\n    \"array-type\": false,\n    \"interface-over-type-literal\": false,\n    \"no-empty\": false,\n    \"no-bitwise\": false,\n    \"ban-types\": false,\n    \"no-unused-declaration\": true\n  },\n  \"linterOptions\": {\n    \"exclude\": [\n      \"config/**/*.js\",\n      \"node_modules/**/*.ts\",\n      \"src/lib/**\",\n      \"src/**/*.test.ts\",\n      \"coverage/lcov-report/*.js\",\n      \"src/common/custom-theme.ts\"\n    ]\n  }\n}\n"
  },
  {
    "path": "services/workbench2/typings/global.d.ts",
    "content": "declare interface Window {\n  __REDUX_DEVTOOLS_EXTENSION__: any;\n  __REDUX_DEVTOOLS_EXTENSION_COMPOSE__: any;\n}\n\ndeclare interface NodeModule {\n  hot?: { accept: (path: string, callback: () => void) => void };\n}\n\ndeclare interface System {\n  import<T = any>(module: string): Promise<T>\n}\ndeclare var System: System;\n\ndeclare module 'react-splitter-layout';\ndeclare module 'react-rte';\n\ndeclare module 'is-image' {\n  export default function isImage(value: string): boolean;\n}"
  },
  {
    "path": "services/workbench2/typings/images.d.ts",
    "content": "declare module '*.svg'\ndeclare module '*.png'\ndeclare module '*.jpg'\n"
  },
  {
    "path": "services/workbench2/version-at-commit.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -e -o pipefail\ncommit=\"$1\"\nversionglob=\"[0-9].[0-9]*.[0-9]*\"\ndevsuffix=\"~dev\"\n\n# automatically assign version\n#\n# handles the following cases:\n#\n# 1. commit is directly tagged.  print that.\n#\n# 2. commit is on main or a development branch, the nearest tag is older\n#    than commit where this branch joins main.\n#    -> take greatest version tag in repo X.Y.Z and assign X.(Y+1).0\n#\n# 3. commit is on a release branch, the nearest tag is newer\n#    than the commit where this branch joins main.\n#    -> take nearest tag X.Y.Z and assign X.Y.(Z+1)\n\ntagged=$(git tag --points-at \"$commit\")\n\nif [[ -n \"$tagged\" ]] ; then\n    echo $tagged\nelse\n    # 1. get the nearest tag with 'git describe'\n    # 2. get the merge base between this commit and main\n    # 3. if the tag is an ancestor of the merge base,\n    #    (tag is older than merge base) increment minor version\n    #    else, tag is newer than merge base, so increment point version\n\n    nearest_tag=$(git describe --tags --abbrev=0 --match \"$versionglob\" \"$commit\")\n    merge_base=$(git merge-base origin/main \"$commit\")\n\n    if git merge-base --is-ancestor \"$nearest_tag\" \"$merge_base\" ; then\n        # x.(y+1).0~devTIMESTAMP, where x.y.z is the newest version that does not contain $commit\n\t# grep reads the list of tags (-f) that contain $commit and filters them out (-v)\n\t# this prevents a newer tag from retroactively changing the versions of everything before it\n        v=$(git tag | grep -vFf <(git tag --contains \"$commit\") | sort -Vr | head -n1 | perl -pe 's/(\\d+)\\.(\\d+)\\.\\d+.*/\"$1.\".($2+1).\".0\"/e')\n    else\n        # x.y.(z+1)~devTIMESTAMP, where x.y.z is the latest released ancestor of $commit\n        v=$(echo $nearest_tag | perl -pe 's/(\\d+)$/$1+1/e')\n    fi\n    isodate=$(TZ=UTC git log -n1 --format=%cd --date=iso \"$commit\")\n    ts=$(TZ=UTC date --date=\"$isodate\" \"+%Y%m%d%H%M%S\")\n    echo \"${v}${devsuffix}${ts}\"\nfi\n"
  },
  {
    "path": "services/ws/doc.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\n// Package ws exposes Arvados APIs (currently just one, the\n// cache-invalidation event feed at \"ws://.../websocket\") to\n// websocket clients.\n//\n// # Installation and configuration\n//\n// See https://doc.arvados.org/install/install-ws.html.\n//\n// # Developer info\n//\n// See https://dev.arvados.org/projects/arvados/wiki/Hacking_websocket_server.\npackage ws\n"
  },
  {
    "path": "services/ws/event.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"database/sql\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype eventSink interface {\n\tChannel() <-chan *event\n\tStop()\n}\n\ntype eventSource interface {\n\tNewSink() eventSink\n\tDB() *sql.DB\n\tDBHealth() error\n}\n\ntype event struct {\n\tLogID    int64\n\tReceived time.Time\n\tReady    time.Time\n\tSerial   uint64\n\tDB       *sql.DB\n\tLogger   logrus.FieldLogger\n\n\tlogRow *arvados.Log\n\terr    error\n\tmtx    sync.Mutex\n}\n\n// Detail returns the database row corresponding to the event. It can\n// be called safely from multiple goroutines. Only one attempt will be\n// made. If the database row cannot be retrieved, Detail returns nil.\nfunc (e *event) Detail() *arvados.Log {\n\te.mtx.Lock()\n\tdefer e.mtx.Unlock()\n\tif e.logRow != nil || e.err != nil {\n\t\treturn e.logRow\n\t}\n\tvar logRow arvados.Log\n\tvar propYAML []byte\n\te.err = e.DB.QueryRow(`SELECT id, uuid, object_uuid, COALESCE(object_owner_uuid,''), COALESCE(event_type,''), event_at, created_at, properties FROM logs WHERE id = $1`, e.LogID).Scan(\n\t\t&logRow.ID,\n\t\t&logRow.UUID,\n\t\t&logRow.ObjectUUID,\n\t\t&logRow.ObjectOwnerUUID,\n\t\t&logRow.EventType,\n\t\t&logRow.EventAt,\n\t\t&logRow.CreatedAt,\n\t\t&propYAML)\n\tif e.err != nil {\n\t\te.Logger.WithField(\"LogID\", e.LogID).WithError(e.err).Error(\"QueryRow failed\")\n\t\treturn nil\n\t}\n\tif len(propYAML) == 0 {\n\t\tlogRow.Properties = map[string]interface{}{}\n\t} else {\n\t\te.err = yaml.Unmarshal(propYAML, &logRow.Properties)\n\t\tif e.err != nil {\n\t\t\te.Logger.WithField(\"LogID\", e.LogID).WithError(e.err).Error(\"yaml decode failed\")\n\t\t\treturn nil\n\t\t}\n\t}\n\te.logRow = &logRow\n\treturn e.logRow\n}\n"
  },
  {
    "path": "services/ws/event_source.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"context\"\n\t\"database/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/stats\"\n\t\"github.com/lib/pq\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nvar (\n\tlistenerPingInterval = time.Minute\n\ttestSlowPing         = false\n)\n\ntype pgEventSource struct {\n\tDataSource   string\n\tMaxOpenConns int\n\tQueueSize    int\n\tLogger       logrus.FieldLogger\n\tReg          *prometheus.Registry\n\n\tdb         *sql.DB\n\tpqListener *pq.Listener\n\tqueue      chan *event\n\tsinks      map[*pgEventSink]bool\n\tmtx        sync.Mutex\n\n\tlastQDelay time.Duration\n\teventsIn   prometheus.Counter\n\teventsOut  prometheus.Counter\n\n\tcancel func()\n\n\tsetupOnce sync.Once\n\tready     chan bool\n}\n\nfunc (ps *pgEventSource) listenerProblem(et pq.ListenerEventType, err error) {\n\tif et == pq.ListenerEventConnected {\n\t\tps.Logger.Debug(\"pgEventSource connected\")\n\t\treturn\n\t}\n\n\t// Until we have a mechanism for catching up on missed events,\n\t// we cannot recover from a dropped connection without\n\t// breaking our promises to clients.\n\tps.Logger.\n\t\tWithField(\"eventType\", et).\n\t\tWithError(err).\n\t\tError(\"listener problem\")\n\tps.cancel()\n}\n\nfunc (ps *pgEventSource) setup() {\n\tps.ready = make(chan bool)\n\tps.Reg.MustRegister(prometheus.NewGaugeFunc(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace: \"arvados\",\n\t\t\tSubsystem: \"ws\",\n\t\t\tName:      \"queue_len\",\n\t\t\tHelp:      \"Current number of events in queue\",\n\t\t}, func() float64 { return float64(len(ps.queue)) }))\n\tps.Reg.MustRegister(prometheus.NewGaugeFunc(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace: \"arvados\",\n\t\t\tSubsystem: \"ws\",\n\t\t\tName:      \"queue_cap\",\n\t\t\tHelp:      \"Event queue capacity\",\n\t\t}, func() float64 { return float64(cap(ps.queue)) }))\n\tps.Reg.MustRegister(prometheus.NewGaugeFunc(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace: \"arvados\",\n\t\t\tSubsystem: \"ws\",\n\t\t\tName:      \"queue_delay\",\n\t\t\tHelp:      \"Queue delay of the last emitted event\",\n\t\t}, func() float64 { return ps.lastQDelay.Seconds() }))\n\tps.Reg.MustRegister(prometheus.NewGaugeFunc(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace: \"arvados\",\n\t\t\tSubsystem: \"ws\",\n\t\t\tName:      \"sinks\",\n\t\t\tHelp:      \"Number of active sinks (connections)\",\n\t\t}, func() float64 { return float64(len(ps.sinks)) }))\n\tps.Reg.MustRegister(prometheus.NewGaugeFunc(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace: \"arvados\",\n\t\t\tSubsystem: \"ws\",\n\t\t\tName:      \"sinks_blocked\",\n\t\t\tHelp:      \"Number of sinks (connections) that are busy and blocking the main event stream\",\n\t\t}, func() float64 {\n\t\t\tps.mtx.Lock()\n\t\t\tdefer ps.mtx.Unlock()\n\t\t\tblocked := 0\n\t\t\tfor sink := range ps.sinks {\n\t\t\t\tblocked += len(sink.channel)\n\t\t\t}\n\t\t\treturn float64(blocked)\n\t\t}))\n\tps.eventsIn = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"ws\",\n\t\tName:      \"events_in\",\n\t\tHelp:      \"Number of events received from postgresql notify channel\",\n\t})\n\tps.Reg.MustRegister(ps.eventsIn)\n\tps.eventsOut = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"ws\",\n\t\tName:      \"events_out\",\n\t\tHelp:      \"Number of 
events sent to client sessions (before filtering)\",\n\t})\n\tps.Reg.MustRegister(ps.eventsOut)\n\n\tmaxConnections := prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"ws\",\n\t\tName:      \"db_max_connections\",\n\t\tHelp:      \"Maximum number of open connections to the database\",\n\t})\n\tps.Reg.MustRegister(maxConnections)\n\topenConnections := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"ws\",\n\t\tName:      \"db_open_connections\",\n\t\tHelp:      \"Open connections to the database\",\n\t}, []string{\"inuse\"})\n\tps.Reg.MustRegister(openConnections)\n\n\tupdateDBStats := func() {\n\t\tstats := ps.db.Stats()\n\t\tmaxConnections.Set(float64(stats.MaxOpenConnections))\n\t\topenConnections.WithLabelValues(\"0\").Set(float64(stats.Idle))\n\t\topenConnections.WithLabelValues(\"1\").Set(float64(stats.InUse))\n\t}\n\tgo func() {\n\t\t<-ps.ready\n\t\tif ps.db == nil {\n\t\t\treturn\n\t\t}\n\t\tupdateDBStats()\n\t\tfor range time.Tick(time.Second) {\n\t\t\tupdateDBStats()\n\t\t}\n\t}()\n}\n\n// Close stops listening for new events and disconnects all clients.\nfunc (ps *pgEventSource) Close() {\n\tps.WaitReady()\n\tps.cancel()\n}\n\n// WaitReady returns when the event listener is connected.\nfunc (ps *pgEventSource) WaitReady() {\n\tps.setupOnce.Do(ps.setup)\n\t<-ps.ready\n}\n\n// Run listens for event notifications on the \"logs\" channel and sends\n// them to all subscribers.\nfunc (ps *pgEventSource) Run() {\n\tps.Logger.Debug(\"pgEventSource Run starting\")\n\tdefer ps.Logger.Debug(\"pgEventSource Run finished\")\n\n\tps.setupOnce.Do(ps.setup)\n\tready := ps.ready\n\tdefer func() {\n\t\tif ready != nil {\n\t\t\tclose(ready)\n\t\t}\n\t}()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tps.cancel = cancel\n\tdefer cancel()\n\n\tdefer func() {\n\t\t// Disconnect all clients\n\t\tps.mtx.Lock()\n\t\tfor sink := range ps.sinks {\n\t\t\tclose(sink.channel)\n\t\t}\n\t\tps.sinks = nil\n\t\tps.mtx.Unlock()\n\t}()\n\n\tdb, err := sql.Open(\"postgres\", ps.DataSource)\n\tif err != nil {\n\t\tps.Logger.WithError(err).Error(\"sql.Open failed\")\n\t\treturn\n\t}\n\tif ps.MaxOpenConns <= 0 {\n\t\tps.Logger.Warn(\"no database connection limit configured -- consider setting PostgreSQL.ConnectionPool>0 in arvados-ws configuration file\")\n\t}\n\tdb.SetMaxOpenConns(ps.MaxOpenConns)\n\tif err = db.Ping(); err != nil {\n\t\tps.Logger.WithError(err).Error(\"db.Ping failed\")\n\t\treturn\n\t}\n\tps.db = db\n\n\tps.pqListener = pq.NewListener(ps.DataSource, time.Second, time.Minute, ps.listenerProblem)\n\terr = ps.pqListener.Listen(\"logs\")\n\tif err != nil {\n\t\tps.Logger.WithError(err).Error(\"pq Listen failed\")\n\t\treturn\n\t}\n\tdefer ps.pqListener.Close()\n\tps.Logger.Debug(\"pq Listen setup done\")\n\n\tclose(ready)\n\t// Avoid double-close in deferred func\n\tready = nil\n\n\tps.queue = make(chan *event, ps.QueueSize)\n\tdefer close(ps.queue)\n\n\tgo func() {\n\t\tfor e := range ps.queue {\n\t\t\t// Wait for the \"select ... from logs\" call to\n\t\t\t// finish. This limits max concurrent queries\n\t\t\t// to ps.QueueSize. 
Without this, max\n\t\t\t// concurrent queries would be bounded by\n\t\t\t// client_count X client_queue_size.\n\t\t\te.Detail()\n\n\t\t\tps.Logger.\n\t\t\t\tWithField(\"serial\", e.Serial).\n\t\t\t\tWithField(\"detail\", e.Detail()).\n\t\t\t\tDebug(\"event ready\")\n\t\t\te.Ready = time.Now()\n\t\t\tps.lastQDelay = e.Ready.Sub(e.Received)\n\n\t\t\tps.mtx.Lock()\n\t\t\tfor sink := range ps.sinks {\n\t\t\t\tsink.channel <- e\n\t\t\t\tps.eventsOut.Inc()\n\t\t\t}\n\t\t\tps.mtx.Unlock()\n\t\t}\n\t}()\n\n\tvar serial uint64\n\n\tgo func() {\n\t\tticker := time.NewTicker(listenerPingInterval)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tps.Logger.Debug(\"ctx done\")\n\t\t\t\treturn\n\n\t\t\tcase <-ticker.C:\n\t\t\t\tps.Logger.Debug(\"listener ping\")\n\t\t\t\tif testSlowPing {\n\t\t\t\t\ttime.Sleep(time.Second / 2)\n\t\t\t\t}\n\t\t\t\terr := ps.pqListener.Ping()\n\t\t\t\tif err != nil {\n\t\t\t\t\tps.listenerProblem(-1, fmt.Errorf(\"pqListener ping failed: %s\", err))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tps.Logger.Debug(\"ctx done\")\n\t\t\treturn\n\n\t\tcase pqEvent, ok := <-ps.pqListener.Notify:\n\t\t\tif !ok {\n\t\t\t\tps.Logger.Error(\"pqListener Notify chan closed\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif pqEvent == nil {\n\t\t\t\t// pq should call listenerProblem\n\t\t\t\t// itself in addition to sending us a\n\t\t\t\t// nil event, so this might be\n\t\t\t\t// superfluous:\n\t\t\t\tps.listenerProblem(-1, errors.New(\"pqListener Notify chan received nil event\"))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif pqEvent.Channel != \"logs\" {\n\t\t\t\tps.Logger.WithField(\"pqEvent\", pqEvent).Error(\"unexpected notify from wrong channel\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlogID, err := strconv.ParseInt(pqEvent.Extra, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tps.Logger.WithField(\"pqEvent\", pqEvent).Error(\"bad notify payload\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tserial++\n\t\t\te := &event{\n\t\t\t\tLogID:    logID,\n\t\t\t\tReceived: time.Now(),\n\t\t\t\tSerial:   serial,\n\t\t\t\tDB:       ps.db,\n\t\t\t\tLogger:   ps.Logger,\n\t\t\t}\n\t\t\tps.Logger.WithField(\"event\", e).Debug(\"incoming\")\n\t\t\tps.eventsIn.Inc()\n\t\t\tps.queue <- e\n\t\t\tgo e.Detail()\n\t\t}\n\t}\n}\n\n// NewSink subscribes to the event source. 
NewSink returns an\n// eventSink, whose Channel() method returns a channel: a pointer to\n// each subsequent event will be sent to that channel.\n//\n// The caller must ensure events are received from the sink channel as\n// quickly as possible because when one sink stops being ready, all\n// other sinks block.\nfunc (ps *pgEventSource) NewSink() eventSink {\n\tsink := &pgEventSink{\n\t\tchannel: make(chan *event, 1),\n\t\tsource:  ps,\n\t}\n\tps.mtx.Lock()\n\tif ps.sinks == nil {\n\t\tps.sinks = make(map[*pgEventSink]bool)\n\t}\n\tps.sinks[sink] = true\n\tps.mtx.Unlock()\n\treturn sink\n}\n\nfunc (ps *pgEventSource) DB() *sql.DB {\n\tps.WaitReady()\n\treturn ps.db\n}\n\nfunc (ps *pgEventSource) DBHealth() error {\n\tif ps.db == nil {\n\t\treturn errors.New(\"database not connected\")\n\t}\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second))\n\tdefer cancel()\n\tvar i int\n\treturn ps.db.QueryRowContext(ctx, \"SELECT 1\").Scan(&i)\n}\n\nfunc (ps *pgEventSource) DebugStatus() interface{} {\n\tps.mtx.Lock()\n\tdefer ps.mtx.Unlock()\n\tblocked := 0\n\tfor sink := range ps.sinks {\n\t\tblocked += len(sink.channel)\n\t}\n\treturn map[string]interface{}{\n\t\t\"Queue\":        len(ps.queue),\n\t\t\"QueueLimit\":   cap(ps.queue),\n\t\t\"QueueDelay\":   stats.Duration(ps.lastQDelay),\n\t\t\"Sinks\":        len(ps.sinks),\n\t\t\"SinksBlocked\": blocked,\n\t\t\"DBStats\":      ps.db.Stats(),\n\t}\n}\n\ntype pgEventSink struct {\n\tchannel chan *event\n\tsource  *pgEventSource\n}\n\nfunc (sink *pgEventSink) Channel() <-chan *event {\n\treturn sink.channel\n}\n\n// Stop sending events to the sink's channel.\nfunc (sink *pgEventSink) Stop() {\n\tgo func() {\n\t\t// Ensure this sink cannot fill up and block the\n\t\t// server-side queue (which otherwise could in turn\n\t\t// block our mtx.Lock() here)\n\t\tfor range sink.channel {\n\t\t}\n\t}()\n\tsink.source.mtx.Lock()\n\tif _, ok := sink.source.sinks[sink]; ok {\n\t\tdelete(sink.source.sinks, sink)\n\t\tclose(sink.channel)\n\t}\n\tsink.source.mtx.Unlock()\n}\n"
  },
  {
    "path": "services/ws/event_source_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"database/sql\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/ghodss/yaml\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&eventSourceSuite{})\n\ntype eventSourceSuite struct{}\n\nfunc testDBConfig() arvados.PostgreSQLConnection {\n\tcfg, err := arvados.GetConfig(arvados.DefaultConfigFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcc, err := cfg.GetCluster(\"zzzzz\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn cc.PostgreSQL.Connection\n}\n\nfunc testDB() *sql.DB {\n\tdb, err := sql.Open(\"postgres\", testDBConfig().String())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn db\n}\n\nfunc (*eventSourceSuite) TestEventSource(c *check.C) {\n\tvar logfixtures map[string]struct {\n\t\tID int\n\t}\n\tyamldata, err := ioutil.ReadFile(\"../api/test/fixtures/logs.yml\")\n\tc.Assert(err, check.IsNil)\n\tc.Assert(yaml.Unmarshal(yamldata, &logfixtures), check.IsNil)\n\tvar logIDs []int\n\tfor _, logfixture := range logfixtures {\n\t\tlogIDs = append(logIDs, logfixture.ID)\n\t}\n\tsort.Ints(logIDs)\n\n\tcfg := testDBConfig()\n\tdb := testDB()\n\tpges := &pgEventSource{\n\t\tDataSource: cfg.String(),\n\t\tQueueSize:  4,\n\t\tLogger:     ctxlog.TestLogger(c),\n\t\tReg:        prometheus.NewRegistry(),\n\t}\n\tgo pges.Run()\n\tsinks := make([]eventSink, 18)\n\tfor i := range sinks {\n\t\tsinks[i] = pges.NewSink()\n\t}\n\n\tpges.WaitReady()\n\tdefer pges.cancel()\n\n\tdone := make(chan bool, 1)\n\n\tgo func() {\n\t\tfor _, id := range logIDs {\n\t\t\t_, err := db.Exec(fmt.Sprintf(`NOTIFY logs, '%d'`, id))\n\t\t\tif err != nil {\n\t\t\t\tdone <- true\n\t\t\t\tc.Fatal(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(sinks))\n\tfor si, s := range sinks {\n\t\tgo func(si int, s eventSink) {\n\t\t\tdefer wg.Done()\n\t\t\tdefer sinks[si].Stop()\n\t\t\tfor _, logID := range logIDs {\n\t\t\t\tev := <-sinks[si].Channel()\n\t\t\t\tc.Logf(\"sink %d received event %d\", si, logID)\n\t\t\t\tc.Check(ev.LogID, check.Equals, int64(logID))\n\t\t\t\trow := ev.Detail()\n\t\t\t\tif c.Check(row, check.NotNil) {\n\t\t\t\t\tc.Check(row.ID, check.Equals, int64(logID))\n\t\t\t\t\tc.Check(row.UUID, check.Not(check.Equals), \"\")\n\t\t\t\t}\n\t\t\t}\n\t\t}(si, s)\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tdone <- true\n\t}()\n\n\tselect {\n\tcase <-done:\n\tcase <-time.After(10 * time.Second):\n\t\tc.Fatal(\"timed out\")\n\t}\n\n\tc.Check(pges.DBHealth(), check.IsNil)\n}\n"
  },
  {
    "path": "services/ws/event_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"net/url\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&eventSuite{})\n\ntype eventSuite struct{}\n\nfunc (*eventSuite) TestDetail(c *check.C) {\n\te := &event{\n\t\tLogID:  17,\n\t\tDB:     testDB(),\n\t\tLogger: ctxlog.TestLogger(c),\n\t}\n\tlogRow := e.Detail()\n\tc.Assert(logRow, check.NotNil)\n\tc.Check(logRow, check.Equals, e.logRow)\n\tc.Check(logRow.UUID, check.Equals, \"zzzzz-57u5n-containerlog006\")\n\tc.Check(logRow.ObjectUUID, check.Equals, \"zzzzz-dz642-logscontainer03\")\n\tc.Check(logRow.EventType, check.Equals, \"crunchstat\")\n\tc.Check(logRow.Properties[\"text\"], check.Equals, \"2013-11-07_23:33:41 zzzzz-dz642-logscontainer03 29610 1 stderr crunchstat: cpu 1935.4300 user 59.4100 sys 8 cpus -- interval 10.0002 seconds 12.9900 user 0.9900 sys\")\n}\n\nfunc (*eventSuite) TestDetail_Properties(c *check.C) {\n\tac := arvados.NewClientFromEnv()\n\tjsondata, err := json.Marshal(map[string]interface{}{\n\t\t\"object_uuid\": arvadostest.RunningContainerUUID,\n\t\t\"event_type\":  \"blip\",\n\t\t\"properties\":  nil,\n\t})\n\tc.Assert(err, check.IsNil)\n\tvar lg arvados.Log\n\terr = ac.RequestAndDecode(&lg, \"POST\", \"arvados/v1/logs\", bytes.NewBufferString(url.Values{\"log\": []string{string(jsondata)}}.Encode()), nil)\n\tc.Assert(err, check.IsNil)\n\tdefer testDB().Exec(`delete from logs where id=$1`, lg.ID)\n\n\tenoprop := &event{\n\t\tLogID:  lg.ID,\n\t\tDB:     testDB(),\n\t\tLogger: ctxlog.TestLogger(c),\n\t}\n\tlogRow := enoprop.Detail()\n\tc.Assert(logRow, check.NotNil)\n\tc.Check(logRow.Properties, check.DeepEquals, map[string]interface{}{})\n\n\t_, err = testDB().Exec(`update logs set properties='bad properties' where id=$1`, lg.ID)\n\tc.Assert(err, check.IsNil)\n\tebadprop := &event{\n\t\tLogID:  lg.ID,\n\t\tDB:     testDB(),\n\t\tLogger: ctxlog.TestLogger(c),\n\t}\n\tlogRow = ebadprop.Detail()\n\tc.Check(logRow, check.IsNil)\n}\n"
  },
  {
    "path": "services/ws/gocheck_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"testing\"\n\n\tcheck \"gopkg.in/check.v1\"\n)\n\nfunc TestGocheck(t *testing.T) {\n\tcheck.TestingT(t)\n}\n\nfunc init() {\n\ttestMode = true\n}\n"
  },
  {
    "path": "services/ws/handler.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/stats\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype handler struct {\n\tClient      arvados.Client\n\tPingTimeout time.Duration\n\tQueueSize   int\n\n\tmtx       sync.Mutex\n\tlastDelay map[chan interface{}]stats.Duration\n\tsetupOnce sync.Once\n}\n\ntype handlerStats struct {\n\tQueueDelayNs time.Duration\n\tWriteDelayNs time.Duration\n\tEventBytes   uint64\n\tEventCount   uint64\n}\n\nfunc (h *handler) Handle(ws wsConn, logger logrus.FieldLogger, eventSource eventSource, newSession func(wsConn, chan<- interface{}) (session, error)) (hStats handlerStats) {\n\th.setupOnce.Do(h.setup)\n\n\tctx, cancel := context.WithCancel(ws.Request().Context())\n\tdefer cancel()\n\n\tqueue := make(chan interface{}, h.QueueSize)\n\th.mtx.Lock()\n\th.lastDelay[queue] = 0\n\th.mtx.Unlock()\n\tdefer func() {\n\t\th.mtx.Lock()\n\t\tdelete(h.lastDelay, queue)\n\t\th.mtx.Unlock()\n\t}()\n\n\tsess, err := newSession(ws, queue)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"newSession failed\")\n\t\treturn\n\t}\n\n\t// Receive websocket frames from the client and pass them to\n\t// sess.Receive().\n\tgo func() {\n\t\tdefer cancel()\n\t\tbuf := make([]byte, 2<<20)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\tws.SetReadDeadline(time.Now().Add(24 * 365 * time.Hour))\n\t\t\tn, err := ws.Read(buf)\n\t\t\tbuf := buf[:n]\n\t\t\tlogger.WithField(\"frame\", string(buf[:n])).Debug(\"received frame\")\n\t\t\tif err == nil && n == cap(buf) {\n\t\t\t\terr = errFrameTooBig\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF && ctx.Err() == nil {\n\t\t\t\t\tlogger.WithError(err).Info(\"read error\")\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = sess.Receive(buf)\n\t\t\tif err != nil {\n\t\t\t\tlogger.WithError(err).Error(\"sess.Receive() failed\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Take items from the outgoing queue, serialize them using\n\t// sess.EventMessage() as needed, and send them to the client\n\t// as websocket frames.\n\tgo func() {\n\t\tdefer cancel()\n\t\tfor {\n\t\t\tvar ok bool\n\t\t\tvar data interface{}\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase data, ok = <-queue:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tvar e *event\n\t\t\tvar buf []byte\n\t\t\tvar err error\n\t\t\tlogger := logger\n\n\t\t\tswitch data := data.(type) {\n\t\t\tcase []byte:\n\t\t\t\tbuf = data\n\t\t\tcase *event:\n\t\t\t\te = data\n\t\t\t\tlogger = logger.WithField(\"serial\", e.Serial)\n\t\t\t\tbuf, err = sess.EventMessage(e)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.WithError(err).Error(\"EventMessage failed\")\n\t\t\t\t\treturn\n\t\t\t\t} else if len(buf) == 0 {\n\t\t\t\t\tlogger.Debug(\"skip\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tlogger.WithField(\"data\", data).Error(\"bad object in client queue\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlogger.WithField(\"frame\", string(buf)).Debug(\"send event\")\n\t\t\tws.SetWriteDeadline(time.Now().Add(h.PingTimeout))\n\t\t\tt0 := time.Now()\n\t\t\t_, err = ws.Write(buf)\n\t\t\tif err != nil {\n\t\t\t\tif ctx.Err() == nil {\n\t\t\t\t\tlogger.WithError(err).Error(\"write failed\")\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlogger.Debug(\"sent\")\n\n\t\t\tif e != nil 
{\n\t\t\t\thStats.QueueDelayNs += t0.Sub(e.Ready)\n\t\t\t\th.mtx.Lock()\n\t\t\t\th.lastDelay[queue] = stats.Duration(time.Since(e.Ready))\n\t\t\t\th.mtx.Unlock()\n\t\t\t}\n\t\t\thStats.WriteDelayNs += time.Since(t0)\n\t\t\thStats.EventBytes += uint64(len(buf))\n\t\t\thStats.EventCount++\n\t\t}\n\t}()\n\n\t// Filter incoming events against the current subscription\n\t// list, and forward matching events to the outgoing message\n\t// queue. Close the queue and return when the request context\n\t// is done/cancelled or the incoming event stream ends. Shut\n\t// down the handler if the outgoing queue fills up.\n\tgo func() {\n\t\tdefer cancel()\n\t\tticker := time.NewTicker(h.PingTimeout)\n\t\tdefer ticker.Stop()\n\n\t\tincoming := eventSource.NewSink()\n\t\tdefer incoming.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\t// If the outgoing queue is empty,\n\t\t\t\t// send an empty message. This can\n\t\t\t\t// help detect a disconnected network\n\t\t\t\t// socket, and prevent an idle socket\n\t\t\t\t// from being closed.\n\t\t\t\tif len(queue) == 0 {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase queue <- []byte(`{}`):\n\t\t\t\t\tdefault:\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase e, ok := <-incoming.Channel():\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif !sess.Filter(e) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase queue <- e:\n\t\t\t\tdefault:\n\t\t\t\t\tlogger.WithError(errQueueFull).Error(\"terminate\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t<-ctx.Done()\n\treturn\n}\n\nfunc (h *handler) DebugStatus() interface{} {\n\th.mtx.Lock()\n\tdefer h.mtx.Unlock()\n\n\tvar s struct {\n\t\tQueueCount    int\n\t\tQueueMin      int\n\t\tQueueMax      int\n\t\tQueueTotal    uint64\n\t\tQueueDelayMin stats.Duration\n\t\tQueueDelayMax stats.Duration\n\t}\n\tfor q, lastDelay := range h.lastDelay {\n\t\ts.QueueCount++\n\t\tn := len(q)\n\t\ts.QueueTotal += uint64(n)\n\t\tif s.QueueMax < n {\n\t\t\ts.QueueMax = n\n\t\t}\n\t\tif s.QueueMin > n || s.QueueCount == 1 {\n\t\t\ts.QueueMin = n\n\t\t}\n\t\tif (s.QueueDelayMin > lastDelay || s.QueueDelayMin == 0) && lastDelay > 0 {\n\t\t\ts.QueueDelayMin = lastDelay\n\t\t}\n\t\tif s.QueueDelayMax < lastDelay {\n\t\t\ts.QueueDelayMax = lastDelay\n\t\t}\n\t}\n\treturn &s\n}\n\nfunc (h *handler) setup() {\n\th.lastDelay = make(map[chan interface{}]stats.Duration)\n}\n"
  },
  {
    "path": "services/ws/permission.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n)\n\nconst (\n\tmaxPermCacheAge = time.Hour\n\tminPermCacheAge = 5 * time.Minute\n)\n\ntype permChecker interface {\n\tSetToken(token string)\n\tCheck(ctx context.Context, uuid string) (bool, error)\n}\n\nfunc newPermChecker(ac *arvados.Client) permChecker {\n\treturn &cachingPermChecker{\n\t\tac:         ac,\n\t\ttoken:      \"-\",\n\t\tcache:      make(map[string]cacheEnt),\n\t\tmaxCurrent: 16,\n\t}\n}\n\ntype cacheEnt struct {\n\ttime.Time\n\tallowed bool\n}\n\ntype cachingPermChecker struct {\n\tac         *arvados.Client\n\ttoken      string\n\tcache      map[string]cacheEnt\n\tmaxCurrent int\n\n\tnChecks  uint64\n\tnMisses  uint64\n\tnInvalid uint64\n}\n\nfunc (pc *cachingPermChecker) SetToken(token string) {\n\tif pc.token == token {\n\t\treturn\n\t}\n\tpc.token = token\n\tpc.cache = make(map[string]cacheEnt)\n}\n\nfunc (pc *cachingPermChecker) Check(ctx context.Context, uuid string) (bool, error) {\n\tpc.nChecks++\n\tlogger := ctxlog.FromContext(ctx).\n\t\tWithField(\"token\", pc.token).\n\t\tWithField(\"uuid\", uuid)\n\tpc.tidy()\n\tnow := time.Now()\n\tif perm, ok := pc.cache[uuid]; ok && now.Sub(perm.Time) < maxPermCacheAge {\n\t\tlogger.WithField(\"allowed\", perm.allowed).Debug(\"cache hit\")\n\t\treturn perm.allowed, nil\n\t}\n\n\tpath, err := pc.ac.PathForUUID(\"get\", uuid)\n\tif err != nil {\n\t\tpc.nInvalid++\n\t\treturn false, err\n\t}\n\n\tpc.nMisses++\n\tctx = arvados.ContextWithAuthorization(ctx, \"Bearer \"+pc.token)\n\tctx, cancel := context.WithDeadline(ctx, time.Now().Add(time.Minute))\n\tdefer cancel()\n\tvar buf map[string]interface{}\n\terr = pc.ac.RequestAndDecodeContext(ctx, &buf, \"GET\", path, nil, url.Values{\n\t\t\"include_trash\": {\"true\"},\n\t\t\"select\":        {`[\"uuid\"]`},\n\t})\n\n\tvar allowed bool\n\tif err == nil {\n\t\tallowed = true\n\t} else if txErr, ok := err.(*arvados.TransactionError); ok && pc.isNotAllowed(txErr.StatusCode) {\n\t\tallowed = false\n\t} else {\n\t\t// If \"context deadline exceeded\", \"client\n\t\t// disconnected\", HTTP 5xx, network error, etc., don't\n\t\t// cache the result.\n\t\tlogger.WithError(err).Error(\"lookup error\")\n\t\treturn false, err\n\t}\n\tlogger.WithField(\"allowed\", allowed).Debug(\"cache miss\")\n\tpc.cache[uuid] = cacheEnt{Time: now, allowed: allowed}\n\treturn allowed, nil\n}\n\nfunc (pc *cachingPermChecker) isNotAllowed(status int) bool {\n\tswitch status {\n\tcase http.StatusForbidden, http.StatusUnauthorized, http.StatusNotFound:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (pc *cachingPermChecker) tidy() {\n\tif len(pc.cache) <= pc.maxCurrent*2 {\n\t\treturn\n\t}\n\ttooOld := time.Now().Add(-minPermCacheAge)\n\tfor uuid, t := range pc.cache {\n\t\tif t.Before(tooOld) {\n\t\t\tdelete(pc.cache, uuid)\n\t\t}\n\t}\n\tpc.maxCurrent = len(pc.cache)\n}\n"
  },
  {
    "path": "services/ws/permission_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"context\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&permSuite{})\n\ntype permSuite struct{}\n\nfunc (s *permSuite) TestCheck(c *check.C) {\n\tclient := arvados.NewClientFromEnv()\n\t// Disable auto-retry\n\tclient.Timeout = 0\n\n\tpc := newPermChecker(client).(*cachingPermChecker)\n\tsetToken := func(label, token string) {\n\t\tc.Logf(\"...%s token %q\", label, token)\n\t\tpc.SetToken(token)\n\t}\n\twantError := func(uuid string) {\n\t\tc.Log(uuid)\n\t\tok, err := pc.Check(context.Background(), uuid)\n\t\tc.Check(ok, check.Equals, false)\n\t\tc.Check(err, check.NotNil)\n\t}\n\twantYes := func(uuid string) {\n\t\tc.Log(uuid)\n\t\tok, err := pc.Check(context.Background(), uuid)\n\t\tc.Check(ok, check.Equals, true)\n\t\tc.Check(err, check.IsNil)\n\t}\n\twantNo := func(uuid string) {\n\t\tc.Log(uuid)\n\t\tok, err := pc.Check(context.Background(), uuid)\n\t\tc.Check(ok, check.Equals, false)\n\t\tc.Check(err, check.IsNil)\n\t}\n\n\tsetToken(\"no\", \"\")\n\twantNo(arvadostest.UserAgreementCollection)\n\twantNo(arvadostest.UserAgreementPDH)\n\twantNo(arvadostest.FooBarDirCollection)\n\n\tsetToken(\"anonymous\", arvadostest.AnonymousToken)\n\twantYes(arvadostest.UserAgreementCollection)\n\twantYes(arvadostest.UserAgreementPDH)\n\twantNo(arvadostest.FooBarDirCollection)\n\twantNo(arvadostest.FooCollection)\n\n\tsetToken(\"active\", arvadostest.ActiveToken)\n\twantYes(arvadostest.UserAgreementCollection)\n\twantYes(arvadostest.UserAgreementPDH)\n\twantYes(arvadostest.FooBarDirCollection)\n\twantYes(arvadostest.FooCollection)\n\n\tsetToken(\"admin\", arvadostest.AdminToken)\n\twantYes(arvadostest.UserAgreementCollection)\n\twantYes(arvadostest.UserAgreementPDH)\n\twantYes(arvadostest.FooBarDirCollection)\n\twantYes(arvadostest.FooCollection)\n\n\t// hack to empty the cache\n\tpc.SetToken(\"\")\n\tpc.SetToken(arvadostest.ActiveToken)\n\n\tc.Log(\"...network error\")\n\tpc.ac.APIHost = \"127.0.0.1:9\"\n\twantError(arvadostest.UserAgreementCollection)\n\twantError(arvadostest.FooBarDirCollection)\n\n\tc.Logf(\"%d checks, %d misses, %d invalid, %d cached\", pc.nChecks, pc.nMisses, pc.nInvalid, len(pc.cache))\n}\n"
  },
  {
    "path": "services/ws/router.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"io\"\n\t\"net/http\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/health\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/net/websocket\"\n)\n\ntype wsConn interface {\n\tio.ReadWriter\n\tRequest() *http.Request\n\tSetReadDeadline(time.Time) error\n\tSetWriteDeadline(time.Time) error\n}\n\ntype router struct {\n\tclient         *arvados.Client\n\tcluster        *arvados.Cluster\n\teventSource    eventSource\n\tnewPermChecker func() permChecker\n\n\thandler   *handler\n\tmux       *http.ServeMux\n\tsetupOnce sync.Once\n\tdone      chan struct{}\n\treg       *prometheus.Registry\n}\n\nfunc (rtr *router) setup() {\n\tmSockets := prometheus.NewGaugeVec(prometheus.GaugeOpts{\n\t\tNamespace: \"arvados\",\n\t\tSubsystem: \"ws\",\n\t\tName:      \"sockets\",\n\t\tHelp:      \"Number of connected sockets\",\n\t}, []string{\"version\"})\n\trtr.reg.MustRegister(mSockets)\n\n\trtr.handler = &handler{\n\t\tPingTimeout: time.Duration(rtr.cluster.API.SendTimeout),\n\t\tQueueSize:   rtr.cluster.API.WebsocketClientEventQueue,\n\t}\n\trtr.mux = http.NewServeMux()\n\trtr.mux.Handle(\"/websocket\", rtr.makeServer(newSessionV0, mSockets.WithLabelValues(\"0\")))\n\trtr.mux.Handle(\"/arvados/v1/events.ws\", rtr.makeServer(newSessionV1, mSockets.WithLabelValues(\"1\")))\n\trtr.mux.Handle(\"/_health/\", &health.Handler{\n\t\tToken:  rtr.cluster.ManagementToken,\n\t\tPrefix: \"/_health/\",\n\t\tRoutes: health.Routes{\n\t\t\t\"db\": rtr.eventSource.DBHealth,\n\t\t},\n\t\tLog: func(r *http.Request, err error) {\n\t\t\tif err != nil {\n\t\t\t\tctxlog.FromContext(r.Context()).WithError(err).Error(\"error\")\n\t\t\t}\n\t\t},\n\t})\n}\n\nfunc exemptFromDeadline(h http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\thttpserver.ExemptFromDeadline(req)\n\t\th.ServeHTTP(w, req)\n\t})\n}\n\nfunc (rtr *router) makeServer(newSession sessionFactory, gauge prometheus.Gauge) http.Handler {\n\tvar connected int64\n\treturn exemptFromDeadline(&websocket.Server{\n\t\tHandshake: func(c *websocket.Config, r *http.Request) error {\n\t\t\treturn nil\n\t\t},\n\t\tHandler: websocket.Handler(func(ws *websocket.Conn) {\n\t\t\tt0 := time.Now()\n\t\t\tlogger := ctxlog.FromContext(ws.Request().Context())\n\t\t\tatomic.AddInt64(&connected, 1)\n\t\t\tgauge.Set(float64(atomic.LoadInt64(&connected)))\n\n\t\t\tstats := rtr.handler.Handle(ws, logger, rtr.eventSource,\n\t\t\t\tfunc(ws wsConn, sendq chan<- interface{}) (session, error) {\n\t\t\t\t\treturn newSession(ws, sendq, rtr.eventSource.DB(), rtr.newPermChecker(), rtr.client)\n\t\t\t\t})\n\n\t\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\t\"elapsed\": time.Now().Sub(t0).Seconds(),\n\t\t\t\t\"stats\":   stats,\n\t\t\t}).Info(\"client disconnected\")\n\t\t\tws.Close()\n\t\t\tatomic.AddInt64(&connected, -1)\n\t\t\tgauge.Set(float64(atomic.LoadInt64(&connected)))\n\t\t}),\n\t})\n}\n\nfunc (rtr *router) ServeHTTP(resp http.ResponseWriter, req *http.Request) {\n\trtr.setupOnce.Do(rtr.setup)\n\trtr.mux.ServeHTTP(httpserver.ResponseControllerShim{ResponseWriter: resp}, req)\n}\n\nfunc (rtr *router) CheckHealth() error 
{\n\trtr.setupOnce.Do(rtr.setup)\n\treturn rtr.eventSource.DBHealth()\n}\n\nfunc (rtr *router) Done() <-chan struct{} {\n\treturn rtr.done\n}\n"
  },
  {
    "path": "services/ws/service.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/cmd\"\n\t\"git.arvados.org/arvados.git/lib/service\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar testMode = false\n\nvar Command cmd.Handler = service.Command(arvados.ServiceNameWebsocket, newHandler)\n\nfunc newHandler(ctx context.Context, cluster *arvados.Cluster, token string, reg *prometheus.Registry) service.Handler {\n\tclient, err := arvados.NewClientFromConfig(cluster)\n\tif err != nil {\n\t\treturn service.ErrorHandler(ctx, cluster, fmt.Errorf(\"error initializing client from cluster config: %s\", err))\n\t}\n\tclient.Timeout = time.Minute\n\teventSource := &pgEventSource{\n\t\tDataSource:   cluster.PostgreSQL.Connection.String(),\n\t\tMaxOpenConns: cluster.PostgreSQL.ConnectionPool,\n\t\tQueueSize:    cluster.API.WebsocketServerEventQueue,\n\t\tLogger:       ctxlog.FromContext(ctx),\n\t\tReg:          reg,\n\t}\n\tdone := make(chan struct{})\n\tgo func() {\n\t\teventSource.Run()\n\t\tctxlog.FromContext(ctx).Error(\"event source stopped\")\n\t\tclose(done)\n\t}()\n\teventSource.WaitReady()\n\tif err := eventSource.DBHealth(); err != nil {\n\t\treturn service.ErrorHandler(ctx, cluster, err)\n\t}\n\trtr := &router{\n\t\tcluster:        cluster,\n\t\tclient:         client,\n\t\teventSource:    eventSource,\n\t\tnewPermChecker: func() permChecker { return newPermChecker(client) },\n\t\tdone:           done,\n\t\treg:            reg,\n\t}\n\treturn rtr\n}\n"
  },
  {
    "path": "services/ws/service_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"flag\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/lib/config\"\n\t\"git.arvados.org/arvados.git/lib/service\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"git.arvados.org/arvados.git/sdk/go/httpserver\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nvar _ = check.Suite(&serviceSuite{})\n\ntype serviceSuite struct {\n\thandler service.Handler\n\treg     *prometheus.Registry\n\tsrv     *httptest.Server\n\tcluster *arvados.Cluster\n\twg      sync.WaitGroup\n}\n\nfunc (s *serviceSuite) SetUpTest(c *check.C) {\n\tvar err error\n\ts.cluster, err = s.testConfig(c)\n\tc.Assert(err, check.IsNil)\n}\n\nfunc (s *serviceSuite) start(c *check.C) {\n\ts.reg = prometheus.NewRegistry()\n\ts.handler = newHandler(context.Background(), s.cluster, \"\", s.reg)\n\tinstrumented := httpserver.Instrument(s.reg, ctxlog.TestLogger(c), s.handler)\n\ts.srv = httptest.NewServer(instrumented.ServeAPI(s.cluster.ManagementToken, instrumented))\n}\n\nfunc (s *serviceSuite) TearDownTest(c *check.C) {\n\tif s.srv != nil {\n\t\ts.srv.Close()\n\t}\n}\n\nfunc (*serviceSuite) testConfig(c *check.C) (*arvados.Cluster, error) {\n\tldr := config.NewLoader(nil, ctxlog.TestLogger(c))\n\tcfg, err := ldr.Load()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcluster, err := cfg.GetCluster(\"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient := arvados.NewClientFromEnv()\n\tcluster.Services.Controller.ExternalURL.Host = client.APIHost\n\tcluster.SystemRootToken = client.AuthToken\n\tcluster.TLS.Insecure = client.Insecure\n\tcluster.PostgreSQL.Connection = testDBConfig()\n\tcluster.PostgreSQL.ConnectionPool = 12\n\tcluster.Services.Websocket.InternalURLs = map[arvados.URL]arvados.ServiceInstance{{Host: \":\"}: {}}\n\tcluster.ManagementToken = arvadostest.ManagementToken\n\treturn cluster, nil\n}\n\n// TestBadDB ensures the server returns an error (instead of panicking\n// or deadlocking) if it can't connect to the database server at\n// startup.\nfunc (s *serviceSuite) TestBadDB(c *check.C) {\n\ts.cluster.PostgreSQL.Connection[\"password\"] = \"1234\"\n\ts.start(c)\n\tresp, err := http.Get(s.srv.URL)\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.StatusCode, check.Equals, http.StatusInternalServerError)\n\tc.Check(s.handler.CheckHealth(), check.ErrorMatches, \"database not connected\")\n\tc.Check(err, check.IsNil)\n\tc.Check(resp.StatusCode, check.Equals, http.StatusInternalServerError)\n}\n\nfunc (s *serviceSuite) TestHealth(c *check.C) {\n\ts.start(c)\n\tfor _, token := range []string{\"\", \"foo\", s.cluster.ManagementToken} {\n\t\treq, err := http.NewRequest(\"GET\", s.srv.URL+\"/_health/ping\", nil)\n\t\tc.Assert(err, check.IsNil)\n\t\tif token != \"\" {\n\t\t\treq.Header.Add(\"Authorization\", \"Bearer \"+token)\n\t\t}\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tc.Check(err, check.IsNil)\n\t\tif token == s.cluster.ManagementToken {\n\t\t\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\t\t\tbuf, err := ioutil.ReadAll(resp.Body)\n\t\t\tc.Check(err, check.IsNil)\n\t\t\tc.Check(string(buf), check.Equals, `{\"health\":\"OK\"}`+\"\\n\")\n\t\t} else 
{\n\t\t\tc.Check(resp.StatusCode, check.Not(check.Equals), http.StatusOK)\n\t\t}\n\t}\n}\n\nfunc (s *serviceSuite) TestMetrics(c *check.C) {\n\ts.start(c)\n\ts.handler.CheckHealth()\n\tfor deadline := time.Now().Add(time.Second); ; {\n\t\treq, err := http.NewRequest(\"GET\", s.srv.URL+\"/metrics\", nil)\n\t\tc.Assert(err, check.IsNil)\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+s.cluster.ManagementToken)\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(resp.StatusCode, check.Equals, http.StatusOK)\n\t\ttext, err := ioutil.ReadAll(resp.Body)\n\t\tc.Check(err, check.IsNil)\n\t\tif strings.Contains(string(text), \"_db_max_connections 0\\n\") {\n\t\t\t// wait for the first db stats update\n\t\t\tif time.Now().After(deadline) {\n\t\t\t\tc.Fatal(\"timed out\")\n\t\t\t}\n\t\t\ttime.Sleep(time.Second / 50)\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(string(text), check.Matches, `(?ms).*\\narvados_ws_db_max_connections 12\\n.*`)\n\t\tc.Check(string(text), check.Matches, `(?ms).*\\narvados_ws_db_open_connections\\{inuse=\"0\"\\} \\d+\\n.*`)\n\t\tc.Check(string(text), check.Matches, `(?ms).*\\narvados_ws_db_open_connections\\{inuse=\"1\"\\} \\d+\\n.*`)\n\t\tbreak\n\t}\n}\n\nfunc (s *serviceSuite) TestHealthDisabled(c *check.C) {\n\ts.cluster.ManagementToken = \"\"\n\ts.start(c)\n\tfor _, token := range []string{\"\", \"foo\", arvadostest.ManagementToken} {\n\t\treq, err := http.NewRequest(\"GET\", s.srv.URL+\"/_health/ping\", nil)\n\t\tc.Assert(err, check.IsNil)\n\t\treq.Header.Add(\"Authorization\", \"Bearer \"+token)\n\t\tresp, err := http.DefaultClient.Do(req)\n\t\tc.Check(err, check.IsNil)\n\t\tc.Check(resp.StatusCode, check.Equals, http.StatusNotFound)\n\t}\n}\n\nfunc (s *serviceSuite) TestLoadLegacyConfig(c *check.C) {\n\tcontent := []byte(`\nClient:\n  APIHost: example.com\n  AuthToken: abcdefg\nPostgres:\n  \"dbname\": \"arvados_production\"\n  \"user\": \"arvados\"\n  \"password\": \"xyzzy\"\n  \"host\": \"localhost\"\n  \"connect_timeout\": \"30\"\n  \"sslmode\": \"require\"\n  \"fallback_application_name\": \"arvados-ws\"\nPostgresPool: 63\nListen: \":8765\"\nLogLevel: \"debug\"\nLogFormat: \"text\"\nPingTimeout: 61s\nClientEventQueue: 62\nServerEventQueue:  5\nManagementToken: qqqqq\n`)\n\ttmpfile, err := ioutil.TempFile(\"\", \"example\")\n\tif err != nil {\n\t\tc.Error(err)\n\t}\n\n\tdefer os.Remove(tmpfile.Name()) // clean up\n\n\tif _, err := tmpfile.Write(content); err != nil {\n\t\tc.Error(err)\n\t}\n\tif err := tmpfile.Close(); err != nil {\n\t\tc.Error(err)\n\n\t}\n\tldr := config.NewLoader(&bytes.Buffer{}, logrus.New())\n\tflagset := flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tldr.SetupFlags(flagset)\n\tflagset.Parse(ldr.MungeLegacyConfigArgs(ctxlog.TestLogger(c), []string{\"-config\", tmpfile.Name()}, \"-legacy-ws-config\"))\n\tcfg, err := ldr.Load()\n\tc.Check(err, check.IsNil)\n\tcluster, err := cfg.GetCluster(\"\")\n\tc.Check(err, check.IsNil)\n\tc.Check(cluster, check.NotNil)\n\n\tc.Check(cluster.Services.Controller.ExternalURL, check.Equals, arvados.URL{Scheme: \"https\", Host: \"example.com\", Path: \"/\"})\n\tc.Check(cluster.SystemRootToken, check.Equals, \"abcdefg\")\n\n\tc.Check(cluster.PostgreSQL.Connection, check.DeepEquals, arvados.PostgreSQLConnection{\n\t\t\"connect_timeout\":           \"30\",\n\t\t\"dbname\":                    \"arvados_production\",\n\t\t\"fallback_application_name\": \"arvados-ws\",\n\t\t\"host\":                      \"localhost\",\n\t\t\"password\":                  \"xyzzy\",\n\t\t\"sslmode\":       
            \"require\",\n\t\t\"user\":                      \"arvados\"})\n\tc.Check(cluster.PostgreSQL.ConnectionPool, check.Equals, 63)\n\tc.Check(cluster.Services.Websocket.InternalURLs[arvados.URL{Host: \":8765\"}], check.NotNil)\n\tc.Check(cluster.SystemLogs.LogLevel, check.Equals, \"debug\")\n\tc.Check(cluster.SystemLogs.Format, check.Equals, \"text\")\n\tc.Check(cluster.API.SendTimeout, check.Equals, arvados.Duration(61*time.Second))\n\tc.Check(cluster.API.WebsocketClientEventQueue, check.Equals, 62)\n\tc.Check(cluster.API.WebsocketServerEventQueue, check.Equals, 5)\n\tc.Check(cluster.ManagementToken, check.Equals, \"qqqqq\")\n}\n"
  },
  {
    "path": "services/ws/session.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"database/sql\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\ntype session interface {\n\t// Receive processes a message received from the client. If a\n\t// non-nil error is returned, the connection will be\n\t// terminated.\n\tReceive([]byte) error\n\n\t// Filter returns true if the event should be queued for\n\t// sending to the client. It should return as fast as\n\t// possible, and must not block.\n\tFilter(*event) bool\n\n\t// EventMessage encodes the given event (from the front of the\n\t// queue) into a form suitable to send to the client. If a\n\t// non-nil error is returned, the connection is terminated. If\n\t// the returned buffer is empty, nothing is sent to the client\n\t// and the event is not counted in statistics.\n\t//\n\t// Unlike Filter, EventMessage can block without affecting\n\t// other connections. If EventMessage is slow, additional\n\t// incoming events will be queued. If the event queue fills\n\t// up, the connection will be dropped.\n\tEventMessage(*event) ([]byte, error)\n}\n\ntype sessionFactory func(wsConn, chan<- interface{}, *sql.DB, permChecker, *arvados.Client) (session, error)\n"
  },
  {
    "path": "services/ws/session_v0.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"database/sql\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"reflect\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nvar (\n\terrQueueFull   = errors.New(\"client queue full\")\n\terrFrameTooBig = errors.New(\"frame too big\")\n\n\t// Send clients only these keys from the\n\t// log.properties.old_attributes and\n\t// log.properties.new_attributes hashes.\n\tsendObjectAttributes = []string{\n\t\t\"is_trashed\",\n\t\t\"name\",\n\t\t\"owner_uuid\",\n\t\t\"portable_data_hash\",\n\t\t\"requesting_container_uuid\",\n\t\t\"state\",\n\t}\n\n\tv0subscribeOK   = []byte(`{\"status\":200}`)\n\tv0subscribeFail = []byte(`{\"status\":400}`)\n)\n\ntype v0session struct {\n\tac            *arvados.Client\n\tws            wsConn\n\tsendq         chan<- interface{}\n\tdb            *sql.DB\n\tpermChecker   permChecker\n\tsubscriptions []v0subscribe\n\tlastMsgID     uint64\n\tlog           logrus.FieldLogger\n\tmtx           sync.Mutex\n\tsetupOnce     sync.Once\n}\n\n// newSessionV0 returns a v0 session: a partial port of the Rails/puma\n// implementation, with just enough functionality to support Workbench\n// and arv-mount.\nfunc newSessionV0(ws wsConn, sendq chan<- interface{}, db *sql.DB, pc permChecker, ac *arvados.Client) (session, error) {\n\tsess := &v0session{\n\t\tsendq:       sendq,\n\t\tws:          ws,\n\t\tdb:          db,\n\t\tac:          ac,\n\t\tpermChecker: pc,\n\t\tlog:         ctxlog.FromContext(ws.Request().Context()),\n\t}\n\n\terr := ws.Request().ParseForm()\n\tif err != nil {\n\t\tsess.log.WithError(err).Error(\"ParseForm failed\")\n\t\treturn nil, err\n\t}\n\ttoken := ws.Request().Form.Get(\"api_token\")\n\tsess.permChecker.SetToken(token)\n\tsess.log.WithField(\"token\", token).Debug(\"set token\")\n\n\treturn sess, nil\n}\n\nfunc (sess *v0session) Receive(buf []byte) error {\n\tvar sub v0subscribe\n\tif err := json.Unmarshal(buf, &sub); err != nil {\n\t\tsess.log.WithError(err).Info(\"invalid message from client\")\n\t} else if sub.Method == \"subscribe\" {\n\t\tsub.prepare(sess)\n\t\tsess.log.WithField(\"sub\", sub).Debug(\"sub prepared\")\n\t\tsess.sendq <- v0subscribeOK\n\t\tsess.mtx.Lock()\n\t\tsess.subscriptions = append(sess.subscriptions, sub)\n\t\tsess.mtx.Unlock()\n\t\tsub.sendOldEvents(sess)\n\t\treturn nil\n\t} else if sub.Method == \"unsubscribe\" {\n\t\tsess.mtx.Lock()\n\t\tfound := false\n\t\tfor i, s := range sess.subscriptions {\n\t\t\tif !reflect.DeepEqual(s.Filters, sub.Filters) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcopy(sess.subscriptions[i:], sess.subscriptions[i+1:])\n\t\t\tsess.subscriptions = sess.subscriptions[:len(sess.subscriptions)-1]\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t\tsess.mtx.Unlock()\n\t\tsess.log.WithField(\"sub\", sub).WithField(\"found\", found).Debug(\"unsubscribe\")\n\t\tif found {\n\t\t\tsess.sendq <- v0subscribeOK\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\tsess.log.WithField(\"Method\", sub.Method).Info(\"unknown method\")\n\t}\n\tsess.sendq <- v0subscribeFail\n\treturn nil\n}\n\nfunc (sess *v0session) EventMessage(e *event) ([]byte, error) {\n\tdetail := e.Detail()\n\tif detail == nil {\n\t\treturn nil, nil\n\t}\n\n\tvar permTarget string\n\tif detail.EventType == \"delete\" {\n\t\t// It's pointless to check permission by reading\n\t\t// ObjectUUID if it has 
just been deleted, but if the\n\t\t// client has permission on the parent project then\n\t\t// it's OK to send the event.\n\t\tpermTarget = detail.ObjectOwnerUUID\n\t} else {\n\t\tpermTarget = detail.ObjectUUID\n\t}\n\tok, err := sess.permChecker.Check(sess.ws.Request().Context(), permTarget)\n\tif err != nil || !ok {\n\t\treturn nil, err\n\t}\n\n\tkind, _ := sess.ac.KindForUUID(detail.ObjectUUID)\n\tmsg := map[string]interface{}{\n\t\t\"msgID\":             atomic.AddUint64(&sess.lastMsgID, 1),\n\t\t\"id\":                detail.ID,\n\t\t\"uuid\":              detail.UUID,\n\t\t\"object_uuid\":       detail.ObjectUUID,\n\t\t\"object_owner_uuid\": detail.ObjectOwnerUUID,\n\t\t\"object_kind\":       kind,\n\t\t\"event_type\":        detail.EventType,\n\t\t\"event_at\":          detail.EventAt,\n\t}\n\tif detail.Properties[\"text\"] != nil {\n\t\tmsg[\"properties\"] = detail.Properties\n\t} else {\n\t\tmsgProps := map[string]map[string]interface{}{}\n\t\tfor _, ak := range []string{\"old_attributes\", \"new_attributes\"} {\n\t\t\teventAttrs, ok := detail.Properties[ak].(map[string]interface{})\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmsgAttrs := map[string]interface{}{}\n\t\t\tfor _, k := range sendObjectAttributes {\n\t\t\t\tif v, ok := eventAttrs[k]; ok {\n\t\t\t\t\tmsgAttrs[k] = v\n\t\t\t\t}\n\t\t\t}\n\t\t\tmsgProps[ak] = msgAttrs\n\t\t}\n\t\tmsg[\"properties\"] = msgProps\n\t}\n\treturn json.Marshal(msg)\n}\n\nfunc (sess *v0session) Filter(e *event) bool {\n\tsess.mtx.Lock()\n\tdefer sess.mtx.Unlock()\n\tfor _, sub := range sess.subscriptions {\n\t\tif sub.match(sess, e) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (sub *v0subscribe) sendOldEvents(sess *v0session) {\n\tif sub.LastLogID == 0 {\n\t\treturn\n\t}\n\tsess.log.WithField(\"LastLogID\", sub.LastLogID).Debug(\"sendOldEvents\")\n\t// Here we do a \"select id\" query and queue an event for every\n\t// log since the given ID, then use (*event)Detail() to\n\t// retrieve the whole row and decide whether to send it. This\n\t// approach is very inefficient if the subscriber asks for\n\t// last_log_id==1, even if the filters end up matching very\n\t// few events.\n\t//\n\t// To mitigate this, filter on \"created > 10 minutes ago\" when\n\t// retrieving the list of old event IDs to consider.\n\trows, err := sess.db.Query(\n\t\t`SELECT id FROM logs WHERE id > $1 AND created_at > $2 ORDER BY id`,\n\t\tsub.LastLogID,\n\t\ttime.Now().UTC().Add(-10*time.Minute).Format(time.RFC3339Nano))\n\tif err != nil {\n\t\tsess.log.WithError(err).Error(\"sendOldEvents db.Query failed\")\n\t\treturn\n\t}\n\n\tvar ids []int64\n\tfor rows.Next() {\n\t\tvar id int64\n\t\terr := rows.Scan(&id)\n\t\tif err != nil {\n\t\t\tsess.log.WithError(err).Error(\"sendOldEvents row Scan failed\")\n\t\t\tcontinue\n\t\t}\n\t\tids = append(ids, id)\n\t}\n\tif err := rows.Err(); err != nil {\n\t\tsess.log.WithError(err).Error(\"sendOldEvents db.Query failed\")\n\t}\n\trows.Close()\n\n\tfor _, id := range ids {\n\t\tfor len(sess.sendq)*2 > cap(sess.sendq) {\n\t\t\t// Ugly... 
but if we fill up the whole client\n\t\t\t// queue with a backlog of old events, a\n\t\t\t// single new event will overflow it and\n\t\t\t// terminate the connection, and then the\n\t\t\t// client will probably reconnect and do the\n\t\t\t// same thing all over again.\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\tif sess.ws.Request().Context().Err() != nil {\n\t\t\t\t// Session terminated while we were sleeping\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tnow := time.Now()\n\t\te := &event{\n\t\t\tLogID:    id,\n\t\t\tReceived: now,\n\t\t\tReady:    now,\n\t\t\tDB:       sess.db,\n\t\t\tLogger:   sess.log,\n\t\t}\n\t\tif sub.match(sess, e) {\n\t\t\tselect {\n\t\t\tcase sess.sendq <- e:\n\t\t\tcase <-sess.ws.Request().Context().Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype v0subscribe struct {\n\tMethod    string\n\tFilters   []v0filter\n\tLastLogID int64 `json:\"last_log_id\"`\n\n\tfuncs []func(*event) bool\n}\n\ntype v0filter [3]interface{}\n\nfunc (sub *v0subscribe) match(sess *v0session, e *event) bool {\n\tlog := sess.log.WithField(\"LogID\", e.LogID)\n\tdetail := e.Detail()\n\tif detail == nil {\n\t\tlog.Error(\"match failed, no detail\")\n\t\treturn false\n\t}\n\tlog = log.WithField(\"funcs\", len(sub.funcs))\n\tfor i, f := range sub.funcs {\n\t\tif !f(e) {\n\t\t\tlog.WithField(\"func\", i).Debug(\"match failed\")\n\t\t\treturn false\n\t\t}\n\t}\n\tlog.Debug(\"match passed\")\n\treturn true\n}\n\nfunc (sub *v0subscribe) prepare(sess *v0session) {\n\tfor _, f := range sub.Filters {\n\t\tif len(f) != 3 {\n\t\t\tcontinue\n\t\t}\n\t\tif col, ok := f[0].(string); ok && col == \"event_type\" {\n\t\t\top, ok := f[1].(string)\n\t\t\tif !ok || op != \"in\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tarr, ok := f[2].([]interface{})\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar strs []string\n\t\t\tfor _, s := range arr {\n\t\t\t\tif s, ok := s.(string); ok {\n\t\t\t\t\tstrs = append(strs, s)\n\t\t\t\t}\n\t\t\t}\n\t\t\tsub.funcs = append(sub.funcs, func(e *event) bool {\n\t\t\t\tfor _, s := range strs {\n\t\t\t\t\tif s == e.Detail().EventType {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t})\n\t\t} else if ok && col == \"created_at\" {\n\t\t\top, ok := f[1].(string)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttstr, ok := f[2].(string)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt, err := time.Parse(time.RFC3339Nano, tstr)\n\t\t\tif err != nil {\n\t\t\t\tsess.log.WithField(\"data\", tstr).WithError(err).Info(\"time.Parse failed\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar fn func(*event) bool\n\t\t\tswitch op {\n\t\t\tcase \">=\":\n\t\t\t\tfn = func(e *event) bool {\n\t\t\t\t\treturn !e.Detail().CreatedAt.Before(t)\n\t\t\t\t}\n\t\t\tcase \"<=\":\n\t\t\t\tfn = func(e *event) bool {\n\t\t\t\t\treturn !e.Detail().CreatedAt.After(t)\n\t\t\t\t}\n\t\t\tcase \">\":\n\t\t\t\tfn = func(e *event) bool {\n\t\t\t\t\treturn e.Detail().CreatedAt.After(t)\n\t\t\t\t}\n\t\t\tcase \"<\":\n\t\t\t\tfn = func(e *event) bool {\n\t\t\t\t\treturn e.Detail().CreatedAt.Before(t)\n\t\t\t\t}\n\t\t\tcase \"=\":\n\t\t\t\tfn = func(e *event) bool {\n\t\t\t\t\treturn e.Detail().CreatedAt.Equal(t)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tsess.log.WithField(\"operator\", op).Info(\"bogus operator\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsub.funcs = append(sub.funcs, fn)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "services/ws/session_v0_test.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math/rand\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n\t\"git.arvados.org/arvados.git/sdk/go/arvadostest\"\n\t\"git.arvados.org/arvados.git/sdk/go/ctxlog\"\n\t\"golang.org/x/net/websocket\"\n\tcheck \"gopkg.in/check.v1\"\n)\n\nfunc init() {\n\tif os.Getenv(\"ARVADOS_DEBUG\") != \"\" {\n\t\tctxlog.SetLevel(\"debug\")\n\t}\n}\n\nvar _ = check.Suite(&v0Suite{})\n\ntype v0Suite struct {\n\tserviceSuite serviceSuite\n\ttoken        string\n\ttoDelete     []string\n\twg           sync.WaitGroup\n\tignoreLogID  int64\n}\n\nfunc (s *v0Suite) SetUpTest(c *check.C) {\n\ts.serviceSuite.SetUpTest(c)\n\ts.serviceSuite.start(c)\n\n\ts.token = arvadostest.ActiveToken\n\ts.ignoreLogID = s.lastLogID(c)\n}\n\nfunc (s *v0Suite) TearDownTest(c *check.C) {\n\ts.wg.Wait()\n\ts.serviceSuite.TearDownTest(c)\n}\n\nfunc (s *v0Suite) TearDownSuite(c *check.C) {\n\ts.deleteTestObjects(c)\n}\n\nfunc (s *v0Suite) deleteTestObjects(c *check.C) {\n\tac := arvados.NewClientFromEnv()\n\tac.AuthToken = arvadostest.AdminToken\n\tfor _, path := range s.toDelete {\n\t\terr := ac.RequestAndDecode(nil, \"DELETE\", path, nil, nil)\n\t\tc.Check(err, check.IsNil)\n\t}\n\ts.toDelete = nil\n}\n\nfunc (s *v0Suite) TestFilters(c *check.C) {\n\tconn, r, w, err := s.testClient()\n\tc.Assert(err, check.IsNil)\n\tdefer conn.Close()\n\n\tcmd := func(method, eventType string, status int) {\n\t\tc.Check(w.Encode(map[string]interface{}{\n\t\t\t\"method\":  method,\n\t\t\t\"filters\": [][]interface{}{{\"event_type\", \"in\", []string{eventType}}},\n\t\t}), check.IsNil)\n\t\ts.expectStatus(c, r, status)\n\t}\n\tcmd(\"subscribe\", \"update\", 200)\n\tcmd(\"subscribe\", \"update\", 200)\n\tcmd(\"subscribe\", \"create\", 200)\n\tcmd(\"subscribe\", \"update\", 200)\n\tcmd(\"unsubscribe\", \"blip\", 400)\n\tcmd(\"unsubscribe\", \"create\", 200)\n\tcmd(\"unsubscribe\", \"update\", 200)\n\n\tgo s.emitEvents(c, nil, nil)\n\tlg := s.expectLog(c, r)\n\tc.Check(lg.EventType, check.Equals, \"update\")\n\n\tcmd(\"unsubscribe\", \"update\", 200)\n\tcmd(\"unsubscribe\", \"update\", 200)\n\tcmd(\"unsubscribe\", \"update\", 400)\n}\n\nfunc (s *v0Suite) TestLastLogID(c *check.C) {\n\tlastID := s.lastLogID(c)\n\n\tcheckLogs := func(r *json.Decoder, uuid string) {\n\t\tfor _, etype := range []string{\"create\", \"blip\", \"update\"} {\n\t\t\tlg := s.expectLog(c, r)\n\t\t\tfor lg.ObjectUUID != uuid {\n\t\t\t\tlg = s.expectLog(c, r)\n\t\t\t}\n\t\t\tc.Check(lg.EventType, check.Equals, etype)\n\t\t}\n\t}\n\n\t// Connecting connEarly (before sending the early events) lets\n\t// us confirm all of the \"early\" events have already passed\n\t// through the server.\n\tconnEarly, rEarly, wEarly, err := s.testClient()\n\tc.Assert(err, check.IsNil)\n\tdefer connEarly.Close()\n\tc.Check(wEarly.Encode(map[string]interface{}{\n\t\t\"method\": \"subscribe\",\n\t}), check.IsNil)\n\ts.expectStatus(c, rEarly, 200)\n\n\t// Send the early events.\n\tuuidChan := make(chan string, 1)\n\ts.emitEvents(c, uuidChan, nil)\n\tuuidEarly := <-uuidChan\n\n\t// Wait for the early events to pass through.\n\tcheckLogs(rEarly, uuidEarly)\n\n\t// Connect the client that wants to get old events via\n\t// last_log_id.\n\tconn, r, w, err := s.testClient()\n\tc.Assert(err, 
check.IsNil)\n\tdefer conn.Close()\n\n\tc.Check(w.Encode(map[string]interface{}{\n\t\t\"method\":      \"subscribe\",\n\t\t\"last_log_id\": lastID,\n\t}), check.IsNil)\n\ts.expectStatus(c, r, 200)\n\n\tcheckLogs(r, uuidEarly)\n\ts.emitEvents(c, uuidChan, nil)\n\tcheckLogs(r, <-uuidChan)\n}\n\nfunc (s *v0Suite) TestPermission(c *check.C) {\n\tconn, r, w, err := s.testClient()\n\tc.Assert(err, check.IsNil)\n\tdefer conn.Close()\n\n\tc.Check(w.Encode(map[string]interface{}{\n\t\t\"method\": \"subscribe\",\n\t}), check.IsNil)\n\ts.expectStatus(c, r, 200)\n\n\tuuidChan := make(chan string, 2)\n\tgo func() {\n\t\ts.token = arvadostest.AdminToken\n\t\ts.emitEvents(c, uuidChan, nil)\n\t\ts.token = arvadostest.ActiveToken\n\t\ts.emitEvents(c, uuidChan, nil)\n\t}()\n\n\twrongUUID := <-uuidChan\n\trightUUID := <-uuidChan\n\tlg := s.expectLog(c, r)\n\tfor lg.ObjectUUID != rightUUID {\n\t\tc.Check(lg.ObjectUUID, check.Not(check.Equals), wrongUUID)\n\t\tlg = s.expectLog(c, r)\n\t}\n}\n\n// Two users create private objects; admin deletes both objects; each\n// user receives a \"delete\" event for their own object (not for the\n// other user's object).\nfunc (s *v0Suite) TestEventTypeDelete(c *check.C) {\n\tclients := []struct {\n\t\ttoken string\n\t\tuuid  string\n\t\tconn  *websocket.Conn\n\t\tr     *json.Decoder\n\t\tw     *json.Encoder\n\t}{{token: arvadostest.ActiveToken}, {token: arvadostest.SpectatorToken}}\n\tfor i := range clients {\n\t\tuuidChan := make(chan string, 1)\n\t\ts.token = clients[i].token\n\t\ts.emitEvents(c, uuidChan, nil)\n\t\tclients[i].uuid = <-uuidChan\n\n\t\tvar err error\n\t\tclients[i].conn, clients[i].r, clients[i].w, err = s.testClient()\n\t\tc.Assert(err, check.IsNil)\n\n\t\tc.Check(clients[i].w.Encode(map[string]interface{}{\n\t\t\t\"method\": \"subscribe\",\n\t\t}), check.IsNil)\n\t\ts.expectStatus(c, clients[i].r, 200)\n\t}\n\n\ts.ignoreLogID = s.lastLogID(c)\n\ts.deleteTestObjects(c)\n\n\tfor _, client := range clients {\n\t\tlg := s.expectLog(c, client.r)\n\t\tc.Check(lg.ObjectUUID, check.Equals, client.uuid)\n\t\tc.Check(lg.EventType, check.Equals, \"delete\")\n\t}\n}\n\nfunc (s *v0Suite) TestEventPropertiesFields(c *check.C) {\n\tac := arvados.NewClientFromEnv()\n\tac.AuthToken = s.token\n\n\tconn, r, w, err := s.testClient()\n\tc.Assert(err, check.IsNil)\n\tdefer conn.Close()\n\n\tc.Check(w.Encode(map[string]interface{}{\n\t\t\"method\":  \"subscribe\",\n\t\t\"filters\": [][]string{{\"object_uuid\", \"=\", arvadostest.RunningContainerUUID}},\n\t}), check.IsNil)\n\ts.expectStatus(c, r, 200)\n\n\terr = ac.RequestAndDecode(nil, \"POST\", \"arvados/v1/logs\", s.jsonBody(\"log\", map[string]interface{}{\n\t\t\"object_uuid\": arvadostest.RunningContainerUUID,\n\t\t\"event_type\":  \"update\",\n\t\t\"properties\": map[string]interface{}{\n\t\t\t\"new_attributes\": map[string]interface{}{\n\t\t\t\t\"name\":                      \"namevalue\",\n\t\t\t\t\"requesting_container_uuid\": \"uuidvalue\",\n\t\t\t\t\"state\":                     \"statevalue\",\n\t\t\t},\n\t\t},\n\t}), nil)\n\tc.Assert(err, check.IsNil)\n\n\tlg := s.expectLog(c, r)\n\tc.Check(lg.ObjectUUID, check.Equals, arvadostest.RunningContainerUUID)\n\tc.Check(lg.EventType, check.Equals, \"update\")\n\tc.Check(lg.Properties[\"new_attributes\"].(map[string]interface{})[\"requesting_container_uuid\"], check.Equals, \"uuidvalue\")\n\tc.Check(lg.Properties[\"new_attributes\"].(map[string]interface{})[\"name\"], check.Equals, 
\"namevalue\")\n\tc.Check(lg.Properties[\"new_attributes\"].(map[string]interface{})[\"state\"], check.Equals, \"statevalue\")\n}\n\n// Trashing/deleting a collection produces an \"update\" event with\n// properties[\"new_attributes\"][\"is_trashed\"] == true.\nfunc (s *v0Suite) TestTrashedCollection(c *check.C) {\n\tac := arvados.NewClientFromEnv()\n\tac.AuthToken = s.token\n\n\tvar coll arvados.Collection\n\terr := ac.RequestAndDecode(&coll, \"POST\", \"arvados/v1/collections\", s.jsonBody(\"collection\", `{\"manifest_text\":\"\"}`), map[string]interface{}{\"ensure_unique_name\": true})\n\tc.Assert(err, check.IsNil)\n\ts.ignoreLogID = s.lastLogID(c)\n\n\tconn, r, w, err := s.testClient()\n\tc.Assert(err, check.IsNil)\n\tdefer conn.Close()\n\n\tc.Check(w.Encode(map[string]interface{}{\n\t\t\"method\": \"subscribe\",\n\t}), check.IsNil)\n\ts.expectStatus(c, r, 200)\n\n\terr = ac.RequestAndDecode(nil, \"DELETE\", \"arvados/v1/collections/\"+coll.UUID, nil, nil)\n\tc.Assert(err, check.IsNil)\n\n\tlg := s.expectLog(c, r)\n\tc.Check(lg.ObjectUUID, check.Equals, coll.UUID)\n\tc.Check(lg.EventType, check.Equals, \"update\")\n\tc.Check(lg.Properties[\"old_attributes\"].(map[string]interface{})[\"is_trashed\"], check.Equals, false)\n\tc.Check(lg.Properties[\"new_attributes\"].(map[string]interface{})[\"is_trashed\"], check.Equals, true)\n}\n\nfunc (s *v0Suite) TestSendBadJSON(c *check.C) {\n\tconn, r, w, err := s.testClient()\n\tc.Assert(err, check.IsNil)\n\tdefer conn.Close()\n\n\tc.Check(w.Encode(map[string]interface{}{\n\t\t\"method\": \"subscribe\",\n\t}), check.IsNil)\n\ts.expectStatus(c, r, 200)\n\n\t_, err = fmt.Fprint(conn, \"^]beep\\n\")\n\tc.Check(err, check.IsNil)\n\ts.expectStatus(c, r, 400)\n\n\tc.Check(w.Encode(map[string]interface{}{\n\t\t\"method\": \"subscribe\",\n\t}), check.IsNil)\n\ts.expectStatus(c, r, 200)\n}\n\nfunc (s *v0Suite) TestSubscribe(c *check.C) {\n\tconn, r, w, err := s.testClient()\n\tc.Assert(err, check.IsNil)\n\tdefer conn.Close()\n\n\ts.emitEvents(c, nil, nil)\n\n\terr = w.Encode(map[string]interface{}{\"21\": 12})\n\tc.Check(err, check.IsNil)\n\ts.expectStatus(c, r, 400)\n\n\terr = w.Encode(map[string]interface{}{\"method\": \"subscribe\", \"filters\": []string{}})\n\tc.Check(err, check.IsNil)\n\ts.expectStatus(c, r, 200)\n\n\tuuidChan := make(chan string, 1)\n\tgo s.emitEvents(c, uuidChan, nil)\n\tuuid := <-uuidChan\n\n\tfor _, etype := range []string{\"create\", \"blip\", \"update\"} {\n\t\tlg := s.expectLog(c, r)\n\t\tfor lg.ObjectUUID != uuid {\n\t\t\tlg = s.expectLog(c, r)\n\t\t}\n\t\tc.Check(lg.EventType, check.Equals, etype)\n\t}\n}\n\nfunc (s *v0Suite) TestManyEventsAndSubscribers(c *check.C) {\n\t// Frequent slow listener pings create the conditions for a\n\t// deadlock issue with the lib/pq example listener usage.\n\t//\n\t// Specifically: a lib/pq/example/listen-style event loop can\n\t// deadlock if enough (~32) server notifications arrive after\n\t// the event loop decides to call Ping (e.g., while\n\t// listener.Ping() is waiting for a response from the server,\n\t// or in the time.Sleep() invoked by testSlowPing).\n\t//\n\t// (*ListenerConn)listenerConnLoop() doesn't see the server's\n\t// ping response until it finishes sending a previous\n\t// notification through its internal queue to\n\t// (*Listener)listenerConnLoop(), which is blocked on sending\n\t// to our Notify channel, which is blocked on waiting for the\n\t// Ping response.\n\tdefer func(d time.Duration) {\n\t\tlistenerPingInterval = d\n\t\ttestSlowPing = 
false\n\t}(listenerPingInterval)\n\tlistenerPingInterval = time.Second / 2\n\ttestSlowPing = true\n\t// Restart the test server in order to get one that uses our\n\t// test globals.\n\ts.TearDownTest(c)\n\ts.SetUpTest(c)\n\n\tdone := make(chan struct{})\n\tdefer close(done)\n\tgo s.emitEvents(c, nil, done)\n\n\t// We will expect to receive at least one event during each\n\t// one-second interval while the test is running.\n\tt0 := time.Now()\n\tseconds := 10\n\treceivedPerSecond := make([]int64, seconds)\n\n\tctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Duration(seconds)*time.Second))\n\tdefer cancel()\n\tfor clientID := 0; clientID < 100; clientID++ {\n\t\tclientID := clientID\n\t\tgo func() {\n\t\t\tfor ctx.Err() == nil {\n\t\t\t\tconn, r, w, err := s.testClient()\n\t\t\t\tif ctx.Err() != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tc.Assert(err, check.IsNil)\n\t\t\t\tdefer conn.Close()\n\t\t\t\terr = w.Encode(map[string]interface{}{\"method\": \"subscribe\", \"filters\": []string{}})\n\t\t\t\tif ctx.Err() != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\ts.expectStatus(c, r, 200)\n\t\t\t\tfor {\n\t\t\t\t\tif clientID%10 == 0 {\n\t\t\t\t\t\t// slow client\n\t\t\t\t\t\ttime.Sleep(time.Second / 20)\n\t\t\t\t\t} else if rand.Float64() < 0.01 {\n\t\t\t\t\t\t// disconnect+reconnect\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tvar lg arvados.Log\n\t\t\t\t\terr := r.Decode(&lg)\n\t\t\t\t\tif ctx.Err() != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif errors.Is(err, io.EOF) {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tc.Check(err, check.IsNil)\n\t\t\t\t\tif i := int(time.Since(t0) / time.Second); i < seconds {\n\t\t\t\t\t\tatomic.AddInt64(&receivedPerSecond[i], 1)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tconn.Close()\n\t\t\t}\n\t\t}()\n\t}\n\t<-ctx.Done()\n\tc.Log(\"done\")\n\tfor i, n := range receivedPerSecond {\n\t\tc.Logf(\"t<%d n=%d\", i+1, n)\n\t\tc.Check(int64(n), check.Not(check.Equals), int64(0))\n\t}\n}\n\n// Generate some events by creating and updating a workflow object,\n// and creating a custom log entry (event_type=\"blip\") about the newly\n// created workflow.\n//\n// If uuidChan is not nil, send the new workflow UUID to uuidChan as\n// soon as it's known.\n//\n// If done is not nil, keep generating events until done receives or\n// closes.\nfunc (s *v0Suite) emitEvents(c *check.C, uuidChan chan<- string, done <-chan struct{}) {\n\ts.wg.Add(1)\n\tdefer s.wg.Done()\n\n\tac := arvados.NewClientFromEnv()\n\tac.AuthToken = s.token\n\twf := &arvados.Workflow{\n\t\tName: \"ws_test\",\n\t}\n\terr := ac.RequestAndDecode(wf, \"POST\", \"arvados/v1/workflows\", s.jsonBody(\"workflow\", `{\"name\":\"ws_test\"}`), map[string]interface{}{\"ensure_unique_name\": true})\n\tc.Assert(err, check.IsNil)\n\ts.toDelete = append(s.toDelete, \"arvados/v1/workflows/\"+wf.UUID)\n\tif uuidChan != nil {\n\t\tuuidChan <- wf.UUID\n\t}\n\tfor i := 0; ; i++ {\n\t\tlg := &arvados.Log{}\n\t\terr = ac.RequestAndDecode(lg, \"POST\", \"arvados/v1/logs\", s.jsonBody(\"log\", map[string]interface{}{\n\t\t\t\"object_uuid\": wf.UUID,\n\t\t\t\"event_type\":  \"blip\",\n\t\t\t\"properties\": map[string]interface{}{\n\t\t\t\t\"beep\": \"boop\",\n\t\t\t},\n\t\t}), nil)\n\t\ts.toDelete = append(s.toDelete, \"arvados/v1/logs/\"+lg.UUID)\n\t\tif done != nil {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\tdefault:\n\t\t\t\tif i%50 == 0 {\n\t\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tbreak\n\t}\n\tif err != nil 
{\n\t\tpanic(err)\n\t}\n\terr = ac.RequestAndDecode(wf, \"PUT\", \"arvados/v1/workflows/\"+wf.UUID, s.jsonBody(\"workflow\", `{\"name\":\"ws_test\"}`), nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (s *v0Suite) jsonBody(rscName string, ob interface{}) io.Reader {\n\tval, ok := ob.(string)\n\tif !ok {\n\t\tj, err := json.Marshal(ob)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tval = string(j)\n\t}\n\tv := url.Values{}\n\tv[rscName] = []string{val}\n\treturn bytes.NewBufferString(v.Encode())\n}\n\nfunc (s *v0Suite) expectStatus(c *check.C, r *json.Decoder, status int) {\n\tmsg := map[string]interface{}{}\n\tc.Check(r.Decode(&msg), check.IsNil)\n\tc.Check(int(msg[\"status\"].(float64)), check.Equals, status)\n}\n\nfunc (s *v0Suite) expectLog(c *check.C, r *json.Decoder) *arvados.Log {\n\tlg := &arvados.Log{}\n\tok := make(chan struct{})\n\tgo func() {\n\t\tdefer close(ok)\n\t\tfor lg.ID <= s.ignoreLogID {\n\t\t\tc.Assert(r.Decode(lg), check.IsNil)\n\t\t}\n\t}()\n\tselect {\n\tcase <-time.After(10 * time.Second):\n\t\tc.Error(\"timed out\")\n\t\tc.FailNow()\n\t\treturn lg\n\tcase <-ok:\n\t\treturn lg\n\t}\n}\n\nfunc (s *v0Suite) testClient() (*websocket.Conn, *json.Decoder, *json.Encoder, error) {\n\tsrv := s.serviceSuite.srv\n\tconn, err := websocket.Dial(strings.Replace(srv.URL, \"http\", \"ws\", 1)+\"/websocket?api_token=\"+s.token, \"\", srv.URL)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tw := json.NewEncoder(conn)\n\tr := json.NewDecoder(conn)\n\treturn conn, r, w, nil\n}\n\nfunc (s *v0Suite) lastLogID(c *check.C) int64 {\n\tvar lastID int64\n\tc.Assert(testDB().QueryRow(`SELECT MAX(id) FROM logs`).Scan(&lastID), check.IsNil)\n\treturn lastID\n}\n"
  },
  {
    "path": "services/ws/session_v1.go",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\npackage ws\n\nimport (\n\t\"database/sql\"\n\t\"errors\"\n\n\t\"git.arvados.org/arvados.git/sdk/go/arvados\"\n)\n\n// newSessionV1 returns a v1 session -- see\n// https://dev.arvados.org/projects/arvados/wiki/Websocket_server\nfunc newSessionV1(ws wsConn, sendq chan<- interface{}, db *sql.DB, pc permChecker, ac *arvados.Client) (session, error) {\n\treturn nil, errors.New(\"Not implemented\")\n}\n"
  },
  {
    "path": "tools/ansible/README.md",
    "content": "# Arvados Ansible Playbooks\n\n<!--\nCopyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: Apache-2.0\n-->\n\nThis directory includes Ansible playbooks and supporting infrastructure to automate various aspects of Arvados deployment.\n\n## Installing Ansible\n\n### Install with pipx\n\nInstalling with pipx is the recommended method: it automatically manages a virtualenv for you and adds installed tools to your `$PATH`. Install `pipx` from your distribution, then run:\n\n      ./install-ansible.sh\n\n### Install to your own virtualenv\n\nIf you need to keep this Ansible install isolated, you can install it to a virtualenv you set up. You'll need to activate this virtualenv when you want to run Arvados Ansible playbooks.\n\nMake sure you have Python and its standard `venv` module installed from your distribution. You should be able to run `python3 -m venv --help`. (On Debian/Ubuntu, `apt install python3-venv`.) Then run:\n\n      ./install-ansible.sh VENV_DIR\n\n`VENV_DIR` can be any path you like. If you already have a virtualenv activated, you can install inside it by running:\n\n      ./install-ansible.sh -V\n\n### Manual installation\n\nIf you need to orchestrate your own install, you must install the Python packages listed in `requirements.txt`, then the Ansible collections listed in `requirements.yml`.\n"
  },
  {
    "path": "tools/ansible/build-compute-image.yml",
    "content": "#!/usr/bin/env ansible-playbook\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Bootstrap node\n  hosts: default\n  gather_facts: no\n  tasks:\n    - name: Load Arvados configuration file\n      no_log: yes\n      delegate_to: localhost\n      ansible.builtin.include_vars:\n        name: arvados_config\n        file: \"{{ arvados_config_file }}\"\n    - name: Load Arvados cluster configuration\n      no_log: yes\n      ansible.builtin.set_fact:\n        arvados_cluster: \"{{ arvados_config.Clusters[arvados_cluster_id] }}\"\n      failed_when: arvados_cluster is undefined\n    - name: Get Crunch dispatch public key\n      no_log: yes\n      when: arvados_cluster.Containers.DispatchPrivateKey is defined and arvados_cluster.Containers.DispatchPrivateKey is truthy\n      delegate_to: localhost\n      block:\n        # `ssh-keygen` supports reading stdin for some operations with `-f -`,\n        # but `-y` is not one of those operations as of April 2025.\n        # We MUST have the dispatch private key in a file with correct\n        # permissions for `ssh-keygen -y -f` to read.\n        - name: Prepare tempfile for dispatch private key\n          ansible.builtin.tempfile:\n            suffix: \".key\"\n          register: key_tempfile\n\n        # Try to parse DispatchPrivateKey as a URL.\n        # If it's recognized as a file: URL, copy that path to the tempfile.\n        # Otherwise, expect it's the private key,\n        # and write that content directly to the tempfile.\n        - name: Save dispatch private key to tempfile\n          vars:\n            key_url: \"{{ arvados_cluster.Containers.DispatchPrivateKey | urlsplit }}\"\n          ansible.builtin.copy:\n            src: \"{{ key_url.path if key_url.scheme == 'file' else omit }}\"\n            content: \"{{ arvados_cluster.Containers.DispatchPrivateKey|regex_replace('\\\\n?$', '\\\\n') if key_url.scheme != 'file' else omit }}\"\n            dest: \"{{ key_tempfile.path }}\"\n            mode: 0600\n\n        - name: Derive dispatch public key\n          ansible.builtin.command:\n            argv:\n              - ssh-keygen\n              - \"-y\"\n              - \"-f\"\n              - \"{{ key_tempfile.path }}\"\n          register: compute_dispatch_ssh_keygen\n\n      always:\n        - name: Remove dispatch private key tempfile\n          when: key_tempfile is defined\n          ansible.builtin.file:\n            path: \"{{ key_tempfile.path }}\"\n            state: absent\n\n    - ansible.builtin.include_role:\n        name: distro_bootstrap\n\n- name: Set up compute node base distribution\n  # `default` is the name that the Packer Ansible plugin assigns to the\n  # instance used to create the image.\n  hosts: default\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_apt\n    - name: List linux-image packages pre-upgrade\n      ansible.builtin.shell:\n        cmd: |\n          dpkg-query --list \"linux-image-[1-9]*-$(dpkg --print-architecture)\" |\n          awk '($1 ~ /^[irp][HUFWti]$/) { print $2; }'\n      register: linux_image_preupgrade\n    - name: apt update if needed\n      ansible.builtin.meta: flush_handlers\n    - name: Upgrade packages\n      become: yes\n      ansible.builtin.apt:\n        upgrade: true\n    - name: Remove unwanted packages\n      become: yes\n      ansible.builtin.apt:\n        state: absent\n        purge: true\n        name:\n          - unattended-upgrades\n    - name: List linux-image packages 
post-upgrade\n      ansible.builtin.shell:\n        cmd: |\n          dpkg-query --list \"linux-image-[1-9]*-$(dpkg --print-architecture)\" |\n          awk '($1 ~ /^[irp][HUFWti]$/) { print $2; }'\n      register: linux_image_postupgrade\n    # Rebooting now accomplishes a few things: it means we can remove the old\n    # linux-image afterward, and the `ansible_kernel` fact will reflect what\n    # the image will boot into when used.\n    - name: Reboot into new kernel\n      when: \"linux_image_preupgrade.stdout != linux_image_postupgrade.stdout\"\n      become: yes\n      ansible.builtin.reboot: {}\n    - name: Remove old kernel(s)\n      when: \"linux_image_preupgrade.stdout != linux_image_postupgrade.stdout\"\n      become: yes\n      ansible.builtin.apt:\n        state: absent\n        purge: true\n        name: \"{{ linux_image_preupgrade.stdout_lines }}\"\n\n- name: Install compute node software\n  hosts: default\n  tasks:\n    - ansible.builtin.include_role:\n        name: compute_encrypt_tmp\n    - ansible.builtin.include_role:\n        name: arvados_compute\n    - ansible.builtin.include_role:\n        name: compute_docker\n      when: \"arvados_cluster.Containers.RuntimeEngine == 'docker'\"\n      vars:\n        arvados_docker_data_root: /tmp/docker-data\n    - ansible.builtin.include_role:\n        name: compute_user\n\n    - name: Configure DNS\n      become: yes\n      ansible.builtin.lineinfile:\n        path: /etc/dhcp/dhclient.conf\n        regexp: \"^[# ]*prepend +domain-name-servers \"\n        line: \"prepend domain-name-servers {{ dns_resolver }};\"\n      when: dns_resolver is defined\n\n    - name: Clean apt packages\n      become: yes\n      ansible.builtin.apt:\n        autoremove: true\n        clean: true\n"
  },
  {
    "path": "tools/ansible/build-debian-nspawn-vm.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# build-debian-nspawn-vm.yml - Ansible playbook to build a new Debian/Ubuntu\n# systemd-nspawn VM from scratch\n#\n# Run this playbook on a host with systemd-nspawn installed, and it will create\n# a minimal Debian/Ubuntu system with networking, SSH, and a user account with\n# full sudo access. This is enough that you can start the VM and run more\n# Ansible playbooks on it.\n#\n# The VM expects to work with a private network. It expects the host to provide\n# a DHCP lease (e.g., the host is running systemd-networkd) and forward IP.\n#\n# You MUST set the following variables to run this playbook:\n#\n# * `image_name`: The name of the image to create. This must be a valid DNS\n#   component. Note a container by this name will be started while the playbook\n#   is running.\n#\n# * `image_authorized_keys`: SSH public key string or URL.\n#\n# Other interesting variables you MAY set include:\n#\n# * `debootstrap_suite`: The codename of the Debian/Ubuntu release to install,\n#   like 'bookworm' or 'noble'. The default is Debian stable.\n#\n# * `debootstrap_mirror`: The URL of the Debian/Ubuntu mirror to install from.\n#   You MUST set this to an Ubuntu mirror if you want to install Ubuntu.\n#\n# * `image_username`, `image_passhash`, `image_gecos`, `image_shell`: These all\n#   define parameters for the user account created inside the VM. For details\n#   about how to generate `image_passhash`, see\n# <https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#how-do-i-generate-encrypted-passwords-for-the-user-module>\n\n- name: Bootstrap image\n  hosts: localhost\n  vars:\n    image_path: \"/var/lib/machines/{{ image_name }}\"\n    debootstrap_suite: stable\n    debootstrap_mirror: \"http://deb.debian.org/debian\"\n    debootstrap_script: \"{{ 'gutsy' if debootstrap_mirror is search('\\\\bubuntu\\\\b') else 'sid' }}\"\n  tasks:\n    - name: debootstrap\n      become: yes\n      ansible.builtin.command:\n        argv:\n          - debootstrap\n          - --include=dbus,openssh-server,python3,sudo,systemd\n          - \"{{ debootstrap_suite }}\"\n          - \"{{ image_path }}\"\n          - \"{{ debootstrap_mirror }}\"\n          - \"{{ debootstrap_script }}\"\n        creates: \"{{ (image_path, 'etc/os-release')|path_join }}\"\n    - name: Set up authorized SSH keys for root\n      become: yes\n      ansible.posix.authorized_key:\n        user: root\n        path: \"{{ (image_path, 'root/.ssh/authorized_keys')|path_join }}\"\n        key: \"{{ image_authorized_keys }}\"\n\n- name: Start VM and add host\n  hosts: localhost\n  vars:\n    image_interface: host0\n  tasks:\n    # We want to start the VM as early as possible because it's easier to\n    # manage the system state when we know it's running. 
We restart the VM\n    # to ensure it's running from the image we just built.\n    - name: Start VM\n      become: yes\n      ansible.builtin.systemd_service:\n        name: \"systemd-nspawn@{{ image_name }}.service\"\n        state: restarted\n\n    - name: Enable networking and sshd\n      become: yes\n      ansible.builtin.command:\n        argv:\n          - systemctl\n          - \"--machine={{ image_name }}\"\n          - enable\n          - --now\n          - ssh\n          - systemd-networkd\n      register: nspawn_enable\n      # Retry if we tried the command faster than the VM could start dbus.\n      until: \"nspawn_enable.stderr is not search('^Failed to connect to bus:', multiline=true)\"\n      retries: 15\n      delay: 1\n    - name: Wait for VM network\n      become: yes\n      ansible.builtin.command:\n        argv:\n          - systemd-run\n          - \"--machine={{ image_name }}\"\n          - --wait\n          - /usr/lib/systemd/systemd-networkd-wait-online\n          - \"--interface={{ image_interface }}\"\n          - --timeout=60\n    - name: Get VM network address\n      become: yes\n      ansible.builtin.command:\n        argv:\n          - systemd-run\n          - \"--machine={{ image_name }}\"\n          - --pipe\n          - networkctl\n          - status\n          - --json=short\n          - \"{{ image_interface }}\"\n      register: nspawn_netctl\n    - name: Add VM Ansible host\n      vars:\n        vm_addr: \"{{ (nspawn_netctl.stdout|from_json).Addresses|selectattr('ScopeString', '==', 'global')|first }}\"\n      ansible.builtin.add_host:\n        name: nspawn_vm\n        ansible_host: \"{{ vm_addr.Address|join('.' if vm_addr.Family == 2 else ':') }}\"\n        ansible_user: root\n\n- name: Set up VM user with sudo\n  hosts: nspawn_vm\n  vars:\n    image_username: admin\n    image_passhash: \"!\"\n    image_gecos: \"\"\n    image_shell: /usr/bin/bash\n  tasks:\n    - name: Create user account\n      ansible.builtin.user:\n        name: \"{{ image_username }}\"\n        password: \"{{ image_passhash }}\"\n        comment: \"{{ image_gecos }}\"\n        shell: \"{{ image_shell }}\"\n        groups:\n          - sudo\n        append: yes\n    - name: Set up authorized SSH keys for user\n      ansible.posix.authorized_key:\n        user: \"{{ image_username }}\"\n        key: \"{{ image_authorized_keys }}\"\n    - name: Clean up authorized SSH keys for root\n      ansible.posix.authorized_key:\n        user: root\n        key: \"{{ image_authorized_keys }}\"\n        state: absent\n\n- name: Stop VM\n  hosts: localhost\n  tasks:\n    - name: Stop VM\n      become: yes\n      ansible.builtin.systemd_service:\n        name: \"systemd-nspawn@{{ image_name }}.service\"\n        state: stopped\n"
  },
  {
    "path": "tools/ansible/build-docker-image.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# build-docker-image.yml - Build a Docker image from another playbook\n#\n# Typical usage looks like:\n#\n#   ansible-playbook -i files/development-docker-images.yml \\\n#     -e arvados_build_playbook=FILENAME.yml \\\n#     [--limit=...] build-docker-image.yml\n#\n# `arvados_build_playbook` is the name of the playbook that turns a base image\n# into a new image. The inventory defines container names, base images,\n# and built image tags.\n\n- name: Start container(s)\n  hosts: all\n  gather_facts: no\n  tasks:\n    - name: Start container\n      delegate_to: localhost\n      community.docker.docker_container:\n        name: \"{{ inventory_hostname }}\"\n        state: \"{{ arvados_docker_startstate|default('healthy') }}\"\n        image: \"{{ arvados_docker_from }}\"\n        pull: \"{{ arvados_docker_pull|default('missing') }}\"\n        command: \"{{ arvados_docker_command|default(['sleep', 'infinity']) }}\"\n\n- name: Run playbook\n  ansible.builtin.import_playbook: \"{{ arvados_build_playbook }}\"\n\n- name: Commit image(s)\n  hosts: all\n  tasks:\n    - name: Commit container\n      delegate_to: localhost\n      vars:\n        argv_start:\n          - docker\n          - container\n          - commit\n          - \"--author={{ arvados_docker_author|default('Arvados Package Maintainers <packaging@arvados.org>') }}\"\n          - '--change=CMD [\"bash\"]'\n        # `map` just adds the option flag to the start of each string.\n        argv_changes: \"{{ arvados_docker_changes|default([])|map('replace', '', '--change=', 1)|list }}\"\n        argv_args:\n          - \"{{ inventory_hostname }}\"\n          - \"{{ arvados_docker_tag }}\"\n      ansible.builtin.command:\n        argv: \"{{ argv_start + argv_changes + argv_args }}\"\n\n    - name: Clean up container\n      delegate_to: localhost\n      community.docker.docker_container:\n        name: \"{{ inventory_hostname }}\"\n        state: \"{{ arvados_docker_endstate|default('absent') }}\"\n"
  },
  {
    "path": "tools/ansible/examples/full-cluster-inventory.yml",
    "content": "### Example inventory for install-arvados-cluster.yml ###\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# This file illustrates host groups you can deploy with\n# install-arvados-cluster.yml and variables you can use to customize\n# the deployment. Copy this file somewhere else, edit it following the\n# comments, and run the installer like:\n#   $ ansible-playbook -Ki YOUR-INVENTORY.yml install-arvados-cluster.yml\n\n### Core cluster configuration settings ###\nall:\n  vars:\n    # To deploy a cluster, you must write a cluster configuration file and have\n    # a copy on the node where you run the Ansible installer. This file will be\n    # deployed to cluster hosts as needed and read by the Ansible installer for\n    # service configuration.\n    # Refer to <https://doc.arvados.org/admin/config.html> for details.\n    arvados_config_file: /PATH/TO/arvados/config.yml\n\n    # This is the cluster identifier (five lowercase alphanumerics) for the\n    # cluster configured in `arvados_config_file` that you want to deploy.\n    arvados_cluster_id: xurid\n\n    # If you are deploying a cluster to AWS, name the region you are deploying\n    # to here. You can override this value per group or per host as needed.\n    #aws_region: us-east-1\n\n    # If specified, this file will be deployed to cluster hosts as\n    # /etc/arvados/ca-certificates.crt. Arvados services use this file as a\n    # source of trusted CA certificates.\n    #arvados_certificates_file: /PATH/TO/ca-certificates.crt\n\n    # `arvados_tls` defines the source of the TLS certificate for each Arvados\n    # service. Each key corresponds to a key in the `Services` section of your\n    # cluster configuration.\n    arvados_tls:\n      Controller:\n        cert:\n          # Path to the certificate on the managed node\n          path: /PATH/TO/controller.pem\n          # All of the following parameters are optional.\n          # Set src for the `cert` and `key` files to deploy them from the\n          # controller node to the hosts running this service. Otherwise, they\n          # will be assumed to be at the specified paths.\n          #src: /PATH/TO/controller.pem\n          #owner: root\n          #group: ssl-cert\n          #mode: \"0640\" # mode must be quoted\n        key:\n          # path to the private key on the managed node\n          path: /PATH/TO/controller.key\n          # If the key file requires a passphrase stored in an AWS secret,\n          # provide the name of that secret as `aws_secret`. This requires\n          # you to set `aws_region` too.\n          #aws_secret: ExampleSecretName\n          # Also takes optional src, owner, group, and mode like cert\n          #src: /PATH/TO/controller.key\n          #owner: root\n          #group: ssl-cert\n          #mode: \"0640\" # mode must be quoted\n      # Settings from this `Default` section will be used for any service that\n      # does not have a specific configuration in `arvados_tls`. This is handy\n      # to use if you have a wildcard certificate that can be used by most/all\n      # of your cluster services.\n      #Default:\n      #  cert:\n      #    path: /PATH/TO/arvados-cluster.pem\n      #  key:\n      #    path: /PATH/TO/arvados-cluster.key\n\n    # `arvados_nginx_internal_networks` is a map of netmasks that should be\n    # considered \"internal\" to the Arvados cluster. For each netmask key, if its\n    # value is the literal `false`, it will be considered external. 
*Any* other\n    # value will cause the netmask to be considered internal.\n    #arvados_nginx_internal_networks:\n    #  \"10.0.0.0/8\": true\n    #  \"172.16.0.0/12\": true\n    #  \"192.168.0.0/16\": true\n\n    # `arvados_apt_periodic_default` defines how often, in days, periodic apt\n    # tasks like unattended upgrades run. Set this to `0` to disable\n    # periodic tasks.\n    #arvados_apt_periodic_default: 1\n    # If you need more advanced control, `arvados_apt_periodic` can set separate\n    # values for different periodic settings. Refer to\n    # `roles/distro_apt/defaults/main.yml`.\n\n    # `arvados_postgresql_repository` controls whether postgresql is installed\n    # through distro packages or from the postgresql.org apt repo. This allows\n    # installing a wider range of postgresql versions. When this is set to\n    # postgresql, a version must be set in `arvados_postgresql_version`.\n    # Can be either \"distro\" or \"postgresql\".\n    #arvados_postgresql_repository: distro\n    # Can name a version number like 15, 17, etc.\n    #arvados_postgresql_version: null\n\n### Host groups for core Arvados services ###\n# Most Arvados clusters will have at least one host in each of these groups.\n# It's normal to have the same host in multiple groups. For example, most\n# clusters have the same host(s) in the `arvados_api`, `arvados_controller`,\n# and `arvados_websocket` groups.\n\n# arvados_api host(s) run the Arvados Rails API server.\narvados_api:\n  hosts:\n    api.arvados.example:\n  vars:\n    # If your cluster will use an external database, specify the database\n    # superuser credentials here. These will be used to set up a dedicated\n    # Arvados role and database.\n    #arvados_database_login_user: \"\"\n    #arvados_database_login_password: \"\"\n\n# arvados_controller host(s) run the Arvados API controller.\narvados_controller:\n  hosts:\n    api.arvados.example:\n  vars:\n    # `arvados_nginx_server_config` is a block of nginx configuration\n    # directives that will be included in the server block for this\n    # service. You can use this to answer to additional names or ports,\n    # or do whatever other customization you need.\n    #arvados_nginx_server_config: |\n    #  server_name  arvados.example;\n\n# arvados_websocket host(s) run the Arvados websockets server.\narvados_websocket:\n  hosts:\n    api.arvados.example:\n\n# arvados_keepstore host(s) run the Arvados keepstore services.\narvados_keepstore:\n  hosts:\n    keep0.arvados.example:\n    keep1.arvados.example:\n\n# arvados_keepbalance host(s) run the Arvados keep-balance service.\narvados_keepbalance:\n  hosts:\n    keep0.arvados.example:\n    keep1.arvados.example:\n\n# arvados_keepproxy host(s) run the Arvados keepproxy service.\narvados_keepproxy:\n  hosts:\n    keep.arvados.example:\n\n# arvados_keep_web host(s) run the Arvados keep-web/WebDAV service.\narvados_keep_web:\n  hosts:\n    webdav.arvados.example:\n\n# arvados_workbench host(s) serve the Arvados Workbench front-end.\narvados_workbench:\n  hosts:\n    workbench.arvados.example:\n\n\n### Host groups for Arvados dispatchers ###\n# A complete Arvados cluster must deploy at least one dispatcher, but\n# different clusters run different dispatchers. Uncomment and define the\n# group(s) for the dispatcher(s) your cluster will run.\n\n# arvados_dispatch_cloud host(s) run the arvados-dispatch-cloud service\n# to create dedicated cloud nodes to run containers. 
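For example, on AWS the\n# dispatcher launches EC2 instances on demand and shuts them down when idle. 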
You must have\n# `Containers.CloudVMs` configured in your cluster configuration for this\n# to work.\n#arvados_dispatch_cloud:\n#  hosts:\n#    api.arvados.example:\n\n# arvados_dispatch_local host(s) run the crunch-dispatch-local service\n# to run containers on the local host. This is only intended for small\n# development clusters, not production use. These hosts will automatically\n# have compute requirements like Docker and arv-mount installed.\n#arvados_dispatch_local:\n#  hosts:\n#    compute0.arvados.example:\n#    compute1.arvados.example:\n\n\n### Host groups for Arvados supporting services ###\n# These groups deploy services that can make an Arvados cluster easier to\n# deploy or use, but they are not required for all clusters. Uncomment and\n# define the group(s) for the services you want to run.\n\n# arvados_postgresql host(s) will have the PostgreSQL server installed to host\n# the cluster database. The Ansible installer currently does minimal server\n# configuration and does not set up replication or other high-level features.\n# You do not need this group if your cluster uses an externally managed database\n# (e.g., Amazon RDS).\n#arvados_postgresql:\n#  hosts:\n#    api.arvados.example:\n#  vars:\n#    # `arvados_postgresql_hba_sources` is an array of IPv4 or v6 netmasks,\n#    # or special values recognized in `pg_hba.conf`. The Arvados database user\n#    # will be allowed to connect from these sources. This must be configured\n#    # so that all cluster services are allowed to connect to the database.\n#    arvados_postgresql_hba_sources:\n#      - samenet\n#    # `arvados_postgresql_hba_method` defines the PostgreSQL authentication\n#    # method that will be accepted for the Arvados database user. Set this\n#    # to `md5` if your distribution includes a version of PostgreSQL too old\n#    # to support `scram-sha-256`.\n#    arvados_postgresql_hba_method: scram-sha-256\n#    # If you need to do advanced database setup, refer to other settings in\n#    # `roles/arvados_postgresql/defaults/main.yml`.\n\n# arvados_shell host(s) have all client tools installed so cluster users can\n# access a complete client environment via SSH or similar.\n#arvados_shell:\n#  hosts:\n#    shell.arvados.example:\n#      # The installer will automatically create an Arvados virtual machine\n#      # record for each shell node. The `hostname` attribute in that record\n#      # will come from the inventory hostname by default. 
If you need to use\n#      # a different hostname externally, set `arvados_shell_hostname` like this:\n#      #arvados_shell_hostname: shellhost.arvados.example\n\n\n### Centralized logging ###\n# The installer can set up Loki to collect logs from all Arvados services and\n# Prometheus to report metrics.\n# This is especially helpful to collate logs on multi-node installs.\n# The Arvados cluster activity report can use these metrics to report additional\n# information about cluster health.\n\n# arvados_loki host(s) will have Loki installed from the Grafana APT repository.\n# The required Loki configuration file is defined by `arvados_loki_config_file`.\n# Typically this should be installed on all nodes running Arvados services as\n# illustrated in the example below.\n#arvados_loki:\n#  children:\n#    arvados_api:\n#    arvados_controller:\n#    arvados_websocket:\n#    arvados_keepstore:\n#    arvados_keepbalance:\n#    arvados_keepproxy:\n#    arvados_keep_web:\n#    arvados_workbench:\n#    arvados_dispatch_cloud:\n#    arvados_dispatch_local:\n#  vars:\n#    arvados_loki_config_file: /PATH/TO/config.yml\n\n# arvados_alloy host(s) will have Alloy installed from the Grafana APT repository.\n# The required Alloy configuration file is defined by `arvados_alloy_config_file`.\n#arvados_alloy:\n#  hosts:\n#    prometheus.arvados.example:\n#  vars:\n#    arvados_alloy_config_file: /PATH/TO/config.alloy\n\n# arvados_grafana host(s) will have Grafana installed from the Grafana APT repository.\n# The required Grafana configuration file is defined by `arvados_grafana_config_file`.\n#arvados_grafana:\n#  hosts:\n#    prometheus.arvados.example:\n#  vars:\n#    arvados_grafana_config_file: /PATH/TO/grafana.ini\n\n# arvados_prometheus host(s) will have Prometheus installed from the Prometheus APT repository.\n# The required Prometheus configuration file is defined by `arvados_prometheus_config_file`.\n#arvados_prometheus:\n#  hosts:\n#    prometheus.arvados.example:\n#  vars:\n#    arvados_prometheus_config_file: /PATH/TO/prometheus.yml\n"
  },
  {
    "path": "tools/ansible/examples/simple-cluster-config.yml",
    "content": "### Example single-node cluster configuration ###\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# You can use this file as a baseline for a simple single-node Arvados\n# cluster like you would install with `simple-cluster-inventory.yml`.\n# To use this configuration, you MUST:\n#  * Change the cluster ID `xurid` to your own ID.\n#    Choose a cluster ID with exactly five lowercase alphanumerics.\n#    Make sure you update the references under `PostgreSQL` and `Volumes` too.\n#  * Change each of `ManagementToken`, `SystemRootToken`, `BlobSigningKey`,\n#    and `PostgreSQL.Connection.password` to a unique, strong password.\n#    Tokens can only contain alphanumerics. You can generate one by running:\n#          tr -dc A-Za-z0-9 </dev/urandom | head -c 64\n#  * In every `ExternalURL` setting, change `hostname.example` to the name\n#    or address of your cluster host.\n# You MAY change other settings as noted in comments below.\n\nClusters:\n  xurid:\n    # These settings are appropriate if you are installing a PostgreSQL\n    # server on the Arvados cluster node (the default). To use an external\n    # database, make sure all parameters match what the database administrator\n    # provides.\n    PostgreSQL:\n      Connection:\n        user: arvados\n        password: FIXMEDefaultPostgreSQLPassword\n        host: localhost\n        port: \"5432\"\n        dbname: \"arvados_xurid\"\n\n    ManagementToken: FIXMEDefaultManagementToken\n    SystemRootToken: FIXMEDefaultSystemRootToken\n\n    Collections:\n      BlobSigningKey: FIXMEDefaultBlobSigningKey\n      DefaultReplication: 1\n\n    # This configuration lets users log in to Arvados using the same Unix\n    # username and password they use on the cluster host.\n    Login:\n      PAM:\n        Enable: true\n\n    Users:\n      AutoAdminFirstUser: true\n\n    TLS:\n      Certificate: /etc/ssl/certs/ssl-cert-snakeoil.pem\n      Key: /etc/ssl/private/ssl-cert-snakeoil.key\n\n    Volumes:\n      xurid-nyw5e-000000000000000:\n        Driver: Directory\n        Replication: 1\n        DriverParameters:\n          Root: /var/lib/arvados/keep-data\n\n    # Port ranges used by services:\n    #  8040-8079: Internal service behind nginx\n    #  8440-8479: nginx front-end for corresponding service\n    #  8080-8099: Internal-only service\n    #  8900-8999: Service container ports\n    Services:\n      RailsAPI:\n        InternalURLs:\n          \"http://localhost:8080\": {}\n\n      Keepstore:\n        InternalURLs:\n          \"http://localhost:8088\": {}\n\n      Keepbalance:\n        InternalURLs:\n          \"http://localhost:8089\": {}\n\n      Websocket:\n        InternalURLs:\n          \"http://localhost:8040\": {}\n        ExternalURL: \"wss://hostname.example:8440/websocket\"\n\n      Controller:\n        InternalURLs:\n          \"http://localhost:8043\": {}\n        ExternalURL: \"https://hostname.example:8443\"\n\n      ContainerWebServices:\n        # This ExternalURL should match Controller's.\n        ExternalURL: \"https://hostname.example:8443\"\n        ExternalPortMin: 8900\n        ExternalPortMax: 8999\n\n      Keepproxy:\n        InternalURLs:\n          \"http://localhost:8044\": {}\n        ExternalURL: \"https://hostname.example:8444\"\n\n      WebDAV:\n        InternalURLs:\n          \"http://localhost:8048\": {}\n        ExternalURL: \"https://hostname.example:8448\"\n\n      WebDAVDownload:\n        # These URLs should match WebDAV's.\n        
InternalURLs:\n          \"http://localhost:8048\": {}\n        ExternalURL: \"https://hostname.example:8448\"\n\n      Workbench2:\n        ExternalURL: \"https://hostname.example\"\n\n      Workbench1:\n        # These URLs should match Workbench2's.\n        ExternalURL: \"https://hostname.example\"\n"
  },
  {
    "path": "tools/ansible/examples/simple-cluster-inventory.yml",
    "content": "### Example inventory for install-arvados-cluster.yml ###\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# This file illustrates how to deploy a simple single-node cluster with\n# install-arvados-cluster.yml and variables you can use to customize\n# the deployment. Copy this file somewhere else, edit it following the\n# comments, and run the installer like:\n#   $ ansible-playbook -Ki YOUR-INVENTORY.yml install-arvados-cluster.yml\n\n### Core cluster configuration settings ###\narvados_cluster_host:\n  hosts:\n    # Write the \"main\" hostname of your cluster host, with the trailing `:`.\n    hostname.example:\n\n  vars:\n    # To deploy a cluster, you must write a cluster configuration file and have\n    # a copy on the node where you run the Ansible installer. This file will be\n    # deployed to cluster hosts as needed and read by the Ansible installer for\n    # service configuration.\n    # Refer to `examples/simple-cluster-config.yml` for an example.\n    arvados_config_file: /PATH/TO/xurid-config.yml\n\n    # This is the cluster identifier (five lowercase alphanumerics) for the\n    # cluster configured in `arvados_config_file` that you want to deploy.\n    arvados_cluster_id: xurid\n\n    # `arvados_tls` defines the source of the TLS certificate for each Arvados\n    # service. This illustrates how to use the self-signed cert on Debian/Ubuntu\n    # for Arvados services.\n    arvados_tls:\n      Default:\n        cert:\n          path: /etc/ssl/certs/ssl-cert-snakeoil.pem\n          # If you have `cert` and `key` files on the host running Ansible and\n          # want to install them for Arvados services on the cluster host,\n          # set `src` for `cert` and `key`.\n          #src: /PATH/TO/arvados-cluster.pem\n        key:\n          path: /etc/ssl/private/ssl-cert-snakeoil.key\n          #src: /PATH/TO/arvados-cluster.key\n\n    # `arvados_apt_suites` identifies which set of Arvados packages to install.\n    # By default it will get the latest official release. You can set this to\n    # \"-testing\" to get install the latest release candidate, or \"-dev\" to get\n    # the very latest packages built out of the main development tree. These\n    # options will install a less stable cluster with more bugs, but let you\n    # experiment with Arvados features under development.\n    #arvados_apt_suites: \"-dev\"\n\n\n### Database installation ###\n# The configuration below will install a PostgreSQL server on the cluster host\n# for all Arvados services to use. If your Arvados cluster configuration refers\n# to a database server that already exists, you can remove this\n# `arvados_postgresql` section, and set database superuser credentials under\n# the next `arvados_api` section.\narvados_postgresql:\n  children:\n    arvados_cluster_host:\n  vars:\n    arvados_postgresql_config: {}\n    arvados_postgresql_hba_sources:\n      - samehost\n\narvados_api:\n  children:\n    arvados_cluster_host:\n  vars:\n    # If your cluster will use an external database, specify the database\n    # superuser credentials here. These will be used to set up a dedicated\n    # Arvados role and database.\n    #arvados_database_login_user: \"\"\n    #arvados_database_login_password: \"\"\n\n\n### Arvados services ###\n# The rest of the inventory defines the Arvados services to run on the\n# cluster host. 
You should not need to change anything from here on.\narvados_controller:\n  children:\n    arvados_cluster_host:\n\narvados_websocket:\n  children:\n    arvados_cluster_host:\n\narvados_keepstore:\n  children:\n    arvados_cluster_host:\n\narvados_keepbalance:\n  children:\n    arvados_cluster_host:\n\narvados_keepproxy:\n  children:\n    arvados_cluster_host:\n\narvados_keep_web:\n  children:\n    arvados_cluster_host:\n\narvados_workbench:\n  children:\n    arvados_cluster_host:\n\narvados_dispatch_local:\n  children:\n    arvados_cluster_host:\n"
  },
  {
    "path": "tools/ansible/files/default-test-config.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# These are the \"default\" Arvados test database credentials.\n# You can use this file as the `arvados_config_file` for the\n# `install-dev-tools.yml` playbook in low-security applications.\n\nClusters:\n  zzzzz:\n    PostgreSQL:\n      Connection:\n        host: localhost\n        port: \"5432\"\n        dbname: arvados_test\n        user: arvados\n        password: insecure_arvados_test\n"
  },
  {
    "path": "tools/ansible/files/development-docker-images.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# This inventory defines container names, base images, and built image tags\n# for the `build-docker-image.yml` playbook.\n\nall:\n  vars:\n    ansible_connection: docker\n    ansible_user: root\n    arvados_dev_from_pkgs: true\n    arvados_dnf_command: \"microdnf --assumeyes\"\n    distro_bootstrap_dnf_command: \"{{ arvados_dnf_command }}\"\n\narvados_build_one_target:\n  hosts:\n    arvados_pkgbuild_debian12:\n      arvados_docker_from: debian:12-slim\n      arvados_docker_tag: arvados/build:debian12\n    arvados_pkgbuild_ubuntu2204:\n      arvados_docker_from: ubuntu:jammy\n      arvados_docker_tag: arvados/build:ubuntu2204\n    arvados_pkgbuild_ubuntu2404:\n      arvados_docker_from: ubuntu:noble\n      arvados_docker_tag: arvados/build:ubuntu2404\n    arvados_pkgbuild_rocky8:\n      ansible_python_interpreter: /usr/libexec/platform-python\n      arvados_docker_from: rockylinux:8-minimal\n      arvados_docker_tag: arvados/build:rocky8\n    arvados_pkgbuild_rocky9:\n      arvados_docker_from: rockylinux:9-minimal\n      arvados_docker_tag: arvados/build:rocky9\n    arvados_pkgbuild_rocky10:\n      arvados_docker_from: rockylinux/rockylinux:10-minimal\n      arvados_docker_tag: arvados/build:rocky10\n  vars:\n    arvados_docker_changes:\n      - \"ENV WORKSPACE=/arvados\"\n\narvados_package_tests:\n  hosts:\n    arvados_pkgtest_debian12:\n      arvados_docker_from: debian:12-slim\n      arvados_docker_tag: arvados/package-test:debian12\n    arvados_pkgtest_ubuntu2204:\n      arvados_docker_from: ubuntu:jammy\n      arvados_docker_tag: arvados/package-test:ubuntu2204\n    arvados_pkgtest_ubuntu2404:\n      arvados_docker_from: ubuntu:noble\n      arvados_docker_tag: arvados/package-test:ubuntu2404\n    arvados_pkgtest_rocky8:\n      ansible_python_interpreter: /usr/libexec/platform-python\n      arvados_docker_from: rockylinux:8-minimal\n      arvados_docker_tag: arvados/package-test:rocky8\n      arvados_pkgtest_dnf_devel_basename: Rocky-PowerTools\n      arvados_pkgtest_dnf_devel_section: powertools\n    arvados_pkgtest_rocky9:\n      arvados_docker_from: rockylinux:9-minimal\n      arvados_docker_tag: arvados/package-test:rocky9\n    arvados_pkgtest_rocky10:\n      arvados_docker_from: rockylinux/rockylinux:10-minimal\n      arvados_docker_tag: arvados/package-test:rocky10\n  vars:\n    arvados_docker_changes:\n      - \"ENV DEBIAN_FRONTEND=noninteractive\"\n\narvados_test_workbench:\n  hosts:\n    arvados_workbench:\n      arvados_docker_from: debian:12-slim\n      arvados_docker_tag: arvados/workbench:latest\n  vars:\n    arvados_config_file: \"{{ lookup('env', 'WORKSPACE') }}/services/workbench2/tools/arvados_config.yml\"\n    arvados_dev_user: workbench\n    arvados_docker_changes:\n      - \"ENV ARVADOS_DIRECTORY=/opt/arvados\"\n      # VENV3DIR gets created by volume setup. The rest of PATH is standard.\n      - \"ENV PATH=/usr/local/bin:/usr/bin:/usr/sbin:/home/{{ arvados_dev_user }}/VENV3DIR/bin\"\n      - \"USER {{ arvados_dev_user }}\"\n"
  },
  {
    "path": "tools/ansible/filter_plugins/arvados.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Arvados filters for Ansible\"\"\"\n\nimport dataclasses\nimport ipaddress\nimport itertools\nimport operator\nimport socket\nimport typing as t\nimport urllib.parse\n\nfrom collections import abc\n\nConfig = abc.Mapping[str, t.Any]\nDistroID = t.Tuple[str, str]\nPackageMapping = abc.Mapping[str, t.List[str]]\n\n# This mapping defines all the distributions we support. The values translate\n# package names from the latest supported Debian to the named distribution.\n_PACKAGE_NAMES_MAP: t.Dict[DistroID, PackageMapping] = {\n    ('Debian', '12'): {},\n    ('Debian', '13'): {},\n    ('Ubuntu', '22'): {},\n    ('Ubuntu', '24'): {},\n}\n# Unversioned package translation table for RHEL-based distributions.\n_RHEL_BASE: PackageMapping = {\n        'g++': ['gcc-c++'],\n        'libbz2-dev': ['bzip2-devel'],\n        'libcurl4-openssl-dev': ['libcurl-devel'],\n        'libdb-dev': ['libdb-dev'],\n        'libexpat1-dev': ['expat-devel'],\n        'libffi-dev': ['libffi-devel'],\n        'libfuse-dev': ['fuse-devel'],\n        'libgdbm-compat-dev': ['gdbm-devel'],\n        'libgdbm-dev': ['gdbm-devel'],\n        'libgmp-dev': ['gmp-devel'],\n        'libpam-dev': ['pam-devel'],\n        'libpq-dev': ['postgresql-devel'],\n        'libsqlite3-dev': ['sqlite-devel'],\n        'libssl-dev': ['openssl-devel'],\n        'libyaml-dev': ['libyaml-devel'],\n        'locales-all': ['glibc'],\n        'lsb-release': ['redhat-lsb'],\n        'media-types': ['mailcap'],\n        'netbase': ['setup'],\n        'postgresql': ['postgresql-server'],\n        'postgresql-client': ['postgresql'],\n        'procps': ['procps-ng'],\n        'python3-dev': ['python3-devel'],\n        'python3-venv': ['python3'],\n        'ruby-dev': ['ruby-devel'],\n        'xz-utils': ['xz'],\n        'zlib1g-dev': ['zlib-devel'],\n}\n# Versions of RHEL we support and their version-specific package translations.\n_RHEL_VERSIONS: t.Dict[str, PackageMapping] = {\n    '8': _RHEL_BASE | {\n        'python3-dev': ['python3.11-devel'],\n        'python3-venv': ['python3.11'],\n    },\n    '9': _RHEL_BASE | {\n        'python3-dev': ['python3.11-devel'],\n        'python3-venv': ['python3.11'],\n    },\n    '10': _RHEL_BASE,\n}\n# Add all the RHEL variants we support to _PACKAGE_NAMES_MAP.\nfor _name, (_version, _mapping) in itertools.product([\n        'AlmaLinux',\n        'Red Hat Enterprise Linux',\n        'Rocky',\n], _RHEL_VERSIONS.items()):\n    _PACKAGE_NAMES_MAP[(_name, _version)] = _mapping\ndel _RHEL_BASE, _RHEL_VERSIONS, _name, _version, _mapping\n\nclass FilterModule:\n    \"\"\"Export functions as Jinja filters to Ansible\"\"\"\n    _FILTERS_MAP: t.Dict[str, abc.Callable] = {}\n\n    @classmethod\n    def register(cls, func: abc.Callable) -> abc.Callable:\n        cls._FILTERS_MAP[func.__name__] = func\n        return func\n\n    def filters(self) -> abc.Mapping[str, abc.Callable]:\n        return self._FILTERS_MAP\n\n\n@dataclasses.dataclass\nclass ListenAddress:\n    \"\"\"Parse and query an Arvados service's listen address\"\"\"\n    address: str\n    \"\"\"The address a service should listen on. May be an IP address or hostname.\"\"\"\n    port: int\n    \"\"\"The port a service should listen on. 
May be 0 to be assigned a port.\"\"\"\n\n    GLOBAL_ADDR = ipaddress.ip_address('1.1.1.1')\n    LOOPBACK_ADDR = ipaddress.ip_address('127.0.0.1')\n\n    @classmethod\n    def parse(cls, s: str) -> 'ListenAddress':\n        \"\"\"Parse a ListenAddress from a URL string\"\"\"\n        parts = urllib.parse.urlparse(s)\n        address = parts.hostname\n        if address is None:\n            raise ValueError(f\"no address or hostname in {s!r}\")\n        port = parts.port\n        if port is None:\n            try:\n                port = socket.getservbyname(parts.scheme)\n            except (OSError, TypeError):\n                raise ValueError(f\"no port or known scheme in {s!r}\")\n        return cls(address, port)\n\n    def sort_score(self) -> int:\n        \"\"\"Return a sort key for this address\n\n        Used to choose the single \"best\" listen address in configuration.\n        Returns an arbitrary integer that represents the priority of this\n        address: smaller numbers means higher priority. The current order is:\n\n        1. the \"all addresses\" zero address\n        2. global addresses\n        3. non-loopback addresses\n        4. any address\n        \"\"\"\n        try:\n            addr = ipaddress.ip_address(self.address)\n        except ValueError:\n            # The address is a hostname. Synthesize an IP address for scoring.\n            addr = self.LOOPBACK_ADDR if self.address == 'localhost' else self.GLOBAL_ADDR\n        if int(addr) == 0:\n            return 0\n        elif addr.is_global:\n            return 1\n        elif not addr.is_loopback:\n            return 2\n        else:\n            return 255\n\n    def __str__(self) -> str:\n        return f'{self.address}:{self.port}'\n\n\n@FilterModule.register\ndef distro_packages(\n        names: abc.Sequence[str],\n        distro_name: str,\n        distro_version: str,\n) -> abc.Iterator[str]:\n    \"\"\"Translate package names from Debian stable to a target distribution\n\n    Given a list of Debian stable package names, iterates the equivalent package\n    names for the given distribution+version. These typically come from the\n    `ansible_distribution` and `ansible_distribution_major_version` facts.\n    \"\"\"\n    if isinstance(names, str):\n        names = [names]\n    try:\n        names_map = _PACKAGE_NAMES_MAP[(distro_name, distro_version)]\n    except KeyError:\n        raise ValueError(f\"no package translations available for {distro_name} {distro_version}\") from None\n    for name in names:\n        try:\n            translation = names_map[name]\n        except KeyError:\n            yield name\n        else:\n            yield from translation\n\n\n@FilterModule.register\ndef external_addr(svc_config: Config) -> ListenAddress:\n    \"\"\"Parse and return a listen address from a service's ExternalURL\n\n    Pass in an Arvados service configuration like\n    `arvados_cluster.Services.RailsAPI`. This function parses and returns the\n    service's ExternalURL.\n    \"\"\"\n    try:\n        url = svc_config['ExternalURL']\n    except KeyError:\n        raise ValueError(\"no ExternalURL defined in service configuration\")\n    else:\n        return ListenAddress.parse(url)\n\n\n@FilterModule.register\ndef listen_addrs(svc_config: Config) -> abc.Iterator[ListenAddress]:\n    \"\"\"Iterate all listen addresses for an Arvados service\n\n    Pass in an Arvados service configuration like\n    `arvados_cluster.Services.RailsAPI`. 
This function iterates all valid\n    listen addresses in the configuration.\n    \"\"\"\n    for url, url_config in svc_config.get('InternalURLs', {}).items():\n        listen_url = url_config.get('ListenURL', url)\n        try:\n            addr = ListenAddress.parse(listen_url)\n        except ValueError:\n            pass\n        else:\n            yield addr\n    # If the loop above never assigned `addr`, no URL parsed successfully.\n    try:\n        addr\n    except NameError:\n        raise ValueError(\"no valid ListenURLs in service configuration\") from None\n\n\n@FilterModule.register\ndef listen_addr(svc_config: Config) -> ListenAddress:\n    \"\"\"Return a single listen address for an Arvados service\n\n    Pass in an Arvados service configuration like\n    `arvados_cluster.Services.RailsAPI`. This function finds and returns the\n    most preferred address to listen on.\n    \"\"\"\n    try:\n        return min(listen_addrs(svc_config), key=operator.methodcaller('sort_score'))\n    except ValueError:\n        raise ValueError(\"no listen URLs defined in service configuration\") from None\n\n\n@FilterModule.register\ndef internal_addrs(svc_config: Config) -> abc.Iterator[ListenAddress]:\n    \"\"\"Iterate listen addresses from a service's InternalURLs\n\n    Pass in an Arvados service configuration like\n    `arvados_cluster.Services.RailsAPI`. This function iterates addresses\n    parsed from the service's InternalURLs.\n    \"\"\"\n    for url in svc_config.get('InternalURLs', ()):\n        try:\n            addr = ListenAddress.parse(url)\n        except ValueError:\n            pass\n        else:\n            yield addr\n    # If the loop above never assigned `addr`, no URL parsed successfully.\n    try:\n        addr\n    except NameError:\n        raise ValueError(\"no valid InternalURLs in service configuration\") from None\n\n\ndef systemd_escape(value: str) -> str:\n    \"\"\"Internal helper to handle basic systemd escapes\n\n    Pass in a string, returns an escaped string\n    \"\"\"\n    # Standard systemd escapes based on\n    # https://www.freedesktop.org/software/systemd/man/latest/systemd.syntax.html#Quoting\n    return value.translate(str.maketrans({\n        # Escape backslashes and quotes\n        '\\\\': '\\\\\\\\',\n        '\\\"': '\\\\\\\"',\n        '\\'': '\\\\\\'',\n        # Escape percent sign\n        '%': '%%',\n        # Transform newlines into multi line value\n        '\\n': '\\\\\\n'\n    }))\n\n\n@FilterModule.register\ndef systemd_env_quote(value: str) -> str:\n    \"\"\"Escapes necessary characters for systemd env usage\n\n    Given a string value, returns a quoted and escaped string ready for use in\n    a systemd env directive.\n    \"\"\"\n    value = systemd_escape(value)\n    return f'\"{value}\"'\n\n\n@FilterModule.register\ndef systemd_exec_quote(value: str) -> str:\n    \"\"\"Escapes necessary characters for systemd ExecStart usage\n\n    Given a string value, returns a quoted and escaped string ready for use in\n    a systemd ExecStart directive.\n    \"\"\"\n    # Start with basic systemd escape\n    value = systemd_escape(value)\n    # Additionally escape $ for exec\n    value = value.replace('$', '$$')\n    return f'\"{value}\"'\n"
  },
  {
    "path": "tools/ansible/group_vars/all/vars.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# Arvados playbooks are written to work with the distro Python;\n# e.g., we get module dependencies from packages as needed.\nansible_python_interpreter: /usr/bin/python3\n"
  },
  {
    "path": "tools/ansible/group_vars/arvados_postgresql/database.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# These variables rig up the installer playbook so hosts in the\n# `arvados_postgresql` group automatically become the `postgres` user\n# for maintenance tasks, rather than connecting over the network.\n# See also roles/arvados_database/defaults/main.yml.\n\narvados_database_login_host: \"\"\n"
  },
  {
    "path": "tools/ansible/install-ansible.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# This script installs all the Python packages and Ansible collections necessary\n# to run Arvados playbooks. In order to provide the best experience and stay\n# maintainable, it follows a couple of rules:\n#  1. It is not *too* automatic.\n#     If it can't do the thing you ask, it will fail.\n#     It will not change system configuration or elevate privileges.\n#  2. Running without options does the most recommended thing.\n#     You have to specify options to do anything more advanced.\n\nset -e\nset -u\n\nANSIBLE_DIR=\"$(dirname \"$0\")\"\nANSIBLE_PKG=ansible-core\nEX_UNAVAILABLE=69\nEX_SOFTWARE=70\nEX_CONFIG=78\n\nerrexit() {\n    local exitcode=\"$1\"; shift\n    echo ERROR: \"$@\"\n    exit \"$exitcode\"\n}\n\nusage() {\n    if [ $# -eq 0 ]; then\n        exitcode=0\n        out_fd=1\n    else\n        echo ERROR: \"$@\" >&2\n        exitcode=2\n        out_fd=2\n    fi\n    cat >&\"$out_fd\" <<EOF\nusage: install-ansible.sh <-V | VIRTUALENV_DIR>\n\nBy default this script installs Ansible with pipx. You can install Ansible to a\nvirtualenv by naming a directory or using the -V option.\n\noptions:\n  -V  Install Ansible to the currently activated virtualenv\nEOF\n    exit \"$exitcode\"\n}\n\nVENVDIR=\nwhile getopts Vh opt; do\n    case \"$opt\" in\n        V)\n            if ! [ -e \"${VIRTUAL_ENV:-/nonexistent}/bin/pip\" ]\n            then usage \"must activate a virtualenv before using -V\"\n            fi\n            ;;\n        h) usage ;;\n        \"?\") usage \"unknown option \\`$OPTARG\\`\" ;;\n    esac\ndone\nshift $((OPTIND - 1))\ncase \"$VENVDIR\" in\n    \"\")\n        if [ $# -gt 1 ]\n        then usage \"too many arguments\"\n        fi\n        VENVDIR=\"${1:-}\"\n        ;;\n    *)\n        if [ $# -gt 0 ]\n        then usage \"cannot specify a virtualenv directory with -V\"\n        fi\n        ;;\nesac\n\ncase \"$VENVDIR\" in\n    \"\") # pipx install\n        pipx --version >/dev/null ||\n            errexit \"$EX_UNAVAILABLE\" \"failed to run pipx\"\n\n        ansible_req=\"$(grep -E \"^$ANSIBLE_PKG[^-_[:alnum:]]\" \"$ANSIBLE_DIR/requirements.txt\")\" ||\n            errexit \"$EX_SOFTWARE\" \"failed to find $ANSIBLE_PKG requirement in requirements.txt\"\n\n        pipx install \"$ansible_req\" ||\n            errexit \"$?\" \"failed to pipx install \\`$ansible_pkg\\`\"\n\n        pipx runpip \"$ANSIBLE_PKG\" install -r \"$ANSIBLE_DIR/requirements.txt\" ||\n            errexit \"$?\" \"failed to install requirements.txt\"\n\n        VENVDIR=\"$(pipx environment --value=PIPX_LOCAL_VENVS)/$ANSIBLE_PKG\" ||\n            errexit \"$EX_CONFIG\" \"failed to load pipx environment\"\n\n        ;;\n\n    *) # pip install inside $VENVDIR\n        PIP=\"$VENVDIR/bin/pip\"\n        if ! [ -x \"$PIP\" ]\n        then\n            python3 -m venv \"$VENVDIR\" ||\n                errexit \"$?\" \"failed to create virtualenv at $VENVDIR\"\n\n            if ! 
[ -x \"$PIP\" ]\n            then errexit \"$EX_SOFTWARE\" \"failed to find pip after creating virtualenv at $VENVDIR\"\n            fi\n        fi\n\n        \"$PIP\" install -r \"$ANSIBLE_DIR/requirements.txt\" ||\n            errexit \"$?\" \"failed to pip install requirements.txt\"\n\n        ;;\nesac\n\n\"$VENVDIR/bin/ansible-galaxy\" install -r \"$ANSIBLE_DIR/requirements.yml\" ||\n    errexit \"$?\" \"failed to install requirements.yml with ansible-galaxy\"\n\nprintf \"%s\\n\" \"\" \"Ansible successfully installed!\"\n"
  },
  {
    "path": "tools/ansible/install-arvados-cluster.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# install-arvados-cluster.yml - Ansible playbook to set up an Arvados cluster\n#\n# This playbook installs all services necessary for an Arvados cluster. It is\n# still in early development, so it does not support all configurations or\n# integrate with third-party services. Refer to\n# `examples/full-cluster-inventory.yml` for information about how to write\n# configuration for this playbook.\n\n- name: Bootstrap nodes\n  hosts: all\n  gather_facts: no\n  tasks:\n    - name: Load Arvados configuration file\n      no_log: yes\n      delegate_to: localhost\n      ansible.builtin.include_vars:\n        name: arvados_config\n        file: \"{{ arvados_config_file }}\"\n    - name: Load Arvados cluster configuration\n      no_log: yes\n      ansible.builtin.set_fact:\n        arvados_cluster: \"{{ arvados_config.Clusters[arvados_cluster_id] }}\"\n      failed_when: arvados_cluster is undefined\n    - ansible.builtin.include_role:\n        name: distro_bootstrap\n\n- name: Install PostgreSQL\n  hosts: arvados_postgresql\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_postgresql\n\n- name: Set up Arvados database\n  hosts: arvados_postgresql,arvados_api\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_database\n      run_once: true\n\n- name: Set up RailsAPI service(s)\n  hosts: arvados_api\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_api\n\n- name: Set up API controller(s)\n  hosts: arvados_controller\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_controller\n\n- name: Set up WebSocket server\n  hosts: arvados_websocket\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_websocket\n\n- name: Set up keepstore(s)\n  hosts: arvados_keepstore\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_keepstore\n\n- name: Set up keep-balance\n  hosts: arvados_keepbalance\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_keepbalance\n\n- name: Set up keepproxy(ies)\n  hosts: arvados_keepproxy\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_keepproxy\n\n- name: Set up keep-web(s)\n  hosts: arvados_keep_web\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_keep_web\n\n- name: Set up Workbench(es)\n  hosts: arvados_workbench\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_workbench\n\n- name: Set up shell node(s)\n  hosts: arvados_shell\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_shell\n\n- name: Set up local dispatch nodes\n  hosts: arvados_dispatch_local\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_dispatch_local\n\n- name: Set up cloud dispatcher(s)\n  hosts: arvados_dispatch_cloud\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_dispatch_cloud\n\n- name: Set up Loki\n  hosts: arvados_loki\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_loki\n\n- name: Set up Alloy\n  hosts: arvados_alloy\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_alloy\n\n- name: Set up Grafana\n  hosts: arvados_grafana\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_grafana\n\n- name: Set up Prometheus\n  
hosts: arvados_prometheus\n  serial: 1\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_prometheus\n"
  },
  {
    "path": "tools/ansible/install-dev-tools.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# install-dev-tools.yml - Install Arvados development tooling\n#\n### Introduction\n#\n# This playbook installs and configures software necessary for Arvados\n# development. It uses host groups from your inventory to select which\n# dependencies are managed.\n#\n# The most inclusive group is `arvados_test_all`. Hosts in this group will\n# have everything they need to clone the Arvados source and run\n# `build/run-tests.sh`.\n#\n# Another useful group is `arvados_build_one_target`. Hosts in this group\n# will have enough software to build Arvados istribution packages. It's\n# meant to run inside Docker containers for different target distributions.\n#\n### Example Inventory\n#\n# arvados_test_all:\n#   hosts:\n#     dev.arvados.example:\n#   vars:\n#     # See files/default-test-config.yml for an example.\n#     # You can change the Arvados database configuration and this playbook\n#     # will set up PostgreSQL to match.\n#     arvados_config_file: /home/example/path/arvados/config.yml\n#\n### Run the playbook\n#\n# $ ansible-playbook -Ki YOUR-INVENTORY.yml install-dev-tools.yml\n#\n### Advanced groups\n#\n# This documentation is aimed at Arvados developers building tooling with\n# this playbook.\n#\n# The pattern of group names is `arvados_build_COMPONENT` and\n# `arvados_test_COMPONENT`. The `build` group installs everything you need\n# to \"build\" the component from source. (Exactly what that means varies by\n# language.) `arvados_test_COMPONENT` adds everything you need to run that\n# component's tests. Any host in an `arvados_test` group is automatically\n# added to its corresponding `arvados_build` group.\n#\n# See the `core_components` variable below for the list of components. 
In\n# general, each is the name of a language we have multiple components in, or\n# the name of a component that can be tested independently in `run-tests.sh`.\n\n- name: Bootstrap nodes\n  hosts: all\n  gather_facts: no\n  vars:\n    # The `arvados_build_one_target` group will be expanded to the\n    # `arvados_build_NAME` group for every name in this list.\n    # This corresponds to the components we build distribution packages for.\n    core_components:\n      - cwl\n      - go\n      - python\n      - ruby\n      - workbench\n    # `arvados_test_all` works similarly with additional components.\n    test_components:\n      # `arvados_build_all_targets` installs Docker+Ansible to build and run\n      # package build/test Docker images.\n      # `arvados_test_all_targets` installs additional tools necessary to\n      # orchestrate the package test Docker containers.\n      - all_targets\n      - doc\n      - java\n      - R\n\n  tasks:\n    - ansible.builtin.include_role:\n        name: distro_bootstrap\n\n    # If the host is in no `arvados` groups, add it to `arvados_test_all`.\n    - name: Build default group arvados_test_all\n      when: \"hostvars[item]['group_names']|map('regex_search', '^arvados_')|select|first is undefined\"\n      ansible.builtin.add_host:\n        host: \"{{ item }}\"\n        groups:\n          - arvados_test_all\n      loop: \"{{ ansible_play_hosts }}\"\n\n    - name: Expand arvados_build_one_target group\n      ansible.builtin.add_host:\n        host: \"{{ item }}\"\n        groups: \"{{ core_components|map('replace', '', 'arvados_build_', 1) }}\"\n      loop: \"{{ groups.arvados_build_one_target|default([]) }}\"\n\n    - name: Expand arvados_test_all group\n      ansible.builtin.add_host:\n        host: \"{{ item }}\"\n        groups: \"{{ (core_components + test_components)|map('replace', '', 'arvados_test_', 1) }}\"\n      loop: \"{{ groups.arvados_test_all|default([]) }}\"\n\n    - name: Add test hosts to build groups\n      ansible.builtin.add_host:\n        host: \"{{ item }}\"\n        groups: \"{{ hostvars[item]['group_names']|map('regex_replace', '^arvados_test_', 'arvados_build_', 1)|list }}\"\n      loop: \"{{ ansible_play_hosts }}\"\n\n### Core dependencies\n\n- hosts: arvados_build_*\n  tasks:\n    - ansible.builtin.include_role:\n        name: distro_packages\n      vars:\n        task_name: Install common build tools\n        package_names:\n          - curl\n          - diffutils\n          - findutils\n          - git\n          - jq\n\n    - name: Set up Arvados development user\n      become: yes\n      ansible.builtin.user:\n        name: \"{{ arvados_dev_user|default(ansible_user_id) }}\"\n      register: dev_user\n\n# All of these test suites will spin up an entire development cluster, which\n# requires:\n#  * building and running arvados-server\n#  * building Ruby gems and running the API server in test mode\n#  * installing the Python SDK to run `run_test_server.py`\n# So we take this as one big group, and this is the play where we set up all\n# the prerequisites to do that.\n- hosts: arvados_test_cwl:arvados_test_go:arvados_test_python:arvados_test_ruby:arvados_test_workbench\n  tasks:\n    # Most arvados ansible roles don't currently have dnf tasks, support for\n    # RHEL configuration paths, etc.\n    - name: Check distribution support\n      when: \"ansible_pkg_mgr != 'apt'\"\n      ansible.builtin.fail:\n        msg: Installing test prerequisites is currently only supported on Debian and Ubuntu\n\n    - name: Add host to prerequisite 
build groups\n      ansible.builtin.add_host:\n        host: \"{{ item }}\"\n        groups:\n          - arvados_build_go\n          - arvados_build_python\n          - arvados_build_ruby\n      loop: \"{{ ansible_play_hosts }}\"\n\n    - name: Load Arvados configuration file\n      delegate_to: localhost\n      ansible.builtin.include_vars:\n        name: arvados_config\n        file: \"{{ arvados_config_file }}\"\n\n    - name: Load Arvados cluster configuration\n      ansible.builtin.set_fact:\n        arvados_cluster: \"{{ arvados_config.Clusters.zzzzz }}\"\n      failed_when: arvados_cluster is undefined\n\n    - name: Install shared test dependencies\n      become: yes\n      ansible.builtin.apt:\n        name:\n          - locales\n          - nginx\n          - openssl\n          # Direct dependencies of run-tests.sh\n          - bsdextrautils\n          - net-tools\n          # Used by `arvados-server boot`\n          - rsync\n\n    # Tests assume the underlying database uses en_US.UTF-8.\n    # It must be generated before starting the PostgreSQL server.\n    - name: Configure en_US.UTF-8 locale\n      become: yes\n      ansible.builtin.lineinfile:\n        path: /etc/locale.gen\n        regexp: \"^[# ]*en_US.UTF-8 +UTF-8 *$\"\n        line: en_US.UTF-8 UTF-8\n      register: locale_gen\n\n    - name: Run locale-gen\n      when: locale_gen.changed\n      become: yes\n      ansible.builtin.command:\n        cmd: locale-gen\n\n    - ansible.builtin.include_role:\n        name: arvados_postgresql\n      vars:\n        arvados_postgresql_config: {}\n        arvados_postgresql_hba_sources:\n          - \"127.0.0.0/24\"\n          - \"::1/128\"\n\n    - ansible.builtin.include_role:\n        name: arvados_database\n      when: \"ansible_virtualization_type != 'docker'\"\n      vars:\n        arvados_database_login_host: \"\"\n        # Let the test user drop and recreate the database wholesale\n        arvados_database_role_attr_flags: CREATEDB\n\n    - name: Set up .config/arvados\n      become: yes\n      become_user: \"{{ dev_user.name }}\"\n      ansible.builtin.file:\n        path: \"{{ (dev_user.home, item)|path_join }}\"\n        state: directory\n      loop:\n        - .config\n        - .config/arvados\n\n    - name: Write arvados/config.yml for testing\n      become: yes\n      become_user: \"{{ dev_user.name }}\"\n      ansible.builtin.copy:\n        src: \"{{ arvados_config_file }}\"\n        dest: \"{{ (dev_user.home, '.config/arvados/config.yml')|path_join }}\"\n        mode: 0600\n\n    - name: Add Arvados test configuration to profile.d\n      become: yes\n      ansible.builtin.copy:\n        content: |\n          if [ -z \"${CONFIGSRC:-}\" ] && [ -e ~/.config/arvados/config.yml ]; then\n            export CONFIGSRC=\"$HOME/.config/arvados\"\n          fi\n        dest: /etc/profile.d/arvados-test.sh\n\n### Core language build dependencies\n\n- hosts: arvados_build_go\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_go\n\n    - ansible.builtin.include_role:\n        name: distro_packages\n      vars:\n        task_name: Install Go build dependencies\n        package_names:\n          - libpam-dev\n\n- hosts: arvados_build_python\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_python\n\n- hosts: arvados_build_ruby\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_ruby\n\n### Distribution package dependencies\n# These are ordered here because they depend on the core language\n# dependencies above, but some of 
the language test suites later expand the\n# Docker installation.\n\n- hosts: arvados_build_one_target\n  tasks:\n    - name: Install fpm gem\n      become: yes\n      community.general.gem:\n        name: fpm\n        user_install: no\n        version: \"~> 1.16\"\n\n    - name: Install rpm-build\n      when: \"ansible_pkg_mgr == 'dnf'\"\n      become: yes\n      ansible.builtin.dnf:\n        name:\n          - rpm-build\n\n- hosts: arvados_build_all_targets\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_ansible\n      vars:\n        arvados_ansible_galaxy_user: \"{{ dev_user.name }}\"\n\n    - ansible.builtin.include_role:\n        name: arvados_docker\n\n    - name: Add development user to docker group\n      become: yes\n      ansible.builtin.user:\n        name: \"{{ dev_user.name }}\"\n        groups:\n          - docker\n        append: yes\n\n- hosts: arvados_test_all_targets\n  tasks:\n    - name: Install package test dependencies\n      become: yes\n      ansible.builtin.apt:\n        name:\n          - apt-utils\n          - createrepo-c\n          - dpkg-dev\n\n### Core language test dependencies\n\n- hosts: arvados_test_cwl:arvados_test_go\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_compute\n      vars:\n        arvados_compute_packages: []\n        arvados_compute_docker: true\n        arvados_compute_singularity: \"{{ 'arvados_test_go' in group_names }}\"\n\n    - name: Add development user to docker group\n      become: yes\n      ansible.builtin.user:\n        name: \"{{ dev_user.name }}\"\n        groups:\n          - docker\n        append: yes\n\n- hosts: arvados_test_go\n  tasks:\n    - name: Install Go test dependencies\n      become: yes\n      ansible.builtin.apt:\n        name:\n          # services/keep-web\n          - cadaver\n          - media-types\n\n### Individual component dependencies\n\n- hosts: arvados_build_doc\n  tasks:\n    # The documentation build just needs basic Ruby and Python.\n    # If we already installed those for the corresponding Arvados components,\n    # we don't need to do anything. 
The version we installed will work whether\n    # it's from source or distro packages.\n    - ansible.builtin.include_role:\n        name: distro_packages\n      when: \"'arvados_build_ruby' not in group_names\"\n      vars:\n        task_name: Install Ruby (rake) for documentation build\n        package_names:\n          - ruby\n\n    - ansible.builtin.include_role:\n        name: distro_packages\n      when: \"'arvados_build_python' not in group_names\"\n      vars:\n        task_name: Install Python for PySDK documentation build\n        package_names:\n          - python3-venv\n\n- hosts: arvados_test_doc\n  tasks:\n    - name: Install doc test requirements\n      become: yes\n      ansible.builtin.apt:\n        name:\n          - linkchecker\n\n- hosts: arvados_build_java\n  tasks:\n    - name: Install Java build requirements\n      become: yes\n      ansible.builtin.apt:\n        name:\n          - default-jdk-headless\n          - gradle\n\n- hosts: arvados_build_R\n  tasks:\n    - name: Install R build requirements\n      become: yes\n      ansible.builtin.apt:\n        name:\n          - g++\n          - libfontconfig1-dev\n          - libfreetype6-dev\n          - libfribidi-dev\n          - libharfbuzz-dev\n          - libjpeg-dev\n          - libpng-dev\n          - libtiff5-dev\n          - libxml2-dev\n          - make\n          - r-base\n\n- hosts: arvados_test_R\n  tasks:\n    - name: Install R test requirements\n      become: yes\n      ansible.builtin.apt:\n        name:\n          - r-cran-testthat\n\n- hosts: arvados_build_workbench\n  tasks:\n    - ansible.builtin.include_role:\n        name: arvados_nodejs\n\n- hosts: arvados_test_workbench\n  tasks:\n    - name: Install Workbench test requirements\n      become: yes\n      ansible.builtin.apt:\n        name:\n          # <https://docs.cypress.io/app/get-started/install-cypress#Linux-Prerequisites>\n          - firefox-esr\n          - libasound2\n          - libgbm-dev\n          - libgtk-3-0\n          - libgtk2.0-0\n          - libnotify-dev\n          - libnss3\n          - libxss1\n          - libxtst6\n          - xauth\n          - xvfb\n\n    - name: Check fs.inotify.max_user_watches sysctl value\n      ansible.builtin.command:\n        cmd: /sbin/sysctl --values fs.inotify.max_user_watches\n      register: max_user_watches_value\n\n    - name: Increase fs.inotify.max_user_watches\n      vars:\n        max_user_watches_wanted: 524288\n      when: \"max_user_watches_value.stdout|int < max_user_watches_wanted\"\n      become: yes\n      ansible.builtin.command:\n        argv:\n          - sysctl\n          - \"fs.inotify.max_user_watches={{ max_user_watches_wanted }}\"\n      register: max_user_watches_set\n\n    - name: Set fs.inotify.max_user_watches permanently\n      when: max_user_watches_set.changed\n      become: yes\n      ansible.builtin.copy:\n        content: |\n          ### This file is managed by Ansible\n          # React sets many inotify watchers and needs the limit increased.\n          {{ max_user_watches_set.stdout }}\n        dest: /etc/sysctl.d/arvados-workbench.conf\n        owner: root\n        group: root\n        mode: 0644\n"
  },
  {
    "path": "tools/ansible/privilege-nspawn-vm.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# privilege-nspawn-vm.yml - Add privileges to a systemd-nspawn VM to run\n# Arvados components\n#\n# Run this playbook on a host with systemd-nspawn installed. It will configure a\n# named VM container with all of the privileges necessary to run different Arvados\n# components.\n#\n# You MUST run this playbook with the `container_name` variable set to the name\n# of the VM to configure.\n#\n# By default the playbook grants privileges required for all Arvados components.\n# You can revoke the privileges for a component by setting any of the variables\n# `docker_privileges`, `fuse_privileges`, or `singularity_privileges` to the\n# string 'absent'. For example, if you're building a compute node VM that only\n# uses the Docker compute engine, you could set `singularity_privileges=absent`\n# to avoid granting privileges that are only required for Singularity.\n\n- name: Add privileges to systemd-nspawn VM\n  hosts: localhost\n  become: yes\n\n  vars:\n    docker_privileges: present\n    fuse_privileges: present\n    singularity_privileges: present\n    nspawn_container_conffile: \"/etc/systemd/nspawn/{{ container_name }}.nspawn\"\n    nspawn_service_conffile: \"/etc/systemd/system/systemd-nspawn@{{ container_name }}.service.d/arvados-ansible.conf\"\n\n  module_defaults:\n    community.general.ini_file:\n      exclusive: false\n      ignore_spaces: true\n      no_extra_spaces: true\n      owner: root\n      group: root\n      mode: 0644\n\n  tasks:\n    - name: Create systemd-nspawn drop-in directory\n      ansible.builtin.file:\n        state: directory\n        path: \"{{ nspawn_service_conffile|dirname }}\"\n        owner: root\n        group: root\n        mode: 0755\n\n    - name: Control access to FUSE device\n      community.general.ini_file:\n        state: \"{{ fuse_privileges }}\"\n        path: \"{{ nspawn_service_conffile }}\"\n        section: Service\n        option: DeviceAllow\n        value: \"/dev/fuse rw\"\n      notify: daemon-reload\n\n    - name: Control access to block loop devices\n      community.general.ini_file:\n        state: \"{{ singularity_privileges }}\"\n        path: \"{{ nspawn_service_conffile }}\"\n        section: Service\n        option: DeviceAllow\n        value: \"block-loop rwm\"\n      notify: daemon-reload\n    - name: Control block loop device ordering\n      community.general.ini_file:\n        state: \"{{ singularity_privileges }}\"\n        path: \"{{ nspawn_service_conffile }}\"\n        section: Unit\n        option: \"{{ item }}\"\n        value: \"modprobe@loop.service\"\n      loop:\n        - Wants\n        - After\n      notify: daemon-reload\n\n    - name: Filter system calls for Docker\n      community.general.ini_file:\n        state: \"{{ docker_privileges }}\"\n        path: \"{{ nspawn_container_conffile }}\"\n        section: Exec\n        option: SystemCallFilter\n        value: \"{{ item }}\"\n      loop:\n        - add_key\n        - bpf\n        - keyctl\n\n    - name: Map private users for Singularity\n      community.general.ini_file:\n        state: \"{{ singularity_privileges }}\"\n        path: \"{{ nspawn_container_conffile }}\"\n        section: Exec\n        option: PrivateUsers\n        value: \"0\"\n\n    - name: Bind FUSE device\n      community.general.ini_file:\n        state: \"{{ fuse_privileges }}\"\n        path: \"{{ nspawn_container_conffile }}\"\n        section: Files\n        option: 
Bind\n        value: /dev/fuse\n\n    - name: Bind block loop control device\n      community.general.ini_file:\n        state: \"{{ singularity_privileges }}\"\n        path: \"{{ nspawn_container_conffile }}\"\n        section: Files\n        option: Bind\n        value: /dev/loop-control\n\n  handlers:\n    - name: daemon-reload\n      ansible.builtin.systemd_service:\n        daemon_reload: true\n"
  },
  {
    "path": "tools/ansible/requirements.txt",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# This file documents the Python modules required to run Arvados playbooks.\n# If you're doing something advanced, you can use this as a constraints file\n# to install only the components you want and still get the right versions.\n# For example, if you only want Ansible and no extras:\n#\n#      pip install -c requirements.txt ansible-core\n\n# The oldest version we support must be able to manage nodes with the oldest\n# version of Python on all Arvados-supported distributions.\n# As of March 2026 that's Python 3.6 on RHEL 8.\n# The newest version we support is the newest version we've tested playbooks\n# on. It should be supported by Ansible upstream.\nansible-core >= 2.16.0, < 2.21\ndocker ~= 7.0\n\n# It's not required for any playbook, but if you want to automate creating or\n# editing Arvados config.yml files, yq is our go-to tool.\nyq ~= 3.4\n"
  },
  {
    "path": "tools/ansible/requirements.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncollections:\n  - name: ansible.posix\n    version: \">=2.0,<3.0\"\n  - name: community.docker\n    version: \">=4.6,<5.0\"\n  - name: community.general\n    version: \">=10.7,<11.0\"\n  - name: community.postgresql\n    version: \">=4.0,<5.0\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_alloy/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_grafana\n"
  },
  {
    "path": "tools/ansible/roles/arvados_alloy/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install Alloy package\n  become: yes\n  ansible.builtin.apt:\n    name: alloy\n\n- name: Create Alloy config dir\n  become: yes\n  ansible.builtin.file:\n    path: /etc/alloy\n    state: directory\n    owner: root\n    group: alloy # Config folder must be owned by alloy\n    mode: 0770 # Matches the default permissions used by the grafana alloy package\n\n- name: Write Alloy config\n  become: yes\n  no_log: yes\n  ansible.builtin.copy:\n    src: \"{{ arvados_alloy_config_file }}\"\n    dest: /etc/alloy/config.alloy\n    owner: root\n    group: root\n    mode: 0644 # Matches the default permissions used by the grafana alloy package\n  register: arvados_alloy_config_copy\n\n- name: Start and enable alloy.service\n  become: yes\n  ansible.builtin.systemd_service:\n    name: alloy.service\n    state: \"{{ 'restarted' if arvados_alloy_config_copy.changed else 'started' }}\"\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_ansible/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# The directory where the Ansible virtualenv will be installed.\narvados_ansible_venv_dir: /opt/arvados-ansible\n\n# The user that will install Galaxy collections.\n# If this is not set (the default), collection installation is skipped.\narvados_ansible_galaxy_user: null\n\n# Arvados developers must get the oldest version in order to build Docker\n# images for the oldest distros\narvados_ansible_minimum_version: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_ansible/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# These packages are required by ansible.builtin.pip.\ndependencies:\n  - role: distro_packages\n    vars:\n      task_name: Install Python development tools\n      package_names:\n        - python3-packaging\n        - python3-venv\n"
  },
  {
    "path": "tools/ansible/roles/arvados_ansible/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Create requirements.txt tempfile\n  ansible.builtin.tempfile:\n    prefix: \"requirements-\"\n    suffix: \".txt\"\n  register: requirements_tempfile\n\n- name: Upload requirements.txt\n  ansible.builtin.copy:\n    src: requirements.txt\n    dest: \"{{ requirements_tempfile.path }}\"\n\n- name: Install minimum Ansible version for Arvados development\n  when: arvados_ansible_minimum_version|bool\n  ansible.builtin.replace:\n    path: \"{{ requirements_tempfile.path }}\"\n    regexp: \"^ansible-core\\\\s*>=\"\n    replace: \"ansible-core ~=\"\n\n- name: Install Ansible virtualenv\n  become: yes\n  ansible.builtin.pip:\n    virtualenv: \"{{ arvados_ansible_venv_dir }}\"\n    virtualenv_command: \"{{ ansible_python.executable|quote }} -m venv\"\n    requirements: \"{{ requirements_tempfile.path }}\"\n    umask: \"0022\"\n\n# It would be cute to reuse the same tempfile, but we can't because\n# ansible-galaxy is sensitive to the filename extension.\n\n- name: Remove requirements.txt\n  ansible.builtin.file:\n    path: \"{{ requirements_tempfile.path }}\"\n    state: absent\n\n- name: Add Ansible commands to PATH\n  become: yes\n  ansible.builtin.file:\n    state: link\n    src: \"{{ (arvados_ansible_venv_dir, 'bin', item)|path_join }}\"\n    dest: \"{{ ('/usr/local/bin', item)|path_join }}\"\n  loop:\n    - ansible\n    - ansible-galaxy\n    - ansible-playbook\n\n- name: Install Ansible requirements\n  when: arvados_ansible_galaxy_user is truthy\n  become: \"{{ arvados_ansible_galaxy_user != ansible_user_id }}\"\n  become_user: \"{{ arvados_ansible_galaxy_user }}\"\n  block:\n    - name: Create requirements.yml tempfile\n      ansible.builtin.tempfile:\n        prefix: \"requirements-\"\n        suffix: \".yml\"\n      register: requirements_tempfile\n\n    - name: Upload requirements.yml\n      ansible.builtin.copy:\n        src: requirements.yml\n        dest: \"{{ requirements_tempfile.path }}\"\n\n    - name: Install Ansible requirements\n      ansible.builtin.command:\n        argv:\n          - \"{{ (arvados_ansible_venv_dir, 'bin', 'ansible-galaxy')|path_join }}\"\n          - install\n          - \"-r\"\n          - \"{{ requirements_tempfile.path }}\"\n\n    - name: Remove requirements.yml\n      ansible.builtin.file:\n        path: \"{{ requirements_tempfile.path }}\"\n        state: absent\n"
  },
  {
    "path": "tools/ansible/roles/arvados_api/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\narvados_api_max_pool_size: \"{{ ansible_processor_nproc | int }}\"\narvados_api_max_request_queue_size: \"{{ arvados_api_max_pool_size | int * 2 + 1 }}\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_api/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: arvados_apt\n  - role: arvados_service\n  - role: distro_postgresql\n"
  },
  {
    "path": "tools/ansible/roles/arvados_api/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Install the RailsAPI server and configure it to match the cluster\n# configuration.\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install PostgreSQL client package from postgresql.org\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - \"postgresql-client-{{ arvados_postgresql_version }}\"\n  when: \"arvados_postgresql_version is defined\"\n\n- name: Install arvados-api-server\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - arvados-api-server\n  register: arvados_api_server_apt\n\n- name: Create arvados-railsapi.service.d\n  become: yes\n  ansible.builtin.file:\n    path: /etc/systemd/system/arvados-railsapi.service.d\n    state: directory\n    owner: root\n    group: root\n    mode: 0755\n\n- name: Write cluster configuration to arvados-railsapi.service override\n  become: yes\n  ansible.builtin.template:\n    src: cluster.conf.j2\n    dest: \"/etc/systemd/system/arvados-railsapi.service.d/ansible-{{ arvados_cluster_id }}.conf\"\n    owner: root\n    group: root\n    mode: 0644\n  register: arvados_api_server_override\n\n- name: Start and enable arvados-railsapi.service\n  become: yes\n  vars:\n    arvados_api_server_changed: \"{{ arvados_api_server_apt.changed or arvados_api_server_override.changed or arvados_config_changed }}\"\n  ansible.builtin.systemd_service:\n    name: arvados-railsapi.service\n    daemon_reload: \"{{ arvados_api_server_changed }}\"\n    state: \"{{ 'restarted' if arvados_api_server_changed else 'started' }}\"\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_api/templates/cluster.conf.j2",
    "content": "### This file is managed by Ansible ###\n{# Copyright (C) The Arvados Authors. All rights reserved.\n #\n # SPDX-License-Identifier: Apache-2.0\n #}\n# This override sets Passenger to listen on an address matching\n# the cluster InternalURLs configuration.\n{% set listen_address = arvados_cluster.Services.RailsAPI | listen_addr %}\n[Service]\nEnvironment=PASSENGER_ADDRESS={{ listen_address.address }}\nEnvironment=PASSENGER_PORT={{ listen_address.port }}\nEnvironment=PASSENGER_MAX_POOL_SIZE={{ arvados_api_max_pool_size }}\nEnvironment=PASSENGER_MAX_REQUEST_QUEUE_SIZE={{ arvados_api_max_request_queue_size }}\n"
  },
  {
    "path": "tools/ansible/roles/arvados_apt/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\narvados_apt_url: \"https://apt.arvados.org\"\narvados_apt_suites: \"{{ ansible_distribution_release }}\"\narvados_pin_version: \"{{ '' if arvados_apt_suites.endswith(('-dev', '-testing')) else '3.2.1' }}\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_apt/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_apt\n"
  },
  {
    "path": "tools/ansible/roles/arvados_apt/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Install Arvados package pins\n  when: \"arvados_pin_version != ''\"\n  become: yes\n  ansible.builtin.template:\n    src: arvados.pref.j2\n    dest: /etc/apt/preferences.d/arvados.pref\n    owner: root\n    group: root\n    mode: 0644\n\n- name: Install Arvados apt repository\n  become: yes\n  ansible.builtin.deb822_repository:\n    name: arvados\n    types: deb\n    uris: \"{{ arvados_apt_url }}/{{ ansible_distribution_release }}\"\n    # As a convenient shortcut for users, if arvados_apt_suites looks like a\n    # suffix (i.e., it starts with `-`), prepend the current distro codename.\n    suites: \"{{ ansible_distribution_release if arvados_apt_suites.startswith('-') else '' }}{{ arvados_apt_suites }}\"\n    components: main\n    signed_by: \"{{ arvados_apt_url }}/pubkey.gpg\"\n  notify:\n    - apt update\n"
  },
  {
    "path": "tools/ansible/roles/arvados_apt/templates/arvados.pref.j2",
    "content": "### This file is managed by Ansible ###\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nPackage: arvados-* crunch-* keep-* python3-arvados-* keepproxy keepstore libpam-arvados-go python3-crunchstat-summary\nPin: version {{ arvados_pin_version }}{{ '' if arvados_pin_version is search('(-|\\\\*$)') else '-*' }}\nPin-Priority: 995\n"
  },
  {
    "path": "tools/ansible/roles/arvados_aws_secret/files/arvados-aws-secret.sh",
    "content": "#!/bin/sh\n\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nset -e\nset -u\n\nSECRET_ID=\"$1\"\nOUT_FILE=\"${2:-$RUNTIME_DIRECTORY/$SECRET_ID}\"\n\nwhile true; do\n    aws secretsmanager get-secret-value --secret-id \"$SECRET_ID\" |\n        jq -r .SecretString >\"$OUT_FILE\"\n    sleep 1\ndone\n"
  },
  {
    "path": "tools/ansible/roles/arvados_aws_secret/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Install aws script dependencies\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - awscli\n      - jq\n\n- name: Install aws secret script\n  become: yes\n  ansible.builtin.copy:\n    src: arvados-aws-secret.sh\n    dest: /usr/local/sbin/arvados-aws-secret.sh\n    owner: root\n    group: root\n    mode: 0755\n\n- name: Write arvados_aws_secret service definitions\n  become: yes\n  ansible.builtin.template:\n    src: arvados_aws_secret.service.j2\n    dest: /etc/systemd/system/arvados_aws_secret.service\n    owner: root\n    group: root\n    mode: 0644\n\n- name: Add symlink from password_secret_connector service to arvados_aws_secret\n  become: yes\n  ansible.builtin.file:\n    state: link\n    src: /etc/systemd/system/arvados_aws_secret.service\n    dest: /etc/systemd/system/password_secret_connector.service\n"
  },
  {
    "path": "tools/ansible/roles/arvados_aws_secret/templates/arvados_aws_secret.service.j2",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n[Unit]\nDescription=Arvados AWS secret retrieval service\nWants=network-online.target\nAfter=network-online.target\n\n[Service]\n# AWS_SHARED_CREDENTIALS_FILE is set to /dev/null to avoid AWS's CLI\n# loading invalid credentials on nodes who use ~/.aws/credentials for other\n# purposes (e.g.: the dispatcher credentials)\n# Access to the secrets manager is given by using an instance profile.\nEnvironment=AWS_SHARED_CREDENTIALS_FILE=/dev/null\nEnvironment={{ (\"AWS_REGION=\" ~ aws_region) | systemd_env_quote }}\nRuntimeDirectory=arvados\nExecStartPre=/usr/bin/mkfifo --mode=0600 %t/arvados/{{ arvados_secret_id | systemd_exec_quote }}\nExecStart=/usr/local/sbin/arvados-aws-secret.sh {{ arvados_secret_id | systemd_exec_quote }}\n"
  },
  {
    "path": "tools/ansible/roles/arvados_compute/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# List of Arvados packages to install.\n# You can set this empty if you intend to install from source/PyPI.\narvados_compute_packages:\n  - crunch-run\n  - python3-arvados-fuse\n\n# These flags control which container and GPU engine(s) get installed.\n# Note that AMD ROCm support is still in development and untested.\narvados_compute_amd_rocm: false\narvados_compute_docker: \"{{ arvados_cluster.Containers.RuntimeEngine|default('docker') == 'docker' }}\"\narvados_compute_nvidia: false\narvados_compute_singularity: \"{{ arvados_cluster.Containers.RuntimeEngine|default('') == 'singularity' }}\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_compute/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: \"{{ 'arvados_apt' if (arvados_compute_packages is defined and arvados_compute_packages is truthy) else 'distro_apt' }}\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_compute/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Install and configure core software necessary to run containers with\n# crunch-run. This includes at least one container engine and optionally\n# GPU support.\n# You can set flags documented in defaults/main.yml to control which software\n# gets installed.\n# Note this only covers everything \"under\" crunch-run: it does not configure\n# the node to receive dispatched jobs. Test nodes want to be able to install\n# this software without that configuration.\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install Arvados compute packages\n  when: arvados_compute_packages is truthy\n  become: yes\n  ansible.builtin.apt:\n    name: \"{{ arvados_compute_packages }}\"\n\n- name: Check for /etc/fuse.conf\n  ansible.builtin.stat:\n    path: /etc/fuse.conf\n  register: fuse_conf\n\n- name: Install FUSE\n  when: not fuse_conf.stat.exists\n  become: yes\n  ansible.builtin.apt:\n    name: fuse3\n\n- name: Configure FUSE with user_allow_other\n  become: yes\n  ansible.builtin.lineinfile:\n    path: /etc/fuse.conf\n    regexp: \"^[# ]*user_allow_other *$\"\n    line: user_allow_other\n\n- ansible.builtin.include_role:\n    name: arvados_docker\n  when: arvados_compute_docker|bool\n- ansible.builtin.include_role:\n    name: compute_singularity\n  when: arvados_compute_singularity|bool\n- ansible.builtin.include_role:\n    name: compute_amd_rocm\n  when: arvados_compute_amd_rocm|bool\n- ansible.builtin.include_role:\n    name: compute_nvidia\n  when: arvados_compute_nvidia|bool\n"
  },
  {
    "path": "tools/ansible/roles/arvados_controller/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: arvados_apt\n  - role: arvados_service\n  - role: arvados_nginx_frontend\n    vars:\n      arvados_nginx_service_key: Controller\n"
  },
  {
    "path": "tools/ansible/roles/arvados_controller/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install arvados-controller\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - arvados-controller\n\n- name: Start and enable arvados-controller.service\n  become: yes\n  ansible.builtin.systemd_service:\n    name: arvados-controller.service\n    state: \"{{ 'restarted' if arvados_config_changed else 'started' }}\"\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_database/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# User and database to set up\narvados_database_name: \"{{ arvados_cluster.PostgreSQL.Connection.dbname }}\"\narvados_database_user_name: \"{{ arvados_cluster.PostgreSQL.Connection.user }}\"\narvados_database_user_password: \"{{ arvados_cluster.PostgreSQL.Connection.password }}\"\n\n# Comma-separated list of `CREATE ROLE WITH` arguments,\n# like `CREATEDB`, `NOLOGIN`, etc.\narvados_database_role_attr_flags: \"\"\n\n# How to connect to the PostgreSQL server.\n# If login_host is empty, the role will \"become\" login_user on the managed node\n# to perform database administration.\narvados_database_login_host: \"{{ arvados_cluster.PostgreSQL.Connection.host }}\"\narvados_database_login_port: \"{{ arvados_cluster.PostgreSQL.Connection.port }}\"\n\n# Credentials for the PostgreSQL server.\narvados_database_login_user: postgres\narvados_database_login_password: \"\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_database/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_apt\n"
  },
  {
    "path": "tools/ansible/roles/arvados_database/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install psycopg2\n  become: yes\n  ansible.builtin.apt:\n    name: python3-psycopg2\n\n- name: Create database user\n  no_log: yes\n  become: \"{{ arvados_database_login_host is falsy }}\"\n  become_user: \"{{ arvados_database_login_user }}\"\n  community.postgresql.postgresql_user:\n    name: \"{{ arvados_database_user_name }}\"\n    password: \"{{ arvados_database_user_password }}\"\n    role_attr_flags: \"{{ arvados_database_role_attr_flags }}\"\n    login_host: \"{{ arvados_database_login_host }}\"\n    login_password: \"{{ arvados_database_login_password }}\"\n    login_port: \"{{ arvados_database_login_port }}\"\n    login_user: \"{{ arvados_database_login_user }}\"\n\n- name: Create database\n  no_log: yes\n  become: \"{{ arvados_database_login_host is falsy }}\"\n  become_user: \"{{ arvados_database_login_user }}\"\n  community.postgresql.postgresql_db:\n    db: \"{{ arvados_database_name }}\"\n    encoding: UTF8\n    owner: \"{{ arvados_database_user_name }}\"\n    template: template0\n    login_host: \"{{ arvados_database_login_host }}\"\n    login_password: \"{{ arvados_database_login_password }}\"\n    login_port: \"{{ arvados_database_login_port }}\"\n    login_user: \"{{ arvados_database_login_user }}\"\n\n- name: Create pg_trgm extension\n  no_log: yes\n  become: \"{{ arvados_database_login_host is falsy }}\"\n  become_user: \"{{ arvados_database_login_user }}\"\n  community.postgresql.postgresql_ext:\n    name: pg_trgm\n    login_db: \"{{ arvados_database_name }}\"\n    login_host: \"{{ arvados_database_login_host }}\"\n    login_password: \"{{ arvados_database_login_password }}\"\n    login_port: \"{{ arvados_database_login_port }}\"\n    login_user: \"{{ arvados_database_login_user }}\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_dispatch_cloud/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: arvados_service\n"
  },
  {
    "path": "tools/ansible/roles/arvados_dispatch_cloud/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install arvados-dispatch-cloud\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - arvados-dispatch-cloud\n\n- name: Start and enable arvados-dispatch-cloud.service\n  become: yes\n  ansible.builtin.systemd_service:\n    name: arvados-dispatch-cloud.service\n    state: \"{{ 'restarted' if arvados_config_changed else 'started' }}\"\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_dispatch_local/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: arvados_compute\n  - role: arvados_service\n"
  },
  {
    "path": "tools/ansible/roles/arvados_dispatch_local/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Configure Docker if needed\n  when: \"arvados_compute_docker|bool\"\n  ansible.builtin.include_role:\n    name: compute_docker\n\n- name: Install crunch-dispatch-local\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - crunch-dispatch-local\n\n- name: Start and enable crunch-dispatch-local.service\n  become: yes\n  ansible.builtin.systemd_service:\n    name: crunch-dispatch-local.service\n    state: \"{{ 'restarted' if arvados_config_changed else 'started' }}\"\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_docker/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\narvados_compute_pin_packages: true\ndocker_apt_url: \"https://download.docker.com/linux/{{ ansible_distribution|lower }}\"\ndocker_dnf_url: \"https://download.docker.com/linux/rhel\"\n# Packages to be installed with (optionally) their required version spec. Use empty string\n# when no specific version is needed.\ndocker_dnf_packages:\n  containerd.io: \"1.7.*\"\n  docker-ce: \"3:28.*\"\n  docker-ce-cli: \"1:28.*\"\n  docker-ce-rootless-extras: \"28.*\""
  },
  {
    "path": "tools/ansible/roles/arvados_docker/files/arvados-docker.pref",
    "content": "### This file is managed by Ansible ###\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Pin Docker dependencies to tested and known-good versions\n\nPackage: src:docker-ce\nPin: version 5:28.*\nPin-Priority: 995\n\nPackage: containerd.io\nPin: version 1.7.*\nPin-Priority: 995\n"
  },
  {
    "path": "tools/ansible/roles/arvados_docker/tasks/apt.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- ansible.builtin.include_role:\n    name: distro_apt\n\n- name: Install Docker package pins\n  become: yes\n  ansible.builtin.copy:\n    src: arvados-docker.pref\n    dest: /etc/apt/preferences.d/arvados-docker.pref\n    owner: root\n    group: root\n    mode: 0644\n  when: \"arvados_compute_pin_packages|bool\"\n\n- name: Install Docker repository\n  become: yes\n  ansible.builtin.deb822_repository:\n    name: docker\n    types: deb\n    uris: \"{{ docker_apt_url }}\"\n    suites: \"{{ ansible_distribution_release }}\"\n    components: stable\n    signed_by: \"{{ docker_apt_url }}/gpg\"\n  notify:\n    - apt update\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install Docker package\n  become: yes\n  ansible.builtin.apt:\n    name: docker-ce\n    install_recommends: false\n"
  },
  {
    "path": "tools/ansible/roles/arvados_docker/tasks/dnf.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Install Docker repository\n  become: yes\n  ansible.builtin.yum_repository:\n    name: docker\n    description: Docker-CE repository\n    baseurl: \"{{ docker_dnf_url }}/$releasever/$basearch/stable\"\n    gpgcheck: true\n    gpgkey: \"{{ docker_dnf_url }}/gpg\"\n\n- when: \"arvados_compute_pin_packages|bool\"\n  become: yes\n  block:\n    - name: Install current Docker dnf package pins\n      community.general.dnf_versionlock:\n        name: \"{{ docker_dnf_packages|items|map('join', '-')|list }}\"\n        state: present\n      register: docker_dnf_versionlock\n\n    - name: Clean-up Docker dnf package pins\n      vars:\n        # Names of packages with changed locks\n        locked_pkgs: \"{{ docker_dnf_versionlock.specs_toadd|map('regex_replace', '-[^-]+-[^-]+$', '')|unique }}\"\n        # Regexp to match all locks on locked_pkgs\n        lock_re: \"^({{ locked_pkgs|map('regex_escape')|join('|') }})-[^-]+-[^-]+$\"\n        # All locks for locked_pkgs that weren't just added\n        old_locks: \"{{ docker_dnf_versionlock.locklist_post|select('match', lock_re)|reject('in', docker_dnf_versionlock.specs_toadd)|list }}\"\n      when: \"old_locks is truthy\"\n      community.general.dnf_versionlock:\n        name: \"{{ old_locks }}\"\n        state: absent\n\n- name: Install Docker packages\n  become: yes\n  ansible.builtin.dnf:\n    name: \"{{ docker_dnf_packages|list }}\"\n    state: latest\n    allow_downgrade: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_docker/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- ansible.builtin.include_tasks: \"{{ ansible_pkg_mgr }}.yml\""
  },
  {
    "path": "tools/ansible/roles/arvados_go/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\narvados_go_destdir: \"/opt/go-{{ arvados_go_version }}\"\narvados_go_version: \"1.25.6\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_go/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_packages\n    vars:\n      task_name: Install Go unarchive dependencies\n      package_names:\n        - gzip\n        - tar\n"
  },
  {
    "path": "tools/ansible/roles/arvados_go/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Set up Go directory\n  become: yes\n  ansible.builtin.file:\n    state: directory\n    path: \"{{ arvados_go_destdir }}\"\n    owner: root\n    group: root\n    mode: 0755\n\n- name: Install Go\n  become: yes\n  ansible.builtin.unarchive:\n    src: \"https://go.dev/dl/go{{ arvados_go_version }}.linux-amd64.tar.gz\"\n    dest: \"{{ arvados_go_destdir }}\"\n    extra_opts:\n      - \"--strip-components=1\"\n    remote_src: yes\n    creates: \"{{ (arvados_go_destdir, 'bin/go')|path_join }}\"\n\n- name: Add Go commands to PATH\n  become: yes\n  ansible.builtin.file:\n    state: link\n    src: \"{{ (arvados_go_destdir, 'bin', item)|path_join }}\"\n    dest: \"{{ ('/usr/local/bin', item)|path_join }}\"\n  loop:\n    - go\n    - gofmt\n"
  },
  {
    "path": "tools/ansible/roles/arvados_grafana/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_grafana\n"
  },
  {
    "path": "tools/ansible/roles/arvados_grafana/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install Grafana\n  become: yes\n  ansible.builtin.apt:\n    name: grafana\n\n- name: Write Grafana config\n  become: yes\n  no_log: yes\n  ansible.builtin.copy:\n    src: \"{{ arvados_grafana_config_file }}\"\n    dest: /etc/grafana/grafana.ini\n    owner: root\n    group: grafana\n    mode: 0640 # Matches the default permissions used by the grafana package\n  register: arvados_grafana_config_copy\n\n- name: Start and enable grafana\n  become: yes\n  ansible.builtin.systemd_service:\n    name: grafana-server.service\n    state: \"{{ 'restarted' if arvados_grafana_config_copy.changed else 'started' }}\"\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_keep_web/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: arvados_apt\n  - role: arvados_service\n  - role: arvados_nginx_frontend\n    vars:\n      arvados_nginx_service_key: WebDAV\n"
  },
  {
    "path": "tools/ansible/roles/arvados_keep_web/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install Arvados keep-web\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - keep-web\n\n- name: Start and enable keep-web.service\n  become: yes\n  ansible.builtin.systemd_service:\n    name: keep-web.service\n    state: \"{{ 'restarted' if arvados_config_changed else 'started' }}\"\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_keepbalance/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: arvados_apt\n  - role: arvados_service\n"
  },
  {
    "path": "tools/ansible/roles/arvados_keepbalance/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install Arvados keep-balance\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - keep-balance\n\n- name: Start and enable keep-balance.service\n  become: yes\n  ansible.builtin.systemd_service:\n    name: keep-balance.service\n    state: \"{{ 'restarted' if arvados_config_changed else 'started' }}\"\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_keepproxy/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: arvados_apt\n  - role: arvados_service\n  - role: arvados_nginx_frontend\n    vars:\n      arvados_nginx_service_key: Keepproxy\n"
  },
  {
    "path": "tools/ansible/roles/arvados_keepproxy/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install Arvados keepproxy\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - keepproxy\n\n- name: Start and enable keepproxy.service\n  become: yes\n  ansible.builtin.systemd_service:\n    name: keepproxy.service\n    state: \"{{ 'restarted' if arvados_config_changed else 'started' }}\"\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_keepstore/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: arvados_apt\n  - role: arvados_service\n"
  },
  {
    "path": "tools/ansible/roles/arvados_keepstore/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Ensure Keepstore Directory roots exist\n  become: yes\n  ansible.builtin.file:\n    state: directory\n    path: \"{{ item }}\"\n    owner: root\n    group: root\n    mode: 0750\n  loop: \"{{ arvados_cluster.Volumes.values()|selectattr('Driver', '==', 'Directory')|map(attribute='DriverParameters.Root')|unique }}\"\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install Arvados keepstore\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - keepstore\n\n- name: Start and enable keepstore.service\n  become: yes\n  ansible.builtin.systemd_service:\n    name: keepstore.service\n    state: \"{{ 'restarted' if arvados_config_changed else 'started' }}\"\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_loki/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_grafana\n"
  },
  {
    "path": "tools/ansible/roles/arvados_loki/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install Loki package\n  become: yes\n  ansible.builtin.apt:\n    name: loki\n\n- name: Create Loki config dir\n  become: yes\n  ansible.builtin.file:\n    path: /etc/loki\n    state: directory\n    owner: root\n    group: root\n    mode: 0755 # Matches the default permissions used by the grafana loki package\n\n- name: Write Loki config.yml\n  become: yes\n  no_log: yes\n  ansible.builtin.copy:\n    src: \"{{ arvados_loki_config_file }}\"\n    dest: /etc/loki/config.yml\n    owner: root\n    group: root\n    mode: 0664 # Matches the default permissions used by the grafana loki package\n  register: arvados_loki_config_copy\n\n- name: Start and enable loki.service\n  become: yes\n  ansible.builtin.systemd_service:\n    name: loki.service\n    state: \"{{ 'restarted' if arvados_loki_config_copy.changed else 'started' }}\"\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_base/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# A hash that identifies network clients that should be considered\n# \"internal\" to Arvados. Each key is a CIDR netmask. The value is a\n# boolean. If it's the literal `false`, the netmask will not be considered\n# internal. *Any* other value will be considered internal.\narvados_nginx_internal_networks: {}\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_base/handlers/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: restart nginx\n  become: yes\n  ansible.builtin.systemd_service:\n    name: nginx\n    daemon_reload: \"{{ nginx_service_definition.changed|default(false) }}\"\n    state: restarted\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_base/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Install nginx\n  become: yes\n  ansible.builtin.apt:\n    name: nginx\n\n- name: Install nginx service override directory\n  become: yes\n  ansible.builtin.file:\n    state: directory\n    path: /etc/systemd/system/nginx.service.d\n    owner: root\n    group: root\n    mode: 0755\n\n- name: Disable nginx default site\n  become: yes\n  ansible.builtin.file:\n    path: /etc/nginx/sites-enabled/default\n    state: absent\n  notify: restart nginx\n\n# Unfortunately Debian does not provide a good way to extend the core\n# configuration directives in nginx.conf. We have to edit that file\n# directly the old-fashioned way.\n# I have opted to simply delete configuration that we can't duplicate\n# (the events block) and then drop in our own include. We could try to\n# edit individual settings and that would be easier in the simple case\n# but I think this will be more reliable on more systems and easier to\n# extend as needed.\n- name: Extend nginx.conf core configuration\n  vars:\n    extport_min: \"{{ arvados_cluster.Services.ContainerWebServices.ExternalPortMin|default(0) }}\"\n    extport_max: \"{{ arvados_cluster.Services.ContainerWebServices.ExternalPortMax|default(0) }}\"\n    extport_count: \"{{ extport_max|int - extport_min|int + 1 }}\"\n    worker_connections: \"{{ extport_count|int * 3 // 2 }}\"\n    worker_rlimit_nofile: \"{{ extport_count|int * 2 }}\"\n  # For now, for limited impact, we only make these changes when we *know*\n  # we need to: to increase the number of worker_connections from Debian's\n  # default to accommodate a block of external container ports for controller.\n  when: |\n    ('arvados_controller' in groups\n     and 0 < extport_min|int < extport_max|int\n     and worker_connections|int > 768)\n  block:\n    - name: Set up nginx core configuration file\n      become: yes\n      ansible.builtin.template:\n        src: nginx-core.conf.j2\n        dest: /etc/nginx/arvados-core.conf\n        owner: root\n        group: root\n        mode: 0644\n      notify:\n        - restart nginx\n\n    - name: Remove nginx default events configuration\n      become: yes\n      ansible.builtin.replace:\n        path: /etc/nginx/nginx.conf\n        regexp: |\n          (?mx)  # Regexp is multiline and verbose\n          # Find the beginning of the events block\n          ^\\s* events \\s* \\{\n          # Consume lines where we don't find the closing } before\n          # the start of a comment with # or the end of the line.\n          # Note this group starts *immediately* after the opening {\n          # and may match zero times/consume nothing.\n          ( [^#}\\n]* (|\\#.*) \\n )*?\n          # Consume remaining text until the closing brace.\n            [^#}\\n]* }\n        replace: \"\"\n      notify:\n        - restart nginx\n\n    - name: Include nginx core configuration in nginx.conf\n      become: yes\n      ansible.builtin.lineinfile:\n        path: /etc/nginx/nginx.conf\n        line: include /etc/nginx/arvados-core.conf;\n      notify:\n        - restart nginx\n\n- name: Set up nginx http configuration\n  become: yes\n  ansible.builtin.template:\n    src: arvados-nginx-http.conf.j2\n    dest: /etc/nginx/conf.d/arvados-ansible.conf\n    owner: root\n    group: root\n    mode: 0644\n  notify: restart nginx\n\n- name: Enable nginx service\n  become: yes\n  ansible.builtin.systemd_service:\n    name: nginx\n    enabled: true\n    state: started\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_base/templates/arvados-nginx-http.conf.j2",
    "content": "### This file is managed by Ansible ###\n{# Copyright (C) The Arvados Authors. All rights reserved.\n #\n # SPDX-License-Identifier: Apache-2.0\n #\n # Note this file gets included in an `http` context.\n # Any directives here need to work there.\n #}\n\n{# Justify to 45 characters - the longest IPv6 address+netmask is 43 #}\n{% set geo_fmt = \"{:<45s}\" %}\ngeo $external_client {\n  {{ geo_fmt.format('default') }} 1;\n  {{ geo_fmt.format('127.0.0.0/24') }} 0;\n  {{ geo_fmt.format('::1/128') }} 0;\n{% for netblock, val in arvados_nginx_internal_networks.items() %}\n  {{ geo_fmt.format(netblock) }} {{ 1 if val is false else 0 }};\n{% endfor %}\n}\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_base/templates/nginx-core.conf.j2",
    "content": "### This file is managed by Ansible ###\n{# Copyright (C) The Arvados Authors. All rights reserved.\n #\n # SPDX-License-Identifier: Apache-2.0\n #}\n\nevents {\n  worker_connections  {{ worker_connections }};\n}\nworker_rlimit_nofile  {{ worker_rlimit_nofile }};\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_frontend/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# arvados_nginx_proxy_config is an nginx configuration block that will be used\n# for all proxy front-ends. The default has standard configuration suitable for\n# all Arvados services.\narvados_nginx_proxy_config: |\n  proxy_buffering           off;\n  proxy_connect_timeout     90s;\n  proxy_http_version        1.1;\n  proxy_max_temp_file_size  0;\n  proxy_read_timeout        300s;\n  proxy_redirect            off;\n  proxy_request_buffering   off;\n\n  proxy_set_header  Connection         \"upgrade\";\n  proxy_set_header  Host               $http_host;\n  proxy_set_header  Upgrade            $http_upgrade;\n  proxy_set_header  X-External-Client  $external_client;\n  proxy_set_header  X-Forwarded-For    $proxy_add_x_forwarded_for;\n  proxy_set_header  X-Forwarded-Proto  https;\n  proxy_set_header  X-Real-IP          $remote_addr;\n\n# arvados_nginx_servers_config is an nginx configuration block that will be used\n# for all servers. You can set this in inventory to customize servers; e.g., to add\n# server names or listening ports.\narvados_nginx_server_config: \"\"\n\ntls_source: \"{{ arvados_tls.get(arvados_nginx_service_key, arvados_tls.Default) }}\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_frontend/files/aws_secret.conf",
    "content": "### This file is managed by Ansible ###\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n[Unit]\nWants=arvados_aws_secret.service\nAfter=arvados_aws_secret.service\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_frontend/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: arvados_nginx_base\n  - role: arvados_aws_secret\n    when: tls_source.key.aws_secret is defined\n    vars:\n      arvados_secret_id: \"{{ tls_source.key.aws_secret }}\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_frontend/tasks/install_cert_file.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: \"Copy {{ item }} file from control node\"\n  become: yes\n  ansible.builtin.copy:\n    src: \"{{ tls_source[item].src }}\"\n    dest: \"{{ tls_source[item].path }}\"\n    owner: \"{{ tls_source[item].owner|default(omit) }}\"\n    group: \"{{ tls_source[item].group|default(omit) }}\"\n    mode: \"{{ tls_source[item].mode|default('0640' if item == 'key' else omit) }}\"\n  when: tls_source[item].src is defined\n  notify:\n    - restart nginx\n\n- name: \"Ensure {{ item }} file exists and set attributes\"\n  become: yes\n  ansible.builtin.file:\n    state: file\n    dest: \"{{ tls_source[item].path }}\"\n    owner: \"{{ tls_source[item].owner|default(omit) }}\"\n    group: \"{{ tls_source[item].group|default(omit) }}\"\n    mode: \"{{ tls_source[item].mode|default('0640' if item == 'key' else omit) }}\"\n  when: tls_source[item].src is not defined\n  notify:\n    - restart nginx\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_frontend/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: \"Install nginx {{ arvados_nginx_service_key }} certificates\"\n  ansible.builtin.include_tasks: install_cert_file.yml\n  loop:\n    - cert\n    - key\n\n- name: Add nginx secret service\n  when: tls_source.key.aws_secret is defined\n  become: yes\n  ansible.builtin.copy:\n    src: aws_secret.conf\n    dest: /etc/systemd/system/nginx.service.d/aws_secret.conf\n    owner: root\n    group: root\n    mode: 0644\n  register: nginx_service_definition\n  notify:\n    - restart nginx\n\n- name: \"Set up nginx {{ arvados_nginx_service_key }} site\"\n  become: yes\n  ansible.builtin.template:\n    src: \"arvados-nginx-{{ arvados_nginx_service_key|lower }}.conf.j2\"\n    dest: \"/etc/nginx/sites-available/arvados-{{ arvados_nginx_service_key|lower }}\"\n    owner: root\n    group: root\n    mode: 0644\n  notify:\n    - restart nginx\n  register: arvados_nginx_site\n\n- name: \"Enable nginx {{ arvados_nginx_service_key }} site\"\n  become: yes\n  ansible.builtin.file:\n    state: link\n    src: \"{{ arvados_nginx_site.dest }}\"\n    dest: \"{{ arvados_nginx_site.dest|replace('/sites-available/', '/sites-enabled/') }}\"\n  notify:\n    - restart nginx\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_frontend/templates/arvados-nginx-controller.conf.j2",
    "content": "{% extends \"arvados-nginx-site.conf.j2\" %}\n{# Copyright (C) The Arvados Authors. All rights reserved.\n #\n # SPDX-License-Identifier: Apache-2.0\n #}\n\n{% block server_config %}\n{% set cws_svc = arvados_cluster.Services.ContainerWebServices %}\n{% set containers_addr = cws_svc|external_addr|default(none) %}\n{% if containers_addr.address is not none %}\n  server_name  {{ containers_addr.address }};\n{% endif %}\n\n{% set external_port_min = cws_svc.ExternalPortMin|default(0) %}\n{% set external_port_max = cws_svc.ExternalPortMax|default(0) %}\n{% if 0 < external_port_min <= external_port_max %}\n  listen  {{ external_port_min }}-{{ external_port_max }} ssl;\n{% endif %}\n\n  client_max_body_size  {{ arvados_cluster.API.MaxRequestSize|default('128m') }};\n{{ super() }}\n{% endblock %}\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_frontend/templates/arvados-nginx-keepproxy.conf.j2",
    "content": "{% extends \"arvados-nginx-site.conf.j2\" %}\n{# Copyright (C) The Arvados Authors. All rights reserved.\n #\n # SPDX-License-Identifier: Apache-2.0\n #}\n\n{% block server_config %}\n  client_max_body_size  64m;\n{{ super() }}\n{% endblock %}\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_frontend/templates/arvados-nginx-site.conf.j2",
    "content": "### This file is managed by Ansible ###\n{# Copyright (C) The Arvados Authors. All rights reserved.\n #\n # SPDX-License-Identifier: Apache-2.0\n #}\n\n{% set upstream_name = \"arvados-\" + arvados_nginx_service_key|lower %}\n{% set upstream_svc = arvados_cluster.Services[arvados_nginx_service_key] %}\n{% block upstream %}\nupstream {{ upstream_name }} {\n{% for addr in upstream_svc | internal_addrs %}\n  server  {{ addr }}  fail_timeout=10s;\n{% endfor %}\n}\n{% endblock %}\n\n{% set ext_addr = upstream_svc | external_addr %}\nserver {\n  listen       {{ ext_addr.port }} ssl;\n  server_name  {{ ext_addr.address }};\n\n  ssl_certificate      {{ tls_source.cert.path }};\n  ssl_certificate_key  {{ tls_source.key.path }};\n{% if tls_source.key.aws_secret is defined %}\n  ssl_password_file    /run/arvados/{{ tls_source.key.aws_secret }};\n{% endif %}\n\n  {{ arvados_nginx_server_config|indent(2) }}\n{% block server_config %}\n\n  location / {\n    proxy_pass  http://{{ upstream_name }};\n\n    {{ arvados_nginx_proxy_config|indent(4) }}\n  }\n{% endblock %}\n}\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_frontend/templates/arvados-nginx-webdav.conf.j2",
    "content": "{% extends \"arvados-nginx-site.conf.j2\" %}\n{# Copyright (C) The Arvados Authors. All rights reserved.\n #\n # SPDX-License-Identifier: Apache-2.0\n #}\n\n{% block server_config %}\n  client_max_body_size  0;\n{{ super() }}\n{% endblock %}\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_frontend/templates/arvados-nginx-websocket.conf.j2",
    "content": "{% extends \"arvados-nginx-site.conf.j2\" %}\n{# Copyright (C) The Arvados Authors. All rights reserved.\n #\n # SPDX-License-Identifier: Apache-2.0\n #}\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nginx_frontend/templates/arvados-nginx-workbench2.conf.j2",
    "content": "{% extends \"arvados-nginx-site.conf.j2\" %}\n{# Copyright (C) The Arvados Authors. All rights reserved.\n #\n # SPDX-License-Identifier: Apache-2.0\n #}\n\n{% block upstream %}\n{% endblock %}\n\n{% block server_config %}\n  # Workbench2 uses a call to /config.json to bootstrap itself\n  # and find out where to contact the API server.\n  location /config.json {\n    return 200 '{\"API_HOST\":\"{{ arvados_cluster.Services.Controller|external_addr }}\"}';\n  }\n\n  location / {\n    root      /var/www/arvados-workbench2/workbench2;\n    index     index.html;\n    try_files $uri $uri/ /index.html;\n    if (-f $document_root/maintenance.html) {\n      return 503;\n    }\n  }\n{% endblock %}\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nodejs/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\narvados_nodejs_version: \"20.19.4\"\narvados_nodejs_destdir: \"/opt/node-v{{ arvados_nodejs_version }}-linux-x64\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nodejs/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_packages\n    vars:\n      task_name: Install NodeJS unarchive dependencies\n      package_names:\n        - tar\n        - xz-utils\n"
  },
  {
    "path": "tools/ansible/roles/arvados_nodejs/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Install Node.js\n  become: yes\n  ansible.builtin.unarchive:\n    src: \"https://nodejs.org/dist/v{{ arvados_nodejs_version }}/node-v{{ arvados_nodejs_version }}-linux-x64.tar.xz\"\n    dest: \"{{ arvados_nodejs_destdir|dirname }}\"\n    remote_src: yes\n    creates: \"{{ (arvados_nodejs_destdir, 'bin/node')|path_join }}\"\n\n- name: Install yarn\n  become: yes\n  ansible.builtin.command:\n    cmd: npm install -g yarn\n    creates: \"{{ (arvados_nodejs_destdir, 'bin/yarn')|path_join }}\"\n  environment:\n    PATH: \"{{ (arvados_nodejs_destdir, 'bin')|path_join }}:{{ ansible_env.PATH }}\"\n\n- name: Add Node commands to PATH\n  become: yes\n  ansible.builtin.file:\n    state: link\n    src: \"{{ (arvados_nodejs_destdir, 'bin', item)|path_join }}\"\n    dest: \"{{ ('/usr/local/bin', item)|path_join }}\"\n  loop:\n    - node\n    - npm\n    - yarn\n    - yarnpkg\n"
  },
  {
    "path": "tools/ansible/roles/arvados_postgresql/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# The role will write these settings to a dedicated file under the PostgreSQL\n# conf.d directory. These defaults are suitable for most production\n# installations. You can set this to an empty hash to skip configuration.\n# Note that you MUST quote string values as appropriate for PostgreSQL config.\narvados_postgresql_config:\n  listen_addresses: \"'*'\"\n# The path where arvados_postgresql_config will be written.\n# If not set, the role will write the file to `conf.d/arvados-ansible.conf`\n# in the same directory as the HBA file.\narvados_postgresql_config_path: \"\"\n# If not specified, the role tasks will query the path from the server.\narvados_postgresql_hba_file: \"\"\narvados_postgresql_hba_contype: host\n# Comma-separated list of database names\narvados_postgresql_hba_databases: \"{{ arvados_cluster.PostgreSQL.Connection.dbname }}\"\narvados_postgresql_hba_method: scram-sha-256\n# Array of IPv4 or v6 netmasks, or special values recognized in pg_hba.conf\narvados_postgresql_hba_sources:\n  - samenet\n# Comma-separated list of user names\narvados_postgresql_hba_users: \"{{ arvados_cluster.PostgreSQL.Connection.user }}\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_postgresql/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_apt\n  - role: distro_postgresql\n"
  },
  {
    "path": "tools/ansible/roles/arvados_postgresql/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install PostgreSQL server package\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - \"postgresql\"\n  when: \"arvados_postgresql_version is not defined\"\n\n- name: Install PostgreSQL server package from postgresql.org\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - \"postgresql-{{ arvados_postgresql_version }}\"\n  when: \"arvados_postgresql_version is defined\"\n\n- name: Install psycopg2\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - python3-psycopg2\n\n- name: Setup PostgreSQL service\n  when: \"ansible_virtualization_type != 'docker'\"\n  ansible.builtin.include_tasks: setup_service.yml\n"
  },
  {
    "path": "tools/ansible/roles/arvados_postgresql/tasks/setup_service.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Find pg_hba.conf file\n  when: arvados_postgresql_hba_file is falsy\n  become: yes\n  become_user: postgres\n  community.postgresql.postgresql_query:\n    login_db: postgres\n    query: SHOW hba_file;\n  register: pg_hba_query\n\n- name: Create pg_hba.conf entries\n  when: arvados_postgresql_hba_databases is truthy and arvados_postgresql_hba_users is truthy\n  become: true\n  loop: \"{{ arvados_postgresql_hba_sources }}\"\n  community.postgresql.postgresql_pg_hba:\n    dest: \"{{ arvados_postgresql_hba_file or pg_hba_query.query_result.0.hba_file }}\"\n    contype: \"{{ arvados_postgresql_hba_contype }}\"\n    databases: \"{{ arvados_postgresql_hba_databases }}\"\n    method: \"{{ arvados_postgresql_hba_method }}\"\n    users: \"{{ arvados_postgresql_hba_users }}\"\n    source: \"{{ item }}\"\n  register: pg_hba_entries\n\n- name: Write PostgreSQL conf.d file\n  when: arvados_postgresql_config is truthy\n  become: true\n  vars:\n    conf_dir: \"{{ (arvados_postgresql_hba_file or pg_hba_query.query_result.0.hba_file)|dirname }}\"\n  ansible.builtin.template:\n    src: arvados-ansible.conf.j2\n    dest: \"{{ arvados_postgresql_config_path or (conf_dir, 'conf.d', 'arvados-ansible.conf')|path_join }}\"\n    owner: root\n    group: root\n    mode: 0644\n  register: arvados_postgresql_config_update\n\n- name: Set up PostgreSQL service\n  become: yes\n  ansible.builtin.systemd_service:\n    name: \"postgresql@{{ (arvados_postgresql_hba_file or pg_hba_query.query_result.0.hba_file)|dirname|relpath('/etc/postgresql')|replace('/', '-') }}.service\"\n    state: \"{{ 'restarted' if arvados_postgresql_config_update.changed else 'reloaded' if pg_hba_entries.changed else 'started' }}\"\n    enabled: yes\n"
  },
  {
    "path": "tools/ansible/roles/arvados_postgresql/templates/arvados-ansible.conf.j2",
    "content": "### This file is managed by Ansible ###\n{# Copyright (C) The Arvados Authors. All rights reserved.\n #\n # SPDX-License-Identifier: Apache-2.0\n #}\n\n{% for key, val in arvados_postgresql_config.items() %}\n{{ key }} = {{ val }}\n{% endfor %}\n"
  },
  {
    "path": "tools/ansible/roles/arvados_prometheus/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_apt\n"
  },
  {
    "path": "tools/ansible/roles/arvados_prometheus/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install Prometheus\n  become: yes\n  ansible.builtin.apt:\n    name: prometheus\n\n- name: Write Prometheus config\n  become: yes\n  no_log: yes\n  ansible.builtin.copy:\n    src: \"{{ arvados_prometheus_config_file }}\"\n    dest: /etc/prometheus/prometheus.yml\n    owner: root\n    group: root\n    mode: 0644 # Matches the default permissions used by the grafana package\n  register: arvados_prometheus_config_copy\n\n- name: Start and enable prometheus\n  become: yes\n  ansible.builtin.systemd_service:\n    name: prometheus.service\n    state: \"{{ 'restarted' if arvados_prometheus_config_copy.changed else 'started' }}\"\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_python/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\narvados_dev_from_pkgs: false\narvados_python_destdir: \"/opt/Python-{{ arvados_python_version }}\"\narvados_python_version: \"3.10.19\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_python/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_packages\n    vars:\n      # common_deps are required to build extensions whether Python is\n      # installed from source or packages.\n      common_deps:\n        - g++\n        - libcurl4-openssl-dev\n        - libfuse-dev\n        - libssl-dev\n        - make\n        - pkgconf\n      # build_deps are additionally required to build Python itself from source.\n      # These are mostly cribbed from Debian's Build-Depends, minus support\n      # for some optional stdlib modules we don't need like Bluetooth and GUIs.\n      build_deps:\n        - autoconf\n        - bzip2\n        - gzip\n        - libbz2-dev\n        - libdb-dev\n        - libexpat1-dev\n        - libffi-dev\n        - libgdbm-dev\n        - liblzma-dev\n        - libreadline-dev\n        - libsqlite3-dev\n        - locales-all\n        - lsb-release\n        - media-types\n        - net-tools\n        - netbase\n        - sharutils\n        - tar\n        - time\n        - zlib1g-dev\n      # python_pkgs are installed when we're not installing Python from source.\n      python_pkgs:\n        - python3-dev\n        - python3-venv\n      task_name: Install Python runtime dependencies\n      package_names: \"{{ common_deps + (python_pkgs if arvados_dev_from_pkgs else build_deps) }}\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_python/tasks/install_from_source.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Set up Python build directory\n  ansible.builtin.tempfile:\n    state: directory\n    prefix: \"Python-{{ arvados_python_version }}.\"\n  register: python_builddir\n\n- name: Unarchive Python source\n  ansible.builtin.unarchive:\n    src: \"https://www.python.org/ftp/python/{{ arvados_python_version }}/Python-{{ arvados_python_version }}.tgz\"\n    dest: \"{{ python_builddir.path }}\"\n    extra_opts:\n      - \"--strip-components=1\"\n    remote_src: yes\n\n- name: Configure Python\n  ansible.builtin.command:\n    argv:\n      - \"./configure\"\n      - \"--prefix={{ arvados_python_destdir }}\"\n      - \"--enable-optimizations\"\n      - \"--with-lto=full\"\n    chdir: \"{{ python_builddir.path }}\"\n\n- name: Build Python\n  ansible.builtin.command:\n    argv:\n      - \"make\"\n      - \"-j4\"\n    chdir: \"{{ python_builddir.path }}\"\n\n- name: Install Python\n  become: yes\n  ansible.builtin.command:\n    argv:\n      - \"make\"\n      - \"install\"\n    chdir: \"{{ python_builddir.path }}\"\n\n- name: Remove Python build directory\n  become: yes\n  ansible.builtin.file:\n    path: \"{{ python_builddir.path }}\"\n    state: absent\n"
  },
  {
    "path": "tools/ansible/roles/arvados_python/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Check for Python installed from source\n  ansible.builtin.stat:\n    path: \"{{ (arvados_python_destdir, 'bin/python3')|path_join }}\"\n  register: python_stat\n\n- ansible.builtin.include_tasks: install_from_source.yml\n  when: \"not (arvados_dev_from_pkgs or python_stat.stat.exists)\"\n\n- name: Add Python commands to PATH\n  when: \"not arvados_dev_from_pkgs\"\n  become: yes\n  ansible.builtin.file:\n    state: link\n    src: \"{{ (arvados_python_destdir, 'bin', item)|path_join }}\"\n    dest: \"{{ ('/usr/local/bin', item)|path_join }}\"\n  loop:\n    - python3\n    - \"python{{ arvados_python_version|regex_search('^[0-9]+\\\\.[0-9]+\\\\b') }}\"\n"
  },
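  {
    "path": "tools/ansible/examples/arvados_python_play.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Hypothetical usage sketch added for illustration; not an actual file in the\n# Arvados source tree. A minimal play applying the arvados_python role: with\n# arvados_dev_from_pkgs false (the default), the role builds CPython from\n# source under /opt and symlinks it into /usr/local/bin; with it true, the\n# role installs the distro python3 packages instead.\n\n- hosts: dev_nodes  # assumed inventory group\n  roles:\n    - role: arvados_python\n      vars:\n        arvados_dev_from_pkgs: false\n"
  },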
  {
    "path": "tools/ansible/roles/arvados_ruby/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\narvados_dev_from_pkgs: false\narvados_ruby_destdir: \"/opt/ruby-{{ arvados_ruby_version }}\"\n# The oldest version of Ruby we actually support is 3.0, but unfortunately\n# it can't be built with OpenSSL 3.0, making it much harder to build from\n# source on modern distros. Compromise on 3.1.\narvados_ruby_version: \"3.1.7\"\n\narvados_bundler_gem_dir: /opt/arvados-bundler\narvados_bundler_bin_dir: \"{{ (arvados_bundler_gem_dir, 'bin')|path_join }}\"\narvados_bundler_version: \"~> 2.5.23\"\nruby_minor_version: \"{{ arvados_ruby_version|regex_search('^[0-9]+\\\\.[0-9]+\\\\b') }}\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_ruby/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_dnf\n    when: \"arvados_dev_from_pkgs is truthy\"\n    vars:\n      arvados_dnf_modules:\n        - ruby\n\n  - role: distro_packages\n    vars:\n      # common_deps are required to build extensions whether Ruby is\n      # installed from source or packages.\n      common_deps:\n        - bison\n        - g++\n        - libcurl4-openssl-dev\n        - libpq-dev\n        - libssl-dev\n        - libyaml-dev\n        - make\n        - pkgconf\n        - postgresql-client\n        - procps\n        - shared-mime-info\n        - zlib1g-dev\n      # build_deps are additionally required to build Ruby itself from source.\n      # These are mostly cribbed from Debian's Build-Depends, minus support\n      # for some optional integration we don't need.\n      build_deps:\n        - file\n        - libffi-dev\n        - libgdbm-compat-dev\n        - libgdbm-dev\n        - libgmp-dev\n        - netbase\n        - openssl\n        - tzdata\n      # ruby_pkgs are installed when we're not installing Ruby from source.\n      ruby_pkgs:\n        - ruby\n        - ruby-dev\n      task_name: Install Ruby runtime dependencies\n      package_names: \"{{ common_deps + (ruby_pkgs if arvados_dev_from_pkgs else build_deps) }}\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_ruby/tasks/install_from_source.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Set up Ruby build directory\n  ansible.builtin.tempfile:\n    state: directory\n    prefix: \"ruby-{{ arvados_ruby_version }}.\"\n  register: ruby_builddir\n\n- name: Unarchive Ruby source\n  ansible.builtin.unarchive:\n    src: \"https://cache.ruby-lang.org/pub/ruby/{{ ruby_minor_version }}/ruby-{{ arvados_ruby_version }}.tar.gz\"\n    dest: \"{{ ruby_builddir.path }}\"\n    extra_opts:\n      - \"--strip-components=1\"\n    remote_src: yes\n\n- name: Configure Ruby\n  ansible.builtin.command:\n    argv:\n      - \"./configure\"\n      - \"--prefix={{ arvados_ruby_destdir }}\"\n    chdir: \"{{ ruby_builddir.path }}\"\n\n- name: Build Ruby\n  ansible.builtin.command:\n    argv:\n      - \"make\"\n      - \"-j4\"\n    chdir: \"{{ ruby_builddir.path }}\"\n\n- name: Install Ruby\n  become: yes\n  ansible.builtin.command:\n    argv:\n      - \"make\"\n      - \"install\"\n    chdir: \"{{ ruby_builddir.path }}\"\n\n- name: Remove Ruby build directory\n  become: yes\n  ansible.builtin.file:\n    path: \"{{ ruby_builddir.path }}\"\n    state: absent\n"
  },
  {
    "path": "tools/ansible/roles/arvados_ruby/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Check for Ruby installed from source\n  ansible.builtin.stat:\n    path: \"{{ (arvados_ruby_destdir, 'bin/ruby')|path_join }}\"\n  register: ruby_stat\n\n- ansible.builtin.include_tasks: install_from_source.yml\n  when: \"not (arvados_dev_from_pkgs or ruby_stat.stat.exists)\"\n\n- name: Add Ruby commands to PATH\n  when: \"not arvados_dev_from_pkgs\"\n  become: yes\n  ansible.builtin.file:\n    state: link\n    src: \"{{ (arvados_ruby_destdir, 'bin', item)|path_join }}\"\n    dest: \"{{ ('/usr/local/bin', item)|path_join }}\"\n  loop:\n    - gem\n    - irb\n    - rake\n    - rdoc\n    - ri\n    - ruby\n\n# In Bundler 2.5, we have seen `bundle cache` fail to download gems that are\n# already installed on the system. This can cause bad package builds.\n# Install Bundler to a dedicated $GEM_HOME where it is the only thing installed\n# to work around these issues. `build/run-library.sh` depends on this.\n- name: Install bundler gem\n  become: yes\n  community.general.gem:\n    name: bundler\n    version: \"{{ arvados_bundler_version }}\"\n    install_dir: \"{{ arvados_bundler_gem_dir }}\"\n    bindir: \"{{ arvados_bundler_bin_dir }}\"\n    user_install: no\n\n# Make that version usable for other tasks.\n- name: Install Bundler scripts\n  become: yes\n  ansible.builtin.template:\n    src: bundler.sh.j2\n    dest: \"{{ ('/usr/local/bin', item)|path_join }}\"\n    owner: root\n    group: root\n    mode: 0755\n  loop:\n    - bundle\n    - bundler\n"
  },
  {
    "path": "tools/ansible/roles/arvados_ruby/templates/bundler.sh.j2",
    "content": "#!/bin/sh\n{# Copyright (C) The Arvados Authors. All rights reserved.\n #\n # SPDX-License-Identifier: Apache-2.0\n #}\n### This file is managed by Ansible\nexec env \\\n     GEM_PATH={{ arvados_bundler_gem_dir|quote }}:\"$(gem env path)\" \\\n     {{ (arvados_bundler_bin_dir, item)|path_join|quote }} \"$@\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_service/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\narvados_config_group: www-data\n# This file gets uploaded to `/etc/arvados/ca-certificates.crt`.\n# Included certificates are trusted by all Arvados TLS clients.\narvados_certificates_file: \"\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_service/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Set up /etc/arvados\n  become: yes\n  ansible.builtin.file:\n    state: directory\n    path: /etc/arvados\n    owner: root\n    group: root\n    mode: 0755\n\n- name: Write Arvados config.yml\n  no_log: yes\n  when: arvados_config_file is truthy\n  become: yes\n  ansible.builtin.copy:\n    src: \"{{ arvados_config_file }}\"\n    dest: /etc/arvados/config.yml\n    owner: root\n    group: \"{{ arvados_config_group }}\"\n    mode: 0640\n  register: arvados_config_copy\n\n# The `arvados_config_changed` fact is permanently set true\n# if `/etc/arvados/config.yml` is ever changed.\n- name: Record if config.yml changed\n  no_log: yes # Not sure if there's a risk of leaking a secret in a diff output here\n  ansible.builtin.set_fact:\n    arvados_config_changed: \"{{ arvados_config_changed|default(false) or arvados_config_copy.changed }}\"\n\n- name: Write Arvados certificates\n  when: arvados_certificates_file is truthy\n  become: yes\n  ansible.builtin.copy:\n    src: \"{{ arvados_certificates_file }}\"\n    dest: /etc/arvados/ca-certificates.crt\n    owner: root\n    group: root\n    mode: 0644\n"
  },
  {
    "path": "tools/ansible/roles/arvados_shell/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# This value will be used as the `hostname` attribute for the shell node's\n# corresponding Arvados record.\narvados_shell_hostname: \"{{ inventory_hostname }}\"\n\n# Set this false if you don't want to run the arvados-login-sync service.\n# It will still be installed but disabled.\narvados_shell_login_sync: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_shell/files/arvados-login-sync.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n[Unit]\nDescription=Sync shell node logins from Arvados\nDocumentation=https://doc.arvados.org/install/install-shell-server.html\nWants=network-online.target\nAfter=network-online.target\n\n[Service]\nType=oneshot\nEnvironmentFile=/etc/arvados/login-sync.env\nExecStart=/usr/local/bin/arvados-login-sync\n\nDevicePolicy=closed\nPrivateMounts=true\nPrivateTmp=true\nProtectControlGroups=true\n\n### Everything below this line implies NoNewPrivileges=true\nNoNewPrivileges=true\nLockPersonality=true\nMemoryDenyWriteExecute=true\nPrivateDevices=true\nProtectKernelModules=true\nProtectKernelTunables=true\nRestrictAddressFamilies=AF_UNIX AF_INET AF_INET6 AF_NETLINK\nRestrictNamespaces=true\nRestrictRealtime=true\nSystemCallErrorNumber=EPERM\nSystemCallFilter=~@clock @cpu-emulation @debug @module @mount @obsolete\n"
  },
  {
    "path": "tools/ansible/roles/arvados_shell/files/arvados-login-sync.timer",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n[Unit]\nDescription=Sync shell node logins from Arvados\nDocumentation=https://doc.arvados.org/install/install-shell-server.html\n\n[Timer]\nOnActiveSec=90sec\nOnUnitInactiveSec=90sec\nRandomizedDelaySec=60sec\nAccuracySec=10sec\n\n[Install]\nWantedBy=timers.target\n"
  },
  {
    "path": "tools/ansible/roles/arvados_shell/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: arvados_apt\n  - role: arvados_docker\n  - role: arvados_service\n    vars:\n      # It's better if config.yml isn't installed on the shell node,\n      # so override any existing definition.\n      arvados_config_file: null\n"
  },
  {
    "path": "tools/ansible/roles/arvados_shell/tasks/login_sync.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Install arvados-login-sync\n  become: yes\n  community.general.gem:\n    name: arvados-login-sync\n    user_install: no\n    version: \"{{ arvados_pin_version or omit }}\"\n\n- name: Write arvados-login-sync credentials\n  no_log: yes\n  become: yes\n  ansible.builtin.template:\n    src: login-sync.env.j2\n    dest: /etc/arvados/login-sync.env\n    owner: root\n    group: root\n    mode: 0600\n\n- name: Write arvados-login-sync service and timer definitions\n  become: yes\n  ansible.builtin.copy:\n    src: \"{{ item }}\"\n    dest: \"/etc/systemd/system/{{ item }}\"\n    owner: root\n    group: root\n    mode: 0644\n  loop:\n    - arvados-login-sync.service\n    - arvados-login-sync.timer\n  register: arvados_login_sync_systemd\n\n- name: Configure arvados-login-sync.timer\n  become: yes\n  ansible.builtin.systemd_service:\n    name: arvados-login-sync.timer\n    daemon_reload: \"{{ arvados_login_sync_systemd.changed }}\"\n    enabled: \"{{ arvados_shell_login_sync|bool }}\"\n    state: \"{{ 'stopped' if not arvados_shell_login_sync|bool else 'restarted' if arvados_login_sync_systemd.changed else 'started' }}\"\n"
  },
  {
    "path": "tools/ansible/roles/arvados_shell/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install shell client packages\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - arvados-client\n      - python3-arvados-python-client\n      - python3-arvados-fuse\n      - build-essential\n      - libcurl4-openssl-dev\n      - libssl-dev\n      - ruby-dev\n      - ruby-rubygems\n      - zlib1g-dev\n\n- name: Install shell client gems\n  become: yes\n  community.general.gem:\n    name: arvados-cli\n    user_install: no\n    version: \"{{ arvados_pin_version or omit }}\"\n\n- name: Query virtual machine record\n  no_log: yes\n  vars:\n    filters: [[\"hostname\", \"=\", \"{{ arvados_shell_hostname }}\"]]\n    order: [[\"created_at\", \"desc\"], [\"uuid\", \"desc\"]]\n  ansible.builtin.command:\n    argv:\n      - /usr/local/bin/arv\n      - virtual_machine\n      - list\n      - \"--filters={{ filters|to_json }}\"\n      - \"--order={{ order|to_json }}\"\n  environment:\n    ARVADOS_API_HOST: \"{{ arvados_cluster.Services.Controller | external_addr }}\"\n    ARVADOS_API_TOKEN: \"{{ arvados_cluster.SystemRootToken }}\"\n    SSL_CERT_FILE: \"{{ '/etc/arvados/ca-certificates.crt' if arvados_certificates_file is truthy else omit }}\"\n  register: arvados_virtual_machine_list\n\n- name: Create virtual machine record\n  no_log: yes\n  when: \"(arvados_virtual_machine_list.stdout|from_json)['items'] is falsy\"\n  vars:\n    virtual_machine:\n      hostname: \"{{ arvados_shell_hostname }}\"\n  ansible.builtin.command:\n    argv:\n      - /usr/local/bin/arv\n      - virtual_machine\n      - create\n      - \"--virtual-machine={{ virtual_machine|to_json }}\"\n  environment:\n    ARVADOS_API_HOST: \"{{ arvados_cluster.Services.Controller | external_addr }}\"\n    ARVADOS_API_TOKEN: \"{{ arvados_cluster.SystemRootToken }}\"\n    SSL_CERT_FILE: \"{{ '/etc/arvados/ca-certificates.crt' if arvados_certificates_file is truthy else omit }}\"\n  register: arvados_virtual_machine_create\n\n- name: Register virtual machine fact\n  ansible.builtin.set_fact:\n    arvados_virtual_machine: \"{{ (arvados_virtual_machine_create.stdout|from_json) if arvados_virtual_machine_create.stdout is defined else ((arvados_virtual_machine_list.stdout|from_json)['items']|first) }}\"\n\n- name: Set up arvados-login-sync\n  ansible.builtin.import_tasks: login_sync.yml\n"
  },
  {
    "path": "tools/ansible/roles/arvados_shell/templates/login-sync.env.j2",
    "content": "### This file is managed by Ansible ###\n{# Copyright (C) The Arvados Authors. All rights reserved.\n #\n # SPDX-License-Identifier: Apache-2.0\n #}\nARVADOS_API_HOST={{ arvados_cluster.Services.Controller | external_addr | quote }}\nARVADOS_API_TOKEN={{ arvados_cluster.SystemRootToken | quote }}\nARVADOS_VIRTUAL_MACHINE_UUID={{ arvados_virtual_machine.uuid | quote }}\n{% if arvados_certificates_file is truthy %}\nSSL_CERT_FILE=/etc/arvados/ca-certificates.crt\n{% endif %}\n"
  },
  {
    "path": "tools/ansible/roles/arvados_websocket/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: arvados_apt\n  - role: arvados_service\n  - role: arvados_nginx_frontend\n    vars:\n      arvados_nginx_service_key: Websocket\n"
  },
  {
    "path": "tools/ansible/roles/arvados_websocket/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install arvados-ws\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - arvados-ws\n\n- name: Start and enable arvados-ws.service\n  become: yes\n  ansible.builtin.systemd_service:\n    name: arvados-ws.service\n    state: \"{{ 'restarted' if arvados_config_changed else 'started' }}\"\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/arvados_workbench/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: arvados_apt\n  - role: arvados_service\n  - role: arvados_nginx_frontend\n    vars:\n      arvados_nginx_service_key: Workbench2\n"
  },
  {
    "path": "tools/ansible/roles/arvados_workbench/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install Arvados Workbench\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - arvados-workbench2\n"
  },
  {
    "path": "tools/ansible/roles/compute_amd_rocm/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\narvados_compute_pin_packages: true\narvados_compute_amd_rocm_suites_map:\n  bookworm: jammy\n  jammy: jammy\n  noble: noble\n# This version string is used in the AMD apt repository URLs.\n# You can provide a version number the same way AMD spells it\n# (e.g., \"6.3\", \"6.3.1\", \"6.3.2\", etc.) or the special string \"latest\".\narvados_compute_amd_rocm_version: \"{{ '6.3.2' if arvados_compute_pin_packages else 'latest' }}\"\n"
  },
  {
    "path": "tools/ansible/roles/compute_amd_rocm/files/arvados-amd-rocm.pref",
    "content": "### This file is managed by Ansible ###\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nPackage: hipcc rocm rocminfo rocm-*\nPin: origin \"repo.radeon.com\"\nPin-Priority: 550\n\nPackage: *\nPin: origin \"repo.radeon.com\"\nPin-Priority: 100\n"
  },
  {
    "path": "tools/ansible/roles/compute_amd_rocm/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_apt\n"
  },
  {
    "path": "tools/ansible/roles/compute_amd_rocm/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# THIS IS A PROTOTYPE - NOT FULLY TESTED.\n# This role automates the package install process in the ROCm documentation at\n# <https://rocm.docs.amd.com/projects/install-on-linux/en/latest/install/detailed-install.html>.\n# As of February 2025 it runs successfully on a compatible kernel+distro,\n# but we haven't demonstrated it can run Arvados containers with ROCm yet.\n\n- name: Check for AMD ROCm distribution support\n  ansible.builtin.fail:\n    msg: \"This role does not know where to find AMD's apt repository for {{ ansible_distribution_release }}\"\n  when: arvados_compute_amd_rocm_suites_map[ansible_distribution_release] is undefined\n\n# Unlike most of our package pins, this file does not pin any specific\n# version, it just prioritizes AMD's repository for the specific packages\n# that need it. AMD publishes a separate apt repository for every version,\n# and we specify the version we want through the repository URL in the next\n# task.\n- name: Install AMD ROCm package pins\n  ansible.builtin.copy:\n    src: arvados-amd-rocm.pref\n    dest: /etc/apt/preferences.d/arvados-amd-rocm.pref\n    owner: root\n    group: root\n    mode: 0644\n\n- name: Install AMD GPU+ROCm apt repositories\n  ansible.builtin.deb822_repository:\n    name: amd_rocm\n    types: deb\n    uris:\n      - \"https://repo.radeon.com/amdgpu/{{ arvados_compute_amd_rocm_version }}/ubuntu\"\n      - \"https://repo.radeon.com/rocm/apt/{{ arvados_compute_amd_rocm_version }}\"\n    suites:\n      - \"{{ arvados_compute_amd_rocm_suites_map[ansible_distribution_release] }}\"\n    components:\n      - main\n    architectures:\n      - amd64\n    signed_by: https://repo.radeon.com/rocm/rocm.gpg.key\n  notify:\n    - apt update\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install AMD ROCm build prerequisites\n  ansible.builtin.apt:\n    name:\n      - \"linux-headers-{{ ansible_kernel }}\"\n      # These are listed as installation prerequisites in AMD's documentation:\n      # they have Python tools to build scaffolding like bash completions.\n      - python3-setuptools\n      - python3-wheel\n\n- name: Install extra modules for AMD ROCm\n  when: \"ansible_distribution == 'Ubuntu'\"\n  ansible.builtin.apt:\n    name:\n      - \"linux-modules-extra-{{ ansible_kernel }}\"\n\n- name: Install AMD ROCm packages\n  ansible.builtin.apt:\n    name:\n      - amdgpu-dkms\n      - rocm\n"
  },
  {
    "path": "tools/ansible/roles/compute_docker/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndocker_daemon:\n  data-root: \"{{ arvados_docker_data_root|default('_OMIT_') }}\"\n  default-ulimits:\n    nofile:\n      Name: nofile\n      Soft: 10000\n      Hard: 10000\n  dns: \"{{ dns_resolver|split(None) if dns_resolver is defined else '_OMIT_' }}\"\ndocker_cleaner:\n  Quota: 10G\n  RemoveStoppedContainers: always\n"
  },
  {
    "path": "tools/ansible/roles/compute_docker/handlers/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: stop docker\n  become: yes\n  ansible.builtin.systemd_service:\n    name: docker.service\n    # We only stop the service because docker.socket can start it again.\n    state: stopped\n"
  },
  {
    "path": "tools/ansible/roles/compute_docker/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: arvados_docker\n"
  },
  {
    "path": "tools/ansible/roles/compute_docker/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Configure Docker daemon\n  become: yes\n  ansible.builtin.copy:\n    content: \"{{ docker_daemon|dict2items|selectattr('value', '!=', '_OMIT_')|items2dict|to_json }}\"\n    dest: /etc/docker/daemon.json\n    owner: root\n    group: docker\n    mode: 0640\n  notify:\n    - stop docker\n\n- name: Install Docker cleaner\n  become: yes\n  ansible.builtin.apt:\n    name: arvados-docker-cleaner\n\n- name: Create Docker cleaner configuration directories\n  become: yes\n  ansible.builtin.file:\n    name: \"{{ item }}\"\n    state: directory\n  loop:\n    - /etc/arvados\n    - /etc/arvados/docker-cleaner\n\n- name: Configure Docker cleaner\n  become: yes\n  ansible.builtin.copy:\n    content: \"{{ docker_cleaner|to_json }}\"\n    dest: /etc/arvados/docker-cleaner/docker-cleaner.json\n    owner: root\n    group: root\n    mode: 0644\n"
  },
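  {
    "path": "tools/ansible/examples/compute_docker_overrides.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Hypothetical usage sketch added for illustration; not an actual file in the\n# Arvados source tree. Example host_vars for the compute_docker role. Defining\n# these variables makes the corresponding keys appear in\n# /etc/docker/daemon.json; leaving them undefined resolves them to the\n# '_OMIT_' sentinel, which the role's dict2items/selectattr pipeline filters\n# out before writing the file.\n\narvados_docker_data_root: /data/docker\ndns_resolver: \"10.0.0.2 10.0.0.3\"\n"
  },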
  {
    "path": "tools/ansible/roles/compute_encrypt_tmp/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\narvados_compute_encrypted_tmp: \"{{ 'aws_ebs' if ansible_system_vendor == 'Amazon EC2' else '' }}\"\naws_ebs_autoscale_url: \"https://github.com/arvados/amazon-ebs-autoscale.git\"\naws_ebs_autoscale_version: \"ee323f0751c2b6f733692e805b51b9bf3c251bac\"\n"
  },
  {
    "path": "tools/ansible/roles/compute_encrypt_tmp/files/arvados-ensure-encrypted-partitions.service",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n[Unit]\nDescription=Ensure Arvados compute work directories are encrypted\nBefore=docker.service\nBefore=ssh.service\n\n[Install]\nWantedBy=docker.service\nWantedBy=ssh.service\n\n[Service]\nType=oneshot\nRemainAfterExit=true\nExecStart=/usr/local/sbin/ensure-encrypted-partitions.sh\n"
  },
  {
    "path": "tools/ansible/roles/compute_encrypt_tmp/files/ebs-autoscale.conf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n[Service]\nExecStart=\nExecStart=/usr/bin/bash /opt/amazon-ebs-autoscale/install.sh --imdsv2 -f lvm.ext4 -m /tmp\n"
  },
  {
    "path": "tools/ansible/roles/compute_encrypt_tmp/files/ensure-encrypted-partitions.sh",
    "content": "#!/bin/bash\n\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nset -e\nset -x\n\nVGNAME=compute\nLVNAME=tmp\nLVPATH=\"/dev/mapper/${VGNAME}-${LVNAME}\"\nCRYPTPATH=/dev/mapper/tmp\nMOUNTPATH=/tmp\n\nfindmntq() {\n    findmnt \"$@\" >/dev/null\n}\n\nensure_umount() {\n    if findmntq \"$1\"; then\n        umount \"$1\"\n    fi\n}\n\nif findmntq --source \"$CRYPTPATH\" --target \"$MOUNTPATH\"; then\n    exit 0\nfi\n\nCLOUD_SERVER=\"\"\nwhile [[ ! \"$CLOUD_SERVER\" ]]; do\n    CLOUD_SERVER=\"$(curl --silent --head http://169.254.169.254/ \\\n                    | awk '($1 == \"Server:\"){sub(\"\\\\r+$\", \"\"); print substr($0, 9)}')\"\ndone\n\nDISK_PATTERN=\"\"\ncase \"$CLOUD_SERVER\" in\n    # EC2\n    EC2ws) DISK_PATTERN=/dev/xvd ;;\n    # GCP\n    \"Metadata Server for VM\") DISK_PATTERN=/dev/sd ;;\n    # Azure\n    Microsoft-IIS/*) DISK_PATTERN=/dev/sd ;;\nesac\n\nif [[ -z \"$DISK_PATTERN\" ]]; then\n    echo \"ensure-encrypted-partitions: Unknown disk configuration; can't run.\" >&2\n    exit 3\nfi\n\ndeclare -a LVM_DEVS=()\n\nROOT_PARTITION=`findmnt / -f -o source -n`\nif [[ \"$ROOT_PARTITION\" =~ ^\\/dev\\/nvme ]]; then\n  # e.g. /dev/nvme0n1p1, strip last 4 characters\n  ROOT_DEVICE_STRING=${ROOT_PARTITION%????}\nelse\n  # e.g. /dev/xvda1, strip last character\n  ROOT_DEVICE_STRING=${ROOT_PARTITION//[0-9]/}\nfi\n\n# Newer AWS node types use another pattern, /dev/nvmeXn1 for fast instance SSD disks\nif [[ \"$CLOUD_SERVER\" == \"EC2ws\" ]]; then\n  for dev in `ls /dev/nvme* 2>/dev/null`; do\n    if [[ \"$dev\" == \"$ROOT_PARTITION\" ]] || [[ \"$dev\" =~ ^$ROOT_DEVICE_STRING ]]; then\n      continue\n    fi\n    if [[ -e ${dev}n1 ]]; then\n      ensure_umount \"${dev}n1\"\n      if [[ \"$devtype\" = disk ]]; then\n        dd if=/dev/zero of=\"${dev}n1\" bs=512 count=1\n      fi\n      LVM_DEVS+=(\"${dev}n1\")\n    fi\n  done\nfi\n\n# Look for traditional disks but only if we're not on AWS or if we haven't found\n# a fast instance /dev/nvmeXn1 disk\nif [[ \"$CLOUD_SERVER\" != \"EC2ws\" ]] || [[ ${#LVM_DEVS[@]} -eq 0 ]]; then\n  for dev in `ls $DISK_PATTERN* 2>/dev/null`; do\n    # On Azure, we are dealing with /dev/sdb1, on GCP, /dev/sdb, on AWS, /dev/xvdb\n    if [[ \"$dev\" == \"$ROOT_PARTITION\" ]] || [[ \"$dev\" =~ ^$ROOT_DEVICE_STRING ]]; then\n      continue\n    fi\n    if [[ ! \"$dev\" =~ [a-z]$ ]]; then\n      continue\n    fi\n    if [[ -e ${dev}1 ]]; then\n        dev=${dev}1\n        devtype=partition\n    else\n        devtype=disk\n    fi\n    ensure_umount \"$dev\"\n    if [[ \"$devtype\" = disk ]]; then\n        dd if=/dev/zero of=\"$dev\" bs=512 count=1\n    fi\n    LVM_DEVS+=(\"$dev\")\n  done\nfi\n\nif [[ \"${#LVM_DEVS[@]}\" -eq 0 ]]; then\n    echo \"ensure-encrypted-partitions: No extra disks found.\" >&2\n    exit 4\nfi\n\nvgcreate --force --yes \"$VGNAME\" \"${LVM_DEVS[@]}\"\nlvcreate --extents 100%FREE --name \"$LVNAME\" \"$VGNAME\"\n\nKEYPATH=\"$(mktemp -p /var/tmp key-XXXXXXXX.tmp)\"\nmodprobe dm_mod aes sha256\nhead -c321 /dev/urandom >\"$KEYPATH\"\necho YES | cryptsetup luksFormat \"$LVPATH\" \"$KEYPATH\"\ncryptsetup --key-file \"$KEYPATH\" luksOpen \"$LVPATH\" \"$(basename \"$CRYPTPATH\")\"\nshred -u \"$KEYPATH\"\nmkfs.xfs -f \"$CRYPTPATH\"\nmount -o async \"$CRYPTPATH\" \"$MOUNTPATH\"\nchmod a+w,+t \"$MOUNTPATH\"\n"
  },
  {
    "path": "tools/ansible/roles/compute_encrypt_tmp/tasks/aws_ebs.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Install EBS autoscaler dependencies\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - awscli\n      - bash\n      - git\n      - jq\n      - unzip\n\n- name: Check out EBS autoscaler from Git\n  become: yes\n  ansible.builtin.git:\n    repo: \"{{ aws_ebs_autoscale_url }}\"\n    dest: /opt/amazon-ebs-autoscale\n    version: \"{{ aws_ebs_autoscale_version }}\"\n\n- name: Override encrypted partition service with EBS autoscaler\n  become: yes\n  ansible.builtin.copy:\n    src: ebs-autoscale.conf\n    dest: /etc/systemd/system/arvados-ensure-encrypted-partitions.service.d/ebs-autoscale.conf\n    owner: root\n    group: root\n    mode: 0644\n"
  },
  {
    "path": "tools/ansible/roles/compute_encrypt_tmp/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Install encrypted partition dependencies\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - btrfs-progs\n      - cryptsetup\n      - curl\n      - lvm2\n      - xfsprogs\n\n- name: Install encrypted partition script\n  become: yes\n  ansible.builtin.copy:\n    src: ensure-encrypted-partitions.sh\n    dest: /usr/local/sbin/ensure-encrypted-partitions.sh\n    owner: root\n    group: root\n    mode: 0755\n\n- name: Define encrypted partition service\n  become: yes\n  ansible.builtin.copy:\n    src: arvados-ensure-encrypted-partitions.service\n    dest: /etc/systemd/system/arvados-ensure-encrypted-partitions.service\n    owner: root\n    group: root\n    mode: 0644\n\n- name: Prepare encrypted partition service override directory\n  become: yes\n  ansible.builtin.file:\n    path: /etc/systemd/system/arvados-ensure-encrypted-partitions.service.d\n    state: directory\n    owner: root\n    group: root\n    mode: 0755\n\n- name: Set up AWS EBS-backed encrypted partitions\n  ansible.builtin.include_tasks:\n    file: aws_ebs.yml\n  when: \"arvados_compute_encrypted_tmp|lower == 'aws_ebs'\"\n\n- name: Enable encrypted partition service\n  become: yes\n  ansible.builtin.systemd_service:\n    name: arvados-ensure-encrypted-partitions.service\n    enabled: true\n"
  },
  {
    "path": "tools/ansible/roles/compute_nvidia/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\narvados_compute_pin_packages: true\nnvidia_container_apt_url: \"https://nvidia.github.io/libnvidia-container\"\n"
  },
  {
    "path": "tools/ansible/roles/compute_nvidia/files/arvados-nvidia.pref",
    "content": "### This file is managed by Ansible ###\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nPackage: cuda\nPin: version 13.0.*\nPin-Priority: 995\n\nPackage: src:libnvidia-container src:nvidia-container-toolkit\nPin: version 1.17.*\nPin-Priority: 995\n\nPackage: cuda-drivers nvidia-modprobe nvidia-open nvidia-persistenced nvidia-xconfig src:nvidia-graphics-drivers src:nvidia-kmod-open src:nvidia-settings\nPin: version 580.*\nPin-Priority: 995\n"
  },
  {
    "path": "tools/ansible/roles/compute_nvidia/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_apt\n"
  },
  {
    "path": "tools/ansible/roles/compute_nvidia/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Install NVIDIA package pins\n  become: yes\n  ansible.builtin.copy:\n    src: arvados-nvidia.pref\n    dest: /etc/apt/preferences.d/arvados-nvidia.pref\n    owner: root\n    group: root\n    mode: 0644\n  when: \"arvados_compute_pin_packages|bool\"\n\n- name: Install NVIDIA CUDA apt repository\n  become: yes\n  ansible.builtin.apt:\n    deb: \"https://developer.download.nvidia.com/compute/cuda/repos/{{ ansible_distribution|lower }}{{ ansible_distribution_version|replace('.', '') if ansible_distribution == 'Ubuntu' else ansible_distribution_major_version }}/{{ ansible_architecture }}/cuda-keyring_1.1-1_all.deb\"\n  notify:\n    - apt update\n\n- name: Install NVIDIA container toolkit apt repository\n  become: yes\n  ansible.builtin.deb822_repository:\n    name: nvidia-container-toolkit\n    types: deb\n    uris: \"{{ nvidia_container_apt_url }}/stable/deb/$(ARCH)\"\n    suites: \"/\"\n    signed_by: \"{{ nvidia_container_apt_url }}/gpgkey\"\n  notify:\n    - apt update\n\n- name: apt update if needed\n  ansible.builtin.meta: flush_handlers\n\n- name: Install NVIDIA CUDA build prerequisites\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - \"linux-headers-{{ ansible_kernel }}\"\n\n- name: Install NVIDIA packages\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - cuda\n      - libnvidia-container1\n      - libnvidia-container-tools\n      - nvidia-container-toolkit\n\n# NVIDIA packages include some services that are appropriate for desktops etc.\n# but not compute nodes. Query which ones are installed and disable all found.\n- name: Query NVIDIA services to disable\n  become: yes\n  ansible.builtin.systemd_service:\n    name: \"{{ item }}\"\n  loop:\n    # This path triggers driver reloads when packages are upgraded.\n    # That's not expected to happen on compute nodes.\n    - nvidia-cdi-refresh.path\n    # crunch-run has its own CUDA initialization code.\n    # We prefer to use that over NVIDIA's.\n    - nvidia-persistenced.service\n  register: nvidia_services\n\n- name: Disable unneeded NVIDIA services\n  become: yes\n  ansible.builtin.systemd_service:\n    name: \"{{ item.Id }}\"\n    enabled: false\n  loop: \"{{ nvidia_services.results|map(attribute='status')|selectattr('LoadState', '!=', 'not-found') }}\"\n"
  },
  {
    "path": "tools/ansible/roles/compute_singularity/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncompute_go_version: \"{{ arvados_go_version|default('1.25.6') }}\"\ncompute_singularity_destdir: \"/opt/singularity-{{ compute_singularity_version }}\"\ncompute_singularity_version: \"3.10.4\"\ncompute_singularity_url: \"https://github.com/sylabs/singularity\"\nworkdir: \"{{ ansible_env.get('TMPDIR', '/tmp') }}\"\n"
  },
  {
    "path": "tools/ansible/roles/compute_singularity/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Follows the recipe from\n# <https://docs.sylabs.io/guides/3.10/admin-guide/installation.html#id1>\n\n- name: Install Singularity dependencies\n  become: yes\n  ansible.builtin.apt:\n    name:\n      - build-essential\n      - libglib2.0-dev\n      - libseccomp-dev\n      - pkg-config\n      - squashfs-tools\n\n- name: Check Singularity install\n  become: no\n  ansible.builtin.command:\n    cmd: \"{{ (compute_singularity_destdir, 'bin/singularity')|path_join }} --version\"\n  failed_when: false\n  register: singularity_version_check\n\n- name: Build and install Singularity\n  when: \"singularity_version_check.rc != 0\"\n  block:\n    - name: Create Singularity build directory\n      become: no\n      ansible.builtin.tempfile:\n        state: directory\n        path: \"{{ workdir }}\"\n        prefix: singularity-build-\n      register: singularity_build\n\n    - name: Download Singularity source\n      become: no\n      ansible.builtin.unarchive:\n        src: \"https://github.com/sylabs/singularity/releases/download/v{{ compute_singularity_version }}/singularity-ce-{{ compute_singularity_version }}.tar.gz\"\n        dest: \"{{ singularity_build.path }}\"\n        extra_opts:\n          - \"--strip-components=1\"\n        remote_src: yes\n\n    - name: Check if Go is already installed\n      become: no\n      ansible.builtin.command:\n        cmd: go version\n      failed_when: \"false\"\n      register: go_version\n\n    - name: Install Go\n      when: \"go_version.rc != 0\"\n      become: no\n      ansible.builtin.unarchive:\n        src: \"https://go.dev/dl/go{{ compute_go_version }}.linux-amd64.tar.gz\"\n        dest: \"{{ singularity_build.path }}\"\n        remote_src: yes\n\n    - name: Build Singularity\n      become: no\n      ansible.builtin.command:\n        cmd: \"{{ item }}\"\n        chdir: \"{{ singularity_build.path }}\"\n      environment:\n        GOPATH: \"{{ singularity_build.path }}/GOPATH\"\n        PATH: \"{{ singularity_build.path }}/go/bin:{{ ansible_env.PATH }}:{{ singularity_build.path }}/GOPATH/bin\"\n      loop:\n        - \"./mconfig --prefix={{ compute_singularity_destdir }}\"\n        - env -C builddir make\n\n    - name: Install Singularity\n      become: yes\n      ansible.builtin.command:\n        cmd: make install\n        chdir: \"{{ singularity_build.path }}/builddir\"\n\n    - name: Clean Singularity build directory\n      become: yes\n      ansible.builtin.file:\n        path: \"{{ singularity_build.path }}\"\n        state: absent\n\n- name: Add Singularity commands to PATH\n  become: yes\n  ansible.builtin.file:\n    state: link\n    src: \"{{ (compute_singularity_destdir, 'bin', item)|path_join }}\"\n    dest: \"{{ ('/usr/local/bin', item)|path_join }}\"\n  loop:\n    - run-singularity\n    - singularity\n\n- name: Configure Singularity mksquashfs mem\n  become: yes\n  ansible.builtin.lineinfile:\n    create: true\n    path: \"{{ (compute_singularity_destdir, 'etc/singularity/singularity.conf')|path_join }}\"\n    regexp: \"^ *mksquashfs +mem *=\"\n    line: \"mksquashfs mem = {{ compute_mksquashfs_mem }}\"\n  when: compute_mksquashfs_mem is defined\n"
  },
  {
    "path": "tools/ansible/roles/compute_user/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncompute_user_account: \"{{ arvados_cluster.Containers.CloudVMs.DriverParameters.AdminUsername }}\"\ncompute_user_home: \"/home/{{ compute_user_account }}\"\n"
  },
  {
    "path": "tools/ansible/roles/compute_user/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Create compute user account\n  become: yes\n  ansible.builtin.user:\n    name: \"{{ compute_user_account }}\"\n    comment: Crunch user,,,,\n    home: \"{{ compute_user_home }}\"\n    password_lock: true\n\n- name: Give compute user sudo access\n  become: yes\n  ansible.builtin.lineinfile:\n    line: \"{{ compute_user_account }} ALL=(ALL) NOPASSWD:ALL\"\n    path: /etc/sudoers.d/91-crunch\n    create: true\n    owner: root\n    group: root\n    mode: 0644\n\n- name: Create compute user .ssh directory\n  become: yes\n  ansible.builtin.file:\n    state: directory\n    path: \"{{ compute_user_home }}/.ssh\"\n    owner: \"{{ compute_user_account }}\"\n    mode: 0700\n\n- name: Initialize compute user authorized keys\n  become: yes\n  ansible.builtin.file:\n    path: \"{{ compute_user_home }}/.ssh/authorized_keys\"\n    state: touch\n    owner: \"{{ compute_user_account }}\"\n    mode: 0600\n\n- name: Install dispatch public key\n  when: compute_dispatch_ssh_keygen.changed\n  become: yes\n  ansible.builtin.lineinfile:\n    path: \"{{ compute_user_home }}/.ssh/authorized_keys\"\n    regexp: \"^{{ item.0 | regex_escape }}\\\\s+{{ item.1 | regex_escape }}(\\\\s|$)\"\n    line: \"{{ item | join(' ') }}\"\n  loop: \"{{ compute_dispatch_ssh_keygen.stdout_lines|map('split') }}\"\n\n- name: Install public keys from Ansible configuration\n  when: compute_authorized_keys is defined\n  become: yes\n  ansible.builtin.lineinfile:\n    path: \"{{ compute_user_home }}/.ssh/authorized_keys\"\n    regexp: \"^{{ item.0 | regex_escape }}\\\\s+{{ item.1 | regex_escape }}(\\\\s|$)\"\n    line: \"{{ item | join(' ') }}\"\n  loop: \"{{ lookup('ansible.builtin.file', compute_authorized_keys).splitlines()|select('match', '^\\\\w\\\\S*\\\\s+\\\\S')|map('split') }}\"\n\n- name: Check that at least one SSH key was installed\n  become: yes\n  ansible.builtin.stat:\n    path: \"{{ compute_user_home }}/.ssh/authorized_keys\"\n  register: compute_user_authorized_keys_stat\n  failed_when: compute_user_authorized_keys_stat.stat.size|default(0) == 0\n"
  },
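  {
    "path": "tools/ansible/examples/compute_authorized_keys.example",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Hypothetical example added for illustration; not an actual file in the\n# Arvados source tree. A file referenced by compute_authorized_keys holds one\n# key per line: \"<keytype> <base64-key> [comment]\". The role skips blank and\n# comment lines, and its lineinfile regexp keys on the first two fields, so\n# re-runs update a key's comment without duplicating the entry.\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5EXAMPLEKEY admin@example\nssh-rsa AAAAB3NzaC1yc2EEXAMPLEKEY dispatcher@example\n"
  },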
  {
    "path": "tools/ansible/roles/distro_apt/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndistro_apt:\n  Debian:\n    components:\n      - main\n      - contrib\n    mirror: http://deb.debian.org/debian\n    security: http://security.debian.org/debian-security\n    aws_mirror: http://cdn-aws.deb.debian.org/debian\n    aws_security: http://cdn-aws.deb.debian.org/debian-security\n    signed_by: /usr/share/keyrings/debian-archive-keyring.gpg\n  Ubuntu:\n    components:\n      - main\n      - universe\n    mirror: http://archive.ubuntu.com/ubuntu\n    security: http://security.ubuntu.com/ubuntu\n    aws_mirror: http://cdn-aws.archive.ubuntu.com/ubuntu\n    aws_security: http://cdn-aws.archive.ubuntu.com/ubuntu\n    signed_by: /usr/share/keyrings/ubuntu-archive-keyring.gpg\n\n# This dictionary is used to generate an apt configuration file of\n# APT::Periodic settings. null values will fall back to the value of\n# `arvados_apt_periodic_default` below. Other values are used verbatim.\n# Most of these settings are integers representing a number of days, where\n# 0 disables the task. Set `arvados_apt_periodic_default` to 0 to disable all\n# periodic tasks. See /usr/lib/apt/apt.systemd.daily for details.\narvados_apt_periodic:\n  Enable: null\n  Update-Package-Lists: null\n  Download-Upgradeable-Packages: null\n  Download-Upgradeable-Packages-Debdelta: null\n  Unattended-Upgrade: null\narvados_apt_periodic_default: 1\n"
  },
  {
    "path": "tools/ansible/roles/distro_apt/handlers/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: apt update\n  become: yes\n  ansible.builtin.apt:\n    update_cache: yes\n"
  },
  {
    "path": "tools/ansible/roles/distro_apt/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Check distribution\n  ansible.builtin.fail:\n    msg: \"Unsupported distribution: {{ ansible_distribution }}\"\n  when: distro_apt[ansible_distribution] is undefined\n\n- name: Set up distribution apt repository\n  become: yes\n  ansible.builtin.deb822_repository:\n    name: arvados-distro\n    types: deb\n    uris: \"{{ distro_apt[ansible_distribution].aws_mirror if ansible_system_vendor == 'Amazon EC2' else distro_apt[ansible_distribution].mirror }}\"\n    suites:\n      - \"{{ ansible_distribution_release }}\"\n      - \"{{ ansible_distribution_release }}-updates\"\n    components: \"{{ distro_apt[ansible_distribution].components }}\"\n    signed_by: \"{{ distro_apt[ansible_distribution].signed_by }}\"\n  notify:\n    - apt update\n\n- name: Set up distribution's security apt repository\n  become: yes\n  ansible.builtin.deb822_repository:\n    name: arvados-distro-security\n    types: deb\n    uris: \"{{ distro_apt[ansible_distribution].aws_security if ansible_system_vendor == 'Amazon EC2' else distro_apt[ansible_distribution].security }}\"\n    suites:\n      - \"{{ ansible_distribution_release }}-security\"\n    components: \"{{ distro_apt[ansible_distribution].components }}\"\n    signed_by: \"{{ distro_apt[ansible_distribution].signed_by }}\"\n  notify:\n    - apt update\n\n- name: Configure APT::Periodic tasks\n  become: yes\n  ansible.builtin.template:\n    src: 65arvados-ansible-unattended-upgrades.j2\n    dest: /etc/apt/apt.conf.d/65arvados-ansible-unattended-upgrades\n    owner: root\n    group: root\n    mode: 0644\n"
  },
  {
    "path": "tools/ansible/roles/distro_apt/templates/65arvados-ansible-unattended-upgrades.j2",
    "content": "### This file is managed by Ansible ###\n{# Copyright (C) The Arvados Authors. All rights reserved.\n #\n # SPDX-License-Identifier: Apache-2.0\n #}\n\n{% for key, val in arvados_apt_periodic.items() %}\nAPT::Periodic::{{ key }} \"{{ arvados_apt_periodic_default if val is none else val }}\";\n{% endfor %}\n"
  },
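  {
    "path": "tools/ansible/examples/apt-periodic-rendered.example",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Hypothetical rendered example added for illustration; not an actual file in\n# the Arvados source tree. Expected output of the\n# 65arvados-ansible-unattended-upgrades.j2 template with the role defaults,\n# where every null in arvados_apt_periodic falls back to\n# arvados_apt_periodic_default (1).\n\nAPT::Periodic::Enable \"1\";\nAPT::Periodic::Update-Package-Lists \"1\";\nAPT::Periodic::Download-Upgradeable-Packages \"1\";\nAPT::Periodic::Download-Upgradeable-Packages-Debdelta \"1\";\nAPT::Periodic::Unattended-Upgrade \"1\";\n"
  },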
  {
    "path": "tools/ansible/roles/distro_bootstrap/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndistro_bootstrap_apt_command: \"apt-get -o DPkg::Lock::Timeout=300 -qy\"\ndistro_bootstrap_apt_packages:\n  # acl is a core Linux utility and required to use become_user.\n  - acl\n  # Required to get repository signing keys via HTTPS\n  - ca-certificates\n  # Required by ansible.builtin.apt\n  - python3-apt\n  # Required by ansible.builtin.deb822_repository\n  - python3-debian\n  # Required to use become_user.\n  - sudo\n\ndistro_bootstrap_dnf_command: \"dnf --quiet --assumeyes\"\ndistro_bootstrap_dnf_packages:\n  # acl is a core Linux utility and required to use become_user.\n  - acl\n  # Required by versionlock\n  - dnf\n  # Required by community.general.dnf_versionlock\n  - dnf-plugin-versionlock\n  # Required by ansible.builtin.dnf\n  - python3-dnf\n  # Required to use become_user.\n  - sudo\n"
  },
  {
    "path": "tools/ansible/roles/distro_bootstrap/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# NOTE: This role is being used with `gather_facts: no` in order to\n# install Ansible's own dependencies.\n\n- name: Get distribution IDs\n  ansible.builtin.raw: \". /etc/os-release && printf '%s\\\\n' $ID $ID_LIKE\"\n  register: distro_ids\n\n- name: Bootstrap apt packages\n  when: \"'debian' in distro_ids.stdout_lines\"\n  become: yes\n  ansible.builtin.raw: \"env DEBIAN_FRONTEND=noninteractive {{ distro_bootstrap_apt_command }} {{ item }}\"\n  loop:\n    - update\n    - \"install {{ distro_bootstrap_apt_packages|map('quote')|join(' ') }}\"\n\n- name: Bootstrap rpm packages\n  when: \"'rhel' in distro_ids.stdout_lines\"\n  become: yes\n  ansible.builtin.raw: \"{{ distro_bootstrap_dnf_command }} {{ item }}\"\n  loop:\n    - makecache\n    - \"install {{ distro_bootstrap_dnf_packages|map('quote')|join(' ') }}\""
  },
  {
    "path": "tools/ansible/roles/distro_dnf/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\narvados_dnf_command: \"dnf --quiet --assumeyes\"\n\n# For each version of RHEL we support, map component names to the module\n# that needs to be enabled for it.\narvados_dnf_modules_map:\n  \"8\":\n    postgresql: \"postgresql:15\"\n    ruby: \"ruby:3.1\"\n  \"9\":\n    ruby: \"ruby:3.1\"\n"
  },
  {
    "path": "tools/ansible/roles/distro_dnf/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Example:\n#\n# - ansible.builtin.include_role:\n#     name: distro_dnf\n#   vars:\n#     arvados_dnf_modules:\n#       - python\n#       - ruby\n\n- name: Enable dnf modules\n  when: \"ansible_pkg_mgr == 'dnf' and module_args is truthy\"\n  become: yes\n  vars:\n    version_map: \"{{ arvados_dnf_modules_map[ansible_distribution_major_version]|default({}) }}\"\n    module_args: \"{{ arvados_dnf_modules|select('in', version_map)|map('extract', version_map)|flatten|unique|list }}\"\n  ansible.builtin.command:\n    cmd: \"{{ arvados_dnf_command }} module enable {{ module_args|map('quote')|join(' ') }}\"\n"
  },
  {
    "path": "tools/ansible/roles/distro_grafana/files/arvados-grafana.pref",
    "content": "### This file is managed by Ansible ###\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Pin packages to tested and known-good versions\n\nPackage: loki\nPin: version 3.*\nPin-Priority: 995\n\nPackage: alloy\nPin: version 1.*\nPin-Priority: 995\n\nPackage: grafana\nPin: version 12.*\nPin-Priority: 995\n"
  },
  {
    "path": "tools/ansible/roles/distro_grafana/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_apt\n"
  },
  {
    "path": "tools/ansible/roles/distro_grafana/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Install grafana repo package pins\n  become: yes\n  ansible.builtin.copy:\n    src: arvados-grafana.pref\n    dest: /etc/apt/preferences.d/arvados-grafana.pref\n    owner: root\n    group: root\n    mode: 0644\n\n- name: Add Grafana apt repository\n  become: yes\n  ansible.builtin.deb822_repository:\n    name: grafana\n    types: deb\n    uris: https://apt.grafana.com\n    suites: stable\n    components: main\n    signed_by: https://apt.grafana.com/gpg.key\n  notify:\n    - apt update\n"
  },
  {
    "path": "tools/ansible/roles/distro_packages/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\npackage_state: present\ntask_name: Install packages\n"
  },
  {
    "path": "tools/ansible/roles/distro_packages/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# Install a set of packages with both apt and dnf.\n# Requires a `package_names` var with a list of apt package names.\n# Example:\n#\n# - ansible.builtin.include_role:\n#     name: distro_packages\n#   vars:\n#     task_name: Install build dependencies\n#     package_state: present\n#     package_list:\n#       - g++\n#       - make\n#       - zlib1g-dev\n\n- name: \"{{ task_name }} (apt)\"\n  when: \"ansible_pkg_mgr == 'apt'\"\n  become: yes\n  ansible.builtin.apt:\n    name: \"{{ package_names | distro_packages(ansible_distribution, ansible_distribution_major_version) }}\"\n    state: \"{{ package_state }}\"\n\n- name: \"{{ task_name }} (dnf)\"\n  when: \"ansible_pkg_mgr == 'dnf'\"\n  become: yes\n  vars:\n    # `repo_reqs` maps Debian package names to a list of repositories that\n    # need to be enabled for installation.\n    repo_reqs:\n      arvados-api-server:\n        - devel\n      libfuse-dev:\n        - crb\n      libyaml-dev:\n        - devel\n  ansible.builtin.dnf:\n    name: \"{{ package_names | distro_packages(ansible_distribution, ansible_distribution_major_version) }}\"\n    state: \"{{ package_state }}\"\n    enablerepo: \"{{ package_names|select('in', repo_reqs)|map('extract', repo_reqs)|flatten|unique }}\"\n"
  },
  {
    "path": "tools/ansible/roles/distro_postgresql/defaults/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\narvados_postgresql_repository: distro\n"
  },
  {
    "path": "tools/ansible/roles/distro_postgresql/meta/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ndependencies:\n  - role: distro_apt\n"
  },
  {
    "path": "tools/ansible/roles/distro_postgresql/tasks/main.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n- name: Add postgresql repository\n  become: yes\n  ansible.builtin.deb822_repository:\n    name: postgresql\n    types: deb\n    uris: https://apt.postgresql.org/pub/repos/apt\n    suites: '{{ ansible_distribution_release }}-pgdg'\n    components: main\n    signed_by: https://www.postgresql.org/media/keys/ACCC4CF8.asc\n  when: \"arvados_postgresql_repository == 'postgresql'\"\n  notify:\n    - apt update\n"
  },
  {
    "path": "tools/ansible/setup-package-tests.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n# This playbook sets up a Docker image to run Arvados package tests.\n# It's meant to be used with `build-docker-image.yml`.\n\n- name: Bootstrap nodes\n  hosts: all\n  gather_facts: no\n  tasks:\n    - ansible.builtin.include_role:\n        name: distro_bootstrap\n\n- name: Group nodes by package manager\n  hosts: all\n  tasks:\n    - ansible.builtin.group_by:\n        key: \"pkg_mgr_{{ ansible_pkg_mgr }}\"\n\n    - ansible.builtin.include_role:\n        name: distro_packages\n      vars:\n        task_name: Install package test dependencies\n        package_names:\n          - diffutils\n          - findutils\n\n- name: Setup apt nodes\n  hosts: pkg_mgr_apt\n  tasks:\n    - name: Install apt repository\n      ansible.builtin.deb822_repository:\n        name: arvados_pkgtest\n        types: deb\n        uris: \"file:///arvados/packages/{{ ansible_distribution|lower }}{{ ansible_distribution_version|replace('.', '') if ansible_distribution == 'Ubuntu' else ansible_distribution_major_version }}\"\n        suites:\n          - \"/\"\n        trusted: true\n\n- name: Setup dnf nodes\n  hosts: pkg_mgr_dnf\n  tasks:\n    - name: Install RHEL test dependencies\n      ansible.builtin.dnf:\n        name:\n          - cpio\n\n    - ansible.builtin.include_role:\n        name: distro_dnf\n      vars:\n        arvados_dnf_modules:\n          - postgresql\n          - python\n          - ruby\n\n    - name: Enable development repository\n      community.general.ini_file:\n        path: \"/etc/yum.repos.d/{{ arvados_pkgtest_dnf_devel_basename|default('rocky-devel') }}.repo\"\n        section: \"{{ arvados_pkgtest_dnf_devel_section|default('devel') }}\"\n        option: enabled\n        value: \"1\"\n        create: false\n\n    - name: Create test package repository\n      ansible.builtin.yum_repository:\n        name: arvados-test\n        description: Arvados Test Packages\n        baseurl: \"file:///arvados/packages/{{ ansible_distribution|lower }}{{ ansible_distribution_major_version }}\"\n        enabled: true\n        gpgcheck: false\n"
  },
  {
    "path": "tools/cluster-activity/MANIFEST.in",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ninclude agpl-3.0.txt\ninclude arvados_version.py\ninclude *.cwl\ninclude arvados_cluster_activity/*.js\n"
  },
  {
    "path": "tools/cluster-activity/README.rst",
    "content": ".. Copyright (C) The Arvados Authors. All rights reserved.\n..\n.. SPDX-License-Identifier: AGPL-3.0\n\n=================================\n Arvados Cluster Activity Report\n=================================\n\nThis tool reports on the data and workflows in an Arvados cluster to help administrators understand growth and costs. It reports what it has access to: any Arvados user can run it to get a report of their own workflows and others they can see. An Arvados administrator can run a report on all data and workflows in the cluster. If you provide credentials for a Prometheus server in your Arvados cluster, the report includes additional information about compute use.\n\nRunning as a workflow from Workbench\n====================================\n\nWe provide a CWL workflow to generate this report. It's available as a `single file in the Arvados source`_ and included with this Python package. You can register the workflow on your cluster by running::\n\n  arvados-cwl-runner [--project-uuid=UUID] --create-workflow cluster-activity.cwl\n\nThen you can launch the workflow from Workbench. All inputs have documented formats and values.\n\nRunning as a workflow from the command line\n===========================================\n\nAlternatively, you can run the workflow directly with ``arvados-cwl-runner``. Write an input file following this YAML template::\n\n  # Report start date as a `YYYY-MM-DD` string\n  reporting_start: \"YYYY-MM-DD\"\n\n  # Report end date as a `YYYY-MM-DD` string. Default today.\n  #reporting_end: \"YYYY-MM-DD\"\n\n  # The base URL of your Arvados cluster's Prometheus server, like\n  # `https://prometheus.arvados.example/`\n  #prometheus_host: \"\"\n\n  # Prometheus API token\n  #prometheus_apikey: \"\"\n\n  # Prometheus API username\n  #prometheus_user: \"\"\n\n  # Prometheus API password\n  #prometheus_password: \"\"\n\n  # A string with a Python regular expression.\n  # Workflows whose name match the expression will be excluded from the report.\n  #exclude: \"\"\n\n  # A boolean. If true, individual workflow steps will be reported alongside\n  # their parent workflows.\n  include_workflow_steps: false\n\nThen run `the workflow`_ like this::\n\n  arvados-cwl-runner [--project-uuid=UUID] [options ...] cluster-activity.cwl YOUR-INPUTS.yml\n\n.. _the workflow: https://github.com/arvados/arvados/blob/main/tools/cluster-activity/cluster-activity.cwl\n.. _single file in the Arvados source: `the workflow`_\n\nRunning as a command line tool\n==============================\n\nThis Python package provides a command line tool you can run to generate reports on your own system. Install it with `pipx`_ like::\n\n  pipx install \"arvados-cluster-activity[prometheus]\"\n\nIf you don't have a Prometheus server or don't want Prometheus support, remove ``[prometheus]`` from the command line. Advanced users can install the tool to their own virtualenv or elsewhere.\n\nThe command line tool provides options to control the report generation. These correspond to the workflow inputs. Run the tool with ``--help`` for the full list::\n\n  arv-cluster-activity --help\n\nThe tool gets Arvados credentials the same as other client tools: it reads the ``ARVADOS_API_HOST`` and ``ARVADOS_API_TOKEN`` environment variables if those are set, or the ``~/.config/arvados/settings.conf`` file if they are not.\n\nThe tool gets Prometheus credentials from the ``PROMETHEUS_HOST``, ``PROMETHEUS_APIKEY``, ``PROMETHEUS_USER``, and ``PROMETHEUS_PASSWORD`` environment variables. 
\n\n.. _pipx: https://pipx.pypa.io/stable/\n"
  },
  {
    "path": "tools/cluster-activity/agpl-3.0.txt",
    "content": "                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU Affero General Public License is a free, copyleft license for\nsoftware and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nour General Public Licenses are intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.\n\n  A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.\n\n  The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.\n\n  An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU Affero General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  
\"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. 
Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  
This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  
But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  
If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  
If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  
\"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Remote Network Interaction; Use with the GNU General Public License.\n\n  Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  
This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the work with which it is combined will remain governed by version\n3 of the GNU General Public License.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new versions\nwill be similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU Affero General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU Affero General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. 
Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a \"Source\" link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n<http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "tools/cluster-activity/arvados_cluster_activity/__init__.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n"
  },
  {
    "path": "tools/cluster-activity/arvados_cluster_activity/dygraphs.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nwindow.onload = function() {\n    var charts = {};\n    var fmt = {\n        iso: function(y) {\n            var s='';\n            if (y > 1000000000000000) { y=y/1000000000000000; s='P'; }\n            else if (y > 1000000000000) { y=y/1000000000000; s='T'; }\n            else if (y > 1000000000) { y=y/1000000000; s='G'; }\n            else if (y > 1000000) { y=y/1000000; s='M'; }\n            else if (y > 1000) { y=y/1000; s='K'; }\n            return y.toFixed(2).replace(/\\.0+$/, '')+s;\n        },\n        time: function(s) {\n            var ret = ''\n            if (s >= 86400) ret += Math.floor(s/86400) + 'd'\n            if (s >= 3600) ret += Math.floor(s/3600)%24 + 'h'\n            if (s >= 60) ret += Math.floor(s/60)%60 + 'm'\n            ret += Math.floor(s)%60 + 's'\n            // finally, strip trailing zeroes: 1d0m0s -> 1d\n            return ret.replace(/(\\D)(0\\D)*$/, '$1')\n        },\n        date: function(s, opts, sth, dg, idk, excludeHour) {\n            var date = new Date(s);\n            var options = {month: 'numeric', day: 'numeric'};\n            if (!excludeHour) {\n                options.hour = 'numeric';\n                options.minute = 'numeric';\n                options.hour12 = false;\n            }\n            var r = new Intl.DateTimeFormat(undefined, options).format(date);\n            return r;\n        },\n    }\n    var ticker = {\n        time: function(min, max, pixels, opts, dg) {\n            var max_ticks = Math.floor(pixels / (opts('axisLabelWidth')+opts('pixelsPerLabel')/2))\n            var natural = [1, 5, 10, 30, 60,\n                           120, 300, 600, 1800, 3600,\n                           7200, 14400, 43200, 86400]\n            var interval = natural.shift()*1000\n            while (max>min && (max-min)/interval > max_ticks) {\n                interval = (natural.shift()*1000) || (interval * 2)\n            }\n            var ticks = []\n            var excludeHour = false;\n            var date = new Date(min);\n            // need to take the seconds since midnight and then round off to the nearest interval.\n            var millisecondsSinceMidnight = (date.getHours() * 3600 + date.getMinutes() * 60 + date.getSeconds()) * 1000;\n            if (interval >= 86400000) {\n                excludeHour = true;\n            } else {\n                var roundedOff = Math.ceil(millisecondsSinceMidnight/interval)*interval;\n                min = (min - millisecondsSinceMidnight) + roundedOff;\n            }\n            //for (var i=Math.ceil(min/interval)*interval; i<=max; i+=interval) {\n            for (var i=min; i<=max; i+=interval) {\n                ticks.push({v: i, label: opts('axisLabelFormatter')(i, opts, \"\", false, false, excludeHour)})\n            }\n            return ticks\n        },\n    }\n    chartdata.forEach(function(section, section_idx) {\n        var chartDiv = document.getElementById(\"chart\");\n        section.charts.forEach(function(chart, chart_idx) {\n            // Skip chart if every series has zero data points\n            if (0 == chart.data.reduce(function(len, series) {\n                return len + series.length;\n            }, 0)) {\n                return;\n            }\n            var id = 'chart-'+section_idx+'-'+chart_idx;\n            var div = document.createElement('div');\n            div.setAttribute('id', id);\n            div.setAttribute('style', 'width: 
    chartdata.forEach(function(section, section_idx) {\n        var chartDiv = document.getElementById(\"chart\");\n        section.charts.forEach(function(chart, chart_idx) {\n            // Skip chart if every series has zero data points\n            if (0 == chart.data.reduce(function(len, series) {\n                return len + series.length;\n            }, 0)) {\n                return;\n            }\n            var id = 'chart-'+section_idx+'-'+chart_idx;\n            var div = document.createElement('div');\n            div.setAttribute('id', id);\n            div.setAttribute('style', 'width: 100%; height: 250px');\n            chartDiv.appendChild(div);\n            chart.options.axes = {\n                x: {\n                    axisLabelFormatter: fmt.date,\n                    valueFormatter: fmt.date,\n                    ticker: ticker.time,\n                    axisLabelWidth: 60,\n                    pixelsPerLabel: 20,\n                },\n                y: {\n                    axisLabelFormatter: fmt.iso,\n                    valueFormatter: fmt.iso,\n                },\n            }\n            var div2 = document.createElement('div');\n            div2.setAttribute('style', 'width: 150px; height: 250px');\n            chart.options.labelsDiv = div2;\n            chart.options.labelsSeparateLines = true;\n\n            var div3 = document.createElement('div');\n            div3.setAttribute('style', 'display: flex; padding-bottom: 16px');\n            div3.appendChild(div);\n            div3.appendChild(div2);\n            chartDiv.appendChild(div3);\n\n            charts[id] = new Dygraph(div, chart.data, chart.options);\n        });\n    });\n\n    var sync = Dygraph.synchronize(Object.values(charts), {range: false});\n\n    if (typeof window.debug === 'undefined')\n        window.debug = {};\n    window.debug.charts = charts;\n};\n"
  },
  {
    "path": "tools/cluster-activity/arvados_cluster_activity/main.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport argparse\nimport sys\n\nimport arvados\nimport arvados.util\nimport ciso8601\nimport csv\nimport os\nimport logging\nimport re\n\ntry:\n    from prometheus_api_client import PrometheusConnect\nexcept ImportError as e:\n    PrometheusConnect = None\n\nfrom arvados_cluster_activity.report import ClusterActivityReport, aws_monthly_cost, format_with_suffix_base2\nfrom arvados_cluster_activity.prometheus import get_metric_usage, get_data_usage\n\nfrom arvados_cluster_activity._version import __version__\n\nfrom datetime import timedelta, timezone, datetime\nimport base64\n\ndef parse_arguments(arguments):\n    arg_parser = argparse.ArgumentParser()\n    arg_parser.add_argument('--start', help='Start date for the report in YYYY-MM-DD format (UTC) (or use --days)')\n    arg_parser.add_argument('--end', help='End date for the report in YYYY-MM-DD format (UTC), default \"now\"')\n    arg_parser.add_argument('--days', type=int, help='Number of days before \"end\" to start the report (or use --start)')\n    arg_parser.add_argument('--cost-report-file', type=str, help='Export cost report to specified CSV file')\n    arg_parser.add_argument('--include-workflow-steps', default=False,\n                            action=\"store_true\", help='Include individual workflow steps (optional)')\n    arg_parser.add_argument('--columns', type=str, help=\"\"\"Cost report columns (optional), must be comma separated with no spaces between column names.\n    Available columns are: Project, ProjectUUID, Workflow, WorkflowUUID, Step, StepUUID, Sample, SampleUUID, User, UserUUID, Submitted, Started, Runtime, Cost\"\"\")\n    arg_parser.add_argument('--exclude', type=str, help=\"Exclude workflows containing this substring (may be a regular expression)\")\n\n    arg_parser.add_argument('--html-report-file', type=str, help='Export HTML report to specified file')\n    arg_parser.add_argument(\n        '--version', action='version', version=\"%s %s\" % (sys.argv[0], __version__),\n        help='Print version and exit.')\n\n    arg_parser.add_argument('--cluster', type=str, help='Cluster to query for prometheus stats')\n    arg_parser.add_argument('--prometheus-auth', type=str, help='Authorization file with prometheus info')\n\n    args = arg_parser.parse_args(arguments)\n\n    if args.days and args.start:\n        arg_parser.print_help()\n        print(\"Error: either specify --days or both --start and --end\")\n        exit(1)\n\n    if not args.days and not args.start:\n        arg_parser.print_help()\n        print(\"\\nError: either specify --days or both --start and --end\")\n        exit(1)\n\n    if (args.start and not args.end):\n        arg_parser.print_help()\n        print(\"\\nError: no start or end date found, either specify --days or both --start and --end\")\n        exit(1)\n\n    if args.end:\n        try:\n            to = datetime.strptime(args.end,\"%Y-%m-%d\")\n        except:\n            arg_parser.print_help()\n            print(\"\\nError: end date must be in YYYY-MM-DD format\")\n            exit(1)\n    else:\n        to = datetime.now(timezone.utc)\n\n    if args.days:\n        since = to - timedelta(days=args.days)\n\n    if args.start:\n        try:\n            since = datetime.strptime(args.start,\"%Y-%m-%d\")\n        except:\n            arg_parser.print_help()\n            print(\"\\nError: start date must be in YYYY-MM-DD format\")\n            
    if args.prometheus_auth:\n        with open(args.prometheus_auth, \"rt\") as f:\n            for line in f:\n                if line.startswith(\"export \"):\n                    line = line[7:]\n                sp = line.strip().split(\"=\", 1)\n                if len(sp) == 2 and sp[0].startswith(\"PROMETHEUS_\"):\n                    os.environ[sp[0]] = sp[1]\n\n    return args, since, to\n\ndef print_data_usage(prom, timestamp, cluster, label):\n    value, dedup_ratio = get_data_usage(prom, timestamp, cluster)\n\n    if value is None:\n        return\n\n    monthly_cost = aws_monthly_cost(value)\n    print(label,\n          \"%s apparent,\" % (format_with_suffix_base2(value*dedup_ratio)),\n          \"%s actually stored,\" % (format_with_suffix_base2(value)),\n          \"$%.2f monthly S3 storage cost\" % monthly_cost)\n\ndef print_container_usage(prom, start_time, end_time, metric, label, fn=None):\n    cumulative = 0\n\n    for rs in get_metric_usage(prom, start_time, end_time, metric):\n        # Calculate the sum of values\n        cumulative += rs.sum()[\"y\"]\n\n    if fn is not None:\n        cumulative = fn(cumulative)\n\n    print(label % cumulative)\n\n\ndef get_prometheus_client():\n    if PrometheusConnect is None:\n        logging.warning(\"Failed to import prometheus_api_client client.  Did you include the [prometheus] option when installing the package?  Error was: %s\" % prometheus_import_error)\n        return None\n\n    headers = {}\n    if not (prom_host := os.environ.get(\"PROMETHEUS_HOST\")):\n        logging.warning(\"PROMETHEUS_HOST not found, not collecting activity from Prometheus\")\n        return None\n    elif prom_token := os.environ.get(\"PROMETHEUS_APIKEY\"):\n        headers[\"Authorization\"] = f\"Bearer {prom_token}\"\n    elif prom_user := os.environ.get(\"PROMETHEUS_USER\"):\n        basic_auth = base64.b64encode(\n            f\"{prom_user}:{os.environ.get('PROMETHEUS_PASSWORD', '')}\".encode('utf-8'),\n        ).decode('ascii')\n        headers[\"Authorization\"] = f\"Basic {basic_auth}\"\n    else:\n        logging.warning(\"Prometheus credentials not found, not collecting activity from Prometheus\")\n        return None\n\n    try:\n        return PrometheusConnect(url=prom_host, headers=headers)\n    except Exception as e:\n        logging.warning(\"Connecting to Prometheus failed, will not collect activity from Prometheus.  Error was: %s\" % e)\n        return None\n
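\n# When neither --cost-report-file nor --html-report-file is requested, main()\n# falls back to this plain-text summary of Prometheus metrics on stdout.\n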
def report_from_prometheus(prom, cluster, since, to):\n    if not cluster:\n        arv_client = arvados.api()\n        cluster = arv_client.config()[\"ClusterID\"]\n\n    print(cluster, \"between\", since, \"and\", to, \"timespan\", (to-since))\n\n    try:\n        print_data_usage(prom, since, cluster, \"at start:\")\n    except Exception:\n        logging.exception(\"Failed to get start value\")\n\n    try:\n        print_data_usage(prom, to - timedelta(minutes=240), cluster, \"current :\")\n    except Exception:\n        logging.exception(\"Failed to get end value\")\n\n    print_container_usage(prom, since, to, \"arvados_dispatchcloud_containers_running{cluster='%s'}\" % cluster, '%.1f container hours', lambda x: x/60)\n    print_container_usage(prom, since, to, \"sum(arvados_dispatchcloud_instances_price{cluster='%s'})\" % cluster, '$%.2f spent on compute', lambda x: x/60)\n    print()\n\n\ndef main(arguments=None):\n    if arguments is None:\n        arguments = sys.argv[1:]\n\n    args, since, to = parse_arguments(arguments)\n\n    logging.getLogger().setLevel(logging.INFO)\n\n    # Create the Prometheus client once so the report class and the fallback\n    # summary below share it.\n    prom = get_prometheus_client()\n    reporter = ClusterActivityReport(prom)\n    if args.cost_report_file:\n        with open(args.cost_report_file, \"wt\") as f:\n            reporter.csv_report(since, to, f, args.include_workflow_steps, args.columns, args.exclude)\n    else:\n        logging.info(\"Use --cost-report-file to get a CSV file of workflow runs\")\n\n    if args.html_report_file:\n        with open(args.html_report_file, \"wt\") as f:\n            f.write(reporter.html_report(since, to, args.exclude, args.include_workflow_steps))\n    else:\n        logging.info(\"Use --html-report-file to get HTML report of cluster usage\")\n\n    if not args.cost_report_file and not args.html_report_file:\n        report_from_prometheus(prom, args.cluster, since, to)\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "tools/cluster-activity/arvados_cluster_activity/prometheus.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nfrom datetime import timedelta, timezone\n\ndef get_metric_usage(prom, start_time, end_time, metric, resampleTo=\"min\"):\n    from prometheus_api_client.utils import parse_datetime\n    from prometheus_api_client import PrometheusConnect, MetricsList, Metric\n    import pandas\n\n    start = start_time\n    chunk_size = timedelta(days=1)\n\n    while start < end_time:\n        if start + chunk_size > end_time:\n            chunk_size = end_time - start\n\n        metric_data = prom.custom_query_range(metric,\n                                              start_time=start,\n                                              end_time=(start + chunk_size),\n                                              step=15\n                                              )\n\n        start += chunk_size\n\n        if len(metric_data) == 0:\n            continue\n\n        if \"__name__\" not in metric_data[0][\"metric\"]:\n            metric_data[0][\"metric\"][\"__name__\"] = metric\n\n        metric_object_list = MetricsList(metric_data)\n        my_metric_object = metric_object_list[0] # one of the metrics from the list\n\n        series = my_metric_object.metric_values.set_index(pandas.DatetimeIndex(my_metric_object.metric_values['ds']))\n\n        # Resample to 1 minute increments, fill in missing values\n        rs = series.resample(resampleTo).max(1).ffill()\n\n        yield rs\n\ndef get_data_usage(prom, timestamp, cluster):\n    from prometheus_api_client import PrometheusConnect, MetricsList, Metric\n\n    metric_data = prom.get_current_metric_value(metric_name='arvados_keep_total_bytes',\n                                                label_config={\"cluster\": cluster},\n                                                params={\"time\": timestamp.timestamp()})\n\n    metric_object_list = MetricsList(metric_data)\n\n    if len(metric_data) == 0:\n        return\n\n    my_metric_object = metric_object_list[0] # one of the metrics from the list\n    value = my_metric_object.metric_values.iloc[0][\"y\"]\n\n    metric_data = prom.get_current_metric_value(metric_name='arvados_keep_dedup_byte_ratio',\n                                                label_config={\"cluster\": cluster},\n                                                params={\"time\": timestamp.timestamp()})\n\n    if len(metric_data) == 0:\n        return (None, None)\n\n    my_metric_object = MetricsList(metric_data)[0]\n    dedup_ratio = my_metric_object.metric_values.iloc[0][\"y\"]\n\n    return value, dedup_ratio\n"
  },
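  {
    "path": "tools/cluster-activity/examples/prometheus_usage_sketch.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Hypothetical usage sketch, added for illustration only; it is not part of\n# the packaged tool.  It shows how the helpers in prometheus.py might be\n# driven directly.  The Prometheus URL, bearer token, and cluster id\n# ('xzzz1') below are made-up assumptions.\n\nfrom datetime import datetime, timedelta, timezone\n\nfrom prometheus_api_client import PrometheusConnect\n\nfrom arvados_cluster_activity.prometheus import get_metric_usage, get_data_usage\n\nprom = PrometheusConnect(url=\"https://prometheus.example.com\",\n                         headers={\"Authorization\": \"Bearer EXAMPLE-TOKEN\"})\n\nto = datetime.now(timezone.utc)\nsince = to - timedelta(days=7)\n\n# Sum container-hours over one week: with the default \"min\" resampling each\n# row of the yielded series is one minute of runtime, so divide by 60.\ncontainer_hours = 0.0\nfor series in get_metric_usage(prom, since, to,\n                               \"arvados_dispatchcloud_containers_running{cluster='xzzz1'}\"):\n    for t in series.itertuples():\n        container_hours += t[1] / 60\n\nprint(\"%.1f container hours\" % container_hours)\n\n# Point-in-time storage usage for the same (made-up) cluster id.\nvalue, dedup_ratio = get_data_usage(prom, to, \"xzzz1\")\nif value is not None:\n    print(\"%s bytes stored, dedup ratio %.2f\" % (value, dedup_ratio))\n"
  },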
  {
    "path": "tools/cluster-activity/arvados_cluster_activity/report.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport logging\nimport ciso8601\nimport arvados.util\nimport re\nimport csv\nimport math\nimport collections\nimport json\nfrom datetime import date, datetime, timedelta\nfrom typing import Dict, List\nimport statistics\n\nfrom dataclasses import dataclass\nfrom arvados_cluster_activity.prometheus import get_metric_usage, get_data_usage\nfrom arvados_cluster_activity.reportchart import ReportChart\n\n\n@dataclass\nclass WorkflowRunSummary:\n    name: str\n    uuid: str\n    cost: List[float]\n    hours: List[float]\n    count: int = 0\n\n\n@dataclass\nclass ProjectSummary:\n    users: set\n    uuid: str\n    runs: Dict[str, WorkflowRunSummary]\n    earliest: datetime = datetime(year=9999, month=1, day=1)\n    latest: datetime = datetime(year=1900, month=1, day=1)\n    name: str = \"\"\n    cost: float = 0\n    count: int = 0\n    hours: float = 0\n    activityspan: str = \"\"\n    tablerow: str = \"\"\n\n\ndef aws_monthly_cost(value):\n    value_gb = value / (1024*1024*1024)\n    first_50tb = min(1024*50, value_gb)\n    next_450tb = max(min(1024*450, value_gb-1024*50), 0)\n    over_500tb = max(value_gb-1024*500, 0)\n\n    monthly_cost = (first_50tb * 0.023) + (next_450tb * 0.022) + (over_500tb * 0.021)\n    return monthly_cost\n\n\ndef format_with_suffix_base2(summary_value):\n    for scale in [\"KiB\", \"MiB\", \"GiB\", \"TiB\", \"PiB\", \"EiB\"]:\n        summary_value = summary_value / 1024\n        if summary_value < 1024:\n            return \"%.3f %s\" % (summary_value, scale)\n\ndef format_with_suffix_base10(summary_value):\n    for scale in [\"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"]:\n        summary_value = summary_value / 1000\n        if summary_value < 1000:\n            return \"%.3f %s\" % (summary_value, scale)\n\ncontainers_graph = ('Concurrent running containers', 'containers')\nstorage_graph = ('Storage usage', 'used')\nmanaged_graph = ('Data under management', 'managed')\n\n\ndef runtime_str(container_request, containers):\n    length = ciso8601.parse_datetime(containers[container_request[\"container_uuid\"]][\"finished_at\"]) - ciso8601.parse_datetime(containers[container_request[\"container_uuid\"]][\"started_at\"])\n\n    hours = length.days * 24 + (length.seconds // 3600)\n    minutes = (length.seconds // 60) % 60\n    seconds = length.seconds % 60\n\n    return \"%i:%02i:%02i\" % (hours, minutes, seconds)\n\ndef runtime_in_hours(runtime):\n    sp = runtime.split(\":\")\n    hours = float(sp[0])\n    hours += float(sp[1]) / 60\n    hours += float(sp[2]) / 3600\n    return hours\n\ndef hours_to_runtime_str(frac_hours):\n    hours = math.floor(frac_hours)\n    minutes = (frac_hours - math.floor(frac_hours)) * 60.0\n    seconds = (minutes - math.floor(minutes)) * 60.0\n\n    return \"%i:%02i:%02i\" % (hours, minutes, seconds)\n\n\ndef csv_dateformat(datestr):\n    dt = ciso8601.parse_datetime(datestr)\n    return dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\nclass ClusterActivityReport(object):\n    def __init__(self, prom_client):\n        self.arv_client = arvados.api()\n        self.prom_client = prom_client\n        self.cluster = self.arv_client.config()[\"ClusterID\"]\n\n        self.active_users = set()\n        self.project_summary = {}\n        self.total_hours = 0\n        self.total_cost = 0\n        self.total_workflows = 0\n        self.storage_cost = 0\n        self.summary_fetched = False\n        self.graphs = 
{}\n\n    def collect_graph(self, since, to, metric, resample_to, extra=None):\n        if not self.prom_client:\n            return\n\n        flatdata = []\n\n        for series in get_metric_usage(self.prom_client, since, to, metric % self.cluster, resampleTo=resample_to):\n            for t in series.itertuples():\n                flatdata.append([t[0], t[1]])\n                if extra:\n                    extra(t[0], t[1])\n\n        return flatdata\n\n    def collect_storage_cost(self, timestamp, value):\n        self.storage_cost += aws_monthly_cost(value) / (30*24)\n\n    def html_report(self, since, to, exclude, include_workflow_steps):\n        \"\"\"Get a cluster activity report for the desired time period,\n        returning a string containing the report as an HTML document.\"\"\"\n\n        self.label = \"Cluster report for %s from %s to %s\" % (self.cluster, since.date(), to.date())\n\n        if not self.summary_fetched:\n            # If we haven't done it already, we need to fetch everything\n            # from the API to collect summary stats (report_from_api\n            # calls collect_summary_stats on each row).\n            #\n            # Because it is a Python generator, we need to call it in a\n            # loop to process all the rows.  This method also yields\n            # each row which is used by a different function to create\n            # the CSV report, but for the HTML report we just discard\n            # them.\n            for row in self.report_from_api(since, to, include_workflow_steps, exclude):\n                pass\n\n        container_cumulative_hours = 0\n        def collect_container_hours(timestamp, value):\n            nonlocal container_cumulative_hours\n            # resampled to 5 minute increments but we want\n            # a sum of hours\n            container_cumulative_hours += value / 12\n\n        logging.info(\"Getting container hours time series\")\n\n        self.graphs[containers_graph] = self.collect_graph(since, to,\n                           \"arvados_dispatchcloud_containers_running{cluster='%s'}\",\n                           resample_to=\"5min\",\n                           extra=collect_container_hours\n                           )\n\n        logging.info(\"Getting data usage time series\")\n        self.graphs[managed_graph] = self.collect_graph(since, to,\n                           \"arvados_keep_collection_bytes{cluster='%s'}\", resample_to=\"60min\")\n\n        self.graphs[storage_graph] = self.collect_graph(since, to,\n                           \"arvados_keep_total_bytes{cluster='%s'}\", resample_to=\"60min\",\n                                                        extra=self.collect_storage_cost)\n\n        label = self.label\n\n        cards = []\n\n        workbench = self.arv_client.config()[\"Services\"][\"Workbench2\"][\"ExternalURL\"]\n        if workbench.endswith(\"/\"):\n            workbench = workbench[:-1]\n\n        if to.date() == self.today():\n            # The deduplication ratio overstates things a bit: you can\n            # have collections which reference a small slice of a large\n            # block, and this messes up the intuitive value of this ratio\n            # and exaggerates the effect.\n            #\n            # So for now, as much fun as this is, I'm excluding it from\n            # the report.\n            #\n            # dedup_savings = aws_monthly_cost(managed_data_now) - storage_cost\n            # <tr><th>Monthly savings from storage deduplication</th> 
<td>${dedup_savings:,.2f}</td></tr>\n\n            data_rows = \"\"\n            if self.graphs[managed_graph] and self.graphs[storage_graph]:\n                managed_data_now = self.graphs[managed_graph][-1][1]\n                storage_used_now = self.graphs[storage_graph][-1][1]\n                data_rows = \"\"\"\n            <tr><th>Total data under management</th> <td>{managed_data_now}</td></tr>\n            <tr><th>Total storage usage</th> <td>{storage_used_now}</td></tr>\n            <tr><th>Deduplication ratio</th> <td>{dedup_ratio:.1f}</td></tr>\n            <tr><th>Approximate monthly storage cost</th> <td>${storage_cost:,.2f}</td></tr>\n                \"\"\".format(\n                       managed_data_now=format_with_suffix_base10(managed_data_now),\n                       storage_used_now=format_with_suffix_base10(storage_used_now),\n                       storage_cost=aws_monthly_cost(storage_used_now),\n                       dedup_ratio=managed_data_now / storage_used_now,\n                )\n\n            cards.append(\"\"\"<h2>Cluster status as of {now}</h2>\n            <table class='aggtable'><tbody>\n            <tr><th><a href=\"{workbench}/users\">Total users</a></th><td>{total_users}</td></tr>\n            <tr><th>Total projects</th><td>{total_projects}</td></tr>\n            {data_rows}\n            </tbody></table>\n            <p>See <a href=\"#prices\">note on usage and cost calculations</a> for details on how costs are calculated.</p>\n            \"\"\".format(now=self.today(),\n                       total_users=self.total_users,\n                       total_projects=self.total_projects,\n                       workbench=workbench,\n                       data_rows=data_rows))\n\n        # We have a couple of options for getting total container hours\n        #\n        # total_hours=container_cumulative_hours\n        #\n        # calculates the sum from prometheus metrics\n        #\n        # total_hours=self.total_hours\n        #\n        # calculates the sum of the containers that were fetched\n        #\n        # The problem is these numbers tend not to match, especially\n        # if the report generation was not called with \"include\n        # workflow steps\".\n        #\n        # I decided to use the sum from containers fetched, because it\n        # will match the sum of compute time for each project listed\n        # in the report.\n\n        cards.append(\"\"\"<h2>Activity and cost over the {reporting_days} day period {since} to {to}</h2>\n        <table class='aggtable'><tbody>\n        <tr><th>Active users</th> <td>{active_users}</td></tr>\n        <tr><th><a href=\"#Active_Projects\">Active projects</a></th> <td>{active_projects}</td></tr>\n        <tr><th>Workflow runs</th> <td>{total_workflows:,}</td></tr>\n        <tr><th>Compute used</th> <td>{total_hours:,.1f} hours</td></tr>\n        <tr><th>Compute cost</th> <td>${total_cost:,.2f}</td></tr>\n        <tr><th>Storage cost</th> <td>${storage_cost:,.2f}</td></tr>\n        </tbody></table>\n        <p>See <a href=\"#prices\">note on usage and cost calculations</a> for details on how costs are calculated.</p>\n        \"\"\".format(active_users=len(self.active_users),\n                   total_users=self.total_users,\n                   total_hours=self.total_hours,\n                   total_cost=self.total_cost,\n                   total_workflows=self.total_workflows,\n                   active_projects=len(self.project_summary),\n                   since=since.date(), 
to=to.date(),\n                   reporting_days=(to - since).days,\n                   storage_cost=self.storage_cost))\n\n        projectlist = sorted(self.project_summary.items(), key=lambda x: x[1].cost, reverse=True)\n\n        for k, prj in projectlist:\n            if prj.earliest.date() == prj.latest.date():\n                prj.activityspan = \"{}\".format(prj.earliest.date())\n            else:\n                prj.activityspan = \"{} to {}\".format(prj.earliest.date(), prj.latest.date())\n\n            prj.tablerow = \"\"\"<td>{users}</td> <td>{active}</td> <td>{hours:,.1f}</td> <td>${cost:,.2f}</td>\"\"\".format(\n                active=prj.activityspan,\n                cost=prj.cost,\n                hours=prj.hours,\n                users=\", \".join(prj.users),\n            )\n\n        if any(self.graphs.values()):\n            cards.append(\"\"\"\n                <div id=\"chart\"></div>\n            \"\"\")\n\n        cards.append(\n            \"\"\"\n            <a id=\"Active_Projects\"><h2>Active Projects</h2></a>\n            <table class='sortable active-projects'>\n            <thead><tr><th>Project</th> <th>Users</th> <th>Active</th> <th>Compute usage (hours)</th> <th>Compute cost</th> </tr></thead>\n            <tbody><tr>{projects}</tr></tbody>\n            </table>\n            <p>See <a href=\"#prices\">note on usage and cost calculations</a> for details on how costs are calculated.</p>\n            \"\"\".format(projects=\"</tr>\\n<tr>\".join(\"\"\"<td><a href=\"#{name}\">{name}</a></td>{rest}\"\"\".format(name=prj.name, rest=prj.tablerow) for k, prj in projectlist)))\n\n        for k, prj in projectlist:\n            wfsum = []\n            for k2, r in sorted(prj.runs.items(), key=lambda x: x[1].count, reverse=True):\n                wfsum.append(\"\"\"\n                <tr><td>{count}</td> <td>{workflowlink}</td> <td>{median_runtime}</td> <td>{mean_runtime}</td> <td>${median_cost:,.2f}</td> <td>${mean_cost:,.2f}</td> <td>${totalcost:,.2f}</td></tr>\n                \"\"\".format(\n                    count=r.count,\n                    mean_runtime=hours_to_runtime_str(statistics.mean(r.hours)),\n                    median_runtime=hours_to_runtime_str(statistics.median(r.hours)),\n                    mean_cost=statistics.mean(r.cost),\n                    median_cost=statistics.median(r.cost),\n                    totalcost=sum(r.cost),\n                    workflowlink=\"\"\"<a href=\"{workbench}/workflows/{uuid}\">{name}</a>\"\"\".format(workbench=workbench,uuid=r.uuid,name=r.name)\n                    if r.uuid != \"none\" else r.name))\n\n            cards.append(\n                \"\"\"<a id=\"{name}\"></a><a href=\"{workbench}/projects/{uuid}\"><h2>{name}</h2></a>\n\n                <table class='sortable single-project'>\n                <thead><tr> <th>Users</th> <th>Active</th> <th>Compute usage (hours)</th> <th>Compute cost</th> </tr></thead>\n                <tbody><tr>{projectrow}</tr></tbody>\n                </table>\n\n                <table class='sortable project'>\n                <thead><tr><th>Workflow run count</th> <th>Workflow name</th> <th>Median runtime</th> <th>Mean runtime</th> <th>Median cost per run</th> <th>Mean cost per run</th> <th>Sum cost over runs</th></tr></thead>\n                <tbody>\n                {wfsum}\n                </tbody></table>\n                \"\"\".format(name=prj.name,\n                           wfsum=\" \".join(wfsum),\n                           projectrow=prj.tablerow,\n                        
   workbench=workbench,\n                           uuid=prj.uuid)\n            )\n\n        # The deduplication ratio overstates things a bit: you can\n        # have collections which reference a small slice of a large\n        # block, and this messes up the intuitive value of this ratio\n        # and exaggerates the effect.\n        #\n        # So for now, as much fun as this is, I'm excluding it from\n        # the report.\n        #\n        # <p>\"Monthly savings from storage deduplication\" is the\n        # estimated cost difference between \"storage usage\" and \"data\n        # under management\" as a way of comparing with other\n        # technologies that do not support data deduplication.</p>\n\n\n        cards.append(\"\"\"\n        <h2 id=\"prices\">Note on usage and cost calculations</h2>\n\n        <div style=\"max-width: 60em\">\n\n        <p>The numbers presented in this report are estimates and will\n        not perfectly match your cloud bill.  Nevertheless this report\n        should be useful for identifying your main cost drivers.</p>\n\n        <h3>Storage</h3>\n\n        <p>\"Total data under management\" is what you get if you add up\n        all blocks referenced by all collections in Workbench, without\n        considering deduplication.</p>\n\n        <p>\"Total storage usage\" is the actual underlying storage\n        usage, accounting for data deduplication.</p>\n\n        <p>Storage costs are based on AWS \"S3 Standard\"\n        described on the <a href=\"https://aws.amazon.com/s3/pricing/\">Amazon S3 pricing</a> page:</p>\n\n        <ul>\n        <li>$0.023 per GB / Month for the first 50 TB</li>\n        <li>$0.022 per GB / Month for the next 450 TB</li>\n        <li>$0.021 per GB / Month over 500 TB</li>\n        </ul>\n\n        <p>Finally, this is only the base storage cost, and does not\n        include any fees associated with S3 API usage.  However, there\n        are generally no ingress/egress fees if your Arvados instance\n        and S3 bucket are in the same region, which is the normal\n        recommended configuration.</p>\n\n        <h3>Compute</h3>\n\n        <p>\"Compute usage\" is the number of instance-hours used in running\n        workflows.  Because multiple steps may run in parallel on\n        multiple instances, a workflow that completes in four hours\n        but runs parallel steps on five instances would be reported\n        as using 20 instance hours.</p>\n\n        <p>\"Runtime\" is the actual wall clock time that it took to\n        complete a workflow.  This does not include time spent in the\n        queue for the workflow itself, but does include queuing time\n        of individual workflow steps.</p>\n\n        <p>Computational costs are derived from Arvados cost\n        calculations of container runs.  For on-demand instances, this\n        uses the prices from the InstanceTypes section of the Arvados\n        config file, set by the system administrator.  
For spot\n        instances, this uses current spot prices retrieved on the fly\n        from the AWS API.</p>\n\n        <p>Be aware that the cost calculations are only for the time\n        the container is running and do not take into account the\n        overhead of launching instances or idle time between scheduled\n        tasks or prior to automatic shutdown.</p>\n\n        </div>\n        \"\"\")\n\n        return ReportChart(label, cards, self.graphs).html()\n\n    def iter_container_info(self, pending, include_steps, exclude):\n        # \"pending\" is a list of arvados-cwl-runner container requests\n        # returned by the API.  This method fetches detailed\n        # information about the runs and yields report rows.\n\n        # 1. Get container records corresponding to container requests.\n        containers = {}\n\n        for container in arvados.util.keyset_list_all(\n            self.arv_client.containers().list,\n            filters=[\n                [\"uuid\", \"in\", [c[\"container_uuid\"] for c in pending if c[\"container_uuid\"]]],\n            ],\n            select=[\"uuid\", \"started_at\", \"finished_at\", \"cost\"]):\n\n            containers[container[\"uuid\"]] = container\n\n        # 2. Look for the template_uuid property and fetch the\n        # corresponding workflow record.\n        workflows = {}\n        workflows[\"none\"] = \"workflow run from command line\"\n\n        for wf in arvados.util.keyset_list_all(\n                self.arv_client.workflows().list,\n                filters=[\n                    [\"uuid\", \"in\", list(set(c[\"properties\"][\"template_uuid\"]\n                                            for c in pending\n                                            if \"template_uuid\" in c[\"properties\"] and c[\"properties\"][\"template_uuid\"].startswith(self.arv_client.config()[\"ClusterID\"])))],\n                ],\n                select=[\"uuid\", \"name\"]):\n            workflows[wf[\"uuid\"]] = wf[\"name\"]\n\n        # 3. Look at owner_uuid and fetch owning projects and users\n        projects = {}\n\n        for pr in arvados.util.keyset_list_all(\n                self.arv_client.groups().list,\n                filters=[\n                    [\"uuid\", \"in\", list(set(c[\"owner_uuid\"] for c in pending if c[\"owner_uuid\"][6:11] == 'j7d0g'))],\n                ],\n                select=[\"uuid\", \"name\"]):\n            projects[pr[\"uuid\"]] = pr[\"name\"]\n\n        # 4. Look at owner_uuid and modified_by_user_uuid and get user records\n        for pr in arvados.util.keyset_list_all(\n                self.arv_client.users().list,\n                filters=[\n                    [\"uuid\", \"in\", list(set(c[\"owner_uuid\"] for c in pending if c[\"owner_uuid\"][6:11] == 'tpzed')|set(c[\"modified_by_user_uuid\"] for c in pending))],\n                ],\n                select=[\"uuid\", \"full_name\", \"first_name\", \"last_name\"]):\n            projects[pr[\"uuid\"]] = pr[\"full_name\"]\n\n        # 5. Optionally iterate over individual workflow steps.\n        if include_steps:\n            name_regex = re.compile(r\"(.+)_[0-9]+\")\n            child_crs = {}\n            child_cr_containers = set()\n            stepcount = 0\n\n            # 5.1. 
Go through the container requests owned by the toplevel workflow container\n            logging.info(\"Getting workflow steps\")\n            for cr in arvados.util.keyset_list_all(\n                self.arv_client.container_requests().list,\n                filters=[\n                    [\"requesting_container_uuid\", \"in\", list(containers.keys())],\n                ],\n                select=[\"uuid\", \"name\", \"cumulative_cost\", \"requesting_container_uuid\", \"container_uuid\"]):\n\n                if cr[\"cumulative_cost\"] == 0:\n                    continue\n\n                g = name_regex.fullmatch(cr[\"name\"])\n                if g:\n                    cr[\"name\"] = g[1]\n\n                # 5.2. Get the containers corresponding to the\n                # container requests.  This has the same logic as\n                # report_from_api where we batch it into 1000 items at\n                # a time.\n                child_crs.setdefault(cr[\"requesting_container_uuid\"], []).append(cr)\n                child_cr_containers.add(cr[\"container_uuid\"])\n                if len(child_cr_containers) == 1000:\n                    stepcount += len(child_cr_containers)\n                    for container in arvados.util.keyset_list_all(\n                            self.arv_client.containers().list,\n                            filters=[\n                                [\"uuid\", \"in\", list(child_cr_containers)],\n                            ],\n                            select=[\"uuid\", \"started_at\", \"finished_at\", \"cost\"]):\n\n                        containers[container[\"uuid\"]] = container\n\n                    logging.info(\"Got workflow steps %s - %s\", stepcount-len(child_cr_containers), stepcount)\n                    child_cr_containers.clear()\n\n            # Get any remaining containers\n            if child_cr_containers:\n                stepcount += len(child_cr_containers)\n                for container in arvados.util.keyset_list_all(\n                        self.arv_client.containers().list,\n                        filters=[\n                            [\"uuid\", \"in\", list(child_cr_containers)],\n                        ],\n                        select=[\"uuid\", \"started_at\", \"finished_at\", \"cost\"]):\n\n                    containers[container[\"uuid\"]] = container\n                logging.info(\"Got workflow steps %s - %s\", stepcount-len(child_cr_containers), stepcount)\n\n        # 6. 
Now go through the list of workflow runs and yield a row\n        # with all the information we have collected, as well as the\n        # details for each workflow step (if enabled)\n        for container_request in pending:\n            if not container_request[\"container_uuid\"] or not containers[container_request[\"container_uuid\"]][\"started_at\"] or not containers[container_request[\"container_uuid\"]][\"finished_at\"]:\n                continue\n\n            template_uuid = container_request[\"properties\"].get(\"template_uuid\", \"none\")\n            workflowname = container_request[\"name\"] if template_uuid == \"none\" else workflows.get(template_uuid, template_uuid)\n\n            if exclude and re.search(exclude, workflowname, flags=re.IGNORECASE):\n                continue\n\n            yield {\n                \"Project\": projects.get(container_request[\"owner_uuid\"], \"unknown owner\"),\n                \"ProjectUUID\": container_request[\"owner_uuid\"],\n                \"Workflow\": workflowname,\n                \"WorkflowUUID\": container_request[\"properties\"].get(\"template_uuid\", \"none\"),\n                \"Step\": \"workflow runner\",\n                \"StepUUID\": container_request[\"uuid\"],\n                \"Sample\": container_request[\"name\"],\n                \"SampleUUID\": container_request[\"uuid\"],\n                \"User\": projects.get(container_request[\"modified_by_user_uuid\"], \"unknown user\"),\n                \"UserUUID\": container_request[\"modified_by_user_uuid\"],\n                \"Submitted\": csv_dateformat(container_request[\"created_at\"]),\n                \"Started\": csv_dateformat(containers[container_request[\"container_uuid\"]][\"started_at\"]),\n                \"Finished\": csv_dateformat(containers[container_request[\"container_uuid\"]][\"finished_at\"]),\n                \"Runtime\": runtime_str(container_request, containers),\n                \"Cost\": round(containers[container_request[\"container_uuid\"]][\"cost\"] if include_steps else container_request[\"cumulative_cost\"], 3),\n                \"CumulativeCost\": round(container_request[\"cumulative_cost\"], 3)\n                }\n\n            if include_steps:\n                for child_cr in child_crs.get(container_request[\"container_uuid\"], []):\n                    if not child_cr[\"container_uuid\"] or not containers[child_cr[\"container_uuid\"]][\"started_at\"] or not containers[child_cr[\"container_uuid\"]][\"finished_at\"]:\n                        continue\n                    yield {\n                        \"Project\": projects.get(container_request[\"owner_uuid\"], \"unknown owner\"),\n                        \"ProjectUUID\": container_request[\"owner_uuid\"],\n                        \"Workflow\": workflows.get(container_request[\"properties\"].get(\"template_uuid\", \"none\"), \"workflow missing\"),\n                        \"WorkflowUUID\": container_request[\"properties\"].get(\"template_uuid\", \"none\"),\n                        \"Step\": child_cr[\"name\"],\n                        \"StepUUID\": child_cr[\"uuid\"],\n                        \"Sample\": container_request[\"name\"],\n                        \"SampleUUID\": container_request[\"uuid\"],\n                        \"User\": projects.get(container_request[\"modified_by_user_uuid\"], \"unknown user\"),\n                        \"UserUUID\": container_request[\"modified_by_user_uuid\"],\n                        \"Submitted\": csv_dateformat(child_cr[\"created_at\"]),\n                  
      \"Started\": csv_dateformat(containers[child_cr[\"container_uuid\"]][\"started_at\"]),\n                        \"Finished\": csv_dateformat(containers[child_cr[\"container_uuid\"]][\"finished_at\"]),\n                        \"Runtime\": runtime_str(child_cr, containers),\n                        \"Cost\": round(containers[child_cr[\"container_uuid\"]][\"cost\"], 3),\n                        \"CumulativeCost\": round(containers[child_cr[\"container_uuid\"]][\"cost\"], 3),\n                        }\n\n\n    def collect_summary_stats(self, row):\n        self.active_users.add(row[\"User\"])\n        self.project_summary.setdefault(row[\"ProjectUUID\"],\n                                        ProjectSummary(users=set(),\n                                                       runs={},\n                                                       uuid=row[\"ProjectUUID\"],\n                                                       name=row[\"Project\"]))\n        prj = self.project_summary[row[\"ProjectUUID\"]]\n        cost = row[\"Cost\"]\n        prj.cost += cost\n        prj.count += 1\n        prj.users.add(row[\"User\"])\n        hrs = runtime_in_hours(row[\"Runtime\"])\n        prj.hours += hrs\n\n        started = datetime.strptime(row[\"Started\"], \"%Y-%m-%d %H:%M:%S\")\n        finished = datetime.strptime(row[\"Finished\"], \"%Y-%m-%d %H:%M:%S\")\n\n        if started < prj.earliest:\n            prj.earliest = started\n\n        if finished > prj.latest:\n            prj.latest = finished\n\n        if row[\"Step\"] == \"workflow runner\":\n            prj.runs.setdefault(row[\"Workflow\"], WorkflowRunSummary(name=row[\"Workflow\"],\n                                                                    uuid=row[\"WorkflowUUID\"],\n                                                                    cost=[], hours=[]))\n            wfuuid = row[\"Workflow\"]\n            prj.runs[wfuuid].count += 1\n            prj.runs[wfuuid].cost.append(row[\"CumulativeCost\"])\n            prj.runs[wfuuid].hours.append(hrs)\n            self.total_workflows += 1\n\n        self.total_hours += hrs\n        self.total_cost += cost\n\n    def report_from_api(self, since, to, include_steps, exclude):\n        pending = []\n\n        count = 0\n        for container_request in arvados.util.keyset_list_all(\n                self.arv_client.container_requests().list,\n                filters=[\n                    [\"command\", \"like\", \"[\\\"arvados-cwl-runner%\"],\n                    [\"created_at\", \">=\", since.strftime(\"%Y%m%dT%H%M%SZ\")],\n                    [\"created_at\", \"<=\", to.strftime(\"%Y%m%dT%H%M%SZ\")],\n                ],\n                select=[\"uuid\", \"owner_uuid\", \"container_uuid\", \"name\", \"cumulative_cost\", \"properties\", \"modified_by_user_uuid\", \"created_at\"]):\n\n            if container_request[\"cumulative_cost\"] == 0:\n                continue\n\n            # Every 1000 container requests, we fetch the\n            # corresponding container records.\n            #\n            # What's so special about 1000?  
Because that's the\n            # maximum Arvados page size, so when we use ['uuid', 'in',\n            # [...]] to fetch associated records it doesn't make sense\n            # to provide more than 1000 uuids.\n            #\n            # TODO: use the ?include=container_uuid feature so a\n            # separate request to the containers table isn't necessary.\n            pending.append(container_request)\n            if len(pending) == 1000:\n                count += len(pending)\n                logging.info(\"Exporting workflow runs %s - %s\", count-len(pending), count)\n                for row in self.iter_container_info(pending, include_steps, exclude):\n                    self.collect_summary_stats(row)\n                    yield row\n                pending.clear()\n\n        count += len(pending)\n        logging.info(\"Exporting workflow runs %s - %s\", count-len(pending), count)\n        for row in self.iter_container_info(pending, include_steps, exclude):\n            self.collect_summary_stats(row)\n            yield row\n\n        userinfo = self.arv_client.users().list(filters=[[\"is_active\", \"=\", True]], limit=0).execute()\n        self.total_users = userinfo[\"items_available\"]\n\n        groupinfo = self.arv_client.groups().list(filters=[[\"group_class\", \"=\", \"project\"]], limit=0).execute()\n        self.total_projects = groupinfo[\"items_available\"]\n\n    def csv_report(self, since, to, out, include_steps, columns, exclude):\n        if columns:\n            columns = columns.split(\",\")\n        else:\n            if include_steps:\n                columns = (\"Project\", \"Workflow\", \"Step\", \"Sample\", \"User\", \"Submitted\", \"Runtime\", \"Cost\")\n            else:\n                columns = (\"Project\", \"Workflow\", \"Sample\", \"User\", \"Submitted\", \"Runtime\", \"Cost\")\n\n        csvwriter = csv.DictWriter(out, fieldnames=columns, extrasaction=\"ignore\")\n        csvwriter.writeheader()\n\n        for row in self.report_from_api(since, to, include_steps, exclude):\n            csvwriter.writerow(row)\n\n        self.summary_fetched = True\n\n    def today(self):\n        return date.today()\n"
  },
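  {
    "path": "tools/cluster-activity/examples/storage_cost_sketch.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Hypothetical worked example, added for illustration only; it is not part\n# of the packaged tool.  It checks the tiered AWS \"S3 Standard\" pricing math\n# implemented by aws_monthly_cost() in report.py against a hand calculation.\n\nfrom arvados_cluster_activity.report import aws_monthly_cost\n\n# 600 TiB stored, priced in three tiers:\n#   first  50 TiB =  51200 GiB * $0.023 = $ 1177.60\n#   next  450 TiB = 460800 GiB * $0.022 = $10137.60\n#   over  500 TiB = 102400 GiB * $0.021 = $ 2150.40\n#                                 total = $13465.60\nstored_bytes = 600 * 1024**4\nassert round(aws_monthly_cost(stored_bytes), 2) == 13465.60\nprint(\"monthly storage cost: $%.2f\" % aws_monthly_cost(stored_bytes))\n"
  },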
  {
    "path": "tools/cluster-activity/arvados_cluster_activity/reportchart.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport json\nimport importlib.resources\nfrom datetime import datetime\nfrom arvados._internal.report_template import ReportTemplate\n\nsortablecss = \"\"\"\n<style>\n@charset \"UTF-8\";\n.sortable thead th:not(.no-sort) {\n  cursor: pointer;\n}\n.sortable thead th:not(.no-sort)::after, .sortable thead th:not(.no-sort)::before {\n  transition: color 0.1s ease-in-out;\n  font-size: 1.2em;\n  color: transparent;\n}\n.sortable thead th:not(.no-sort)::after {\n  margin-left: 3px;\n  content: \"▸\";\n}\n.sortable thead th:not(.no-sort):hover::after {\n  color: inherit;\n}\n.sortable thead th:not(.no-sort)[aria-sort=descending]::after {\n  color: inherit;\n  content: \"▾\";\n}\n.sortable thead th:not(.no-sort)[aria-sort=ascending]::after {\n  color: inherit;\n  content: \"▴\";\n}\n.sortable thead th:not(.no-sort).indicator-left::after {\n  content: \"\";\n}\n.sortable thead th:not(.no-sort).indicator-left::before {\n  margin-right: 3px;\n  content: \"▸\";\n}\n.sortable thead th:not(.no-sort).indicator-left:hover::before {\n  color: inherit;\n}\n.sortable thead th:not(.no-sort).indicator-left[aria-sort=descending]::before {\n  color: inherit;\n  content: \"▾\";\n}\n.sortable thead th:not(.no-sort).indicator-left[aria-sort=ascending]::before {\n  color: inherit;\n  content: \"▴\";\n}\n\ntable.aggtable td:nth-child(2) {\n  text-align: right;\n}\n\ntable.active-projects td:nth-child(4),\ntable.active-projects td:nth-child(5) {\n  text-align: right;\n  padding-right: 6em;\n}\n\ntable.single-project td:nth-child(3),\ntable.single-project td:nth-child(4) {\n  text-align: right;\n  padding-right: 6em;\n}\n\ntable.active-projects th:nth-child(4),\ntable.active-projects th:nth-child(5) {\n  text-align: left;\n}\n\ntable.project td:nth-child(3),\ntable.project td:nth-child(4),\ntable.project td:nth-child(5),\ntable.project td:nth-child(6),\ntable.project td:nth-child(7) {\n  text-align: right;\n  padding-right: 6em;\n}\n\ntable.project th:nth-child(3),\ntable.project th:nth-child(4),\ntable.project th:nth-child(5),\ntable.project th:nth-child(6),\ntable.project th:nth-child(7) {\n  text-align: left;\n}\n</style>\n\"\"\"\n\ndef date_export(item):\n    if isinstance(item, datetime):\n        return \"\"\"@new Date(\"{}\")@\"\"\".format(item.strftime(\"%Y-%m-%dT%H:%M:%SZ\"))\n\nclass ReportChart(ReportTemplate):\n    CSS = 'https://cdnjs.cloudflare.com/ajax/libs/dygraph/2.0.0/dygraph.min.css'\n    JSLIB = 'https://cdnjs.cloudflare.com/ajax/libs/dygraph/2.0.0/dygraph.min.js'\n    JSASSETS = ['synchronizer.js', 'dygraphs.js', 'sortable.js']\n\n    def __init__(self, label, cards, graphs):\n        super(ReportChart, self).__init__(label)\n        self.cards = cards\n        self.graphs = graphs\n\n    def sections(self):\n        return [\n            {\n                'label': k[0],\n                'charts': [self.chartdata(k, v)]\n            }\n            for k,v in self.graphs.items()]\n\n    def chartdata(self, label, stats):\n        return {\n            'data': stats,\n            'options': {\n                'legend': 'always',\n                'connectSeparatedPoints': True,\n                'labels': ['date', label[1]],\n                'includeZero': True,\n                'title': label[0]\n            },\n        }\n\n    def js(self):\n\n\n        return '''\n        <script type=\"text/javascript\" src=\"{jslib}\"></script>\n        <script 
type=\"text/javascript\">\n        var chartdata = {chartdata};\\n{jsassets}\n        </script>'''.format(\n            jslib=self.JSLIB,\n            chartdata=json.dumps(self.sections(), default=date_export).replace('\"@', '').replace('@\"', '').replace('\\\\\"', '\"'),\n            jsassets='\\n'.join(\n                importlib.resources.read_text('arvados_cluster_activity', jsa)\n                for jsa in self.JSASSETS\n            ),\n        )\n\n    def style(self):\n        return '\\n'.join((super().style(),\n                         sortablecss,\n                         '<link rel=\"stylesheet\" href=\"{}\">\\n'.format(self.CSS)))\n"
  },
  {
    "path": "tools/cluster-activity/arvados_cluster_activity/sortable.js",
    "content": "/**\n * Copyright Jonas Earendel. All rights reserved.\n * SPDX-License-Identifier: Unlicense\n *\n * sortable v3.2.3\n *\n * https://www.npmjs.com/package/sortable-tablesort\n * https://github.com/tofsjonas/sortable\n *\n * Makes html tables sortable, No longer ie9+ 😢\n *\n * Styling is done in css.\n *\n * Copyleft 2017 Jonas Earendel\n *\n * This is free and unencumbered software released into the public domain.\n *\n * Anyone is free to copy, modify, publish, use, compile, sell, or\n * distribute this software, either in source code form or as a compiled\n * binary, for any purpose, commercial or non-commercial, and by any\n * means.\n *\n * In jurisdictions that recognize copyright laws, the author or authors\n * of this software dedicate any and all copyright interest in the\n * software to the public domain. We make this dedication for the benefit\n * of the public at large and to the detriment of our heirs and\n * successors. We intend this dedication to be an overt act of\n * relinquishment in perpetuity of all present and future rights to this\n * software under copyright law.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n * OTHER DEALINGS IN THE SOFTWARE.\n *\n * For more information, please refer to <http://unlicense.org>\n *\n */\ndocument.addEventListener('click', function (e) {\n    try {\n        // allows for elements inside TH\n        function findElementRecursive(element, tag) {\n            return element.nodeName === tag ? element : findElementRecursive(element.parentNode, tag);\n        }\n        var ascending_table_sort_class = 'asc';\n        var no_sort_class = 'no-sort';\n        var null_last_class = 'n-last';\n        var table_class_name = 'sortable';\n        var alt_sort_1 = e.shiftKey || e.altKey;\n        var element = findElementRecursive(e.target, 'TH');\n        var tr = element.parentNode;\n        var thead = tr.parentNode;\n        var table = thead.parentNode;\n        function getValue(element) {\n            var _a;\n            var value = alt_sort_1 ? element.dataset.sortAlt : (_a = element.dataset.sort) !== null && _a !== void 0 ? 
_a : element.textContent;\n            return value;\n        }\n        if (thead.nodeName === 'THEAD' && // sortable only triggered in `thead`\n            table.classList.contains(table_class_name) &&\n            !element.classList.contains(no_sort_class) // .no-sort is now core functionality, no longer handled in CSS\n        ) {\n            var column_index_1;\n            var nodes = tr.cells;\n            var tiebreaker_1 = +element.dataset.sortTbr;\n            // Reset thead cells and get column index\n            for (var i = 0; i < nodes.length; i++) {\n                if (nodes[i] === element) {\n                    column_index_1 = +element.dataset.sortCol || i;\n                }\n                else {\n                    nodes[i].setAttribute('aria-sort', 'none');\n                }\n            }\n            var direction = 'descending';\n            if (element.getAttribute('aria-sort') === 'descending' ||\n                (table.classList.contains(ascending_table_sort_class) && element.getAttribute('aria-sort') !== 'ascending')) {\n                direction = 'ascending';\n            }\n            // Update the `th` class accordingly\n            element.setAttribute('aria-sort', direction);\n            var reverse_1 = direction === 'ascending';\n            var sort_null_last_1 = table.classList.contains(null_last_class);\n            var compare_1 = function (a, b, index) {\n                var x = getValue(b.cells[index]);\n                var y = getValue(a.cells[index]);\n                if (sort_null_last_1) {\n                    if (x === '' && y !== '') {\n                        return -1;\n                    }\n                    if (y === '' && x !== '') {\n                        return 1;\n                    }\n                }\n                // Before comparing, clean up formatted numbers that may have a leading dollar sign and/or commas.\n                x = x.replace(\"$\", \"\").replace(\",\", \"\");\n                y = y.replace(\"$\", \"\").replace(\",\", \"\");\n                var temp = +x - +y;\n                var bool = isNaN(temp) ? x.localeCompare(y) : temp;\n                return reverse_1 ? -bool : bool;\n            };\n            // loop through all tbodies and sort them\n            for (var i = 0; i < table.tBodies.length; i++) {\n                var org_tbody = table.tBodies[i];\n                // Put the array rows in an array, so we can sort them...\n                var rows = [].slice.call(org_tbody.rows, 0);\n                // Sort them using Array.prototype.sort()\n                rows.sort(function (a, b) {\n                    var bool = compare_1(a, b, column_index_1);\n                    return bool === 0 && !isNaN(tiebreaker_1) ? compare_1(a, b, tiebreaker_1) : bool;\n                });\n                // Make an empty clone\n                var clone_tbody = org_tbody.cloneNode();\n                // Put the sorted rows inside the clone\n                clone_tbody.append.apply(clone_tbody, rows);\n                // And finally replace the unsorted tbody with the sorted one\n                table.replaceChild(clone_tbody, org_tbody);\n            }\n        }\n        // eslint-disable-next-line no-unused-vars\n    }\n    catch (error) {\n        // console.log(error)\n    }\n});\n"
  },
  {
    "path": "tools/cluster-activity/arvados_cluster_activity/synchronizer.js",
    "content": "// Copyright (c) 2009 Dan Vanderkam. All rights reserved.\n//\n// SPDX-License-Identifier: MIT\n\n/**\n * Synchronize zooming and/or selections between a set of dygraphs.\n *\n * Usage:\n *\n *   var g1 = new Dygraph(...),\n *       g2 = new Dygraph(...),\n *       ...;\n *   var sync = Dygraph.synchronize(g1, g2, ...);\n *   // charts are now synchronized\n *   sync.detach();\n *   // charts are no longer synchronized\n *\n * You can set options using the last parameter, for example:\n *\n *   var sync = Dygraph.synchronize(g1, g2, g3, {\n *      selection: true,\n *      zoom: true\n *   });\n *\n * The default is to synchronize both of these.\n *\n * Instead of passing one Dygraph object as each parameter, you may also pass an\n * array of dygraphs:\n *\n *   var sync = Dygraph.synchronize([g1, g2, g3], {\n *      selection: false,\n *      zoom: true\n *   });\n *\n * You may also set `range: false` if you wish to only sync the x-axis.\n * The `range` option has no effect unless `zoom` is true (the default).\n *\n * Original source: https://github.com/danvk/dygraphs/blob/master/src/extras/synchronizer.js\n * at commit b55a71d768d2f8de62877c32b3aec9e9975ac389\n *\n * Copyright (c) 2009 Dan Vanderkam\n *\n * Permission is hereby granted, free of charge, to any person\n * obtaining a copy of this software and associated documentation\n * files (the \"Software\"), to deal in the Software without\n * restriction, including without limitation the rights to use,\n * copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following\n * conditions:\n *\n * The above copyright notice and this permission notice shall be\n * included in all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n * OTHER DEALINGS IN THE SOFTWARE.\n */\n(function() {\n/* global Dygraph:false */\n'use strict';\n\nvar Dygraph;\nif (window.Dygraph) {\n  Dygraph = window.Dygraph;\n} else if (typeof(module) !== 'undefined') {\n  Dygraph = require('../dygraph');\n}\n\nvar synchronize = function(/* dygraphs..., opts */) {\n  if (arguments.length === 0) {\n    throw 'Invalid invocation of Dygraph.synchronize(). 
Need >= 1 argument.';\n  }\n\n  var OPTIONS = ['selection', 'zoom', 'range'];\n  var opts = {\n    selection: true,\n    zoom: true,\n    range: true\n  };\n  var dygraphs = [];\n  var prevCallbacks = [];\n\n  var parseOpts = function(obj) {\n    if (!(obj instanceof Object)) {\n      throw 'Last argument must be either Dygraph or Object.';\n    } else {\n      for (var i = 0; i < OPTIONS.length; i++) {\n        var optName = OPTIONS[i];\n        if (obj.hasOwnProperty(optName)) opts[optName] = obj[optName];\n      }\n    }\n  };\n\n  if (arguments[0] instanceof Dygraph) {\n    // Arguments are Dygraph objects.\n    for (var i = 0; i < arguments.length; i++) {\n      if (arguments[i] instanceof Dygraph) {\n        dygraphs.push(arguments[i]);\n      } else {\n        break;\n      }\n    }\n    if (i < arguments.length - 1) {\n      throw 'Invalid invocation of Dygraph.synchronize(). ' +\n            'All but the last argument must be Dygraph objects.';\n    } else if (i == arguments.length - 1) {\n      parseOpts(arguments[arguments.length - 1]);\n    }\n  } else if (arguments[0].length) {\n    // Invoked w/ list of dygraphs, options\n    for (var i = 0; i < arguments[0].length; i++) {\n      dygraphs.push(arguments[0][i]);\n    }\n    if (arguments.length == 2) {\n      parseOpts(arguments[1]);\n    } else if (arguments.length > 2) {\n      throw 'Invalid invocation of Dygraph.synchronize(). ' +\n            'Expected two arguments: array and optional options argument.';\n    }  // otherwise arguments.length == 1, which is fine.\n  } else {\n    throw 'Invalid invocation of Dygraph.synchronize(). ' +\n          'First parameter must be either Dygraph or list of Dygraphs.';\n  }\n\n  if (dygraphs.length < 2) {\n    throw 'Invalid invocation of Dygraph.synchronize(). 
' +\n          'Need two or more dygraphs to synchronize.';\n  }\n\n  var readycount = dygraphs.length;\n  for (var i = 0; i < dygraphs.length; i++) {\n    var g = dygraphs[i];\n    g.ready( function() {\n      if (--readycount == 0) {\n        // store original callbacks\n        var callBackTypes = ['drawCallback', 'highlightCallback', 'unhighlightCallback'];\n        for (var j = 0; j < dygraphs.length; j++) {\n          if (!prevCallbacks[j]) {\n            prevCallbacks[j] = {};\n          }\n          for (var k = callBackTypes.length - 1; k >= 0; k--) {\n            prevCallbacks[j][callBackTypes[k]] = dygraphs[j].getFunctionOption(callBackTypes[k]);\n          }\n        }\n\n        // Listen for draw, highlight, unhighlight callbacks.\n        if (opts.zoom) {\n          attachZoomHandlers(dygraphs, opts, prevCallbacks);\n        }\n\n        if (opts.selection) {\n          attachSelectionHandlers(dygraphs, prevCallbacks);\n        }\n      }\n    });\n  }\n\n  return {\n    detach: function() {\n      for (var i = 0; i < dygraphs.length; i++) {\n        var g = dygraphs[i];\n        if (opts.zoom) {\n          g.updateOptions({drawCallback: prevCallbacks[i].drawCallback});\n        }\n        if (opts.selection) {\n          g.updateOptions({\n            highlightCallback: prevCallbacks[i].highlightCallback,\n            unhighlightCallback: prevCallbacks[i].unhighlightCallback\n          });\n        }\n      }\n      // release references & make subsequent calls throw.\n      dygraphs = null;\n      opts = null;\n      prevCallbacks = null;\n    }\n  };\n};\n\nfunction arraysAreEqual(a, b) {\n  if (!Array.isArray(a) || !Array.isArray(b)) return false;\n  var i = a.length;\n  if (i !== b.length) return false;\n  while (i--) {\n    if (a[i] !== b[i]) return false;\n  }\n  return true;\n}\n\nfunction attachZoomHandlers(gs, syncOpts, prevCallbacks) {\n  var block = false;\n  for (var i = 0; i < gs.length; i++) {\n    var g = gs[i];\n    g.updateOptions({\n      drawCallback: function(me, initial) {\n        if (block || initial) return;\n        block = true;\n        var opts = {\n          dateWindow: me.xAxisRange()\n        };\n        if (syncOpts.range) opts.valueRange = me.yAxisRange();\n\n        for (var j = 0; j < gs.length; j++) {\n          if (gs[j] == me) {\n            if (prevCallbacks[j] && prevCallbacks[j].drawCallback) {\n              prevCallbacks[j].drawCallback.apply(this, arguments);\n            }\n            continue;\n          }\n\n          // Only redraw if there are new options\n          if (arraysAreEqual(opts.dateWindow, gs[j].getOption('dateWindow')) && \n              arraysAreEqual(opts.valueRange, gs[j].getOption('valueRange'))) {\n            continue;\n          }\n\n          gs[j].updateOptions(opts);\n        }\n        block = false;\n      }\n    }, true /* no need to redraw */);\n  }\n}\n\nfunction attachSelectionHandlers(gs, prevCallbacks) {\n  var block = false;\n  for (var i = 0; i < gs.length; i++) {\n    var g = gs[i];\n\n    g.updateOptions({\n      highlightCallback: function(event, x, points, row, seriesName) {\n        if (block) return;\n        block = true;\n        var me = this;\n        for (var i = 0; i < gs.length; i++) {\n          if (me == gs[i]) {\n            if (prevCallbacks[i] && prevCallbacks[i].highlightCallback) {\n              prevCallbacks[i].highlightCallback.apply(this, arguments);\n            }\n            continue;\n          }\n          var idx = gs[i].getRowForX(x);\n          if (idx !== 
null) {\n            gs[i].setSelection(idx, seriesName);\n          }\n        }\n        block = false;\n      },\n      unhighlightCallback: function(event) {\n        if (block) return;\n        block = true;\n        var me = this;\n        for (var i = 0; i < gs.length; i++) {\n          if (me == gs[i]) {\n            if (prevCallbacks[i] && prevCallbacks[i].unhighlightCallback) {\n              prevCallbacks[i].unhighlightCallback.apply(this, arguments);\n            }\n            continue;\n          }\n          gs[i].clearSelection();\n        }\n        block = false;\n      }\n    }, true /* no need to redraw */);\n  }\n}\n\nDygraph.synchronize = synchronize;\n\n})();\n"
  },
  {
    "path": "tools/cluster-activity/arvados_version.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport dataclasses\nimport os\nimport re\nimport runpy\nimport subprocess\nimport typing as t\n\nfrom pathlib import Path, PurePath, PurePosixPath\n\nimport setuptools\nimport setuptools.command.build\n\nSETUP_DIR = Path(__file__).absolute().parent\nVERSION_SCRIPT_PATH = PurePath('build', 'version-at-commit.sh')\n# Built by ArvadosPythonPackage.register\nARVADOS_PYTHON_MODULES: dict[str, 'ArvadosPythonPackage'] = {}\n\n### Metadata generation\n\n@dataclasses.dataclass\nclass ArvadosPythonPackage:\n    package_name: str\n    module_name: str\n    src_path: PurePath\n    dependencies: t.Sequence['ArvadosPythonPackage']\n\n    _VERSION_SUBS = {\n        'development-': '',\n        '~dev': '.dev',\n        '~rc': 'rc',\n    }\n\n    @classmethod\n    def register(\n            cls,\n            package_name: str,\n            module_name: str,\n            src_path: PurePath | str,\n            *dependencies: str,\n    ) -> 'ArvadosPythonPackage':\n        if not isinstance(src_path, PurePath):\n            src_path = PurePosixPath(src_path)\n        deps = [ARVADOS_PYTHON_MODULES[key] for key in dependencies]\n        this_pkg = cls(package_name, module_name, src_path, deps)\n        ARVADOS_PYTHON_MODULES[package_name] = this_pkg\n        return this_pkg\n\n    def version_file_path(self):\n        return PurePath(self.module_name, '_version.py')\n\n    def _workspace_path(self, workdir):\n        try:\n            workspace = Path(os.environ['WORKSPACE'])\n            # This will raise ValueError if they're not related,\n            # in which case we don't want to use this $WORKSPACE.\n            workdir.relative_to(workspace)\n        except KeyError:\n            # $WORKSPACE isn't set. 
Fall back to the Git worktree toplevel.\n            try:\n                git_proc = subprocess.run(\n                    ['git', 'rev-parse', '--show-toplevel'],\n                    capture_output=True,\n                    check=True,\n                    cwd=workdir,\n                    text=True,\n                )\n                workspace = Path(git_proc.stdout.removesuffix('\\n'))\n            except (subprocess.CalledProcessError, FileNotFoundError, ValueError):\n                return None\n        except ValueError:\n            return None\n        if (workspace / VERSION_SCRIPT_PATH).exists():\n            return workspace\n        else:\n            return None\n\n    def _git_version(self, workdir):\n        workspace = self._workspace_path(workdir)\n        if workspace is None:\n            return None\n        git_log_cmd = [\n            'git', 'log', '-n1', '--format=%H', '--',\n            str(VERSION_SCRIPT_PATH), str(self.src_path),\n        ]\n        git_log_cmd.extend(str(dep.src_path) for dep in self.dependencies)\n        git_log_proc = subprocess.run(\n            git_log_cmd,\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        version_proc = subprocess.run(\n            [str(VERSION_SCRIPT_PATH), git_log_proc.stdout.rstrip('\\n')],\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        return version_proc.stdout.rstrip('\\n')\n\n    def _sdist_version(self, workdir):\n        try:\n            pkg_info = (workdir / 'PKG-INFO').open()\n        except FileNotFoundError:\n            return None\n        with pkg_info:\n            for line in pkg_info:\n                key, _, val = line.partition(': ')\n                if key == 'Version':\n                    return val.rstrip('\\n')\n        raise Exception(\"found PKG-INFO file but not Version metadata in it\")\n\n    def get_version(self, workdir=SETUP_DIR):\n        version = (\n            # If we're building out of a distribution, we should pass that\n            # version through unchanged.\n            self._sdist_version(workdir)\n            # Otherwise follow the usual Arvados versioning rules.\n            or os.environ.get('ARVADOS_BUILDING_VERSION')\n            or self._git_version(workdir)\n        )\n        if not version:\n            raise Exception(f\"no version information available for {self.package_name}\")\n        else:\n            return re.sub(\n                r'(^development-|~dev|~rc)',\n                lambda match: self._VERSION_SUBS[match.group(0)],\n                version,\n            )\n\n    def get_dependencies_version(self, workdir=SETUP_DIR, version=None):\n        if version is None:\n            version = self.get_version(workdir)\n        # A packaged development release should be installed with other\n        # development packages built from the same source, but those\n        # dependencies may have earlier \"dev\" versions (read: less recent\n        # Git commit timestamps). This compatible version dependency\n        # expresses that as closely as possible. 
Allowing versions\n        # compatible with .dev0 allows any development release.\n        # Regular expression borrowed partially from\n        # <https://packaging.python.org/en/latest/specifications/version-specifiers/#version-specifiers-regex>\n        dep_ver, match_count = re.subn(r'\\.dev(0|[1-9][0-9]*)$', '.dev0', version, 1)\n        return ('~=' if match_count else '==', dep_ver)\n\n    def iter_dependencies(self, workdir=SETUP_DIR, version=None, extras=None):\n        if extras is None:\n            extras = {}\n        dep_op, dep_ver = self.get_dependencies_version(workdir, version)\n        for dep in self.dependencies:\n            try:\n                dep_extras = f'[{\",\".join(extras[dep.package_name])}]'\n            except KeyError:\n                dep_extras = ''\n            yield f'{dep.package_name}{dep_extras} {dep_op} {dep_ver}'\n\n\n### Package database\n\nArvadosPythonPackage.register(\n    'arvados-python-client',\n    'arvados',\n    'sdk/python',\n),\nArvadosPythonPackage.register(\n    'crunchstat_summary',\n    'crunchstat_summary',\n    'tools/crunchstat-summary',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cluster-activity',\n    'arvados_cluster_activity',\n    'tools/cluster-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cwl-runner',\n    'arvados_cwl',\n    'sdk/cwl',\n    'arvados-python-client',\n    'crunchstat_summary',\n)\nArvadosPythonPackage.register(\n    'arvados_fuse',\n    'arvados_fuse',\n    'services/fuse',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-user-activity',\n    'arvados_user_activity',\n    'tools/user-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-tools',\n    'NO SRCDIR',\n    'tools/python-metapackage',\n    *ARVADOS_PYTHON_MODULES,\n)\nArvadosPythonPackage.register(\n    'arvados-docker-cleaner',\n    'arvados_docker',\n    'services/dockercleaner',\n)\n\n### setuptools integration\n\nclass BuildArvadosVersion(setuptools.Command):\n    \"\"\"Write _version.py for an Arvados module\"\"\"\n    def initialize_options(self):\n        self.build_lib = None\n\n    def finalize_options(self):\n        self.set_undefined_options(\"build_py\", (\"build_lib\", \"build_lib\"))\n        arv_mod = ARVADOS_PYTHON_MODULES[self.distribution.get_name()]\n        self.out_path = Path(self.build_lib, arv_mod.version_file_path())\n\n    def run(self):\n        with self.out_path.open('w') as out_file:\n            print(f'__version__ = {self.distribution.get_version()!r}', file=out_file)\n\n    def get_outputs(self):\n        return [str(self.out_path)]\n\n    def get_source_files(self):\n        return []\n\n    def get_output_mapping(self):\n        return {}\n\n\nclass ArvadosBuildCommand(setuptools.command.build.build):\n    sub_commands = [\n        *setuptools.command.build.build.sub_commands,\n        ('build_arvados_version', None),\n    ]\n\n\nCMDCLASS = {\n    'build': ArvadosBuildCommand,\n    'build_arvados_version': BuildArvadosVersion,\n}\n"
  },
  {
    "path": "tools/cluster-activity/cluster-activity.cwl",
    "content": "#!/usr/bin/env cwl-runner\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ncwlVersion: v1.2\nclass: CommandLineTool\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\n  cwltool: \"http://commonwl.org/cwltool#\"\n\ndoc: |\n  This workflow reports on the data and workflows in an Arvados cluster to\n  help administrators understand growth and costs. It is entirely\n  self-contained: you can run this workflow with Workbench or\n  `arvados-cwl-runner` to generate a report.\n\ninputs:\n  reporting_start:\n    type: string\n    label: Report start date in `YYYY-MM-DD` format\n  reporting_end:\n    type: string?\n    label: Report end date in `YYYY-MM-DD` format\n    doc: Defaults to today if not provided\n\n  prometheus_host:\n    type: string?\n    label: Prometheus server URL\n    doc: The base URL of your Arvados cluster's Prometheus server, like `https://prometheus.arvados.example/`\n  prometheus_apikey:\n    type: string?\n    label: Prometheus API token\n  prometheus_user:\n    type: string?\n    label: Prometheus API username\n  prometheus_password:\n    type: string?\n    label: Prometheus API password\n  exclude:\n    type: string?\n    label: Exclude matching workflows\n    doc: Specify a Python regular expression. Workflows whose name match the expression will be excluded from the report.\n  include_workflow_steps:\n    type: boolean?\n    label: Include workflow steps?\n    doc: If set, individual workflow steps will be reported alongside their parent workflows.\n\nrequirements:\n  DockerRequirement:\n    dockerFile: |\n      FROM python:3.11-slim-bookworm\n      RUN pip install --no-cache-dir \"arvados-cluster-activity[prometheus]\"\n    dockerImageId: arvados/cluster-activity\n\n  InlineJavascriptRequirement:\n    expressionLib:\n      - |\n        function padZero(n) {\n            var s = n.toString();\n            if (s.length < 2) {\n                return \"0\" + s;\n            }\n            return s;\n        }\n\n      - |\n        function getDateWithDefault(dateString) {\n            if (!dateString) {\n                var now = new Date();\n                var yy = now.getFullYear();\n                // getMonth() is zero-based.\n                var mm = now.getMonth() + 1;\n                var dd = now.getDate();\n                return [yy.toString(), padZero(mm), padZero(dd)].join(\"-\");\n            }\n            return dateString;\n        }\n\n  arv:APIRequirement: {}\n\n  ResourceRequirement:\n    ramMin: 768\n\n  EnvVarRequirement:\n    envDef:\n      REQUESTS_CA_BUNDLE: /etc/arvados/ca-certificates.crt\n\n  InitialWorkDirRequirement:\n    listing:\n      - entryname: prometheus.env\n        entry: |-\n          PROMETHEUS_APIKEY=$(inputs.prometheus_apikey || '')\n          PROMETHEUS_HOST=$(inputs.prometheus_host || '')\n          PROMETHEUS_PASSWORD=$(inputs.prometheus_password || '')\n          PROMETHEUS_USER=$(inputs.prometheus_user || '')\n\nhints:\n  # Disable reuse because missing/empty reporting_end parameter means \"today\",\n  # which is variable and can be an incomplete day.\n  WorkReuse:\n    enableReuse: false\n\n  cwltool:Secrets:\n    secrets: [prometheus_apikey, prometheus_password]\n\narguments:\n  - arv-cluster-activity\n  - \"--prometheus-auth=prometheus.env\"\n  - {prefix: '--start', valueFrom: $(inputs.reporting_start)}\n  - {prefix: '--end', valueFrom: $(getDateWithDefault(inputs.reporting_end))}\n  - {prefix: '--exclude', valueFrom: $(inputs.exclude)}\n  - {prefix: 
'--html-report-file', valueFrom: report.html}\n  - {prefix: '--cost-report-file', valueFrom: cost.csv}\n  - {prefix: '--include-workflow-steps', valueFrom: $(inputs.include_workflow_steps)}\n\noutputs:\n  report:\n    type: File\n    outputBinding:\n      glob: report.html\n  costData:\n    type: File\n    outputBinding:\n      glob: cost.csv\n"
  },
  {
    "path": "tools/cluster-activity/fpm-info.sh",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ncase \"$TARGET\" in\n    debian12 | ubuntu2204 )\n        fpm_depends+=(libcurl4)\n        ;;\n\n    debian* | ubuntu* )\n        fpm_depends+=(libcurl4t64)\n        ;;\nesac\n"
  },
  {
    "path": "tools/cluster-activity/pyproject.toml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[build-system]\nrequires = [\"setuptools ~= 80.9\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\ndynamic = [\"dependencies\", \"version\"]\nname = \"arvados-cluster-activity\"\ndescription = \"Summarize Arvados cluster activity from audit logs and Prometheus metrics\"\nauthors = [\n  {name = \"Arvados\", email = \"info@arvados.org\"},\n]\nclassifiers = [\n  \"Development Status :: 5 - Production/Stable\",\n  \"Environment :: Console\",\n  \"Intended Audience :: Science/Research\",\n  \"Operating System :: POSIX\",\n  \"Programming Language :: Python :: 3\",\n  \"Programming Language :: Python :: 3.10\",\n  \"Programming Language :: Python :: 3.11\",\n  \"Programming Language :: Python :: 3.12\",\n  \"Programming Language :: Python :: 3.13\",\n  \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n]\nlicense = \"AGPL-3.0-only\"\nlicense-files = [\n  \"agpl-3.0.txt\",\n]\nreadme = \"README.rst\"\nrequires-python = \"~= 3.10\"\n\n[project.optional-dependencies]\nprometheus = [\n  \"prometheus-api-client[dataframe] ~= 0.7\",\n  # Our code calls Pandas methods directly, so we want to make sure we\n  # retain API compatibility. The requirement from prometheus-api-client\n  # is too broad to do that.\n  \"pandas ~= 2.3.3\",\n]\n\n[project.scripts]\narv-cluster-activity = \"arvados_cluster_activity.main:main\"\n\n[project.urls]\nHomepage = \"https://arvados.org\"\nDocumentation = \"https://doc.arvados.org\"\nRepository = \"https://github.com/arvados/arvados\"\nIssues = \"https://github.com/arvados/arvados/issues\"\nChangelog = \"https://arvados.org/releases/\"\n\n[tool.setuptools.data-files]\n\"share/doc/arvados-cluster-activity\" = [\n  \"agpl-3.0.txt\",\n  \"cluster-activity.cwl\",\n  \"README.rst\",\n]\n\n[tool.setuptools.packages.find]\nexclude = [\"tests*\"]\n"
  },
  {
    "path": "tools/cluster-activity/pytest.ini",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[pytest]\ntestpaths =\n  tests\n"
  },
  {
    "path": "tools/cluster-activity/setup.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport setuptools\nimport runpy\n\nfrom pathlib import Path\n\narvados_version = runpy.run_path(Path(__file__).with_name('arvados_version.py'))\narv_mod = arvados_version['ARVADOS_PYTHON_MODULES']['arvados-cluster-activity']\nversion = arv_mod.get_version()\nsetuptools.setup(\n    cmdclass=arvados_version['CMDCLASS'],\n    install_requires=[\n        *arv_mod.iter_dependencies(version=version),\n    ],\n    version=version,\n)\n"
  },
  {
    "path": "tools/cluster-activity/tests/test_prometheus.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport base64\nimport dataclasses\nimport errno\nimport os\nimport typing as t\n\nimport pytest\n\nfrom arvados_cluster_activity import main as aca_main\n\n_PROMETHEUS_ENVKEYS = [key for key in os.environ if key.startswith('PROMETHEUS_')]\n\n@dataclasses.dataclass\nclass PrometheusConnect:\n    url: str = 'http://127.0.0.1:9090'\n    headers: dict[str, str] | None = None\n    disable_ssl: bool = False\n    retry: 'urllib3.util.retry.Retry | None' = None\n    auth: tuple | None = None\n    proxy: dict | None = None\n    session: 'requests.sessions.Session | None' = None\n    timeout: int = None\n    method: str = 'GET'\n\n    _NONE_HOST: t.ClassVar[str] = 'http://[100::1234:5678:90ab:cdef]:48084/'\n    _NONE_HOST_ERR: t.ClassVar[int] = errno.ENETUNREACH\n\n    def __post_init__(self):\n        if self.url == self._NONE_HOST:\n            raise OSError(self._NONE_HOST_ERR, os.strerror(self._NONE_HOST_ERR))\n\n    def _check_host(self, expected):\n        assert self.url == expected\n\n    def _check_auth(self, token_or_user, password=None):\n        method, sep, auth = self.headers.get('Authorization', '').partition(' ')\n        assert sep, \"Authorization header has malformed value\"\n        if password is None:\n            assert method == 'Bearer'\n            assert auth == token_or_user\n        else:\n            assert method == 'Basic'\n            actual = base64.b64decode(auth)\n            assert actual == f\"{token_or_user}:{password}\".encode('utf-8')\n\n\n@pytest.fixture(autouse=True)\ndef clean_env(monkeypatch):\n    for key in _PROMETHEUS_ENVKEYS:\n        monkeypatch.delenv(key)\n    monkeypatch.setattr(aca_main, 'PrometheusConnect', PrometheusConnect)\n\n\ndef test_no_host():\n    assert aca_main.get_prometheus_client() is None\n\n\ndef test_no_creds(monkeypatch):\n    monkeypatch.setenv('PROMETHEUS_HOST', PrometheusConnect._NONE_HOST)\n    assert aca_main.get_prometheus_client() is None\n\n\ndef test_no_connection(monkeypatch):\n    monkeypatch.setenv('PROMETHEUS_HOST', PrometheusConnect._NONE_HOST)\n    monkeypatch.setenv('PROMETHEUS_APIKEY', 'NoAPIkey')\n    assert aca_main.get_prometheus_client() is None\n\n\ndef test_apikey(monkeypatch):\n    monkeypatch.setenv('PROMETHEUS_HOST', 'https://token.prom.invalid/')\n    monkeypatch.setenv('PROMETHEUS_APIKEY', 'testAPIKEY')\n    actual = aca_main.get_prometheus_client()\n    assert actual is not None\n    actual._check_host('https://token.prom.invalid/')\n    actual._check_auth('testAPIKEY')\n\n\ndef test_username_password(monkeypatch):\n    monkeypatch.setattr(aca_main, 'PrometheusConnect', PrometheusConnect)\n    monkeypatch.setenv('PROMETHEUS_HOST', 'https://namepass.prom.invalid/')\n    monkeypatch.setenv('PROMETHEUS_USER', 'testname')\n    monkeypatch.setenv('PROMETHEUS_PASSWORD', 'testpass')\n    actual = aca_main.get_prometheus_client()\n    assert actual is not None\n    actual._check_host('https://namepass.prom.invalid/')\n    actual._check_auth('testname', 'testpass')\n"
  },
  {
    "path": "tools/cluster-activity/tests/test_report.csv",
    "content": "Project,Workflow,Step,Sample,User,Submitted,Runtime,Cost\r\nWGS chr19 test for 2.7.2~rc3,WGS processing workflow scattered over samples (v1.1-2-gcf002b3),workflow runner,Sample1,User1,2024-04-05 20:38:07,1:19:21,0.113\r\nWGS chr19 test for 2.7.2~rc3,WGS processing workflow scattered over samples (v1.1-2-gcf002b3),bwamem-samtools-view,Sample1,User1,2024-04-05 20:40:42,0:08:22,0.116\r\n"
  },
  {
    "path": "tools/cluster-activity/tests/test_report.html",
    "content": "<!doctype html>\n<html>\n  <head>\n    <title>Cluster report for xzzz1 from 2024-04-04 to 2024-04-06</title>\n\n\n        <script type=\"text/javascript\" src=\"https://cdnjs.cloudflare.com/ajax/libs/dygraph/2.0.0/dygraph.min.js\"></script>\n        <script type=\"text/javascript\">\n        var chartdata = [{\"label\": \"Concurrent running containers\", \"charts\": [{\"data\": [[new Date(\"2024-04-06T11:00:00Z\"), 3], [new Date(\"2024-04-06T11:05:00Z\"), 5], [new Date(\"2024-04-06T11:10:00Z\"), 2], [new Date(\"2024-04-06T11:15:00Z\"), 5], [new Date(\"2024-04-06T11:20:00Z\"), 3]], \"options\": {\"legend\": \"always\", \"connectSeparatedPoints\": true, \"labels\": [\"date\", \"containers\"], \"includeZero\": true, \"title\": \"Concurrent running containers\"}}]}, {\"label\": \"Data under management\", \"charts\": [{\"data\": [[new Date(\"2024-04-06T11:00:00Z\"), 3], [new Date(\"2024-04-06T11:05:00Z\"), 5], [new Date(\"2024-04-06T11:10:00Z\"), 2], [new Date(\"2024-04-06T11:15:00Z\"), 5], [new Date(\"2024-04-06T11:20:00Z\"), 3]], \"options\": {\"legend\": \"always\", \"connectSeparatedPoints\": true, \"labels\": [\"date\", \"managed\"], \"includeZero\": true, \"title\": \"Data under management\"}}]}, {\"label\": \"Storage usage\", \"charts\": [{\"data\": [[new Date(\"2024-04-06T11:00:00Z\"), 3], [new Date(\"2024-04-06T11:05:00Z\"), 5], [new Date(\"2024-04-06T11:10:00Z\"), 2], [new Date(\"2024-04-06T11:15:00Z\"), 5], [new Date(\"2024-04-06T11:20:00Z\"), 3]], \"options\": {\"legend\": \"always\", \"connectSeparatedPoints\": true, \"labels\": [\"date\", \"used\"], \"includeZero\": true, \"title\": \"Storage usage\"}}]}];\n// Copyright (c) 2009 Dan Vanderkam. All rights reserved.\n//\n// SPDX-License-Identifier: MIT\n\n/**\n * Synchronize zooming and/or selections between a set of dygraphs.\n *\n * Usage:\n *\n *   var g1 = new Dygraph(...),\n *       g2 = new Dygraph(...),\n *       ...;\n *   var sync = Dygraph.synchronize(g1, g2, ...);\n *   // charts are now synchronized\n *   sync.detach();\n *   // charts are no longer synchronized\n *\n * You can set options using the last parameter, for example:\n *\n *   var sync = Dygraph.synchronize(g1, g2, g3, {\n *      selection: true,\n *      zoom: true\n *   });\n *\n * The default is to synchronize both of these.\n *\n * Instead of passing one Dygraph object as each parameter, you may also pass an\n * array of dygraphs:\n *\n *   var sync = Dygraph.synchronize([g1, g2, g3], {\n *      selection: false,\n *      zoom: true\n *   });\n *\n * You may also set `range: false` if you wish to only sync the x-axis.\n * The `range` option has no effect unless `zoom` is true (the default).\n *\n * Original source: https://github.com/danvk/dygraphs/blob/master/src/extras/synchronizer.js\n * at commit b55a71d768d2f8de62877c32b3aec9e9975ac389\n *\n * Copyright (c) 2009 Dan Vanderkam\n *\n * Permission is hereby granted, free of charge, to any person\n * obtaining a copy of this software and associated documentation\n * files (the \"Software\"), to deal in the Software without\n * restriction, including without limitation the rights to use,\n * copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following\n * conditions:\n *\n * The above copyright notice and this permission notice shall be\n * included in all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY 
OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n * OTHER DEALINGS IN THE SOFTWARE.\n */\n(function() {\n/* global Dygraph:false */\n'use strict';\n\nvar Dygraph;\nif (window.Dygraph) {\n  Dygraph = window.Dygraph;\n} else if (typeof(module) !== 'undefined') {\n  Dygraph = require('../dygraph');\n}\n\nvar synchronize = function(/* dygraphs..., opts */) {\n  if (arguments.length === 0) {\n    throw 'Invalid invocation of Dygraph.synchronize(). Need >= 1 argument.';\n  }\n\n  var OPTIONS = ['selection', 'zoom', 'range'];\n  var opts = {\n    selection: true,\n    zoom: true,\n    range: true\n  };\n  var dygraphs = [];\n  var prevCallbacks = [];\n\n  var parseOpts = function(obj) {\n    if (!(obj instanceof Object)) {\n      throw 'Last argument must be either Dygraph or Object.';\n    } else {\n      for (var i = 0; i < OPTIONS.length; i++) {\n        var optName = OPTIONS[i];\n        if (obj.hasOwnProperty(optName)) opts[optName] = obj[optName];\n      }\n    }\n  };\n\n  if (arguments[0] instanceof Dygraph) {\n    // Arguments are Dygraph objects.\n    for (var i = 0; i < arguments.length; i++) {\n      if (arguments[i] instanceof Dygraph) {\n        dygraphs.push(arguments[i]);\n      } else {\n        break;\n      }\n    }\n    if (i < arguments.length - 1) {\n      throw 'Invalid invocation of Dygraph.synchronize(). ' +\n            'All but the last argument must be Dygraph objects.';\n    } else if (i == arguments.length - 1) {\n      parseOpts(arguments[arguments.length - 1]);\n    }\n  } else if (arguments[0].length) {\n    // Invoked w/ list of dygraphs, options\n    for (var i = 0; i < arguments[0].length; i++) {\n      dygraphs.push(arguments[0][i]);\n    }\n    if (arguments.length == 2) {\n      parseOpts(arguments[1]);\n    } else if (arguments.length > 2) {\n      throw 'Invalid invocation of Dygraph.synchronize(). ' +\n            'Expected two arguments: array and optional options argument.';\n    }  // otherwise arguments.length == 1, which is fine.\n  } else {\n    throw 'Invalid invocation of Dygraph.synchronize(). ' +\n          'First parameter must be either Dygraph or list of Dygraphs.';\n  }\n\n  if (dygraphs.length < 2) {\n    throw 'Invalid invocation of Dygraph.synchronize(). 
' +\n          'Need two or more dygraphs to synchronize.';\n  }\n\n  var readycount = dygraphs.length;\n  for (var i = 0; i < dygraphs.length; i++) {\n    var g = dygraphs[i];\n    g.ready( function() {\n      if (--readycount == 0) {\n        // store original callbacks\n        var callBackTypes = ['drawCallback', 'highlightCallback', 'unhighlightCallback'];\n        for (var j = 0; j < dygraphs.length; j++) {\n          if (!prevCallbacks[j]) {\n            prevCallbacks[j] = {};\n          }\n          for (var k = callBackTypes.length - 1; k >= 0; k--) {\n            prevCallbacks[j][callBackTypes[k]] = dygraphs[j].getFunctionOption(callBackTypes[k]);\n          }\n        }\n\n        // Listen for draw, highlight, unhighlight callbacks.\n        if (opts.zoom) {\n          attachZoomHandlers(dygraphs, opts, prevCallbacks);\n        }\n\n        if (opts.selection) {\n          attachSelectionHandlers(dygraphs, prevCallbacks);\n        }\n      }\n    });\n  }\n\n  return {\n    detach: function() {\n      for (var i = 0; i < dygraphs.length; i++) {\n        var g = dygraphs[i];\n        if (opts.zoom) {\n          g.updateOptions({drawCallback: prevCallbacks[i].drawCallback});\n        }\n        if (opts.selection) {\n          g.updateOptions({\n            highlightCallback: prevCallbacks[i].highlightCallback,\n            unhighlightCallback: prevCallbacks[i].unhighlightCallback\n          });\n        }\n      }\n      // release references & make subsequent calls throw.\n      dygraphs = null;\n      opts = null;\n      prevCallbacks = null;\n    }\n  };\n};\n\nfunction arraysAreEqual(a, b) {\n  if (!Array.isArray(a) || !Array.isArray(b)) return false;\n  var i = a.length;\n  if (i !== b.length) return false;\n  while (i--) {\n    if (a[i] !== b[i]) return false;\n  }\n  return true;\n}\n\nfunction attachZoomHandlers(gs, syncOpts, prevCallbacks) {\n  var block = false;\n  for (var i = 0; i < gs.length; i++) {\n    var g = gs[i];\n    g.updateOptions({\n      drawCallback: function(me, initial) {\n        if (block || initial) return;\n        block = true;\n        var opts = {\n          dateWindow: me.xAxisRange()\n        };\n        if (syncOpts.range) opts.valueRange = me.yAxisRange();\n\n        for (var j = 0; j < gs.length; j++) {\n          if (gs[j] == me) {\n            if (prevCallbacks[j] && prevCallbacks[j].drawCallback) {\n              prevCallbacks[j].drawCallback.apply(this, arguments);\n            }\n            continue;\n          }\n\n          // Only redraw if there are new options\n          if (arraysAreEqual(opts.dateWindow, gs[j].getOption('dateWindow')) && \n              arraysAreEqual(opts.valueRange, gs[j].getOption('valueRange'))) {\n            continue;\n          }\n\n          gs[j].updateOptions(opts);\n        }\n        block = false;\n      }\n    }, true /* no need to redraw */);\n  }\n}\n\nfunction attachSelectionHandlers(gs, prevCallbacks) {\n  var block = false;\n  for (var i = 0; i < gs.length; i++) {\n    var g = gs[i];\n\n    g.updateOptions({\n      highlightCallback: function(event, x, points, row, seriesName) {\n        if (block) return;\n        block = true;\n        var me = this;\n        for (var i = 0; i < gs.length; i++) {\n          if (me == gs[i]) {\n            if (prevCallbacks[i] && prevCallbacks[i].highlightCallback) {\n              prevCallbacks[i].highlightCallback.apply(this, arguments);\n            }\n            continue;\n          }\n          var idx = gs[i].getRowForX(x);\n          if (idx !== 
null) {\n            gs[i].setSelection(idx, seriesName);\n          }\n        }\n        block = false;\n      },\n      unhighlightCallback: function(event) {\n        if (block) return;\n        block = true;\n        var me = this;\n        for (var i = 0; i < gs.length; i++) {\n          if (me == gs[i]) {\n            if (prevCallbacks[i] && prevCallbacks[i].unhighlightCallback) {\n              prevCallbacks[i].unhighlightCallback.apply(this, arguments);\n            }\n            continue;\n          }\n          gs[i].clearSelection();\n        }\n        block = false;\n      }\n    }, true /* no need to redraw */);\n  }\n}\n\nDygraph.synchronize = synchronize;\n\n})();\n\n// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nwindow.onload = function() {\n    var charts = {};\n    var fmt = {\n        iso: function(y) {\n            var s='';\n            if (y > 1000000000000000) { y=y/1000000000000000; s='P'; }\n            else if (y > 1000000000000) { y=y/1000000000000; s='T'; }\n            else if (y > 1000000000) { y=y/1000000000; s='G'; }\n            else if (y > 1000000) { y=y/1000000; s='M'; }\n            else if (y > 1000) { y=y/1000; s='K'; }\n            return y.toFixed(2).replace(/\\.0+$/, '')+s;\n        },\n        time: function(s) {\n            var ret = ''\n            if (s >= 86400) ret += Math.floor(s/86400) + 'd'\n            if (s >= 3600) ret += Math.floor(s/3600)%24 + 'h'\n            if (s >= 60) ret += Math.floor(s/60)%60 + 'm'\n            ret += Math.floor(s)%60 + 's'\n            // finally, strip trailing zeroes: 1d0m0s -> 1d\n            return ret.replace(/(\\D)(0\\D)*$/, '$1')\n        },\n        date: function(s, opts, sth, dg, idk, excludeHour) {\n            var date = new Date(s);\n            var options = {month: 'numeric', day: 'numeric'};\n            if (!excludeHour) {\n                options.hour = 'numeric';\n                options.minute = 'numeric';\n                options.hour12 = false;\n            }\n            var r = new Intl.DateTimeFormat(undefined, options).format(date);\n            return r;\n        },\n    }\n    var ticker = {\n        time: function(min, max, pixels, opts, dg) {\n            var max_ticks = Math.floor(pixels / (opts('axisLabelWidth')+opts('pixelsPerLabel')/2))\n            var natural = [1, 5, 10, 30, 60,\n                           120, 300, 600, 1800, 3600,\n                           7200, 14400, 43200, 86400]\n            var interval = natural.shift()*1000\n            while (max>min && (max-min)/interval > max_ticks) {\n                interval = (natural.shift()*1000) || (interval * 2)\n            }\n            var ticks = []\n            var excludeHour = false;\n            var date = new Date(min);\n            // need to take the seconds since midnight and then round off to the nearest interval.\n            var millisecondsSinceMidnight = (date.getHours() * 3600 + date.getMinutes() * 60 + date.getSeconds()) * 1000;\n            if (interval >= 86400000) {\n                excludeHour = true;\n            } else {\n                var roundedOff = Math.ceil(millisecondsSinceMidnight/interval)*interval;\n                min = (min - millisecondsSinceMidnight) + roundedOff;\n            }\n            //for (var i=Math.ceil(min/interval)*interval; i<=max; i+=interval) {\n            for (var i=min; i<=max; i+=interval) {\n                ticks.push({v: i, label: opts('axisLabelFormatter')(i, opts, \"\", false, false, 
excludeHour)})\n            }\n            return ticks\n        },\n    }\n    chartdata.forEach(function(section, section_idx) {\n        var chartDiv = document.getElementById(\"chart\");\n        section.charts.forEach(function(chart, chart_idx) {\n            // Skip chart if every series has zero data points\n            if (0 == chart.data.reduce(function(len, series) {\n                return len + series.length;\n            }, 0)) {\n                return;\n            }\n            var id = 'chart-'+section_idx+'-'+chart_idx;\n            var div = document.createElement('div');\n            div.setAttribute('id', id);\n            div.setAttribute('style', 'width: 100%; height: 250px');\n            chartDiv.appendChild(div);\n            chart.options.valueFormatter = function(y) {\n            }\n            chart.options.axes = {\n                x: {\n                    axisLabelFormatter: fmt.date,\n                    valueFormatter: fmt.date,\n                    ticker: ticker.time,\n                    axisLabelWidth: 60,\n                    pixelsPerLabel: 20,\n                },\n                y: {\n                    axisLabelFormatter: fmt.iso,\n                    valueFormatter: fmt.iso,\n                },\n            }\n            var div2 = document.createElement('div');\n            div2.setAttribute('style', 'width: 150px; height: 250px');\n            chart.options.labelsDiv = div2;\n            chart.options.labelsSeparateLines = true;\n\n            var div3 = document.createElement('div');\n            div3.setAttribute('style', 'display: flex; padding-bottom: 16px');\n            div3.appendChild(div);\n            div3.appendChild(div2);\n            chartDiv.appendChild(div3);\n\n            charts[id] = new Dygraph(div, chart.data, chart.options);\n        });\n    });\n\n    var sync = Dygraph.synchronize(Object.values(charts), {range: false});\n\n    if (typeof window.debug === 'undefined')\n        window.debug = {};\n    window.debug.charts = charts;\n};\n\n/**\n * Copyright Jonas Earendel. All rights reserved.\n * SPDX-License-Identifier: Unlicense\n *\n * sortable v3.2.3\n *\n * https://www.npmjs.com/package/sortable-tablesort\n * https://github.com/tofsjonas/sortable\n *\n * Makes html tables sortable, No longer ie9+ 😢\n *\n * Styling is done in css.\n *\n * Copyleft 2017 Jonas Earendel\n *\n * This is free and unencumbered software released into the public domain.\n *\n * Anyone is free to copy, modify, publish, use, compile, sell, or\n * distribute this software, either in source code form or as a compiled\n * binary, for any purpose, commercial or non-commercial, and by any\n * means.\n *\n * In jurisdictions that recognize copyright laws, the author or authors\n * of this software dedicate any and all copyright interest in the\n * software to the public domain. We make this dedication for the benefit\n * of the public at large and to the detriment of our heirs and\n * successors. 
We intend this dedication to be an overt act of\n * relinquishment in perpetuity of all present and future rights to this\n * software under copyright law.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n * OTHER DEALINGS IN THE SOFTWARE.\n *\n * For more information, please refer to <http://unlicense.org>\n *\n */\ndocument.addEventListener('click', function (e) {\n    try {\n        // allows for elements inside TH\n        function findElementRecursive(element, tag) {\n            return element.nodeName === tag ? element : findElementRecursive(element.parentNode, tag);\n        }\n        var ascending_table_sort_class = 'asc';\n        var no_sort_class = 'no-sort';\n        var null_last_class = 'n-last';\n        var table_class_name = 'sortable';\n        var alt_sort_1 = e.shiftKey || e.altKey;\n        var element = findElementRecursive(e.target, 'TH');\n        var tr = element.parentNode;\n        var thead = tr.parentNode;\n        var table = thead.parentNode;\n        function getValue(element) {\n            var _a;\n            var value = alt_sort_1 ? element.dataset.sortAlt : (_a = element.dataset.sort) !== null && _a !== void 0 ? _a : element.textContent;\n            return value;\n        }\n        if (thead.nodeName === 'THEAD' && // sortable only triggered in `thead`\n            table.classList.contains(table_class_name) &&\n            !element.classList.contains(no_sort_class) // .no-sort is now core functionality, no longer handled in CSS\n        ) {\n            var column_index_1;\n            var nodes = tr.cells;\n            var tiebreaker_1 = +element.dataset.sortTbr;\n            // Reset thead cells and get column index\n            for (var i = 0; i < nodes.length; i++) {\n                if (nodes[i] === element) {\n                    column_index_1 = +element.dataset.sortCol || i;\n                }\n                else {\n                    nodes[i].setAttribute('aria-sort', 'none');\n                }\n            }\n            var direction = 'descending';\n            if (element.getAttribute('aria-sort') === 'descending' ||\n                (table.classList.contains(ascending_table_sort_class) && element.getAttribute('aria-sort') !== 'ascending')) {\n                direction = 'ascending';\n            }\n            // Update the `th` class accordingly\n            element.setAttribute('aria-sort', direction);\n            var reverse_1 = direction === 'ascending';\n            var sort_null_last_1 = table.classList.contains(null_last_class);\n            var compare_1 = function (a, b, index) {\n                var x = getValue(b.cells[index]);\n                var y = getValue(a.cells[index]);\n                if (sort_null_last_1) {\n                    if (x === '' && y !== '') {\n                        return -1;\n                    }\n                    if (y === '' && x !== '') {\n                        return 1;\n                    }\n                }\n                // Before comparing, clean up formatted numbers that may have a leading dollar sign and/or commas.\n                x = x.replace(\"$\", \"\").replace(\",\", \"\");\n           
     y = y.replace(\"$\", \"\").replace(\",\", \"\");\n                var temp = +x - +y;\n                var bool = isNaN(temp) ? x.localeCompare(y) : temp;\n                return reverse_1 ? -bool : bool;\n            };\n            // loop through all tbodies and sort them\n            for (var i = 0; i < table.tBodies.length; i++) {\n                var org_tbody = table.tBodies[i];\n                // Put the array rows in an array, so we can sort them...\n                var rows = [].slice.call(org_tbody.rows, 0);\n                // Sort them using Array.prototype.sort()\n                rows.sort(function (a, b) {\n                    var bool = compare_1(a, b, column_index_1);\n                    return bool === 0 && !isNaN(tiebreaker_1) ? compare_1(a, b, tiebreaker_1) : bool;\n                });\n                // Make an empty clone\n                var clone_tbody = org_tbody.cloneNode();\n                // Put the sorted rows inside the clone\n                clone_tbody.append.apply(clone_tbody, rows);\n                // And finally replace the unsorted tbody with the sorted one\n                table.replaceChild(clone_tbody, org_tbody);\n            }\n        }\n        // eslint-disable-next-line no-unused-vars\n    }\n    catch (error) {\n        // console.log(error)\n    }\n});\n\n        </script>\n\n\n    <style>\n        body {\n          background: #fafafa;\n          font-family: \"Roboto\", \"Helvetica\", \"Arial\", sans-serif;\n          font-size: 0.875rem;\n          color: rgba(0, 0, 0, 0.87);\n          font-weight: 400;\n        }\n        .card {\n          background: #ffffff;\n          box-shadow: 0px 1px 5px 0px rgba(0,0,0,0.2),0px 2px 2px 0px rgba(0,0,0,0.14),0px 3px 1px -2px rgba(0,0,0,0.12);\n          border-radius: 4px;\n          margin: 20px;\n        }\n        .content {\n          padding: 2px 16px 8px 16px;\n        }\n        table {\n          border-spacing: 0px;\n        }\n        tr {\n          height: 36px;\n          text-align: left;\n        }\n        th {\n          padding-right: 4em;\n          border-top: 1px solid rgba(224, 224, 224, 1);\n        }\n        td {\n          padding-right: 2em;\n          border-top: 1px solid rgba(224, 224, 224, 1);\n        }\n        #chart {\n          margin-left: -20px;\n        }\n    </style>\n    \n\n<style>\n@charset \"UTF-8\";\n.sortable thead th:not(.no-sort) {\n  cursor: pointer;\n}\n.sortable thead th:not(.no-sort)::after, .sortable thead th:not(.no-sort)::before {\n  transition: color 0.1s ease-in-out;\n  font-size: 1.2em;\n  color: transparent;\n}\n.sortable thead th:not(.no-sort)::after {\n  margin-left: 3px;\n  content: \"▸\";\n}\n.sortable thead th:not(.no-sort):hover::after {\n  color: inherit;\n}\n.sortable thead th:not(.no-sort)[aria-sort=descending]::after {\n  color: inherit;\n  content: \"▾\";\n}\n.sortable thead th:not(.no-sort)[aria-sort=ascending]::after {\n  color: inherit;\n  content: \"▴\";\n}\n.sortable thead th:not(.no-sort).indicator-left::after {\n  content: \"\";\n}\n.sortable thead th:not(.no-sort).indicator-left::before {\n  margin-right: 3px;\n  content: \"▸\";\n}\n.sortable thead th:not(.no-sort).indicator-left:hover::before {\n  color: inherit;\n}\n.sortable thead th:not(.no-sort).indicator-left[aria-sort=descending]::before {\n  color: inherit;\n  content: \"▾\";\n}\n.sortable thead th:not(.no-sort).indicator-left[aria-sort=ascending]::before {\n  color: inherit;\n  content: \"▴\";\n}\n\ntable.aggtable td:nth-child(2) {\n  text-align: 
right;\n}\n\ntable.active-projects td:nth-child(4),\ntable.active-projects td:nth-child(5) {\n  text-align: right;\n  padding-right: 6em;\n}\n\ntable.single-project td:nth-child(3),\ntable.single-project td:nth-child(4) {\n  text-align: right;\n  padding-right: 6em;\n}\n\ntable.active-projects th:nth-child(4),\ntable.active-projects th:nth-child(5) {\n  text-align: left;\n}\n\ntable.project td:nth-child(3),\ntable.project td:nth-child(4),\ntable.project td:nth-child(5),\ntable.project td:nth-child(6),\ntable.project td:nth-child(7) {\n  text-align: right;\n  padding-right: 6em;\n}\n\ntable.project th:nth-child(3),\ntable.project th:nth-child(4),\ntable.project th:nth-child(5),\ntable.project th:nth-child(6),\ntable.project th:nth-child(7) {\n  text-align: left;\n}\n</style>\n\n<link rel=\"stylesheet\" href=\"https://cdnjs.cloudflare.com/ajax/libs/dygraph/2.0.0/dygraph.min.css\">\n\n\n\n\n  </head>\n\n  <body>\n  <div class=\"card\">\n    <div class=\"content\">\n      <h1>Cluster report for xzzz1 from 2024-04-04 to 2024-04-06</h1>\n    </div>\n  </div>\n\n\n                <div class=\"card\">\n                  <div class=\"content\">\n<h2>Cluster status as of 2024-04-06</h2>\n            <table class='aggtable'><tbody>\n            <tr><th><a href=\"https://xzzz1.arvadosapi.com/users\">Total users</a></th><td>4</td></tr>\n            <tr><th>Total projects</th><td>6</td></tr>\n            \n            <tr><th>Total data under management</th> <td>0.003 KB</td></tr>\n            <tr><th>Total storage usage</th> <td>0.003 KB</td></tr>\n            <tr><th>Deduplication ratio</th> <td>1.0</td></tr>\n            <tr><th>Approximate monthly storage cost</th> <td>$0.00</td></tr>\n                \n            </tbody></table>\n            <p>See <a href=\"#prices\">note on usage and cost calculations</a> for details on how costs are calculated.</p>\n            \n                  </div>\n                </div>\n\n                <div class=\"card\">\n                  <div class=\"content\">\n<h2>Activity and cost over the 2 day period 2024-04-04 to 2024-04-06</h2>\n        <table class='aggtable'><tbody>\n        <tr><th>Active users</th> <td>1</td></tr>\n        <tr><th><a href=\"#Active_Projects\">Active projects</a></th> <td>1</td></tr>\n        <tr><th>Workflow runs</th> <td>1</td></tr>\n        <tr><th>Compute used</th> <td>1.5 hours</td></tr>\n        <tr><th>Compute cost</th> <td>$0.23</td></tr>\n        <tr><th>Storage cost</th> <td>$0.00</td></tr>\n        </tbody></table>\n        <p>See <a href=\"#prices\">note on usage and cost calculations</a> for details on how costs are calculated.</p>\n        \n                  </div>\n                </div>\n\n                <div class=\"card\">\n                  <div class=\"content\">\n\n                <div id=\"chart\"></div>\n            \n                  </div>\n                </div>\n\n                <div class=\"card\">\n                  <div class=\"content\">\n\n            <a id=\"Active_Projects\"><h2>Active Projects</h2></a>\n            <table class='sortable active-projects'>\n            <thead><tr><th>Project</th> <th>Users</th> <th>Active</th> <th>Compute usage (hours)</th> <th>Compute cost</th> </tr></thead>\n            <tbody><tr><td><a href=\"#WGS chr19 test for 2.7.2~rc3\">WGS chr19 test for 2.7.2~rc3</a></td><td>User1</td> <td>2024-04-05 to 2024-08-22</td> <td>1.5</td> <td>$0.23</td></tr></tbody>\n            </table>\n            <p>See <a href=\"#prices\">note on usage and cost calculations</a> for details 
on how costs are calculated.</p>\n            \n                  </div>\n                </div>\n\n                <div class=\"card\">\n                  <div class=\"content\">\n<a id=\"WGS chr19 test for 2.7.2~rc3\"></a><a href=\"https://xzzz1.arvadosapi.com/projects/pirca-j7d0g-cukk4aw4iamj90c\"><h2>WGS chr19 test for 2.7.2~rc3</h2></a>\n\n                <table class='sortable single-project'>\n                <thead><tr> <th>Users</th> <th>Active</th> <th>Compute usage (hours)</th> <th>Compute cost</th> </tr></thead>\n                <tbody><tr><td>User1</td> <td>2024-04-05 to 2024-08-22</td> <td>1.5</td> <td>$0.23</td></tr></tbody>\n                </table>\n\n                <table class='sortable project'>\n                <thead><tr><th>Workflow run count</th> <th>Workflow name</th> <th>Median runtime</th> <th>Mean runtime</th> <th>Median cost per run</th> <th>Mean cost per run</th> <th>Sum cost over runs</th></tr></thead>\n                <tbody>\n                \n                <tr><td>1</td> <td>WGS processing workflow scattered over samples (v1.1-2-gcf002b3)</td> <td>1:19:21</td> <td>1:19:21</td> <td>$1.37</td> <td>$1.37</td> <td>$1.37</td></tr>\n                \n                </tbody></table>\n                \n                  </div>\n                </div>\n\n                <div class=\"card\">\n                  <div class=\"content\">\n\n        <h2 id=\"prices\">Note on usage and cost calculations</h2>\n\n        <div style=\"max-width: 60em\">\n\n        <p>The numbers presented in this report are estimates and will\n        not perfectly match your cloud bill.  Nevertheless this report\n        should be useful for identifying your main cost drivers.</p>\n\n        <h3>Storage</h3>\n\n        <p>\"Total data under management\" is what you get if you add up\n        all blocks referenced by all collections in Workbench, without\n        considering deduplication.</p>\n\n        <p>\"Total storage usage\" is the actual underlying storage\n        usage, accounting for data deduplication.</p>\n\n        <p>Storage costs are based on AWS \"S3 Standard\"\n        described on the <a href=\"https://aws.amazon.com/s3/pricing/\">Amazon S3 pricing</a> page:</p>\n\n        <ul>\n        <li>$0.023 per GB / Month for the first 50 TB</li>\n        <li>$0.022 per GB / Month for the next 450 TB</li>\n        <li>$0.021 per GB / Month over 500 TB</li>\n        </ul>\n\n        <p>Finally, this is only the base storage cost, and does not\n        include any fees associated with S3 API usage.  However, there\n        are generally no ingress/egress fees if your Arvados instance\n        and S3 bucket are in the same region, which is the normal\n        recommended configuration.</p>\n\n        <h3>Compute</h3>\n\n        <p>\"Compute usage\" is the number of instance-hours used in running\n        workflows.  Because multiple steps may run in parallel on\n        multiple instances, a workflow that completes in four hours\n        but runs parallel steps on five instances would be reported\n        as using 20 instance hours.</p>\n\n        <p>\"Runtime\" is the actual wall clock time that it took to\n        complete a workflow.  This does not include time spent in the\n        queue for the workflow itself, but does include queuing time\n        of individual workflow steps.</p>\n\n        <p>Computational costs are derived from Arvados cost\n        calculations of container runs.  
For on-demand instances, this\n        uses the prices from the InstanceTypes section of the Arvados\n        config file, set by the system administrator.  For spot\n        instances, this uses current spot prices retrieved on the fly\n        from the AWS API.</p>\n\n        <p>Be aware that the cost calculations are only for the time\n        the container is running and do not take into account the\n        overhead of launching instances or idle time between scheduled\n        tasks or prior to automatic shutdown.</p>\n\n        </div>\n        \n                  </div>\n                </div>\n\n  </body>\n</html>\n        "
  },
  {
    "path": "tools/cluster-activity/tests/test_report.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport datetime\n\nfrom unittest import mock\n\nfrom io import StringIO\n\nfrom arvados_cluster_activity.report import ClusterActivityReport\n\nclass _TestingClusterActivityReport(ClusterActivityReport):\n    def report_from_api(self, since, to, include_steps, exclude):\n        items = [\n            {\n                \"Project\": \"WGS chr19 test for 2.7.2~rc3\",\n                \"ProjectUUID\": \"pirca-j7d0g-cukk4aw4iamj90c\",\n                \"Workflow\": \"WGS processing workflow scattered over samples (v1.1-2-gcf002b3)\",\n                \"WorkflowUUID\": \"none\",\n                \"Step\": \"workflow runner\",\n                \"StepUUID\": \"pirca-xvhdp-zyv7bm0tl3lm2nv\",\n                \"Sample\": \"Sample1\",\n                \"SampleUUID\": \"pirca-xvhdp-zyv7bm0tl3lm2nv\",\n                \"User\": \"User1\",\n                \"UserUUID\": \"jutro-tpzed-a4qnxq3pcfcgtkz\",\n                \"Submitted\": \"2024-04-05 20:38:07\",\n                \"Started\": \"2024-04-05 20:40:31\",\n                \"Finished\": \"2024-08-22 12:34:56\",\n                \"Runtime\": \"1:19:21\",\n                \"Cost\": 0.113,\n                \"CumulativeCost\": 1.371,\n            },\n\n            # WGS chr19 test for 2.7.2~rc3,pirca-j7d0g-cukk4aw4iamj90c,workflow run from command line,none,bwamem-samtools-view,pirca-xvhdp-e63h0f57of5cr3t,WGS processing workflow scattered over samples (v1.1-2-gcf002b3),WGS processing workflow scattered over samples (v1.1-2-gcf002b3),Peter Amstutz,jutro-tpzed-a4qnxq3pcfcgtkz,2024-04-05 20:40:42,2024-04-05 20:43:20,0:08:52,0.121,0.121\n            {\n                \"Project\": \"WGS chr19 test for 2.7.2~rc3\",\n                \"ProjectUUID\": \"pirca-j7d0g-cukk4aw4iamj90c\",\n                \"Workflow\": \"WGS processing workflow scattered over samples (v1.1-2-gcf002b3)\",\n                \"WorkflowUUID\": \"none\",\n                \"Step\": \"bwamem-samtools-view\",\n                \"StepUUID\": \"pirca-xvhdp-e63h0f57of5cr3t\",\n                \"Sample\": \"Sample1\",\n                \"SampleUUID\": \"pirca-xvhdp-zyv7bm0tl3lm2nv\",\n                \"User\": \"User1\",\n                \"UserUUID\": \"jutro-tpzed-a4qnxq3pcfcgtkz\",\n                \"Submitted\": \"2024-04-05 20:40:42\",\n                \"Started\": \"2024-04-05 20:43:20\",\n                \"Finished\": \"2024-04-05 20:51:40\",\n                \"Runtime\": \"0:08:22\",\n                \"Cost\": 0.116,\n                \"CumulativeCost\": 0.116,\n            },\n        ]\n\n        for i in items:\n            self.collect_summary_stats(i)\n            yield i\n\n        self.total_users = 4\n        self.total_projects = 6\n\n    def collect_graph(self, since, to, metric, resample_to, extra=None):\n        items = [[datetime.datetime(year=2024, month=4, day=6, hour=11, minute=0, second=0), 3],\n                 [datetime.datetime(year=2024, month=4, day=6, hour=11, minute=5, second=0), 5],\n                 [datetime.datetime(year=2024, month=4, day=6, hour=11, minute=10, second=0), 2],\n                 [datetime.datetime(year=2024, month=4, day=6, hour=11, minute=15, second=0), 5],\n                 [datetime.datetime(year=2024, month=4, day=6, hour=11, minute=20, second=0), 3]]\n\n        for i in items:\n            if extra:\n                extra(i[0], i[1])\n\n        return items\n\n    def today(self):\n        return datetime.date(year=2024, 
month=4, day=6)\n\n@mock.patch(\"arvados.api\")\ndef test_report(apistub):\n\n    write_report = False\n\n    apistub().config.return_value = {\n        \"ClusterID\": \"xzzz1\",\n        \"Services\": {\n            \"Workbench2\": {\n                \"ExternalURL\": \"https://xzzz1.arvadosapi.com\"\n            },\n        },\n    }\n\n    prom_client = mock.MagicMock()\n    report_obj = _TestingClusterActivityReport(prom_client)\n\n    ## test CSV report\n    csvreport = StringIO()\n    report_obj.csv_report(datetime.datetime(year=2024, month=4, day=4),\n                          datetime.datetime(year=2024, month=4, day=6),\n                          csvreport,\n                          True,\n                          None,\n                          \"\")\n    if write_report:\n        with open(\"tests/test_report.csv\", \"wt\") as f:\n            f.write(csvreport.getvalue())\n\n    with open(\"tests/test_report.csv\", \"rt\", newline='') as f:\n        assert csvreport.getvalue() == f.read()\n\n\n    ## test HTML report\n    htmlreport = report_obj.html_report(datetime.datetime(year=2024, month=4, day=4),\n                                        datetime.datetime(year=2024, month=4, day=6),\n                                        \"\",\n                                        True)\n\n    if write_report:\n        with open(\"tests/test_report.html\", \"wt\") as f:\n            f.write(htmlreport)\n\n    with open(\"tests/test_report.html\", \"rt\") as f:\n        assert f.read() == htmlreport\n"
  },
  {
    "path": "tools/compute-images/.gitignore",
    "content": "*pem\nsecrets/*\nkeypairs/*\n/*_config.json\n/*_config.yml\n"
  },
  {
    "path": "tools/compute-images/README.md",
    "content": "# Arvados Compute Node Image Builder\n\nThis directory includes templates to build custom cloud images for Arvados compute nodes. For instructions, refer to the [Arvados cloud compute node image documentation](https://doc.arvados.org/install/crunch2-cloud/install-compute-node.html).\n\n## Development\n\nIf you are developing the Ansible playbook, note that you can test it by [running the Ansible playbook independently](https:///doc.arvados.org/install/crunch2-cloud/install-compute-node.html#ansible-build) of Packer.\n\n### Managed Node Requirements\n\nFor testing, you'll need a Debian or Ubuntu system where you don't mind messing with the system configuration. It can be a virtual machine. You must set up the following before you run Ansible (this is stuff that's typically preconfigured in the cloud):\n\n* Install `openssh-server`, `python3`, and `sudo`\n* Set up a user account for yourself that is allowed to SSH in and use `sudo`\n\n### Configuration Requirements\n\nYou must have an Arvados cluster configuration. You can start by copying the defaults from the Arvados source in `arvados/lib/config/config.default.yml`. After you make your copy, you should change the example identifier `xxxxx` under `Clusters` to a unique five-alphanumeric identifier for your test cluster. It SHOULD start with `z` so it's easily identifiable as a test cluster. You may also change other settings that you specifically want to test such as `Containers.RuntimeEngine`.\n\nOnce you have this, you can start [following the Ansible build instructions](https:///doc.arvados.org/install/crunch2-cloud/install-compute-node.html#ansible-build). When you write `host_config.yml`, set `arvados_config_file` to the ABSOLUTE path of the cluster configuration file you wrote, and `arvados_cluster_id` to the cluster identifier you wrote in there under `Clusters`.\n\nYou must define at least one public SSH key for the Crunch user. The easiest way to do this is to set `compute_authorized_keys` in your `host_config.yml` and point it to one of your SSH public keys or `authorized_keys` file. If you set `Containers.DispatchPrivateKey` in your Arvados cluster configuration file, that's sufficient too.\n"
  },
  {
    "path": "tools/compute-images/aws_config.example.json",
    "content": "{\n  \"ansible_command\": \"ansible-playbook\",\n  \"ansible_vars_file\": \"host_config.yml\",\n  \"arvados_cluster\": \"zzzzz\",\n  \"aws_profile\": \"\",\n  \"aws_region\": \"\",\n  \"aws_access_key\": \"\",\n  \"aws_secret_key\": \"\",\n  \"aws_source_ami\": \"ami-0707d92f84e248ff3\",\n  \"aws_volume_gb\": \"20\",\n  \"aws_associate_public_ip_address\": \"true\",\n  \"aws_ena_support\": \"true\",\n  \"ssh_user\": \"admin\",\n  \"subnet_id\": \"\",\n  \"vpc_id\": \"\"\n}\n"
  },
  {
    "path": "tools/compute-images/aws_template.json",
    "content": "{\n  \"variables\": {\n    \"ansible_command\": \"ansible-playbook\",\n    \"ansible_vars_file\": \"\",\n    \"arvados_cluster\": \"\",\n    \"aws_access_key\": \"\",\n    \"aws_profile\": \"\",\n    \"aws_region\": \"\",\n    \"aws_secret_key\": \"\",\n    \"aws_source_ami\": \"ami-0a9d5908c7201e91d\",\n    \"aws_volume_gb\": \"20\",\n    \"aws_associate_public_ip_address\": \"true\",\n    \"aws_ena_support\": \"true\",\n    \"ssh_user\": \"admin\",\n    \"subnet_id\": \"\",\n    \"vpc_id\": \"\"\n  },\n  \"builders\": [{\n    \"type\": \"amazon-ebs\",\n    \"profile\": \"{{ user `aws_profile`}}\",\n    \"access_key\": \"{{user `aws_access_key`}}\",\n    \"secret_key\": \"{{user `aws_secret_key`}}\",\n    \"region\": \"{{user `aws_region`}}\",\n    \"ena_support\": \"{{user `aws_ena_support`}}\",\n    \"source_ami\": \"{{user `aws_source_ami`}}\",\n    \"instance_type\": \"m5.large\",\n    \"vpc_id\": \"{{user `vpc_id`}}\",\n    \"subnet_id\": \"{{user `subnet_id`}}\",\n    \"associate_public_ip_address\": \"{{user `aws_associate_public_ip_address`}}\",\n    \"ssh_username\": \"{{user `ssh_user`}}\",\n    \"temporary_key_pair_type\": \"ed25519\",\n    \"ami_name\": \"arvados-{{user `arvados_cluster`}}-compute-{{isotime \\\"20060102150405\\\"}}\",\n    \"launch_block_device_mappings\": [{\n      \"device_name\": \"/dev/xvda\",\n      \"volume_size\": \"{{user `aws_volume_gb`}}\",\n      \"volume_type\": \"gp3\",\n      \"delete_on_termination\": true\n    }],\n    \"ami_block_device_mappings\": [\n      {\n        \"device_name\": \"/dev/xvdb\",\n        \"encrypted\": true,\n        \"virtual_name\": \"ephemeral0\"\n      },\n      {\n        \"device_name\": \"/dev/xvdc\",\n        \"encrypted\": true,\n        \"virtual_name\": \"ephemeral1\"\n      }\n    ],\n    \"tags\": {\n      \"Name\": \"arvados-{{user `arvados_cluster`}}-compute\",\n      \"creation_date\": \"{{isotime \\\"20060102150405\\\"}}\",\n      \"packer\": \"true\"\n    },\n    \"run_tags\": {\n      \"Name\": \"packer-arvados-{{user `arvados_cluster`}}-compute-builder\",\n      \"creation_date\": \"{{isotime \\\"20060102150405\\\"}}\",\n      \"environment\": \"development\"\n    },\n    \"run_volume_tags\": {\n      \"Name\": \"packer-arvados-{{user `arvados_cluster`}}-compute-builder\",\n      \"creation_date\": \"{{isotime \\\"20060102150405\\\"}}\",\n      \"environment\": \"development\"\n    }\n  }],\n  \"provisioners\": [{\n      \"type\": \"shell\",\n      \"inline\": [ \"if cloud-init --version; then sudo cloud-init status --wait; else : ; fi\" ]\n  }, {\n      \"type\": \"ansible\",\n      \"command\": \"{{user `ansible_command`}}\",\n      \"playbook_file\": \"../ansible/build-compute-image.yml\",\n      \"user\": \"{{user `ssh_user`}}\",\n      \"extra_arguments\": [\n          \"--extra-vars\", \"arvados_cluster_id={{ user `arvados_cluster` }}\",\n          \"--extra-vars\", \"@{{ user `ansible_vars_file` }}\"\n      ]\n  }]\n}\n"
  },
  {
    "path": "tools/compute-images/azure_config.example.json",
    "content": "{\n  \"ansible_command\": \"ansible-playbook\",\n  \"ansible_vars_file\": \"host_config.yml\",\n  \"account_file\": \"\",\n  \"arvados_cluster\": \"zzzzz\",\n  \"build_environment\": \"azure-arm\",\n  \"client_id\": \"{{env `ARM_CLIENT_ID`}}\",\n  \"client_secret\": \"{{env `ARM_CLIENT_SECRET`}}\",\n  \"cloud_environment_name\": \"Public\",\n  \"image_sku\": \"\",\n  \"location\": \"centralus\",\n  \"project_id\": \"\",\n  \"resource_group\": null,\n  \"ssh_private_key_file\": \"{{env `PACKERPRIVKEY`}}\",\n  \"ssh_user\": \"packer\",\n  \"subscription_id\": \"{{env `ARM_SUBSCRIPTION_ID`}}\",\n  \"tenant_id\": \"{{env `ARM_TENANT_ID`}}\"\n}\n"
  },
  {
    "path": "tools/compute-images/azure_template.json",
    "content": "{\n  \"variables\": {\n    \"ansible_command\": \"ansible-playbook\",\n    \"ansible_vars_file\": \"\",\n    \"account_file\": \"\",\n    \"arvados_cluster\": \"\",\n    \"build_environment\": \"azure-arm\",\n    \"client_id\": \"{{env `ARM_CLIENT_ID`}}\",\n    \"client_secret\": \"{{env `ARM_CLIENT_SECRET`}}\",\n    \"cloud_environment_name\": \"Public\",\n    \"image_sku\": \"\",\n    \"location\": \"centralus\",\n    \"project_id\": \"\",\n    \"resource_group\": null,\n    \"ssh_private_key_file\": \"{{env `PACKERPRIVKEY`}}\",\n    \"ssh_user\": \"packer\",\n    \"subscription_id\": \"{{env `ARM_SUBSCRIPTION_ID`}}\",\n    \"tenant_id\": \"{{env `ARM_TENANT_ID`}}\"\n  },\n  \"builders\": [\n    {\n      \"type\": \"azure-arm\",\n      \"cloud_environment_name\": \"{{user `cloud_environment_name`}}\",\n\n      \"client_id\": \"{{user `client_id`}}\",\n      \"client_secret\": \"{{user `client_secret`}}\",\n      \"subscription_id\": \"{{user `subscription_id`}}\",\n      \"tenant_id\": \"{{user `tenant_id`}}\",\n\n      \"managed_image_resource_group_name\": \"{{user `resource_group`}}\",\n      \"managed_image_name\": \"{{user `arvados_cluster`}}-compute-v{{ timestamp }}\",\n\n      \"ssh_username\": \"{{user `ssh_user`}}\",\n      \"ssh_private_key_file\": \"{{user `ssh_private_key_file`}}\",\n\n      \"image_publisher\": \"Canonical\",\n      \"image_offer\": \"UbuntuServer\",\n      \"image_sku\": \"{{user `image_sku`}}\",\n\n      \"os_type\": \"Linux\",\n\n      \"location\": \"{{user `location`}}\",\n      \"vm_size\": \"Standard_D1_v2\"\n    }\n  ],\n  \"provisioners\": [{\n      \"type\": \"ansible\",\n      \"command\": \"{{user `ansible_command`}}\",\n      \"playbook_file\": \"../ansible/build-compute-image.yml\",\n      \"user\": \"{{user `ssh_user`}}\",\n      \"extra_arguments\": [\n          \"--extra-vars\", \"arvados_cluster_id={{ user `arvados_cluster` }}\",\n          \"--extra-vars\", \"@{{ user `ansible_vars_file` }}\"\n      ]\n  }]\n}\n"
  },
  {
    "path": "tools/compute-images/host_config.example.yml",
    "content": "### This file documents common cloud-agnostic configuration options to build\n### an Arvados compute node image. To use this file, copy it to\n### `host_config.yml`, then edit it as desired following the comments below.\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# `arvados_config_file` is the ABSOLUTE path of an Arvados cluster\n# configuration file. Ansible reads various settings from this file to\n# make sure system configuration is consistent with cluster configuration.\n# This file MUST be readable by the user running Ansible/Packer, along with\n# any files it references (e.g., `Containers.DispatchPrivateKey`).\narvados_config_file: /etc/arvados/config.yml\n\n# `arvados_cluster_id` is a five-character cluster identifier defined under\n# `Clusters` in `ansible_config_file`. Ansible will use configuration\n# settings from this specific cluster.\n# If you are running Packer, you do not need to set this; the `arvados_cluster`\n# you set there will be passed through to Ansible.\n# Otherwise, you MUST set this to the identifier of the cluster you are\n# setting up a compute node for.\n#arvados_cluster_id: xxxxx\n\n# `compute_authorized_keys` is the ABSOLUTE path to a file with additional\n# public SSH keys to authorize for the Crunch user, in `authorized_keys`\n# format. If your copy of `arvados_config_file` does not have\n# `Containers.DispatchPrivateKey` set for security reasons, you can name a\n# file with the dispatcher's public key here.\n#compute_authorized_keys: /dev/null\n\n# `arvados_compute_nvidia` is a flag that determines whether or not\n# NVIDIA CUDA and associated drivers will be installed in the compute\n# node image. Set this to true if you want your Arvados cluster to\n# support containers with CUDA requirements.\n#arvados_compute_nvidia: true\n\n# `dns_resolver` can be set to the IP address of a DNS server. If you\n# set this, compute nodes will be configured to use this DNS server\n# before those set by DHCP.\n#dns_resolver: \"192.0.2.2\"\n\n# `arvados_compute_encrypted_tmp` can name a method that will be used\n# to provide an encrypted working directory to running containers.\n# By default, the image will use dynamically scaling EBS storage if it\n# detects that it is being built on AWS EC2, and local storage otherwise.\n# If you are building an image for AWS EC2 and do not want to use dynamic\n# EBS storage, set this to the empty string.\n# If you are building an image for AWS EC2 and the detection fails for\n# any reason, you can set this to 'aws_ebs' to force the use of dynamic\n# EBS storage.\n# EBS autoscaling requires additional configuration in AWS IAM and your\n# cluster's `config.yml`. See\n# <http://doc.arvados.org/install/crunch2-cloud/install-dispatch-cloud.html#aws-ebs-autoscaler>\n#arvados_compute_encrypted_tmp: \"\"\n\n# `workdir` names a directory where Ansible should write temporary files as\n# it sets up the compute node image. The default respects the $TMPDIR\n# environment variable on the compute image, or uses `/tmp` if that's not\n# set. You can specify another directory if `/tmp` is low on space or\n# restricted as a security measure.\n#workdir: \"{{ ansible_env.HOME }}\"\n\n### The settings below should not be changed for production deployments.\n### These settings help support Arvados testing.\n# `arvados_pin_version` identifies the version of Arvados packages that\n# should be installed on the compute node. The default matches the release\n# of this build tool. 
You can set this to the empty string to install\n# whatever the latest version is at the time you build the image.\n#arvados_pin_version: \"\"\n\n# `arvados_apt_suites` identifies which set of packages to fetch and install\n# from apt.arvados.org. The default is to match the release codename of your\n# distribution, which will get production releases built for that\n# distribution. If this value starts with `-`, the deployment will\n# automatically prepend that release codename. You can set this to\n# '-testing' or '-dev' to install release candidate or development packages\n# for your base distribution, respectively.\n#arvados_apt_suites: \"-testing\"\n\n# `arvados_compute_pin_packages` is a flag that determines whether or not\n# to pin third-party dependencies like Docker and NVIDIA CUDA to versions\n# that have been tested to work with this release of Arvados. You can set\n# this to false to build a compute image with the latest versions of those\n# dependencies.\n#arvados_compute_pin_packages: true\n\n# `arvados_compute_amd_rocm` is a flag that determines whether or not\n# AMD ROCm and associated drivers will be installed in the compute\n# node image. This support is still in development and untested.\n# Note this installs >30GB of packages and requires additional space\n# to compile the drivers. Make sure you allocate enough disk space for\n# this in your Packer configuration.\n#arvados_compute_amd_rocm: true\n"
  },
  {
    "path": "tools/copy-tutorial/copy-tutorial.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nset -e -o pipefail\n\nif test -z \"$1\" ; then\n  echo \"$0: Copies Arvados tutorial resources from public data cluster (jutro)\"\n  echo \"Usage: copy-tutorial.sh <tutorial>\"\n  echo \"<tutorial> is which tutorial to copy, one of:\"\n  echo \" bwa-mem        Tutorial from https://doc.arvados.org/user/tutorials/tutorial-workflow-workbench.html\"\n  echo \" whole-genome   Whole genome variant calling tutorial workflow (large)\"\n  exit\nfi\n\nif test -z \"ARVADOS_API_HOST\" ; then\n    echo \"Please set ARVADOS_API_HOST to the destination cluster\"\n    exit\nfi\n\nsrc=jutro\ntutorial=$1\n\nif ! test -f $HOME/.config/arvados/jutro.conf ; then\n    # Set it up with the anonymous user token.\n    echo \"ARVADOS_API_HOST=jutro.arvadosapi.com\" > $HOME/.config/arvados/jutro.conf\n    echo \"ARVADOS_API_TOKEN=v2/jutro-gj3su-e2o9x84aeg7q005/22idg1m3zna4qe4id3n0b9aw86t72jdw8qu1zj45aboh1mm4ej\" >> $HOME/.config/arvados/jutro.conf\n    exit 1\nfi\n\necho\necho \"Copying from public data cluster (jutro) to $ARVADOS_API_HOST\"\necho\n\nmake_project() {\n    name=\"$1\"\n    owner=\"$2\"\n    if test -z \"$owner\" ; then\n\towner=$(arv --format=uuid user current)\n    fi\n    project_uuid=$(arv --format=uuid group list --filters '[[\"name\", \"=\", \"'\"$name\"'\"], [\"owner_uuid\", \"=\", \"'$owner'\"]]')\n    if test -z \"$project_uuid\" ; then\n\tproject_uuid=$(arv --format=uuid group create --group '{\"name\":\"'\"$name\"'\", \"group_class\": \"project\", \"owner_uuid\": \"'$owner'\"}')\n\n    fi\n    echo $project_uuid\n}\n\ncopy_jobs_image() {\n    if ! arv-keepdocker | grep \"arvados/jobs *latest\" ; then\n\tarv-copy --project-uuid=$parent_project jutro-4zz18-sxmit0qs6i9n2s4\n    fi\n}\n\nparent_project=$(make_project \"Tutorial projects\")\ncopy_jobs_image\n\nif test \"$tutorial\" = \"bwa-mem\" ; then\n    echo\n    echo \"Copying bwa mem tutorial\"\n    echo\n\n    arv-copy --project-uuid=$parent_project jutro-j7d0g-rehmt1w5v2p2drp\n\n    echo\n    echo \"Finished, data copied to \\\"User guide resources\\\" at $parent_project\"\n    echo \"You can now go to Workbench and choose 'Run a process' and then select 'bwa-mem.cwl'\"\n    echo\nfi\n\nif test \"$tutorial\" = \"whole-genome\" ; then\n    echo\n    echo \"Copying whole genome variant calling tutorial\"\n    echo\n\n    arv-copy --project-uuid=$parent_project jutro-j7d0g-n2g87m02rsl4cx2\n\n    echo\n    echo \"Finished, data copied to \\\"WGS Processing Tutorial\\\" at $parent_project\"\n    echo \"You can now go to Workbench and choose 'Run a process' and then select 'WGS Processing Tutorial'\"\n    echo\nfi\n"
  },
  {
    "path": "tools/crunchstat-summary/MANIFEST.in",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ninclude agpl-3.0.txt\ninclude crunchstat_summary/dygraphs.js\ninclude crunchstat_summary/synchronizer.js\ninclude arvados_version.py"
  },
  {
    "path": "tools/crunchstat-summary/README.rst",
    "content": ".. Copyright (C) The Arvados Authors. All rights reserved.\n..\n.. SPDX-License-Identifier: AGPL-3.0\n\n==================\ncrunchstat-summary\n==================\n\nOverview\n--------\n\nThis package provides the ``crunchstat-summary`` tool to analyze the compute performance of processes and workflows run under Arvados_.\n\n.. _Arvados: https://arvados.org/\n\nInstallation\n------------\n\nInstalling under your user account\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThis method lets you install the package without root access.  However,\nother users on the same system will need to reconfigure their shell in order\nto be able to use it. Run the following to install the package in an\nenvironment at ``~/arvclients``::\n\n  python3 -m venv ~/arvclients\n  ~/arvclients/bin/pip install crunchstat-summary\n\nCommand line tools will be installed under ``~/arvclients/bin``. You can\ntest one by running::\n\n  ~/arvclients/bin/crunchstat-summary --version\n\nYou can run these tools by specifying the full path every time, or you can\nadd the directory to your shell's search path by running::\n\n  export PATH=\"$PATH:$HOME/arvclients/bin\"\n\nYou can make this search path change permanent by adding this command to\nyour shell's configuration, for example ``~/.bashrc`` if you're using bash.\nYou can test the change by running::\n\n  crunchstat-summary --version\n\nInstalling on Debian and Ubuntu systems\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nArvados publishes packages for Debian 12 \"bookworm,\" Ubuntu 22.04 \"jammy,\" and Ubuntu 24.04 \"noble.\" You can install the Python SDK package on any of these distributions by running the following commands::\n\n  sudo install -d /etc/apt/keyrings\n  sudo curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg\n  sudo tee /etc/apt/sources.list.d/arvados.sources >/dev/null <<EOF\n  Types: deb\n  URIs: https://apt.arvados.org/$(lsb_release -cs)\n  Suites: $(lsb_release -cs)\n  Components: main\n  Signed-by: /etc/apt/keyrings/arvados.asc\n  EOF\n  sudo apt update\n  sudo apt install python3-crunchstat-summary\n\nInstalling on Red Hat, AlmaLinux, and Rocky Linux\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nArvados publishes packages for RHEL 8 and 9, as well as distributions based on those. Note that these packages depend on, and will automatically enable, the Python 3.11 module. You can install the Python SDK package on any of these distributions by running the following commands::\n\n  sudo tee /etc/yum.repos.d/arvados.repo >/dev/null <<'EOF'\n  [arvados]\n  name=Arvados\n  baseurl=https://rpm.arvados.org/RHEL/$releasever/os/$basearch/\n  gpgcheck=1\n  gpgkey=https://rpm.arvados.org/RHEL/$releasever/RPM-GPG-KEY-arvados\n  EOF\n  sudo dnf install python3-crunchstat-summary\n\nConfiguration\n-------------\n\nThis client software needs two pieces of information to connect to\nArvados: the DNS name of the API server, and an API authorization\ntoken. `The Arvados user\ndocumentation\n<http://doc.arvados.org/user/reference/api-tokens.html>`_ describes\nhow to find this information in the Arvados Workbench, and install it\non your system.\n\nTesting and Development\n-----------------------\n\nThis package is one part of the Arvados source package, and it has\nintegration tests to check interoperability with other Arvados\ncomponents.  Our `hacking guide\n<https://dev.arvados.org/projects/arvados/wiki/Hacking_Python_SDK>`_\ndescribes how to set up a development environment and run tests.\n"
  },
  {
    "path": "tools/crunchstat-summary/agpl-3.0.txt",
    "content": "                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU Affero General Public License is a free, copyleft license for\nsoftware and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nour General Public Licenses are intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.\n\n  A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.\n\n  The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.\n\n  An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU Affero General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  
\"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. 
Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  
This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  
But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  
If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  
If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  
\"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Remote Network Interaction; Use with the GNU General Public License.\n\n  Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  
This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the work with which it is combined will remain governed by version\n3 of the GNU General Public License.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new versions\nwill be similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU Affero General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU Affero General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. 
Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a \"Source\" link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n<http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "tools/crunchstat-summary/arvados_version.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport dataclasses\nimport os\nimport re\nimport runpy\nimport subprocess\nimport typing as t\n\nfrom pathlib import Path, PurePath, PurePosixPath\n\nimport setuptools\nimport setuptools.command.build\n\nSETUP_DIR = Path(__file__).absolute().parent\nVERSION_SCRIPT_PATH = PurePath('build', 'version-at-commit.sh')\n# Built by ArvadosPythonPackage.register\nARVADOS_PYTHON_MODULES: dict[str, 'ArvadosPythonPackage'] = {}\n\n### Metadata generation\n\n@dataclasses.dataclass\nclass ArvadosPythonPackage:\n    package_name: str\n    module_name: str\n    src_path: PurePath\n    dependencies: t.Sequence['ArvadosPythonPackage']\n\n    _VERSION_SUBS = {\n        'development-': '',\n        '~dev': '.dev',\n        '~rc': 'rc',\n    }\n\n    @classmethod\n    def register(\n            cls,\n            package_name: str,\n            module_name: str,\n            src_path: PurePath | str,\n            *dependencies: str,\n    ) -> 'ArvadosPythonPackage':\n        if not isinstance(src_path, PurePath):\n            src_path = PurePosixPath(src_path)\n        deps = [ARVADOS_PYTHON_MODULES[key] for key in dependencies]\n        this_pkg = cls(package_name, module_name, src_path, deps)\n        ARVADOS_PYTHON_MODULES[package_name] = this_pkg\n        return this_pkg\n\n    def version_file_path(self):\n        return PurePath(self.module_name, '_version.py')\n\n    def _workspace_path(self, workdir):\n        try:\n            workspace = Path(os.environ['WORKSPACE'])\n            # This will raise ValueError if they're not related,\n            # in which case we don't want to use this $WORKSPACE.\n            workdir.relative_to(workspace)\n        except KeyError:\n            # $WORKSPACE isn't set. 
Fall back to the Git worktree toplevel.\n            try:\n                git_proc = subprocess.run(\n                    ['git', 'rev-parse', '--show-toplevel'],\n                    capture_output=True,\n                    check=True,\n                    cwd=workdir,\n                    text=True,\n                )\n                workspace = Path(git_proc.stdout.removesuffix('\\n'))\n            except (subprocess.CalledProcessError, FileNotFoundError, ValueError):\n                return None\n        except ValueError:\n            return None\n        if (workspace / VERSION_SCRIPT_PATH).exists():\n            return workspace\n        else:\n            return None\n\n    def _git_version(self, workdir):\n        workspace = self._workspace_path(workdir)\n        if workspace is None:\n            return None\n        git_log_cmd = [\n            'git', 'log', '-n1', '--format=%H', '--',\n            str(VERSION_SCRIPT_PATH), str(self.src_path),\n        ]\n        git_log_cmd.extend(str(dep.src_path) for dep in self.dependencies)\n        git_log_proc = subprocess.run(\n            git_log_cmd,\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        version_proc = subprocess.run(\n            [str(VERSION_SCRIPT_PATH), git_log_proc.stdout.rstrip('\\n')],\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        return version_proc.stdout.rstrip('\\n')\n\n    def _sdist_version(self, workdir):\n        try:\n            pkg_info = (workdir / 'PKG-INFO').open()\n        except FileNotFoundError:\n            return None\n        with pkg_info:\n            for line in pkg_info:\n                key, _, val = line.partition(': ')\n                if key == 'Version':\n                    return val.rstrip('\\n')\n        raise Exception(\"found PKG-INFO file but not Version metadata in it\")\n\n    def get_version(self, workdir=SETUP_DIR):\n        version = (\n            # If we're building out of a distribution, we should pass that\n            # version through unchanged.\n            self._sdist_version(workdir)\n            # Otherwise follow the usual Arvados versioning rules.\n            or os.environ.get('ARVADOS_BUILDING_VERSION')\n            or self._git_version(workdir)\n        )\n        if not version:\n            raise Exception(f\"no version information available for {self.package_name}\")\n        else:\n            return re.sub(\n                r'(^development-|~dev|~rc)',\n                lambda match: self._VERSION_SUBS[match.group(0)],\n                version,\n            )\n\n    def get_dependencies_version(self, workdir=SETUP_DIR, version=None):\n        if version is None:\n            version = self.get_version(workdir)\n        # A packaged development release should be installed with other\n        # development packages built from the same source, but those\n        # dependencies may have earlier \"dev\" versions (read: less recent\n        # Git commit timestamps). This compatible version dependency\n        # expresses that as closely as possible. 
Allowing versions\n        # compatible with .dev0 allows any development release.\n        # Regular expression borrowed partially from\n        # <https://packaging.python.org/en/latest/specifications/version-specifiers/#version-specifiers-regex>\n        dep_ver, match_count = re.subn(r'\\.dev(0|[1-9][0-9]*)$', '.dev0', version, 1)\n        return ('~=' if match_count else '==', dep_ver)\n\n    def iter_dependencies(self, workdir=SETUP_DIR, version=None, extras=None):\n        if extras is None:\n            extras = {}\n        dep_op, dep_ver = self.get_dependencies_version(workdir, version)\n        for dep in self.dependencies:\n            try:\n                dep_extras = f'[{\",\".join(extras[dep.package_name])}]'\n            except KeyError:\n                dep_extras = ''\n            yield f'{dep.package_name}{dep_extras} {dep_op} {dep_ver}'\n\n\n### Package database\n\nArvadosPythonPackage.register(\n    'arvados-python-client',\n    'arvados',\n    'sdk/python',\n)\nArvadosPythonPackage.register(\n    'crunchstat_summary',\n    'crunchstat_summary',\n    'tools/crunchstat-summary',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cluster-activity',\n    'arvados_cluster_activity',\n    'tools/cluster-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cwl-runner',\n    'arvados_cwl',\n    'sdk/cwl',\n    'arvados-python-client',\n    'crunchstat_summary',\n)\nArvadosPythonPackage.register(\n    'arvados_fuse',\n    'arvados_fuse',\n    'services/fuse',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-user-activity',\n    'arvados_user_activity',\n    'tools/user-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-tools',\n    'NO SRCDIR',\n    'tools/python-metapackage',\n    *ARVADOS_PYTHON_MODULES,\n)\nArvadosPythonPackage.register(\n    'arvados-docker-cleaner',\n    'arvados_docker',\n    'services/dockercleaner',\n)\n\n### setuptools integration\n\nclass BuildArvadosVersion(setuptools.Command):\n    \"\"\"Write _version.py for an Arvados module\"\"\"\n    def initialize_options(self):\n        self.build_lib = None\n\n    def finalize_options(self):\n        self.set_undefined_options(\"build_py\", (\"build_lib\", \"build_lib\"))\n        arv_mod = ARVADOS_PYTHON_MODULES[self.distribution.get_name()]\n        self.out_path = Path(self.build_lib, arv_mod.version_file_path())\n\n    def run(self):\n        with self.out_path.open('w') as out_file:\n            print(f'__version__ = {self.distribution.get_version()!r}', file=out_file)\n\n    def get_outputs(self):\n        return [str(self.out_path)]\n\n    def get_source_files(self):\n        return []\n\n    def get_output_mapping(self):\n        return {}\n\n\nclass ArvadosBuildCommand(setuptools.command.build.build):\n    sub_commands = [\n        *setuptools.command.build.build.sub_commands,\n        ('build_arvados_version', None),\n    ]\n\n\nCMDCLASS = {\n    'build': ArvadosBuildCommand,\n    'build_arvados_version': BuildArvadosVersion,\n}\n
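\n# A minimal sketch (illustrative only, not used by the build) of how a\n# setup.py in this repository might use this module:\n#\n#   import arvados_version\n#   pkg = arvados_version.ARVADOS_PYTHON_MODULES['crunchstat_summary']\n#   setuptools.setup(\n#       name=pkg.package_name,\n#       version=pkg.get_version(),\n#       install_requires=list(pkg.iter_dependencies()),\n#       cmdclass=arvados_version.CMDCLASS,\n#   )\n"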
  },
  {
    "path": "tools/crunchstat-summary/bin/crunchstat-summary",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport crunchstat_summary.command\nimport crunchstat_summary.summarizer\nimport logging\nimport sys\n\nlogging.getLogger().addHandler(logging.StreamHandler())\n\nargs = crunchstat_summary.command.ArgumentParser().parse_args(sys.argv[1:])\ncmd = crunchstat_summary.command.Command(args)\ncmd.run()\nprint(cmd.report(), end='')\n"
  },
  {
    "path": "tools/crunchstat-summary/crunchstat_summary/__init__.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport logging\nimport sys\n\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.StreamHandler(stream=sys.stderr))\nlogger.setLevel(logging.WARNING)\n"
  },
  {
    "path": "tools/crunchstat-summary/crunchstat_summary/command.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport argparse\nimport gzip\nfrom io import open\nimport logging\nimport sys\nimport arvados\n\nfrom crunchstat_summary import logger, summarizer, reader\nfrom crunchstat_summary._version import __version__\n\n\nclass ArgumentParser(argparse.ArgumentParser):\n    def __init__(self):\n        super(ArgumentParser, self).__init__(\n            description='Summarize resource usage of an Arvados Crunch job')\n        src = self.add_mutually_exclusive_group()\n        src.add_argument(\n            '--job', '--container-request',\n            type=str, metavar='UUID',\n            help='Look up the specified job or container request '\n            'and read its log data from Keep (or from the Arvados event log, '\n            'if the job is still running)')\n        src.add_argument(\n            '--container',\n            type=str, metavar='UUID',\n            help='[Deprecated] Look up the specified container find its container request '\n            'and read its log data from Keep (or from the Arvados event log, '\n            'if the job is still running)')\n        src.add_argument(\n            '--log-file', type=str,\n            help='Read log data from a regular file')\n        self.add_argument(\n            '--skip-child-jobs', action='store_true',\n            help='Do not include stats from child jobs/containers')\n        self.add_argument(\n            '--format', type=str, choices=('html', 'text'), default='text',\n            help='Report format')\n        self.add_argument(\n            '--threads', type=int, default=8,\n            help='Maximum worker threads to run')\n        self.add_argument(\n            '--verbose', '-v', action='count', default=0,\n            help='Log more information (once for progress, twice for debug)')\n        self.add_argument('--version', action='version',\n                         version=\"%s %s\" % (sys.argv[0], __version__),\n                         help='Print version and exit.')\n\n\nclass UTF8Decode(object):\n    '''Wrap a file-like iterable to decode UTF-8 bytes into a strings\n    '''\n    def __init__(self, fh):\n        self.fh = fh\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        self.close()\n\n    def __iter__(self):\n        return self\n\n    def __next__(self):\n        return next(self.fh).decode('utf-8')\n\n    next = __next__\n\n    def close(self):\n        # mimic Gzip behavior and don't close underlying object\n        pass\n\n\nclass Command(object):\n    def __init__(self, args):\n        self.args = args\n        logger.setLevel(logging.WARNING - 10 * args.verbose)\n\n    def run(self):\n        kwargs = {\n            'skip_child_jobs': self.args.skip_child_jobs,\n            'threads': self.args.threads,\n            'arv': arvados.api('v1')\n        }\n        if self.args.job:\n            self.summer = summarizer.NewSummarizer(self.args.job, **kwargs)\n        elif self.args.container:\n            self.summer = summarizer.NewSummarizer(self.args.container, **kwargs)\n        elif self.args.log_file:\n            if self.args.log_file.endswith('.gz'):\n                fh = UTF8Decode(gzip.open(self.args.log_file))\n            else:\n                fh = open(self.args.log_file, mode = 'r', encoding = 'utf-8')\n            self.summer = summarizer.Summarizer(reader.StubReader(fh), **kwargs)\n        else:\n            self.summer = 
summarizer.Summarizer(reader.StubReader(sys.stdin), **kwargs)\n        return self.summer.run()\n\n    def report(self):\n        if self.args.format == 'html':\n            return self.summer.html_report()\n        elif self.args.format == 'text':\n            return self.summer.text_report()\n"
  },
  {
    "path": "tools/crunchstat-summary/crunchstat_summary/dygraphs.js",
    "content": "// Copyright (C) The Arvados Authors. All rights reserved.\n//\n// SPDX-License-Identifier: AGPL-3.0\n\nwindow.onload = function() {\n    var charts = {};\n    var fmt = {\n        iso: function(y) {\n            var s='';\n            if (y > 1000000000000000) { y=y/1000000000000000; s='P'; }\n            else if (y > 1000000000000) { y=y/1000000000000; s='T'; }\n            else if (y > 1000000000) { y=y/1000000000; s='G'; }\n            else if (y > 1000000) { y=y/1000000; s='M'; }\n            else if (y > 1000) { y=y/1000; s='K'; }\n            return y.toFixed(2).replace(/\\.0+$/, '')+s;\n        },\n        time: function(s) {\n            var ret = ''\n            if (s >= 86400) ret += Math.floor(s/86400) + 'd'\n            if (s >= 3600) ret += Math.floor(s/3600)%24 + 'h'\n            if (s >= 60) ret += Math.floor(s/60)%60 + 'm'\n            ret += Math.floor(s)%60 + 's'\n            // finally, strip trailing zeroes: 1d0m0s -> 1d\n            return ret.replace(/(\\D)(0\\D)*$/, '$1')\n        },\n    }\n    var ticker = {\n        time: function(min, max, pixels, opts, dg) {\n            var max_ticks = Math.floor(pixels / (opts('axisLabelWidth')+opts('pixelsPerLabel')/2))\n            var natural = [1, 5, 10, 30, 60,\n                           120, 300, 600, 1800, 3600,\n                           7200, 14400, 43200, 86400]\n            var interval = natural.shift()\n            while (max>min && (max-min)/interval > max_ticks) {\n                interval = (natural.shift()) || (interval * 2)\n            }\n            var ticks = []\n            for (var i=Math.ceil(min/interval)*interval; i<=max; i+=interval) {\n                ticks.push({v: i, label: fmt.time(i)})\n            }\n            return ticks\n        },\n    }\n    chartdata.forEach(function(section, section_idx) {\n        var chartDiv = document.getElementById(\"chart\");\n        section.charts.forEach(function(chart, chart_idx) {\n            // Skip chart if every series has zero data points\n            if (0 == chart.data.reduce(function(len, series) {\n                return len + series.length;\n            }, 0)) {\n                return;\n            }\n            var id = 'chart-'+section_idx+'-'+chart_idx;\n            var div = document.createElement('div');\n            div.setAttribute('id', id);\n            div.setAttribute('style', 'width: 100%; height: 150px');\n            chartDiv.appendChild(div);\n            chart.options.valueFormatter = function(y) {\n            }\n            chart.options.axes = {\n                x: {\n                    axisLabelFormatter: fmt.time,\n                    valueFormatter: fmt.time,\n                    ticker: ticker.time,\n                },\n                y: {\n                    axisLabelFormatter: fmt.iso,\n                    valueFormatter: fmt.iso,\n                },\n            }\n            var div2 = document.createElement('div');\n            div2.setAttribute('style', 'width: 150px; height: 150px');\n            chart.options.labelsDiv = div2;\n            chart.options.labelsSeparateLines = true;\n\n            var div3 = document.createElement('div');\n            div3.setAttribute('style', 'display: flex; padding-bottom: 16px');\n            div3.appendChild(div);\n            div3.appendChild(div2);\n            chartDiv.appendChild(div3);\n\n            charts[id] = new Dygraph(div, chart.data, chart.options);\n        });\n    });\n\n    var sync = Dygraph.synchronize(Object.values(charts), {range: 
false});\n\n    if (typeof window.debug === 'undefined')\n        window.debug = {};\n    window.debug.charts = charts;\n};\n"
  },
  {
    "path": "tools/crunchstat-summary/crunchstat_summary/dygraphs.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport importlib.resources\nimport json\nfrom arvados._internal.report_template import ReportTemplate\n\nclass DygraphsChart(ReportTemplate):\n    \"\"\"Crunchstat report using dygraphs for charting.\n    \"\"\"\n\n    CSS = 'https://cdnjs.cloudflare.com/ajax/libs/dygraph/2.0.0/dygraph.min.css'\n    JSLIB = 'https://cdnjs.cloudflare.com/ajax/libs/dygraph/2.0.0/dygraph.min.js'\n    JSASSETS = ['synchronizer.js','dygraphs.js']\n\n    def __init__(self, label, summarizers, beforechart, afterchart):\n        super().__init__(label)\n        self.summarizers = summarizers\n        self.beforechart = beforechart\n        self.afterchart = afterchart\n\n    def html(self):\n        self.cards.extend(self.beforechart)\n        self.cards.append(\"\"\"\n                <h2>Graph</h2>\n                <div id=\"chart\"></div>\n            \"\"\")\n        self.cards.extend(self.afterchart)\n\n        return super().html()\n\n    def js(self):\n        return '''\n        <script type=\"text/javascript\" src=\"{jslib}\"></script>\n        <script type=\"text/javascript\">\n        var chartdata = {chartdata};\\n{jsassets}\n        </script>'''.format(\n            jslib=self.JSLIB,\n            chartdata=json.dumps(self.sections()),\n            jsassets='\\n'.join(\n                importlib.resources.read_text('crunchstat_summary', jsa, encoding='utf-8')\n                for jsa in self.JSASSETS\n            ),\n        )\n\n    def sections(self):\n        return [\n            {\n                'label': s.long_label(),\n                'charts': [\n                    self.chartdata(s.label, s.tasks, stat)\n                    for stat in (('cpu', ['user+sys__rate', 'user__rate', 'sys__rate']),\n                                 ('mem', ['rss']),\n                                 ('net:eth0', ['tx+rx__rate','rx__rate','tx__rate']),\n                                 ('net:keep0', ['tx+rx__rate','rx__rate','tx__rate']),\n                                 ('statfs', ['used', 'total']),\n                                 )\n                    ],\n            }\n            for s in self.summarizers]\n\n    def chartdata(self, label, tasks, stats):\n        '''For Crunch2, label is the name of container request,\n        tasks is the top level container and\n        stats is index by a tuple of (category, metric).\n        '''\n        return {\n            'data': self._collate_data(tasks, stats),\n            'options': {\n                'legend': 'always',\n                'connectSeparatedPoints': True,\n                'labels': ['elapsed'] +  stats[1],\n                'includeZero': True,\n                'title': '{}: {}'.format(label, stats[0]) if label else stats[0],\n            },\n        }\n\n    def _collate_data(self, tasks, stats):\n        data = []\n        nulls = []\n        # uuid is category for crunch2\n        for uuid, task in tasks.items():\n            # All stats in a category are assumed to have the same time base and same number of samples\n            category = stats[0]\n            series_names = stats[1]\n            sn0 = series_names[0]\n            series = task.series[(category,sn0)]\n            for i in range(len(series)):\n                pt = series[i]\n                vals = [task.series[(category,stat)][i][1] for stat in series_names[1:]]\n                data.append([pt[0].total_seconds()] + nulls + [pt[1]] + vals)\n            
nulls.append(None)\n        return sorted(data)\n\n    def style(self):\n        return '\\n'.join((super().style(),\n                         '<link rel=\"stylesheet\" href=\"{}\">\\n'.format(self.CSS)))\n"
  },
  {
    "path": "tools/crunchstat-summary/crunchstat_summary/reader.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados\nimport itertools\nimport json\nimport queue\nimport threading\n\nfrom crunchstat_summary import logger\n\n\nclass CollectionReader(object):\n    def __init__(self, collection_id, api_client=None, collection_object=None):\n        self._collection_id = collection_id\n        self._label = collection_id\n        self._readers = []\n        self._api_client = api_client\n        self._collection = collection_object or arvados.collection.CollectionReader(self._collection_id, api_client=self._api_client)\n\n    def __str__(self):\n        return self._label\n\n    def __iter__(self):\n        logger.debug('load collection %s', self._collection_id)\n\n        filenames = [filename for filename in self._collection]\n        # Crunch2 has multiple stats files\n        if len(filenames) > 1:\n            filenames = ['crunchstat.txt', 'arv-mount.txt']\n        for filename in filenames:\n            try:\n                self._readers.append(self._collection.open(filename, \"rt\"))\n            except IOError:\n                logger.warn('Unable to open %s', filename)\n        self._label = \"{}/{}\".format(self._collection_id, filenames[0])\n        return itertools.chain(*[iter(reader) for reader in self._readers])\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        if self._readers:\n            for reader in self._readers:\n                reader.close()\n            self._readers = []\n\n    def node_info(self):\n        try:\n            with self._collection.open(\"node.json\", \"rt\") as f:\n                return json.load(f)\n        except IOError:\n            logger.warn('Unable to open node.json')\n        return {}\n\n\nclass LiveLogReader(object):\n    EOF = None\n\n    def __init__(self, job_uuid):\n        self.job_uuid = job_uuid\n        self.event_types = (['stderr'] if '-8i9sb-' in job_uuid else ['crunchstat', 'arv-mount'])\n        logger.debug('load %s events for job %s', self.event_types, self.job_uuid)\n\n    def __str__(self):\n        return self.job_uuid\n\n    def _get_all_pages(self):\n        got = 0\n        last_id = 0\n        filters = [\n            ['object_uuid', '=', self.job_uuid],\n            ['event_type', 'in', self.event_types]]\n        try:\n            while True:\n                page = arvados.api().logs().list(\n                    limit=1000,\n                    order=['id asc'],\n                    filters=filters + [['id','>',str(last_id)]],\n                    select=['id', 'properties'],\n                ).execute(num_retries=2)\n                got += len(page['items'])\n                logger.debug(\n                    '%s: received %d of %d log events',\n                    self.job_uuid, got,\n                    got + page['items_available'] - len(page['items']))\n                for i in page['items']:\n                    for line in i['properties']['text'].split('\\n'):\n                        self._queue.put(line+'\\n')\n                    last_id = i['id']\n                if (len(page['items']) == 0 or\n                    len(page['items']) >= page['items_available']):\n                    break\n        finally:\n            self._queue.put(self.EOF)\n\n    def __iter__(self):\n        self._queue = queue.Queue()\n        self._thread = threading.Thread(target=self._get_all_pages)\n        self._thread.daemon = True\n        
self._thread.start()\n        return self\n\n    def __next__(self):\n        line = self._queue.get()\n        if line is self.EOF:\n            self._thread.join()\n            raise StopIteration\n        return line\n\n    next = __next__ # for Python 2\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        pass\n\n    def node_info(self):\n        return {}\n\nclass StubReader(object):\n    def __init__(self, fh):\n        self.fh = fh\n\n    def __str__(self):\n        return \"\"\n\n    def __iter__(self):\n        return iter(self.fh)\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        pass\n\n    def node_info(self):\n        return {}\n"
  },
  {
    "path": "tools/crunchstat-summary/crunchstat_summary/summarizer.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados\nimport collections\nimport crunchstat_summary.dygraphs\nimport crunchstat_summary.reader\nimport datetime\nimport functools\nimport itertools\nimport math\nimport re\nimport sys\nimport _strptime\nimport arvados.util\n\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom crunchstat_summary import logger\n\n# Recommend memory constraints that are this multiple of an integral\n# number of GiB. (Actual nodes tend to be sold in sizes like 8 GiB\n# that have amounts like 7.5 GiB according to the kernel.)\nAVAILABLE_RAM_RATIO = 0.90\nMB=2**20\n\n# Workaround datetime.datetime.strptime() thread-safety bug by calling\n# it once before starting threads.  https://bugs.python.org/issue7980\ndatetime.datetime.strptime('1999-12-31_23:59:59', '%Y-%m-%d_%H:%M:%S')\n\n\nWEBCHART_CLASS = crunchstat_summary.dygraphs.DygraphsChart\n\n\nclass Task(object):\n    def __init__(self):\n        self.starttime = None\n        self.finishtime = None\n        self.series = collections.defaultdict(list)\n\n\nclass Summarizer(object):\n    def __init__(self, logdata, label=None, skip_child_jobs=False, uuid=None, **kwargs):\n        self._logdata = logdata\n\n        self.uuid = uuid\n        self.label = label\n        self.starttime = None\n        self.finishtime = None\n        self._skip_child_jobs = skip_child_jobs\n\n        # stats_max: {category: {stat: val}}\n        self.stats_max = collections.defaultdict(\n            functools.partial(collections.defaultdict, lambda: 0))\n        # task_stats: {task_id: {category: {stat: val}}}\n        self.task_stats = collections.defaultdict(\n            functools.partial(collections.defaultdict, dict))\n\n        self.seq_to_uuid = {}\n        self.tasks = collections.defaultdict(Task)\n\n        # We won't bother recommending new runtime constraints if the\n        # constraints given when running the job are known to us and\n        # are already suitable.  
If applicable, the subclass\n        # constructor will overwrite this with something useful.\n        self.existing_constraints = {}\n        self.node_info = {}\n        self.cost = 0\n        self.arv_config = {}\n\n        logger.info(\"%s: logdata %s\", self.label, logdata)\n\n    def run(self):\n        logger.debug(\"%s: parsing logdata %s\", self.label, self._logdata)\n        with self._logdata as logdata:\n            self._run(logdata)\n\n    def _run(self, logdata):\n        if not self.node_info:\n            self.node_info = logdata.node_info()\n\n        for line in logdata:\n            # crunch2\n            # 2017-12-01T16:56:24.723509200Z crunchstat: keepcalls 0 put 3 get -- interval 10.0000 seconds 0 put 3 get\n            m = re.search(r'^(?P<timestamp>\\S+) (?P<crunchstat>crunchstat: )?(?P<category>\\S+) (?P<current>.*?)( -- interval (?P<interval>.*))?\\n$', line)\n            if not m:\n                continue\n\n            if self.label is None:\n                try:\n                    self.label = m.group('job_uuid')\n                except IndexError:\n                    self.label = 'label #1'\n\n            task_id = 'container'\n            task = self.tasks[task_id]\n\n            # Use the first and last crunchstat timestamps as\n            # approximations of starttime and finishtime.\n            timestamp = m.group('timestamp')\n            if timestamp[10:11] == '_':\n                timestamp = datetime.datetime.strptime(\n                    timestamp, '%Y-%m-%d_%H:%M:%S')\n            elif timestamp[10:11] == 'T':\n                timestamp = datetime.datetime.strptime(\n                    timestamp[:19], '%Y-%m-%dT%H:%M:%S')\n            else:\n                raise ValueError(\"Cannot parse timestamp {!r}\".format(\n                    timestamp))\n\n            if task.starttime is None:\n                logger.debug('%s: task %s starttime %s',\n                             self.label, task_id, timestamp)\n            if task.starttime is None or timestamp < task.starttime:\n                task.starttime = timestamp\n            if task.finishtime is None or timestamp > task.finishtime:\n                task.finishtime = timestamp\n\n            if self.starttime is None or timestamp < self.starttime:\n                self.starttime = timestamp\n            if self.finishtime is None or timestamp > self.finishtime:\n                self.finishtime = timestamp\n\n            if task.starttime is not None and task.finishtime is not None:\n                elapsed = (task.finishtime - task.starttime).seconds\n                self.task_stats[task_id]['time'] = {'elapsed': elapsed}\n                if elapsed > self.stats_max['time']['elapsed']:\n                    self.stats_max['time']['elapsed'] = elapsed\n\n            category = m.group('category')\n            if category.endswith(':'):\n                # \"stderr crunchstat: notice: ...\"\n                continue\n            elif category in ('error', 'caught'):\n                continue\n            elif category in ('read', 'open', 'cgroup', 'CID', 'Running'):\n                # \"stderr crunchstat: read /proc/1234/net/dev: ...\"\n                # (old logs are less careful with unprefixed error messages)\n                continue\n\n            this_interval_s = None\n            for group in ['current', 'interval']:\n                if not m.group(group):\n                    continue\n                category = m.group('category')\n                words = m.group(group).split(' 
')\n                stats = {}\n                try:\n                    for val, stat in zip(words[::2], words[1::2]):\n                        if '.' in val:\n                            stats[stat] = float(val)\n                        else:\n                            stats[stat] = int(val)\n                except ValueError as e:\n                    # If the line doesn't start with 'crunchstat:' we\n                    # might have mistaken an error message for a\n                    # structured crunchstat line.\n                    if m.group(\"crunchstat\") is None or m.group(\"category\") == \"crunchstat\":\n                        logger.warning(\"%s: log contains message\\n  %s\", self.label, line)\n                    else:\n                        logger.warning(\n                            '%s: Error parsing value %r (stat %r, category %r): %r',\n                            self.label, val, stat, category, e)\n                        logger.warning('%s', line)\n                    continue\n                if 'user' in stats or 'sys' in stats:\n                    stats['user+sys'] = stats.get('user', 0) + stats.get('sys', 0)\n                if 'tx' in stats or 'rx' in stats:\n                    stats['tx+rx'] = stats.get('tx', 0) + stats.get('rx', 0)\n                if group == 'interval':\n                    if 'seconds' in stats:\n                        this_interval_s = stats.get('seconds',0)\n                        del stats['seconds']\n                        if this_interval_s <= 0:\n                            logger.error(\n                                \"BUG? interval stat given with duration {!r}\".\n                                format(this_interval_s))\n                    else:\n                        logger.error('BUG? 
interval stat missing duration')\n                for stat, val in stats.items():\n                    if group == 'interval' and this_interval_s:\n                        stat = stat + '__rate'\n                        val = val / this_interval_s\n                        if stat in ['user+sys__rate', 'user__rate', 'sys__rate', 'tx+rx__rate', 'rx__rate', 'tx__rate']:\n                            task.series[category, stat].append(\n                                (timestamp - self.starttime, val))\n                    else:\n                        if stat in ['rss','used','total']:\n                            task.series[category, stat].append(\n                                (timestamp - self.starttime, val))\n                        self.task_stats[task_id][category][stat] = val\n                    if val > self.stats_max[category][stat]:\n                        self.stats_max[category][stat] = val\n        logger.debug('%s: done parsing', self.label)\n\n        self.job_tot = collections.defaultdict(\n            functools.partial(collections.defaultdict, int))\n        for task_id, task_stat in self.task_stats.items():\n            for category, stat_last in task_stat.items():\n                for stat, val in stat_last.items():\n                    if stat in ['cpus', 'cache', 'swap', 'rss']:\n                        # meaningless stats like 16 cpu cores x 5 tasks = 80\n                        continue\n                    self.job_tot[category][stat] += val\n        logger.debug('%s: done totals', self.label)\n\n        if self.stats_max['time'].get('elapsed', 0) > 20:\n            # needs to have executed for at least 20 seconds or we may\n            # not have collected any metrics and these warnings are duds.\n            missing_category = {\n                'cpu': 'CPU',\n                'mem': 'memory',\n                'net:': 'network I/O',\n                'statfs': 'storage space',\n            }\n            for task_stat in self.task_stats.values():\n                for category in task_stat.keys():\n                    for checkcat in missing_category:\n                        if checkcat.endswith(':'):\n                            if category.startswith(checkcat):\n                                missing_category.pop(checkcat)\n                                break\n                        else:\n                            if category == checkcat:\n                                missing_category.pop(checkcat)\n                                break\n            for catlabel in missing_category.values():\n                logger.warning('%s: %s stats are missing -- possible cluster configuration issue',\n                               self.label, catlabel)\n\n    def long_label(self):\n        label = self.label\n        if hasattr(self, 'process') and self.process['uuid'] not in label:\n            label = '{} ({})'.format(label, self.process['uuid'])\n        return label\n\n    def elapsed_time(self):\n        if not self.finishtime:\n            return \"\"\n        label = \"\"\n        s = (self.finishtime - self.starttime).total_seconds()\n        if s > 86400:\n            label += '{}d '.format(int(s/86400))\n        if s > 3600:\n            label += '{}h '.format(int(s/3600) % 24)\n        if s > 60:\n            label += '{}m '.format(int(s/60) % 60)\n        label += '{}s'.format(int(s) % 60)\n        return label\n\n    def text_report(self):\n        if not self.tasks:\n            return \"(no report generated)\\n\"\n        return 
\"\\n\".join(itertools.chain(\n            self._text_report_table_gen(lambda x: \"\\t\".join(x),\n                                  lambda x: \"\\t\".join(x)),\n            self._text_report_agg_gen(lambda x: \"# {}: {}{}\".format(x[0], x[1], x[2])),\n            self._recommend_gen(lambda x: \"#!! \"+x))) + \"\\n\"\n\n    def html_report(self):\n        tophtml = \"\"\"<h2>Summary</h2>{}\\n<table class='aggtable'><tbody>{}</tbody></table>\\n\"\"\".format(\n            \"\\n\".join(self._recommend_gen(lambda x: \"<p>{}</p>\".format(x))),\n            \"\\n\".join(self._text_report_agg_gen(lambda x: \"<tr><th>{}</th><td>{}{}</td></tr>\".format(*x))))\n\n        bottomhtml = \"\"\"<h2>Metrics</h2><table class='metricstable'><tbody>{}</tbody></table>\\n\"\"\".format(\n            \"\\n\".join(self._text_report_table_gen(lambda x: \"<tr><th>{}</th><th>{}</th><th>{}</th><th>{}</th><th>{}</th></tr>\".format(*x),\n                                                        lambda x: \"<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td></tr>\".format(*x))))\n        label = self.long_label()\n\n        return WEBCHART_CLASS(label, [self], [tophtml], [bottomhtml]).html()\n\n    def _text_report_table_gen(self, headerformat, rowformat):\n        yield headerformat(['category', 'metric', 'task_max', 'task_max_rate', 'job_total'])\n        for category, stat_max in sorted(self.stats_max.items()):\n            for stat, val in sorted(stat_max.items()):\n                if stat.endswith('__rate'):\n                    continue\n                max_rate = self._format(stat_max.get(stat+'__rate', '-'))\n                val = self._format(val)\n                tot = self._format(self.job_tot[category].get(stat, '-'))\n                yield rowformat([category, stat, str(val), max_rate, tot])\n\n    def _text_report_agg_gen(self, aggformat):\n        by_single_task = \"\"\n        if len(self.tasks) > 1:\n            by_single_task = \" by a single task\"\n\n        metrics = [\n            ('Elapsed time',\n             self.elapsed_time(),\n             None,\n             ''),\n\n            ('Estimated cost',\n             '${:.3f}'.format(self.cost),\n             None,\n             '') if self.cost > 0 else None,\n\n            ('Assigned instance type',\n             self.node_info.get('ProviderType'),\n             None,\n             '') if self.node_info.get('ProviderType') else None,\n\n            ('Instance hourly price',\n             '${:.3f}'.format(self.node_info.get('Price')),\n             None,\n             '') if self.node_info.get('Price') else None,\n\n            ('Max CPU usage in a single interval',\n             self.stats_max['cpu']['user+sys__rate'],\n             lambda x: x * 100,\n             '%'),\n\n            ('Overall CPU usage',\n             float(self.job_tot['cpu']['user+sys']) /\n             self.job_tot['time']['elapsed']\n             if self.job_tot['time']['elapsed'] > 0 else 0,\n             lambda x: x * 100,\n             '%'),\n\n            ('Requested CPU cores',\n             self.existing_constraints.get(self._map_runtime_constraint('vcpus')),\n             None,\n             '') if self.existing_constraints.get(self._map_runtime_constraint('vcpus')) else None,\n\n            ('Instance VCPUs',\n             self.node_info.get('VCPUs'),\n             None,\n             '') if self.node_info.get('VCPUs') else None,\n\n            ('Max memory used{}'.format(by_single_task),\n             self.stats_max['mem']['rss'],\n             lambda x: x / 
2**20,\n             'MB'),\n\n            ('Requested RAM',\n             self.existing_constraints.get(self._map_runtime_constraint('ram')),\n             lambda x: x / 2**20,\n             'MB') if self.existing_constraints.get(self._map_runtime_constraint('ram')) else None,\n\n            ('Maximum RAM request for this instance type',\n             (self.node_info.get('RAM') - self.arv_config.get('Containers', {}).get('ReserveExtraRAM', 0))*.95,\n             lambda x: x / 2**20,\n             'MB') if self.node_info.get('RAM') else None,\n\n            ('Max network traffic{}'.format(by_single_task),\n             self.stats_max['net:eth0']['tx+rx'] +\n             self.stats_max['net:keep0']['tx+rx'],\n             lambda x: x / 1e9,\n             'GB'),\n\n            ('Max network speed in a single interval',\n             self.stats_max['net:eth0']['tx+rx__rate'] +\n             self.stats_max['net:keep0']['tx+rx__rate'],\n             lambda x: x / 1e6,\n             'MB/s'),\n\n            ('Keep cache miss rate',\n             (float(self.job_tot['keepcache']['miss']) /\n              float(self.job_tot['keepcalls']['get']))\n             if self.job_tot['keepcalls']['get'] > 0 else 0,\n             lambda x: x * 100.0,\n             '%'),\n\n            ('Keep cache utilization',\n             (float(self.job_tot['blkio:0:0']['read']) /\n              float(self.job_tot['net:keep0']['rx']))\n             if self.job_tot['net:keep0']['rx'] > 0 else 0,\n             lambda x: x * 100.0,\n             '%'),\n\n            ('Temp disk utilization',\n             (float(self.job_tot['statfs']['used']) /\n              float(self.job_tot['statfs']['total']))\n             if self.job_tot['statfs']['total'] > 0 else 0,\n             lambda x: x * 100.0,\n             '%'),\n        ]\n\n        if len(self.tasks) > 1:\n            metrics.insert(0, ('Number of tasks',\n                 len(self.tasks),\n                 None,\n                 ''))\n        for args in metrics:\n            if args is None:\n                continue\n            format_string, val, transform, suffix = args\n            if val == float('-Inf'):\n                continue\n            if transform:\n                val = transform(val)\n            yield aggformat((format_string, self._format(val), suffix))\n\n    def _recommend_gen(self, recommendformat):\n        # TODO recommend fixing job granularity if elapsed time is too short\n\n        if self.stats_max['time'].get('elapsed', 0) <= 20:\n            # Not enough data\n            return []\n\n        return itertools.chain(\n            self._recommend_cpu(recommendformat),\n            self._recommend_ram(recommendformat),\n            self._recommend_keep_cache(recommendformat),\n            self._recommend_temp_disk(recommendformat),\n            )\n\n    def _recommend_cpu(self, recommendformat):\n        \"\"\"Recommend asking for 4 cores if max CPU usage was 333%\"\"\"\n\n        constraint_key = self._map_runtime_constraint('vcpus')\n        cpu_max_rate = self.stats_max['cpu']['user+sys__rate']\n        if cpu_max_rate == float('-Inf') or cpu_max_rate == 0.0:\n            logger.warning('%s: no CPU usage data', self.label)\n            return\n        # TODO Don't necessarily want to recommend on isolated max peak\n        # take average CPU usage into account as well or % time at max\n        used_cores = max(1, int(math.ceil(cpu_max_rate)))\n        asked_cores = self.existing_constraints.get(constraint_key)\n        if asked_cores is 
None:\n            asked_cores = 1\n\n        if used_cores < (asked_cores*.5):\n            yield recommendformat(\n                '{} peak CPU usage was only {}% out of possible {}% ({} cores requested)'\n            ).format(\n                self.label,\n                math.ceil(cpu_max_rate*100),\n                asked_cores*100, asked_cores)\n\n    # FIXME: This needs to be updated to account for current a-d-c algorithms\n    def _recommend_ram(self, recommendformat):\n        \"\"\"Recommend an economical RAM constraint for this job.\n\n        Nodes that are advertised as \"8 gibibytes\" actually have what\n        we might call \"8 nearlygibs\" of memory available for jobs.\n        Here, we calculate a whole number of nearlygibs that would\n        have sufficed to run the job, then recommend requesting a node\n        with that number of nearlygibs (expressed as mebibytes).\n\n        Requesting a node with \"nearly 8 gibibytes\" is our best hope\n        of getting a node that actually has nearly 8 gibibytes\n        available.  If the node manager is smart enough to account for\n        the discrepancy itself when choosing/creating a node, we'll\n        get an 8 GiB node with nearly 8 GiB available.  Otherwise, the\n        advertised size of the next-size-smaller node (say, 6 GiB)\n        will be too low to satisfy our request, so we will effectively\n        get rounded up to 8 GiB.\n\n        For example, if we need 7500 MiB, we can ask for 7500 MiB, and\n        we will generally get a node that is advertised as \"8 GiB\" and\n        has at least 7500 MiB available.  However, asking for 8192 MiB\n        would either result in an unnecessarily expensive 12 GiB node\n        (if node manager knows about the discrepancy), or an 8 GiB\n        node which has less than 8192 MiB available and is therefore\n        considered by crunch-dispatch to be too small to meet our\n        constraint.\n\n        When node manager learns how to predict the available memory\n        for each node type such that crunch-dispatch always agrees\n        that a node is big enough to run the job it was brought up\n        for, all this will be unnecessary.  
We'll just ask for exactly\n        the memory we want -- even if that happens to be 8192 MiB.\n        \"\"\"\n\n        constraint_key = self._map_runtime_constraint('ram')\n        used_bytes = self.stats_max['mem']['rss']\n        if used_bytes == float('-Inf'):\n            logger.warning('%s: no memory usage data', self.label)\n            return\n        if not self.existing_constraints.get(constraint_key):\n            return\n        used_mib = math.ceil(float(used_bytes) / MB)\n        asked_mib = self.existing_constraints.get(constraint_key) / MB\n\n        nearlygibs = lambda mebibytes: mebibytes/AVAILABLE_RAM_RATIO/1024\n        ratio = 0.5\n        recommend_mib = int(math.ceil(nearlygibs(used_mib/ratio))*AVAILABLE_RAM_RATIO*1024)\n        if used_mib > 0 and (used_mib / asked_mib) < ratio and asked_mib > recommend_mib:\n            yield recommendformat(\n                '{} peak RAM usage was only {}% ({} MiB used / {} MiB requested)'\n            ).format(\n                self.label,\n                int(math.ceil(100*(used_mib / asked_mib))),\n                int(used_mib),\n                int(asked_mib))\n\n    def _recommend_keep_cache(self, recommendformat):\n        \"\"\"Recommend increasing keep cache if utilization < 50%.\n\n        This means the amount of data returned to the program is less\n        than 50% of the amount of data actually downloaded by\n        arv-mount.\n        \"\"\"\n\n        if self.job_tot['net:keep0']['rx'] == 0:\n            return\n\n        miss_rate = (float(self.job_tot['keepcache']['miss']) /\n                     float(self.job_tot['keepcalls']['get']))\n\n        utilization = (float(self.job_tot['blkio:0:0']['read']) /\n                       float(self.job_tot['net:keep0']['rx']))\n        # FIXME: the default on this get won't work correctly\n        asked_cache = self.existing_constraints.get('keep_cache_ram') or self.existing_constraints.get('keep_cache_disk')\n\n        if utilization < 0.5 and miss_rate > .05:\n            yield recommendformat(\n                '{} Keep cache utilization was only {:.2f}% and miss rate was {:.2f}% -- '\n                'recommend increasing keep_cache'\n            ).format(\n                self.label,\n                utilization * 100.0,\n                miss_rate * 100.0)\n\n\n    def _recommend_temp_disk(self, recommendformat):\n        \"\"\"This recommendation is disabled for the time being.  It was\n        using the total disk on the node and not the amount of disk\n        requested, so it would trigger a false positive basically\n        every time.  To get the amount of disk requested we need to\n        fish it out of the mounts, which is extra work I don't want to do\n        right now.  
You can find the old code at commit 616d135e77\n\n        \"\"\"\n\n        return []\n\n\n    def _format(self, val):\n        \"\"\"Return a string representation of a stat.\n\n        {:.2f} for floats, default format for everything else.\"\"\"\n        if isinstance(val, float):\n            return '{:.2f}'.format(val)\n        else:\n            return '{}'.format(val)\n\n    def _runtime_constraint_mem_unit(self):\n        if hasattr(self, 'runtime_constraint_mem_unit'):\n            return self.runtime_constraint_mem_unit\n        else:\n            return ContainerRequestSummarizer.runtime_constraint_mem_unit\n\n    def _map_runtime_constraint(self, key):\n        return key\n\n\nclass CollectionSummarizer(Summarizer):\n    def __init__(self, collection_id, **kwargs):\n        super(CollectionSummarizer, self).__init__(\n            crunchstat_summary.reader.CollectionReader(collection_id), **kwargs)\n        self.label = collection_id\n\n\ndef NewSummarizer(process_or_uuid, **kwargs):\n    \"\"\"Construct with the appropriate subclass for this uuid/object.\"\"\"\n\n    if isinstance(process_or_uuid, dict):\n        process = process_or_uuid\n        uuid = process['uuid']\n    else:\n        uuid = process_or_uuid\n        process = None\n        arv = kwargs.get(\"arv\") or arvados.api('v1')\n\n    if '-dz642-' in uuid:\n        if process is None:\n            # Get the associated CR. Doesn't matter which since they all have the same logs\n            crs = arv.container_requests().list(filters=[['container_uuid','=',uuid]],limit=1).execute()['items']\n            if len(crs) > 0:\n                process = crs[0]\n        klass = ContainerRequestTreeSummarizer\n    elif '-xvhdp-' in uuid:\n        if process is None:\n            process = arv.container_requests().get(uuid=uuid).execute()\n        klass = ContainerRequestTreeSummarizer\n    elif '-4zz18-' in uuid:\n        return CollectionSummarizer(collection_id=uuid)\n    else:\n        raise ValueError(\"Unrecognized uuid {!r}\".format(uuid))\n    return klass(process, uuid=uuid, **kwargs)\n\n\nclass ProcessSummarizer(Summarizer):\n    \"\"\"Process is a job, pipeline, or container request.\"\"\"\n\n    def __init__(self, process, label=None, **kwargs):\n        rdr = None\n        self.process = process\n        arv = kwargs.get(\"arv\") or arvados.api('v1')\n        if label is None:\n            label = self.process.get('name', self.process['uuid'])\n        # Pre-Arvados v1.4 everything is in 'log'\n        # For 1.4+ containers have no logs and container_requests have them in 'log_uuid', not 'log'\n        log_collection = self.process.get('log', self.process.get('log_uuid'))\n        if log_collection and self.process.get('state') != 'Uncommitted': # arvados.util.CR_UNCOMMITTED\n            try:\n                rdr = crunchstat_summary.reader.CollectionReader(\n                    log_collection,\n                    api_client=arv,\n                    collection_object=kwargs.get(\"collection_object\"))\n            except arvados.errors.NotFoundError as e:\n                logger.warning(\"Trying event logs after failing to read \"\n                               \"log collection %s: %s\", log_collection, e)\n        if rdr is None:\n            uuid = self.process.get('container_uuid', self.process.get('uuid'))\n            rdr = crunchstat_summary.reader.LiveLogReader(uuid)\n            label = label + ' (partial)'\n\n        super(ProcessSummarizer, self).__init__(rdr, label=label, **kwargs)\n        
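# Keep the request's constraints, cluster config, and cost on hand so the recommenders can compare them with observed usage\n        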
self.existing_constraints = self.process.get('runtime_constraints', {})\n        self.arv_config = arv.config()\n        self.cost = self.process.get('cost', 0)\n\n\nclass ContainerRequestSummarizer(ProcessSummarizer):\n    runtime_constraint_mem_unit = 1\n\n\nclass MultiSummarizer(object):\n    def __init__(self, children={}, label=None, threads=1, **kwargs):\n        self.children = children\n        self.label = label\n        self.threadcount = threads\n\n    def run(self):\n        if self.threadcount > 1 and len(self.children) > 1:\n            completed = 0\n            def run_and_progress(child):\n                # 'completed' is bound in the enclosing run() scope; without\n                # this declaration the increment below raises UnboundLocalError\n                nonlocal completed\n                try:\n                    child.run()\n                except Exception as e:\n                    logger.exception(\"parse error\")\n                completed += 1\n                logger.info(\"%s/%s summarized %s\", completed, len(self.children), child.label)\n            with ThreadPoolExecutor(max_workers=self.threadcount) as tpe:\n                for child in self.children.values():\n                    tpe.submit(run_and_progress, child)\n        else:\n            for child in self.children.values():\n                child.run()\n\n    def text_report(self):\n        txt = ''\n        d = self._descendants()\n        for child in d.values():\n            if len(d) > 1:\n                txt += '### Summary for {} ({})\\n'.format(\n                    child.label, child.process['uuid'])\n            txt += child.text_report()\n            txt += '\\n'\n        return txt\n\n    def _descendants(self):\n        \"\"\"Dict of self and all descendants.\n\n        Nodes with nothing of their own to report (like\n        MultiSummarizers) are omitted.\n        \"\"\"\n        d = collections.OrderedDict()\n        for key, child in self.children.items():\n            if isinstance(child, Summarizer):\n                d[key] = child\n            if isinstance(child, MultiSummarizer):\n                d.update(child._descendants())\n        return d\n\n    def html_report(self):\n        tophtml = \"\"\n        bottomhtml = \"\"\n        label = self.label\n        if len(self._descendants()) == 1:\n            summarizer = next(iter(self._descendants().values()))\n            tophtml = \"\"\"{}\\n<table class='aggtable'><tbody>{}</tbody></table>\\n\"\"\".format(\n                \"\\n\".join(summarizer._recommend_gen(lambda x: \"<p>{}</p>\".format(x))),\n                \"\\n\".join(summarizer._text_report_agg_gen(lambda x: \"<tr><th>{}</th><td>{}{}</td></tr>\".format(*x))))\n\n            bottomhtml = \"\"\"<table class='metricstable'><tbody>{}</tbody></table>\\n\"\"\".format(\n                \"\\n\".join(summarizer._text_report_table_gen(lambda x: \"<tr><th>{}</th><th>{}</th><th>{}</th><th>{}</th><th>{}</th></tr>\".format(*x),\n                                                            lambda x: \"<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td><td>{}</td></tr>\".format(*x))))\n            label = summarizer.long_label()\n\n        return WEBCHART_CLASS(label, iter(self._descendants().values()), [tophtml], [bottomhtml]).html()\n\n\nclass ContainerRequestTreeSummarizer(MultiSummarizer):\n    def __init__(self, root, skip_child_jobs=False, **kwargs):\n        arv = kwargs.get(\"arv\") or arvados.api('v1')\n\n        label = kwargs.pop('label', None) or root.get('name') or root['uuid']\n        root['name'] = label\n\n        children = collections.OrderedDict()\n        todo = collections.deque((root, ))\n        while len(todo) > 0:\n            current = 
todo.popleft()\n            label = current['name']\n            sort_key = current['created_at']\n\n            summer = ContainerRequestSummarizer(current, label=label, **kwargs)\n            summer.sort_key = sort_key\n            children[current['uuid']] = summer\n\n            if skip_child_jobs:\n                child_crs = arv.container_requests().list(filters=[['requesting_container_uuid', '=', current['container_uuid']]],\n                                                          limit=0).execute()\n                logger.warning('%s: omitting stats from %d child containers'\n                               ' because --skip-child-jobs flag is on',\n                               label, child_crs['items_available'])\n            else:\n                for cr in arvados.util.keyset_list_all(arv.container_requests().list,\n                                                       filters=[['requesting_container_uuid', '=', current['container_uuid']]]):\n                    if cr['container_uuid']:\n                        logger.debug('%s: container req %s', current['uuid'], cr['uuid'])\n                        cr['name'] = cr.get('name') or cr['uuid']\n                        todo.append(cr)\n        sorted_children = collections.OrderedDict()\n        for uuid in sorted(list(children.keys()), key=lambda uuid: children[uuid].sort_key):\n            sorted_children[uuid] = children[uuid]\n        super(ContainerRequestTreeSummarizer, self).__init__(\n            children=sorted_children,\n            label=root['name'],\n            **kwargs)\n"
  },
  {
    "path": "tools/crunchstat-summary/crunchstat_summary/synchronizer.js",
    "content": "// Copyright (c) 2009 Dan Vanderkam. All rights reserved.\n//\n// SPDX-License-Identifier: MIT\n\n/**\n * Synchronize zooming and/or selections between a set of dygraphs.\n *\n * Usage:\n *\n *   var g1 = new Dygraph(...),\n *       g2 = new Dygraph(...),\n *       ...;\n *   var sync = Dygraph.synchronize(g1, g2, ...);\n *   // charts are now synchronized\n *   sync.detach();\n *   // charts are no longer synchronized\n *\n * You can set options using the last parameter, for example:\n *\n *   var sync = Dygraph.synchronize(g1, g2, g3, {\n *      selection: true,\n *      zoom: true\n *   });\n *\n * The default is to synchronize both of these.\n *\n * Instead of passing one Dygraph object as each parameter, you may also pass an\n * array of dygraphs:\n *\n *   var sync = Dygraph.synchronize([g1, g2, g3], {\n *      selection: false,\n *      zoom: true\n *   });\n *\n * You may also set `range: false` if you wish to only sync the x-axis.\n * The `range` option has no effect unless `zoom` is true (the default).\n *\n * Original source: https://github.com/danvk/dygraphs/blob/master/src/extras/synchronizer.js\n * at commit b55a71d768d2f8de62877c32b3aec9e9975ac389\n *\n * Copyright (c) 2009 Dan Vanderkam\n *\n * Permission is hereby granted, free of charge, to any person\n * obtaining a copy of this software and associated documentation\n * files (the \"Software\"), to deal in the Software without\n * restriction, including without limitation the rights to use,\n * copy, modify, merge, publish, distribute, sublicense, and/or sell\n * copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following\n * conditions:\n *\n * The above copyright notice and this permission notice shall be\n * included in all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n * OTHER DEALINGS IN THE SOFTWARE.\n */\n(function() {\n/* global Dygraph:false */\n'use strict';\n\nvar Dygraph;\nif (window.Dygraph) {\n  Dygraph = window.Dygraph;\n} else if (typeof(module) !== 'undefined') {\n  Dygraph = require('../dygraph');\n}\n\nvar synchronize = function(/* dygraphs..., opts */) {\n  if (arguments.length === 0) {\n    throw 'Invalid invocation of Dygraph.synchronize(). 
Need >= 1 argument.';\n  }\n\n  var OPTIONS = ['selection', 'zoom', 'range'];\n  var opts = {\n    selection: true,\n    zoom: true,\n    range: true\n  };\n  var dygraphs = [];\n  var prevCallbacks = [];\n\n  var parseOpts = function(obj) {\n    if (!(obj instanceof Object)) {\n      throw 'Last argument must be either Dygraph or Object.';\n    } else {\n      for (var i = 0; i < OPTIONS.length; i++) {\n        var optName = OPTIONS[i];\n        if (obj.hasOwnProperty(optName)) opts[optName] = obj[optName];\n      }\n    }\n  };\n\n  if (arguments[0] instanceof Dygraph) {\n    // Arguments are Dygraph objects.\n    for (var i = 0; i < arguments.length; i++) {\n      if (arguments[i] instanceof Dygraph) {\n        dygraphs.push(arguments[i]);\n      } else {\n        break;\n      }\n    }\n    if (i < arguments.length - 1) {\n      throw 'Invalid invocation of Dygraph.synchronize(). ' +\n            'All but the last argument must be Dygraph objects.';\n    } else if (i == arguments.length - 1) {\n      parseOpts(arguments[arguments.length - 1]);\n    }\n  } else if (arguments[0].length) {\n    // Invoked w/ list of dygraphs, options\n    for (var i = 0; i < arguments[0].length; i++) {\n      dygraphs.push(arguments[0][i]);\n    }\n    if (arguments.length == 2) {\n      parseOpts(arguments[1]);\n    } else if (arguments.length > 2) {\n      throw 'Invalid invocation of Dygraph.synchronize(). ' +\n            'Expected two arguments: array and optional options argument.';\n    }  // otherwise arguments.length == 1, which is fine.\n  } else {\n    throw 'Invalid invocation of Dygraph.synchronize(). ' +\n          'First parameter must be either Dygraph or list of Dygraphs.';\n  }\n\n  if (dygraphs.length < 2) {\n    throw 'Invalid invocation of Dygraph.synchronize(). 
' +\n          'Need two or more dygraphs to synchronize.';\n  }\n\n  var readycount = dygraphs.length;\n  for (var i = 0; i < dygraphs.length; i++) {\n    var g = dygraphs[i];\n    g.ready( function() {\n      if (--readycount == 0) {\n        // store original callbacks\n        var callBackTypes = ['drawCallback', 'highlightCallback', 'unhighlightCallback'];\n        for (var j = 0; j < dygraphs.length; j++) {\n          if (!prevCallbacks[j]) {\n            prevCallbacks[j] = {};\n          }\n          for (var k = callBackTypes.length - 1; k >= 0; k--) {\n            prevCallbacks[j][callBackTypes[k]] = dygraphs[j].getFunctionOption(callBackTypes[k]);\n          }\n        }\n\n        // Listen for draw, highlight, unhighlight callbacks.\n        if (opts.zoom) {\n          attachZoomHandlers(dygraphs, opts, prevCallbacks);\n        }\n\n        if (opts.selection) {\n          attachSelectionHandlers(dygraphs, prevCallbacks);\n        }\n      }\n    });\n  }\n\n  return {\n    detach: function() {\n      for (var i = 0; i < dygraphs.length; i++) {\n        var g = dygraphs[i];\n        if (opts.zoom) {\n          g.updateOptions({drawCallback: prevCallbacks[i].drawCallback});\n        }\n        if (opts.selection) {\n          g.updateOptions({\n            highlightCallback: prevCallbacks[i].highlightCallback,\n            unhighlightCallback: prevCallbacks[i].unhighlightCallback\n          });\n        }\n      }\n      // release references & make subsequent calls throw.\n      dygraphs = null;\n      opts = null;\n      prevCallbacks = null;\n    }\n  };\n};\n\nfunction arraysAreEqual(a, b) {\n  if (!Array.isArray(a) || !Array.isArray(b)) return false;\n  var i = a.length;\n  if (i !== b.length) return false;\n  while (i--) {\n    if (a[i] !== b[i]) return false;\n  }\n  return true;\n}\n\nfunction attachZoomHandlers(gs, syncOpts, prevCallbacks) {\n  var block = false;\n  for (var i = 0; i < gs.length; i++) {\n    var g = gs[i];\n    g.updateOptions({\n      drawCallback: function(me, initial) {\n        if (block || initial) return;\n        block = true;\n        var opts = {\n          dateWindow: me.xAxisRange()\n        };\n        if (syncOpts.range) opts.valueRange = me.yAxisRange();\n\n        for (var j = 0; j < gs.length; j++) {\n          if (gs[j] == me) {\n            if (prevCallbacks[j] && prevCallbacks[j].drawCallback) {\n              prevCallbacks[j].drawCallback.apply(this, arguments);\n            }\n            continue;\n          }\n\n          // Only redraw if there are new options\n          if (arraysAreEqual(opts.dateWindow, gs[j].getOption('dateWindow')) && \n              arraysAreEqual(opts.valueRange, gs[j].getOption('valueRange'))) {\n            continue;\n          }\n\n          gs[j].updateOptions(opts);\n        }\n        block = false;\n      }\n    }, true /* no need to redraw */);\n  }\n}\n\nfunction attachSelectionHandlers(gs, prevCallbacks) {\n  var block = false;\n  for (var i = 0; i < gs.length; i++) {\n    var g = gs[i];\n\n    g.updateOptions({\n      highlightCallback: function(event, x, points, row, seriesName) {\n        if (block) return;\n        block = true;\n        var me = this;\n        for (var i = 0; i < gs.length; i++) {\n          if (me == gs[i]) {\n            if (prevCallbacks[i] && prevCallbacks[i].highlightCallback) {\n              prevCallbacks[i].highlightCallback.apply(this, arguments);\n            }\n            continue;\n          }\n          var idx = gs[i].getRowForX(x);\n          if (idx !== 
null) {\n            gs[i].setSelection(idx, seriesName);\n          }\n        }\n        block = false;\n      },\n      unhighlightCallback: function(event) {\n        if (block) return;\n        block = true;\n        var me = this;\n        for (var i = 0; i < gs.length; i++) {\n          if (me == gs[i]) {\n            if (prevCallbacks[i] && prevCallbacks[i].unhighlightCallback) {\n              prevCallbacks[i].unhighlightCallback.apply(this, arguments);\n            }\n            continue;\n          }\n          gs[i].clearSelection();\n        }\n        block = false;\n      }\n    }, true /* no need to redraw */);\n  }\n}\n\nDygraph.synchronize = synchronize;\n\n})();\n"
  },
  {
    "path": "tools/crunchstat-summary/fpm-info.sh",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ncase \"$TARGET\" in\n    debian12 | ubuntu2204 )\n        fpm_depends+=(libcurl4)\n        ;;\n\n    debian* | ubuntu* )\n        fpm_depends+=(libcurl4t64)\n        ;;\nesac\n"
  },
  {
    "path": "tools/crunchstat-summary/pyproject.toml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[build-system]\nrequires = [\"setuptools ~= 80.9\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\ndynamic = [\"dependencies\", \"version\"]\nname = \"crunchstat_summary\"\ndescription = \"Report resource usage from Arvados Crunch logs\"\nauthors = [\n  {name = \"Arvados\", email = \"info@arvados.org\"},\n]\nclassifiers = [\n  \"Development Status :: 5 - Production/Stable\",\n  \"Environment :: Console\",\n  \"Intended Audience :: Science/Research\",\n  \"Operating System :: POSIX\",\n  \"Programming Language :: Python :: 3\",\n  \"Programming Language :: Python :: 3.10\",\n  \"Programming Language :: Python :: 3.11\",\n  \"Programming Language :: Python :: 3.12\",\n  \"Programming Language :: Python :: 3.13\",\n  \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n]\nlicense = \"AGPL-3.0-only\"\nlicense-files = [\n  \"agpl-3.0.txt\",\n]\nreadme = \"README.rst\"\nrequires-python = \"~= 3.10\"\n\n[project.urls]\nHomepage = \"https://arvados.org\"\nDocumentation = \"https://doc.arvados.org\"\nRepository = \"https://github.com/arvados/arvados\"\nIssues = \"https://github.com/arvados/arvados/issues\"\nChangelog = \"https://arvados.org/releases/\"\n\n[tool.setuptools]\nscript-files = [\n  \"bin/crunchstat-summary\",\n]\n\n[tool.setuptools.data-files]\n\"share/doc/crunchstat_summary\" = [\n  \"agpl-3.0.txt\",\n  \"README.rst\",\n]\n\n[tool.setuptools.packages.find]\nexclude = [\"tests*\"]\n"
  },
  {
    "path": "tools/crunchstat-summary/setup.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport setuptools\nimport runpy\n\nfrom pathlib import Path\n\narvados_version = runpy.run_path(Path(__file__).with_name('arvados_version.py'))\narv_mod = arvados_version['ARVADOS_PYTHON_MODULES']['crunchstat_summary']\nversion = arv_mod.get_version()\nsetuptools.setup(\n    cmdclass=arvados_version['CMDCLASS'],\n    install_requires=[\n        *arv_mod.iter_dependencies(version=version),\n    ],\n    version=version,\n)\n"
  },
  {
    "path": "tools/crunchstat-summary/tests/__init__.py",
    "content": ""
  },
  {
    "path": "tools/crunchstat-summary/tests/container_9tee4-dz642-lymtndkpy39eibk.txt.gz.report",
    "content": "category\tmetric\ttask_max\ttask_max_rate\tjob_total\nblkio:0:0\tread\t0\t0\t0\nblkio:0:0\twrite\t0\t0\t0\ncpu\tcpus\t20.00\t-\t-\ncpu\tsys\t0.39\t0.04\t0.39\ncpu\tuser\t2.06\t0.20\t2.06\ncpu\tuser+sys\t2.45\t0.24\t2.45\nfuseops\tread\t0\t0\t0\nfuseops\twrite\t0\t0\t0\nkeepcache\thit\t0\t0\t0\nkeepcache\tmiss\t0\t0\t0\nkeepcalls\tget\t0\t0\t0\nkeepcalls\tput\t0\t0\t0\nmem\tcache\t172032\t-\t-\nmem\tpgmajfault\t0\t-\t0\nmem\trss\t69525504\t-\t-\nmem\tswap\t0\t-\t-\nnet:eth0\trx\t859480\t1478.97\t859480\nnet:eth0\ttx\t55888\t395.71\t55888\nnet:eth0\ttx+rx\t915368\t1874.69\t915368\nnet:keep0\trx\t0\t0\t0\nnet:keep0\ttx\t0\t0\t0\nnet:keep0\ttx+rx\t0\t0\t0\nstatfs\tavailable\t397744787456\t-\t397744787456\nstatfs\ttotal\t402611240960\t-\t402611240960\nstatfs\tused\t4870303744\t52426.18\t4866453504\ntime\telapsed\t20\t-\t20\n# Elapsed time: 20s\n# Max CPU usage in a single interval: 23.70%\n# Overall CPU usage: 12.25%\n# Requested CPU cores: 1\n# Max memory used: 66.30MB\n# Requested RAM: 2500.00MB\n# Max network traffic: 0.00GB\n# Max network speed in a single interval: 0.00MB/s\n# Keep cache miss rate: 0.00%\n# Keep cache utilization: 0.00%\n# Temp disk utilization: 1.21%\n"
  },
  {
    "path": "tools/crunchstat-summary/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt.gz.report",
    "content": "category\tmetric\ttask_max\ttask_max_rate\tjob_total\nblkio:0:0\tread\t0\t0\t0\nblkio:0:0\twrite\t0\t0\t0\nfuseops\tread\t0\t0\t0\nfuseops\twrite\t0\t0\t0\nkeepcache\thit\t0\t0\t0\nkeepcache\tmiss\t0\t0\t0\nkeepcalls\tget\t0\t0\t0\nkeepcalls\tput\t0\t0\t0\nnet:keep0\trx\t0\t0\t0\nnet:keep0\ttx\t0\t0\t0\nnet:keep0\ttx+rx\t0\t0\t0\ntime\telapsed\t10\t-\t10\n# Elapsed time: 10s\n# Max CPU usage in a single interval: 0%\n# Overall CPU usage: 0.00%\n# Max memory used: 0.00MB\n# Max network traffic: 0.00GB\n# Max network speed in a single interval: 0.00MB/s\n# Keep cache miss rate: 0.00%\n# Keep cache utilization: 0.00%\n# Temp disk utilization: 0.00%\n"
  },
  {
    "path": "tools/crunchstat-summary/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt.gz.report",
    "content": "category\tmetric\ttask_max\ttask_max_rate\tjob_total\ncpu\tcpus\t20.00\t-\t-\ncpu\tsys\t0.39\t0.04\t0.39\ncpu\tuser\t2.06\t0.20\t2.06\ncpu\tuser+sys\t2.45\t0.24\t2.45\nmem\tcache\t172032\t-\t-\nmem\tpgmajfault\t0\t-\t0\nmem\trss\t69525504\t-\t-\nmem\tswap\t0\t-\t-\nnet:eth0\trx\t859480\t1478.97\t859480\nnet:eth0\ttx\t55888\t395.71\t55888\nnet:eth0\ttx+rx\t915368\t1874.69\t915368\nstatfs\tavailable\t397744787456\t-\t397744787456\nstatfs\ttotal\t402611240960\t-\t402611240960\nstatfs\tused\t4870303744\t52426.18\t4866453504\ntime\telapsed\t20\t-\t20\n# Elapsed time: 20s\n# Max CPU usage in a single interval: 23.70%\n# Overall CPU usage: 12.25%\n# Max memory used: 66.30MB\n# Max network traffic: 0.00GB\n# Max network speed in a single interval: 0.00MB/s\n# Keep cache miss rate: 0.00%\n# Keep cache utilization: 0.00%\n# Temp disk utilization: 1.21%\n"
  },
  {
    "path": "tools/crunchstat-summary/tests/container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y.txt.gz.report",
    "content": "category\tmetric\ttask_max\ttask_max_rate\tjob_total\nblkio:0:0\tread\t0\t0\t0\nblkio:0:0\twrite\t0\t0\t0\ncpu\tcpus\t20.00\t-\t-\ncpu\tsys\t0.39\t0.04\t0.39\ncpu\tuser\t2.06\t0.20\t2.06\ncpu\tuser+sys\t2.45\t0.24\t2.45\nfuseops\tread\t0\t0\t0\nfuseops\twrite\t0\t0\t0\nkeepcache\thit\t0\t0\t0\nkeepcache\tmiss\t0\t0\t0\nkeepcalls\tget\t0\t0\t0\nkeepcalls\tput\t0\t0\t0\nmem\tcache\t172032\t-\t-\nmem\tpgmajfault\t0\t-\t0\nmem\trss\t69525504\t-\t-\nmem\tswap\t0\t-\t-\nnet:eth0\trx\t859480\t1478.97\t859480\nnet:eth0\ttx\t55888\t395.71\t55888\nnet:eth0\ttx+rx\t915368\t1874.69\t915368\nnet:keep0\trx\t0\t0\t0\nnet:keep0\ttx\t0\t0\t0\nnet:keep0\ttx+rx\t0\t0\t0\nstatfs\tavailable\t397744787456\t-\t397744787456\nstatfs\ttotal\t402611240960\t-\t402611240960\nstatfs\tused\t4870303744\t52426.18\t4866453504\ntime\telapsed\t20\t-\t20\n# Elapsed time: 20s\n# Max CPU usage in a single interval: 23.70%\n# Overall CPU usage: 12.25%\n# Requested CPU cores: 1\n# Max memory used: 66.30MB\n# Requested RAM: 2500.00MB\n# Max network traffic: 0.00GB\n# Max network speed in a single interval: 0.00MB/s\n# Keep cache miss rate: 0.00%\n# Keep cache utilization: 0.00%\n# Temp disk utilization: 1.21%\n"
  },
  {
    "path": "tools/crunchstat-summary/tests/crunchstat_error_messages.txt",
    "content": "2016-01-07_00:15:33 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr\n2016-01-07_00:15:33 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr old error message:\n2016-01-07_00:15:33 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr crunchstat: read /proc/3305/net/dev: open /proc/3305/net/dev: no such file or directory\n2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr\n2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr new error message:\n2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr crunchstat: error reading /proc/3305/net/dev: open /proc/3305/net/dev: no such file or directory\n2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr\n2016-01-07_00:15:34 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr cancelled job:\n2016-01-07_00:15:59 tb05z-8i9sb-khsk5rmf4xjdcbl 20819 0 stderr crunchstat: caught signal: interrupt\n"
  },
  {
    "path": "tools/crunchstat-summary/tests/test_examples.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados\nimport collections\nimport crunchstat_summary.command\nimport difflib\nimport glob\nimport gzip\nimport io\nimport logging\nimport os\nimport sys\nimport unittest\n\nfrom unittest import mock\n\nfrom crunchstat_summary.command import UTF8Decode\nfrom crunchstat_summary import logger, reader\n\nTESTS_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass TestCase(unittest.TestCase):\n    def setUp(self):\n        self.logbuf = io.StringIO()\n        self.loghandler = logging.StreamHandler(stream=self.logbuf)\n        logger.addHandler(self.loghandler)\n        logger.setLevel(logging.WARNING)\n\n    def tearDown(self):\n        logger.removeHandler(self.loghandler)\n\n    def diff_known_report(self, logfile, cmd):\n        expectfile = logfile+'.report'\n        with io.open(expectfile, encoding='utf-8') as f:\n            expect = f.readlines()\n        self.diff_report(cmd, expect, expectfile=expectfile)\n\n    def diff_report(self, cmd, expect, expectfile='(expected)'):\n        got = [x+\"\\n\" for x in cmd.report().strip(\"\\n\").split(\"\\n\")]\n        self.assertEqual(got, expect, \"\\n\"+\"\".join(difflib.context_diff(\n            expect, got, fromfile=expectfile, tofile=\"(generated)\")))\n\n\nclass SummarizeFile(TestCase):\n    def test_example_files(self):\n        for fnm in glob.glob(os.path.join(TESTS_DIR, '*.txt.gz')):\n            logfile = os.path.join(TESTS_DIR, fnm)\n            args = crunchstat_summary.command.ArgumentParser().parse_args(\n                ['--log-file', logfile])\n            cmd = crunchstat_summary.command.Command(args)\n            cmd.run()\n            self.diff_known_report(logfile, cmd)\n\n\nclass HTMLFromFile(TestCase):\n    def test_example_files(self):\n        # Note we don't test the output content at all yet; we're\n        # mainly just verifying the --format=html option isn't ignored\n        # and the HTML code path doesn't crash.\n        for fnm in glob.glob(os.path.join(TESTS_DIR, '*.txt.gz')):\n            logfile = os.path.join(TESTS_DIR, fnm)\n            args = crunchstat_summary.command.ArgumentParser().parse_args(\n                ['--format=html', '--log-file', logfile])\n            cmd = crunchstat_summary.command.Command(args)\n            cmd.run()\n            self.assertRegex(cmd.report(), r'(?is)<html>.*</html>\\s*$')\n\n\nclass SummarizeEdgeCases(TestCase):\n    def test_error_messages(self):\n        logfile = io.open(os.path.join(TESTS_DIR, 'crunchstat_error_messages.txt'), encoding='utf-8')\n        s = crunchstat_summary.summarizer.Summarizer(reader.StubReader(logfile))\n        s.run()\n        self.assertRegex(self.logbuf.getvalue(), r'CPU stats are missing -- possible cluster configuration issue')\n        self.assertRegex(self.logbuf.getvalue(), r'memory stats are missing -- possible cluster configuration issue')\n        self.assertRegex(self.logbuf.getvalue(), r'network I/O stats are missing -- possible cluster configuration issue')\n        self.assertRegex(self.logbuf.getvalue(), r'storage space stats are missing -- possible cluster configuration issue')\n\nclass SummarizeContainerCommon(TestCase):\n    fake_container = {\n        'uuid': '9tee4-dz642-lymtndkpy39eibk',\n        'created_at': '2017-08-18T14:27:25.371388141',\n        'log': '9tee4-4zz18-ihyzym9tcwjwg4r',\n    }\n    fake_request = {\n        'uuid': '9tee4-xvhdp-kk0ja1cl8b2kr1y',\n        'name': 
'container',\n        'created_at': '2017-08-18T14:27:25.242339223Z',\n        'container_uuid': fake_container['uuid'],\n        'runtime_constraints': {\n            'vcpus': 1,\n            'ram': 2621440000,\n        },\n        'log_uuid': '9tee4-4zz18-m2swj50nk0r8b6y',\n    }\n\n    logfile = os.path.join(\n        TESTS_DIR, 'container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-crunchstat.txt.gz')\n    arvmountlog = os.path.join(\n        TESTS_DIR, 'container_request_9tee4-xvhdp-kk0ja1cl8b2kr1y-arv-mount.txt.gz')\n\n    @mock.patch('arvados.collection.CollectionReader')\n    @mock.patch('arvados.api')\n    def check_common(self, mock_api, mock_cr):\n        items = [{'items': [self.fake_request]}] + [{'items': []}] * 100\n        mock_api().container_requests().list().execute.side_effect = items  # parent request\n        mock_api().container_requests().get().execute.return_value = self.fake_request\n        mock_api().containers().get().execute.return_value = self.fake_container\n        mock_cr().__iter__.return_value = [\n            'crunch-run.txt', 'stderr.txt', 'node-info.txt',\n            'container.json', 'crunchstat.txt', 'arv-mount.txt']\n        # Serve test fixtures in place of files in the log collection.\n        def _open(n, mode):\n            if n == \"crunchstat.txt\":\n                return UTF8Decode(gzip.open(self.logfile))\n            elif n == \"arv-mount.txt\":\n                return UTF8Decode(gzip.open(self.arvmountlog))\n            elif n == \"node.json\":\n                return io.StringIO(\"{}\")\n        mock_cr().open.side_effect = _open\n        args = crunchstat_summary.command.ArgumentParser().parse_args(\n            self.arg_strings)\n        cmd = crunchstat_summary.command.Command(args)\n        cmd.run()\n        self.diff_known_report(self.reportfile, cmd)\n\n\nclass SummarizeContainer(SummarizeContainerCommon):\n    uuid = '9tee4-dz642-lymtndkpy39eibk'\n    reportfile = os.path.join(TESTS_DIR, 'container_%s.txt.gz' % uuid)\n    arg_strings = ['--container', uuid, '-v', '-v']\n\n    def test_container(self):\n        self.check_common()\n\n\nclass SummarizeContainerRequest(SummarizeContainerCommon):\n    uuid = '9tee4-xvhdp-kk0ja1cl8b2kr1y'\n    reportfile = os.path.join(TESTS_DIR, 'container_request_%s.txt.gz' % uuid)\n    arg_strings = ['--container-request', uuid, '-v', '-v']\n\n    def test_container_request(self):\n        self.check_common()\n        self.assertNotRegex(self.logbuf.getvalue(), r'stats are missing')\n        self.assertNotRegex(self.logbuf.getvalue(), r'possible cluster configuration issue')\n"
  },
  {
    "path": "tools/jenkins/submit-ci-dev.sh",
    "content": "#!/bin/sh\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nhead=$(git log --first-parent --max-count=1 --format=%H)\ncurl -X POST https://ci.arvados.org/job/developer-run-tests/build \\\n  --user $(cat ~/.jenkins.ci.arvados.org) \\\n  --data-urlencode json='{\"parameter\": [{\"name\":\"git_hash\", \"value\":\"'$head'\"}]}'\n"
  },
  {
    "path": "tools/keep-xref/keep-xref.py",
    "content": "#!/usr/bin/env python3\n#\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n#\n\nimport argparse\nimport arvados\nimport arvados.util\nimport csv\nimport sys\nimport logging\n\nlglvl = logging.INFO+1\nlogging.basicConfig(level=lglvl, format='%(message)s')\n\n\"\"\"\n Given a list of collections missing blocks (as produced by\nkeep-balance), produce a report listing affected collections and\ncontainer requests.\n\"\"\"\n\ndef rerun_request(arv, container_requests_to_rerun, ct):\n    requests = arvados.util.keyset_list_all(\n        arv.container_requests().list,\n        filters=[[\"container_uuid\", \"=\", ct[\"uuid\"]]],\n        order='uuid')\n    for cr in requests:\n        if cr[\"requesting_container_uuid\"]:\n            rerun_request(arv, container_requests_to_rerun, arv.containers().get(uuid=cr[\"requesting_container_uuid\"]).execute())\n        else:\n            container_requests_to_rerun[cr[\"uuid\"]] = cr\n\ndef get_owner(arv, owners, record):\n    uuid = record[\"owner_uuid\"]\n    if uuid not in owners:\n        if uuid[6:11] == \"tpzed\":\n            owners[uuid] = (arv.users().get(uuid=uuid).execute()[\"full_name\"], uuid)\n        else:\n            grp = arv.groups().get(uuid=uuid).execute()\n            _, ou = get_owner(arv, owners, grp)\n            owners[uuid] = (grp[\"name\"], ou)\n    return owners[uuid]\n\ndef main():\n    parser = argparse.ArgumentParser(description='Re-run containers associated with missing blocks')\n    parser.add_argument('inp')\n    args = parser.parse_args()\n\n    arv = arvados.api('v1')\n\n    busted_collections = set()\n\n    logging.log(lglvl, \"Reading %s\", args.inp)\n\n    # Get the list of bad collection PDHs\n    blocksfile = open(args.inp, \"rt\")\n    for line in blocksfile:\n        # Ignore the first item, that's the block id\n        collections = line.rstrip().split(\" \")[1:]\n        for c in collections:\n            busted_collections.add(c)\n\n    out = csv.writer(sys.stdout)\n\n    out.writerow((\"collection uuid\", \"container request uuid\", \"record name\", \"modified at\", \"owner uuid\", \"owner name\", \"root owner uuid\", \"root owner name\", \"notes\"))\n\n    logging.log(lglvl, \"Finding collections\")\n\n    owners = {}\n    collections_to_delete = {}\n    container_requests_to_rerun = {}\n    # Get containers that produced these collections\n    i = 0\n    for b in busted_collections:\n        if (i % 100) == 0:\n            logging.log(lglvl, \"%d/%d\", i, len(busted_collections))\n        i += 1\n        collections_to_delete = arvados.util.keyset_list_all(arv.collections().list, filters=[[\"portable_data_hash\", \"=\", b]], order='uuid')\n        for d in collections_to_delete:\n            t = \"\"\n            if d[\"properties\"].get(\"type\") not in (\"output\", \"log\"):\n                t = \"\\\"type\\\" was '%s', expected one of 'output' or 'log'\" % d[\"properties\"].get(\"type\")\n            ou = get_owner(arv, owners, d)\n            out.writerow((d[\"uuid\"], \"\", d[\"name\"], d[\"modified_at\"], d[\"owner_uuid\"], ou[0], ou[1], owners[ou[1]][0], t))\n\n        maybe_containers_to_rerun = arvados.util.keyset_list_all(arv.containers().list, filters=[[\"output\", \"=\", b]], order='uuid')\n        for ct in maybe_containers_to_rerun:\n            rerun_request(arv, container_requests_to_rerun, ct)\n\n    logging.log(lglvl, \"%d/%d\", i, len(busted_collections))\n    logging.log(lglvl, \"Finding container requests\")\n\n    i = 
0\n    for _, cr in container_requests_to_rerun.items():\n        if (i % 100) == 0:\n            logging.log(lglvl, \"%d/%d\", i, len(container_requests_to_rerun))\n        i += 1\n        ou = get_owner(arv, owners, cr)\n        out.writerow((\"\", cr[\"uuid\"], cr[\"name\"], cr[\"modified_at\"], cr[\"owner_uuid\"], ou[0], ou[1], owners[ou[1]][0], \"\"))\n\n    logging.log(lglvl, \"%d/%d\", i, len(container_requests_to_rerun))\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "tools/python-metapackage/LICENSE-2.0.txt",
    "content": "\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "tools/python-metapackage/README.md",
    "content": "# Arvados Client Tools\n\n<!-- Copyright (C) The Arvados Authors. All rights reserved.\n\nSPDX-License-Identifier: Apache-2.0 -->\n\n## Overview\n\nThis is a metapackage that lets you install all of the Python client tools for [Arvados][] in one simple command. It's intended for users setting up an interactive environment. It provides:\n\n* the [Arvados Python SDK](https://doc.arvados.org/sdk/python/api-client.html)\n* command line tools to work with collections and projects: [`arv-ls`, `arv-get`](https://doc.arvados.org/user/tutorials/tutorial-keep-get.html#download-using-arv), [`arv-put`](https://doc.arvados.org/user/tutorials/tutorial-keep.html#upload-using-command), [`arv-copy`](https://doc.arvados.org/user/topics/arv-copy.html), and [`arv-mount`](https://doc.arvados.org/user/tutorials/tutorial-keep-mount-gnu-linux.html)\n* Common Workflow Language (CWL) runners [`arvados-cwl-runner`](https://doc.arvados.org/user/cwl/cwl-runner.html) and the reference implementation [`cwltool`](https://pypi.org/project/cwltool/) that it's based on\n* reporting tools for [workflow performance](https://doc.arvados.org/user/cwl/crunchstat-summary.html), [cluster activity](https://doc.arvados.org/user/cwl/costanalyzer.html), and [user activity](https://doc.arvados.org/admin/user-activity.html)\n\nIf you are building your own Arvados client software, it is better to require the specific package(s) you need, such as [arvados-python-client](https://pypi.org/project/arvados-python-client/), instead of this metapackage.\n\n[Arvados]: https://arvados.org/\n\n## Installation\n\nWe recommend you install with `pipx`. First [install `pipx`][install-pipx]—it's available as the `pipx` package in most Linux distributions. Then run:\n\n      pipx install --include-deps arvados-tools\n\n[install-pipx]: https://pipx.pypa.io/latest/how-to/install-pipx/\n\nAlternatively, if you're comfortable setting up your own virtual environments, you can install the package in one too. For example:\n\n      python3 -m venv MYVENV\n      MYVENV/bin/pip install arvados-tools\n\nNow all the Arvados tools will be available after you activate `MYVENV` with `source MYVENV/bin/activate`.\n\n## Configuration\n\nThis client software needs two pieces of information to connect to Arvados: the hostname of the API server and an API authorization token. [The Arvados user documentation](http://doc.arvados.org/user/reference/api-tokens.html) describes how to find this information in the Arvados Workbench and install it on your system.\n\n## Licenses\n\nThe SDK and most command line tools installed are published under the [Apache License 2.0](https://spdx.org/licenses/Apache-2.0.html). `arv-mount` and the reporting tools are published under the [GNU Affero General Public License 3.0](https://spdx.org/licenses/AGPL-3.0-only.html). Refer to the individual component packages for details.\n"
  },
  {
    "path": "tools/python-metapackage/agpl-3.0.txt",
    "content": "                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU Affero General Public License is a free, copyleft license for\nsoftware and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nour General Public Licenses are intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.\n\n  A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.\n\n  The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.\n\n  An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU Affero General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  
\"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. 
Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  
This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  
But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  
If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  
If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  
\"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Remote Network Interaction; Use with the GNU General Public License.\n\n  Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  
This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the work with which it is combined will remain governed by version\n3 of the GNU General Public License.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new versions\nwill be similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU Affero General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU Affero General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. 
Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a \"Source\" link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n<http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "tools/python-metapackage/arvados_version.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport dataclasses\nimport os\nimport re\nimport runpy\nimport subprocess\nimport typing as t\n\nfrom pathlib import Path, PurePath, PurePosixPath\n\nimport setuptools\nimport setuptools.command.build\n\nSETUP_DIR = Path(__file__).absolute().parent\nVERSION_SCRIPT_PATH = PurePath('build', 'version-at-commit.sh')\n# Built by ArvadosPythonPackage.register\nARVADOS_PYTHON_MODULES: dict[str, 'ArvadosPythonPackage'] = {}\n\n### Metadata generation\n\n@dataclasses.dataclass\nclass ArvadosPythonPackage:\n    package_name: str\n    module_name: str\n    src_path: PurePath\n    dependencies: t.Sequence['ArvadosPythonPackage']\n\n    _VERSION_SUBS = {\n        'development-': '',\n        '~dev': '.dev',\n        '~rc': 'rc',\n    }\n\n    @classmethod\n    def register(\n            cls,\n            package_name: str,\n            module_name: str,\n            src_path: PurePath | str,\n            *dependencies: str,\n    ) -> 'ArvadosPythonPackage':\n        if not isinstance(src_path, PurePath):\n            src_path = PurePosixPath(src_path)\n        deps = [ARVADOS_PYTHON_MODULES[key] for key in dependencies]\n        this_pkg = cls(package_name, module_name, src_path, deps)\n        ARVADOS_PYTHON_MODULES[package_name] = this_pkg\n        return this_pkg\n\n    def version_file_path(self):\n        return PurePath(self.module_name, '_version.py')\n\n    def _workspace_path(self, workdir):\n        try:\n            workspace = Path(os.environ['WORKSPACE'])\n            # This will raise ValueError if they're not related,\n            # in which case we don't want to use this $WORKSPACE.\n            workdir.relative_to(workspace)\n        except KeyError:\n            # $WORKSPACE isn't set. 
Fall back to the Git worktree toplevel.\n            try:\n                git_proc = subprocess.run(\n                    ['git', 'rev-parse', '--show-toplevel'],\n                    capture_output=True,\n                    check=True,\n                    cwd=workdir,\n                    text=True,\n                )\n                workspace = Path(git_proc.stdout.removesuffix('\\n'))\n            except (subprocess.CalledProcessError, FileNotFoundError, ValueError):\n                return None\n        except ValueError:\n            return None\n        if (workspace / VERSION_SCRIPT_PATH).exists():\n            return workspace\n        else:\n            return None\n\n    def _git_version(self, workdir):\n        workspace = self._workspace_path(workdir)\n        if workspace is None:\n            return None\n        git_log_cmd = [\n            'git', 'log', '-n1', '--format=%H', '--',\n            str(VERSION_SCRIPT_PATH), str(self.src_path),\n        ]\n        git_log_cmd.extend(str(dep.src_path) for dep in self.dependencies)\n        git_log_proc = subprocess.run(\n            git_log_cmd,\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        version_proc = subprocess.run(\n            [str(VERSION_SCRIPT_PATH), git_log_proc.stdout.rstrip('\\n')],\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        return version_proc.stdout.rstrip('\\n')\n\n    def _sdist_version(self, workdir):\n        try:\n            pkg_info = (workdir / 'PKG-INFO').open()\n        except FileNotFoundError:\n            return None\n        with pkg_info:\n            for line in pkg_info:\n                key, _, val = line.partition(': ')\n                if key == 'Version':\n                    return val.rstrip('\\n')\n        raise Exception(\"found PKG-INFO file but not Version metadata in it\")\n\n    def get_version(self, workdir=SETUP_DIR):\n        version = (\n            # If we're building out of a distribution, we should pass that\n            # version through unchanged.\n            self._sdist_version(workdir)\n            # Otherwise follow the usual Arvados versioning rules.\n            or os.environ.get('ARVADOS_BUILDING_VERSION')\n            or self._git_version(workdir)\n        )\n        if not version:\n            raise Exception(f\"no version information available for {self.package_name}\")\n        else:\n            return re.sub(\n                r'(^development-|~dev|~rc)',\n                lambda match: self._VERSION_SUBS[match.group(0)],\n                version,\n            )\n\n    def get_dependencies_version(self, workdir=SETUP_DIR, version=None):\n        if version is None:\n            version = self.get_version(workdir)\n        # A packaged development release should be installed with other\n        # development packages built from the same source, but those\n        # dependencies may have earlier \"dev\" versions (read: less recent\n        # Git commit timestamps). This compatible version dependency\n        # expresses that as closely as possible. 
Allowing versions\n        # compatible with .dev0 allows any development release.\n        # Regular expression borrowed partially from\n        # <https://packaging.python.org/en/latest/specifications/version-specifiers/#version-specifiers-regex>\n        dep_ver, match_count = re.subn(r'\\.dev(0|[1-9][0-9]*)$', '.dev0', version, 1)\n        return ('~=' if match_count else '==', dep_ver)\n\n    def iter_dependencies(self, workdir=SETUP_DIR, version=None, extras=None):\n        if extras is None:\n            extras = {}\n        dep_op, dep_ver = self.get_dependencies_version(workdir, version)\n        for dep in self.dependencies:\n            try:\n                dep_extras = f'[{\",\".join(extras[dep.package_name])}]'\n            except KeyError:\n                dep_extras = ''\n            yield f'{dep.package_name}{dep_extras} {dep_op} {dep_ver}'\n\n\n### Package database\n\nArvadosPythonPackage.register(\n    'arvados-python-client',\n    'arvados',\n    'sdk/python',\n),\nArvadosPythonPackage.register(\n    'crunchstat_summary',\n    'crunchstat_summary',\n    'tools/crunchstat-summary',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cluster-activity',\n    'arvados_cluster_activity',\n    'tools/cluster-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cwl-runner',\n    'arvados_cwl',\n    'sdk/cwl',\n    'arvados-python-client',\n    'crunchstat_summary',\n)\nArvadosPythonPackage.register(\n    'arvados_fuse',\n    'arvados_fuse',\n    'services/fuse',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-user-activity',\n    'arvados_user_activity',\n    'tools/user-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-tools',\n    'NO SRCDIR',\n    'tools/python-metapackage',\n    *ARVADOS_PYTHON_MODULES,\n)\nArvadosPythonPackage.register(\n    'arvados-docker-cleaner',\n    'arvados_docker',\n    'services/dockercleaner',\n)\n\n### setuptools integration\n\nclass BuildArvadosVersion(setuptools.Command):\n    \"\"\"Write _version.py for an Arvados module\"\"\"\n    def initialize_options(self):\n        self.build_lib = None\n\n    def finalize_options(self):\n        self.set_undefined_options(\"build_py\", (\"build_lib\", \"build_lib\"))\n        arv_mod = ARVADOS_PYTHON_MODULES[self.distribution.get_name()]\n        self.out_path = Path(self.build_lib, arv_mod.version_file_path())\n\n    def run(self):\n        with self.out_path.open('w') as out_file:\n            print(f'__version__ = {self.distribution.get_version()!r}', file=out_file)\n\n    def get_outputs(self):\n        return [str(self.out_path)]\n\n    def get_source_files(self):\n        return []\n\n    def get_output_mapping(self):\n        return {}\n\n\nclass ArvadosBuildCommand(setuptools.command.build.build):\n    sub_commands = [\n        *setuptools.command.build.build.sub_commands,\n        ('build_arvados_version', None),\n    ]\n\n\nCMDCLASS = {\n    'build': ArvadosBuildCommand,\n    'build_arvados_version': BuildArvadosVersion,\n}\n"
  },
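The version normalization and dependency-pinning rules embedded in `arvados_version.py` above boil down to two regular-expression transforms. A minimal standalone sketch of that behavior, with illustrative version strings (not taken from any real build):

```python
import re

# Mirrors ArvadosPythonPackage._VERSION_SUBS: map the packaging-side
# version syntax onto PEP 440 equivalents.
VERSION_SUBS = {
    'development-': '',
    '~dev': '.dev',
    '~rc': 'rc',
}

def normalize(version: str) -> str:
    """Rewrite e.g. '2.7.0~dev20230127' as '2.7.0.dev20230127'."""
    return re.sub(
        r'(^development-|~dev|~rc)',
        lambda match: VERSION_SUBS[match.group(0)],
        version,
    )

def dependency_pin(version: str) -> str:
    """Build the specifier get_dependencies_version() would yield."""
    # A trailing '.devN' is relaxed to a compatible-release match on
    # '.dev0', so any development build of a dependency satisfies it;
    # regular releases are pinned exactly.
    dep_ver, matched = re.subn(r'\.dev(0|[1-9][0-9]*)$', '.dev0', version, count=1)
    return f"{'~=' if matched else '=='} {dep_ver}"

for raw in ('2.7.0~dev20230127', '3.1.1~rc2', '3.1.1'):
    version = normalize(raw)
    print(f'{raw} -> {version} -> {dependency_pin(version)}')
# 2.7.0~dev20230127 -> 2.7.0.dev20230127 -> ~= 2.7.0.dev0
# 3.1.1~rc2 -> 3.1.1rc2 -> == 3.1.1rc2
# 3.1.1 -> 3.1.1 -> == 3.1.1
```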
  {
    "path": "tools/python-metapackage/pyproject.toml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n[build-system]\nrequires = [\"setuptools ~= 80.9\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\ndynamic = [\"dependencies\", \"version\"]\nname = \"arvados-tools\"\ndescription = \"A single package to install all Arvados client tools\"\nauthors = [\n  {name = \"Arvados\", email = \"info@arvados.org\"},\n]\nclassifiers = [\n  \"Development Status :: 5 - Production/Stable\",\n  \"Environment :: Console\",\n  \"Intended Audience :: Science/Research\",\n  \"Operating System :: POSIX\",\n  \"Programming Language :: Python :: 3\",\n  \"Programming Language :: Python :: 3.10\",\n  \"Programming Language :: Python :: 3.11\",\n  \"Programming Language :: Python :: 3.12\",\n  \"Programming Language :: Python :: 3.13\",\n  \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n]\nlicense-files = [\n  \"agpl-3.0.txt\",\n  \"LICENSE-2.0.txt\",\n]\nreadme = \"README.md\"\nrequires-python = \"~= 3.10\"\n\n[project.urls]\nHomepage = \"https://arvados.org\"\nDocumentation = \"https://doc.arvados.org\"\nRepository = \"https://git.arvados.org/arvados.git\"\nIssues = \"https://github.com/arvados/arvados/issues\"\nChangelog = \"https://arvados.org/releases/\"\n\n[tool.setuptools.data-files]\n\"share/doc/arvados_tools\" = [\n  \"agpl-3.0.txt\",\n  \"LICENSE-2.0.txt\",\n  \"README.md\",\n]\n"
  },
  {
    "path": "tools/python-metapackage/setup.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport setuptools\nimport runpy\n\nfrom pathlib import Path\n\narvados_version = runpy.run_path(Path(__file__).with_name('arvados_version.py'))\narv_mod = arvados_version['ARVADOS_PYTHON_MODULES']['arvados-tools']\nversion = arv_mod.get_version()\nsetuptools.setup(\n    install_requires=[\n        *arv_mod.iter_dependencies(version=version, extras={\n            'arvados-cluster-activity': ['prometheus'],\n        }),\n    ],\n    version=version,\n)\n"
  },
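To make the effect concrete: for a hypothetical development version `3.1.0.dev20240801`, the `install_requires` list assembled by this `setup.py` would come out roughly as below. The package set follows the registrations in `arvados_version.py` (everything registered before `arvados-tools`); the version itself is illustrative.

```python
# Approximate result of arv_mod.iter_dependencies(version='3.1.0.dev20240801',
# extras={'arvados-cluster-activity': ['prometheus']}): one requirement per
# registered dependency, all sharing the same compatible-release pin.
install_requires = [
    'arvados-python-client ~= 3.1.0.dev0',
    'crunchstat_summary ~= 3.1.0.dev0',
    'arvados-cluster-activity[prometheus] ~= 3.1.0.dev0',
    'arvados-cwl-runner ~= 3.1.0.dev0',
    'arvados_fuse ~= 3.1.0.dev0',
    'arvados-user-activity ~= 3.1.0.dev0',
]
```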
  {
    "path": "tools/salt-install/.gitignore",
    "content": "local_config_dir\nlocal.params\n*pem\n"
  },
  {
    "path": "tools/salt-install/README.md",
    "content": "[comment]: # (Copyright © The Arvados Authors. All rights reserved.)\n[comment]: # ()\n[comment]: # (SPDX-License-Identifier: CC-BY-SA-3.0)\n\n# Arvados install with Saltstack\n\n## About\n\nThis directory holds a small script to help you get Arvados up and running, using the\n[Saltstack arvados-formula](https://github.com/arvados/arvados-formula)\nin master-less mode.\n\nThere are a few preset examples that you can use:\n\n* `single_host`: Install all the Arvados components in a single host. Suitable for testing\n  or demo-ing, but not recommended for production use.\n* `multi_host/aws`: Let's you install different Arvados components in different hosts on AWS.\n  \nThe fastest way to get it running is to copy the `local.params.example` file to `local.params`,\nedit and modify the file to suit your needs, copy this file along with the `provision.sh` script\ninto the host where you want to install Arvados and run the `provision.sh` script as root.\n\nThere's an example `Vagrantfile` also, to install Arvados in a vagrant box if you want\nto try it locally.\n\nFor more information, please read https://doc.arvados.org/main/install/salt-single-host.html\n"
  },
  {
    "path": "tools/salt-install/Vagrantfile",
    "content": "# -*- mode: ruby -*-\n# vi: set ft=ruby :\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Vagrantfile API/syntax version. Don\"t touch unless you know what you\"re doing!\nVAGRANTFILE_API_VERSION = \"2\".freeze\n\nVagrant.configure(VAGRANTFILE_API_VERSION) do |config|\n  config.ssh.insert_key = false\n  config.ssh.forward_x11 = true\n\n   # A single_host multiple_hostnames example\n   config.vm.define \"arvados-sh-mn\" do |arv|\n     arv.vm.box = \"bento/debian-12\"\n     arv.vm.hostname = \"harpo\"\n     # CPU/RAM\n     config.vm.provider :virtualbox do |v|\n       v.memory = 2048\n       v.cpus = 2\n     end\n\n     # Networking\n     # WEBUI PORT\n     arv.vm.network \"forwarded_port\", guest: 8443, host: 8443\n     # KEEPPROXY\n     arv.vm.network \"forwarded_port\", guest: 25101, host: 25101\n     # KEEPWEB\n     arv.vm.network \"forwarded_port\", guest: 9002, host: 9002\n     # WEBSOCKET\n     arv.vm.network \"forwarded_port\", guest: 8002, host: 8002\n     arv.vm.provision \"shell\",\n                      inline: \"cp -vr /vagrant/config_examples/single_host/multiple_hostnames /home/vagrant/local_config_dir;\n                               cp -vr /vagrant/tests /home/vagrant/tests;\n                               sed 's#cluster_fixme_or_this_wont_work#harpo#g;\n                                    s#domain_fixme_or_this_wont_work#local#g;\n                                    s#CONTROLLER_EXT_SSL_PORT=443#CONTROLLER_EXT_SSL_PORT=8443#g;\n                                    s#RELEASE=\\\"production\\\"#RELEASE=\\\"development\\\"#g;\n                                    s/# VERSION=.*$/VERSION=\\\"latest\\\"/g;' \\\n                                    /vagrant/local.params.example.single_host_multiple_hostnames > /tmp/local.params.single_host_multiple_hostnames\"\n                                    # s/#\\ BRANCH=\\\"main\\\"/\\ BRANCH=\\\"main\\\"/g;' \\\n\n     arv.vm.provision \"shell\",\n                      inline: \"cp -vr /tmp/local.params.single_host_multiple_hostnames /tmp/local.params.single_host_multiple_hostnames.falla;\n                               cp -vr /vagrant/centos7-local.params.single_host_single_hostname-f258b604f831bb3bd7fab506c670b975ae8e4118 /tmp/local.params.single_host_multiple_hostnames\"\n     arv.vm.provision \"shell\",\n                      path: \"provision.sh\",\n                      args: [\n                        # \"--debug\",\n                        \"--config /tmp/local.params.single_host_multiple_hostnames\",\n                        \"--development\",\n                        \"--test\",\n                        \"--vagrant\"\n                      ].join(\" \")\n   end\n\n   # A single_host single_hostname example\n   config.vm.define \"arvados-sh-sn\" do |arv|\n     #arv.vm.box = \"bento/centos-7\"\n     arv.vm.box = \"bento/ubuntu-24.04\"\n     arv.vm.hostname = \"zeppo\"\n     # CPU/RAM\n     config.vm.provider :virtualbox do |v|\n       v.memory = 2048\n       v.cpus = 2\n     end\n \n     # Networking\n     # WEBUI PORT\n     arv.vm.network \"forwarded_port\", guest: 443, host: 9443\n     # WORKBENCH1\n     arv.vm.network \"forwarded_port\", guest: 8805, host: 9444\n     # WORKBENCH2\n     arv.vm.network \"forwarded_port\", guest: 443, host: 9445\n     # KEEPPROXY\n     arv.vm.network \"forwarded_port\", guest: 8801, host: 35101\n     # KEEPWEB\n     arv.vm.network \"forwarded_port\", guest: 8802, host: 11002\n     # WEBSHELL\n     arv.vm.network 
\"forwarded_port\", guest: 8803, host: 14202\n     # WEBSOCKET\n     arv.vm.network \"forwarded_port\", guest: 8804, host: 18002\n     arv.vm.provision \"shell\",\n                      inline: \"cp -vr /vagrant/config_examples/single_host/single_hostname /home/vagrant/local_config_dir;\n                               cp -vr /vagrant/tests /home/vagrant/tests;\n                               sed 's#cluster_fixme_or_this_wont_work#cnts7#g;\n                                    s#domain_fixme_or_this_wont_work#local#g;\n                                    s#HOSTNAME_EXT=\\\"hostname_ext_fixme_or_this_wont_work\\\"#HOSTNAME_EXT=\\\"cnts7.local\\\"#g;\n                                    s#IP_INT=\\\"ip_int_fixme_or_this_wont_work\\\"#IP_INT=\\\"127.0.0.1\\\"#g;\n                                    s#RELEASE=\\\"production\\\"#RELEASE=\\\"development\\\"#g;\n                                    s/# BRANCH=\\\"main\\\"/BRANCH=\\\"main\\\"/g;\n                                    s/# VERSION=.*$/VERSION=\\\"latest\\\"/g' \\\n                                    /vagrant/local.params.example.single_host_single_hostname > /tmp/local.params.single_host_single_hostname\"\n\n     arv.vm.provision \"shell\",\n                      path: \"provision.sh\",\n                      args: [\n                        \"--debug\",\n                        \"--config /tmp/local.params.single_host_single_hostname\",\n                        \"--test\",\n                        \"--vagrant\"\n                      ].join(\" \")\n   end\nend\n"
  },
  {
    "path": "tools/salt-install/common.sh",
    "content": "##########################################################\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n# This is generic logic used by provision.sh & installer.sh scripts\n\nif [[ -s ${CONFIG_FILE} && -s ${CONFIG_FILE}.secrets ]]; then\n  source ${CONFIG_FILE}.secrets\n  source ${CONFIG_FILE}\nelse\n  echo >&2 \"You don't seem to have a config file with initial values.\"\n  echo >&2 \"Please create a '${CONFIG_FILE}' & '${CONFIG_FILE}.secrets' files as described in\"\n  echo >&2 \"  * https://doc.arvados.org/install/salt-single-host.html#single_host, or\"\n  echo >&2 \"  * https://doc.arvados.org/install/salt-multi-host.html#multi_host_multi_hostnames\"\n  exit 1\nfi\n\nUSE_SSH_JUMPHOST=${USE_SSH_JUMPHOST:-}\nDISABLED_CONTROLLER=\"\"\nDATABASE_POSTGRESQL_DEFAULT_VERSION=15\n\n# Comma-separated list of nodes. This is used to dynamically adjust\n# salt pillars.\nNODELIST=\"\"\nfor node in \"${!NODES[@]}\"; do\n  if [ -z \"$NODELIST\" ]; then\n    NODELIST=\"$node\"\n  else\n    NODELIST=\"$NODELIST,$node\"\n  fi\ndone\n\n# The mapping of roles to nodes. This is used to dynamically adjust\n# salt pillars.\nfor node in \"${!NODES[@]}\"; do\n  roles=\"${NODES[$node]}\"\n\n  # Split the comma-separated roles into an array\n  IFS=',' read -ra roles_array <<< \"$roles\"\n\n  for role in \"${roles_array[@]}\"; do\n    if [ -n \"${ROLE2NODES[$role]:-}\" ]; then\n      ROLE2NODES[\"$role\"]=\"${ROLE2NODES[$role]},$node\"\n    else\n      ROLE2NODES[\"$role\"]=$node\n    fi\n  done\ndone\n\n# Sets TLS certificate expiration thresholds\nTLS_EXPIRATION_YELLOW=5184000 # > 2 months\nTLS_EXPIRATION_GREEN=15552000 # > 6 months\nif [[ \"${SSL_MODE}\" == \"lets-encrypt\" ]]; then\n  TLS_EXPIRATION_YELLOW=1900800 # > 22 days\n  TLS_EXPIRATION_GREEN=2505600 # > 29 days\nfi\n\n# Auto-detects load-balancing mode\nif [ -z \"${ROLE2NODES['balancer']:-}\" ]; then\n  ENABLE_BALANCER=\"no\"\nelse\n  ENABLE_BALANCER=\"yes\"\nfi\n\n# Auto-sets PG version if needed\nif [[ -n \"${ROLE2NODES['database']:-}\" || \"${NODELIST}\" == \"localhost\" ]]; then\n  DATABASE_POSTGRESQL_VERSION=\"${DATABASE_POSTGRESQL_VERSION:-${DATABASE_POSTGRESQL_DEFAULT_VERSION}}\"\nfi"
  },
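The node/role bookkeeping in `common.sh` is easy to misread in bash; the same logic, mirrored as a short Python sketch with illustrative host names (the real `NODES` map comes from `local.params`):

```python
# NODES maps hostname -> comma-separated role list, as in local.params.
NODES = {
    'controller.xarv1.example.com': 'api,controller,websocket,dispatcher,keepbalance',
    'keep0.xarv1.example.com': 'keepstore',
    'shell.xarv1.example.com': 'shell',
}

# NODELIST: comma-separated list of every node.
NODELIST = ','.join(NODES)

# ROLE2NODES: invert the mapping, role -> comma-separated node list.
ROLE2NODES: dict[str, str] = {}
for node, roles in NODES.items():
    for role in roles.split(','):
        if role in ROLE2NODES:
            ROLE2NODES[role] += f',{node}'
        else:
            ROLE2NODES[role] = node

# Load balancing is enabled only when some node carries the 'balancer' role.
ENABLE_BALANCER = 'yes' if 'balancer' in ROLE2NODES else 'no'
```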
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/README.md",
    "content": "Arvados installation using multiple instances\n=============================================\n\nThese files let you setup Arvados on multiple instances on AWS. This setup\nconsiders deploying the instances on an isolated VPC, created/managed with\n[the Arvados terraform code](https://github.com/arvados/arvados/tree/terraform/tools/terraform)\nin our repo.\n\nPlease check [the Arvados installation documentation](https://doc.arvados.org/install/salt-multi-host.html) for more details.\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/certs/README.md",
    "content": "SSL Certificates\n================\n\nAdd the certificates for your hosts in this directory.\n\nThe nodes requiring certificates are:\n\n* DOMAIN\n* collections.DOMAIN\n* controller.DOMAIN\n* \\*.collections.DOMAIN\n* \\*.containers.DOMAIN\n* grafana.DOMAIN\n* download.DOMAIN\n* keep.DOMAIN\n* prometheus.DOMAIN\n* shell.DOMAIN\n* workbench.DOMAIN\n* workbench2.DOMAIN\n* ws.DOMAIN\n\nThey can be individual certificates or a wildcard certificate for all of them.\n\nPlease remember to modify the *nginx\\_\\** salt pillars accordingly.\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/dashboards/arvados_logs.json",
    "content": "{\n    \"__inputs\": [\n      {\n        \"name\": \"DS_LOKI\",\n        \"label\": \"loki\",\n        \"description\": \"\",\n        \"type\": \"datasource\",\n        \"pluginId\": \"loki\",\n        \"pluginName\": \"Loki\"\n      }\n    ],\n    \"__elements\": {},\n    \"__requires\": [\n      {\n        \"type\": \"grafana\",\n        \"id\": \"grafana\",\n        \"name\": \"Grafana\",\n        \"version\": \"11.1.4\"\n      },\n      {\n        \"type\": \"panel\",\n        \"id\": \"logs\",\n        \"name\": \"Logs\",\n        \"version\": \"\"\n      },\n      {\n        \"type\": \"datasource\",\n        \"id\": \"loki\",\n        \"name\": \"Loki\",\n        \"version\": \"1.0.0\"\n      },\n      {\n        \"type\": \"panel\",\n        \"id\": \"timeseries\",\n        \"name\": \"Time series\",\n        \"version\": \"\"\n      }\n    ],\n    \"annotations\": {\n      \"list\": [\n        {\n          \"builtIn\": 1,\n          \"datasource\": {\n            \"type\": \"grafana\",\n            \"uid\": \"-- Grafana --\"\n          },\n          \"enable\": true,\n          \"hide\": true,\n          \"iconColor\": \"rgba(0, 211, 255, 1)\",\n          \"name\": \"Annotations & Alerts\",\n          \"type\": \"dashboard\"\n        }\n      ]\n    },\n    \"editable\": true,\n    \"fiscalYearStartMonth\": 0,\n    \"graphTooltip\": 0,\n    \"id\": null,\n    \"links\": [],\n    \"panels\": [\n      {\n        \"datasource\": {\n          \"type\": \"loki\",\n          \"uid\": \"${DS_LOKI}\"\n        },\n        \"fieldConfig\": {\n          \"defaults\": {\n            \"color\": {\n              \"mode\": \"palette-classic\"\n            },\n            \"custom\": {\n              \"axisBorderShow\": false,\n              \"axisCenteredZero\": false,\n              \"axisColorMode\": \"text\",\n              \"axisLabel\": \"\",\n              \"axisPlacement\": \"auto\",\n              \"barAlignment\": 0,\n              \"drawStyle\": \"bars\",\n              \"fillOpacity\": 0,\n              \"gradientMode\": \"none\",\n              \"hideFrom\": {\n                \"legend\": false,\n                \"tooltip\": false,\n                \"viz\": false\n              },\n              \"insertNulls\": false,\n              \"lineInterpolation\": \"linear\",\n              \"lineWidth\": 1,\n              \"pointSize\": 5,\n              \"scaleDistribution\": {\n                \"type\": \"linear\"\n              },\n              \"showPoints\": \"auto\",\n              \"spanNulls\": false,\n              \"stacking\": {\n                \"group\": \"A\",\n                \"mode\": \"none\"\n              },\n              \"thresholdsStyle\": {\n                \"mode\": \"off\"\n              }\n            },\n            \"mappings\": [],\n            \"thresholds\": {\n              \"mode\": \"absolute\",\n              \"steps\": [\n                {\n                  \"color\": \"green\",\n                  \"value\": null\n                },\n                {\n                  \"color\": \"red\",\n                  \"value\": 80\n                }\n              ]\n            }\n          },\n          \"overrides\": [\n            {\n              \"matcher\": {\n                \"id\": \"byName\",\n                \"options\": \"errors\"\n              },\n              \"properties\": [\n                {\n                  \"id\": \"color\",\n                  \"value\": {\n                    \"fixedColor\": \"red\",\n                   
 \"mode\": \"fixed\"\n                  }\n                }\n              ]\n            },\n            {\n              \"matcher\": {\n                \"id\": \"byName\",\n                \"options\": \"ok\"\n              },\n              \"properties\": [\n                {\n                  \"id\": \"color\",\n                  \"value\": {\n                    \"fixedColor\": \"green\",\n                    \"mode\": \"fixed\"\n                  }\n                }\n              ]\n            }\n          ]\n        },\n        \"gridPos\": {\n          \"h\": 3,\n          \"w\": 24,\n          \"x\": 0,\n          \"y\": 0\n        },\n        \"id\": 1,\n        \"options\": {\n          \"legend\": {\n            \"calcs\": [],\n            \"displayMode\": \"list\",\n            \"placement\": \"bottom\",\n            \"showLegend\": true\n          },\n          \"tooltip\": {\n            \"mode\": \"single\",\n            \"sort\": \"none\"\n          }\n        },\n        \"targets\": [\n          {\n            \"datasource\": {\n              \"type\": \"loki\",\n              \"uid\": \"${DS_LOKI}\"\n            },\n            \"editorMode\": \"code\",\n            \"expr\": \"sum(count_over_time({systemd_unit=~\\\"arvados-controller\\\\\\\\.service|arvados-railsapi\\\\\\\\.service|arvados-ws\\\\\\\\.service|keep-balance\\\\\\\\.service|keep-web\\\\\\\\.service|keepproxy\\\\\\\\.service|arvados-dispatch-cloud\\\\\\\\.service|keepstore\\\\\\\\.service\\\"} |~ `${regex_search}` | json RequestId=\\\"RequestID\\\", RespStatusCode=\\\"respStatusCode\\\" | RespStatusCode =~ `2[0-9]{2}` | RequestId =~ `${request_id}` [$__auto])) + sum(count_over_time({filename=\\\"/var/www/arvados-api/shared/log/production.log\\\"} |~ `${regex_search}` | json RequestId=\\\"request_id\\\", RespStatusCode=\\\"status\\\" | RespStatusCode =~ `2[0-9]{2}` | RequestId =~ `${request_id}` [$__auto]))\",\n            \"legendFormat\": \"ok\",\n            \"queryType\": \"range\",\n            \"refId\": \"ok\"\n          },\n          {\n            \"datasource\": {\n              \"type\": \"loki\",\n              \"uid\": \"${DS_LOKI}\"\n            },\n            \"editorMode\": \"builder\",\n            \"expr\": \"sum(count_over_time({systemd_unit=~\\\"arvados-controller\\\\\\\\.service|arvados-railsapi\\\\\\\\.service|arvados-ws\\\\\\\\.service|keep-balance\\\\\\\\.service|keep-web\\\\\\\\.service|keepproxy\\\\\\\\.service|arvados-dispatch-cloud\\\\\\\\.service|keepstore\\\\\\\\.service\\\"} |~ `${regex_search}` | json RequestId=\\\"RequestID\\\", RespStatusCode=\\\"respStatusCode\\\" | RespStatusCode =~ `[45][0-9]{2}` | RequestId =~ `${request_id}` [$__auto])) + sum(count_over_time({filename=\\\"/var/www/arvados-api/shared/log/production.log\\\"} |~ `${regex_search}` | json RequestId=\\\"request_id\\\", RespStatusCode=\\\"status\\\" | RespStatusCode =~ `[45][0-9]{2}` | RequestId =~ `${request_id}` [$__auto]))\",\n            \"hide\": false,\n            \"legendFormat\": \"errors\",\n            \"queryType\": \"range\",\n            \"refId\": \"errors\"\n          }\n        ],\n        \"type\": \"timeseries\"\n      },\n      {\n        \"datasource\": {\n          \"type\": \"loki\",\n          \"uid\": \"${DS_LOKI}\"\n        },\n        \"gridPos\": {\n          \"h\": 13,\n          \"w\": 24,\n          \"x\": 0,\n          \"y\": 3\n        },\n        \"id\": 3,\n        \"options\": {\n          \"dedupStrategy\": \"none\",\n          \"enableLogDetails\": true,\n          
\"prettifyLogMessage\": false,\n          \"showCommonLabels\": false,\n          \"showLabels\": false,\n          \"showTime\": true,\n          \"sortOrder\": \"Ascending\",\n          \"wrapLogMessage\": true\n        },\n        \"targets\": [\n          {\n            \"datasource\": {\n              \"type\": \"loki\",\n              \"uid\": \"${DS_LOKI}\"\n            },\n            \"editorMode\": \"code\",\n            \"expr\": \"{systemd_unit=~\\\"arvados-controller\\\\\\\\.service|arvados-railsapi\\\\\\\\.service|arvados-ws\\\\\\\\.service|keep-balance\\\\\\\\.service|keep-web\\\\\\\\.service|keepproxy\\\\\\\\.service|arvados-dispatch-cloud\\\\\\\\.service|keepstore\\\\\\\\.service\\\"} |~ `${regex_search}` | json RequestId=\\\"RequestID\\\", RespStatusCode=\\\"respStatusCode\\\" | RequestId =~ `${request_id}`\",\n            \"queryType\": \"range\",\n            \"refId\": \"Arvados Journald logs\"\n          },\n          {\n            \"datasource\": {\n              \"type\": \"loki\",\n              \"uid\": \"${DS_LOKI}\"\n            },\n            \"editorMode\": \"code\",\n            \"expr\": \"{filename=\\\"/var/www/arvados-api/shared/log/production.log\\\"} |~ `${regex_search}` | json RequestId=\\\"request_id\\\", RespStatusCode=\\\"status\\\" | __error__=`` | RequestId =~ `${request_id}`\",\n            \"queryType\": \"range\",\n            \"refId\": \"RailsAPI logs\"\n          }\n        ],\n        \"title\": \"Arvados Logs\",\n        \"type\": \"logs\"\n      },\n      {\n        \"datasource\": {\n          \"type\": \"loki\",\n          \"uid\": \"${DS_LOKI}\"\n        },\n        \"gridPos\": {\n          \"h\": 8,\n          \"w\": 24,\n          \"x\": 0,\n          \"y\": 16\n        },\n        \"id\": 2,\n        \"options\": {\n          \"dedupStrategy\": \"none\",\n          \"enableLogDetails\": true,\n          \"prettifyLogMessage\": false,\n          \"showCommonLabels\": false,\n          \"showLabels\": false,\n          \"showTime\": false,\n          \"sortOrder\": \"Descending\",\n          \"wrapLogMessage\": false\n        },\n        \"pluginVersion\": \"11.1.4\",\n        \"targets\": [\n          {\n            \"datasource\": {\n              \"type\": \"loki\",\n              \"uid\": \"${DS_LOKI}\"\n            },\n            \"editorMode\": \"builder\",\n            \"expr\": \"{filename=~\\\"/var/log/nginx/.+\\\\\\\\.log\\\"} |= ``\",\n            \"queryType\": \"range\",\n            \"refId\": \"A\"\n          }\n        ],\n        \"title\": \"NGINX logs\",\n        \"type\": \"logs\"\n      }\n    ],\n    \"refresh\": \"5s\",\n    \"schemaVersion\": 39,\n    \"tags\": [],\n    \"templating\": {\n      \"list\": [\n        {\n          \"current\": {\n            \"selected\": false,\n            \"text\": \"\",\n            \"value\": \"\"\n          },\n          \"description\": \"Search by req-id\",\n          \"hide\": 0,\n          \"label\": \"Request ID\",\n          \"name\": \"request_id\",\n          \"options\": [\n            {\n              \"selected\": true,\n              \"text\": \"\",\n              \"value\": \"\"\n            }\n          ],\n          \"query\": \"\",\n          \"skipUrlSync\": false,\n          \"type\": \"textbox\"\n        },\n        {\n          \"current\": {},\n          \"hide\": 0,\n          \"label\": \"Regex Search\",\n          \"name\": \"regex_search\",\n          \"options\": [],\n          \"query\": \"\",\n          \"skipUrlSync\": false,\n          
\"type\": \"textbox\"\n        }\n      ]\n    },\n    \"time\": {\n      \"from\": \"now-1h\",\n      \"to\": \"now\"\n    },\n    \"timepicker\": {},\n    \"timezone\": \"browser\",\n    \"title\": \"Arvados Logs\",\n    \"uid\": \"ArvadosClusterLogsDashboard\",\n    \"version\": 11,\n    \"weekStart\": \"\"\n  }"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/dashboards/arvados_overview.json",
    "content": "{\n  \"__inputs\": [\n    {\n      \"name\": \"DS_PROMETHEUS\",\n      \"label\": \"Prometheus\",\n      \"description\": \"\",\n      \"type\": \"datasource\",\n      \"pluginId\": \"prometheus\",\n      \"pluginName\": \"Prometheus\"\n    }\n  ],\n  \"__elements\": {},\n  \"__requires\": [\n    {\n      \"type\": \"grafana\",\n      \"id\": \"grafana\",\n      \"name\": \"Grafana\",\n      \"version\": \"10.2.0\"\n    },\n    {\n      \"type\": \"panel\",\n      \"id\": \"graph\",\n      \"name\": \"Graph (old)\",\n      \"version\": \"\"\n    },\n    {\n      \"type\": \"datasource\",\n      \"id\": \"prometheus\",\n      \"name\": \"Prometheus\",\n      \"version\": \"1.0.0\"\n    },\n    {\n      \"type\": \"panel\",\n      \"id\": \"table\",\n      \"name\": \"Table\",\n      \"version\": \"\"\n    }\n  ],\n  \"annotations\": {\n    \"list\": [\n      {\n        \"builtIn\": 1,\n        \"datasource\": {\n          \"type\": \"prometheus\",\n          \"uid\": \"${DS_PROMETHEUS}\"\n        },\n        \"enable\": true,\n        \"hide\": true,\n        \"iconColor\": \"rgba(0, 211, 255, 1)\",\n        \"name\": \"Annotations & Alerts\",\n        \"target\": {\n          \"limit\": 100,\n          \"matchAny\": false,\n          \"tags\": [],\n          \"type\": \"dashboard\"\n        },\n        \"type\": \"dashboard\"\n      }\n    ]\n  },\n  \"editable\": true,\n  \"fiscalYearStartMonth\": 0,\n  \"graphTooltip\": 0,\n  \"id\": null,\n  \"links\": [],\n  \"liveNow\": false,\n  \"panels\": [\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"custom\": {\n            \"align\": \"center\",\n            \"cellOptions\": {\n              \"type\": \"auto\"\n            },\n            \"inspect\": false\n          },\n          \"decimals\": 2,\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              }\n            ]\n          },\n          \"unit\": \"dtdurations\"\n        },\n        \"overrides\": [\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Earliest SSL cert expiration\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"thresholds\",\n                \"value\": {\n                  \"mode\": \"absolute\",\n                  \"steps\": [\n                    {\n                      \"color\": \"red\",\n                      \"value\": null\n                    },\n                    {\n                      \"color\": \"yellow\",\n                      \"value\": __TLS_EXPIRATION_YELLOW__\n                    },\n                    {\n                      \"color\": \"transparent\",\n                      \"value\": __TLS_EXPIRATION_GREEN__\n                    }\n                  ]\n                }\n              },\n              {\n                \"id\": \"custom.cellOptions\",\n                \"value\": {\n                  \"type\": \"color-background\"\n                }\n              }\n            ]\n          }\n        ]\n      },\n      \"gridPos\": {\n        \"h\": 3,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 0\n      },\n      \"id\": 35,\n      \"links\": 
[],\n      \"options\": {\n        \"cellHeight\": \"sm\",\n        \"footer\": {\n          \"countRows\": false,\n          \"fields\": \"\",\n          \"reducer\": [\n            \"sum\"\n          ],\n          \"show\": false\n        },\n        \"showHeader\": false\n      },\n      \"pluginVersion\": \"10.2.0\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"editorMode\": \"code\",\n          \"exemplar\": false,\n          \"expr\": \"min(probe_ssl_earliest_cert_expiry)-time()\",\n          \"format\": \"time_series\",\n          \"instant\": true,\n          \"legendFormat\": \"__auto\",\n          \"range\": false,\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Earliest SSL certificate expiration\",\n      \"transformations\": [\n        {\n          \"id\": \"organize\",\n          \"options\": {\n            \"excludeByName\": {\n              \"Time\": true\n            },\n            \"indexByName\": {},\n            \"renameByName\": {\n              \"Time\": \"\",\n              \"min(probe_ssl_earliest_cert_expiry)-time()\": \"Earliest SSL cert expiration\"\n            }\n          }\n        }\n      ],\n      \"type\": \"table\"\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 4,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 3\n      },\n      \"hiddenSeries\": false,\n      \"id\": 34,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null as zero\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [\n        {\n          \"$$hashKey\": \"object:424\",\n          \"alias\": \"/out/\",\n          \"stack\": \"B\",\n          \"transform\": \"negative-Y\"\n        }\n      ],\n      \"spaceLength\": 10,\n      \"stack\": true,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"sum(rate(arvados_keepstore_volume_io_bytes{}[1m])) without (operation,device_id)\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{ instance }} {{ direction }}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Keepstore bandwidth [1m]\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:159\",\n          \"format\": \"Bps\",\n          \"logBase\": 1,\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": 
\"object:160\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"links\": []\n        },\n        \"overrides\": []\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 3\n      },\n      \"hiddenSeries\": false,\n      \"id\": 14,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null as zero\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_dispatchcloud_containers_running{}\",\n          \"interval\": \"\",\n          \"legendFormat\": \"# containers\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Containers running\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:973\",\n          \"format\": \"short\",\n          \"label\": \"\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:974\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"links\": []\n        },\n        \"overrides\": []\n      },\n      \"fill\": 8,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 11\n      },\n      \"hiddenSeries\": false,\n      \"id\": 8,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 
2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": true,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"sum(rate(arvados_keepstore_volume_operations{}[1m])) without (operation,device_id)\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Keepstore volume operations rate/second\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 2,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:982\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:983\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"links\": []\n        },\n        \"overrides\": []\n      },\n      \"fill\": 6,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 11\n      },\n      \"hiddenSeries\": false,\n      \"id\": 12,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null as zero\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": true,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_dispatchcloud_queue_entries{}\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance_type}} {{state}}\",\n          \"refId\": \"A\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_dispatchcloud_containers_allocated_not_started{}\",\n          \"interval\": \"\",\n          \"legendFormat\": \"allocated, not started\",\n          \"refId\": \"B\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_dispatchcloud_containers_not_allocated_over_quota{}\",\n          \"interval\": \"\",\n          \"legendFormat\": \"not 
allocated, over quota\",\n          \"refId\": \"C\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Queue: # containers per {state, instance type}\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 2,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:4306\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:4307\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"links\": []\n        },\n        \"overrides\": []\n      },\n      \"fill\": 8,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 19\n      },\n      \"hiddenSeries\": false,\n      \"id\": 10,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": true,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_keepstore_bufferpool_inuse_buffers{}\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Keepstore buffers in use\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 2,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:929\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:930\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 
12,\n        \"y\": 19\n      },\n      \"hiddenSeries\": false,\n      \"id\": 24,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null as zero\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_dispatchcloud_containers_longest_wait_time_seconds{}\",\n          \"interval\": \"\",\n          \"legendFormat\": \"Longest wait time\",\n          \"refId\": \"A\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"rate(arvados_dispatchcloud_containers_time_from_queue_to_crunch_run_seconds_sum{}[10m]) / rate(arvados_dispatchcloud_containers_time_from_queue_to_crunch_run_seconds_count{}[10m])\",\n          \"interval\": \"\",\n          \"legendFormat\": \"avg wait time [10m]\",\n          \"refId\": \"B\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Container wait times\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:138\",\n          \"format\": \"s\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:139\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"links\": []\n        },\n        \"overrides\": []\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 27\n      },\n      \"hiddenSeries\": false,\n      \"id\": 6,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n  
      {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_keep_total_bytes{}\",\n          \"interval\": \"\",\n          \"legendFormat\": \"Total stored\",\n          \"refId\": \"A\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_keep_overreplicated_bytes{}\",\n          \"interval\": \"\",\n          \"legendFormat\": \"Overreplicated\",\n          \"refId\": \"B\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_keep_underreplicated_bytes{}\",\n          \"interval\": \"\",\n          \"legendFormat\": \"Underreplicated\",\n          \"refId\": \"C\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_keep_lost_bytes{}\",\n          \"interval\": \"\",\n          \"legendFormat\": \"Lost\",\n          \"refId\": \"D\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Total bytes by type\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:304\",\n          \"decimals\": 2,\n          \"format\": \"decbytes\",\n          \"label\": \"\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:305\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": true,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 27\n      },\n      \"hiddenSeries\": false,\n      \"id\": 22,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null as zero\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": true,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"rate(arvados_dispatchcloud_instances_time_to_ssh_seconds_sum{}[10m]) / rate(arvados_dispatchcloud_instances_time_to_ssh_seconds_count{}[10m])\",\n          \"hide\": false,\n          \"interval\": \"\",\n          
\"legendFormat\": \"ssh\",\n          \"refId\": \"A\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"rate(arvados_dispatchcloud_instances_time_to_ready_for_container_seconds_sum{}[10m]) / rate(arvados_dispatchcloud_instances_time_to_ready_for_container_seconds_count{}[10m])\",\n          \"interval\": \"\",\n          \"legendFormat\": \"ready\",\n          \"refId\": \"B\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Instance time to ... avg [10m]\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:113\",\n          \"format\": \"s\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:114\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 35\n      },\n      \"hiddenSeries\": false,\n      \"id\": 32,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_concurrent_requests{}\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance}}_{{queue}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Concurrent requests\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:109\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:110\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": 
false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"links\": []\n        },\n        \"overrides\": []\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 35\n      },\n      \"hiddenSeries\": false,\n      \"id\": 2,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null as zero\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": true,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_dispatchcloud_boot_outcomes{}\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{outcome}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Boot outcomes\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 2,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:921\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:922\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"links\": []\n        },\n        \"overrides\": []\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 43\n      },\n      \"hiddenSeries\": false,\n      \"id\": 16,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null as zero\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": 
\"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"sum(arvados_dispatchcloud_instances_price{})\",\n          \"interval\": \"\",\n          \"intervalFactor\": 10,\n          \"legendFormat\": \"cost ($)\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Cost\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:623\",\n          \"format\": \"short\",\n          \"label\": \"$ / hour\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:624\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"links\": []\n        },\n        \"overrides\": []\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 43\n      },\n      \"hiddenSeries\": false,\n      \"id\": 4,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null as zero\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": true,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_dispatchcloud_instances_disappeared{}\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{state}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"instance state before disappearance\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 2,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:1025\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:1026\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": 
false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"links\": []\n        },\n        \"overrides\": []\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 51\n      },\n      \"hiddenSeries\": false,\n      \"id\": 18,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null as zero\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": true,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_dispatchcloud_instances_price{}\",\n          \"interval\": \"\",\n          \"intervalFactor\": 10,\n          \"legendFormat\": \"{{category}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Cost by node state\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 2,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:574\",\n          \"format\": \"short\",\n          \"label\": \"$ / hour\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:575\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 51\n      },\n      \"hiddenSeries\": false,\n      \"id\": 26,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null as zero\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": 
\"rate(arvados_dispatchcloud_instances_time_from_shutdown_request_to_disappearance_seconds_sum{}[10m]) / rate(arvados_dispatchcloud_instances_time_from_shutdown_request_to_disappearance_seconds_count{}[10m])\",\n          \"interval\": \"\",\n          \"legendFormat\": \"shutdown to disappearance\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Instances time from shutdown to disappearance avg[10m]\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:450\",\n          \"format\": \"s\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:451\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"links\": []\n        },\n        \"overrides\": []\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 59\n      },\n      \"hiddenSeries\": false,\n      \"id\": 20,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null as zero\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": true,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"arvados_dispatchcloud_instances_total{}\",\n          \"instant\": false,\n          \"interval\": \"\",\n          \"legendFormat\": \"{{instance_type}} : {{category}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [\n        {\n          \"$$hashKey\": \"object:540\",\n          \"colorMode\": \"critical\",\n          \"fill\": true,\n          \"line\": true,\n          \"op\": \"gt\",\n          \"yaxis\": \"left\"\n        }\n      ],\n      \"timeRegions\": [],\n      \"title\": \"Nodes by state\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 2,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:723\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          
\"$$hashKey\": \"object:724\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 67\n      },\n      \"hiddenSeries\": false,\n      \"id\": 28,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null as zero\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"rate(arvados_dispatchcloud_instances_run_probe_duration_seconds_sum{}[10m]) / rate(arvados_dispatchcloud_instances_run_probe_duration_seconds_count{}[10m])\",\n          \"interval\": \"\",\n          \"legendFormat\": \"{{outcome}}\",\n          \"refId\": \"A\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"run probe duration avg[10m]\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:125\",\n          \"format\": \"s\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:126\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 8,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 75\n      },\n      \"hiddenSeries\": false,\n      \"id\": 30,\n      \"legend\": {\n        \"avg\": false,\n        \"current\": false,\n        \"max\": false,\n        \"min\": false,\n        \"show\": true,\n        \"total\": false,\n        \"values\": false\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"10.2.0\",\n      \"pointradius\": 2,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n   
   \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"delta(arvados_dispatchcloud_instances_run_probe_duration_seconds_count{}[1m])\",\n          \"instant\": false,\n          \"interval\": \"\",\n          \"legendFormat\": \"{{outcome}}\",\n          \"refId\": \"B\"\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"run probe count by outcome -- delta[1m]\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:149\",\n          \"format\": \"short\",\n          \"logBase\": 10,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:150\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    }\n  ],\n  \"refresh\": \"10s\",\n  \"revision\": 1,\n  \"schemaVersion\": 38,\n  \"tags\": [],\n  \"templating\": {\n    \"list\": []\n  },\n  \"time\": {\n    \"from\": \"now-1h\",\n    \"to\": \"now\"\n  },\n  \"timepicker\": {\n    \"refresh_intervals\": [\n      \"10s\",\n      \"30s\",\n      \"1m\",\n      \"5m\",\n      \"15m\",\n      \"30m\",\n      \"1h\",\n      \"2h\",\n      \"1d\"\n    ]\n  },\n  \"timezone\": \"\",\n  \"title\": \"Arvados cluster overview\",\n  \"uid\": \"ArvadosClusterOverviewDashboard\",\n  \"version\": 1,\n  \"weekStart\": \"\"\n}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/dashboards/node-exporter-full_rev30.json",
    "content": "{\n  \"__inputs\": [\n    {\n      \"name\": \"DS_PROMETHEUS\",\n      \"label\": \"prometheus\",\n      \"description\": \"\",\n      \"type\": \"datasource\",\n      \"pluginId\": \"prometheus\",\n      \"pluginName\": \"Prometheus\"\n    }\n  ],\n  \"__elements\": {},\n  \"__requires\": [\n    {\n      \"type\": \"panel\",\n      \"id\": \"gauge\",\n      \"name\": \"Gauge\",\n      \"version\": \"\"\n    },\n    {\n      \"type\": \"grafana\",\n      \"id\": \"grafana\",\n      \"name\": \"Grafana\",\n      \"version\": \"9.2.3\"\n    },\n    {\n      \"type\": \"datasource\",\n      \"id\": \"prometheus\",\n      \"name\": \"Prometheus\",\n      \"version\": \"1.0.0\"\n    },\n    {\n      \"type\": \"panel\",\n      \"id\": \"stat\",\n      \"name\": \"Stat\",\n      \"version\": \"\"\n    },\n    {\n      \"type\": \"panel\",\n      \"id\": \"timeseries\",\n      \"name\": \"Time series\",\n      \"version\": \"\"\n    }\n  ],\n  \"annotations\": {\n    \"list\": [\n      {\n        \"$$hashKey\": \"object:1058\",\n        \"builtIn\": 1,\n        \"datasource\": {\n          \"type\": \"datasource\",\n          \"uid\": \"grafana\"\n        },\n        \"enable\": true,\n        \"hide\": true,\n        \"iconColor\": \"rgba(0, 211, 255, 1)\",\n        \"name\": \"Annotations & Alerts\",\n        \"target\": {\n          \"limit\": 100,\n          \"matchAny\": false,\n          \"tags\": [],\n          \"type\": \"dashboard\"\n        },\n        \"type\": \"dashboard\"\n      },\n      {\n        \"datasource\": {\n          \"type\": \"prometheus\",\n          \"uid\": \"${DS_PROMETHEUS}\"\n        },\n        \"enable\": true,\n        \"expr\": \"changes(node_boot_time_seconds{instance=\\\"$node\\\"}[$__rate_interval])\",\n        \"iconColor\": \"red\",\n        \"name\": \"Reboot\"\n      }\n    ]\n  },\n  \"editable\": true,\n  \"fiscalYearStartMonth\": 0,\n  \"gnetId\": 1860,\n  \"graphTooltip\": 0,\n  \"id\": null,\n  \"links\": [\n    {\n      \"icon\": \"external link\",\n      \"tags\": [],\n      \"targetBlank\": true,\n      \"title\": \"GitHub\",\n      \"type\": \"link\",\n      \"url\": \"https://github.com/rfmoz/grafana-dashboards\"\n    },\n    {\n      \"icon\": \"external link\",\n      \"tags\": [],\n      \"targetBlank\": true,\n      \"title\": \"Grafana\",\n      \"type\": \"link\",\n      \"url\": \"https://grafana.com/grafana/dashboards/1860\"\n    }\n  ],\n  \"liveNow\": false,\n  \"panels\": [\n    {\n      \"collapsed\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 0\n      },\n      \"id\": 261,\n      \"panels\": [],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Quick CPU / Mem / Disk\",\n      \"type\": \"row\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Busy state of all CPU cores together\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": 
\"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"max\": 100,\n          \"min\": 0,\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"rgba(50, 172, 45, 0.97)\",\n                \"value\": null\n              },\n              {\n                \"color\": \"rgba(237, 129, 40, 0.89)\",\n                \"value\": 85\n              },\n              {\n                \"color\": \"rgba(245, 54, 54, 0.9)\",\n                \"value\": 95\n              }\n            ]\n          },\n          \"unit\": \"percent\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 4,\n        \"w\": 3,\n        \"x\": 0,\n        \"y\": 1\n      },\n      \"id\": 20,\n      \"links\": [],\n      \"options\": {\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"showThresholdLabels\": false,\n        \"showThresholdMarkers\": true\n      },\n      \"pluginVersion\": \"9.2.3\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"(sum by(instance) (irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode!=\\\"idle\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))) * 100\",\n          \"hide\": false,\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"\",\n          \"range\": true,\n          \"refId\": \"A\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"CPU Busy\",\n      \"type\": \"gauge\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Busy state of all CPU cores together (5 min average)\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"max\": 100,\n          \"min\": 0,\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"rgba(50, 172, 45, 0.97)\",\n                \"value\": null\n              },\n              {\n                \"color\": \"rgba(237, 129, 40, 0.89)\",\n                \"value\": 85\n              },\n              {\n                \"color\": \"rgba(245, 54, 54, 0.9)\",\n                \"value\": 95\n              }\n            ]\n          },\n          \"unit\": \"percent\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 4,\n        \"w\": 3,\n        \"x\": 3,\n        \"y\": 1\n      },\n      \"id\": 155,\n      \"links\": [],\n      \"options\": {\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": 
false\n        },\n        \"showThresholdLabels\": false,\n        \"showThresholdMarkers\": true\n      },\n      \"pluginVersion\": \"9.2.3\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"avg(node_load5{instance=\\\"$node\\\"}) /  count(count(node_cpu_seconds_total{instance=\\\"$node\\\"}) by (cpu)) * 100\",\n          \"format\": \"time_series\",\n          \"hide\": false,\n          \"intervalFactor\": 1,\n          \"refId\": \"A\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"Sys Load (5m avg)\",\n      \"type\": \"gauge\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Busy state of all CPU cores together (15 min average)\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"max\": 100,\n          \"min\": 0,\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"rgba(50, 172, 45, 0.97)\",\n                \"value\": null\n              },\n              {\n                \"color\": \"rgba(237, 129, 40, 0.89)\",\n                \"value\": 85\n              },\n              {\n                \"color\": \"rgba(245, 54, 54, 0.9)\",\n                \"value\": 95\n              }\n            ]\n          },\n          \"unit\": \"percent\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 4,\n        \"w\": 3,\n        \"x\": 6,\n        \"y\": 1\n      },\n      \"id\": 19,\n      \"links\": [],\n      \"options\": {\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"showThresholdLabels\": false,\n        \"showThresholdMarkers\": true\n      },\n      \"pluginVersion\": \"9.2.3\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"avg(node_load15{instance=\\\"$node\\\"}) /  count(count(node_cpu_seconds_total{instance=\\\"$node\\\"}) by (cpu)) * 100\",\n          \"hide\": false,\n          \"intervalFactor\": 1,\n          \"refId\": \"A\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"Sys Load (15m avg)\",\n      \"type\": \"gauge\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Non available RAM memory\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"decimals\": 0,\n          \"mappings\": [],\n          \"max\": 100,\n          \"min\": 0,\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"rgba(50, 172, 45, 0.97)\",\n                \"value\": 
null\n              },\n              {\n                \"color\": \"rgba(237, 129, 40, 0.89)\",\n                \"value\": 80\n              },\n              {\n                \"color\": \"rgba(245, 54, 54, 0.9)\",\n                \"value\": 90\n              }\n            ]\n          },\n          \"unit\": \"percent\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 4,\n        \"w\": 3,\n        \"x\": 9,\n        \"y\": 1\n      },\n      \"hideTimeOverride\": false,\n      \"id\": 16,\n      \"links\": [],\n      \"options\": {\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"showThresholdLabels\": false,\n        \"showThresholdMarkers\": true\n      },\n      \"pluginVersion\": \"9.2.3\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"((node_memory_MemTotal_bytes{instance=\\\"$node\\\"} - node_memory_MemFree_bytes{instance=\\\"$node\\\"}) / (node_memory_MemTotal_bytes{instance=\\\"$node\\\"} )) * 100\",\n          \"format\": \"time_series\",\n          \"hide\": true,\n          \"intervalFactor\": 1,\n          \"refId\": \"A\",\n          \"step\": 240\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"100 - ((node_memory_MemAvailable_bytes{instance=\\\"$node\\\"} * 100) / node_memory_MemTotal_bytes{instance=\\\"$node\\\"})\",\n          \"format\": \"time_series\",\n          \"hide\": false,\n          \"intervalFactor\": 1,\n          \"refId\": \"B\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"RAM Used\",\n      \"type\": \"gauge\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Used Swap\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"max\": 100,\n          \"min\": 0,\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"rgba(50, 172, 45, 0.97)\",\n                \"value\": null\n              },\n              {\n                \"color\": \"rgba(237, 129, 40, 0.89)\",\n                \"value\": 10\n              },\n              {\n                \"color\": \"rgba(245, 54, 54, 0.9)\",\n                \"value\": 25\n              }\n            ]\n          },\n          \"unit\": \"percent\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 4,\n        \"w\": 3,\n        \"x\": 12,\n        \"y\": 1\n      },\n      \"id\": 21,\n      \"links\": [],\n      \"options\": {\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        
\"showThresholdLabels\": false,\n        \"showThresholdMarkers\": true\n      },\n      \"pluginVersion\": \"9.2.3\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"((node_memory_SwapTotal_bytes{instance=\\\"$node\\\"} - node_memory_SwapFree_bytes{instance=\\\"$node\\\"}) / (node_memory_SwapTotal_bytes{instance=\\\"$node\\\"} )) * 100\",\n          \"intervalFactor\": 1,\n          \"refId\": \"A\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"SWAP Used\",\n      \"type\": \"gauge\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Used Root FS\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"max\": 100,\n          \"min\": 0,\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"rgba(50, 172, 45, 0.97)\",\n                \"value\": null\n              },\n              {\n                \"color\": \"rgba(237, 129, 40, 0.89)\",\n                \"value\": 80\n              },\n              {\n                \"color\": \"rgba(245, 54, 54, 0.9)\",\n                \"value\": 90\n              }\n            ]\n          },\n          \"unit\": \"percent\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 4,\n        \"w\": 3,\n        \"x\": 15,\n        \"y\": 1\n      },\n      \"id\": 154,\n      \"links\": [],\n      \"options\": {\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"showThresholdLabels\": false,\n        \"showThresholdMarkers\": true\n      },\n      \"pluginVersion\": \"9.2.3\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"100 - ((node_filesystem_avail_bytes{instance=\\\"$node\\\",mountpoint=\\\"/\\\",fstype!=\\\"rootfs\\\"} * 100) / node_filesystem_size_bytes{instance=\\\"$node\\\",mountpoint=\\\"/\\\",fstype!=\\\"rootfs\\\"})\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 1,\n          \"refId\": \"A\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"Root FS Used\",\n      \"type\": \"gauge\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Total number of CPU cores\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          
\"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"short\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 2,\n        \"w\": 2,\n        \"x\": 18,\n        \"y\": 1\n      },\n      \"id\": 14,\n      \"links\": [],\n      \"maxDataPoints\": 100,\n      \"options\": {\n        \"colorMode\": \"none\",\n        \"graphMode\": \"none\",\n        \"justifyMode\": \"auto\",\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"textMode\": \"auto\"\n      },\n      \"pluginVersion\": \"9.2.3\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"count(count(node_cpu_seconds_total{instance=\\\"$node\\\"}) by (cpu))\",\n          \"interval\": \"\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"\",\n          \"refId\": \"A\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"CPU Cores\",\n      \"type\": \"stat\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"System uptime\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"decimals\": 1,\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"s\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 2,\n        \"w\": 4,\n        \"x\": 20,\n        \"y\": 1\n      },\n      \"hideTimeOverride\": true,\n      \"id\": 15,\n      \"links\": [],\n      \"maxDataPoints\": 100,\n      \"options\": {\n        \"colorMode\": \"none\",\n        \"graphMode\": \"none\",\n        \"justifyMode\": \"auto\",\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"textMode\": \"auto\"\n      },\n      \"pluginVersion\": \"9.2.3\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"node_time_seconds{instance=\\\"$node\\\"} - node_boot_time_seconds{instance=\\\"$node\\\"}\",\n          \"intervalFactor\": 1,\n          \"refId\": \"A\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"Uptime\",\n      \"type\": 
\"stat\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Total RootFS\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"decimals\": 0,\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"rgba(50, 172, 45, 0.97)\",\n                \"value\": null\n              },\n              {\n                \"color\": \"rgba(237, 129, 40, 0.89)\",\n                \"value\": 70\n              },\n              {\n                \"color\": \"rgba(245, 54, 54, 0.9)\",\n                \"value\": 90\n              }\n            ]\n          },\n          \"unit\": \"bytes\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 2,\n        \"w\": 2,\n        \"x\": 18,\n        \"y\": 3\n      },\n      \"id\": 23,\n      \"links\": [],\n      \"maxDataPoints\": 100,\n      \"options\": {\n        \"colorMode\": \"none\",\n        \"graphMode\": \"none\",\n        \"justifyMode\": \"auto\",\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"textMode\": \"auto\"\n      },\n      \"pluginVersion\": \"9.2.3\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"node_filesystem_size_bytes{instance=\\\"$node\\\",mountpoint=\\\"/\\\",fstype!=\\\"rootfs\\\"}\",\n          \"format\": \"time_series\",\n          \"hide\": false,\n          \"intervalFactor\": 1,\n          \"refId\": \"A\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"RootFS Total\",\n      \"type\": \"stat\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Total RAM\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"decimals\": 0,\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"bytes\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 2,\n        \"w\": 2,\n        \"x\": 20,\n        \"y\": 3\n      },\n      \"id\": 75,\n      \"links\": [],\n      \"maxDataPoints\": 100,\n      \"options\": {\n        \"colorMode\": \"none\",\n        
\"graphMode\": \"none\",\n        \"justifyMode\": \"auto\",\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"textMode\": \"auto\"\n      },\n      \"pluginVersion\": \"9.2.3\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"node_memory_MemTotal_bytes{instance=\\\"$node\\\"}\",\n          \"intervalFactor\": 1,\n          \"refId\": \"A\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"RAM Total\",\n      \"type\": \"stat\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Total SWAP\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"decimals\": 0,\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"bytes\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 2,\n        \"w\": 2,\n        \"x\": 22,\n        \"y\": 3\n      },\n      \"id\": 18,\n      \"links\": [],\n      \"maxDataPoints\": 100,\n      \"options\": {\n        \"colorMode\": \"none\",\n        \"graphMode\": \"none\",\n        \"justifyMode\": \"auto\",\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"textMode\": \"auto\"\n      },\n      \"pluginVersion\": \"9.2.3\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"node_memory_SwapTotal_bytes{instance=\\\"$node\\\"}\",\n          \"intervalFactor\": 1,\n          \"refId\": \"A\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"SWAP Total\",\n      \"type\": \"stat\"\n    },\n    {\n      \"collapsed\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 5\n      },\n      \"id\": 263,\n      \"panels\": [],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Basic CPU / Mem / Net / Disk\",\n      \"type\": \"row\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Basic CPU info\",\n      \"fieldConfig\": {\n        
\"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 40,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"smooth\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"never\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"percent\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"links\": [],\n          \"mappings\": [],\n          \"min\": 0,\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"percentunit\"\n        },\n        \"overrides\": [\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Busy Iowait\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#890F02\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Idle\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#052B51\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Busy Iowait\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#890F02\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Idle\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#7EB26D\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Busy System\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#EAB839\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n 
           \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Busy User\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#0A437C\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Busy Other\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#6D1F62\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          }\n        ]\n      },\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 6\n      },\n      \"id\": 77,\n      \"links\": [],\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true,\n          \"width\": 250\n        },\n        \"tooltip\": {\n          \"mode\": \"multi\",\n          \"sort\": \"desc\"\n        }\n      },\n      \"pluginVersion\": \"9.2.0\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(instance) (irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode=\\\"system\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n          \"format\": \"time_series\",\n          \"hide\": false,\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"Busy System\",\n          \"range\": true,\n          \"refId\": \"A\",\n          \"step\": 240\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(instance) (irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode=\\\"user\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n          \"format\": \"time_series\",\n          \"hide\": false,\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"Busy User\",\n          \"range\": true,\n          \"refId\": \"B\",\n          \"step\": 240\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(instance) (irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode=\\\"iowait\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"Busy Iowait\",\n          \"range\": true,\n          \"refId\": \"C\",\n          \"step\": 240\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(instance) 
(irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode=~\\\".*irq\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"Busy IRQs\",\n          \"range\": true,\n          \"refId\": \"D\",\n          \"step\": 240\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(instance) (irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode!='idle',mode!='user',mode!='system',mode!='iowait',mode!='irq',mode!='softirq'}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"Busy Other\",\n          \"range\": true,\n          \"refId\": \"E\",\n          \"step\": 240\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"editorMode\": \"code\",\n          \"expr\": \"sum by(instance) (irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode=\\\"idle\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"Idle\",\n          \"range\": true,\n          \"refId\": \"F\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"CPU Basic\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Basic memory usage\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 40,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"never\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"normal\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"links\": [],\n          \"mappings\": [],\n          \"min\": 0,\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"bytes\"\n   
     },\n        \"overrides\": [\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Apps\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#629E51\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Buffers\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#614D93\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Cache\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#6D1F62\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Cached\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#511749\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Committed\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#508642\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Free\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#0A437C\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Hardware Corrupted - Amount of RAM that the kernel identified as corrupted / not working\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#CFFAFF\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Inactive\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#584477\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"PageTables\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n            
      \"fixedColor\": \"#0A50A1\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Page_Tables\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#0A50A1\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"RAM_Free\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#E0F9D7\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"SWAP Used\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#BF1B00\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Slab\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#806EB7\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Slab_Cache\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#E0752D\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Swap\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#BF1B00\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Swap Used\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#BF1B00\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Swap_Cache\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#C15C17\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Swap_Free\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n    
\"value\": {\n                  \"fixedColor\": \"#2F575E\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Unused\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#EAB839\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"RAM Total\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#E0F9D7\",\n                  \"mode\": \"fixed\"\n                }\n              },\n              {\n                \"id\": \"custom.fillOpacity\",\n                \"value\": 0\n              },\n              {\n                \"id\": \"custom.stacking\",\n                \"value\": {\n                  \"group\": false,\n                  \"mode\": \"normal\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"RAM Cache + Buffer\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#052B51\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"RAM Free\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#7EB26D\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Available\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#DEDAF7\",\n                  \"mode\": \"fixed\"\n                }\n              },\n              {\n                \"id\": \"custom.fillOpacity\",\n                \"value\": 0\n              },\n              {\n                \"id\": \"custom.stacking\",\n                \"value\": {\n                  \"group\": false,\n                  \"mode\": \"normal\"\n                }\n              }\n            ]\n          }\n        ]\n      },\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 6\n      },\n      \"id\": 78,\n      \"links\": [],\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true,\n          \"width\": 350\n        },\n        \"tooltip\": {\n          \"mode\": \"multi\",\n          \"sort\": \"none\"\n        }\n      },\n      \"pluginVersion\": \"9.2.0\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": 
\"node_memory_MemTotal_bytes{instance=\\\"$node\\\"}\",\n          \"format\": \"time_series\",\n          \"hide\": false,\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"RAM Total\",\n          \"refId\": \"A\",\n          \"step\": 240\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"node_memory_MemTotal_bytes{instance=\\\"$node\\\"} - node_memory_MemFree_bytes{instance=\\\"$node\\\"} - (node_memory_Cached_bytes{instance=\\\"$node\\\"} + node_memory_Buffers_bytes{instance=\\\"$node\\\"} + node_memory_SReclaimable_bytes{instance=\\\"$node\\\"})\",\n          \"format\": \"time_series\",\n          \"hide\": false,\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"RAM Used\",\n          \"refId\": \"B\",\n          \"step\": 240\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"node_memory_Cached_bytes{instance=\\\"$node\\\"} + node_memory_Buffers_bytes{instance=\\\"$node\\\"} + node_memory_SReclaimable_bytes{instance=\\\"$node\\\"}\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"RAM Cache + Buffer\",\n          \"refId\": \"C\",\n          \"step\": 240\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"node_memory_MemFree_bytes{instance=\\\"$node\\\"}\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"RAM Free\",\n          \"refId\": \"D\",\n          \"step\": 240\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"(node_memory_SwapTotal_bytes{instance=\\\"$node\\\"} - node_memory_SwapFree_bytes{instance=\\\"$node\\\"})\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"SWAP Used\",\n          \"refId\": \"E\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"Memory Basic\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Basic network info per interface\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 40,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"never\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            
\"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"links\": [],\n          \"mappings\": [],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"bps\"\n        },\n        \"overrides\": [\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Recv_bytes_eth2\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#7EB26D\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Recv_bytes_lo\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#0A50A1\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Recv_drop_eth2\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#6ED0E0\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Recv_drop_lo\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#E0F9D7\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Recv_errs_eth2\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#BF1B00\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Recv_errs_lo\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#CCA300\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Trans_bytes_eth2\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#7EB26D\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Trans_bytes_lo\"\n            },\n            \"properties\": [\n              
{\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#0A50A1\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Trans_drop_eth2\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#6ED0E0\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Trans_drop_lo\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#E0F9D7\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Trans_errs_eth2\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#BF1B00\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"Trans_errs_lo\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#CCA300\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"recv_bytes_lo\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#0A50A1\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"recv_drop_eth0\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#99440A\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"recv_drop_lo\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#967302\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"recv_errs_eth0\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#BF1B00\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              
\"options\": \"recv_errs_lo\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#890F02\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"trans_bytes_eth0\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#7EB26D\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"trans_bytes_lo\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#0A50A1\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"trans_drop_eth0\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#99440A\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"trans_drop_lo\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#967302\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"trans_errs_eth0\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#BF1B00\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byName\",\n              \"options\": \"trans_errs_lo\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"color\",\n                \"value\": {\n                  \"fixedColor\": \"#890F02\",\n                  \"mode\": \"fixed\"\n                }\n              }\n            ]\n          },\n          {\n            \"matcher\": {\n              \"id\": \"byRegexp\",\n              \"options\": \"/.*trans.*/\"\n            },\n            \"properties\": [\n              {\n                \"id\": \"custom.transform\",\n                \"value\": \"negative-Y\"\n              }\n            ]\n          }\n        ]\n      },\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 13\n      },\n      \"id\": 74,\n      \"links\": [],\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"multi\",\n          \"sort\": \"none\"\n        }\n      },\n      
\"pluginVersion\": \"9.2.0\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"irate(node_network_receive_bytes_total{instance=\\\"$node\\\"}[$__rate_interval])*8\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"recv {{device}}\",\n          \"refId\": \"A\",\n          \"step\": 240\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"irate(node_network_transmit_bytes_total{instance=\\\"$node\\\"}[$__rate_interval])*8\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"trans {{device}} \",\n          \"refId\": \"B\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"Network Traffic Basic\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"description\": \"Disk space used of all filesystems mounted\",\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"palette-classic\"\n          },\n          \"custom\": {\n            \"axisCenteredZero\": false,\n            \"axisColorMode\": \"text\",\n            \"axisLabel\": \"\",\n            \"axisPlacement\": \"auto\",\n            \"barAlignment\": 0,\n            \"drawStyle\": \"line\",\n            \"fillOpacity\": 40,\n            \"gradientMode\": \"none\",\n            \"hideFrom\": {\n              \"legend\": false,\n              \"tooltip\": false,\n              \"viz\": false\n            },\n            \"lineInterpolation\": \"linear\",\n            \"lineWidth\": 1,\n            \"pointSize\": 5,\n            \"scaleDistribution\": {\n              \"type\": \"linear\"\n            },\n            \"showPoints\": \"never\",\n            \"spanNulls\": false,\n            \"stacking\": {\n              \"group\": \"A\",\n              \"mode\": \"none\"\n            },\n            \"thresholdsStyle\": {\n              \"mode\": \"off\"\n            }\n          },\n          \"links\": [],\n          \"mappings\": [],\n          \"max\": 100,\n          \"min\": 0,\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"percent\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 13\n      },\n      \"id\": 152,\n      \"links\": [],\n      \"options\": {\n        \"legend\": {\n          \"calcs\": [],\n          \"displayMode\": \"list\",\n          \"placement\": \"bottom\",\n          \"showLegend\": true\n        },\n        \"tooltip\": {\n          \"mode\": \"multi\",\n          \"sort\": \"none\"\n        }\n      },\n      \"pluginVersion\": \"9.2.0\",\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"expr\": \"100 - 
((node_filesystem_avail_bytes{instance=\\\"$node\\\",device!~'rootfs'} * 100) / node_filesystem_size_bytes{instance=\\\"$node\\\",device!~'rootfs'})\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"{{mountpoint}}\",\n          \"refId\": \"A\",\n          \"step\": 240\n        }\n      ],\n      \"title\": \"Disk Space Used Basic\",\n      \"type\": \"timeseries\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 20\n      },\n      \"id\": 265,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"percentage\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 70,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"smooth\",\n                \"lineWidth\": 2,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"percent\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\",\n                    \"value\": null\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"percentunit\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Idle - Waiting for something to happen\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Iowait - Waiting for I/O to complete\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": 
\"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Irq - Servicing interrupts\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Nice - Niced processes executing in user mode\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Softirq - Servicing softirqs\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Steal - Time spent in other operating systems when running in a virtualized environment\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#FCE2DE\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"System - Processes executing in kernel mode\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"User - Normal processes executing in user mode\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#5195CE\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 12,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 7\n          },\n          \"id\": 3,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              
\"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 250\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"desc\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"editorMode\": \"code\",\n              \"expr\": \"sum by(instance) (irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode=\\\"system\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"System - Processes executing in kernel mode\",\n              \"range\": true,\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"editorMode\": \"code\",\n              \"expr\": \"sum by(instance) (irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode=\\\"user\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"User - Normal processes executing in user mode\",\n              \"range\": true,\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"editorMode\": \"code\",\n              \"expr\": \"sum by(instance) (irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode=\\\"nice\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Nice - Niced processes executing in user mode\",\n              \"range\": true,\n              \"refId\": \"C\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"editorMode\": \"code\",\n              \"expr\": \"sum by(instance) (irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode=\\\"iowait\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Iowait - Waiting for I/O to complete\",\n              \"range\": true,\n              \"refId\": \"E\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"editorMode\": \"code\",\n              \"expr\": \"sum by(instance) 
(irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode=\\\"irq\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Irq - Servicing interrupts\",\n              \"range\": true,\n              \"refId\": \"F\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"editorMode\": \"code\",\n              \"expr\": \"sum by(instance) (irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode=\\\"softirq\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Softirq - Servicing softirqs\",\n              \"range\": true,\n              \"refId\": \"G\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"editorMode\": \"code\",\n              \"expr\": \"sum by(instance) (irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode=\\\"steal\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Steal - Time spent in other operating systems when running in a virtualized environment\",\n              \"range\": true,\n              \"refId\": \"H\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"editorMode\": \"code\",\n              \"expr\": \"sum by(instance) (irate(node_cpu_seconds_total{instance=\\\"$node\\\", mode=\\\"idle\\\"}[$__rate_interval])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])))\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Idle - Waiting for something to happen\",\n              \"range\": true,\n              \"refId\": \"J\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"CPU\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 40,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n             
     \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"normal\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\",\n                    \"value\": null\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Apps\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#629E51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": 
{\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A437C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Hardware Corrupted - Amount of RAM that the kernel identified as corrupted / not working\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#CFFAFF\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"RAM_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#806EB7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      
\"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap - Swap memory usage\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#2F575E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Unused\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Unused - Free memory unassigned\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Hardware Corrupted - *./\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.stacking\",\n                    \"value\": {\n                      \"group\": false,\n                      \"mode\": \"normal\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 12,\n            \"w\": 12,\n            \"x\": 12,\n     
       \"y\": 7\n          },\n          \"id\": 24,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 350\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_MemTotal_bytes{instance=\\\"$node\\\"} - node_memory_MemFree_bytes{instance=\\\"$node\\\"} - node_memory_Buffers_bytes{instance=\\\"$node\\\"} - node_memory_Cached_bytes{instance=\\\"$node\\\"} - node_memory_Slab_bytes{instance=\\\"$node\\\"} - node_memory_PageTables_bytes{instance=\\\"$node\\\"} - node_memory_SwapCached_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Apps - Memory used by user-space applications\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_PageTables_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"PageTables - Memory used to map between virtual and physical memory addresses\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_SwapCached_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"SwapCache - Memory that keeps track of pages that have been fetched from swap but not yet been modified\",\n              \"refId\": \"C\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Slab_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Slab - Memory used by the kernel to cache data structures for its own use (caches like inode, dentry, etc)\",\n              \"refId\": \"D\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Cached_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Cache - Parked file data (file 
content) cache\",\n              \"refId\": \"E\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Buffers_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Buffers - Block device (e.g. harddisk) cache\",\n              \"refId\": \"F\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_MemFree_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Unused - Free memory unassigned\",\n              \"refId\": \"G\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"(node_memory_SwapTotal_bytes{instance=\\\"$node\\\"} - node_memory_SwapFree_bytes{instance=\\\"$node\\\"})\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Swap - Swap space used\",\n              \"refId\": \"H\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_HardwareCorrupted_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Hardware Corrupted - Amount of RAM that the kernel identified as corrupted / not working\",\n              \"refId\": \"I\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Stack\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bits out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 40,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  
\"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\",\n                    \"value\": null\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bps\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"receive_packets_eth0\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"receive_packets_lo\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"transmit_packets_eth0\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"transmit_packets_lo\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Trans.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 12,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 19\n          },\n          \"id\": 84,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n  
            \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_receive_bytes_total{instance=\\\"$node\\\"}[$__rate_interval])*8\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Receive\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_transmit_bytes_total{instance=\\\"$node\\\"}[$__rate_interval])*8\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Transmit\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Network Traffic\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 40,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\",\n                    \"value\": null\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 12,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 19\n          },\n          \"id\": 156,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n              
  \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_filesystem_size_bytes{instance=\\\"$node\\\",device!~'rootfs'} - node_filesystem_avail_bytes{instance=\\\"$node\\\",device!~'rootfs'}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{mountpoint}}\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Disk Space Used\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"IO read (-) / write (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\",\n                    \"value\": null\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"iops\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Read.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda_.*/\"\n                },\n                \"properties\": [\n                  {\n                    
\"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EF843C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda2_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BA43A9\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda3_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F4D598\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": 
\"byRegexp\",\n                  \"options\": \"/.*sdb1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#962D82\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#9AC48A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#65C5DB\",\n                      \"mode\": \"fixed\"\n       
             }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9934E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#FCEACA\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9E2D2\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 12,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 31\n          },\n          \"id\": 229,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"single\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_reads_completed_total{instance=\\\"$node\\\",device=~\\\"$diskdevices\\\"}[$__rate_interval])\",\n              \"intervalFactor\": 4,\n              \"legendFormat\": \"{{device}} - Reads completed\",\n          
    \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_writes_completed_total{instance=\\\"$node\\\",device=~\\\"$diskdevices\\\"}[$__rate_interval])\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Writes completed\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Disk IOps\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes read (-) / write (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 40,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\",\n                    \"value\": null\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"Bps\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"io time\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#890F02\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*read*./\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": 
\"/.*sda.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EF843C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byType\",\n                  \"options\": \"time\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.axisPlacement\",\n                    \"value\": \"hidden\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 12,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 31\n          },\n          \"id\": 42,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": 
\"irate(node_disk_read_bytes_total{instance=\\\"$node\\\",device=~\\\"$diskdevices\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Successfully read bytes\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_written_bytes_total{instance=\\\"$node\\\",device=~\\\"$diskdevices\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Successfully written bytes\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"I/O Usage Read / Write\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"%util\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 40,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\",\n                    \"value\": null\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"percentunit\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"io time\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#890F02\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": 
\"byType\",\n                  \"options\": \"time\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.axisPlacement\",\n                    \"value\": \"hidden\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 12,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 43\n          },\n          \"id\": 127,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_io_time_seconds_total{instance=\\\"$node\\\",device=~\\\"$diskdevices\\\"} [$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}}\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"I/O Utilization\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"percentage\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"bars\",\n                \"fillOpacity\": 70,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"smooth\",\n                \"lineWidth\": 2,\n                \"pointSize\": 3,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"mappings\": [],\n              \"max\": 1,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\",\n                    \"value\": null\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              
\"unit\": \"percentunit\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/^Guest - /\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#5195ce\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/^GuestNice - /\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#c15c17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 12,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 43\n          },\n          \"id\": 319,\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"desc\"\n            }\n          },\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"editorMode\": \"code\",\n              \"expr\": \"sum by(instance) (irate(node_cpu_guest_seconds_total{instance=\\\"$node\\\", mode=\\\"user\\\"}[1m])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[1m])))\",\n              \"hide\": false,\n              \"legendFormat\": \"Guest - Time spent running a virtual CPU for a guest operating system\",\n              \"range\": true,\n              \"refId\": \"A\"\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"editorMode\": \"code\",\n              \"expr\": \"sum by(instance) (irate(node_cpu_guest_seconds_total{instance=\\\"$node\\\", mode=\\\"nice\\\"}[1m])) / on(instance) group_left sum by (instance)((irate(node_cpu_seconds_total{instance=\\\"$node\\\"}[1m])))\",\n              \"hide\": false,\n              \"legendFormat\": \"GuestNice - Time spent running a niced guest  (virtual CPU for guest operating system)\",\n              \"range\": true,\n              \"refId\": \"B\"\n            }\n          ],\n          \"title\": \"CPU spent seconds in guests (VMs)\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"CPU / Memory / Net / Disk\",\n      \"type\": \"row\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": 
\"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 21\n      },\n      \"id\": 266,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"normal\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Apps\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#629E51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  
\"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A437C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Hardware Corrupted - Amount of RAM that the kernel identified as corrupted / not working\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#CFFAFF\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"RAM_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": 
\"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#806EB7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#2F575E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Unused\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 38\n          },\n          \"id\": 136,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 350\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          
},\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Inactive_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Inactive - Memory which has been less recently used.  It is more eligible to be reclaimed for other purposes\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Active_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Active - Memory that has been used more recently and usually not reclaimed unless absolutely necessary\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Active / Inactive\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Apps\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#629E51\",\n                      \"mode\": \"fixed\"\n                    }\n     
             }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A437C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Hardware Corrupted - Amount of RAM that the kernel identified as corrupted / not working\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#CFFAFF\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n       
             \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"RAM_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#806EB7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#2F575E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n           
       \"options\": \"Unused\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*CommitLimit - *./\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  },\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 38\n          },\n          \"id\": 135,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 350\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Committed_AS_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Committed_AS - Amount of memory presently allocated on the system\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_CommitLimit_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"CommitLimit - Amount of  memory currently available to be allocated on the system\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Commited\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                
\"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"normal\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Apps\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#629E51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n      
            \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A437C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Hardware Corrupted - Amount of RAM that the kernel identified as corrupted / not working\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#CFFAFF\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"RAM_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#806EB7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      
\"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#2F575E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Unused\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 48\n          },\n          \"id\": 191,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 350\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Inactive_file_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Inactive_file - File-backed memory on inactive LRU list\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Inactive_anon_bytes{instance=\\\"$node\\\"}\",\n             
 \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Inactive_anon - Anonymous and swap cache on inactive LRU list, including tmpfs (shmem)\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Active_file_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Active_file - File-backed memory on active LRU list\",\n              \"refId\": \"C\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Active_anon_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Active_anon - Anonymous and swap cache on active least-recently-used (LRU) list, including tmpfs\",\n              \"refId\": \"D\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Active / Inactive Detail\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Active\"\n                },\n  
              \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#99440A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#58140C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Dirty\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#B7DBAB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n 
                 \"id\": \"byName\",\n                  \"options\": \"Mapped\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n             
       }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM + Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"VmallocUsed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 48\n          },\n          \"id\": 130,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Writeback_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Writeback - Memory which is actively being written back to disk\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_WritebackTmp_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"WritebackTmp - Memory used by FUSE for temporary writeback buffers\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Dirty_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              
\"legendFormat\": \"Dirty - Memory which is waiting to get written back to the disk\",\n              \"refId\": \"C\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Writeback and Dirty\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Apps\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#629E51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              
{\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A437C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Hardware Corrupted - Amount of RAM that the kernel identified as corrupted / not working\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#CFFAFF\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"RAM_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    
\"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#806EB7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#2F575E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Unused\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"ShmemHugePages - Memory used by shared memory (shmem) and tmpfs allocated  with huge pages\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"ShmemHugePages - Memory used by shared memory (shmem) and 
tmpfs allocated  with huge pages\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 58\n          },\n          \"id\": 138,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 350\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Mapped_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Mapped - Used memory in mapped pages files which have been mmaped, such as libraries\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Shmem_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Shmem - Used shared memory (shared between several processes, thus including RAM disks)\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_ShmemHugePages_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"ShmemHugePages - Memory used by shared memory (shmem) and tmpfs allocated  with huge pages\",\n              \"refId\": \"C\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_ShmemPmdMapped_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"ShmemPmdMapped - Ammount of shared (shmem/tmpfs) memory backed by huge pages\",\n              \"refId\": \"D\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Shared and Mapped\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                
\"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"normal\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Active\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#99440A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#58140C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": 
\"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Dirty\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#B7DBAB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Mapped\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                 
 }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM + Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"VmallocUsed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 58\n          },\n          \"id\": 131,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": 
[\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_SUnreclaim_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"SUnreclaim - Part of Slab that cannot be reclaimed under memory pressure\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_SReclaimable_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"SReclaimable - Part of Slab that might be reclaimed, such as caches\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Slab\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": 
\"byName\",\n                  \"options\": \"Active\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#99440A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#58140C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Dirty\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#B7DBAB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n         
       ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Mapped\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      
\"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM + Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"VmallocUsed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 68\n          },\n          \"id\": 70,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_VmallocChunk_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"VmallocChunk - Largest contiguous block of vmalloc area which is free\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_VmallocTotal_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"VmallocTotal - Total size of vmalloc memory area\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_VmallocUsed_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"VmallocUsed - Amount of vmalloc area which is used\",\n              \"refId\": \"C\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Vmalloc\",\n          \"type\": \"timeseries\"\n        },\n        {\n          
\"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Apps\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#629E51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      
\"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A437C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Hardware Corrupted - Amount of RAM that the kernel identified as corrupted / not working\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#CFFAFF\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"RAM_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  
\"options\": \"Slab\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#806EB7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#2F575E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Unused\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 68\n          },\n          \"id\": 159,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 350\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": 
\"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Bounce_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Bounce - Memory used for block device bounce buffers\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Bounce\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Active\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#99440A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#58140C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": 
{\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Dirty\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#B7DBAB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Mapped\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n               
 \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM + Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"VmallocUsed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                
\"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Inactive *./\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 78\n          },\n          \"id\": 129,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_AnonHugePages_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"AnonHugePages - Memory in anonymous huge pages\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_AnonPages_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"AnonPages - Memory in user pages not backed by files\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Anonymous\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n     
         \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Apps\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#629E51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A437C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Hardware Corrupted - Amount of RAM that the kernel identified as corrupted / not working\"\n                },\n                \"properties\": [\n                  {\n               
     \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#CFFAFF\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"RAM_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#806EB7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  
\"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#2F575E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Unused\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 78\n          },\n          \"id\": 160,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 350\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_KernelStack_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"KernelStack - Kernel memory stack. 
This is not reclaimable\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Percpu_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"PerCPU - Per CPU memory allocated dynamically by loadable modules\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Kernel / CPU\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"pages\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Active\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#99440A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#58140C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n   
           {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Dirty\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#B7DBAB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Mapped\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                     
 \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#806EB7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM + Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#806EB7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"VmallocUsed\"\n                },\n                \"properties\": [\n                  {\n                  
  \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 88\n          },\n          \"id\": 140,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_HugePages_Free{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"HugePages_Free - Huge pages in the pool that are not yet allocated\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_HugePages_Rsvd{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"HugePages_Rsvd - Huge pages for which a commitment to allocate from the pool has been made, but no allocation has yet been made\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_HugePages_Surp{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"HugePages_Surp - Huge pages in the pool above the value in /proc/sys/vm/nr_hugepages\",\n              \"refId\": \"C\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory HugePages Counter\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                
\"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Active\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#99440A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#58140C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Dirty\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    
\"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#B7DBAB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Mapped\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                
},\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#806EB7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM + Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#806EB7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"VmallocUsed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 88\n          },\n          \"id\": 71,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_HugePages_Total{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"HugePages - Total size of the pool of huge pages\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": 
\"node_memory_Hugepagesize_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Hugepagesize - Huge Page size\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory HugePages Size\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Active\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#99440A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#58140C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                    
  \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Dirty\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#B7DBAB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Mapped\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": 
\"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM + Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"VmallocUsed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n         
   \"x\": 0,\n            \"y\": 98\n          },\n          \"id\": 128,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_DirectMap1G_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"DirectMap1G - Amount of pages mapped as this size\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_DirectMap2M_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"DirectMap2M - Amount of pages mapped as this size\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_DirectMap4k_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"DirectMap4K - Amount of pages mapped as this size\",\n              \"refId\": \"C\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory DirectMap\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n       
           \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Apps\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#629E51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A437C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Hardware Corrupted - Amount of RAM that the kernel identified as corrupted / not working\"\n                
},\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#CFFAFF\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"RAM_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#806EB7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                
\"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#2F575E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Unused\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 98\n          },\n          \"id\": 137,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 350\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Unevictable_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Unevictable - Amount of unevictable memory that can't be swapped out for a variety of reasons\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_Mlocked_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"MLocked - Size of pages locked to memory using the mlock() system call\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Unevictable and MLocked\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": 
\"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Active\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#99440A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#58140C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": 
\"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Dirty\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#B7DBAB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Mapped\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                 
 }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM + Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"VmallocUsed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 108\n          },\n          \"id\": 132,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": 
[\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_memory_NFS_Unstable_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"NFS Unstable - Memory in NFS pages sent to the server, but not yet commited to the storage\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory NFS\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Memory Meminfo\",\n      \"type\": \"row\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 22\n      },\n      \"id\": 267,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"pages out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n  
            {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*out/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 25\n          },\n          \"id\": 176,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_vmstat_pgpgin{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Pagesin - Page in operations\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_vmstat_pgpgout{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Pagesout - Page out operations\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Pages In / Out\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"pages out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n        
        }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*out/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 25\n          },\n          \"id\": 22,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_vmstat_pswpin{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Pswpin - Pages swapped in\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_vmstat_pswpout{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Pswpout - Pages swapped out\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Pages Swap In / Out\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"faults\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                
\"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"normal\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Apps\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#629E51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n              
      \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A437C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Hardware Corrupted - Amount of RAM that the kernel identified as corrupted / not working\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#CFFAFF\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"RAM_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#806EB7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n      
          \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#2F575E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Unused\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Pgfault - Page major and minor fault operations\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  },\n                  {\n                    \"id\": \"custom.stacking\",\n                    \"value\": {\n                      \"group\": false,\n                      \"mode\": \"normal\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 35\n          },\n          \"id\": 175,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 350\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_vmstat_pgfault{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              
\"intervalFactor\": 1,\n              \"legendFormat\": \"Pgfault - Page major and minor fault operations\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_vmstat_pgmajfault{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Pgmajfault - Major page fault operations\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_vmstat_pgfault{instance=\\\"$node\\\"}[$__rate_interval])  - irate(node_vmstat_pgmajfault{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Pgminfault - Minor page fault operations\",\n              \"refId\": \"C\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Memory Page Faults\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Active\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      
\"fixedColor\": \"#99440A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Buffers\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#58140C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6D1F62\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Cached\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Committed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#508642\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Dirty\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Free\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#B7DBAB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Mapped\"\n                },\n                \"properties\": [\n              
    {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PageTables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Page_Tables\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Slab_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Swap_Cache\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C15C17\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#511749\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total RAM\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": 
\"byName\",\n                  \"options\": \"Total RAM + Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#052B51\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Total Swap\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"VmallocUsed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 35\n          },\n          \"id\": 307,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_vmstat_oom_kill{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"oom killer invocations \",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"OOM Killer\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Memory Vmstat\",\n      \"type\": \"row\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 23\n      },\n      \"id\": 293,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": 
\"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"seconds\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"s\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Variation*./\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#890F02\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 40\n          },\n          \"id\": 260,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_timex_estimated_error_seconds{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Estimated error in seconds\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": 
\"node_timex_offset_seconds{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Time offset in between local system and reference clock\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_timex_maxerror_seconds{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Maximum error in seconds\",\n              \"refId\": \"C\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Time Syncronized Drift\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 40\n          },\n          \"id\": 291,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": 
\"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_timex_loop_time_constant{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Phase-locked loop time adjust\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Time PLL Adjust\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Variation*./\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#890F02\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 50\n          },\n          \"id\": 168,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n          
    \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_timex_sync_status{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Is clock synchronized to a reliable server (1 = yes, 0 = no)\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_timex_frequency_adjustment_ratio{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Local clock frequency adjustment\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Time Syncronized Status\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"seconds\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"s\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 50\n          },\n          \"id\": 294,\n          \"links\": [],\n          \"options\": {\n    
        \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_timex_tick_seconds{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Seconds between clock ticks\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_timex_tai_offset_seconds{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"International Atomic Time (TAI) offset\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Time Misc\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"System Timesync\",\n      \"type\": \"row\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 24\n      },\n      \"id\": 312,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                
}\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 27\n          },\n          \"id\": 62,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_procs_blocked{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Processes blocked waiting for I/O to complete\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_procs_running{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Processes in runnable state\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Processes Status\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"normal\"\n  
              },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 27\n          },\n          \"id\": 315,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_processes_state{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{ state }}\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Processes State\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"forks / sec\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n            
      {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 37\n          },\n          \"id\": 148,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_forks_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Process forks per second\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Processes Forks\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"decbytes\"\n            },\n            \"overrides\": [\n   
           {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Max.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 37\n          },\n          \"id\": 149,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(process_virtual_memory_bytes{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Processes virtual memory size in bytes\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"process_resident_memory_max_bytes{instance=\\\"$node\\\"}\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Maximum amount of resident memory available in bytes\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(process_virtual_memory_max_bytes{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Maximum amount of virtual memory available in bytes\",\n              \"refId\": \"D\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Processes Memory\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n            
    \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"PIDs limit\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F2495C\",\n                      \"mode\": \"fixed\"\n                    }\n                  },\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 47\n          },\n          \"id\": 313,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_processes_pids{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Number of PIDs\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": 
\"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_processes_max_processes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"PIDs limit\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"PIDs Number and Limit\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"seconds\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"s\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*waiting.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 47\n          },\n          \"id\": 305,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n 
               \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_schedstat_running_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"CPU {{ cpu }} - seconds spent running a process\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_schedstat_waiting_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"CPU {{ cpu }} - seconds spent by processing waiting for this CPU\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Process schedule stats Running / Waiting\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Threads limit\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F2495C\",\n                      \"mode\": \"fixed\"\n                    }\n                  },\n                  {\n            
        \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 57\n          },\n          \"id\": 314,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_processes_threads{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Allocated threads\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_processes_max_threads{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Threads limit\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Threads Number and Limit\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"System Processes\",\n      \"type\": \"row\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 25\n      },\n      \"id\": 269,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n            
      \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 42\n          },\n          \"id\": 8,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_context_switches_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Context switches\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_intr_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Interrupts\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Context Switches / Interrupts\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n             
   \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 42\n          },\n          \"id\": 7,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_load1{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 4,\n              \"legendFormat\": \"Load 1m\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_load5{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 4,\n              \"legendFormat\": \"Load 5m\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_load15{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 4,\n              \"legendFormat\": \"Load 15m\",\n              \"refId\": \"C\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"System Load\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                
\"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Critical*./\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  },\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Max*./\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EF843C\",\n                      \"mode\": \"fixed\"\n                    }\n                  },\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 52\n          },\n          \"id\": 259,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n         
       \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_interrupts_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{ type }} - {{ info }}\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Interrupts Detail\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 52\n          },\n          \"id\": 306,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_schedstat_timeslices_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"CPU {{ cpu 
}}\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Schedule timeslices executed by each cpu\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 62\n          },\n          \"id\": 151,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_entropy_available_bits{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Entropy available to random number generators\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Entropy\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n         
 },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"seconds\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"s\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 62\n          },\n          \"id\": 308,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(process_cpu_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Time spent\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"CPU time spent in user and system contexts\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": 
\"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Max*./\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#890F02\",\n                      \"mode\": \"fixed\"\n                    }\n                  },\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 72\n          },\n          \"id\": 64,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"process_max_fds{instance=\\\"$node\\\"}\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Maximum open file descriptors\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"process_open_fds{instance=\\\"$node\\\"}\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              
\"legendFormat\": \"Open file descriptors\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"File Descriptors\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"System Misc\",\n      \"type\": \"row\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 26\n      },\n      \"id\": 304,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"temperature\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"celsius\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Critical*./\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  },\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Max*./\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n  
                  \"value\": {\n                      \"fixedColor\": \"#EF843C\",\n                      \"mode\": \"fixed\"\n                    }\n                  },\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 43\n          },\n          \"id\": 158,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_hwmon_temp_celsius{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{ chip }} {{ sensor }} temp\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_hwmon_temp_crit_alarm_celsius{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": true,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{ chip }} {{ sensor }} Critical Alarm\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_hwmon_temp_crit_celsius{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{ chip }} {{ sensor }} Critical\",\n              \"refId\": \"C\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_hwmon_temp_crit_hyst_celsius{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": true,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{ chip }} {{ sensor }} Critical Historical\",\n              \"refId\": \"D\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_hwmon_temp_max_celsius{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": true,\n              \"interval\": \"\",\n              
\"intervalFactor\": 1,\n              \"legendFormat\": \"{{ chip }} {{ sensor }} Max\",\n              \"refId\": \"E\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Hardware temperature monitor\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Max*./\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EF843C\",\n                      \"mode\": \"fixed\"\n                    }\n                  },\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 43\n          },\n          \"id\": 300,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n           
     \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_cooling_device_cur_state{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Current {{ name }} in {{ type }}\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_cooling_device_max_state{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Max {{ name }} in {{ type }}\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Throttle cooling device\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 53\n          },\n          \"id\": 302,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              
\"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_power_supply_online{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{ power_supply }} online\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Power supply\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Hardware Misc\",\n      \"type\": \"row\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 27\n      },\n      \"id\": 296,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 30\n          },\n          \"id\": 297,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                
\"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_systemd_socket_accepted_connections_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{ name }} Connections\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Systemd Sockets\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"normal\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Failed\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F2495C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Inactive\"\n            
    },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#FF9830\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Active\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#73BF69\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Deactivating\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#FFCB7D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"Activating\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#C8F2C2\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 30\n          },\n          \"id\": 298,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_systemd_units{instance=\\\"$node\\\",state=\\\"activating\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Activating\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_systemd_units{instance=\\\"$node\\\",state=\\\"active\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Active\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": 
\"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_systemd_units{instance=\\\"$node\\\",state=\\\"deactivating\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Deactivating\",\n              \"refId\": \"C\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_systemd_units{instance=\\\"$node\\\",state=\\\"failed\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Failed\",\n              \"refId\": \"D\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_systemd_units{instance=\\\"$node\\\",state=\\\"inactive\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Inactive\",\n              \"refId\": \"E\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Systemd Units State\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Systemd\",\n      \"type\": \"row\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 28\n      },\n      \"id\": 270,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"The number (after merges) of I/O requests completed per second for the device\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"IO read (-) / write (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              
},\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"iops\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Read.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EF843C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": 
\"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda2_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BA43A9\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda3_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F4D598\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#962D82\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc3.*/\"\n                },\n                \"properties\": [\n                  {\n       
             \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#9AC48A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#65C5DB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9934E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#FCEACA\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9E2D2\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 31\n          },\n          \"id\": 9,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              
\"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"single\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_reads_completed_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"intervalFactor\": 4,\n              \"legendFormat\": \"{{device}} - Reads completed\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_writes_completed_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Writes completed\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Disk IOps Completed\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"The number of bytes read from or written to the device per second\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes read (-) / write (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"Bps\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Read.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  
}\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EF843C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda2_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BA43A9\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda3_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                   
 \"value\": {\n                      \"fixedColor\": \"#F4D598\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#962D82\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#9AC48A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#65C5DB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": 
\"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9934E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#FCEACA\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9E2D2\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 31\n          },\n          \"id\": 33,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"single\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_read_bytes_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 4,\n              \"legendFormat\": \"{{device}} - Read bytes\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": 
\"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_written_bytes_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Written bytes\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Disk R/W Data\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"The average time for requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"time. read (-) / write (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 30,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"s\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Read.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb_.*/\"\n                },\n                
\"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EF843C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda2_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BA43A9\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda3_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F4D598\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n     
           \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#962D82\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#9AC48A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#65C5DB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9934E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": 
\"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#FCEACA\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9E2D2\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 41\n          },\n          \"id\": 37,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"single\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_read_time_seconds_total{instance=\\\"$node\\\"}[$__rate_interval]) / irate(node_disk_reads_completed_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 4,\n              \"legendFormat\": \"{{device}} - Read wait time avg\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_write_time_seconds_total{instance=\\\"$node\\\"}[$__rate_interval]) / irate(node_disk_writes_completed_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Write wait time avg\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Disk Average Wait 
Time\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"The average queue length of the requests that were issued to the device\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"aqu-sz\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"none\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  
\"options\": \"/.*sdd_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EF843C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda2_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BA43A9\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda3_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F4D598\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n               
   }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#962D82\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#9AC48A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#65C5DB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9934E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                  
  \"value\": {\n                      \"fixedColor\": \"#FCEACA\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9E2D2\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 41\n          },\n          \"id\": 35,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"single\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_io_time_weighted_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"interval\": \"\",\n              \"intervalFactor\": 4,\n              \"legendFormat\": \"{{device}}\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Average Queue Size\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"The number of read and write requests merged per second that were queued to the device\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"I/Os\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n      
        \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"iops\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Read.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EF843C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n        
      },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda2_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BA43A9\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda3_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F4D598\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#962D82\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n               
       \"fixedColor\": \"#9AC48A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#65C5DB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9934E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#FCEACA\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9E2D2\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 51\n          },\n          \"id\": 133,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": 
\"single\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_reads_merged_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Read merged\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_writes_merged_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Write merged\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Disk R/W Merged\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"Percentage of elapsed time during which I/O requests were issued to the device (bandwidth utilization for the device). Device saturation occurs when this value is close to 100% for devices serving requests serially.  But for devices  serving requests in parallel, such as RAID arrays and modern SSDs, this number does not reflect their performance limits.\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"%util\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 30,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"percentunit\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": 
\"/.*sda_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EF843C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda2_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BA43A9\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda3_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F4D598\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n       
         ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#962D82\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#9AC48A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#65C5DB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": 
{\n                      \"fixedColor\": \"#F9934E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#FCEACA\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9E2D2\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 51\n          },\n          \"id\": 36,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"single\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_io_time_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"interval\": \"\",\n              \"intervalFactor\": 4,\n              \"legendFormat\": \"{{device}} - IO\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_discard_time_seconds_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              
\"interval\": \"\",\n              \"intervalFactor\": 4,\n              \"legendFormat\": \"{{device}} - discard\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Time Spent Doing I/Os\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"The number of outstanding requests at the instant the sample was taken. Incremented as requests are given to appropriate struct request_queue and decremented as they finish.\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"Outstanding req.\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"none\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc_.*/\"\n                },\n                \"properties\": [\n                  {\n    
                \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EF843C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda2_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BA43A9\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda3_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F4D598\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                 
 \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#962D82\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#9AC48A\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#65C5DB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9934E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": 
\"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#FCEACA\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9E2D2\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 61\n          },\n          \"id\": 34,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"single\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_disk_io_now{instance=\\\"$node\\\"}\",\n              \"interval\": \"\",\n              \"intervalFactor\": 4,\n              \"legendFormat\": \"{{device}} - IO now\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Instantaneous Queue Size\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"IOs\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": 
\"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"iops\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EAB839\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#6ED0E0\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EF843C\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#584477\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  
\"options\": \"/.*sda2_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BA43A9\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sda3_.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F4D598\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#0A50A1\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#BF1B00\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdb3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0752D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#962D82\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#614D93\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdc3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#9AC48A\",\n                      \"mode\": \"fixed\"\n                    }\n               
   }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#65C5DB\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9934E\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sdd3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#EA6460\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde1.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E0F9D7\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde2.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#FCEACA\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*sde3.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F9E2D2\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 61\n          },\n          \"id\": 301,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"single\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n    
      \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_discards_completed_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"interval\": \"\",\n              \"intervalFactor\": 4,\n              \"legendFormat\": \"{{device}} - Discards completed\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_disk_discards_merged_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Discards merged\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Disk IOps Discards completed / merged\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Storage Disk\",\n      \"type\": \"row\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 29\n      },\n      \"id\": 271,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n          
    \"unit\": \"bytes\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 46\n          },\n          \"id\": 43,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_filesystem_avail_bytes{instance=\\\"$node\\\",device!~'rootfs'}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{mountpoint}} - Available\",\n              \"metric\": \"\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_filesystem_free_bytes{instance=\\\"$node\\\",device!~'rootfs'}\",\n              \"format\": \"time_series\",\n              \"hide\": true,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{mountpoint}} - Free\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_filesystem_size_bytes{instance=\\\"$node\\\",device!~'rootfs'}\",\n              \"format\": \"time_series\",\n              \"hide\": true,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{mountpoint}} - Size\",\n              \"refId\": \"C\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Filesystem space available\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"file nodes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n      
          },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 46\n          },\n          \"id\": 41,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_filesystem_files_free{instance=\\\"$node\\\",device!~'rootfs'}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{mountpoint}} - Free file nodes\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"File Nodes Free\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"files\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n     
             \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 56\n          },\n          \"id\": 28,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"single\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_filefd_maximum{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 4,\n              \"legendFormat\": \"Max open files\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_filefd_allocated{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Open files\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"File Descriptor\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"file Nodes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n 
                 \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 56\n          },\n          \"id\": 219,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_filesystem_files{instance=\\\"$node\\\",device!~'rootfs'}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{mountpoint}} - File nodes total\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"File Nodes Size\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"normal\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"max\": 1,\n        
      \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"/ ReadOnly\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#890F02\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 66\n          },\n          \"id\": 44,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_filesystem_readonly{instance=\\\"$node\\\",device!~'rootfs'}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{mountpoint}} - ReadOnly\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_filesystem_device_error{instance=\\\"$node\\\",device!~'rootfs',fstype!~'tmpfs'}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{mountpoint}} - Device error\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Filesystem in ReadOnly / Error\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Storage Filesystem\",\n      \"type\": \"row\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 30\n      },\n      \"id\": 272,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            
\"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"packets out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"pps\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"receive_packets_eth0\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"receive_packets_lo\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"transmit_packets_eth0\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#7EB26D\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"transmit_packets_lo\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#E24D42\",\n                      \"mode\": \"fixed\"\n                    }\n                  }\n         
       ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Trans.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 33\n          },\n          \"id\": 60,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_receive_packets_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Receive\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_transmit_packets_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Transmit\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Network Traffic by Packets\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"packets out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  
\"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"pps\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Trans.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 33\n          },\n          \"id\": 142,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_receive_errs_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Receive errors\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_transmit_errs_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Rransmit errors\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Network Traffic Errors\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"packets out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                
\"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"pps\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Trans.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 43\n          },\n          \"id\": 143,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_receive_drop_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Receive drop\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_transmit_drop_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Transmit drop\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Network Traffic Drop\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n 
           \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"packets out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"pps\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Trans.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 43\n          },\n          \"id\": 141,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_receive_compressed_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Receive compressed\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n          
      \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_transmit_compressed_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Transmit compressed\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Network Traffic Compressed\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"packets out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"pps\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Trans.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 53\n          },\n          \"id\": 146,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          
\"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_receive_multicast_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Receive multicast\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Network Traffic Multicast\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"packets out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"pps\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Trans.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 53\n          },\n          \"id\": 144,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              
\"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_receive_fifo_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Receive fifo\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_transmit_fifo_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Transmit fifo\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Network Traffic Fifo\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"packets out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"pps\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Trans.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n       
     \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 63\n          },\n          \"id\": 145,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_receive_frame_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Receive frame\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Network Traffic Frame\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 63\n          },\n          \"id\": 231,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                
\"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_transmit_carrier_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Statistic transmit_carrier\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Network Traffic Carrier\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Trans.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 73\n          },\n          \"id\": 232,\n          \"links\": [],\n          \"options\": {\n            
\"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_network_transmit_colls_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{device}} - Transmit colls\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Network Traffic Colls\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"entries\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byName\",\n                  \"options\": \"NF conntrack limit\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#890F02\",\n                      \"mode\": \"fixed\"\n                    }\n                  },\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                 
   \"value\": 0\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 73\n          },\n          \"id\": 61,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_nf_conntrack_entries{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"NF conntrack entries\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_nf_conntrack_entries_limit{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"NF conntrack limit\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"NF Contrack\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"Entries\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": 
\"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 83\n          },\n          \"id\": 230,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_arp_entries{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{ device }} - ARP entries\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"ARP Entries\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"decimals\": 0,\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 83\n          },\n       
   \"id\": 288,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_network_mtu_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{ device }} - Bytes\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"MTU\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"decimals\": 0,\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 93\n          },\n          \"id\": 280,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n          
    \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_network_speed_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{ device }} - Speed\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Speed\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"packets\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"decimals\": 0,\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"none\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 93\n          },\n          \"id\": 289,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                
\"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_network_transmit_queue_length{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{ device }} -   Interface transmit queue length\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Queue Length\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"packetes drop (-) / process (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Dropped.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 103\n          },\n          \"id\": 290,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n        
      \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_softnet_processed_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"CPU {{cpu}} - Processed\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_softnet_dropped_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"CPU {{cpu}} - Dropped\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Softnet Packets\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 103\n          },\n          \"id\": 310,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n        
    },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_softnet_times_squeezed_total{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"CPU {{cpu}} - Squeezed\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Softnet Out of Quota\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 113\n          },\n          \"id\": 309,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": 
\"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_network_up{operstate=\\\"up\\\",instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{interface}} - Operational state UP\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_network_carrier{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"instant\": false,\n              \"legendFormat\": \"{{device}} - Physical link state\",\n              \"refId\": \"B\"\n            }\n          ],\n          \"title\": \"Network Operational Status\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Network Traffic\",\n      \"type\": \"row\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 31\n      },\n      \"id\": 273,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            
\"y\": 48\n          },\n          \"id\": 63,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_sockstat_TCP_alloc{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"TCP_alloc - Allocated sockets\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_sockstat_TCP_inuse{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"TCP_inuse - Tcp sockets currently in use\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_sockstat_TCP_mem{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": true,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"TCP_mem - Used memory for tcp\",\n              \"refId\": \"C\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_sockstat_TCP_orphan{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"TCP_orphan - Orphan sockets\",\n              \"refId\": \"D\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_sockstat_TCP_tw{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"TCP_tw - Sockets wating close\",\n              \"refId\": \"E\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Sockstat TCP\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              
},\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 48\n          },\n          \"id\": 124,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_sockstat_UDPLITE_inuse{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"UDPLITE_inuse - Udplite sockets currently in use\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_sockstat_UDP_inuse{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"UDP_inuse - Udp sockets currently in use\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n         
     },\n              \"expr\": \"node_sockstat_UDP_mem{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"UDP_mem - Used memory for udp\",\n              \"refId\": \"C\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Sockstat UDP\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 58\n          },\n          \"id\": 125,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_sockstat_FRAG_inuse{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"FRAG_inuse - Frag sockets currently in use\",\n          
    \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_sockstat_RAW_inuse{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"RAW_inuse - Raw sockets currently in use\",\n              \"refId\": \"C\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Sockstat FRAG / RAW\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"bytes\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"bytes\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 58\n          },\n          \"id\": 220,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": 
\"node_sockstat_TCP_mem_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"mem_bytes - TCP sockets in that state\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_sockstat_UDP_mem_bytes{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"mem_bytes - UDP sockets in that state\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_sockstat_FRAG_memory{instance=\\\"$node\\\"}\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"FRAG_memory - Used memory for frag\",\n              \"refId\": \"C\"\n            }\n          ],\n          \"title\": \"Sockstat Memory Size\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"sockets\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 68\n          },\n          \"id\": 126,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n       
         \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_sockstat_sockets_used{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Sockets_used - Sockets currently in use\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Sockstat Used\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Network Sockstat\",\n      \"type\": \"row\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 32\n      },\n      \"id\": 274,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"octects out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            
\"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Out.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 49\n          },\n          \"id\": 221,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_IpExt_InOctets{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"InOctets - Received octets\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_IpExt_OutOctets{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"OutOctets - Sent octets\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Netstat IP In / Out Octets\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"datagrams\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                
},\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 49\n          },\n          \"id\": 81,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true,\n              \"width\": 300\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Ip_Forwarding{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"Forwarding - IP forwarding\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Netstat IP Forwarding\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"messages out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": 
\"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Out.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 59\n          },\n          \"id\": 115,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Icmp_InMsgs{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"InMsgs -  Messages which the entity received. Note that this counter includes all those counted by icmpInErrors\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Icmp_OutMsgs{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"OutMsgs - Messages which this entity attempted to send. 
Note that this counter includes all those counted by icmpOutErrors\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"ICMP In / Out\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"messages out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Out.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 59\n          },\n          \"id\": 50,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Icmp_InErrors{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": 
\"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"InErrors - Messages which the entity received but determined as having ICMP-specific errors (bad ICMP checksums, bad length, etc.)\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"ICMP Errors\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"datagrams out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Out.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Snd.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 69\n          },\n          \"id\": 55,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": 
\"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Udp_InDatagrams{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"InDatagrams - Datagrams received\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Udp_OutDatagrams{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"OutDatagrams - Datagrams sent\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"UDP In / Out\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"datagrams\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 69\n          },\n          \"id\": 109,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              
\"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Udp_InErrors{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"InErrors - UDP Datagrams that could not be delivered to an application\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Udp_NoPorts{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"NoPorts - UDP Datagrams received on a port with no listener\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_UdpLite_InErrors{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"interval\": \"\",\n              \"legendFormat\": \"InErrors Lite - UDPLite Datagrams that could not be delivered to an application\",\n              \"refId\": \"C\"\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Udp_RcvbufErrors{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"RcvbufErrors - UDP buffer errors received\",\n              \"refId\": \"D\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Udp_SndbufErrors{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"SndbufErrors - UDP buffer errors send\",\n              \"refId\": \"E\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"UDP Errors\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n    
            \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"datagrams out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Out.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              },\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Snd.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 79\n          },\n          \"id\": 299,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Tcp_InSegs{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"instant\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"InSegs - Segments received, including those received in 
error. This count includes segments received on currently established connections\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Tcp_OutSegs{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"OutSegs - Segments sent, including those on current connections but excluding those containing only retransmitted octets\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"TCP In / Out\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 79\n          },\n          \"id\": 104,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n         
   {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_TcpExt_ListenOverflows{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"ListenOverflows - Times the listen queue of a socket overflowed\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_TcpExt_ListenDrops{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"ListenDrops - SYNs to LISTEN sockets ignored\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_TcpExt_TCPSynRetrans{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"TCPSynRetrans - SYN and SYN/ACK retransmits, used to break retransmissions down into SYN, fast, and timeout retransmits\",\n              \"refId\": \"C\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Tcp_RetransSegs{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"interval\": \"\",\n              \"legendFormat\": \"RetransSegs - Segments retransmitted - that is, the number of TCP segments transmitted containing one or more previously transmitted octets\",\n              \"refId\": \"D\"\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Tcp_InErrs{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"interval\": \"\",\n              \"legendFormat\": \"InErrs - Segments received in error (e.g., bad TCP checksums)\",\n              \"refId\": \"E\"\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Tcp_OutRsts{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"interval\": \"\",\n              \"legendFormat\": \"OutRsts - Segments sent with RST flag\",\n              \"refId\": \"F\"\n            }\n          ],\n          \"title\": \"TCP Errors\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n         
     \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"connections\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*MaxConn *./\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#890F02\",\n                      \"mode\": \"fixed\"\n                    }\n                  },\n                  {\n                    \"id\": \"custom.fillOpacity\",\n                    \"value\": 0\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 89\n          },\n          \"id\": 85,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_netstat_Tcp_CurrEstab{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"CurrEstab - TCP connections for which the current state is either ESTABLISHED or CLOSE- WAIT\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            
{\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_netstat_Tcp_MaxConn{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"MaxConn - Limit on the total number of TCP connections the entity can support (-1 if the limit is dynamic)\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"TCP Connections\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter out (-) / in (+)\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*Sent.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 89\n          },\n          \"id\": 91,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            
\"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_TcpExt_SyncookiesFailed{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"SyncookiesFailed - Invalid SYN cookies received\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_TcpExt_SyncookiesRecv{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"SyncookiesRecv - SYN cookies received\",\n              \"refId\": \"B\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_TcpExt_SyncookiesSent{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"SyncookiesSent - SYN cookies sent\",\n              \"refId\": \"C\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"TCP SynCookie\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"connections\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"min\": 0,\n              \"thresholds\": {\n                \"mode\": 
\"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 99\n          },\n          \"id\": 82,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Tcp_ActiveOpens{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"ActiveOpens - TCP connections that have made a direct transition to the SYN-SENT state from the CLOSED state\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"irate(node_netstat_Tcp_PassiveOpens{instance=\\\"$node\\\"}[$__rate_interval])\",\n              \"format\": \"time_series\",\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"PassiveOpens - TCP connections that have made a direct transition to the SYN-RCVD state from the LISTEN state\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"TCP Direct Transition\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Network Netstat\",\n      \"type\": \"row\"\n    },\n    {\n      \"collapsed\": true,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"gridPos\": {\n        \"h\": 1,\n        \"w\": 24,\n        \"x\": 0,\n        \"y\": 33\n      },\n      \"id\": 279,\n      \"panels\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"seconds\",\n                \"axisPlacement\": \"auto\",\n         
       \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"normal\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"s\"\n            },\n            \"overrides\": []\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 0,\n            \"y\": 50\n          },\n          \"id\": 40,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_scrape_collector_duration_seconds{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{collector}} - Scrape duration\",\n              \"refId\": \"A\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Node Exporter Scrape Time\",\n          \"type\": \"timeseries\"\n        },\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"description\": \"\",\n          \"fieldConfig\": {\n            \"defaults\": {\n              \"color\": {\n                \"mode\": \"palette-classic\"\n              },\n              \"custom\": {\n                \"axisCenteredZero\": false,\n                \"axisColorMode\": \"text\",\n                \"axisLabel\": \"counter\",\n                \"axisPlacement\": \"auto\",\n                \"barAlignment\": 0,\n                \"drawStyle\": \"line\",\n                \"fillOpacity\": 20,\n                \"gradientMode\": \"none\",\n                \"hideFrom\": {\n                  \"legend\": false,\n                  \"tooltip\": 
false,\n                  \"viz\": false\n                },\n                \"lineInterpolation\": \"linear\",\n                \"lineStyle\": {\n                  \"fill\": \"solid\"\n                },\n                \"lineWidth\": 1,\n                \"pointSize\": 5,\n                \"scaleDistribution\": {\n                  \"type\": \"linear\"\n                },\n                \"showPoints\": \"never\",\n                \"spanNulls\": false,\n                \"stacking\": {\n                  \"group\": \"A\",\n                  \"mode\": \"none\"\n                },\n                \"thresholdsStyle\": {\n                  \"mode\": \"off\"\n                }\n              },\n              \"links\": [],\n              \"mappings\": [],\n              \"thresholds\": {\n                \"mode\": \"absolute\",\n                \"steps\": [\n                  {\n                    \"color\": \"green\"\n                  },\n                  {\n                    \"color\": \"red\",\n                    \"value\": 80\n                  }\n                ]\n              },\n              \"unit\": \"short\"\n            },\n            \"overrides\": [\n              {\n                \"matcher\": {\n                  \"id\": \"byRegexp\",\n                  \"options\": \"/.*error.*/\"\n                },\n                \"properties\": [\n                  {\n                    \"id\": \"color\",\n                    \"value\": {\n                      \"fixedColor\": \"#F2495C\",\n                      \"mode\": \"fixed\"\n                    }\n                  },\n                  {\n                    \"id\": \"custom.transform\",\n                    \"value\": \"negative-Y\"\n                  }\n                ]\n              }\n            ]\n          },\n          \"gridPos\": {\n            \"h\": 10,\n            \"w\": 12,\n            \"x\": 12,\n            \"y\": 50\n          },\n          \"id\": 157,\n          \"links\": [],\n          \"options\": {\n            \"legend\": {\n              \"calcs\": [\n                \"mean\",\n                \"lastNotNull\",\n                \"max\",\n                \"min\"\n              ],\n              \"displayMode\": \"table\",\n              \"placement\": \"bottom\",\n              \"showLegend\": true\n            },\n            \"tooltip\": {\n              \"mode\": \"multi\",\n              \"sort\": \"none\"\n            }\n          },\n          \"pluginVersion\": \"9.2.0\",\n          \"targets\": [\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_scrape_collector_success{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{collector}} - Scrape success\",\n              \"refId\": \"A\",\n              \"step\": 240\n            },\n            {\n              \"datasource\": {\n                \"type\": \"prometheus\",\n                \"uid\": \"${DS_PROMETHEUS}\"\n              },\n              \"expr\": \"node_textfile_scrape_error{instance=\\\"$node\\\"}\",\n              \"format\": \"time_series\",\n              \"hide\": false,\n              \"interval\": \"\",\n              \"intervalFactor\": 1,\n              \"legendFormat\": \"{{collector}} - Scrape textfile error (1 = 
true)\",\n              \"refId\": \"B\",\n              \"step\": 240\n            }\n          ],\n          \"title\": \"Node Exporter Scrape\",\n          \"type\": \"timeseries\"\n        }\n      ],\n      \"targets\": [\n        {\n          \"datasource\": {\n            \"type\": \"prometheus\",\n            \"uid\": \"${DS_PROMETHEUS}\"\n          },\n          \"refId\": \"A\"\n        }\n      ],\n      \"title\": \"Node Exporter\",\n      \"type\": \"row\"\n    }\n  ],\n  \"refresh\": false,\n  \"schemaVersion\": 37,\n  \"style\": \"dark\",\n  \"tags\": [\n    \"linux\"\n  ],\n  \"templating\": {\n    \"list\": [\n      {\n        \"current\": {\n          \"selected\": false,\n          \"text\": \"default\",\n          \"value\": \"default\"\n        },\n        \"hide\": 0,\n        \"includeAll\": false,\n        \"label\": \"datasource\",\n        \"multi\": false,\n        \"name\": \"DS_PROMETHEUS\",\n        \"options\": [],\n        \"query\": \"prometheus\",\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"skipUrlSync\": false,\n        \"type\": \"datasource\"\n      },\n      {\n        \"current\": {},\n        \"datasource\": {\n          \"type\": \"prometheus\",\n          \"uid\": \"${DS_PROMETHEUS}\"\n        },\n        \"definition\": \"\",\n        \"hide\": 0,\n        \"includeAll\": false,\n        \"label\": \"Job\",\n        \"multi\": false,\n        \"name\": \"job\",\n        \"options\": [],\n        \"query\": {\n          \"query\": \"label_values(node_uname_info, job)\",\n          \"refId\": \"Prometheus-job-Variable-Query\"\n        },\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"skipUrlSync\": false,\n        \"sort\": 1,\n        \"tagValuesQuery\": \"\",\n        \"tagsQuery\": \"\",\n        \"type\": \"query\",\n        \"useTags\": false\n      },\n      {\n        \"current\": {},\n        \"datasource\": {\n          \"type\": \"prometheus\",\n          \"uid\": \"${DS_PROMETHEUS}\"\n        },\n        \"definition\": \"label_values(node_uname_info{}, instance)\",\n        \"hide\": 0,\n        \"includeAll\": false,\n        \"label\": \"Host:\",\n        \"multi\": false,\n        \"name\": \"node\",\n        \"options\": [],\n        \"query\": {\n          \"query\": \"label_values(node_uname_info{}, instance)\",\n          \"refId\": \"Prometheus-node-Variable-Query\"\n        },\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"skipUrlSync\": false,\n        \"sort\": 1,\n        \"tagValuesQuery\": \"\",\n        \"tagsQuery\": \"\",\n        \"type\": \"query\",\n        \"useTags\": false\n      },\n      {\n        \"current\": {\n          \"selected\": false,\n          \"text\": \"[a-z]+|nvme[0-9]+n[0-9]+|mmcblk[0-9]+\",\n          \"value\": \"[a-z]+|nvme[0-9]+n[0-9]+|mmcblk[0-9]+\"\n        },\n        \"hide\": 2,\n        \"includeAll\": false,\n        \"multi\": false,\n        \"name\": \"diskdevices\",\n        \"options\": [\n          {\n            \"selected\": true,\n            \"text\": \"[a-z]+|nvme[0-9]+n[0-9]+|mmcblk[0-9]+\",\n            \"value\": \"[a-z]+|nvme[0-9]+n[0-9]+|mmcblk[0-9]+\"\n          }\n        ],\n        \"query\": \"[a-z]+|nvme[0-9]+n[0-9]+|mmcblk[0-9]+\",\n        \"skipUrlSync\": false,\n        \"type\": \"custom\"\n      }\n    ]\n  },\n  \"time\": {\n    \"from\": \"now-24h\",\n    \"to\": \"now\"\n  },\n  \"timepicker\": {\n    \"refresh_intervals\": [\n      \"5s\",\n      \"10s\",\n      \"30s\",\n      \"1m\",\n      \"5m\",\n      
\"15m\",\n      \"30m\",\n      \"1h\",\n      \"2h\",\n      \"1d\"\n    ],\n    \"time_options\": [\n      \"5m\",\n      \"15m\",\n      \"1h\",\n      \"6h\",\n      \"12h\",\n      \"24h\",\n      \"2d\",\n      \"7d\",\n      \"30d\"\n    ]\n  },\n  \"timezone\": \"browser\",\n  \"title\": \"Node exporter\",\n  \"uid\": \"NodeExporterDashboard\",\n  \"version\": 9,\n  \"weekStart\": \"\"\n}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/dashboards/postgresql_exporter.json",
    "content": "{\n  \"__inputs\": [\n    {\n      \"name\": \"DS_PROMETHEUS\",\n      \"label\": \"Prometheus\",\n      \"description\": \"\",\n      \"type\": \"datasource\",\n      \"pluginId\": \"prometheus\",\n      \"pluginName\": \"Prometheus\"\n    }\n  ],\n  \"__elements\": [],\n  \"__requires\": [\n    {\n      \"type\": \"panel\",\n      \"id\": \"gauge\",\n      \"name\": \"Gauge\",\n      \"version\": \"\"\n    },\n    {\n      \"type\": \"grafana\",\n      \"id\": \"grafana\",\n      \"name\": \"Grafana\",\n      \"version\": \"8.4.5\"\n    },\n    {\n      \"type\": \"panel\",\n      \"id\": \"graph\",\n      \"name\": \"Graph (old)\",\n      \"version\": \"\"\n    },\n    {\n      \"type\": \"datasource\",\n      \"id\": \"prometheus\",\n      \"name\": \"Prometheus\",\n      \"version\": \"1.0.0\"\n    },\n    {\n      \"type\": \"panel\",\n      \"id\": \"stat\",\n      \"name\": \"Stat\",\n      \"version\": \"\"\n    }\n  ],\n  \"annotations\": {\n    \"list\": [\n      {\n        \"builtIn\": 1,\n        \"datasource\": \"-- Grafana --\",\n        \"enable\": true,\n        \"hide\": true,\n        \"iconColor\": \"rgba(0, 211, 255, 1)\",\n        \"name\": \"Annotations & Alerts\",\n        \"target\": {\n          \"limit\": 100,\n          \"matchAny\": false,\n          \"tags\": [],\n          \"type\": \"dashboard\"\n        },\n        \"type\": \"dashboard\"\n      }\n    ]\n  },\n  \"description\": \"Dashbord works with postgres_exporter for prometheus\",\n  \"editable\": true,\n  \"fiscalYearStartMonth\": 0,\n  \"gnetId\": 3742,\n  \"graphTooltip\": 0,\n  \"id\": null,\n  \"iteration\": 1678370081292,\n  \"links\": [],\n  \"liveNow\": false,\n  \"panels\": [\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"max\": 200,\n          \"min\": 0,\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"rgba(50, 172, 45, 0.97)\",\n                \"value\": null\n              },\n              {\n                \"color\": \"rgba(237, 129, 40, 0.89)\",\n                \"value\": 50\n              },\n              {\n                \"color\": \"rgba(245, 54, 54, 0.9)\",\n                \"value\": 90\n              }\n            ]\n          },\n          \"unit\": \"percent\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 5,\n        \"w\": 4,\n        \"x\": 0,\n        \"y\": 0\n      },\n      \"id\": 16,\n      \"links\": [],\n      \"maxDataPoints\": 100,\n      \"options\": {\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"/^iowait$/\",\n          \"values\": false\n        },\n        \"showThresholdLabels\": false,\n        \"showThresholdMarkers\": true\n      },\n      \"pluginVersion\": \"8.4.5\",\n      \"targets\": [\n        {\n          \"expr\": \"sum(rate(node_cpu_seconds_total{instance=\\\"$host\\\", 
mode=\\\"iowait\\\"}[$interval])) by (mode)* 100 / scalar(count(node_cpu_seconds_total{mode=\\\"user\\\", instance=\\\"$host\\\"})) or sum(irate(node_cpu_seconds_total{instance=\\\"$host\\\", mode=\\\"iowait\\\"}[5m])) by (mode) * 100 / scalar(count(node_cpu_seconds_total{mode=\\\"user\\\", instance=\\\"$host\\\"}))\",\n          \"format\": \"time_series\",\n          \"interval\": \"\",\n          \"intervalFactor\": 2,\n          \"legendFormat\": \"{{mode}}\",\n          \"refId\": \"A\",\n          \"step\": 4\n        }\n      ],\n      \"title\": \"Current IOwait\",\n      \"type\": \"gauge\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"max\": 100,\n          \"min\": 0,\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"rgba(50, 172, 45, 0.97)\",\n                \"value\": null\n              },\n              {\n                \"color\": \"rgba(237, 129, 40, 0.89)\",\n                \"value\": 50\n              },\n              {\n                \"color\": \"rgba(245, 54, 54, 0.9)\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"percent\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 5,\n        \"w\": 4,\n        \"x\": 4,\n        \"y\": 0\n      },\n      \"id\": 15,\n      \"links\": [],\n      \"maxDataPoints\": 100,\n      \"options\": {\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"/^user$/\",\n          \"values\": false\n        },\n        \"showThresholdLabels\": false,\n        \"showThresholdMarkers\": true\n      },\n      \"pluginVersion\": \"8.4.5\",\n      \"targets\": [\n        {\n          \"expr\": \"sum(rate(node_cpu_seconds_total{instance=\\\"$host\\\", mode=\\\"user\\\"}[$interval])) by (mode)* 100 / scalar(count(node_cpu_seconds_total{mode=\\\"user\\\", instance=\\\"$host\\\"})) or sum(irate(node_cpu_seconds_total{instance=\\\"$host\\\", mode=\\\"user\\\"}[5m])) by (mode) * 100 / scalar(count(node_cpu_seconds_total{mode=\\\"user\\\", instance=\\\"$host\\\"}))\",\n          \"format\": \"time_series\",\n          \"interval\": \"$interval\",\n          \"intervalFactor\": 2,\n          \"legendFormat\": \"{{mode}}\",\n          \"refId\": \"A\",\n          \"step\": 4\n        }\n      ],\n      \"title\": \"Current CPU\",\n      \"type\": \"gauge\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n         
 ],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"rgba(50, 172, 45, 0.97)\",\n                \"value\": null\n              },\n              {\n                \"color\": \"rgba(237, 129, 40, 0.89)\",\n                \"value\": 50\n              },\n              {\n                \"color\": \"rgba(245, 54, 54, 0.9)\",\n                \"value\": 90\n              }\n            ]\n          },\n          \"unit\": \"percent\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 5,\n        \"w\": 2,\n        \"x\": 8,\n        \"y\": 0\n      },\n      \"id\": 17,\n      \"links\": [],\n      \"maxDataPoints\": 100,\n      \"options\": {\n        \"colorMode\": \"background\",\n        \"graphMode\": \"none\",\n        \"justifyMode\": \"auto\",\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"/^Used$/\",\n          \"values\": false\n        },\n        \"textMode\": \"auto\"\n      },\n      \"pluginVersion\": \"8.4.5\",\n      \"targets\": [\n        {\n          \"expr\": \"(node_memory_MemTotal_bytes{instance=\\\"$host\\\"} - (node_memory_MemFree_bytes{instance=\\\"$host\\\"} + node_memory_Buffers_bytes{instance=\\\"$host\\\"} + node_memory_Cached_bytes{instance=\\\"$host\\\"})) / node_memory_MemTotal_bytes{instance=\\\"$host\\\"} * 100\",\n          \"format\": \"time_series\",\n          \"interval\": \"$interval\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"Used\",\n          \"refId\": \"A\",\n          \"step\": 2\n        }\n      ],\n      \"title\": \"RAM used\",\n      \"type\": \"stat\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"rgba(245, 54, 54, 0.9)\",\n                \"value\": null\n              },\n              {\n                \"color\": \"rgba(237, 129, 40, 0.89)\",\n                \"value\": 50\n              },\n              {\n                \"color\": \"rgba(50, 172, 45, 0.97)\",\n                \"value\": 90\n              }\n            ]\n          },\n          \"unit\": \"percent\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 5,\n        \"w\": 2,\n        \"x\": 10,\n        \"y\": 0\n      },\n      \"id\": 19,\n      \"links\": [],\n      \"maxDataPoints\": 100,\n      \"options\": {\n        \"colorMode\": \"background\",\n        \"graphMode\": \"none\",\n        \"justifyMode\": \"auto\",\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"/^Used$/\",\n          \"values\": false\n        },\n        \"textMode\": \"auto\"\n      },\n      \"pluginVersion\": \"8.4.5\",\n      \"targets\": [\n        {\n          
\"expr\": \"(node_memory_Cached_bytes{instance=\\\"$host\\\"} * 100) / node_memory_MemTotal_bytes{instance=\\\"$host\\\"}\",\n          \"format\": \"time_series\",\n          \"interval\": \"$interval\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"Used\",\n          \"refId\": \"A\",\n          \"step\": 2\n        }\n      ],\n      \"title\": \"RAM cached\",\n      \"type\": \"stat\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"decbytes\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 5,\n        \"w\": 4,\n        \"x\": 12,\n        \"y\": 0\n      },\n      \"id\": 10,\n      \"links\": [],\n      \"maxDataPoints\": 100,\n      \"options\": {\n        \"colorMode\": \"none\",\n        \"graphMode\": \"none\",\n        \"justifyMode\": \"auto\",\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"textMode\": \"auto\"\n      },\n      \"pluginVersion\": \"8.4.5\",\n      \"targets\": [\n        {\n          \"expr\": \"SUM(pg_stat_database_tup_fetched{datname=~\\\"$datname\\\", instance=~\\\"$instance\\\"})\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 2,\n          \"refId\": \"A\",\n          \"step\": 4\n        }\n      ],\n      \"title\": \"Current fetch data\",\n      \"type\": \"stat\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"decbytes\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 5,\n        \"w\": 4,\n        \"x\": 16,\n        \"y\": 0\n      },\n      \"id\": 11,\n      \"links\": [],\n      \"maxDataPoints\": 100,\n      \"options\": {\n        \"colorMode\": \"none\",\n        \"graphMode\": \"none\",\n        \"justifyMode\": 
\"auto\",\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"textMode\": \"auto\"\n      },\n      \"pluginVersion\": \"8.4.5\",\n      \"targets\": [\n        {\n          \"expr\": \"SUM(pg_stat_database_tup_inserted{datname=~\\\"$datname\\\", instance=~\\\"$instance\\\"})\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 2,\n          \"refId\": \"A\",\n          \"step\": 4\n        }\n      ],\n      \"title\": \"Current insert data\",\n      \"type\": \"stat\"\n    },\n    {\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fieldConfig\": {\n        \"defaults\": {\n          \"color\": {\n            \"mode\": \"thresholds\"\n          },\n          \"mappings\": [\n            {\n              \"options\": {\n                \"match\": \"null\",\n                \"result\": {\n                  \"text\": \"N/A\"\n                }\n              },\n              \"type\": \"special\"\n            }\n          ],\n          \"thresholds\": {\n            \"mode\": \"absolute\",\n            \"steps\": [\n              {\n                \"color\": \"green\",\n                \"value\": null\n              },\n              {\n                \"color\": \"red\",\n                \"value\": 80\n              }\n            ]\n          },\n          \"unit\": \"decbytes\"\n        },\n        \"overrides\": []\n      },\n      \"gridPos\": {\n        \"h\": 5,\n        \"w\": 4,\n        \"x\": 20,\n        \"y\": 0\n      },\n      \"id\": 12,\n      \"links\": [],\n      \"maxDataPoints\": 100,\n      \"options\": {\n        \"colorMode\": \"none\",\n        \"graphMode\": \"none\",\n        \"justifyMode\": \"auto\",\n        \"orientation\": \"horizontal\",\n        \"reduceOptions\": {\n          \"calcs\": [\n            \"lastNotNull\"\n          ],\n          \"fields\": \"\",\n          \"values\": false\n        },\n        \"textMode\": \"auto\"\n      },\n      \"pluginVersion\": \"8.4.5\",\n      \"targets\": [\n        {\n          \"expr\": \"SUM(pg_stat_database_tup_updated{datname=~\\\"$datname\\\", instance=~\\\"$instance\\\"})\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 2,\n          \"refId\": \"A\",\n          \"step\": 4\n        }\n      ],\n      \"title\": \"Current update data\",\n      \"type\": \"stat\"\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 8,\n        \"x\": 0,\n        \"y\": 5\n      },\n      \"hiddenSeries\": false,\n      \"id\": 5,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": false,\n        \"show\": true,\n        \"sort\": \"current\",\n        \"sortDesc\": true,\n        \"total\": true,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"links\": [],\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"8.4.5\",\n      
\"pointradius\": 5,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"pg_stat_database_tup_fetched{datname=~\\\"$datname\\\", instance=~\\\"$instance\\\"} != 0\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 2,\n          \"legendFormat\": \"{{instance}}, {{datname}}\",\n          \"refId\": \"A\",\n          \"step\": 2\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Fetch data (SELECT)\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"bytes\",\n          \"logBase\": 1,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 8,\n        \"x\": 8,\n        \"y\": 5\n      },\n      \"hiddenSeries\": false,\n      \"id\": 6,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": false,\n        \"show\": true,\n        \"sort\": \"current\",\n        \"sortDesc\": true,\n        \"total\": true,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"links\": [],\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"8.4.5\",\n      \"pointradius\": 5,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"pg_stat_database_tup_inserted{datname=~\\\"$datname\\\", instance=~\\\"$instance\\\"} != 0\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 2,\n          \"legendFormat\": \"{{instance}}, {{datname}}\",\n          \"refId\": \"A\",\n          \"step\": 2\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Insert data\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"bytes\",\n          \"logBase\": 1,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        
\"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 8,\n        \"x\": 16,\n        \"y\": 5\n      },\n      \"hiddenSeries\": false,\n      \"id\": 8,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": false,\n        \"show\": true,\n        \"sort\": \"current\",\n        \"sortDesc\": true,\n        \"total\": true,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"links\": [],\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"8.4.5\",\n      \"pointradius\": 5,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"pg_stat_database_tup_updated{datname=~\\\"$datname\\\", instance=~\\\"$instance\\\"} != 0\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 2,\n          \"legendFormat\": \"{{instance}}, {{datname}}\",\n          \"refId\": \"A\",\n          \"step\": 2\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Update data\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"bytes\",\n          \"logBase\": 1,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 8,\n        \"x\": 0,\n        \"y\": 12\n      },\n      \"hiddenSeries\": false,\n      \"id\": 1,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": false,\n        \"show\": true,\n        \"sort\": \"current\",\n        \"sortDesc\": true,\n        \"total\": false,\n        \"values\": true\n      },\n      \"lines\": false,\n      \"linewidth\": 1,\n      \"links\": [],\n      \"nullPointMode\": \"connected\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"8.4.5\",\n      \"pointradius\": 3,\n      \"points\": true,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"pg_stat_activity_count{datname=~\\\"$datname\\\", instance=~\\\"$instance\\\", state=\\\"active\\\"} !=0\",\n          \"format\": \"time_series\",\n          \"interval\": \"\",\n          \"intervalFactor\": 2,\n          \"legendFormat\": \"{{instance}},{{datname}},state : 
{{state}}\",\n          \"refId\": \"A\",\n          \"step\": 2\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Active sessions\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"decimals\": 0,\n          \"format\": \"none\",\n          \"logBase\": 1,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 8,\n        \"x\": 8,\n        \"y\": 12\n      },\n      \"hiddenSeries\": false,\n      \"id\": 4,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": false,\n        \"current\": true,\n        \"max\": true,\n        \"min\": false,\n        \"show\": true,\n        \"sort\": \"current\",\n        \"sortDesc\": false,\n        \"total\": false,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"links\": [],\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"8.4.5\",\n      \"pointradius\": 5,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"pg_stat_activity_count{datname=~\\\"$datname\\\", instance=~\\\"$instance\\\", state=~\\\"idle|idle in transaction|idle in transaction (aborted)\\\"} !=0\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 2,\n          \"legendFormat\": \"{{instance}},{{datname}},state : {{state}}\",\n          \"refId\": \"A\",\n          \"step\": 2\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Idle sessions\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 2,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 8,\n        \"x\": 16,\n        \"y\": 12\n      },\n      \"hiddenSeries\": false,\n      \"id\": 20,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": 
true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": false,\n        \"show\": true,\n        \"sort\": \"current\",\n        \"sortDesc\": true,\n        \"total\": true,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 2,\n      \"links\": [],\n      \"nullPointMode\": \"null\",\n      \"options\": {\n        \"alertThreshold\": true\n      },\n      \"percentage\": false,\n      \"pluginVersion\": \"8.4.5\",\n      \"pointradius\": 5,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": true,\n      \"targets\": [\n        {\n          \"expr\": \"rate(node_disk_io_time_seconds_total{device=~\\\"$device\\\", instance=\\\"$host\\\"}[$interval])*100 or irate(node_disk_io_time_seconds_total{device=~\\\"$device\\\", instance=\\\"$host\\\"}[5m])*100\",\n          \"format\": \"time_series\",\n          \"interval\": \"$interval\",\n          \"intervalFactor\": 2,\n          \"legendFormat\": \"{{ device }}\",\n          \"refId\": \"A\",\n          \"step\": 4\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Disk IO Utilization\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:333\",\n          \"format\": \"percent\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:334\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 8,\n        \"x\": 0,\n        \"y\": 19\n      },\n      \"hiddenSeries\": false,\n      \"id\": 7,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": false,\n        \"show\": true,\n        \"sort\": \"current\",\n        \"sortDesc\": true,\n        \"total\": true,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"links\": [],\n      \"nullPointMode\": \"null\",\n      \"percentage\": false,\n      \"pluginVersion\": \"8.4.5\",\n      \"pointradius\": 5,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"pg_stat_database_tup_deleted{datname=~\\\"$datname\\\", instance=~\\\"$instance\\\"} != 0\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 2,\n          \"legendFormat\": \"{{instance}}, {{datname}}\",\n          \"refId\": \"A\",\n          \"step\": 2\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Delete data\",\n      \"tooltip\": {\n        
\"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"bytes\",\n          \"logBase\": 1,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 8,\n        \"x\": 8,\n        \"y\": 19\n      },\n      \"hiddenSeries\": false,\n      \"id\": 14,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"max\": true,\n        \"min\": false,\n        \"show\": true,\n        \"sort\": \"total\",\n        \"sortDesc\": true,\n        \"total\": true,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"links\": [],\n      \"nullPointMode\": \"null\",\n      \"percentage\": false,\n      \"pluginVersion\": \"8.4.5\",\n      \"pointradius\": 5,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"pg_stat_database_tup_returned{datname=~\\\"$datname\\\", instance=~\\\"$instance\\\"} != 0\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 2,\n          \"legendFormat\": \"{{instance}}, {{datname}}\",\n          \"refId\": \"A\",\n          \"step\": 2\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Return data\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"format\": \"bytes\",\n          \"logBase\": 1,\n          \"show\": true\n        },\n        {\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 8,\n        \"x\": 16,\n        \"y\": 19\n      },\n      \"hiddenSeries\": false,\n      \"id\": 3,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"hideEmpty\": false,\n        \"max\": true,\n        \"min\": false,\n        \"rightSide\": false,\n        \"show\": true,\n        \"sort\": \"current\",\n        \"sortDesc\": true,\n        \"total\": true,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"links\": [],\n      \"nullPointMode\": 
\"null\",\n      \"percentage\": false,\n      \"pluginVersion\": \"8.4.5\",\n      \"pointradius\": 5,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"pg_locks_count{datname=~\\\"$datname\\\", instance=~\\\"$instance\\\", mode=~\\\"$mode\\\"} != 0\",\n          \"format\": \"time_series\",\n          \"intervalFactor\": 2,\n          \"legendFormat\": \"{{instance}},{{datname}},{{mode}}\",\n          \"refId\": \"A\",\n          \"step\": 2\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Lock tables\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:386\",\n          \"decimals\": 0,\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"min\": \"0\",\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:387\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 12,\n        \"x\": 0,\n        \"y\": 26\n      },\n      \"hiddenSeries\": false,\n      \"id\": 9,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": false,\n        \"current\": true,\n        \"max\": false,\n        \"min\": false,\n        \"rightSide\": true,\n        \"show\": true,\n        \"sort\": \"current\",\n        \"sortDesc\": true,\n        \"total\": false,\n        \"values\": true\n      },\n      \"lines\": true,\n      \"linewidth\": 1,\n      \"links\": [],\n      \"nullPointMode\": \"null\",\n      \"percentage\": false,\n      \"pluginVersion\": \"8.4.5\",\n      \"pointradius\": 5,\n      \"points\": false,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"1 - node_filesystem_free_bytes{instance=~\\\"$host\\\", fstype!~\\\"rootfs|selinuxfs|autofs|rpc_pipefs|tmpfs\\\"} / node_filesystem_size_bytes{fstype!~\\\"rootfs|selinuxfs|autofs|rpc_pipefs|tmpfs\\\"}\",\n          \"format\": \"time_series\",\n          \"interval\": \"\",\n          \"intervalFactor\": 2,\n          \"legendFormat\": \"{{instance}} , {{mountpoint}}\",\n          \"refId\": \"A\",\n          \"step\": 2\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Disk Use\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:173\",\n          \"format\": \"percentunit\",\n          
\"logBase\": 1,\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:174\",\n          \"format\": \"short\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": {\n        \"align\": false\n      }\n    },\n    {\n      \"aliasColors\": {},\n      \"bars\": false,\n      \"dashLength\": 10,\n      \"dashes\": false,\n      \"datasource\": {\n        \"type\": \"prometheus\",\n        \"uid\": \"${DS_PROMETHEUS}\"\n      },\n      \"fill\": 1,\n      \"fillGradient\": 0,\n      \"gridPos\": {\n        \"h\": 7,\n        \"w\": 12,\n        \"x\": 12,\n        \"y\": 26\n      },\n      \"hiddenSeries\": false,\n      \"id\": 13,\n      \"legend\": {\n        \"alignAsTable\": true,\n        \"avg\": true,\n        \"current\": true,\n        \"hideEmpty\": true,\n        \"hideZero\": true,\n        \"max\": true,\n        \"min\": true,\n        \"rightSide\": false,\n        \"show\": true,\n        \"sort\": \"current\",\n        \"sortDesc\": true,\n        \"total\": true,\n        \"values\": true\n      },\n      \"lines\": false,\n      \"linewidth\": 1,\n      \"links\": [],\n      \"nullPointMode\": \"null\",\n      \"percentage\": false,\n      \"pluginVersion\": \"8.4.5\",\n      \"pointradius\": 1,\n      \"points\": true,\n      \"renderer\": \"flot\",\n      \"seriesOverrides\": [],\n      \"spaceLength\": 10,\n      \"stack\": false,\n      \"steppedLine\": false,\n      \"targets\": [\n        {\n          \"expr\": \"(rate(node_disk_read_time_seconds_total{device=~\\\"$device\\\", instance=\\\"$host\\\"}[$interval]) / rate(node_disk_reads_completed_total{device=~\\\"$device\\\", instance=\\\"$host\\\"}[$interval])) or (irate(node_disk_read_seconds_total{device=~\\\"$device\\\", instance=\\\"$host\\\"}[5m]) / irate(node_disk_reads_completed_total{device=~\\\"$device\\\", instance=\\\"$host\\\"}[5m]))\",\n          \"format\": \"time_series\",\n          \"hide\": false,\n          \"interval\": \"$interval\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"Read: {{ device }}\",\n          \"refId\": \"A\",\n          \"step\": 2\n        },\n        {\n          \"expr\": \"(rate(node_disk_write_time_seconds_total{device=~\\\"$device\\\", instance=\\\"$host\\\"}[$interval]) / rate(node_disk_writes_completed_total{device=~\\\"$device\\\", instance=\\\"$host\\\"}[$interval])) or (irate(node_disk_write_time_seconds_total{device=~\\\"$device\\\", instance=\\\"$host\\\"}[5m]) / irate(node_disk_writes_completed_total{device=~\\\"$device\\\", instance=\\\"$host\\\"}[5m]))\",\n          \"format\": \"time_series\",\n          \"interval\": \"$interval\",\n          \"intervalFactor\": 1,\n          \"legendFormat\": \"Write: {{ device }}\",\n          \"refId\": \"B\",\n          \"step\": 2\n        }\n      ],\n      \"thresholds\": [],\n      \"timeRegions\": [],\n      \"title\": \"Disk Latency\",\n      \"tooltip\": {\n        \"shared\": true,\n        \"sort\": 0,\n        \"value_type\": \"individual\"\n      },\n      \"type\": \"graph\",\n      \"xaxis\": {\n        \"mode\": \"time\",\n        \"show\": true,\n        \"values\": []\n      },\n      \"yaxes\": [\n        {\n          \"$$hashKey\": \"object:437\",\n          \"format\": \"ms\",\n          \"logBase\": 2,\n          \"show\": true\n        },\n        {\n          \"$$hashKey\": \"object:438\",\n          \"format\": \"ms\",\n          \"logBase\": 1,\n          \"show\": true\n        }\n      ],\n      \"yaxis\": 
{\n        \"align\": false\n      }\n    }\n  ],\n  \"refresh\": false,\n  \"schemaVersion\": 35,\n  \"style\": \"dark\",\n  \"tags\": [\n    \"postgres\"\n  ],\n  \"templating\": {\n    \"list\": [\n      {\n        \"auto\": true,\n        \"auto_count\": 200,\n        \"auto_min\": \"1s\",\n        \"current\": {\n          \"selected\": false,\n          \"text\": \"auto\",\n          \"value\": \"$__auto_interval_interval\"\n        },\n        \"hide\": 0,\n        \"label\": \"Interval\",\n        \"name\": \"interval\",\n        \"options\": [\n          {\n            \"selected\": true,\n            \"text\": \"auto\",\n            \"value\": \"$__auto_interval_interval\"\n          },\n          {\n            \"selected\": false,\n            \"text\": \"1s\",\n            \"value\": \"1s\"\n          },\n          {\n            \"selected\": false,\n            \"text\": \"5s\",\n            \"value\": \"5s\"\n          },\n          {\n            \"selected\": false,\n            \"text\": \"1m\",\n            \"value\": \"1m\"\n          },\n          {\n            \"selected\": false,\n            \"text\": \"5m\",\n            \"value\": \"5m\"\n          },\n          {\n            \"selected\": false,\n            \"text\": \"1h\",\n            \"value\": \"1h\"\n          },\n          {\n            \"selected\": false,\n            \"text\": \"6h\",\n            \"value\": \"6h\"\n          },\n          {\n            \"selected\": false,\n            \"text\": \"1d\",\n            \"value\": \"1d\"\n          }\n        ],\n        \"query\": \"1s,5s,1m,5m,1h,6h,1d\",\n        \"queryValue\": \"\",\n        \"refresh\": 2,\n        \"skipUrlSync\": false,\n        \"type\": \"interval\"\n      },\n      {\n        \"current\": {},\n        \"datasource\": {\n          \"type\": \"prometheus\",\n          \"uid\": \"${DS_PROMETHEUS}\"\n        },\n        \"definition\": \"label_values(node_disk_reads_completed_total, instance)\",\n        \"hide\": 0,\n        \"includeAll\": false,\n        \"label\": \"Host\",\n        \"multi\": false,\n        \"name\": \"host\",\n        \"options\": [],\n        \"query\": {\n          \"query\": \"label_values(node_disk_reads_completed_total, instance)\",\n          \"refId\": \"Prometheus-host-Variable-Query\"\n        },\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"skipUrlSync\": false,\n        \"sort\": 1,\n        \"tagValuesQuery\": \"\",\n        \"tagsQuery\": \"\",\n        \"type\": \"query\",\n        \"useTags\": false\n      },\n      {\n        \"current\": {},\n        \"datasource\": {\n          \"type\": \"prometheus\",\n          \"uid\": \"${DS_PROMETHEUS}\"\n        },\n        \"definition\": \"label_values(node_disk_reads_completed_total{instance=\\\"$host\\\", device!~\\\"dm-.+\\\"}, device)\",\n        \"hide\": 0,\n        \"includeAll\": true,\n        \"label\": \"Device\",\n        \"multi\": true,\n        \"name\": \"device\",\n        \"options\": [],\n        \"query\": {\n          \"query\": \"label_values(node_disk_reads_completed_total{instance=\\\"$host\\\", device!~\\\"dm-.+\\\"}, device)\",\n          \"refId\": \"Prometheus-device-Variable-Query\"\n        },\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"skipUrlSync\": false,\n        \"sort\": 1,\n        \"tagValuesQuery\": \"\",\n        \"tagsQuery\": \"\",\n        \"type\": \"query\",\n        \"useTags\": false\n      },\n      {\n        \"current\": {},\n        \"datasource\": {\n         
 \"type\": \"prometheus\",\n          \"uid\": \"${DS_PROMETHEUS}\"\n        },\n        \"definition\": \"\",\n        \"hide\": 0,\n        \"includeAll\": true,\n        \"label\": \"Instance\",\n        \"multi\": true,\n        \"name\": \"instance\",\n        \"options\": [],\n        \"query\": {\n          \"query\": \"label_values({job=~\\\"postgresql|postgresql01|postgresql02|postgresql03\\\"}, instance)\",\n          \"refId\": \"Prometheus-instance-Variable-Query\"\n        },\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"skipUrlSync\": false,\n        \"sort\": 1,\n        \"tagValuesQuery\": \"\",\n        \"tagsQuery\": \"\",\n        \"type\": \"query\",\n        \"useTags\": false\n      },\n      {\n        \"current\": {},\n        \"datasource\": {\n          \"type\": \"prometheus\",\n          \"uid\": \"${DS_PROMETHEUS}\"\n        },\n        \"definition\": \"\",\n        \"hide\": 0,\n        \"includeAll\": true,\n        \"label\": \"Database\",\n        \"multi\": true,\n        \"name\": \"datname\",\n        \"options\": [],\n        \"query\": {\n          \"query\": \"label_values(datname)\",\n          \"refId\": \"Prometheus-datname-Variable-Query\"\n        },\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"skipUrlSync\": false,\n        \"sort\": 1,\n        \"tagValuesQuery\": \"\",\n        \"tagsQuery\": \"\",\n        \"type\": \"query\",\n        \"useTags\": false\n      },\n      {\n        \"current\": {},\n        \"datasource\": {\n          \"type\": \"prometheus\",\n          \"uid\": \"${DS_PROMETHEUS}\"\n        },\n        \"definition\": \"\",\n        \"hide\": 0,\n        \"includeAll\": true,\n        \"label\": \"Lock table\",\n        \"multi\": true,\n        \"name\": \"mode\",\n        \"options\": [],\n        \"query\": {\n          \"query\": \"label_values({mode=~\\\"accessexclusivelock|accesssharelock|exclusivelock|rowexclusivelock|rowsharelock|sharelock|sharerowexclusivelock|shareupdateexclusivelock\\\"}, mode)\",\n          \"refId\": \"Prometheus-mode-Variable-Query\"\n        },\n        \"refresh\": 1,\n        \"regex\": \"\",\n        \"skipUrlSync\": false,\n        \"sort\": 0,\n        \"tagValuesQuery\": \"\",\n        \"tagsQuery\": \"\",\n        \"type\": \"query\",\n        \"useTags\": false\n      }\n    ]\n  },\n  \"time\": {\n    \"from\": \"now-5m\",\n    \"to\": \"now\"\n  },\n  \"timepicker\": {\n    \"refresh_intervals\": [\n      \"5s\",\n      \"10s\",\n      \"30s\",\n      \"1m\",\n      \"5m\",\n      \"15m\",\n      \"30m\",\n      \"1h\",\n      \"2h\",\n      \"1d\"\n    ],\n    \"time_options\": [\n      \"5m\",\n      \"15m\",\n      \"1h\",\n      \"6h\",\n      \"12h\",\n      \"24h\",\n      \"2d\",\n      \"7d\",\n      \"30d\"\n    ]\n  },\n  \"timezone\": \"\",\n  \"title\": \"Postgres exporter\",\n  \"uid\": \"PGExporterDashboard\",\n  \"version\": 9,\n  \"weekStart\": \"\"\n}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/dashboards/ssl-certificate-monitor.json",
    "content": "{\n    \"__inputs\": [\n      {\n        \"name\": \"DS_PROMETHEUS\",\n        \"label\": \"Prometheus\",\n        \"description\": \"\",\n        \"type\": \"datasource\",\n        \"pluginId\": \"prometheus\",\n        \"pluginName\": \"Prometheus\"\n      }\n    ],\n    \"__elements\": {},\n    \"__requires\": [\n      {\n        \"type\": \"grafana\",\n        \"id\": \"grafana\",\n        \"name\": \"Grafana\",\n        \"version\": \"10.1.5\"\n      },\n      {\n        \"type\": \"datasource\",\n        \"id\": \"prometheus\",\n        \"name\": \"Prometheus\",\n        \"version\": \"1.0.0\"\n      },\n      {\n        \"type\": \"panel\",\n        \"id\": \"table\",\n        \"name\": \"Table\",\n        \"version\": \"\"\n      }\n    ],\n    \"annotations\": {\n      \"list\": [\n        {\n          \"builtIn\": 1,\n          \"datasource\": {\n            \"type\": \"datasource\",\n            \"uid\": \"grafana\"\n          },\n          \"enable\": true,\n          \"hide\": true,\n          \"iconColor\": \"rgba(0, 211, 255, 1)\",\n          \"name\": \"Annotations & Alerts\",\n          \"type\": \"dashboard\"\n        }\n      ]\n    },\n    \"description\": \"\",\n    \"editable\": true,\n    \"fiscalYearStartMonth\": 0,\n    \"gnetId\": 13230,\n    \"graphTooltip\": 0,\n    \"id\": null,\n    \"links\": [],\n    \"liveNow\": false,\n    \"panels\": [\n      {\n        \"datasource\": {\n          \"type\": \"prometheus\",\n          \"uid\": \"${DS_PROMETHEUS}\"\n        },\n        \"description\": \"\",\n        \"fieldConfig\": {\n          \"defaults\": {\n            \"custom\": {\n              \"align\": \"auto\",\n              \"cellOptions\": {\n                \"type\": \"auto\"\n              },\n              \"filterable\": false,\n              \"inspect\": false\n            },\n            \"mappings\": [],\n            \"thresholds\": {\n              \"mode\": \"absolute\",\n              \"steps\": [\n                {\n                  \"color\": \"green\",\n                  \"value\": null\n                },\n                {\n                  \"color\": \"red\",\n                  \"value\": 80\n                }\n              ]\n            }\n          },\n          \"overrides\": [\n            {\n              \"matcher\": {\n                \"id\": \"byName\",\n                \"options\": \"instance\"\n              },\n              \"properties\": [\n                {\n                  \"id\": \"custom.width\",\n                  \"value\": 500\n                },\n                {\n                  \"id\": \"displayName\",\n                  \"value\": \"Instance\"\n                }\n              ]\n            },\n            {\n              \"matcher\": {\n                \"id\": \"byName\",\n                \"options\": \"Value #B\"\n              },\n              \"properties\": [\n                {\n                  \"id\": \"custom.cellOptions\",\n                  \"value\": {\n                    \"mode\": \"lcd\",\n                    \"type\": \"gauge\"\n                  }\n                },\n                {\n                  \"id\": \"max\",\n                  \"value\": 0.5\n                },\n                {\n                  \"id\": \"displayName\",\n                  \"value\": \"Connect Time\"\n                },\n                {\n                  \"id\": \"thresholds\",\n                  \"value\": {\n                    \"mode\": \"absolute\",\n                    \"steps\": [\n 
                     {\n                        \"color\": \"green\",\n                        \"value\": null\n                      },\n                      {\n                        \"color\": \"#EAB839\",\n                        \"value\": 0.2\n                      },\n                      {\n                        \"color\": \"red\",\n                        \"value\": 0.4\n                      }\n                    ]\n                  }\n                }\n              ]\n            },\n            {\n              \"matcher\": {\n                \"id\": \"byName\",\n                \"options\": \"Value #A\"\n              },\n              \"properties\": [\n                {\n                  \"id\": \"decimals\",\n                  \"value\": 2\n                },\n                {\n                  \"id\": \"displayName\",\n                  \"value\": \"Certificate expires in\"\n                },\n                {\n                  \"id\": \"thresholds\",\n                  \"value\": {\n                    \"mode\": \"absolute\",\n                    \"steps\": [\n                      {\n                        \"color\": \"semi-dark-red\",\n                        \"value\": null\n                      },\n                      {\n                        \"color\": \"semi-dark-yellow\",\n                        \"value\": __TLS_EXPIRATION_YELLOW__\n                      },\n                      {\n                        \"color\": \"semi-dark-green\",\n                        \"value\": __TLS_EXPIRATION_GREEN__\n                      }\n                    ]\n                  }\n                },\n                {\n                  \"id\": \"custom.cellOptions\",\n                  \"value\": {\n                    \"mode\": \"gradient\",\n                    \"type\": \"color-background\"\n                  }\n                },\n                {\n                  \"id\": \"custom.width\",\n                  \"value\": 220\n                },\n                {\n                  \"id\": \"custom.align\",\n                  \"value\": \"left\"\n                },\n                {\n                  \"id\": \"unit\",\n                  \"value\": \"dtdurations\"\n                }\n              ]\n            },\n            {\n              \"matcher\": {\n                \"id\": \"byName\",\n                \"options\": \"Value #D\"\n              },\n              \"properties\": [\n                {\n                  \"id\": \"displayName\",\n                  \"value\": \"HTTP Response\"\n                },\n                {\n                  \"id\": \"thresholds\",\n                  \"value\": {\n                    \"mode\": \"absolute\",\n                    \"steps\": [\n                      {\n                        \"color\": \"green\",\n                        \"value\": null\n                      },\n                      {\n                        \"color\": \"#EAB839\",\n                        \"value\": 300\n                      },\n                      {\n                        \"color\": \"red\",\n                        \"value\": 400\n                      }\n                    ]\n                  }\n                },\n                {\n                  \"id\": \"custom.cellOptions\",\n                  \"value\": {\n                    \"mode\": \"gradient\",\n                    \"type\": \"color-background\"\n                  }\n                },\n                {\n                  \"id\": 
\"custom.align\",\n                  \"value\": \"center\"\n                },\n                {\n                  \"id\": \"custom.width\",\n                  \"value\": 150\n                }\n              ]\n            },\n            {\n              \"matcher\": {\n                \"id\": \"byName\",\n                \"options\": \"Value #C\"\n              },\n              \"properties\": [\n                {\n                  \"id\": \"displayName\",\n                  \"value\": \"Transfer Time\"\n                },\n                {\n                  \"id\": \"max\",\n                  \"value\": 0.5\n                },\n                {\n                  \"id\": \"custom.cellOptions\",\n                  \"value\": {\n                    \"mode\": \"lcd\",\n                    \"type\": \"gauge\"\n                  }\n                },\n                {\n                  \"id\": \"thresholds\",\n                  \"value\": {\n                    \"mode\": \"absolute\",\n                    \"steps\": [\n                      {\n                        \"color\": \"green\",\n                        \"value\": null\n                      },\n                      {\n                        \"color\": \"#EAB839\",\n                        \"value\": 0.125\n                      },\n                      {\n                        \"color\": \"red\",\n                        \"value\": 0.3\n                      }\n                    ]\n                  }\n                }\n              ]\n            },\n            {\n              \"matcher\": {\n                \"id\": \"byName\",\n                \"options\": \"Value #E\"\n              },\n              \"properties\": [\n                {\n                  \"id\": \"displayName\",\n                  \"value\": \"TLS Time\"\n                },\n                {\n                  \"id\": \"custom.cellOptions\",\n                  \"value\": {\n                    \"mode\": \"lcd\",\n                    \"type\": \"gauge\"\n                  }\n                },\n                {\n                  \"id\": \"max\",\n                  \"value\": 1\n                },\n                {\n                  \"id\": \"thresholds\",\n                  \"value\": {\n                    \"mode\": \"absolute\",\n                    \"steps\": [\n                      {\n                        \"color\": \"green\",\n                        \"value\": null\n                      },\n                      {\n                        \"color\": \"#EAB839\",\n                        \"value\": 0.5\n                      },\n                      {\n                        \"color\": \"red\",\n                        \"value\": 0.9\n                      }\n                    ]\n                  }\n                }\n              ]\n            },\n            {\n              \"matcher\": {\n                \"id\": \"byName\",\n                \"options\": \"Value #F\"\n              },\n              \"properties\": [\n                {\n                  \"id\": \"displayName\",\n                  \"value\": \"Processing Time\"\n                },\n                {\n                  \"id\": \"max\",\n                  \"value\": 0.5\n                },\n                {\n                  \"id\": \"custom.cellOptions\",\n                  \"value\": {\n                    \"mode\": \"lcd\",\n                    \"type\": \"gauge\"\n                  }\n                },\n                {\n                  
\"id\": \"thresholds\",\n                  \"value\": {\n                    \"mode\": \"absolute\",\n                    \"steps\": [\n                      {\n                        \"color\": \"green\",\n                        \"value\": null\n                      },\n                      {\n                        \"color\": \"#EAB839\",\n                        \"value\": 0.25\n                      },\n                      {\n                        \"color\": \"red\",\n                        \"value\": 0.4\n                      }\n                    ]\n                  }\n                }\n              ]\n            },\n            {\n              \"matcher\": {\n                \"id\": \"byName\",\n                \"options\": \"Value #G\"\n              },\n              \"properties\": [\n                {\n                  \"id\": \"displayName\",\n                  \"value\": \"Resolve Time\"\n                },\n                {\n                  \"id\": \"custom.cellOptions\",\n                  \"value\": {\n                    \"mode\": \"lcd\",\n                    \"type\": \"gauge\"\n                  }\n                },\n                {\n                  \"id\": \"max\",\n                  \"value\": 0.01\n                },\n                {\n                  \"id\": \"thresholds\",\n                  \"value\": {\n                    \"mode\": \"absolute\",\n                    \"steps\": [\n                      {\n                        \"color\": \"green\",\n                        \"value\": null\n                      },\n                      {\n                        \"color\": \"#EAB839\",\n                        \"value\": 0.005\n                      },\n                      {\n                        \"color\": \"red\",\n                        \"value\": 0.009\n                      }\n                    ]\n                  }\n                }\n              ]\n            }\n          ]\n        },\n        \"gridPos\": {\n          \"h\": 22,\n          \"w\": 24,\n          \"x\": 0,\n          \"y\": 0\n        },\n        \"id\": 2,\n        \"options\": {\n          \"cellHeight\": \"sm\",\n          \"footer\": {\n            \"countRows\": false,\n            \"fields\": \"\",\n            \"reducer\": [\n              \"sum\"\n            ],\n            \"show\": false\n          },\n          \"frameIndex\": 1,\n          \"showHeader\": true,\n          \"sortBy\": [\n            {\n              \"desc\": false,\n              \"displayName\": \"Certificate expires in\"\n            }\n          ]\n        },\n        \"pluginVersion\": \"10.1.5\",\n        \"targets\": [\n          {\n            \"datasource\": {\n              \"type\": \"prometheus\",\n              \"uid\": \"${DS_PROMETHEUS}\"\n            },\n            \"expr\": \"probe_ssl_earliest_cert_expiry-time()\",\n            \"format\": \"table\",\n            \"hide\": false,\n            \"instant\": true,\n            \"interval\": \"\",\n            \"legendFormat\": \"\",\n            \"refId\": \"A\"\n          },\n          {\n            \"datasource\": {\n              \"type\": \"prometheus\",\n              \"uid\": \"${DS_PROMETHEUS}\"\n            },\n            \"expr\": \"probe_http_status_code\",\n            \"format\": \"table\",\n            \"instant\": true,\n            \"interval\": \"\",\n            \"legendFormat\": \"\",\n            \"refId\": \"D\"\n          },\n          {\n            \"datasource\": {\n           
   \"type\": \"prometheus\",\n              \"uid\": \"${DS_PROMETHEUS}\"\n            },\n            \"expr\": \"probe_http_duration_seconds{phase=\\\"resolve\\\"}\",\n            \"format\": \"table\",\n            \"instant\": true,\n            \"interval\": \"\",\n            \"legendFormat\": \"\",\n            \"refId\": \"G\"\n          },\n          {\n            \"datasource\": {\n              \"type\": \"prometheus\",\n              \"uid\": \"${DS_PROMETHEUS}\"\n            },\n            \"expr\": \"probe_http_duration_seconds{phase=\\\"connect\\\"}\",\n            \"format\": \"table\",\n            \"instant\": true,\n            \"interval\": \"\",\n            \"legendFormat\": \"\",\n            \"refId\": \"B\"\n          },\n          {\n            \"datasource\": {\n              \"type\": \"prometheus\",\n              \"uid\": \"${DS_PROMETHEUS}\"\n            },\n            \"expr\": \"probe_http_duration_seconds{phase=\\\"tls\\\"}\",\n            \"format\": \"table\",\n            \"instant\": true,\n            \"interval\": \"\",\n            \"legendFormat\": \"\",\n            \"refId\": \"E\"\n          },\n          {\n            \"datasource\": {\n              \"type\": \"prometheus\",\n              \"uid\": \"${DS_PROMETHEUS}\"\n            },\n            \"expr\": \"probe_http_duration_seconds{phase=\\\"processing\\\"}\",\n            \"format\": \"table\",\n            \"instant\": true,\n            \"interval\": \"\",\n            \"legendFormat\": \"\",\n            \"refId\": \"F\"\n          },\n          {\n            \"datasource\": {\n              \"type\": \"prometheus\",\n              \"uid\": \"${DS_PROMETHEUS}\"\n            },\n            \"expr\": \"probe_http_duration_seconds{phase=\\\"transfer\\\"}\",\n            \"format\": \"table\",\n            \"instant\": true,\n            \"interval\": \"\",\n            \"legendFormat\": \"\",\n            \"refId\": \"C\"\n          }\n        ],\n        \"title\": \"Certificate & Connection Monitoring\",\n        \"transformations\": [\n          {\n            \"id\": \"seriesToColumns\",\n            \"options\": {\n              \"byField\": \"instance\"\n            }\n          },\n          {\n            \"id\": \"organize\",\n            \"options\": {\n              \"excludeByName\": {\n                \"Time\": true,\n                \"Time 1\": true,\n                \"Time 2\": true,\n                \"Time 3\": true,\n                \"Time 4\": true,\n                \"Time 5\": true,\n                \"Time 6\": true,\n                \"Time 7\": true,\n                \"__name__\": true,\n                \"__name__ 1\": true,\n                \"__name__ 2\": true,\n                \"__name__ 3\": true,\n                \"__name__ 4\": true,\n                \"__name__ 5\": true,\n                \"__name__ 6\": true,\n                \"job\": true,\n                \"job 1\": true,\n                \"job 2\": true,\n                \"job 3\": true,\n                \"job 4\": true,\n                \"job 5\": true,\n                \"job 6\": true,\n                \"job 7\": true,\n                \"phase\": true,\n                \"phase 1\": true,\n                \"phase 2\": true,\n                \"phase 3\": true,\n                \"phase 4\": true,\n                \"phase 5\": true\n              },\n              \"indexByName\": {},\n              \"renameByName\": {}\n            }\n          }\n        ],\n        \"type\": \"table\"\n      }\n    
],\n    \"refresh\": \"\",\n    \"schemaVersion\": 38,\n    \"style\": \"dark\",\n    \"tags\": [],\n    \"templating\": {\n      \"list\": []\n    },\n    \"time\": {\n      \"from\": \"now-6h\",\n      \"to\": \"now\"\n    },\n    \"timepicker\": {},\n    \"timezone\": \"\",\n    \"title\": \"SSL Certificate Monitor\",\n    \"uid\": \"r8eWoHpGz\",\n    \"version\": 4,\n    \"weekStart\": \"\"\n  }"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/alloy.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set prom_user = \"__MONITORING_USERNAME__\" %}\n{%- set prom_pass = \"__MONITORING_PASSWORD__\" %}\n{%- set prom_host = \"prometheus.__DOMAIN__\" %}\n{%- set loki_user = \"__MONITORING_USERNAME__\" %}\n{%- set loki_pass = \"__MONITORING_PASSWORD__\" %}\n{%- set loki_host = \"loki.__DOMAIN__\" %}\n\nalloy:\n  enabled: True\n  package: \"alloy\"\n  service: \"alloy\"\n  config_path: \"/etc/alloy/config.alloy\"\n  config_contents: |\n    ////////////////////////////////////////////////////////////////////////\n    // File managed by Salt. Your changes will be overwritten.\n    ////////////////////////////////////////////////////////////////////////\n    logging {\n      level = \"warn\"\n    }\n\n    prometheus.exporter.unix \"default\" {\n      include_exporter_metrics = true\n      disable_collectors       = [\"mdadm\"]\n    }\n\n    prometheus.scrape \"default\" {\n      targets = concat(\n        prometheus.exporter.unix.default.targets,\n        [{\n          // Self-collect metrics\n          job         = \"alloy\",\n          __address__ = \"127.0.0.1:12345\",\n        }],\n      )\n\n      forward_to = [\n        prometheus.remote_write.metrics_service.receiver,\n      ]\n    }\n\n    prometheus.remote_write \"metrics_service\" {\n      endpoint {\n        url = \"https://{{ prom_host }}/api/v1/write\"\n\n        basic_auth {\n          username = \"{{ prom_user }}\"\n          password = \"{{ prom_pass }}\"\n        }\n      }\n    }\n\n    local.file_match \"file_logs\" {\n      path_targets = [\n        {\"__path__\" = \"/var/log/nginx/*.log\"},\n        {\"__path__\" = \"/var/www/arvados-api/shared/log/production.log\"},\n      ]\n      sync_period = \"5s\"\n    }\n\n    loki.source.file \"log_scrape\" {\n      targets    = local.file_match.file_logs.targets\n      forward_to = [loki.write.grafana_loki.receiver]\n      tail_from_end = true\n    }\n\n    loki.source.journal \"journal_logs\" {\n      relabel_rules = loki.relabel.journal.rules\n      forward_to = [loki.write.grafana_loki.receiver]\n      labels = {component = \"loki.source.journal\"}\n    }\n\n    loki.relabel \"journal\" {\n      forward_to = []\n\n      rule {\n        source_labels = [\"__journal__systemd_unit\"]\n        target_label  = \"systemd_unit\"\n      }\n      rule {\n        source_labels = [\"__journal__hostname\"]\n        target_label = \"systemd_hostname\"\n      }\n      rule {\n        source_labels = [\"__journal__transport\"]\n        target_label = \"systemd_transport\"\n      }\n    }\n\n    loki.write \"grafana_loki\" {\n      endpoint {\n        url = \"https://{{ loki_host }}/loki/api/v1/push\"\n\n        basic_auth {\n          username = \"{{ loki_user }}\"\n          password = \"{{ loki_pass }}\"\n        }\n      }\n    }\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/arvados.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- set _workers = (\"__CONTROLLER_MAX_WORKERS__\" or grains['num_cpus']*2)|int %}\n{%- set max_workers = [_workers, 8]|max %}\n{%- set max_reqs = (\"__CONTROLLER_MAX_QUEUED_REQUESTS__\" or 128)|int %}\n{%- set max_tunnels = (\"__CONTROLLER_MAX_GATEWAY_TUNNELS__\" or 1000)|int %}\n{%- set database_host = (\"__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__\" or \"__DATABASE_INT_IP__\") %}\n{%- set database_name = \"__DATABASE_NAME__\" %}\n{%- set database_user = \"__DATABASE_USER__\" %}\n{%- set database_password = \"__DATABASE_PASSWORD__\" %}\n\n# The variables commented out are the default values that the formula uses.\n# The uncommented values are REQUIRED values. If you don't set them, running\n# this formula will fail.\narvados:\n  ### GENERAL CONFIG\n  version: '__VERSION__'\n  ## It makes little sense to disable this flag, but you can, if you want :)\n  # use_upstream_repo: true\n\n  ## Repo URL is built with grains values. If desired, it can be completely\n  ## overwritten with the pillar parameter 'repo_url'\n  # repo:\n  #   humanname: Arvados Official Repository\n\n  release: __RELEASE__\n\n  ## IMPORTANT!!!!!\n  ## api, workbench and shell require some gems, so you need to make sure ruby\n  ## and deps are installed in order to install and compile the gems.\n  ## We default to `false` in these two variables as it's expected you already\n  ## manage OS packages with some other tool and you don't want us messing up\n  ## with your setup.\n  ruby:\n    ## We set these to `true` here for testing purposes.\n    ## They both default to `false`.\n    manage_ruby: true\n    manage_gems_deps: true\n    # pkg: ruby\n    # gems_deps:\n    #     - curl\n    #     - g++\n    #     - gcc\n    #     - git\n    #     - libcurl4-gnutls-dev\n    #     - libpq-dev\n    #     - libxml2\n    #     - libxml2-dev\n    #     - make\n    #     - python3-dev\n    #     - ruby-dev\n    #     - zlib1g-dev\n\n  config:\n    check_command: /usr/bin/arvados-server config-check -strict=false -config\n  #   file: /etc/arvados/config.yml\n  #   user: root\n  ## IMPORTANT!!!!!\n  ## If you're intalling any of the rails apps (api, workbench), the group\n  ## should be set to that of the web server, usually `www-data`\n  #   group: root\n  #   mode: 640\n  dispatcher:\n    pkg:\n      name: arvados-dispatch-cloud\n    service:\n      name: arvados-dispatch-cloud\n\n  ### ARVADOS CLUSTER CONFIG\n  cluster:\n    name: __CLUSTER__\n    domain: __DOMAIN__\n\n    database:\n      # max concurrent connections per arvados server daemon\n      # connection_pool_max: 32\n      name: {{ database_name }}\n      host: {{ database_host }}\n      password: {{ database_password }}\n      user: {{ database_user }}\n      encoding: en_US.utf8\n      client_encoding: UTF8\n\n    tls:\n      # certificate: ''\n      # key: ''\n      # required to test with arvados-snakeoil certs\n      insecure: false\n\n    resources:\n      virtual_machines:\n        shell:\n          name: shell.__DOMAIN__\n          backend: __SHELL_INT_IP__\n          port: 4200\n\n    ### TOKENS\n    tokens:\n      system_root: __SYSTEM_ROOT_TOKEN__\n      management: __MANAGEMENT_TOKEN__\n      anonymous_user: __ANONYMOUS_USER_TOKEN__\n\n    ### KEYS\n    secrets:\n      blob_signing_key: __BLOB_SIGNING_KEY__\n      workbench_secret_key: \"deprecated\"\n\n    Login:\n      Test:\n        Enable: true\n        Users:\n          __INITIAL_USER__:\n   
         Email: __INITIAL_USER_EMAIL__\n            Password: __INITIAL_USER_PASSWORD__\n\n    ### API\n    API:\n      MaxConcurrentRailsRequests: {{ max_workers * 2 }}\n      MaxConcurrentRequests: {{ max_reqs }}\n      MaxQueuedRequests: {{ max_reqs }}\n      MaxGatewayTunnels: {{ max_tunnels }}\n\n    ### CONTAINERS\n    {%- set dispatcher_ssh_privkey = \"__DISPATCHER_SSH_PRIVKEY__\" %}\n    Containers:\n      MaxRetryAttempts: 10\n      CloudVMs:\n        ResourceTags:\n          Name: __CLUSTER__-compute-node\n        BootProbeCommand: 'systemctl is-system-running'\n        ImageID: __COMPUTE_AMI__\n        Driver: ec2\n        DriverParameters:\n          Region: __COMPUTE_AWS_REGION__\n          EBSVolumeType: gp3\n          AdminUsername: __COMPUTE_USER__\n          ### This SG should allow SSH from the dispatcher to the compute nodes\n          SecurityGroupIDs: ['__COMPUTE_SG__']\n          SubnetID: __COMPUTE_SUBNET__\n          IAMInstanceProfile: __CLUSTER__-compute-node-00-iam-role\n      DispatchPrivateKey: {{ dispatcher_ssh_privkey|yaml_dquote }}\n\n    ### VOLUMES\n    ## This should usually match all your `keepstore` instances\n    Volumes:\n      # the volume name will be composed with\n      # <cluster>-nyw5e-<volume>\n      __CLUSTER__-nyw5e-000000000000000:\n        Replication: 2\n        Driver: S3\n        DriverParameters:\n          Bucket: __KEEP_AWS_S3_BUCKET__\n          IAMRole: __KEEP_AWS_IAM_ROLE__\n          Region: __KEEP_AWS_REGION__\n          # IMPORTANT: The default value for PrefixLength is 0, and should not\n          # be changed once the volume is in use. For new installations it's\n          # recommended to set it to 3 for better performance.\n          # See: https://doc.arvados.org/install/configure-s3-object-storage.html\n          PrefixLength: 3\n\n    Users:\n      NewUsersAreActive: true\n      AutoAdminFirstUser: true\n      AutoSetupNewUsers: true\n\n    Services:\n      ContainerWebServices:\n        ExternalURL: 'https://*.containers.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'\n      Controller:\n        ExternalURL: 'https://__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'\n        InternalURLs:\n          'http://localhost:8003': {}\n      DispatchCloud:\n        InternalURLs:\n          'http://__DISPATCHER_INT_IP__:9006': {}\n      Keepbalance:\n        InternalURLs:\n          'http://__KEEPBALANCE_INT_IP__:9005': {}\n      Keepproxy:\n        ExternalURL: 'https://keep.__DOMAIN__:__KEEP_EXT_SSL_PORT__'\n        InternalURLs:\n          'http://localhost:25107': {}\n      Keepstore:\n        InternalURLs:\n          'http://__KEEPSTORE0_INT_IP__:25107': {}\n      RailsAPI:\n        InternalURLs:\n          'http://localhost:8004': {}\n      WebDAV:\n        ExternalURL: 'https://*.collections.__DOMAIN__:__KEEPWEB_EXT_SSL_PORT__/'\n        InternalURLs:\n          'http://__KEEPWEB_INT_IP__:9002': {}\n      WebDAVDownload:\n        ExternalURL: 'https://download.__DOMAIN__:__KEEPWEB_EXT_SSL_PORT__'\n      WebShell:\n        ExternalURL: 'https://webshell.__DOMAIN__:__KEEPWEB_EXT_SSL_PORT__'\n      Websocket:\n        ExternalURL: 'wss://ws.__DOMAIN__/websocket'\n        InternalURLs:\n          'http://localhost:8005': {}\n      Workbench1:\n        ExternalURL: 'https://workbench.__DOMAIN__:__WORKBENCH1_EXT_SSL_PORT__'\n      Workbench2:\n        ExternalURL: 'https://workbench2.__DOMAIN__:__WORKBENCH2_EXT_SSL_PORT__'\n\n    InstanceTypes:\n      t3small:\n        ProviderType: t3.small\n        VCPUs: 2\n        RAM: 2GiB\n        
AddedScratch: 50GB\n        Price: 0.0208\n      c5large:\n        ProviderType: c5.large\n        VCPUs: 2\n        RAM: 4GiB\n        AddedScratch: 50GB\n        Price: 0.085\n      m5large:\n        ProviderType: m5.large\n        VCPUs: 2\n        RAM: 8GiB\n        AddedScratch: 50GB\n        Price: 0.096\n      c5xlarge:\n        ProviderType: c5.xlarge\n        VCPUs: 4\n        RAM: 8GiB\n        AddedScratch: 100GB\n        Price: 0.17\n      m5xlarge:\n        ProviderType: m5.xlarge\n        VCPUs: 4\n        RAM: 16GiB\n        AddedScratch: 100GB\n        Price: 0.192\n      m5xlarge_extradisk:\n        ProviderType: m5.xlarge\n        VCPUs: 4\n        RAM: 16GiB\n        AddedScratch: 400GB\n        Price: 0.193\n      c52xlarge:\n        ProviderType: c5.2xlarge\n        VCPUs: 8\n        RAM: 16GiB\n        AddedScratch: 200GB\n        Price: 0.34\n      m52xlarge:\n        ProviderType: m5.2xlarge\n        VCPUs: 8\n        RAM: 32GiB\n        AddedScratch: 200GB\n        Price: 0.384\n      c54xlarge:\n        ProviderType: c5.4xlarge\n        VCPUs: 16\n        RAM: 32GiB\n        AddedScratch: 400GB\n        Price: 0.68\n      m54xlarge:\n        ProviderType: m5.4xlarge\n        VCPUs: 16\n        RAM: 64GiB\n        AddedScratch: 400GB\n        Price: 0.768\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/aws_credentials.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\naws_credentials:\n  region: __LE_AWS_REGION__\n  access_key_id: __LE_AWS_ACCESS_KEY_ID__\n  secret_access_key: __LE_AWS_SECRET_ACCESS_KEY__\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/docker.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ndocker:\n  pkg:\n    docker:\n      use_upstream: package\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/grafana.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- set enable_smtp = (\"__GRAFANA_SMTP_SERVER__\" != \"\") %}\n{%- set smtp_server = \"__GRAFANA_SMTP_SERVER__\" %}\n{%- set smtp_user = \"__GRAFANA_SMTP_USER__\" %}\n{%- set smtp_pwd = \"__GRAFANA_SMTP_PASSWORD__\" %}\n{%- set smtp_from = (\"__GRAFANA_SMTP_FROM_EMAIL__\" or \"grafana@__DOMAIN__\") %}\n{%- set smtp_name = (\"__GRAFANA_SMTP_FROM_NAME__\" or \"Grafana __CLUSTER__\") %}\n\ngrafana:\n  pkg:\n    name: grafana\n    use_upstream_archive: false\n    use_upstream_repo: true\n    repo:\n      humanname: grafana_official\n      name: deb https://apt.grafana.com/ stable main\n      file: /etc/apt/sources.list.d/grafana.list\n      key_url: https://apt.grafana.com/gpg.key\n      require_in:\n        - pkg: grafana\n  config:\n    default:\n      instance_name: __DOMAIN__\n    security:\n      admin_user: {{ \"__MONITORING_USERNAME__\" | yaml_dquote }}\n      admin_password: {{ \"__MONITORING_PASSWORD__\" | yaml_dquote }}\n      admin_email: {{ \"__MONITORING_EMAIL__\" | yaml_dquote }}\n    server:\n      protocol: http\n      http_addr: 127.0.0.1\n      http_port: 3000\n      domain: grafana.__DOMAIN__\n      root_url: https://grafana.__DOMAIN__\n{%- if enable_smtp %}\n    smtp:\n      enabled: yes\n      host: {{ smtp_server }}\n  {%- if smtp_user != '' and smtp_pwd != '' %}\n      user: {{ smtp_user | yaml_dquote }}\n      password: {{ smtp_pwd | yaml_dquote }}\n  {%- endif %}\n      from_address: {{ smtp_from | yaml_dquote }}\n      from_name: {{ smtp_name | yaml_dquote }}\n      skip_verify: false\n{%- endif %}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### LETSENCRYPT\nletsencrypt:\n  use_package: true\n  pkgs:\n    - certbot: latest\n    - python3-certbot-dns-route53\n  config:\n    server: https://acme-v02.api.letsencrypt.org/directory\n    email: __INITIAL_USER_EMAIL__\n    authenticator: dns-route53\n    agree-tos: true\n    keep-until-expiring: true\n    expand: true\n    max-log-backups: 0\n    deploy-hook: systemctl reload nginx\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_balancer_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### LETSENCRYPT\nletsencrypt:\n  domainsets:\n    __BALANCER_NODENAME__:\n      - __DOMAIN__\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_controller_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### LETSENCRYPT\nletsencrypt:\n  domainsets:\n    controller.__DOMAIN__:\n      - __DOMAIN__\n      - containers.__DOMAIN__\n      - '*.containers.__DOMAIN__'\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_grafana_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### LETSENCRYPT\nletsencrypt:\n  domainsets:\n    grafana.__DOMAIN__:\n      - grafana.__DOMAIN__\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepproxy_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### LETSENCRYPT\nletsencrypt:\n  domainsets:\n    keepproxy.__DOMAIN__:\n      - keep.__DOMAIN__\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_keepweb_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### LETSENCRYPT\nletsencrypt:\n  domainsets:\n    download.__DOMAIN__:\n      - download.__DOMAIN__\n    collections.__DOMAIN__:\n      - collections.__DOMAIN__\n      - '*.collections.__DOMAIN__'\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_loki_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### LETSENCRYPT\nletsencrypt:\n  domainsets:\n    loki.__DOMAIN__:\n      - loki.__DOMAIN__\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_prometheus_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### LETSENCRYPT\nletsencrypt:\n  domainsets:\n    prometheus.__DOMAIN__:\n      - prometheus.__DOMAIN__\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_webshell_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### LETSENCRYPT\nletsencrypt:\n  domainsets:\n    webshell.__DOMAIN__:\n      - webshell.__DOMAIN__\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_websocket_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### LETSENCRYPT\nletsencrypt:\n  domainsets:\n    websocket.__DOMAIN__:\n      - ws.__DOMAIN__\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench2_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### LETSENCRYPT\nletsencrypt:\n  domainsets:\n    workbench2.__DOMAIN__:\n      - workbench2.__DOMAIN__\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/letsencrypt_workbench_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### LETSENCRYPT\nletsencrypt:\n  domainsets:\n    workbench.__DOMAIN__:\n      - workbench.__DOMAIN__\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/locale.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nlocale:\n  present:\n    - \"en_US.UTF-8 UTF-8\"\n  default:\n    # Note: On Debian systems, don't write the second 'UTF-8' here or you will\n    # experience Salt problems like: LookupError: unknown encoding: utf_8_utf_8\n    # Restart the minion after correcting this!\n    name: 'en_US.UTF-8'\n    requires: 'en_US.UTF-8 UTF-8'\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/logrotate.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# The logrotate formula checks that an associated service is running.\n# The default it checks is cron, but all the distributions Arvados supports\n# have switched to a systemd timer, so check that instead.\n# Refer to logrotate-formula's documentation for details\n# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst\n\nlogrotate:\n  service: logrotate.timer\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/logrotate_api.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Refer to logrotate-formula's documentation for information about customization\n# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst\n\nlogrotate:\n  jobs:\n    arvados-api:\n      path:\n        - /var/www/arvados-api/shared/log/*.log\n      config:\n        - daily\n        - missingok\n        - rotate 365\n        - compress\n        - nodelaycompress\n        - copytruncate\n        - sharedscripts\n        - postrotate\n        - '  systemctl try-reload-or-restart arvados-railsapi.service'\n        - endscript\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/logrotate_wb1.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Refer to logrotate-formula's documentation for information about customization\n# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst\n\nlogrotate:\n  jobs:\n    arvados-workbench:\n      path:\n        - /var/www/arvados-workbench/shared/log/*.log\n      config:\n        - daily\n        - missingok\n        - rotate 365\n        - compress\n        - nodelaycompress\n        - copytruncate\n        - sharedscripts\n        - postrotate\n        - '  [ -s /run/nginx.pid ] && kill -USR1 `cat /run/nginx.pid`'\n        - endscript\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/loki.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set aws_region = \"__LOKI_AWS_REGION__\" %}\n{%- set aws_s3_bucket = \"__LOKI_AWS_S3_BUCKET__\" %}\n{%- set log_retention = \"__LOKI_LOG_RETENTION_TIME__\" %}\n{%- set data_path = \"/var/lib/loki\" %}\n\nloki:\n  enabled: True\n  package: \"loki\"\n  service: \"loki\"\n  config_path: \"/etc/loki/config.yml\"\n  data_path: {{ data_path }}\n  config_contents: |\n    ########################################################################\n    # File managed by Salt. Your changes will be overwritten.\n    ########################################################################\n    auth_enabled: false\n    server:\n      http_listen_port: 3100\n      grpc_listen_port: 9096\n\n    common:\n      instance_addr: 127.0.0.1\n      path_prefix: {{ data_path }}\n      storage:\n        filesystem:\n          chunks_directory: {{ data_path }}/chunks\n          rules_directory: {{ data_path }}/rules\n      replication_factor: 1\n      ring:\n        kvstore:\n          store: inmemory\n\n    query_range:\n      results_cache:\n        cache:\n          embedded_cache:\n            enabled: true\n            max_size_mb: 100\n\n    storage_config:\n      tsdb_shipper:\n        active_index_directory: {{ data_path }}/index\n        cache_location: {{ data_path }}/index_cache\n        cache_ttl: 24h\n      aws:\n        s3: s3://{{ aws_region }}\n        bucketnames: {{ aws_s3_bucket }}\n\n    schema_config:\n      configs:\n        - from: 2024-01-01\n          store: tsdb\n          object_store: aws\n          schema: v13\n          index:\n            prefix: index_\n            period: 24h\n\n    limits_config:\n      retention_period: {{ log_retention }}\n\n    compactor:\n      working_directory: {{ data_path }}/retention\n      delete_request_store: aws\n      retention_enabled: true\n      compaction_interval: 10m\n      retention_delete_delay: 2h\n      retention_delete_worker_count: 100\n\n    frontend:\n      encoding: protobuf\n\n    analytics:\n      reporting_enabled: false\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- set _workers = (\"__CONTROLLER_MAX_WORKERS__\" or grains['num_cpus']*2)|int %}\n{%- set max_workers = [_workers, 8]|max %}\n{%- set max_reqs = (\"__CONTROLLER_MAX_QUEUED_REQUESTS__\" or 128)|int %}\n{%- set max_tunnels = (\"__CONTROLLER_MAX_GATEWAY_TUNNELS__\" or 1000)|int %}\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      worker_processes: {{ max_workers }}\n\n      # Each client request uses up to 3 connections (1 with the client, 1\n      # proxied to the controller, and potentially 1 from the controller back\n      # to Passenger).  Each connection consumes a file descriptor, which is\n      # where these limits come from: we multiply by 5 instead of 3 to stay\n      # on the safe side. With the defaults (128 queued requests + 1000\n      # gateway tunnels), that yields (128 + 1000) * 5 + 1 = 5641.\n      worker_rlimit_nofile: {{ (max_reqs + max_tunnels) * 5 + 1 }}\n      events:\n        worker_connections: {{ (max_reqs + max_tunnels) * 5 + 1 }}\n\n  ### SITES\n  servers:\n    managed:\n      # Remove default webserver\n      default:\n        enabled: false\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_api_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### ARVADOS\narvados:\n  config:\n    group: www-data\n\n### NGINX\nnginx:\n  ### SITES\n  servers:\n    managed:\n      arvados_api.conf:\n        enabled: false\n        overwrite: false\n        config:\n          - server:\n            - listen: 'localhost:8004'\n            - server_name: api\n            - root: /var/www/arvados-api/current/public\n            - index:  index.html index.htm\n            - access_log: /var/log/nginx/api.__DOMAIN__-upstream.access.log combined\n            - error_log: /var/log/nginx/api.__DOMAIN__-upstream.error.log\n            - client_max_body_size: 128m\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_balancer_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n{%- set domain = \"__DOMAIN__\" %}\n{%- set balancer_backends = \"__CONTROLLER_NODES__\".split(\",\") %}\n{%- set controller_nr = balancer_backends|length %}\n{%- set disabled_controller = \"__DISABLED_CONTROLLER__\" %}\n{%- set max_reqs = (\"__CONTROLLER_MAX_QUEUED_REQUESTS__\" or 128)|int %}\n{%- set max_tunnels = (\"__CONTROLLER_MAX_GATEWAY_TUNNELS__\" or 1000)|int %}\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      worker_rlimit_nofile: {{ (max_reqs + max_tunnels) * 5 * controller_nr }}\n      events:\n        worker_connections: {{ (max_reqs + max_tunnels) * 5 * controller_nr }}\n      ### STREAMS\n      http:\n        'geo $external_client':\n          default: 1\n          '127.0.0.0/8': 0\n          '__CLUSTER_INT_CIDR__': 0\n        upstream controller_upstream:\n        {%- for backend in balancer_backends %}\n          {%- if disabled_controller == \"\" or not backend.startswith(disabled_controller) %}\n          'server {{ backend }}:80': ''\n          {%- else %}\n          'server {{ backend }}:80 down': ''\n          {% endif %}\n        {%- endfor %}\n\n  ### SNIPPETS\n  snippets:\n    # Based on https://ssl-config.mozilla.org/#server=nginx&version=1.14.2&config=intermediate&openssl=1.1.1d&guideline=5.4\n    ssl_hardening_default.conf:\n      - ssl_session_timeout: 1d\n      - ssl_session_cache: 'shared:arvadosSSL:10m'\n      - ssl_session_tickets: 'off'\n\n      # intermediate configuration\n      - ssl_protocols: TLSv1.2 TLSv1.3\n      - ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384\n      - ssl_prefer_server_ciphers: 'off'\n\n      # HSTS (ngx_http_headers_module is required) (63072000 seconds)\n      - add_header: 'Strict-Transport-Security \"max-age=63072000\" always'\n\n      # OCSP stapling\n      - ssl_stapling: 'on'\n      - ssl_stapling_verify: 'on'\n\n      # verify chain of trust of OCSP response using Root CA and Intermediate certs\n      # - ssl_trusted_certificate /path/to/root_CA_cert_plus_intermediates\n\n      # curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam\n      # - ssl_dhparam: /path/to/dhparam\n\n      # replace with the IP address of your resolver\n      # - resolver: 127.0.0.1\n\n  ### SITES\n  servers:\n    managed:\n      # Remove default webserver\n      default:\n        enabled: false\n      ### DEFAULT\n      arvados_balancer_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: {{ domain }}\n            - listen:\n              - 80 default\n            - location /.well-known:\n              - root: /var/www\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_balancer_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: {{ domain }}\n            - listen:\n              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://controller_upstream'\n              - proxy_read_timeout: 300\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_set_header: 'X-External-Client $external_client'\n              - proxy_set_header: 'Upgrade $http_upgrade'\n              - proxy_set_header: 'Connection \"upgrade\"'\n              - proxy_max_temp_file_size: 0\n              - proxy_request_buffering: 'off'\n              - proxy_buffering: 'off'\n              - proxy_http_version: '1.1'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - access_log: /var/log/nginx/{{ domain }}.access.log combined\n            - error_log: /var/log/nginx/{{ domain }}.error.log\n            - client_max_body_size: 128m\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_collections_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n\n### NGINX\nnginx:\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_collections_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: '~^(.*\\.)?collections\\.__DOMAIN__'\n            - listen:\n              - 80\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      ### COLLECTIONS\n      arvados_collections_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: '~^(.*\\.)?collections\\.__DOMAIN__'\n            - listen:\n              - __KEEPWEB_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://collections_downloads_upstream'\n              - proxy_read_timeout: 90\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_buffering: 'off'\n            - client_max_body_size: 0\n            - proxy_http_version: '1.1'\n            - proxy_request_buffering: 'off'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - access_log: /var/log/nginx/collections.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/collections.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_controller_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n{%- set balanced_controller = (\"__ENABLE_BALANCER__\"|to_bool) %}\n{%- set server_name = grains['fqdn'] if balanced_controller else \"__DOMAIN__\" %}\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        'geo $external_client':\n          default: 1\n          '127.0.0.0/8': 0\n          '__CLUSTER_INT_CIDR__': 0\n        upstream controller_upstream:\n          - server: 'localhost:8003  fail_timeout=10s'\n\n  ### SITES\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_controller_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: {{ server_name }} '~^(.*\\.)?containers\\.__DOMAIN__'\n            - listen:\n              - 80 default\n            - location /.well-known:\n              - root: /var/www\n            {%- if balanced_controller %}\n            {%- set balancer_ip = salt['cmd.run'](\"getent hosts __BALANCER_NODENAME__ | awk '{print $1 ; exit}'\", python_shell=True) %}\n            {%- set prometheus_ip = salt['cmd.run'](\"getent hosts __PROMETHEUS_NODENAME__ | awk '{print $1 ; exit}'\", python_shell=True) %}\n            - index: index.html index.htm\n            - location /:\n              - allow: {{ balancer_ip }}\n              - allow: {{ prometheus_ip }}\n              - deny: all\n              - proxy_pass: 'http://controller_upstream'\n              - proxy_read_timeout: 300\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_max_temp_file_size: 0\n              - proxy_request_buffering: 'off'\n              - proxy_buffering: 'off'\n              - proxy_http_version: '1.1'\n            - access_log: /var/log/nginx/{{ server_name }}.access.log combined\n            - error_log: /var/log/nginx/{{ server_name }}.error.log\n            - client_max_body_size: 128m\n            {%- else %}\n            - location /:\n              - return: '301 https://$host$request_uri'\n            {%- endif %}\n\n      {%- if not balanced_controller %}\n      arvados_controller_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: {{ server_name }} '~^(.*\\.)?containers\\.__DOMAIN__'\n            - listen:\n              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://controller_upstream'\n              - proxy_read_timeout: 300\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_set_header: 'X-External-Client $external_client'\n              - proxy_set_header: 'Upgrade $http_upgrade'\n              - proxy_set_header: 'Connection \"upgrade\"'\n              - proxy_max_temp_file_size: 0\n              - proxy_request_buffering: 'off'\n              - proxy_buffering: 'off'\n              - proxy_http_version: '1.1'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - access_log: /var/log/nginx/{{ server_name }}.access.log combined\n            - error_log: /var/log/nginx/{{ server_name }}.error.log\n            - client_max_body_size: 128m\n      {%- endif %}\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_download_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n\n### NGINX\nnginx:\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_download_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: download.__DOMAIN__\n            - listen:\n              - 80\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      ### DOWNLOAD\n      arvados_download_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: download.__DOMAIN__\n            - listen:\n              - __KEEPWEB_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://collections_downloads_upstream'\n              - proxy_read_timeout: 90\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_buffering: 'off'\n            - client_max_body_size: 0\n            - proxy_http_version: '1.1'\n            - proxy_request_buffering: 'off'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - access_log: /var/log/nginx/download.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/download.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_grafana_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        upstream grafana_upstream:\n          - server: '127.0.0.1:3000 fail_timeout=10s'\n\n  ### SITES\n  servers:\n    managed:\n      ### GRAFANA\n      grafana:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: grafana.__DOMAIN__\n            - listen:\n              - 80\n            - location /.well-known:\n              - root: /var/www\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      grafana-ssl:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: grafana.__DOMAIN__\n            - listen:\n              - 443 http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://grafana_upstream'\n              - proxy_read_timeout: 300\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            - include: snippets/ssl_hardening_default.conf\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - access_log: /var/log/nginx/grafana.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/grafana.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepproxy_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        upstream keepproxy_upstream:\n          - server: 'localhost:25107 fail_timeout=10s'\n\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_keepproxy_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: keep.__DOMAIN__\n            - listen:\n              - 80\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_keepproxy_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: keep.__DOMAIN__\n            - listen:\n              - __KEEP_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://keepproxy_upstream'\n              - proxy_read_timeout: 90\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_buffering: 'off'\n            - client_body_buffer_size: 64M\n            - client_max_body_size: 64M\n            - proxy_http_version: '1.1'\n            - proxy_request_buffering: 'off'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - access_log: /var/log/nginx/keepproxy.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/keepproxy.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_keepweb_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Keepweb upstream is common to both downloads and collections\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        upstream collections_downloads_upstream:\n          - server: '__KEEPWEB_INT_IP__:9002 fail_timeout=10s'\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_loki_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        upstream loki_upstream:\n          - server: '127.0.0.1:3100 fail_timeout=10s'\n\n  ### SITES\n  servers:\n    managed:\n      ### LOKI\n      loki:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: loki.__DOMAIN__\n            - listen:\n              - 80\n            - location /.well-known:\n              - root: /var/www\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      loki-ssl:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: loki.__DOMAIN__\n            - listen:\n              - 443 http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://loki_upstream'\n              - proxy_read_timeout: 300\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            - include: snippets/ssl_hardening_default.conf\n            - auth_basic: '\"Restricted Area\"'\n            - auth_basic_user_file: htpasswd\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - access_log: /var/log/nginx/loki.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/loki.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_prometheus_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        upstream prometheus_upstream:\n          - server: '127.0.0.1:9090 fail_timeout=10s'\n\n  ### SITES\n  servers:\n    managed:\n      ### PROMETHEUS\n      prometheus:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: prometheus.__DOMAIN__\n            - listen:\n              - 80\n            - location /.well-known:\n              - root: /var/www\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      prometheus-ssl:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: prometheus.__DOMAIN__\n            - listen:\n              - 443 http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://prometheus_upstream'\n              - proxy_read_timeout: 300\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            - include: snippets/ssl_hardening_default.conf\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - auth_basic: '\"Restricted Area\"'\n            - auth_basic_user_file: htpasswd\n            - access_log: /var/log/nginx/prometheus.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/prometheus.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_snippets.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### NGINX\nnginx:\n  ### SNIPPETS\n  snippets:\n    # Based on https://ssl-config.mozilla.org/#server=nginx&version=1.14.2&config=intermediate&openssl=1.1.1d&guideline=5.4\n    ssl_hardening_default.conf:\n      - ssl_session_timeout: 1d\n      - ssl_session_cache: 'shared:arvadosSSL:10m'\n      - ssl_session_tickets: 'off'\n\n      # intermediate configuration\n      - ssl_protocols: TLSv1.2 TLSv1.3\n      - ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384\n      - ssl_prefer_server_ciphers: 'off'\n\n      # HSTS (ngx_http_headers_module is required) (63072000 seconds)\n      - add_header: 'Strict-Transport-Security \"max-age=63072000\" always'\n\n      # OCSP stapling\n      - ssl_stapling: 'on'\n      - ssl_stapling_verify: 'on'\n\n      # verify chain of trust of OCSP response using Root CA and Intermediate certs\n      # - ssl_trusted_certificate /path/to/root_CA_cert_plus_intermediates\n\n      # curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam\n      # - ssl_dhparam: /path/to/dhparam\n\n      # replace with the IP address of your resolver\n      # - resolver: 127.0.0.1\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_webshell_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n\n      ### STREAMS\n      http:\n        upstream webshell_upstream:\n          - server: 'shell.__DOMAIN__:4200 fail_timeout=10s'\n\n  ### SITES\n  servers:\n    managed:\n      arvados_webshell_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: webshell.__DOMAIN__\n            - listen:\n              - 80\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_webshell_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: webshell.__DOMAIN__\n            - listen:\n              - __WEBSHELL_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /shell.__DOMAIN__:\n              - proxy_pass: 'http://webshell_upstream'\n              - proxy_read_timeout: 90\n              - proxy_connect_timeout: 90\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_ssl_session_reuse: 'off'\n\n              - \"if ($request_method = 'OPTIONS')\":\n                - add_header: \"'Access-Control-Allow-Origin' '*'\"\n                - add_header: \"'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'\"\n                - add_header: \"'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'\"\n                - add_header: \"'Access-Control-Max-Age' 1728000\"\n                - add_header: \"'Content-Type' 'text/plain charset=UTF-8'\"\n                - add_header: \"'Content-Length' 0\"\n                - return: 204\n\n              - \"if ($request_method = 'POST')\":\n                - add_header: \"'Access-Control-Allow-Origin' '*'\"\n                - add_header: \"'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'\"\n                - add_header: \"'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'\"\n\n              - \"if ($request_method = 'GET')\":\n                - add_header: \"'Access-Control-Allow-Origin' '*'\"\n                - add_header: \"'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'\"\n                - add_header: \"'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'\"\n\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - access_log: /var/log/nginx/webshell.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/webshell.__DOMAIN__.error.log\n\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_websocket_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        upstream websocket_upstream:\n          - server: 'localhost:8005 fail_timeout=10s'\n\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_websocket_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: ws.__DOMAIN__\n            - listen:\n              - 80\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_websocket_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: ws.__DOMAIN__\n            - listen:\n              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://websocket_upstream'\n              - proxy_read_timeout: 600\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: 'Host $host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'Upgrade $http_upgrade'\n              - proxy_set_header: 'Connection \"upgrade\"'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_buffering: 'off'\n            - client_body_buffer_size: 64M\n            - client_max_body_size: 64M\n            - proxy_http_version: '1.1'\n            - proxy_request_buffering: 'off'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - access_log: /var/log/nginx/ws.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/ws.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench2_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n\n### ARVADOS\narvados:\n  config:\n    group: www-data\n\n### NGINX\nnginx:\n  ### SITES\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_workbench2_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: workbench2.__DOMAIN__\n            - listen:\n              - 80\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_workbench2_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: workbench2.__DOMAIN__\n            - listen:\n              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl\n\n            - location /:\n              - return: '301 https://workbench.__DOMAIN__$request_uri'\n\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - access_log: /var/log/nginx/workbench2.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/workbench2.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/nginx_workbench_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n\n### ARVADOS\narvados:\n  config:\n    group: www-data\n\n### NGINX\nnginx:\n  ### SITES\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_workbench_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: workbench.__DOMAIN__\n            - listen:\n              - 80\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_workbench_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          # Maps WB1 '/actions?uuid=X' URLs to their equivalent on WB2\n          - 'map $request_uri $actions_redirect':\n            - '~^/actions\\?uuid=(.*-4zz18-.*)': '/collections/$1'\n            - '~^/actions\\?uuid=(.*-j7d0g-.*)': '/projects/$1'\n            - '~^/actions\\?uuid=(.*-tpzed-.*)': '/projects/$1'\n            - '~^/actions\\?uuid=(.*-7fd4e-.*)': '/workflows/$1'\n            - '~^/actions\\?uuid=(.*-xvhdp-.*)': '/processes/$1'\n            - '~^/actions\\?uuid=(.*)': '/'\n            - default: 0\n\n          - server:\n            - server_name: workbench.__DOMAIN__\n            - listen:\n              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n\n    # REDIRECTS FROM WORKBENCH 1 TO WORKBENCH 2\n\n    # Paths that are not redirected because wb1 and wb2 have similar enough paths\n    # that a redirect is pointless and would create a redirect loop.\n    # rewrite ^/api_client_authorizations.* /api_client_authorizations redirect;\n    # rewrite ^/repositories.* /repositories redirect;\n    # rewrite ^/links.* /links redirect;\n    # rewrite ^/projects.* /projects redirect;\n    # rewrite ^/trash /trash redirect;\n\n            # WB1 '/actions?uuid=X' URL Redirects\n            - 'if ($actions_redirect)':\n              - return: '301 $actions_redirect'\n\n    # Redirects that include a uuid\n            - rewrite: '^/work_units/(.*) /processes/$1 redirect'\n            - rewrite: '^/container_requests/(.*) /processes/$1 redirect'\n            - rewrite: '^/users/(.*) /user/$1 redirect'\n            - rewrite: '^/groups/(.*) /group/$1 redirect'\n\n    # Special file download redirects\n            - 'if ($arg_disposition = attachment)':\n              - rewrite: '^/collections/([^/]*)/(.*) /?redirectToDownload=/c=$1/$2? redirect'\n\n            - 'if ($arg_disposition = inline)':\n              - rewrite: '^/collections/([^/]*)/(.*) /?redirectToPreview=/c=$1/$2? redirect'\n\n    # Redirects that go to a roughly equivalent page\n            - rewrite: '^/virtual_machines.* /virtual-machines-admin redirect'\n            - rewrite: '^/users/.*/virtual_machines /virtual-machines-user redirect'\n            - rewrite: '^/authorized_keys.* /ssh-keys-admin redirect'\n            - rewrite: '^/users/.*/ssh_keys /ssh-keys-user redirect'\n            - rewrite: '^/containers.* /all_processes redirect'\n            - rewrite: '^/container_requests /all_processes redirect'\n            - rewrite: '^/job.* /all_processes redirect'\n            - rewrite: '^/users/link_account /link_account redirect'\n            - rewrite: '^/keep_services.* /keep-services redirect'\n            - rewrite: '^/trash_items.* /trash redirect'\n\n    # Redirects that don't have a good mapping and\n    # just go to root.\n            - rewrite: '^/themes.* / redirect'\n            - rewrite: '^/keep_disks.* / redirect'\n            - rewrite: '^/user_agreements.* / redirect'\n            - rewrite: '^/nodes.* / redirect'\n            - rewrite: '^/humans.* / redirect'\n            - rewrite: '^/traits.* / redirect'\n            - rewrite: '^/sessions.* / redirect'\n            - rewrite: '^/logout.* / redirect'\n            - rewrite: '^/logged_out.* / redirect'\n            - rewrite: '^/current_token / redirect'\n            - rewrite: '^/logs.* / redirect'\n            - rewrite: '^/factory_jobs.* / redirect'\n            - rewrite: '^/uploaded_datasets.* / redirect'\n            - rewrite: '^/specimens.* / redirect'\n            - rewrite: '^/pipeline_templates.* / redirect'\n            - rewrite: '^/pipeline_instances.* / redirect'\n\n            - location /:\n              - root: /var/www/arvados-workbench2/workbench2\n              - try_files: '$uri $uri/ /index.html'\n              - 'if (-f $document_root/maintenance.html)':\n                - return: 503\n            - location /config.json:\n              - return: {{ \"200 '\" ~ '{\"API_HOST\":\"__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__\"}' ~ \"'\" }}\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - access_log: /var/log/nginx/workbench.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/workbench.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/postgresql.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- set domain = \"__DOMAIN__\" %}\n{%- set controller_nodes = \"__CONTROLLER_NODES__\".split(\",\") %}\n{%- set pg_client_ipaddrs = [\"__KEEPBALANCE_INT_IP__\",\"__KEEPWEB_INT_IP__\",\"__WEBSOCKET_INT_IP__\"] %}\n{%- set pg_version = \"__DATABASE_POSTGRESQL_VERSION__\" %}\n\n### POSTGRESQL\npostgres:\n  use_upstream_repo: true\n  version: {{ pg_version }}\n  postgresconf: |-\n    listen_addresses = '*'  # listen on all interfaces\n  acls:\n    - ['local', 'all', 'postgres', 'peer']\n    - ['local', 'all', 'all', 'peer']\n    - ['host', 'all', 'all', '127.0.0.1/32', 'md5']\n    - ['host', 'all', 'all', '::1/128', 'md5']\n    - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '127.0.0.1/32']\n    {%- for client_ipaddr in pg_client_ipaddrs | unique | list %}\n    - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '{{ client_ipaddr }}/32']\n    {%- endfor %}\n    {%- for controller_hostname in controller_nodes %}\n    {%- set controller_ip = salt['cmd.run'](\"getent hosts \"+controller_hostname+\" | awk '{print $1 ; exit}'\", python_shell=True) %}\n    - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '{{ controller_ip }}/32']\n    {%- endfor %}\n  users:\n    __CLUSTER___arvados:\n      ensure: present\n      password: \"__DATABASE_PASSWORD__\"\n    prometheus:\n      ensure: present\n  databases:\n    __CLUSTER___arvados:\n      owner: __CLUSTER___arvados\n      template: template0\n      lc_ctype: en_US.utf8\n      lc_collate: en_US.utf8\n      schemas:\n        public:\n          owner: __CLUSTER___arvados\n      extensions:\n        pg_trgm:\n          if_not_exists: true\n          schema: public\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/postgresql_external.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\npostgresql_external_service:\n  db_host: \"__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__\"\n  db_port: 5432\n  db_name: \"__DATABASE_NAME__\"\n  db_user: \"__DATABASE_USER__\"\n  db_password: \"__DATABASE_PASSWORD__\"\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_node_exporter.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n### PROMETHEUS\nprometheus:\n  wanted:\n    component:\n      - node_exporter\n  pkg:\n    use_upstream_repo: true\n    component:\n      node_exporter:\n        service:\n          args:\n            collector.textfile.directory: /var/lib/prometheus/node-exporter\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_pg_exporter.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nprometheus_pg_exporter:\n  enabled: true\n\n### PROMETHEUS\nprometheus:\n  wanted:\n    component:\n      - postgres_exporter\n      - node_exporter\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/prometheus_server.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- set controller_nodes = \"__CONTROLLER_NODES__\".split(',') %}\n{%- set enable_balancer = (\"__ENABLE_BALANCER__\"|to_bool) %}\n{%- set data_retention_time = \"__PROMETHEUS_DATA_RETENTION_TIME__\" %}\n\n### PROMETHEUS\nprometheus:\n  wanted:\n    component:\n      - prometheus\n      - alertmanager\n      - node_exporter\n      - blackbox_exporter\n  pkg:\n    use_upstream_repo: false\n    use_upstream_archive: true\n    component:\n      blackbox_exporter:\n        config_file: /etc/prometheus/blackbox_exporter.yml\n        config:\n          modules:\n            http_2xx:\n              prober: http\n              timeout: 5s\n              http:\n                valid_http_versions: [HTTP/1.1, HTTP/2]\n                valid_status_codes: [200]\n                method: GET\n                tls_config:\n                  insecure_skip_verify: true # Avoid failures on self-signed certs\n                fail_if_ssl: false\n                fail_if_not_ssl: true\n            http_2xx_mngmt_token:\n              prober: http\n              timeout: 5s\n              http:\n                valid_http_versions: [HTTP/1.1, HTTP/2]\n                valid_status_codes: [200]\n                method: GET\n                bearer_token: __MANAGEMENT_TOKEN__\n                tls_config:\n                  insecure_skip_verify: true # Avoid failures on self-signed certs\n                fail_if_ssl: false\n                fail_if_not_ssl: true\n            http_2xx_basic_auth:\n              prober: http\n              timeout: 5s\n              http:\n                valid_http_versions: [HTTP/1.1, HTTP/2]\n                valid_status_codes: [200]\n                method: GET\n                basic_auth:\n                  username: \"__MONITORING_USERNAME__\"\n                  password: \"__MONITORING_PASSWORD__\"\n                tls_config:\n                  insecure_skip_verify: true # Avoid failures on self-signed certs\n                fail_if_ssl: false\n                fail_if_not_ssl: true\n      prometheus:\n        service:\n           args:\n             storage.tsdb.retention.time: {{ data_retention_time }}\n        config:\n          global:\n            scrape_interval: 15s\n            evaluation_interval: 15s\n          rule_files:\n            - rules.yml\n\n          scrape_configs:\n            - job_name: prometheus\n              # metrics_path defaults to /metrics\n              # scheme defaults to http.\n              static_configs:\n              - targets: ['localhost:9090']\n                labels:\n                  instance: mon.__CLUSTER__\n                  cluster: __CLUSTER__\n\n            - job_name: http_probe\n              metrics_path: /probe\n              params:\n                module: [http_2xx]\n              static_configs:\n                - targets: ['https://workbench.__DOMAIN__']\n                  labels:\n                    instance: workbench.__CLUSTER__\n                - targets: ['https://workbench2.__DOMAIN__']\n                  labels:\n                    instance: workbench2.__CLUSTER__\n                - targets: ['https://webshell.__DOMAIN__']\n                  labels:\n                    instance: webshell.__CLUSTER__\n              relabel_configs:\n                - source_labels: [__address__]\n                  target_label: __param_target\n                - source_labels: [__param_target]\n                  target_label: instance\n                - target_label: __address__\n                  replacement: 127.0.0.1:9115          # blackbox exporter.\n\n            - job_name: http_probe_mngmt_token\n              metrics_path: /probe\n              params:\n                module: [http_2xx_mngmt_token]\n              static_configs:\n                - targets: ['https://__DOMAIN__/_health/ping']\n                  labels:\n                    instance: controller.__CLUSTER__\n                - targets: ['https://download.__DOMAIN__/_health/ping']\n                  labels:\n                    instance: download.__CLUSTER__\n                - targets: ['https://ws.__DOMAIN__/_health/ping']\n                  labels:\n                    instance: ws.__CLUSTER__\n              relabel_configs:\n                - source_labels: [__address__]\n                  target_label: __param_target\n                - source_labels: [__param_target]\n                  target_label: instance\n                - target_label: __address__\n                  replacement: 127.0.0.1:9115          # blackbox exporter.\n\n            - job_name: http_probe_basic_auth\n              metrics_path: /probe\n              params:\n                module: [http_2xx_basic_auth]\n              static_configs:\n                - targets: ['https://grafana.__DOMAIN__']\n                  labels:\n                    instance: grafana.__CLUSTER__\n                - targets: ['https://prometheus.__DOMAIN__']\n                  labels:\n                    instance: prometheus.__CLUSTER__\n              relabel_configs:\n                - source_labels: [__address__]\n                  target_label: __param_target\n                - source_labels: [__param_target]\n                  target_label: instance\n                - target_label: __address__\n                  replacement: 127.0.0.1:9115          # blackbox exporter.\n\n            ## Arvados unique jobs\n            - job_name: arvados_ws\n              bearer_token: __MANAGEMENT_TOKEN__\n              scheme: https\n              static_configs:\n                - targets: ['ws.__DOMAIN__:443']\n                  labels:\n                    instance: ws.__CLUSTER__\n                    cluster: __CLUSTER__\n            - job_name: arvados_controller\n              bearer_token: __MANAGEMENT_TOKEN__\n              {%- if enable_balancer %}\n              scheme: http\n              {%- else %}\n              scheme: https\n              {%- endif %}\n              static_configs:\n                {%- if enable_balancer %}\n                  {%- for controller in controller_nodes %}\n                - targets: ['{{ controller }}']\n                  labels:\n                    instance: {{ controller.split('.')[0] }}.__CLUSTER__\n                    cluster: __CLUSTER__\n                  {%- endfor %}\n                {%- else %}\n                - targets: ['__DOMAIN__:443']\n                  labels:\n                    instance: controller.__CLUSTER__\n                    cluster: __CLUSTER__\n                {%- endif %}\n            - job_name: keep_web\n              bearer_token: __MANAGEMENT_TOKEN__\n              scheme: https\n              static_configs:\n                - targets: ['keep.__DOMAIN__:443']\n                  labels:\n                    instance: keep-web.__CLUSTER__\n                    cluster: __CLUSTER__\n            - job_name: keep_balance\n              bearer_token: __MANAGEMENT_TOKEN__\n              static_configs:\n                - targets: ['__KEEPBALANCE_INT_IP__:9005']\n                  labels:\n                    instance: keep-balance.__CLUSTER__\n                    cluster: __CLUSTER__\n            - job_name: keepstore\n              bearer_token: __MANAGEMENT_TOKEN__\n              static_configs:\n                - targets: ['__KEEPSTORE0_INT_IP__:25107']\n                  labels:\n                    instance: keep0.__CLUSTER__\n                    cluster: __CLUSTER__\n            - job_name: arvados_dispatch_cloud\n              bearer_token: __MANAGEMENT_TOKEN__\n              static_configs:\n                - targets: ['__DISPATCHER_INT_IP__:9006']\n                  labels:\n                    instance: arvados-dispatch-cloud.__CLUSTER__\n                    cluster: __CLUSTER__\n\n            {%- if \"__DATABASE_INT_IP__\" != \"\" %}\n            # Database\n            - job_name: postgresql\n              static_configs:\n                - targets: [\n                    '__DATABASE_INT_IP__:9187',\n                    '__DATABASE_INT_IP__:3903'\n                  ]\n                  labels:\n                    instance: database.__CLUSTER__\n                    cluster: __CLUSTER__\n            {%- endif %}\n\n            # Nodes\n            {%- set node_list = \"__NODELIST__\".split(',') %}\n            {%- set nodes = [] %}\n            {%- for node in node_list %}\n              {%- set _ = nodes.append(node.split('.')[0]) %}\n            {%- endfor %}\n            - job_name: node\n              static_configs:\n                {% for node in nodes %}\n                - targets: [ \"{{ node }}.__DOMAIN__:9100\" ]\n                  labels:\n                    instance: \"{{ node }}.__CLUSTER__\"\n                    cluster: __CLUSTER__\n                {% endfor %}\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/pillars/ssl_key_encrypted.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nssl_key_encrypted:\n  enabled: __SSL_KEY_ENCRYPTED__\n  aws_secret_name: __SSL_KEY_AWS_SECRET_NAME__\n  aws_region: __SSL_KEY_AWS_REGION__\n  privkey_password_filename: ssl-privkey-password\n  privkey_password_script: /usr/local/sbin/password_secret_connector.sh\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/alloy_install.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set alloy = pillar.get('alloy', {'enabled': False}) %}\n\n{%- if alloy.enabled %}\nextra_grafana_package_repo:\n  pkgrepo.managed:\n    - humanname: grafana_official\n    - name: deb https://apt.grafana.com/ stable main\n    - file: /etc/apt/sources.list.d/grafana.list\n    - key_url: https://apt.grafana.com/gpg.key\n\nextra_install_alloy:\n  pkg.installed:\n    - name: {{ alloy.package }}\n    - refresh: true\n    - require:\n      - pkgrepo: extra_grafana_package_repo\n\nextra_alloy_config:\n  file.managed:\n    - name: {{ alloy.config_path }}\n    - contents: {{ alloy.config_contents | yaml_dquote }}\n    - mode: '0640'\n    - user: alloy\n    - group: root\n    - require:\n      - pkg: extra_install_alloy\n\nextra_alloy_service:\n  service.running:\n    - name: {{ alloy.service }}\n    - enable: true\n    - require:\n      - pkg: extra_install_alloy\n      - file: extra_alloy_config\n    - watch:\n      - file: extra_alloy_config\n{%- endif %}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/aws_credentials.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- set aws_credentials = pillar.get('aws_credentials', {}) %}\n\n{%- if aws_credentials %}\nextra_extra_aws_credentials_root_aws_config_file_managed:\n  file.managed:\n    - name: /root/.aws/config\n    - makedirs: true\n    - user: root\n    - group: root\n    - mode: '0600'\n    - replace: false\n    - contents: |\n        [default]\n        region= {{ aws_credentials.region }}\n\nextra_extra_aws_credentials_root_aws_credentials_file_managed:\n  file.managed:\n    - name: /root/.aws/credentials\n    - makedirs: true\n    - user: root\n    - group: root\n    - mode: '0600'\n    - replace: false\n    - contents: |\n        [default]\n        aws_access_key_id = {{ aws_credentials.access_key_id }}\n        aws_secret_access_key = {{ aws_credentials.secret_access_key }}\n{%- endif %}\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/custom_certs.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set orig_cert_dir = salt['pillar.get']('extra_custom_certs_dir', '/srv/salt/certs')  %}\n{%- set dest_cert_dir = '/etc/nginx/ssl' %}\n{%- set certs = salt['pillar.get']('extra_custom_certs', [])  %}\n\n{% if certs %}\nextra_custom_certs_file_directory_certs_dir:\n  file.directory:\n    - name: /etc/nginx/ssl\n    - user: root\n    - group: root\n    - dir_mode: 0750\n    - file_mode: 0640\n    - require:\n      - pkg: nginx_install\n    - recurse:\n      - user\n      - group\n      - mode\n\n  {%- for cert in certs %}\n    {%- set cert_file = 'arvados-' ~ cert ~ '.pem' %}\n    {%- set key_file = 'arvados-' ~ cert ~ '.key' %}\nextra_custom_certs_{{ cert }}_cert_file_copy:\n  file.copy:\n    - name: {{ dest_cert_dir }}/{{ cert_file }}\n    - source: {{ orig_cert_dir }}/{{ cert_file }}\n    - force: true\n    - user: root\n    - group: root\n    - mode: 0640\n    - unless: cmp {{ dest_cert_dir }}/{{ cert_file }} {{ orig_cert_dir }}/{{ cert_file }}\n    - require:\n      - file: extra_custom_certs_file_directory_certs_dir\n    - watch_in:\n      - service: extra_nginx_service_reload_on_certs_changes\n\nextra_custom_certs_{{ cert }}_key_file_copy:\n  file.copy:\n    - name: {{ dest_cert_dir }}/{{ key_file }}\n    - source: {{ orig_cert_dir }}/{{ key_file }}\n    - force: true\n    - user: root\n    - group: root\n    - mode: 0640\n    - unless: cmp {{ dest_cert_dir }}/{{ key_file }} {{ orig_cert_dir }}/{{ key_file }}\n    - require:\n      - file: extra_custom_certs_file_directory_certs_dir\n    - watch_in:\n      - service: extra_nginx_service_reload_on_certs_changes\n  {%- endfor %}\n\nextra_nginx_service_reload_on_certs_changes:\n  service.running:\n    - name: nginx\n    - enable: True\n    - reload: True\n{%- endif %}\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/grafana_admin_user.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set grafana_server = salt['pillar.get']('grafana', {}) %}\n\n{%- if grafana_server %}\nextra_grafana_admin_user:\n  cmd.run:\n    - name: grafana-cli admin reset-admin-password {{ grafana_server.config.security.admin_password }}\n    - require:\n      - service: grafana-service-running-service-running\n{%- endif %}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/grafana_dashboards.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set grafana_server = salt['pillar.get']('grafana', {}) %}\n{%- set grafana_dashboards_orig_dir = '/srv/salt/dashboards' %}\n{%- set grafana_dashboards_dest_dir = '/var/lib/grafana/dashboards' %}\n\n{%- if grafana_server %}\nextra_grafana_dashboard_directory:\n  file.directory:\n    - name: {{ grafana_dashboards_dest_dir }}\n    - require:\n      - pkg: grafana-package-install-pkg-installed\n\nextra_grafana_dashboard_default_yaml:\n  file.managed:\n    - name: /etc/grafana/provisioning/dashboards/default.yaml\n    - contents: |\n        apiVersion: 1\n        providers:\n          - name: 'General'\n            folder: 'Arvados Cluster'\n            type: file\n            options:\n              path: {{ grafana_dashboards_dest_dir }}\n    - require:\n      - pkg: grafana-package-install-pkg-installed\n      - file: extra_grafana_dashboard_directory\n\nextra_grafana_dashboard_files:\n  file.copy:\n    - name: {{ grafana_dashboards_dest_dir }}\n    - source: {{ grafana_dashboards_orig_dir }}\n    - force: true\n    - recurse: true\n    - require:\n      - file: extra_grafana_dashboard_default_yaml\n\nextra_grafana_dashboards_service_restart:\n  cmd.run:\n    - name: systemctl restart grafana-server\n    - require:\n      - file: extra_grafana_dashboard_default_yaml\n    - onchanges:\n      - file: extra_grafana_dashboard_default_yaml\n      - file: extra_grafana_dashboard_files\n{%- endif %}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/grafana_datasource.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set grafana_server = salt['pillar.get']('grafana', {}) %}\n\n{%- if grafana_server %}\nextra_grafana_datasource_prometheus:\n  file.managed:\n    - name: /etc/grafana/provisioning/datasources/prometheus.yaml\n    - contents: |\n        apiVersion: 1\n        datasources:\n          - name: Prometheus\n            type: prometheus\n            uid: ArvadosPromDataSource\n            url: http://127.0.0.1:9090\n            is_default: true\n    - require:\n      - pkg: grafana-package-install-pkg-installed\n\nextra_grafana_datasource_loki:\n  file.managed:\n    - name: /etc/grafana/provisioning/datasources/loki.yaml\n    - contents: |\n        apiVersion: 1\n        datasources:\n          - name: Loki\n            type: loki\n            uid: ArvadosLokiDataSource\n            url: http://127.0.0.1:3100\n    - require:\n      - pkg: grafana-package-install-pkg-installed\n\n  cmd.run:\n    - name: systemctl restart grafana-server\n    - require:\n      - file: extra_grafana_datasource_prometheus\n      - file: extra_grafana_datasource_loki\n    - onchanges:\n      - file: extra_grafana_datasource_prometheus\n      - file: extra_grafana_datasource_loki\n{%- endif %}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/host_entries.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- set curr_tpldir = tpldir %}\n{%- set tpldir = 'arvados' %}\n{%- from \"arvados/map.jinja\" import arvados with context %}\n{%- set tpldir = curr_tpldir %}\n\n#CRUDE, but functional\n\n{%- if \"__DATABASE_INT_IP__\" != \"\" %}\nextra_extra_hosts_entries_etc_hosts_database_host_present:\n  host.present:\n    - ip: __DATABASE_INT_IP__\n    - names:\n      - db.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n      - database.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n{%- endif %}\n\nextra_extra_hosts_entries_etc_hosts_api_host_present:\n  host.present:\n    - ip: __CONTROLLER_INT_IP__\n    - names:\n      - {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n\nextra_extra_hosts_entries_etc_hosts_websocket_host_present:\n  host.present:\n    - ip: __CONTROLLER_INT_IP__\n    - names:\n      - ws.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n\nextra_extra_hosts_entries_etc_hosts_workbench_host_present:\n  host.present:\n    - ip: __WORKBENCH1_INT_IP__\n    - names:\n      - workbench.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n\nextra_extra_hosts_entries_etc_hosts_workbench2_host_present:\n  host.present:\n    - ip: __WORKBENCH1_INT_IP__\n    - names:\n      - workbench2.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n\nextra_extra_hosts_entries_etc_hosts_keepproxy_host_present:\n  host.present:\n    - ip: __KEEP_INT_IP__\n    - names:\n      - keep.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n\nextra_extra_hosts_entries_etc_hosts_keepweb_host_present:\n  host.present:\n    - ip: __KEEP_INT_IP__\n    - names:\n      - download.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n      - collections.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n\nextra_extra_hosts_entries_etc_hosts_webshell_host_present:\n  host.present:\n    - ip: __WEBSHELL_INT_IP__\n    - names:\n      - webshell.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n\nextra_extra_hosts_entries_etc_hosts_shell_host_present:\n  host.present:\n    - ip: __SHELL_INT_IP__\n    - names:\n      - shell.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n\nextra_extra_hosts_entries_etc_hosts_keep0_host_present:\n  host.present:\n    - ip: __KEEPSTORE0_INT_IP__\n    - names:\n      - keep0.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/loki_install.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set loki = pillar.get('loki', {'enabled': False}) %}\n\n{%- if loki.enabled %}\nextra_install_loki:\n  pkg.installed:\n    - name: {{ loki['package'] }}\n    - refresh: true\n    - require:\n      - pkgrepo: grafana-package-repo-install-pkgrepo-managed\n\nextra_loki_config:\n  file.managed:\n    - name: {{ loki['config_path'] }}\n    - contents: {{ loki['config_contents'] | yaml_dquote }}\n    - mode: '0640'\n    - user: loki\n    - group: root\n    - require:\n      - pkg: extra_install_loki\n\nextra_loki_data_dir:\n  file.directory:\n    - name: {{ loki['data_path'] }}\n    - user: loki\n    - mode: '0750'\n    - require:\n      - pkg: extra_install_loki\n\nextra_loki_service:\n  service.running:\n    - name: {{ loki['service'] }}\n    - enable: true\n    - require:\n      - pkg: extra_install_loki\n      - file: extra_loki_config\n      - file: extra_loki_data_dir\n    - watch:\n      - file: extra_loki_config\n{%- endif %}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/nginx_prometheus_configuration.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- if salt['pillar.get']('nginx:servers:managed:prometheus-ssl') %}\n\nextra_nginx_prometheus_conf_user___MONITORING_USERNAME__:\n  webutil.user_exists:\n    - name: __MONITORING_USERNAME__\n    - password: {{ \"__MONITORING_PASSWORD__\" | yaml_dquote }}\n    - htpasswd_file: /etc/nginx/htpasswd\n    - options: d\n    - force: true\n    - require:\n      - pkg: extra_nginx_prometheus_conf_pkgs\n      - pkg: nginx_install\n\nextra_nginx_prometheus_conf_pkgs:\n  pkg.installed:\n    - name: apache2-utils\n\n{%- endif %}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/passenger_rvm.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- if grains.os_family in ('RedHat',) %}\n  {%- set group = 'nginx' %}\n{%- else %}\n  {%- set group = 'www-data' %}\n{%- endif %}\n\n# Make sure that /var/www/.passenger exists with the proper ownership\n# so that passenger can build passenger_native_support.so\nextra_var_www_passenger:\n  file.directory:\n    - name: /var/www/.passenger\n    - user: {{ group }}\n    - group: {{ group }}\n    - mode: '0755'\n    - makedirs: True\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/postgresql_external.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- set pg_svc = pillar.get('postgresql_external_service', {}) %}\n\n{%- if pg_svc %}\n__CLUSTER___external_trgm_extension:\n  postgres_extension.present:\n    - name: pg_trgm\n    - if_not_exists: true\n    - schema: public\n    - db_host: {{ pg_svc.db_host }}\n    - db_port: 5432\n    - db_user: {{ pg_svc.db_user }}\n    - db_password: {{ pg_svc.db_password }}\n    - require:\n      - pkg: postgresql-client-libs\n{%- endif %}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/prometheus_pg_exporter.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set prometheus_pg_exporter = pillar.get('prometheus_pg_exporter', {'enabled': False}) %}\n\n{%- if prometheus_pg_exporter.enabled %}\n### PACKAGES\nmonitoring_required_pkgs:\n  pkg.installed:\n    - name: mtail\n\n### FILES\nprometheus_pg_exporter_etc_default:\n  file.managed:\n    - name: /etc/default/prometheus-postgres-exporter\n    - contents: |\n        ### This file managed by Salt, do not edit by hand!!\n        #\n        # For details, check /usr/share/doc/prometheus-postgres-exporter/README.Debian\n        DATA_SOURCE_NAME='user=prometheus host=/run/postgresql dbname=postgres'\n    - require:\n      - pkg: prometheus-package-install-postgres_exporter-installed\n\nmtail_postgresql_conf:\n  file.managed:\n    - name: /etc/mtail/postgresql.mtail\n    - contents: |\n        ########################################################################\n        # File managed by Salt.\n        # Your changes will be overwritten.\n        ########################################################################\n\n        # Parser for postgresql's log statement duration\n\n        gauge postgresql_statement_duration_seconds by statement\n\n        /^/ +\n        /(?P<timestamp>\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2} (\\w+)) / + # 2019-01-16 16:53:45 GMT\n        /LOG: +duration: / +\n        /(?P<duration>[0-9\\.]+) ms/ + # 153.967 ms\n        /(.*?): (?P<statement>.+)/ + # statement: SELECT COUNT(*) FROM (SELECT rolname FROM pg_roles WHERE rolname='arvados') count\n        /$/ {\n          strptime($timestamp, \"2006-01-02 15:04:05 MST\") # for tests\n\n          postgresql_statement_duration_seconds[$statement] = $duration / 1000\n        }\n    - require:\n      - pkg: monitoring_required_pkgs\n\nmtail_etc_default:\n  file.managed:\n    - name: /etc/default/mtail\n    - contents: |\n        ### This file managed by Salt, do not edit by hand!!\n        #\n        ENABLED=true\n        # List of files to monitor (mandatory).\n        LOGS=/var/log/postgresql/postgresql*log\n    - require:\n      - pkg: monitoring_required_pkgs\n\n### SERVICES\nprometheus_pg_exporter_service:\n  service.running:\n    - name: prometheus-postgres-exporter\n    - enable: true\n    - require:\n      - pkg: prometheus-package-install-postgres_exporter-installed\n    - watch:\n      - file: /etc/default/prometheus-postgres-exporter\n\nmtail_service:\n  service.running:\n    - name: mtail\n    - enable: true\n    - require:\n      - pkg: monitoring_required_pkgs\n    - watch:\n      - file: mtail_postgresql_conf\n      - file: mtail_etc_default\n{%- endif %}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/railsapi_passenger_configs.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set _workers = (\"__CONTROLLER_MAX_WORKERS__\" or grains['num_cpus']*2)|int %}\n{%- set max_workers = [_workers, 8]|max %}\n\n{%- if salt['pillar.get']('nginx:servers:managed:arvados_controller_default.conf') %}\n\n# Make the passenger queue small (twice the concurrency, so\n# there's at most one pending request for each busy worker)\n# because controller reorders requests based on priority, and\n# won't send more than API.MaxConcurrentRailsRequests to passenger\n# (which is max_workers * 2), so things that are moved to the head\n# of the line get processed quickly.\nextra_railsapi_passenger_configs:\n  file.managed:\n    - name: /etc/systemd/system/arvados-railsapi.service.d/26-installer.conf\n    - contents: |\n        ### This file managed by Salt, do not edit by hand!!\n        [Service]\n        Environment=PASSENGER_MAX_POOL_SIZE={{ max_workers }}\n        Environment=PASSENGER_MAX_REQUEST_QUEUE_SIZE={{ max_workers * 2 + 1 }}\n    - user: root\n    - group: root\n    - mode: '0644'\n    - makedirs: True\n    - require_in:\n      - service: arvados-api-service-running-service-running\n    - watch_in:\n      - cmd: extra_systemd_daemon_reload\n      - service: arvados-api-service-running-service-running\n\nextra_systemd_daemon_reload:\n  cmd.run:\n    - name: systemctl daemon-reload\n\n\n{%- endif %}\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/shell_cron_add_login_sync.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# This state tries to query the controller using the parameters set in\n# the `arvados.cluster.resources.virtual_machines` pillar, to get the\n# ARVADOS_VIRTUAL_MACHINE_UUID for the host and configure the arvados login-sync cron\n# as described in https://doc.arvados.org/main/install/install-shell-server.html\n\n{%- set curr_tpldir = tpldir %}\n{%- set tpldir = 'arvados' %}\n{%- from \"arvados/map.jinja\" import arvados with context %}\n{%- from \"arvados/libtofs.jinja\" import files_switch with context %}\n{%- set tpldir = curr_tpldir %}\n\n{%- set virtual_machines = arvados.cluster.resources.virtual_machines | default({}) %}\n{%- set api_token = arvados.cluster.tokens.system_root | yaml_encode %}\n{%- set api_host = arvados.cluster.Services.Controller.ExternalURL | regex_replace('^http(s?)://', '', ignorecase=true) %}\n\nextra_shell_cron_add_login_sync_add_jq_pkg_installed:\n  pkg.installed:\n    - name: jq\n\n{%- for vm, vm_params in virtual_machines.items() %}\n  {%- set vm_name = vm_params.name | default(vm) %}\n\n  # Check if any of the specified virtual_machines parameters corresponds to this instance\n  # It should be an error if we get more than one occurrence\n  {%- if vm_name in [grains['id'], grains['host'], grains['fqdn'], grains['nodename']] or\n         vm_params.backend in [grains['id'], grains['host'], grains['fqdn'], grains['nodename']] +\n                               grains['ipv4'] + grains['ipv6'] %}\n\n    # We need to query the VM UUID\n    {%- set cmd_query_vm_uuid = 'arv --short virtual_machine list' ~\n                                ' --filters \\'[[\"hostname\", \"=\", \"' ~ vm_name ~ '\"]]\\''\n    %}\n\nextra_shell_cron_add_login_sync_add_{{ vm }}_get_vm_uuid_cmd_run:\n  cmd.run:\n    - env:\n      - ARVADOS_API_TOKEN: {{ api_token }}\n      - ARVADOS_API_HOST: {{ api_host }}\n      - ARVADOS_API_HOST_INSECURE: {{ arvados.cluster.tls.insecure | default(false) }}\n    - name: {{ cmd_query_vm_uuid }} | head -1 | tee /tmp/vm_uuid_{{ vm }}\n    - unless:\n      - /bin/grep -qE \"[a-z0-9]{5}-2x53u-[a-z0-9]{15}\" /tmp/vm_uuid_{{ vm }}\n    - require:\n      - gem: arvados-shell-package-install-gem-arvados-cli-installed\n\nextra_shell_cron_add_login_sync_add_{{ vm }}_arvados_api_host_cron_env_present:\n  cron.env_present:\n    - name: ARVADOS_API_HOST\n    - value: {{ api_host }}\n    - onlyif:\n      - /bin/grep -qE \"[a-z0-9]{5}-2x53u-[a-z0-9]{15}\" /tmp/vm_uuid_{{ vm }}\n\nextra_shell_cron_add_login_sync_add_{{ vm }}_arvados_api_token_cron_env_present:\n  cron.env_present:\n    - name: ARVADOS_API_TOKEN\n    - value: {{ api_token }}\n    - onlyif:\n      - /bin/grep -qE \"[a-z0-9]{5}-2x53u-[a-z0-9]{15}\" /tmp/vm_uuid_{{ vm }}\n\nextra_shell_cron_add_login_sync_add_{{ vm }}_arvados_api_host_insecure_cron_env_present:\n  cron.env_present:\n    - name: ARVADOS_API_HOST_INSECURE\n    - value: {{ arvados.cluster.tls.insecure | default(false) }}\n    - onlyif:\n      - /bin/grep -qE \"[a-z0-9]{5}-2x53u-[a-z0-9]{15}\" /tmp/vm_uuid_{{ vm }}\n\nextra_shell_cron_add_login_sync_add_{{ vm }}_arvados_virtual_machine_uuid_cron_env_present:\n  cron.env_present:\n    - name: ARVADOS_VIRTUAL_MACHINE_UUID\n    - value: __slot__:salt:cmd.run(\"cat /tmp/vm_uuid_{{ vm }}\")\n    - onlyif:\n      - /bin/grep -qE \"[a-z0-9]{5}-2x53u-[a-z0-9]{15}\" /tmp/vm_uuid_{{ vm }}\n\nextra_shell_cron_add_login_sync_add_{{ vm }}_sbin_to_path_cron_env_present:\n  cron.env_present:\n 
   - name: PATH\n    - value: \"/bin:/usr/bin:/usr/sbin\"\n    - onlyif:\n      - /bin/grep -qE \"[a-z0-9]{5}-2x53u-[a-z0-9]{15}\" /tmp/vm_uuid_{{ vm }}\n\nextra_shell_cron_add_login_sync_add_{{ vm }}_arvados_login_sync_cron_present:\n  cron.present:\n    - name: /usr/local/bin/arvados-login-sync\n    - minute: '*/2'\n    - onlyif:\n      - /bin/grep -qE \"[a-z0-9]{5}-2x53u-[a-z0-9]{15}\" /tmp/vm_uuid_{{ vm }}\n\n  {%- endif %}\n{%- endfor %}\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/shell_sudo_passwordless.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set curr_tpldir = tpldir %}\n{%- set tpldir = 'arvados' %}\n{%- from \"arvados/map.jinja\" import arvados with context %}\n{%- set tpldir = curr_tpldir %}\n\nextra_shell_sudo_passwordless_sudo_pkg_installed:\n  pkg.installed:\n    - name: sudo\n\nextra_shell_sudo_passwordless_config_file_managed:\n  file.managed:\n    - name: /etc/sudoers.d/arvados_passwordless\n    - makedirs: true\n    - user: root\n    - group: root\n    - mode: '0440'\n    - replace: false\n    - contents: |\n        # This file managed by Salt, do not edit by hand!!\n        # Allow members of group sudo to execute any command without password\n        %sudo ALL=(ALL:ALL) NOPASSWD:ALL\n    - require:\n      - pkg: extra_shell_sudo_passwordless_sudo_pkg_installed\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/ssl_key_encrypted.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set ssl_key_encrypted = pillar.get('ssl_key_encrypted', {'enabled': False}) %}\n\n{%- if ssl_key_encrypted.enabled %}\n\nextra_ssl_key_encrypted_required_pkgs:\n  pkg.installed:\n    - name: jq\n    - name: awscli\n\nextra_ssl_key_encrypted_password_retrieval_script:\n  file.managed:\n    - name: {{ ssl_key_encrypted.privkey_password_script }}\n    - user: root\n    - group: root\n    - mode: '0750'\n    - require:\n      - pkg: extra_ssl_key_encrypted_required_pkgs\n    - contents: |\n        #!/bin/bash\n\n        # RUNTIME_DIRECTORY is provided by systemd.\n        # NOTE: We assume systemd's set up in a way that there's just one\n        # runtime dir for this particular unit, otherwise this variable could\n        # contain multiple paths separated by a colon.\n        PASSWORD_FILE=\"${RUNTIME_DIRECTORY}/{{ ssl_key_encrypted.privkey_password_filename }}\"\n\n        while [ true ]; do\n          # AWS_SHARED_CREDENTIALS_FILE is set to /dev/null to avoid AWS's CLI\n          # loading invalid credentials on nodes who use ~/.aws/credentials for other\n          # purposes (e.g.: the dispatcher credentials)\n          # Access to the secrets manager is given by using an instance profile.\n          AWS_SHARED_CREDENTIALS_FILE=/dev/null aws secretsmanager get-secret-value --secret-id '{{ ssl_key_encrypted.aws_secret_name }}' --region '{{ ssl_key_encrypted.aws_region }}' | jq -r .SecretString > \"${PASSWORD_FILE}\"\n          sleep 1\n        done\n\nextra_ssl_key_encrypted_password_retrieval_service_unit:\n  file.managed:\n    - name: /etc/systemd/system/password_secret_connector.service\n    - user: root\n    - group: root\n    - mode: '0644'\n    - require:\n      - file: extra_ssl_key_encrypted_password_retrieval_script\n    - contents: |\n        [Unit]\n        Description=Arvados SSL private key password retrieval service\n        After=network.target\n        [Service]\n        # WARNING: the script below assumes that RuntimeDirectory only holds one\n        # path value, won't work with multiple paths.\n        RuntimeDirectory=arvados\n        ExecStartPre=/usr/bin/mkfifo --mode=0600 {{ ('%t/arvados/' ~ ssl_key_encrypted.privkey_password_filename) | yaml_dquote }}\n        ExecStart=/bin/bash {{ ssl_key_encrypted.privkey_password_script | yaml_dquote }}\n        [Install]\n        WantedBy=multi-user.target\n\nextra_ssl_key_encrypted_password_retrieval_service:\n  service.running:\n    - name: password_secret_connector\n    - enable: true\n    - require:\n      - file: extra_ssl_key_encrypted_password_retrieval_service_unit\n    - watch:\n      - file: extra_ssl_key_encrypted_password_retrieval_service_unit\n      - file: extra_ssl_key_encrypted_password_retrieval_script\n\n{%- endif %}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/states/workbench1_uninstall.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set curr_tpldir = tpldir %}\n{%- set tpldir = 'arvados' %}\n{%- from \"arvados/map.jinja\" import arvados with context %}\n{%- set tpldir = curr_tpldir %}\n\nworkbench1_pkg_removed:\n  pkg.removed:\n    - name: {{ arvados.workbench.pkg.name }}"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/tofs/arvados/shell/config/files/default/shell-pam-shellinabox.tmpl.jinja",
    "content": "{#\n##########################################################\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n#}\n########################################################################\n# File managed by Salt at <{{ source }}>.\n# Your changes will be overwritten.\n########################################################################\nauth       optional   pam_faildelay.so  delay=3000000\nauth [success=ok new_authtok_reqd=ok ignore=ignore user_unknown=bad default=die] pam_securetty.so\nauth       requisite  pam_nologin.so\nsession [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close\nsession       required   pam_env.so readenv=1\nsession       required   pam_env.so readenv=1 envfile=/etc/default/locale\n\n# yamllint disable rule:line-length\nauth [success=1 default=ignore] /usr/lib/pam_arvados.so {{ arvados.cluster.domain }} shell.{{ arvados.cluster.domain }}\n# yamllint enable rule:line-length\nauth    requisite            pam_deny.so\nauth    required            pam_permit.so\n\nauth       optional   pam_group.so\nsession    required   pam_limits.so\nsession    optional   pam_lastlog.so\nsession    optional   pam_motd.so  motd=/run/motd.dynamic\nsession    optional   pam_motd.so\nsession    optional   pam_mail.so standard\n\n@include common-account\n@include common-session\n@include common-password\n\nsession [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open\n"
  },
  {
    "path": "tools/salt-install/config_examples/multi_host/aws/tofs/arvados/shell/config/files/default/shell-shellinabox.tmpl.jinja",
    "content": "{#\n##########################################################\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n#}\n########################################################################\n# File managed by Salt at <{{ source }}>.\n# Your changes will be overwritten.\n########################################################################\n# Should shellinaboxd start automatically\nSHELLINABOX_DAEMON_START=1\n# TCP port that shellinboxd's webserver listens on\nSHELLINABOX_PORT={{ arvados.shell.shellinabox.service.port }}\n# SSL is disabled because it is terminated in Nginx. Adjust as needed.\nSHELLINABOX_ARGS=\"--disable-ssl --no-beep --service=/shell.{{ arvados.cluster.domain }}:AUTH:HOME:SHELL\"\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/README.md",
    "content": "Single host with multiple hostnames\n===================================\n\nThese files let you setup Arvados on a single host using different hostnames\nfor each of its components nginx's virtualhosts.\n\nThe hostnames are composed after the variables \"CLUSTER\" and \"DOMAIN\" set in\nthe `local.params` file.\n\nThe virtual hosts' hostnames that will be used are:\n\n* CLUSTER.DOMAIN\n* collections.CLUSTER.DOMAIN\n* download.CLUSTER.DOMAIN\n* keep.CLUSTER.DOMAIN\n* keep0.CLUSTER.DOMAIN\n* webshell.CLUSTER.DOMAIN\n* workbench.CLUSTER.DOMAIN\n* workbench2.CLUSTER.DOMAIN\n* ws.CLUSTER.DOMAIN\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/arvados.sls",
    "content": "# -*- coding: utf-8 -*-\n# vim: ft=yaml\n---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- set database_host = (\"__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__\" or \"127.0.0.1\") %}\n{%- set database_name = \"__DATABASE_NAME__\" %}\n{%- set database_user = \"__DATABASE_USER__\" %}\n{%- set database_password = \"__DATABASE_PASSWORD__\" %}\n\n# The variables commented out are the default values that the formula uses.\n# The uncommented values are REQUIRED values. If you don't set them, running\n# this formula will fail.\narvados:\n  ### GENERAL CONFIG\n  version: '__VERSION__'\n  ## It makes little sense to disable this flag, but you can, if you want :)\n  # use_upstream_repo: true\n\n  ## Repo URL is built with grains values. If desired, it can be completely\n  ## overwritten with the pillar parameter 'repo_url'\n  # repo:\n  #   humanname: Arvados Official Repository\n\n  release: __RELEASE__\n\n  ## IMPORTANT!!!!!\n  ## api, workbench and shell require some gems, so you need to make sure ruby\n  ## and deps are installed in order to install and compile the gems.\n  ## We default to `false` in these two variables as it's expected you already\n  ## manage OS packages with some other tool and you don't want us messing up\n  ## with your setup.\n  ruby:\n    ## We set these to `true` here for testing purposes.\n    ## They both default to `false`.\n    manage_ruby: true\n    manage_gems_deps: true\n    # pkg: ruby\n    # gems_deps:\n    #     - curl\n    #     - g++\n    #     - gcc\n    #     - git\n    #     - libcurl4-gnutls-dev\n    #     - libpq-dev\n    #     - libxml2\n    #     - libxml2-dev\n    #     - make\n    #     - python3-dev\n    #     - ruby-dev\n    #     - zlib1g-dev\n\n  config:\n    check_command: /usr/bin/arvados-server config-check -strict=false -config\n  #   file: /etc/arvados/config.yml\n  #   user: root\n  ## IMPORTANT!!!!!\n  ## If you're intalling any of the rails apps (api, workbench), the group\n  ## should be set to that of the web server, usually `www-data`\n  #   group: root\n  #   mode: 640\n\n  ### ARVADOS CLUSTER CONFIG\n  cluster:\n    name: __CLUSTER__\n    domain: __DOMAIN__\n\n    database:\n      # max concurrent connections per arvados server daemon\n      # connection_pool_max: 32\n      name: {{ database_name }}\n      host: {{ database_host }}\n      password: {{ database_password }}\n      user: {{ database_user }}\n      extra_conn_params:\n        client_encoding: UTF8\n\n    tls:\n      # certificate: ''\n      # key: ''\n      # When using arvados-snakeoil certs set insecure: true\n      insecure: false\n\n    resources:\n      virtual_machines:\n        shell:\n          name: webshell\n          backend: 127.0.0.1\n          port: 4200\n\n    ### TOKENS\n    tokens:\n      system_root: __SYSTEM_ROOT_TOKEN__\n      management: __MANAGEMENT_TOKEN__\n      anonymous_user: __ANONYMOUS_USER_TOKEN__\n\n    ### KEYS\n    secrets:\n      blob_signing_key: __BLOB_SIGNING_KEY__\n      workbench_secret_key: \"deprecated\"\n\n    Login:\n      Test:\n        Enable: true\n        Users:\n          __INITIAL_USER__:\n            Email: __INITIAL_USER_EMAIL__\n            Password: __INITIAL_USER_PASSWORD__\n\n    ### VOLUMES\n    ## This should usually match all your `keepstore` instances\n    Volumes:\n      # the volume name will be composed with\n      # <cluster>-nyw5e-<volume>\n      __CLUSTER__-nyw5e-000000000000000:\n        AccessViaHosts:\n          
'http://keep0.__CLUSTER__.__DOMAIN__:25107':\n            ReadOnly: false\n        Replication: 2\n        Driver: Directory\n        DriverParameters:\n          Root: /var/lib/arvados/keep\n\n    Users:\n      NewUsersAreActive: true\n      AutoAdminFirstUser: true\n      AutoSetupNewUsers: true\n\n    Services:\n      Controller:\n        ExternalURL: 'https://__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'\n        InternalURLs:\n          'http://controller.internal:8003': {}\n      DispatchCloud:\n        InternalURLs:\n          'http://__CLUSTER__.__DOMAIN__:9006': {}\n      Keepbalance:\n        InternalURLs:\n          'http://localhost:9005': {}\n      Keepproxy:\n        ExternalURL: 'https://keep.__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'\n        InternalURLs:\n          'http://keep.internal:25100': {}\n      Keepstore:\n        InternalURLs:\n          'http://keep0.__CLUSTER__.__DOMAIN__:25107': {}\n      RailsAPI:\n        InternalURLs:\n          'http://api.internal:8004': {}\n      WebDAV:\n        ExternalURL: 'https://collections.__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'\n        InternalURLs:\n          'http://collections.internal:9002': {}\n      WebDAVDownload:\n        ExternalURL: 'https://download.__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'\n      WebShell:\n        ExternalURL: 'https://webshell.__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'\n      Websocket:\n        ExternalURL: 'wss://ws.__CLUSTER__.__DOMAIN__/websocket'\n        InternalURLs:\n          'http://ws.internal:8005': {}\n      Workbench1:\n        ExternalURL: 'https://workbench.__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'\n      Workbench2:\n        ExternalURL: 'https://workbench2.__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__'\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/docker.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ndocker:\n  pkg:\n    docker:\n      use_upstream: package\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/locale.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nlocale:\n  present:\n    - \"en_US.UTF-8 UTF-8\"\n  default:\n    # Note: On debian systems don't write the second 'UTF-8' here or you will\n    # experience salt problems like: LookupError: unknown encoding: utf_8_utf_8\n    # Restart the minion after you corrected this!\n    name: 'en_US.UTF-8'\n    requires: 'en_US.UTF-8 UTF-8'\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/logrotate.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# The logrotate formula checks that an associated service is running.\n# The default it checks is cron, but all the distributions Arvados supports\n# have switched to a systemd timer, so check that instead.\n# Refer to logrotate-formula's documentation for details\n# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst\n\nlogrotate:\n  service: logrotate.timer\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/logrotate_api.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Refer to logrotate-formula's documentation for information about customization\n# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst\n\nlogrotate:\n  jobs:\n    arvados-api:\n      path:\n        - /var/www/arvados-api/shared/log/*.log\n      config:\n        - daily\n        - missingok\n        - rotate 365\n        - compress\n        - nodelaycompress\n        - copytruncate\n        - sharedscripts\n        - postrotate\n        - '  systemctl try-reload-or-restart arvados-railsapi.service'\n        - endscript\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/logrotate_wb1.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Refer to logrotate-formula's documentation for information about customization\n# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst\n\nlogrotate:\n  jobs:\n    arvados-workbench:\n      path:\n        - /var/www/arvados-workbench/shared/log/*.log\n      config:\n        - daily\n        - missingok\n        - rotate 365\n        - compress\n        - nodelaycompress\n        - copytruncate\n        - sharedscripts\n        - postrotate\n        - '  [ -s /run/nginx.pid ] && kill -USR1 `cat /run/nginx.pid`'\n        - endscript\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      env: GEM_HOME\n      worker_processes: 4\n\n  ### SNIPPETS\n  snippets:\n    # Based on https://ssl-config.mozilla.org/#server=nginx&version=1.14.2&config=intermediate&openssl=1.1.1d&guideline=5.4\n    ssl_hardening_default.conf:\n      - ssl_session_timeout: 1d\n      - ssl_session_cache: 'shared:arvadosSSL:10m'\n      - ssl_session_tickets: 'off'\n\n      # intermediate configuration\n      - ssl_protocols: TLSv1.2 TLSv1.3\n      - ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384\n      - ssl_prefer_server_ciphers: 'off'\n\n      # HSTS (ngx_http_headers_module is required) (63072000 seconds)\n      - add_header: 'Strict-Transport-Security \"max-age=63072000\" always'\n\n      # OCSP stapling\n      # NOTE! Stapling does not work with self-signed certificates, so disabling for tests\n      # - ssl_stapling: 'on'\n      # - ssl_stapling_verify: 'on'\n\n      # verify chain of trust of OCSP response using Root CA and Intermediate certs\n      # - ssl_trusted_certificate /path/to/root_CA_cert_plus_intermediates\n\n      # curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam\n      # - ssl_dhparam: /path/to/dhparam\n\n      # replace with the IP address of your resolver\n      # - resolver: 127.0.0.1\n\n  ### SITES\n  servers:\n    managed:\n      # Remove default webserver\n      default:\n        enabled: false\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_api_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- if grains.os_family in ('RedHat',) %}\n  {%- set group = 'nginx' %}\n{%- else %}\n  {%- set group = 'www-data' %}\n{%- endif %}\n\n### ARVADOS\narvados:\n  config:\n    group: {{ group }}\n\n### NGINX\nnginx:\n  ### SITES\n  servers:\n    managed:\n      arvados_api.conf:\n        enabled: false\n        overwrite: false\n        config:\n          - server:\n            - listen: 'api.internal:8004'\n            - server_name: api\n            - root: /var/www/arvados-api/current/public\n            - index:  index.html index.htm\n            - access_log: /var/log/nginx/api.__CLUSTER__.__DOMAIN__-upstream.access.log combined\n            - error_log: /var/log/nginx/api.__CLUSTER__.__DOMAIN__-upstream.error.log\n            - client_max_body_size: 128m\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_controller_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        'geo $external_client':\n          default: 1\n          '127.0.0.0/8': 0\n        upstream controller_upstream:\n          - server: 'controller.internal:8003  fail_timeout=10s'\n\n  ### SITES\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_controller_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: __CLUSTER__.__DOMAIN__\n            - listen:\n              - 80 default\n            - location /.well-known:\n              - root: /var/www\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_controller_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: __CLUSTER__.__DOMAIN__\n            - listen:\n              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://controller_upstream'\n              - proxy_read_timeout: 300\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_set_header: 'X-External-Client $external_client'\n              - proxy_set_header: 'Upgrade $http_upgrade'\n              - proxy_set_header: 'Connection \"upgrade\"'\n              - proxy_max_temp_file_size: 0\n              - proxy_request_buffering: 'off'\n              - proxy_buffering: 'off'\n              - proxy_http_version: '1.1'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            - access_log: /var/log/nginx/controller.__CLUSTER__.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/controller.__CLUSTER__.__DOMAIN__.error.log\n            - client_max_body_size: 128m\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepproxy_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        upstream keepproxy_upstream:\n          - server: 'keep.internal:25100 fail_timeout=10s'\n\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_keepproxy_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: keep.__CLUSTER__.__DOMAIN__\n            - listen:\n              - 80\n            - location /.well-known:\n              - root: /var/www\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_keepproxy_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          file: extra_custom_certs_keepproxy_cert_file_copy\n        config:\n          - server:\n            - server_name: keep.__CLUSTER__.__DOMAIN__\n            - listen:\n              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://keepproxy_upstream'\n              - proxy_read_timeout: 90\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_buffering: 'off'\n            - client_body_buffer_size: 64M\n            - client_max_body_size: 64M\n            - proxy_http_version: '1.1'\n            - proxy_request_buffering: 'off'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: /etc/nginx/ssl/arvados-keepproxy.pem\n            - ssl_certificate_key: /etc/nginx/ssl/arvados-keepproxy.key\n            - access_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_keepweb_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        upstream collections_downloads_upstream:\n          - server: 'collections.internal:9002 fail_timeout=10s'\n\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_collections_download_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: collections.__CLUSTER__.__DOMAIN__ download.__CLUSTER__.__DOMAIN__\n            - listen:\n              - 80\n            - location /.well-known:\n              - root: /var/www\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      ### COLLECTIONS / DOWNLOAD\n      {%- for vh in [\n        'collections',\n        'download'\n        ]\n      %}\n      arvados_{{ vh }}.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          file: extra_custom_certs_{{ vh }}_cert_file_copy\n        config:\n          - server:\n            - server_name: {{ vh }}.__CLUSTER__.__DOMAIN__\n            - listen:\n              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://collections_downloads_upstream'\n              - proxy_read_timeout: 90\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_buffering: 'off'\n            - client_max_body_size: 0\n            - proxy_http_version: '1.1'\n            - proxy_request_buffering: 'off'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: /etc/nginx/ssl/arvados-{{ vh }}.pem\n            - ssl_certificate_key: /etc/nginx/ssl/arvados-{{ vh }}.key\n            - access_log: /var/log/nginx/{{ vh }}.__CLUSTER__.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/{{ vh }}.__CLUSTER__.__DOMAIN__.error.log\n      {%- endfor %}\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_webshell_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# This parameter will be used here to generate a list of upstreams and vhosts.\n# This dict is here for convenience and should be managed some other way, but the\n# different ways of orchestration that can be used for this are outside the scope\n# of this formula and their examples.\n# These upstreams should match those defined in `arvados:cluster:resources:virtual_machines`\n{% set webshell_virtual_machines = {\n  'shell': {\n    'name': 'webshell',\n    'backend': '127.0.1.1',\n    'port': 4200,\n  }\n}\n%}\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n\n      ### STREAMS\n      http:\n        {%- for vm, params in webshell_virtual_machines.items() %}\n          {%- set vm_name = params.name | default(vm) %}\n          {%- set vm_backend = params.backend | default(vm_name) %}\n          {%- set vm_port = params.port | default(4200) %}\n\n        upstream {{ vm_name }}_upstream:\n          - server: '{{ vm_backend }}:{{ vm_port }} fail_timeout=10s'\n\n        {%- endfor %}\n\n  ### SITES\n  servers:\n    managed:\n      arvados_webshell_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: webshell.__CLUSTER__.__DOMAIN__\n            - listen:\n              - 80\n            - location /.well-known:\n              - root: /var/www\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_webshell_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          file: extra_custom_certs_webshell_cert_file_copy\n        config:\n          - server:\n            - server_name: webshell.__CLUSTER__.__DOMAIN__\n            - listen:\n              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            {%- for vm, params in webshell_virtual_machines.items() %}\n              {%- set vm_name = params.name | default(vm) %}\n            - location /{{ vm_name }}:\n              - proxy_pass: 'http://{{ vm_name }}_upstream'\n              - proxy_read_timeout: 90\n              - proxy_connect_timeout: 90\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_ssl_session_reuse: 'off'\n\n              - \"if ($request_method = 'OPTIONS')\":\n                - add_header: \"'Access-Control-Allow-Origin' '*'\"\n                - add_header: \"'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'\"\n                - add_header: \"'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'\"\n                - add_header: \"'Access-Control-Max-Age' 1728000\"\n                - add_header: \"'Content-Type' 'text/plain charset=UTF-8'\"\n                - add_header: \"'Content-Length' 0\"\n                - return: 204\n\n              - \"if ($request_method = 'POST')\":\n                - add_header: \"'Access-Control-Allow-Origin' '*'\"\n                - add_header: \"'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'\"\n                - add_header: \"'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'\"\n\n          
    - \"if ($request_method = 'GET')\":\n                - add_header: \"'Access-Control-Allow-Origin' '*'\"\n                - add_header: \"'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'\"\n                - add_header: \"'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'\"\n            {%- endfor %}\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: /etc/nginx/ssl/arvados-webshell.pem\n            - ssl_certificate_key: /etc/nginx/ssl/arvados-webshell.key\n            - access_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.error.log\n\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_websocket_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        upstream websocket_upstream:\n          - server: 'ws.internal:8005 fail_timeout=10s'\n\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_websocket_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: ws.__CLUSTER__.__DOMAIN__\n            - listen:\n              - 80\n            - location /.well-known:\n              - root: /var/www\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_websocket_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          file: extra_custom_certs_websocket_cert_file_copy\n        config:\n          - server:\n            - server_name: ws.__CLUSTER__.__DOMAIN__\n            - listen:\n              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://websocket_upstream'\n              - proxy_read_timeout: 600\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: 'Host $host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'Upgrade $http_upgrade'\n              - proxy_set_header: 'Connection \"upgrade\"'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_buffering: 'off'\n            - client_body_buffer_size: 64M\n            - client_max_body_size: 64M\n            - proxy_http_version: '1.1'\n            - proxy_request_buffering: 'off'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: /etc/nginx/ssl/arvados-websocket.pem\n            - ssl_certificate_key: /etc/nginx/ssl/arvados-websocket.key\n            - access_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench2_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n\n### ARVADOS\narvados:\n  config:\n    group: www-data\n\n### NGINX\nnginx:\n  ### SITES\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_workbench2_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: workbench2.__DOMAIN__\n            - listen:\n              - 80\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_workbench2_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: workbench2.__DOMAIN__\n            - listen:\n              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl\n\n            - location /:\n              - return: '301 https://workbench.__DOMAIN__$request_uri'\n\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - access_log: /var/log/nginx/workbench2.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/workbench2.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/nginx_workbench_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- import_yaml \"ssl_key_encrypted.sls\" as ssl_key_encrypted_pillar %}\n\n### ARVADOS\narvados:\n  config:\n    group: www-data\n\n### NGINX\nnginx:\n  ### SITES\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_workbench_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: workbench.__DOMAIN__\n            - listen:\n              - 80\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_workbench_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          # Maps WB1 '/actions?uuid=X' URLs to their equivalent on WB2\n          - 'map $request_uri $actions_redirect':\n            - '~^/actions\\?uuid=(.*-4zz18-.*)': '/collections/$1'\n            - '~^/actions\\?uuid=(.*-j7d0g-.*)': '/projects/$1'\n            - '~^/actions\\?uuid=(.*-tpzed-.*)': '/projects/$1'\n            - '~^/actions\\?uuid=(.*-7fd4e-.*)': '/workflows/$1'\n            - '~^/actions\\?uuid=(.*-xvhdp-.*)': '/processes/$1'\n            - '~^/actions\\?uuid=(.*)': '/'\n            - default: 0\n\n          - server:\n            - server_name: workbench.__DOMAIN__\n            - listen:\n              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n\n    # REDIRECTS FROM WORKBENCH 1 TO WORKBENCH 2\n\n    # Paths that are not redirected because wb1 and wb2 have similar enough paths\n    # that a redirect is pointless and would create a redirect loop.\n    # rewrite ^/api_client_authorizations.* /api_client_authorizations redirect;\n    # rewrite ^/repositories.* /repositories redirect;\n    # rewrite ^/links.* /links redirect;\n    # rewrite ^/projects.* /projects redirect;\n    # rewrite ^/trash /trash redirect;\n\n            # WB1 '/actions?uuid=X' URL Redirects\n            - 'if ($actions_redirect)':\n              - return: '301 $actions_redirect'\n\n    # Redirects that include a uuid\n            - rewrite: '^/work_units/(.*) /processes/$1 redirect'\n            - rewrite: '^/container_requests/(.*) /processes/$1 redirect'\n            - rewrite: '^/users/(.*) /user/$1 redirect'\n            - rewrite: '^/groups/(.*) /group/$1 redirect'\n\n    # Special file download redirects\n            - 'if ($arg_disposition = attachment)':\n              - rewrite: '^/collections/([^/]*)/(.*) /?redirectToDownload=/c=$1/$2? redirect'\n\n            - 'if ($arg_disposition = inline)':\n              - rewrite: '^/collections/([^/]*)/(.*) /?redirectToPreview=/c=$1/$2? 
redirect'\n\n    # Redirects that go to a roughly equivalent page\n            - rewrite: '^/virtual_machines.* /virtual-machines-admin redirect'\n            - rewrite: '^/users/.*/virtual_machines /virtual-machines-user redirect'\n            - rewrite: '^/authorized_keys.* /ssh-keys-admin redirect'\n            - rewrite: '^/users/.*/ssh_keys /ssh-keys-user redirect'\n            - rewrite: '^/containers.* /all_processes redirect'\n            - rewrite: '^/container_requests /all_processes redirect'\n            - rewrite: '^/job.* /all_processes redirect'\n            - rewrite: '^/users/link_account /link_account redirect'\n            - rewrite: '^/keep_services.* /keep-services redirect'\n            - rewrite: '^/trash_items.* /trash redirect'\n\n    # Redirects that don't have a good mapping and\n    # just go to root.\n            - rewrite: '^/themes.* / redirect'\n            - rewrite: '^/keep_disks.* / redirect'\n            - rewrite: '^/user_agreements.* / redirect'\n            - rewrite: '^/nodes.* / redirect'\n            - rewrite: '^/humans.* / redirect'\n            - rewrite: '^/traits.* / redirect'\n            - rewrite: '^/sessions.* / redirect'\n            - rewrite: '^/logout.* / redirect'\n            - rewrite: '^/logged_out.* / redirect'\n            - rewrite: '^/current_token / redirect'\n            - rewrite: '^/logs.* / redirect'\n            - rewrite: '^/factory_jobs.* / redirect'\n            - rewrite: '^/uploaded_datasets.* / redirect'\n            - rewrite: '^/specimens.* / redirect'\n            - rewrite: '^/pipeline_templates.* / redirect'\n            - rewrite: '^/pipeline_instances.* / redirect'\n\n            - location /:\n              - root: /var/www/arvados-workbench2/workbench2\n              - try_files: '$uri $uri/ /index.html'\n              - 'if (-f $document_root/maintenance.html)':\n                - return: 503\n            - location /config.json:\n              - return: {{ \"200 '\" ~ '{\"API_HOST\":\"__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__\"}' ~ \"'\" }}\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            {%- if ssl_key_encrypted_pillar.ssl_key_encrypted.enabled %}\n            - ssl_password_file: {{ '/run/arvados/' | path_join(ssl_key_encrypted_pillar.ssl_key_encrypted.privkey_password_filename) }}\n            {%- endif %}\n            - access_log: /var/log/nginx/workbench2.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/workbench2.__DOMAIN__.error.log\n"
  },
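The $actions_redirect map above can be spot-checked from a client once the vhost is live; the hostname and collection UUID below are placeholders (a sketch, assuming a reachable cluster):

    # A collection (-4zz18-) UUID should yield a 301 to /collections/<uuid>.
    curl -s -k -I 'https://workbench.xxxxx.example.com/actions?uuid=xxxxx-4zz18-0123456789abcde' | grep -i '^location:'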
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/pillars/postgresql.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### POSTGRESQL\npostgres:\n  use_upstream_repo: false\n  postgresconf: |-\n    listen_addresses = '*'  # listen on all interfaces\n    #ssl = on\n    #ssl_cert_file = '/etc/ssl/certs/arvados-snakeoil-cert.pem'\n    #ssl_key_file = '/etc/ssl/private/arvados-snakeoil-cert.key'\n  acls:\n    - ['local', 'all', 'postgres', 'peer']\n    - ['local', 'all', 'all', 'peer']\n    - ['host', 'all', 'all', '127.0.0.1/32', 'md5']\n    - ['host', 'all', 'all', '::1/128', 'md5']\n    - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '127.0.0.1/32']\n  users:\n    __CLUSTER___arvados:\n      ensure: present\n      password: \"__DATABASE_PASSWORD__\"\n\n  # tablespaces:\n  #   arvados_tablespace:\n  #     directory: /path/to/some/tbspace/arvados_tbsp\n  #     owner: arvados\n\n  databases:\n    __CLUSTER___arvados:\n      owner: __CLUSTER___arvados\n      template: template0\n      lc_ctype: en_US.utf8\n      lc_collate: en_US.utf8\n      # tablespace: arvados_tablespace\n      schemas:\n        public:\n          owner: __CLUSTER___arvados\n      extensions:\n        pg_trgm:\n          if_not_exists: true\n          schema: public\n"
  },
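After the highstate runs, the database, owner, and pg_trgm extension declared above can be verified with psql (substitute your cluster prefix for the __CLUSTER__ placeholder, as elsewhere in these pillars):

    # Lists the pg_trgm extension if it was created in the public schema.
    sudo -u postgres psql -d __CLUSTER___arvados -c '\dx pg_trgm'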
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/states/custom_certs.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set orig_cert_dir = salt['pillar.get']('extra_custom_certs_dir', '/srv/salt/certs')  %}\n{%- set dest_cert_dir = '/etc/nginx/ssl' %}\n{%- set certs = salt['pillar.get']('extra_custom_certs', [])  %}\n\n{% if certs %}\nextra_custom_certs_file_directory_certs_dir:\n  file.directory:\n    - name: /etc/nginx/ssl\n    - require:\n      - pkg: nginx_install\n\n  {%- for cert in certs %}\n    {%- set cert_file = 'arvados-' ~ cert ~ '.pem' %}\n    {%- set key_file = 'arvados-' ~ cert ~ '.key' %}\nextra_custom_certs_{{ cert }}_cert_file_copy:\n  file.copy:\n    - name: {{ dest_cert_dir }}/{{ cert_file }}\n    - source: {{ orig_cert_dir }}/{{ cert_file }}\n    - force: true\n    - user: root\n    - group: root\n    - mode: 0640\n    - unless: cmp {{ dest_cert_dir }}/{{ cert_file }} {{ orig_cert_dir }}/{{ cert_file }}\n    - require:\n      - file: extra_custom_certs_file_directory_certs_dir\n    - watch_in:\n      - service: extra_nginx_service_reload_on_certs_changes\n\nextra_custom_certs_{{ cert }}_key_file_copy:\n  file.copy:\n    - name: {{ dest_cert_dir }}/{{ key_file }}\n    - source: {{ orig_cert_dir }}/{{ key_file }}\n    - force: true\n    - user: root\n    - group: root\n    - mode: 0640\n    - unless: cmp {{ dest_cert_dir }}/{{ key_file }} {{ orig_cert_dir }}/{{ key_file }}\n    - require:\n      - file: extra_custom_certs_file_directory_certs_dir\n    - watch_in:\n      - service: extra_nginx_service_reload_on_certs_changes\n  {%- endfor %}\n\nextra_nginx_service_reload_on_certs_changes:\n  service.running:\n    - name: nginx\n    - enable: True\n    - reload: True\n{%- endif %}\n"
  },
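The state above expects a matching arvados-<name>.pem/.key pair under the source directory for every name listed in the extra_custom_certs pillar; for example, with a hypothetical 'controller' entry and the default directory:

    # Both files must exist or the corresponding file.copy states will fail.
    ls -l /srv/salt/certs/arvados-controller.pem /srv/salt/certs/arvados-controller.key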
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/states/dns.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ndns:\n  pkg.installed:\n    - pkgs:\n      - dnsmasq\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/states/host_entries.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- set curr_tpldir = tpldir %}\n{%- set tpldir = 'arvados' %}\n{%- from \"arvados/map.jinja\" import arvados with context %}\n{%- set tpldir = curr_tpldir %}\n\narvados_test_salt_states_examples_single_host_etc_hosts_host_present:\n  host.present:\n    - ip: 127.0.1.1\n    - names:\n      - {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n      # NOTE! This just works for our testings.\n      # Won't work if the cluster name != host name\n      {%- for entry in [\n          'api',\n          'collections',\n          'controller',\n          'download',\n          'keep',\n          'keepweb',\n          'keep0',\n          'shell',\n          'workbench',\n          'workbench2',\n          'ws',\n        ]\n      %}\n      - {{ entry }}\n      - {{ entry }}.internal\n      - {{ entry }}.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n      {%- endfor %}\n    - require_in:\n      - file: nginx_config\n      - service: nginx_service\n"
  },
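Once this state is applied, each generated alias should resolve to 127.0.1.1 locally (cluster name and domain below are placeholders):

    getent hosts workbench2.xxxxx.example.com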
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/states/keep_volume.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nvar_lib_arvados_keep_dir:\n  file.directory:\n    - name: /var/lib/arvados/keep\n    - user: root\n    - group: root\n    - mode: '0770'\n    - makedirs: true\n    - require_in:\n      - pkg: keepstore\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/states/passenger_rvm.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- if grains.os_family in ('RedHat',) %}\n  {%- set group = 'nginx' %}\n{%- else %}\n  {%- set group = 'www-data' %}\n{%- endif %}\n\n# Make sure that /var/www/.passenger exists with the proper ownership\n# so that passenger can build passenger_native_support.so\nextra_var_www_passenger:\n  file.directory:\n    - name: /var/www/.passenger\n    - user: {{ group }}\n    - group: {{ group }}\n    - mode: '0755'\n    - makedirs: True\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/states/railsapi_passenger_configs.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set _workers = (\"__CONTROLLER_MAX_WORKERS__\" or grains['num_cpus']*2)|int %}\n{%- set max_workers = [_workers, 8]|max %}\n\n{%- if salt['pillar.get']('nginx:servers:managed:arvados_controller_default.conf') %}\n\n# Make the passenger queue small (twice the concurrency, so\n# there's at most one pending request for each busy worker)\n# because controller reorders requests based on priority, and\n# won't send more than API.MaxConcurrentRailsRequests to passenger\n# (which is max_workers * 2), so things that are moved to the head\n# of the line get processed quickly.\nextra_railsapi_passenger_configs:\n  file.managed:\n    - name: /etc/systemd/system/arvados-railsapi.service.d/26-installer.conf\n    - contents: |\n        ### This file managed by Salt, do not edit by hand!!\n        [Service]\n        Environment=PASSENGER_MAX_POOL_SIZE={{ max_workers }}\n        Environment=PASSENGER_MAX_REQUEST_QUEUE_SIZE={{ max_workers * 2 + 1 }}\n    - user: root\n    - group: root\n    - mode: '0644'\n    - makedirs: True\n    - require_in:\n      - service: arvados-api-service-running-service-running\n    - watch_in:\n      - cmd: extra_systemd_daemon_reload\n      - service: arvados-api-service-running-service-running\n\nextra_systemd_daemon_reload:\n  cmd.run:\n    - name: systemctl daemon-reload\n\n\n{%- endif %}\n"
  },
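A worked example of the sizing logic above, assuming __CONTROLLER_MAX_WORKERS__ is left unset and the node reports 2 CPUs (illustrative numbers only):

    # Mirrors the Jinja: workers = num_cpus * 2 = 4, the |max filter enforces a
    # floor of 8, so the pool is 8 and the queue is 8 * 2 + 1 = 17.
    cpus=2
    workers=$(( cpus * 2 ))
    pool=$(( workers > 8 ? workers : 8 ))
    echo "pool=$pool queue=$(( pool * 2 + 1 ))"   # pool=8 queue=17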
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/states/snakeoil_certs.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# WARNING: This file is only used for testing purposes, and should not be used\n# in a production environment\n\n{%- set curr_tpldir = tpldir %}\n{%- set tpldir = 'arvados' %}\n{%- from \"arvados/map.jinja\" import arvados with context %}\n{%- set tpldir = curr_tpldir %}\n\n{%- set orig_cert_dir = salt['pillar.get']('extra_custom_certs_dir', '/srv/salt/certs')  %}\n\ninclude:\n  - nginx.config\n  - nginx.service\n\n# Debian uses different dirs for certs and keys, but being a Snake Oil example,\n# we'll keep it simple here.\n{%- set arvados_ca_cert_file = '/etc/ssl/private/arvados-snakeoil-ca.pem' %}\n{%- set arvados_ca_key_file = '/etc/ssl/private/arvados-snakeoil-ca.key' %}\n\n{%- if grains.get('os_family') == 'Debian' %}\n  {%- set arvados_ca_cert_dest = '/usr/local/share/ca-certificates/arvados-snakeoil-ca.crt' %}\n  {%- set update_ca_cert = '/usr/sbin/update-ca-certificates' %}\n  {%- set openssl_conf = '/etc/ssl/openssl.cnf' %}\n\nextra_snakeoil_certs_ssl_cert_pkg_installed:\n  pkg.installed:\n    - name: ssl-cert\n    - require_in:\n      - sls: postgres\n\n{%- else %}\n  {%- set arvados_ca_cert_dest = '/etc/pki/ca-trust/source/anchors/arvados-snakeoil-ca.pem' %}\n  {%- set update_ca_cert = '/usr/bin/update-ca-trust' %}\n  {%- set openssl_conf = '/etc/pki/tls/openssl.cnf' %}\n\n{%- endif %}\n\nextra_snakeoil_certs_dependencies_pkg_installed:\n  pkg.installed:\n    - pkgs:\n      - openssl\n      - ca-certificates\n\nextra_snakeoil_certs_arvados_snakeoil_ca_cmd_run:\n  # Taken from https://github.com/arvados/arvados/blob/3.1.2/tools/arvbox/lib/arvbox/docker/service/certificate/run\n  cmd.run:\n    - name: |\n        # These dirs are not too CentOS-ish, but this is a helper script\n        # and they should be enough\n        /bin/bash -c \"mkdir -p /etc/ssl/certs/ /etc/ssl/private/ && \\\n        openssl req \\\n          -new \\\n          -nodes \\\n          -sha256 \\\n          -x509 \\\n          -subj \\\"/C=CC/ST=Some State/O=Arvados Formula/OU=arvados-formula/CN=snakeoil-ca-{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\\\" \\\n          -extensions x509_ext \\\n          -config <(cat {{ openssl_conf }} \\\n                  <(printf \\\"\\n[x509_ext]\\nbasicConstraints=critical,CA:true,pathlen:0\\nkeyUsage=critical,keyCertSign,cRLSign\\\")) \\\n          -out {{ arvados_ca_cert_file }} \\\n          -keyout {{ arvados_ca_key_file }} \\\n          -days 365 && \\\n        cp {{ arvados_ca_cert_file }} {{ arvados_ca_cert_dest }} && \\\n        {{ update_ca_cert }}\"\n    - unless:\n      - test -f {{ arvados_ca_cert_file }}\n      - openssl verify -CAfile {{ arvados_ca_cert_file }} {{ arvados_ca_cert_file }}\n    - require:\n      - pkg: extra_snakeoil_certs_dependencies_pkg_installed\n\n# Create independent certs for each vhost\n{%- for vh in [\n  'collections',\n  'controller',\n  'download',\n  'keepproxy',\n  'webshell',\n  'workbench',\n  'workbench2',\n  'websocket',\n  ]\n%}\n# We're creating these in a tmp directory, so they're copied to their destination\n# with the `custom_certs` state file, as if using custom certificates.\n{%- set arvados_cert_file = orig_cert_dir ~ '/arvados-' ~ vh ~ '.pem' %}\n{%- set arvados_csr_file = orig_cert_dir ~ '/arvados-' ~ vh ~ '.csr' %}\n{%- set arvados_key_file = orig_cert_dir ~ '/arvados-' ~ vh ~ '.key' %}\n\nextra_snakeoil_certs_arvados_snakeoil_cert_{{ vh }}_cmd_run:\n  cmd.run:\n    - name: |\n        
cat > /tmp/{{ vh }}.openssl.cnf <<-CNF\n        [req]\n        default_bits = 2048\n        prompt = no\n        default_md = sha256\n        distinguished_name = dn\n        req_extensions = rext\n        [rext]\n        subjectAltName = @alt_names\n        [dn]\n        C   = CC\n        ST  = Some State\n        L   = Some Location\n        O   = Arvados Provision Example Single Host / Multiple Hostnames\n        OU  = arvados-provision-example-single_host_multiple_hostnames\n        CN  = {{ vh }}.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n        emailAddress = admin@{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n        [alt_names]\n        {%- for entry in grains.get('ipv4') %}\n        IP.{{ loop.index }} = {{ entry }}\n        {%- endfor %}\n        DNS.1 = {{ vh }}.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n        {%- if vh in [\n          'controller',\n          'keepproxy',\n          'websocket'\n          ]\n        %}\n          {%- if vh == 'controller' %}\n        DNS.2 = {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n          {%- elif vh == 'keepproxy' %}\n        DNS.2 = keep.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n          {%- elif vh == 'websocket' %}\n        DNS.2 = ws.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n          {%- endif %}\n        {%- endif %}\n        CNF\n\n        # The req\n        openssl req \\\n          -config /tmp/{{ vh }}.openssl.cnf \\\n          -new \\\n          -nodes \\\n          -sha256 \\\n          -out {{ arvados_csr_file }} \\\n          -keyout {{ arvados_key_file }} > /tmp/snakeoil_certs.{{ vh }}.output 2>&1 && \\\n        # The cert\n        openssl x509 \\\n          -req \\\n          -days 365 \\\n          -in {{ arvados_csr_file }} \\\n          -out {{ arvados_cert_file }} \\\n          -extfile /tmp/{{ vh }}.openssl.cnf \\\n          -extensions rext \\\n          -CA {{ arvados_ca_cert_file }} \\\n          -CAkey {{ arvados_ca_key_file }} \\\n          -set_serial $(date +%s) && \\\n        chmod 0644 {{ arvados_cert_file }} && \\\n        chmod 0640 {{ arvados_key_file }}\n    - unless:\n      - test -f {{ arvados_key_file }}\n      - openssl verify -CAfile {{ arvados_ca_cert_file }} {{ arvados_cert_file }}\n    - require:\n      - pkg: extra_snakeoil_certs_dependencies_pkg_installed\n      - cmd: extra_snakeoil_certs_arvados_snakeoil_ca_cmd_run\n    - require_in:\n      - file: extra_custom_certs_{{ vh }}_cert_file_copy\n      - file: extra_custom_certs_{{ vh }}_key_file_copy\n\n  {%- if grains.get('os_family') == 'Debian' %}\nextra_snakeoil_certs_certs_permissions_{{ vh}}_cmd_run:\n  file.managed:\n    - name: {{ arvados_key_file }}\n    - owner: root\n    - group: ssl-cert\n    - require:\n      - cmd: extra_snakeoil_certs_arvados_snakeoil_cert_{{ vh }}_cmd_run\n      - pkg: extra_snakeoil_certs_ssl_cert_pkg_installed\n  {%- endif %}\n{%- endfor %}\n"
  },
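Each generated vhost certificate should verify against the snakeoil CA, mirroring the state's own unless check ('controller' is one of the vhosts in the loop; both paths are the defaults set above):

    openssl verify -CAfile /etc/ssl/private/arvados-snakeoil-ca.pem /srv/salt/certs/arvados-controller.pem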
  {
    "path": "tools/salt-install/config_examples/single_host/multiple_hostnames/states/workbench1_uninstall.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set curr_tpldir = tpldir %}\n{%- set tpldir = 'arvados' %}\n{%- from \"arvados/map.jinja\" import arvados with context %}\n{%- set tpldir = curr_tpldir %}\n\nworkbench1_pkg_removed:\n  pkg.removed:\n    - name: {{ arvados.workbench.pkg.name }}"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/README.md",
    "content": "Single host with a single hostname\n==================================\n\nThese files let you setup Arvados on a single host using a single hostname\nfor all of its components nginx's virtualhosts.\n\nThe hostname MUST be given in the `local.params` file. The script won't try\nto guess it because, depending on the network architecture where you're\ninstalling Arvados, things might not work as expected.\n\nThe services will be available on the same hostname but different ports,\nwhich can be given on the `local.params` file or will default to the following\nvalues:\n\n* CLUSTER.DOMAIN\n* collections\n* download\n* keep\n* keep0\n* webshell\n* workbench\n* workbench2\n* ws\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/arvados.sls",
    "content": "# -*- coding: utf-8 -*-\n# vim: ft=yaml\n---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- set database_host = (\"__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__\" or \"127.0.0.1\") %}\n{%- set database_name = \"__DATABASE_NAME__\" %}\n{%- set database_user = \"__DATABASE_USER__\" %}\n{%- set database_password = \"__DATABASE_PASSWORD__\" %}\n\n# The variables commented out are the default values that the formula uses.\n# The uncommented values are REQUIRED values. If you don't set them, running\n# this formula will fail.\narvados:\n  ### GENERAL CONFIG\n  version: '__VERSION__'\n  ## It makes little sense to disable this flag, but you can, if you want :)\n  # use_upstream_repo: true\n\n  ## Repo URL is built with grains values. If desired, it can be completely\n  ## overwritten with the pillar parameter 'repo_url'\n  # repo:\n  #   humanname: Arvados Official Repository\n\n  release: __RELEASE__\n\n  ## IMPORTANT!!!!!\n  ## api, workbench and shell require some gems, so you need to make sure ruby\n  ## and deps are installed in order to install and compile the gems.\n  ## We default to `false` in these two variables as it's expected you already\n  ## manage OS packages with some other tool and you don't want us messing up\n  ## with your setup.\n  ruby:\n    ## We set these to `true` here for testing purposes.\n    ## They both default to `false`.\n    manage_ruby: true\n    manage_gems_deps: true\n    # pkg: ruby\n    # gems_deps:\n    #     - curl\n    #     - g++\n    #     - gcc\n    #     - git\n    #     - libcurl4-gnutls-dev\n    #     - libpq-dev\n    #     - libxml2\n    #     - libxml2-dev\n    #     - make\n    #     - python3-dev\n    #     - ruby-dev\n    #     - zlib1g-dev\n\n  config:\n    check_command: /usr/bin/arvados-server config-check -strict=false -config\n  #   file: /etc/arvados/config.yml\n  #   user: root\n  ## IMPORTANT!!!!!\n  ## If you're intalling any of the rails apps (api, workbench), the group\n  ## should be set to that of the web server, usually `www-data`\n  #   group: root\n  #   mode: 640\n\n  ### ARVADOS CLUSTER CONFIG\n  cluster:\n    name: __CLUSTER__\n    domain: __DOMAIN__\n\n    database:\n      # max concurrent connections per arvados server daemon\n      # connection_pool_max: 32\n      name: {{ database_name }}\n      host: {{ database_host }}\n      password: {{ database_password }}\n      user: {{ database_user }}\n      extra_conn_params:\n        client_encoding: UTF8\n\n    tls:\n      # certificate: ''\n      # key: ''\n      # When using arvados-snakeoil certs set insecure: true\n      insecure: true\n\n    resources:\n      virtual_machines:\n        shell:\n          name: shell.__HOSTNAME_EXT__\n          backend: 127.0.0.1\n          port: 4200\n\n    ### TOKENS\n    tokens:\n      system_root: __SYSTEM_ROOT_TOKEN__\n      management: __MANAGEMENT_TOKEN__\n      anonymous_user: __ANONYMOUS_USER_TOKEN__\n\n    ### KEYS\n    secrets:\n      blob_signing_key: __BLOB_SIGNING_KEY__\n      workbench_secret_key: \"deprecated\"\n\n    Login:\n      Test:\n        Enable: true\n        Users:\n          __INITIAL_USER__:\n            Email: __INITIAL_USER_EMAIL__\n            Password: __INITIAL_USER_PASSWORD__\n\n    ### VOLUMES\n    ## This should usually match all your `keepstore` instances\n    Volumes:\n      # the volume name will be composed with\n      # <cluster>-nyw5e-<volume>\n      __CLUSTER__-nyw5e-000000000000000:\n        AccessViaHosts:\n          
'http://__IP_INT__:25107':\n            ReadOnly: false\n        Replication: 2\n        Driver: Directory\n        DriverParameters:\n          Root: /var/lib/arvados/keep\n\n    Containers:\n      LocalKeepBlobBuffersPerVCPU: 0\n\n    Users:\n      NewUsersAreActive: true\n      AutoAdminFirstUser: true\n      AutoSetupNewUsers: true\n\n    Services:\n      Controller:\n        ExternalURL: 'https://__HOSTNAME_EXT__:__CONTROLLER_EXT_SSL_PORT__'\n        InternalURLs:\n          'http://__IP_INT__:8003': {}\n      Keepbalance:\n        InternalURLs:\n          'http://__IP_INT__:9005': {}\n      Keepproxy:\n        ExternalURL: 'https://__HOSTNAME_EXT__:__KEEP_EXT_SSL_PORT__'\n        InternalURLs:\n          'http://__IP_INT__:25100': {}\n      Keepstore:\n        InternalURLs:\n          'http://__IP_INT__:25107': {}\n      RailsAPI:\n        InternalURLs:\n          'http://__IP_INT__:8004': {}\n      WebDAV:\n        ExternalURL: 'https://__HOSTNAME_EXT__:__KEEPWEB_EXT_SSL_PORT__'\n        InternalURLs:\n          'http://__IP_INT__:9003': {}\n      WebDAVDownload:\n        ExternalURL: 'https://__HOSTNAME_EXT__:__KEEPWEB_EXT_SSL_PORT__'\n      WebShell:\n        ExternalURL: 'https://__HOSTNAME_EXT__:__WEBSHELL_EXT_SSL_PORT__'\n      Websocket:\n        ExternalURL: 'wss://__HOSTNAME_EXT__:__WEBSOCKET_EXT_SSL_PORT__/websocket'\n        InternalURLs:\n          'http://__IP_INT__:8005': {}\n      Workbench1:\n        ExternalURL: 'https://__HOSTNAME_EXT__:__WORKBENCH1_EXT_SSL_PORT__'\n      Workbench2:\n        ExternalURL: 'https://__HOSTNAME_EXT__:__WORKBENCH2_EXT_SSL_PORT__'\n"
  },
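The check_command declared above can also be run by hand to validate a rendered configuration (the config path shown is the formula's commented default):

    /usr/bin/arvados-server config-check -strict=false -config /etc/arvados/config.yml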
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/aws_credentials.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\naws_credentials:\n  region: __LE_AWS_REGION__\n  access_key_id: __LE_AWS_ACCESS_KEY_ID__\n  secret_access_key: __LE_AWS_SECRET_ACCESS_KEY__\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/docker.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ndocker:\n  pkg:\n    docker:\n      use_upstream: package\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/letsencrypt.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### LETSENCRYPT\nletsencrypt:\n  use_package: true\n  pkgs:\n    - certbot: latest\n    - python3-certbot-nginx\n  config:\n    server: https://acme-v02.api.letsencrypt.org/directory\n    email: __INITIAL_USER_EMAIL__\n    authenticator: nginx\n    agree-tos: true\n    keep-until-expiring: true\n    expand: true\n    max-log-backups: 0\n    deploy-hook: systemctl reload nginx\n\n  domainsets:\n    __HOSTNAME_EXT__:\n      - __HOSTNAME_EXT__\n"
  },
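With this pillar in place, certbot's renewal path can be exercised against the Let's Encrypt staging endpoint without issuing real certificates (note that the nginx deploy hook is skipped during dry runs):

    certbot renew --dry-run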
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/locale.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nlocale:\n  present:\n    - \"en_US.UTF-8 UTF-8\"\n  default:\n    # Note: On debian systems don't write the second 'UTF-8' here or you will\n    # experience salt problems like: LookupError: unknown encoding: utf_8_utf_8\n    # Restart the minion after you corrected this!\n    name: 'en_US.UTF-8'\n    requires: 'en_US.UTF-8 UTF-8'\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/logrotate.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# The logrotate formula checks that an associated service is running.\n# The default it checks is cron, but all the distributions Arvados supports\n# have switched to a systemd timer, so check that instead.\n# Refer to logrotate-formula's documentation for details\n# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst\n\nlogrotate:\n  service: logrotate.timer\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/logrotate_api.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Refer to logrotate-formula's documentation for information about customization\n# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst\n\nlogrotate:\n  jobs:\n    arvados-api:\n      path:\n        - /var/www/arvados-api/shared/log/*.log\n      config:\n        - daily\n        - missingok\n        - rotate 365\n        - compress\n        - nodelaycompress\n        - copytruncate\n        - sharedscripts\n        - postrotate\n        - '  systemctl try-reload-or-restart arvados-railsapi.service'\n        - endscript\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/logrotate_wb1.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n# Refer to logrotate-formula's documentation for information about customization\n# https://github.com/salt-formulas/salt-formula-logrotate/blob/master/README.rst\n\nlogrotate:\n  jobs:\n    arvados-workbench:\n      path:\n        - /var/www/arvados-workbench/shared/log/*.log\n      config:\n        - daily\n        - missingok\n        - rotate 365\n        - compress\n        - nodelaycompress\n        - copytruncate\n        - sharedscripts\n        - postrotate\n        - '  [ -s /run/nginx.pid ] && kill -USR1 `cat /run/nginx.pid`'\n        - endscript\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      env: GEM_HOME\n      worker_processes: 4\n\n  ### SNIPPETS\n  snippets:\n    # Based on https://ssl-config.mozilla.org/#server=nginx&version=1.14.2&config=intermediate&openssl=1.1.1d&guideline=5.4\n    ssl_hardening_default.conf:\n      - ssl_session_timeout: 1d\n      - ssl_session_cache: 'shared:arvadosSSL:10m'\n      - ssl_session_tickets: 'off'\n\n      # intermediate configuration\n      - ssl_protocols: TLSv1.2 TLSv1.3\n      - ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384\n      - ssl_prefer_server_ciphers: 'off'\n\n      # HSTS (ngx_http_headers_module is required) (63072000 seconds)\n      - add_header: 'Strict-Transport-Security \"max-age=63072000\" always'\n\n      # OCSP stapling\n      # NOTE! Stapling does not work with self-signed certificates, so disabling for tests\n      # - ssl_stapling: 'on'\n      # - ssl_stapling_verify: 'on'\n\n      # verify chain of trust of OCSP response using Root CA and Intermediate certs\n      # - ssl_trusted_certificate /path/to/root_CA_cert_plus_intermediates\n\n      # curl https://ssl-config.mozilla.org/ffdhe2048.txt > /path/to/dhparam\n      # - ssl_dhparam: /path/to/dhparam\n\n      # replace with the IP address of your resolver\n      # - resolver: 127.0.0.1\n\n    arvados-snakeoil.conf:\n      - ssl_certificate: /etc/ssl/private/arvados-snakeoil-cert.pem\n      - ssl_certificate_key: /etc/ssl/private/arvados-snakeoil-cert.key\n\n  ### SITES\n  servers:\n    managed:\n      # Update default config to redirect to https\n      default:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: _\n            - listen:\n              - 80 default_server\n            - location /.well-known:\n              - root: /var/www\n            - location /:\n              - return: '301 https://$host$request_uri'\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_api_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- if grains.os_family in ('RedHat',) %}\n  {%- set group = 'nginx' %}\n{%- else %}\n  {%- set group = 'www-data' %}\n{%- endif %}\n\n### ARVADOS\narvados:\n  config:\n    group: {{ group }}\n\n### NGINX\nnginx:\n  ### SITES\n  servers:\n    managed:\n      arvados_api.conf:\n        enabled: false\n        overwrite: false\n        config:\n          - server:\n            - listen: '__IP_INT__:8004'\n            - server_name: api\n            - root: /var/www/arvados-api/current/public\n            - index:  index.html index.htm\n            - access_log: /var/log/nginx/api.__CLUSTER__.__DOMAIN__-upstream.access.log combined\n            - error_log: /var/log/nginx/api.__CLUSTER__.__DOMAIN__-upstream.error.log\n            - client_max_body_size: 128m\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_controller_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        'geo $external_client':\n          default: 1\n          '127.0.0.0/8': 0\n        upstream controller_upstream:\n          - server: '__IP_INT__:8003  fail_timeout=10s'\n\n  ### SITES\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_controller_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: _\n            - listen:\n              - 80\n            - location /.well-known:\n              - root: /var/www\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_controller_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: __HOSTNAME_EXT__\n            - listen:\n              - __CONTROLLER_EXT_SSL_PORT__ http2 ssl default_server\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://controller_upstream'\n              - proxy_read_timeout: 300\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_set_header: 'X-External-Client $external_client'\n              - proxy_set_header: 'Upgrade $http_upgrade'\n              - proxy_set_header: 'Connection \"upgrade\"'\n              - proxy_max_temp_file_size: 0\n              - proxy_request_buffering: 'off'\n              - proxy_buffering: 'off'\n              - proxy_http_version: '1.1'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            - access_log: /var/log/nginx/__CLUSTER__.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/__CLUSTER__.__DOMAIN__.error.log\n            - client_max_body_size: 128m\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_keepproxy_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        upstream keepproxy_upstream:\n          - server: '__IP_INT__:25100 fail_timeout=10s'\n\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_keepproxy_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: keep.__CLUSTER__.__DOMAIN__\n            - listen:\n              - 80\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_keepproxy_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: __HOSTNAME_EXT__\n            - listen:\n              - __KEEP_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://keepproxy_upstream'\n              - proxy_read_timeout: 90\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_buffering: 'off'\n            - client_body_buffer_size: 64M\n            - client_max_body_size: 64M\n            - proxy_http_version: '1.1'\n            - proxy_request_buffering: 'off'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            - access_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/keepproxy.__CLUSTER__.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_keepweb_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        upstream collections_downloads_upstream:\n          - server: '__IP_INT__:9003 fail_timeout=10s'\n\n  servers:\n    managed:\n      ### COLLECTIONS / DOWNLOAD\n      arvados_collections_download_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: __HOSTNAME_EXT__\n            - listen:\n              - __KEEPWEB_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://collections_downloads_upstream'\n              - proxy_read_timeout: 90\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_buffering: 'off'\n            - client_max_body_size: 0\n            - proxy_http_version: '1.1'\n            - proxy_request_buffering: 'off'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            - access_log: /var/log/nginx/keepweb.__CLUSTER__.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/keepweb.__CLUSTER__.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_webshell_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n\n      ### STREAMS\n      http:\n        upstream webshell_upstream:\n          - server: '__IP_INT__:4200 fail_timeout=10s'\n\n  ### SITES\n  servers:\n    managed:\n      arvados_webshell_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: __HOSTNAME_EXT__\n            - listen:\n              - __WEBSHELL_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /shell.__HOSTNAME_EXT__:\n              - proxy_pass: 'http://webshell_upstream'\n              - proxy_read_timeout: 90\n              - proxy_connect_timeout: 90\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_ssl_session_reuse: 'off'\n\n              - \"if ($request_method = 'OPTIONS')\":\n                - add_header: \"'Access-Control-Allow-Origin' '*'\"\n                - add_header: \"'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'\"\n                - add_header: \"'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'\"\n                - add_header: \"'Access-Control-Max-Age' 1728000\"\n                - add_header: \"'Content-Type' 'text/plain charset=UTF-8'\"\n                - add_header: \"'Content-Length' 0\"\n                - return: 204\n\n              - \"if ($request_method = 'POST')\":\n                - add_header: \"'Access-Control-Allow-Origin' '*'\"\n                - add_header: \"'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'\"\n                - add_header: \"'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'\"\n\n              - \"if ($request_method = 'GET')\":\n                - add_header: \"'Access-Control-Allow-Origin' '*'\"\n                - add_header: \"'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'\"\n                - add_header: \"'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'\"\n\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            - access_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/webshell.__CLUSTER__.__DOMAIN__.error.log\n\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_websocket_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n      ### STREAMS\n      http:\n        upstream websocket_upstream:\n          - server: '__IP_INT__:8005 fail_timeout=10s'\n\n  servers:\n    managed:\n      arvados_websocket_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: __HOSTNAME_EXT__\n            - listen:\n              - __WEBSOCKET_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://websocket_upstream'\n              - proxy_read_timeout: 600\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: 'Host $host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'Upgrade $http_upgrade'\n              - proxy_set_header: 'Connection \"upgrade\"'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n              - proxy_buffering: 'off'\n            - client_body_buffer_size: 64M\n            - client_max_body_size: 64M\n            - proxy_http_version: '1.1'\n            - proxy_request_buffering: 'off'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            - access_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/ws.__CLUSTER__.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_workbench2_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- if grains.os_family in ('RedHat',) %}\n  {%- set group = 'nginx' %}\n{%- else %}\n  {%- set group = 'www-data' %}\n{%- endif %}\n\n### ARVADOS\narvados:\n  config:\n    group: {{ group }}\n\n### NGINX\nnginx:\n  ### SITES\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_workbench2_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: workbench2.__CLUSTER__.__DOMAIN__\n            - listen:\n              - 80\n            - location /.well-known:\n              - root: /var/www\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_workbench2_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: __HOSTNAME_EXT__\n            - listen:\n              - __WORKBENCH2_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - root: /var/www/arvados-workbench2/workbench2\n              - try_files: '$uri $uri/ /index.html'\n              - 'if (-f $document_root/maintenance.html)':\n                - return: 503\n            - location /config.json:\n              - return: {{ \"200 '\" ~ '{\"API_HOST\":\"__HOSTNAME_EXT__:__CONTROLLER_EXT_SSL_PORT__\"}' ~ \"'\" }}\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            - access_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/workbench2.__CLUSTER__.__DOMAIN__.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/nginx_workbench_configuration.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- if grains.os_family in ('RedHat',) %}\n  {%- set group = 'nginx' %}\n{%- else %}\n  {%- set group = 'www-data' %}\n{%- endif %}\n\n### ARVADOS\narvados:\n  config:\n    group: {{ group }}\n\n### NGINX\nnginx:\n  ### SERVER\n  server:\n    config:\n\n      ### STREAMS\n      http:\n        upstream workbench_upstream:\n          - server: '__IP_INT__:9000 fail_timeout=10s'\n\n  ### SITES\n  servers:\n    managed:\n      ### DEFAULT\n      arvados_workbench_default.conf:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - server_name: workbench.__CLUSTER__.__DOMAIN__\n            - listen:\n              - 80\n            - location /.well-known:\n              - root: /var/www\n            - location /:\n              - return: '301 https://$host$request_uri'\n\n      arvados_workbench_ssl.conf:\n        enabled: true\n        overwrite: true\n        requires:\n          __CERT_REQUIRES__\n        config:\n          - server:\n            - server_name: __HOSTNAME_EXT__\n            - listen:\n              - __WORKBENCH1_EXT_SSL_PORT__ http2 ssl\n            - index: index.html index.htm\n            - location /:\n              - proxy_pass: 'http://workbench_upstream'\n              - proxy_read_timeout: 300\n              - proxy_connect_timeout: 90\n              - proxy_redirect: 'off'\n              - proxy_set_header: X-Forwarded-Proto https\n              - proxy_set_header: 'Host $http_host'\n              - proxy_set_header: 'X-Real-IP $remote_addr'\n              - proxy_set_header: 'X-Forwarded-For $proxy_add_x_forwarded_for'\n            - include: snippets/ssl_hardening_default.conf\n            - ssl_certificate: __CERT_PEM__\n            - ssl_certificate_key: __CERT_KEY__\n            - access_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.access.log combined\n            - error_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__.error.log\n\n      arvados_workbench_upstream:\n        enabled: true\n        overwrite: true\n        config:\n          - server:\n            - listen: '__IP_INT__:9000'\n            - server_name: workbench\n            - root: /var/www/arvados-workbench/current/public\n            - index:  index.html index.htm\n            # yamllint disable-line rule:line-length\n            - access_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__-upstream.access.log combined\n            - error_log: /var/log/nginx/workbench.__CLUSTER__.__DOMAIN__-upstream.error.log\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/pillars/postgresql.sls",
    "content": "---\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n### POSTGRESQL\npostgres:\n  use_upstream_repo: false\n  postgresconf: |-\n    listen_addresses = '*'  # listen on all interfaces\n    # If you want to enable communications' encryption to the DB server,\n    # uncomment these entries\n    # ssl = on\n    # ssl_cert_file = '/etc/ssl/certs/arvados-snakeoil-cert.pem'\n    # ssl_key_file = '/etc/ssl/private/arvados-snakeoil-cert.key'\n  acls:\n    - ['local', 'all', 'postgres', 'peer']\n    - ['local', 'all', 'all', 'peer']\n    - ['host', 'all', 'all', '127.0.0.1/32', 'md5']\n    - ['host', 'all', 'all', '::1/128', 'md5']\n    - ['host', '__CLUSTER___arvados', '__CLUSTER___arvados', '127.0.0.0/8']\n  users:\n    __CLUSTER___arvados:\n      ensure: present\n      password: \"__DATABASE_PASSWORD__\"\n\n  # tablespaces:\n  #   arvados_tablespace:\n  #     directory: /path/to/some/tbspace/arvados_tbsp\n  #     owner: arvados\n\n  databases:\n    __CLUSTER___arvados:\n      owner: __CLUSTER___arvados\n      template: template0\n      lc_ctype: en_US.utf8\n      lc_collate: en_US.utf8\n      # tablespace: arvados_tablespace\n      schemas:\n        public:\n          owner: __CLUSTER___arvados\n      extensions:\n        pg_trgm:\n          if_not_exists: true\n          schema: public\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/states/custom_certs.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set orig_cert_dir = salt['pillar.get']('extra_custom_certs_dir', '/srv/salt/certs')  %}\n{%- set dest_cert_dir = '/etc/nginx/ssl' %}\n{%- set certs = salt['pillar.get']('extra_custom_certs', [])  %}\n\n{% if certs %}\nextra_custom_certs_file_directory_certs_dir:\n  file.directory:\n    - name: /etc/nginx/ssl\n    - require:\n      - pkg: nginx_install\n\n  {%- for cert in certs %}\n    {%- set cert_file = 'arvados-' ~ cert ~ '.pem' %}\n    {%- set key_file = 'arvados-' ~ cert ~ '.key' %}\nextra_custom_certs_{{ cert }}_cert_file_copy:\n  file.copy:\n    - name: {{ dest_cert_dir }}/{{ cert_file }}\n    - source: {{ orig_cert_dir }}/{{ cert_file }}\n    - force: true\n    - user: root\n    - group: root\n    - mode: 0640\n    - unless: cmp {{ dest_cert_dir }}/{{ cert_file }} {{ orig_cert_dir }}/{{ cert_file }}\n    - require:\n      - file: extra_custom_certs_file_directory_certs_dir\n    - watch_in:\n      - service: extra_nginx_service_reload_on_certs_changes\n\nextra_custom_certs_{{ cert }}_key_file_copy:\n  file.copy:\n    - name: {{ dest_cert_dir }}/{{ key_file }}\n    - source: {{ orig_cert_dir }}/{{ key_file }}\n    - force: true\n    - user: root\n    - group: root\n    - mode: 0640\n    - unless: cmp {{ dest_cert_dir }}/{{ key_file }} {{ orig_cert_dir }}/{{ key_file }}\n    - require:\n      - file: extra_custom_certs_file_directory_certs_dir\n    - watch_in:\n      - service: extra_nginx_service_reload_on_certs_changes\n  {%- endfor %}\n\nextra_nginx_service_reload_on_certs_changes:\n  service.running:\n    - name: nginx\n    - enable: True\n    - reload: True\n{%- endif %}\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/states/dns.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ndns:\n  pkg.installed:\n    - pkgs:\n      - dnsmasq\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/states/host_entries.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n{%- set curr_tpldir = tpldir %}\n{%- set tpldir = 'arvados' %}\n{%- from \"arvados/map.jinja\" import arvados with context %}\n{%- set tpldir = curr_tpldir %}\n\n# We need the external hostname to resolve to the internal IP for docker. We\n# tell docker to resolve via the local dnsmasq, which reads from /etc/hosts by\n# default.\narvados_local_access_to_hostname_ext:\n  host.present:\n    - ip: __IP_INT__\n    - names:\n      - __HOSTNAME_EXT__\n\narvados_test_salt_states_examples_single_host_etc_hosts_host_present:\n  host.present:\n    - ip: 127.0.1.1\n    - names:\n      - {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n      # NOTE! This just works for our testing.\n      # Won't work if the cluster name != host name\n      {%- for entry in [\n          'api',\n          'collections',\n          'controller',\n          'download',\n          'keep',\n          'keepweb',\n          'keep0',\n          'shell',\n          'workbench',\n          'workbench2',\n          'ws',\n        ]\n      %}\n      - {{ entry }}\n      - {{ entry }}.{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n      {%- endfor %}\n    - require_in:\n      - file: nginx_config\n      - service: nginx_service\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/states/keep_volume.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nvar_lib_arvados_keep_dir:\n  file.directory:\n    - name: /var/lib/arvados/keep\n    - user: root\n    - group: root\n    - mode: '0770'\n    - makedirs: true\n    - require_in:\n      - pkg: keepstore\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/states/passenger_rvm.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- if grains.os_family in ('RedHat',) %}\n  {%- set group = 'nginx' %}\n{%- else %}\n  {%- set group = 'www-data' %}\n{%- endif %}\n\n# Make sure that /var/www/.passenger exists with the proper ownership\n# so that passenger can build passenger_native_support.so\nextra_var_www_passenger:\n  file.directory:\n    - name: /var/www/.passenger\n    - user: {{ group }}\n    - group: {{ group }}\n    - mode: '0755'\n    - makedirs: True\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/states/railsapi_passenger_configs.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set _workers = (\"__CONTROLLER_MAX_WORKERS__\" or grains['num_cpus']*2)|int %}\n{%- set max_workers = [_workers, 8]|max %}\n\n{%- if salt['pillar.get']('nginx:servers:managed:arvados_controller_default.conf') %}\n\n# Make the passenger queue small (twice the concurrency, so\n# there's at most one pending request for each busy worker)\n# because controller reorders requests based on priority, and\n# won't send more than API.MaxConcurrentRailsRequests to passenger\n# (which is max_workers * 2), so things that are moved to the head\n# of the line get processed quickly.\nextra_railsapi_passenger_configs:\n  file.managed:\n    - name: /etc/systemd/system/arvados-railsapi.service.d/26-installer.conf\n    - contents: |\n        ### This file managed by Salt, do not edit by hand!!\n        [Service]\n        Environment=PASSENGER_MAX_POOL_SIZE={{ max_workers }}\n        Environment=PASSENGER_MAX_REQUEST_QUEUE_SIZE={{ max_workers * 2 + 1 }}\n    - user: root\n    - group: root\n    - mode: '0644'\n    - makedirs: True\n    - require_in:\n      - service: arvados-api-service-running-service-running\n    - watch_in:\n      - cmd: extra_systemd_daemon_reload\n      - service: arvados-api-service-running-service-running\n\nextra_systemd_daemon_reload:\n  cmd.run:\n    - name: systemctl daemon-reload\n\n\n{%- endif %}\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/states/shell_cron_add_login_sync.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# This state tries to query the controller using the parameters set in\n# the `arvados.cluster.resources.virtual_machines` pillar, to get the\n# ARVADOS_VIRTUAL_MACHINE_UUID for the host and configure the arvados login-sync cron\n# as described in https://doc.arvados.org/main/install/install-shell-server.html\n\n{%- set curr_tpldir = tpldir %}\n{%- set tpldir = 'arvados' %}\n{%- from \"arvados/map.jinja\" import arvados with context %}\n{%- from \"arvados/libtofs.jinja\" import files_switch with context %}\n{%- set tpldir = curr_tpldir %}\n\n{%- set virtual_machines = arvados.cluster.resources.virtual_machines | default({}) %}\n{%- set api_token = arvados.cluster.tokens.system_root | yaml_encode %}\n{%- set api_host = arvados.cluster.Services.Controller.ExternalURL | regex_replace('^http(s?)://', '', ignorecase=true) %}\n\nextra_shell_cron_add_login_sync_add_jq_pkg_installed:\n  pkg.installed:\n    - name: jq\n\n{%- for vm, vm_params in virtual_machines.items() %}\n  {%- set vm_name = vm_params.name | default(vm) %}\n\n  # Check if any of the specified virtual_machines parameters corresponds to this instance\n  # It should be an error if we get more than one occurrence\n  {%- if vm_name in [grains['id'], grains['host'], grains['fqdn'], grains['nodename']] or\n         vm_params.backend in [grains['id'], grains['host'], grains['fqdn'], grains['nodename']] +\n                               grains['ipv4'] + grains['ipv6'] %}\n\n    # We need to query the VM UUID\n    {%- set cmd_query_vm_uuid = 'arv --short virtual_machine list' ~\n                                ' --filters \\'[[\"hostname\", \"=\", \"' ~ vm_name ~ '\"]]\\''\n    %}\n\nextra_shell_cron_add_login_sync_add_{{ vm }}_get_vm_uuid_cmd_run:\n  cmd.run:\n    - env:\n      - ARVADOS_API_TOKEN: {{ api_token }}\n      - ARVADOS_API_HOST: {{ api_host }}\n      - ARVADOS_API_HOST_INSECURE: {{ arvados.cluster.tls.insecure | default(false) }}\n    - name: {{ cmd_query_vm_uuid }} | head -1 | tee /tmp/vm_uuid_{{ vm }}\n    - unless:\n      - /bin/grep -qE \"[a-z0-9]{5}-2x53u-[a-z0-9]{15}\" /tmp/vm_uuid_{{ vm }}\n    - require:\n      - gem: arvados-shell-package-install-gem-arvados-cli-installed\n\nextra_shell_cron_add_login_sync_add_{{ vm }}_arvados_api_host_cron_env_present:\n  cron.env_present:\n    - name: ARVADOS_API_HOST\n    - value: {{ api_host }}\n    - onlyif:\n      - /bin/grep -qE \"[a-z0-9]{5}-2x53u-[a-z0-9]{15}\" /tmp/vm_uuid_{{ vm }}\n\nextra_shell_cron_add_login_sync_add_{{ vm }}_arvados_api_token_cron_env_present:\n  cron.env_present:\n    - name: ARVADOS_API_TOKEN\n    - value: {{ api_token }}\n    - onlyif:\n      - /bin/grep -qE \"[a-z0-9]{5}-2x53u-[a-z0-9]{15}\" /tmp/vm_uuid_{{ vm }}\n\nextra_shell_cron_add_login_sync_add_{{ vm }}_arvados_api_host_insecure_cron_env_present:\n  cron.env_present:\n    - name: ARVADOS_API_HOST_INSECURE\n    - value: {{ arvados.cluster.tls.insecure | default(false) }}\n    - onlyif:\n      - /bin/grep -qE \"[a-z0-9]{5}-2x53u-[a-z0-9]{15}\" /tmp/vm_uuid_{{ vm }}\n\nextra_shell_cron_add_login_sync_add_{{ vm }}_arvados_virtual_machine_uuid_cron_env_present:\n  cron.env_present:\n    - name: ARVADOS_VIRTUAL_MACHINE_UUID\n    - value: __slot__:salt:cmd.run(\"cat /tmp/vm_uuid_{{ vm }}\")\n    - onlyif:\n      - /bin/grep -qE \"[a-z0-9]{5}-2x53u-[a-z0-9]{15}\" /tmp/vm_uuid_{{ vm }}\n\nextra_shell_cron_add_login_sync_add_{{ vm }}_arvados_login_sync_cron_present:\n  cron.present:\n   
 - name: /usr/local/bin/arvados-login-sync\n    - minute: '*/2'\n    - onlyif:\n      - /bin/grep -qE \"[a-z0-9]{5}-2x53u-[a-z0-9]{15}\" /tmp/vm_uuid_{{ vm }}\n\n  {%- endif %}\n{%- endfor %}\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/states/shell_sudo_passwordless.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set curr_tpldir = tpldir %}\n{%- set tpldir = 'arvados' %}\n{%- from \"arvados/map.jinja\" import arvados with context %}\n{%- set tpldir = curr_tpldir %}\n\nextra_shell_sudo_passwordless_sudo_pkg_installed:\n  pkg.installed:\n    - name: sudo\n\nextra_shell_sudo_passwordless_config_file_managed:\n  file.managed:\n    - name: /etc/sudoers.d/arvados_passwordless\n    - makedirs: true\n    - user: root\n    - group: root\n    - mode: '0440'\n    - replace: false\n    - contents: |\n        # This file managed by Salt, do not edit by hand!!\n        # Allow members of group sudo to execute any command without password\n        %sudo ALL=(ALL:ALL) NOPASSWD:ALL\n    - require:\n      - pkg: extra_shell_sudo_passwordless_sudo_pkg_installed\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/states/snakeoil_certs.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set curr_tpldir = tpldir %}\n{%- set tpldir = 'arvados' %}\n{%- from \"arvados/map.jinja\" import arvados with context %}\n{%- set tpldir = curr_tpldir %}\n\n{%- set orig_cert_dir = salt['pillar.get']('extra_custom_certs_dir', '/srv/salt/certs')  %}\n\ninclude:\n  - nginx.config\n  - nginx.service\n\n# Debian uses different dirs for certs and keys, but being a Snake Oil example,\n# we'll keep it simple here.\n{%- set arvados_ca_cert_file = '/etc/ssl/private/arvados-snakeoil-ca.pem' %}\n{%- set arvados_ca_key_file = '/etc/ssl/private/arvados-snakeoil-ca.key' %}\n\n{%- if grains.get('os_family') == 'Debian' %}\n  {%- set arvados_ca_cert_dest = '/usr/local/share/ca-certificates/arvados-snakeoil-ca.crt' %}\n  {%- set update_ca_cert = '/usr/sbin/update-ca-certificates' %}\n  {%- set openssl_conf = '/etc/ssl/openssl.cnf' %}\n\nextra_snakeoil_certs_ssl_cert_pkg_installed:\n  pkg.installed:\n    - name: ssl-cert\n    - require_in:\n      - sls: postgres\n\n{%- else %}\n  {%- set arvados_ca_cert_dest = '/etc/pki/ca-trust/source/anchors/arvados-snakeoil-ca.pem' %}\n  {%- set update_ca_cert = '/usr/bin/update-ca-trust' %}\n  {%- set openssl_conf = '/etc/pki/tls/openssl.cnf' %}\n\n{%- endif %}\n\nextra_snakeoil_certs_dependencies_pkg_installed:\n  pkg.installed:\n    - pkgs:\n      - openssl\n      - ca-certificates\n\nextra_snakeoil_certs_arvados_snakeoil_ca_cmd_run:\n  # Taken from https://github.com/arvados/arvados/blob/3.1.2/tools/arvbox/lib/arvbox/docker/service/certificate/run\n  cmd.run:\n    - name: |\n        # These dirs are not too CentOS-ish, but this is a helper script\n        # and they should be enough\n        /bin/bash -c \"mkdir -p /etc/ssl/certs/ /etc/ssl/private/ && \\\n        openssl req \\\n          -new \\\n          -nodes \\\n          -sha256 \\\n          -x509 \\\n          -subj \\\"/C=CC/ST=Some State/O=Arvados Formula/OU=arvados-formula/CN=snakeoil-ca-{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\\\" \\\n          -extensions x509_ext \\\n          -config <(cat {{ openssl_conf }} \\\n                  <(printf \\\"\\n[x509_ext]\\nbasicConstraints=critical,CA:true,pathlen:0\\nkeyUsage=critical,keyCertSign,cRLSign\\\")) \\\n          -out {{ arvados_ca_cert_file }} \\\n          -keyout {{ arvados_ca_key_file }} \\\n          -days 365 && \\\n        cp {{ arvados_ca_cert_file }} {{ arvados_ca_cert_dest }} && \\\n        {{ update_ca_cert }}\"\n    - unless:\n      - test -f {{ arvados_ca_cert_file }}\n      - openssl verify -CAfile {{ arvados_ca_cert_file }} {{ arvados_ca_cert_file }}\n    - require:\n      - pkg: extra_snakeoil_certs_dependencies_pkg_installed\n\n{%- set arvados_cert_file = orig_cert_dir ~ '/arvados-__HOSTNAME_EXT__.pem' %}\n{%- set arvados_csr_file = orig_cert_dir ~ '/arvadoos-__HOSTNAME_EXT__.csr' %}\n{%- set arvados_key_file = orig_cert_dir ~ '/arvados-__HOSTNAME_EXT__.key' %}\n\nextra_snakeoil_certs_arvados_snakeoil_cert___HOSTNAME_EXT___cmd_run:\n  cmd.run:\n    - name: |\n        cat > /tmp/__HOSTNAME_EXT__.openssl.cnf <<-CNF\n        [req]\n        default_bits = 2048\n        prompt = no\n        default_md = sha256\n        distinguished_name = dn\n        req_extensions = rext\n        [rext]\n        subjectAltName = @alt_names\n        [dn]\n        C   = CC\n        ST  = Some State\n        L   = Some Location\n        O   = Arvados Provision Example Single Host / Single Hostname\n        OU  = 
arvados-provision-example-single_host_single_hostname\n        CN  = {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n        emailAddress = admin@{{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n        [alt_names]\n        {%- for entry in grains.get('ipv4') %}\n        IP.{{ loop.index }} = {{ entry }}\n        {%- endfor %}\n        DNS.1 = {{ arvados.cluster.name }}.{{ arvados.cluster.domain }}\n        DNS.2 = '__HOSTNAME_EXT__'\n        CNF\n\n        # The req\n        openssl req \\\n          -config /tmp/__HOSTNAME_EXT__.openssl.cnf \\\n          -new \\\n          -nodes \\\n          -sha256 \\\n          -out {{ arvados_csr_file }} \\\n          -keyout {{ arvados_key_file }} > /tmp/snake_oil_certs.__HOSTNAME_EXT__.output 2>&1 && \\\n        # The cert\n        openssl x509 \\\n          -req \\\n          -days 365 \\\n          -in {{ arvados_csr_file }} \\\n          -out {{ arvados_cert_file }} \\\n          -extfile /tmp/__HOSTNAME_EXT__.openssl.cnf \\\n          -extensions rext \\\n          -CA {{ arvados_ca_cert_file }} \\\n          -CAkey {{ arvados_ca_key_file }} \\\n          -set_serial $(date +%s) && \\\n        chmod 0644 {{ arvados_cert_file }} && \\\n        chmod 0640 {{ arvados_key_file }}\n    - unless:\n      - test -f {{ arvados_key_file }}\n      - openssl verify -CAfile {{ arvados_ca_cert_file }} {{ arvados_cert_file }}\n    - require:\n      - pkg: extra_snakeoil_certs_dependencies_pkg_installed\n      - cmd: extra_snakeoil_certs_arvados_snakeoil_ca_cmd_run\n    - require_in:\n      - file: extra_custom_certs___HOSTNAME_EXT___cert_file_copy\n      - file: extra_custom_certs___HOSTNAME_EXT___key_file_copy\n\n  {%- if grains.get('os_family') == 'Debian' %}\nextra_snakeoil_certs_certs_permissions___HOSTNAME_EXT___cmd_run:\n  file.managed:\n    - name: {{ arvados_key_file }}\n    - owner: root\n    - group: ssl-cert\n    - require:\n      - cmd: extra_snakeoil_certs_arvados_snakeoil_cert___HOSTNAME_EXT___cmd_run\n      - pkg: extra_snakeoil_certs_ssl_cert_pkg_installed\n  {%- endif %}\n"
  },
  {
    "path": "tools/salt-install/config_examples/single_host/single_hostname/states/workbench1_uninstall.sls",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\n{%- set curr_tpldir = tpldir %}\n{%- set tpldir = 'arvados' %}\n{%- from \"arvados/map.jinja\" import arvados with context %}\n{%- set tpldir = curr_tpldir %}\n\nworkbench1_pkg_removed:\n  pkg.removed:\n    - name: {{ arvados.workbench.pkg.name }}"
  },
  {
    "path": "tools/salt-install/installer.sh",
    "content": "#!/bin/bash\n\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n#\n# installer.sh\n#\n# Helps manage the configuration in a git repository, and then deploy\n# nodes by pushing a copy of the git repository to each node and\n# running the provision script to do the actual installation and\n# configuration.\n#\n\nset -eu\nset -o pipefail\n\n# The parameter file\ndeclare CONFIG_FILE=local.params\n\n# The salt template directory\ndeclare CONFIG_DIR=local_config_dir\n\n# The 5-character Arvados cluster id\n# This will be populated by loadconfig()\ndeclare CLUSTER\n\n# The parent domain (not including the cluster id)\n# This will be populated by loadconfig()\ndeclare DOMAIN\n\n# A bash associative array listing each node and mapping to the roles\n# that should be provisioned on those nodes.\n# This will be populated by loadconfig()\ndeclare -A NODES\n\n# A bash associative array listing each role and mapping to the nodes\n# that should be provisioned with this role.\n# This will be populated by loadconfig()\ndeclare -A ROLE2NODES\n\n# The ssh user we'll use\n# This will be populated by loadconfig()\ndeclare DEPLOY_USER\n\n# The git repository that we'll push to on all the nodes\n# This will be populated by loadconfig()\ndeclare GITTARGET\n\n# The public host used as an SSH jump host\n# This will be populated by loadconfig()\ndeclare USE_SSH_JUMPHOST\n\n# The temp file that will get used to disable envvar forwarding to avoid locale\n# issues in Debian distros.\n# This will be populated by loadconfig()\ndeclare SSH_CONFFILE\n\nchecktools() {\n  local MISSING=''\n  for a in git ip; do\n    if ! which $a; then\n      MISSING=\"$MISSING $a\"\n    fi\n  done\n  if [[ -n \"$MISSING\" ]]; then\n    echo \"Some tools are missing, please make sure you have the 'git' and 'iproute2' packages installed\"\n    exit 1\n  fi\n}\n\ncleanup() {\n  local NODE=$1\n  local SSH=$(ssh_cmd \"$NODE\")\n  # Delete the old repository\n  $SSH $DEPLOY_USER@$NODE rm -rf ${GITTARGET}.git ${GITTARGET}\n}\n\nsync() {\n  local NODE=$1\n  local BRANCH=$2\n\n  # Synchronizes the configuration by creating a git repository on\n  # each node, pushing our branch, and updating the checkout.\n\n  if [[ \"$NODE\" != localhost ]]; then\n    SSH=$(ssh_cmd \"$NODE\")\n    GIT=\"eval $(git_cmd $NODE)\"\n\n    cleanup $NODE\n\n    # Update the git remote for the remote repository.\n    if ! $GIT remote add $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git; then\n      $GIT remote set-url $NODE $DEPLOY_USER@$NODE:${GITTARGET}.git\n    fi\n\n    # Initialize the git repository.  We're\n    # actually going to make two repositories here because git\n    # will complain if you try to push to a repository with a\n    # checkout. So we're going to create a \"bare\" repository\n    # and then clone a regular repository (with a checkout)\n    # from that.\n\n    $SSH $DEPLOY_USER@$NODE git init --bare --shared=0600 ${GITTARGET}.git\n    if [[ \"$BRANCH\" == \"HEAD\" ]]; then\n      # When deploying from an individual commit instead of a branch. 
This can\n      # happen when deploying from a Jenkins pipeline.\n      $GIT push $NODE HEAD:refs/heads/HEAD\n      $SSH $DEPLOY_USER@$NODE \"umask 0077 && git clone -s ${GITTARGET}.git ${GITTARGET} && git -C ${GITTARGET} checkout remotes/origin/HEAD\"\n    else\n      $GIT push $NODE $BRANCH\n      $SSH $DEPLOY_USER@$NODE \"umask 0077 && git clone -s ${GITTARGET}.git ${GITTARGET} && git -C ${GITTARGET} checkout ${BRANCH}\"\n    fi\n  fi\n}\n\ndeploynode() {\n  local NODE=$1\n  local ROLES=$2\n  local BRANCH=$3\n\n  # Deploy a node.  This runs the provision script on the node, with\n  # the appropriate roles.\n\n  sync $NODE $BRANCH\n\n  if [[ -z \"$ROLES\" ]]; then\n    echo \"No roles specified for $NODE, will deploy all roles\"\n  else\n    ROLES=\"--roles ${ROLES}\"\n  fi\n\n  logfile=deploy-${NODE}-$(date -Iseconds).log\n  SSH=$(ssh_cmd \"$NODE\")\n\n  if [[ \"$NODE\" = localhost ]]; then\n    SUDO=''\n    if [[ $(whoami) != 'root' ]]; then\n      SUDO=sudo\n    fi\n    $SUDO ./provision.sh --config ${CONFIG_FILE} ${ROLES} 2>&1 | tee $logfile\n  else\n    $SSH $DEPLOY_USER@$NODE \"cd ${GITTARGET} && git log -n1 HEAD && DISABLED_CONTROLLER=\\\"$DISABLED_CONTROLLER\\\" sudo --preserve-env=DISABLED_CONTROLLER ./provision.sh --config ${CONFIG_FILE} ${ROLES}\" 2>&1 | tee $logfile\n    cleanup $NODE\n  fi\n}\n\ncheckcert() {\n  local CERTNAME=$1\n  local CERTPATH=\"${CONFIG_DIR}/certs/${CERTNAME}\"\n  if [[ ! -f \"${CERTPATH}.crt\" || ! -e \"${CERTPATH}.key\" ]]; then\n    echo \"Missing ${CERTPATH}.crt or ${CERTPATH}.key files\"\n    exit 1\n  fi\n}\n\nloadconfig() {\n  if ! [[ -s ${CONFIG_FILE} && -s ${CONFIG_FILE}.secrets ]]; then\n    echo \"Must be run from initialized setup dir, maybe you need to 'initialize' first?\"\n    exit 1\n  fi\n  source common.sh\n  GITTARGET=arvados-deploy-config-${CLUSTER}\n\n  # Set up SSH so that it doesn't forward any environment variable. This is to avoid\n  # getting \"setlocale\" errors on the first run, depending on the distro being used\n  # to run the installer (like Debian).\n  SSH_CONFFILE=$(mktemp)\n  echo \"Include config SendEnv -*\" >${SSH_CONFFILE}\n}\n\nssh_cmd() {\n  local NODE=$1\n  if [ -z \"${USE_SSH_JUMPHOST}\" -o \"${NODE}\" == \"${USE_SSH_JUMPHOST}\" -o \"${NODE}\" == \"localhost\" ]; then\n    echo \"ssh -F ${SSH_CONFFILE}\"\n  else\n    echo \"ssh -F ${SSH_CONFFILE} -J ${DEPLOY_USER}@${USE_SSH_JUMPHOST}\"\n  fi\n}\n\ngit_cmd() {\n  local NODE=$1\n  echo \"GIT_SSH_COMMAND=\\\"$(ssh_cmd ${NODE})\\\" git\"\n}\n\nset +u\nsubcmd=\"$1\"\nset -u\n\nif [[ -n \"$subcmd\" ]]; then\n  shift\nfi\ncase \"$subcmd\" in\ninitialize)\n  if [[ ! -f provision.sh ]]; then\n    echo \"Must be run from arvados/tools/salt-install\"\n    exit 1\n  fi\n\n  checktools\n\n  set +u\n  SETUPDIR=$1\n  PARAMS=$2\n  SLS=$3\n  TERRAFORM=$4\n  set -u\n\n  err=\n  if [[ -z \"$PARAMS\" || ! -f local.params.example.$PARAMS ]]; then\n    echo \"Not found: local.params.example.$PARAMS\"\n    echo \"Expected one of multiple_hosts, single_host_multiple_hostnames, single_host_single_hostname\"\n    err=1\n  fi\n\n  if [[ -z \"$SLS\" || ! 
-d config_examples/$SLS ]]; then\n    echo \"Not found: config_examples/$SLS\"\n    echo \"Expected one of multi_host/aws, single_host/multiple_hostnames, single_host/single_hostname\"\n    err=1\n  fi\n\n  if [[ -z \"$SETUPDIR\" || -z \"$PARAMS\" || -z \"$SLS\" ]]; then\n    echo \"installer.sh <setup dir to initialize> <params template> <config template>\"\n    err=1\n  fi\n\n  if [[ -n \"$err\" ]]; then\n    exit 1\n  fi\n\n  echo \"Initializing $SETUPDIR\"\n  git init --shared=0600 $SETUPDIR\n  cp -r *.sh tests $SETUPDIR\n\n  cp local.params.example.$PARAMS $SETUPDIR/${CONFIG_FILE}\n  cp local.params.secrets.example $SETUPDIR/${CONFIG_FILE}.secrets\n  cp -r config_examples/$SLS $SETUPDIR/${CONFIG_DIR}\n\n  if [[ -n \"$TERRAFORM\" ]]; then\n    mkdir $SETUPDIR/terraform\n    cp -r $TERRAFORM/* $SETUPDIR/terraform/\n  fi\n\n  cd $SETUPDIR\n  echo '*.log' >.gitignore\n  echo '**/.terraform' >>.gitignore\n  echo '**/.infracost' >>.gitignore\n\n  if [[ -n \"$TERRAFORM\" ]]; then\n    git add terraform\n  fi\n\n  git add *.sh ${CONFIG_FILE} ${CONFIG_FILE}.secrets ${CONFIG_DIR} tests .gitignore\n  git commit -m\"initial commit\"\n\n  echo\n  echo \"Setup directory $SETUPDIR initialized.\"\n  if [[ -n \"$TERRAFORM\" ]]; then\n    (cd $SETUPDIR/terraform/vpc && terraform init)\n    (cd $SETUPDIR/terraform/data-storage && terraform init)\n    (cd $SETUPDIR/terraform/services && terraform init)\n    echo \"Now go to $SETUPDIR, customize 'terraform/vpc/terraform.tfvars' as needed, then run 'installer.sh terraform'\"\n  else\n    echo \"Now go to $SETUPDIR, customize '${CONFIG_FILE}', '${CONFIG_FILE}.secrets' and '${CONFIG_DIR}' as needed, then run 'installer.sh deploy'\"\n  fi\n  ;;\n\nterraform)\n  logfile=terraform-$(date -Iseconds).log\n  (cd terraform/vpc && terraform apply -auto-approve) 2>&1 | tee -a $logfile\n  (cd terraform/data-storage && terraform apply -auto-approve) 2>&1 | tee -a $logfile\n  (cd terraform/services && \\\n    terraform apply -auto-approve) 2>&1 | \\\n    grep -v letsencrypt_iam_secret_access_key | \\\n    grep -v database_password | \\\n    tee -a $logfile\n  (cd terraform/services && \\\n    echo -n 'letsencrypt_iam_secret_access_key = ' && \\\n    terraform output letsencrypt_iam_secret_access_key && \\\n    echo -n 'database_password = ' && \\\n    terraform output database_password 2>/dev/null || echo '<not set>'\n  ) 2>&1 | tee -a $logfile\n  ;;\n\nterraform-destroy)\n  logfile=terraform-$(date -Iseconds).log\n  (cd terraform/services && terraform destroy) 2>&1 | tee -a $logfile\n  (cd terraform/data-storage && terraform destroy) 2>&1 | tee -a $logfile\n  (cd terraform/vpc && terraform destroy) 2>&1 | tee -a $logfile\n  ;;\n\ngenerate-tokens)\n  for i in BLOB_SIGNING_KEY MANAGEMENT_TOKEN SYSTEM_ROOT_TOKEN ANONYMOUS_USER_TOKEN DATABASE_PASSWORD; do\n    echo ${i}=$(\n      tr -dc A-Za-z0-9 </dev/urandom | head -c 32\n      echo ''\n    )\n  done\n  ;;\n\ndeploy)\n  set +u\n  NODE=$1\n  set -u\n\n  checktools\n\n  loadconfig\n\n  if grep -rni 'fixme' ${CONFIG_FILE} ${CONFIG_FILE}.secrets ${CONFIG_DIR}; then\n    echo\n    echo \"Some parameters still need to be updated.  Please fix them and then re-run deploy.\"\n    exit 1\n  fi\n\n  if [[ -z \"${DATABASE_POSTGRESQL_VERSION:-}\" ]]; then\n    echo\n    echo \"Please configure DATABASE_POSTGRESQL_VERSION in local.params: It should match the version of the PostgreSQL service you're going to use.\"\n    exit 1\n  fi\n\n  if [[ ${SSL_MODE} == \"bring-your-own\" ]]; then\n    if [[ ! 
-z \"${ROLE2NODES['balancer']:-}\" ]]; then\n      checkcert balancer\n    fi\n    if [[ ! -z \"${ROLE2NODES['controller']:-}\" ]]; then\n      checkcert controller\n    fi\n    if [[ ! -z \"${ROLE2NODES['keepproxy']:-}\" ]]; then\n      checkcert keepproxy\n    fi\n    if [[ ! -z \"${ROLE2NODES['keepweb']:-}\" ]]; then\n      checkcert collections\n      checkcert download\n    fi\n    if [[ ! -z \"${ROLE2NODES['monitoring']:-}\" ]]; then\n      checkcert grafana\n      checkcert prometheus\n    fi\n    if [[ ! -z \"${ROLE2NODES['webshell']:-}\" ]]; then\n      checkcert webshell\n    fi\n    if [[ ! -z \"${ROLE2NODES['websocket']:-}\" ]]; then\n      checkcert websocket\n    fi\n    if [[ ! -z \"${ROLE2NODES['workbench']:-}\" ]]; then\n      checkcert workbench\n    fi\n    if [[ ! -z \"${ROLE2NODES['workbench2']:-}\" ]]; then\n      checkcert workbench2\n    fi\n  fi\n\n  BRANCH=$(git rev-parse --abbrev-ref HEAD)\n\n  set -x\n\n  git add -A\n  if ! git diff --cached --exit-code --quiet; then\n    git commit -m\"prepare for deploy\"\n  fi\n\n  # Used for rolling updates to disable individual nodes at the\n  # load balancer.\n  export DISABLED_CONTROLLER=\"\"\n  if [[ -z \"$NODE\" ]]; then\n    for NODE in \"${!NODES[@]}\"; do\n      # First, just confirm we can ssh to each node.\n      $(ssh_cmd \"$NODE\") $DEPLOY_USER@$NODE true\n    done\n\n    for NODE in \"${!NODES[@]}\"; do\n      # Do 'database' role first,\n      if [[ \"${NODES[$NODE]}\" =~ database ]]; then\n        deploynode $NODE \"${NODES[$NODE]}\" $BRANCH\n        unset NODES[$NODE]\n      fi\n    done\n\n    BALANCER=${ROLE2NODES['balancer']:-}\n\n    # Check if there are multiple controllers, they'll be comma-separated\n    # in ROLE2NODES\n    if [[ ${ROLE2NODES['controller']:-} =~ , ]]; then\n      # If we have multiple controllers then there must be\n      # load balancer. We want to do a rolling update, take\n      # down each node at the load balancer before updating\n      # it.\n\n      for NODE in \"${!NODES[@]}\"; do\n        if [[ \"${NODES[$NODE]}\" =~ controller ]]; then\n          export DISABLED_CONTROLLER=$NODE\n\n          # Update balancer that the node is disabled\n          deploynode $BALANCER \"${NODES[$BALANCER]}\" $BRANCH\n\n          # Now update the node itself\n          deploynode $NODE \"${NODES[$NODE]}\" $BRANCH\n          unset NODES[$NODE]\n        fi\n      done\n    else\n      # Only one controller, check if it wasn't already taken care of.\n      NODE=${ROLE2NODES['controller']:-}\n      if [[ -n \"${NODE}\" && ! -z \"${NODES[$NODE]:-}\" ]]; then\n        deploynode $NODE \"${NODES[$NODE]}\" $BRANCH\n        unset NODES[$NODE]\n      fi\n    fi\n\n    if [[ -n \"$BALANCER\" ]]; then\n      # Deploy balancer. In the rolling update case, this\n      # will re-enable all the controllers at the balancer.\n      export DISABLED_CONTROLLER=\"\"\n      deploynode $BALANCER \"${NODES[$BALANCER]}\" $BRANCH\n      unset NODES[$BALANCER]\n    fi\n\n    for NODE in \"${!NODES[@]}\"; do\n      # Everything else (we removed the nodes that we\n      # already deployed from the list)\n      deploynode $NODE \"${NODES[$NODE]}\" $BRANCH\n    done\n  else\n    # Just deploy the node that was supplied on the command line.\n    deploynode $NODE \"${NODES[$NODE]}\" $BRANCH\n  fi\n\n  set +x\n  echo\n  echo \"Completed deploy, run 'installer.sh diagnostics' to verify the install\"\n\n  ;;\n\ndiagnostics)\n  loadconfig\n\n  set +u\n  declare LOCATION=$1\n  set -u\n\n  if ! 
which arvados-client; then\n    echo \"arvados-client not found, install 'arvados-client' package with 'apt-get' or 'yum'\"\n    exit 1\n  fi\n\n  if [[ -z \"$LOCATION\" ]]; then\n    echo \"Need to provide '-internal-client' or '-external-client'\"\n    echo\n    echo \"-internal-client    You are running this on the same private network as the Arvados cluster (e.g. on one of the Arvados nodes)\"\n    echo \"-external-client    You are running this outside the private network of the Arvados cluster (e.g. your workstation)\"\n    exit 1\n  fi\n\n  export ARVADOS_API_HOST=\"${DOMAIN}:${CONTROLLER_EXT_SSL_PORT}\"\n  export ARVADOS_API_TOKEN=\"$SYSTEM_ROOT_TOKEN\"\n\n  arvados-client diagnostics $LOCATION\n  ;;\n\ndiagnostics-internal)\n  loadconfig\n  set -u\n\n  if [ -z \"${ROLE2NODES['shell']:-}\" ]; then\n    echo \"No node with 'shell' role was found, cannot run diagnostics-internal\"\n    exit 1\n  fi\n\n  # Pick the first shell node for running the tests\n  declare TESTNODE=$(echo ${ROLE2NODES['shell']} | cut -d\\, -f1)\n  declare SSH=$(ssh_cmd \"$TESTNODE\")\n\n  # Run diagnostics\n  echo \"Running diagnostics on $TESTNODE...\"\n  $SSH $DEPLOY_USER@$TESTNODE bash <<EOF\n  export ARVADOS_API_HOST=\"${DOMAIN}:${CONTROLLER_EXT_SSL_PORT}\"\n  export ARVADOS_API_TOKEN=\"$SYSTEM_ROOT_TOKEN\"\n  sudo --preserve-env=ARVADOS_API_HOST,ARVADOS_API_TOKEN arvados-client diagnostics -internal-client\nEOF\n\n  ;;\n\n*)\n  echo \"Arvados installer\"\n  echo \"\"\n  echo \"initialize             initialize the setup directory for configuration\"\n  echo \"terraform              create cloud resources using terraform\"\n  echo \"terraform-destroy      destroy cloud resources created by terraform\"\n  echo \"generate-tokens        generate random values for tokens\"\n  echo \"deploy                 deploy the configuration from the setup directory\"\n  echo \"diagnostics            check your install by running diagnostics locally\"\n  echo \"diagnostics-internal   check your install by running diagnostics on a shell node\"\n  ;;\nesac\n
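\n# Example session (the setup directory name is illustrative): initialize a\n# setup directory from the shipped templates, customize it, then deploy:\n#\n#   ./installer.sh initialize ~/arvados-setup multiple_hosts multi_host/aws\n#   cd ~/arvados-setup   # edit local.params, local.params.secrets, local_config_dir\n#   ./installer.sh deploy\n"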
  },
  {
    "path": "tools/salt-install/local.params.example.multiple_hosts",
    "content": "##########################################################\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n# These are the basic parameters to configure the installation\n\n# The Arvados cluster ID, needs to be 5 lowercase alphanumeric characters.\nCLUSTER=\"cluster_fixme_or_this_wont_work\"\n\n# The domain name you want to give to your cluster's hosts;\n# the end result hostnames will be $SERVICE.$DOMAIN\nDOMAIN=\"domain_fixme_or_this_wont_work\"\n\n# For multi-node installs, the ssh log in for each node\n# must be root or able to sudo\nDEPLOY_USER=admin\n\nINITIAL_USER=admin\n\n# If not specified, the initial user email will be composed as\n# INITIAL_USER@DOMAIN\nINITIAL_USER_EMAIL=\"admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work\"\n\n# Use a public node as a jump host for SSH sessions. This allows running the\n# installer from the outside of the cluster's local network and still reach\n# the internal servers for configuration deployment.\n# Comment out to disable.\nUSE_SSH_JUMPHOST=\"controller.${DOMAIN}\"\n\nAWS_REGION=\"fixme_or_this_wont_work\"\n\n# SSL CERTIFICATES\n# Arvados requires SSL certificates to work correctly. This installer supports these options:\n# * self-signed: let the installer create self-signed certificate(s)\n# * bring-your-own: supply your own certificate(s) in the `certs` directory\n# * lets-encrypt: automatically obtain and install SSL certificates for your hostname(s)\n#\n# See https://doc.arvados.org/intall/salt-multi-host.html for more information.\nSSL_MODE=\"lets-encrypt\"\nUSE_LETSENCRYPT_ROUTE53=\"yes\"\n# For collections, we need to obtain a wildcard certificate for\n# '*.collections.<cluster>.<domain>'. This is only possible through a DNS-01 challenge.\n# For that reason, you'll need to provide AWS credentials with permissions to manage\n# RRs in the route53 zone for the cluster.\n# WARNING!: If AWS credentials files already exist in the hosts, they won't be replaced.\nLE_AWS_REGION=\"${AWS_REGION}\"\n\n# Compute node configurations\nCOMPUTE_AMI=\"ami_id_fixme_or_this_wont_work\"\nCOMPUTE_SG=\"security_group_fixme_or_this_wont_work\"\nCOMPUTE_SUBNET=\"subnet_fixme_or_this_wont_work\"\nCOMPUTE_AWS_REGION=\"${AWS_REGION}\"\nCOMPUTE_USER=\"${DEPLOY_USER}\"\n\n# Keep S3 backend settings\nKEEP_AWS_REGION=\"${AWS_REGION}\"\nKEEP_AWS_S3_BUCKET=\"${CLUSTER}-nyw5e-000000000000000-volume\"\nKEEP_AWS_IAM_ROLE=\"${CLUSTER}-keepstore-00-iam-role\"\n\n# If you going to provide your own certificates for Arvados, the provision script can\n# help you deploy them. 
In order to do that, you need to set `SSL_MODE=bring-your-own` above,\n# and copy the required certificates under the directory specified in the next line.\n# The certs will be copied from this directory by the provision script.\n# Please set it to the FULL PATH to the certs dir if you're going to use a different dir\n# Default is \"${SCRIPT_DIR}/certs\", where the variable \"SCRIPT_DIR\" has the path to the\n# directory where the \"provision.sh\" script was copied on the destination host.\n# CUSTOM_CERTS_DIR=\"${SCRIPT_DIR}/local_config_dir/certs\"\n# The script expects cert/key files with these basenames (matching the role except for\n# keepweb, which is split into download/collections):\n#  \"controller\"\n#  \"websocket\"\n#  \"workbench\"\n#  \"workbench2\"\n#  \"webshell\"\n#  \"download\"         # Part of keepweb\n#  \"collections\"      # Part of keepweb\n#  \"keepproxy\"        # Keepproxy\n#  \"prometheus\"\n#  \"grafana\"\n# E.g., for 'keepproxy', the script will look for\n# ${CUSTOM_CERTS_DIR}/keepproxy.crt\n# ${CUSTOM_CERTS_DIR}/keepproxy.key\n\n# Set the following to \"yes\" if the key files are encrypted and optionally set\n# a custom AWS secret name for each node to retrieve the password.\nSSL_KEY_ENCRYPTED=\"no\"\nSSL_KEY_AWS_SECRET_NAME=\"${CLUSTER}-arvados-ssl-privkey-password\"\nSSL_KEY_AWS_REGION=\"${AWS_REGION}\"\n\n# Customize Prometheus, Grafana and Loki web UI access credentials\nMONITORING_USERNAME=${INITIAL_USER}\nMONITORING_EMAIL=${INITIAL_USER_EMAIL}\n\n# Sets the directory for Grafana dashboards\n# GRAFANA_DASHBOARDS_DIR=\"${SCRIPT_DIR}/local_config_dir/dashboards\"\n\n# Grafana's Alertmanager configuration.\n# Set GRAFANA_SMTP_SERVER as \"host:port\" value to enable alerting via SMTP.\n# GRAFANA_SMTP_SERVER=\n# GRAFANA_SMTP_FROM_EMAIL=grafana@example.com\n# GRAFANA_SMTP_FROM_NAME=\"Grafana\"\n\n# Sets the amount of data (expressed in time) Prometheus keeps in its\n# time-series database. 
Default is 15 days.\n# PROMETHEUS_DATA_RETENTION_TIME=\"180d\"\n\n# Loki S3 storage settings\nLOKI_AWS_S3_BUCKET=\"${CLUSTER}-loki-object-storage\"\nLOKI_LOG_RETENTION_TIME=\"180d\"\nLOKI_AWS_REGION=\"${AWS_REGION}\"\n\n# The mapping of nodes to roles\n# installer.sh will log in to each of these nodes and then provision\n# it for the specified roles.\nNODES=(\n  [controller.${DOMAIN}]=database,controller\n  [workbench.${DOMAIN}]=monitoring,workbench,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance\n  [keep0.${DOMAIN}]=keepstore\n  [shell.${DOMAIN}]=shell\n)\n\n# Host SSL port where you want to point your browser to access Arvados\n# Defaults to 443 for regular runs, and to 8443 when called in Vagrant.\n# You can point it to another port if desired\n# In Vagrant, make sure it matches what you set in the Vagrantfile (8443)\nCONTROLLER_EXT_SSL_PORT=443\nKEEP_EXT_SSL_PORT=443\n# Both for collections and downloads\nKEEPWEB_EXT_SSL_PORT=443\nWEBSHELL_EXT_SSL_PORT=443\nWEBSOCKET_EXT_SSL_PORT=443\nWORKBENCH1_EXT_SSL_PORT=443\nWORKBENCH2_EXT_SSL_PORT=443\n\n# Internal IPs for the configuration\nCLUSTER_INT_CIDR=10.1.0.0/16\n\n# Note the IPs in this example are shared between roles, as suggested in\n# https://doc.arvados.org/main/install/salt-multi-host.html\nCONTROLLER_INT_IP=10.1.1.11\nDATABASE_INT_IP=${CONTROLLER_INT_IP}\nWORKBENCH1_INT_IP=10.1.1.15\nDISPATCHER_INT_IP=${WORKBENCH1_INT_IP}\nKEEPBALANCE_INT_IP=${WORKBENCH1_INT_IP}\nWEBSOCKET_INT_IP=${WORKBENCH1_INT_IP}\n# Both for collections and downloads\nKEEPWEB_INT_IP=${WORKBENCH1_INT_IP}\nWORKBENCH2_INT_IP=${WORKBENCH1_INT_IP}\nWEBSHELL_INT_IP=${WORKBENCH1_INT_IP}\nKEEP_INT_IP=${WORKBENCH1_INT_IP}\nKEEPSTORE0_INT_IP=10.1.2.13\nSHELL_INT_IP=10.1.2.17\n\nDATABASE_NAME=\"${CLUSTER}_arvados\"\nDATABASE_USER=\"${CLUSTER}_arvados\"\n# Set these if using an external PostgreSQL service.\n#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=\n#DATABASE_POSTGRESQL_VERSION=\n\n# Performance tuning parameters.  If these are not set, the number of workers\n# defaults to the number of CPUs, queued requests defaults to 128,\n# and gateway tunnels defaults to 1000.\n#CONTROLLER_MAX_WORKERS=\n#CONTROLLER_MAX_QUEUED_REQUESTS=\n#CONTROLLER_MAX_GATEWAY_TUNNELS=\n\n# The directory to check for the config files (pillars, states) you want to use.\n# There are a few examples under 'config_examples'.\n# CONFIG_DIR=\"local_config_dir\"\n\n# Extra states to apply. If you use your own subdir, change this value accordingly\n# EXTRA_STATES_DIR=\"${CONFIG_DIR}/states\"\n\n# These are ARVADOS-related settings.\n# Which release of Arvados repo you want to use\nRELEASE=\"production\"\n# Which version of Arvados you want to install. Defaults to latest stable\n# VERSION=\"2.1.2-1\"\n\n# This is an arvados-formula setting.\n# If branch is set, the script will switch to it before running salt\n# Usually not needed, only used for testing\n# BRANCH=\"main\"\n\n##########################################################\n# Usually there's no need to modify things below this line\n\n# Formulas versions\n# ARVADOS_TAG=\"2.2.0\"\n# POSTGRES_TAG=\"v0.44.0\"\n# NGINX_TAG=\"v2.8.1\"\n# DOCKER_TAG=\"v2.4.2\"\n# LOCALE_TAG=\"v0.3.4\"\n# LETSENCRYPT_TAG=\"v2.1.0\"\n# PROMETHEUS_TAG=\"v5.6.5\"\n# GRAFANA_TAG=\"v3.1.3\"\n"
  },
  {
    "path": "tools/salt-install/local.params.example.single_host_multiple_hostnames",
    "content": "##########################################################\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n# These are the basic parameters to configure the installation\n\n# The Arvados cluster ID, needs to be 5 lowercase alphanumeric characters.\nCLUSTER=\"cluster_fixme_or_this_wont_work\"\n\n# The domainname you want tou give to your cluster's hosts\nDOMAIN=\"domain_fixme_or_this_wont_work\"\n\n# For multi-node installs, the ssh log in for each node\n# must be root or able to sudo\nDEPLOY_USER=admin\n\nINITIAL_USER=admin\n\n# If not specified, the initial user email will be composed as\n# INITIAL_USER@CLUSTER.DOMAIN\nINITIAL_USER_EMAIL=\"admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work\"\n\n# SSL CERTIFICATES\n# Arvados requires SSL certificates to work correctly. This installer supports these options:\n# * self-signed: let the installer create self-signed certificate(s)\n# * bring-your-own: supply your own certificate(s) in the `certs` directory\n# * lets-encrypt: automatically obtain and install SSL certificates for your hostname(s)\n#\n# See https://doc.arvados.org/intall/salt-single-host.html#certificates for more information.\nSSL_MODE=\"self-signed\"\n\n# CUSTOM_CERTS_DIR is only used when SSL_MODE is set to \"bring-your-own\".\n# See https://doc.arvados.org/intall/salt-single-host.html#bring-your-own for more information.\n# CUSTOM_CERTS_DIR=\"${SCRIPT_DIR}/local_config_dir/certs\"\n\n# Set the following to \"yes\" if the key files are encrypted and optionally set\n# a custom AWS secret name for each node to retrieve the password.\nSSL_KEY_ENCRYPTED=\"no\"\nSSL_KEY_AWS_SECRET_NAME=\"${CLUSTER}-arvados-ssl-privkey-password\"\n\n# Customize Prometheus & Grafana web UI access credentials\nMONITORING_USERNAME=${INITIAL_USER}\nMONITORING_PASSWORD=${INITIAL_USER_PASSWORD}\nMONITORING_EMAIL=${INITIAL_USER_EMAIL}\n# Sets the directory for Grafana dashboards\n# GRAFANA_DASHBOARDS_DIR=\"${SCRIPT_DIR}/local_config_dir/dashboards\"\n\n# The mapping of nodes to roles\n# installer.sh will log in to each of these nodes and then provision\n# it for the specified roles.\nNODES=(\n  [localhost]='database,controller,monitoring,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance,keepstore'\n)\n\n# External ports used by the Arvados services\nCONTROLLER_EXT_SSL_PORT=443\nKEEP_EXT_SSL_PORT=25101\nKEEPWEB_EXT_SSL_PORT=9002\nWEBSHELL_EXT_SSL_PORT=4202\nWEBSOCKET_EXT_SSL_PORT=8002\nWORKBENCH1_EXT_SSL_PORT=443\nWORKBENCH2_EXT_SSL_PORT=3001\n\nCLUSTER_INT_CIDR=\"\"\nCONTROLLER_INT_IP=\"\"\nDATABASE_INT_IP=\"\"\nWORKBENCH1_INT_IP=\"\"\nDISPATCHER_INT_IP=\"\"\nKEEPBALANCE_INT_IP=\"\"\nWEBSOCKET_INT_IP=\"\"\nKEEPWEB_INT_IP=\"\"\nWORKBENCH2_INT_IP=\"\"\nWEBSHELL_INT_IP=\"\"\nKEEP_INT_IP=\"\"\nKEEPSTORE0_INT_IP=\"\"\nSHELL_INT_IP=\"\"\n\nDATABASE_NAME=\"${CLUSTER}_arvados\"\nDATABASE_USER=\"${CLUSTER}_arvados\"\n# Set these if using an external PostgreSQL service.\n#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=\n#DATABASE_POSTGRESQL_VERSION=\n\n# The directory to check for the config files (pillars, states) you want to use.\n# There are a few examples under 'config_examples'.\n# CONFIG_DIR=\"local_config_dir\"\n\n# Extra states to apply. If you use your own subdir, change this value accordingly\n# EXTRA_STATES_DIR=\"${CONFIG_DIR}/states\"\n\n# These are ARVADOS-related settings.\n# Which release of Arvados repo you want to use\nRELEASE=\"production\"\n# Which version of Arvados you want to install. 
Defaults to latest stable\n# VERSION=\"2.1.2-1\"\n\n# This is an arvados-formula setting.\n# If branch is set, the script will switch to it before running salt\n# Usually not needed, only used for testing\n# BRANCH=\"main\"\n\n##########################################################\n# Usually there's no need to modify things below this line\n\n# Formulas versions\n# ARVADOS_TAG=\"2.2.0\"\n# POSTGRES_TAG=\"v0.44.0\"\n# NGINX_TAG=\"v2.8.1\"\n# DOCKER_TAG=\"v2.4.2\"\n# LOCALE_TAG=\"v0.3.4\"\n# LETSENCRYPT_TAG=\"v2.1.0\"\n# PROMETHEUS_TAG=\"v5.6.5\"\n# GRAFANA_TAG=\"v3.1.3\"\n"
  },
  {
    "path": "tools/salt-install/local.params.example.single_host_single_hostname",
    "content": "##########################################################\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n# These are the basic parameters to configure the installation\n\n# The Arvados cluster ID, needs to be 5 lowercase alphanumeric characters.\nCLUSTER=\"cluster_fixme_or_this_wont_work\"\n\n# The domainname for your cluster's hosts\nDOMAIN=\"domain_fixme_or_this_wont_work\"\n\n# For multi-node installs, the ssh log in for each node\n# must be root or able to sudo\nDEPLOY_USER=admin\n\nINITIAL_USER=admin\n\n# If not specified, the initial user email will be composed as\n# INITIAL_USER@CLUSTER.DOMAIN\nINITIAL_USER_EMAIL=\"admin@cluster_fixme_or_this_wont_work.domain_fixme_or_this_wont_work\"\n\n# SSL CERTIFICATES\n# Arvados requires SSL certificates to work correctly. This installer supports these options:\n# * self-signed: let the installer create self-signed certificate(s)\n# * bring-your-own: supply your own certificate(s) in the `certs` directory\n# * lets-encrypt: automatically obtain and install SSL certificates for your hostname(s)\n#\n# See https://doc.arvados.org/intall/salt-single-host.html#certificates for more information.\nSSL_MODE=\"self-signed\"\n\n# CUSTOM_CERTS_DIR is only used when SSL_MODE is set to \"bring-your-own\".\n# See https://doc.arvados.org/intall/salt-single-host.html#bring-your-own for more information.\n# CUSTOM_CERTS_DIR=\"${SCRIPT_DIR}/local_config_dir/certs\"\n\n# Set the following to \"yes\" if the key files are encrypted and optionally set\n# a custom AWS secret name for each node to retrieve the password.\nSSL_KEY_ENCRYPTED=\"no\"\nSSL_KEY_AWS_SECRET_NAME=\"${CLUSTER}-arvados-ssl-privkey-password\"\n\n# Customize Prometheus & Grafana web UI access credentials\nMONITORING_USERNAME=${INITIAL_USER}\nMONITORING_PASSWORD=${INITIAL_USER_PASSWORD}\nMONITORING_EMAIL=${INITIAL_USER_EMAIL}\n# Sets the directory for Grafana dashboards\n# GRAFANA_DASHBOARDS_DIR=\"${SCRIPT_DIR}/local_config_dir/dashboards\"\n\n# The mapping of nodes to roles\n# installer.sh will log in to each of these nodes and then provision\n# it for the specified roles.\nNODES=(\n  [localhost]='database,controller,monitoring,workbench2,webshell,keepproxy,keepweb,websocket,dispatcher,keepbalance,keepstore'\n)\n\n# HOSTNAME_EXT must be set to the address that users will use to\n# connect to the instance (e.g. what they will type into the URL bar\n# of the browser to get to workbench).  
If you haven't given the\n# instance a working DNS name, you might need to use an IP address\n# here.\nHOSTNAME_EXT=\"hostname_ext_fixme_or_this_wont_work\"\n\n# The internal IP address for the host.\nIP_INT=\"ip_int_fixme_or_this_wont_work\"\n\n# External ports used by the Arvados services\nCONTROLLER_EXT_SSL_PORT=8800\nKEEP_EXT_SSL_PORT=8801\nKEEPWEB_EXT_SSL_PORT=8802\nWEBSHELL_EXT_SSL_PORT=8803\nWEBSOCKET_EXT_SSL_PORT=8804\nWORKBENCH1_EXT_SSL_PORT=8805\nWORKBENCH2_EXT_SSL_PORT=443\n\nCLUSTER_INT_CIDR=\"\"\nCONTROLLER_INT_IP=\"\"\nDATABASE_INT_IP=\"\"\nWORKBENCH1_INT_IP=\"\"\nDISPATCHER_INT_IP=\"\"\nKEEPBALANCE_INT_IP=\"\"\nWEBSOCKET_INT_IP=\"\"\nKEEPWEB_INT_IP=\"\"\nWORKBENCH2_INT_IP=\"\"\nWEBSHELL_INT_IP=\"\"\nKEEP_INT_IP=\"\"\nKEEPSTORE0_INT_IP=\"\"\nSHELL_INT_IP=\"\"\n\nDATABASE_NAME=\"${CLUSTER}_arvados\"\nDATABASE_USER=\"${CLUSTER}_arvados\"\n# Set these if using an external PostgreSQL service.\n#DATABASE_EXTERNAL_SERVICE_HOST_OR_IP=\n#DATABASE_POSTGRESQL_VERSION=\n\n# The directory to check for the config files (pillars, states) you want to use.\n# There are a few examples under 'config_examples'.\n# CONFIG_DIR=\"local_config_dir\"\n\n# Extra states to apply. If you use your own subdir, change this value accordingly\n# EXTRA_STATES_DIR=\"${CONFIG_DIR}/states\"\n\n# These are ARVADOS-related settings.\n# Which release of Arvados repo you want to use\nRELEASE=\"production\"\n# Which version of Arvados you want to install. Defaults to latest stable\n# VERSION=\"2.1.2-1\"\n\n# This is an arvados-formula setting.\n# If branch is set, the script will switch to it before running salt\n# Usually not needed, only used for testing\n# BRANCH=\"main\"\n\n##########################################################\n# Usually there's no need to modify things below this line\n\n# Formulas versions\n# ARVADOS_TAG=\"2.2.0\"\n# POSTGRES_TAG=\"v0.44.0\"\n# NGINX_TAG=\"v2.8.1\"\n# DOCKER_TAG=\"v2.4.2\"\n# LOCALE_TAG=\"v0.3.4\"\n# LETSENCRYPT_TAG=\"v2.1.0\"\n# PROMETHEUS_TAG=\"v5.6.5\"\n# GRAFANA_TAG=\"v3.1.3\"\n"
  },
  {
    "path": "tools/salt-install/local.params.secrets.example",
    "content": "##########################################################\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n# These are the security-sensitive parameters to configure the installation\n\nINITIAL_USER_PASSWORD=\"fixme\"\nMONITORING_PASSWORD=${INITIAL_USER_PASSWORD}\n\n# YOU SHOULD CHANGE THESE TO SOME RANDOM STRINGS\nBLOB_SIGNING_KEY=fixmeblobsigningkeymushaveatleast32characters\nMANAGEMENT_TOKEN=fixmemanagementtokenmushaveatleast32characters\nSYSTEM_ROOT_TOKEN=fixmesystemroottokenmushaveatleast32characters\nANONYMOUS_USER_TOKEN=fixmeanonymoususertokenmushaveatleast32characters\nDATABASE_PASSWORD=fixmeplease_set_this_to_some_secure_value\n\nLE_AWS_ACCESS_KEY_ID=\"FIXME\"\nLE_AWS_SECRET_ACCESS_KEY=\"fixme\"\n\n# SMTP server credentials for Grafana's Alertmanager\n# GRAFANA_SMTP_USER=\n# GRAFANA_SMTP_PASSWORD=\n\n# Read https://doc.arvados.org/install/crunch2-cloud/install-compute-node.html#sshkeypair\n# for details on how to create this key.\nDISPATCHER_SSH_PRIVKEY=\"fixme\"\n\n"
  },
  {
    "path": "tools/salt-install/provision.sh",
    "content": "#!/bin/bash\n\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n# If you want to test arvados in a single host, you can run this script, which\n# will install it using salt masterless\n# This script is run by the Vagrant file when you run it with\n#\n# vagrant up\n\nset -eu\nset -o pipefail\n\n# capture the directory that the script is running from\nSCRIPT_DIR=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\n\nusage() {\n  echo >&2\n  echo >&2 \"Usage: ${0} [-h] [-h]\"\n  echo >&2\n  echo >&2 \"${0} options:\"\n  echo >&2 \"  -d, --debug                                 Run salt installation in debug mode\"\n  echo >&2 \"  -c <local.params>, --config <local.params>  Path to the local.params config file\"\n  echo >&2 \"  -t, --test                                  Test cluster by running \\`arvados-client diagnostics\\`\"\n  echo >&2 \"                                              and a simple workflow\"\n  echo >&2 \"  -r, --roles                                 List of Arvados roles to apply to the host, comma separated\"\n  echo >&2 \"                                              Possible values are:\"\n  echo >&2 \"                                                balancer\"\n  echo >&2 \"                                                controller\"\n  echo >&2 \"                                                dispatcher\"\n  echo >&2 \"                                                keepproxy\"\n  echo >&2 \"                                                keepbalance\"\n  echo >&2 \"                                                keepstore\"\n  echo >&2 \"                                                keepweb\"\n  echo >&2 \"                                                monitoring\"\n  echo >&2 \"                                                shell\"\n  echo >&2 \"                                                webshell\"\n  echo >&2 \"                                                websocket\"\n  echo >&2 \"                                                workbench\"\n  echo >&2 \"                                                workbench2\"\n  echo >&2 \"                                              Defaults to applying them all\"\n  echo >&2 \"  -h, --help                                  Display this help and exit\"\n  echo >&2 \"  --dump-config <dest_dir>                    Dumps the pillars and states to a directory\"\n  echo >&2 \"                                              This parameter does not perform any installation at all. 
It's\"\n  echo >&2 \"                                              intended to give you a parsed set of configuration files so\"\n  echo >&2 \"                                              you can inspect them or use them in your Saltstack infrastructure.\"\n  echo >&2 \"                                              It\"\n  echo >&2 \"                                                - parses the pillar and states templates,\"\n  echo >&2 \"                                                - downloads the helper formulas with their desired versions,\"\n  echo >&2 \"                                                - prepares the 'top.sls' files both for pillars and states\"\n  echo >&2 \"                                                  for the selected role(s)\"\n  echo >&2 \"                                                - writes the resulting files into <dest_dir>\"\n  echo >&2 \"  -v, --vagrant                               Run in vagrant and use the /vagrant shared dir\"\n  echo >&2 \"  --development                               Run in dev mode, using snakeoil certs\"\n  echo >&2\n}\n\narguments() {\n  # NOTE: This requires GNU getopt (part of the util-linux package on Debian-based distros).\n  if ! which getopt > /dev/null; then\n    echo >&2 \"GNU getopt is required to run this script. Please install it and re-run it\"\n    exit 1\n  fi\n\n  TEMP=$(getopt -o c:dhp:r:tv \\\n    --long config:,debug,development,dump-config:,help,roles:,test,vagrant \\\n    -n \"${0}\" -- \"${@}\")\n\n  if [ ${?} != 0 ];\n    then echo \"Please check the parameters you entered and re-run\"\n    exit 1\n  fi\n  # Note the quotes around `$TEMP': they are essential!\n  eval set -- \"$TEMP\"\n\n  while [ ${#} -ge 1 ]; do\n    case ${1} in\n      -c | --config)\n        CONFIG_FILE=${2}\n        shift 2\n        ;;\n      -d | --debug)\n        LOG_LEVEL=\"debug\"\n        shift\n        set -x\n        ;;\n      --dump-config)\n        if [[ ${2} = /* ]]; then\n          DUMP_SALT_CONFIG_DIR=${2}\n        else\n          DUMP_SALT_CONFIG_DIR=${PWD}/${2}\n        fi\n        ## states\n        S_DIR=\"${DUMP_SALT_CONFIG_DIR}/salt\"\n        ## formulas\n        F_DIR=\"${DUMP_SALT_CONFIG_DIR}/formulas\"\n        ## pillars\n        P_DIR=\"${DUMP_SALT_CONFIG_DIR}/pillars\"\n        ## tests\n        T_DIR=\"${DUMP_SALT_CONFIG_DIR}/tests\"\n        DUMP_CONFIG=\"yes\"\n        shift 2\n        ;;\n      --development)\n        DEV_MODE=\"yes\"\n        shift 1\n        ;;\n      -r | --roles)\n        for i in ${2//,/ }\n          do\n            # Verify the role exists\n            if [[ ! 
\"database,balancer,controller,keepstore,websocket,keepweb,workbench2,webshell,keepbalance,keepproxy,shell,workbench,dispatcher,monitoring\" == *\"$i\"* ]]; then\n              echo \"The role '${i}' is not a valid role\"\n              usage\n              exit 1\n            fi\n            ROLES=\"${ROLES:-} ${i}\"\n          done\n          shift 2\n        ;;\n      -t | --test)\n        TEST=\"yes\"\n        shift\n        ;;\n      -v | --vagrant)\n        VAGRANT=\"yes\"\n        shift\n        ;;\n      --)\n        shift\n        break\n        ;;\n      *)\n        usage\n        exit 1\n        ;;\n    esac\n  done\n}\n\ncopy_custom_cert() {\n  cert_dir=${1}\n  cert_name=${2}\n\n  mkdir -p --mode=0700 /srv/salt/certs\n\n  if [ -f ${cert_dir}/${cert_name}.crt ]; then\n    install --mode=0600 ${cert_dir}/${cert_name}.crt /srv/salt/certs/arvados-${cert_name}.pem\n  else\n    echo \"${cert_dir}/${cert_name}.crt does not exist. Exiting\"\n    exit 1\n  fi\n  if [ -f ${cert_dir}/${cert_name}.key ]; then\n    install --mode=0600 ${cert_dir}/${cert_name}.key /srv/salt/certs/arvados-${cert_name}.key\n  else\n    echo \"${cert_dir}/${cert_name}.key does not exist. Exiting\"\n    exit 1\n  fi\n}\n\napply_var_substitutions() {\n  local SRCFILE=$1\n  local DSTFILE=$2\n  sed \"s#__ANONYMOUS_USER_TOKEN__#${ANONYMOUS_USER_TOKEN}#g;\n       s#__BLOB_SIGNING_KEY__#${BLOB_SIGNING_KEY}#g;\n       s#__CONTROLLER_EXT_SSL_PORT__#${CONTROLLER_EXT_SSL_PORT}#g;\n       s#__CLUSTER__#${CLUSTER}#g;\n       s#__DOMAIN__#${DOMAIN}#g;\n       s#__HOSTNAME_EXT__#${HOSTNAME_EXT}#g;\n       s#__IP_INT__#${IP_INT}#g;\n       s#__INITIAL_USER_EMAIL__#${INITIAL_USER_EMAIL}#g;\n       s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g;\n       s#__INITIAL_USER__#${INITIAL_USER}#g;\n       s#__LE_AWS_REGION__#${LE_AWS_REGION:-}#g;\n       s#__LE_AWS_SECRET_ACCESS_KEY__#${LE_AWS_SECRET_ACCESS_KEY:-}#g;\n       s#__LE_AWS_ACCESS_KEY_ID__#${LE_AWS_ACCESS_KEY_ID:-}#g;\n       s#__DATABASE_NAME__#${DATABASE_NAME}#g;\n       s#__DATABASE_USER__#${DATABASE_USER}#g;\n       s#__DATABASE_PASSWORD__#${DATABASE_PASSWORD}#g;\n       s#__DATABASE_INT_IP__#${DATABASE_INT_IP:-}#g;\n       s#__DATABASE_EXTERNAL_SERVICE_HOST_OR_IP__#${DATABASE_EXTERNAL_SERVICE_HOST_OR_IP:-}#g;\n       s#__DATABASE_POSTGRESQL_VERSION__#${DATABASE_POSTGRESQL_VERSION}#g;\n       s#__KEEPWEB_EXT_SSL_PORT__#${KEEPWEB_EXT_SSL_PORT}#g;\n       s#__KEEP_EXT_SSL_PORT__#${KEEP_EXT_SSL_PORT}#g;\n       s#__MANAGEMENT_TOKEN__#${MANAGEMENT_TOKEN}#g;\n       s#__RELEASE__#${RELEASE}#g;\n       s#__SYSTEM_ROOT_TOKEN__#${SYSTEM_ROOT_TOKEN}#g;\n       s#__VERSION__#${VERSION}#g;\n       s#__WEBSHELL_EXT_SSL_PORT__#${WEBSHELL_EXT_SSL_PORT}#g;\n       s#__WEBSOCKET_EXT_SSL_PORT__#${WEBSOCKET_EXT_SSL_PORT}#g;\n       s#__WORKBENCH1_EXT_SSL_PORT__#${WORKBENCH1_EXT_SSL_PORT}#g;\n       s#__WORKBENCH2_EXT_SSL_PORT__#${WORKBENCH2_EXT_SSL_PORT}#g;\n       s#__CLUSTER_INT_CIDR__#${CLUSTER_INT_CIDR}#g;\n       s#__CONTROLLER_INT_IP__#${CONTROLLER_INT_IP}#g;\n       s#__WEBSOCKET_INT_IP__#${WEBSOCKET_INT_IP}#g;\n       s#__KEEP_INT_IP__#${KEEP_INT_IP}#g;\n       s#__KEEPSTORE0_INT_IP__#${KEEPSTORE0_INT_IP}#g;\n       s#__KEEPWEB_INT_IP__#${KEEPWEB_INT_IP}#g;\n       s#__WEBSHELL_INT_IP__#${WEBSHELL_INT_IP}#g;\n       s#__SHELL_INT_IP__#${SHELL_INT_IP}#g;\n       s#__WORKBENCH1_INT_IP__#${WORKBENCH1_INT_IP}#g;\n       s#__WORKBENCH2_INT_IP__#${WORKBENCH2_INT_IP}#g;\n       s#__SSL_KEY_ENCRYPTED__#${SSL_KEY_ENCRYPTED}#g;\n       
s#__SSL_KEY_AWS_REGION__#${SSL_KEY_AWS_REGION:-}#g;\n       s#__SSL_KEY_AWS_SECRET_NAME__#${SSL_KEY_AWS_SECRET_NAME}#g;\n       s#__CONTROLLER_MAX_WORKERS__#${CONTROLLER_MAX_WORKERS:-}#g;\n       s#__CONTROLLER_MAX_QUEUED_REQUESTS__#${CONTROLLER_MAX_QUEUED_REQUESTS:-128}#g;\n       s#__CONTROLLER_MAX_GATEWAY_TUNNELS__#${CONTROLLER_MAX_GATEWAY_TUNNELS:-1000}#g;\n       s#__MONITORING_USERNAME__#${MONITORING_USERNAME}#g;\n       s#__MONITORING_EMAIL__#${MONITORING_EMAIL}#g;\n       s#__MONITORING_PASSWORD__#${MONITORING_PASSWORD}#g;\n       s#__GRAFANA_SMTP_SERVER__#${GRAFANA_SMTP_SERVER:-}#g;\n       s#__GRAFANA_SMTP_USER__#${GRAFANA_SMTP_USER:-}#g;\n       s#__GRAFANA_SMTP_PASSWORD__#${GRAFANA_SMTP_PASSWORD:-}#g;\n       s#__GRAFANA_SMTP_FROM_EMAIL__#${GRAFANA_SMTP_FROM_EMAIL:-}#g;\n       s#__GRAFANA_SMTP_FROM_NAME__#${GRAFANA_SMTP_FROM_NAME:-}#g;\n       s#__DISPATCHER_SSH_PRIVKEY__#${DISPATCHER_SSH_PRIVKEY//$'\\n'/\\\\n}#g;\n       s#__ENABLE_BALANCER__#${ENABLE_BALANCER}#g;\n       s#__DISABLED_CONTROLLER__#${DISABLED_CONTROLLER}#g;\n       s#__BALANCER_NODENAME__#${ROLE2NODES['balancer']:-}#g;\n       s#__PROMETHEUS_NODENAME__#${ROLE2NODES['monitoring']:-}#g;\n       s#__PROMETHEUS_DATA_RETENTION_TIME__#${PROMETHEUS_DATA_RETENTION_TIME:-15d}#g;\n       s#__LOKI_AWS_S3_BUCKET__#${LOKI_AWS_S3_BUCKET:-}#g;\n       s#__LOKI_LOG_RETENTION_TIME__#${LOKI_LOG_RETENTION_TIME:-15d}#g;\n       s#__LOKI_AWS_REGION__#${LOKI_AWS_REGION:-}#g;\n       s#__CONTROLLER_NODES__#${ROLE2NODES['controller']:-}#g;\n       s#__NODELIST__#${NODELIST}#g;\n       s#__DISPATCHER_INT_IP__#${DISPATCHER_INT_IP}#g;\n       s#__KEEPBALANCE_INT_IP__#${KEEPBALANCE_INT_IP}#g;\n       s#__COMPUTE_AMI__#${COMPUTE_AMI:-}#g;\n       s#__COMPUTE_SG__#${COMPUTE_SG:-}#g;\n       s#__COMPUTE_SUBNET__#${COMPUTE_SUBNET:-}#g;\n       s#__COMPUTE_AWS_REGION__#${COMPUTE_AWS_REGION:-}#g;\n       s#__COMPUTE_USER__#${COMPUTE_USER:-}#g;\n       s#__KEEP_AWS_S3_BUCKET__#${KEEP_AWS_S3_BUCKET:-}#g;\n       s#__KEEP_AWS_IAM_ROLE__#${KEEP_AWS_IAM_ROLE:-}#g;\n       s#__KEEP_AWS_REGION__#${KEEP_AWS_REGION:-}#g\" \\\n  \"${SRCFILE}\" > \"${DSTFILE}\"\n}\n\nDEV_MODE=\"no\"\nCONFIG_FILE=\"${SCRIPT_DIR}/local.params\"\nCONFIG_DIR=\"local_config_dir\"\nDUMP_CONFIG=\"no\"\nLOG_LEVEL=\"info\"\nCONTROLLER_EXT_SSL_PORT=443\nTESTS_DIR=\"tests\"\n\nNGINX_INSTALL_SOURCE=\"install_from_repo\"\n\nCLUSTER=\"\"\nDOMAIN=\"\"\n\n# Hostnames/IPs used for single-host deploys\nIP_INT=\"127.0.1.1\"\n\n# Initial user setup\nINITIAL_USER=\"\"\nINITIAL_USER_EMAIL=\"\"\nINITIAL_USER_PASSWORD=\"\"\n\nCONTROLLER_EXT_SSL_PORT=8000\nKEEP_EXT_SSL_PORT=25101\n# Both for collections and downloads\nKEEPWEB_EXT_SSL_PORT=9002\nWEBSHELL_EXT_SSL_PORT=4202\nWEBSOCKET_EXT_SSL_PORT=8002\nWORKBENCH1_EXT_SSL_PORT=443\nWORKBENCH2_EXT_SSL_PORT=3001\n\nSSL_MODE=\"self-signed\"\nUSE_LETSENCRYPT_ROUTE53=\"no\"\nCUSTOM_CERTS_DIR=\"${SCRIPT_DIR}/local_config_dir/certs\"\n\nGRAFANA_DASHBOARDS_DIR=\"${SCRIPT_DIR}/local_config_dir/dashboards\"\n\n## These are ARVADOS-related parameters\n# For a stable release, change RELEASE \"production\" and VERSION to the\n# package version (including the iteration, e.g. 
X.Y.Z-1) of the\n# release.\n# The \"local.params.example.*\" files already set \"RELEASE=production\"\n# to deploy production-ready packages.\nRELEASE=\"development\"\nVERSION=\"latest\"\n\n# We pin the salt version to avoid potential incompatibilities when a new\n# stable version is released.\nSALT_VERSION=\"3006\"\n\n# Other formula versions we depend on\nARVADOS_TAG=\"525eb46bd46efd89a174d8b790d3219cd35ba149\"\nPOSTGRES_TAG=\"0081fa32e85e6fda0a00d9a5241e70b4bf10e506\"\nPOSTGRES_URL=\"https://github.com/arvados/postgres-formula.git\"\nNGINX_TAG=\"4e2d832dae6ecf8f47dc4b46e29faa3c5907edc2\"\nNGINX_URL=\"https://github.com/arvados/nginx-formula.git\"\nDOCKER_TAG=\"v2.4.2\"\nLOCALE_TAG=\"v0.3.5\"\nLETSENCRYPT_TAG=\"v3.2.0\"\nLOGROTATE_TAG=\"v0.14.0\"\nPROMETHEUS_TAG=\"v5.6.5\"\nGRAFANA_TAG=\"v3.1.3\"\n\n# Salt's dir\nDUMP_SALT_CONFIG_DIR=\"\"\n## states\nS_DIR=\"/srv/salt\"\nSTATES_TOP=${S_DIR}/top.sls\n## formulas\nF_DIR=\"/srv/formulas\"\n## pillars\nP_DIR=\"/srv/pillars\"\nPILLARS_TOP=${P_DIR}/top.sls\n## tests\nT_DIR=\"/tmp/cluster_tests\"\n\narguments \"${@}\"\n\ndeclare -A NODES\ndeclare -A ROLE2NODES\ndeclare NODELIST\n\nsource common.sh\n\nif [ ! -d ${CONFIG_DIR} ]; then\n  echo >&2 \"You don't seem to have a config directory with pillars and states.\"\n  echo >&2 \"Please create a '${CONFIG_DIR}' directory (as configured in your '${CONFIG_FILE}'). Please see\"\n  echo >&2 \"  * https://doc.arvados.org/install/salt-single-host.html#single_host, or\"\n  echo >&2 \"  * https://doc.arvados.org/install/salt-multi-host.html#multi_host_multi_hostnames\"\n  exit 1\nfi\n\nif grep -rni 'fixme' ${CONFIG_FILE}.secrets ${CONFIG_FILE} ${CONFIG_DIR} ; then\n  echo >&2 \"The config files have some parameters that need to be modified.\"\n  echo >&2 \"Please fix them and re-run the provision script.\"\n  exit 1\nfi\n\nif ! grep -qE '^[a-z0-9]{5}$' <<<${CLUSTER} ; then\n  echo >&2 \"ERROR: <CLUSTER> must be exactly 5 lowercase alphanumeric characters long\"\n  echo >&2 \"Fix the cluster name in the 'local.params' file and re-run the provision script\"\n  exit 1\nfi\n\n# Only used in single_host/single_name deploys\nif [ ! -z \"${HOSTNAME_EXT:-}\" ] ; then\n  # We need to add some extra control vars to manage a single certificate vs. multiple\n  USE_SINGLE_HOSTNAME=\"yes\"\n  # Make sure that the value configured as IP_INT is a real IP on the system.\n  # If we don't error out early here when there is a mismatch, the formula will\n  # fail with hard-to-interpret nginx errors later on.\n  ip addr list | grep \"${IP_INT}/\" >/dev/null\n  if [[ $? -ne 0 ]]; then\n    echo \"Unable to find the IP_INT address '${IP_INT}' on the system, please correct the value in local.params. Exiting...\"\n    exit 1\n  fi\nelse\n  USE_SINGLE_HOSTNAME=\"no\"\n  # We set this variable anyway, so the sed lines do not fail and we don't need to add more\n  # conditionals.\n  HOSTNAME_EXT=\"${DOMAIN}\"\nfi\n\nif [ \"${DUMP_CONFIG}\" = \"yes\" ]; then\n  echo \"The provision installer will just dump a config under ${DUMP_SALT_CONFIG_DIR} and exit\"\nelse\n  # Read the variables of /etc/os-release but prefix their names with `_OS_`\n  # to avoid name conflicts.\n  eval \"$(awk '(/^[A-Z_]+=/) { print \"_OS_\" $0 }' /etc/os-release)\"\n  echo \"Detected distro families: ${_OS_ID:-} ${_OS_ID_LIKE:-}\"\n\n  # Several of our formulas use the cron module, which requires the crontab\n  # command. 
We install systemd-cron to ensure we have that.\n  # The rest of these packages are required by the rest of the script.\n  for OS_ID in ${_OS_ID:-} ${_OS_ID_LIKE:-}; do\n    case \"$OS_ID\" in\n      rhel)\n        echo \"WARNING! Disabling SELinux, see https://dev.arvados.org/issues/18019\"\n        sed -i 's/SELINUX=enforcing/SELINUX=permissive/g' /etc/sysconfig/selinux\n        setenforce permissive\n        yum install -y curl git jq systemd-cron\n        if command -v salt-call >/dev/null; then\n            echo \"Salt already installed\"\n            break\n        fi\n        curl -L https://bootstrap.saltstack.com -o /tmp/bootstrap_salt.sh\n        sh /tmp/bootstrap_salt.sh -XdfP -x python3 stable ${SALT_VERSION}\n        break\n        ;;\n      debian)\n        DEBIAN_FRONTEND=noninteractive apt-get -o DPkg::Lock::Timeout=120 update\n        # This list includes our own dependencies, plus dependencies necessary\n        # to retrieve the Salt apt repository.\n        DEBIAN_FRONTEND=noninteractive apt-get install -y \\\n                                       apt-transport-https ca-certificates curl git gnupg jq systemd-cron\n        if command -v salt-call >/dev/null; then\n            echo \"Salt already installed\"\n            break\n        fi\n        salt_apt_key=/etc/apt/keyrings/SALT-PROJECT-GPG-PUBKEY-2023.asc\n        install -d -m 755 /etc/apt/keyrings /etc/apt/preferences.d\n        curl -fsSL -o \"$salt_apt_key\" \\\n             \"https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public\"\n        chmod go+r \"$salt_apt_key\"\n        install -b -m 644 /dev/stdin \"/etc/apt/preferences.d/salt.pref\" <<EOFPREFS\nExplanation: Salt $SALT_VERSION has been tested to successfully install Arvados.\nPackage: salt-*\nPin: version $SALT_VERSION.*\nPin-Priority: 995\nEOFPREFS\n        install -b -m 644 /dev/stdin \"/etc/apt/sources.list.d/salt.sources\" <<EOFSOURCES\nTypes: deb\nURIs: https://packages.broadcom.com/artifactory/saltproject-deb/\nSuites: stable\nComponents: main\nArchitectures: amd64\nSigned-by: $salt_apt_key\nEOFSOURCES\n        DEBIAN_FRONTEND=noninteractive apt-get -o DPkg::Lock::Timeout=120 update\n        DEBIAN_FRONTEND=noninteractive apt-get install -y salt-minion\n        break\n        ;;\n    esac\n  done\n\n  # Set Salt to masterless mode\n  systemctl disable --now salt-minion.service\n  cat > /etc/salt/minion << EOFSM\nfailhard: \"True\"\n\nfile_client: local\nfile_roots:\n  base:\n    - ${S_DIR}\n    - ${F_DIR}/*\n\npillar_roots:\n  base:\n    - ${P_DIR}\nEOFSM\nfi\n\nmkdir -p ${S_DIR} ${F_DIR} ${P_DIR} ${T_DIR}\n\n# Get the formulas and their dependencies\ncd ${F_DIR} || exit 1\necho \"Cloning formulas\"\ntest -d docker && ( cd docker && git fetch ) \\\n  || git clone --quiet https://github.com/saltstack-formulas/docker-formula.git ${F_DIR}/docker\n( cd docker && git checkout --quiet tags/\"${DOCKER_TAG}\" )\n\necho \"...locale\"\ntest -d locale && ( cd locale && git fetch ) \\\n  || git clone --quiet https://github.com/saltstack-formulas/locale-formula.git ${F_DIR}/locale\n( cd locale && git checkout --quiet tags/\"${LOCALE_TAG}\" )\n\necho \"...nginx\"\ntest -d nginx && ( cd nginx && git fetch ) \\\n  || git clone --quiet ${NGINX_URL} ${F_DIR}/nginx\n( cd nginx && git checkout --quiet \"${NGINX_TAG}\" )\n\necho \"...postgres\"\ntest -d postgres && ( cd postgres && git fetch ) \\\n  || git clone --quiet ${POSTGRES_URL} ${F_DIR}/postgres\n( cd postgres && git checkout --quiet \"${POSTGRES_TAG}\" )\n\necho \"...prometheus\"\ntest 
-d prometheus && ( cd prometheus && git fetch ) \\\n  || git clone --quiet https://github.com/saltstack-formulas/prometheus-formula.git ${F_DIR}/prometheus\n( cd prometheus && git checkout --quiet tags/\"${PROMETHEUS_TAG}\" )\n\necho \"...grafana\"\ntest -d grafana && ( cd grafana && git fetch ) \\\n  || git clone --quiet https://github.com/saltstack-formulas/grafana-formula.git ${F_DIR}/grafana\n( cd grafana && git checkout --quiet \"${GRAFANA_TAG}\" )\n\necho \"...letsencrypt\"\ntest -d letsencrypt && ( cd letsencrypt && git fetch ) \\\n  || git clone --quiet https://github.com/saltstack-formulas/letsencrypt-formula.git ${F_DIR}/letsencrypt\n( cd letsencrypt && git checkout --quiet tags/\"${LETSENCRYPT_TAG}\" )\n\necho \"...logrotate\"\ntest -d logrotate && ( cd logrotate && git fetch ) \\\n  || git clone --quiet https://github.com/saltstack-formulas/logrotate-formula.git ${F_DIR}/logrotate\n( cd logrotate && git checkout --quiet tags/\"${LOGROTATE_TAG}\" )\n\necho \"...arvados\"\ntest -d arvados && ( cd arvados && git fetch ) \\\n  || git clone --quiet https://github.com/arvados/arvados-formula ${F_DIR}/arvados\n( cd arvados && git checkout --quiet \"${ARVADOS_TAG}\" )\n\nif [ \"x${VAGRANT:-}\" = \"xyes\" ]; then\n  EXTRA_STATES_DIR=\"/home/vagrant/${CONFIG_DIR}/states\"\n  SOURCE_PILLARS_DIR=\"/home/vagrant/${CONFIG_DIR}/pillars\"\n  SOURCE_TOFS_DIR=\"/home/vagrant/${CONFIG_DIR}/tofs\"\n  SOURCE_TESTS_DIR=\"/home/vagrant/${TESTS_DIR}\"\nelse\n  EXTRA_STATES_DIR=\"${SCRIPT_DIR}/${CONFIG_DIR}/states\"\n  SOURCE_PILLARS_DIR=\"${SCRIPT_DIR}/${CONFIG_DIR}/pillars\"\n  SOURCE_TOFS_DIR=\"${SCRIPT_DIR}/${CONFIG_DIR}/tofs\"\n  SOURCE_TESTS_DIR=\"${SCRIPT_DIR}/${TESTS_DIR}\"\nfi\n\nSOURCE_STATES_DIR=\"${EXTRA_STATES_DIR}\"\n\necho \"Writing pillars and states\"\n\n# Replace variables (cluster, domain, etc.) in the pillars, states and tests\n# to ease deployment for newcomers\nif [ ! -d \"${SOURCE_PILLARS_DIR}\" ]; then\n  echo \"${SOURCE_PILLARS_DIR} does not exist or is not a directory. Exiting.\"\n  exit 1\nfi\nfor f in $(ls \"${SOURCE_PILLARS_DIR}\"/*); do\n  apply_var_substitutions \"${f}\" \"${P_DIR}\"/$(basename \"${f}\")\ndone\n\nif [ ! 
-d \"${SOURCE_TESTS_DIR}\" ]; then\n  echo \"WARNING: The tests directory was not copied to \\\"${SOURCE_TESTS_DIR}\\\".\"\n  if [ \"x${TEST:-}\" = \"xyes\" ]; then\n    echo \"WARNING: Disabling tests for this installation.\"\n  fi\n  TEST=\"no\"\nelse\n  mkdir -p ${T_DIR}\n  # Replace cluster and domain name in the test files\n  for f in $(ls \"${SOURCE_TESTS_DIR}\"/*); do\n    FILTERS=\"s#__CLUSTER__#${CLUSTER}#g;\n         s#__CONTROLLER_EXT_SSL_PORT__#${CONTROLLER_EXT_SSL_PORT}#g;\n         s#__DOMAIN__#${DOMAIN}#g;\n         s#__IP_INT__#${IP_INT}#g;\n         s#__INITIAL_USER_EMAIL__#${INITIAL_USER_EMAIL}#g;\n         s#__INITIAL_USER_PASSWORD__#${INITIAL_USER_PASSWORD}#g\n         s#__INITIAL_USER__#${INITIAL_USER}#g;\n         s#__DATABASE_PASSWORD__#${DATABASE_PASSWORD}#g;\n         s#__SYSTEM_ROOT_TOKEN__#${SYSTEM_ROOT_TOKEN}#g\"\n    if [ \"$USE_SINGLE_HOSTNAME\" = \"yes\" ]; then\n      FILTERS=\"s#__CLUSTER__.__DOMAIN__#${HOSTNAME_EXT}#g;\n         $FILTERS\"\n    fi\n    sed \"$FILTERS\" \\\n      \"${f}\" > ${T_DIR}/$(basename \"${f}\")\n  done\n  chmod 755 ${T_DIR}/run-test.sh\nfi\n\n# Replace helper state files that differ from the formula's examples\nif [ -d \"${SOURCE_STATES_DIR}\" ]; then\n  mkdir -p \"${F_DIR}\"/extra/extra\n  rm -rf \"${F_DIR}\"/extra/extra/*\n\n  for f in $(ls \"${SOURCE_STATES_DIR}\"/*); do\n    apply_var_substitutions \"${f}\" \"${F_DIR}/extra/extra\"/$(basename \"${f}\")\n  done\nfi\n\n# Now, we build the SALT states/pillars trees\n# As we need to separate both states and pillars in case we want specific\n# roles, we iterate on both at the same time\n\n# Formula template overrides (TOFS)\n# See: https://template-formula.readthedocs.io/en/latest/TOFS_pattern.html#template-override\nif [ -d ${SOURCE_TOFS_DIR} ]; then\n  find ${SOURCE_TOFS_DIR} -mindepth 1 -maxdepth 1 -type d -exec cp -r \"{}\" ${S_DIR} \\;\nfi\n\n# States\ncat > ${STATES_TOP} << EOFTSLS\nbase:\n  '*':\n    - locale\nEOFTSLS\n\n# Pillars\ncat > ${PILLARS_TOP} << EOFPSLS\nbase:\n  '*':\n    - locale\n    - arvados\nEOFPSLS\n\n# States, extra states\nif [ -d \"${F_DIR}\"/extra/extra ]; then\n  SKIP_SNAKE_OIL=\"snakeoil_certs\"\n\n  if [[ \"$DEV_MODE\" = \"yes\" || \"${SSL_MODE}\" == \"self-signed\" ]] ; then\n    # In dev mode, we create some snake oil certs that we'll\n    # use as CUSTOM_CERTS, so we don't skip the states file.\n    # Same when using self-signed certificates.\n    SKIP_SNAKE_OIL=\"dont_add_snakeoil_certs\"\n  fi\n  for f in $(ls \"${F_DIR}\"/extra/extra/*.sls | egrep -v \"${SKIP_SNAKE_OIL}|shell_\"); do\n  echo \"    - extra.$(basename ${f} | sed 's/.sls$//g')\" >> ${STATES_TOP}\n  done\n  # Use byo or self-signed certificates\n  if [ \"${SSL_MODE}\" != \"lets-encrypt\" ]; then\n    mkdir -p \"${F_DIR}\"/extra/extra/files\n  fi\nfi\n\n# If we want specific roles for a node, just add the desired states\n# and its dependencies\nif [ -z \"${ROLES:-}\" ]; then\n  # States\n  echo \"    - nginx\" >> ${STATES_TOP}\n  if [ \"${SSL_MODE}\" = \"lets-encrypt\" ]; then\n    if [ \"${USE_LETSENCRYPT_ROUTE53}\" = \"yes\" ]; then\n      grep -q \"aws_credentials\" ${STATES_TOP} || echo \"    - extra.aws_credentials\" >> ${STATES_TOP}\n    fi\n    grep -q \"letsencrypt\" ${STATES_TOP} || echo \"    - letsencrypt\" >> ${STATES_TOP}\n  else\n    mkdir -p --mode=0700 /srv/salt/certs\n    if [ \"${SSL_MODE}\" = \"bring-your-own\" ]; then\n      # Copy certs to formula extra/files\n      install --mode=0600 ${CUSTOM_CERTS_DIR}/* /srv/salt/certs/\n      # We add the custom_certs state\n 
     grep -q \"custom_certs\" ${STATES_TOP} || echo \"    - extra.custom_certs\" >> ${STATES_TOP}\n      if [ \"${SSL_KEY_ENCRYPTED}\" = \"yes\" ]; then\n        grep -q \"ssl_key_encrypted\" ${STATES_TOP} || echo \"    - extra.ssl_key_encrypted\" >> ${STATES_TOP}\n      fi\n    fi\n    # In self-signed mode, the certificate files will be created and put in the\n    # destination directory by the snakeoil_certs.sls state file\n  fi\n\n  echo \"    - postgres\" >> ${STATES_TOP}\n  echo \"    - logrotate\" >> ${STATES_TOP}\n  echo \"    - docker.software\" >> ${STATES_TOP}\n  echo \"    - arvados.repo\" >> ${STATES_TOP}\n  echo \"    - arvados.config\" >> ${STATES_TOP}\n  echo \"    - arvados.ruby\" >> ${STATES_TOP}\n  echo \"    - arvados.api\" >> ${STATES_TOP}\n  echo \"    - arvados.controller\" >> ${STATES_TOP}\n  echo \"    - arvados.keepstore\" >> ${STATES_TOP}\n  echo \"    - arvados.websocket\" >> ${STATES_TOP}\n  echo \"    - arvados.keepweb\" >> ${STATES_TOP}\n  echo \"    - arvados.workbench2\" >> ${STATES_TOP}\n  echo \"    - arvados.keepproxy\" >> ${STATES_TOP}\n  echo \"    - arvados.shell\" >> ${STATES_TOP}\n  echo \"    - arvados.dispatcher\" >> ${STATES_TOP}\n  echo \"    - extra.shell_sudo_passwordless\" >> ${STATES_TOP}\n  echo \"    - extra.shell_cron_add_login_sync\" >> ${STATES_TOP}\n  echo \"    - extra.passenger_rvm\" >> ${STATES_TOP}\n  echo \"    - extra.railsapi_passenger_configs\" >> ${STATES_TOP}\n  echo \"    - extra.workbench1_uninstall\" >> ${STATES_TOP}\n\n  # Pillars\n  echo \"    - docker\" >> ${PILLARS_TOP}\n  echo \"    - nginx_api_configuration\" >> ${PILLARS_TOP}\n  echo \"    - logrotate\" >> ${PILLARS_TOP}\n  echo \"    - logrotate_api\" >> ${PILLARS_TOP}\n  echo \"    - nginx_controller_configuration\" >> ${PILLARS_TOP}\n  echo \"    - nginx_keepproxy_configuration\" >> ${PILLARS_TOP}\n  echo \"    - nginx_keepweb_configuration\" >> ${PILLARS_TOP}\n  echo \"    - nginx\" >> ${PILLARS_TOP}\n  echo \"    - nginx_websocket_configuration\" >> ${PILLARS_TOP}\n  echo \"    - nginx_webshell_configuration\" >> ${PILLARS_TOP}\n  echo \"    - nginx_workbench2_configuration\" >> ${PILLARS_TOP}\n  echo \"    - nginx_workbench_configuration\" >> ${PILLARS_TOP}\n  echo \"    - logrotate_wb1\" >> ${PILLARS_TOP}\n  echo \"    - postgresql\" >> ${PILLARS_TOP}\n\n  if [ \"${SSL_MODE}\" = \"lets-encrypt\" ]; then\n    if [ \"${USE_LETSENCRYPT_ROUTE53}\" = \"yes\" ]; then\n      grep -q \"aws_credentials\" ${PILLARS_TOP} || echo \"    - aws_credentials\" >> ${PILLARS_TOP}\n    fi\n    grep -q \"letsencrypt\" ${PILLARS_TOP} || echo \"    - letsencrypt\" >> ${PILLARS_TOP}\n\n    hosts=(\"controller\" \"websocket\" \"workbench\" \"workbench2\" \"webshell\" \"keepproxy\")\n    if [ ${USE_SINGLE_HOSTNAME} = \"no\" ]; then\n      hosts+=(\"download\" \"collections\")\n    else\n      hosts+=(\"keepweb\")\n    fi\n\n    for c in \"${hosts[@]}\"; do\n      # Are we in a single-host-single-hostname env?\n      if [ \"${USE_SINGLE_HOSTNAME}\" = \"yes\" ]; then\n        # Are we in a single-host-single-hostname env?\n        CERT_NAME=${HOSTNAME_EXT}\n      else\n        # We are in a multiple-hostnames env\n        CERT_NAME=${c}.${DOMAIN}\n      fi\n\n      # As the pillar differs whether we use LE or custom certs, we need to do a final edition on them\n      sed -i \"s/__CERT_REQUIRES__/cmd: create-initial-cert-${CERT_NAME}*/g;\n              s#__CERT_PEM__#/etc/letsencrypt/live/${CERT_NAME}/fullchain.pem#g;\n              
s#__CERT_KEY__#/etc/letsencrypt/live/${CERT_NAME}/privkey.pem#g\" \\\n      ${P_DIR}/nginx_${c}_configuration.sls\n    done\n  else\n    # Use custom certs (either dev mode or prod)\n    grep -q \"extra_custom_certs\" ${PILLARS_TOP} || echo \"    - extra_custom_certs\" >> ${PILLARS_TOP}\n    # And add the certs in the custom_certs pillar\n    echo \"extra_custom_certs_dir: /srv/salt/certs\" > ${P_DIR}/extra_custom_certs.sls\n    echo \"extra_custom_certs:\" >> ${P_DIR}/extra_custom_certs.sls\n\n    for c in controller websocket workbench workbench2 webshell keepweb keepproxy; do\n      # Are we in a single-host-single-hostname env?\n      if [ \"${USE_SINGLE_HOSTNAME}\" = \"yes\" ]; then\n        CERT_NAME=${HOSTNAME_EXT}\n      else\n        # We are in a multiple-hostnames env\n        CERT_NAME=${c}\n      fi\n\n      if [[ \"$SSL_MODE\" == \"bring-your-own\" ]]; then\n        copy_custom_cert ${CUSTOM_CERTS_DIR} ${CERT_NAME}\n      fi\n\n      grep -q ${CERT_NAME} ${P_DIR}/extra_custom_certs.sls || echo \"  - ${CERT_NAME}\" >> ${P_DIR}/extra_custom_certs.sls\n\n      # As the pillar differs depending on whether we use LE or custom certs, we need to do a final edit on it\n      sed -i \"s/__CERT_REQUIRES__/file: extra_custom_certs_${CERT_NAME}_cert_file_copy/g;\n              s#__CERT_PEM__#/etc/nginx/ssl/arvados-${CERT_NAME}.pem#g;\n              s#__CERT_KEY__#/etc/nginx/ssl/arvados-${CERT_NAME}.key#g\" \\\n      ${P_DIR}/nginx_${c}_configuration.sls\n    done\n  fi\nelse\n  # If we add individual roles, make sure we add the repo first\n  echo \"    - arvados.repo\" >> ${STATES_TOP}\n  # We add the extra_custom_certs state\n  grep -q \"extra.custom_certs\"    ${STATES_TOP} || echo \"    - extra.custom_certs\" >> ${STATES_TOP}\n  if [ \"${SSL_KEY_ENCRYPTED}\" = \"yes\" ]; then\n    grep -q \"ssl_key_encrypted\" ${STATES_TOP} || echo \"    - extra.ssl_key_encrypted\" >> ${STATES_TOP}\n  fi\n\n  # And we add the basic part for the certs pillar\n  if [ \"${SSL_MODE}\" != \"lets-encrypt\" ]; then\n    # And add the certs in the custom_certs pillar\n    echo \"extra_custom_certs_dir: /srv/salt/certs\" > ${P_DIR}/extra_custom_certs.sls\n    echo \"extra_custom_certs:\" >> ${P_DIR}/extra_custom_certs.sls\n    grep -q \"extra_custom_certs\" ${PILLARS_TOP} || echo \"    - extra_custom_certs\" >> ${PILLARS_TOP}\n  fi\n\n  # Prometheus state on all nodes due to the node exporter below\n  grep -q \"\\- prometheus$\" ${STATES_TOP} || echo \"    - prometheus\" >> ${STATES_TOP}\n  # Prometheus node exporter pillar\n  grep -q \"prometheus_node_exporter\" ${PILLARS_TOP} || echo \"    - prometheus_node_exporter\" >> ${PILLARS_TOP}\n  # Grafana Alloy OpenTelemetry client state & pillar on all nodes\n  grep -q \"extra.alloy_install\" ${STATES_TOP} || echo \"    - extra.alloy_install\" >> ${STATES_TOP}\n  grep -q \"alloy\" ${PILLARS_TOP} || echo \"    - alloy\" >> ${PILLARS_TOP}\n\n  for R in ${ROLES:-}; do\n    case \"${R}\" in\n      \"database\")\n        # Skip if using an external service\n        if [[ \"${DATABASE_EXTERNAL_SERVICE_HOST_OR_IP:-}\" != \"\" ]]; then\n          continue\n        fi\n        # States\n        grep -q \"\\- postgres$\" ${STATES_TOP} || echo \"    - postgres\" >> ${STATES_TOP}\n        grep -q \"extra.prometheus_pg_exporter\" ${STATES_TOP} || echo \"    - extra.prometheus_pg_exporter\" >> ${STATES_TOP}\n        # Pillars\n        grep -q \"postgresql\" ${PILLARS_TOP} || echo \"    - postgresql\" >> ${PILLARS_TOP}\n        
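# The pillar below supplies the configuration consumed by the\n        # extra.prometheus_pg_exporter state added above.\n        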
grep -q \"prometheus_pg_exporter\" ${PILLARS_TOP} || echo \"    - prometheus_pg_exporter\" >> ${PILLARS_TOP}\n      ;;\n      \"monitoring\")\n        ### Support files ###\n        GRAFANA_DASHBOARDS_DEST_DIR=/srv/salt/dashboards\n        mkdir -p \"${GRAFANA_DASHBOARDS_DEST_DIR}\"\n        rm -f \"${GRAFANA_DASHBOARDS_DEST_DIR}\"/*\n        # \"ArvadosPromDataSource\" is the hardcoded UID for Prometheus' datasource\n        # in Grafana.\n        # \"ArvadosLokiDataSource\" if Loki's UID in Grafana\n        for f in $(ls \"${GRAFANA_DASHBOARDS_DIR}\"/*.json); do\n          sed \"s#__TLS_EXPIRATION_YELLOW__#${TLS_EXPIRATION_YELLOW}#g;\n               s#__TLS_EXPIRATION_GREEN__#${TLS_EXPIRATION_GREEN}#g;\n               s#\\${DS_PROMETHEUS}#ArvadosPromDataSource#g;\n               s#\\${DS_LOKI}#ArvadosLokiDataSource#g\" \\\n          \"${f}\" > \"${GRAFANA_DASHBOARDS_DEST_DIR}\"/$(basename \"${f}\")\n        done\n\n        ### States ###\n        grep -q \"\\- nginx$\" ${STATES_TOP} || echo \"    - nginx\" >> ${STATES_TOP}\n        grep -q \"extra.nginx_prometheus_configuration\" ${STATES_TOP} || echo \"    - extra.nginx_prometheus_configuration\" >> ${STATES_TOP}\n\n        grep -q \"\\- grafana$\" ${STATES_TOP} || echo \"    - grafana\" >> ${STATES_TOP}\n        grep -q \"extra.grafana_datasource\" ${STATES_TOP} || echo \"    - extra.grafana_datasource\" >> ${STATES_TOP}\n        grep -q \"extra.grafana_dashboards\" ${STATES_TOP} || echo \"    - extra.grafana_dashboards\" >> ${STATES_TOP}\n        grep -q \"extra.grafana_admin_user\" ${STATES_TOP} || echo \"    - extra.grafana_admin_user\" >> ${STATES_TOP}\n\n        grep -q \"extra.loki_install\" ${STATES_TOP} || echo \"    - extra.loki_install\" >> ${STATES_TOP}\n\n        if [ \"${SSL_MODE}\" = \"lets-encrypt\" ]; then\n          grep -q \"letsencrypt\"     ${STATES_TOP} || echo \"    - letsencrypt\" >> ${STATES_TOP}\n          if [ \"x${USE_LETSENCRYPT_ROUTE53:-}\" = \"xyes\" ]; then\n            grep -q \"aws_credentials\" ${STATES_TOP} || echo \"    - aws_credentials\" >> ${STATES_TOP}\n          fi\n        elif [ \"${SSL_MODE}\" = \"bring-your-own\" ]; then\n          for SVC in grafana prometheus loki; do\n            copy_custom_cert ${CUSTOM_CERTS_DIR} ${SVC}\n          done\n        fi\n        ### Pillars ###\n        grep -q \"prometheus_server\" ${PILLARS_TOP} || echo \"    - prometheus_server\" >> ${PILLARS_TOP}\n        grep -q \"grafana\" ${PILLARS_TOP} || echo \"    - grafana\" >> ${PILLARS_TOP}\n        grep -q \"loki\" ${PILLARS_TOP} || echo \"    - loki\" >> ${PILLARS_TOP}\n        for SVC in grafana prometheus loki; do\n          grep -q \"nginx_${SVC}_configuration\" ${PILLARS_TOP} || echo \"    - nginx_${SVC}_configuration\" >> ${PILLARS_TOP}\n        done\n        grep -q \"nginx_snippets\" ${PILLARS_TOP} || echo \"    - nginx_snippets\" >> ${PILLARS_TOP}\n        if [ \"${SSL_MODE}\" = \"lets-encrypt\" ]; then\n          grep -q \"letsencrypt\"     ${PILLARS_TOP} || echo \"    - letsencrypt\" >> ${PILLARS_TOP}\n          for SVC in grafana prometheus loki; do\n            grep -q \"letsencrypt_${SVC}_configuration\" ${PILLARS_TOP} || echo \"    - letsencrypt_${SVC}_configuration\" >> ${PILLARS_TOP}\n            sed -i \"s/__CERT_REQUIRES__/cmd: create-initial-cert-${SVC}.${DOMAIN}*/g;\n                    s#__CERT_PEM__#/etc/letsencrypt/live/${SVC}.${DOMAIN}/fullchain.pem#g;\n                    s#__CERT_KEY__#/etc/letsencrypt/live/${SVC}.${DOMAIN}/privkey.pem#g\" \\\n            
${P_DIR}/nginx_${SVC}_configuration.sls\n          done\n          if [ \"${USE_LETSENCRYPT_ROUTE53}\" = \"yes\" ]; then\n            grep -q \"aws_credentials\" ${PILLARS_TOP} || echo \"    - aws_credentials\" >> ${PILLARS_TOP}\n          fi\n        elif [ \"${SSL_MODE}\" = \"bring-your-own\" ]; then\n          grep -q \"ssl_key_encrypted\" ${PILLARS_TOP} || echo \"    - ssl_key_encrypted\" >> ${PILLARS_TOP}\n          for SVC in grafana prometheus loki; do\n            sed -i \"s/__CERT_REQUIRES__/file: extra_custom_certs_${SVC}_cert_file_copy/g;\n                    s#__CERT_PEM__#/etc/nginx/ssl/arvados-${SVC}.pem#g;\n                    s#__CERT_KEY__#/etc/nginx/ssl/arvados-${SVC}.key#g\" \\\n              ${P_DIR}/nginx_${SVC}_configuration.sls\n            grep -q ${SVC} ${P_DIR}/extra_custom_certs.sls || echo \"  - ${SVC}\" >> ${P_DIR}/extra_custom_certs.sls\n          done\n        fi\n      ;;\n      \"balancer\")\n        ### States ###\n        grep -q \"\\- nginx$\" ${STATES_TOP} || echo \"    - nginx\" >> ${STATES_TOP}\n\n        if [ \"${SSL_MODE}\" = \"lets-encrypt\" ]; then\n          grep -q \"letsencrypt\"     ${STATES_TOP} || echo \"    - letsencrypt\" >> ${STATES_TOP}\n          if [ \"x${USE_LETSENCRYPT_ROUTE53:-}\" = \"xyes\" ]; then\n            grep -q \"aws_credentials\" ${STATES_TOP} || echo \"    - aws_credentials\" >> ${STATES_TOP}\n          fi\n        elif [ \"${SSL_MODE}\" = \"bring-your-own\" ]; then\n          copy_custom_cert ${CUSTOM_CERTS_DIR} ${R}\n        fi\n\n        ### Pillars ###\n        grep -q \"nginx_${R}_configuration\" ${PILLARS_TOP} || echo \"    - nginx_${R}_configuration\" >> ${PILLARS_TOP}\n\n        if [ \"${SSL_MODE}\" = \"lets-encrypt\" ]; then\n          grep -q \"letsencrypt\"     ${PILLARS_TOP} || echo \"    - letsencrypt\" >> ${PILLARS_TOP}\n\n          grep -q \"letsencrypt_${R}_configuration\" ${PILLARS_TOP} || echo \"    - letsencrypt_${R}_configuration\" >> ${PILLARS_TOP}\n          sed -i \"s/__CERT_REQUIRES__/cmd: create-initial-cert-${ROLE2NODES['balancer']}*/g;\n                  s#__CERT_PEM__#/etc/letsencrypt/live/${ROLE2NODES['balancer']}/fullchain.pem#g;\n                  s#__CERT_KEY__#/etc/letsencrypt/live/${ROLE2NODES['balancer']}/privkey.pem#g\" \\\n          ${P_DIR}/nginx_${R}_configuration.sls\n\n          if [ \"${USE_LETSENCRYPT_ROUTE53}\" = \"yes\" ]; then\n            grep -q \"aws_credentials\" ${PILLARS_TOP} || echo \"    - aws_credentials\" >> ${PILLARS_TOP}\n          fi\n        elif [ \"${SSL_MODE}\" = \"bring-your-own\" ]; then\n          grep -q \"ssl_key_encrypted\" ${PILLARS_TOP} || echo \"    - ssl_key_encrypted\" >> ${PILLARS_TOP}\n          sed -i \"s/__CERT_REQUIRES__/file: extra_custom_certs_${R}_cert_file_copy/g;\n                  s#__CERT_PEM__#/etc/nginx/ssl/arvados-${R}.pem#g;\n                  s#__CERT_KEY__#/etc/nginx/ssl/arvados-${R}.key#g\" \\\n            ${P_DIR}/nginx_${R}_configuration.sls\n          grep -q \"${R}\" ${P_DIR}/extra_custom_certs.sls || echo \"  - ${R}\" >> ${P_DIR}/extra_custom_certs.sls\n        fi\n      ;;\n      \"controller\")\n        ### States ###\n        grep -q \"    - logrotate\" ${STATES_TOP} || echo \"    - logrotate\" >> ${STATES_TOP}\n        grep -q \"    - nginx$\" ${STATES_TOP} || echo \"    - nginx\" >> ${STATES_TOP}\n        echo \"    - extra.passenger_rvm\" >> ${STATES_TOP}\n        echo \"    - extra.railsapi_passenger_configs\" >> ${STATES_TOP}\n        grep -q \"^    - postgres\\\\.client$\" ${STATES_TOP} || echo \"    - 
postgres.client\" >> ${STATES_TOP}\n        if [[ \"${DATABASE_EXTERNAL_SERVICE_HOST_OR_IP:-}\" != \"\" ]]; then\n          grep -q \"    - extra.postgresql_external\" ${STATES_TOP} || echo \"    - extra.postgresql_external\" >> ${STATES_TOP}\n        fi\n\n        ### If we don't install and run LE before arvados-api-server, it fails and breaks everything\n        ### after it. So we add this here as we are, after all, sharing the host for api and controller\n        if [ \"${ENABLE_BALANCER}\" == \"no\" ]; then\n          if [ \"${SSL_MODE}\" = \"lets-encrypt\" ]; then\n            if [ \"x${USE_LETSENCRYPT_ROUTE53:-}\" = \"xyes\" ]; then\n              grep -q \"aws_credentials\" ${STATES_TOP} || echo \"    - aws_credentials\" >> ${STATES_TOP}\n            fi\n            grep -q \"letsencrypt\"     ${STATES_TOP} || echo \"    - letsencrypt\" >> ${STATES_TOP}\n          elif [ \"${SSL_MODE}\" = \"bring-your-own\" ]; then\n            copy_custom_cert ${CUSTOM_CERTS_DIR} ${R}\n            grep -q controller ${P_DIR}/extra_custom_certs.sls || echo \"  - controller\" >> ${P_DIR}/extra_custom_certs.sls\n          fi\n        fi\n        grep -q \"arvados.api\" ${STATES_TOP} || echo \"    - arvados.api\" >> ${STATES_TOP}\n        grep -q \"arvados.controller\" ${STATES_TOP} || echo \"    - arvados.controller\" >> ${STATES_TOP}\n\n        ### Pillars ###\n        grep -q \"logrotate\" ${PILLARS_TOP}                || echo \"    - logrotate\" >> ${PILLARS_TOP}\n        grep -q \"logrotate_api\" ${PILLARS_TOP}            || echo \"    - logrotate_api\" >> ${PILLARS_TOP}\n        grep -q \"aws_credentials\" ${PILLARS_TOP}          || echo \"    - aws_credentials\" >> ${PILLARS_TOP}\n        grep -q \"postgresql\" ${PILLARS_TOP}               || echo \"    - postgresql\" >> ${PILLARS_TOP}\n        grep -q \"nginx\" ${PILLARS_TOP}                    || echo \"    - nginx\" >> ${PILLARS_TOP}\n        grep -q \"nginx_snippets\" ${PILLARS_TOP}           || echo \"    - nginx_snippets\" >> ${PILLARS_TOP}\n        grep -q \"nginx_api_configuration\" ${PILLARS_TOP} || echo \"    - nginx_api_configuration\" >> ${PILLARS_TOP}\n        grep -q \"nginx_controller_configuration\" ${PILLARS_TOP} || echo \"    - nginx_controller_configuration\" >> ${PILLARS_TOP}\n\n        if [[ \"${DATABASE_EXTERNAL_SERVICE_HOST_OR_IP:-}\" != \"\" ]]; then\n          grep -q \"    - postgresql_external\" ${PILLARS_TOP} || echo \"    - postgresql_external\" >> ${PILLARS_TOP}\n        fi\n\n        if [ \"${ENABLE_BALANCER}\" == \"no\" ]; then\n          if [ \"${SSL_MODE}\" = \"lets-encrypt\" ]; then\n            if [ \"${USE_LETSENCRYPT_ROUTE53}\" = \"yes\" ]; then\n              grep -q \"aws_credentials\" ${PILLARS_TOP} || echo \"    - aws_credentials\" >> ${PILLARS_TOP}\n            fi\n\n            grep -q \"letsencrypt\"     ${PILLARS_TOP} || echo \"    - letsencrypt\" >> ${PILLARS_TOP}\n            grep -q \"letsencrypt_${R}_configuration\" ${PILLARS_TOP} || echo \"    - letsencrypt_${R}_configuration\" >> ${PILLARS_TOP}\n            sed -i \"s/__CERT_REQUIRES__/cmd: create-initial-cert-${R}.${DOMAIN}*/g;\n                    s#__CERT_PEM__#/etc/letsencrypt/live/${R}.${DOMAIN}/fullchain.pem#g;\n                    s#__CERT_KEY__#/etc/letsencrypt/live/${R}.${DOMAIN}/privkey.pem#g\" \\\n            ${P_DIR}/nginx_${R}_configuration.sls\n          else\n            grep -q \"ssl_key_encrypted\" ${PILLARS_TOP} || echo \"    - ssl_key_encrypted\" >> ${PILLARS_TOP}\n            sed -i \"s/__CERT_REQUIRES__/file: 
extra_custom_certs_${R}_cert_file_copy/g;\n                    s#__CERT_PEM__#/etc/nginx/ssl/arvados-${R}.pem#g;\n                    s#__CERT_KEY__#/etc/nginx/ssl/arvados-${R}.key#g\" \\\n            ${P_DIR}/nginx_${R}_configuration.sls\n            grep -q ${R} ${P_DIR}/extra_custom_certs.sls || echo \"  - ${R}\" >> ${P_DIR}/extra_custom_certs.sls\n          fi\n        fi\n      ;;\n      \"websocket\" | \"workbench\" | \"workbench2\" | \"webshell\" | \"keepweb\" | \"keepproxy\")\n        ### States ###\n        grep -q \"\\- nginx$\" ${STATES_TOP} || echo \"    - nginx\" >> ${STATES_TOP}\n\n        if [ \"${SSL_MODE}\" = \"lets-encrypt\" ]; then\n          if [ \"x${USE_LETSENCRYPT_ROUTE53:-}\" = \"xyes\" ]; then\n            grep -q \"aws_credentials\" ${STATES_TOP} || echo \"    - aws_credentials\" >> ${STATES_TOP}\n          fi\n          grep -q \"letsencrypt\"     ${STATES_TOP} || echo \"    - letsencrypt\" >> ${STATES_TOP}\n        else\n          # Use custom certs, special case for keepweb\n          if [ ${R} = \"keepweb\" ]; then\n            if [ \"${SSL_MODE}\" = \"bring-your-own\" ]; then\n              copy_custom_cert ${CUSTOM_CERTS_DIR} download\n              copy_custom_cert ${CUSTOM_CERTS_DIR} collections\n            fi\n          else\n            if [ \"${SSL_MODE}\" = \"bring-your-own\" ]; then\n              copy_custom_cert ${CUSTOM_CERTS_DIR} ${R}\n            fi\n          fi\n        fi\n\n        # The webshell role is just an nginx vhost, so it has no state\n        # The workbench role is deprecated since 2.7.0\n        if [[ \"${R}\" != \"webshell\" && \"${R}\" != \"workbench\" ]]; then\n          grep -q \"arvados.${R}\" ${STATES_TOP} || echo \"    - arvados.${R}\" >> ${STATES_TOP}\n        fi\n        # Make sure wb1's package gets uninstalled\n        if [[ \"${R}\" == \"workbench\" ]]; then\n          grep -q \"workbench1_uninstall\" ${STATES_TOP} || echo \"    - extra.workbench1_uninstall\" >> ${STATES_TOP}\n        fi\n\n        ### Pillars ###\n        grep -q \"nginx_${R}_configuration\" ${PILLARS_TOP} || echo \"    - nginx_${R}_configuration\" >> ${PILLARS_TOP}\n        grep -q \"nginx_snippets\" ${PILLARS_TOP} || echo \"    - nginx_snippets\" >> ${PILLARS_TOP}\n        # Special case for keepweb\n        if [ ${R} = \"keepweb\" ]; then\n          grep -q \"nginx_download_configuration\" ${PILLARS_TOP} || echo \"    - nginx_download_configuration\" >> ${PILLARS_TOP}\n          grep -q \"nginx_collections_configuration\" ${PILLARS_TOP} || echo \"    - nginx_collections_configuration\" >> ${PILLARS_TOP}\n        fi\n\n        if [ \"${SSL_MODE}\" = \"lets-encrypt\" ]; then\n          if [ \"${USE_LETSENCRYPT_ROUTE53}\" = \"yes\" ]; then\n            grep -q \"aws_credentials\" ${PILLARS_TOP} || echo \"    - aws_credentials\" >> ${PILLARS_TOP}\n          fi\n          grep -q \"letsencrypt\"     ${PILLARS_TOP} || echo \"    - letsencrypt\" >> ${PILLARS_TOP}\n          grep -q \"letsencrypt_${R}_configuration\" ${PILLARS_TOP} || echo \"    - letsencrypt_${R}_configuration\" >> ${PILLARS_TOP}\n\n          # As the pillar differs depending on whether we use LE or custom certs, we need to do a final edit on it\n          # Special case for keepweb\n          if [ ${R} = \"keepweb\" ]; then\n            for kwsub in download collections; do\n              sed -i \"s/__CERT_REQUIRES__/cmd: create-initial-cert-${kwsub}.${DOMAIN}*/g;\n                      s#__CERT_PEM__#/etc/letsencrypt/live/${kwsub}.${DOMAIN}/fullchain.pem#g;\n                      
s#__CERT_KEY__#/etc/letsencrypt/live/${kwsub}.${DOMAIN}/privkey.pem#g\" \\\n              ${P_DIR}/nginx_${kwsub}_configuration.sls\n            done\n          else\n            sed -i \"s/__CERT_REQUIRES__/cmd: create-initial-cert-${R}.${DOMAIN}*/g;\n                    s#__CERT_PEM__#/etc/letsencrypt/live/${R}.${DOMAIN}/fullchain.pem#g;\n                    s#__CERT_KEY__#/etc/letsencrypt/live/${R}.${DOMAIN}/privkey.pem#g\" \\\n            ${P_DIR}/nginx_${R}_configuration.sls\n          fi\n        else\n          grep -q \"ssl_key_encrypted\" ${PILLARS_TOP} || echo \"    - ssl_key_encrypted\" >> ${PILLARS_TOP}\n          # As the pillar differs depending on whether we use LE or custom certs, we need to do a final edit on it\n          # Special case for keepweb\n          if [ ${R} = \"keepweb\" ]; then\n            for kwsub in download collections; do\n              sed -i \"s/__CERT_REQUIRES__/file: extra_custom_certs_${kwsub}_cert_file_copy/g;\n                      s#__CERT_PEM__#/etc/nginx/ssl/arvados-${kwsub}.pem#g;\n                      s#__CERT_KEY__#/etc/nginx/ssl/arvados-${kwsub}.key#g\" \\\n              ${P_DIR}/nginx_${kwsub}_configuration.sls\n              grep -q ${kwsub} ${P_DIR}/extra_custom_certs.sls || echo \"  - ${kwsub}\" >> ${P_DIR}/extra_custom_certs.sls\n            done\n          else\n            sed -i \"s/__CERT_REQUIRES__/file: extra_custom_certs_${R}_cert_file_copy/g;\n                    s#__CERT_PEM__#/etc/nginx/ssl/arvados-${R}.pem#g;\n                    s#__CERT_KEY__#/etc/nginx/ssl/arvados-${R}.key#g\" \\\n            ${P_DIR}/nginx_${R}_configuration.sls\n            grep -q ${R} ${P_DIR}/extra_custom_certs.sls || echo \"  - ${R}\" >> ${P_DIR}/extra_custom_certs.sls\n          fi\n        fi\n      ;;\n      \"shell\")\n        # States\n        echo \"    - extra.shell_sudo_passwordless\" >> ${STATES_TOP}\n        echo \"    - extra.shell_cron_add_login_sync\" >> ${STATES_TOP}\n        grep -q \"docker\" ${STATES_TOP}       || echo \"    - docker.software\" >> ${STATES_TOP}\n        grep -q \"arvados.${R}\" ${STATES_TOP} || echo \"    - arvados.${R}\" >> ${STATES_TOP}\n        # Pillars\n        grep -q \"docker\" ${PILLARS_TOP}       || echo \"    - docker\" >> ${PILLARS_TOP}\n      ;;\n      \"dispatcher\" | \"keepbalance\" | \"keepstore\")\n        # States\n        grep -q \"arvados.${R}\" ${STATES_TOP} || echo \"    - arvados.${R}\" >> ${STATES_TOP}\n        # Pillars\n        # At the moment, no specific pillar is needed\n      ;;\n      *)\n        echo \"Unknown role ${R}\"\n        exit 1\n      ;;\n    esac\n  done\nfi\n\nif [ \"${DUMP_CONFIG}\" = \"yes\" ]; then\n  # We won't run the rest of the script because we're just dumping the config\n  exit 0\nfi\n\n# Now run the install\nsalt-call --state-output=mixed --local state.apply -l ${LOG_LEVEL}\n\n# Finally, make sure that /etc/hosts is not overwritten on reboot\nif [ -d /etc/cloud/cloud.cfg.d ]; then\n  # TODO: will this work on CentOS?\n  sed -i 's/^manage_etc_hosts: true/#manage_etc_hosts: true/g' /etc/cloud/cloud.cfg.d/*\nfi\n\n# Leave a copy of the Arvados CA so the user can copy it where it's required\nif [ \"${SSL_MODE}\" = \"self-signed\" ]; then\n  echo \"Copying the Arvados CA certificate '${DOMAIN}-arvados-snakeoil-ca.crt' to the installer dir, so you can import it\"\n  if [ \"x${VAGRANT:-}\" = \"xyes\" ]; then\n    cp /etc/ssl/certs/arvados-snakeoil-ca.pem /vagrant/${DOMAIN}-arvados-snakeoil-ca.pem\n  else\n    cp /etc/ssl/certs/arvados-snakeoil-ca.pem ${SCRIPT_DIR}/${DOMAIN}-arvados-snakeoil-ca.crt\n  fi\nfi\n\n
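# Import this CA certificate into your browser/OS trust store so that the\n# cluster's self-signed certificates are accepted.\n\n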
if [ \"x${VAGRANT:-}\" = \"xyes\" ]; then\n    # If running in a Vagrant VM, also add the default user to the docker group\n    echo \"Adding the vagrant user to the docker group\"\n    usermod -a -G docker vagrant\nfi\n\n# Test that the installation finished correctly\nif [ \"x${TEST:-}\" = \"xyes\" ]; then\n  cd ${T_DIR}\n  ./run-test.sh\nfi\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/.gitignore",
    "content": "**/.terraform\n**/terraform.tfstate*\n**/.infracost\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/assumerolepolicy.json",
    "content": "{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n      {\n        \"Action\": \"sts:AssumeRole\",\n        \"Principal\": {\n          \"Service\": \"ec2.amazonaws.com\"\n        },\n        \"Effect\": \"Allow\",\n        \"Sid\": \"\"\n      }\n    ]\n}"
  },
  {
    "path": "tools/salt-install/terraform/aws/data-storage/.terraform.lock.hcl",
    "content": "# This file is maintained automatically by \"terraform init\".\n# Manual edits may be lost in future updates.\n\nprovider \"registry.terraform.io/hashicorp/aws\" {\n  version = \"4.38.0\"\n  hashes = [\n    \"h1:LympybKZJE3L0H12nMmDnFH1iexD9S2GqZbDMo4fuPI=\",\n    \"h1:bhDPZioOF9Uz9mavezCHfYbD5YJ3fEPsixLpcWgV/kU=\",\n    \"zh:0ae61458acf7acecf47f7a02e08da1f7adeee9532e053c0d80432f16197e4799\",\n    \"zh:1ece9bcef41ffc75e0955419d7f8b1708ab7ffe4518bc9a2afe3bc5c79a9e79b\",\n    \"zh:302065a7c3ae798345b92a465b650b025d9c4e9abc3e78421ecc69a17b8c3d6a\",\n    \"zh:52d61f6a3ed6726b821a78f1fb78df818cf24a4d2378cc16afded297b37d4b7b\",\n    \"zh:6c365ed0cae031acdbcca04560997589a94629269cb456d468cbe51a3a020386\",\n    \"zh:70987a51d782f3458f124efea320157a48453864c420421051c56d41e463a948\",\n    \"zh:8b5a5f30240c67e596a89ccd76aa81133e6ae253c8a06a932b8901ef2b4a7486\",\n    \"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425\",\n    \"zh:d672167515ece7c2db4663faf180dfb6cfc6dbf5e149f868d05c39bb54b9ca03\",\n    \"zh:df1bc9926674b2e1246c9ebffd8bf8c4e380f50910a7f0b3ded957e8768ae27a\",\n    \"zh:e304b6e2bd66e7992326aa0446152547eb97e8f77d00bc1a9096022ac37e5d71\",\n    \"zh:f033690f11446af1383ad74149f429fae19e2784af5e151a22f46965dff21b29\",\n  ]\n}\n\nprovider \"registry.terraform.io/hashicorp/external\" {\n  version = \"2.2.2\"\n  hashes = [\n    \"h1:VUkgcWvCliS0HO4kt7oEQhFD2gcx/59XpwMqxfCU1kE=\",\n    \"h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=\",\n    \"zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca\",\n    \"zh:10cf5c9b9524ca2e4302bf02368dc6aac29fb50aeaa6f7758cce9aa36ae87a28\",\n    \"zh:56a016ee871c8501acb3f2ee3b51592ad7c3871a1757b098838349b17762ba6b\",\n    \"zh:719d6ef39c50e4cffc67aa67d74d195adaf42afcf62beab132dafdb500347d39\",\n    \"zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3\",\n    \"zh:7fbfc4d37435ac2f717b0316f872f558f608596b389b895fcb549f118462d327\",\n    \"zh:8ac71408204db606ce63fe8f9aeaf1ddc7751d57d586ec421e62d440c402e955\",\n    \"zh:a4cacdb06f114454b6ed0033add28006afa3f65a0ea7a43befe45fc82e6809fb\",\n    \"zh:bb5ce3132b52ae32b6cc005bc9f7627b95259b9ffe556de4dad60d47d47f21f0\",\n    \"zh:bb60d2976f125ffd232a7ccb4b3f81e7109578b23c9c6179f13a11d125dca82a\",\n    \"zh:f9540ecd2e056d6e71b9ea5f5a5cf8f63dd5c25394b9db831083a9d4ea99b372\",\n    \"zh:ffd998b55b8a64d4335a090b6956b4bf8855b290f7554dd38db3302de9c41809\",\n  ]\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/data-storage/data.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\ndata \"terraform_remote_state\" \"vpc\" {\n  backend = \"local\"\n  config = {\n    path = \"../vpc/terraform.tfstate\"\n  }\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/data-storage/locals.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nlocals {\n  region_name = data.terraform_remote_state.vpc.outputs.region_name\n  cluster_name = data.terraform_remote_state.vpc.outputs.cluster_name\n  custom_tags = data.terraform_remote_state.vpc.outputs.custom_tags\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/data-storage/main.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nterraform {\n  required_version = \"~> 1.3.0\"\n  required_providers {\n    aws = {\n      source = \"hashicorp/aws\"\n      version = \"~> 4.38.0\"\n    }\n  }\n}\n\nprovider \"aws\" {\n  region = local.region_name\n  default_tags {\n    tags = merge(local.custom_tags, {\n      Arvados = local.cluster_name\n      Terraform = true\n    })\n  }\n}\n\n# S3 bucket and access resources for Keep blocks\nresource \"aws_s3_bucket\" \"keep_volume\" {\n  bucket = \"${local.cluster_name}-nyw5e-000000000000000-volume\"\n}\n\nresource \"aws_iam_role\" \"keepstore_iam_role\" {\n  name = \"${local.cluster_name}-keepstore-00-iam-role\"\n  assume_role_policy = \"${file(\"../assumerolepolicy.json\")}\"\n}\n\nresource \"aws_iam_role\" \"compute_node_iam_role\" {\n  name = \"${local.cluster_name}-compute-node-00-iam-role\"\n  assume_role_policy = \"${file(\"../assumerolepolicy.json\")}\"\n}\n\nresource \"aws_iam_policy\" \"s3_full_access\" {\n  name = \"${local.cluster_name}_s3_full_access\"\n  policy = jsonencode({\n    Version: \"2012-10-17\",\n    Id: \"arvados-keepstore policy\",\n    Statement: [{\n      Effect: \"Allow\",\n      Action: [\n        \"s3:*\",\n      ],\n      Resource: [\n        \"arn:aws:s3:::${local.cluster_name}-nyw5e-000000000000000-volume\",\n        \"arn:aws:s3:::${local.cluster_name}-nyw5e-000000000000000-volume/*\"\n      ]\n    }]\n  })\n}\n\nresource \"aws_iam_policy_attachment\" \"s3_full_access_policy_attachment\" {\n  name = \"${local.cluster_name}_s3_full_access_attachment\"\n  roles = [\n    aws_iam_role.keepstore_iam_role.name,\n    aws_iam_role.compute_node_iam_role.name,\n  ]\n  policy_arn = aws_iam_policy.s3_full_access.arn\n}\n\n# S3 bucket and access resources for Loki\nresource \"aws_s3_bucket\" \"loki_storage\" {\n  bucket = \"${local.cluster_name}-loki-object-storage\"\n}\n\nresource \"aws_iam_policy\" \"loki_s3_full_access\" {\n  name = \"${local.cluster_name}_loki_s3_full_access\"\n  policy = jsonencode({\n    Version: \"2012-10-17\",\n    Id: \"Loki S3 storage policy\",\n    Statement: [{\n      Effect: \"Allow\",\n      Action: [\n        \"s3:*\",\n      ],\n      Resource: [\n        \"arn:aws:s3:::${local.cluster_name}-loki-object-storage\",\n        \"arn:aws:s3:::${local.cluster_name}-loki-object-storage/*\"\n      ]\n    }]\n  })\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/data-storage/outputs.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\noutput \"keepstore_iam_role_name\" {\n  value = aws_iam_role.keepstore_iam_role.name\n}\n\noutput \"compute_node_iam_role_name\" {\n  value = aws_iam_role.compute_node_iam_role.name\n}\n\noutput \"use_external_db\" {\n  value = var.use_external_db\n}\n\noutput \"loki_iam_policy_arn\" {\n  value = aws_iam_policy.loki_s3_full_access.arn\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/data-storage/terraform.tfvars",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n# Set to true if the database server won't be running in any service instance.\n# Default: false\n# use_external_db = true\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/data-storage/variables.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nvariable \"use_external_db\" {\n  description = \"Enable this if the database service won't be installed on these instances\"\n  type = bool\n  default = false\n}\n\nvariable \"keep_cluster_data\" {\n  description = \"Avoids state (database & keep blocks) to be destroyed. Needed for production clusters\"\n  type = bool\n  default = false\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/services/.terraform.lock.hcl",
    "content": "# This file is maintained automatically by \"terraform init\".\n# Manual edits may be lost in future updates.\n\nprovider \"registry.terraform.io/hashicorp/aws\" {\n  version = \"4.38.0\"\n  hashes = [\n    \"h1:LympybKZJE3L0H12nMmDnFH1iexD9S2GqZbDMo4fuPI=\",\n    \"h1:bhDPZioOF9Uz9mavezCHfYbD5YJ3fEPsixLpcWgV/kU=\",\n    \"zh:0ae61458acf7acecf47f7a02e08da1f7adeee9532e053c0d80432f16197e4799\",\n    \"zh:1ece9bcef41ffc75e0955419d7f8b1708ab7ffe4518bc9a2afe3bc5c79a9e79b\",\n    \"zh:302065a7c3ae798345b92a465b650b025d9c4e9abc3e78421ecc69a17b8c3d6a\",\n    \"zh:52d61f6a3ed6726b821a78f1fb78df818cf24a4d2378cc16afded297b37d4b7b\",\n    \"zh:6c365ed0cae031acdbcca04560997589a94629269cb456d468cbe51a3a020386\",\n    \"zh:70987a51d782f3458f124efea320157a48453864c420421051c56d41e463a948\",\n    \"zh:8b5a5f30240c67e596a89ccd76aa81133e6ae253c8a06a932b8901ef2b4a7486\",\n    \"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425\",\n    \"zh:d672167515ece7c2db4663faf180dfb6cfc6dbf5e149f868d05c39bb54b9ca03\",\n    \"zh:df1bc9926674b2e1246c9ebffd8bf8c4e380f50910a7f0b3ded957e8768ae27a\",\n    \"zh:e304b6e2bd66e7992326aa0446152547eb97e8f77d00bc1a9096022ac37e5d71\",\n    \"zh:f033690f11446af1383ad74149f429fae19e2784af5e151a22f46965dff21b29\",\n  ]\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/services/data.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\ndata \"terraform_remote_state\" \"vpc\" {\n  backend = \"local\"\n  config = {\n    path = \"../vpc/terraform.tfstate\"\n  }\n}\n\ndata \"terraform_remote_state\" \"data-storage\" {\n  backend = \"local\"\n  config = {\n    path = \"../data-storage/terraform.tfstate\"\n  }\n}\n\n# https://wiki.debian.org/Cloud/AmazonEC2Image/Bookworm\ndata \"aws_ami\" \"debian-12\" {\n  most_recent = true\n  owners = [\"136693071363\"]\n  filter {\n    name   = \"name\"\n    values = [\"debian-12-amd64-*\"]\n  }\n  filter {\n    name   = \"virtualization-type\"\n    values = [\"hvm\"]\n  }\n}\n\ndata \"aws_vpc\" \"arvados_vpc\" {\n  id = data.terraform_remote_state.vpc.outputs.arvados_vpc_id\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/services/locals.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nlocals {\n  region_name = data.terraform_remote_state.vpc.outputs.region_name\n  cluster_name = data.terraform_remote_state.vpc.outputs.cluster_name\n  use_external_db = data.terraform_remote_state.data-storage.outputs.use_external_db\n  private_only = data.terraform_remote_state.vpc.outputs.private_only\n  public_ip = data.terraform_remote_state.vpc.outputs.public_ip\n  private_ip = data.terraform_remote_state.vpc.outputs.private_ip\n  pubkey_path = pathexpand(var.pubkey_path)\n  public_hosts = data.terraform_remote_state.vpc.outputs.public_hosts\n  private_hosts = data.terraform_remote_state.vpc.outputs.private_hosts\n  user_facing_hosts = data.terraform_remote_state.vpc.outputs.user_facing_hosts\n  internal_service_hosts = data.terraform_remote_state.vpc.outputs.internal_service_hosts\n  ssl_password_secret_name = \"${local.cluster_name}-${var.ssl_password_secret_name_suffix}\"\n  instance_ami_id = var.instance_ami != \"\" ? var.instance_ami : data.aws_ami.debian-12.image_id\n  custom_tags = data.terraform_remote_state.vpc.outputs.custom_tags\n  compute_node_iam_role_name = data.terraform_remote_state.data-storage.outputs.compute_node_iam_role_name\n  instance_profile = {\n    default = aws_iam_instance_profile.default_instance_profile\n    workbench = aws_iam_instance_profile.dispatcher_instance_profile\n    keep0 = aws_iam_instance_profile.keepstore_instance_profile\n  }\n  private_subnet_id = data.terraform_remote_state.vpc.outputs.private_subnet_id\n  public_subnet_id = data.terraform_remote_state.vpc.outputs.public_subnet_id\n  additional_rds_subnet_id = data.terraform_remote_state.vpc.outputs.additional_rds_subnet_id\n  arvados_sg_id = data.terraform_remote_state.vpc.outputs.arvados_sg_id\n  eip_id = data.terraform_remote_state.vpc.outputs.eip_id\n  keepstore_iam_role_name = data.terraform_remote_state.data-storage.outputs.keepstore_iam_role_name\n  use_rds = (var.use_rds && data.terraform_remote_state.vpc.outputs.use_rds)\n  rds_username = var.rds_username != \"\" ? var.rds_username : \"${local.cluster_name}_arvados\"\n  rds_password = var.rds_password != \"\" ? var.rds_password : one(random_string.default_rds_password[*].result)\n  rds_allocated_storage = var.rds_allocated_storage\n  rds_max_allocated_storage = max(var.rds_max_allocated_storage, var.rds_allocated_storage)\n  rds_instance_type = var.rds_instance_type\n  rds_backup_retention_period = var.rds_backup_retention_period\n  rds_backup_before_deletion = var.rds_backup_before_deletion\n  rds_final_backup_name = var.rds_final_backup_name != \"\" ? var.rds_final_backup_name : \"arvados-${local.cluster_name}-db-final-snapshot\"\n  rds_postgresql_version = var.rds_postgresql_version\n  loki_iam_policy_arn = data.terraform_remote_state.data-storage.outputs.loki_iam_policy_arn\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/services/main.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nterraform {\n  required_version = \"~> 1.3.0\"\n  required_providers {\n    aws = {\n      source = \"hashicorp/aws\"\n      version = \"~> 4.38.0\"\n    }\n  }\n}\n\nprovider \"aws\" {\n  region = local.region_name\n  default_tags {\n    tags = merge(local.custom_tags, {\n      Arvados = local.cluster_name\n      Terraform = true\n    })\n  }\n}\n\nprovider \"random\" {}\n\nresource \"random_string\" \"default_rds_password\" {\n  count = (local.use_rds && var.rds_password == \"\") ? 1 : 0\n  length  = 32\n  special = false\n}\n\nresource \"aws_iam_instance_profile\" \"keepstore_instance_profile\" {\n  name = \"${local.cluster_name}-keepstore-00-iam-role\"\n  role = data.terraform_remote_state.data-storage.outputs.keepstore_iam_role_name\n}\n\nresource \"aws_iam_instance_profile\" \"compute_node_instance_profile\" {\n  name = \"${local.cluster_name}-compute-node-00-iam-role\"\n  role = local.compute_node_iam_role_name\n}\n\nresource \"aws_iam_instance_profile\" \"dispatcher_instance_profile\" {\n  name = \"${local.cluster_name}_dispatcher_instance_profile\"\n  role = aws_iam_role.cloud_dispatcher_iam_role.name\n}\n\nresource \"aws_secretsmanager_secret\" \"ssl_password_secret\" {\n  name = local.ssl_password_secret_name\n  recovery_window_in_days = 0\n}\n\nresource \"aws_iam_instance_profile\" \"default_instance_profile\" {\n  name = \"${local.cluster_name}_default_instance_profile\"\n  role = aws_iam_role.default_iam_role.name\n}\n\nresource \"aws_instance\" \"arvados_service\" {\n  for_each = toset(concat(local.public_hosts, local.private_hosts))\n  ami = local.instance_ami_id\n  instance_type = try(var.instance_type[each.value], var.instance_type.default)\n  user_data = templatefile(\"user_data.sh\", {\n    \"hostname\": each.value,\n    \"deploy_user\": var.deploy_user,\n    \"ssh_pubkey\": file(local.pubkey_path)\n  })\n  private_ip = local.private_ip[each.value]\n  subnet_id = contains(local.user_facing_hosts, each.value) ? local.public_subnet_id : local.private_subnet_id\n  vpc_security_group_ids = [ local.arvados_sg_id ]\n  iam_instance_profile = try(local.instance_profile[each.value], local.instance_profile.default).name\n  tags = {\n    Name = \"${local.cluster_name}_arvados_service_${each.value}\"\n  }\n  root_block_device {\n    volume_type = \"gp3\"\n    volume_size = try(var.instance_volume_size[each.value], var.instance_volume_size.default)\n  }\n  metadata_options {\n    # Sets IMDSv2 to required. Default is \"optional\".\n    http_tokens = \"required\"\n    http_endpoint = \"enabled\"\n  }\n  lifecycle {\n    ignore_changes = [\n      # Avoids recreating the instance when the latest AMI changes.\n      # Use 'terraform taint' or 'terraform apply -replace' to force\n      # an AMI change.\n      ami,\n    ]\n  }\n}\n\nresource \"aws_db_subnet_group\" \"arvados_db_subnet_group\" {\n  count = local.use_rds ? 1 : 0\n  name       = \"${local.cluster_name}_db_subnet_group\"\n  subnet_ids = [local.private_subnet_id, local.additional_rds_subnet_id]\n}\n\nresource \"aws_db_instance\" \"postgresql_service\" {\n  count = local.use_rds ? 
1 : 0\n  allocated_storage = local.rds_allocated_storage\n  max_allocated_storage = local.rds_max_allocated_storage\n  engine = \"postgres\"\n  engine_version = local.rds_postgresql_version\n  instance_class = local.rds_instance_type\n  db_name = \"${local.cluster_name}_arvados\"\n  username = local.rds_username\n  password = local.rds_password\n  skip_final_snapshot  = !local.rds_backup_before_deletion\n  final_snapshot_identifier = local.rds_final_backup_name\n\n  vpc_security_group_ids = [local.arvados_sg_id]\n  db_subnet_group_name = aws_db_subnet_group.arvados_db_subnet_group[0].name\n\n  backup_retention_period = local.rds_backup_retention_period\n  publicly_accessible = false\n  storage_encrypted = true\n  multi_az = false\n\n  lifecycle {\n    ignore_changes = [\n      username,\n    ]\n  }\n\n  tags = {\n    Name = \"${local.cluster_name}_postgresql_service\"\n  }\n}\n\nresource \"aws_iam_policy\" \"compute_node_ebs_autoscaler\" {\n  name = \"${local.cluster_name}_compute_node_ebs_autoscaler\"\n  policy = jsonencode({\n    Version: \"2012-10-17\",\n    Id: \"compute-node EBS Autoscaler policy\",\n    Statement: [{\n      Effect: \"Allow\",\n      Action: [\n          \"ec2:AttachVolume\",\n          \"ec2:DescribeVolumeStatus\",\n          \"ec2:DescribeVolumes\",\n          \"ec2:DescribeTags\",\n          \"ec2:ModifyInstanceAttribute\",\n          \"ec2:DescribeVolumeAttribute\",\n          \"ec2:CreateVolume\",\n          \"ec2:DeleteVolume\",\n          \"ec2:CreateTags\"\n      ],\n      Resource: \"*\"\n    }]\n  })\n}\n\nresource \"aws_iam_policy_attachment\" \"compute_node_ebs_autoscaler_attachment\" {\n  name = \"${local.cluster_name}_compute_node_ebs_autoscaler_attachment\"\n  roles = [ local.compute_node_iam_role_name ]\n  policy_arn = aws_iam_policy.compute_node_ebs_autoscaler.arn\n}\n\n\nresource \"aws_iam_policy\" \"cmk_access\" {\n  count = var.cmk_arn == \"\" ? 0 : 1\n  name = \"${local.cluster_name}_cmk_access\"\n  policy = jsonencode({\n    Version: \"2012-10-17\",\n    Statement: [{\n      Effect: \"Allow\",\n      Action: [\n        \"kms:Encrypt\",\n        \"kms:Decrypt\",\n        \"kms:DescribeKey\",\n        \"kms:GenerateDataKey*\"\n      ],\n      Resource: [\n        var.cmk_arn\n      ]\n    },\n    {\n      Effect: \"Allow\",\n      Action: \"kms:CreateGrant\",\n      Resource: [\n        var.cmk_arn\n      ],\n      Condition: {\n        Bool: {\n          \"kms:GrantIsForAWSResource\": true\n        }\n      }\n    }]\n  })\n}\n\nresource \"aws_iam_policy_attachment\" \"compute_node_cmk_access_attachment\" {\n  count = var.cmk_arn == \"\" ? 0 : 1\n  name = \"${local.cluster_name}_compute_node_cmk_access_attachment\"\n  roles = [ local.compute_node_iam_role_name ]\n  policy_arn = aws_iam_policy.cmk_access[0].arn\n}\n\nresource \"aws_iam_policy_attachment\" \"dispatcher_cmk_access_attachment\" {\n  count = var.cmk_arn == \"\" ? 
0 : 1\n  name = \"${local.cluster_name}_dispatcher_cmk_access_attachment\"\n  roles = [ aws_iam_role.cloud_dispatcher_iam_role.name ]\n  policy_arn = aws_iam_policy.cmk_access[0].arn\n}\n\nresource \"aws_iam_policy\" \"cloud_dispatcher_ec2_access\" {\n  name = \"${local.cluster_name}_cloud_dispatcher_ec2_access\"\n  policy = jsonencode({\n    Version: \"2012-10-17\",\n    Id: \"arvados-dispatch-cloud policy\",\n    Statement: [{\n      Effect: \"Allow\",\n      Action: [\n        \"ec2:DescribeKeyPairs\",\n        \"ec2:ImportKeyPair\",\n        \"ec2:RunInstances\",\n        \"ec2:DescribeInstances\",\n        \"ec2:CreateTags\",\n        \"ec2:TerminateInstances\"\n      ],\n      Resource: \"*\"\n    },\n    {\n      Effect: \"Allow\",\n      Action: [\n        \"iam:PassRole\",\n      ],\n      Resource: \"arn:aws:iam::*:role/${aws_iam_instance_profile.compute_node_instance_profile.name}\"\n    }]\n  })\n}\n\nresource \"aws_iam_role\" \"cloud_dispatcher_iam_role\" {\n  name = \"${local.cluster_name}-dispatcher-00-iam-role\"\n  assume_role_policy = \"${file(\"../assumerolepolicy.json\")}\"\n}\n\nresource \"aws_iam_policy_attachment\" \"cloud_dispatcher_ec2_access_attachment\" {\n  name = \"${local.cluster_name}_cloud_dispatcher_ec2_access_attachment\"\n  roles = [ aws_iam_role.cloud_dispatcher_iam_role.name ]\n  policy_arn = aws_iam_policy.cloud_dispatcher_ec2_access.arn\n}\n\nresource \"aws_iam_policy_attachment\" \"loki_s3_full_access_attachment\" {\n  name = \"${local.cluster_name}_loki_s3_full_access_attachment\"\n  roles = [ aws_iam_role.cloud_dispatcher_iam_role.name ]\n  policy_arn = local.loki_iam_policy_arn\n}\n\nresource \"aws_eip_association\" \"eip_assoc\" {\n  for_each = local.private_only ? [] : toset(local.public_hosts)\n  instance_id = aws_instance.arvados_service[each.value].id\n  allocation_id = local.eip_id[each.value]\n}\n\nresource \"aws_iam_role\" \"default_iam_role\" {\n  name = \"${local.cluster_name}-default-iam-role\"\n  assume_role_policy = \"${file(\"../assumerolepolicy.json\")}\"\n}\n\nresource \"aws_iam_policy\" \"ssl_privkey_password_access\" {\n  name = \"${local.cluster_name}_ssl_privkey_password_access\"\n  policy = jsonencode({\n    Version: \"2012-10-17\",\n    Statement: [{\n      Effect: \"Allow\",\n      Action: \"secretsmanager:GetSecretValue\",\n      Resource: \"${aws_secretsmanager_secret.ssl_password_secret.arn}\"\n    }]\n  })\n}\n\n# Every service node needs access to the SSL privkey password secret for\n# nginx to be able to use it.\nresource \"aws_iam_policy_attachment\" \"ssl_privkey_password_access_attachment\" {\n  name = \"${local.cluster_name}_ssl_privkey_password_access_attachment\"\n  roles = [\n    aws_iam_role.cloud_dispatcher_iam_role.name,\n    aws_iam_role.default_iam_role.name,\n    local.keepstore_iam_role_name,\n  ]\n  policy_arn = aws_iam_policy.ssl_privkey_password_access.arn\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/services/outputs.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\noutput \"vpc_id\" {\n  value = data.terraform_remote_state.vpc.outputs.arvados_vpc_id\n}\noutput \"cluster_int_cidr\" {\n  value = data.aws_vpc.arvados_vpc.cidr_block\n}\noutput \"arvados_subnet_id\" {\n  value = data.terraform_remote_state.vpc.outputs.public_subnet_id\n}\noutput \"compute_subnet_id\" {\n  value = data.terraform_remote_state.vpc.outputs.private_subnet_id\n}\n\noutput \"arvados_sg_id\" {\n  value = data.terraform_remote_state.vpc.outputs.arvados_sg_id\n}\n\noutput \"public_ip\" {\n  value = local.public_ip\n}\n\noutput \"private_ip\" {\n  value = local.private_ip\n}\n\noutput \"route53_dns_ns\" {\n  value = data.terraform_remote_state.vpc.outputs.route53_dns_ns\n}\n\noutput \"letsencrypt_iam_access_key_id\" {\n  value = data.terraform_remote_state.vpc.outputs.letsencrypt_iam_access_key_id\n}\n\noutput \"letsencrypt_iam_secret_access_key\" {\n  value = data.terraform_remote_state.vpc.outputs.letsencrypt_iam_secret_access_key\n  sensitive = true\n}\n\noutput \"cluster_name\" {\n  value = data.terraform_remote_state.vpc.outputs.cluster_name\n}\n\noutput \"domain_name\" {\n  value = data.terraform_remote_state.vpc.outputs.domain_name\n}\n\n# Debian AMI's default user\noutput \"deploy_user\" {\n  value = var.deploy_user\n}\n\noutput \"region_name\" {\n  value = data.terraform_remote_state.vpc.outputs.region_name\n}\n\noutput \"ssl_password_secret_name\" {\n  value = aws_secretsmanager_secret.ssl_password_secret.name\n}\n\noutput \"database_address\" {\n  value = one(aws_db_instance.postgresql_service[*].address)\n}\n\noutput \"database_name\" {\n  value = one(aws_db_instance.postgresql_service[*].db_name)\n}\n\noutput \"database_username\" {\n  value = one(aws_db_instance.postgresql_service[*].username)\n}\n\noutput \"database_password\" {\n  value = one(aws_db_instance.postgresql_service[*].password)\n  sensitive = true\n}\n\noutput \"database_version\" {\n  value = one(aws_db_instance.postgresql_service[*].engine_version_actual)\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/services/terraform.tfvars",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n# SSH public key path to use by the installer script. It will be installed in\n# the home directory of the 'deploy_user'. Default: ~/.ssh/id_rsa.pub\n# pubkey_path = \"/path/to/pub.key\"\n\n# Set the instance type for your nodes. Default: m5a.large\n# instance_type = {\n#   default = \"m5a.xlarge\"\n#   controller = \"c5a.4xlarge\"\n# }\n\n# Set the volume size (in GiB) per service node.\n# Default: 100 for controller, 20 the rest.\n# NOTE: The service node will need to be rebooted after increasing its volume's\n# size.\n# instance_volume_size = {\n#   default = 20\n#   controller = 300\n# }\n\n# Use an RDS instance for database. For this to work, make sure to also set\n# 'use_rds' to true in '../vpc/terraform.tfvars'.\n# use_rds = true\n#\n# Provide custom values if needed.\n# rds_username = \"\"\n# rds_password = \"\"\n# rds_instance_type = \"db.m5.xlarge\"\n# rds_postgresql_version = \"16.3\"\n# rds_allocated_storage = 200\n# rds_max_allocated_storage = 1000\n# rds_backup_retention_period = 30\n# rds_backup_before_deletion = false\n# rds_final_backup_name = \"\"\n\n# AWS secret's name which holds the SSL certificate private key's password.\n# Default: \"arvados-ssl-privkey-password\"\n# ssl_password_secret_name_suffix = \"some-name-suffix\"\n\n# User for software deployment. Depends on the AMI's distro.\n# Default: \"admin\"\n# deploy_user = \"ubuntu\"\n\n# Instance AMI to use for service nodes. Default: latest from Debian 12\n# instance_ami = \"ami-abcdef1234567890\"\n\n# Customer-managed Key to use for volume encryption.\n# cmk_arn = \"arn:aws:kms:....\"\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/services/user_data.sh",
    "content": "#!/bin/sh\n\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nhostname ${hostname}\necho ${hostname} > /etc/hostname\n\n# Retry just in case internet access is not yet ready\nwhile true; do\n  apt-get -o Acquire::ForceIPv4=true update\n  ERR=$?\n  if [ \"$${ERR}\" = \"0\" ]; then\n    break\n  fi\ndone\n\napt-get -o Acquire::ForceIPv4=true install -y git curl\n\nSSH_DIR=\"/home/${deploy_user}/.ssh\"\nif [ ! -d \"$${SSH_DIR}\" ]; then\n  install -d -o ${deploy_user} -g ${deploy_user} -m 700 $${SSH_DIR}\nfi\necho \"${ssh_pubkey}\" | install -o ${deploy_user} -g ${deploy_user} -m 600 /dev/stdin $${SSH_DIR}/authorized_keys\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/services/variables.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nvariable \"instance_type\" {\n  description = \"The EC2 instance types to use per service node\"\n  type = map(string)\n  default = {\n    default = \"m5a.large\"\n  }\n}\n\nvariable \"instance_volume_size\" {\n  description = \"EC2 volume size in GiB per service node\"\n  type = map(number)\n  default = {\n    default = 20\n    controller = 100\n  }\n}\n\nvariable \"pubkey_path\" {\n  description = \"Path to the file containing the public SSH key\"\n  type = string\n  default = \"~/.ssh/id_rsa.pub\"\n}\n\nvariable \"deploy_user\" {\n  description = \"User for deploying the software\"\n  type = string\n  default = \"admin\"\n}\n\nvariable \"ssl_password_secret_name_suffix\" {\n  description = \"Name suffix for the SSL certificate's private key password AWS secret.\"\n  type = string\n  default = \"arvados-ssl-privkey-password\"\n}\n\nvariable \"instance_ami\" {\n  description = \"The EC2 instance AMI to use on the nodes\"\n  type = string\n  default = \"\"\n}\n\nvariable \"use_rds\" {\n  description = \"Enable to create an RDS instance as the cluster's database service\"\n  type = bool\n  default = false\n}\n\nvariable \"rds_username\" {\n  description = \"RDS instance's username. Default: <cluster_name>_arvados\"\n  type = string\n  default = \"\"\n}\n\nvariable \"rds_password\" {\n  description = \"RDS instance's password. Default: randomly-generated 32 chars\"\n  type = string\n  default = \"\"\n}\n\nvariable \"rds_instance_type\" {\n  description = \"RDS instance type\"\n  type = string\n  default = \"db.m5.large\"\n}\n\nvariable \"rds_allocated_storage\" {\n  description = \"RDS initial storage size (GiB)\"\n  type = number\n  default = 60\n}\n\nvariable \"rds_max_allocated_storage\" {\n  description = \"RDS maximum storage size that will autoscale to (GiB)\"\n  type = number\n  default = 300\n}\n\nvariable \"rds_backup_retention_period\" {\n  description = \"RDS Backup retention (days). Set to 0 to disable\"\n  type = number\n  default = 7\n  validation {\n    condition = (var.rds_backup_retention_period <= 35)\n    error_message = \"rds_backup_retention_period should be less than 36 days\"\n  }\n}\n\nvariable \"rds_backup_before_deletion\" {\n  description = \"Create a snapshot before deleting the RDS instance\"\n  type = bool\n  default = true\n}\n\nvariable \"rds_final_backup_name\" {\n  description = \"Snapshot name to use for the RDS final snapshot\"\n  type = string\n  default = \"\"\n}\n\nvariable \"rds_postgresql_version\" {\n  description = \"RDS PostgreSQL version\"\n  type = string\n  default = \"15\"\n}\n\nvariable \"cmk_arn\" {\n  description = \"Customer-managed Key ARN to use for volume encryption.\"\n  type = string\n  default = \"\"\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/vpc/.terraform.lock.hcl",
    "content": "# This file is maintained automatically by \"terraform init\".\n# Manual edits may be lost in future updates.\n\nprovider \"registry.terraform.io/hashicorp/aws\" {\n  version = \"4.38.0\"\n  hashes = [\n    \"h1:LympybKZJE3L0H12nMmDnFH1iexD9S2GqZbDMo4fuPI=\",\n    \"h1:bhDPZioOF9Uz9mavezCHfYbD5YJ3fEPsixLpcWgV/kU=\",\n    \"zh:0ae61458acf7acecf47f7a02e08da1f7adeee9532e053c0d80432f16197e4799\",\n    \"zh:1ece9bcef41ffc75e0955419d7f8b1708ab7ffe4518bc9a2afe3bc5c79a9e79b\",\n    \"zh:302065a7c3ae798345b92a465b650b025d9c4e9abc3e78421ecc69a17b8c3d6a\",\n    \"zh:52d61f6a3ed6726b821a78f1fb78df818cf24a4d2378cc16afded297b37d4b7b\",\n    \"zh:6c365ed0cae031acdbcca04560997589a94629269cb456d468cbe51a3a020386\",\n    \"zh:70987a51d782f3458f124efea320157a48453864c420421051c56d41e463a948\",\n    \"zh:8b5a5f30240c67e596a89ccd76aa81133e6ae253c8a06a932b8901ef2b4a7486\",\n    \"zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425\",\n    \"zh:d672167515ece7c2db4663faf180dfb6cfc6dbf5e149f868d05c39bb54b9ca03\",\n    \"zh:df1bc9926674b2e1246c9ebffd8bf8c4e380f50910a7f0b3ded957e8768ae27a\",\n    \"zh:e304b6e2bd66e7992326aa0446152547eb97e8f77d00bc1a9096022ac37e5d71\",\n    \"zh:f033690f11446af1383ad74149f429fae19e2784af5e151a22f46965dff21b29\",\n  ]\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/vpc/data.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\ndata \"aws_availability_zones\" \"available\" {}"
  },
  {
    "path": "tools/salt-install/terraform/aws/vpc/locals.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nlocals {\n  allowed_ports = {\n    http: \"80\",\n    https: \"443\",\n    ssh: \"22\",\n  }\n  availability_zone = data.aws_availability_zones.available.names[0]\n  route53_public_zone = one(aws_route53_zone.public_zone[*])\n  iam_user_letsencrypt = one(aws_iam_user.letsencrypt[*])\n  iam_access_key_letsencrypt = one(aws_iam_access_key.letsencrypt[*])\n\n  arvados_vpc_id = one(aws_vpc.arvados_vpc[*]) != null ? one(aws_vpc.arvados_vpc[*]).id : var.vpc_id\n  arvados_vpc_cidr_block = one(aws_vpc.arvados_vpc[*])\n\n  arvados_sg_id = one(aws_security_group.arvados_sg[*]) != null ? one(aws_security_group.arvados_sg[*]).id : var.sg_id\n\n  private_subnet_id = one(aws_subnet.private_subnet[*]) != null ? one(aws_subnet.private_subnet[*]).id : var.private_subnet_id\n  public_subnet_id = one(aws_subnet.public_subnet[*]) != null ? one(aws_subnet.public_subnet[*]).id : var.public_subnet_id\n  additional_rds_subnet_id = one(aws_subnet.additional_rds_subnet[*]) != null ? one(aws_subnet.additional_rds_subnet[*]).id : var.additional_rds_subnet_id\n\n  public_hosts = var.private_only ? [] : var.user_facing_hosts\n  private_hosts = concat(\n    var.internal_service_hosts,\n    var.private_only ? var.user_facing_hosts : []\n  )\n  public_ip = {\n    for k, v in aws_eip.arvados_eip: k => v.public_ip\n  }\n  private_ip = var.private_ip\n  cname_by_host = flatten([\n    for host, aliases in var.dns_aliases : [\n      for alias in aliases : {\n        record = alias\n        cname = host\n      }\n    ]\n  ])\n  use_rds = var.use_rds\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/vpc/main.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nterraform {\n  required_version = \"~> 1.3.0\"\n  required_providers {\n    aws = {\n      source = \"hashicorp/aws\"\n      version = \"~> 4.38.0\"\n    }\n  }\n}\n\nprovider \"aws\" {\n  region = var.region_name\n  default_tags {\n    tags = merge(var.custom_tags, {\n      Arvados = var.cluster_name\n      Terraform = true\n    })\n  }\n}\n\nresource \"aws_vpc\" \"arvados_vpc\" {\n  count = var.vpc_id == \"\" ? 1 : 0\n  cidr_block = \"10.1.0.0/16\"\n  enable_dns_hostnames = true\n  enable_dns_support = true\n\n  lifecycle {\n    precondition {\n      condition = (var.sg_id == \"\")\n      error_message = \"vpc_id should be set if sg_id is also set\"\n    }\n  }\n}\nresource \"aws_subnet\" \"public_subnet\" {\n  count = var.public_subnet_id == \"\" ? 1 : 0\n  vpc_id = local.arvados_vpc_id\n  availability_zone = local.availability_zone\n  cidr_block = \"10.1.1.0/24\"\n\n  lifecycle {\n    precondition {\n      condition = (var.vpc_id == \"\")\n      error_message = \"public_subnet_id should be set if vpc_id is also set\"\n    }\n  }\n}\nresource \"aws_subnet\" \"private_subnet\" {\n  count = var.private_subnet_id == \"\" ? 1 : 0\n  vpc_id = local.arvados_vpc_id\n  availability_zone = local.availability_zone\n  cidr_block = \"10.1.2.0/24\"\n\n  lifecycle {\n    precondition {\n      condition = (var.vpc_id == \"\")\n      error_message = \"private_subnet_id should be set if vpc_id is also set\"\n    }\n  }\n}\n\n#\n# Additional subnet on a different AZ is required if RDS is enabled\n#\nresource \"aws_subnet\" \"additional_rds_subnet\" {\n  count = (var.additional_rds_subnet_id == \"\" && local.use_rds) ? 1 : 0\n  vpc_id = local.arvados_vpc_id\n  availability_zone = data.aws_availability_zones.available.names[1]\n  cidr_block = \"10.1.3.0/24\"\n\n  lifecycle {\n    precondition {\n      condition = (var.vpc_id == \"\")\n      error_message = \"additional_rds_subnet_id should be set if vpc_id is also set\"\n    }\n  }\n}\n\n#\n# VPC S3 access\n#\nresource \"aws_vpc_endpoint\" \"s3\" {\n  count = var.vpc_id == \"\" ? 1 : 0\n  vpc_id = local.arvados_vpc_id\n  service_name = \"com.amazonaws.${var.region_name}.s3\"\n}\nresource \"aws_vpc_endpoint_route_table_association\" \"compute_s3_route\" {\n  count = var.private_subnet_id == \"\" ? 1 : 0\n  vpc_endpoint_id = aws_vpc_endpoint.s3[0].id\n  route_table_id = aws_route_table.private_subnet_rt[0].id\n}\n\n#\n# Internet access for Public IP instances\n#\nresource \"aws_internet_gateway\" \"internet_gw\" {\n  count = var.vpc_id == \"\" ? 1 : 0\n  vpc_id = local.arvados_vpc_id\n}\nresource \"aws_eip\" \"arvados_eip\" {\n  for_each = toset(local.public_hosts)\n  depends_on = [\n    aws_internet_gateway.internet_gw\n  ]\n}\nresource \"aws_route_table\" \"public_subnet_rt\" {\n  count = var.public_subnet_id == \"\" ? 1 : 0\n  vpc_id = local.arvados_vpc_id\n  route {\n    cidr_block = \"0.0.0.0/0\"\n    gateway_id = aws_internet_gateway.internet_gw[0].id\n  }\n}\nresource \"aws_route_table_association\" \"public_subnet_assoc\" {\n  count = var.public_subnet_id == \"\" ? 1 : 0\n  subnet_id = aws_subnet.public_subnet[0].id\n  route_table_id = aws_route_table.public_subnet_rt[0].id\n}\n\n#\n# Internet access for Private IP instances\n#\nresource \"aws_eip\" \"nat_gw_eip\" {\n  count = var.private_subnet_id == \"\" ? 
1 : 0\n  depends_on = [\n    aws_internet_gateway.internet_gw[0]\n  ]\n}\nresource \"aws_nat_gateway\" \"nat_gw\" {\n  count = var.private_subnet_id == \"\" ? 1 : 0\n  # A NAT gateway should be placed on a subnet with an internet gateway\n  subnet_id = aws_subnet.public_subnet[0].id\n  allocation_id = aws_eip.nat_gw_eip[0].id\n}\nresource \"aws_route_table\" \"private_subnet_rt\" {\n  count = var.private_subnet_id == \"\" ? 1 : 0\n  vpc_id = local.arvados_vpc_id\n  route {\n    cidr_block = \"0.0.0.0/0\"\n    nat_gateway_id = aws_nat_gateway.nat_gw[0].id\n  }\n}\nresource \"aws_route_table_association\" \"private_subnet_assoc\" {\n  count = var.private_subnet_id == \"\" ? 1 : 0\n  subnet_id = aws_subnet.private_subnet[0].id\n  route_table_id = aws_route_table.private_subnet_rt[0].id\n}\n\nresource \"aws_security_group\" \"arvados_sg\" {\n  name = \"arvados_sg\"\n  count = var.sg_id == \"\" ? 1 : 0\n  vpc_id = aws_vpc.arvados_vpc[0].id\n\n  lifecycle {\n    precondition {\n      condition = (var.vpc_id == \"\")\n      error_message = \"sg_id should be set if vpc_id is set\"\n    }\n  }\n\n  dynamic \"ingress\" {\n    for_each = local.allowed_ports\n    content {\n      description = \"Ingress rule for ${ingress.key}\"\n      from_port = \"${ingress.value}\"\n      to_port = \"${ingress.value}\"\n      protocol = \"tcp\"\n      cidr_blocks = [\"0.0.0.0/0\"]\n      ipv6_cidr_blocks = [\"::/0\"]\n    }\n  }\n  # Allows communication between nodes in the VPC\n  ingress {\n    from_port = 0\n    to_port = 0\n    protocol = \"-1\"\n    cidr_blocks = [ aws_vpc.arvados_vpc[0].cidr_block ]\n  }\n  # Even though AWS auto-creates an \"allow all\" egress rule,\n  # Terraform deletes it, so we add it explicitly.\n  egress {\n    from_port = 0\n    to_port = 0\n    protocol = \"-1\"\n    cidr_blocks = [\"0.0.0.0/0\"]\n    ipv6_cidr_blocks = [\"::/0\"]\n  }\n}\n\n#\n# Route53 split-horizon DNS zones\n#\n\n# PUBLIC DNS\nresource \"aws_route53_zone\" \"public_zone\" {\n  count = var.private_only ? 0 : 1\n  name = var.domain_name\n}\nresource \"aws_route53_record\" \"public_a_record\" {\n  zone_id = try(local.route53_public_zone.id, \"\")\n  for_each = local.public_ip\n  name = each.key\n  type = \"A\"\n  ttl = 300\n  records = [ each.value ]\n}\nresource \"aws_route53_record\" \"main_a_record\" {\n  count = var.private_only ? 
0 : 1\n  zone_id = try(local.route53_public_zone.id, \"\")\n  name = \"\"\n  type = \"A\"\n  ttl = 300\n  records = [ local.public_ip[\"controller\"] ]\n}\nresource \"aws_route53_record\" \"public_cname_record\" {\n  zone_id = try(local.route53_public_zone.id, \"\")\n  for_each = {\n    for i in local.cname_by_host: i.record =>\n      \"${i.cname}.${var.domain_name}\"\n    if var.private_only == false\n  }\n  name = each.key\n  type = \"CNAME\"\n  ttl = 300\n  records = [ each.value ]\n}\n\n# PRIVATE DNS\nresource \"aws_route53_zone\" \"private_zone\" {\n  name = var.domain_name\n  vpc {\n    vpc_id = local.arvados_vpc_id\n  }\n}\nresource \"aws_route53_record\" \"private_a_record\" {\n  zone_id = aws_route53_zone.private_zone.id\n  for_each = local.private_ip\n  name = each.key\n  type = \"A\"\n  ttl = 300\n  records = [ each.value ]\n}\nresource \"aws_route53_record\" \"private_main_a_record\" {\n  zone_id = aws_route53_zone.private_zone.id\n  name = \"\"\n  type = \"A\"\n  ttl = 300\n  records = [ local.private_ip[\"controller\"] ]\n}\nresource \"aws_route53_record\" \"private_cname_record\" {\n  zone_id = aws_route53_zone.private_zone.id\n  for_each = {for i in local.cname_by_host: i.record => \"${i.cname}.${var.domain_name}\" }\n  name = each.key\n  type = \"CNAME\"\n  ttl = 300\n  records = [ each.value ]\n}\n\n#\n# Route53's credentials for Let's Encrypt\n#\nresource \"aws_iam_user\" \"letsencrypt\" {\n  count = var.private_only ? 0 : 1\n  name = \"${var.cluster_name}-letsencrypt\"\n  path = \"/\"\n}\n\nresource \"aws_iam_access_key\" \"letsencrypt\" {\n  count = var.private_only ? 0 : 1\n  user = local.iam_user_letsencrypt.name\n}\nresource \"aws_iam_user_policy\" \"letsencrypt_iam_policy\" {\n  count = var.private_only ? 0 : 1\n  name = \"${var.cluster_name}-letsencrypt_iam_policy\"\n  user = local.iam_user_letsencrypt.name\n  policy = jsonencode({\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [{\n      \"Effect\": \"Allow\",\n      \"Action\": [\n        \"route53:ListHostedZones\",\n        \"route53:GetChange\"\n      ],\n      \"Resource\": [\n          \"*\"\n      ]\n    },{\n      \"Effect\" : \"Allow\",\n      \"Action\" : [\n        \"route53:ChangeResourceRecordSets\"\n      ],\n      \"Resource\" : [\n        \"arn:aws:route53:::hostedzone/${local.route53_public_zone.id}\"\n      ]\n    }]\n  })\n}\n\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/vpc/outputs.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\noutput \"arvados_vpc_id\" {\n  value = local.arvados_vpc_id\n}\noutput \"arvados_vpc_cidr\" {\n  value = try(local.arvados_vpc_cidr_block, \"\")\n}\n\noutput \"public_subnet_id\" {\n  value = local.public_subnet_id\n}\n\noutput \"private_subnet_id\" {\n  value = local.private_subnet_id\n}\n\noutput \"arvados_sg_id\" {\n  value = local.arvados_sg_id\n}\n\noutput \"additional_rds_subnet_id\" {\n  value = local.use_rds ? local.additional_rds_subnet_id : \"\"\n}\n\noutput \"eip_id\" {\n  value = { for k, v in aws_eip.arvados_eip: k => v.id }\n}\n\noutput \"public_ip\" {\n  value = local.public_ip\n}\n\noutput \"public_hosts\" {\n  value = local.public_hosts\n}\n\noutput \"private_ip\" {\n  value = local.private_ip\n}\n\noutput \"private_hosts\" {\n  value = local.private_hosts\n}\n\noutput \"user_facing_hosts\" {\n  value = var.user_facing_hosts\n}\n\noutput \"internal_service_hosts\" {\n  value = var.internal_service_hosts\n}\n\noutput \"private_only\" {\n  value = var.private_only\n}\n\noutput \"route53_dns_ns\" {\n  value = try(local.route53_public_zone.name_servers, [])\n}\n\noutput \"letsencrypt_iam_access_key_id\" {\n  value = try(local.iam_access_key_letsencrypt.id, \"\")\n  sensitive = true\n}\n\noutput \"letsencrypt_iam_secret_access_key\" {\n  value = try(local.iam_access_key_letsencrypt.secret, \"\")\n  sensitive = true\n}\n\noutput \"region_name\" {\n  value = var.region_name\n}\n\noutput \"cluster_name\" {\n  value = var.cluster_name\n}\n\noutput \"domain_name\" {\n  value = var.domain_name\n}\n\noutput \"custom_tags\" {\n  value = var.custom_tags\n}\n\noutput \"use_rds\" {\n  value = var.use_rds\n}\n"
  },
  {
    "path": "tools/salt-install/terraform/aws/vpc/terraform.tfvars",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\n# Main cluster configurations. No sensible defaults provided for these:\n# region_name = \"us-east-1\"\n# cluster_name = \"xarv1\"\n# domain_name = \"xarv1.example.com\"\n\n# Uncomment this to create an non-publicly accessible Arvados cluster\n# private_only = true\n\n# Optional networking options. Set existing resources to be used instead of\n# creating new ones.\n# NOTE: We only support fully managed or fully custom networking, not a mix of both.\n#\n# vpc_id = \"vpc-aaaa\"\n# sg_id = \"sg-bbbb\"\n# public_subnet_id = \"subnet-cccc\"\n# private_subnet_id = \"subnet-dddd\"\n#\n# RDS related parameters:\n# use_rds = true\n# additional_rds_subnet_id = \"subnet-eeee\"\n\n# Optional custom tags to add to every resource. Default: {}\n# custom_tags = {\n#   environment = \"production\"\n#   project = \"Phoenix\"\n#   owner = \"jdoe\"\n# }\n\n# Optional cluster service nodes configuration:\n#\n# List of node names which either will be hosting user-facing or internal\n# services. Defaults:\n# user_facing_hosts = [ \"controller\", \"workbench\" ]\n# internal_service_hosts = [ \"keep0\", \"shell\" ]\n#\n# Map assigning each node name an internal IP address. Defaults:\n# private_ip = {\n#   controller = \"10.1.1.11\"\n#   workbench = \"10.1.1.15\"\n#   shell = \"10.1.2.17\"\n#   keep0 = \"10.1.2.13\"\n# }\n#\n# Map assigning DNS aliases for service node names. Defaults:\n# dns_aliases = {\n#   controller = [\n#     \"*.containers\"\n#   ],\n#   workbench = [\n#     \"ws\",\n#     \"workbench2\",\n#     \"webshell\",\n#     \"keep\",\n#     \"download\",\n#     \"prometheus\",\n#     \"grafana\",\n#     \"loki\",\n#     \"*.collections\"\n#   ]\n# }"
  },
  {
    "path": "tools/salt-install/terraform/aws/vpc/variables.tf",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nvariable \"region_name\" {\n  description = \"Name of the AWS Region where to install Arvados\"\n  type = string\n}\n\nvariable \"cluster_name\" {\n  description = \"A 5-char alphanum identifier for your Arvados cluster\"\n  type = string\n  validation {\n    condition = length(var.cluster_name) == 5\n    error_message = \"cluster_name should be 5 chars long.\"\n  }\n}\n\nvariable \"domain_name\" {\n  description = \"The domain name under which your Arvados cluster will be hosted\"\n  type = string\n}\n\nvariable \"private_only\" {\n  description = \"Don't create infrastructure reachable from the public Internet\"\n  type = bool\n  default = false\n}\n\nvariable \"user_facing_hosts\" {\n  description = \"List of hostnames for nodes that hold user-accesible Arvados services\"\n  type = list(string)\n  default = [ \"controller\", \"workbench\" ]\n}\n\nvariable \"internal_service_hosts\" {\n  description = \"List of hostnames for nodes that hold internal Arvados services\"\n  type = list(string)\n  default = [ \"keep0\", \"shell\" ]\n}\n\nvariable \"private_ip\" {\n  description = \"Map with every node's private IP address\"\n  type = map(string)\n  default = {\n    controller = \"10.1.1.11\"\n    workbench = \"10.1.1.15\"\n    shell = \"10.1.2.17\"\n    keep0 = \"10.1.2.13\"\n  }\n}\n\nvariable \"dns_aliases\" {\n  description = \"Sets DNS name aliases for every service node\"\n  type = map(list(string))\n  default = {\n    controller = [\n      \"*.containers\"\n    ],\n    workbench = [\n      \"ws\",\n      \"workbench2\",\n      \"webshell\",\n      \"keep\",\n      \"download\",\n      \"prometheus\",\n      \"grafana\",\n      \"loki\",\n      \"*.collections\"\n    ]\n  }\n}\n\nvariable \"vpc_id\" {\n  description = \"Use existing VPC instead of creating one for the cluster\"\n  type = string\n  default = \"\"\n}\n\nvariable \"sg_id\" {\n  description = \"Use existing security group instead of creating one for the cluster\"\n  type = string\n  default = \"\"\n}\n\nvariable \"additional_rds_subnet_id\" {\n  description = \"Use existing subnet for RDS instead of creating one for the cluster\"\n  type = string\n  default = \"\"\n}\n\nvariable \"private_subnet_id\" {\n  description = \"Use existing private subnet instead of creating one for the cluster\"\n  type = string\n  default = \"\"\n}\n\nvariable \"public_subnet_id\" {\n  description = \"Use existing public subnet instead of creating one for the cluster\"\n  type = string\n  default = \"\"\n}\n\nvariable \"custom_tags\" {\n  description = \"Apply customized tags to every resource on the cluster\"\n  type = map(string)\n  default = {}\n}\n\nvariable \"use_rds\" {\n  description = \"Enable this to create an RDS instance as the cluster's database service\"\n  type = bool\n  default = false\n}"
  },
  {
    "path": "tools/salt-install/tests/hasher-workflow-job.yml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ninputfile:\n  class: File\n  path: test.txt\nhasher1_outputname: hasher1.md5sum.txt\nhasher2_outputname: hasher2.md5sum.txt\nhasher3_outputname: hasher3.md5sum.txt\n"
  },
  {
    "path": "tools/salt-install/tests/hasher-workflow.cwl",
    "content": "#!/usr/bin/env cwl-runner\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: Workflow\n\n$namespaces:\n  arv: \"http://arvados.org/cwl#\"\n  cwltool: \"http://commonwl.org/cwltool#\"\n\ninputs:\n  inputfile: File\n  hasher1_outputname: string\n  hasher2_outputname: string\n  hasher3_outputname: string\n\noutputs:\n  hasher_out:\n    type: File\n    outputSource: hasher3/hasher_out\n\nsteps:\n  hasher1:\n    run: hasher.cwl\n    in:\n      inputfile: inputfile\n      outputname: hasher1_outputname\n    out: [hasher_out]\n    hints:\n      ResourceRequirement:\n        coresMin: 1\n      arv:IntermediateOutput:\n        outputTTL: 3600\n      arv:ReuseRequirement:\n        enableReuse: false\n\n  hasher2:\n    run: hasher.cwl\n    in:\n      inputfile: hasher1/hasher_out\n      outputname: hasher2_outputname\n    out: [hasher_out]\n    hints:\n      ResourceRequirement:\n        coresMin: 1\n      arv:IntermediateOutput:\n        outputTTL: 3600\n      arv:ReuseRequirement:\n        enableReuse: false\n\n  hasher3:\n    run: hasher.cwl\n    in:\n      inputfile: hasher2/hasher_out\n      outputname: hasher3_outputname\n    out: [hasher_out]\n    hints:\n      ResourceRequirement:\n        coresMin: 1\n      arv:IntermediateOutput:\n        outputTTL: 3600\n      arv:ReuseRequirement:\n        enableReuse: false\n"
  },
  {
    "path": "tools/salt-install/tests/hasher.cwl",
    "content": "#!/usr/bin/env cwl-runner\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ncwlVersion: v1.0\nclass: CommandLineTool\n\nbaseCommand: md5sum\ninputs:\n  inputfile:\n    type: File\n    inputBinding:\n      position: 1\n  outputname:\n    type: string\n\nstdout: $(inputs.outputname)\n\noutputs:\n  hasher_out:\n    type: File\n    outputBinding:\n      glob: $(inputs.outputname)\n"
  },
  {
    "path": "tools/salt-install/tests/run-test.sh",
    "content": "#!/bin/bash\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nexport ARVADOS_API_TOKEN=__SYSTEM_ROOT_TOKEN__\nexport ARVADOS_API_HOST=__CLUSTER__.__DOMAIN__:__CONTROLLER_EXT_SSL_PORT__\nexport ARVADOS_API_HOST_INSECURE=true\n\nset -o pipefail\n\n# First, validate that the CA is installed and that we can query it with no errors.\nif ! curl -s -o /dev/null https://${ARVADOS_API_HOST}/users/welcome?return_to=%2F; then\n  echo \"The Arvados CA was not correctly installed. Although some components will work,\"\n  echo \"others won't. Please verify that the CA cert file was installed correctly and\"\n  echo \"retry running these tests.\"\n  exit 1\nfi\n\n# Then, run a basic diagnostics test.\necho \"Running arvados-client diagnostics...\"\nif ! arvados-client diagnostics -internal-client; then\n  echo \"Diagnostics run FAILED, exiting\"\n  exit 1\nfi\n\n# https://doc.arvados.org/v2.0/install/install-jobs-image.html\necho \"Creating Arvados Standard Docker Images project\"\nuuid_prefix=$(arv --format=uuid user current | cut -d- -f1)\nproject_uuid=$(arv --format=uuid group list --filters '[[\"name\", \"=\", \"Arvados Standard Docker Images\"]]')\n\nif [ \"x${project_uuid}\" = \"x\" ]; then\n  project_uuid=$(arv --format=uuid group create --group \"{\\\"owner_uuid\\\": \\\"${uuid_prefix}-tpzed-000000000000000\\\", \\\"group_class\\\":\\\"project\\\", \\\"name\\\":\\\"Arvados Standard Docker Images\\\"}\")\n\n  read -rd $'\\000' newlink <<EOF; arv link create --link \"${newlink}\"\n{\n  \"tail_uuid\":\"${uuid_prefix}-j7d0g-fffffffffffffff\",\n  \"head_uuid\":\"${project_uuid}\",\n  \"link_class\":\"permission\",\n  \"name\":\"can_read\"\n}\nEOF\nfi\n\necho \"Arvados project uuid is '${project_uuid}'\"\n\n# Create the initial user\necho \"Creating initial user '__INITIAL_USER__'\"\nuser_uuid=$(arv --format=uuid user list --filters '[[\"email\", \"=\", \"__INITIAL_USER_EMAIL__\"], [\"username\", \"=\", \"__INITIAL_USER__\"]]')\n\nif [ \"x${user_uuid}\" = \"x\" ]; then\n  user_uuid=$(arv --format=uuid user create --user '{\"email\": \"__INITIAL_USER_EMAIL__\", \"username\": \"__INITIAL_USER__\"}')\n  echo \"Setting up user '__INITIAL_USER__'\"\n  arv user setup --uuid \"${user_uuid}\"\nfi\n\necho \"Activating user '__INITIAL_USER__'\"\narv user update --uuid \"${user_uuid}\" --user '{\"is_active\": true}'\n\necho \"Getting the user API TOKEN\"\nuser_api_token=$(arv api_client_authorization list | jq -r \".items[] | select( .owner_uuid == \\\"${user_uuid}\\\" ).api_token\" | head -1)\n\nif [ \"x${user_api_token}\" = \"x\" ]; then\n  echo \"No existing token found for user '__INITIAL_USER__' (user_uuid: '${user_uuid}'). Creating token\"\n  user_api_token=$(arv api_client_authorization create --api-client-authorization \"{\\\"owner_uuid\\\": \\\"${user_uuid}\\\"}\" | jq -r .api_token)\nfi\n\necho \"API TOKEN FOR user '__INITIAL_USER__': '${user_api_token}'.\"\n\n# Change to the user's token and run the workflow\necho \"Switching to user '__INITIAL_USER__'\"\nexport ARVADOS_API_TOKEN=\"${user_api_token}\"\n\necho \"Running test CWL workflow\"\ncwl-runner --debug hasher-workflow.cwl hasher-workflow-job.yml\n"
  },
  {
    "path": "tools/salt-install/tests/test.txt",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\ntest\n"
  },
  {
    "path": "tools/terraform/.gitignore",
    "content": ".DS_Store\n.terraform\nexamples\n*backup\n*disabled\n.terraform.lock.hcl\nterraform.tfstate*\n"
  },
  {
    "path": "tools/test-collection-create/test-collection-create.py",
    "content": "#!/usr/bin/env python3\n#\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nimport argparse\nimport logging\nimport random\nimport string\nimport sys\n\nimport arvados\nimport arvados.collection\n\nlogger = logging.getLogger('arvados.test_collection_create')\nlogger.setLevel(logging.INFO)\n\nmax_manifest_size = 127*1024*1024\n\nopts = argparse.ArgumentParser(add_help=False)\nopts.add_argument('--min-files', type=int, default=30000, help=\"\"\"\nMinimum number of files on each directory. Default: 30000.\n\"\"\")\nopts.add_argument('--max-files', type=int, default=30000, help=\"\"\"\nMaximum number of files on each directory. Default: 30000.\n\"\"\")\nopts.add_argument('--min-depth', type=int, default=0, help=\"\"\"\nMinimum depth for the created tree structure. Default: 0.\n\"\"\")\nopts.add_argument('--max-depth', type=int, default=0, help=\"\"\"\nMaximum depth for the created tree structure. Default: 0.\n\"\"\")\nopts.add_argument('--min-subdirs', type=int, default=1, help=\"\"\"\nMinimum number of subdirectories created at every depth level. Default: 1.\n\"\"\")\nopts.add_argument('--max-subdirs', type=int, default=10, help=\"\"\"\nMaximum number of subdirectories created at every depth level. Default: 10.\n\"\"\")\nopts.add_argument('--debug', action='store_true', default=False, help=\"\"\"\nSets logging level to DEBUG.\n\"\"\")\n\narg_parser = argparse.ArgumentParser(\n    description='Create a collection with garbage data for testing purposes.',\n    parents=[opts])\n\nadjectives = ['abandoned','able','absolute','adorable','adventurous','academic',\n    'acceptable','acclaimed','accomplished','accurate','aching','acidic','acrobatic',\n    'active','actual','adept','admirable','admired','adolescent','adorable','adored',\n    'advanced','afraid','affectionate','aged','aggravating','aggressive','agile',\n    'agitated','agonizing','agreeable','ajar','alarmed','alarming','alert','alienated',\n    'alive','all','altruistic','amazing','ambitious','ample','amused','amusing','anchored',\n    'ancient','angelic','angry','anguished','animated','annual','another','antique',\n    'anxious','any','apprehensive','appropriate','apt','arctic','arid','aromatic','artistic',\n    'ashamed','assured','astonishing','athletic','attached','attentive','attractive',\n    'austere','authentic','authorized','automatic','avaricious','average','aware','awesome',\n    'awful','awkward','babyish','bad','back','baggy','bare','barren','basic','beautiful',\n    'belated','beloved','beneficial','better','best','bewitched','big','big-hearted',\n    'biodegradable','bite-sized','bitter','black','black-and-white','bland','blank',\n    'blaring','bleak','blind','blissful','blond','blue','blushing','bogus','boiling',\n    'bold','bony','boring','bossy','both','bouncy','bountiful','bowed','brave','breakable',\n    'brief','bright','brilliant','brisk','broken','bronze','brown','bruised','bubbly',\n    'bulky','bumpy','buoyant','burdensome','burly','bustling','busy','buttery','buzzing',\n    'calculating','calm','candid','canine','capital','carefree','careful','careless',\n    'caring','cautious','cavernous','celebrated','charming','cheap','cheerful','cheery',\n    'chief','chilly','chubby','circular','classic','clean','clear','clear-cut','clever',\n    'close','closed','cloudy','clueless','clumsy','cluttered','coarse','cold','colorful',\n    'colorless','colossal','comfortable','common','compassionate','competent','complete',\n    
'complex','complicated','composed','concerned','concrete','confused','conscious',\n    'considerate','constant','content','conventional','cooked','cool','cooperative',\n    'coordinated','corny','corrupt','costly','courageous','courteous','crafty','crazy',\n    'creamy','creative','creepy','criminal','crisp','critical','crooked','crowded',\n    'cruel','crushing','cuddly','cultivated','cultured','cumbersome','curly','curvy',\n    'cute','cylindrical','damaged','damp','dangerous','dapper','daring','darling','dark',\n    'dazzling','dead','deadly','deafening','dear','dearest','decent','decimal','decisive',\n    'deep','defenseless','defensive','defiant','deficient','definite','definitive','delayed',\n    'delectable','delicious','delightful','delirious','demanding','dense','dental',\n    'dependable','dependent','descriptive','deserted','detailed','determined','devoted',\n    'different','difficult','digital','diligent','dim','dimpled','dimwitted','direct',\n    'disastrous','discrete','disfigured','disgusting','disloyal','dismal','distant',\n    'downright','dreary','dirty','disguised','dishonest','dismal','distant','distinct',\n    'distorted','dizzy','dopey','doting','double','downright','drab','drafty','dramatic',\n    'dreary','droopy','dry','dual','dull','dutiful','each','eager','earnest','early',\n    'easy','easy-going','ecstatic','edible','educated','elaborate','elastic','elated',\n    'elderly','electric','elegant','elementary','elliptical','embarrassed','embellished',\n    'eminent','emotional','empty','enchanted','enchanting','energetic','enlightened',\n    'enormous','enraged','entire','envious','equal','equatorial','essential','esteemed',\n    'ethical','euphoric','even','evergreen','everlasting','every','evil','exalted',\n    'excellent','exemplary','exhausted','excitable','excited','exciting','exotic',\n    'expensive','experienced','expert','extraneous','extroverted','extra-large','extra-small',\n    'fabulous','failing','faint','fair','faithful','fake','false','familiar','famous',\n    'fancy','fantastic','far','faraway','far-flung','far-off','fast','fat','fatal',\n    'fatherly','favorable','favorite','fearful','fearless','feisty','feline','female',\n    'feminine','few','fickle','filthy','fine','finished','firm','first','firsthand',\n    'fitting','fixed','flaky','flamboyant','flashy','flat','flawed','flawless','flickering',\n    'flimsy','flippant','flowery','fluffy','fluid','flustered','focused','fond','foolhardy',\n    'foolish','forceful','forked','formal','forsaken','forthright','fortunate','fragrant',\n    'frail','frank','frayed','free','French','fresh','frequent','friendly','frightened',\n    'frightening','frigid','frilly','frizzy','frivolous','front','frosty','frozen',\n    'frugal','fruitful','full','fumbling','functional','funny','fussy','fuzzy','gargantuan',\n    'gaseous','general','generous','gentle','genuine','giant','giddy','gigantic','gifted',\n    'giving','glamorous','glaring','glass','gleaming','gleeful','glistening','glittering',\n    'gloomy','glorious','glossy','glum','golden','good','good-natured','gorgeous',\n    'graceful','gracious','grand','grandiose','granular','grateful','grave','gray',\n    'great','greedy','green','gregarious','grim','grimy','gripping','grizzled','gross',\n    'grotesque','grouchy','grounded','growing','growling','grown','grubby','gruesome',\n    'grumpy','guilty','gullible','gummy','hairy','half','handmade','handsome','handy',\n    'happy','happy-go-lucky','hard','hard-to-find','harmful','harmless','harmonious',\n    
'harsh','hasty','hateful','haunting','healthy','heartfelt','hearty','heavenly',\n    'heavy','hefty','helpful','helpless','hidden','hideous','high','high-level','hilarious',\n    'hoarse','hollow','homely','honest','honorable','honored','hopeful','horrible',\n    'hospitable','hot','huge','humble','humiliating','humming','humongous','hungry',\n    'hurtful','husky','icky','icy','ideal','idealistic','identical','idle','idiotic',\n    'idolized','ignorant','ill','illegal','ill-fated','ill-informed','illiterate',\n    'illustrious','imaginary','imaginative','immaculate','immaterial','immediate',\n    'immense','impassioned','impeccable','impartial','imperfect','imperturbable','impish',\n    'impolite','important','impossible','impractical','impressionable','impressive',\n    'improbable','impure','inborn','incomparable','incompatible','incomplete','inconsequential',\n    'incredible','indelible','inexperienced','indolent','infamous','infantile','infatuated',\n    'inferior','infinite','informal','innocent','insecure','insidious','insignificant',\n    'insistent','instructive','insubstantial','intelligent','intent','intentional',\n    'interesting','internal','international','intrepid','ironclad','irresponsible',\n    'irritating','itchy','jaded','jagged','jam-packed','jaunty','jealous','jittery',\n    'joint','jolly','jovial','joyful','joyous','jubilant','judicious','juicy','jumbo',\n    'junior','jumpy','juvenile','kaleidoscopic','keen','key','kind','kindhearted','kindly',\n    'klutzy','knobby','knotty','knowledgeable','knowing','known','kooky','kosher','lame',\n    'lanky','large','last','lasting','late','lavish','lawful','lazy','leading','lean',\n    'leafy','left','legal','legitimate','light','lighthearted','likable','likely','limited',\n    'limp','limping','linear','lined','liquid','little','live','lively','livid','loathsome',\n    'lone','lonely','long','long-term','loose','lopsided','lost','loud','lovable','lovely',\n    'loving','low','loyal','lucky','lumbering','luminous','lumpy','lustrous','luxurious',\n    'mad','made-up','magnificent','majestic','major','male','mammoth','married','marvelous',\n    'masculine','massive','mature','meager','mealy','mean','measly','meaty','medical',\n    'mediocre','medium','meek','mellow','melodic','memorable','menacing','merry','messy',\n    'metallic','mild','milky','mindless','miniature','minor','minty','miserable','miserly',\n    'misguided','misty','mixed','modern','modest','moist','monstrous','monthly','monumental',\n    'moral','mortified','motherly','motionless','mountainous','muddy','muffled','multicolored',\n    'mundane','murky','mushy','musty','muted','mysterious','naive','narrow','nasty','natural',\n    'naughty','nautical','near','neat','necessary','needy','negative','neglected','negligible',\n    'neighboring','nervous','new','next','nice','nifty','nimble','nippy','nocturnal','noisy',\n    'nonstop','normal','notable','noted','noteworthy','novel','noxious','numb','nutritious',\n    'nutty','obedient','obese','oblong','oily','oblong','obvious','occasional','odd',\n    'oddball','offbeat','offensive','official','old','old-fashioned','only','open','optimal',\n    'optimistic','opulent','orange','orderly','organic','ornate','ornery','ordinary',\n    'original','other','our','outlying','outgoing','outlandish','outrageous','outstanding',\n    'oval','overcooked','overdue','overjoyed','overlooked','palatable','pale','paltry',\n    'parallel','parched','partial','passionate','past','pastel','peaceful','peppery',\n    
'perfect','perfumed','periodic','perky','personal','pertinent','pesky','pessimistic',\n    'petty','phony','physical','piercing','pink','pitiful','plain','plaintive','plastic',\n    'playful','pleasant','pleased','pleasing','plump','plush','polished','polite','political',\n    'pointed','pointless','poised','poor','popular','portly','posh','positive','possible',\n    'potable','powerful','powerless','practical','precious','present','prestigious',\n    'pretty','precious','previous','pricey','prickly','primary','prime','pristine','private',\n    'prize','probable','productive','profitable','profuse','proper','proud','prudent',\n    'punctual','pungent','puny','pure','purple','pushy','putrid','puzzled','puzzling',\n    'quaint','qualified','quarrelsome','quarterly','queasy','querulous','questionable',\n    'quick','quick-witted','quiet','quintessential','quirky','quixotic','quizzical',\n    'radiant','ragged','rapid','rare','rash','raw','recent','reckless','rectangular',\n    'ready','real','realistic','reasonable','red','reflecting','regal','regular',\n    'reliable','relieved','remarkable','remorseful','remote','repentant','required',\n    'respectful','responsible','repulsive','revolving','rewarding','rich','rigid',\n    'right','ringed','ripe','roasted','robust','rosy','rotating','rotten','rough',\n    'round','rowdy','royal','rubbery','rundown','ruddy','rude','runny','rural','rusty',\n    'sad','safe','salty','same','sandy','sane','sarcastic','sardonic','satisfied',\n    'scaly','scarce','scared','scary','scented','scholarly','scientific','scornful',\n    'scratchy','scrawny','second','secondary','second-hand','secret','self-assured',\n    'self-reliant','selfish','sentimental','separate','serene','serious','serpentine',\n    'several','severe','shabby','shadowy','shady','shallow','shameful','shameless',\n    'sharp','shimmering','shiny','shocked','shocking','shoddy','short','short-term',\n    'showy','shrill','shy','sick','silent','silky','silly','silver','similar','simple',\n    'simplistic','sinful','single','sizzling','skeletal','skinny','sleepy','slight',\n    'slim','slimy','slippery','slow','slushy','small','smart','smoggy','smooth','smug',\n    'snappy','snarling','sneaky','sniveling','snoopy','sociable','soft','soggy','solid',\n    'somber','some','spherical','sophisticated','sore','sorrowful','soulful','soupy',\n    'sour','Spanish','sparkling','sparse','specific','spectacular','speedy','spicy',\n    'spiffy','spirited','spiteful','splendid','spotless','spotted','spry','square',\n    'squeaky','squiggly','stable','staid','stained','stale','standard','starchy','stark',\n    'starry','steep','sticky','stiff','stimulating','stingy','stormy','straight','strange',\n    'steel','strict','strident','striking','striped','strong','studious','stunning',\n    'stupendous','stupid','sturdy','stylish','subdued','submissive','substantial','subtle',\n    'suburban','sudden','sugary','sunny','super','superb','superficial','superior',\n    'supportive','sure-footed','surprised','suspicious','svelte','sweaty','sweet','sweltering',\n    'swift','sympathetic','tall','talkative','tame','tan','tangible','tart','tasty',\n    'tattered','taut','tedious','teeming','tempting','tender','tense','tepid','terrible',\n    'terrific','testy','thankful','that','these','thick','thin','third','thirsty','this',\n    'thorough','thorny','those','thoughtful','threadbare','thrifty','thunderous','tidy',\n    'tight','timely','tinted','tiny','tired','torn','total','tough','traumatic','treasured',\n    
'tremendous','tragic','trained','tremendous','triangular','tricky','trifling','trim',\n    'trivial','troubled','true','trusting','trustworthy','trusty','truthful','tubby',\n    'turbulent','twin','ugly','ultimate','unacceptable','unaware','uncomfortable',\n    'uncommon','unconscious','understated','unequaled','uneven','unfinished','unfit',\n    'unfolded','unfortunate','unhappy','unhealthy','uniform','unimportant','unique',\n    'united','unkempt','unknown','unlawful','unlined','unlucky','unnatural','unpleasant',\n    'unrealistic','unripe','unruly','unselfish','unsightly','unsteady','unsung','untidy',\n    'untimely','untried','untrue','unused','unusual','unwelcome','unwieldy','unwilling',\n    'unwitting','unwritten','upbeat','upright','upset','urban','usable','used','useful',\n    'useless','utilized','utter','vacant','vague','vain','valid','valuable','vapid',\n    'variable','vast','velvety','venerated','vengeful','verifiable','vibrant','vicious',\n    'victorious','vigilant','vigorous','villainous','violet','violent','virtual',\n    'virtuous','visible','vital','vivacious','vivid','voluminous','wan','warlike','warm',\n    'warmhearted','warped','wary','wasteful','watchful','waterlogged','watery','wavy',\n    'wealthy','weak','weary','webbed','wee','weekly','weepy','weighty','weird','welcome',\n    'well-documented','well-groomed','well-informed','well-lit','well-made','well-off',\n    'well-to-do','well-worn','wet','which','whimsical','whirlwind','whispered','white',\n    'whole','whopping','wicked','wide','wide-eyed','wiggly','wild','willing','wilted',\n    'winding','windy','winged','wiry','wise','witty','wobbly','woeful','wonderful',\n    'wooden','woozy','wordy','worldly','worn','worried','worrisome','worse','worst',\n    'worthless','worthwhile','worthy','wrathful','wretched','writhing','wrong','wry',\n    'yawning','yearly','yellow','yellowish','young','youthful','yummy','zany','zealous',\n    'zesty','zigzag']\nnouns = ['people','history','way','art','world','information','map','two','family',\n    'government','health','system','computer','meat','year','thanks','music','person',\n    'reading','method','data','food','understanding','theory','law','bird','literature',\n    'problem','software','control','knowledge','power','ability','economics','love',\n    'internet','television','science','library','nature','fact','product','idea',\n    'temperature','investment','area','society','activity','story','industry','media',\n    'thing','oven','community','definition','safety','quality','development','language',\n    'management','player','variety','video','week','security','country','exam','movie',\n    'organization','equipment','physics','analysis','policy','series','thought','basis',\n    'boyfriend','direction','strategy','technology','army','camera','freedom','paper',\n    'environment','child','instance','month','truth','marketing','university','writing',\n    'article','department','difference','goal','news','audience','fishing','growth',\n    'income','marriage','user','combination','failure','meaning','medicine','philosophy',\n    'teacher','communication','night','chemistry','disease','disk','energy','nation',\n    'road','role','soup','advertising','location','success','addition','apartment','education',\n    'math','moment','painting','politics','attention','decision','event','property',\n    'shopping','student','wood','competition','distribution','entertainment','office',\n    'population','president','unit','category','cigarette','context','introduction',\n    
'opportunity','performance','driver','flight','length','magazine','newspaper',\n    'relationship','teaching','cell','dealer','finding','lake','member','message','phone',\n    'scene','appearance','association','concept','customer','death','discussion','housing',\n    'inflation','insurance','mood','woman','advice','blood','effort','expression','importance',\n    'opinion','payment','reality','responsibility','situation','skill','statement','wealth',\n    'application','city','county','depth','estate','foundation','grandmother','heart',\n    'perspective','photo','recipe','studio','topic','collection','depression','imagination',\n    'passion','percentage','resource','setting','ad','agency','college','connection',\n    'criticism','debt','description','memory','patience','secretary','solution','administration',\n    'aspect','attitude','director','personality','psychology','recommendation','response',\n    'selection','storage','version','alcohol','argument','complaint','contract','emphasis',\n    'highway','loss','membership','possession','preparation','steak','union','agreement',\n    'cancer','currency','employment','engineering','entry','interaction','mixture','preference',\n    'region','republic','tradition','virus','actor','classroom','delivery','device',\n    'difficulty','drama','election','engine','football','guidance','hotel','owner',\n    'priority','protection','suggestion','tension','variation','anxiety','atmosphere',\n    'awareness','bath','bread','candidate','climate','comparison','confusion','construction',\n    'elevator','emotion','employee','employer','guest','height','leadership','mall','manager',\n    'operation','recording','sample','transportation','charity','cousin','disaster','editor',\n    'efficiency','excitement','extent','feedback','guitar','homework','leader','mom','outcome',\n    'permission','presentation','promotion','reflection','refrigerator','resolution','revenue',\n    'session','singer','tennis','basket','bonus','cabinet','childhood','church','clothes','coffee',\n    'dinner','drawing','hair','hearing','initiative','judgment','lab','measurement','mode','mud',\n    'orange','poetry','police','possibility','procedure','queen','ratio','relation','restaurant',\n    'satisfaction','sector','signature','significance','song','tooth','town','vehicle','volume','wife',\n    'accident','airport','appointment','arrival','assumption','baseball','chapter','committee',\n    'conversation','database','enthusiasm','error','explanation','farmer','gate','girl','hall',\n    'historian','hospital','injury','instruction','maintenance','manufacturer','meal','perception','pie',\n    'poem','presence','proposal','reception','replacement','revolution','river','son','speech','tea',\n    'village','warning','winner','worker','writer','assistance','breath','buyer','chest','chocolate',\n    'conclusion','contribution','cookie','courage','dad','desk','drawer','establishment','examination',\n    'garbage','grocery','honey','impression','improvement','independence','insect','inspection',\n    'inspector','king','ladder','menu','penalty','piano','potato','profession','professor','quantity',\n    'reaction','requirement','salad','sister','supermarket','tongue','weakness','wedding','affair',\n    'ambition','analyst','apple','assignment','assistant','bathroom','bedroom','beer','birthday',\n    'celebration','championship','cheek','client','consequence','departure','diamond','dirt','ear',\n    'fortune','friendship','funeral','gene','girlfriend','hat','indication','intention','lady',\n  
  'midnight','negotiation','obligation','passenger','pizza','platform','poet','pollution',\n    'recognition','reputation','shirt','sir','speaker','stranger','surgery','sympathy','tale','throat',\n    'trainer','uncle','youth','time','work','film','water','money','example','while','business','study',\n    'game','life','form','air','day','place','number','part','field','fish','back','process','heat',\n    'hand','experience','job','book','end','point','type','home','economy','value','body','market',\n    'guide','interest','state','radio','course','company','price','size','card','list','mind','trade',\n    'line','care','group','risk','word','fat','force','key','light','training','name','school','top',\n    'amount','level','order','practice','research','sense','service','piece','web','boss','sport','fun',\n    'house','page','term','test','answer','sound','focus','matter','kind','soil','board','oil','picture',\n    'access','garden','range','rate','reason','future','site','demand','exercise','image','case','cause',\n    'coast','action','age','bad','boat','record','result','section','building','mouse','cash','class',\n    'nothing','period','plan','store','tax','side','subject','space','rule','stock','weather','chance',\n    'figure','man','model','source','beginning','earth','program','chicken','design','feature','head',\n    'material','purpose','question','rock','salt','act','birth','car','dog','object','scale','sun',\n    'note','profit','rent','speed','style','war','bank','craft','half','inside','outside','standard',\n    'bus','exchange','eye','fire','position','pressure','stress','advantage','benefit','box','frame',\n    'issue','step','cycle','face','item','metal','paint','review','room','screen','structure','view',\n    'account','ball','discipline','medium','share','balance','bit','black','bottom','choice','gift',\n    'impact','machine','shape','tool','wind','address','average','career','culture','morning','pot',\n    'sign','table','task','condition','contact','credit','egg','hope','ice','network','north','square',\n    'attempt','date','effect','link','post','star','voice','capital','challenge','friend','self','shot',\n    'brush','couple','debate','exit','front','function','lack','living','plant','plastic','spot',\n    'summer','taste','theme','track','wing','brain','button','click','desire','foot','gas','influence',\n    'notice','rain','wall','base','damage','distance','feeling','pair','savings','staff','sugar',\n    'target','text','animal','author','budget','discount','file','ground','lesson','minute','officer',\n    'phase','reference','register','sky','stage','stick','title','trouble','bowl','bridge','campaign',\n    'character','club','edge','evidence','fan','letter','lock','maximum','novel','option','pack','park',\n    'plenty','quarter','skin','sort','weight','baby','background','carry','dish','factor','fruit',\n    'glass','joint','master','muscle','red','strength','traffic','trip','vegetable','appeal','chart',\n    'gear','ideal','kitchen','land','log','mother','net','party','principle','relative','sale','season',\n    'signal','spirit','street','tree','wave','belt','bench','commission','copy','drop','minimum','path',\n    'progress','project','sea','south','status','stuff','ticket','tour','angle','blue','breakfast',\n    'confidence','daughter','degree','doctor','dot','dream','duty','essay','father','fee','finance',\n    'hour','juice','limit','luck','milk','mouth','peace','pipe','seat','stable','storm','substance',\n    
'team','trick','afternoon','bat','beach','blank','catch','chain','consideration','cream','crew',\n    'detail','gold','interview','kid','mark','match','mission','pain','pleasure','score','screw','sex',\n    'shop','shower','suit','tone','window','agent','band','block','bone','calendar','cap','coat',\n    'contest','corner','court','cup','district','door','east','finger','garage','guarantee','hole',\n    'hook','implement','layer','lecture','lie','manner','meeting','nose','parking','partner','profile',\n    'respect','rice','routine','schedule','swimming','telephone','tip','winter','airline','bag','battle',\n    'bed','bill','bother','cake','code','curve','designer','dimension','dress','ease','emergency',\n    'evening','extension','farm','fight','gap','grade','holiday','horror','horse','host','husband',\n    'loan','mistake','mountain','nail','noise','occasion','package','patient','pause','phrase','proof',\n    'race','relief','sand','sentence','shoulder','smoke','stomach','string','tourist','towel','vacation',\n    'west','wheel','wine','arm','aside','associate','bet','blow','border','branch','breast','brother',\n    'buddy','bunch','chip','coach','cross','document','draft','dust','expert','floor','god','golf',\n    'habit','iron','judge','knife','landscape','league','mail','mess','native','opening','parent',\n    'pattern','pin','pool','pound','request','salary','shame','shelter','shoe','silver','tackle','tank',\n    'trust','assist','bake','bar','bell','bike','blame','boy','brick','chair','closet','clue','collar',\n    'comment','conference','devil','diet','fear','fuel','glove','jacket','lunch','monitor','mortgage',\n    'nurse','pace','panic','peak','plane','reward','row','sandwich','shock','spite','spray','surprise',\n    'till','transition','weekend','welcome','yard','alarm','bend','bicycle','bite','blind','bottle',\n    'cable','candle','clerk','cloud','concert','counter','flower','grandfather','harm','knee','lawyer',\n    'leather','load','mirror','neck','pension','plate','purple','ruin','ship','skirt','slice','snow',\n    'specialist','stroke','switch','trash','tune','zone','anger','award','bid','bitter','boot','bug',\n    'camp','candy','carpet','cat','champion','channel','clock','comfort','cow','crack','engineer',\n    'entrance','fault','grass','guy','hell','highlight','incident','island','joke','jury','leg','lip',\n    'mate','motor','nerve','passage','pen','pride','priest','prize','promise','resident','resort','ring',\n    'roof','rope','sail','scheme','script','sock','station','toe','tower','truck','witness','a','you',\n    'it','can','will','if','one','many','most','other','use','make','good','look','help','go','great',\n    'being','few','might','still','public','read','keep','start','give','human','local','general','she',\n    'specific','long','play','feel','high','tonight','put','common','set','change','simple','past','big',\n    'possible','particular','today','major','personal','current','national','cut','natural','physical',\n    'show','try','check','second','call','move','pay','let','increase','single','individual','turn',\n    'ask','buy','guard','hold','main','offer','potential','professional','international','travel','cook',\n    'alternative','following','special','working','whole','dance','excuse','cold','commercial','low',\n    'purchase','deal','primary','worth','fall','necessary','positive','produce','search','present',\n    'spend','talk','creative','tell','cost','drive','green','support','glad','remove','return','run',\n    
'complex','due','effective','middle','regular','reserve','independent','leave','original','reach',\n    'rest','serve','watch','beautiful','charge','active','break','negative','safe','stay','visit',\n    'visual','affect','cover','report','rise','walk','white','beyond','junior','pick','unique',\n    'anything','classic','final','lift','mix','private','stop','teach','western','concern','familiar',\n    'fly','official','broad','comfortable','gain','maybe','rich','save','stand','young','fail','heavy',\n    'hello','lead','listen','valuable','worry','handle','leading','meet','release','sell','finish',\n    'normal','press','ride','secret','spread','spring','tough','wait','brown','deep','display','flow',\n    'hit','objective','shoot','touch','cancel','chemical','cry','dump','extreme','push','conflict','eat',\n    'fill','formal','jump','kick','opposite','pass','pitch','remote','total','treat','vast','abuse',\n    'beat','burn','deposit','print','raise','sleep','somewhere','advance','anywhere','consist','dark',\n    'double','draw','equal','fix','hire','internal','join','kill','sensitive','tap','win','attack',\n    'claim','constant','drag','drink','guess','minor','pull','raw','soft','solid','wear','weird',\n    'wonder','annual','count','dead','doubt','feed','forever','impress','nobody','repeat','round','sing',\n    'slide','strip','whereas','wish','combine','command','dig','divide','equivalent','hang','hunt',\n    'initial','march','mention','smell','spiritual','survey','tie','adult','brief','crazy','escape',\n    'gather','hate','prior','repair','rough','sad','scratch','sick','strike','employ','external','hurt',\n    'illegal','laugh','lay','mobile','nasty','ordinary','respond','royal','senior','split','strain',\n    'struggle','swim','train','upper','wash','yellow','convert','crash','dependent','fold','funny',\n    'grab','hide','miss','permit','quote','recover','resolve','roll','sink','slip','spare','suspect',\n    'sweet','swing','twist','upstairs','usual','abroad','brave','calm','concentrate','estimate','grand',\n    'male','mine','prompt','quiet','refuse','regret','reveal','rush','shake','shift','shine','steal',\n    'suck','surround','anybody','bear','brilliant','dare','dear','delay','drunk','female','hurry',\n    'inevitable','invite','kiss','neat','pop','punch','quit','reply','representative','resist','rip',\n    'rub','silly','smile','spell','stretch','stupid','tear','temporary','tomorrow','wake','wrap',\n    'yesterday']\n\ndef get_random_name(with_ext=True):\n    # Build a random adjective_noun_number[.txt] name for generated files and streams\n    return \"{}_{}_{}{}\".format(\n        random.choice(adjectives),\n        random.choice(nouns),\n        random.randint(0, 50000),\n        '.txt' if with_ext else '')\n\ndef get_random_file(max_filesize):\n    # Build a manifest file token of the form <position>:<size>:<filename>\n    file_start = random.randint(0, (max_filesize - 1025))\n    file_size = random.randint(0, (max_filesize - file_start))\n    file_name = get_random_name()\n    return \"{}:{}:{}\".format(file_start, file_size, file_name)\n\ndef get_stream(name, max_filesize, data_loc, args):\n    # Build a manifest stream line: <stream name> <block locator> <file tokens...>\n    files = []\n    for _ in range(random.randint(args.min_files, args.max_files)):\n        files.append(get_random_file(max_filesize))\n    stream = \"{} {} {}\".format(name, data_loc, ' '.join(files))\n    return stream\n\ndef create_substreams(depth, base_stream_name, max_filesize, data_loc, args, current_size=0):\n    current_stream = get_stream(base_stream_name, max_filesize, data_loc, args)\n    current_size += len(current_stream)\n    streams = [current_stream]\n\n    if current_size >= max_manifest_size:\n        logger.debug(\"Maximum 
manifest size reached -- finishing early at {}\".format(base_stream_name))\n    elif depth == 0:\n        logger.debug(\"Finished stream {}\".format(base_stream_name))\n    else:\n        for _ in range(random.randint(args.min_subdirs, args.max_subdirs)):\n            stream_name = base_stream_name+'/'+get_random_name(False)\n            substreams = create_substreams(depth-1, stream_name, max_filesize,\n                data_loc, args, current_size)\n            current_size += sum([len(x) for x in substreams])\n            if current_size >= max_manifest_size:\n                break\n            streams.extend(substreams)\n    return streams\n\ndef parse_arguments(arguments):\n    args = arg_parser.parse_args(arguments)\n    if args.debug:\n        logger.setLevel(logging.DEBUG)\n    if args.max_files < args.min_files:\n        arg_parser.error(\"--min-files={} should be less than or equal to --max-files={}\".format(args.min_files, args.max_files))\n    if args.min_depth < 0:\n        arg_parser.error(\"--min-depth should be at least 0\")\n    if args.max_depth < 0 or args.max_depth < args.min_depth:\n        arg_parser.error(\"--max-depth should be >= 0 and >= min-depth={}\".format(args.min_depth))\n    if args.max_subdirs < args.min_subdirs:\n        arg_parser.error(\"--min-subdirs={} should be less than or equal to --max-subdirs={}\".format(args.min_subdirs, args.max_subdirs))\n    return args\n\ndef main(arguments=None):\n    args = parse_arguments(arguments)\n    logger.info(\"Creating test collection with (min={}, max={}) files per directory and a tree depth of (min={}, max={}) and (min={}, max={}) subdirs in each depth level...\".format(args.min_files, args.max_files, args.min_depth, args.max_depth, args.min_subdirs, args.max_subdirs))\n    api = arvados.api('v1', timeout=5*60)\n    max_filesize = 1024*1024\n    data_block = ''.join([random.choice(string.printable) for i in range(max_filesize)])\n    data_loc = arvados.KeepClient(api).put(data_block)\n    streams = create_substreams(random.randint(args.min_depth, args.max_depth),\n        '.', max_filesize, data_loc, args)\n    manifest = ''\n    for s in streams:\n        if len(manifest)+len(s) > max_manifest_size:\n            logger.info(\"Skipping stream {} to avoid making a manifest bigger than 128MiB\".format(s.split(' ')[0]))\n            break\n        manifest += s + '\\n'\n    try:\n        coll_name = get_random_name(False)\n        coll = api.collections().create(\n            body={\"collection\": {\n                \"name\": coll_name,\n                \"manifest_text\": manifest\n            },\n        }).execute()\n    except Exception:\n        logger.error(\"Error creating collection with name '{}' and manifest:\\n'{}...'\\nSize: {}\".format(coll_name, manifest[0:1024], len(manifest)))\n        raise\n    logger.info(\"Created collection {} - manifest size: {}\".format(coll[\"uuid\"], len(manifest)))\n    return 0\n\nif __name__ == \"__main__\":\n    sys.exit(main())"
  },
  {
    "path": "tools/user-activity/MANIFEST.in",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ninclude agpl-3.0.txt\ninclude arvados_version.py"
  },
  {
    "path": "tools/user-activity/README.rst",
    "content": ".. Copyright (C) The Arvados Authors. All rights reserved.\n..\n.. SPDX-License-Identifier: AGPL-3.0\n\n=================\narv-user-activity\n=================\n\nOverview\n--------\n\nThis package provides the ``arv-user-activity`` tool to provide a high-level report of user activity on Arvados_ clusters.\n\n.. _Arvados: https://arvados.org/\n\nInstallation\n------------\n\nInstalling under your user account\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThis method lets you install the package without root access.  However,\nother users on the same system will need to reconfigure their shell in order\nto be able to use it. Run the following to install the package in an\nenvironment at ``~/arvclients``::\n\n  python3 -m venv ~/arvclients\n  ~/arvclients/bin/pip install arvados-user-activity\n\nCommand line tools will be installed under ``~/arvclients/bin``. You can\ntest one by running::\n\n  ~/arvclients/bin/arv-user-activity --version\n\nYou can run these tools by specifying the full path every time, or you can\nadd the directory to your shell's search path by running::\n\n  export PATH=\"$PATH:$HOME/arvclients/bin\"\n\nYou can make this search path change permanent by adding this command to\nyour shell's configuration, for example ``~/.bashrc`` if you're using bash.\nYou can test the change by running::\n\n  arv-user-activity --version\n\nInstalling on Debian and Ubuntu systems\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nArvados publishes packages for Debian 12 \"bookworm,\" Ubuntu 22.04 \"jammy,\" and Ubuntu 24.04 \"noble.\" You can install the Python SDK package on any of these distributions by running the following commands::\n\n  sudo install -d /etc/apt/keyrings\n  sudo curl -fsSL -o /etc/apt/keyrings/arvados.asc https://apt.arvados.org/pubkey.gpg\n  sudo tee /etc/apt/sources.list.d/arvados.sources >/dev/null <<EOF\n  Types: deb\n  URIs: https://apt.arvados.org/$(lsb_release -cs)\n  Suites: $(lsb_release -cs)\n  Components: main\n  Signed-by: /etc/apt/keyrings/arvados.asc\n  EOF\n  sudo apt update\n  sudo apt install python3-arvados-user-activity\n\nInstalling on Red Hat, AlmaLinux, and Rocky Linux\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nArvados publishes packages for RHEL 8 and 9, as well as distributions based on those. Note that these packages depend on, and will automatically enable, the Python 3.11 module. You can install the Python SDK package on any of these distributions by running the following commands::\n\n  sudo tee /etc/yum.repos.d/arvados.repo >/dev/null <<'EOF'\n  [arvados]\n  name=Arvados\n  baseurl=https://rpm.arvados.org/RHEL/$releasever/os/$basearch/\n  gpgcheck=1\n  gpgkey=https://rpm.arvados.org/RHEL/$releasever/RPM-GPG-KEY-arvados\n  EOF\n  sudo dnf install python3-arvados-user-activity\n\nConfiguration\n-------------\n\nThis client software needs two pieces of information to connect to\nArvados: the DNS name of the API server, and an API authorization\ntoken. `The Arvados user\ndocumentation\n<http://doc.arvados.org/user/reference/api-tokens.html>`_ describes\nhow to find this information in the Arvados Workbench, and install it\non your system.\n\nTesting and Development\n-----------------------\n\nThis package is one part of the Arvados source package, and it has\nintegration tests to check interoperability with other Arvados\ncomponents.  Our `hacking guide\n<https://dev.arvados.org/projects/arvados/wiki/Hacking_Python_SDK>`_\ndescribes how to set up a development environment and run tests.\n"
  },
  {
    "path": "tools/user-activity/agpl-3.0.txt",
    "content": "                    GNU AFFERO GENERAL PUBLIC LICENSE\n                       Version 3, 19 November 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU Affero General Public License is a free, copyleft license for\nsoftware and other kinds of works, specifically designed to ensure\ncooperation with the community in the case of network server software.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nour General Public Licenses are intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  Developers that use our General Public Licenses protect your rights\nwith two steps: (1) assert copyright on the software, and (2) offer\nyou this License which gives you legal permission to copy, distribute\nand/or modify the software.\n\n  A secondary benefit of defending all users' freedom is that\nimprovements made in alternate versions of the program, if they\nreceive widespread use, become available for other developers to\nincorporate.  Many developers of free software are heartened and\nencouraged by the resulting cooperation.  However, in the case of\nsoftware used on network servers, this result may fail to come about.\nThe GNU General Public License permits making a modified version and\nletting the public access it on a server without ever releasing its\nsource code to the public.\n\n  The GNU Affero General Public License is designed specifically to\nensure that, in such cases, the modified source code becomes available\nto the community.  It requires the operator of a network server to\nprovide the source code of the modified version running there to the\nusers of that server.  Therefore, public use of a modified version, on\na publicly accessible server, gives the public access to the source\ncode of the modified version.\n\n  An older license, called the Affero General Public License and\npublished by Affero, was designed to accomplish similar goals.  This is\na different license, not a version of the Affero GPL, but Affero has\nreleased a new version of the Affero GPL which permits relicensing under\nthis license.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. Definitions.\n\n  \"This License\" refers to version 3 of the GNU Affero General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  
\"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. 
Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  
This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  
But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  
If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  
If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  
\"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Remote Network Interaction; Use with the GNU General Public License.\n\n  Notwithstanding any other provision of this License, if you modify the\nProgram, your modified version must prominently offer all users\ninteracting with it remotely through a computer network (if your version\nsupports such interaction) an opportunity to receive the Corresponding\nSource of your version by providing access to the Corresponding Source\nfrom a network server at no charge, through some standard or customary\nmeans of facilitating copying of software.  
This Corresponding Source\nshall include the Corresponding Source for any work covered by version 3\nof the GNU General Public License that is incorporated pursuant to the\nfollowing paragraph.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU General Public License into a single\ncombined work, and to convey the resulting work.  The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the work with which it is combined will remain governed by version\n3 of the GNU General Public License.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU Affero General Public License from time to time.  Such new versions\nwill be similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU Affero General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU Affero General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU Affero General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. 
Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU Affero General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU Affero General Public License for more details.\n\n    You should have received a copy of the GNU Affero General Public License\n    along with this program.  If not, see <http://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If your software can interact with users remotely through a computer\nnetwork, you should also make sure that it provides a way for users to\nget its source.  For example, if your program is a web application, its\ninterface could display a \"Source\" link that leads users to an archive\nof the code.  There are many ways you could offer source, and different\nsolutions will be better for different programs; see section 13 for the\nspecific requirements.\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU AGPL, see\n<http://www.gnu.org/licenses/>.\n"
  },
  {
    "path": "tools/user-activity/arvados_user_activity/__init__.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n"
  },
  {
    "path": "tools/user-activity/arvados_user_activity/main.py",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport argparse\nimport sys\n\nimport arvados\nimport arvados.util\nimport datetime\nimport ciso8601\nimport csv\n\ndef parse_arguments(arguments):\n    arg_parser = argparse.ArgumentParser()\n    arg_parser.add_argument('--start', help='Start date for the report in YYYY-MM-DD format (UTC)')\n    arg_parser.add_argument('--end', help='End date for the report in YYYY-MM-DD format (UTC)')\n    arg_parser.add_argument('--days', type=int, help='Number of days before now() to start the report')\n    arg_parser.add_argument('--csv', action='store_true', help='Output in csv format (default: false)')\n    args = arg_parser.parse_args(arguments)\n\n    if args.days and (args.start or args.end):\n        arg_parser.print_help()\n        print(\"Error: either specify --days or both --start and --end\")\n        exit(1)\n\n    if not args.days and (not args.start or not args.end):\n        arg_parser.print_help()\n        print(\"\\nError: either specify --days or both --start and --end\")\n        exit(1)\n\n    if (args.start and not args.end) or (args.end and not args.start):\n        arg_parser.print_help()\n        print(\"\\nError: no start or end date found, either specify --days or both --start and --end\")\n        exit(1)\n\n    if args.days:\n        to = datetime.datetime.utcnow()\n        since = to - datetime.timedelta(days=args.days)\n\n    if args.start:\n        try:\n            since = datetime.datetime.strptime(args.start,\"%Y-%m-%d\")\n        except:\n            arg_parser.print_help()\n            print(\"\\nError: start date must be in YYYY-MM-DD format\")\n            exit(1)\n\n    if args.end:\n        try:\n            to = datetime.datetime.strptime(args.end,\"%Y-%m-%d\")\n        except:\n            arg_parser.print_help()\n            print(\"\\nError: end date must be in YYYY-MM-DD format\")\n            exit(1)\n\n    return args, since, to\n\ndef getowner(arv, uuid, owners):\n    if uuid is None:\n        return None\n    if uuid[6:11] == \"tpzed\":\n        return uuid\n\n    if uuid not in owners:\n        try:\n            gp = arv.groups().get(uuid=uuid).execute()\n            owners[uuid] = gp[\"owner_uuid\"]\n        except:\n            owners[uuid] = None\n\n    return getowner(arv, owners[uuid], owners)\n\ndef getuserinfo(arv, uuid):\n    try:\n        u = arv.users().get(uuid=uuid).execute()\n    except:\n        return \"deleted user (%susers/%s)\" % (arv.config()[\"Services\"][\"Workbench1\"][\"ExternalURL\"],\n                                                       uuid)\n    prof = \"\\n\".join(\"  %s: \\\"%s\\\"\" % (k, v) for k, v in u[\"prefs\"].get(\"profile\", {}).items() if v)\n    if prof:\n        prof = \"\\n\"+prof+\"\\n\"\n    return \"%s %s <%s> (%susers/%s)%s\" % (u[\"first_name\"], u[\"last_name\"], u[\"email\"],\n                                                       arv.config()[\"Services\"][\"Workbench1\"][\"ExternalURL\"],\n                                                       uuid, prof)\ndef getuserinfocsv(arv, uuid):\n    try:\n        u = arv.users().get(uuid=uuid).execute()\n    except:\n        return [uuid,\"deleted\",\"user\",\"\"]\n    return [uuid, u[\"first_name\"], u[\"last_name\"], u[\"email\"]]\n\n\ncollectionNameCache = {}\ndef getCollectionName(arv, uuid, pdh):\n    lookupField = uuid\n    filters = [[\"uuid\", \"=\", uuid]]\n    order = None\n    cached = uuid in 
collectionNameCache\n    # Look up by uuid if available; fall back to pdh otherwise\n    if uuid is None or len(uuid) != 27:\n        # Look up by pdh. Note that this can be misleading; the download could\n        # have happened from a collection with the same pdh but different name.\n        # We arbitrarily pick the oldest collection with the pdh to look up the\n        # name, if the uuid for the request is not known.\n        lookupField = pdh\n        filters = [[\"portable_data_hash\", \"=\", pdh]]\n        order = \"created_at\"\n        cached = pdh in collectionNameCache\n\n    if not cached:\n        u = arv.collections().list(filters=filters, order=order, limit=1, count=\"none\").execute().get(\"items\")\n        if len(u) < 1:\n            return \"(deleted)\"\n        collectionNameCache[lookupField] = u[0][\"name\"]\n    return collectionNameCache[lookupField]\n\ndef getname(u):\n    return \"\\\"%s\\\" (%s)\" % (u[\"name\"], u[\"uuid\"])\n\ndef main(arguments=None):\n    if arguments is None:\n        arguments = sys.argv[1:]\n\n    args, since, to = parse_arguments(arguments)\n\n    arv = arvados.api()\n\n    prefix = ''\n    suffix = \"\\n\"\n    if args.csv:\n        prefix = '# '\n        suffix = ''\n    print(\"%sUser activity on %s between %s and %s%s\" % (prefix, arv.config()[\"ClusterID\"],\n                                                       since.isoformat(sep=\" \", timespec=\"minutes\"),\n                                                       to.isoformat(sep=\" \", timespec=\"minutes\"), suffix))\n\n    events = arvados.util.keyset_list_all(arv.logs().list, filters=[[\"created_at\", \">=\", since.isoformat()], [\"created_at\", \"<\", to.isoformat()]])\n\n    users = {}\n    owners = {}\n\n    for e in events:\n        owner = getowner(arv, e[\"object_owner_uuid\"], owners)\n        users.setdefault(owner, [])\n        event_at = ciso8601.parse_datetime(e[\"event_at\"]).astimezone().isoformat(sep=\" \", timespec=\"minutes\")\n        loguuid = e[\"uuid\"]\n\n        if e[\"event_type\"] == \"create\" and e[\"object_uuid\"][6:11] == \"tpzed\":\n            users.setdefault(e[\"object_uuid\"], [])\n            users[e[\"object_uuid\"]].append([loguuid, event_at, \"User account created\"])\n\n        elif e[\"event_type\"] == \"update\" and e[\"object_uuid\"][6:11] == \"tpzed\":\n            pass\n\n        elif e[\"event_type\"] == \"create\" and e[\"object_uuid\"][6:11] == \"xvhdp\":\n            if e[\"properties\"][\"new_attributes\"][\"requesting_container_uuid\"] is None:\n                users[owner].append([loguuid, event_at, \"Ran container %s\" % (getname(e[\"properties\"][\"new_attributes\"]))])\n\n        elif e[\"event_type\"] == \"update\" and e[\"object_uuid\"][6:11] == \"xvhdp\":\n            pass\n\n        elif e[\"event_type\"] == \"create\" and e[\"object_uuid\"][6:11] == \"j7d0g\":\n            users[owner].append([loguuid, event_at, \"Created project %s\" % (getname(e[\"properties\"][\"new_attributes\"]))])\n\n        elif e[\"event_type\"] == \"delete\" and e[\"object_uuid\"][6:11] == \"j7d0g\":\n            users[owner].append([loguuid, event_at, \"Deleted project %s\" % (getname(e[\"properties\"][\"old_attributes\"]))])\n\n        elif e[\"event_type\"] == \"update\" and e[\"object_uuid\"][6:11] == \"j7d0g\":\n            users[owner].append([loguuid, event_at, \"Updated project %s\" % (getname(e[\"properties\"][\"new_attributes\"]))])\n\n        elif e[\"event_type\"] in (\"create\", \"update\") and e[\"object_uuid\"][6:11] == 
\"gj3su\":\n            # Don't log token activity, it is too noisy (bug #19179)\n            pass\n\n        # We want to report when a user goes through the login\n        # process, but controller doesn't do that yet, so revisit\n        # this when #19388 is done.\n\n        elif e[\"event_type\"] == \"create\" and e[\"object_uuid\"][6:11] == \"o0j2j\":\n            if e[\"properties\"][\"new_attributes\"][\"link_class\"] == \"tag\":\n                users[owner].append([event_at,\"Tagged %s\" % (e[\"properties\"][\"new_attributes\"][\"head_uuid\"])])\n            elif e[\"properties\"][\"new_attributes\"][\"link_class\"] == \"permission\":\n                users[owner].append([loguuid, event_at,\"Shared %s with %s\" % (e[\"properties\"][\"new_attributes\"][\"tail_uuid\"], e[\"properties\"][\"new_attributes\"][\"head_uuid\"])])\n            else:\n                users[owner].append([loguuid, event_at,\"%s %s %s\" % (e[\"event_type\"], e[\"object_kind\"], e[\"object_uuid\"])])\n\n        elif e[\"event_type\"] == \"delete\" and e[\"object_uuid\"][6:11] == \"o0j2j\":\n            if e[\"properties\"][\"old_attributes\"][\"link_class\"] == \"tag\":\n                users[owner].append([loguuid, event_at,\"Untagged %s\" % (e[\"properties\"][\"old_attributes\"][\"head_uuid\"])])\n            elif e[\"properties\"][\"old_attributes\"][\"link_class\"] == \"permission\":\n                users[owner].append([loguuid, event_at,\"Unshared %s with %s\" % (e[\"properties\"][\"old_attributes\"][\"tail_uuid\"], e[\"properties\"][\"old_attributes\"][\"head_uuid\"])])\n            else:\n                users[owner].append([loguuid, event_at,\"%s %s %s\" % (e[\"event_type\"], e[\"object_kind\"], e[\"object_uuid\"])])\n\n        elif e[\"event_type\"] == \"create\" and e[\"object_uuid\"][6:11] == \"4zz18\":\n            if e[\"properties\"][\"new_attributes\"][\"properties\"].get(\"type\") in (\"log\", \"output\", \"intermediate\"):\n                pass\n            else:\n                users[owner].append([loguuid, event_at,\"Created collection %s\" % (getname(e[\"properties\"][\"new_attributes\"]))])\n\n        elif e[\"event_type\"] == \"update\" and e[\"object_uuid\"][6:11] == \"4zz18\":\n            users[owner].append([loguuid, event_at,\"Updated collection %s\" % (getname(e[\"properties\"][\"new_attributes\"]))])\n\n        elif e[\"event_type\"] == \"delete\" and e[\"object_uuid\"][6:11] == \"4zz18\":\n            if e[\"properties\"][\"old_attributes\"][\"properties\"].get(\"type\") in (\"log\", \"output\", \"intermediate\"):\n                pass\n            else:\n                users[owner].append([loguuid, event_at, \"Deleted collection %s\" % (getname(e[\"properties\"][\"old_attributes\"]))])\n\n        elif e[\"event_type\"] == \"file_download\":\n            users.setdefault(e[\"object_uuid\"], [])\n            users[e[\"object_uuid\"]].append([loguuid, event_at, \"Downloaded file \\\"%s\\\" from \\\"%s\\\" (%s) (%s)\" % (\n                e[\"properties\"].get(\"collection_file_path\") or e[\"properties\"].get(\"reqPath\"),\n                getCollectionName(arv, e[\"properties\"].get(\"collection_uuid\"), e[\"properties\"].get(\"portable_data_hash\")),\n                e[\"properties\"].get(\"collection_uuid\"),\n                e[\"properties\"].get(\"portable_data_hash\"))])\n\n        elif e[\"event_type\"] == \"file_upload\":\n            users.setdefault(e[\"object_uuid\"], [])\n            users[e[\"object_uuid\"]].append([loguuid, event_at, \"Uploaded file \\\"%s\\\" to 
\\\"%s\\\" (%s)\" % (\n                e[\"properties\"].get(\"collection_file_path\") or e[\"properties\"].get(\"reqPath\"),\n                getCollectionName(arv, e[\"properties\"].get(\"collection_uuid\"), e[\"properties\"].get(\"portable_data_hash\")),\n                e[\"properties\"].get(\"collection_uuid\"))])\n\n        else:\n            users[owner].append([loguuid, event_at, \"%s %s %s\" % (e[\"event_type\"], e[\"object_kind\"], e[\"object_uuid\"])])\n\n    if args.csv:\n        csvwriter = csv.writer(sys.stdout, dialect='unix')\n\n    for k,v in users.items():\n        # Skip system user\n        if k is None or k.endswith(\"-tpzed-000000000000000\"):\n            continue\n\n        # Skip users with no activity to report\n        if not v:\n            continue\n\n        if not args.csv:\n          print(getuserinfo(arv, k))\n          for ev in v:\n              # Remove the log entry uuid, this report is intended for human consumption\n              ev.pop(0)\n              print(\"  %s\" % ' '.join(ev))\n          print(\"\")\n        else:\n          user = getuserinfocsv(arv, k)\n          for ev in v:\n            ev = user + ev\n            csvwriter.writerow(ev)\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "tools/user-activity/arvados_version.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport dataclasses\nimport os\nimport re\nimport runpy\nimport subprocess\nimport typing as t\n\nfrom pathlib import Path, PurePath, PurePosixPath\n\nimport setuptools\nimport setuptools.command.build\n\nSETUP_DIR = Path(__file__).absolute().parent\nVERSION_SCRIPT_PATH = PurePath('build', 'version-at-commit.sh')\n# Built by ArvadosPythonPackage.register\nARVADOS_PYTHON_MODULES: dict[str, 'ArvadosPythonPackage'] = {}\n\n### Metadata generation\n\n@dataclasses.dataclass\nclass ArvadosPythonPackage:\n    package_name: str\n    module_name: str\n    src_path: PurePath\n    dependencies: t.Sequence['ArvadosPythonPackage']\n\n    _VERSION_SUBS = {\n        'development-': '',\n        '~dev': '.dev',\n        '~rc': 'rc',\n    }\n\n    @classmethod\n    def register(\n            cls,\n            package_name: str,\n            module_name: str,\n            src_path: PurePath | str,\n            *dependencies: str,\n    ) -> 'ArvadosPythonPackage':\n        if not isinstance(src_path, PurePath):\n            src_path = PurePosixPath(src_path)\n        deps = [ARVADOS_PYTHON_MODULES[key] for key in dependencies]\n        this_pkg = cls(package_name, module_name, src_path, deps)\n        ARVADOS_PYTHON_MODULES[package_name] = this_pkg\n        return this_pkg\n\n    def version_file_path(self):\n        return PurePath(self.module_name, '_version.py')\n\n    def _workspace_path(self, workdir):\n        try:\n            workspace = Path(os.environ['WORKSPACE'])\n            # This will raise ValueError if they're not related,\n            # in which case we don't want to use this $WORKSPACE.\n            workdir.relative_to(workspace)\n        except KeyError:\n            # $WORKSPACE isn't set. 
Fall back to the Git worktree toplevel.\n            try:\n                git_proc = subprocess.run(\n                    ['git', 'rev-parse', '--show-toplevel'],\n                    capture_output=True,\n                    check=True,\n                    cwd=workdir,\n                    text=True,\n                )\n                workspace = Path(git_proc.stdout.removesuffix('\\n'))\n            except (subprocess.CalledProcessError, FileNotFoundError, ValueError):\n                return None\n        except ValueError:\n            return None\n        if (workspace / VERSION_SCRIPT_PATH).exists():\n            return workspace\n        else:\n            return None\n\n    def _git_version(self, workdir):\n        workspace = self._workspace_path(workdir)\n        if workspace is None:\n            return None\n        git_log_cmd = [\n            'git', 'log', '-n1', '--format=%H', '--',\n            str(VERSION_SCRIPT_PATH), str(self.src_path),\n        ]\n        git_log_cmd.extend(str(dep.src_path) for dep in self.dependencies)\n        git_log_proc = subprocess.run(\n            git_log_cmd,\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        version_proc = subprocess.run(\n            [str(VERSION_SCRIPT_PATH), git_log_proc.stdout.rstrip('\\n')],\n            check=True,\n            cwd=workspace,\n            stdout=subprocess.PIPE,\n            text=True,\n        )\n        return version_proc.stdout.rstrip('\\n')\n\n    def _sdist_version(self, workdir):\n        try:\n            pkg_info = (workdir / 'PKG-INFO').open()\n        except FileNotFoundError:\n            return None\n        with pkg_info:\n            for line in pkg_info:\n                key, _, val = line.partition(': ')\n                if key == 'Version':\n                    return val.rstrip('\\n')\n        raise Exception(\"found PKG-INFO file but not Version metadata in it\")\n\n    def get_version(self, workdir=SETUP_DIR):\n        version = (\n            # If we're building out of a distribution, we should pass that\n            # version through unchanged.\n            self._sdist_version(workdir)\n            # Otherwise follow the usual Arvados versioning rules.\n            or os.environ.get('ARVADOS_BUILDING_VERSION')\n            or self._git_version(workdir)\n        )\n        if not version:\n            raise Exception(f\"no version information available for {self.package_name}\")\n        else:\n            return re.sub(\n                r'(^development-|~dev|~rc)',\n                lambda match: self._VERSION_SUBS[match.group(0)],\n                version,\n            )\n\n    def get_dependencies_version(self, workdir=SETUP_DIR, version=None):\n        if version is None:\n            version = self.get_version(workdir)\n        # A packaged development release should be installed with other\n        # development packages built from the same source, but those\n        # dependencies may have earlier \"dev\" versions (read: less recent\n        # Git commit timestamps). This compatible version dependency\n        # expresses that as closely as possible. 
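For example (an\n        # illustrative version string): \"3.1.2.dev20240401\" here yields ('~=',\n        # '3.1.2.dev0'), so iter_dependencies would emit a requirement like\n        # \"arvados-python-client ~= 3.1.2.dev0\". 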
Allowing versions\n        # compatible with .dev0 allows any development release.\n        # Regular expression borrowed partially from\n        # <https://packaging.python.org/en/latest/specifications/version-specifiers/#version-specifiers-regex>\n        dep_ver, match_count = re.subn(r'\\.dev(0|[1-9][0-9]*)$', '.dev0', version, count=1)\n        return ('~=' if match_count else '==', dep_ver)\n\n    def iter_dependencies(self, workdir=SETUP_DIR, version=None, extras=None):\n        if extras is None:\n            extras = {}\n        dep_op, dep_ver = self.get_dependencies_version(workdir, version)\n        for dep in self.dependencies:\n            try:\n                dep_extras = f'[{\",\".join(extras[dep.package_name])}]'\n            except KeyError:\n                dep_extras = ''\n            yield f'{dep.package_name}{dep_extras} {dep_op} {dep_ver}'\n\n\n### Package database\n\nArvadosPythonPackage.register(\n    'arvados-python-client',\n    'arvados',\n    'sdk/python',\n)\nArvadosPythonPackage.register(\n    'crunchstat_summary',\n    'crunchstat_summary',\n    'tools/crunchstat-summary',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cluster-activity',\n    'arvados_cluster_activity',\n    'tools/cluster-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-cwl-runner',\n    'arvados_cwl',\n    'sdk/cwl',\n    'arvados-python-client',\n    'crunchstat_summary',\n)\nArvadosPythonPackage.register(\n    'arvados_fuse',\n    'arvados_fuse',\n    'services/fuse',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-user-activity',\n    'arvados_user_activity',\n    'tools/user-activity',\n    'arvados-python-client',\n)\nArvadosPythonPackage.register(\n    'arvados-tools',\n    'NO SRCDIR',\n    'tools/python-metapackage',\n    # Depend on every package registered above (the dict keys are package names).\n    *ARVADOS_PYTHON_MODULES,\n)\nArvadosPythonPackage.register(\n    'arvados-docker-cleaner',\n    'arvados_docker',\n    'services/dockercleaner',\n)\n\n### setuptools integration\n\nclass BuildArvadosVersion(setuptools.Command):\n    \"\"\"Write _version.py for an Arvados module\"\"\"\n    def initialize_options(self):\n        self.build_lib = None\n\n    def finalize_options(self):\n        self.set_undefined_options(\"build_py\", (\"build_lib\", \"build_lib\"))\n        arv_mod = ARVADOS_PYTHON_MODULES[self.distribution.get_name()]\n        self.out_path = Path(self.build_lib, arv_mod.version_file_path())\n\n    def run(self):\n        with self.out_path.open('w') as out_file:\n            print(f'__version__ = {self.distribution.get_version()!r}', file=out_file)\n\n    def get_outputs(self):\n        return [str(self.out_path)]\n\n    def get_source_files(self):\n        return []\n\n    def get_output_mapping(self):\n        return {}\n\n\nclass ArvadosBuildCommand(setuptools.command.build.build):\n    sub_commands = [\n        *setuptools.command.build.build.sub_commands,\n        ('build_arvados_version', None),\n    ]\n\n\nCMDCLASS = {\n    'build': ArvadosBuildCommand,\n    'build_arvados_version': BuildArvadosVersion,\n}\n"
  },
  {
    "path": "tools/user-activity/bin/arv-user-activity",
    "content": "#!/usr/bin/env python3\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport arvados_user_activity.main\n\narvados_user_activity.main.main()\n"
  },
  {
    "path": "tools/user-activity/fpm-info.sh",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\ncase \"$TARGET\" in\n    debian12 | ubuntu2204 )\n        fpm_depends+=(libcurl4)\n        ;;\n\n    debian* | ubuntu* )\n        fpm_depends+=(libcurl4t64)\n        ;;\nesac\n"
  },
  {
    "path": "tools/user-activity/pyproject.toml",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\n[build-system]\nrequires = [\"setuptools ~= 80.9\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\ndynamic = [\"dependencies\", \"version\"]\nname = \"arvados-user-activity\"\ndescription = \"Summarize user activity on an Arvados cluster from audit logs\"\nauthors = [\n  {name = \"Arvados\", email = \"info@arvados.org\"},\n]\nclassifiers = [\n  \"Development Status :: 5 - Production/Stable\",\n  \"Environment :: Console\",\n  \"Intended Audience :: Science/Research\",\n  \"Operating System :: POSIX\",\n  \"Programming Language :: Python :: 3\",\n  \"Programming Language :: Python :: 3.10\",\n  \"Programming Language :: Python :: 3.11\",\n  \"Programming Language :: Python :: 3.12\",\n  \"Programming Language :: Python :: 3.13\",\n  \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n]\nlicense = \"AGPL-3.0-only\"\nlicense-files = [\n  \"agpl-3.0.txt\",\n]\nreadme = \"README.rst\"\nrequires-python = \"~= 3.10\"\n\n[project.scripts]\narv-user-activity = \"arvados_user_activity.main:main\"\n\n[project.urls]\nHomepage = \"https://arvados.org\"\nDocumentation = \"https://doc.arvados.org\"\nRepository = \"https://github.com/arvados/arvados\"\nIssues = \"https://github.com/arvados/arvados/issues\"\nChangelog = \"https://arvados.org/releases/\"\n\n[tool.setuptools.data-files]\n\"share/doc/arvados-user-activity\" = [\n  \"agpl-3.0.txt\",\n  \"README.rst\",\n]\n\n[tool.setuptools.packages.find]\nexclude = [\"tests*\"]\n"
  },
  {
    "path": "tools/user-activity/setup.py",
    "content": "# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: AGPL-3.0\n\nimport setuptools\nimport runpy\n\nfrom pathlib import Path\n\narvados_version = runpy.run_path(Path(__file__).with_name('arvados_version.py'))\narv_mod = arvados_version['ARVADOS_PYTHON_MODULES']['arvados-user-activity']\nversion = arv_mod.get_version()\nsetuptools.setup(\n    cmdclass=arvados_version['CMDCLASS'],\n    install_requires=[\n        *arv_mod.iter_dependencies(version=version),\n    ],\n    version=version,\n)\n"
  },
  {
    "path": "tools/vocabulary-migrate/vocabulary-migrate.py",
    "content": "#!/usr/bin/env python3\n#\n# Copyright (C) The Arvados Authors. All rights reserved.\n#\n# SPDX-License-Identifier: CC-BY-SA-3.0\n\nimport argparse\nimport copy\nimport json\nimport logging\nimport os\nimport sys\n\nimport arvados\nimport arvados.util\n\nlogger = logging.getLogger('arvados.vocabulary_migrate')\nlogger.setLevel(logging.INFO)\n\nclass VocabularyError(Exception):\n    pass\n\nopts = argparse.ArgumentParser(add_help=False)\nopts.add_argument('--vocabulary-file', type=str, metavar='PATH', required=True,\n                  help=\"\"\"\nUse vocabulary definition file at PATH for migration decisions.\n\"\"\")\nopts.add_argument('--dry-run', action='store_true', default=False,\n                  help=\"\"\"\nDon't actually migrate properties, but only check if any collection/project\nshould be migrated.\n\"\"\")\nopts.add_argument('--debug', action='store_true', default=False,\n                  help=\"\"\"\nSets logging level to DEBUG.\n\"\"\")\narg_parser = argparse.ArgumentParser(\n    description='Migrate collections & projects properties to the new vocabulary format.',\n    parents=[opts])\n\ndef parse_arguments(arguments):\n    args = arg_parser.parse_args(arguments)\n    if args.debug:\n        logger.setLevel(logging.DEBUG)\n    if not os.path.isfile(args.vocabulary_file):\n        arg_parser.error(\"{} doesn't exist or isn't a file.\".format(args.vocabulary_file))\n    return args\n\ndef _label_to_id_mappings(data, obj_name):\n    result = {}\n    for obj_id, obj_data in data.items():\n        for lbl in obj_data['labels']:\n            obj_lbl = lbl['label']\n            if obj_lbl not in result:\n                result[obj_lbl] = obj_id\n            else:\n                raise VocabularyError('{} label \"{}\" for {} ID \"{}\" already seen at {} ID \"{}\".'.format(obj_name, obj_lbl, obj_name, obj_id, obj_name, result[obj_lbl]))\n    return result\n\ndef key_labels_to_ids(vocab):\n    return _label_to_id_mappings(vocab['tags'], 'key')\n\ndef value_labels_to_ids(vocab, key_id):\n    if key_id in vocab['tags'] and 'values' in vocab['tags'][key_id]:\n        return _label_to_id_mappings(vocab['tags'][key_id]['values'], 'value')\n    return {}\n\ndef migrate_properties(properties, key_map, vocab):\n    result = {}\n    for k, v in properties.items():\n        key = key_map.get(k, k)\n        value = value_labels_to_ids(vocab, key).get(v, v)\n        result[key] = value\n    return result\n\ndef main(arguments=None):\n    args = parse_arguments(arguments)\n    vocab = None\n    with open(args.vocabulary_file, 'r') as f:\n        vocab = json.load(f)\n    arv = arvados.api('v1')\n    if 'tags' not in vocab or vocab['tags'] == {}:\n        logger.warning('Empty vocabulary file, exiting.')\n        return 1\n    if not arv.users().current().execute()['is_admin']:\n        logger.error('Admin privileges required.')\n        return 1\n    key_label_to_id_map = key_labels_to_ids(vocab)\n    migrated_counter = 0\n\n    for key_label in key_label_to_id_map:\n        logger.debug('Querying objects with property key \"{}\"'.format(key_label))\n        for resource in [arv.collections(), arv.groups()]:\n            objs = arvados.util.keyset_list_all(\n                resource.list,\n                order='created_at',\n                select=['uuid', 'properties'],\n                filters=[['properties', 'exists', key_label]]\n            )\n            for o in objs:\n                props = copy.copy(o['properties'])\n                migrated_props = 
migrate_properties(props, key_label_to_id_map, vocab)\n                if not args.dry_run:\n                    logger.debug('Migrating {}: {} -> {}'.format(o['uuid'], props, migrated_props))\n                    # Update through the same resource the object came from\n                    # (collections or groups).\n                    resource.update(uuid=o['uuid'], body={\n                        'properties': migrated_props\n                    }).execute()\n                else:\n                    logger.info('Should migrate {}: {} -> {}'.format(o['uuid'], props, migrated_props))\n                migrated_counter += 1\n                if not args.dry_run and migrated_counter % 100 == 0:\n                    logger.info('Migrating {} objects...'.format(migrated_counter))\n\n    if args.dry_run and migrated_counter == 0:\n        logger.info('Nothing to do.')\n    elif not args.dry_run:\n        logger.info('Done, total objects migrated: {}.'.format(migrated_counter))\n    return 0\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n"
  }
]